From 849cf81cd4901a19ea344272376bdc9d7867fe9e Mon Sep 17 00:00:00 2001
From: Samuel Marks
Date: Sun, 11 Nov 2018 22:59:12 +1100
Subject: [PATCH 1/2] Merged postgres/postgres

---
 .dir-locals.el | 6 +- .gitignore | 4 +- COPYRIGHT | 2 +- GNUmakefile.in | 3 +- Makefile | 4 + aclocal.m4 | 1 + config/c-compiler.m4 | 284 +- config/c-library.m4 | 141 +- config/config.guess | 572 +- config/config.sub | 1652 +- config/docbook.m4 | 22 +- config/llvm.m4 | 109 + config/perl.m4 | 47 +- config/python.m4 | 20 +- config/tcl.m4 | 13 +- configure | 4109 ++- configure.in | 570 +- contrib/Makefile | 9 +- contrib/adminpack/.gitignore | 4 + contrib/adminpack/Makefile | 4 +- contrib/adminpack/adminpack--1.0--1.1.sql | 6 + contrib/adminpack/adminpack--1.1--2.0.sql | 51 + contrib/adminpack/adminpack.c | 253 +- contrib/adminpack/adminpack.control | 2 +- contrib/adminpack/expected/adminpack.out | 155 + contrib/adminpack/sql/adminpack.sql | 70 + contrib/amcheck/Makefile | 2 +- contrib/amcheck/amcheck--1.0--1.1.sql | 29 + contrib/amcheck/amcheck.control | 2 +- contrib/amcheck/expected/check_btree.out | 63 +- contrib/amcheck/sql/check_btree.sql | 42 +- contrib/amcheck/verify_nbtree.c | 837 +- contrib/auth_delay/auth_delay.c | 2 +- contrib/auto_explain/auto_explain.c | 36 +- contrib/bloom/blcost.c | 2 +- contrib/bloom/blinsert.c | 36 +- contrib/bloom/bloom.h | 2 +- contrib/bloom/blscan.c | 4 +- contrib/bloom/blutils.c | 3 +- contrib/bloom/blvacuum.c | 4 +- contrib/bloom/blvalidate.c | 2 +- contrib/bloom/expected/bloom.out | 17 + contrib/bloom/sql/bloom.sql | 11 + contrib/bloom/t/001_wal.pl | 25 +- contrib/btree_gin/Makefile | 4 +- contrib/btree_gin/btree_gin--1.2--1.3.sql | 128 + contrib/btree_gin/btree_gin.c | 38 +- contrib/btree_gin/btree_gin.control | 2 +- contrib/btree_gin/expected/bool.out | 119 + contrib/btree_gin/expected/bpchar.out | 109 + contrib/btree_gin/expected/name.out | 97 + contrib/btree_gin/expected/uuid.out | 104 + contrib/btree_gin/sql/bool.sql | 27 + contrib/btree_gin/sql/bpchar.sql | 22 + contrib/btree_gin/sql/name.sql | 21 + contrib/btree_gin/sql/uuid.sql | 28 + contrib/btree_gist/btree_bit.c | 6 +- contrib/btree_gist/btree_bytea.c | 2 +- contrib/btree_gist/btree_cash.c | 10 +- contrib/btree_gist/btree_inet.c | 13 +- contrib/btree_gist/btree_int2.c | 10 +- contrib/btree_gist/btree_int4.c | 10 +- contrib/btree_gist/btree_int8.c | 10 +- contrib/btree_gist/btree_interval.c | 4 +- contrib/btree_gist/btree_numeric.c | 2 +- contrib/btree_gist/btree_text.c | 6 +- contrib/btree_gist/btree_time.c | 2 +- contrib/btree_gist/btree_ts.c | 3 +- contrib/btree_gist/btree_utils_num.c | 40 +- contrib/btree_gist/btree_utils_num.h | 2 - contrib/btree_gist/btree_utils_var.c | 64 +- contrib/btree_gist/btree_uuid.c | 6 +- contrib/btree_gist/expected/bit.out | 6 +- contrib/btree_gist/expected/inet.out | 35 + contrib/btree_gist/expected/varbit.out | 6 +- contrib/btree_gist/sql/inet.sql | 18 + contrib/chkpass/Makefile | 21 - contrib/chkpass/chkpass--1.0.sql | 70 - contrib/chkpass/chkpass--unpackaged--1.0.sql | 13 - contrib/chkpass/chkpass.c | 175 - contrib/chkpass/chkpass.control | 5 - contrib/citext/Makefile | 3 +- contrib/citext/citext--1.4--1.5.sql | 88 + contrib/citext/citext.c | 123 +- contrib/citext/citext.control | 2 +- contrib/citext/expected/citext.out | 354 +- contrib/citext/expected/citext_1.out | 354 +- contrib/citext/sql/citext.sql | 81 + contrib/cube/Makefile | 7 +- contrib/cube/cube--1.2--1.3.sql | 12 + contrib/cube/cube--1.3--1.4.sql | 45 + contrib/cube/cube.c | 394 +- contrib/cube/cube.control | 2 +- 
contrib/cube/cubedata.h | 6 +- contrib/cube/cubeparse.y | 2 +- contrib/cube/cubescan.l | 4 + contrib/cube/expected/cube.out | 647 +- contrib/cube/expected/cube_2.out | 1747 -- contrib/cube/expected/cube_sci.out | 106 + contrib/cube/sql/cube.sql | 87 +- contrib/cube/sql/cube_sci.sql | 22 + contrib/dblink/Makefile | 2 +- contrib/dblink/dblink.c | 147 +- contrib/dblink/expected/dblink.out | 81 +- contrib/dblink/sql/dblink.sql | 38 + contrib/dict_int/dict_int.c | 6 +- contrib/dict_xsyn/dict_xsyn.c | 14 +- .../earthdistance/expected/earthdistance.out | 52 +- contrib/earthdistance/sql/earthdistance.sql | 8 +- contrib/file_fdw/data/list1.csv | 2 + contrib/file_fdw/data/list2.bad | 2 + contrib/file_fdw/data/list2.csv | 2 + contrib/file_fdw/file_fdw.c | 69 +- contrib/file_fdw/input/file_fdw.source | 27 + contrib/file_fdw/output/file_fdw.source | 90 +- contrib/fuzzystrmatch/.gitignore | 4 + contrib/fuzzystrmatch/Makefile | 2 + contrib/fuzzystrmatch/dmetaphone.c | 6 +- .../fuzzystrmatch/expected/fuzzystrmatch.out | 67 + contrib/fuzzystrmatch/fuzzystrmatch.c | 38 +- contrib/fuzzystrmatch/sql/fuzzystrmatch.sql | 21 + contrib/hstore/Makefile | 5 +- contrib/hstore/hstore--1.4--1.5.sql | 14 + contrib/hstore/hstore.control | 2 +- contrib/hstore/hstore.h | 2 +- contrib/hstore/hstore_gin.c | 4 +- contrib/hstore/hstore_gist.c | 12 +- contrib/hstore/hstore_io.c | 103 +- contrib/hstore/hstore_op.c | 50 +- contrib/hstore_plperl/Makefile | 12 +- .../hstore_plperl/expected/hstore_plperl.out | 19 + contrib/hstore_plperl/hstore_plperl.c | 18 +- contrib/hstore_plperl/sql/hstore_plperl.sql | 19 + contrib/hstore_plpython/Makefile | 8 +- .../expected/hstore_plpython.out | 20 +- contrib/hstore_plpython/hstore_plpython.c | 8 +- .../hstore_plpython/sql/hstore_plpython.sql | 16 +- contrib/intarray/_int_bool.c | 6 +- contrib/intarray/_int_gin.c | 14 +- contrib/intarray/_int_gist.c | 18 +- contrib/intarray/_int_op.c | 8 +- contrib/intarray/_int_selfuncs.c | 2 +- contrib/intarray/_int_tool.c | 23 +- contrib/intarray/_intbig_gist.c | 6 +- contrib/intarray/bench/create_test.pl | 1 + contrib/intarray/expected/_int.out | 24 + contrib/intarray/sql/_int.sql | 4 + contrib/isn/Makefile | 6 +- contrib/isn/isn--1.1--1.2.sql | 228 + contrib/isn/isn.c | 43 +- contrib/isn/isn.control | 2 +- contrib/isn/isn.h | 2 +- contrib/jsonb_plperl/.gitignore | 4 + contrib/jsonb_plperl/Makefile | 39 + .../jsonb_plperl/expected/jsonb_plperl.out | 233 + .../jsonb_plperl/expected/jsonb_plperlu.out | 260 + contrib/jsonb_plperl/jsonb_plperl--1.0.sql | 19 + contrib/jsonb_plperl/jsonb_plperl.c | 303 + contrib/jsonb_plperl/jsonb_plperl.control | 6 + contrib/jsonb_plperl/jsonb_plperlu--1.0.sql | 19 + contrib/jsonb_plperl/jsonb_plperlu.control | 6 + contrib/jsonb_plperl/sql/jsonb_plperl.sql | 104 + contrib/jsonb_plperl/sql/jsonb_plperlu.sql | 108 + contrib/jsonb_plpython/.gitignore | 6 + contrib/jsonb_plpython/Makefile | 39 + .../expected/jsonb_plpython.out | 306 + contrib/jsonb_plpython/jsonb_plpython.c | 471 + .../jsonb_plpython/jsonb_plpython2u--1.0.sql | 19 + .../jsonb_plpython/jsonb_plpython2u.control | 6 + .../jsonb_plpython/jsonb_plpython3u--1.0.sql | 19 + .../jsonb_plpython/jsonb_plpython3u.control | 6 + .../jsonb_plpython/jsonb_plpythonu--1.0.sql | 19 + .../jsonb_plpython/jsonb_plpythonu.control | 6 + contrib/jsonb_plpython/sql/jsonb_plpython.sql | 183 + contrib/lo/.gitignore | 4 + contrib/lo/Makefile | 2 + contrib/lo/expected/lo.out | 42 + contrib/lo/sql/lo.sql | 25 + contrib/ltree/Makefile | 2 + contrib/ltree/_ltree_gist.c | 6 +- 
contrib/ltree/_ltree_op.c | 16 +- contrib/ltree/expected/ltree.out | 18 + contrib/ltree/lquery_op.c | 6 +- contrib/ltree/ltree.h | 21 +- contrib/ltree/ltree_gist.c | 30 +- contrib/ltree/ltree_io.c | 4 +- contrib/ltree/ltree_op.c | 113 +- contrib/ltree/ltxtquery_io.c | 2 +- contrib/ltree/ltxtquery_op.c | 6 +- contrib/ltree/sql/ltree.sql | 3 + contrib/ltree_plpython/Makefile | 8 +- contrib/ltree_plpython/ltree_plpython.c | 8 +- contrib/oid2name/.gitignore | 2 + contrib/oid2name/Makefile | 10 +- contrib/oid2name/oid2name.c | 129 +- contrib/oid2name/t/001_basic.pl | 12 + contrib/pageinspect/Makefile | 3 +- contrib/pageinspect/brinfuncs.c | 2 +- contrib/pageinspect/btreefuncs.c | 19 +- contrib/pageinspect/expected/btree.out | 16 +- contrib/pageinspect/expected/page.out | 23 +- contrib/pageinspect/fsmfuncs.c | 2 +- contrib/pageinspect/ginfuncs.c | 2 +- contrib/pageinspect/hashfuncs.c | 11 +- contrib/pageinspect/heapfuncs.c | 35 +- contrib/pageinspect/pageinspect--1.6--1.7.sql | 26 + contrib/pageinspect/pageinspect.control | 2 +- contrib/pageinspect/pageinspect.h | 2 +- contrib/pageinspect/rawpage.c | 12 +- contrib/pageinspect/sql/page.sql | 13 +- contrib/passwordcheck/.gitignore | 4 + contrib/passwordcheck/Makefile | 2 + .../passwordcheck/expected/passwordcheck.out | 19 + contrib/passwordcheck/passwordcheck.c | 4 +- contrib/passwordcheck/sql/passwordcheck.sql | 23 + contrib/pg_prewarm/Makefile | 4 +- contrib/pg_prewarm/autoprewarm.c | 931 + contrib/pg_prewarm/pg_prewarm--1.1--1.2.sql | 14 + contrib/pg_prewarm/pg_prewarm.c | 9 +- contrib/pg_prewarm/pg_prewarm.control | 2 +- contrib/pg_standby/pg_standby.c | 124 +- contrib/pg_stat_statements/Makefile | 7 +- .../pg_stat_statements--1.5--1.6.sql | 7 + .../pg_stat_statements/pg_stat_statements.c | 122 +- .../pg_stat_statements.control | 2 +- contrib/pg_trgm/Makefile | 5 +- .../pg_trgm/expected/pg_strict_word_trgm.out | 1025 + contrib/pg_trgm/expected/pg_trgm.out | 3 + contrib/pg_trgm/pg_trgm--1.3--1.4.sql | 68 + contrib/pg_trgm/pg_trgm.control | 2 +- contrib/pg_trgm/sql/pg_strict_word_trgm.sql | 42 + contrib/pg_trgm/sql/pg_trgm.sql | 4 + contrib/pg_trgm/trgm.h | 21 +- contrib/pg_trgm/trgm_gin.c | 9 +- contrib/pg_trgm/trgm_gist.c | 22 +- contrib/pg_trgm/trgm_op.c | 252 +- contrib/pg_trgm/trgm_regexp.c | 6 +- contrib/pg_visibility/pg_visibility.c | 2 +- contrib/pgcrypto/crypt-des.c | 47 +- .../pgcrypto/expected/pgp-compression_1.out | 16 +- contrib/pgcrypto/expected/pgp-decrypt_1.out | 8 +- contrib/pgcrypto/expected/pgp-encrypt_1.out | 92 +- .../expected/pgp-pubkey-encrypt_1.out | 24 +- contrib/pgcrypto/expected/rijndael.out | 2 +- contrib/pgcrypto/imath.c | 15 +- contrib/pgcrypto/openssl.c | 2 +- contrib/pgcrypto/pgcrypto.c | 4 +- contrib/pgcrypto/pgp-armor.c | 20 +- contrib/pgcrypto/px.c | 4 +- contrib/pgcrypto/rijndael.c | 2 +- contrib/pgcrypto/sql/rijndael.sql | 2 +- contrib/pgrowlocks/pgrowlocks.c | 2 +- contrib/pgstattuple/expected/pgstattuple.out | 17 +- contrib/pgstattuple/pgstatapprox.c | 40 +- contrib/pgstattuple/pgstatindex.c | 5 +- contrib/pgstattuple/pgstattuple.c | 7 +- contrib/pgstattuple/sql/pgstattuple.sql | 2 + contrib/postgres_fdw/Makefile | 2 +- contrib/postgres_fdw/connection.c | 14 +- contrib/postgres_fdw/deparse.c | 356 +- .../postgres_fdw/expected/postgres_fdw.out | 1818 +- contrib/postgres_fdw/option.c | 4 +- contrib/postgres_fdw/postgres_fdw.c | 1035 +- contrib/postgres_fdw/postgres_fdw.h | 16 +- contrib/postgres_fdw/shippable.c | 2 +- contrib/postgres_fdw/sql/postgres_fdw.sql | 624 +- contrib/seg/Makefile | 5 +- 
contrib/seg/expected/seg.out | 28 + contrib/seg/expected/seg_1.out | 1238 - contrib/seg/seg--1.1--1.2.sql | 14 + contrib/seg/seg--1.2--1.3.sql | 45 + contrib/seg/seg.c | 22 +- contrib/seg/seg.control | 2 +- contrib/seg/segdata.h | 2 +- contrib/seg/segparse.y | 4 +- contrib/seg/segscan.l | 4 + contrib/seg/sql/seg.sql | 9 + contrib/sepgsql/database.c | 2 +- contrib/sepgsql/dml.c | 9 +- contrib/sepgsql/expected/alter.out | 8 +- contrib/sepgsql/expected/misc.out | 67 +- contrib/sepgsql/hooks.c | 2 +- contrib/sepgsql/label.c | 10 +- contrib/sepgsql/launcher | 2 +- contrib/sepgsql/proc.c | 2 +- contrib/sepgsql/relation.c | 2 +- contrib/sepgsql/schema.c | 2 +- contrib/sepgsql/selinux.c | 2 +- contrib/sepgsql/sepgsql.h | 2 +- contrib/sepgsql/uavc.c | 2 +- contrib/spi/Makefile | 9 +- contrib/spi/refint.c | 11 +- contrib/spi/timetravel--1.0.sql | 19 - contrib/spi/timetravel--unpackaged--1.0.sql | 8 - contrib/spi/timetravel.c | 553 - contrib/spi/timetravel.control | 5 - contrib/spi/timetravel.example | 81 - contrib/start-scripts/freebsd | 4 +- contrib/start-scripts/linux | 4 +- contrib/start-scripts/macos/README | 24 + .../macos/org.postgresql.postgres.plist | 17 + .../start-scripts/macos/postgres-wrapper.sh | 25 + contrib/start-scripts/osx/PostgreSQL | 111 - contrib/start-scripts/osx/README | 3 - .../start-scripts/osx/StartupParameters.plist | 33 - contrib/start-scripts/osx/install.sh | 10 - contrib/tablefunc/tablefunc.c | 44 +- contrib/tablefunc/tablefunc.h | 2 +- contrib/tcn/tcn.c | 11 +- contrib/test_decoding/Makefile | 5 +- .../expected/concurrent_ddl_dml.out | 82 +- contrib/test_decoding/expected/ddl.out | 20 +- .../expected/decoding_into_rel.out | 25 + .../test_decoding/expected/oldest_xmin.out | 30 + .../test_decoding/expected/permissions.out | 4 +- contrib/test_decoding/expected/replorigin.out | 9 +- contrib/test_decoding/expected/rewrite.out | 75 + contrib/test_decoding/expected/slot.out | 49 + .../expected/snapshot_transfer.out | 49 + contrib/test_decoding/expected/truncate.out | 27 + .../specs/concurrent_ddl_dml.spec | 2 +- contrib/test_decoding/specs/oldest_xmin.spec | 42 + .../specs/snapshot_transfer.spec | 44 + contrib/test_decoding/sql/ddl.sql | 5 +- .../test_decoding/sql/decoding_into_rel.sql | 11 + contrib/test_decoding/sql/replorigin.sql | 5 + contrib/test_decoding/sql/rewrite.sql | 42 +- contrib/test_decoding/sql/slot.sql | 23 + contrib/test_decoding/sql/truncate.sql | 13 + contrib/test_decoding/test_decoding.c | 86 +- contrib/tsm_system_rows/tsm_system_rows.c | 2 +- contrib/tsm_system_time/tsm_system_time.c | 5 +- contrib/unaccent/generate_unaccent_rules.py | 19 +- contrib/unaccent/unaccent.c | 24 +- contrib/unaccent/unaccent.rules | 221 + contrib/uuid-ossp/uuid-ossp.c | 21 +- contrib/vacuumlo/.gitignore | 2 + contrib/vacuumlo/Makefile | 10 +- contrib/vacuumlo/t/001_basic.pl | 9 + contrib/vacuumlo/vacuumlo.c | 91 +- doc/bug.template | 2 +- doc/src/sgml/Makefile | 93 +- doc/src/sgml/acronyms.sgml | 140 +- doc/src/sgml/adminpack.sgml | 103 +- doc/src/sgml/advanced.sgml | 132 +- doc/src/sgml/amcheck.sgml | 199 +- doc/src/sgml/arch-dev.sgml | 78 +- doc/src/sgml/array.sgml | 148 +- doc/src/sgml/auth-delay.sgml | 6 +- doc/src/sgml/auto-explain.sgml | 63 +- doc/src/sgml/backup.sgml | 617 +- doc/src/sgml/bgworker.sgml | 112 +- doc/src/sgml/biblio.sgml | 42 +- doc/src/sgml/bki.sgml | 786 +- doc/src/sgml/bloom.sgml | 43 +- doc/src/sgml/brin.sgml | 174 +- doc/src/sgml/btree-gin.sgml | 19 +- doc/src/sgml/btree-gist.sgml | 36 +- doc/src/sgml/btree.sgml | 443 + doc/src/sgml/catalogs.sgml | 
1476 +- doc/src/sgml/charset.sgml | 539 +- doc/src/sgml/chkpass.sgml | 95 - doc/src/sgml/citext.sgml | 130 +- doc/src/sgml/client-auth.sgml | 673 +- doc/src/sgml/config.sgml | 3376 +- doc/src/sgml/contacts.sgml | 26 - doc/src/sgml/contrib-spi.sgml | 149 +- doc/src/sgml/contrib.sgml | 43 +- doc/src/sgml/cube.sgml | 170 +- doc/src/sgml/custom-scan.sgml | 157 +- doc/src/sgml/datatype.sgml | 1071 +- doc/src/sgml/datetime.sgml | 80 +- doc/src/sgml/dblink.sgml | 352 +- doc/src/sgml/ddl.sgml | 1109 +- doc/src/sgml/dfunc.sgml | 42 +- doc/src/sgml/dict-int.sgml | 20 +- doc/src/sgml/dict-xsyn.sgml | 42 +- doc/src/sgml/diskusage.sgml | 26 +- doc/src/sgml/dml.sgml | 58 +- doc/src/sgml/docguide.sgml | 355 +- doc/src/sgml/earthdistance.sgml | 36 +- doc/src/sgml/ecpg.sgml | 1148 +- doc/src/sgml/errcodes.sgml | 26 +- doc/src/sgml/event-trigger.sgml | 123 +- doc/src/sgml/extend.sgml | 578 +- doc/src/sgml/external-projects.sgml | 30 +- doc/src/sgml/fdwhandler.sgml | 1234 +- doc/src/sgml/file-fdw.sgml | 58 +- doc/src/sgml/filelist.sgml | 15 +- doc/src/sgml/func.sgml | 3894 ++- doc/src/sgml/fuzzystrmatch.sgml | 34 +- doc/src/sgml/generate-errcodes-table.pl | 6 +- doc/src/sgml/generic-wal.sgml | 42 +- doc/src/sgml/geqo.sgml | 18 +- doc/src/sgml/gin.sgml | 328 +- doc/src/sgml/gist.sgml | 426 +- doc/src/sgml/high-availability.sgml | 706 +- doc/src/sgml/history.sgml | 24 +- doc/src/sgml/hstore.sgml | 160 +- doc/src/sgml/indexam.sgml | 525 +- doc/src/sgml/indices.sgml | 312 +- doc/src/sgml/info.sgml | 6 +- doc/src/sgml/information_schema.sgml | 433 +- doc/src/sgml/install-windows.sgml | 135 +- doc/src/sgml/installation.sgml | 913 +- doc/src/sgml/intagg.sgml | 28 +- doc/src/sgml/intarray.sgml | 72 +- doc/src/sgml/intro.sgml | 18 +- doc/src/sgml/isn.sgml | 46 +- doc/src/sgml/jit.sgml | 285 + doc/src/sgml/json.sgml | 212 +- doc/src/sgml/keywords.sgml | 39 +- doc/src/sgml/legal.sgml | 6 +- doc/src/sgml/libpq.sgml | 2007 +- doc/src/sgml/lo.sgml | 40 +- doc/src/sgml/lobj.sgml | 199 +- doc/src/sgml/logical-replication.sgml | 63 +- doc/src/sgml/logicaldecoding.sgml | 112 +- doc/src/sgml/ltree.sgml | 252 +- doc/src/sgml/maintenance.sgml | 460 +- doc/src/sgml/manage-ag.sgml | 226 +- doc/src/sgml/mk_feature_tables.pl | 4 +- doc/src/sgml/monitoring.sgml | 1681 +- doc/src/sgml/mvcc.sgml | 199 +- doc/src/sgml/nls.sgml | 28 +- doc/src/sgml/notation.sgml | 8 +- doc/src/sgml/oid2name.sgml | 121 +- doc/src/sgml/pageinspect.sgml | 52 +- doc/src/sgml/parallel.sgml | 249 +- doc/src/sgml/passwordcheck.sgml | 10 +- doc/src/sgml/perform.sgml | 426 +- doc/src/sgml/pgbuffercache.sgml | 18 +- doc/src/sgml/pgcrypto.sgml | 212 +- doc/src/sgml/pgfreespacemap.sgml | 8 +- doc/src/sgml/pgprewarm.sgml | 69 +- doc/src/sgml/pgrowlocks.sgml | 30 +- doc/src/sgml/pgstandby.sgml | 108 +- doc/src/sgml/pgstatstatements.sgml | 114 +- doc/src/sgml/pgstattuple.sgml | 48 +- doc/src/sgml/pgtrgm.sgml | 235 +- doc/src/sgml/pgvisibility.sgml | 12 +- doc/src/sgml/planstats.sgml | 58 +- doc/src/sgml/plhandler.sgml | 73 +- doc/src/sgml/plperl.sgml | 243 +- doc/src/sgml/plpgsql.sgml | 1845 +- doc/src/sgml/plpython.sgml | 208 +- doc/src/sgml/pltcl.sgml | 328 +- doc/src/sgml/postgres-fdw.sgml | 230 +- doc/src/sgml/postgres.sgml | 44 +- doc/src/sgml/problems.sgml | 22 +- doc/src/sgml/protocol.sgml | 910 +- doc/src/sgml/queries.sgml | 804 +- doc/src/sgml/query.sgml | 44 +- doc/src/sgml/rangetypes.sgml | 82 +- doc/src/sgml/recovery-config.sgml | 168 +- doc/src/sgml/ref/abort.sgml | 14 +- doc/src/sgml/ref/allfiles.sgml | 7 + doc/src/sgml/ref/alter_aggregate.sgml | 26 +- 
doc/src/sgml/ref/alter_collation.sgml | 10 +- doc/src/sgml/ref/alter_conversion.sgml | 8 +- doc/src/sgml/ref/alter_database.sgml | 48 +- .../sgml/ref/alter_default_privileges.sgml | 68 +- doc/src/sgml/ref/alter_domain.sgml | 94 +- doc/src/sgml/ref/alter_event_trigger.sgml | 22 +- doc/src/sgml/ref/alter_extension.sgml | 112 +- .../sgml/ref/alter_foreign_data_wrapper.sgml | 30 +- doc/src/sgml/ref/alter_foreign_table.sgml | 172 +- doc/src/sgml/ref/alter_function.sgml | 58 +- doc/src/sgml/ref/alter_group.sgml | 36 +- doc/src/sgml/ref/alter_index.sgml | 109 +- doc/src/sgml/ref/alter_language.sgml | 6 +- doc/src/sgml/ref/alter_large_object.sgml | 6 +- doc/src/sgml/ref/alter_materialized_view.sgml | 56 +- doc/src/sgml/ref/alter_opclass.sgml | 10 +- doc/src/sgml/ref/alter_operator.sgml | 12 +- doc/src/sgml/ref/alter_opfamily.sgml | 52 +- doc/src/sgml/ref/alter_policy.sgml | 12 +- doc/src/sgml/ref/alter_procedure.sgml | 281 + doc/src/sgml/ref/alter_publication.sgml | 34 +- doc/src/sgml/ref/alter_role.sgml | 94 +- doc/src/sgml/ref/alter_routine.sgml | 102 + doc/src/sgml/ref/alter_rule.sgml | 14 +- doc/src/sgml/ref/alter_schema.sgml | 8 +- doc/src/sgml/ref/alter_sequence.sgml | 46 +- doc/src/sgml/ref/alter_server.sgml | 28 +- doc/src/sgml/ref/alter_statistics.sgml | 14 +- doc/src/sgml/ref/alter_subscription.sgml | 38 +- doc/src/sgml/ref/alter_system.sgml | 28 +- doc/src/sgml/ref/alter_table.sgml | 557 +- doc/src/sgml/ref/alter_tablespace.sgml | 20 +- doc/src/sgml/ref/alter_trigger.sgml | 22 +- doc/src/sgml/ref/alter_tsconfig.sgml | 26 +- doc/src/sgml/ref/alter_tsdictionary.sgml | 14 +- doc/src/sgml/ref/alter_tsparser.sgml | 8 +- doc/src/sgml/ref/alter_tstemplate.sgml | 8 +- doc/src/sgml/ref/alter_type.sgml | 69 +- doc/src/sgml/ref/alter_user.sgml | 30 +- doc/src/sgml/ref/alter_user_mapping.sgml | 24 +- doc/src/sgml/ref/alter_view.sgml | 24 +- doc/src/sgml/ref/analyze.sgml | 89 +- doc/src/sgml/ref/begin.sgml | 32 +- doc/src/sgml/ref/call.sgml | 115 + doc/src/sgml/ref/checkpoint.sgml | 6 +- doc/src/sgml/ref/close.sgml | 18 +- doc/src/sgml/ref/cluster.sgml | 28 +- doc/src/sgml/ref/clusterdb.sgml | 74 +- doc/src/sgml/ref/comment.sgml | 123 +- doc/src/sgml/ref/commit.sgml | 10 +- doc/src/sgml/ref/commit_prepared.sgml | 12 +- doc/src/sgml/ref/copy.sgml | 249 +- doc/src/sgml/ref/create_access_method.sgml | 16 +- doc/src/sgml/ref/create_aggregate.sgml | 397 +- doc/src/sgml/ref/create_cast.sgml | 92 +- doc/src/sgml/ref/create_collation.sgml | 45 +- doc/src/sgml/ref/create_conversion.sgml | 14 +- doc/src/sgml/ref/create_database.sgml | 76 +- doc/src/sgml/ref/create_domain.sgml | 46 +- doc/src/sgml/ref/create_event_trigger.sgml | 38 +- doc/src/sgml/ref/create_extension.sgml | 46 +- .../sgml/ref/create_foreign_data_wrapper.sgml | 28 +- doc/src/sgml/ref/create_foreign_table.sgml | 112 +- doc/src/sgml/ref/create_function.sgml | 222 +- doc/src/sgml/ref/create_group.sgml | 26 +- doc/src/sgml/ref/create_index.sgml | 349 +- doc/src/sgml/ref/create_language.sgml | 66 +- .../sgml/ref/create_materialized_view.sgml | 42 +- doc/src/sgml/ref/create_opclass.sgml | 58 +- doc/src/sgml/ref/create_operator.sgml | 60 +- doc/src/sgml/ref/create_opfamily.sgml | 18 +- doc/src/sgml/ref/create_policy.sgml | 342 +- doc/src/sgml/ref/create_procedure.sgml | 357 + doc/src/sgml/ref/create_publication.sgml | 32 +- doc/src/sgml/ref/create_role.sgml | 154 +- doc/src/sgml/ref/create_rule.sgml | 44 +- doc/src/sgml/ref/create_schema.sgml | 50 +- doc/src/sgml/ref/create_sequence.sgml | 38 +- doc/src/sgml/ref/create_server.sgml | 32 +- 
doc/src/sgml/ref/create_statistics.sgml | 48 +- doc/src/sgml/ref/create_subscription.sgml | 42 +- doc/src/sgml/ref/create_table.sgml | 1067 +- doc/src/sgml/ref/create_table_as.sgml | 90 +- doc/src/sgml/ref/create_tablespace.sgml | 55 +- doc/src/sgml/ref/create_transform.sgml | 16 +- doc/src/sgml/ref/create_trigger.sgml | 353 +- doc/src/sgml/ref/create_tsconfig.sgml | 10 +- doc/src/sgml/ref/create_tsdictionary.sgml | 8 +- doc/src/sgml/ref/create_tsparser.sgml | 8 +- doc/src/sgml/ref/create_tstemplate.sgml | 10 +- doc/src/sgml/ref/create_type.sgml | 138 +- doc/src/sgml/ref/create_user.sgml | 32 +- doc/src/sgml/ref/create_user_mapping.sgml | 26 +- doc/src/sgml/ref/create_view.sgml | 191 +- doc/src/sgml/ref/createdb.sgml | 86 +- doc/src/sgml/ref/createuser.sgml | 132 +- doc/src/sgml/ref/deallocate.sgml | 8 +- doc/src/sgml/ref/declare.sgml | 89 +- doc/src/sgml/ref/delete.sgml | 92 +- doc/src/sgml/ref/discard.sgml | 14 +- doc/src/sgml/ref/do.sgml | 35 +- doc/src/sgml/ref/drop_access_method.sgml | 8 +- doc/src/sgml/ref/drop_aggregate.sgml | 20 +- doc/src/sgml/ref/drop_cast.sgml | 4 +- doc/src/sgml/ref/drop_collation.sgml | 12 +- doc/src/sgml/ref/drop_conversion.sgml | 8 +- doc/src/sgml/ref/drop_database.sgml | 12 +- doc/src/sgml/ref/drop_domain.sgml | 24 +- doc/src/sgml/ref/drop_event_trigger.sgml | 12 +- doc/src/sgml/ref/drop_extension.sgml | 18 +- .../sgml/ref/drop_foreign_data_wrapper.sgml | 14 +- doc/src/sgml/ref/drop_foreign_table.sgml | 14 +- doc/src/sgml/ref/drop_function.sgml | 26 +- doc/src/sgml/ref/drop_group.sgml | 8 +- doc/src/sgml/ref/drop_index.sgml | 22 +- doc/src/sgml/ref/drop_language.sgml | 18 +- doc/src/sgml/ref/drop_materialized_view.sgml | 14 +- doc/src/sgml/ref/drop_opclass.sgml | 24 +- doc/src/sgml/ref/drop_operator.sgml | 10 +- doc/src/sgml/ref/drop_opfamily.sgml | 20 +- doc/src/sgml/ref/drop_owned.sgml | 18 +- doc/src/sgml/ref/drop_policy.sgml | 6 +- doc/src/sgml/ref/drop_procedure.sgml | 162 + doc/src/sgml/ref/drop_publication.sgml | 10 +- doc/src/sgml/ref/drop_role.sgml | 22 +- doc/src/sgml/ref/drop_routine.sgml | 94 + doc/src/sgml/ref/drop_rule.sgml | 10 +- doc/src/sgml/ref/drop_schema.sgml | 14 +- doc/src/sgml/ref/drop_sequence.sgml | 14 +- doc/src/sgml/ref/drop_server.sgml | 14 +- doc/src/sgml/ref/drop_statistics.sgml | 10 +- doc/src/sgml/ref/drop_subscription.sgml | 15 +- doc/src/sgml/ref/drop_table.sgml | 22 +- doc/src/sgml/ref/drop_tablespace.sgml | 18 +- doc/src/sgml/ref/drop_transform.sgml | 8 +- doc/src/sgml/ref/drop_trigger.sgml | 16 +- doc/src/sgml/ref/drop_tsconfig.sgml | 14 +- doc/src/sgml/ref/drop_tsdictionary.sgml | 12 +- doc/src/sgml/ref/drop_tsparser.sgml | 12 +- doc/src/sgml/ref/drop_tstemplate.sgml | 12 +- doc/src/sgml/ref/drop_type.sgml | 22 +- doc/src/sgml/ref/drop_user.sgml | 8 +- doc/src/sgml/ref/drop_user_mapping.sgml | 20 +- doc/src/sgml/ref/drop_view.sgml | 14 +- doc/src/sgml/ref/dropdb.sgml | 62 +- doc/src/sgml/ref/dropuser.sgml | 60 +- doc/src/sgml/ref/ecpg-ref.sgml | 18 +- doc/src/sgml/ref/end.sgml | 16 +- doc/src/sgml/ref/execute.sgml | 22 +- doc/src/sgml/ref/explain.sgml | 26 +- doc/src/sgml/ref/fetch.sgml | 114 +- doc/src/sgml/ref/grant.sgml | 223 +- doc/src/sgml/ref/import_foreign_schema.sgml | 48 +- doc/src/sgml/ref/initdb.sgml | 103 +- doc/src/sgml/ref/insert.sgml | 201 +- doc/src/sgml/ref/listen.sgml | 22 +- doc/src/sgml/ref/load.sgml | 26 +- doc/src/sgml/ref/lock.sgml | 110 +- doc/src/sgml/ref/move.sgml | 26 +- doc/src/sgml/ref/notify.sgml | 26 +- doc/src/sgml/ref/pg_basebackup.sgml | 203 +- doc/src/sgml/ref/pg_config-ref.sgml | 100 
+- doc/src/sgml/ref/pg_controldata.sgml | 20 +- doc/src/sgml/ref/pg_ctl-ref.sgml | 100 +- doc/src/sgml/ref/pg_dump.sgml | 418 +- doc/src/sgml/ref/pg_dumpall.sgml | 231 +- doc/src/sgml/ref/pg_isready.sgml | 36 +- doc/src/sgml/ref/pg_receivewal.sgml | 93 +- doc/src/sgml/ref/pg_recvlogical.sgml | 57 +- doc/src/sgml/ref/pg_resetwal.sgml | 139 +- doc/src/sgml/ref/pg_restore.sgml | 204 +- doc/src/sgml/ref/pg_rewind.sgml | 102 +- doc/src/sgml/ref/pg_verify_checksums.sgml | 122 + doc/src/sgml/ref/pg_waldump.sgml | 20 +- doc/src/sgml/ref/pgarchivecleanup.sgml | 56 +- doc/src/sgml/ref/pgbench.sgml | 1068 +- doc/src/sgml/ref/pgtestfsync.sgml | 22 +- doc/src/sgml/ref/pgtesttiming.sgml | 24 +- doc/src/sgml/ref/pgupgrade.sgml | 362 +- doc/src/sgml/ref/postgres-ref.sgml | 128 +- doc/src/sgml/ref/postmaster.sgml | 4 +- doc/src/sgml/ref/prepare.sgml | 48 +- doc/src/sgml/ref/prepare_transaction.sgml | 51 +- doc/src/sgml/ref/psql-ref.sgml | 1060 +- doc/src/sgml/ref/reassign_owned.sgml | 30 +- .../sgml/ref/refresh_materialized_view.sgml | 21 +- doc/src/sgml/ref/reindex.sgml | 62 +- doc/src/sgml/ref/reindexdb.sgml | 94 +- doc/src/sgml/ref/release_savepoint.sgml | 16 +- doc/src/sgml/ref/reset.sgml | 28 +- doc/src/sgml/ref/revoke.sgml | 114 +- doc/src/sgml/ref/rollback.sgml | 12 +- doc/src/sgml/ref/rollback_prepared.sgml | 12 +- doc/src/sgml/ref/rollback_to.sgml | 42 +- doc/src/sgml/ref/savepoint.sgml | 24 +- doc/src/sgml/ref/security_label.sgml | 80 +- doc/src/sgml/ref/select.sgml | 789 +- doc/src/sgml/ref/select_into.sgml | 20 +- doc/src/sgml/ref/set.sgml | 70 +- doc/src/sgml/ref/set_constraints.sgml | 16 +- doc/src/sgml/ref/set_role.sgml | 46 +- doc/src/sgml/ref/set_session_auth.sgml | 24 +- doc/src/sgml/ref/set_transaction.sgml | 26 +- doc/src/sgml/ref/show.sgml | 20 +- doc/src/sgml/ref/start_transaction.sgml | 26 +- doc/src/sgml/ref/truncate.sgml | 48 +- doc/src/sgml/ref/unlisten.sgml | 16 +- doc/src/sgml/ref/update.sgml | 146 +- doc/src/sgml/ref/vacuum.sgml | 103 +- doc/src/sgml/ref/vacuumdb.sgml | 68 +- doc/src/sgml/ref/values.sgml | 84 +- doc/src/sgml/reference.sgml | 9 +- doc/src/sgml/regress.sgml | 206 +- doc/src/sgml/release-10.sgml | 7475 ++++- doc/src/sgml/release-11.sgml | 3946 +++ doc/src/sgml/release-12.sgml | 11 + doc/src/sgml/release-7.4.sgml | 804 +- doc/src/sgml/release-8.0.sgml | 1364 +- doc/src/sgml/release-8.1.sgml | 1432 +- doc/src/sgml/release-8.2.sgml | 1702 +- doc/src/sgml/release-8.3.sgml | 1772 +- doc/src/sgml/release-8.4.sgml | 2554 +- doc/src/sgml/release-9.0.sgml | 2686 +- doc/src/sgml/release-9.1.sgml | 2800 +- doc/src/sgml/release-9.2.sgml | 3156 +- doc/src/sgml/release-9.3.sgml | 4689 ++- doc/src/sgml/release-9.4.sgml | 4645 ++- doc/src/sgml/release-9.5.sgml | 4557 ++- doc/src/sgml/release-9.6.sgml | 4777 ++- doc/src/sgml/release-old.sgml | 436 +- doc/src/sgml/release.sgml | 18 +- doc/src/sgml/replication-origins.sgml | 2 +- doc/src/sgml/rowtypes.sgml | 163 +- doc/src/sgml/rules.sgml | 348 +- doc/src/sgml/runtime.sgml | 983 +- doc/src/sgml/seg.sgml | 76 +- doc/src/sgml/sepgsql.sgml | 216 +- doc/src/sgml/sourcerepo.sgml | 37 +- doc/src/sgml/sources.sgml | 233 +- doc/src/sgml/spgist.sgml | 641 +- doc/src/sgml/spi.sgml | 570 +- doc/src/sgml/sslinfo.sgml | 22 +- doc/src/sgml/standalone-install.sgml | 28 - doc/src/sgml/standalone-install.xml | 167 + doc/src/sgml/standalone-profile.xsl | 81 + doc/src/sgml/start.sgml | 32 +- doc/src/sgml/storage.sgml | 373 +- doc/src/sgml/stylesheet-common.xsl | 1 + doc/src/sgml/stylesheet-html-common.xsl | 25 + doc/src/sgml/stylesheet-man.xsl | 10 +- 
doc/src/sgml/syntax.sgml | 568 +- doc/src/sgml/tablefunc.sgml | 174 +- doc/src/sgml/tablesample-method.sgml | 130 +- doc/src/sgml/tcn.sgml | 10 +- doc/src/sgml/test-decoding.sgml | 6 +- doc/src/sgml/textsearch.sgml | 926 +- doc/src/sgml/trigger.sgml | 323 +- doc/src/sgml/tsm-system-rows.sgml | 8 +- doc/src/sgml/tsm-system-time.sgml | 8 +- doc/src/sgml/typeconv.sgml | 277 +- doc/src/sgml/unaccent.sgml | 50 +- doc/src/sgml/user-manag.sgml | 215 +- doc/src/sgml/uuid-ossp.sgml | 32 +- doc/src/sgml/vacuumlo.sgml | 77 +- doc/src/sgml/wal.sgml | 215 +- doc/src/sgml/xaggr.sgml | 188 +- doc/src/sgml/xfunc.sgml | 812 +- doc/src/sgml/xindex.sgml | 347 +- doc/src/sgml/xml2.sgml | 62 +- doc/src/sgml/xoper.sgml | 161 +- doc/src/sgml/xplang.sgml | 48 +- doc/src/sgml/xtypes.sgml | 78 +- src/Makefile | 5 + src/Makefile.global.in | 229 +- src/Makefile.shlib | 68 +- src/backend/Makefile | 134 +- src/backend/access/brin/brin.c | 173 +- src/backend/access/brin/brin_inclusion.c | 20 +- src/backend/access/brin/brin_minmax.c | 8 +- src/backend/access/brin/brin_pageops.c | 177 +- src/backend/access/brin/brin_revmap.c | 19 +- src/backend/access/brin/brin_tuple.c | 4 +- src/backend/access/brin/brin_validate.c | 2 +- src/backend/access/brin/brin_xlog.c | 25 +- src/backend/access/common/Makefile | 2 +- src/backend/access/common/bufmask.c | 12 +- src/backend/access/common/heaptuple.c | 867 +- src/backend/access/common/indextuple.c | 111 +- src/backend/access/common/printsimple.c | 24 +- src/backend/access/common/printtup.c | 212 +- src/backend/access/common/reloptions.c | 107 +- src/backend/access/common/scankey.c | 2 +- src/backend/access/common/session.c | 208 + src/backend/access/common/tupconvert.c | 249 +- src/backend/access/common/tupdesc.c | 254 +- src/backend/access/gin/README | 34 + src/backend/access/gin/ginarrayproc.c | 2 +- src/backend/access/gin/ginbtree.c | 33 +- src/backend/access/gin/ginbulk.c | 43 +- src/backend/access/gin/gindatapage.c | 47 +- src/backend/access/gin/ginentrypage.c | 24 +- src/backend/access/gin/ginfast.c | 63 +- src/backend/access/gin/ginget.c | 114 +- src/backend/access/gin/gininsert.c | 28 +- src/backend/access/gin/ginlogic.c | 2 +- src/backend/access/gin/ginpostinglist.c | 2 +- src/backend/access/gin/ginscan.c | 21 +- src/backend/access/gin/ginutil.c | 58 +- src/backend/access/gin/ginvacuum.c | 55 +- src/backend/access/gin/ginvalidate.c | 4 +- src/backend/access/gin/ginxlog.c | 136 +- src/backend/access/gist/gist.c | 49 +- src/backend/access/gist/gistbuild.c | 10 +- src/backend/access/gist/gistbuildbuffers.c | 2 +- src/backend/access/gist/gistget.c | 65 +- src/backend/access/gist/gistproc.c | 207 +- src/backend/access/gist/gistscan.c | 2 +- src/backend/access/gist/gistsplit.c | 40 +- src/backend/access/gist/gistutil.c | 108 +- src/backend/access/gist/gistvacuum.c | 23 +- src/backend/access/gist/gistvalidate.c | 7 +- src/backend/access/gist/gistxlog.c | 6 +- src/backend/access/hash/README | 85 +- src/backend/access/hash/hash.c | 172 +- src/backend/access/hash/hash_xlog.c | 8 +- src/backend/access/hash/hashfunc.c | 426 +- src/backend/access/hash/hashinsert.c | 11 +- src/backend/access/hash/hashovfl.c | 7 +- src/backend/access/hash/hashpage.c | 40 +- src/backend/access/hash/hashsearch.c | 570 +- src/backend/access/hash/hashsort.c | 3 +- src/backend/access/hash/hashutil.c | 66 +- src/backend/access/hash/hashvalidate.c | 49 +- src/backend/access/heap/README.tuplock | 8 +- src/backend/access/heap/heapam.c | 548 +- src/backend/access/heap/hio.c | 50 +- src/backend/access/heap/pruneheap.c 
| 12 +- src/backend/access/heap/rewriteheap.c | 43 +- src/backend/access/heap/syncscan.c | 2 +- src/backend/access/heap/tuptoaster.c | 50 +- src/backend/access/heap/visibilitymap.c | 34 +- src/backend/access/index/amapi.c | 2 +- src/backend/access/index/amvalidate.c | 8 +- src/backend/access/index/genam.c | 37 +- src/backend/access/index/indexam.c | 78 +- src/backend/access/nbtree/README | 89 +- src/backend/access/nbtree/nbtcompare.c | 79 +- src/backend/access/nbtree/nbtinsert.c | 341 +- src/backend/access/nbtree/nbtpage.c | 285 +- src/backend/access/nbtree/nbtree.c | 276 +- src/backend/access/nbtree/nbtsearch.c | 86 +- src/backend/access/nbtree/nbtsort.c | 979 +- src/backend/access/nbtree/nbtutils.c | 181 +- src/backend/access/nbtree/nbtvalidate.c | 34 +- src/backend/access/nbtree/nbtxlog.c | 90 +- src/backend/access/rmgrdesc/brindesc.c | 2 +- src/backend/access/rmgrdesc/clogdesc.c | 2 +- src/backend/access/rmgrdesc/committsdesc.c | 2 +- src/backend/access/rmgrdesc/dbasedesc.c | 2 +- src/backend/access/rmgrdesc/genericdesc.c | 2 +- src/backend/access/rmgrdesc/gindesc.c | 2 +- src/backend/access/rmgrdesc/gistdesc.c | 2 +- src/backend/access/rmgrdesc/hashdesc.c | 2 +- src/backend/access/rmgrdesc/heapdesc.c | 18 +- src/backend/access/rmgrdesc/logicalmsgdesc.c | 2 +- src/backend/access/rmgrdesc/mxactdesc.c | 2 +- src/backend/access/rmgrdesc/nbtdesc.c | 22 +- src/backend/access/rmgrdesc/relmapdesc.c | 2 +- src/backend/access/rmgrdesc/replorigindesc.c | 2 +- src/backend/access/rmgrdesc/seqdesc.c | 2 +- src/backend/access/rmgrdesc/smgrdesc.c | 3 +- src/backend/access/rmgrdesc/spgdesc.c | 2 +- src/backend/access/rmgrdesc/standbydesc.c | 2 +- src/backend/access/rmgrdesc/tblspcdesc.c | 2 +- src/backend/access/rmgrdesc/xactdesc.c | 44 +- src/backend/access/rmgrdesc/xlogdesc.c | 2 +- src/backend/access/spgist/Makefile | 3 +- src/backend/access/spgist/README | 6 +- src/backend/access/spgist/spgdoinsert.c | 44 +- src/backend/access/spgist/spginsert.c | 17 +- src/backend/access/spgist/spgkdtreeproc.c | 82 +- src/backend/access/spgist/spgproc.c | 88 + src/backend/access/spgist/spgquadtreeproc.c | 112 +- src/backend/access/spgist/spgscan.c | 909 +- src/backend/access/spgist/spgtextproc.c | 63 +- src/backend/access/spgist/spgutils.c | 142 +- src/backend/access/spgist/spgvacuum.c | 25 +- src/backend/access/spgist/spgvalidate.c | 81 +- src/backend/access/spgist/spgxlog.c | 11 +- src/backend/access/tablesample/bernoulli.c | 5 +- src/backend/access/tablesample/system.c | 5 +- src/backend/access/tablesample/tablesample.c | 2 +- src/backend/access/transam/README | 4 +- src/backend/access/transam/README.parallel | 13 +- src/backend/access/transam/clog.c | 280 +- src/backend/access/transam/commit_ts.c | 16 +- src/backend/access/transam/generic_xlog.c | 25 +- src/backend/access/transam/multixact.c | 22 +- src/backend/access/transam/parallel.c | 460 +- src/backend/access/transam/slru.c | 15 +- src/backend/access/transam/subtrans.c | 2 +- src/backend/access/transam/timeline.c | 10 +- src/backend/access/transam/transam.c | 2 +- src/backend/access/transam/twophase.c | 305 +- src/backend/access/transam/twophase_rmgr.c | 2 +- src/backend/access/transam/varsup.c | 74 +- src/backend/access/transam/xact.c | 515 +- src/backend/access/transam/xlog.c | 1284 +- src/backend/access/transam/xlogarchive.c | 37 +- src/backend/access/transam/xlogfuncs.c | 103 +- src/backend/access/transam/xloginsert.c | 26 +- src/backend/access/transam/xlogreader.c | 119 +- src/backend/access/transam/xlogutils.c | 47 +- 
src/backend/bootstrap/bootparse.y | 62 +- src/backend/bootstrap/bootscanner.l | 100 +- src/backend/bootstrap/bootstrap.c | 98 +- src/backend/catalog/.gitignore | 2 + src/backend/catalog/Catalog.pm | 611 +- src/backend/catalog/Makefile | 99 +- src/backend/catalog/README | 111 - src/backend/catalog/aclchk.c | 838 +- src/backend/catalog/catalog.c | 52 +- src/backend/catalog/dependency.c | 95 +- src/backend/catalog/genbki.pl | 896 +- src/backend/catalog/heap.c | 762 +- src/backend/catalog/index.c | 933 +- src/backend/catalog/indexing.c | 7 +- src/backend/catalog/information_schema.sql | 47 +- src/backend/catalog/namespace.c | 133 +- src/backend/catalog/objectaccess.c | 2 +- src/backend/catalog/objectaddress.c | 436 +- src/backend/catalog/partition.c | 2394 +- src/backend/catalog/pg_aggregate.c | 39 +- src/backend/catalog/pg_collation.c | 3 +- src/backend/catalog/pg_constraint.c | 773 +- src/backend/catalog/pg_conversion.c | 3 +- src/backend/catalog/pg_db_role_setting.c | 2 +- src/backend/catalog/pg_depend.c | 17 +- src/backend/catalog/pg_enum.c | 141 +- src/backend/catalog/pg_inherits.c | 92 +- src/backend/catalog/pg_largeobject.c | 2 +- src/backend/catalog/pg_namespace.c | 7 +- src/backend/catalog/pg_operator.c | 15 +- src/backend/catalog/pg_proc.c | 122 +- src/backend/catalog/pg_publication.c | 23 +- src/backend/catalog/pg_range.c | 2 +- src/backend/catalog/pg_shdepend.c | 2 +- src/backend/catalog/pg_subscription.c | 142 +- src/backend/catalog/pg_type.c | 172 +- src/backend/catalog/sql_features.txt | 8 +- src/backend/catalog/storage.c | 3 +- src/backend/catalog/system_views.sql | 52 +- src/backend/catalog/toasting.c | 42 +- src/backend/commands/aggregatecmds.c | 99 +- src/backend/commands/alter.c | 26 +- src/backend/commands/amcmds.c | 2 +- src/backend/commands/analyze.c | 332 +- src/backend/commands/async.c | 43 +- src/backend/commands/cluster.c | 83 +- src/backend/commands/collationcmds.c | 105 +- src/backend/commands/comment.c | 6 +- src/backend/commands/constraint.c | 4 +- src/backend/commands/conversioncmds.c | 7 +- src/backend/commands/copy.c | 788 +- src/backend/commands/createas.c | 9 +- src/backend/commands/dbcommands.c | 30 +- src/backend/commands/define.c | 2 +- src/backend/commands/discard.c | 4 +- src/backend/commands/dropcmds.c | 42 +- src/backend/commands/event_trigger.c | 244 +- src/backend/commands/explain.c | 635 +- src/backend/commands/extension.c | 20 +- src/backend/commands/foreigncmds.c | 26 +- src/backend/commands/functioncmds.c | 446 +- src/backend/commands/indexcmds.c | 766 +- src/backend/commands/lockcmds.c | 156 +- src/backend/commands/matview.c | 196 +- src/backend/commands/opclasscmds.c | 101 +- src/backend/commands/operatorcmds.c | 77 +- src/backend/commands/policy.c | 36 +- src/backend/commands/portalcmds.c | 44 +- src/backend/commands/prepare.c | 7 +- src/backend/commands/proclang.c | 18 +- src/backend/commands/publicationcmds.c | 36 +- src/backend/commands/schemacmds.c | 12 +- src/backend/commands/seclabel.c | 6 +- src/backend/commands/sequence.c | 43 +- src/backend/commands/statscmds.c | 183 +- src/backend/commands/subscriptioncmds.c | 53 +- src/backend/commands/tablecmds.c | 2951 +- src/backend/commands/tablespace.c | 32 +- src/backend/commands/trigger.c | 1401 +- src/backend/commands/tsearchcmds.c | 33 +- src/backend/commands/typecmds.c | 299 +- src/backend/commands/user.c | 4 +- src/backend/commands/vacuum.c | 641 +- src/backend/commands/vacuumlazy.c | 180 +- src/backend/commands/variable.c | 59 +- src/backend/commands/view.c | 24 +- src/backend/common.mk 
| 13 +- src/backend/executor/Makefile | 2 +- src/backend/executor/README | 4 +- src/backend/executor/execAmi.c | 2 +- src/backend/executor/execCurrent.c | 163 +- src/backend/executor/execExpr.c | 915 +- src/backend/executor/execExprInterp.c | 1087 +- src/backend/executor/execGrouping.c | 262 +- src/backend/executor/execIndexing.c | 18 +- src/backend/executor/execJunk.c | 4 +- src/backend/executor/execMain.c | 873 +- src/backend/executor/execParallel.c | 834 +- src/backend/executor/execPartition.c | 1874 ++ src/backend/executor/execProcnode.c | 179 +- src/backend/executor/execReplication.c | 60 +- src/backend/executor/execSRF.c | 42 +- src/backend/executor/execScan.c | 97 +- src/backend/executor/execTuples.c | 649 +- src/backend/executor/execUtils.c | 386 +- src/backend/executor/functions.c | 117 +- src/backend/executor/instrument.c | 5 +- src/backend/executor/nodeAgg.c | 1304 +- src/backend/executor/nodeAppend.c | 613 +- src/backend/executor/nodeBitmapAnd.c | 16 +- src/backend/executor/nodeBitmapHeapscan.c | 292 +- src/backend/executor/nodeBitmapIndexscan.c | 20 +- src/backend/executor/nodeBitmapOr.c | 16 +- src/backend/executor/nodeCtescan.c | 47 +- src/backend/executor/nodeCustom.c | 50 +- src/backend/executor/nodeForeignscan.c | 77 +- src/backend/executor/nodeFunctionscan.c | 41 +- src/backend/executor/nodeGather.c | 197 +- src/backend/executor/nodeGatherMerge.c | 483 +- src/backend/executor/nodeGroup.c | 59 +- src/backend/executor/nodeHash.c | 1902 +- src/backend/executor/nodeHashjoin.c | 668 +- src/backend/executor/nodeIndexonlyscan.c | 145 +- src/backend/executor/nodeIndexscan.c | 206 +- src/backend/executor/nodeLimit.c | 97 +- src/backend/executor/nodeLockRows.c | 18 +- src/backend/executor/nodeMaterial.c | 24 +- src/backend/executor/nodeMergeAppend.c | 157 +- src/backend/executor/nodeMergejoin.c | 62 +- src/backend/executor/nodeModifyTable.c | 1118 +- .../executor/nodeNamedtuplestorescan.c | 32 +- src/backend/executor/nodeNestloop.c | 31 +- src/backend/executor/nodeProjectSet.c | 54 +- src/backend/executor/nodeRecursiveunion.c | 27 +- src/backend/executor/nodeResult.c | 27 +- src/backend/executor/nodeSamplescan.c | 83 +- src/backend/executor/nodeSeqscan.c | 104 +- src/backend/executor/nodeSetOp.c | 74 +- src/backend/executor/nodeSort.c | 121 +- src/backend/executor/nodeSubplan.c | 188 +- src/backend/executor/nodeSubqueryscan.c | 33 +- src/backend/executor/nodeTableFuncscan.c | 84 +- src/backend/executor/nodeTidscan.c | 57 +- src/backend/executor/nodeUnique.c | 44 +- src/backend/executor/nodeValuesscan.c | 70 +- src/backend/executor/nodeWindowAgg.c | 1189 +- src/backend/executor/nodeWorktablescan.c | 24 +- src/backend/executor/spi.c | 300 +- src/backend/executor/tqueue.c | 1136 +- src/backend/executor/tstoreReceiver.c | 15 +- src/backend/foreign/foreign.c | 12 +- src/backend/jit/Makefile | 22 + src/backend/jit/README | 295 + src/backend/jit/jit.c | 211 + src/backend/jit/llvm/Makefile | 62 + src/backend/jit/llvm/llvmjit.c | 914 + src/backend/jit/llvm/llvmjit_deform.c | 707 + src/backend/jit/llvm/llvmjit_error.cpp | 141 + src/backend/jit/llvm/llvmjit_expr.c | 2687 ++ src/backend/jit/llvm/llvmjit_inline.cpp | 876 + src/backend/jit/llvm/llvmjit_types.c | 107 + src/backend/jit/llvm/llvmjit_wrap.cpp | 46 + src/backend/lib/Makefile | 4 +- src/backend/lib/README | 12 +- src/backend/lib/binaryheap.c | 2 +- src/backend/lib/bipartite_match.c | 4 +- src/backend/lib/bloomfilter.c | 306 + src/backend/lib/dshash.c | 899 + src/backend/lib/hyperloglog.c | 2 +- src/backend/lib/ilist.c | 2 +- 
src/backend/lib/knapsack.c | 7 +- src/backend/lib/pairingheap.c | 2 +- src/backend/lib/rbtree.c | 480 +- src/backend/lib/stringinfo.c | 29 +- src/backend/libpq/Makefile | 2 +- src/backend/libpq/README.SSL | 22 + src/backend/libpq/auth-scram.c | 311 +- src/backend/libpq/auth.c | 306 +- src/backend/libpq/be-fsstubs.c | 121 +- src/backend/libpq/be-secure-common.c | 194 + src/backend/libpq/be-secure-openssl.c | 290 +- src/backend/libpq/be-secure.c | 31 +- src/backend/libpq/crypt.c | 2 +- src/backend/libpq/hba.c | 87 +- src/backend/libpq/ifaddr.c | 8 +- src/backend/libpq/pqcomm.c | 14 +- src/backend/libpq/pqformat.c | 128 +- src/backend/libpq/pqmq.c | 20 +- src/backend/libpq/pqsignal.c | 2 +- src/backend/main/main.c | 18 +- src/backend/nls.mk | 2 +- src/backend/nodes/bitmapset.c | 219 +- src/backend/nodes/copyfuncs.c | 170 +- src/backend/nodes/equalfuncs.c | 63 +- src/backend/nodes/extensible.c | 2 +- src/backend/nodes/list.c | 71 +- src/backend/nodes/makefuncs.c | 24 +- src/backend/nodes/nodeFuncs.c | 83 +- src/backend/nodes/nodes.c | 2 +- src/backend/nodes/outfuncs.c | 183 +- src/backend/nodes/params.c | 66 +- src/backend/nodes/print.c | 2 +- src/backend/nodes/read.c | 92 +- src/backend/nodes/readfuncs.c | 177 +- src/backend/nodes/tidbitmap.c | 61 +- src/backend/nodes/value.c | 4 +- src/backend/optimizer/README | 74 +- src/backend/optimizer/geqo/geqo_copy.c | 2 +- src/backend/optimizer/geqo/geqo_eval.c | 26 +- src/backend/optimizer/geqo/geqo_main.c | 2 +- src/backend/optimizer/geqo/geqo_misc.c | 4 +- src/backend/optimizer/geqo/geqo_pool.c | 2 +- src/backend/optimizer/geqo/geqo_random.c | 2 +- src/backend/optimizer/geqo/geqo_selection.c | 2 +- src/backend/optimizer/path/allpaths.c | 721 +- src/backend/optimizer/path/clausesel.c | 16 +- src/backend/optimizer/path/costsize.c | 388 +- src/backend/optimizer/path/equivclass.c | 79 +- src/backend/optimizer/path/indxpath.c | 74 +- src/backend/optimizer/path/joinpath.c | 182 +- src/backend/optimizer/path/joinrels.c | 346 +- src/backend/optimizer/path/pathkeys.c | 169 +- src/backend/optimizer/path/tidpath.c | 2 +- src/backend/optimizer/plan/analyzejoins.c | 62 +- src/backend/optimizer/plan/createplan.c | 664 +- src/backend/optimizer/plan/initsplan.c | 57 +- src/backend/optimizer/plan/planagg.c | 10 +- src/backend/optimizer/plan/planmain.c | 38 +- src/backend/optimizer/plan/planner.c | 2892 +- src/backend/optimizer/plan/setrefs.c | 125 +- src/backend/optimizer/plan/subselect.c | 131 +- src/backend/optimizer/prep/prepjointree.c | 83 +- src/backend/optimizer/prep/prepqual.c | 75 +- src/backend/optimizer/prep/preptlist.c | 107 +- src/backend/optimizer/prep/prepunion.c | 1153 +- src/backend/optimizer/util/clauses.c | 483 +- src/backend/optimizer/util/joininfo.c | 2 +- src/backend/optimizer/util/orclauses.c | 2 +- src/backend/optimizer/util/pathnode.c | 639 +- src/backend/optimizer/util/placeholder.c | 4 +- src/backend/optimizer/util/plancat.c | 395 +- src/backend/optimizer/util/predtest.c | 440 +- src/backend/optimizer/util/relnode.c | 477 +- src/backend/optimizer/util/restrictinfo.c | 7 +- src/backend/optimizer/util/tlist.c | 148 +- src/backend/optimizer/util/var.c | 6 +- src/backend/parser/Makefile | 17 +- src/backend/parser/analyze.c | 197 +- src/backend/parser/check_keywords.pl | 9 +- src/backend/parser/gram.y | 1152 +- src/backend/parser/parse_agg.c | 29 +- src/backend/parser/parse_clause.c | 296 +- src/backend/parser/parse_coerce.c | 238 +- src/backend/parser/parse_collate.c | 2 +- src/backend/parser/parse_cte.c | 2 +- src/backend/parser/parse_enr.c | 
2 +- src/backend/parser/parse_expr.c | 27 +- src/backend/parser/parse_func.c | 421 +- src/backend/parser/parse_node.c | 2 +- src/backend/parser/parse_oper.c | 9 +- src/backend/parser/parse_param.c | 2 +- src/backend/parser/parse_relation.c | 220 +- src/backend/parser/parse_target.c | 86 +- src/backend/parser/parse_type.c | 40 +- src/backend/parser/parse_utilcmd.c | 755 +- src/backend/parser/parser.c | 2 +- src/backend/parser/scan.l | 74 +- src/backend/parser/scansup.c | 4 +- src/backend/partitioning/Makefile | 17 + src/backend/partitioning/partbounds.c | 2307 ++ src/backend/partitioning/partprune.c | 3338 ++ src/backend/po/de.po | 14600 +++++---- src/backend/po/fr.po | 3841 +-- src/backend/po/it.po | 6706 ++-- src/backend/po/ko.po | 13297 ++++---- src/backend/po/ru.po | 9702 +++--- src/backend/po/sv.po | 26101 ++++++++++++++++ src/backend/port/.gitignore | 1 - src/backend/port/Makefile | 2 +- src/backend/port/atomics.c | 23 +- src/backend/port/dynloader/aix.c | 7 - src/backend/port/dynloader/aix.h | 39 - src/backend/port/dynloader/cygwin.c | 3 - src/backend/port/dynloader/cygwin.h | 36 - src/backend/port/dynloader/darwin.c | 138 - src/backend/port/dynloader/darwin.h | 8 - src/backend/port/dynloader/freebsd.c | 106 - src/backend/port/dynloader/freebsd.h | 58 - src/backend/port/dynloader/hpux.c | 68 - src/backend/port/dynloader/hpux.h | 25 - src/backend/port/dynloader/linux.c | 133 - src/backend/port/dynloader/linux.h | 44 - src/backend/port/dynloader/netbsd.c | 106 - src/backend/port/dynloader/netbsd.h | 59 - src/backend/port/dynloader/openbsd.c | 106 - src/backend/port/dynloader/openbsd.h | 58 - src/backend/port/dynloader/solaris.c | 7 - src/backend/port/dynloader/solaris.h | 38 - src/backend/port/dynloader/win32.c | 85 - src/backend/port/dynloader/win32.h | 19 - src/backend/port/posix_sema.c | 12 +- src/backend/port/sysv_sema.c | 2 +- src/backend/port/sysv_shmem.c | 2 +- src/backend/port/tas/sunstudio_sparc.s | 4 +- src/backend/port/tas/sunstudio_x86.s | 2 +- src/backend/port/win32/crashdump.c | 2 +- src/backend/port/win32/mingwcompat.c | 2 +- src/backend/port/win32/signal.c | 2 +- src/backend/port/win32/socket.c | 38 +- src/backend/port/win32/timer.c | 2 +- src/backend/port/win32_sema.c | 2 +- src/backend/port/win32_shmem.c | 129 +- src/backend/postmaster/autovacuum.c | 167 +- src/backend/postmaster/bgworker.c | 101 +- src/backend/postmaster/bgwriter.c | 40 +- src/backend/postmaster/checkpointer.c | 45 +- src/backend/postmaster/fork_process.c | 2 +- src/backend/postmaster/pgarch.c | 9 +- src/backend/postmaster/pgstat.c | 184 +- src/backend/postmaster/postmaster.c | 336 +- src/backend/postmaster/startup.c | 26 +- src/backend/postmaster/syslogger.c | 200 +- src/backend/postmaster/walwriter.c | 40 +- src/backend/regex/regc_lex.c | 1 + src/backend/regex/regc_pg_locale.c | 72 +- src/backend/regex/regcomp.c | 3 +- src/backend/regex/regexport.c | 2 +- src/backend/regex/regprefix.c | 2 +- src/backend/replication/basebackup.c | 522 +- .../replication/libpqwalreceiver/Makefile | 3 +- .../libpqwalreceiver/libpqwalreceiver.c | 41 +- src/backend/replication/logical/decode.c | 88 +- src/backend/replication/logical/launcher.c | 146 +- src/backend/replication/logical/logical.c | 108 +- .../replication/logical/logicalfuncs.c | 10 +- src/backend/replication/logical/message.c | 2 +- src/backend/replication/logical/origin.c | 89 +- src/backend/replication/logical/proto.c | 85 +- src/backend/replication/logical/relation.c | 98 +- .../replication/logical/reorderbuffer.c | 729 +- 
src/backend/replication/logical/snapbuild.c | 124 +- src/backend/replication/logical/tablesync.c | 39 +- src/backend/replication/logical/worker.c | 223 +- src/backend/replication/pgoutput/pgoutput.c | 143 +- src/backend/replication/repl_gram.y | 38 +- src/backend/replication/repl_scanner.l | 7 +- src/backend/replication/slot.c | 151 +- src/backend/replication/slotfuncs.c | 252 +- src/backend/replication/syncrep.c | 10 +- src/backend/replication/syncrep_gram.y | 2 +- src/backend/replication/syncrep_scanner.l | 5 +- src/backend/replication/walreceiver.c | 139 +- src/backend/replication/walreceiverfuncs.c | 13 +- src/backend/replication/walsender.c | 345 +- src/backend/rewrite/rewriteDefine.c | 23 +- src/backend/rewrite/rewriteHandler.c | 369 +- src/backend/rewrite/rewriteManip.c | 104 +- src/backend/rewrite/rewriteRemove.c | 2 +- src/backend/rewrite/rewriteSupport.c | 2 +- src/backend/rewrite/rowsecurity.c | 32 +- src/backend/snowball/Makefile | 16 +- src/backend/snowball/README | 53 +- src/backend/snowball/dict_snowball.c | 94 +- .../libstemmer/stem_ISO_8859_1_danish.c | 130 +- .../libstemmer/stem_ISO_8859_1_dutch.c | 298 +- .../libstemmer/stem_ISO_8859_1_english.c | 542 +- .../libstemmer/stem_ISO_8859_1_finnish.c | 513 +- .../libstemmer/stem_ISO_8859_1_french.c | 577 +- .../libstemmer/stem_ISO_8859_1_german.c | 418 +- .../libstemmer/stem_ISO_8859_1_indonesian.c | 414 + .../libstemmer/stem_ISO_8859_1_irish.c | 490 + .../libstemmer/stem_ISO_8859_1_italian.c | 372 +- .../libstemmer/stem_ISO_8859_1_norwegian.c | 120 +- .../libstemmer/stem_ISO_8859_1_porter.c | 329 +- .../libstemmer/stem_ISO_8859_1_portuguese.c | 461 +- .../libstemmer/stem_ISO_8859_1_spanish.c | 375 +- .../libstemmer/stem_ISO_8859_1_swedish.c | 102 +- ...ungarian.c => stem_ISO_8859_2_hungarian.c} | 751 +- .../libstemmer/stem_ISO_8859_2_romanian.c | 286 +- .../snowball/libstemmer/stem_KOI8_R_russian.c | 313 +- .../snowball/libstemmer/stem_UTF_8_arabic.c | 1683 + .../snowball/libstemmer/stem_UTF_8_danish.c | 130 +- .../snowball/libstemmer/stem_UTF_8_dutch.c | 298 +- .../snowball/libstemmer/stem_UTF_8_english.c | 542 +- .../snowball/libstemmer/stem_UTF_8_finnish.c | 515 +- .../snowball/libstemmer/stem_UTF_8_french.c | 577 +- .../snowball/libstemmer/stem_UTF_8_german.c | 412 +- .../libstemmer/stem_UTF_8_hungarian.c | 763 +- .../libstemmer/stem_UTF_8_indonesian.c | 414 + .../snowball/libstemmer/stem_UTF_8_irish.c | 490 + .../snowball/libstemmer/stem_UTF_8_italian.c | 372 +- .../libstemmer/stem_UTF_8_lithuanian.c | 850 + .../snowball/libstemmer/stem_UTF_8_nepali.c | 424 + .../libstemmer/stem_UTF_8_norwegian.c | 120 +- .../snowball/libstemmer/stem_UTF_8_porter.c | 329 +- .../libstemmer/stem_UTF_8_portuguese.c | 461 +- .../snowball/libstemmer/stem_UTF_8_romanian.c | 286 +- .../snowball/libstemmer/stem_UTF_8_russian.c | 359 +- .../snowball/libstemmer/stem_UTF_8_spanish.c | 375 +- .../snowball/libstemmer/stem_UTF_8_swedish.c | 102 +- .../snowball/libstemmer/stem_UTF_8_tamil.c | 1915 ++ .../snowball/libstemmer/stem_UTF_8_turkish.c | 1680 +- src/backend/snowball/libstemmer/utilities.c | 43 +- src/backend/snowball/snowball.sql.in | 2 +- src/backend/snowball/snowball_func.sql.in | 2 +- src/backend/snowball/stopwords/nepali.stop | 304 + src/backend/statistics/dependencies.c | 123 +- src/backend/statistics/extended_stats.c | 30 +- src/backend/statistics/mvdistinct.c | 13 +- src/backend/storage/buffer/buf_init.c | 2 +- src/backend/storage/buffer/buf_table.c | 2 +- src/backend/storage/buffer/bufmgr.c | 42 +- src/backend/storage/buffer/freelist.c 
| 19 +- src/backend/storage/buffer/localbuf.c | 8 +- src/backend/storage/file/Makefile | 2 +- src/backend/storage/file/buffile.c | 403 +- src/backend/storage/file/copydir.c | 55 +- src/backend/storage/file/fd.c | 878 +- src/backend/storage/file/reinit.c | 111 +- src/backend/storage/file/sharedfileset.c | 244 + src/backend/storage/freespace/README | 12 +- src/backend/storage/freespace/freespace.c | 229 +- src/backend/storage/freespace/fsmpage.c | 2 +- src/backend/storage/freespace/indexfsm.c | 2 +- src/backend/storage/ipc/Makefile | 6 +- src/backend/storage/ipc/barrier.c | 311 + src/backend/storage/ipc/dsm.c | 70 +- src/backend/storage/ipc/dsm_impl.c | 230 +- src/backend/storage/ipc/ipc.c | 29 +- src/backend/storage/ipc/ipci.c | 2 +- src/backend/storage/ipc/latch.c | 10 +- src/backend/storage/ipc/pmsignal.c | 130 +- src/backend/storage/ipc/procarray.c | 237 +- src/backend/storage/ipc/procsignal.c | 2 +- src/backend/storage/ipc/shm_mq.c | 427 +- src/backend/storage/ipc/shm_toc.c | 8 +- src/backend/storage/ipc/shmem.c | 10 +- src/backend/storage/ipc/shmqueue.c | 10 +- src/backend/storage/ipc/signalfuncs.c | 215 + src/backend/storage/ipc/sinval.c | 2 +- src/backend/storage/ipc/sinvaladt.c | 4 +- src/backend/storage/ipc/standby.c | 205 +- src/backend/storage/large_object/inv_api.c | 154 +- src/backend/storage/lmgr/Makefile | 5 +- src/backend/storage/lmgr/README-SSI | 21 +- src/backend/storage/lmgr/condition_variable.c | 251 +- src/backend/storage/lmgr/deadlock.c | 16 +- .../storage/lmgr/generate-lwlocknames.pl | 6 +- src/backend/storage/lmgr/lmgr.c | 125 +- src/backend/storage/lmgr/lock.c | 119 +- src/backend/storage/lmgr/lwlock.c | 28 +- src/backend/storage/lmgr/predicate.c | 68 +- src/backend/storage/lmgr/proc.c | 13 +- src/backend/storage/lmgr/s_lock.c | 6 +- src/backend/storage/lmgr/spin.c | 2 +- src/backend/storage/page/bufpage.c | 48 +- src/backend/storage/page/checksum.c | 2 +- src/backend/storage/page/itemptr.c | 2 +- src/backend/storage/smgr/md.c | 88 +- src/backend/storage/smgr/smgr.c | 73 +- src/backend/storage/smgr/smgrtype.c | 2 +- src/backend/tcop/dest.c | 35 +- src/backend/tcop/fastpath.c | 18 +- src/backend/tcop/postgres.c | 473 +- src/backend/tcop/pquery.c | 32 +- src/backend/tcop/utility.c | 182 +- src/backend/tsearch/Makefile | 2 +- src/backend/tsearch/dict.c | 2 +- src/backend/tsearch/dict_ispell.c | 8 +- src/backend/tsearch/dict_simple.c | 6 +- src/backend/tsearch/dict_synonym.c | 6 +- src/backend/tsearch/dict_thesaurus.c | 8 +- .../tsearch/dicts/hunspell_sample_long.affix | 25 +- .../tsearch/dicts/hunspell_sample_long.dict | 3 + .../tsearch/dicts/hunspell_sample_num.affix | 8 + .../tsearch/dicts/hunspell_sample_num.dict | 3 +- src/backend/tsearch/regis.c | 7 +- src/backend/tsearch/spell.c | 33 +- src/backend/tsearch/to_tsany.c | 170 +- src/backend/tsearch/ts_locale.c | 36 +- src/backend/tsearch/ts_parse.c | 2 +- src/backend/tsearch/ts_selfuncs.c | 2 +- src/backend/tsearch/ts_typanalyze.c | 2 +- src/backend/tsearch/ts_utils.c | 2 +- src/backend/tsearch/wparser.c | 6 +- src/backend/tsearch/wparser_def.c | 163 +- src/backend/utils/.gitignore | 1 + src/backend/utils/Gen_dummy_probes.pl | 25 +- src/backend/utils/Gen_dummy_probes.sed | 2 +- src/backend/utils/Gen_fmgrtab.pl | 183 +- src/backend/utils/Makefile | 69 +- src/backend/utils/adt/Makefile | 11 +- src/backend/utils/adt/acl.c | 179 +- src/backend/utils/adt/amutils.c | 163 +- src/backend/utils/adt/array_expanded.c | 6 +- src/backend/utils/adt/array_selfuncs.c | 2 +- src/backend/utils/adt/array_typanalyze.c | 4 +- 
src/backend/utils/adt/array_userfuncs.c | 15 +- src/backend/utils/adt/arrayfuncs.c | 273 +- src/backend/utils/adt/arrayutils.c | 5 +- src/backend/utils/adt/ascii.c | 2 +- src/backend/utils/adt/bool.c | 2 +- src/backend/utils/adt/cash.c | 43 +- src/backend/utils/adt/char.c | 2 +- src/backend/utils/adt/cryptohashes.c | 169 + src/backend/utils/adt/date.c | 191 +- src/backend/utils/adt/datetime.c | 88 +- src/backend/utils/adt/datum.c | 15 +- src/backend/utils/adt/dbsize.c | 13 +- src/backend/utils/adt/domains.c | 11 +- src/backend/utils/adt/encode.c | 12 +- src/backend/utils/adt/enum.c | 44 +- src/backend/utils/adt/expandeddatum.c | 6 +- src/backend/utils/adt/expandedrecord.c | 1638 + src/backend/utils/adt/float.c | 1413 +- src/backend/utils/adt/format_type.c | 134 +- src/backend/utils/adt/formatting.c | 893 +- src/backend/utils/adt/genfile.c | 154 +- src/backend/utils/adt/geo_ops.c | 2408 +- src/backend/utils/adt/geo_selfuncs.c | 2 +- src/backend/utils/adt/geo_spgist.c | 229 +- src/backend/utils/adt/inet_cidr_ntop.c | 22 +- src/backend/utils/adt/inet_net_pton.c | 44 +- src/backend/utils/adt/int.c | 362 +- src/backend/utils/adt/int8.c | 421 +- src/backend/utils/adt/json.c | 234 +- src/backend/utils/adt/jsonb.c | 405 +- src/backend/utils/adt/jsonb_gin.c | 14 +- src/backend/utils/adt/jsonb_op.c | 89 +- src/backend/utils/adt/jsonb_util.c | 49 +- src/backend/utils/adt/jsonfuncs.c | 808 +- src/backend/utils/adt/levenshtein.c | 2 +- src/backend/utils/adt/like.c | 2 +- src/backend/utils/adt/like_match.c | 2 +- src/backend/utils/adt/lockfuncs.c | 2 +- src/backend/utils/adt/mac.c | 11 +- src/backend/utils/adt/mac8.c | 15 +- src/backend/utils/adt/misc.c | 190 +- src/backend/utils/adt/nabstime.c | 1571 - src/backend/utils/adt/name.c | 4 +- src/backend/utils/adt/network.c | 25 +- src/backend/utils/adt/network_gist.c | 20 +- src/backend/utils/adt/network_selfuncs.c | 2 +- src/backend/utils/adt/network_spgist.c | 2 +- src/backend/utils/adt/numeric.c | 489 +- src/backend/utils/adt/numutils.c | 161 +- src/backend/utils/adt/oid.c | 4 +- src/backend/utils/adt/oracle_compat.c | 22 +- src/backend/utils/adt/orderedsetaggs.c | 203 +- src/backend/utils/adt/partitionfuncs.c | 154 + src/backend/utils/adt/pg_locale.c | 30 +- src/backend/utils/adt/pg_lsn.c | 8 +- src/backend/utils/adt/pg_upgrade_support.c | 18 +- src/backend/utils/adt/pgstatfuncs.c | 37 +- src/backend/utils/adt/pseudotypes.c | 2 +- src/backend/utils/adt/quote.c | 2 +- src/backend/utils/adt/rangetypes.c | 202 +- src/backend/utils/adt/rangetypes_gist.c | 130 +- src/backend/utils/adt/rangetypes_selfuncs.c | 23 +- src/backend/utils/adt/rangetypes_spgist.c | 58 +- src/backend/utils/adt/rangetypes_typanalyze.c | 7 +- src/backend/utils/adt/regexp.c | 196 +- src/backend/utils/adt/regproc.c | 4 +- src/backend/utils/adt/ri_triggers.c | 1121 +- src/backend/utils/adt/rowtypes.c | 200 +- src/backend/utils/adt/ruleutils.c | 562 +- src/backend/utils/adt/selfuncs.c | 671 +- src/backend/utils/adt/tid.c | 18 +- src/backend/utils/adt/timestamp.c | 177 +- src/backend/utils/adt/trigfuncs.c | 2 +- src/backend/utils/adt/tsginidx.c | 8 +- src/backend/utils/adt/tsgistidx.c | 20 +- src/backend/utils/adt/tsquery.c | 403 +- src/backend/utils/adt/tsquery_cleanup.c | 2 +- src/backend/utils/adt/tsquery_gist.c | 15 +- src/backend/utils/adt/tsquery_op.c | 2 +- src/backend/utils/adt/tsquery_rewrite.c | 2 +- src/backend/utils/adt/tsquery_util.c | 2 +- src/backend/utils/adt/tsrank.c | 2 +- src/backend/utils/adt/tsvector.c | 10 +- src/backend/utils/adt/tsvector_op.c | 2 +- 
src/backend/utils/adt/tsvector_parser.c | 38 +- src/backend/utils/adt/txid.c | 18 +- src/backend/utils/adt/uuid.c | 10 +- src/backend/utils/adt/varbit.c | 16 +- src/backend/utils/adt/varchar.c | 20 +- src/backend/utils/adt/varlena.c | 299 +- src/backend/utils/adt/version.c | 2 +- src/backend/utils/adt/windowfuncs.c | 2 +- src/backend/utils/adt/xid.c | 6 +- src/backend/utils/adt/xml.c | 99 +- src/backend/utils/cache/Makefile | 6 +- src/backend/utils/cache/attoptcache.c | 4 +- src/backend/utils/cache/catcache.c | 704 +- src/backend/utils/cache/evtcache.c | 2 +- src/backend/utils/cache/inval.c | 54 +- src/backend/utils/cache/lsyscache.c | 247 +- src/backend/utils/cache/partcache.c | 964 + src/backend/utils/cache/plancache.c | 48 +- src/backend/utils/cache/relcache.c | 877 +- src/backend/utils/cache/relfilenodemap.c | 6 +- src/backend/utils/cache/relmapper.c | 128 +- src/backend/utils/cache/spccache.c | 2 +- src/backend/utils/cache/syscache.c | 101 +- src/backend/utils/cache/ts_cache.c | 10 +- src/backend/utils/cache/typcache.c | 1059 +- src/backend/utils/errcodes.txt | 5 +- src/backend/utils/error/assert.c | 2 +- src/backend/utils/error/elog.c | 311 +- src/backend/utils/fmgr/dfmgr.c | 50 +- src/backend/utils/fmgr/fmgr.c | 144 +- src/backend/utils/fmgr/funcapi.c | 252 +- src/backend/utils/generate-errcodes.pl | 2 +- src/backend/utils/hash/dynahash.c | 16 +- src/backend/utils/hash/hashfn.c | 2 +- src/backend/utils/hash/pg_crc.c | 2 +- src/backend/utils/init/globals.c | 24 +- src/backend/utils/init/miscinit.c | 146 +- src/backend/utils/init/postinit.c | 81 +- src/backend/utils/mb/Unicode/Makefile | 26 +- src/backend/utils/mb/Unicode/UCS_to_BIG5.pl | 16 +- src/backend/utils/mb/Unicode/UCS_to_EUC_CN.pl | 10 +- .../utils/mb/Unicode/UCS_to_EUC_JIS_2004.pl | 16 +- src/backend/utils/mb/Unicode/UCS_to_EUC_JP.pl | 520 +- src/backend/utils/mb/Unicode/UCS_to_EUC_KR.pl | 22 +- src/backend/utils/mb/Unicode/UCS_to_EUC_TW.pl | 10 +- .../utils/mb/Unicode/UCS_to_GB18030.pl | 10 +- src/backend/utils/mb/Unicode/UCS_to_JOHAB.pl | 22 +- .../utils/mb/Unicode/UCS_to_SHIFT_JIS_2004.pl | 16 +- src/backend/utils/mb/Unicode/UCS_to_SJIS.pl | 52 +- src/backend/utils/mb/Unicode/UCS_to_UHC.pl | 16 +- src/backend/utils/mb/Unicode/UCS_to_most.pl | 6 +- src/backend/utils/mb/Unicode/big5_to_utf8.map | 2 +- src/backend/utils/mb/Unicode/convutils.pm | 61 +- .../utils/mb/Unicode/euc_cn_to_utf8.map | 2 +- .../utils/mb/Unicode/euc_jis_2004_to_utf8.map | 2 +- .../utils/mb/Unicode/euc_jp_to_utf8.map | 2 +- .../utils/mb/Unicode/euc_kr_to_utf8.map | 2 +- .../utils/mb/Unicode/euc_tw_to_utf8.map | 2 +- .../utils/mb/Unicode/gb18030_to_utf8.map | 2 +- src/backend/utils/mb/Unicode/gbk_to_utf8.map | 2 +- .../utils/mb/Unicode/iso8859_10_to_utf8.map | 2 +- .../utils/mb/Unicode/iso8859_13_to_utf8.map | 2 +- .../utils/mb/Unicode/iso8859_14_to_utf8.map | 2 +- .../utils/mb/Unicode/iso8859_15_to_utf8.map | 2 +- .../utils/mb/Unicode/iso8859_16_to_utf8.map | 2 +- .../utils/mb/Unicode/iso8859_2_to_utf8.map | 2 +- .../utils/mb/Unicode/iso8859_3_to_utf8.map | 2 +- .../utils/mb/Unicode/iso8859_4_to_utf8.map | 2 +- .../utils/mb/Unicode/iso8859_5_to_utf8.map | 2 +- .../utils/mb/Unicode/iso8859_6_to_utf8.map | 2 +- .../utils/mb/Unicode/iso8859_7_to_utf8.map | 2 +- .../utils/mb/Unicode/iso8859_8_to_utf8.map | 2 +- .../utils/mb/Unicode/iso8859_9_to_utf8.map | 2 +- .../utils/mb/Unicode/johab_to_utf8.map | 2 +- .../utils/mb/Unicode/koi8r_to_utf8.map | 2 +- .../utils/mb/Unicode/koi8u_to_utf8.map | 2 +- .../mb/Unicode/shift_jis_2004_to_utf8.map | 2 +- 
src/backend/utils/mb/Unicode/sjis_to_utf8.map | 2 +- src/backend/utils/mb/Unicode/uhc_to_utf8.map | 2 +- src/backend/utils/mb/Unicode/utf8_to_big5.map | 2 +- .../utils/mb/Unicode/utf8_to_euc_cn.map | 2 +- .../utils/mb/Unicode/utf8_to_euc_jis_2004.map | 2 +- .../utils/mb/Unicode/utf8_to_euc_jp.map | 2 +- .../utils/mb/Unicode/utf8_to_euc_kr.map | 2 +- .../utils/mb/Unicode/utf8_to_euc_tw.map | 2 +- .../utils/mb/Unicode/utf8_to_gb18030.map | 2 +- src/backend/utils/mb/Unicode/utf8_to_gbk.map | 2 +- .../utils/mb/Unicode/utf8_to_iso8859_10.map | 2 +- .../utils/mb/Unicode/utf8_to_iso8859_13.map | 2 +- .../utils/mb/Unicode/utf8_to_iso8859_14.map | 2 +- .../utils/mb/Unicode/utf8_to_iso8859_15.map | 2 +- .../utils/mb/Unicode/utf8_to_iso8859_16.map | 2 +- .../utils/mb/Unicode/utf8_to_iso8859_2.map | 2 +- .../utils/mb/Unicode/utf8_to_iso8859_3.map | 2 +- .../utils/mb/Unicode/utf8_to_iso8859_4.map | 2 +- .../utils/mb/Unicode/utf8_to_iso8859_5.map | 2 +- .../utils/mb/Unicode/utf8_to_iso8859_6.map | 2 +- .../utils/mb/Unicode/utf8_to_iso8859_7.map | 2 +- .../utils/mb/Unicode/utf8_to_iso8859_8.map | 2 +- .../utils/mb/Unicode/utf8_to_iso8859_9.map | 2 +- .../utils/mb/Unicode/utf8_to_johab.map | 2 +- .../utils/mb/Unicode/utf8_to_koi8r.map | 2 +- .../utils/mb/Unicode/utf8_to_koi8u.map | 2 +- .../mb/Unicode/utf8_to_shift_jis_2004.map | 2 +- src/backend/utils/mb/Unicode/utf8_to_sjis.map | 2 +- src/backend/utils/mb/Unicode/utf8_to_uhc.map | 2 +- .../utils/mb/Unicode/utf8_to_win1250.map | 2 +- .../utils/mb/Unicode/utf8_to_win1251.map | 2 +- .../utils/mb/Unicode/utf8_to_win1252.map | 2 +- .../utils/mb/Unicode/utf8_to_win1253.map | 2 +- .../utils/mb/Unicode/utf8_to_win1254.map | 2 +- .../utils/mb/Unicode/utf8_to_win1255.map | 2 +- .../utils/mb/Unicode/utf8_to_win1256.map | 2 +- .../utils/mb/Unicode/utf8_to_win1257.map | 2 +- .../utils/mb/Unicode/utf8_to_win1258.map | 2 +- .../utils/mb/Unicode/utf8_to_win866.map | 2 +- .../utils/mb/Unicode/utf8_to_win874.map | 2 +- .../utils/mb/Unicode/win1250_to_utf8.map | 2 +- .../utils/mb/Unicode/win1251_to_utf8.map | 2 +- .../utils/mb/Unicode/win1252_to_utf8.map | 2 +- .../utils/mb/Unicode/win1253_to_utf8.map | 2 +- .../utils/mb/Unicode/win1254_to_utf8.map | 2 +- .../utils/mb/Unicode/win1255_to_utf8.map | 2 +- .../utils/mb/Unicode/win1256_to_utf8.map | 2 +- .../utils/mb/Unicode/win1257_to_utf8.map | 2 +- .../utils/mb/Unicode/win1258_to_utf8.map | 2 +- .../utils/mb/Unicode/win866_to_utf8.map | 2 +- .../utils/mb/Unicode/win874_to_utf8.map | 2 +- src/backend/utils/mb/conv.c | 2 +- .../ascii_and_mic/ascii_and_mic.c | 2 +- .../cyrillic_and_mic/cyrillic_and_mic.c | 2 +- .../euc2004_sjis2004/euc2004_sjis2004.c | 2 +- .../euc_cn_and_mic/euc_cn_and_mic.c | 2 +- .../euc_jp_and_sjis/euc_jp_and_sjis.c | 2 +- .../conversion_procs/euc_jp_and_sjis/sjis.map | 2 +- .../euc_kr_and_mic/euc_kr_and_mic.c | 2 +- .../conversion_procs/euc_tw_and_big5/big5.c | 4 +- .../euc_tw_and_big5/euc_tw_and_big5.c | 2 +- .../latin2_and_win1250/latin2_and_win1250.c | 2 +- .../latin_and_mic/latin_and_mic.c | 2 +- .../utf8_and_ascii/utf8_and_ascii.c | 2 +- .../utf8_and_big5/utf8_and_big5.c | 2 +- .../utf8_and_cyrillic/utf8_and_cyrillic.c | 2 +- .../utf8_and_euc2004/utf8_and_euc2004.c | 2 +- .../utf8_and_euc_cn/utf8_and_euc_cn.c | 2 +- .../utf8_and_euc_jp/utf8_and_euc_jp.c | 2 +- .../utf8_and_euc_kr/utf8_and_euc_kr.c | 2 +- .../utf8_and_euc_tw/utf8_and_euc_tw.c | 2 +- .../utf8_and_gb18030/utf8_and_gb18030.c | 2 +- .../utf8_and_gbk/utf8_and_gbk.c | 2 +- .../utf8_and_iso8859/utf8_and_iso8859.c | 2 +- 
.../utf8_and_iso8859_1/utf8_and_iso8859_1.c | 2 +- .../utf8_and_johab/utf8_and_johab.c | 2 +- .../utf8_and_sjis/utf8_and_sjis.c | 2 +- .../utf8_and_sjis2004/utf8_and_sjis2004.c | 2 +- .../utf8_and_uhc/utf8_and_uhc.c | 2 +- .../utf8_and_win/utf8_and_win.c | 2 +- src/backend/utils/mb/mbutils.c | 35 +- src/backend/utils/mb/wchar.c | 12 +- src/backend/utils/misc/backend_random.c | 2 +- src/backend/utils/misc/guc-file.l | 6 +- src/backend/utils/misc/guc.c | 686 +- src/backend/utils/misc/help_config.c | 3 +- src/backend/utils/misc/pg_config.c | 7 +- src/backend/utils/misc/pg_controldata.c | 93 +- src/backend/utils/misc/pg_rusage.c | 2 +- src/backend/utils/misc/postgresql.conf.sample | 130 +- src/backend/utils/misc/ps_status.c | 13 +- src/backend/utils/misc/queryenvironment.c | 4 +- src/backend/utils/misc/rls.c | 3 +- src/backend/utils/misc/sampling.c | 2 +- src/backend/utils/misc/superuser.c | 2 +- src/backend/utils/misc/timeout.c | 4 +- src/backend/utils/misc/tzparser.c | 8 +- src/backend/utils/mmgr/Makefile | 2 +- src/backend/utils/mmgr/README | 37 +- src/backend/utils/mmgr/aset.c | 526 +- src/backend/utils/mmgr/dsa.c | 154 +- src/backend/utils/mmgr/freepage.c | 2 +- src/backend/utils/mmgr/generation.c | 838 + src/backend/utils/mmgr/mcxt.c | 358 +- src/backend/utils/mmgr/memdebug.c | 4 +- src/backend/utils/mmgr/portalmem.c | 254 +- src/backend/utils/mmgr/slab.c | 140 +- src/backend/utils/probes.d | 4 +- src/backend/utils/resowner/README | 4 - src/backend/utils/resowner/resowner.c | 146 +- src/backend/utils/sort/Makefile | 2 +- src/backend/utils/sort/gen_qsort_tuple.pl | 4 + src/backend/utils/sort/logtape.c | 239 +- src/backend/utils/sort/sharedtuplestore.c | 633 + src/backend/utils/sort/sortsupport.c | 2 +- src/backend/utils/sort/tuplesort.c | 1132 +- src/backend/utils/sort/tuplestore.c | 16 +- src/backend/utils/time/combocid.c | 6 +- src/backend/utils/time/snapmgr.c | 44 +- src/backend/utils/time/tqual.c | 91 +- src/bin/Makefile | 3 +- src/bin/initdb/Makefile | 4 +- src/bin/initdb/findtimezone.c | 138 +- src/bin/initdb/initdb.c | 378 +- src/bin/initdb/nls.mk | 2 +- src/bin/initdb/po/de.po | 270 +- src/bin/initdb/po/fr.po | 2 +- src/bin/initdb/po/it.po | 29 +- src/bin/initdb/po/ko.po | 414 +- src/bin/initdb/po/ru.po | 241 +- src/bin/initdb/po/sv.po | 388 +- src/bin/initdb/po/tr.po | 1060 + src/bin/initdb/po/vi.po | 1044 + src/bin/initdb/t/001_initdb.pl | 45 +- src/bin/pg_archivecleanup/.gitignore | 2 + src/bin/pg_archivecleanup/Makefile | 7 + src/bin/pg_archivecleanup/nls.mk | 2 +- src/bin/pg_archivecleanup/po/de.po | 18 +- src/bin/pg_archivecleanup/po/es.po | 37 +- src/bin/pg_archivecleanup/po/fr.po | 29 +- src/bin/pg_archivecleanup/po/ja.po | 174 + src/bin/pg_archivecleanup/po/ko.po | 179 + src/bin/pg_archivecleanup/po/ru.po | 73 +- src/bin/pg_archivecleanup/po/tr.po | 173 + src/bin/pg_archivecleanup/po/vi.po | 183 + .../t/010_pg_archivecleanup.pl | 98 + src/bin/pg_basebackup/Makefile | 4 +- src/bin/pg_basebackup/nls.mk | 2 +- src/bin/pg_basebackup/pg_basebackup.c | 186 +- src/bin/pg_basebackup/pg_receivewal.c | 101 +- src/bin/pg_basebackup/pg_recvlogical.c | 25 +- src/bin/pg_basebackup/po/de.po | 672 +- src/bin/pg_basebackup/po/es.po | 565 +- src/bin/pg_basebackup/po/fr.po | 673 +- src/bin/pg_basebackup/po/it.po | 302 +- src/bin/pg_basebackup/po/ja.po | 1449 + src/bin/pg_basebackup/po/ko.po | 772 +- src/bin/pg_basebackup/po/ru.po | 534 +- src/bin/pg_basebackup/po/sv.po | 1398 + src/bin/pg_basebackup/po/tr.po | 1332 + src/bin/pg_basebackup/po/vi.po | 1520 + src/bin/pg_basebackup/receivelog.c 
| 58 +- src/bin/pg_basebackup/receivelog.h | 3 +- src/bin/pg_basebackup/streamutil.c | 231 +- src/bin/pg_basebackup/streamutil.h | 7 +- src/bin/pg_basebackup/t/010_pg_basebackup.pl | 325 +- src/bin/pg_basebackup/t/020_pg_receivewal.pl | 68 +- src/bin/pg_basebackup/t/030_pg_recvlogical.pl | 18 +- src/bin/pg_basebackup/walmethods.c | 78 +- src/bin/pg_basebackup/walmethods.h | 2 +- src/bin/pg_config/Makefile | 2 +- src/bin/pg_config/nls.mk | 2 +- src/bin/pg_config/pg_config.c | 2 +- src/bin/pg_config/po/it.po | 22 +- src/bin/pg_config/po/ko.po | 16 +- src/bin/pg_config/po/ru.po | 15 +- src/bin/pg_config/po/vi.po | 300 + src/bin/pg_controldata/Makefile | 2 +- src/bin/pg_controldata/nls.mk | 2 +- src/bin/pg_controldata/pg_controldata.c | 50 +- src/bin/pg_controldata/po/de.po | 227 +- src/bin/pg_controldata/po/it.po | 27 +- src/bin/pg_controldata/po/ko.po | 173 +- src/bin/pg_controldata/po/ru.po | 5 +- src/bin/pg_controldata/po/sv.po | 283 +- src/bin/pg_controldata/po/tr.po | 479 + src/bin/pg_controldata/po/vi.po | 476 + .../pg_controldata/t/001_pg_controldata.pl | 25 +- src/bin/pg_ctl/Makefile | 4 +- src/bin/pg_ctl/nls.mk | 2 +- src/bin/pg_ctl/pg_ctl.c | 198 +- src/bin/pg_ctl/po/de.po | 287 +- src/bin/pg_ctl/po/fr.po | 2 +- src/bin/pg_ctl/po/it.po | 323 +- src/bin/pg_ctl/po/ko.po | 398 +- src/bin/pg_ctl/po/ru.po | 409 +- src/bin/pg_ctl/po/sv.po | 347 +- src/bin/pg_ctl/po/tr.po | 852 + src/bin/pg_ctl/t/001_start_stop.pl | 46 +- src/bin/pg_ctl/t/004_logrotate.pl | 42 + src/bin/pg_dump/Makefile | 4 +- src/bin/pg_dump/common.c | 127 +- src/bin/pg_dump/compress_io.c | 2 +- src/bin/pg_dump/compress_io.h | 2 +- src/bin/pg_dump/dumputils.c | 372 +- src/bin/pg_dump/dumputils.h | 18 +- src/bin/pg_dump/nls.mk | 2 +- src/bin/pg_dump/parallel.c | 10 +- src/bin/pg_dump/parallel.h | 2 +- src/bin/pg_dump/pg_backup.h | 18 +- src/bin/pg_dump/pg_backup_archiver.c | 850 +- src/bin/pg_dump/pg_backup_archiver.h | 26 +- src/bin/pg_dump/pg_backup_custom.c | 66 +- src/bin/pg_dump/pg_backup_db.c | 24 +- src/bin/pg_dump/pg_backup_db.h | 2 +- src/bin/pg_dump/pg_backup_directory.c | 72 +- src/bin/pg_dump/pg_backup_null.c | 2 +- src/bin/pg_dump/pg_backup_tar.c | 4 +- src/bin/pg_dump/pg_backup_utils.c | 6 +- src/bin/pg_dump/pg_backup_utils.h | 2 +- src/bin/pg_dump/pg_dump.c | 3369 +- src/bin/pg_dump/pg_dump.h | 40 +- src/bin/pg_dump/pg_dump_sort.c | 179 +- src/bin/pg_dump/pg_dumpall.c | 593 +- src/bin/pg_dump/pg_restore.c | 17 +- src/bin/pg_dump/po/de.po | 963 +- src/bin/pg_dump/po/es.po | 909 +- src/bin/pg_dump/po/fr.po | 448 +- src/bin/pg_dump/po/he.po | 2883 ++ src/bin/pg_dump/po/it.po | 429 +- src/bin/pg_dump/po/ko.po | 1148 +- src/bin/pg_dump/po/ru.po | 918 +- src/bin/pg_dump/po/sv.po | 939 +- src/bin/pg_dump/po/tr.po | 3021 ++ src/bin/pg_dump/t/001_basic.pl | 48 +- src/bin/pg_dump/t/002_pg_dump.pl | 6192 +--- src/bin/pg_dump/t/010_dump_connstr.pl | 60 +- src/bin/pg_resetwal/.gitignore | 1 + src/bin/pg_resetwal/Makefile | 9 +- src/bin/pg_resetwal/nls.mk | 2 +- src/bin/pg_resetwal/pg_resetwal.c | 171 +- src/bin/pg_resetwal/po/de.po | 305 +- src/bin/pg_resetwal/po/fr.po | 2 +- src/bin/pg_resetwal/po/ko.po | 260 +- src/bin/pg_resetwal/po/ru.po | 271 +- src/bin/pg_resetwal/po/sv.po | 333 +- src/bin/pg_resetwal/po/tr.po | 653 + src/bin/pg_resetwal/t/001_basic.pl | 27 + src/bin/pg_resetwal/t/002_corrupted.pl | 53 + src/bin/pg_rewind/Makefile | 4 +- src/bin/pg_rewind/RewindTest.pm | 65 +- src/bin/pg_rewind/copy_fetch.c | 19 +- src/bin/pg_rewind/datapagemap.c | 2 +- src/bin/pg_rewind/datapagemap.h | 2 +- 
src/bin/pg_rewind/fetch.c | 4 +- src/bin/pg_rewind/fetch.h | 4 +- src/bin/pg_rewind/file_ops.c | 39 +- src/bin/pg_rewind/file_ops.h | 3 +- src/bin/pg_rewind/filemap.c | 175 +- src/bin/pg_rewind/filemap.h | 2 +- src/bin/pg_rewind/libpq_fetch.c | 61 +- src/bin/pg_rewind/logging.c | 4 +- src/bin/pg_rewind/logging.h | 2 +- src/bin/pg_rewind/nls.mk | 2 +- src/bin/pg_rewind/parsexlog.c | 46 +- src/bin/pg_rewind/pg_rewind.c | 84 +- src/bin/pg_rewind/pg_rewind.h | 3 +- src/bin/pg_rewind/po/de.po | 312 +- src/bin/pg_rewind/po/es.po | 221 +- src/bin/pg_rewind/po/fr.po | 44 +- src/bin/pg_rewind/po/it.po | 76 +- src/bin/pg_rewind/po/ko.po | 262 +- src/bin/pg_rewind/po/ru.po | 65 +- src/bin/pg_rewind/po/sv.po | 372 +- src/bin/pg_rewind/po/tr.po | 860 + src/bin/pg_rewind/t/001_basic.pl | 21 +- src/bin/pg_rewind/t/002_databases.pl | 17 +- src/bin/pg_rewind/t/003_extrafiles.pl | 11 +- src/bin/pg_rewind/t/004_pg_xlog_symlink.pl | 5 +- src/bin/pg_rewind/timeline.c | 2 +- src/bin/pg_test_fsync/nls.mk | 2 +- src/bin/pg_test_fsync/pg_test_fsync.c | 37 +- src/bin/pg_test_fsync/po/de.po | 195 + src/bin/pg_test_fsync/po/es.po | 121 +- src/bin/pg_test_fsync/po/fr.po | 107 +- src/bin/pg_test_fsync/po/ja.po | 195 + src/bin/pg_test_fsync/po/ko.po | 197 + src/bin/pg_test_fsync/po/ru.po | 104 +- src/bin/pg_test_fsync/po/sv.po | 121 +- src/bin/pg_test_fsync/po/tr.po | 189 + src/bin/pg_test_fsync/po/vi.po | 195 + src/bin/pg_test_timing/nls.mk | 2 +- src/bin/pg_test_timing/pg_test_timing.c | 17 +- src/bin/pg_test_timing/po/de.po | 25 +- src/bin/pg_test_timing/po/es.po | 39 +- src/bin/pg_test_timing/po/fr.po | 30 +- src/bin/pg_test_timing/po/ja.po | 78 + src/bin/pg_test_timing/po/ko.po | 77 + src/bin/pg_test_timing/po/ru.po | 33 +- src/bin/pg_test_timing/po/tr.po | 78 + src/bin/pg_test_timing/po/vi.po | 78 + src/bin/pg_upgrade/.gitignore | 1 + src/bin/pg_upgrade/Makefile | 2 +- src/bin/pg_upgrade/check.c | 59 +- src/bin/pg_upgrade/controldata.c | 69 +- src/bin/pg_upgrade/dump.c | 14 +- src/bin/pg_upgrade/exec.c | 65 +- src/bin/pg_upgrade/file.c | 145 +- src/bin/pg_upgrade/function.c | 162 +- src/bin/pg_upgrade/info.c | 4 +- src/bin/pg_upgrade/nls.mk | 2 +- src/bin/pg_upgrade/option.c | 31 +- src/bin/pg_upgrade/parallel.c | 11 +- src/bin/pg_upgrade/pg_upgrade.c | 188 +- src/bin/pg_upgrade/pg_upgrade.h | 39 +- src/bin/pg_upgrade/po/de.po | 1623 + src/bin/pg_upgrade/po/fr.po | 345 +- src/bin/pg_upgrade/po/ja.po | 1757 ++ src/bin/pg_upgrade/po/ko.po | 1694 + src/bin/pg_upgrade/po/ru.po | 583 +- src/bin/pg_upgrade/po/sv.po | 1665 + src/bin/pg_upgrade/po/tr.po | 1617 + src/bin/pg_upgrade/relfilenode.c | 50 +- src/bin/pg_upgrade/server.c | 49 +- src/bin/pg_upgrade/tablespace.c | 4 +- src/bin/pg_upgrade/test.sh | 60 +- src/bin/pg_upgrade/util.c | 2 +- src/bin/pg_upgrade/version.c | 10 +- src/bin/pg_verify_checksums/.gitignore | 3 + src/bin/pg_verify_checksums/Makefile | 42 + src/bin/pg_verify_checksums/nls.mk | 4 + .../pg_verify_checksums/pg_verify_checksums.c | 367 + src/bin/pg_verify_checksums/t/001_basic.pl | 8 + src/bin/pg_verify_checksums/t/002_actions.pl | 127 + src/bin/pg_waldump/compat.c | 5 +- src/bin/pg_waldump/nls.mk | 2 +- src/bin/pg_waldump/pg_waldump.c | 281 +- src/bin/pg_waldump/po/de.po | 283 + src/bin/pg_waldump/po/es.po | 54 +- src/bin/pg_waldump/po/fr.po | 59 +- src/bin/pg_waldump/po/ja.po | 276 + src/bin/pg_waldump/po/ko.po | 252 + src/bin/pg_waldump/po/ru.po | 276 + src/bin/pg_waldump/po/sv.po | 165 +- src/bin/pg_waldump/po/tr.po | 251 + src/bin/pg_waldump/po/vi.po | 309 + src/bin/pgbench/Makefile | 2 +- 
src/bin/pgbench/exprparse.y | 277 +- src/bin/pgbench/exprscan.l | 122 +- src/bin/pgbench/pgbench.c | 2441 +- src/bin/pgbench/pgbench.h | 36 +- src/bin/pgbench/t/001_pgbench.pl | 25 - src/bin/pgbench/t/001_pgbench_with_server.pl | 864 + src/bin/pgbench/t/002_pgbench_no_server.pl | 318 + src/bin/pgevent/Makefile | 2 +- src/bin/psql/Makefile | 13 +- src/bin/psql/command.c | 437 +- src/bin/psql/command.h | 4 +- src/bin/psql/common.c | 220 +- src/bin/psql/common.h | 2 +- src/bin/psql/copy.c | 2 +- src/bin/psql/copy.h | 2 +- src/bin/psql/create_help.pl | 13 +- src/bin/psql/crosstabview.c | 2 +- src/bin/psql/crosstabview.h | 4 +- src/bin/psql/describe.c | 739 +- src/bin/psql/describe.h | 2 +- src/bin/psql/help.c | 206 +- src/bin/psql/help.h | 2 +- src/bin/psql/input.c | 2 +- src/bin/psql/input.h | 2 +- src/bin/psql/large_obj.c | 4 +- src/bin/psql/large_obj.h | 2 +- src/bin/psql/mainloop.c | 145 +- src/bin/psql/mainloop.h | 2 +- src/bin/psql/po/de.po | 3823 ++- src/bin/psql/po/fr.po | 5452 ++-- src/bin/psql/po/it.po | 2561 +- src/bin/psql/po/ja.po | 5275 ++-- src/bin/psql/po/ko.po | 3400 +- src/bin/psql/po/ru.po | 2776 +- src/bin/psql/po/sv.po | 6472 ++-- src/bin/psql/prompt.c | 2 +- src/bin/psql/prompt.h | 4 +- src/bin/psql/psqlscanslash.h | 2 +- src/bin/psql/psqlscanslash.l | 26 +- src/bin/psql/settings.h | 5 +- src/bin/psql/startup.c | 40 +- src/bin/psql/stringutils.c | 8 +- src/bin/psql/stringutils.h | 2 +- src/bin/psql/tab-complete.c | 3415 +- src/bin/psql/tab-complete.h | 2 +- src/bin/psql/variables.c | 8 +- src/bin/psql/variables.h | 2 +- src/bin/scripts/Makefile | 4 +- src/bin/scripts/clusterdb.c | 14 +- src/bin/scripts/common.c | 138 +- src/bin/scripts/common.h | 12 +- src/bin/scripts/createdb.c | 4 +- src/bin/scripts/createuser.c | 4 +- src/bin/scripts/dropdb.c | 5 +- src/bin/scripts/dropuser.c | 4 +- src/bin/scripts/nls.mk | 2 +- src/bin/scripts/pg_isready.c | 2 +- src/bin/scripts/po/de.po | 218 +- src/bin/scripts/po/it.po | 62 +- src/bin/scripts/po/ko.po | 410 +- src/bin/scripts/po/ru.po | 107 +- src/bin/scripts/po/sv.po | 317 +- src/bin/scripts/po/tr.po | 1137 + src/bin/scripts/reindexdb.c | 25 +- src/bin/scripts/t/010_clusterdb.pl | 4 +- src/bin/scripts/t/040_createuser.pl | 8 +- src/bin/scripts/t/080_pg_isready.pl | 4 +- src/bin/scripts/t/090_reindexdb.pl | 6 +- src/bin/scripts/t/100_vacuumdb.pl | 33 +- src/bin/scripts/t/102_vacuumdb_stages.pl | 18 +- src/bin/scripts/vacuumdb.c | 226 +- src/common/Makefile | 68 +- src/common/base64.c | 2 +- src/common/config_info.c | 2 +- src/common/controldata_utils.c | 38 +- src/common/exec.c | 113 +- src/common/fe_memutils.c | 2 +- src/common/file_perm.c | 91 + src/common/file_utils.c | 6 +- src/common/ip.c | 8 +- src/common/keywords.c | 2 +- src/common/link-canary.c | 36 + src/common/md5.c | 4 +- src/common/pg_lzcompress.c | 4 +- src/common/pgfnames.c | 2 +- src/common/psprintf.c | 78 +- src/common/relpath.c | 5 +- src/common/restricted_token.c | 2 +- src/common/rmtree.c | 2 +- src/common/saslprep.c | 13 +- src/common/scram-common.c | 9 +- src/common/sha2.c | 2 +- src/common/sha2_openssl.c | 2 +- src/common/string.c | 51 +- .../unicode/generate-norm_test_table.pl | 4 +- .../unicode/generate-unicode_norm_table.pl | 4 +- src/common/unicode/norm_test.c | 4 +- src/common/unicode_norm.c | 2 +- src/common/username.c | 2 +- src/common/wait_error.c | 2 +- src/fe_utils/Makefile | 4 +- src/{bin/psql => fe_utils}/conditional.c | 34 +- src/fe_utils/mbprint.c | 2 +- src/fe_utils/print.c | 8 +- src/fe_utils/psqlscan.l | 84 +- src/fe_utils/simple_list.c | 2 +- 
src/fe_utils/string_utils.c | 27 +- src/include/.gitignore | 1 - src/include/Makefile | 24 +- src/include/access/amapi.h | 13 +- src/include/access/amvalidate.h | 2 +- src/include/access/attnum.h | 2 +- src/include/access/brin.h | 2 +- src/include/access/brin_internal.h | 2 +- src/include/access/brin_page.h | 2 +- src/include/access/brin_pageops.h | 4 +- src/include/access/brin_revmap.h | 2 +- src/include/access/brin_tuple.h | 2 +- src/include/access/brin_xlog.h | 2 +- src/include/access/bufmask.h | 4 +- src/include/access/clog.h | 2 +- src/include/access/commit_ts.h | 2 +- src/include/access/genam.h | 5 +- src/include/access/generic_xlog.h | 2 +- src/include/access/gin.h | 6 +- src/include/access/gin_private.h | 11 +- src/include/access/ginblock.h | 9 +- src/include/access/ginxlog.h | 2 +- src/include/access/gist.h | 2 +- src/include/access/gist_private.h | 4 +- src/include/access/gistscan.h | 2 +- src/include/access/gistxlog.h | 2 +- src/include/access/hash.h | 115 +- src/include/access/hash_xlog.h | 12 +- src/include/access/heapam.h | 11 +- src/include/access/heapam_xlog.h | 36 +- src/include/access/hio.h | 2 +- src/include/access/htup.h | 3 +- src/include/access/htup_details.h | 49 +- src/include/access/itup.h | 28 +- src/include/access/multixact.h | 2 +- src/include/access/nbtree.h | 157 +- src/include/access/nbtxlog.h | 18 +- src/include/access/parallel.h | 19 +- src/include/access/printsimple.h | 2 +- src/include/access/printtup.h | 6 +- src/include/access/reloptions.h | 20 +- src/include/access/relscan.h | 5 +- src/include/access/rewriteheap.h | 8 +- src/include/access/rmgrlist.h | 2 +- src/include/access/sdir.h | 2 +- src/include/access/session.h | 44 + src/include/access/skey.h | 2 +- src/include/access/slru.h | 4 +- src/include/access/spgist.h | 20 +- src/include/access/spgist_private.h | 57 +- src/include/access/spgxlog.h | 2 +- src/include/access/stratnum.h | 5 +- src/include/access/subtrans.h | 2 +- src/include/access/sysattr.h | 2 +- src/include/access/timeline.h | 2 +- src/include/access/transam.h | 2 +- src/include/access/tsmapi.h | 2 +- src/include/access/tupconvert.h | 10 +- src/include/access/tupdesc.h | 27 +- src/include/access/tupdesc_details.h | 28 + src/include/access/tupmacs.h | 2 +- src/include/access/tuptoaster.h | 2 +- src/include/access/twophase.h | 9 +- src/include/access/twophase_rmgr.h | 2 +- src/include/access/valid.h | 2 +- src/include/access/visibilitymap.h | 2 +- src/include/access/xact.h | 55 +- src/include/access/xlog.h | 14 +- src/include/access/xlog_internal.h | 84 +- src/include/access/xlogdefs.h | 2 +- src/include/access/xloginsert.h | 2 +- src/include/access/xlogreader.h | 14 +- src/include/access/xlogrecord.h | 2 +- src/include/access/xlogutils.h | 2 +- src/include/bootstrap/bootstrap.h | 4 +- src/include/c.h | 588 +- src/include/catalog/.gitignore | 2 + src/include/catalog/Makefile | 31 + src/include/catalog/binary_upgrade.h | 2 +- src/include/catalog/catalog.h | 11 +- src/include/catalog/catversion.h | 4 +- src/include/catalog/dependency.h | 17 +- src/include/catalog/duplicate_oids | 43 +- src/include/catalog/genbki.h | 34 +- src/include/catalog/heap.h | 19 +- src/include/catalog/index.h | 58 +- src/include/catalog/indexing.h | 14 +- src/include/catalog/namespace.h | 18 +- src/include/catalog/objectaccess.h | 4 +- src/include/catalog/objectaddress.h | 6 +- src/include/catalog/opfam_internal.h | 2 +- src/include/catalog/partition.h | 88 +- src/include/catalog/pg_aggregate.dat | 592 + src/include/catalog/pg_aggregate.h | 348 +- 
src/include/catalog/pg_am.dat | 34 + src/include/catalog/pg_am.h | 66 +- src/include/catalog/pg_amop.dat | 2386 ++ src/include/catalog/pg_amop.h | 1135 +- src/include/catalog/pg_amproc.dat | 1232 + src/include/catalog/pg_amproc.h | 511 +- src/include/catalog/pg_attrdef.h | 27 +- src/include/catalog/pg_attribute.h | 91 +- src/include/catalog/pg_auth_members.h | 26 +- src/include/catalog/pg_authid.dat | 64 + src/include/catalog/pg_authid.h | 69 +- src/include/catalog/pg_cast.dat | 509 + src/include/catalog/pg_cast.h | 365 +- src/include/catalog/pg_class.dat | 68 + src/include/catalog/pg_class.h | 86 +- src/include/catalog/pg_collation.dat | 31 + src/include/catalog/pg_collation.h | 60 +- src/include/catalog/pg_collation_fn.h | 27 - src/include/catalog/pg_constraint.h | 157 +- src/include/catalog/pg_constraint_fn.h | 82 - src/include/catalog/pg_control.h | 5 +- src/include/catalog/pg_conversion.h | 42 +- src/include/catalog/pg_conversion_fn.h | 27 - src/include/catalog/pg_database.dat | 23 + src/include/catalog/pg_database.h | 38 +- src/include/catalog/pg_db_role_setting.h | 33 +- src/include/catalog/pg_default_acl.h | 34 +- src/include/catalog/pg_depend.h | 53 +- src/include/catalog/pg_description.h | 41 +- src/include/catalog/pg_enum.h | 38 +- src/include/catalog/pg_event_trigger.h | 26 +- src/include/catalog/pg_extension.h | 33 +- src/include/catalog/pg_foreign_data_wrapper.h | 27 +- src/include/catalog/pg_foreign_server.h | 27 +- src/include/catalog/pg_foreign_table.h | 23 +- src/include/catalog/pg_index.h | 44 +- src/include/catalog/pg_inherits.h | 38 +- src/include/catalog/pg_inherits_fn.h | 27 - src/include/catalog/pg_init_privs.h | 46 +- src/include/catalog/pg_language.dat | 25 + src/include/catalog/pg_language.h | 73 +- src/include/catalog/pg_largeobject.h | 23 +- src/include/catalog/pg_largeobject_metadata.h | 23 +- src/include/catalog/pg_namespace.dat | 25 + src/include/catalog/pg_namespace.h | 41 +- src/include/catalog/pg_opclass.dat | 341 + src/include/catalog/pg_opclass.h | 225 +- src/include/catalog/pg_operator.dat | 3199 ++ src/include/catalog/pg_operator.h | 1870 +- src/include/catalog/pg_operator_fn.h | 36 - src/include/catalog/pg_opfamily.dat | 232 + src/include/catalog/pg_opfamily.h | 174 +- src/include/catalog/pg_partitioned_table.h | 31 +- src/include/catalog/pg_pltemplate.dat | 51 + src/include/catalog/pg_pltemplate.h | 43 +- src/include/catalog/pg_policy.h | 35 +- src/include/catalog/pg_proc.dat | 10041 ++++++ src/include/catalog/pg_proc.h | 5537 +--- src/include/catalog/pg_proc_fn.h | 51 - src/include/catalog/pg_publication.h | 35 +- src/include/catalog/pg_publication_rel.h | 26 +- src/include/catalog/pg_range.dat | 31 + src/include/catalog/pg_range.h | 65 +- src/include/catalog/pg_replication_origin.h | 28 +- src/include/catalog/pg_rewrite.h | 31 +- src/include/catalog/pg_seclabel.h | 26 +- src/include/catalog/pg_sequence.h | 36 +- src/include/catalog/pg_shdepend.h | 46 +- src/include/catalog/pg_shdescription.h | 40 +- src/include/catalog/pg_shseclabel.h | 26 +- src/include/catalog/pg_statistic.h | 58 +- src/include/catalog/pg_statistic_ext.h | 32 +- src/include/catalog/pg_subscription.h | 33 +- src/include/catalog/pg_subscription_rel.h | 43 +- src/include/catalog/pg_tablespace.dat | 22 + src/include/catalog/pg_tablespace.h | 31 +- src/include/catalog/pg_transform.h | 25 +- src/include/catalog/pg_trigger.h | 39 +- src/include/catalog/pg_ts_config.dat | 19 + src/include/catalog/pg_ts_config.h | 34 +- src/include/catalog/pg_ts_config_map.dat | 35 + 
src/include/catalog/pg_ts_config_map.h | 52 +- src/include/catalog/pg_ts_dict.dat | 20 + src/include/catalog/pg_ts_dict.h | 35 +- src/include/catalog/pg_ts_parser.dat | 20 + src/include/catalog/pg_ts_parser.h | 62 +- src/include/catalog/pg_ts_template.dat | 30 + src/include/catalog/pg_ts_template.h | 53 +- src/include/catalog/pg_type.dat | 589 + src/include/catalog/pg_type.h | 639 +- src/include/catalog/pg_type_fn.h | 84 - src/include/catalog/pg_user_mapping.h | 23 +- src/include/catalog/reformat_dat_file.pl | 316 + src/include/catalog/storage.h | 2 +- src/include/catalog/storage_xlog.h | 2 +- src/include/catalog/toasting.h | 44 +- src/include/catalog/unused_oids | 85 +- src/include/commands/alter.h | 2 +- src/include/commands/async.h | 2 +- src/include/commands/cluster.h | 5 +- src/include/commands/collationcmds.h | 2 +- src/include/commands/comment.h | 6 +- src/include/commands/conversioncmds.h | 2 +- src/include/commands/copy.h | 2 +- src/include/commands/createas.h | 2 +- src/include/commands/dbcommands.h | 2 +- src/include/commands/dbcommands_xlog.h | 2 +- src/include/commands/defrem.h | 21 +- src/include/commands/discard.h | 2 +- src/include/commands/event_trigger.h | 3 +- src/include/commands/explain.h | 24 +- src/include/commands/extension.h | 4 +- src/include/commands/lockcmds.h | 2 +- src/include/commands/matview.h | 2 +- src/include/commands/policy.h | 2 +- src/include/commands/portalcmds.h | 2 +- src/include/commands/prepare.h | 2 +- src/include/commands/progress.h | 2 +- src/include/commands/publicationcmds.h | 2 +- src/include/commands/schemacmds.h | 2 +- src/include/commands/seclabel.h | 2 +- src/include/commands/sequence.h | 2 +- src/include/commands/subscriptioncmds.h | 2 +- src/include/commands/tablecmds.h | 11 +- src/include/commands/tablespace.h | 2 +- src/include/commands/trigger.h | 39 +- src/include/commands/typecmds.h | 4 +- src/include/commands/vacuum.h | 20 +- src/include/commands/variable.h | 6 +- src/include/commands/view.h | 4 +- src/include/common/base64.h | 2 +- src/include/common/config_info.h | 2 +- src/include/common/controldata_utils.h | 2 +- src/include/common/fe_memutils.h | 2 +- src/include/common/file_perm.h | 56 + src/include/common/file_utils.h | 2 +- src/include/common/int.h | 273 + src/include/common/int128.h | 2 +- src/include/common/ip.h | 2 +- src/include/common/keywords.h | 2 +- src/include/common/link-canary.h | 17 + src/include/common/md5.h | 2 +- src/include/common/relpath.h | 18 +- src/include/common/restricted_token.h | 2 +- src/include/common/saslprep.h | 2 +- src/include/common/scram-common.h | 19 +- src/include/common/sha2.h | 2 +- src/include/common/string.h | 5 +- src/include/common/unicode_norm.h | 2 +- src/include/common/unicode_norm_table.h | 2 +- src/include/common/username.h | 2 +- src/include/datatype/timestamp.h | 2 +- src/include/executor/execExpr.h | 124 +- src/include/executor/execParallel.h | 27 +- src/include/executor/execPartition.h | 202 + src/include/executor/execdebug.h | 2 +- src/include/executor/execdesc.h | 2 +- src/include/executor/executor.h | 112 +- src/include/executor/functions.h | 4 +- src/include/executor/hashjoin.h | 187 +- src/include/executor/instrument.h | 9 +- src/include/executor/nodeAgg.h | 291 +- src/include/executor/nodeAppend.h | 7 +- src/include/executor/nodeBitmapAnd.h | 2 +- src/include/executor/nodeBitmapHeapscan.h | 6 +- src/include/executor/nodeBitmapIndexscan.h | 2 +- src/include/executor/nodeBitmapOr.h | 2 +- src/include/executor/nodeCtescan.h | 2 +- src/include/executor/nodeCustom.h | 6 +- 
src/include/executor/nodeForeignscan.h | 6 +- src/include/executor/nodeFunctionscan.h | 2 +- src/include/executor/nodeGather.h | 2 +- src/include/executor/nodeGatherMerge.h | 2 +- src/include/executor/nodeGroup.h | 2 +- src/include/executor/nodeHash.h | 31 +- src/include/executor/nodeHashjoin.h | 9 +- src/include/executor/nodeIndexonlyscan.h | 6 +- src/include/executor/nodeIndexscan.h | 6 +- src/include/executor/nodeLimit.h | 2 +- src/include/executor/nodeLockRows.h | 2 +- src/include/executor/nodeMaterial.h | 2 +- src/include/executor/nodeMergeAppend.h | 2 +- src/include/executor/nodeMergejoin.h | 2 +- src/include/executor/nodeModifyTable.h | 2 +- .../executor/nodeNamedtuplestorescan.h | 2 +- src/include/executor/nodeNestloop.h | 2 +- src/include/executor/nodeProjectSet.h | 2 +- src/include/executor/nodeRecursiveunion.h | 2 +- src/include/executor/nodeResult.h | 2 +- src/include/executor/nodeSamplescan.h | 2 +- src/include/executor/nodeSeqscan.h | 6 +- src/include/executor/nodeSetOp.h | 2 +- src/include/executor/nodeSort.h | 9 +- src/include/executor/nodeSubplan.h | 4 +- src/include/executor/nodeSubqueryscan.h | 2 +- src/include/executor/nodeTableFuncscan.h | 2 +- src/include/executor/nodeTidscan.h | 2 +- src/include/executor/nodeUnique.h | 2 +- src/include/executor/nodeValuesscan.h | 2 +- src/include/executor/nodeWindowAgg.h | 2 +- src/include/executor/nodeWorktablescan.h | 2 +- src/include/executor/spi.h | 11 +- src/include/executor/spi_priv.h | 18 +- src/include/executor/tablefunc.h | 12 +- src/include/executor/tqueue.h | 5 +- src/include/executor/tstoreReceiver.h | 2 +- src/include/executor/tuptable.h | 102 +- .../psql => include/fe_utils}/conditional.h | 25 +- src/include/fe_utils/connect.h | 28 + src/include/fe_utils/mbprint.h | 2 +- src/include/fe_utils/print.h | 10 +- src/include/fe_utils/psqlscan.h | 2 +- src/include/fe_utils/psqlscan_int.h | 4 +- src/include/fe_utils/simple_list.h | 2 +- src/include/fe_utils/string_utils.h | 5 +- src/include/fmgr.h | 24 +- src/include/foreign/fdwapi.h | 23 +- src/include/foreign/foreign.h | 2 +- src/include/funcapi.h | 46 +- src/include/getaddrinfo.h | 2 +- src/include/getopt_long.h | 2 +- src/include/jit/jit.h | 105 + src/include/jit/llvmjit.h | 138 + src/include/jit/llvmjit_emit.h | 211 + src/include/lib/binaryheap.h | 2 +- src/include/lib/bipartite_match.h | 2 +- src/include/lib/bloomfilter.h | 27 + src/include/lib/dshash.h | 90 + src/include/lib/hyperloglog.h | 2 +- src/include/lib/ilist.h | 2 +- src/include/lib/knapsack.h | 3 +- src/include/lib/pairingheap.h | 2 +- src/include/lib/rbtree.h | 65 +- src/include/lib/simplehash.h | 38 +- src/include/lib/stringinfo.h | 10 +- src/include/libpq/auth.h | 2 +- src/include/libpq/be-fsstubs.h | 7 +- src/include/libpq/crypt.h | 2 +- src/include/libpq/hba.h | 2 + src/include/libpq/ifaddr.h | 2 +- src/include/libpq/libpq-be.h | 84 +- src/include/libpq/libpq-fs.h | 2 +- src/include/libpq/libpq.h | 15 +- src/include/libpq/pqcomm.h | 2 +- src/include/libpq/pqformat.h | 170 +- src/include/libpq/pqmq.h | 2 +- src/include/libpq/pqsignal.h | 2 +- src/include/libpq/scram.h | 9 +- src/include/mb/pg_wchar.h | 15 +- src/include/miscadmin.h | 34 +- src/include/nodes/bitmapset.h | 5 +- src/include/nodes/execnodes.h | 406 +- src/include/nodes/extensible.h | 8 +- src/include/nodes/lockoptions.h | 2 +- src/include/nodes/makefuncs.h | 4 +- src/include/nodes/memnodes.h | 22 +- src/include/nodes/nodeFuncs.h | 2 +- src/include/nodes/nodes.h | 19 +- src/include/nodes/params.h | 86 +- src/include/nodes/parsenodes.h | 220 +- 
src/include/nodes/pg_list.h | 5 +- src/include/nodes/plannodes.h | 217 +- src/include/nodes/primnodes.h | 58 +- src/include/nodes/print.h | 2 +- src/include/nodes/readfuncs.h | 15 +- src/include/nodes/relation.h | 325 +- src/include/nodes/replnodes.h | 3 +- src/include/nodes/tidbitmap.h | 3 +- src/include/nodes/value.h | 8 +- src/include/optimizer/clauses.h | 7 +- src/include/optimizer/cost.h | 44 +- src/include/optimizer/geqo.h | 2 +- src/include/optimizer/geqo_copy.h | 2 +- src/include/optimizer/geqo_gene.h | 2 +- src/include/optimizer/geqo_misc.h | 2 +- src/include/optimizer/geqo_mutation.h | 2 +- src/include/optimizer/geqo_pool.h | 2 +- src/include/optimizer/geqo_random.h | 2 +- src/include/optimizer/geqo_recombination.h | 2 +- src/include/optimizer/geqo_selection.h | 2 +- src/include/optimizer/joininfo.h | 2 +- src/include/optimizer/orclauses.h | 2 +- src/include/optimizer/pathnode.h | 29 +- src/include/optimizer/paths.h | 36 +- src/include/optimizer/placeholder.h | 4 +- src/include/optimizer/plancat.h | 2 +- src/include/optimizer/planmain.h | 5 +- src/include/optimizer/planner.h | 10 +- src/include/optimizer/predtest.h | 6 +- src/include/optimizer/prep.h | 15 +- src/include/optimizer/restrictinfo.h | 3 +- src/include/optimizer/subselect.h | 2 +- src/include/optimizer/tlist.h | 2 +- src/include/optimizer/var.h | 2 +- src/include/parser/analyze.h | 5 +- src/include/parser/gramparse.h | 2 +- src/include/parser/kwlist.h | 10 +- src/include/parser/parse_agg.h | 2 +- src/include/parser/parse_clause.h | 2 +- src/include/parser/parse_coerce.h | 15 +- src/include/parser/parse_collate.h | 2 +- src/include/parser/parse_cte.h | 2 +- src/include/parser/parse_enr.h | 2 +- src/include/parser/parse_expr.h | 2 +- src/include/parser/parse_func.h | 10 +- src/include/parser/parse_node.h | 8 +- src/include/parser/parse_oper.h | 2 +- src/include/parser/parse_param.h | 2 +- src/include/parser/parse_relation.h | 11 +- src/include/parser/parse_target.h | 4 +- src/include/parser/parse_type.h | 6 +- src/include/parser/parse_utilcmd.h | 6 +- src/include/parser/parser.h | 2 +- src/include/parser/parsetree.h | 12 +- src/include/parser/scanner.h | 2 +- src/include/parser/scansup.h | 2 +- src/include/partitioning/partbounds.h | 150 + src/include/partitioning/partdefs.h | 24 + src/include/partitioning/partprune.h | 83 + src/include/pg_config.h.in | 183 +- src/include/pg_config.h.win32 | 192 +- src/include/pg_config_manual.h | 29 +- src/include/pg_getopt.h | 2 +- src/include/pg_trace.h | 2 +- src/include/pgstat.h | 37 +- src/include/pgtar.h | 2 +- src/include/pgtime.h | 4 +- src/include/port.h | 172 +- src/include/port/atomics.h | 10 +- src/include/port/atomics/arch-arm.h | 2 +- src/include/port/atomics/arch-hppa.h | 2 +- src/include/port/atomics/arch-ia64.h | 2 +- src/include/port/atomics/arch-ppc.h | 2 +- src/include/port/atomics/arch-x86.h | 2 +- src/include/port/atomics/fallback.h | 15 +- src/include/port/atomics/generic-acc.h | 2 +- src/include/port/atomics/generic-gcc.h | 60 +- src/include/port/atomics/generic-msvc.h | 2 +- src/include/port/atomics/generic-sunpro.h | 2 +- src/include/port/atomics/generic-xlc.h | 2 +- src/include/port/atomics/generic.h | 88 +- src/include/port/pg_bswap.h | 134 +- src/include/port/pg_crc32c.h | 30 +- src/include/port/win32.h | 433 +- src/include/port/win32_port.h | 520 + src/include/portability/instr_time.h | 2 +- src/include/portability/mem.h | 2 +- src/include/postgres.h | 110 +- src/include/postgres_fe.h | 2 +- src/include/postmaster/autovacuum.h | 4 +- 
src/include/postmaster/bgworker.h | 19 +- src/include/postmaster/bgworker_internals.h | 2 +- src/include/postmaster/bgwriter.h | 2 +- src/include/postmaster/fork_process.h | 2 +- src/include/postmaster/pgarch.h | 2 +- src/include/postmaster/postmaster.h | 7 +- src/include/postmaster/startup.h | 2 +- src/include/postmaster/syslogger.h | 5 +- src/include/postmaster/walwriter.h | 2 +- src/include/regex/regexport.h | 2 +- src/include/replication/basebackup.h | 2 +- src/include/replication/decode.h | 2 +- src/include/replication/logical.h | 10 +- src/include/replication/logicalfuncs.h | 2 +- src/include/replication/logicallauncher.h | 3 +- src/include/replication/logicalproto.h | 13 +- src/include/replication/logicalrelation.h | 4 +- src/include/replication/logicalworker.h | 2 +- src/include/replication/message.h | 2 +- src/include/replication/origin.h | 2 +- src/include/replication/output_plugin.h | 13 +- src/include/replication/pgoutput.h | 2 +- src/include/replication/reorderbuffer.h | 73 +- src/include/replication/slot.h | 23 +- src/include/replication/snapbuild.h | 2 +- src/include/replication/syncrep.h | 2 +- src/include/replication/walreceiver.h | 33 +- src/include/replication/walsender.h | 2 +- src/include/replication/walsender_private.h | 2 +- src/include/replication/worker_internal.h | 4 +- src/include/rewrite/prs2lock.h | 2 +- src/include/rewrite/rewriteDefine.h | 4 +- src/include/rewrite/rewriteHandler.h | 5 +- src/include/rewrite/rewriteManip.h | 2 +- src/include/rewrite/rewriteRemove.h | 2 +- src/include/rewrite/rewriteSupport.h | 2 +- src/include/rewrite/rowsecurity.h | 2 +- src/include/rusagestub.h | 2 +- src/include/snowball/header.h | 2 +- src/include/snowball/libstemmer/header.h | 2 + .../libstemmer/stem_ISO_8859_1_danish.h | 4 +- .../libstemmer/stem_ISO_8859_1_dutch.h | 4 +- .../libstemmer/stem_ISO_8859_1_english.h | 4 +- .../libstemmer/stem_ISO_8859_1_finnish.h | 4 +- .../libstemmer/stem_ISO_8859_1_french.h | 4 +- .../libstemmer/stem_ISO_8859_1_german.h | 4 +- .../libstemmer/stem_ISO_8859_1_hungarian.h | 16 - .../libstemmer/stem_ISO_8859_1_indonesian.h | 16 + .../libstemmer/stem_ISO_8859_1_irish.h | 16 + .../libstemmer/stem_ISO_8859_1_italian.h | 4 +- .../libstemmer/stem_ISO_8859_1_norwegian.h | 4 +- .../libstemmer/stem_ISO_8859_1_porter.h | 4 +- .../libstemmer/stem_ISO_8859_1_portuguese.h | 4 +- .../libstemmer/stem_ISO_8859_1_spanish.h | 4 +- .../libstemmer/stem_ISO_8859_1_swedish.h | 4 +- .../libstemmer/stem_ISO_8859_2_hungarian.h | 16 + .../libstemmer/stem_ISO_8859_2_romanian.h | 4 +- .../snowball/libstemmer/stem_KOI8_R_russian.h | 4 +- .../snowball/libstemmer/stem_UTF_8_arabic.h | 16 + .../snowball/libstemmer/stem_UTF_8_danish.h | 4 +- .../snowball/libstemmer/stem_UTF_8_dutch.h | 4 +- .../snowball/libstemmer/stem_UTF_8_english.h | 4 +- .../snowball/libstemmer/stem_UTF_8_finnish.h | 4 +- .../snowball/libstemmer/stem_UTF_8_french.h | 4 +- .../snowball/libstemmer/stem_UTF_8_german.h | 4 +- .../libstemmer/stem_UTF_8_hungarian.h | 4 +- .../libstemmer/stem_UTF_8_indonesian.h | 16 + .../snowball/libstemmer/stem_UTF_8_irish.h | 16 + .../snowball/libstemmer/stem_UTF_8_italian.h | 4 +- .../libstemmer/stem_UTF_8_lithuanian.h | 16 + .../snowball/libstemmer/stem_UTF_8_nepali.h | 16 + .../libstemmer/stem_UTF_8_norwegian.h | 4 +- .../snowball/libstemmer/stem_UTF_8_porter.h | 4 +- .../libstemmer/stem_UTF_8_portuguese.h | 4 +- .../snowball/libstemmer/stem_UTF_8_romanian.h | 4 +- .../snowball/libstemmer/stem_UTF_8_russian.h | 4 +- .../snowball/libstemmer/stem_UTF_8_spanish.h | 4 +- 
.../snowball/libstemmer/stem_UTF_8_swedish.h | 4 +- .../snowball/libstemmer/stem_UTF_8_tamil.h | 16 + .../snowball/libstemmer/stem_UTF_8_turkish.h | 4 +- .../statistics/extended_stats_internal.h | 2 +- src/include/statistics/statistics.h | 2 +- src/include/storage/backendid.h | 2 +- src/include/storage/barrier.h | 45 + src/include/storage/block.h | 4 +- src/include/storage/buf.h | 2 +- src/include/storage/buf_internals.h | 3 +- src/include/storage/buffile.h | 13 +- src/include/storage/bufmgr.h | 2 +- src/include/storage/bufpage.h | 2 +- src/include/storage/checksum.h | 2 +- src/include/storage/checksum_impl.h | 40 +- src/include/storage/condition_variable.h | 25 +- src/include/storage/copydir.h | 2 +- src/include/storage/dsm.h | 6 +- src/include/storage/dsm_impl.h | 7 +- src/include/storage/fd.h | 50 +- src/include/storage/freespace.h | 8 +- src/include/storage/fsm_internals.h | 2 +- src/include/storage/indexfsm.h | 2 +- src/include/storage/ipc.h | 3 +- src/include/storage/item.h | 2 +- src/include/storage/itemid.h | 2 +- src/include/storage/itemptr.h | 43 +- src/include/storage/large_object.h | 15 +- src/include/storage/latch.h | 4 +- src/include/storage/lmgr.h | 4 +- src/include/storage/lock.h | 13 +- src/include/storage/lockdefs.h | 10 +- src/include/storage/lwlock.h | 12 +- src/include/storage/off.h | 2 +- src/include/storage/pg_sema.h | 2 +- src/include/storage/pg_shmem.h | 2 +- src/include/storage/pmsignal.h | 42 +- src/include/storage/predicate.h | 2 +- src/include/storage/predicate_internals.h | 2 +- src/include/storage/proc.h | 23 +- src/include/storage/procarray.h | 2 +- src/include/storage/proclist.h | 57 +- src/include/storage/proclist_types.h | 14 +- src/include/storage/procsignal.h | 2 +- src/include/storage/reinit.h | 7 +- src/include/storage/relfilenode.h | 2 +- src/include/storage/s_lock.h | 28 +- src/include/storage/sharedfileset.h | 45 + src/include/storage/shm_mq.h | 6 +- src/include/storage/shm_toc.h | 2 +- src/include/storage/shmem.h | 2 +- src/include/storage/sinval.h | 2 +- src/include/storage/sinvaladt.h | 2 +- src/include/storage/smgr.h | 3 +- src/include/storage/spin.h | 4 +- src/include/storage/standby.h | 4 +- src/include/storage/standbydefs.h | 4 +- src/include/tcop/deparse_utility.h | 7 +- src/include/tcop/dest.h | 5 +- src/include/tcop/fastpath.h | 2 +- src/include/tcop/pquery.h | 2 +- src/include/tcop/tcopprot.h | 4 +- src/include/tcop/utility.h | 4 +- src/include/tsearch/dicts/regis.h | 2 +- src/include/tsearch/dicts/spell.h | 2 +- src/include/tsearch/ts_cache.h | 2 +- src/include/tsearch/ts_locale.h | 22 +- src/include/tsearch/ts_public.h | 2 +- src/include/tsearch/ts_type.h | 2 +- src/include/tsearch/ts_utils.h | 30 +- src/include/utils/.gitignore | 1 + src/include/utils/acl.h | 59 +- src/include/utils/aclchk_internal.h | 6 +- src/include/utils/array.h | 15 +- src/include/utils/arrayaccess.h | 2 +- src/include/utils/ascii.h | 2 +- src/include/utils/attoptcache.h | 2 +- src/include/utils/backend_random.h | 2 +- src/include/utils/builtins.h | 35 +- src/include/utils/bytea.h | 2 +- src/include/utils/catcache.h | 126 +- src/include/utils/combocid.h | 2 +- src/include/utils/date.h | 7 +- src/include/utils/datetime.h | 4 +- src/include/utils/datum.h | 2 +- src/include/utils/dsa.h | 12 +- src/include/utils/dynahash.h | 2 +- src/include/utils/dynamic_loader.h | 25 - src/include/utils/elog.h | 31 +- src/include/utils/evtcache.h | 6 +- src/include/utils/expandeddatum.h | 2 +- src/include/utils/expandedrecord.h | 231 + src/include/utils/float.h | 376 + 
src/include/utils/fmgrtab.h | 15 +- src/include/utils/formatting.h | 2 +- src/include/utils/freepage.h | 2 +- src/include/utils/geo_decls.h | 24 +- src/include/utils/guc.h | 15 +- src/include/utils/guc_tables.h | 5 +- src/include/utils/hashutils.h | 53 + src/include/utils/help_config.h | 2 +- src/include/utils/hsearch.h | 2 +- src/include/utils/index_selfuncs.h | 2 +- src/include/utils/inet.h | 2 +- src/include/utils/int8.h | 2 +- src/include/utils/inval.h | 2 +- src/include/utils/json.h | 2 +- src/include/utils/jsonapi.h | 30 +- src/include/utils/jsonb.h | 12 +- src/include/utils/logtape.h | 38 +- src/include/utils/lsyscache.h | 12 +- src/include/utils/memdebug.h | 2 +- src/include/utils/memutils.h | 44 +- src/include/utils/nabstime.h | 103 - src/include/utils/numeric.h | 2 +- src/include/utils/palloc.h | 2 +- src/include/utils/partcache.h | 96 + src/include/utils/pg_crc.h | 2 +- src/include/utils/pg_locale.h | 4 +- src/include/utils/pg_lsn.h | 2 +- src/include/utils/pg_rusage.h | 2 +- src/include/utils/pidfile.h | 2 +- src/include/utils/plancache.h | 13 +- src/include/utils/portal.h | 14 +- src/include/utils/queryenvironment.h | 2 +- src/include/utils/rangetypes.h | 14 +- src/include/utils/regproc.h | 2 +- src/include/utils/rel.h | 118 +- src/include/utils/relcache.h | 10 +- src/include/utils/relfilenodemap.h | 2 +- src/include/utils/relmapper.h | 8 +- src/include/utils/relptr.h | 2 +- src/include/utils/reltrigger.h | 2 +- src/include/utils/resowner.h | 5 +- src/include/utils/resowner_private.h | 9 +- src/include/utils/rls.h | 2 +- src/include/utils/ruleutils.h | 2 +- src/include/utils/sampling.h | 2 +- src/include/utils/selfuncs.h | 11 +- src/include/utils/sharedtuplestore.h | 61 + src/include/utils/snapmgr.h | 8 +- src/include/utils/snapshot.h | 6 +- src/include/utils/sortsupport.h | 13 +- src/include/utils/spccache.h | 2 +- src/include/utils/syscache.h | 38 +- src/include/utils/timeout.h | 2 +- src/include/utils/timestamp.h | 2 +- src/include/utils/tqual.h | 13 +- src/include/utils/tuplesort.h | 168 +- src/include/utils/tuplestore.h | 2 +- src/include/utils/typcache.h | 40 +- src/include/utils/tzparser.h | 2 +- src/include/utils/uuid.h | 2 +- src/include/utils/varbit.h | 2 +- src/include/utils/varlena.h | 6 +- src/include/utils/xml.h | 8 +- src/include/windowapi.h | 2 +- src/interfaces/ecpg/compatlib/.gitignore | 1 - src/interfaces/ecpg/compatlib/Makefile | 16 +- src/interfaces/ecpg/compatlib/informix.c | 30 +- src/interfaces/ecpg/ecpglib/.gitignore | 7 - src/interfaces/ecpg/ecpglib/Makefile | 29 +- src/interfaces/ecpg/ecpglib/connect.c | 12 +- src/interfaces/ecpg/ecpglib/data.c | 140 +- src/interfaces/ecpg/ecpglib/descriptor.c | 49 +- src/interfaces/ecpg/ecpglib/error.c | 16 +- src/interfaces/ecpg/ecpglib/execute.c | 131 +- src/interfaces/ecpg/ecpglib/extern.h | 3 +- src/interfaces/ecpg/ecpglib/memory.c | 6 +- src/interfaces/ecpg/ecpglib/misc.c | 48 +- src/interfaces/ecpg/ecpglib/nls.mk | 2 +- src/interfaces/ecpg/ecpglib/pg_type.h | 79 - src/interfaces/ecpg/ecpglib/po/it.po | 115 +- src/interfaces/ecpg/ecpglib/po/ko.po | 4 +- src/interfaces/ecpg/ecpglib/po/ru.po | 5 +- src/interfaces/ecpg/ecpglib/po/sv.po | 198 + src/interfaces/ecpg/ecpglib/po/vi.po | 200 + src/interfaces/ecpg/ecpglib/prepare.c | 167 +- src/interfaces/ecpg/ecpglib/sqlda.c | 3 +- src/interfaces/ecpg/ecpglib/typename.c | 3 +- src/interfaces/ecpg/include/Makefile | 2 +- src/interfaces/ecpg/include/ecpg_informix.h | 12 +- src/interfaces/ecpg/include/ecpglib.h | 2 +- src/interfaces/ecpg/include/pgtypes.h | 17 + 
src/interfaces/ecpg/include/pgtypes_date.h | 3 +- .../ecpg/include/pgtypes_interval.h | 1 + src/interfaces/ecpg/include/pgtypes_numeric.h | 2 + .../ecpg/include/pgtypes_timestamp.h | 3 +- src/interfaces/ecpg/include/sqlda-compat.h | 2 +- src/interfaces/ecpg/pgtypeslib/.gitignore | 3 - src/interfaces/ecpg/pgtypeslib/Makefile | 21 +- src/interfaces/ecpg/pgtypeslib/common.c | 22 +- src/interfaces/ecpg/pgtypeslib/datetime.c | 7 +- src/interfaces/ecpg/pgtypeslib/dt.h | 8 +- src/interfaces/ecpg/pgtypeslib/dt_common.c | 57 +- src/interfaces/ecpg/pgtypeslib/exports.txt | 1 + src/interfaces/ecpg/pgtypeslib/interval.c | 56 +- src/interfaces/ecpg/pgtypeslib/numeric.c | 20 +- src/interfaces/ecpg/pgtypeslib/timestamp.c | 21 +- src/interfaces/ecpg/preproc/Makefile | 9 +- src/interfaces/ecpg/preproc/check_rules.pl | 6 +- src/interfaces/ecpg/preproc/ecpg.c | 26 +- src/interfaces/ecpg/preproc/ecpg.header | 12 +- src/interfaces/ecpg/preproc/ecpg.tokens | 2 +- src/interfaces/ecpg/preproc/ecpg.trailer | 11 +- src/interfaces/ecpg/preproc/ecpg_keywords.c | 1 - src/interfaces/ecpg/preproc/extern.h | 4 +- src/interfaces/ecpg/preproc/keywords.c | 2 +- src/interfaces/ecpg/preproc/nls.mk | 2 +- src/interfaces/ecpg/preproc/output.c | 6 + src/interfaces/ecpg/preproc/parse.pl | 48 +- src/interfaces/ecpg/preproc/parser.c | 2 +- src/interfaces/ecpg/preproc/pgc.l | 188 +- src/interfaces/ecpg/preproc/po/de.po | 162 +- src/interfaces/ecpg/preproc/po/fr.po | 124 +- src/interfaces/ecpg/preproc/po/it.po | 23 +- src/interfaces/ecpg/preproc/po/ko.po | 164 +- src/interfaces/ecpg/preproc/po/pt_BR.po | 6 +- src/interfaces/ecpg/preproc/po/ru.po | 104 +- src/interfaces/ecpg/preproc/po/sv.po | 664 + src/interfaces/ecpg/preproc/po/vi.po | 684 + src/interfaces/ecpg/preproc/type.c | 98 +- src/interfaces/ecpg/preproc/type.h | 2 +- src/interfaces/ecpg/preproc/variable.c | 36 +- src/interfaces/ecpg/test/Makefile | 12 +- src/interfaces/ecpg/test/Makefile.regress | 6 +- .../ecpg/test/compat_informix/Makefile | 3 +- .../ecpg/test/compat_informix/dec_test.pgc | 15 +- .../ecpg/test/compat_informix/describe.pgc | 2 +- .../ecpg/test/compat_informix/rfmtdate.pgc | 8 +- .../ecpg/test/compat_informix/rfmtlong.pgc | 4 +- .../ecpg/test/compat_informix/sqlda.pgc | 4 +- .../test/compat_informix/test_informix2.pgc | 2 +- .../ecpg/test/compat_oracle/.gitignore | 2 + .../ecpg/test/compat_oracle/Makefile | 11 + .../ecpg/test/compat_oracle/char_array.pgc | 64 + src/interfaces/ecpg/test/connect/test1.pgc | 4 +- src/interfaces/ecpg/test/connect/test2.pgc | 2 +- src/interfaces/ecpg/test/connect/test3.pgc | 2 +- src/interfaces/ecpg/test/connect/test4.pgc | 2 +- src/interfaces/ecpg/test/connect/test5.pgc | 2 +- src/interfaces/ecpg/test/ecpg_schedule | 2 + src/interfaces/ecpg/test/ecpg_schedule_tcp | 55 - .../compat_informix-dec_test-MinGW32.stdout | 1293 - .../test/expected/compat_informix-dec_test.c | 36 +- .../test/expected/compat_informix-describe.c | 2 +- .../test/expected/compat_informix-rfmtdate.c | 8 +- .../test/expected/compat_informix-rfmtlong.c | 4 +- .../test/expected/compat_informix-sqlda.c | 4 +- .../expected/compat_informix-test_informix2.c | 2 +- .../test/expected/compat_oracle-char_array.c | 223 + .../expected/compat_oracle-char_array.stderr | 139 + .../expected/compat_oracle-char_array.stdout | 10 + .../ecpg/test/expected/connect-test1.c | 4 +- .../ecpg/test/expected/connect-test1.stderr | 7 +- .../ecpg/test/expected/connect-test2.c | 2 +- .../ecpg/test/expected/connect-test3.c | 2 +- .../ecpg/test/expected/connect-test4.c | 2 +- 
.../ecpg/test/expected/connect-test5.c | 2 +- .../ecpg/test/expected/pgtypeslib-dt_test.c | 108 +- .../ecpg/test/expected/pgtypeslib-dt_test2.c | 14 +- .../ecpg/test/expected/pgtypeslib-nan_test.c | 2 +- .../pgtypeslib-num_test-MinGW32.stdout | 6 - .../ecpg/test/expected/pgtypeslib-num_test.c | 82 +- .../test/expected/pgtypeslib-num_test.stderr | 26 +- .../test/expected/pgtypeslib-num_test.stdout | 2 +- .../pgtypeslib-num_test2-MinGW32.stdout | 1117 - .../ecpg/test/expected/pgtypeslib-num_test2.c | 64 +- .../test/expected/preproc-array_of_struct.c | 2 +- .../test/expected/preproc-autoprep.stderr | 10 +- .../ecpg/test/expected/preproc-cursor.c | 2 +- .../ecpg/test/expected/preproc-define.c | 2 +- .../ecpg/test/expected/preproc-describe.c | 2 +- .../ecpg/test/expected/preproc-init.c | 2 +- .../ecpg/test/expected/preproc-outofscope.c | 6 +- .../test/expected/preproc-pointer_to_struct.c | 2 +- .../ecpg/test/expected/preproc-strings.c | 2 +- .../ecpg/test/expected/preproc-variable.c | 2 +- .../ecpg/test/expected/preproc-whenever.c | 2 +- .../expected/preproc-whenever_do_continue.c | 161 + .../preproc-whenever_do_continue.stderr | 112 + .../preproc-whenever_do_continue.stdout | 2 + src/interfaces/ecpg/test/expected/sql-array.c | 2 +- .../ecpg/test/expected/sql-describe.c | 2 +- .../ecpg/test/expected/sql-execute.c | 2 +- .../ecpg/test/expected/sql-oldexec.c | 2 +- src/interfaces/ecpg/test/expected/sql-sqlda.c | 152 +- .../ecpg/test/expected/sql-sqlda.stderr | 326 +- .../ecpg/test/expected/sql-sqlda.stdout | 8 + .../ecpg/test/expected/sql-twophase.c | 2 +- .../ecpg/test/expected/thread-alloc.c | 41 +- .../ecpg/test/expected/thread-descriptor.c | 21 +- .../ecpg/test/expected/thread-prep.c | 69 +- .../ecpg/test/expected/thread-thread.c | 70 +- .../test/expected/thread-thread_implicit.c | 70 +- .../ecpg/test/performance/perftest.pgc | 2 +- src/interfaces/ecpg/test/pg_regress_ecpg.c | 2 +- .../ecpg/test/pgtypeslib/dt_test.pgc | 108 +- .../ecpg/test/pgtypeslib/dt_test2.pgc | 14 +- .../ecpg/test/pgtypeslib/nan_test.pgc | 2 +- .../ecpg/test/pgtypeslib/num_test.pgc | 23 +- .../ecpg/test/pgtypeslib/num_test2.pgc | 39 +- src/interfaces/ecpg/test/preproc/.gitignore | 2 + src/interfaces/ecpg/test/preproc/Makefile | 1 + .../ecpg/test/preproc/array_of_struct.pgc | 2 +- src/interfaces/ecpg/test/preproc/cursor.pgc | 2 +- src/interfaces/ecpg/test/preproc/define.pgc | 2 +- src/interfaces/ecpg/test/preproc/init.pgc | 2 +- .../ecpg/test/preproc/outofscope.pgc | 4 +- .../ecpg/test/preproc/pointer_to_struct.pgc | 2 +- src/interfaces/ecpg/test/preproc/strings.pgc | 2 +- src/interfaces/ecpg/test/preproc/variable.pgc | 2 +- src/interfaces/ecpg/test/preproc/whenever.pgc | 2 +- .../test/preproc/whenever_do_continue.pgc | 63 + src/interfaces/ecpg/test/printf_hack.h | 29 + src/interfaces/ecpg/test/resultmap | 12 - src/interfaces/ecpg/test/sql/array.pgc | 2 +- src/interfaces/ecpg/test/sql/describe.pgc | 2 +- src/interfaces/ecpg/test/sql/execute.pgc | 2 +- src/interfaces/ecpg/test/sql/oldexec.pgc | 2 +- src/interfaces/ecpg/test/sql/sqlda.pgc | 26 +- src/interfaces/ecpg/test/sql/twophase.pgc | 2 +- src/interfaces/ecpg/test/thread/alloc.pgc | 7 + .../ecpg/test/thread/descriptor.pgc | 7 + src/interfaces/ecpg/test/thread/prep.pgc | 7 + src/interfaces/ecpg/test/thread/thread.pgc | 16 +- .../ecpg/test/thread/thread_implicit.pgc | 16 +- src/interfaces/libpq/.gitignore | 28 - src/interfaces/libpq/Makefile | 58 +- src/interfaces/libpq/exports.txt | 3 +- src/interfaces/libpq/fe-auth-scram.c | 264 +- src/interfaces/libpq/fe-auth.c | 98 
+- src/interfaces/libpq/fe-auth.h | 8 +- src/interfaces/libpq/fe-connect.c | 1074 +- src/interfaces/libpq/fe-exec.c | 192 +- src/interfaces/libpq/fe-lobj.c | 27 +- src/interfaces/libpq/fe-misc.c | 20 +- src/interfaces/libpq/fe-print.c | 9 +- src/interfaces/libpq/fe-protocol2.c | 31 +- src/interfaces/libpq/fe-protocol3.c | 31 +- src/interfaces/libpq/fe-secure-common.c | 211 + src/interfaces/libpq/fe-secure-common.h | 26 + src/interfaces/libpq/fe-secure-openssl.c | 385 +- src/interfaces/libpq/fe-secure.c | 27 +- src/interfaces/libpq/libpq-events.c | 36 +- src/interfaces/libpq/libpq-events.h | 2 +- src/interfaces/libpq/libpq-fe.h | 3 +- src/interfaces/libpq/libpq-int.h | 128 +- src/interfaces/libpq/libpq.rc.in | 10 +- src/interfaces/libpq/nls.mk | 2 +- src/interfaces/libpq/po/de.po | 586 +- src/interfaces/libpq/po/es.po | 315 +- src/interfaces/libpq/po/fr.po | 285 +- src/interfaces/libpq/po/it.po | 459 +- src/interfaces/libpq/po/ko.po | 596 +- src/interfaces/libpq/po/ru.po | 529 +- src/interfaces/libpq/po/sv.po | 658 +- src/interfaces/libpq/po/tr.po | 999 +- src/interfaces/libpq/pqexpbuffer.c | 77 +- src/interfaces/libpq/pqexpbuffer.h | 2 +- src/interfaces/libpq/pthread-win32.c | 2 +- src/interfaces/libpq/test/Makefile | 4 +- src/interfaces/libpq/test/uri-regress.c | 2 +- src/interfaces/libpq/win32.c | 2 +- src/makefiles/pgxs.mk | 139 +- src/pl/plperl/GNUmakefile | 20 +- src/pl/plperl/SPI.xs | 9 + src/pl/plperl/expected/plperl.out | 97 +- src/pl/plperl/expected/plperl_call.out | 54 + src/pl/plperl/expected/plperl_transaction.out | 196 + src/pl/plperl/expected/plperl_util.out | 11 +- src/pl/plperl/nls.mk | 2 +- src/pl/plperl/plc_perlboot.pl | 7 +- src/pl/plperl/plc_trusted.pl | 5 +- src/pl/plperl/plperl.c | 226 +- src/pl/plperl/plperl.h | 112 +- src/pl/plperl/plperl_opmask.pl | 10 +- src/pl/plperl/po/it.po | 22 +- src/pl/plperl/po/ja.po | 215 +- src/pl/plperl/po/ko.po | 100 +- src/pl/plperl/po/ru.po | 91 +- src/pl/plperl/po/tr.po | 159 +- src/pl/plperl/po/vi.po | 242 + src/pl/plperl/sql/plperl.sql | 51 +- src/pl/plperl/sql/plperl_call.sql | 58 + src/pl/plperl/sql/plperl_transaction.sql | 163 + src/pl/plperl/sql/plperl_util.sql | 9 + src/pl/plperl/text2macro.pl | 3 +- src/pl/plpgsql/src/.gitignore | 3 + src/pl/plpgsql/src/Makefile | 24 +- src/pl/plpgsql/src/expected/plpgsql_cache.out | 67 + .../plpgsql/src/expected/plpgsql_cache_1.out | 72 + src/pl/plpgsql/src/expected/plpgsql_call.out | 340 + .../plpgsql/src/expected/plpgsql_control.out | 672 + .../plpgsql/src/expected/plpgsql_domain.out | 397 + .../plpgsql/src/expected/plpgsql_record.out | 656 + .../src/expected/plpgsql_transaction.out | 528 + .../plpgsql/src/expected/plpgsql_varprops.out | 298 + src/pl/plpgsql/src/generate-plerrcodes.pl | 2 +- src/pl/plpgsql/src/nls.mk | 2 +- src/pl/plpgsql/src/pl_comp.c | 512 +- src/pl/plpgsql/src/pl_exec.c | 3932 ++- src/pl/plpgsql/src/pl_funcs.c | 164 +- src/pl/plpgsql/src/pl_gram.y | 346 +- src/pl/plpgsql/src/pl_handler.c | 24 +- src/pl/plpgsql/src/pl_scanner.c | 8 +- src/pl/plpgsql/src/plpgsql.h | 316 +- src/pl/plpgsql/src/po/de.po | 424 +- src/pl/plpgsql/src/po/it.po | 24 +- src/pl/plpgsql/src/po/ja.po | 655 +- src/pl/plpgsql/src/po/ko.po | 362 +- src/pl/plpgsql/src/po/ru.po | 171 +- src/pl/plpgsql/src/po/sv.po | 411 +- src/pl/plpgsql/src/po/tr.po | 887 + src/pl/plpgsql/src/po/vi.po | 850 + src/pl/plpgsql/src/sql/plpgsql_cache.sql | 50 + src/pl/plpgsql/src/sql/plpgsql_call.sql | 317 + src/pl/plpgsql/src/sql/plpgsql_control.sql | 476 + src/pl/plpgsql/src/sql/plpgsql_domain.sql | 279 + 
src/pl/plpgsql/src/sql/plpgsql_record.sql | 443 + .../plpgsql/src/sql/plpgsql_transaction.sql | 450 + src/pl/plpgsql/src/sql/plpgsql_varprops.sql | 247 + src/pl/plpython/Makefile | 26 +- src/pl/plpython/expected/plpython_call.out | 58 + src/pl/plpython/expected/plpython_error.out | 23 + src/pl/plpython/expected/plpython_error_0.out | 23 + src/pl/plpython/expected/plpython_error_5.out | 23 + .../expected/plpython_subtransaction.out | 10 +- .../expected/plpython_subtransaction_0.out | 4 +- .../expected/plpython_subtransaction_5.out | 4 +- src/pl/plpython/expected/plpython_test.out | 4 +- .../expected/plpython_transaction.out | 195 + src/pl/plpython/expected/plpython_types.out | 130 +- src/pl/plpython/expected/plpython_types_3.out | 130 +- src/pl/plpython/generate-spiexceptions.pl | 2 +- src/pl/plpython/nls.mk | 2 +- src/pl/plpython/plpy_cursorobject.c | 143 +- src/pl/plpython/plpy_cursorobject.h | 2 +- src/pl/plpython/plpy_elog.c | 4 +- src/pl/plpython/plpy_elog.h | 24 +- src/pl/plpython/plpy_exec.c | 211 +- src/pl/plpython/plpy_main.c | 90 +- src/pl/plpython/plpy_planobject.c | 37 +- src/pl/plpython/plpy_planobject.h | 2 +- src/pl/plpython/plpy_plpymodule.c | 72 +- src/pl/plpython/plpy_procedure.c | 159 +- src/pl/plpython/plpy_procedure.h | 11 +- src/pl/plpython/plpy_resultobject.c | 104 +- src/pl/plpython/plpy_spi.c | 112 +- src/pl/plpython/plpy_spi.h | 1 - src/pl/plpython/plpy_subxactobject.c | 40 +- src/pl/plpython/plpy_typeio.c | 1221 +- src/pl/plpython/plpy_typeio.h | 220 +- src/pl/plpython/plpy_util.c | 3 +- src/pl/plpython/plpython.h | 56 +- src/pl/plpython/po/de.po | 228 +- src/pl/plpython/po/fr.po | 146 +- src/pl/plpython/po/it.po | 79 +- src/pl/plpython/po/ja.po | 443 +- src/pl/plpython/po/ko.po | 201 +- src/pl/plpython/po/ru.po | 105 +- src/pl/plpython/po/sv.po | 222 +- src/pl/plpython/po/tr.po | 534 + src/pl/plpython/po/vi.po | 485 + src/pl/plpython/sql/plpython_call.sql | 61 + src/pl/plpython/sql/plpython_error.sql | 16 + .../plpython/sql/plpython_subtransaction.sql | 2 +- src/pl/plpython/sql/plpython_transaction.sql | 152 + src/pl/plpython/sql/plpython_types.sql | 91 + src/pl/tcl/Makefile | 2 +- src/pl/tcl/expected/pltcl_call.out | 55 + src/pl/tcl/expected/pltcl_queries.out | 94 + src/pl/tcl/expected/pltcl_subxact.out | 6 +- src/pl/tcl/expected/pltcl_transaction.out | 100 + src/pl/tcl/generate-pltclerrcodes.pl | 2 +- src/pl/tcl/nls.mk | 2 +- src/pl/tcl/pltcl.c | 213 +- src/pl/tcl/po/it.po | 21 +- src/pl/tcl/po/ja.po | 104 +- src/pl/tcl/po/ko.po | 93 +- src/pl/tcl/po/ru.po | 25 +- src/pl/tcl/po/tr.po | 90 +- src/pl/tcl/po/vi.po | 107 + src/pl/tcl/sql/pltcl_call.sql | 59 + src/pl/tcl/sql/pltcl_queries.sql | 43 + src/pl/tcl/sql/pltcl_transaction.sql | 98 + src/port/.gitignore | 1 + src/port/Makefile | 68 +- src/port/README | 2 +- src/port/chklocale.c | 2 +- src/port/dirent.c | 2 +- src/port/dirmod.c | 2 +- src/port/dlopen.c | 145 + src/port/fls.c | 2 +- src/port/fseeko.c | 2 +- src/port/getaddrinfo.c | 15 +- src/port/getpeereid.c | 2 +- src/port/getrusage.c | 2 +- src/port/inet_aton.c | 4 +- src/port/inet_net_ntop.c | 2 +- src/port/isinf.c | 2 +- src/port/kill.c | 2 +- src/port/mkdtemp.c | 2 +- src/port/noblock.c | 2 +- src/port/open.c | 16 +- src/port/path.c | 2 +- src/port/pg_crc32c_armv8.c | 75 + src/port/pg_crc32c_armv8_choose.c | 95 + src/port/pg_crc32c_sb8.c | 2 +- src/port/pg_crc32c_sse42.c | 2 +- ...c32c_choose.c => pg_crc32c_sse42_choose.c} | 15 +- src/port/pg_strong_random.c | 31 +- src/port/pgcheckdir.c | 2 +- src/port/pgsleep.c | 2 +- src/port/pgstrcasecmp.c | 2 +- 
src/port/pqsignal.c | 2 +- src/port/pread.c | 55 + src/port/pwrite.c | 55 + src/port/quotes.c | 2 +- src/port/random.c | 2 +- src/port/rint.c | 1 - src/port/snprintf.c | 1069 +- src/port/sprompt.c | 42 +- src/port/srandom.c | 2 +- src/port/strerror.c | 324 +- src/port/strlcpy.c | 2 +- src/port/strnlen.c | 33 + src/port/system.c | 2 +- src/port/thread.c | 29 +- src/port/unsetenv.c | 2 +- src/port/win32env.c | 2 +- src/port/win32error.c | 2 +- src/port/win32security.c | 2 +- src/port/win32setlocale.c | 4 +- src/port/win32ver.rc | 6 +- src/template/aix | 4 + src/template/darwin | 13 + src/template/linux | 1 + src/test/Makefile | 28 +- src/test/authentication/Makefile | 5 +- src/test/authentication/README | 12 +- src/test/authentication/t/001_password.pl | 10 +- src/test/authentication/t/002_saslprep.pl | 5 +- src/test/examples/Makefile | 4 +- src/test/examples/testlibpq.c | 21 +- src/test/examples/testlibpq2.c | 30 +- src/test/examples/testlibpq2.sql | 6 +- src/test/examples/testlibpq3.c | 13 +- src/test/examples/testlibpq3.sql | 3 +- src/test/examples/testlibpq4.c | 19 +- src/test/examples/testlo.c | 13 +- src/test/examples/testlo64.c | 13 +- src/test/isolation/Makefile | 16 +- src/test/isolation/expected/alter-table-4.out | 57 + .../isolation/expected/eval-plan-qual.out | 99 + .../isolation/expected/freeze-the-dead.out | 36 + src/test/isolation/expected/multiple-cic.out | 19 + .../isolation/expected/multiple-cic_1.out | 20 + .../expected/partition-key-update-1.out | 119 + .../expected/partition-key-update-2.out | 29 + .../expected/partition-key-update-3.out | 139 + .../expected/partition-key-update-4.out | 60 + src/test/isolation/expected/plpgsql-toast.out | 189 + .../expected/predicate-gin-fastupdate.out | 30 + .../expected/predicate-gin-nomatch.out | 15 + src/test/isolation/expected/predicate-gin.out | 756 + .../isolation/expected/predicate-gist.out | 659 + .../isolation/expected/predicate-hash.out | 659 + .../isolation/expected/truncate-conflict.out | 99 + .../expected/vacuum-concurrent-drop.out | 76 + .../isolation/expected/vacuum-conflict.out | 149 + .../isolation/expected/vacuum-skip-locked.out | 171 + src/test/isolation/isolation_main.c | 26 +- src/test/isolation/isolation_schedule | 17 + src/test/isolation/isolationtester.c | 36 +- src/test/isolation/isolationtester.h | 2 +- src/test/isolation/specparse.y | 2 +- src/test/isolation/specs/alter-table-4.spec | 37 + src/test/isolation/specs/eval-plan-qual.spec | 58 +- src/test/isolation/specs/freeze-the-dead.spec | 59 + .../specs/insert-conflict-do-nothing-2.spec | 2 +- .../specs/insert-conflict-do-update-2.spec | 2 +- .../specs/lock-committed-keyupdate.spec | 2 +- .../specs/lock-update-traversal.spec | 5 +- src/test/isolation/specs/multiple-cic.spec | 40 + .../specs/partition-key-update-1.spec | 85 + .../specs/partition-key-update-2.spec | 45 + .../specs/partition-key-update-3.spec | 44 + .../specs/partition-key-update-4.spec | 76 + src/test/isolation/specs/plpgsql-toast.spec | 137 + .../specs/predicate-gin-fastupdate.spec | 49 + .../specs/predicate-gin-nomatch.spec | 35 + src/test/isolation/specs/predicate-gin.spec | 133 + src/test/isolation/specs/predicate-gist.spec | 117 + src/test/isolation/specs/predicate-hash.spec | 122 + .../isolation/specs/truncate-conflict.spec | 38 + .../specs/vacuum-concurrent-drop.spec | 45 + src/test/isolation/specs/vacuum-conflict.spec | 51 + .../isolation/specs/vacuum-skip-locked.spec | 59 + src/test/isolation/specscanner.l | 35 +- src/test/kerberos/.gitignore | 2 + src/test/kerberos/Makefile | 25 + 
src/test/kerberos/README | 43 + src/test/kerberos/t/001_auth.pl | 195 + src/test/ldap/.gitignore | 2 + src/test/ldap/Makefile | 25 + src/test/ldap/README | 52 + src/test/ldap/authdata.ldif | 32 + src/test/ldap/t/001_auth.pl | 289 + src/test/locale/Makefile | 1 + src/test/modules/Makefile | 5 +- src/test/modules/brin/Makefile | 14 +- src/test/modules/brin/t/01_workitems.pl | 41 + src/test/modules/commit_ts/Makefile | 2 +- src/test/modules/commit_ts/t/001_base.pl | 6 +- src/test/modules/commit_ts/t/002_standby.pl | 6 +- src/test/modules/commit_ts/t/003_standby_2.pl | 4 +- src/test/modules/commit_ts/t/004_restart.pl | 36 +- .../modules/dummy_seclabel/dummy_seclabel.c | 2 +- .../expected/dummy_seclabel.out | 4 +- src/test/modules/test_bloomfilter/.gitignore | 4 + src/test/modules/test_bloomfilter/Makefile | 21 + src/test/modules/test_bloomfilter/README | 68 + .../expected/test_bloomfilter.out | 22 + .../test_bloomfilter/sql/test_bloomfilter.sql | 19 + .../test_bloomfilter--1.0.sql | 11 + .../test_bloomfilter/test_bloomfilter.c | 138 + .../test_bloomfilter/test_bloomfilter.control | 4 + .../test_ddl_deparse/expected/alter_table.out | 12 + .../test_ddl_deparse/expected/matviews.out | 6 +- .../expected/test_ddl_deparse.out | 6 +- .../test_ddl_deparse/sql/alter_table.sql | 8 + .../modules/test_ddl_deparse/sql/matviews.sql | 6 +- .../test_ddl_deparse/sql/test_ddl_deparse.sql | 6 +- .../test_ddl_deparse/test_ddl_deparse.c | 2 +- src/test/modules/test_parser/test_parser.c | 2 +- src/test/modules/test_pg_dump/Makefile | 2 +- src/test/modules/test_pg_dump/t/001_base.pl | 635 +- src/test/modules/test_predtest/.gitignore | 4 + src/test/modules/test_predtest/Makefile | 21 + src/test/modules/test_predtest/README | 28 + .../test_predtest/expected/test_predtest.out | 839 + .../test_predtest/sql/test_predtest.sql | 327 + .../test_predtest/test_predtest--1.0.sql | 16 + .../modules/test_predtest/test_predtest.c | 218 + .../test_predtest/test_predtest.control | 4 + src/test/modules/test_rbtree/.gitignore | 4 + src/test/modules/test_rbtree/Makefile | 21 + src/test/modules/test_rbtree/README | 13 + .../test_rbtree/expected/test_rbtree.out | 12 + .../modules/test_rbtree/sql/test_rbtree.sql | 8 + .../modules/test_rbtree/test_rbtree--1.0.sql | 8 + src/test/modules/test_rbtree/test_rbtree.c | 413 + .../modules/test_rbtree/test_rbtree.control | 4 + .../modules/test_rls_hooks/test_rls_hooks.c | 30 +- .../modules/test_rls_hooks/test_rls_hooks.h | 2 +- src/test/modules/test_shm_mq/setup.c | 4 +- src/test/modules/test_shm_mq/test.c | 2 +- src/test/modules/test_shm_mq/test_shm_mq.h | 2 +- src/test/modules/test_shm_mq/worker.c | 22 +- src/test/modules/worker_spi/worker_spi.c | 12 +- src/test/perl/Makefile | 2 +- src/test/perl/PostgresNode.pm | 253 +- src/test/perl/RecursiveCopy.pm | 71 +- src/test/perl/SimpleTee.pm | 2 +- src/test/perl/TestLib.pm | 225 +- src/test/recovery/Makefile | 7 +- src/test/recovery/README | 18 +- src/test/recovery/t/001_stream_rep.pl | 33 +- src/test/recovery/t/003_recovery_targets.pl | 17 +- src/test/recovery/t/004_timeline_switch.pl | 11 +- src/test/recovery/t/006_logical_decoding.pl | 54 +- src/test/recovery/t/007_sync_rep.pl | 3 +- src/test/recovery/t/009_twophase.pl | 9 + .../t/010_logical_decoding_timelines.pl | 14 +- src/test/recovery/t/011_crash_recovery.pl | 8 +- src/test/recovery/t/012_subtransactions.pl | 4 +- src/test/recovery/t/013_crash_restart.pl | 275 + src/test/recovery/t/014_unlogged_reinit.pl | 81 + src/test/recovery/t/015_promotion_pages.pl | 87 + src/test/regress/GNUmakefile | 14 
+- src/test/regress/Makefile | 5 + src/test/regress/expected/abstime.out | 136 - src/test/regress/expected/aggregates.out | 288 +- src/test/regress/expected/alter_generic.out | 330 +- src/test/regress/expected/alter_operator.out | 3 + src/test/regress/expected/alter_table.out | 914 +- src/test/regress/expected/amutils.out | 84 +- src/test/regress/expected/bit.out | 23 + src/test/regress/expected/boolean.out | 83 + src/test/regress/expected/box.out | 44 +- src/test/regress/expected/btree_index.out | 29 + src/test/regress/expected/case.out | 14 + src/test/regress/expected/circle.out | 39 +- src/test/regress/expected/cluster.out | 25 +- .../regress/expected/collate.icu.utf8.out | 20 +- .../regress/expected/collate.linux.utf8.out | 20 +- src/test/regress/expected/collate.out | 19 +- src/test/regress/expected/copy2.out | 8 +- .../regress/expected/create_aggregate.out | 45 +- src/test/regress/expected/create_am.out | 6 +- .../regress/expected/create_function_3.out | 267 +- src/test/regress/expected/create_index.out | 315 +- src/test/regress/expected/create_misc.out | 10 + src/test/regress/expected/create_operator.out | 109 +- .../regress/expected/create_procedure.out | 202 + src/test/regress/expected/create_table.out | 282 +- .../regress/expected/create_table_like.out | 38 +- src/test/regress/expected/create_type.out | 57 + src/test/regress/expected/create_view.out | 4 +- src/test/regress/expected/domain.out | 220 +- src/test/regress/expected/enum.out | 31 +- src/test/regress/expected/equivclass.out | 18 + src/test/regress/expected/event_trigger.out | 91 +- src/test/regress/expected/fast_default.out | 760 + .../expected/float4-exp-three-digits.out | 259 - .../float8-exp-three-digits-win32.out | 550 - .../regress/expected/float8-small-is-zero.out | 36 + .../expected/float8-small-is-zero_1.out | 548 - src/test/regress/expected/float8.out | 36 + src/test/regress/expected/foreign_data.out | 395 +- src/test/regress/expected/foreign_key.out | 366 + src/test/regress/expected/geometry.out | 4883 ++- src/test/regress/expected/geometry_1.out | 563 - src/test/regress/expected/geometry_2.out | 563 - src/test/regress/expected/gist.out | 18 + src/test/regress/expected/groupingsets.out | 116 +- src/test/regress/expected/hash_func.out | 300 + src/test/regress/expected/hash_index.out | 12 + src/test/regress/expected/hash_part.out | 104 + src/test/regress/expected/horology.out | 362 +- src/test/regress/expected/identity.out | 74 +- src/test/regress/expected/index_including.out | 367 + src/test/regress/expected/indexing.out | 1406 + src/test/regress/expected/indirect_toast.out | 48 +- src/test/regress/expected/inherit.out | 195 +- src/test/regress/expected/insert.out | 311 +- src/test/regress/expected/insert_conflict.out | 119 + src/test/regress/expected/int2.out | 14 +- src/test/regress/expected/int4.out | 14 +- .../expected/int8-exp-three-digits.out | 888 - src/test/regress/expected/int8.out | 10 +- src/test/regress/expected/join.out | 1139 +- src/test/regress/expected/json.out | 365 +- src/test/regress/expected/jsonb.out | 468 +- src/test/regress/expected/line.out | 272 +- src/test/regress/expected/lock.out | 114 +- src/test/regress/expected/lseg.out | 24 +- src/test/regress/expected/misc_sanity.out | 33 +- src/test/regress/expected/namespace.out | 36 +- src/test/regress/expected/numeric.out | 169 + src/test/regress/expected/numerology_1.out | 136 - src/test/regress/expected/object_address.out | 24 +- src/test/regress/expected/oidjoins.out | 16 + src/test/regress/expected/opr_sanity.out | 180 +- 
.../regress/expected/partition_aggregate.out | 1533 + src/test/regress/expected/partition_info.out | 114 + src/test/regress/expected/partition_join.out | 2028 ++ src/test/regress/expected/partition_prune.out | 3590 +++ src/test/regress/expected/path.out | 49 +- src/test/regress/expected/plancache.out | 103 + src/test/regress/expected/plpgsql.out | 778 +- src/test/regress/expected/point.out | 358 +- src/test/regress/expected/polygon.out | 402 +- src/test/regress/expected/polymorphism.out | 78 +- src/test/regress/expected/portals.out | 94 + src/test/regress/expected/privileges.out | 1103 +- src/test/regress/expected/psql.out | 283 +- src/test/regress/expected/publication.out | 86 +- src/test/regress/expected/rangefuncs.out | 788 +- src/test/regress/expected/rangetypes.out | 12 + src/test/regress/expected/reloptions.out | 185 + src/test/regress/expected/reltime.out | 109 - src/test/regress/expected/rolenames.out | 51 +- src/test/regress/expected/rowsecurity.out | 35 +- src/test/regress/expected/rowtypes.out | 350 +- src/test/regress/expected/rules.out | 245 +- src/test/regress/expected/sanity_check.out | 18 +- src/test/regress/expected/select.out | 19 +- src/test/regress/expected/select_into.out | 14 +- src/test/regress/expected/select_parallel.out | 811 +- src/test/regress/expected/sequence.out | 25 +- src/test/regress/expected/spgist.out | 30 +- src/test/regress/expected/stats.out | 3 + src/test/regress/expected/stats_ext.out | 4 +- src/test/regress/expected/strings.out | 96 + src/test/regress/expected/subselect.out | 137 +- src/test/regress/expected/sysviews.out | 35 +- src/test/regress/expected/temp.out | 102 + src/test/regress/expected/text.out | 2 +- src/test/regress/expected/timestamptz.out | 74 +- src/test/regress/expected/timetz.out | 2 +- src/test/regress/expected/tinterval.out | 172 - src/test/regress/expected/transactions.out | 150 +- src/test/regress/expected/triggers.out | 701 +- src/test/regress/expected/truncate.out | 79 +- src/test/regress/expected/tsdicts.out | 38 + src/test/regress/expected/tsearch.out | 428 + src/test/regress/expected/tsrf.out | 22 + src/test/regress/expected/type_sanity.out | 29 + src/test/regress/expected/union.out | 115 + src/test/regress/expected/updatable_views.out | 408 +- src/test/regress/expected/update.out | 703 +- src/test/regress/expected/vacuum.out | 172 +- src/test/regress/expected/window.out | 2237 +- src/test/regress/expected/with.out | 56 +- src/test/regress/expected/write_parallel.out | 79 + src/test/regress/expected/xml.out | 55 +- src/test/regress/expected/xml_1.out | 59 +- src/test/regress/expected/xml_2.out | 55 +- src/test/regress/input/constraints.source | 16 + src/test/regress/input/copy.source | 49 + .../regress/input/create_function_1.source | 8 +- .../regress/input/create_function_2.source | 5 - src/test/regress/input/tablespace.source | 14 +- src/test/regress/output/constraints.source | 28 + src/test/regress/output/copy.source | 47 + .../regress/output/create_function_1.source | 7 +- .../regress/output/create_function_2.source | 4 - src/test/regress/output/tablespace.source | 119 +- src/test/regress/parallel_schedule | 16 +- src/test/regress/pg_regress.c | 159 +- src/test/regress/pg_regress.h | 4 +- src/test/regress/pg_regress_main.c | 30 +- src/test/regress/regress.c | 305 +- src/test/regress/resultmap | 12 - src/test/regress/serial_schedule | 17 +- src/test/regress/sql/abstime.sql | 67 - src/test/regress/sql/aggregates.sql | 152 + src/test/regress/sql/alter_generic.sql | 166 +- src/test/regress/sql/alter_operator.sql | 3 + 
src/test/regress/sql/alter_table.sql | 659 +- src/test/regress/sql/amutils.sql | 20 +- src/test/regress/sql/bit.sql | 11 + src/test/regress/sql/boolean.sql | 29 + src/test/regress/sql/box.sql | 9 + src/test/regress/sql/btree_index.sql | 19 + src/test/regress/sql/case.sql | 13 + src/test/regress/sql/circle.sql | 10 +- src/test/regress/sql/cluster.sql | 21 +- src/test/regress/sql/collate.sql | 2 + src/test/regress/sql/create_aggregate.sql | 29 +- src/test/regress/sql/create_am.sql | 6 +- src/test/regress/sql/create_function_3.sql | 150 +- src/test/regress/sql/create_index.sql | 127 +- src/test/regress/sql/create_misc.sql | 5 + src/test/regress/sql/create_operator.sql | 51 +- src/test/regress/sql/create_procedure.sql | 155 + src/test/regress/sql/create_table.sql | 181 +- src/test/regress/sql/create_table_like.sql | 5 +- src/test/regress/sql/create_type.sql | 20 + src/test/regress/sql/create_view.sql | 2 +- src/test/regress/sql/domain.sql | 110 + src/test/regress/sql/enum.sql | 24 +- src/test/regress/sql/equivclass.sql | 8 + src/test/regress/sql/event_trigger.sql | 57 +- src/test/regress/sql/fast_default.sql | 502 + src/test/regress/sql/float8.sql | 6 + src/test/regress/sql/foreign_data.sql | 223 +- src/test/regress/sql/foreign_key.sql | 234 + src/test/regress/sql/geometry.sql | 400 +- src/test/regress/sql/gist.sql | 14 + src/test/regress/sql/groupingsets.sql | 43 +- src/test/regress/sql/hash_func.sql | 222 + src/test/regress/sql/hash_index.sql | 10 + src/test/regress/sql/hash_part.sql | 80 + src/test/regress/sql/horology.sql | 99 +- src/test/regress/sql/identity.sql | 64 +- src/test/regress/sql/index_including.sql | 210 + src/test/regress/sql/indexing.sql | 755 + src/test/regress/sql/indirect_toast.sql | 48 +- src/test/regress/sql/inherit.sql | 98 +- src/test/regress/sql/insert.sql | 191 +- src/test/regress/sql/insert_conflict.sql | 106 + src/test/regress/sql/join.sql | 586 + src/test/regress/sql/json.sql | 82 + src/test/regress/sql/jsonb.sql | 105 +- src/test/regress/sql/line.sql | 79 +- src/test/regress/sql/lock.sql | 68 +- src/test/regress/sql/lseg.sql | 9 +- src/test/regress/sql/misc_sanity.sql | 21 +- src/test/regress/sql/namespace.sql | 28 +- src/test/regress/sql/numeric.sql | 38 + src/test/regress/sql/object_address.sql | 9 +- src/test/regress/sql/oidjoins.sql | 8 + src/test/regress/sql/opr_sanity.sql | 106 +- src/test/regress/sql/partition_aggregate.sql | 325 + src/test/regress/sql/partition_info.sql | 68 + src/test/regress/sql/partition_join.sql | 434 + src/test/regress/sql/partition_prune.sql | 960 + src/test/regress/sql/path.sql | 22 +- src/test/regress/sql/plancache.sql | 54 + src/test/regress/sql/plpgsql.sql | 540 +- src/test/regress/sql/point.sql | 10 + src/test/regress/sql/polygon.sql | 183 +- src/test/regress/sql/polymorphism.sql | 35 +- src/test/regress/sql/portals.sql | 34 + src/test/regress/sql/privileges.sql | 691 +- src/test/regress/sql/psql.sql | 128 + src/test/regress/sql/rangefuncs.sql | 524 +- src/test/regress/sql/rangetypes.sql | 12 + src/test/regress/sql/reloptions.sql | 113 + src/test/regress/sql/reltime.sql | 50 - src/test/regress/sql/rolenames.sql | 37 +- src/test/regress/sql/rowsecurity.sql | 16 +- src/test/regress/sql/rowtypes.sql | 122 + src/test/regress/sql/rules.sql | 117 +- src/test/regress/sql/select.sql | 14 +- src/test/regress/sql/select_into.sql | 8 +- src/test/regress/sql/select_parallel.sql | 315 +- src/test/regress/sql/sequence.sql | 6 + src/test/regress/sql/spgist.sql | 25 +- src/test/regress/sql/stats.sql | 3 + src/test/regress/sql/strings.sql | 38 
+ src/test/regress/sql/subselect.sql | 89 +- src/test/regress/sql/temp.sql | 73 + src/test/regress/sql/timestamptz.sql | 20 +- src/test/regress/sql/tinterval.sql | 97 - src/test/regress/sql/transactions.sql | 102 +- src/test/regress/sql/triggers.sql | 464 +- src/test/regress/sql/truncate.sql | 47 + src/test/regress/sql/tsdicts.sql | 13 + src/test/regress/sql/tsearch.sql | 97 + src/test/regress/sql/tsrf.sql | 5 + src/test/regress/sql/type_sanity.sql | 24 + src/test/regress/sql/union.sql | 43 + src/test/regress/sql/updatable_views.sql | 261 +- src/test/regress/sql/update.sql | 488 +- src/test/regress/sql/vacuum.sql | 115 +- src/test/regress/sql/window.sql | 622 +- src/test/regress/sql/with.sql | 46 +- src/test/regress/sql/write_parallel.sql | 42 + src/test/regress/sql/xml.sql | 43 +- src/test/ssl/Makefile | 14 +- src/test/ssl/README | 23 +- src/test/ssl/ServerSetup.pm | 96 +- src/test/ssl/ssl/.gitignore | 2 +- src/test/ssl/ssl/server-password.key | 18 + src/test/ssl/t/001_ssltests.pl | 357 +- src/test/ssl/t/002_scram.pl | 48 + src/test/subscription/Makefile | 5 +- src/test/subscription/README | 15 +- src/test/subscription/t/001_rep_changes.pl | 61 +- src/test/subscription/t/002_types.pl | 42 +- src/test/subscription/t/003_constraints.pl | 23 +- src/test/subscription/t/004_sync.pl | 28 +- src/test/subscription/t/005_encoding.pl | 21 +- src/test/subscription/t/006_rewrite.pl | 66 + src/test/subscription/t/007_ddl.pl | 43 + src/test/subscription/t/008_diff_schema.pl | 93 + src/test/subscription/t/009_matviews.pl | 50 + src/test/subscription/t/010_truncate.pl | 160 + src/test/thread/Makefile | 2 +- src/test/thread/thread_test.c | 38 +- src/timezone/Makefile | 6 +- src/timezone/README | 37 +- src/timezone/data/africa | 1192 - src/timezone/data/antarctica | 340 - src/timezone/data/asia | 3084 -- src/timezone/data/australasia | 1778 -- src/timezone/data/backward | 126 - src/timezone/data/backzone | 675 - src/timezone/data/etcetera | 78 - src/timezone/data/europe | 3839 --- src/timezone/data/factory | 10 - src/timezone/data/northamerica | 3374 -- src/timezone/data/pacificnew | 27 - src/timezone/data/southamerica | 1793 -- src/timezone/data/systemv | 37 - src/timezone/data/tzdata.zi | 4177 +++ src/timezone/known_abbrevs.txt | 6 +- src/timezone/localtime.c | 339 +- src/timezone/pgtz.c | 11 +- src/timezone/pgtz.h | 12 +- src/timezone/private.h | 32 +- src/timezone/strftime.c | 68 +- src/timezone/tzfile.h | 11 +- src/timezone/tznames/Africa.txt | 3 +- src/timezone/tznames/America.txt | 3 + src/timezone/tznames/Asia.txt | 8 +- src/timezone/tznames/Default | 5 +- src/timezone/tznames/Europe.txt | 2 +- src/timezone/tznames/Pacific.txt | 3 + src/timezone/zic.c | 701 +- src/tools/RELEASE_CHANGES | 8 +- src/tools/check_bison_recursion.pl | 4 +- src/tools/copyright.pl | 3 +- src/tools/editors/emacs.samples | 21 +- src/tools/findoidjoins/Makefile | 4 +- src/tools/findoidjoins/README | 8 +- src/tools/findoidjoins/findoidjoins.c | 13 +- src/tools/fix-old-flex-code.pl | 4 +- src/tools/git-external-diff | 35 + src/tools/git_changelog | 17 +- src/tools/ifaddrs/Makefile | 2 +- src/tools/msvc/Install.pm | 114 +- src/tools/msvc/MSBuildProject.pm | 110 +- src/tools/msvc/Mkvcbuild.pm | 279 +- src/tools/msvc/Project.pm | 32 +- src/tools/msvc/README | 22 +- src/tools/msvc/Solution.pm | 246 +- src/tools/msvc/VCBuildProject.pm | 294 - src/tools/msvc/VSObjectFactory.pm | 65 +- src/tools/msvc/build.pl | 11 +- src/tools/msvc/builddoc.bat | 7 - src/tools/msvc/builddoc.pl | 124 - src/tools/msvc/clean.bat | 19 +- 
src/tools/msvc/config_default.pl | 2 +- src/tools/msvc/dummylib/README | 13 + src/tools/msvc/dummylib/Win32.pm | 4 + src/tools/msvc/dummylib/Win32/Registry.pm | 13 + src/tools/msvc/dummylib/Win32API/File.pm | 14 + src/tools/msvc/ecpg_regression.proj | 3 + src/tools/msvc/gendef.pl | 77 +- src/tools/msvc/install.pl | 4 + src/tools/msvc/mkvcbuild.pl | 4 + src/tools/msvc/vcregress.pl | 159 +- src/tools/perlcheck/find_perl_files | 14 + src/tools/perlcheck/perlcriticrc | 18 + src/tools/perlcheck/pgperlcritic | 20 + src/tools/perlcheck/pgperlsyncheck | 16 + src/tools/pginclude/pgcheckdefines | 6 +- src/tools/pgindent/README | 17 +- src/tools/pgindent/exclude_file_patterns | 3 + src/tools/pgindent/perltidyrc | 8 +- src/tools/pgindent/pgindent | 48 +- src/tools/pgindent/pgperltidy | 15 +- src/tools/pgindent/typedefs.list | 191 +- src/tools/pgtest | 21 +- src/tools/testint128.c | 2 +- src/tools/valgrind.supp | 4 +- src/tools/version_stamp.pl | 19 +- src/tools/win32tzlist.pl | 18 +- src/tutorial/complex.c | 4 +- src/tutorial/complex.source | 6 +- src/tutorial/syscat.source | 2 +- 3539 files changed, 435402 insertions(+), 209005 deletions(-) create mode 100644 config/llvm.m4 create mode 100644 contrib/adminpack/.gitignore create mode 100644 contrib/adminpack/adminpack--1.0--1.1.sql create mode 100644 contrib/adminpack/adminpack--1.1--2.0.sql create mode 100644 contrib/adminpack/expected/adminpack.out create mode 100644 contrib/adminpack/sql/adminpack.sql create mode 100644 contrib/amcheck/amcheck--1.0--1.1.sql create mode 100644 contrib/btree_gin/btree_gin--1.2--1.3.sql create mode 100644 contrib/btree_gin/expected/bool.out create mode 100644 contrib/btree_gin/expected/bpchar.out create mode 100644 contrib/btree_gin/expected/name.out create mode 100644 contrib/btree_gin/expected/uuid.out create mode 100644 contrib/btree_gin/sql/bool.sql create mode 100644 contrib/btree_gin/sql/bpchar.sql create mode 100644 contrib/btree_gin/sql/name.sql create mode 100644 contrib/btree_gin/sql/uuid.sql delete mode 100644 contrib/chkpass/Makefile delete mode 100644 contrib/chkpass/chkpass--1.0.sql delete mode 100644 contrib/chkpass/chkpass--unpackaged--1.0.sql delete mode 100644 contrib/chkpass/chkpass.c delete mode 100644 contrib/chkpass/chkpass.control create mode 100644 contrib/citext/citext--1.4--1.5.sql create mode 100644 contrib/cube/cube--1.2--1.3.sql create mode 100644 contrib/cube/cube--1.3--1.4.sql delete mode 100644 contrib/cube/expected/cube_2.out create mode 100644 contrib/cube/expected/cube_sci.out create mode 100644 contrib/cube/sql/cube_sci.sql create mode 100644 contrib/file_fdw/data/list1.csv create mode 100644 contrib/file_fdw/data/list2.bad create mode 100644 contrib/file_fdw/data/list2.csv create mode 100644 contrib/fuzzystrmatch/.gitignore create mode 100644 contrib/fuzzystrmatch/expected/fuzzystrmatch.out create mode 100644 contrib/fuzzystrmatch/sql/fuzzystrmatch.sql create mode 100644 contrib/hstore/hstore--1.4--1.5.sql create mode 100644 contrib/isn/isn--1.1--1.2.sql create mode 100644 contrib/jsonb_plperl/.gitignore create mode 100644 contrib/jsonb_plperl/Makefile create mode 100644 contrib/jsonb_plperl/expected/jsonb_plperl.out create mode 100644 contrib/jsonb_plperl/expected/jsonb_plperlu.out create mode 100644 contrib/jsonb_plperl/jsonb_plperl--1.0.sql create mode 100644 contrib/jsonb_plperl/jsonb_plperl.c create mode 100644 contrib/jsonb_plperl/jsonb_plperl.control create mode 100644 contrib/jsonb_plperl/jsonb_plperlu--1.0.sql create mode 100644 contrib/jsonb_plperl/jsonb_plperlu.control 
create mode 100644 contrib/jsonb_plperl/sql/jsonb_plperl.sql create mode 100644 contrib/jsonb_plperl/sql/jsonb_plperlu.sql create mode 100644 contrib/jsonb_plpython/.gitignore create mode 100644 contrib/jsonb_plpython/Makefile create mode 100644 contrib/jsonb_plpython/expected/jsonb_plpython.out create mode 100644 contrib/jsonb_plpython/jsonb_plpython.c create mode 100644 contrib/jsonb_plpython/jsonb_plpython2u--1.0.sql create mode 100644 contrib/jsonb_plpython/jsonb_plpython2u.control create mode 100644 contrib/jsonb_plpython/jsonb_plpython3u--1.0.sql create mode 100644 contrib/jsonb_plpython/jsonb_plpython3u.control create mode 100644 contrib/jsonb_plpython/jsonb_plpythonu--1.0.sql create mode 100644 contrib/jsonb_plpython/jsonb_plpythonu.control create mode 100644 contrib/jsonb_plpython/sql/jsonb_plpython.sql create mode 100644 contrib/lo/.gitignore create mode 100644 contrib/lo/expected/lo.out create mode 100644 contrib/lo/sql/lo.sql create mode 100644 contrib/oid2name/t/001_basic.pl create mode 100644 contrib/pageinspect/pageinspect--1.6--1.7.sql create mode 100644 contrib/passwordcheck/.gitignore create mode 100644 contrib/passwordcheck/expected/passwordcheck.out create mode 100644 contrib/passwordcheck/sql/passwordcheck.sql create mode 100644 contrib/pg_prewarm/autoprewarm.c create mode 100644 contrib/pg_prewarm/pg_prewarm--1.1--1.2.sql create mode 100644 contrib/pg_stat_statements/pg_stat_statements--1.5--1.6.sql create mode 100644 contrib/pg_trgm/expected/pg_strict_word_trgm.out create mode 100644 contrib/pg_trgm/pg_trgm--1.3--1.4.sql create mode 100644 contrib/pg_trgm/sql/pg_strict_word_trgm.sql delete mode 100644 contrib/seg/expected/seg_1.out create mode 100644 contrib/seg/seg--1.1--1.2.sql create mode 100644 contrib/seg/seg--1.2--1.3.sql delete mode 100644 contrib/spi/timetravel--1.0.sql delete mode 100644 contrib/spi/timetravel--unpackaged--1.0.sql delete mode 100644 contrib/spi/timetravel.c delete mode 100644 contrib/spi/timetravel.control delete mode 100644 contrib/spi/timetravel.example create mode 100644 contrib/start-scripts/macos/README create mode 100644 contrib/start-scripts/macos/org.postgresql.postgres.plist create mode 100644 contrib/start-scripts/macos/postgres-wrapper.sh delete mode 100755 contrib/start-scripts/osx/PostgreSQL delete mode 100644 contrib/start-scripts/osx/README delete mode 100644 contrib/start-scripts/osx/StartupParameters.plist delete mode 100755 contrib/start-scripts/osx/install.sh create mode 100644 contrib/test_decoding/expected/oldest_xmin.out create mode 100644 contrib/test_decoding/expected/snapshot_transfer.out create mode 100644 contrib/test_decoding/expected/truncate.out create mode 100644 contrib/test_decoding/specs/oldest_xmin.spec create mode 100644 contrib/test_decoding/specs/snapshot_transfer.spec create mode 100644 contrib/test_decoding/sql/truncate.sql create mode 100644 contrib/vacuumlo/t/001_basic.pl create mode 100644 doc/src/sgml/btree.sgml delete mode 100644 doc/src/sgml/chkpass.sgml delete mode 100644 doc/src/sgml/contacts.sgml create mode 100644 doc/src/sgml/jit.sgml create mode 100644 doc/src/sgml/ref/alter_procedure.sgml create mode 100644 doc/src/sgml/ref/alter_routine.sgml create mode 100644 doc/src/sgml/ref/call.sgml create mode 100644 doc/src/sgml/ref/create_procedure.sgml create mode 100644 doc/src/sgml/ref/drop_procedure.sgml create mode 100644 doc/src/sgml/ref/drop_routine.sgml create mode 100644 doc/src/sgml/ref/pg_verify_checksums.sgml create mode 100644 doc/src/sgml/release-11.sgml create mode 100644 
doc/src/sgml/release-12.sgml delete mode 100644 doc/src/sgml/standalone-install.sgml create mode 100644 doc/src/sgml/standalone-install.xml create mode 100644 doc/src/sgml/standalone-profile.xsl create mode 100644 src/backend/access/common/session.c create mode 100644 src/backend/access/spgist/spgproc.c delete mode 100644 src/backend/catalog/README create mode 100644 src/backend/executor/execPartition.c create mode 100644 src/backend/jit/Makefile create mode 100644 src/backend/jit/README create mode 100644 src/backend/jit/jit.c create mode 100644 src/backend/jit/llvm/Makefile create mode 100644 src/backend/jit/llvm/llvmjit.c create mode 100644 src/backend/jit/llvm/llvmjit_deform.c create mode 100644 src/backend/jit/llvm/llvmjit_error.cpp create mode 100644 src/backend/jit/llvm/llvmjit_expr.c create mode 100644 src/backend/jit/llvm/llvmjit_inline.cpp create mode 100644 src/backend/jit/llvm/llvmjit_types.c create mode 100644 src/backend/jit/llvm/llvmjit_wrap.cpp create mode 100644 src/backend/lib/bloomfilter.c create mode 100644 src/backend/lib/dshash.c create mode 100644 src/backend/libpq/be-secure-common.c create mode 100644 src/backend/partitioning/Makefile create mode 100644 src/backend/partitioning/partbounds.c create mode 100644 src/backend/partitioning/partprune.c create mode 100644 src/backend/po/sv.po delete mode 100644 src/backend/port/dynloader/aix.c delete mode 100644 src/backend/port/dynloader/aix.h delete mode 100644 src/backend/port/dynloader/cygwin.c delete mode 100644 src/backend/port/dynloader/cygwin.h delete mode 100644 src/backend/port/dynloader/darwin.c delete mode 100644 src/backend/port/dynloader/darwin.h delete mode 100644 src/backend/port/dynloader/freebsd.c delete mode 100644 src/backend/port/dynloader/freebsd.h delete mode 100644 src/backend/port/dynloader/hpux.c delete mode 100644 src/backend/port/dynloader/hpux.h delete mode 100644 src/backend/port/dynloader/linux.c delete mode 100644 src/backend/port/dynloader/linux.h delete mode 100644 src/backend/port/dynloader/netbsd.c delete mode 100644 src/backend/port/dynloader/netbsd.h delete mode 100644 src/backend/port/dynloader/openbsd.c delete mode 100644 src/backend/port/dynloader/openbsd.h delete mode 100644 src/backend/port/dynloader/solaris.c delete mode 100644 src/backend/port/dynloader/solaris.h delete mode 100644 src/backend/port/dynloader/win32.c delete mode 100644 src/backend/port/dynloader/win32.h create mode 100644 src/backend/snowball/libstemmer/stem_ISO_8859_1_indonesian.c create mode 100644 src/backend/snowball/libstemmer/stem_ISO_8859_1_irish.c rename src/backend/snowball/libstemmer/{stem_ISO_8859_1_hungarian.c => stem_ISO_8859_2_hungarian.c} (54%) create mode 100644 src/backend/snowball/libstemmer/stem_UTF_8_arabic.c create mode 100644 src/backend/snowball/libstemmer/stem_UTF_8_indonesian.c create mode 100644 src/backend/snowball/libstemmer/stem_UTF_8_irish.c create mode 100644 src/backend/snowball/libstemmer/stem_UTF_8_lithuanian.c create mode 100644 src/backend/snowball/libstemmer/stem_UTF_8_nepali.c create mode 100644 src/backend/snowball/libstemmer/stem_UTF_8_tamil.c create mode 100644 src/backend/snowball/stopwords/nepali.stop create mode 100644 src/backend/storage/file/sharedfileset.c create mode 100644 src/backend/storage/ipc/barrier.c create mode 100644 src/backend/storage/ipc/signalfuncs.c create mode 100644 src/backend/utils/adt/cryptohashes.c create mode 100644 src/backend/utils/adt/expandedrecord.c delete mode 100644 src/backend/utils/adt/nabstime.c create mode 100644 
src/backend/utils/adt/partitionfuncs.c create mode 100644 src/backend/utils/cache/partcache.c create mode 100644 src/backend/utils/mmgr/generation.c create mode 100644 src/backend/utils/sort/sharedtuplestore.c create mode 100644 src/bin/initdb/po/tr.po create mode 100644 src/bin/initdb/po/vi.po create mode 100644 src/bin/pg_archivecleanup/po/ja.po create mode 100644 src/bin/pg_archivecleanup/po/ko.po create mode 100644 src/bin/pg_archivecleanup/po/tr.po create mode 100644 src/bin/pg_archivecleanup/po/vi.po create mode 100644 src/bin/pg_archivecleanup/t/010_pg_archivecleanup.pl create mode 100644 src/bin/pg_basebackup/po/ja.po create mode 100644 src/bin/pg_basebackup/po/sv.po create mode 100644 src/bin/pg_basebackup/po/tr.po create mode 100644 src/bin/pg_basebackup/po/vi.po create mode 100644 src/bin/pg_config/po/vi.po create mode 100644 src/bin/pg_controldata/po/tr.po create mode 100644 src/bin/pg_controldata/po/vi.po create mode 100644 src/bin/pg_ctl/po/tr.po create mode 100644 src/bin/pg_ctl/t/004_logrotate.pl create mode 100644 src/bin/pg_dump/po/he.po create mode 100644 src/bin/pg_dump/po/tr.po create mode 100644 src/bin/pg_resetwal/po/tr.po create mode 100644 src/bin/pg_resetwal/t/001_basic.pl create mode 100644 src/bin/pg_resetwal/t/002_corrupted.pl create mode 100644 src/bin/pg_rewind/po/tr.po create mode 100644 src/bin/pg_test_fsync/po/de.po create mode 100644 src/bin/pg_test_fsync/po/ja.po create mode 100644 src/bin/pg_test_fsync/po/ko.po create mode 100644 src/bin/pg_test_fsync/po/tr.po create mode 100644 src/bin/pg_test_fsync/po/vi.po create mode 100644 src/bin/pg_test_timing/po/ja.po create mode 100644 src/bin/pg_test_timing/po/ko.po create mode 100644 src/bin/pg_test_timing/po/tr.po create mode 100644 src/bin/pg_test_timing/po/vi.po create mode 100644 src/bin/pg_upgrade/po/de.po create mode 100644 src/bin/pg_upgrade/po/ja.po create mode 100644 src/bin/pg_upgrade/po/ko.po create mode 100644 src/bin/pg_upgrade/po/sv.po create mode 100644 src/bin/pg_upgrade/po/tr.po create mode 100644 src/bin/pg_verify_checksums/.gitignore create mode 100644 src/bin/pg_verify_checksums/Makefile create mode 100644 src/bin/pg_verify_checksums/nls.mk create mode 100644 src/bin/pg_verify_checksums/pg_verify_checksums.c create mode 100644 src/bin/pg_verify_checksums/t/001_basic.pl create mode 100644 src/bin/pg_verify_checksums/t/002_actions.pl create mode 100644 src/bin/pg_waldump/po/de.po create mode 100644 src/bin/pg_waldump/po/ja.po create mode 100644 src/bin/pg_waldump/po/ko.po create mode 100644 src/bin/pg_waldump/po/ru.po create mode 100644 src/bin/pg_waldump/po/tr.po create mode 100644 src/bin/pg_waldump/po/vi.po delete mode 100644 src/bin/pgbench/t/001_pgbench.pl create mode 100644 src/bin/pgbench/t/001_pgbench_with_server.pl create mode 100644 src/bin/pgbench/t/002_pgbench_no_server.pl create mode 100644 src/bin/scripts/po/tr.po create mode 100644 src/common/file_perm.c create mode 100644 src/common/link-canary.c rename src/{bin/psql => fe_utils}/conditional.c (82%) create mode 100644 src/include/access/session.h create mode 100644 src/include/access/tupdesc_details.h create mode 100644 src/include/catalog/Makefile create mode 100644 src/include/catalog/pg_aggregate.dat create mode 100644 src/include/catalog/pg_am.dat create mode 100644 src/include/catalog/pg_amop.dat create mode 100644 src/include/catalog/pg_amproc.dat create mode 100644 src/include/catalog/pg_authid.dat create mode 100644 src/include/catalog/pg_cast.dat create mode 100644 src/include/catalog/pg_class.dat create mode 100644 
src/include/catalog/pg_collation.dat delete mode 100644 src/include/catalog/pg_collation_fn.h delete mode 100644 src/include/catalog/pg_constraint_fn.h delete mode 100644 src/include/catalog/pg_conversion_fn.h create mode 100644 src/include/catalog/pg_database.dat delete mode 100644 src/include/catalog/pg_inherits_fn.h create mode 100644 src/include/catalog/pg_language.dat create mode 100644 src/include/catalog/pg_namespace.dat create mode 100644 src/include/catalog/pg_opclass.dat create mode 100644 src/include/catalog/pg_operator.dat delete mode 100644 src/include/catalog/pg_operator_fn.h create mode 100644 src/include/catalog/pg_opfamily.dat create mode 100644 src/include/catalog/pg_pltemplate.dat create mode 100644 src/include/catalog/pg_proc.dat delete mode 100644 src/include/catalog/pg_proc_fn.h create mode 100644 src/include/catalog/pg_range.dat create mode 100644 src/include/catalog/pg_tablespace.dat create mode 100644 src/include/catalog/pg_ts_config.dat create mode 100644 src/include/catalog/pg_ts_config_map.dat create mode 100644 src/include/catalog/pg_ts_dict.dat create mode 100644 src/include/catalog/pg_ts_parser.dat create mode 100644 src/include/catalog/pg_ts_template.dat create mode 100644 src/include/catalog/pg_type.dat delete mode 100644 src/include/catalog/pg_type_fn.h create mode 100755 src/include/catalog/reformat_dat_file.pl create mode 100644 src/include/common/file_perm.h create mode 100644 src/include/common/int.h create mode 100644 src/include/common/link-canary.h create mode 100644 src/include/executor/execPartition.h rename src/{bin/psql => include/fe_utils}/conditional.h (75%) create mode 100644 src/include/fe_utils/connect.h create mode 100644 src/include/jit/jit.h create mode 100644 src/include/jit/llvmjit.h create mode 100644 src/include/jit/llvmjit_emit.h create mode 100644 src/include/lib/bloomfilter.h create mode 100644 src/include/lib/dshash.h create mode 100644 src/include/partitioning/partbounds.h create mode 100644 src/include/partitioning/partdefs.h create mode 100644 src/include/partitioning/partprune.h create mode 100644 src/include/port/win32_port.h delete mode 100644 src/include/snowball/libstemmer/stem_ISO_8859_1_hungarian.h create mode 100644 src/include/snowball/libstemmer/stem_ISO_8859_1_indonesian.h create mode 100644 src/include/snowball/libstemmer/stem_ISO_8859_1_irish.h create mode 100644 src/include/snowball/libstemmer/stem_ISO_8859_2_hungarian.h create mode 100644 src/include/snowball/libstemmer/stem_UTF_8_arabic.h create mode 100644 src/include/snowball/libstemmer/stem_UTF_8_indonesian.h create mode 100644 src/include/snowball/libstemmer/stem_UTF_8_irish.h create mode 100644 src/include/snowball/libstemmer/stem_UTF_8_lithuanian.h create mode 100644 src/include/snowball/libstemmer/stem_UTF_8_nepali.h create mode 100644 src/include/snowball/libstemmer/stem_UTF_8_tamil.h create mode 100644 src/include/storage/barrier.h create mode 100644 src/include/storage/sharedfileset.h delete mode 100644 src/include/utils/dynamic_loader.h create mode 100644 src/include/utils/expandedrecord.h create mode 100644 src/include/utils/float.h create mode 100644 src/include/utils/hashutils.h delete mode 100644 src/include/utils/nabstime.h create mode 100644 src/include/utils/partcache.h create mode 100644 src/include/utils/sharedtuplestore.h delete mode 100644 src/interfaces/ecpg/ecpglib/pg_type.h create mode 100644 src/interfaces/ecpg/ecpglib/po/sv.po create mode 100644 src/interfaces/ecpg/ecpglib/po/vi.po create mode 100644 
src/interfaces/ecpg/include/pgtypes.h create mode 100644 src/interfaces/ecpg/preproc/po/sv.po create mode 100644 src/interfaces/ecpg/preproc/po/vi.po create mode 100644 src/interfaces/ecpg/test/compat_oracle/.gitignore create mode 100644 src/interfaces/ecpg/test/compat_oracle/Makefile create mode 100644 src/interfaces/ecpg/test/compat_oracle/char_array.pgc delete mode 100644 src/interfaces/ecpg/test/ecpg_schedule_tcp delete mode 100644 src/interfaces/ecpg/test/expected/compat_informix-dec_test-MinGW32.stdout create mode 100644 src/interfaces/ecpg/test/expected/compat_oracle-char_array.c create mode 100644 src/interfaces/ecpg/test/expected/compat_oracle-char_array.stderr create mode 100644 src/interfaces/ecpg/test/expected/compat_oracle-char_array.stdout delete mode 100644 src/interfaces/ecpg/test/expected/pgtypeslib-num_test-MinGW32.stdout delete mode 100644 src/interfaces/ecpg/test/expected/pgtypeslib-num_test2-MinGW32.stdout create mode 100644 src/interfaces/ecpg/test/expected/preproc-whenever_do_continue.c create mode 100644 src/interfaces/ecpg/test/expected/preproc-whenever_do_continue.stderr create mode 100644 src/interfaces/ecpg/test/expected/preproc-whenever_do_continue.stdout create mode 100644 src/interfaces/ecpg/test/preproc/whenever_do_continue.pgc create mode 100644 src/interfaces/ecpg/test/printf_hack.h delete mode 100644 src/interfaces/ecpg/test/resultmap create mode 100644 src/interfaces/libpq/fe-secure-common.c create mode 100644 src/interfaces/libpq/fe-secure-common.h create mode 100644 src/pl/plperl/expected/plperl_call.out create mode 100644 src/pl/plperl/expected/plperl_transaction.out create mode 100644 src/pl/plperl/po/vi.po create mode 100644 src/pl/plperl/sql/plperl_call.sql create mode 100644 src/pl/plperl/sql/plperl_transaction.sql create mode 100644 src/pl/plpgsql/src/expected/plpgsql_cache.out create mode 100644 src/pl/plpgsql/src/expected/plpgsql_cache_1.out create mode 100644 src/pl/plpgsql/src/expected/plpgsql_call.out create mode 100644 src/pl/plpgsql/src/expected/plpgsql_control.out create mode 100644 src/pl/plpgsql/src/expected/plpgsql_domain.out create mode 100644 src/pl/plpgsql/src/expected/plpgsql_record.out create mode 100644 src/pl/plpgsql/src/expected/plpgsql_transaction.out create mode 100644 src/pl/plpgsql/src/expected/plpgsql_varprops.out create mode 100644 src/pl/plpgsql/src/po/tr.po create mode 100644 src/pl/plpgsql/src/po/vi.po create mode 100644 src/pl/plpgsql/src/sql/plpgsql_cache.sql create mode 100644 src/pl/plpgsql/src/sql/plpgsql_call.sql create mode 100644 src/pl/plpgsql/src/sql/plpgsql_control.sql create mode 100644 src/pl/plpgsql/src/sql/plpgsql_domain.sql create mode 100644 src/pl/plpgsql/src/sql/plpgsql_record.sql create mode 100644 src/pl/plpgsql/src/sql/plpgsql_transaction.sql create mode 100644 src/pl/plpgsql/src/sql/plpgsql_varprops.sql create mode 100644 src/pl/plpython/expected/plpython_call.out create mode 100644 src/pl/plpython/expected/plpython_transaction.out create mode 100644 src/pl/plpython/po/tr.po create mode 100644 src/pl/plpython/po/vi.po create mode 100644 src/pl/plpython/sql/plpython_call.sql create mode 100644 src/pl/plpython/sql/plpython_transaction.sql create mode 100644 src/pl/tcl/expected/pltcl_call.out create mode 100644 src/pl/tcl/expected/pltcl_transaction.out create mode 100644 src/pl/tcl/po/vi.po create mode 100644 src/pl/tcl/sql/pltcl_call.sql create mode 100644 src/pl/tcl/sql/pltcl_transaction.sql create mode 100644 src/port/dlopen.c create mode 100644 src/port/pg_crc32c_armv8.c create mode 100644 
src/port/pg_crc32c_armv8_choose.c rename src/port/{pg_crc32c_choose.c => pg_crc32c_sse42_choose.c} (74%) create mode 100644 src/port/pread.c create mode 100644 src/port/pwrite.c create mode 100644 src/port/strnlen.c create mode 100644 src/test/isolation/expected/alter-table-4.out create mode 100644 src/test/isolation/expected/freeze-the-dead.out create mode 100644 src/test/isolation/expected/multiple-cic.out create mode 100644 src/test/isolation/expected/multiple-cic_1.out create mode 100644 src/test/isolation/expected/partition-key-update-1.out create mode 100644 src/test/isolation/expected/partition-key-update-2.out create mode 100644 src/test/isolation/expected/partition-key-update-3.out create mode 100644 src/test/isolation/expected/partition-key-update-4.out create mode 100644 src/test/isolation/expected/plpgsql-toast.out create mode 100644 src/test/isolation/expected/predicate-gin-fastupdate.out create mode 100644 src/test/isolation/expected/predicate-gin-nomatch.out create mode 100644 src/test/isolation/expected/predicate-gin.out create mode 100644 src/test/isolation/expected/predicate-gist.out create mode 100644 src/test/isolation/expected/predicate-hash.out create mode 100644 src/test/isolation/expected/truncate-conflict.out create mode 100644 src/test/isolation/expected/vacuum-concurrent-drop.out create mode 100644 src/test/isolation/expected/vacuum-conflict.out create mode 100644 src/test/isolation/expected/vacuum-skip-locked.out create mode 100644 src/test/isolation/specs/alter-table-4.spec create mode 100644 src/test/isolation/specs/freeze-the-dead.spec create mode 100644 src/test/isolation/specs/multiple-cic.spec create mode 100644 src/test/isolation/specs/partition-key-update-1.spec create mode 100644 src/test/isolation/specs/partition-key-update-2.spec create mode 100644 src/test/isolation/specs/partition-key-update-3.spec create mode 100644 src/test/isolation/specs/partition-key-update-4.spec create mode 100644 src/test/isolation/specs/plpgsql-toast.spec create mode 100644 src/test/isolation/specs/predicate-gin-fastupdate.spec create mode 100644 src/test/isolation/specs/predicate-gin-nomatch.spec create mode 100644 src/test/isolation/specs/predicate-gin.spec create mode 100644 src/test/isolation/specs/predicate-gist.spec create mode 100644 src/test/isolation/specs/predicate-hash.spec create mode 100644 src/test/isolation/specs/truncate-conflict.spec create mode 100644 src/test/isolation/specs/vacuum-concurrent-drop.spec create mode 100644 src/test/isolation/specs/vacuum-conflict.spec create mode 100644 src/test/isolation/specs/vacuum-skip-locked.spec create mode 100644 src/test/kerberos/.gitignore create mode 100644 src/test/kerberos/Makefile create mode 100644 src/test/kerberos/README create mode 100644 src/test/kerberos/t/001_auth.pl create mode 100644 src/test/ldap/.gitignore create mode 100644 src/test/ldap/Makefile create mode 100644 src/test/ldap/README create mode 100644 src/test/ldap/authdata.ldif create mode 100644 src/test/ldap/t/001_auth.pl create mode 100644 src/test/modules/brin/t/01_workitems.pl create mode 100644 src/test/modules/test_bloomfilter/.gitignore create mode 100644 src/test/modules/test_bloomfilter/Makefile create mode 100644 src/test/modules/test_bloomfilter/README create mode 100644 src/test/modules/test_bloomfilter/expected/test_bloomfilter.out create mode 100644 src/test/modules/test_bloomfilter/sql/test_bloomfilter.sql create mode 100644 src/test/modules/test_bloomfilter/test_bloomfilter--1.0.sql create mode 100644 
src/test/modules/test_bloomfilter/test_bloomfilter.c create mode 100644 src/test/modules/test_bloomfilter/test_bloomfilter.control create mode 100644 src/test/modules/test_predtest/.gitignore create mode 100644 src/test/modules/test_predtest/Makefile create mode 100644 src/test/modules/test_predtest/README create mode 100644 src/test/modules/test_predtest/expected/test_predtest.out create mode 100644 src/test/modules/test_predtest/sql/test_predtest.sql create mode 100644 src/test/modules/test_predtest/test_predtest--1.0.sql create mode 100644 src/test/modules/test_predtest/test_predtest.c create mode 100644 src/test/modules/test_predtest/test_predtest.control create mode 100644 src/test/modules/test_rbtree/.gitignore create mode 100644 src/test/modules/test_rbtree/Makefile create mode 100644 src/test/modules/test_rbtree/README create mode 100644 src/test/modules/test_rbtree/expected/test_rbtree.out create mode 100644 src/test/modules/test_rbtree/sql/test_rbtree.sql create mode 100644 src/test/modules/test_rbtree/test_rbtree--1.0.sql create mode 100644 src/test/modules/test_rbtree/test_rbtree.c create mode 100644 src/test/modules/test_rbtree/test_rbtree.control create mode 100644 src/test/recovery/t/013_crash_restart.pl create mode 100644 src/test/recovery/t/014_unlogged_reinit.pl create mode 100644 src/test/recovery/t/015_promotion_pages.pl delete mode 100644 src/test/regress/expected/abstime.out create mode 100644 src/test/regress/expected/create_procedure.out create mode 100644 src/test/regress/expected/fast_default.out delete mode 100644 src/test/regress/expected/float4-exp-three-digits.out delete mode 100644 src/test/regress/expected/float8-exp-three-digits-win32.out delete mode 100644 src/test/regress/expected/float8-small-is-zero_1.out delete mode 100644 src/test/regress/expected/geometry_1.out delete mode 100644 src/test/regress/expected/geometry_2.out create mode 100644 src/test/regress/expected/hash_func.out create mode 100644 src/test/regress/expected/hash_part.out create mode 100644 src/test/regress/expected/index_including.out create mode 100644 src/test/regress/expected/indexing.out delete mode 100644 src/test/regress/expected/int8-exp-three-digits.out delete mode 100644 src/test/regress/expected/numerology_1.out create mode 100644 src/test/regress/expected/partition_aggregate.out create mode 100644 src/test/regress/expected/partition_info.out create mode 100644 src/test/regress/expected/partition_join.out create mode 100644 src/test/regress/expected/partition_prune.out create mode 100644 src/test/regress/expected/reloptions.out delete mode 100644 src/test/regress/expected/reltime.out delete mode 100644 src/test/regress/expected/tinterval.out create mode 100644 src/test/regress/expected/write_parallel.out delete mode 100644 src/test/regress/sql/abstime.sql create mode 100644 src/test/regress/sql/create_procedure.sql create mode 100644 src/test/regress/sql/fast_default.sql create mode 100644 src/test/regress/sql/hash_func.sql create mode 100644 src/test/regress/sql/hash_part.sql create mode 100644 src/test/regress/sql/index_including.sql create mode 100644 src/test/regress/sql/indexing.sql create mode 100644 src/test/regress/sql/partition_aggregate.sql create mode 100644 src/test/regress/sql/partition_info.sql create mode 100644 src/test/regress/sql/partition_join.sql create mode 100644 src/test/regress/sql/partition_prune.sql create mode 100644 src/test/regress/sql/reloptions.sql delete mode 100644 src/test/regress/sql/reltime.sql delete mode 100644 
src/test/regress/sql/tinterval.sql create mode 100644 src/test/regress/sql/write_parallel.sql create mode 100644 src/test/ssl/ssl/server-password.key create mode 100644 src/test/ssl/t/002_scram.pl create mode 100644 src/test/subscription/t/006_rewrite.pl create mode 100644 src/test/subscription/t/007_ddl.pl create mode 100644 src/test/subscription/t/008_diff_schema.pl create mode 100644 src/test/subscription/t/009_matviews.pl create mode 100644 src/test/subscription/t/010_truncate.pl delete mode 100644 src/timezone/data/africa delete mode 100644 src/timezone/data/antarctica delete mode 100644 src/timezone/data/asia delete mode 100644 src/timezone/data/australasia delete mode 100644 src/timezone/data/backward delete mode 100644 src/timezone/data/backzone delete mode 100644 src/timezone/data/etcetera delete mode 100644 src/timezone/data/europe delete mode 100644 src/timezone/data/factory delete mode 100644 src/timezone/data/northamerica delete mode 100644 src/timezone/data/pacificnew delete mode 100644 src/timezone/data/southamerica delete mode 100644 src/timezone/data/systemv create mode 100644 src/timezone/data/tzdata.zi delete mode 100644 src/tools/msvc/VCBuildProject.pm delete mode 100755 src/tools/msvc/builddoc.bat delete mode 100644 src/tools/msvc/builddoc.pl create mode 100644 src/tools/msvc/dummylib/README create mode 100644 src/tools/msvc/dummylib/Win32.pm create mode 100644 src/tools/msvc/dummylib/Win32/Registry.pm create mode 100644 src/tools/msvc/dummylib/Win32API/File.pm create mode 100644 src/tools/perlcheck/find_perl_files create mode 100644 src/tools/perlcheck/perlcriticrc create mode 100755 src/tools/perlcheck/pgperlcritic create mode 100755 src/tools/perlcheck/pgperlsyncheck diff --git a/.dir-locals.el b/.dir-locals.el index d8827a669a..eff4671ef8 100644 --- a/.dir-locals.el +++ b/.dir-locals.el @@ -5,10 +5,10 @@ (fill-column . 78) (indent-tabs-mode . t) (tab-width . 4))) - (dsssl-mode . ((indent-tabs-mode . nil))) - (nxml-mode . ((indent-tabs-mode . nil))) + (nxml-mode . ((fill-column . 78) + (indent-tabs-mode . nil))) (perl-mode . ((perl-indent-level . 4) - (perl-continued-statement-offset . 4) + (perl-continued-statement-offset . 2) (perl-continued-brace-offset . 4) (perl-brace-offset . 0) (perl-brace-imaginary-offset . 
0) diff --git a/.gitignore b/.gitignore index 36882a0176..4c3c87772d 100644 --- a/.gitignore +++ b/.gitignore @@ -1,6 +1,7 @@ # Global excludes across all subdirectories *.o *.obj +*.bc *.so *.so.[0-9] *.so.[0-9].[0-9] @@ -21,8 +22,9 @@ objfiles.txt *.gcda *.gcov *.gcov.out -lcov.info +lcov*.info coverage/ +coverage-html-stamp *.vcproj *.vcxproj win32ver.rc diff --git a/COPYRIGHT b/COPYRIGHT index c320eccac0..33e6e4842a 100644 --- a/COPYRIGHT +++ b/COPYRIGHT @@ -1,7 +1,7 @@ PostgreSQL Database Management System (formerly known as Postgres, then as Postgres95) -Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group +Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group Portions Copyright (c) 1994, The Regents of the University of California diff --git a/GNUmakefile.in b/GNUmakefile.in index dc76a5d11d..563c83b5a2 100644 --- a/GNUmakefile.in +++ b/GNUmakefile.in @@ -63,7 +63,7 @@ distclean maintainer-clean: @rm -rf autom4te.cache/ rm -f config.cache config.log config.status GNUmakefile -check check-tests installcheck installcheck-parallel installcheck-tests: +check check-tests installcheck installcheck-parallel installcheck-tests: submake-generated-headers $(MAKE) -C src/test/regress $@ $(call recurse,check-world,src/test src/pl src/interfaces/ecpg contrib src/bin,check) @@ -78,7 +78,6 @@ GNUmakefile: GNUmakefile.in $(top_builddir)/config.status distdir = postgresql-$(VERSION) dummy = =install= -garbage = =* "#"* ."#"* *~* *.orig *.rej core postgresql-* dist: $(distdir).tar.gz $(distdir).tar.bz2 rm -rf $(distdir) diff --git a/Makefile b/Makefile index 4c68950e90..c400854cd3 100644 --- a/Makefile +++ b/Makefile @@ -11,6 +11,10 @@ # GNUmakefile won't exist yet, so we catch that case as well. +# AIX make defaults to building *every* target of the first rule. Start with +# a single-target, empty rule to make the other targets non-default. +all: + all check install installdirs installcheck installcheck-parallel uninstall clean distclean maintainer-clean dist distcheck world check-world install-world installcheck-world: @if [ ! -f GNUmakefile ] ; then \ echo "You need to run the 'configure' program first. See the file"; \ diff --git a/aclocal.m4 b/aclocal.m4 index 0e95ed4b4d..a517e949f1 100644 --- a/aclocal.m4 +++ b/aclocal.m4 @@ -7,6 +7,7 @@ m4_include([config/c-library.m4]) m4_include([config/docbook.m4]) m4_include([config/general.m4]) m4_include([config/libtool.m4]) +m4_include([config/llvm.m4]) m4_include([config/perl.m4]) m4_include([config/pkg.m4]) m4_include([config/programs.m4]) diff --git a/config/c-compiler.m4 b/config/c-compiler.m4 index 7275ea69fe..af2dea1c2a 100644 --- a/config/c-compiler.m4 +++ b/config/c-compiler.m4 @@ -19,24 +19,38 @@ fi])# PGAC_C_SIGNED # PGAC_C_PRINTF_ARCHETYPE # ----------------------- -# Set the format archetype used by gcc to check printf type functions. We -# prefer "gnu_printf", which includes what glibc uses, such as %m for error -# strings and %lld for 64 bit long longs. GCC 4.4 introduced it. It makes a -# dramatic difference on Windows. +# Select the format archetype to be used by gcc to check printf-type functions. +# We prefer "gnu_printf", as that most closely matches the features supported +# by src/port/snprintf.c (particularly the %m conversion spec). However, +# on some NetBSD versions, that doesn't work while "__syslog__" does. +# If all else fails, use "printf". 
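A minimal sketch of what the selected archetype governs, assuming a GCC-compatible compiler; the wrapper name pgac_log and the standalone main() are illustrative and not taken from the patch. Under format(gnu_printf, ...) the -Wformat checker accepts glibc-style conversions such as %m, which is what the configure probe above exercises; under the plain printf archetype the same call would draw a warning.

/* sketch: the gnu_printf archetype lets -Wformat understand %m and %lld */
#include <stdarg.h>
#include <stdio.h>

extern void pgac_log(int level, const char *fmt, ...)
	__attribute__((format(gnu_printf, 2, 3)));

void
pgac_log(int level, const char *fmt, ...)
{
	va_list		ap;

	(void) level;
	va_start(ap, fmt);
	vfprintf(stderr, fmt, ap);	/* stand-in for the real output path */
	va_end(ap);
}

int
main(void)
{
	/* accepted under gnu_printf (or __syslog__); warned about under printf */
	pgac_log(0, "open failed for %s: %m\n", "foo.conf");
	return 0;
}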
AC_DEFUN([PGAC_PRINTF_ARCHETYPE], [AC_CACHE_CHECK([for printf format archetype], pgac_cv_printf_archetype, +[pgac_cv_printf_archetype=gnu_printf +PGAC_TEST_PRINTF_ARCHETYPE +if [[ "$ac_archetype_ok" = no ]]; then + pgac_cv_printf_archetype=__syslog__ + PGAC_TEST_PRINTF_ARCHETYPE + if [[ "$ac_archetype_ok" = no ]]; then + pgac_cv_printf_archetype=printf + fi +fi]) +AC_DEFINE_UNQUOTED([PG_PRINTF_ATTRIBUTE], [$pgac_cv_printf_archetype], +[Define to best printf format archetype, usually gnu_printf if available.]) +])# PGAC_PRINTF_ARCHETYPE + +# Subroutine: test $pgac_cv_printf_archetype, set $ac_archetype_ok to yes or no +AC_DEFUN([PGAC_TEST_PRINTF_ARCHETYPE], [ac_save_c_werror_flag=$ac_c_werror_flag ac_c_werror_flag=yes AC_COMPILE_IFELSE([AC_LANG_PROGRAM( -[extern int -pgac_write(int ignore, const char *fmt,...) -__attribute__((format(gnu_printf, 2, 3)));], [])], - [pgac_cv_printf_archetype=gnu_printf], - [pgac_cv_printf_archetype=printf]) -ac_c_werror_flag=$ac_save_c_werror_flag]) -AC_DEFINE_UNQUOTED([PG_PRINTF_ATTRIBUTE], [$pgac_cv_printf_archetype], - [Define to gnu_printf if compiler supports it, else printf.]) -])# PGAC_PRINTF_ARCHETYPE +[extern void pgac_write(int ignore, const char *fmt,...) +__attribute__((format($pgac_cv_printf_archetype, 2, 3)));], +[pgac_write(0, "error %s: %m", "foo");])], + [ac_archetype_ok=yes], + [ac_archetype_ok=no]) +ac_c_werror_flag=$ac_save_c_werror_flag +])# PGAC_TEST_PRINTF_ARCHETYPE # PGAC_TYPE_64BIT_INT(TYPE) @@ -96,9 +110,11 @@ undefine([Ac_cachevar])dnl # PGAC_TYPE_128BIT_INT # --------------------- # Check if __int128 is a working 128 bit integer type, and if so -# define PG_INT128_TYPE to that typename. This currently only detects -# a GCC/clang extension, but support for different environments may be -# added in the future. +# define PG_INT128_TYPE to that typename, and define ALIGNOF_PG_INT128_TYPE +# as its alignment requirement. +# +# This currently only detects a GCC/clang extension, but support for other +# environments may be added in the future. # # For the moment we only test for support for 128bit math; support for # 128bit literals and snprintf is not required. @@ -106,28 +122,61 @@ AC_DEFUN([PGAC_TYPE_128BIT_INT], [AC_CACHE_CHECK([for __int128], [pgac_cv__128bit_int], [AC_LINK_IFELSE([AC_LANG_PROGRAM([ /* + * We don't actually run this test, just link it to verify that any support + * functions needed for __int128 are present. + * * These are globals to discourage the compiler from folding all the * arithmetic tests down to compile-time constants. We do not have - * convenient support for 64bit literals at this point... + * convenient support for 128bit literals at this point... */ __int128 a = 48828125; -__int128 b = 97656255; +__int128 b = 97656250; ],[ __int128 c,d; a = (a << 12) + 1; /* 200000000001 */ b = (b << 12) + 5; /* 400000000005 */ -/* use the most relevant arithmetic ops */ +/* try the most relevant arithmetic ops */ c = a * b; d = (c + b) / b; -/* return different values, to prevent optimizations */ +/* must use the results, else compiler may optimize arithmetic away */ if (d != a+1) - return 0; -return 1; + return 1; ])], [pgac_cv__128bit_int=yes], [pgac_cv__128bit_int=no])]) if test x"$pgac_cv__128bit_int" = xyes ; then - AC_DEFINE(PG_INT128_TYPE, __int128, [Define to the name of a signed 128-bit integer type.]) + # Use of non-default alignment with __int128 tickles bugs in some compilers. + # If not cross-compiling, we can test for bugs and disable use of __int128 + # with buggy compilers. 
If cross-compiling, hope for the best. + # https://gcc.gnu.org/bugzilla/show_bug.cgi?id=83925 + AC_CACHE_CHECK([for __int128 alignment bug], [pgac_cv__128bit_int_bug], + [AC_RUN_IFELSE([AC_LANG_PROGRAM([ +/* This must match the corresponding code in c.h: */ +#if defined(__GNUC__) || defined(__SUNPRO_C) || defined(__IBMC__) +#define pg_attribute_aligned(a) __attribute__((aligned(a))) +#endif +typedef __int128 int128a +#if defined(pg_attribute_aligned) +pg_attribute_aligned(8) +#endif +; +int128a holder; +void pass_by_val(void *buffer, int128a par) { holder = par; } +],[ +long int i64 = 97656225L << 12; +int128a q; +pass_by_val(main, (int128a) i64); +q = (int128a) i64; +if (q != holder) + return 1; +])], + [pgac_cv__128bit_int_bug=ok], + [pgac_cv__128bit_int_bug=broken], + [pgac_cv__128bit_int_bug="assuming ok"])]) + if test x"$pgac_cv__128bit_int_bug" != xbroken ; then + AC_DEFINE(PG_INT128_TYPE, __int128, [Define to the name of a signed 128-bit integer type.]) + AC_CHECK_ALIGNOF(PG_INT128_TYPE) + fi fi])# PGAC_TYPE_128BIT_INT @@ -224,6 +273,23 @@ AC_DEFINE(HAVE__BUILTIN_TYPES_COMPATIBLE_P, 1, fi])# PGAC_C_TYPES_COMPATIBLE +# PGAC_C_BUILTIN_BSWAP16 +# ------------------------- +# Check if the C compiler understands __builtin_bswap16(), +# and define HAVE__BUILTIN_BSWAP16 if so. +AC_DEFUN([PGAC_C_BUILTIN_BSWAP16], +[AC_CACHE_CHECK(for __builtin_bswap16, pgac_cv__builtin_bswap16, +[AC_COMPILE_IFELSE([AC_LANG_SOURCE( +[static unsigned long int x = __builtin_bswap16(0xaabb);] +)], +[pgac_cv__builtin_bswap16=yes], +[pgac_cv__builtin_bswap16=no])]) +if test x"$pgac_cv__builtin_bswap16" = xyes ; then +AC_DEFINE(HAVE__BUILTIN_BSWAP16, 1, + [Define to 1 if your compiler understands __builtin_bswap16.]) +fi])# PGAC_C_BUILTIN_BSWAP16 + + # PGAC_C_BUILTIN_BSWAP32 # ------------------------- @@ -265,10 +331,15 @@ fi])# PGAC_C_BUILTIN_BSWAP64 # ------------------------- # Check if the C compiler understands __builtin_constant_p(), # and define HAVE__BUILTIN_CONSTANT_P if so. +# We need __builtin_constant_p("string literal") to be true, but some older +# compilers don't think that, so test for that case explicitly. AC_DEFUN([PGAC_C_BUILTIN_CONSTANT_P], [AC_CACHE_CHECK(for __builtin_constant_p, pgac_cv__builtin_constant_p, [AC_COMPILE_IFELSE([AC_LANG_SOURCE( -[[static int x; static int y[__builtin_constant_p(x) ? x : 1];]] +[[static int x; + static int y[__builtin_constant_p(x) ? x : 1]; + static int z[__builtin_constant_p("string literal") ? 1 : x]; +]] )], [pgac_cv__builtin_constant_p=yes], [pgac_cv__builtin_constant_p=no])]) @@ -279,6 +350,34 @@ fi])# PGAC_C_BUILTIN_CONSTANT_P +# PGAC_C_BUILTIN_OP_OVERFLOW +# ------------------------- +# Check if the C compiler understands __builtin_$op_overflow(), +# and define HAVE__BUILTIN_OP_OVERFLOW if so. +# +# Check for the most complicated case, 64 bit multiplication, as a +# proxy for all of the operations. To detect the case where the compiler +# knows the function but library support is missing, we must link not just +# compile, and store the results in global variables so the compiler doesn't +# optimize away the call. 
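As a standalone sketch of how the probed builtin is used in ordinary code (assuming GCC >= 5 or a recent clang; the configure test itself follows below):

/* __builtin_mul_overflow() stores a*b into *result and returns true if
 * the product overflowed, instead of silently wrapping.  As the comment
 * above explains, a compiler may accept the call yet need library
 * support at run time, which is why the configure probe links rather
 * than merely compiles. */
#include <stdint.h>
#include <stdio.h>

int
main(void)
{
	int64_t		a = INT64_C(3037000500);	/* just above sqrt(INT64_MAX) */
	int64_t		result;

	if (__builtin_mul_overflow(a, a, &result))
		printf("a * a overflows int64\n");
	else
		printf("a * a = %lld\n", (long long) result);
	return 0;
}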
+AC_DEFUN([PGAC_C_BUILTIN_OP_OVERFLOW], +[AC_CACHE_CHECK(for __builtin_mul_overflow, pgac_cv__builtin_op_overflow, +[AC_LINK_IFELSE([AC_LANG_PROGRAM([ +PG_INT64_TYPE a = 1; +PG_INT64_TYPE b = 1; +PG_INT64_TYPE result; +int oflo; +], +[oflo = __builtin_mul_overflow(a, b, &result);])], +[pgac_cv__builtin_op_overflow=yes], +[pgac_cv__builtin_op_overflow=no])]) +if test x"$pgac_cv__builtin_op_overflow" = xyes ; then +AC_DEFINE(HAVE__BUILTIN_OP_OVERFLOW, 1, + [Define to 1 if your compiler understands __builtin_$op_overflow.]) +fi])# PGAC_C_BUILTIN_OP_OVERFLOW + + + # PGAC_C_BUILTIN_UNREACHABLE # -------------------------- # Check if the C compiler understands __builtin_unreachable(), @@ -324,45 +423,40 @@ fi])# PGAC_C_COMPUTED_GOTO -# PGAC_C_VA_ARGS -# -------------- -# Check if the C compiler understands C99-style variadic macros, -# and define HAVE__VA_ARGS if so. -AC_DEFUN([PGAC_C_VA_ARGS], -[AC_CACHE_CHECK(for __VA_ARGS__, pgac_cv__va_args, -[AC_COMPILE_IFELSE([AC_LANG_PROGRAM([#include ], -[#define debug(...) fprintf(stderr, __VA_ARGS__) -debug("%s", "blarg"); -])], -[pgac_cv__va_args=yes], -[pgac_cv__va_args=no])]) -if test x"$pgac_cv__va_args" = xyes ; then -AC_DEFINE(HAVE__VA_ARGS, 1, - [Define to 1 if your compiler understands __VA_ARGS__ in macros.]) -fi])# PGAC_C_VA_ARGS - - - -# PGAC_PROG_CC_CFLAGS_OPT +# PGAC_PROG_VARCC_VARFLAGS_OPT # ----------------------- -# Given a string, check if the compiler supports the string as a -# command-line option. If it does, add the string to CFLAGS. -AC_DEFUN([PGAC_PROG_CC_CFLAGS_OPT], -[define([Ac_cachevar], [AS_TR_SH([pgac_cv_prog_cc_cflags_$1])])dnl -AC_CACHE_CHECK([whether $CC supports $1], [Ac_cachevar], +# Given a compiler, variable name and a string, check if the compiler +# supports the string as a command-line option. If it does, add the +# string to the given variable. +AC_DEFUN([PGAC_PROG_VARCC_VARFLAGS_OPT], +[define([Ac_cachevar], [AS_TR_SH([pgac_cv_prog_$1_cflags_$3])])dnl +AC_CACHE_CHECK([whether ${$1} supports $3, for $2], [Ac_cachevar], [pgac_save_CFLAGS=$CFLAGS -CFLAGS="$pgac_save_CFLAGS $1" +pgac_save_CC=$CC +CC=${$1} +CFLAGS="${$2} $3" ac_save_c_werror_flag=$ac_c_werror_flag ac_c_werror_flag=yes _AC_COMPILE_IFELSE([AC_LANG_PROGRAM()], [Ac_cachevar=yes], [Ac_cachevar=no]) ac_c_werror_flag=$ac_save_c_werror_flag -CFLAGS="$pgac_save_CFLAGS"]) +CFLAGS="$pgac_save_CFLAGS" +CC="$pgac_save_CC"]) if test x"$Ac_cachevar" = x"yes"; then - CFLAGS="$CFLAGS $1" + $2="${$2} $3" fi undefine([Ac_cachevar])dnl +])# PGAC_PROG_VARCC_VARFLAGS_OPT + + + +# PGAC_PROG_CC_CFLAGS_OPT +# ----------------------- +# Given a string, check if the compiler supports the string as a +# command-line option. If it does, add the string to CFLAGS. +AC_DEFUN([PGAC_PROG_CC_CFLAGS_OPT], [ +PGAC_PROG_VARCC_VARFLAGS_OPT(CC, CFLAGS, $1) ])# PGAC_PROG_CC_CFLAGS_OPT @@ -373,22 +467,48 @@ undefine([Ac_cachevar])dnl # the string as a command-line option. If it does, add the string to # the given variable. AC_DEFUN([PGAC_PROG_CC_VAR_OPT], -[define([Ac_cachevar], [AS_TR_SH([pgac_cv_prog_cc_cflags_$2])])dnl -AC_CACHE_CHECK([whether $CC supports $2], [Ac_cachevar], -[pgac_save_CFLAGS=$CFLAGS -CFLAGS="$pgac_save_CFLAGS $2" -ac_save_c_werror_flag=$ac_c_werror_flag -ac_c_werror_flag=yes +[PGAC_PROG_VARCC_VARFLAGS_OPT(CC, $1, $2) +])# PGAC_PROG_CC_VAR_OPT + + + +# PGAC_PROG_VARCXX_VARFLAGS_OPT +# ----------------------- +# Given a compiler, variable name and a string, check if the compiler +# supports the string as a command-line option. 
If it does, add the +# string to the given variable. +AC_DEFUN([PGAC_PROG_VARCXX_VARFLAGS_OPT], +[define([Ac_cachevar], [AS_TR_SH([pgac_cv_prog_$1_cxxflags_$3])])dnl +AC_CACHE_CHECK([whether ${$1} supports $3, for $2], [Ac_cachevar], +[pgac_save_CXXFLAGS=$CXXFLAGS +pgac_save_CXX=$CXX +CXX=${$1} +CXXFLAGS="${$2} $3" +ac_save_cxx_werror_flag=$ac_cxx_werror_flag +ac_cxx_werror_flag=yes +AC_LANG_PUSH(C++) _AC_COMPILE_IFELSE([AC_LANG_PROGRAM()], [Ac_cachevar=yes], [Ac_cachevar=no]) -ac_c_werror_flag=$ac_save_c_werror_flag -CFLAGS="$pgac_save_CFLAGS"]) +AC_LANG_POP([]) +ac_cxx_werror_flag=$ac_save_cxx_werror_flag +CXXFLAGS="$pgac_save_CXXFLAGS" +CXX="$pgac_save_CXX"]) if test x"$Ac_cachevar" = x"yes"; then - $1="${$1} $2" + $2="${$2} $3" fi undefine([Ac_cachevar])dnl -])# PGAC_PROG_CC_VAR_OPT +])# PGAC_PROG_VARCXX_VARFLAGS_OPT + + + +# PGAC_PROG_CXX_CFLAGS_OPT +# ----------------------- +# Given a string, check if the compiler supports the string as a +# command-line option. If it does, add the string to CXXFLAGS. +AC_DEFUN([PGAC_PROG_CXX_CFLAGS_OPT], +[PGAC_PROG_VARCXX_VARFLAGS_OPT(CXX, CXXFLAGS, $1) +])# PGAC_PROG_CXX_VAR_OPT @@ -463,7 +583,7 @@ AC_DEFUN([PGAC_HAVE_GCC__SYNC_INT32_CAS], [pgac_cv_gcc_sync_int32_cas="yes"], [pgac_cv_gcc_sync_int32_cas="no"])]) if test x"$pgac_cv_gcc_sync_int32_cas" = x"yes"; then - AC_DEFINE(HAVE_GCC__SYNC_INT32_CAS, 1, [Define to 1 if you have __sync_compare_and_swap(int *, int, int).]) + AC_DEFINE(HAVE_GCC__SYNC_INT32_CAS, 1, [Define to 1 if you have __sync_val_compare_and_swap(int *, int, int).]) fi])# PGAC_HAVE_GCC__SYNC_INT32_CAS # PGAC_HAVE_GCC__SYNC_INT64_CAS @@ -478,7 +598,7 @@ AC_DEFUN([PGAC_HAVE_GCC__SYNC_INT64_CAS], [pgac_cv_gcc_sync_int64_cas="yes"], [pgac_cv_gcc_sync_int64_cas="no"])]) if test x"$pgac_cv_gcc_sync_int64_cas" = x"yes"; then - AC_DEFINE(HAVE_GCC__SYNC_INT64_CAS, 1, [Define to 1 if you have __sync_compare_and_swap(int64 *, int64, int64).]) + AC_DEFINE(HAVE_GCC__SYNC_INT64_CAS, 1, [Define to 1 if you have __sync_val_compare_and_swap(int64 *, int64, int64).]) fi])# PGAC_HAVE_GCC__SYNC_INT64_CAS # PGAC_HAVE_GCC__ATOMIC_INT32_CAS @@ -510,7 +630,7 @@ AC_DEFUN([PGAC_HAVE_GCC__ATOMIC_INT64_CAS], [pgac_cv_gcc_atomic_int64_cas="yes"], [pgac_cv_gcc_atomic_int64_cas="no"])]) if test x"$pgac_cv_gcc_atomic_int64_cas" = x"yes"; then - AC_DEFINE(HAVE_GCC__ATOMIC_INT64_CAS, 1, [Define to 1 if you have __atomic_compare_exchange_n(int64 *, int *, int64).]) + AC_DEFINE(HAVE_GCC__ATOMIC_INT64_CAS, 1, [Define to 1 if you have __atomic_compare_exchange_n(int64 *, int64 *, int64).]) fi])# PGAC_HAVE_GCC__ATOMIC_INT64_CAS # PGAC_SSE42_CRC32_INTRINSICS @@ -542,3 +662,37 @@ if test x"$Ac_cachevar" = x"yes"; then fi undefine([Ac_cachevar])dnl ])# PGAC_SSE42_CRC32_INTRINSICS + + +# PGAC_ARMV8_CRC32C_INTRINSICS +# ----------------------- +# Check if the compiler supports the CRC32C instructions using the __crc32cb, +# __crc32ch, __crc32cw, and __crc32cd intrinsic functions. These instructions +# were first introduced in ARMv8 in the optional CRC Extension, and became +# mandatory in ARMv8.1. +# +# An optional compiler flag can be passed as argument (e.g. +# -march=armv8-a+crc). If the intrinsics are supported, sets +# pgac_armv8_crc32c_intrinsics, and CFLAGS_ARMV8_CRC32C. 
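The intrinsics named above come from the ARM C Language Extensions; the following self-contained sketch shows their typical use, assuming an ARMv8 target compiled with something like -march=armv8-a+crc and the <arm_acle.h> header. It is only an illustration of the intrinsics, not PostgreSQL's CRC-32C implementation:

/* Fold a buffer into a CRC-32C value with the ARMv8 CRC instructions,
 * consuming 8 bytes at a time and finishing byte by byte.  Uses the
 * common initial/final inversion convention for CRC-32C. */
#include <arm_acle.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

static uint32_t
crc32c_armv8(uint32_t crc, const unsigned char *p, size_t len)
{
	while (len >= 8)
	{
		uint64_t	chunk;

		memcpy(&chunk, p, 8);		/* avoids unaligned loads */
		crc = __crc32cd(crc, chunk);
		p += 8;
		len -= 8;
	}
	while (len > 0)
	{
		crc = __crc32cb(crc, *p++);
		len--;
	}
	return crc;
}

int
main(void)
{
	const char *s = "hello, crc32c";
	uint32_t	crc;

	crc = crc32c_armv8(0xFFFFFFFF, (const unsigned char *) s, strlen(s)) ^ 0xFFFFFFFF;
	printf("0x%08X\n", crc);
	return 0;
}

The configure probe only needs to confirm that such calls compile and link under the candidate CFLAGS; the byte and doubleword variants shown here correspond to the __crc32cb and __crc32cd calls in the test program that follows.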
+AC_DEFUN([PGAC_ARMV8_CRC32C_INTRINSICS], +[define([Ac_cachevar], [AS_TR_SH([pgac_cv_armv8_crc32c_intrinsics_$1])])dnl +AC_CACHE_CHECK([for __crc32cb, __crc32ch, __crc32cw, and __crc32cd with CFLAGS=$1], [Ac_cachevar], +[pgac_save_CFLAGS=$CFLAGS +CFLAGS="$pgac_save_CFLAGS $1" +AC_LINK_IFELSE([AC_LANG_PROGRAM([#include ], + [unsigned int crc = 0; + crc = __crc32cb(crc, 0); + crc = __crc32ch(crc, 0); + crc = __crc32cw(crc, 0); + crc = __crc32cd(crc, 0); + /* return computed value, to prevent the above being optimized away */ + return crc == 0;])], + [Ac_cachevar=yes], + [Ac_cachevar=no]) +CFLAGS="$pgac_save_CFLAGS"]) +if test x"$Ac_cachevar" = x"yes"; then + CFLAGS_ARMV8_CRC32C="$1" + pgac_armv8_crc32c_intrinsics=yes +fi +undefine([Ac_cachevar])dnl +])# PGAC_ARMV8_CRC32C_INTRINSICS diff --git a/config/c-library.m4 b/config/c-library.m4 index 9c2207b03d..6f2b0fbb4e 100644 --- a/config/c-library.m4 +++ b/config/c-library.m4 @@ -82,23 +82,23 @@ AH_VERBATIM(GETTIMEOFDAY_1ARG_, # PGAC_FUNC_STRERROR_R_INT # --------------------------- -# Check if strerror_r() returns an int (SUSv3) rather than a char * (GNU libc) -# If so, define STRERROR_R_INT +# Check if strerror_r() returns int (POSIX) rather than char * (GNU libc). +# If so, define STRERROR_R_INT. +# The result is uncertain if strerror_r() isn't provided, +# but we don't much care. AC_DEFUN([PGAC_FUNC_STRERROR_R_INT], [AC_CACHE_CHECK(whether strerror_r returns int, pgac_cv_func_strerror_r_int, [AC_COMPILE_IFELSE([AC_LANG_PROGRAM([#include ], -[#ifndef _AIX -int strerror_r(int, char *, size_t); -#else -/* Older AIX has 'int' for the third argument so we don't test the args. */ -int strerror_r(); -#endif])], +[[char buf[100]; + switch (strerror_r(1, buf, sizeof(buf))) + { case 0: break; default: break; } +]])], [pgac_cv_func_strerror_r_int=yes], [pgac_cv_func_strerror_r_int=no])]) if test x"$pgac_cv_func_strerror_r_int" = xyes ; then AC_DEFINE(STRERROR_R_INT, 1, - [Define to 1 if strerror_r() returns a int.]) + [Define to 1 if strerror_r() returns int.]) fi ])# PGAC_FUNC_STRERROR_R_INT @@ -171,129 +171,6 @@ AC_DEFUN([PGAC_STRUCT_ADDRINFO], ])])# PGAC_STRUCT_ADDRINFO -# PGAC_FUNC_SNPRINTF_LONG_LONG_INT_MODIFIER -# --------------------------------------- -# Determine which length modifier snprintf uses for long long int. We -# handle ll, q, and I64. The result is in shell variable -# LONG_LONG_INT_MODIFIER. -# -# MinGW uses '%I64d', though gcc throws an warning with -Wall, -# while '%lld' doesn't generate a warning, but doesn't work. -# -AC_DEFUN([PGAC_FUNC_SNPRINTF_LONG_LONG_INT_MODIFIER], -[AC_MSG_CHECKING([snprintf length modifier for long long int]) -AC_CACHE_VAL(pgac_cv_snprintf_long_long_int_modifier, -[for pgac_modifier in 'll' 'q' 'I64'; do -AC_RUN_IFELSE([AC_LANG_SOURCE([[#include -#include -typedef long long int ac_int64; -#define INT64_FORMAT "%${pgac_modifier}d" - -ac_int64 a = 20000001; -ac_int64 b = 40000005; - -int does_int64_snprintf_work() -{ - ac_int64 c; - char buf[100]; - - if (sizeof(ac_int64) != 8) - return 0; /* doesn't look like the right size */ - - c = a * b; - snprintf(buf, 100, INT64_FORMAT, c); - if (strcmp(buf, "800000140000005") != 0) - return 0; /* either multiply or snprintf is busted */ - return 1; -} - -int -main() { - return (! 
does_int64_snprintf_work()); -}]])], -[pgac_cv_snprintf_long_long_int_modifier=$pgac_modifier; break], -[], -[pgac_cv_snprintf_long_long_int_modifier=cross; break]) -done])dnl AC_CACHE_VAL - -LONG_LONG_INT_MODIFIER='' - -case $pgac_cv_snprintf_long_long_int_modifier in - cross) AC_MSG_RESULT([cannot test (not on host machine)]);; - ?*) AC_MSG_RESULT([$pgac_cv_snprintf_long_long_int_modifier]) - LONG_LONG_INT_MODIFIER=$pgac_cv_snprintf_long_long_int_modifier;; - *) AC_MSG_RESULT(none);; -esac])# PGAC_FUNC_SNPRINTF_LONG_LONG_INT_MODIFIER - - -# PGAC_FUNC_SNPRINTF_ARG_CONTROL -# --------------------------------------- -# Determine if snprintf supports %1$ argument selection, e.g. %5$ selects -# the fifth argument after the printf format string. -# This is not in the C99 standard, but in the Single Unix Specification (SUS). -# It is used in our language translation strings. -# -AC_DEFUN([PGAC_FUNC_SNPRINTF_ARG_CONTROL], -[AC_MSG_CHECKING([whether snprintf supports argument control]) -AC_CACHE_VAL(pgac_cv_snprintf_arg_control, -[AC_RUN_IFELSE([AC_LANG_SOURCE([[#include -#include - -int main() -{ - char buf[100]; - - /* can it swap arguments? */ - snprintf(buf, 100, "%2\$d %1\$d", 3, 4); - if (strcmp(buf, "4 3") != 0) - return 1; - return 0; -}]])], -[pgac_cv_snprintf_arg_control=yes], -[pgac_cv_snprintf_arg_control=no], -[pgac_cv_snprintf_arg_control=cross]) -])dnl AC_CACHE_VAL -AC_MSG_RESULT([$pgac_cv_snprintf_arg_control]) -])# PGAC_FUNC_SNPRINTF_ARG_CONTROL - -# PGAC_FUNC_SNPRINTF_SIZE_T_SUPPORT -# --------------------------------------- -# Determine if snprintf supports the z length modifier for printing -# size_t-sized variables. That's supported by C99 and POSIX but not -# all platforms play ball, so we must test whether it's working. -# -AC_DEFUN([PGAC_FUNC_SNPRINTF_SIZE_T_SUPPORT], -[AC_MSG_CHECKING([whether snprintf supports the %z modifier]) -AC_CACHE_VAL(pgac_cv_snprintf_size_t_support, -[AC_RUN_IFELSE([AC_LANG_SOURCE([[#include -#include - -int main() -{ - char bufz[100]; - char buf64[100]; - - /* - * Print the largest unsigned number fitting in a size_t using both %zu - * and the previously-determined format for 64-bit integers. Note that - * we don't run this code unless we know snprintf handles 64-bit ints. - */ - bufz[0] = '\0'; /* in case snprintf fails to emit anything */ - snprintf(bufz, sizeof(bufz), "%zu", ~((size_t) 0)); - snprintf(buf64, sizeof(buf64), "%" INT64_MODIFIER "u", - (unsigned PG_INT64_TYPE) ~((size_t) 0)); - if (strcmp(bufz, buf64) != 0) - return 1; - return 0; -}]])], -[pgac_cv_snprintf_size_t_support=yes], -[pgac_cv_snprintf_size_t_support=no], -[pgac_cv_snprintf_size_t_support=cross]) -])dnl AC_CACHE_VAL -AC_MSG_RESULT([$pgac_cv_snprintf_size_t_support]) -])# PGAC_FUNC_SNPRINTF_SIZE_T_SUPPORT - - # PGAC_TYPE_LOCALE_T # ------------------ # Check for the locale_t type and find the right header file. macOS diff --git a/config/config.guess b/config/config.guess index faa63aa942..883a6713bf 100644 --- a/config/config.guess +++ b/config/config.guess @@ -1,8 +1,8 @@ #! /bin/sh # Attempt to guess a canonical system name. -# Copyright 1992-2017 Free Software Foundation, Inc. +# Copyright 1992-2018 Free Software Foundation, Inc. -timestamp='2017-05-11' +timestamp='2018-05-19' # This file is free software; you can redistribute it and/or modify it # under the terms of the GNU General Public License as published by @@ -15,7 +15,7 @@ timestamp='2017-05-11' # General Public License for more details. 
# # You should have received a copy of the GNU General Public License -# along with this program; if not, see . +# along with this program; if not, see . # # As a special exception to the GNU General Public License, if you # distribute this file as part of a program that contains a @@ -27,7 +27,7 @@ timestamp='2017-05-11' # Originally written by Per Bothner; maintained since 2000 by Ben Elliston. # # You can get the latest version of this script from: -# http://git.savannah.gnu.org/gitweb/?p=config.git;a=blob_plain;f=config.guess +# https://git.savannah.gnu.org/gitweb/?p=config.git;a=blob_plain;f=config.guess # # Please send patches to . @@ -39,7 +39,7 @@ Usage: $0 [OPTION] Output the configuration name of the system \`$me' is run on. -Operation modes: +Options: -h, --help print this help, then exit -t, --time-stamp print date of last modification, then exit -v, --version print version number, then exit @@ -50,7 +50,7 @@ version="\ GNU config.guess ($timestamp) Originally written by Per Bothner. -Copyright 1992-2017 Free Software Foundation, Inc. +Copyright 1992-2018 Free Software Foundation, Inc. This is free software; see the source for copying conditions. There is NO warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE." @@ -106,10 +106,10 @@ trap "rm -f \$tmpfiles 2>/dev/null; rmdir \$tmp 2>/dev/null; exit 1" 1 2 13 15 ; { echo "$me: cannot create a temporary directory in $TMPDIR" >&2 ; exit 1 ; } ; dummy=$tmp/dummy ; tmpfiles="$dummy.c $dummy.o $dummy.rel $dummy" ; -case $CC_FOR_BUILD,$HOST_CC,$CC in - ,,) echo "int x;" > $dummy.c ; +case ${CC_FOR_BUILD-},${HOST_CC-},${CC-} in + ,,) echo "int x;" > "$dummy.c" ; for c in cc gcc c89 c99 ; do - if ($c -c -o $dummy.o $dummy.c) >/dev/null 2>&1 ; then + if ($c -c -o "$dummy.o" "$dummy.c") >/dev/null 2>&1 ; then CC_FOR_BUILD="$c"; break ; fi ; done ; @@ -132,14 +132,14 @@ UNAME_RELEASE=`(uname -r) 2>/dev/null` || UNAME_RELEASE=unknown UNAME_SYSTEM=`(uname -s) 2>/dev/null` || UNAME_SYSTEM=unknown UNAME_VERSION=`(uname -v) 2>/dev/null` || UNAME_VERSION=unknown -case "${UNAME_SYSTEM}" in +case "$UNAME_SYSTEM" in Linux|GNU|GNU/*) # If the system lacks a compiler, then just pick glibc. # We could probably try harder. LIBC=gnu - eval $set_cc_for_build - cat <<-EOF > $dummy.c + eval "$set_cc_for_build" + cat <<-EOF > "$dummy.c" #include #if defined(__UCLIBC__) LIBC=uclibc @@ -149,13 +149,20 @@ Linux|GNU|GNU/*) LIBC=gnu #endif EOF - eval `$CC_FOR_BUILD -E $dummy.c 2>/dev/null | grep '^LIBC' | sed 's, ,,g'` + eval "`$CC_FOR_BUILD -E "$dummy.c" 2>/dev/null | grep '^LIBC' | sed 's, ,,g'`" + + # If ldd exists, use it to detect musl libc. + if command -v ldd >/dev/null && \ + ldd --version 2>&1 | grep -q ^musl + then + LIBC=musl + fi ;; esac # Note: order is significant - the case branches are not exclusive. -case "${UNAME_MACHINE}:${UNAME_SYSTEM}:${UNAME_RELEASE}:${UNAME_VERSION}" in +case "$UNAME_MACHINE:$UNAME_SYSTEM:$UNAME_RELEASE:$UNAME_VERSION" in *:NetBSD:*:*) # NetBSD (nbsd) targets should (where applicable) match one or # more of the tuples: *-*-netbsdelf*, *-*-netbsdaout*, @@ -169,30 +176,30 @@ case "${UNAME_MACHINE}:${UNAME_SYSTEM}:${UNAME_RELEASE}:${UNAME_VERSION}" in # portion of the name. We always set it to "unknown". 
sysctl="sysctl -n hw.machine_arch" UNAME_MACHINE_ARCH=`(uname -p 2>/dev/null || \ - /sbin/$sysctl 2>/dev/null || \ - /usr/sbin/$sysctl 2>/dev/null || \ + "/sbin/$sysctl" 2>/dev/null || \ + "/usr/sbin/$sysctl" 2>/dev/null || \ echo unknown)` - case "${UNAME_MACHINE_ARCH}" in + case "$UNAME_MACHINE_ARCH" in armeb) machine=armeb-unknown ;; arm*) machine=arm-unknown ;; sh3el) machine=shl-unknown ;; sh3eb) machine=sh-unknown ;; sh5el) machine=sh5le-unknown ;; earmv*) - arch=`echo ${UNAME_MACHINE_ARCH} | sed -e 's,^e\(armv[0-9]\).*$,\1,'` - endian=`echo ${UNAME_MACHINE_ARCH} | sed -ne 's,^.*\(eb\)$,\1,p'` - machine=${arch}${endian}-unknown + arch=`echo "$UNAME_MACHINE_ARCH" | sed -e 's,^e\(armv[0-9]\).*$,\1,'` + endian=`echo "$UNAME_MACHINE_ARCH" | sed -ne 's,^.*\(eb\)$,\1,p'` + machine="${arch}${endian}"-unknown ;; - *) machine=${UNAME_MACHINE_ARCH}-unknown ;; + *) machine="$UNAME_MACHINE_ARCH"-unknown ;; esac # The Operating System including object format, if it has switched # to ELF recently (or will in the future) and ABI. - case "${UNAME_MACHINE_ARCH}" in + case "$UNAME_MACHINE_ARCH" in earm*) os=netbsdelf ;; arm*|i386|m68k|ns32k|sh3*|sparc|vax) - eval $set_cc_for_build + eval "$set_cc_for_build" if echo __ELF__ | $CC_FOR_BUILD -E - 2>/dev/null \ | grep -q __ELF__ then @@ -208,10 +215,10 @@ case "${UNAME_MACHINE}:${UNAME_SYSTEM}:${UNAME_RELEASE}:${UNAME_VERSION}" in ;; esac # Determine ABI tags. - case "${UNAME_MACHINE_ARCH}" in + case "$UNAME_MACHINE_ARCH" in earm*) expr='s/^earmv[0-9]/-eabi/;s/eb$//' - abi=`echo ${UNAME_MACHINE_ARCH} | sed -e "$expr"` + abi=`echo "$UNAME_MACHINE_ARCH" | sed -e "$expr"` ;; esac # The OS release @@ -219,46 +226,55 @@ case "${UNAME_MACHINE}:${UNAME_SYSTEM}:${UNAME_RELEASE}:${UNAME_VERSION}" in # thus, need a distinct triplet. However, they do not need # kernel version information, so it can be replaced with a # suitable tag, in the style of linux-gnu. - case "${UNAME_VERSION}" in + case "$UNAME_VERSION" in Debian*) release='-gnu' ;; *) - release=`echo ${UNAME_RELEASE} | sed -e 's/[-_].*//' | cut -d. -f1,2` + release=`echo "$UNAME_RELEASE" | sed -e 's/[-_].*//' | cut -d. -f1,2` ;; esac # Since CPU_TYPE-MANUFACTURER-KERNEL-OPERATING_SYSTEM: # contains redundant information, the shorter form: # CPU_TYPE-MANUFACTURER-OPERATING_SYSTEM is used. 
- echo "${machine}-${os}${release}${abi}" + echo "$machine-${os}${release}${abi-}" exit ;; *:Bitrig:*:*) UNAME_MACHINE_ARCH=`arch | sed 's/Bitrig.//'` - echo ${UNAME_MACHINE_ARCH}-unknown-bitrig${UNAME_RELEASE} + echo "$UNAME_MACHINE_ARCH"-unknown-bitrig"$UNAME_RELEASE" exit ;; *:OpenBSD:*:*) UNAME_MACHINE_ARCH=`arch | sed 's/OpenBSD.//'` - echo ${UNAME_MACHINE_ARCH}-unknown-openbsd${UNAME_RELEASE} + echo "$UNAME_MACHINE_ARCH"-unknown-openbsd"$UNAME_RELEASE" exit ;; *:LibertyBSD:*:*) UNAME_MACHINE_ARCH=`arch | sed 's/^.*BSD\.//'` - echo ${UNAME_MACHINE_ARCH}-unknown-libertybsd${UNAME_RELEASE} + echo "$UNAME_MACHINE_ARCH"-unknown-libertybsd"$UNAME_RELEASE" + exit ;; + *:MidnightBSD:*:*) + echo "$UNAME_MACHINE"-unknown-midnightbsd"$UNAME_RELEASE" exit ;; *:ekkoBSD:*:*) - echo ${UNAME_MACHINE}-unknown-ekkobsd${UNAME_RELEASE} + echo "$UNAME_MACHINE"-unknown-ekkobsd"$UNAME_RELEASE" exit ;; *:SolidBSD:*:*) - echo ${UNAME_MACHINE}-unknown-solidbsd${UNAME_RELEASE} + echo "$UNAME_MACHINE"-unknown-solidbsd"$UNAME_RELEASE" exit ;; macppc:MirBSD:*:*) - echo powerpc-unknown-mirbsd${UNAME_RELEASE} + echo powerpc-unknown-mirbsd"$UNAME_RELEASE" exit ;; *:MirBSD:*:*) - echo ${UNAME_MACHINE}-unknown-mirbsd${UNAME_RELEASE} + echo "$UNAME_MACHINE"-unknown-mirbsd"$UNAME_RELEASE" exit ;; *:Sortix:*:*) - echo ${UNAME_MACHINE}-unknown-sortix + echo "$UNAME_MACHINE"-unknown-sortix + exit ;; + *:Redox:*:*) + echo "$UNAME_MACHINE"-unknown-redox exit ;; + mips:OSF1:*.*) + echo mips-dec-osf1 + exit ;; alpha:OSF1:*:*) case $UNAME_RELEASE in *4.0) @@ -310,28 +326,19 @@ case "${UNAME_MACHINE}:${UNAME_SYSTEM}:${UNAME_RELEASE}:${UNAME_VERSION}" in # A Tn.n version is a released field test version. # A Xn.n version is an unreleased experimental baselevel. # 1.2 uses "1.2" for uname -r. - echo ${UNAME_MACHINE}-dec-osf`echo ${UNAME_RELEASE} | sed -e 's/^[PVTX]//' | tr ABCDEFGHIJKLMNOPQRSTUVWXYZ abcdefghijklmnopqrstuvwxyz` + echo "$UNAME_MACHINE"-dec-osf"`echo "$UNAME_RELEASE" | sed -e 's/^[PVTX]//' | tr ABCDEFGHIJKLMNOPQRSTUVWXYZ abcdefghijklmnopqrstuvwxyz`" # Reset EXIT trap before exiting to avoid spurious non-zero exit code. exitcode=$? trap '' 0 exit $exitcode ;; - Alpha\ *:Windows_NT*:*) - # How do we know it's Interix rather than the generic POSIX subsystem? - # Should we change UNAME_MACHINE based on the output of uname instead - # of the specific Alpha model? 
- echo alpha-pc-interix - exit ;; - 21064:Windows_NT:50:3) - echo alpha-dec-winnt3.5 - exit ;; Amiga*:UNIX_System_V:4.0:*) echo m68k-unknown-sysv4 exit ;; *:[Aa]miga[Oo][Ss]:*:*) - echo ${UNAME_MACHINE}-unknown-amigaos + echo "$UNAME_MACHINE"-unknown-amigaos exit ;; *:[Mm]orph[Oo][Ss]:*:*) - echo ${UNAME_MACHINE}-unknown-morphos + echo "$UNAME_MACHINE"-unknown-morphos exit ;; *:OS/390:*:*) echo i370-ibm-openedition @@ -343,7 +350,7 @@ case "${UNAME_MACHINE}:${UNAME_SYSTEM}:${UNAME_RELEASE}:${UNAME_VERSION}" in echo powerpc-ibm-os400 exit ;; arm:RISC*:1.[012]*:*|arm:riscix:1.[012]*:*) - echo arm-acorn-riscix${UNAME_RELEASE} + echo arm-acorn-riscix"$UNAME_RELEASE" exit ;; arm*:riscos:*:*|arm*:RISCOS:*:*) echo arm-unknown-riscos @@ -370,19 +377,19 @@ case "${UNAME_MACHINE}:${UNAME_SYSTEM}:${UNAME_RELEASE}:${UNAME_VERSION}" in sparc) echo sparc-icl-nx7; exit ;; esac ;; s390x:SunOS:*:*) - echo ${UNAME_MACHINE}-ibm-solaris2`echo ${UNAME_RELEASE}|sed -e 's/[^.]*//'` + echo "$UNAME_MACHINE"-ibm-solaris2"`echo "$UNAME_RELEASE" | sed -e 's/[^.]*//'`" exit ;; sun4H:SunOS:5.*:*) - echo sparc-hal-solaris2`echo ${UNAME_RELEASE}|sed -e 's/[^.]*//'` + echo sparc-hal-solaris2"`echo "$UNAME_RELEASE"|sed -e 's/[^.]*//'`" exit ;; sun4*:SunOS:5.*:* | tadpole*:SunOS:5.*:*) - echo sparc-sun-solaris2`echo ${UNAME_RELEASE}|sed -e 's/[^.]*//'` + echo sparc-sun-solaris2"`echo "$UNAME_RELEASE" | sed -e 's/[^.]*//'`" exit ;; i86pc:AuroraUX:5.*:* | i86xen:AuroraUX:5.*:*) - echo i386-pc-auroraux${UNAME_RELEASE} + echo i386-pc-auroraux"$UNAME_RELEASE" exit ;; i86pc:SunOS:5.*:* | i86xen:SunOS:5.*:*) - eval $set_cc_for_build + eval "$set_cc_for_build" SUN_ARCH=i386 # If there is a compiler, see if it is configured for 64-bit objects. # Note that the Sun cc does not turn __LP64__ into 1 like gcc does. @@ -395,13 +402,13 @@ case "${UNAME_MACHINE}:${UNAME_SYSTEM}:${UNAME_RELEASE}:${UNAME_VERSION}" in SUN_ARCH=x86_64 fi fi - echo ${SUN_ARCH}-pc-solaris2`echo ${UNAME_RELEASE}|sed -e 's/[^.]*//'` + echo "$SUN_ARCH"-pc-solaris2"`echo "$UNAME_RELEASE"|sed -e 's/[^.]*//'`" exit ;; sun4*:SunOS:6*:*) # According to config.sub, this is the proper way to canonicalize # SunOS6. Hard to guess exactly what SunOS6 will be like, but # it's likely to be more like Solaris than SunOS4. - echo sparc-sun-solaris3`echo ${UNAME_RELEASE}|sed -e 's/[^.]*//'` + echo sparc-sun-solaris3"`echo "$UNAME_RELEASE"|sed -e 's/[^.]*//'`" exit ;; sun4*:SunOS:*:*) case "`/usr/bin/arch -k`" in @@ -410,25 +417,25 @@ case "${UNAME_MACHINE}:${UNAME_SYSTEM}:${UNAME_RELEASE}:${UNAME_VERSION}" in ;; esac # Japanese Language versions have a version number like `4.1.3-JL'. - echo sparc-sun-sunos`echo ${UNAME_RELEASE}|sed -e 's/-/_/'` + echo sparc-sun-sunos"`echo "$UNAME_RELEASE"|sed -e 's/-/_/'`" exit ;; sun3*:SunOS:*:*) - echo m68k-sun-sunos${UNAME_RELEASE} + echo m68k-sun-sunos"$UNAME_RELEASE" exit ;; sun*:*:4.2BSD:*) UNAME_RELEASE=`(sed 1q /etc/motd | awk '{print substr($5,1,3)}') 2>/dev/null` - test "x${UNAME_RELEASE}" = x && UNAME_RELEASE=3 + test "x$UNAME_RELEASE" = x && UNAME_RELEASE=3 case "`/bin/arch`" in sun3) - echo m68k-sun-sunos${UNAME_RELEASE} + echo m68k-sun-sunos"$UNAME_RELEASE" ;; sun4) - echo sparc-sun-sunos${UNAME_RELEASE} + echo sparc-sun-sunos"$UNAME_RELEASE" ;; esac exit ;; aushp:SunOS:*:*) - echo sparc-auspex-sunos${UNAME_RELEASE} + echo sparc-auspex-sunos"$UNAME_RELEASE" exit ;; # The situation for MiNT is a little confusing. 
The machine name # can be virtually everything (everything which is not @@ -439,44 +446,44 @@ case "${UNAME_MACHINE}:${UNAME_SYSTEM}:${UNAME_RELEASE}:${UNAME_VERSION}" in # MiNT. But MiNT is downward compatible to TOS, so this should # be no problem. atarist[e]:*MiNT:*:* | atarist[e]:*mint:*:* | atarist[e]:*TOS:*:*) - echo m68k-atari-mint${UNAME_RELEASE} + echo m68k-atari-mint"$UNAME_RELEASE" exit ;; atari*:*MiNT:*:* | atari*:*mint:*:* | atarist[e]:*TOS:*:*) - echo m68k-atari-mint${UNAME_RELEASE} + echo m68k-atari-mint"$UNAME_RELEASE" exit ;; *falcon*:*MiNT:*:* | *falcon*:*mint:*:* | *falcon*:*TOS:*:*) - echo m68k-atari-mint${UNAME_RELEASE} + echo m68k-atari-mint"$UNAME_RELEASE" exit ;; milan*:*MiNT:*:* | milan*:*mint:*:* | *milan*:*TOS:*:*) - echo m68k-milan-mint${UNAME_RELEASE} + echo m68k-milan-mint"$UNAME_RELEASE" exit ;; hades*:*MiNT:*:* | hades*:*mint:*:* | *hades*:*TOS:*:*) - echo m68k-hades-mint${UNAME_RELEASE} + echo m68k-hades-mint"$UNAME_RELEASE" exit ;; *:*MiNT:*:* | *:*mint:*:* | *:*TOS:*:*) - echo m68k-unknown-mint${UNAME_RELEASE} + echo m68k-unknown-mint"$UNAME_RELEASE" exit ;; m68k:machten:*:*) - echo m68k-apple-machten${UNAME_RELEASE} + echo m68k-apple-machten"$UNAME_RELEASE" exit ;; powerpc:machten:*:*) - echo powerpc-apple-machten${UNAME_RELEASE} + echo powerpc-apple-machten"$UNAME_RELEASE" exit ;; RISC*:Mach:*:*) echo mips-dec-mach_bsd4.3 exit ;; RISC*:ULTRIX:*:*) - echo mips-dec-ultrix${UNAME_RELEASE} + echo mips-dec-ultrix"$UNAME_RELEASE" exit ;; VAX*:ULTRIX*:*:*) - echo vax-dec-ultrix${UNAME_RELEASE} + echo vax-dec-ultrix"$UNAME_RELEASE" exit ;; 2020:CLIX:*:* | 2430:CLIX:*:*) - echo clipper-intergraph-clix${UNAME_RELEASE} + echo clipper-intergraph-clix"$UNAME_RELEASE" exit ;; mips:*:*:UMIPS | mips:*:*:RISCos) - eval $set_cc_for_build - sed 's/^ //' << EOF >$dummy.c + eval "$set_cc_for_build" + sed 's/^ //' << EOF > "$dummy.c" #ifdef __cplusplus #include /* for printf() prototype */ int main (int argc, char *argv[]) { @@ -485,23 +492,23 @@ case "${UNAME_MACHINE}:${UNAME_SYSTEM}:${UNAME_RELEASE}:${UNAME_VERSION}" in #endif #if defined (host_mips) && defined (MIPSEB) #if defined (SYSTYPE_SYSV) - printf ("mips-mips-riscos%ssysv\n", argv[1]); exit (0); + printf ("mips-mips-riscos%ssysv\\n", argv[1]); exit (0); #endif #if defined (SYSTYPE_SVR4) - printf ("mips-mips-riscos%ssvr4\n", argv[1]); exit (0); + printf ("mips-mips-riscos%ssvr4\\n", argv[1]); exit (0); #endif #if defined (SYSTYPE_BSD43) || defined(SYSTYPE_BSD) - printf ("mips-mips-riscos%sbsd\n", argv[1]); exit (0); + printf ("mips-mips-riscos%sbsd\\n", argv[1]); exit (0); #endif #endif exit (-1); } EOF - $CC_FOR_BUILD -o $dummy $dummy.c && - dummyarg=`echo "${UNAME_RELEASE}" | sed -n 's/\([0-9]*\).*/\1/p'` && - SYSTEM_NAME=`$dummy $dummyarg` && + $CC_FOR_BUILD -o "$dummy" "$dummy.c" && + dummyarg=`echo "$UNAME_RELEASE" | sed -n 's/\([0-9]*\).*/\1/p'` && + SYSTEM_NAME=`"$dummy" "$dummyarg"` && { echo "$SYSTEM_NAME"; exit; } - echo mips-mips-riscos${UNAME_RELEASE} + echo mips-mips-riscos"$UNAME_RELEASE" exit ;; Motorola:PowerMAX_OS:*:*) echo powerpc-motorola-powermax @@ -527,17 +534,17 @@ EOF AViiON:dgux:*:*) # DG/UX returns AViiON for all architectures UNAME_PROCESSOR=`/usr/bin/uname -p` - if [ $UNAME_PROCESSOR = mc88100 ] || [ $UNAME_PROCESSOR = mc88110 ] + if [ "$UNAME_PROCESSOR" = mc88100 ] || [ "$UNAME_PROCESSOR" = mc88110 ] then - if [ ${TARGET_BINARY_INTERFACE}x = m88kdguxelfx ] || \ - [ ${TARGET_BINARY_INTERFACE}x = x ] + if [ "$TARGET_BINARY_INTERFACE"x = m88kdguxelfx ] || \ + [ "$TARGET_BINARY_INTERFACE"x = 
x ] then - echo m88k-dg-dgux${UNAME_RELEASE} + echo m88k-dg-dgux"$UNAME_RELEASE" else - echo m88k-dg-dguxbcs${UNAME_RELEASE} + echo m88k-dg-dguxbcs"$UNAME_RELEASE" fi else - echo i586-dg-dgux${UNAME_RELEASE} + echo i586-dg-dgux"$UNAME_RELEASE" fi exit ;; M88*:DolphinOS:*:*) # DolphinOS (SVR3) @@ -554,7 +561,7 @@ EOF echo m68k-tektronix-bsd exit ;; *:IRIX*:*:*) - echo mips-sgi-irix`echo ${UNAME_RELEASE}|sed -e 's/-/_/g'` + echo mips-sgi-irix"`echo "$UNAME_RELEASE"|sed -e 's/-/_/g'`" exit ;; ????????:AIX?:[12].1:2) # AIX 2.2.1 or AIX 2.1.1 is RT/PC AIX. echo romp-ibm-aix # uname -m gives an 8 hex-code CPU id @@ -566,14 +573,14 @@ EOF if [ -x /usr/bin/oslevel ] ; then IBM_REV=`/usr/bin/oslevel` else - IBM_REV=${UNAME_VERSION}.${UNAME_RELEASE} + IBM_REV="$UNAME_VERSION.$UNAME_RELEASE" fi - echo ${UNAME_MACHINE}-ibm-aix${IBM_REV} + echo "$UNAME_MACHINE"-ibm-aix"$IBM_REV" exit ;; *:AIX:2:3) if grep bos325 /usr/include/stdio.h >/dev/null 2>&1; then - eval $set_cc_for_build - sed 's/^ //' << EOF >$dummy.c + eval "$set_cc_for_build" + sed 's/^ //' << EOF > "$dummy.c" #include main() @@ -584,7 +591,7 @@ EOF exit(0); } EOF - if $CC_FOR_BUILD -o $dummy $dummy.c && SYSTEM_NAME=`$dummy` + if $CC_FOR_BUILD -o "$dummy" "$dummy.c" && SYSTEM_NAME=`"$dummy"` then echo "$SYSTEM_NAME" else @@ -598,7 +605,7 @@ EOF exit ;; *:AIX:*:[4567]) IBM_CPU_ID=`/usr/sbin/lsdev -C -c processor -S available | sed 1q | awk '{ print $1 }'` - if /usr/sbin/lsattr -El ${IBM_CPU_ID} | grep ' POWER' >/dev/null 2>&1; then + if /usr/sbin/lsattr -El "$IBM_CPU_ID" | grep ' POWER' >/dev/null 2>&1; then IBM_ARCH=rs6000 else IBM_ARCH=powerpc @@ -607,18 +614,18 @@ EOF IBM_REV=`/usr/bin/lslpp -Lqc bos.rte.libc | awk -F: '{ print $3 }' | sed s/[0-9]*$/0/` else - IBM_REV=${UNAME_VERSION}.${UNAME_RELEASE} + IBM_REV="$UNAME_VERSION.$UNAME_RELEASE" fi - echo ${IBM_ARCH}-ibm-aix${IBM_REV} + echo "$IBM_ARCH"-ibm-aix"$IBM_REV" exit ;; *:AIX:*:*) echo rs6000-ibm-aix exit ;; - ibmrt:4.4BSD:*|romp-ibm:BSD:*) + ibmrt:4.4BSD:*|romp-ibm:4.4BSD:*) echo romp-ibm-bsd4.4 exit ;; ibmrt:*BSD:*|romp-ibm:BSD:*) # covers RT/PC BSD and - echo romp-ibm-bsd${UNAME_RELEASE} # 4.3 with uname added to + echo romp-ibm-bsd"$UNAME_RELEASE" # 4.3 with uname added to exit ;; # report: romp-ibm BSD 4.3 *:BOSX:*:*) echo rs6000-bull-bosx @@ -633,28 +640,28 @@ EOF echo m68k-hp-bsd4.4 exit ;; 9000/[34678]??:HP-UX:*:*) - HPUX_REV=`echo ${UNAME_RELEASE}|sed -e 's/[^.]*.[0B]*//'` - case "${UNAME_MACHINE}" in - 9000/31? ) HP_ARCH=m68000 ;; - 9000/[34]?? ) HP_ARCH=m68k ;; + HPUX_REV=`echo "$UNAME_RELEASE"|sed -e 's/[^.]*.[0B]*//'` + case "$UNAME_MACHINE" in + 9000/31?) HP_ARCH=m68000 ;; + 9000/[34]??) 
HP_ARCH=m68k ;; 9000/[678][0-9][0-9]) if [ -x /usr/bin/getconf ]; then sc_cpu_version=`/usr/bin/getconf SC_CPU_VERSION 2>/dev/null` sc_kernel_bits=`/usr/bin/getconf SC_KERNEL_BITS 2>/dev/null` - case "${sc_cpu_version}" in + case "$sc_cpu_version" in 523) HP_ARCH=hppa1.0 ;; # CPU_PA_RISC1_0 528) HP_ARCH=hppa1.1 ;; # CPU_PA_RISC1_1 532) # CPU_PA_RISC2_0 - case "${sc_kernel_bits}" in + case "$sc_kernel_bits" in 32) HP_ARCH=hppa2.0n ;; 64) HP_ARCH=hppa2.0w ;; '') HP_ARCH=hppa2.0 ;; # HP-UX 10.20 esac ;; esac fi - if [ "${HP_ARCH}" = "" ]; then - eval $set_cc_for_build - sed 's/^ //' << EOF >$dummy.c + if [ "$HP_ARCH" = "" ]; then + eval "$set_cc_for_build" + sed 's/^ //' << EOF > "$dummy.c" #define _HPUX_SOURCE #include @@ -687,13 +694,13 @@ EOF exit (0); } EOF - (CCOPTS="" $CC_FOR_BUILD -o $dummy $dummy.c 2>/dev/null) && HP_ARCH=`$dummy` + (CCOPTS="" $CC_FOR_BUILD -o "$dummy" "$dummy.c" 2>/dev/null) && HP_ARCH=`"$dummy"` test -z "$HP_ARCH" && HP_ARCH=hppa fi ;; esac - if [ ${HP_ARCH} = hppa2.0w ] + if [ "$HP_ARCH" = hppa2.0w ] then - eval $set_cc_for_build + eval "$set_cc_for_build" # hppa2.0w-hp-hpux* has a 64-bit kernel and a compiler generating # 32-bit code. hppa64-hp-hpux* has the same kernel and a compiler @@ -712,15 +719,15 @@ EOF HP_ARCH=hppa64 fi fi - echo ${HP_ARCH}-hp-hpux${HPUX_REV} + echo "$HP_ARCH"-hp-hpux"$HPUX_REV" exit ;; ia64:HP-UX:*:*) - HPUX_REV=`echo ${UNAME_RELEASE}|sed -e 's/[^.]*.[0B]*//'` - echo ia64-hp-hpux${HPUX_REV} + HPUX_REV=`echo "$UNAME_RELEASE"|sed -e 's/[^.]*.[0B]*//'` + echo ia64-hp-hpux"$HPUX_REV" exit ;; 3050*:HI-UX:*:*) - eval $set_cc_for_build - sed 's/^ //' << EOF >$dummy.c + eval "$set_cc_for_build" + sed 's/^ //' << EOF > "$dummy.c" #include int main () @@ -745,11 +752,11 @@ EOF exit (0); } EOF - $CC_FOR_BUILD -o $dummy $dummy.c && SYSTEM_NAME=`$dummy` && + $CC_FOR_BUILD -o "$dummy" "$dummy.c" && SYSTEM_NAME=`"$dummy"` && { echo "$SYSTEM_NAME"; exit; } echo unknown-hitachi-hiuxwe2 exit ;; - 9000/7??:4.3bsd:*:* | 9000/8?[79]:4.3bsd:*:* ) + 9000/7??:4.3bsd:*:* | 9000/8?[79]:4.3bsd:*:*) echo hppa1.1-hp-bsd exit ;; 9000/8??:4.3bsd:*:*) @@ -758,7 +765,7 @@ EOF *9??*:MPE/iX:*:* | *3000*:MPE/iX:*:*) echo hppa1.0-hp-mpeix exit ;; - hp7??:OSF1:*:* | hp8?[79]:OSF1:*:* ) + hp7??:OSF1:*:* | hp8?[79]:OSF1:*:*) echo hppa1.1-hp-osf exit ;; hp8??:OSF1:*:*) @@ -766,9 +773,9 @@ EOF exit ;; i*86:OSF1:*:*) if [ -x /usr/sbin/sysversion ] ; then - echo ${UNAME_MACHINE}-unknown-osf1mk + echo "$UNAME_MACHINE"-unknown-osf1mk else - echo ${UNAME_MACHINE}-unknown-osf1 + echo "$UNAME_MACHINE"-unknown-osf1 fi exit ;; parisc*:Lites*:*:*) @@ -793,128 +800,109 @@ EOF echo c4-convex-bsd exit ;; CRAY*Y-MP:*:*:*) - echo ymp-cray-unicos${UNAME_RELEASE} | sed -e 's/\.[^.]*$/.X/' + echo ymp-cray-unicos"$UNAME_RELEASE" | sed -e 's/\.[^.]*$/.X/' exit ;; CRAY*[A-Z]90:*:*:*) - echo ${UNAME_MACHINE}-cray-unicos${UNAME_RELEASE} \ + echo "$UNAME_MACHINE"-cray-unicos"$UNAME_RELEASE" \ | sed -e 's/CRAY.*\([A-Z]90\)/\1/' \ -e y/ABCDEFGHIJKLMNOPQRSTUVWXYZ/abcdefghijklmnopqrstuvwxyz/ \ -e 's/\.[^.]*$/.X/' exit ;; CRAY*TS:*:*:*) - echo t90-cray-unicos${UNAME_RELEASE} | sed -e 's/\.[^.]*$/.X/' + echo t90-cray-unicos"$UNAME_RELEASE" | sed -e 's/\.[^.]*$/.X/' exit ;; CRAY*T3E:*:*:*) - echo alphaev5-cray-unicosmk${UNAME_RELEASE} | sed -e 's/\.[^.]*$/.X/' + echo alphaev5-cray-unicosmk"$UNAME_RELEASE" | sed -e 's/\.[^.]*$/.X/' exit ;; CRAY*SV1:*:*:*) - echo sv1-cray-unicos${UNAME_RELEASE} | sed -e 's/\.[^.]*$/.X/' + echo sv1-cray-unicos"$UNAME_RELEASE" | sed -e 's/\.[^.]*$/.X/' exit ;; *:UNICOS/mp:*:*) - 
echo craynv-cray-unicosmp${UNAME_RELEASE} | sed -e 's/\.[^.]*$/.X/' + echo craynv-cray-unicosmp"$UNAME_RELEASE" | sed -e 's/\.[^.]*$/.X/' exit ;; F30[01]:UNIX_System_V:*:* | F700:UNIX_System_V:*:*) FUJITSU_PROC=`uname -m | tr ABCDEFGHIJKLMNOPQRSTUVWXYZ abcdefghijklmnopqrstuvwxyz` FUJITSU_SYS=`uname -p | tr ABCDEFGHIJKLMNOPQRSTUVWXYZ abcdefghijklmnopqrstuvwxyz | sed -e 's/\///'` - FUJITSU_REL=`echo ${UNAME_RELEASE} | sed -e 's/ /_/'` + FUJITSU_REL=`echo "$UNAME_RELEASE" | sed -e 's/ /_/'` echo "${FUJITSU_PROC}-fujitsu-${FUJITSU_SYS}${FUJITSU_REL}" exit ;; 5000:UNIX_System_V:4.*:*) FUJITSU_SYS=`uname -p | tr ABCDEFGHIJKLMNOPQRSTUVWXYZ abcdefghijklmnopqrstuvwxyz | sed -e 's/\///'` - FUJITSU_REL=`echo ${UNAME_RELEASE} | tr ABCDEFGHIJKLMNOPQRSTUVWXYZ abcdefghijklmnopqrstuvwxyz | sed -e 's/ /_/'` + FUJITSU_REL=`echo "$UNAME_RELEASE" | tr ABCDEFGHIJKLMNOPQRSTUVWXYZ abcdefghijklmnopqrstuvwxyz | sed -e 's/ /_/'` echo "sparc-fujitsu-${FUJITSU_SYS}${FUJITSU_REL}" exit ;; i*86:BSD/386:*:* | i*86:BSD/OS:*:* | *:Ascend\ Embedded/OS:*:*) - echo ${UNAME_MACHINE}-pc-bsdi${UNAME_RELEASE} + echo "$UNAME_MACHINE"-pc-bsdi"$UNAME_RELEASE" exit ;; sparc*:BSD/OS:*:*) - echo sparc-unknown-bsdi${UNAME_RELEASE} + echo sparc-unknown-bsdi"$UNAME_RELEASE" exit ;; *:BSD/OS:*:*) - echo ${UNAME_MACHINE}-unknown-bsdi${UNAME_RELEASE} + echo "$UNAME_MACHINE"-unknown-bsdi"$UNAME_RELEASE" exit ;; *:FreeBSD:*:*) UNAME_PROCESSOR=`/usr/bin/uname -p` - case ${UNAME_PROCESSOR} in + case "$UNAME_PROCESSOR" in amd64) UNAME_PROCESSOR=x86_64 ;; i386) UNAME_PROCESSOR=i586 ;; esac - echo ${UNAME_PROCESSOR}-unknown-freebsd`echo ${UNAME_RELEASE}|sed -e 's/[-(].*//'` + echo "$UNAME_PROCESSOR"-unknown-freebsd"`echo "$UNAME_RELEASE"|sed -e 's/[-(].*//'`" exit ;; i*:CYGWIN*:*) - echo ${UNAME_MACHINE}-pc-cygwin + echo "$UNAME_MACHINE"-pc-cygwin exit ;; *:MINGW64*:*) - echo ${UNAME_MACHINE}-pc-mingw64 + echo "$UNAME_MACHINE"-pc-mingw64 exit ;; *:MINGW*:*) - echo ${UNAME_MACHINE}-pc-mingw32 + echo "$UNAME_MACHINE"-pc-mingw32 exit ;; *:MSYS*:*) - echo ${UNAME_MACHINE}-pc-msys - exit ;; - i*:windows32*:*) - # uname -m includes "-pc" on this system. - echo ${UNAME_MACHINE}-mingw32 + echo "$UNAME_MACHINE"-pc-msys exit ;; i*:PW*:*) - echo ${UNAME_MACHINE}-pc-pw32 + echo "$UNAME_MACHINE"-pc-pw32 exit ;; *:Interix*:*) - case ${UNAME_MACHINE} in + case "$UNAME_MACHINE" in x86) - echo i586-pc-interix${UNAME_RELEASE} + echo i586-pc-interix"$UNAME_RELEASE" exit ;; authenticamd | genuineintel | EM64T) - echo x86_64-unknown-interix${UNAME_RELEASE} + echo x86_64-unknown-interix"$UNAME_RELEASE" exit ;; IA64) - echo ia64-unknown-interix${UNAME_RELEASE} + echo ia64-unknown-interix"$UNAME_RELEASE" exit ;; esac ;; - [345]86:Windows_95:* | [345]86:Windows_98:* | [345]86:Windows_NT:*) - echo i${UNAME_MACHINE}-pc-mks - exit ;; - 8664:Windows_NT:*) - echo x86_64-pc-mks - exit ;; - i*:Windows_NT*:* | Pentium*:Windows_NT*:*) - # How do we know it's Interix rather than the generic POSIX subsystem? - # It also conflicts with pre-2.0 versions of AT&T UWIN. Should we - # UNAME_MACHINE based on the output of uname instead of i386? 
- echo i586-pc-interix - exit ;; i*:UWIN*:*) - echo ${UNAME_MACHINE}-pc-uwin + echo "$UNAME_MACHINE"-pc-uwin exit ;; amd64:CYGWIN*:*:* | x86_64:CYGWIN*:*:*) echo x86_64-unknown-cygwin exit ;; - p*:CYGWIN*:*) - echo powerpcle-unknown-cygwin - exit ;; prep*:SunOS:5.*:*) - echo powerpcle-unknown-solaris2`echo ${UNAME_RELEASE}|sed -e 's/[^.]*//'` + echo powerpcle-unknown-solaris2"`echo "$UNAME_RELEASE"|sed -e 's/[^.]*//'`" exit ;; *:GNU:*:*) # the GNU system - echo `echo ${UNAME_MACHINE}|sed -e 's,[-/].*$,,'`-unknown-${LIBC}`echo ${UNAME_RELEASE}|sed -e 's,/.*$,,'` + echo "`echo "$UNAME_MACHINE"|sed -e 's,[-/].*$,,'`-unknown-$LIBC`echo "$UNAME_RELEASE"|sed -e 's,/.*$,,'`" exit ;; *:GNU/*:*:*) # other systems with GNU libc and userland - echo ${UNAME_MACHINE}-unknown-`echo ${UNAME_SYSTEM} | sed 's,^[^/]*/,,' | tr "[:upper:]" "[:lower:]"``echo ${UNAME_RELEASE}|sed -e 's/[-(].*//'`-${LIBC} + echo "$UNAME_MACHINE-unknown-`echo "$UNAME_SYSTEM" | sed 's,^[^/]*/,,' | tr "[:upper:]" "[:lower:]"``echo "$UNAME_RELEASE"|sed -e 's/[-(].*//'`-$LIBC" exit ;; i*86:Minix:*:*) - echo ${UNAME_MACHINE}-pc-minix + echo "$UNAME_MACHINE"-pc-minix exit ;; aarch64:Linux:*:*) - echo ${UNAME_MACHINE}-unknown-linux-${LIBC} + echo "$UNAME_MACHINE"-unknown-linux-"$LIBC" exit ;; aarch64_be:Linux:*:*) UNAME_MACHINE=aarch64_be - echo ${UNAME_MACHINE}-unknown-linux-${LIBC} + echo "$UNAME_MACHINE"-unknown-linux-"$LIBC" exit ;; alpha:Linux:*:*) case `sed -n '/^cpu model/s/^.*: \(.*\)/\1/p' < /proc/cpuinfo` in @@ -928,63 +916,63 @@ EOF esac objdump --private-headers /bin/sh | grep -q ld.so.1 if test "$?" = 0 ; then LIBC=gnulibc1 ; fi - echo ${UNAME_MACHINE}-unknown-linux-${LIBC} + echo "$UNAME_MACHINE"-unknown-linux-"$LIBC" exit ;; arc:Linux:*:* | arceb:Linux:*:*) - echo ${UNAME_MACHINE}-unknown-linux-${LIBC} + echo "$UNAME_MACHINE"-unknown-linux-"$LIBC" exit ;; arm*:Linux:*:*) - eval $set_cc_for_build + eval "$set_cc_for_build" if echo __ARM_EABI__ | $CC_FOR_BUILD -E - 2>/dev/null \ | grep -q __ARM_EABI__ then - echo ${UNAME_MACHINE}-unknown-linux-${LIBC} + echo "$UNAME_MACHINE"-unknown-linux-"$LIBC" else if echo __ARM_PCS_VFP | $CC_FOR_BUILD -E - 2>/dev/null \ | grep -q __ARM_PCS_VFP then - echo ${UNAME_MACHINE}-unknown-linux-${LIBC}eabi + echo "$UNAME_MACHINE"-unknown-linux-"$LIBC"eabi else - echo ${UNAME_MACHINE}-unknown-linux-${LIBC}eabihf + echo "$UNAME_MACHINE"-unknown-linux-"$LIBC"eabihf fi fi exit ;; avr32*:Linux:*:*) - echo ${UNAME_MACHINE}-unknown-linux-${LIBC} + echo "$UNAME_MACHINE"-unknown-linux-"$LIBC" exit ;; cris:Linux:*:*) - echo ${UNAME_MACHINE}-axis-linux-${LIBC} + echo "$UNAME_MACHINE"-axis-linux-"$LIBC" exit ;; crisv32:Linux:*:*) - echo ${UNAME_MACHINE}-axis-linux-${LIBC} + echo "$UNAME_MACHINE"-axis-linux-"$LIBC" exit ;; e2k:Linux:*:*) - echo ${UNAME_MACHINE}-unknown-linux-${LIBC} + echo "$UNAME_MACHINE"-unknown-linux-"$LIBC" exit ;; frv:Linux:*:*) - echo ${UNAME_MACHINE}-unknown-linux-${LIBC} + echo "$UNAME_MACHINE"-unknown-linux-"$LIBC" exit ;; hexagon:Linux:*:*) - echo ${UNAME_MACHINE}-unknown-linux-${LIBC} + echo "$UNAME_MACHINE"-unknown-linux-"$LIBC" exit ;; i*86:Linux:*:*) - echo ${UNAME_MACHINE}-pc-linux-${LIBC} + echo "$UNAME_MACHINE"-pc-linux-"$LIBC" exit ;; ia64:Linux:*:*) - echo ${UNAME_MACHINE}-unknown-linux-${LIBC} + echo "$UNAME_MACHINE"-unknown-linux-"$LIBC" exit ;; k1om:Linux:*:*) - echo ${UNAME_MACHINE}-unknown-linux-${LIBC} + echo "$UNAME_MACHINE"-unknown-linux-"$LIBC" exit ;; m32r*:Linux:*:*) - echo ${UNAME_MACHINE}-unknown-linux-${LIBC} + echo "$UNAME_MACHINE"-unknown-linux-"$LIBC" exit 
;; m68*:Linux:*:*) - echo ${UNAME_MACHINE}-unknown-linux-${LIBC} + echo "$UNAME_MACHINE"-unknown-linux-"$LIBC" exit ;; mips:Linux:*:* | mips64:Linux:*:*) - eval $set_cc_for_build - sed 's/^ //' << EOF >$dummy.c + eval "$set_cc_for_build" + sed 's/^ //' << EOF > "$dummy.c" #undef CPU #undef ${UNAME_MACHINE} #undef ${UNAME_MACHINE}el @@ -998,70 +986,70 @@ EOF #endif #endif EOF - eval `$CC_FOR_BUILD -E $dummy.c 2>/dev/null | grep '^CPU'` - test x"${CPU}" != x && { echo "${CPU}-unknown-linux-${LIBC}"; exit; } + eval "`$CC_FOR_BUILD -E "$dummy.c" 2>/dev/null | grep '^CPU'`" + test "x$CPU" != x && { echo "$CPU-unknown-linux-$LIBC"; exit; } ;; mips64el:Linux:*:*) - echo ${UNAME_MACHINE}-unknown-linux-${LIBC} + echo "$UNAME_MACHINE"-unknown-linux-"$LIBC" exit ;; openrisc*:Linux:*:*) - echo or1k-unknown-linux-${LIBC} + echo or1k-unknown-linux-"$LIBC" exit ;; or32:Linux:*:* | or1k*:Linux:*:*) - echo ${UNAME_MACHINE}-unknown-linux-${LIBC} + echo "$UNAME_MACHINE"-unknown-linux-"$LIBC" exit ;; padre:Linux:*:*) - echo sparc-unknown-linux-${LIBC} + echo sparc-unknown-linux-"$LIBC" exit ;; parisc64:Linux:*:* | hppa64:Linux:*:*) - echo hppa64-unknown-linux-${LIBC} + echo hppa64-unknown-linux-"$LIBC" exit ;; parisc:Linux:*:* | hppa:Linux:*:*) # Look for CPU level case `grep '^cpu[^a-z]*:' /proc/cpuinfo 2>/dev/null | cut -d' ' -f2` in - PA7*) echo hppa1.1-unknown-linux-${LIBC} ;; - PA8*) echo hppa2.0-unknown-linux-${LIBC} ;; - *) echo hppa-unknown-linux-${LIBC} ;; + PA7*) echo hppa1.1-unknown-linux-"$LIBC" ;; + PA8*) echo hppa2.0-unknown-linux-"$LIBC" ;; + *) echo hppa-unknown-linux-"$LIBC" ;; esac exit ;; ppc64:Linux:*:*) - echo powerpc64-unknown-linux-${LIBC} + echo powerpc64-unknown-linux-"$LIBC" exit ;; ppc:Linux:*:*) - echo powerpc-unknown-linux-${LIBC} + echo powerpc-unknown-linux-"$LIBC" exit ;; ppc64le:Linux:*:*) - echo powerpc64le-unknown-linux-${LIBC} + echo powerpc64le-unknown-linux-"$LIBC" exit ;; ppcle:Linux:*:*) - echo powerpcle-unknown-linux-${LIBC} + echo powerpcle-unknown-linux-"$LIBC" exit ;; riscv32:Linux:*:* | riscv64:Linux:*:*) - echo ${UNAME_MACHINE}-unknown-linux-${LIBC} + echo "$UNAME_MACHINE"-unknown-linux-"$LIBC" exit ;; s390:Linux:*:* | s390x:Linux:*:*) - echo ${UNAME_MACHINE}-ibm-linux-${LIBC} + echo "$UNAME_MACHINE"-ibm-linux-"$LIBC" exit ;; sh64*:Linux:*:*) - echo ${UNAME_MACHINE}-unknown-linux-${LIBC} + echo "$UNAME_MACHINE"-unknown-linux-"$LIBC" exit ;; sh*:Linux:*:*) - echo ${UNAME_MACHINE}-unknown-linux-${LIBC} + echo "$UNAME_MACHINE"-unknown-linux-"$LIBC" exit ;; sparc:Linux:*:* | sparc64:Linux:*:*) - echo ${UNAME_MACHINE}-unknown-linux-${LIBC} + echo "$UNAME_MACHINE"-unknown-linux-"$LIBC" exit ;; tile*:Linux:*:*) - echo ${UNAME_MACHINE}-unknown-linux-${LIBC} + echo "$UNAME_MACHINE"-unknown-linux-"$LIBC" exit ;; vax:Linux:*:*) - echo ${UNAME_MACHINE}-dec-linux-${LIBC} + echo "$UNAME_MACHINE"-dec-linux-"$LIBC" exit ;; x86_64:Linux:*:*) - echo ${UNAME_MACHINE}-pc-linux-${LIBC} + echo "$UNAME_MACHINE"-pc-linux-"$LIBC" exit ;; xtensa*:Linux:*:*) - echo ${UNAME_MACHINE}-unknown-linux-${LIBC} + echo "$UNAME_MACHINE"-unknown-linux-"$LIBC" exit ;; i*86:DYNIX/ptx:4*:*) # ptx 4.0 does uname -s correctly, with DYNIX/ptx in there. @@ -1075,34 +1063,34 @@ EOF # I am not positive that other SVR4 systems won't match this, # I just have to hope. -- rms. # Use sysv4.2uw... so that sysv4* matches it. 
- echo ${UNAME_MACHINE}-pc-sysv4.2uw${UNAME_VERSION} + echo "$UNAME_MACHINE"-pc-sysv4.2uw"$UNAME_VERSION" exit ;; i*86:OS/2:*:*) # If we were able to find `uname', then EMX Unix compatibility # is probably installed. - echo ${UNAME_MACHINE}-pc-os2-emx + echo "$UNAME_MACHINE"-pc-os2-emx exit ;; i*86:XTS-300:*:STOP) - echo ${UNAME_MACHINE}-unknown-stop + echo "$UNAME_MACHINE"-unknown-stop exit ;; i*86:atheos:*:*) - echo ${UNAME_MACHINE}-unknown-atheos + echo "$UNAME_MACHINE"-unknown-atheos exit ;; i*86:syllable:*:*) - echo ${UNAME_MACHINE}-pc-syllable + echo "$UNAME_MACHINE"-pc-syllable exit ;; i*86:LynxOS:2.*:* | i*86:LynxOS:3.[01]*:* | i*86:LynxOS:4.[02]*:*) - echo i386-unknown-lynxos${UNAME_RELEASE} + echo i386-unknown-lynxos"$UNAME_RELEASE" exit ;; i*86:*DOS:*:*) - echo ${UNAME_MACHINE}-pc-msdosdjgpp + echo "$UNAME_MACHINE"-pc-msdosdjgpp exit ;; - i*86:*:4.*:* | i*86:SYSTEM_V:4.*:*) - UNAME_REL=`echo ${UNAME_RELEASE} | sed 's/\/MP$//'` + i*86:*:4.*:*) + UNAME_REL=`echo "$UNAME_RELEASE" | sed 's/\/MP$//'` if grep Novell /usr/include/link.h >/dev/null 2>/dev/null; then - echo ${UNAME_MACHINE}-univel-sysv${UNAME_REL} + echo "$UNAME_MACHINE"-univel-sysv"$UNAME_REL" else - echo ${UNAME_MACHINE}-pc-sysv${UNAME_REL} + echo "$UNAME_MACHINE"-pc-sysv"$UNAME_REL" fi exit ;; i*86:*:5:[678]*) @@ -1112,12 +1100,12 @@ EOF *Pentium) UNAME_MACHINE=i586 ;; *Pent*|*Celeron) UNAME_MACHINE=i686 ;; esac - echo ${UNAME_MACHINE}-unknown-sysv${UNAME_RELEASE}${UNAME_SYSTEM}${UNAME_VERSION} + echo "$UNAME_MACHINE-unknown-sysv${UNAME_RELEASE}${UNAME_SYSTEM}{$UNAME_VERSION}" exit ;; i*86:*:3.2:*) if test -f /usr/options/cb.name; then UNAME_REL=`sed -n 's/.*Version //p' /dev/null >/dev/null ; then UNAME_REL=`(/bin/uname -X|grep Release|sed -e 's/.*= //')` (/bin/uname -X|grep i80486 >/dev/null) && UNAME_MACHINE=i486 @@ -1127,9 +1115,9 @@ EOF && UNAME_MACHINE=i686 (/bin/uname -X|grep '^Machine.*Pentium Pro' >/dev/null) \ && UNAME_MACHINE=i686 - echo ${UNAME_MACHINE}-pc-sco$UNAME_REL + echo "$UNAME_MACHINE"-pc-sco"$UNAME_REL" else - echo ${UNAME_MACHINE}-pc-sysv32 + echo "$UNAME_MACHINE"-pc-sysv32 fi exit ;; pc:*:*:*) @@ -1149,9 +1137,9 @@ EOF exit ;; i860:*:4.*:*) # i860-SVR4 if grep Stardent /usr/include/sys/uadmin.h >/dev/null 2>&1 ; then - echo i860-stardent-sysv${UNAME_RELEASE} # Stardent Vistra i860-SVR4 + echo i860-stardent-sysv"$UNAME_RELEASE" # Stardent Vistra i860-SVR4 else # Add other i860-SVR4 vendors below as they are discovered. 
- echo i860-unknown-sysv${UNAME_RELEASE} # Unknown i860-SVR4 + echo i860-unknown-sysv"$UNAME_RELEASE" # Unknown i860-SVR4 fi exit ;; mini*:CTIX:SYS*5:*) @@ -1171,9 +1159,9 @@ EOF test -r /etc/.relid \ && OS_REL=.`sed -n 's/[^ ]* [^ ]* \([0-9][0-9]\).*/\1/p' < /etc/.relid` /bin/uname -p 2>/dev/null | grep 86 >/dev/null \ - && { echo i486-ncr-sysv4.3${OS_REL}; exit; } + && { echo i486-ncr-sysv4.3"$OS_REL"; exit; } /bin/uname -p 2>/dev/null | /bin/grep entium >/dev/null \ - && { echo i586-ncr-sysv4.3${OS_REL}; exit; } ;; + && { echo i586-ncr-sysv4.3"$OS_REL"; exit; } ;; 3[34]??:*:4.0:* | 3[34]??,*:*:4.0:*) /bin/uname -p 2>/dev/null | grep 86 >/dev/null \ && { echo i486-ncr-sysv4; exit; } ;; @@ -1182,28 +1170,28 @@ EOF test -r /etc/.relid \ && OS_REL=.`sed -n 's/[^ ]* [^ ]* \([0-9][0-9]\).*/\1/p' < /etc/.relid` /bin/uname -p 2>/dev/null | grep 86 >/dev/null \ - && { echo i486-ncr-sysv4.3${OS_REL}; exit; } + && { echo i486-ncr-sysv4.3"$OS_REL"; exit; } /bin/uname -p 2>/dev/null | /bin/grep entium >/dev/null \ - && { echo i586-ncr-sysv4.3${OS_REL}; exit; } + && { echo i586-ncr-sysv4.3"$OS_REL"; exit; } /bin/uname -p 2>/dev/null | /bin/grep pteron >/dev/null \ - && { echo i586-ncr-sysv4.3${OS_REL}; exit; } ;; + && { echo i586-ncr-sysv4.3"$OS_REL"; exit; } ;; m68*:LynxOS:2.*:* | m68*:LynxOS:3.0*:*) - echo m68k-unknown-lynxos${UNAME_RELEASE} + echo m68k-unknown-lynxos"$UNAME_RELEASE" exit ;; mc68030:UNIX_System_V:4.*:*) echo m68k-atari-sysv4 exit ;; TSUNAMI:LynxOS:2.*:*) - echo sparc-unknown-lynxos${UNAME_RELEASE} + echo sparc-unknown-lynxos"$UNAME_RELEASE" exit ;; rs6000:LynxOS:2.*:*) - echo rs6000-unknown-lynxos${UNAME_RELEASE} + echo rs6000-unknown-lynxos"$UNAME_RELEASE" exit ;; PowerPC:LynxOS:2.*:* | PowerPC:LynxOS:3.[01]*:* | PowerPC:LynxOS:4.[02]*:*) - echo powerpc-unknown-lynxos${UNAME_RELEASE} + echo powerpc-unknown-lynxos"$UNAME_RELEASE" exit ;; SM[BE]S:UNIX_SV:*:*) - echo mips-dde-sysv${UNAME_RELEASE} + echo mips-dde-sysv"$UNAME_RELEASE" exit ;; RM*:ReliantUNIX-*:*:*) echo mips-sni-sysv4 @@ -1214,7 +1202,7 @@ EOF *:SINIX-*:*:*) if uname -p 2>/dev/null >/dev/null ; then UNAME_MACHINE=`(uname -p) 2>/dev/null` - echo ${UNAME_MACHINE}-sni-sysv4 + echo "$UNAME_MACHINE"-sni-sysv4 else echo ns32k-sni-sysv fi @@ -1234,23 +1222,23 @@ EOF exit ;; i*86:VOS:*:*) # From Paul.Green@stratus.com. - echo ${UNAME_MACHINE}-stratus-vos + echo "$UNAME_MACHINE"-stratus-vos exit ;; *:VOS:*:*) # From Paul.Green@stratus.com. echo hppa1.1-stratus-vos exit ;; mc68*:A/UX:*:*) - echo m68k-apple-aux${UNAME_RELEASE} + echo m68k-apple-aux"$UNAME_RELEASE" exit ;; news*:NEWS-OS:6*:*) echo mips-sony-newsos6 exit ;; R[34]000:*System_V*:*:* | R4000:UNIX_SYSV:*:* | R*000:UNIX_SV:*:*) if [ -d /usr/nec ]; then - echo mips-nec-sysv${UNAME_RELEASE} + echo mips-nec-sysv"$UNAME_RELEASE" else - echo mips-unknown-sysv${UNAME_RELEASE} + echo mips-unknown-sysv"$UNAME_RELEASE" fi exit ;; BeBox:BeOS:*:*) # BeOS running on hardware made by Be, PPC only. 
@@ -1269,49 +1257,56 @@ EOF echo x86_64-unknown-haiku exit ;; SX-4:SUPER-UX:*:*) - echo sx4-nec-superux${UNAME_RELEASE} + echo sx4-nec-superux"$UNAME_RELEASE" exit ;; SX-5:SUPER-UX:*:*) - echo sx5-nec-superux${UNAME_RELEASE} + echo sx5-nec-superux"$UNAME_RELEASE" exit ;; SX-6:SUPER-UX:*:*) - echo sx6-nec-superux${UNAME_RELEASE} + echo sx6-nec-superux"$UNAME_RELEASE" exit ;; SX-7:SUPER-UX:*:*) - echo sx7-nec-superux${UNAME_RELEASE} + echo sx7-nec-superux"$UNAME_RELEASE" exit ;; SX-8:SUPER-UX:*:*) - echo sx8-nec-superux${UNAME_RELEASE} + echo sx8-nec-superux"$UNAME_RELEASE" exit ;; SX-8R:SUPER-UX:*:*) - echo sx8r-nec-superux${UNAME_RELEASE} + echo sx8r-nec-superux"$UNAME_RELEASE" exit ;; SX-ACE:SUPER-UX:*:*) - echo sxace-nec-superux${UNAME_RELEASE} + echo sxace-nec-superux"$UNAME_RELEASE" exit ;; Power*:Rhapsody:*:*) - echo powerpc-apple-rhapsody${UNAME_RELEASE} + echo powerpc-apple-rhapsody"$UNAME_RELEASE" exit ;; *:Rhapsody:*:*) - echo ${UNAME_MACHINE}-apple-rhapsody${UNAME_RELEASE} + echo "$UNAME_MACHINE"-apple-rhapsody"$UNAME_RELEASE" exit ;; *:Darwin:*:*) UNAME_PROCESSOR=`uname -p` || UNAME_PROCESSOR=unknown - eval $set_cc_for_build + eval "$set_cc_for_build" if test "$UNAME_PROCESSOR" = unknown ; then UNAME_PROCESSOR=powerpc fi - if test `echo "$UNAME_RELEASE" | sed -e 's/\..*//'` -le 10 ; then + if test "`echo "$UNAME_RELEASE" | sed -e 's/\..*//'`" -le 10 ; then if [ "$CC_FOR_BUILD" != no_compiler_found ]; then if (echo '#ifdef __LP64__'; echo IS_64BIT_ARCH; echo '#endif') | \ - (CCOPTS="" $CC_FOR_BUILD -E - 2>/dev/null) | \ - grep IS_64BIT_ARCH >/dev/null + (CCOPTS="" $CC_FOR_BUILD -E - 2>/dev/null) | \ + grep IS_64BIT_ARCH >/dev/null then case $UNAME_PROCESSOR in i386) UNAME_PROCESSOR=x86_64 ;; powerpc) UNAME_PROCESSOR=powerpc64 ;; esac fi + # On 10.4-10.6 one might compile for PowerPC via gcc -arch ppc + if (echo '#ifdef __POWERPC__'; echo IS_PPC; echo '#endif') | \ + (CCOPTS="" $CC_FOR_BUILD -E - 2>/dev/null) | \ + grep IS_PPC >/dev/null + then + UNAME_PROCESSOR=powerpc + fi fi elif test "$UNAME_PROCESSOR" = i386 ; then # Avoid executing cc on OS X 10.9, as it ships with a stub @@ -1322,7 +1317,7 @@ EOF # that Apple uses in portable devices. UNAME_PROCESSOR=x86_64 fi - echo ${UNAME_PROCESSOR}-apple-darwin${UNAME_RELEASE} + echo "$UNAME_PROCESSOR"-apple-darwin"$UNAME_RELEASE" exit ;; *:procnto*:*:* | *:QNX:[0123456789]*:*) UNAME_PROCESSOR=`uname -p` @@ -1330,22 +1325,25 @@ EOF UNAME_PROCESSOR=i386 UNAME_MACHINE=pc fi - echo ${UNAME_PROCESSOR}-${UNAME_MACHINE}-nto-qnx${UNAME_RELEASE} + echo "$UNAME_PROCESSOR"-"$UNAME_MACHINE"-nto-qnx"$UNAME_RELEASE" exit ;; *:QNX:*:4*) echo i386-pc-qnx exit ;; NEO-*:NONSTOP_KERNEL:*:*) - echo neo-tandem-nsk${UNAME_RELEASE} + echo neo-tandem-nsk"$UNAME_RELEASE" exit ;; NSE-*:NONSTOP_KERNEL:*:*) - echo nse-tandem-nsk${UNAME_RELEASE} + echo nse-tandem-nsk"$UNAME_RELEASE" exit ;; NSR-*:NONSTOP_KERNEL:*:*) - echo nsr-tandem-nsk${UNAME_RELEASE} + echo nsr-tandem-nsk"$UNAME_RELEASE" + exit ;; + NSV-*:NONSTOP_KERNEL:*:*) + echo nsv-tandem-nsk"$UNAME_RELEASE" exit ;; NSX-*:NONSTOP_KERNEL:*:*) - echo nsx-tandem-nsk${UNAME_RELEASE} + echo nsx-tandem-nsk"$UNAME_RELEASE" exit ;; *:NonStop-UX:*:*) echo mips-compaq-nonstopux @@ -1354,7 +1352,7 @@ EOF echo bs2000-siemens-sysv exit ;; DS/*:UNIX_System_V:*:*) - echo ${UNAME_MACHINE}-${UNAME_SYSTEM}-${UNAME_RELEASE} + echo "$UNAME_MACHINE"-"$UNAME_SYSTEM"-"$UNAME_RELEASE" exit ;; *:Plan9:*:*) # "uname -m" is not consistent, so use $cputype instead. 
386 @@ -1365,7 +1363,7 @@ EOF else UNAME_MACHINE="$cputype" fi - echo ${UNAME_MACHINE}-unknown-plan9 + echo "$UNAME_MACHINE"-unknown-plan9 exit ;; *:TOPS-10:*:*) echo pdp10-unknown-tops10 @@ -1386,14 +1384,14 @@ EOF echo pdp10-unknown-its exit ;; SEI:*:*:SEIUX) - echo mips-sei-seiux${UNAME_RELEASE} + echo mips-sei-seiux"$UNAME_RELEASE" exit ;; *:DragonFly:*:*) - echo ${UNAME_MACHINE}-unknown-dragonfly`echo ${UNAME_RELEASE}|sed -e 's/[-(].*//'` + echo "$UNAME_MACHINE"-unknown-dragonfly"`echo "$UNAME_RELEASE"|sed -e 's/[-(].*//'`" exit ;; *:*VMS:*:*) UNAME_MACHINE=`(uname -p) 2>/dev/null` - case "${UNAME_MACHINE}" in + case "$UNAME_MACHINE" in A*) echo alpha-dec-vms ; exit ;; I*) echo ia64-dec-vms ; exit ;; V*) echo vax-dec-vms ; exit ;; @@ -1402,32 +1400,44 @@ EOF echo i386-pc-xenix exit ;; i*86:skyos:*:*) - echo ${UNAME_MACHINE}-pc-skyos`echo ${UNAME_RELEASE} | sed -e 's/ .*$//'` + echo "$UNAME_MACHINE"-pc-skyos"`echo "$UNAME_RELEASE" | sed -e 's/ .*$//'`" exit ;; i*86:rdos:*:*) - echo ${UNAME_MACHINE}-pc-rdos + echo "$UNAME_MACHINE"-pc-rdos exit ;; i*86:AROS:*:*) - echo ${UNAME_MACHINE}-pc-aros + echo "$UNAME_MACHINE"-pc-aros exit ;; x86_64:VMkernel:*:*) - echo ${UNAME_MACHINE}-unknown-esx + echo "$UNAME_MACHINE"-unknown-esx exit ;; amd64:Isilon\ OneFS:*:*) echo x86_64-unknown-onefs exit ;; esac +echo "$0: unable to guess system type" >&2 + +case "$UNAME_MACHINE:$UNAME_SYSTEM" in + mips:Linux | mips64:Linux) + # If we got here on MIPS GNU/Linux, output extra information. + cat >&2 <&2 </dev/null` /usr/bin/oslevel = `(/usr/bin/oslevel) 2>/dev/null` /usr/convex/getsysinfo = `(/usr/convex/getsysinfo) 2>/dev/null` -UNAME_MACHINE = ${UNAME_MACHINE} -UNAME_RELEASE = ${UNAME_RELEASE} -UNAME_SYSTEM = ${UNAME_SYSTEM} -UNAME_VERSION = ${UNAME_VERSION} +UNAME_MACHINE = "$UNAME_MACHINE" +UNAME_RELEASE = "$UNAME_RELEASE" +UNAME_SYSTEM = "$UNAME_SYSTEM" +UNAME_VERSION = "$UNAME_VERSION" EOF exit 1 # Local variables: -# eval: (add-hook 'write-file-hooks 'time-stamp) +# eval: (add-hook 'before-save-hook 'time-stamp) # time-stamp-start: "timestamp='" # time-stamp-format: "%:y-%02m-%02d" # time-stamp-end: "'" diff --git a/config/config.sub b/config/config.sub index 40ea5dfe11..d1f5b54903 100644 --- a/config/config.sub +++ b/config/config.sub @@ -1,8 +1,8 @@ #! /bin/sh # Configuration validation subroutine script. -# Copyright 1992-2017 Free Software Foundation, Inc. +# Copyright 1992-2018 Free Software Foundation, Inc. -timestamp='2017-04-02' +timestamp='2018-05-24' # This file is free software; you can redistribute it and/or modify it # under the terms of the GNU General Public License as published by @@ -15,7 +15,7 @@ timestamp='2017-04-02' # General Public License for more details. # # You should have received a copy of the GNU General Public License -# along with this program; if not, see . +# along with this program; if not, see . # # As a special exception to the GNU General Public License, if you # distribute this file as part of a program that contains a @@ -33,7 +33,7 @@ timestamp='2017-04-02' # Otherwise, we print the canonical config type on stdout and succeed. # You can get the latest version of this script from: -# http://git.savannah.gnu.org/gitweb/?p=config.git;a=blob_plain;f=config.sub +# https://git.savannah.gnu.org/gitweb/?p=config.git;a=blob_plain;f=config.sub # This file is supposed to be the same for all GNU packages # and recognize all the CPU types, system types and aliases @@ -57,7 +57,7 @@ Usage: $0 [OPTION] CPU-MFR-OPSYS or ALIAS Canonicalize a configuration name. 
-Operation modes: +Options: -h, --help print this help, then exit -t, --time-stamp print date of last modification, then exit -v, --version print version number, then exit @@ -67,7 +67,7 @@ Report bugs and patches to ." version="\ GNU config.sub ($timestamp) -Copyright 1992-2017 Free Software Foundation, Inc. +Copyright 1992-2018 Free Software Foundation, Inc. This is free software; see the source for copying conditions. There is NO warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE." @@ -94,7 +94,7 @@ while test $# -gt 0 ; do *local*) # First pass through any local machine types. - echo $1 + echo "$1" exit ;; * ) @@ -110,134 +110,455 @@ case $# in exit 1;; esac -# Separate what the user gave into CPU-COMPANY and OS or KERNEL-OS (if any). -# Here we must recognize all the valid KERNEL-OS combinations. -maybe_os=`echo $1 | sed 's/^\(.*\)-\([^-]*-[^-]*\)$/\2/'` -case $maybe_os in - nto-qnx* | linux-gnu* | linux-android* | linux-dietlibc | linux-newlib* | \ - linux-musl* | linux-uclibc* | uclinux-uclibc* | uclinux-gnu* | kfreebsd*-gnu* | \ - knetbsd*-gnu* | netbsd*-gnu* | netbsd*-eabi* | \ - kopensolaris*-gnu* | cloudabi*-eabi* | \ - storm-chaos* | os2-emx* | rtmk-nova*) - os=-$maybe_os - basic_machine=`echo $1 | sed 's/^\(.*\)-\([^-]*-[^-]*\)$/\1/'` - ;; - android-linux) - os=-linux-android - basic_machine=`echo $1 | sed 's/^\(.*\)-\([^-]*-[^-]*\)$/\1/'`-unknown - ;; - *) - basic_machine=`echo $1 | sed 's/-[^-]*$//'` - if [ $basic_machine != $1 ] - then os=`echo $1 | sed 's/.*-/-/'` - else os=; fi - ;; -esac +# Split fields of configuration type +IFS="-" read -r field1 field2 field3 field4 <&2 + exit 1 ;; - -ptx*) - basic_machine=`echo $1 | sed -e 's/86-.*/86-sequent/'` + *-*-*-*) + basic_machine=$field1-$field2 + os=$field3-$field4 ;; - -windowsnt*) - os=`echo $os | sed -e 's/windowsnt/winnt/'` + *-*-*) + # Ambiguous whether COMPANY is present, or skipped and KERNEL-OS is two + # parts + maybe_os=$field2-$field3 + case $maybe_os in + nto-qnx* | linux-gnu* | linux-android* | linux-dietlibc \ + | linux-newlib* | linux-musl* | linux-uclibc* | uclinux-uclibc* \ + | uclinux-gnu* | kfreebsd*-gnu* | knetbsd*-gnu* | netbsd*-gnu* \ + | netbsd*-eabi* | kopensolaris*-gnu* | cloudabi*-eabi* \ + | storm-chaos* | os2-emx* | rtmk-nova*) + basic_machine=$field1 + os=$maybe_os + ;; + android-linux) + basic_machine=$field1-unknown + os=linux-android + ;; + *) + basic_machine=$field1-$field2 + os=$field3 + ;; + esac ;; - -psos*) - os=-psos + *-*) + # Second component is usually, but not always the OS + case $field2 in + # Prevent following clause from handling this valid os + sun*os*) + basic_machine=$field1 + os=$field2 + ;; + # Manufacturers + dec* | mips* | sequent* | encore* | pc532* | sgi* | sony* \ + | att* | 7300* | 3300* | delta* | motorola* | sun[234]* \ + | unicom* | ibm* | next | hp | isi* | apollo | altos* \ + | convergent* | ncr* | news | 32* | 3600* | 3100* | hitachi* \ + | c[123]* | convex* | sun | crds | omron* | dg | ultra | tti* \ + | harris | dolphin | highlevel | gould | cbm | ns | masscomp \ + | apple | axis | knuth | cray | microblaze* \ + | sim | cisco | oki | wec | wrs | winbond) + basic_machine=$field1-$field2 + os= + ;; + *) + basic_machine=$field1 + os=$field2 + ;; + esac ;; - -mint | -mint[0-9]*) - basic_machine=m68k-atari - os=-mint + *) + # Convert single-component short-hands not valid as part of + # multi-component configurations. 
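# The shorthand table that follows maps single-word aliases onto full
# CPU-VENDOR-OS triplets. Invoked by hand, the effect looks like this
# (paths and outputs assume an unmodified config/config.sub):
sh config/config.sub amigaos    # prints m68k-unknown-amigaos
sh config/config.sub go32       # prints i386-pc-go32
sh config/config.sub vms        # prints vax-dec-vms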
+ case $field1 in + 386bsd) + basic_machine=i386-pc + os=bsd + ;; + a29khif) + basic_machine=a29k-amd + os=udi + ;; + adobe68k) + basic_machine=m68010-adobe + os=scout + ;; + am29k) + basic_machine=a29k-none + os=bsd + ;; + amdahl) + basic_machine=580-amdahl + os=sysv + ;; + amigaos | amigados) + basic_machine=m68k-unknown + os=amigaos + ;; + amigaunix | amix) + basic_machine=m68k-unknown + os=sysv4 + ;; + apollo68) + basic_machine=m68k-apollo + os=sysv + ;; + apollo68bsd) + basic_machine=m68k-apollo + os=bsd + ;; + aros) + basic_machine=i386-pc + os=aros + ;; + aux) + basic_machine=m68k-apple + os=aux + ;; + balance) + basic_machine=ns32k-sequent + os=dynix + ;; + blackfin) + basic_machine=bfin-unknown + os=linux + ;; + cegcc) + basic_machine=arm-unknown + os=cegcc + ;; + cray) + basic_machine=j90-cray + os=unicos + ;; + craynv) + basic_machine=craynv-cray + os=unicosmp + ;; + delta88) + basic_machine=m88k-motorola + os=sysv3 + ;; + dicos) + basic_machine=i686-pc + os=dicos + ;; + djgpp) + basic_machine=i586-pc + os=msdosdjgpp + ;; + ebmon29k) + basic_machine=a29k-amd + os=ebmon + ;; + es1800 | OSE68k | ose68k | ose | OSE) + basic_machine=m68k-ericsson + os=ose + ;; + gmicro) + basic_machine=tron-gmicro + os=sysv + ;; + go32) + basic_machine=i386-pc + os=go32 + ;; + h8300hms) + basic_machine=h8300-hitachi + os=hms + ;; + h8300xray) + basic_machine=h8300-hitachi + os=xray + ;; + h8500hms) + basic_machine=h8500-hitachi + os=hms + ;; + harris) + basic_machine=m88k-harris + os=sysv3 + ;; + hp300bsd) + basic_machine=m68k-hp + os=bsd + ;; + hp300hpux) + basic_machine=m68k-hp + os=hpux + ;; + hppaosf) + basic_machine=hppa1.1-hp + os=osf + ;; + hppro) + basic_machine=hppa1.1-hp + os=proelf + ;; + i386mach) + basic_machine=i386-mach + os=mach + ;; + vsta) + basic_machine=i386-unknown + os=vsta + ;; + isi68 | isi) + basic_machine=m68k-isi + os=sysv + ;; + m68knommu) + basic_machine=m68k-unknown + os=linux + ;; + magnum | m3230) + basic_machine=mips-mips + os=sysv + ;; + merlin) + basic_machine=ns32k-utek + os=sysv + ;; + mingw64) + basic_machine=x86_64-pc + os=mingw64 + ;; + mingw32) + basic_machine=i686-pc + os=mingw32 + ;; + mingw32ce) + basic_machine=arm-unknown + os=mingw32ce + ;; + monitor) + basic_machine=m68k-rom68k + os=coff + ;; + morphos) + basic_machine=powerpc-unknown + os=morphos + ;; + moxiebox) + basic_machine=moxie-unknown + os=moxiebox + ;; + msdos) + basic_machine=i386-pc + os=msdos + ;; + msys) + basic_machine=i686-pc + os=msys + ;; + mvs) + basic_machine=i370-ibm + os=mvs + ;; + nacl) + basic_machine=le32-unknown + os=nacl + ;; + ncr3000) + basic_machine=i486-ncr + os=sysv4 + ;; + netbsd386) + basic_machine=i386-unknown + os=netbsd + ;; + netwinder) + basic_machine=armv4l-rebel + os=linux + ;; + news | news700 | news800 | news900) + basic_machine=m68k-sony + os=newsos + ;; + news1000) + basic_machine=m68030-sony + os=newsos + ;; + necv70) + basic_machine=v70-nec + os=sysv + ;; + nh3000) + basic_machine=m68k-harris + os=cxux + ;; + nh[45]000) + basic_machine=m88k-harris + os=cxux + ;; + nindy960) + basic_machine=i960-intel + os=nindy + ;; + mon960) + basic_machine=i960-intel + os=mon960 + ;; + nonstopux) + basic_machine=mips-compaq + os=nonstopux + ;; + os400) + basic_machine=powerpc-ibm + os=os400 + ;; + OSE68000 | ose68000) + basic_machine=m68000-ericsson + os=ose + ;; + os68k) + basic_machine=m68k-none + os=os68k + ;; + paragon) + basic_machine=i860-intel + os=osf + ;; + parisc) + basic_machine=hppa-unknown + os=linux + ;; + pw32) + basic_machine=i586-unknown + os=pw32 + ;; + 
rdos | rdos64) + basic_machine=x86_64-pc + os=rdos + ;; + rdos32) + basic_machine=i386-pc + os=rdos + ;; + rom68k) + basic_machine=m68k-rom68k + os=coff + ;; + sa29200) + basic_machine=a29k-amd + os=udi + ;; + sei) + basic_machine=mips-sei + os=seiux + ;; + sps7) + basic_machine=m68k-bull + os=sysv2 + ;; + stratus) + basic_machine=i860-stratus + os=sysv4 + ;; + sun2os3) + basic_machine=m68000-sun + os=sunos3 + ;; + sun2os4) + basic_machine=m68000-sun + os=sunos4 + ;; + sun3os3) + basic_machine=m68k-sun + os=sunos3 + ;; + sun3os4) + basic_machine=m68k-sun + os=sunos4 + ;; + sun4os3) + basic_machine=sparc-sun + os=sunos3 + ;; + sun4os4) + basic_machine=sparc-sun + os=sunos4 + ;; + sun4sol2) + basic_machine=sparc-sun + os=solaris2 + ;; + sv1) + basic_machine=sv1-cray + os=unicos + ;; + symmetry) + basic_machine=i386-sequent + os=dynix + ;; + t3e) + basic_machine=alphaev5-cray + os=unicos + ;; + t90) + basic_machine=t90-cray + os=unicos + ;; + toad1) + basic_machine=pdp10-xkl + os=tops20 + ;; + tpf) + basic_machine=s390x-ibm + os=tpf + ;; + udi29k) + basic_machine=a29k-amd + os=udi + ;; + ultra3) + basic_machine=a29k-nyu + os=sym1 + ;; + v810 | necv810) + basic_machine=v810-nec + os=none + ;; + vaxv) + basic_machine=vax-dec + os=sysv + ;; + vms) + basic_machine=vax-dec + os=vms + ;; + vxworks960) + basic_machine=i960-wrs + os=vxworks + ;; + vxworks68) + basic_machine=m68k-wrs + os=vxworks + ;; + vxworks29k) + basic_machine=a29k-wrs + os=vxworks + ;; + xbox) + basic_machine=i686-pc + os=mingw32 + ;; + ymp) + basic_machine=ymp-cray + os=unicos + ;; + *) + basic_machine=$1 + os= + ;; + esac ;; esac @@ -252,12 +573,12 @@ case $basic_machine in | alpha64 | alpha64ev[4-8] | alpha64ev56 | alpha64ev6[78] | alpha64pca5[67] \ | am33_2.0 \ | arc | arceb \ - | arm | arm[bl]e | arme[lb] | armv[2-8] | armv[3-8][lb] | armv7[arm] \ + | arm | arm[bl]e | arme[lb] | armv[2-8] | armv[3-8][lb] | armv6m | armv[78][arm] \ | avr | avr32 \ | ba \ | be32 | be64 \ | bfin \ - | c4x | c8051 | clipper \ + | c4x | c8051 | clipper | csky \ | d10v | d30v | dlx | dsp16xx \ | e2k | epiphany \ | fido | fr30 | frv | ft32 \ @@ -296,10 +617,11 @@ case $basic_machine in | mt \ | msp430 \ | nds32 | nds32le | nds32be \ + | nfp \ | nios | nios2 | nios2eb | nios2el \ | ns16k | ns32k \ | open8 | or1k | or1knd | or32 \ - | pdp10 | pdp11 | pj | pjl \ + | pdp10 | pj | pjl \ | powerpc | powerpc64 | powerpc64le | powerpcle \ | pru \ | pyramid \ @@ -316,7 +638,6 @@ case $basic_machine in | v850 | v850e | v850e1 | v850e2 | v850es | v850e2v3 \ | visium \ | wasm32 \ - | we32k \ | x86 | xc16x | xstormy16 | xtensa \ | z8k | z80) basic_machine=$basic_machine-unknown @@ -335,20 +656,23 @@ case $basic_machine in ;; m6811 | m68hc11 | m6812 | m68hc12 | m68hcs12x | nvptx | picochip) basic_machine=$basic_machine-unknown - os=-none + os=${os:-none} ;; - m88110 | m680[12346]0 | m683?2 | m68360 | m5200 | v70 | w65 | z8k) + m88110 | m680[12346]0 | m683?2 | m68360 | m5200 | v70 | w65) + ;; + m9s12z | m68hcs12z | hcs12z | s12z) + basic_machine=s12z-unknown + os=${os:-none} ;; ms1) basic_machine=mt-unknown ;; - strongarm | thumb | xscale) basic_machine=arm-unknown ;; xgate) basic_machine=$basic_machine-unknown - os=-none + os=${os:-none} ;; xscaleeb) basic_machine=armeb-unknown @@ -364,11 +688,6 @@ case $basic_machine in i*86 | x86_64) basic_machine=$basic_machine-pc ;; - # Object if more than one company name word. 
- *-*-*) - echo Invalid configuration \`$1\': machine \`$basic_machine\' not recognized 1>&2 - exit 1 - ;; # Recognize the basic CPU types with company name. 580-* \ | a29k-* \ @@ -382,7 +701,7 @@ case $basic_machine in | be32-* | be64-* \ | bfin-* | bs2000-* \ | c[123]* | c30-* | [cjt]90-* | c4x-* \ - | c8051-* | clipper-* | craynv-* | cydra-* \ + | c8051-* | clipper-* | craynv-* | csky-* | cydra-* \ | d10v-* | d30v-* | dlx-* \ | e2k-* | elxsi-* \ | f30[01]-* | f700-* | fido-* | fr30-* | frv-* | fx80-* \ @@ -423,6 +742,7 @@ case $basic_machine in | mt-* \ | msp430-* \ | nds32-* | nds32le-* | nds32be-* \ + | nfp-* \ | nios-* | nios2-* | nios2eb-* | nios2el-* \ | none-* | np1-* | ns16k-* | ns32k-* \ | open8-* \ @@ -460,141 +780,77 @@ case $basic_machine in ;; # Recognize the various machine names and aliases which stand # for a CPU type and a company and sometimes even an OS. - 386bsd) - basic_machine=i386-unknown - os=-bsd - ;; 3b1 | 7300 | 7300-att | att-7300 | pc7300 | safari | unixpc) basic_machine=m68000-att ;; 3b*) basic_machine=we32k-att ;; - a29khif) - basic_machine=a29k-amd - os=-udi - ;; abacus) basic_machine=abacus-unknown ;; - adobe68k) - basic_machine=m68010-adobe - os=-scout - ;; alliant | fx80) basic_machine=fx80-alliant ;; altos | altos3068) basic_machine=m68k-altos ;; - am29k) - basic_machine=a29k-none - os=-bsd - ;; amd64) basic_machine=x86_64-pc ;; amd64-*) - basic_machine=x86_64-`echo $basic_machine | sed 's/^[^-]*-//'` - ;; - amdahl) - basic_machine=580-amdahl - os=-sysv + basic_machine=x86_64-`echo "$basic_machine" | sed 's/^[^-]*-//'` ;; amiga | amiga-*) basic_machine=m68k-unknown ;; - amigaos | amigados) - basic_machine=m68k-unknown - os=-amigaos - ;; - amigaunix | amix) - basic_machine=m68k-unknown - os=-sysv4 - ;; - apollo68) - basic_machine=m68k-apollo - os=-sysv - ;; - apollo68bsd) - basic_machine=m68k-apollo - os=-bsd - ;; - aros) - basic_machine=i386-pc - os=-aros - ;; asmjs) basic_machine=asmjs-unknown ;; - aux) - basic_machine=m68k-apple - os=-aux - ;; - balance) - basic_machine=ns32k-sequent - os=-dynix - ;; - blackfin) - basic_machine=bfin-unknown - os=-linux - ;; blackfin-*) - basic_machine=bfin-`echo $basic_machine | sed 's/^[^-]*-//'` - os=-linux + basic_machine=bfin-`echo "$basic_machine" | sed 's/^[^-]*-//'` + os=linux ;; bluegene*) basic_machine=powerpc-ibm - os=-cnk + os=cnk ;; c54x-*) - basic_machine=tic54x-`echo $basic_machine | sed 's/^[^-]*-//'` + basic_machine=tic54x-`echo "$basic_machine" | sed 's/^[^-]*-//'` ;; c55x-*) - basic_machine=tic55x-`echo $basic_machine | sed 's/^[^-]*-//'` + basic_machine=tic55x-`echo "$basic_machine" | sed 's/^[^-]*-//'` ;; c6x-*) - basic_machine=tic6x-`echo $basic_machine | sed 's/^[^-]*-//'` + basic_machine=tic6x-`echo "$basic_machine" | sed 's/^[^-]*-//'` ;; c90) basic_machine=c90-cray - os=-unicos - ;; - cegcc) - basic_machine=arm-unknown - os=-cegcc + os=${os:-unicos} ;; convex-c1) basic_machine=c1-convex - os=-bsd + os=bsd ;; convex-c2) basic_machine=c2-convex - os=-bsd + os=bsd ;; convex-c32) basic_machine=c32-convex - os=-bsd + os=bsd ;; convex-c34) basic_machine=c34-convex - os=-bsd + os=bsd ;; convex-c38) basic_machine=c38-convex - os=-bsd - ;; - cray | j90) - basic_machine=j90-cray - os=-unicos - ;; - craynv) - basic_machine=craynv-cray - os=-unicosmp + os=bsd ;; cr16 | cr16-*) basic_machine=cr16-unknown - os=-elf + os=${os:-elf} ;; crds | unos) basic_machine=m68k-crds @@ -607,7 +863,7 @@ case $basic_machine in ;; crx) basic_machine=crx-unknown - os=-elf + os=${os:-elf} ;; da30 | da30-*) 
basic_machine=m68k-da30 @@ -617,58 +873,38 @@ case $basic_machine in ;; decsystem10* | dec10*) basic_machine=pdp10-dec - os=-tops10 + os=tops10 ;; decsystem20* | dec20*) basic_machine=pdp10-dec - os=-tops20 + os=tops20 ;; delta | 3300 | motorola-3300 | motorola-delta \ | 3300-motorola | delta-motorola) basic_machine=m68k-motorola ;; - delta88) - basic_machine=m88k-motorola - os=-sysv3 - ;; - dicos) - basic_machine=i686-pc - os=-dicos - ;; - djgpp) - basic_machine=i586-pc - os=-msdosdjgpp - ;; dpx20 | dpx20-*) basic_machine=rs6000-bull - os=-bosx + os=${os:-bosx} ;; - dpx2* | dpx2*-bull) + dpx2*) basic_machine=m68k-bull - os=-sysv3 + os=sysv3 ;; e500v[12]) basic_machine=powerpc-unknown os=$os"spe" ;; e500v[12]-*) - basic_machine=powerpc-`echo $basic_machine | sed 's/^[^-]*-//'` + basic_machine=powerpc-`echo "$basic_machine" | sed 's/^[^-]*-//'` os=$os"spe" ;; - ebmon29k) - basic_machine=a29k-amd - os=-ebmon - ;; - elxsi) - basic_machine=elxsi-elxsi - os=-bsd - ;; encore | umax | mmax) basic_machine=ns32k-encore ;; - es1800 | OSE68k | ose68k | ose | OSE) - basic_machine=m68k-ericsson - os=-ose + elxsi) + basic_machine=elxsi-elxsi + os=${os:-bsd} ;; fx2800) basic_machine=i860-alliant @@ -676,45 +912,13 @@ case $basic_machine in genix) basic_machine=ns32k-ns ;; - gmicro) - basic_machine=tron-gmicro - os=-sysv - ;; - go32) - basic_machine=i386-pc - os=-go32 - ;; h3050r* | hiux*) basic_machine=hppa1.1-hitachi - os=-hiuxwe2 - ;; - h8300hms) - basic_machine=h8300-hitachi - os=-hms - ;; - h8300xray) - basic_machine=h8300-hitachi - os=-xray - ;; - h8500hms) - basic_machine=h8500-hitachi - os=-hms - ;; - harris) - basic_machine=m88k-harris - os=-sysv3 + os=hiuxwe2 ;; hp300-*) basic_machine=m68k-hp ;; - hp300bsd) - basic_machine=m68k-hp - os=-bsd - ;; - hp300hpux) - basic_machine=m68k-hp - os=-hpux - ;; hp3k9[0-9][0-9] | hp9[0-9][0-9]) basic_machine=hppa1.0-hp ;; @@ -744,200 +948,82 @@ case $basic_machine in hp9k8[0-9][0-9] | hp8[0-9][0-9]) basic_machine=hppa1.0-hp ;; - hppa-next) - os=-nextstep3 - ;; - hppaosf) - basic_machine=hppa1.1-hp - os=-osf - ;; - hppro) - basic_machine=hppa1.1-hp - os=-proelf - ;; i370-ibm* | ibm*) basic_machine=i370-ibm ;; i*86v32) - basic_machine=`echo $1 | sed -e 's/86.*/86-pc/'` - os=-sysv32 + basic_machine=`echo "$1" | sed -e 's/86.*/86-pc/'` + os=sysv32 ;; i*86v4*) - basic_machine=`echo $1 | sed -e 's/86.*/86-pc/'` - os=-sysv4 + basic_machine=`echo "$1" | sed -e 's/86.*/86-pc/'` + os=sysv4 ;; i*86v) - basic_machine=`echo $1 | sed -e 's/86.*/86-pc/'` - os=-sysv + basic_machine=`echo "$1" | sed -e 's/86.*/86-pc/'` + os=sysv ;; i*86sol2) - basic_machine=`echo $1 | sed -e 's/86.*/86-pc/'` - os=-solaris2 - ;; - i386mach) - basic_machine=i386-mach - os=-mach + basic_machine=`echo "$1" | sed -e 's/86.*/86-pc/'` + os=solaris2 ;; - i386-vsta | vsta) - basic_machine=i386-unknown - os=-vsta + j90 | j90-cray) + basic_machine=j90-cray + os=${os:-unicos} ;; iris | iris4d) basic_machine=mips-sgi case $os in - -irix*) + irix*) ;; *) - os=-irix4 + os=irix4 ;; esac ;; - isi68 | isi) - basic_machine=m68k-isi - os=-sysv - ;; leon-*|leon[3-9]-*) - basic_machine=sparc-`echo $basic_machine | sed 's/-.*//'` - ;; - m68knommu) - basic_machine=m68k-unknown - os=-linux + basic_machine=sparc-`echo "$basic_machine" | sed 's/-.*//'` ;; m68knommu-*) - basic_machine=m68k-`echo $basic_machine | sed 's/^[^-]*-//'` - os=-linux - ;; - m88k-omron*) - basic_machine=m88k-omron - ;; - magnum | m3230) - basic_machine=mips-mips - os=-sysv - ;; - merlin) - basic_machine=ns32k-utek - os=-sysv + 
basic_machine=m68k-`echo "$basic_machine" | sed 's/^[^-]*-//'` + os=linux ;; microblaze*) basic_machine=microblaze-xilinx ;; - mingw64) - basic_machine=x86_64-pc - os=-mingw64 - ;; - mingw32) - basic_machine=i686-pc - os=-mingw32 - ;; - mingw32ce) - basic_machine=arm-unknown - os=-mingw32ce - ;; miniframe) basic_machine=m68000-convergent ;; - *mint | -mint[0-9]* | *MiNT | *MiNT[0-9]*) + *mint | mint[0-9]* | *MiNT | *MiNT[0-9]*) basic_machine=m68k-atari - os=-mint + os=mint ;; mips3*-*) - basic_machine=`echo $basic_machine | sed -e 's/mips3/mips64/'` + basic_machine=`echo "$basic_machine" | sed -e 's/mips3/mips64/'` ;; mips3*) - basic_machine=`echo $basic_machine | sed -e 's/mips3/mips64/'`-unknown - ;; - monitor) - basic_machine=m68k-rom68k - os=-coff - ;; - morphos) - basic_machine=powerpc-unknown - os=-morphos - ;; - moxiebox) - basic_machine=moxie-unknown - os=-moxiebox - ;; - msdos) - basic_machine=i386-pc - os=-msdos + basic_machine=`echo "$basic_machine" | sed -e 's/mips3/mips64/'`-unknown ;; ms1-*) - basic_machine=`echo $basic_machine | sed -e 's/ms1-/mt-/'` - ;; - msys) - basic_machine=i686-pc - os=-msys - ;; - mvs) - basic_machine=i370-ibm - os=-mvs - ;; - nacl) - basic_machine=le32-unknown - os=-nacl - ;; - ncr3000) - basic_machine=i486-ncr - os=-sysv4 - ;; - netbsd386) - basic_machine=i386-unknown - os=-netbsd - ;; - netwinder) - basic_machine=armv4l-rebel - os=-linux - ;; - news | news700 | news800 | news900) - basic_machine=m68k-sony - os=-newsos - ;; - news1000) - basic_machine=m68030-sony - os=-newsos + basic_machine=`echo "$basic_machine" | sed -e 's/ms1-/mt-/'` ;; news-3600 | risc-news) basic_machine=mips-sony - os=-newsos + os=newsos ;; - necv70) - basic_machine=v70-nec - os=-sysv - ;; - next | m*-next ) + next | m*-next) basic_machine=m68k-next case $os in - -nextstep* ) + nextstep* ) ;; - -ns2*) - os=-nextstep2 + ns2*) + os=nextstep2 ;; *) - os=-nextstep3 + os=nextstep3 ;; esac ;; - nh3000) - basic_machine=m68k-harris - os=-cxux - ;; - nh[45]000) - basic_machine=m88k-harris - os=-cxux - ;; - nindy960) - basic_machine=i960-intel - os=-nindy - ;; - mon960) - basic_machine=i960-intel - os=-mon960 - ;; - nonstopux) - basic_machine=mips-compaq - os=-nonstopux - ;; np1) basic_machine=np1-gould ;; @@ -950,43 +1036,26 @@ case $basic_machine in nsr-tandem) basic_machine=nsr-tandem ;; + nsv-tandem) + basic_machine=nsv-tandem + ;; nsx-tandem) basic_machine=nsx-tandem ;; op50n-* | op60c-*) basic_machine=hppa1.1-oki - os=-proelf + os=proelf ;; openrisc | openrisc-*) basic_machine=or32-unknown ;; - os400) - basic_machine=powerpc-ibm - os=-os400 - ;; - OSE68000 | ose68000) - basic_machine=m68000-ericsson - os=-ose - ;; - os68k) - basic_machine=m68k-none - os=-os68k - ;; pa-hitachi) basic_machine=hppa1.1-hitachi - os=-hiuxwe2 - ;; - paragon) - basic_machine=i860-intel - os=-osf - ;; - parisc) - basic_machine=hppa-unknown - os=-linux + os=hiuxwe2 ;; parisc-*) - basic_machine=hppa-`echo $basic_machine | sed 's/^[^-]*-//'` - os=-linux + basic_machine=hppa-`echo "$basic_machine" | sed 's/^[^-]*-//'` + os=linux ;; pbd) basic_machine=sparc-tti @@ -1001,7 +1070,7 @@ case $basic_machine in basic_machine=i386-pc ;; pc98-*) - basic_machine=i386-`echo $basic_machine | sed 's/^[^-]*-//'` + basic_machine=i386-`echo "$basic_machine" | sed 's/^[^-]*-//'` ;; pentium | p5 | k5 | k6 | nexgen | viac3) basic_machine=i586-pc @@ -1016,16 +1085,16 @@ case $basic_machine in basic_machine=i786-pc ;; pentium-* | p5-* | k5-* | k6-* | nexgen-* | viac3-*) - basic_machine=i586-`echo $basic_machine | sed 
's/^[^-]*-//'` + basic_machine=i586-`echo "$basic_machine" | sed 's/^[^-]*-//'` ;; pentiumpro-* | p6-* | 6x86-* | athlon-*) - basic_machine=i686-`echo $basic_machine | sed 's/^[^-]*-//'` + basic_machine=i686-`echo "$basic_machine" | sed 's/^[^-]*-//'` ;; pentiumii-* | pentium2-* | pentiumiii-* | pentium3-*) - basic_machine=i686-`echo $basic_machine | sed 's/^[^-]*-//'` + basic_machine=i686-`echo "$basic_machine" | sed 's/^[^-]*-//'` ;; pentium4-*) - basic_machine=i786-`echo $basic_machine | sed 's/^[^-]*-//'` + basic_machine=i786-`echo "$basic_machine" | sed 's/^[^-]*-//'` ;; pn) basic_machine=pn-gould @@ -1035,43 +1104,27 @@ case $basic_machine in ppc | ppcbe) basic_machine=powerpc-unknown ;; ppc-* | ppcbe-*) - basic_machine=powerpc-`echo $basic_machine | sed 's/^[^-]*-//'` + basic_machine=powerpc-`echo "$basic_machine" | sed 's/^[^-]*-//'` ;; ppcle | powerpclittle) basic_machine=powerpcle-unknown ;; ppcle-* | powerpclittle-*) - basic_machine=powerpcle-`echo $basic_machine | sed 's/^[^-]*-//'` + basic_machine=powerpcle-`echo "$basic_machine" | sed 's/^[^-]*-//'` ;; ppc64) basic_machine=powerpc64-unknown ;; - ppc64-*) basic_machine=powerpc64-`echo $basic_machine | sed 's/^[^-]*-//'` + ppc64-*) basic_machine=powerpc64-`echo "$basic_machine" | sed 's/^[^-]*-//'` ;; ppc64le | powerpc64little) basic_machine=powerpc64le-unknown ;; ppc64le-* | powerpc64little-*) - basic_machine=powerpc64le-`echo $basic_machine | sed 's/^[^-]*-//'` + basic_machine=powerpc64le-`echo "$basic_machine" | sed 's/^[^-]*-//'` ;; ps2) basic_machine=i386-ibm ;; - pw32) - basic_machine=i586-unknown - os=-pw32 - ;; - rdos | rdos64) - basic_machine=x86_64-pc - os=-rdos - ;; - rdos32) - basic_machine=i386-pc - os=-rdos - ;; - rom68k) - basic_machine=m68k-rom68k - os=-coff - ;; rm[46]00) basic_machine=mips-siemens ;; @@ -1084,10 +1137,6 @@ case $basic_machine in s390x | s390x-*) basic_machine=s390x-ibm ;; - sa29200) - basic_machine=a29k-amd - os=-udi - ;; sb1) basic_machine=mipsisa64sb1-unknown ;; @@ -1096,32 +1145,17 @@ case $basic_machine in ;; sde) basic_machine=mipsisa32-sde - os=-elf - ;; - sei) - basic_machine=mips-sei - os=-seiux + os=${os:-elf} ;; sequent) basic_machine=i386-sequent ;; - sh) - basic_machine=sh-hitachi - os=-hms - ;; sh5el) basic_machine=sh5le-unknown ;; - sh64) - basic_machine=sh64-unknown - ;; - sparclite-wrs | simso-wrs) + simso-wrs) basic_machine=sparclite-wrs - os=-vxworks - ;; - sps7) - basic_machine=m68k-bull - os=-sysv2 + os=vxworks ;; spur) basic_machine=spur-unknown @@ -1129,44 +1163,12 @@ case $basic_machine in st2000) basic_machine=m68k-tandem ;; - stratus) - basic_machine=i860-stratus - os=-sysv4 - ;; strongarm-* | thumb-*) - basic_machine=arm-`echo $basic_machine | sed 's/^[^-]*-//'` + basic_machine=arm-`echo "$basic_machine" | sed 's/^[^-]*-//'` ;; sun2) basic_machine=m68000-sun ;; - sun2os3) - basic_machine=m68000-sun - os=-sunos3 - ;; - sun2os4) - basic_machine=m68000-sun - os=-sunos4 - ;; - sun3os3) - basic_machine=m68k-sun - os=-sunos3 - ;; - sun3os4) - basic_machine=m68k-sun - os=-sunos4 - ;; - sun4os3) - basic_machine=sparc-sun - os=-sunos3 - ;; - sun4os4) - basic_machine=sparc-sun - os=-sunos4 - ;; - sun4sol2) - basic_machine=sparc-sun - os=-solaris2 - ;; sun3 | sun3-*) basic_machine=m68k-sun ;; @@ -1176,25 +1178,9 @@ case $basic_machine in sun386 | sun386i | roadrunner) basic_machine=i386-sun ;; - sv1) - basic_machine=sv1-cray - os=-unicos - ;; - symmetry) - basic_machine=i386-sequent - os=-dynix - ;; - t3e) - basic_machine=alphaev5-cray - os=-unicos - ;; - t90) - 
basic_machine=t90-cray - os=-unicos - ;; tile*) basic_machine=$basic_machine-unknown - os=-linux-gnu + os=linux-gnu ;; tx39) basic_machine=mipstx39-unknown @@ -1202,88 +1188,32 @@ case $basic_machine in tx39el) basic_machine=mipstx39el-unknown ;; - toad1) - basic_machine=pdp10-xkl - os=-tops20 - ;; tower | tower-32) basic_machine=m68k-ncr ;; - tpf) - basic_machine=s390x-ibm - os=-tpf - ;; - udi29k) - basic_machine=a29k-amd - os=-udi - ;; - ultra3) - basic_machine=a29k-nyu - os=-sym1 - ;; - v810 | necv810) - basic_machine=v810-nec - os=-none - ;; - vaxv) - basic_machine=vax-dec - os=-sysv - ;; - vms) - basic_machine=vax-dec - os=-vms - ;; vpp*|vx|vx-*) basic_machine=f301-fujitsu ;; - vxworks960) - basic_machine=i960-wrs - os=-vxworks - ;; - vxworks68) - basic_machine=m68k-wrs - os=-vxworks - ;; - vxworks29k) - basic_machine=a29k-wrs - os=-vxworks - ;; - wasm32) - basic_machine=wasm32-unknown - ;; w65*) basic_machine=w65-wdc - os=-none + os=none ;; w89k-*) basic_machine=hppa1.1-winbond - os=-proelf + os=proelf ;; - xbox) - basic_machine=i686-pc - os=-mingw32 + x64) + basic_machine=x86_64-pc ;; xps | xps100) basic_machine=xps100-honeywell ;; xscale-* | xscalee[bl]-*) - basic_machine=`echo $basic_machine | sed 's/^xscale/arm/'` - ;; - ymp) - basic_machine=ymp-cray - os=-unicos - ;; - z8k-*-coff) - basic_machine=z8k-unknown - os=-sim - ;; - z80-*-coff) - basic_machine=z80-unknown - os=-sim + basic_machine=`echo "$basic_machine" | sed 's/^xscale/arm/'` ;; none) basic_machine=none-none - os=-none + os=${os:-none} ;; # Here we handle the default manufacturer of certain CPU types. It is in @@ -1309,10 +1239,6 @@ case $basic_machine in vax) basic_machine=vax-dec ;; - pdp10) - # there are many clones, so DEC is not a safe bet - basic_machine=pdp10-unknown - ;; pdp11) basic_machine=pdp11-dec ;; @@ -1322,9 +1248,6 @@ case $basic_machine in sh[1234] | sh[24]a | sh[24]aeb | sh[34]eb | sh[1234]le | sh[23]ele) basic_machine=sh-unknown ;; - sparc | sparcv8 | sparcv9 | sparcv9b | sparcv9v) - basic_machine=sparc-sun - ;; cydra) basic_machine=cydra-cydrome ;; @@ -1344,7 +1267,7 @@ case $basic_machine in # Make sure to match an already-canonicalized machine name. ;; *) - echo Invalid configuration \`$1\': machine \`$basic_machine\' not recognized 1>&2 + echo Invalid configuration \`"$1"\': machine \`"$basic_machine"\' not recognized 1>&2 exit 1 ;; esac @@ -1352,10 +1275,10 @@ esac # Here we canonicalize certain aliases for manufacturers. case $basic_machine in *-digital*) - basic_machine=`echo $basic_machine | sed 's/digital.*/dec/'` + basic_machine=`echo "$basic_machine" | sed 's/digital.*/dec/'` ;; *-commodore*) - basic_machine=`echo $basic_machine | sed 's/commodore.*/cbm/'` + basic_machine=`echo "$basic_machine" | sed 's/commodore.*/cbm/'` ;; *) ;; @@ -1363,200 +1286,246 @@ esac # Decode manufacturer-specific aliases for certain operating systems. -if [ x"$os" != x"" ] +if [ x$os != x ] then case $os in - # First match some system type aliases - # that might get confused with valid system types. - # -solaris* is a basic system type, with this one exception. - -auroraux) - os=-auroraux + # First match some system type aliases that might get confused + # with valid system types. + # solaris* is a basic system type, with this one exception. 
+ auroraux) + os=auroraux ;; - -solaris1 | -solaris1.*) - os=`echo $os | sed -e 's|solaris1|sunos4|'` + bluegene*) + os=cnk ;; - -solaris) - os=-solaris2 + solaris1 | solaris1.*) + os=`echo $os | sed -e 's|solaris1|sunos4|'` ;; - -svr4*) - os=-sysv4 + solaris) + os=solaris2 ;; - -unixware*) - os=-sysv4.2uw + unixware*) + os=sysv4.2uw ;; - -gnu/linux*) + gnu/linux*) os=`echo $os | sed -e 's|gnu/linux|linux-gnu|'` ;; - # First accept the basic system types. + # es1800 is here to avoid being matched by es* (a different OS) + es1800*) + os=ose + ;; + # Some version numbers need modification + chorusos*) + os=chorusos + ;; + isc) + os=isc2.2 + ;; + sco6) + os=sco5v6 + ;; + sco5) + os=sco3.2v5 + ;; + sco4) + os=sco3.2v4 + ;; + sco3.2.[4-9]*) + os=`echo $os | sed -e 's/sco3.2./sco3.2v/'` + ;; + sco3.2v[4-9]* | sco5v6*) + # Don't forget version if it is 3.2v4 or newer. + ;; + scout) + # Don't match below + ;; + sco*) + os=sco3.2v2 + ;; + psos*) + os=psos + ;; + # Now accept the basic system types. # The portable systems comes first. - # Each alternative MUST END IN A *, to match a version number. - # -sysv* is not here because it comes later, after sysvr4. - -gnu* | -bsd* | -mach* | -minix* | -genix* | -ultrix* | -irix* \ - | -*vms* | -sco* | -esix* | -isc* | -aix* | -cnk* | -sunos | -sunos[34]*\ - | -hpux* | -unos* | -osf* | -luna* | -dgux* | -auroraux* | -solaris* \ - | -sym* | -kopensolaris* | -plan9* \ - | -amigaos* | -amigados* | -msdos* | -newsos* | -unicos* | -aof* \ - | -aos* | -aros* | -cloudabi* | -sortix* \ - | -nindy* | -vxsim* | -vxworks* | -ebmon* | -hms* | -mvs* \ - | -clix* | -riscos* | -uniplus* | -iris* | -rtu* | -xenix* \ - | -hiux* | -386bsd* | -knetbsd* | -mirbsd* | -netbsd* \ - | -bitrig* | -openbsd* | -solidbsd* | -libertybsd* \ - | -ekkobsd* | -kfreebsd* | -freebsd* | -riscix* | -lynxos* \ - | -bosx* | -nextstep* | -cxux* | -aout* | -elf* | -oabi* \ - | -ptx* | -coff* | -ecoff* | -winnt* | -domain* | -vsta* \ - | -udi* | -eabi* | -lites* | -ieee* | -go32* | -aux* \ - | -chorusos* | -chorusrdb* | -cegcc* | -glidix* \ - | -cygwin* | -msys* | -pe* | -psos* | -moss* | -proelf* | -rtems* \ - | -midipix* | -mingw32* | -mingw64* | -linux-gnu* | -linux-android* \ - | -linux-newlib* | -linux-musl* | -linux-uclibc* \ - | -uxpv* | -beos* | -mpeix* | -udk* | -moxiebox* \ - | -interix* | -uwin* | -mks* | -rhapsody* | -darwin* | -opened* \ - | -openstep* | -oskit* | -conix* | -pw32* | -nonstopux* \ - | -storm-chaos* | -tops10* | -tenex* | -tops20* | -its* \ - | -os2* | -vos* | -palmos* | -uclinux* | -nucleus* \ - | -morphos* | -superux* | -rtmk* | -rtmk-nova* | -windiss* \ - | -powermax* | -dnix* | -nx6 | -nx7 | -sei* | -dragonfly* \ - | -skyos* | -haiku* | -rdos* | -toppers* | -drops* | -es* \ - | -onefs* | -tirtos* | -phoenix* | -fuchsia* | -redox*) + # Each alternative MUST end in a * to match a version number. + # sysv* is not here because it comes later, after sysvr4. 
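# This block first rewrites a handful of OS aliases before accepting the
# canonical names below. Two of the rewrites further down, replayed as plain
# shell (the version suffixes are only examples):
echo sunos5.8 | sed -e 's|sunos5|solaris2|'     # -> solaris2.8, as in the sunos5* branch
echo linux    | sed -e 's|linux|linux-gnu|'     # -> linux-gnu, as in the linux* branch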
+ gnu* | bsd* | mach* | minix* | genix* | ultrix* | irix* \ + | *vms* | esix* | aix* | cnk* | sunos | sunos[34]*\ + | hpux* | unos* | osf* | luna* | dgux* | auroraux* | solaris* \ + | sym* | kopensolaris* | plan9* \ + | amigaos* | amigados* | msdos* | newsos* | unicos* | aof* \ + | aos* | aros* | cloudabi* | sortix* \ + | nindy* | vxsim* | vxworks* | ebmon* | hms* | mvs* \ + | clix* | riscos* | uniplus* | iris* | rtu* | xenix* \ + | knetbsd* | mirbsd* | netbsd* \ + | bitrig* | openbsd* | solidbsd* | libertybsd* \ + | ekkobsd* | kfreebsd* | freebsd* | riscix* | lynxos* \ + | bosx* | nextstep* | cxux* | aout* | elf* | oabi* \ + | ptx* | coff* | ecoff* | winnt* | domain* | vsta* \ + | udi* | eabi* | lites* | ieee* | go32* | aux* | hcos* \ + | chorusrdb* | cegcc* | glidix* \ + | cygwin* | msys* | pe* | moss* | proelf* | rtems* \ + | midipix* | mingw32* | mingw64* | linux-gnu* | linux-android* \ + | linux-newlib* | linux-musl* | linux-uclibc* \ + | uxpv* | beos* | mpeix* | udk* | moxiebox* \ + | interix* | uwin* | mks* | rhapsody* | darwin* \ + | openstep* | oskit* | conix* | pw32* | nonstopux* \ + | storm-chaos* | tops10* | tenex* | tops20* | its* \ + | os2* | vos* | palmos* | uclinux* | nucleus* \ + | morphos* | superux* | rtmk* | windiss* \ + | powermax* | dnix* | nx6 | nx7 | sei* | dragonfly* \ + | skyos* | haiku* | rdos* | toppers* | drops* | es* \ + | onefs* | tirtos* | phoenix* | fuchsia* | redox* | bme* \ + | midnightbsd*) # Remember, each alternative MUST END IN *, to match a version number. ;; - -qnx*) + qnx*) case $basic_machine in x86-* | i*86-*) ;; *) - os=-nto$os + os=nto-$os ;; esac ;; - -nto-qnx*) + hiux*) + os=hiuxwe2 ;; - -nto*) - os=`echo $os | sed -e 's|nto|nto-qnx|'` + nto-qnx*) ;; - -sim | -es1800* | -hms* | -xray | -os68k* | -none* | -v88r* \ - | -windows* | -osx | -abug | -netware* | -os9* | -beos* | -haiku* \ - | -macos* | -mpw* | -magic* | -mmixware* | -mon960* | -lnews*) + nto*) + os=`echo $os | sed -e 's|nto|nto-qnx|'` ;; - -mac*) - os=`echo $os | sed -e 's|mac|macos|'` + sim | xray | os68k* | v88r* \ + | windows* | osx | abug | netware* | os9* \ + | macos* | mpw* | magic* | mmixware* | mon960* | lnews*) ;; - -linux-dietlibc) - os=-linux-dietlibc + linux-dietlibc) + os=linux-dietlibc ;; - -linux*) + linux*) os=`echo $os | sed -e 's|linux|linux-gnu|'` ;; - -sunos5*) - os=`echo $os | sed -e 's|sunos5|solaris2|'` + lynx*178) + os=lynxos178 + ;; + lynx*5) + os=lynxos5 + ;; + lynx*) + os=lynxos ;; - -sunos6*) - os=`echo $os | sed -e 's|sunos6|solaris3|'` + mac*) + os=`echo "$os" | sed -e 's|mac|macos|'` ;; - -opened*) - os=-openedition + opened*) + os=openedition ;; - -os400*) - os=-os400 + os400*) + os=os400 ;; - -wince*) - os=-wince + sunos5*) + os=`echo "$os" | sed -e 's|sunos5|solaris2|'` ;; - -osfrose*) - os=-osfrose + sunos6*) + os=`echo "$os" | sed -e 's|sunos6|solaris3|'` ;; - -osf*) - os=-osf + wince*) + os=wince ;; - -utek*) - os=-bsd + utek*) + os=bsd ;; - -dynix*) - os=-bsd + dynix*) + os=bsd ;; - -acis*) - os=-aos + acis*) + os=aos ;; - -atheos*) - os=-atheos + atheos*) + os=atheos ;; - -syllable*) - os=-syllable + syllable*) + os=syllable ;; - -386bsd) - os=-bsd + 386bsd) + os=bsd ;; - -ctix* | -uts*) - os=-sysv + ctix* | uts*) + os=sysv ;; - -nova*) - os=-rtmk-nova + nova*) + os=rtmk-nova ;; - -ns2 ) - os=-nextstep2 + ns2) + os=nextstep2 ;; - -nsk*) - os=-nsk + nsk*) + os=nsk ;; # Preserve the version number of sinix5. 
- -sinix5.*) + sinix5.*) os=`echo $os | sed -e 's|sinix|sysv|'` ;; - -sinix*) - os=-sysv4 - ;; - -tpf*) - os=-tpf + sinix*) + os=sysv4 ;; - -triton*) - os=-sysv3 + tpf*) + os=tpf ;; - -oss*) - os=-sysv3 + triton*) + os=sysv3 ;; - -svr4) - os=-sysv4 + oss*) + os=sysv3 ;; - -svr3) - os=-sysv3 + svr4*) + os=sysv4 ;; - -sysvr4) - os=-sysv4 + svr3) + os=sysv3 ;; - # This must come after -sysvr4. - -sysv*) + sysvr4) + os=sysv4 ;; - -ose*) - os=-ose + # This must come after sysvr4. + sysv*) ;; - -es1800*) - os=-ose + ose*) + os=ose ;; - -xenix) - os=-xenix + *mint | mint[0-9]* | *MiNT | MiNT[0-9]*) + os=mint ;; - -*mint | -mint[0-9]* | -*MiNT | -MiNT[0-9]*) - os=-mint + zvmoe) + os=zvmoe ;; - -aros*) - os=-aros + dicos*) + os=dicos ;; - -zvmoe) - os=-zvmoe + pikeos*) + # Until real need of OS specific support for + # particular features comes up, bare metal + # configurations are quite functional. + case $basic_machine in + arm*) + os=eabi + ;; + *) + os=elf + ;; + esac ;; - -dicos*) - os=-dicos + nacl*) ;; - -nacl*) + ios) ;; - -ios) + none) ;; - -none) + *-eabi) ;; *) - # Get rid of the `-' at the beginning of $os. - os=`echo $os | sed 's/[^-]*-//'` - echo Invalid configuration \`$1\': system \`$os\' not recognized 1>&2 + echo Invalid configuration \`"$1"\': system \`"$os"\' not recognized 1>&2 exit 1 ;; esac @@ -1574,179 +1543,179 @@ else case $basic_machine in score-*) - os=-elf + os=elf ;; spu-*) - os=-elf + os=elf ;; *-acorn) - os=-riscix1.2 + os=riscix1.2 ;; arm*-rebel) - os=-linux + os=linux ;; arm*-semi) - os=-aout + os=aout ;; c4x-* | tic4x-*) - os=-coff + os=coff ;; c8051-*) - os=-elf + os=elf + ;; + clipper-intergraph) + os=clix ;; hexagon-*) - os=-elf + os=elf ;; tic54x-*) - os=-coff + os=coff ;; tic55x-*) - os=-coff + os=coff ;; tic6x-*) - os=-coff + os=coff ;; # This must come before the *-dec entry. pdp10-*) - os=-tops20 + os=tops20 ;; pdp11-*) - os=-none + os=none ;; *-dec | vax-*) - os=-ultrix4.2 + os=ultrix4.2 ;; m68*-apollo) - os=-domain + os=domain ;; i386-sun) - os=-sunos4.0.2 + os=sunos4.0.2 ;; m68000-sun) - os=-sunos3 + os=sunos3 ;; m68*-cisco) - os=-aout + os=aout ;; mep-*) - os=-elf + os=elf ;; mips*-cisco) - os=-elf + os=elf ;; mips*-*) - os=-elf + os=elf ;; or32-*) - os=-coff + os=coff ;; *-tti) # must be before sparc entry or we get the wrong os. 
- os=-sysv3 + os=sysv3 ;; sparc-* | *-sun) - os=-sunos4.1.1 + os=sunos4.1.1 ;; pru-*) - os=-elf + os=elf ;; *-be) - os=-beos - ;; - *-haiku) - os=-haiku + os=beos ;; *-ibm) - os=-aix + os=aix ;; *-knuth) - os=-mmixware + os=mmixware ;; *-wec) - os=-proelf + os=proelf ;; *-winbond) - os=-proelf + os=proelf ;; *-oki) - os=-proelf + os=proelf ;; *-hp) - os=-hpux + os=hpux ;; *-hitachi) - os=-hiux + os=hiux ;; i860-* | *-att | *-ncr | *-altos | *-motorola | *-convergent) - os=-sysv + os=sysv ;; *-cbm) - os=-amigaos + os=amigaos ;; *-dg) - os=-dgux + os=dgux ;; *-dolphin) - os=-sysv3 + os=sysv3 ;; m68k-ccur) - os=-rtu + os=rtu ;; m88k-omron*) - os=-luna + os=luna ;; - *-next ) - os=-nextstep + *-next) + os=nextstep ;; *-sequent) - os=-ptx + os=ptx ;; *-crds) - os=-unos + os=unos ;; *-ns) - os=-genix + os=genix ;; i370-*) - os=-mvs - ;; - *-next) - os=-nextstep3 + os=mvs ;; *-gould) - os=-sysv + os=sysv ;; *-highlevel) - os=-bsd + os=bsd ;; *-encore) - os=-bsd + os=bsd ;; *-sgi) - os=-irix + os=irix ;; *-siemens) - os=-sysv4 + os=sysv4 ;; *-masscomp) - os=-rtu + os=rtu ;; f30[01]-fujitsu | f700-fujitsu) - os=-uxpv + os=uxpv ;; *-rom68k) - os=-coff + os=coff ;; *-*bug) - os=-coff + os=coff ;; *-apple) - os=-macos + os=macos ;; *-atari*) - os=-mint + os=mint + ;; + *-wrs) + os=vxworks ;; *) - os=-none + os=none ;; esac fi @@ -1757,79 +1726,82 @@ vendor=unknown case $basic_machine in *-unknown) case $os in - -riscix*) + riscix*) vendor=acorn ;; - -sunos*) + sunos*) vendor=sun ;; - -cnk*|-aix*) + cnk*|-aix*) vendor=ibm ;; - -beos*) + beos*) vendor=be ;; - -hpux*) + hpux*) vendor=hp ;; - -mpeix*) + mpeix*) vendor=hp ;; - -hiux*) + hiux*) vendor=hitachi ;; - -unos*) + unos*) vendor=crds ;; - -dgux*) + dgux*) vendor=dg ;; - -luna*) + luna*) vendor=omron ;; - -genix*) + genix*) vendor=ns ;; - -mvs* | -opened*) + clix*) + vendor=intergraph + ;; + mvs* | opened*) vendor=ibm ;; - -os400*) + os400*) vendor=ibm ;; - -ptx*) + ptx*) vendor=sequent ;; - -tpf*) + tpf*) vendor=ibm ;; - -vxsim* | -vxworks* | -windiss*) + vxsim* | vxworks* | windiss*) vendor=wrs ;; - -aux*) + aux*) vendor=apple ;; - -hms*) + hms*) vendor=hitachi ;; - -mpw* | -macos*) + mpw* | macos*) vendor=apple ;; - -*mint | -mint[0-9]* | -*MiNT | -MiNT[0-9]*) + *mint | mint[0-9]* | *MiNT | MiNT[0-9]*) vendor=atari ;; - -vos*) + vos*) vendor=stratus ;; esac - basic_machine=`echo $basic_machine | sed "s/unknown/$vendor/"` + basic_machine=`echo "$basic_machine" | sed "s/unknown/$vendor/"` ;; esac -echo $basic_machine$os +echo "$basic_machine-$os" exit # Local variables: -# eval: (add-hook 'write-file-hooks 'time-stamp) +# eval: (add-hook 'before-save-hook 'time-stamp) # time-stamp-start: "timestamp='" # time-stamp-format: "%:y-%02m-%02d" # time-stamp-end: "'" diff --git a/config/docbook.m4 b/config/docbook.m4 index f9307f329e..34b829eade 100644 --- a/config/docbook.m4 +++ b/config/docbook.m4 @@ -1,18 +1,18 @@ # config/docbook.m4 -# PGAC_PROG_NSGMLS -# ---------------- -AC_DEFUN([PGAC_PROG_NSGMLS], -[PGAC_PATH_PROGS(NSGMLS, [onsgmls nsgmls])]) +# PGAC_PATH_XMLLINT +# ----------------- +AC_DEFUN([PGAC_PATH_XMLLINT], +[PGAC_PATH_PROGS(XMLLINT, xmllint)]) # PGAC_CHECK_DOCBOOK(VERSION) # --------------------------- AC_DEFUN([PGAC_CHECK_DOCBOOK], -[AC_REQUIRE([PGAC_PROG_NSGMLS]) -AC_CACHE_CHECK([for DocBook V$1], [pgac_cv_check_docbook], -[cat >conftest.sgml < +[AC_REQUIRE([PGAC_PATH_XMLLINT]) +AC_CACHE_CHECK([for DocBook XML V$1], [pgac_cv_check_docbook], +[cat >conftest.xml < test @@ -27,13 +27,13 @@ EOF pgac_cv_check_docbook=no -if test -n "$NSGMLS"; 
then - $NSGMLS -s conftest.sgml 1>&AS_MESSAGE_LOG_FD 2>&1 +if test -n "$XMLLINT"; then + $XMLLINT --noout --valid conftest.xml 1>&AS_MESSAGE_LOG_FD 2>&1 if test $? -eq 0; then pgac_cv_check_docbook=yes fi fi -rm -f conftest.sgml]) +rm -f conftest.xml]) have_docbook=$pgac_cv_check_docbook AC_SUBST([have_docbook]) diff --git a/config/llvm.m4 b/config/llvm.m4 new file mode 100644 index 0000000000..926d684ed1 --- /dev/null +++ b/config/llvm.m4 @@ -0,0 +1,109 @@ +# config/llvm.m4 + +# PGAC_LLVM_SUPPORT +# --------------- +# +# Look for the LLVM installation, check that it's new enough, set the +# corresponding LLVM_{CFLAGS,CXXFLAGS,BINPATH} and LDFLAGS +# variables. Also verifies that CLANG is available, to transform C +# into bitcode. +# +AC_DEFUN([PGAC_LLVM_SUPPORT], +[ + AC_REQUIRE([AC_PROG_AWK]) + + AC_ARG_VAR(LLVM_CONFIG, [path to llvm-config command]) + PGAC_PATH_PROGS(LLVM_CONFIG, llvm-config llvm-config-7 llvm-config-6.0 llvm-config-5.0 llvm-config-4.0 llvm-config-3.9) + + # no point continuing if llvm wasn't found + if test -z "$LLVM_CONFIG"; then + AC_MSG_ERROR([llvm-config not found, but required when compiling --with-llvm, specify with LLVM_CONFIG=]) + fi + # check if detected $LLVM_CONFIG is executable + pgac_llvm_version="$($LLVM_CONFIG --version 2> /dev/null || echo no)" + if test "x$pgac_llvm_version" = "xno"; then + AC_MSG_ERROR([$LLVM_CONFIG does not work]) + fi + # and whether the version is supported + if echo $pgac_llvm_version | $AWK -F '.' '{ if ([$]1 >= 4 || ([$]1 == 3 && [$]2 >= 9)) exit 1; else exit 0;}';then + AC_MSG_ERROR([$LLVM_CONFIG version is $pgac_llvm_version but at least 3.9 is required]) + fi + + # need clang to create some bitcode files + AC_ARG_VAR(CLANG, [path to clang compiler to generate bitcode]) + PGAC_PATH_PROGS(CLANG, clang clang-7 clang-6.0 clang-5.0 clang-4.0 clang-3.9) + if test -z "$CLANG"; then + AC_MSG_ERROR([clang not found, but required when compiling --with-llvm, specify with CLANG=]) + fi + # make sure clang is executable + if test "x$($CLANG --version 2> /dev/null || echo no)" = "xno"; then + AC_MSG_ERROR([$CLANG does not work]) + fi + # Could check clang version, but it doesn't seem that + # important. Systems with a new enough LLVM version are usually + # going to have a decent clang version too. It's also not entirely + # clear what the minimum version is. + + # Collect compiler flags necessary to build the LLVM dependent + # shared library. + for pgac_option in `$LLVM_CONFIG --cppflags`; do + case $pgac_option in + -I*|-D*) LLVM_CPPFLAGS="$pgac_option $LLVM_CPPFLAGS";; + esac + done + + for pgac_option in `$LLVM_CONFIG --ldflags`; do + case $pgac_option in + -L*) LDFLAGS="$LDFLAGS $pgac_option";; + esac + done + + # ABI influencing options, standard influencing options + for pgac_option in `$LLVM_CONFIG --cxxflags`; do + case $pgac_option in + -fno-rtti*) LLVM_CXXFLAGS="$LLVM_CXXFLAGS $pgac_option";; + -std=*) LLVM_CXXFLAGS="$LLVM_CXXFLAGS $pgac_option";; + esac + done + + # Look for components we're interested in, collect necessary + # libs. As some components are optional, we can't just list all of + # them as it'd raise an error. 
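# The loops above and below whittle llvm-config output down to just the
# switches PostgreSQL cares about. The same pattern as a standalone sketch;
# llvm-config on PATH and the hard-coded component names are assumptions,
# since the real macro derives its list from `llvm-config --components`:
for opt in `llvm-config --cppflags`; do
    case $opt in
        -I*|-D*) LLVM_CPPFLAGS="$opt $LLVM_CPPFLAGS";;   # keep include/define switches only
    esac
done
for opt in `llvm-config --libs --system-libs engine orcjit`; do
    case $opt in
        -l*) LLVM_LIBS="$LLVM_LIBS $opt";;               # keep -l switches only
    esac
done
echo "cppflags: $LLVM_CPPFLAGS"
echo "libs:     $LLVM_LIBS"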
+ pgac_components=''; + for pgac_component in `$LLVM_CONFIG --components`; do + case $pgac_component in + engine) pgac_components="$pgac_components $pgac_component";; + debuginfodwarf) pgac_components="$pgac_components $pgac_component";; + orcjit) pgac_components="$pgac_components $pgac_component";; + passes) pgac_components="$pgac_components $pgac_component";; + perfjitevents) pgac_components="$pgac_components $pgac_component";; + esac + done; + + # And then get the libraries that need to be linked in for the + # selected components. They're large libraries, we only want to + # link them into the LLVM using shared library. + for pgac_option in `$LLVM_CONFIG --libs --system-libs $pgac_components`; do + case $pgac_option in + -l*) LLVM_LIBS="$LLVM_LIBS $pgac_option";; + esac + done + + LLVM_BINPATH=`$LLVM_CONFIG --bindir` + + # Check which functionality is present + SAVE_CPPFLAGS="$CPPFLAGS" + CPPFLAGS="$CPPFLAGS $LLVM_CPPFLAGS" + AC_CHECK_DECLS([LLVMOrcGetSymbolAddressIn], [], [], [[#include ]]) + AC_CHECK_DECLS([LLVMGetHostCPUName, LLVMGetHostCPUFeatures], [], [], [[#include ]]) + AC_CHECK_DECLS([LLVMCreateGDBRegistrationListener, LLVMCreatePerfJITEventListener], [], [], [[#include ]]) + CPPFLAGS="$SAVE_CPPFLAGS" + + # LLVM_CONFIG, CLANG are already output via AC_ARG_VAR + AC_SUBST(LLVM_LIBS) + AC_SUBST(LLVM_CPPFLAGS) + AC_SUBST(LLVM_CFLAGS) + AC_SUBST(LLVM_CXXFLAGS) + AC_SUBST(LLVM_BINPATH) + +])# PGAC_LLVM_SUPPORT diff --git a/config/perl.m4 b/config/perl.m4 index 8c21d0fb39..caefb0705e 100644 --- a/config/perl.m4 +++ b/config/perl.m4 @@ -48,19 +48,23 @@ AC_DEFUN([PGAC_CHECK_PERL_CONFIGS], # PGAC_CHECK_PERL_EMBED_CCFLAGS # ----------------------------- -# We selectively extract stuff from $Config{ccflags}. We don't really need -# anything except -D switches, and other sorts of compiler switches can -# actively break things if Perl was compiled with a different compiler. -# Moreover, although Perl likes to put stuff like -D_LARGEFILE_SOURCE and -# -D_FILE_OFFSET_BITS=64 here, it would be fatal to try to compile PL/Perl -# to a different libc ABI than core Postgres uses. The available information -# says that all the symbols that affect Perl's own ABI begin with letters, -# so it should be sufficient to adopt -D switches for symbols not beginning -# with underscore. An exception is that we need to let through -# -D_USE_32BIT_TIME_T if it's present. (We probably could restrict that to -# only get through on Windows, but for the moment we let it through always.) -# For debugging purposes, let's have the configure output report the raw -# ccflags value as well as the set of flags we chose to adopt. +# We selectively extract stuff from $Config{ccflags}. For debugging purposes, +# let's have the configure output report the raw ccflags value as well as the +# set of flags we chose to adopt. We don't really need anything except -D +# switches, and other sorts of compiler switches can actively break things if +# Perl was compiled with a different compiler. Moreover, although Perl likes +# to put stuff like -D_LARGEFILE_SOURCE and -D_FILE_OFFSET_BITS=64 here, it +# would be fatal to try to compile PL/Perl to a different libc ABI than core +# Postgres uses. The available information says that most symbols that affect +# Perl's own ABI begin with letters, so it's almost sufficient to adopt -D +# switches for symbols not beginning with underscore. Some exceptions are the +# Windows-specific -D_USE_32BIT_TIME_T and -D__MINGW_USE_VC2005_COMPAT; see +# Mkvcbuild.pm for details. 
We absorb the former when Perl reports it. Perl +# never reports the latter, and we don't attempt to deduce when it's needed. +# Consequently, we don't support using MinGW to link to MSVC-built Perl. As +# of 2017, all supported ActivePerl and Strawberry Perl are MinGW-built. If +# that changes or an MSVC-built Perl distribution becomes prominent, we can +# revisit this limitation. AC_DEFUN([PGAC_CHECK_PERL_EMBED_CCFLAGS], [AC_REQUIRE([PGAC_PATH_PERL]) AC_MSG_CHECKING([for CFLAGS recommended by Perl]) @@ -83,12 +87,19 @@ AC_DEFUN([PGAC_CHECK_PERL_EMBED_LDFLAGS], [AC_REQUIRE([PGAC_PATH_PERL]) AC_MSG_CHECKING(for flags to link embedded Perl) if test "$PORTNAME" = "win32" ; then -perl_lib=`basename $perl_archlibexp/CORE/perl[[5-9]]*.lib .lib` -test -e "$perl_archlibexp/CORE/$perl_lib.lib" && perl_embed_ldflags="-L$perl_archlibexp/CORE -l$perl_lib" + perl_lib=`basename $perl_archlibexp/CORE/perl[[5-9]]*.lib .lib` + if test -e "$perl_archlibexp/CORE/$perl_lib.lib"; then + perl_embed_ldflags="-L$perl_archlibexp/CORE -l$perl_lib" + else + perl_lib=`basename $perl_archlibexp/CORE/libperl[[5-9]]*.a .a | sed 's/^lib//'` + if test -e "$perl_archlibexp/CORE/lib$perl_lib.a"; then + perl_embed_ldflags="-L$perl_archlibexp/CORE -l$perl_lib" + fi + fi else -pgac_tmp1=`$PERL -MExtUtils::Embed -e ldopts` -pgac_tmp2=`$PERL -MConfig -e 'print $Config{ccdlflags}'` -perl_embed_ldflags=`echo X"$pgac_tmp1" | sed -e "s/^X//" -e "s%$pgac_tmp2%%" -e ["s/ -arch [-a-zA-Z0-9_]*//g"]` + pgac_tmp1=`$PERL -MExtUtils::Embed -e ldopts` + pgac_tmp2=`$PERL -MConfig -e 'print $Config{ccdlflags}'` + perl_embed_ldflags=`echo X"$pgac_tmp1" | sed -e "s/^X//" -e "s%$pgac_tmp2%%" -e ["s/ -arch [-a-zA-Z0-9_]*//g"]` fi AC_SUBST(perl_embed_ldflags)dnl if test -z "$perl_embed_ldflags" ; then diff --git a/config/python.m4 b/config/python.m4 index f3c7642229..587bca99d5 100644 --- a/config/python.m4 +++ b/config/python.m4 @@ -22,6 +22,17 @@ fi # as well as the Python version. AC_DEFUN([_PGAC_CHECK_PYTHON_DIRS], [AC_REQUIRE([PGAC_PATH_PYTHON]) +python_fullversion=`${PYTHON} -c "import sys; print(sys.version)" | sed q` +AC_MSG_NOTICE([using python $python_fullversion]) +# python_fullversion is typically n.n.n plus some trailing junk +python_majorversion=`echo "$python_fullversion" | sed '[s/^\([0-9]*\).*/\1/]'` +python_minorversion=`echo "$python_fullversion" | sed '[s/^[0-9]*\.\([0-9]*\).*/\1/]'` +python_version=`echo "$python_fullversion" | sed '[s/^\([0-9]*\.[0-9]*\).*/\1/]'` +# Reject unsupported Python versions as soon as practical. +if test "$python_majorversion" -lt 3 -a "$python_minorversion" -lt 4; then + AC_MSG_ERROR([Python version $python_version is too old (version 2.4 or later is required)]) +fi + AC_MSG_CHECKING([for Python distutils module]) if "${PYTHON}" -c 'import distutils' 2>&AS_MESSAGE_LOG_FD then @@ -30,18 +41,11 @@ else AC_MSG_RESULT(no) AC_MSG_ERROR([distutils module not found]) fi + AC_MSG_CHECKING([Python configuration directory]) -python_majorversion=`${PYTHON} -c "import sys; print(sys.version[[0]])"` -python_minorversion=`${PYTHON} -c "import sys; print(sys.version[[2]])"` -python_version=`${PYTHON} -c "import sys; print(sys.version[[:3]])"` python_configdir=`${PYTHON} -c "import distutils.sysconfig; print(' '.join(filter(None,distutils.sysconfig.get_config_vars('LIBPL'))))"` AC_MSG_RESULT([$python_configdir]) -# Reject unsupported Python versions as soon as practical. 
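# The version probe added above peels the major and minor numbers out of
# sys.version with sed before any other Python checks run. Replayed as plain
# shell, with python3 as the interpreter only by assumption:
python_fullversion=`python3 -c "import sys; print(sys.version)" | sed q`
python_majorversion=`echo "$python_fullversion" | sed 's/^\([0-9]*\).*/\1/'`
python_minorversion=`echo "$python_fullversion" | sed 's/^[0-9]*\.\([0-9]*\).*/\1/'`
echo "major=$python_majorversion minor=$python_minorversion"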
-if test "$python_majorversion" -lt 3 -a "$python_minorversion" -lt 4; then - AC_MSG_ERROR([Python version $python_version is too old (version 2.4 or later is required)]) -fi - AC_MSG_CHECKING([Python include directories]) python_includespec=`${PYTHON} -c " import distutils.sysconfig diff --git a/config/tcl.m4 b/config/tcl.m4 index a4bf231947..581471f338 100644 --- a/config/tcl.m4 +++ b/config/tcl.m4 @@ -13,6 +13,10 @@ fi # PGAC_PATH_TCLCONFIGSH([SEARCH-PATH]) # ------------------------------------ +# If the user doesn't specify $TCL_CONFIG_SH directly, search for it in +# the list of directories passed as parameter (from --with-tclconfig). +# If no list is given, try the Tcl shell's $auto_path. + AC_DEFUN([PGAC_PATH_TCLCONFIGSH], [AC_REQUIRE([PGAC_PATH_TCLSH])[]dnl AC_BEFORE([$0], [PGAC_PATH_TKCONFIGSH])[]dnl @@ -24,7 +28,14 @@ if test -z "$TCL_CONFIG_SH"; then set X $pgac_test_dirs; shift if test $[#] -eq 0; then test -z "$TCLSH" && AC_MSG_ERROR([unable to locate tclConfig.sh because no Tcl shell was found]) - set X `echo 'puts $auto_path' | $TCLSH`; shift + pgac_test_dirs=`echo 'puts $auto_path' | $TCLSH` + # On newer macOS, $auto_path frequently doesn't include the place + # where tclConfig.sh actually lives. Append that to the end, so as not + # to break cases where a non-default Tcl installation is being used. + if test -d "$PG_SYSROOT/System/Library/Frameworks/Tcl.framework" ; then + pgac_test_dirs="$pgac_test_dirs $PG_SYSROOT/System/Library/Frameworks/Tcl.framework" + fi + set X $pgac_test_dirs; shift fi for pgac_dir do diff --git a/configure b/configure index a2f9a256b4..aa341ddf00 100755 --- a/configure +++ b/configure @@ -1,6 +1,6 @@ #! /bin/sh # Guess values for system-dependent variables and create Makefiles. -# Generated by GNU Autoconf 2.69 for PostgreSQL 11devel. +# Generated by GNU Autoconf 2.69 for PostgreSQL 12devel. # # Report bugs to . # @@ -11,7 +11,7 @@ # This configure script is free software; the Free Software Foundation # gives unlimited permission to copy, distribute and modify it. # -# Copyright (c) 1996-2017, PostgreSQL Global Development Group +# Copyright (c) 1996-2018, PostgreSQL Global Development Group ## -------------------- ## ## M4sh Initialization. ## ## -------------------- ## @@ -582,8 +582,8 @@ MAKEFLAGS= # Identity of this package. 
PACKAGE_NAME='PostgreSQL' PACKAGE_TARNAME='postgresql' -PACKAGE_VERSION='11devel' -PACKAGE_STRING='PostgreSQL 11devel' +PACKAGE_VERSION='12devel' +PACKAGE_STRING='PostgreSQL 12devel' PACKAGE_BUGREPORT='pgsql-bugs@postgresql.org' PACKAGE_URL='' @@ -627,15 +627,14 @@ ac_includes_default="\ ac_subst_vars='LTLIBOBJS vpath_build +PG_SYSROOT PG_VERSION_NUM PROVE FOP -OSX XSLTPROC -XMLLINT DBTOEPUB have_docbook -NSGMLS +XMLLINT TCL_SHLIB_LD_LIBS TCL_SHARED_BUILD TCL_LIB_SPEC @@ -648,9 +647,9 @@ MSGMERGE MSGFMT_FLAGS MSGFMT PG_CRC32C_OBJS +CFLAGS_ARMV8_CRC32C CFLAGS_SSE42 have_win32_dbghelp -HAVE_IPV6 LIBOBJS UUID_LIBS LDAP_LIBS_BE @@ -669,6 +668,7 @@ python_majorversion PYTHON perl_embed_ldflags perl_embed_ccflags +perl_includespec perl_useshrplib perl_privlibexp perl_archlibexp @@ -678,7 +678,6 @@ FLEX BISONFLAGS BISON MKDIR_P -AWK LN_S TAR install_bin @@ -710,7 +709,10 @@ with_uuid with_systemd with_selinux with_openssl +with_ldap +with_krb_srvnam krb_srvtab +with_gssapi with_python with_perl with_tcl @@ -726,8 +728,22 @@ autodepend TAS GCC CPP +BITCODE_CXXFLAGS +BITCODE_CFLAGS CFLAGS_VECTOR +LLVM_BINPATH +LLVM_CXXFLAGS +LLVM_CFLAGS +LLVM_CPPFLAGS +LLVM_LIBS +CLANG +LLVM_CONFIG +AWK +with_llvm SUN_STUDIO_CC +ac_ct_CXX +CXXFLAGS +CXX OBJEXT EXEEXT ac_ct_CC @@ -821,8 +837,8 @@ enable_tap_tests with_blocksize with_segsize with_wal_blocksize -with_wal_segsize with_CC +with_llvm enable_depend enable_cassert enable_thread_safety @@ -861,6 +877,11 @@ CFLAGS LDFLAGS LIBS CPPFLAGS +CXX +CXXFLAGS +CCC +LLVM_CONFIG +CLANG CPP PKG_CONFIG PKG_CONFIG_PATH @@ -1409,7 +1430,7 @@ if test "$ac_init_help" = "long"; then # Omit some internal or obsolete options to make the list less imposing. # This message is too long to be a string in the A/UX 3.1 sh. cat <<_ACEOF -\`configure' configures PostgreSQL 11devel to adapt to many kinds of systems. +\`configure' configures PostgreSQL 12devel to adapt to many kinds of systems. Usage: $0 [OPTION]... [VAR=VALUE]... @@ -1474,7 +1495,7 @@ fi if test -n "$ac_init_help"; then case $ac_init_help in - short | recursive ) echo "Configuration of PostgreSQL 11devel:";; + short | recursive ) echo "Configuration of PostgreSQL 12devel:";; esac cat <<\_ACEOF @@ -1518,9 +1539,8 @@ Optional Packages: --with-segsize=SEGSIZE set table segment size in GB [1] --with-wal-blocksize=BLOCKSIZE set WAL block size in kB [8] - --with-wal-segsize=SEGSIZE - set WAL segment size in MB [16] --with-CC=CMD set compiler (deprecated) + --with-llvm build with LLVM based JIT support --with-icu build with ICU support --with-tcl build Tcl modules (PL/Tcl) --with-tclconfig=DIR tclConfig.sh is in DIR @@ -1556,6 +1576,10 @@ Some influential environment variables: LIBS libraries to pass to the linker, e.g. -l CPPFLAGS (Objective) C/C++ preprocessor flags, e.g. -I if you have headers in a nonstandard directory + CXX C++ compiler command + CXXFLAGS C++ compiler flags + LLVM_CONFIG path to llvm-config command + CLANG path to clang compiler to generate bitcode CPP C preprocessor PKG_CONFIG path to pkg-config utility PKG_CONFIG_PATH @@ -1633,14 +1657,14 @@ fi test -n "$ac_init_help" && exit $ac_status if $ac_init_version; then cat <<\_ACEOF -PostgreSQL configure 11devel +PostgreSQL configure 12devel generated by GNU Autoconf 2.69 Copyright (C) 2012 Free Software Foundation, Inc. This configure script is free software; the Free Software Foundation gives unlimited permission to copy, distribute and modify it. 
-Copyright (c) 1996-2017, PostgreSQL Global Development Group +Copyright (c) 1996-2018, PostgreSQL Global Development Group _ACEOF exit fi @@ -1687,6 +1711,90 @@ fi } # ac_fn_c_try_compile +# ac_fn_cxx_try_compile LINENO +# ---------------------------- +# Try to compile conftest.$ac_ext, and return whether this succeeded. +ac_fn_cxx_try_compile () +{ + as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack + rm -f conftest.$ac_objext + if { { ac_try="$ac_compile" +case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; +esac +eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" +$as_echo "$ac_try_echo"; } >&5 + (eval "$ac_compile") 2>conftest.err + ac_status=$? + if test -s conftest.err; then + grep -v '^ *+' conftest.err >conftest.er1 + cat conftest.er1 >&5 + mv -f conftest.er1 conftest.err + fi + $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 + test $ac_status = 0; } && { + test -z "$ac_cxx_werror_flag" || + test ! -s conftest.err + } && test -s conftest.$ac_objext; then : + ac_retval=0 +else + $as_echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 + + ac_retval=1 +fi + eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno + as_fn_set_status $ac_retval + +} # ac_fn_cxx_try_compile + +# ac_fn_c_check_decl LINENO SYMBOL VAR INCLUDES +# --------------------------------------------- +# Tests whether SYMBOL is declared in INCLUDES, setting cache variable VAR +# accordingly. +ac_fn_c_check_decl () +{ + as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack + as_decl_name=`echo $2|sed 's/ *(.*//'` + as_decl_use=`echo $2|sed -e 's/(/((/' -e 's/)/) 0&/' -e 's/,/) 0& (/g'` + { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether $as_decl_name is declared" >&5 +$as_echo_n "checking whether $as_decl_name is declared... " >&6; } +if eval \${$3+:} false; then : + $as_echo_n "(cached) " >&6 +else + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ +$4 +int +main () +{ +#ifndef $as_decl_name +#ifdef __cplusplus + (void) $as_decl_use; +#else + (void) $as_decl_name; +#endif +#endif + + ; + return 0; +} +_ACEOF +if ac_fn_c_try_compile "$LINENO"; then : + eval "$3=yes" +else + eval "$3=no" +fi +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext +fi +eval ac_res=\$$3 + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5 +$as_echo "$ac_res" >&6; } + eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno + +} # ac_fn_c_check_decl + # ac_fn_c_try_link LINENO # ----------------------- # Try to link conftest.$ac_ext, and return whether this succeeded. @@ -2001,116 +2109,116 @@ $as_echo "$ac_res" >&6; } } # ac_fn_c_check_func -# ac_fn_c_check_member LINENO AGGR MEMBER VAR INCLUDES -# ---------------------------------------------------- -# Tries to find if the field MEMBER exists in type AGGR, after including -# INCLUDES, setting cache variable VAR accordingly. -ac_fn_c_check_member () +# ac_fn_c_check_type LINENO TYPE VAR INCLUDES +# ------------------------------------------- +# Tests whether TYPE exists after having included INCLUDES, setting cache +# variable VAR accordingly. +ac_fn_c_check_type () { as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack - { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $2.$3" >&5 -$as_echo_n "checking for $2.$3... " >&6; } -if eval \${$4+:} false; then : + { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $2" >&5 +$as_echo_n "checking for $2... 
" >&6; } +if eval \${$3+:} false; then : $as_echo_n "(cached) " >&6 else + eval "$3=no" cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ -$5 +$4 int main () { -static $2 ac_aggr; -if (ac_aggr.$3) -return 0; +if (sizeof ($2)) + return 0; ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : - eval "$4=yes" -else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ -$5 +$4 int main () { -static $2 ac_aggr; -if (sizeof ac_aggr.$3) -return 0; +if (sizeof (($2))) + return 0; ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : - eval "$4=yes" + else - eval "$4=no" + eval "$3=yes" fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext fi -eval ac_res=\$$4 +eval ac_res=\$$3 { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5 $as_echo "$ac_res" >&6; } eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno -} # ac_fn_c_check_member +} # ac_fn_c_check_type -# ac_fn_c_check_type LINENO TYPE VAR INCLUDES -# ------------------------------------------- -# Tests whether TYPE exists after having included INCLUDES, setting cache -# variable VAR accordingly. -ac_fn_c_check_type () +# ac_fn_c_check_member LINENO AGGR MEMBER VAR INCLUDES +# ---------------------------------------------------- +# Tries to find if the field MEMBER exists in type AGGR, after including +# INCLUDES, setting cache variable VAR accordingly. +ac_fn_c_check_member () { as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack - { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $2" >&5 -$as_echo_n "checking for $2... " >&6; } -if eval \${$3+:} false; then : + { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $2.$3" >&5 +$as_echo_n "checking for $2.$3... " >&6; } +if eval \${$4+:} false; then : $as_echo_n "(cached) " >&6 else - eval "$3=no" cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ -$4 +$5 int main () { -if (sizeof ($2)) - return 0; +static $2 ac_aggr; +if (ac_aggr.$3) +return 0; ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : + eval "$4=yes" +else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ -$4 +$5 int main () { -if (sizeof (($2))) - return 0; +static $2 ac_aggr; +if (sizeof ac_aggr.$3) +return 0; ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : - + eval "$4=yes" else - eval "$3=yes" + eval "$4=no" fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext fi -eval ac_res=\$$3 +eval ac_res=\$$4 { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5 $as_echo "$ac_res" >&6; } eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno -} # ac_fn_c_check_type +} # ac_fn_c_check_member # ac_fn_c_compute_int LINENO EXPR VAR INCLUDES # -------------------------------------------- @@ -2294,57 +2402,11 @@ rm -f conftest.val as_fn_set_status $ac_retval } # ac_fn_c_compute_int - -# ac_fn_c_check_decl LINENO SYMBOL VAR INCLUDES -# --------------------------------------------- -# Tests whether SYMBOL is declared in INCLUDES, setting cache variable VAR -# accordingly. -ac_fn_c_check_decl () -{ - as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack - as_decl_name=`echo $2|sed 's/ *(.*//'` - as_decl_use=`echo $2|sed -e 's/(/((/' -e 's/)/) 0&/' -e 's/,/) 0& (/g'` - { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether $as_decl_name is declared" >&5 -$as_echo_n "checking whether $as_decl_name is declared... 
" >&6; } -if eval \${$3+:} false; then : - $as_echo_n "(cached) " >&6 -else - cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. */ -$4 -int -main () -{ -#ifndef $as_decl_name -#ifdef __cplusplus - (void) $as_decl_use; -#else - (void) $as_decl_name; -#endif -#endif - - ; - return 0; -} -_ACEOF -if ac_fn_c_try_compile "$LINENO"; then : - eval "$3=yes" -else - eval "$3=no" -fi -rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext -fi -eval ac_res=\$$3 - { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5 -$as_echo "$ac_res" >&6; } - eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno - -} # ac_fn_c_check_decl cat >config.log <<_ACEOF This file contains any messages produced by compilers while running configure, to aid debugging if configure makes a mistake. -It was created by PostgreSQL $as_me 11devel, which was +It was created by PostgreSQL $as_me 12devel, which was generated by GNU Autoconf 2.69. Invocation command line was $ $0 $@ @@ -3733,57 +3795,6 @@ cat >>confdefs.h <<_ACEOF _ACEOF -# -# WAL segment size -# -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for WAL segment size" >&5 -$as_echo_n "checking for WAL segment size... " >&6; } - - - -# Check whether --with-wal-segsize was given. -if test "${with_wal_segsize+set}" = set; then : - withval=$with_wal_segsize; - case $withval in - yes) - as_fn_error $? "argument required for --with-wal-segsize option" "$LINENO" 5 - ;; - no) - as_fn_error $? "argument required for --with-wal-segsize option" "$LINENO" 5 - ;; - *) - wal_segsize=$withval - ;; - esac - -else - wal_segsize=16 -fi - - -case ${wal_segsize} in - 1) ;; - 2) ;; - 4) ;; - 8) ;; - 16) ;; - 32) ;; - 64) ;; - 128) ;; - 256) ;; - 512) ;; - 1024) ;; - *) as_fn_error $? "Invalid WAL segment size. Allowed values are 1,2,4,8,16,32,64,128,256,512,1024." "$LINENO" 5 -esac -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: ${wal_segsize}MB" >&5 -$as_echo "${wal_segsize}MB" >&6; } - - -cat >>confdefs.h <<_ACEOF -#define XLOG_SEG_SIZE (${wal_segsize} * 1024 * 1024) -_ACEOF - - # # C compiler # @@ -3814,8 +3825,8 @@ fi case $template in - aix) pgac_cc_list="gcc xlc";; - *) pgac_cc_list="gcc cc";; + aix) pgac_cc_list="gcc xlc"; pgac_cxx_list="g++ xlC";; + *) pgac_cc_list="gcc cc"; pgac_cxx_list="g++ c++";; esac ac_ext=c @@ -4415,108 +4426,1644 @@ ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' ac_compiler_gnu=$ac_cv_c_compiler_gnu - -# Check if it's Intel's compiler, which (usually) pretends to be gcc, -# but has idiosyncrasies of its own. We assume icc will define -# __INTEL_COMPILER regardless of CFLAGS. - + { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $CC option to accept ISO C99" >&5 +$as_echo_n "checking for $CC option to accept ISO C99... " >&6; } +if ${ac_cv_prog_cc_c99+:} false; then : + $as_echo_n "(cached) " >&6 +else + ac_cv_prog_cc_c99=no +ac_save_CC=$CC cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ +#include +#include +#include +#include +#include -int -main () +// Check varargs macros. These examples are taken from C99 6.10.3.5. +#define debug(...) fprintf (stderr, __VA_ARGS__) +#define showlist(...) puts (#__VA_ARGS__) +#define report(test,...) ((test) ? 
puts (#test) : printf (__VA_ARGS__)) +static void +test_varargs_macros (void) { -#ifndef __INTEL_COMPILER -choke me + int x = 1234; + int y = 5678; + debug ("Flag"); + debug ("X = %d\n", x); + showlist (The first, second, and third items.); + report (x>y, "x is %d but y is %d", x, y); +} + +// Check long long types. +#define BIG64 18446744073709551615ull +#define BIG32 4294967295ul +#define BIG_OK (BIG64 / BIG32 == 4294967297ull && BIG64 % BIG32 == 0) +#if !BIG_OK + your preprocessor is broken; #endif +#if BIG_OK +#else + your preprocessor is broken; +#endif +static long long int bignum = -9223372036854775807LL; +static unsigned long long int ubignum = BIG64; + +struct incomplete_array +{ + int datasize; + double data[]; +}; + +struct named_init { + int number; + const wchar_t *name; + double average; +}; + +typedef const char *ccp; + +static inline int +test_restrict (ccp restrict text) +{ + // See if C++-style comments work. + // Iterate through items via the restricted pointer. + // Also check for declarations in for loops. + for (unsigned int i = 0; *(text+i) != '\0'; ++i) + continue; + return 0; +} + +// Check varargs and va_copy. +static void +test_varargs (const char *format, ...) +{ + va_list args; + va_start (args, format); + va_list args_copy; + va_copy (args_copy, args); + + const char *str; + int number; + float fnumber; + + while (*format) + { + switch (*format++) + { + case 's': // string + str = va_arg (args_copy, const char *); + break; + case 'd': // int + number = va_arg (args_copy, int); + break; + case 'f': // float + fnumber = va_arg (args_copy, double); + break; + default: + break; + } + } + va_end (args_copy); + va_end (args); +} + +int +main () +{ + + // Check bool. + _Bool success = false; + + // Check restrict. + if (test_restrict ("String literal") == 0) + success = true; + char *restrict newvar = "Another string"; + + // Check varargs. + test_varargs ("s, d' f .", "string", 65, 34.234); + test_varargs_macros (); + + // Check flexible array members. + struct incomplete_array *ia = + malloc (sizeof (struct incomplete_array) + (sizeof (double) * 10)); + ia->datasize = 10; + for (int i = 0; i < ia->datasize; ++i) + ia->data[i] = i * 1.234; + + // Check named initializers. + struct named_init ni = { + .number = 34, + .name = L"Test wide string", + .average = 543.34343, + }; + + ni.number = 58; + + int dynamic_array[ni.number]; + dynamic_array[ni.number - 1] = 543; + + // work around unused variable warnings + return (!success || bignum == 0LL || ubignum == 0uLL || newvar[0] == 'x' + || dynamic_array[ni.number - 1] != 543); + + ; + return 0; +} +_ACEOF +for ac_arg in '' -std=gnu99 -std=c99 -c99 -AC99 -D_STDC_C99= -qlanglvl=extc99 +do + CC="$ac_save_CC $ac_arg" + if ac_fn_c_try_compile "$LINENO"; then : + ac_cv_prog_cc_c99=$ac_arg +fi +rm -f core conftest.err conftest.$ac_objext + test "x$ac_cv_prog_cc_c99" != "xno" && break +done +rm -f conftest.$ac_ext +CC=$ac_save_CC + +fi +# AC_CACHE_VAL +case "x$ac_cv_prog_cc_c99" in + x) + { $as_echo "$as_me:${as_lineno-$LINENO}: result: none needed" >&5 +$as_echo "none needed" >&6; } ;; + xno) + { $as_echo "$as_me:${as_lineno-$LINENO}: result: unsupported" >&5 +$as_echo "unsupported" >&6; } ;; + *) + CC="$CC $ac_cv_prog_cc_c99" + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_prog_cc_c99" >&5 +$as_echo "$ac_cv_prog_cc_c99" >&6; } ;; +esac +if test "x$ac_cv_prog_cc_c99" != xno; then : + +fi + + + +# Error out if the compiler does not support C99, as the codebase +# relies on that. 
+if test "$ac_cv_prog_cc_c99" = no; then + as_fn_error $? "C compiler \"$CC\" does not support C99" "$LINENO" 5 +fi + +ac_ext=cpp +ac_cpp='$CXXCPP $CPPFLAGS' +ac_compile='$CXX -c $CXXFLAGS $CPPFLAGS conftest.$ac_ext >&5' +ac_link='$CXX -o conftest$ac_exeext $CXXFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' +ac_compiler_gnu=$ac_cv_cxx_compiler_gnu +if test -z "$CXX"; then + if test -n "$CCC"; then + CXX=$CCC + else + if test -n "$ac_tool_prefix"; then + for ac_prog in $pgac_cxx_list + do + # Extract the first word of "$ac_tool_prefix$ac_prog", so it can be a program name with args. +set dummy $ac_tool_prefix$ac_prog; ac_word=$2 +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +$as_echo_n "checking for $ac_word... " >&6; } +if ${ac_cv_prog_CXX+:} false; then : + $as_echo_n "(cached) " >&6 +else + if test -n "$CXX"; then + ac_cv_prog_CXX="$CXX" # Let the user override the test. +else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + for ac_exec_ext in '' $ac_executable_extensions; do + if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then + ac_cv_prog_CXX="$ac_tool_prefix$ac_prog" + $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done + done +IFS=$as_save_IFS + +fi +fi +CXX=$ac_cv_prog_CXX +if test -n "$CXX"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $CXX" >&5 +$as_echo "$CXX" >&6; } +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } +fi + + + test -n "$CXX" && break + done +fi +if test -z "$CXX"; then + ac_ct_CXX=$CXX + for ac_prog in $pgac_cxx_list +do + # Extract the first word of "$ac_prog", so it can be a program name with args. +set dummy $ac_prog; ac_word=$2 +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +$as_echo_n "checking for $ac_word... " >&6; } +if ${ac_cv_prog_ac_ct_CXX+:} false; then : + $as_echo_n "(cached) " >&6 +else + if test -n "$ac_ct_CXX"; then + ac_cv_prog_ac_ct_CXX="$ac_ct_CXX" # Let the user override the test. +else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + for ac_exec_ext in '' $ac_executable_extensions; do + if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then + ac_cv_prog_ac_ct_CXX="$ac_prog" + $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done + done +IFS=$as_save_IFS + +fi +fi +ac_ct_CXX=$ac_cv_prog_ac_ct_CXX +if test -n "$ac_ct_CXX"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_CXX" >&5 +$as_echo "$ac_ct_CXX" >&6; } +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } +fi + + + test -n "$ac_ct_CXX" && break +done + + if test "x$ac_ct_CXX" = x; then + CXX="g++" + else + case $cross_compiling:$ac_tool_warned in +yes:) +{ $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 +$as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} +ac_tool_warned=yes ;; +esac + CXX=$ac_ct_CXX + fi +fi + + fi +fi +# Provide some information about the compiler. 
+$as_echo "$as_me:${as_lineno-$LINENO}: checking for C++ compiler version" >&5 +set X $ac_compile +ac_compiler=$2 +for ac_option in --version -v -V -qversion; do + { { ac_try="$ac_compiler $ac_option >&5" +case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; +esac +eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" +$as_echo "$ac_try_echo"; } >&5 + (eval "$ac_compiler $ac_option >&5") 2>conftest.err + ac_status=$? + if test -s conftest.err; then + sed '10a\ +... rest of stderr output deleted ... + 10q' conftest.err >conftest.er1 + cat conftest.er1 >&5 + fi + rm -f conftest.er1 conftest.err + $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 + test $ac_status = 0; } +done + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether we are using the GNU C++ compiler" >&5 +$as_echo_n "checking whether we are using the GNU C++ compiler... " >&6; } +if ${ac_cv_cxx_compiler_gnu+:} false; then : + $as_echo_n "(cached) " >&6 +else + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + +int +main () +{ +#ifndef __GNUC__ + choke me +#endif + + ; + return 0; +} +_ACEOF +if ac_fn_cxx_try_compile "$LINENO"; then : + ac_compiler_gnu=yes +else + ac_compiler_gnu=no +fi +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext +ac_cv_cxx_compiler_gnu=$ac_compiler_gnu + +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_cxx_compiler_gnu" >&5 +$as_echo "$ac_cv_cxx_compiler_gnu" >&6; } +if test $ac_compiler_gnu = yes; then + GXX=yes +else + GXX= +fi +ac_test_CXXFLAGS=${CXXFLAGS+set} +ac_save_CXXFLAGS=$CXXFLAGS +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether $CXX accepts -g" >&5 +$as_echo_n "checking whether $CXX accepts -g... " >&6; } +if ${ac_cv_prog_cxx_g+:} false; then : + $as_echo_n "(cached) " >&6 +else + ac_save_cxx_werror_flag=$ac_cxx_werror_flag + ac_cxx_werror_flag=yes + ac_cv_prog_cxx_g=no + CXXFLAGS="-g" + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + +int +main () +{ + + ; + return 0; +} +_ACEOF +if ac_fn_cxx_try_compile "$LINENO"; then : + ac_cv_prog_cxx_g=yes +else + CXXFLAGS="" + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + +int +main () +{ + + ; + return 0; +} +_ACEOF +if ac_fn_cxx_try_compile "$LINENO"; then : + +else + ac_cxx_werror_flag=$ac_save_cxx_werror_flag + CXXFLAGS="-g" + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + +int +main () +{ + + ; + return 0; +} +_ACEOF +if ac_fn_cxx_try_compile "$LINENO"; then : + ac_cv_prog_cxx_g=yes +fi +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext +fi +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext +fi +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext + ac_cxx_werror_flag=$ac_save_cxx_werror_flag +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_prog_cxx_g" >&5 +$as_echo "$ac_cv_prog_cxx_g" >&6; } +if test "$ac_test_CXXFLAGS" = set; then + CXXFLAGS=$ac_save_CXXFLAGS +elif test $ac_cv_prog_cxx_g = yes; then + if test "$GXX" = yes; then + CXXFLAGS="-g -O2" + else + CXXFLAGS="-g" + fi +else + if test "$GXX" = yes; then + CXXFLAGS="-O2" + else + CXXFLAGS= + fi +fi +ac_ext=c +ac_cpp='$CPP $CPPFLAGS' +ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' +ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' +ac_compiler_gnu=$ac_cv_c_compiler_gnu + + +# Check if it's Intel's compiler, which (usually) pretends to be gcc, +# but has idiosyncrasies of its own. 
We assume icc will define +# __INTEL_COMPILER regardless of CFLAGS. + +cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + +int +main () +{ +#ifndef __INTEL_COMPILER +choke me +#endif + ; + return 0; +} +_ACEOF +if ac_fn_c_try_compile "$LINENO"; then : + ICC=yes +else + ICC=no +fi +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext + +# Check if it's Sun Studio compiler. We assume that +# __SUNPRO_C will be defined for Sun Studio compilers +cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + +int +main () +{ +#ifndef __SUNPRO_C +choke me +#endif + ; + return 0; +} +_ACEOF +if ac_fn_c_try_compile "$LINENO"; then : + SUN_STUDIO_CC=yes +else + SUN_STUDIO_CC=no +fi +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext + + + + +# +# LLVM +# +# Checked early because subsequent tests depend on it. + + + +# Check whether --with-llvm was given. +if test "${with_llvm+set}" = set; then : + withval=$with_llvm; + case $withval in + yes) + +$as_echo "#define USE_LLVM 1" >>confdefs.h + + ;; + no) + : + ;; + *) + as_fn_error $? "no argument expected for --with-llvm option" "$LINENO" 5 + ;; + esac + +else + with_llvm=no + +fi + + + +if test "$with_llvm" = yes ; then + for ac_prog in gawk mawk nawk awk +do + # Extract the first word of "$ac_prog", so it can be a program name with args. +set dummy $ac_prog; ac_word=$2 +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +$as_echo_n "checking for $ac_word... " >&6; } +if ${ac_cv_prog_AWK+:} false; then : + $as_echo_n "(cached) " >&6 +else + if test -n "$AWK"; then + ac_cv_prog_AWK="$AWK" # Let the user override the test. +else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + for ac_exec_ext in '' $ac_executable_extensions; do + if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then + ac_cv_prog_AWK="$ac_prog" + $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done + done +IFS=$as_save_IFS + +fi +fi +AWK=$ac_cv_prog_AWK +if test -n "$AWK"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $AWK" >&5 +$as_echo "$AWK" >&6; } +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } +fi + + + test -n "$AWK" && break +done + + + + + + if test -z "$LLVM_CONFIG"; then + for ac_prog in llvm-config llvm-config-7 llvm-config-6.0 llvm-config-5.0 llvm-config-4.0 llvm-config-3.9 +do + # Extract the first word of "$ac_prog", so it can be a program name with args. +set dummy $ac_prog; ac_word=$2 +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +$as_echo_n "checking for $ac_word... " >&6; } +if ${ac_cv_path_LLVM_CONFIG+:} false; then : + $as_echo_n "(cached) " >&6 +else + case $LLVM_CONFIG in + [\\/]* | ?:[\\/]*) + ac_cv_path_LLVM_CONFIG="$LLVM_CONFIG" # Let the user override the test with a path. + ;; + *) + as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. 
+ for ac_exec_ext in '' $ac_executable_extensions; do + if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then + ac_cv_path_LLVM_CONFIG="$as_dir/$ac_word$ac_exec_ext" + $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done + done +IFS=$as_save_IFS + + ;; +esac +fi +LLVM_CONFIG=$ac_cv_path_LLVM_CONFIG +if test -n "$LLVM_CONFIG"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $LLVM_CONFIG" >&5 +$as_echo "$LLVM_CONFIG" >&6; } +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } +fi + + + test -n "$LLVM_CONFIG" && break +done + +else + # Report the value of LLVM_CONFIG in configure's output in all cases. + { $as_echo "$as_me:${as_lineno-$LINENO}: checking for LLVM_CONFIG" >&5 +$as_echo_n "checking for LLVM_CONFIG... " >&6; } + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $LLVM_CONFIG" >&5 +$as_echo "$LLVM_CONFIG" >&6; } +fi + + + # no point continuing if llvm wasn't found + if test -z "$LLVM_CONFIG"; then + as_fn_error $? "llvm-config not found, but required when compiling --with-llvm, specify with LLVM_CONFIG=" "$LINENO" 5 + fi + # check if detected $LLVM_CONFIG is executable + pgac_llvm_version="$($LLVM_CONFIG --version 2> /dev/null || echo no)" + if test "x$pgac_llvm_version" = "xno"; then + as_fn_error $? "$LLVM_CONFIG does not work" "$LINENO" 5 + fi + # and whether the version is supported + if echo $pgac_llvm_version | $AWK -F '.' '{ if ($1 >= 4 || ($1 == 3 && $2 >= 9)) exit 1; else exit 0;}';then + as_fn_error $? "$LLVM_CONFIG version is $pgac_llvm_version but at least 3.9 is required" "$LINENO" 5 + fi + + # need clang to create some bitcode files + + if test -z "$CLANG"; then + for ac_prog in clang clang-7 clang-6.0 clang-5.0 clang-4.0 clang-3.9 +do + # Extract the first word of "$ac_prog", so it can be a program name with args. +set dummy $ac_prog; ac_word=$2 +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +$as_echo_n "checking for $ac_word... " >&6; } +if ${ac_cv_path_CLANG+:} false; then : + $as_echo_n "(cached) " >&6 +else + case $CLANG in + [\\/]* | ?:[\\/]*) + ac_cv_path_CLANG="$CLANG" # Let the user override the test with a path. + ;; + *) + as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + for ac_exec_ext in '' $ac_executable_extensions; do + if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then + ac_cv_path_CLANG="$as_dir/$ac_word$ac_exec_ext" + $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done + done +IFS=$as_save_IFS + + ;; +esac +fi +CLANG=$ac_cv_path_CLANG +if test -n "$CLANG"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $CLANG" >&5 +$as_echo "$CLANG" >&6; } +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } +fi + + + test -n "$CLANG" && break +done + +else + # Report the value of CLANG in configure's output in all cases. + { $as_echo "$as_me:${as_lineno-$LINENO}: checking for CLANG" >&5 +$as_echo_n "checking for CLANG... " >&6; } + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $CLANG" >&5 +$as_echo "$CLANG" >&6; } +fi + + if test -z "$CLANG"; then + as_fn_error $? "clang not found, but required when compiling --with-llvm, specify with CLANG=" "$LINENO" 5 + fi + # make sure clang is executable + if test "x$($CLANG --version 2> /dev/null || echo no)" = "xno"; then + as_fn_error $? "$CLANG does not work" "$LINENO" 5 + fi + # Could check clang version, but it doesn't seem that + # important. 
Systems with a new enough LLVM version are usually + # going to have a decent clang version too. It's also not entirely + # clear what the minimum version is. + + # Collect compiler flags necessary to build the LLVM dependent + # shared library. + for pgac_option in `$LLVM_CONFIG --cppflags`; do + case $pgac_option in + -I*|-D*) LLVM_CPPFLAGS="$pgac_option $LLVM_CPPFLAGS";; + esac + done + + for pgac_option in `$LLVM_CONFIG --ldflags`; do + case $pgac_option in + -L*) LDFLAGS="$LDFLAGS $pgac_option";; + esac + done + + # ABI influencing options, standard influencing options + for pgac_option in `$LLVM_CONFIG --cxxflags`; do + case $pgac_option in + -fno-rtti*) LLVM_CXXFLAGS="$LLVM_CXXFLAGS $pgac_option";; + -std=*) LLVM_CXXFLAGS="$LLVM_CXXFLAGS $pgac_option";; + esac + done + + # Look for components we're interested in, collect necessary + # libs. As some components are optional, we can't just list all of + # them as it'd raise an error. + pgac_components=''; + for pgac_component in `$LLVM_CONFIG --components`; do + case $pgac_component in + engine) pgac_components="$pgac_components $pgac_component";; + debuginfodwarf) pgac_components="$pgac_components $pgac_component";; + orcjit) pgac_components="$pgac_components $pgac_component";; + passes) pgac_components="$pgac_components $pgac_component";; + perfjitevents) pgac_components="$pgac_components $pgac_component";; + esac + done; + + # And then get the libraries that need to be linked in for the + # selected components. They're large libraries, we only want to + # link them into the LLVM using shared library. + for pgac_option in `$LLVM_CONFIG --libs --system-libs $pgac_components`; do + case $pgac_option in + -l*) LLVM_LIBS="$LLVM_LIBS $pgac_option";; + esac + done + + LLVM_BINPATH=`$LLVM_CONFIG --bindir` + + # Check which functionality is present + SAVE_CPPFLAGS="$CPPFLAGS" + CPPFLAGS="$CPPFLAGS $LLVM_CPPFLAGS" + ac_fn_c_check_decl "$LINENO" "LLVMOrcGetSymbolAddressIn" "ac_cv_have_decl_LLVMOrcGetSymbolAddressIn" "#include +" +if test "x$ac_cv_have_decl_LLVMOrcGetSymbolAddressIn" = xyes; then : + ac_have_decl=1 +else + ac_have_decl=0 +fi + +cat >>confdefs.h <<_ACEOF +#define HAVE_DECL_LLVMORCGETSYMBOLADDRESSIN $ac_have_decl +_ACEOF + + ac_fn_c_check_decl "$LINENO" "LLVMGetHostCPUName" "ac_cv_have_decl_LLVMGetHostCPUName" "#include +" +if test "x$ac_cv_have_decl_LLVMGetHostCPUName" = xyes; then : + ac_have_decl=1 +else + ac_have_decl=0 +fi + +cat >>confdefs.h <<_ACEOF +#define HAVE_DECL_LLVMGETHOSTCPUNAME $ac_have_decl +_ACEOF +ac_fn_c_check_decl "$LINENO" "LLVMGetHostCPUFeatures" "ac_cv_have_decl_LLVMGetHostCPUFeatures" "#include +" +if test "x$ac_cv_have_decl_LLVMGetHostCPUFeatures" = xyes; then : + ac_have_decl=1 +else + ac_have_decl=0 +fi + +cat >>confdefs.h <<_ACEOF +#define HAVE_DECL_LLVMGETHOSTCPUFEATURES $ac_have_decl +_ACEOF + + ac_fn_c_check_decl "$LINENO" "LLVMCreateGDBRegistrationListener" "ac_cv_have_decl_LLVMCreateGDBRegistrationListener" "#include +" +if test "x$ac_cv_have_decl_LLVMCreateGDBRegistrationListener" = xyes; then : + ac_have_decl=1 +else + ac_have_decl=0 +fi + +cat >>confdefs.h <<_ACEOF +#define HAVE_DECL_LLVMCREATEGDBREGISTRATIONLISTENER $ac_have_decl +_ACEOF +ac_fn_c_check_decl "$LINENO" "LLVMCreatePerfJITEventListener" "ac_cv_have_decl_LLVMCreatePerfJITEventListener" "#include +" +if test "x$ac_cv_have_decl_LLVMCreatePerfJITEventListener" = xyes; then : + ac_have_decl=1 +else + ac_have_decl=0 +fi + +cat >>confdefs.h <<_ACEOF +#define HAVE_DECL_LLVMCREATEPERFJITEVENTLISTENER $ac_have_decl +_ACEOF + + 
CPPFLAGS="$SAVE_CPPFLAGS" + + # LLVM_CONFIG, CLANG are already output via AC_ARG_VAR + + + + + + + +fi + + +unset CFLAGS +unset CXXFLAGS + +# +# Read the template +# +. "$srcdir/src/template/$template" || exit + +# C[XX]FLAGS are selected so: +# If the user specifies something in the environment, that is used. +# else: If the template file set something, that is used. +# else: If coverage was enabled, don't set anything. +# else: If the compiler is GCC, then we use -O2. +# else: If the compiler is something else, then we use -O, unless debugging. + +if test "$ac_env_CFLAGS_set" = set; then + CFLAGS=$ac_env_CFLAGS_value +elif test "${CFLAGS+set}" = set; then + : # (keep what template set) +elif test "$enable_coverage" = yes; then + : # no optimization by default +elif test "$GCC" = yes; then + CFLAGS="-O2" +else + # if the user selected debug mode, don't use -O + if test "$enable_debug" != yes; then + CFLAGS="-O" + fi +fi + +if test "$ac_env_CXXFLAGS_set" = set; then + CXXFLAGS=$ac_env_CXXFLAGS_value +elif test "${CXXFLAGS+set}" = set; then + : # (keep what template set) +elif test "$enable_coverage" = yes; then + : # no optimization by default +elif test "$GCC" = yes; then + CXXFLAGS="-O2" +else + # if the user selected debug mode, don't use -O + if test "$enable_debug" != yes; then + CXXFLAGS="-O" + fi +fi + +# When generating bitcode (for inlining) we always want to use -O2 +# even when --enable-debug is specified. The bitcode it's not going to +# be used for line-by-line debugging, and JIT inlining doesn't work +# without at least -O1 (otherwise clang will emit 'noinline' +# attributes everywhere), which is bad for testing. Still allow the +# environment to override if done explicitly. +if test "$ac_env_BITCODE_CFLAGS_set" = set; then + BITCODE_CFLAGS=$ac_env_BITCODE_CFLAGS_value +else + BITCODE_CFLAGS="-O2 $BITCODE_CFLAGS" +fi +if test "$ac_env_BITCODE_CXXFLAGS_set" = set; then + BITCODE_CXXFLAGS=$ac_env_BITCODE_CXXFLAGS_value +else + BITCODE_CXXFLAGS="-O2 $BITCODE_CXXFLAGS" +fi + +# C[XX]FLAGS we determined above will be added back at the end +user_CFLAGS=$CFLAGS +CFLAGS="" +user_CXXFLAGS=$CXXFLAGS +CXXFLAGS="" +user_BITCODE_CFLAGS=$BITCODE_CFLAGS +BITCODE_CFLAGS="" +user_BITCODE_CXXFLAGS=$BITCODE_CXXFLAGS +BITCODE_CXXFLAGS="" + +# set CFLAGS_VECTOR from the environment, if available +if test "$ac_env_CFLAGS_VECTOR_set" = set; then + CFLAGS_VECTOR=$ac_env_CFLAGS_VECTOR_value +fi + +# Some versions of GCC support some additional useful warning flags. +# Check whether they are supported, and add them to CFLAGS if so. +# ICC pretends to be GCC but it's lying; it doesn't support these flags, +# but has its own. Also check other compiler-specific flags here. + +if test "$GCC" = yes -a "$ICC" = no; then + CFLAGS="-Wall -Wmissing-prototypes -Wpointer-arith" + CXXFLAGS="-Wall -Wpointer-arith" + # These work in some but not all gcc versions + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether ${CC} supports -Wdeclaration-after-statement, for CFLAGS" >&5 +$as_echo_n "checking whether ${CC} supports -Wdeclaration-after-statement, for CFLAGS... " >&6; } +if ${pgac_cv_prog_CC_cflags__Wdeclaration_after_statement+:} false; then : + $as_echo_n "(cached) " >&6 +else + pgac_save_CFLAGS=$CFLAGS +pgac_save_CC=$CC +CC=${CC} +CFLAGS="${CFLAGS} -Wdeclaration-after-statement" +ac_save_c_werror_flag=$ac_c_werror_flag +ac_c_werror_flag=yes +cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. 
*/ + +int +main () +{ + + ; + return 0; +} +_ACEOF +if ac_fn_c_try_compile "$LINENO"; then : + pgac_cv_prog_CC_cflags__Wdeclaration_after_statement=yes +else + pgac_cv_prog_CC_cflags__Wdeclaration_after_statement=no +fi +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext +ac_c_werror_flag=$ac_save_c_werror_flag +CFLAGS="$pgac_save_CFLAGS" +CC="$pgac_save_CC" +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $pgac_cv_prog_CC_cflags__Wdeclaration_after_statement" >&5 +$as_echo "$pgac_cv_prog_CC_cflags__Wdeclaration_after_statement" >&6; } +if test x"$pgac_cv_prog_CC_cflags__Wdeclaration_after_statement" = x"yes"; then + CFLAGS="${CFLAGS} -Wdeclaration-after-statement" +fi + + + # -Wdeclaration-after-statement isn't applicable for C++ + # Really don't want VLAs to be used in our dialect of C + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether ${CC} supports -Werror=vla, for CFLAGS" >&5 +$as_echo_n "checking whether ${CC} supports -Werror=vla, for CFLAGS... " >&6; } +if ${pgac_cv_prog_CC_cflags__Werror_vla+:} false; then : + $as_echo_n "(cached) " >&6 +else + pgac_save_CFLAGS=$CFLAGS +pgac_save_CC=$CC +CC=${CC} +CFLAGS="${CFLAGS} -Werror=vla" +ac_save_c_werror_flag=$ac_c_werror_flag +ac_c_werror_flag=yes +cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + +int +main () +{ + + ; + return 0; +} +_ACEOF +if ac_fn_c_try_compile "$LINENO"; then : + pgac_cv_prog_CC_cflags__Werror_vla=yes +else + pgac_cv_prog_CC_cflags__Werror_vla=no +fi +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext +ac_c_werror_flag=$ac_save_c_werror_flag +CFLAGS="$pgac_save_CFLAGS" +CC="$pgac_save_CC" +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $pgac_cv_prog_CC_cflags__Werror_vla" >&5 +$as_echo "$pgac_cv_prog_CC_cflags__Werror_vla" >&6; } +if test x"$pgac_cv_prog_CC_cflags__Werror_vla" = x"yes"; then + CFLAGS="${CFLAGS} -Werror=vla" +fi + + + # -Wvla is not applicable for C++ + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether ${CC} supports -Wendif-labels, for CFLAGS" >&5 +$as_echo_n "checking whether ${CC} supports -Wendif-labels, for CFLAGS... " >&6; } +if ${pgac_cv_prog_CC_cflags__Wendif_labels+:} false; then : + $as_echo_n "(cached) " >&6 +else + pgac_save_CFLAGS=$CFLAGS +pgac_save_CC=$CC +CC=${CC} +CFLAGS="${CFLAGS} -Wendif-labels" +ac_save_c_werror_flag=$ac_c_werror_flag +ac_c_werror_flag=yes +cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + +int +main () +{ + + ; + return 0; +} +_ACEOF +if ac_fn_c_try_compile "$LINENO"; then : + pgac_cv_prog_CC_cflags__Wendif_labels=yes +else + pgac_cv_prog_CC_cflags__Wendif_labels=no +fi +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext +ac_c_werror_flag=$ac_save_c_werror_flag +CFLAGS="$pgac_save_CFLAGS" +CC="$pgac_save_CC" +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $pgac_cv_prog_CC_cflags__Wendif_labels" >&5 +$as_echo "$pgac_cv_prog_CC_cflags__Wendif_labels" >&6; } +if test x"$pgac_cv_prog_CC_cflags__Wendif_labels" = x"yes"; then + CFLAGS="${CFLAGS} -Wendif-labels" +fi + + + { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether ${CXX} supports -Wendif-labels, for CXXFLAGS" >&5 +$as_echo_n "checking whether ${CXX} supports -Wendif-labels, for CXXFLAGS... 
" >&6; } +if ${pgac_cv_prog_CXX_cxxflags__Wendif_labels+:} false; then : + $as_echo_n "(cached) " >&6 +else + pgac_save_CXXFLAGS=$CXXFLAGS +pgac_save_CXX=$CXX +CXX=${CXX} +CXXFLAGS="${CXXFLAGS} -Wendif-labels" +ac_save_cxx_werror_flag=$ac_cxx_werror_flag +ac_cxx_werror_flag=yes +ac_ext=cpp +ac_cpp='$CXXCPP $CPPFLAGS' +ac_compile='$CXX -c $CXXFLAGS $CPPFLAGS conftest.$ac_ext >&5' +ac_link='$CXX -o conftest$ac_exeext $CXXFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' +ac_compiler_gnu=$ac_cv_cxx_compiler_gnu + +cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + +int +main () +{ + + ; + return 0; +} +_ACEOF +if ac_fn_cxx_try_compile "$LINENO"; then : + pgac_cv_prog_CXX_cxxflags__Wendif_labels=yes +else + pgac_cv_prog_CXX_cxxflags__Wendif_labels=no +fi +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext +ac_ext=c +ac_cpp='$CPP $CPPFLAGS' +ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' +ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' +ac_compiler_gnu=$ac_cv_c_compiler_gnu + +ac_cxx_werror_flag=$ac_save_cxx_werror_flag +CXXFLAGS="$pgac_save_CXXFLAGS" +CXX="$pgac_save_CXX" +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $pgac_cv_prog_CXX_cxxflags__Wendif_labels" >&5 +$as_echo "$pgac_cv_prog_CXX_cxxflags__Wendif_labels" >&6; } +if test x"$pgac_cv_prog_CXX_cxxflags__Wendif_labels" = x"yes"; then + CXXFLAGS="${CXXFLAGS} -Wendif-labels" +fi + + + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether ${CC} supports -Wmissing-format-attribute, for CFLAGS" >&5 +$as_echo_n "checking whether ${CC} supports -Wmissing-format-attribute, for CFLAGS... " >&6; } +if ${pgac_cv_prog_CC_cflags__Wmissing_format_attribute+:} false; then : + $as_echo_n "(cached) " >&6 +else + pgac_save_CFLAGS=$CFLAGS +pgac_save_CC=$CC +CC=${CC} +CFLAGS="${CFLAGS} -Wmissing-format-attribute" +ac_save_c_werror_flag=$ac_c_werror_flag +ac_c_werror_flag=yes +cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + +int +main () +{ + + ; + return 0; +} +_ACEOF +if ac_fn_c_try_compile "$LINENO"; then : + pgac_cv_prog_CC_cflags__Wmissing_format_attribute=yes +else + pgac_cv_prog_CC_cflags__Wmissing_format_attribute=no +fi +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext +ac_c_werror_flag=$ac_save_c_werror_flag +CFLAGS="$pgac_save_CFLAGS" +CC="$pgac_save_CC" +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $pgac_cv_prog_CC_cflags__Wmissing_format_attribute" >&5 +$as_echo "$pgac_cv_prog_CC_cflags__Wmissing_format_attribute" >&6; } +if test x"$pgac_cv_prog_CC_cflags__Wmissing_format_attribute" = x"yes"; then + CFLAGS="${CFLAGS} -Wmissing-format-attribute" +fi + + + { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether ${CXX} supports -Wmissing-format-attribute, for CXXFLAGS" >&5 +$as_echo_n "checking whether ${CXX} supports -Wmissing-format-attribute, for CXXFLAGS... " >&6; } +if ${pgac_cv_prog_CXX_cxxflags__Wmissing_format_attribute+:} false; then : + $as_echo_n "(cached) " >&6 +else + pgac_save_CXXFLAGS=$CXXFLAGS +pgac_save_CXX=$CXX +CXX=${CXX} +CXXFLAGS="${CXXFLAGS} -Wmissing-format-attribute" +ac_save_cxx_werror_flag=$ac_cxx_werror_flag +ac_cxx_werror_flag=yes +ac_ext=cpp +ac_cpp='$CXXCPP $CPPFLAGS' +ac_compile='$CXX -c $CXXFLAGS $CPPFLAGS conftest.$ac_ext >&5' +ac_link='$CXX -o conftest$ac_exeext $CXXFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' +ac_compiler_gnu=$ac_cv_cxx_compiler_gnu + +cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. 
*/ + +int +main () +{ + + ; + return 0; +} +_ACEOF +if ac_fn_cxx_try_compile "$LINENO"; then : + pgac_cv_prog_CXX_cxxflags__Wmissing_format_attribute=yes +else + pgac_cv_prog_CXX_cxxflags__Wmissing_format_attribute=no +fi +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext +ac_ext=c +ac_cpp='$CPP $CPPFLAGS' +ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' +ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' +ac_compiler_gnu=$ac_cv_c_compiler_gnu + +ac_cxx_werror_flag=$ac_save_cxx_werror_flag +CXXFLAGS="$pgac_save_CXXFLAGS" +CXX="$pgac_save_CXX" +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $pgac_cv_prog_CXX_cxxflags__Wmissing_format_attribute" >&5 +$as_echo "$pgac_cv_prog_CXX_cxxflags__Wmissing_format_attribute" >&6; } +if test x"$pgac_cv_prog_CXX_cxxflags__Wmissing_format_attribute" = x"yes"; then + CXXFLAGS="${CXXFLAGS} -Wmissing-format-attribute" +fi + + + # This was included in -Wall/-Wformat in older GCC versions + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether ${CC} supports -Wformat-security, for CFLAGS" >&5 +$as_echo_n "checking whether ${CC} supports -Wformat-security, for CFLAGS... " >&6; } +if ${pgac_cv_prog_CC_cflags__Wformat_security+:} false; then : + $as_echo_n "(cached) " >&6 +else + pgac_save_CFLAGS=$CFLAGS +pgac_save_CC=$CC +CC=${CC} +CFLAGS="${CFLAGS} -Wformat-security" +ac_save_c_werror_flag=$ac_c_werror_flag +ac_c_werror_flag=yes +cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + +int +main () +{ + ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : - ICC=yes + pgac_cv_prog_CC_cflags__Wformat_security=yes +else + pgac_cv_prog_CC_cflags__Wformat_security=no +fi +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext +ac_c_werror_flag=$ac_save_c_werror_flag +CFLAGS="$pgac_save_CFLAGS" +CC="$pgac_save_CC" +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $pgac_cv_prog_CC_cflags__Wformat_security" >&5 +$as_echo "$pgac_cv_prog_CC_cflags__Wformat_security" >&6; } +if test x"$pgac_cv_prog_CC_cflags__Wformat_security" = x"yes"; then + CFLAGS="${CFLAGS} -Wformat-security" +fi + + + { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether ${CXX} supports -Wformat-security, for CXXFLAGS" >&5 +$as_echo_n "checking whether ${CXX} supports -Wformat-security, for CXXFLAGS... " >&6; } +if ${pgac_cv_prog_CXX_cxxflags__Wformat_security+:} false; then : + $as_echo_n "(cached) " >&6 +else + pgac_save_CXXFLAGS=$CXXFLAGS +pgac_save_CXX=$CXX +CXX=${CXX} +CXXFLAGS="${CXXFLAGS} -Wformat-security" +ac_save_cxx_werror_flag=$ac_cxx_werror_flag +ac_cxx_werror_flag=yes +ac_ext=cpp +ac_cpp='$CXXCPP $CPPFLAGS' +ac_compile='$CXX -c $CXXFLAGS $CPPFLAGS conftest.$ac_ext >&5' +ac_link='$CXX -o conftest$ac_exeext $CXXFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' +ac_compiler_gnu=$ac_cv_cxx_compiler_gnu + +cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. 
*/ + +int +main () +{ + + ; + return 0; +} +_ACEOF +if ac_fn_cxx_try_compile "$LINENO"; then : + pgac_cv_prog_CXX_cxxflags__Wformat_security=yes +else + pgac_cv_prog_CXX_cxxflags__Wformat_security=no +fi +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext +ac_ext=c +ac_cpp='$CPP $CPPFLAGS' +ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' +ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' +ac_compiler_gnu=$ac_cv_c_compiler_gnu + +ac_cxx_werror_flag=$ac_save_cxx_werror_flag +CXXFLAGS="$pgac_save_CXXFLAGS" +CXX="$pgac_save_CXX" +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $pgac_cv_prog_CXX_cxxflags__Wformat_security" >&5 +$as_echo "$pgac_cv_prog_CXX_cxxflags__Wformat_security" >&6; } +if test x"$pgac_cv_prog_CXX_cxxflags__Wformat_security" = x"yes"; then + CXXFLAGS="${CXXFLAGS} -Wformat-security" +fi + + + # Disable strict-aliasing rules; needed for gcc 3.3+ + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether ${CC} supports -fno-strict-aliasing, for CFLAGS" >&5 +$as_echo_n "checking whether ${CC} supports -fno-strict-aliasing, for CFLAGS... " >&6; } +if ${pgac_cv_prog_CC_cflags__fno_strict_aliasing+:} false; then : + $as_echo_n "(cached) " >&6 +else + pgac_save_CFLAGS=$CFLAGS +pgac_save_CC=$CC +CC=${CC} +CFLAGS="${CFLAGS} -fno-strict-aliasing" +ac_save_c_werror_flag=$ac_c_werror_flag +ac_c_werror_flag=yes +cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + +int +main () +{ + + ; + return 0; +} +_ACEOF +if ac_fn_c_try_compile "$LINENO"; then : + pgac_cv_prog_CC_cflags__fno_strict_aliasing=yes +else + pgac_cv_prog_CC_cflags__fno_strict_aliasing=no +fi +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext +ac_c_werror_flag=$ac_save_c_werror_flag +CFLAGS="$pgac_save_CFLAGS" +CC="$pgac_save_CC" +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $pgac_cv_prog_CC_cflags__fno_strict_aliasing" >&5 +$as_echo "$pgac_cv_prog_CC_cflags__fno_strict_aliasing" >&6; } +if test x"$pgac_cv_prog_CC_cflags__fno_strict_aliasing" = x"yes"; then + CFLAGS="${CFLAGS} -fno-strict-aliasing" +fi + + + { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether ${CXX} supports -fno-strict-aliasing, for CXXFLAGS" >&5 +$as_echo_n "checking whether ${CXX} supports -fno-strict-aliasing, for CXXFLAGS... " >&6; } +if ${pgac_cv_prog_CXX_cxxflags__fno_strict_aliasing+:} false; then : + $as_echo_n "(cached) " >&6 +else + pgac_save_CXXFLAGS=$CXXFLAGS +pgac_save_CXX=$CXX +CXX=${CXX} +CXXFLAGS="${CXXFLAGS} -fno-strict-aliasing" +ac_save_cxx_werror_flag=$ac_cxx_werror_flag +ac_cxx_werror_flag=yes +ac_ext=cpp +ac_cpp='$CXXCPP $CPPFLAGS' +ac_compile='$CXX -c $CXXFLAGS $CPPFLAGS conftest.$ac_ext >&5' +ac_link='$CXX -o conftest$ac_exeext $CXXFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' +ac_compiler_gnu=$ac_cv_cxx_compiler_gnu + +cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. 
*/ + +int +main () +{ + + ; + return 0; +} +_ACEOF +if ac_fn_cxx_try_compile "$LINENO"; then : + pgac_cv_prog_CXX_cxxflags__fno_strict_aliasing=yes +else + pgac_cv_prog_CXX_cxxflags__fno_strict_aliasing=no +fi +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext +ac_ext=c +ac_cpp='$CPP $CPPFLAGS' +ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' +ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' +ac_compiler_gnu=$ac_cv_c_compiler_gnu + +ac_cxx_werror_flag=$ac_save_cxx_werror_flag +CXXFLAGS="$pgac_save_CXXFLAGS" +CXX="$pgac_save_CXX" +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $pgac_cv_prog_CXX_cxxflags__fno_strict_aliasing" >&5 +$as_echo "$pgac_cv_prog_CXX_cxxflags__fno_strict_aliasing" >&6; } +if test x"$pgac_cv_prog_CXX_cxxflags__fno_strict_aliasing" = x"yes"; then + CXXFLAGS="${CXXFLAGS} -fno-strict-aliasing" +fi + + + # Disable optimizations that assume no overflow; needed for gcc 4.3+ + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether ${CC} supports -fwrapv, for CFLAGS" >&5 +$as_echo_n "checking whether ${CC} supports -fwrapv, for CFLAGS... " >&6; } +if ${pgac_cv_prog_CC_cflags__fwrapv+:} false; then : + $as_echo_n "(cached) " >&6 +else + pgac_save_CFLAGS=$CFLAGS +pgac_save_CC=$CC +CC=${CC} +CFLAGS="${CFLAGS} -fwrapv" +ac_save_c_werror_flag=$ac_c_werror_flag +ac_c_werror_flag=yes +cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + +int +main () +{ + + ; + return 0; +} +_ACEOF +if ac_fn_c_try_compile "$LINENO"; then : + pgac_cv_prog_CC_cflags__fwrapv=yes +else + pgac_cv_prog_CC_cflags__fwrapv=no +fi +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext +ac_c_werror_flag=$ac_save_c_werror_flag +CFLAGS="$pgac_save_CFLAGS" +CC="$pgac_save_CC" +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $pgac_cv_prog_CC_cflags__fwrapv" >&5 +$as_echo "$pgac_cv_prog_CC_cflags__fwrapv" >&6; } +if test x"$pgac_cv_prog_CC_cflags__fwrapv" = x"yes"; then + CFLAGS="${CFLAGS} -fwrapv" +fi + + + { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether ${CXX} supports -fwrapv, for CXXFLAGS" >&5 +$as_echo_n "checking whether ${CXX} supports -fwrapv, for CXXFLAGS... " >&6; } +if ${pgac_cv_prog_CXX_cxxflags__fwrapv+:} false; then : + $as_echo_n "(cached) " >&6 +else + pgac_save_CXXFLAGS=$CXXFLAGS +pgac_save_CXX=$CXX +CXX=${CXX} +CXXFLAGS="${CXXFLAGS} -fwrapv" +ac_save_cxx_werror_flag=$ac_cxx_werror_flag +ac_cxx_werror_flag=yes +ac_ext=cpp +ac_cpp='$CXXCPP $CPPFLAGS' +ac_compile='$CXX -c $CXXFLAGS $CPPFLAGS conftest.$ac_ext >&5' +ac_link='$CXX -o conftest$ac_exeext $CXXFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' +ac_compiler_gnu=$ac_cv_cxx_compiler_gnu + +cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. 
*/ + +int +main () +{ + + ; + return 0; +} +_ACEOF +if ac_fn_cxx_try_compile "$LINENO"; then : + pgac_cv_prog_CXX_cxxflags__fwrapv=yes +else + pgac_cv_prog_CXX_cxxflags__fwrapv=no +fi +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext +ac_ext=c +ac_cpp='$CPP $CPPFLAGS' +ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' +ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' +ac_compiler_gnu=$ac_cv_c_compiler_gnu + +ac_cxx_werror_flag=$ac_save_cxx_werror_flag +CXXFLAGS="$pgac_save_CXXFLAGS" +CXX="$pgac_save_CXX" +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $pgac_cv_prog_CXX_cxxflags__fwrapv" >&5 +$as_echo "$pgac_cv_prog_CXX_cxxflags__fwrapv" >&6; } +if test x"$pgac_cv_prog_CXX_cxxflags__fwrapv" = x"yes"; then + CXXFLAGS="${CXXFLAGS} -fwrapv" +fi + + + # Disable FP optimizations that cause various errors on gcc 4.5+ or maybe 4.6+ + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether ${CC} supports -fexcess-precision=standard, for CFLAGS" >&5 +$as_echo_n "checking whether ${CC} supports -fexcess-precision=standard, for CFLAGS... " >&6; } +if ${pgac_cv_prog_CC_cflags__fexcess_precision_standard+:} false; then : + $as_echo_n "(cached) " >&6 +else + pgac_save_CFLAGS=$CFLAGS +pgac_save_CC=$CC +CC=${CC} +CFLAGS="${CFLAGS} -fexcess-precision=standard" +ac_save_c_werror_flag=$ac_c_werror_flag +ac_c_werror_flag=yes +cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + +int +main () +{ + + ; + return 0; +} +_ACEOF +if ac_fn_c_try_compile "$LINENO"; then : + pgac_cv_prog_CC_cflags__fexcess_precision_standard=yes +else + pgac_cv_prog_CC_cflags__fexcess_precision_standard=no +fi +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext +ac_c_werror_flag=$ac_save_c_werror_flag +CFLAGS="$pgac_save_CFLAGS" +CC="$pgac_save_CC" +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $pgac_cv_prog_CC_cflags__fexcess_precision_standard" >&5 +$as_echo "$pgac_cv_prog_CC_cflags__fexcess_precision_standard" >&6; } +if test x"$pgac_cv_prog_CC_cflags__fexcess_precision_standard" = x"yes"; then + CFLAGS="${CFLAGS} -fexcess-precision=standard" +fi + + + { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether ${CXX} supports -fexcess-precision=standard, for CXXFLAGS" >&5 +$as_echo_n "checking whether ${CXX} supports -fexcess-precision=standard, for CXXFLAGS... " >&6; } +if ${pgac_cv_prog_CXX_cxxflags__fexcess_precision_standard+:} false; then : + $as_echo_n "(cached) " >&6 +else + pgac_save_CXXFLAGS=$CXXFLAGS +pgac_save_CXX=$CXX +CXX=${CXX} +CXXFLAGS="${CXXFLAGS} -fexcess-precision=standard" +ac_save_cxx_werror_flag=$ac_cxx_werror_flag +ac_cxx_werror_flag=yes +ac_ext=cpp +ac_cpp='$CXXCPP $CPPFLAGS' +ac_compile='$CXX -c $CXXFLAGS $CPPFLAGS conftest.$ac_ext >&5' +ac_link='$CXX -o conftest$ac_exeext $CXXFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' +ac_compiler_gnu=$ac_cv_cxx_compiler_gnu + +cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. 
*/ + +int +main () +{ + + ; + return 0; +} +_ACEOF +if ac_fn_cxx_try_compile "$LINENO"; then : + pgac_cv_prog_CXX_cxxflags__fexcess_precision_standard=yes +else + pgac_cv_prog_CXX_cxxflags__fexcess_precision_standard=no +fi +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext +ac_ext=c +ac_cpp='$CPP $CPPFLAGS' +ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' +ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' +ac_compiler_gnu=$ac_cv_c_compiler_gnu + +ac_cxx_werror_flag=$ac_save_cxx_werror_flag +CXXFLAGS="$pgac_save_CXXFLAGS" +CXX="$pgac_save_CXX" +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $pgac_cv_prog_CXX_cxxflags__fexcess_precision_standard" >&5 +$as_echo "$pgac_cv_prog_CXX_cxxflags__fexcess_precision_standard" >&6; } +if test x"$pgac_cv_prog_CXX_cxxflags__fexcess_precision_standard" = x"yes"; then + CXXFLAGS="${CXXFLAGS} -fexcess-precision=standard" +fi + + + # Optimization flags for specific files that benefit from vectorization + { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether ${CC} supports -funroll-loops, for CFLAGS_VECTOR" >&5 +$as_echo_n "checking whether ${CC} supports -funroll-loops, for CFLAGS_VECTOR... " >&6; } +if ${pgac_cv_prog_CC_cflags__funroll_loops+:} false; then : + $as_echo_n "(cached) " >&6 +else + pgac_save_CFLAGS=$CFLAGS +pgac_save_CC=$CC +CC=${CC} +CFLAGS="${CFLAGS_VECTOR} -funroll-loops" +ac_save_c_werror_flag=$ac_c_werror_flag +ac_c_werror_flag=yes +cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + +int +main () +{ + + ; + return 0; +} +_ACEOF +if ac_fn_c_try_compile "$LINENO"; then : + pgac_cv_prog_CC_cflags__funroll_loops=yes else - ICC=no + pgac_cv_prog_CC_cflags__funroll_loops=no fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext +ac_c_werror_flag=$ac_save_c_werror_flag +CFLAGS="$pgac_save_CFLAGS" +CC="$pgac_save_CC" +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $pgac_cv_prog_CC_cflags__funroll_loops" >&5 +$as_echo "$pgac_cv_prog_CC_cflags__funroll_loops" >&6; } +if test x"$pgac_cv_prog_CC_cflags__funroll_loops" = x"yes"; then + CFLAGS_VECTOR="${CFLAGS_VECTOR} -funroll-loops" +fi -# Check if it's Sun Studio compiler. We assume that -# __SUNPRO_C will be defined for Sun Studio compilers + + { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether ${CC} supports -ftree-vectorize, for CFLAGS_VECTOR" >&5 +$as_echo_n "checking whether ${CC} supports -ftree-vectorize, for CFLAGS_VECTOR... " >&6; } +if ${pgac_cv_prog_CC_cflags__ftree_vectorize+:} false; then : + $as_echo_n "(cached) " >&6 +else + pgac_save_CFLAGS=$CFLAGS +pgac_save_CC=$CC +CC=${CC} +CFLAGS="${CFLAGS_VECTOR} -ftree-vectorize" +ac_save_c_werror_flag=$ac_c_werror_flag +ac_c_werror_flag=yes cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ int main () { -#ifndef __SUNPRO_C -choke me -#endif + ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : - SUN_STUDIO_CC=yes + pgac_cv_prog_CC_cflags__ftree_vectorize=yes else - SUN_STUDIO_CC=no + pgac_cv_prog_CC_cflags__ftree_vectorize=no fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext - - - -unset CFLAGS - -# -# Read the template -# -. "$srcdir/src/template/$template" || exit - -# CFLAGS are selected so: -# If the user specifies something in the environment, that is used. -# else: If the template file set something, that is used. -# else: If coverage was enabled, don't set anything. -# else: If the compiler is GCC, then we use -O2. 
-# else: If the compiler is something else, then we use -O, unless debugging. - -if test "$ac_env_CFLAGS_set" = set; then - CFLAGS=$ac_env_CFLAGS_value -elif test "${CFLAGS+set}" = set; then - : # (keep what template set) -elif test "$enable_coverage" = yes; then - : # no optimization by default -elif test "$GCC" = yes; then - CFLAGS="-O2" -else - # if the user selected debug mode, don't use -O - if test "$enable_debug" != yes; then - CFLAGS="-O" - fi +ac_c_werror_flag=$ac_save_c_werror_flag +CFLAGS="$pgac_save_CFLAGS" +CC="$pgac_save_CC" fi - -# CFLAGS we determined above will be added back at the end -user_CFLAGS=$CFLAGS -CFLAGS="" - -# set CFLAGS_VECTOR from the environment, if available -if test "$ac_env_CFLAGS_VECTOR_set" = set; then - CFLAGS_VECTOR=$ac_env_CFLAGS_VECTOR_value +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $pgac_cv_prog_CC_cflags__ftree_vectorize" >&5 +$as_echo "$pgac_cv_prog_CC_cflags__ftree_vectorize" >&6; } +if test x"$pgac_cv_prog_CC_cflags__ftree_vectorize" = x"yes"; then + CFLAGS_VECTOR="${CFLAGS_VECTOR} -ftree-vectorize" fi -# Some versions of GCC support some additional useful warning flags. -# Check whether they are supported, and add them to CFLAGS if so. -# ICC pretends to be GCC but it's lying; it doesn't support these flags, -# but has its own. Also check other compiler-specific flags here. -if test "$GCC" = yes -a "$ICC" = no; then - CFLAGS="-Wall -Wmissing-prototypes -Wpointer-arith" - # These work in some but not all gcc versions - { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether $CC supports -Wdeclaration-after-statement" >&5 -$as_echo_n "checking whether $CC supports -Wdeclaration-after-statement... " >&6; } -if ${pgac_cv_prog_cc_cflags__Wdeclaration_after_statement+:} false; then : + # We want to suppress clang's unhelpful unused-command-line-argument warnings + # but gcc won't complain about unrecognized -Wno-foo switches, so we have to + # test for the positive form and if that works, add the negative form + NOT_THE_CFLAGS="" + { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether ${CC} supports -Wunused-command-line-argument, for NOT_THE_CFLAGS" >&5 +$as_echo_n "checking whether ${CC} supports -Wunused-command-line-argument, for NOT_THE_CFLAGS... 
" >&6; } +if ${pgac_cv_prog_CC_cflags__Wunused_command_line_argument+:} false; then : $as_echo_n "(cached) " >&6 else pgac_save_CFLAGS=$CFLAGS -CFLAGS="$pgac_save_CFLAGS -Wdeclaration-after-statement" +pgac_save_CC=$CC +CC=${CC} +CFLAGS="${NOT_THE_CFLAGS} -Wunused-command-line-argument" ac_save_c_werror_flag=$ac_c_werror_flag ac_c_werror_flag=yes cat confdefs.h - <<_ACEOF >conftest.$ac_ext @@ -4531,27 +6078,36 @@ main () } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : - pgac_cv_prog_cc_cflags__Wdeclaration_after_statement=yes + pgac_cv_prog_CC_cflags__Wunused_command_line_argument=yes else - pgac_cv_prog_cc_cflags__Wdeclaration_after_statement=no + pgac_cv_prog_CC_cflags__Wunused_command_line_argument=no fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext ac_c_werror_flag=$ac_save_c_werror_flag CFLAGS="$pgac_save_CFLAGS" +CC="$pgac_save_CC" fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $pgac_cv_prog_cc_cflags__Wdeclaration_after_statement" >&5 -$as_echo "$pgac_cv_prog_cc_cflags__Wdeclaration_after_statement" >&6; } -if test x"$pgac_cv_prog_cc_cflags__Wdeclaration_after_statement" = x"yes"; then - CFLAGS="$CFLAGS -Wdeclaration-after-statement" +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $pgac_cv_prog_CC_cflags__Wunused_command_line_argument" >&5 +$as_echo "$pgac_cv_prog_CC_cflags__Wunused_command_line_argument" >&6; } +if test x"$pgac_cv_prog_CC_cflags__Wunused_command_line_argument" = x"yes"; then + NOT_THE_CFLAGS="${NOT_THE_CFLAGS} -Wunused-command-line-argument" fi - { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether $CC supports -Wendif-labels" >&5 -$as_echo_n "checking whether $CC supports -Wendif-labels... " >&6; } -if ${pgac_cv_prog_cc_cflags__Wendif_labels+:} false; then : + + if test -n "$NOT_THE_CFLAGS"; then + CFLAGS="$CFLAGS -Wno-unused-command-line-argument" + fi + # Similarly disable useless truncation warnings from gcc 8+ + NOT_THE_CFLAGS="" + { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether ${CC} supports -Wformat-truncation, for NOT_THE_CFLAGS" >&5 +$as_echo_n "checking whether ${CC} supports -Wformat-truncation, for NOT_THE_CFLAGS... 
" >&6; } +if ${pgac_cv_prog_CC_cflags__Wformat_truncation+:} false; then : $as_echo_n "(cached) " >&6 else pgac_save_CFLAGS=$CFLAGS -CFLAGS="$pgac_save_CFLAGS -Wendif-labels" +pgac_save_CC=$CC +CC=${CC} +CFLAGS="${NOT_THE_CFLAGS} -Wformat-truncation" ac_save_c_werror_flag=$ac_c_werror_flag ac_c_werror_flag=yes cat confdefs.h - <<_ACEOF >conftest.$ac_ext @@ -4566,27 +6122,35 @@ main () } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : - pgac_cv_prog_cc_cflags__Wendif_labels=yes + pgac_cv_prog_CC_cflags__Wformat_truncation=yes else - pgac_cv_prog_cc_cflags__Wendif_labels=no + pgac_cv_prog_CC_cflags__Wformat_truncation=no fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext ac_c_werror_flag=$ac_save_c_werror_flag CFLAGS="$pgac_save_CFLAGS" +CC="$pgac_save_CC" fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $pgac_cv_prog_cc_cflags__Wendif_labels" >&5 -$as_echo "$pgac_cv_prog_cc_cflags__Wendif_labels" >&6; } -if test x"$pgac_cv_prog_cc_cflags__Wendif_labels" = x"yes"; then - CFLAGS="$CFLAGS -Wendif-labels" +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $pgac_cv_prog_CC_cflags__Wformat_truncation" >&5 +$as_echo "$pgac_cv_prog_CC_cflags__Wformat_truncation" >&6; } +if test x"$pgac_cv_prog_CC_cflags__Wformat_truncation" = x"yes"; then + NOT_THE_CFLAGS="${NOT_THE_CFLAGS} -Wformat-truncation" fi - { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether $CC supports -Wmissing-format-attribute" >&5 -$as_echo_n "checking whether $CC supports -Wmissing-format-attribute... " >&6; } -if ${pgac_cv_prog_cc_cflags__Wmissing_format_attribute+:} false; then : + + if test -n "$NOT_THE_CFLAGS"; then + CFLAGS="$CFLAGS -Wno-format-truncation" + fi + NOT_THE_CFLAGS="" + { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether ${CC} supports -Wstringop-truncation, for NOT_THE_CFLAGS" >&5 +$as_echo_n "checking whether ${CC} supports -Wstringop-truncation, for NOT_THE_CFLAGS... " >&6; } +if ${pgac_cv_prog_CC_cflags__Wstringop_truncation+:} false; then : $as_echo_n "(cached) " >&6 else pgac_save_CFLAGS=$CFLAGS -CFLAGS="$pgac_save_CFLAGS -Wmissing-format-attribute" +pgac_save_CC=$CC +CC=${CC} +CFLAGS="${NOT_THE_CFLAGS} -Wstringop-truncation" ac_save_c_werror_flag=$ac_c_werror_flag ac_c_werror_flag=yes cat confdefs.h - <<_ACEOF >conftest.$ac_ext @@ -4601,28 +6165,38 @@ main () } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : - pgac_cv_prog_cc_cflags__Wmissing_format_attribute=yes + pgac_cv_prog_CC_cflags__Wstringop_truncation=yes else - pgac_cv_prog_cc_cflags__Wmissing_format_attribute=no + pgac_cv_prog_CC_cflags__Wstringop_truncation=no fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext ac_c_werror_flag=$ac_save_c_werror_flag CFLAGS="$pgac_save_CFLAGS" +CC="$pgac_save_CC" fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $pgac_cv_prog_cc_cflags__Wmissing_format_attribute" >&5 -$as_echo "$pgac_cv_prog_cc_cflags__Wmissing_format_attribute" >&6; } -if test x"$pgac_cv_prog_cc_cflags__Wmissing_format_attribute" = x"yes"; then - CFLAGS="$CFLAGS -Wmissing-format-attribute" +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $pgac_cv_prog_CC_cflags__Wstringop_truncation" >&5 +$as_echo "$pgac_cv_prog_CC_cflags__Wstringop_truncation" >&6; } +if test x"$pgac_cv_prog_CC_cflags__Wstringop_truncation" = x"yes"; then + NOT_THE_CFLAGS="${NOT_THE_CFLAGS} -Wstringop-truncation" fi - # This was included in -Wall/-Wformat in older GCC versions - { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether $CC supports -Wformat-security" >&5 -$as_echo_n "checking whether $CC supports -Wformat-security... 
" >&6; } -if ${pgac_cv_prog_cc_cflags__Wformat_security+:} false; then : + + if test -n "$NOT_THE_CFLAGS"; then + CFLAGS="$CFLAGS -Wno-stringop-truncation" + fi +elif test "$ICC" = yes; then + # Intel's compiler has a bug/misoptimization in checking for + # division by NAN (NaN == 0), -mp1 fixes it, so add it to the CFLAGS. + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether ${CC} supports -mp1, for CFLAGS" >&5 +$as_echo_n "checking whether ${CC} supports -mp1, for CFLAGS... " >&6; } +if ${pgac_cv_prog_CC_cflags__mp1+:} false; then : $as_echo_n "(cached) " >&6 else pgac_save_CFLAGS=$CFLAGS -CFLAGS="$pgac_save_CFLAGS -Wformat-security" +pgac_save_CC=$CC +CC=${CC} +CFLAGS="${CFLAGS} -mp1" ac_save_c_werror_flag=$ac_c_werror_flag ac_c_werror_flag=yes cat confdefs.h - <<_ACEOF >conftest.$ac_ext @@ -4637,30 +6211,39 @@ main () } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : - pgac_cv_prog_cc_cflags__Wformat_security=yes + pgac_cv_prog_CC_cflags__mp1=yes else - pgac_cv_prog_cc_cflags__Wformat_security=no + pgac_cv_prog_CC_cflags__mp1=no fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext ac_c_werror_flag=$ac_save_c_werror_flag CFLAGS="$pgac_save_CFLAGS" +CC="$pgac_save_CC" fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $pgac_cv_prog_cc_cflags__Wformat_security" >&5 -$as_echo "$pgac_cv_prog_cc_cflags__Wformat_security" >&6; } -if test x"$pgac_cv_prog_cc_cflags__Wformat_security" = x"yes"; then - CFLAGS="$CFLAGS -Wformat-security" +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $pgac_cv_prog_CC_cflags__mp1" >&5 +$as_echo "$pgac_cv_prog_CC_cflags__mp1" >&6; } +if test x"$pgac_cv_prog_CC_cflags__mp1" = x"yes"; then + CFLAGS="${CFLAGS} -mp1" fi - # Disable strict-aliasing rules; needed for gcc 3.3+ - { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether $CC supports -fno-strict-aliasing" >&5 -$as_echo_n "checking whether $CC supports -fno-strict-aliasing... " >&6; } -if ${pgac_cv_prog_cc_cflags__fno_strict_aliasing+:} false; then : + + { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether ${CXX} supports -mp1, for CXXFLAGS" >&5 +$as_echo_n "checking whether ${CXX} supports -mp1, for CXXFLAGS... " >&6; } +if ${pgac_cv_prog_CXX_cxxflags__mp1+:} false; then : $as_echo_n "(cached) " >&6 else - pgac_save_CFLAGS=$CFLAGS -CFLAGS="$pgac_save_CFLAGS -fno-strict-aliasing" -ac_save_c_werror_flag=$ac_c_werror_flag -ac_c_werror_flag=yes + pgac_save_CXXFLAGS=$CXXFLAGS +pgac_save_CXX=$CXX +CXX=${CXX} +CXXFLAGS="${CXXFLAGS} -mp1" +ac_save_cxx_werror_flag=$ac_cxx_werror_flag +ac_cxx_werror_flag=yes +ac_ext=cpp +ac_cpp='$CXXCPP $CPPFLAGS' +ac_compile='$CXX -c $CXXFLAGS $CPPFLAGS conftest.$ac_ext >&5' +ac_link='$CXX -o conftest$ac_exeext $CXXFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' +ac_compiler_gnu=$ac_cv_cxx_compiler_gnu + cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. 
*/ @@ -4672,29 +6255,40 @@ main () return 0; } _ACEOF -if ac_fn_c_try_compile "$LINENO"; then : - pgac_cv_prog_cc_cflags__fno_strict_aliasing=yes +if ac_fn_cxx_try_compile "$LINENO"; then : + pgac_cv_prog_CXX_cxxflags__mp1=yes else - pgac_cv_prog_cc_cflags__fno_strict_aliasing=no + pgac_cv_prog_CXX_cxxflags__mp1=no fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext -ac_c_werror_flag=$ac_save_c_werror_flag -CFLAGS="$pgac_save_CFLAGS" +ac_ext=c +ac_cpp='$CPP $CPPFLAGS' +ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' +ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' +ac_compiler_gnu=$ac_cv_c_compiler_gnu + +ac_cxx_werror_flag=$ac_save_cxx_werror_flag +CXXFLAGS="$pgac_save_CXXFLAGS" +CXX="$pgac_save_CXX" fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $pgac_cv_prog_cc_cflags__fno_strict_aliasing" >&5 -$as_echo "$pgac_cv_prog_cc_cflags__fno_strict_aliasing" >&6; } -if test x"$pgac_cv_prog_cc_cflags__fno_strict_aliasing" = x"yes"; then - CFLAGS="$CFLAGS -fno-strict-aliasing" +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $pgac_cv_prog_CXX_cxxflags__mp1" >&5 +$as_echo "$pgac_cv_prog_CXX_cxxflags__mp1" >&6; } +if test x"$pgac_cv_prog_CXX_cxxflags__mp1" = x"yes"; then + CXXFLAGS="${CXXFLAGS} -mp1" fi - # Disable optimizations that assume no overflow; needed for gcc 4.3+ - { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether $CC supports -fwrapv" >&5 -$as_echo_n "checking whether $CC supports -fwrapv... " >&6; } -if ${pgac_cv_prog_cc_cflags__fwrapv+:} false; then : + + # Make sure strict aliasing is off (though this is said to be the default) + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether ${CC} supports -fno-strict-aliasing, for CFLAGS" >&5 +$as_echo_n "checking whether ${CC} supports -fno-strict-aliasing, for CFLAGS... " >&6; } +if ${pgac_cv_prog_CC_cflags__fno_strict_aliasing+:} false; then : $as_echo_n "(cached) " >&6 else pgac_save_CFLAGS=$CFLAGS -CFLAGS="$pgac_save_CFLAGS -fwrapv" +pgac_save_CC=$CC +CC=${CC} +CFLAGS="${CFLAGS} -fno-strict-aliasing" ac_save_c_werror_flag=$ac_c_werror_flag ac_c_werror_flag=yes cat confdefs.h - <<_ACEOF >conftest.$ac_ext @@ -4709,30 +6303,39 @@ main () } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : - pgac_cv_prog_cc_cflags__fwrapv=yes + pgac_cv_prog_CC_cflags__fno_strict_aliasing=yes else - pgac_cv_prog_cc_cflags__fwrapv=no + pgac_cv_prog_CC_cflags__fno_strict_aliasing=no fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext ac_c_werror_flag=$ac_save_c_werror_flag CFLAGS="$pgac_save_CFLAGS" +CC="$pgac_save_CC" fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $pgac_cv_prog_cc_cflags__fwrapv" >&5 -$as_echo "$pgac_cv_prog_cc_cflags__fwrapv" >&6; } -if test x"$pgac_cv_prog_cc_cflags__fwrapv" = x"yes"; then - CFLAGS="$CFLAGS -fwrapv" +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $pgac_cv_prog_CC_cflags__fno_strict_aliasing" >&5 +$as_echo "$pgac_cv_prog_CC_cflags__fno_strict_aliasing" >&6; } +if test x"$pgac_cv_prog_CC_cflags__fno_strict_aliasing" = x"yes"; then + CFLAGS="${CFLAGS} -fno-strict-aliasing" fi - # Disable FP optimizations that cause various errors on gcc 4.5+ or maybe 4.6+ - { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether $CC supports -fexcess-precision=standard" >&5 -$as_echo_n "checking whether $CC supports -fexcess-precision=standard... 
" >&6; } -if ${pgac_cv_prog_cc_cflags__fexcess_precision_standard+:} false; then : + + { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether ${CXX} supports -fno-strict-aliasing, for CXXFLAGS" >&5 +$as_echo_n "checking whether ${CXX} supports -fno-strict-aliasing, for CXXFLAGS... " >&6; } +if ${pgac_cv_prog_CXX_cxxflags__fno_strict_aliasing+:} false; then : $as_echo_n "(cached) " >&6 else - pgac_save_CFLAGS=$CFLAGS -CFLAGS="$pgac_save_CFLAGS -fexcess-precision=standard" -ac_save_c_werror_flag=$ac_c_werror_flag -ac_c_werror_flag=yes + pgac_save_CXXFLAGS=$CXXFLAGS +pgac_save_CXX=$CXX +CXX=${CXX} +CXXFLAGS="${CXXFLAGS} -fno-strict-aliasing" +ac_save_cxx_werror_flag=$ac_cxx_werror_flag +ac_cxx_werror_flag=yes +ac_ext=cpp +ac_cpp='$CXXCPP $CPPFLAGS' +ac_compile='$CXX -c $CXXFLAGS $CPPFLAGS conftest.$ac_ext >&5' +ac_link='$CXX -o conftest$ac_exeext $CXXFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' +ac_compiler_gnu=$ac_cv_cxx_compiler_gnu + cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ @@ -4744,29 +6347,41 @@ main () return 0; } _ACEOF -if ac_fn_c_try_compile "$LINENO"; then : - pgac_cv_prog_cc_cflags__fexcess_precision_standard=yes +if ac_fn_cxx_try_compile "$LINENO"; then : + pgac_cv_prog_CXX_cxxflags__fno_strict_aliasing=yes else - pgac_cv_prog_cc_cflags__fexcess_precision_standard=no + pgac_cv_prog_CXX_cxxflags__fno_strict_aliasing=no fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext -ac_c_werror_flag=$ac_save_c_werror_flag -CFLAGS="$pgac_save_CFLAGS" +ac_ext=c +ac_cpp='$CPP $CPPFLAGS' +ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' +ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' +ac_compiler_gnu=$ac_cv_c_compiler_gnu + +ac_cxx_werror_flag=$ac_save_cxx_werror_flag +CXXFLAGS="$pgac_save_CXXFLAGS" +CXX="$pgac_save_CXX" fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $pgac_cv_prog_cc_cflags__fexcess_precision_standard" >&5 -$as_echo "$pgac_cv_prog_cc_cflags__fexcess_precision_standard" >&6; } -if test x"$pgac_cv_prog_cc_cflags__fexcess_precision_standard" = x"yes"; then - CFLAGS="$CFLAGS -fexcess-precision=standard" +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $pgac_cv_prog_CXX_cxxflags__fno_strict_aliasing" >&5 +$as_echo "$pgac_cv_prog_CXX_cxxflags__fno_strict_aliasing" >&6; } +if test x"$pgac_cv_prog_CXX_cxxflags__fno_strict_aliasing" = x"yes"; then + CXXFLAGS="${CXXFLAGS} -fno-strict-aliasing" fi - # Optimization flags for specific files that benefit from vectorization - { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether $CC supports -funroll-loops" >&5 -$as_echo_n "checking whether $CC supports -funroll-loops... " >&6; } -if ${pgac_cv_prog_cc_cflags__funroll_loops+:} false; then : + +elif test "$PORTNAME" = "aix"; then + # AIX's xlc has to have strict aliasing turned off too + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether ${CC} supports -qnoansialias, for CFLAGS" >&5 +$as_echo_n "checking whether ${CC} supports -qnoansialias, for CFLAGS... 
" >&6; } +if ${pgac_cv_prog_CC_cflags__qnoansialias+:} false; then : $as_echo_n "(cached) " >&6 else pgac_save_CFLAGS=$CFLAGS -CFLAGS="$pgac_save_CFLAGS -funroll-loops" +pgac_save_CC=$CC +CC=${CC} +CFLAGS="${CFLAGS} -qnoansialias" ac_save_c_werror_flag=$ac_c_werror_flag ac_c_werror_flag=yes cat confdefs.h - <<_ACEOF >conftest.$ac_ext @@ -4781,27 +6396,83 @@ main () } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : - pgac_cv_prog_cc_cflags__funroll_loops=yes + pgac_cv_prog_CC_cflags__qnoansialias=yes else - pgac_cv_prog_cc_cflags__funroll_loops=no + pgac_cv_prog_CC_cflags__qnoansialias=no fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext ac_c_werror_flag=$ac_save_c_werror_flag CFLAGS="$pgac_save_CFLAGS" +CC="$pgac_save_CC" fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $pgac_cv_prog_cc_cflags__funroll_loops" >&5 -$as_echo "$pgac_cv_prog_cc_cflags__funroll_loops" >&6; } -if test x"$pgac_cv_prog_cc_cflags__funroll_loops" = x"yes"; then - CFLAGS_VECTOR="${CFLAGS_VECTOR} -funroll-loops" +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $pgac_cv_prog_CC_cflags__qnoansialias" >&5 +$as_echo "$pgac_cv_prog_CC_cflags__qnoansialias" >&6; } +if test x"$pgac_cv_prog_CC_cflags__qnoansialias" = x"yes"; then + CFLAGS="${CFLAGS} -qnoansialias" +fi + + + { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether ${CXX} supports -qnoansialias, for CXXFLAGS" >&5 +$as_echo_n "checking whether ${CXX} supports -qnoansialias, for CXXFLAGS... " >&6; } +if ${pgac_cv_prog_CXX_cxxflags__qnoansialias+:} false; then : + $as_echo_n "(cached) " >&6 +else + pgac_save_CXXFLAGS=$CXXFLAGS +pgac_save_CXX=$CXX +CXX=${CXX} +CXXFLAGS="${CXXFLAGS} -qnoansialias" +ac_save_cxx_werror_flag=$ac_cxx_werror_flag +ac_cxx_werror_flag=yes +ac_ext=cpp +ac_cpp='$CXXCPP $CPPFLAGS' +ac_compile='$CXX -c $CXXFLAGS $CPPFLAGS conftest.$ac_ext >&5' +ac_link='$CXX -o conftest$ac_exeext $CXXFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' +ac_compiler_gnu=$ac_cv_cxx_compiler_gnu + +cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + +int +main () +{ + + ; + return 0; +} +_ACEOF +if ac_fn_cxx_try_compile "$LINENO"; then : + pgac_cv_prog_CXX_cxxflags__qnoansialias=yes +else + pgac_cv_prog_CXX_cxxflags__qnoansialias=no +fi +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext +ac_ext=c +ac_cpp='$CPP $CPPFLAGS' +ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' +ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' +ac_compiler_gnu=$ac_cv_c_compiler_gnu + +ac_cxx_werror_flag=$ac_save_cxx_werror_flag +CXXFLAGS="$pgac_save_CXXFLAGS" +CXX="$pgac_save_CXX" +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $pgac_cv_prog_CXX_cxxflags__qnoansialias" >&5 +$as_echo "$pgac_cv_prog_CXX_cxxflags__qnoansialias" >&6; } +if test x"$pgac_cv_prog_CXX_cxxflags__qnoansialias" = x"yes"; then + CXXFLAGS="${CXXFLAGS} -qnoansialias" fi - { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether $CC supports -ftree-vectorize" >&5 -$as_echo_n "checking whether $CC supports -ftree-vectorize... " >&6; } -if ${pgac_cv_prog_cc_cflags__ftree_vectorize+:} false; then : + + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether ${CC} supports -qlonglong, for CFLAGS" >&5 +$as_echo_n "checking whether ${CC} supports -qlonglong, for CFLAGS... 
" >&6; } +if ${pgac_cv_prog_CC_cflags__qlonglong+:} false; then : $as_echo_n "(cached) " >&6 else pgac_save_CFLAGS=$CFLAGS -CFLAGS="$pgac_save_CFLAGS -ftree-vectorize" +pgac_save_CC=$CC +CC=${CC} +CFLAGS="${CFLAGS} -qlonglong" ac_save_c_werror_flag=$ac_c_werror_flag ac_c_werror_flag=yes cat confdefs.h - <<_ACEOF >conftest.$ac_ext @@ -4816,30 +6487,86 @@ main () } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : - pgac_cv_prog_cc_cflags__ftree_vectorize=yes + pgac_cv_prog_CC_cflags__qlonglong=yes else - pgac_cv_prog_cc_cflags__ftree_vectorize=no + pgac_cv_prog_CC_cflags__qlonglong=no fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext ac_c_werror_flag=$ac_save_c_werror_flag CFLAGS="$pgac_save_CFLAGS" +CC="$pgac_save_CC" fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $pgac_cv_prog_cc_cflags__ftree_vectorize" >&5 -$as_echo "$pgac_cv_prog_cc_cflags__ftree_vectorize" >&6; } -if test x"$pgac_cv_prog_cc_cflags__ftree_vectorize" = x"yes"; then - CFLAGS_VECTOR="${CFLAGS_VECTOR} -ftree-vectorize" +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $pgac_cv_prog_CC_cflags__qlonglong" >&5 +$as_echo "$pgac_cv_prog_CC_cflags__qlonglong" >&6; } +if test x"$pgac_cv_prog_CC_cflags__qlonglong" = x"yes"; then + CFLAGS="${CFLAGS} -qlonglong" fi - # We want to suppress clang's unhelpful unused-command-line-argument warnings - # but gcc won't complain about unrecognized -Wno-foo switches, so we have to - # test for the positive form and if that works, add the negative form - { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether $CC supports -Wunused-command-line-argument" >&5 -$as_echo_n "checking whether $CC supports -Wunused-command-line-argument... " >&6; } -if ${pgac_cv_prog_cc_cflags__Wunused_command_line_argument+:} false; then : + + { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether ${CXX} supports -qlonglong, for CXXFLAGS" >&5 +$as_echo_n "checking whether ${CXX} supports -qlonglong, for CXXFLAGS... " >&6; } +if ${pgac_cv_prog_CXX_cxxflags__qlonglong+:} false; then : + $as_echo_n "(cached) " >&6 +else + pgac_save_CXXFLAGS=$CXXFLAGS +pgac_save_CXX=$CXX +CXX=${CXX} +CXXFLAGS="${CXXFLAGS} -qlonglong" +ac_save_cxx_werror_flag=$ac_cxx_werror_flag +ac_cxx_werror_flag=yes +ac_ext=cpp +ac_cpp='$CXXCPP $CPPFLAGS' +ac_compile='$CXX -c $CXXFLAGS $CPPFLAGS conftest.$ac_ext >&5' +ac_link='$CXX -o conftest$ac_exeext $CXXFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' +ac_compiler_gnu=$ac_cv_cxx_compiler_gnu + +cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + +int +main () +{ + + ; + return 0; +} +_ACEOF +if ac_fn_cxx_try_compile "$LINENO"; then : + pgac_cv_prog_CXX_cxxflags__qlonglong=yes +else + pgac_cv_prog_CXX_cxxflags__qlonglong=no +fi +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext +ac_ext=c +ac_cpp='$CPP $CPPFLAGS' +ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' +ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' +ac_compiler_gnu=$ac_cv_c_compiler_gnu + +ac_cxx_werror_flag=$ac_save_cxx_werror_flag +CXXFLAGS="$pgac_save_CXXFLAGS" +CXX="$pgac_save_CXX" +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $pgac_cv_prog_CXX_cxxflags__qlonglong" >&5 +$as_echo "$pgac_cv_prog_CXX_cxxflags__qlonglong" >&6; } +if test x"$pgac_cv_prog_CXX_cxxflags__qlonglong" = x"yes"; then + CXXFLAGS="${CXXFLAGS} -qlonglong" +fi + + +elif test "$PORTNAME" = "hpux"; then + # On some versions of HP-UX, libm functions do not set errno by default. + # Fix that by using +Olibmerrno if the compiler recognizes it. 
+ +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether ${CC} supports +Olibmerrno, for CFLAGS" >&5 +$as_echo_n "checking whether ${CC} supports +Olibmerrno, for CFLAGS... " >&6; } +if ${pgac_cv_prog_CC_cflags_pOlibmerrno+:} false; then : $as_echo_n "(cached) " >&6 else pgac_save_CFLAGS=$CFLAGS -CFLAGS="$pgac_save_CFLAGS -Wunused-command-line-argument" +pgac_save_CC=$CC +CC=${CC} +CFLAGS="${CFLAGS} +Olibmerrno" ac_save_c_werror_flag=$ac_c_werror_flag ac_c_werror_flag=yes cat confdefs.h - <<_ACEOF >conftest.$ac_ext @@ -4854,33 +6581,93 @@ main () } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : - pgac_cv_prog_cc_cflags__Wunused_command_line_argument=yes + pgac_cv_prog_CC_cflags_pOlibmerrno=yes else - pgac_cv_prog_cc_cflags__Wunused_command_line_argument=no + pgac_cv_prog_CC_cflags_pOlibmerrno=no fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext ac_c_werror_flag=$ac_save_c_werror_flag CFLAGS="$pgac_save_CFLAGS" +CC="$pgac_save_CC" fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $pgac_cv_prog_cc_cflags__Wunused_command_line_argument" >&5 -$as_echo "$pgac_cv_prog_cc_cflags__Wunused_command_line_argument" >&6; } -if test x"$pgac_cv_prog_cc_cflags__Wunused_command_line_argument" = x"yes"; then - NOT_THE_CFLAGS="${NOT_THE_CFLAGS} -Wunused-command-line-argument" +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $pgac_cv_prog_CC_cflags_pOlibmerrno" >&5 +$as_echo "$pgac_cv_prog_CC_cflags_pOlibmerrno" >&6; } +if test x"$pgac_cv_prog_CC_cflags_pOlibmerrno" = x"yes"; then + CFLAGS="${CFLAGS} +Olibmerrno" fi - if test -n "$NOT_THE_CFLAGS"; then - CFLAGS="$CFLAGS -Wno-unused-command-line-argument" - fi -elif test "$ICC" = yes; then - # Intel's compiler has a bug/misoptimization in checking for - # division by NAN (NaN == 0), -mp1 fixes it, so add it to the CFLAGS. - { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether $CC supports -mp1" >&5 -$as_echo_n "checking whether $CC supports -mp1... " >&6; } -if ${pgac_cv_prog_cc_cflags__mp1+:} false; then : + + { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether ${CXX} supports +Olibmerrno, for CXXFLAGS" >&5 +$as_echo_n "checking whether ${CXX} supports +Olibmerrno, for CXXFLAGS... " >&6; } +if ${pgac_cv_prog_CXX_cxxflags_pOlibmerrno+:} false; then : + $as_echo_n "(cached) " >&6 +else + pgac_save_CXXFLAGS=$CXXFLAGS +pgac_save_CXX=$CXX +CXX=${CXX} +CXXFLAGS="${CXXFLAGS} +Olibmerrno" +ac_save_cxx_werror_flag=$ac_cxx_werror_flag +ac_cxx_werror_flag=yes +ac_ext=cpp +ac_cpp='$CXXCPP $CPPFLAGS' +ac_compile='$CXX -c $CXXFLAGS $CPPFLAGS conftest.$ac_ext >&5' +ac_link='$CXX -o conftest$ac_exeext $CXXFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' +ac_compiler_gnu=$ac_cv_cxx_compiler_gnu + +cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. 
*/ + +int +main () +{ + + ; + return 0; +} +_ACEOF +if ac_fn_cxx_try_compile "$LINENO"; then : + pgac_cv_prog_CXX_cxxflags_pOlibmerrno=yes +else + pgac_cv_prog_CXX_cxxflags_pOlibmerrno=no +fi +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext +ac_ext=c +ac_cpp='$CPP $CPPFLAGS' +ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' +ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' +ac_compiler_gnu=$ac_cv_c_compiler_gnu + +ac_cxx_werror_flag=$ac_save_cxx_werror_flag +CXXFLAGS="$pgac_save_CXXFLAGS" +CXX="$pgac_save_CXX" +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $pgac_cv_prog_CXX_cxxflags_pOlibmerrno" >&5 +$as_echo "$pgac_cv_prog_CXX_cxxflags_pOlibmerrno" >&6; } +if test x"$pgac_cv_prog_CXX_cxxflags_pOlibmerrno" = x"yes"; then + CXXFLAGS="${CXXFLAGS} +Olibmerrno" +fi + + +fi + +CFLAGS_VECTOR=$CFLAGS_VECTOR + + +# Determine flags used to emit bitcode for JIT inlining. Need to test +# for behaviour changing compiler flags, to keep compatibility with +# compiler used for normal postgres code. +if test "$with_llvm" = yes ; then + CLANGXX="$CLANG -xc++" + + { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether ${CLANG} supports -fno-strict-aliasing, for BITCODE_CFLAGS" >&5 +$as_echo_n "checking whether ${CLANG} supports -fno-strict-aliasing, for BITCODE_CFLAGS... " >&6; } +if ${pgac_cv_prog_CLANG_cflags__fno_strict_aliasing+:} false; then : $as_echo_n "(cached) " >&6 else pgac_save_CFLAGS=$CFLAGS -CFLAGS="$pgac_save_CFLAGS -mp1" +pgac_save_CC=$CC +CC=${CLANG} +CFLAGS="${BITCODE_CFLAGS} -fno-strict-aliasing" ac_save_c_werror_flag=$ac_c_werror_flag ac_c_werror_flag=yes cat confdefs.h - <<_ACEOF >conftest.$ac_ext @@ -4895,28 +6682,80 @@ main () } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : - pgac_cv_prog_cc_cflags__mp1=yes + pgac_cv_prog_CLANG_cflags__fno_strict_aliasing=yes else - pgac_cv_prog_cc_cflags__mp1=no + pgac_cv_prog_CLANG_cflags__fno_strict_aliasing=no fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext ac_c_werror_flag=$ac_save_c_werror_flag CFLAGS="$pgac_save_CFLAGS" +CC="$pgac_save_CC" +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $pgac_cv_prog_CLANG_cflags__fno_strict_aliasing" >&5 +$as_echo "$pgac_cv_prog_CLANG_cflags__fno_strict_aliasing" >&6; } +if test x"$pgac_cv_prog_CLANG_cflags__fno_strict_aliasing" = x"yes"; then + BITCODE_CFLAGS="${BITCODE_CFLAGS} -fno-strict-aliasing" +fi + + { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether ${CLANGXX} supports -fno-strict-aliasing, for BITCODE_CXXFLAGS" >&5 +$as_echo_n "checking whether ${CLANGXX} supports -fno-strict-aliasing, for BITCODE_CXXFLAGS... " >&6; } +if ${pgac_cv_prog_CLANGXX_cxxflags__fno_strict_aliasing+:} false; then : + $as_echo_n "(cached) " >&6 +else + pgac_save_CXXFLAGS=$CXXFLAGS +pgac_save_CXX=$CXX +CXX=${CLANGXX} +CXXFLAGS="${BITCODE_CXXFLAGS} -fno-strict-aliasing" +ac_save_cxx_werror_flag=$ac_cxx_werror_flag +ac_cxx_werror_flag=yes +ac_ext=cpp +ac_cpp='$CXXCPP $CPPFLAGS' +ac_compile='$CXX -c $CXXFLAGS $CPPFLAGS conftest.$ac_ext >&5' +ac_link='$CXX -o conftest$ac_exeext $CXXFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' +ac_compiler_gnu=$ac_cv_cxx_compiler_gnu + +cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. 
*/ + +int +main () +{ + + ; + return 0; +} +_ACEOF +if ac_fn_cxx_try_compile "$LINENO"; then : + pgac_cv_prog_CLANGXX_cxxflags__fno_strict_aliasing=yes +else + pgac_cv_prog_CLANGXX_cxxflags__fno_strict_aliasing=no +fi +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext +ac_ext=c +ac_cpp='$CPP $CPPFLAGS' +ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' +ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' +ac_compiler_gnu=$ac_cv_c_compiler_gnu + +ac_cxx_werror_flag=$ac_save_cxx_werror_flag +CXXFLAGS="$pgac_save_CXXFLAGS" +CXX="$pgac_save_CXX" fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $pgac_cv_prog_cc_cflags__mp1" >&5 -$as_echo "$pgac_cv_prog_cc_cflags__mp1" >&6; } -if test x"$pgac_cv_prog_cc_cflags__mp1" = x"yes"; then - CFLAGS="$CFLAGS -mp1" +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $pgac_cv_prog_CLANGXX_cxxflags__fno_strict_aliasing" >&5 +$as_echo "$pgac_cv_prog_CLANGXX_cxxflags__fno_strict_aliasing" >&6; } +if test x"$pgac_cv_prog_CLANGXX_cxxflags__fno_strict_aliasing" = x"yes"; then + BITCODE_CXXFLAGS="${BITCODE_CXXFLAGS} -fno-strict-aliasing" fi - # Make sure strict aliasing is off (though this is said to be the default) - { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether $CC supports -fno-strict-aliasing" >&5 -$as_echo_n "checking whether $CC supports -fno-strict-aliasing... " >&6; } -if ${pgac_cv_prog_cc_cflags__fno_strict_aliasing+:} false; then : + { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether ${CLANG} supports -fwrapv, for BITCODE_CFLAGS" >&5 +$as_echo_n "checking whether ${CLANG} supports -fwrapv, for BITCODE_CFLAGS... " >&6; } +if ${pgac_cv_prog_CLANG_cflags__fwrapv+:} false; then : $as_echo_n "(cached) " >&6 else pgac_save_CFLAGS=$CFLAGS -CFLAGS="$pgac_save_CFLAGS -fno-strict-aliasing" +pgac_save_CC=$CC +CC=${CLANG} +CFLAGS="${BITCODE_CFLAGS} -fwrapv" ac_save_c_werror_flag=$ac_c_werror_flag ac_c_werror_flag=yes cat confdefs.h - <<_ACEOF >conftest.$ac_ext @@ -4931,31 +6770,38 @@ main () } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : - pgac_cv_prog_cc_cflags__fno_strict_aliasing=yes + pgac_cv_prog_CLANG_cflags__fwrapv=yes else - pgac_cv_prog_cc_cflags__fno_strict_aliasing=no + pgac_cv_prog_CLANG_cflags__fwrapv=no fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext ac_c_werror_flag=$ac_save_c_werror_flag CFLAGS="$pgac_save_CFLAGS" +CC="$pgac_save_CC" fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $pgac_cv_prog_cc_cflags__fno_strict_aliasing" >&5 -$as_echo "$pgac_cv_prog_cc_cflags__fno_strict_aliasing" >&6; } -if test x"$pgac_cv_prog_cc_cflags__fno_strict_aliasing" = x"yes"; then - CFLAGS="$CFLAGS -fno-strict-aliasing" +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $pgac_cv_prog_CLANG_cflags__fwrapv" >&5 +$as_echo "$pgac_cv_prog_CLANG_cflags__fwrapv" >&6; } +if test x"$pgac_cv_prog_CLANG_cflags__fwrapv" = x"yes"; then + BITCODE_CFLAGS="${BITCODE_CFLAGS} -fwrapv" fi -elif test "$PORTNAME" = "aix"; then - # AIX's xlc has to have strict aliasing turned off too - { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether $CC supports -qnoansialias" >&5 -$as_echo_n "checking whether $CC supports -qnoansialias... " >&6; } -if ${pgac_cv_prog_cc_cflags__qnoansialias+:} false; then : + { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether ${CLANGXX} supports -fwrapv, for BITCODE_CXXFLAGS" >&5 +$as_echo_n "checking whether ${CLANGXX} supports -fwrapv, for BITCODE_CXXFLAGS... 
" >&6; } +if ${pgac_cv_prog_CLANGXX_cxxflags__fwrapv+:} false; then : $as_echo_n "(cached) " >&6 else - pgac_save_CFLAGS=$CFLAGS -CFLAGS="$pgac_save_CFLAGS -qnoansialias" -ac_save_c_werror_flag=$ac_c_werror_flag -ac_c_werror_flag=yes + pgac_save_CXXFLAGS=$CXXFLAGS +pgac_save_CXX=$CXX +CXX=${CLANGXX} +CXXFLAGS="${BITCODE_CXXFLAGS} -fwrapv" +ac_save_cxx_werror_flag=$ac_cxx_werror_flag +ac_cxx_werror_flag=yes +ac_ext=cpp +ac_cpp='$CXXCPP $CPPFLAGS' +ac_compile='$CXX -c $CXXFLAGS $CPPFLAGS conftest.$ac_ext >&5' +ac_link='$CXX -o conftest$ac_exeext $CXXFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' +ac_compiler_gnu=$ac_cv_cxx_compiler_gnu + cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ @@ -4967,28 +6813,37 @@ main () return 0; } _ACEOF -if ac_fn_c_try_compile "$LINENO"; then : - pgac_cv_prog_cc_cflags__qnoansialias=yes +if ac_fn_cxx_try_compile "$LINENO"; then : + pgac_cv_prog_CLANGXX_cxxflags__fwrapv=yes else - pgac_cv_prog_cc_cflags__qnoansialias=no + pgac_cv_prog_CLANGXX_cxxflags__fwrapv=no fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext -ac_c_werror_flag=$ac_save_c_werror_flag -CFLAGS="$pgac_save_CFLAGS" +ac_ext=c +ac_cpp='$CPP $CPPFLAGS' +ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' +ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' +ac_compiler_gnu=$ac_cv_c_compiler_gnu + +ac_cxx_werror_flag=$ac_save_cxx_werror_flag +CXXFLAGS="$pgac_save_CXXFLAGS" +CXX="$pgac_save_CXX" fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $pgac_cv_prog_cc_cflags__qnoansialias" >&5 -$as_echo "$pgac_cv_prog_cc_cflags__qnoansialias" >&6; } -if test x"$pgac_cv_prog_cc_cflags__qnoansialias" = x"yes"; then - CFLAGS="$CFLAGS -qnoansialias" +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $pgac_cv_prog_CLANGXX_cxxflags__fwrapv" >&5 +$as_echo "$pgac_cv_prog_CLANGXX_cxxflags__fwrapv" >&6; } +if test x"$pgac_cv_prog_CLANGXX_cxxflags__fwrapv" = x"yes"; then + BITCODE_CXXFLAGS="${BITCODE_CXXFLAGS} -fwrapv" fi - { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether $CC supports -qlonglong" >&5 -$as_echo_n "checking whether $CC supports -qlonglong... " >&6; } -if ${pgac_cv_prog_cc_cflags__qlonglong+:} false; then : + { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether ${CLANG} supports -fexcess-precision=standard, for BITCODE_CFLAGS" >&5 +$as_echo_n "checking whether ${CLANG} supports -fexcess-precision=standard, for BITCODE_CFLAGS... 
" >&6; } +if ${pgac_cv_prog_CLANG_cflags__fexcess_precision_standard+:} false; then : $as_echo_n "(cached) " >&6 else pgac_save_CFLAGS=$CFLAGS -CFLAGS="$pgac_save_CFLAGS -qlonglong" +pgac_save_CC=$CC +CC=${CLANG} +CFLAGS="${BITCODE_CFLAGS} -fexcess-precision=standard" ac_save_c_werror_flag=$ac_c_werror_flag ac_c_werror_flag=yes cat confdefs.h - <<_ACEOF >conftest.$ac_ext @@ -5003,32 +6858,38 @@ main () } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : - pgac_cv_prog_cc_cflags__qlonglong=yes + pgac_cv_prog_CLANG_cflags__fexcess_precision_standard=yes else - pgac_cv_prog_cc_cflags__qlonglong=no + pgac_cv_prog_CLANG_cflags__fexcess_precision_standard=no fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext ac_c_werror_flag=$ac_save_c_werror_flag CFLAGS="$pgac_save_CFLAGS" +CC="$pgac_save_CC" fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $pgac_cv_prog_cc_cflags__qlonglong" >&5 -$as_echo "$pgac_cv_prog_cc_cflags__qlonglong" >&6; } -if test x"$pgac_cv_prog_cc_cflags__qlonglong" = x"yes"; then - CFLAGS="$CFLAGS -qlonglong" +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $pgac_cv_prog_CLANG_cflags__fexcess_precision_standard" >&5 +$as_echo "$pgac_cv_prog_CLANG_cflags__fexcess_precision_standard" >&6; } +if test x"$pgac_cv_prog_CLANG_cflags__fexcess_precision_standard" = x"yes"; then + BITCODE_CFLAGS="${BITCODE_CFLAGS} -fexcess-precision=standard" fi -elif test "$PORTNAME" = "hpux"; then - # On some versions of HP-UX, libm functions do not set errno by default. - # Fix that by using +Olibmerrno if the compiler recognizes it. - { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether $CC supports +Olibmerrno" >&5 -$as_echo_n "checking whether $CC supports +Olibmerrno... " >&6; } -if ${pgac_cv_prog_cc_cflags_pOlibmerrno+:} false; then : + { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether ${CLANGXX} supports -fexcess-precision=standard, for BITCODE_CXXFLAGS" >&5 +$as_echo_n "checking whether ${CLANGXX} supports -fexcess-precision=standard, for BITCODE_CXXFLAGS... " >&6; } +if ${pgac_cv_prog_CLANGXX_cxxflags__fexcess_precision_standard+:} false; then : $as_echo_n "(cached) " >&6 else - pgac_save_CFLAGS=$CFLAGS -CFLAGS="$pgac_save_CFLAGS +Olibmerrno" -ac_save_c_werror_flag=$ac_c_werror_flag -ac_c_werror_flag=yes + pgac_save_CXXFLAGS=$CXXFLAGS +pgac_save_CXX=$CXX +CXX=${CLANGXX} +CXXFLAGS="${BITCODE_CXXFLAGS} -fexcess-precision=standard" +ac_save_cxx_werror_flag=$ac_cxx_werror_flag +ac_cxx_werror_flag=yes +ac_ext=cpp +ac_cpp='$CXXCPP $CPPFLAGS' +ac_compile='$CXX -c $CXXFLAGS $CPPFLAGS conftest.$ac_ext >&5' +ac_link='$CXX -o conftest$ac_exeext $CXXFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' +ac_compiler_gnu=$ac_cv_cxx_compiler_gnu + cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. 
*/ @@ -5040,35 +6901,44 @@ main () return 0; } _ACEOF -if ac_fn_c_try_compile "$LINENO"; then : - pgac_cv_prog_cc_cflags_pOlibmerrno=yes +if ac_fn_cxx_try_compile "$LINENO"; then : + pgac_cv_prog_CLANGXX_cxxflags__fexcess_precision_standard=yes else - pgac_cv_prog_cc_cflags_pOlibmerrno=no + pgac_cv_prog_CLANGXX_cxxflags__fexcess_precision_standard=no fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext -ac_c_werror_flag=$ac_save_c_werror_flag -CFLAGS="$pgac_save_CFLAGS" +ac_ext=c +ac_cpp='$CPP $CPPFLAGS' +ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' +ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' +ac_compiler_gnu=$ac_cv_c_compiler_gnu + +ac_cxx_werror_flag=$ac_save_cxx_werror_flag +CXXFLAGS="$pgac_save_CXXFLAGS" +CXX="$pgac_save_CXX" fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $pgac_cv_prog_cc_cflags_pOlibmerrno" >&5 -$as_echo "$pgac_cv_prog_cc_cflags_pOlibmerrno" >&6; } -if test x"$pgac_cv_prog_cc_cflags_pOlibmerrno" = x"yes"; then - CFLAGS="$CFLAGS +Olibmerrno" +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $pgac_cv_prog_CLANGXX_cxxflags__fexcess_precision_standard" >&5 +$as_echo "$pgac_cv_prog_CLANGXX_cxxflags__fexcess_precision_standard" >&6; } +if test x"$pgac_cv_prog_CLANGXX_cxxflags__fexcess_precision_standard" = x"yes"; then + BITCODE_CXXFLAGS="${BITCODE_CXXFLAGS} -fexcess-precision=standard" fi fi -CFLAGS_VECTOR=$CFLAGS_VECTOR - - # supply -g if --enable-debug if test "$enable_debug" = yes && test "$ac_cv_prog_cc_g" = yes; then CFLAGS="$CFLAGS -g" fi +if test "$enable_debug" = yes && test "$ac_cv_prog_cxx_g" = yes; then + CXXFLAGS="$CXXFLAGS -g" +fi + # enable code coverage if --enable-coverage if test "$enable_coverage" = yes; then if test "$GCC" = yes; then CFLAGS="$CFLAGS -fprofile-arcs -ftest-coverage" + CXXFLAGS="$CXXFLAGS -fprofile-arcs -ftest-coverage" else as_fn_error $? "--enable-coverage is supported only when using GCC" "$LINENO" 5 fi @@ -5081,6 +6951,7 @@ if test "$enable_profiling" = yes && test "$ac_cv_prog_cc_g" = yes; then $as_echo "#define PROFILE_PID_DIR 1" >>confdefs.h CFLAGS="$CFLAGS -pg $PLATFORM_PROFILE_FLAGS" + CXXFLAGS="$CXXFLAGS -pg $PLATFORM_PROFILE_FLAGS" else as_fn_error $? "--enable-profiling is supported only when using GCC" "$LINENO" 5 fi @@ -5091,12 +6962,21 @@ if test "$PORTNAME" = "win32"; then CPPFLAGS="$CPPFLAGS -I$srcdir/src/include/port/win32 -DEXEC_BACKEND" fi -# Now that we're done automatically adding stuff to CFLAGS, put back the +# Now that we're done automatically adding stuff to C[XX]FLAGS, put back the # user-specified flags (if any) at the end. This lets users override # the automatic additions. CFLAGS="$CFLAGS $user_CFLAGS" +CXXFLAGS="$CXXFLAGS $user_CXXFLAGS" +BITCODE_CFLAGS="$BITCODE_CFLAGS $user_BITCODE_CFLAGS" +BITCODE_CXXFLAGS="$BITCODE_CXXFLAGS $user_BITCODE_CXXFLAGS" + +BITCODE_CFLAGS=$BITCODE_CFLAGS + +BITCODE_CXXFLAGS=$BITCODE_CXXFLAGS + # Check if the compiler still works with the final flag settings +# (note, we're not checking that for CXX, which is optional) { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether the C compiler still works" >&5 $as_echo_n "checking whether the C compiler still works... " >&6; } cat confdefs.h - <<_ACEOF >conftest.$ac_ext @@ -5144,6 +7024,39 @@ fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext fi +# Defend against clang being used on x86-32 without SSE2 enabled. 
As current +# versions of clang do not understand -fexcess-precision=standard, the use of +# x87 floating point operations leads to problems like isinf possibly returning +# false for a value that is infinite when converted from the 80bit register to +# the 8byte memory representation. +# +# Only perform the test if the compiler doesn't understand +# -fexcess-precision=standard, that way a potentially fixed compiler will work +# automatically. +if test "$pgac_cv_prog_CC_cflags__fexcess_precision_standard" = no; then +cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + +int +main () +{ + +#if defined(__clang__) && defined(__i386__) && !defined(__SSE2_MATH__) +choke me +#endif + + ; + return 0; +} +_ACEOF +if ac_fn_c_try_compile "$LINENO"; then : + +else + as_fn_error $? "Compiling PostgreSQL with clang, on 32bit x86, requires SSE2 support. Use -msse2 or use gcc." "$LINENO" 5 +fi +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext +fi + ac_ext=c ac_cpp='$CPP $CPPFLAGS' ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' @@ -5843,6 +7756,7 @@ $as_echo "$with_gssapi" >&6; } + # # Kerberos configuration parameters # @@ -5870,6 +7784,7 @@ fi + cat >>confdefs.h <<_ACEOF #define PG_KRB_SRVNAM "$with_krb_srvnam" _ACEOF @@ -5981,6 +7896,7 @@ fi $as_echo "$with_ldap" >&6; } + # # Bonjour # @@ -7858,6 +9774,15 @@ You might have to rebuild your Perl installation. Refer to the documentation for details. Use --without-perl to disable building PL/Perl." "$LINENO" 5 fi + # On most platforms, archlibexp is also where the Perl include files live ... + perl_includespec="-I$perl_archlibexp/CORE" + # ... but on newer macOS versions, we must use -iwithsysroot to look + # under $PG_SYSROOT + if test \! -f "$perl_archlibexp/CORE/perl.h" ; then + if test -f "$PG_SYSROOT$perl_archlibexp/CORE/perl.h" ; then + perl_includespec="-iwithsysroot $perl_archlibexp/CORE" + fi + fi { $as_echo "$as_me:${as_lineno-$LINENO}: checking for CFLAGS recommended by Perl" >&5 $as_echo_n "checking for CFLAGS recommended by Perl... " >&6; } @@ -7874,12 +9799,19 @@ $as_echo "$perl_embed_ccflags" >&6; } { $as_echo "$as_me:${as_lineno-$LINENO}: checking for flags to link embedded Perl" >&5 $as_echo_n "checking for flags to link embedded Perl... 
" >&6; } if test "$PORTNAME" = "win32" ; then -perl_lib=`basename $perl_archlibexp/CORE/perl[5-9]*.lib .lib` -test -e "$perl_archlibexp/CORE/$perl_lib.lib" && perl_embed_ldflags="-L$perl_archlibexp/CORE -l$perl_lib" + perl_lib=`basename $perl_archlibexp/CORE/perl[5-9]*.lib .lib` + if test -e "$perl_archlibexp/CORE/$perl_lib.lib"; then + perl_embed_ldflags="-L$perl_archlibexp/CORE -l$perl_lib" + else + perl_lib=`basename $perl_archlibexp/CORE/libperl[5-9]*.a .a | sed 's/^lib//'` + if test -e "$perl_archlibexp/CORE/lib$perl_lib.a"; then + perl_embed_ldflags="-L$perl_archlibexp/CORE -l$perl_lib" + fi + fi else -pgac_tmp1=`$PERL -MExtUtils::Embed -e ldopts` -pgac_tmp2=`$PERL -MConfig -e 'print $Config{ccdlflags}'` -perl_embed_ldflags=`echo X"$pgac_tmp1" | sed -e "s/^X//" -e "s%$pgac_tmp2%%" -e "s/ -arch [-a-zA-Z0-9_]*//g"` + pgac_tmp1=`$PERL -MExtUtils::Embed -e ldopts` + pgac_tmp2=`$PERL -MConfig -e 'print $Config{ccdlflags}'` + perl_embed_ldflags=`echo X"$pgac_tmp1" | sed -e "s/^X//" -e "s%$pgac_tmp2%%" -e "s/ -arch [-a-zA-Z0-9_]*//g"` fi if test -z "$perl_embed_ldflags" ; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 @@ -7954,6 +9886,18 @@ if test x"$PYTHON" = x""; then fi +python_fullversion=`${PYTHON} -c "import sys; print(sys.version)" | sed q` +{ $as_echo "$as_me:${as_lineno-$LINENO}: using python $python_fullversion" >&5 +$as_echo "$as_me: using python $python_fullversion" >&6;} +# python_fullversion is typically n.n.n plus some trailing junk +python_majorversion=`echo "$python_fullversion" | sed 's/^\([0-9]*\).*/\1/'` +python_minorversion=`echo "$python_fullversion" | sed 's/^[0-9]*\.\([0-9]*\).*/\1/'` +python_version=`echo "$python_fullversion" | sed 's/^\([0-9]*\.[0-9]*\).*/\1/'` +# Reject unsupported Python versions as soon as practical. +if test "$python_majorversion" -lt 3 -a "$python_minorversion" -lt 4; then + as_fn_error $? "Python version $python_version is too old (version 2.4 or later is required)" "$LINENO" 5 +fi + { $as_echo "$as_me:${as_lineno-$LINENO}: checking for Python distutils module" >&5 $as_echo_n "checking for Python distutils module... " >&6; } if "${PYTHON}" -c 'import distutils' 2>&5 @@ -7965,20 +9909,13 @@ else $as_echo "no" >&6; } as_fn_error $? "distutils module not found" "$LINENO" 5 fi + { $as_echo "$as_me:${as_lineno-$LINENO}: checking Python configuration directory" >&5 $as_echo_n "checking Python configuration directory... " >&6; } -python_majorversion=`${PYTHON} -c "import sys; print(sys.version[0])"` -python_minorversion=`${PYTHON} -c "import sys; print(sys.version[2])"` -python_version=`${PYTHON} -c "import sys; print(sys.version[:3])"` python_configdir=`${PYTHON} -c "import distutils.sysconfig; print(' '.join(filter(None,distutils.sysconfig.get_config_vars('LIBPL'))))"` { $as_echo "$as_me:${as_lineno-$LINENO}: result: $python_configdir" >&5 $as_echo "$python_configdir" >&6; } -# Reject unsupported Python versions as soon as practical. -if test "$python_majorversion" -lt 3 -a "$python_minorversion" -lt 4; then - as_fn_error $? "Python version $python_version is too old (version 2.4 or later is required)" "$LINENO" 5 -fi - { $as_echo "$as_me:${as_lineno-$LINENO}: checking Python include directories" >&5 $as_echo_n "checking Python include directories... " >&6; } python_includespec=`${PYTHON} -c " @@ -8856,12 +10793,10 @@ else int main () { -#ifndef _AIX -int strerror_r(int, char *, size_t); -#else -/* Older AIX has 'int' for the third argument so we don't test the args. 
*/ -int strerror_r(); -#endif +char buf[100]; + switch (strerror_r(1, buf, sizeof(buf))) + { case 0: break; default: break; } + ; return 0; } @@ -9170,67 +11105,6 @@ if test "$ac_res" != no; then : fi -# We only use libld in port/dynloader/aix.c -case $host_os in - aix*) - { $as_echo "$as_me:${as_lineno-$LINENO}: checking for library containing ldopen" >&5 -$as_echo_n "checking for library containing ldopen... " >&6; } -if ${ac_cv_search_ldopen+:} false; then : - $as_echo_n "(cached) " >&6 -else - ac_func_search_save_LIBS=$LIBS -cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. */ - -/* Override any GCC internal prototype to avoid an error. - Use char because int might match the return type of a GCC - builtin and then its argument prototype would still apply. */ -#ifdef __cplusplus -extern "C" -#endif -char ldopen (); -int -main () -{ -return ldopen (); - ; - return 0; -} -_ACEOF -for ac_lib in '' ld; do - if test -z "$ac_lib"; then - ac_res="none required" - else - ac_res=-l$ac_lib - LIBS="-l$ac_lib $ac_func_search_save_LIBS" - fi - if ac_fn_c_try_link "$LINENO"; then : - ac_cv_search_ldopen=$ac_res -fi -rm -f core conftest.err conftest.$ac_objext \ - conftest$ac_exeext - if ${ac_cv_search_ldopen+:} false; then : - break -fi -done -if ${ac_cv_search_ldopen+:} false; then : - -else - ac_cv_search_ldopen=no -fi -rm conftest.$ac_ext -LIBS=$ac_func_search_save_LIBS -fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_search_ldopen" >&5 -$as_echo "$ac_cv_search_ldopen" >&6; } -ac_res=$ac_cv_search_ldopen -if test "$ac_res" != no; then : - test "$ac_res" = "none required" || LIBS="$ac_res $LIBS" - -fi - - ;; -esac { $as_echo "$as_me:${as_lineno-$LINENO}: checking for library containing getopt_long" >&5 $as_echo_n "checking for library containing getopt_long... " >&6; } if ${ac_cv_search_getopt_long+:} false; then : @@ -10174,12 +12048,13 @@ else fi fi - for ac_func in SSL_get_current_compression + for ac_func in SSL_clear_options SSL_get_current_compression X509_get_signature_nid do : - ac_fn_c_check_func "$LINENO" "SSL_get_current_compression" "ac_cv_func_SSL_get_current_compression" -if test "x$ac_cv_func_SSL_get_current_compression" = xyes; then : + as_ac_var=`$as_echo "ac_cv_func_$ac_func" | $as_tr_sh` +ac_fn_c_check_func "$LINENO" "$ac_func" "$as_ac_var" +if eval test \"x\$"$as_ac_var"\" = x"yes"; then : cat >>confdefs.h <<_ACEOF -#define HAVE_SSL_GET_CURRENT_COMPRESSION 1 +#define `$as_echo "HAVE_$ac_func" | $as_tr_cpp` 1 _ACEOF fi @@ -10473,6 +12348,17 @@ fi else LDAP_LIBS_FE="-lldap $EXTRA_LDAP_LIBS" fi + for ac_func in ldap_initialize +do : + ac_fn_c_check_func "$LINENO" "ldap_initialize" "ac_cv_func_ldap_initialize" +if test "x$ac_cv_func_ldap_initialize" = xyes; then : + cat >>confdefs.h <<_ACEOF +#define HAVE_LDAP_INITIALIZE 1 +_ACEOF + +fi +done + else { $as_echo "$as_me:${as_lineno-$LINENO}: checking for ldap_bind in -lwldap32" >&5 $as_echo_n "checking for ldap_bind in -lwldap32... " >&6; } @@ -10732,7 +12618,101 @@ fi ## Header files ## -for ac_header in atomic.h crypt.h dld.h fp_class.h getopt.h ieeefp.h ifaddrs.h langinfo.h mbarrier.h poll.h sys/epoll.h sys/ipc.h sys/pstat.h sys/resource.h sys/select.h sys/sem.h sys/shm.h sys/sockio.h sys/tas.h sys/un.h termios.h ucred.h utime.h wchar.h wctype.h +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for stdbool.h that conforms to C99" >&5 +$as_echo_n "checking for stdbool.h that conforms to C99... 
" >&6; } +if ${ac_cv_header_stdbool_h+:} false; then : + $as_echo_n "(cached) " >&6 +else + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + + #include + #ifndef bool + "error: bool is not defined" + #endif + #ifndef false + "error: false is not defined" + #endif + #if false + "error: false is not 0" + #endif + #ifndef true + "error: true is not defined" + #endif + #if true != 1 + "error: true is not 1" + #endif + #ifndef __bool_true_false_are_defined + "error: __bool_true_false_are_defined is not defined" + #endif + + struct s { _Bool s: 1; _Bool t; } s; + + char a[true == 1 ? 1 : -1]; + char b[false == 0 ? 1 : -1]; + char c[__bool_true_false_are_defined == 1 ? 1 : -1]; + char d[(bool) 0.5 == true ? 1 : -1]; + /* See body of main program for 'e'. */ + char f[(_Bool) 0.0 == false ? 1 : -1]; + char g[true]; + char h[sizeof (_Bool)]; + char i[sizeof s.t]; + enum { j = false, k = true, l = false * true, m = true * 256 }; + /* The following fails for + HP aC++/ANSI C B3910B A.05.55 [Dec 04 2003]. */ + _Bool n[m]; + char o[sizeof n == m * sizeof n[0] ? 1 : -1]; + char p[-1 - (_Bool) 0 < 0 && -1 - (bool) 0 < 0 ? 1 : -1]; + /* Catch a bug in an HP-UX C compiler. See + http://gcc.gnu.org/ml/gcc-patches/2003-12/msg02303.html + http://lists.gnu.org/archive/html/bug-coreutils/2005-11/msg00161.html + */ + _Bool q = true; + _Bool *pq = &q; + +int +main () +{ + + bool e = &s; + *pq |= q; + *pq |= ! q; + /* Refer to every declared value, to avoid compiler optimizations. */ + return (!a + !b + !c + !d + !e + !f + !g + !h + !i + !!j + !k + !!l + + !m + !n + !o + !p + !q + !pq); + + ; + return 0; +} +_ACEOF +if ac_fn_c_try_compile "$LINENO"; then : + ac_cv_header_stdbool_h=yes +else + ac_cv_header_stdbool_h=no +fi +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_header_stdbool_h" >&5 +$as_echo "$ac_cv_header_stdbool_h" >&6; } + ac_fn_c_check_type "$LINENO" "_Bool" "ac_cv_type__Bool" "$ac_includes_default" +if test "x$ac_cv_type__Bool" = xyes; then : + +cat >>confdefs.h <<_ACEOF +#define HAVE__BOOL 1 +_ACEOF + + +fi + + +if test $ac_cv_header_stdbool_h = yes; then + +$as_echo "#define HAVE_STDBOOL_H 1" >>confdefs.h + +fi + + +for ac_header in atomic.h copyfile.h crypt.h fp_class.h getopt.h ieeefp.h ifaddrs.h langinfo.h mbarrier.h poll.h sys/epoll.h sys/ipc.h sys/prctl.h sys/procctl.h sys/pstat.h sys/resource.h sys/select.h sys/sem.h sys/shm.h sys/sockio.h sys/tas.h sys/un.h termios.h ucred.h utime.h wchar.h wctype.h do : as_ac_Header=`$as_echo "ac_cv_header_$ac_header" | $as_tr_sh` ac_fn_c_check_header_mongrel "$LINENO" "$ac_header" "$as_ac_Header" "$ac_includes_default" @@ -11604,28 +13584,57 @@ $as_echo_n "checking for printf format archetype... " >&6; } if ${pgac_cv_printf_archetype+:} false; then : $as_echo_n "(cached) " >&6 else - ac_save_c_werror_flag=$ac_c_werror_flag + pgac_cv_printf_archetype=gnu_printf +ac_save_c_werror_flag=$ac_c_werror_flag ac_c_werror_flag=yes cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ -extern int -pgac_write(int ignore, const char *fmt,...) -__attribute__((format(gnu_printf, 2, 3))); +extern void pgac_write(int ignore, const char *fmt,...) 
+__attribute__((format($pgac_cv_printf_archetype, 2, 3))); int main () { +pgac_write(0, "error %s: %m", "foo"); + ; + return 0; +} +_ACEOF +if ac_fn_c_try_compile "$LINENO"; then : + ac_archetype_ok=yes +else + ac_archetype_ok=no +fi +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext +ac_c_werror_flag=$ac_save_c_werror_flag +if [ "$ac_archetype_ok" = no ]; then + pgac_cv_printf_archetype=__syslog__ + ac_save_c_werror_flag=$ac_c_werror_flag +ac_c_werror_flag=yes +cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ +extern void pgac_write(int ignore, const char *fmt,...) +__attribute__((format($pgac_cv_printf_archetype, 2, 3))); +int +main () +{ +pgac_write(0, "error %s: %m", "foo"); ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : - pgac_cv_printf_archetype=gnu_printf + ac_archetype_ok=yes else - pgac_cv_printf_archetype=printf + ac_archetype_ok=no fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext ac_c_werror_flag=$ac_save_c_werror_flag + + if [ "$ac_archetype_ok" = no ]; then + pgac_cv_printf_archetype=printf + fi +fi fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $pgac_cv_printf_archetype" >&5 $as_echo "$pgac_cv_printf_archetype" >&6; } @@ -11870,6 +13879,30 @@ if test x"$pgac_cv__types_compatible" = xyes ; then $as_echo "#define HAVE__BUILTIN_TYPES_COMPATIBLE_P 1" >>confdefs.h +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for __builtin_bswap16" >&5 +$as_echo_n "checking for __builtin_bswap16... " >&6; } +if ${pgac_cv__builtin_bswap16+:} false; then : + $as_echo_n "(cached) " >&6 +else + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ +static unsigned long int x = __builtin_bswap16(0xaabb); + +_ACEOF +if ac_fn_c_try_compile "$LINENO"; then : + pgac_cv__builtin_bswap16=yes +else + pgac_cv__builtin_bswap16=no +fi +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $pgac_cv__builtin_bswap16" >&5 +$as_echo "$pgac_cv__builtin_bswap16" >&6; } +if test x"$pgac_cv__builtin_bswap16" = xyes ; then + +$as_echo "#define HAVE__BUILTIN_BSWAP16 1" >>confdefs.h + fi { $as_echo "$as_me:${as_lineno-$LINENO}: checking for __builtin_bswap32" >&5 $as_echo_n "checking for __builtin_bswap32... " >&6; } @@ -11926,7 +13959,10 @@ if ${pgac_cv__builtin_constant_p+:} false; then : else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ -static int x; static int y[__builtin_constant_p(x) ? x : 1]; +static int x; + static int y[__builtin_constant_p(x) ? x : 1]; + static int z[__builtin_constant_p("string literal") ? 1 : x]; + _ACEOF if ac_fn_c_try_compile "$LINENO"; then : @@ -12007,38 +14043,6 @@ if test x"$pgac_cv_computed_goto" = xyes ; then $as_echo "#define HAVE_COMPUTED_GOTO 1" >>confdefs.h -fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for __VA_ARGS__" >&5 -$as_echo_n "checking for __VA_ARGS__... " >&6; } -if ${pgac_cv__va_args+:} false; then : - $as_echo_n "(cached) " >&6 -else - cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. */ -#include -int -main () -{ -#define debug(...) 
fprintf(stderr, __VA_ARGS__) -debug("%s", "blarg"); - - ; - return 0; -} -_ACEOF -if ac_fn_c_try_compile "$LINENO"; then : - pgac_cv__va_args=yes -else - pgac_cv__va_args=no -fi -rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext -fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $pgac_cv__va_args" >&5 -$as_echo "$pgac_cv__va_args" >&6; } -if test x"$pgac_cv__va_args" = xyes ; then - -$as_echo "#define HAVE__VA_ARGS 1" >>confdefs.h - fi { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether struct tm is in sys/time.h or time.h" >&5 $as_echo_n "checking whether struct tm is in sys/time.h or time.h... " >&6; } @@ -12486,12 +14490,77 @@ if test "$pgac_cv_type_locale_t" != no; then $as_echo "#define HAVE_LOCALE_T 1" >>confdefs.h fi -if test "$pgac_cv_type_locale_t" = 'yes (in xlocale.h)'; then +if test "$pgac_cv_type_locale_t" = 'yes (in xlocale.h)'; then + +$as_echo "#define LOCALE_T_IN_XLOCALE 1" >>confdefs.h + +fi + +# MSVC doesn't cope well with defining restrict to __restrict, the +# spelling it understands, because it conflicts with +# __declspec(restrict). Therefore we define pg_restrict to the +# appropriate definition, which presumably won't conflict. +# +# Allow platforms with buggy compilers to force restrict to not be +# used by setting $FORCE_DISABLE_RESTRICT=yes in the relevant +# template. +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for C/C++ restrict keyword" >&5 +$as_echo_n "checking for C/C++ restrict keyword... " >&6; } +if ${ac_cv_c_restrict+:} false; then : + $as_echo_n "(cached) " >&6 +else + ac_cv_c_restrict=no + # The order here caters to the fact that C++ does not require restrict. + for ac_kw in __restrict __restrict__ _Restrict restrict; do + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ +typedef int * int_ptr; + int foo (int_ptr $ac_kw ip) { + return ip[0]; + } +int +main () +{ +int s[1]; + int * $ac_kw t = s; + t[0] = 0; + return foo(t) + ; + return 0; +} +_ACEOF +if ac_fn_c_try_compile "$LINENO"; then : + ac_cv_c_restrict=$ac_kw +fi +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext + test "$ac_cv_c_restrict" != no && break + done + +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_c_restrict" >&5 +$as_echo "$ac_cv_c_restrict" >&6; } -$as_echo "#define LOCALE_T_IN_XLOCALE 1" >>confdefs.h + case $ac_cv_c_restrict in + restrict) ;; + no) $as_echo "#define restrict /**/" >>confdefs.h + ;; + *) cat >>confdefs.h <<_ACEOF +#define restrict $ac_cv_c_restrict +_ACEOF + ;; + esac +if test "$ac_cv_c_restrict" = "no" -o "x$FORCE_DISABLE_RESTRICT" = "xyes"; then + pg_restrict="" +else + pg_restrict="$ac_cv_c_restrict" fi +cat >>confdefs.h <<_ACEOF +#define pg_restrict $pg_restrict +_ACEOF + + ac_fn_c_check_type "$LINENO" "struct cmsgcred" "ac_cv_type_struct_cmsgcred" "#include #include #ifdef HAVE_SYS_UCRED_H @@ -12818,6 +14887,43 @@ if test "$ac_cv_sizeof_off_t" -lt 8 -a "$segsize" != "1"; then as_fn_error $? "Large file support is not enabled. Segment size cannot be larger than 1GB." "$LINENO" 5 fi +# The cast to long int works around a bug in the HP C Compiler +# version HP92453-01 B.11.11.23709.GP, which incorrectly rejects +# declarations like `int a3[[(sizeof (unsigned char)) >= 0]];'. +# This bug is HP SR number 8606223364. +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking size of bool" >&5 +$as_echo_n "checking size of bool... 
" >&6; } +if ${ac_cv_sizeof_bool+:} false; then : + $as_echo_n "(cached) " >&6 +else + if ac_fn_c_compute_int "$LINENO" "(long int) (sizeof (bool))" "ac_cv_sizeof_bool" "#ifdef HAVE_STDBOOL_H +#include +#endif +"; then : + +else + if test "$ac_cv_type_bool" = yes; then + { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 +$as_echo "$as_me: error: in \`$ac_pwd':" >&2;} +as_fn_error 77 "cannot compute sizeof (bool) +See \`config.log' for more details" "$LINENO" 5; } + else + ac_cv_sizeof_bool=0 + fi +fi + +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_sizeof_bool" >&5 +$as_echo "$ac_cv_sizeof_bool" >&6; } + + + +cat >>confdefs.h <<_ACEOF +#define SIZEOF_BOOL $ac_cv_sizeof_bool +_ACEOF + + + ## ## Functions, global variables @@ -13024,7 +15130,7 @@ fi LIBS_including_readline="$LIBS" LIBS=`echo "$LIBS" | sed -e 's/-ledit//g' -e 's/-lreadline//g'` -for ac_func in cbrt clock_gettime dlopen fdatasync getifaddrs getpeerucred getrlimit mbstowcs_l memmove poll pstat pthread_is_threaded_np readlink setproctitle setsid shm_open symlink sync_file_range towlower utime utimes wcstombs wcstombs_l +for ac_func in cbrt clock_gettime copyfile fdatasync getifaddrs getpeerucred getrlimit mbstowcs_l memmove poll posix_fallocate ppoll pstat pthread_is_threaded_np readlink setproctitle setproctitle_fast setsid shm_open strchrnul symlink sync_file_range utime utimes wcstombs_l do : as_ac_var=`$as_echo "ac_cv_func_$ac_func" | $as_tr_sh` ac_fn_c_check_func "$LINENO" "$ac_func" "$as_ac_var" @@ -13191,6 +15297,16 @@ fi cat >>confdefs.h <<_ACEOF #define HAVE_DECL_STRLCPY $ac_have_decl _ACEOF +ac_fn_c_check_decl "$LINENO" "strnlen" "ac_cv_have_decl_strnlen" "$ac_includes_default" +if test "x$ac_cv_have_decl_strnlen" = xyes; then : + ac_have_decl=1 +else + ac_have_decl=0 +fi + +cat >>confdefs.h <<_ACEOF +#define HAVE_DECL_STRNLEN $ac_have_decl +_ACEOF # This is probably only present on macOS, but may as well check always ac_fn_c_check_decl "$LINENO" "F_FULLFSYNC" "ac_cv_have_decl_F_FULLFSYNC" "#include @@ -13206,7 +15322,30 @@ cat >>confdefs.h <<_ACEOF _ACEOF -HAVE_IPV6=no +ac_fn_c_check_decl "$LINENO" "RTLD_GLOBAL" "ac_cv_have_decl_RTLD_GLOBAL" "#include +" +if test "x$ac_cv_have_decl_RTLD_GLOBAL" = xyes; then : + ac_have_decl=1 +else + ac_have_decl=0 +fi + +cat >>confdefs.h <<_ACEOF +#define HAVE_DECL_RTLD_GLOBAL $ac_have_decl +_ACEOF +ac_fn_c_check_decl "$LINENO" "RTLD_NOW" "ac_cv_have_decl_RTLD_NOW" "#include +" +if test "x$ac_cv_have_decl_RTLD_NOW" = xyes; then : + ac_have_decl=1 +else + ac_have_decl=0 +fi + +cat >>confdefs.h <<_ACEOF +#define HAVE_DECL_RTLD_NOW $ac_have_decl +_ACEOF + + ac_fn_c_check_type "$LINENO" "struct sockaddr_in6" "ac_cv_type_struct_sockaddr_in6" "$ac_includes_default #include " @@ -13214,11 +15353,9 @@ if test "x$ac_cv_type_struct_sockaddr_in6" = xyes; then : $as_echo "#define HAVE_IPV6 1" >>confdefs.h - HAVE_IPV6=yes fi - { $as_echo "$as_me:${as_lineno-$LINENO}: checking for PS_STRINGS" >&5 $as_echo_n "checking for PS_STRINGS... " >&6; } if ${pgac_cv_var_PS_STRINGS+:} false; then : @@ -13255,97 +15392,6 @@ $as_echo "#define HAVE_PS_STRINGS 1" >>confdefs.h fi -# We use our snprintf.c emulation if either snprintf() or vsnprintf() -# is missing. Yes, there are machines that have only one. We may -# also decide to use snprintf.c if snprintf() is present but does not -# have all the features we need --- see below. - -if test "$PORTNAME" = "win32"; then - # Win32 gets snprintf.c built unconditionally. 
- # - # To properly translate all NLS languages strings, we must support the - # *printf() %$ format, which allows *printf() arguments to be selected - # by position in the translated string. - # - # libintl versions < 0.13 use the native *printf() functions, and Win32 - # *printf() doesn't understand %$, so we must use our /port versions, - # which do understand %$. libintl versions >= 0.13 include their own - # *printf versions on Win32. The libintl 0.13 release note text is: - # - # C format strings with positions, as they arise when a translator - # needs to reorder a sentence, are now supported on all platforms. - # On those few platforms (NetBSD and Woe32) for which the native - # printf()/fprintf()/... functions don't support such format - # strings, replacements are provided through . - # - # We could use libintl >= 0.13's *printf() if we were sure that we had - # a litint >= 0.13 at runtime, but seeing that there is no clean way - # to guarantee that, it is best to just use our own, so we are sure to - # get %$ support. In include/port.h we disable the *printf() macros - # that might have been defined by libintl. - # - # We do this unconditionally whether NLS is used or not so we are sure - # that all Win32 libraries and binaries behave the same. - pgac_need_repl_snprintf=yes -else - pgac_need_repl_snprintf=no - for ac_func in snprintf -do : - ac_fn_c_check_func "$LINENO" "snprintf" "ac_cv_func_snprintf" -if test "x$ac_cv_func_snprintf" = xyes; then : - cat >>confdefs.h <<_ACEOF -#define HAVE_SNPRINTF 1 -_ACEOF - -else - pgac_need_repl_snprintf=yes -fi -done - - for ac_func in vsnprintf -do : - ac_fn_c_check_func "$LINENO" "vsnprintf" "ac_cv_func_vsnprintf" -if test "x$ac_cv_func_vsnprintf" = xyes; then : - cat >>confdefs.h <<_ACEOF -#define HAVE_VSNPRINTF 1 -_ACEOF - -else - pgac_need_repl_snprintf=yes -fi -done - -fi - - -# Check whether declares snprintf() and vsnprintf(); if not, -# include/c.h will provide declarations. Note this is a separate test -# from whether the functions exist in the C library --- there are -# systems that have the functions but don't bother to declare them :-( - -ac_fn_c_check_decl "$LINENO" "snprintf" "ac_cv_have_decl_snprintf" "$ac_includes_default" -if test "x$ac_cv_have_decl_snprintf" = xyes; then : - ac_have_decl=1 -else - ac_have_decl=0 -fi - -cat >>confdefs.h <<_ACEOF -#define HAVE_DECL_SNPRINTF $ac_have_decl -_ACEOF -ac_fn_c_check_decl "$LINENO" "vsnprintf" "ac_cv_have_decl_vsnprintf" "$ac_includes_default" -if test "x$ac_cv_have_decl_vsnprintf" = xyes; then : - ac_have_decl=1 -else - ac_have_decl=0 -fi - -cat >>confdefs.h <<_ACEOF -#define HAVE_DECL_VSNPRINTF $ac_have_decl -_ACEOF - - - { $as_echo "$as_me:${as_lineno-$LINENO}: checking for isinf" >&5 $as_echo_n "checking for isinf... 
" >&6; } if ${ac_cv_func_isinf+:} false; then : @@ -13415,6 +15461,19 @@ esac fi +ac_fn_c_check_func "$LINENO" "dlopen" "ac_cv_func_dlopen" +if test "x$ac_cv_func_dlopen" = xyes; then : + $as_echo "#define HAVE_DLOPEN 1" >>confdefs.h + +else + case " $LIBOBJS " in + *" dlopen.$ac_objext "* ) ;; + *) LIBOBJS="$LIBOBJS dlopen.$ac_objext" + ;; +esac + +fi + ac_fn_c_check_func "$LINENO" "fls" "ac_cv_func_fls" if test "x$ac_cv_func_fls" = xyes; then : $as_echo "#define HAVE_FLS 1" >>confdefs.h @@ -13480,6 +15539,32 @@ esac fi +ac_fn_c_check_func "$LINENO" "pread" "ac_cv_func_pread" +if test "x$ac_cv_func_pread" = xyes; then : + $as_echo "#define HAVE_PREAD 1" >>confdefs.h + +else + case " $LIBOBJS " in + *" pread.$ac_objext "* ) ;; + *) LIBOBJS="$LIBOBJS pread.$ac_objext" + ;; +esac + +fi + +ac_fn_c_check_func "$LINENO" "pwrite" "ac_cv_func_pwrite" +if test "x$ac_cv_func_pwrite" = xyes; then : + $as_echo "#define HAVE_PWRITE 1" >>confdefs.h + +else + case " $LIBOBJS " in + *" pwrite.$ac_objext "* ) ;; + *) LIBOBJS="$LIBOBJS pwrite.$ac_objext" + ;; +esac + +fi + ac_fn_c_check_func "$LINENO" "random" "ac_cv_func_random" if test "x$ac_cv_func_random" = xyes; then : $as_echo "#define HAVE_RANDOM 1" >>confdefs.h @@ -13519,19 +15604,6 @@ esac fi -ac_fn_c_check_func "$LINENO" "strerror" "ac_cv_func_strerror" -if test "x$ac_cv_func_strerror" = xyes; then : - $as_echo "#define HAVE_STRERROR 1" >>confdefs.h - -else - case " $LIBOBJS " in - *" strerror.$ac_objext "* ) ;; - *) LIBOBJS="$LIBOBJS strerror.$ac_objext" - ;; -esac - -fi - ac_fn_c_check_func "$LINENO" "strlcat" "ac_cv_func_strlcat" if test "x$ac_cv_func_strlcat" = xyes; then : $as_echo "#define HAVE_STRLCAT 1" >>confdefs.h @@ -13558,6 +15630,19 @@ esac fi +ac_fn_c_check_func "$LINENO" "strnlen" "ac_cv_func_strnlen" +if test "x$ac_cv_func_strnlen" = xyes; then : + $as_echo "#define HAVE_STRNLEN 1" >>confdefs.h + +else + case " $LIBOBJS " in + *" strnlen.$ac_objext "* ) ;; + *) LIBOBJS="$LIBOBJS strnlen.$ac_objext" + ;; +esac + +fi + case $host_os in @@ -13884,7 +15969,7 @@ $as_echo "#define HAVE_INT_OPTRESET 1" >>confdefs.h fi -for ac_func in strtoll strtoq +for ac_func in strtoll __strtoll strtoq do : as_ac_var=`$as_echo "ac_cv_func_$ac_func" | $as_tr_sh` ac_fn_c_check_func "$LINENO" "$ac_func" "$as_ac_var" @@ -13896,7 +15981,7 @@ _ACEOF fi done -for ac_func in strtoull strtouq +for ac_func in strtoull __strtoull strtouq do : as_ac_var=`$as_echo "ac_cv_func_$ac_func" | $as_tr_sh` ac_fn_c_check_func "$LINENO" "$ac_func" "$as_ac_var" @@ -13908,6 +15993,28 @@ _ACEOF fi done +# strto[u]ll may exist but not be declared +ac_fn_c_check_decl "$LINENO" "strtoll" "ac_cv_have_decl_strtoll" "$ac_includes_default" +if test "x$ac_cv_have_decl_strtoll" = xyes; then : + ac_have_decl=1 +else + ac_have_decl=0 +fi + +cat >>confdefs.h <<_ACEOF +#define HAVE_DECL_STRTOLL $ac_have_decl +_ACEOF +ac_fn_c_check_decl "$LINENO" "strtoull" "ac_cv_have_decl_strtoull" "$ac_includes_default" +if test "x$ac_cv_have_decl_strtoull" = xyes; then : + ac_have_decl=1 +else + ac_have_decl=0 +fi + +cat >>confdefs.h <<_ACEOF +#define HAVE_DECL_STRTOULL $ac_have_decl +_ACEOF + if test "$with_icu" = yes; then ac_save_CPPFLAGS=$CPPFLAGS @@ -14028,54 +16135,6 @@ fi # Run tests below here # -------------------- -# Force use of our snprintf if system's doesn't do arg control -# See comment above at snprintf test for details. 
-if test "$enable_nls" = yes -a "$pgac_need_repl_snprintf" = no; then - { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether snprintf supports argument control" >&5 -$as_echo_n "checking whether snprintf supports argument control... " >&6; } -if ${pgac_cv_snprintf_arg_control+:} false; then : - $as_echo_n "(cached) " >&6 -else - if test "$cross_compiling" = yes; then : - pgac_cv_snprintf_arg_control=cross -else - cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. */ -#include -#include - -int main() -{ - char buf[100]; - - /* can it swap arguments? */ - snprintf(buf, 100, "%2\$d %1\$d", 3, 4); - if (strcmp(buf, "4 3") != 0) - return 1; - return 0; -} -_ACEOF -if ac_fn_c_try_run "$LINENO"; then : - pgac_cv_snprintf_arg_control=yes -else - pgac_cv_snprintf_arg_control=no -fi -rm -f core *.core core.conftest.* gmon.out bb.out conftest$ac_exeext \ - conftest.$ac_objext conftest.beam conftest.$ac_ext -fi - - -fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $pgac_cv_snprintf_arg_control" >&5 -$as_echo "$pgac_cv_snprintf_arg_control" >&6; } - - if test $pgac_cv_snprintf_arg_control != yes ; then - pgac_need_repl_snprintf=yes - fi -fi - - - { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether long int is 64 bits" >&5 $as_echo_n "checking whether long int is 64 bits... " >&6; } @@ -14254,176 +16313,55 @@ cat >>confdefs.h <<_ACEOF _ACEOF - -if test x"$HAVE_LONG_LONG_INT_64" = xyes ; then - cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. */ - -#define INT64CONST(x) x##LL -long long int foo = INT64CONST(0x1234567890123456); - -_ACEOF -if ac_fn_c_try_compile "$LINENO"; then : - -$as_echo "#define HAVE_LL_CONSTANTS 1" >>confdefs.h - -fi -rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext -fi - - -# If we found "long int" is 64 bits, assume snprintf handles it. If -# we found we need to use "long long int", better check. We cope with -# snprintfs that use %lld, %qd, or %I64d as the format. If none of these -# work, fall back to our own snprintf emulation (which we know uses %lld). - -if test "$HAVE_LONG_LONG_INT_64" = yes ; then - if test $pgac_need_repl_snprintf = no; then - { $as_echo "$as_me:${as_lineno-$LINENO}: checking snprintf length modifier for long long int" >&5 -$as_echo_n "checking snprintf length modifier for long long int... " >&6; } -if ${pgac_cv_snprintf_long_long_int_modifier+:} false; then : - $as_echo_n "(cached) " >&6 -else - for pgac_modifier in 'll' 'q' 'I64'; do -if test "$cross_compiling" = yes; then : - pgac_cv_snprintf_long_long_int_modifier=cross; break -else - cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. */ -#include -#include -typedef long long int ac_int64; -#define INT64_FORMAT "%${pgac_modifier}d" - -ac_int64 a = 20000001; -ac_int64 b = 40000005; - -int does_int64_snprintf_work() -{ - ac_int64 c; - char buf[100]; - - if (sizeof(ac_int64) != 8) - return 0; /* doesn't look like the right size */ - - c = a * b; - snprintf(buf, 100, INT64_FORMAT, c); - if (strcmp(buf, "800000140000005") != 0) - return 0; /* either multiply or snprintf is busted */ - return 1; -} - -int -main() { - return (! 
does_int64_snprintf_work()); -} -_ACEOF -if ac_fn_c_try_run "$LINENO"; then : - pgac_cv_snprintf_long_long_int_modifier=$pgac_modifier; break -fi -rm -f core *.core core.conftest.* gmon.out bb.out conftest$ac_exeext \ - conftest.$ac_objext conftest.beam conftest.$ac_ext -fi - -done -fi - -LONG_LONG_INT_MODIFIER='' - -case $pgac_cv_snprintf_long_long_int_modifier in - cross) { $as_echo "$as_me:${as_lineno-$LINENO}: result: cannot test (not on host machine)" >&5 -$as_echo "cannot test (not on host machine)" >&6; };; - ?*) { $as_echo "$as_me:${as_lineno-$LINENO}: result: $pgac_cv_snprintf_long_long_int_modifier" >&5 -$as_echo "$pgac_cv_snprintf_long_long_int_modifier" >&6; } - LONG_LONG_INT_MODIFIER=$pgac_cv_snprintf_long_long_int_modifier;; - *) { $as_echo "$as_me:${as_lineno-$LINENO}: result: none" >&5 -$as_echo "none" >&6; };; -esac - if test "$LONG_LONG_INT_MODIFIER" = ""; then - # Force usage of our own snprintf, since system snprintf is broken - pgac_need_repl_snprintf=yes - LONG_LONG_INT_MODIFIER='ll' - fi - else - # Here if we previously decided we needed to use our own snprintf - LONG_LONG_INT_MODIFIER='ll' - fi +# Select the printf length modifier that goes with that, too. +if test x"$pg_int64_type" = x"long long int" ; then + INT64_MODIFIER='"ll"' else - # Here if we are not using 'long long int' at all - LONG_LONG_INT_MODIFIER='l' + INT64_MODIFIER='"l"' fi -INT64_MODIFIER="\"$LONG_LONG_INT_MODIFIER\"" - cat >>confdefs.h <<_ACEOF #define INT64_MODIFIER $INT64_MODIFIER _ACEOF -# Also force use of our snprintf if the system's doesn't support the %z flag. -if test "$pgac_need_repl_snprintf" = no; then - { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether snprintf supports the %z modifier" >&5 -$as_echo_n "checking whether snprintf supports the %z modifier... " >&6; } -if ${pgac_cv_snprintf_size_t_support+:} false; then : +# has to be down here, rather than with the other builtins, because +# the test uses PG_INT64_TYPE. +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for __builtin_mul_overflow" >&5 +$as_echo_n "checking for __builtin_mul_overflow... " >&6; } +if ${pgac_cv__builtin_op_overflow+:} false; then : $as_echo_n "(cached) " >&6 -else - if test "$cross_compiling" = yes; then : - pgac_cv_snprintf_size_t_support=cross else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ -#include -#include -int main() +PG_INT64_TYPE a = 1; +PG_INT64_TYPE b = 1; +PG_INT64_TYPE result; +int oflo; + +int +main () { - char bufz[100]; - char buf64[100]; - - /* - * Print the largest unsigned number fitting in a size_t using both %zu - * and the previously-determined format for 64-bit integers. Note that - * we don't run this code unless we know snprintf handles 64-bit ints. 
- */ - bufz[0] = '\0'; /* in case snprintf fails to emit anything */ - snprintf(bufz, sizeof(bufz), "%zu", ~((size_t) 0)); - snprintf(buf64, sizeof(buf64), "%" INT64_MODIFIER "u", - (unsigned PG_INT64_TYPE) ~((size_t) 0)); - if (strcmp(bufz, buf64) != 0) - return 1; +oflo = __builtin_mul_overflow(a, b, &result); + ; return 0; } _ACEOF -if ac_fn_c_try_run "$LINENO"; then : - pgac_cv_snprintf_size_t_support=yes +if ac_fn_c_try_link "$LINENO"; then : + pgac_cv__builtin_op_overflow=yes else - pgac_cv_snprintf_size_t_support=no -fi -rm -f core *.core core.conftest.* gmon.out bb.out conftest$ac_exeext \ - conftest.$ac_objext conftest.beam conftest.$ac_ext -fi - - + pgac_cv__builtin_op_overflow=no fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $pgac_cv_snprintf_size_t_support" >&5 -$as_echo "$pgac_cv_snprintf_size_t_support" >&6; } - - if test "$pgac_cv_snprintf_size_t_support" != yes; then - pgac_need_repl_snprintf=yes - fi +rm -f core conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $pgac_cv__builtin_op_overflow" >&5 +$as_echo "$pgac_cv__builtin_op_overflow" >&6; } +if test x"$pgac_cv__builtin_op_overflow" = xyes ; then -# Now we have checked all the reasons to replace snprintf -if test $pgac_need_repl_snprintf = yes; then - -$as_echo "#define USE_REPL_SNPRINTF 1" >>confdefs.h - - case " $LIBOBJS " in - *" snprintf.$ac_objext "* ) ;; - *) LIBOBJS="$LIBOBJS snprintf.$ac_objext" - ;; -esac +$as_echo "#define HAVE__BUILTIN_OP_OVERFLOW 1" >>confdefs.h fi @@ -14824,7 +16762,10 @@ _ACEOF # Compute maximum alignment of any basic type. # We assume long's alignment is at least as strong as char, short, or int; -# but we must check long long (if it exists) and double. +# but we must check long long (if it is being used for int64) and double. +# Note that we intentionally do not consider any types wider than 64 bits, +# as allowing MAXIMUM_ALIGNOF to exceed 8 would be too much of a penalty +# for disk and memory space. MAX_ALIGNOF=$ac_cv_alignof_long if test $MAX_ALIGNOF -lt $ac_cv_alignof_double ; then @@ -14884,7 +16825,7 @@ _ACEOF fi -# Check for extensions offering the integer scalar type __int128. +# Some compilers offer a 128-bit integer scalar type. { $as_echo "$as_me:${as_lineno-$LINENO}: checking for __int128" >&5 $as_echo_n "checking for __int128... " >&6; } if ${pgac_cv__128bit_int+:} false; then : @@ -14894,12 +16835,15 @@ else /* end confdefs.h. */ /* + * We don't actually run this test, just link it to verify that any support + * functions needed for __int128 are present. + * * These are globals to discourage the compiler from folding all the * arithmetic tests down to compile-time constants. We do not have - * convenient support for 64bit literals at this point... + * convenient support for 128bit literals at this point... 
*/ __int128 a = 48828125; -__int128 b = 97656255; +__int128 b = 97656250; int main () @@ -14908,13 +16852,12 @@ main () __int128 c,d; a = (a << 12) + 1; /* 200000000001 */ b = (b << 12) + 5; /* 400000000005 */ -/* use the most relevant arithmetic ops */ +/* try the most relevant arithmetic ops */ c = a * b; d = (c + b) / b; -/* return different values, to prevent optimizations */ +/* must use the results, else compiler may optimize arithmetic away */ if (d != a+1) - return 0; -return 1; + return 1; ; return 0; @@ -14931,9 +16874,100 @@ fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $pgac_cv__128bit_int" >&5 $as_echo "$pgac_cv__128bit_int" >&6; } if test x"$pgac_cv__128bit_int" = xyes ; then + # Use of non-default alignment with __int128 tickles bugs in some compilers. + # If not cross-compiling, we can test for bugs and disable use of __int128 + # with buggy compilers. If cross-compiling, hope for the best. + # https://gcc.gnu.org/bugzilla/show_bug.cgi?id=83925 + { $as_echo "$as_me:${as_lineno-$LINENO}: checking for __int128 alignment bug" >&5 +$as_echo_n "checking for __int128 alignment bug... " >&6; } +if ${pgac_cv__128bit_int_bug+:} false; then : + $as_echo_n "(cached) " >&6 +else + if test "$cross_compiling" = yes; then : + pgac_cv__128bit_int_bug="assuming ok" +else + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + +/* This must match the corresponding code in c.h: */ +#if defined(__GNUC__) || defined(__SUNPRO_C) || defined(__IBMC__) +#define pg_attribute_aligned(a) __attribute__((aligned(a))) +#endif +typedef __int128 int128a +#if defined(pg_attribute_aligned) +pg_attribute_aligned(8) +#endif +; +int128a holder; +void pass_by_val(void *buffer, int128a par) { holder = par; } + +int +main () +{ + +long int i64 = 97656225L << 12; +int128a q; +pass_by_val(main, (int128a) i64); +q = (int128a) i64; +if (q != holder) + return 1; + + ; + return 0; +} +_ACEOF +if ac_fn_c_try_run "$LINENO"; then : + pgac_cv__128bit_int_bug=ok +else + pgac_cv__128bit_int_bug=broken +fi +rm -f core *.core core.conftest.* gmon.out bb.out conftest$ac_exeext \ + conftest.$ac_objext conftest.beam conftest.$ac_ext +fi + +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $pgac_cv__128bit_int_bug" >&5 +$as_echo "$pgac_cv__128bit_int_bug" >&6; } + if test x"$pgac_cv__128bit_int_bug" != xbroken ; then $as_echo "#define PG_INT128_TYPE __int128" >>confdefs.h + # The cast to long int works around a bug in the HP C Compiler, +# see AC_CHECK_SIZEOF for more information. +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking alignment of PG_INT128_TYPE" >&5 +$as_echo_n "checking alignment of PG_INT128_TYPE... 
" >&6; } +if ${ac_cv_alignof_PG_INT128_TYPE+:} false; then : + $as_echo_n "(cached) " >&6 +else + if ac_fn_c_compute_int "$LINENO" "(long int) offsetof (ac__type_alignof_, y)" "ac_cv_alignof_PG_INT128_TYPE" "$ac_includes_default +#ifndef offsetof +# define offsetof(type, member) ((char *) &((type *) 0)->member - (char *) 0) +#endif +typedef struct { char x; PG_INT128_TYPE y; } ac__type_alignof_;"; then : + +else + if test "$ac_cv_type_PG_INT128_TYPE" = yes; then + { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 +$as_echo "$as_me: error: in \`$ac_pwd':" >&2;} +as_fn_error 77 "cannot compute alignment of PG_INT128_TYPE +See \`config.log' for more details" "$LINENO" 5; } + else + ac_cv_alignof_PG_INT128_TYPE=0 + fi +fi + +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_alignof_PG_INT128_TYPE" >&5 +$as_echo "$ac_cv_alignof_PG_INT128_TYPE" >&6; } + + + +cat >>confdefs.h <<_ACEOF +#define ALIGNOF_PG_INT128_TYPE $ac_cv_alignof_PG_INT128_TYPE +_ACEOF + + + fi fi # Check for various atomic operations now that we have checked how to declare @@ -15312,28 +17346,134 @@ if ac_fn_c_try_compile "$LINENO"; then : fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext +# Check for ARMv8 CRC Extension intrinsics to do CRC calculations. +# +# First check if __crc32c* intrinsics can be used with the default compiler +# flags. If not, check if adding -march=armv8-a+crc flag helps. +# CFLAGS_ARMV8_CRC32C is set if the extra flag is required. +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for __crc32cb, __crc32ch, __crc32cw, and __crc32cd with CFLAGS=" >&5 +$as_echo_n "checking for __crc32cb, __crc32ch, __crc32cw, and __crc32cd with CFLAGS=... " >&6; } +if ${pgac_cv_armv8_crc32c_intrinsics_+:} false; then : + $as_echo_n "(cached) " >&6 +else + pgac_save_CFLAGS=$CFLAGS +CFLAGS="$pgac_save_CFLAGS " +cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ +#include +int +main () +{ +unsigned int crc = 0; + crc = __crc32cb(crc, 0); + crc = __crc32ch(crc, 0); + crc = __crc32cw(crc, 0); + crc = __crc32cd(crc, 0); + /* return computed value, to prevent the above being optimized away */ + return crc == 0; + ; + return 0; +} +_ACEOF +if ac_fn_c_try_link "$LINENO"; then : + pgac_cv_armv8_crc32c_intrinsics_=yes +else + pgac_cv_armv8_crc32c_intrinsics_=no +fi +rm -f core conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext +CFLAGS="$pgac_save_CFLAGS" +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $pgac_cv_armv8_crc32c_intrinsics_" >&5 +$as_echo "$pgac_cv_armv8_crc32c_intrinsics_" >&6; } +if test x"$pgac_cv_armv8_crc32c_intrinsics_" = x"yes"; then + CFLAGS_ARMV8_CRC32C="" + pgac_armv8_crc32c_intrinsics=yes +fi + +if test x"$pgac_armv8_crc32c_intrinsics" != x"yes"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: checking for __crc32cb, __crc32ch, __crc32cw, and __crc32cd with CFLAGS=-march=armv8-a+crc" >&5 +$as_echo_n "checking for __crc32cb, __crc32ch, __crc32cw, and __crc32cd with CFLAGS=-march=armv8-a+crc... " >&6; } +if ${pgac_cv_armv8_crc32c_intrinsics__march_armv8_apcrc+:} false; then : + $as_echo_n "(cached) " >&6 +else + pgac_save_CFLAGS=$CFLAGS +CFLAGS="$pgac_save_CFLAGS -march=armv8-a+crc" +cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. 
*/ +#include +int +main () +{ +unsigned int crc = 0; + crc = __crc32cb(crc, 0); + crc = __crc32ch(crc, 0); + crc = __crc32cw(crc, 0); + crc = __crc32cd(crc, 0); + /* return computed value, to prevent the above being optimized away */ + return crc == 0; + ; + return 0; +} +_ACEOF +if ac_fn_c_try_link "$LINENO"; then : + pgac_cv_armv8_crc32c_intrinsics__march_armv8_apcrc=yes +else + pgac_cv_armv8_crc32c_intrinsics__march_armv8_apcrc=no +fi +rm -f core conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext +CFLAGS="$pgac_save_CFLAGS" +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $pgac_cv_armv8_crc32c_intrinsics__march_armv8_apcrc" >&5 +$as_echo "$pgac_cv_armv8_crc32c_intrinsics__march_armv8_apcrc" >&6; } +if test x"$pgac_cv_armv8_crc32c_intrinsics__march_armv8_apcrc" = x"yes"; then + CFLAGS_ARMV8_CRC32C="-march=armv8-a+crc" + pgac_armv8_crc32c_intrinsics=yes +fi + +fi + + # Select CRC-32C implementation. # -# If we are targeting a processor that has SSE 4.2 instructions, we can use the -# special CRC instructions for calculating CRC-32C. If we're not targeting such -# a processor, but we can nevertheless produce code that uses the SSE -# intrinsics, perhaps with some extra CFLAGS, compile both implementations and -# select which one to use at runtime, depending on whether SSE 4.2 is supported -# by the processor we're running on. +# If we are targeting a processor that has Intel SSE 4.2 instructions, we can +# use the special CRC instructions for calculating CRC-32C. If we're not +# targeting such a processor, but we can nevertheless produce code that uses +# the SSE intrinsics, perhaps with some extra CFLAGS, compile both +# implementations and select which one to use at runtime, depending on whether +# SSE 4.2 is supported by the processor we're running on. +# +# Similarly, if we are targeting an ARM processor that has the CRC +# instructions that are part of the ARMv8 CRC Extension, use them. And if +# we're not targeting such a processor, but can nevertheless produce code that +# uses the CRC instructions, compile both, and select at runtime. # # You can override this logic by setting the appropriate USE_*_CRC32 flag to 1 # in the template or configure command line. -if test x"$USE_SSE42_CRC32C" = x"" && test x"$USE_SSE42_CRC32C_WITH_RUNTIME_CHECK" = x"" && test x"$USE_SLICING_BY_8_CRC32C" = x""; then +if test x"$USE_SLICING_BY_8_CRC32C" = x"" && test x"$USE_SSE42_CRC32C" = x"" && test x"$USE_SSE42_CRC32C_WITH_RUNTIME_CHECK" = x"" && test x"$USE_ARMV8_CRC32C" = x"" && test x"$USE_ARMV8_CRC32C_WITH_RUNTIME_CHECK" = x""; then + # Use Intel SSE 4.2 if available. if test x"$pgac_sse42_crc32_intrinsics" = x"yes" && test x"$SSE4_2_TARGETED" = x"1" ; then USE_SSE42_CRC32C=1 else - # the CPUID instruction is needed for the runtime check. + # Intel SSE 4.2, with runtime check? The CPUID instruction is needed for + # the runtime check. if test x"$pgac_sse42_crc32_intrinsics" = x"yes" && (test x"$pgac_cv__get_cpuid" = x"yes" || test x"$pgac_cv__cpuid" = x"yes"); then USE_SSE42_CRC32C_WITH_RUNTIME_CHECK=1 else - # fall back to slicing-by-8 algorithm which doesn't require any special - # CPU support. - USE_SLICING_BY_8_CRC32C=1 + # Use ARM CRC Extension if available. + if test x"$pgac_armv8_crc32c_intrinsics" = x"yes" && test x"$CFLAGS_ARMV8_CRC32C" = x""; then + USE_ARMV8_CRC32C=1 + else + # ARM CRC Extension, with runtime check? 
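Both probes here compile the same __crc32c* test program, first with the default CFLAGS and then with -march=armv8-a+crc, to decide whether CFLAGS_ARMV8_CRC32C must carry the extra flag. The standalone sketch below shows those intrinsics in use; it assumes an ARMv8 target with the CRC extension and <arm_acle.h>, may need that same -march flag, and processes one byte at a time purely for brevity.

    /* Requires an ARMv8 CPU with the CRC extension; may need -march=armv8-a+crc. */
    #include <arm_acle.h>
    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    static uint32_t
    crc32c_bytes(uint32_t crc, const unsigned char *data, size_t len)
    {
        /* real code would consume 8 bytes at a time with __crc32cd */
        while (len-- > 0)
            crc = __crc32cb(crc, *data++);
        return crc;
    }

    int
    main(void)
    {
        const unsigned char msg[] = "123456789";

        /* conventional CRC-32C framing: initial value and final XOR of 0xFFFFFFFF */
        printf("%08x\n", crc32c_bytes(0xFFFFFFFF, msg, 9) ^ 0xFFFFFFFF);
        return 0;
    }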
+ if test x"$pgac_armv8_crc32c_intrinsics" = x"yes"; then + USE_ARMV8_CRC32C_WITH_RUNTIME_CHECK=1 + else + # fall back to slicing-by-8 algorithm, which doesn't require any + # special CPU support. + USE_SLICING_BY_8_CRC32C=1 + fi + fi fi fi fi @@ -15353,16 +17493,34 @@ else $as_echo "#define USE_SSE42_CRC32C_WITH_RUNTIME_CHECK 1" >>confdefs.h - PG_CRC32C_OBJS="pg_crc32c_sse42.o pg_crc32c_sb8.o pg_crc32c_choose.o" + PG_CRC32C_OBJS="pg_crc32c_sse42.o pg_crc32c_sb8.o pg_crc32c_sse42_choose.o" { $as_echo "$as_me:${as_lineno-$LINENO}: result: SSE 4.2 with runtime check" >&5 $as_echo "SSE 4.2 with runtime check" >&6; } else + if test x"$USE_ARMV8_CRC32C" = x"1"; then + +$as_echo "#define USE_ARMV8_CRC32C 1" >>confdefs.h + + PG_CRC32C_OBJS="pg_crc32c_armv8.o" + { $as_echo "$as_me:${as_lineno-$LINENO}: result: ARMv8 CRC instructions" >&5 +$as_echo "ARMv8 CRC instructions" >&6; } + else + if test x"$USE_ARMV8_CRC32C_WITH_RUNTIME_CHECK" = x"1"; then + +$as_echo "#define USE_ARMV8_CRC32C_WITH_RUNTIME_CHECK 1" >>confdefs.h + + PG_CRC32C_OBJS="pg_crc32c_armv8.o pg_crc32c_sb8.o pg_crc32c_armv8_choose.o" + { $as_echo "$as_me:${as_lineno-$LINENO}: result: ARMv8 CRC instructions with runtime check" >&5 +$as_echo "ARMv8 CRC instructions with runtime check" >&6; } + else $as_echo "#define USE_SLICING_BY_8_CRC32C 1" >>confdefs.h - PG_CRC32C_OBJS="pg_crc32c_sb8.o" - { $as_echo "$as_me:${as_lineno-$LINENO}: result: slicing-by-8" >&5 + PG_CRC32C_OBJS="pg_crc32c_sb8.o" + { $as_echo "$as_me:${as_lineno-$LINENO}: result: slicing-by-8" >&5 $as_echo "slicing-by-8" >&6; } + fi + fi fi fi @@ -15941,7 +18099,14 @@ if test -z "$TCL_CONFIG_SH"; then set X $pgac_test_dirs; shift if test $# -eq 0; then test -z "$TCLSH" && as_fn_error $? "unable to locate tclConfig.sh because no Tcl shell was found" "$LINENO" 5 - set X `echo 'puts $auto_path' | $TCLSH`; shift + pgac_test_dirs=`echo 'puts $auto_path' | $TCLSH` + # On newer macOS, $auto_path frequently doesn't include the place + # where tclConfig.sh actually lives. Append that to the end, so as not + # to break cases where a non-default Tcl installation is being used. + if test -d "$PG_SYSROOT/System/Library/Frameworks/Tcl.framework" ; then + pgac_test_dirs="$pgac_test_dirs $PG_SYSROOT/System/Library/Frameworks/Tcl.framework" + fi + set X $pgac_test_dirs; shift fi for pgac_dir do @@ -15990,7 +18155,7 @@ fi # check for if test "$with_perl" = yes; then ac_save_CPPFLAGS=$CPPFLAGS - CPPFLAGS="$CPPFLAGS -I$perl_archlibexp/CORE" + CPPFLAGS="$CPPFLAGS $perl_includespec" ac_fn_c_check_header_compile "$LINENO" "perl.h" "ac_cv_header_perl_h" "#include " if test "x$ac_cv_header_perl_h" = xyes; then : @@ -16054,19 +18219,19 @@ fi # # Check for DocBook and tools # -if test -z "$NSGMLS"; then - for ac_prog in onsgmls nsgmls +if test -z "$XMLLINT"; then + for ac_prog in xmllint do # Extract the first word of "$ac_prog", so it can be a program name with args. set dummy $ac_prog; ac_word=$2 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } -if ${ac_cv_path_NSGMLS+:} false; then : +if ${ac_cv_path_XMLLINT+:} false; then : $as_echo_n "(cached) " >&6 else - case $NSGMLS in + case $XMLLINT in [\\/]* | ?:[\\/]*) - ac_cv_path_NSGMLS="$NSGMLS" # Let the user override the test with a path. + ac_cv_path_XMLLINT="$XMLLINT" # Let the user override the test with a path. ;; *) as_save_IFS=$IFS; IFS=$PATH_SEPARATOR @@ -16076,7 +18241,7 @@ do test -z "$as_dir" && as_dir=. 
for ac_exec_ext in '' $ac_executable_extensions; do if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then - ac_cv_path_NSGMLS="$as_dir/$ac_word$ac_exec_ext" + ac_cv_path_XMLLINT="$as_dir/$ac_word$ac_exec_ext" $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi @@ -16087,35 +18252,35 @@ IFS=$as_save_IFS ;; esac fi -NSGMLS=$ac_cv_path_NSGMLS -if test -n "$NSGMLS"; then - { $as_echo "$as_me:${as_lineno-$LINENO}: result: $NSGMLS" >&5 -$as_echo "$NSGMLS" >&6; } +XMLLINT=$ac_cv_path_XMLLINT +if test -n "$XMLLINT"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $XMLLINT" >&5 +$as_echo "$XMLLINT" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi - test -n "$NSGMLS" && break + test -n "$XMLLINT" && break done else - # Report the value of NSGMLS in configure's output in all cases. - { $as_echo "$as_me:${as_lineno-$LINENO}: checking for NSGMLS" >&5 -$as_echo_n "checking for NSGMLS... " >&6; } - { $as_echo "$as_me:${as_lineno-$LINENO}: result: $NSGMLS" >&5 -$as_echo "$NSGMLS" >&6; } + # Report the value of XMLLINT in configure's output in all cases. + { $as_echo "$as_me:${as_lineno-$LINENO}: checking for XMLLINT" >&5 +$as_echo_n "checking for XMLLINT... " >&6; } + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $XMLLINT" >&5 +$as_echo "$XMLLINT" >&6; } fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for DocBook V4.2" >&5 -$as_echo_n "checking for DocBook V4.2... " >&6; } +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for DocBook XML V4.2" >&5 +$as_echo_n "checking for DocBook XML V4.2... " >&6; } if ${pgac_cv_check_docbook+:} false; then : $as_echo_n "(cached) " >&6 else - cat >conftest.sgml < + cat >conftest.xml < test @@ -16130,13 +18295,13 @@ EOF pgac_cv_check_docbook=no -if test -n "$NSGMLS"; then - $NSGMLS -s conftest.sgml 1>&5 2>&1 +if test -n "$XMLLINT"; then + $XMLLINT --noout --valid conftest.xml 1>&5 2>&1 if test $? -eq 0; then pgac_cv_check_docbook=yes fi fi -rm -f conftest.sgml +rm -f conftest.xml fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $pgac_cv_check_docbook" >&5 $as_echo "$pgac_cv_check_docbook" >&6; } @@ -16198,60 +18363,6 @@ $as_echo_n "checking for DBTOEPUB... " >&6; } $as_echo "$DBTOEPUB" >&6; } fi -if test -z "$XMLLINT"; then - for ac_prog in xmllint -do - # Extract the first word of "$ac_prog", so it can be a program name with args. -set dummy $ac_prog; ac_word=$2 -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 -$as_echo_n "checking for $ac_word... " >&6; } -if ${ac_cv_path_XMLLINT+:} false; then : - $as_echo_n "(cached) " >&6 -else - case $XMLLINT in - [\\/]* | ?:[\\/]*) - ac_cv_path_XMLLINT="$XMLLINT" # Let the user override the test with a path. - ;; - *) - as_save_IFS=$IFS; IFS=$PATH_SEPARATOR -for as_dir in $PATH -do - IFS=$as_save_IFS - test -z "$as_dir" && as_dir=. - for ac_exec_ext in '' $ac_executable_extensions; do - if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then - ac_cv_path_XMLLINT="$as_dir/$ac_word$ac_exec_ext" - $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 - break 2 - fi -done - done -IFS=$as_save_IFS - - ;; -esac -fi -XMLLINT=$ac_cv_path_XMLLINT -if test -n "$XMLLINT"; then - { $as_echo "$as_me:${as_lineno-$LINENO}: result: $XMLLINT" >&5 -$as_echo "$XMLLINT" >&6; } -else - { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 -$as_echo "no" >&6; } -fi - - - test -n "$XMLLINT" && break -done - -else - # Report the value of XMLLINT in configure's output in all cases. 
- { $as_echo "$as_me:${as_lineno-$LINENO}: checking for XMLLINT" >&5 -$as_echo_n "checking for XMLLINT... " >&6; } - { $as_echo "$as_me:${as_lineno-$LINENO}: result: $XMLLINT" >&5 -$as_echo "$XMLLINT" >&6; } -fi - if test -z "$XSLTPROC"; then for ac_prog in xsltproc do @@ -16306,60 +18417,6 @@ $as_echo_n "checking for XSLTPROC... " >&6; } $as_echo "$XSLTPROC" >&6; } fi -if test -z "$OSX"; then - for ac_prog in osx sgml2xml sx -do - # Extract the first word of "$ac_prog", so it can be a program name with args. -set dummy $ac_prog; ac_word=$2 -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 -$as_echo_n "checking for $ac_word... " >&6; } -if ${ac_cv_path_OSX+:} false; then : - $as_echo_n "(cached) " >&6 -else - case $OSX in - [\\/]* | ?:[\\/]*) - ac_cv_path_OSX="$OSX" # Let the user override the test with a path. - ;; - *) - as_save_IFS=$IFS; IFS=$PATH_SEPARATOR -for as_dir in $PATH -do - IFS=$as_save_IFS - test -z "$as_dir" && as_dir=. - for ac_exec_ext in '' $ac_executable_extensions; do - if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then - ac_cv_path_OSX="$as_dir/$ac_word$ac_exec_ext" - $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 - break 2 - fi -done - done -IFS=$as_save_IFS - - ;; -esac -fi -OSX=$ac_cv_path_OSX -if test -n "$OSX"; then - { $as_echo "$as_me:${as_lineno-$LINENO}: result: $OSX" >&5 -$as_echo "$OSX" >&6; } -else - { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 -$as_echo "no" >&6; } -fi - - - test -n "$OSX" && break -done - -else - # Report the value of OSX in configure's output in all cases. - { $as_echo "$as_me:${as_lineno-$LINENO}: checking for OSX" >&5 -$as_echo_n "checking for OSX... " >&6; } - { $as_echo "$as_me:${as_lineno-$LINENO}: result: $OSX" >&5 -$as_echo "$OSX" >&6; } -fi - if test -z "$FOP"; then for ac_prog in fop do @@ -16424,6 +18481,12 @@ if test "$enable_tap_tests" = yes; then # (prove might be part of a different Perl installation than perl, eg on # MSys, so the result of AX_PROG_PERL_MODULES could be irrelevant anyway.) if test -z "$PROVE"; then + # Test::More and Time::HiRes are supposed to be part of core Perl, + # but some distros omit them in a minimal installation. + + + + @@ -16473,7 +18536,7 @@ fi if test "x$PERL" != x; then ax_perl_modules_failed=0 - for ax_perl_module in 'IPC::Run' ; do + for ax_perl_module in 'IPC::Run' 'Test::More 0.87' 'Time::HiRes' ; do { $as_echo "$as_me:${as_lineno-$LINENO}: checking for perl module $ax_perl_module" >&5 $as_echo_n "checking for perl module $ax_perl_module... " >&6; } @@ -16495,7 +18558,7 @@ $as_echo "ok" >&6; }; else : - as_fn_error $? "Perl module IPC::Run is required to run TAP tests" "$LINENO" 5 + as_fn_error $? "Additional Perl modules are required to run TAP tests" "$LINENO" 5 fi else { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: could not find perl" >&5 @@ -16767,7 +18830,7 @@ _ACEOF # awk -F is a regex on some platforms, and not on others, so make "." a tab PG_VERSION_NUM="`echo "$PACKAGE_VERSION" | sed 's/[A-Za-z].*$//' | tr '.' ' ' | -$AWK '{printf "%d%02d%02d", $1, $2, (NF >= 3) ? $3 : 0}'`" +$AWK '{printf "%d%04d", $1, $2}'`" cat >>confdefs.h <<_ACEOF #define PG_VERSION_NUM $PG_VERSION_NUM @@ -16775,6 +18838,15 @@ _ACEOF +# If we are inserting PG_SYSROOT into CPPFLAGS, do so symbolically not +# literally, so that it's possible to override it at build time using +# a command like "make ... PG_SYSROOT=path". This has to be done after +# we've finished all configure checks that depend on CPPFLAGS. 
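The awk change above ("%d%02d%02d" to "%d%04d") matches the two-part version numbers used since PostgreSQL 10: PG_VERSION_NUM is major * 10000 + minor, so a hypothetical 12.3 release yields 120003. The sketch below shows the usual decompose-and-compare pattern; the concrete value is only an example.

    #include <stdio.h>

    /* assumed value for an illustrative 12.3 release: 12 * 10000 + 3 */
    #define PG_VERSION_NUM 120003

    int
    main(void)
    {
        int major = PG_VERSION_NUM / 10000;
        int minor = PG_VERSION_NUM % 10000;

        printf("major=%d minor=%d\n", major, minor);

    #if PG_VERSION_NUM >= 120000
        printf("version 12.x or later\n");
    #endif
        return 0;
    }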
+if test x"$PG_SYSROOT" != x; then + CPPFLAGS=`echo "$CPPFLAGS" | sed -e "s| $PG_SYSROOT | \\\$(PG_SYSROOT) |"` +fi + + # Begin output steps @@ -16786,6 +18858,19 @@ $as_echo "$as_me: using CFLAGS=$CFLAGS" >&6;} $as_echo "$as_me: using CPPFLAGS=$CPPFLAGS" >&6;} { $as_echo "$as_me:${as_lineno-$LINENO}: using LDFLAGS=$LDFLAGS" >&5 $as_echo "$as_me: using LDFLAGS=$LDFLAGS" >&6;} +# Currently only used when LLVM is used +if test "$with_llvm" = yes ; then + { $as_echo "$as_me:${as_lineno-$LINENO}: using CXX=$CXX" >&5 +$as_echo "$as_me: using CXX=$CXX" >&6;} + { $as_echo "$as_me:${as_lineno-$LINENO}: using CXXFLAGS=$CXXFLAGS" >&5 +$as_echo "$as_me: using CXXFLAGS=$CXXFLAGS" >&6;} + { $as_echo "$as_me:${as_lineno-$LINENO}: using CLANG=$CLANG" >&5 +$as_echo "$as_me: using CLANG=$CLANG" >&6;} + { $as_echo "$as_me:${as_lineno-$LINENO}: using BITCODE_CFLAGS=$BITCODE_CFLAGS" >&5 +$as_echo "$as_me: using BITCODE_CFLAGS=$BITCODE_CFLAGS" >&6;} + { $as_echo "$as_me:${as_lineno-$LINENO}: using BITCODE_CXXFLAGS=$BITCODE_CXXFLAGS" >&5 +$as_echo "$as_me: using BITCODE_CXXFLAGS=$BITCODE_CXXFLAGS" >&6;} +fi # prepare build tree if outside source tree # Note 1: test -ef might not exist, but it's more reliable than `pwd`. @@ -16810,7 +18895,7 @@ fi ac_config_files="$ac_config_files GNUmakefile src/Makefile.global" -ac_config_links="$ac_config_links src/backend/port/dynloader.c:src/backend/port/dynloader/${template}.c src/backend/port/pg_sema.c:${SEMA_IMPLEMENTATION} src/backend/port/pg_shmem.c:${SHMEM_IMPLEMENTATION} src/include/dynloader.h:src/backend/port/dynloader/${template}.h src/include/pg_config_os.h:src/include/port/${template}.h src/Makefile.port:src/makefiles/Makefile.${template}" +ac_config_links="$ac_config_links src/backend/port/pg_sema.c:${SEMA_IMPLEMENTATION} src/backend/port/pg_shmem.c:${SHMEM_IMPLEMENTATION} src/include/pg_config_os.h:src/include/port/${template}.h src/Makefile.port:src/makefiles/Makefile.${template}" if test "$PORTNAME" = "win32"; then @@ -17334,7 +19419,7 @@ cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 # report actual input values of CONFIG_FILES etc. instead of their # values after options handling. ac_log=" -This file was extended by PostgreSQL $as_me 11devel, which was +This file was extended by PostgreSQL $as_me 12devel, which was generated by GNU Autoconf 2.69. 
Invocation command line was CONFIG_FILES = $CONFIG_FILES @@ -17404,7 +19489,7 @@ _ACEOF cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1 ac_cs_config="`$as_echo "$ac_configure_args" | sed 's/^ //; s/[\\""\`\$]/\\\\&/g'`" ac_cs_version="\\ -PostgreSQL config.status 11devel +PostgreSQL config.status 12devel configured by $0, generated by GNU Autoconf 2.69, with options \\"\$ac_cs_config\\" @@ -17531,10 +19616,8 @@ do "src/backend/port/tas.s") CONFIG_LINKS="$CONFIG_LINKS src/backend/port/tas.s:src/backend/port/tas/${tas_file}" ;; "GNUmakefile") CONFIG_FILES="$CONFIG_FILES GNUmakefile" ;; "src/Makefile.global") CONFIG_FILES="$CONFIG_FILES src/Makefile.global" ;; - "src/backend/port/dynloader.c") CONFIG_LINKS="$CONFIG_LINKS src/backend/port/dynloader.c:src/backend/port/dynloader/${template}.c" ;; "src/backend/port/pg_sema.c") CONFIG_LINKS="$CONFIG_LINKS src/backend/port/pg_sema.c:${SEMA_IMPLEMENTATION}" ;; "src/backend/port/pg_shmem.c") CONFIG_LINKS="$CONFIG_LINKS src/backend/port/pg_shmem.c:${SHMEM_IMPLEMENTATION}" ;; - "src/include/dynloader.h") CONFIG_LINKS="$CONFIG_LINKS src/include/dynloader.h:src/backend/port/dynloader/${template}.h" ;; "src/include/pg_config_os.h") CONFIG_LINKS="$CONFIG_LINKS src/include/pg_config_os.h:src/include/port/${template}.h" ;; "src/Makefile.port") CONFIG_LINKS="$CONFIG_LINKS src/Makefile.port:src/makefiles/Makefile.${template}" ;; "check_win32_symlinks") CONFIG_COMMANDS="$CONFIG_COMMANDS check_win32_symlinks" ;; diff --git a/configure.in b/configure.in index e94fba5235..30e19097e4 100644 --- a/configure.in +++ b/configure.in @@ -17,13 +17,13 @@ dnl Read the Autoconf manual for details. dnl m4_pattern_forbid(^PGAC_)dnl to catch undefined macros -AC_INIT([PostgreSQL], [11devel], [pgsql-bugs@postgresql.org]) +AC_INIT([PostgreSQL], [12devel], [pgsql-bugs@postgresql.org]) m4_if(m4_defn([m4_PACKAGE_VERSION]), [2.69], [], [m4_fatal([Autoconf version 2.69 is required. Untested combinations of 'autoconf' and PostgreSQL versions are not recommended. You can remove the check from 'configure.in' but it is then your responsibility whether the result works or not.])]) -AC_COPYRIGHT([Copyright (c) 1996-2017, PostgreSQL Global Development Group]) +AC_COPYRIGHT([Copyright (c) 1996-2018, PostgreSQL Global Development Group]) AC_CONFIG_SRCDIR([src/backend/access/common/heaptuple.c]) AC_CONFIG_AUX_DIR(config) AC_PREFIX_DEFAULT(/usr/local/pgsql) @@ -343,37 +343,6 @@ AC_DEFINE_UNQUOTED([XLOG_BLCKSZ], ${XLOG_BLCKSZ}, [ Changing XLOG_BLCKSZ requires an initdb. ]) -# -# WAL segment size -# -AC_MSG_CHECKING([for WAL segment size]) -PGAC_ARG_REQ(with, wal-segsize, [SEGSIZE], [set WAL segment size in MB [16]], - [wal_segsize=$withval], - [wal_segsize=16]) -case ${wal_segsize} in - 1) ;; - 2) ;; - 4) ;; - 8) ;; - 16) ;; - 32) ;; - 64) ;; - 128) ;; - 256) ;; - 512) ;; - 1024) ;; - *) AC_MSG_ERROR([Invalid WAL segment size. Allowed values are 1,2,4,8,16,32,64,128,256,512,1024.]) -esac -AC_MSG_RESULT([${wal_segsize}MB]) - -AC_DEFINE_UNQUOTED([XLOG_SEG_SIZE], [(${wal_segsize} * 1024 * 1024)], [ - XLOG_SEG_SIZE is the size of a single WAL file. This must be a power of 2 - and larger than XLOG_BLCKSZ (preferably, a great deal larger than - XLOG_BLCKSZ). - - Changing XLOG_SEG_SIZE requires an initdb. 
-]) - # # C compiler # @@ -384,11 +353,20 @@ AC_DEFINE_UNQUOTED([XLOG_SEG_SIZE], [(${wal_segsize} * 1024 * 1024)], [ PGAC_ARG_REQ(with, CC, [CMD], [set compiler (deprecated)], [CC=$with_CC]) case $template in - aix) pgac_cc_list="gcc xlc";; - *) pgac_cc_list="gcc cc";; + aix) pgac_cc_list="gcc xlc"; pgac_cxx_list="g++ xlC";; + *) pgac_cc_list="gcc cc"; pgac_cxx_list="g++ c++";; esac AC_PROG_CC([$pgac_cc_list]) +AC_PROG_CC_C99() + +# Error out if the compiler does not support C99, as the codebase +# relies on that. +if test "$ac_cv_prog_cc_c99" = no; then + AC_MSG_ERROR([C compiler "$CC" does not support C99]) +fi + +AC_PROG_CXX([$pgac_cxx_list]) # Check if it's Intel's compiler, which (usually) pretends to be gcc, # but has idiosyncrasies of its own. We assume icc will define @@ -405,14 +383,28 @@ choke me AC_SUBST(SUN_STUDIO_CC) + +# +# LLVM +# +# Checked early because subsequent tests depend on it. +PGAC_ARG_BOOL(with, llvm, no, [build with LLVM based JIT support], + [AC_DEFINE([USE_LLVM], 1, [Define to 1 to build with LLVM based JIT support. (--with-llvm)])]) +AC_SUBST(with_llvm) +if test "$with_llvm" = yes ; then + PGAC_LLVM_SUPPORT() +fi + + unset CFLAGS +unset CXXFLAGS # # Read the template # . "$srcdir/src/template/$template" || exit -# CFLAGS are selected so: +# C[XX]FLAGS are selected so: # If the user specifies something in the environment, that is used. # else: If the template file set something, that is used. # else: If coverage was enabled, don't set anything. @@ -434,9 +426,47 @@ else fi fi -# CFLAGS we determined above will be added back at the end +if test "$ac_env_CXXFLAGS_set" = set; then + CXXFLAGS=$ac_env_CXXFLAGS_value +elif test "${CXXFLAGS+set}" = set; then + : # (keep what template set) +elif test "$enable_coverage" = yes; then + : # no optimization by default +elif test "$GCC" = yes; then + CXXFLAGS="-O2" +else + # if the user selected debug mode, don't use -O + if test "$enable_debug" != yes; then + CXXFLAGS="-O" + fi +fi + +# When generating bitcode (for inlining) we always want to use -O2 +# even when --enable-debug is specified. The bitcode it's not going to +# be used for line-by-line debugging, and JIT inlining doesn't work +# without at least -O1 (otherwise clang will emit 'noinline' +# attributes everywhere), which is bad for testing. Still allow the +# environment to override if done explicitly. 
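The new AC_PROG_CC_C99 call above, and the hard error when C99 cannot be enabled, reflect that the code base now assumes a C99 compiler. The snippet below merely illustrates the kind of constructs that assumption makes safe (for-loop declarations, designated initializers, <stdint.h> fixed-width types); the struct and values are invented for the example and are not code from the tree.

    #include <stdint.h>
    #include <stdio.h>

    struct setting                 /* hypothetical struct, illustration only */
    {
        const char *name;
        int64_t     value;
    };

    int
    main(void)
    {
        struct setting s = {.name = "work_mem", .value = INT64_C(4194304)};
        int64_t     sum = 0;

        for (int i = 0; i < 3; i++)     /* C99: declaration inside for() */
            sum += s.value >> i;

        printf("%s: %lld\n", s.name, (long long) sum);
        return 0;
    }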
+if test "$ac_env_BITCODE_CFLAGS_set" = set; then + BITCODE_CFLAGS=$ac_env_BITCODE_CFLAGS_value +else + BITCODE_CFLAGS="-O2 $BITCODE_CFLAGS" +fi +if test "$ac_env_BITCODE_CXXFLAGS_set" = set; then + BITCODE_CXXFLAGS=$ac_env_BITCODE_CXXFLAGS_value +else + BITCODE_CXXFLAGS="-O2 $BITCODE_CXXFLAGS" +fi + +# C[XX]FLAGS we determined above will be added back at the end user_CFLAGS=$CFLAGS CFLAGS="" +user_CXXFLAGS=$CXXFLAGS +CXXFLAGS="" +user_BITCODE_CFLAGS=$BITCODE_CFLAGS +BITCODE_CFLAGS="" +user_BITCODE_CXXFLAGS=$BITCODE_CXXFLAGS +BITCODE_CXXFLAGS="" # set CFLAGS_VECTOR from the environment, if available if test "$ac_env_CFLAGS_VECTOR_set" = set; then @@ -450,55 +480,102 @@ fi if test "$GCC" = yes -a "$ICC" = no; then CFLAGS="-Wall -Wmissing-prototypes -Wpointer-arith" + CXXFLAGS="-Wall -Wpointer-arith" # These work in some but not all gcc versions PGAC_PROG_CC_CFLAGS_OPT([-Wdeclaration-after-statement]) + # -Wdeclaration-after-statement isn't applicable for C++ + # Really don't want VLAs to be used in our dialect of C + PGAC_PROG_CC_CFLAGS_OPT([-Werror=vla]) + # -Wvla is not applicable for C++ PGAC_PROG_CC_CFLAGS_OPT([-Wendif-labels]) + PGAC_PROG_CXX_CFLAGS_OPT([-Wendif-labels]) PGAC_PROG_CC_CFLAGS_OPT([-Wmissing-format-attribute]) + PGAC_PROG_CXX_CFLAGS_OPT([-Wmissing-format-attribute]) # This was included in -Wall/-Wformat in older GCC versions PGAC_PROG_CC_CFLAGS_OPT([-Wformat-security]) + PGAC_PROG_CXX_CFLAGS_OPT([-Wformat-security]) # Disable strict-aliasing rules; needed for gcc 3.3+ PGAC_PROG_CC_CFLAGS_OPT([-fno-strict-aliasing]) + PGAC_PROG_CXX_CFLAGS_OPT([-fno-strict-aliasing]) # Disable optimizations that assume no overflow; needed for gcc 4.3+ PGAC_PROG_CC_CFLAGS_OPT([-fwrapv]) + PGAC_PROG_CXX_CFLAGS_OPT([-fwrapv]) # Disable FP optimizations that cause various errors on gcc 4.5+ or maybe 4.6+ PGAC_PROG_CC_CFLAGS_OPT([-fexcess-precision=standard]) + PGAC_PROG_CXX_CFLAGS_OPT([-fexcess-precision=standard]) # Optimization flags for specific files that benefit from vectorization PGAC_PROG_CC_VAR_OPT(CFLAGS_VECTOR, [-funroll-loops]) PGAC_PROG_CC_VAR_OPT(CFLAGS_VECTOR, [-ftree-vectorize]) # We want to suppress clang's unhelpful unused-command-line-argument warnings # but gcc won't complain about unrecognized -Wno-foo switches, so we have to # test for the positive form and if that works, add the negative form + NOT_THE_CFLAGS="" PGAC_PROG_CC_VAR_OPT(NOT_THE_CFLAGS, [-Wunused-command-line-argument]) if test -n "$NOT_THE_CFLAGS"; then CFLAGS="$CFLAGS -Wno-unused-command-line-argument" fi + # Similarly disable useless truncation warnings from gcc 8+ + NOT_THE_CFLAGS="" + PGAC_PROG_CC_VAR_OPT(NOT_THE_CFLAGS, [-Wformat-truncation]) + if test -n "$NOT_THE_CFLAGS"; then + CFLAGS="$CFLAGS -Wno-format-truncation" + fi + NOT_THE_CFLAGS="" + PGAC_PROG_CC_VAR_OPT(NOT_THE_CFLAGS, [-Wstringop-truncation]) + if test -n "$NOT_THE_CFLAGS"; then + CFLAGS="$CFLAGS -Wno-stringop-truncation" + fi elif test "$ICC" = yes; then # Intel's compiler has a bug/misoptimization in checking for # division by NAN (NaN == 0), -mp1 fixes it, so add it to the CFLAGS. 
PGAC_PROG_CC_CFLAGS_OPT([-mp1]) + PGAC_PROG_CXX_CFLAGS_OPT([-mp1]) # Make sure strict aliasing is off (though this is said to be the default) PGAC_PROG_CC_CFLAGS_OPT([-fno-strict-aliasing]) + PGAC_PROG_CXX_CFLAGS_OPT([-fno-strict-aliasing]) elif test "$PORTNAME" = "aix"; then # AIX's xlc has to have strict aliasing turned off too PGAC_PROG_CC_CFLAGS_OPT([-qnoansialias]) + PGAC_PROG_CXX_CFLAGS_OPT([-qnoansialias]) PGAC_PROG_CC_CFLAGS_OPT([-qlonglong]) + PGAC_PROG_CXX_CFLAGS_OPT([-qlonglong]) elif test "$PORTNAME" = "hpux"; then # On some versions of HP-UX, libm functions do not set errno by default. # Fix that by using +Olibmerrno if the compiler recognizes it. PGAC_PROG_CC_CFLAGS_OPT([+Olibmerrno]) + PGAC_PROG_CXX_CFLAGS_OPT([+Olibmerrno]) fi AC_SUBST(CFLAGS_VECTOR, $CFLAGS_VECTOR) +# Determine flags used to emit bitcode for JIT inlining. Need to test +# for behaviour changing compiler flags, to keep compatibility with +# compiler used for normal postgres code. +if test "$with_llvm" = yes ; then + CLANGXX="$CLANG -xc++" + + PGAC_PROG_VARCC_VARFLAGS_OPT(CLANG, BITCODE_CFLAGS, [-fno-strict-aliasing]) + PGAC_PROG_VARCXX_VARFLAGS_OPT(CLANGXX, BITCODE_CXXFLAGS, [-fno-strict-aliasing]) + PGAC_PROG_VARCC_VARFLAGS_OPT(CLANG, BITCODE_CFLAGS, [-fwrapv]) + PGAC_PROG_VARCXX_VARFLAGS_OPT(CLANGXX, BITCODE_CXXFLAGS, [-fwrapv]) + PGAC_PROG_VARCC_VARFLAGS_OPT(CLANG, BITCODE_CFLAGS, [-fexcess-precision=standard]) + PGAC_PROG_VARCXX_VARFLAGS_OPT(CLANGXX, BITCODE_CXXFLAGS, [-fexcess-precision=standard]) +fi + # supply -g if --enable-debug if test "$enable_debug" = yes && test "$ac_cv_prog_cc_g" = yes; then CFLAGS="$CFLAGS -g" fi +if test "$enable_debug" = yes && test "$ac_cv_prog_cxx_g" = yes; then + CXXFLAGS="$CXXFLAGS -g" +fi + # enable code coverage if --enable-coverage if test "$enable_coverage" = yes; then if test "$GCC" = yes; then CFLAGS="$CFLAGS -fprofile-arcs -ftest-coverage" + CXXFLAGS="$CXXFLAGS -fprofile-arcs -ftest-coverage" else AC_MSG_ERROR([--enable-coverage is supported only when using GCC]) fi @@ -510,6 +587,7 @@ if test "$enable_profiling" = yes && test "$ac_cv_prog_cc_g" = yes; then AC_DEFINE([PROFILE_PID_DIR], 1, [Define to 1 to allow profiling output to be saved separately for each process.]) CFLAGS="$CFLAGS -pg $PLATFORM_PROFILE_FLAGS" + CXXFLAGS="$CXXFLAGS -pg $PLATFORM_PROFILE_FLAGS" else AC_MSG_ERROR([--enable-profiling is supported only when using GCC]) fi @@ -520,12 +598,19 @@ if test "$PORTNAME" = "win32"; then CPPFLAGS="$CPPFLAGS -I$srcdir/src/include/port/win32 -DEXEC_BACKEND" fi -# Now that we're done automatically adding stuff to CFLAGS, put back the +# Now that we're done automatically adding stuff to C[XX]FLAGS, put back the # user-specified flags (if any) at the end. This lets users override # the automatic additions. CFLAGS="$CFLAGS $user_CFLAGS" +CXXFLAGS="$CXXFLAGS $user_CXXFLAGS" +BITCODE_CFLAGS="$BITCODE_CFLAGS $user_BITCODE_CFLAGS" +BITCODE_CXXFLAGS="$BITCODE_CXXFLAGS $user_BITCODE_CXXFLAGS" + +AC_SUBST(BITCODE_CFLAGS, $BITCODE_CFLAGS) +AC_SUBST(BITCODE_CXXFLAGS, $BITCODE_CXXFLAGS) # Check if the compiler still works with the final flag settings +# (note, we're not checking that for CXX, which is optional) AC_MSG_CHECKING([whether the C compiler still works]) AC_LINK_IFELSE([AC_LANG_PROGRAM([], [return 0;])], [AC_MSG_RESULT(yes)], @@ -539,6 +624,24 @@ choke me @%:@endif])], [], [AC_MSG_ERROR([do not put -ffast-math in CFLAGS])]) fi +# Defend against clang being used on x86-32 without SSE2 enabled. 
As current +# versions of clang do not understand -fexcess-precision=standard, the use of +# x87 floating point operations leads to problems like isinf possibly returning +# false for a value that is infinite when converted from the 80bit register to +# the 8byte memory representation. +# +# Only perform the test if the compiler doesn't understand +# -fexcess-precision=standard, that way a potentially fixed compiler will work +# automatically. +if test "$pgac_cv_prog_CC_cflags__fexcess_precision_standard" = no; then +AC_COMPILE_IFELSE([AC_LANG_PROGRAM([], [ +@%:@if defined(__clang__) && defined(__i386__) && !defined(__SSE2_MATH__) +choke me +@%:@endif +])], [], +[AC_MSG_ERROR([Compiling PostgreSQL with clang, on 32bit x86, requires SSE2 support. Use -msse2 or use gcc.])]) +fi + AC_PROG_CPP AC_SUBST(GCC) @@ -669,6 +772,7 @@ PGAC_ARG_BOOL(with, gssapi, no, [build with GSSAPI support], krb_srvtab="FILE:\$(sysconfdir)/krb5.keytab" ]) AC_MSG_RESULT([$with_gssapi]) +AC_SUBST(with_gssapi) AC_SUBST(krb_srvtab) @@ -681,6 +785,7 @@ PGAC_ARG_REQ(with, krb-srvnam, [NAME], [default service principal name in Kerberos (GSSAPI) [postgres]], [], [with_krb_srvnam="postgres"]) +AC_SUBST(with_krb_srvnam) AC_DEFINE_UNQUOTED([PG_KRB_SRVNAM], ["$with_krb_srvnam"], [Define to the name of the default PostgreSQL service principal in Kerberos (GSSAPI). (--with-krb-srvnam=NAME)]) @@ -713,6 +818,7 @@ PGAC_ARG_BOOL(with, ldap, no, [build with LDAP support], [AC_DEFINE([USE_LDAP], 1, [Define to 1 to build with LDAP support. (--with-ldap)])]) AC_MSG_RESULT([$with_ldap]) +AC_SUBST(with_ldap) # @@ -938,6 +1044,16 @@ You might have to rebuild your Perl installation. Refer to the documentation for details. Use --without-perl to disable building PL/Perl.]) fi + # On most platforms, archlibexp is also where the Perl include files live ... + perl_includespec="-I$perl_archlibexp/CORE" + # ... but on newer macOS versions, we must use -iwithsysroot to look + # under $PG_SYSROOT + if test \! -f "$perl_archlibexp/CORE/perl.h" ; then + if test -f "$PG_SYSROOT$perl_archlibexp/CORE/perl.h" ; then + perl_includespec="-iwithsysroot $perl_archlibexp/CORE" + fi + fi + AC_SUBST(perl_includespec)dnl PGAC_CHECK_PERL_EMBED_CCFLAGS PGAC_CHECK_PERL_EMBED_LDFLAGS fi @@ -1024,12 +1140,6 @@ AC_SEARCH_LIBS(setproctitle, util) AC_SEARCH_LIBS(dlopen, dl) AC_SEARCH_LIBS(socket, [socket ws2_32]) AC_SEARCH_LIBS(shl_load, dld) -# We only use libld in port/dynloader/aix.c -case $host_os in - aix*) - AC_SEARCH_LIBS(ldopen, ld) - ;; -esac AC_SEARCH_LIBS(getopt_long, [getopt gnugetopt]) AC_SEARCH_LIBS(crypt, crypt) AC_SEARCH_LIBS(shm_open, rt) @@ -1092,10 +1202,10 @@ if test "$with_openssl" = yes ; then AC_CHECK_LIB(crypto, CRYPTO_new_ex_data, [], [AC_MSG_ERROR([library 'crypto' is required for OpenSSL])]) AC_CHECK_LIB(ssl, SSL_new, [], [AC_MSG_ERROR([library 'ssl' is required for OpenSSL])]) else - AC_SEARCH_LIBS(CRYPTO_new_ex_data, eay32 crypto, [], [AC_MSG_ERROR([library 'eay32' or 'crypto' is required for OpenSSL])]) - AC_SEARCH_LIBS(SSL_new, ssleay32 ssl, [], [AC_MSG_ERROR([library 'ssleay32' or 'ssl' is required for OpenSSL])]) + AC_SEARCH_LIBS(CRYPTO_new_ex_data, [eay32 crypto], [], [AC_MSG_ERROR([library 'eay32' or 'crypto' is required for OpenSSL])]) + AC_SEARCH_LIBS(SSL_new, [ssleay32 ssl], [], [AC_MSG_ERROR([library 'ssleay32' or 'ssl' is required for OpenSSL])]) fi - AC_CHECK_FUNCS([SSL_get_current_compression]) + AC_CHECK_FUNCS([SSL_clear_options SSL_get_current_compression X509_get_signature_nid]) # Functions introduced in OpenSSL 1.1.0. 
We used to check for # OPENSSL_VERSION_NUMBER, but that didn't work with 1.1.0, because LibreSSL # defines OPENSSL_VERSION_NUMBER to claim version 2.0.0, even though it @@ -1137,6 +1247,7 @@ if test "$with_ldap" = yes ; then else LDAP_LIBS_FE="-lldap $EXTRA_LDAP_LIBS" fi + AC_CHECK_FUNCS([ldap_initialize]) else AC_CHECK_LIB(wldap32, ldap_bind, [], [AC_MSG_ERROR([library 'wldap32' is required for LDAP])]) LDAP_LIBS_FE="-lwldap32" @@ -1180,7 +1291,37 @@ AC_SUBST(UUID_LIBS) ## Header files ## -AC_CHECK_HEADERS([atomic.h crypt.h dld.h fp_class.h getopt.h ieeefp.h ifaddrs.h langinfo.h mbarrier.h poll.h sys/epoll.h sys/ipc.h sys/pstat.h sys/resource.h sys/select.h sys/sem.h sys/shm.h sys/sockio.h sys/tas.h sys/un.h termios.h ucred.h utime.h wchar.h wctype.h]) +AC_HEADER_STDBOOL + +AC_CHECK_HEADERS(m4_normalize([ + atomic.h + copyfile.h + crypt.h + fp_class.h + getopt.h + ieeefp.h + ifaddrs.h + langinfo.h + mbarrier.h + poll.h + sys/epoll.h + sys/ipc.h + sys/prctl.h + sys/procctl.h + sys/pstat.h + sys/resource.h + sys/select.h + sys/sem.h + sys/shm.h + sys/sockio.h + sys/tas.h + sys/un.h + termios.h + ucred.h + utime.h + wchar.h + wctype.h +])) # On BSD, test for net/if.h will fail unless sys/socket.h # is included first. @@ -1293,6 +1434,12 @@ fi if test "$with_bonjour" = yes ; then AC_CHECK_HEADER(dns_sd.h, [], [AC_MSG_ERROR([header file is required for Bonjour])]) +dnl At some point we might add something like +dnl AC_SEARCH_LIBS(DNSServiceRegister, dns_sd) +dnl but right now, what that would mainly accomplish is to encourage +dnl people to try to use the avahi implementation, which does not work. +dnl If you want to use Apple's own Bonjour code on another platform, +dnl just add -ldns_sd to LIBS manually. fi # for contrib/uuid-ossp @@ -1337,12 +1484,12 @@ PGAC_C_FUNCNAME_SUPPORT PGAC_C_STATIC_ASSERT PGAC_C_TYPEOF PGAC_C_TYPES_COMPATIBLE +PGAC_C_BUILTIN_BSWAP16 PGAC_C_BUILTIN_BSWAP32 PGAC_C_BUILTIN_BSWAP64 PGAC_C_BUILTIN_CONSTANT_P PGAC_C_BUILTIN_UNREACHABLE PGAC_C_COMPUTED_GOTO -PGAC_C_VA_ARGS PGAC_STRUCT_TIMEZONE PGAC_UNION_SEMUN PGAC_STRUCT_SOCKADDR_UN @@ -1355,6 +1502,24 @@ AC_TYPE_LONG_LONG_INT PGAC_TYPE_LOCALE_T +# MSVC doesn't cope well with defining restrict to __restrict, the +# spelling it understands, because it conflicts with +# __declspec(restrict). Therefore we define pg_restrict to the +# appropriate definition, which presumably won't conflict. +# +# Allow platforms with buggy compilers to force restrict to not be +# used by setting $FORCE_DISABLE_RESTRICT=yes in the relevant +# template. +AC_C_RESTRICT +if test "$ac_cv_c_restrict" = "no" -o "x$FORCE_DISABLE_RESTRICT" = "xyes"; then + pg_restrict="" +else + pg_restrict="$ac_cv_c_restrict" +fi +AC_DEFINE_UNQUOTED([pg_restrict], [$pg_restrict], +[Define to keyword to use for C99 restrict support, or to nothing if not +supported]) + AC_CHECK_TYPES([struct cmsgcred], [], [], [#include #include @@ -1413,6 +1578,11 @@ if test "$ac_cv_sizeof_off_t" -lt 8 -a "$segsize" != "1"; then AC_MSG_ERROR([Large file support is not enabled. 
Segment size cannot be larger than 1GB.]) fi +AC_CHECK_SIZEOF([bool], [], +[#ifdef HAVE_STDBOOL_H +#include +#endif]) + ## ## Functions, global variables @@ -1430,7 +1600,33 @@ PGAC_FUNC_WCSTOMBS_L LIBS_including_readline="$LIBS" LIBS=`echo "$LIBS" | sed -e 's/-ledit//g' -e 's/-lreadline//g'` -AC_CHECK_FUNCS([cbrt clock_gettime dlopen fdatasync getifaddrs getpeerucred getrlimit mbstowcs_l memmove poll pstat pthread_is_threaded_np readlink setproctitle setsid shm_open symlink sync_file_range towlower utime utimes wcstombs wcstombs_l]) +AC_CHECK_FUNCS(m4_normalize([ + cbrt + clock_gettime + copyfile + fdatasync + getifaddrs + getpeerucred + getrlimit + mbstowcs_l + memmove + poll + posix_fallocate + ppoll + pstat + pthread_is_threaded_np + readlink + setproctitle + setproctitle_fast + setsid + shm_open + strchrnul + symlink + sync_file_range + utime + utimes + wcstombs_l +])) AC_REPLACE_FUNCS(fseeko) case $host_os in @@ -1452,18 +1648,17 @@ AC_CHECK_DECLS(posix_fadvise, [], [], [#include ]) fi AC_CHECK_DECLS(fdatasync, [], [], [#include ]) -AC_CHECK_DECLS([strlcat, strlcpy]) +AC_CHECK_DECLS([strlcat, strlcpy, strnlen]) # This is probably only present on macOS, but may as well check always AC_CHECK_DECLS(F_FULLFSYNC, [], [], [#include ]) -HAVE_IPV6=no +AC_CHECK_DECLS([RTLD_GLOBAL, RTLD_NOW], [], [], [#include ]) + AC_CHECK_TYPE([struct sockaddr_in6], - [AC_DEFINE(HAVE_IPV6, 1, [Define to 1 if you have support for IPv6.]) - HAVE_IPV6=yes], + [AC_DEFINE(HAVE_IPV6, 1, [Define to 1 if you have support for IPv6.])], [], [$ac_includes_default #include ]) -AC_SUBST(HAVE_IPV6) AC_CACHE_CHECK([for PS_STRINGS], [pgac_cv_var_PS_STRINGS], [AC_LINK_IFELSE([AC_LANG_PROGRAM( @@ -1479,53 +1674,6 @@ if test "$pgac_cv_var_PS_STRINGS" = yes ; then fi -# We use our snprintf.c emulation if either snprintf() or vsnprintf() -# is missing. Yes, there are machines that have only one. We may -# also decide to use snprintf.c if snprintf() is present but does not -# have all the features we need --- see below. - -if test "$PORTNAME" = "win32"; then - # Win32 gets snprintf.c built unconditionally. - # - # To properly translate all NLS languages strings, we must support the - # *printf() %$ format, which allows *printf() arguments to be selected - # by position in the translated string. - # - # libintl versions < 0.13 use the native *printf() functions, and Win32 - # *printf() doesn't understand %$, so we must use our /port versions, - # which do understand %$. libintl versions >= 0.13 include their own - # *printf versions on Win32. The libintl 0.13 release note text is: - # - # C format strings with positions, as they arise when a translator - # needs to reorder a sentence, are now supported on all platforms. - # On those few platforms (NetBSD and Woe32) for which the native - # printf()/fprintf()/... functions don't support such format - # strings, replacements are provided through . - # - # We could use libintl >= 0.13's *printf() if we were sure that we had - # a litint >= 0.13 at runtime, but seeing that there is no clean way - # to guarantee that, it is best to just use our own, so we are sure to - # get %$ support. In include/port.h we disable the *printf() macros - # that might have been defined by libintl. - # - # We do this unconditionally whether NLS is used or not so we are sure - # that all Win32 libraries and binaries behave the same. 
- pgac_need_repl_snprintf=yes -else - pgac_need_repl_snprintf=no - AC_CHECK_FUNCS(snprintf, [], pgac_need_repl_snprintf=yes) - AC_CHECK_FUNCS(vsnprintf, [], pgac_need_repl_snprintf=yes) -fi - - -# Check whether declares snprintf() and vsnprintf(); if not, -# include/c.h will provide declarations. Note this is a separate test -# from whether the functions exist in the C library --- there are -# systems that have the functions but don't bother to declare them :-( - -AC_CHECK_DECLS([snprintf, vsnprintf]) - - dnl Cannot use AC_CHECK_FUNC because isinf may be a macro AC_CACHE_CHECK([for isinf], ac_cv_func_isinf, [AC_LINK_IFELSE([AC_LANG_PROGRAM([ @@ -1544,7 +1692,23 @@ else AC_CHECK_FUNCS([fpclass fp_class fp_class_d class], [break]) fi -AC_REPLACE_FUNCS([crypt fls getopt getrusage inet_aton mkdtemp random rint srandom strerror strlcat strlcpy]) +AC_REPLACE_FUNCS(m4_normalize([ + crypt + dlopen + fls + getopt + getrusage + inet_aton + mkdtemp + pread + pwrite + random + rint + srandom + strlcat + strlcpy + strnlen +])) case $host_os in @@ -1651,8 +1815,10 @@ if test x"$pgac_cv_var_int_optreset" = x"yes"; then AC_DEFINE(HAVE_INT_OPTRESET, 1, [Define to 1 if you have the global variable 'int optreset'.]) fi -AC_CHECK_FUNCS([strtoll strtoq], [break]) -AC_CHECK_FUNCS([strtoull strtouq], [break]) +AC_CHECK_FUNCS([strtoll __strtoll strtoq], [break]) +AC_CHECK_FUNCS([strtoull __strtoull strtouq], [break]) +# strto[u]ll may exist but not be declared +AC_CHECK_DECLS([strtoll, strtoull]) if test "$with_icu" = yes; then ac_save_CPPFLAGS=$CPPFLAGS @@ -1693,31 +1859,10 @@ for the exact reason.]])], # Run tests below here # -------------------- -# Force use of our snprintf if system's doesn't do arg control -# See comment above at snprintf test for details. -if test "$enable_nls" = yes -a "$pgac_need_repl_snprintf" = no; then - PGAC_FUNC_SNPRINTF_ARG_CONTROL - if test $pgac_cv_snprintf_arg_control != yes ; then - pgac_need_repl_snprintf=yes - fi -fi - - dnl Check to see if we have a working 64-bit integer type. -dnl This breaks down into two steps: -dnl (1) figure out if the compiler has a 64-bit int type with working -dnl arithmetic, and if so -dnl (2) see whether snprintf() can format the type correctly. (Currently, -dnl snprintf is the only library routine we really need for int8 support.) -dnl It's entirely possible to have a compiler that handles a 64-bit type -dnl when the C library doesn't; this is fairly likely when using gcc on -dnl an older platform, for example. -dnl If there is no native snprintf() or it does not handle the 64-bit type, -dnl we force our own version of snprintf() to be used instead. -dnl Note this test must be run after our initial check for snprintf/vsnprintf. - -dnl As of Postgres 8.4, we no longer support compilers without a working -dnl 64-bit type. But we still handle the case of snprintf being broken. +dnl Since Postgres 8.4, we no longer support compilers without a working +dnl 64-bit type; but we have to determine whether that type is called +dnl "long int" or "long long int". PGAC_TYPE_64BIT_INT([long int]) @@ -1735,58 +1880,19 @@ fi AC_DEFINE_UNQUOTED(PG_INT64_TYPE, $pg_int64_type, [Define to the name of a signed 64-bit integer type.]) -dnl If we need to use "long long int", figure out whether nnnLL notation works. 
- -if test x"$HAVE_LONG_LONG_INT_64" = xyes ; then - AC_COMPILE_IFELSE([AC_LANG_SOURCE([ -#define INT64CONST(x) x##LL -long long int foo = INT64CONST(0x1234567890123456); -])], - [AC_DEFINE(HAVE_LL_CONSTANTS, 1, [Define to 1 if constants of type 'long long int' should have the suffix LL.])], - []) -fi - - -# If we found "long int" is 64 bits, assume snprintf handles it. If -# we found we need to use "long long int", better check. We cope with -# snprintfs that use %lld, %qd, or %I64d as the format. If none of these -# work, fall back to our own snprintf emulation (which we know uses %lld). - -if test "$HAVE_LONG_LONG_INT_64" = yes ; then - if test $pgac_need_repl_snprintf = no; then - PGAC_FUNC_SNPRINTF_LONG_LONG_INT_MODIFIER - if test "$LONG_LONG_INT_MODIFIER" = ""; then - # Force usage of our own snprintf, since system snprintf is broken - pgac_need_repl_snprintf=yes - LONG_LONG_INT_MODIFIER='ll' - fi - else - # Here if we previously decided we needed to use our own snprintf - LONG_LONG_INT_MODIFIER='ll' - fi +# Select the printf length modifier that goes with that, too. +if test x"$pg_int64_type" = x"long long int" ; then + INT64_MODIFIER='"ll"' else - # Here if we are not using 'long long int' at all - LONG_LONG_INT_MODIFIER='l' + INT64_MODIFIER='"l"' fi -INT64_MODIFIER="\"$LONG_LONG_INT_MODIFIER\"" - AC_DEFINE_UNQUOTED(INT64_MODIFIER, $INT64_MODIFIER, - [Define to the appropriate snprintf length modifier for 64-bit ints.]) - -# Also force use of our snprintf if the system's doesn't support the %z flag. -if test "$pgac_need_repl_snprintf" = no; then - PGAC_FUNC_SNPRINTF_SIZE_T_SUPPORT - if test "$pgac_cv_snprintf_size_t_support" != yes; then - pgac_need_repl_snprintf=yes - fi -fi + [Define to the appropriate printf length modifier for 64-bit ints.]) -# Now we have checked all the reasons to replace snprintf -if test $pgac_need_repl_snprintf = yes; then - AC_DEFINE(USE_REPL_SNPRINTF, 1, [Use replacement snprintf() functions.]) - AC_LIBOBJ(snprintf) -fi +# has to be down here, rather than with the other builtins, because +# the test uses PG_INT64_TYPE. +PGAC_C_BUILTIN_OP_OVERFLOW # Check size of void *, size_t (enables tweaks for > 32bit address space) AC_CHECK_SIZEOF([void *]) @@ -1838,7 +1944,10 @@ AC_CHECK_ALIGNOF(double) # Compute maximum alignment of any basic type. # We assume long's alignment is at least as strong as char, short, or int; -# but we must check long long (if it exists) and double. +# but we must check long long (if it is being used for int64) and double. +# Note that we intentionally do not consider any types wider than 64 bits, +# as allowing MAXIMUM_ALIGNOF to exceed 8 would be too much of a penalty +# for disk and memory space. MAX_ALIGNOF=$ac_cv_alignof_long if test $MAX_ALIGNOF -lt $ac_cv_alignof_double ; then @@ -1855,7 +1964,7 @@ AC_DEFINE_UNQUOTED(MAXIMUM_ALIGNOF, $MAX_ALIGNOF, [Define as the maximum alignme AC_CHECK_TYPES([int8, uint8, int64, uint64], [], [], [#include ]) -# Check for extensions offering the integer scalar type __int128. +# Some compilers offer a 128-bit integer scalar type. PGAC_TYPE_128BIT_INT # Check for various atomic operations now that we have checked how to declare @@ -1910,28 +2019,56 @@ AC_COMPILE_IFELSE([AC_LANG_PROGRAM([], [ #endif ])], [SSE4_2_TARGETED=1]) +# Check for ARMv8 CRC Extension intrinsics to do CRC calculations. +# +# First check if __crc32c* intrinsics can be used with the default compiler +# flags. If not, check if adding -march=armv8-a+crc flag helps. +# CFLAGS_ARMV8_CRC32C is set if the extra flag is required. 
+PGAC_ARMV8_CRC32C_INTRINSICS([]) +if test x"$pgac_armv8_crc32c_intrinsics" != x"yes"; then + PGAC_ARMV8_CRC32C_INTRINSICS([-march=armv8-a+crc]) +fi +AC_SUBST(CFLAGS_ARMV8_CRC32C) + # Select CRC-32C implementation. # -# If we are targeting a processor that has SSE 4.2 instructions, we can use the -# special CRC instructions for calculating CRC-32C. If we're not targeting such -# a processor, but we can nevertheless produce code that uses the SSE -# intrinsics, perhaps with some extra CFLAGS, compile both implementations and -# select which one to use at runtime, depending on whether SSE 4.2 is supported -# by the processor we're running on. +# If we are targeting a processor that has Intel SSE 4.2 instructions, we can +# use the special CRC instructions for calculating CRC-32C. If we're not +# targeting such a processor, but we can nevertheless produce code that uses +# the SSE intrinsics, perhaps with some extra CFLAGS, compile both +# implementations and select which one to use at runtime, depending on whether +# SSE 4.2 is supported by the processor we're running on. +# +# Similarly, if we are targeting an ARM processor that has the CRC +# instructions that are part of the ARMv8 CRC Extension, use them. And if +# we're not targeting such a processor, but can nevertheless produce code that +# uses the CRC instructions, compile both, and select at runtime. # # You can override this logic by setting the appropriate USE_*_CRC32 flag to 1 # in the template or configure command line. -if test x"$USE_SSE42_CRC32C" = x"" && test x"$USE_SSE42_CRC32C_WITH_RUNTIME_CHECK" = x"" && test x"$USE_SLICING_BY_8_CRC32C" = x""; then +if test x"$USE_SLICING_BY_8_CRC32C" = x"" && test x"$USE_SSE42_CRC32C" = x"" && test x"$USE_SSE42_CRC32C_WITH_RUNTIME_CHECK" = x"" && test x"$USE_ARMV8_CRC32C" = x"" && test x"$USE_ARMV8_CRC32C_WITH_RUNTIME_CHECK" = x""; then + # Use Intel SSE 4.2 if available. if test x"$pgac_sse42_crc32_intrinsics" = x"yes" && test x"$SSE4_2_TARGETED" = x"1" ; then USE_SSE42_CRC32C=1 else - # the CPUID instruction is needed for the runtime check. + # Intel SSE 4.2, with runtime check? The CPUID instruction is needed for + # the runtime check. if test x"$pgac_sse42_crc32_intrinsics" = x"yes" && (test x"$pgac_cv__get_cpuid" = x"yes" || test x"$pgac_cv__cpuid" = x"yes"); then USE_SSE42_CRC32C_WITH_RUNTIME_CHECK=1 else - # fall back to slicing-by-8 algorithm which doesn't require any special - # CPU support. - USE_SLICING_BY_8_CRC32C=1 + # Use ARM CRC Extension if available. + if test x"$pgac_armv8_crc32c_intrinsics" = x"yes" && test x"$CFLAGS_ARMV8_CRC32C" = x""; then + USE_ARMV8_CRC32C=1 + else + # ARM CRC Extension, with runtime check? + if test x"$pgac_armv8_crc32c_intrinsics" = x"yes"; then + USE_ARMV8_CRC32C_WITH_RUNTIME_CHECK=1 + else + # fall back to slicing-by-8 algorithm, which doesn't require any + # special CPU support. 
+ USE_SLICING_BY_8_CRC32C=1 + fi + fi fi fi fi @@ -1944,13 +2081,25 @@ if test x"$USE_SSE42_CRC32C" = x"1"; then AC_MSG_RESULT(SSE 4.2) else if test x"$USE_SSE42_CRC32C_WITH_RUNTIME_CHECK" = x"1"; then - AC_DEFINE(USE_SSE42_CRC32C_WITH_RUNTIME_CHECK, 1, [Define to 1 to use Intel SSSE 4.2 CRC instructions with a runtime check.]) - PG_CRC32C_OBJS="pg_crc32c_sse42.o pg_crc32c_sb8.o pg_crc32c_choose.o" + AC_DEFINE(USE_SSE42_CRC32C_WITH_RUNTIME_CHECK, 1, [Define to 1 to use Intel SSE 4.2 CRC instructions with a runtime check.]) + PG_CRC32C_OBJS="pg_crc32c_sse42.o pg_crc32c_sb8.o pg_crc32c_sse42_choose.o" AC_MSG_RESULT(SSE 4.2 with runtime check) else - AC_DEFINE(USE_SLICING_BY_8_CRC32C, 1, [Define to 1 to use Intel SSE 4.2 CRC instructions with a runtime check.]) - PG_CRC32C_OBJS="pg_crc32c_sb8.o" - AC_MSG_RESULT(slicing-by-8) + if test x"$USE_ARMV8_CRC32C" = x"1"; then + AC_DEFINE(USE_ARMV8_CRC32C, 1, [Define to 1 to use ARMv8 CRC Extension.]) + PG_CRC32C_OBJS="pg_crc32c_armv8.o" + AC_MSG_RESULT(ARMv8 CRC instructions) + else + if test x"$USE_ARMV8_CRC32C_WITH_RUNTIME_CHECK" = x"1"; then + AC_DEFINE(USE_ARMV8_CRC32C_WITH_RUNTIME_CHECK, 1, [Define to 1 to use ARMv8 CRC Extension with a runtime check.]) + PG_CRC32C_OBJS="pg_crc32c_armv8.o pg_crc32c_sb8.o pg_crc32c_armv8_choose.o" + AC_MSG_RESULT(ARMv8 CRC instructions with runtime check) + else + AC_DEFINE(USE_SLICING_BY_8_CRC32C, 1, [Define to 1 to use software CRC-32C implementation (slicing-by-8).]) + PG_CRC32C_OBJS="pg_crc32c_sb8.o" + AC_MSG_RESULT(slicing-by-8) + fi + fi fi fi AC_SUBST(PG_CRC32C_OBJS) @@ -2074,7 +2223,7 @@ fi # check for if test "$with_perl" = yes; then ac_save_CPPFLAGS=$CPPFLAGS - CPPFLAGS="$CPPFLAGS -I$perl_archlibexp/CORE" + CPPFLAGS="$CPPFLAGS $perl_includespec" AC_CHECK_HEADER(perl.h, [], [AC_MSG_ERROR([header file is required for Perl])], [#include ]) # While we're at it, check that we can link to libperl. @@ -2106,12 +2255,10 @@ fi # # Check for DocBook and tools # -PGAC_PROG_NSGMLS +PGAC_PATH_XMLLINT PGAC_CHECK_DOCBOOK(4.2) PGAC_PATH_PROGS(DBTOEPUB, dbtoepub) -PGAC_PATH_PROGS(XMLLINT, xmllint) PGAC_PATH_PROGS(XSLTPROC, xsltproc) -PGAC_PATH_PROGS(OSX, [osx sgml2xml sx]) PGAC_PATH_PROGS(FOP, fop) # @@ -2123,8 +2270,10 @@ if test "$enable_tap_tests" = yes; then # (prove might be part of a different Perl installation than perl, eg on # MSys, so the result of AX_PROG_PERL_MODULES could be irrelevant anyway.) if test -z "$PROVE"; then - AX_PROG_PERL_MODULES(IPC::Run, , - AC_MSG_ERROR([Perl module IPC::Run is required to run TAP tests])) + # Test::More and Time::HiRes are supposed to be part of core Perl, + # but some distros omit them in a minimal installation. + AX_PROG_PERL_MODULES([IPC::Run Test::More=0.87 Time::HiRes], , + [AC_MSG_ERROR([Additional Perl modules are required to run TAP tests])]) fi # Now make sure we know where prove is PGAC_PATH_PROGS(PROVE, prove) @@ -2205,10 +2354,19 @@ AC_DEFINE_UNQUOTED(PG_VERSION_STR, # awk -F is a regex on some platforms, and not on others, so make "." a tab [PG_VERSION_NUM="`echo "$PACKAGE_VERSION" | sed 's/[A-Za-z].*$//' | tr '.' ' ' | -$AWK '{printf "%d%02d%02d", $1, $2, (NF >= 3) ? $3 : 0}'`"] +$AWK '{printf "%d%04d", $1, $2}'`"] AC_DEFINE_UNQUOTED(PG_VERSION_NUM, $PG_VERSION_NUM, [PostgreSQL version as a number]) AC_SUBST(PG_VERSION_NUM) +# If we are inserting PG_SYSROOT into CPPFLAGS, do so symbolically not +# literally, so that it's possible to override it at build time using +# a command like "make ... PG_SYSROOT=path". 
This has to be done after +# we've finished all configure checks that depend on CPPFLAGS. +if test x"$PG_SYSROOT" != x; then + CPPFLAGS=`echo "$CPPFLAGS" | sed -e "s| $PG_SYSROOT | \\\$(PG_SYSROOT) |"` +fi +AC_SUBST(PG_SYSROOT) + # Begin output steps @@ -2216,6 +2374,14 @@ AC_MSG_NOTICE([using compiler=$cc_string]) AC_MSG_NOTICE([using CFLAGS=$CFLAGS]) AC_MSG_NOTICE([using CPPFLAGS=$CPPFLAGS]) AC_MSG_NOTICE([using LDFLAGS=$LDFLAGS]) +# Currently only used when LLVM is used +if test "$with_llvm" = yes ; then + AC_MSG_NOTICE([using CXX=$CXX]) + AC_MSG_NOTICE([using CXXFLAGS=$CXXFLAGS]) + AC_MSG_NOTICE([using CLANG=$CLANG]) + AC_MSG_NOTICE([using BITCODE_CFLAGS=$BITCODE_CFLAGS]) + AC_MSG_NOTICE([using BITCODE_CXXFLAGS=$BITCODE_CXXFLAGS]) +fi # prepare build tree if outside source tree # Note 1: test -ef might not exist, but it's more reliable than `pwd`. @@ -2239,10 +2405,8 @@ AC_SUBST(vpath_build) AC_CONFIG_FILES([GNUmakefile src/Makefile.global]) AC_CONFIG_LINKS([ - src/backend/port/dynloader.c:src/backend/port/dynloader/${template}.c src/backend/port/pg_sema.c:${SEMA_IMPLEMENTATION} src/backend/port/pg_shmem.c:${SHMEM_IMPLEMENTATION} - src/include/dynloader.h:src/backend/port/dynloader/${template}.h src/include/pg_config_os.h:src/include/port/${template}.h src/Makefile.port:src/makefiles/Makefile.${template} ]) diff --git a/contrib/Makefile b/contrib/Makefile index e84eb67008..92184ed487 100644 --- a/contrib/Makefile +++ b/contrib/Makefile @@ -12,7 +12,6 @@ SUBDIRS = \ bloom \ btree_gin \ btree_gist \ - chkpass \ citext \ cube \ dblink \ @@ -76,15 +75,15 @@ ALWAYS_SUBDIRS += sepgsql endif ifeq ($(with_perl),yes) -SUBDIRS += hstore_plperl +SUBDIRS += hstore_plperl jsonb_plperl else -ALWAYS_SUBDIRS += hstore_plperl +ALWAYS_SUBDIRS += hstore_plperl jsonb_plperl endif ifeq ($(with_python),yes) -SUBDIRS += hstore_plpython ltree_plpython +SUBDIRS += hstore_plpython jsonb_plpython ltree_plpython else -ALWAYS_SUBDIRS += hstore_plpython ltree_plpython +ALWAYS_SUBDIRS += hstore_plpython jsonb_plpython ltree_plpython endif # Missing: diff --git a/contrib/adminpack/.gitignore b/contrib/adminpack/.gitignore new file mode 100644 index 0000000000..5dcb3ff972 --- /dev/null +++ b/contrib/adminpack/.gitignore @@ -0,0 +1,4 @@ +# Generated subdirectories +/log/ +/results/ +/tmp_check/ diff --git a/contrib/adminpack/Makefile b/contrib/adminpack/Makefile index f065f84bfb..689aca1b38 100644 --- a/contrib/adminpack/Makefile +++ b/contrib/adminpack/Makefile @@ -5,9 +5,11 @@ OBJS = adminpack.o $(WIN32RES) PG_CPPFLAGS = -I$(libpq_srcdir) EXTENSION = adminpack -DATA = adminpack--1.0.sql +DATA = adminpack--1.0.sql adminpack--1.0--1.1.sql adminpack--1.1--2.0.sql PGFILEDESC = "adminpack - support functions for pgAdmin" +REGRESS = adminpack + ifdef USE_PGXS PG_CONFIG = pg_config PGXS := $(shell $(PG_CONFIG) --pgxs) diff --git a/contrib/adminpack/adminpack--1.0--1.1.sql b/contrib/adminpack/adminpack--1.0--1.1.sql new file mode 100644 index 0000000000..bb581653e0 --- /dev/null +++ b/contrib/adminpack/adminpack--1.0--1.1.sql @@ -0,0 +1,6 @@ +/* contrib/adminpack/adminpack--1.0--1.1.sql */ + +-- complain if script is sourced in psql, rather than via ALTER EXTENSION +\echo Use "ALTER EXTENSION adminpack UPDATE TO '1.1'" to load this file. 
\quit + +REVOKE EXECUTE ON FUNCTION pg_catalog.pg_logfile_rotate() FROM PUBLIC; diff --git a/contrib/adminpack/adminpack--1.1--2.0.sql b/contrib/adminpack/adminpack--1.1--2.0.sql new file mode 100644 index 0000000000..ceaeafa378 --- /dev/null +++ b/contrib/adminpack/adminpack--1.1--2.0.sql @@ -0,0 +1,51 @@ +/* contrib/adminpack/adminpack--1.1--2.0.sql */ + +-- complain if script is sourced in psql, rather than via ALTER EXTENSION +\echo Use "ALTER EXTENSION adminpack UPDATE TO '2.0'" to load this file. \quit + +/* *********************************************** + * Administrative functions for PostgreSQL + * *********************************************** */ + +/* generic file access functions */ + +CREATE OR REPLACE FUNCTION pg_catalog.pg_file_write(text, text, bool) +RETURNS bigint +AS 'MODULE_PATHNAME', 'pg_file_write_v1_1' +LANGUAGE C VOLATILE STRICT; + +REVOKE EXECUTE ON FUNCTION pg_catalog.pg_file_write(text, text, bool) FROM PUBLIC; + +CREATE OR REPLACE FUNCTION pg_catalog.pg_file_rename(text, text, text) +RETURNS bool +AS 'MODULE_PATHNAME', 'pg_file_rename_v1_1' +LANGUAGE C VOLATILE; + +REVOKE EXECUTE ON FUNCTION pg_catalog.pg_file_rename(text, text, text) FROM PUBLIC; + +CREATE OR REPLACE FUNCTION pg_catalog.pg_file_rename(text, text) +RETURNS bool +AS 'SELECT pg_catalog.pg_file_rename($1, $2, NULL::pg_catalog.text);' +LANGUAGE SQL VOLATILE STRICT; + +CREATE OR REPLACE FUNCTION pg_catalog.pg_file_unlink(text) +RETURNS bool +AS 'MODULE_PATHNAME', 'pg_file_unlink_v1_1' +LANGUAGE C VOLATILE STRICT; + +REVOKE EXECUTE ON FUNCTION pg_catalog.pg_file_unlink(text) FROM PUBLIC; + +CREATE OR REPLACE FUNCTION pg_catalog.pg_logdir_ls() +RETURNS setof record +AS 'MODULE_PATHNAME', 'pg_logdir_ls_v1_1' +LANGUAGE C VOLATILE STRICT; + +REVOKE EXECUTE ON FUNCTION pg_catalog.pg_logdir_ls() FROM PUBLIC; + +/* These functions are now in the backend and callers should update to use those */ + +DROP FUNCTION pg_file_read(text, bigint, bigint); + +DROP FUNCTION pg_file_length(text); + +DROP FUNCTION pg_logfile_rotate(); diff --git a/contrib/adminpack/adminpack.c b/contrib/adminpack/adminpack.c index f3f8e7f1e4..0a27701e9c 100644 --- a/contrib/adminpack/adminpack.c +++ b/contrib/adminpack/adminpack.c @@ -3,7 +3,7 @@ * adminpack.c * * - * Copyright (c) 2002-2017, PostgreSQL Global Development Group + * Copyright (c) 2002-2018, PostgreSQL Global Development Group * * Author: Andreas Pflug * @@ -18,6 +18,7 @@ #include #include +#include "catalog/pg_authid.h" #include "catalog/pg_type.h" #include "funcapi.h" #include "miscadmin.h" @@ -41,9 +42,17 @@ PG_MODULE_MAGIC; PG_FUNCTION_INFO_V1(pg_file_write); +PG_FUNCTION_INFO_V1(pg_file_write_v1_1); PG_FUNCTION_INFO_V1(pg_file_rename); +PG_FUNCTION_INFO_V1(pg_file_rename_v1_1); PG_FUNCTION_INFO_V1(pg_file_unlink); +PG_FUNCTION_INFO_V1(pg_file_unlink_v1_1); PG_FUNCTION_INFO_V1(pg_logdir_ls); +PG_FUNCTION_INFO_V1(pg_logdir_ls_v1_1); + +static int64 pg_file_write_internal(text *file, text *data, bool replace); +static bool pg_file_rename_internal(text *file1, text *file2, text *file3); +static Datum pg_logdir_ls_internal(FunctionCallInfo fcinfo); typedef struct { @@ -68,6 +77,15 @@ convert_and_check_filename(text *arg, bool logAllowed) canonicalize_path(filename); /* filename can change length here */ + /* + * Members of the 'pg_write_server_files' role are allowed to access any + * files on the server as the PG user, so no need to do any further checks + * here. 
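(Aside on the role check above: the early return is taken only for members of the pg_write_server_files default role; every other caller still goes through the path restrictions that follow. A minimal sketch of how an administrator would rely on that, assuming a purely illustrative role name ops_admin that is not part of this patch:

    -- members of pg_write_server_files skip the path restrictions in
    -- convert_and_check_filename() and may write wherever the server's
    -- OS user can
    CREATE ROLE ops_admin LOGIN;
    GRANT pg_write_server_files TO ops_admin;

EXECUTE on the adminpack 1.1+ functions must still be granted separately, as illustrated further below.)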
+ */ + if (is_member_of_role(GetUserId(), DEFAULT_ROLE_WRITE_SERVER_FILES)) + return filename; + + /* User isn't a member of the default role, so check if it's allowable */ if (is_absolute_path(filename)) { /* Disallow '/a/b/data/..' */ @@ -111,23 +129,64 @@ requireSuperuser(void) /* ------------------------------------ - * generic file handling functions + * pg_file_write - old version + * + * The superuser() check here must be kept as the library might be upgraded + * without the extension being upgraded, meaning that in pre-1.1 installations + * these functions could be called by any user. */ - Datum pg_file_write(PG_FUNCTION_ARGS) { - FILE *f; - char *filename; - text *data; + text *file = PG_GETARG_TEXT_PP(0); + text *data = PG_GETARG_TEXT_PP(1); + bool replace = PG_GETARG_BOOL(2); int64 count = 0; requireSuperuser(); - filename = convert_and_check_filename(PG_GETARG_TEXT_PP(0), false); - data = PG_GETARG_TEXT_PP(1); + count = pg_file_write_internal(file, data, replace); - if (!PG_GETARG_BOOL(2)) + PG_RETURN_INT64(count); +} + +/* ------------------------------------ + * pg_file_write_v1_1 - Version 1.1 + * + * As of adminpack version 1.1, we no longer need to check if the user + * is a superuser because we REVOKE EXECUTE on the function from PUBLIC. + * Users can then grant access to it based on their policies. + * + * Otherwise identical to pg_file_write (above). + */ +Datum +pg_file_write_v1_1(PG_FUNCTION_ARGS) +{ + text *file = PG_GETARG_TEXT_PP(0); + text *data = PG_GETARG_TEXT_PP(1); + bool replace = PG_GETARG_BOOL(2); + int64 count = 0; + + count = pg_file_write_internal(file, data, replace); + + PG_RETURN_INT64(count); +} + +/* ------------------------------------ + * pg_file_write_internal - Workhorse for pg_file_write functions. + * + * This handles the actual work for pg_file_write. + */ +static int64 +pg_file_write_internal(text *file, text *data, bool replace) +{ + FILE *f; + char *filename; + int64 count = 0; + + filename = convert_and_check_filename(file, false); + + if (!replace) { struct stat fst; @@ -153,29 +212,95 @@ pg_file_write(PG_FUNCTION_ARGS) (errcode_for_file_access(), errmsg("could not write file \"%s\": %m", filename))); - PG_RETURN_INT64(count); + return (count); } - +/* ------------------------------------ + * pg_file_rename - old version + * + * The superuser() check here must be kept as the library might be upgraded + * without the extension being upgraded, meaning that in pre-1.1 installations + * these functions could be called by any user. + */ Datum pg_file_rename(PG_FUNCTION_ARGS) { - char *fn1, - *fn2, - *fn3; - int rc; + text *file1; + text *file2; + text *file3; + bool result; requireSuperuser(); if (PG_ARGISNULL(0) || PG_ARGISNULL(1)) PG_RETURN_NULL(); - fn1 = convert_and_check_filename(PG_GETARG_TEXT_PP(0), false); - fn2 = convert_and_check_filename(PG_GETARG_TEXT_PP(1), false); + file1 = PG_GETARG_TEXT_PP(0); + file2 = PG_GETARG_TEXT_PP(1); + if (PG_ARGISNULL(2)) - fn3 = 0; + file3 = NULL; + else + file3 = PG_GETARG_TEXT_PP(2); + + result = pg_file_rename_internal(file1, file2, file3); + + PG_RETURN_BOOL(result); +} + +/* ------------------------------------ + * pg_file_rename_v1_1 - Version 1.1 + * + * As of adminpack version 1.1, we no longer need to check if the user + * is a superuser because we REVOKE EXECUTE on the function from PUBLIC. + * Users can then grant access to it based on their policies. + * + * Otherwise identical to pg_file_write (above). 
+ */ +Datum +pg_file_rename_v1_1(PG_FUNCTION_ARGS) +{ + text *file1; + text *file2; + text *file3; + bool result; + + if (PG_ARGISNULL(0) || PG_ARGISNULL(1)) + PG_RETURN_NULL(); + + file1 = PG_GETARG_TEXT_PP(0); + file2 = PG_GETARG_TEXT_PP(1); + + if (PG_ARGISNULL(2)) + file3 = NULL; + else + file3 = PG_GETARG_TEXT_PP(2); + + result = pg_file_rename_internal(file1, file2, file3); + + PG_RETURN_BOOL(result); +} + +/* ------------------------------------ + * pg_file_rename_internal - Workhorse for pg_file_rename functions. + * + * This handles the actual work for pg_file_rename. + */ +static bool +pg_file_rename_internal(text *file1, text *file2, text *file3) +{ + char *fn1, + *fn2, + *fn3; + int rc; + + fn1 = convert_and_check_filename(file1, false); + fn2 = convert_and_check_filename(file2, false); + + if (file3 == NULL) + fn3 = NULL; else - fn3 = convert_and_check_filename(PG_GETARG_TEXT_PP(2), false); + fn3 = convert_and_check_filename(file3, false); if (access(fn1, W_OK) < 0) { @@ -183,7 +308,7 @@ pg_file_rename(PG_FUNCTION_ARGS) (errcode_for_file_access(), errmsg("file \"%s\" is not accessible: %m", fn1))); - PG_RETURN_BOOL(false); + return false; } if (fn3 && access(fn2, W_OK) < 0) @@ -192,10 +317,10 @@ pg_file_rename(PG_FUNCTION_ARGS) (errcode_for_file_access(), errmsg("file \"%s\" is not accessible: %m", fn2))); - PG_RETURN_BOOL(false); + return false; } - rc = access(fn3 ? fn3 : fn2, 2); + rc = access(fn3 ? fn3 : fn2, W_OK); if (rc >= 0 || errno != ENOENT) { ereport(ERROR, @@ -243,10 +368,17 @@ pg_file_rename(PG_FUNCTION_ARGS) errmsg("could not rename \"%s\" to \"%s\": %m", fn1, fn2))); } - PG_RETURN_BOOL(true); + return true; } +/* ------------------------------------ + * pg_file_unlink - old version + * + * The superuser() check here must be kept as the library might be upgraded + * without the extension being upgraded, meaning that in pre-1.1 installations + * these functions could be called by any user. + */ Datum pg_file_unlink(PG_FUNCTION_ARGS) { @@ -278,18 +410,83 @@ pg_file_unlink(PG_FUNCTION_ARGS) } +/* ------------------------------------ + * pg_file_unlink_v1_1 - Version 1.1 + * + * As of adminpack version 1.1, we no longer need to check if the user + * is a superuser because we REVOKE EXECUTE on the function from PUBLIC. + * Users can then grant access to it based on their policies. + * + * Otherwise identical to pg_file_unlink (above). + */ Datum -pg_logdir_ls(PG_FUNCTION_ARGS) +pg_file_unlink_v1_1(PG_FUNCTION_ARGS) { - FuncCallContext *funcctx; - struct dirent *de; - directory_fctx *fctx; + char *filename; + + filename = convert_and_check_filename(PG_GETARG_TEXT_PP(0), false); + + if (access(filename, W_OK) < 0) + { + if (errno == ENOENT) + PG_RETURN_BOOL(false); + else + ereport(ERROR, + (errcode_for_file_access(), + errmsg("file \"%s\" is not accessible: %m", filename))); + } + if (unlink(filename) < 0) + { + ereport(WARNING, + (errcode_for_file_access(), + errmsg("could not unlink file \"%s\": %m", filename))); + + PG_RETURN_BOOL(false); + } + PG_RETURN_BOOL(true); +} + +/* ------------------------------------ + * pg_logdir_ls - Old version + * + * The superuser() check here must be kept as the library might be upgraded + * without the extension being upgraded, meaning that in pre-1.1 installations + * these functions could be called by any user. 
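An aside on the privilege model these comments describe: the update scripts revoke EXECUTE on the C-language functions from PUBLIC, and access is then handed out per site policy with ordinary GRANTs instead of hard-coded superuser() checks. A minimal sketch, using a hypothetical maintenance_role (the function signatures match the 1.1--2.0 script above; the role name is not part of this patch):

    ALTER EXTENSION adminpack UPDATE TO '2.0';
    CREATE ROLE maintenance_role;
    -- grant only the capabilities this role actually needs; the v1_1
    -- functions perform no superuser check of their own
    GRANT EXECUTE ON FUNCTION pg_catalog.pg_file_write(text, text, bool) TO maintenance_role;
    GRANT EXECUTE ON FUNCTION pg_catalog.pg_file_unlink(text) TO maintenance_role;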
+ */ +Datum +pg_logdir_ls(PG_FUNCTION_ARGS) +{ if (!superuser()) ereport(ERROR, (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE), (errmsg("only superuser can list the log directory")))); + return (pg_logdir_ls_internal(fcinfo)); +} + +/* ------------------------------------ + * pg_logdir_ls_v1_1 - Version 1.1 + * + * As of adminpack version 1.1, we no longer need to check if the user + * is a superuser because we REVOKE EXECUTE on the function from PUBLIC. + * Users can then grant access to it based on their policies. + * + * Otherwise identical to pg_logdir_ls (above). + */ +Datum +pg_logdir_ls_v1_1(PG_FUNCTION_ARGS) +{ + return (pg_logdir_ls_internal(fcinfo)); +} + +static Datum +pg_logdir_ls_internal(FunctionCallInfo fcinfo) +{ + FuncCallContext *funcctx; + struct dirent *de; + directory_fctx *fctx; + if (strcmp(Log_filename, "postgresql-%Y-%m-%d_%H%M%S.log") != 0) ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), @@ -319,7 +516,7 @@ pg_logdir_ls(PG_FUNCTION_ARGS) if (!fctx->dirdesc) ereport(ERROR, (errcode_for_file_access(), - errmsg("could not read directory \"%s\": %m", + errmsg("could not open directory \"%s\": %m", fctx->location))); funcctx->user_fctx = fctx; diff --git a/contrib/adminpack/adminpack.control b/contrib/adminpack/adminpack.control index c79413f378..12569dcdd7 100644 --- a/contrib/adminpack/adminpack.control +++ b/contrib/adminpack/adminpack.control @@ -1,6 +1,6 @@ # adminpack extension comment = 'administrative functions for PostgreSQL' -default_version = '1.0' +default_version = '2.0' module_pathname = '$libdir/adminpack' relocatable = false schema = pg_catalog diff --git a/contrib/adminpack/expected/adminpack.out b/contrib/adminpack/expected/adminpack.out new file mode 100644 index 0000000000..8747ac69a2 --- /dev/null +++ b/contrib/adminpack/expected/adminpack.out @@ -0,0 +1,155 @@ +CREATE EXTENSION adminpack; +-- create new file +SELECT pg_file_write('test_file1', 'test1', false); + pg_file_write +--------------- + 5 +(1 row) + +SELECT pg_read_file('test_file1'); + pg_read_file +-------------- + test1 +(1 row) + +-- append +SELECT pg_file_write('test_file1', 'test1', true); + pg_file_write +--------------- + 5 +(1 row) + +SELECT pg_read_file('test_file1'); + pg_read_file +-------------- + test1test1 +(1 row) + +-- error, already exists +SELECT pg_file_write('test_file1', 'test1', false); +ERROR: file "test_file1" exists +SELECT pg_read_file('test_file1'); + pg_read_file +-------------- + test1test1 +(1 row) + +-- disallowed file paths for non-superusers and users who are +-- not members of pg_write_server_files +CREATE ROLE regress_user1; +GRANT pg_read_all_settings TO regress_user1; +GRANT EXECUTE ON FUNCTION pg_file_write(text,text,bool) TO regress_user1; +SET ROLE regress_user1; +SELECT pg_file_write('../test_file0', 'test0', false); +ERROR: path must be in or below the current directory +SELECT pg_file_write('/tmp/test_file0', 'test0', false); +ERROR: absolute path not allowed +SELECT pg_file_write(current_setting('data_directory') || '/test_file4', 'test4', false); + pg_file_write +--------------- + 5 +(1 row) + +SELECT pg_file_write(current_setting('data_directory') || '/../test_file4', 'test4', false); +ERROR: reference to parent directory ("..") not allowed +RESET ROLE; +REVOKE EXECUTE ON FUNCTION pg_file_write(text,text,bool) FROM regress_user1; +REVOKE pg_read_all_settings FROM regress_user1; +DROP ROLE regress_user1; +-- rename file +SELECT pg_file_rename('test_file1', 'test_file2'); + pg_file_rename +---------------- + t +(1 row) + +SELECT 
pg_read_file('test_file1'); -- not there +ERROR: could not stat file "test_file1": No such file or directory +SELECT pg_read_file('test_file2'); + pg_read_file +-------------- + test1test1 +(1 row) + +-- error +SELECT pg_file_rename('test_file1', 'test_file2'); +WARNING: file "test_file1" is not accessible: No such file or directory + pg_file_rename +---------------- + f +(1 row) + +-- rename file and archive +SELECT pg_file_write('test_file3', 'test3', false); + pg_file_write +--------------- + 5 +(1 row) + +SELECT pg_file_rename('test_file2', 'test_file3', 'test_file3_archive'); + pg_file_rename +---------------- + t +(1 row) + +SELECT pg_read_file('test_file2'); -- not there +ERROR: could not stat file "test_file2": No such file or directory +SELECT pg_read_file('test_file3'); + pg_read_file +-------------- + test1test1 +(1 row) + +SELECT pg_read_file('test_file3_archive'); + pg_read_file +-------------- + test3 +(1 row) + +-- unlink +SELECT pg_file_unlink('test_file1'); -- does not exist + pg_file_unlink +---------------- + f +(1 row) + +SELECT pg_file_unlink('test_file2'); -- does not exist + pg_file_unlink +---------------- + f +(1 row) + +SELECT pg_file_unlink('test_file3'); + pg_file_unlink +---------------- + t +(1 row) + +SELECT pg_file_unlink('test_file3_archive'); + pg_file_unlink +---------------- + t +(1 row) + +SELECT pg_file_unlink('test_file4'); + pg_file_unlink +---------------- + t +(1 row) + +-- superuser checks +CREATE USER regress_user1; +SET ROLE regress_user1; +SELECT pg_file_write('test_file0', 'test0', false); +ERROR: permission denied for function pg_file_write +SELECT pg_file_rename('test_file0', 'test_file0'); +ERROR: permission denied for function pg_file_rename +CONTEXT: SQL function "pg_file_rename" statement 1 +SELECT pg_file_unlink('test_file0'); +ERROR: permission denied for function pg_file_unlink +SELECT pg_logdir_ls(); +ERROR: permission denied for function pg_logdir_ls +RESET ROLE; +DROP USER regress_user1; +-- no further tests for pg_logdir_ls() because it depends on the +-- server's logging setup diff --git a/contrib/adminpack/sql/adminpack.sql b/contrib/adminpack/sql/adminpack.sql new file mode 100644 index 0000000000..1525f0a82b --- /dev/null +++ b/contrib/adminpack/sql/adminpack.sql @@ -0,0 +1,70 @@ +CREATE EXTENSION adminpack; + +-- create new file +SELECT pg_file_write('test_file1', 'test1', false); +SELECT pg_read_file('test_file1'); + +-- append +SELECT pg_file_write('test_file1', 'test1', true); +SELECT pg_read_file('test_file1'); + +-- error, already exists +SELECT pg_file_write('test_file1', 'test1', false); +SELECT pg_read_file('test_file1'); + +-- disallowed file paths for non-superusers and users who are +-- not members of pg_write_server_files +CREATE ROLE regress_user1; + +GRANT pg_read_all_settings TO regress_user1; +GRANT EXECUTE ON FUNCTION pg_file_write(text,text,bool) TO regress_user1; + +SET ROLE regress_user1; +SELECT pg_file_write('../test_file0', 'test0', false); +SELECT pg_file_write('/tmp/test_file0', 'test0', false); +SELECT pg_file_write(current_setting('data_directory') || '/test_file4', 'test4', false); +SELECT pg_file_write(current_setting('data_directory') || '/../test_file4', 'test4', false); +RESET ROLE; +REVOKE EXECUTE ON FUNCTION pg_file_write(text,text,bool) FROM regress_user1; +REVOKE pg_read_all_settings FROM regress_user1; +DROP ROLE regress_user1; + +-- rename file +SELECT pg_file_rename('test_file1', 'test_file2'); +SELECT pg_read_file('test_file1'); -- not there +SELECT pg_read_file('test_file2'); + +-- 
error +SELECT pg_file_rename('test_file1', 'test_file2'); + +-- rename file and archive +SELECT pg_file_write('test_file3', 'test3', false); +SELECT pg_file_rename('test_file2', 'test_file3', 'test_file3_archive'); +SELECT pg_read_file('test_file2'); -- not there +SELECT pg_read_file('test_file3'); +SELECT pg_read_file('test_file3_archive'); + + +-- unlink +SELECT pg_file_unlink('test_file1'); -- does not exist +SELECT pg_file_unlink('test_file2'); -- does not exist +SELECT pg_file_unlink('test_file3'); +SELECT pg_file_unlink('test_file3_archive'); +SELECT pg_file_unlink('test_file4'); + + +-- superuser checks +CREATE USER regress_user1; +SET ROLE regress_user1; + +SELECT pg_file_write('test_file0', 'test0', false); +SELECT pg_file_rename('test_file0', 'test_file0'); +SELECT pg_file_unlink('test_file0'); +SELECT pg_logdir_ls(); + +RESET ROLE; +DROP USER regress_user1; + + +-- no further tests for pg_logdir_ls() because it depends on the +-- server's logging setup diff --git a/contrib/amcheck/Makefile b/contrib/amcheck/Makefile index 43bed919ae..c5764b544f 100644 --- a/contrib/amcheck/Makefile +++ b/contrib/amcheck/Makefile @@ -4,7 +4,7 @@ MODULE_big = amcheck OBJS = verify_nbtree.o $(WIN32RES) EXTENSION = amcheck -DATA = amcheck--1.0.sql +DATA = amcheck--1.0--1.1.sql amcheck--1.0.sql PGFILEDESC = "amcheck - function for verifying relation integrity" REGRESS = check check_btree diff --git a/contrib/amcheck/amcheck--1.0--1.1.sql b/contrib/amcheck/amcheck--1.0--1.1.sql new file mode 100644 index 0000000000..088416e7c2 --- /dev/null +++ b/contrib/amcheck/amcheck--1.0--1.1.sql @@ -0,0 +1,29 @@ +/* contrib/amcheck/amcheck--1.0--1.1.sql */ + +-- complain if script is sourced in psql, rather than via CREATE EXTENSION +\echo Use "ALTER EXTENSION amcheck UPDATE TO '1.1'" to load this file. 
\quit + +-- In order to avoid issues with dependencies when updating amcheck to 1.1, +-- create new, overloaded versions of the 1.0 functions + +-- +-- bt_index_check() +-- +CREATE FUNCTION bt_index_check(index regclass, + heapallindexed boolean) +RETURNS VOID +AS 'MODULE_PATHNAME', 'bt_index_check' +LANGUAGE C STRICT PARALLEL RESTRICTED; + +-- +-- bt_index_parent_check() +-- +CREATE FUNCTION bt_index_parent_check(index regclass, + heapallindexed boolean) +RETURNS VOID +AS 'MODULE_PATHNAME', 'bt_index_parent_check' +LANGUAGE C STRICT PARALLEL RESTRICTED; + +-- Don't want these to be available to public +REVOKE ALL ON FUNCTION bt_index_check(regclass, boolean) FROM PUBLIC; +REVOKE ALL ON FUNCTION bt_index_parent_check(regclass, boolean) FROM PUBLIC; diff --git a/contrib/amcheck/amcheck.control b/contrib/amcheck/amcheck.control index 05e2861d7a..469048403d 100644 --- a/contrib/amcheck/amcheck.control +++ b/contrib/amcheck/amcheck.control @@ -1,5 +1,5 @@ # amcheck extension comment = 'functions for verifying relation integrity' -default_version = '1.0' +default_version = '1.1' module_pathname = '$libdir/amcheck' relocatable = true diff --git a/contrib/amcheck/expected/check_btree.out b/contrib/amcheck/expected/check_btree.out index df3741e2c9..e864579774 100644 --- a/contrib/amcheck/expected/check_btree.out +++ b/contrib/amcheck/expected/check_btree.out @@ -1,10 +1,19 @@ --- minimal test, basically just verifying that amcheck CREATE TABLE bttest_a(id int8); CREATE TABLE bttest_b(id int8); +CREATE TABLE bttest_multi(id int8, data int8); +CREATE TABLE delete_test_table (a bigint, b bigint, c bigint, d bigint); +-- Stabalize tests +ALTER TABLE bttest_a SET (autovacuum_enabled = false); +ALTER TABLE bttest_b SET (autovacuum_enabled = false); +ALTER TABLE bttest_multi SET (autovacuum_enabled = false); +ALTER TABLE delete_test_table SET (autovacuum_enabled = false); INSERT INTO bttest_a SELECT * FROM generate_series(1, 100000); INSERT INTO bttest_b SELECT * FROM generate_series(100000, 1, -1); +INSERT INTO bttest_multi SELECT i, i%2 FROM generate_series(1, 100000) as i; CREATE INDEX bttest_a_idx ON bttest_a USING btree (id); CREATE INDEX bttest_b_idx ON bttest_b USING btree (id); +CREATE UNIQUE INDEX bttest_multi_idx ON bttest_multi +USING btree (id) INCLUDE (data); CREATE ROLE bttest_role; -- verify permissions are checked (error due to function not callable) SET ROLE bttest_role; @@ -18,6 +27,8 @@ RESET ROLE; -- above explicit permission has to be granted for that. 
GRANT EXECUTE ON FUNCTION bt_index_check(regclass) TO bttest_role; GRANT EXECUTE ON FUNCTION bt_index_parent_check(regclass) TO bttest_role; +GRANT EXECUTE ON FUNCTION bt_index_check(regclass, boolean) TO bttest_role; +GRANT EXECUTE ON FUNCTION bt_index_parent_check(regclass, boolean) TO bttest_role; SET ROLE bttest_role; SELECT bt_index_check('bttest_a_idx'); bt_index_check @@ -56,8 +67,14 @@ SELECT bt_index_check('bttest_a_idx'); (1 row) --- more expansive test -SELECT bt_index_parent_check('bttest_b_idx'); +-- more expansive tests +SELECT bt_index_check('bttest_a_idx', true); + bt_index_check +---------------- + +(1 row) + +SELECT bt_index_parent_check('bttest_b_idx', true); bt_index_parent_check ----------------------- @@ -85,8 +102,48 @@ WHERE relation = ANY(ARRAY['bttest_a', 'bttest_a_idx', 'bttest_b', 'bttest_b_idx (0 rows) COMMIT; +-- normal check outside of xact for index with included columns +SELECT bt_index_check('bttest_multi_idx'); + bt_index_check +---------------- + +(1 row) + +-- more expansive test for index with included columns +SELECT bt_index_parent_check('bttest_multi_idx', true); + bt_index_parent_check +----------------------- + +(1 row) + +-- repeat expansive test for index built using insertions +TRUNCATE bttest_multi; +INSERT INTO bttest_multi SELECT i, i%2 FROM generate_series(1, 100000) as i; +SELECT bt_index_parent_check('bttest_multi_idx', true); + bt_index_parent_check +----------------------- + +(1 row) + +-- +-- Test for multilevel page deletion/downlink present checks +-- +INSERT INTO delete_test_table SELECT i, 1, 2, 3 FROM generate_series(1,80000) i; +ALTER TABLE delete_test_table ADD PRIMARY KEY (a,b,c,d); +DELETE FROM delete_test_table WHERE a > 40000; +VACUUM delete_test_table; +DELETE FROM delete_test_table WHERE a > 10; +VACUUM delete_test_table; +SELECT bt_index_parent_check('delete_test_table_pkey', true); + bt_index_parent_check +----------------------- + +(1 row) + -- cleanup DROP TABLE bttest_a; DROP TABLE bttest_b; +DROP TABLE bttest_multi; +DROP TABLE delete_test_table; DROP OWNED BY bttest_role; -- permissions DROP ROLE bttest_role; diff --git a/contrib/amcheck/sql/check_btree.sql b/contrib/amcheck/sql/check_btree.sql index fd90531027..7b1ab4f148 100644 --- a/contrib/amcheck/sql/check_btree.sql +++ b/contrib/amcheck/sql/check_btree.sql @@ -1,12 +1,22 @@ --- minimal test, basically just verifying that amcheck CREATE TABLE bttest_a(id int8); CREATE TABLE bttest_b(id int8); +CREATE TABLE bttest_multi(id int8, data int8); +CREATE TABLE delete_test_table (a bigint, b bigint, c bigint, d bigint); + +-- Stabalize tests +ALTER TABLE bttest_a SET (autovacuum_enabled = false); +ALTER TABLE bttest_b SET (autovacuum_enabled = false); +ALTER TABLE bttest_multi SET (autovacuum_enabled = false); +ALTER TABLE delete_test_table SET (autovacuum_enabled = false); INSERT INTO bttest_a SELECT * FROM generate_series(1, 100000); INSERT INTO bttest_b SELECT * FROM generate_series(100000, 1, -1); +INSERT INTO bttest_multi SELECT i, i%2 FROM generate_series(1, 100000) as i; CREATE INDEX bttest_a_idx ON bttest_a USING btree (id); CREATE INDEX bttest_b_idx ON bttest_b USING btree (id); +CREATE UNIQUE INDEX bttest_multi_idx ON bttest_multi +USING btree (id) INCLUDE (data); CREATE ROLE bttest_role; @@ -21,6 +31,8 @@ RESET ROLE; -- above explicit permission has to be granted for that. 
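As an aside before the GRANTs that follow: the two-argument forms they cover are the ones added by amcheck--1.0--1.1.sql above. A minimal usage sketch outside the regression harness, with t_idx standing in for any btree index name (purely illustrative):

    ALTER EXTENSION amcheck UPDATE TO '1.1';
    -- structural checks only, as in amcheck 1.0
    SELECT bt_index_check('t_idx');
    -- additionally fingerprint the index in a Bloom filter and scan the
    -- heap to verify that every visible tuple has a matching index entry
    SELECT bt_index_check('t_idx', true);
    -- parent/child and downlink checks too; this form takes ShareLock,
    -- so concurrent writes to the table block while it runs
    SELECT bt_index_parent_check('t_idx', true);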
GRANT EXECUTE ON FUNCTION bt_index_check(regclass) TO bttest_role; GRANT EXECUTE ON FUNCTION bt_index_parent_check(regclass) TO bttest_role; +GRANT EXECUTE ON FUNCTION bt_index_check(regclass, boolean) TO bttest_role; +GRANT EXECUTE ON FUNCTION bt_index_parent_check(regclass, boolean) TO bttest_role; SET ROLE bttest_role; SELECT bt_index_check('bttest_a_idx'); SELECT bt_index_parent_check('bttest_a_idx'); @@ -42,8 +54,9 @@ ROLLBACK; -- normal check outside of xact SELECT bt_index_check('bttest_a_idx'); --- more expansive test -SELECT bt_index_parent_check('bttest_b_idx'); +-- more expansive tests +SELECT bt_index_check('bttest_a_idx', true); +SELECT bt_index_parent_check('bttest_b_idx', true); BEGIN; SELECT bt_index_check('bttest_a_idx'); @@ -54,8 +67,31 @@ WHERE relation = ANY(ARRAY['bttest_a', 'bttest_a_idx', 'bttest_b', 'bttest_b_idx AND pid = pg_backend_pid(); COMMIT; +-- normal check outside of xact for index with included columns +SELECT bt_index_check('bttest_multi_idx'); +-- more expansive test for index with included columns +SELECT bt_index_parent_check('bttest_multi_idx', true); + +-- repeat expansive test for index built using insertions +TRUNCATE bttest_multi; +INSERT INTO bttest_multi SELECT i, i%2 FROM generate_series(1, 100000) as i; +SELECT bt_index_parent_check('bttest_multi_idx', true); + +-- +-- Test for multilevel page deletion/downlink present checks +-- +INSERT INTO delete_test_table SELECT i, 1, 2, 3 FROM generate_series(1,80000) i; +ALTER TABLE delete_test_table ADD PRIMARY KEY (a,b,c,d); +DELETE FROM delete_test_table WHERE a > 40000; +VACUUM delete_test_table; +DELETE FROM delete_test_table WHERE a > 10; +VACUUM delete_test_table; +SELECT bt_index_parent_check('delete_test_table_pkey', true); + -- cleanup DROP TABLE bttest_a; DROP TABLE bttest_b; +DROP TABLE bttest_multi; +DROP TABLE delete_test_table; DROP OWNED BY bttest_role; -- permissions DROP ROLE bttest_role; diff --git a/contrib/amcheck/verify_nbtree.c b/contrib/amcheck/verify_nbtree.c index 9ae83dc839..a1438a2855 100644 --- a/contrib/amcheck/verify_nbtree.c +++ b/contrib/amcheck/verify_nbtree.c @@ -8,8 +8,13 @@ * (the insertion scankey sort-wise NULL semantics are needed for * verification). * + * When index-to-heap verification is requested, a Bloom filter is used to + * fingerprint all tuples in the target index, as the index is traversed to + * verify its structure. A heap scan later uses Bloom filter probes to verify + * that every visible heap tuple has a matching index tuple. * - * Copyright (c) 2017, PostgreSQL Global Development Group + * + * Copyright (c) 2017-2018, PostgreSQL Global Development Group * * IDENTIFICATION * contrib/amcheck/verify_nbtree.c @@ -18,11 +23,14 @@ */ #include "postgres.h" +#include "access/htup_details.h" #include "access/nbtree.h" #include "access/transam.h" +#include "access/xact.h" #include "catalog/index.h" #include "catalog/pg_am.h" #include "commands/tablecmds.h" +#include "lib/bloomfilter.h" #include "miscadmin.h" #include "storage/lmgr.h" #include "utils/memutils.h" @@ -43,9 +51,10 @@ PG_MODULE_MAGIC; * target is the point of reference for a verification operation. * * Other B-Tree pages may be allocated, but those are always auxiliary (e.g., - * they are current target's child pages). Conceptually, problems are only - * ever found in the current target page. Each page found by verification's - * left/right, top/bottom scan becomes the target exactly once. + * they are current target's child pages). 
Conceptually, problems are only + * ever found in the current target page (or for a particular heap tuple during + * heapallindexed verification). Each page found by verification's left/right, + * top/bottom scan becomes the target exactly once. */ typedef struct BtreeCheckState { @@ -53,10 +62,13 @@ typedef struct BtreeCheckState * Unchanging state, established at start of verification: */ - /* B-Tree Index Relation */ + /* B-Tree Index Relation and associated heap relation */ Relation rel; + Relation heaprel; /* ShareLock held on heap/index, rather than AccessShareLock? */ bool readonly; + /* Also verifying heap has no unindexed tuples? */ + bool heapallindexed; /* Per-page context */ MemoryContext targetcontext; /* Buffer access strategy */ @@ -72,6 +84,19 @@ typedef struct BtreeCheckState BlockNumber targetblock; /* Target page's LSN */ XLogRecPtr targetlsn; + + /* + * Mutable state, for optional heapallindexed verification: + */ + + /* Bloom filter fingerprints B-Tree index */ + bloom_filter *filter; + /* Bloom filter fingerprints downlink blocks within tree */ + bloom_filter *downlinkfilter; + /* Right half of incomplete split marker */ + bool rightsplit; + /* Debug counter */ + int64 heaptuplespresent; } BtreeCheckState; /* @@ -92,15 +117,21 @@ typedef struct BtreeLevel PG_FUNCTION_INFO_V1(bt_index_check); PG_FUNCTION_INFO_V1(bt_index_parent_check); -static void bt_index_check_internal(Oid indrelid, bool parentcheck); +static void bt_index_check_internal(Oid indrelid, bool parentcheck, + bool heapallindexed); static inline void btree_index_checkable(Relation rel); -static void bt_check_every_level(Relation rel, bool readonly); +static void bt_check_every_level(Relation rel, Relation heaprel, + bool readonly, bool heapallindexed); static BtreeLevel bt_check_level_from_leftmost(BtreeCheckState *state, BtreeLevel level); static void bt_target_page_check(BtreeCheckState *state); static ScanKey bt_right_page_check_scankey(BtreeCheckState *state); static void bt_downlink_check(BtreeCheckState *state, BlockNumber childblock, ScanKey targetkey); +static void bt_downlink_missing_check(BtreeCheckState *state); +static void bt_tuple_present_callback(Relation index, HeapTuple htup, + Datum *values, bool *isnull, + bool tupleIsAlive, void *checkstate); static inline bool offset_is_negative_infinity(BTPageOpaque opaque, OffsetNumber offset); static inline bool invariant_leq_offset(BtreeCheckState *state, @@ -116,37 +147,47 @@ static inline bool invariant_leq_nontarget_offset(BtreeCheckState *state, static Page palloc_btree_page(BtreeCheckState *state, BlockNumber blocknum); /* - * bt_index_check(index regclass) + * bt_index_check(index regclass, heapallindexed boolean) * * Verify integrity of B-Tree index. * * Acquires AccessShareLock on heap & index relations. Does not consider - * invariants that exist between parent/child pages. + * invariants that exist between parent/child pages. Optionally verifies + * that heap does not contain any unindexed or incorrectly indexed tuples. */ Datum bt_index_check(PG_FUNCTION_ARGS) { Oid indrelid = PG_GETARG_OID(0); + bool heapallindexed = false; - bt_index_check_internal(indrelid, false); + if (PG_NARGS() == 2) + heapallindexed = PG_GETARG_BOOL(1); + + bt_index_check_internal(indrelid, false, heapallindexed); PG_RETURN_VOID(); } /* - * bt_index_parent_check(index regclass) + * bt_index_parent_check(index regclass, heapallindexed boolean) * * Verify integrity of B-Tree index. * * Acquires ShareLock on heap & index relations. 
Verifies that downlinks in - * parent pages are valid lower bounds on child pages. + * parent pages are valid lower bounds on child pages. Optionally verifies + * that heap does not contain any unindexed or incorrectly indexed tuples. */ Datum bt_index_parent_check(PG_FUNCTION_ARGS) { Oid indrelid = PG_GETARG_OID(0); + bool heapallindexed = false; + + if (PG_NARGS() == 2) + heapallindexed = PG_GETARG_BOOL(1); - bt_index_check_internal(indrelid, true); + bt_index_check_internal(indrelid, true, heapallindexed); PG_RETURN_VOID(); } @@ -155,7 +196,7 @@ bt_index_parent_check(PG_FUNCTION_ARGS) * Helper for bt_index_[parent_]check, coordinating the bulk of the work. */ static void -bt_index_check_internal(Oid indrelid, bool parentcheck) +bt_index_check_internal(Oid indrelid, bool parentcheck, bool heapallindexed) { Oid heapid; Relation indrel; @@ -185,15 +226,20 @@ bt_index_check_internal(Oid indrelid, bool parentcheck) * Open the target index relations separately (like relation_openrv(), but * with heap relation locked first to prevent deadlocking). In hot * standby mode this will raise an error when parentcheck is true. + * + * There is no need for the usual indcheckxmin usability horizon test + * here, even in the heapallindexed case, because index undergoing + * verification only needs to have entries for a new transaction snapshot. + * (If this is a parentcheck verification, there is no question about + * committed or recently dead heap tuples lacking index entries due to + * concurrent activity.) */ indrel = index_open(indrelid, lockmode); /* * Since we did the IndexGetRelation call above without any lock, it's * barely possible that a race against an index drop/recreation could have - * netted us the wrong table. Although the table itself won't actually be - * examined during verification currently, a recheck still seems like a - * good idea. + * netted us the wrong table. */ if (heaprel == NULL || heapid != IndexGetRelation(indrelid, false)) ereport(ERROR, @@ -204,8 +250,8 @@ bt_index_check_internal(Oid indrelid, bool parentcheck) /* Relation suitable for checking as B-Tree? */ btree_index_checkable(indrel); - /* Check index */ - bt_check_every_level(indrel, parentcheck); + /* Check index, possibly against table it is an index on */ + bt_check_every_level(indrel, heaprel, parentcheck, heapallindexed); /* * Release locks early. That's ok here because nothing in the called @@ -253,11 +299,14 @@ btree_index_checkable(Relation rel) /* * Main entry point for B-Tree SQL-callable functions. Walks the B-Tree in - * logical order, verifying invariants as it goes. + * logical order, verifying invariants as it goes. Optionally, verification + * checks if the heap relation contains any tuples that are not represented in + * the index but should be. * * It is the caller's responsibility to acquire appropriate heavyweight lock on * the index relation, and advise us if extra checks are safe when a ShareLock - * is held. + * is held. (A lock of the same type must also have been acquired on the heap + * relation.) * * A ShareLock is generally assumed to prevent any kind of physical * modification to the index structure, including modifications that VACUUM may @@ -272,13 +321,15 @@ btree_index_checkable(Relation rel) * parent/child check cannot be affected.) 
*/ static void -bt_check_every_level(Relation rel, bool readonly) +bt_check_every_level(Relation rel, Relation heaprel, bool readonly, + bool heapallindexed) { BtreeCheckState *state; Page metapage; BTMetaPageData *metad; uint32 previouslevel; BtreeLevel current; + Snapshot snapshot = SnapshotAny; /* * RecentGlobalXmin assertion matches index_getnext_tid(). See note on @@ -289,15 +340,83 @@ bt_check_every_level(Relation rel, bool readonly) /* * Initialize state for entire verification operation */ - state = palloc(sizeof(BtreeCheckState)); + state = palloc0(sizeof(BtreeCheckState)); state->rel = rel; + state->heaprel = heaprel; state->readonly = readonly; + state->heapallindexed = heapallindexed; + + if (state->heapallindexed) + { + int64 total_elems; + uint64 seed; + + /* Size Bloom filter based on estimated number of tuples in index */ + total_elems = (int64) state->rel->rd_rel->reltuples; + /* Random seed relies on backend srandom() call to avoid repetition */ + seed = random(); + /* Create Bloom filter to fingerprint index */ + state->filter = bloom_create(total_elems, maintenance_work_mem, seed); + state->heaptuplespresent = 0; + + /* + * Register our own snapshot in !readonly case, rather than asking + * IndexBuildHeapScan() to do this for us later. This needs to happen + * before index fingerprinting begins, so we can later be certain that + * index fingerprinting should have reached all tuples returned by + * IndexBuildHeapScan(). + * + * In readonly case, we also check for problems with missing + * downlinks. A second Bloom filter is used for this. + */ + if (!state->readonly) + { + snapshot = RegisterSnapshot(GetTransactionSnapshot()); + + /* + * GetTransactionSnapshot() always acquires a new MVCC snapshot in + * READ COMMITTED mode. A new snapshot is guaranteed to have all + * the entries it requires in the index. + * + * We must defend against the possibility that an old xact + * snapshot was returned at higher isolation levels when that + * snapshot is not safe for index scans of the target index. This + * is possible when the snapshot sees tuples that are before the + * index's indcheckxmin horizon. Throwing an error here should be + * very rare. It doesn't seem worth using a secondary snapshot to + * avoid this. + */ + if (IsolationUsesXactSnapshot() && rel->rd_index->indcheckxmin && + !TransactionIdPrecedes(HeapTupleHeaderGetXmin(rel->rd_indextuple->t_data), + snapshot->xmin)) + ereport(ERROR, + (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE), + errmsg("index \"%s\" cannot be verified using transaction snapshot", + RelationGetRelationName(rel)))); + } + else + { + int64 total_pages; + + /* + * Extra readonly downlink check. + * + * In readonly case, we know that there cannot be a concurrent + * page split or a concurrent page deletion, which gives us the + * opportunity to verify that every non-ignorable page had a + * downlink one level up. We must be tolerant of interrupted page + * splits and page deletions, though. This is taken care of in + * bt_downlink_missing_check(). 
+ */ + total_pages = (int64) state->rel->rd_rel->relpages; + state->downlinkfilter = bloom_create(total_pages, work_mem, seed); + } + } + /* Create context for page */ state->targetcontext = AllocSetContextCreate(CurrentMemoryContext, "amcheck context", - ALLOCSET_DEFAULT_MINSIZE, - ALLOCSET_DEFAULT_INITSIZE, - ALLOCSET_DEFAULT_MAXSIZE); + ALLOCSET_DEFAULT_SIZES); state->checkstrategy = GetAccessStrategy(BAS_BULKREAD); /* Get true root block from meta-page */ @@ -332,6 +451,12 @@ bt_check_every_level(Relation rel, bool readonly) current.istruerootlevel = true; while (current.leftmost != P_NONE) { + /* + * Leftmost page on level cannot be right half of incomplete split. + * This can go stale immediately in !readonly case. + */ + state->rightsplit = false; + /* * Verify this level, and get left most page for next level down, if * not at leaf level @@ -347,6 +472,79 @@ bt_check_every_level(Relation rel, bool readonly) previouslevel = current.level; } + /* + * * Check whether heap contains unindexed/malformed tuples * + */ + if (state->heapallindexed) + { + IndexInfo *indexinfo = BuildIndexInfo(state->rel); + HeapScanDesc scan; + + /* Report on extra downlink checks performed in readonly case */ + if (state->readonly) + { + ereport(DEBUG1, + (errmsg_internal("finished verifying presence of downlink blocks within index \"%s\" with bitset %.2f%% set", + RelationGetRelationName(rel), + 100.0 * bloom_prop_bits_set(state->downlinkfilter)))); + bloom_free(state->downlinkfilter); + } + + /* + * Create our own scan for IndexBuildHeapScan(), rather than getting + * it to do so for us. This is required so that we can actually use + * the MVCC snapshot registered earlier in !readonly case. + * + * Note that IndexBuildHeapScan() calls heap_endscan() for us. + */ + scan = heap_beginscan_strat(state->heaprel, /* relation */ + snapshot, /* snapshot */ + 0, /* number of keys */ + NULL, /* scan key */ + true, /* buffer access strategy OK */ + true); /* syncscan OK? */ + + /* + * Scan will behave as the first scan of a CREATE INDEX CONCURRENTLY + * behaves in !readonly case. + * + * It's okay that we don't actually use the same lock strength for the + * heap relation as any other ii_Concurrent caller would in !readonly + * case. We have no reason to care about a concurrent VACUUM + * operation, since there isn't going to be a second scan of the heap + * that needs to be sure that there was no concurrent recycling of + * TIDs. + */ + indexinfo->ii_Concurrent = !state->readonly; + + /* + * Don't wait for uncommitted tuple xact commit/abort when index is a + * unique index on a catalog (or an index used by an exclusion + * constraint). This could otherwise happen in the readonly case. 
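+ *
+ * (Concretely, IndexBuildHeapScan() only performs XactLockTableWait()
+ * for in-progress tuples when it is checking uniqueness or an exclusion
+ * constraint, so clearing these fields below should keep verification
+ * from blocking behind other transactions.)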
+ */ + indexinfo->ii_Unique = false; + indexinfo->ii_ExclusionOps = NULL; + indexinfo->ii_ExclusionProcs = NULL; + indexinfo->ii_ExclusionStrats = NULL; + + elog(DEBUG1, "verifying that tuples from index \"%s\" are present in \"%s\"", + RelationGetRelationName(state->rel), + RelationGetRelationName(state->heaprel)); + + IndexBuildHeapScan(state->heaprel, state->rel, indexinfo, true, + bt_tuple_present_callback, (void *) state, scan); + + ereport(DEBUG1, + (errmsg_internal("finished verifying presence of " INT64_FORMAT " tuples from table \"%s\" with bitset %.2f%% set", + state->heaptuplespresent, RelationGetRelationName(heaprel), + 100.0 * bloom_prop_bits_set(state->filter)))); + + if (snapshot != SnapshotAny) + UnregisterSnapshot(snapshot); + + bloom_free(state->filter); + } + /* Be tidy: */ MemoryContextDelete(state->targetcontext); } @@ -407,6 +605,25 @@ bt_check_level_from_leftmost(BtreeCheckState *state, BtreeLevel level) if (P_IGNORE(opaque)) { + /* + * Since there cannot be a concurrent VACUUM operation in readonly + * mode, and since a page has no links within other pages + * (siblings and parent) once it is marked fully deleted, it + * should be impossible to land on a fully deleted page in + * readonly mode. See bt_downlink_check() for further details. + * + * The bt_downlink_check() P_ISDELETED() check is repeated here so + * that pages that are only reachable through sibling links get + * checked. + */ + if (state->readonly && P_ISDELETED(opaque)) + ereport(ERROR, + (errcode(ERRCODE_INDEX_CORRUPTED), + errmsg("downlink or sibling link points to deleted block in index \"%s\"", + RelationGetRelationName(state->rel)), + errdetail_internal("Block=%u left block=%u left link from block=%u.", + current, leftcurrent, opaque->btpo_prev))); + if (P_RIGHTMOST(opaque)) ereport(ERROR, (errcode(ERRCODE_INDEX_CORRUPTED), @@ -460,7 +677,7 @@ bt_check_level_from_leftmost(BtreeCheckState *state, BtreeLevel level) /* Internal page -- downlink gets leftmost on next level */ itemid = PageGetItemId(state->target, P_FIRSTDATAKEY(opaque)); itup = (IndexTuple) PageGetItem(state->target, itemid); - nextleveldown.leftmost = ItemPointerGetBlockNumber(&(itup->t_tid)); + nextleveldown.leftmost = BTreeInnerTupleGetDownLink(itup); nextleveldown.level = opaque->btpo.level - 1; } else @@ -482,6 +699,10 @@ bt_check_level_from_leftmost(BtreeCheckState *state, BtreeLevel level) */ } + /* + * readonly mode can only ever land on live pages and half-dead pages, + * so sibling pointers should always be in mutual agreement + */ if (state->readonly && opaque->btpo_prev != leftcurrent) ereport(ERROR, (errcode(ERRCODE_INDEX_CORRUPTED), @@ -499,7 +720,7 @@ bt_check_level_from_leftmost(BtreeCheckState *state, BtreeLevel level) errdetail_internal("Block pointed to=%u expected level=%u level in pointed to block=%u.", current, level.level, opaque->btpo.level))); - /* Verify invariants for page -- all important checks occur here */ + /* Verify invariants for page */ bt_target_page_check(state); nextpage: @@ -511,6 +732,13 @@ bt_check_level_from_leftmost(BtreeCheckState *state, BtreeLevel level) errmsg("circular link chain found in block %u of index \"%s\"", current, RelationGetRelationName(state->rel)))); + /* + * Record if page that is about to become target is the right half of + * an incomplete page split. This can go stale immediately in + * !readonly case. 
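+ *
+ * (bt_downlink_missing_check() consumes this flag: when rightsplit is
+ * set, a missing downlink is reported at DEBUG1 as a harmless
+ * interrupted page split rather than as corruption.)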
+ */ + state->rightsplit = P_INCOMPLETE_SPLIT(opaque); + leftcurrent = current; current = opaque->btpo_next; @@ -541,11 +769,19 @@ bt_check_level_from_leftmost(BtreeCheckState *state, BtreeLevel level) * "real" data item on the page to the right (if such a first item is * available). * - * Furthermore, when state passed shows ShareLock held, and target page is - * internal page, function also checks: + * - That tuples report that they have the expected number of attributes. + * INCLUDE index pivot tuples should not contain non-key attributes. + * + * Furthermore, when state passed shows ShareLock held, function also checks: * * - That all child pages respect downlinks lower bound. * + * - That downlink to block was encountered in parent where that's expected. + * (Limited to heapallindexed readonly callers.) + * + * This is also where heapallindexed callers use their Bloom filter to + * fingerprint IndexTuples for later IndexBuildHeapScan() verification. + * * Note: Memory allocated in this routine is expected to be released by caller * resetting state->targetcontext. */ @@ -562,10 +798,35 @@ bt_target_page_check(BtreeCheckState *state) elog(DEBUG2, "verifying %u items on %s block %u", max, P_ISLEAF(topaque) ? "leaf" : "internal", state->targetblock); + /* + * Check the number of attributes in high key. Note, rightmost page + * doesn't contain a high key, so nothing to check + */ + if (!P_RIGHTMOST(topaque) && + !_bt_check_natts(state->rel, state->target, P_HIKEY)) + { + ItemId itemid; + IndexTuple itup; + + itemid = PageGetItemId(state->target, P_HIKEY); + itup = (IndexTuple) PageGetItem(state->target, itemid); + + ereport(ERROR, + (errcode(ERRCODE_INDEX_CORRUPTED), + errmsg("wrong number of high key index tuple attributes in index \"%s\"", + RelationGetRelationName(state->rel)), + errdetail_internal("Index block=%u natts=%u block type=%s page lsn=%X/%X.", + state->targetblock, + BTreeTupleGetNAtts(itup, state->rel), + P_ISLEAF(topaque) ? "heap" : "index", + (uint32) (state->targetlsn >> 32), + (uint32) state->targetlsn))); + } + /* * Loop over page items, starting from first non-highkey item, not high - * key (if any). Also, immediately skip "negative infinity" real item (if - * any). + * key (if any). Most tests are not performed for the "negative infinity" + * real item (if any). */ for (offset = P_FIRSTDATAKEY(topaque); offset <= max; @@ -574,21 +835,80 @@ bt_target_page_check(BtreeCheckState *state) ItemId itemid; IndexTuple itup; ScanKey skey; + size_t tupsize; CHECK_FOR_INTERRUPTS(); + itemid = PageGetItemId(state->target, offset); + itup = (IndexTuple) PageGetItem(state->target, itemid); + tupsize = IndexTupleSize(itup); + /* - * Don't try to generate scankey using "negative infinity" garbage - * data + * lp_len should match the IndexTuple reported length exactly, since + * lp_len is completely redundant in indexes, and both sources of + * tuple length are MAXALIGN()'d. nbtree does not use lp_len all that + * frequently, and is surprisingly tolerant of corrupt lp_len fields. 
+ */ + if (tupsize != ItemIdGetLength(itemid)) + ereport(ERROR, + (errcode(ERRCODE_INDEX_CORRUPTED), + errmsg("index tuple size does not equal lp_len in index \"%s\"", + RelationGetRelationName(state->rel)), + errdetail_internal("Index tid=(%u,%u) tuple size=%zu lp_len=%u page lsn=%X/%X.", + state->targetblock, offset, + tupsize, ItemIdGetLength(itemid), + (uint32) (state->targetlsn >> 32), + (uint32) state->targetlsn), + errhint("This could be a torn page problem."))); + + /* Check the number of index tuple attributes */ + if (!_bt_check_natts(state->rel, state->target, offset)) + { + char *itid, + *htid; + + itid = psprintf("(%u,%u)", state->targetblock, offset); + htid = psprintf("(%u,%u)", + ItemPointerGetBlockNumberNoCheck(&(itup->t_tid)), + ItemPointerGetOffsetNumberNoCheck(&(itup->t_tid))); + + ereport(ERROR, + (errcode(ERRCODE_INDEX_CORRUPTED), + errmsg("wrong number of index tuple attributes in index \"%s\"", + RelationGetRelationName(state->rel)), + errdetail_internal("Index tid=%s natts=%u points to %s tid=%s page lsn=%X/%X.", + itid, + BTreeTupleGetNAtts(itup, state->rel), + P_ISLEAF(topaque) ? "heap" : "index", + htid, + (uint32) (state->targetlsn >> 32), + (uint32) state->targetlsn))); + } + + /* Fingerprint downlink blocks in heapallindexed + readonly case */ + if (state->heapallindexed && state->readonly && !P_ISLEAF(topaque)) + { + BlockNumber childblock = BTreeInnerTupleGetDownLink(itup); + + bloom_add_element(state->downlinkfilter, + (unsigned char *) &childblock, + sizeof(BlockNumber)); + } + + /* + * Don't try to generate scankey using "negative infinity" item on + * internal pages. They are always truncated to zero attributes. */ if (offset_is_negative_infinity(topaque, offset)) continue; /* Build insertion scankey for current page offset */ - itemid = PageGetItemId(state->target, offset); - itup = (IndexTuple) PageGetItem(state->target, itemid); skey = _bt_mkscankey(state->rel, itup); + /* Fingerprint leaf page tuples (those that point to the heap) */ + if (state->heapallindexed && P_ISLEAF(topaque) && !ItemIdIsDead(itemid)) + bloom_add_element(state->filter, (unsigned char *) itup, tupsize); + /* * * High key check * * @@ -617,8 +937,8 @@ bt_target_page_check(BtreeCheckState *state) itid = psprintf("(%u,%u)", state->targetblock, offset); htid = psprintf("(%u,%u)", - ItemPointerGetBlockNumber(&(itup->t_tid)), - ItemPointerGetOffsetNumber(&(itup->t_tid))); + ItemPointerGetBlockNumberNoCheck(&(itup->t_tid)), + ItemPointerGetOffsetNumberNoCheck(&(itup->t_tid))); ereport(ERROR, (errcode(ERRCODE_INDEX_CORRUPTED), @@ -649,8 +969,8 @@ bt_target_page_check(BtreeCheckState *state) itid = psprintf("(%u,%u)", state->targetblock, offset); htid = psprintf("(%u,%u)", - ItemPointerGetBlockNumber(&(itup->t_tid)), - ItemPointerGetOffsetNumber(&(itup->t_tid))); + ItemPointerGetBlockNumberNoCheck(&(itup->t_tid)), + ItemPointerGetOffsetNumberNoCheck(&(itup->t_tid))); nitid = psprintf("(%u,%u)", state->targetblock, OffsetNumberNext(offset)); @@ -658,8 +978,8 @@ bt_target_page_check(BtreeCheckState *state) itemid = PageGetItemId(state->target, OffsetNumberNext(offset)); itup = (IndexTuple) PageGetItem(state->target, itemid); nhtid = psprintf("(%u,%u)", - ItemPointerGetBlockNumber(&(itup->t_tid)), - ItemPointerGetOffsetNumber(&(itup->t_tid))); + ItemPointerGetBlockNumberNoCheck(&(itup->t_tid)), + ItemPointerGetOffsetNumberNoCheck(&(itup->t_tid))); ereport(ERROR, (errcode(ERRCODE_INDEX_CORRUPTED), @@ -682,8 +1002,10 @@ bt_target_page_check(BtreeCheckState *state) * * Last item check * * * 
Check last item against next/right page's first data item's when - * last item on page is reached. This additional check can detect - * transposed pages. + * last item on page is reached. This additional check will detect + * transposed pages iff the supposed right sibling page happens to + * belong before target in the key space. (Otherwise, a subsequent + * heap verification will probably detect the problem.) * * This check is similar to the item order check that will have * already been performed for every other "real" item on target page @@ -745,11 +1067,19 @@ bt_target_page_check(BtreeCheckState *state) */ if (!P_ISLEAF(topaque) && state->readonly) { - BlockNumber childblock = ItemPointerGetBlockNumber(&(itup->t_tid)); + BlockNumber childblock = BTreeInnerTupleGetDownLink(itup); bt_downlink_check(state, childblock, skey); } } + + /* + * * Check if page has a downlink in parent * + * + * This can only be checked in heapallindexed + readonly case. + */ + if (state->heapallindexed && state->readonly) + bt_downlink_missing_check(state); } /* @@ -1033,6 +1363,40 @@ bt_downlink_check(BtreeCheckState *state, BlockNumber childblock, copaque = (BTPageOpaque) PageGetSpecialPointer(child); maxoffset = PageGetMaxOffsetNumber(child); + /* + * Since there cannot be a concurrent VACUUM operation in readonly mode, + * and since a page has no links within other pages (siblings and parent) + * once it is marked fully deleted, it should be impossible to land on a + * fully deleted page. + * + * It does not quite make sense to enforce that the page cannot even be + * half-dead, despite the fact the downlink is modified at the same stage + * that the child leaf page is marked half-dead. That's incorrect because + * there may occasionally be multiple downlinks from a chain of pages + * undergoing deletion, where multiple successive calls are made to + * _bt_unlink_halfdead_page() by VACUUM before it can finally safely mark + * the leaf page as fully dead. While _bt_mark_page_halfdead() usually + * removes the downlink to the leaf page that is marked half-dead, that's + * not guaranteed, so it's possible we'll land on a half-dead page with a + * downlink due to an interrupted multi-level page deletion. + * + * We go ahead with our checks if the child page is half-dead. It's safe + * to do so because we do not test the child's high key, so it does not + * matter that the original high key will have been replaced by a dummy + * truncated high key within _bt_mark_page_halfdead(). All other page + * items are left intact on a half-dead page, so there is still something + * to test. + */ + if (P_ISDELETED(copaque)) + ereport(ERROR, + (errcode(ERRCODE_INDEX_CORRUPTED), + errmsg("downlink to deleted page found in index \"%s\"", + RelationGetRelationName(state->rel)), + errdetail_internal("Parent block=%u child block=%u parent page lsn=%X/%X.", + state->targetblock, childblock, + (uint32) (state->targetlsn >> 32), + (uint32) state->targetlsn))); + for (offset = P_FIRSTDATAKEY(copaque); offset <= maxoffset; offset = OffsetNumberNext(offset)) @@ -1061,6 +1425,296 @@ bt_downlink_check(BtreeCheckState *state, BlockNumber childblock, pfree(child); } +/* + * Checks if page is missing a downlink that it should have. + * + * A page that lacks a downlink/parent may indicate corruption. However, we + * must account for the fact that a missing downlink can occasionally be + * encountered in a non-corrupt index. This can be due to an interrupted page + * split, or an interrupted multi-level page deletion (i.e. 
there was a hard + * crash or an error during a page split, or while VACUUM was deleting a + * multi-level chain of pages). + * + * Note that this can only be called in readonly mode, so there is no need to + * be concerned about concurrent page splits or page deletions. + */ +static void +bt_downlink_missing_check(BtreeCheckState *state) +{ + BTPageOpaque topaque = (BTPageOpaque) PageGetSpecialPointer(state->target); + ItemId itemid; + IndexTuple itup; + Page child; + BTPageOpaque copaque; + uint32 level; + BlockNumber childblk; + + Assert(state->heapallindexed && state->readonly); + Assert(!P_IGNORE(topaque)); + + /* No next level up with downlinks to fingerprint from the true root */ + if (P_ISROOT(topaque)) + return; + + /* + * Incomplete (interrupted) page splits can account for the lack of a + * downlink. Some inserting transaction should eventually complete the + * page split in passing, when it notices that the left sibling page is + * P_INCOMPLETE_SPLIT(). + * + * In general, VACUUM is not prepared for there to be no downlink to a + * page that it deletes. This is the main reason why the lack of a + * downlink can be reported as corruption here. It's not obvious that an + * invalid missing downlink can result in wrong answers to queries, + * though, since index scans that land on the child may end up + * consistently moving right. The handling of concurrent page splits (and + * page deletions) within _bt_moveright() cannot distinguish + * inconsistencies that last for a moment from inconsistencies that are + * permanent and irrecoverable. + * + * VACUUM isn't even prepared to delete pages that have no downlink due to + * an incomplete page split, but it can detect and reason about that case + * by design, so it shouldn't be taken to indicate corruption. See + * _bt_pagedel() for full details. + */ + if (state->rightsplit) + { + ereport(DEBUG1, + (errcode(ERRCODE_NO_DATA), + errmsg("harmless interrupted page split detected in index %s", + RelationGetRelationName(state->rel)), + errdetail_internal("Block=%u level=%u left sibling=%u page lsn=%X/%X.", + state->targetblock, topaque->btpo.level, + topaque->btpo_prev, + (uint32) (state->targetlsn >> 32), + (uint32) state->targetlsn))); + return; + } + + /* Target's downlink is typically present in parent/fingerprinted */ + if (!bloom_lacks_element(state->downlinkfilter, + (unsigned char *) &state->targetblock, + sizeof(BlockNumber))) + return; + + /* + * Target is probably the "top parent" of a multi-level page deletion. + * We'll need to descend the subtree to make sure that descendant pages + * are consistent with that, though. + * + * If the target page (which must be non-ignorable) is a leaf page, then + * clearly it can't be the top parent. The lack of a downlink is probably + * a symptom of a broad problem that could just as easily cause + * inconsistencies anywhere else. 
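+ *
+ * In outline, the code below descends from the target via the leftmost
+ * (negative infinity) downlink at each level until it reaches a leaf.
+ * The missing downlink is only accepted as benign if that leaf is
+ * half-dead and its high key's top parent link points back at the
+ * target; otherwise corruption is reported.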
+ */ + if (P_ISLEAF(topaque)) + ereport(ERROR, + (errcode(ERRCODE_INDEX_CORRUPTED), + errmsg("leaf index block lacks downlink in index \"%s\"", + RelationGetRelationName(state->rel)), + errdetail_internal("Block=%u page lsn=%X/%X.", + state->targetblock, + (uint32) (state->targetlsn >> 32), + (uint32) state->targetlsn))); + + /* Descend from the target page, which is an internal page */ + elog(DEBUG1, "checking for interrupted multi-level deletion due to missing downlink in index \"%s\"", + RelationGetRelationName(state->rel)); + + level = topaque->btpo.level; + itemid = PageGetItemId(state->target, P_FIRSTDATAKEY(topaque)); + itup = (IndexTuple) PageGetItem(state->target, itemid); + childblk = BTreeInnerTupleGetDownLink(itup); + for (;;) + { + CHECK_FOR_INTERRUPTS(); + + child = palloc_btree_page(state, childblk); + copaque = (BTPageOpaque) PageGetSpecialPointer(child); + + if (P_ISLEAF(copaque)) + break; + + /* Do an extra sanity check in passing on internal pages */ + if (copaque->btpo.level != level - 1) + ereport(ERROR, + (errcode(ERRCODE_INDEX_CORRUPTED), + errmsg_internal("downlink points to block in index \"%s\" whose level is not one level down", + RelationGetRelationName(state->rel)), + errdetail_internal("Top parent/target block=%u block pointed to=%u expected level=%u level in pointed to block=%u.", + state->targetblock, childblk, + level - 1, copaque->btpo.level))); + + level = copaque->btpo.level; + itemid = PageGetItemId(child, P_FIRSTDATAKEY(copaque)); + itup = (IndexTuple) PageGetItem(child, itemid); + childblk = BTreeInnerTupleGetDownLink(itup); + /* Be slightly more pro-active in freeing this memory, just in case */ + pfree(child); + } + + /* + * Since there cannot be a concurrent VACUUM operation in readonly mode, + * and since a page has no links within other pages (siblings and parent) + * once it is marked fully deleted, it should be impossible to land on a + * fully deleted page. See bt_downlink_check() for further details. + * + * The bt_downlink_check() P_ISDELETED() check is repeated here because + * bt_downlink_check() does not visit pages reachable through negative + * infinity items. Besides, bt_downlink_check() is unwilling to descend + * multiple levels. (The similar bt_downlink_check() P_ISDELETED() check + * within bt_check_level_from_leftmost() won't reach the page either, + * since the leaf's live siblings should have their sibling links updated + * to bypass the deletion target page when it is marked fully dead.) + * + * If this error is raised, it might be due to a previous multi-level page + * deletion that failed to realize that it wasn't yet safe to mark the + * leaf page as fully dead. A "dangling downlink" will still remain when + * this happens. The fact that the dangling downlink's page (the leaf's + * parent/ancestor page) lacked a downlink is incidental. + */ + if (P_ISDELETED(copaque)) + ereport(ERROR, + (errcode(ERRCODE_INDEX_CORRUPTED), + errmsg_internal("downlink to deleted leaf page found in index \"%s\"", + RelationGetRelationName(state->rel)), + errdetail_internal("Top parent/target block=%u leaf block=%u top parent/target lsn=%X/%X.", + state->targetblock, childblk, + (uint32) (state->targetlsn >> 32), + (uint32) state->targetlsn))); + + /* + * Iff leaf page is half-dead, its high key top parent link should point + * to what VACUUM considered to be the top parent page at the instant it + * was interrupted. 
Provided the high key link actually points to the + * target page, the missing downlink we detected is consistent with there + * having been an interrupted multi-level page deletion. This means that + * the subtree with the target page at its root (a page deletion chain) is + * in a consistent state, enabling VACUUM to resume deleting the entire + * chain the next time it encounters the half-dead leaf page. + */ + if (P_ISHALFDEAD(copaque) && !P_RIGHTMOST(copaque)) + { + itemid = PageGetItemId(child, P_HIKEY); + itup = (IndexTuple) PageGetItem(child, itemid); + if (BTreeTupleGetTopParent(itup) == state->targetblock) + return; + } + + ereport(ERROR, + (errcode(ERRCODE_INDEX_CORRUPTED), + errmsg("internal index block lacks downlink in index \"%s\"", + RelationGetRelationName(state->rel)), + errdetail_internal("Block=%u level=%u page lsn=%X/%X.", + state->targetblock, topaque->btpo.level, + (uint32) (state->targetlsn >> 32), + (uint32) state->targetlsn))); +} + +/* + * Per-tuple callback from IndexBuildHeapScan, used to determine if index has + * all the entries that definitely should have been observed in leaf pages of + * the target index (that is, all IndexTuples that were fingerprinted by our + * Bloom filter). All heapallindexed checks occur here. + * + * The redundancy between an index and the table it indexes provides a good + * opportunity to detect corruption, especially corruption within the table. + * The high level principle behind the verification performed here is that any + * IndexTuple that should be in an index following a fresh CREATE INDEX (based + * on the same index definition) should also have been in the original, + * existing index, which should have used exactly the same representation + * + * Since the overall structure of the index has already been verified, the most + * likely explanation for error here is a corrupt heap page (could be logical + * or physical corruption). Index corruption may still be detected here, + * though. Only readonly callers will have verified that left links and right + * links are in agreement, and so it's possible that a leaf page transposition + * within index is actually the source of corruption detected here (for + * !readonly callers). The checks performed only for readonly callers might + * more accurately frame the problem as a cross-page invariant issue (this + * could even be due to recovery not replaying all WAL records). The !readonly + * ERROR message raised here includes a HINT about retrying with readonly + * verification, just in case it's a cross-page invariant issue, though that + * isn't particularly likely. + * + * IndexBuildHeapScan() expects to be able to find the root tuple when a + * heap-only tuple (the live tuple at the end of some HOT chain) needs to be + * indexed, in order to replace the actual tuple's TID with the root tuple's + * TID (which is what we're actually passed back here). The index build heap + * scan code will raise an error when a tuple that claims to be the root of the + * heap-only tuple's HOT chain cannot be located. This catches cases where the + * original root item offset/root tuple for a HOT chain indicates (for whatever + * reason) that the entire HOT chain is dead, despite the fact that the latest + * heap-only tuple should be indexed. When this happens, sequential scans may + * always give correct answers, and all indexes may be considered structurally + * consistent (i.e. the nbtree structural checks would not detect corruption). 
+ * It may be the case that only index scans give wrong answers, and yet heap or + * SLRU corruption is the real culprit. (While it's true that LP_DEAD bit + * setting will probably also leave the index in a corrupt state before too + * long, the problem is nonetheless that there is heap corruption.) + * + * Heap-only tuple handling within IndexBuildHeapScan() works in a way that + * helps us to detect index tuples that contain the wrong values (values that + * don't match the latest tuple in the HOT chain). This can happen when there + * is no superseding index tuple due to a faulty assessment of HOT safety, + * perhaps during the original CREATE INDEX. Because the latest tuple's + * contents are used with the root TID, an error will be raised when a tuple + * with the same TID but non-matching attribute values is passed back to us. + * Faulty assessment of HOT-safety was behind at least two distinct CREATE + * INDEX CONCURRENTLY bugs that made it into stable releases, one of which was + * undetected for many years. In short, the same principle that allows a + * REINDEX to repair corruption when there was an (undetected) broken HOT chain + * also allows us to detect the corruption in many cases. + */ +static void +bt_tuple_present_callback(Relation index, HeapTuple htup, Datum *values, + bool *isnull, bool tupleIsAlive, void *checkstate) +{ + BtreeCheckState *state = (BtreeCheckState *) checkstate; + IndexTuple itup; + + Assert(state->heapallindexed); + + /* + * Generate an index tuple for fingerprinting. + * + * Index tuple formation is assumed to be deterministic, and IndexTuples + * are assumed immutable. While the LP_DEAD bit is mutable in leaf pages, + * that's ItemId metadata, which was not fingerprinted. (There will often + * be some dead-to-everyone IndexTuples fingerprinted by the Bloom filter, + * but we only try to detect the absence of needed tuples, so that's + * okay.) + * + * Note that we rely on deterministic index_form_tuple() TOAST + * compression. If index_form_tuple() was ever enhanced to compress datums + * out-of-line, or otherwise varied when or how compression was applied, + * our assumption would break, leading to false positive reports of + * corruption. It's also possible that non-pivot tuples could in the + * future have alternative equivalent representations (e.g. by using the + * INDEX_ALT_TID_MASK bit). For now, we don't decompress/normalize toasted + * values as part of fingerprinting. + */ + itup = index_form_tuple(RelationGetDescr(index), values, isnull); + itup->t_tid = htup->t_self; + + /* Probe Bloom filter -- tuple should be present */ + if (bloom_lacks_element(state->filter, (unsigned char *) itup, + IndexTupleSize(itup))) + ereport(ERROR, + (errcode(ERRCODE_DATA_CORRUPTED), + errmsg("heap tuple (%u,%u) from table \"%s\" lacks matching index tuple within index \"%s\"", + ItemPointerGetBlockNumber(&(itup->t_tid)), + ItemPointerGetOffsetNumber(&(itup->t_tid)), + RelationGetRelationName(state->heaprel), + RelationGetRelationName(state->rel)), + !state->readonly + ? errhint("Retrying verification using the function bt_index_parent_check() might provide a more specific error.") + : 0)); + + state->heaptuplespresent++; + pfree(itup); +} + /* * Is particular offset within page (whose special state is passed by caller) * the page negative-infinity item? @@ -1081,6 +1735,10 @@ offset_is_negative_infinity(BTPageOpaque opaque, OffsetNumber offset) * infinity item is either first or second line item, or there is none * within page. 
* + * Negative infinity items are a special case among pivot tuples. They + * always have zero attributes, while all other pivot tuples always have + * nkeyatts attributes. + * * Right-most pages don't have a high key, but could be said to * conceptually have a "positive infinity" high key. Thus, there is a * symmetry between down link items in parent pages, and high keys in @@ -1104,10 +1762,10 @@ static inline bool invariant_leq_offset(BtreeCheckState *state, ScanKey key, OffsetNumber upperbound) { - int16 natts = state->rel->rd_rel->relnatts; + int16 nkeyatts = IndexRelationGetNumberOfKeyAttributes(state->rel); int32 cmp; - cmp = _bt_compare(state->rel, natts, key, state->target, upperbound); + cmp = _bt_compare(state->rel, nkeyatts, key, state->target, upperbound); return cmp <= 0; } @@ -1123,10 +1781,10 @@ static inline bool invariant_geq_offset(BtreeCheckState *state, ScanKey key, OffsetNumber lowerbound) { - int16 natts = state->rel->rd_rel->relnatts; + int16 nkeyatts = IndexRelationGetNumberOfKeyAttributes(state->rel); int32 cmp; - cmp = _bt_compare(state->rel, natts, key, state->target, lowerbound); + cmp = _bt_compare(state->rel, nkeyatts, key, state->target, lowerbound); return cmp >= 0; } @@ -1146,10 +1804,10 @@ invariant_leq_nontarget_offset(BtreeCheckState *state, Page nontarget, ScanKey key, OffsetNumber upperbound) { - int16 natts = state->rel->rd_rel->relnatts; + int16 nkeyatts = IndexRelationGetNumberOfKeyAttributes(state->rel); int32 cmp; - cmp = _bt_compare(state->rel, natts, key, nontarget, upperbound); + cmp = _bt_compare(state->rel, nkeyatts, key, nontarget, upperbound); return cmp <= 0; } @@ -1172,6 +1830,7 @@ palloc_btree_page(BtreeCheckState *state, BlockNumber blocknum) Buffer buffer; Page page; BTPageOpaque opaque; + OffsetNumber maxoffset; page = palloc(BLCKSZ); @@ -1195,7 +1854,7 @@ palloc_btree_page(BtreeCheckState *state, BlockNumber blocknum) opaque = (BTPageOpaque) PageGetSpecialPointer(page); - if (opaque->btpo_flags & BTP_META && blocknum != BTREE_METAPAGE) + if (P_ISMETA(opaque) && blocknum != BTREE_METAPAGE) ereport(ERROR, (errcode(ERRCODE_INDEX_CORRUPTED), errmsg("invalid meta page found at block %u in index \"%s\"", @@ -1206,19 +1865,25 @@ palloc_btree_page(BtreeCheckState *state, BlockNumber blocknum) { BTMetaPageData *metad = BTPageGetMeta(page); - if (!(opaque->btpo_flags & BTP_META) || + if (!P_ISMETA(opaque) || metad->btm_magic != BTREE_MAGIC) ereport(ERROR, (errcode(ERRCODE_INDEX_CORRUPTED), errmsg("index \"%s\" meta page is corrupt", RelationGetRelationName(state->rel)))); - if (metad->btm_version != BTREE_VERSION) + if (metad->btm_version < BTREE_MIN_VERSION || + metad->btm_version > BTREE_VERSION) ereport(ERROR, (errcode(ERRCODE_INDEX_CORRUPTED), - errmsg("version mismatch in index \"%s\": file version %d, code version %d", + errmsg("version mismatch in index \"%s\": file version %d, " + "current version %d, minimum supported version %d", RelationGetRelationName(state->rel), - metad->btm_version, BTREE_VERSION))); + metad->btm_version, BTREE_VERSION, + BTREE_MIN_VERSION))); + + /* Finished with metapage checks */ + return page; } /* @@ -1231,12 +1896,66 @@ palloc_btree_page(BtreeCheckState *state, BlockNumber blocknum) errmsg("invalid leaf page level %u for block %u in index \"%s\"", opaque->btpo.level, blocknum, RelationGetRelationName(state->rel)))); - if (blocknum != BTREE_METAPAGE && !P_ISLEAF(opaque) && - !P_ISDELETED(opaque) && opaque->btpo.level == 0) + if (!P_ISLEAF(opaque) && !P_ISDELETED(opaque) && + opaque->btpo.level == 0) 
ereport(ERROR, (errcode(ERRCODE_INDEX_CORRUPTED), errmsg("invalid internal page level 0 for block %u in index \"%s\"", - opaque->btpo.level, RelationGetRelationName(state->rel)))); + blocknum, RelationGetRelationName(state->rel)))); + + /* + * Sanity checks for number of items on page. + * + * As noted at the beginning of _bt_binsrch(), an internal page must have + * children, since there must always be a negative infinity downlink + * (there may also be a highkey). In the case of non-rightmost leaf + * pages, there must be at least a highkey. + * + * This is correct when pages are half-dead, since internal pages are + * never half-dead, and leaf pages must have a high key when half-dead + * (the rightmost page can never be deleted). It's also correct with + * fully deleted pages: _bt_unlink_halfdead_page() doesn't change anything + * about the target page other than setting the page as fully dead, and + * setting its xact field. In particular, it doesn't change the sibling + * links in the deletion target itself, since they're required when index + * scans land on the deletion target, and then need to move right (or need + * to move left, in the case of backward index scans). + */ + maxoffset = PageGetMaxOffsetNumber(page); + if (maxoffset > MaxIndexTuplesPerPage) + ereport(ERROR, + (errcode(ERRCODE_INDEX_CORRUPTED), + errmsg("Number of items on block %u of index \"%s\" exceeds MaxIndexTuplesPerPage (%u)", + blocknum, RelationGetRelationName(state->rel), + MaxIndexTuplesPerPage))); + + if (!P_ISLEAF(opaque) && maxoffset < P_FIRSTDATAKEY(opaque)) + ereport(ERROR, + (errcode(ERRCODE_INDEX_CORRUPTED), + errmsg("internal block %u in index \"%s\" lacks high key and/or at least one downlink", + blocknum, RelationGetRelationName(state->rel)))); + + if (P_ISLEAF(opaque) && !P_RIGHTMOST(opaque) && maxoffset < P_HIKEY) + ereport(ERROR, + (errcode(ERRCODE_INDEX_CORRUPTED), + errmsg("non-rightmost leaf block %u in index \"%s\" lacks high key item", + blocknum, RelationGetRelationName(state->rel)))); + + /* + * In general, internal pages are never marked half-dead, except on + * versions of Postgres prior to 9.4, where it can be valid transient + * state. This state is nonetheless treated as corruption by VACUUM on + * from version 9.4 on, so do the same here. See _bt_pagedel() for full + * details. + * + * Internal pages should never have garbage items, either. + */ + if (!P_ISLEAF(opaque) && P_ISHALFDEAD(opaque)) + ereport(ERROR, + (errcode(ERRCODE_INDEX_CORRUPTED), + errmsg("internal page block %u in index \"%s\" is half-dead", + blocknum, RelationGetRelationName(state->rel)), + errhint("This can be caused by an interrupted VACUUM in version 9.3 or older, before upgrade. 
Please REINDEX it."))); if (!P_ISLEAF(opaque) && P_HAS_GARBAGE(opaque)) ereport(ERROR, diff --git a/contrib/auth_delay/auth_delay.c b/contrib/auth_delay/auth_delay.c index cd12a86b99..ad047b365f 100644 --- a/contrib/auth_delay/auth_delay.c +++ b/contrib/auth_delay/auth_delay.c @@ -2,7 +2,7 @@ * * auth_delay.c * - * Copyright (c) 2010-2017, PostgreSQL Global Development Group + * Copyright (c) 2010-2018, PostgreSQL Global Development Group * * IDENTIFICATION * contrib/auth_delay/auth_delay.c diff --git a/contrib/auto_explain/auto_explain.c b/contrib/auto_explain/auto_explain.c index edcb91542a..646cd0d42c 100644 --- a/contrib/auto_explain/auto_explain.c +++ b/contrib/auto_explain/auto_explain.c @@ -3,7 +3,7 @@ * auto_explain.c * * - * Copyright (c) 2008-2017, PostgreSQL Global Development Group + * Copyright (c) 2008-2018, PostgreSQL Global Development Group * * IDENTIFICATION * contrib/auto_explain/auto_explain.c @@ -16,6 +16,7 @@ #include "commands/explain.h" #include "executor/instrument.h" +#include "jit/jit.h" #include "utils/guc.h" PG_MODULE_MAGIC; @@ -28,6 +29,7 @@ static bool auto_explain_log_buffers = false; static bool auto_explain_log_triggers = false; static bool auto_explain_log_timing = true; static int auto_explain_log_format = EXPLAIN_FORMAT_TEXT; +static int auto_explain_log_level = LOG; static bool auto_explain_log_nested_statements = false; static double auto_explain_sample_rate = 1; @@ -39,6 +41,20 @@ static const struct config_enum_entry format_options[] = { {NULL, 0, false} }; +static const struct config_enum_entry loglevel_options[] = { + {"debug5", DEBUG5, false}, + {"debug4", DEBUG4, false}, + {"debug3", DEBUG3, false}, + {"debug2", DEBUG2, false}, + {"debug1", DEBUG1, false}, + {"debug", DEBUG2, true}, + {"info", INFO, false}, + {"notice", NOTICE, false}, + {"warning", WARNING, false}, + {"log", LOG, false}, + {NULL, 0, false} +}; + /* Current nesting depth of ExecutorRun calls */ static int nesting_level = 0; @@ -78,7 +94,7 @@ _PG_init(void) "Zero prints all plans. -1 turns this feature off.", &auto_explain_log_min_duration, -1, - -1, INT_MAX / 1000, + -1, INT_MAX, PGC_SUSET, GUC_UNIT_MS, NULL, @@ -141,6 +157,18 @@ _PG_init(void) NULL, NULL); + DefineCustomEnumVariable("auto_explain.log_level", + "Log level for the plan.", + NULL, + &auto_explain_log_level, + LOG, + loglevel_options, + PGC_SUSET, + 0, + NULL, + NULL, + NULL); + DefineCustomBoolVariable("auto_explain.log_nested_statements", "Log nested statements.", NULL, @@ -334,6 +362,8 @@ explain_ExecutorEnd(QueryDesc *queryDesc) ExplainPrintPlan(es, queryDesc); if (es->analyze && auto_explain_log_triggers) ExplainPrintTriggers(es, queryDesc); + if (es->costs) + ExplainPrintJITSummary(es, queryDesc); ExplainEndOutput(es); /* Remove last line break */ @@ -353,7 +383,7 @@ explain_ExecutorEnd(QueryDesc *queryDesc) * reported. This isn't ideal but trying to do it here would * often result in duplication. */ - ereport(LOG, + ereport(auto_explain_log_level, (errmsg("duration: %.3f ms plan:\n%s", msec, es->str->data), errhidestmt(true))); diff --git a/contrib/bloom/blcost.c b/contrib/bloom/blcost.c index ba39f627fd..fa0f17a217 100644 --- a/contrib/bloom/blcost.c +++ b/contrib/bloom/blcost.c @@ -3,7 +3,7 @@ * blcost.c * Cost estimate function for bloom indexes. 
* - * Copyright (c) 2016-2017, PostgreSQL Global Development Group + * Copyright (c) 2016-2018, PostgreSQL Global Development Group * * IDENTIFICATION * contrib/bloom/blcost.c diff --git a/contrib/bloom/blinsert.c b/contrib/bloom/blinsert.c index 0d506e3c1a..9f223d3b2a 100644 --- a/contrib/bloom/blinsert.c +++ b/contrib/bloom/blinsert.c @@ -3,7 +3,7 @@ * blinsert.c * Bloom index build and insert functions. * - * Copyright (c) 2016-2017, PostgreSQL Global Development Group + * Copyright (c) 2016-2018, PostgreSQL Global Development Group * * IDENTIFICATION * contrib/bloom/blinsert.c @@ -33,10 +33,11 @@ PG_MODULE_MAGIC; typedef struct { BloomState blstate; /* bloom index state */ + int64 indtuples; /* total number of tuples indexed */ MemoryContext tmpCtx; /* temporary memory context reset after each * tuple */ - char data[BLCKSZ]; /* cached page */ - int64 count; /* number of tuples in cached page */ + PGAlignedBlock data; /* cached page */ + int count; /* number of tuples in cached page */ } BloomBuildState; /* @@ -51,7 +52,7 @@ flushCachedPage(Relation index, BloomBuildState *buildstate) state = GenericXLogStart(index); page = GenericXLogRegisterBuffer(state, buffer, GENERIC_XLOG_FULL_IMAGE); - memcpy(page, buildstate->data, BLCKSZ); + memcpy(page, buildstate->data.data, BLCKSZ); GenericXLogFinish(state); UnlockReleaseBuffer(buffer); } @@ -62,8 +63,8 @@ flushCachedPage(Relation index, BloomBuildState *buildstate) static void initCachedPage(BloomBuildState *buildstate) { - memset(buildstate->data, 0, BLCKSZ); - BloomInitPage(buildstate->data, 0); + memset(buildstate->data.data, 0, BLCKSZ); + BloomInitPage(buildstate->data.data, 0); buildstate->count = 0; } @@ -83,7 +84,7 @@ bloomBuildCallback(Relation index, HeapTuple htup, Datum *values, itup = BloomFormTuple(&buildstate->blstate, &htup->t_self, values, isnull); /* Try to add next item to cached page */ - if (BloomPageAddItem(&buildstate->blstate, buildstate->data, itup)) + if (BloomPageAddItem(&buildstate->blstate, buildstate->data.data, itup)) { /* Next item was added successfully */ buildstate->count++; @@ -97,13 +98,19 @@ bloomBuildCallback(Relation index, HeapTuple htup, Datum *values, initCachedPage(buildstate); - if (!BloomPageAddItem(&buildstate->blstate, buildstate->data, itup)) + if (!BloomPageAddItem(&buildstate->blstate, buildstate->data.data, itup)) { /* We shouldn't be here since we're inserting to the empty page */ elog(ERROR, "could not add new bloom tuple to empty page"); } + + /* Next item was added successfully */ + buildstate->count++; } + /* Update total tuple count */ + buildstate->indtuples += 1; + MemoryContextSwitchTo(oldCtx); MemoryContextReset(buildstate->tmpCtx); } @@ -135,19 +142,18 @@ blbuild(Relation heap, Relation index, IndexInfo *indexInfo) /* Do the heap scan */ reltuples = IndexBuildHeapScan(heap, index, indexInfo, true, - bloomBuildCallback, (void *) &buildstate); + bloomBuildCallback, (void *) &buildstate, + NULL); - /* - * There are could be some items in cached page. Flush this page if - * needed. 
- */ + /* Flush last page if needed (it will be, unless heap was empty) */ if (buildstate.count > 0) flushCachedPage(index, &buildstate); MemoryContextDelete(buildstate.tmpCtx); result = (IndexBuildResult *) palloc(sizeof(IndexBuildResult)); - result->heap_tuples = result->index_tuples = reltuples; + result->heap_tuples = reltuples; + result->index_tuples = buildstate.indtuples; return result; } @@ -175,7 +181,7 @@ blbuildempty(Relation index) smgrwrite(index->rd_smgr, INIT_FORKNUM, BLOOM_METAPAGE_BLKNO, (char *) metapage, true); log_newpage(&index->rd_smgr->smgr_rnode.node, INIT_FORKNUM, - BLOOM_METAPAGE_BLKNO, metapage, false); + BLOOM_METAPAGE_BLKNO, metapage, true); /* * An immediate sync is required even if we xlog'd the page, because the diff --git a/contrib/bloom/bloom.h b/contrib/bloom/bloom.h index f3df1af781..3973ac75e8 100644 --- a/contrib/bloom/bloom.h +++ b/contrib/bloom/bloom.h @@ -3,7 +3,7 @@ * bloom.h * Header for bloom index. * - * Copyright (c) 2016-2017, PostgreSQL Global Development Group + * Copyright (c) 2016-2018, PostgreSQL Global Development Group * * IDENTIFICATION * contrib/bloom/bloom.h diff --git a/contrib/bloom/blscan.c b/contrib/bloom/blscan.c index b8fa2d0a71..2d81e38218 100644 --- a/contrib/bloom/blscan.c +++ b/contrib/bloom/blscan.c @@ -3,7 +3,7 @@ * blscan.c * Bloom index scan functions. * - * Copyright (c) 2016-2017, PostgreSQL Global Development Group + * Copyright (c) 2016-2018, PostgreSQL Global Development Group * * IDENTIFICATION * contrib/bloom/blscan.c @@ -76,7 +76,7 @@ blendscan(IndexScanDesc scan) } /* - * Insert all matching tuples into to a bitmap. + * Insert all matching tuples into a bitmap. */ int64 blgetbitmap(IndexScanDesc scan, TIDBitmap *tbm) diff --git a/contrib/bloom/blutils.c b/contrib/bloom/blutils.c index f2eda67e0a..6b2b9e3742 100644 --- a/contrib/bloom/blutils.c +++ b/contrib/bloom/blutils.c @@ -3,7 +3,7 @@ * blutils.c * Bloom index utilities. * - * Portions Copyright (c) 2016-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 2016-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1990-1993, Regents of the University of California * * IDENTIFICATION @@ -120,6 +120,7 @@ blhandler(PG_FUNCTION_ARGS) amroutine->amclusterable = false; amroutine->ampredlocks = false; amroutine->amcanparallel = false; + amroutine->amcaninclude = false; amroutine->amkeytype = InvalidOid; amroutine->ambuild = blbuild; diff --git a/contrib/bloom/blvacuum.c b/contrib/bloom/blvacuum.c index 2e060871b6..7530a664ab 100644 --- a/contrib/bloom/blvacuum.c +++ b/contrib/bloom/blvacuum.c @@ -3,7 +3,7 @@ * blvacuum.c * Bloom VACUUM functions. * - * Copyright (c) 2016-2017, PostgreSQL Global Development Group + * Copyright (c) 2016-2018, PostgreSQL Global Development Group * * IDENTIFICATION * contrib/bloom/blvacuum.c @@ -125,7 +125,7 @@ blbulkdelete(IndexVacuumInfo *info, IndexBulkDeleteResult *stats, /* Is it empty page now? */ if (BloomPageGetMaxOffset(page) == 0) BloomPageSetDeleted(page); - /* Adjust pg_lower */ + /* Adjust pd_lower */ ((PageHeader) page)->pd_lower = (Pointer) itupPtr - page; /* Finish WAL-logging */ GenericXLogFinish(gxlogState); diff --git a/contrib/bloom/blvalidate.c b/contrib/bloom/blvalidate.c index cb75d23199..7235f12307 100644 --- a/contrib/bloom/blvalidate.c +++ b/contrib/bloom/blvalidate.c @@ -3,7 +3,7 @@ * blvalidate.c * Opclass validator for bloom. 
* - * Copyright (c) 2016-2017, PostgreSQL Global Development Group + * Copyright (c) 2016-2018, PostgreSQL Global Development Group * * IDENTIFICATION * contrib/bloom/blvalidate.c diff --git a/contrib/bloom/expected/bloom.out b/contrib/bloom/expected/bloom.out index cbc50f757b..5ab9e34f82 100644 --- a/contrib/bloom/expected/bloom.out +++ b/contrib/bloom/expected/bloom.out @@ -210,3 +210,20 @@ ORDER BY 1; text_ops | t (2 rows) +-- +-- relation options +-- +DROP INDEX bloomidx; +CREATE INDEX bloomidx ON tst USING bloom (i, t) WITH (length=7, col1=4); +SELECT reloptions FROM pg_class WHERE oid = 'bloomidx'::regclass; + reloptions +------------------- + {length=7,col1=4} +(1 row) + +-- check for min and max values +\set VERBOSITY terse +CREATE INDEX bloomidx2 ON tst USING bloom (i, t) WITH (length=0); +ERROR: value 0 out of bounds for option "length" +CREATE INDEX bloomidx2 ON tst USING bloom (i, t) WITH (col1=0); +ERROR: value 0 out of bounds for option "col1" diff --git a/contrib/bloom/sql/bloom.sql b/contrib/bloom/sql/bloom.sql index 22274609f2..32755f2b1a 100644 --- a/contrib/bloom/sql/bloom.sql +++ b/contrib/bloom/sql/bloom.sql @@ -81,3 +81,14 @@ SELECT opcname, amvalidate(opc.oid) FROM pg_opclass opc JOIN pg_am am ON am.oid = opcmethod WHERE amname = 'bloom' ORDER BY 1; + +-- +-- relation options +-- +DROP INDEX bloomidx; +CREATE INDEX bloomidx ON tst USING bloom (i, t) WITH (length=7, col1=4); +SELECT reloptions FROM pg_class WHERE oid = 'bloomidx'::regclass; +-- check for min and max values +\set VERBOSITY terse +CREATE INDEX bloomidx2 ON tst USING bloom (i, t) WITH (length=0); +CREATE INDEX bloomidx2 ON tst USING bloom (i, t) WITH (col1=0); diff --git a/contrib/bloom/t/001_wal.pl b/contrib/bloom/t/001_wal.pl index dbba198254..0f2628b557 100644 --- a/contrib/bloom/t/001_wal.pl +++ b/contrib/bloom/t/001_wal.pl @@ -16,7 +16,7 @@ sub test_index_replay # Wait for standby to catch up my $applname = $node_standby->name; my $caughtup_query = -"SELECT pg_current_wal_lsn() <= write_lsn FROM pg_stat_replication WHERE application_name = '$applname';"; + "SELECT pg_current_wal_lsn() <= write_lsn FROM pg_stat_replication WHERE application_name = '$applname';"; $node_master->poll_query_until('postgres', $caughtup_query) or die "Timed out while waiting for standby 1 to catch up"; @@ -32,10 +32,11 @@ sub test_index_replay ); # Run test queries and compare their result - my $master_result = $node_master->psql("postgres", $queries); - my $standby_result = $node_standby->psql("postgres", $queries); + my $master_result = $node_master->safe_psql("postgres", $queries); + my $standby_result = $node_standby->safe_psql("postgres", $queries); is($master_result, $standby_result, "$test_name: query result matches"); + return; } # Initialize master node @@ -54,12 +55,12 @@ sub test_index_replay $node_standby->start; # Create some bloom index on master -$node_master->psql("postgres", "CREATE EXTENSION bloom;"); -$node_master->psql("postgres", "CREATE TABLE tst (i int4, t text);"); -$node_master->psql("postgres", -"INSERT INTO tst SELECT i%10, substr(md5(i::text), 1, 1) FROM generate_series(1,100000) i;" +$node_master->safe_psql("postgres", "CREATE EXTENSION bloom;"); +$node_master->safe_psql("postgres", "CREATE TABLE tst (i int4, t text);"); +$node_master->safe_psql("postgres", + "INSERT INTO tst SELECT i%10, substr(md5(i::text), 1, 1) FROM generate_series(1,100000) i;" ); -$node_master->psql("postgres", +$node_master->safe_psql("postgres", "CREATE INDEX bloomidx ON tst USING bloom (i, t) WITH (col1 = 3);"); # Test 
that queries give same result @@ -68,13 +69,13 @@ sub test_index_replay # Run 10 cycles of table modification. Run test queries after each modification. for my $i (1 .. 10) { - $node_master->psql("postgres", "DELETE FROM tst WHERE i = $i;"); + $node_master->safe_psql("postgres", "DELETE FROM tst WHERE i = $i;"); test_index_replay("delete $i"); - $node_master->psql("postgres", "VACUUM tst;"); + $node_master->safe_psql("postgres", "VACUUM tst;"); test_index_replay("vacuum $i"); my ($start, $end) = (100001 + ($i - 1) * 10000, 100000 + $i * 10000); - $node_master->psql("postgres", -"INSERT INTO tst SELECT i%10, substr(md5(i::text), 1, 1) FROM generate_series($start,$end) i;" + $node_master->safe_psql("postgres", + "INSERT INTO tst SELECT i%10, substr(md5(i::text), 1, 1) FROM generate_series($start,$end) i;" ); test_index_replay("insert $i"); } diff --git a/contrib/btree_gin/Makefile b/contrib/btree_gin/Makefile index 690e1d7602..a9e99257be 100644 --- a/contrib/btree_gin/Makefile +++ b/contrib/btree_gin/Makefile @@ -5,13 +5,13 @@ OBJS = btree_gin.o $(WIN32RES) EXTENSION = btree_gin DATA = btree_gin--1.0.sql btree_gin--1.0--1.1.sql btree_gin--1.1--1.2.sql \ - btree_gin--unpackaged--1.0.sql + btree_gin--1.2--1.3.sql btree_gin--unpackaged--1.0.sql PGFILEDESC = "btree_gin - B-tree equivalent GIN operator classes" REGRESS = install_btree_gin int2 int4 int8 float4 float8 money oid \ timestamp timestamptz time timetz date interval \ macaddr macaddr8 inet cidr text varchar char bytea bit varbit \ - numeric enum + numeric enum uuid name bool bpchar ifdef USE_PGXS PG_CONFIG = pg_config diff --git a/contrib/btree_gin/btree_gin--1.2--1.3.sql b/contrib/btree_gin/btree_gin--1.2--1.3.sql new file mode 100644 index 0000000000..db675b7747 --- /dev/null +++ b/contrib/btree_gin/btree_gin--1.2--1.3.sql @@ -0,0 +1,128 @@ +/* contrib/btree_gin/btree_gin--1.2--1.3.sql */ + +-- complain if script is sourced in psql, rather than via CREATE EXTENSION +\echo Use "ALTER EXTENSION btree_gin UPDATE TO '1.3'" to load this file. \quit + +-- uuid datatype support new in 1.3. +CREATE FUNCTION gin_extract_value_uuid(uuid, internal) +RETURNS internal +AS 'MODULE_PATHNAME' +LANGUAGE C STRICT IMMUTABLE; + +CREATE FUNCTION gin_compare_prefix_uuid(uuid, uuid, int2, internal) +RETURNS int4 +AS 'MODULE_PATHNAME' +LANGUAGE C STRICT IMMUTABLE; + +CREATE FUNCTION gin_extract_query_uuid(uuid, internal, int2, internal, internal) +RETURNS internal +AS 'MODULE_PATHNAME' +LANGUAGE C STRICT IMMUTABLE; + +CREATE OPERATOR CLASS uuid_ops +DEFAULT FOR TYPE uuid USING gin +AS + OPERATOR 1 <, + OPERATOR 2 <=, + OPERATOR 3 =, + OPERATOR 4 >=, + OPERATOR 5 >, + FUNCTION 1 uuid_cmp(uuid,uuid), + FUNCTION 2 gin_extract_value_uuid(uuid, internal), + FUNCTION 3 gin_extract_query_uuid(uuid, internal, int2, internal, internal), + FUNCTION 4 gin_btree_consistent(internal, int2, anyelement, int4, internal, internal), + FUNCTION 5 gin_compare_prefix_uuid(uuid,uuid,int2, internal), +STORAGE uuid; + +-- name datatype support new in 1.3. 
+CREATE FUNCTION gin_extract_value_name(name, internal) +RETURNS internal +AS 'MODULE_PATHNAME' +LANGUAGE C STRICT IMMUTABLE; + +CREATE FUNCTION gin_compare_prefix_name(name, name, int2, internal) +RETURNS int4 +AS 'MODULE_PATHNAME' +LANGUAGE C STRICT IMMUTABLE; + +CREATE FUNCTION gin_extract_query_name(name, internal, int2, internal, internal) +RETURNS internal +AS 'MODULE_PATHNAME' +LANGUAGE C STRICT IMMUTABLE; + +CREATE OPERATOR CLASS name_ops +DEFAULT FOR TYPE name USING gin +AS + OPERATOR 1 <, + OPERATOR 2 <=, + OPERATOR 3 =, + OPERATOR 4 >=, + OPERATOR 5 >, + FUNCTION 1 btnamecmp(name,name), + FUNCTION 2 gin_extract_value_name(name, internal), + FUNCTION 3 gin_extract_query_name(name, internal, int2, internal, internal), + FUNCTION 4 gin_btree_consistent(internal, int2, anyelement, int4, internal, internal), + FUNCTION 5 gin_compare_prefix_name(name,name,int2, internal), +STORAGE name; + +-- bool datatype support new in 1.3. +CREATE FUNCTION gin_extract_value_bool(bool, internal) +RETURNS internal +AS 'MODULE_PATHNAME' +LANGUAGE C STRICT IMMUTABLE; + +CREATE FUNCTION gin_compare_prefix_bool(bool, bool, int2, internal) +RETURNS int4 +AS 'MODULE_PATHNAME' +LANGUAGE C STRICT IMMUTABLE; + +CREATE FUNCTION gin_extract_query_bool(bool, internal, int2, internal, internal) +RETURNS internal +AS 'MODULE_PATHNAME' +LANGUAGE C STRICT IMMUTABLE; + +CREATE OPERATOR CLASS bool_ops +DEFAULT FOR TYPE bool USING gin +AS + OPERATOR 1 <, + OPERATOR 2 <=, + OPERATOR 3 =, + OPERATOR 4 >=, + OPERATOR 5 >, + FUNCTION 1 btboolcmp(bool,bool), + FUNCTION 2 gin_extract_value_bool(bool, internal), + FUNCTION 3 gin_extract_query_bool(bool, internal, int2, internal, internal), + FUNCTION 4 gin_btree_consistent(internal, int2, anyelement, int4, internal, internal), + FUNCTION 5 gin_compare_prefix_bool(bool,bool,int2, internal), +STORAGE bool; + +-- bpchar datatype support new in 1.3. 
+CREATE FUNCTION gin_extract_value_bpchar(bpchar, internal) +RETURNS internal +AS 'MODULE_PATHNAME' +LANGUAGE C STRICT IMMUTABLE; + +CREATE FUNCTION gin_compare_prefix_bpchar(bpchar, bpchar, int2, internal) +RETURNS int4 +AS 'MODULE_PATHNAME' +LANGUAGE C STRICT IMMUTABLE; + +CREATE FUNCTION gin_extract_query_bpchar(bpchar, internal, int2, internal, internal) +RETURNS internal +AS 'MODULE_PATHNAME' +LANGUAGE C STRICT IMMUTABLE; + +CREATE OPERATOR CLASS bpchar_ops +DEFAULT FOR TYPE bpchar USING gin +AS + OPERATOR 1 <, + OPERATOR 2 <=, + OPERATOR 3 =, + OPERATOR 4 >=, + OPERATOR 5 >, + FUNCTION 1 bpcharcmp(bpchar, bpchar), + FUNCTION 2 gin_extract_value_bpchar(bpchar, internal), + FUNCTION 3 gin_extract_query_bpchar(bpchar, internal, int2, internal, internal), + FUNCTION 4 gin_btree_consistent(internal, int2, anyelement, int4, internal, internal), + FUNCTION 5 gin_compare_prefix_bpchar(bpchar,bpchar,int2, internal), +STORAGE bpchar; diff --git a/contrib/btree_gin/btree_gin.c b/contrib/btree_gin/btree_gin.c index 2473f79ca1..2ecf7a2d87 100644 --- a/contrib/btree_gin/btree_gin.c +++ b/contrib/btree_gin/btree_gin.c @@ -10,10 +10,12 @@ #include "utils/bytea.h" #include "utils/cash.h" #include "utils/date.h" +#include "utils/float.h" #include "utils/inet.h" #include "utils/numeric.h" #include "utils/timestamp.h" #include "utils/varbit.h" +#include "utils/uuid.h" PG_MODULE_MAGIC; @@ -87,6 +89,7 @@ gin_btree_extract_query(FunctionCallInfo fcinfo, case BTGreaterEqualStrategyNumber: case BTGreaterStrategyNumber: *ptr_partialmatch = true; + /* FALLTHROUGH */ case BTEqualStrategyNumber: entries[0] = datum; break; @@ -350,6 +353,8 @@ leftmostvalue_text(void) GIN_SUPPORT(text, true, leftmostvalue_text, bttextcmp) +GIN_SUPPORT(bpchar, true, leftmostvalue_text, bpcharcmp) + static Datum leftmostvalue_char(void) { @@ -437,7 +442,6 @@ GIN_SUPPORT(numeric, true, leftmostvalue_numeric, gin_numeric_cmp) * routines it needs it, so we can't use DirectFunctionCall2. 
*/ - #define ENUM_IS_LEFTMOST(x) ((x) == InvalidOid) PG_FUNCTION_INFO_V1(gin_enum_cmp); @@ -477,3 +481,35 @@ leftmostvalue_enum(void) } GIN_SUPPORT(anyenum, false, leftmostvalue_enum, gin_enum_cmp) + +static Datum +leftmostvalue_uuid(void) +{ + /* + * palloc0 will create the UUID with all zeroes: + * "00000000-0000-0000-0000-000000000000" + */ + pg_uuid_t *retval = (pg_uuid_t *) palloc0(sizeof(pg_uuid_t)); + + return UUIDPGetDatum(retval); +} + +GIN_SUPPORT(uuid, false, leftmostvalue_uuid, uuid_cmp) + +static Datum +leftmostvalue_name(void) +{ + NameData *result = (NameData *) palloc0(NAMEDATALEN); + + return NameGetDatum(result); +} + +GIN_SUPPORT(name, false, leftmostvalue_name, btnamecmp) + +static Datum +leftmostvalue_bool(void) +{ + return BoolGetDatum(false); +} + +GIN_SUPPORT(bool, false, leftmostvalue_bool, btboolcmp) diff --git a/contrib/btree_gin/btree_gin.control b/contrib/btree_gin/btree_gin.control index 3acc5af1a7..d576da7fd0 100644 --- a/contrib/btree_gin/btree_gin.control +++ b/contrib/btree_gin/btree_gin.control @@ -1,5 +1,5 @@ # btree_gin extension comment = 'support for indexing common datatypes in GIN' -default_version = '1.2' +default_version = '1.3' module_pathname = '$libdir/btree_gin' relocatable = true diff --git a/contrib/btree_gin/expected/bool.out b/contrib/btree_gin/expected/bool.out new file mode 100644 index 0000000000..efb3e1e327 --- /dev/null +++ b/contrib/btree_gin/expected/bool.out @@ -0,0 +1,119 @@ +set enable_seqscan=off; +CREATE TABLE test_bool ( + i boolean +); +INSERT INTO test_bool VALUES (false),(true),(null); +CREATE INDEX idx_bool ON test_bool USING gin (i); +SELECT * FROM test_bool WHERE i=true ORDER BY i; + i +--- + t +(1 row) + +SELECT * FROM test_bool WHERE i>true ORDER BY i; + i +--- +(0 rows) + +SELECT * FROM test_bool WHERE i=false ORDER BY i; + i +--- + f + t +(2 rows) + +SELECT * FROM test_bool WHERE i>false ORDER BY i; + i +--- + t +(1 row) + +EXPLAIN (COSTS OFF) SELECT * FROM test_bool WHERE i Bitmap Heap Scan on test_bool + Recheck Cond: (i < true) + -> Bitmap Index Scan on idx_bool + Index Cond: (i < true) +(6 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM test_bool WHERE i<=true ORDER BY i; + QUERY PLAN +------------------------------------------- + Sort + Sort Key: i + -> Bitmap Heap Scan on test_bool + Recheck Cond: (i <= true) + -> Bitmap Index Scan on idx_bool + Index Cond: (i <= true) +(6 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM test_bool WHERE i=true ORDER BY i; + QUERY PLAN +----------------------------- + Sort + Sort Key: i + -> Seq Scan on test_bool + Filter: i +(4 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM test_bool WHERE i>=true ORDER BY i; + QUERY PLAN +------------------------------------------- + Sort + Sort Key: i + -> Bitmap Heap Scan on test_bool + Recheck Cond: (i >= true) + -> Bitmap Index Scan on idx_bool + Index Cond: (i >= true) +(6 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM test_bool WHERE i>true ORDER BY i; + QUERY PLAN +------------------------------------------- + Sort + Sort Key: i + -> Bitmap Heap Scan on test_bool + Recheck Cond: (i > true) + -> Bitmap Index Scan on idx_bool + Index Cond: (i > true) +(6 rows) + diff --git a/contrib/btree_gin/expected/bpchar.out b/contrib/btree_gin/expected/bpchar.out new file mode 100644 index 0000000000..2eb8855492 --- /dev/null +++ b/contrib/btree_gin/expected/bpchar.out @@ -0,0 +1,109 @@ +set enable_seqscan=off; +CREATE TABLE test_bpchar ( + i char(10) +); +INSERT INTO test_bpchar VALUES ('a'),('ab'),('abc'),('abc '),('abb'),('axy'),('xyz'),('xyz '); +CREATE INDEX 
idx_bpchar ON test_bpchar USING gin (i); +SELECT * FROM test_bpchar WHERE i<'abc' ORDER BY i; + i +------------ + a + ab + abb +(3 rows) + +SELECT * FROM test_bpchar WHERE i<='abc' ORDER BY i; + i +------------ + a + ab + abb + abc + abc +(5 rows) + +SELECT * FROM test_bpchar WHERE i='abc' ORDER BY i; + i +------------ + abc + abc +(2 rows) + +SELECT * FROM test_bpchar WHERE i='abc ' ORDER BY i; + i +------------ + abc + abc +(2 rows) + +SELECT * FROM test_bpchar WHERE i>='abc' ORDER BY i; + i +------------ + abc + abc + axy + xyz + xyz +(5 rows) + +SELECT * FROM test_bpchar WHERE i>'abc' ORDER BY i; + i +------------ + axy + xyz + xyz +(3 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM test_bpchar WHERE i<'abc' ORDER BY i; + QUERY PLAN +----------------------------------------------- + Sort + Sort Key: i + -> Bitmap Heap Scan on test_bpchar + Recheck Cond: (i < 'abc'::bpchar) + -> Bitmap Index Scan on idx_bpchar + Index Cond: (i < 'abc'::bpchar) +(6 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM test_bpchar WHERE i<='abc' ORDER BY i; + QUERY PLAN +------------------------------------------------ + Sort + Sort Key: i + -> Bitmap Heap Scan on test_bpchar + Recheck Cond: (i <= 'abc'::bpchar) + -> Bitmap Index Scan on idx_bpchar + Index Cond: (i <= 'abc'::bpchar) +(6 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM test_bpchar WHERE i='abc' ORDER BY i; + QUERY PLAN +----------------------------------------- + Bitmap Heap Scan on test_bpchar + Recheck Cond: (i = 'abc'::bpchar) + -> Bitmap Index Scan on idx_bpchar + Index Cond: (i = 'abc'::bpchar) +(4 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM test_bpchar WHERE i>='abc' ORDER BY i; + QUERY PLAN +------------------------------------------------ + Sort + Sort Key: i + -> Bitmap Heap Scan on test_bpchar + Recheck Cond: (i >= 'abc'::bpchar) + -> Bitmap Index Scan on idx_bpchar + Index Cond: (i >= 'abc'::bpchar) +(6 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM test_bpchar WHERE i>'abc' ORDER BY i; + QUERY PLAN +----------------------------------------------- + Sort + Sort Key: i + -> Bitmap Heap Scan on test_bpchar + Recheck Cond: (i > 'abc'::bpchar) + -> Bitmap Index Scan on idx_bpchar + Index Cond: (i > 'abc'::bpchar) +(6 rows) + diff --git a/contrib/btree_gin/expected/name.out b/contrib/btree_gin/expected/name.out new file mode 100644 index 0000000000..174de6576f --- /dev/null +++ b/contrib/btree_gin/expected/name.out @@ -0,0 +1,97 @@ +set enable_seqscan=off; +CREATE TABLE test_name ( + i name +); +INSERT INTO test_name VALUES ('a'),('ab'),('abc'),('abb'),('axy'),('xyz'); +CREATE INDEX idx_name ON test_name USING gin (i); +SELECT * FROM test_name WHERE i<'abc' ORDER BY i; + i +----- + a + ab + abb +(3 rows) + +SELECT * FROM test_name WHERE i<='abc' ORDER BY i; + i +----- + a + ab + abb + abc +(4 rows) + +SELECT * FROM test_name WHERE i='abc' ORDER BY i; + i +----- + abc +(1 row) + +SELECT * FROM test_name WHERE i>='abc' ORDER BY i; + i +----- + abc + axy + xyz +(3 rows) + +SELECT * FROM test_name WHERE i>'abc' ORDER BY i; + i +----- + axy + xyz +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM test_name WHERE i<'abc' ORDER BY i; + QUERY PLAN +--------------------------------------------- + Sort + Sort Key: i + -> Bitmap Heap Scan on test_name + Recheck Cond: (i < 'abc'::name) + -> Bitmap Index Scan on idx_name + Index Cond: (i < 'abc'::name) +(6 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM test_name WHERE i<='abc' ORDER BY i; + QUERY PLAN +---------------------------------------------- + Sort + Sort Key: i + -> Bitmap Heap Scan on test_name + Recheck Cond: (i <= 
'abc'::name) + -> Bitmap Index Scan on idx_name + Index Cond: (i <= 'abc'::name) +(6 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM test_name WHERE i='abc' ORDER BY i; + QUERY PLAN +--------------------------------------- + Bitmap Heap Scan on test_name + Recheck Cond: (i = 'abc'::name) + -> Bitmap Index Scan on idx_name + Index Cond: (i = 'abc'::name) +(4 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM test_name WHERE i>='abc' ORDER BY i; + QUERY PLAN +---------------------------------------------- + Sort + Sort Key: i + -> Bitmap Heap Scan on test_name + Recheck Cond: (i >= 'abc'::name) + -> Bitmap Index Scan on idx_name + Index Cond: (i >= 'abc'::name) +(6 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM test_name WHERE i>'abc' ORDER BY i; + QUERY PLAN +--------------------------------------------- + Sort + Sort Key: i + -> Bitmap Heap Scan on test_name + Recheck Cond: (i > 'abc'::name) + -> Bitmap Index Scan on idx_name + Index Cond: (i > 'abc'::name) +(6 rows) + diff --git a/contrib/btree_gin/expected/uuid.out b/contrib/btree_gin/expected/uuid.out new file mode 100644 index 0000000000..60fd8d6016 --- /dev/null +++ b/contrib/btree_gin/expected/uuid.out @@ -0,0 +1,104 @@ +set enable_seqscan=off; +CREATE TABLE test_uuid ( + i uuid +); +INSERT INTO test_uuid VALUES + ( '00000000-0000-0000-0000-000000000000' ), + ( '299bc99f-2f79-4e3e-bfea-2cbfd62a7c27' ), + ( '6264af33-0d43-4337-bf4e-43509b8a4be8' ), + ( 'ce41c936-6acb-4feb-8c91-852a673e5a5c' ), + ( 'd2ce731f-f2a8-4a2b-be37-8f0ba637427f' ), + ( 'ffffffff-ffff-ffff-ffff-ffffffffffff' ) +; +CREATE INDEX idx_uuid ON test_uuid USING gin (i); +SELECT * FROM test_uuid WHERE i<'ce41c936-6acb-4feb-8c91-852a673e5a5c'::uuid ORDER BY i; + i +-------------------------------------- + 00000000-0000-0000-0000-000000000000 + 299bc99f-2f79-4e3e-bfea-2cbfd62a7c27 + 6264af33-0d43-4337-bf4e-43509b8a4be8 +(3 rows) + +SELECT * FROM test_uuid WHERE i<='ce41c936-6acb-4feb-8c91-852a673e5a5c'::uuid ORDER BY i; + i +-------------------------------------- + 00000000-0000-0000-0000-000000000000 + 299bc99f-2f79-4e3e-bfea-2cbfd62a7c27 + 6264af33-0d43-4337-bf4e-43509b8a4be8 + ce41c936-6acb-4feb-8c91-852a673e5a5c +(4 rows) + +SELECT * FROM test_uuid WHERE i='ce41c936-6acb-4feb-8c91-852a673e5a5c'::uuid ORDER BY i; + i +-------------------------------------- + ce41c936-6acb-4feb-8c91-852a673e5a5c +(1 row) + +SELECT * FROM test_uuid WHERE i>='ce41c936-6acb-4feb-8c91-852a673e5a5c'::uuid ORDER BY i; + i +-------------------------------------- + ce41c936-6acb-4feb-8c91-852a673e5a5c + d2ce731f-f2a8-4a2b-be37-8f0ba637427f + ffffffff-ffff-ffff-ffff-ffffffffffff +(3 rows) + +SELECT * FROM test_uuid WHERE i>'ce41c936-6acb-4feb-8c91-852a673e5a5c'::uuid ORDER BY i; + i +-------------------------------------- + d2ce731f-f2a8-4a2b-be37-8f0ba637427f + ffffffff-ffff-ffff-ffff-ffffffffffff +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM test_uuid WHERE i<'ce41c936-6acb-4feb-8c91-852a673e5a5c'::uuid ORDER BY i; + QUERY PLAN +------------------------------------------------------------------------------ + Sort + Sort Key: i + -> Bitmap Heap Scan on test_uuid + Recheck Cond: (i < 'ce41c936-6acb-4feb-8c91-852a673e5a5c'::uuid) + -> Bitmap Index Scan on idx_uuid + Index Cond: (i < 'ce41c936-6acb-4feb-8c91-852a673e5a5c'::uuid) +(6 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM test_uuid WHERE i<='ce41c936-6acb-4feb-8c91-852a673e5a5c'::uuid ORDER BY i; + QUERY PLAN +------------------------------------------------------------------------------- + Sort + Sort Key: i + -> Bitmap Heap Scan on test_uuid + Recheck Cond: 
(i <= 'ce41c936-6acb-4feb-8c91-852a673e5a5c'::uuid) + -> Bitmap Index Scan on idx_uuid + Index Cond: (i <= 'ce41c936-6acb-4feb-8c91-852a673e5a5c'::uuid) +(6 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM test_uuid WHERE i='ce41c936-6acb-4feb-8c91-852a673e5a5c'::uuid ORDER BY i; + QUERY PLAN +------------------------------------------------------------------------ + Bitmap Heap Scan on test_uuid + Recheck Cond: (i = 'ce41c936-6acb-4feb-8c91-852a673e5a5c'::uuid) + -> Bitmap Index Scan on idx_uuid + Index Cond: (i = 'ce41c936-6acb-4feb-8c91-852a673e5a5c'::uuid) +(4 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM test_uuid WHERE i>='ce41c936-6acb-4feb-8c91-852a673e5a5c'::uuid ORDER BY i; + QUERY PLAN +------------------------------------------------------------------------------- + Sort + Sort Key: i + -> Bitmap Heap Scan on test_uuid + Recheck Cond: (i >= 'ce41c936-6acb-4feb-8c91-852a673e5a5c'::uuid) + -> Bitmap Index Scan on idx_uuid + Index Cond: (i >= 'ce41c936-6acb-4feb-8c91-852a673e5a5c'::uuid) +(6 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM test_uuid WHERE i>'ce41c936-6acb-4feb-8c91-852a673e5a5c'::uuid ORDER BY i; + QUERY PLAN +------------------------------------------------------------------------------ + Sort + Sort Key: i + -> Bitmap Heap Scan on test_uuid + Recheck Cond: (i > 'ce41c936-6acb-4feb-8c91-852a673e5a5c'::uuid) + -> Bitmap Index Scan on idx_uuid + Index Cond: (i > 'ce41c936-6acb-4feb-8c91-852a673e5a5c'::uuid) +(6 rows) + diff --git a/contrib/btree_gin/sql/bool.sql b/contrib/btree_gin/sql/bool.sql new file mode 100644 index 0000000000..dad2ff32b8 --- /dev/null +++ b/contrib/btree_gin/sql/bool.sql @@ -0,0 +1,27 @@ +set enable_seqscan=off; + +CREATE TABLE test_bool ( + i boolean +); + +INSERT INTO test_bool VALUES (false),(true),(null); + +CREATE INDEX idx_bool ON test_bool USING gin (i); + +SELECT * FROM test_bool WHERE i=true ORDER BY i; +SELECT * FROM test_bool WHERE i>true ORDER BY i; + +SELECT * FROM test_bool WHERE i=false ORDER BY i; +SELECT * FROM test_bool WHERE i>false ORDER BY i; + +EXPLAIN (COSTS OFF) SELECT * FROM test_bool WHERE i=true ORDER BY i; +EXPLAIN (COSTS OFF) SELECT * FROM test_bool WHERE i>true ORDER BY i; diff --git a/contrib/btree_gin/sql/bpchar.sql b/contrib/btree_gin/sql/bpchar.sql new file mode 100644 index 0000000000..4c951e31a8 --- /dev/null +++ b/contrib/btree_gin/sql/bpchar.sql @@ -0,0 +1,22 @@ +set enable_seqscan=off; + +CREATE TABLE test_bpchar ( + i char(10) +); + +INSERT INTO test_bpchar VALUES ('a'),('ab'),('abc'),('abc '),('abb'),('axy'),('xyz'),('xyz '); + +CREATE INDEX idx_bpchar ON test_bpchar USING gin (i); + +SELECT * FROM test_bpchar WHERE i<'abc' ORDER BY i; +SELECT * FROM test_bpchar WHERE i<='abc' ORDER BY i; +SELECT * FROM test_bpchar WHERE i='abc' ORDER BY i; +SELECT * FROM test_bpchar WHERE i='abc ' ORDER BY i; +SELECT * FROM test_bpchar WHERE i>='abc' ORDER BY i; +SELECT * FROM test_bpchar WHERE i>'abc' ORDER BY i; + +EXPLAIN (COSTS OFF) SELECT * FROM test_bpchar WHERE i<'abc' ORDER BY i; +EXPLAIN (COSTS OFF) SELECT * FROM test_bpchar WHERE i<='abc' ORDER BY i; +EXPLAIN (COSTS OFF) SELECT * FROM test_bpchar WHERE i='abc' ORDER BY i; +EXPLAIN (COSTS OFF) SELECT * FROM test_bpchar WHERE i>='abc' ORDER BY i; +EXPLAIN (COSTS OFF) SELECT * FROM test_bpchar WHERE i>'abc' ORDER BY i; diff --git a/contrib/btree_gin/sql/name.sql b/contrib/btree_gin/sql/name.sql new file mode 100644 index 0000000000..c11580cdf9 --- /dev/null +++ b/contrib/btree_gin/sql/name.sql @@ -0,0 +1,21 @@ +set enable_seqscan=off; + +CREATE TABLE test_name ( + i name +); 
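-- A minimal usage sketch, not part of this patch (the table and column names
-- are hypothetical): the practical point of the new btree_gin opclasses is
-- that a plain scalar column can share one multicolumn GIN index with a
-- column that is already GIN-indexable, assuming the uuid and bool support
-- added above.
--
--   CREATE EXTENSION btree_gin;
--   CREATE TABLE docs (owner uuid, flagged bool, tags int[]);
--   CREATE INDEX docs_gin ON docs USING gin (owner, flagged, tags);
--   SELECT * FROM docs
--     WHERE owner = '00000000-0000-0000-0000-000000000000'
--       AND tags @> ARRAY[42];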
+ +INSERT INTO test_name VALUES ('a'),('ab'),('abc'),('abb'),('axy'),('xyz'); + +CREATE INDEX idx_name ON test_name USING gin (i); + +SELECT * FROM test_name WHERE i<'abc' ORDER BY i; +SELECT * FROM test_name WHERE i<='abc' ORDER BY i; +SELECT * FROM test_name WHERE i='abc' ORDER BY i; +SELECT * FROM test_name WHERE i>='abc' ORDER BY i; +SELECT * FROM test_name WHERE i>'abc' ORDER BY i; + +EXPLAIN (COSTS OFF) SELECT * FROM test_name WHERE i<'abc' ORDER BY i; +EXPLAIN (COSTS OFF) SELECT * FROM test_name WHERE i<='abc' ORDER BY i; +EXPLAIN (COSTS OFF) SELECT * FROM test_name WHERE i='abc' ORDER BY i; +EXPLAIN (COSTS OFF) SELECT * FROM test_name WHERE i>='abc' ORDER BY i; +EXPLAIN (COSTS OFF) SELECT * FROM test_name WHERE i>'abc' ORDER BY i; diff --git a/contrib/btree_gin/sql/uuid.sql b/contrib/btree_gin/sql/uuid.sql new file mode 100644 index 0000000000..3c141bde74 --- /dev/null +++ b/contrib/btree_gin/sql/uuid.sql @@ -0,0 +1,28 @@ +set enable_seqscan=off; + +CREATE TABLE test_uuid ( + i uuid +); + +INSERT INTO test_uuid VALUES + ( '00000000-0000-0000-0000-000000000000' ), + ( '299bc99f-2f79-4e3e-bfea-2cbfd62a7c27' ), + ( '6264af33-0d43-4337-bf4e-43509b8a4be8' ), + ( 'ce41c936-6acb-4feb-8c91-852a673e5a5c' ), + ( 'd2ce731f-f2a8-4a2b-be37-8f0ba637427f' ), + ( 'ffffffff-ffff-ffff-ffff-ffffffffffff' ) +; + +CREATE INDEX idx_uuid ON test_uuid USING gin (i); + +SELECT * FROM test_uuid WHERE i<'ce41c936-6acb-4feb-8c91-852a673e5a5c'::uuid ORDER BY i; +SELECT * FROM test_uuid WHERE i<='ce41c936-6acb-4feb-8c91-852a673e5a5c'::uuid ORDER BY i; +SELECT * FROM test_uuid WHERE i='ce41c936-6acb-4feb-8c91-852a673e5a5c'::uuid ORDER BY i; +SELECT * FROM test_uuid WHERE i>='ce41c936-6acb-4feb-8c91-852a673e5a5c'::uuid ORDER BY i; +SELECT * FROM test_uuid WHERE i>'ce41c936-6acb-4feb-8c91-852a673e5a5c'::uuid ORDER BY i; + +EXPLAIN (COSTS OFF) SELECT * FROM test_uuid WHERE i<'ce41c936-6acb-4feb-8c91-852a673e5a5c'::uuid ORDER BY i; +EXPLAIN (COSTS OFF) SELECT * FROM test_uuid WHERE i<='ce41c936-6acb-4feb-8c91-852a673e5a5c'::uuid ORDER BY i; +EXPLAIN (COSTS OFF) SELECT * FROM test_uuid WHERE i='ce41c936-6acb-4feb-8c91-852a673e5a5c'::uuid ORDER BY i; +EXPLAIN (COSTS OFF) SELECT * FROM test_uuid WHERE i>='ce41c936-6acb-4feb-8c91-852a673e5a5c'::uuid ORDER BY i; +EXPLAIN (COSTS OFF) SELECT * FROM test_uuid WHERE i>'ce41c936-6acb-4feb-8c91-852a673e5a5c'::uuid ORDER BY i; diff --git a/contrib/btree_gist/btree_bit.c b/contrib/btree_gist/btree_bit.c index a56a2752a7..2225244ded 100644 --- a/contrib/btree_gist/btree_bit.c +++ b/contrib/btree_gist/btree_bit.c @@ -111,7 +111,7 @@ static const gbtree_vinfo tinfo = { gbt_t_bit, 0, - TRUE, + true, gbt_bitgt, gbt_bitge, gbt_biteq, @@ -152,13 +152,13 @@ gbt_bit_consistent(PG_FUNCTION_ARGS) if (GIST_LEAF(entry)) retval = gbt_var_consistent(&r, query, strategy, PG_GET_COLLATION(), - TRUE, &tinfo, fcinfo->flinfo); + true, &tinfo, fcinfo->flinfo); else { bytea *q = gbt_bit_xfrm((bytea *) query); retval = gbt_var_consistent(&r, q, strategy, PG_GET_COLLATION(), - FALSE, &tinfo, fcinfo->flinfo); + false, &tinfo, fcinfo->flinfo); } PG_RETURN_BOOL(retval); } diff --git a/contrib/btree_gist/btree_bytea.c b/contrib/btree_gist/btree_bytea.c index 00753e7f48..6b005f0157 100644 --- a/contrib/btree_gist/btree_bytea.c +++ b/contrib/btree_gist/btree_bytea.c @@ -75,7 +75,7 @@ static const gbtree_vinfo tinfo = { gbt_t_bytea, 0, - TRUE, + true, gbt_byteagt, gbt_byteage, gbt_byteaeq, diff --git a/contrib/btree_gist/btree_cash.c b/contrib/btree_gist/btree_cash.c index 81131af4dc..894d0a2665 100644 --- 
a/contrib/btree_gist/btree_cash.c +++ b/contrib/btree_gist/btree_cash.c @@ -5,6 +5,7 @@ #include "btree_gist.h" #include "btree_utils_num.h" +#include "common/int.h" #include "utils/cash.h" typedef struct @@ -99,15 +100,14 @@ cash_dist(PG_FUNCTION_ARGS) Cash r; Cash ra; - r = a - b; - ra = Abs(r); - - /* Overflow check. */ - if (ra < 0 || (!SAMESIGN(a, b) && !SAMESIGN(r, a))) + if (pg_sub_s64_overflow(a, b, &r) || + r == PG_INT64_MIN) ereport(ERROR, (errcode(ERRCODE_NUMERIC_VALUE_OUT_OF_RANGE), errmsg("money out of range"))); + ra = Abs(r); + PG_RETURN_CASH(ra); } diff --git a/contrib/btree_gist/btree_inet.c b/contrib/btree_gist/btree_inet.c index b5b593f77f..34308cc640 100644 --- a/contrib/btree_gist/btree_inet.c +++ b/contrib/btree_gist/btree_inet.c @@ -99,13 +99,15 @@ gbt_inet_compress(PG_FUNCTION_ARGS) if (entry->leafkey) { inetKEY *r = (inetKEY *) palloc(sizeof(inetKEY)); + bool failure = false; retval = palloc(sizeof(GISTENTRY)); - r->lower = convert_network_to_scalar(entry->key, INETOID); + r->lower = convert_network_to_scalar(entry->key, INETOID, &failure); + Assert(!failure); r->upper = r->lower; gistentryinit(*retval, PointerGetDatum(r), entry->rel, entry->page, - entry->offset, FALSE); + entry->offset, false); } else retval = entry; @@ -118,13 +120,18 @@ Datum gbt_inet_consistent(PG_FUNCTION_ARGS) { GISTENTRY *entry = (GISTENTRY *) PG_GETARG_POINTER(0); - double query = convert_network_to_scalar(PG_GETARG_DATUM(1), INETOID); + Datum dquery = PG_GETARG_DATUM(1); StrategyNumber strategy = (StrategyNumber) PG_GETARG_UINT16(2); /* Oid subtype = PG_GETARG_OID(3); */ bool *recheck = (bool *) PG_GETARG_POINTER(4); inetKEY *kkk = (inetKEY *) DatumGetPointer(entry->key); GBT_NUMKEY_R key; + double query; + bool failure = false; + + query = convert_network_to_scalar(dquery, INETOID, &failure); + Assert(!failure); /* All cases served by this function are inexact */ *recheck = true; diff --git a/contrib/btree_gist/btree_int2.c b/contrib/btree_gist/btree_int2.c index f343b8615f..7674e2d292 100644 --- a/contrib/btree_gist/btree_int2.c +++ b/contrib/btree_gist/btree_int2.c @@ -5,6 +5,7 @@ #include "btree_gist.h" #include "btree_utils_num.h" +#include "common/int.h" typedef struct int16key { @@ -98,15 +99,14 @@ int2_dist(PG_FUNCTION_ARGS) int16 r; int16 ra; - r = a - b; - ra = Abs(r); - - /* Overflow check. */ - if (ra < 0 || (!SAMESIGN(a, b) && !SAMESIGN(r, a))) + if (pg_sub_s16_overflow(a, b, &r) || + r == PG_INT16_MIN) ereport(ERROR, (errcode(ERRCODE_NUMERIC_VALUE_OUT_OF_RANGE), errmsg("smallint out of range"))); + ra = Abs(r); + PG_RETURN_INT16(ra); } diff --git a/contrib/btree_gist/btree_int4.c b/contrib/btree_gist/btree_int4.c index 35bb442437..80005ab65d 100644 --- a/contrib/btree_gist/btree_int4.c +++ b/contrib/btree_gist/btree_int4.c @@ -5,6 +5,7 @@ #include "btree_gist.h" #include "btree_utils_num.h" +#include "common/int.h" typedef struct int32key { @@ -99,15 +100,14 @@ int4_dist(PG_FUNCTION_ARGS) int32 r; int32 ra; - r = a - b; - ra = Abs(r); - - /* Overflow check. 
*/ - if (ra < 0 || (!SAMESIGN(a, b) && !SAMESIGN(r, a))) + if (pg_sub_s32_overflow(a, b, &r) || + r == PG_INT32_MIN) ereport(ERROR, (errcode(ERRCODE_NUMERIC_VALUE_OUT_OF_RANGE), errmsg("integer out of range"))); + ra = Abs(r); + PG_RETURN_INT32(ra); } diff --git a/contrib/btree_gist/btree_int8.c b/contrib/btree_gist/btree_int8.c index 91f2d032d1..b0fd3e1277 100644 --- a/contrib/btree_gist/btree_int8.c +++ b/contrib/btree_gist/btree_int8.c @@ -5,6 +5,7 @@ #include "btree_gist.h" #include "btree_utils_num.h" +#include "common/int.h" typedef struct int64key { @@ -99,15 +100,14 @@ int8_dist(PG_FUNCTION_ARGS) int64 r; int64 ra; - r = a - b; - ra = Abs(r); - - /* Overflow check. */ - if (ra < 0 || (!SAMESIGN(a, b) && !SAMESIGN(r, a))) + if (pg_sub_s64_overflow(a, b, &r) || + r == PG_INT64_MIN) ereport(ERROR, (errcode(ERRCODE_NUMERIC_VALUE_OUT_OF_RANGE), errmsg("bigint out of range"))); + ra = Abs(r); + PG_RETURN_INT64(ra); } diff --git a/contrib/btree_gist/btree_interval.c b/contrib/btree_gist/btree_interval.c index 61ab478c42..3a527a75fa 100644 --- a/contrib/btree_gist/btree_interval.c +++ b/contrib/btree_gist/btree_interval.c @@ -169,7 +169,7 @@ gbt_intv_compress(PG_FUNCTION_ARGS) } gistentryinit(*retval, PointerGetDatum(r), entry->rel, entry->page, - entry->offset, FALSE); + entry->offset, false); } PG_RETURN_POINTER(retval); @@ -201,7 +201,7 @@ gbt_intv_decompress(PG_FUNCTION_ARGS) gistentryinit(*retval, PointerGetDatum(r), entry->rel, entry->page, - entry->offset, FALSE); + entry->offset, false); } PG_RETURN_POINTER(retval); } diff --git a/contrib/btree_gist/btree_numeric.c b/contrib/btree_gist/btree_numeric.c index 43793d36a2..b72060cdb6 100644 --- a/contrib/btree_gist/btree_numeric.c +++ b/contrib/btree_gist/btree_numeric.c @@ -79,7 +79,7 @@ static const gbtree_vinfo tinfo = { gbt_t_numeric, 0, - FALSE, + false, gbt_numeric_gt, gbt_numeric_ge, gbt_numeric_eq, diff --git a/contrib/btree_gist/btree_text.c b/contrib/btree_gist/btree_text.c index 090c849470..8019d11281 100644 --- a/contrib/btree_gist/btree_text.c +++ b/contrib/btree_gist/btree_text.c @@ -80,7 +80,7 @@ static gbtree_vinfo tinfo = { gbt_t_text, 0, - FALSE, + false, gbt_textgt, gbt_textge, gbt_texteq, @@ -128,7 +128,7 @@ gbt_bpchar_compress(PG_FUNCTION_ARGS) gistentryinit(trim, d, entry->rel, entry->page, - entry->offset, TRUE); + entry->offset, true); retval = gbt_var_compress(&trim, &tinfo); } else @@ -171,7 +171,7 @@ Datum gbt_bpchar_consistent(PG_FUNCTION_ARGS) { GISTENTRY *entry = (GISTENTRY *) PG_GETARG_POINTER(0); - void *query = (void *) DatumGetPointer(PG_DETOAST_DATUM(PG_GETARG_DATUM(1))); + void *query = (void *) DatumGetTextP(PG_GETARG_DATUM(1)); StrategyNumber strategy = (StrategyNumber) PG_GETARG_UINT16(2); /* Oid subtype = PG_GETARG_OID(3); */ diff --git a/contrib/btree_gist/btree_time.c b/contrib/btree_gist/btree_time.c index bb239d4986..90cf6554ea 100644 --- a/contrib/btree_gist/btree_time.c +++ b/contrib/btree_gist/btree_time.c @@ -183,7 +183,7 @@ gbt_timetz_compress(PG_FUNCTION_ARGS) r->lower = r->upper = tmp; gistentryinit(*retval, PointerGetDatum(r), entry->rel, entry->page, - entry->offset, FALSE); + entry->offset, false); } else retval = entry; diff --git a/contrib/btree_gist/btree_ts.c b/contrib/btree_gist/btree_ts.c index 1582cff102..49d1849d88 100644 --- a/contrib/btree_gist/btree_ts.c +++ b/contrib/btree_gist/btree_ts.c @@ -9,6 +9,7 @@ #include "btree_utils_num.h" #include "utils/builtins.h" #include "utils/datetime.h" +#include "utils/float.h" typedef struct { @@ -230,7 +231,7 @@ 
gbt_tstz_compress(PG_FUNCTION_ARGS) r->lower = r->upper = gmt; gistentryinit(*retval, PointerGetDatum(r), entry->rel, entry->page, - entry->offset, FALSE); + entry->offset, false); } else retval = entry; diff --git a/contrib/btree_gist/btree_utils_num.c b/contrib/btree_gist/btree_utils_num.c index bae32c4064..29b0faf997 100644 --- a/contrib/btree_gist/btree_utils_num.c +++ b/contrib/btree_gist/btree_utils_num.c @@ -86,7 +86,7 @@ gbt_num_compress(GISTENTRY *entry, const gbtree_ninfo *tinfo) memcpy((void *) &r[tinfo->size], leaf, tinfo->size); retval = palloc(sizeof(GISTENTRY)); gistentryinit(*retval, PointerGetDatum(r), entry->rel, entry->page, - entry->offset, FALSE); + entry->offset, false); } else retval = entry; @@ -150,7 +150,7 @@ gbt_num_fetch(GISTENTRY *entry, const gbtree_ninfo *tinfo) retval = palloc(sizeof(GISTENTRY)); gistentryinit(*retval, datum, entry->rel, entry->page, entry->offset, - FALSE); + false); return retval; } @@ -184,10 +184,10 @@ gbt_num_union(GBT_NUMKEY *out, const GistEntryVector *entryvec, const gbtree_nin c.lower = &cur[0]; c.upper = &cur[tinfo->size]; /* if out->lower > cur->lower, adopt cur as lower */ - if ((*tinfo->f_gt) (o.lower, c.lower, flinfo)) + if (tinfo->f_gt(o.lower, c.lower, flinfo)) memcpy((void *) o.lower, (void *) c.lower, tinfo->size); /* if out->upper < cur->upper, adopt cur as upper */ - if ((*tinfo->f_lt) (o.upper, c.upper, flinfo)) + if (tinfo->f_lt(o.upper, c.upper, flinfo)) memcpy((void *) o.upper, (void *) c.upper, tinfo->size); } @@ -211,8 +211,8 @@ gbt_num_same(const GBT_NUMKEY *a, const GBT_NUMKEY *b, const gbtree_ninfo *tinfo b2.lower = &(((GBT_NUMKEY *) b)[0]); b2.upper = &(((GBT_NUMKEY *) b)[tinfo->size]); - return ((*tinfo->f_eq) (b1.lower, b2.lower, flinfo) && - (*tinfo->f_eq) (b1.upper, b2.upper, flinfo)); + return (tinfo->f_eq(b1.lower, b2.lower, flinfo) && + tinfo->f_eq(b1.upper, b2.upper, flinfo)); } @@ -236,9 +236,9 @@ gbt_num_bin_union(Datum *u, GBT_NUMKEY *e, const gbtree_ninfo *tinfo, FmgrInfo * ur.lower = &(((GBT_NUMKEY *) DatumGetPointer(*u))[0]); ur.upper = &(((GBT_NUMKEY *) DatumGetPointer(*u))[tinfo->size]); - if ((*tinfo->f_gt) ((void *) ur.lower, (void *) rd.lower, flinfo)) + if (tinfo->f_gt((void *) ur.lower, (void *) rd.lower, flinfo)) memcpy((void *) ur.lower, (void *) rd.lower, tinfo->size); - if ((*tinfo->f_lt) ((void *) ur.upper, (void *) rd.upper, flinfo)) + if (tinfo->f_lt((void *) ur.upper, (void *) rd.upper, flinfo)) memcpy((void *) ur.upper, (void *) rd.upper, tinfo->size); } } @@ -264,39 +264,39 @@ gbt_num_consistent(const GBT_NUMKEY_R *key, switch (*strategy) { case BTLessEqualStrategyNumber: - retval = (*tinfo->f_ge) (query, key->lower, flinfo); + retval = tinfo->f_ge(query, key->lower, flinfo); break; case BTLessStrategyNumber: if (is_leaf) - retval = (*tinfo->f_gt) (query, key->lower, flinfo); + retval = tinfo->f_gt(query, key->lower, flinfo); else - retval = (*tinfo->f_ge) (query, key->lower, flinfo); + retval = tinfo->f_ge(query, key->lower, flinfo); break; case BTEqualStrategyNumber: if (is_leaf) - retval = (*tinfo->f_eq) (query, key->lower, flinfo); + retval = tinfo->f_eq(query, key->lower, flinfo); else - retval = ((*tinfo->f_le) (key->lower, query, flinfo) && - (*tinfo->f_le) (query, key->upper, flinfo)); + retval = (tinfo->f_le(key->lower, query, flinfo) && + tinfo->f_le(query, key->upper, flinfo)); break; case BTGreaterStrategyNumber: if (is_leaf) - retval = (*tinfo->f_lt) (query, key->upper, flinfo); + retval = tinfo->f_lt(query, key->upper, flinfo); else - retval = (*tinfo->f_le) (query, 
key->upper, flinfo); + retval = tinfo->f_le(query, key->upper, flinfo); break; case BTGreaterEqualStrategyNumber: - retval = (*tinfo->f_le) (query, key->upper, flinfo); + retval = tinfo->f_le(query, key->upper, flinfo); break; case BtreeGistNotEqualStrategyNumber: - retval = (!((*tinfo->f_eq) (query, key->lower, flinfo) && - (*tinfo->f_eq) (query, key->upper, flinfo))); + retval = (!(tinfo->f_eq(query, key->lower, flinfo) && + tinfo->f_eq(query, key->upper, flinfo))); break; default: retval = false; } - return (retval); + return retval; } diff --git a/contrib/btree_gist/btree_utils_num.h b/contrib/btree_gist/btree_utils_num.h index 17561fa9e4..d7945f856c 100644 --- a/contrib/btree_gist/btree_utils_num.h +++ b/contrib/btree_gist/btree_utils_num.h @@ -89,8 +89,6 @@ typedef struct #define GET_FLOAT_DISTANCE(t, arg1, arg2) Abs( ((float8) *((const t *) (arg1))) - ((float8) *((const t *) (arg2))) ) -#define SAMESIGN(a,b) (((a) < 0) == ((b) < 0)) - /* * check to see if a float4/8 val has underflowed or overflowed * borrowed from src/backend/utils/adt/float.c diff --git a/contrib/btree_gist/btree_utils_var.c b/contrib/btree_gist/btree_utils_var.c index 2c636ad2fa..670c879e77 100644 --- a/contrib/btree_gist/btree_utils_var.c +++ b/contrib/btree_gist/btree_utils_var.c @@ -37,7 +37,7 @@ Datum gbt_var_decompress(PG_FUNCTION_ARGS) { GISTENTRY *entry = (GISTENTRY *) PG_GETARG_POINTER(0); - GBT_VARKEY *key = (GBT_VARKEY *) DatumGetPointer(PG_DETOAST_DATUM(entry->key)); + GBT_VARKEY *key = (GBT_VARKEY *) PG_DETOAST_DATUM(entry->key); if (key != (GBT_VARKEY *) DatumGetPointer(entry->key)) { @@ -45,7 +45,7 @@ gbt_var_decompress(PG_FUNCTION_ARGS) gistentryinit(*retval, PointerGetDatum(key), entry->rel, entry->page, - entry->offset, FALSE); + entry->offset, false); PG_RETURN_POINTER(retval); } @@ -109,7 +109,7 @@ gbt_var_leaf2node(GBT_VARKEY *leaf, const gbtree_vinfo *tinfo, FmgrInfo *flinfo) GBT_VARKEY *out = leaf; if (tinfo->f_l2n) - out = (*tinfo->f_l2n) (leaf, flinfo); + out = tinfo->f_l2n(leaf, flinfo); return out; } @@ -159,7 +159,7 @@ gbt_var_node_cp_len(const GBT_VARKEY *node, const gbtree_vinfo *tinfo) l--; i++; } - return (ml); /* lower == upper */ + return ml; /* lower == upper */ } @@ -169,7 +169,7 @@ gbt_var_node_cp_len(const GBT_VARKEY *node, const gbtree_vinfo *tinfo) static bool gbt_bytea_pf_match(const bytea *pf, const bytea *query, const gbtree_vinfo *tinfo) { - bool out = FALSE; + bool out = false; int32 qlen = VARSIZE(query) - VARHDRSZ; int32 nlen = VARSIZE(pf) - VARHDRSZ; @@ -255,13 +255,13 @@ gbt_var_bin_union(Datum *u, GBT_VARKEY *e, Oid collation, nr.lower = ro.lower; nr.upper = ro.upper; - if ((*tinfo->f_cmp) (ro.lower, eo.lower, collation, flinfo) > 0) + if (tinfo->f_cmp(ro.lower, eo.lower, collation, flinfo) > 0) { nr.lower = eo.lower; update = true; } - if ((*tinfo->f_cmp) (ro.upper, eo.upper, collation, flinfo) < 0) + if (tinfo->f_cmp(ro.upper, eo.upper, collation, flinfo) < 0) { nr.upper = eo.upper; update = true; @@ -294,12 +294,12 @@ gbt_var_compress(GISTENTRY *entry, const gbtree_vinfo *tinfo) retval = palloc(sizeof(GISTENTRY)); gistentryinit(*retval, PointerGetDatum(r), entry->rel, entry->page, - entry->offset, TRUE); + entry->offset, true); } else retval = entry; - return (retval); + return retval; } @@ -307,14 +307,14 @@ Datum gbt_var_fetch(PG_FUNCTION_ARGS) { GISTENTRY *entry = (GISTENTRY *) PG_GETARG_POINTER(0); - GBT_VARKEY *key = (GBT_VARKEY *) DatumGetPointer(PG_DETOAST_DATUM(entry->key)); + GBT_VARKEY *key = (GBT_VARKEY *) PG_DETOAST_DATUM(entry->key); GBT_VARKEY_R r 
= gbt_var_key_readable(key); GISTENTRY *retval; retval = palloc(sizeof(GISTENTRY)); gistentryinit(*retval, PointerGetDatum(r.lower), entry->rel, entry->page, - entry->offset, TRUE); + entry->offset, true); PG_RETURN_POINTER(retval); } @@ -371,8 +371,8 @@ gbt_var_same(Datum d1, Datum d2, Oid collation, r1 = gbt_var_key_readable(t1); r2 = gbt_var_key_readable(t2); - return ((*tinfo->f_cmp) (r1.lower, r2.lower, collation, flinfo) == 0 && - (*tinfo->f_cmp) (r1.upper, r2.upper, collation, flinfo) == 0); + return (tinfo->f_cmp(r1.lower, r2.lower, collation, flinfo) == 0 && + tinfo->f_cmp(r1.upper, r2.upper, collation, flinfo) == 0); } @@ -400,9 +400,9 @@ gbt_var_penalty(float *res, const GISTENTRY *o, const GISTENTRY *n, if ((VARSIZE(ok.lower) - VARHDRSZ) == 0 && (VARSIZE(ok.upper) - VARHDRSZ) == 0) *res = 0.0; - else if (!(((*tinfo->f_cmp) (nk.lower, ok.lower, collation, flinfo) >= 0 || + else if (!((tinfo->f_cmp(nk.lower, ok.lower, collation, flinfo) >= 0 || gbt_bytea_pf_match(ok.lower, nk.lower, tinfo)) && - ((*tinfo->f_cmp) (nk.upper, ok.upper, collation, flinfo) <= 0 || + (tinfo->f_cmp(nk.upper, ok.upper, collation, flinfo) <= 0 || gbt_bytea_pf_match(ok.upper, nk.upper, tinfo)))) { Datum d = PointerGetDatum(0); @@ -449,9 +449,9 @@ gbt_vsrt_cmp(const void *a, const void *b, void *arg) const gbt_vsrt_arg *varg = (const gbt_vsrt_arg *) arg; int res; - res = (*varg->tinfo->f_cmp) (ar.lower, br.lower, varg->collation, varg->flinfo); + res = varg->tinfo->f_cmp(ar.lower, br.lower, varg->collation, varg->flinfo); if (res == 0) - return (*varg->tinfo->f_cmp) (ar.upper, br.upper, varg->collation, varg->flinfo); + return varg->tinfo->f_cmp(ar.upper, br.upper, varg->collation, varg->flinfo); return res; } @@ -561,53 +561,53 @@ gbt_var_consistent(GBT_VARKEY_R *key, const gbtree_vinfo *tinfo, FmgrInfo *flinfo) { - bool retval = FALSE; + bool retval = false; switch (strategy) { case BTLessEqualStrategyNumber: if (is_leaf) - retval = (*tinfo->f_ge) (query, key->lower, collation, flinfo); + retval = tinfo->f_ge(query, key->lower, collation, flinfo); else - retval = (*tinfo->f_cmp) (query, key->lower, collation, flinfo) >= 0 + retval = tinfo->f_cmp(query, key->lower, collation, flinfo) >= 0 || gbt_var_node_pf_match(key, query, tinfo); break; case BTLessStrategyNumber: if (is_leaf) - retval = (*tinfo->f_gt) (query, key->lower, collation, flinfo); + retval = tinfo->f_gt(query, key->lower, collation, flinfo); else - retval = (*tinfo->f_cmp) (query, key->lower, collation, flinfo) >= 0 + retval = tinfo->f_cmp(query, key->lower, collation, flinfo) >= 0 || gbt_var_node_pf_match(key, query, tinfo); break; case BTEqualStrategyNumber: if (is_leaf) - retval = (*tinfo->f_eq) (query, key->lower, collation, flinfo); + retval = tinfo->f_eq(query, key->lower, collation, flinfo); else retval = - ((*tinfo->f_cmp) (key->lower, query, collation, flinfo) <= 0 && - (*tinfo->f_cmp) (query, key->upper, collation, flinfo) <= 0) || + (tinfo->f_cmp(key->lower, query, collation, flinfo) <= 0 && + tinfo->f_cmp(query, key->upper, collation, flinfo) <= 0) || gbt_var_node_pf_match(key, query, tinfo); break; case BTGreaterStrategyNumber: if (is_leaf) - retval = (*tinfo->f_lt) (query, key->upper, collation, flinfo); + retval = tinfo->f_lt(query, key->upper, collation, flinfo); else - retval = (*tinfo->f_cmp) (query, key->upper, collation, flinfo) <= 0 + retval = tinfo->f_cmp(query, key->upper, collation, flinfo) <= 0 || gbt_var_node_pf_match(key, query, tinfo); break; case BTGreaterEqualStrategyNumber: if (is_leaf) - retval = (*tinfo->f_le) 
(query, key->upper, collation, flinfo); + retval = tinfo->f_le(query, key->upper, collation, flinfo); else - retval = (*tinfo->f_cmp) (query, key->upper, collation, flinfo) <= 0 + retval = tinfo->f_cmp(query, key->upper, collation, flinfo) <= 0 || gbt_var_node_pf_match(key, query, tinfo); break; case BtreeGistNotEqualStrategyNumber: - retval = !((*tinfo->f_eq) (query, key->lower, collation, flinfo) && - (*tinfo->f_eq) (query, key->upper, collation, flinfo)); + retval = !(tinfo->f_eq(query, key->lower, collation, flinfo) && + tinfo->f_eq(query, key->upper, collation, flinfo)); break; default: - retval = FALSE; + retval = false; } return retval; diff --git a/contrib/btree_gist/btree_uuid.c b/contrib/btree_gist/btree_uuid.c index ecf357d662..0b3e52fbff 100644 --- a/contrib/btree_gist/btree_uuid.c +++ b/contrib/btree_gist/btree_uuid.c @@ -114,7 +114,7 @@ gbt_uuid_compress(PG_FUNCTION_ARGS) memcpy((void *) (r + UUID_LEN), (void *) key, UUID_LEN); gistentryinit(*retval, PointerGetDatum(r), entry->rel, entry->page, - entry->offset, FALSE); + entry->offset, false); } else retval = entry; @@ -182,8 +182,8 @@ uuid_2_double(const pg_uuid_t *u) * machine, byte-swap each half so we can use native uint64 arithmetic. */ #ifndef WORDS_BIGENDIAN - uu[0] = BSWAP64(uu[0]); - uu[1] = BSWAP64(uu[1]); + uu[0] = pg_bswap64(uu[0]); + uu[1] = pg_bswap64(uu[1]); #endif /* diff --git a/contrib/btree_gist/expected/bit.out b/contrib/btree_gist/expected/bit.out index 8606baf366..e57871f310 100644 --- a/contrib/btree_gist/expected/bit.out +++ b/contrib/btree_gist/expected/bit.out @@ -68,9 +68,9 @@ SELECT count(*) FROM bittmp WHERE a > '011011000100010111011000110000100'; SET enable_bitmapscan=off; EXPLAIN (COSTS OFF) SELECT a FROM bittmp WHERE a BETWEEN '1000000' and '1000001'; - QUERY PLAN ------------------------------------------------------------------------ + QUERY PLAN +--------------------------------------------------------------------- Index Only Scan using bitidx on bittmp - Index Cond: ((a >= B'1000000'::"bit") AND (a <= B'1000001'::"bit")) + Index Cond: ((a >= '1000000'::"bit") AND (a <= '1000001'::"bit")) (2 rows) diff --git a/contrib/btree_gist/expected/inet.out b/contrib/btree_gist/expected/inet.out index 721a78e701..c323d903da 100644 --- a/contrib/btree_gist/expected/inet.out +++ b/contrib/btree_gist/expected/inet.out @@ -64,3 +64,38 @@ SELECT count(*) FROM inettmp WHERE a > '89.225.196.191'::inet; 386 (1 row) +VACUUM ANALYZE inettmp; +-- gist_inet_ops lacks a fetch function, so this should not be index-only scan +EXPLAIN (COSTS OFF) +SELECT count(*) FROM inettmp WHERE a = '89.225.196.191'::inet; + QUERY PLAN +-------------------------------------------------- + Aggregate + -> Index Scan using inetidx on inettmp + Index Cond: (a = '89.225.196.191'::inet) +(3 rows) + +SELECT count(*) FROM inettmp WHERE a = '89.225.196.191'::inet; + count +------- + 1 +(1 row) + +DROP INDEX inetidx; +CREATE INDEX ON inettmp USING gist (a gist_inet_ops, a inet_ops); +-- likewise here (checks for core planner bug) +EXPLAIN (COSTS OFF) +SELECT count(*) FROM inettmp WHERE a = '89.225.196.191'::inet; + QUERY PLAN +---------------------------------------------------- + Aggregate + -> Index Scan using inettmp_a_a1_idx on inettmp + Index Cond: (a = '89.225.196.191'::inet) +(3 rows) + +SELECT count(*) FROM inettmp WHERE a = '89.225.196.191'::inet; + count +------- + 1 +(1 row) + diff --git a/contrib/btree_gist/expected/varbit.out b/contrib/btree_gist/expected/varbit.out index 538ace85c9..ede36bc3ea 100644 --- 
a/contrib/btree_gist/expected/varbit.out +++ b/contrib/btree_gist/expected/varbit.out @@ -68,9 +68,9 @@ SELECT count(*) FROM varbittmp WHERE a > '1110100111010'::varbit; SET enable_bitmapscan=off; EXPLAIN (COSTS OFF) SELECT a FROM bittmp WHERE a BETWEEN '1000000' and '1000001'; - QUERY PLAN ------------------------------------------------------------------------ + QUERY PLAN +--------------------------------------------------------------------- Index Only Scan using bitidx on bittmp - Index Cond: ((a >= B'1000000'::"bit") AND (a <= B'1000001'::"bit")) + Index Cond: ((a >= '1000000'::"bit") AND (a <= '1000001'::"bit")) (2 rows) diff --git a/contrib/btree_gist/sql/inet.sql b/contrib/btree_gist/sql/inet.sql index 328846c0a3..4b8d354b00 100644 --- a/contrib/btree_gist/sql/inet.sql +++ b/contrib/btree_gist/sql/inet.sql @@ -29,3 +29,21 @@ SELECT count(*) FROM inettmp WHERE a = '89.225.196.191'::inet; SELECT count(*) FROM inettmp WHERE a >= '89.225.196.191'::inet; SELECT count(*) FROM inettmp WHERE a > '89.225.196.191'::inet; + +VACUUM ANALYZE inettmp; + +-- gist_inet_ops lacks a fetch function, so this should not be index-only scan +EXPLAIN (COSTS OFF) +SELECT count(*) FROM inettmp WHERE a = '89.225.196.191'::inet; + +SELECT count(*) FROM inettmp WHERE a = '89.225.196.191'::inet; + +DROP INDEX inetidx; + +CREATE INDEX ON inettmp USING gist (a gist_inet_ops, a inet_ops); + +-- likewise here (checks for core planner bug) +EXPLAIN (COSTS OFF) +SELECT count(*) FROM inettmp WHERE a = '89.225.196.191'::inet; + +SELECT count(*) FROM inettmp WHERE a = '89.225.196.191'::inet; diff --git a/contrib/chkpass/Makefile b/contrib/chkpass/Makefile deleted file mode 100644 index a2599ea239..0000000000 --- a/contrib/chkpass/Makefile +++ /dev/null @@ -1,21 +0,0 @@ -# contrib/chkpass/Makefile - -MODULE_big = chkpass -OBJS = chkpass.o $(WIN32RES) - -EXTENSION = chkpass -DATA = chkpass--1.0.sql chkpass--unpackaged--1.0.sql -PGFILEDESC = "chkpass - encrypted password data type" - -SHLIB_LINK = $(filter -lcrypt, $(LIBS)) - -ifdef USE_PGXS -PG_CONFIG = pg_config -PGXS := $(shell $(PG_CONFIG) --pgxs) -include $(PGXS) -else -subdir = contrib/chkpass -top_builddir = ../.. -include $(top_builddir)/src/Makefile.global -include $(top_srcdir)/contrib/contrib-global.mk -endif diff --git a/contrib/chkpass/chkpass--1.0.sql b/contrib/chkpass/chkpass--1.0.sql deleted file mode 100644 index 406a61924c..0000000000 --- a/contrib/chkpass/chkpass--1.0.sql +++ /dev/null @@ -1,70 +0,0 @@ -/* contrib/chkpass/chkpass--1.0.sql */ - --- complain if script is sourced in psql, rather than via CREATE EXTENSION -\echo Use "CREATE EXTENSION chkpass" to load this file. \quit - --- --- Input and output functions and the type itself: --- - -CREATE FUNCTION chkpass_in(cstring) - RETURNS chkpass - AS 'MODULE_PATHNAME' - LANGUAGE C STRICT VOLATILE; --- Note: chkpass_in actually is volatile, because of its use of random(). --- In hindsight that was a bad idea, but there's no way to change it without --- breaking some usage patterns. 
- -CREATE FUNCTION chkpass_out(chkpass) - RETURNS cstring - AS 'MODULE_PATHNAME' - LANGUAGE C STRICT IMMUTABLE; - -CREATE TYPE chkpass ( - internallength = 16, - input = chkpass_in, - output = chkpass_out -); - -CREATE FUNCTION raw(chkpass) - RETURNS text - AS 'MODULE_PATHNAME', 'chkpass_rout' - LANGUAGE C STRICT; - --- --- The various boolean tests: --- - -CREATE FUNCTION eq(chkpass, text) - RETURNS bool - AS 'MODULE_PATHNAME', 'chkpass_eq' - LANGUAGE C STRICT; - -CREATE FUNCTION ne(chkpass, text) - RETURNS bool - AS 'MODULE_PATHNAME', 'chkpass_ne' - LANGUAGE C STRICT; - --- --- Now the operators. --- - -CREATE OPERATOR = ( - leftarg = chkpass, - rightarg = text, - negator = <>, - procedure = eq -); - -CREATE OPERATOR <> ( - leftarg = chkpass, - rightarg = text, - negator = =, - procedure = ne -); - -COMMENT ON TYPE chkpass IS 'password type with checks'; - --- --- eof --- diff --git a/contrib/chkpass/chkpass--unpackaged--1.0.sql b/contrib/chkpass/chkpass--unpackaged--1.0.sql deleted file mode 100644 index 8bdecddfa5..0000000000 --- a/contrib/chkpass/chkpass--unpackaged--1.0.sql +++ /dev/null @@ -1,13 +0,0 @@ -/* contrib/chkpass/chkpass--unpackaged--1.0.sql */ - --- complain if script is sourced in psql, rather than via CREATE EXTENSION -\echo Use "CREATE EXTENSION chkpass FROM unpackaged" to load this file. \quit - -ALTER EXTENSION chkpass ADD type chkpass; -ALTER EXTENSION chkpass ADD function chkpass_in(cstring); -ALTER EXTENSION chkpass ADD function chkpass_out(chkpass); -ALTER EXTENSION chkpass ADD function raw(chkpass); -ALTER EXTENSION chkpass ADD function eq(chkpass,text); -ALTER EXTENSION chkpass ADD function ne(chkpass,text); -ALTER EXTENSION chkpass ADD operator <>(chkpass,text); -ALTER EXTENSION chkpass ADD operator =(chkpass,text); diff --git a/contrib/chkpass/chkpass.c b/contrib/chkpass/chkpass.c deleted file mode 100644 index 3803ccff9a..0000000000 --- a/contrib/chkpass/chkpass.c +++ /dev/null @@ -1,175 +0,0 @@ -/* - * PostgreSQL type definitions for chkpass - * Written by D'Arcy J.M. Cain - * darcy@druid.net - * http://www.druid.net/darcy/ - * - * contrib/chkpass/chkpass.c - * best viewed with tabs set to 4 - */ - -#include "postgres.h" - -#include -#include -#ifdef HAVE_CRYPT_H -#include -#endif - -#include "fmgr.h" -#include "utils/backend_random.h" -#include "utils/builtins.h" - -PG_MODULE_MAGIC; - -/* - * This type encrypts it's input unless the first character is a colon. - * The output is the encrypted form with a leading colon. The output - * format is designed to allow dump and reload operations to work as - * expected without doing special tricks. - */ - - -/* - * This is the internal storage format for CHKPASSs. - * 15 is all I need but add a little buffer - */ - -typedef struct chkpass -{ - char password[16]; -} chkpass; - - -/* This function checks that the password is a good one - * It's just a placeholder for now */ -static int -verify_pass(const char *str) -{ - return 0; -} - -/* - * CHKPASS reader. 
- */ -PG_FUNCTION_INFO_V1(chkpass_in); -Datum -chkpass_in(PG_FUNCTION_ARGS) -{ - char *str = PG_GETARG_CSTRING(0); - chkpass *result; - char mysalt[4]; - char *crypt_output; - static char salt_chars[] = - "./0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz"; - - /* special case to let us enter encrypted passwords */ - if (*str == ':') - { - result = (chkpass *) palloc0(sizeof(chkpass)); - strlcpy(result->password, str + 1, 13 + 1); - PG_RETURN_POINTER(result); - } - - if (verify_pass(str) != 0) - ereport(ERROR, - (errcode(ERRCODE_DATA_EXCEPTION), - errmsg("password \"%s\" is weak", str))); - - result = (chkpass *) palloc0(sizeof(chkpass)); - - if (!pg_backend_random(mysalt, 2)) - ereport(ERROR, - (errmsg("could not generate random salt"))); - - mysalt[0] = salt_chars[mysalt[0] & 0x3f]; - mysalt[1] = salt_chars[mysalt[1] & 0x3f]; - mysalt[2] = 0; /* technically the terminator is not necessary - * but I like to play safe */ - - crypt_output = crypt(str, mysalt); - if (crypt_output == NULL) - ereport(ERROR, - (errcode(ERRCODE_INVALID_PARAMETER_VALUE), - errmsg("crypt() failed"))); - - strlcpy(result->password, crypt_output, sizeof(result->password)); - - PG_RETURN_POINTER(result); -} - -/* - * CHKPASS output function. - * Just like any string but we know it is max 15 (13 plus colon and terminator.) - */ - -PG_FUNCTION_INFO_V1(chkpass_out); -Datum -chkpass_out(PG_FUNCTION_ARGS) -{ - chkpass *password = (chkpass *) PG_GETARG_POINTER(0); - char *result; - - result = (char *) palloc(16); - result[0] = ':'; - strlcpy(result + 1, password->password, 15); - - PG_RETURN_CSTRING(result); -} - - -/* - * special output function that doesn't output the colon - */ - -PG_FUNCTION_INFO_V1(chkpass_rout); -Datum -chkpass_rout(PG_FUNCTION_ARGS) -{ - chkpass *password = (chkpass *) PG_GETARG_POINTER(0); - - PG_RETURN_TEXT_P(cstring_to_text(password->password)); -} - - -/* - * Boolean tests - */ - -PG_FUNCTION_INFO_V1(chkpass_eq); -Datum -chkpass_eq(PG_FUNCTION_ARGS) -{ - chkpass *a1 = (chkpass *) PG_GETARG_POINTER(0); - text *a2 = PG_GETARG_TEXT_PP(1); - char str[9]; - char *crypt_output; - - text_to_cstring_buffer(a2, str, sizeof(str)); - crypt_output = crypt(str, a1->password); - if (crypt_output == NULL) - ereport(ERROR, - (errcode(ERRCODE_INVALID_PARAMETER_VALUE), - errmsg("crypt() failed"))); - - PG_RETURN_BOOL(strcmp(a1->password, crypt_output) == 0); -} - -PG_FUNCTION_INFO_V1(chkpass_ne); -Datum -chkpass_ne(PG_FUNCTION_ARGS) -{ - chkpass *a1 = (chkpass *) PG_GETARG_POINTER(0); - text *a2 = PG_GETARG_TEXT_PP(1); - char str[9]; - char *crypt_output; - - text_to_cstring_buffer(a2, str, sizeof(str)); - crypt_output = crypt(str, a1->password); - if (crypt_output == NULL) - ereport(ERROR, - (errcode(ERRCODE_INVALID_PARAMETER_VALUE), - errmsg("crypt() failed"))); - - PG_RETURN_BOOL(strcmp(a1->password, crypt_output) != 0); -} diff --git a/contrib/chkpass/chkpass.control b/contrib/chkpass/chkpass.control deleted file mode 100644 index bd4b3d3d0d..0000000000 --- a/contrib/chkpass/chkpass.control +++ /dev/null @@ -1,5 +0,0 @@ -# chkpass extension -comment = 'data type for auto-encrypted passwords' -default_version = '1.0' -module_pathname = '$libdir/chkpass' -relocatable = true diff --git a/contrib/citext/Makefile b/contrib/citext/Makefile index 563cd22dcc..e32a7de946 100644 --- a/contrib/citext/Makefile +++ b/contrib/citext/Makefile @@ -3,7 +3,8 @@ MODULES = citext EXTENSION = citext -DATA = citext--1.4.sql citext--1.3--1.4.sql \ +DATA = citext--1.4.sql citext--1.4--1.5.sql \ + citext--1.3--1.4.sql 
\ citext--1.2--1.3.sql citext--1.1--1.2.sql \ citext--1.0--1.1.sql citext--unpackaged--1.0.sql PGFILEDESC = "citext - case-insensitive character string data type" diff --git a/contrib/citext/citext--1.4--1.5.sql b/contrib/citext/citext--1.4--1.5.sql new file mode 100644 index 0000000000..5ae522b7da --- /dev/null +++ b/contrib/citext/citext--1.4--1.5.sql @@ -0,0 +1,88 @@ +/* contrib/citext/citext--1.4--1.5.sql */ + +-- complain if script is sourced in psql, rather than via ALTER EXTENSION +\echo Use "ALTER EXTENSION citext UPDATE TO '1.5'" to load this file. \quit + +ALTER OPERATOR <= (citext, citext) SET ( + RESTRICT = scalarlesel, + JOIN = scalarlejoinsel +); + +ALTER OPERATOR >= (citext, citext) SET ( + RESTRICT = scalargesel, + JOIN = scalargejoinsel +); + +CREATE FUNCTION citext_pattern_lt( citext, citext ) +RETURNS bool +AS 'MODULE_PATHNAME' +LANGUAGE C IMMUTABLE STRICT PARALLEL SAFE; + +CREATE FUNCTION citext_pattern_le( citext, citext ) +RETURNS bool +AS 'MODULE_PATHNAME' +LANGUAGE C IMMUTABLE STRICT PARALLEL SAFE; + +CREATE FUNCTION citext_pattern_gt( citext, citext ) +RETURNS bool +AS 'MODULE_PATHNAME' +LANGUAGE C IMMUTABLE STRICT PARALLEL SAFE; + +CREATE FUNCTION citext_pattern_ge( citext, citext ) +RETURNS bool +AS 'MODULE_PATHNAME' +LANGUAGE C IMMUTABLE STRICT PARALLEL SAFE; + +CREATE OPERATOR ~<~ ( + LEFTARG = CITEXT, + RIGHTARG = CITEXT, + NEGATOR = ~>=~, + COMMUTATOR = ~>~, + PROCEDURE = citext_pattern_lt, + RESTRICT = scalarltsel, + JOIN = scalarltjoinsel +); + +CREATE OPERATOR ~<=~ ( + LEFTARG = CITEXT, + RIGHTARG = CITEXT, + NEGATOR = ~>~, + COMMUTATOR = ~>=~, + PROCEDURE = citext_pattern_le, + RESTRICT = scalarltsel, + JOIN = scalarltjoinsel +); + +CREATE OPERATOR ~>=~ ( + LEFTARG = CITEXT, + RIGHTARG = CITEXT, + NEGATOR = ~<~, + COMMUTATOR = ~<=~, + PROCEDURE = citext_pattern_ge, + RESTRICT = scalargtsel, + JOIN = scalargtjoinsel +); + +CREATE OPERATOR ~>~ ( + LEFTARG = CITEXT, + RIGHTARG = CITEXT, + NEGATOR = ~<=~, + COMMUTATOR = ~<~, + PROCEDURE = citext_pattern_gt, + RESTRICT = scalargtsel, + JOIN = scalargtjoinsel +); + +CREATE FUNCTION citext_pattern_cmp(citext, citext) +RETURNS int4 +AS 'MODULE_PATHNAME' +LANGUAGE C STRICT IMMUTABLE PARALLEL SAFE; + +CREATE OPERATOR CLASS citext_pattern_ops +FOR TYPE CITEXT USING btree AS + OPERATOR 1 ~<~ (citext, citext), + OPERATOR 2 ~<=~ (citext, citext), + OPERATOR 3 = (citext, citext), + OPERATOR 4 ~>=~ (citext, citext), + OPERATOR 5 ~>~ (citext, citext), + FUNCTION 1 citext_pattern_cmp(citext, citext); diff --git a/contrib/citext/citext.c b/contrib/citext/citext.c index 04f604b15f..2c0e48e2bc 100644 --- a/contrib/citext/citext.c +++ b/contrib/citext/citext.c @@ -9,9 +9,7 @@ #include "utils/formatting.h" #include "utils/varlena.h" -#ifdef PG_MODULE_MAGIC PG_MODULE_MAGIC; -#endif /* * ==================== @@ -20,6 +18,7 @@ PG_MODULE_MAGIC; */ static int32 citextcmp(text *left, text *right, Oid collid); +static int32 internal_citext_pattern_cmp(text *left, text *right, Oid collid); /* * ================= @@ -60,6 +59,41 @@ citextcmp(text *left, text *right, Oid collid) return result; } +/* + * citext_pattern_cmp() + * Internal character-by-character comparison function for citext strings. + * Returns int32 negative, zero, or positive. 
+ */ +static int32 +internal_citext_pattern_cmp(text *left, text *right, Oid collid) +{ + char *lcstr, + *rcstr; + int llen, + rlen; + int32 result; + + lcstr = str_tolower(VARDATA_ANY(left), VARSIZE_ANY_EXHDR(left), DEFAULT_COLLATION_OID); + rcstr = str_tolower(VARDATA_ANY(right), VARSIZE_ANY_EXHDR(right), DEFAULT_COLLATION_OID); + + llen = strlen(lcstr); + rlen = strlen(rcstr); + + result = memcmp((void *) lcstr, (void *) rcstr, Min(llen, rlen)); + if (result == 0) + { + if (llen < rlen) + result = -1; + else if (llen > rlen) + result = 1; + } + + pfree(lcstr); + pfree(rcstr); + + return result; +} + /* * ================== * INDEXING FUNCTIONS @@ -83,6 +117,23 @@ citext_cmp(PG_FUNCTION_ARGS) PG_RETURN_INT32(result); } +PG_FUNCTION_INFO_V1(citext_pattern_cmp); + +Datum +citext_pattern_cmp(PG_FUNCTION_ARGS) +{ + text *left = PG_GETARG_TEXT_PP(0); + text *right = PG_GETARG_TEXT_PP(1); + int32 result; + + result = internal_citext_pattern_cmp(left, right, PG_GET_COLLATION()); + + PG_FREE_IF_COPY(left, 0); + PG_FREE_IF_COPY(right, 1); + + PG_RETURN_INT32(result); +} + PG_FUNCTION_INFO_V1(citext_hash); Datum @@ -236,6 +287,74 @@ citext_ge(PG_FUNCTION_ARGS) PG_RETURN_BOOL(result); } +PG_FUNCTION_INFO_V1(citext_pattern_lt); + +Datum +citext_pattern_lt(PG_FUNCTION_ARGS) +{ + text *left = PG_GETARG_TEXT_PP(0); + text *right = PG_GETARG_TEXT_PP(1); + bool result; + + result = internal_citext_pattern_cmp(left, right, PG_GET_COLLATION()) < 0; + + PG_FREE_IF_COPY(left, 0); + PG_FREE_IF_COPY(right, 1); + + PG_RETURN_BOOL(result); +} + +PG_FUNCTION_INFO_V1(citext_pattern_le); + +Datum +citext_pattern_le(PG_FUNCTION_ARGS) +{ + text *left = PG_GETARG_TEXT_PP(0); + text *right = PG_GETARG_TEXT_PP(1); + bool result; + + result = internal_citext_pattern_cmp(left, right, PG_GET_COLLATION()) <= 0; + + PG_FREE_IF_COPY(left, 0); + PG_FREE_IF_COPY(right, 1); + + PG_RETURN_BOOL(result); +} + +PG_FUNCTION_INFO_V1(citext_pattern_gt); + +Datum +citext_pattern_gt(PG_FUNCTION_ARGS) +{ + text *left = PG_GETARG_TEXT_PP(0); + text *right = PG_GETARG_TEXT_PP(1); + bool result; + + result = internal_citext_pattern_cmp(left, right, PG_GET_COLLATION()) > 0; + + PG_FREE_IF_COPY(left, 0); + PG_FREE_IF_COPY(right, 1); + + PG_RETURN_BOOL(result); +} + +PG_FUNCTION_INFO_V1(citext_pattern_ge); + +Datum +citext_pattern_ge(PG_FUNCTION_ARGS) +{ + text *left = PG_GETARG_TEXT_PP(0); + text *right = PG_GETARG_TEXT_PP(1); + bool result; + + result = internal_citext_pattern_cmp(left, right, PG_GET_COLLATION()) >= 0; + + PG_FREE_IF_COPY(left, 0); + PG_FREE_IF_COPY(right, 1); + + PG_RETURN_BOOL(result); +} + /* * =================== * AGGREGATE FUNCTIONS diff --git a/contrib/citext/citext.control b/contrib/citext/citext.control index 17fce4e887..4cd6e09331 100644 --- a/contrib/citext/citext.control +++ b/contrib/citext/citext.control @@ -1,5 +1,5 @@ # citext extension comment = 'data type for case-insensitive character strings' -default_version = '1.4' +default_version = '1.5' module_pathname = '$libdir/citext' relocatable = true diff --git a/contrib/citext/expected/citext.out b/contrib/citext/expected/citext.out index 9cc94f4c1b..99365c57b0 100644 --- a/contrib/citext/expected/citext.out +++ b/contrib/citext/expected/citext.out @@ -2336,8 +2336,8 @@ SELECT * WHERE t.id IS NULL OR m.id IS NULL; id | name | id | name ----+------+----+------ - 2 | two | | | | 2 | Two + 2 | two | | (2 rows) REFRESH MATERIALIZED VIEW CONCURRENTLY citext_matview; @@ -2351,3 +2351,355 @@ SELECT * FROM citext_matview ORDER BY id; 5 | (5 rows) +-- test 
citext_pattern_cmp() function explicitly. +SELECT citext_pattern_cmp('aardvark'::citext, 'aardvark'::citext) AS zero; + zero +------ + 0 +(1 row) + +SELECT citext_pattern_cmp('aardvark'::citext, 'aardVark'::citext) AS zero; + zero +------ + 0 +(1 row) + +SELECT citext_pattern_cmp('AARDVARK'::citext, 'AARDVARK'::citext) AS zero; + zero +------ + 0 +(1 row) + +SELECT citext_pattern_cmp('B'::citext, 'a'::citext) > 0 AS true; + true +------ + t +(1 row) + +SELECT citext_pattern_cmp('a'::citext, 'B'::citext) < 0 AS true; + true +------ + t +(1 row) + +SELECT citext_pattern_cmp('A'::citext, 'b'::citext) < 0 AS true; + true +------ + t +(1 row) + +SELECT citext_pattern_cmp('ABCD'::citext, 'abc'::citext) > 0 AS true; + true +------ + t +(1 row) + +SELECT citext_pattern_cmp('ABC'::citext, 'abcd'::citext) < 0 AS true; + true +------ + t +(1 row) + +-- test operator functions +-- lt +SELECT citext_pattern_lt('a'::citext, 'b'::citext) AS true; + true +------ + t +(1 row) + +SELECT citext_pattern_lt('A'::citext, 'b'::citext) AS true; + true +------ + t +(1 row) + +SELECT citext_pattern_lt('a'::citext, 'B'::citext) AS true; + true +------ + t +(1 row) + +SELECT citext_pattern_lt('b'::citext, 'a'::citext) AS false; + false +------- + f +(1 row) + +SELECT citext_pattern_lt('B'::citext, 'a'::citext) AS false; + false +------- + f +(1 row) + +SELECT citext_pattern_lt('b'::citext, 'A'::citext) AS false; + false +------- + f +(1 row) + +-- le +SELECT citext_pattern_le('a'::citext, 'a'::citext) AS true; + true +------ + t +(1 row) + +SELECT citext_pattern_le('a'::citext, 'A'::citext) AS true; + true +------ + t +(1 row) + +SELECT citext_pattern_le('A'::citext, 'a'::citext) AS true; + true +------ + t +(1 row) + +SELECT citext_pattern_le('A'::citext, 'A'::citext) AS true; + true +------ + t +(1 row) + +SELECT citext_pattern_le('a'::citext, 'B'::citext) AS true; + true +------ + t +(1 row) + +SELECT citext_pattern_le('A'::citext, 'b'::citext) AS true; + true +------ + t +(1 row) + +SELECT citext_pattern_le('a'::citext, 'B'::citext) AS true; + true +------ + t +(1 row) + +SELECT citext_pattern_le('b'::citext, 'a'::citext) AS false; + false +------- + f +(1 row) + +SELECT citext_pattern_le('B'::citext, 'a'::citext) AS false; + false +------- + f +(1 row) + +SELECT citext_pattern_le('b'::citext, 'A'::citext) AS false; + false +------- + f +(1 row) + +-- gt +SELECT citext_pattern_gt('a'::citext, 'b'::citext) AS false; + false +------- + f +(1 row) + +SELECT citext_pattern_gt('A'::citext, 'b'::citext) AS false; + false +------- + f +(1 row) + +SELECT citext_pattern_gt('a'::citext, 'B'::citext) AS false; + false +------- + f +(1 row) + +SELECT citext_pattern_gt('b'::citext, 'a'::citext) AS true; + true +------ + t +(1 row) + +SELECT citext_pattern_gt('B'::citext, 'a'::citext) AS true; + true +------ + t +(1 row) + +SELECT citext_pattern_gt('b'::citext, 'A'::citext) AS true; + true +------ + t +(1 row) + +-- ge +SELECT citext_pattern_ge('a'::citext, 'a'::citext) AS true; + true +------ + t +(1 row) + +SELECT citext_pattern_ge('a'::citext, 'A'::citext) AS true; + true +------ + t +(1 row) + +SELECT citext_pattern_ge('A'::citext, 'a'::citext) AS true; + true +------ + t +(1 row) + +SELECT citext_pattern_ge('A'::citext, 'A'::citext) AS true; + true +------ + t +(1 row) + +SELECT citext_pattern_ge('a'::citext, 'B'::citext) AS false; + false +------- + f +(1 row) + +SELECT citext_pattern_ge('A'::citext, 'b'::citext) AS false; + false +------- + f +(1 row) + +SELECT citext_pattern_ge('a'::citext, 'B'::citext) AS false; + 
false +------- + f +(1 row) + +SELECT citext_pattern_ge('b'::citext, 'a'::citext) AS true; + true +------ + t +(1 row) + +SELECT citext_pattern_ge('B'::citext, 'a'::citext) AS true; + true +------ + t +(1 row) + +SELECT citext_pattern_ge('b'::citext, 'A'::citext) AS true; + true +------ + t +(1 row) + +-- Multi-byte tests below are diabled like the sanity tests above. +-- Uncomment to run them. +-- Test ~<~ and ~<=~ +SELECT 'a'::citext ~<~ 'B'::citext AS t; + t +--- + t +(1 row) + +SELECT 'b'::citext ~<~ 'A'::citext AS f; + f +--- + f +(1 row) + +-- SELECT 'à'::citext ~<~ 'À'::citext AS f; +SELECT 'a'::citext ~<=~ 'B'::citext AS t; + t +--- + t +(1 row) + +SELECT 'a'::citext ~<=~ 'A'::citext AS t; + t +--- + t +(1 row) + +-- SELECT 'à'::citext ~<=~ 'À'::citext AS t; +-- Test ~>~ and ~>=~ +SELECT 'B'::citext ~>~ 'a'::citext AS t; + t +--- + t +(1 row) + +SELECT 'b'::citext ~>~ 'A'::citext AS t; + t +--- + t +(1 row) + +-- SELECT 'à'::citext ~>~ 'À'::citext AS f; +SELECT 'B'::citext ~>~ 'b'::citext AS f; + f +--- + f +(1 row) + +SELECT 'B'::citext ~>=~ 'b'::citext AS t; + t +--- + t +(1 row) + +-- SELECT 'à'::citext ~>=~ 'À'::citext AS t; +-- Test implicit casting. citext casts to text, but not vice-versa. +SELECT 'B'::citext ~<~ 'a'::text AS t; -- text wins. + t +--- + t +(1 row) + +SELECT 'B'::citext ~<=~ 'a'::text AS t; -- text wins. + t +--- + t +(1 row) + +SELECT 'a'::citext ~>~ 'B'::text AS t; -- text wins. + t +--- + t +(1 row) + +SELECT 'a'::citext ~>=~ 'B'::text AS t; -- text wins. + t +--- + t +(1 row) + +-- Test implicit casting. citext casts to varchar, but not vice-versa. +SELECT 'B'::citext ~<~ 'a'::varchar AS t; -- varchar wins. + t +--- + t +(1 row) + +SELECT 'B'::citext ~<=~ 'a'::varchar AS t; -- varchar wins. + t +--- + t +(1 row) + +SELECT 'a'::citext ~>~ 'B'::varchar AS t; -- varchar wins. + t +--- + t +(1 row) + +SELECT 'a'::citext ~>=~ 'B'::varchar AS t; -- varchar wins. + t +--- + t +(1 row) + diff --git a/contrib/citext/expected/citext_1.out b/contrib/citext/expected/citext_1.out index d1fb1e14e0..8aac72e226 100644 --- a/contrib/citext/expected/citext_1.out +++ b/contrib/citext/expected/citext_1.out @@ -2336,8 +2336,8 @@ SELECT * WHERE t.id IS NULL OR m.id IS NULL; id | name | id | name ----+------+----+------ - 2 | two | | | | 2 | Two + 2 | two | | (2 rows) REFRESH MATERIALIZED VIEW CONCURRENTLY citext_matview; @@ -2351,3 +2351,355 @@ SELECT * FROM citext_matview ORDER BY id; 5 | (5 rows) +-- test citext_pattern_cmp() function explicitly. 
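-- Reader's note, not part of the expected output: citext_pattern_cmp()
-- lower-cases both inputs with the default collation and then compares the
-- results with memcmp(), falling back to a length comparison when one string
-- is a prefix of the other. The results below follow directly from that rule,
-- e.g. 'AARDVARK' vs 'aardvark' compares equal, while 'ABC' sorts before
-- 'abcd' because it is a strict prefix after case folding.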
+SELECT citext_pattern_cmp('aardvark'::citext, 'aardvark'::citext) AS zero; + zero +------ + 0 +(1 row) + +SELECT citext_pattern_cmp('aardvark'::citext, 'aardVark'::citext) AS zero; + zero +------ + 0 +(1 row) + +SELECT citext_pattern_cmp('AARDVARK'::citext, 'AARDVARK'::citext) AS zero; + zero +------ + 0 +(1 row) + +SELECT citext_pattern_cmp('B'::citext, 'a'::citext) > 0 AS true; + true +------ + t +(1 row) + +SELECT citext_pattern_cmp('a'::citext, 'B'::citext) < 0 AS true; + true +------ + t +(1 row) + +SELECT citext_pattern_cmp('A'::citext, 'b'::citext) < 0 AS true; + true +------ + t +(1 row) + +SELECT citext_pattern_cmp('ABCD'::citext, 'abc'::citext) > 0 AS true; + true +------ + t +(1 row) + +SELECT citext_pattern_cmp('ABC'::citext, 'abcd'::citext) < 0 AS true; + true +------ + t +(1 row) + +-- test operator functions +-- lt +SELECT citext_pattern_lt('a'::citext, 'b'::citext) AS true; + true +------ + t +(1 row) + +SELECT citext_pattern_lt('A'::citext, 'b'::citext) AS true; + true +------ + t +(1 row) + +SELECT citext_pattern_lt('a'::citext, 'B'::citext) AS true; + true +------ + t +(1 row) + +SELECT citext_pattern_lt('b'::citext, 'a'::citext) AS false; + false +------- + f +(1 row) + +SELECT citext_pattern_lt('B'::citext, 'a'::citext) AS false; + false +------- + f +(1 row) + +SELECT citext_pattern_lt('b'::citext, 'A'::citext) AS false; + false +------- + f +(1 row) + +-- le +SELECT citext_pattern_le('a'::citext, 'a'::citext) AS true; + true +------ + t +(1 row) + +SELECT citext_pattern_le('a'::citext, 'A'::citext) AS true; + true +------ + t +(1 row) + +SELECT citext_pattern_le('A'::citext, 'a'::citext) AS true; + true +------ + t +(1 row) + +SELECT citext_pattern_le('A'::citext, 'A'::citext) AS true; + true +------ + t +(1 row) + +SELECT citext_pattern_le('a'::citext, 'B'::citext) AS true; + true +------ + t +(1 row) + +SELECT citext_pattern_le('A'::citext, 'b'::citext) AS true; + true +------ + t +(1 row) + +SELECT citext_pattern_le('a'::citext, 'B'::citext) AS true; + true +------ + t +(1 row) + +SELECT citext_pattern_le('b'::citext, 'a'::citext) AS false; + false +------- + f +(1 row) + +SELECT citext_pattern_le('B'::citext, 'a'::citext) AS false; + false +------- + f +(1 row) + +SELECT citext_pattern_le('b'::citext, 'A'::citext) AS false; + false +------- + f +(1 row) + +-- gt +SELECT citext_pattern_gt('a'::citext, 'b'::citext) AS false; + false +------- + f +(1 row) + +SELECT citext_pattern_gt('A'::citext, 'b'::citext) AS false; + false +------- + f +(1 row) + +SELECT citext_pattern_gt('a'::citext, 'B'::citext) AS false; + false +------- + f +(1 row) + +SELECT citext_pattern_gt('b'::citext, 'a'::citext) AS true; + true +------ + t +(1 row) + +SELECT citext_pattern_gt('B'::citext, 'a'::citext) AS true; + true +------ + t +(1 row) + +SELECT citext_pattern_gt('b'::citext, 'A'::citext) AS true; + true +------ + t +(1 row) + +-- ge +SELECT citext_pattern_ge('a'::citext, 'a'::citext) AS true; + true +------ + t +(1 row) + +SELECT citext_pattern_ge('a'::citext, 'A'::citext) AS true; + true +------ + t +(1 row) + +SELECT citext_pattern_ge('A'::citext, 'a'::citext) AS true; + true +------ + t +(1 row) + +SELECT citext_pattern_ge('A'::citext, 'A'::citext) AS true; + true +------ + t +(1 row) + +SELECT citext_pattern_ge('a'::citext, 'B'::citext) AS false; + false +------- + f +(1 row) + +SELECT citext_pattern_ge('A'::citext, 'b'::citext) AS false; + false +------- + f +(1 row) + +SELECT citext_pattern_ge('a'::citext, 'B'::citext) AS false; + false +------- + f +(1 row) + +SELECT 
citext_pattern_ge('b'::citext, 'a'::citext) AS true; + true +------ + t +(1 row) + +SELECT citext_pattern_ge('B'::citext, 'a'::citext) AS true; + true +------ + t +(1 row) + +SELECT citext_pattern_ge('b'::citext, 'A'::citext) AS true; + true +------ + t +(1 row) + +-- Multi-byte tests below are diabled like the sanity tests above. +-- Uncomment to run them. +-- Test ~<~ and ~<=~ +SELECT 'a'::citext ~<~ 'B'::citext AS t; + t +--- + t +(1 row) + +SELECT 'b'::citext ~<~ 'A'::citext AS f; + f +--- + f +(1 row) + +-- SELECT 'à'::citext ~<~ 'À'::citext AS f; +SELECT 'a'::citext ~<=~ 'B'::citext AS t; + t +--- + t +(1 row) + +SELECT 'a'::citext ~<=~ 'A'::citext AS t; + t +--- + t +(1 row) + +-- SELECT 'à'::citext ~<=~ 'À'::citext AS t; +-- Test ~>~ and ~>=~ +SELECT 'B'::citext ~>~ 'a'::citext AS t; + t +--- + t +(1 row) + +SELECT 'b'::citext ~>~ 'A'::citext AS t; + t +--- + t +(1 row) + +-- SELECT 'à'::citext ~>~ 'À'::citext AS f; +SELECT 'B'::citext ~>~ 'b'::citext AS f; + f +--- + f +(1 row) + +SELECT 'B'::citext ~>=~ 'b'::citext AS t; + t +--- + t +(1 row) + +-- SELECT 'à'::citext ~>=~ 'À'::citext AS t; +-- Test implicit casting. citext casts to text, but not vice-versa. +SELECT 'B'::citext ~<~ 'a'::text AS t; -- text wins. + t +--- + t +(1 row) + +SELECT 'B'::citext ~<=~ 'a'::text AS t; -- text wins. + t +--- + t +(1 row) + +SELECT 'a'::citext ~>~ 'B'::text AS t; -- text wins. + t +--- + t +(1 row) + +SELECT 'a'::citext ~>=~ 'B'::text AS t; -- text wins. + t +--- + t +(1 row) + +-- Test implicit casting. citext casts to varchar, but not vice-versa. +SELECT 'B'::citext ~<~ 'a'::varchar AS t; -- varchar wins. + t +--- + t +(1 row) + +SELECT 'B'::citext ~<=~ 'a'::varchar AS t; -- varchar wins. + t +--- + t +(1 row) + +SELECT 'a'::citext ~>~ 'B'::varchar AS t; -- varchar wins. + t +--- + t +(1 row) + +SELECT 'a'::citext ~>=~ 'B'::varchar AS t; -- varchar wins. + t +--- + t +(1 row) + diff --git a/contrib/citext/sql/citext.sql b/contrib/citext/sql/citext.sql index f70f9ebae9..2732be436d 100644 --- a/contrib/citext/sql/citext.sql +++ b/contrib/citext/sql/citext.sql @@ -752,3 +752,84 @@ SELECT * WHERE t.id IS NULL OR m.id IS NULL; REFRESH MATERIALIZED VIEW CONCURRENTLY citext_matview; SELECT * FROM citext_matview ORDER BY id; + +-- test citext_pattern_cmp() function explicitly. 
+SELECT citext_pattern_cmp('aardvark'::citext, 'aardvark'::citext) AS zero; +SELECT citext_pattern_cmp('aardvark'::citext, 'aardVark'::citext) AS zero; +SELECT citext_pattern_cmp('AARDVARK'::citext, 'AARDVARK'::citext) AS zero; +SELECT citext_pattern_cmp('B'::citext, 'a'::citext) > 0 AS true; +SELECT citext_pattern_cmp('a'::citext, 'B'::citext) < 0 AS true; +SELECT citext_pattern_cmp('A'::citext, 'b'::citext) < 0 AS true; +SELECT citext_pattern_cmp('ABCD'::citext, 'abc'::citext) > 0 AS true; +SELECT citext_pattern_cmp('ABC'::citext, 'abcd'::citext) < 0 AS true; + +-- test operator functions +-- lt +SELECT citext_pattern_lt('a'::citext, 'b'::citext) AS true; +SELECT citext_pattern_lt('A'::citext, 'b'::citext) AS true; +SELECT citext_pattern_lt('a'::citext, 'B'::citext) AS true; +SELECT citext_pattern_lt('b'::citext, 'a'::citext) AS false; +SELECT citext_pattern_lt('B'::citext, 'a'::citext) AS false; +SELECT citext_pattern_lt('b'::citext, 'A'::citext) AS false; +-- le +SELECT citext_pattern_le('a'::citext, 'a'::citext) AS true; +SELECT citext_pattern_le('a'::citext, 'A'::citext) AS true; +SELECT citext_pattern_le('A'::citext, 'a'::citext) AS true; +SELECT citext_pattern_le('A'::citext, 'A'::citext) AS true; +SELECT citext_pattern_le('a'::citext, 'B'::citext) AS true; +SELECT citext_pattern_le('A'::citext, 'b'::citext) AS true; +SELECT citext_pattern_le('a'::citext, 'B'::citext) AS true; +SELECT citext_pattern_le('b'::citext, 'a'::citext) AS false; +SELECT citext_pattern_le('B'::citext, 'a'::citext) AS false; +SELECT citext_pattern_le('b'::citext, 'A'::citext) AS false; +-- gt +SELECT citext_pattern_gt('a'::citext, 'b'::citext) AS false; +SELECT citext_pattern_gt('A'::citext, 'b'::citext) AS false; +SELECT citext_pattern_gt('a'::citext, 'B'::citext) AS false; +SELECT citext_pattern_gt('b'::citext, 'a'::citext) AS true; +SELECT citext_pattern_gt('B'::citext, 'a'::citext) AS true; +SELECT citext_pattern_gt('b'::citext, 'A'::citext) AS true; +-- ge +SELECT citext_pattern_ge('a'::citext, 'a'::citext) AS true; +SELECT citext_pattern_ge('a'::citext, 'A'::citext) AS true; +SELECT citext_pattern_ge('A'::citext, 'a'::citext) AS true; +SELECT citext_pattern_ge('A'::citext, 'A'::citext) AS true; +SELECT citext_pattern_ge('a'::citext, 'B'::citext) AS false; +SELECT citext_pattern_ge('A'::citext, 'b'::citext) AS false; +SELECT citext_pattern_ge('a'::citext, 'B'::citext) AS false; +SELECT citext_pattern_ge('b'::citext, 'a'::citext) AS true; +SELECT citext_pattern_ge('B'::citext, 'a'::citext) AS true; +SELECT citext_pattern_ge('b'::citext, 'A'::citext) AS true; + +-- Multi-byte tests below are diabled like the sanity tests above. +-- Uncomment to run them. + +-- Test ~<~ and ~<=~ +SELECT 'a'::citext ~<~ 'B'::citext AS t; +SELECT 'b'::citext ~<~ 'A'::citext AS f; +-- SELECT 'à'::citext ~<~ 'À'::citext AS f; +SELECT 'a'::citext ~<=~ 'B'::citext AS t; +SELECT 'a'::citext ~<=~ 'A'::citext AS t; +-- SELECT 'à'::citext ~<=~ 'À'::citext AS t; + +-- Test ~>~ and ~>=~ +SELECT 'B'::citext ~>~ 'a'::citext AS t; +SELECT 'b'::citext ~>~ 'A'::citext AS t; +-- SELECT 'à'::citext ~>~ 'À'::citext AS f; +SELECT 'B'::citext ~>~ 'b'::citext AS f; +SELECT 'B'::citext ~>=~ 'b'::citext AS t; +-- SELECT 'à'::citext ~>=~ 'À'::citext AS t; + +-- Test implicit casting. citext casts to text, but not vice-versa. +SELECT 'B'::citext ~<~ 'a'::text AS t; -- text wins. +SELECT 'B'::citext ~<=~ 'a'::text AS t; -- text wins. + +SELECT 'a'::citext ~>~ 'B'::text AS t; -- text wins. +SELECT 'a'::citext ~>=~ 'B'::text AS t; -- text wins. 
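The ~<~/~<=~/~>~/~>=~ operators and citext_pattern_* functions exercised above compare the case-folded values byte by byte, ignoring the collation, which is what makes them suitable for a btree operator class in the style of text_pattern_ops. A minimal usage sketch: the table and index names are illustrative only, and the operator class name citext_pattern_ops is assumed by analogy with text_pattern_ops rather than taken from this hunk.

CREATE TABLE notes (title citext);
CREATE INDEX notes_title_pattern_idx ON notes (title citext_pattern_ops);
-- The comparison folds case first, then compares bytes:
SELECT 'B'::citext ~>~ 'a'::citext AS t;   -- t, since lower('B') = 'b' sorts after 'a' byte-wise
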
+ +-- Test implicit casting. citext casts to varchar, but not vice-versa. +SELECT 'B'::citext ~<~ 'a'::varchar AS t; -- varchar wins. +SELECT 'B'::citext ~<=~ 'a'::varchar AS t; -- varchar wins. + +SELECT 'a'::citext ~>~ 'B'::varchar AS t; -- varchar wins. +SELECT 'a'::citext ~>=~ 'B'::varchar AS t; -- varchar wins. diff --git a/contrib/cube/Makefile b/contrib/cube/Makefile index be7a1bc1a0..5e7b524dc2 100644 --- a/contrib/cube/Makefile +++ b/contrib/cube/Makefile @@ -4,11 +4,14 @@ MODULE_big = cube OBJS= cube.o cubeparse.o $(WIN32RES) EXTENSION = cube -DATA = cube--1.2.sql cube--1.1--1.2.sql cube--1.0--1.1.sql \ +DATA = cube--1.2.sql cube--1.2--1.3.sql cube--1.3--1.4.sql \ + cube--1.1--1.2.sql cube--1.0--1.1.sql \ cube--unpackaged--1.0.sql PGFILEDESC = "cube - multidimensional cube data type" -REGRESS = cube +HEADERS = cubedata.h + +REGRESS = cube cube_sci EXTRA_CLEAN = y.tab.c y.tab.h diff --git a/contrib/cube/cube--1.2--1.3.sql b/contrib/cube/cube--1.2--1.3.sql new file mode 100644 index 0000000000..a688f19f02 --- /dev/null +++ b/contrib/cube/cube--1.2--1.3.sql @@ -0,0 +1,12 @@ +/* contrib/cube/cube--1.2--1.3.sql */ + +-- complain if script is sourced in psql, rather than via ALTER EXTENSION +\echo Use "ALTER EXTENSION cube UPDATE TO '1.3'" to load this file. \quit + +ALTER OPERATOR <= (cube, cube) SET ( + RESTRICT = scalarlesel, JOIN = scalarlejoinsel +); + +ALTER OPERATOR >= (cube, cube) SET ( + RESTRICT = scalargesel, JOIN = scalargejoinsel +); diff --git a/contrib/cube/cube--1.3--1.4.sql b/contrib/cube/cube--1.3--1.4.sql new file mode 100644 index 0000000000..869820c0c8 --- /dev/null +++ b/contrib/cube/cube--1.3--1.4.sql @@ -0,0 +1,45 @@ +/* contrib/cube/cube--1.3--1.4.sql */ + +-- complain if script is sourced in psql, rather than via ALTER EXTENSION +\echo Use "ALTER EXTENSION cube UPDATE TO '1.4'" to load this file. \quit + +-- +-- Get rid of unnecessary compress and decompress support functions. +-- +-- To be allowed to drop the opclass entry for a support function, +-- we must change the entry's dependency type from 'internal' to 'auto', +-- as though it were a loose member of the opfamily rather than being +-- bound into a particular opclass. There's no SQL command for that, +-- so fake it with a manual update on pg_depend. 
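Before the two UPDATEs below run, the rows they rewrite can be inspected with a query along these lines (a sketch, assuming cube 1.3 is installed so the compress/decompress support functions still exist):

SELECT d.objid AS amproc_entry, opc.opcname, d.deptype
FROM pg_catalog.pg_depend d
JOIN pg_catalog.pg_opclass opc ON opc.oid = d.refobjid
WHERE d.classid = 'pg_catalog.pg_amproc'::pg_catalog.regclass
  AND d.refclassid = 'pg_catalog.pg_opclass'::pg_catalog.regclass
  AND d.objid IN (SELECT objid
                  FROM pg_catalog.pg_depend
                  WHERE classid = 'pg_catalog.pg_amproc'::pg_catalog.regclass
                    AND refclassid = 'pg_catalog.pg_proc'::pg_catalog.regclass
                    AND refobjid IN ('g_cube_compress(pg_catalog.internal)'::pg_catalog.regprocedure,
                                     'g_cube_decompress(pg_catalog.internal)'::pg_catalog.regprocedure));
-- deptype is 'i' (internal) before the UPDATEs and 'a' (auto) afterwards,
-- which is what lets ALTER OPERATOR FAMILY drop the support-function entries.
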
+-- +UPDATE pg_catalog.pg_depend +SET deptype = 'a' +WHERE classid = 'pg_catalog.pg_amproc'::pg_catalog.regclass + AND objid = + (SELECT objid + FROM pg_catalog.pg_depend + WHERE classid = 'pg_catalog.pg_amproc'::pg_catalog.regclass + AND refclassid = 'pg_catalog.pg_proc'::pg_catalog.regclass + AND (refobjid = 'g_cube_compress(pg_catalog.internal)'::pg_catalog.regprocedure)) + AND refclassid = 'pg_catalog.pg_opclass'::pg_catalog.regclass + AND deptype = 'i'; + +ALTER OPERATOR FAMILY gist_cube_ops USING gist drop function 3 (cube); +ALTER EXTENSION cube DROP function g_cube_compress(pg_catalog.internal); +DROP FUNCTION g_cube_compress(pg_catalog.internal); + +UPDATE pg_catalog.pg_depend +SET deptype = 'a' +WHERE classid = 'pg_catalog.pg_amproc'::pg_catalog.regclass + AND objid = + (SELECT objid + FROM pg_catalog.pg_depend + WHERE classid = 'pg_catalog.pg_amproc'::pg_catalog.regclass + AND refclassid = 'pg_catalog.pg_proc'::pg_catalog.regclass + AND (refobjid = 'g_cube_decompress(pg_catalog.internal)'::pg_catalog.regprocedure)) + AND refclassid = 'pg_catalog.pg_opclass'::pg_catalog.regclass + AND deptype = 'i'; + +ALTER OPERATOR FAMILY gist_cube_ops USING gist drop function 4 (cube); +ALTER EXTENSION cube DROP function g_cube_decompress(pg_catalog.internal); +DROP FUNCTION g_cube_decompress(pg_catalog.internal); diff --git a/contrib/cube/cube.c b/contrib/cube/cube.c index 149558c763..3bbfbf2847 100644 --- a/contrib/cube/cube.c +++ b/contrib/cube/cube.c @@ -8,13 +8,12 @@ #include "postgres.h" -#include #include #include "access/gist.h" #include "access/stratnum.h" #include "utils/array.h" -#include "utils/builtins.h" +#include "utils/float.h" #include "cubedata.h" @@ -126,7 +125,7 @@ cube_in(PG_FUNCTION_ARGS) cube_scanner_finish(); - PG_RETURN_NDBOX(result); + PG_RETURN_NDBOX_P(result); } @@ -152,6 +151,13 @@ cube_a_f8_f8(PG_FUNCTION_ARGS) errmsg("cannot work with arrays containing NULLs"))); dim = ARRNELEMS(ur); + if (dim > CUBE_MAX_DIM) + ereport(ERROR, + (errcode(ERRCODE_PROGRAM_LIMIT_EXCEEDED), + errmsg("can't extend cube"), + errdetail("A cube cannot have more than %d dimensions.", + CUBE_MAX_DIM))); + if (ARRNELEMS(ll) != dim) ereport(ERROR, (errcode(ERRCODE_ARRAY_ELEMENT_ERROR), @@ -187,7 +193,7 @@ cube_a_f8_f8(PG_FUNCTION_ARGS) else SET_POINT_BIT(result); - PG_RETURN_NDBOX(result); + PG_RETURN_NDBOX_P(result); } /* @@ -209,6 +215,12 @@ cube_a_f8(PG_FUNCTION_ARGS) errmsg("cannot work with arrays containing NULLs"))); dim = ARRNELEMS(ur); + if (dim > CUBE_MAX_DIM) + ereport(ERROR, + (errcode(ERRCODE_PROGRAM_LIMIT_EXCEEDED), + errmsg("array is too long"), + errdetail("A cube cannot have more than %d dimensions.", + CUBE_MAX_DIM))); dur = ARRPTR(ur); @@ -221,13 +233,13 @@ cube_a_f8(PG_FUNCTION_ARGS) for (i = 0; i < dim; i++) result->x[i] = dur[i]; - PG_RETURN_NDBOX(result); + PG_RETURN_NDBOX_P(result); } Datum cube_subset(PG_FUNCTION_ARGS) { - NDBOX *c = PG_GETARG_NDBOX(0); + NDBOX *c = PG_GETARG_NDBOX_P(0); ArrayType *idx = PG_GETARG_ARRAYTYPE_P(1); NDBOX *result; int size, @@ -243,6 +255,13 @@ cube_subset(PG_FUNCTION_ARGS) dx = (int32 *) ARR_DATA_PTR(idx); dim = ARRNELEMS(idx); + if (dim > CUBE_MAX_DIM) + ereport(ERROR, + (errcode(ERRCODE_PROGRAM_LIMIT_EXCEEDED), + errmsg("array is too long"), + errdetail("A cube cannot have more than %d dimensions.", + CUBE_MAX_DIM))); + size = IS_POINT(c) ? 
POINT_SIZE(dim) : CUBE_SIZE(dim); result = (NDBOX *) palloc0(size); SET_VARSIZE(result, size); @@ -263,13 +282,13 @@ cube_subset(PG_FUNCTION_ARGS) } PG_FREE_IF_COPY(c, 0); - PG_RETURN_NDBOX(result); + PG_RETURN_NDBOX_P(result); } Datum cube_out(PG_FUNCTION_ARGS) { - NDBOX *cube = PG_GETARG_NDBOX(0); + NDBOX *cube = PG_GETARG_NDBOX_P(0); StringInfoData buf; int dim = DIM(cube); int i; @@ -309,14 +328,14 @@ cube_out(PG_FUNCTION_ARGS) /* ** The GiST Consistent method for boxes ** Should return false if for all data items x below entry, -** the predicate x op query == FALSE, where op is the oper +** the predicate x op query == false, where op is the oper ** corresponding to strategy in the pg_amop table. */ Datum g_cube_consistent(PG_FUNCTION_ARGS) { GISTENTRY *entry = (GISTENTRY *) PG_GETARG_POINTER(0); - NDBOX *query = PG_GETARG_NDBOX(1); + NDBOX *query = PG_GETARG_NDBOX_P(1); StrategyNumber strategy = (StrategyNumber) PG_GETARG_UINT16(2); /* Oid subtype = PG_GETARG_OID(3); */ @@ -331,10 +350,10 @@ g_cube_consistent(PG_FUNCTION_ARGS) * g_cube_leaf_consistent */ if (GIST_LEAF(entry)) - res = g_cube_leaf_consistent(DatumGetNDBOX(entry->key), + res = g_cube_leaf_consistent(DatumGetNDBOXP(entry->key), query, strategy); else - res = g_cube_internal_consistent(DatumGetNDBOX(entry->key), + res = g_cube_internal_consistent(DatumGetNDBOXP(entry->key), query, strategy); PG_FREE_IF_COPY(query, 1); @@ -355,7 +374,7 @@ g_cube_union(PG_FUNCTION_ARGS) NDBOX *tmp; int i; - tmp = DatumGetNDBOX(entryvec->vector[0].key); + tmp = DatumGetNDBOXP(entryvec->vector[0].key); /* * sizep = sizeof(NDBOX); -- NDBOX has variable size @@ -365,7 +384,7 @@ g_cube_union(PG_FUNCTION_ARGS) for (i = 1; i < entryvec->n; i++) { out = g_cube_binary_union(tmp, - DatumGetNDBOX(entryvec->vector[i].key), + DatumGetNDBOXP(entryvec->vector[i].key), sizep); tmp = out; } @@ -388,15 +407,15 @@ Datum g_cube_decompress(PG_FUNCTION_ARGS) { GISTENTRY *entry = (GISTENTRY *) PG_GETARG_POINTER(0); - NDBOX *key = DatumGetNDBOX(PG_DETOAST_DATUM(entry->key)); + NDBOX *key = DatumGetNDBOXP(entry->key); - if (key != DatumGetNDBOX(entry->key)) + if (key != DatumGetNDBOXP(entry->key)) { GISTENTRY *retval = (GISTENTRY *) palloc(sizeof(GISTENTRY)); gistentryinit(*retval, PointerGetDatum(key), entry->rel, entry->page, - entry->offset, FALSE); + entry->offset, false); PG_RETURN_POINTER(retval); } PG_RETURN_POINTER(entry); @@ -417,10 +436,10 @@ g_cube_penalty(PG_FUNCTION_ARGS) double tmp1, tmp2; - ud = cube_union_v0(DatumGetNDBOX(origentry->key), - DatumGetNDBOX(newentry->key)); + ud = cube_union_v0(DatumGetNDBOXP(origentry->key), + DatumGetNDBOXP(newentry->key)); rt_cube_size(ud, &tmp1); - rt_cube_size(DatumGetNDBOX(origentry->key), &tmp2); + rt_cube_size(DatumGetNDBOXP(origentry->key), &tmp2); *result = (float) (tmp1 - tmp2); PG_RETURN_FLOAT8(*result); @@ -473,17 +492,18 @@ g_cube_picksplit(PG_FUNCTION_ARGS) for (i = FirstOffsetNumber; i < maxoff; i = OffsetNumberNext(i)) { - datum_alpha = DatumGetNDBOX(entryvec->vector[i].key); + datum_alpha = DatumGetNDBOXP(entryvec->vector[i].key); for (j = OffsetNumberNext(i); j <= maxoff; j = OffsetNumberNext(j)) { - datum_beta = DatumGetNDBOX(entryvec->vector[j].key); + datum_beta = DatumGetNDBOXP(entryvec->vector[j].key); /* compute the wasted space by unioning these guys */ /* size_waste = size_union - size_inter; */ union_d = cube_union_v0(datum_alpha, datum_beta); rt_cube_size(union_d, &size_union); - inter_d = DatumGetNDBOX(DirectFunctionCall2(cube_inter, - entryvec->vector[i].key, entryvec->vector[j].key)); + 
inter_d = DatumGetNDBOXP(DirectFunctionCall2(cube_inter, + entryvec->vector[i].key, + entryvec->vector[j].key)); rt_cube_size(inter_d, &size_inter); size_waste = size_union - size_inter; @@ -506,10 +526,10 @@ g_cube_picksplit(PG_FUNCTION_ARGS) right = v->spl_right; v->spl_nright = 0; - datum_alpha = DatumGetNDBOX(entryvec->vector[seed_1].key); + datum_alpha = DatumGetNDBOXP(entryvec->vector[seed_1].key); datum_l = cube_union_v0(datum_alpha, datum_alpha); rt_cube_size(datum_l, &size_l); - datum_beta = DatumGetNDBOX(entryvec->vector[seed_2].key); + datum_beta = DatumGetNDBOXP(entryvec->vector[seed_2].key); datum_r = cube_union_v0(datum_beta, datum_beta); rt_cube_size(datum_r, &size_r); @@ -548,7 +568,7 @@ g_cube_picksplit(PG_FUNCTION_ARGS) } /* okay, which page needs least enlargement? */ - datum_alpha = DatumGetNDBOX(entryvec->vector[i].key); + datum_alpha = DatumGetNDBOXP(entryvec->vector[i].key); union_dl = cube_union_v0(datum_l, datum_alpha); union_dr = cube_union_v0(datum_r, datum_alpha); rt_cube_size(union_dl, &size_alpha); @@ -584,16 +604,16 @@ g_cube_picksplit(PG_FUNCTION_ARGS) Datum g_cube_same(PG_FUNCTION_ARGS) { - NDBOX *b1 = PG_GETARG_NDBOX(0); - NDBOX *b2 = PG_GETARG_NDBOX(1); + NDBOX *b1 = PG_GETARG_NDBOX_P(0); + NDBOX *b2 = PG_GETARG_NDBOX_P(1); bool *result = (bool *) PG_GETARG_POINTER(2); if (cube_cmp_v0(b1, b2) == 0) - *result = TRUE; + *result = true; else - *result = FALSE; + *result = false; - PG_RETURN_NDBOX(result); + PG_RETURN_NDBOX_P(result); } /* @@ -609,23 +629,23 @@ g_cube_leaf_consistent(NDBOX *key, switch (strategy) { case RTOverlapStrategyNumber: - retval = (bool) cube_overlap_v0(key, query); + retval = cube_overlap_v0(key, query); break; case RTSameStrategyNumber: - retval = (bool) (cube_cmp_v0(key, query) == 0); + retval = (cube_cmp_v0(key, query) == 0); break; case RTContainsStrategyNumber: case RTOldContainsStrategyNumber: - retval = (bool) cube_contains_v0(key, query); + retval = cube_contains_v0(key, query); break; case RTContainedByStrategyNumber: case RTOldContainedByStrategyNumber: - retval = (bool) cube_contains_v0(query, key); + retval = cube_contains_v0(query, key); break; default: - retval = FALSE; + retval = false; } - return (retval); + return retval; } bool @@ -650,9 +670,9 @@ g_cube_internal_consistent(NDBOX *key, retval = (bool) cube_overlap_v0(key, query); break; default: - retval = FALSE; + retval = false; } - return (retval); + return retval; } NDBOX * @@ -663,7 +683,7 @@ g_cube_binary_union(NDBOX *r1, NDBOX *r2, int *sizep) retval = cube_union_v0(r1, r2); *sizep = VARSIZE(retval); - return (retval); + return retval; } @@ -729,29 +749,29 @@ cube_union_v0(NDBOX *a, NDBOX *b) SET_POINT_BIT(result); } - return (result); + return result; } Datum cube_union(PG_FUNCTION_ARGS) { - NDBOX *a = PG_GETARG_NDBOX(0); - NDBOX *b = PG_GETARG_NDBOX(1); + NDBOX *a = PG_GETARG_NDBOX_P(0); + NDBOX *b = PG_GETARG_NDBOX_P(1); NDBOX *res; res = cube_union_v0(a, b); PG_FREE_IF_COPY(a, 0); PG_FREE_IF_COPY(b, 1); - PG_RETURN_NDBOX(res); + PG_RETURN_NDBOX_P(res); } /* cube_inter */ Datum cube_inter(PG_FUNCTION_ARGS) { - NDBOX *a = PG_GETARG_NDBOX(0); - NDBOX *b = PG_GETARG_NDBOX(1); + NDBOX *a = PG_GETARG_NDBOX_P(0); + NDBOX *b = PG_GETARG_NDBOX_P(1); NDBOX *result; bool swapped = false; int i; @@ -823,14 +843,14 @@ cube_inter(PG_FUNCTION_ARGS) /* * Is it OK to return a non-null intersection for non-overlapping boxes? 
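To make the question above concrete, a sketch of the long-standing behaviour (unchanged by this patch, which only switches the fmgr macros here); it assumes the per-dimension greatest-lower-bound / least-upper-bound rule the function applies:

SELECT cube_inter('(0),(1)'::cube, '(3),(4)'::cube);
-- Neither NULL nor an error: for disjoint inputs the rule produces a box whose
-- stored lower bound (3) exceeds its stored upper bound (1), i.e. an "inverted"
-- interval standing in for the empty intersection.
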
*/ - PG_RETURN_NDBOX(result); + PG_RETURN_NDBOX_P(result); } /* cube_size */ Datum cube_size(PG_FUNCTION_ARGS) { - NDBOX *a = PG_GETARG_NDBOX(0); + NDBOX *a = PG_GETARG_NDBOX_P(0); double result; rt_cube_size(a, &result); @@ -948,8 +968,8 @@ cube_cmp_v0(NDBOX *a, NDBOX *b) Datum cube_cmp(PG_FUNCTION_ARGS) { - NDBOX *a = PG_GETARG_NDBOX(0), - *b = PG_GETARG_NDBOX(1); + NDBOX *a = PG_GETARG_NDBOX_P(0), + *b = PG_GETARG_NDBOX_P(1); int32 res; res = cube_cmp_v0(a, b); @@ -963,8 +983,8 @@ cube_cmp(PG_FUNCTION_ARGS) Datum cube_eq(PG_FUNCTION_ARGS) { - NDBOX *a = PG_GETARG_NDBOX(0), - *b = PG_GETARG_NDBOX(1); + NDBOX *a = PG_GETARG_NDBOX_P(0), + *b = PG_GETARG_NDBOX_P(1); int32 res; res = cube_cmp_v0(a, b); @@ -978,8 +998,8 @@ cube_eq(PG_FUNCTION_ARGS) Datum cube_ne(PG_FUNCTION_ARGS) { - NDBOX *a = PG_GETARG_NDBOX(0), - *b = PG_GETARG_NDBOX(1); + NDBOX *a = PG_GETARG_NDBOX_P(0), + *b = PG_GETARG_NDBOX_P(1); int32 res; res = cube_cmp_v0(a, b); @@ -993,8 +1013,8 @@ cube_ne(PG_FUNCTION_ARGS) Datum cube_lt(PG_FUNCTION_ARGS) { - NDBOX *a = PG_GETARG_NDBOX(0), - *b = PG_GETARG_NDBOX(1); + NDBOX *a = PG_GETARG_NDBOX_P(0), + *b = PG_GETARG_NDBOX_P(1); int32 res; res = cube_cmp_v0(a, b); @@ -1008,8 +1028,8 @@ cube_lt(PG_FUNCTION_ARGS) Datum cube_gt(PG_FUNCTION_ARGS) { - NDBOX *a = PG_GETARG_NDBOX(0), - *b = PG_GETARG_NDBOX(1); + NDBOX *a = PG_GETARG_NDBOX_P(0), + *b = PG_GETARG_NDBOX_P(1); int32 res; res = cube_cmp_v0(a, b); @@ -1023,8 +1043,8 @@ cube_gt(PG_FUNCTION_ARGS) Datum cube_le(PG_FUNCTION_ARGS) { - NDBOX *a = PG_GETARG_NDBOX(0), - *b = PG_GETARG_NDBOX(1); + NDBOX *a = PG_GETARG_NDBOX_P(0), + *b = PG_GETARG_NDBOX_P(1); int32 res; res = cube_cmp_v0(a, b); @@ -1038,8 +1058,8 @@ cube_le(PG_FUNCTION_ARGS) Datum cube_ge(PG_FUNCTION_ARGS) { - NDBOX *a = PG_GETARG_NDBOX(0), - *b = PG_GETARG_NDBOX(1); + NDBOX *a = PG_GETARG_NDBOX_P(0), + *b = PG_GETARG_NDBOX_P(1); int32 res; res = cube_cmp_v0(a, b); @@ -1058,7 +1078,7 @@ cube_contains_v0(NDBOX *a, NDBOX *b) int i; if ((a == NULL) || (b == NULL)) - return (FALSE); + return false; if (DIM(a) < DIM(b)) { @@ -1070,9 +1090,9 @@ cube_contains_v0(NDBOX *a, NDBOX *b) for (i = DIM(a); i < DIM(b); i++) { if (LL_COORD(b, i) != 0) - return (FALSE); + return false; if (UR_COORD(b, i) != 0) - return (FALSE); + return false; } } @@ -1081,20 +1101,20 @@ cube_contains_v0(NDBOX *a, NDBOX *b) { if (Min(LL_COORD(a, i), UR_COORD(a, i)) > Min(LL_COORD(b, i), UR_COORD(b, i))) - return (FALSE); + return false; if (Max(LL_COORD(a, i), UR_COORD(a, i)) < Max(LL_COORD(b, i), UR_COORD(b, i))) - return (FALSE); + return false; } - return (TRUE); + return true; } Datum cube_contains(PG_FUNCTION_ARGS) { - NDBOX *a = PG_GETARG_NDBOX(0), - *b = PG_GETARG_NDBOX(1); + NDBOX *a = PG_GETARG_NDBOX_P(0), + *b = PG_GETARG_NDBOX_P(1); bool res; res = cube_contains_v0(a, b); @@ -1109,8 +1129,8 @@ cube_contains(PG_FUNCTION_ARGS) Datum cube_contained(PG_FUNCTION_ARGS) { - NDBOX *a = PG_GETARG_NDBOX(0), - *b = PG_GETARG_NDBOX(1); + NDBOX *a = PG_GETARG_NDBOX_P(0), + *b = PG_GETARG_NDBOX_P(1); bool res; res = cube_contains_v0(b, a); @@ -1128,7 +1148,7 @@ cube_overlap_v0(NDBOX *a, NDBOX *b) int i; if ((a == NULL) || (b == NULL)) - return (FALSE); + return false; /* swap the box pointers if needed */ if (DIM(a) < DIM(b)) @@ -1143,29 +1163,29 @@ cube_overlap_v0(NDBOX *a, NDBOX *b) for (i = 0; i < DIM(b); i++) { if (Min(LL_COORD(a, i), UR_COORD(a, i)) > Max(LL_COORD(b, i), UR_COORD(b, i))) - return (FALSE); + return false; if (Max(LL_COORD(a, i), UR_COORD(a, i)) < Min(LL_COORD(b, i), UR_COORD(b, i))) - 
return (FALSE); + return false; } /* compare to zero those dimensions in (a) absent in (b) */ for (i = DIM(b); i < DIM(a); i++) { if (Min(LL_COORD(a, i), UR_COORD(a, i)) > 0) - return (FALSE); + return false; if (Max(LL_COORD(a, i), UR_COORD(a, i)) < 0) - return (FALSE); + return false; } - return (TRUE); + return true; } Datum cube_overlap(PG_FUNCTION_ARGS) { - NDBOX *a = PG_GETARG_NDBOX(0), - *b = PG_GETARG_NDBOX(1); + NDBOX *a = PG_GETARG_NDBOX_P(0), + *b = PG_GETARG_NDBOX_P(1); bool res; res = cube_overlap_v0(a, b); @@ -1184,8 +1204,8 @@ cube_overlap(PG_FUNCTION_ARGS) Datum cube_distance(PG_FUNCTION_ARGS) { - NDBOX *a = PG_GETARG_NDBOX(0), - *b = PG_GETARG_NDBOX(1); + NDBOX *a = PG_GETARG_NDBOX_P(0), + *b = PG_GETARG_NDBOX_P(1); bool swapped = false; double d, distance; @@ -1233,8 +1253,8 @@ cube_distance(PG_FUNCTION_ARGS) Datum distance_taxicab(PG_FUNCTION_ARGS) { - NDBOX *a = PG_GETARG_NDBOX(0), - *b = PG_GETARG_NDBOX(1); + NDBOX *a = PG_GETARG_NDBOX_P(0), + *b = PG_GETARG_NDBOX_P(1); bool swapped = false; double distance; int i; @@ -1277,8 +1297,8 @@ distance_taxicab(PG_FUNCTION_ARGS) Datum distance_chebyshev(PG_FUNCTION_ARGS) { - NDBOX *a = PG_GETARG_NDBOX(0), - *b = PG_GETARG_NDBOX(1); + NDBOX *a = PG_GETARG_NDBOX_P(0), + *b = PG_GETARG_NDBOX_P(1); bool swapped = false; double d, distance; @@ -1331,24 +1351,82 @@ g_cube_distance(PG_FUNCTION_ARGS) { GISTENTRY *entry = (GISTENTRY *) PG_GETARG_POINTER(0); StrategyNumber strategy = (StrategyNumber) PG_GETARG_UINT16(2); - NDBOX *cube = DatumGetNDBOX(entry->key); + NDBOX *cube = DatumGetNDBOXP(entry->key); double retval; if (strategy == CubeKNNDistanceCoord) { + /* + * Handle ordering by ~> operator. See comments of cube_coord_llur() + * for details + */ int coord = PG_GETARG_INT32(1); + bool isLeaf = GistPageIsLeaf(entry->page); + bool inverse = false; - if (DIM(cube) == 0) - retval = 0.0; - else if (IS_POINT(cube)) - retval = cube->x[(coord - 1) % DIM(cube)]; + /* 0 is the only unsupported coordinate value */ + if (coord == 0) + ereport(ERROR, + (errcode(ERRCODE_ARRAY_ELEMENT_ERROR), + errmsg("zero cube index is not defined"))); + + /* Return inversed value for negative coordinate */ + if (coord < 0) + { + coord = -coord; + inverse = true; + } + + if (coord <= 2 * DIM(cube)) + { + /* dimension index */ + int index = (coord - 1) / 2; + + /* whether this is upper bound (lower bound otherwise) */ + bool upper = ((coord - 1) % 2 == 1); + + if (IS_POINT(cube)) + { + retval = cube->x[index]; + } + else + { + if (isLeaf) + { + /* For leaf just return required upper/lower bound */ + if (upper) + retval = Max(cube->x[index], cube->x[index + DIM(cube)]); + else + retval = Min(cube->x[index], cube->x[index + DIM(cube)]); + } + else + { + /* + * For non-leaf we should always return lower bound, + * because even upper bound of a child in the subtree can + * be as small as our lower bound. For inversed case we + * return upper bound because it becomes lower bound for + * inversed value. 
+ */ + if (!inverse) + retval = Min(cube->x[index], cube->x[index + DIM(cube)]); + else + retval = Max(cube->x[index], cube->x[index + DIM(cube)]); + } + } + } else - retval = Min(cube->x[(coord - 1) % DIM(cube)], - cube->x[(coord - 1) % DIM(cube) + DIM(cube)]); + { + retval = 0.0; + } + + /* Inverse return value if needed */ + if (inverse) + retval = -retval; } else { - NDBOX *query = PG_GETARG_NDBOX(1); + NDBOX *query = PG_GETARG_NDBOX_P(1); switch (strategy) { @@ -1385,14 +1463,14 @@ distance_1D(double a1, double a2, double b1, double b2) return (Min(a1, a2) - Max(b1, b2)); /* the rest are all sorts of intersections */ - return (0.0); + return 0.0; } /* Test if a box is also a point */ Datum cube_is_point(PG_FUNCTION_ARGS) { - NDBOX *cube = PG_GETARG_NDBOX(0); + NDBOX *cube = PG_GETARG_NDBOX_P(0); bool result; result = cube_is_point_internal(cube); @@ -1427,7 +1505,7 @@ cube_is_point_internal(NDBOX *cube) Datum cube_dim(PG_FUNCTION_ARGS) { - NDBOX *c = PG_GETARG_NDBOX(0); + NDBOX *c = PG_GETARG_NDBOX_P(0); int dim = DIM(c); PG_FREE_IF_COPY(c, 0); @@ -1438,7 +1516,7 @@ cube_dim(PG_FUNCTION_ARGS) Datum cube_ll_coord(PG_FUNCTION_ARGS) { - NDBOX *c = PG_GETARG_NDBOX(0); + NDBOX *c = PG_GETARG_NDBOX_P(0); int n = PG_GETARG_INT32(1); double result; @@ -1455,7 +1533,7 @@ cube_ll_coord(PG_FUNCTION_ARGS) Datum cube_ur_coord(PG_FUNCTION_ARGS) { - NDBOX *c = PG_GETARG_NDBOX(0); + NDBOX *c = PG_GETARG_NDBOX_P(0); int n = PG_GETARG_INT32(1); double result; @@ -1476,7 +1554,7 @@ cube_ur_coord(PG_FUNCTION_ARGS) Datum cube_coord(PG_FUNCTION_ARGS) { - NDBOX *cube = PG_GETARG_NDBOX(0); + NDBOX *cube = PG_GETARG_NDBOX_P(0); int coord = PG_GETARG_INT32(1); if (coord <= 0 || coord > 2 * DIM(cube)) @@ -1491,50 +1569,92 @@ cube_coord(PG_FUNCTION_ARGS) } -/* - * This function works like cube_coord(), - * but rearranges coordinates of corners to get cube representation - * in the form of (lower left, upper right). - * For historical reasons that extension allows us to create cubes in form - * ((2,1),(1,2)) and instead of normalizing such cube to ((1,1),(2,2)) it - * stores cube in original way. But to get cubes ordered by one of dimensions - * directly from the index without extra sort step we need some - * representation-independent coordinate getter. This function implements it. +/*---- + * This function works like cube_coord(), but rearranges coordinates in the + * way suitable to support coordinate ordering using KNN-GiST. For historical + * reasons this extension allows us to create cubes in form ((2,1),(1,2)) and + * instead of normalizing such cube to ((1,1),(2,2)) it stores cube in original + * way. But in order to get cubes ordered by one of dimensions from the index + * without explicit sort step we need this representation-independent coordinate + * getter. Moreover, indexed dataset may contain cubes of different dimensions + * number. Accordingly, this coordinate getter should be able to return + * lower/upper bound for particular dimension independently on number of cube + * dimensions. Also, KNN-GiST supports only ascending sorting. In order to + * support descending sorting, this function returns inverse of value when + * negative coordinate is given. + * + * Long story short, this function uses following meaning of coordinates: + * # (2 * N - 1) -- lower bound of Nth dimension, + * # (2 * N) -- upper bound of Nth dimension, + * # - (2 * N - 1) -- negative of lower bound of Nth dimension, + * # - (2 * N) -- negative of upper bound of Nth dimension. 
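To make that numbering concrete, a short sketch whose expected values follow directly from the rules just listed (and match the regression-test changes later in this patch):

-- A 2-D box stored as lower-left (1, 3), upper-right (2, 4):
SELECT '(1, 3),(2, 4)'::cube ~> 1;     -- 1   lower bound of dimension 1
SELECT '(1, 3),(2, 4)'::cube ~> 2;     -- 2   upper bound of dimension 1
SELECT '(1, 3),(2, 4)'::cube ~> 4;     -- 4   upper bound of dimension 2
SELECT '(1, 3),(2, 4)'::cube ~> (-3);  -- -3  negated lower bound of dimension 2, for descending sorts
SELECT '(1, 3),(2, 4)'::cube ~> 5;     -- 0   coordinate beyond the cube's dimensionality

Ordering by c ~> n (or c ~> (-n) for descending order) is what allows a KNN-GiST index to return rows already sorted by a single bound, as the test_cube queries further down exercise.
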
+ * + * When given coordinate exceeds number of cube dimensions, then 0 returned + * (reproducing logic of GiST indexing of variable-length cubes). */ Datum cube_coord_llur(PG_FUNCTION_ARGS) { - NDBOX *cube = PG_GETARG_NDBOX(0); + NDBOX *cube = PG_GETARG_NDBOX_P(0); int coord = PG_GETARG_INT32(1); + bool inverse = false; + float8 result; - if (coord <= 0 || coord > 2 * DIM(cube)) + /* 0 is the only unsupported coordinate value */ + if (coord == 0) ereport(ERROR, (errcode(ERRCODE_ARRAY_ELEMENT_ERROR), - errmsg("cube index %d is out of bounds", coord))); + errmsg("zero cube index is not defined"))); + + /* Return inversed value for negative coordinate */ + if (coord < 0) + { + coord = -coord; + inverse = true; + } - if (coord <= DIM(cube)) + if (coord <= 2 * DIM(cube)) { + /* dimension index */ + int index = (coord - 1) / 2; + + /* whether this is upper bound (lower bound otherwise) */ + bool upper = ((coord - 1) % 2 == 1); + if (IS_POINT(cube)) - PG_RETURN_FLOAT8(cube->x[coord - 1]); + { + result = cube->x[index]; + } else - PG_RETURN_FLOAT8(Min(cube->x[coord - 1], - cube->x[coord - 1 + DIM(cube)])); + { + if (upper) + result = Max(cube->x[index], cube->x[index + DIM(cube)]); + else + result = Min(cube->x[index], cube->x[index + DIM(cube)]); + } } else { - if (IS_POINT(cube)) - PG_RETURN_FLOAT8(cube->x[(coord - 1) % DIM(cube)]); - else - PG_RETURN_FLOAT8(Max(cube->x[coord - 1], - cube->x[coord - 1 - DIM(cube)])); + /* + * Return zero if coordinate is out of bound. That reproduces logic + * of how cubes with low dimension number are expanded during GiST + * indexing. + */ + result = 0.0; } + + /* Inverse value if needed */ + if (inverse) + result = -result; + + PG_RETURN_FLOAT8(result); } /* Increase or decrease box size by a radius in at least n dimensions. 
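A quick illustration of that behaviour (a sketch, not part of this patch's changes, which only touch the fmgr macros in this function): when n exceeds the cube's dimensionality and the radius is positive, zero-width dimensions are appended and then enlarged as well.

SELECT cube_enlarge('(1, 2),(3, 4)'::cube, 0.5, 3);
-- (0.5, 1.5, -0.5),(3.5, 4.5, 0.5): each existing bound moves outward by 0.5,
-- and a third dimension starting as [0, 0] is added and widened to [-0.5, 0.5].
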
*/ Datum cube_enlarge(PG_FUNCTION_ARGS) { - NDBOX *a = PG_GETARG_NDBOX(0); + NDBOX *a = PG_GETARG_NDBOX_P(0); double r = PG_GETARG_FLOAT8(1); int32 n = PG_GETARG_INT32(2); NDBOX *result; @@ -1592,7 +1712,7 @@ cube_enlarge(PG_FUNCTION_ARGS) } PG_FREE_IF_COPY(a, 0); - PG_RETURN_NDBOX(result); + PG_RETURN_NDBOX_P(result); } /* Create a one dimensional box with identical upper and lower coordinates */ @@ -1610,7 +1730,7 @@ cube_f8(PG_FUNCTION_ARGS) SET_POINT_BIT(result); result->x[0] = x; - PG_RETURN_NDBOX(result); + PG_RETURN_NDBOX_P(result); } /* Create a one dimensional box */ @@ -1641,7 +1761,7 @@ cube_f8_f8(PG_FUNCTION_ARGS) result->x[1] = x1; } - PG_RETURN_NDBOX(result); + PG_RETURN_NDBOX_P(result); } /* Add a dimension to an existing cube with the same values for the new @@ -1649,12 +1769,19 @@ cube_f8_f8(PG_FUNCTION_ARGS) Datum cube_c_f8(PG_FUNCTION_ARGS) { - NDBOX *cube = PG_GETARG_NDBOX(0); + NDBOX *cube = PG_GETARG_NDBOX_P(0); double x = PG_GETARG_FLOAT8(1); NDBOX *result; int size; int i; + if (DIM(cube) + 1 > CUBE_MAX_DIM) + ereport(ERROR, + (errcode(ERRCODE_PROGRAM_LIMIT_EXCEEDED), + errmsg("can't extend cube"), + errdetail("A cube cannot have more than %d dimensions.", + CUBE_MAX_DIM))); + if (IS_POINT(cube)) { size = POINT_SIZE((DIM(cube) + 1)); @@ -1682,20 +1809,27 @@ cube_c_f8(PG_FUNCTION_ARGS) } PG_FREE_IF_COPY(cube, 0); - PG_RETURN_NDBOX(result); + PG_RETURN_NDBOX_P(result); } /* Add a dimension to an existing cube */ Datum cube_c_f8_f8(PG_FUNCTION_ARGS) { - NDBOX *cube = PG_GETARG_NDBOX(0); + NDBOX *cube = PG_GETARG_NDBOX_P(0); double x1 = PG_GETARG_FLOAT8(1); double x2 = PG_GETARG_FLOAT8(2); NDBOX *result; int size; int i; + if (DIM(cube) + 1 > CUBE_MAX_DIM) + ereport(ERROR, + (errcode(ERRCODE_PROGRAM_LIMIT_EXCEEDED), + errmsg("can't extend cube"), + errdetail("A cube cannot have more than %d dimensions.", + CUBE_MAX_DIM))); + if (IS_POINT(cube) && (x1 == x2)) { size = POINT_SIZE((DIM(cube) + 1)); @@ -1723,5 +1857,5 @@ cube_c_f8_f8(PG_FUNCTION_ARGS) } PG_FREE_IF_COPY(cube, 0); - PG_RETURN_NDBOX(result); + PG_RETURN_NDBOX_P(result); } diff --git a/contrib/cube/cube.control b/contrib/cube/cube.control index b03cfa0a58..f39a838e3f 100644 --- a/contrib/cube/cube.control +++ b/contrib/cube/cube.control @@ -1,5 +1,5 @@ # cube extension comment = 'data type for multidimensional cubes' -default_version = '1.2' +default_version = '1.4' module_pathname = '$libdir/cube' relocatable = true diff --git a/contrib/cube/cubedata.h b/contrib/cube/cubedata.h index 6e6ddfd3d7..dbe7d4f742 100644 --- a/contrib/cube/cubedata.h +++ b/contrib/cube/cubedata.h @@ -49,9 +49,9 @@ typedef struct NDBOX #define CUBE_SIZE(_dim) (offsetof(NDBOX, x) + sizeof(double)*(_dim)*2) /* fmgr interface macros */ -#define DatumGetNDBOX(x) ((NDBOX *) PG_DETOAST_DATUM(x)) -#define PG_GETARG_NDBOX(x) DatumGetNDBOX(PG_GETARG_DATUM(x)) -#define PG_RETURN_NDBOX(x) PG_RETURN_POINTER(x) +#define DatumGetNDBOXP(x) ((NDBOX *) PG_DETOAST_DATUM(x)) +#define PG_GETARG_NDBOX_P(x) DatumGetNDBOXP(PG_GETARG_DATUM(x)) +#define PG_RETURN_NDBOX_P(x) PG_RETURN_POINTER(x) /* GiST operator strategy numbers */ #define CubeKNNDistanceCoord 15 /* ~> */ diff --git a/contrib/cube/cubeparse.y b/contrib/cube/cubeparse.y index 1b65fa967c..deb2efdc0d 100644 --- a/contrib/cube/cubeparse.y +++ b/contrib/cube/cubeparse.y @@ -7,7 +7,7 @@ #include "postgres.h" #include "cubedata.h" -#include "utils/builtins.h" +#include "utils/float.h" /* All grammar constructs return strings */ #define YYSTYPE char * diff --git a/contrib/cube/cubescan.l 
b/contrib/cube/cubescan.l index dada917820..bd400e3684 100644 --- a/contrib/cube/cubescan.l +++ b/contrib/cube/cubescan.l @@ -4,6 +4,8 @@ * contrib/cube/cubescan.l */ +/* LCOV_EXCL_START */ + /* No reason to constrain amount of data slurped */ #define YY_READ_BUF_SIZE 16777216 @@ -56,6 +58,8 @@ NaN [nN][aA][nN] %% +/* LCOV_EXCL_STOP */ + /* result is not used, but Bison expects this signature */ void yyerror(NDBOX **result, const char *message) diff --git a/contrib/cube/expected/cube.out b/contrib/cube/expected/cube.out index 328b3b5f5d..1a65e6944a 100644 --- a/contrib/cube/expected/cube.out +++ b/contrib/cube/expected/cube.out @@ -62,90 +62,6 @@ SELECT '-1.0'::cube AS cube; (-1) (1 row) -SELECT '1e27'::cube AS cube; - cube ---------- - (1e+27) -(1 row) - -SELECT '-1e27'::cube AS cube; - cube ----------- - (-1e+27) -(1 row) - -SELECT '1.0e27'::cube AS cube; - cube ---------- - (1e+27) -(1 row) - -SELECT '-1.0e27'::cube AS cube; - cube ----------- - (-1e+27) -(1 row) - -SELECT '1e+27'::cube AS cube; - cube ---------- - (1e+27) -(1 row) - -SELECT '-1e+27'::cube AS cube; - cube ----------- - (-1e+27) -(1 row) - -SELECT '1.0e+27'::cube AS cube; - cube ---------- - (1e+27) -(1 row) - -SELECT '-1.0e+27'::cube AS cube; - cube ----------- - (-1e+27) -(1 row) - -SELECT '1e-7'::cube AS cube; - cube ---------- - (1e-07) -(1 row) - -SELECT '-1e-7'::cube AS cube; - cube ----------- - (-1e-07) -(1 row) - -SELECT '1.0e-7'::cube AS cube; - cube ---------- - (1e-07) -(1 row) - -SELECT '-1.0e-7'::cube AS cube; - cube ----------- - (-1e-07) -(1 row) - -SELECT '1e-300'::cube AS cube; - cube ----------- - (1e-300) -(1 row) - -SELECT '-1e-300'::cube AS cube; - cube ------------ - (-1e-300) -(1 row) - SELECT 'infinity'::cube AS cube; cube ------------ @@ -164,24 +80,6 @@ SELECT 'NaN'::cube AS cube; (NaN) (1 row) -SELECT '1234567890123456'::cube AS cube; - cube ------------------------- - (1.23456789012346e+15) -(1 row) - -SELECT '+1234567890123456'::cube AS cube; - cube ------------------------- - (1.23456789012346e+15) -(1 row) - -SELECT '-1234567890123456'::cube AS cube; - cube -------------------------- - (-1.23456789012346e+15) -(1 row) - SELECT '.1234567890123456'::cube AS cube; cube --------------------- @@ -520,6 +418,17 @@ SELECT cube_subset(cube('(1,3,5),(6,7,8)'), ARRAY[4,0]); ERROR: Index out of bounds SELECT cube_subset(cube('(6,7,8),(6,7,8)'), ARRAY[4,0]); ERROR: Index out of bounds +-- test for limits: this should pass +SELECT cube_subset(cube('(6,7,8),(6,7,8)'), array(SELECT 1 as a FROM generate_series(1,100))); + cube_subset +-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + (6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6) +(1 row) + +-- and this should fail +SELECT cube_subset(cube('(6,7,8),(6,7,8)'), array(SELECT 1 as a FROM generate_series(1,101))); +ERROR: array is too long +DETAIL: A cube cannot have more than 100 dimensions. -- -- Test point processing -- @@ -592,6 +501,7 @@ SELECT cube(cube(1,2), 42, 24); -- cube_c_f8_f8 -- -- Testing limit of CUBE_MAX_DIM dimensions check in cube_in. 
-- +-- create too big cube from literal select '(0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0)'::cube; ERROR: invalid input syntax for cube LINE 1: select '(0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0... @@ -602,6 +512,34 @@ ERROR: invalid input syntax for cube LINE 1: select '(0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0... ^ DETAIL: A cube cannot have more than 100 dimensions. +-- from an array +select cube(array(SELECT 0 as a FROM generate_series(1,101))); +ERROR: array is too long +DETAIL: A cube cannot have more than 100 dimensions. +select cube(array(SELECT 0 as a FROM generate_series(1,101)),array(SELECT 0 as a FROM generate_series(1,101))); +ERROR: can't extend cube +DETAIL: A cube cannot have more than 100 dimensions. +-- extend cube beyond limit +-- this should work +select cube(array(SELECT 0 as a FROM generate_series(1,100))); + cube +-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + (0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) +(1 row) + +select cube(array(SELECT 0 as a FROM generate_series(1,100)),array(SELECT 0 as a FROM generate_series(1,100))); + cube +-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + (0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) +(1 row) + +-- this should fail +select cube(cube(array(SELECT 0 as a FROM generate_series(1,100))), 0); +ERROR: can't extend cube +DETAIL: A cube cannot have more than 100 dimensions. +select cube(cube(array(SELECT 0 as a FROM generate_series(1,100)),array(SELECT 0 as a FROM generate_series(1,100))), 0, 0); +ERROR: can't extend cube +DETAIL: A cube cannot have more than 100 dimensions. -- -- testing the operators -- @@ -1532,37 +1470,41 @@ SELECT cube(array[40,50,60], array[10,20,30])~>1; SELECT cube(array[10,20,30], array[40,50,60])~>2; ?column? ---------- - 20 + 40 (1 row) SELECT cube(array[40,50,60], array[10,20,30])~>2; ?column? ---------- - 20 + 40 (1 row) SELECT cube(array[10,20,30], array[40,50,60])~>3; ?column? ---------- - 30 + 20 (1 row) SELECT cube(array[40,50,60], array[10,20,30])~>3; ?column? 
---------- - 30 + 20 (1 row) SELECT cube(array[40,50,60], array[10,20,30])~>0; -ERROR: cube index 0 is out of bounds +ERROR: zero cube index is not defined SELECT cube(array[40,50,60], array[10,20,30])~>4; ?column? ---------- - 40 + 50 (1 row) SELECT cube(array[40,50,60], array[10,20,30])~>(-1); -ERROR: cube index -1 is out of bounds + ?column? +---------- + -10 +(1 row) + -- Load some example data and build the index -- CREATE TABLE test_cube (c cube); @@ -1589,159 +1531,414 @@ SELECT * FROM test_cube WHERE c && '(3000,1000),(0,0)' GROUP BY c ORDER BY c; (2424, 160),(2424, 81) (5 rows) --- kNN with index +-- Test index-only scans +SET enable_bitmapscan = false; +EXPLAIN (COSTS OFF) +SELECT c FROM test_cube WHERE c <@ '(3000,1000),(0,0)' ORDER BY c; + QUERY PLAN +-------------------------------------------------------- + Sort + Sort Key: c + -> Index Only Scan using test_cube_ix on test_cube + Index Cond: (c <@ '(3000, 1000),(0, 0)'::cube) +(4 rows) + +SELECT c FROM test_cube WHERE c <@ '(3000,1000),(0,0)' ORDER BY c; + c +------------------------- + (337, 455),(240, 359) + (759, 187),(662, 163) + (1444, 403),(1346, 344) + (2424, 160),(2424, 81) +(4 rows) + +RESET enable_bitmapscan; +-- Test kNN +INSERT INTO test_cube VALUES ('(1,1)'), ('(100000)'), ('(0, 100000)'); -- Some corner cases +SET enable_seqscan = false; +-- Test different metrics SELECT *, c <-> '(100, 100),(500, 500)'::cube as dist FROM test_cube ORDER BY c <-> '(100, 100),(500, 500)'::cube LIMIT 5; c | dist -------------------------+------------------ (337, 455),(240, 359) | 0 + (1, 1) | 140.007142674936 (759, 187),(662, 163) | 162 (948, 1201),(907, 1156) | 772.000647668122 (1444, 403),(1346, 344) | 846 - (369, 1457),(278, 1409) | 909 (5 rows) SELECT *, c <=> '(100, 100),(500, 500)'::cube as dist FROM test_cube ORDER BY c <=> '(100, 100),(500, 500)'::cube LIMIT 5; c | dist -------------------------+------ (337, 455),(240, 359) | 0 + (1, 1) | 99 (759, 187),(662, 163) | 162 (948, 1201),(907, 1156) | 656 (1444, 403),(1346, 344) | 846 +(5 rows) + +SELECT *, c <#> '(100, 100),(500, 500)'::cube as dist FROM test_cube ORDER BY c <#> '(100, 100),(500, 500)'::cube LIMIT 5; + c | dist +-------------------------+------ + (337, 455),(240, 359) | 0 + (759, 187),(662, 163) | 162 + (1, 1) | 198 + (1444, 403),(1346, 344) | 846 (369, 1457),(278, 1409) | 909 (5 rows) +-- Test sorting by coordinates +SELECT c~>1, c FROM test_cube ORDER BY c~>1 LIMIT 15; -- ascending by left bound + ?column? | c +----------+--------------------------- + 0 | (0, 100000) + 1 | (1, 1) + 3 | (54, 38679),(3, 38602) + 15 | (83, 10271),(15, 10265) + 64 | (122, 46832),(64, 46762) + 92 | (167, 17214),(92, 17184) + 107 | (161, 24465),(107, 24374) + 120 | (162, 26040),(120, 25963) + 138 | (154, 4019),(138, 3990) + 175 | (259, 1850),(175, 1820) + 179 | (207, 40886),(179, 40879) + 204 | (288, 49588),(204, 49571) + 226 | (270, 32616),(226, 32607) + 235 | (318, 31489),(235, 31404) + 240 | (337, 455),(240, 359) +(15 rows) + +SELECT c~>2, c FROM test_cube ORDER BY c~>2 LIMIT 15; -- ascending by right bound + ?column? 
| c +----------+--------------------------- + 0 | (0, 100000) + 1 | (1, 1) + 54 | (54, 38679),(3, 38602) + 83 | (83, 10271),(15, 10265) + 122 | (122, 46832),(64, 46762) + 154 | (154, 4019),(138, 3990) + 161 | (161, 24465),(107, 24374) + 162 | (162, 26040),(120, 25963) + 167 | (167, 17214),(92, 17184) + 207 | (207, 40886),(179, 40879) + 259 | (259, 1850),(175, 1820) + 270 | (270, 29508),(264, 29440) + 270 | (270, 32616),(226, 32607) + 288 | (288, 49588),(204, 49571) + 318 | (318, 31489),(235, 31404) +(15 rows) + +SELECT c~>3, c FROM test_cube ORDER BY c~>3 LIMIT 15; -- ascending by lower bound + ?column? | c +----------+--------------------------- + 0 | (100000) + 1 | (1, 1) + 6 | (30333, 50),(30273, 6) + 43 | (43301, 75),(43227, 43) + 51 | (19650, 142),(19630, 51) + 81 | (2424, 160),(2424, 81) + 108 | (3449, 171),(3354, 108) + 109 | (18037, 155),(17941, 109) + 114 | (28511, 208),(28479, 114) + 118 | (19946, 217),(19941, 118) + 139 | (16906, 191),(16816, 139) + 163 | (759, 187),(662, 163) + 181 | (22684, 266),(22656, 181) + 213 | (24423, 255),(24360, 213) + 222 | (45989, 249),(45910, 222) +(15 rows) + +SELECT c~>4, c FROM test_cube ORDER BY c~>4 LIMIT 15; -- ascending by upper bound + ?column? | c +----------+--------------------------- + 0 | (100000) + 1 | (1, 1) + 50 | (30333, 50),(30273, 6) + 75 | (43301, 75),(43227, 43) + 142 | (19650, 142),(19630, 51) + 155 | (18037, 155),(17941, 109) + 160 | (2424, 160),(2424, 81) + 171 | (3449, 171),(3354, 108) + 187 | (759, 187),(662, 163) + 191 | (16906, 191),(16816, 139) + 208 | (28511, 208),(28479, 114) + 217 | (19946, 217),(19941, 118) + 249 | (45989, 249),(45910, 222) + 255 | (24423, 255),(24360, 213) + 266 | (22684, 266),(22656, 181) +(15 rows) + +SELECT c~>(-1), c FROM test_cube ORDER BY c~>(-1) LIMIT 15; -- descending by left bound + ?column? | c +----------+------------------------------- + -100000 | (100000) + -49951 | (50027, 49230),(49951, 49214) + -49937 | (49980, 35004),(49937, 34963) + -49927 | (49985, 6436),(49927, 6338) + -49908 | (49999, 27218),(49908, 27176) + -49905 | (49954, 1340),(49905, 1294) + -49902 | (49944, 25163),(49902, 25153) + -49898 | (49981, 34876),(49898, 34786) + -49897 | (49957, 43390),(49897, 43384) + -49848 | (49853, 18504),(49848, 18503) + -49818 | (49902, 41752),(49818, 41746) + -49810 | (49907, 30225),(49810, 30158) + -49808 | (49843, 5175),(49808, 5145) + -49805 | (49887, 24274),(49805, 24184) + -49798 | (49847, 7128),(49798, 7067) +(15 rows) + +SELECT c~>(-2), c FROM test_cube ORDER BY c~>(-2) LIMIT 15; -- descending by right bound + ?column? | c +----------+------------------------------- + -100000 | (100000) + -50027 | (50027, 49230),(49951, 49214) + -49999 | (49999, 27218),(49908, 27176) + -49985 | (49985, 6436),(49927, 6338) + -49981 | (49981, 34876),(49898, 34786) + -49980 | (49980, 35004),(49937, 34963) + -49957 | (49957, 43390),(49897, 43384) + -49954 | (49954, 1340),(49905, 1294) + -49944 | (49944, 25163),(49902, 25153) + -49907 | (49907, 30225),(49810, 30158) + -49902 | (49902, 41752),(49818, 41746) + -49887 | (49887, 24274),(49805, 24184) + -49853 | (49853, 18504),(49848, 18503) + -49847 | (49847, 7128),(49798, 7067) + -49843 | (49843, 5175),(49808, 5145) +(15 rows) + +SELECT c~>(-3), c FROM test_cube ORDER BY c~>(-3) LIMIT 15; -- descending by lower bound + ?column? 
| c +----------+------------------------------- + -100000 | (0, 100000) + -49992 | (30746, 50040),(30727, 49992) + -49987 | (36311, 50073),(36258, 49987) + -49934 | (3531, 49962),(3463, 49934) + -49915 | (17954, 49975),(17865, 49915) + -49914 | (2168, 50012),(2108, 49914) + -49913 | (31287, 49923),(31236, 49913) + -49885 | (21551, 49983),(21492, 49885) + -49878 | (43925, 49912),(43888, 49878) + -49849 | (19128, 49932),(19112, 49849) + -49844 | (38266, 49852),(38233, 49844) + -49836 | (14913, 49873),(14849, 49836) + -49834 | (37595, 49849),(37581, 49834) + -49830 | (46151, 49848),(46058, 49830) + -49818 | (29261, 49910),(29247, 49818) +(15 rows) + +SELECT c~>(-4), c FROM test_cube ORDER BY c~>(-4) LIMIT 15; -- descending by upper bound + ?column? | c +----------+------------------------------- + -100000 | (0, 100000) + -50073 | (36311, 50073),(36258, 49987) + -50040 | (30746, 50040),(30727, 49992) + -50012 | (2168, 50012),(2108, 49914) + -49983 | (21551, 49983),(21492, 49885) + -49975 | (17954, 49975),(17865, 49915) + -49962 | (3531, 49962),(3463, 49934) + -49932 | (19128, 49932),(19112, 49849) + -49923 | (31287, 49923),(31236, 49913) + -49912 | (43925, 49912),(43888, 49878) + -49910 | (29261, 49910),(29247, 49818) + -49873 | (14913, 49873),(14849, 49836) + -49858 | (20007, 49858),(19921, 49778) + -49852 | (38266, 49852),(38233, 49844) + -49849 | (37595, 49849),(37581, 49834) +(15 rows) + +-- Same queries with sequential scan (should give the same results as above) +RESET enable_seqscan; +SET enable_indexscan = OFF; +SELECT *, c <-> '(100, 100),(500, 500)'::cube as dist FROM test_cube ORDER BY c <-> '(100, 100),(500, 500)'::cube LIMIT 5; + c | dist +-------------------------+------------------ + (337, 455),(240, 359) | 0 + (1, 1) | 140.007142674936 + (759, 187),(662, 163) | 162 + (948, 1201),(907, 1156) | 772.000647668122 + (1444, 403),(1346, 344) | 846 +(5 rows) + +SELECT *, c <=> '(100, 100),(500, 500)'::cube as dist FROM test_cube ORDER BY c <=> '(100, 100),(500, 500)'::cube LIMIT 5; + c | dist +-------------------------+------ + (337, 455),(240, 359) | 0 + (1, 1) | 99 + (759, 187),(662, 163) | 162 + (948, 1201),(907, 1156) | 656 + (1444, 403),(1346, 344) | 846 +(5 rows) + SELECT *, c <#> '(100, 100),(500, 500)'::cube as dist FROM test_cube ORDER BY c <#> '(100, 100),(500, 500)'::cube LIMIT 5; c | dist -------------------------+------ (337, 455),(240, 359) | 0 (759, 187),(662, 163) | 162 + (1, 1) | 198 (1444, 403),(1346, 344) | 846 (369, 1457),(278, 1409) | 909 - (948, 1201),(907, 1156) | 1063 (5 rows) --- kNN-based sorting -SELECT * FROM test_cube ORDER BY c~>1 LIMIT 15; -- ascending by 1st coordinate of lower left corner - c ---------------------------- - (54, 38679),(3, 38602) - (83, 10271),(15, 10265) - (122, 46832),(64, 46762) - (167, 17214),(92, 17184) - (161, 24465),(107, 24374) - (162, 26040),(120, 25963) - (154, 4019),(138, 3990) - (259, 1850),(175, 1820) - (207, 40886),(179, 40879) - (288, 49588),(204, 49571) - (270, 32616),(226, 32607) - (318, 31489),(235, 31404) - (337, 455),(240, 359) - (270, 29508),(264, 29440) - (369, 1457),(278, 1409) +SELECT c~>1, c FROM test_cube ORDER BY c~>1 LIMIT 15; -- ascending by left bound + ?column? 
| c +----------+--------------------------- + 0 | (0, 100000) + 1 | (1, 1) + 3 | (54, 38679),(3, 38602) + 15 | (83, 10271),(15, 10265) + 64 | (122, 46832),(64, 46762) + 92 | (167, 17214),(92, 17184) + 107 | (161, 24465),(107, 24374) + 120 | (162, 26040),(120, 25963) + 138 | (154, 4019),(138, 3990) + 175 | (259, 1850),(175, 1820) + 179 | (207, 40886),(179, 40879) + 204 | (288, 49588),(204, 49571) + 226 | (270, 32616),(226, 32607) + 235 | (318, 31489),(235, 31404) + 240 | (337, 455),(240, 359) (15 rows) -SELECT * FROM test_cube ORDER BY c~>4 LIMIT 15; -- ascending by 2nd coordinate or upper right corner - c ---------------------------- - (30333, 50),(30273, 6) - (43301, 75),(43227, 43) - (19650, 142),(19630, 51) - (2424, 160),(2424, 81) - (3449, 171),(3354, 108) - (18037, 155),(17941, 109) - (28511, 208),(28479, 114) - (19946, 217),(19941, 118) - (16906, 191),(16816, 139) - (759, 187),(662, 163) - (22684, 266),(22656, 181) - (24423, 255),(24360, 213) - (45989, 249),(45910, 222) - (11399, 377),(11360, 294) - (12162, 389),(12103, 309) +SELECT c~>2, c FROM test_cube ORDER BY c~>2 LIMIT 15; -- ascending by right bound + ?column? | c +----------+--------------------------- + 0 | (0, 100000) + 1 | (1, 1) + 54 | (54, 38679),(3, 38602) + 83 | (83, 10271),(15, 10265) + 122 | (122, 46832),(64, 46762) + 154 | (154, 4019),(138, 3990) + 161 | (161, 24465),(107, 24374) + 162 | (162, 26040),(120, 25963) + 167 | (167, 17214),(92, 17184) + 207 | (207, 40886),(179, 40879) + 259 | (259, 1850),(175, 1820) + 270 | (270, 29508),(264, 29440) + 270 | (270, 32616),(226, 32607) + 288 | (288, 49588),(204, 49571) + 318 | (318, 31489),(235, 31404) (15 rows) -SELECT * FROM test_cube ORDER BY c~>1 DESC LIMIT 15; -- descending by 1st coordinate of lower left corner - c -------------------------------- - (50027, 49230),(49951, 49214) - (49980, 35004),(49937, 34963) - (49985, 6436),(49927, 6338) - (49999, 27218),(49908, 27176) - (49954, 1340),(49905, 1294) - (49944, 25163),(49902, 25153) - (49981, 34876),(49898, 34786) - (49957, 43390),(49897, 43384) - (49853, 18504),(49848, 18503) - (49902, 41752),(49818, 41746) - (49907, 30225),(49810, 30158) - (49843, 5175),(49808, 5145) - (49887, 24274),(49805, 24184) - (49847, 7128),(49798, 7067) - (49820, 7990),(49771, 7967) +SELECT c~>3, c FROM test_cube ORDER BY c~>3 LIMIT 15; -- ascending by lower bound + ?column? 
| c +----------+--------------------------- + 0 | (100000) + 1 | (1, 1) + 6 | (30333, 50),(30273, 6) + 43 | (43301, 75),(43227, 43) + 51 | (19650, 142),(19630, 51) + 81 | (2424, 160),(2424, 81) + 108 | (3449, 171),(3354, 108) + 109 | (18037, 155),(17941, 109) + 114 | (28511, 208),(28479, 114) + 118 | (19946, 217),(19941, 118) + 139 | (16906, 191),(16816, 139) + 163 | (759, 187),(662, 163) + 181 | (22684, 266),(22656, 181) + 213 | (24423, 255),(24360, 213) + 222 | (45989, 249),(45910, 222) (15 rows) -SELECT * FROM test_cube ORDER BY c~>4 DESC LIMIT 15; -- descending by 2nd coordinate or upper right corner - c -------------------------------- - (36311, 50073),(36258, 49987) - (30746, 50040),(30727, 49992) - (2168, 50012),(2108, 49914) - (21551, 49983),(21492, 49885) - (17954, 49975),(17865, 49915) - (3531, 49962),(3463, 49934) - (19128, 49932),(19112, 49849) - (31287, 49923),(31236, 49913) - (43925, 49912),(43888, 49878) - (29261, 49910),(29247, 49818) - (14913, 49873),(14849, 49836) - (20007, 49858),(19921, 49778) - (38266, 49852),(38233, 49844) - (37595, 49849),(37581, 49834) - (46151, 49848),(46058, 49830) +SELECT c~>4, c FROM test_cube ORDER BY c~>4 LIMIT 15; -- ascending by upper bound + ?column? | c +----------+--------------------------- + 0 | (100000) + 1 | (1, 1) + 50 | (30333, 50),(30273, 6) + 75 | (43301, 75),(43227, 43) + 142 | (19650, 142),(19630, 51) + 155 | (18037, 155),(17941, 109) + 160 | (2424, 160),(2424, 81) + 171 | (3449, 171),(3354, 108) + 187 | (759, 187),(662, 163) + 191 | (16906, 191),(16816, 139) + 208 | (28511, 208),(28479, 114) + 217 | (19946, 217),(19941, 118) + 249 | (45989, 249),(45910, 222) + 255 | (24423, 255),(24360, 213) + 266 | (22684, 266),(22656, 181) (15 rows) --- same thing for index with points -CREATE TABLE test_point(c cube); -INSERT INTO test_point(SELECT cube(array[c->1,c->2,c->3,c->4]) FROM test_cube); -CREATE INDEX ON test_point USING gist(c); -SELECT * FROM test_point ORDER BY c~>1, c~>2 LIMIT 15; -- ascending by 1st then by 2nd coordinate - c --------------------------- - (54, 38679, 3, 38602) - (83, 10271, 15, 10265) - (122, 46832, 64, 46762) - (154, 4019, 138, 3990) - (161, 24465, 107, 24374) - (162, 26040, 120, 25963) - (167, 17214, 92, 17184) - (207, 40886, 179, 40879) - (259, 1850, 175, 1820) - (270, 29508, 264, 29440) - (270, 32616, 226, 32607) - (288, 49588, 204, 49571) - (318, 31489, 235, 31404) - (326, 18837, 285, 18817) - (337, 455, 240, 359) +SELECT c~>(-1), c FROM test_cube ORDER BY c~>(-1) LIMIT 15; -- descending by left bound + ?column? | c +----------+------------------------------- + -100000 | (100000) + -49951 | (50027, 49230),(49951, 49214) + -49937 | (49980, 35004),(49937, 34963) + -49927 | (49985, 6436),(49927, 6338) + -49908 | (49999, 27218),(49908, 27176) + -49905 | (49954, 1340),(49905, 1294) + -49902 | (49944, 25163),(49902, 25153) + -49898 | (49981, 34876),(49898, 34786) + -49897 | (49957, 43390),(49897, 43384) + -49848 | (49853, 18504),(49848, 18503) + -49818 | (49902, 41752),(49818, 41746) + -49810 | (49907, 30225),(49810, 30158) + -49808 | (49843, 5175),(49808, 5145) + -49805 | (49887, 24274),(49805, 24184) + -49798 | (49847, 7128),(49798, 7067) +(15 rows) + +SELECT c~>(-2), c FROM test_cube ORDER BY c~>(-2) LIMIT 15; -- descending by right bound + ?column? 
| c +----------+------------------------------- + -100000 | (100000) + -50027 | (50027, 49230),(49951, 49214) + -49999 | (49999, 27218),(49908, 27176) + -49985 | (49985, 6436),(49927, 6338) + -49981 | (49981, 34876),(49898, 34786) + -49980 | (49980, 35004),(49937, 34963) + -49957 | (49957, 43390),(49897, 43384) + -49954 | (49954, 1340),(49905, 1294) + -49944 | (49944, 25163),(49902, 25153) + -49907 | (49907, 30225),(49810, 30158) + -49902 | (49902, 41752),(49818, 41746) + -49887 | (49887, 24274),(49805, 24184) + -49853 | (49853, 18504),(49848, 18503) + -49847 | (49847, 7128),(49798, 7067) + -49843 | (49843, 5175),(49808, 5145) +(15 rows) + +SELECT c~>(-3), c FROM test_cube ORDER BY c~>(-3) LIMIT 15; -- descending by lower bound + ?column? | c +----------+------------------------------- + -100000 | (0, 100000) + -49992 | (30746, 50040),(30727, 49992) + -49987 | (36311, 50073),(36258, 49987) + -49934 | (3531, 49962),(3463, 49934) + -49915 | (17954, 49975),(17865, 49915) + -49914 | (2168, 50012),(2108, 49914) + -49913 | (31287, 49923),(31236, 49913) + -49885 | (21551, 49983),(21492, 49885) + -49878 | (43925, 49912),(43888, 49878) + -49849 | (19128, 49932),(19112, 49849) + -49844 | (38266, 49852),(38233, 49844) + -49836 | (14913, 49873),(14849, 49836) + -49834 | (37595, 49849),(37581, 49834) + -49830 | (46151, 49848),(46058, 49830) + -49818 | (29261, 49910),(29247, 49818) (15 rows) -SELECT * FROM test_point ORDER BY c~>4 DESC LIMIT 15; -- descending by 1st coordinate - c ------------------------------- - (30746, 50040, 30727, 49992) - (36311, 50073, 36258, 49987) - (3531, 49962, 3463, 49934) - (17954, 49975, 17865, 49915) - (2168, 50012, 2108, 49914) - (31287, 49923, 31236, 49913) - (21551, 49983, 21492, 49885) - (43925, 49912, 43888, 49878) - (19128, 49932, 19112, 49849) - (38266, 49852, 38233, 49844) - (14913, 49873, 14849, 49836) - (37595, 49849, 37581, 49834) - (46151, 49848, 46058, 49830) - (29261, 49910, 29247, 49818) - (19233, 49824, 19185, 49794) +SELECT c~>(-4), c FROM test_cube ORDER BY c~>(-4) LIMIT 15; -- descending by upper bound + ?column? 
| c +----------+------------------------------- + -100000 | (0, 100000) + -50073 | (36311, 50073),(36258, 49987) + -50040 | (30746, 50040),(30727, 49992) + -50012 | (2168, 50012),(2108, 49914) + -49983 | (21551, 49983),(21492, 49885) + -49975 | (17954, 49975),(17865, 49915) + -49962 | (3531, 49962),(3463, 49934) + -49932 | (19128, 49932),(19112, 49849) + -49923 | (31287, 49923),(31236, 49913) + -49912 | (43925, 49912),(43888, 49878) + -49910 | (29261, 49910),(29247, 49818) + -49873 | (14913, 49873),(14849, 49836) + -49858 | (20007, 49858),(19921, 49778) + -49852 | (38266, 49852),(38233, 49844) + -49849 | (37595, 49849),(37581, 49834) (15 rows) +RESET enable_indexscan; diff --git a/contrib/cube/expected/cube_2.out b/contrib/cube/expected/cube_2.out deleted file mode 100644 index 1aa5cf2f98..0000000000 --- a/contrib/cube/expected/cube_2.out +++ /dev/null @@ -1,1747 +0,0 @@ --- --- Test cube datatype --- -CREATE EXTENSION cube; --- Check whether any of our opclasses fail amvalidate -SELECT amname, opcname -FROM pg_opclass opc LEFT JOIN pg_am am ON am.oid = opcmethod -WHERE opc.oid >= 16384 AND NOT amvalidate(opc.oid); - amname | opcname ---------+--------- -(0 rows) - --- --- testing the input and output functions --- --- Any number (a one-dimensional point) -SELECT '1'::cube AS cube; - cube ------- - (1) -(1 row) - -SELECT '-1'::cube AS cube; - cube ------- - (-1) -(1 row) - -SELECT '1.'::cube AS cube; - cube ------- - (1) -(1 row) - -SELECT '-1.'::cube AS cube; - cube ------- - (-1) -(1 row) - -SELECT '.1'::cube AS cube; - cube -------- - (0.1) -(1 row) - -SELECT '-.1'::cube AS cube; - cube --------- - (-0.1) -(1 row) - -SELECT '1.0'::cube AS cube; - cube ------- - (1) -(1 row) - -SELECT '-1.0'::cube AS cube; - cube ------- - (-1) -(1 row) - -SELECT '1e27'::cube AS cube; - cube ----------- - (1e+027) -(1 row) - -SELECT '-1e27'::cube AS cube; - cube ------------ - (-1e+027) -(1 row) - -SELECT '1.0e27'::cube AS cube; - cube ----------- - (1e+027) -(1 row) - -SELECT '-1.0e27'::cube AS cube; - cube ------------ - (-1e+027) -(1 row) - -SELECT '1e+27'::cube AS cube; - cube ----------- - (1e+027) -(1 row) - -SELECT '-1e+27'::cube AS cube; - cube ------------ - (-1e+027) -(1 row) - -SELECT '1.0e+27'::cube AS cube; - cube ----------- - (1e+027) -(1 row) - -SELECT '-1.0e+27'::cube AS cube; - cube ------------ - (-1e+027) -(1 row) - -SELECT '1e-7'::cube AS cube; - cube ----------- - (1e-007) -(1 row) - -SELECT '-1e-7'::cube AS cube; - cube ------------ - (-1e-007) -(1 row) - -SELECT '1.0e-7'::cube AS cube; - cube ----------- - (1e-007) -(1 row) - -SELECT '-1.0e-7'::cube AS cube; - cube ------------ - (-1e-007) -(1 row) - -SELECT '1e-300'::cube AS cube; - cube ----------- - (1e-300) -(1 row) - -SELECT '-1e-300'::cube AS cube; - cube ------------ - (-1e-300) -(1 row) - -SELECT 'infinity'::cube AS cube; - cube ------------- - (Infinity) -(1 row) - -SELECT '-infinity'::cube AS cube; - cube -------------- - (-Infinity) -(1 row) - -SELECT 'NaN'::cube AS cube; - cube -------- - (NaN) -(1 row) - -SELECT '1234567890123456'::cube AS cube; - cube -------------------------- - (1.23456789012346e+015) -(1 row) - -SELECT '+1234567890123456'::cube AS cube; - cube -------------------------- - (1.23456789012346e+015) -(1 row) - -SELECT '-1234567890123456'::cube AS cube; - cube --------------------------- - (-1.23456789012346e+015) -(1 row) - -SELECT '.1234567890123456'::cube AS cube; - cube ---------------------- - (0.123456789012346) -(1 row) - -SELECT '+.1234567890123456'::cube AS cube; - cube ---------------------- 
- (0.123456789012346) -(1 row) - -SELECT '-.1234567890123456'::cube AS cube; - cube ----------------------- - (-0.123456789012346) -(1 row) - --- simple lists (points) -SELECT '()'::cube AS cube; - cube ------- - () -(1 row) - -SELECT '1,2'::cube AS cube; - cube --------- - (1, 2) -(1 row) - -SELECT '(1,2)'::cube AS cube; - cube --------- - (1, 2) -(1 row) - -SELECT '1,2,3,4,5'::cube AS cube; - cube ------------------ - (1, 2, 3, 4, 5) -(1 row) - -SELECT '(1,2,3,4,5)'::cube AS cube; - cube ------------------ - (1, 2, 3, 4, 5) -(1 row) - --- double lists (cubes) -SELECT '(),()'::cube AS cube; - cube ------- - () -(1 row) - -SELECT '(0),(0)'::cube AS cube; - cube ------- - (0) -(1 row) - -SELECT '(0),(1)'::cube AS cube; - cube ---------- - (0),(1) -(1 row) - -SELECT '[(0),(0)]'::cube AS cube; - cube ------- - (0) -(1 row) - -SELECT '[(0),(1)]'::cube AS cube; - cube ---------- - (0),(1) -(1 row) - -SELECT '(0,0,0,0),(0,0,0,0)'::cube AS cube; - cube --------------- - (0, 0, 0, 0) -(1 row) - -SELECT '(0,0,0,0),(1,0,0,0)'::cube AS cube; - cube ---------------------------- - (0, 0, 0, 0),(1, 0, 0, 0) -(1 row) - -SELECT '[(0,0,0,0),(0,0,0,0)]'::cube AS cube; - cube --------------- - (0, 0, 0, 0) -(1 row) - -SELECT '[(0,0,0,0),(1,0,0,0)]'::cube AS cube; - cube ---------------------------- - (0, 0, 0, 0),(1, 0, 0, 0) -(1 row) - --- invalid input: parse errors -SELECT ''::cube AS cube; -ERROR: invalid input syntax for cube -LINE 1: SELECT ''::cube AS cube; - ^ -DETAIL: syntax error at end of input -SELECT 'ABC'::cube AS cube; -ERROR: invalid input syntax for cube -LINE 1: SELECT 'ABC'::cube AS cube; - ^ -DETAIL: syntax error at or near "A" -SELECT '[]'::cube AS cube; -ERROR: invalid input syntax for cube -LINE 1: SELECT '[]'::cube AS cube; - ^ -DETAIL: syntax error at or near "]" -SELECT '[()]'::cube AS cube; -ERROR: invalid input syntax for cube -LINE 1: SELECT '[()]'::cube AS cube; - ^ -DETAIL: syntax error at or near "]" -SELECT '[(1)]'::cube AS cube; -ERROR: invalid input syntax for cube -LINE 1: SELECT '[(1)]'::cube AS cube; - ^ -DETAIL: syntax error at or near "]" -SELECT '[(1),]'::cube AS cube; -ERROR: invalid input syntax for cube -LINE 1: SELECT '[(1),]'::cube AS cube; - ^ -DETAIL: syntax error at or near "]" -SELECT '[(1),2]'::cube AS cube; -ERROR: invalid input syntax for cube -LINE 1: SELECT '[(1),2]'::cube AS cube; - ^ -DETAIL: syntax error at or near "2" -SELECT '[(1),(2),(3)]'::cube AS cube; -ERROR: invalid input syntax for cube -LINE 1: SELECT '[(1),(2),(3)]'::cube AS cube; - ^ -DETAIL: syntax error at or near "," -SELECT '1,'::cube AS cube; -ERROR: invalid input syntax for cube -LINE 1: SELECT '1,'::cube AS cube; - ^ -DETAIL: syntax error at end of input -SELECT '1,2,'::cube AS cube; -ERROR: invalid input syntax for cube -LINE 1: SELECT '1,2,'::cube AS cube; - ^ -DETAIL: syntax error at end of input -SELECT '1,,2'::cube AS cube; -ERROR: invalid input syntax for cube -LINE 1: SELECT '1,,2'::cube AS cube; - ^ -DETAIL: syntax error at or near "," -SELECT '(1,)'::cube AS cube; -ERROR: invalid input syntax for cube -LINE 1: SELECT '(1,)'::cube AS cube; - ^ -DETAIL: syntax error at or near ")" -SELECT '(1,2,)'::cube AS cube; -ERROR: invalid input syntax for cube -LINE 1: SELECT '(1,2,)'::cube AS cube; - ^ -DETAIL: syntax error at or near ")" -SELECT '(1,,2)'::cube AS cube; -ERROR: invalid input syntax for cube -LINE 1: SELECT '(1,,2)'::cube AS cube; - ^ -DETAIL: syntax error at or near "," --- invalid input: semantic errors and trailing garbage -SELECT '[(1),(2)],'::cube AS cube; -- 0 
-ERROR: invalid input syntax for cube -LINE 1: SELECT '[(1),(2)],'::cube AS cube; - ^ -DETAIL: syntax error at or near "," -SELECT '[(1,2,3),(2,3)]'::cube AS cube; -- 1 -ERROR: invalid input syntax for cube -LINE 1: SELECT '[(1,2,3),(2,3)]'::cube AS cube; - ^ -DETAIL: Different point dimensions in (1,2,3) and (2,3). -SELECT '[(1,2),(1,2,3)]'::cube AS cube; -- 1 -ERROR: invalid input syntax for cube -LINE 1: SELECT '[(1,2),(1,2,3)]'::cube AS cube; - ^ -DETAIL: Different point dimensions in (1,2) and (1,2,3). -SELECT '(1),(2),'::cube AS cube; -- 2 -ERROR: invalid input syntax for cube -LINE 1: SELECT '(1),(2),'::cube AS cube; - ^ -DETAIL: syntax error at or near "," -SELECT '(1,2,3),(2,3)'::cube AS cube; -- 3 -ERROR: invalid input syntax for cube -LINE 1: SELECT '(1,2,3),(2,3)'::cube AS cube; - ^ -DETAIL: Different point dimensions in (1,2,3) and (2,3). -SELECT '(1,2),(1,2,3)'::cube AS cube; -- 3 -ERROR: invalid input syntax for cube -LINE 1: SELECT '(1,2),(1,2,3)'::cube AS cube; - ^ -DETAIL: Different point dimensions in (1,2) and (1,2,3). -SELECT '(1,2,3)ab'::cube AS cube; -- 4 -ERROR: invalid input syntax for cube -LINE 1: SELECT '(1,2,3)ab'::cube AS cube; - ^ -DETAIL: syntax error at or near "a" -SELECT '(1,2,3)a'::cube AS cube; -- 5 -ERROR: invalid input syntax for cube -LINE 1: SELECT '(1,2,3)a'::cube AS cube; - ^ -DETAIL: syntax error at or near "a" -SELECT '(1,2)('::cube AS cube; -- 5 -ERROR: invalid input syntax for cube -LINE 1: SELECT '(1,2)('::cube AS cube; - ^ -DETAIL: syntax error at or near "(" -SELECT '1,2ab'::cube AS cube; -- 6 -ERROR: invalid input syntax for cube -LINE 1: SELECT '1,2ab'::cube AS cube; - ^ -DETAIL: syntax error at or near "a" -SELECT '1 e7'::cube AS cube; -- 6 -ERROR: invalid input syntax for cube -LINE 1: SELECT '1 e7'::cube AS cube; - ^ -DETAIL: syntax error at or near "e" -SELECT '1,2a'::cube AS cube; -- 7 -ERROR: invalid input syntax for cube -LINE 1: SELECT '1,2a'::cube AS cube; - ^ -DETAIL: syntax error at or near "a" -SELECT '1..2'::cube AS cube; -- 7 -ERROR: invalid input syntax for cube -LINE 1: SELECT '1..2'::cube AS cube; - ^ -DETAIL: syntax error at or near ".2" -SELECT '-1e-700'::cube AS cube; -- out of range -ERROR: "-1e-700" is out of range for type double precision -LINE 1: SELECT '-1e-700'::cube AS cube; - ^ --- --- Testing building cubes from float8 values --- -SELECT cube(0::float8); - cube ------- - (0) -(1 row) - -SELECT cube(1::float8); - cube ------- - (1) -(1 row) - -SELECT cube(1,2); - cube ---------- - (1),(2) -(1 row) - -SELECT cube(cube(1,2),3); - cube ---------------- - (1, 3),(2, 3) -(1 row) - -SELECT cube(cube(1,2),3,4); - cube ---------------- - (1, 3),(2, 4) -(1 row) - -SELECT cube(cube(cube(1,2),3,4),5); - cube ---------------------- - (1, 3, 5),(2, 4, 5) -(1 row) - -SELECT cube(cube(cube(1,2),3,4),5,6); - cube ---------------------- - (1, 3, 5),(2, 4, 6) -(1 row) - --- --- Test that the text -> cube cast was installed. 
--- -SELECT '(0)'::text::cube; - cube ------- - (0) -(1 row) - --- --- Test the float[] -> cube cast --- -SELECT cube('{0,1,2}'::float[], '{3,4,5}'::float[]); - cube ---------------------- - (0, 1, 2),(3, 4, 5) -(1 row) - -SELECT cube('{0,1,2}'::float[], '{3}'::float[]); -ERROR: UR and LL arrays must be of same length -SELECT cube(NULL::float[], '{3}'::float[]); - cube ------- - -(1 row) - -SELECT cube('{0,1,2}'::float[]); - cube ------------ - (0, 1, 2) -(1 row) - -SELECT cube_subset(cube('(1,3,5),(6,7,8)'), ARRAY[3,2,1,1]); - cube_subset ---------------------------- - (5, 3, 1, 1),(8, 7, 6, 6) -(1 row) - -SELECT cube_subset(cube('(1,3,5),(1,3,5)'), ARRAY[3,2,1,1]); - cube_subset --------------- - (5, 3, 1, 1) -(1 row) - -SELECT cube_subset(cube('(1,3,5),(6,7,8)'), ARRAY[4,0]); -ERROR: Index out of bounds -SELECT cube_subset(cube('(6,7,8),(6,7,8)'), ARRAY[4,0]); -ERROR: Index out of bounds --- --- Test point processing --- -SELECT cube('(1,2),(1,2)'); -- cube_in - cube --------- - (1, 2) -(1 row) - -SELECT cube('{0,1,2}'::float[], '{0,1,2}'::float[]); -- cube_a_f8_f8 - cube ------------ - (0, 1, 2) -(1 row) - -SELECT cube('{5,6,7,8}'::float[]); -- cube_a_f8 - cube --------------- - (5, 6, 7, 8) -(1 row) - -SELECT cube(1.37); -- cube_f8 - cube --------- - (1.37) -(1 row) - -SELECT cube(1.37, 1.37); -- cube_f8_f8 - cube --------- - (1.37) -(1 row) - -SELECT cube(cube(1,1), 42); -- cube_c_f8 - cube ---------- - (1, 42) -(1 row) - -SELECT cube(cube(1,2), 42); -- cube_c_f8 - cube ------------------ - (1, 42),(2, 42) -(1 row) - -SELECT cube(cube(1,1), 42, 42); -- cube_c_f8_f8 - cube ---------- - (1, 42) -(1 row) - -SELECT cube(cube(1,1), 42, 24); -- cube_c_f8_f8 - cube ------------------ - (1, 42),(1, 24) -(1 row) - -SELECT cube(cube(1,2), 42, 42); -- cube_c_f8_f8 - cube ------------------ - (1, 42),(2, 42) -(1 row) - -SELECT cube(cube(1,2), 42, 24); -- cube_c_f8_f8 - cube ------------------ - (1, 42),(2, 24) -(1 row) - --- --- Testing limit of CUBE_MAX_DIM dimensions check in cube_in. --- -select '(0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0)'::cube; -ERROR: invalid input syntax for cube -LINE 1: select '(0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0... - ^ -DETAIL: A cube cannot have more than 100 dimensions. -select '(0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0),(0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0)'::cube; -ERROR: invalid input syntax for cube -LINE 1: select '(0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0... 
- ^ -DETAIL: A cube cannot have more than 100 dimensions. --- --- testing the operators --- --- equality/inequality: --- -SELECT '24, 33.20'::cube = '24, 33.20'::cube AS bool; - bool ------- - t -(1 row) - -SELECT '24, 33.20'::cube != '24, 33.20'::cube AS bool; - bool ------- - f -(1 row) - -SELECT '24, 33.20'::cube = '24, 33.21'::cube AS bool; - bool ------- - f -(1 row) - -SELECT '24, 33.20'::cube != '24, 33.21'::cube AS bool; - bool ------- - t -(1 row) - -SELECT '(2,0),(3,1)'::cube = '(2,0,0,0,0),(3,1,0,0,0)'::cube AS bool; - bool ------- - f -(1 row) - -SELECT '(2,0),(3,1)'::cube = '(2,0,0,0,0),(3,1,0,0,1)'::cube AS bool; - bool ------- - f -(1 row) - --- "lower than" / "greater than" --- (these operators are not useful for anything but ordering) --- -SELECT '1'::cube > '2'::cube AS bool; - bool ------- - f -(1 row) - -SELECT '1'::cube < '2'::cube AS bool; - bool ------- - t -(1 row) - -SELECT '1,1'::cube > '1,2'::cube AS bool; - bool ------- - f -(1 row) - -SELECT '1,1'::cube < '1,2'::cube AS bool; - bool ------- - t -(1 row) - -SELECT '(2,0),(3,1)'::cube > '(2,0,0,0,0),(3,1,0,0,1)'::cube AS bool; - bool ------- - f -(1 row) - -SELECT '(2,0),(3,1)'::cube < '(2,0,0,0,0),(3,1,0,0,1)'::cube AS bool; - bool ------- - t -(1 row) - -SELECT '(2,0),(3,1)'::cube > '(2,0,0,0,1),(3,1,0,0,0)'::cube AS bool; - bool ------- - f -(1 row) - -SELECT '(2,0),(3,1)'::cube < '(2,0,0,0,1),(3,1,0,0,0)'::cube AS bool; - bool ------- - t -(1 row) - -SELECT '(2,0),(3,1)'::cube > '(2,0,0,0,0),(3,1,0,0,0)'::cube AS bool; - bool ------- - f -(1 row) - -SELECT '(2,0),(3,1)'::cube < '(2,0,0,0,0),(3,1,0,0,0)'::cube AS bool; - bool ------- - t -(1 row) - -SELECT '(2,0,0,0,0),(3,1,0,0,1)'::cube > '(2,0),(3,1)'::cube AS bool; - bool ------- - t -(1 row) - -SELECT '(2,0,0,0,0),(3,1,0,0,1)'::cube < '(2,0),(3,1)'::cube AS bool; - bool ------- - f -(1 row) - -SELECT '(2,0,0,0,1),(3,1,0,0,0)'::cube > '(2,0),(3,1)'::cube AS bool; - bool ------- - t -(1 row) - -SELECT '(2,0,0,0,1),(3,1,0,0,0)'::cube < '(2,0),(3,1)'::cube AS bool; - bool ------- - f -(1 row) - -SELECT '(2,0,0,0,0),(3,1,0,0,0)'::cube > '(2,0),(3,1)'::cube AS bool; - bool ------- - t -(1 row) - -SELECT '(2,0,0,0,0),(3,1,0,0,0)'::cube < '(2,0),(3,1)'::cube AS bool; - bool ------- - f -(1 row) - --- "overlap" --- -SELECT '1'::cube && '1'::cube AS bool; - bool ------- - t -(1 row) - -SELECT '1'::cube && '2'::cube AS bool; - bool ------- - f -(1 row) - -SELECT '[(-1,-1,-1),(1,1,1)]'::cube && '0'::cube AS bool; - bool ------- - t -(1 row) - -SELECT '[(-1,-1,-1),(1,1,1)]'::cube && '1'::cube AS bool; - bool ------- - t -(1 row) - -SELECT '[(-1,-1,-1),(1,1,1)]'::cube && '1,1,1'::cube AS bool; - bool ------- - t -(1 row) - -SELECT '[(-1,-1,-1),(1,1,1)]'::cube && '[(1,1,1),(2,2,2)]'::cube AS bool; - bool ------- - t -(1 row) - -SELECT '[(-1,-1,-1),(1,1,1)]'::cube && '[(1,1),(2,2)]'::cube AS bool; - bool ------- - t -(1 row) - -SELECT '[(-1,-1,-1),(1,1,1)]'::cube && '[(2,1,1),(2,2,2)]'::cube AS bool; - bool ------- - f -(1 row) - --- "contained in" (the left operand is the cube entirely enclosed by --- the right operand): --- -SELECT '0'::cube <@ '0'::cube AS bool; - bool ------- - t -(1 row) - -SELECT '0,0,0'::cube <@ '0,0,0'::cube AS bool; - bool ------- - t -(1 row) - -SELECT '0,0'::cube <@ '0,0,1'::cube AS bool; - bool ------- - t -(1 row) - -SELECT '0,0,0'::cube <@ '0,0,1'::cube AS bool; - bool ------- - f -(1 row) - -SELECT '1,0,0'::cube <@ '0,0,1'::cube AS bool; - bool ------- - f -(1 row) - -SELECT '(1,0,0),(0,0,1)'::cube <@ '(1,0,0),(0,0,1)'::cube AS bool; - bool 
------- - t -(1 row) - -SELECT '(1,0,0),(0,0,1)'::cube <@ '(-1,-1,-1),(1,1,1)'::cube AS bool; - bool ------- - t -(1 row) - -SELECT '(1,0,0),(0,0,1)'::cube <@ '(-1,-1,-1,-1),(1,1,1,1)'::cube AS bool; - bool ------- - t -(1 row) - -SELECT '0'::cube <@ '(-1),(1)'::cube AS bool; - bool ------- - t -(1 row) - -SELECT '1'::cube <@ '(-1),(1)'::cube AS bool; - bool ------- - t -(1 row) - -SELECT '-1'::cube <@ '(-1),(1)'::cube AS bool; - bool ------- - t -(1 row) - -SELECT '(-1),(1)'::cube <@ '(-1),(1)'::cube AS bool; - bool ------- - t -(1 row) - -SELECT '(-1),(1)'::cube <@ '(-1,-1),(1,1)'::cube AS bool; - bool ------- - t -(1 row) - -SELECT '(-2),(1)'::cube <@ '(-1),(1)'::cube AS bool; - bool ------- - f -(1 row) - -SELECT '(-2),(1)'::cube <@ '(-1,-1),(1,1)'::cube AS bool; - bool ------- - f -(1 row) - --- "contains" (the left operand is the cube that entirely encloses the --- right operand) --- -SELECT '0'::cube @> '0'::cube AS bool; - bool ------- - t -(1 row) - -SELECT '0,0,0'::cube @> '0,0,0'::cube AS bool; - bool ------- - t -(1 row) - -SELECT '0,0,1'::cube @> '0,0'::cube AS bool; - bool ------- - t -(1 row) - -SELECT '0,0,1'::cube @> '0,0,0'::cube AS bool; - bool ------- - f -(1 row) - -SELECT '0,0,1'::cube @> '1,0,0'::cube AS bool; - bool ------- - f -(1 row) - -SELECT '(1,0,0),(0,0,1)'::cube @> '(1,0,0),(0,0,1)'::cube AS bool; - bool ------- - t -(1 row) - -SELECT '(-1,-1,-1),(1,1,1)'::cube @> '(1,0,0),(0,0,1)'::cube AS bool; - bool ------- - t -(1 row) - -SELECT '(-1,-1,-1,-1),(1,1,1,1)'::cube @> '(1,0,0),(0,0,1)'::cube AS bool; - bool ------- - t -(1 row) - -SELECT '(-1),(1)'::cube @> '0'::cube AS bool; - bool ------- - t -(1 row) - -SELECT '(-1),(1)'::cube @> '1'::cube AS bool; - bool ------- - t -(1 row) - -SELECT '(-1),(1)'::cube @> '-1'::cube AS bool; - bool ------- - t -(1 row) - -SELECT '(-1),(1)'::cube @> '(-1),(1)'::cube AS bool; - bool ------- - t -(1 row) - -SELECT '(-1,-1),(1,1)'::cube @> '(-1),(1)'::cube AS bool; - bool ------- - t -(1 row) - -SELECT '(-1),(1)'::cube @> '(-2),(1)'::cube AS bool; - bool ------- - f -(1 row) - -SELECT '(-1,-1),(1,1)'::cube @> '(-2),(1)'::cube AS bool; - bool ------- - f -(1 row) - --- Test of distance function --- -SELECT cube_distance('(0)'::cube,'(2,2,2,2)'::cube); - cube_distance ---------------- - 4 -(1 row) - -SELECT cube_distance('(0)'::cube,'(.3,.4)'::cube); - cube_distance ---------------- - 0.5 -(1 row) - -SELECT cube_distance('(2,3,4)'::cube,'(2,3,4)'::cube); - cube_distance ---------------- - 0 -(1 row) - -SELECT cube_distance('(42,42,42,42)'::cube,'(137,137,137,137)'::cube); - cube_distance ---------------- - 190 -(1 row) - -SELECT cube_distance('(42,42,42)'::cube,'(137,137)'::cube); - cube_distance ------------------- - 140.762210837994 -(1 row) - --- Test of cube function (text to cube) --- -SELECT cube('(1,1.2)'::text); - cube ----------- - (1, 1.2) -(1 row) - -SELECT cube(NULL); - cube ------- - -(1 row) - --- Test of cube_dim function (dimensions stored in cube) --- -SELECT cube_dim('(0)'::cube); - cube_dim ----------- - 1 -(1 row) - -SELECT cube_dim('(0,0)'::cube); - cube_dim ----------- - 2 -(1 row) - -SELECT cube_dim('(0,0,0)'::cube); - cube_dim ----------- - 3 -(1 row) - -SELECT cube_dim('(42,42,42),(42,42,42)'::cube); - cube_dim ----------- - 3 -(1 row) - -SELECT cube_dim('(4,8,15,16,23),(4,8,15,16,23)'::cube); - cube_dim ----------- - 5 -(1 row) - --- Test of cube_ll_coord function (retrieves LL coordinate values) --- -SELECT cube_ll_coord('(-1,1),(2,-2)'::cube, 1); - cube_ll_coord ---------------- - -1 -(1 row) - 
-SELECT cube_ll_coord('(-1,1),(2,-2)'::cube, 2); - cube_ll_coord ---------------- - -2 -(1 row) - -SELECT cube_ll_coord('(-1,1),(2,-2)'::cube, 3); - cube_ll_coord ---------------- - 0 -(1 row) - -SELECT cube_ll_coord('(1,2),(1,2)'::cube, 1); - cube_ll_coord ---------------- - 1 -(1 row) - -SELECT cube_ll_coord('(1,2),(1,2)'::cube, 2); - cube_ll_coord ---------------- - 2 -(1 row) - -SELECT cube_ll_coord('(1,2),(1,2)'::cube, 3); - cube_ll_coord ---------------- - 0 -(1 row) - -SELECT cube_ll_coord('(42,137)'::cube, 1); - cube_ll_coord ---------------- - 42 -(1 row) - -SELECT cube_ll_coord('(42,137)'::cube, 2); - cube_ll_coord ---------------- - 137 -(1 row) - -SELECT cube_ll_coord('(42,137)'::cube, 3); - cube_ll_coord ---------------- - 0 -(1 row) - --- Test of cube_ur_coord function (retrieves UR coordinate values) --- -SELECT cube_ur_coord('(-1,1),(2,-2)'::cube, 1); - cube_ur_coord ---------------- - 2 -(1 row) - -SELECT cube_ur_coord('(-1,1),(2,-2)'::cube, 2); - cube_ur_coord ---------------- - 1 -(1 row) - -SELECT cube_ur_coord('(-1,1),(2,-2)'::cube, 3); - cube_ur_coord ---------------- - 0 -(1 row) - -SELECT cube_ur_coord('(1,2),(1,2)'::cube, 1); - cube_ur_coord ---------------- - 1 -(1 row) - -SELECT cube_ur_coord('(1,2),(1,2)'::cube, 2); - cube_ur_coord ---------------- - 2 -(1 row) - -SELECT cube_ur_coord('(1,2),(1,2)'::cube, 3); - cube_ur_coord ---------------- - 0 -(1 row) - -SELECT cube_ur_coord('(42,137)'::cube, 1); - cube_ur_coord ---------------- - 42 -(1 row) - -SELECT cube_ur_coord('(42,137)'::cube, 2); - cube_ur_coord ---------------- - 137 -(1 row) - -SELECT cube_ur_coord('(42,137)'::cube, 3); - cube_ur_coord ---------------- - 0 -(1 row) - --- Test of cube_is_point --- -SELECT cube_is_point('(0)'::cube); - cube_is_point ---------------- - t -(1 row) - -SELECT cube_is_point('(0,1,2)'::cube); - cube_is_point ---------------- - t -(1 row) - -SELECT cube_is_point('(0,1,2),(0,1,2)'::cube); - cube_is_point ---------------- - t -(1 row) - -SELECT cube_is_point('(0,1,2),(-1,1,2)'::cube); - cube_is_point ---------------- - f -(1 row) - -SELECT cube_is_point('(0,1,2),(0,-1,2)'::cube); - cube_is_point ---------------- - f -(1 row) - -SELECT cube_is_point('(0,1,2),(0,1,-2)'::cube); - cube_is_point ---------------- - f -(1 row) - --- Test of cube_enlarge (enlarging and shrinking cubes) --- -SELECT cube_enlarge('(0)'::cube, 0, 0); - cube_enlarge --------------- - (0) -(1 row) - -SELECT cube_enlarge('(0)'::cube, 0, 1); - cube_enlarge --------------- - (0) -(1 row) - -SELECT cube_enlarge('(0)'::cube, 0, 2); - cube_enlarge --------------- - (0) -(1 row) - -SELECT cube_enlarge('(2),(-2)'::cube, 0, 4); - cube_enlarge --------------- - (-2),(2) -(1 row) - -SELECT cube_enlarge('(0)'::cube, 1, 0); - cube_enlarge --------------- - (-1),(1) -(1 row) - -SELECT cube_enlarge('(0)'::cube, 1, 1); - cube_enlarge --------------- - (-1),(1) -(1 row) - -SELECT cube_enlarge('(0)'::cube, 1, 2); - cube_enlarge ------------------ - (-1, -1),(1, 1) -(1 row) - -SELECT cube_enlarge('(2),(-2)'::cube, 1, 4); - cube_enlarge -------------------------------- - (-3, -1, -1, -1),(3, 1, 1, 1) -(1 row) - -SELECT cube_enlarge('(0)'::cube, -1, 0); - cube_enlarge --------------- - (0) -(1 row) - -SELECT cube_enlarge('(0)'::cube, -1, 1); - cube_enlarge --------------- - (0) -(1 row) - -SELECT cube_enlarge('(0)'::cube, -1, 2); - cube_enlarge --------------- - (0) -(1 row) - -SELECT cube_enlarge('(2),(-2)'::cube, -1, 4); - cube_enlarge --------------- - (-1),(1) -(1 row) - -SELECT cube_enlarge('(0,0,0)'::cube, 1, 0); - 
cube_enlarge ------------------------- - (-1, -1, -1),(1, 1, 1) -(1 row) - -SELECT cube_enlarge('(0,0,0)'::cube, 1, 2); - cube_enlarge ------------------------- - (-1, -1, -1),(1, 1, 1) -(1 row) - -SELECT cube_enlarge('(2,-2),(-3,7)'::cube, 1, 2); - cube_enlarge ------------------ - (-4, -3),(3, 8) -(1 row) - -SELECT cube_enlarge('(2,-2),(-3,7)'::cube, 3, 2); - cube_enlarge ------------------- - (-6, -5),(5, 10) -(1 row) - -SELECT cube_enlarge('(2,-2),(-3,7)'::cube, -1, 2); - cube_enlarge ------------------ - (-2, -1),(1, 6) -(1 row) - -SELECT cube_enlarge('(2,-2),(-3,7)'::cube, -3, 2); - cube_enlarge ---------------------- - (-0.5, 1),(-0.5, 4) -(1 row) - -SELECT cube_enlarge('(42,-23,-23),(42,23,23)'::cube, -23, 5); - cube_enlarge --------------- - (42, 0, 0) -(1 row) - -SELECT cube_enlarge('(42,-23,-23),(42,23,23)'::cube, -24, 5); - cube_enlarge --------------- - (42, 0, 0) -(1 row) - --- Test of cube_union (MBR for two cubes) --- -SELECT cube_union('(1,2),(3,4)'::cube, '(5,6,7),(8,9,10)'::cube); - cube_union ----------------------- - (1, 2, 0),(8, 9, 10) -(1 row) - -SELECT cube_union('(1,2)'::cube, '(4,2,0,0)'::cube); - cube_union ---------------------------- - (1, 2, 0, 0),(4, 2, 0, 0) -(1 row) - -SELECT cube_union('(1,2),(1,2)'::cube, '(4,2),(4,2)'::cube); - cube_union ---------------- - (1, 2),(4, 2) -(1 row) - -SELECT cube_union('(1,2),(1,2)'::cube, '(1,2),(1,2)'::cube); - cube_union ------------- - (1, 2) -(1 row) - -SELECT cube_union('(1,2),(1,2)'::cube, '(1,2,0),(1,2,0)'::cube); - cube_union ------------- - (1, 2, 0) -(1 row) - --- Test of cube_inter --- -SELECT cube_inter('(1,2),(10,11)'::cube, '(3,4), (16,15)'::cube); -- intersects - cube_inter ------------------ - (3, 4),(10, 11) -(1 row) - -SELECT cube_inter('(1,2),(10,11)'::cube, '(3,4), (6,5)'::cube); -- includes - cube_inter ---------------- - (3, 4),(6, 5) -(1 row) - -SELECT cube_inter('(1,2),(10,11)'::cube, '(13,14), (16,15)'::cube); -- no intersection - cube_inter -------------------- - (13, 14),(10, 11) -(1 row) - -SELECT cube_inter('(1,2),(10,11)'::cube, '(3,14), (16,15)'::cube); -- no intersection, but one dimension intersects - cube_inter ------------------- - (3, 14),(10, 11) -(1 row) - -SELECT cube_inter('(1,2),(10,11)'::cube, '(10,11), (16,15)'::cube); -- point intersection - cube_inter ------------- - (10, 11) -(1 row) - -SELECT cube_inter('(1,2,3)'::cube, '(1,2,3)'::cube); -- point args - cube_inter ------------- - (1, 2, 3) -(1 row) - -SELECT cube_inter('(1,2,3)'::cube, '(5,6,3)'::cube); -- point args - cube_inter ---------------------- - (5, 6, 3),(1, 2, 3) -(1 row) - --- Test of cube_size --- -SELECT cube_size('(4,8),(15,16)'::cube); - cube_size ------------ - 88 -(1 row) - -SELECT cube_size('(42,137)'::cube); - cube_size ------------ - 0 -(1 row) - --- Test of distances --- -SELECT cube_distance('(1,1)'::cube, '(4,5)'::cube); - cube_distance ---------------- - 5 -(1 row) - -SELECT '(1,1)'::cube <-> '(4,5)'::cube as d_e; - d_e ------ - 5 -(1 row) - -SELECT distance_chebyshev('(1,1)'::cube, '(4,5)'::cube); - distance_chebyshev --------------------- - 4 -(1 row) - -SELECT '(1,1)'::cube <=> '(4,5)'::cube as d_c; - d_c ------ - 4 -(1 row) - -SELECT distance_taxicab('(1,1)'::cube, '(4,5)'::cube); - distance_taxicab ------------------- - 7 -(1 row) - -SELECT '(1,1)'::cube <#> '(4,5)'::cube as d_t; - d_t ------ - 7 -(1 row) - --- zero for overlapping -SELECT cube_distance('(2,2),(10,10)'::cube, '(0,0),(5,5)'::cube); - cube_distance ---------------- - 0 -(1 row) - -SELECT distance_chebyshev('(2,2),(10,10)'::cube, 
'(0,0),(5,5)'::cube); - distance_chebyshev --------------------- - 0 -(1 row) - -SELECT distance_taxicab('(2,2),(10,10)'::cube, '(0,0),(5,5)'::cube); - distance_taxicab ------------------- - 0 -(1 row) - --- coordinate access -SELECT cube(array[10,20,30], array[40,50,60])->1; - ?column? ----------- - 10 -(1 row) - -SELECT cube(array[40,50,60], array[10,20,30])->1; - ?column? ----------- - 40 -(1 row) - -SELECT cube(array[10,20,30], array[40,50,60])->6; - ?column? ----------- - 60 -(1 row) - -SELECT cube(array[10,20,30], array[40,50,60])->0; -ERROR: cube index 0 is out of bounds -SELECT cube(array[10,20,30], array[40,50,60])->7; -ERROR: cube index 7 is out of bounds -SELECT cube(array[10,20,30], array[40,50,60])->-1; -ERROR: cube index -1 is out of bounds -SELECT cube(array[10,20,30], array[40,50,60])->-6; -ERROR: cube index -6 is out of bounds -SELECT cube(array[10,20,30])->3; - ?column? ----------- - 30 -(1 row) - -SELECT cube(array[10,20,30])->6; - ?column? ----------- - 30 -(1 row) - -SELECT cube(array[10,20,30])->-6; -ERROR: cube index -6 is out of bounds --- "normalized" coordinate access -SELECT cube(array[10,20,30], array[40,50,60])~>1; - ?column? ----------- - 10 -(1 row) - -SELECT cube(array[40,50,60], array[10,20,30])~>1; - ?column? ----------- - 10 -(1 row) - -SELECT cube(array[10,20,30], array[40,50,60])~>2; - ?column? ----------- - 20 -(1 row) - -SELECT cube(array[40,50,60], array[10,20,30])~>2; - ?column? ----------- - 20 -(1 row) - -SELECT cube(array[10,20,30], array[40,50,60])~>3; - ?column? ----------- - 30 -(1 row) - -SELECT cube(array[40,50,60], array[10,20,30])~>3; - ?column? ----------- - 30 -(1 row) - -SELECT cube(array[40,50,60], array[10,20,30])~>0; -ERROR: cube index 0 is out of bounds -SELECT cube(array[40,50,60], array[10,20,30])~>4; - ?column? 
----------- - 40 -(1 row) - -SELECT cube(array[40,50,60], array[10,20,30])~>(-1); -ERROR: cube index -1 is out of bounds --- Load some example data and build the index --- -CREATE TABLE test_cube (c cube); -\copy test_cube from 'data/test_cube.data' -CREATE INDEX test_cube_ix ON test_cube USING gist (c); -SELECT * FROM test_cube WHERE c && '(3000,1000),(0,0)' ORDER BY c; - c --------------------------- - (337, 455),(240, 359) - (759, 187),(662, 163) - (1444, 403),(1346, 344) - (1594, 1043),(1517, 971) - (2424, 160),(2424, 81) -(5 rows) - --- Test sorting -SELECT * FROM test_cube WHERE c && '(3000,1000),(0,0)' GROUP BY c ORDER BY c; - c --------------------------- - (337, 455),(240, 359) - (759, 187),(662, 163) - (1444, 403),(1346, 344) - (1594, 1043),(1517, 971) - (2424, 160),(2424, 81) -(5 rows) - --- kNN with index -SELECT *, c <-> '(100, 100),(500, 500)'::cube as dist FROM test_cube ORDER BY c <-> '(100, 100),(500, 500)'::cube LIMIT 5; - c | dist --------------------------+------------------ - (337, 455),(240, 359) | 0 - (759, 187),(662, 163) | 162 - (948, 1201),(907, 1156) | 772.000647668122 - (1444, 403),(1346, 344) | 846 - (369, 1457),(278, 1409) | 909 -(5 rows) - -SELECT *, c <=> '(100, 100),(500, 500)'::cube as dist FROM test_cube ORDER BY c <=> '(100, 100),(500, 500)'::cube LIMIT 5; - c | dist --------------------------+------ - (337, 455),(240, 359) | 0 - (759, 187),(662, 163) | 162 - (948, 1201),(907, 1156) | 656 - (1444, 403),(1346, 344) | 846 - (369, 1457),(278, 1409) | 909 -(5 rows) - -SELECT *, c <#> '(100, 100),(500, 500)'::cube as dist FROM test_cube ORDER BY c <#> '(100, 100),(500, 500)'::cube LIMIT 5; - c | dist --------------------------+------ - (337, 455),(240, 359) | 0 - (759, 187),(662, 163) | 162 - (1444, 403),(1346, 344) | 846 - (369, 1457),(278, 1409) | 909 - (948, 1201),(907, 1156) | 1063 -(5 rows) - --- kNN-based sorting -SELECT * FROM test_cube ORDER BY c~>1 LIMIT 15; -- ascending by 1st coordinate of lower left corner - c ---------------------------- - (54, 38679),(3, 38602) - (83, 10271),(15, 10265) - (122, 46832),(64, 46762) - (167, 17214),(92, 17184) - (161, 24465),(107, 24374) - (162, 26040),(120, 25963) - (154, 4019),(138, 3990) - (259, 1850),(175, 1820) - (207, 40886),(179, 40879) - (288, 49588),(204, 49571) - (270, 32616),(226, 32607) - (318, 31489),(235, 31404) - (337, 455),(240, 359) - (270, 29508),(264, 29440) - (369, 1457),(278, 1409) -(15 rows) - -SELECT * FROM test_cube ORDER BY c~>4 LIMIT 15; -- ascending by 2nd coordinate or upper right corner - c ---------------------------- - (30333, 50),(30273, 6) - (43301, 75),(43227, 43) - (19650, 142),(19630, 51) - (2424, 160),(2424, 81) - (3449, 171),(3354, 108) - (18037, 155),(17941, 109) - (28511, 208),(28479, 114) - (19946, 217),(19941, 118) - (16906, 191),(16816, 139) - (759, 187),(662, 163) - (22684, 266),(22656, 181) - (24423, 255),(24360, 213) - (45989, 249),(45910, 222) - (11399, 377),(11360, 294) - (12162, 389),(12103, 309) -(15 rows) - -SELECT * FROM test_cube ORDER BY c~>1 DESC LIMIT 15; -- descending by 1st coordinate of lower left corner - c -------------------------------- - (50027, 49230),(49951, 49214) - (49980, 35004),(49937, 34963) - (49985, 6436),(49927, 6338) - (49999, 27218),(49908, 27176) - (49954, 1340),(49905, 1294) - (49944, 25163),(49902, 25153) - (49981, 34876),(49898, 34786) - (49957, 43390),(49897, 43384) - (49853, 18504),(49848, 18503) - (49902, 41752),(49818, 41746) - (49907, 30225),(49810, 30158) - (49843, 5175),(49808, 5145) - (49887, 24274),(49805, 24184) - (49847, 
7128),(49798, 7067) - (49820, 7990),(49771, 7967) -(15 rows) - -SELECT * FROM test_cube ORDER BY c~>4 DESC LIMIT 15; -- descending by 2nd coordinate or upper right corner - c -------------------------------- - (36311, 50073),(36258, 49987) - (30746, 50040),(30727, 49992) - (2168, 50012),(2108, 49914) - (21551, 49983),(21492, 49885) - (17954, 49975),(17865, 49915) - (3531, 49962),(3463, 49934) - (19128, 49932),(19112, 49849) - (31287, 49923),(31236, 49913) - (43925, 49912),(43888, 49878) - (29261, 49910),(29247, 49818) - (14913, 49873),(14849, 49836) - (20007, 49858),(19921, 49778) - (38266, 49852),(38233, 49844) - (37595, 49849),(37581, 49834) - (46151, 49848),(46058, 49830) -(15 rows) - --- same thing for index with points -CREATE TABLE test_point(c cube); -INSERT INTO test_point(SELECT cube(array[c->1,c->2,c->3,c->4]) FROM test_cube); -CREATE INDEX ON test_point USING gist(c); -SELECT * FROM test_point ORDER BY c~>1, c~>2 LIMIT 15; -- ascending by 1st then by 2nd coordinate - c --------------------------- - (54, 38679, 3, 38602) - (83, 10271, 15, 10265) - (122, 46832, 64, 46762) - (154, 4019, 138, 3990) - (161, 24465, 107, 24374) - (162, 26040, 120, 25963) - (167, 17214, 92, 17184) - (207, 40886, 179, 40879) - (259, 1850, 175, 1820) - (270, 29508, 264, 29440) - (270, 32616, 226, 32607) - (288, 49588, 204, 49571) - (318, 31489, 235, 31404) - (326, 18837, 285, 18817) - (337, 455, 240, 359) -(15 rows) - -SELECT * FROM test_point ORDER BY c~>4 DESC LIMIT 15; -- descending by 1st coordinate - c ------------------------------- - (30746, 50040, 30727, 49992) - (36311, 50073, 36258, 49987) - (3531, 49962, 3463, 49934) - (17954, 49975, 17865, 49915) - (2168, 50012, 2108, 49914) - (31287, 49923, 31236, 49913) - (21551, 49983, 21492, 49885) - (43925, 49912, 43888, 49878) - (19128, 49932, 19112, 49849) - (38266, 49852, 38233, 49844) - (14913, 49873, 14849, 49836) - (37595, 49849, 37581, 49834) - (46151, 49848, 46058, 49830) - (29261, 49910, 29247, 49818) - (19233, 49824, 19185, 49794) -(15 rows) - diff --git a/contrib/cube/expected/cube_sci.out b/contrib/cube/expected/cube_sci.out new file mode 100644 index 0000000000..1e8269cdf0 --- /dev/null +++ b/contrib/cube/expected/cube_sci.out @@ -0,0 +1,106 @@ +--- +--- Testing cube output in scientific notation. This was put into separate +--- test, because has platform-depending output. 
+--- +SELECT '1e27'::cube AS cube; + cube +--------- + (1e+27) +(1 row) + +SELECT '-1e27'::cube AS cube; + cube +---------- + (-1e+27) +(1 row) + +SELECT '1.0e27'::cube AS cube; + cube +--------- + (1e+27) +(1 row) + +SELECT '-1.0e27'::cube AS cube; + cube +---------- + (-1e+27) +(1 row) + +SELECT '1e+27'::cube AS cube; + cube +--------- + (1e+27) +(1 row) + +SELECT '-1e+27'::cube AS cube; + cube +---------- + (-1e+27) +(1 row) + +SELECT '1.0e+27'::cube AS cube; + cube +--------- + (1e+27) +(1 row) + +SELECT '-1.0e+27'::cube AS cube; + cube +---------- + (-1e+27) +(1 row) + +SELECT '1e-7'::cube AS cube; + cube +--------- + (1e-07) +(1 row) + +SELECT '-1e-7'::cube AS cube; + cube +---------- + (-1e-07) +(1 row) + +SELECT '1.0e-7'::cube AS cube; + cube +--------- + (1e-07) +(1 row) + +SELECT '-1.0e-7'::cube AS cube; + cube +---------- + (-1e-07) +(1 row) + +SELECT '1e-300'::cube AS cube; + cube +---------- + (1e-300) +(1 row) + +SELECT '-1e-300'::cube AS cube; + cube +----------- + (-1e-300) +(1 row) + +SELECT '1234567890123456'::cube AS cube; + cube +------------------------ + (1.23456789012346e+15) +(1 row) + +SELECT '+1234567890123456'::cube AS cube; + cube +------------------------ + (1.23456789012346e+15) +(1 row) + +SELECT '-1234567890123456'::cube AS cube; + cube +------------------------- + (-1.23456789012346e+15) +(1 row) + diff --git a/contrib/cube/sql/cube.sql b/contrib/cube/sql/cube.sql index 58ea3ad811..59e7e4159d 100644 --- a/contrib/cube/sql/cube.sql +++ b/contrib/cube/sql/cube.sql @@ -22,26 +22,9 @@ SELECT '.1'::cube AS cube; SELECT '-.1'::cube AS cube; SELECT '1.0'::cube AS cube; SELECT '-1.0'::cube AS cube; -SELECT '1e27'::cube AS cube; -SELECT '-1e27'::cube AS cube; -SELECT '1.0e27'::cube AS cube; -SELECT '-1.0e27'::cube AS cube; -SELECT '1e+27'::cube AS cube; -SELECT '-1e+27'::cube AS cube; -SELECT '1.0e+27'::cube AS cube; -SELECT '-1.0e+27'::cube AS cube; -SELECT '1e-7'::cube AS cube; -SELECT '-1e-7'::cube AS cube; -SELECT '1.0e-7'::cube AS cube; -SELECT '-1.0e-7'::cube AS cube; -SELECT '1e-300'::cube AS cube; -SELECT '-1e-300'::cube AS cube; SELECT 'infinity'::cube AS cube; SELECT '-infinity'::cube AS cube; SELECT 'NaN'::cube AS cube; -SELECT '1234567890123456'::cube AS cube; -SELECT '+1234567890123456'::cube AS cube; -SELECT '-1234567890123456'::cube AS cube; SELECT '.1234567890123456'::cube AS cube; SELECT '+.1234567890123456'::cube AS cube; SELECT '-.1234567890123456'::cube AS cube; @@ -125,6 +108,12 @@ SELECT cube_subset(cube('(1,3,5),(6,7,8)'), ARRAY[3,2,1,1]); SELECT cube_subset(cube('(1,3,5),(1,3,5)'), ARRAY[3,2,1,1]); SELECT cube_subset(cube('(1,3,5),(6,7,8)'), ARRAY[4,0]); SELECT cube_subset(cube('(6,7,8),(6,7,8)'), ARRAY[4,0]); +-- test for limits: this should pass +SELECT cube_subset(cube('(6,7,8),(6,7,8)'), array(SELECT 1 as a FROM generate_series(1,100))); +-- and this should fail +SELECT cube_subset(cube('(6,7,8),(6,7,8)'), array(SELECT 1 as a FROM generate_series(1,101))); + + -- -- Test point processing @@ -144,9 +133,21 @@ SELECT cube(cube(1,2), 42, 24); -- cube_c_f8_f8 -- -- Testing limit of CUBE_MAX_DIM dimensions check in cube_in. 
-- - +-- create too big cube from literal select '(0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0)'::cube; select '(0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0),(0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0)'::cube; +-- from an array +select cube(array(SELECT 0 as a FROM generate_series(1,101))); +select cube(array(SELECT 0 as a FROM generate_series(1,101)),array(SELECT 0 as a FROM generate_series(1,101))); + +-- extend cube beyond limit +-- this should work +select cube(array(SELECT 0 as a FROM generate_series(1,100))); +select cube(array(SELECT 0 as a FROM generate_series(1,100)),array(SELECT 0 as a FROM generate_series(1,100))); +-- this should fail +select cube(cube(array(SELECT 0 as a FROM generate_series(1,100))), 0); +select cube(cube(array(SELECT 0 as a FROM generate_series(1,100)),array(SELECT 0 as a FROM generate_series(1,100))), 0, 0); + -- -- testing the operators @@ -382,20 +383,44 @@ SELECT * FROM test_cube WHERE c && '(3000,1000),(0,0)' ORDER BY c; -- Test sorting SELECT * FROM test_cube WHERE c && '(3000,1000),(0,0)' GROUP BY c ORDER BY c; --- kNN with index +-- Test index-only scans +SET enable_bitmapscan = false; +EXPLAIN (COSTS OFF) +SELECT c FROM test_cube WHERE c <@ '(3000,1000),(0,0)' ORDER BY c; +SELECT c FROM test_cube WHERE c <@ '(3000,1000),(0,0)' ORDER BY c; +RESET enable_bitmapscan; + +-- Test kNN +INSERT INTO test_cube VALUES ('(1,1)'), ('(100000)'), ('(0, 100000)'); -- Some corner cases +SET enable_seqscan = false; + +-- Test different metrics SELECT *, c <-> '(100, 100),(500, 500)'::cube as dist FROM test_cube ORDER BY c <-> '(100, 100),(500, 500)'::cube LIMIT 5; SELECT *, c <=> '(100, 100),(500, 500)'::cube as dist FROM test_cube ORDER BY c <=> '(100, 100),(500, 500)'::cube LIMIT 5; SELECT *, c <#> '(100, 100),(500, 500)'::cube as dist FROM test_cube ORDER BY c <#> '(100, 100),(500, 500)'::cube LIMIT 5; --- kNN-based sorting -SELECT * FROM test_cube ORDER BY c~>1 LIMIT 15; -- ascending by 1st coordinate of lower left corner -SELECT * FROM test_cube ORDER BY c~>4 LIMIT 15; -- ascending by 2nd coordinate or upper right corner -SELECT * FROM test_cube ORDER BY c~>1 DESC LIMIT 15; -- descending by 1st coordinate of lower left corner -SELECT * FROM test_cube ORDER BY c~>4 DESC LIMIT 15; -- descending by 2nd coordinate or upper right corner - --- same thing for index with points -CREATE TABLE test_point(c cube); -INSERT INTO test_point(SELECT cube(array[c->1,c->2,c->3,c->4]) FROM test_cube); -CREATE INDEX ON test_point USING 
gist(c); -SELECT * FROM test_point ORDER BY c~>1, c~>2 LIMIT 15; -- ascending by 1st then by 2nd coordinate -SELECT * FROM test_point ORDER BY c~>4 DESC LIMIT 15; -- descending by 1st coordinate +-- Test sorting by coordinates +SELECT c~>1, c FROM test_cube ORDER BY c~>1 LIMIT 15; -- ascending by left bound +SELECT c~>2, c FROM test_cube ORDER BY c~>2 LIMIT 15; -- ascending by right bound +SELECT c~>3, c FROM test_cube ORDER BY c~>3 LIMIT 15; -- ascending by lower bound +SELECT c~>4, c FROM test_cube ORDER BY c~>4 LIMIT 15; -- ascending by upper bound +SELECT c~>(-1), c FROM test_cube ORDER BY c~>(-1) LIMIT 15; -- descending by left bound +SELECT c~>(-2), c FROM test_cube ORDER BY c~>(-2) LIMIT 15; -- descending by right bound +SELECT c~>(-3), c FROM test_cube ORDER BY c~>(-3) LIMIT 15; -- descending by lower bound +SELECT c~>(-4), c FROM test_cube ORDER BY c~>(-4) LIMIT 15; -- descending by upper bound + +-- Same queries with sequential scan (should give the same results as above) +RESET enable_seqscan; +SET enable_indexscan = OFF; +SELECT *, c <-> '(100, 100),(500, 500)'::cube as dist FROM test_cube ORDER BY c <-> '(100, 100),(500, 500)'::cube LIMIT 5; +SELECT *, c <=> '(100, 100),(500, 500)'::cube as dist FROM test_cube ORDER BY c <=> '(100, 100),(500, 500)'::cube LIMIT 5; +SELECT *, c <#> '(100, 100),(500, 500)'::cube as dist FROM test_cube ORDER BY c <#> '(100, 100),(500, 500)'::cube LIMIT 5; +SELECT c~>1, c FROM test_cube ORDER BY c~>1 LIMIT 15; -- ascending by left bound +SELECT c~>2, c FROM test_cube ORDER BY c~>2 LIMIT 15; -- ascending by right bound +SELECT c~>3, c FROM test_cube ORDER BY c~>3 LIMIT 15; -- ascending by lower bound +SELECT c~>4, c FROM test_cube ORDER BY c~>4 LIMIT 15; -- ascending by upper bound +SELECT c~>(-1), c FROM test_cube ORDER BY c~>(-1) LIMIT 15; -- descending by left bound +SELECT c~>(-2), c FROM test_cube ORDER BY c~>(-2) LIMIT 15; -- descending by right bound +SELECT c~>(-3), c FROM test_cube ORDER BY c~>(-3) LIMIT 15; -- descending by lower bound +SELECT c~>(-4), c FROM test_cube ORDER BY c~>(-4) LIMIT 15; -- descending by upper bound +RESET enable_indexscan; diff --git a/contrib/cube/sql/cube_sci.sql b/contrib/cube/sql/cube_sci.sql new file mode 100644 index 0000000000..35a540779a --- /dev/null +++ b/contrib/cube/sql/cube_sci.sql @@ -0,0 +1,22 @@ +--- +--- Testing cube output in scientific notation. This was put into separate +--- test, because has platform-depending output. 
+--- + +SELECT '1e27'::cube AS cube; +SELECT '-1e27'::cube AS cube; +SELECT '1.0e27'::cube AS cube; +SELECT '-1.0e27'::cube AS cube; +SELECT '1e+27'::cube AS cube; +SELECT '-1e+27'::cube AS cube; +SELECT '1.0e+27'::cube AS cube; +SELECT '-1.0e+27'::cube AS cube; +SELECT '1e-7'::cube AS cube; +SELECT '-1e-7'::cube AS cube; +SELECT '1.0e-7'::cube AS cube; +SELECT '-1.0e-7'::cube AS cube; +SELECT '1e-300'::cube AS cube; +SELECT '-1e-300'::cube AS cube; +SELECT '1234567890123456'::cube AS cube; +SELECT '+1234567890123456'::cube AS cube; +SELECT '-1234567890123456'::cube AS cube; diff --git a/contrib/dblink/Makefile b/contrib/dblink/Makefile index 5189758dab..b1a5e06383 100644 --- a/contrib/dblink/Makefile +++ b/contrib/dblink/Makefile @@ -3,7 +3,7 @@ MODULE_big = dblink OBJS = dblink.o $(WIN32RES) PG_CPPFLAGS = -I$(libpq_srcdir) -SHLIB_LINK = $(libpq) +SHLIB_LINK_INTERNAL = $(libpq) EXTENSION = dblink DATA = dblink--1.2.sql dblink--1.1--1.2.sql dblink--1.0--1.1.sql \ diff --git a/contrib/dblink/dblink.c b/contrib/dblink/dblink.c index 81136b131c..c646068848 100644 --- a/contrib/dblink/dblink.c +++ b/contrib/dblink/dblink.c @@ -9,7 +9,7 @@ * Shridhar Daithankar * * contrib/dblink/dblink.c - * Copyright (c) 2001-2017, PostgreSQL Global Development Group + * Copyright (c) 2001-2018, PostgreSQL Global Development Group * ALL RIGHTS RESERVED; * * Permission to use, copy, modify, and distribute this software and its @@ -100,7 +100,7 @@ static remoteConn *getConnectionByName(const char *name); static HTAB *createConnHash(void); static void createNewConnection(const char *name, remoteConn *rconn); static void deleteConnection(const char *name); -static char **get_pkey_attnames(Relation rel, int16 *numatts); +static char **get_pkey_attnames(Relation rel, int16 *indnkeyatts); static char **get_text_array_contents(ArrayType *array, int *numitems); static char *get_sql_insert(Relation rel, int *pkattnums, int pknumatts, char **src_pkattvals, char **tgt_pkattvals); static char *get_sql_delete(Relation rel, int *pkattnums, int pknumatts, char **tgt_pkattvals); @@ -113,7 +113,7 @@ static char *generate_relation_name(Relation rel); static void dblink_connstr_check(const char *connstr); static void dblink_security_check(PGconn *conn, remoteConn *rconn); static void dblink_res_error(PGconn *conn, const char *conname, PGresult *res, - const char *dblink_context_msg, bool fail); + bool fail, const char *fmt,...) 
pg_attribute_printf(5, 6); static char *get_connect_string(const char *servername); static char *escape_param_str(const char *from); static void validate_pkattnums(Relation rel, @@ -243,7 +243,7 @@ dblink_init(void) pconn = (remoteConn *) MemoryContextAlloc(TopMemoryContext, sizeof(remoteConn)); pconn->conn = NULL; pconn->openCursorCount = 0; - pconn->newXactForCursor = FALSE; + pconn->newXactForCursor = false; } } @@ -423,7 +423,7 @@ dblink_open(PG_FUNCTION_ARGS) if (PQresultStatus(res) != PGRES_COMMAND_OK) dblink_res_internalerror(conn, res, "begin error"); PQclear(res); - rconn->newXactForCursor = TRUE; + rconn->newXactForCursor = true; /* * Since transaction state was IDLE, we force cursor count to @@ -441,7 +441,8 @@ dblink_open(PG_FUNCTION_ARGS) res = PQexec(conn, buf.data); if (!res || PQresultStatus(res) != PGRES_COMMAND_OK) { - dblink_res_error(conn, conname, res, "could not open cursor", fail); + dblink_res_error(conn, conname, res, fail, + "while opening cursor \"%s\"", curname); PG_RETURN_TEXT_P(cstring_to_text("ERROR")); } @@ -509,7 +510,8 @@ dblink_close(PG_FUNCTION_ARGS) res = PQexec(conn, buf.data); if (!res || PQresultStatus(res) != PGRES_COMMAND_OK) { - dblink_res_error(conn, conname, res, "could not close cursor", fail); + dblink_res_error(conn, conname, res, fail, + "while closing cursor \"%s\"", curname); PG_RETURN_TEXT_P(cstring_to_text("ERROR")); } @@ -523,7 +525,7 @@ dblink_close(PG_FUNCTION_ARGS) /* if count is zero, commit the transaction */ if (rconn->openCursorCount == 0) { - rconn->newXactForCursor = FALSE; + rconn->newXactForCursor = false; res = PQexec(conn, "COMMIT"); if (PQresultStatus(res) != PGRES_COMMAND_OK) @@ -612,8 +614,8 @@ dblink_fetch(PG_FUNCTION_ARGS) (PQresultStatus(res) != PGRES_COMMAND_OK && PQresultStatus(res) != PGRES_TUPLES_OK)) { - dblink_res_error(conn, conname, res, - "could not fetch from cursor", fail); + dblink_res_error(conn, conname, res, fail, + "while fetching from cursor \"%s\"", curname); return (Datum) 0; } else if (PQresultStatus(res) == PGRES_COMMAND_OK) @@ -763,8 +765,8 @@ dblink_record_internal(FunctionCallInfo fcinfo, bool is_async) if (PQresultStatus(res) != PGRES_COMMAND_OK && PQresultStatus(res) != PGRES_TUPLES_OK) { - dblink_res_error(conn, conname, res, - "could not execute query", fail); + dblink_res_error(conn, conname, res, fail, + "while executing query"); /* if fail isn't set, we'll return an empty query result */ } else @@ -1009,8 +1011,8 @@ materializeQueryResult(FunctionCallInfo fcinfo, PGresult *res1 = res; res = NULL; - dblink_res_error(conn, conname, res1, - "could not execute query", fail); + dblink_res_error(conn, conname, res1, fail, + "while executing query"); /* if fail isn't set, we'll return an empty query result */ } else if (PQresultStatus(res) == PGRES_COMMAND_OK) @@ -1438,8 +1440,8 @@ dblink_exec(PG_FUNCTION_ARGS) (PQresultStatus(res) != PGRES_COMMAND_OK && PQresultStatus(res) != PGRES_TUPLES_OK)) { - dblink_res_error(conn, conname, res, - "could not execute command", fail); + dblink_res_error(conn, conname, res, fail, + "while executing command"); /* * and save a copy of the command status string to return as our @@ -1491,7 +1493,7 @@ PG_FUNCTION_INFO_V1(dblink_get_pkey); Datum dblink_get_pkey(PG_FUNCTION_ARGS) { - int16 numatts; + int16 indnkeyatts; char **results; FuncCallContext *funcctx; int32 call_cntr; @@ -1517,7 +1519,7 @@ dblink_get_pkey(PG_FUNCTION_ARGS) rel = get_rel_from_relname(PG_GETARG_TEXT_PP(0), AccessShareLock, ACL_SELECT); /* get the array of attnums */ - results = 
get_pkey_attnames(rel, &numatts); + results = get_pkey_attnames(rel, &indnkeyatts); relation_close(rel, AccessShareLock); @@ -1537,9 +1539,9 @@ dblink_get_pkey(PG_FUNCTION_ARGS) attinmeta = TupleDescGetAttInMetadata(tupdesc); funcctx->attinmeta = attinmeta; - if ((results != NULL) && (numatts > 0)) + if ((results != NULL) && (indnkeyatts > 0)) { - funcctx->max_calls = numatts; + funcctx->max_calls = indnkeyatts; /* got results, keep track of them */ funcctx->user_fctx = results; @@ -1980,7 +1982,7 @@ dblink_fdw_validator(PG_FUNCTION_ARGS) ereport(ERROR, (errcode(ERRCODE_FDW_OUT_OF_MEMORY), errmsg("out of memory"), - errdetail("could not get libpq's default connection options"))); + errdetail("Could not get libpq's default connection options."))); } /* Validate each supplied option. */ @@ -2027,10 +2029,10 @@ dblink_fdw_validator(PG_FUNCTION_ARGS) * get_pkey_attnames * * Get the primary key attnames for the given relation. - * Return NULL, and set numatts = 0, if no primary key exists. + * Return NULL, and set indnkeyatts = 0, if no primary key exists. */ static char ** -get_pkey_attnames(Relation rel, int16 *numatts) +get_pkey_attnames(Relation rel, int16 *indnkeyatts) { Relation indexRelation; ScanKeyData skey; @@ -2040,8 +2042,8 @@ get_pkey_attnames(Relation rel, int16 *numatts) char **result = NULL; TupleDesc tupdesc; - /* initialize numatts to 0 in case no primary key exists */ - *numatts = 0; + /* initialize indnkeyatts to 0 in case no primary key exists */ + *indnkeyatts = 0; tupdesc = rel->rd_att; @@ -2062,12 +2064,12 @@ get_pkey_attnames(Relation rel, int16 *numatts) /* we're only interested if it is the primary key */ if (index->indisprimary) { - *numatts = index->indnatts; - if (*numatts > 0) + *indnkeyatts = index->indnkeyatts; + if (*indnkeyatts > 0) { - result = (char **) palloc(*numatts * sizeof(char *)); + result = (char **) palloc(*indnkeyatts * sizeof(char *)); - for (i = 0; i < *numatts; i++) + for (i = 0; i < *indnkeyatts; i++) result[i] = SPI_fname(tupdesc, index->indkey.values[i]); } break; @@ -2172,14 +2174,16 @@ get_sql_insert(Relation rel, int *pkattnums, int pknumatts, char **src_pkattvals needComma = false; for (i = 0; i < natts; i++) { - if (tupdesc->attrs[i]->attisdropped) + Form_pg_attribute att = TupleDescAttr(tupdesc, i); + + if (att->attisdropped) continue; if (needComma) appendStringInfoChar(&buf, ','); appendStringInfoString(&buf, - quote_ident_cstr(NameStr(tupdesc->attrs[i]->attname))); + quote_ident_cstr(NameStr(att->attname))); needComma = true; } @@ -2191,7 +2195,7 @@ get_sql_insert(Relation rel, int *pkattnums, int pknumatts, char **src_pkattvals needComma = false; for (i = 0; i < natts; i++) { - if (tupdesc->attrs[i]->attisdropped) + if (TupleDescAttr(tupdesc, i)->attisdropped) continue; if (needComma) @@ -2215,7 +2219,7 @@ get_sql_insert(Relation rel, int *pkattnums, int pknumatts, char **src_pkattvals } appendStringInfoChar(&buf, ')'); - return (buf.data); + return buf.data; } static char * @@ -2237,12 +2241,13 @@ get_sql_delete(Relation rel, int *pkattnums, int pknumatts, char **tgt_pkattvals for (i = 0; i < pknumatts; i++) { int pkattnum = pkattnums[i]; + Form_pg_attribute attr = TupleDescAttr(tupdesc, pkattnum); if (i > 0) appendStringInfoString(&buf, " AND "); appendStringInfoString(&buf, - quote_ident_cstr(NameStr(tupdesc->attrs[pkattnum]->attname))); + quote_ident_cstr(NameStr(attr->attname))); if (tgt_pkattvals[i] != NULL) appendStringInfo(&buf, " = %s", @@ -2251,7 +2256,7 @@ get_sql_delete(Relation rel, int *pkattnums, int pknumatts, char 
**tgt_pkattvals appendStringInfoString(&buf, " IS NULL"); } - return (buf.data); + return buf.data; } static char * @@ -2289,14 +2294,16 @@ get_sql_update(Relation rel, int *pkattnums, int pknumatts, char **src_pkattvals needComma = false; for (i = 0; i < natts; i++) { - if (tupdesc->attrs[i]->attisdropped) + Form_pg_attribute attr = TupleDescAttr(tupdesc, i); + + if (attr->attisdropped) continue; if (needComma) appendStringInfoString(&buf, ", "); appendStringInfo(&buf, "%s = ", - quote_ident_cstr(NameStr(tupdesc->attrs[i]->attname))); + quote_ident_cstr(NameStr(attr->attname))); key = get_attnum_pk_pos(pkattnums, pknumatts, i); @@ -2320,12 +2327,13 @@ get_sql_update(Relation rel, int *pkattnums, int pknumatts, char **src_pkattvals for (i = 0; i < pknumatts; i++) { int pkattnum = pkattnums[i]; + Form_pg_attribute attr = TupleDescAttr(tupdesc, pkattnum); if (i > 0) appendStringInfoString(&buf, " AND "); appendStringInfoString(&buf, - quote_ident_cstr(NameStr(tupdesc->attrs[pkattnum]->attname))); + quote_ident_cstr(NameStr(attr->attname))); val = tgt_pkattvals[i]; @@ -2335,7 +2343,7 @@ get_sql_update(Relation rel, int *pkattnums, int pknumatts, char **src_pkattvals appendStringInfoString(&buf, " IS NULL"); } - return (buf.data); + return buf.data; } /* @@ -2409,14 +2417,16 @@ get_tuple_of_interest(Relation rel, int *pkattnums, int pknumatts, char **src_pk for (i = 0; i < natts; i++) { + Form_pg_attribute attr = TupleDescAttr(tupdesc, i); + if (i > 0) appendStringInfoString(&buf, ", "); - if (tupdesc->attrs[i]->attisdropped) + if (attr->attisdropped) appendStringInfoString(&buf, "NULL"); else appendStringInfoString(&buf, - quote_ident_cstr(NameStr(tupdesc->attrs[i]->attname))); + quote_ident_cstr(NameStr(attr->attname))); } appendStringInfo(&buf, " FROM %s WHERE ", relname); @@ -2424,12 +2434,13 @@ get_tuple_of_interest(Relation rel, int *pkattnums, int pknumatts, char **src_pk for (i = 0; i < pknumatts; i++) { int pkattnum = pkattnums[i]; + Form_pg_attribute attr = TupleDescAttr(tupdesc, pkattnum); if (i > 0) appendStringInfoString(&buf, " AND "); appendStringInfoString(&buf, - quote_ident_cstr(NameStr(tupdesc->attrs[pkattnum]->attname))); + quote_ident_cstr(NameStr(attr->attname))); if (src_pkattvals[i] != NULL) appendStringInfo(&buf, " = %s", @@ -2495,7 +2506,7 @@ get_rel_from_relname(text *relname_text, LOCKMODE lockmode, AclMode aclmode) aclresult = pg_class_aclcheck(RelationGetRelid(rel), GetUserId(), aclmode); if (aclresult != ACLCHECK_OK) - aclcheck_error(aclresult, ACL_KIND_CLASS, + aclcheck_error(aclresult, get_relkind_objtype(rel->rd_rel->relkind), RelationGetRelationName(rel)); return rel; @@ -2540,9 +2551,9 @@ getConnectionByName(const char *name) key, HASH_FIND, NULL); if (hentry) - return (hentry->rconn); + return hentry->rconn; - return (NULL); + return NULL; } static HTAB * @@ -2667,9 +2678,17 @@ dblink_connstr_check(const char *connstr) } } +/* + * Report an error received from the remote server + * + * res: the received error result (will be freed) + * fail: true for ERROR ereport, false for NOTICE + * fmt and following args: sprintf-style format and values for errcontext; + * the resulting string should be worded like "while <some action>" + */ static void dblink_res_error(PGconn *conn, const char *conname, PGresult *res, - const char *dblink_context_msg, bool fail) + bool fail, const char *fmt,...) 
{ int level; char *pg_diag_sqlstate = PQresultErrorField(res, PG_DIAG_SQLSTATE); @@ -2682,7 +2701,8 @@ dblink_res_error(PGconn *conn, const char *conname, PGresult *res, char *message_detail; char *message_hint; char *message_context; - const char *dblink_context_conname = "unnamed"; + va_list ap; + char dblink_context_msg[512]; if (fail) level = ERROR; @@ -2711,11 +2731,25 @@ dblink_res_error(PGconn *conn, const char *conname, PGresult *res, if (message_primary == NULL) message_primary = pchomp(PQerrorMessage(conn)); + /* + * Now that we've copied all the data we need out of the PGresult, it's + * safe to free it. We must do this to avoid PGresult leakage. We're + * leaking all the strings too, but those are in palloc'd memory that will + * get cleaned up eventually. + */ if (res) PQclear(res); - if (conname) - dblink_context_conname = conname; + /* + * Format the basic errcontext string. Below, we'll add on something + * about the connection name. That's a violation of the translatability + * guidelines about constructing error messages out of parts, but since + * there's no translation support for dblink, there's no need to worry + * about that (yet). + */ + va_start(ap, fmt); + vsnprintf(dblink_context_msg, sizeof(dblink_context_msg), fmt, ap); + va_end(ap); ereport(level, (errcode(sqlstate), @@ -2723,9 +2757,12 @@ dblink_res_error(PGconn *conn, const char *conname, PGresult *res, errmsg("could not obtain message string for remote error"), message_detail ? errdetail_internal("%s", message_detail) : 0, message_hint ? errhint("%s", message_hint) : 0, - message_context ? errcontext("%s", message_context) : 0, - errcontext("Error occurred on dblink connection named \"%s\": %s.", - dblink_context_conname, dblink_context_msg))); + message_context ? (errcontext("%s", message_context)) : 0, + conname ? + (errcontext("%s on dblink connection named \"%s\"", + dblink_context_msg, conname)) : + (errcontext("%s on unnamed dblink connection", + dblink_context_msg)))); } /* @@ -2760,7 +2797,7 @@ get_connect_string(const char *servername) ereport(ERROR, (errcode(ERRCODE_FDW_OUT_OF_MEMORY), errmsg("out of memory"), - errdetail("could not get libpq's default connection options"))); + errdetail("Could not get libpq's default connection options."))); } /* first gather the server connstr options */ @@ -2780,7 +2817,7 @@ get_connect_string(const char *servername) /* Check permissions, user must have usage on the server. 
*/ aclresult = pg_foreign_server_aclcheck(serverid, userid, ACL_USAGE); if (aclresult != ACLCHECK_OK) - aclcheck_error(aclresult, ACL_KIND_FOREIGN_SERVER, foreign_server->servername); + aclcheck_error(aclresult, OBJECT_FOREIGN_SERVER, foreign_server->servername); foreach(cell, fdw->options) { @@ -2894,7 +2931,7 @@ validate_pkattnums(Relation rel, for (j = 0; j < natts; j++) { /* dropped columns don't count */ - if (tupdesc->attrs[j]->attisdropped) + if (TupleDescAttr(tupdesc, j)->attisdropped) continue; if (++lnum == pkattnum) diff --git a/contrib/dblink/expected/dblink.out b/contrib/dblink/expected/dblink.out index 511691e57f..6ceabb453c 100644 --- a/contrib/dblink/expected/dblink.out +++ b/contrib/dblink/expected/dblink.out @@ -54,6 +54,61 @@ SELECT dblink_build_sql_delete('foo','1 2',2,'{"0", "a"}'); -- too many pk fields, should fail SELECT dblink_build_sql_delete('foo','1 2 3 4',4,'{"0", "a", "{a0,b0,c0}"}'); ERROR: invalid attribute number 4 +-- repeat the test for table with primary key index with included columns +CREATE TABLE foo_1(f1 int, f2 text, f3 text[], primary key (f1,f2) include (f3)); +INSERT INTO foo_1 VALUES (0,'a','{"a0","b0","c0"}'); +INSERT INTO foo_1 VALUES (1,'b','{"a1","b1","c1"}'); +INSERT INTO foo_1 VALUES (2,'c','{"a2","b2","c2"}'); +INSERT INTO foo_1 VALUES (3,'d','{"a3","b3","c3"}'); +INSERT INTO foo_1 VALUES (4,'e','{"a4","b4","c4"}'); +INSERT INTO foo_1 VALUES (5,'f','{"a5","b5","c5"}'); +INSERT INTO foo_1 VALUES (6,'g','{"a6","b6","c6"}'); +INSERT INTO foo_1 VALUES (7,'h','{"a7","b7","c7"}'); +INSERT INTO foo_1 VALUES (8,'i','{"a8","b8","c8"}'); +INSERT INTO foo_1 VALUES (9,'j','{"a9","b9","c9"}'); +-- misc utilities +-- list the primary key fields +SELECT * +FROM dblink_get_pkey('foo_1'); + position | colname +----------+--------- + 1 | f1 + 2 | f2 +(2 rows) + +-- build an insert statement based on a local tuple, +-- replacing the primary key values with new ones +SELECT dblink_build_sql_insert('foo_1','1 2',2,'{"0", "a"}','{"99", "xyz"}'); + dblink_build_sql_insert +------------------------------------------------------------- + INSERT INTO foo_1(f1,f2,f3) VALUES('99','xyz','{a0,b0,c0}') +(1 row) + +-- too many pk fields, should fail +SELECT dblink_build_sql_insert('foo_1','1 2 3 4',4,'{"0", "a", "{a0,b0,c0}"}','{"99", "xyz", "{za0,zb0,zc0}"}'); +ERROR: invalid attribute number 4 +-- build an update statement based on a local tuple, +-- replacing the primary key values with new ones +SELECT dblink_build_sql_update('foo_1','1 2',2,'{"0", "a"}','{"99", "xyz"}'); + dblink_build_sql_update +------------------------------------------------------------------------------------------ + UPDATE foo_1 SET f1 = '99', f2 = 'xyz', f3 = '{a0,b0,c0}' WHERE f1 = '99' AND f2 = 'xyz' +(1 row) + +-- too many pk fields, should fail +SELECT dblink_build_sql_update('foo_1','1 2 3 4',4,'{"0", "a", "{a0,b0,c0}"}','{"99", "xyz", "{za0,zb0,zc0}"}'); +ERROR: invalid attribute number 4 +-- build a delete statement based on a local tuple, +SELECT dblink_build_sql_delete('foo_1','1 2',2,'{"0", "a"}'); + dblink_build_sql_delete +----------------------------------------------- + DELETE FROM foo_1 WHERE f1 = '0' AND f2 = 'a' +(1 row) + +-- too many pk fields, should fail +SELECT dblink_build_sql_delete('foo_1','1 2 3 4',4,'{"0", "a", "{a0,b0,c0}"}'); +ERROR: invalid attribute number 4 +DROP TABLE foo_1; -- retest using a quoted and schema qualified table CREATE SCHEMA "MySchema"; CREATE TABLE "MySchema"."Foo"(f1 int, f2 text, f3 text[], primary key (f1,f2)); @@ -155,7 +210,7 @@ WHERE t.a 
> 7; -- open a cursor with bad SQL and fail_on_error set to false SELECT dblink_open('rmt_foo_cursor','SELECT * FROM foobar',false); NOTICE: relation "foobar" does not exist -CONTEXT: Error occurred on dblink connection named "unnamed": could not open cursor. +CONTEXT: while opening cursor "rmt_foo_cursor" on unnamed dblink connection dblink_open ------------- ERROR @@ -223,7 +278,7 @@ FROM dblink_fetch('rmt_foo_cursor',4) AS t(a int, b text, c text[]); SELECT * FROM dblink_fetch('rmt_foobar_cursor',4,false) AS t(a int, b text, c text[]); NOTICE: cursor "rmt_foobar_cursor" does not exist -CONTEXT: Error occurred on dblink connection named "unnamed": could not fetch from cursor. +CONTEXT: while fetching from cursor "rmt_foobar_cursor" on unnamed dblink connection a | b | c ---+---+--- (0 rows) @@ -238,7 +293,7 @@ SELECT dblink_exec('ABORT'); -- close the wrong cursor SELECT dblink_close('rmt_foobar_cursor',false); NOTICE: cursor "rmt_foobar_cursor" does not exist -CONTEXT: Error occurred on dblink connection named "unnamed": could not close cursor. +CONTEXT: while closing cursor "rmt_foobar_cursor" on unnamed dblink connection dblink_close -------------- ERROR @@ -248,12 +303,12 @@ CONTEXT: Error occurred on dblink connection named "unnamed": could not close c SELECT * FROM dblink_fetch('rmt_foo_cursor',4) AS t(a int, b text, c text[]); ERROR: cursor "rmt_foo_cursor" does not exist -CONTEXT: Error occurred on dblink connection named "unnamed": could not fetch from cursor. +CONTEXT: while fetching from cursor "rmt_foo_cursor" on unnamed dblink connection -- this time, 'cursor "rmt_foo_cursor" not found' as a notice SELECT * FROM dblink_fetch('rmt_foo_cursor',4,false) AS t(a int, b text, c text[]); NOTICE: cursor "rmt_foo_cursor" does not exist -CONTEXT: Error occurred on dblink connection named "unnamed": could not fetch from cursor. +CONTEXT: while fetching from cursor "rmt_foo_cursor" on unnamed dblink connection a | b | c ---+---+--- (0 rows) @@ -316,7 +371,7 @@ FROM dblink('SELECT * FROM foo') AS t(a int, b text, c text[]); SELECT * FROM dblink('SELECT * FROM foobar',false) AS t(a int, b text, c text[]); NOTICE: relation "foobar" does not exist -CONTEXT: Error occurred on dblink connection named "unnamed": could not execute query. +CONTEXT: while executing query on unnamed dblink connection a | b | c ---+---+--- (0 rows) @@ -340,7 +395,7 @@ WHERE a = 11; -- botch a change to some other data SELECT dblink_exec('UPDATE foobar SET f3[2] = ''b99'' WHERE f1 = 11',false); NOTICE: relation "foobar" does not exist -CONTEXT: Error occurred on dblink connection named "unnamed": could not execute command. +CONTEXT: while executing command on unnamed dblink connection dblink_exec ------------- ERROR @@ -400,7 +455,7 @@ SELECT * FROM dblink('myconn','SELECT * FROM foobar',false) AS t(a int, b text, c text[]) WHERE t.a > 7; NOTICE: relation "foobar" does not exist -CONTEXT: Error occurred on dblink connection named "myconn": could not execute query. +CONTEXT: while executing query on dblink connection named "myconn" a | b | c ---+---+--- (0 rows) @@ -437,7 +492,7 @@ SELECT dblink_disconnect('myconn2'); -- open a cursor incorrectly SELECT dblink_open('myconn','rmt_foo_cursor','SELECT * FROM foobar',false); NOTICE: relation "foobar" does not exist -CONTEXT: Error occurred on dblink connection named "myconn": could not open cursor. 
+CONTEXT: while opening cursor "rmt_foo_cursor" on dblink connection named "myconn" dblink_open ------------- ERROR @@ -523,7 +578,7 @@ SELECT dblink_close('myconn','rmt_foo_cursor'); -- this should fail because there is no open transaction SELECT dblink_exec('myconn','DECLARE xact_test CURSOR FOR SELECT * FROM foo'); ERROR: DECLARE CURSOR can only be used in transaction blocks -CONTEXT: Error occurred on dblink connection named "myconn": could not execute command. +CONTEXT: while executing command on dblink connection named "myconn" -- reset remote transaction state SELECT dblink_exec('myconn','ABORT'); dblink_exec @@ -573,7 +628,7 @@ FROM dblink_fetch('myconn','rmt_foo_cursor',4) AS t(a int, b text, c text[]); SELECT * FROM dblink_fetch('myconn','rmt_foobar_cursor',4,false) AS t(a int, b text, c text[]); NOTICE: cursor "rmt_foobar_cursor" does not exist -CONTEXT: Error occurred on dblink connection named "myconn": could not fetch from cursor. +CONTEXT: while fetching from cursor "rmt_foobar_cursor" on dblink connection named "myconn" a | b | c ---+---+--- (0 rows) @@ -589,7 +644,7 @@ SELECT dblink_exec('myconn','ABORT'); SELECT * FROM dblink_fetch('myconn','rmt_foo_cursor',4) AS t(a int, b text, c text[]); ERROR: cursor "rmt_foo_cursor" does not exist -CONTEXT: Error occurred on dblink connection named "myconn": could not fetch from cursor. +CONTEXT: while fetching from cursor "rmt_foo_cursor" on dblink connection named "myconn" -- close the named persistent connection SELECT dblink_disconnect('myconn'); dblink_disconnect @@ -1099,7 +1154,7 @@ FROM dblink_fetch('myconn','error_cursor', 1) AS t(i int); SELECT * FROM dblink_fetch('myconn','error_cursor', 1) AS t(i int); -ERROR: invalid input syntax for integer: "not an int" +ERROR: invalid input syntax for type integer: "not an int" -- Make sure that the local settings have retained their values in spite -- of shenanigans on the connection. 
SHOW datestyle; diff --git a/contrib/dblink/sql/dblink.sql b/contrib/dblink/sql/dblink.sql index b093fa6722..3e96b98571 100644 --- a/contrib/dblink/sql/dblink.sql +++ b/contrib/dblink/sql/dblink.sql @@ -38,6 +38,44 @@ SELECT dblink_build_sql_delete('foo','1 2',2,'{"0", "a"}'); -- too many pk fields, should fail SELECT dblink_build_sql_delete('foo','1 2 3 4',4,'{"0", "a", "{a0,b0,c0}"}'); +-- repeat the test for table with primary key index with included columns +CREATE TABLE foo_1(f1 int, f2 text, f3 text[], primary key (f1,f2) include (f3)); +INSERT INTO foo_1 VALUES (0,'a','{"a0","b0","c0"}'); +INSERT INTO foo_1 VALUES (1,'b','{"a1","b1","c1"}'); +INSERT INTO foo_1 VALUES (2,'c','{"a2","b2","c2"}'); +INSERT INTO foo_1 VALUES (3,'d','{"a3","b3","c3"}'); +INSERT INTO foo_1 VALUES (4,'e','{"a4","b4","c4"}'); +INSERT INTO foo_1 VALUES (5,'f','{"a5","b5","c5"}'); +INSERT INTO foo_1 VALUES (6,'g','{"a6","b6","c6"}'); +INSERT INTO foo_1 VALUES (7,'h','{"a7","b7","c7"}'); +INSERT INTO foo_1 VALUES (8,'i','{"a8","b8","c8"}'); +INSERT INTO foo_1 VALUES (9,'j','{"a9","b9","c9"}'); + +-- misc utilities + +-- list the primary key fields +SELECT * +FROM dblink_get_pkey('foo_1'); + +-- build an insert statement based on a local tuple, +-- replacing the primary key values with new ones +SELECT dblink_build_sql_insert('foo_1','1 2',2,'{"0", "a"}','{"99", "xyz"}'); +-- too many pk fields, should fail +SELECT dblink_build_sql_insert('foo_1','1 2 3 4',4,'{"0", "a", "{a0,b0,c0}"}','{"99", "xyz", "{za0,zb0,zc0}"}'); + +-- build an update statement based on a local tuple, +-- replacing the primary key values with new ones +SELECT dblink_build_sql_update('foo_1','1 2',2,'{"0", "a"}','{"99", "xyz"}'); +-- too many pk fields, should fail +SELECT dblink_build_sql_update('foo_1','1 2 3 4',4,'{"0", "a", "{a0,b0,c0}"}','{"99", "xyz", "{za0,zb0,zc0}"}'); + +-- build a delete statement based on a local tuple, +SELECT dblink_build_sql_delete('foo_1','1 2',2,'{"0", "a"}'); +-- too many pk fields, should fail +SELECT dblink_build_sql_delete('foo_1','1 2 3 4',4,'{"0", "a", "{a0,b0,c0}"}'); + +DROP TABLE foo_1; + -- retest using a quoted and schema qualified table CREATE SCHEMA "MySchema"; CREATE TABLE "MySchema"."Foo"(f1 int, f2 text, f3 text[], primary key (f1,f2)); diff --git a/contrib/dict_int/dict_int.c b/contrib/dict_int/dict_int.c index 55427c4bc7..56ede37089 100644 --- a/contrib/dict_int/dict_int.c +++ b/contrib/dict_int/dict_int.c @@ -3,7 +3,7 @@ * dict_int.c * Text search dictionary for integers * - * Copyright (c) 2007-2017, PostgreSQL Global Development Group + * Copyright (c) 2007-2018, PostgreSQL Global Development Group * * IDENTIFICATION * contrib/dict_int/dict_int.c @@ -42,11 +42,11 @@ dintdict_init(PG_FUNCTION_ARGS) { DefElem *defel = (DefElem *) lfirst(l); - if (pg_strcasecmp(defel->defname, "MAXLEN") == 0) + if (strcmp(defel->defname, "maxlen") == 0) { d->maxlen = atoi(defGetString(defel)); } - else if (pg_strcasecmp(defel->defname, "REJECTLONG") == 0) + else if (strcmp(defel->defname, "rejectlong") == 0) { d->rejectlong = defGetBoolean(defel); } diff --git a/contrib/dict_xsyn/dict_xsyn.c b/contrib/dict_xsyn/dict_xsyn.c index fcf541ee0f..a79ece240c 100644 --- a/contrib/dict_xsyn/dict_xsyn.c +++ b/contrib/dict_xsyn/dict_xsyn.c @@ -3,7 +3,7 @@ * dict_xsyn.c * Extended synonym dictionary * - * Copyright (c) 2007-2017, PostgreSQL Global Development Group + * Copyright (c) 2007-2018, PostgreSQL Global Development Group * * IDENTIFICATION * contrib/dict_xsyn/dict_xsyn.c @@ -70,7 +70,7 @@ compare_syn(const void 
*a, const void *b) } static void -read_dictionary(DictSyn *d, char *filename) +read_dictionary(DictSyn *d, const char *filename) { char *real_filename = get_tsearch_config_filename(filename, "rules"); tsearch_readline_state trst; @@ -157,23 +157,23 @@ dxsyn_init(PG_FUNCTION_ARGS) { DefElem *defel = (DefElem *) lfirst(l); - if (pg_strcasecmp(defel->defname, "MATCHORIG") == 0) + if (strcmp(defel->defname, "matchorig") == 0) { d->matchorig = defGetBoolean(defel); } - else if (pg_strcasecmp(defel->defname, "KEEPORIG") == 0) + else if (strcmp(defel->defname, "keeporig") == 0) { d->keeporig = defGetBoolean(defel); } - else if (pg_strcasecmp(defel->defname, "MATCHSYNONYMS") == 0) + else if (strcmp(defel->defname, "matchsynonyms") == 0) { d->matchsynonyms = defGetBoolean(defel); } - else if (pg_strcasecmp(defel->defname, "KEEPSYNONYMS") == 0) + else if (strcmp(defel->defname, "keepsynonyms") == 0) { d->keepsynonyms = defGetBoolean(defel); } - else if (pg_strcasecmp(defel->defname, "RULES") == 0) + else if (strcmp(defel->defname, "rules") == 0) { /* we can't read the rules before parsing all options! */ filename = defGetString(defel); diff --git a/contrib/earthdistance/expected/earthdistance.out b/contrib/earthdistance/expected/earthdistance.out index 89022491cb..26a843c3fa 100644 --- a/contrib/earthdistance/expected/earthdistance.out +++ b/contrib/earthdistance/expected/earthdistance.out @@ -882,11 +882,12 @@ SELECT earth_box(ll_to_earth(90,180), -- -- Test the recommended constraints. -- -SELECT is_point(ll_to_earth(0,0)); -ERROR: function is_point(earth) does not exist -LINE 1: SELECT is_point(ll_to_earth(0,0)); - ^ -HINT: No function matches the given name and argument types. You might need to add explicit type casts. +SELECT cube_is_point(ll_to_earth(0,0)); + cube_is_point +--------------- + t +(1 row) + SELECT cube_dim(ll_to_earth(0,0)) <= 3; ?column? ---------- @@ -900,11 +901,12 @@ SELECT abs(cube_distance(ll_to_earth(0,0), '(0)'::cube) / earth() - 1) < t (1 row) -SELECT is_point(ll_to_earth(30,60)); -ERROR: function is_point(earth) does not exist -LINE 1: SELECT is_point(ll_to_earth(30,60)); - ^ -HINT: No function matches the given name and argument types. You might need to add explicit type casts. +SELECT cube_is_point(ll_to_earth(30,60)); + cube_is_point +--------------- + t +(1 row) + SELECT cube_dim(ll_to_earth(30,60)) <= 3; ?column? ---------- @@ -918,11 +920,12 @@ SELECT abs(cube_distance(ll_to_earth(30,60), '(0)'::cube) / earth() - 1) < t (1 row) -SELECT is_point(ll_to_earth(60,90)); -ERROR: function is_point(earth) does not exist -LINE 1: SELECT is_point(ll_to_earth(60,90)); - ^ -HINT: No function matches the given name and argument types. You might need to add explicit type casts. +SELECT cube_is_point(ll_to_earth(60,90)); + cube_is_point +--------------- + t +(1 row) + SELECT cube_dim(ll_to_earth(60,90)) <= 3; ?column? ---------- @@ -936,11 +939,12 @@ SELECT abs(cube_distance(ll_to_earth(60,90), '(0)'::cube) / earth() - 1) < t (1 row) -SELECT is_point(ll_to_earth(-30,-90)); -ERROR: function is_point(earth) does not exist -LINE 1: SELECT is_point(ll_to_earth(-30,-90)); - ^ -HINT: No function matches the given name and argument types. You might need to add explicit type casts. +SELECT cube_is_point(ll_to_earth(-30,-90)); + cube_is_point +--------------- + t +(1 row) + SELECT cube_dim(ll_to_earth(-30,-90)) <= 3; ?column? ---------- @@ -985,7 +989,7 @@ HINT: You can drop extension cube instead. 
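For reference, the three checks exercised above are the constraints the earthdistance module recommends for earth-typed values; the tests now spell them with cube_is_point(), the current name of the point test, since is_point() is no longer available for this type. A rough SQL sketch, assuming the cube and earthdistance extensions are installed (the domain name is hypothetical and the snippet is illustrative only, not part of the patch):

CREATE DOMAIN earth_sketch AS cube
  CONSTRAINT is_single_point  CHECK (cube_is_point(VALUE))
  CONSTRAINT at_most_3_dims   CHECK (cube_dim(VALUE) <= 3)
  CONSTRAINT on_earth_surface CHECK (
    abs(cube_distance(VALUE, '(0)'::cube) / earth() - 1) < '10e-12'::float8);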
create table foo (f1 cube, f2 int); drop extension cube; -- fail, foo.f1 requires it ERROR: cannot drop extension cube because other objects depend on it -DETAIL: table foo column f1 depends on type cube +DETAIL: column f1 of table foo depends on type cube HINT: Use DROP ... CASCADE to drop the dependent objects too. drop table foo; drop extension cube; @@ -1039,15 +1043,15 @@ create extension cube with schema c; create table foo (f1 c.cube, f2 int); drop extension cube; -- fail, foo.f1 requires it ERROR: cannot drop extension cube because other objects depend on it -DETAIL: table foo column f1 depends on type c.cube +DETAIL: column f1 of table foo depends on type c.cube HINT: Use DROP ... CASCADE to drop the dependent objects too. drop schema c; -- fail, cube requires it ERROR: cannot drop schema c because other objects depend on it DETAIL: extension cube depends on schema c -table foo column f1 depends on type c.cube +column f1 of table foo depends on type c.cube HINT: Use DROP ... CASCADE to drop the dependent objects too. drop extension cube cascade; -NOTICE: drop cascades to table foo column f1 +NOTICE: drop cascades to column f1 of table foo \d foo Table "public.foo" Column | Type | Collation | Nullable | Default diff --git a/contrib/earthdistance/sql/earthdistance.sql b/contrib/earthdistance/sql/earthdistance.sql index 860450276f..4145561217 100644 --- a/contrib/earthdistance/sql/earthdistance.sql +++ b/contrib/earthdistance/sql/earthdistance.sql @@ -282,19 +282,19 @@ SELECT earth_box(ll_to_earth(90,180), -- Test the recommended constraints. -- -SELECT is_point(ll_to_earth(0,0)); +SELECT cube_is_point(ll_to_earth(0,0)); SELECT cube_dim(ll_to_earth(0,0)) <= 3; SELECT abs(cube_distance(ll_to_earth(0,0), '(0)'::cube) / earth() - 1) < '10e-12'::float8; -SELECT is_point(ll_to_earth(30,60)); +SELECT cube_is_point(ll_to_earth(30,60)); SELECT cube_dim(ll_to_earth(30,60)) <= 3; SELECT abs(cube_distance(ll_to_earth(30,60), '(0)'::cube) / earth() - 1) < '10e-12'::float8; -SELECT is_point(ll_to_earth(60,90)); +SELECT cube_is_point(ll_to_earth(60,90)); SELECT cube_dim(ll_to_earth(60,90)) <= 3; SELECT abs(cube_distance(ll_to_earth(60,90), '(0)'::cube) / earth() - 1) < '10e-12'::float8; -SELECT is_point(ll_to_earth(-30,-90)); +SELECT cube_is_point(ll_to_earth(-30,-90)); SELECT cube_dim(ll_to_earth(-30,-90)) <= 3; SELECT abs(cube_distance(ll_to_earth(-30,-90), '(0)'::cube) / earth() - 1) < '10e-12'::float8; diff --git a/contrib/file_fdw/data/list1.csv b/contrib/file_fdw/data/list1.csv new file mode 100644 index 0000000000..203f3b2324 --- /dev/null +++ b/contrib/file_fdw/data/list1.csv @@ -0,0 +1,2 @@ +1,foo +1,bar diff --git a/contrib/file_fdw/data/list2.bad b/contrib/file_fdw/data/list2.bad new file mode 100644 index 0000000000..00af47f5ef --- /dev/null +++ b/contrib/file_fdw/data/list2.bad @@ -0,0 +1,2 @@ +2,baz +1,qux diff --git a/contrib/file_fdw/data/list2.csv b/contrib/file_fdw/data/list2.csv new file mode 100644 index 0000000000..2fb133d004 --- /dev/null +++ b/contrib/file_fdw/data/list2.csv @@ -0,0 +1,2 @@ +2,baz +2,qux diff --git a/contrib/file_fdw/file_fdw.c b/contrib/file_fdw/file_fdw.c index 2396bd442f..2cf09aecf6 100644 --- a/contrib/file_fdw/file_fdw.c +++ b/contrib/file_fdw/file_fdw.c @@ -3,7 +3,7 @@ * file_fdw.c * foreign-data wrapper for server-side flat files (or programs). 
* - * Copyright (c) 2010-2017, PostgreSQL Global Development Group + * Copyright (c) 2010-2018, PostgreSQL Global Development Group * * IDENTIFICATION * contrib/file_fdw/file_fdw.c @@ -18,6 +18,7 @@ #include "access/htup_details.h" #include "access/reloptions.h" #include "access/sysattr.h" +#include "catalog/pg_authid.h" #include "catalog/pg_foreign_table.h" #include "commands/copy.h" #include "commands/defrem.h" @@ -201,24 +202,6 @@ file_fdw_validator(PG_FUNCTION_ARGS) List *other_options = NIL; ListCell *cell; - /* - * Only superusers are allowed to set options of a file_fdw foreign table. - * This is because we don't want non-superusers to be able to control - * which file gets read or which program gets executed. - * - * Putting this sort of permissions check in a validator is a bit of a - * crock, but there doesn't seem to be any other place that can enforce - * the check more cleanly. - * - * Note that the valid_options[] array disallows setting filename and - * program at any options level other than foreign table --- otherwise - * there'd still be a security hole. - */ - if (catalog == ForeignTableRelationId && !superuser()) - ereport(ERROR, - (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE), - errmsg("only superuser can change options of a file_fdw foreign table"))); - /* * Check that only options supported by file_fdw, and allowed for the * current object type, are given. @@ -264,6 +247,38 @@ file_fdw_validator(PG_FUNCTION_ARGS) ereport(ERROR, (errcode(ERRCODE_SYNTAX_ERROR), errmsg("conflicting or redundant options"))); + + /* + * Check permissions for changing which file or program is used by + * the file_fdw. + * + * Only members of the role 'pg_read_server_files' are allowed to + * set the 'filename' option of a file_fdw foreign table, while + * only members of the role 'pg_execute_server_program' are + * allowed to set the 'program' option. This is because we don't + * want regular users to be able to control which file gets read + * or which program gets executed. + * + * Putting this sort of permissions check in a validator is a bit + * of a crock, but there doesn't seem to be any other place that + * can enforce the check more cleanly. + * + * Note that the valid_options[] array disallows setting filename + * and program at any options level other than foreign table --- + * otherwise there'd still be a security hole. 
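As a rough SQL-level sketch of the check described in this comment (illustrative only, not part of the patch; the role and server names follow the regression tests, the file path is hypothetical, and granting the default role itself requires superuser):

-- Members of pg_read_server_files may set "filename"; members of
-- pg_execute_server_program may set "program".
GRANT pg_read_server_files TO regress_file_fdw_user;
CREATE FOREIGN TABLE agg_sketch (a int, b text)
  SERVER file_server
  OPTIONS (format 'csv', filename '/tmp/agg.csv');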
+ */ + if (strcmp(def->defname, "filename") == 0 && + !is_member_of_role(GetUserId(), DEFAULT_ROLE_READ_SERVER_FILES)) + ereport(ERROR, + (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE), + errmsg("only superuser or a member of the pg_read_server_files role may specify the filename option of a file_fdw foreign table"))); + + if (strcmp(def->defname, "program") == 0 && + !is_member_of_role(GetUserId(), DEFAULT_ROLE_EXECUTE_SERVER_PROGRAM)) + ereport(ERROR, + (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE), + errmsg("only superuser or a member of the pg_execute_server_program role may specify the program option of a file_fdw foreign table"))); + filename = defGetString(def); } @@ -277,7 +292,7 @@ file_fdw_validator(PG_FUNCTION_ARGS) ereport(ERROR, (errcode(ERRCODE_SYNTAX_ERROR), errmsg("conflicting or redundant options"), - errhint("option \"force_not_null\" supplied more than once for a column"))); + errhint("Option \"force_not_null\" supplied more than once for a column."))); force_not_null = def; /* Don't care what the value is, as long as it's a legal boolean */ (void) defGetBoolean(def); @@ -289,7 +304,7 @@ file_fdw_validator(PG_FUNCTION_ARGS) ereport(ERROR, (errcode(ERRCODE_SYNTAX_ERROR), errmsg("conflicting or redundant options"), - errhint("option \"force_null\" supplied more than once for a column"))); + errhint("Option \"force_null\" supplied more than once for a column."))); force_null = def; (void) defGetBoolean(def); } @@ -430,7 +445,7 @@ get_file_fdw_attribute_options(Oid relid) /* Retrieve FDW options for all user-defined attributes. */ for (attnum = 1; attnum <= natts; attnum++) { - Form_pg_attribute attr = tupleDesc->attrs[attnum - 1]; + Form_pg_attribute attr = TupleDescAttr(tupleDesc, attnum - 1); List *options; ListCell *lc; @@ -622,8 +637,8 @@ fileExplainForeignScan(ForeignScanState *node, ExplainState *es) if (!is_program && stat(filename, &stat_buf) == 0) - ExplainPropertyLong("Foreign File Size", (long) stat_buf.st_size, - es); + ExplainPropertyInteger("Foreign File Size", "b", + (int64) stat_buf.st_size, es); } } @@ -824,7 +839,7 @@ fileIsForeignScanParallelSafe(PlannerInfo *root, RelOptInfo *rel, * * Check to see if it's useful to convert only a subset of the file's columns * to binary. If so, construct a list of the column names to be converted, - * return that at *columns, and return TRUE. (Note that it's possible to + * return that at *columns, and return true. (Note that it's possible to * determine that no columns need be converted, for instance with a COUNT(*) * query. So we can't use returning a NIL list to indicate failure.) */ @@ -898,7 +913,7 @@ check_selective_binary_conversion(RelOptInfo *baserel, /* Get user attributes. */ if (attnum > 0) { - Form_pg_attribute attr = tupleDesc->attrs[attnum - 1]; + Form_pg_attribute attr = TupleDescAttr(tupleDesc, attnum - 1); char *attname = NameStr(attr->attname); /* Skip dropped attributes (probably shouldn't see any here). 
*/ @@ -912,7 +927,7 @@ check_selective_binary_conversion(RelOptInfo *baserel, numattrs = 0; for (i = 0; i < tupleDesc->natts; i++) { - Form_pg_attribute attr = tupleDesc->attrs[i]; + Form_pg_attribute attr = TupleDescAttr(tupleDesc, i); if (attr->attisdropped) continue; diff --git a/contrib/file_fdw/input/file_fdw.source b/contrib/file_fdw/input/file_fdw.source index 685561fc2a..a5e79a4549 100644 --- a/contrib/file_fdw/input/file_fdw.source +++ b/contrib/file_fdw/input/file_fdw.source @@ -136,6 +136,11 @@ DELETE FROM agg_csv WHERE a = 100; -- but this should be allowed SELECT * FROM agg_csv FOR UPDATE; +-- copy from isn't supported either +COPY agg_csv FROM STDIN; +12 3.4 +\. + -- constraint exclusion tests \t on EXPLAIN (VERBOSE, COSTS FALSE) SELECT * FROM agg_csv WHERE a < 0; @@ -162,6 +167,28 @@ SELECT tableoid::regclass, * FROM agg FOR UPDATE; ALTER FOREIGN TABLE agg_csv NO INHERIT agg; DROP TABLE agg; +-- declarative partitioning tests +SET ROLE regress_file_fdw_superuser; +CREATE TABLE pt (a int, b text) partition by list (a); +CREATE FOREIGN TABLE p1 partition of pt for values in (1) SERVER file_server +OPTIONS (format 'csv', filename '@abs_srcdir@/data/list1.csv', delimiter ','); +CREATE TABLE p2 partition of pt for values in (2); +SELECT tableoid::regclass, * FROM pt; +SELECT tableoid::regclass, * FROM p1; +SELECT tableoid::regclass, * FROM p2; +COPY pt FROM '@abs_srcdir@/data/list2.bad' with (format 'csv', delimiter ','); -- ERROR +COPY pt FROM '@abs_srcdir@/data/list2.csv' with (format 'csv', delimiter ','); +SELECT tableoid::regclass, * FROM pt; +SELECT tableoid::regclass, * FROM p1; +SELECT tableoid::regclass, * FROM p2; +INSERT INTO pt VALUES (1, 'xyzzy'); -- ERROR +INSERT INTO pt VALUES (2, 'xyzzy'); +UPDATE pt set a = 1 where a = 2; -- ERROR +SELECT tableoid::regclass, * FROM pt; +SELECT tableoid::regclass, * FROM p1; +SELECT tableoid::regclass, * FROM p2; +DROP TABLE pt; + -- privilege tests SET ROLE regress_file_fdw_superuser; SELECT * FROM agg_text ORDER BY a; diff --git a/contrib/file_fdw/output/file_fdw.source b/contrib/file_fdw/output/file_fdw.source index 01e2690a82..853c9f9b28 100644 --- a/contrib/file_fdw/output/file_fdw.source +++ b/contrib/file_fdw/output/file_fdw.source @@ -221,6 +221,9 @@ SELECT * FROM agg_csv FOR UPDATE; 42 | 324.78 (3 rows) +-- copy from isn't supported either +COPY agg_csv FROM STDIN; +ERROR: cannot insert into foreign table "agg_csv" -- constraint exclusion tests \t on EXPLAIN (VERBOSE, COSTS FALSE) SELECT * FROM agg_csv WHERE a < 0; @@ -289,6 +292,89 @@ SELECT tableoid::regclass, * FROM agg FOR UPDATE; ALTER FOREIGN TABLE agg_csv NO INHERIT agg; DROP TABLE agg; +-- declarative partitioning tests +SET ROLE regress_file_fdw_superuser; +CREATE TABLE pt (a int, b text) partition by list (a); +CREATE FOREIGN TABLE p1 partition of pt for values in (1) SERVER file_server +OPTIONS (format 'csv', filename '@abs_srcdir@/data/list1.csv', delimiter ','); +CREATE TABLE p2 partition of pt for values in (2); +SELECT tableoid::regclass, * FROM pt; + tableoid | a | b +----------+---+----- + p1 | 1 | foo + p1 | 1 | bar +(2 rows) + +SELECT tableoid::regclass, * FROM p1; + tableoid | a | b +----------+---+----- + p1 | 1 | foo + p1 | 1 | bar +(2 rows) + +SELECT tableoid::regclass, * FROM p2; + tableoid | a | b +----------+---+--- +(0 rows) + +COPY pt FROM '@abs_srcdir@/data/list2.bad' with (format 'csv', delimiter ','); -- ERROR +ERROR: cannot insert into foreign table "p1" +CONTEXT: COPY pt, line 2: "1,qux" +COPY pt FROM '@abs_srcdir@/data/list2.csv' with 
(format 'csv', delimiter ','); +SELECT tableoid::regclass, * FROM pt; + tableoid | a | b +----------+---+----- + p1 | 1 | foo + p1 | 1 | bar + p2 | 2 | baz + p2 | 2 | qux +(4 rows) + +SELECT tableoid::regclass, * FROM p1; + tableoid | a | b +----------+---+----- + p1 | 1 | foo + p1 | 1 | bar +(2 rows) + +SELECT tableoid::regclass, * FROM p2; + tableoid | a | b +----------+---+----- + p2 | 2 | baz + p2 | 2 | qux +(2 rows) + +INSERT INTO pt VALUES (1, 'xyzzy'); -- ERROR +ERROR: cannot insert into foreign table "p1" +INSERT INTO pt VALUES (2, 'xyzzy'); +UPDATE pt set a = 1 where a = 2; -- ERROR +ERROR: cannot insert into foreign table "p1" +SELECT tableoid::regclass, * FROM pt; + tableoid | a | b +----------+---+------- + p1 | 1 | foo + p1 | 1 | bar + p2 | 2 | baz + p2 | 2 | qux + p2 | 2 | xyzzy +(5 rows) + +SELECT tableoid::regclass, * FROM p1; + tableoid | a | b +----------+---+----- + p1 | 1 | foo + p1 | 1 | bar +(2 rows) + +SELECT tableoid::regclass, * FROM p2; + tableoid | a | b +----------+---+------- + p2 | 2 | baz + p2 | 2 | qux + p2 | 2 | xyzzy +(3 rows) + +DROP TABLE pt; -- privilege tests SET ROLE regress_file_fdw_superuser; SELECT * FROM agg_text ORDER BY a; @@ -312,7 +398,7 @@ SELECT * FROM agg_text ORDER BY a; SET ROLE regress_no_priv_user; SELECT * FROM agg_text ORDER BY a; -- ERROR -ERROR: permission denied for relation agg_text +ERROR: permission denied for foreign table agg_text SET ROLE regress_file_fdw_user; \t on EXPLAIN (VERBOSE, COSTS FALSE) SELECT * FROM agg_text WHERE a > 0; @@ -339,7 +425,7 @@ ALTER FOREIGN TABLE agg_text OWNER TO regress_file_fdw_user; ALTER FOREIGN TABLE agg_text OPTIONS (SET format 'text'); SET ROLE regress_file_fdw_user; ALTER FOREIGN TABLE agg_text OPTIONS (SET format 'text'); -ERROR: only superuser can change options of a file_fdw foreign table +ERROR: only superuser or a member of the pg_read_server_files role may specify the filename option of a file_fdw foreign table SET ROLE regress_file_fdw_superuser; -- cleanup RESET ROLE; diff --git a/contrib/fuzzystrmatch/.gitignore b/contrib/fuzzystrmatch/.gitignore new file mode 100644 index 0000000000..5dcb3ff972 --- /dev/null +++ b/contrib/fuzzystrmatch/.gitignore @@ -0,0 +1,4 @@ +# Generated subdirectories +/log/ +/results/ +/tmp_check/ diff --git a/contrib/fuzzystrmatch/Makefile b/contrib/fuzzystrmatch/Makefile index 51e215a919..bd6f5e50d1 100644 --- a/contrib/fuzzystrmatch/Makefile +++ b/contrib/fuzzystrmatch/Makefile @@ -8,6 +8,8 @@ DATA = fuzzystrmatch--1.1.sql fuzzystrmatch--1.0--1.1.sql \ fuzzystrmatch--unpackaged--1.0.sql PGFILEDESC = "fuzzystrmatch - similarities and distance between strings" +REGRESS = fuzzystrmatch + ifdef USE_PGXS PG_CONFIG = pg_config PGXS := $(shell $(PG_CONFIG) --pgxs) diff --git a/contrib/fuzzystrmatch/dmetaphone.c b/contrib/fuzzystrmatch/dmetaphone.c index 918ee0d90e..bff20b37e6 100644 --- a/contrib/fuzzystrmatch/dmetaphone.c +++ b/contrib/fuzzystrmatch/dmetaphone.c @@ -52,7 +52,7 @@ /***************************** COPYRIGHT NOTICES *********************** Most of this code is directly from the Text::DoubleMetaphone perl module -version 0.05 available from http://www.cpan.org. +version 0.05 available from https://www.cpan.org/. It bears this copyright notice: @@ -232,7 +232,7 @@ metastring; */ static metastring * -NewMetaString(char *init_str) +NewMetaString(const char *init_str) { metastring *s; char empty_string[] = ""; @@ -375,7 +375,7 @@ StringAt(metastring *s, int start, int length,...) 
static void -MetaphAdd(metastring *s, char *new_str) +MetaphAdd(metastring *s, const char *new_str) { int add_length; diff --git a/contrib/fuzzystrmatch/expected/fuzzystrmatch.out b/contrib/fuzzystrmatch/expected/fuzzystrmatch.out new file mode 100644 index 0000000000..493c95cdfa --- /dev/null +++ b/contrib/fuzzystrmatch/expected/fuzzystrmatch.out @@ -0,0 +1,67 @@ +CREATE EXTENSION fuzzystrmatch; +SELECT soundex('hello world!'); + soundex +--------- + H464 +(1 row) + +SELECT soundex('Anne'), soundex('Ann'), difference('Anne', 'Ann'); + soundex | soundex | difference +---------+---------+------------ + A500 | A500 | 4 +(1 row) + +SELECT soundex('Anne'), soundex('Andrew'), difference('Anne', 'Andrew'); + soundex | soundex | difference +---------+---------+------------ + A500 | A536 | 2 +(1 row) + +SELECT soundex('Anne'), soundex('Margaret'), difference('Anne', 'Margaret'); + soundex | soundex | difference +---------+---------+------------ + A500 | M626 | 0 +(1 row) + +SELECT levenshtein('GUMBO', 'GAMBOL'); + levenshtein +------------- + 2 +(1 row) + +SELECT levenshtein('GUMBO', 'GAMBOL', 2, 1, 1); + levenshtein +------------- + 3 +(1 row) + +SELECT levenshtein_less_equal('extensive', 'exhaustive', 2); + levenshtein_less_equal +------------------------ + 3 +(1 row) + +SELECT levenshtein_less_equal('extensive', 'exhaustive', 4); + levenshtein_less_equal +------------------------ + 4 +(1 row) + +SELECT metaphone('GUMBO', 4); + metaphone +----------- + KM +(1 row) + +SELECT dmetaphone('gumbo'); + dmetaphone +------------ + KMP +(1 row) + +SELECT dmetaphone_alt('gumbo'); + dmetaphone_alt +---------------- + KMP +(1 row) + diff --git a/contrib/fuzzystrmatch/fuzzystrmatch.c b/contrib/fuzzystrmatch/fuzzystrmatch.c index ce58a6a7fc..05774658dc 100644 --- a/contrib/fuzzystrmatch/fuzzystrmatch.c +++ b/contrib/fuzzystrmatch/fuzzystrmatch.c @@ -6,7 +6,7 @@ * Joe Conway * * contrib/fuzzystrmatch/fuzzystrmatch.c - * Copyright (c) 2001-2017, PostgreSQL Global Development Group + * Copyright (c) 2001-2018, PostgreSQL Global Development Group * ALL RIGHTS RESERVED; * * metaphone() @@ -87,25 +87,13 @@ soundex_code(char letter) phoned_word -- The final phonized word. (We'll allocate the memory.) Output - error -- A simple error flag, returns TRUE or FALSE + error -- A simple error flag, returns true or false NOTES: ALL non-alpha characters are ignored, this includes whitespace, although non-alpha characters will break up phonemes. ****************************************************************************/ -/************************************************************************** - my constants -- constants I like - - Probably redundant. - -***************************************************************************/ - -#define META_ERROR FALSE -#define META_SUCCESS TRUE -#define META_FAILURE FALSE - - /* I add modifications to the traditional metaphone algorithm that you might find in books. Define this if you want metaphone to behave traditionally */ @@ -116,7 +104,7 @@ soundex_code(char letter) #define TH '0' static char Lookahead(char *word, int how_far); -static int _metaphone(char *word, int max_phonemes, char **phoned_word); +static void _metaphone(char *word, int max_phonemes, char **phoned_word); /* Metachar.h ... 
little bits about characters for metaphone */ @@ -272,7 +260,6 @@ metaphone(PG_FUNCTION_ARGS) size_t str_i_len = strlen(str_i); int reqlen; char *metaph; - int retval; /* return an empty string if we receive one */ if (!(str_i_len > 0)) @@ -296,17 +283,8 @@ metaphone(PG_FUNCTION_ARGS) (errcode(ERRCODE_ZERO_LENGTH_CHARACTER_STRING), errmsg("output cannot be empty string"))); - - retval = _metaphone(str_i, reqlen, &metaph); - if (retval == META_SUCCESS) - PG_RETURN_TEXT_P(cstring_to_text(metaph)); - else - { - /* internal error */ - elog(ERROR, "metaphone: failure"); - /* keep the compiler quiet */ - PG_RETURN_NULL(); - } + _metaphone(str_i, reqlen, &metaph); + PG_RETURN_TEXT_P(cstring_to_text(metaph)); } @@ -362,7 +340,7 @@ Lookahead(char *word, int how_far) #define Isbreak(c) (!isalpha((unsigned char) (c))) -static int +static void _metaphone(char *word, /* IN */ int max_phonemes, char **phoned_word) /* OUT */ @@ -404,7 +382,7 @@ _metaphone(char *word, /* IN */ if (Curr_Letter == '\0') { End_Phoned_Word; - return META_SUCCESS; /* For testing */ + return; } } @@ -721,7 +699,7 @@ _metaphone(char *word, /* IN */ End_Phoned_Word; - return (META_SUCCESS); + return; } /* END metaphone */ diff --git a/contrib/fuzzystrmatch/sql/fuzzystrmatch.sql b/contrib/fuzzystrmatch/sql/fuzzystrmatch.sql new file mode 100644 index 0000000000..f05dc28ffb --- /dev/null +++ b/contrib/fuzzystrmatch/sql/fuzzystrmatch.sql @@ -0,0 +1,21 @@ +CREATE EXTENSION fuzzystrmatch; + + +SELECT soundex('hello world!'); + +SELECT soundex('Anne'), soundex('Ann'), difference('Anne', 'Ann'); +SELECT soundex('Anne'), soundex('Andrew'), difference('Anne', 'Andrew'); +SELECT soundex('Anne'), soundex('Margaret'), difference('Anne', 'Margaret'); + + +SELECT levenshtein('GUMBO', 'GAMBOL'); +SELECT levenshtein('GUMBO', 'GAMBOL', 2, 1, 1); +SELECT levenshtein_less_equal('extensive', 'exhaustive', 2); +SELECT levenshtein_less_equal('extensive', 'exhaustive', 4); + + +SELECT metaphone('GUMBO', 4); + + +SELECT dmetaphone('gumbo'); +SELECT dmetaphone_alt('gumbo'); diff --git a/contrib/hstore/Makefile b/contrib/hstore/Makefile index 311cc099e5..46d26f8052 100644 --- a/contrib/hstore/Makefile +++ b/contrib/hstore/Makefile @@ -5,11 +5,14 @@ OBJS = hstore_io.o hstore_op.o hstore_gist.o hstore_gin.o hstore_compat.o \ $(WIN32RES) EXTENSION = hstore -DATA = hstore--1.4.sql hstore--1.3--1.4.sql hstore--1.2--1.3.sql \ +DATA = hstore--1.4.sql hstore--1.4--1.5.sql \ + hstore--1.3--1.4.sql hstore--1.2--1.3.sql \ hstore--1.1--1.2.sql hstore--1.0--1.1.sql \ hstore--unpackaged--1.0.sql PGFILEDESC = "hstore - key/value pair data type" +HEADERS = hstore.h + REGRESS = hstore ifdef USE_PGXS diff --git a/contrib/hstore/hstore--1.4--1.5.sql b/contrib/hstore/hstore--1.4--1.5.sql new file mode 100644 index 0000000000..92c1832dce --- /dev/null +++ b/contrib/hstore/hstore--1.4--1.5.sql @@ -0,0 +1,14 @@ +/* contrib/hstore/hstore--1.4--1.5.sql */ + +-- complain if script is sourced in psql, rather than via ALTER EXTENSION +\echo Use "ALTER EXTENSION hstore UPDATE TO '1.5'" to load this file. 
\quit + +ALTER OPERATOR #<=# (hstore, hstore) SET ( + RESTRICT = scalarlesel, + JOIN = scalarlejoinsel +); + +ALTER OPERATOR #>=# (hstore, hstore) SET ( + RESTRICT = scalargesel, + JOIN = scalargejoinsel +); diff --git a/contrib/hstore/hstore.control b/contrib/hstore/hstore.control index f99a937acc..8a719475b8 100644 --- a/contrib/hstore/hstore.control +++ b/contrib/hstore/hstore.control @@ -1,5 +1,5 @@ # hstore extension comment = 'data type for storing sets of (key, value) pairs' -default_version = '1.4' +default_version = '1.5' module_pathname = '$libdir/hstore' relocatable = true diff --git a/contrib/hstore/hstore.h b/contrib/hstore/hstore.h index c4862a82e1..bf4a565ed9 100644 --- a/contrib/hstore/hstore.h +++ b/contrib/hstore/hstore.h @@ -151,7 +151,7 @@ extern HStore *hstoreUpgrade(Datum orig); #define DatumGetHStoreP(d) hstoreUpgrade(d) -#define PG_GETARG_HS(x) DatumGetHStoreP(PG_GETARG_DATUM(x)) +#define PG_GETARG_HSTORE_P(x) DatumGetHStoreP(PG_GETARG_DATUM(x)) /* diff --git a/contrib/hstore/hstore_gin.c b/contrib/hstore/hstore_gin.c index d98fb38458..4c3a422643 100644 --- a/contrib/hstore/hstore_gin.c +++ b/contrib/hstore/hstore_gin.c @@ -43,7 +43,7 @@ makeitem(char *str, int len, char flag) Datum gin_extract_hstore(PG_FUNCTION_ARGS) { - HStore *hs = PG_GETARG_HS(0); + HStore *hs = PG_GETARG_HSTORE_P(0); int32 *nentries = (int32 *) PG_GETARG_POINTER(1); Datum *entries = NULL; HEntry *hsent = ARRPTR(hs); @@ -155,7 +155,7 @@ gin_consistent_hstore(PG_FUNCTION_ARGS) bool *check = (bool *) PG_GETARG_POINTER(0); StrategyNumber strategy = PG_GETARG_UINT16(1); - /* HStore *query = PG_GETARG_HS(2); */ + /* HStore *query = PG_GETARG_HSTORE_P(2); */ int32 nkeys = PG_GETARG_INT32(3); /* Pointer *extra_data = (Pointer *) PG_GETARG_POINTER(4); */ diff --git a/contrib/hstore/hstore_gist.c b/contrib/hstore/hstore_gist.c index f8f5934e40..6d24d2f468 100644 --- a/contrib/hstore/hstore_gist.c +++ b/contrib/hstore/hstore_gist.c @@ -144,7 +144,7 @@ ghstore_compress(PG_FUNCTION_ARGS) gistentryinit(*retval, PointerGetDatum(res), entry->rel, entry->page, entry->offset, - FALSE); + false); } else if (!ISALLTRUE(DatumGetPointer(entry->key))) { @@ -166,7 +166,7 @@ ghstore_compress(PG_FUNCTION_ARGS) gistentryinit(*retval, PointerGetDatum(res), entry->rel, entry->page, entry->offset, - FALSE); + false); } PG_RETURN_POINTER(retval); @@ -518,7 +518,7 @@ ghstore_consistent(PG_FUNCTION_ARGS) if (strategy == HStoreContainsStrategyNumber || strategy == HStoreOldContainsStrategyNumber) { - HStore *query = PG_GETARG_HS(1); + HStore *query = PG_GETARG_HSTORE_P(1); HEntry *qe = ARRPTR(query); char *qv = STRPTR(query); int count = HS_COUNT(query); @@ -570,7 +570,7 @@ ghstore_consistent(PG_FUNCTION_ARGS) continue; crc = crc32_sz(VARDATA(key_datums[i]), VARSIZE(key_datums[i]) - VARHDRSZ); if (!(GETBIT(sign, HASHVAL(crc)))) - res = FALSE; + res = false; } } else if (strategy == HStoreExistsAnyStrategyNumber) @@ -585,7 +585,7 @@ ghstore_consistent(PG_FUNCTION_ARGS) TEXTOID, -1, false, 'i', &key_datums, &key_nulls, &key_count); - res = FALSE; + res = false; for (i = 0; !res && i < key_count; ++i) { @@ -595,7 +595,7 @@ ghstore_consistent(PG_FUNCTION_ARGS) continue; crc = crc32_sz(VARDATA(key_datums[i]), VARSIZE(key_datums[i]) - VARHDRSZ); if (GETBIT(sign, HASHVAL(crc))) - res = TRUE; + res = true; } } else diff --git a/contrib/hstore/hstore_io.c b/contrib/hstore/hstore_io.c index e03005c923..745497c76f 100644 --- a/contrib/hstore/hstore_io.c +++ b/contrib/hstore/hstore_io.c @@ -340,7 +340,8 @@ hstoreUniquePairs(Pairs *a, 
int32 l, int32 *buflen) { *buflen += res->keylen + ((res->isnull) ? 0 : res->vallen); res++; - memcpy(res, ptr, sizeof(Pairs)); + if (res != ptr) + memcpy(res, ptr, sizeof(Pairs)); } ptr++; @@ -752,6 +753,8 @@ typedef struct RecordIOData { Oid record_type; int32 record_typmod; + /* this field is used only if target type is domain over composite: */ + void *domain_info; /* opaque cache for domain checks */ int ncolumns; ColumnIOData columns[FLEXIBLE_ARRAY_MEMBER]; } RecordIOData; @@ -780,9 +783,11 @@ hstore_from_record(PG_FUNCTION_ARGS) Oid argtype = get_fn_expr_argtype(fcinfo->flinfo, 0); /* - * have no tuple to look at, so the only source of type info is the - * argtype. The lookup_rowtype_tupdesc call below will error out if we - * don't have a known composite type oid here. + * We have no tuple to look at, so the only source of type info is the + * argtype --- which might be domain over composite, but we don't care + * here, since we have no need to be concerned about domain + * constraints. The lookup_rowtype_tupdesc_domain call below will + * error out if we don't have a known composite type oid here. */ tupType = argtype; tupTypmod = -1; @@ -793,12 +798,15 @@ hstore_from_record(PG_FUNCTION_ARGS) { rec = PG_GETARG_HEAPTUPLEHEADER(0); - /* Extract type info from the tuple itself */ + /* + * Extract type info from the tuple itself -- this will work even for + * anonymous record types. + */ tupType = HeapTupleHeaderGetTypeId(rec); tupTypmod = HeapTupleHeaderGetTypMod(rec); } - tupdesc = lookup_rowtype_tupdesc(tupType, tupTypmod); + tupdesc = lookup_rowtype_tupdesc_domain(tupType, tupTypmod, false); ncolumns = tupdesc->natts; /* @@ -855,15 +863,16 @@ hstore_from_record(PG_FUNCTION_ARGS) for (i = 0, j = 0; i < ncolumns; ++i) { ColumnIOData *column_info = &my_extra->columns[i]; - Oid column_type = tupdesc->attrs[i]->atttypid; + Form_pg_attribute att = TupleDescAttr(tupdesc, i); + Oid column_type = att->atttypid; char *value; /* Ignore dropped columns in datatype */ - if (tupdesc->attrs[i]->attisdropped) + if (att->attisdropped) continue; - pairs[j].key = NameStr(tupdesc->attrs[i]->attname); - pairs[j].keylen = hstoreCheckKeyLen(strlen(NameStr(tupdesc->attrs[i]->attname))); + pairs[j].key = NameStr(att->attname); + pairs[j].keylen = hstoreCheckKeyLen(strlen(NameStr(att->attname))); if (!nulls || nulls[i]) { @@ -942,9 +951,9 @@ hstore_populate_record(PG_FUNCTION_ARGS) rec = NULL; /* - * have no tuple to look at, so the only source of type info is the - * argtype. The lookup_rowtype_tupdesc call below will error out if we - * don't have a known composite type oid here. + * We have no tuple to look at, so the only source of type info is the + * argtype. The lookup_rowtype_tupdesc_domain call below will error + * out if we don't have a known composite type oid here. */ tupType = argtype; tupTypmod = -1; @@ -956,12 +965,15 @@ hstore_populate_record(PG_FUNCTION_ARGS) if (PG_ARGISNULL(1)) PG_RETURN_POINTER(rec); - /* Extract type info from the tuple itself */ + /* + * Extract type info from the tuple itself -- this will work even for + * anonymous record types. + */ tupType = HeapTupleHeaderGetTypeId(rec); tupTypmod = HeapTupleHeaderGetTypMod(rec); } - hs = PG_GETARG_HS(1); + hs = PG_GETARG_HSTORE_P(1); entries = ARRPTR(hs); ptr = STRPTR(hs); @@ -974,7 +986,11 @@ hstore_populate_record(PG_FUNCTION_ARGS) if (HS_COUNT(hs) == 0 && rec) PG_RETURN_POINTER(rec); - tupdesc = lookup_rowtype_tupdesc(tupType, tupTypmod); + /* + * Lookup the input record's tupdesc. 
For the moment, we don't worry + * about whether it is a domain over composite. + */ + tupdesc = lookup_rowtype_tupdesc_domain(tupType, tupTypmod, false); ncolumns = tupdesc->natts; if (rec) @@ -1001,6 +1017,7 @@ hstore_populate_record(PG_FUNCTION_ARGS) my_extra = (RecordIOData *) fcinfo->flinfo->fn_extra; my_extra->record_type = InvalidOid; my_extra->record_typmod = 0; + my_extra->domain_info = NULL; } if (my_extra->record_type != tupType || @@ -1034,21 +1051,22 @@ hstore_populate_record(PG_FUNCTION_ARGS) for (i = 0; i < ncolumns; ++i) { ColumnIOData *column_info = &my_extra->columns[i]; - Oid column_type = tupdesc->attrs[i]->atttypid; + Form_pg_attribute att = TupleDescAttr(tupdesc, i); + Oid column_type = att->atttypid; char *value; int idx; int vallen; /* Ignore dropped columns in datatype */ - if (tupdesc->attrs[i]->attisdropped) + if (att->attisdropped) { nulls[i] = true; continue; } idx = hstoreFindKey(hs, 0, - NameStr(tupdesc->attrs[i]->attname), - strlen(NameStr(tupdesc->attrs[i]->attname))); + NameStr(att->attname), + strlen(NameStr(att->attname))); /* * we can't just skip here if the key wasn't found since we might have @@ -1082,7 +1100,7 @@ hstore_populate_record(PG_FUNCTION_ARGS) */ values[i] = InputFunctionCall(&column_info->proc, NULL, column_info->typioparam, - tupdesc->attrs[i]->atttypmod); + att->atttypmod); nulls[i] = true; } else @@ -1094,13 +1112,24 @@ hstore_populate_record(PG_FUNCTION_ARGS) values[i] = InputFunctionCall(&column_info->proc, value, column_info->typioparam, - tupdesc->attrs[i]->atttypmod); + att->atttypmod); nulls[i] = false; } } rettuple = heap_form_tuple(tupdesc, values, nulls); + /* + * If the target type is domain over composite, all we know at this point + * is that we've made a valid value of the base composite type. Must + * check domain constraints before deciding we're done. 
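To make the new domain_check() step concrete, a minimal SQL sketch of the case it covers (the type, domain, and constraint below are hypothetical, for illustration only; they are not part of the patch):

-- populate_record() can now target a domain over a composite type; the
-- domain's CHECK constraint is verified after the base tuple is assembled.
CREATE TYPE pair AS (a int, b text);
CREATE DOMAIN positive_pair AS pair CHECK ((VALUE).a > 0);
SELECT populate_record(null::positive_pair, 'a => 1, b => hello'::hstore);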
+ */ + if (argtype != tupdesc->tdtypeid) + domain_check(HeapTupleGetDatum(rettuple), false, + argtype, + &my_extra->domain_info, + fcinfo->flinfo->fn_mcxt); + ReleaseTupleDesc(tupdesc); PG_RETURN_DATUM(HeapTupleGetDatum(rettuple)); @@ -1125,7 +1154,7 @@ PG_FUNCTION_INFO_V1(hstore_out); Datum hstore_out(PG_FUNCTION_ARGS) { - HStore *in = PG_GETARG_HS(0); + HStore *in = PG_GETARG_HSTORE_P(0); int buflen, i; int count = HS_COUNT(in); @@ -1196,7 +1225,7 @@ PG_FUNCTION_INFO_V1(hstore_send); Datum hstore_send(PG_FUNCTION_ARGS) { - HStore *in = PG_GETARG_HS(0); + HStore *in = PG_GETARG_HSTORE_P(0); int i; int count = HS_COUNT(in); char *base = STRPTR(in); @@ -1205,23 +1234,23 @@ hstore_send(PG_FUNCTION_ARGS) pq_begintypsend(&buf); - pq_sendint(&buf, count, 4); + pq_sendint32(&buf, count); for (i = 0; i < count; i++) { int32 keylen = HSTORE_KEYLEN(entries, i); - pq_sendint(&buf, keylen, 4); + pq_sendint32(&buf, keylen); pq_sendtext(&buf, HSTORE_KEY(entries, base, i), keylen); if (HSTORE_VALISNULL(entries, i)) { - pq_sendint(&buf, -1, 4); + pq_sendint32(&buf, -1); } else { int32 vallen = HSTORE_VALLEN(entries, i); - pq_sendint(&buf, vallen, 4); + pq_sendint32(&buf, vallen); pq_sendtext(&buf, HSTORE_VAL(entries, base, i), vallen); } } @@ -1242,7 +1271,7 @@ PG_FUNCTION_INFO_V1(hstore_to_json_loose); Datum hstore_to_json_loose(PG_FUNCTION_ARGS) { - HStore *in = PG_GETARG_HS(0); + HStore *in = PG_GETARG_HSTORE_P(0); int i; int count = HS_COUNT(in); char *base = STRPTR(in); @@ -1297,7 +1326,7 @@ PG_FUNCTION_INFO_V1(hstore_to_json); Datum hstore_to_json(PG_FUNCTION_ARGS) { - HStore *in = PG_GETARG_HS(0); + HStore *in = PG_GETARG_HSTORE_P(0); int i; int count = HS_COUNT(in); char *base = STRPTR(in); @@ -1342,7 +1371,7 @@ PG_FUNCTION_INFO_V1(hstore_to_jsonb); Datum hstore_to_jsonb(PG_FUNCTION_ARGS) { - HStore *in = PG_GETARG_HS(0); + HStore *in = PG_GETARG_HSTORE_P(0); int i; int count = HS_COUNT(in); char *base = STRPTR(in); @@ -1385,7 +1414,7 @@ PG_FUNCTION_INFO_V1(hstore_to_jsonb_loose); Datum hstore_to_jsonb_loose(PG_FUNCTION_ARGS) { - HStore *in = PG_GETARG_HS(0); + HStore *in = PG_GETARG_HSTORE_P(0); int i; int count = HS_COUNT(in); char *base = STRPTR(in); @@ -1433,10 +1462,14 @@ hstore_to_jsonb_loose(PG_FUNCTION_ARGS) HSTORE_VALLEN(entries, i)); if (IsValidJsonNumber(tmp.data, tmp.len)) { + Datum numd; + val.type = jbvNumeric; - val.val.numeric = DatumGetNumeric( - DirectFunctionCall3(numeric_in, - CStringGetDatum(tmp.data), 0, -1)); + numd = DirectFunctionCall3(numeric_in, + CStringGetDatum(tmp.data), + ObjectIdGetDatum(InvalidOid), + Int32GetDatum(-1)); + val.val.numeric = DatumGetNumeric(numd); } else { diff --git a/contrib/hstore/hstore_op.c b/contrib/hstore/hstore_op.c index 612be23a74..8f9277f8da 100644 --- a/contrib/hstore/hstore_op.c +++ b/contrib/hstore/hstore_op.c @@ -130,7 +130,7 @@ PG_FUNCTION_INFO_V1(hstore_fetchval); Datum hstore_fetchval(PG_FUNCTION_ARGS) { - HStore *hs = PG_GETARG_HS(0); + HStore *hs = PG_GETARG_HSTORE_P(0); text *key = PG_GETARG_TEXT_PP(1); HEntry *entries = ARRPTR(hs); text *out; @@ -151,7 +151,7 @@ PG_FUNCTION_INFO_V1(hstore_exists); Datum hstore_exists(PG_FUNCTION_ARGS) { - HStore *hs = PG_GETARG_HS(0); + HStore *hs = PG_GETARG_HSTORE_P(0); text *key = PG_GETARG_TEXT_PP(1); int idx = hstoreFindKey(hs, NULL, VARDATA_ANY(key), VARSIZE_ANY_EXHDR(key)); @@ -164,7 +164,7 @@ PG_FUNCTION_INFO_V1(hstore_exists_any); Datum hstore_exists_any(PG_FUNCTION_ARGS) { - HStore *hs = PG_GETARG_HS(0); + HStore *hs = PG_GETARG_HSTORE_P(0); ArrayType *keys = 
PG_GETARG_ARRAYTYPE_P(1); int nkeys; Pairs *key_pairs = hstoreArrayToPairs(keys, &nkeys); @@ -198,7 +198,7 @@ PG_FUNCTION_INFO_V1(hstore_exists_all); Datum hstore_exists_all(PG_FUNCTION_ARGS) { - HStore *hs = PG_GETARG_HS(0); + HStore *hs = PG_GETARG_HSTORE_P(0); ArrayType *keys = PG_GETARG_ARRAYTYPE_P(1); int nkeys; Pairs *key_pairs = hstoreArrayToPairs(keys, &nkeys); @@ -232,7 +232,7 @@ PG_FUNCTION_INFO_V1(hstore_defined); Datum hstore_defined(PG_FUNCTION_ARGS) { - HStore *hs = PG_GETARG_HS(0); + HStore *hs = PG_GETARG_HSTORE_P(0); text *key = PG_GETARG_TEXT_PP(1); HEntry *entries = ARRPTR(hs); int idx = hstoreFindKey(hs, NULL, @@ -247,7 +247,7 @@ PG_FUNCTION_INFO_V1(hstore_delete); Datum hstore_delete(PG_FUNCTION_ARGS) { - HStore *hs = PG_GETARG_HS(0); + HStore *hs = PG_GETARG_HSTORE_P(0); text *key = PG_GETARG_TEXT_PP(1); char *keyptr = VARDATA_ANY(key); int keylen = VARSIZE_ANY_EXHDR(key); @@ -294,7 +294,7 @@ PG_FUNCTION_INFO_V1(hstore_delete_array); Datum hstore_delete_array(PG_FUNCTION_ARGS) { - HStore *hs = PG_GETARG_HS(0); + HStore *hs = PG_GETARG_HSTORE_P(0); HStore *out = palloc(VARSIZE(hs)); int hs_count = HS_COUNT(hs); char *ps, @@ -373,8 +373,8 @@ PG_FUNCTION_INFO_V1(hstore_delete_hstore); Datum hstore_delete_hstore(PG_FUNCTION_ARGS) { - HStore *hs = PG_GETARG_HS(0); - HStore *hs2 = PG_GETARG_HS(1); + HStore *hs = PG_GETARG_HSTORE_P(0); + HStore *hs2 = PG_GETARG_HSTORE_P(1); HStore *out = palloc(VARSIZE(hs)); int hs_count = HS_COUNT(hs); int hs2_count = HS_COUNT(hs2); @@ -473,8 +473,8 @@ PG_FUNCTION_INFO_V1(hstore_concat); Datum hstore_concat(PG_FUNCTION_ARGS) { - HStore *s1 = PG_GETARG_HS(0); - HStore *s2 = PG_GETARG_HS(1); + HStore *s1 = PG_GETARG_HSTORE_P(0); + HStore *s2 = PG_GETARG_HSTORE_P(1); HStore *out = palloc(VARSIZE(s1) + VARSIZE(s2)); char *ps1, *ps2, @@ -571,7 +571,7 @@ PG_FUNCTION_INFO_V1(hstore_slice_to_array); Datum hstore_slice_to_array(PG_FUNCTION_ARGS) { - HStore *hs = PG_GETARG_HS(0); + HStore *hs = PG_GETARG_HSTORE_P(0); HEntry *entries = ARRPTR(hs); char *ptr = STRPTR(hs); ArrayType *key_array = PG_GETARG_ARRAYTYPE_P(1); @@ -634,7 +634,7 @@ PG_FUNCTION_INFO_V1(hstore_slice_to_hstore); Datum hstore_slice_to_hstore(PG_FUNCTION_ARGS) { - HStore *hs = PG_GETARG_HS(0); + HStore *hs = PG_GETARG_HSTORE_P(0); HEntry *entries = ARRPTR(hs); char *ptr = STRPTR(hs); ArrayType *key_array = PG_GETARG_ARRAYTYPE_P(1); @@ -696,7 +696,7 @@ PG_FUNCTION_INFO_V1(hstore_akeys); Datum hstore_akeys(PG_FUNCTION_ARGS) { - HStore *hs = PG_GETARG_HS(0); + HStore *hs = PG_GETARG_HSTORE_P(0); Datum *d; ArrayType *a; HEntry *entries = ARRPTR(hs); @@ -731,7 +731,7 @@ PG_FUNCTION_INFO_V1(hstore_avals); Datum hstore_avals(PG_FUNCTION_ARGS) { - HStore *hs = PG_GETARG_HS(0); + HStore *hs = PG_GETARG_HSTORE_P(0); Datum *d; bool *nulls; ArrayType *a; @@ -827,7 +827,7 @@ PG_FUNCTION_INFO_V1(hstore_to_array); Datum hstore_to_array(PG_FUNCTION_ARGS) { - HStore *hs = PG_GETARG_HS(0); + HStore *hs = PG_GETARG_HSTORE_P(0); ArrayType *out = hstore_to_array_internal(hs, 1); PG_RETURN_POINTER(out); @@ -837,7 +837,7 @@ PG_FUNCTION_INFO_V1(hstore_to_matrix); Datum hstore_to_matrix(PG_FUNCTION_ARGS) { - HStore *hs = PG_GETARG_HS(0); + HStore *hs = PG_GETARG_HSTORE_P(0); ArrayType *out = hstore_to_array_internal(hs, 2); PG_RETURN_POINTER(out); @@ -891,7 +891,7 @@ hstore_skeys(PG_FUNCTION_ARGS) if (SRF_IS_FIRSTCALL()) { - hs = PG_GETARG_HS(0); + hs = PG_GETARG_HSTORE_P(0); funcctx = SRF_FIRSTCALL_INIT(); setup_firstcall(funcctx, hs, NULL); } @@ -925,7 +925,7 @@ hstore_svals(PG_FUNCTION_ARGS) if 
(SRF_IS_FIRSTCALL()) { - hs = PG_GETARG_HS(0); + hs = PG_GETARG_HSTORE_P(0); funcctx = SRF_FIRSTCALL_INIT(); setup_firstcall(funcctx, hs, NULL); } @@ -967,8 +967,8 @@ PG_FUNCTION_INFO_V1(hstore_contains); Datum hstore_contains(PG_FUNCTION_ARGS) { - HStore *val = PG_GETARG_HS(0); - HStore *tmpl = PG_GETARG_HS(1); + HStore *val = PG_GETARG_HSTORE_P(0); + HStore *tmpl = PG_GETARG_HSTORE_P(1); bool res = true; HEntry *te = ARRPTR(tmpl); char *tstr = STRPTR(tmpl); @@ -1032,7 +1032,7 @@ hstore_each(PG_FUNCTION_ARGS) if (SRF_IS_FIRSTCALL()) { - hs = PG_GETARG_HS(0); + hs = PG_GETARG_HSTORE_P(0); funcctx = SRF_FIRSTCALL_INIT(); setup_firstcall(funcctx, hs, fcinfo); } @@ -1087,8 +1087,8 @@ PG_FUNCTION_INFO_V1(hstore_cmp); Datum hstore_cmp(PG_FUNCTION_ARGS) { - HStore *hs1 = PG_GETARG_HS(0); - HStore *hs2 = PG_GETARG_HS(1); + HStore *hs1 = PG_GETARG_HSTORE_P(0); + HStore *hs2 = PG_GETARG_HSTORE_P(1); int hcount1 = HS_COUNT(hs1); int hcount2 = HS_COUNT(hs2); int res = 0; @@ -1235,7 +1235,7 @@ PG_FUNCTION_INFO_V1(hstore_hash); Datum hstore_hash(PG_FUNCTION_ARGS) { - HStore *hs = PG_GETARG_HS(0); + HStore *hs = PG_GETARG_HSTORE_P(0); Datum hval = hash_any((unsigned char *) VARDATA(hs), VARSIZE(hs) - VARHDRSZ); diff --git a/contrib/hstore_plperl/Makefile b/contrib/hstore_plperl/Makefile index c0906db1f5..5076e21e0e 100644 --- a/contrib/hstore_plperl/Makefile +++ b/contrib/hstore_plperl/Makefile @@ -4,7 +4,6 @@ MODULE_big = hstore_plperl OBJS = hstore_plperl.o $(WIN32RES) PGFILEDESC = "hstore_plperl - hstore transform for plperl" -PG_CPPFLAGS = -I$(top_srcdir)/src/pl/plperl -I$(top_srcdir)/contrib/hstore EXTENSION = hstore_plperl hstore_plperlu DATA = hstore_plperl--1.0.sql hstore_plperlu--1.0.sql @@ -13,10 +12,12 @@ REGRESS = hstore_plperl hstore_plperlu create_transform EXTRA_INSTALL = contrib/hstore ifdef USE_PGXS +PG_CPPFLAGS = -I$(includedir_server)/extension PG_CONFIG = pg_config PGXS := $(shell $(PG_CONFIG) --pgxs) include $(PGXS) else +PG_CPPFLAGS = -I$(top_srcdir)/src/pl/plperl -I$(top_srcdir)/contrib subdir = contrib/hstore_plperl top_builddir = ../.. include $(top_builddir)/src/Makefile.global @@ -28,14 +29,11 @@ ifeq ($(PORTNAME), win32) # these settings are the same as for plperl override CPPFLAGS += -DPLPERL_HAVE_UID_GID -Wno-comment # ... see silliness in plperl Makefile ... -SHLIB_LINK += $(sort $(wildcard ../../src/pl/plperl/libperl*.a)) +SHLIB_LINK_INTERNAL += $(sort $(wildcard ../../src/pl/plperl/libperl*.a)) else rpathdir = $(perl_archlibexp)/CORE SHLIB_LINK += $(perl_embed_ldflags) endif -# As with plperl we need to make sure that the CORE directory is included -# last, probably because it sometimes contains some header files with names -# that clash with some of ours, or with some that we include, notably on -# Windows. -override CPPFLAGS := $(CPPFLAGS) $(perl_embed_ccflags) -I$(perl_archlibexp)/CORE +# As with plperl we need to include the perl_includespec directory last. 
+override CPPFLAGS := $(CPPFLAGS) $(perl_embed_ccflags) $(perl_includespec) diff --git a/contrib/hstore_plperl/expected/hstore_plperl.out b/contrib/hstore_plperl/expected/hstore_plperl.out index 25fc506c23..1ab09a94cd 100644 --- a/contrib/hstore_plperl/expected/hstore_plperl.out +++ b/contrib/hstore_plperl/expected/hstore_plperl.out @@ -41,6 +41,25 @@ SELECT test2arr(); {"\"a\"=>\"1\", \"b\"=>\"boo\", \"c\"=>NULL","\"d\"=>\"2\""} (1 row) +-- check error cases +CREATE OR REPLACE FUNCTION test2() RETURNS hstore +LANGUAGE plperl +TRANSFORM FOR TYPE hstore +AS $$ +return 42; +$$; +SELECT test2(); +ERROR: cannot transform non-hash Perl value to hstore +CONTEXT: PL/Perl function "test2" +CREATE OR REPLACE FUNCTION test2() RETURNS hstore +LANGUAGE plperl +TRANSFORM FOR TYPE hstore +AS $$ +return [1, 2]; +$$; +SELECT test2(); +ERROR: cannot transform non-hash Perl value to hstore +CONTEXT: PL/Perl function "test2" DROP FUNCTION test2(); DROP FUNCTION test2arr(); DROP EXTENSION hstore_plperl; diff --git a/contrib/hstore_plperl/hstore_plperl.c b/contrib/hstore_plperl/hstore_plperl.c index cc46a525f6..61b5557421 100644 --- a/contrib/hstore_plperl/hstore_plperl.c +++ b/contrib/hstore_plperl/hstore_plperl.c @@ -5,7 +5,7 @@ #include "fmgr.h" #include "plperl.h" #include "plperl_helpers.h" -#include "hstore.h" +#include "hstore/hstore.h" PG_MODULE_MAGIC; @@ -68,7 +68,7 @@ Datum hstore_to_plperl(PG_FUNCTION_ARGS) { dTHX; - HStore *in = PG_GETARG_HS(0); + HStore *in = PG_GETARG_HSTORE_P(0); int i; int count = HS_COUNT(in); char *base = STRPTR(in); @@ -101,7 +101,8 @@ Datum plperl_to_hstore(PG_FUNCTION_ARGS) { dTHX; - HV *hv = (HV *) SvRV((SV *) PG_GETARG_POINTER(0)); + SV *in = (SV *) PG_GETARG_POINTER(0); + HV *hv; HE *he; int32 buflen; int32 i; @@ -109,6 +110,17 @@ plperl_to_hstore(PG_FUNCTION_ARGS) HStore *out; Pairs *pairs; + /* Dereference references recursively. */ + while (SvROK(in)) + in = SvRV(in); + + /* Now we must have a hash. 
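
A minimal usage sketch of the behaviour introduced by this plperl_to_hstore hunk (illustration only, not part of the patch; the function name test_ref is invented). Because references are now unwrapped recursively before the hash check, a PL/Perl function may hand back a reference to a hash reference and still produce an hstore, while plain scalars and array references hit the "cannot transform non-hash Perl value to hstore" error exercised in the regression tests above:

CREATE EXTENSION hstore_plperl CASCADE;  -- pulls in hstore and plperl if needed

CREATE FUNCTION test_ref() RETURNS hstore
LANGUAGE plperl
TRANSFORM FOR TYPE hstore
AS $$
my $h = { a => 1, b => 'boo' };
return \$h;        # ref-to-hashref: unwrapped by the new while (SvROK) loop
$$;

SELECT test_ref();  -- expected: "a"=>"1", "b"=>"boo"
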
*/ + if (SvTYPE(in) != SVt_PVHV) + ereport(ERROR, + (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), + (errmsg("cannot transform non-hash Perl value to hstore")))); + hv = (HV *) in; + pcount = hv_iterinit(hv); pairs = palloc(pcount * sizeof(Pairs)); diff --git a/contrib/hstore_plperl/sql/hstore_plperl.sql b/contrib/hstore_plperl/sql/hstore_plperl.sql index 9398aedfbb..ad1db7eae1 100644 --- a/contrib/hstore_plperl/sql/hstore_plperl.sql +++ b/contrib/hstore_plperl/sql/hstore_plperl.sql @@ -31,6 +31,25 @@ $$; SELECT test2arr(); +-- check error cases +CREATE OR REPLACE FUNCTION test2() RETURNS hstore +LANGUAGE plperl +TRANSFORM FOR TYPE hstore +AS $$ +return 42; +$$; + +SELECT test2(); + +CREATE OR REPLACE FUNCTION test2() RETURNS hstore +LANGUAGE plperl +TRANSFORM FOR TYPE hstore +AS $$ +return [1, 2]; +$$; + +SELECT test2(); + DROP FUNCTION test2(); DROP FUNCTION test2arr(); diff --git a/contrib/hstore_plpython/Makefile b/contrib/hstore_plpython/Makefile index 7ff787a22e..6877e7a072 100644 --- a/contrib/hstore_plpython/Makefile +++ b/contrib/hstore_plpython/Makefile @@ -4,19 +4,21 @@ MODULE_big = hstore_plpython$(python_majorversion) OBJS = hstore_plpython.o $(WIN32RES) PGFILEDESC = "hstore_plpython - hstore transform for plpython" -PG_CPPFLAGS = -I$(top_srcdir)/src/pl/plpython $(python_includespec) -I$(top_srcdir)/contrib/hstore -DPLPYTHON_LIBNAME='"plpython$(python_majorversion)"' - EXTENSION = hstore_plpythonu hstore_plpython2u hstore_plpython3u DATA = hstore_plpythonu--1.0.sql hstore_plpython2u--1.0.sql hstore_plpython3u--1.0.sql REGRESS = hstore_plpython REGRESS_PLPYTHON3_MANGLE := $(REGRESS) +PG_CPPFLAGS = $(python_includespec) -DPLPYTHON_LIBNAME='"plpython$(python_majorversion)"' + ifdef USE_PGXS +PG_CPPFLAGS += -I$(includedir_server)/extension PG_CONFIG = pg_config PGXS := $(shell $(PG_CONFIG) --pgxs) include $(PGXS) else +PG_CPPFLAGS += -I$(top_srcdir)/src/pl/plpython -I$(top_srcdir)/contrib subdir = contrib/hstore_plpython top_builddir = ../.. include $(top_builddir)/src/Makefile.global @@ -26,7 +28,7 @@ endif # We must link libpython explicitly ifeq ($(PORTNAME), win32) # ... see silliness in plpython Makefile ... -SHLIB_LINK += $(sort $(wildcard ../../src/pl/plpython/libpython*.a)) +SHLIB_LINK_INTERNAL += $(sort $(wildcard ../../src/pl/plpython/libpython*.a)) else rpathdir = $(python_libdir) SHLIB_LINK += $(python_libspec) $(python_additional_libs) diff --git a/contrib/hstore_plpython/expected/hstore_plpython.out b/contrib/hstore_plpython/expected/hstore_plpython.out index df49cd5f37..1ab5feea93 100644 --- a/contrib/hstore_plpython/expected/hstore_plpython.out +++ b/contrib/hstore_plpython/expected/hstore_plpython.out @@ -68,12 +68,30 @@ AS $$ val = [{'a': 1, 'b': 'boo', 'c': None}, {'d': 2}] return val $$; - SELECT test2arr(); +SELECT test2arr(); test2arr -------------------------------------------------------------- {"\"a\"=>\"1\", \"b\"=>\"boo\", \"c\"=>NULL","\"d\"=>\"2\""} (1 row) +-- test python -> domain over hstore +CREATE DOMAIN hstore_foo AS hstore CHECK(VALUE ? 
'foo'); +CREATE FUNCTION test2dom(fn text) RETURNS hstore_foo +LANGUAGE plpythonu +TRANSFORM FOR TYPE hstore +AS $$ +return {'a': 1, fn: 'boo', 'c': None} +$$; +SELECT test2dom('foo'); + test2dom +----------------------------------- + "a"=>"1", "c"=>NULL, "foo"=>"boo" +(1 row) + +SELECT test2dom('bar'); -- fail +ERROR: value for domain hstore_foo violates check constraint "hstore_foo_check" +CONTEXT: while creating return value +PL/Python function "test2dom" -- test as part of prepare/execute CREATE FUNCTION test3() RETURNS void LANGUAGE plpythonu diff --git a/contrib/hstore_plpython/hstore_plpython.c b/contrib/hstore_plpython/hstore_plpython.c index b184324ebf..2f24090ff3 100644 --- a/contrib/hstore_plpython/hstore_plpython.c +++ b/contrib/hstore_plpython/hstore_plpython.c @@ -3,7 +3,7 @@ #include "fmgr.h" #include "plpython.h" #include "plpy_typeio.h" -#include "hstore.h" +#include "hstore/hstore.h" PG_MODULE_MAGIC; @@ -85,7 +85,7 @@ PG_FUNCTION_INFO_V1(hstore_to_plpython); Datum hstore_to_plpython(PG_FUNCTION_ARGS) { - HStore *in = PG_GETARG_HS(0); + HStore *in = PG_GETARG_HSTORE_P(0); int i; int count = HS_COUNT(in); char *base = STRPTR(in); @@ -93,6 +93,10 @@ hstore_to_plpython(PG_FUNCTION_ARGS) PyObject *dict; dict = PyDict_New(); + if (!dict) + ereport(ERROR, + (errcode(ERRCODE_OUT_OF_MEMORY), + errmsg("out of memory"))); for (i = 0; i < count; i++) { diff --git a/contrib/hstore_plpython/sql/hstore_plpython.sql b/contrib/hstore_plpython/sql/hstore_plpython.sql index 911bbd67fe..2c54ee6aaa 100644 --- a/contrib/hstore_plpython/sql/hstore_plpython.sql +++ b/contrib/hstore_plpython/sql/hstore_plpython.sql @@ -60,7 +60,21 @@ val = [{'a': 1, 'b': 'boo', 'c': None}, {'d': 2}] return val $$; - SELECT test2arr(); +SELECT test2arr(); + + +-- test python -> domain over hstore +CREATE DOMAIN hstore_foo AS hstore CHECK(VALUE ? 'foo'); + +CREATE FUNCTION test2dom(fn text) RETURNS hstore_foo +LANGUAGE plpythonu +TRANSFORM FOR TYPE hstore +AS $$ +return {'a': 1, fn: 'boo', 'c': None} +$$; + +SELECT test2dom('foo'); +SELECT test2dom('bar'); -- fail -- test as part of prepare/execute diff --git a/contrib/intarray/_int_bool.c b/contrib/intarray/_int_bool.c index a18c645606..91e2a804f2 100644 --- a/contrib/intarray/_int_bool.c +++ b/contrib/intarray/_int_bool.c @@ -245,7 +245,7 @@ checkcondition_arr(void *checkval, ITEM *item) { StopMiddle = StopLow + (StopHigh - StopLow) / 2; if (*StopMiddle == item->val) - return (true); + return true; else if (*StopMiddle < item->val) StopLow = StopMiddle + 1; else @@ -274,7 +274,7 @@ execute(ITEM *curitem, void *checkval, bool calcnot, return (*chkcond) (checkval, curitem); else if (curitem->val == (int32) '!') { - return (calcnot) ? + return calcnot ? ((execute(curitem - 1, checkval, calcnot, chkcond)) ? false : true) : true; } @@ -342,7 +342,7 @@ gin_bool_consistent(QUERYTYPE *query, bool *check) j = 0; if (query->size <= 0) - return FALSE; + return false; /* * Set up data for checkcondition_gin. 
This must agree with the query diff --git a/contrib/intarray/_int_gin.c b/contrib/intarray/_int_gin.c index 73628bea11..7aebfec54b 100644 --- a/contrib/intarray/_int_gin.c +++ b/contrib/intarray/_int_gin.c @@ -116,7 +116,7 @@ ginint4_consistent(PG_FUNCTION_ARGS) /* Pointer *extra_data = (Pointer *) PG_GETARG_POINTER(4); */ bool *recheck = (bool *) PG_GETARG_POINTER(5); - bool res = FALSE; + bool res = false; int32 i; switch (strategy) @@ -125,25 +125,25 @@ ginint4_consistent(PG_FUNCTION_ARGS) /* result is not lossy */ *recheck = false; /* at least one element in check[] is true, so result = true */ - res = TRUE; + res = true; break; case RTContainedByStrategyNumber: case RTOldContainedByStrategyNumber: /* we will need recheck */ *recheck = true; /* at least one element in check[] is true, so result = true */ - res = TRUE; + res = true; break; case RTSameStrategyNumber: /* we will need recheck */ *recheck = true; /* Must have all elements in check[] true */ - res = TRUE; + res = true; for (i = 0; i < nkeys; i++) { if (!check[i]) { - res = FALSE; + res = false; break; } } @@ -153,12 +153,12 @@ ginint4_consistent(PG_FUNCTION_ARGS) /* result is not lossy */ *recheck = false; /* Must have all elements in check[] true */ - res = TRUE; + res = true; for (i = 0; i < nkeys; i++) { if (!check[i]) { - res = FALSE; + res = false; break; } } diff --git a/contrib/intarray/_int_gist.c b/contrib/intarray/_int_gist.c index 79521b29b0..911d18023b 100644 --- a/contrib/intarray/_int_gist.c +++ b/contrib/intarray/_int_gist.c @@ -27,7 +27,7 @@ PG_FUNCTION_INFO_V1(g_int_same); /* ** The GiST Consistent method for _intments ** Should return false if for all data items x below entry, -** the predicate x op query == FALSE, where op is the oper +** the predicate x op query == false, where op is the oper ** corresponding to strategy in the pg_amop table. 
*/ Datum @@ -89,7 +89,7 @@ g_int_consistent(PG_FUNCTION_ARGS) query); break; default: - retval = FALSE; + retval = false; } pfree(query); PG_RETURN_BOOL(retval); @@ -159,7 +159,7 @@ g_int_compress(PG_FUNCTION_ARGS) retval = palloc(sizeof(GISTENTRY)); gistentryinit(*retval, PointerGetDatum(r), - entry->rel, entry->page, entry->offset, FALSE); + entry->rel, entry->page, entry->offset, false); PG_RETURN_POINTER(retval); } @@ -206,7 +206,7 @@ g_int_compress(PG_FUNCTION_ARGS) r = resize_intArrayType(r, len); retval = palloc(sizeof(GISTENTRY)); gistentryinit(*retval, PointerGetDatum(r), - entry->rel, entry->page, entry->offset, FALSE); + entry->rel, entry->page, entry->offset, false); PG_RETURN_POINTER(retval); } else @@ -236,7 +236,7 @@ g_int_decompress(PG_FUNCTION_ARGS) { retval = palloc(sizeof(GISTENTRY)); gistentryinit(*retval, PointerGetDatum(in), - entry->rel, entry->page, entry->offset, FALSE); + entry->rel, entry->page, entry->offset, false); PG_RETURN_POINTER(retval); } @@ -251,7 +251,7 @@ g_int_decompress(PG_FUNCTION_ARGS) { retval = palloc(sizeof(GISTENTRY)); gistentryinit(*retval, PointerGetDatum(in), - entry->rel, entry->page, entry->offset, FALSE); + entry->rel, entry->page, entry->offset, false); PG_RETURN_POINTER(retval); } @@ -273,7 +273,7 @@ g_int_decompress(PG_FUNCTION_ARGS) pfree(in); retval = palloc(sizeof(GISTENTRY)); gistentryinit(*retval, PointerGetDatum(r), - entry->rel, entry->page, entry->offset, FALSE); + entry->rel, entry->page, entry->offset, false); PG_RETURN_POINTER(retval); } @@ -321,14 +321,14 @@ g_int_same(PG_FUNCTION_ARGS) *result = false; PG_RETURN_POINTER(result); } - *result = TRUE; + *result = true; da = ARRPTR(a); db = ARRPTR(b); while (n--) { if (*da++ != *db++) { - *result = FALSE; + *result = false; break; } } diff --git a/contrib/intarray/_int_op.c b/contrib/intarray/_int_op.c index 3637c4564c..fe7fcc4662 100644 --- a/contrib/intarray/_int_op.c +++ b/contrib/intarray/_int_op.c @@ -74,19 +74,19 @@ _int_same(PG_FUNCTION_ARGS) da = ARRPTR(a); db = ARRPTR(b); - result = FALSE; + result = false; if (na == nb) { SORT(a); SORT(b); - result = TRUE; + result = true; for (n = 0; n < na; n++) { if (da[n] != db[n]) { - result = FALSE; + result = false; break; } } @@ -110,7 +110,7 @@ _int_overlap(PG_FUNCTION_ARGS) CHECKARRVALID(a); CHECKARRVALID(b); if (ARRISEMPTY(a) || ARRISEMPTY(b)) - return FALSE; + return false; SORT(a); SORT(b); diff --git a/contrib/intarray/_int_selfuncs.c b/contrib/intarray/_int_selfuncs.c index acb87d10f0..4c3f60c1dd 100644 --- a/contrib/intarray/_int_selfuncs.c +++ b/contrib/intarray/_int_selfuncs.c @@ -3,7 +3,7 @@ * _int_selfuncs.c * Functions for selectivity estimation of intarray operators * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * diff --git a/contrib/intarray/_int_tool.c b/contrib/intarray/_int_tool.c index 2fdfd2ec63..d86485dfa5 100644 --- a/contrib/intarray/_int_tool.c +++ b/contrib/intarray/_int_tool.c @@ -40,7 +40,7 @@ inner_int_contains(ArrayType *a, ArrayType *b) break; /* db[j] is not in da */ } - return (n == nb) ? TRUE : FALSE; + return (n == nb) ? 
true : false; } /* arguments are assumed sorted */ @@ -65,12 +65,12 @@ inner_int_overlap(ArrayType *a, ArrayType *b) if (da[i] < db[j]) i++; else if (da[i] == db[j]) - return TRUE; + return true; else j++; } - return FALSE; + return false; } ArrayType * @@ -220,7 +220,16 @@ ArrayType * new_intArrayType(int num) { ArrayType *r; - int nbytes = ARR_OVERHEAD_NONULLS(1) + sizeof(int) * num; + int nbytes; + + /* if no elements, return a zero-dimensional array */ + if (num <= 0) + { + r = construct_empty_array(INT4OID); + return r; + } + + nbytes = ARR_OVERHEAD_NONULLS(1) + sizeof(int) * num; r = (ArrayType *) palloc0(nbytes); @@ -237,11 +246,11 @@ new_intArrayType(int num) ArrayType * resize_intArrayType(ArrayType *a, int num) { - int nbytes = ARR_DATA_OFFSET(a) + sizeof(int) * num; + int nbytes; int i; /* if no elements, return a zero-dimensional array */ - if (num == 0) + if (num <= 0) { ARR_NDIM(a) = 0; return a; @@ -250,6 +259,8 @@ resize_intArrayType(ArrayType *a, int num) if (num == ARRNELEMS(a)) return a; + nbytes = ARR_DATA_OFFSET(a) + sizeof(int) * num; + a = (ArrayType *) repalloc(a, nbytes); SET_VARSIZE(a, nbytes); diff --git a/contrib/intarray/_intbig_gist.c b/contrib/intarray/_intbig_gist.c index 6dae7c91c1..de7bc82a23 100644 --- a/contrib/intarray/_intbig_gist.c +++ b/contrib/intarray/_intbig_gist.c @@ -168,7 +168,7 @@ g_intbig_compress(PG_FUNCTION_ARGS) retval = (GISTENTRY *) palloc(sizeof(GISTENTRY)); gistentryinit(*retval, PointerGetDatum(res), entry->rel, entry->page, - entry->offset, FALSE); + entry->offset, false); if (in != DatumGetArrayTypeP(entry->key)) pfree(in); @@ -195,7 +195,7 @@ g_intbig_compress(PG_FUNCTION_ARGS) retval = (GISTENTRY *) palloc(sizeof(GISTENTRY)); gistentryinit(*retval, PointerGetDatum(res), entry->rel, entry->page, - entry->offset, FALSE); + entry->offset, false); PG_RETURN_POINTER(retval); } @@ -594,7 +594,7 @@ g_intbig_consistent(PG_FUNCTION_ARGS) retval = _intbig_overlap((GISTTYPE *) DatumGetPointer(entry->key), query); break; default: - retval = FALSE; + retval = false; } PG_FREE_IF_COPY(query, 1); PG_RETURN_BOOL(retval); diff --git a/contrib/intarray/bench/create_test.pl b/contrib/intarray/bench/create_test.pl index f3262df05b..d2c678bb53 100755 --- a/contrib/intarray/bench/create_test.pl +++ b/contrib/intarray/bench/create_test.pl @@ -83,4 +83,5 @@ sub copytable while (<$fff>) { print; } close $fff; print "\\.\n"; + return; } diff --git a/contrib/intarray/expected/_int.out b/contrib/intarray/expected/_int.out index 0a5dd463ac..105c951bad 100644 --- a/contrib/intarray/expected/_int.out +++ b/contrib/intarray/expected/_int.out @@ -151,6 +151,30 @@ SELECT '{-1,3,1}'::int[] & '{1,2}'; {1} (1 row) +SELECT '{1}'::int[] & '{2}'::int[]; + ?column? +---------- + {} +(1 row) + +SELECT array_dims('{1}'::int[] & '{2}'::int[]); + array_dims +------------ + +(1 row) + +SELECT ('{1}'::int[] & '{2}'::int[]) = '{}'::int[]; + ?column? +---------- + t +(1 row) + +SELECT ('{}'::int[] & '{}'::int[]) = '{}'::int[]; + ?column? 
+---------- + t +(1 row) + --test query_int SELECT '1'::query_int; query_int diff --git a/contrib/intarray/sql/_int.sql b/contrib/intarray/sql/_int.sql index 44e1a729b4..40225c65ab 100644 --- a/contrib/intarray/sql/_int.sql +++ b/contrib/intarray/sql/_int.sql @@ -30,6 +30,10 @@ SELECT '{123,623,445}'::int[] | 1623; SELECT '{123,623,445}'::int[] | '{1623,623}'; SELECT '{123,623,445}'::int[] & '{1623,623}'; SELECT '{-1,3,1}'::int[] & '{1,2}'; +SELECT '{1}'::int[] & '{2}'::int[]; +SELECT array_dims('{1}'::int[] & '{2}'::int[]); +SELECT ('{1}'::int[] & '{2}'::int[]) = '{}'::int[]; +SELECT ('{}'::int[] & '{}'::int[]) = '{}'::int[]; --test query_int diff --git a/contrib/isn/Makefile b/contrib/isn/Makefile index 9543a4b1cf..c3600dac30 100644 --- a/contrib/isn/Makefile +++ b/contrib/isn/Makefile @@ -3,9 +3,13 @@ MODULES = isn EXTENSION = isn -DATA = isn--1.1.sql isn--1.0--1.1.sql isn--unpackaged--1.0.sql +DATA = isn--1.1.sql isn--1.1--1.2.sql \ + isn--1.0--1.1.sql isn--unpackaged--1.0.sql PGFILEDESC = "isn - data types for international product numbering standards" +# the other .h files are data tables, we don't install those +HEADERS_isn = isn.h + REGRESS = isn ifdef USE_PGXS diff --git a/contrib/isn/isn--1.1--1.2.sql b/contrib/isn/isn--1.1--1.2.sql new file mode 100644 index 0000000000..d626a5f44d --- /dev/null +++ b/contrib/isn/isn--1.1--1.2.sql @@ -0,0 +1,228 @@ +/* contrib/isn/isn--1.1--1.2.sql */ + +-- complain if script is sourced in psql, rather than via ALTER EXTENSION +\echo Use "ALTER EXTENSION isn UPDATE TO '1.2'" to load this file. \quit + +ALTER OPERATOR <= (ean13, ean13) SET ( + RESTRICT = scalarlesel, + JOIN = scalarlejoinsel); + +ALTER OPERATOR >= (ean13, ean13) SET ( + RESTRICT = scalargesel, + JOIN = scalargejoinsel); + +ALTER OPERATOR <= (ean13, isbn13) SET ( + RESTRICT = scalarlesel, + JOIN = scalarlejoinsel); + +ALTER OPERATOR >= (ean13, isbn13) SET ( + RESTRICT = scalargesel, + JOIN = scalargejoinsel); + +ALTER OPERATOR <= (isbn13, ean13) SET ( + RESTRICT = scalarlesel, + JOIN = scalarlejoinsel); + +ALTER OPERATOR >= (isbn13, ean13) SET ( + RESTRICT = scalargesel, + JOIN = scalargejoinsel); + +ALTER OPERATOR <= (ean13, ismn13) SET ( + RESTRICT = scalarlesel, + JOIN = scalarlejoinsel); + +ALTER OPERATOR >= (ean13, ismn13) SET ( + RESTRICT = scalargesel, + JOIN = scalargejoinsel); + +ALTER OPERATOR <= (ismn13, ean13) SET ( + RESTRICT = scalarlesel, + JOIN = scalarlejoinsel); + +ALTER OPERATOR >= (ismn13, ean13) SET ( + RESTRICT = scalargesel, + JOIN = scalargejoinsel); + +ALTER OPERATOR <= (ean13, issn13) SET ( + RESTRICT = scalarlesel, + JOIN = scalarlejoinsel); + +ALTER OPERATOR >= (ean13, issn13) SET ( + RESTRICT = scalargesel, + JOIN = scalargejoinsel); + +ALTER OPERATOR <= (ean13, isbn) SET ( + RESTRICT = scalarlesel, + JOIN = scalarlejoinsel); + +ALTER OPERATOR >= (ean13, isbn) SET ( + RESTRICT = scalargesel, + JOIN = scalargejoinsel); + +ALTER OPERATOR <= (ean13, ismn) SET ( + RESTRICT = scalarlesel, + JOIN = scalarlejoinsel); + +ALTER OPERATOR >= (ean13, ismn) SET ( + RESTRICT = scalargesel, + JOIN = scalargejoinsel); + +ALTER OPERATOR <= (ean13, issn) SET ( + RESTRICT = scalarlesel, + JOIN = scalarlejoinsel); + +ALTER OPERATOR >= (ean13, issn) SET ( + RESTRICT = scalargesel, + JOIN = scalargejoinsel); + +ALTER OPERATOR <= (ean13, upc) SET ( + RESTRICT = scalarlesel, + JOIN = scalarlejoinsel); + +ALTER OPERATOR >= (ean13, upc) SET ( + RESTRICT = scalargesel, + JOIN = scalargejoinsel); + +ALTER OPERATOR <= (isbn13, isbn13) SET ( + RESTRICT = scalarlesel, + JOIN = 
scalarlejoinsel); + +ALTER OPERATOR >= (isbn13, isbn13) SET ( + RESTRICT = scalargesel, + JOIN = scalargejoinsel); + +ALTER OPERATOR <= (isbn13, isbn) SET ( + RESTRICT = scalarlesel, + JOIN = scalarlejoinsel); + +ALTER OPERATOR >= (isbn13, isbn) SET ( + RESTRICT = scalargesel, + JOIN = scalargejoinsel); + +ALTER OPERATOR <= (isbn, isbn) SET ( + RESTRICT = scalarlesel, + JOIN = scalarlejoinsel); + +ALTER OPERATOR >= (isbn, isbn) SET ( + RESTRICT = scalargesel, + JOIN = scalargejoinsel); + +ALTER OPERATOR <= (isbn, isbn13) SET ( + RESTRICT = scalarlesel, + JOIN = scalarlejoinsel); + +ALTER OPERATOR >= (isbn, isbn13) SET ( + RESTRICT = scalargesel, + JOIN = scalargejoinsel); + +ALTER OPERATOR <= (isbn, ean13) SET ( + RESTRICT = scalarlesel, + JOIN = scalarlejoinsel); + +ALTER OPERATOR >= (isbn, ean13) SET ( + RESTRICT = scalargesel, + JOIN = scalargejoinsel); + +ALTER OPERATOR <= (ismn13, ismn13) SET ( + RESTRICT = scalarlesel, + JOIN = scalarlejoinsel); + +ALTER OPERATOR >= (ismn13, ismn13) SET ( + RESTRICT = scalargesel, + JOIN = scalargejoinsel); + +ALTER OPERATOR <= (ismn13, ismn) SET ( + RESTRICT = scalarlesel, + JOIN = scalarlejoinsel); + +ALTER OPERATOR >= (ismn13, ismn) SET ( + RESTRICT = scalargesel, + JOIN = scalargejoinsel); + +ALTER OPERATOR <= (ismn, ismn) SET ( + RESTRICT = scalarlesel, + JOIN = scalarlejoinsel); + +ALTER OPERATOR >= (ismn, ismn) SET ( + RESTRICT = scalargesel, + JOIN = scalargejoinsel); + +ALTER OPERATOR <= (ismn, ismn13) SET ( + RESTRICT = scalarlesel, + JOIN = scalarlejoinsel); + +ALTER OPERATOR >= (ismn, ismn13) SET ( + RESTRICT = scalargesel, + JOIN = scalargejoinsel); + +ALTER OPERATOR <= (ismn, ean13) SET ( + RESTRICT = scalarlesel, + JOIN = scalarlejoinsel); + +ALTER OPERATOR >= (ismn, ean13) SET ( + RESTRICT = scalargesel, + JOIN = scalargejoinsel); + +ALTER OPERATOR <= (issn13, issn13) SET ( + RESTRICT = scalarlesel, + JOIN = scalarlejoinsel); + +ALTER OPERATOR >= (issn13, issn13) SET ( + RESTRICT = scalargesel, + JOIN = scalargejoinsel); + +ALTER OPERATOR <= (issn13, issn) SET ( + RESTRICT = scalarlesel, + JOIN = scalarlejoinsel); + +ALTER OPERATOR >= (issn13, issn) SET ( + RESTRICT = scalargesel, + JOIN = scalargejoinsel); + +ALTER OPERATOR <= (issn13, ean13) SET ( + RESTRICT = scalarlesel, + JOIN = scalarlejoinsel); + +ALTER OPERATOR >= (issn13, ean13) SET ( + RESTRICT = scalargesel, + JOIN = scalargejoinsel); + +ALTER OPERATOR <= (issn, issn) SET ( + RESTRICT = scalarlesel, + JOIN = scalarlejoinsel); + +ALTER OPERATOR >= (issn, issn) SET ( + RESTRICT = scalargesel, + JOIN = scalargejoinsel); + +ALTER OPERATOR <= (issn, issn13) SET ( + RESTRICT = scalarlesel, + JOIN = scalarlejoinsel); + +ALTER OPERATOR >= (issn, issn13) SET ( + RESTRICT = scalargesel, + JOIN = scalargejoinsel); + +ALTER OPERATOR <= (issn, ean13) SET ( + RESTRICT = scalarlesel, + JOIN = scalarlejoinsel); + +ALTER OPERATOR >= (issn, ean13) SET ( + RESTRICT = scalargesel, + JOIN = scalargejoinsel); + +ALTER OPERATOR <= (upc, upc) SET ( + RESTRICT = scalarlesel, + JOIN = scalarlejoinsel); + +ALTER OPERATOR >= (upc, upc) SET ( + RESTRICT = scalargesel, + JOIN = scalargejoinsel); + +ALTER OPERATOR <= (upc, ean13) SET ( + RESTRICT = scalarlesel, + JOIN = scalarlejoinsel); + +ALTER OPERATOR >= (upc, ean13) SET ( + RESTRICT = scalargesel, + JOIN = scalargejoinsel); diff --git a/contrib/isn/isn.c b/contrib/isn/isn.c index 4d845b716f..897d83e0ca 100644 --- a/contrib/isn/isn.c +++ b/contrib/isn/isn.c @@ -4,7 +4,7 @@ * PostgreSQL type definitions for ISNs (ISBN, ISMN, ISSN, EAN13, UPC) * * 
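
The upgrade script above only retargets the restriction and join selectivity estimators of the <= and >= operators, so an existing installation can adopt it in place. A hedged sketch of applying and spot-checking the update (assumes isn is currently installed at version 1.1):

ALTER EXTENSION isn UPDATE TO '1.2';

-- spot-check: the ean13 <= / >= operators should now report the new estimators
SELECT oprname, oprrest, oprjoin
FROM pg_operator
WHERE oprname IN ('<=', '>=')
  AND oprleft = 'ean13'::regtype
  AND oprright = 'ean13'::regtype;
-- expected: scalarlesel/scalarlejoinsel and scalargesel/scalargejoinsel
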
Author: German Mendez Bravo (Kronuz) - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * * IDENTIFICATION * contrib/isn/isn.c @@ -26,6 +26,12 @@ PG_MODULE_MAGIC; +#ifdef USE_ASSERT_CHECKING +#define ISN_DEBUG 1 +#else +#define ISN_DEBUG 0 +#endif + #define MAXEAN13LEN 18 enum isn_type @@ -36,7 +42,6 @@ enum isn_type static const char *const isn_names[] = {"EAN13/UPC/ISxN", "EAN13/UPC/ISxN", "EAN13", "ISBN", "ISMN", "ISSN", "UPC"}; static bool g_weak = false; -static bool g_initialized = false; /*********************************************************************** @@ -56,7 +61,7 @@ static bool g_initialized = false; /* * Check if the table and its index is correct (just for debugging) */ -#ifdef ISN_DEBUG +pg_attribute_unused() static bool check_table(const char *(*TABLE)[2], const unsigned TABLE_index[10][2]) { @@ -68,7 +73,6 @@ check_table(const char *(*TABLE)[2], const unsigned TABLE_index[10][2]) y = -1, i = 0, j, - cnt = 0, init = 0; if (TABLE == NULL || TABLE_index == NULL) @@ -131,7 +135,6 @@ check_table(const char *(*TABLE)[2], const unsigned TABLE_index[10][2]) elog(DEBUG1, "index %d is invalid", j); return false; } -#endif /* ISN_DEBUG */ /*---------------------------------------------------------- * Formatting and conversion routines. @@ -922,22 +925,24 @@ string2ean(const char *str, bool errorOK, ean13 *result, * Exported routines. *---------------------------------------------------------*/ +void _PG_init(void); + void -initialize(void) +_PG_init(void) { -#ifdef ISN_DEBUG - if (!check_table(EAN13, EAN13_index)) - elog(LOG, "EAN13 failed check"); - if (!check_table(ISBN, ISBN_index)) - elog(LOG, "ISBN failed check"); - if (!check_table(ISMN, ISMN_index)) - elog(LOG, "ISMN failed check"); - if (!check_table(ISSN, ISSN_index)) - elog(LOG, "ISSN failed check"); - if (!check_table(UPC, UPC_index)) - elog(LOG, "UPC failed check"); -#endif - g_initialized = true; + if (ISN_DEBUG) + { + if (!check_table(EAN13_range, EAN13_index)) + elog(ERROR, "EAN13 failed check"); + if (!check_table(ISBN_range, ISBN_index)) + elog(ERROR, "ISBN failed check"); + if (!check_table(ISMN_range, ISMN_index)) + elog(ERROR, "ISMN failed check"); + if (!check_table(ISSN_range, ISSN_index)) + elog(ERROR, "ISSN failed check"); + if (!check_table(UPC_range, UPC_index)) + elog(ERROR, "UPC failed check"); + } } /* isn_out diff --git a/contrib/isn/isn.control b/contrib/isn/isn.control index 544bd8d0bf..765dce0e0a 100644 --- a/contrib/isn/isn.control +++ b/contrib/isn/isn.control @@ -1,5 +1,5 @@ # isn extension comment = 'data types for international product numbering standards' -default_version = '1.1' +default_version = '1.2' module_pathname = '$libdir/isn' relocatable = true diff --git a/contrib/isn/isn.h b/contrib/isn/isn.h index e2c8a26234..29632d8518 100644 --- a/contrib/isn/isn.h +++ b/contrib/isn/isn.h @@ -4,7 +4,7 @@ * PostgreSQL type definitions for ISNs (ISBN, ISMN, ISSN, EAN13, UPC) * * Author: German Mendez Bravo (Kronuz) - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * * IDENTIFICATION * contrib/isn/isn.h diff --git a/contrib/jsonb_plperl/.gitignore b/contrib/jsonb_plperl/.gitignore new file mode 100644 index 0000000000..5dcb3ff972 --- /dev/null +++ b/contrib/jsonb_plperl/.gitignore @@ -0,0 +1,4 @@ +# Generated subdirectories +/log/ +/results/ +/tmp_check/ diff --git a/contrib/jsonb_plperl/Makefile 
b/contrib/jsonb_plperl/Makefile new file mode 100644 index 0000000000..b43c8ed97b --- /dev/null +++ b/contrib/jsonb_plperl/Makefile @@ -0,0 +1,39 @@ +# contrib/jsonb_plperl/Makefile + +MODULE_big = jsonb_plperl +OBJS = jsonb_plperl.o $(WIN32RES) +PGFILEDESC = "jsonb_plperl - jsonb transform for plperl" + +PG_CPPFLAGS = -I$(top_srcdir)/src/pl/plperl + +EXTENSION = jsonb_plperlu jsonb_plperl +DATA = jsonb_plperlu--1.0.sql jsonb_plperl--1.0.sql + +REGRESS = jsonb_plperl jsonb_plperlu + +SHLIB_LINK += $(filter -lm, $(LIBS)) + +ifdef USE_PGXS +PG_CONFIG = pg_config +PGXS := $(shell $(PG_CONFIG) --pgxs) +include $(PGXS) +else +subdir = contrib/jsonb_plperl +top_builddir = ../.. +include $(top_builddir)/src/Makefile.global +include $(top_srcdir)/contrib/contrib-global.mk +endif + +# We must link libperl explicitly +ifeq ($(PORTNAME), win32) +# these settings are the same as for plperl +override CPPFLAGS += -DPLPERL_HAVE_UID_GID -Wno-comment +# ... see silliness in plperl Makefile ... +SHLIB_LINK_INTERNAL += $(sort $(wildcard ../../src/pl/plperl/libperl*.a)) +else +rpathdir = $(perl_archlibexp)/CORE +SHLIB_LINK += $(perl_embed_ldflags) +endif + +# As with plperl we need to include the perl_includespec directory last. +override CPPFLAGS := $(CPPFLAGS) $(perl_embed_ccflags) $(perl_includespec) diff --git a/contrib/jsonb_plperl/expected/jsonb_plperl.out b/contrib/jsonb_plperl/expected/jsonb_plperl.out new file mode 100644 index 0000000000..6dc090a87f --- /dev/null +++ b/contrib/jsonb_plperl/expected/jsonb_plperl.out @@ -0,0 +1,233 @@ +CREATE EXTENSION jsonb_plperl CASCADE; +NOTICE: installing required extension "plperl" +CREATE FUNCTION testHVToJsonb() RETURNS jsonb +LANGUAGE plperl +TRANSFORM FOR TYPE jsonb +AS $$ +$val = {a => 1, b => 'boo', c => undef}; +return $val; +$$; +SELECT testHVToJsonb(); + testhvtojsonb +--------------------------------- + {"a": 1, "b": "boo", "c": null} +(1 row) + +CREATE FUNCTION testAVToJsonb() RETURNS jsonb +LANGUAGE plperl +TRANSFORM FOR TYPE jsonb +AS $$ +$val = [{a => 1, b => 'boo', c => undef}, {d => 2}]; +return $val; +$$; +SELECT testAVToJsonb(); + testavtojsonb +--------------------------------------------- + [{"a": 1, "b": "boo", "c": null}, {"d": 2}] +(1 row) + +CREATE FUNCTION testSVToJsonb() RETURNS jsonb +LANGUAGE plperl +TRANSFORM FOR TYPE jsonb +AS $$ +$val = 1; +return $val; +$$; +SELECT testSVToJsonb(); + testsvtojsonb +--------------- + 1 +(1 row) + +CREATE FUNCTION testUVToJsonb() RETURNS jsonb +LANGUAGE plperl +TRANSFORM FOR TYPE jsonb +as $$ +$val = ~0; +return $val; +$$; +-- this might produce either 18446744073709551615 or 4294967295 +SELECT testUVToJsonb() IN ('18446744073709551615'::jsonb, '4294967295'::jsonb); + ?column? +---------- + t +(1 row) + +-- this revealed a bug in the original implementation +CREATE FUNCTION testRegexpResultToJsonb() RETURNS jsonb +LANGUAGE plperl +TRANSFORM FOR TYPE jsonb +AS $$ +return ('1' =~ m(0\t2)); +$$; +SELECT testRegexpResultToJsonb(); + testregexpresulttojsonb +------------------------- + 0 +(1 row) + +CREATE FUNCTION roundtrip(val jsonb, ref text = '') RETURNS jsonb +LANGUAGE plperl +TRANSFORM FOR TYPE jsonb +AS $$ +# can't use Data::Dumper, but let's at least check for unexpected ref type +die 'unexpected '.(ref($_[0]) || 'not a').' reference' + if ref($_[0]) ne $_[1]; +return $_[0]; +$$; +SELECT roundtrip('null') is null; + ?column? 
+---------- + t +(1 row) + +SELECT roundtrip('1'); + roundtrip +----------- + 1 +(1 row) + +SELECT roundtrip('1E+131071'); +ERROR: cannot convert infinity to jsonb +CONTEXT: PL/Perl function "roundtrip" +SELECT roundtrip('-1'); + roundtrip +----------- + -1 +(1 row) + +SELECT roundtrip('1.2'); + roundtrip +----------- + 1.2 +(1 row) + +SELECT roundtrip('-1.2'); + roundtrip +----------- + -1.2 +(1 row) + +SELECT roundtrip('"string"'); + roundtrip +----------- + "string" +(1 row) + +SELECT roundtrip('"NaN"'); + roundtrip +----------- + "NaN" +(1 row) + +SELECT roundtrip('true'); + roundtrip +----------- + 1 +(1 row) + +SELECT roundtrip('false'); + roundtrip +----------- + 0 +(1 row) + +SELECT roundtrip('[]', 'ARRAY'); + roundtrip +----------- + [] +(1 row) + +SELECT roundtrip('[null, null]', 'ARRAY'); + roundtrip +-------------- + [null, null] +(1 row) + +SELECT roundtrip('[1, 2, 3]', 'ARRAY'); + roundtrip +----------- + [1, 2, 3] +(1 row) + +SELECT roundtrip('[-1, 2, -3]', 'ARRAY'); + roundtrip +------------- + [-1, 2, -3] +(1 row) + +SELECT roundtrip('[1.2, 2.3, 3.4]', 'ARRAY'); + roundtrip +----------------- + [1.2, 2.3, 3.4] +(1 row) + +SELECT roundtrip('[-1.2, 2.3, -3.4]', 'ARRAY'); + roundtrip +------------------- + [-1.2, 2.3, -3.4] +(1 row) + +SELECT roundtrip('["string1", "string2"]', 'ARRAY'); + roundtrip +------------------------ + ["string1", "string2"] +(1 row) + +SELECT roundtrip('[["string1", "string2"]]', 'ARRAY'); + roundtrip +-------------------------- + [["string1", "string2"]] +(1 row) + +SELECT roundtrip('{}', 'HASH'); + roundtrip +----------- + {} +(1 row) + +SELECT roundtrip('{"1": null}', 'HASH'); + roundtrip +------------- + {"1": null} +(1 row) + +SELECT roundtrip('{"1": 1}', 'HASH'); + roundtrip +----------- + {"1": 1} +(1 row) + +SELECT roundtrip('{"1": -1}', 'HASH'); + roundtrip +----------- + {"1": -1} +(1 row) + +SELECT roundtrip('{"1": 1.1}', 'HASH'); + roundtrip +------------ + {"1": 1.1} +(1 row) + +SELECT roundtrip('{"1": -1.1}', 'HASH'); + roundtrip +------------- + {"1": -1.1} +(1 row) + +SELECT roundtrip('{"1": "string1"}', 'HASH'); + roundtrip +------------------ + {"1": "string1"} +(1 row) + +SELECT roundtrip('{"1": {"2": [3, 4, 5]}, "2": 3}', 'HASH'); + roundtrip +--------------------------------- + {"1": {"2": [3, 4, 5]}, "2": 3} +(1 row) + +\set VERBOSITY terse \\ -- suppress cascade details +DROP EXTENSION plperl CASCADE; +NOTICE: drop cascades to 7 other objects diff --git a/contrib/jsonb_plperl/expected/jsonb_plperlu.out b/contrib/jsonb_plperl/expected/jsonb_plperlu.out new file mode 100644 index 0000000000..434327bea0 --- /dev/null +++ b/contrib/jsonb_plperl/expected/jsonb_plperlu.out @@ -0,0 +1,260 @@ +CREATE EXTENSION jsonb_plperlu CASCADE; +NOTICE: installing required extension "plperlu" +CREATE FUNCTION testHVToJsonb() RETURNS jsonb +LANGUAGE plperlu +TRANSFORM FOR TYPE jsonb +AS $$ +$val = {a => 1, b => 'boo', c => undef}; +return $val; +$$; +SELECT testHVToJsonb(); + testhvtojsonb +--------------------------------- + {"a": 1, "b": "boo", "c": null} +(1 row) + +CREATE FUNCTION testAVToJsonb() RETURNS jsonb +LANGUAGE plperlu +TRANSFORM FOR TYPE jsonb +AS $$ +$val = [{a => 1, b => 'boo', c => undef}, {d => 2}]; +return $val; +$$; +SELECT testAVToJsonb(); + testavtojsonb +--------------------------------------------- + [{"a": 1, "b": "boo", "c": null}, {"d": 2}] +(1 row) + +CREATE FUNCTION testSVToJsonb() RETURNS jsonb +LANGUAGE plperlu +TRANSFORM FOR TYPE jsonb +AS $$ +$val = 1; +return $val; +$$; +SELECT testSVToJsonb(); + testsvtojsonb 
+--------------- + 1 +(1 row) + +CREATE FUNCTION testUVToJsonb() RETURNS jsonb +LANGUAGE plperlu +TRANSFORM FOR TYPE jsonb +as $$ +$val = ~0; +return $val; +$$; +-- this might produce either 18446744073709551615 or 4294967295 +SELECT testUVToJsonb() IN ('18446744073709551615'::jsonb, '4294967295'::jsonb); + ?column? +---------- + t +(1 row) + +-- this revealed a bug in the original implementation +CREATE FUNCTION testRegexpResultToJsonb() RETURNS jsonb +LANGUAGE plperlu +TRANSFORM FOR TYPE jsonb +AS $$ +return ('1' =~ m(0\t2)); +$$; +SELECT testRegexpResultToJsonb(); + testregexpresulttojsonb +------------------------- + 0 +(1 row) + +CREATE FUNCTION roundtrip(val jsonb, ref text = '') RETURNS jsonb +LANGUAGE plperlu +TRANSFORM FOR TYPE jsonb +AS $$ +use Data::Dumper; +$Data::Dumper::Sortkeys = 1; +$Data::Dumper::Indent = 0; +elog(INFO, Dumper($_[0])); +die 'unexpected '.(ref($_[0]) || 'not a').' reference' + if ref($_[0]) ne $_[1]; +return $_[0]; +$$; +SELECT roundtrip('null') is null; +INFO: $VAR1 = undef; + ?column? +---------- + t +(1 row) + +SELECT roundtrip('1'); +INFO: $VAR1 = '1'; + roundtrip +----------- + 1 +(1 row) + +-- skip because Data::Dumper produces a platform-dependent spelling of infinity +-- SELECT roundtrip('1E+131071'); +SELECT roundtrip('-1'); +INFO: $VAR1 = '-1'; + roundtrip +----------- + -1 +(1 row) + +SELECT roundtrip('1.2'); +INFO: $VAR1 = '1.2'; + roundtrip +----------- + 1.2 +(1 row) + +SELECT roundtrip('-1.2'); +INFO: $VAR1 = '-1.2'; + roundtrip +----------- + -1.2 +(1 row) + +SELECT roundtrip('"string"'); +INFO: $VAR1 = 'string'; + roundtrip +----------- + "string" +(1 row) + +SELECT roundtrip('"NaN"'); +INFO: $VAR1 = 'NaN'; + roundtrip +----------- + "NaN" +(1 row) + +SELECT roundtrip('true'); +INFO: $VAR1 = '1'; + roundtrip +----------- + 1 +(1 row) + +SELECT roundtrip('false'); +INFO: $VAR1 = '0'; + roundtrip +----------- + 0 +(1 row) + +SELECT roundtrip('[]', 'ARRAY'); +INFO: $VAR1 = []; + roundtrip +----------- + [] +(1 row) + +SELECT roundtrip('[null, null]', 'ARRAY'); +INFO: $VAR1 = [undef,undef]; + roundtrip +-------------- + [null, null] +(1 row) + +SELECT roundtrip('[1, 2, 3]', 'ARRAY'); +INFO: $VAR1 = ['1','2','3']; + roundtrip +----------- + [1, 2, 3] +(1 row) + +SELECT roundtrip('[-1, 2, -3]', 'ARRAY'); +INFO: $VAR1 = ['-1','2','-3']; + roundtrip +------------- + [-1, 2, -3] +(1 row) + +SELECT roundtrip('[1.2, 2.3, 3.4]', 'ARRAY'); +INFO: $VAR1 = ['1.2','2.3','3.4']; + roundtrip +----------------- + [1.2, 2.3, 3.4] +(1 row) + +SELECT roundtrip('[-1.2, 2.3, -3.4]', 'ARRAY'); +INFO: $VAR1 = ['-1.2','2.3','-3.4']; + roundtrip +------------------- + [-1.2, 2.3, -3.4] +(1 row) + +SELECT roundtrip('["string1", "string2"]', 'ARRAY'); +INFO: $VAR1 = ['string1','string2']; + roundtrip +------------------------ + ["string1", "string2"] +(1 row) + +SELECT roundtrip('[["string1", "string2"]]', 'ARRAY'); +INFO: $VAR1 = [['string1','string2']]; + roundtrip +-------------------------- + [["string1", "string2"]] +(1 row) + +SELECT roundtrip('{}', 'HASH'); +INFO: $VAR1 = {}; + roundtrip +----------- + {} +(1 row) + +SELECT roundtrip('{"1": null}', 'HASH'); +INFO: $VAR1 = {'1' => undef}; + roundtrip +------------- + {"1": null} +(1 row) + +SELECT roundtrip('{"1": 1}', 'HASH'); +INFO: $VAR1 = {'1' => '1'}; + roundtrip +----------- + {"1": 1} +(1 row) + +SELECT roundtrip('{"1": -1}', 'HASH'); +INFO: $VAR1 = {'1' => '-1'}; + roundtrip +----------- + {"1": -1} +(1 row) + +SELECT roundtrip('{"1": 1.1}', 'HASH'); +INFO: $VAR1 = {'1' => '1.1'}; + roundtrip +------------ 
+ {"1": 1.1} +(1 row) + +SELECT roundtrip('{"1": -1.1}', 'HASH'); +INFO: $VAR1 = {'1' => '-1.1'}; + roundtrip +------------- + {"1": -1.1} +(1 row) + +SELECT roundtrip('{"1": "string1"}', 'HASH'); +INFO: $VAR1 = {'1' => 'string1'}; + roundtrip +------------------ + {"1": "string1"} +(1 row) + +SELECT roundtrip('{"1": {"2": [3, 4, 5]}, "2": 3}', 'HASH'); +INFO: $VAR1 = {'1' => {'2' => ['3','4','5']},'2' => '3'}; + roundtrip +--------------------------------- + {"1": {"2": [3, 4, 5]}, "2": 3} +(1 row) + +\set VERBOSITY terse \\ -- suppress cascade details +DROP EXTENSION plperlu CASCADE; +NOTICE: drop cascades to 7 other objects diff --git a/contrib/jsonb_plperl/jsonb_plperl--1.0.sql b/contrib/jsonb_plperl/jsonb_plperl--1.0.sql new file mode 100644 index 0000000000..c7964ba07c --- /dev/null +++ b/contrib/jsonb_plperl/jsonb_plperl--1.0.sql @@ -0,0 +1,19 @@ +/* contrib/jsonb_plperl/jsonb_plperl--1.0.sql */ + +-- complain if script is sourced in psql, rather than via CREATE EXTENSION +\echo Use "CREATE EXTENSION jsonb_plperl" to load this file. \quit + +CREATE FUNCTION jsonb_to_plperl(val internal) RETURNS internal +LANGUAGE C STRICT IMMUTABLE +AS 'MODULE_PATHNAME'; + +CREATE FUNCTION plperl_to_jsonb(val internal) RETURNS jsonb +LANGUAGE C STRICT IMMUTABLE +AS 'MODULE_PATHNAME'; + +CREATE TRANSFORM FOR jsonb LANGUAGE plperl ( + FROM SQL WITH FUNCTION jsonb_to_plperl(internal), + TO SQL WITH FUNCTION plperl_to_jsonb(internal) +); + +COMMENT ON TRANSFORM FOR jsonb LANGUAGE plperl IS 'transform between jsonb and Perl'; diff --git a/contrib/jsonb_plperl/jsonb_plperl.c b/contrib/jsonb_plperl/jsonb_plperl.c new file mode 100644 index 0000000000..79c5f57d8f --- /dev/null +++ b/contrib/jsonb_plperl/jsonb_plperl.c @@ -0,0 +1,303 @@ +#include "postgres.h" + +#include + +/* Defined by Perl */ +#undef _ + +#include "fmgr.h" +#include "plperl.h" +#include "plperl_helpers.h" +#include "utils/jsonb.h" +#include "utils/fmgrprotos.h" + +PG_MODULE_MAGIC; + +static SV *Jsonb_to_SV(JsonbContainer *jsonb); +static JsonbValue *SV_to_JsonbValue(SV *obj, JsonbParseState **ps, bool is_elem); + + +static SV * +JsonbValue_to_SV(JsonbValue *jbv) +{ + dTHX; + + switch (jbv->type) + { + case jbvBinary: + return Jsonb_to_SV(jbv->val.binary.data); + + case jbvNumeric: + { + char *str = DatumGetCString(DirectFunctionCall1(numeric_out, + NumericGetDatum(jbv->val.numeric))); + SV *result = newSVnv(SvNV(cstr2sv(str))); + + pfree(str); + return result; + } + + case jbvString: + { + char *str = pnstrdup(jbv->val.string.val, + jbv->val.string.len); + SV *result = cstr2sv(str); + + pfree(str); + return result; + } + + case jbvBool: + return newSVnv(SvNV(jbv->val.boolean ? 
&PL_sv_yes : &PL_sv_no)); + + case jbvNull: + return newSV(0); + + default: + elog(ERROR, "unexpected jsonb value type: %d", jbv->type); + return NULL; + } +} + +static SV * +Jsonb_to_SV(JsonbContainer *jsonb) +{ + dTHX; + JsonbValue v; + JsonbIterator *it; + JsonbIteratorToken r; + + it = JsonbIteratorInit(jsonb); + r = JsonbIteratorNext(&it, &v, true); + + switch (r) + { + case WJB_BEGIN_ARRAY: + if (v.val.array.rawScalar) + { + JsonbValue tmp; + + if ((r = JsonbIteratorNext(&it, &v, true)) != WJB_ELEM || + (r = JsonbIteratorNext(&it, &tmp, true)) != WJB_END_ARRAY || + (r = JsonbIteratorNext(&it, &tmp, true)) != WJB_DONE) + elog(ERROR, "unexpected jsonb token: %d", r); + + return JsonbValue_to_SV(&v); + } + else + { + AV *av = newAV(); + + while ((r = JsonbIteratorNext(&it, &v, true)) != WJB_DONE) + { + if (r == WJB_ELEM) + av_push(av, JsonbValue_to_SV(&v)); + } + + return newRV((SV *) av); + } + + case WJB_BEGIN_OBJECT: + { + HV *hv = newHV(); + + while ((r = JsonbIteratorNext(&it, &v, true)) != WJB_DONE) + { + if (r == WJB_KEY) + { + /* json key in v, json value in val */ + JsonbValue val; + + if (JsonbIteratorNext(&it, &val, true) == WJB_VALUE) + { + SV *value = JsonbValue_to_SV(&val); + + (void) hv_store(hv, + v.val.string.val, v.val.string.len, + value, 0); + } + } + } + + return newRV((SV *) hv); + } + + default: + elog(ERROR, "unexpected jsonb token: %d", r); + return NULL; + } +} + +static JsonbValue * +AV_to_JsonbValue(AV *in, JsonbParseState **jsonb_state) +{ + dTHX; + SSize_t pcount = av_len(in) + 1; + SSize_t i; + + pushJsonbValue(jsonb_state, WJB_BEGIN_ARRAY, NULL); + + for (i = 0; i < pcount; i++) + { + SV **value = av_fetch(in, i, FALSE); + + if (value) + (void) SV_to_JsonbValue(*value, jsonb_state, true); + } + + return pushJsonbValue(jsonb_state, WJB_END_ARRAY, NULL); +} + +static JsonbValue * +HV_to_JsonbValue(HV *obj, JsonbParseState **jsonb_state) +{ + dTHX; + JsonbValue key; + SV *val; + char *kstr; + I32 klen; + + key.type = jbvString; + + pushJsonbValue(jsonb_state, WJB_BEGIN_OBJECT, NULL); + + (void) hv_iterinit(obj); + + while ((val = hv_iternextsv(obj, &kstr, &klen))) + { + key.val.string.val = pnstrdup(kstr, klen); + key.val.string.len = klen; + pushJsonbValue(jsonb_state, WJB_KEY, &key); + (void) SV_to_JsonbValue(val, jsonb_state, false); + } + + return pushJsonbValue(jsonb_state, WJB_END_OBJECT, NULL); +} + +static JsonbValue * +SV_to_JsonbValue(SV *in, JsonbParseState **jsonb_state, bool is_elem) +{ + dTHX; + JsonbValue out; /* result */ + + /* Dereference references recursively. */ + while (SvROK(in)) + in = SvRV(in); + + switch (SvTYPE(in)) + { + case SVt_PVAV: + return AV_to_JsonbValue((AV *) in, jsonb_state); + + case SVt_PVHV: + return HV_to_JsonbValue((HV *) in, jsonb_state); + + case SVt_NULL: + out.type = jbvNull; + break; + + default: + if (SvUOK(in)) + { + /* + * If UV is >=64 bits, we have no better way to make this + * happen than converting to text and back. Given the low + * usage of UV in Perl code, it's not clear it's worth working + * hard to provide alternate code paths. 
+ */ + const char *strval = SvPV_nolen(in); + + out.type = jbvNumeric; + out.val.numeric = + DatumGetNumeric(DirectFunctionCall3(numeric_in, + CStringGetDatum(strval), + ObjectIdGetDatum(InvalidOid), + Int32GetDatum(-1))); + } + else if (SvIOK(in)) + { + IV ival = SvIV(in); + + out.type = jbvNumeric; + out.val.numeric = + DatumGetNumeric(DirectFunctionCall1(int8_numeric, + Int64GetDatum((int64) ival))); + } + else if (SvNOK(in)) + { + double nval = SvNV(in); + + /* + * jsonb doesn't allow infinity or NaN (per JSON + * specification), but the numeric type that is used for the + * storage accepts NaN, so we have to prevent it here + * explicitly. We don't really have to check for isinf() + * here, as numeric doesn't allow it and it would be caught + * later, but it makes for a nicer error message. + */ + if (isinf(nval)) + ereport(ERROR, + (errcode(ERRCODE_NUMERIC_VALUE_OUT_OF_RANGE), + (errmsg("cannot convert infinity to jsonb")))); + if (isnan(nval)) + ereport(ERROR, + (errcode(ERRCODE_NUMERIC_VALUE_OUT_OF_RANGE), + (errmsg("cannot convert NaN to jsonb")))); + + out.type = jbvNumeric; + out.val.numeric = + DatumGetNumeric(DirectFunctionCall1(float8_numeric, + Float8GetDatum(nval))); + } + else if (SvPOK(in)) + { + out.type = jbvString; + out.val.string.val = sv2cstr(in); + out.val.string.len = strlen(out.val.string.val); + } + else + { + /* + * XXX It might be nice if we could include the Perl type in + * the error message. + */ + ereport(ERROR, + (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), + (errmsg("cannot transform this Perl type to jsonb")))); + return NULL; + } + } + + /* Push result into 'jsonb_state' unless it is a raw scalar. */ + return *jsonb_state + ? pushJsonbValue(jsonb_state, is_elem ? WJB_ELEM : WJB_VALUE, &out) + : memcpy(palloc(sizeof(JsonbValue)), &out, sizeof(JsonbValue)); +} + + +PG_FUNCTION_INFO_V1(jsonb_to_plperl); + +Datum +jsonb_to_plperl(PG_FUNCTION_ARGS) +{ + dTHX; + Jsonb *in = PG_GETARG_JSONB_P(0); + SV *sv = Jsonb_to_SV(&in->root); + + return PointerGetDatum(sv); +} + + +PG_FUNCTION_INFO_V1(plperl_to_jsonb); + +Datum +plperl_to_jsonb(PG_FUNCTION_ARGS) +{ + dTHX; + JsonbParseState *jsonb_state = NULL; + SV *in = (SV *) PG_GETARG_POINTER(0); + JsonbValue *out = SV_to_JsonbValue(in, &jsonb_state, true); + Jsonb *result = JsonbValueToJsonb(out); + + PG_RETURN_JSONB_P(result); +} diff --git a/contrib/jsonb_plperl/jsonb_plperl.control b/contrib/jsonb_plperl/jsonb_plperl.control new file mode 100644 index 0000000000..26c86a70e4 --- /dev/null +++ b/contrib/jsonb_plperl/jsonb_plperl.control @@ -0,0 +1,6 @@ +# jsonb_plperl extension +comment = 'transform between jsonb and plperl' +default_version = '1.0' +module_pathname = '$libdir/jsonb_plperl' +relocatable = true +requires = 'plperl' diff --git a/contrib/jsonb_plperl/jsonb_plperlu--1.0.sql b/contrib/jsonb_plperl/jsonb_plperlu--1.0.sql new file mode 100644 index 0000000000..5a5e475ad3 --- /dev/null +++ b/contrib/jsonb_plperl/jsonb_plperlu--1.0.sql @@ -0,0 +1,19 @@ +/* contrib/json_plperl/jsonb_plperl--1.0.sql */ + +-- complain if script is sourced in psql, rather than via CREATE EXTENSION +\echo Use "CREATE EXTENSION jsonb_plperlu" to load this file. 
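
A hedged usage sketch of the plperlu side of this transform (illustration only; the function name jsonb_bump is invented). With the transform installed, a jsonb argument arrives as ordinary Perl data rather than as text, and whatever Perl structure the function returns is converted back through plperl_to_jsonb:

CREATE EXTENSION jsonb_plperlu CASCADE;

CREATE FUNCTION jsonb_bump(val jsonb) RETURNS jsonb
LANGUAGE plperlu
TRANSFORM FOR TYPE jsonb
AS $$
my $h = $_[0];       # a Perl hash reference, not a JSON string
$h->{counter}++;     # manipulate it like any other Perl data
return $h;
$$;

SELECT jsonb_bump('{"counter": 41, "tag": "demo"}');
-- expected (jsonb reorders keys): {"tag": "demo", "counter": 42}
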
\quit + +CREATE FUNCTION jsonb_to_plperlu(val internal) RETURNS internal +LANGUAGE C STRICT IMMUTABLE +AS 'MODULE_PATHNAME', 'jsonb_to_plperl'; + +CREATE FUNCTION plperlu_to_jsonb(val internal) RETURNS jsonb +LANGUAGE C STRICT IMMUTABLE +AS 'MODULE_PATHNAME', 'plperl_to_jsonb'; + +CREATE TRANSFORM FOR jsonb LANGUAGE plperlu ( + FROM SQL WITH FUNCTION jsonb_to_plperlu(internal), + TO SQL WITH FUNCTION plperlu_to_jsonb(internal) +); + +COMMENT ON TRANSFORM FOR jsonb LANGUAGE plperlu IS 'transform between jsonb and Perl'; diff --git a/contrib/jsonb_plperl/jsonb_plperlu.control b/contrib/jsonb_plperl/jsonb_plperlu.control new file mode 100644 index 0000000000..946fc514c5 --- /dev/null +++ b/contrib/jsonb_plperl/jsonb_plperlu.control @@ -0,0 +1,6 @@ +# jsonb_plperl extension +comment = 'transform between jsonb and plperlu' +default_version = '1.0' +module_pathname = '$libdir/jsonb_plperl' +relocatable = true +requires = 'plperlu' diff --git a/contrib/jsonb_plperl/sql/jsonb_plperl.sql b/contrib/jsonb_plperl/sql/jsonb_plperl.sql new file mode 100644 index 0000000000..8b062dfc6b --- /dev/null +++ b/contrib/jsonb_plperl/sql/jsonb_plperl.sql @@ -0,0 +1,104 @@ +CREATE EXTENSION jsonb_plperl CASCADE; + + +CREATE FUNCTION testHVToJsonb() RETURNS jsonb +LANGUAGE plperl +TRANSFORM FOR TYPE jsonb +AS $$ +$val = {a => 1, b => 'boo', c => undef}; +return $val; +$$; + +SELECT testHVToJsonb(); + + +CREATE FUNCTION testAVToJsonb() RETURNS jsonb +LANGUAGE plperl +TRANSFORM FOR TYPE jsonb +AS $$ +$val = [{a => 1, b => 'boo', c => undef}, {d => 2}]; +return $val; +$$; + +SELECT testAVToJsonb(); + + +CREATE FUNCTION testSVToJsonb() RETURNS jsonb +LANGUAGE plperl +TRANSFORM FOR TYPE jsonb +AS $$ +$val = 1; +return $val; +$$; + +SELECT testSVToJsonb(); + + +CREATE FUNCTION testUVToJsonb() RETURNS jsonb +LANGUAGE plperl +TRANSFORM FOR TYPE jsonb +as $$ +$val = ~0; +return $val; +$$; + +-- this might produce either 18446744073709551615 or 4294967295 +SELECT testUVToJsonb() IN ('18446744073709551615'::jsonb, '4294967295'::jsonb); + + +-- this revealed a bug in the original implementation +CREATE FUNCTION testRegexpResultToJsonb() RETURNS jsonb +LANGUAGE plperl +TRANSFORM FOR TYPE jsonb +AS $$ +return ('1' =~ m(0\t2)); +$$; + +SELECT testRegexpResultToJsonb(); + + +CREATE FUNCTION roundtrip(val jsonb, ref text = '') RETURNS jsonb +LANGUAGE plperl +TRANSFORM FOR TYPE jsonb +AS $$ +# can't use Data::Dumper, but let's at least check for unexpected ref type +die 'unexpected '.(ref($_[0]) || 'not a').' 
reference' + if ref($_[0]) ne $_[1]; +return $_[0]; +$$; + + +SELECT roundtrip('null') is null; +SELECT roundtrip('1'); +SELECT roundtrip('1E+131071'); +SELECT roundtrip('-1'); +SELECT roundtrip('1.2'); +SELECT roundtrip('-1.2'); +SELECT roundtrip('"string"'); +SELECT roundtrip('"NaN"'); + +SELECT roundtrip('true'); +SELECT roundtrip('false'); + +SELECT roundtrip('[]', 'ARRAY'); +SELECT roundtrip('[null, null]', 'ARRAY'); +SELECT roundtrip('[1, 2, 3]', 'ARRAY'); +SELECT roundtrip('[-1, 2, -3]', 'ARRAY'); +SELECT roundtrip('[1.2, 2.3, 3.4]', 'ARRAY'); +SELECT roundtrip('[-1.2, 2.3, -3.4]', 'ARRAY'); +SELECT roundtrip('["string1", "string2"]', 'ARRAY'); +SELECT roundtrip('[["string1", "string2"]]', 'ARRAY'); + +SELECT roundtrip('{}', 'HASH'); +SELECT roundtrip('{"1": null}', 'HASH'); +SELECT roundtrip('{"1": 1}', 'HASH'); +SELECT roundtrip('{"1": -1}', 'HASH'); +SELECT roundtrip('{"1": 1.1}', 'HASH'); +SELECT roundtrip('{"1": -1.1}', 'HASH'); +SELECT roundtrip('{"1": "string1"}', 'HASH'); + +SELECT roundtrip('{"1": {"2": [3, 4, 5]}, "2": 3}', 'HASH'); + + +\set VERBOSITY terse \\ -- suppress cascade details +DROP EXTENSION plperl CASCADE; diff --git a/contrib/jsonb_plperl/sql/jsonb_plperlu.sql b/contrib/jsonb_plperl/sql/jsonb_plperlu.sql new file mode 100644 index 0000000000..8d8e841540 --- /dev/null +++ b/contrib/jsonb_plperl/sql/jsonb_plperlu.sql @@ -0,0 +1,108 @@ +CREATE EXTENSION jsonb_plperlu CASCADE; + + +CREATE FUNCTION testHVToJsonb() RETURNS jsonb +LANGUAGE plperlu +TRANSFORM FOR TYPE jsonb +AS $$ +$val = {a => 1, b => 'boo', c => undef}; +return $val; +$$; + +SELECT testHVToJsonb(); + + +CREATE FUNCTION testAVToJsonb() RETURNS jsonb +LANGUAGE plperlu +TRANSFORM FOR TYPE jsonb +AS $$ +$val = [{a => 1, b => 'boo', c => undef}, {d => 2}]; +return $val; +$$; + +SELECT testAVToJsonb(); + + +CREATE FUNCTION testSVToJsonb() RETURNS jsonb +LANGUAGE plperlu +TRANSFORM FOR TYPE jsonb +AS $$ +$val = 1; +return $val; +$$; + +SELECT testSVToJsonb(); + + +CREATE FUNCTION testUVToJsonb() RETURNS jsonb +LANGUAGE plperlu +TRANSFORM FOR TYPE jsonb +as $$ +$val = ~0; +return $val; +$$; + +-- this might produce either 18446744073709551615 or 4294967295 +SELECT testUVToJsonb() IN ('18446744073709551615'::jsonb, '4294967295'::jsonb); + + +-- this revealed a bug in the original implementation +CREATE FUNCTION testRegexpResultToJsonb() RETURNS jsonb +LANGUAGE plperlu +TRANSFORM FOR TYPE jsonb +AS $$ +return ('1' =~ m(0\t2)); +$$; + +SELECT testRegexpResultToJsonb(); + + +CREATE FUNCTION roundtrip(val jsonb, ref text = '') RETURNS jsonb +LANGUAGE plperlu +TRANSFORM FOR TYPE jsonb +AS $$ +use Data::Dumper; +$Data::Dumper::Sortkeys = 1; +$Data::Dumper::Indent = 0; +elog(INFO, Dumper($_[0])); +die 'unexpected '.(ref($_[0]) || 'not a').' 
reference' + if ref($_[0]) ne $_[1]; +return $_[0]; +$$; + + +SELECT roundtrip('null') is null; +SELECT roundtrip('1'); +-- skip because Data::Dumper produces a platform-dependent spelling of infinity +-- SELECT roundtrip('1E+131071'); +SELECT roundtrip('-1'); +SELECT roundtrip('1.2'); +SELECT roundtrip('-1.2'); +SELECT roundtrip('"string"'); +SELECT roundtrip('"NaN"'); + +SELECT roundtrip('true'); +SELECT roundtrip('false'); + +SELECT roundtrip('[]', 'ARRAY'); +SELECT roundtrip('[null, null]', 'ARRAY'); +SELECT roundtrip('[1, 2, 3]', 'ARRAY'); +SELECT roundtrip('[-1, 2, -3]', 'ARRAY'); +SELECT roundtrip('[1.2, 2.3, 3.4]', 'ARRAY'); +SELECT roundtrip('[-1.2, 2.3, -3.4]', 'ARRAY'); +SELECT roundtrip('["string1", "string2"]', 'ARRAY'); +SELECT roundtrip('[["string1", "string2"]]', 'ARRAY'); + +SELECT roundtrip('{}', 'HASH'); +SELECT roundtrip('{"1": null}', 'HASH'); +SELECT roundtrip('{"1": 1}', 'HASH'); +SELECT roundtrip('{"1": -1}', 'HASH'); +SELECT roundtrip('{"1": 1.1}', 'HASH'); +SELECT roundtrip('{"1": -1.1}', 'HASH'); +SELECT roundtrip('{"1": "string1"}', 'HASH'); + +SELECT roundtrip('{"1": {"2": [3, 4, 5]}, "2": 3}', 'HASH'); + + +\set VERBOSITY terse \\ -- suppress cascade details +DROP EXTENSION plperlu CASCADE; diff --git a/contrib/jsonb_plpython/.gitignore b/contrib/jsonb_plpython/.gitignore new file mode 100644 index 0000000000..ce6fab94a0 --- /dev/null +++ b/contrib/jsonb_plpython/.gitignore @@ -0,0 +1,6 @@ +# Generated subdirectories +/expected/python3/ +/log/ +/results/ +/sql/python3/ +/tmp_check/ diff --git a/contrib/jsonb_plpython/Makefile b/contrib/jsonb_plpython/Makefile new file mode 100644 index 0000000000..b3c98e6db0 --- /dev/null +++ b/contrib/jsonb_plpython/Makefile @@ -0,0 +1,39 @@ +# contrib/jsonb_plpython/Makefile + +MODULE_big = jsonb_plpython$(python_majorversion) +OBJS = jsonb_plpython.o $(WIN32RES) +PGFILEDESC = "jsonb_plpython - transform between jsonb and plpythonu" + +PG_CPPFLAGS = -I$(top_srcdir)/src/pl/plpython $(python_includespec) -DPLPYTHON_LIBNAME='"plpython$(python_majorversion)"' + +EXTENSION = jsonb_plpythonu jsonb_plpython2u jsonb_plpython3u +DATA = jsonb_plpythonu--1.0.sql jsonb_plpython2u--1.0.sql jsonb_plpython3u--1.0.sql + +REGRESS = jsonb_plpython +REGRESS_PLPYTHON3_MANGLE := $(REGRESS) + +ifdef USE_PGXS +PG_CONFIG = pg_config +PGXS := $(shell $(PG_CONFIG) --pgxs) +include $(PGXS) +else +subdir = contrib/jsonb_plpython +top_builddir = ../.. +include $(top_builddir)/src/Makefile.global +include $(top_srcdir)/contrib/contrib-global.mk +endif + +# We must link libpython explicitly +ifeq ($(PORTNAME), win32) +# ... see silliness in plpython Makefile ... 
+SHLIB_LINK_INTERNAL += $(sort $(wildcard ../../src/pl/plpython/libpython*.a)) +else +rpathdir = $(python_libdir) +SHLIB_LINK += $(python_libspec) $(python_additional_libs) +endif + +ifeq ($(python_majorversion),2) +REGRESS_OPTS += --load-extension=plpythonu --load-extension=jsonb_plpythonu +endif + +include $(top_srcdir)/src/pl/plpython/regress-python3-mangle.mk diff --git a/contrib/jsonb_plpython/expected/jsonb_plpython.out b/contrib/jsonb_plpython/expected/jsonb_plpython.out new file mode 100644 index 0000000000..b491fe9cc6 --- /dev/null +++ b/contrib/jsonb_plpython/expected/jsonb_plpython.out @@ -0,0 +1,306 @@ +CREATE EXTENSION jsonb_plpython2u CASCADE; +NOTICE: installing required extension "plpython2u" +-- test jsonb -> python dict +CREATE FUNCTION test1(val jsonb) RETURNS int +LANGUAGE plpythonu +TRANSFORM FOR TYPE jsonb +AS $$ +assert isinstance(val, dict) +assert(val == {'a': 1, 'c': 'NULL'}) +return len(val) +$$; +SELECT test1('{"a": 1, "c": "NULL"}'::jsonb); + test1 +------- + 2 +(1 row) + +-- test jsonb -> python dict +-- complex dict with dicts as value +CREATE FUNCTION test1complex(val jsonb) RETURNS int +LANGUAGE plpython2u +TRANSFORM FOR TYPE jsonb +AS $$ +assert isinstance(val, dict) +assert(val == {"d": {"d": 1}}) +return len(val) +$$; +SELECT test1complex('{"d": {"d": 1}}'::jsonb); + test1complex +-------------- + 1 +(1 row) + +-- test jsonb[] -> python dict +-- dict with array as value +CREATE FUNCTION test1arr(val jsonb) RETURNS int +LANGUAGE plpythonu +TRANSFORM FOR TYPE jsonb +AS $$ +assert isinstance(val, dict) +assert(val == {"d": [12, 1]}) +return len(val) +$$; +SELECT test1arr('{"d":[12, 1]}'::jsonb); + test1arr +---------- + 1 +(1 row) + +-- test jsonb[] -> python list +-- simple list +CREATE FUNCTION test2arr(val jsonb) RETURNS int +LANGUAGE plpythonu +TRANSFORM FOR TYPE jsonb +AS $$ +assert isinstance(val, list) +assert(val == [12, 1]) +return len(val) +$$; +SELECT test2arr('[12, 1]'::jsonb); + test2arr +---------- + 2 +(1 row) + +-- test jsonb[] -> python list +-- array of dicts +CREATE FUNCTION test3arr(val jsonb) RETURNS int +LANGUAGE plpythonu +TRANSFORM FOR TYPE jsonb +AS $$ +assert isinstance(val, list) +assert(val == [{"a": 1,"b": 2}, {"c": 3,"d": 4}]) +return len(val) +$$; +SELECT test3arr('[{"a": 1, "b": 2}, {"c": 3,"d": 4}]'::jsonb); + test3arr +---------- + 2 +(1 row) + +-- test jsonb int -> python int +CREATE FUNCTION test1int(val jsonb) RETURNS int +LANGUAGE plpythonu +TRANSFORM FOR TYPE jsonb +AS $$ +assert(val == 1) +return val +$$; +SELECT test1int('1'::jsonb); + test1int +---------- + 1 +(1 row) + +-- test jsonb string -> python string +CREATE FUNCTION test1string(val jsonb) RETURNS text +LANGUAGE plpythonu +TRANSFORM FOR TYPE jsonb +AS $$ +assert(val == "a") +return val +$$; +SELECT test1string('"a"'::jsonb); + test1string +------------- + a +(1 row) + +-- test jsonb null -> python None +CREATE FUNCTION test1null(val jsonb) RETURNS int +LANGUAGE plpythonu +TRANSFORM FOR TYPE jsonb +AS $$ +assert(val == None) +return 1 +$$; +SELECT test1null('null'::jsonb); + test1null +----------- + 1 +(1 row) + +-- test python -> jsonb +CREATE FUNCTION roundtrip(val jsonb) RETURNS jsonb +LANGUAGE plpythonu +TRANSFORM FOR TYPE jsonb +as $$ +return val +$$; +SELECT roundtrip('null'::jsonb); + roundtrip +----------- + +(1 row) + +SELECT roundtrip('1'::jsonb); + roundtrip +----------- + 1 +(1 row) + +SELECT roundtrip('1234567890.0987654321'::jsonb); + roundtrip +----------------------- + 1234567890.0987654321 +(1 row) + +SELECT 
roundtrip('-1234567890.0987654321'::jsonb); + roundtrip +------------------------ + -1234567890.0987654321 +(1 row) + +SELECT roundtrip('true'::jsonb); + roundtrip +----------- + true +(1 row) + +SELECT roundtrip('"string"'::jsonb); + roundtrip +----------- + "string" +(1 row) + +SELECT roundtrip('{"1": null}'::jsonb); + roundtrip +------------- + {"1": null} +(1 row) + +SELECT roundtrip('{"1": 1}'::jsonb); + roundtrip +----------- + {"1": 1} +(1 row) + +SELECT roundtrip('{"1": true}'::jsonb); + roundtrip +------------- + {"1": true} +(1 row) + +SELECT roundtrip('{"1": "string"}'::jsonb); + roundtrip +----------------- + {"1": "string"} +(1 row) + +SELECT roundtrip('[null]'::jsonb); + roundtrip +----------- + [null] +(1 row) + +SELECT roundtrip('[1]'::jsonb); + roundtrip +----------- + [1] +(1 row) + +SELECT roundtrip('[true]'::jsonb); + roundtrip +----------- + [true] +(1 row) + +SELECT roundtrip('["string"]'::jsonb); + roundtrip +------------ + ["string"] +(1 row) + +SELECT roundtrip('[null, 1]'::jsonb); + roundtrip +----------- + [null, 1] +(1 row) + +SELECT roundtrip('[1, true]'::jsonb); + roundtrip +----------- + [1, true] +(1 row) + +SELECT roundtrip('[true, "string"]'::jsonb); + roundtrip +------------------ + [true, "string"] +(1 row) + +SELECT roundtrip('["string", "string2"]'::jsonb); + roundtrip +----------------------- + ["string", "string2"] +(1 row) + +-- complex numbers -> jsonb +CREATE FUNCTION testComplexNumbers() RETURNS jsonb +LANGUAGE plpythonu +TRANSFORM FOR TYPE jsonb +AS $$ +x = 1 + 2j +return x +$$; +SELECT testComplexNumbers(); +ERROR: could not convert value "(1+2j)" to jsonb +CONTEXT: while creating return value +PL/Python function "testcomplexnumbers" +-- range -> jsonb +CREATE FUNCTION testRange() RETURNS jsonb +LANGUAGE plpythonu +TRANSFORM FOR TYPE jsonb +AS $$ +x = range(3) +return x +$$; +SELECT testRange(); + testrange +----------- + [0, 1, 2] +(1 row) + +-- 0xff -> jsonb +CREATE FUNCTION testDecimal() RETURNS jsonb +LANGUAGE plpythonu +TRANSFORM FOR TYPE jsonb +AS $$ +x = 0xff +return x +$$; +SELECT testDecimal(); + testdecimal +------------- + 255 +(1 row) + +-- tuple -> jsonb +CREATE FUNCTION testTuple() RETURNS jsonb +LANGUAGE plpythonu +TRANSFORM FOR TYPE jsonb +AS $$ +x = (1, 'String', None) +return x +$$; +SELECT testTuple(); + testtuple +--------------------- + [1, "String", null] +(1 row) + +-- interesting dict -> jsonb +CREATE FUNCTION test_dict1() RETURNS jsonb +LANGUAGE plpythonu +TRANSFORM FOR TYPE jsonb +AS $$ +x = {"a": 1, None: 2, 33: 3} +return x +$$; +SELECT test_dict1(); + test_dict1 +-------------------------- + {"": 2, "a": 1, "33": 3} +(1 row) + diff --git a/contrib/jsonb_plpython/jsonb_plpython.c b/contrib/jsonb_plpython/jsonb_plpython.c new file mode 100644 index 0000000000..f44d364c97 --- /dev/null +++ b/contrib/jsonb_plpython/jsonb_plpython.c @@ -0,0 +1,471 @@ +#include "postgres.h" + +#include "plpython.h" +#include "plpy_elog.h" +#include "plpy_typeio.h" +#include "utils/jsonb.h" +#include "utils/fmgrprotos.h" +#include "utils/numeric.h" + +PG_MODULE_MAGIC; + +void _PG_init(void); + +/* for PLyObject_AsString in plpy_typeio.c */ +typedef char *(*PLyObject_AsString_t) (PyObject *plrv); +static PLyObject_AsString_t PLyObject_AsString_p; + +typedef void (*PLy_elog_impl_t) (int elevel, const char *fmt,...); +static PLy_elog_impl_t PLy_elog_impl_p; + +/* + * decimal_constructor is a function from python library and used + * for transforming strings into python decimal type + */ +static PyObject *decimal_constructor; + +static 
PyObject *PLyObject_FromJsonbContainer(JsonbContainer *jsonb); +static JsonbValue *PLyObject_ToJsonbValue(PyObject *obj, + JsonbParseState **jsonb_state, bool is_elem); + +#if PY_MAJOR_VERSION >= 3 +typedef PyObject *(*PLyUnicode_FromStringAndSize_t) + (const char *s, Py_ssize_t size); +static PLyUnicode_FromStringAndSize_t PLyUnicode_FromStringAndSize_p; +#endif + +/* + * Module initialize function: fetch function pointers for cross-module calls. + */ +void +_PG_init(void) +{ + /* Asserts verify that typedefs above match original declarations */ + AssertVariableIsOfType(&PLyObject_AsString, PLyObject_AsString_t); + PLyObject_AsString_p = (PLyObject_AsString_t) + load_external_function("$libdir/" PLPYTHON_LIBNAME, "PLyObject_AsString", + true, NULL); +#if PY_MAJOR_VERSION >= 3 + AssertVariableIsOfType(&PLyUnicode_FromStringAndSize, PLyUnicode_FromStringAndSize_t); + PLyUnicode_FromStringAndSize_p = (PLyUnicode_FromStringAndSize_t) + load_external_function("$libdir/" PLPYTHON_LIBNAME, "PLyUnicode_FromStringAndSize", + true, NULL); +#endif + + AssertVariableIsOfType(&PLy_elog_impl, PLy_elog_impl_t); + PLy_elog_impl_p = (PLy_elog_impl_t) + load_external_function("$libdir/" PLPYTHON_LIBNAME, "PLy_elog_impl", + true, NULL); +} + +/* These defines must be after the _PG_init */ +#define PLyObject_AsString (PLyObject_AsString_p) +#define PLyUnicode_FromStringAndSize (PLyUnicode_FromStringAndSize_p) +#undef PLy_elog +#define PLy_elog (PLy_elog_impl_p) + +/* + * PLyString_FromJsonbValue + * + * Transform string JsonbValue to Python string. + */ +static PyObject * +PLyString_FromJsonbValue(JsonbValue *jbv) +{ + Assert(jbv->type == jbvString); + + return PyString_FromStringAndSize(jbv->val.string.val, jbv->val.string.len); +} + +/* + * PLyString_ToJsonbValue + * + * Transform Python string to JsonbValue. + */ +static void +PLyString_ToJsonbValue(PyObject *obj, JsonbValue *jbvElem) +{ + jbvElem->type = jbvString; + jbvElem->val.string.val = PLyObject_AsString(obj); + jbvElem->val.string.len = strlen(jbvElem->val.string.val); +} + +/* + * PLyObject_FromJsonbValue + * + * Transform JsonbValue to PyObject. + */ +static PyObject * +PLyObject_FromJsonbValue(JsonbValue *jsonbValue) +{ + switch (jsonbValue->type) + { + case jbvNull: + Py_RETURN_NONE; + + case jbvBinary: + return PLyObject_FromJsonbContainer(jsonbValue->val.binary.data); + + case jbvNumeric: + { + Datum num; + char *str; + + num = NumericGetDatum(jsonbValue->val.numeric); + str = DatumGetCString(DirectFunctionCall1(numeric_out, num)); + + return PyObject_CallFunction(decimal_constructor, "s", str); + } + + case jbvString: + return PLyString_FromJsonbValue(jsonbValue); + + case jbvBool: + if (jsonbValue->val.boolean) + Py_RETURN_TRUE; + else + Py_RETURN_FALSE; + + default: + elog(ERROR, "unexpected jsonb value type: %d", jsonbValue->type); + return NULL; + } +} + +/* + * PLyObject_FromJsonb + * + * Transform JsonbContainer to PyObject. 
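 * (A hedged illustration, not from the upstream commit: per the switch below,
 *  a jsonb object such as {"a": 1} maps to a Python dict, an array such as
 *  [1, 2] maps to a Python list, and a raw-scalar container is unwrapped to
 *  the bare scalar; the regression tests in sql/jsonb_plpython.sql exercise
 *  exactly these cases.)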
+ */ +static PyObject * +PLyObject_FromJsonbContainer(JsonbContainer *jsonb) +{ + JsonbIteratorToken r; + JsonbValue v; + JsonbIterator *it; + PyObject *result; + + it = JsonbIteratorInit(jsonb); + r = JsonbIteratorNext(&it, &v, true); + + switch (r) + { + case WJB_BEGIN_ARRAY: + if (v.val.array.rawScalar) + { + JsonbValue tmp; + + if ((r = JsonbIteratorNext(&it, &v, true)) != WJB_ELEM || + (r = JsonbIteratorNext(&it, &tmp, true)) != WJB_END_ARRAY || + (r = JsonbIteratorNext(&it, &tmp, true)) != WJB_DONE) + elog(ERROR, "unexpected jsonb token: %d", r); + + result = PLyObject_FromJsonbValue(&v); + } + else + { + /* array in v */ + result = PyList_New(0); + if (!result) + return NULL; + + while ((r = JsonbIteratorNext(&it, &v, true)) != WJB_DONE) + { + if (r == WJB_ELEM) + { + PyObject *elem = PLyObject_FromJsonbValue(&v); + + PyList_Append(result, elem); + Py_XDECREF(elem); + } + } + } + break; + + case WJB_BEGIN_OBJECT: + result = PyDict_New(); + if (!result) + return NULL; + + while ((r = JsonbIteratorNext(&it, &v, true)) != WJB_DONE) + { + if (r == WJB_KEY) + { + PyObject *key = PLyString_FromJsonbValue(&v); + + if (!key) + return NULL; + + r = JsonbIteratorNext(&it, &v, true); + + if (r == WJB_VALUE) + { + PyObject *value = PLyObject_FromJsonbValue(&v); + + if (!value) + { + Py_XDECREF(key); + return NULL; + } + + PyDict_SetItem(result, key, value); + Py_XDECREF(value); + } + + Py_XDECREF(key); + } + } + break; + + default: + elog(ERROR, "unexpected jsonb token: %d", r); + return NULL; + } + + return result; +} + +/* + * PLyMapping_ToJsonbValue + * + * Transform Python dict to JsonbValue. + */ +static JsonbValue * +PLyMapping_ToJsonbValue(PyObject *obj, JsonbParseState **jsonb_state) +{ + Py_ssize_t pcount; + JsonbValue *out = NULL; + + /* We need it volatile, since we use it after longjmp */ + volatile PyObject *items_v = NULL; + + pcount = PyMapping_Size(obj); + items_v = PyMapping_Items(obj); + + PG_TRY(); + { + Py_ssize_t i; + PyObject *items; + + items = (PyObject *) items_v; + + pushJsonbValue(jsonb_state, WJB_BEGIN_OBJECT, NULL); + + for (i = 0; i < pcount; i++) + { + JsonbValue jbvKey; + PyObject *item = PyList_GetItem(items, i); + PyObject *key = PyTuple_GetItem(item, 0); + PyObject *value = PyTuple_GetItem(item, 1); + + /* Python dictionary can have None as key */ + if (key == Py_None) + { + jbvKey.type = jbvString; + jbvKey.val.string.len = 0; + jbvKey.val.string.val = ""; + } + else + { + /* All others types of keys we serialize to string */ + PLyString_ToJsonbValue(key, &jbvKey); + } + + (void) pushJsonbValue(jsonb_state, WJB_KEY, &jbvKey); + (void) PLyObject_ToJsonbValue(value, jsonb_state, false); + } + + out = pushJsonbValue(jsonb_state, WJB_END_OBJECT, NULL); + } + PG_CATCH(); + { + Py_DECREF(items_v); + PG_RE_THROW(); + } + PG_END_TRY(); + + return out; +} + +/* + * PLySequence_ToJsonbValue + * + * Transform python list to JsonbValue. Expects transformed PyObject and + * a state required for jsonb construction. + */ +static JsonbValue * +PLySequence_ToJsonbValue(PyObject *obj, JsonbParseState **jsonb_state) +{ + Py_ssize_t i; + Py_ssize_t pcount; + + pcount = PySequence_Size(obj); + + pushJsonbValue(jsonb_state, WJB_BEGIN_ARRAY, NULL); + + for (i = 0; i < pcount; i++) + { + PyObject *value = PySequence_GetItem(obj, i); + + (void) PLyObject_ToJsonbValue(value, jsonb_state, true); + + Py_XDECREF(value); + } + + return pushJsonbValue(jsonb_state, WJB_END_ARRAY, NULL); +} + +/* + * PLyNumber_ToJsonbValue(PyObject *obj) + * + * Transform python number to JsonbValue. 
+ */ +static JsonbValue * +PLyNumber_ToJsonbValue(PyObject *obj, JsonbValue *jbvNum) +{ + Numeric num; + char *str = PLyObject_AsString(obj); + + PG_TRY(); + { + Datum numd; + + numd = DirectFunctionCall3(numeric_in, + CStringGetDatum(str), + ObjectIdGetDatum(InvalidOid), + Int32GetDatum(-1)); + num = DatumGetNumeric(numd); + } + PG_CATCH(); + { + ereport(ERROR, + (errcode(ERRCODE_DATATYPE_MISMATCH), + (errmsg("could not convert value \"%s\" to jsonb", str)))); + } + PG_END_TRY(); + + pfree(str); + + /* + * jsonb doesn't allow NaN (per JSON specification), so we have to prevent + * it here explicitly. (Infinity is also not allowed in jsonb, but + * numeric_in above already catches that.) + */ + if (numeric_is_nan(num)) + ereport(ERROR, + (errcode(ERRCODE_NUMERIC_VALUE_OUT_OF_RANGE), + (errmsg("cannot convert NaN to jsonb")))); + + jbvNum->type = jbvNumeric; + jbvNum->val.numeric = num; + + return jbvNum; +} + +/* + * PLyObject_ToJsonbValue(PyObject *obj) + * + * Transform python object to JsonbValue. + */ +static JsonbValue * +PLyObject_ToJsonbValue(PyObject *obj, JsonbParseState **jsonb_state, bool is_elem) +{ + JsonbValue buf; + JsonbValue *out; + + if (!(PyString_Check(obj) || PyUnicode_Check(obj))) + { + if (PySequence_Check(obj)) + return PLySequence_ToJsonbValue(obj, jsonb_state); + else if (PyMapping_Check(obj)) + return PLyMapping_ToJsonbValue(obj, jsonb_state); + } + + /* Allocate JsonbValue in heap only if it is raw scalar value. */ + if (*jsonb_state) + out = &buf; + else + out = palloc(sizeof(JsonbValue)); + + if (obj == Py_None) + out->type = jbvNull; + else if (PyString_Check(obj) || PyUnicode_Check(obj)) + PLyString_ToJsonbValue(obj, out); + + /* + * PyNumber_Check() returns true for booleans, so boolean check should + * come first. + */ + else if (PyBool_Check(obj)) + { + out->type = jbvBool; + out->val.boolean = (obj == Py_True); + } + else if (PyNumber_Check(obj)) + out = PLyNumber_ToJsonbValue(obj, out); + else + ereport(ERROR, + (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), + (errmsg("Python type \"%s\" cannot be transformed to jsonb", + PLyObject_AsString((PyObject *) obj->ob_type))))); + + /* Push result into 'jsonb_state' unless it is raw scalar value. */ + return (*jsonb_state ? + pushJsonbValue(jsonb_state, is_elem ? WJB_ELEM : WJB_VALUE, out) : + out); +} + +/* + * plpython_to_jsonb + * + * Transform python object to Jsonb datum + */ +PG_FUNCTION_INFO_V1(plpython_to_jsonb); +Datum +plpython_to_jsonb(PG_FUNCTION_ARGS) +{ + PyObject *obj; + JsonbValue *out; + JsonbParseState *jsonb_state = NULL; + + obj = (PyObject *) PG_GETARG_POINTER(0); + out = PLyObject_ToJsonbValue(obj, &jsonb_state, true); + PG_RETURN_POINTER(JsonbValueToJsonb(out)); +} + +/* + * jsonb_to_plpython + * + * Transform Jsonb datum to PyObject and return it as internal. + */ +PG_FUNCTION_INFO_V1(jsonb_to_plpython); +Datum +jsonb_to_plpython(PG_FUNCTION_ARGS) +{ + PyObject *result; + Jsonb *in = PG_GETARG_JSONB_P(0); + + /* + * Initialize pointer to Decimal constructor. First we try "cdecimal", C + * version of decimal library. In case of failure we use slower "decimal" + * module. 
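 * (A hedged Python-level sketch of the same fallback, not from the upstream
 *  commit; module names are as given above:
 *      try:
 *          from cdecimal import Decimal
 *      except ImportError:
 *          from decimal import Decimal
 *  The C code below performs this lookup once and caches the constructor.)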
+ */ + if (!decimal_constructor) + { + PyObject *decimal_module = PyImport_ImportModule("cdecimal"); + + if (!decimal_module) + { + PyErr_Clear(); + decimal_module = PyImport_ImportModule("decimal"); + } + Assert(decimal_module); + decimal_constructor = PyObject_GetAttrString(decimal_module, "Decimal"); + } + + result = PLyObject_FromJsonbContainer(&in->root); + if (!result) + PLy_elog(ERROR, "transformation from jsonb to Python failed"); + + return PointerGetDatum(result); +} diff --git a/contrib/jsonb_plpython/jsonb_plpython2u--1.0.sql b/contrib/jsonb_plpython/jsonb_plpython2u--1.0.sql new file mode 100644 index 0000000000..2526d14ee1 --- /dev/null +++ b/contrib/jsonb_plpython/jsonb_plpython2u--1.0.sql @@ -0,0 +1,19 @@ +/* contrib/jsonb_plpython/jsonb_plpython2u--1.0.sql */ + +-- complain if script is sourced in psql, rather than via CREATE EXTENSION +\echo Use "CREATE EXTENSION jsonb_plpython2u" to load this file. \quit + +CREATE FUNCTION jsonb_to_plpython2(val internal) RETURNS internal +LANGUAGE C STRICT IMMUTABLE +AS 'MODULE_PATHNAME', 'jsonb_to_plpython'; + +CREATE FUNCTION plpython2_to_jsonb(val internal) RETURNS jsonb +LANGUAGE C STRICT IMMUTABLE +AS 'MODULE_PATHNAME', 'plpython_to_jsonb'; + +CREATE TRANSFORM FOR jsonb LANGUAGE plpython2u ( + FROM SQL WITH FUNCTION jsonb_to_plpython2(internal), + TO SQL WITH FUNCTION plpython2_to_jsonb(internal) +); + +COMMENT ON TRANSFORM FOR jsonb LANGUAGE plpython2u IS 'transform between jsonb and Python'; diff --git a/contrib/jsonb_plpython/jsonb_plpython2u.control b/contrib/jsonb_plpython/jsonb_plpython2u.control new file mode 100644 index 0000000000..d26368316b --- /dev/null +++ b/contrib/jsonb_plpython/jsonb_plpython2u.control @@ -0,0 +1,6 @@ +# jsonb_plpython2u extension +comment = 'transform between jsonb and plpython2u' +default_version = '1.0' +module_pathname = '$libdir/jsonb_plpython2' +relocatable = true +requires = 'plpython2u' diff --git a/contrib/jsonb_plpython/jsonb_plpython3u--1.0.sql b/contrib/jsonb_plpython/jsonb_plpython3u--1.0.sql new file mode 100644 index 0000000000..60c34c0ea8 --- /dev/null +++ b/contrib/jsonb_plpython/jsonb_plpython3u--1.0.sql @@ -0,0 +1,19 @@ +/* contrib/jsonb_plpython/jsonb_plpython3u--1.0.sql */ + +-- complain if script is sourced in psql, rather than via CREATE EXTENSION +\echo Use "CREATE EXTENSION jsonb_plpython3u" to load this file. 
\quit + +CREATE FUNCTION jsonb_to_plpython3(val internal) RETURNS internal +LANGUAGE C STRICT IMMUTABLE +AS 'MODULE_PATHNAME', 'jsonb_to_plpython'; + +CREATE FUNCTION plpython3_to_jsonb(val internal) RETURNS jsonb +LANGUAGE C STRICT IMMUTABLE +AS 'MODULE_PATHNAME', 'plpython_to_jsonb'; + +CREATE TRANSFORM FOR jsonb LANGUAGE plpython3u ( + FROM SQL WITH FUNCTION jsonb_to_plpython3(internal), + TO SQL WITH FUNCTION plpython3_to_jsonb(internal) +); + +COMMENT ON TRANSFORM FOR jsonb LANGUAGE plpython3u IS 'transform between jsonb and Python'; diff --git a/contrib/jsonb_plpython/jsonb_plpython3u.control b/contrib/jsonb_plpython/jsonb_plpython3u.control new file mode 100644 index 0000000000..f701e80fa2 --- /dev/null +++ b/contrib/jsonb_plpython/jsonb_plpython3u.control @@ -0,0 +1,6 @@ +# jsonb_plpython3u extension +comment = 'transform between jsonb and plpython3u' +default_version = '1.0' +module_pathname = '$libdir/jsonb_plpython3' +relocatable = true +requires = 'plpython3u' diff --git a/contrib/jsonb_plpython/jsonb_plpythonu--1.0.sql b/contrib/jsonb_plpython/jsonb_plpythonu--1.0.sql new file mode 100644 index 0000000000..3fa89885a6 --- /dev/null +++ b/contrib/jsonb_plpython/jsonb_plpythonu--1.0.sql @@ -0,0 +1,19 @@ +/* contrib/jsonb_plpython/jsonb_plpythonu--1.0.sql */ + +-- complain if script is sourced in psql, rather than via CREATE EXTENSION +\echo Use "CREATE EXTENSION jsonb_plpythonu" to load this file. \quit + +CREATE FUNCTION jsonb_to_plpython(val internal) RETURNS internal +LANGUAGE C STRICT IMMUTABLE +AS 'MODULE_PATHNAME'; + +CREATE FUNCTION plpython_to_jsonb(val internal) RETURNS jsonb +LANGUAGE C STRICT IMMUTABLE +AS 'MODULE_PATHNAME'; + +CREATE TRANSFORM FOR jsonb LANGUAGE plpythonu ( + FROM SQL WITH FUNCTION jsonb_to_plpython(internal), + TO SQL WITH FUNCTION plpython_to_jsonb(internal) +); + +COMMENT ON TRANSFORM FOR jsonb LANGUAGE plpythonu IS 'transform between jsonb and Python'; diff --git a/contrib/jsonb_plpython/jsonb_plpythonu.control b/contrib/jsonb_plpython/jsonb_plpythonu.control new file mode 100644 index 0000000000..6f8fa4f184 --- /dev/null +++ b/contrib/jsonb_plpython/jsonb_plpythonu.control @@ -0,0 +1,6 @@ +# jsonb_plpythonu extension +comment = 'transform between jsonb and plpythonu' +default_version = '1.0' +module_pathname = '$libdir/jsonb_plpython2' +relocatable = true +requires = 'plpythonu' diff --git a/contrib/jsonb_plpython/sql/jsonb_plpython.sql b/contrib/jsonb_plpython/sql/jsonb_plpython.sql new file mode 100644 index 0000000000..2ee1bca0a9 --- /dev/null +++ b/contrib/jsonb_plpython/sql/jsonb_plpython.sql @@ -0,0 +1,183 @@ +CREATE EXTENSION jsonb_plpython2u CASCADE; + +-- test jsonb -> python dict +CREATE FUNCTION test1(val jsonb) RETURNS int +LANGUAGE plpythonu +TRANSFORM FOR TYPE jsonb +AS $$ +assert isinstance(val, dict) +assert(val == {'a': 1, 'c': 'NULL'}) +return len(val) +$$; + +SELECT test1('{"a": 1, "c": "NULL"}'::jsonb); + +-- test jsonb -> python dict +-- complex dict with dicts as value +CREATE FUNCTION test1complex(val jsonb) RETURNS int +LANGUAGE plpython2u +TRANSFORM FOR TYPE jsonb +AS $$ +assert isinstance(val, dict) +assert(val == {"d": {"d": 1}}) +return len(val) +$$; + +SELECT test1complex('{"d": {"d": 1}}'::jsonb); + + +-- test jsonb[] -> python dict +-- dict with array as value +CREATE FUNCTION test1arr(val jsonb) RETURNS int +LANGUAGE plpythonu +TRANSFORM FOR TYPE jsonb +AS $$ +assert isinstance(val, dict) +assert(val == {"d": [12, 1]}) +return len(val) +$$; + +SELECT test1arr('{"d":[12, 1]}'::jsonb); + +-- test jsonb[] -> python 
list +-- simple list +CREATE FUNCTION test2arr(val jsonb) RETURNS int +LANGUAGE plpythonu +TRANSFORM FOR TYPE jsonb +AS $$ +assert isinstance(val, list) +assert(val == [12, 1]) +return len(val) +$$; + +SELECT test2arr('[12, 1]'::jsonb); + +-- test jsonb[] -> python list +-- array of dicts +CREATE FUNCTION test3arr(val jsonb) RETURNS int +LANGUAGE plpythonu +TRANSFORM FOR TYPE jsonb +AS $$ +assert isinstance(val, list) +assert(val == [{"a": 1,"b": 2}, {"c": 3,"d": 4}]) +return len(val) +$$; + +SELECT test3arr('[{"a": 1, "b": 2}, {"c": 3,"d": 4}]'::jsonb); + +-- test jsonb int -> python int +CREATE FUNCTION test1int(val jsonb) RETURNS int +LANGUAGE plpythonu +TRANSFORM FOR TYPE jsonb +AS $$ +assert(val == 1) +return val +$$; + +SELECT test1int('1'::jsonb); + +-- test jsonb string -> python string +CREATE FUNCTION test1string(val jsonb) RETURNS text +LANGUAGE plpythonu +TRANSFORM FOR TYPE jsonb +AS $$ +assert(val == "a") +return val +$$; + +SELECT test1string('"a"'::jsonb); + +-- test jsonb null -> python None +CREATE FUNCTION test1null(val jsonb) RETURNS int +LANGUAGE plpythonu +TRANSFORM FOR TYPE jsonb +AS $$ +assert(val == None) +return 1 +$$; + +SELECT test1null('null'::jsonb); + +-- test python -> jsonb +CREATE FUNCTION roundtrip(val jsonb) RETURNS jsonb +LANGUAGE plpythonu +TRANSFORM FOR TYPE jsonb +as $$ +return val +$$; + +SELECT roundtrip('null'::jsonb); +SELECT roundtrip('1'::jsonb); +SELECT roundtrip('1234567890.0987654321'::jsonb); +SELECT roundtrip('-1234567890.0987654321'::jsonb); +SELECT roundtrip('true'::jsonb); +SELECT roundtrip('"string"'::jsonb); + +SELECT roundtrip('{"1": null}'::jsonb); +SELECT roundtrip('{"1": 1}'::jsonb); +SELECT roundtrip('{"1": true}'::jsonb); +SELECT roundtrip('{"1": "string"}'::jsonb); + +SELECT roundtrip('[null]'::jsonb); +SELECT roundtrip('[1]'::jsonb); +SELECT roundtrip('[true]'::jsonb); +SELECT roundtrip('["string"]'::jsonb); +SELECT roundtrip('[null, 1]'::jsonb); +SELECT roundtrip('[1, true]'::jsonb); +SELECT roundtrip('[true, "string"]'::jsonb); +SELECT roundtrip('["string", "string2"]'::jsonb); + +-- complex numbers -> jsonb +CREATE FUNCTION testComplexNumbers() RETURNS jsonb +LANGUAGE plpythonu +TRANSFORM FOR TYPE jsonb +AS $$ +x = 1 + 2j +return x +$$; + +SELECT testComplexNumbers(); + +-- range -> jsonb +CREATE FUNCTION testRange() RETURNS jsonb +LANGUAGE plpythonu +TRANSFORM FOR TYPE jsonb +AS $$ +x = range(3) +return x +$$; + +SELECT testRange(); + +-- 0xff -> jsonb +CREATE FUNCTION testDecimal() RETURNS jsonb +LANGUAGE plpythonu +TRANSFORM FOR TYPE jsonb +AS $$ +x = 0xff +return x +$$; + +SELECT testDecimal(); + +-- tuple -> jsonb +CREATE FUNCTION testTuple() RETURNS jsonb +LANGUAGE plpythonu +TRANSFORM FOR TYPE jsonb +AS $$ +x = (1, 'String', None) +return x +$$; + +SELECT testTuple(); + +-- interesting dict -> jsonb +CREATE FUNCTION test_dict1() RETURNS jsonb +LANGUAGE plpythonu +TRANSFORM FOR TYPE jsonb +AS $$ +x = {"a": 1, None: 2, 33: 3} +return x +$$; + +SELECT test_dict1(); diff --git a/contrib/lo/.gitignore b/contrib/lo/.gitignore new file mode 100644 index 0000000000..5dcb3ff972 --- /dev/null +++ b/contrib/lo/.gitignore @@ -0,0 +1,4 @@ +# Generated subdirectories +/log/ +/results/ +/tmp_check/ diff --git a/contrib/lo/Makefile b/contrib/lo/Makefile index 71f0cb0d24..bd4fd6b72d 100644 --- a/contrib/lo/Makefile +++ b/contrib/lo/Makefile @@ -6,6 +6,8 @@ EXTENSION = lo DATA = lo--1.1.sql lo--1.0--1.1.sql lo--unpackaged--1.0.sql PGFILEDESC = "lo - management for large objects" +REGRESS = lo + ifdef USE_PGXS PG_CONFIG = pg_config PGXS 
:= $(shell $(PG_CONFIG) --pgxs) diff --git a/contrib/lo/expected/lo.out b/contrib/lo/expected/lo.out new file mode 100644 index 0000000000..f7104aee3f --- /dev/null +++ b/contrib/lo/expected/lo.out @@ -0,0 +1,42 @@ +CREATE EXTENSION lo; +CREATE TABLE image (title text, raster lo); +CREATE TRIGGER t_raster BEFORE UPDATE OR DELETE ON image + FOR EACH ROW EXECUTE PROCEDURE lo_manage(raster); +SELECT lo_create(43213); + lo_create +----------- + 43213 +(1 row) + +SELECT lo_create(43214); + lo_create +----------- + 43214 +(1 row) + +INSERT INTO image (title, raster) VALUES ('beautiful image', 43213); +SELECT lo_get(43213); + lo_get +-------- + \x +(1 row) + +SELECT lo_get(43214); + lo_get +-------- + \x +(1 row) + +UPDATE image SET raster = 43214 WHERE title = 'beautiful image'; +SELECT lo_get(43213); +ERROR: large object 43213 does not exist +SELECT lo_get(43214); + lo_get +-------- + \x +(1 row) + +DELETE FROM image; +SELECT lo_get(43214); +ERROR: large object 43214 does not exist +DROP TABLE image; diff --git a/contrib/lo/sql/lo.sql b/contrib/lo/sql/lo.sql new file mode 100644 index 0000000000..34ba6f00ec --- /dev/null +++ b/contrib/lo/sql/lo.sql @@ -0,0 +1,25 @@ +CREATE EXTENSION lo; + +CREATE TABLE image (title text, raster lo); + +CREATE TRIGGER t_raster BEFORE UPDATE OR DELETE ON image + FOR EACH ROW EXECUTE PROCEDURE lo_manage(raster); + +SELECT lo_create(43213); +SELECT lo_create(43214); + +INSERT INTO image (title, raster) VALUES ('beautiful image', 43213); + +SELECT lo_get(43213); +SELECT lo_get(43214); + +UPDATE image SET raster = 43214 WHERE title = 'beautiful image'; + +SELECT lo_get(43213); +SELECT lo_get(43214); + +DELETE FROM image; + +SELECT lo_get(43214); + +DROP TABLE image; diff --git a/contrib/ltree/Makefile b/contrib/ltree/Makefile index c101603e6c..416c8da312 100644 --- a/contrib/ltree/Makefile +++ b/contrib/ltree/Makefile @@ -9,6 +9,8 @@ EXTENSION = ltree DATA = ltree--1.1.sql ltree--1.0--1.1.sql ltree--unpackaged--1.0.sql PGFILEDESC = "ltree - hierarchical label data type" +HEADERS = ltree.h + REGRESS = ltree ifdef USE_PGXS diff --git a/contrib/ltree/_ltree_gist.c b/contrib/ltree/_ltree_gist.c index a387f5b899..28bf7ad963 100644 --- a/contrib/ltree/_ltree_gist.c +++ b/contrib/ltree/_ltree_gist.c @@ -100,7 +100,7 @@ _ltree_compress(PG_FUNCTION_ARGS) retval = (GISTENTRY *) palloc(sizeof(GISTENTRY)); gistentryinit(*retval, PointerGetDatum(key), entry->rel, entry->page, - entry->offset, FALSE); + entry->offset, false); } else if (!LTG_ISALLTRUE(entry->key)) { @@ -123,7 +123,7 @@ _ltree_compress(PG_FUNCTION_ARGS) retval = (GISTENTRY *) palloc(sizeof(GISTENTRY)); gistentryinit(*retval, PointerGetDatum(key), entry->rel, entry->page, - entry->offset, FALSE); + entry->offset, false); } PG_RETURN_POINTER(retval); } @@ -545,7 +545,7 @@ Datum _ltree_consistent(PG_FUNCTION_ARGS) { GISTENTRY *entry = (GISTENTRY *) PG_GETARG_POINTER(0); - char *query = (char *) DatumGetPointer(PG_DETOAST_DATUM(PG_GETARG_DATUM(1))); + void *query = (void *) PG_DETOAST_DATUM(PG_GETARG_DATUM(1)); StrategyNumber strategy = (StrategyNumber) PG_GETARG_UINT16(2); /* Oid subtype = PG_GETARG_OID(3); */ diff --git a/contrib/ltree/_ltree_op.c b/contrib/ltree/_ltree_op.c index fdf6ebb43b..9bb6bcaeff 100644 --- a/contrib/ltree/_ltree_op.c +++ b/contrib/ltree/_ltree_op.c @@ -71,7 +71,7 @@ Datum _ltree_isparent(PG_FUNCTION_ARGS) { ArrayType *la = PG_GETARG_ARRAYTYPE_P(0); - ltree *query = PG_GETARG_LTREE(1); + ltree *query = PG_GETARG_LTREE_P(1); bool res = array_iterator(la, ltree_isparent, (void *) query, NULL); 
PG_FREE_IF_COPY(la, 0); @@ -92,7 +92,7 @@ Datum _ltree_risparent(PG_FUNCTION_ARGS) { ArrayType *la = PG_GETARG_ARRAYTYPE_P(0); - ltree *query = PG_GETARG_LTREE(1); + ltree *query = PG_GETARG_LTREE_P(1); bool res = array_iterator(la, ltree_risparent, (void *) query, NULL); PG_FREE_IF_COPY(la, 0); @@ -113,7 +113,7 @@ Datum _ltq_regex(PG_FUNCTION_ARGS) { ArrayType *la = PG_GETARG_ARRAYTYPE_P(0); - lquery *query = PG_GETARG_LQUERY(1); + lquery *query = PG_GETARG_LQUERY_P(1); bool res = array_iterator(la, ltq_regex, (void *) query, NULL); PG_FREE_IF_COPY(la, 0); @@ -178,7 +178,7 @@ Datum _ltxtq_exec(PG_FUNCTION_ARGS) { ArrayType *la = PG_GETARG_ARRAYTYPE_P(0); - ltxtquery *query = PG_GETARG_LTXTQUERY(1); + ltxtquery *query = PG_GETARG_LTXTQUERY_P(1); bool res = array_iterator(la, ltxtq_exec, (void *) query, NULL); PG_FREE_IF_COPY(la, 0); @@ -200,7 +200,7 @@ Datum _ltree_extract_isparent(PG_FUNCTION_ARGS) { ArrayType *la = PG_GETARG_ARRAYTYPE_P(0); - ltree *query = PG_GETARG_LTREE(1); + ltree *query = PG_GETARG_LTREE_P(1); ltree *found, *item; @@ -223,7 +223,7 @@ Datum _ltree_extract_risparent(PG_FUNCTION_ARGS) { ArrayType *la = PG_GETARG_ARRAYTYPE_P(0); - ltree *query = PG_GETARG_LTREE(1); + ltree *query = PG_GETARG_LTREE_P(1); ltree *found, *item; @@ -246,7 +246,7 @@ Datum _ltq_extract_regex(PG_FUNCTION_ARGS) { ArrayType *la = PG_GETARG_ARRAYTYPE_P(0); - lquery *query = PG_GETARG_LQUERY(1); + lquery *query = PG_GETARG_LQUERY_P(1); ltree *found, *item; @@ -269,7 +269,7 @@ Datum _ltxtq_extract_exec(PG_FUNCTION_ARGS) { ArrayType *la = PG_GETARG_ARRAYTYPE_P(0); - ltxtquery *query = PG_GETARG_LTXTQUERY(1); + ltxtquery *query = PG_GETARG_LTXTQUERY_P(1); ltree *found, *item; diff --git a/contrib/ltree/expected/ltree.out b/contrib/ltree/expected/ltree.out index 3d5737d41b..8226930905 100644 --- a/contrib/ltree/expected/ltree.out +++ b/contrib/ltree/expected/ltree.out @@ -259,6 +259,24 @@ SELECT lca('{1.2.3,1.2.3.4.5.6}'); 1.2 (1 row) +SELECT lca('{1.2.3}'); + lca +----- + 1.2 +(1 row) + +SELECT lca('{1}'), lca('{1}') IS NULL; + lca | ?column? +-----+---------- + | f +(1 row) + +SELECT lca('{}') IS NULL; + ?column? 
+---------- + t +(1 row) + SELECT lca('1.la.2.3','1.2.3.4.5.6'); lca ----- diff --git a/contrib/ltree/lquery_op.c b/contrib/ltree/lquery_op.c index 229ddd0ae3..b6d2deb1af 100644 --- a/contrib/ltree/lquery_op.c +++ b/contrib/ltree/lquery_op.c @@ -302,8 +302,8 @@ checkCond(lquery_level *curq, int query_numlevel, ltree_level *curt, int tree_nu Datum ltq_regex(PG_FUNCTION_ARGS) { - ltree *tree = PG_GETARG_LTREE(0); - lquery *query = PG_GETARG_LQUERY(1); + ltree *tree = PG_GETARG_LTREE_P(0); + lquery *query = PG_GETARG_LQUERY_P(1); bool res = false; if (query->flag & LQUERY_HASNOT) @@ -338,7 +338,7 @@ ltq_rregex(PG_FUNCTION_ARGS) Datum lt_q_regex(PG_FUNCTION_ARGS) { - ltree *tree = PG_GETARG_LTREE(0); + ltree *tree = PG_GETARG_LTREE_P(0); ArrayType *_query = PG_GETARG_ARRAYTYPE_P(1); lquery *query = (lquery *) ARR_DATA_PTR(_query); bool res = false; diff --git a/contrib/ltree/ltree.h b/contrib/ltree/ltree.h index fd86323ffe..e4b8c84fa6 100644 --- a/contrib/ltree/ltree.h +++ b/contrib/ltree/ltree.h @@ -165,12 +165,21 @@ bool compare_subnode(ltree_level *t, char *q, int len, ltree *lca_inner(ltree **a, int len); int ltree_strncasecmp(const char *a, const char *b, size_t s); -#define PG_GETARG_LTREE(x) ((ltree*)DatumGetPointer(PG_DETOAST_DATUM(PG_GETARG_DATUM(x)))) -#define PG_GETARG_LTREE_COPY(x) ((ltree*)DatumGetPointer(PG_DETOAST_DATUM_COPY(PG_GETARG_DATUM(x)))) -#define PG_GETARG_LQUERY(x) ((lquery*)DatumGetPointer(PG_DETOAST_DATUM(PG_GETARG_DATUM(x)))) -#define PG_GETARG_LQUERY_COPY(x) ((lquery*)DatumGetPointer(PG_DETOAST_DATUM_COPY(PG_GETARG_DATUM(x)))) -#define PG_GETARG_LTXTQUERY(x) ((ltxtquery*)DatumGetPointer(PG_DETOAST_DATUM(PG_GETARG_DATUM(x)))) -#define PG_GETARG_LTXTQUERY_COPY(x) ((ltxtquery*)DatumGetPointer(PG_DETOAST_DATUM_COPY(PG_GETARG_DATUM(x)))) +/* fmgr macros for ltree objects */ +#define DatumGetLtreeP(X) ((ltree *) PG_DETOAST_DATUM(X)) +#define DatumGetLtreePCopy(X) ((ltree *) PG_DETOAST_DATUM_COPY(X)) +#define PG_GETARG_LTREE_P(n) DatumGetLtreeP(PG_GETARG_DATUM(n)) +#define PG_GETARG_LTREE_P_COPY(n) DatumGetLtreePCopy(PG_GETARG_DATUM(n)) + +#define DatumGetLqueryP(X) ((lquery *) PG_DETOAST_DATUM(X)) +#define DatumGetLqueryPCopy(X) ((lquery *) PG_DETOAST_DATUM_COPY(X)) +#define PG_GETARG_LQUERY_P(n) DatumGetLqueryP(PG_GETARG_DATUM(n)) +#define PG_GETARG_LQUERY_P_COPY(n) DatumGetLqueryPCopy(PG_GETARG_DATUM(n)) + +#define DatumGetLtxtqueryP(X) ((ltxtquery *) PG_DETOAST_DATUM(X)) +#define DatumGetLtxtqueryPCopy(X) ((ltxtquery *) PG_DETOAST_DATUM_COPY(X)) +#define PG_GETARG_LTXTQUERY_P(n) DatumGetLtxtqueryP(PG_GETARG_DATUM(n)) +#define PG_GETARG_LTXTQUERY_P_COPY(n) DatumGetLtxtqueryPCopy(PG_GETARG_DATUM(n)) /* GiST support for ltree */ diff --git a/contrib/ltree/ltree_gist.c b/contrib/ltree/ltree_gist.c index 70e78a672a..12aa8fff20 100644 --- a/contrib/ltree/ltree_gist.c +++ b/contrib/ltree/ltree_gist.c @@ -53,7 +53,7 @@ ltree_compress(PG_FUNCTION_ARGS) if (entry->leafkey) { /* ltree */ ltree_gist *key; - ltree *val = (ltree *) DatumGetPointer(PG_DETOAST_DATUM(entry->key)); + ltree *val = DatumGetLtreeP(entry->key); int32 len = LTG_HDRSIZE + VARSIZE(val); key = (ltree_gist *) palloc0(len); @@ -64,7 +64,7 @@ ltree_compress(PG_FUNCTION_ARGS) retval = (GISTENTRY *) palloc(sizeof(GISTENTRY)); gistentryinit(*retval, PointerGetDatum(key), entry->rel, entry->page, - entry->offset, FALSE); + entry->offset, false); } PG_RETURN_POINTER(retval); } @@ -73,7 +73,7 @@ Datum ltree_decompress(PG_FUNCTION_ARGS) { GISTENTRY *entry = (GISTENTRY *) PG_GETARG_POINTER(0); - ltree_gist *key = 
(ltree_gist *) DatumGetPointer(PG_DETOAST_DATUM(entry->key)); + ltree_gist *key = (ltree_gist *) PG_DETOAST_DATUM(entry->key); if (PointerGetDatum(key) != entry->key) { @@ -81,7 +81,7 @@ ltree_decompress(PG_FUNCTION_ARGS) gistentryinit(*retval, PointerGetDatum(key), entry->rel, entry->page, - entry->offset, FALSE); + entry->offset, false); PG_RETURN_POINTER(retval); } PG_RETURN_POINTER(entry); @@ -621,18 +621,18 @@ ltree_consistent(PG_FUNCTION_ARGS) switch (strategy) { case BTLessStrategyNumber: - query = PG_GETARG_LTREE(1); + query = PG_GETARG_LTREE_P(1); res = (GIST_LEAF(entry)) ? (ltree_compare((ltree *) query, LTG_NODE(key)) > 0) : (ltree_compare((ltree *) query, LTG_GETLNODE(key)) >= 0); break; case BTLessEqualStrategyNumber: - query = PG_GETARG_LTREE(1); + query = PG_GETARG_LTREE_P(1); res = (ltree_compare((ltree *) query, LTG_GETLNODE(key)) >= 0); break; case BTEqualStrategyNumber: - query = PG_GETARG_LTREE(1); + query = PG_GETARG_LTREE_P(1); if (GIST_LEAF(entry)) res = (ltree_compare((ltree *) query, LTG_NODE(key)) == 0); else @@ -643,25 +643,25 @@ ltree_consistent(PG_FUNCTION_ARGS) ); break; case BTGreaterEqualStrategyNumber: - query = PG_GETARG_LTREE(1); + query = PG_GETARG_LTREE_P(1); res = (ltree_compare((ltree *) query, LTG_GETRNODE(key)) <= 0); break; case BTGreaterStrategyNumber: - query = PG_GETARG_LTREE(1); + query = PG_GETARG_LTREE_P(1); res = (GIST_LEAF(entry)) ? (ltree_compare((ltree *) query, LTG_GETRNODE(key)) < 0) : (ltree_compare((ltree *) query, LTG_GETRNODE(key)) <= 0); break; case 10: - query = PG_GETARG_LTREE_COPY(1); + query = PG_GETARG_LTREE_P_COPY(1); res = (GIST_LEAF(entry)) ? inner_isparent((ltree *) query, LTG_NODE(key)) : gist_isparent(key, (ltree *) query); break; case 11: - query = PG_GETARG_LTREE(1); + query = PG_GETARG_LTREE_P(1); res = (GIST_LEAF(entry)) ? 
inner_isparent(LTG_NODE(key), (ltree *) query) : @@ -669,7 +669,7 @@ ltree_consistent(PG_FUNCTION_ARGS) break; case 12: case 13: - query = PG_GETARG_LQUERY(1); + query = PG_GETARG_LQUERY_P(1); if (GIST_LEAF(entry)) res = DatumGetBool(DirectFunctionCall2(ltq_regex, PointerGetDatum(LTG_NODE(key)), @@ -680,18 +680,18 @@ ltree_consistent(PG_FUNCTION_ARGS) break; case 14: case 15: - query = PG_GETARG_LQUERY(1); + query = PG_GETARG_LTXTQUERY_P(1); if (GIST_LEAF(entry)) res = DatumGetBool(DirectFunctionCall2(ltxtq_exec, PointerGetDatum(LTG_NODE(key)), - PointerGetDatum((lquery *) query) + PointerGetDatum((ltxtquery *) query) )); else res = gist_qtxt(key, (ltxtquery *) query); break; case 16: case 17: - query = DatumGetPointer(PG_DETOAST_DATUM(PG_GETARG_DATUM(1))); + query = PG_GETARG_ARRAYTYPE_P(1); if (GIST_LEAF(entry)) res = DatumGetBool(DirectFunctionCall2(lt_q_regex, PointerGetDatum(LTG_NODE(key)), diff --git a/contrib/ltree/ltree_io.c b/contrib/ltree/ltree_io.c index 34ca597a48..f54f037443 100644 --- a/contrib/ltree/ltree_io.c +++ b/contrib/ltree/ltree_io.c @@ -149,7 +149,7 @@ ltree_in(PG_FUNCTION_ARGS) Datum ltree_out(PG_FUNCTION_ARGS) { - ltree *in = PG_GETARG_LTREE(0); + ltree *in = PG_GETARG_LTREE_P(0); char *buf, *ptr; int i; @@ -521,7 +521,7 @@ lquery_in(PG_FUNCTION_ARGS) Datum lquery_out(PG_FUNCTION_ARGS) { - lquery *in = PG_GETARG_LQUERY(0); + lquery *in = PG_GETARG_LQUERY_P(0); char *buf, *ptr; int i, diff --git a/contrib/ltree/ltree_op.c b/contrib/ltree/ltree_op.c index aa1e9918be..df61c63180 100644 --- a/contrib/ltree/ltree_op.c +++ b/contrib/ltree/ltree_op.c @@ -45,17 +45,24 @@ ltree_compare(const ltree *a, const ltree *b) ltree_level *bl = LTREE_FIRST(b); int an = a->numlevel; int bn = b->numlevel; - int res = 0; while (an > 0 && bn > 0) { + int res; + if ((res = memcmp(al->name, bl->name, Min(al->len, bl->len))) == 0) { if (al->len != bl->len) return (al->len - bl->len) * 10 * (an + 1); } else + { + if (res < 0) + res = -1; + else + res = 1; return res * 10 * (an + 1); + } an--; bn--; @@ -67,65 +74,65 @@ ltree_compare(const ltree *a, const ltree *b) } #define RUNCMP \ -ltree *a = PG_GETARG_LTREE(0); \ -ltree *b = PG_GETARG_LTREE(1); \ -int res = ltree_compare(a,b); \ -PG_FREE_IF_COPY(a,0); \ -PG_FREE_IF_COPY(b,1); \ +ltree *a = PG_GETARG_LTREE_P(0); \ +ltree *b = PG_GETARG_LTREE_P(1); \ +int res = ltree_compare(a,b); \ +PG_FREE_IF_COPY(a,0); \ +PG_FREE_IF_COPY(b,1) Datum ltree_cmp(PG_FUNCTION_ARGS) { - RUNCMP - PG_RETURN_INT32(res); + RUNCMP; + PG_RETURN_INT32(res); } Datum ltree_lt(PG_FUNCTION_ARGS) { - RUNCMP - PG_RETURN_BOOL((res < 0) ? true : false); + RUNCMP; + PG_RETURN_BOOL((res < 0) ? true : false); } Datum ltree_le(PG_FUNCTION_ARGS) { - RUNCMP - PG_RETURN_BOOL((res <= 0) ? true : false); + RUNCMP; + PG_RETURN_BOOL((res <= 0) ? true : false); } Datum ltree_eq(PG_FUNCTION_ARGS) { - RUNCMP - PG_RETURN_BOOL((res == 0) ? true : false); + RUNCMP; + PG_RETURN_BOOL((res == 0) ? true : false); } Datum ltree_ge(PG_FUNCTION_ARGS) { - RUNCMP - PG_RETURN_BOOL((res >= 0) ? true : false); + RUNCMP; + PG_RETURN_BOOL((res >= 0) ? true : false); } Datum ltree_gt(PG_FUNCTION_ARGS) { - RUNCMP - PG_RETURN_BOOL((res > 0) ? true : false); + RUNCMP; + PG_RETURN_BOOL((res > 0) ? true : false); } Datum ltree_ne(PG_FUNCTION_ARGS) { - RUNCMP - PG_RETURN_BOOL((res != 0) ? true : false); + RUNCMP; + PG_RETURN_BOOL((res != 0) ? 
true : false); } Datum nlevel(PG_FUNCTION_ARGS) { - ltree *a = PG_GETARG_LTREE(0); + ltree *a = PG_GETARG_LTREE_P(0); int res = a->numlevel; PG_FREE_IF_COPY(a, 0); @@ -146,7 +153,7 @@ inner_isparent(const ltree *c, const ltree *p) { if (cl->len != pl->len) return false; - if (memcmp(cl->name, pl->name, cl->len)) + if (memcmp(cl->name, pl->name, cl->len) != 0) return false; pn--; @@ -159,8 +166,8 @@ inner_isparent(const ltree *c, const ltree *p) Datum ltree_isparent(PG_FUNCTION_ARGS) { - ltree *c = PG_GETARG_LTREE(1); - ltree *p = PG_GETARG_LTREE(0); + ltree *c = PG_GETARG_LTREE_P(1); + ltree *p = PG_GETARG_LTREE_P(0); bool res = inner_isparent(c, p); PG_FREE_IF_COPY(c, 1); @@ -171,8 +178,8 @@ ltree_isparent(PG_FUNCTION_ARGS) Datum ltree_risparent(PG_FUNCTION_ARGS) { - ltree *c = PG_GETARG_LTREE(0); - ltree *p = PG_GETARG_LTREE(1); + ltree *c = PG_GETARG_LTREE_P(0); + ltree *p = PG_GETARG_LTREE_P(1); bool res = inner_isparent(c, p); PG_FREE_IF_COPY(c, 0); @@ -223,7 +230,7 @@ inner_subltree(ltree *t, int32 startpos, int32 endpos) Datum subltree(PG_FUNCTION_ARGS) { - ltree *t = PG_GETARG_LTREE(0); + ltree *t = PG_GETARG_LTREE_P(0); ltree *res = inner_subltree(t, PG_GETARG_INT32(1), PG_GETARG_INT32(2)); PG_FREE_IF_COPY(t, 0); @@ -233,7 +240,7 @@ subltree(PG_FUNCTION_ARGS) Datum subpath(PG_FUNCTION_ARGS) { - ltree *t = PG_GETARG_LTREE(0); + ltree *t = PG_GETARG_LTREE_P(0); int32 start = PG_GETARG_INT32(1); int32 len = (fcinfo->nargs == 3) ? PG_GETARG_INT32(2) : 0; int32 end; @@ -282,8 +289,8 @@ ltree_concat(ltree *a, ltree *b) Datum ltree_addltree(PG_FUNCTION_ARGS) { - ltree *a = PG_GETARG_LTREE(0); - ltree *b = PG_GETARG_LTREE(1); + ltree *a = PG_GETARG_LTREE_P(0); + ltree *b = PG_GETARG_LTREE_P(1); ltree *r; r = ltree_concat(a, b); @@ -295,7 +302,7 @@ ltree_addltree(PG_FUNCTION_ARGS) Datum ltree_addtext(PG_FUNCTION_ARGS) { - ltree *a = PG_GETARG_LTREE(0); + ltree *a = PG_GETARG_LTREE_P(0); text *b = PG_GETARG_TEXT_PP(1); char *s; ltree *r, @@ -320,8 +327,8 @@ ltree_addtext(PG_FUNCTION_ARGS) Datum ltree_index(PG_FUNCTION_ARGS) { - ltree *a = PG_GETARG_LTREE(0); - ltree *b = PG_GETARG_LTREE(1); + ltree *a = PG_GETARG_LTREE_P(0); + ltree *b = PG_GETARG_LTREE_P(1); int start = (fcinfo->nargs == 3) ? PG_GETARG_INT32(2) : 0; int i, j; @@ -380,7 +387,7 @@ ltree_index(PG_FUNCTION_ARGS) Datum ltree_textadd(PG_FUNCTION_ARGS) { - ltree *a = PG_GETARG_LTREE(1); + ltree *a = PG_GETARG_LTREE_P(1); text *b = PG_GETARG_TEXT_PP(0); char *s; ltree *r, @@ -402,22 +409,34 @@ ltree_textadd(PG_FUNCTION_ARGS) PG_RETURN_POINTER(r); } +/* + * Common code for variants of lca(), find longest common ancestor of inputs + * + * Returns NULL if there is no common ancestor, ie, the longest common + * prefix is empty. + */ ltree * lca_inner(ltree **a, int len) { int tmp, - num = ((*a)->numlevel) ? (*a)->numlevel - 1 : 0; - ltree **ptr = a + 1; - int i, - reslen = LTREE_HDRSIZE; + num, + i, + reslen; + ltree **ptr; ltree_level *l1, *l2; ltree *res; - + if (len <= 0) + return NULL; /* no inputs? 
*/ if ((*a)->numlevel == 0) - return NULL; + return NULL; /* any empty input means NULL result */ + + /* num is the length of the longest common ancestor so far */ + num = (*a)->numlevel - 1; + /* Compare each additional input to *a */ + ptr = a + 1; while (ptr - a < len) { if ((*ptr)->numlevel == 0) @@ -428,11 +447,12 @@ lca_inner(ltree **a, int len) { l1 = LTREE_FIRST(*a); l2 = LTREE_FIRST(*ptr); - tmp = num; + tmp = Min(num, (*ptr)->numlevel - 1); num = 0; - for (i = 0; i < Min(tmp, (*ptr)->numlevel - 1); i++) + for (i = 0; i < tmp; i++) { - if (l1->len == l2->len && memcmp(l1->name, l2->name, l1->len) == 0) + if (l1->len == l2->len && + memcmp(l1->name, l2->name, l1->len) == 0) num = i + 1; else break; @@ -443,6 +463,8 @@ lca_inner(ltree **a, int len) ptr++; } + /* Now compute size of result ... */ + reslen = LTREE_HDRSIZE; l1 = LTREE_FIRST(*a); for (i = 0; i < num; i++) { @@ -450,6 +472,7 @@ lca_inner(ltree **a, int len) l1 = LEVEL_NEXT(l1); } + /* ... and construct it by copying from *a */ res = (ltree *) palloc0(reslen); SET_VARSIZE(res, reslen); res->numlevel = num; @@ -476,7 +499,7 @@ lca(PG_FUNCTION_ARGS) a = (ltree **) palloc(sizeof(ltree *) * fcinfo->nargs); for (i = 0; i < fcinfo->nargs; i++) - a[i] = PG_GETARG_LTREE(i); + a[i] = PG_GETARG_LTREE_P(i); res = lca_inner(a, (int) fcinfo->nargs); for (i = 0; i < fcinfo->nargs; i++) PG_FREE_IF_COPY(a[i], i); @@ -508,7 +531,7 @@ text2ltree(PG_FUNCTION_ARGS) Datum ltree2text(PG_FUNCTION_ARGS) { - ltree *in = PG_GETARG_LTREE(0); + ltree *in = PG_GETARG_LTREE_P(0); char *ptr; int i; ltree_level *curlevel; diff --git a/contrib/ltree/ltxtquery_io.c b/contrib/ltree/ltxtquery_io.c index 9ca1994249..56bf39d145 100644 --- a/contrib/ltree/ltxtquery_io.c +++ b/contrib/ltree/ltxtquery_io.c @@ -515,7 +515,7 @@ infix(INFIX *in, bool first) Datum ltxtq_out(PG_FUNCTION_ARGS) { - ltxtquery *query = PG_GETARG_LTXTQUERY(0); + ltxtquery *query = PG_GETARG_LTXTQUERY_P(0); INFIX nrm; if (query->size == 0) diff --git a/contrib/ltree/ltxtquery_op.c b/contrib/ltree/ltxtquery_op.c index 1428c8b478..dc0ee82bb6 100644 --- a/contrib/ltree/ltxtquery_op.c +++ b/contrib/ltree/ltxtquery_op.c @@ -26,7 +26,7 @@ ltree_execute(ITEM *curitem, void *checkval, bool calcnot, bool (*chkcond) (void return (*chkcond) (checkval, curitem); else if (curitem->val == (int32) '!') { - return (calcnot) ? + return calcnot ? ((ltree_execute(curitem + 1, checkval, calcnot, chkcond)) ? 
false : true) : true; } @@ -86,8 +86,8 @@ checkcondition_str(void *checkval, ITEM *val) Datum ltxtq_exec(PG_FUNCTION_ARGS) { - ltree *val = PG_GETARG_LTREE(0); - ltxtquery *query = PG_GETARG_LTXTQUERY(1); + ltree *val = PG_GETARG_LTREE_P(0); + ltxtquery *query = PG_GETARG_LTXTQUERY_P(1); CHKVAL chkval; bool result; diff --git a/contrib/ltree/sql/ltree.sql b/contrib/ltree/sql/ltree.sql index e9f74909a6..846b04e48e 100644 --- a/contrib/ltree/sql/ltree.sql +++ b/contrib/ltree/sql/ltree.sql @@ -54,6 +54,9 @@ SELECT lca('{la.2.3,1.2.3.4.5.6,""}') IS NULL; SELECT lca('{la.2.3,1.2.3.4.5.6}') IS NULL; SELECT lca('{1.la.2.3,1.2.3.4.5.6}'); SELECT lca('{1.2.3,1.2.3.4.5.6}'); +SELECT lca('{1.2.3}'); +SELECT lca('{1}'), lca('{1}') IS NULL; +SELECT lca('{}') IS NULL; SELECT lca('1.la.2.3','1.2.3.4.5.6'); SELECT lca('1.2.3','1.2.3.4.5.6'); SELECT lca('1.2.2.3','1.2.3.4.5.6'); diff --git a/contrib/ltree_plpython/Makefile b/contrib/ltree_plpython/Makefile index bc7502b8c3..ce2c0cd2e2 100644 --- a/contrib/ltree_plpython/Makefile +++ b/contrib/ltree_plpython/Makefile @@ -4,19 +4,21 @@ MODULE_big = ltree_plpython$(python_majorversion) OBJS = ltree_plpython.o $(WIN32RES) PGFILEDESC = "ltree_plpython - ltree transform for plpython" -PG_CPPFLAGS = -I$(top_srcdir)/src/pl/plpython $(python_includespec) -I$(top_srcdir)/contrib/ltree -DPLPYTHON_LIBNAME='"plpython$(python_majorversion)"' - EXTENSION = ltree_plpythonu ltree_plpython2u ltree_plpython3u DATA = ltree_plpythonu--1.0.sql ltree_plpython2u--1.0.sql ltree_plpython3u--1.0.sql REGRESS = ltree_plpython REGRESS_PLPYTHON3_MANGLE := $(REGRESS) +PG_CPPFLAGS = $(python_includespec) -DPLPYTHON_LIBNAME='"plpython$(python_majorversion)"' + ifdef USE_PGXS +PG_CPPFLAGS += -I$(includedir_server)/extension PG_CONFIG = pg_config PGXS := $(shell $(PG_CONFIG) --pgxs) include $(PGXS) else +PG_CPPFLAGS += -I$(top_srcdir)/src/pl/plpython -I$(top_srcdir)/contrib subdir = contrib/ltree_plpython top_builddir = ../.. include $(top_builddir)/src/Makefile.global @@ -26,7 +28,7 @@ endif # We must link libpython explicitly ifeq ($(PORTNAME), win32) # ... see silliness in plpython Makefile ... 
-SHLIB_LINK += $(sort $(wildcard ../../src/pl/plpython/libpython*.a)) +SHLIB_LINK_INTERNAL += $(sort $(wildcard ../../src/pl/plpython/libpython*.a)) else rpathdir = $(python_libdir) SHLIB_LINK += $(python_libspec) $(python_additional_libs) diff --git a/contrib/ltree_plpython/ltree_plpython.c b/contrib/ltree_plpython/ltree_plpython.c index bdd462a91b..b254aa558d 100644 --- a/contrib/ltree_plpython/ltree_plpython.c +++ b/contrib/ltree_plpython/ltree_plpython.c @@ -2,7 +2,7 @@ #include "fmgr.h" #include "plpython.h" -#include "ltree.h" +#include "ltree/ltree.h" PG_MODULE_MAGIC; @@ -40,12 +40,16 @@ PG_FUNCTION_INFO_V1(ltree_to_plpython); Datum ltree_to_plpython(PG_FUNCTION_ARGS) { - ltree *in = PG_GETARG_LTREE(0); + ltree *in = PG_GETARG_LTREE_P(0); int i; PyObject *list; ltree_level *curlevel; list = PyList_New(in->numlevel); + if (!list) + ereport(ERROR, + (errcode(ERRCODE_OUT_OF_MEMORY), + errmsg("out of memory"))); curlevel = LTREE_FIRST(in); for (i = 0; i < in->numlevel; i++) diff --git a/contrib/oid2name/.gitignore b/contrib/oid2name/.gitignore index fdefde108d..0410fb7afa 100644 --- a/contrib/oid2name/.gitignore +++ b/contrib/oid2name/.gitignore @@ -1 +1,3 @@ /oid2name + +/tmp_check/ diff --git a/contrib/oid2name/Makefile b/contrib/oid2name/Makefile index 3414b4a5cc..908e078714 100644 --- a/contrib/oid2name/Makefile +++ b/contrib/oid2name/Makefile @@ -7,7 +7,9 @@ PROGRAM = oid2name OBJS = oid2name.o $(WIN32RES) PG_CPPFLAGS = -I$(libpq_srcdir) -PG_LIBS = $(libpq_pgport) +PG_LIBS_INTERNAL = $(libpq_pgport) + +EXTRA_CLEAN = tmp_check ifdef USE_PGXS PG_CONFIG = pg_config @@ -19,3 +21,9 @@ top_builddir = ../.. include $(top_builddir)/src/Makefile.global include $(top_srcdir)/contrib/contrib-global.mk endif + +check: + $(prove_check) + +installcheck: + $(prove_installcheck) diff --git a/contrib/oid2name/oid2name.c b/contrib/oid2name/oid2name.c index 8af99decad..aa122ca0e9 100644 --- a/contrib/oid2name/oid2name.c +++ b/contrib/oid2name/oid2name.c @@ -9,10 +9,13 @@ */ #include "postgres_fe.h" -#include "catalog/pg_class.h" +#include "catalog/pg_class_d.h" +#include "fe_utils/connect.h" #include "libpq-fe.h" #include "pg_getopt.h" +#include "getopt_long.h" + /* an extensible array to keep track of elements to show */ typedef struct @@ -59,8 +62,28 @@ void sql_exec_dumpalltbspc(PGconn *, struct options *); void get_opts(int argc, char **argv, struct options *my_opts) { + static struct option long_options[] = { + {"dbname", required_argument, NULL, 'd'}, + {"host", required_argument, NULL, 'h'}, + {"host", required_argument, NULL, 'H'}, /* deprecated */ + {"filenode", required_argument, NULL, 'f'}, + {"indexes", no_argument, NULL, 'i'}, + {"oid", required_argument, NULL, 'o'}, + {"port", required_argument, NULL, 'p'}, + {"quiet", no_argument, NULL, 'q'}, + {"tablespaces", no_argument, NULL, 's'}, + {"system-objects", no_argument, NULL, 'S'}, + {"table", required_argument, NULL, 't'}, + {"username", required_argument, NULL, 'U'}, + {"version", no_argument, NULL, 'V'}, + {"extended", no_argument, NULL, 'x'}, + {"help", no_argument, NULL, '?'}, + {NULL, 0, NULL, 0} + }; + int c; const char *progname; + int optindex; progname = get_progname(argv[0]); @@ -92,7 +115,7 @@ get_opts(int argc, char **argv, struct options *my_opts) } /* get opts */ - while ((c = getopt(argc, argv, "H:p:U:d:t:o:f:qSxish")) != -1) + while ((c = getopt_long(argc, argv, "d:f:h:H:io:p:qsSt:U:x", long_options, &optindex)) != -1) { switch (c) { @@ -101,39 +124,40 @@ get_opts(int argc, char **argv, struct options *my_opts) 
my_opts->dbname = pg_strdup(optarg); break; - /* specify one tablename to show */ - case 't': - add_one_elt(optarg, my_opts->tables); - break; - - /* specify one Oid to show */ - case 'o': - add_one_elt(optarg, my_opts->oids); - break; - /* specify one filenode to show */ case 'f': add_one_elt(optarg, my_opts->filenodes); break; - /* don't show headers */ - case 'q': - my_opts->quiet = true; - break; - /* host to connect to */ - case 'H': + case 'H': /* deprecated */ + case 'h': my_opts->hostname = pg_strdup(optarg); break; + /* also display indexes */ + case 'i': + my_opts->indexes = true; + break; + + /* specify one Oid to show */ + case 'o': + add_one_elt(optarg, my_opts->oids); + break; + /* port to connect to on remote host */ case 'p': my_opts->port = pg_strdup(optarg); break; - /* username */ - case 'U': - my_opts->username = pg_strdup(optarg); + /* don't show headers */ + case 'q': + my_opts->quiet = true; + break; + + /* dump tablespaces only */ + case 's': + my_opts->tablespaces = true; break; /* display system tables */ @@ -141,9 +165,14 @@ get_opts(int argc, char **argv, struct options *my_opts) my_opts->systables = true; break; - /* also display indexes */ - case 'i': - my_opts->indexes = true; + /* specify one tablename to show */ + case 't': + add_one_elt(optarg, my_opts->tables); + break; + + /* username */ + case 'U': + my_opts->username = pg_strdup(optarg); break; /* display extra columns */ @@ -151,16 +180,6 @@ get_opts(int argc, char **argv, struct options *my_opts) my_opts->extended = true; break; - /* dump tablespaces only */ - case 's': - my_opts->tablespaces = true; - break; - - case 'h': - help(progname); - exit(0); - break; - default: fprintf(stderr, _("Try \"%s --help\" for more information.\n"), progname); exit(1); @@ -175,20 +194,22 @@ help(const char *progname) "Usage:\n" " %s [OPTION]...\n" "\nOptions:\n" - " -d DBNAME database to connect to\n" - " -f FILENODE show info for table with given file node\n" - " -H HOSTNAME database server host or socket directory\n" - " -i show indexes and sequences too\n" - " -o OID show info for table with given OID\n" - " -p PORT database server port number\n" - " -q quiet (don't show headers)\n" - " -s show all tablespaces\n" - " -S show system objects too\n" - " -t TABLE show info for named table\n" - " -U NAME connect as specified database user\n" - " -V, --version output version information, then exit\n" - " -x extended (show additional columns)\n" - " -?, --help show this help, then exit\n" + " -f, --filenode=FILENODE show info for table with given file node\n" + " -i, --indexes show indexes and sequences too\n" + " -o, --oid=OID show info for table with given OID\n" + " -q, --quiet quiet (don't show headers)\n" + " -s, --tablespaces show all tablespaces\n" + " -S, --system-objects show system objects too\n" + " -t, --table=TABLE show info for named table\n" + " -V, --version output version information, then exit\n" + " -x, --extended extended (show additional columns)\n" + " -?, --help show this help, then exit\n" + "\nConnection options:\n" + " -d, --dbname=DBNAME database to connect to\n" + " -h, --host=HOSTNAME database server host or socket directory\n" + " -H same as -h, deprecated option\n" + " -p, --port=PORT database server port number\n" + " -U, --username=USERNAME connect as specified database user\n" "\nThe default action is to show all database OIDs.\n\n" "Report bugs to .\n", progname, progname); @@ -266,6 +287,7 @@ sql_conn(struct options *my_opts) bool have_password = false; char password[100]; bool 
new_pass; + PGresult *res; /* * Start the connection. Loop until we have a password if requested by @@ -323,6 +345,17 @@ sql_conn(struct options *my_opts) exit(1); } + res = PQexec(conn, ALWAYS_SECURE_SEARCH_PATH_SQL); + if (PQresultStatus(res) != PGRES_TUPLES_OK) + { + fprintf(stderr, "oid2name: could not clear search_path: %s\n", + PQerrorMessage(conn)); + PQclear(res); + PQfinish(conn); + exit(-1); + } + PQclear(res); + /* return the conn if good */ return conn; } diff --git a/contrib/oid2name/t/001_basic.pl b/contrib/oid2name/t/001_basic.pl new file mode 100644 index 0000000000..fa2c5743f6 --- /dev/null +++ b/contrib/oid2name/t/001_basic.pl @@ -0,0 +1,12 @@ +use strict; +use warnings; + +use TestLib; +use Test::More tests => 8; + +######################################### +# Basic checks + +program_help_ok('oid2name'); +program_version_ok('oid2name'); +program_options_handling_ok('oid2name'); diff --git a/contrib/pageinspect/Makefile b/contrib/pageinspect/Makefile index 0a3cbeeb10..e5a581f141 100644 --- a/contrib/pageinspect/Makefile +++ b/contrib/pageinspect/Makefile @@ -5,7 +5,8 @@ OBJS = rawpage.o heapfuncs.o btreefuncs.o fsmfuncs.o \ brinfuncs.o ginfuncs.o hashfuncs.o $(WIN32RES) EXTENSION = pageinspect -DATA = pageinspect--1.5.sql pageinspect--1.5--1.6.sql \ +DATA = pageinspect--1.6--1.7.sql \ + pageinspect--1.5.sql pageinspect--1.5--1.6.sql \ pageinspect--1.4--1.5.sql pageinspect--1.3--1.4.sql \ pageinspect--1.2--1.3.sql pageinspect--1.1--1.2.sql \ pageinspect--1.0--1.1.sql pageinspect--unpackaged--1.0.sql diff --git a/contrib/pageinspect/brinfuncs.c b/contrib/pageinspect/brinfuncs.c index 13da7616e7..f4f0dea860 100644 --- a/contrib/pageinspect/brinfuncs.c +++ b/contrib/pageinspect/brinfuncs.c @@ -2,7 +2,7 @@ * brinfuncs.c * Functions to investigate BRIN indexes * - * Copyright (c) 2014-2017, PostgreSQL Global Development Group + * Copyright (c) 2014-2018, PostgreSQL Global Development Group * * IDENTIFICATION * contrib/pageinspect/brinfuncs.c diff --git a/contrib/pageinspect/btreefuncs.c b/contrib/pageinspect/btreefuncs.c index 4f834676ea..184ac62255 100644 --- a/contrib/pageinspect/btreefuncs.c +++ b/contrib/pageinspect/btreefuncs.c @@ -429,7 +429,7 @@ bt_page_items_bytea(PG_FUNCTION_ARGS) if (!superuser()) ereport(ERROR, (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE), - (errmsg("must be superuser to use pageinspect functions")))); + (errmsg("must be superuser to use raw page functions")))); if (SRF_IS_FIRSTCALL()) { @@ -511,7 +511,7 @@ bt_metap(PG_FUNCTION_ARGS) BTMetaPageData *metad; TupleDesc tupleDesc; int j; - char *values[6]; + char *values[8]; Buffer buffer; Page page; HeapTuple tuple; @@ -556,6 +556,21 @@ bt_metap(PG_FUNCTION_ARGS) values[j++] = psprintf("%d", metad->btm_fastroot); values[j++] = psprintf("%d", metad->btm_fastlevel); + /* + * Get values of extended metadata if available, use default values + * otherwise. 
+ */ + if (metad->btm_version == BTREE_VERSION) + { + values[j++] = psprintf("%u", metad->btm_oldest_btpo_xact); + values[j++] = psprintf("%f", metad->btm_last_cleanup_num_heap_tuples); + } + else + { + values[j++] = "0"; + values[j++] = "-1"; + } + tuple = BuildTupleFromCStrings(TupleDescGetAttInMetadata(tupleDesc), values); diff --git a/contrib/pageinspect/expected/btree.out b/contrib/pageinspect/expected/btree.out index 67b103add3..2aaa4df53b 100644 --- a/contrib/pageinspect/expected/btree.out +++ b/contrib/pageinspect/expected/btree.out @@ -3,13 +3,15 @@ INSERT INTO test1 VALUES (72057594037927937, 'text'); CREATE INDEX test1_a_idx ON test1 USING btree (a); \x SELECT * FROM bt_metap('test1_a_idx'); --[ RECORD 1 ]----- -magic | 340322 -version | 2 -root | 1 -level | 0 -fastroot | 1 -fastlevel | 0 +-[ RECORD 1 ]-----------+------- +magic | 340322 +version | 3 +root | 1 +level | 0 +fastroot | 1 +fastlevel | 0 +oldest_xact | 0 +last_cleanup_num_tuples | -1 SELECT * FROM bt_page_stats('test1_a_idx', 0); ERROR: block 0 is a meta page diff --git a/contrib/pageinspect/expected/page.out b/contrib/pageinspect/expected/page.out index 8e15947a81..3fcd9fbe6d 100644 --- a/contrib/pageinspect/expected/page.out +++ b/contrib/pageinspect/expected/page.out @@ -83,12 +83,33 @@ SELECT * FROM fsm_page_contents(get_raw_page('test1', 'fsm', 0)); (1 row) DROP TABLE test1; --- check that using any of these functions with a partitioned table would fail +-- check that using any of these functions with a partitioned table or index +-- would fail create table test_partitioned (a int) partition by range (a); +create index test_partitioned_index on test_partitioned (a); select get_raw_page('test_partitioned', 0); -- error about partitioned table ERROR: cannot get raw page from partitioned table "test_partitioned" +select get_raw_page('test_partitioned_index', 0); -- error about partitioned index +ERROR: cannot get raw page from partitioned index "test_partitioned_index" -- a regular table which is a member of a partition set should work though create table test_part1 partition of test_partitioned for values from ( 1 ) to (100); select get_raw_page('test_part1', 0); -- get farther and error about empty table ERROR: block number 0 is out of range for relation "test_part1" drop table test_partitioned; +-- check null bitmap alignment for table whose number of attributes is multiple of 8 +create table test8 (f1 int, f2 int, f3 int, f4 int, f5 int, f6 int, f7 int, f8 int); +insert into test8(f1, f8) values (x'7f00007f'::int, 0); +select t_bits, t_data from heap_page_items(get_raw_page('test8', 0)); + t_bits | t_data +----------+-------------------- + 10000001 | \x7f00007f00000000 +(1 row) + +select tuple_data_split('test8'::regclass, t_data, t_infomask, t_infomask2, t_bits) + from heap_page_items(get_raw_page('test8', 0)); + tuple_data_split +------------------------------------------------------------- + {"\\x7f00007f",NULL,NULL,NULL,NULL,NULL,NULL,"\\x00000000"} +(1 row) + +drop table test8; diff --git a/contrib/pageinspect/fsmfuncs.c b/contrib/pageinspect/fsmfuncs.c index 615dab8b13..86e8075845 100644 --- a/contrib/pageinspect/fsmfuncs.c +++ b/contrib/pageinspect/fsmfuncs.c @@ -9,7 +9,7 @@ * there's hardly any use case for using these without superuser-rights * anyway. 
* - * Copyright (c) 2007-2017, PostgreSQL Global Development Group + * Copyright (c) 2007-2018, PostgreSQL Global Development Group * * IDENTIFICATION * contrib/pageinspect/fsmfuncs.c diff --git a/contrib/pageinspect/ginfuncs.c b/contrib/pageinspect/ginfuncs.c index f774495b6f..d42609c577 100644 --- a/contrib/pageinspect/ginfuncs.c +++ b/contrib/pageinspect/ginfuncs.c @@ -2,7 +2,7 @@ * ginfuncs.c * Functions to investigate the content of GIN indexes * - * Copyright (c) 2014-2017, PostgreSQL Global Development Group + * Copyright (c) 2014-2018, PostgreSQL Global Development Group * * IDENTIFICATION * contrib/pageinspect/ginfuncs.c diff --git a/contrib/pageinspect/hashfuncs.c b/contrib/pageinspect/hashfuncs.c index dbe3b6ab04..c49adf207c 100644 --- a/contrib/pageinspect/hashfuncs.c +++ b/contrib/pageinspect/hashfuncs.c @@ -2,7 +2,7 @@ * hashfuncs.c * Functions to investigate the content of HASH indexes * - * Copyright (c) 2017, PostgreSQL Global Development Group + * Copyright (c) 2017-2018, PostgreSQL Global Development Group * * IDENTIFICATION * contrib/pageinspect/hashfuncs.c @@ -19,6 +19,7 @@ #include "funcapi.h" #include "miscadmin.h" #include "utils/builtins.h" +#include "utils/rel.h" PG_FUNCTION_INFO_V1(hash_page_type); PG_FUNCTION_INFO_V1(hash_page_stats); @@ -96,18 +97,22 @@ verify_hash_page(bytea *raw_page, int flags) ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), errmsg("page is not a hash meta page"))); + break; case LH_BUCKET_PAGE | LH_OVERFLOW_PAGE: ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), errmsg("page is not a hash bucket or overflow page"))); + break; case LH_OVERFLOW_PAGE: ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), errmsg("page is not a hash overflow page"))); + break; default: elog(ERROR, "hash page of type %08x not in mask %08x", pagetype, flags); + break; } } @@ -313,10 +318,10 @@ hash_page_items(PG_FUNCTION_ARGS) fctx = SRF_FIRSTCALL_INIT(); - page = verify_hash_page(raw_page, LH_BUCKET_PAGE | LH_OVERFLOW_PAGE); - mctx = MemoryContextSwitchTo(fctx->multi_call_memory_ctx); + page = verify_hash_page(raw_page, LH_BUCKET_PAGE | LH_OVERFLOW_PAGE); + uargs = palloc(sizeof(struct user_args)); uargs->page = page; diff --git a/contrib/pageinspect/heapfuncs.c b/contrib/pageinspect/heapfuncs.c index 72d1776a4a..d96ba1e8b6 100644 --- a/contrib/pageinspect/heapfuncs.c +++ b/contrib/pageinspect/heapfuncs.c @@ -15,7 +15,7 @@ * there's hardly any use case for using these without superuser-rights * anyway. 
* - * Copyright (c) 2007-2017, PostgreSQL Global Development Group + * Copyright (c) 2007-2018, PostgreSQL Global Development Group * * IDENTIFICATION * contrib/pageinspect/heapfuncs.c @@ -234,7 +234,7 @@ heap_page_items(PG_FUNCTION_ARGS) int bits_len; bits_len = - ((tuphdr->t_infomask2 & HEAP_NATTS_MASK) / 8 + 1) * 8; + BITMAPLEN(HeapTupleHeaderGetNatts(tuphdr)) * BITS_PER_BYTE; values[11] = CStringGetTextDatum( bits_to_text(tuphdr->t_bits, bits_len)); } @@ -298,9 +298,8 @@ tuple_data_split_internal(Oid relid, char *tupdata, TupleDesc tupdesc; /* Get tuple descriptor from relation OID */ - rel = relation_open(relid, NoLock); - tupdesc = CreateTupleDescCopyConstr(rel->rd_att); - relation_close(rel, NoLock); + rel = relation_open(relid, AccessShareLock); + tupdesc = RelationGetDescr(rel); raw_attrs = initArrayResult(BYTEAOID, CurrentMemoryContext, false); nattrs = tupdesc->natts; @@ -316,8 +315,7 @@ tuple_data_split_internal(Oid relid, char *tupdata, bool is_null; bytea *attr_data = NULL; - attr = tupdesc->attrs[i]; - is_null = (t_infomask & HEAP_HASNULL) && att_isnull(i, t_bits); + attr = TupleDescAttr(tupdesc, i); /* * Tuple header can specify less attributes than tuple descriptor as @@ -327,6 +325,8 @@ tuple_data_split_internal(Oid relid, char *tupdata, */ if (i >= (t_infomask2 & HEAP_NATTS_MASK)) is_null = true; + else + is_null = (t_infomask & HEAP_HASNULL) && att_isnull(i, t_bits); if (!is_null) { @@ -334,7 +334,7 @@ tuple_data_split_internal(Oid relid, char *tupdata, if (attr->attlen == -1) { - off = att_align_pointer(off, tupdesc->attrs[i]->attalign, -1, + off = att_align_pointer(off, attr->attalign, -1, tupdata + off); /* @@ -353,7 +353,7 @@ tuple_data_split_internal(Oid relid, char *tupdata, } else { - off = att_align_nominal(off, tupdesc->attrs[i]->attalign); + off = att_align_nominal(off, attr->attalign); len = attr->attlen; } @@ -371,7 +371,7 @@ tuple_data_split_internal(Oid relid, char *tupdata, memcpy(VARDATA(attr_data), tupdata + off, len); } - off = att_addlength_pointer(off, tupdesc->attrs[i]->attlen, + off = att_addlength_pointer(off, attr->attlen, tupdata + off); } @@ -386,6 +386,8 @@ tuple_data_split_internal(Oid relid, char *tupdata, (errcode(ERRCODE_DATA_CORRUPTED), errmsg("end of tuple reached without looking at all its data"))); + relation_close(rel, AccessShareLock); + return makeArrayResult(raw_attrs, CurrentMemoryContext); } @@ -436,24 +438,19 @@ tuple_data_split(PG_FUNCTION_ARGS) int bits_str_len; int bits_len; - bits_len = (t_infomask2 & HEAP_NATTS_MASK) / 8 + 1; + bits_len = BITMAPLEN(t_infomask2 & HEAP_NATTS_MASK) * BITS_PER_BYTE; if (!t_bits_str) ereport(ERROR, (errcode(ERRCODE_DATA_CORRUPTED), errmsg("argument of t_bits is null, but it is expected to be null and %d character long", - bits_len * 8))); + bits_len))); bits_str_len = strlen(t_bits_str); - if ((bits_str_len % 8) != 0) - ereport(ERROR, - (errcode(ERRCODE_DATA_CORRUPTED), - errmsg("length of t_bits is not a multiple of eight"))); - - if (bits_len * 8 != bits_str_len) + if (bits_len != bits_str_len) ereport(ERROR, (errcode(ERRCODE_DATA_CORRUPTED), errmsg("unexpected length of t_bits %u, expected %d", - bits_str_len, bits_len * 8))); + bits_str_len, bits_len))); /* do the conversion */ t_bits = text_to_bits(t_bits_str, bits_str_len); diff --git a/contrib/pageinspect/pageinspect--1.6--1.7.sql b/contrib/pageinspect/pageinspect--1.6--1.7.sql new file mode 100644 index 0000000000..2433a21af2 --- /dev/null +++ b/contrib/pageinspect/pageinspect--1.6--1.7.sql @@ -0,0 +1,26 @@ +/* 
contrib/pageinspect/pageinspect--1.6--1.7.sql */ + +-- complain if script is sourced in psql, rather than via ALTER EXTENSION +\echo Use "ALTER EXTENSION pageinspect UPDATE TO '1.7'" to load this file. \quit + +-- +-- bt_metap() +-- +DROP FUNCTION bt_metap(IN relname text, + OUT magic int4, + OUT version int4, + OUT root int4, + OUT level int4, + OUT fastroot int4, + OUT fastlevel int4); +CREATE FUNCTION bt_metap(IN relname text, + OUT magic int4, + OUT version int4, + OUT root int4, + OUT level int4, + OUT fastroot int4, + OUT fastlevel int4, + OUT oldest_xact int4, + OUT last_cleanup_num_tuples real) +AS 'MODULE_PATHNAME', 'bt_metap' +LANGUAGE C STRICT PARALLEL SAFE; diff --git a/contrib/pageinspect/pageinspect.control b/contrib/pageinspect/pageinspect.control index 1a61c9f5ad..dcfc61f22d 100644 --- a/contrib/pageinspect/pageinspect.control +++ b/contrib/pageinspect/pageinspect.control @@ -1,5 +1,5 @@ # pageinspect extension comment = 'inspect the contents of database pages at a low level' -default_version = '1.6' +default_version = '1.7' module_pathname = '$libdir/pageinspect' relocatable = true diff --git a/contrib/pageinspect/pageinspect.h b/contrib/pageinspect/pageinspect.h index f49cf9e892..ab7d5d66cd 100644 --- a/contrib/pageinspect/pageinspect.h +++ b/contrib/pageinspect/pageinspect.h @@ -3,7 +3,7 @@ * pageinspect.h * Common functions for pageinspect. * - * Copyright (c) 2017, PostgreSQL Global Development Group + * Copyright (c) 2017-2018, PostgreSQL Global Development Group * * IDENTIFICATION * contrib/pageinspect/pageinspect.h diff --git a/contrib/pageinspect/rawpage.c b/contrib/pageinspect/rawpage.c index e9d3131bda..39c50f4874 100644 --- a/contrib/pageinspect/rawpage.c +++ b/contrib/pageinspect/rawpage.c @@ -5,7 +5,7 @@ * * Access-method specific inspection functions are in separate files. * - * Copyright (c) 2007-2017, PostgreSQL Global Development Group + * Copyright (c) 2007-2018, PostgreSQL Global Development Group * * IDENTIFICATION * contrib/pageinspect/rawpage.c @@ -18,7 +18,6 @@ #include "pageinspect.h" #include "access/htup_details.h" -#include "catalog/catalog.h" #include "catalog/namespace.h" #include "catalog/pg_type.h" #include "funcapi.h" @@ -103,7 +102,7 @@ get_raw_page_internal(text *relname, ForkNumber forknum, BlockNumber blkno) if (!superuser()) ereport(ERROR, (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE), - (errmsg("must be superuser to use raw functions")))); + (errmsg("must be superuser to use raw page functions")))); relrv = makeRangeVarFromNameList(textToQualifiedNameList(relname)); rel = relation_openrv(relrv, AccessShareLock); @@ -129,6 +128,11 @@ get_raw_page_internal(text *relname, ForkNumber forknum, BlockNumber blkno) (errcode(ERRCODE_WRONG_OBJECT_TYPE), errmsg("cannot get raw page from partitioned table \"%s\"", RelationGetRelationName(rel)))); + if (rel->rd_rel->relkind == RELKIND_PARTITIONED_INDEX) + ereport(ERROR, + (errcode(ERRCODE_WRONG_OBJECT_TYPE), + errmsg("cannot get raw page from partitioned index \"%s\"", + RelationGetRelationName(rel)))); /* * Reject attempts to read non-local temporary relations; we would be @@ -253,7 +257,7 @@ page_header(PG_FUNCTION_ARGS) lsn = PageGetLSN(page); /* pageinspect >= 1.2 uses pg_lsn instead of text for the LSN field. 
*/ - if (tupdesc->attrs[0]->atttypid == TEXTOID) + if (TupleDescAttr(tupdesc, 0)->atttypid == TEXTOID) { char lsnchar[64]; diff --git a/contrib/pageinspect/sql/page.sql b/contrib/pageinspect/sql/page.sql index 493ca9b211..8ac9991837 100644 --- a/contrib/pageinspect/sql/page.sql +++ b/contrib/pageinspect/sql/page.sql @@ -33,11 +33,22 @@ SELECT * FROM fsm_page_contents(get_raw_page('test1', 'fsm', 0)); DROP TABLE test1; --- check that using any of these functions with a partitioned table would fail +-- check that using any of these functions with a partitioned table or index +-- would fail create table test_partitioned (a int) partition by range (a); +create index test_partitioned_index on test_partitioned (a); select get_raw_page('test_partitioned', 0); -- error about partitioned table +select get_raw_page('test_partitioned_index', 0); -- error about partitioned index -- a regular table which is a member of a partition set should work though create table test_part1 partition of test_partitioned for values from ( 1 ) to (100); select get_raw_page('test_part1', 0); -- get farther and error about empty table drop table test_partitioned; + +-- check null bitmap alignment for table whose number of attributes is multiple of 8 +create table test8 (f1 int, f2 int, f3 int, f4 int, f5 int, f6 int, f7 int, f8 int); +insert into test8(f1, f8) values (x'7f00007f'::int, 0); +select t_bits, t_data from heap_page_items(get_raw_page('test8', 0)); +select tuple_data_split('test8'::regclass, t_data, t_infomask, t_infomask2, t_bits) + from heap_page_items(get_raw_page('test8', 0)); +drop table test8; diff --git a/contrib/passwordcheck/.gitignore b/contrib/passwordcheck/.gitignore new file mode 100644 index 0000000000..5dcb3ff972 --- /dev/null +++ b/contrib/passwordcheck/.gitignore @@ -0,0 +1,4 @@ +# Generated subdirectories +/log/ +/results/ +/tmp_check/ diff --git a/contrib/passwordcheck/Makefile b/contrib/passwordcheck/Makefile index 4652aeb3d7..4da0b1417c 100644 --- a/contrib/passwordcheck/Makefile +++ b/contrib/passwordcheck/Makefile @@ -8,6 +8,8 @@ PGFILEDESC = "passwordcheck - strengthen user password checks" # PG_CPPFLAGS = -DUSE_CRACKLIB '-DCRACKLIB_DICTPATH="/usr/lib/cracklib_dict"' # SHLIB_LINK = -lcrack +REGRESS = passwordcheck + ifdef USE_PGXS PG_CONFIG = pg_config PGXS := $(shell $(PG_CONFIG) --pgxs) diff --git a/contrib/passwordcheck/expected/passwordcheck.out b/contrib/passwordcheck/expected/passwordcheck.out new file mode 100644 index 0000000000..e04cda6bd9 --- /dev/null +++ b/contrib/passwordcheck/expected/passwordcheck.out @@ -0,0 +1,19 @@ +LOAD 'passwordcheck'; +CREATE USER regress_user1; +-- ok +ALTER USER regress_user1 PASSWORD 'a_nice_long_password'; +-- error: too short +ALTER USER regress_user1 PASSWORD 'tooshrt'; +ERROR: password is too short +-- error: contains user name +ALTER USER regress_user1 PASSWORD 'xyzregress_user1'; +ERROR: password must not contain user name +-- error: contains only letters +ALTER USER regress_user1 PASSWORD 'alessnicelongpassword'; +ERROR: password must contain both letters and nonletters +-- encrypted ok (password is "secret") +ALTER USER regress_user1 PASSWORD 'md51a44d829a20a23eac686d9f0d258af13'; +-- error: password is user name +ALTER USER regress_user1 PASSWORD 'md5e589150ae7d28f93333afae92b36ef48'; +ERROR: password must not equal user name +DROP USER regress_user1; diff --git a/contrib/passwordcheck/passwordcheck.c b/contrib/passwordcheck/passwordcheck.c index b80fd458ad..d3d9ff3676 100644 --- a/contrib/passwordcheck/passwordcheck.c +++ 
b/contrib/passwordcheck/passwordcheck.c @@ -3,7 +3,7 @@ * passwordcheck.c * * - * Copyright (c) 2009-2017, PostgreSQL Global Development Group + * Copyright (c) 2009-2018, PostgreSQL Global Development Group * * Author: Laurenz Albe * @@ -70,7 +70,7 @@ check_password(const char *username, if (plain_crypt_verify(username, shadow_pass, username, &logdetail) == STATUS_OK) ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), - errmsg("password must not contain user name"))); + errmsg("password must not equal user name"))); } else { diff --git a/contrib/passwordcheck/sql/passwordcheck.sql b/contrib/passwordcheck/sql/passwordcheck.sql new file mode 100644 index 0000000000..d98796ac49 --- /dev/null +++ b/contrib/passwordcheck/sql/passwordcheck.sql @@ -0,0 +1,23 @@ +LOAD 'passwordcheck'; + +CREATE USER regress_user1; + +-- ok +ALTER USER regress_user1 PASSWORD 'a_nice_long_password'; + +-- error: too short +ALTER USER regress_user1 PASSWORD 'tooshrt'; + +-- error: contains user name +ALTER USER regress_user1 PASSWORD 'xyzregress_user1'; + +-- error: contains only letters +ALTER USER regress_user1 PASSWORD 'alessnicelongpassword'; + +-- encrypted ok (password is "secret") +ALTER USER regress_user1 PASSWORD 'md51a44d829a20a23eac686d9f0d258af13'; + +-- error: password is user name +ALTER USER regress_user1 PASSWORD 'md5e589150ae7d28f93333afae92b36ef48'; + +DROP USER regress_user1; diff --git a/contrib/pg_prewarm/Makefile b/contrib/pg_prewarm/Makefile index 7ad941e72b..88580d1118 100644 --- a/contrib/pg_prewarm/Makefile +++ b/contrib/pg_prewarm/Makefile @@ -1,10 +1,10 @@ # contrib/pg_prewarm/Makefile MODULE_big = pg_prewarm -OBJS = pg_prewarm.o $(WIN32RES) +OBJS = pg_prewarm.o autoprewarm.o $(WIN32RES) EXTENSION = pg_prewarm -DATA = pg_prewarm--1.1.sql pg_prewarm--1.0--1.1.sql +DATA = pg_prewarm--1.1--1.2.sql pg_prewarm--1.1.sql pg_prewarm--1.0--1.1.sql PGFILEDESC = "pg_prewarm - preload relation data into system buffer cache" ifdef USE_PGXS diff --git a/contrib/pg_prewarm/autoprewarm.c b/contrib/pg_prewarm/autoprewarm.c new file mode 100644 index 0000000000..03bf90ce2d --- /dev/null +++ b/contrib/pg_prewarm/autoprewarm.c @@ -0,0 +1,931 @@ +/*------------------------------------------------------------------------- + * + * autoprewarm.c + * Periodically dump information about the blocks present in + * shared_buffers, and reload them on server restart. + * + * Due to locking considerations, we can't actually begin prewarming + * until the server reaches a consistent state. We need the catalogs + * to be consistent so that we can figure out which relation to lock, + * and we need to lock the relations so that we don't try to prewarm + * pages from a relation that is in the process of being dropped. + * + * While prewarming, autoprewarm will use two workers. There's a + * master worker that reads and sorts the list of blocks to be + * prewarmed and then launches a per-database worker for each + * relevant database in turn. The former keeps running after the + * initial prewarm is complete to update the dump file periodically. 
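The autoprewarm design sketched in this header comment is driven entirely through the module's GUCs and SQL wrappers, all of which are introduced elsewhere in this same patch (_PG_init() below and pg_prewarm--1.1--1.2.sql). The following is a minimal usage sketch, not part of the patch itself; it assumes pg_prewarm is installed and that the server is restarted after editing postgresql.conf.

-- postgresql.conf (the library must be preloaded for the worker to register at startup):
--   shared_preload_libraries = 'pg_prewarm'
--   pg_prewarm.autoprewarm = on            -- launch the master worker at server start
--   pg_prewarm.autoprewarm_interval = 300  -- seconds between dumps of the shared_buffers block list

-- Make the SQL wrappers available (they are added by the 1.1--1.2 upgrade script):
ALTER EXTENSION pg_prewarm UPDATE TO '1.2';

-- Write the current shared_buffers block list to autoprewarm.blocks right away;
-- returns the number of blocks dumped.
SELECT autoprewarm_dump_now();

-- Start the master autoprewarm worker by hand if it was not launched at startup.
SELECT autoprewarm_start_worker();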
+ * + * Copyright (c) 2016-2018, PostgreSQL Global Development Group + * + * IDENTIFICATION + * contrib/pg_prewarm/autoprewarm.c + * + *------------------------------------------------------------------------- + */ + +#include "postgres.h" + +#include + +#include "access/heapam.h" +#include "access/xact.h" +#include "catalog/pg_class.h" +#include "catalog/pg_type.h" +#include "miscadmin.h" +#include "pgstat.h" +#include "postmaster/bgworker.h" +#include "storage/buf_internals.h" +#include "storage/dsm.h" +#include "storage/ipc.h" +#include "storage/latch.h" +#include "storage/lwlock.h" +#include "storage/proc.h" +#include "storage/procsignal.h" +#include "storage/shmem.h" +#include "storage/smgr.h" +#include "tcop/tcopprot.h" +#include "utils/acl.h" +#include "utils/guc.h" +#include "utils/memutils.h" +#include "utils/rel.h" +#include "utils/relfilenodemap.h" +#include "utils/resowner.h" + +#define AUTOPREWARM_FILE "autoprewarm.blocks" + +/* Metadata for each block we dump. */ +typedef struct BlockInfoRecord +{ + Oid database; + Oid tablespace; + Oid filenode; + ForkNumber forknum; + BlockNumber blocknum; +} BlockInfoRecord; + +/* Shared state information for autoprewarm bgworker. */ +typedef struct AutoPrewarmSharedState +{ + LWLock lock; /* mutual exclusion */ + pid_t bgworker_pid; /* for main bgworker */ + pid_t pid_using_dumpfile; /* for autoprewarm or block dump */ + + /* Following items are for communication with per-database worker */ + dsm_handle block_info_handle; + Oid database; + int prewarm_start_idx; + int prewarm_stop_idx; + int prewarmed_blocks; +} AutoPrewarmSharedState; + +void _PG_init(void); +void autoprewarm_main(Datum main_arg); +void autoprewarm_database_main(Datum main_arg); + +PG_FUNCTION_INFO_V1(autoprewarm_start_worker); +PG_FUNCTION_INFO_V1(autoprewarm_dump_now); + +static void apw_load_buffers(void); +static int apw_dump_now(bool is_bgworker, bool dump_unlogged); +static void apw_start_master_worker(void); +static void apw_start_database_worker(void); +static bool apw_init_shmem(void); +static void apw_detach_shmem(int code, Datum arg); +static int apw_compare_blockinfo(const void *p, const void *q); +static void apw_sigterm_handler(SIGNAL_ARGS); +static void apw_sighup_handler(SIGNAL_ARGS); + +/* Flags set by signal handlers */ +static volatile sig_atomic_t got_sigterm = false; +static volatile sig_atomic_t got_sighup = false; + +/* Pointer to shared-memory state. */ +static AutoPrewarmSharedState *apw_state = NULL; + +/* GUC variables. */ +static bool autoprewarm = true; /* start worker? */ +static int autoprewarm_interval; /* dump interval */ + +/* + * Module load callback. + */ +void +_PG_init(void) +{ + DefineCustomIntVariable("pg_prewarm.autoprewarm_interval", + "Sets the interval between dumps of shared buffers", + "If set to zero, time-based dumping is disabled.", + &autoprewarm_interval, + 300, + 0, INT_MAX / 1000, + PGC_SIGHUP, + GUC_UNIT_S, + NULL, + NULL, + NULL); + + if (!process_shared_preload_libraries_in_progress) + return; + + /* can't define PGC_POSTMASTER variable after startup */ + DefineCustomBoolVariable("pg_prewarm.autoprewarm", + "Starts the autoprewarm worker.", + NULL, + &autoprewarm, + true, + PGC_POSTMASTER, + 0, + NULL, + NULL, + NULL); + + EmitWarningsOnPlaceholders("pg_prewarm"); + + RequestAddinShmemSpace(MAXALIGN(sizeof(AutoPrewarmSharedState))); + + /* Register autoprewarm worker, if enabled. */ + if (autoprewarm) + apw_start_master_worker(); +} + +/* + * Main entry point for the master autoprewarm process. 
Per-database workers + * have a separate entry point. + */ +void +autoprewarm_main(Datum main_arg) +{ + bool first_time = true; + TimestampTz last_dump_time = 0; + + /* Establish signal handlers; once that's done, unblock signals. */ + pqsignal(SIGTERM, apw_sigterm_handler); + pqsignal(SIGHUP, apw_sighup_handler); + pqsignal(SIGUSR1, procsignal_sigusr1_handler); + BackgroundWorkerUnblockSignals(); + + /* Create (if necessary) and attach to our shared memory area. */ + if (apw_init_shmem()) + first_time = false; + + /* Set on-detach hook so that our PID will be cleared on exit. */ + on_shmem_exit(apw_detach_shmem, 0); + + /* + * Store our PID in the shared memory area --- unless there's already + * another worker running, in which case just exit. + */ + LWLockAcquire(&apw_state->lock, LW_EXCLUSIVE); + if (apw_state->bgworker_pid != InvalidPid) + { + LWLockRelease(&apw_state->lock); + ereport(LOG, + (errmsg("autoprewarm worker is already running under PID %lu", + (unsigned long) apw_state->bgworker_pid))); + return; + } + apw_state->bgworker_pid = MyProcPid; + LWLockRelease(&apw_state->lock); + + /* + * Preload buffers from the dump file only if we just created the shared + * memory region. Otherwise, it's either already been done or shouldn't + * be done - e.g. because the old dump file has been overwritten since the + * server was started. + * + * There's not much point in performing a dump immediately after we finish + * preloading; so, if we do end up preloading, consider the last dump time + * to be equal to the current time. + */ + if (first_time) + { + apw_load_buffers(); + last_dump_time = GetCurrentTimestamp(); + } + + /* Periodically dump buffers until terminated. */ + while (!got_sigterm) + { + int rc; + + /* In case of a SIGHUP, just reload the configuration. */ + if (got_sighup) + { + got_sighup = false; + ProcessConfigFile(PGC_SIGHUP); + } + + if (autoprewarm_interval <= 0) + { + /* We're only dumping at shutdown, so just wait forever. */ + rc = WaitLatch(&MyProc->procLatch, + WL_LATCH_SET | WL_POSTMASTER_DEATH, + -1L, + PG_WAIT_EXTENSION); + } + else + { + long delay_in_ms = 0; + TimestampTz next_dump_time = 0; + long secs = 0; + int usecs = 0; + + /* Compute the next dump time. */ + next_dump_time = + TimestampTzPlusMilliseconds(last_dump_time, + autoprewarm_interval * 1000); + TimestampDifference(GetCurrentTimestamp(), next_dump_time, + &secs, &usecs); + delay_in_ms = secs + (usecs / 1000); + + /* Perform a dump if it's time. */ + if (delay_in_ms <= 0) + { + last_dump_time = GetCurrentTimestamp(); + apw_dump_now(true, false); + continue; + } + + /* Sleep until the next dump time. */ + rc = WaitLatch(&MyProc->procLatch, + WL_LATCH_SET | WL_TIMEOUT | WL_POSTMASTER_DEATH, + delay_in_ms, + PG_WAIT_EXTENSION); + } + + /* Reset the latch, bail out if postmaster died, otherwise loop. */ + ResetLatch(&MyProc->procLatch); + if (rc & WL_POSTMASTER_DEATH) + proc_exit(1); + } + + /* + * Dump one last time. We assume this is probably the result of a system + * shutdown, although it's possible that we've merely been terminated. + */ + apw_dump_now(true, true); +} + +/* + * Read the dump file and launch per-database workers one at a time to + * prewarm the buffers found there. + */ +static void +apw_load_buffers(void) +{ + FILE *file = NULL; + int num_elements, + i; + BlockInfoRecord *blkinfo; + dsm_segment *seg; + + /* + * Skip the prewarm if the dump file is in use; otherwise, prevent any + * other process from writing it while we're using it. 
+ */ + LWLockAcquire(&apw_state->lock, LW_EXCLUSIVE); + if (apw_state->pid_using_dumpfile == InvalidPid) + apw_state->pid_using_dumpfile = MyProcPid; + else + { + LWLockRelease(&apw_state->lock); + ereport(LOG, + (errmsg("skipping prewarm because block dump file is being written by PID %lu", + (unsigned long) apw_state->pid_using_dumpfile))); + return; + } + LWLockRelease(&apw_state->lock); + + /* + * Open the block dump file. Exit quietly if it doesn't exist, but report + * any other error. + */ + file = AllocateFile(AUTOPREWARM_FILE, "r"); + if (!file) + { + if (errno == ENOENT) + { + LWLockAcquire(&apw_state->lock, LW_EXCLUSIVE); + apw_state->pid_using_dumpfile = InvalidPid; + LWLockRelease(&apw_state->lock); + return; /* No file to load. */ + } + ereport(ERROR, + (errcode_for_file_access(), + errmsg("could not read file \"%s\": %m", + AUTOPREWARM_FILE))); + } + + /* First line of the file is a record count. */ + if (fscanf(file, "<<%d>>\n", &num_elements) != 1) + ereport(ERROR, + (errcode_for_file_access(), + errmsg("could not read from file \"%s\": %m", + AUTOPREWARM_FILE))); + + /* Allocate a dynamic shared memory segment to store the record data. */ + seg = dsm_create(sizeof(BlockInfoRecord) * num_elements, 0); + blkinfo = (BlockInfoRecord *) dsm_segment_address(seg); + + /* Read records, one per line. */ + for (i = 0; i < num_elements; i++) + { + unsigned forknum; + + if (fscanf(file, "%u,%u,%u,%u,%u\n", &blkinfo[i].database, + &blkinfo[i].tablespace, &blkinfo[i].filenode, + &forknum, &blkinfo[i].blocknum) != 5) + ereport(ERROR, + (errmsg("autoprewarm block dump file is corrupted at line %d", + i + 1))); + blkinfo[i].forknum = forknum; + } + + FreeFile(file); + + /* Sort the blocks to be loaded. */ + pg_qsort(blkinfo, num_elements, sizeof(BlockInfoRecord), + apw_compare_blockinfo); + + /* Populate shared memory state. */ + apw_state->block_info_handle = dsm_segment_handle(seg); + apw_state->prewarm_start_idx = apw_state->prewarm_stop_idx = 0; + apw_state->prewarmed_blocks = 0; + + /* Get the info position of the first block of the next database. */ + while (apw_state->prewarm_start_idx < num_elements) + { + int j = apw_state->prewarm_start_idx; + Oid current_db = blkinfo[j].database; + + /* + * Advance the prewarm_stop_idx to the first BlockRecordInfo that does + * not belong to this database. + */ + j++; + while (j < num_elements) + { + if (current_db != blkinfo[j].database) + { + /* + * Combine BlockRecordInfos for global objects with those of + * the database. + */ + if (current_db != InvalidOid) + break; + current_db = blkinfo[j].database; + } + + j++; + } + + /* + * If we reach this point with current_db == InvalidOid, then only + * BlockRecordInfos belonging to global objects exist. We can't + * prewarm without a database connection, so just bail out. + */ + if (current_db == InvalidOid) + break; + + /* Configure stop point and database for next per-database worker. */ + apw_state->prewarm_stop_idx = j; + apw_state->database = current_db; + Assert(apw_state->prewarm_start_idx < apw_state->prewarm_stop_idx); + + /* If we've run out of free buffers, don't launch another worker. */ + if (!have_free_buffer()) + break; + + /* + * Start a per-database worker to load blocks for this database; this + * function will return once the per-database worker exits. + */ + apw_start_database_worker(); + + /* Prepare for next database. */ + apw_state->prewarm_start_idx = apw_state->prewarm_stop_idx; + } + + /* Clean up. 
*/ + dsm_detach(seg); + LWLockAcquire(&apw_state->lock, LW_EXCLUSIVE); + apw_state->block_info_handle = DSM_HANDLE_INVALID; + apw_state->pid_using_dumpfile = InvalidPid; + LWLockRelease(&apw_state->lock); + + /* Report our success. */ + ereport(LOG, + (errmsg("autoprewarm successfully prewarmed %d of %d previously-loaded blocks", + apw_state->prewarmed_blocks, num_elements))); +} + +/* + * Prewarm all blocks for one database (and possibly also global objects, if + * those got grouped with this database). + */ +void +autoprewarm_database_main(Datum main_arg) +{ + int pos; + BlockInfoRecord *block_info; + Relation rel = NULL; + BlockNumber nblocks = 0; + BlockInfoRecord *old_blk = NULL; + dsm_segment *seg; + + /* Establish signal handlers; once that's done, unblock signals. */ + pqsignal(SIGTERM, die); + BackgroundWorkerUnblockSignals(); + + /* Connect to correct database and get block information. */ + apw_init_shmem(); + seg = dsm_attach(apw_state->block_info_handle); + if (seg == NULL) + ereport(ERROR, + (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE), + errmsg("could not map dynamic shared memory segment"))); + BackgroundWorkerInitializeConnectionByOid(apw_state->database, InvalidOid, 0); + block_info = (BlockInfoRecord *) dsm_segment_address(seg); + pos = apw_state->prewarm_start_idx; + + /* + * Loop until we run out of blocks to prewarm or until we run out of free + * buffers. + */ + while (pos < apw_state->prewarm_stop_idx && have_free_buffer()) + { + BlockInfoRecord *blk = &block_info[pos++]; + Buffer buf; + + CHECK_FOR_INTERRUPTS(); + + /* + * Quit if we've reached records for another database. If previous + * blocks are of some global objects, then continue pre-warming. + */ + if (old_blk != NULL && old_blk->database != blk->database && + old_blk->database != 0) + break; + + /* + * As soon as we encounter a block of a new relation, close the old + * relation. Note that rel will be NULL if try_relation_open failed + * previously; in that case, there is nothing to close. + */ + if (old_blk != NULL && old_blk->filenode != blk->filenode && + rel != NULL) + { + relation_close(rel, AccessShareLock); + rel = NULL; + CommitTransactionCommand(); + } + + /* + * Try to open each new relation, but only once, when we first + * encounter it. If it's been dropped, skip the associated blocks. + */ + if (old_blk == NULL || old_blk->filenode != blk->filenode) + { + Oid reloid; + + Assert(rel == NULL); + StartTransactionCommand(); + reloid = RelidByRelfilenode(blk->tablespace, blk->filenode); + if (OidIsValid(reloid)) + rel = try_relation_open(reloid, AccessShareLock); + + if (!rel) + CommitTransactionCommand(); + } + if (!rel) + { + old_blk = blk; + continue; + } + + /* Once per fork, check for fork existence and size. */ + if (old_blk == NULL || + old_blk->filenode != blk->filenode || + old_blk->forknum != blk->forknum) + { + RelationOpenSmgr(rel); + + /* + * smgrexists is not safe for illegal forknum, hence check whether + * the passed forknum is valid before using it in smgrexists. + */ + if (blk->forknum > InvalidForkNumber && + blk->forknum <= MAX_FORKNUM && + smgrexists(rel->rd_smgr, blk->forknum)) + nblocks = RelationGetNumberOfBlocksInFork(rel, blk->forknum); + else + nblocks = 0; + } + + /* Check whether blocknum is valid and within fork file size. */ + if (blk->blocknum >= nblocks) + { + /* Move to next forknum. */ + old_blk = blk; + continue; + } + + /* Prewarm buffer. 
*/ + buf = ReadBufferExtended(rel, blk->forknum, blk->blocknum, RBM_NORMAL, + NULL); + if (BufferIsValid(buf)) + { + apw_state->prewarmed_blocks++; + ReleaseBuffer(buf); + } + + old_blk = blk; + } + + dsm_detach(seg); + + /* Release lock on previous relation. */ + if (rel) + { + relation_close(rel, AccessShareLock); + CommitTransactionCommand(); + } +} + +/* + * Dump information on blocks in shared buffers. We use a text format here + * so that it's easy to understand and even change the file contents if + * necessary. + * Returns the number of blocks dumped. + */ +static int +apw_dump_now(bool is_bgworker, bool dump_unlogged) +{ + int num_blocks; + int i; + int ret; + BlockInfoRecord *block_info_array; + BufferDesc *bufHdr; + FILE *file; + char transient_dump_file_path[MAXPGPATH]; + pid_t pid; + + LWLockAcquire(&apw_state->lock, LW_EXCLUSIVE); + pid = apw_state->pid_using_dumpfile; + if (apw_state->pid_using_dumpfile == InvalidPid) + apw_state->pid_using_dumpfile = MyProcPid; + LWLockRelease(&apw_state->lock); + + if (pid != InvalidPid) + { + if (!is_bgworker) + ereport(ERROR, + (errmsg("could not perform block dump because dump file is being used by PID %lu", + (unsigned long) apw_state->pid_using_dumpfile))); + + ereport(LOG, + (errmsg("skipping block dump because it is already being performed by PID %lu", + (unsigned long) apw_state->pid_using_dumpfile))); + return 0; + } + + block_info_array = + (BlockInfoRecord *) palloc(sizeof(BlockInfoRecord) * NBuffers); + + for (num_blocks = 0, i = 0; i < NBuffers; i++) + { + uint32 buf_state; + + CHECK_FOR_INTERRUPTS(); + + bufHdr = GetBufferDescriptor(i); + + /* Lock each buffer header before inspecting. */ + buf_state = LockBufHdr(bufHdr); + + /* + * Unlogged tables will be automatically truncated after a crash or + * unclean shutdown. In such cases we need not prewarm them. Dump them + * only if requested by caller. 
+ */ + if (buf_state & BM_TAG_VALID && + ((buf_state & BM_PERMANENT) || dump_unlogged)) + { + block_info_array[num_blocks].database = bufHdr->tag.rnode.dbNode; + block_info_array[num_blocks].tablespace = bufHdr->tag.rnode.spcNode; + block_info_array[num_blocks].filenode = bufHdr->tag.rnode.relNode; + block_info_array[num_blocks].forknum = bufHdr->tag.forkNum; + block_info_array[num_blocks].blocknum = bufHdr->tag.blockNum; + ++num_blocks; + } + + UnlockBufHdr(bufHdr, buf_state); + } + + snprintf(transient_dump_file_path, MAXPGPATH, "%s.tmp", AUTOPREWARM_FILE); + file = AllocateFile(transient_dump_file_path, "w"); + if (!file) + ereport(ERROR, + (errcode_for_file_access(), + errmsg("could not open file \"%s\": %m", + transient_dump_file_path))); + + ret = fprintf(file, "<<%d>>\n", num_blocks); + if (ret < 0) + { + int save_errno = errno; + + FreeFile(file); + unlink(transient_dump_file_path); + errno = save_errno; + ereport(ERROR, + (errcode_for_file_access(), + errmsg("could not write to file \"%s\": %m", + transient_dump_file_path))); + } + + for (i = 0; i < num_blocks; i++) + { + CHECK_FOR_INTERRUPTS(); + + ret = fprintf(file, "%u,%u,%u,%u,%u\n", + block_info_array[i].database, + block_info_array[i].tablespace, + block_info_array[i].filenode, + (uint32) block_info_array[i].forknum, + block_info_array[i].blocknum); + if (ret < 0) + { + int save_errno = errno; + + FreeFile(file); + unlink(transient_dump_file_path); + errno = save_errno; + ereport(ERROR, + (errcode_for_file_access(), + errmsg("could not write to file \"%s\": %m", + transient_dump_file_path))); + } + } + + pfree(block_info_array); + + /* + * Rename transient_dump_file_path to AUTOPREWARM_FILE to make things + * permanent. + */ + ret = FreeFile(file); + if (ret != 0) + { + int save_errno = errno; + + unlink(transient_dump_file_path); + errno = save_errno; + ereport(ERROR, + (errcode_for_file_access(), + errmsg("could not close file \"%s\": %m", + transient_dump_file_path))); + } + + (void) durable_rename(transient_dump_file_path, AUTOPREWARM_FILE, ERROR); + apw_state->pid_using_dumpfile = InvalidPid; + + ereport(DEBUG1, + (errmsg("wrote block details for %d blocks", num_blocks))); + return num_blocks; +} + +/* + * SQL-callable function to launch autoprewarm. + */ +Datum +autoprewarm_start_worker(PG_FUNCTION_ARGS) +{ + pid_t pid; + + if (!autoprewarm) + ereport(ERROR, + (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE), + errmsg("autoprewarm is disabled"))); + + apw_init_shmem(); + LWLockAcquire(&apw_state->lock, LW_EXCLUSIVE); + pid = apw_state->bgworker_pid; + LWLockRelease(&apw_state->lock); + + if (pid != InvalidPid) + ereport(ERROR, + (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE), + errmsg("autoprewarm worker is already running under PID %lu", + (unsigned long) pid))); + + apw_start_master_worker(); + + PG_RETURN_VOID(); +} + +/* + * SQL-callable function to perform an immediate block dump. + * + * Note: this is declared to return int8, as insurance against some + * very distant day when we might make NBuffers wider than int. + */ +Datum +autoprewarm_dump_now(PG_FUNCTION_ARGS) +{ + int num_blocks; + + apw_init_shmem(); + + PG_ENSURE_ERROR_CLEANUP(apw_detach_shmem, 0); + { + num_blocks = apw_dump_now(false, true); + } + PG_END_ENSURE_ERROR_CLEANUP(apw_detach_shmem, 0); + + PG_RETURN_INT64((int64) num_blocks); +} + +/* + * Allocate and initialize autoprewarm related shared memory, if not already + * done, and set up backend-local pointer to that state. Returns true if an + * existing shared memory segment was found. 
+ */ +static bool +apw_init_shmem(void) +{ + bool found; + + LWLockAcquire(AddinShmemInitLock, LW_EXCLUSIVE); + apw_state = ShmemInitStruct("autoprewarm", + sizeof(AutoPrewarmSharedState), + &found); + if (!found) + { + /* First time through ... */ + LWLockInitialize(&apw_state->lock, LWLockNewTrancheId()); + apw_state->bgworker_pid = InvalidPid; + apw_state->pid_using_dumpfile = InvalidPid; + } + LWLockRelease(AddinShmemInitLock); + + LWLockRegisterTranche(apw_state->lock.tranche, "autoprewarm"); + + return found; +} + +/* + * Clear our PID from autoprewarm shared state. + */ +static void +apw_detach_shmem(int code, Datum arg) +{ + LWLockAcquire(&apw_state->lock, LW_EXCLUSIVE); + if (apw_state->pid_using_dumpfile == MyProcPid) + apw_state->pid_using_dumpfile = InvalidPid; + if (apw_state->bgworker_pid == MyProcPid) + apw_state->bgworker_pid = InvalidPid; + LWLockRelease(&apw_state->lock); +} + +/* + * Start autoprewarm master worker process. + */ +static void +apw_start_master_worker(void) +{ + BackgroundWorker worker; + BackgroundWorkerHandle *handle; + BgwHandleStatus status; + pid_t pid; + + MemSet(&worker, 0, sizeof(BackgroundWorker)); + worker.bgw_flags = BGWORKER_SHMEM_ACCESS; + worker.bgw_start_time = BgWorkerStart_ConsistentState; + strcpy(worker.bgw_library_name, "pg_prewarm"); + strcpy(worker.bgw_function_name, "autoprewarm_main"); + strcpy(worker.bgw_name, "autoprewarm master"); + strcpy(worker.bgw_type, "autoprewarm master"); + + if (process_shared_preload_libraries_in_progress) + { + RegisterBackgroundWorker(&worker); + return; + } + + /* must set notify PID to wait for startup */ + worker.bgw_notify_pid = MyProcPid; + + if (!RegisterDynamicBackgroundWorker(&worker, &handle)) + ereport(ERROR, + (errcode(ERRCODE_INSUFFICIENT_RESOURCES), + errmsg("could not register background process"), + errhint("You may need to increase max_worker_processes."))); + + status = WaitForBackgroundWorkerStartup(handle, &pid); + if (status != BGWH_STARTED) + ereport(ERROR, + (errcode(ERRCODE_INSUFFICIENT_RESOURCES), + errmsg("could not start background process"), + errhint("More details may be available in the server log."))); +} + +/* + * Start autoprewarm per-database worker process. + */ +static void +apw_start_database_worker(void) +{ + BackgroundWorker worker; + BackgroundWorkerHandle *handle; + + MemSet(&worker, 0, sizeof(BackgroundWorker)); + worker.bgw_flags = + BGWORKER_SHMEM_ACCESS | BGWORKER_BACKEND_DATABASE_CONNECTION; + worker.bgw_start_time = BgWorkerStart_ConsistentState; + strcpy(worker.bgw_library_name, "pg_prewarm"); + strcpy(worker.bgw_function_name, "autoprewarm_database_main"); + strcpy(worker.bgw_name, "autoprewarm worker"); + strcpy(worker.bgw_type, "autoprewarm worker"); + + /* must set notify PID to wait for shutdown */ + worker.bgw_notify_pid = MyProcPid; + + if (!RegisterDynamicBackgroundWorker(&worker, &handle)) + ereport(ERROR, + (errcode(ERRCODE_INSUFFICIENT_RESOURCES), + errmsg("registering dynamic bgworker autoprewarm failed"), + errhint("Consider increasing configuration parameter \"max_worker_processes\"."))); + + /* + * Ignore return value; if it fails, postmaster has died, but we have + * checks for that elsewhere. + */ + WaitForBackgroundWorkerShutdown(handle); +} + +/* Compare member elements to check whether they are not equal. 
*/ +#define cmp_member_elem(fld) \ +do { \ + if (a->fld < b->fld) \ + return -1; \ + else if (a->fld > b->fld) \ + return 1; \ +} while(0) + +/* + * apw_compare_blockinfo + * + * We depend on all records for a particular database being consecutive + * in the dump file; each per-database worker will preload blocks until + * it sees a block for some other database. Sorting by tablespace, + * filenode, forknum, and blocknum isn't critical for correctness, but + * helps us get a sequential I/O pattern. + */ +static int +apw_compare_blockinfo(const void *p, const void *q) +{ + const BlockInfoRecord *a = (const BlockInfoRecord *) p; + const BlockInfoRecord *b = (const BlockInfoRecord *) q; + + cmp_member_elem(database); + cmp_member_elem(tablespace); + cmp_member_elem(filenode); + cmp_member_elem(forknum); + cmp_member_elem(blocknum); + + return 0; +} + +/* + * Signal handler for SIGTERM + */ +static void +apw_sigterm_handler(SIGNAL_ARGS) +{ + int save_errno = errno; + + got_sigterm = true; + + if (MyProc) + SetLatch(&MyProc->procLatch); + + errno = save_errno; +} + +/* + * Signal handler for SIGHUP + */ +static void +apw_sighup_handler(SIGNAL_ARGS) +{ + int save_errno = errno; + + got_sighup = true; + + if (MyProc) + SetLatch(&MyProc->procLatch); + + errno = save_errno; +} diff --git a/contrib/pg_prewarm/pg_prewarm--1.1--1.2.sql b/contrib/pg_prewarm/pg_prewarm--1.1--1.2.sql new file mode 100644 index 0000000000..2381c06eb9 --- /dev/null +++ b/contrib/pg_prewarm/pg_prewarm--1.1--1.2.sql @@ -0,0 +1,14 @@ +/* contrib/pg_prewarm/pg_prewarm--1.1--1.2.sql */ + +-- complain if script is sourced in psql, rather than via ALTER EXTENSION +\echo Use "ALTER EXTENSION pg_prewarm UPDATE TO '1.2'" to load this file. \quit + +CREATE FUNCTION autoprewarm_start_worker() +RETURNS VOID STRICT +AS 'MODULE_PATHNAME', 'autoprewarm_start_worker' +LANGUAGE C; + +CREATE FUNCTION autoprewarm_dump_now() +RETURNS pg_catalog.int8 STRICT +AS 'MODULE_PATHNAME', 'autoprewarm_dump_now' +LANGUAGE C; diff --git a/contrib/pg_prewarm/pg_prewarm.c b/contrib/pg_prewarm/pg_prewarm.c index fec62b1a54..1f4bfb8c0d 100644 --- a/contrib/pg_prewarm/pg_prewarm.c +++ b/contrib/pg_prewarm/pg_prewarm.c @@ -3,7 +3,7 @@ * pg_prewarm.c * prewarming utilities * - * Copyright (c) 2010-2017, PostgreSQL Global Development Group + * Copyright (c) 2010-2018, PostgreSQL Global Development Group * * IDENTIFICATION * contrib/pg_prewarm/pg_prewarm.c @@ -16,7 +16,6 @@ #include #include "access/heapam.h" -#include "catalog/catalog.h" #include "fmgr.h" #include "miscadmin.h" #include "storage/bufmgr.h" @@ -37,7 +36,7 @@ typedef enum PREWARM_BUFFER } PrewarmType; -static char blockbuffer[BLCKSZ]; +static PGAlignedBlock blockbuffer; /* * pg_prewarm(regclass, mode text, fork text, @@ -107,7 +106,7 @@ pg_prewarm(PG_FUNCTION_ARGS) rel = relation_open(relOid, AccessShareLock); aclresult = pg_class_aclcheck(relOid, GetUserId(), ACL_SELECT); if (aclresult != ACLCHECK_OK) - aclcheck_error(aclresult, ACL_KIND_CLASS, get_rel_name(relOid)); + aclcheck_error(aclresult, get_relkind_objtype(rel->rd_rel->relkind), get_rel_name(relOid)); /* Check that the fork exists. 
*/ RelationOpenSmgr(rel); @@ -179,7 +178,7 @@ pg_prewarm(PG_FUNCTION_ARGS) for (block = first_block; block <= last_block; ++block) { CHECK_FOR_INTERRUPTS(); - smgrread(rel->rd_smgr, forkNumber, block, blockbuffer); + smgrread(rel->rd_smgr, forkNumber, block, blockbuffer.data); ++blocks_done; } } diff --git a/contrib/pg_prewarm/pg_prewarm.control b/contrib/pg_prewarm/pg_prewarm.control index cf2fb92bed..40e3add481 100644 --- a/contrib/pg_prewarm/pg_prewarm.control +++ b/contrib/pg_prewarm/pg_prewarm.control @@ -1,5 +1,5 @@ # pg_prewarm extension comment = 'prewarm relation data' -default_version = '1.1' +default_version = '1.2' module_pathname = '$libdir/pg_prewarm' relocatable = true diff --git a/contrib/pg_standby/pg_standby.c b/contrib/pg_standby/pg_standby.c index d7fa2a80c6..ee1fbd7b33 100644 --- a/contrib/pg_standby/pg_standby.c +++ b/contrib/pg_standby/pg_standby.c @@ -36,6 +36,8 @@ const char *progname; +int WalSegSz = -1; + /* Options and defaults */ int sleeptime = 5; /* amount of time to sleep between file checks */ int waittime = -1; /* how long we have been waiting, -1 no wait @@ -92,7 +94,6 @@ int restoreCommandType; #define XLOG_DATA 0 #define XLOG_HISTORY 1 -#define XLOG_BACKUP_LABEL 2 int nextWALFileType; #define SET_RESTORE_COMMAND(cmd, arg1, arg2) \ @@ -100,6 +101,10 @@ int nextWALFileType; struct stat stat_buf; +static bool SetWALFileNameForCleanup(void); +static bool SetWALSegSize(void); + + /* ===================================================================== * * Customizable section @@ -176,15 +181,38 @@ CustomizableNextWALFileReady(void) if (stat(WALFilePath, &stat_buf) == 0) { /* - * If it's a backup file, return immediately. If it's a regular file - * return only if it's the right size already. + * If we've not seen any WAL segments, we don't know the WAL segment + * size, which we need. If it looks like a WAL segment, determine size + * of segments for the cluster. */ - if (IsBackupHistoryFileName(nextWALFileName)) + if (WalSegSz == -1 && IsXLogFileName(nextWALFileName)) { - nextWALFileType = XLOG_BACKUP_LABEL; - return true; + if (SetWALSegSize()) + { + /* + * Successfully determined WAL segment size. Can compute + * cleanup cutoff now. + */ + need_cleanup = SetWALFileNameForCleanup(); + if (debug) + { + fprintf(stderr, + _("WAL segment size: %d \n"), WalSegSz); + fprintf(stderr, "Keep archive history: "); + + if (need_cleanup) + fprintf(stderr, "%s and later\n", + exclusiveCleanupFileName); + else + fprintf(stderr, "no cleanup required\n"); + } + } } - else if (stat_buf.st_size == XLOG_SEG_SIZE) + + /* + * Return only if it's the right size already. 
+ */ + if (WalSegSz > 0 && stat_buf.st_size == WalSegSz) { #ifdef WIN32 @@ -204,7 +232,7 @@ CustomizableNextWALFileReady(void) /* * If still too small, wait until it is the correct size */ - if (stat_buf.st_size > XLOG_SEG_SIZE) + if (WalSegSz > 0 && stat_buf.st_size > WalSegSz) { if (debug) { @@ -218,8 +246,6 @@ CustomizableNextWALFileReady(void) return false; } -#define MaxSegmentsPerLogFile ( 0xFFFFFFFF / XLOG_SEG_SIZE ) - static void CustomizableCleanupPriorWALFiles(void) { @@ -315,6 +341,7 @@ SetWALFileNameForCleanup(void) uint32 log_diff = 0, seg_diff = 0; bool cleanup = false; + int max_segments_per_logfile = (0xFFFFFFFF / WalSegSz); if (restartWALFileName) { @@ -336,12 +363,12 @@ SetWALFileNameForCleanup(void) sscanf(nextWALFileName, "%08X%08X%08X", &tli, &log, &seg); if (tli > 0 && seg > 0) { - log_diff = keepfiles / MaxSegmentsPerLogFile; - seg_diff = keepfiles % MaxSegmentsPerLogFile; + log_diff = keepfiles / max_segments_per_logfile; + seg_diff = keepfiles % max_segments_per_logfile; if (seg_diff > seg) { log_diff++; - seg = MaxSegmentsPerLogFile - (seg_diff - seg); + seg = max_segments_per_logfile - (seg_diff - seg); } else seg -= seg_diff; @@ -364,6 +391,70 @@ SetWALFileNameForCleanup(void) return cleanup; } +/* + * Try to set the wal segment size from the WAL file specified by WALFilePath. + * + * Return true if size could be determined, false otherwise. + */ +static bool +SetWALSegSize(void) +{ + bool ret_val = false; + int fd; + PGAlignedXLogBlock buf; + + Assert(WalSegSz == -1); + + if ((fd = open(WALFilePath, O_RDWR, 0)) < 0) + { + fprintf(stderr, "%s: could not open WAL file \"%s\": %s\n", + progname, WALFilePath, strerror(errno)); + return false; + } + + errno = 0; + if (read(fd, buf.data, XLOG_BLCKSZ) == XLOG_BLCKSZ) + { + XLogLongPageHeader longhdr = (XLogLongPageHeader) buf.data; + + WalSegSz = longhdr->xlp_seg_size; + + if (IsValidWalSegSize(WalSegSz)) + { + /* successfully retrieved WAL segment size */ + ret_val = true; + } + else + fprintf(stderr, + "%s: WAL segment size must be a power of two between 1MB and 1GB, but the WAL file header specifies %d bytes\n", + progname, WalSegSz); + } + else + { + /* + * Don't complain loudly, this is to be expected for segments being + * created. + */ + if (errno != 0) + { + if (debug) + fprintf(stderr, "could not read file \"%s\": %s\n", + WALFilePath, strerror(errno)); + } + else + { + if (debug) + fprintf(stderr, "not enough data in file \"%s\"\n", + WALFilePath); + } + } + + fflush(stderr); + + close(fd); + return ret_val; +} + /* * CheckForExternalTrigger() * @@ -708,8 +799,6 @@ main(int argc, char **argv) CustomizableInitialize(); - need_cleanup = SetWALFileNameForCleanup(); - if (debug) { fprintf(stderr, "Trigger file: %s\n", triggerPath ? triggerPath : ""); @@ -721,11 +810,6 @@ main(int argc, char **argv) fprintf(stderr, "Max wait interval: %d %s\n", maxwaittime, (maxwaittime > 0 ? 
"seconds" : "forever")); fprintf(stderr, "Command for restore: %s\n", restoreCommand); - fprintf(stderr, "Keep archive history: "); - if (need_cleanup) - fprintf(stderr, "%s and later\n", exclusiveCleanupFileName); - else - fprintf(stderr, "no cleanup required\n"); fflush(stderr); } diff --git a/contrib/pg_stat_statements/Makefile b/contrib/pg_stat_statements/Makefile index 39b368b70e..14a50380dc 100644 --- a/contrib/pg_stat_statements/Makefile +++ b/contrib/pg_stat_statements/Makefile @@ -4,9 +4,10 @@ MODULE_big = pg_stat_statements OBJS = pg_stat_statements.o $(WIN32RES) EXTENSION = pg_stat_statements -DATA = pg_stat_statements--1.4.sql pg_stat_statements--1.4--1.5.sql \ - pg_stat_statements--1.3--1.4.sql pg_stat_statements--1.2--1.3.sql \ - pg_stat_statements--1.1--1.2.sql pg_stat_statements--1.0--1.1.sql \ +DATA = pg_stat_statements--1.4.sql pg_stat_statements--1.5--1.6.sql \ + pg_stat_statements--1.4--1.5.sql pg_stat_statements--1.3--1.4.sql \ + pg_stat_statements--1.2--1.3.sql pg_stat_statements--1.1--1.2.sql \ + pg_stat_statements--1.0--1.1.sql \ pg_stat_statements--unpackaged--1.0.sql PGFILEDESC = "pg_stat_statements - execution statistics of SQL statements" diff --git a/contrib/pg_stat_statements/pg_stat_statements--1.5--1.6.sql b/contrib/pg_stat_statements/pg_stat_statements--1.5--1.6.sql new file mode 100644 index 0000000000..4f8c7f7ee8 --- /dev/null +++ b/contrib/pg_stat_statements/pg_stat_statements--1.5--1.6.sql @@ -0,0 +1,7 @@ +/* contrib/pg_stat_statements/pg_stat_statements--1.5--1.6.sql */ + +-- complain if script is sourced in psql, rather than via ALTER EXTENSION +\echo Use "ALTER EXTENSION pg_stat_statements UPDATE TO '1.6'" to load this file. \quit + +-- Execution is only allowed for superusers, fixing issue with 1.5. +REVOKE EXECUTE ON FUNCTION pg_stat_statements_reset() FROM pg_read_all_stats; diff --git a/contrib/pg_stat_statements/pg_stat_statements.c b/contrib/pg_stat_statements/pg_stat_statements.c index fa409d72b7..33f9a79f54 100644 --- a/contrib/pg_stat_statements/pg_stat_statements.c +++ b/contrib/pg_stat_statements/pg_stat_statements.c @@ -21,7 +21,7 @@ * as the collations of Vars and, most notably, the values of constants. * * This jumble is acquired at the end of parse analysis of each query, and - * a 32-bit hash of it is stored into the query's Query.queryId field. + * a 64-bit hash of it is stored into the query's Query.queryId field. * The server then copies this value around, making it available in plan * tree(s) generated from the query. The executor can then use this value * to blame query costs on the proper queryId. @@ -48,7 +48,7 @@ * in the file to be read or written while holding only shared lock. 
* * - * Copyright (c) 2008-2017, PostgreSQL Global Development Group + * Copyright (c) 2008-2018, PostgreSQL Global Development Group * * IDENTIFICATION * contrib/pg_stat_statements/pg_stat_statements.c @@ -76,6 +76,7 @@ #include "storage/ipc.h" #include "storage/spin.h" #include "tcop/utility.h" +#include "utils/acl.h" #include "utils/builtins.h" #include "utils/memutils.h" @@ -95,7 +96,7 @@ PG_MODULE_MAGIC; #define PGSS_TEXT_FILE PG_STAT_TMP_DIR "/pgss_query_texts.stat" /* Magic number identifying the stats file format */ -static const uint32 PGSS_FILE_HEADER = 0x20140125; +static const uint32 PGSS_FILE_HEADER = 0x20171004; /* PostgreSQL major version number, changes in which invalidate all entries */ static const uint32 PGSS_PG_MAJOR_VERSION = PG_VERSION_NUM / 100; @@ -125,12 +126,17 @@ typedef enum pgssVersion /* * Hashtable key that defines the identity of a hashtable entry. We separate * queries by user and by database even if they are otherwise identical. + * + * Right now, this structure contains no padding. If you add any, make sure + * to teach pgss_store() to zero the padding bytes. Otherwise, things will + * break, because pgss_hash is created using HASH_BLOBS, and thus tag_hash + * is used to hash this. */ typedef struct pgssHashKey { Oid userid; /* user OID */ Oid dbid; /* database OID */ - uint32 queryid; /* query identifier */ + uint64 queryid; /* query identifier */ } pgssHashKey; /* @@ -301,10 +307,8 @@ static void pgss_ProcessUtility(PlannedStmt *pstmt, const char *queryString, ProcessUtilityContext context, ParamListInfo params, QueryEnvironment *queryEnv, DestReceiver *dest, char *completionTag); -static uint32 pgss_hash_fn(const void *key, Size keysize); -static int pgss_match_fn(const void *key1, const void *key2, Size keysize); -static uint32 pgss_hash_string(const char *str, int len); -static void pgss_store(const char *query, uint32 queryId, +static uint64 pgss_hash_string(const char *str, int len); +static void pgss_store(const char *query, uint64 queryId, int query_location, int query_len, double total_time, uint64 rows, const BufferUsage *bufusage, @@ -500,12 +504,10 @@ pgss_shmem_startup(void) memset(&info, 0, sizeof(info)); info.keysize = sizeof(pgssHashKey); info.entrysize = sizeof(pgssEntry); - info.hash = pgss_hash_fn; - info.match = pgss_match_fn; pgss_hash = ShmemInitHash("pg_stat_statements hash", pgss_max, pgss_max, &info, - HASH_ELEM | HASH_FUNCTION | HASH_COMPARE); + HASH_ELEM | HASH_BLOBS); LWLockRelease(AddinShmemInitLock); @@ -640,19 +642,19 @@ pgss_shmem_startup(void) read_error: ereport(LOG, (errcode_for_file_access(), - errmsg("could not read pg_stat_statement file \"%s\": %m", + errmsg("could not read file \"%s\": %m", PGSS_DUMP_FILE))); goto fail; data_error: ereport(LOG, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), - errmsg("ignoring invalid data in pg_stat_statement file \"%s\"", + errmsg("ignoring invalid data in file \"%s\"", PGSS_DUMP_FILE))); goto fail; write_error: ereport(LOG, (errcode_for_file_access(), - errmsg("could not write pg_stat_statement file \"%s\": %m", + errmsg("could not write file \"%s\": %m", PGSS_TEXT_FILE))); fail: if (buffer) @@ -759,7 +761,7 @@ pgss_shmem_shutdown(int code, Datum arg) error: ereport(LOG, (errcode_for_file_access(), - errmsg("could not write pg_stat_statement file \"%s\": %m", + errmsg("could not write file \"%s\": %m", PGSS_DUMP_FILE ".tmp"))); if (qbuffer) free(qbuffer); @@ -781,7 +783,7 @@ pgss_post_parse_analyze(ParseState *pstate, Query *query) prev_post_parse_analyze_hook(pstate, query); /* 
Assert we didn't do this already */ - Assert(query->queryId == 0); + Assert(query->queryId == UINT64CONST(0)); /* Safety check... */ if (!pgss || !pgss_hash) @@ -797,7 +799,7 @@ pgss_post_parse_analyze(ParseState *pstate, Query *query) */ if (query->utilityStmt) { - query->queryId = 0; + query->queryId = UINT64CONST(0); return; } @@ -812,14 +814,15 @@ pgss_post_parse_analyze(ParseState *pstate, Query *query) /* Compute query ID and mark the Query node with it */ JumbleQuery(&jstate, query); - query->queryId = hash_any(jstate.jumble, jstate.jumble_len); + query->queryId = + DatumGetUInt64(hash_any_extended(jstate.jumble, jstate.jumble_len, 0)); /* * If we are unlucky enough to get a hash of zero, use 1 instead, to * prevent confusion with the utility-statement case. */ - if (query->queryId == 0) - query->queryId = 1; + if (query->queryId == UINT64CONST(0)) + query->queryId = UINT64CONST(1); /* * If we were able to identify any ignorable constants, we immediately @@ -855,7 +858,7 @@ pgss_ExecutorStart(QueryDesc *queryDesc, int eflags) * counting of optimizable statements that are directly contained in * utility statements. */ - if (pgss_enabled() && queryDesc->plannedstmt->queryId != 0) + if (pgss_enabled() && queryDesc->plannedstmt->queryId != UINT64CONST(0)) { /* * Set up to track total elapsed time in ExecutorRun. Make sure the @@ -926,9 +929,9 @@ pgss_ExecutorFinish(QueryDesc *queryDesc) static void pgss_ExecutorEnd(QueryDesc *queryDesc) { - uint32 queryId = queryDesc->plannedstmt->queryId; + uint64 queryId = queryDesc->plannedstmt->queryId; - if (queryId != 0 && queryDesc->totaltime && pgss_enabled()) + if (queryId != UINT64CONST(0) && queryDesc->totaltime && pgss_enabled()) { /* * Make sure stats accumulation is done. (Note: it's okay if several @@ -1069,45 +1072,16 @@ pgss_ProcessUtility(PlannedStmt *pstmt, const char *queryString, } } -/* - * Calculate hash value for a key - */ -static uint32 -pgss_hash_fn(const void *key, Size keysize) -{ - const pgssHashKey *k = (const pgssHashKey *) key; - - return hash_uint32((uint32) k->userid) ^ - hash_uint32((uint32) k->dbid) ^ - hash_uint32((uint32) k->queryid); -} - -/* - * Compare two keys - zero means match - */ -static int -pgss_match_fn(const void *key1, const void *key2, Size keysize) -{ - const pgssHashKey *k1 = (const pgssHashKey *) key1; - const pgssHashKey *k2 = (const pgssHashKey *) key2; - - if (k1->userid == k2->userid && - k1->dbid == k2->dbid && - k1->queryid == k2->queryid) - return 0; - else - return 1; -} - /* * Given an arbitrarily long query string, produce a hash for the purposes of * identifying the query, without normalizing constants. Used when hashing * utility statements. */ -static uint32 +static uint64 pgss_hash_string(const char *str, int len) { - return hash_any((const unsigned char *) str, len); + return DatumGetUInt64(hash_any_extended((const unsigned char *) str, + len, 0)); } /* @@ -1121,7 +1095,7 @@ pgss_hash_string(const char *str, int len) * query string. total_time, rows, bufusage are ignored in this case. */ static void -pgss_store(const char *query, uint32 queryId, +pgss_store(const char *query, uint64 queryId, int query_location, int query_len, double total_time, uint64 rows, const BufferUsage *bufusage, @@ -1173,7 +1147,7 @@ pgss_store(const char *query, uint32 queryId, /* * For utility statements, we just hash the query string to get an ID. 
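The hunks above complete the move from a 32-bit to a 64-bit query identifier: the jumble is now hashed with hash_any_extended() and the result is carried as uint64 through pgss_post_parse_analyze(), the executor hooks, and pgss_store(). As a hedged illustration (not part of the patch; it assumes the extension's standard pg_stat_statements view, whose column names this patch does not change), this is how the wider identifier surfaces to users:

-- After installing the upgrade script added by this patch:
ALTER EXTENSION pg_stat_statements UPDATE TO '1.6';

-- queryid is exposed through the pg_stat_statements view as a bigint derived
-- from the 64-bit hash computed in pgss_post_parse_analyze() above.
SELECT queryid, calls, total_time, left(query, 40) AS query
  FROM pg_stat_statements
 ORDER BY total_time DESC
 LIMIT 5;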
*/ - if (queryId == 0) + if (queryId == UINT64CONST(0)) queryId = pgss_hash_string(query, query_len); /* Set up key for hashtable search */ @@ -1869,8 +1843,7 @@ qtext_store(const char *query, int query_len, *query_offset = off; /* Now write the data into the successfully-reserved part of the file */ - fd = OpenTransientFile(PGSS_TEXT_FILE, O_RDWR | O_CREAT | PG_BINARY, - S_IRUSR | S_IWUSR); + fd = OpenTransientFile(PGSS_TEXT_FILE, O_RDWR | O_CREAT | PG_BINARY); if (fd < 0) goto error; @@ -1898,7 +1871,7 @@ qtext_store(const char *query, int query_len, error: ereport(LOG, (errcode_for_file_access(), - errmsg("could not write pg_stat_statement file \"%s\": %m", + errmsg("could not write file \"%s\": %m", PGSS_TEXT_FILE))); if (fd >= 0) @@ -1934,13 +1907,13 @@ qtext_load_file(Size *buffer_size) int fd; struct stat stat; - fd = OpenTransientFile(PGSS_TEXT_FILE, O_RDONLY | PG_BINARY, 0); + fd = OpenTransientFile(PGSS_TEXT_FILE, O_RDONLY | PG_BINARY); if (fd < 0) { if (errno != ENOENT) ereport(LOG, (errcode_for_file_access(), - errmsg("could not read pg_stat_statement file \"%s\": %m", + errmsg("could not read file \"%s\": %m", PGSS_TEXT_FILE))); return NULL; } @@ -1950,7 +1923,7 @@ qtext_load_file(Size *buffer_size) { ereport(LOG, (errcode_for_file_access(), - errmsg("could not stat pg_stat_statement file \"%s\": %m", + errmsg("could not stat file \"%s\": %m", PGSS_TEXT_FILE))); CloseTransientFile(fd); return NULL; @@ -1966,7 +1939,7 @@ qtext_load_file(Size *buffer_size) ereport(LOG, (errcode(ERRCODE_OUT_OF_MEMORY), errmsg("out of memory"), - errdetail("Could not allocate enough memory to read pg_stat_statement file \"%s\".", + errdetail("Could not allocate enough memory to read file \"%s\".", PGSS_TEXT_FILE))); CloseTransientFile(fd); return NULL; @@ -1985,7 +1958,7 @@ qtext_load_file(Size *buffer_size) if (errno) ereport(LOG, (errcode_for_file_access(), - errmsg("could not read pg_stat_statement file \"%s\": %m", + errmsg("could not read file \"%s\": %m", PGSS_TEXT_FILE))); free(buf); CloseTransientFile(fd); @@ -2115,7 +2088,7 @@ gc_qtexts(void) { ereport(LOG, (errcode_for_file_access(), - errmsg("could not write pg_stat_statement file \"%s\": %m", + errmsg("could not write file \"%s\": %m", PGSS_TEXT_FILE))); goto gc_fail; } @@ -2145,7 +2118,7 @@ gc_qtexts(void) { ereport(LOG, (errcode_for_file_access(), - errmsg("could not write pg_stat_statement file \"%s\": %m", + errmsg("could not write file \"%s\": %m", PGSS_TEXT_FILE))); hash_seq_term(&hash_seq); goto gc_fail; @@ -2163,14 +2136,14 @@ gc_qtexts(void) if (ftruncate(fileno(qfile), extent) != 0) ereport(LOG, (errcode_for_file_access(), - errmsg("could not truncate pg_stat_statement file \"%s\": %m", + errmsg("could not truncate file \"%s\": %m", PGSS_TEXT_FILE))); if (FreeFile(qfile)) { ereport(LOG, (errcode_for_file_access(), - errmsg("could not write pg_stat_statement file \"%s\": %m", + errmsg("could not write file \"%s\": %m", PGSS_TEXT_FILE))); qfile = NULL; goto gc_fail; @@ -2230,7 +2203,7 @@ gc_qtexts(void) if (qfile == NULL) ereport(LOG, (errcode_for_file_access(), - errmsg("could not write new pg_stat_statement file \"%s\": %m", + errmsg("could not recreate file \"%s\": %m", PGSS_TEXT_FILE))); else FreeFile(qfile); @@ -2282,7 +2255,7 @@ entry_reset(void) { ereport(LOG, (errcode_for_file_access(), - errmsg("could not create pg_stat_statement file \"%s\": %m", + errmsg("could not create file \"%s\": %m", PGSS_TEXT_FILE))); goto done; } @@ -2291,7 +2264,7 @@ entry_reset(void) if (ftruncate(fileno(qfile), 0) != 0) ereport(LOG, 
(errcode_for_file_access(), - errmsg("could not truncate pg_stat_statement file \"%s\": %m", + errmsg("could not truncate file \"%s\": %m", PGSS_TEXT_FILE))); FreeFile(qfile); @@ -2325,8 +2298,10 @@ AppendJumble(pgssJumbleState *jstate, const unsigned char *item, Size size) if (jumble_len >= JUMBLE_SIZE) { - uint32 start_hash = hash_any(jumble, JUMBLE_SIZE); + uint64 start_hash; + start_hash = DatumGetUInt64(hash_any_extended(jumble, + JUMBLE_SIZE, 0)); memcpy(jumble, &start_hash, sizeof(start_hash)); jumble_len = sizeof(start_hash); } @@ -2631,6 +2606,7 @@ JumbleExpr(pgssJumbleState *jstate, Node *node) APP_JUMB(acexpr->resulttype); JumbleExpr(jstate, (Node *) acexpr->arg); + JumbleExpr(jstate, (Node *) acexpr->elemexpr); } break; case T_ConvertRowtypeExpr: diff --git a/contrib/pg_stat_statements/pg_stat_statements.control b/contrib/pg_stat_statements/pg_stat_statements.control index 193fcdfafa..617038b4c0 100644 --- a/contrib/pg_stat_statements/pg_stat_statements.control +++ b/contrib/pg_stat_statements/pg_stat_statements.control @@ -1,5 +1,5 @@ # pg_stat_statements extension comment = 'track execution statistics of all SQL statements executed' -default_version = '1.5' +default_version = '1.6' module_pathname = '$libdir/pg_stat_statements' relocatable = true diff --git a/contrib/pg_trgm/Makefile b/contrib/pg_trgm/Makefile index 212a89039a..dfecc2a37f 100644 --- a/contrib/pg_trgm/Makefile +++ b/contrib/pg_trgm/Makefile @@ -4,11 +4,12 @@ MODULE_big = pg_trgm OBJS = trgm_op.o trgm_gist.o trgm_gin.o trgm_regexp.o $(WIN32RES) EXTENSION = pg_trgm -DATA = pg_trgm--1.3.sql pg_trgm--1.2--1.3.sql pg_trgm--1.1--1.2.sql \ +DATA = pg_trgm--1.3--1.4.sql \ + pg_trgm--1.3.sql pg_trgm--1.2--1.3.sql pg_trgm--1.1--1.2.sql \ pg_trgm--1.0--1.1.sql pg_trgm--unpackaged--1.0.sql PGFILEDESC = "pg_trgm - trigram matching" -REGRESS = pg_trgm pg_word_trgm +REGRESS = pg_trgm pg_word_trgm pg_strict_word_trgm ifdef USE_PGXS PG_CONFIG = pg_config diff --git a/contrib/pg_trgm/expected/pg_strict_word_trgm.out b/contrib/pg_trgm/expected/pg_strict_word_trgm.out new file mode 100644 index 0000000000..43898a3b98 --- /dev/null +++ b/contrib/pg_trgm/expected/pg_strict_word_trgm.out @@ -0,0 +1,1025 @@ +DROP INDEX trgm_idx2; +\copy test_trgm3 from 'data/trgm2.data' +ERROR: relation "test_trgm3" does not exist +select t,strict_word_similarity('Baykal',t) as sml from test_trgm2 where 'Baykal' <<% t order by sml desc, t; + t | sml +-------------------------------------+---------- + Baykal | 1 + Boloto Baykal | 1 + Boloto Malyy Baykal | 1 + Kolkhoz Krasnyy Baykal | 1 + Ozero Baykal | 1 + Polevoy Stan Baykal | 1 + Port Baykal | 1 + Prud Novyy Baykal | 1 + Sanatoriy Baykal | 1 + Stantsiya Baykal | 1 + Zaliv Baykal | 1 + Baykalo-Amurskaya Zheleznaya Doroga | 0.666667 + Baykalovo | 0.545455 + Baykalsko | 0.545455 + Maloye Baykalovo | 0.545455 + Baykalikha | 0.5 + Baykalovsk | 0.5 +(17 rows) + +select t,strict_word_similarity('Kabankala',t) as sml from test_trgm2 where 'Kabankala' <<% t order by sml desc, t; + t | sml +------------------------------+---------- + Kabankala | 1 + Kabankalan City Public Plaza | 0.75 + Abankala | 0.583333 + Kabakala | 0.583333 +(4 rows) + +select t,strict_word_similarity('Baykal',t) as sml from test_trgm2 where t %>> 'Baykal' order by sml desc, t; + t | sml +-------------------------------------+---------- + Baykal | 1 + Boloto Baykal | 1 + Boloto Malyy Baykal | 1 + Kolkhoz Krasnyy Baykal | 1 + Ozero Baykal | 1 + Polevoy Stan Baykal | 1 + Port Baykal | 1 + Prud Novyy Baykal | 1 + Sanatoriy Baykal | 1 + 
Stantsiya Baykal | 1 + Zaliv Baykal | 1 + Baykalo-Amurskaya Zheleznaya Doroga | 0.666667 + Baykalovo | 0.545455 + Baykalsko | 0.545455 + Maloye Baykalovo | 0.545455 + Baykalikha | 0.5 + Baykalovsk | 0.5 +(17 rows) + +select t,strict_word_similarity('Kabankala',t) as sml from test_trgm2 where t %>> 'Kabankala' order by sml desc, t; + t | sml +------------------------------+---------- + Kabankala | 1 + Kabankalan City Public Plaza | 0.75 + Abankala | 0.583333 + Kabakala | 0.583333 +(4 rows) + +select t <->>> 'Alaikallupoddakulam', t from test_trgm2 order by t <->>> 'Alaikallupoddakulam' limit 7; + ?column? | t +----------+-------------------------- + 0 | Alaikallupoddakulam + 0.25 | Alaikallupodda Alankulam + 0.32 | Alaikalluppodda Kulam + 0.615385 | Mulaikallu Kulam + 0.724138 | Koraikalapu Kulam + 0.75 | Vaikaliththevakulam + 0.766667 | Karaivaikal Kulam +(7 rows) + +create index trgm_idx2 on test_trgm2 using gist (t gist_trgm_ops); +set enable_seqscan=off; +select t,strict_word_similarity('Baykal',t) as sml from test_trgm2 where 'Baykal' <<% t order by sml desc, t; + t | sml +-------------------------------------+---------- + Baykal | 1 + Boloto Baykal | 1 + Boloto Malyy Baykal | 1 + Kolkhoz Krasnyy Baykal | 1 + Ozero Baykal | 1 + Polevoy Stan Baykal | 1 + Port Baykal | 1 + Prud Novyy Baykal | 1 + Sanatoriy Baykal | 1 + Stantsiya Baykal | 1 + Zaliv Baykal | 1 + Baykalo-Amurskaya Zheleznaya Doroga | 0.666667 + Baykalovo | 0.545455 + Baykalsko | 0.545455 + Maloye Baykalovo | 0.545455 + Baykalikha | 0.5 + Baykalovsk | 0.5 +(17 rows) + +select t,strict_word_similarity('Kabankala',t) as sml from test_trgm2 where 'Kabankala' <<% t order by sml desc, t; + t | sml +------------------------------+---------- + Kabankala | 1 + Kabankalan City Public Plaza | 0.75 + Abankala | 0.583333 + Kabakala | 0.583333 +(4 rows) + +select t,strict_word_similarity('Baykal',t) as sml from test_trgm2 where t %>> 'Baykal' order by sml desc, t; + t | sml +-------------------------------------+---------- + Baykal | 1 + Boloto Baykal | 1 + Boloto Malyy Baykal | 1 + Kolkhoz Krasnyy Baykal | 1 + Ozero Baykal | 1 + Polevoy Stan Baykal | 1 + Port Baykal | 1 + Prud Novyy Baykal | 1 + Sanatoriy Baykal | 1 + Stantsiya Baykal | 1 + Zaliv Baykal | 1 + Baykalo-Amurskaya Zheleznaya Doroga | 0.666667 + Baykalovo | 0.545455 + Baykalsko | 0.545455 + Maloye Baykalovo | 0.545455 + Baykalikha | 0.5 + Baykalovsk | 0.5 +(17 rows) + +select t,strict_word_similarity('Kabankala',t) as sml from test_trgm2 where t %>> 'Kabankala' order by sml desc, t; + t | sml +------------------------------+---------- + Kabankala | 1 + Kabankalan City Public Plaza | 0.75 + Abankala | 0.583333 + Kabakala | 0.583333 +(4 rows) + +explain (costs off) +select t <->>> 'Alaikallupoddakulam', t from test_trgm2 order by t <->>> 'Alaikallupoddakulam' limit 7; + QUERY PLAN +--------------------------------------------------------- + Limit + -> Index Scan using trgm_idx2 on test_trgm2 + Order By: (t <->>> 'Alaikallupoddakulam'::text) +(3 rows) + +select t <->>> 'Alaikallupoddakulam', t from test_trgm2 order by t <->>> 'Alaikallupoddakulam' limit 7; + ?column? 
| t +----------+-------------------------- + 0 | Alaikallupoddakulam + 0.25 | Alaikallupodda Alankulam + 0.32 | Alaikalluppodda Kulam + 0.615385 | Mulaikallu Kulam + 0.724138 | Koraikalapu Kulam + 0.75 | Vaikaliththevakulam + 0.766667 | Karaivaikal Kulam +(7 rows) + +drop index trgm_idx2; +create index trgm_idx2 on test_trgm2 using gin (t gin_trgm_ops); +set enable_seqscan=off; +select t,strict_word_similarity('Baykal',t) as sml from test_trgm2 where 'Baykal' <<% t order by sml desc, t; + t | sml +-------------------------------------+---------- + Baykal | 1 + Boloto Baykal | 1 + Boloto Malyy Baykal | 1 + Kolkhoz Krasnyy Baykal | 1 + Ozero Baykal | 1 + Polevoy Stan Baykal | 1 + Port Baykal | 1 + Prud Novyy Baykal | 1 + Sanatoriy Baykal | 1 + Stantsiya Baykal | 1 + Zaliv Baykal | 1 + Baykalo-Amurskaya Zheleznaya Doroga | 0.666667 + Baykalovo | 0.545455 + Baykalsko | 0.545455 + Maloye Baykalovo | 0.545455 + Baykalikha | 0.5 + Baykalovsk | 0.5 +(17 rows) + +select t,strict_word_similarity('Kabankala',t) as sml from test_trgm2 where 'Kabankala' <<% t order by sml desc, t; + t | sml +------------------------------+---------- + Kabankala | 1 + Kabankalan City Public Plaza | 0.75 + Abankala | 0.583333 + Kabakala | 0.583333 +(4 rows) + +select t,strict_word_similarity('Baykal',t) as sml from test_trgm2 where t %>> 'Baykal' order by sml desc, t; + t | sml +-------------------------------------+---------- + Baykal | 1 + Boloto Baykal | 1 + Boloto Malyy Baykal | 1 + Kolkhoz Krasnyy Baykal | 1 + Ozero Baykal | 1 + Polevoy Stan Baykal | 1 + Port Baykal | 1 + Prud Novyy Baykal | 1 + Sanatoriy Baykal | 1 + Stantsiya Baykal | 1 + Zaliv Baykal | 1 + Baykalo-Amurskaya Zheleznaya Doroga | 0.666667 + Baykalovo | 0.545455 + Baykalsko | 0.545455 + Maloye Baykalovo | 0.545455 + Baykalikha | 0.5 + Baykalovsk | 0.5 +(17 rows) + +select t,strict_word_similarity('Kabankala',t) as sml from test_trgm2 where t %>> 'Kabankala' order by sml desc, t; + t | sml +------------------------------+---------- + Kabankala | 1 + Kabankalan City Public Plaza | 0.75 + Abankala | 0.583333 + Kabakala | 0.583333 +(4 rows) + +set "pg_trgm.strict_word_similarity_threshold" to 0.4; +select t,strict_word_similarity('Baykal',t) as sml from test_trgm2 where 'Baykal' <<% t order by sml desc, t; + t | sml +-------------------------------------+---------- + Baykal | 1 + Boloto Baykal | 1 + Boloto Malyy Baykal | 1 + Kolkhoz Krasnyy Baykal | 1 + Ozero Baykal | 1 + Polevoy Stan Baykal | 1 + Port Baykal | 1 + Prud Novyy Baykal | 1 + Sanatoriy Baykal | 1 + Stantsiya Baykal | 1 + Zaliv Baykal | 1 + Baykalo-Amurskaya Zheleznaya Doroga | 0.666667 + Baykalovo | 0.545455 + Baykalsko | 0.545455 + Maloye Baykalovo | 0.545455 + Baykalikha | 0.5 + Baykalovsk | 0.5 + Zabaykal | 0.454545 + Air Bakal-kecil | 0.444444 + Bakal | 0.444444 + Bakal Batu | 0.444444 + Bakal Dos | 0.444444 + Bakal Julu | 0.444444 + Bakal Khel | 0.444444 + Bakal Lama | 0.444444 + Bakal Tres | 0.444444 + Bakal Uno | 0.444444 + Daang Bakal | 0.444444 + Desa Bakal | 0.444444 + Eat Bakal | 0.444444 + Gunung Bakal | 0.444444 + Sidi Bakal | 0.444444 + Stantsiya Bakal | 0.444444 + Sungai Bakal | 0.444444 + Talang Bakal | 0.444444 + Uruk Bakal | 0.444444 + Zaouia Oulad Bakal | 0.444444 + Baykalovskiy | 0.428571 + Baykalovskiy Rayon | 0.428571 + Baikal | 0.4 + Baikal Airfield | 0.4 + Baikal Business Centre | 0.4 + Baikal Hotel Moscow | 0.4 + Baikal Listvyanka Hotel | 0.4 + Baikal Mountains | 0.4 + Baikal Plaza | 0.4 + Bajkal | 0.4 + Bankal | 0.4 + Bankal School | 0.4 + Barkal | 0.4 + Jabal 
Barkal | 0.4 + Lake Baikal | 0.4 + Oulad el Bakkal | 0.4 + Sidi Mohammed Bakkal | 0.4 +(54 rows) + +select t,strict_word_similarity('Kabankala',t) as sml from test_trgm2 where 'Kabankala' <<% t order by sml desc, t; + t | sml +------------------------------+---------- + Kabankala | 1 + Kabankalan City Public Plaza | 0.75 + Abankala | 0.583333 + Kabakala | 0.583333 + Kabikala | 0.461538 +(5 rows) + +select t,strict_word_similarity('Baykal',t) as sml from test_trgm2 where t %>> 'Baykal' order by sml desc, t; + t | sml +-------------------------------------+---------- + Baykal | 1 + Boloto Baykal | 1 + Boloto Malyy Baykal | 1 + Kolkhoz Krasnyy Baykal | 1 + Ozero Baykal | 1 + Polevoy Stan Baykal | 1 + Port Baykal | 1 + Prud Novyy Baykal | 1 + Sanatoriy Baykal | 1 + Stantsiya Baykal | 1 + Zaliv Baykal | 1 + Baykalo-Amurskaya Zheleznaya Doroga | 0.666667 + Baykalovo | 0.545455 + Baykalsko | 0.545455 + Maloye Baykalovo | 0.545455 + Baykalikha | 0.5 + Baykalovsk | 0.5 + Zabaykal | 0.454545 + Air Bakal-kecil | 0.444444 + Bakal | 0.444444 + Bakal Batu | 0.444444 + Bakal Dos | 0.444444 + Bakal Julu | 0.444444 + Bakal Khel | 0.444444 + Bakal Lama | 0.444444 + Bakal Tres | 0.444444 + Bakal Uno | 0.444444 + Daang Bakal | 0.444444 + Desa Bakal | 0.444444 + Eat Bakal | 0.444444 + Gunung Bakal | 0.444444 + Sidi Bakal | 0.444444 + Stantsiya Bakal | 0.444444 + Sungai Bakal | 0.444444 + Talang Bakal | 0.444444 + Uruk Bakal | 0.444444 + Zaouia Oulad Bakal | 0.444444 + Baykalovskiy | 0.428571 + Baykalovskiy Rayon | 0.428571 + Baikal | 0.4 + Baikal Airfield | 0.4 + Baikal Business Centre | 0.4 + Baikal Hotel Moscow | 0.4 + Baikal Listvyanka Hotel | 0.4 + Baikal Mountains | 0.4 + Baikal Plaza | 0.4 + Bajkal | 0.4 + Bankal | 0.4 + Bankal School | 0.4 + Barkal | 0.4 + Jabal Barkal | 0.4 + Lake Baikal | 0.4 + Oulad el Bakkal | 0.4 + Sidi Mohammed Bakkal | 0.4 +(54 rows) + +select t,strict_word_similarity('Kabankala',t) as sml from test_trgm2 where t %>> 'Kabankala' order by sml desc, t; + t | sml +------------------------------+---------- + Kabankala | 1 + Kabankalan City Public Plaza | 0.75 + Abankala | 0.583333 + Kabakala | 0.583333 + Kabikala | 0.461538 +(5 rows) + +set "pg_trgm.strict_word_similarity_threshold" to 0.2; +select t,strict_word_similarity('Baykal',t) as sml from test_trgm2 where 'Baykal' <<% t order by sml desc, t; + t | sml +-----------------------------------------------------------+---------- + Baykal | 1 + Boloto Baykal | 1 + Boloto Malyy Baykal | 1 + Kolkhoz Krasnyy Baykal | 1 + Ozero Baykal | 1 + Polevoy Stan Baykal | 1 + Port Baykal | 1 + Prud Novyy Baykal | 1 + Sanatoriy Baykal | 1 + Stantsiya Baykal | 1 + Zaliv Baykal | 1 + Baykalo-Amurskaya Zheleznaya Doroga | 0.666667 + Baykalovo | 0.545455 + Baykalsko | 0.545455 + Maloye Baykalovo | 0.545455 + Baykalikha | 0.5 + Baykalovsk | 0.5 + Zabaykal | 0.454545 + Air Bakal-kecil | 0.444444 + Bakal | 0.444444 + Bakal Batu | 0.444444 + Bakal Dos | 0.444444 + Bakal Julu | 0.444444 + Bakal Khel | 0.444444 + Bakal Lama | 0.444444 + Bakal Tres | 0.444444 + Bakal Uno | 0.444444 + Daang Bakal | 0.444444 + Desa Bakal | 0.444444 + Eat Bakal | 0.444444 + Gunung Bakal | 0.444444 + Sidi Bakal | 0.444444 + Stantsiya Bakal | 0.444444 + Sungai Bakal | 0.444444 + Talang Bakal | 0.444444 + Uruk Bakal | 0.444444 + Zaouia Oulad Bakal | 0.444444 + Baykalovskiy | 0.428571 + Baykalovskiy Rayon | 0.428571 + Baikal | 0.4 + Baikal Airfield | 0.4 + Baikal Business Centre | 0.4 + Baikal Hotel Moscow | 0.4 + Baikal Listvyanka Hotel | 0.4 + Baikal Mountains | 0.4 + Baikal Plaza 
| 0.4 + Bajkal | 0.4 + Bankal | 0.4 + Bankal School | 0.4 + Barkal | 0.4 + Jabal Barkal | 0.4 + Lake Baikal | 0.4 + Oulad el Bakkal | 0.4 + Sidi Mohammed Bakkal | 0.4 + Bay of Backaland | 0.375 + Boikalakalawa Bay | 0.375 + Waikalabubu Bay | 0.375 + Bairkal | 0.363636 + Bairkal Dhora | 0.363636 + Bairkal Jabal | 0.363636 + Batikal | 0.363636 + Bakaleyka | 0.307692 + Bakkalmal | 0.307692 + Bikal | 0.3 + Al Barkali | 0.285714 + Zabaykalka | 0.285714 + Baidal | 0.272727 + Baihal | 0.272727 + Baipal | 0.272727 + Bakala | 0.272727 + Bakala Koupi | 0.272727 + Bakale | 0.272727 + Bakali | 0.272727 + Bakall | 0.272727 + Bakaly | 0.272727 + Bakaly TV Mast | 0.272727 + Buur Bakale | 0.272727 + Gory Bakaly | 0.272727 + Kusu-Bakali | 0.272727 + Kwala Bakala | 0.272727 + Mbay Bakala | 0.272727 + Ngao Bakala | 0.272727 + Sidi Mohammed el Bakali | 0.272727 + Sopka Bakaly | 0.272727 + Sungai Bakala | 0.272727 + Urochishche Bakaly | 0.272727 + Alue Bakkala | 0.25 + Azib el Bakkali | 0.25 + Ba Kaliin | 0.25 + Baikaluobbal | 0.25 + Bakalam | 0.25 + Bakalan | 0.25 + Bakalan Barat | 0.25 + Bakalan Dua | 0.25 + Bakalan Kidul | 0.25 + Bakalan Kulon | 0.25 + Bakalan Lor | 0.25 + Bakalan River | 0.25 + Bakalan Tengah | 0.25 + Bakalan Wetan | 0.25 + Bakalao Asibi Point | 0.25 + Bakalao Point | 0.25 + Bakalar Air Force Base (historical) | 0.25 + Bakalar Lake | 0.25 + Bakalar Library | 0.25 + Bakalda | 0.25 + Bakaldy | 0.25 + Bakaley | 0.25 + Bakalha | 0.25 + Bakalia Char | 0.25 + Bakalka | 0.25 + Bakalod Island | 0.25 + Bakalou | 0.25 + Bakalua | 0.25 + Bakalum | 0.25 + Bakkala Cemetery | 0.25 + Bankali | 0.25 + Barkala | 0.25 + Barkala Park | 0.25 + Barkala Rao | 0.25 + Barkala Reserved Forest | 0.25 + Barkald | 0.25 + Barkald stasjon | 0.25 + Barkale | 0.25 + Barkali | 0.25 + Baukala | 0.25 + Buur Bakaley | 0.25 + Columbus Bakalar Municipal Airport | 0.25 + Dakshin Bakalia | 0.25 + Danau Bakalan | 0.25 + Desa Bakalan | 0.25 + Gunung Bakalan | 0.25 + Kali Bakalan | 0.25 + Khrebet Batkali | 0.25 + Kordon Barkalo | 0.25 + Krajan Bakalan | 0.25 + Ovrag Bakalda | 0.25 + Pulau Bakalan | 0.25 + Selat Bakalan | 0.25 + Teluk Bakalan | 0.25 + Tukad Bakalan | 0.25 + Urochishche Batkali | 0.25 + Babakale | 0.230769 + Babakalo | 0.230769 + Bagkalen | 0.230769 + Bakalalan Airport | 0.230769 + Bakalang | 0.230769 + Bakalarr | 0.230769 + Bakalawa | 0.230769 + Bakaldum | 0.230769 + Bakaleko | 0.230769 + Bakalica | 0.230769 + Bakalino | 0.230769 + Bakalite | 0.230769 + Bakalovo | 0.230769 + Bakalsen | 0.230769 + Bakaltua Bank | 0.230769 + Bakalukalu | 0.230769 + Bakalukalu Shan | 0.230769 + Bakkalia | 0.230769 + Bankalol | 0.230769 + Barkaleh | 0.230769 + Barkalne | 0.230769 + Barkalow Hollow | 0.230769 + Bawkalut | 0.230769 + Bawkalut Chaung | 0.230769 + Clifton T Barkalow Elementary School | 0.230769 + Efrejtor Bakalovo | 0.230769 + Efreytor-Bakalovo | 0.230769 + Gora Barkalyu | 0.230769 + Ile Bakalibu | 0.230769 + Khor Bakallii | 0.230769 + Nehalla Bankalah Reserved Forest | 0.230769 + Ragha Bakalzai | 0.230769 + Tanjung Batikala | 0.230769 + Teluk Bakalang | 0.230769 + Urochishche Bakalovo | 0.230769 + Banjar Kubakal | 0.222222 + Darreh Pumba Kal | 0.222222 + Zabaykalovskiy | 0.222222 + Aparthotel Adagio Premium Dubai Al Barsha | 0.214286 + Babakalia | 0.214286 + Bahkalleh | 0.214286 + Baikalovo | 0.214286 + Bakalaale | 0.214286 + Bakalabwa Pans | 0.214286 + Bakalaeng | 0.214286 + Bakalauri | 0.214286 + Bakalbhar | 0.214286 + Bakalbuah | 0.214286 + Bakalerek | 0.214286 + Bakalinga | 0.214286 + Bakalipur | 0.214286 + Bakaljaya | 
0.214286 + Bakalnica | 0.214286 + Bakalongo | 0.214286 + Bakalovka | 0.214286 + Bakalrejo | 0.214286 + Bakkalale | 0.214286 + Bambakala | 0.214286 + Bambakalo | 0.214286 + Barkalare | 0.214286 + Barkalden | 0.214286 + Barkallou | 0.214286 + Barkalova | 0.214286 + Baskalino | 0.214286 + Baskaltsi | 0.214286 + Desa Bakalrejo | 0.214286 + Doubletree By Hilton Dubai Al Barsha Hotel and Res | 0.214286 + Doubletree By Hilton Hotel and Apartments Dubai Al Barsha | 0.214286 + Doubletree Res.Dubai-Al Barsha | 0.214286 + Gora Barkalova | 0.214286 + Holiday Inn Dubai Al Barsha | 0.214286 + Novotel Dubai Al Barsha | 0.214286 + Park Inn By Radisson Dubai Al Barsha | 0.214286 + Ramee Rose Hotel Dubai Al Barsha | 0.214286 + Ras Barkallah | 0.214286 + Salu Bakalaeng | 0.214286 + Tanjung Bakalinga | 0.214286 + Tubu Bakalekuk | 0.214286 + Baikalakko | 0.2 + Bakalauri1 | 0.2 + Bakalauri2 | 0.2 + Bakalauri3 | 0.2 + Bakalauri4 | 0.2 + Bakalauri5 | 0.2 + Bakalauri6 | 0.2 + Bakalauri7 | 0.2 + Bakalauri8 | 0.2 + Bakalauri9 | 0.2 + Bakaldalam | 0.2 + Bakaldukuh | 0.2 + Bakaloolay | 0.2 + Bakalovina | 0.2 + Bakalpokok | 0.2 + Bakalshile | 0.2 + Bakalukudu | 0.2 + Bambakalia | 0.2 + Barkaladja Pool | 0.2 + Barkalovka | 0.2 + Bavkalasis | 0.2 + Gora Bakalyadyr | 0.2 + Kampong Bakaladong | 0.2 + Urochishche Bakalarnyn-Ayasy | 0.2 + Urochishche Bakaldikha | 0.2 +(245 rows) + +select t,strict_word_similarity('Kabankala',t) as sml from test_trgm2 where 'Kabankala' <<% t order by sml desc, t; + t | sml +----------------------------------+---------- + Kabankala | 1 + Kabankalan City Public Plaza | 0.75 + Abankala | 0.583333 + Kabakala | 0.583333 + Kabikala | 0.461538 + Ntombankala School | 0.375 + Nehalla Bankalah Reserved Forest | 0.357143 + Jabba Kalai | 0.333333 + Kambakala | 0.333333 + Ker Samba Kalla | 0.333333 + Bankal | 0.307692 + Bankal School | 0.307692 + Kanampumba-Kalawa | 0.307692 + Bankali | 0.285714 + Mwalaba-Kalamba | 0.285714 + Tumba-Kalamba | 0.285714 + Darreh Pumba Kal | 0.272727 + Bankalol | 0.266667 + Dabakala | 0.266667 + Purba Kalaujan | 0.266667 + Kali Purbakala | 0.263158 + Dalabakala | 0.25 + Demba Kali | 0.25 + Gagaba Kalo | 0.25 + Golba Kalo | 0.25 + Habakkala | 0.25 + Kali Bakalan | 0.25 + Kimbakala | 0.25 + Kombakala | 0.25 + Jaba Kalle | 0.235294 + Kaikalahun Indian Reserve 25 | 0.235294 + Kwala Bakala | 0.235294 + Gereba Kaler | 0.230769 + Goth Soba Kaloi | 0.230769 + Guba Kaldo | 0.230769 + Gulba Kalle | 0.230769 + Guba Kalgalaksha | 0.222222 + Kalibakalako | 0.222222 + Ba Kaliin | 0.214286 + Bakala | 0.214286 + Bakala Koupi | 0.214286 + Bikala | 0.214286 + Bikala Madila | 0.214286 + Bugor Arba-Kalgan | 0.214286 + Bumba-Kaloki | 0.214286 + Guba Kalita | 0.214286 + Kamba-Kalele | 0.214286 + Mbay Bakala | 0.214286 + Ngao Bakala | 0.214286 + Sungai Bakala | 0.214286 + Fayzabadkala | 0.210526 + Gora Fayzabadkala | 0.210526 + Alue Bakkala | 0.2 + Bakkala Cemetery | 0.2 + Barkala | 0.2 + Barkala Park | 0.2 + Barkala Rao | 0.2 + Barkala Reserved Forest | 0.2 + Baukala | 0.2 + Beikala | 0.2 + Bomba-Kalende | 0.2 + Bumba-Kalumba | 0.2 + Haikala | 0.2 + Kahambikalela | 0.2 + Kaikalapettai | 0.2 + Kaikale | 0.2 + Laikala | 0.2 + Maikala Range | 0.2 + Matamba-Kalenga | 0.2 + Matamba-Kalenge | 0.2 + Naikala | 0.2 + Tumba-Kalumba | 0.2 + Tumba-Kalunga | 0.2 + Waikala | 0.2 +(74 rows) + +select t,strict_word_similarity('Baykal',t) as sml from test_trgm2 where t %>> 'Baykal' order by sml desc, t; + t | sml +-----------------------------------------------------------+---------- + Baykal | 1 + Boloto Baykal | 
1 + Boloto Malyy Baykal | 1 + Kolkhoz Krasnyy Baykal | 1 + Ozero Baykal | 1 + Polevoy Stan Baykal | 1 + Port Baykal | 1 + Prud Novyy Baykal | 1 + Sanatoriy Baykal | 1 + Stantsiya Baykal | 1 + Zaliv Baykal | 1 + Baykalo-Amurskaya Zheleznaya Doroga | 0.666667 + Baykalovo | 0.545455 + Baykalsko | 0.545455 + Maloye Baykalovo | 0.545455 + Baykalikha | 0.5 + Baykalovsk | 0.5 + Zabaykal | 0.454545 + Air Bakal-kecil | 0.444444 + Bakal | 0.444444 + Bakal Batu | 0.444444 + Bakal Dos | 0.444444 + Bakal Julu | 0.444444 + Bakal Khel | 0.444444 + Bakal Lama | 0.444444 + Bakal Tres | 0.444444 + Bakal Uno | 0.444444 + Daang Bakal | 0.444444 + Desa Bakal | 0.444444 + Eat Bakal | 0.444444 + Gunung Bakal | 0.444444 + Sidi Bakal | 0.444444 + Stantsiya Bakal | 0.444444 + Sungai Bakal | 0.444444 + Talang Bakal | 0.444444 + Uruk Bakal | 0.444444 + Zaouia Oulad Bakal | 0.444444 + Baykalovskiy | 0.428571 + Baykalovskiy Rayon | 0.428571 + Baikal | 0.4 + Baikal Airfield | 0.4 + Baikal Business Centre | 0.4 + Baikal Hotel Moscow | 0.4 + Baikal Listvyanka Hotel | 0.4 + Baikal Mountains | 0.4 + Baikal Plaza | 0.4 + Bajkal | 0.4 + Bankal | 0.4 + Bankal School | 0.4 + Barkal | 0.4 + Jabal Barkal | 0.4 + Lake Baikal | 0.4 + Oulad el Bakkal | 0.4 + Sidi Mohammed Bakkal | 0.4 + Bay of Backaland | 0.375 + Boikalakalawa Bay | 0.375 + Waikalabubu Bay | 0.375 + Bairkal | 0.363636 + Bairkal Dhora | 0.363636 + Bairkal Jabal | 0.363636 + Batikal | 0.363636 + Bakaleyka | 0.307692 + Bakkalmal | 0.307692 + Bikal | 0.3 + Al Barkali | 0.285714 + Zabaykalka | 0.285714 + Baidal | 0.272727 + Baihal | 0.272727 + Baipal | 0.272727 + Bakala | 0.272727 + Bakala Koupi | 0.272727 + Bakale | 0.272727 + Bakali | 0.272727 + Bakall | 0.272727 + Bakaly | 0.272727 + Bakaly TV Mast | 0.272727 + Buur Bakale | 0.272727 + Gory Bakaly | 0.272727 + Kusu-Bakali | 0.272727 + Kwala Bakala | 0.272727 + Mbay Bakala | 0.272727 + Ngao Bakala | 0.272727 + Sidi Mohammed el Bakali | 0.272727 + Sopka Bakaly | 0.272727 + Sungai Bakala | 0.272727 + Urochishche Bakaly | 0.272727 + Alue Bakkala | 0.25 + Azib el Bakkali | 0.25 + Ba Kaliin | 0.25 + Baikaluobbal | 0.25 + Bakalam | 0.25 + Bakalan | 0.25 + Bakalan Barat | 0.25 + Bakalan Dua | 0.25 + Bakalan Kidul | 0.25 + Bakalan Kulon | 0.25 + Bakalan Lor | 0.25 + Bakalan River | 0.25 + Bakalan Tengah | 0.25 + Bakalan Wetan | 0.25 + Bakalao Asibi Point | 0.25 + Bakalao Point | 0.25 + Bakalar Air Force Base (historical) | 0.25 + Bakalar Lake | 0.25 + Bakalar Library | 0.25 + Bakalda | 0.25 + Bakaldy | 0.25 + Bakaley | 0.25 + Bakalha | 0.25 + Bakalia Char | 0.25 + Bakalka | 0.25 + Bakalod Island | 0.25 + Bakalou | 0.25 + Bakalua | 0.25 + Bakalum | 0.25 + Bakkala Cemetery | 0.25 + Bankali | 0.25 + Barkala | 0.25 + Barkala Park | 0.25 + Barkala Rao | 0.25 + Barkala Reserved Forest | 0.25 + Barkald | 0.25 + Barkald stasjon | 0.25 + Barkale | 0.25 + Barkali | 0.25 + Baukala | 0.25 + Buur Bakaley | 0.25 + Columbus Bakalar Municipal Airport | 0.25 + Dakshin Bakalia | 0.25 + Danau Bakalan | 0.25 + Desa Bakalan | 0.25 + Gunung Bakalan | 0.25 + Kali Bakalan | 0.25 + Khrebet Batkali | 0.25 + Kordon Barkalo | 0.25 + Krajan Bakalan | 0.25 + Ovrag Bakalda | 0.25 + Pulau Bakalan | 0.25 + Selat Bakalan | 0.25 + Teluk Bakalan | 0.25 + Tukad Bakalan | 0.25 + Urochishche Batkali | 0.25 + Babakale | 0.230769 + Babakalo | 0.230769 + Bagkalen | 0.230769 + Bakalalan Airport | 0.230769 + Bakalang | 0.230769 + Bakalarr | 0.230769 + Bakalawa | 0.230769 + Bakaldum | 0.230769 + Bakaleko | 0.230769 + Bakalica | 0.230769 + Bakalino | 0.230769 + Bakalite | 
0.230769 + Bakalovo | 0.230769 + Bakalsen | 0.230769 + Bakaltua Bank | 0.230769 + Bakalukalu | 0.230769 + Bakalukalu Shan | 0.230769 + Bakkalia | 0.230769 + Bankalol | 0.230769 + Barkaleh | 0.230769 + Barkalne | 0.230769 + Barkalow Hollow | 0.230769 + Bawkalut | 0.230769 + Bawkalut Chaung | 0.230769 + Clifton T Barkalow Elementary School | 0.230769 + Efrejtor Bakalovo | 0.230769 + Efreytor-Bakalovo | 0.230769 + Gora Barkalyu | 0.230769 + Ile Bakalibu | 0.230769 + Khor Bakallii | 0.230769 + Nehalla Bankalah Reserved Forest | 0.230769 + Ragha Bakalzai | 0.230769 + Tanjung Batikala | 0.230769 + Teluk Bakalang | 0.230769 + Urochishche Bakalovo | 0.230769 + Banjar Kubakal | 0.222222 + Darreh Pumba Kal | 0.222222 + Zabaykalovskiy | 0.222222 + Aparthotel Adagio Premium Dubai Al Barsha | 0.214286 + Babakalia | 0.214286 + Bahkalleh | 0.214286 + Baikalovo | 0.214286 + Bakalaale | 0.214286 + Bakalabwa Pans | 0.214286 + Bakalaeng | 0.214286 + Bakalauri | 0.214286 + Bakalbhar | 0.214286 + Bakalbuah | 0.214286 + Bakalerek | 0.214286 + Bakalinga | 0.214286 + Bakalipur | 0.214286 + Bakaljaya | 0.214286 + Bakalnica | 0.214286 + Bakalongo | 0.214286 + Bakalovka | 0.214286 + Bakalrejo | 0.214286 + Bakkalale | 0.214286 + Bambakala | 0.214286 + Bambakalo | 0.214286 + Barkalare | 0.214286 + Barkalden | 0.214286 + Barkallou | 0.214286 + Barkalova | 0.214286 + Baskalino | 0.214286 + Baskaltsi | 0.214286 + Desa Bakalrejo | 0.214286 + Doubletree By Hilton Dubai Al Barsha Hotel and Res | 0.214286 + Doubletree By Hilton Hotel and Apartments Dubai Al Barsha | 0.214286 + Doubletree Res.Dubai-Al Barsha | 0.214286 + Gora Barkalova | 0.214286 + Holiday Inn Dubai Al Barsha | 0.214286 + Novotel Dubai Al Barsha | 0.214286 + Park Inn By Radisson Dubai Al Barsha | 0.214286 + Ramee Rose Hotel Dubai Al Barsha | 0.214286 + Ras Barkallah | 0.214286 + Salu Bakalaeng | 0.214286 + Tanjung Bakalinga | 0.214286 + Tubu Bakalekuk | 0.214286 + Baikalakko | 0.2 + Bakalauri1 | 0.2 + Bakalauri2 | 0.2 + Bakalauri3 | 0.2 + Bakalauri4 | 0.2 + Bakalauri5 | 0.2 + Bakalauri6 | 0.2 + Bakalauri7 | 0.2 + Bakalauri8 | 0.2 + Bakalauri9 | 0.2 + Bakaldalam | 0.2 + Bakaldukuh | 0.2 + Bakaloolay | 0.2 + Bakalovina | 0.2 + Bakalpokok | 0.2 + Bakalshile | 0.2 + Bakalukudu | 0.2 + Bambakalia | 0.2 + Barkaladja Pool | 0.2 + Barkalovka | 0.2 + Bavkalasis | 0.2 + Gora Bakalyadyr | 0.2 + Kampong Bakaladong | 0.2 + Urochishche Bakalarnyn-Ayasy | 0.2 + Urochishche Bakaldikha | 0.2 +(245 rows) + +select t,strict_word_similarity('Kabankala',t) as sml from test_trgm2 where t %>> 'Kabankala' order by sml desc, t; + t | sml +----------------------------------+---------- + Kabankala | 1 + Kabankalan City Public Plaza | 0.75 + Abankala | 0.583333 + Kabakala | 0.583333 + Kabikala | 0.461538 + Ntombankala School | 0.375 + Nehalla Bankalah Reserved Forest | 0.357143 + Jabba Kalai | 0.333333 + Kambakala | 0.333333 + Ker Samba Kalla | 0.333333 + Bankal | 0.307692 + Bankal School | 0.307692 + Kanampumba-Kalawa | 0.307692 + Bankali | 0.285714 + Mwalaba-Kalamba | 0.285714 + Tumba-Kalamba | 0.285714 + Darreh Pumba Kal | 0.272727 + Bankalol | 0.266667 + Dabakala | 0.266667 + Purba Kalaujan | 0.266667 + Kali Purbakala | 0.263158 + Dalabakala | 0.25 + Demba Kali | 0.25 + Gagaba Kalo | 0.25 + Golba Kalo | 0.25 + Habakkala | 0.25 + Kali Bakalan | 0.25 + Kimbakala | 0.25 + Kombakala | 0.25 + Jaba Kalle | 0.235294 + Kaikalahun Indian Reserve 25 | 0.235294 + Kwala Bakala | 0.235294 + Gereba Kaler | 0.230769 + Goth Soba Kaloi | 0.230769 + Guba Kaldo | 0.230769 + Gulba Kalle | 0.230769 + 
Guba Kalgalaksha | 0.222222 + Kalibakalako | 0.222222 + Ba Kaliin | 0.214286 + Bakala | 0.214286 + Bakala Koupi | 0.214286 + Bikala | 0.214286 + Bikala Madila | 0.214286 + Bugor Arba-Kalgan | 0.214286 + Bumba-Kaloki | 0.214286 + Guba Kalita | 0.214286 + Kamba-Kalele | 0.214286 + Mbay Bakala | 0.214286 + Ngao Bakala | 0.214286 + Sungai Bakala | 0.214286 + Fayzabadkala | 0.210526 + Gora Fayzabadkala | 0.210526 + Alue Bakkala | 0.2 + Bakkala Cemetery | 0.2 + Barkala | 0.2 + Barkala Park | 0.2 + Barkala Rao | 0.2 + Barkala Reserved Forest | 0.2 + Baukala | 0.2 + Beikala | 0.2 + Bomba-Kalende | 0.2 + Bumba-Kalumba | 0.2 + Haikala | 0.2 + Kahambikalela | 0.2 + Kaikalapettai | 0.2 + Kaikale | 0.2 + Laikala | 0.2 + Maikala Range | 0.2 + Matamba-Kalenga | 0.2 + Matamba-Kalenge | 0.2 + Naikala | 0.2 + Tumba-Kalumba | 0.2 + Tumba-Kalunga | 0.2 + Waikala | 0.2 +(74 rows) + diff --git a/contrib/pg_trgm/expected/pg_trgm.out b/contrib/pg_trgm/expected/pg_trgm.out index c3304b0ceb..6efc54356a 100644 --- a/contrib/pg_trgm/expected/pg_trgm.out +++ b/contrib/pg_trgm/expected/pg_trgm.out @@ -7,6 +7,9 @@ WHERE opc.oid >= 16384 AND NOT amvalidate(opc.oid); --------+--------- (0 rows) +--backslash is used in tests below, installcheck will fail if +--standard_conforming_string is off +set standard_conforming_strings=on; select show_trgm(''); show_trgm ----------- diff --git a/contrib/pg_trgm/pg_trgm--1.3--1.4.sql b/contrib/pg_trgm/pg_trgm--1.3--1.4.sql new file mode 100644 index 0000000000..64a0c219b5 --- /dev/null +++ b/contrib/pg_trgm/pg_trgm--1.3--1.4.sql @@ -0,0 +1,68 @@ +/* contrib/pg_trgm/pg_trgm--1.3--1.4.sql */ + +-- complain if script is sourced in psql, rather than via ALTER EXTENSION +\echo Use "ALTER EXTENSION pg_trgm UPDATE TO '1.4'" to load this file. \quit + +CREATE FUNCTION strict_word_similarity(text,text) +RETURNS float4 +AS 'MODULE_PATHNAME' +LANGUAGE C STRICT IMMUTABLE PARALLEL SAFE; + +CREATE FUNCTION strict_word_similarity_op(text,text) +RETURNS bool +AS 'MODULE_PATHNAME' +LANGUAGE C STRICT STABLE PARALLEL SAFE; -- stable because depends on pg_trgm.word_similarity_threshold + +CREATE FUNCTION strict_word_similarity_commutator_op(text,text) +RETURNS bool +AS 'MODULE_PATHNAME' +LANGUAGE C STRICT STABLE PARALLEL SAFE; -- stable because depends on pg_trgm.word_similarity_threshold + +CREATE OPERATOR <<% ( + LEFTARG = text, + RIGHTARG = text, + PROCEDURE = strict_word_similarity_op, + COMMUTATOR = '%>>', + RESTRICT = contsel, + JOIN = contjoinsel +); + +CREATE OPERATOR %>> ( + LEFTARG = text, + RIGHTARG = text, + PROCEDURE = strict_word_similarity_commutator_op, + COMMUTATOR = '<<%', + RESTRICT = contsel, + JOIN = contjoinsel +); + +CREATE FUNCTION strict_word_similarity_dist_op(text,text) +RETURNS float4 +AS 'MODULE_PATHNAME' +LANGUAGE C STRICT IMMUTABLE PARALLEL SAFE; + +CREATE FUNCTION strict_word_similarity_dist_commutator_op(text,text) +RETURNS float4 +AS 'MODULE_PATHNAME' +LANGUAGE C STRICT IMMUTABLE PARALLEL SAFE; + +CREATE OPERATOR <<<-> ( + LEFTARG = text, + RIGHTARG = text, + PROCEDURE = strict_word_similarity_dist_op, + COMMUTATOR = '<->>>' +); + +CREATE OPERATOR <->>> ( + LEFTARG = text, + RIGHTARG = text, + PROCEDURE = strict_word_similarity_dist_commutator_op, + COMMUTATOR = '<<<->' +); + +ALTER OPERATOR FAMILY gist_trgm_ops USING gist ADD + OPERATOR 9 %>> (text, text), + OPERATOR 10 <->>> (text, text) FOR ORDER BY pg_catalog.float_ops; + +ALTER OPERATOR FAMILY gin_trgm_ops USING gin ADD + OPERATOR 9 %>> (text, text); diff --git a/contrib/pg_trgm/pg_trgm.control 
b/contrib/pg_trgm/pg_trgm.control index 06f274f01a..3e325dde00 100644 --- a/contrib/pg_trgm/pg_trgm.control +++ b/contrib/pg_trgm/pg_trgm.control @@ -1,5 +1,5 @@ # pg_trgm extension comment = 'text similarity measurement and index searching based on trigrams' -default_version = '1.3' +default_version = '1.4' module_pathname = '$libdir/pg_trgm' relocatable = true diff --git a/contrib/pg_trgm/sql/pg_strict_word_trgm.sql b/contrib/pg_trgm/sql/pg_strict_word_trgm.sql new file mode 100644 index 0000000000..98e0d379f8 --- /dev/null +++ b/contrib/pg_trgm/sql/pg_strict_word_trgm.sql @@ -0,0 +1,42 @@ +DROP INDEX trgm_idx2; + +\copy test_trgm3 from 'data/trgm2.data' + +select t,strict_word_similarity('Baykal',t) as sml from test_trgm2 where 'Baykal' <<% t order by sml desc, t; +select t,strict_word_similarity('Kabankala',t) as sml from test_trgm2 where 'Kabankala' <<% t order by sml desc, t; +select t,strict_word_similarity('Baykal',t) as sml from test_trgm2 where t %>> 'Baykal' order by sml desc, t; +select t,strict_word_similarity('Kabankala',t) as sml from test_trgm2 where t %>> 'Kabankala' order by sml desc, t; +select t <->>> 'Alaikallupoddakulam', t from test_trgm2 order by t <->>> 'Alaikallupoddakulam' limit 7; + +create index trgm_idx2 on test_trgm2 using gist (t gist_trgm_ops); +set enable_seqscan=off; + +select t,strict_word_similarity('Baykal',t) as sml from test_trgm2 where 'Baykal' <<% t order by sml desc, t; +select t,strict_word_similarity('Kabankala',t) as sml from test_trgm2 where 'Kabankala' <<% t order by sml desc, t; +select t,strict_word_similarity('Baykal',t) as sml from test_trgm2 where t %>> 'Baykal' order by sml desc, t; +select t,strict_word_similarity('Kabankala',t) as sml from test_trgm2 where t %>> 'Kabankala' order by sml desc, t; + +explain (costs off) +select t <->>> 'Alaikallupoddakulam', t from test_trgm2 order by t <->>> 'Alaikallupoddakulam' limit 7; +select t <->>> 'Alaikallupoddakulam', t from test_trgm2 order by t <->>> 'Alaikallupoddakulam' limit 7; + +drop index trgm_idx2; +create index trgm_idx2 on test_trgm2 using gin (t gin_trgm_ops); +set enable_seqscan=off; + +select t,strict_word_similarity('Baykal',t) as sml from test_trgm2 where 'Baykal' <<% t order by sml desc, t; +select t,strict_word_similarity('Kabankala',t) as sml from test_trgm2 where 'Kabankala' <<% t order by sml desc, t; +select t,strict_word_similarity('Baykal',t) as sml from test_trgm2 where t %>> 'Baykal' order by sml desc, t; +select t,strict_word_similarity('Kabankala',t) as sml from test_trgm2 where t %>> 'Kabankala' order by sml desc, t; + +set "pg_trgm.strict_word_similarity_threshold" to 0.4; +select t,strict_word_similarity('Baykal',t) as sml from test_trgm2 where 'Baykal' <<% t order by sml desc, t; +select t,strict_word_similarity('Kabankala',t) as sml from test_trgm2 where 'Kabankala' <<% t order by sml desc, t; +select t,strict_word_similarity('Baykal',t) as sml from test_trgm2 where t %>> 'Baykal' order by sml desc, t; +select t,strict_word_similarity('Kabankala',t) as sml from test_trgm2 where t %>> 'Kabankala' order by sml desc, t; + +set "pg_trgm.strict_word_similarity_threshold" to 0.2; +select t,strict_word_similarity('Baykal',t) as sml from test_trgm2 where 'Baykal' <<% t order by sml desc, t; +select t,strict_word_similarity('Kabankala',t) as sml from test_trgm2 where 'Kabankala' <<% t order by sml desc, t; +select t,strict_word_similarity('Baykal',t) as sml from test_trgm2 where t %>> 'Baykal' order by sml desc, t; +select t,strict_word_similarity('Kabankala',t) as sml 
from test_trgm2 where t %>> 'Kabankala' order by sml desc, t; diff --git a/contrib/pg_trgm/sql/pg_trgm.sql b/contrib/pg_trgm/sql/pg_trgm.sql index fe8d0a7495..96ae542320 100644 --- a/contrib/pg_trgm/sql/pg_trgm.sql +++ b/contrib/pg_trgm/sql/pg_trgm.sql @@ -5,6 +5,10 @@ SELECT amname, opcname FROM pg_opclass opc LEFT JOIN pg_am am ON am.oid = opcmethod WHERE opc.oid >= 16384 AND NOT amvalidate(opc.oid); +--backslash is used in tests below, installcheck will fail if +--standard_conforming_string is off +set standard_conforming_strings=on; + select show_trgm(''); select show_trgm('(*&^$@%@'); select show_trgm('a b c'); diff --git a/contrib/pg_trgm/trgm.h b/contrib/pg_trgm/trgm.h index 45df91875a..f0ab50dd05 100644 --- a/contrib/pg_trgm/trgm.h +++ b/contrib/pg_trgm/trgm.h @@ -6,6 +6,7 @@ #include "access/gist.h" #include "access/itup.h" +#include "access/stratnum.h" #include "storage/bufpage.h" /* @@ -26,14 +27,16 @@ #define DIVUNION /* operator strategy numbers */ -#define SimilarityStrategyNumber 1 -#define DistanceStrategyNumber 2 -#define LikeStrategyNumber 3 -#define ILikeStrategyNumber 4 -#define RegExpStrategyNumber 5 -#define RegExpICaseStrategyNumber 6 -#define WordSimilarityStrategyNumber 7 -#define WordDistanceStrategyNumber 8 +#define SimilarityStrategyNumber 1 +#define DistanceStrategyNumber 2 +#define LikeStrategyNumber 3 +#define ILikeStrategyNumber 4 +#define RegExpStrategyNumber 5 +#define RegExpICaseStrategyNumber 6 +#define WordSimilarityStrategyNumber 7 +#define WordDistanceStrategyNumber 8 +#define StrictWordSimilarityStrategyNumber 9 +#define StrictWordDistanceStrategyNumber 10 typedef char trgm[3]; @@ -120,7 +123,9 @@ typedef struct TrgmPackedGraph TrgmPackedGraph; extern double similarity_threshold; extern double word_similarity_threshold; +extern double strict_word_similarity_threshold; +extern double index_strategy_get_limit(StrategyNumber strategy); extern uint32 trgm2int(trgm *ptr); extern void compact_trigram(trgm *tptr, char *str, int bytelen); extern TRGM *generate_trgm(char *str, int slen); diff --git a/contrib/pg_trgm/trgm_gin.c b/contrib/pg_trgm/trgm_gin.c index e4b3daea44..1b9809b565 100644 --- a/contrib/pg_trgm/trgm_gin.c +++ b/contrib/pg_trgm/trgm_gin.c @@ -90,6 +90,7 @@ gin_extract_query_trgm(PG_FUNCTION_ARGS) { case SimilarityStrategyNumber: case WordSimilarityStrategyNumber: + case StrictWordSimilarityStrategyNumber: trg = generate_trgm(VARDATA_ANY(val), VARSIZE_ANY_EXHDR(val)); break; case ILikeStrategyNumber: @@ -187,8 +188,8 @@ gin_trgm_consistent(PG_FUNCTION_ARGS) { case SimilarityStrategyNumber: case WordSimilarityStrategyNumber: - nlimit = (strategy == SimilarityStrategyNumber) ? - similarity_threshold : word_similarity_threshold; + case StrictWordSimilarityStrategyNumber: + nlimit = index_strategy_get_limit(strategy); /* Count the matches */ ntrue = 0; @@ -282,8 +283,8 @@ gin_trgm_triconsistent(PG_FUNCTION_ARGS) { case SimilarityStrategyNumber: case WordSimilarityStrategyNumber: - nlimit = (strategy == SimilarityStrategyNumber) ? 
- similarity_threshold : word_similarity_threshold; + case StrictWordSimilarityStrategyNumber: + nlimit = index_strategy_get_limit(strategy); /* Count the matches */ ntrue = 0; diff --git a/contrib/pg_trgm/trgm_gist.c b/contrib/pg_trgm/trgm_gist.c index ed02af875c..f1e05478da 100644 --- a/contrib/pg_trgm/trgm_gist.c +++ b/contrib/pg_trgm/trgm_gist.c @@ -106,7 +106,7 @@ gtrgm_compress(PG_FUNCTION_ARGS) retval = (GISTENTRY *) palloc(sizeof(GISTENTRY)); gistentryinit(*retval, PointerGetDatum(res), entry->rel, entry->page, - entry->offset, FALSE); + entry->offset, false); } else if (ISSIGNKEY(DatumGetPointer(entry->key)) && !ISALLTRUE(DatumGetPointer(entry->key))) @@ -130,7 +130,7 @@ gtrgm_compress(PG_FUNCTION_ARGS) retval = (GISTENTRY *) palloc(sizeof(GISTENTRY)); gistentryinit(*retval, PointerGetDatum(res), entry->rel, entry->page, - entry->offset, FALSE); + entry->offset, false); } PG_RETURN_POINTER(retval); } @@ -221,6 +221,7 @@ gtrgm_consistent(PG_FUNCTION_ARGS) { case SimilarityStrategyNumber: case WordSimilarityStrategyNumber: + case StrictWordSimilarityStrategyNumber: qtrg = generate_trgm(VARDATA(query), querysize - VARHDRSZ); break; @@ -290,10 +291,15 @@ gtrgm_consistent(PG_FUNCTION_ARGS) { case SimilarityStrategyNumber: case WordSimilarityStrategyNumber: - /* Similarity search is exact. Word similarity search is inexact */ - *recheck = (strategy == WordSimilarityStrategyNumber); - nlimit = (strategy == SimilarityStrategyNumber) ? - similarity_threshold : word_similarity_threshold; + case StrictWordSimilarityStrategyNumber: + + /* + * Similarity search is exact. (Strict) word similarity search is + * inexact + */ + *recheck = (strategy != SimilarityStrategyNumber); + + nlimit = index_strategy_get_limit(strategy); if (GIST_LEAF(entry)) { /* all leafs contains orig trgm */ @@ -468,7 +474,9 @@ gtrgm_distance(PG_FUNCTION_ARGS) { case DistanceStrategyNumber: case WordDistanceStrategyNumber: - *recheck = strategy == WordDistanceStrategyNumber; + case StrictWordDistanceStrategyNumber: + /* Only plain trigram distance is exact */ + *recheck = (strategy != DistanceStrategyNumber); if (GIST_LEAF(entry)) { /* all leafs contains orig trgm */ diff --git a/contrib/pg_trgm/trgm_op.c b/contrib/pg_trgm/trgm_op.c index f7e96acc53..9f26725ec2 100644 --- a/contrib/pg_trgm/trgm_op.c +++ b/contrib/pg_trgm/trgm_op.c @@ -18,6 +18,7 @@ PG_MODULE_MAGIC; /* GUC variables */ double similarity_threshold = 0.3f; double word_similarity_threshold = 0.6f; +double strict_word_similarity_threshold = 0.5f; void _PG_init(void); @@ -26,12 +27,17 @@ PG_FUNCTION_INFO_V1(show_limit); PG_FUNCTION_INFO_V1(show_trgm); PG_FUNCTION_INFO_V1(similarity); PG_FUNCTION_INFO_V1(word_similarity); +PG_FUNCTION_INFO_V1(strict_word_similarity); PG_FUNCTION_INFO_V1(similarity_dist); PG_FUNCTION_INFO_V1(similarity_op); PG_FUNCTION_INFO_V1(word_similarity_op); PG_FUNCTION_INFO_V1(word_similarity_commutator_op); PG_FUNCTION_INFO_V1(word_similarity_dist_op); PG_FUNCTION_INFO_V1(word_similarity_dist_commutator_op); +PG_FUNCTION_INFO_V1(strict_word_similarity_op); +PG_FUNCTION_INFO_V1(strict_word_similarity_commutator_op); +PG_FUNCTION_INFO_V1(strict_word_similarity_dist_op); +PG_FUNCTION_INFO_V1(strict_word_similarity_dist_commutator_op); /* Trigram with position */ typedef struct @@ -40,6 +46,17 @@ typedef struct int index; } pos_trgm; +/* Trigram bound type */ +typedef uint8 TrgmBound; +#define TRGM_BOUND_LEFT 0x01 /* trigram is left bound of word */ +#define TRGM_BOUND_RIGHT 0x02 /* trigram is right bound of word */ + +/* Word similarity 
flags */ +#define WORD_SIMILARITY_CHECK_ONLY 0x01 /* only check existence of similar + * search pattern in text */ +#define WORD_SIMILARITY_STRICT 0x02 /* force bounds of extent to match + * word bounds */ + /* * Module load callback */ @@ -71,6 +88,18 @@ _PG_init(void) NULL, NULL, NULL); + DefineCustomRealVariable("pg_trgm.strict_word_similarity_threshold", + "Sets the threshold used by the <<%% operator.", + "Valid range is 0.0 .. 1.0.", + &strict_word_similarity_threshold, + 0.5, + 0.0, + 1.0, + PGC_USERSET, + 0, + NULL, + NULL, + NULL); } /* @@ -95,6 +124,29 @@ set_limit(PG_FUNCTION_ARGS) PG_RETURN_FLOAT4(similarity_threshold); } + +/* + * Get similarity threshold for given index scan strategy number. + */ +double +index_strategy_get_limit(StrategyNumber strategy) +{ + switch (strategy) + { + case SimilarityStrategyNumber: + return similarity_threshold; + case WordSimilarityStrategyNumber: + return word_similarity_threshold; + case StrictWordSimilarityStrategyNumber: + return strict_word_similarity_threshold; + default: + elog(ERROR, "unrecognized strategy number: %d", strategy); + break; + } + + return 0.0; /* keep compiler quiet */ +} + /* * Deprecated function. * Use "pg_trgm.similarity_threshold" GUC variable instead of this function. @@ -235,11 +287,12 @@ make_trigrams(trgm *tptr, char *str, int bytelen, int charlen) * * trg: where to return the array of trigrams. * str: source string, of length slen bytes. + * bounds: where to return bounds of trigrams (if needed). * * Returns length of the generated array. */ static int -generate_trgm_only(trgm *trg, char *str, int slen) +generate_trgm_only(trgm *trg, char *str, int slen, TrgmBound *bounds) { trgm *tptr; char *buf; @@ -282,11 +335,13 @@ generate_trgm_only(trgm *trg, char *str, int slen) buf[LPADDING + bytelen] = ' '; buf[LPADDING + bytelen + 1] = ' '; - /* - * count trigrams - */ + /* Calculate trigrams marking their bounds if needed */ + if (bounds) + bounds[tptr - trg] |= TRGM_BOUND_LEFT; tptr = make_trigrams(tptr, buf, bytelen + LPADDING + RPADDING, charlen + LPADDING + RPADDING); + if (bounds) + bounds[tptr - trg - 1] |= TRGM_BOUND_RIGHT; } pfree(buf); @@ -328,7 +383,7 @@ generate_trgm(char *str, int slen) trg = (TRGM *) palloc(TRGMHDRSIZE + sizeof(trgm) * (slen / 2 + 1) * 3); trg->flag = ARRKEY; - len = generate_trgm_only(GETARR(trg), str, slen); + len = generate_trgm_only(GETARR(trg), str, slen, NULL); SET_VARSIZE(trg, CALCGTSIZE(ARRKEY, len)); if (len == 0) @@ -413,8 +468,8 @@ comp_ptrgm(const void *v1, const void *v2) * ulen1: count of unique trigrams of array "trg1". * len2: length of array "trg2" and array "trg2indexes". * len: length of the array "found". - * check_only: if true then only check existence of similar search pattern in - * text. + * lags: set of boolean flags parametrizing similarity calculation. + * bounds: whether each trigram is left/right bound of word. * * Returns word similarity. */ @@ -424,16 +479,32 @@ iterate_word_similarity(int *trg2indexes, int ulen1, int len2, int len, - bool check_only) + uint8 flags, + TrgmBound *bounds) { int *lastpos, i, ulen2 = 0, count = 0, upper = -1, - lower = -1; + lower; float4 smlr_cur, smlr_max = 0.0f; + double threshold; + + Assert(bounds || !(flags & WORD_SIMILARITY_STRICT)); + + /* Select appropriate threshold */ + threshold = (flags & WORD_SIMILARITY_STRICT) ? 
+ strict_word_similarity_threshold : + word_similarity_threshold; + + /* + * Consider first trigram as initial lower bount for strict word + * similarity, or initialize it later with first trigram present for plain + * word similarity. + */ + lower = (flags & WORD_SIMILARITY_STRICT) ? 0 : -1; /* Memorise last position of each trigram */ lastpos = (int *) palloc(sizeof(int) * len); @@ -456,8 +527,13 @@ iterate_word_similarity(int *trg2indexes, lastpos[trgindex] = i; } - /* Adjust lower bound if this trigram is present in required substring */ - if (found[trgindex]) + /* + * Adjust upper bound if trigram is upper bound of word for strict + * word similarity, or if trigram is present in required substring for + * plain word similarity + */ + if ((flags & WORD_SIMILARITY_STRICT) ? (bounds[i] & TRGM_BOUND_RIGHT) + : found[trgindex]) { int prev_lower, tmp_ulen2, @@ -473,30 +549,41 @@ iterate_word_similarity(int *trg2indexes, smlr_cur = CALCSML(count, ulen1, ulen2); - /* Also try to adjust upper bound for greater similarity */ + /* Also try to adjust lower bound for greater similarity */ tmp_count = count; tmp_ulen2 = ulen2; prev_lower = lower; for (tmp_lower = lower; tmp_lower <= upper; tmp_lower++) { - float smlr_tmp = CALCSML(tmp_count, ulen1, tmp_ulen2); + float smlr_tmp; int tmp_trgindex; - if (smlr_tmp > smlr_cur) - { - smlr_cur = smlr_tmp; - ulen2 = tmp_ulen2; - lower = tmp_lower; - count = tmp_count; - } - /* - * if we only check that word similarity is greater than - * pg_trgm.word_similarity_threshold we do not need to - * calculate a maximum similarity. + * Adjust lower bound only if trigram is lower bound of word + * for strict word similarity, or consider every trigram as + * lower bound for plain word similarity. */ - if (check_only && smlr_cur >= word_similarity_threshold) - break; + if (!(flags & WORD_SIMILARITY_STRICT) + || (bounds[tmp_lower] & TRGM_BOUND_LEFT)) + { + smlr_tmp = CALCSML(tmp_count, ulen1, tmp_ulen2); + if (smlr_tmp > smlr_cur) + { + smlr_cur = smlr_tmp; + ulen2 = tmp_ulen2; + lower = tmp_lower; + count = tmp_count; + } + + /* + * If we only check that word similarity is greater than + * threshold we do not need to calculate a maximum + * similarity. + */ + if ((flags & WORD_SIMILARITY_CHECK_ONLY) + && smlr_cur >= threshold) + break; + } tmp_trgindex = trg2indexes[tmp_lower]; if (lastpos[tmp_trgindex] == tmp_lower) @@ -510,11 +597,10 @@ iterate_word_similarity(int *trg2indexes, smlr_max = Max(smlr_max, smlr_cur); /* - * if we only check that word similarity is greater than - * pg_trgm.word_similarity_threshold we do not need to calculate a - * maximum similarity + * if we only check that word similarity is greater than threshold + * we do not need to calculate a maximum similarity. */ - if (check_only && smlr_max >= word_similarity_threshold) + if ((flags & WORD_SIMILARITY_CHECK_ONLY) && smlr_max >= threshold) break; for (tmp_lower = prev_lower; tmp_lower < lower; tmp_lower++) @@ -547,14 +633,13 @@ iterate_word_similarity(int *trg2indexes, * * str1: search pattern string, of length slen1 bytes. * str2: text in which we are looking for a word, of length slen2 bytes. - * check_only: if true then only check existence of similar search pattern in - * text. + * flags: set of boolean flags parametrizing similarity calculation. * * Returns word similarity. 
*/ static float4 calc_word_similarity(char *str1, int slen1, char *str2, int slen2, - bool check_only) + uint8 flags) { bool *found; pos_trgm *ptrg; @@ -568,15 +653,20 @@ calc_word_similarity(char *str1, int slen1, char *str2, int slen2, ulen1; int *trg2indexes; float4 result; + TrgmBound *bounds; protect_out_of_mem(slen1 + slen2); /* Make positional trigrams */ trg1 = (trgm *) palloc(sizeof(trgm) * (slen1 / 2 + 1) * 3); trg2 = (trgm *) palloc(sizeof(trgm) * (slen2 / 2 + 1) * 3); + if (flags & WORD_SIMILARITY_STRICT) + bounds = (TrgmBound *) palloc0(sizeof(TrgmBound) * (slen2 / 2 + 1) * 3); + else + bounds = NULL; - len1 = generate_trgm_only(trg1, str1, slen1); - len2 = generate_trgm_only(trg2, str2, slen2); + len1 = generate_trgm_only(trg1, str1, slen1, NULL); + len2 = generate_trgm_only(trg2, str2, slen2, bounds); ptrg = make_positional_trgm(trg1, len1, trg2, len2); len = len1 + len2; @@ -622,7 +712,7 @@ calc_word_similarity(char *str1, int slen1, char *str2, int slen2, /* Run iterative procedure to find maximum similarity with word */ result = iterate_word_similarity(trg2indexes, found, ulen1, len2, len, - check_only); + flags, bounds); pfree(trg2indexes); pfree(found); @@ -1081,7 +1171,23 @@ word_similarity(PG_FUNCTION_ARGS) res = calc_word_similarity(VARDATA_ANY(in1), VARSIZE_ANY_EXHDR(in1), VARDATA_ANY(in2), VARSIZE_ANY_EXHDR(in2), - false); + 0); + + PG_FREE_IF_COPY(in1, 0); + PG_FREE_IF_COPY(in2, 1); + PG_RETURN_FLOAT4(res); +} + +Datum +strict_word_similarity(PG_FUNCTION_ARGS) +{ + text *in1 = PG_GETARG_TEXT_PP(0); + text *in2 = PG_GETARG_TEXT_PP(1); + float4 res; + + res = calc_word_similarity(VARDATA_ANY(in1), VARSIZE_ANY_EXHDR(in1), + VARDATA_ANY(in2), VARSIZE_ANY_EXHDR(in2), + WORD_SIMILARITY_STRICT); PG_FREE_IF_COPY(in1, 0); PG_FREE_IF_COPY(in2, 1); @@ -1117,7 +1223,7 @@ word_similarity_op(PG_FUNCTION_ARGS) res = calc_word_similarity(VARDATA_ANY(in1), VARSIZE_ANY_EXHDR(in1), VARDATA_ANY(in2), VARSIZE_ANY_EXHDR(in2), - true); + WORD_SIMILARITY_CHECK_ONLY); PG_FREE_IF_COPY(in1, 0); PG_FREE_IF_COPY(in2, 1); @@ -1133,7 +1239,7 @@ word_similarity_commutator_op(PG_FUNCTION_ARGS) res = calc_word_similarity(VARDATA_ANY(in2), VARSIZE_ANY_EXHDR(in2), VARDATA_ANY(in1), VARSIZE_ANY_EXHDR(in1), - true); + WORD_SIMILARITY_CHECK_ONLY); PG_FREE_IF_COPY(in1, 0); PG_FREE_IF_COPY(in2, 1); @@ -1149,7 +1255,7 @@ word_similarity_dist_op(PG_FUNCTION_ARGS) res = calc_word_similarity(VARDATA_ANY(in1), VARSIZE_ANY_EXHDR(in1), VARDATA_ANY(in2), VARSIZE_ANY_EXHDR(in2), - false); + 0); PG_FREE_IF_COPY(in1, 0); PG_FREE_IF_COPY(in2, 1); @@ -1165,7 +1271,71 @@ word_similarity_dist_commutator_op(PG_FUNCTION_ARGS) res = calc_word_similarity(VARDATA_ANY(in2), VARSIZE_ANY_EXHDR(in2), VARDATA_ANY(in1), VARSIZE_ANY_EXHDR(in1), - false); + 0); + + PG_FREE_IF_COPY(in1, 0); + PG_FREE_IF_COPY(in2, 1); + PG_RETURN_FLOAT4(1.0 - res); +} + +Datum +strict_word_similarity_op(PG_FUNCTION_ARGS) +{ + text *in1 = PG_GETARG_TEXT_PP(0); + text *in2 = PG_GETARG_TEXT_PP(1); + float4 res; + + res = calc_word_similarity(VARDATA_ANY(in1), VARSIZE_ANY_EXHDR(in1), + VARDATA_ANY(in2), VARSIZE_ANY_EXHDR(in2), + WORD_SIMILARITY_CHECK_ONLY | WORD_SIMILARITY_STRICT); + + PG_FREE_IF_COPY(in1, 0); + PG_FREE_IF_COPY(in2, 1); + PG_RETURN_BOOL(res >= strict_word_similarity_threshold); +} + +Datum +strict_word_similarity_commutator_op(PG_FUNCTION_ARGS) +{ + text *in1 = PG_GETARG_TEXT_PP(0); + text *in2 = PG_GETARG_TEXT_PP(1); + float4 res; + + res = calc_word_similarity(VARDATA_ANY(in2), VARSIZE_ANY_EXHDR(in2), + VARDATA_ANY(in1), 
VARSIZE_ANY_EXHDR(in1), + WORD_SIMILARITY_CHECK_ONLY | WORD_SIMILARITY_STRICT); + + PG_FREE_IF_COPY(in1, 0); + PG_FREE_IF_COPY(in2, 1); + PG_RETURN_BOOL(res >= strict_word_similarity_threshold); +} + +Datum +strict_word_similarity_dist_op(PG_FUNCTION_ARGS) +{ + text *in1 = PG_GETARG_TEXT_PP(0); + text *in2 = PG_GETARG_TEXT_PP(1); + float4 res; + + res = calc_word_similarity(VARDATA_ANY(in1), VARSIZE_ANY_EXHDR(in1), + VARDATA_ANY(in2), VARSIZE_ANY_EXHDR(in2), + WORD_SIMILARITY_STRICT); + + PG_FREE_IF_COPY(in1, 0); + PG_FREE_IF_COPY(in2, 1); + PG_RETURN_FLOAT4(1.0 - res); +} + +Datum +strict_word_similarity_dist_commutator_op(PG_FUNCTION_ARGS) +{ + text *in1 = PG_GETARG_TEXT_PP(0); + text *in2 = PG_GETARG_TEXT_PP(1); + float4 res; + + res = calc_word_similarity(VARDATA_ANY(in2), VARSIZE_ANY_EXHDR(in2), + VARDATA_ANY(in1), VARSIZE_ANY_EXHDR(in1), + WORD_SIMILARITY_STRICT); PG_FREE_IF_COPY(in1, 0); PG_FREE_IF_COPY(in2, 1); diff --git a/contrib/pg_trgm/trgm_regexp.c b/contrib/pg_trgm/trgm_regexp.c index 1d474e2aac..547e7c094f 100644 --- a/contrib/pg_trgm/trgm_regexp.c +++ b/contrib/pg_trgm/trgm_regexp.c @@ -181,7 +181,7 @@ * 7) Mark state 3 final because state 5 of source NFA is marked as final. * * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * IDENTIFICATION @@ -634,7 +634,7 @@ createTrgmNFAInternal(regex_t *regex, TrgmPackedGraph **graph, * Main entry point for evaluating a graph during index scanning. * * The check[] array is indexed by trigram number (in the array of simple - * trigrams returned by createTrgmNFA), and holds TRUE for those trigrams + * trigrams returned by createTrgmNFA), and holds true for those trigrams * that are present in the index entry being checked. */ bool @@ -1451,7 +1451,7 @@ prefixContains(TrgmPrefix *prefix1, TrgmPrefix *prefix2) * Get vector of all color trigrams in graph and select which of them * to expand into simple trigrams. * - * Returns TRUE if OK, FALSE if exhausted resource limits. + * Returns true if OK, false if exhausted resource limits. 
*/ static bool selectColorTrigrams(TrgmNFA *trgmNFA) diff --git a/contrib/pg_visibility/pg_visibility.c b/contrib/pg_visibility/pg_visibility.c index 2cc9575d9f..944dea66fc 100644 --- a/contrib/pg_visibility/pg_visibility.c +++ b/contrib/pg_visibility/pg_visibility.c @@ -3,7 +3,7 @@ * pg_visibility.c * display visibility map information and page-level visibility bits * - * Copyright (c) 2016-2017, PostgreSQL Global Development Group + * Copyright (c) 2016-2018, PostgreSQL Global Development Group * * contrib/pg_visibility/pg_visibility.c *------------------------------------------------------------------------- diff --git a/contrib/pgcrypto/crypt-des.c b/contrib/pgcrypto/crypt-des.c index 60bdbb0c91..ed07fc4606 100644 --- a/contrib/pgcrypto/crypt-des.c +++ b/contrib/pgcrypto/crypt-des.c @@ -62,13 +62,10 @@ #include "postgres.h" #include "miscadmin.h" +#include "port/pg_bswap.h" #include "px-crypt.h" -/* for ntohl/htonl */ -#include -#include - #define _PASSWORD_EFMT1 '_' static const char _crypt_a64[] = @@ -206,18 +203,18 @@ static inline int ascii_to_bin(char ch) { if (ch > 'z') - return (0); + return 0; if (ch >= 'a') return (ch - 'a' + 38); if (ch > 'Z') - return (0); + return 0; if (ch >= 'A') return (ch - 'A' + 12); if (ch > '9') - return (0); + return 0; if (ch >= '.') return (ch - '.'); - return (0); + return 0; } static void @@ -408,8 +405,8 @@ des_setkey(const char *key) if (!des_initialised) des_init(); - rawkey0 = ntohl(*(const uint32 *) key); - rawkey1 = ntohl(*(const uint32 *) (key + 4)); + rawkey0 = pg_ntoh32(*(const uint32 *) key); + rawkey1 = pg_ntoh32(*(const uint32 *) (key + 4)); if ((rawkey0 | rawkey1) && rawkey0 == old_rawkey0 @@ -420,7 +417,7 @@ des_setkey(const char *key) * (which is weak and has bad parity anyway) in order to simplify the * starting conditions. */ - return (0); + return 0; } old_rawkey0 = rawkey0; old_rawkey1 = rawkey1; @@ -479,7 +476,7 @@ des_setkey(const char *key) | comp_maskr[6][(t1 >> 7) & 0x7f] | comp_maskr[7][t1 & 0x7f]; } - return (0); + return 0; } static int @@ -500,7 +497,7 @@ do_des(uint32 l_in, uint32 r_in, uint32 *l_out, uint32 *r_out, int count) int round; if (count == 0) - return (1); + return 1; else if (count > 0) { /* @@ -613,7 +610,7 @@ do_des(uint32 l_in, uint32 r_in, uint32 *l_out, uint32 *r_out, int count) | fp_maskr[5][(r >> 16) & 0xff] | fp_maskr[6][(r >> 8) & 0xff] | fp_maskr[7][r & 0xff]; - return (0); + return 0; } static int @@ -634,20 +631,20 @@ des_cipher(const char *in, char *out, long salt, int count) /* copy data to avoid assuming input is word-aligned */ memcpy(buffer, in, sizeof(buffer)); - rawl = ntohl(buffer[0]); - rawr = ntohl(buffer[1]); + rawl = pg_ntoh32(buffer[0]); + rawr = pg_ntoh32(buffer[1]); retval = do_des(rawl, rawr, &l_out, &r_out, count); if (retval) - return (retval); + return retval; - buffer[0] = htonl(l_out); - buffer[1] = htonl(r_out); + buffer[0] = pg_hton32(l_out); + buffer[1] = pg_hton32(r_out); /* copy data to avoid assuming output is word-aligned */ memcpy(out, buffer, sizeof(buffer)); - return (retval); + return retval; } char * @@ -680,7 +677,7 @@ px_crypt_des(const char *key, const char *setting) key++; } if (des_setkey((char *) keybuf)) - return (NULL); + return NULL; #ifndef DISABLE_XDES if (*setting == _PASSWORD_EFMT1) @@ -711,7 +708,7 @@ px_crypt_des(const char *key, const char *setting) * Encrypt the key with itself. */ if (des_cipher((char *) keybuf, (char *) keybuf, 0L, 1)) - return (NULL); + return NULL; /* * And XOR with the next 8 characters of the key. 
@@ -721,7 +718,7 @@ px_crypt_des(const char *key, const char *setting) *q++ ^= *key++ << 1; if (des_setkey((char *) keybuf)) - return (NULL); + return NULL; } StrNCpy(output, setting, 10); @@ -767,7 +764,7 @@ px_crypt_des(const char *key, const char *setting) * Do it. */ if (do_des(0L, 0L, &r0, &r1, count)) - return (NULL); + return NULL; /* * Now encode the result... @@ -790,5 +787,5 @@ px_crypt_des(const char *key, const char *setting) *p++ = _crypt_a64[l & 0x3f]; *p = 0; - return (output); + return output; } diff --git a/contrib/pgcrypto/expected/pgp-compression_1.out b/contrib/pgcrypto/expected/pgp-compression_1.out index 8a046d8496..655830ae14 100644 --- a/contrib/pgcrypto/expected/pgp-compression_1.out +++ b/contrib/pgcrypto/expected/pgp-compression_1.out @@ -18,25 +18,25 @@ select pgp_sym_decrypt( pgp_sym_encrypt('Secret message', 'key', 'compress-algo=0'), 'key', 'expect-compress-algo=0'); ERROR: generating random data is not supported by this build -DETAIL: This functionality requires a source of strong random numbers -HINT: You need to rebuild PostgreSQL using --enable-strong-random +DETAIL: This functionality requires a source of strong random numbers. +HINT: You need to rebuild PostgreSQL using --enable-strong-random. select pgp_sym_decrypt( pgp_sym_encrypt('Secret message', 'key', 'compress-algo=1'), 'key', 'expect-compress-algo=1'); ERROR: generating random data is not supported by this build -DETAIL: This functionality requires a source of strong random numbers -HINT: You need to rebuild PostgreSQL using --enable-strong-random +DETAIL: This functionality requires a source of strong random numbers. +HINT: You need to rebuild PostgreSQL using --enable-strong-random. select pgp_sym_decrypt( pgp_sym_encrypt('Secret message', 'key', 'compress-algo=2'), 'key', 'expect-compress-algo=2'); ERROR: generating random data is not supported by this build -DETAIL: This functionality requires a source of strong random numbers -HINT: You need to rebuild PostgreSQL using --enable-strong-random +DETAIL: This functionality requires a source of strong random numbers. +HINT: You need to rebuild PostgreSQL using --enable-strong-random. -- level=0 should turn compression off select pgp_sym_decrypt( pgp_sym_encrypt('Secret message', 'key', 'compress-algo=2, compress-level=0'), 'key', 'expect-compress-algo=0'); ERROR: generating random data is not supported by this build -DETAIL: This functionality requires a source of strong random numbers -HINT: You need to rebuild PostgreSQL using --enable-strong-random +DETAIL: This functionality requires a source of strong random numbers. +HINT: You need to rebuild PostgreSQL using --enable-strong-random. diff --git a/contrib/pgcrypto/expected/pgp-decrypt_1.out b/contrib/pgcrypto/expected/pgp-decrypt_1.out index 431bac2b41..f3df4e618a 100644 --- a/contrib/pgcrypto/expected/pgp-decrypt_1.out +++ b/contrib/pgcrypto/expected/pgp-decrypt_1.out @@ -367,8 +367,8 @@ a3nsOzKTXUfS9VyaXo8IrncM6n7fdaXpwba/3tNsAhJG4lDv1k4g9v8Ix2dfv6Rs -- check BUG #11905, problem with messages 6 less than a power of 2. select pgp_sym_decrypt(pgp_sym_encrypt(repeat('x',65530),'1'),'1') = repeat('x',65530); ERROR: generating random data is not supported by this build -DETAIL: This functionality requires a source of strong random numbers -HINT: You need to rebuild PostgreSQL using --enable-strong-random +DETAIL: This functionality requires a source of strong random numbers. +HINT: You need to rebuild PostgreSQL using --enable-strong-random. 
-- expected: true -- Negative tests -- Decryption with a certain incorrect key yields an apparent Literal Data @@ -390,8 +390,8 @@ ERROR: Wrong key or corrupt data -- Routine text/binary mismatch. select pgp_sym_decrypt(pgp_sym_encrypt_bytea('P', 'key'), 'key', 'debug=1'); ERROR: generating random data is not supported by this build -DETAIL: This functionality requires a source of strong random numbers -HINT: You need to rebuild PostgreSQL using --enable-strong-random +DETAIL: This functionality requires a source of strong random numbers. +HINT: You need to rebuild PostgreSQL using --enable-strong-random. -- Decryption with a certain incorrect key yields an apparent BZip2-compressed -- plaintext. Ciphertext source: iterative pgp_sym_encrypt('secret', 'key') -- until the random prefix gave rise to that property. diff --git a/contrib/pgcrypto/expected/pgp-encrypt_1.out b/contrib/pgcrypto/expected/pgp-encrypt_1.out index 48346e8e7e..72f346414a 100644 --- a/contrib/pgcrypto/expected/pgp-encrypt_1.out +++ b/contrib/pgcrypto/expected/pgp-encrypt_1.out @@ -5,8 +5,8 @@ SET bytea_output TO escape; select pgp_sym_decrypt(pgp_sym_encrypt('Secret.', 'key'), 'key'); ERROR: generating random data is not supported by this build -DETAIL: This functionality requires a source of strong random numbers -HINT: You need to rebuild PostgreSQL using --enable-strong-random +DETAIL: This functionality requires a source of strong random numbers. +HINT: You need to rebuild PostgreSQL using --enable-strong-random. -- check whether the defaults are ok select pgp_sym_decrypt(pgp_sym_encrypt('Secret.', 'key'), 'key', 'expect-cipher-algo=aes128, @@ -17,8 +17,8 @@ select pgp_sym_decrypt(pgp_sym_encrypt('Secret.', 'key'), expect-compress-algo=0 '); ERROR: generating random data is not supported by this build -DETAIL: This functionality requires a source of strong random numbers -HINT: You need to rebuild PostgreSQL using --enable-strong-random +DETAIL: This functionality requires a source of strong random numbers. +HINT: You need to rebuild PostgreSQL using --enable-strong-random. -- maybe the expect- stuff simply does not work select pgp_sym_decrypt(pgp_sym_encrypt('Secret.', 'key'), 'key', 'expect-cipher-algo=bf, @@ -29,133 +29,133 @@ select pgp_sym_decrypt(pgp_sym_encrypt('Secret.', 'key'), expect-compress-algo=1 '); ERROR: generating random data is not supported by this build -DETAIL: This functionality requires a source of strong random numbers -HINT: You need to rebuild PostgreSQL using --enable-strong-random +DETAIL: This functionality requires a source of strong random numbers. +HINT: You need to rebuild PostgreSQL using --enable-strong-random. -- bytea as text select pgp_sym_decrypt(pgp_sym_encrypt_bytea('Binary', 'baz'), 'baz'); ERROR: generating random data is not supported by this build -DETAIL: This functionality requires a source of strong random numbers -HINT: You need to rebuild PostgreSQL using --enable-strong-random +DETAIL: This functionality requires a source of strong random numbers. +HINT: You need to rebuild PostgreSQL using --enable-strong-random. -- text as bytea select pgp_sym_decrypt_bytea(pgp_sym_encrypt('Text', 'baz'), 'baz'); ERROR: generating random data is not supported by this build -DETAIL: This functionality requires a source of strong random numbers -HINT: You need to rebuild PostgreSQL using --enable-strong-random +DETAIL: This functionality requires a source of strong random numbers. +HINT: You need to rebuild PostgreSQL using --enable-strong-random. 
-- algorithm change select pgp_sym_decrypt( pgp_sym_encrypt('Secret.', 'key', 'cipher-algo=bf'), 'key', 'expect-cipher-algo=bf'); ERROR: generating random data is not supported by this build -DETAIL: This functionality requires a source of strong random numbers -HINT: You need to rebuild PostgreSQL using --enable-strong-random +DETAIL: This functionality requires a source of strong random numbers. +HINT: You need to rebuild PostgreSQL using --enable-strong-random. select pgp_sym_decrypt( pgp_sym_encrypt('Secret.', 'key', 'cipher-algo=aes'), 'key', 'expect-cipher-algo=aes128'); ERROR: generating random data is not supported by this build -DETAIL: This functionality requires a source of strong random numbers -HINT: You need to rebuild PostgreSQL using --enable-strong-random +DETAIL: This functionality requires a source of strong random numbers. +HINT: You need to rebuild PostgreSQL using --enable-strong-random. select pgp_sym_decrypt( pgp_sym_encrypt('Secret.', 'key', 'cipher-algo=aes192'), 'key', 'expect-cipher-algo=aes192'); ERROR: generating random data is not supported by this build -DETAIL: This functionality requires a source of strong random numbers -HINT: You need to rebuild PostgreSQL using --enable-strong-random +DETAIL: This functionality requires a source of strong random numbers. +HINT: You need to rebuild PostgreSQL using --enable-strong-random. -- s2k change select pgp_sym_decrypt( pgp_sym_encrypt('Secret.', 'key', 's2k-mode=0'), 'key', 'expect-s2k-mode=0'); ERROR: generating random data is not supported by this build -DETAIL: This functionality requires a source of strong random numbers -HINT: You need to rebuild PostgreSQL using --enable-strong-random +DETAIL: This functionality requires a source of strong random numbers. +HINT: You need to rebuild PostgreSQL using --enable-strong-random. select pgp_sym_decrypt( pgp_sym_encrypt('Secret.', 'key', 's2k-mode=1'), 'key', 'expect-s2k-mode=1'); ERROR: generating random data is not supported by this build -DETAIL: This functionality requires a source of strong random numbers -HINT: You need to rebuild PostgreSQL using --enable-strong-random +DETAIL: This functionality requires a source of strong random numbers. +HINT: You need to rebuild PostgreSQL using --enable-strong-random. select pgp_sym_decrypt( pgp_sym_encrypt('Secret.', 'key', 's2k-mode=3'), 'key', 'expect-s2k-mode=3'); ERROR: generating random data is not supported by this build -DETAIL: This functionality requires a source of strong random numbers -HINT: You need to rebuild PostgreSQL using --enable-strong-random +DETAIL: This functionality requires a source of strong random numbers. +HINT: You need to rebuild PostgreSQL using --enable-strong-random. -- s2k count change select pgp_sym_decrypt( pgp_sym_encrypt('Secret.', 'key', 's2k-count=1024'), 'key', 'expect-s2k-count=1024'); ERROR: generating random data is not supported by this build -DETAIL: This functionality requires a source of strong random numbers -HINT: You need to rebuild PostgreSQL using --enable-strong-random +DETAIL: This functionality requires a source of strong random numbers. +HINT: You need to rebuild PostgreSQL using --enable-strong-random. 
-- s2k_count rounds up select pgp_sym_decrypt( pgp_sym_encrypt('Secret.', 'key', 's2k-count=65000000'), 'key', 'expect-s2k-count=65000000'); ERROR: generating random data is not supported by this build -DETAIL: This functionality requires a source of strong random numbers -HINT: You need to rebuild PostgreSQL using --enable-strong-random +DETAIL: This functionality requires a source of strong random numbers. +HINT: You need to rebuild PostgreSQL using --enable-strong-random. -- s2k digest change select pgp_sym_decrypt( pgp_sym_encrypt('Secret.', 'key', 's2k-digest-algo=md5'), 'key', 'expect-s2k-digest-algo=md5'); ERROR: generating random data is not supported by this build -DETAIL: This functionality requires a source of strong random numbers -HINT: You need to rebuild PostgreSQL using --enable-strong-random +DETAIL: This functionality requires a source of strong random numbers. +HINT: You need to rebuild PostgreSQL using --enable-strong-random. select pgp_sym_decrypt( pgp_sym_encrypt('Secret.', 'key', 's2k-digest-algo=sha1'), 'key', 'expect-s2k-digest-algo=sha1'); ERROR: generating random data is not supported by this build -DETAIL: This functionality requires a source of strong random numbers -HINT: You need to rebuild PostgreSQL using --enable-strong-random +DETAIL: This functionality requires a source of strong random numbers. +HINT: You need to rebuild PostgreSQL using --enable-strong-random. -- sess key select pgp_sym_decrypt( pgp_sym_encrypt('Secret.', 'key', 'sess-key=0'), 'key', 'expect-sess-key=0'); ERROR: generating random data is not supported by this build -DETAIL: This functionality requires a source of strong random numbers -HINT: You need to rebuild PostgreSQL using --enable-strong-random +DETAIL: This functionality requires a source of strong random numbers. +HINT: You need to rebuild PostgreSQL using --enable-strong-random. select pgp_sym_decrypt( pgp_sym_encrypt('Secret.', 'key', 'sess-key=1'), 'key', 'expect-sess-key=1'); ERROR: generating random data is not supported by this build -DETAIL: This functionality requires a source of strong random numbers -HINT: You need to rebuild PostgreSQL using --enable-strong-random +DETAIL: This functionality requires a source of strong random numbers. +HINT: You need to rebuild PostgreSQL using --enable-strong-random. select pgp_sym_decrypt( pgp_sym_encrypt('Secret.', 'key', 'sess-key=1, cipher-algo=bf'), 'key', 'expect-sess-key=1, expect-cipher-algo=bf'); ERROR: generating random data is not supported by this build -DETAIL: This functionality requires a source of strong random numbers -HINT: You need to rebuild PostgreSQL using --enable-strong-random +DETAIL: This functionality requires a source of strong random numbers. +HINT: You need to rebuild PostgreSQL using --enable-strong-random. select pgp_sym_decrypt( pgp_sym_encrypt('Secret.', 'key', 'sess-key=1, cipher-algo=aes192'), 'key', 'expect-sess-key=1, expect-cipher-algo=aes192'); ERROR: generating random data is not supported by this build -DETAIL: This functionality requires a source of strong random numbers -HINT: You need to rebuild PostgreSQL using --enable-strong-random +DETAIL: This functionality requires a source of strong random numbers. +HINT: You need to rebuild PostgreSQL using --enable-strong-random. 
select pgp_sym_decrypt( pgp_sym_encrypt('Secret.', 'key', 'sess-key=1, cipher-algo=aes256'), 'key', 'expect-sess-key=1, expect-cipher-algo=aes256'); ERROR: generating random data is not supported by this build -DETAIL: This functionality requires a source of strong random numbers -HINT: You need to rebuild PostgreSQL using --enable-strong-random +DETAIL: This functionality requires a source of strong random numbers. +HINT: You need to rebuild PostgreSQL using --enable-strong-random. -- no mdc select pgp_sym_decrypt( pgp_sym_encrypt('Secret.', 'key', 'disable-mdc=1'), 'key', 'expect-disable-mdc=1'); ERROR: generating random data is not supported by this build -DETAIL: This functionality requires a source of strong random numbers -HINT: You need to rebuild PostgreSQL using --enable-strong-random +DETAIL: This functionality requires a source of strong random numbers. +HINT: You need to rebuild PostgreSQL using --enable-strong-random. -- crlf select encode(pgp_sym_decrypt_bytea( pgp_sym_encrypt(E'1\n2\n3\r\n', 'key', 'convert-crlf=1'), 'key'), 'hex'); ERROR: generating random data is not supported by this build -DETAIL: This functionality requires a source of strong random numbers -HINT: You need to rebuild PostgreSQL using --enable-strong-random +DETAIL: This functionality requires a source of strong random numbers. +HINT: You need to rebuild PostgreSQL using --enable-strong-random. -- conversion should be lossless select encode(digest(pgp_sym_decrypt( pgp_sym_encrypt(E'\r\n0\n1\r\r\n\n2\r', 'key', 'convert-crlf=1'), 'key', 'convert-crlf=1'), 'sha1'), 'hex') as result, encode(digest(E'\r\n0\n1\r\r\n\n2\r', 'sha1'), 'hex') as expect; ERROR: generating random data is not supported by this build -DETAIL: This functionality requires a source of strong random numbers -HINT: You need to rebuild PostgreSQL using --enable-strong-random +DETAIL: This functionality requires a source of strong random numbers. +HINT: You need to rebuild PostgreSQL using --enable-strong-random. diff --git a/contrib/pgcrypto/expected/pgp-pubkey-encrypt_1.out b/contrib/pgcrypto/expected/pgp-pubkey-encrypt_1.out index 41c54e4716..6da4c6da41 100644 --- a/contrib/pgcrypto/expected/pgp-pubkey-encrypt_1.out +++ b/contrib/pgcrypto/expected/pgp-pubkey-encrypt_1.out @@ -9,29 +9,29 @@ select pgp_pub_decrypt( dearmor(seckey)) from keytbl where keytbl.id=1; ERROR: generating random data is not supported by this build -DETAIL: This functionality requires a source of strong random numbers -HINT: You need to rebuild PostgreSQL using --enable-strong-random +DETAIL: This functionality requires a source of strong random numbers. +HINT: You need to rebuild PostgreSQL using --enable-strong-random. select pgp_pub_decrypt( pgp_pub_encrypt('Secret msg', dearmor(pubkey)), dearmor(seckey)) from keytbl where keytbl.id=2; ERROR: generating random data is not supported by this build -DETAIL: This functionality requires a source of strong random numbers -HINT: You need to rebuild PostgreSQL using --enable-strong-random +DETAIL: This functionality requires a source of strong random numbers. +HINT: You need to rebuild PostgreSQL using --enable-strong-random. select pgp_pub_decrypt( pgp_pub_encrypt('Secret msg', dearmor(pubkey)), dearmor(seckey)) from keytbl where keytbl.id=3; ERROR: generating random data is not supported by this build -DETAIL: This functionality requires a source of strong random numbers -HINT: You need to rebuild PostgreSQL using --enable-strong-random +DETAIL: This functionality requires a source of strong random numbers. 
+HINT: You need to rebuild PostgreSQL using --enable-strong-random. select pgp_pub_decrypt( pgp_pub_encrypt('Secret msg', dearmor(pubkey)), dearmor(seckey)) from keytbl where keytbl.id=6; ERROR: generating random data is not supported by this build -DETAIL: This functionality requires a source of strong random numbers -HINT: You need to rebuild PostgreSQL using --enable-strong-random +DETAIL: This functionality requires a source of strong random numbers. +HINT: You need to rebuild PostgreSQL using --enable-strong-random. -- try with rsa-sign only select pgp_pub_decrypt( pgp_pub_encrypt('Secret msg', dearmor(pubkey)), @@ -50,13 +50,13 @@ select pgp_pub_decrypt_bytea( dearmor(seckey)) from keytbl where keytbl.id=1; ERROR: generating random data is not supported by this build -DETAIL: This functionality requires a source of strong random numbers -HINT: You need to rebuild PostgreSQL using --enable-strong-random +DETAIL: This functionality requires a source of strong random numbers. +HINT: You need to rebuild PostgreSQL using --enable-strong-random. -- and bytea-to-text? select pgp_pub_decrypt( pgp_pub_encrypt_bytea('Secret msg', dearmor(pubkey)), dearmor(seckey)) from keytbl where keytbl.id=1; ERROR: generating random data is not supported by this build -DETAIL: This functionality requires a source of strong random numbers -HINT: You need to rebuild PostgreSQL using --enable-strong-random +DETAIL: This functionality requires a source of strong random numbers. +HINT: You need to rebuild PostgreSQL using --enable-strong-random. diff --git a/contrib/pgcrypto/expected/rijndael.out b/contrib/pgcrypto/expected/rijndael.out index 14b2650c32..5366604a3d 100644 --- a/contrib/pgcrypto/expected/rijndael.out +++ b/contrib/pgcrypto/expected/rijndael.out @@ -1,5 +1,5 @@ -- --- AES / Rijndael-128 cipher +-- AES cipher (aka Rijndael-128, -192, or -256) -- -- ensure consistent test output regardless of the default bytea format SET bytea_output TO escape; diff --git a/contrib/pgcrypto/imath.c b/contrib/pgcrypto/imath.c index cd528bfd83..b94a51b81a 100644 --- a/contrib/pgcrypto/imath.c +++ b/contrib/pgcrypto/imath.c @@ -1254,11 +1254,9 @@ mp_int_compare(mp_int a, mp_int b) * If they're both zero or positive, the normal comparison applies; if * both negative, the sense is reversed. */ - if (sa == MP_ZPOS) - return cmp; - else - return -cmp; - + if (sa != MP_ZPOS) + INVERT_COMPARE_RESULT(cmp); + return cmp; } else { @@ -1314,10 +1312,9 @@ mp_int_compare_value(mp_int z, int value) { cmp = s_vcmp(z, value); - if (vsign == MP_ZPOS) - return cmp; - else - return -cmp; + if (vsign != MP_ZPOS) + INVERT_COMPARE_RESULT(cmp); + return cmp; } else { diff --git a/contrib/pgcrypto/openssl.c b/contrib/pgcrypto/openssl.c index f71a933407..7d686f3940 100644 --- a/contrib/pgcrypto/openssl.c +++ b/contrib/pgcrypto/openssl.c @@ -408,7 +408,7 @@ gen_ossl_encrypt(PX_Cipher *c, const uint8 *data, unsigned dlen, /* Blowfish */ /* - * Check if strong crypto is supported. Some openssl installations + * Check if strong crypto is supported. Some OpenSSL installations * support only short keys and unfortunately BF_set_key does not return any * error value. This function tests if is possible to use strong key. 
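Related to the rijndael comment update above ("AES cipher (aka Rijndael-128, -192, or -256)"), a minimal pgcrypto sketch, assuming a build with strong random support: in the raw encrypt()/decrypt() interface the key length is what selects the AES variant.

CREATE EXTENSION IF NOT EXISTS pgcrypto;

-- 16-, 24-, and 32-byte keys select AES-128, AES-192, and AES-256 respectively.
SELECT encrypt('secret data', '0123456789abcdef',                 'aes');  -- AES-128
SELECT encrypt('secret data', '0123456789abcdef01234567',         'aes');  -- AES-192
SELECT encrypt('secret data', '0123456789abcdef0123456789abcdef', 'aes');  -- AES-256
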
*/ diff --git a/contrib/pgcrypto/pgcrypto.c b/contrib/pgcrypto/pgcrypto.c index e09f3378da..de09ececcf 100644 --- a/contrib/pgcrypto/pgcrypto.c +++ b/contrib/pgcrypto/pgcrypto.c @@ -47,7 +47,7 @@ PG_MODULE_MAGIC; /* private stuff */ typedef int (*PFN) (const char *name, void **res); -static void *find_provider(text *name, PFN pf, char *desc, int silent); +static void *find_provider(text *name, PFN pf, const char *desc, int silent); /* SQL function: hash(bytea, text) returns bytea */ PG_FUNCTION_INFO_V1(pg_digest); @@ -474,7 +474,7 @@ pg_random_uuid(PG_FUNCTION_ARGS) static void * find_provider(text *name, PFN provider_lookup, - char *desc, int silent) + const char *desc, int silent) { void *res; char *buf; diff --git a/contrib/pgcrypto/pgp-armor.c b/contrib/pgcrypto/pgp-armor.c index 5c8355808a..aa5b563a31 100644 --- a/contrib/pgcrypto/pgp-armor.c +++ b/contrib/pgcrypto/pgp-armor.c @@ -42,7 +42,7 @@ static const unsigned char _base64[] = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/"; static int -b64_encode(const uint8 *src, unsigned len, uint8 *dst) +pg_base64_encode(const uint8 *src, unsigned len, uint8 *dst) { uint8 *p, *lend = dst + 76; @@ -92,7 +92,7 @@ b64_encode(const uint8 *src, unsigned len, uint8 *dst) /* probably should use lookup table */ static int -b64_decode(const uint8 *src, unsigned len, uint8 *dst) +pg_base64_decode(const uint8 *src, unsigned len, uint8 *dst) { const uint8 *srcend = src + len, *s = src; @@ -160,7 +160,7 @@ b64_decode(const uint8 *src, unsigned len, uint8 *dst) } static unsigned -b64_enc_len(unsigned srclen) +pg_base64_enc_len(unsigned srclen) { /* * 3 bytes will be converted to 4, linefeed after 76 chars @@ -169,7 +169,7 @@ b64_enc_len(unsigned srclen) } static unsigned -b64_dec_len(unsigned srclen) +pg_base64_dec_len(unsigned srclen) { return (srclen * 3) >> 2; } @@ -218,11 +218,11 @@ pgp_armor_encode(const uint8 *src, unsigned len, StringInfo dst, appendStringInfo(dst, "%s: %s\n", keys[n], values[n]); appendStringInfoChar(dst, '\n'); - /* make sure we have enough room to b64_encode() */ - b64len = b64_enc_len(len); + /* make sure we have enough room to pg_base64_encode() */ + b64len = pg_base64_enc_len(len); enlargeStringInfo(dst, (int) b64len); - res = b64_encode(src, len, (uint8 *) dst->data + dst->len); + res = pg_base64_encode(src, len, (uint8 *) dst->data + dst->len); if (res > b64len) elog(FATAL, "overflow - encode estimate too small"); dst->len += res; @@ -358,14 +358,14 @@ pgp_armor_decode(const uint8 *src, int len, StringInfo dst) goto out; /* decode crc */ - if (b64_decode(p + 1, 4, buf) != 3) + if (pg_base64_decode(p + 1, 4, buf) != 3) goto out; crc = (((long) buf[0]) << 16) + (((long) buf[1]) << 8) + (long) buf[2]; /* decode data */ - blen = (int) b64_dec_len(len); + blen = (int) pg_base64_dec_len(len); enlargeStringInfo(dst, blen); - res = b64_decode(base64_start, base64_end - base64_start, (uint8 *) dst->data); + res = pg_base64_decode(base64_start, base64_end - base64_start, (uint8 *) dst->data); if (res > blen) elog(FATAL, "overflow - decode estimate too small"); if (res >= 0) diff --git a/contrib/pgcrypto/px.c b/contrib/pgcrypto/px.c index 8ec920224a..aea8e863af 100644 --- a/contrib/pgcrypto/px.c +++ b/contrib/pgcrypto/px.c @@ -105,8 +105,8 @@ px_THROW_ERROR(int err) ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), errmsg("generating random data is not supported by this build"), - errdetail("This functionality requires a source of strong random numbers"), - errhint("You need to rebuild PostgreSQL using 
--enable-strong-random"))); + errdetail("This functionality requires a source of strong random numbers."), + errhint("You need to rebuild PostgreSQL using --enable-strong-random."))); #endif } else diff --git a/contrib/pgcrypto/rijndael.c b/contrib/pgcrypto/rijndael.c index 0c44a04af9..c22382e14a 100644 --- a/contrib/pgcrypto/rijndael.c +++ b/contrib/pgcrypto/rijndael.c @@ -164,7 +164,7 @@ gen_tabs(void) q; /* log and power tables for GF(2**8) finite field with */ - /* 0x11b as modular polynomial - the simplest prmitive */ + /* 0x11b as modular polynomial - the simplest primitive */ /* root is 0x11, used here to generate the tables */ for (i = 0, p = 1; i < 256; ++i) diff --git a/contrib/pgcrypto/sql/rijndael.sql b/contrib/pgcrypto/sql/rijndael.sql index bfbf95d39b..a9bcbf33d0 100644 --- a/contrib/pgcrypto/sql/rijndael.sql +++ b/contrib/pgcrypto/sql/rijndael.sql @@ -1,5 +1,5 @@ -- --- AES / Rijndael-128 cipher +-- AES cipher (aka Rijndael-128, -192, or -256) -- -- ensure consistent test output regardless of the default bytea format SET bytea_output TO escape; diff --git a/contrib/pgrowlocks/pgrowlocks.c b/contrib/pgrowlocks/pgrowlocks.c index eabca65bd2..94e051d642 100644 --- a/contrib/pgrowlocks/pgrowlocks.c +++ b/contrib/pgrowlocks/pgrowlocks.c @@ -121,7 +121,7 @@ pgrowlocks(PG_FUNCTION_ARGS) aclresult = is_member_of_role(GetUserId(), DEFAULT_ROLE_STAT_SCAN_TABLES) ? ACLCHECK_OK : ACLCHECK_NO_PRIV; if (aclresult != ACLCHECK_OK) - aclcheck_error(aclresult, ACL_KIND_CLASS, + aclcheck_error(aclresult, get_relkind_objtype(rel->rd_rel->relkind), RelationGetRelationName(rel)); scan = heap_beginscan(rel, GetActiveSnapshot(), 0, NULL); diff --git a/contrib/pgstattuple/expected/pgstattuple.out b/contrib/pgstattuple/expected/pgstattuple.out index 20b5585d03..9858ea69d4 100644 --- a/contrib/pgstattuple/expected/pgstattuple.out +++ b/contrib/pgstattuple/expected/pgstattuple.out @@ -48,7 +48,7 @@ select version, tree_level, from pgstatindex('test_pkey'); version | tree_level | index_size | root_block_no | internal_pages | leaf_pages | empty_pages | deleted_pages | avg_leaf_density | leaf_fragmentation ---------+------------+------------+---------------+----------------+------------+-------------+---------------+------------------+-------------------- - 2 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | NaN | NaN + 3 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | NaN | NaN (1 row) select version, tree_level, @@ -58,7 +58,7 @@ select version, tree_level, from pgstatindex('test_pkey'::text); version | tree_level | index_size | root_block_no | internal_pages | leaf_pages | empty_pages | deleted_pages | avg_leaf_density | leaf_fragmentation ---------+------------+------------+---------------+----------------+------------+-------------+---------------+------------------+-------------------- - 2 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | NaN | NaN + 3 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | NaN | NaN (1 row) select version, tree_level, @@ -68,7 +68,7 @@ select version, tree_level, from pgstatindex('test_pkey'::name); version | tree_level | index_size | root_block_no | internal_pages | leaf_pages | empty_pages | deleted_pages | avg_leaf_density | leaf_fragmentation ---------+------------+------------+---------------+----------------+------------+-------------+---------------+------------------+-------------------- - 2 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | NaN | NaN + 3 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | NaN | NaN (1 row) select version, tree_level, @@ -78,7 +78,7 @@ select version, tree_level, from pgstatindex('test_pkey'::regclass); version | tree_level | index_size | 
root_block_no | internal_pages | leaf_pages | empty_pages | deleted_pages | avg_leaf_density | leaf_fragmentation ---------+------------+------------+---------------+----------------+------------+-------------+---------------+------------------+-------------------- - 2 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | NaN | NaN + 3 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | NaN | NaN (1 row) select pg_relpages('test'); @@ -141,20 +141,23 @@ select * from pgstathashindex('test_hashidx'); select pgstatginindex('test_pkey'); ERROR: relation "test_pkey" is not a GIN index select pgstathashindex('test_pkey'); -ERROR: relation "test_pkey" is not a HASH index +ERROR: relation "test_pkey" is not a hash index select pgstatindex('test_ginidx'); ERROR: relation "test_ginidx" is not a btree index select pgstathashindex('test_ginidx'); -ERROR: relation "test_ginidx" is not a HASH index +ERROR: relation "test_ginidx" is not a hash index select pgstatindex('test_hashidx'); ERROR: relation "test_hashidx" is not a btree index select pgstatginindex('test_hashidx'); ERROR: relation "test_hashidx" is not a GIN index -- check that using any of these functions with unsupported relations will fail create table test_partitioned (a int) partition by range (a); +create index test_partitioned_index on test_partitioned(a); -- these should all fail select pgstattuple('test_partitioned'); ERROR: "test_partitioned" (partitioned table) is not supported +select pgstattuple('test_partitioned_index'); +ERROR: "test_partitioned_index" (partitioned index) is not supported select pgstattuple_approx('test_partitioned'); ERROR: "test_partitioned" is not a table or materialized view select pg_relpages('test_partitioned'); @@ -229,7 +232,7 @@ create index test_partition_hash_idx on test_partition using hash (a); select pgstatindex('test_partition_idx'); pgstatindex ------------------------------ - (2,0,8192,0,0,0,0,0,NaN,NaN) + (3,0,8192,0,0,0,0,0,NaN,NaN) (1 row) select pgstathashindex('test_partition_hash_idx'); diff --git a/contrib/pgstattuple/pgstatapprox.c b/contrib/pgstattuple/pgstatapprox.c index 5bf06138a5..ef33cacec6 100644 --- a/contrib/pgstattuple/pgstatapprox.c +++ b/contrib/pgstattuple/pgstatapprox.c @@ -3,7 +3,7 @@ * pgstatapprox.c * Bloat estimation functions * - * Copyright (c) 2014-2017, PostgreSQL Global Development Group + * Copyright (c) 2014-2018, PostgreSQL Global Development Group * * IDENTIFICATION * contrib/pgstattuple/pgstatapprox.c @@ -68,7 +68,6 @@ statapprox_heap(Relation rel, output_type *stat) Buffer vmbuffer = InvalidBuffer; BufferAccessStrategy bstrategy; TransactionId OldestXmin; - uint64 misc_count = 0; OldestXmin = GetOldestXmin(rel, PROCARRAY_FLAGS_VACUUM); bstrategy = GetAccessStrategy(BAS_BULKREAD); @@ -114,14 +113,15 @@ statapprox_heap(Relation rel, output_type *stat) else stat->free_space += BLCKSZ - SizeOfPageHeaderData; + /* We may count the page as scanned even if it's new/empty */ + scanned++; + if (PageIsNew(page) || PageIsEmpty(page)) { UnlockReleaseBuffer(buf); continue; } - scanned++; - /* * Look at each tuple on the page and decide whether it's live or * dead, then count it and its size. Unlike lazy_scan_heap, we can @@ -153,25 +153,23 @@ statapprox_heap(Relation rel, output_type *stat) tuple.t_tableOid = RelationGetRelid(rel); /* - * We count live and dead tuples, but we also need to add up - * others in order to feed vac_estimate_reltuples. + * We follow VACUUM's lead in counting INSERT_IN_PROGRESS tuples + * as "dead" while DELETE_IN_PROGRESS tuples are "live". 
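The expected-output changes above are user-visible; a short sketch, with placeholder object names mirroring the regression test:

CREATE EXTENSION IF NOT EXISTS pgstattuple;
CREATE TABLE test (a int PRIMARY KEY);

-- The btree metapage version is now reported as 3 rather than 2.
SELECT version FROM pgstatindex('test_pkey');

-- Error wording now says "hash index" in lower case.
SELECT pgstathashindex('test_pkey');  -- ERROR: relation "test_pkey" is not a hash index

-- Partitioned indexes are rejected just like partitioned tables.
CREATE TABLE test_partitioned (a int) PARTITION BY RANGE (a);
CREATE INDEX test_partitioned_index ON test_partitioned (a);
SELECT pgstattuple('test_partitioned_index');  -- ERROR: "test_partitioned_index" (partitioned index) is not supported
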
We don't + * bother distinguishing tuples inserted/deleted by our own + * transaction. */ switch (HeapTupleSatisfiesVacuum(&tuple, OldestXmin, buf)) { - case HEAPTUPLE_RECENTLY_DEAD: - misc_count++; - /* Fall through */ - case HEAPTUPLE_DEAD: - stat->dead_tuple_len += tuple.t_len; - stat->dead_tuple_count++; - break; case HEAPTUPLE_LIVE: + case HEAPTUPLE_DELETE_IN_PROGRESS: stat->tuple_len += tuple.t_len; stat->tuple_count++; break; + case HEAPTUPLE_DEAD: + case HEAPTUPLE_RECENTLY_DEAD: case HEAPTUPLE_INSERT_IN_PROGRESS: - case HEAPTUPLE_DELETE_IN_PROGRESS: - misc_count++; + stat->dead_tuple_len += tuple.t_len; + stat->dead_tuple_count++; break; default: elog(ERROR, "unexpected HeapTupleSatisfiesVacuum result"); @@ -184,8 +182,16 @@ statapprox_heap(Relation rel, output_type *stat) stat->table_len = (uint64) nblocks * BLCKSZ; - stat->tuple_count = vac_estimate_reltuples(rel, false, nblocks, scanned, - stat->tuple_count + misc_count); + /* + * We don't know how many tuples are in the pages we didn't scan, so + * extrapolate the live-tuple count to the whole table in the same way + * that VACUUM does. (Like VACUUM, we're not taking a random sample, so + * just extrapolating linearly seems unsafe.) There should be no dead + * tuples in all-visible pages, so no correction is needed for that, and + * we already accounted for the space in those pages, too. + */ + stat->tuple_count = vac_estimate_reltuples(rel, nblocks, scanned, + stat->tuple_count); /* * Calculate percentages if the relation has one or more pages. diff --git a/contrib/pgstattuple/pgstatindex.c b/contrib/pgstattuple/pgstatindex.c index 9365ba7e02..db396c8c4b 100644 --- a/contrib/pgstattuple/pgstatindex.c +++ b/contrib/pgstattuple/pgstatindex.c @@ -568,7 +568,7 @@ pgstatginindex_internal(Oid relid, FunctionCallInfo fcinfo) tuple = heap_form_tuple(tupleDesc, values, nulls); result = HeapTupleGetDatum(tuple); - return (result); + return result; } /* ------------------------------------------------------ @@ -601,10 +601,9 @@ pgstathashindex(PG_FUNCTION_ARGS) if (!IS_HASH(rel)) ereport(ERROR, (errcode(ERRCODE_WRONG_OBJECT_TYPE), - errmsg("relation \"%s\" is not a HASH index", + errmsg("relation \"%s\" is not a hash index", RelationGetRelationName(rel)))); - /* * Reject attempts to read non-local temporary relations; we would be * likely to get wrong data since we have no visibility into the owning diff --git a/contrib/pgstattuple/pgstattuple.c b/contrib/pgstattuple/pgstattuple.c index 7a91cc3468..6d67bd8271 100644 --- a/contrib/pgstattuple/pgstattuple.c +++ b/contrib/pgstattuple/pgstattuple.c @@ -89,7 +89,7 @@ static Datum build_pgstattuple_type(pgstattuple_type *stat, FunctionCallInfo fcinfo) { #define NCOLUMNS 9 -#define NCHARS 32 +#define NCHARS 314 HeapTuple tuple; char *values[NCOLUMNS]; @@ -296,6 +296,9 @@ pgstat_relation(Relation rel, FunctionCallInfo fcinfo) case RELKIND_PARTITIONED_TABLE: err = "partitioned table"; break; + case RELKIND_PARTITIONED_INDEX: + err = "partitioned index"; + break; default: err = "unknown"; break; @@ -416,7 +419,7 @@ pgstat_btree_page(pgstattuple_type *stat, Relation rel, BlockNumber blkno, BTPageOpaque opaque; opaque = (BTPageOpaque) PageGetSpecialPointer(page); - if (opaque->btpo_flags & (BTP_DELETED | BTP_HALF_DEAD)) + if (P_IGNORE(opaque)) { /* recyclable page */ stat->free_space += BLCKSZ; diff --git a/contrib/pgstattuple/sql/pgstattuple.sql b/contrib/pgstattuple/sql/pgstattuple.sql index a8e341e351..cfa540302d 100644 --- a/contrib/pgstattuple/sql/pgstattuple.sql +++ 
b/contrib/pgstattuple/sql/pgstattuple.sql @@ -64,8 +64,10 @@ select pgstatginindex('test_hashidx'); -- check that using any of these functions with unsupported relations will fail create table test_partitioned (a int) partition by range (a); +create index test_partitioned_index on test_partitioned(a); -- these should all fail select pgstattuple('test_partitioned'); +select pgstattuple('test_partitioned_index'); select pgstattuple_approx('test_partitioned'); select pg_relpages('test_partitioned'); select pgstatindex('test_partitioned'); diff --git a/contrib/postgres_fdw/Makefile b/contrib/postgres_fdw/Makefile index 354331247a..85394b4f1f 100644 --- a/contrib/postgres_fdw/Makefile +++ b/contrib/postgres_fdw/Makefile @@ -5,7 +5,7 @@ OBJS = postgres_fdw.o option.o deparse.o connection.o shippable.o $(WIN32RES) PGFILEDESC = "postgres_fdw - foreign data wrapper for PostgreSQL" PG_CPPFLAGS = -I$(libpq_srcdir) -SHLIB_LINK = $(libpq) +SHLIB_LINK_INTERNAL = $(libpq) EXTENSION = postgres_fdw DATA = postgres_fdw--1.0.sql diff --git a/contrib/postgres_fdw/connection.c b/contrib/postgres_fdw/connection.c index be4ec07cf9..fe4893a8e0 100644 --- a/contrib/postgres_fdw/connection.c +++ b/contrib/postgres_fdw/connection.c @@ -3,7 +3,7 @@ * connection.c * Connection management functions for postgres_fdw * - * Portions Copyright (c) 2012-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 2012-2018, PostgreSQL Global Development Group * * IDENTIFICATION * contrib/postgres_fdw/connection.c @@ -75,7 +75,7 @@ static bool xact_got_connection = false; /* prototypes of private functions */ static PGconn *connect_pg_server(ForeignServer *server, UserMapping *user); static void disconnect_pg_server(ConnCacheEntry *entry); -static void check_conn_params(const char **keywords, const char **values); +static void check_conn_params(const char **keywords, const char **values, UserMapping *user); static void configure_remote_session(PGconn *conn); static void do_sql_command(PGconn *conn, const char *sql); static void begin_remote_xact(ConnCacheEntry *entry); @@ -261,7 +261,7 @@ connect_pg_server(ForeignServer *server, UserMapping *user) keywords[n] = values[n] = NULL; /* verify connection parameters and make connection */ - check_conn_params(keywords, values); + check_conn_params(keywords, values, user); conn = PQconnectdbParams(keywords, values, false); if (!conn || PQstatus(conn) != CONNECTION_OK) @@ -276,7 +276,7 @@ connect_pg_server(ForeignServer *server, UserMapping *user) * otherwise, he's piggybacking on the postgres server's user * identity. See also dblink_security_check() in contrib/dblink. */ - if (!superuser() && !PQconnectionUsedPassword(conn)) + if (!superuser_arg(user->userid) && !PQconnectionUsedPassword(conn)) ereport(ERROR, (errcode(ERRCODE_S_R_E_PROHIBITED_SQL_STATEMENT_ATTEMPTED), errmsg("password is required"), @@ -322,12 +322,12 @@ disconnect_pg_server(ConnCacheEntry *entry) * contrib/dblink.) */ static void -check_conn_params(const char **keywords, const char **values) +check_conn_params(const char **keywords, const char **values, UserMapping *user) { int i; /* no check required if superuser */ - if (superuser()) + if (superuser_arg(user->userid)) return; /* ok if params contain a non-empty password */ @@ -630,7 +630,7 @@ pgfdw_report_error(int elevel, PGresult *res, PGconn *conn, message_detail ? errdetail_internal("%s", message_detail) : 0, message_hint ? errhint("%s", message_hint) : 0, message_context ? errcontext("%s", message_context) : 0, - sql ? 
errcontext("Remote SQL command: %s", sql) : 0)); + sql ? errcontext("remote SQL command: %s", sql) : 0)); } PG_CATCH(); { diff --git a/contrib/postgres_fdw/deparse.c b/contrib/postgres_fdw/deparse.c index 285cf1b2ee..6001f4d25e 100644 --- a/contrib/postgres_fdw/deparse.c +++ b/contrib/postgres_fdw/deparse.c @@ -24,7 +24,7 @@ * with collations that match the remote table's columns, which we can * consider to be user error. * - * Portions Copyright (c) 2012-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 2012-2018, PostgreSQL Global Development Group * * IDENTIFICATION * contrib/postgres_fdw/deparse.c @@ -125,23 +125,26 @@ static char *deparse_type_name(Oid type_oid, int32 typemod); * Functions to construct string representation of a node tree. */ static void deparseTargetList(StringInfo buf, - PlannerInfo *root, + RangeTblEntry *rte, Index rtindex, Relation rel, bool is_returning, Bitmapset *attrs_used, bool qualify_col, List **retrieved_attrs); -static void deparseExplicitTargetList(List *tlist, List **retrieved_attrs, +static void deparseExplicitTargetList(List *tlist, + bool is_returning, + List **retrieved_attrs, deparse_expr_cxt *context); static void deparseSubqueryTargetList(deparse_expr_cxt *context); -static void deparseReturningList(StringInfo buf, PlannerInfo *root, +static void deparseReturningList(StringInfo buf, RangeTblEntry *rte, Index rtindex, Relation rel, bool trig_after_row, + List *withCheckOptionList, List *returningList, List **retrieved_attrs); static void deparseColumnRef(StringInfo buf, int varno, int varattno, - PlannerInfo *root, bool qualify_col); + RangeTblEntry *rte, bool qualify_col); static void deparseRelation(StringInfo buf, Relation rel); static void deparseExpr(Expr *expr, deparse_expr_cxt *context); static void deparseVar(Var *node, deparse_expr_cxt *context); @@ -168,17 +171,19 @@ static void deparseLockingClause(deparse_expr_cxt *context); static void appendOrderByClause(List *pathkeys, deparse_expr_cxt *context); static void appendConditions(List *exprs, deparse_expr_cxt *context); static void deparseFromExprForRel(StringInfo buf, PlannerInfo *root, - RelOptInfo *joinrel, bool use_alias, List **params_list); + RelOptInfo *foreignrel, bool use_alias, + Index ignore_rel, List **ignore_conds, + List **params_list); static void deparseFromExpr(List *quals, deparse_expr_cxt *context); static void deparseRangeTblRef(StringInfo buf, PlannerInfo *root, RelOptInfo *foreignrel, bool make_subquery, - List **params_list); + Index ignore_rel, List **ignore_conds, List **params_list); static void deparseAggref(Aggref *node, deparse_expr_cxt *context); static void appendGroupByClause(List *tlist, deparse_expr_cxt *context); static void appendAggOrderBy(List *orderList, List *targetList, deparse_expr_cxt *context); static void appendFunctionName(Oid funcid, deparse_expr_cxt *context); -static Node *deparseSortGroupClause(Index ref, List *tlist, +static Node *deparseSortGroupClause(Index ref, List *tlist, bool force_colno, deparse_expr_cxt *context); /* @@ -850,10 +855,12 @@ foreign_expr_walker(Node *node, static char * deparse_type_name(Oid type_oid, int32 typemod) { - if (is_builtin(type_oid)) - return format_type_with_typemod(type_oid, typemod); - else - return format_type_with_typemod_qualified(type_oid, typemod); + bits16 flags = FORMAT_TYPE_TYPEMOD_GIVEN; + + if (!is_builtin(type_oid)) + flags |= FORMAT_TYPE_FORCE_QUALIFY; + + return format_type_extended(type_oid, typemod, flags); } /* @@ -921,7 +928,7 @@ build_tlist_to_deparse(RelOptInfo 
*foreignrel) * * List of columns selected is returned in retrieved_attrs. */ -extern void +void deparseSelectStmtForRel(StringInfo buf, PlannerInfo *root, RelOptInfo *rel, List *tlist, List *remote_conds, List *pathkeys, bool is_subquery, List **retrieved_attrs, @@ -1028,7 +1035,7 @@ deparseSelectSql(List *tlist, bool is_subquery, List **retrieved_attrs, * For a join or upper relation the input tlist gives the list of * columns required to be fetched from the foreign server. */ - deparseExplicitTargetList(tlist, retrieved_attrs, context); + deparseExplicitTargetList(tlist, false, retrieved_attrs, context); } else { @@ -1044,7 +1051,7 @@ deparseSelectSql(List *tlist, bool is_subquery, List **retrieved_attrs, */ Relation rel = heap_open(rte->relid, NoLock); - deparseTargetList(buf, root, foreignrel->relid, rel, false, + deparseTargetList(buf, rte, foreignrel->relid, rel, false, fpinfo->attrs_used, false, retrieved_attrs); heap_close(rel, NoLock); } @@ -1070,8 +1077,8 @@ deparseFromExpr(List *quals, deparse_expr_cxt *context) /* Construct FROM clause */ appendStringInfoString(buf, " FROM "); deparseFromExprForRel(buf, context->root, scanrel, - (bms_num_members(scanrel->relids) > 1), - context->params_list); + (bms_membership(scanrel->relids) == BMS_MULTIPLE), + (Index) 0, NULL, context->params_list); /* Construct WHERE clause */ if (quals != NIL) @@ -1093,7 +1100,7 @@ deparseFromExpr(List *quals, deparse_expr_cxt *context) */ static void deparseTargetList(StringInfo buf, - PlannerInfo *root, + RangeTblEntry *rte, Index rtindex, Relation rel, bool is_returning, @@ -1115,7 +1122,7 @@ deparseTargetList(StringInfo buf, first = true; for (i = 1; i <= tupdesc->natts; i++) { - Form_pg_attribute attr = tupdesc->attrs[i - 1]; + Form_pg_attribute attr = TupleDescAttr(tupdesc, i - 1); /* Ignore dropped attributes. */ if (attr->attisdropped) @@ -1131,7 +1138,7 @@ deparseTargetList(StringInfo buf, appendStringInfoString(buf, " RETURNING "); first = false; - deparseColumnRef(buf, rtindex, i, root, qualify_col); + deparseColumnRef(buf, rtindex, i, rte, qualify_col); *retrieved_attrs = lappend_int(*retrieved_attrs, i); } @@ -1256,7 +1263,7 @@ deparseLockingClause(deparse_expr_cxt *context) } /* Add the relation alias if we are here for a join relation */ - if (bms_num_members(rel->relids) > 1 && + if (bms_membership(rel->relids) == BMS_MULTIPLE && rc->strength != LCS_NONE) appendStringInfo(buf, " OF %s%d", REL_ALIAS_PREFIX, relid); } @@ -1307,7 +1314,7 @@ appendConditions(List *exprs, deparse_expr_cxt *context) } /* Output join name for given join type */ -extern const char * +const char * get_jointype_name(JoinType jointype) { switch (jointype) @@ -1340,9 +1347,14 @@ get_jointype_name(JoinType jointype) * * retrieved_attrs is the list of continuously increasing integers starting * from 1. It has same number of entries as tlist. + * + * This is used for both SELECT and RETURNING targetlists; the is_returning + * parameter is true only for a RETURNING targetlist. 
*/ static void -deparseExplicitTargetList(List *tlist, List **retrieved_attrs, +deparseExplicitTargetList(List *tlist, + bool is_returning, + List **retrieved_attrs, deparse_expr_cxt *context) { ListCell *lc; @@ -1357,13 +1369,16 @@ deparseExplicitTargetList(List *tlist, List **retrieved_attrs, if (i > 0) appendStringInfoString(buf, ", "); + else if (is_returning) + appendStringInfoString(buf, " RETURNING "); + deparseExpr((Expr *) tle->expr, context); *retrieved_attrs = lappend_int(*retrieved_attrs, i + 1); i++; } - if (i == 0) + if (i == 0 && !is_returning) appendStringInfoString(buf, "NULL"); } @@ -1406,10 +1421,17 @@ deparseSubqueryTargetList(deparse_expr_cxt *context) * The function constructs ... JOIN ... ON ... for join relation. For a base * relation it just returns schema-qualified tablename, with the appropriate * alias if so requested. + * + * 'ignore_rel' is either zero or the RT index of a target relation. In the + * latter case the function constructs FROM clause of UPDATE or USING clause + * of DELETE; it deparses the join relation as if the relation never contained + * the target relation, and creates a List of conditions to be deparsed into + * the top-level WHERE clause, which is returned to *ignore_conds. */ static void deparseFromExprForRel(StringInfo buf, PlannerInfo *root, RelOptInfo *foreignrel, - bool use_alias, List **params_list) + bool use_alias, Index ignore_rel, List **ignore_conds, + List **params_list) { PgFdwRelationInfo *fpinfo = (PgFdwRelationInfo *) foreignrel->fdw_private; @@ -1417,16 +1439,89 @@ deparseFromExprForRel(StringInfo buf, PlannerInfo *root, RelOptInfo *foreignrel, { StringInfoData join_sql_o; StringInfoData join_sql_i; + RelOptInfo *outerrel = fpinfo->outerrel; + RelOptInfo *innerrel = fpinfo->innerrel; + bool outerrel_is_target = false; + bool innerrel_is_target = false; + + if (ignore_rel > 0 && bms_is_member(ignore_rel, foreignrel->relids)) + { + /* + * If this is an inner join, add joinclauses to *ignore_conds and + * set it to empty so that those can be deparsed into the WHERE + * clause. Note that since the target relation can never be + * within the nullable side of an outer join, those could safely + * be pulled up into the WHERE clause (see foreign_join_ok()). + * Note also that since the target relation is only inner-joined + * to any other relation in the query, all conditions in the join + * tree mentioning the target relation could be deparsed into the + * WHERE clause by doing this recursively. + */ + if (fpinfo->jointype == JOIN_INNER) + { + *ignore_conds = list_concat(*ignore_conds, + list_copy(fpinfo->joinclauses)); + fpinfo->joinclauses = NIL; + } + + /* + * Check if either of the input relations is the target relation. + */ + if (outerrel->relid == ignore_rel) + outerrel_is_target = true; + else if (innerrel->relid == ignore_rel) + innerrel_is_target = true; + } - /* Deparse outer relation */ - initStringInfo(&join_sql_o); - deparseRangeTblRef(&join_sql_o, root, fpinfo->outerrel, - fpinfo->make_outerrel_subquery, params_list); + /* Deparse outer relation if not the target relation. */ + if (!outerrel_is_target) + { + initStringInfo(&join_sql_o); + deparseRangeTblRef(&join_sql_o, root, outerrel, + fpinfo->make_outerrel_subquery, + ignore_rel, ignore_conds, params_list); + + /* + * If inner relation is the target relation, skip deparsing it. 
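The ignore_rel/ignore_conds additions above are what let postgres_fdw push an entire UPDATE or DELETE join to the remote server; a hedged sketch of the resulting behavior, with ft1 and ft2 standing in for two foreign tables on the same server and the remote SQL shape indicated only approximately:

-- UPDATE with a join: deparsed remotely as UPDATE <table> <alias> SET ... FROM ... WHERE ...
EXPLAIN (VERBOSE, COSTS OFF)
UPDATE ft1 SET c2 = ft1.c2 + 1 FROM ft2 WHERE ft1.c1 = ft2.c1;

-- DELETE with a join: deparsed remotely as DELETE FROM <table> <alias> USING ... WHERE ...
EXPLAIN (VERBOSE, COSTS OFF)
DELETE FROM ft1 USING ft2 WHERE ft1.c1 = ft2.c1;
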
+ * Note that since the join of the target relation with any other + * relation in the query is an inner join and can never be within + * the nullable side of an outer join, the join could be + * interchanged with higher-level joins (cf. identity 1 on outer + * join reordering shown in src/backend/optimizer/README), which + * means it's safe to skip the target-relation deparsing here. + */ + if (innerrel_is_target) + { + Assert(fpinfo->jointype == JOIN_INNER); + Assert(fpinfo->joinclauses == NIL); + appendStringInfo(buf, "%s", join_sql_o.data); + return; + } + } - /* Deparse inner relation */ - initStringInfo(&join_sql_i); - deparseRangeTblRef(&join_sql_i, root, fpinfo->innerrel, - fpinfo->make_innerrel_subquery, params_list); + /* Deparse inner relation if not the target relation. */ + if (!innerrel_is_target) + { + initStringInfo(&join_sql_i); + deparseRangeTblRef(&join_sql_i, root, innerrel, + fpinfo->make_innerrel_subquery, + ignore_rel, ignore_conds, params_list); + + /* + * If outer relation is the target relation, skip deparsing it. + * See the above note about safety. + */ + if (outerrel_is_target) + { + Assert(fpinfo->jointype == JOIN_INNER); + Assert(fpinfo->joinclauses == NIL); + appendStringInfo(buf, "%s", join_sql_i.data); + return; + } + } + + /* Neither of the relations is the target relation. */ + Assert(!outerrel_is_target && !innerrel_is_target); /* * For a join relation FROM clause entry is deparsed as @@ -1486,7 +1581,8 @@ deparseFromExprForRel(StringInfo buf, PlannerInfo *root, RelOptInfo *foreignrel, */ static void deparseRangeTblRef(StringInfo buf, PlannerInfo *root, RelOptInfo *foreignrel, - bool make_subquery, List **params_list) + bool make_subquery, Index ignore_rel, List **ignore_conds, + List **params_list) { PgFdwRelationInfo *fpinfo = (PgFdwRelationInfo *) foreignrel->fdw_private; @@ -1501,6 +1597,14 @@ deparseRangeTblRef(StringInfo buf, PlannerInfo *root, RelOptInfo *foreignrel, List *retrieved_attrs; int ncols; + /* + * The given relation shouldn't contain the target relation, because + * this should only happen for input relations for a full join, and + * such relations can never contain an UPDATE/DELETE target. + */ + Assert(ignore_rel == 0 || + !bms_is_member(ignore_rel, foreignrel->relids)); + /* Deparse the subquery representing the relation. */ appendStringInfoChar(buf, '('); deparseSelectStmtForRel(buf, root, foreignrel, NIL, @@ -1534,21 +1638,23 @@ deparseRangeTblRef(StringInfo buf, PlannerInfo *root, RelOptInfo *foreignrel, } } else - deparseFromExprForRel(buf, root, foreignrel, true, params_list); + deparseFromExprForRel(buf, root, foreignrel, true, ignore_rel, + ignore_conds, params_list); } /* * deparse remote INSERT statement * * The statement text is appended to buf, and we also create an integer List - * of the columns being retrieved by RETURNING (if any), which is returned - * to *retrieved_attrs. + * of the columns being retrieved by WITH CHECK OPTION or RETURNING (if any), + * which is returned to *retrieved_attrs. 
*/ void -deparseInsertSql(StringInfo buf, PlannerInfo *root, +deparseInsertSql(StringInfo buf, RangeTblEntry *rte, Index rtindex, Relation rel, List *targetAttrs, bool doNothing, - List *returningList, List **retrieved_attrs) + List *withCheckOptionList, List *returningList, + List **retrieved_attrs) { AttrNumber pindex; bool first; @@ -1570,7 +1676,7 @@ deparseInsertSql(StringInfo buf, PlannerInfo *root, appendStringInfoString(buf, ", "); first = false; - deparseColumnRef(buf, rtindex, attnum, root, false); + deparseColumnRef(buf, rtindex, attnum, rte, false); } appendStringInfoString(buf, ") VALUES ("); @@ -1595,22 +1701,23 @@ deparseInsertSql(StringInfo buf, PlannerInfo *root, if (doNothing) appendStringInfoString(buf, " ON CONFLICT DO NOTHING"); - deparseReturningList(buf, root, rtindex, rel, + deparseReturningList(buf, rte, rtindex, rel, rel->trigdesc && rel->trigdesc->trig_insert_after_row, - returningList, retrieved_attrs); + withCheckOptionList, returningList, retrieved_attrs); } /* * deparse remote UPDATE statement * * The statement text is appended to buf, and we also create an integer List - * of the columns being retrieved by RETURNING (if any), which is returned - * to *retrieved_attrs. + * of the columns being retrieved by WITH CHECK OPTION or RETURNING (if any), + * which is returned to *retrieved_attrs. */ void -deparseUpdateSql(StringInfo buf, PlannerInfo *root, +deparseUpdateSql(StringInfo buf, RangeTblEntry *rte, Index rtindex, Relation rel, - List *targetAttrs, List *returningList, + List *targetAttrs, + List *withCheckOptionList, List *returningList, List **retrieved_attrs) { AttrNumber pindex; @@ -1631,27 +1738,37 @@ deparseUpdateSql(StringInfo buf, PlannerInfo *root, appendStringInfoString(buf, ", "); first = false; - deparseColumnRef(buf, rtindex, attnum, root, false); + deparseColumnRef(buf, rtindex, attnum, rte, false); appendStringInfo(buf, " = $%d", pindex); pindex++; } appendStringInfoString(buf, " WHERE ctid = $1"); - deparseReturningList(buf, root, rtindex, rel, + deparseReturningList(buf, rte, rtindex, rel, rel->trigdesc && rel->trigdesc->trig_update_after_row, - returningList, retrieved_attrs); + withCheckOptionList, returningList, retrieved_attrs); } /* * deparse remote UPDATE statement * - * The statement text is appended to buf, and we also create an integer List - * of the columns being retrieved by RETURNING (if any), which is returned - * to *retrieved_attrs. 
+ * 'buf' is the output buffer to append the statement to + * 'rtindex' is the RT index of the associated target relation + * 'rel' is the relation descriptor for the target relation + * 'foreignrel' is the RelOptInfo for the target relation or the join relation + * containing all base relations in the query + * 'targetlist' is the tlist of the underlying foreign-scan plan node + * 'targetAttrs' is the target columns of the UPDATE + * 'remote_conds' is the qual clauses that must be evaluated remotely + * '*params_list' is an output list of exprs that will become remote Params + * 'returningList' is the RETURNING targetlist + * '*retrieved_attrs' is an output list of integers of columns being retrieved + * by RETURNING (if any) */ void deparseDirectUpdateSql(StringInfo buf, PlannerInfo *root, Index rtindex, Relation rel, + RelOptInfo *foreignrel, List *targetlist, List *targetAttrs, List *remote_conds, @@ -1659,21 +1776,23 @@ deparseDirectUpdateSql(StringInfo buf, PlannerInfo *root, List *returningList, List **retrieved_attrs) { - RelOptInfo *baserel = root->simple_rel_array[rtindex]; deparse_expr_cxt context; int nestlevel; bool first; ListCell *lc; + RangeTblEntry *rte = planner_rt_fetch(rtindex, root); /* Set up context struct for recursion */ context.root = root; - context.foreignrel = baserel; - context.scanrel = baserel; + context.foreignrel = foreignrel; + context.scanrel = foreignrel; context.buf = buf; context.params_list = params_list; appendStringInfoString(buf, "UPDATE "); deparseRelation(buf, rel); + if (foreignrel->reloptkind == RELOPT_JOINREL) + appendStringInfo(buf, " %s%d", REL_ALIAS_PREFIX, rtindex); appendStringInfoString(buf, " SET "); /* Make sure any constants in the exprs are printed portably */ @@ -1693,21 +1812,35 @@ deparseDirectUpdateSql(StringInfo buf, PlannerInfo *root, appendStringInfoString(buf, ", "); first = false; - deparseColumnRef(buf, rtindex, attnum, root, false); + deparseColumnRef(buf, rtindex, attnum, rte, false); appendStringInfoString(buf, " = "); deparseExpr((Expr *) tle->expr, &context); } reset_transmission_modes(nestlevel); + if (foreignrel->reloptkind == RELOPT_JOINREL) + { + List *ignore_conds = NIL; + + appendStringInfo(buf, " FROM "); + deparseFromExprForRel(buf, root, foreignrel, true, rtindex, + &ignore_conds, params_list); + remote_conds = list_concat(remote_conds, ignore_conds); + } + if (remote_conds) { appendStringInfoString(buf, " WHERE "); appendConditions(remote_conds, &context); } - deparseReturningList(buf, root, rtindex, rel, false, - returningList, retrieved_attrs); + if (foreignrel->reloptkind == RELOPT_JOINREL) + deparseExplicitTargetList(returningList, true, retrieved_attrs, + &context); + else + deparseReturningList(buf, rte, rtindex, rel, false, + NIL, returningList, retrieved_attrs); } /* @@ -1718,7 +1851,7 @@ deparseDirectUpdateSql(StringInfo buf, PlannerInfo *root, * to *retrieved_attrs. 
*/ void -deparseDeleteSql(StringInfo buf, PlannerInfo *root, +deparseDeleteSql(StringInfo buf, RangeTblEntry *rte, Index rtindex, Relation rel, List *returningList, List **retrieved_attrs) @@ -1727,38 +1860,57 @@ deparseDeleteSql(StringInfo buf, PlannerInfo *root, deparseRelation(buf, rel); appendStringInfoString(buf, " WHERE ctid = $1"); - deparseReturningList(buf, root, rtindex, rel, + deparseReturningList(buf, rte, rtindex, rel, rel->trigdesc && rel->trigdesc->trig_delete_after_row, - returningList, retrieved_attrs); + NIL, returningList, retrieved_attrs); } /* * deparse remote DELETE statement * - * The statement text is appended to buf, and we also create an integer List - * of the columns being retrieved by RETURNING (if any), which is returned - * to *retrieved_attrs. + * 'buf' is the output buffer to append the statement to + * 'rtindex' is the RT index of the associated target relation + * 'rel' is the relation descriptor for the target relation + * 'foreignrel' is the RelOptInfo for the target relation or the join relation + * containing all base relations in the query + * 'remote_conds' is the qual clauses that must be evaluated remotely + * '*params_list' is an output list of exprs that will become remote Params + * 'returningList' is the RETURNING targetlist + * '*retrieved_attrs' is an output list of integers of columns being retrieved + * by RETURNING (if any) */ void deparseDirectDeleteSql(StringInfo buf, PlannerInfo *root, Index rtindex, Relation rel, + RelOptInfo *foreignrel, List *remote_conds, List **params_list, List *returningList, List **retrieved_attrs) { - RelOptInfo *baserel = root->simple_rel_array[rtindex]; deparse_expr_cxt context; /* Set up context struct for recursion */ context.root = root; - context.foreignrel = baserel; - context.scanrel = baserel; + context.foreignrel = foreignrel; + context.scanrel = foreignrel; context.buf = buf; context.params_list = params_list; appendStringInfoString(buf, "DELETE FROM "); deparseRelation(buf, rel); + if (foreignrel->reloptkind == RELOPT_JOINREL) + appendStringInfo(buf, " %s%d", REL_ALIAS_PREFIX, rtindex); + + if (foreignrel->reloptkind == RELOPT_JOINREL) + { + List *ignore_conds = NIL; + + appendStringInfo(buf, " USING "); + deparseFromExprForRel(buf, root, foreignrel, true, rtindex, + &ignore_conds, params_list); + remote_conds = list_concat(remote_conds, ignore_conds); + } if (remote_conds) { @@ -1766,17 +1918,23 @@ deparseDirectDeleteSql(StringInfo buf, PlannerInfo *root, appendConditions(remote_conds, &context); } - deparseReturningList(buf, root, rtindex, rel, false, - returningList, retrieved_attrs); + if (foreignrel->reloptkind == RELOPT_JOINREL) + deparseExplicitTargetList(returningList, true, retrieved_attrs, + &context); + else + deparseReturningList(buf, planner_rt_fetch(rtindex, root), + rtindex, rel, false, + NIL, returningList, retrieved_attrs); } /* * Add a RETURNING clause, if needed, to an INSERT/UPDATE/DELETE. */ static void -deparseReturningList(StringInfo buf, PlannerInfo *root, +deparseReturningList(StringInfo buf, RangeTblEntry *rte, Index rtindex, Relation rel, bool trig_after_row, + List *withCheckOptionList, List *returningList, List **retrieved_attrs) { @@ -1789,6 +1947,21 @@ deparseReturningList(StringInfo buf, PlannerInfo *root, bms_make_singleton(0 - FirstLowInvalidHeapAttributeNumber); } + if (withCheckOptionList != NIL) + { + /* + * We need the attrs, non-system and system, mentioned in the local + * query's WITH CHECK OPTION list. 
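deparseDirectDeleteSql gets the same treatment as the UPDATE case above: for a join the target keeps its alias, the other base relations go into a USING clause, and any quals collected by deparseFromExprForRel are folded into WHERE via ignore_conds. An illustrative remote shape (not the literal expected output):

-- Local: DELETE FROM ft2 USING ft4 WHERE ft2.c2 = ft4.c1 RETURNING ft2.c1;
-- Remote, roughly:
DELETE FROM "S 1"."T 1" r1
  USING "S 1"."T 3" r2
  WHERE ((r1.c2 = r2.c1))
  RETURNING r1."C 1";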
+ * + * Note: we do this to ensure that WCO constraints will be evaluated + * on the data actually inserted/updated on the remote side, which + * might differ from the data supplied by the core code, for example + * as a result of remote triggers. + */ + pull_varattnos((Node *) withCheckOptionList, rtindex, + &attrs_used); + } + if (returningList != NIL) { /* @@ -1800,7 +1973,7 @@ deparseReturningList(StringInfo buf, PlannerInfo *root, } if (attrs_used != NULL) - deparseTargetList(buf, root, rtindex, rel, true, attrs_used, false, + deparseTargetList(buf, rte, rtindex, rel, true, attrs_used, false, retrieved_attrs); else *retrieved_attrs = NIL; @@ -1851,7 +2024,7 @@ deparseAnalyzeSql(StringInfo buf, Relation rel, List **retrieved_attrs) for (i = 0; i < tupdesc->natts; i++) { /* Ignore dropped columns. */ - if (tupdesc->attrs[i]->attisdropped) + if (TupleDescAttr(tupdesc, i)->attisdropped) continue; if (!first) @@ -1859,7 +2032,7 @@ deparseAnalyzeSql(StringInfo buf, Relation rel, List **retrieved_attrs) first = false; /* Use attribute name or column_name option. */ - colname = NameStr(tupdesc->attrs[i]->attname); + colname = NameStr(TupleDescAttr(tupdesc, i)->attname); options = GetForeignColumnOptions(relid, i + 1); foreach(lc, options) @@ -1896,11 +2069,9 @@ deparseAnalyzeSql(StringInfo buf, Relation rel, List **retrieved_attrs) * If qualify_col is true, qualify column name with the alias of relation. */ static void -deparseColumnRef(StringInfo buf, int varno, int varattno, PlannerInfo *root, +deparseColumnRef(StringInfo buf, int varno, int varattno, RangeTblEntry *rte, bool qualify_col) { - RangeTblEntry *rte; - /* We support fetching the remote side's CTID and OID. */ if (varattno == SelfItemPointerAttributeNumber) { @@ -1925,10 +2096,7 @@ deparseColumnRef(StringInfo buf, int varno, int varattno, PlannerInfo *root, Oid fetchval = 0; if (varattno == TableOidAttributeNumber) - { - rte = planner_rt_fetch(varno, root); fetchval = rte->relid; - } if (qualify_col) { @@ -1948,9 +2116,6 @@ deparseColumnRef(StringInfo buf, int varno, int varattno, PlannerInfo *root, /* Required only to be passed down to deparseTargetList(). */ List *retrieved_attrs; - /* Get RangeTblEntry from array in PlannerInfo. */ - rte = planner_rt_fetch(varno, root); - /* * The lock on the relation will be held by upper callers, so it's * fine to open it with no lock here. @@ -1982,7 +2147,7 @@ deparseColumnRef(StringInfo buf, int varno, int varattno, PlannerInfo *root, } appendStringInfoString(buf, "ROW("); - deparseTargetList(buf, root, varno, rel, false, attrs_used, qualify_col, + deparseTargetList(buf, rte, varno, rel, false, attrs_used, qualify_col, &retrieved_attrs); appendStringInfoChar(buf, ')'); @@ -2002,9 +2167,6 @@ deparseColumnRef(StringInfo buf, int varno, int varattno, PlannerInfo *root, /* varno must not be any of OUTER_VAR, INNER_VAR and INDEX_VAR. */ Assert(!IS_SPECIAL_VARNO(varno)); - /* Get RangeTblEntry from array in PlannerInfo. */ - rte = planner_rt_fetch(varno, root); - /* * If it's a column of a foreign table, and it has the column_name FDW * option, use that value. @@ -2026,7 +2188,7 @@ deparseColumnRef(StringInfo buf, int varno, int varattno, PlannerInfo *root, * FDW option, use attribute name. 
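deparseColumnRef, which now receives the RangeTblEntry directly instead of looking it up in PlannerInfo, still prefers a per-column column_name FDW option over the local attribute name; that is why the local column c1 appears as "C 1" throughout the Remote SQL in the expected output below. One way to set that mapping (names as in the regression schema):

-- Map the local column c1 onto the differently named remote column "C 1".
ALTER FOREIGN TABLE ft1 ALTER COLUMN c1 OPTIONS (column_name 'C 1');
-- Without the option, the local attribute name is used as-is.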
*/ if (colname == NULL) - colname = get_relid_attribute_name(rte->relid, varattno); + colname = get_attname(rte->relid, varattno, false); if (qualify_col) ADD_REL_QUALIFIER(buf, varno); @@ -2185,7 +2347,7 @@ deparseVar(Var *node, deparse_expr_cxt *context) int colno; /* Qualify columns when multiple relations are involved. */ - bool qualify_col = (bms_num_members(relids) > 1); + bool qualify_col = (bms_membership(relids) == BMS_MULTIPLE); /* * If the Var belongs to the foreign relation that is deparsed as a @@ -2202,7 +2364,8 @@ deparseVar(Var *node, deparse_expr_cxt *context) if (bms_is_member(node->varno, relids) && node->varlevelsup == 0) deparseColumnRef(context->buf, node->varno, node->varattno, - context->root, qualify_col); + planner_rt_fetch(node->varno, context->root), + qualify_col); else { /* Treat like a Param */ @@ -2853,7 +3016,7 @@ appendAggOrderBy(List *orderList, List *targetList, deparse_expr_cxt *context) first = false; sortexpr = deparseSortGroupClause(srt->tleSortGroupRef, targetList, - context); + false, context); sortcoltype = exprType(sortexpr); /* See whether operator is default < or > for datatype */ typentry = lookup_type_cache(sortcoltype, @@ -2960,7 +3123,7 @@ appendGroupByClause(List *tlist, deparse_expr_cxt *context) appendStringInfoString(buf, ", "); first = false; - deparseSortGroupClause(grp->tleSortGroupRef, tlist, context); + deparseSortGroupClause(grp->tleSortGroupRef, tlist, true, context); } } @@ -3047,7 +3210,8 @@ appendFunctionName(Oid funcid, deparse_expr_cxt *context) * need not find it again. */ static Node * -deparseSortGroupClause(Index ref, List *tlist, deparse_expr_cxt *context) +deparseSortGroupClause(Index ref, List *tlist, bool force_colno, + deparse_expr_cxt *context) { StringInfo buf = context->buf; TargetEntry *tle; @@ -3056,7 +3220,13 @@ deparseSortGroupClause(Index ref, List *tlist, deparse_expr_cxt *context) tle = get_sortgroupref_tle(ref, tlist); expr = tle->expr; - if (expr && IsA(expr, Const)) + if (force_colno) + { + /* Use column-number form when requested by caller. 
*/ + Assert(!tle->resjunk); + appendStringInfo(buf, "%d", tle->resno); + } + else if (expr && IsA(expr, Const)) { /* * Force a typecast here so that we don't emit something like "GROUP diff --git a/contrib/postgres_fdw/expected/postgres_fdw.out b/contrib/postgres_fdw/expected/postgres_fdw.out index c19b3318c7..21a2ef5ad3 100644 --- a/contrib/postgres_fdw/expected/postgres_fdw.out +++ b/contrib/postgres_fdw/expected/postgres_fdw.out @@ -52,6 +52,11 @@ CREATE TABLE "S 1"."T 4" ( c3 text, CONSTRAINT t4_pkey PRIMARY KEY (c1) ); +-- Disable autovacuum for these tables to avoid unexpected effects of that +ALTER TABLE "S 1"."T 1" SET (autovacuum_enabled = 'false'); +ALTER TABLE "S 1"."T 2" SET (autovacuum_enabled = 'false'); +ALTER TABLE "S 1"."T 3" SET (autovacuum_enabled = 'false'); +ALTER TABLE "S 1"."T 4" SET (autovacuum_enabled = 'false'); INSERT INTO "S 1"."T 1" SELECT id, id % 10, @@ -1025,7 +1030,7 @@ SELECT t1.c1, t2.c1 FROM ft1 t1 JOIN ft2 t2 ON (t1.c1 = t2.c1) ORDER BY t1.c3, t -> Foreign Scan Output: t1.c1, t2.c1, t1.c3 Relations: (public.ft1 t1) INNER JOIN (public.ft2 t2) - Remote SQL: SELECT r1."C 1", r1.c3, r2."C 1" FROM ("S 1"."T 1" r1 INNER JOIN "S 1"."T 1" r2 ON (((r1."C 1" = r2."C 1")))) ORDER BY r1.c3 ASC NULLS LAST, r1."C 1" ASC NULLS LAST + Remote SQL: SELECT r1."C 1", r2."C 1", r1.c3 FROM ("S 1"."T 1" r1 INNER JOIN "S 1"."T 1" r2 ON (((r1."C 1" = r2."C 1")))) ORDER BY r1.c3 ASC NULLS LAST, r1."C 1" ASC NULLS LAST (6 rows) SELECT t1.c1, t2.c1 FROM ft1 t1 JOIN ft2 t2 ON (t1.c1 = t2.c1) ORDER BY t1.c3, t1.c1 OFFSET 100 LIMIT 10; @@ -1056,7 +1061,7 @@ SELECT t1.c1, t2.c2, t3.c3 FROM ft1 t1 JOIN ft2 t2 ON (t1.c1 = t2.c1) JOIN ft4 t -> Foreign Scan Output: t1.c1, t2.c2, t3.c3, t1.c3 Relations: ((public.ft1 t1) INNER JOIN (public.ft2 t2)) INNER JOIN (public.ft4 t3) - Remote SQL: SELECT r1."C 1", r1.c3, r2.c2, r4.c3 FROM (("S 1"."T 1" r1 INNER JOIN "S 1"."T 1" r2 ON (((r1."C 1" = r2."C 1")))) INNER JOIN "S 1"."T 3" r4 ON (((r1."C 1" = r4.c1)))) + Remote SQL: SELECT r1."C 1", r2.c2, r4.c3, r1.c3 FROM (("S 1"."T 1" r1 INNER JOIN "S 1"."T 1" r2 ON (((r1."C 1" = r2."C 1")))) INNER JOIN "S 1"."T 3" r4 ON (((r1."C 1" = r4.c1)))) (9 rows) SELECT t1.c1, t2.c2, t3.c3 FROM ft1 t1 JOIN ft2 t2 ON (t1.c1 = t2.c1) JOIN ft4 t3 ON (t3.c1 = t1.c1) ORDER BY t1.c3, t1.c1 OFFSET 10 LIMIT 10; @@ -1185,7 +1190,7 @@ SELECT t1.c1, t2.c1 FROM ft5 t1 RIGHT JOIN ft4 t2 ON (t1.c1 = t2.c1) ORDER BY t2 -> Foreign Scan Output: t1.c1, t2.c1 Relations: (public.ft4 t2) LEFT JOIN (public.ft5 t1) - Remote SQL: SELECT r2.c1, r1.c1 FROM ("S 1"."T 3" r2 LEFT JOIN "S 1"."T 4" r1 ON (((r1.c1 = r2.c1)))) ORDER BY r2.c1 ASC NULLS LAST, r1.c1 ASC NULLS LAST + Remote SQL: SELECT r1.c1, r2.c1 FROM ("S 1"."T 3" r2 LEFT JOIN "S 1"."T 4" r1 ON (((r1.c1 = r2.c1)))) ORDER BY r2.c1 ASC NULLS LAST, r1.c1 ASC NULLS LAST (6 rows) SELECT t1.c1, t2.c1 FROM ft5 t1 RIGHT JOIN ft4 t2 ON (t1.c1 = t2.c1) ORDER BY t2.c1, t1.c1 OFFSET 10 LIMIT 10; @@ -1213,7 +1218,7 @@ SELECT t1.c1, t2.c2, t3.c3 FROM ft2 t1 RIGHT JOIN ft2 t2 ON (t1.c1 = t2.c1) RIGH -> Foreign Scan Output: t1.c1, t2.c2, t3.c3 Relations: ((public.ft4 t3) LEFT JOIN (public.ft2 t2)) LEFT JOIN (public.ft2 t1) - Remote SQL: SELECT r4.c3, r2.c2, r1."C 1" FROM (("S 1"."T 3" r4 LEFT JOIN "S 1"."T 1" r2 ON (((r2."C 1" = r4.c1)))) LEFT JOIN "S 1"."T 1" r1 ON (((r1."C 1" = r2."C 1")))) + Remote SQL: SELECT r1."C 1", r2.c2, r4.c3 FROM (("S 1"."T 3" r4 LEFT JOIN "S 1"."T 1" r2 ON (((r2."C 1" = r4.c1)))) LEFT JOIN "S 1"."T 1" r1 ON (((r1."C 1" = r2."C 1")))) (6 rows) SELECT t1.c1, t2.c2, 
t3.c3 FROM ft2 t1 RIGHT JOIN ft2 t2 ON (t1.c1 = t2.c1) RIGHT JOIN ft4 t3 ON (t2.c1 = t3.c1) OFFSET 10 LIMIT 10; @@ -1371,24 +1376,27 @@ SELECT t1.c1, ss.a, ss.b FROM (SELECT c1 FROM "S 1"."T 3" WHERE c1 = 50) t1 INNE Output: ft4.c1, ft4.*, ft5.c1, ft5.* Relations: (public.ft4) FULL JOIN (public.ft5) Remote SQL: SELECT s8.c1, s8.c2, s9.c1, s9.c2 FROM ((SELECT c1, ROW(c1, c2, c3) FROM "S 1"."T 3" WHERE ((c1 >= 50)) AND ((c1 <= 60))) s8(c1, c2) FULL JOIN (SELECT c1, ROW(c1, c2, c3) FROM "S 1"."T 4" WHERE ((c1 >= 50)) AND ((c1 <= 60))) s9(c1, c2) ON (((s8.c1 = s9.c1)))) WHERE (((s8.c1 IS NULL) OR (s8.c1 IS NOT NULL))) ORDER BY s8.c1 ASC NULLS LAST, s9.c1 ASC NULLS LAST - -> Hash Full Join + -> Sort Output: ft4.c1, ft4.*, ft5.c1, ft5.* - Hash Cond: (ft4.c1 = ft5.c1) - Filter: ((ft4.c1 IS NULL) OR (ft4.c1 IS NOT NULL)) - -> Foreign Scan on public.ft4 - Output: ft4.c1, ft4.* - Remote SQL: SELECT c1, c2, c3 FROM "S 1"."T 3" WHERE ((c1 >= 50)) AND ((c1 <= 60)) - -> Hash - Output: ft5.c1, ft5.* - -> Foreign Scan on public.ft5 + Sort Key: ft4.c1, ft5.c1 + -> Hash Full Join + Output: ft4.c1, ft4.*, ft5.c1, ft5.* + Hash Cond: (ft4.c1 = ft5.c1) + Filter: ((ft4.c1 IS NULL) OR (ft4.c1 IS NOT NULL)) + -> Foreign Scan on public.ft4 + Output: ft4.c1, ft4.* + Remote SQL: SELECT c1, c2, c3 FROM "S 1"."T 3" WHERE ((c1 >= 50)) AND ((c1 <= 60)) + -> Hash Output: ft5.c1, ft5.* - Remote SQL: SELECT c1, c2, c3 FROM "S 1"."T 4" WHERE ((c1 >= 50)) AND ((c1 <= 60)) + -> Foreign Scan on public.ft5 + Output: ft5.c1, ft5.* + Remote SQL: SELECT c1, c2, c3 FROM "S 1"."T 4" WHERE ((c1 >= 50)) AND ((c1 <= 60)) -> Materialize Output: "T 3".c1, "T 3".ctid -> Seq Scan on "S 1"."T 3" Output: "T 3".c1, "T 3".ctid Filter: ("T 3".c1 = 50) -(25 rows) +(28 rows) SELECT t1.c1, ss.a, ss.b FROM (SELECT c1 FROM "S 1"."T 3" WHERE c1 = 50) t1 INNER JOIN (SELECT t2.c1, t3.c1 FROM (SELECT c1 FROM ft4 WHERE c1 between 50 and 60) t2 FULL JOIN (SELECT c1 FROM ft5 WHERE c1 between 50 and 60) t3 ON (t2.c1 = t3.c1) WHERE t2.c1 IS NULL OR t2.c1 IS NOT NULL) ss(a, b) ON (TRUE) ORDER BY t1.c1, ss.a, ss.b FOR UPDATE OF t1; c1 | a | b @@ -1469,7 +1477,7 @@ SELECT t1.c1, t2.c2, t3.c3 FROM ft2 t1 FULL JOIN ft2 t2 ON (t1.c1 = t2.c1) RIGHT -> Foreign Scan Output: t1.c1, t2.c2, t3.c3 Relations: ((public.ft4 t3) LEFT JOIN (public.ft2 t2)) LEFT JOIN (public.ft2 t1) - Remote SQL: SELECT r4.c3, r2.c2, r1."C 1" FROM (("S 1"."T 3" r4 LEFT JOIN "S 1"."T 1" r2 ON (((r2."C 1" = r4.c1)))) LEFT JOIN "S 1"."T 1" r1 ON (((r1."C 1" = r2."C 1")))) + Remote SQL: SELECT r1."C 1", r2.c2, r4.c3 FROM (("S 1"."T 3" r4 LEFT JOIN "S 1"."T 1" r2 ON (((r2."C 1" = r4.c1)))) LEFT JOIN "S 1"."T 1" r1 ON (((r1."C 1" = r2."C 1")))) (6 rows) SELECT t1.c1, t2.c2, t3.c3 FROM ft2 t1 FULL JOIN ft2 t2 ON (t1.c1 = t2.c1) RIGHT JOIN ft4 t3 ON (t2.c1 = t3.c1) OFFSET 10 LIMIT 10; @@ -1497,7 +1505,7 @@ SELECT t1.c1, t2.c2, t3.c3 FROM ft2 t1 RIGHT JOIN ft2 t2 ON (t1.c1 = t2.c1) FULL -> Foreign Scan Output: t1.c1, t2.c2, t3.c3 Relations: ((public.ft2 t2) LEFT JOIN (public.ft2 t1)) FULL JOIN (public.ft4 t3) - Remote SQL: SELECT r2.c2, r1."C 1", r4.c3 FROM (("S 1"."T 1" r2 LEFT JOIN "S 1"."T 1" r1 ON (((r1."C 1" = r2."C 1")))) FULL JOIN "S 1"."T 3" r4 ON (((r2."C 1" = r4.c1)))) + Remote SQL: SELECT r1."C 1", r2.c2, r4.c3 FROM (("S 1"."T 1" r2 LEFT JOIN "S 1"."T 1" r1 ON (((r1."C 1" = r2."C 1")))) FULL JOIN "S 1"."T 3" r4 ON (((r2."C 1" = r4.c1)))) (6 rows) SELECT t1.c1, t2.c2, t3.c3 FROM ft2 t1 RIGHT JOIN ft2 t2 ON (t1.c1 = t2.c1) FULL JOIN ft4 t3 ON (t2.c1 = t3.c1) OFFSET 10 LIMIT 10; @@ -1581,7 
+1589,7 @@ SELECT t1.c1, t2.c2, t3.c3 FROM ft2 t1 RIGHT JOIN ft2 t2 ON (t1.c1 = t2.c1) LEFT -> Foreign Scan Output: t1.c1, t2.c2, t3.c3 Relations: ((public.ft2 t2) LEFT JOIN (public.ft2 t1)) LEFT JOIN (public.ft4 t3) - Remote SQL: SELECT r2.c2, r1."C 1", r4.c3 FROM (("S 1"."T 1" r2 LEFT JOIN "S 1"."T 1" r1 ON (((r1."C 1" = r2."C 1")))) LEFT JOIN "S 1"."T 3" r4 ON (((r2."C 1" = r4.c1)))) + Remote SQL: SELECT r1."C 1", r2.c2, r4.c3 FROM (("S 1"."T 1" r2 LEFT JOIN "S 1"."T 1" r1 ON (((r1."C 1" = r2."C 1")))) LEFT JOIN "S 1"."T 3" r4 ON (((r2."C 1" = r4.c1)))) (6 rows) SELECT t1.c1, t2.c2, t3.c3 FROM ft2 t1 RIGHT JOIN ft2 t2 ON (t1.c1 = t2.c1) LEFT JOIN ft4 t3 ON (t2.c1 = t3.c1) OFFSET 10 LIMIT 10; @@ -1609,7 +1617,7 @@ SELECT t1.c1, t2.c2, t3.c3 FROM ft2 t1 LEFT JOIN ft2 t2 ON (t1.c1 = t2.c1) RIGHT -> Foreign Scan Output: t1.c1, t2.c2, t3.c3 Relations: (public.ft4 t3) LEFT JOIN ((public.ft2 t1) INNER JOIN (public.ft2 t2)) - Remote SQL: SELECT r4.c3, r1."C 1", r2.c2 FROM ("S 1"."T 3" r4 LEFT JOIN ("S 1"."T 1" r1 INNER JOIN "S 1"."T 1" r2 ON (((r1."C 1" = r2."C 1")))) ON (((r2."C 1" = r4.c1)))) + Remote SQL: SELECT r1."C 1", r2.c2, r4.c3 FROM ("S 1"."T 3" r4 LEFT JOIN ("S 1"."T 1" r1 INNER JOIN "S 1"."T 1" r2 ON (((r1."C 1" = r2."C 1")))) ON (((r2."C 1" = r4.c1)))) (6 rows) SELECT t1.c1, t2.c2, t3.c3 FROM ft2 t1 LEFT JOIN ft2 t2 ON (t1.c1 = t2.c1) RIGHT JOIN ft4 t3 ON (t2.c1 = t3.c1) OFFSET 10 LIMIT 10; @@ -1668,7 +1676,7 @@ SELECT t1.c1, t2.c2, t1.c3 FROM ft1 t1 FULL JOIN ft2 t2 ON (t1.c1 = t2.c1) WHERE -> Foreign Scan Output: t1.c1, t2.c2, t1.c3 Relations: (public.ft1 t1) FULL JOIN (public.ft2 t2) - Remote SQL: SELECT r1."C 1", r1.c3, r2.c2 FROM ("S 1"."T 1" r1 FULL JOIN "S 1"."T 1" r2 ON (((r1."C 1" = r2."C 1")))) WHERE ((public.postgres_fdw_abs(r1."C 1") > 0)) + Remote SQL: SELECT r1."C 1", r2.c2, r1.c3 FROM ("S 1"."T 1" r1 FULL JOIN "S 1"."T 1" r2 ON (((r1."C 1" = r2."C 1")))) WHERE ((public.postgres_fdw_abs(r1."C 1") > 0)) (6 rows) ALTER SERVER loopback OPTIONS (DROP extensions); @@ -1683,7 +1691,7 @@ SELECT t1.c1, t2.c2, t1.c3 FROM ft1 t1 FULL JOIN ft2 t2 ON (t1.c1 = t2.c1) WHERE Output: t1.c1, t2.c2, t1.c3 Filter: (postgres_fdw_abs(t1.c1) > 0) Relations: (public.ft1 t1) FULL JOIN (public.ft2 t2) - Remote SQL: SELECT r1."C 1", r1.c3, r2.c2 FROM ("S 1"."T 1" r1 FULL JOIN "S 1"."T 1" r2 ON (((r1."C 1" = r2."C 1")))) + Remote SQL: SELECT r1."C 1", r2.c2, r1.c3 FROM ("S 1"."T 1" r1 FULL JOIN "S 1"."T 1" r2 ON (((r1."C 1" = r2."C 1")))) (7 rows) ALTER SERVER loopback OPTIONS (ADD extensions 'postgres_fdw'); @@ -1700,23 +1708,26 @@ SELECT t1.c1, t2.c1 FROM ft1 t1 JOIN ft2 t2 ON (t1.c1 = t2.c1) ORDER BY t1.c3, t -> Foreign Scan Output: t1.c1, t2.c1, t1.c3, t1.*, t2.* Relations: (public.ft1 t1) INNER JOIN (public.ft2 t2) - Remote SQL: SELECT r1."C 1", r1.c3, CASE WHEN (r1.*)::text IS NOT NULL THEN ROW(r1."C 1", r1.c2, r1.c3, r1.c4, r1.c5, r1.c6, r1.c7, r1.c8) END, r2."C 1", CASE WHEN (r2.*)::text IS NOT NULL THEN ROW(r2."C 1", r2.c2, r2.c3, r2.c4, r2.c5, r2.c6, r2.c7, r2.c8) END FROM ("S 1"."T 1" r1 INNER JOIN "S 1"."T 1" r2 ON (((r1."C 1" = r2."C 1")))) ORDER BY r1.c3 ASC NULLS LAST, r1."C 1" ASC NULLS LAST FOR UPDATE OF r1 - -> Merge Join + Remote SQL: SELECT r1."C 1", r2."C 1", r1.c3, CASE WHEN (r1.*)::text IS NOT NULL THEN ROW(r1."C 1", r1.c2, r1.c3, r1.c4, r1.c5, r1.c6, r1.c7, r1.c8) END, CASE WHEN (r2.*)::text IS NOT NULL THEN ROW(r2."C 1", r2.c2, r2.c3, r2.c4, r2.c5, r2.c6, r2.c7, r2.c8) END FROM ("S 1"."T 1" r1 INNER JOIN "S 1"."T 1" r2 ON (((r1."C 1" = r2."C 1")))) ORDER BY r1.c3 ASC 
NULLS LAST, r1."C 1" ASC NULLS LAST FOR UPDATE OF r1 + -> Sort Output: t1.c1, t1.c3, t1.*, t2.c1, t2.* - Merge Cond: (t1.c1 = t2.c1) - -> Sort - Output: t1.c1, t1.c3, t1.* - Sort Key: t1.c1 - -> Foreign Scan on public.ft1 t1 + Sort Key: t1.c3 USING <, t1.c1 + -> Merge Join + Output: t1.c1, t1.c3, t1.*, t2.c1, t2.* + Merge Cond: (t1.c1 = t2.c1) + -> Sort Output: t1.c1, t1.c3, t1.* - Remote SQL: SELECT "C 1", c2, c3, c4, c5, c6, c7, c8 FROM "S 1"."T 1" FOR UPDATE - -> Sort - Output: t2.c1, t2.* - Sort Key: t2.c1 - -> Foreign Scan on public.ft2 t2 + Sort Key: t1.c1 + -> Foreign Scan on public.ft1 t1 + Output: t1.c1, t1.c3, t1.* + Remote SQL: SELECT "C 1", c2, c3, c4, c5, c6, c7, c8 FROM "S 1"."T 1" FOR UPDATE + -> Sort Output: t2.c1, t2.* - Remote SQL: SELECT "C 1", c2, c3, c4, c5, c6, c7, c8 FROM "S 1"."T 1" -(23 rows) + Sort Key: t2.c1 + -> Foreign Scan on public.ft2 t2 + Output: t2.c1, t2.* + Remote SQL: SELECT "C 1", c2, c3, c4, c5, c6, c7, c8 FROM "S 1"."T 1" +(26 rows) SELECT t1.c1, t2.c1 FROM ft1 t1 JOIN ft2 t2 ON (t1.c1 = t2.c1) ORDER BY t1.c3, t1.c1 OFFSET 100 LIMIT 10 FOR UPDATE OF t1; c1 | c1 @@ -1744,23 +1755,26 @@ SELECT t1.c1, t2.c1 FROM ft1 t1 JOIN ft2 t2 ON (t1.c1 = t2.c1) ORDER BY t1.c3, t -> Foreign Scan Output: t1.c1, t2.c1, t1.c3, t1.*, t2.* Relations: (public.ft1 t1) INNER JOIN (public.ft2 t2) - Remote SQL: SELECT r1."C 1", r1.c3, CASE WHEN (r1.*)::text IS NOT NULL THEN ROW(r1."C 1", r1.c2, r1.c3, r1.c4, r1.c5, r1.c6, r1.c7, r1.c8) END, r2."C 1", CASE WHEN (r2.*)::text IS NOT NULL THEN ROW(r2."C 1", r2.c2, r2.c3, r2.c4, r2.c5, r2.c6, r2.c7, r2.c8) END FROM ("S 1"."T 1" r1 INNER JOIN "S 1"."T 1" r2 ON (((r1."C 1" = r2."C 1")))) ORDER BY r1.c3 ASC NULLS LAST, r1."C 1" ASC NULLS LAST FOR UPDATE OF r1 FOR UPDATE OF r2 - -> Merge Join + Remote SQL: SELECT r1."C 1", r2."C 1", r1.c3, CASE WHEN (r1.*)::text IS NOT NULL THEN ROW(r1."C 1", r1.c2, r1.c3, r1.c4, r1.c5, r1.c6, r1.c7, r1.c8) END, CASE WHEN (r2.*)::text IS NOT NULL THEN ROW(r2."C 1", r2.c2, r2.c3, r2.c4, r2.c5, r2.c6, r2.c7, r2.c8) END FROM ("S 1"."T 1" r1 INNER JOIN "S 1"."T 1" r2 ON (((r1."C 1" = r2."C 1")))) ORDER BY r1.c3 ASC NULLS LAST, r1."C 1" ASC NULLS LAST FOR UPDATE OF r1 FOR UPDATE OF r2 + -> Sort Output: t1.c1, t1.c3, t1.*, t2.c1, t2.* - Merge Cond: (t1.c1 = t2.c1) - -> Sort - Output: t1.c1, t1.c3, t1.* - Sort Key: t1.c1 - -> Foreign Scan on public.ft1 t1 + Sort Key: t1.c3 USING <, t1.c1 + -> Merge Join + Output: t1.c1, t1.c3, t1.*, t2.c1, t2.* + Merge Cond: (t1.c1 = t2.c1) + -> Sort Output: t1.c1, t1.c3, t1.* - Remote SQL: SELECT "C 1", c2, c3, c4, c5, c6, c7, c8 FROM "S 1"."T 1" FOR UPDATE - -> Sort - Output: t2.c1, t2.* - Sort Key: t2.c1 - -> Foreign Scan on public.ft2 t2 + Sort Key: t1.c1 + -> Foreign Scan on public.ft1 t1 + Output: t1.c1, t1.c3, t1.* + Remote SQL: SELECT "C 1", c2, c3, c4, c5, c6, c7, c8 FROM "S 1"."T 1" FOR UPDATE + -> Sort Output: t2.c1, t2.* - Remote SQL: SELECT "C 1", c2, c3, c4, c5, c6, c7, c8 FROM "S 1"."T 1" FOR UPDATE -(23 rows) + Sort Key: t2.c1 + -> Foreign Scan on public.ft2 t2 + Output: t2.c1, t2.* + Remote SQL: SELECT "C 1", c2, c3, c4, c5, c6, c7, c8 FROM "S 1"."T 1" FOR UPDATE +(26 rows) SELECT t1.c1, t2.c1 FROM ft1 t1 JOIN ft2 t2 ON (t1.c1 = t2.c1) ORDER BY t1.c3, t1.c1 OFFSET 100 LIMIT 10 FOR UPDATE; c1 | c1 @@ -1789,23 +1803,26 @@ SELECT t1.c1, t2.c1 FROM ft1 t1 JOIN ft2 t2 ON (t1.c1 = t2.c1) ORDER BY t1.c3, t -> Foreign Scan Output: t1.c1, t2.c1, t1.c3, t1.*, t2.* Relations: (public.ft1 t1) INNER JOIN (public.ft2 t2) - Remote SQL: SELECT r1."C 1", r1.c3, CASE WHEN 
(r1.*)::text IS NOT NULL THEN ROW(r1."C 1", r1.c2, r1.c3, r1.c4, r1.c5, r1.c6, r1.c7, r1.c8) END, r2."C 1", CASE WHEN (r2.*)::text IS NOT NULL THEN ROW(r2."C 1", r2.c2, r2.c3, r2.c4, r2.c5, r2.c6, r2.c7, r2.c8) END FROM ("S 1"."T 1" r1 INNER JOIN "S 1"."T 1" r2 ON (((r1."C 1" = r2."C 1")))) ORDER BY r1.c3 ASC NULLS LAST, r1."C 1" ASC NULLS LAST FOR SHARE OF r1 - -> Merge Join + Remote SQL: SELECT r1."C 1", r2."C 1", r1.c3, CASE WHEN (r1.*)::text IS NOT NULL THEN ROW(r1."C 1", r1.c2, r1.c3, r1.c4, r1.c5, r1.c6, r1.c7, r1.c8) END, CASE WHEN (r2.*)::text IS NOT NULL THEN ROW(r2."C 1", r2.c2, r2.c3, r2.c4, r2.c5, r2.c6, r2.c7, r2.c8) END FROM ("S 1"."T 1" r1 INNER JOIN "S 1"."T 1" r2 ON (((r1."C 1" = r2."C 1")))) ORDER BY r1.c3 ASC NULLS LAST, r1."C 1" ASC NULLS LAST FOR SHARE OF r1 + -> Sort Output: t1.c1, t1.c3, t1.*, t2.c1, t2.* - Merge Cond: (t1.c1 = t2.c1) - -> Sort - Output: t1.c1, t1.c3, t1.* - Sort Key: t1.c1 - -> Foreign Scan on public.ft1 t1 + Sort Key: t1.c3 USING <, t1.c1 + -> Merge Join + Output: t1.c1, t1.c3, t1.*, t2.c1, t2.* + Merge Cond: (t1.c1 = t2.c1) + -> Sort Output: t1.c1, t1.c3, t1.* - Remote SQL: SELECT "C 1", c2, c3, c4, c5, c6, c7, c8 FROM "S 1"."T 1" FOR SHARE - -> Sort - Output: t2.c1, t2.* - Sort Key: t2.c1 - -> Foreign Scan on public.ft2 t2 + Sort Key: t1.c1 + -> Foreign Scan on public.ft1 t1 + Output: t1.c1, t1.c3, t1.* + Remote SQL: SELECT "C 1", c2, c3, c4, c5, c6, c7, c8 FROM "S 1"."T 1" FOR SHARE + -> Sort Output: t2.c1, t2.* - Remote SQL: SELECT "C 1", c2, c3, c4, c5, c6, c7, c8 FROM "S 1"."T 1" -(23 rows) + Sort Key: t2.c1 + -> Foreign Scan on public.ft2 t2 + Output: t2.c1, t2.* + Remote SQL: SELECT "C 1", c2, c3, c4, c5, c6, c7, c8 FROM "S 1"."T 1" +(26 rows) SELECT t1.c1, t2.c1 FROM ft1 t1 JOIN ft2 t2 ON (t1.c1 = t2.c1) ORDER BY t1.c3, t1.c1 OFFSET 100 LIMIT 10 FOR SHARE OF t1; c1 | c1 @@ -1833,23 +1850,26 @@ SELECT t1.c1, t2.c1 FROM ft1 t1 JOIN ft2 t2 ON (t1.c1 = t2.c1) ORDER BY t1.c3, t -> Foreign Scan Output: t1.c1, t2.c1, t1.c3, t1.*, t2.* Relations: (public.ft1 t1) INNER JOIN (public.ft2 t2) - Remote SQL: SELECT r1."C 1", r1.c3, CASE WHEN (r1.*)::text IS NOT NULL THEN ROW(r1."C 1", r1.c2, r1.c3, r1.c4, r1.c5, r1.c6, r1.c7, r1.c8) END, r2."C 1", CASE WHEN (r2.*)::text IS NOT NULL THEN ROW(r2."C 1", r2.c2, r2.c3, r2.c4, r2.c5, r2.c6, r2.c7, r2.c8) END FROM ("S 1"."T 1" r1 INNER JOIN "S 1"."T 1" r2 ON (((r1."C 1" = r2."C 1")))) ORDER BY r1.c3 ASC NULLS LAST, r1."C 1" ASC NULLS LAST FOR SHARE OF r1 FOR SHARE OF r2 - -> Merge Join + Remote SQL: SELECT r1."C 1", r2."C 1", r1.c3, CASE WHEN (r1.*)::text IS NOT NULL THEN ROW(r1."C 1", r1.c2, r1.c3, r1.c4, r1.c5, r1.c6, r1.c7, r1.c8) END, CASE WHEN (r2.*)::text IS NOT NULL THEN ROW(r2."C 1", r2.c2, r2.c3, r2.c4, r2.c5, r2.c6, r2.c7, r2.c8) END FROM ("S 1"."T 1" r1 INNER JOIN "S 1"."T 1" r2 ON (((r1."C 1" = r2."C 1")))) ORDER BY r1.c3 ASC NULLS LAST, r1."C 1" ASC NULLS LAST FOR SHARE OF r1 FOR SHARE OF r2 + -> Sort Output: t1.c1, t1.c3, t1.*, t2.c1, t2.* - Merge Cond: (t1.c1 = t2.c1) - -> Sort - Output: t1.c1, t1.c3, t1.* - Sort Key: t1.c1 - -> Foreign Scan on public.ft1 t1 + Sort Key: t1.c3 USING <, t1.c1 + -> Merge Join + Output: t1.c1, t1.c3, t1.*, t2.c1, t2.* + Merge Cond: (t1.c1 = t2.c1) + -> Sort Output: t1.c1, t1.c3, t1.* - Remote SQL: SELECT "C 1", c2, c3, c4, c5, c6, c7, c8 FROM "S 1"."T 1" FOR SHARE - -> Sort - Output: t2.c1, t2.* - Sort Key: t2.c1 - -> Foreign Scan on public.ft2 t2 + Sort Key: t1.c1 + -> Foreign Scan on public.ft1 t1 + Output: t1.c1, t1.c3, t1.* + Remote SQL: SELECT "C 1", c2, c3, 
c4, c5, c6, c7, c8 FROM "S 1"."T 1" FOR SHARE + -> Sort Output: t2.c1, t2.* - Remote SQL: SELECT "C 1", c2, c3, c4, c5, c6, c7, c8 FROM "S 1"."T 1" FOR SHARE -(23 rows) + Sort Key: t2.c1 + -> Foreign Scan on public.ft2 t2 + Output: t2.c1, t2.* + Remote SQL: SELECT "C 1", c2, c3, c4, c5, c6, c7, c8 FROM "S 1"."T 1" FOR SHARE +(26 rows) SELECT t1.c1, t2.c1 FROM ft1 t1 JOIN ft2 t2 ON (t1.c1 = t2.c1) ORDER BY t1.c3, t1.c1 OFFSET 100 LIMIT 10 FOR SHARE; c1 | c1 @@ -1910,7 +1930,7 @@ SELECT t1.ctid, t1, t2, t1.c1 FROM ft1 t1 JOIN ft2 t2 ON (t1.c1 = t2.c1) ORDER B -> Foreign Scan Output: t1.ctid, t1.*, t2.*, t1.c1, t1.c3 Relations: (public.ft1 t1) INNER JOIN (public.ft2 t2) - Remote SQL: SELECT r1.ctid, CASE WHEN (r1.*)::text IS NOT NULL THEN ROW(r1."C 1", r1.c2, r1.c3, r1.c4, r1.c5, r1.c6, r1.c7, r1.c8) END, r1."C 1", r1.c3, CASE WHEN (r2.*)::text IS NOT NULL THEN ROW(r2."C 1", r2.c2, r2.c3, r2.c4, r2.c5, r2.c6, r2.c7, r2.c8) END FROM ("S 1"."T 1" r1 INNER JOIN "S 1"."T 1" r2 ON (((r1."C 1" = r2."C 1")))) ORDER BY r1.c3 ASC NULLS LAST, r1."C 1" ASC NULLS LAST + Remote SQL: SELECT r1.ctid, CASE WHEN (r1.*)::text IS NOT NULL THEN ROW(r1."C 1", r1.c2, r1.c3, r1.c4, r1.c5, r1.c6, r1.c7, r1.c8) END, CASE WHEN (r2.*)::text IS NOT NULL THEN ROW(r2."C 1", r2.c2, r2.c3, r2.c4, r2.c5, r2.c6, r2.c7, r2.c8) END, r1."C 1", r1.c3 FROM ("S 1"."T 1" r1 INNER JOIN "S 1"."T 1" r2 ON (((r1."C 1" = r2."C 1")))) ORDER BY r1.c3 ASC NULLS LAST, r1."C 1" ASC NULLS LAST (6 rows) -- SEMI JOIN, not pushed down @@ -2140,7 +2160,7 @@ SELECT t1.c1, t2.c1 FROM ft1 t1 JOIN ft2 t2 ON (t1.c1 = t2.c1) WHERE t1.c8 = t2. Output: t1.c1, t2.c1, t1.c3 Filter: (t1.c8 = t2.c8) Relations: (public.ft1 t1) INNER JOIN (public.ft2 t2) - Remote SQL: SELECT r1."C 1", r1.c3, r2."C 1", r1.c8, r2.c8 FROM ("S 1"."T 1" r1 INNER JOIN "S 1"."T 1" r2 ON (((r1."C 1" = r2."C 1")))) + Remote SQL: SELECT r1."C 1", r2."C 1", r1.c3, r1.c8, r2.c8 FROM ("S 1"."T 1" r1 INNER JOIN "S 1"."T 1" r2 ON (((r1."C 1" = r2."C 1")))) (10 rows) SELECT t1.c1, t2.c1 FROM ft1 t1 JOIN ft2 t2 ON (t1.c1 = t2.c1) WHERE t1.c8 = t2.c8 ORDER BY t1.c3, t1.c1 OFFSET 100 LIMIT 10; @@ -2314,8 +2334,78 @@ SELECT ft5, ft5.c1, ft5.c2, ft5.c3, ft4.c1, ft4.c2 FROM ft5 left join ft4 on ft5 (30,31,AAA030) | 30 | 31 | AAA030 | 30 | 31 (4 rows) +-- multi-way join involving multiple merge joins +-- (this case used to have EPQ-related planning problems) +SET enable_nestloop TO false; +SET enable_hashjoin TO false; +EXPLAIN (VERBOSE, COSTS OFF) +SELECT * FROM ft1, ft2, ft4, ft5 WHERE ft1.c1 = ft2.c1 AND ft1.c2 = ft4.c1 + AND ft1.c2 = ft5.c1 AND ft1.c1 < 100 AND ft2.c1 < 100 FOR UPDATE; + QUERY PLAN +-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + LockRows + Output: ft1.c1, ft1.c2, ft1.c3, ft1.c4, ft1.c5, ft1.c6, ft1.c7, 
ft1.c8, ft2.c1, ft2.c2, ft2.c3, ft2.c4, ft2.c5, ft2.c6, ft2.c7, ft2.c8, ft4.c1, ft4.c2, ft4.c3, ft5.c1, ft5.c2, ft5.c3, ft1.*, ft2.*, ft4.*, ft5.* + -> Foreign Scan + Output: ft1.c1, ft1.c2, ft1.c3, ft1.c4, ft1.c5, ft1.c6, ft1.c7, ft1.c8, ft2.c1, ft2.c2, ft2.c3, ft2.c4, ft2.c5, ft2.c6, ft2.c7, ft2.c8, ft4.c1, ft4.c2, ft4.c3, ft5.c1, ft5.c2, ft5.c3, ft1.*, ft2.*, ft4.*, ft5.* + Relations: (((public.ft1) INNER JOIN (public.ft2)) INNER JOIN (public.ft4)) INNER JOIN (public.ft5) + Remote SQL: SELECT r1."C 1", r1.c2, r1.c3, r1.c4, r1.c5, r1.c6, r1.c7, r1.c8, r2."C 1", r2.c2, r2.c3, r2.c4, r2.c5, r2.c6, r2.c7, r2.c8, r3.c1, r3.c2, r3.c3, r4.c1, r4.c2, r4.c3, CASE WHEN (r1.*)::text IS NOT NULL THEN ROW(r1."C 1", r1.c2, r1.c3, r1.c4, r1.c5, r1.c6, r1.c7, r1.c8) END, CASE WHEN (r2.*)::text IS NOT NULL THEN ROW(r2."C 1", r2.c2, r2.c3, r2.c4, r2.c5, r2.c6, r2.c7, r2.c8) END, CASE WHEN (r3.*)::text IS NOT NULL THEN ROW(r3.c1, r3.c2, r3.c3) END, CASE WHEN (r4.*)::text IS NOT NULL THEN ROW(r4.c1, r4.c2, r4.c3) END FROM ((("S 1"."T 1" r1 INNER JOIN "S 1"."T 1" r2 ON (((r1."C 1" = r2."C 1")) AND ((r2."C 1" < 100)) AND ((r1."C 1" < 100)))) INNER JOIN "S 1"."T 3" r3 ON (((r1.c2 = r3.c1)))) INNER JOIN "S 1"."T 4" r4 ON (((r1.c2 = r4.c1)))) FOR UPDATE OF r1 FOR UPDATE OF r2 FOR UPDATE OF r3 FOR UPDATE OF r4 + -> Merge Join + Output: ft1.c1, ft1.c2, ft1.c3, ft1.c4, ft1.c5, ft1.c6, ft1.c7, ft1.c8, ft2.c1, ft2.c2, ft2.c3, ft2.c4, ft2.c5, ft2.c6, ft2.c7, ft2.c8, ft4.c1, ft4.c2, ft4.c3, ft5.c1, ft5.c2, ft5.c3, ft1.*, ft2.*, ft4.*, ft5.* + Merge Cond: (ft1.c2 = ft5.c1) + -> Merge Join + Output: ft1.c1, ft1.c2, ft1.c3, ft1.c4, ft1.c5, ft1.c6, ft1.c7, ft1.c8, ft1.*, ft2.c1, ft2.c2, ft2.c3, ft2.c4, ft2.c5, ft2.c6, ft2.c7, ft2.c8, ft2.*, ft4.c1, ft4.c2, ft4.c3, ft4.* + Merge Cond: (ft1.c2 = ft4.c1) + -> Sort + Output: ft1.c1, ft1.c2, ft1.c3, ft1.c4, ft1.c5, ft1.c6, ft1.c7, ft1.c8, ft1.*, ft2.c1, ft2.c2, ft2.c3, ft2.c4, ft2.c5, ft2.c6, ft2.c7, ft2.c8, ft2.* + Sort Key: ft1.c2 + -> Merge Join + Output: ft1.c1, ft1.c2, ft1.c3, ft1.c4, ft1.c5, ft1.c6, ft1.c7, ft1.c8, ft1.*, ft2.c1, ft2.c2, ft2.c3, ft2.c4, ft2.c5, ft2.c6, ft2.c7, ft2.c8, ft2.* + Merge Cond: (ft1.c1 = ft2.c1) + -> Sort + Output: ft1.c1, ft1.c2, ft1.c3, ft1.c4, ft1.c5, ft1.c6, ft1.c7, ft1.c8, ft1.* + Sort Key: ft1.c1 + -> Foreign Scan on public.ft1 + Output: ft1.c1, ft1.c2, ft1.c3, ft1.c4, ft1.c5, ft1.c6, ft1.c7, ft1.c8, ft1.* + Remote SQL: SELECT "C 1", c2, c3, c4, c5, c6, c7, c8 FROM "S 1"."T 1" WHERE (("C 1" < 100)) FOR UPDATE + -> Materialize + Output: ft2.c1, ft2.c2, ft2.c3, ft2.c4, ft2.c5, ft2.c6, ft2.c7, ft2.c8, ft2.* + -> Foreign Scan on public.ft2 + Output: ft2.c1, ft2.c2, ft2.c3, ft2.c4, ft2.c5, ft2.c6, ft2.c7, ft2.c8, ft2.* + Remote SQL: SELECT "C 1", c2, c3, c4, c5, c6, c7, c8 FROM "S 1"."T 1" WHERE (("C 1" < 100)) ORDER BY "C 1" ASC NULLS LAST FOR UPDATE + -> Sort + Output: ft4.c1, ft4.c2, ft4.c3, ft4.* + Sort Key: ft4.c1 + -> Foreign Scan on public.ft4 + Output: ft4.c1, ft4.c2, ft4.c3, ft4.* + Remote SQL: SELECT c1, c2, c3 FROM "S 1"."T 3" FOR UPDATE + -> Sort + Output: ft5.c1, ft5.c2, ft5.c3, ft5.* + Sort Key: ft5.c1 + -> Foreign Scan on public.ft5 + Output: ft5.c1, ft5.c2, ft5.c3, ft5.* + Remote SQL: SELECT c1, c2, c3 FROM "S 1"."T 4" FOR UPDATE +(41 rows) + +SELECT * FROM ft1, ft2, ft4, ft5 WHERE ft1.c1 = ft2.c1 AND ft1.c2 = ft4.c1 + AND ft1.c2 = ft5.c1 AND ft1.c1 < 100 AND ft2.c1 < 100 FOR UPDATE; + c1 | c2 | c3 | c4 | c5 | c6 | c7 | c8 | c1 | c2 | c3 | c4 | c5 | c6 | c7 | c8 | c1 | c2 | c3 | c1 | c2 | c3 
+----+----+-------+------------------------------+--------------------------+----+------------+-----+----+----+-------+------------------------------+--------------------------+----+------------+-----+----+----+--------+----+----+-------- + 6 | 6 | 00006 | Wed Jan 07 00:00:00 1970 PST | Wed Jan 07 00:00:00 1970 | 6 | 6 | foo | 6 | 6 | 00006 | Wed Jan 07 00:00:00 1970 PST | Wed Jan 07 00:00:00 1970 | 6 | 6 | foo | 6 | 7 | AAA006 | 6 | 7 | AAA006 + 16 | 6 | 00016 | Sat Jan 17 00:00:00 1970 PST | Sat Jan 17 00:00:00 1970 | 6 | 6 | foo | 16 | 6 | 00016 | Sat Jan 17 00:00:00 1970 PST | Sat Jan 17 00:00:00 1970 | 6 | 6 | foo | 6 | 7 | AAA006 | 6 | 7 | AAA006 + 26 | 6 | 00026 | Tue Jan 27 00:00:00 1970 PST | Tue Jan 27 00:00:00 1970 | 6 | 6 | foo | 26 | 6 | 00026 | Tue Jan 27 00:00:00 1970 PST | Tue Jan 27 00:00:00 1970 | 6 | 6 | foo | 6 | 7 | AAA006 | 6 | 7 | AAA006 + 36 | 6 | 00036 | Fri Feb 06 00:00:00 1970 PST | Fri Feb 06 00:00:00 1970 | 6 | 6 | foo | 36 | 6 | 00036 | Fri Feb 06 00:00:00 1970 PST | Fri Feb 06 00:00:00 1970 | 6 | 6 | foo | 6 | 7 | AAA006 | 6 | 7 | AAA006 + 46 | 6 | 00046 | Mon Feb 16 00:00:00 1970 PST | Mon Feb 16 00:00:00 1970 | 6 | 6 | foo | 46 | 6 | 00046 | Mon Feb 16 00:00:00 1970 PST | Mon Feb 16 00:00:00 1970 | 6 | 6 | foo | 6 | 7 | AAA006 | 6 | 7 | AAA006 + 56 | 6 | 00056 | Thu Feb 26 00:00:00 1970 PST | Thu Feb 26 00:00:00 1970 | 6 | 6 | foo | 56 | 6 | 00056 | Thu Feb 26 00:00:00 1970 PST | Thu Feb 26 00:00:00 1970 | 6 | 6 | foo | 6 | 7 | AAA006 | 6 | 7 | AAA006 + 66 | 6 | 00066 | Sun Mar 08 00:00:00 1970 PST | Sun Mar 08 00:00:00 1970 | 6 | 6 | foo | 66 | 6 | 00066 | Sun Mar 08 00:00:00 1970 PST | Sun Mar 08 00:00:00 1970 | 6 | 6 | foo | 6 | 7 | AAA006 | 6 | 7 | AAA006 + 76 | 6 | 00076 | Wed Mar 18 00:00:00 1970 PST | Wed Mar 18 00:00:00 1970 | 6 | 6 | foo | 76 | 6 | 00076 | Wed Mar 18 00:00:00 1970 PST | Wed Mar 18 00:00:00 1970 | 6 | 6 | foo | 6 | 7 | AAA006 | 6 | 7 | AAA006 + 86 | 6 | 00086 | Sat Mar 28 00:00:00 1970 PST | Sat Mar 28 00:00:00 1970 | 6 | 6 | foo | 86 | 6 | 00086 | Sat Mar 28 00:00:00 1970 PST | Sat Mar 28 00:00:00 1970 | 6 | 6 | foo | 6 | 7 | AAA006 | 6 | 7 | AAA006 + 96 | 6 | 00096 | Tue Apr 07 00:00:00 1970 PST | Tue Apr 07 00:00:00 1970 | 6 | 6 | foo | 96 | 6 | 00096 | Tue Apr 07 00:00:00 1970 PST | Tue Apr 07 00:00:00 1970 | 6 | 6 | foo | 6 | 7 | AAA006 | 6 | 7 | AAA006 +(10 rows) + +RESET enable_nestloop; +RESET enable_hashjoin; -- check join pushdown in situations where multiple userids are involved -CREATE ROLE regress_view_owner; +CREATE ROLE regress_view_owner SUPERUSER; CREATE USER MAPPING FOR regress_view_owner SERVER loopback; GRANT SELECT ON ft4 TO regress_view_owner; GRANT SELECT ON ft5 TO regress_view_owner; @@ -2462,8 +2552,8 @@ DROP ROLE regress_view_owner; -- Simple aggregates explain (verbose, costs off) select count(c6), sum(c1), avg(c1), min(c2), max(c1), stddev(c2), sum(c1) * (random() <= 1)::int as sum2 from ft1 where c2 < 5 group by c2 order by 1, 2; - QUERY PLAN -------------------------------------------------------------------------------------------------------------------------------------------------------- + QUERY PLAN +------------------------------------------------------------------------------------------------------------------------------------------------------ Result Output: (count(c6)), (sum(c1)), (avg(c1)), (min(c2)), (max(c1)), (stddev(c2)), ((sum(c1)) * ((random() <= '1'::double precision))::integer), c2 -> Sort @@ -2472,7 +2562,7 @@ select count(c6), sum(c1), avg(c1), min(c2), max(c1), stddev(c2), sum(c1) 
* (ran -> Foreign Scan Output: (count(c6)), (sum(c1)), (avg(c1)), (min(c2)), (max(c1)), (stddev(c2)), c2 Relations: Aggregate on (public.ft1) - Remote SQL: SELECT count(c6), sum("C 1"), avg("C 1"), min(c2), max("C 1"), stddev(c2), c2 FROM "S 1"."T 1" WHERE ((c2 < 5)) GROUP BY c2 + Remote SQL: SELECT count(c6), sum("C 1"), avg("C 1"), min(c2), max("C 1"), stddev(c2), c2 FROM "S 1"."T 1" WHERE ((c2 < 5)) GROUP BY 7 (9 rows) select count(c6), sum(c1), avg(c1), min(c2), max(c1), stddev(c2), sum(c1) * (random() <= 1)::int as sum2 from ft1 where c2 < 5 group by c2 order by 1, 2; @@ -2531,15 +2621,15 @@ select sum(t1.c1), count(t2.c1) from ft1 t1 inner join ft2 t2 on (t1.c1 = t2.c1) -- GROUP BY clause having expressions explain (verbose, costs off) select c2/2, sum(c2) * (c2/2) from ft1 group by c2/2 order by c2/2; - QUERY PLAN ------------------------------------------------------------------------------------------------- + QUERY PLAN +--------------------------------------------------------------------------------------- Sort Output: ((c2 / 2)), ((sum(c2) * (c2 / 2))) Sort Key: ((ft1.c2 / 2)) -> Foreign Scan Output: ((c2 / 2)), ((sum(c2) * (c2 / 2))) Relations: Aggregate on (public.ft1) - Remote SQL: SELECT (c2 / 2), (sum(c2) * (c2 / 2)) FROM "S 1"."T 1" GROUP BY ((c2 / 2)) + Remote SQL: SELECT (c2 / 2), (sum(c2) * (c2 / 2)) FROM "S 1"."T 1" GROUP BY 1 (7 rows) select c2/2, sum(c2) * (c2/2) from ft1 group by c2/2 order by c2/2; @@ -2555,8 +2645,8 @@ select c2/2, sum(c2) * (c2/2) from ft1 group by c2/2 order by c2/2; -- Aggregates in subquery are pushed down. explain (verbose, costs off) select count(x.a), sum(x.a) from (select c2 a, sum(c1) b from ft1 group by c2, sqrt(c1) order by 1, 2) x; - QUERY PLAN ----------------------------------------------------------------------------------------------------------- + QUERY PLAN +--------------------------------------------------------------------------------------------- Aggregate Output: count(ft1.c2), sum(ft1.c2) -> Sort @@ -2565,7 +2655,7 @@ select count(x.a), sum(x.a) from (select c2 a, sum(c1) b from ft1 group by c2, s -> Foreign Scan Output: ft1.c2, (sum(ft1.c1)), (sqrt((ft1.c1)::double precision)) Relations: Aggregate on (public.ft1) - Remote SQL: SELECT c2, sum("C 1"), sqrt("C 1") FROM "S 1"."T 1" GROUP BY c2, (sqrt("C 1")) + Remote SQL: SELECT c2, sum("C 1"), sqrt("C 1") FROM "S 1"."T 1" GROUP BY 1, 3 (9 rows) select count(x.a), sum(x.a) from (select c2 a, sum(c1) b from ft1 group by c2, sqrt(c1) order by 1, 2) x; @@ -2585,7 +2675,7 @@ select c2 * (random() <= 1)::int as sum1, sum(c1) * c2 as sum2 from ft1 group by -> Foreign Scan Output: (c2 * ((random() <= '1'::double precision))::integer), ((sum(c1) * c2)), c2 Relations: Aggregate on (public.ft1) - Remote SQL: SELECT (sum("C 1") * c2), c2 FROM "S 1"."T 1" GROUP BY c2 + Remote SQL: SELECT (sum("C 1") * c2), c2 FROM "S 1"."T 1" GROUP BY 2 (7 rows) select c2 * (random() <= 1)::int as sum1, sum(c1) * c2 as sum2 from ft1 group by c2 order by 1, 2; @@ -2622,15 +2712,15 @@ select c2 * (random() <= 1)::int as c2 from ft2 group by c2 * (random() <= 1)::i -- GROUP BY clause in various forms, cardinal, alias and constant expression explain (verbose, costs off) select count(c2) w, c2 x, 5 y, 7.0 z from ft1 group by 2, y, 9.0::int order by 2; - QUERY PLAN ----------------------------------------------------------------------------------------------------------- + QUERY PLAN +--------------------------------------------------------------------------------------- Sort Output: (count(c2)), c2, 5, 7.0, 9 
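With the new force_colno flag, appendGroupByClause always emits GROUP BY items as target-list ordinals, while ORDER BY inside aggregates keeps the expression form. That avoids two pitfalls the old expression form had: a bare integer constant in GROUP BY would be misread remotely as a column position (the retained typecast comment above exists for exactly that reason), and the same expression can appear more than once in the deparsed SELECT list, as in the duplicate-column test added just below:

-- Local query (from the new regression test): the same column twice.
select c2, c2 from ft1 where c2 > 6 group by 1, 2 order by sum(c1);
-- Deparsed remote query references the output columns by position:
--   SELECT c2, c2, sum("C 1") FROM "S 1"."T 1" WHERE ((c2 > 6)) GROUP BY 1, 2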
Sort Key: ft1.c2 -> Foreign Scan Output: (count(c2)), c2, 5, 7.0, 9 Relations: Aggregate on (public.ft1) - Remote SQL: SELECT count(c2), c2, 5, 7.0, 9 FROM "S 1"."T 1" GROUP BY c2, 5::integer, 9::integer + Remote SQL: SELECT count(c2), c2, 5, 7.0, 9 FROM "S 1"."T 1" GROUP BY 2, 3, 5 (7 rows) select count(c2) w, c2 x, 5 y, 7.0 z from ft1 group by 2, y, 9.0::int order by 2; @@ -2648,18 +2738,41 @@ select count(c2) w, c2 x, 5 y, 7.0 z from ft1 group by 2, y, 9.0::int order by 2 100 | 9 | 5 | 7.0 (10 rows) +-- GROUP BY clause referring to same column multiple times +-- Also, ORDER BY contains an aggregate function +explain (verbose, costs off) +select c2, c2 from ft1 where c2 > 6 group by 1, 2 order by sum(c1); + QUERY PLAN +----------------------------------------------------------------------------------------------- + Sort + Output: c2, c2, (sum(c1)) + Sort Key: (sum(ft1.c1)) + -> Foreign Scan + Output: c2, c2, (sum(c1)) + Relations: Aggregate on (public.ft1) + Remote SQL: SELECT c2, c2, sum("C 1") FROM "S 1"."T 1" WHERE ((c2 > 6)) GROUP BY 1, 2 +(7 rows) + +select c2, c2 from ft1 where c2 > 6 group by 1, 2 order by sum(c1); + c2 | c2 +----+---- + 7 | 7 + 8 | 8 + 9 | 9 +(3 rows) + -- Testing HAVING clause shippability explain (verbose, costs off) select c2, sum(c1) from ft2 group by c2 having avg(c1) < 500 and sum(c1) < 49800 order by c2; - QUERY PLAN ----------------------------------------------------------------------------------------------------------------------------------------- + QUERY PLAN +--------------------------------------------------------------------------------------------------------------------------------------- Sort Output: c2, (sum(c1)) Sort Key: ft2.c2 -> Foreign Scan Output: c2, (sum(c1)) Relations: Aggregate on (public.ft2) - Remote SQL: SELECT c2, sum("C 1") FROM "S 1"."T 1" GROUP BY c2 HAVING ((avg("C 1") < 500::numeric)) AND ((sum("C 1") < 49800)) + Remote SQL: SELECT c2, sum("C 1") FROM "S 1"."T 1" GROUP BY 1 HAVING ((avg("C 1") < 500::numeric)) AND ((sum("C 1") < 49800)) (7 rows) select c2, sum(c1) from ft2 group by c2 having avg(c1) < 500 and sum(c1) < 49800 order by c2; @@ -2672,15 +2785,15 @@ select c2, sum(c1) from ft2 group by c2 having avg(c1) < 500 and sum(c1) < 49800 -- Unshippable HAVING clause will be evaluated locally, and other qual in HAVING clause is pushed down explain (verbose, costs off) select count(*) from (select c5, count(c1) from ft1 group by c5, sqrt(c2) having (avg(c1) / avg(c1)) * random() <= 1 and avg(c1) < 500) x; - QUERY PLAN -------------------------------------------------------------------------------------------------------------------------------------------------- + QUERY PLAN +--------------------------------------------------------------------------------------------------------------------------------------- Aggregate Output: count(*) -> Foreign Scan Output: ft1.c5, NULL::bigint, (sqrt((ft1.c2)::double precision)) Filter: (((((avg(ft1.c1)) / (avg(ft1.c1))))::double precision * random()) <= '1'::double precision) Relations: Aggregate on (public.ft1) - Remote SQL: SELECT c5, NULL::bigint, sqrt(c2), avg("C 1") FROM "S 1"."T 1" GROUP BY c5, (sqrt(c2)) HAVING ((avg("C 1") < 500::numeric)) + Remote SQL: SELECT c5, NULL::bigint, sqrt(c2), avg("C 1") FROM "S 1"."T 1" GROUP BY 1, 3 HAVING ((avg("C 1") < 500::numeric)) (7 rows) select count(*) from (select c5, count(c1) from ft1 group by c5, sqrt(c2) having (avg(c1) / avg(c1)) * random() <= 1 and avg(c1) < 500) x; @@ -2702,7 +2815,7 @@ select sum(c1) from ft1 group by c2 having 
avg(c1 * (random() <= 1)::int) > 100 Group Key: ft1.c2 Filter: (avg((ft1.c1 * ((random() <= '1'::double precision))::integer)) > '100'::numeric) -> Foreign Scan on public.ft1 - Output: c2, c1 + Output: c1, c2 Remote SQL: SELECT "C 1", c2 FROM "S 1"."T 1" (10 rows) @@ -2710,15 +2823,15 @@ select sum(c1) from ft1 group by c2 having avg(c1 * (random() <= 1)::int) > 100 -- ORDER BY within aggregate, same column used to order explain (verbose, costs off) select array_agg(c1 order by c1) from ft1 where c1 < 100 group by c2 order by 1; - QUERY PLAN ----------------------------------------------------------------------------------------------------------------------------------- + QUERY PLAN +--------------------------------------------------------------------------------------------------------------------------------- Sort Output: (array_agg(c1 ORDER BY c1)), c2 Sort Key: (array_agg(ft1.c1 ORDER BY ft1.c1)) -> Foreign Scan Output: (array_agg(c1 ORDER BY c1)), c2 Relations: Aggregate on (public.ft1) - Remote SQL: SELECT array_agg("C 1" ORDER BY "C 1" ASC NULLS LAST), c2 FROM "S 1"."T 1" WHERE (("C 1" < 100)) GROUP BY c2 + Remote SQL: SELECT array_agg("C 1" ORDER BY "C 1" ASC NULLS LAST), c2 FROM "S 1"."T 1" WHERE (("C 1" < 100)) GROUP BY 2 (7 rows) select array_agg(c1 order by c1) from ft1 where c1 < 100 group by c2 order by 1; @@ -2756,15 +2869,15 @@ select array_agg(c5 order by c1 desc) from ft2 where c2 = 6 and c1 < 50; -- DISTINCT within aggregate explain (verbose, costs off) select array_agg(distinct (t1.c1)%5) from ft4 t1 full join ft5 t2 on (t1.c1 = t2.c1) where t1.c1 < 20 or (t1.c1 is null and t2.c1 < 5) group by (t2.c1)%3 order by 1; - QUERY PLAN ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + QUERY PLAN +---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- Sort Output: (array_agg(DISTINCT (t1.c1 % 5))), ((t2.c1 % 3)) Sort Key: (array_agg(DISTINCT (t1.c1 % 5))) -> Foreign Scan Output: (array_agg(DISTINCT (t1.c1 % 5))), ((t2.c1 % 3)) Relations: Aggregate on ((public.ft4 t1) FULL JOIN (public.ft5 t2)) - Remote SQL: SELECT array_agg(DISTINCT (r1.c1 % 5)), (r2.c1 % 3) FROM ("S 1"."T 3" r1 FULL JOIN "S 1"."T 4" r2 ON (((r1.c1 = r2.c1)))) WHERE (((r1.c1 < 20) OR ((r1.c1 IS NULL) AND (r2.c1 < 5)))) GROUP BY ((r2.c1 % 3)) + Remote SQL: SELECT array_agg(DISTINCT (r1.c1 % 5)), (r2.c1 % 3) FROM ("S 1"."T 3" r1 FULL JOIN "S 1"."T 4" r2 ON (((r1.c1 = r2.c1)))) WHERE (((r1.c1 < 20) OR ((r1.c1 IS NULL) AND (r2.c1 < 5)))) GROUP BY 2 (7 rows) select array_agg(distinct (t1.c1)%5) from ft4 t1 full join ft5 t2 on (t1.c1 = t2.c1) where t1.c1 < 20 or (t1.c1 is null and t2.c1 < 5) group by (t2.c1)%3 order by 1; @@ -2777,15 +2890,15 @@ select array_agg(distinct (t1.c1)%5) from ft4 t1 full join ft5 t2 on (t1.c1 = t2 -- DISTINCT combined with ORDER BY within aggregate explain (verbose, costs off) select array_agg(distinct (t1.c1)%5 order by (t1.c1)%5) from ft4 t1 full join ft5 t2 on (t1.c1 = t2.c1) where t1.c1 < 20 or (t1.c1 is null and t2.c1 < 5) group by (t2.c1)%3 order by 1; - QUERY PLAN 
------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + QUERY PLAN +------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ Sort Output: (array_agg(DISTINCT (t1.c1 % 5) ORDER BY (t1.c1 % 5))), ((t2.c1 % 3)) Sort Key: (array_agg(DISTINCT (t1.c1 % 5) ORDER BY (t1.c1 % 5))) -> Foreign Scan Output: (array_agg(DISTINCT (t1.c1 % 5) ORDER BY (t1.c1 % 5))), ((t2.c1 % 3)) Relations: Aggregate on ((public.ft4 t1) FULL JOIN (public.ft5 t2)) - Remote SQL: SELECT array_agg(DISTINCT (r1.c1 % 5) ORDER BY ((r1.c1 % 5)) ASC NULLS LAST), (r2.c1 % 3) FROM ("S 1"."T 3" r1 FULL JOIN "S 1"."T 4" r2 ON (((r1.c1 = r2.c1)))) WHERE (((r1.c1 < 20) OR ((r1.c1 IS NULL) AND (r2.c1 < 5)))) GROUP BY ((r2.c1 % 3)) + Remote SQL: SELECT array_agg(DISTINCT (r1.c1 % 5) ORDER BY ((r1.c1 % 5)) ASC NULLS LAST), (r2.c1 % 3) FROM ("S 1"."T 3" r1 FULL JOIN "S 1"."T 4" r2 ON (((r1.c1 = r2.c1)))) WHERE (((r1.c1 < 20) OR ((r1.c1 IS NULL) AND (r2.c1 < 5)))) GROUP BY 2 (7 rows) select array_agg(distinct (t1.c1)%5 order by (t1.c1)%5) from ft4 t1 full join ft5 t2 on (t1.c1 = t2.c1) where t1.c1 < 20 or (t1.c1 is null and t2.c1 < 5) group by (t2.c1)%3 order by 1; @@ -2797,15 +2910,15 @@ select array_agg(distinct (t1.c1)%5 order by (t1.c1)%5) from ft4 t1 full join ft explain (verbose, costs off) select array_agg(distinct (t1.c1)%5 order by (t1.c1)%5 desc nulls last) from ft4 t1 full join ft5 t2 on (t1.c1 = t2.c1) where t1.c1 < 20 or (t1.c1 is null and t2.c1 < 5) group by (t2.c1)%3 order by 1; - QUERY PLAN -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + QUERY PLAN +------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- Sort Output: (array_agg(DISTINCT (t1.c1 % 5) ORDER BY (t1.c1 % 5) DESC NULLS LAST)), ((t2.c1 % 3)) Sort Key: (array_agg(DISTINCT (t1.c1 % 5) ORDER BY (t1.c1 % 5) DESC NULLS LAST)) -> Foreign Scan Output: (array_agg(DISTINCT (t1.c1 % 5) ORDER BY (t1.c1 % 5) DESC NULLS LAST)), ((t2.c1 % 3)) Relations: Aggregate on ((public.ft4 t1) FULL JOIN (public.ft5 t2)) - Remote SQL: SELECT array_agg(DISTINCT (r1.c1 % 5) ORDER BY ((r1.c1 % 5)) DESC NULLS LAST), (r2.c1 % 3) FROM ("S 1"."T 3" r1 FULL JOIN "S 1"."T 4" r2 ON (((r1.c1 = r2.c1)))) WHERE (((r1.c1 < 20) OR ((r1.c1 IS NULL) AND (r2.c1 < 5)))) GROUP BY ((r2.c1 % 3)) + Remote SQL: SELECT array_agg(DISTINCT (r1.c1 % 5) ORDER BY ((r1.c1 % 5)) DESC NULLS LAST), (r2.c1 % 3) FROM ("S 1"."T 3" r1 FULL JOIN "S 1"."T 4" r2 ON (((r1.c1 = r2.c1)))) WHERE (((r1.c1 < 20) OR ((r1.c1 IS NULL) AND (r2.c1 < 5)))) GROUP BY 2 (7 rows) select array_agg(distinct (t1.c1)%5 order by (t1.c1)%5 desc nulls last) from ft4 t1 full join ft5 t2 on (t1.c1 = t2.c1) where t1.c1 < 20 or (t1.c1 is null and t2.c1 < 5) group by (t2.c1)%3 order by 1; @@ -2818,15 +2931,15 @@ select array_agg(distinct (t1.c1)%5 
order by (t1.c1)%5 desc nulls last) from ft4 -- FILTER within aggregate explain (verbose, costs off) select sum(c1) filter (where c1 < 100 and c2 > 5) from ft1 group by c2 order by 1 nulls last; - QUERY PLAN --------------------------------------------------------------------------------------------------------------------- + QUERY PLAN +------------------------------------------------------------------------------------------------------------------- Sort Output: (sum(c1) FILTER (WHERE ((c1 < 100) AND (c2 > 5)))), c2 Sort Key: (sum(ft1.c1) FILTER (WHERE ((ft1.c1 < 100) AND (ft1.c2 > 5)))) -> Foreign Scan Output: (sum(c1) FILTER (WHERE ((c1 < 100) AND (c2 > 5)))), c2 Relations: Aggregate on (public.ft1) - Remote SQL: SELECT sum("C 1") FILTER (WHERE (("C 1" < 100) AND (c2 > 5))), c2 FROM "S 1"."T 1" GROUP BY c2 + Remote SQL: SELECT sum("C 1") FILTER (WHERE (("C 1" < 100) AND (c2 > 5))), c2 FROM "S 1"."T 1" GROUP BY 2 (7 rows) select sum(c1) filter (where c1 < 100 and c2 > 5) from ft1 group by c2 order by 1 nulls last; @@ -2847,12 +2960,12 @@ select sum(c1) filter (where c1 < 100 and c2 > 5) from ft1 group by c2 order by -- DISTINCT, ORDER BY and FILTER within aggregate explain (verbose, costs off) select sum(c1%3), sum(distinct c1%3 order by c1%3) filter (where c1%3 < 2), c2 from ft1 where c2 = 6 group by c2; - QUERY PLAN -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + QUERY PLAN +------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ Foreign Scan Output: (sum((c1 % 3))), (sum(DISTINCT (c1 % 3) ORDER BY (c1 % 3)) FILTER (WHERE ((c1 % 3) < 2))), c2 Relations: Aggregate on (public.ft1) - Remote SQL: SELECT sum(("C 1" % 3)), sum(DISTINCT ("C 1" % 3) ORDER BY (("C 1" % 3)) ASC NULLS LAST) FILTER (WHERE (("C 1" % 3) < 2)), c2 FROM "S 1"."T 1" WHERE ((c2 = 6)) GROUP BY c2 + Remote SQL: SELECT sum(("C 1" % 3)), sum(DISTINCT ("C 1" % 3) ORDER BY (("C 1" % 3)) ASC NULLS LAST) FILTER (WHERE (("C 1" % 3) < 2)), c2 FROM "S 1"."T 1" WHERE ((c2 = 6)) GROUP BY 3 (4 rows) select sum(c1%3), sum(distinct c1%3 order by c1%3) filter (where c1%3 < 2), c2 from ft1 where c2 = 6 group by c2; @@ -2926,7 +3039,7 @@ select sum(c1) filter (where (c1 / c1) * random() <= 1) from ft1 group by c2 ord Output: sum(c1) FILTER (WHERE ((((c1 / c1))::double precision * random()) <= '1'::double precision)), c2 Group Key: ft1.c2 -> Foreign Scan on public.ft1 - Output: c2, c1 + Output: c1, c2 Remote SQL: SELECT "C 1", c2 FROM "S 1"."T 1" (9 rows) @@ -2948,15 +3061,15 @@ select sum(c2) filter (where c2 in (select c2 from ft1 where c2 < 5)) from ft1; -- Ordered-sets within aggregate explain (verbose, costs off) select c2, rank('10'::varchar) within group (order by c6), percentile_cont(c2/10::numeric) within group (order by c1) from ft1 where c2 < 10 group by c2 having percentile_cont(c2/10::numeric) within group (order by c1) < 500 order by c2; - QUERY PLAN ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ + QUERY PLAN 
+---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- Sort Output: c2, (rank('10'::character varying) WITHIN GROUP (ORDER BY c6)), (percentile_cont((((c2)::numeric / '10'::numeric))::double precision) WITHIN GROUP (ORDER BY ((c1)::double precision))) Sort Key: ft1.c2 -> Foreign Scan Output: c2, (rank('10'::character varying) WITHIN GROUP (ORDER BY c6)), (percentile_cont((((c2)::numeric / '10'::numeric))::double precision) WITHIN GROUP (ORDER BY ((c1)::double precision))) Relations: Aggregate on (public.ft1) - Remote SQL: SELECT c2, rank('10'::character varying) WITHIN GROUP (ORDER BY c6 ASC NULLS LAST), percentile_cont((c2 / 10::numeric)) WITHIN GROUP (ORDER BY ("C 1") ASC NULLS LAST) FROM "S 1"."T 1" WHERE ((c2 < 10)) GROUP BY c2 HAVING ((percentile_cont((c2 / 10::numeric)) WITHIN GROUP (ORDER BY ("C 1") ASC NULLS LAST) < 500::double precision)) + Remote SQL: SELECT c2, rank('10'::character varying) WITHIN GROUP (ORDER BY c6 ASC NULLS LAST), percentile_cont((c2 / 10::numeric)) WITHIN GROUP (ORDER BY ("C 1") ASC NULLS LAST) FROM "S 1"."T 1" WHERE ((c2 < 10)) GROUP BY 1 HAVING ((percentile_cont((c2 / 10::numeric)) WITHIN GROUP (ORDER BY ("C 1") ASC NULLS LAST) < 500::double precision)) (7 rows) select c2, rank('10'::varchar) within group (order by c6), percentile_cont(c2/10::numeric) within group (order by c1) from ft1 where c2 < 10 group by c2 having percentile_cont(c2/10::numeric) within group (order by c1) < 500 order by c2; @@ -2972,12 +3085,12 @@ select c2, rank('10'::varchar) within group (order by c6), percentile_cont(c2/10 -- Using multiple arguments within aggregates explain (verbose, costs off) select c1, rank(c1, c2) within group (order by c1, c2) from ft1 group by c1, c2 having c1 = 6 order by 1; - QUERY PLAN -------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + QUERY PLAN +-------------------------------------------------------------------------------------------------------------------------------------------------------------------- Foreign Scan Output: c1, (rank(c1, c2) WITHIN GROUP (ORDER BY c1, c2)), c2 Relations: Aggregate on (public.ft1) - Remote SQL: SELECT "C 1", rank("C 1", c2) WITHIN GROUP (ORDER BY "C 1" ASC NULLS LAST, c2 ASC NULLS LAST), c2 FROM "S 1"."T 1" WHERE (("C 1" = 6)) GROUP BY "C 1", c2 + Remote SQL: SELECT "C 1", rank("C 1", c2) WITHIN GROUP (ORDER BY "C 1" ASC NULLS LAST, c2 ASC NULLS LAST), c2 FROM "S 1"."T 1" WHERE (("C 1" = 6)) GROUP BY 1, 3 (4 rows) select c1, rank(c1, c2) within group (order by c1, c2) from ft1 group by c1, c2 having c1 = 6 order by 1; @@ -3015,15 +3128,15 @@ alter server loopback options (set extensions 'postgres_fdw'); -- Now aggregate will be pushed. Aggregate will display VARIADIC argument. 
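The least_agg case above exercises shippability of user-defined objects via the server-level extensions option: besides built-in objects, only operators, functions and aggregates belonging to a listed extension are considered safe to send to the remote server. The knob itself, in the forms the test uses:

-- Declare which extensions' objects may appear in remote SQL.
ALTER SERVER loopback OPTIONS (ADD extensions 'postgres_fdw');
-- ...and it can later be changed or removed again:
ALTER SERVER loopback OPTIONS (SET extensions 'postgres_fdw');
ALTER SERVER loopback OPTIONS (DROP extensions);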
explain (verbose, costs off) select c2, least_agg(c1) from ft1 where c2 < 100 group by c2 order by c2; - QUERY PLAN ------------------------------------------------------------------------------------------------------------------------- + QUERY PLAN +----------------------------------------------------------------------------------------------------------------------- Sort Output: c2, (least_agg(VARIADIC ARRAY[c1])) Sort Key: ft1.c2 -> Foreign Scan Output: c2, (least_agg(VARIADIC ARRAY[c1])) Relations: Aggregate on (public.ft1) - Remote SQL: SELECT c2, public.least_agg(VARIADIC ARRAY["C 1"]) FROM "S 1"."T 1" WHERE ((c2 < 100)) GROUP BY c2 + Remote SQL: SELECT c2, public.least_agg(VARIADIC ARRAY["C 1"]) FROM "S 1"."T 1" WHERE ((c2 < 100)) GROUP BY 1 (7 rows) select c2, least_agg(c1) from ft1 where c2 < 100 group by c2 order by c2; @@ -3100,7 +3213,7 @@ select array_agg(c1 order by c1 using operator(public.<^)) from ft2 where c2 = 6 Output: array_agg(c1 ORDER BY c1 USING <^ NULLS LAST), c2 Group Key: ft2.c2 -> Foreign Scan on public.ft2 - Output: c2, c1 + Output: c1, c2 Remote SQL: SELECT "C 1", c2 FROM "S 1"."T 1" WHERE (("C 1" < 100)) AND ((c2 = 6)) (6 rows) @@ -3115,12 +3228,12 @@ alter server loopback options (set extensions 'postgres_fdw'); -- Now this will be pushed as sort operator is part of the extension. explain (verbose, costs off) select array_agg(c1 order by c1 using operator(public.<^)) from ft2 where c2 = 6 and c1 < 100 group by c2; - QUERY PLAN ------------------------------------------------------------------------------------------------------------------------------------------------------------------ + QUERY PLAN +---------------------------------------------------------------------------------------------------------------------------------------------------------------- Foreign Scan Output: (array_agg(c1 ORDER BY c1 USING <^ NULLS LAST)), c2 Relations: Aggregate on (public.ft2) - Remote SQL: SELECT array_agg("C 1" ORDER BY "C 1" USING OPERATOR(public.<^) NULLS LAST), c2 FROM "S 1"."T 1" WHERE (("C 1" < 100)) AND ((c2 = 6)) GROUP BY c2 + Remote SQL: SELECT array_agg("C 1" ORDER BY "C 1" USING OPERATOR(public.<^) NULLS LAST), c2 FROM "S 1"."T 1" WHERE (("C 1" < 100)) AND ((c2 = 6)) GROUP BY 2 (4 rows) select array_agg(c1 order by c1 using operator(public.<^)) from ft2 where c2 = 6 and c1 < 100 group by c2; @@ -3146,7 +3259,7 @@ select array_agg(c1 order by c1 using operator(public.<^)) from ft2 where c2 = 6 Output: array_agg(c1 ORDER BY c1 USING <^ NULLS LAST), c2 Group Key: ft2.c2 -> Foreign Scan on public.ft2 - Output: c2, c1 + Output: c1, c2 Remote SQL: SELECT "C 1", c2 FROM "S 1"."T 1" WHERE (("C 1" < 100)) AND ((c2 = 6)) (6 rows) @@ -3160,27 +3273,29 @@ drop operator public.<^(int, int); -- Input relation to aggregate push down hook is not safe to pushdown and thus -- the aggregate cannot be pushed down to foreign server. 
explain (verbose, costs off) -select count(t1.c3) from ft1 t1, ft1 t2 where t1.c1 = postgres_fdw_abs(t1.c2); - QUERY PLAN ----------------------------------------------------------------------------------------------------------- +select count(t1.c3) from ft2 t1 left join ft2 t2 on (t1.c1 = random() * t2.c2); + QUERY PLAN +------------------------------------------------------------------------------------------- Aggregate Output: count(t1.c3) - -> Nested Loop + -> Nested Loop Left Join Output: t1.c3 - -> Foreign Scan on public.ft1 t2 - Remote SQL: SELECT NULL FROM "S 1"."T 1" + Join Filter: ((t1.c1)::double precision = (random() * (t2.c2)::double precision)) + -> Foreign Scan on public.ft2 t1 + Output: t1.c3, t1.c1 + Remote SQL: SELECT "C 1", c3 FROM "S 1"."T 1" -> Materialize - Output: t1.c3 - -> Foreign Scan on public.ft1 t1 - Output: t1.c3 - Remote SQL: SELECT c3 FROM "S 1"."T 1" WHERE (("C 1" = public.postgres_fdw_abs(c2))) -(11 rows) + Output: t2.c2 + -> Foreign Scan on public.ft2 t2 + Output: t2.c2 + Remote SQL: SELECT c2 FROM "S 1"."T 1" +(13 rows) -- Subquery in FROM clause having aggregate explain (verbose, costs off) select count(*), x.b from ft1, (select c2 a, sum(c1) b from ft1 group by c2) x where ft1.c2 = x.a group by x.b order by 1, 2; - QUERY PLAN ------------------------------------------------------------------------------------------------- + QUERY PLAN +----------------------------------------------------------------------------------------------- Sort Output: (count(*)), x.b Sort Key: (count(*)), x.b @@ -3201,7 +3316,7 @@ select count(*), x.b from ft1, (select c2 a, sum(c1) b from ft1 group by c2) x w -> Foreign Scan Output: ft1_1.c2, (sum(ft1_1.c1)) Relations: Aggregate on (public.ft1) - Remote SQL: SELECT c2, sum("C 1") FROM "S 1"."T 1" GROUP BY c2 + Remote SQL: SELECT c2, sum("C 1") FROM "S 1"."T 1" GROUP BY 1 (21 rows) select count(*), x.b from ft1, (select c2 a, sum(c1) b from ft1 group by c2) x where ft1.c2 = x.a group by x.b order by 1, 2; @@ -3222,15 +3337,15 @@ select count(*), x.b from ft1, (select c2 a, sum(c1) b from ft1 group by c2) x w -- FULL join with IS NULL check in HAVING explain (verbose, costs off) select avg(t1.c1), sum(t2.c1) from ft4 t1 full join ft5 t2 on (t1.c1 = t2.c1) group by t2.c1 having (avg(t1.c1) is null and sum(t2.c1) < 10) or sum(t2.c1) is null order by 1 nulls last, 2; - QUERY PLAN -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + QUERY PLAN +--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- Sort Output: (avg(t1.c1)), (sum(t2.c1)), t2.c1 Sort Key: (avg(t1.c1)), (sum(t2.c1)) -> Foreign Scan Output: (avg(t1.c1)), (sum(t2.c1)), t2.c1 Relations: Aggregate on ((public.ft4 t1) FULL JOIN (public.ft5 t2)) - Remote SQL: SELECT avg(r1.c1), sum(r2.c1), r2.c1 FROM ("S 1"."T 3" r1 FULL JOIN "S 1"."T 4" r2 ON (((r1.c1 = r2.c1)))) GROUP BY r2.c1 HAVING ((((avg(r1.c1) IS NULL) AND (sum(r2.c1) < 10)) OR (sum(r2.c1) IS NULL))) + Remote SQL: SELECT avg(r1.c1), sum(r2.c1), r2.c1 FROM ("S 1"."T 3" r1 FULL JOIN "S 1"."T 4" r2 ON (((r1.c1 = r2.c1)))) GROUP BY 3 HAVING ((((avg(r1.c1) IS NULL) AND (sum(r2.c1) < 10)) OR (sum(r2.c1) IS NULL))) (7 rows) select avg(t1.c1), sum(t2.c1) from ft4 t1 full join ft5 t2 on 
(t1.c1 = t2.c1) group by t2.c1 having (avg(t1.c1) is null and sum(t2.c1) < 10) or sum(t2.c1) is null order by 1 nulls last, 2; @@ -3284,8 +3399,8 @@ select sum(c2) * (random() <= 1)::int as sum from ft1 order by 1; set enable_hashagg to false; explain (verbose, costs off) select c2, sum from "S 1"."T 1" t1, lateral (select sum(t2.c1 + t1."C 1") sum from ft2 t2 group by t2.c1) qry where t1.c2 * 2 = qry.sum and t1.c2 < 3 and t1."C 1" < 100 order by 1; - QUERY PLAN ----------------------------------------------------------------------------------------------------------- + QUERY PLAN +------------------------------------------------------------------------------------------------------ Sort Output: t1.c2, qry.sum Sort Key: t1.c2 @@ -3301,7 +3416,7 @@ select c2, sum from "S 1"."T 1" t1, lateral (select sum(t2.c1 + t1."C 1") sum fr -> Foreign Scan Output: (sum((t2.c1 + t1."C 1"))), t2.c1 Relations: Aggregate on (public.ft2 t2) - Remote SQL: SELECT sum(("C 1" + $1::integer)), "C 1" FROM "S 1"."T 1" GROUP BY "C 1" + Remote SQL: SELECT sum(("C 1" + $1::integer)), "C 1" FROM "S 1"."T 1" GROUP BY 2 (16 rows) select c2, sum from "S 1"."T 1" t1, lateral (select sum(t2.c1 + t1."C 1") sum from ft2 t2 group by t2.c1) qry where t1.c2 * 2 = qry.sum and t1.c2 < 3 and t1."C 1" < 100 order by 1; @@ -3447,8 +3562,8 @@ select c2, sum(c1), grouping(c2) from ft1 where c2 < 3 group by c2 order by 1 nu -- DISTINCT itself is not pushed down, whereas underneath aggregate is pushed explain (verbose, costs off) select distinct sum(c1)/1000 s from ft2 where c2 < 6 group by c2 order by 1; - QUERY PLAN --------------------------------------------------------------------------------------------------------- + QUERY PLAN +------------------------------------------------------------------------------------------------------- Unique Output: ((sum(c1) / 1000)), c2 -> Sort @@ -3457,7 +3572,7 @@ select distinct sum(c1)/1000 s from ft2 where c2 < 6 group by c2 order by 1; -> Foreign Scan Output: ((sum(c1) / 1000)), c2 Relations: Aggregate on (public.ft2) - Remote SQL: SELECT (sum("C 1") / 1000), c2 FROM "S 1"."T 1" WHERE ((c2 < 6)) GROUP BY c2 + Remote SQL: SELECT (sum("C 1") / 1000), c2 FROM "S 1"."T 1" WHERE ((c2 < 6)) GROUP BY 2 (9 rows) select distinct sum(c1)/1000 s from ft2 where c2 < 6 group by c2 order by 1; @@ -3470,8 +3585,8 @@ select distinct sum(c1)/1000 s from ft2 where c2 < 6 group by c2 order by 1; -- WindowAgg explain (verbose, costs off) select c2, sum(c2), count(c2) over (partition by c2%2) from ft2 where c2 < 10 group by c2 order by 1; - QUERY PLAN -------------------------------------------------------------------------------------------------------------- + QUERY PLAN +------------------------------------------------------------------------------------------------------------ Sort Output: c2, (sum(c2)), (count(c2) OVER (?)), ((c2 % 2)) Sort Key: ft2.c2 @@ -3483,7 +3598,7 @@ select c2, sum(c2), count(c2) over (partition by c2%2) from ft2 where c2 < 10 gr -> Foreign Scan Output: c2, ((c2 % 2)), (sum(c2)) Relations: Aggregate on (public.ft2) - Remote SQL: SELECT c2, (c2 % 2), sum(c2) FROM "S 1"."T 1" WHERE ((c2 < 10)) GROUP BY c2 + Remote SQL: SELECT c2, (c2 % 2), sum(c2) FROM "S 1"."T 1" WHERE ((c2 < 10)) GROUP BY 1 (12 rows) select c2, sum(c2), count(c2) over (partition by c2%2) from ft2 where c2 < 10 group by c2 order by 1; @@ -3503,8 +3618,8 @@ select c2, sum(c2), count(c2) over (partition by c2%2) from ft2 where c2 < 10 gr explain (verbose, costs off) select c2, array_agg(c2) over (partition by c2%2 order 
by c2 desc) from ft1 where c2 < 10 group by c2 order by 1; - QUERY PLAN ----------------------------------------------------------------------------------------------------- + QUERY PLAN +--------------------------------------------------------------------------------------------------- Sort Output: c2, (array_agg(c2) OVER (?)), ((c2 % 2)) Sort Key: ft1.c2 @@ -3516,7 +3631,7 @@ select c2, array_agg(c2) over (partition by c2%2 order by c2 desc) from ft1 wher -> Foreign Scan Output: c2, ((c2 % 2)) Relations: Aggregate on (public.ft1) - Remote SQL: SELECT c2, (c2 % 2) FROM "S 1"."T 1" WHERE ((c2 < 10)) GROUP BY c2 + Remote SQL: SELECT c2, (c2 % 2) FROM "S 1"."T 1" WHERE ((c2 < 10)) GROUP BY 1 (12 rows) select c2, array_agg(c2) over (partition by c2%2 order by c2 desc) from ft1 where c2 < 10 group by c2 order by 1; @@ -3536,8 +3651,8 @@ select c2, array_agg(c2) over (partition by c2%2 order by c2 desc) from ft1 wher explain (verbose, costs off) select c2, array_agg(c2) over (partition by c2%2 order by c2 range between current row and unbounded following) from ft1 where c2 < 10 group by c2 order by 1; - QUERY PLAN ----------------------------------------------------------------------------------------------------- + QUERY PLAN +--------------------------------------------------------------------------------------------------- Sort Output: c2, (array_agg(c2) OVER (?)), ((c2 % 2)) Sort Key: ft1.c2 @@ -3549,7 +3664,7 @@ select c2, array_agg(c2) over (partition by c2%2 order by c2 range between curre -> Foreign Scan Output: c2, ((c2 % 2)) Relations: Aggregate on (public.ft1) - Remote SQL: SELECT c2, (c2 % 2) FROM "S 1"."T 1" WHERE ((c2 < 10)) GROUP BY c2 + Remote SQL: SELECT c2, (c2 % 2) FROM "S 1"."T 1" WHERE ((c2 < 10)) GROUP BY 1 (12 rows) select c2, array_agg(c2) over (partition by c2%2 order by c2 range between current row and unbounded following) from ft1 where c2 < 10 group by c2 order by 1; @@ -3972,16 +4087,16 @@ DROP FUNCTION f_test(int); -- =================================================================== ALTER FOREIGN TABLE ft1 ALTER COLUMN c8 TYPE int; SELECT * FROM ft1 WHERE c1 = 1; -- ERROR -ERROR: invalid input syntax for integer: "foo" +ERROR: invalid input syntax for type integer: "foo" CONTEXT: column "c8" of foreign table "ft1" SELECT ft1.c1, ft2.c2, ft1.c8 FROM ft1, ft2 WHERE ft1.c1 = ft2.c1 AND ft1.c1 = 1; -- ERROR -ERROR: invalid input syntax for integer: "foo" +ERROR: invalid input syntax for type integer: "foo" CONTEXT: column "c8" of foreign table "ft1" SELECT ft1.c1, ft2.c2, ft1 FROM ft1, ft2 WHERE ft1.c1 = ft2.c1 AND ft1.c1 = 1; -- ERROR -ERROR: invalid input syntax for integer: "foo" +ERROR: invalid input syntax for type integer: "foo" CONTEXT: whole-row reference to foreign table "ft1" SELECT sum(c2), array_agg(c8) FROM ft1 GROUP BY c8; -- ERROR -ERROR: invalid input syntax for integer: "foo" +ERROR: invalid input syntax for type integer: "foo" CONTEXT: processing expression at position 2 in select list ALTER FOREIGN TABLE ft1 ALTER COLUMN c8 TYPE user_enum; -- =================================================================== @@ -4011,7 +4126,7 @@ FETCH c; SAVEPOINT s; SELECT * FROM ft1 WHERE 1 / (c1 - 1) > 0; -- ERROR ERROR: division by zero -CONTEXT: Remote SQL command: SELECT "C 1", c2, c3, c4, c5, c6, c7, c8 FROM "S 1"."T 1" WHERE (((1 / ("C 1" - 1)) > 0)) +CONTEXT: remote SQL command: SELECT "C 1", c2, c3, c4, c5, c6, c7, c8 FROM "S 1"."T 1" WHERE (((1 / ("C 1" - 1)) > 0)) ROLLBACK TO s; FETCH c; c1 | c2 | c3 | c4 | c5 | c6 | c7 | c8 @@ -4118,18 +4233,21 @@ 
explain (verbose, costs off) select * from ft3 where f2 = 'foo' COLLATE "C"; explain (verbose, costs off) select * from ft3 f, loct3 l where f.f3 = l.f3 COLLATE "POSIX" and l.f1 = 'foo'; - QUERY PLAN ---------------------------------------------------------- - Nested Loop + QUERY PLAN +------------------------------------------------------------- + Hash Join Output: f.f1, f.f2, f.f3, l.f1, l.f2, l.f3 - Join Filter: ((f.f3)::text = (l.f3)::text) - -> Index Scan using loct3_f1_key on public.loct3 l - Output: l.f1, l.f2, l.f3 - Index Cond: (l.f1 = 'foo'::text) + Inner Unique: true + Hash Cond: ((f.f3)::text = (l.f3)::text) -> Foreign Scan on public.ft3 f Output: f.f1, f.f2, f.f3 Remote SQL: SELECT f1, f2, f3 FROM public.loct3 -(9 rows) + -> Hash + Output: l.f1, l.f2, l.f3 + -> Index Scan using loct3_f1_key on public.loct3 l + Output: l.f1, l.f2, l.f3 + Index Cond: (l.f1 = 'foo'::text) +(12 rows) -- =================================================================== -- test writable foreign table stuff @@ -4289,27 +4407,13 @@ UPDATE ft2 SET c2 = c2 + 400, c3 = c3 || '_update7' WHERE c1 % 10 = 7 RETURNING EXPLAIN (verbose, costs off) UPDATE ft2 SET c2 = ft2.c2 + 500, c3 = ft2.c3 || '_update9', c7 = DEFAULT - FROM ft1 WHERE ft1.c1 = ft2.c2 AND ft1.c1 % 10 = 9; -- can't be pushed down - QUERY PLAN ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + FROM ft1 WHERE ft1.c1 = ft2.c2 AND ft1.c1 % 10 = 9; -- can be pushed down + QUERY PLAN +----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- Update on public.ft2 - Remote SQL: UPDATE "S 1"."T 1" SET c2 = $2, c3 = $3, c7 = $4 WHERE ctid = $1 - -> Foreign Scan - Output: ft2.c1, (ft2.c2 + 500), NULL::integer, (ft2.c3 || '_update9'::text), ft2.c4, ft2.c5, ft2.c6, 'ft2 '::character(10), ft2.c8, ft2.ctid, ft1.* - Relations: (public.ft2) INNER JOIN (public.ft1) - Remote SQL: SELECT r1."C 1", r1.c2, r1.c3, r1.c4, r1.c5, r1.c6, r1.c8, r1.ctid, CASE WHEN (r2.*)::text IS NOT NULL THEN ROW(r2."C 1", r2.c2, r2.c3, r2.c4, r2.c5, r2.c6, r2.c7, r2.c8) END FROM ("S 1"."T 1" r1 INNER JOIN "S 1"."T 1" r2 ON (((r1.c2 = r2."C 1")) AND (((r2."C 1" % 10) = 9)))) FOR UPDATE OF r1 - -> Hash Join - Output: ft2.c1, ft2.c2, ft2.c3, ft2.c4, ft2.c5, ft2.c6, ft2.c8, ft2.ctid, ft1.* - Hash Cond: (ft2.c2 = ft1.c1) - -> Foreign Scan on public.ft2 - Output: ft2.c1, ft2.c2, ft2.c3, ft2.c4, ft2.c5, ft2.c6, ft2.c8, ft2.ctid - Remote SQL: SELECT "C 1", c2, c3, c4, c5, c6, c8, ctid FROM "S 1"."T 1" FOR UPDATE - -> Hash - Output: ft1.*, ft1.c1 - -> Foreign Scan on public.ft1 - Output: ft1.*, ft1.c1 - Remote SQL: SELECT "C 1", c2, c3, c4, c5, c6, c7, c8 FROM "S 1"."T 1" WHERE ((("C 1" % 10) = 9)) -(17 rows) + -> Foreign Update + Remote SQL: UPDATE "S 1"."T 1" r1 SET c2 = (r1.c2 + 500), c3 = (r1.c3 || '_update9'::text), c7 = 'ft2 '::character(10) FROM "S 1"."T 1" r2 WHERE ((r1.c2 = r2."C 1")) AND (((r2."C 1" % 10) = 9)) +(3 rows) UPDATE ft2 SET c2 = ft2.c2 + 500, c3 = ft2.c3 || '_update9', c7 = DEFAULT FROM ft1 WHERE ft1.c1 = ft2.c2 AND ft1.c1 % 10 = 9; @@ -4432,27 +4536,13 @@ DELETE FROM ft2 WHERE c1 % 10 = 5 RETURNING c1, c4; (103 rows) EXPLAIN (verbose, costs off) -DELETE 
FROM ft2 USING ft1 WHERE ft1.c1 = ft2.c2 AND ft1.c1 % 10 = 2; -- can't be pushed down - QUERY PLAN ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ +DELETE FROM ft2 USING ft1 WHERE ft1.c1 = ft2.c2 AND ft1.c1 % 10 = 2; -- can be pushed down + QUERY PLAN +---------------------------------------------------------------------------------------------------------------------------- Delete on public.ft2 - Remote SQL: DELETE FROM "S 1"."T 1" WHERE ctid = $1 - -> Foreign Scan - Output: ft2.ctid, ft1.* - Relations: (public.ft2) INNER JOIN (public.ft1) - Remote SQL: SELECT r1.ctid, CASE WHEN (r2.*)::text IS NOT NULL THEN ROW(r2."C 1", r2.c2, r2.c3, r2.c4, r2.c5, r2.c6, r2.c7, r2.c8) END FROM ("S 1"."T 1" r1 INNER JOIN "S 1"."T 1" r2 ON (((r1.c2 = r2."C 1")) AND (((r2."C 1" % 10) = 2)))) FOR UPDATE OF r1 - -> Hash Join - Output: ft2.ctid, ft1.* - Hash Cond: (ft2.c2 = ft1.c1) - -> Foreign Scan on public.ft2 - Output: ft2.ctid, ft2.c2 - Remote SQL: SELECT c2, ctid FROM "S 1"."T 1" FOR UPDATE - -> Hash - Output: ft1.*, ft1.c1 - -> Foreign Scan on public.ft1 - Output: ft1.*, ft1.c1 - Remote SQL: SELECT "C 1", c2, c3, c4, c5, c6, c7, c8 FROM "S 1"."T 1" WHERE ((("C 1" % 10) = 2)) -(17 rows) + -> Foreign Delete + Remote SQL: DELETE FROM "S 1"."T 1" r1 USING "S 1"."T 1" r2 WHERE ((r1.c2 = r2."C 1")) AND (((r2."C 1" % 10) = 2)) +(3 rows) DELETE FROM ft2 USING ft1 WHERE ft1.c1 = ft2.c2 AND ft1.c1 % 10 = 2; SELECT c1,c2,c3,c4 FROM ft2 ORDER BY c1; @@ -5280,54 +5370,243 @@ SELECT c1,c2,c3,c4 FROM ft2 ORDER BY c1; (819 rows) EXPLAIN (verbose, costs off) -INSERT INTO ft2 (c1,c2,c3) VALUES (9999,999,'foo') RETURNING tableoid::regclass; +INSERT INTO ft2 (c1,c2,c3) VALUES (1200,999,'foo') RETURNING tableoid::regclass; QUERY PLAN ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- Insert on public.ft2 Output: (tableoid)::regclass Remote SQL: INSERT INTO "S 1"."T 1"("C 1", c2, c3, c4, c5, c6, c7, c8) VALUES ($1, $2, $3, $4, $5, $6, $7, $8) -> Result - Output: 9999, 999, NULL::integer, 'foo'::text, NULL::timestamp with time zone, NULL::timestamp without time zone, NULL::character varying, 'ft2 '::character(10), NULL::user_enum + Output: 1200, 999, NULL::integer, 'foo'::text, NULL::timestamp with time zone, NULL::timestamp without time zone, NULL::character varying, 'ft2 '::character(10), NULL::user_enum (5 rows) -INSERT INTO ft2 (c1,c2,c3) VALUES (9999,999,'foo') RETURNING tableoid::regclass; +INSERT INTO ft2 (c1,c2,c3) VALUES (1200,999,'foo') RETURNING tableoid::regclass; tableoid ---------- ft2 (1 row) EXPLAIN (verbose, costs off) -UPDATE ft2 SET c3 = 'bar' WHERE c1 = 9999 RETURNING tableoid::regclass; -- can be pushed down +UPDATE ft2 SET c3 = 'bar' WHERE c1 = 1200 RETURNING tableoid::regclass; -- can be pushed down QUERY PLAN ------------------------------------------------------------------------------------ Update on public.ft2 Output: (tableoid)::regclass -> Foreign Update on public.ft2 - Remote SQL: UPDATE "S 1"."T 1" SET c3 = 'bar'::text WHERE (("C 1" = 9999)) + Remote SQL: UPDATE "S 1"."T 1" SET c3 = 'bar'::text WHERE (("C 1" = 1200)) (4 rows) -UPDATE ft2 SET c3 = 'bar' WHERE c1 = 9999 RETURNING tableoid::regclass; +UPDATE ft2 SET c3 = 'bar' WHERE c1 = 1200 
RETURNING tableoid::regclass; tableoid ---------- ft2 (1 row) EXPLAIN (verbose, costs off) -DELETE FROM ft2 WHERE c1 = 9999 RETURNING tableoid::regclass; -- can be pushed down +DELETE FROM ft2 WHERE c1 = 1200 RETURNING tableoid::regclass; -- can be pushed down QUERY PLAN -------------------------------------------------------------------- Delete on public.ft2 Output: (tableoid)::regclass -> Foreign Delete on public.ft2 - Remote SQL: DELETE FROM "S 1"."T 1" WHERE (("C 1" = 9999)) + Remote SQL: DELETE FROM "S 1"."T 1" WHERE (("C 1" = 1200)) (4 rows) -DELETE FROM ft2 WHERE c1 = 9999 RETURNING tableoid::regclass; +DELETE FROM ft2 WHERE c1 = 1200 RETURNING tableoid::regclass; tableoid ---------- ft2 (1 row) +-- Test UPDATE/DELETE with RETURNING on a three-table join +INSERT INTO ft2 (c1,c2,c3) + SELECT id, id - 1200, to_char(id, 'FM00000') FROM generate_series(1201, 1300) id; +EXPLAIN (verbose, costs off) +UPDATE ft2 SET c3 = 'foo' + FROM ft4 INNER JOIN ft5 ON (ft4.c1 = ft5.c1) + WHERE ft2.c1 > 1200 AND ft2.c2 = ft4.c1 + RETURNING ft2, ft2.*, ft4, ft4.*; -- can be pushed down + QUERY PLAN +--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Update on public.ft2 + Output: ft2.*, ft2.c1, ft2.c2, ft2.c3, ft2.c4, ft2.c5, ft2.c6, ft2.c7, ft2.c8, ft4.*, ft4.c1, ft4.c2, ft4.c3 + -> Foreign Update + Remote SQL: UPDATE "S 1"."T 1" r1 SET c3 = 'foo'::text FROM ("S 1"."T 3" r2 INNER JOIN "S 1"."T 4" r3 ON (TRUE)) WHERE ((r2.c1 = r3.c1)) AND ((r1.c2 = r2.c1)) AND ((r1."C 1" > 1200)) RETURNING r1."C 1", r1.c2, r1.c3, r1.c4, r1.c5, r1.c6, r1.c7, r1.c8, CASE WHEN (r2.*)::text IS NOT NULL THEN ROW(r2.c1, r2.c2, r2.c3) END, r2.c1, r2.c2, r2.c3 +(4 rows) + +UPDATE ft2 SET c3 = 'foo' + FROM ft4 INNER JOIN ft5 ON (ft4.c1 = ft5.c1) + WHERE ft2.c1 > 1200 AND ft2.c2 = ft4.c1 + RETURNING ft2, ft2.*, ft4, ft4.*; + ft2 | c1 | c2 | c3 | c4 | c5 | c6 | c7 | c8 | ft4 | c1 | c2 | c3 +--------------------------------+------+----+-----+----+----+----+------------+----+----------------+----+----+-------- + (1206,6,foo,,,,"ft2 ",) | 1206 | 6 | foo | | | | ft2 | | (6,7,AAA006) | 6 | 7 | AAA006 + (1212,12,foo,,,,"ft2 ",) | 1212 | 12 | foo | | | | ft2 | | (12,13,AAA012) | 12 | 13 | AAA012 + (1218,18,foo,,,,"ft2 ",) | 1218 | 18 | foo | | | | ft2 | | (18,19,AAA018) | 18 | 19 | AAA018 + (1224,24,foo,,,,"ft2 ",) | 1224 | 24 | foo | | | | ft2 | | (24,25,AAA024) | 24 | 25 | AAA024 + (1230,30,foo,,,,"ft2 ",) | 1230 | 30 | foo | | | | ft2 | | (30,31,AAA030) | 30 | 31 | AAA030 + (1236,36,foo,,,,"ft2 ",) | 1236 | 36 | foo | | | | ft2 | | (36,37,AAA036) | 36 | 37 | AAA036 + (1242,42,foo,,,,"ft2 ",) | 1242 | 42 | foo | | | | ft2 | | (42,43,AAA042) | 42 | 43 | AAA042 + (1248,48,foo,,,,"ft2 ",) | 1248 | 48 | foo | | | | ft2 | | (48,49,AAA048) | 48 | 49 | AAA048 + (1254,54,foo,,,,"ft2 ",) | 1254 | 54 | foo | | | | ft2 | | (54,55,AAA054) | 54 | 55 | AAA054 + (1260,60,foo,,,,"ft2 ",) | 1260 | 60 | foo | | | | ft2 | | (60,61,AAA060) | 60 | 61 | AAA060 + (1266,66,foo,,,,"ft2 ",) | 1266 | 66 | foo | | | | ft2 | | (66,67,AAA066) | 66 | 67 | AAA066 + (1272,72,foo,,,,"ft2 ",) | 1272 | 72 | foo | | | | ft2 | | (72,73,AAA072) | 72 | 73 | AAA072 + (1278,78,foo,,,,"ft2 ",) | 1278 | 78 | foo | | | | ft2 | | (78,79,AAA078) | 78 | 79 | AAA078 + 
(1284,84,foo,,,,"ft2 ",) | 1284 | 84 | foo | | | | ft2 | | (84,85,AAA084) | 84 | 85 | AAA084 + (1290,90,foo,,,,"ft2 ",) | 1290 | 90 | foo | | | | ft2 | | (90,91,AAA090) | 90 | 91 | AAA090 + (1296,96,foo,,,,"ft2 ",) | 1296 | 96 | foo | | | | ft2 | | (96,97,AAA096) | 96 | 97 | AAA096 +(16 rows) + +EXPLAIN (verbose, costs off) +DELETE FROM ft2 + USING ft4 LEFT JOIN ft5 ON (ft4.c1 = ft5.c1) + WHERE ft2.c1 > 1200 AND ft2.c1 % 10 = 0 AND ft2.c2 = ft4.c1 + RETURNING 100; -- can be pushed down + QUERY PLAN +--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Delete on public.ft2 + Output: 100 + -> Foreign Delete + Remote SQL: DELETE FROM "S 1"."T 1" r1 USING ("S 1"."T 3" r2 LEFT JOIN "S 1"."T 4" r3 ON (((r2.c1 = r3.c1)))) WHERE ((r1.c2 = r2.c1)) AND ((r1."C 1" > 1200)) AND (((r1."C 1" % 10) = 0)) +(4 rows) + +DELETE FROM ft2 + USING ft4 LEFT JOIN ft5 ON (ft4.c1 = ft5.c1) + WHERE ft2.c1 > 1200 AND ft2.c1 % 10 = 0 AND ft2.c2 = ft4.c1 + RETURNING 100; + ?column? +---------- + 100 + 100 + 100 + 100 + 100 + 100 + 100 + 100 + 100 + 100 +(10 rows) + +DELETE FROM ft2 WHERE ft2.c1 > 1200; +-- Test UPDATE/DELETE with WHERE or JOIN/ON conditions containing +-- user-defined operators/functions +ALTER SERVER loopback OPTIONS (DROP extensions); +INSERT INTO ft2 (c1,c2,c3) + SELECT id, id % 10, to_char(id, 'FM00000') FROM generate_series(2001, 2010) id; +EXPLAIN (verbose, costs off) +UPDATE ft2 SET c3 = 'bar' WHERE postgres_fdw_abs(c1) > 2000 RETURNING *; -- can't be pushed down + QUERY PLAN +---------------------------------------------------------------------------------------------------------- + Update on public.ft2 + Output: c1, c2, c3, c4, c5, c6, c7, c8 + Remote SQL: UPDATE "S 1"."T 1" SET c3 = $2 WHERE ctid = $1 RETURNING "C 1", c2, c3, c4, c5, c6, c7, c8 + -> Foreign Scan on public.ft2 + Output: c1, c2, NULL::integer, 'bar'::text, c4, c5, c6, c7, c8, ctid + Filter: (postgres_fdw_abs(ft2.c1) > 2000) + Remote SQL: SELECT "C 1", c2, c4, c5, c6, c7, c8, ctid FROM "S 1"."T 1" FOR UPDATE +(7 rows) + +UPDATE ft2 SET c3 = 'bar' WHERE postgres_fdw_abs(c1) > 2000 RETURNING *; + c1 | c2 | c3 | c4 | c5 | c6 | c7 | c8 +------+----+-----+----+----+----+------------+---- + 2001 | 1 | bar | | | | ft2 | + 2002 | 2 | bar | | | | ft2 | + 2003 | 3 | bar | | | | ft2 | + 2004 | 4 | bar | | | | ft2 | + 2005 | 5 | bar | | | | ft2 | + 2006 | 6 | bar | | | | ft2 | + 2007 | 7 | bar | | | | ft2 | + 2008 | 8 | bar | | | | ft2 | + 2009 | 9 | bar | | | | ft2 | + 2010 | 0 | bar | | | | ft2 | +(10 rows) + +EXPLAIN (verbose, costs off) +UPDATE ft2 SET c3 = 'baz' + FROM ft4 INNER JOIN ft5 ON (ft4.c1 = ft5.c1) + WHERE ft2.c1 > 2000 AND ft2.c2 === ft4.c1 + RETURNING ft2.*, ft4.*, ft5.*; -- can't be pushed down + QUERY PLAN +---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Update on public.ft2 + Output: ft2.c1, ft2.c2, ft2.c3, ft2.c4, ft2.c5, ft2.c6, ft2.c7, ft2.c8, ft4.c1, ft4.c2, ft4.c3, ft5.c1, ft5.c2, ft5.c3 + Remote SQL: UPDATE "S 1"."T 1" SET c3 = $2 WHERE ctid = $1 RETURNING "C 1", c2, c3, c4, c5, c6, c7, c8 + -> Nested Loop + Output: ft2.c1, ft2.c2, NULL::integer, 'baz'::text, ft2.c4, ft2.c5, ft2.c6, ft2.c7, ft2.c8, ft2.ctid, ft4.*, ft5.*, ft4.c1, 
ft4.c2, ft4.c3, ft5.c1, ft5.c2, ft5.c3 + Join Filter: (ft2.c2 === ft4.c1) + -> Foreign Scan on public.ft2 + Output: ft2.c1, ft2.c2, ft2.c4, ft2.c5, ft2.c6, ft2.c7, ft2.c8, ft2.ctid + Remote SQL: SELECT "C 1", c2, c4, c5, c6, c7, c8, ctid FROM "S 1"."T 1" WHERE (("C 1" > 2000)) FOR UPDATE + -> Foreign Scan + Output: ft4.*, ft4.c1, ft4.c2, ft4.c3, ft5.*, ft5.c1, ft5.c2, ft5.c3 + Relations: (public.ft4) INNER JOIN (public.ft5) + Remote SQL: SELECT CASE WHEN (r2.*)::text IS NOT NULL THEN ROW(r2.c1, r2.c2, r2.c3) END, r2.c1, r2.c2, r2.c3, CASE WHEN (r3.*)::text IS NOT NULL THEN ROW(r3.c1, r3.c2, r3.c3) END, r3.c1, r3.c2, r3.c3 FROM ("S 1"."T 3" r2 INNER JOIN "S 1"."T 4" r3 ON (((r2.c1 = r3.c1)))) + -> Hash Join + Output: ft4.*, ft4.c1, ft4.c2, ft4.c3, ft5.*, ft5.c1, ft5.c2, ft5.c3 + Hash Cond: (ft4.c1 = ft5.c1) + -> Foreign Scan on public.ft4 + Output: ft4.*, ft4.c1, ft4.c2, ft4.c3 + Remote SQL: SELECT c1, c2, c3 FROM "S 1"."T 3" + -> Hash + Output: ft5.*, ft5.c1, ft5.c2, ft5.c3 + -> Foreign Scan on public.ft5 + Output: ft5.*, ft5.c1, ft5.c2, ft5.c3 + Remote SQL: SELECT c1, c2, c3 FROM "S 1"."T 4" +(24 rows) + +UPDATE ft2 SET c3 = 'baz' + FROM ft4 INNER JOIN ft5 ON (ft4.c1 = ft5.c1) + WHERE ft2.c1 > 2000 AND ft2.c2 === ft4.c1 + RETURNING ft2.*, ft4.*, ft5.*; + c1 | c2 | c3 | c4 | c5 | c6 | c7 | c8 | c1 | c2 | c3 | c1 | c2 | c3 +------+----+-----+----+----+----+------------+----+----+----+--------+----+----+-------- + 2006 | 6 | baz | | | | ft2 | | 6 | 7 | AAA006 | 6 | 7 | AAA006 +(1 row) + +EXPLAIN (verbose, costs off) +DELETE FROM ft2 + USING ft4 INNER JOIN ft5 ON (ft4.c1 === ft5.c1) + WHERE ft2.c1 > 2000 AND ft2.c2 = ft4.c1 + RETURNING ft2.c1, ft2.c2, ft2.c3; -- can't be pushed down + QUERY PLAN +---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Delete on public.ft2 + Output: ft2.c1, ft2.c2, ft2.c3 + Remote SQL: DELETE FROM "S 1"."T 1" WHERE ctid = $1 RETURNING "C 1", c2, c3 + -> Foreign Scan + Output: ft2.ctid, ft4.*, ft5.* + Filter: (ft4.c1 === ft5.c1) + Relations: ((public.ft2) INNER JOIN (public.ft4)) INNER JOIN (public.ft5) + Remote SQL: SELECT r1.ctid, CASE WHEN (r2.*)::text IS NOT NULL THEN ROW(r2.c1, r2.c2, r2.c3) END, CASE WHEN (r3.*)::text IS NOT NULL THEN ROW(r3.c1, r3.c2, r3.c3) END, r2.c1, r3.c1 FROM (("S 1"."T 1" r1 INNER JOIN "S 1"."T 3" r2 ON (((r1.c2 = r2.c1)) AND ((r1."C 1" > 2000)))) INNER JOIN "S 1"."T 4" r3 ON (TRUE)) FOR UPDATE OF r1 + -> Nested Loop + Output: ft2.ctid, ft4.*, ft5.*, ft4.c1, ft5.c1 + -> Nested Loop + Output: ft2.ctid, ft4.*, ft4.c1 + Join Filter: (ft2.c2 = ft4.c1) + -> Foreign Scan on public.ft2 + Output: ft2.ctid, ft2.c2 + Remote SQL: SELECT c2, ctid FROM "S 1"."T 1" WHERE (("C 1" > 2000)) FOR UPDATE + -> Foreign Scan on public.ft4 + Output: ft4.*, ft4.c1 + Remote SQL: SELECT c1, c2, c3 FROM "S 1"."T 3" + -> Foreign Scan on public.ft5 + Output: ft5.*, ft5.c1 + Remote SQL: SELECT c1, c2, c3 FROM "S 1"."T 4" +(22 rows) + +DELETE FROM ft2 + USING ft4 INNER JOIN ft5 ON (ft4.c1 === ft5.c1) + WHERE ft2.c1 > 2000 AND ft2.c2 = ft4.c1 + RETURNING ft2.c1, ft2.c2, ft2.c3; + c1 | c2 | c3 +------+----+----- + 2006 | 6 | baz +(1 row) + +DELETE FROM ft2 WHERE ft2.c1 > 2000; +ALTER SERVER loopback OPTIONS (ADD extensions 'postgres_fdw'); -- Test that trigger on remote table works 
as expected CREATE OR REPLACE FUNCTION "S 1".F_BRTRIG() RETURNS trigger AS $$ BEGIN @@ -5461,7 +5740,7 @@ ALTER TABLE "S 1"."T 1" ADD CONSTRAINT c2positive CHECK (c2 >= 0); INSERT INTO ft1(c1, c2) VALUES(11, 12); -- duplicate key ERROR: duplicate key value violates unique constraint "t1_pkey" DETAIL: Key ("C 1")=(11) already exists. -CONTEXT: Remote SQL command: INSERT INTO "S 1"."T 1"("C 1", c2, c3, c4, c5, c6, c7, c8) VALUES ($1, $2, $3, $4, $5, $6, $7, $8) +CONTEXT: remote SQL command: INSERT INTO "S 1"."T 1"("C 1", c2, c3, c4, c5, c6, c7, c8) VALUES ($1, $2, $3, $4, $5, $6, $7, $8) INSERT INTO ft1(c1, c2) VALUES(11, 12) ON CONFLICT DO NOTHING; -- works INSERT INTO ft1(c1, c2) VALUES(11, 12) ON CONFLICT (c1, c2) DO NOTHING; -- unsupported ERROR: there is no unique or exclusion constraint matching the ON CONFLICT specification @@ -5470,11 +5749,11 @@ ERROR: there is no unique or exclusion constraint matching the ON CONFLICT spec INSERT INTO ft1(c1, c2) VALUES(1111, -2); -- c2positive ERROR: new row for relation "T 1" violates check constraint "c2positive" DETAIL: Failing row contains (1111, -2, null, null, null, null, ft1 , null). -CONTEXT: Remote SQL command: INSERT INTO "S 1"."T 1"("C 1", c2, c3, c4, c5, c6, c7, c8) VALUES ($1, $2, $3, $4, $5, $6, $7, $8) +CONTEXT: remote SQL command: INSERT INTO "S 1"."T 1"("C 1", c2, c3, c4, c5, c6, c7, c8) VALUES ($1, $2, $3, $4, $5, $6, $7, $8) UPDATE ft1 SET c2 = -c2 WHERE c1 = 1; -- c2positive ERROR: new row for relation "T 1" violates check constraint "c2positive" DETAIL: Failing row contains (1, -1, 00001_trig_update, 1970-01-02 08:00:00+00, 1970-01-02 00:00:00, 1, 1 , foo). -CONTEXT: Remote SQL command: UPDATE "S 1"."T 1" SET c2 = (- c2) WHERE (("C 1" = 1)) +CONTEXT: remote SQL command: UPDATE "S 1"."T 1" SET c2 = (- c2) WHERE (("C 1" = 1)) -- Test savepoint/rollback behavior select c2, count(*) from ft2 where c2 < 500 group by 1 order by 1; c2 | count @@ -5633,7 +5912,7 @@ savepoint s3; update ft2 set c2 = -2 where c2 = 42 and c1 = 10; -- fail on remote side ERROR: new row for relation "T 1" violates check constraint "c2positive" DETAIL: Failing row contains (10, -2, 00010_trig_update_trig_update, 1970-01-11 08:00:00+00, 1970-01-11 00:00:00, 0, 0 , foo). -CONTEXT: Remote SQL command: UPDATE "S 1"."T 1" SET c2 = (-2) WHERE ((c2 = 42)) AND (("C 1" = 10)) +CONTEXT: remote SQL command: UPDATE "S 1"."T 1" SET c2 = (-2) WHERE ((c2 = 42)) AND (("C 1" = 10)) rollback to savepoint s3; select c2, count(*) from ft2 where c2 < 500 group by 1 order by 1; c2 | count @@ -5728,6 +6007,7 @@ select c2, count(*) from "S 1"."T 1" where c2 < 500 group by 1 order by 1; 407 | 100 (13 rows) +VACUUM ANALYZE "S 1"."T 1"; -- Above DMLs add data with c6 as NULL in ft1, so test ORDER BY NULLS LAST and NULLs -- FIRST behavior here. -- ORDER BY DESC NULLS LAST options @@ -5849,11 +6129,11 @@ RESET constraint_exclusion; INSERT INTO ft1(c1, c2) VALUES(1111, -2); -- c2positive ERROR: new row for relation "T 1" violates check constraint "c2positive" DETAIL: Failing row contains (1111, -2, null, null, null, null, ft1 , null). 
-CONTEXT: Remote SQL command: INSERT INTO "S 1"."T 1"("C 1", c2, c3, c4, c5, c6, c7, c8) VALUES ($1, $2, $3, $4, $5, $6, $7, $8) +CONTEXT: remote SQL command: INSERT INTO "S 1"."T 1"("C 1", c2, c3, c4, c5, c6, c7, c8) VALUES ($1, $2, $3, $4, $5, $6, $7, $8) UPDATE ft1 SET c2 = -c2 WHERE c1 = 1; -- c2positive ERROR: new row for relation "T 1" violates check constraint "c2positive" DETAIL: Failing row contains (1, -1, 00001_trig_update, 1970-01-02 08:00:00+00, 1970-01-02 00:00:00, 1, 1 , foo). -CONTEXT: Remote SQL command: UPDATE "S 1"."T 1" SET c2 = (- c2) WHERE (("C 1" = 1)) +CONTEXT: remote SQL command: UPDATE "S 1"."T 1" SET c2 = (- c2) WHERE (("C 1" = 1)) ALTER FOREIGN TABLE ft1 DROP CONSTRAINT ft1_c2positive; -- But inconsistent check constraints provide inconsistent results ALTER FOREIGN TABLE ft1 ADD CONSTRAINT ft1_c2negative CHECK (c2 < 0); @@ -5896,9 +6176,12 @@ ALTER FOREIGN TABLE ft1 DROP CONSTRAINT ft1_c2negative; -- =================================================================== -- test WITH CHECK OPTION constraints -- =================================================================== +CREATE FUNCTION row_before_insupd_trigfunc() RETURNS trigger AS $$BEGIN NEW.a := NEW.a + 10; RETURN NEW; END$$ LANGUAGE plpgsql; CREATE TABLE base_tbl (a int, b int); +ALTER TABLE base_tbl SET (autovacuum_enabled = 'false'); +CREATE TRIGGER row_before_insupd_trigger BEFORE INSERT OR UPDATE ON base_tbl FOR EACH ROW EXECUTE PROCEDURE row_before_insupd_trigfunc(); CREATE FOREIGN TABLE foreign_tbl (a int, b int) - SERVER loopback OPTIONS(table_name 'base_tbl'); + SERVER loopback OPTIONS (table_name 'base_tbl'); CREATE VIEW rw_view AS SELECT * FROM foreign_tbl WHERE a < b WITH CHECK OPTION; \d+ rw_view @@ -5914,49 +6197,167 @@ View definition: WHERE foreign_tbl.a < foreign_tbl.b; Options: check_option=cascaded -INSERT INTO rw_view VALUES (0, 10); -- ok -INSERT INTO rw_view VALUES (10, 0); -- should fail +EXPLAIN (VERBOSE, COSTS OFF) +INSERT INTO rw_view VALUES (0, 5); + QUERY PLAN +-------------------------------------------------------------------------------- + Insert on public.foreign_tbl + Remote SQL: INSERT INTO public.base_tbl(a, b) VALUES ($1, $2) RETURNING a, b + -> Result + Output: 0, 5 +(4 rows) + +INSERT INTO rw_view VALUES (0, 5); -- should fail ERROR: new row violates check option for view "rw_view" -DETAIL: Failing row contains (10, 0). +DETAIL: Failing row contains (10, 5). 
EXPLAIN (VERBOSE, COSTS OFF) -UPDATE rw_view SET b = 20 WHERE a = 0; -- not pushed down - QUERY PLAN --------------------------------------------------------------------------------------------------- +INSERT INTO rw_view VALUES (0, 15); + QUERY PLAN +-------------------------------------------------------------------------------- + Insert on public.foreign_tbl + Remote SQL: INSERT INTO public.base_tbl(a, b) VALUES ($1, $2) RETURNING a, b + -> Result + Output: 0, 15 +(4 rows) + +INSERT INTO rw_view VALUES (0, 15); -- ok +SELECT * FROM foreign_tbl; + a | b +----+---- + 10 | 15 +(1 row) + +EXPLAIN (VERBOSE, COSTS OFF) +UPDATE rw_view SET b = b + 5; + QUERY PLAN +--------------------------------------------------------------------------------------- Update on public.foreign_tbl - Remote SQL: UPDATE public.base_tbl SET b = $2 WHERE ctid = $1 + Remote SQL: UPDATE public.base_tbl SET b = $2 WHERE ctid = $1 RETURNING a, b -> Foreign Scan on public.foreign_tbl - Output: foreign_tbl.a, 20, foreign_tbl.ctid - Remote SQL: SELECT a, ctid FROM public.base_tbl WHERE ((a < b)) AND ((a = 0)) FOR UPDATE + Output: foreign_tbl.a, (foreign_tbl.b + 5), foreign_tbl.ctid + Remote SQL: SELECT a, b, ctid FROM public.base_tbl WHERE ((a < b)) FOR UPDATE (5 rows) -UPDATE rw_view SET b = 20 WHERE a = 0; -- ok +UPDATE rw_view SET b = b + 5; -- should fail +ERROR: new row violates check option for view "rw_view" +DETAIL: Failing row contains (20, 20). EXPLAIN (VERBOSE, COSTS OFF) -UPDATE rw_view SET b = -20 WHERE a = 0; -- not pushed down - QUERY PLAN --------------------------------------------------------------------------------------------------- +UPDATE rw_view SET b = b + 15; + QUERY PLAN +--------------------------------------------------------------------------------------- Update on public.foreign_tbl - Remote SQL: UPDATE public.base_tbl SET b = $2 WHERE ctid = $1 + Remote SQL: UPDATE public.base_tbl SET b = $2 WHERE ctid = $1 RETURNING a, b -> Foreign Scan on public.foreign_tbl - Output: foreign_tbl.a, '-20'::integer, foreign_tbl.ctid - Remote SQL: SELECT a, ctid FROM public.base_tbl WHERE ((a < b)) AND ((a = 0)) FOR UPDATE + Output: foreign_tbl.a, (foreign_tbl.b + 15), foreign_tbl.ctid + Remote SQL: SELECT a, b, ctid FROM public.base_tbl WHERE ((a < b)) FOR UPDATE (5 rows) -UPDATE rw_view SET b = -20 WHERE a = 0; -- should fail -ERROR: new row violates check option for view "rw_view" -DETAIL: Failing row contains (0, -20). 
+UPDATE rw_view SET b = b + 15; -- ok SELECT * FROM foreign_tbl; - a | b ----+---- - 0 | 20 + a | b +----+---- + 20 | 30 (1 row) DROP FOREIGN TABLE foreign_tbl CASCADE; NOTICE: drop cascades to view rw_view +DROP TRIGGER row_before_insupd_trigger ON base_tbl; DROP TABLE base_tbl; +-- test WCO for partitions +CREATE TABLE child_tbl (a int, b int); +ALTER TABLE child_tbl SET (autovacuum_enabled = 'false'); +CREATE TRIGGER row_before_insupd_trigger BEFORE INSERT OR UPDATE ON child_tbl FOR EACH ROW EXECUTE PROCEDURE row_before_insupd_trigfunc(); +CREATE FOREIGN TABLE foreign_tbl (a int, b int) + SERVER loopback OPTIONS (table_name 'child_tbl'); +CREATE TABLE parent_tbl (a int, b int) PARTITION BY RANGE(a); +ALTER TABLE parent_tbl ATTACH PARTITION foreign_tbl FOR VALUES FROM (0) TO (100); +CREATE VIEW rw_view AS SELECT * FROM parent_tbl + WHERE a < b WITH CHECK OPTION; +\d+ rw_view + View "public.rw_view" + Column | Type | Collation | Nullable | Default | Storage | Description +--------+---------+-----------+----------+---------+---------+------------- + a | integer | | | | plain | + b | integer | | | | plain | +View definition: + SELECT parent_tbl.a, + parent_tbl.b + FROM parent_tbl + WHERE parent_tbl.a < parent_tbl.b; +Options: check_option=cascaded + +EXPLAIN (VERBOSE, COSTS OFF) +INSERT INTO rw_view VALUES (0, 5); + QUERY PLAN +----------------------------- + Insert on public.parent_tbl + -> Result + Output: 0, 5 +(3 rows) + +INSERT INTO rw_view VALUES (0, 5); -- should fail +ERROR: new row violates check option for view "rw_view" +DETAIL: Failing row contains (10, 5). +EXPLAIN (VERBOSE, COSTS OFF) +INSERT INTO rw_view VALUES (0, 15); + QUERY PLAN +----------------------------- + Insert on public.parent_tbl + -> Result + Output: 0, 15 +(3 rows) + +INSERT INTO rw_view VALUES (0, 15); -- ok +SELECT * FROM foreign_tbl; + a | b +----+---- + 10 | 15 +(1 row) + +EXPLAIN (VERBOSE, COSTS OFF) +UPDATE rw_view SET b = b + 5; + QUERY PLAN +---------------------------------------------------------------------------------------- + Update on public.parent_tbl + Foreign Update on public.foreign_tbl + Remote SQL: UPDATE public.child_tbl SET b = $2 WHERE ctid = $1 RETURNING a, b + -> Foreign Scan on public.foreign_tbl + Output: foreign_tbl.a, (foreign_tbl.b + 5), foreign_tbl.ctid + Remote SQL: SELECT a, b, ctid FROM public.child_tbl WHERE ((a < b)) FOR UPDATE +(6 rows) + +UPDATE rw_view SET b = b + 5; -- should fail +ERROR: new row violates check option for view "rw_view" +DETAIL: Failing row contains (20, 20). 
+EXPLAIN (VERBOSE, COSTS OFF) +UPDATE rw_view SET b = b + 15; + QUERY PLAN +---------------------------------------------------------------------------------------- + Update on public.parent_tbl + Foreign Update on public.foreign_tbl + Remote SQL: UPDATE public.child_tbl SET b = $2 WHERE ctid = $1 RETURNING a, b + -> Foreign Scan on public.foreign_tbl + Output: foreign_tbl.a, (foreign_tbl.b + 15), foreign_tbl.ctid + Remote SQL: SELECT a, b, ctid FROM public.child_tbl WHERE ((a < b)) FOR UPDATE +(6 rows) + +UPDATE rw_view SET b = b + 15; -- ok +SELECT * FROM foreign_tbl; + a | b +----+---- + 20 | 30 +(1 row) + +DROP FOREIGN TABLE foreign_tbl CASCADE; +DROP TRIGGER row_before_insupd_trigger ON child_tbl; +DROP TABLE parent_tbl CASCADE; +NOTICE: drop cascades to view rw_view +DROP FUNCTION row_before_insupd_trigfunc; -- =================================================================== -- test serial columns (ie, sequence-based defaults) -- =================================================================== create table loc1 (f1 serial, f2 text); +alter table loc1 set (autovacuum_enabled = 'false'); create foreign table rem1 (f1 serial, f2 text) server loopback options(table_name 'loc1'); select pg_catalog.setval('rem1_f1_seq', 10, false); @@ -6504,6 +6905,8 @@ DROP TRIGGER trig_row_after_delete ON rem1; -- =================================================================== CREATE TABLE a (aa TEXT); CREATE TABLE loct (aa TEXT, bb TEXT); +ALTER TABLE a SET (autovacuum_enabled = 'false'); +ALTER TABLE loct SET (autovacuum_enabled = 'false'); CREATE FOREIGN TABLE b (bb TEXT) INHERITS (a) SERVER loopback OPTIONS (table_name 'loct'); INSERT INTO a(aa) VALUES('aaa'); @@ -6645,12 +7048,16 @@ DROP TABLE loct; -- Check SELECT FOR UPDATE/SHARE with an inherited source table create table loct1 (f1 int, f2 int, f3 int); create table loct2 (f1 int, f2 int, f3 int); +alter table loct1 set (autovacuum_enabled = 'false'); +alter table loct2 set (autovacuum_enabled = 'false'); create table foo (f1 int, f2 int); create foreign table foo2 (f3 int) inherits (foo) server loopback options (table_name 'loct1'); create table bar (f1 int, f2 int); create foreign table bar2 (f3 int) inherits (bar) server loopback options (table_name 'loct2'); +alter table foo set (autovacuum_enabled = 'false'); +alter table bar set (autovacuum_enabled = 'false'); insert into foo values(1,1); insert into foo values(3,3); insert into foo2 values(2,2,2); @@ -7022,12 +7429,571 @@ update bar set f2 = f2 + 100 returning *; 7 | 277 (6 rows) +-- Test that UPDATE/DELETE with inherited target works with row-level triggers +CREATE TRIGGER trig_row_before +BEFORE UPDATE OR DELETE ON bar2 +FOR EACH ROW EXECUTE PROCEDURE trigger_data(23,'skidoo'); +CREATE TRIGGER trig_row_after +AFTER UPDATE OR DELETE ON bar2 +FOR EACH ROW EXECUTE PROCEDURE trigger_data(23,'skidoo'); +explain (verbose, costs off) +update bar set f2 = f2 + 100; + QUERY PLAN +-------------------------------------------------------------------------------------- + Update on public.bar + Update on public.bar + Foreign Update on public.bar2 + Remote SQL: UPDATE public.loct2 SET f2 = $2 WHERE ctid = $1 RETURNING f1, f2, f3 + -> Seq Scan on public.bar + Output: bar.f1, (bar.f2 + 100), bar.ctid + -> Foreign Scan on public.bar2 + Output: bar2.f1, (bar2.f2 + 100), bar2.f3, bar2.ctid, bar2.* + Remote SQL: SELECT f1, f2, f3, ctid FROM public.loct2 FOR UPDATE +(9 rows) + +update bar set f2 = f2 + 100; +NOTICE: trig_row_before(23, skidoo) BEFORE ROW UPDATE ON bar2 +NOTICE: OLD: (3,333,33),NEW: 
(3,433,33) +NOTICE: trig_row_before(23, skidoo) BEFORE ROW UPDATE ON bar2 +NOTICE: OLD: (4,344,44),NEW: (4,444,44) +NOTICE: trig_row_before(23, skidoo) BEFORE ROW UPDATE ON bar2 +NOTICE: OLD: (7,277,77),NEW: (7,377,77) +NOTICE: trig_row_after(23, skidoo) AFTER ROW UPDATE ON bar2 +NOTICE: OLD: (3,333,33),NEW: (3,433,33) +NOTICE: trig_row_after(23, skidoo) AFTER ROW UPDATE ON bar2 +NOTICE: OLD: (4,344,44),NEW: (4,444,44) +NOTICE: trig_row_after(23, skidoo) AFTER ROW UPDATE ON bar2 +NOTICE: OLD: (7,277,77),NEW: (7,377,77) +explain (verbose, costs off) +delete from bar where f2 < 400; + QUERY PLAN +--------------------------------------------------------------------------------------------- + Delete on public.bar + Delete on public.bar + Foreign Delete on public.bar2 + Remote SQL: DELETE FROM public.loct2 WHERE ctid = $1 RETURNING f1, f2, f3 + -> Seq Scan on public.bar + Output: bar.ctid + Filter: (bar.f2 < 400) + -> Foreign Scan on public.bar2 + Output: bar2.ctid, bar2.* + Remote SQL: SELECT f1, f2, f3, ctid FROM public.loct2 WHERE ((f2 < 400)) FOR UPDATE +(10 rows) + +delete from bar where f2 < 400; +NOTICE: trig_row_before(23, skidoo) BEFORE ROW DELETE ON bar2 +NOTICE: OLD: (7,377,77) +NOTICE: trig_row_after(23, skidoo) AFTER ROW DELETE ON bar2 +NOTICE: OLD: (7,377,77) +-- cleanup drop table foo cascade; NOTICE: drop cascades to foreign table foo2 drop table bar cascade; NOTICE: drop cascades to foreign table bar2 drop table loct1; drop table loct2; +-- Test pushing down UPDATE/DELETE joins to the remote server +create table parent (a int, b text); +create table loct1 (a int, b text); +create table loct2 (a int, b text); +create foreign table remt1 (a int, b text) + server loopback options (table_name 'loct1'); +create foreign table remt2 (a int, b text) + server loopback options (table_name 'loct2'); +alter foreign table remt1 inherit parent; +insert into remt1 values (1, 'foo'); +insert into remt1 values (2, 'bar'); +insert into remt2 values (1, 'foo'); +insert into remt2 values (2, 'bar'); +analyze remt1; +analyze remt2; +explain (verbose, costs off) +update parent set b = parent.b || remt2.b from remt2 where parent.a = remt2.a returning *; + QUERY PLAN +----------------------------------------------------------------------------------------------------------------------------------------------- + Update on public.parent + Output: parent.a, parent.b, remt2.a, remt2.b + Update on public.parent + Foreign Update on public.remt1 + -> Nested Loop + Output: parent.a, (parent.b || remt2.b), parent.ctid, remt2.*, remt2.a, remt2.b + Join Filter: (parent.a = remt2.a) + -> Seq Scan on public.parent + Output: parent.a, parent.b, parent.ctid + -> Foreign Scan on public.remt2 + Output: remt2.b, remt2.*, remt2.a + Remote SQL: SELECT a, b FROM public.loct2 + -> Foreign Update + Remote SQL: UPDATE public.loct1 r4 SET b = (r4.b || r2.b) FROM public.loct2 r2 WHERE ((r4.a = r2.a)) RETURNING r4.a, r4.b, r2.a, r2.b +(14 rows) + +update parent set b = parent.b || remt2.b from remt2 where parent.a = remt2.a returning *; + a | b | a | b +---+--------+---+----- + 1 | foofoo | 1 | foo + 2 | barbar | 2 | bar +(2 rows) + +explain (verbose, costs off) +delete from parent using remt2 where parent.a = remt2.a returning parent; + QUERY PLAN +------------------------------------------------------------------------------------------------------------------ + Delete on public.parent + Output: parent.* + Delete on public.parent + Foreign Delete on public.remt1 + -> Nested Loop + Output: parent.ctid, remt2.* + Join Filter: 
(parent.a = remt2.a) + -> Seq Scan on public.parent + Output: parent.ctid, parent.a + -> Foreign Scan on public.remt2 + Output: remt2.*, remt2.a + Remote SQL: SELECT a, b FROM public.loct2 + -> Foreign Delete + Remote SQL: DELETE FROM public.loct1 r4 USING public.loct2 r2 WHERE ((r4.a = r2.a)) RETURNING r4.a, r4.b +(14 rows) + +delete from parent using remt2 where parent.a = remt2.a returning parent; + parent +------------ + (1,foofoo) + (2,barbar) +(2 rows) + +-- cleanup +drop foreign table remt1; +drop foreign table remt2; +drop table loct1; +drop table loct2; +drop table parent; +-- =================================================================== +-- test tuple routing for foreign-table partitions +-- =================================================================== +-- Test insert tuple routing +create table itrtest (a int, b text) partition by list (a); +create table loct1 (a int check (a in (1)), b text); +create foreign table remp1 (a int check (a in (1)), b text) server loopback options (table_name 'loct1'); +create table loct2 (a int check (a in (2)), b text); +create foreign table remp2 (b text, a int check (a in (2))) server loopback options (table_name 'loct2'); +alter table itrtest attach partition remp1 for values in (1); +alter table itrtest attach partition remp2 for values in (2); +insert into itrtest values (1, 'foo'); +insert into itrtest values (1, 'bar') returning *; + a | b +---+----- + 1 | bar +(1 row) + +insert into itrtest values (2, 'baz'); +insert into itrtest values (2, 'qux') returning *; + a | b +---+----- + 2 | qux +(1 row) + +insert into itrtest values (1, 'test1'), (2, 'test2') returning *; + a | b +---+------- + 1 | test1 + 2 | test2 +(2 rows) + +select tableoid::regclass, * FROM itrtest; + tableoid | a | b +----------+---+------- + remp1 | 1 | foo + remp1 | 1 | bar + remp1 | 1 | test1 + remp2 | 2 | baz + remp2 | 2 | qux + remp2 | 2 | test2 +(6 rows) + +select tableoid::regclass, * FROM remp1; + tableoid | a | b +----------+---+------- + remp1 | 1 | foo + remp1 | 1 | bar + remp1 | 1 | test1 +(3 rows) + +select tableoid::regclass, * FROM remp2; + tableoid | b | a +----------+-------+--- + remp2 | baz | 2 + remp2 | qux | 2 + remp2 | test2 | 2 +(3 rows) + +delete from itrtest; +create unique index loct1_idx on loct1 (a); +-- DO NOTHING without an inference specification is supported +insert into itrtest values (1, 'foo') on conflict do nothing returning *; + a | b +---+----- + 1 | foo +(1 row) + +insert into itrtest values (1, 'foo') on conflict do nothing returning *; + a | b +---+--- +(0 rows) + +-- But other cases are not supported +insert into itrtest values (1, 'bar') on conflict (a) do nothing; +ERROR: there is no unique or exclusion constraint matching the ON CONFLICT specification +insert into itrtest values (1, 'bar') on conflict (a) do update set b = excluded.b; +ERROR: there is no unique or exclusion constraint matching the ON CONFLICT specification +select tableoid::regclass, * FROM itrtest; + tableoid | a | b +----------+---+----- + remp1 | 1 | foo +(1 row) + +delete from itrtest; +drop index loct1_idx; +-- Test that remote triggers work with insert tuple routing +create function br_insert_trigfunc() returns trigger as $$ +begin + new.b := new.b || ' triggered !'; + return new; +end +$$ language plpgsql; +create trigger loct1_br_insert_trigger before insert on loct1 + for each row execute procedure br_insert_trigfunc(); +create trigger loct2_br_insert_trigger before insert on loct2 + for each row execute procedure br_insert_trigfunc(); +-- 
The new values are concatenated with ' triggered !' +insert into itrtest values (1, 'foo') returning *; + a | b +---+----------------- + 1 | foo triggered ! +(1 row) + +insert into itrtest values (2, 'qux') returning *; + a | b +---+----------------- + 2 | qux triggered ! +(1 row) + +insert into itrtest values (1, 'test1'), (2, 'test2') returning *; + a | b +---+------------------- + 1 | test1 triggered ! + 2 | test2 triggered ! +(2 rows) + +with result as (insert into itrtest values (1, 'test1'), (2, 'test2') returning *) select * from result; + a | b +---+------------------- + 1 | test1 triggered ! + 2 | test2 triggered ! +(2 rows) + +drop trigger loct1_br_insert_trigger on loct1; +drop trigger loct2_br_insert_trigger on loct2; +drop table itrtest; +drop table loct1; +drop table loct2; +-- Test update tuple routing +create table utrtest (a int, b text) partition by list (a); +create table loct (a int check (a in (1)), b text); +create foreign table remp (a int check (a in (1)), b text) server loopback options (table_name 'loct'); +create table locp (a int check (a in (2)), b text); +alter table utrtest attach partition remp for values in (1); +alter table utrtest attach partition locp for values in (2); +insert into utrtest values (1, 'foo'); +insert into utrtest values (2, 'qux'); +select tableoid::regclass, * FROM utrtest; + tableoid | a | b +----------+---+----- + remp | 1 | foo + locp | 2 | qux +(2 rows) + +select tableoid::regclass, * FROM remp; + tableoid | a | b +----------+---+----- + remp | 1 | foo +(1 row) + +select tableoid::regclass, * FROM locp; + tableoid | a | b +----------+---+----- + locp | 2 | qux +(1 row) + +-- It's not allowed to move a row from a partition that is foreign to another +update utrtest set a = 2 where b = 'foo' returning *; +ERROR: new row for relation "loct" violates check constraint "loct_a_check" +DETAIL: Failing row contains (2, foo). +CONTEXT: remote SQL command: UPDATE public.loct SET a = 2 WHERE ((b = 'foo'::text)) RETURNING a, b +-- But the reverse is allowed +update utrtest set a = 1 where b = 'qux' returning *; + a | b +---+----- + 1 | qux +(1 row) + +select tableoid::regclass, * FROM utrtest; + tableoid | a | b +----------+---+----- + remp | 1 | foo + remp | 1 | qux +(2 rows) + +select tableoid::regclass, * FROM remp; + tableoid | a | b +----------+---+----- + remp | 1 | foo + remp | 1 | qux +(2 rows) + +select tableoid::regclass, * FROM locp; + tableoid | a | b +----------+---+--- +(0 rows) + +-- The executor should not let unexercised FDWs shut down +update utrtest set a = 1 where b = 'foo'; +-- Test that remote triggers work with update tuple routing +create trigger loct_br_insert_trigger before insert on loct + for each row execute procedure br_insert_trigfunc(); +delete from utrtest; +insert into utrtest values (2, 'qux'); +-- Check case where the foreign partition is a subplan target rel +explain (verbose, costs off) +update utrtest set a = 1 where a = 1 or a = 2 returning *; + QUERY PLAN +---------------------------------------------------------------------------------------------- + Update on public.utrtest + Output: remp.a, remp.b + Foreign Update on public.remp + Update on public.locp + -> Foreign Update on public.remp + Remote SQL: UPDATE public.loct SET a = 1 WHERE (((a = 1) OR (a = 2))) RETURNING a, b + -> Seq Scan on public.locp + Output: 1, locp.b, locp.ctid + Filter: ((locp.a = 1) OR (locp.a = 2)) +(9 rows) + +-- The new values are concatenated with ' triggered !' 
+update utrtest set a = 1 where a = 1 or a = 2 returning *; + a | b +---+----------------- + 1 | qux triggered ! +(1 row) + +delete from utrtest; +insert into utrtest values (2, 'qux'); +-- Check case where the foreign partition isn't a subplan target rel +explain (verbose, costs off) +update utrtest set a = 1 where a = 2 returning *; + QUERY PLAN +-------------------------------------- + Update on public.utrtest + Output: locp.a, locp.b + Update on public.locp + -> Seq Scan on public.locp + Output: 1, locp.b, locp.ctid + Filter: (locp.a = 2) +(6 rows) + +-- The new values are concatenated with ' triggered !' +update utrtest set a = 1 where a = 2 returning *; + a | b +---+----------------- + 1 | qux triggered ! +(1 row) + +drop trigger loct_br_insert_trigger on loct; +drop table utrtest; +drop table loct; +-- Test copy tuple routing +create table ctrtest (a int, b text) partition by list (a); +create table loct1 (a int check (a in (1)), b text); +create foreign table remp1 (a int check (a in (1)), b text) server loopback options (table_name 'loct1'); +create table loct2 (a int check (a in (2)), b text); +create foreign table remp2 (b text, a int check (a in (2))) server loopback options (table_name 'loct2'); +alter table ctrtest attach partition remp1 for values in (1); +alter table ctrtest attach partition remp2 for values in (2); +copy ctrtest from stdin; +select tableoid::regclass, * FROM ctrtest; + tableoid | a | b +----------+---+----- + remp1 | 1 | foo + remp2 | 2 | qux +(2 rows) + +select tableoid::regclass, * FROM remp1; + tableoid | a | b +----------+---+----- + remp1 | 1 | foo +(1 row) + +select tableoid::regclass, * FROM remp2; + tableoid | b | a +----------+-----+--- + remp2 | qux | 2 +(1 row) + +-- Copying into foreign partitions directly should work as well +copy remp1 from stdin; +select tableoid::regclass, * FROM remp1; + tableoid | a | b +----------+---+----- + remp1 | 1 | foo + remp1 | 1 | bar +(2 rows) + +drop table ctrtest; +drop table loct1; +drop table loct2; +-- =================================================================== +-- test COPY FROM +-- =================================================================== +create table loc2 (f1 int, f2 text); +alter table loc2 set (autovacuum_enabled = 'false'); +create foreign table rem2 (f1 int, f2 text) server loopback options(table_name 'loc2'); +-- Test basic functionality +copy rem2 from stdin; +select * from rem2; + f1 | f2 +----+----- + 1 | foo + 2 | bar +(2 rows) + +delete from rem2; +-- Test check constraints +alter table loc2 add constraint loc2_f1positive check (f1 >= 0); +alter foreign table rem2 add constraint rem2_f1positive check (f1 >= 0); +-- check constraint is enforced on the remote side, not locally +copy rem2 from stdin; +copy rem2 from stdin; -- ERROR +ERROR: new row for relation "loc2" violates check constraint "loc2_f1positive" +DETAIL: Failing row contains (-1, xyzzy). 
+CONTEXT: remote SQL command: INSERT INTO public.loc2(f1, f2) VALUES ($1, $2) +COPY rem2, line 1: "-1 xyzzy" +select * from rem2; + f1 | f2 +----+----- + 1 | foo + 2 | bar +(2 rows) + +alter foreign table rem2 drop constraint rem2_f1positive; +alter table loc2 drop constraint loc2_f1positive; +delete from rem2; +-- Test local triggers +create trigger trig_stmt_before before insert on rem2 + for each statement execute procedure trigger_func(); +create trigger trig_stmt_after after insert on rem2 + for each statement execute procedure trigger_func(); +create trigger trig_row_before before insert on rem2 + for each row execute procedure trigger_data(23,'skidoo'); +create trigger trig_row_after after insert on rem2 + for each row execute procedure trigger_data(23,'skidoo'); +copy rem2 from stdin; +NOTICE: trigger_func() called: action = INSERT, when = BEFORE, level = STATEMENT +NOTICE: trig_row_before(23, skidoo) BEFORE ROW INSERT ON rem2 +NOTICE: NEW: (1,foo) +NOTICE: trig_row_before(23, skidoo) BEFORE ROW INSERT ON rem2 +NOTICE: NEW: (2,bar) +NOTICE: trig_row_after(23, skidoo) AFTER ROW INSERT ON rem2 +NOTICE: NEW: (1,foo) +NOTICE: trig_row_after(23, skidoo) AFTER ROW INSERT ON rem2 +NOTICE: NEW: (2,bar) +NOTICE: trigger_func() called: action = INSERT, when = AFTER, level = STATEMENT +select * from rem2; + f1 | f2 +----+----- + 1 | foo + 2 | bar +(2 rows) + +drop trigger trig_row_before on rem2; +drop trigger trig_row_after on rem2; +drop trigger trig_stmt_before on rem2; +drop trigger trig_stmt_after on rem2; +delete from rem2; +create trigger trig_row_before_insert before insert on rem2 + for each row execute procedure trig_row_before_insupdate(); +-- The new values are concatenated with ' triggered !' +copy rem2 from stdin; +select * from rem2; + f1 | f2 +----+----------------- + 1 | foo triggered ! + 2 | bar triggered ! +(2 rows) + +drop trigger trig_row_before_insert on rem2; +delete from rem2; +create trigger trig_null before insert on rem2 + for each row execute procedure trig_null(); +-- Nothing happens +copy rem2 from stdin; +select * from rem2; + f1 | f2 +----+---- +(0 rows) + +drop trigger trig_null on rem2; +delete from rem2; +-- Test remote triggers +create trigger trig_row_before_insert before insert on loc2 + for each row execute procedure trig_row_before_insupdate(); +-- The new values are concatenated with ' triggered !' +copy rem2 from stdin; +select * from rem2; + f1 | f2 +----+----------------- + 1 | foo triggered ! + 2 | bar triggered ! 
+(2 rows) + +drop trigger trig_row_before_insert on loc2; +delete from rem2; +create trigger trig_null before insert on loc2 + for each row execute procedure trig_null(); +-- Nothing happens +copy rem2 from stdin; +select * from rem2; + f1 | f2 +----+---- +(0 rows) + +drop trigger trig_null on loc2; +delete from rem2; +-- Test a combination of local and remote triggers +create trigger rem2_trig_row_before before insert on rem2 + for each row execute procedure trigger_data(23,'skidoo'); +create trigger rem2_trig_row_after after insert on rem2 + for each row execute procedure trigger_data(23,'skidoo'); +create trigger loc2_trig_row_before_insert before insert on loc2 + for each row execute procedure trig_row_before_insupdate(); +copy rem2 from stdin; +NOTICE: rem2_trig_row_before(23, skidoo) BEFORE ROW INSERT ON rem2 +NOTICE: NEW: (1,foo) +NOTICE: rem2_trig_row_before(23, skidoo) BEFORE ROW INSERT ON rem2 +NOTICE: NEW: (2,bar) +NOTICE: rem2_trig_row_after(23, skidoo) AFTER ROW INSERT ON rem2 +NOTICE: NEW: (1,"foo triggered !") +NOTICE: rem2_trig_row_after(23, skidoo) AFTER ROW INSERT ON rem2 +NOTICE: NEW: (2,"bar triggered !") +select * from rem2; + f1 | f2 +----+----------------- + 1 | foo triggered ! + 2 | bar triggered ! +(2 rows) + +drop trigger rem2_trig_row_before on rem2; +drop trigger rem2_trig_row_after on rem2; +drop trigger loc2_trig_row_before_insert on loc2; +delete from rem2; -- =================================================================== -- test IMPORT FOREIGN SCHEMA -- =================================================================== @@ -7272,7 +8238,7 @@ CREATE TABLE import_source.t5 (c1 int, c2 text collate "C", "Col" "Colors"); CREATE SCHEMA import_dest5; BEGIN; DROP TYPE "Colors" CASCADE; -NOTICE: drop cascades to table import_source.t5 column Col +NOTICE: drop cascades to column Col of table import_source.t5 IMPORT FOREIGN SCHEMA import_source LIMIT TO (t5) FROM SERVER loopback INTO import_dest5; -- ERROR ERROR: type "public.Colors" does not exist @@ -7346,3 +8312,341 @@ AND ftoptions @> array['fetch_size=60000']; (1 row) ROLLBACK; +-- =================================================================== +-- test partitionwise joins +-- =================================================================== +SET enable_partitionwise_join=on; +CREATE TABLE fprt1 (a int, b int, c varchar) PARTITION BY RANGE(a); +CREATE TABLE fprt1_p1 (LIKE fprt1); +CREATE TABLE fprt1_p2 (LIKE fprt1); +ALTER TABLE fprt1_p1 SET (autovacuum_enabled = 'false'); +ALTER TABLE fprt1_p2 SET (autovacuum_enabled = 'false'); +INSERT INTO fprt1_p1 SELECT i, i, to_char(i/50, 'FM0000') FROM generate_series(0, 249, 2) i; +INSERT INTO fprt1_p2 SELECT i, i, to_char(i/50, 'FM0000') FROM generate_series(250, 499, 2) i; +CREATE FOREIGN TABLE ftprt1_p1 PARTITION OF fprt1 FOR VALUES FROM (0) TO (250) + SERVER loopback OPTIONS (table_name 'fprt1_p1', use_remote_estimate 'true'); +CREATE FOREIGN TABLE ftprt1_p2 PARTITION OF fprt1 FOR VALUES FROM (250) TO (500) + SERVER loopback OPTIONS (TABLE_NAME 'fprt1_p2'); +ANALYZE fprt1; +ANALYZE fprt1_p1; +ANALYZE fprt1_p2; +CREATE TABLE fprt2 (a int, b int, c varchar) PARTITION BY RANGE(b); +CREATE TABLE fprt2_p1 (LIKE fprt2); +CREATE TABLE fprt2_p2 (LIKE fprt2); +ALTER TABLE fprt2_p1 SET (autovacuum_enabled = 'false'); +ALTER TABLE fprt2_p2 SET (autovacuum_enabled = 'false'); +INSERT INTO fprt2_p1 SELECT i, i, to_char(i/50, 'FM0000') FROM generate_series(0, 249, 3) i; +INSERT INTO fprt2_p2 SELECT i, i, to_char(i/50, 'FM0000') FROM generate_series(250, 499, 3) i; 
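Partitionwise join pushdown requires enable_partitionwise_join to be on (set above), matching partition bounds on the two partitioned tables, and a join clause that equates their partition keys; each matching pair of foreign partitions that shares a server is then joined in a single remote query, showing up as one Foreign Scan per partition pair under an Append, as the EXPLAIN output further below illustrates. A minimal sketch for checking this by hand, assuming the fprt1/fprt2 tables being built here and the loopback server used throughout these tests:

EXPLAIN (VERBOSE, COSTS OFF)
SELECT t1.a, t2.b
  FROM fprt1 t1 JOIN fprt2 t2 ON t1.a = t2.b
 WHERE t1.a % 25 = 0;
-- Expect an Append of per-partition Foreign Scans, each carrying a
-- "Relations: (public.ftprt1_pN ...) INNER JOIN (public.ftprt2_pN ...)" line
-- and a single Remote SQL statement that joins fprt1_pN to fprt2_pN remotely.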
+CREATE FOREIGN TABLE ftprt2_p1 (b int, c varchar, a int) + SERVER loopback OPTIONS (table_name 'fprt2_p1', use_remote_estimate 'true'); +ALTER TABLE fprt2 ATTACH PARTITION ftprt2_p1 FOR VALUES FROM (0) TO (250); +CREATE FOREIGN TABLE ftprt2_p2 PARTITION OF fprt2 FOR VALUES FROM (250) TO (500) + SERVER loopback OPTIONS (table_name 'fprt2_p2', use_remote_estimate 'true'); +ANALYZE fprt2; +ANALYZE fprt2_p1; +ANALYZE fprt2_p2; +-- inner join three tables +EXPLAIN (COSTS OFF) +SELECT t1.a,t2.b,t3.c FROM fprt1 t1 INNER JOIN fprt2 t2 ON (t1.a = t2.b) INNER JOIN fprt1 t3 ON (t2.b = t3.a) WHERE t1.a % 25 =0 ORDER BY 1,2,3; + QUERY PLAN +-------------------------------------------------------------------------------------------------------------------- + Sort + Sort Key: t1.a, t3.c + -> Append + -> Foreign Scan + Relations: ((public.ftprt1_p1 t1) INNER JOIN (public.ftprt2_p1 t2)) INNER JOIN (public.ftprt1_p1 t3) + -> Foreign Scan + Relations: ((public.ftprt1_p2 t1) INNER JOIN (public.ftprt2_p2 t2)) INNER JOIN (public.ftprt1_p2 t3) +(7 rows) + +SELECT t1.a,t2.b,t3.c FROM fprt1 t1 INNER JOIN fprt2 t2 ON (t1.a = t2.b) INNER JOIN fprt1 t3 ON (t2.b = t3.a) WHERE t1.a % 25 =0 ORDER BY 1,2,3; + a | b | c +-----+-----+------ + 0 | 0 | 0000 + 150 | 150 | 0003 + 250 | 250 | 0005 + 400 | 400 | 0008 +(4 rows) + +-- left outer join + nullable clasue +EXPLAIN (COSTS OFF) +SELECT t1.a,t2.b,t2.c FROM fprt1 t1 LEFT JOIN (SELECT * FROM fprt2 WHERE a < 10) t2 ON (t1.a = t2.b and t1.b = t2.a) WHERE t1.a < 10 ORDER BY 1,2,3; + QUERY PLAN +----------------------------------------------------------------------------------- + Sort + Sort Key: t1.a, ftprt2_p1.b, ftprt2_p1.c + -> Append + -> Foreign Scan + Relations: (public.ftprt1_p1 t1) LEFT JOIN (public.ftprt2_p1 fprt2) +(5 rows) + +SELECT t1.a,t2.b,t2.c FROM fprt1 t1 LEFT JOIN (SELECT * FROM fprt2 WHERE a < 10) t2 ON (t1.a = t2.b and t1.b = t2.a) WHERE t1.a < 10 ORDER BY 1,2,3; + a | b | c +---+---+------ + 0 | 0 | 0000 + 2 | | + 4 | | + 6 | 6 | 0000 + 8 | | +(5 rows) + +-- with whole-row reference; partitionwise join does not apply +EXPLAIN (COSTS OFF) +SELECT t1.wr, t2.wr FROM (SELECT t1 wr, a FROM fprt1 t1 WHERE t1.a % 25 = 0) t1 FULL JOIN (SELECT t2 wr, b FROM fprt2 t2 WHERE t2.b % 25 = 0) t2 ON (t1.a = t2.b) ORDER BY 1,2; + QUERY PLAN +-------------------------------------------------------- + Sort + Sort Key: ((t1.*)::fprt1), ((t2.*)::fprt2) + -> Hash Full Join + Hash Cond: (t1.a = t2.b) + -> Append + -> Foreign Scan on ftprt1_p1 t1 + -> Foreign Scan on ftprt1_p2 t1_1 + -> Hash + -> Append + -> Foreign Scan on ftprt2_p1 t2 + -> Foreign Scan on ftprt2_p2 t2_1 +(11 rows) + +SELECT t1.wr, t2.wr FROM (SELECT t1 wr, a FROM fprt1 t1 WHERE t1.a % 25 = 0) t1 FULL JOIN (SELECT t2 wr, b FROM fprt2 t2 WHERE t2.b % 25 = 0) t2 ON (t1.a = t2.b) ORDER BY 1,2; + wr | wr +----------------+---------------- + (0,0,0000) | (0,0,0000) + (50,50,0001) | + (100,100,0002) | + (150,150,0003) | (150,150,0003) + (200,200,0004) | + (250,250,0005) | (250,250,0005) + (300,300,0006) | + (350,350,0007) | + (400,400,0008) | (400,400,0008) + (450,450,0009) | + | (75,75,0001) + | (225,225,0004) + | (325,325,0006) + | (475,475,0009) +(14 rows) + +-- join with lateral reference +EXPLAIN (COSTS OFF) +SELECT t1.a,t1.b FROM fprt1 t1, LATERAL (SELECT t2.a, t2.b FROM fprt2 t2 WHERE t1.a = t2.b AND t1.b = t2.a) q WHERE t1.a%25 = 0 ORDER BY 1,2; + QUERY PLAN +--------------------------------------------------------------------------------- + Sort + Sort Key: t1.a, t1.b + -> Append + -> Foreign Scan + 
Relations: (public.ftprt1_p1 t1) INNER JOIN (public.ftprt2_p1 t2) + -> Foreign Scan + Relations: (public.ftprt1_p2 t1) INNER JOIN (public.ftprt2_p2 t2) +(7 rows) + +SELECT t1.a,t1.b FROM fprt1 t1, LATERAL (SELECT t2.a, t2.b FROM fprt2 t2 WHERE t1.a = t2.b AND t1.b = t2.a) q WHERE t1.a%25 = 0 ORDER BY 1,2; + a | b +-----+----- + 0 | 0 + 150 | 150 + 250 | 250 + 400 | 400 +(4 rows) + +-- with PHVs, partitionwise join selected but no join pushdown +EXPLAIN (COSTS OFF) +SELECT t1.a, t1.phv, t2.b, t2.phv FROM (SELECT 't1_phv' phv, * FROM fprt1 WHERE a % 25 = 0) t1 FULL JOIN (SELECT 't2_phv' phv, * FROM fprt2 WHERE b % 25 = 0) t2 ON (t1.a = t2.b) ORDER BY t1.a, t2.b; + QUERY PLAN +------------------------------------------------------ + Sort + Sort Key: ftprt1_p1.a, ftprt2_p1.b + -> Append + -> Hash Full Join + Hash Cond: (ftprt1_p1.a = ftprt2_p1.b) + -> Foreign Scan on ftprt1_p1 + -> Hash + -> Foreign Scan on ftprt2_p1 + -> Hash Full Join + Hash Cond: (ftprt1_p2.a = ftprt2_p2.b) + -> Foreign Scan on ftprt1_p2 + -> Hash + -> Foreign Scan on ftprt2_p2 +(13 rows) + +SELECT t1.a, t1.phv, t2.b, t2.phv FROM (SELECT 't1_phv' phv, * FROM fprt1 WHERE a % 25 = 0) t1 FULL JOIN (SELECT 't2_phv' phv, * FROM fprt2 WHERE b % 25 = 0) t2 ON (t1.a = t2.b) ORDER BY t1.a, t2.b; + a | phv | b | phv +-----+--------+-----+-------- + 0 | t1_phv | 0 | t2_phv + 50 | t1_phv | | + 100 | t1_phv | | + 150 | t1_phv | 150 | t2_phv + 200 | t1_phv | | + 250 | t1_phv | 250 | t2_phv + 300 | t1_phv | | + 350 | t1_phv | | + 400 | t1_phv | 400 | t2_phv + 450 | t1_phv | | + | | 75 | t2_phv + | | 225 | t2_phv + | | 325 | t2_phv + | | 475 | t2_phv +(14 rows) + +-- test FOR UPDATE; partitionwise join does not apply +EXPLAIN (COSTS OFF) +SELECT t1.a, t2.b FROM fprt1 t1 INNER JOIN fprt2 t2 ON (t1.a = t2.b) WHERE t1.a % 25 = 0 ORDER BY 1,2 FOR UPDATE OF t1; + QUERY PLAN +-------------------------------------------------------------- + LockRows + -> Sort + Sort Key: t1.a + -> Hash Join + Hash Cond: (t2.b = t1.a) + -> Append + -> Foreign Scan on ftprt2_p1 t2 + -> Foreign Scan on ftprt2_p2 t2_1 + -> Hash + -> Append + -> Foreign Scan on ftprt1_p1 t1 + -> Foreign Scan on ftprt1_p2 t1_1 +(12 rows) + +SELECT t1.a, t2.b FROM fprt1 t1 INNER JOIN fprt2 t2 ON (t1.a = t2.b) WHERE t1.a % 25 = 0 ORDER BY 1,2 FOR UPDATE OF t1; + a | b +-----+----- + 0 | 0 + 150 | 150 + 250 | 250 + 400 | 400 +(4 rows) + +RESET enable_partitionwise_join; +-- =================================================================== +-- test partitionwise aggregates +-- =================================================================== +CREATE TABLE pagg_tab (a int, b int, c text) PARTITION BY RANGE(a); +CREATE TABLE pagg_tab_p1 (LIKE pagg_tab); +CREATE TABLE pagg_tab_p2 (LIKE pagg_tab); +CREATE TABLE pagg_tab_p3 (LIKE pagg_tab); +INSERT INTO pagg_tab_p1 SELECT i % 30, i % 50, to_char(i/30, 'FM0000') FROM generate_series(1, 3000) i WHERE (i % 30) < 10; +INSERT INTO pagg_tab_p2 SELECT i % 30, i % 50, to_char(i/30, 'FM0000') FROM generate_series(1, 3000) i WHERE (i % 30) < 20 and (i % 30) >= 10; +INSERT INTO pagg_tab_p3 SELECT i % 30, i % 50, to_char(i/30, 'FM0000') FROM generate_series(1, 3000) i WHERE (i % 30) < 30 and (i % 30) >= 20; +-- Create foreign partitions +CREATE FOREIGN TABLE fpagg_tab_p1 PARTITION OF pagg_tab FOR VALUES FROM (0) TO (10) SERVER loopback OPTIONS (table_name 'pagg_tab_p1'); +CREATE FOREIGN TABLE fpagg_tab_p2 PARTITION OF pagg_tab FOR VALUES FROM (10) TO (20) SERVER loopback OPTIONS (table_name 'pagg_tab_p2');; +CREATE FOREIGN TABLE fpagg_tab_p3 PARTITION 
OF pagg_tab FOR VALUES FROM (20) TO (30) SERVER loopback OPTIONS (table_name 'pagg_tab_p3');; +ANALYZE pagg_tab; +ANALYZE fpagg_tab_p1; +ANALYZE fpagg_tab_p2; +ANALYZE fpagg_tab_p3; +-- When GROUP BY clause matches with PARTITION KEY. +-- Plan with partitionwise aggregates is disabled +SET enable_partitionwise_aggregate TO false; +EXPLAIN (COSTS OFF) +SELECT a, sum(b), min(b), count(*) FROM pagg_tab GROUP BY a HAVING avg(b) < 22 ORDER BY 1; + QUERY PLAN +------------------------------------------------------- + Sort + Sort Key: fpagg_tab_p1.a + -> HashAggregate + Group Key: fpagg_tab_p1.a + Filter: (avg(fpagg_tab_p1.b) < '22'::numeric) + -> Append + -> Foreign Scan on fpagg_tab_p1 + -> Foreign Scan on fpagg_tab_p2 + -> Foreign Scan on fpagg_tab_p3 +(9 rows) + +-- Plan with partitionwise aggregates is enabled +SET enable_partitionwise_aggregate TO true; +EXPLAIN (COSTS OFF) +SELECT a, sum(b), min(b), count(*) FROM pagg_tab GROUP BY a HAVING avg(b) < 22 ORDER BY 1; + QUERY PLAN +---------------------------------------------------------------------- + Sort + Sort Key: fpagg_tab_p1.a + -> Append + -> Foreign Scan + Relations: Aggregate on (public.fpagg_tab_p1 pagg_tab) + -> Foreign Scan + Relations: Aggregate on (public.fpagg_tab_p2 pagg_tab) + -> Foreign Scan + Relations: Aggregate on (public.fpagg_tab_p3 pagg_tab) +(9 rows) + +SELECT a, sum(b), min(b), count(*) FROM pagg_tab GROUP BY a HAVING avg(b) < 22 ORDER BY 1; + a | sum | min | count +----+------+-----+------- + 0 | 2000 | 0 | 100 + 1 | 2100 | 1 | 100 + 10 | 2000 | 0 | 100 + 11 | 2100 | 1 | 100 + 20 | 2000 | 0 | 100 + 21 | 2100 | 1 | 100 +(6 rows) + +-- Check with whole-row reference +-- Should have all the columns in the target list for the given relation +EXPLAIN (VERBOSE, COSTS OFF) +SELECT a, count(t1) FROM pagg_tab t1 GROUP BY a HAVING avg(b) < 22 ORDER BY 1; + QUERY PLAN +------------------------------------------------------------------------ + Sort + Output: t1.a, (count(((t1.*)::pagg_tab))) + Sort Key: t1.a + -> Append + -> HashAggregate + Output: t1.a, count(((t1.*)::pagg_tab)) + Group Key: t1.a + Filter: (avg(t1.b) < '22'::numeric) + -> Foreign Scan on public.fpagg_tab_p1 t1 + Output: t1.a, t1.*, t1.b + Remote SQL: SELECT a, b, c FROM public.pagg_tab_p1 + -> HashAggregate + Output: t1_1.a, count(((t1_1.*)::pagg_tab)) + Group Key: t1_1.a + Filter: (avg(t1_1.b) < '22'::numeric) + -> Foreign Scan on public.fpagg_tab_p2 t1_1 + Output: t1_1.a, t1_1.*, t1_1.b + Remote SQL: SELECT a, b, c FROM public.pagg_tab_p2 + -> HashAggregate + Output: t1_2.a, count(((t1_2.*)::pagg_tab)) + Group Key: t1_2.a + Filter: (avg(t1_2.b) < '22'::numeric) + -> Foreign Scan on public.fpagg_tab_p3 t1_2 + Output: t1_2.a, t1_2.*, t1_2.b + Remote SQL: SELECT a, b, c FROM public.pagg_tab_p3 +(25 rows) + +SELECT a, count(t1) FROM pagg_tab t1 GROUP BY a HAVING avg(b) < 22 ORDER BY 1; + a | count +----+------- + 0 | 100 + 1 | 100 + 10 | 100 + 11 | 100 + 20 | 100 + 21 | 100 +(6 rows) + +-- When GROUP BY clause does not match with PARTITION KEY. 
+EXPLAIN (COSTS OFF) +SELECT b, avg(a), max(a), count(*) FROM pagg_tab GROUP BY b HAVING sum(a) < 700 ORDER BY 1; + QUERY PLAN +------------------------------------------------------ + Sort + Sort Key: fpagg_tab_p1.b + -> Finalize HashAggregate + Group Key: fpagg_tab_p1.b + Filter: (sum(fpagg_tab_p1.a) < 700) + -> Append + -> Partial HashAggregate + Group Key: fpagg_tab_p1.b + -> Foreign Scan on fpagg_tab_p1 + -> Partial HashAggregate + Group Key: fpagg_tab_p2.b + -> Foreign Scan on fpagg_tab_p2 + -> Partial HashAggregate + Group Key: fpagg_tab_p3.b + -> Foreign Scan on fpagg_tab_p3 +(15 rows) + +-- Clean-up +RESET enable_partitionwise_aggregate; diff --git a/contrib/postgres_fdw/option.c b/contrib/postgres_fdw/option.c index 67e1c59951..6854f1bd91 100644 --- a/contrib/postgres_fdw/option.c +++ b/contrib/postgres_fdw/option.c @@ -3,7 +3,7 @@ * option.c * FDW option handling for postgres_fdw * - * Portions Copyright (c) 2012-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 2012-2018, PostgreSQL Global Development Group * * IDENTIFICATION * contrib/postgres_fdw/option.c @@ -196,7 +196,7 @@ InitPgFdwOptions(void) ereport(ERROR, (errcode(ERRCODE_FDW_OUT_OF_MEMORY), errmsg("out of memory"), - errdetail("could not get libpq's default connection options"))); + errdetail("Could not get libpq's default connection options."))); /* Count how many libpq options are available. */ num_libpq_opts = 0; diff --git a/contrib/postgres_fdw/postgres_fdw.c b/contrib/postgres_fdw/postgres_fdw.c index d77c2a70e4..fd20aa96aa 100644 --- a/contrib/postgres_fdw/postgres_fdw.c +++ b/contrib/postgres_fdw/postgres_fdw.c @@ -3,7 +3,7 @@ * postgres_fdw.c * Foreign-data wrapper for remote PostgreSQL servers * - * Portions Copyright (c) 2012-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 2012-2018, PostgreSQL Global Development Group * * IDENTIFICATION * contrib/postgres_fdw/postgres_fdw.c @@ -35,6 +35,7 @@ #include "optimizer/tlist.h" #include "parser/parsetree.h" #include "utils/builtins.h" +#include "utils/float.h" #include "utils/guc.h" #include "utils/lsyscache.h" #include "utils/memutils.h" @@ -210,6 +211,11 @@ typedef struct PgFdwDirectModifyState PGresult *result; /* result for query */ int num_tuples; /* # of result tuples */ int next_tuple; /* index of next one to return */ + Relation resultRel; /* relcache entry for the target relation */ + AttrNumber *attnoMap; /* array of attnums of input user columns */ + AttrNumber ctidAttno; /* attnum of input ctid column */ + AttrNumber oidAttno; /* attnum of input oid column */ + bool hasSystemCols; /* are there system columns of resultRel? 
*/ /* working memory context */ MemoryContext temp_cxt; /* context for per-tuple temporary data */ @@ -278,7 +284,7 @@ static void postgresGetForeignPaths(PlannerInfo *root, RelOptInfo *baserel, Oid foreigntableid); static ForeignScan *postgresGetForeignPlan(PlannerInfo *root, - RelOptInfo *baserel, + RelOptInfo *foreignrel, Oid foreigntableid, ForeignPath *best_path, List *tlist, @@ -314,6 +320,10 @@ static TupleTableSlot *postgresExecForeignDelete(EState *estate, TupleTableSlot *planSlot); static void postgresEndForeignModify(EState *estate, ResultRelInfo *resultRelInfo); +static void postgresBeginForeignInsert(ModifyTableState *mtstate, + ResultRelInfo *resultRelInfo); +static void postgresEndForeignInsert(EState *estate, + ResultRelInfo *resultRelInfo); static int postgresIsForeignRelUpdatable(Relation rel); static bool postgresPlanDirectModify(PlannerInfo *root, ModifyTable *plan, @@ -347,14 +357,15 @@ static bool postgresRecheckForeignScan(ForeignScanState *node, static void postgresGetForeignUpperPaths(PlannerInfo *root, UpperRelationKind stage, RelOptInfo *input_rel, - RelOptInfo *output_rel); + RelOptInfo *output_rel, + void *extra); /* * Helper functions */ static void estimate_path_cost_size(PlannerInfo *root, - RelOptInfo *baserel, - List *join_conds, + RelOptInfo *foreignrel, + List *param_join_conds, List *pathkeys, double *p_rows, int *p_width, Cost *p_startup_cost, Cost *p_total_cost); @@ -370,14 +381,33 @@ static bool ec_member_matches_foreign(PlannerInfo *root, RelOptInfo *rel, static void create_cursor(ForeignScanState *node); static void fetch_more_data(ForeignScanState *node); static void close_cursor(PGconn *conn, unsigned int cursor_number); +static PgFdwModifyState *create_foreign_modify(EState *estate, + RangeTblEntry *rte, + ResultRelInfo *resultRelInfo, + CmdType operation, + Plan *subplan, + char *query, + List *target_attrs, + bool has_returning, + List *retrieved_attrs); static void prepare_foreign_modify(PgFdwModifyState *fmstate); static const char **convert_prep_stmt_params(PgFdwModifyState *fmstate, ItemPointer tupleid, TupleTableSlot *slot); static void store_returning_result(PgFdwModifyState *fmstate, TupleTableSlot *slot, PGresult *res); +static void finish_foreign_modify(PgFdwModifyState *fmstate); +static List *build_remote_returning(Index rtindex, Relation rel, + List *returningList); +static void rebuild_fdw_scan_tlist(ForeignScan *fscan, List *tlist); static void execute_dml_stmt(ForeignScanState *node); static TupleTableSlot *get_returning_data(ForeignScanState *node); +static void init_returning_filter(PgFdwDirectModifyState *dmstate, + List *fdw_scan_tlist, + Index rtindex); +static TupleTableSlot *apply_returning_filter(PgFdwDirectModifyState *dmstate, + TupleTableSlot *slot, + EState *estate); static void prepare_query_params(PlanState *node, List *fdw_exprs, int numParams, @@ -405,7 +435,8 @@ static void conversion_error_callback(void *arg); static bool foreign_join_ok(PlannerInfo *root, RelOptInfo *joinrel, JoinType jointype, RelOptInfo *outerrel, RelOptInfo *innerrel, JoinPathExtraData *extra); -static bool foreign_grouping_ok(PlannerInfo *root, RelOptInfo *grouped_rel); +static bool foreign_grouping_ok(PlannerInfo *root, RelOptInfo *grouped_rel, + Node *havingQual); static List *get_useful_pathkeys_for_relation(PlannerInfo *root, RelOptInfo *rel); static List *get_useful_ecs_for_relation(PlannerInfo *root, RelOptInfo *rel); @@ -413,7 +444,8 @@ static void add_paths_with_pathkeys_for_rel(PlannerInfo *root, RelOptInfo *rel, Path *epq_path); 
static void add_foreign_grouping_paths(PlannerInfo *root, RelOptInfo *input_rel, - RelOptInfo *grouped_rel); + RelOptInfo *grouped_rel, + GroupPathExtraData *extra); static void apply_server_options(PgFdwRelationInfo *fpinfo); static void apply_table_options(PgFdwRelationInfo *fpinfo); static void merge_fdw_options(PgFdwRelationInfo *fpinfo, @@ -447,6 +479,8 @@ postgres_fdw_handler(PG_FUNCTION_ARGS) routine->ExecForeignUpdate = postgresExecForeignUpdate; routine->ExecForeignDelete = postgresExecForeignDelete; routine->EndForeignModify = postgresEndForeignModify; + routine->BeginForeignInsert = postgresBeginForeignInsert; + routine->EndForeignInsert = postgresEndForeignInsert; routine->IsForeignRelUpdatable = postgresIsForeignRelUpdatable; routine->PlanDirectModify = postgresPlanDirectModify; routine->BeginDirectModify = postgresBeginDirectModify; @@ -1311,7 +1345,7 @@ postgresBeginForeignScan(ForeignScanState *node, int eflags) rtindex = fsplan->scan.scanrelid; else rtindex = bms_next_member(fsplan->fs_relids, -1); - rte = rt_fetch(rtindex, estate->es_range_table); + rte = exec_rt_fetch(rtindex, estate); userid = rte->checkAsUser ? rte->checkAsUser : GetUserId(); /* Get info about foreign table. */ @@ -1409,10 +1443,9 @@ postgresIterateForeignScan(ForeignScanState *node) /* * Return the next tuple. */ - ExecStoreTuple(fsstate->tuples[fsstate->next_tuple++], - slot, - InvalidBuffer, - false); + ExecStoreHeapTuple(fsstate->tuples[fsstate->next_tuple++], + slot, + false); return slot; } @@ -1549,6 +1582,7 @@ postgresPlanForeignModify(PlannerInfo *root, Relation rel; StringInfoData sql; List *targetAttrs = NIL; + List *withCheckOptionList = NIL; List *returningList = NIL; List *retrieved_attrs = NIL; bool doNothing = false; @@ -1575,7 +1609,7 @@ postgresPlanForeignModify(PlannerInfo *root, for (attnum = 1; attnum <= tupdesc->natts; attnum++) { - Form_pg_attribute attr = tupdesc->attrs[attnum - 1]; + Form_pg_attribute attr = TupleDescAttr(tupdesc, attnum - 1); if (!attr->attisdropped) targetAttrs = lappend_int(targetAttrs, attnum); @@ -1597,6 +1631,13 @@ postgresPlanForeignModify(PlannerInfo *root, } } + /* + * Extract the relevant WITH CHECK OPTION list if any. + */ + if (plan->withCheckOptionLists) + withCheckOptionList = (List *) list_nth(plan->withCheckOptionLists, + subplan_index); + /* * Extract the relevant RETURNING list if any. 
*/ @@ -1621,17 +1662,19 @@ postgresPlanForeignModify(PlannerInfo *root, switch (operation) { case CMD_INSERT: - deparseInsertSql(&sql, root, resultRelation, rel, - targetAttrs, doNothing, returningList, + deparseInsertSql(&sql, rte, resultRelation, rel, + targetAttrs, doNothing, + withCheckOptionList, returningList, &retrieved_attrs); break; case CMD_UPDATE: - deparseUpdateSql(&sql, root, resultRelation, rel, - targetAttrs, returningList, + deparseUpdateSql(&sql, rte, resultRelation, rel, + targetAttrs, + withCheckOptionList, returningList, &retrieved_attrs); break; case CMD_DELETE: - deparseDeleteSql(&sql, root, resultRelation, rel, + deparseDeleteSql(&sql, rte, resultRelation, rel, returningList, &retrieved_attrs); break; @@ -1664,17 +1707,11 @@ postgresBeginForeignModify(ModifyTableState *mtstate, int eflags) { PgFdwModifyState *fmstate; - EState *estate = mtstate->ps.state; - CmdType operation = mtstate->operation; - Relation rel = resultRelInfo->ri_RelationDesc; + char *query; + List *target_attrs; + bool has_returning; + List *retrieved_attrs; RangeTblEntry *rte; - Oid userid; - ForeignTable *table; - UserMapping *user; - AttrNumber n_params; - Oid typefnoid; - bool isvarlena; - ListCell *lc; /* * Do nothing in EXPLAIN (no ANALYZE) case. resultRelInfo->ri_FdwState @@ -1683,82 +1720,30 @@ postgresBeginForeignModify(ModifyTableState *mtstate, if (eflags & EXEC_FLAG_EXPLAIN_ONLY) return; - /* Begin constructing PgFdwModifyState. */ - fmstate = (PgFdwModifyState *) palloc0(sizeof(PgFdwModifyState)); - fmstate->rel = rel; - - /* - * Identify which user to do the remote access as. This should match what - * ExecCheckRTEPerms() does. - */ - rte = rt_fetch(resultRelInfo->ri_RangeTableIndex, estate->es_range_table); - userid = rte->checkAsUser ? rte->checkAsUser : GetUserId(); - - /* Get info about foreign table. */ - table = GetForeignTable(RelationGetRelid(rel)); - user = GetUserMapping(userid, table->serverid); - - /* Open connection; report that we'll create a prepared statement. */ - fmstate->conn = GetConnection(user, true); - fmstate->p_name = NULL; /* prepared statement not made yet */ - /* Deconstruct fdw_private data. */ - fmstate->query = strVal(list_nth(fdw_private, - FdwModifyPrivateUpdateSql)); - fmstate->target_attrs = (List *) list_nth(fdw_private, - FdwModifyPrivateTargetAttnums); - fmstate->has_returning = intVal(list_nth(fdw_private, - FdwModifyPrivateHasReturning)); - fmstate->retrieved_attrs = (List *) list_nth(fdw_private, - FdwModifyPrivateRetrievedAttrs); - - /* Create context for per-tuple temp workspace. */ - fmstate->temp_cxt = AllocSetContextCreate(estate->es_query_cxt, - "postgres_fdw temporary data", - ALLOCSET_SMALL_SIZES); - - /* Prepare for input conversion of RETURNING results. */ - if (fmstate->has_returning) - fmstate->attinmeta = TupleDescGetAttInMetadata(RelationGetDescr(rel)); - - /* Prepare for output conversion of parameters used in prepared stmt. 
*/ - n_params = list_length(fmstate->target_attrs) + 1; - fmstate->p_flinfo = (FmgrInfo *) palloc0(sizeof(FmgrInfo) * n_params); - fmstate->p_nums = 0; - - if (operation == CMD_UPDATE || operation == CMD_DELETE) - { - /* Find the ctid resjunk column in the subplan's result */ - Plan *subplan = mtstate->mt_plans[subplan_index]->plan; - - fmstate->ctidAttno = ExecFindJunkAttributeInTlist(subplan->targetlist, - "ctid"); - if (!AttributeNumberIsValid(fmstate->ctidAttno)) - elog(ERROR, "could not find junk ctid column"); - - /* First transmittable parameter will be ctid */ - getTypeOutputInfo(TIDOID, &typefnoid, &isvarlena); - fmgr_info(typefnoid, &fmstate->p_flinfo[fmstate->p_nums]); - fmstate->p_nums++; - } - - if (operation == CMD_INSERT || operation == CMD_UPDATE) - { - /* Set up for remaining transmittable parameters */ - foreach(lc, fmstate->target_attrs) - { - int attnum = lfirst_int(lc); - Form_pg_attribute attr = RelationGetDescr(rel)->attrs[attnum - 1]; - - Assert(!attr->attisdropped); - - getTypeOutputInfo(attr->atttypid, &typefnoid, &isvarlena); - fmgr_info(typefnoid, &fmstate->p_flinfo[fmstate->p_nums]); - fmstate->p_nums++; - } - } - - Assert(fmstate->p_nums <= n_params); + query = strVal(list_nth(fdw_private, + FdwModifyPrivateUpdateSql)); + target_attrs = (List *) list_nth(fdw_private, + FdwModifyPrivateTargetAttnums); + has_returning = intVal(list_nth(fdw_private, + FdwModifyPrivateHasReturning)); + retrieved_attrs = (List *) list_nth(fdw_private, + FdwModifyPrivateRetrievedAttrs); + + /* Find RTE. */ + rte = exec_rt_fetch(resultRelInfo->ri_RangeTableIndex, + mtstate->ps.state); + + /* Construct an execution state. */ + fmstate = create_foreign_modify(mtstate->ps.state, + rte, + resultRelInfo, + mtstate->operation, + mtstate->mt_plans[subplan_index]->plan, + query, + target_attrs, + has_returning, + retrieved_attrs); resultRelInfo->ri_FdwState = fmstate; } @@ -1993,28 +1978,116 @@ postgresEndForeignModify(EState *estate, if (fmstate == NULL) return; - /* If we created a prepared statement, destroy it */ - if (fmstate->p_name) + /* Destroy the execution state */ + finish_foreign_modify(fmstate); +} + +/* + * postgresBeginForeignInsert + * Begin an insert operation on a foreign table + */ +static void +postgresBeginForeignInsert(ModifyTableState *mtstate, + ResultRelInfo *resultRelInfo) +{ + PgFdwModifyState *fmstate; + ModifyTable *plan = castNode(ModifyTable, mtstate->ps.plan); + EState *estate = mtstate->ps.state; + Index resultRelation = resultRelInfo->ri_RangeTableIndex; + Relation rel = resultRelInfo->ri_RelationDesc; + RangeTblEntry *rte; + TupleDesc tupdesc = RelationGetDescr(rel); + int attnum; + StringInfoData sql; + List *targetAttrs = NIL; + List *retrieved_attrs = NIL; + bool doNothing = false; + + initStringInfo(&sql); + + /* We transmit all columns that are defined in the foreign table. */ + for (attnum = 1; attnum <= tupdesc->natts; attnum++) { - char sql[64]; - PGresult *res; + Form_pg_attribute attr = TupleDescAttr(tupdesc, attnum - 1); - snprintf(sql, sizeof(sql), "DEALLOCATE %s", fmstate->p_name); + if (!attr->attisdropped) + targetAttrs = lappend_int(targetAttrs, attnum); + } + + /* Check if we add the ON CONFLICT clause to the remote query. */ + if (plan) + { + OnConflictAction onConflictAction = plan->onConflictAction; + + /* We only support DO NOTHING without an inference specification. 
*/ + if (onConflictAction == ONCONFLICT_NOTHING) + doNothing = true; + else if (onConflictAction != ONCONFLICT_NONE) + elog(ERROR, "unexpected ON CONFLICT specification: %d", + (int) onConflictAction); + } + + /* + * If the foreign table is a partition, we need to create a new RTE + * describing the foreign table for use by deparseInsertSql and + * create_foreign_modify() below, after first copying the parent's RTE and + * modifying some fields to describe the foreign partition to work on. + * However, if this is invoked by UPDATE, the existing RTE may already + * correspond to this partition if it is one of the UPDATE subplan target + * rels; in that case, we can just use the existing RTE as-is. + */ + rte = exec_rt_fetch(resultRelation, estate); + if (rte->relid != RelationGetRelid(rel)) + { + rte = copyObject(rte); + rte->relid = RelationGetRelid(rel); + rte->relkind = RELKIND_FOREIGN_TABLE; /* - * We don't use a PG_TRY block here, so be careful not to throw error - * without releasing the PGresult. + * For UPDATE, we must use the RT index of the first subplan target + * rel's RTE, because the core code would have built expressions for + * the partition, such as RETURNING, using that RT index as varno of + * Vars contained in those expressions. */ - res = pgfdw_exec_query(fmstate->conn, sql); - if (PQresultStatus(res) != PGRES_COMMAND_OK) - pgfdw_report_error(ERROR, res, fmstate->conn, true, sql); - PQclear(res); - fmstate->p_name = NULL; + if (plan && plan->operation == CMD_UPDATE && + resultRelation == plan->rootRelation) + resultRelation = mtstate->resultRelInfo[0].ri_RangeTableIndex; } - /* Release remote connection */ - ReleaseConnection(fmstate->conn); - fmstate->conn = NULL; + /* Construct the SQL command string. */ + deparseInsertSql(&sql, rte, resultRelation, rel, targetAttrs, doNothing, + resultRelInfo->ri_WithCheckOptions, + resultRelInfo->ri_returningList, + &retrieved_attrs); + + /* Construct an execution state. */ + fmstate = create_foreign_modify(mtstate->ps.state, + rte, + resultRelInfo, + CMD_INSERT, + NULL, + sql.data, + targetAttrs, + retrieved_attrs != NIL, + retrieved_attrs); + + resultRelInfo->ri_FdwState = fmstate; +} + +/* + * postgresEndForeignInsert + * Finish an insert operation on a foreign table + */ +static void +postgresEndForeignInsert(EState *estate, + ResultRelInfo *resultRelInfo) +{ + PgFdwModifyState *fmstate = (PgFdwModifyState *) resultRelInfo->ri_FdwState; + + Assert(fmstate != NULL); + + /* Destroy the execution state */ + finish_foreign_modify(fmstate); } /* @@ -2143,14 +2216,15 @@ postgresPlanDirectModify(PlannerInfo *root, if (subplan->qual != NIL) return false; - /* - * We can't handle an UPDATE or DELETE on a foreign join for now. - */ - if (fscan->scan.scanrelid == 0) - return false; - /* Safe to fetch data about the target foreign rel */ - foreignrel = root->simple_rel_array[resultRelation]; + if (fscan->scan.scanrelid == 0) + { + foreignrel = find_join_rel(root, fscan->fs_relids); + /* We should have a rel for this foreign join. */ + Assert(foreignrel); + } + else + foreignrel = root->simple_rel_array[resultRelation]; rte = root->simple_rte_array[resultRelation]; fpinfo = (PgFdwRelationInfo *) foreignrel->fdw_private; @@ -2211,8 +2285,23 @@ postgresPlanDirectModify(PlannerInfo *root, * Extract the relevant RETURNING list if any. */ if (plan->returningLists) + { returningList = (List *) list_nth(plan->returningLists, subplan_index); + /* + * When performing an UPDATE/DELETE .. 
RETURNING on a join directly, + we fetch from the foreign server any Vars specified in RETURNING + that refer not only to the target relation but to non-target + relations. So we'll deparse them into the RETURNING clause of the + remote query; use a targetlist consisting of them instead, which + will be adjusted to be new fdw_scan_tlist of the foreign-scan plan + node below. + */ + if (fscan->scan.scanrelid == 0) + returningList = build_remote_returning(resultRelation, rel, + returningList); + } + /* * Construct the SQL command string. */ @@ -2220,6 +2309,7 @@ { case CMD_UPDATE: deparseDirectUpdateSql(&sql, root, resultRelation, rel, + foreignrel, ((Plan *) fscan)->targetlist, targetAttrs, remote_exprs, &params_list, + returningList, &retrieved_attrs); break; case CMD_DELETE: deparseDirectDeleteSql(&sql, root, resultRelation, rel, + foreignrel, + remote_exprs, &params_list, + returningList, &retrieved_attrs); break; @@ -2254,6 +2345,19 @@ retrieved_attrs, makeInteger(plan->canSetTag)); + /* + * Update the foreign-join-related fields. + */ + if (fscan->scan.scanrelid == 0) + { + /* No need for the outer subplan. */ + fscan->scan.plan.lefttree = NULL; + + /* Build new fdw_scan_tlist if UPDATE/DELETE .. RETURNING. */ + if (returningList) + rebuild_fdw_scan_tlist(fscan, returningList); + } + heap_close(rel, NoLock); return true; } @@ -2268,6 +2372,7 @@ postgresBeginDirectModify(ForeignScanState *node, int eflags) ForeignScan *fsplan = (ForeignScan *) node->ss.ps.plan; EState *estate = node->ss.ps.state; PgFdwDirectModifyState *dmstate; + Index rtindex; RangeTblEntry *rte; Oid userid; ForeignTable *table; @@ -2290,11 +2395,15 @@ postgresBeginDirectModify(ForeignScanState *node, int eflags) * Identify which user to do the remote access as. This should match what * ExecCheckRTEPerms() does. */ - rte = rt_fetch(fsplan->scan.scanrelid, estate->es_range_table); + rtindex = estate->es_result_relation_info->ri_RangeTableIndex; + rte = exec_rt_fetch(rtindex, estate); userid = rte->checkAsUser ? rte->checkAsUser : GetUserId(); /* Get info about foreign table. */ - dmstate->rel = node->ss.ss_currentRelation; + if (fsplan->scan.scanrelid == 0) + dmstate->rel = ExecOpenScanRelation(estate, rtindex, eflags); + else + dmstate->rel = node->ss.ss_currentRelation; table = GetForeignTable(RelationGetRelid(dmstate->rel)); user = GetUserMapping(userid, table->serverid); @@ -2304,6 +2413,21 @@ */ dmstate->conn = GetConnection(user, false); + /* Update the foreign-join-related fields. */ + if (fsplan->scan.scanrelid == 0) + { + /* Save info about foreign table. */ + dmstate->resultRel = dmstate->rel; + + /* + * Set dmstate->rel to NULL to teach get_returning_data() and + * make_tuple_from_result_row() that columns fetched from the remote + * server are described by fdw_scan_tlist of the foreign-scan plan + * node, not the tuple descriptor for the target relation. + */ + dmstate->rel = NULL; + } + /* Initialize state variable */ dmstate->num_tuples = -1; /* -1 means not set yet */ @@ -2324,7 +2448,24 @@ /* Prepare for input conversion of RETURNING results.
*/ if (dmstate->has_returning) - dmstate->attinmeta = TupleDescGetAttInMetadata(RelationGetDescr(dmstate->rel)); + { + TupleDesc tupdesc; + + if (fsplan->scan.scanrelid == 0) + tupdesc = node->ss.ss_ScanTupleSlot->tts_tupleDescriptor; + else + tupdesc = RelationGetDescr(dmstate->rel); + + dmstate->attinmeta = TupleDescGetAttInMetadata(tupdesc); + + /* + * When performing an UPDATE/DELETE .. RETURNING on a join directly, + * initialize a filter to extract an updated/deleted tuple from a scan + * tuple. + */ + if (fsplan->scan.scanrelid == 0) + init_returning_filter(dmstate, fsplan->fdw_scan_tlist, rtindex); + } /* * Prepare for processing of parameters used in remote query, if any. @@ -2688,12 +2829,15 @@ estimate_path_cost_size(PlannerInfo *root, else if (IS_UPPER_REL(foreignrel)) { PgFdwRelationInfo *ofpinfo; - PathTarget *ptarget = root->upper_targets[UPPERREL_GROUP_AGG]; + PathTarget *ptarget = foreignrel->reltarget; AggClauseCosts aggcosts; double input_rows; int numGroupCols; double numGroups = 1; + /* Make sure the core code set the pathtarget. */ + Assert(ptarget != NULL); + /* * This cost model is mixture of costing done for sorted and * hashed aggregates in cost_agg(). We are not sure which @@ -2718,6 +2862,12 @@ estimate_path_cost_size(PlannerInfo *root, { get_agg_clause_costs(root, (Node *) fpinfo->grouped_tlist, AGGSPLIT_SIMPLE, &aggcosts); + + /* + * The cost of aggregates in the HAVING qual will be the same + * for each child as it is for the parent, so there's no need + * to use a translated version of havingQual. + */ get_agg_clause_costs(root, (Node *) root->parse->havingQual, AGGSPLIT_SIMPLE, &aggcosts); } @@ -3129,6 +3279,108 @@ close_cursor(PGconn *conn, unsigned int cursor_number) PQclear(res); } +/* + * create_foreign_modify + * Construct an execution state of a foreign insert/update/delete + * operation + */ +static PgFdwModifyState * +create_foreign_modify(EState *estate, + RangeTblEntry *rte, + ResultRelInfo *resultRelInfo, + CmdType operation, + Plan *subplan, + char *query, + List *target_attrs, + bool has_returning, + List *retrieved_attrs) +{ + PgFdwModifyState *fmstate; + Relation rel = resultRelInfo->ri_RelationDesc; + TupleDesc tupdesc = RelationGetDescr(rel); + Oid userid; + ForeignTable *table; + UserMapping *user; + AttrNumber n_params; + Oid typefnoid; + bool isvarlena; + ListCell *lc; + + /* Begin constructing PgFdwModifyState. */ + fmstate = (PgFdwModifyState *) palloc0(sizeof(PgFdwModifyState)); + fmstate->rel = rel; + + /* + * Identify which user to do the remote access as. This should match what + * ExecCheckRTEPerms() does. + */ + userid = rte->checkAsUser ? rte->checkAsUser : GetUserId(); + + /* Get info about foreign table. */ + table = GetForeignTable(RelationGetRelid(rel)); + user = GetUserMapping(userid, table->serverid); + + /* Open connection; report that we'll create a prepared statement. */ + fmstate->conn = GetConnection(user, true); + fmstate->p_name = NULL; /* prepared statement not made yet */ + + /* Set up remote query information. */ + fmstate->query = query; + fmstate->target_attrs = target_attrs; + fmstate->has_returning = has_returning; + fmstate->retrieved_attrs = retrieved_attrs; + + /* Create context for per-tuple temp workspace. */ + fmstate->temp_cxt = AllocSetContextCreate(estate->es_query_cxt, + "postgres_fdw temporary data", + ALLOCSET_SMALL_SIZES); + + /* Prepare for input conversion of RETURNING results. 
*/ + if (fmstate->has_returning) + fmstate->attinmeta = TupleDescGetAttInMetadata(tupdesc); + + /* Prepare for output conversion of parameters used in prepared stmt. */ + n_params = list_length(fmstate->target_attrs) + 1; + fmstate->p_flinfo = (FmgrInfo *) palloc0(sizeof(FmgrInfo) * n_params); + fmstate->p_nums = 0; + + if (operation == CMD_UPDATE || operation == CMD_DELETE) + { + Assert(subplan != NULL); + + /* Find the ctid resjunk column in the subplan's result */ + fmstate->ctidAttno = ExecFindJunkAttributeInTlist(subplan->targetlist, + "ctid"); + if (!AttributeNumberIsValid(fmstate->ctidAttno)) + elog(ERROR, "could not find junk ctid column"); + + /* First transmittable parameter will be ctid */ + getTypeOutputInfo(TIDOID, &typefnoid, &isvarlena); + fmgr_info(typefnoid, &fmstate->p_flinfo[fmstate->p_nums]); + fmstate->p_nums++; + } + + if (operation == CMD_INSERT || operation == CMD_UPDATE) + { + /* Set up for remaining transmittable parameters */ + foreach(lc, fmstate->target_attrs) + { + int attnum = lfirst_int(lc); + Form_pg_attribute attr = TupleDescAttr(tupdesc, attnum - 1); + + Assert(!attr->attisdropped); + + getTypeOutputInfo(attr->atttypid, &typefnoid, &isvarlena); + fmgr_info(typefnoid, &fmstate->p_flinfo[fmstate->p_nums]); + fmstate->p_nums++; + } + } + + Assert(fmstate->p_nums <= n_params); + + return fmstate; +} + /* * prepare_foreign_modify * Establish a prepared statement for execution of INSERT/UPDATE/DELETE @@ -3260,7 +3512,7 @@ store_returning_result(PgFdwModifyState *fmstate, NULL, fmstate->temp_cxt); /* tuple will be deleted when it is cleared from the slot */ - ExecStoreTuple(newtup, slot, InvalidBuffer, true); + ExecStoreHeapTuple(newtup, slot, true); } PG_CATCH(); { @@ -3271,6 +3523,169 @@ store_returning_result(PgFdwModifyState *fmstate, PG_END_TRY(); } +/* + * finish_foreign_modify + * Release resources for a foreign insert/update/delete operation + */ +static void +finish_foreign_modify(PgFdwModifyState *fmstate) +{ + Assert(fmstate != NULL); + + /* If we created a prepared statement, destroy it */ + if (fmstate->p_name) + { + char sql[64]; + PGresult *res; + + snprintf(sql, sizeof(sql), "DEALLOCATE %s", fmstate->p_name); + + /* + * We don't use a PG_TRY block here, so be careful not to throw error + * without releasing the PGresult. + */ + res = pgfdw_exec_query(fmstate->conn, sql); + if (PQresultStatus(res) != PGRES_COMMAND_OK) + pgfdw_report_error(ERROR, res, fmstate->conn, true, sql); + PQclear(res); + fmstate->p_name = NULL; + } + + /* Release remote connection */ + ReleaseConnection(fmstate->conn); + fmstate->conn = NULL; +} + +/* + * build_remote_returning + * Build a RETURNING targetlist of a remote query for performing an + * UPDATE/DELETE .. RETURNING on a join directly + */ +static List * +build_remote_returning(Index rtindex, Relation rel, List *returningList) +{ + bool have_wholerow = false; + List *tlist = NIL; + List *vars; + ListCell *lc; + + Assert(returningList); + + vars = pull_var_clause((Node *) returningList, PVC_INCLUDE_PLACEHOLDERS); + + /* + * If there's a whole-row reference to the target relation, then we'll + * need all the columns of the relation. 
+ */ + foreach(lc, vars) + { + Var *var = (Var *) lfirst(lc); + + if (IsA(var, Var) && + var->varno == rtindex && + var->varattno == InvalidAttrNumber) + { + have_wholerow = true; + break; + } + } + + if (have_wholerow) + { + TupleDesc tupdesc = RelationGetDescr(rel); + int i; + + for (i = 1; i <= tupdesc->natts; i++) + { + Form_pg_attribute attr = TupleDescAttr(tupdesc, i - 1); + Var *var; + + /* Ignore dropped attributes. */ + if (attr->attisdropped) + continue; + + var = makeVar(rtindex, + i, + attr->atttypid, + attr->atttypmod, + attr->attcollation, + 0); + + tlist = lappend(tlist, + makeTargetEntry((Expr *) var, + list_length(tlist) + 1, + NULL, + false)); + } + } + + /* Now add any remaining columns to tlist. */ + foreach(lc, vars) + { + Var *var = (Var *) lfirst(lc); + + /* + * No need for whole-row references to the target relation. We don't + * need system columns other than ctid and oid either, since those are + * set locally. + */ + if (IsA(var, Var) && + var->varno == rtindex && + var->varattno <= InvalidAttrNumber && + var->varattno != SelfItemPointerAttributeNumber && + var->varattno != ObjectIdAttributeNumber) + continue; /* don't need it */ + + if (tlist_member((Expr *) var, tlist)) + continue; /* already got it */ + + tlist = lappend(tlist, + makeTargetEntry((Expr *) var, + list_length(tlist) + 1, + NULL, + false)); + } + + list_free(vars); + + return tlist; +} + +/* + * rebuild_fdw_scan_tlist + * Build new fdw_scan_tlist of given foreign-scan plan node from given + * tlist + * + * There might be columns that the fdw_scan_tlist of the given foreign-scan + * plan node contains that the given tlist doesn't. The fdw_scan_tlist would + * have contained resjunk columns such as 'ctid' of the target relation and + * 'wholerow' of non-target relations, but the tlist might not contain them, + * for example. So, adjust the tlist so it contains all the columns specified + * in the fdw_scan_tlist; else setrefs.c will get confused. + */ +static void +rebuild_fdw_scan_tlist(ForeignScan *fscan, List *tlist) +{ + List *new_tlist = tlist; + List *old_tlist = fscan->fdw_scan_tlist; + ListCell *lc; + + foreach(lc, old_tlist) + { + TargetEntry *tle = (TargetEntry *) lfirst(lc); + + if (tlist_member(tle->expr, new_tlist)) + continue; /* already got it */ + + new_tlist = lappend(new_tlist, + makeTargetEntry(tle->expr, + list_length(new_tlist) + 1, + NULL, + false)); + } + fscan->fdw_scan_tlist = new_tlist; +} + /* * Execute a direct UPDATE/DELETE statement. */ @@ -3331,6 +3746,7 @@ get_returning_data(ForeignScanState *node) EState *estate = node->ss.ps.state; ResultRelInfo *resultRelInfo = estate->es_result_relation_info; TupleTableSlot *slot = node->ss.ss_ScanTupleSlot; + TupleTableSlot *resultSlot; Assert(resultRelInfo->ri_projectReturning); @@ -3348,7 +3764,10 @@ get_returning_data(ForeignScanState *node) * "UPDATE/DELETE .. RETURNING 1" for example.) */ if (!dmstate->has_returning) + { ExecStoreAllNullTuple(slot); + resultSlot = slot; + } else { /* @@ -3364,9 +3783,9 @@ get_returning_data(ForeignScanState *node) dmstate->rel, dmstate->attinmeta, dmstate->retrieved_attrs, - NULL, + node, dmstate->temp_cxt); - ExecStoreTuple(newtup, slot, InvalidBuffer, false); + ExecStoreHeapTuple(newtup, slot, false); } PG_CATCH(); { @@ -3375,15 +3794,204 @@ get_returning_data(ForeignScanState *node) PG_RE_THROW(); } PG_END_TRY(); + + /* Get the updated/deleted tuple. 
*/ + if (dmstate->rel) + resultSlot = slot; + else + resultSlot = apply_returning_filter(dmstate, slot, estate); } dmstate->next_tuple++; /* Make slot available for evaluation of the local query RETURNING list. */ - resultRelInfo->ri_projectReturning->pi_exprContext->ecxt_scantuple = slot; + resultRelInfo->ri_projectReturning->pi_exprContext->ecxt_scantuple = + resultSlot; return slot; } +/* + * Initialize a filter to extract an updated/deleted tuple from a scan tuple. + */ +static void +init_returning_filter(PgFdwDirectModifyState *dmstate, + List *fdw_scan_tlist, + Index rtindex) +{ + TupleDesc resultTupType = RelationGetDescr(dmstate->resultRel); + ListCell *lc; + int i; + + /* + * Calculate the mapping between the fdw_scan_tlist's entries and the + * result tuple's attributes. + * + * The "map" is an array of indexes of the result tuple's attributes in + * fdw_scan_tlist, i.e., one entry for every attribute of the result + * tuple. We store zero for any attributes that don't have the + * corresponding entries in that list, marking that a NULL is needed in + * the result tuple. + * + * Also get the indexes of the entries for ctid and oid if any. + */ + dmstate->attnoMap = (AttrNumber *) + palloc0(resultTupType->natts * sizeof(AttrNumber)); + + dmstate->ctidAttno = dmstate->oidAttno = 0; + + i = 1; + dmstate->hasSystemCols = false; + foreach(lc, fdw_scan_tlist) + { + TargetEntry *tle = (TargetEntry *) lfirst(lc); + Var *var = (Var *) tle->expr; + + Assert(IsA(var, Var)); + + /* + * If the Var is a column of the target relation to be retrieved from + * the foreign server, get the index of the entry. + */ + if (var->varno == rtindex && + list_member_int(dmstate->retrieved_attrs, i)) + { + int attrno = var->varattno; + + if (attrno < 0) + { + /* + * We don't retrieve system columns other than ctid and oid. + */ + if (attrno == SelfItemPointerAttributeNumber) + dmstate->ctidAttno = i; + else if (attrno == ObjectIdAttributeNumber) + dmstate->oidAttno = i; + else + Assert(false); + dmstate->hasSystemCols = true; + } + else + { + /* + * We don't retrieve whole-row references to the target + * relation either. + */ + Assert(attrno > 0); + + dmstate->attnoMap[attrno - 1] = i; + } + } + i++; + } +} + +/* + * Extract and return an updated/deleted tuple from a scan tuple. + */ +static TupleTableSlot * +apply_returning_filter(PgFdwDirectModifyState *dmstate, + TupleTableSlot *slot, + EState *estate) +{ + TupleDesc resultTupType = RelationGetDescr(dmstate->resultRel); + TupleTableSlot *resultSlot; + Datum *values; + bool *isnull; + Datum *old_values; + bool *old_isnull; + int i; + + /* + * Use the trigger tuple slot as a place to store the result tuple. + */ + resultSlot = estate->es_trig_tuple_slot; + if (resultSlot->tts_tupleDescriptor != resultTupType) + ExecSetSlotDescriptor(resultSlot, resultTupType); + + /* + * Extract all the values of the scan tuple. + */ + slot_getallattrs(slot); + old_values = slot->tts_values; + old_isnull = slot->tts_isnull; + + /* + * Prepare to build the result tuple. + */ + ExecClearTuple(resultSlot); + values = resultSlot->tts_values; + isnull = resultSlot->tts_isnull; + + /* + * Transpose data into proper fields of the result tuple. + */ + for (i = 0; i < resultTupType->natts; i++) + { + int j = dmstate->attnoMap[i]; + + if (j == 0) + { + values[i] = (Datum) 0; + isnull[i] = true; + } + else + { + values[i] = old_values[j - 1]; + isnull[i] = old_isnull[j - 1]; + } + } + + /* + * Build the virtual tuple. 
+ */ + ExecStoreVirtualTuple(resultSlot); + + /* + * If we have any system columns to return, install them. + */ + if (dmstate->hasSystemCols) + { + HeapTuple resultTup = ExecMaterializeSlot(resultSlot); + + /* ctid */ + if (dmstate->ctidAttno) + { + ItemPointer ctid = NULL; + + ctid = (ItemPointer) DatumGetPointer(old_values[dmstate->ctidAttno - 1]); + resultTup->t_self = *ctid; + } + + /* oid */ + if (dmstate->oidAttno) + { + Oid oid = InvalidOid; + + oid = DatumGetObjectId(old_values[dmstate->oidAttno - 1]); + HeapTupleSetOid(resultTup, oid); + } + + /* + * And remaining columns + * + * Note: since we currently don't allow the target relation to appear + * on the nullable side of an outer join, any system columns wouldn't + * go to NULL. + * + * Note: no need to care about tableoid here because it will be + * initialized in ExecProcessReturning(). + */ + HeapTupleHeaderSetXmin(resultTup->t_data, InvalidTransactionId); + HeapTupleHeaderSetXmax(resultTup->t_data, InvalidTransactionId); + HeapTupleHeaderSetCmin(resultTup->t_data, InvalidTransactionId); + } + + /* + * And return the result tuple. + */ + return resultSlot; +} + /* * Prepare for processing of parameters used in remote query. */ @@ -4127,7 +4735,8 @@ foreign_join_ok(PlannerInfo *root, RelOptInfo *joinrel, JoinType jointype, bool is_remote_clause = is_foreign_expr(root, joinrel, rinfo->clause); - if (IS_OUTER_JOIN(jointype) && !rinfo->is_pushed_down) + if (IS_OUTER_JOIN(jointype) && + !RINFO_IS_PUSHED_DOWN(rinfo, joinrel->relids)) { if (!is_remote_clause) return false; @@ -4155,7 +4764,11 @@ foreign_join_ok(PlannerInfo *root, RelOptInfo *joinrel, JoinType jointype, foreach(lc, root->placeholder_list) { PlaceHolderInfo *phinfo = lfirst(lc); - Relids relids = joinrel->relids; + Relids relids; + + /* PlaceHolderInfo refers to parent relids, not child relids. */ + relids = IS_OTHER_REL(joinrel) ? + joinrel->top_parent_relids : joinrel->relids; if (bms_is_subset(phinfo->ph_eval_at, relids) && bms_nonempty_difference(relids, phinfo->ph_eval_at)) @@ -4329,10 +4942,26 @@ add_paths_with_pathkeys_for_rel(PlannerInfo *root, RelOptInfo *rel, Cost startup_cost; Cost total_cost; List *useful_pathkeys = lfirst(lc); + Path *sorted_epq_path; estimate_path_cost_size(root, rel, NIL, useful_pathkeys, &rows, &width, &startup_cost, &total_cost); + /* + * The EPQ path must be at least as well sorted as the path itself, in + * case it gets used as input to a mergejoin. + */ + sorted_epq_path = epq_path; + if (sorted_epq_path != NULL && + !pathkeys_contained_in(useful_pathkeys, + sorted_epq_path->pathkeys)) + sorted_epq_path = (Path *) + create_sort_path(root, + rel, + sorted_epq_path, + useful_pathkeys, + -1.0); + add_path(rel, (Path *) create_foreignscan_path(root, rel, NULL, @@ -4341,7 +4970,7 @@ add_paths_with_pathkeys_for_rel(PlannerInfo *root, RelOptInfo *rel, total_cost, useful_pathkeys, NULL, - epq_path, + sorted_epq_path, NIL)); } } @@ -4497,7 +5126,6 @@ postgresGetForeignJoinPaths(PlannerInfo *root, * the path list of the joinrel, if one exists. We must be careful to * call it before adding any ForeignPath, since the ForeignPath might * dominate the only suitable local path available. We also do it before - * reconstruct the row for EvalPlanQual(). Find an alternative local path * calling foreign_join_ok(), since that function updates fpinfo and marks * it as pushable if the join is found to be pushable. */ @@ -4588,18 +5216,19 @@ postgresGetForeignJoinPaths(PlannerInfo *root, * this function to PgFdwRelationInfo of the input relation. 
*/ static bool -foreign_grouping_ok(PlannerInfo *root, RelOptInfo *grouped_rel) +foreign_grouping_ok(PlannerInfo *root, RelOptInfo *grouped_rel, + Node *havingQual) { Query *query = root->parse; - PathTarget *grouping_target; PgFdwRelationInfo *fpinfo = (PgFdwRelationInfo *) grouped_rel->fdw_private; + PathTarget *grouping_target = grouped_rel->reltarget; PgFdwRelationInfo *ofpinfo; List *aggvars; ListCell *lc; int i; List *tlist = NIL; - /* Grouping Sets are not pushable */ + /* We currently don't support pushing Grouping Sets. */ if (query->groupingSets) return false; @@ -4607,7 +5236,7 @@ foreign_grouping_ok(PlannerInfo *root, RelOptInfo *grouped_rel) ofpinfo = (PgFdwRelationInfo *) fpinfo->outerrel->fdw_private; /* - * If underneath input relation has any local conditions, those conditions + * If underlying scan relation has any local conditions, those conditions * are required to be applied before performing aggregation. Hence the * aggregate cannot be pushed down. */ @@ -4615,21 +5244,11 @@ foreign_grouping_ok(PlannerInfo *root, RelOptInfo *grouped_rel) return false; /* - * The targetlist expected from this node and the targetlist pushed down - * to the foreign server may be different. The latter requires - * sortgrouprefs to be set to push down GROUP BY clause, but should not - * have those arising from ORDER BY clause. These sortgrouprefs may be - * different from those in the plan's targetlist. Use a copy of path - * target to record the new sortgrouprefs. - */ - grouping_target = copy_pathtarget(root->upper_targets[UPPERREL_GROUP_AGG]); - - /* - * Evaluate grouping targets and check whether they are safe to push down - * to the foreign side. All GROUP BY expressions will be part of the - * grouping target and thus there is no need to evaluate it separately. - * While doing so, add required expressions into target list which can - * then be used to pass to foreign server. + * Examine grouping expressions, as well as other expressions we'd need to + * compute, and check whether they are safe to push down to the foreign + * server. All GROUP BY expressions will be part of the grouping target + * and thus there is no need to search for them separately. Add grouping + * expressions into target list which will be passed to foreign server. */ i = 0; foreach(lc, grouping_target->exprs) @@ -4641,51 +5260,59 @@ foreign_grouping_ok(PlannerInfo *root, RelOptInfo *grouped_rel) /* Check whether this expression is part of GROUP BY clause */ if (sgref && get_sortgroupref_clause_noerr(sgref, query->groupClause)) { + TargetEntry *tle; + /* - * If any of the GROUP BY expression is not shippable we can not + * If any GROUP BY expression is not shippable, then we cannot * push down aggregation to the foreign server. */ if (!is_foreign_expr(root, grouped_rel, expr)) return false; - /* Pushable, add to tlist */ - tlist = add_to_flat_tlist(tlist, list_make1(expr)); + /* + * Pushable, so add to tlist. We need to create a TLE for this + * expression and apply the sortgroupref to it. We cannot use + * add_to_flat_tlist() here because that avoids making duplicate + * entries in the tlist. If there are duplicate entries with + * distinct sortgrouprefs, we have to duplicate that situation in + * the output tlist. + */ + tle = makeTargetEntry(expr, list_length(tlist) + 1, NULL, false); + tle->ressortgroupref = sgref; + tlist = lappend(tlist, tle); } else { - /* Check entire expression whether it is pushable or not */ + /* + * Non-grouping expression we need to compute. Is it shippable? 
+ */ if (is_foreign_expr(root, grouped_rel, expr)) { - /* Pushable, add to tlist */ + /* Yes, so add to tlist as-is; OK to suppress duplicates */ tlist = add_to_flat_tlist(tlist, list_make1(expr)); } else { - /* - * If we have sortgroupref set, then it means that we have an - * ORDER BY entry pointing to this expression. Since we are - * not pushing ORDER BY with GROUP BY, clear it. - */ - if (sgref) - grouping_target->sortgrouprefs[i] = 0; - - /* Not matched exactly, pull the var with aggregates then */ + /* Not pushable as a whole; extract its Vars and aggregates */ aggvars = pull_var_clause((Node *) expr, PVC_INCLUDE_AGGREGATES); + /* + * If any aggregate expression is not shippable, then we + * cannot push down aggregation to the foreign server. + */ if (!is_foreign_expr(root, grouped_rel, (Expr *) aggvars)) return false; /* - * Add aggregates, if any, into the targetlist. Plain var - * nodes should be either same as some GROUP BY expression or - * part of some GROUP BY expression. In later case, the query - * cannot refer plain var nodes without the surrounding - * expression. In both the cases, they are already part of + * Add aggregates, if any, into the targetlist. Plain Vars + * outside an aggregate can be ignored, because they should be + * either same as some GROUP BY column or part of some GROUP + * BY expression. In either case, they are already part of * the targetlist and thus no need to add them again. In fact - * adding pulled plain var nodes in SELECT clause will cause - * an error on the foreign server if they are not same as some - * GROUP BY expression. + * including plain Vars in the tlist when they do not match a + * GROUP BY column would cause the foreign server to complain + * that the shipped query is invalid. */ foreach(l, aggvars) { @@ -4701,14 +5328,14 @@ foreign_grouping_ok(PlannerInfo *root, RelOptInfo *grouped_rel) } /* - * Classify the pushable and non-pushable having clauses and save them in + * Classify the pushable and non-pushable HAVING clauses and save them in * remote_conds and local_conds of the grouped rel's fpinfo. 
*/ - if (root->hasHavingQual && query->havingQual) + if (havingQual) { ListCell *lc; - foreach(lc, (List *) query->havingQual) + foreach(lc, (List *) havingQual) { Expr *expr = (Expr *) lfirst(lc); RestrictInfo *rinfo; @@ -4771,9 +5398,6 @@ foreign_grouping_ok(PlannerInfo *root, RelOptInfo *grouped_rel) } } - /* Transfer any sortgroupref data to the replacement tlist */ - apply_pathtarget_labeling_to_tlist(tlist, grouping_target); - /* Store generated targetlist */ fpinfo->grouped_tlist = tlist; @@ -4808,7 +5432,8 @@ foreign_grouping_ok(PlannerInfo *root, RelOptInfo *grouped_rel) */ static void postgresGetForeignUpperPaths(PlannerInfo *root, UpperRelationKind stage, - RelOptInfo *input_rel, RelOptInfo *output_rel) + RelOptInfo *input_rel, RelOptInfo *output_rel, + void *extra) { PgFdwRelationInfo *fpinfo; @@ -4828,7 +5453,8 @@ postgresGetForeignUpperPaths(PlannerInfo *root, UpperRelationKind stage, fpinfo->pushdown_safe = false; output_rel->fdw_private = fpinfo; - add_foreign_grouping_paths(root, input_rel, output_rel); + add_foreign_grouping_paths(root, input_rel, output_rel, + (GroupPathExtraData *) extra); } /* @@ -4840,13 +5466,13 @@ postgresGetForeignUpperPaths(PlannerInfo *root, UpperRelationKind stage, */ static void add_foreign_grouping_paths(PlannerInfo *root, RelOptInfo *input_rel, - RelOptInfo *grouped_rel) + RelOptInfo *grouped_rel, + GroupPathExtraData *extra) { Query *parse = root->parse; PgFdwRelationInfo *ifpinfo = input_rel->fdw_private; PgFdwRelationInfo *fpinfo = grouped_rel->fdw_private; ForeignPath *grouppath; - PathTarget *grouping_target; double rows; int width; Cost startup_cost; @@ -4857,7 +5483,8 @@ add_foreign_grouping_paths(PlannerInfo *root, RelOptInfo *input_rel, !root->hasHavingQual) return; - grouping_target = root->upper_targets[UPPERREL_GROUP_AGG]; + Assert(extra->patype == PARTITIONWISE_AGGREGATE_NONE || + extra->patype == PARTITIONWISE_AGGREGATE_FULL); /* save the input_rel as outerrel in fpinfo */ fpinfo->outerrel = input_rel; @@ -4871,8 +5498,13 @@ add_foreign_grouping_paths(PlannerInfo *root, RelOptInfo *input_rel, fpinfo->user = ifpinfo->user; merge_fdw_options(fpinfo, ifpinfo, NULL); - /* Assess if it is safe to push down aggregation and grouping. */ - if (!foreign_grouping_ok(root, grouped_rel)) + /* + * Assess if it is safe to push down aggregation and grouping. + * + * Use HAVING qual from extra. In case of child partition, it will have + * translated Vars. + */ + if (!foreign_grouping_ok(root, grouped_rel, extra->havingQual)) return; /* Estimate the cost of push down */ @@ -4888,7 +5520,7 @@ add_foreign_grouping_paths(PlannerInfo *root, RelOptInfo *input_rel, /* Create and add foreign path to the grouping relation. 
*/ grouppath = create_foreignscan_path(root, grouped_rel, - grouping_target, + grouped_rel->reltarget, rows, startup_cost, total_cost, @@ -4943,11 +5575,8 @@ make_tuple_from_result_row(PGresult *res, tupdesc = RelationGetDescr(rel); else { - PgFdwScanState *fdw_sstate; - Assert(fsstate); - fdw_sstate = (PgFdwScanState *) fsstate->fdw_state; - tupdesc = fdw_sstate->tupdesc; + tupdesc = fsstate->ss.ss_ScanTupleSlot->tts_tupleDescriptor; } values = (Datum *) palloc0(tupdesc->natts * sizeof(Datum)); @@ -5091,9 +5720,10 @@ conversion_error_callback(void *arg) { /* error occurred in a scan against a foreign table */ TupleDesc tupdesc = RelationGetDescr(errpos->rel); + Form_pg_attribute attr = TupleDescAttr(tupdesc, errpos->cur_attno - 1); if (errpos->cur_attno > 0 && errpos->cur_attno <= tupdesc->natts) - attname = NameStr(tupdesc->attrs[errpos->cur_attno - 1]->attname); + attname = NameStr(attr->attname); else if (errpos->cur_attno == SelfItemPointerAttributeNumber) attname = "ctid"; else if (errpos->cur_attno == ObjectIdAttributeNumber) @@ -5114,7 +5744,7 @@ conversion_error_callback(void *arg) /* * Target list can have Vars and expressions. For Vars, we can get - * it's relation, however for expressions we can't. Thus for + * its relation, however for expressions we can't. Thus for * expressions, just show generic context message. */ if (IsA(tle->expr, Var)) @@ -5122,12 +5752,12 @@ conversion_error_callback(void *arg) RangeTblEntry *rte; Var *var = (Var *) tle->expr; - rte = rt_fetch(var->varno, estate->es_range_table); + rte = exec_rt_fetch(var->varno, estate); if (var->varattno == 0) is_wholerow = true; else - attname = get_relid_attribute_name(rte->relid, var->varattno); + attname = get_attname(rte->relid, var->varattno, false); relname = get_rel_name(rte->relid); } @@ -5149,7 +5779,7 @@ conversion_error_callback(void *arg) * Find an equivalence class member expression, all of whose Vars, come from * the indicated relation. 
*/ -extern Expr * +Expr * find_em_expr_for_rel(EquivalenceClass *ec, RelOptInfo *rel) { ListCell *lc_em; @@ -5158,7 +5788,8 @@ find_em_expr_for_rel(EquivalenceClass *ec, RelOptInfo *rel) { EquivalenceMember *em = lfirst(lc_em); - if (bms_is_subset(em->em_relids, rel->relids)) + if (bms_is_subset(em->em_relids, rel->relids) && + !bms_is_empty(em->em_relids)) { /* * If there is more than one equivalence member whose Vars are diff --git a/contrib/postgres_fdw/postgres_fdw.h b/contrib/postgres_fdw/postgres_fdw.h index 788b003650..70b538e2f9 100644 --- a/contrib/postgres_fdw/postgres_fdw.h +++ b/contrib/postgres_fdw/postgres_fdw.h @@ -3,7 +3,7 @@ * postgres_fdw.h * Foreign-data wrapper for remote PostgreSQL servers * - * Portions Copyright (c) 2012-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 2012-2018, PostgreSQL Global Development Group * * IDENTIFICATION * contrib/postgres_fdw/postgres_fdw.h @@ -140,28 +140,32 @@ extern void classifyConditions(PlannerInfo *root, extern bool is_foreign_expr(PlannerInfo *root, RelOptInfo *baserel, Expr *expr); -extern void deparseInsertSql(StringInfo buf, PlannerInfo *root, +extern void deparseInsertSql(StringInfo buf, RangeTblEntry *rte, Index rtindex, Relation rel, - List *targetAttrs, bool doNothing, List *returningList, + List *targetAttrs, bool doNothing, + List *withCheckOptionList, List *returningList, List **retrieved_attrs); -extern void deparseUpdateSql(StringInfo buf, PlannerInfo *root, +extern void deparseUpdateSql(StringInfo buf, RangeTblEntry *rte, Index rtindex, Relation rel, - List *targetAttrs, List *returningList, + List *targetAttrs, + List *withCheckOptionList, List *returningList, List **retrieved_attrs); extern void deparseDirectUpdateSql(StringInfo buf, PlannerInfo *root, Index rtindex, Relation rel, + RelOptInfo *foreignrel, List *targetlist, List *targetAttrs, List *remote_conds, List **params_list, List *returningList, List **retrieved_attrs); -extern void deparseDeleteSql(StringInfo buf, PlannerInfo *root, +extern void deparseDeleteSql(StringInfo buf, RangeTblEntry *rte, Index rtindex, Relation rel, List *returningList, List **retrieved_attrs); extern void deparseDirectDeleteSql(StringInfo buf, PlannerInfo *root, Index rtindex, Relation rel, + RelOptInfo *foreignrel, List *remote_conds, List **params_list, List *returningList, diff --git a/contrib/postgres_fdw/shippable.c b/contrib/postgres_fdw/shippable.c index 2ac0873caa..7f2ed0499c 100644 --- a/contrib/postgres_fdw/shippable.c +++ b/contrib/postgres_fdw/shippable.c @@ -13,7 +13,7 @@ * functions or functions using nonportable collations. Those considerations * need not be accounted for here. 
* - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * * IDENTIFICATION * contrib/postgres_fdw/shippable.c diff --git a/contrib/postgres_fdw/sql/postgres_fdw.sql b/contrib/postgres_fdw/sql/postgres_fdw.sql index 5f65d9d966..88c4cb4783 100644 --- a/contrib/postgres_fdw/sql/postgres_fdw.sql +++ b/contrib/postgres_fdw/sql/postgres_fdw.sql @@ -57,6 +57,12 @@ CREATE TABLE "S 1"."T 4" ( CONSTRAINT t4_pkey PRIMARY KEY (c1) ); +-- Disable autovacuum for these tables to avoid unexpected effects of that +ALTER TABLE "S 1"."T 1" SET (autovacuum_enabled = 'false'); +ALTER TABLE "S 1"."T 2" SET (autovacuum_enabled = 'false'); +ALTER TABLE "S 1"."T 3" SET (autovacuum_enabled = 'false'); +ALTER TABLE "S 1"."T 4" SET (autovacuum_enabled = 'false'); + INSERT INTO "S 1"."T 1" SELECT id, id % 10, @@ -559,8 +565,20 @@ EXPLAIN (VERBOSE, COSTS OFF) SELECT ft5, ft5.c1, ft5.c2, ft5.c3, ft4.c1, ft4.c2 FROM ft5 left join ft4 on ft5.c1 = ft4.c1 WHERE ft4.c1 BETWEEN 10 and 30 ORDER BY ft5.c1, ft4.c1; SELECT ft5, ft5.c1, ft5.c2, ft5.c3, ft4.c1, ft4.c2 FROM ft5 left join ft4 on ft5.c1 = ft4.c1 WHERE ft4.c1 BETWEEN 10 and 30 ORDER BY ft5.c1, ft4.c1; +-- multi-way join involving multiple merge joins +-- (this case used to have EPQ-related planning problems) +SET enable_nestloop TO false; +SET enable_hashjoin TO false; +EXPLAIN (VERBOSE, COSTS OFF) +SELECT * FROM ft1, ft2, ft4, ft5 WHERE ft1.c1 = ft2.c1 AND ft1.c2 = ft4.c1 + AND ft1.c2 = ft5.c1 AND ft1.c1 < 100 AND ft2.c1 < 100 FOR UPDATE; +SELECT * FROM ft1, ft2, ft4, ft5 WHERE ft1.c1 = ft2.c1 AND ft1.c2 = ft4.c1 + AND ft1.c2 = ft5.c1 AND ft1.c1 < 100 AND ft2.c1 < 100 FOR UPDATE; +RESET enable_nestloop; +RESET enable_hashjoin; + -- check join pushdown in situations where multiple userids are involved -CREATE ROLE regress_view_owner; +CREATE ROLE regress_view_owner SUPERUSER; CREATE USER MAPPING FOR regress_view_owner SERVER loopback; GRANT SELECT ON ft4 TO regress_view_owner; GRANT SELECT ON ft5 TO regress_view_owner; @@ -636,6 +654,12 @@ explain (verbose, costs off) select count(c2) w, c2 x, 5 y, 7.0 z from ft1 group by 2, y, 9.0::int order by 2; select count(c2) w, c2 x, 5 y, 7.0 z from ft1 group by 2, y, 9.0::int order by 2; +-- GROUP BY clause referring to same column multiple times +-- Also, ORDER BY contains an aggregate function +explain (verbose, costs off) +select c2, c2 from ft1 where c2 > 6 group by 1, 2 order by sum(c1); +select c2, c2 from ft1 where c2 > 6 group by 1, 2 order by sum(c1); + -- Testing HAVING clause shippability explain (verbose, costs off) select c2, sum(c1) from ft2 group by c2 having avg(c1) < 500 and sum(c1) < 49800 order by c2; @@ -829,7 +853,7 @@ drop operator public.<^(int, int); -- Input relation to aggregate push down hook is not safe to pushdown and thus -- the aggregate cannot be pushed down to foreign server. 
explain (verbose, costs off) -select count(t1.c3) from ft1 t1, ft1 t2 where t1.c1 = postgres_fdw_abs(t1.c2); +select count(t1.c3) from ft2 t1 left join ft2 t2 on (t1.c1 = random() * t2.c2); -- Subquery in FROM clause having aggregate explain (verbose, costs off) @@ -1064,25 +1088,77 @@ UPDATE ft2 SET c2 = c2 + 400, c3 = c3 || '_update7' WHERE c1 % 10 = 7 RETURNING UPDATE ft2 SET c2 = c2 + 400, c3 = c3 || '_update7' WHERE c1 % 10 = 7 RETURNING *; EXPLAIN (verbose, costs off) UPDATE ft2 SET c2 = ft2.c2 + 500, c3 = ft2.c3 || '_update9', c7 = DEFAULT - FROM ft1 WHERE ft1.c1 = ft2.c2 AND ft1.c1 % 10 = 9; -- can't be pushed down + FROM ft1 WHERE ft1.c1 = ft2.c2 AND ft1.c1 % 10 = 9; -- can be pushed down UPDATE ft2 SET c2 = ft2.c2 + 500, c3 = ft2.c3 || '_update9', c7 = DEFAULT FROM ft1 WHERE ft1.c1 = ft2.c2 AND ft1.c1 % 10 = 9; EXPLAIN (verbose, costs off) DELETE FROM ft2 WHERE c1 % 10 = 5 RETURNING c1, c4; -- can be pushed down DELETE FROM ft2 WHERE c1 % 10 = 5 RETURNING c1, c4; EXPLAIN (verbose, costs off) -DELETE FROM ft2 USING ft1 WHERE ft1.c1 = ft2.c2 AND ft1.c1 % 10 = 2; -- can't be pushed down +DELETE FROM ft2 USING ft1 WHERE ft1.c1 = ft2.c2 AND ft1.c1 % 10 = 2; -- can be pushed down DELETE FROM ft2 USING ft1 WHERE ft1.c1 = ft2.c2 AND ft1.c1 % 10 = 2; SELECT c1,c2,c3,c4 FROM ft2 ORDER BY c1; EXPLAIN (verbose, costs off) -INSERT INTO ft2 (c1,c2,c3) VALUES (9999,999,'foo') RETURNING tableoid::regclass; -INSERT INTO ft2 (c1,c2,c3) VALUES (9999,999,'foo') RETURNING tableoid::regclass; +INSERT INTO ft2 (c1,c2,c3) VALUES (1200,999,'foo') RETURNING tableoid::regclass; +INSERT INTO ft2 (c1,c2,c3) VALUES (1200,999,'foo') RETURNING tableoid::regclass; +EXPLAIN (verbose, costs off) +UPDATE ft2 SET c3 = 'bar' WHERE c1 = 1200 RETURNING tableoid::regclass; -- can be pushed down +UPDATE ft2 SET c3 = 'bar' WHERE c1 = 1200 RETURNING tableoid::regclass; +EXPLAIN (verbose, costs off) +DELETE FROM ft2 WHERE c1 = 1200 RETURNING tableoid::regclass; -- can be pushed down +DELETE FROM ft2 WHERE c1 = 1200 RETURNING tableoid::regclass; + +-- Test UPDATE/DELETE with RETURNING on a three-table join +INSERT INTO ft2 (c1,c2,c3) + SELECT id, id - 1200, to_char(id, 'FM00000') FROM generate_series(1201, 1300) id; +EXPLAIN (verbose, costs off) +UPDATE ft2 SET c3 = 'foo' + FROM ft4 INNER JOIN ft5 ON (ft4.c1 = ft5.c1) + WHERE ft2.c1 > 1200 AND ft2.c2 = ft4.c1 + RETURNING ft2, ft2.*, ft4, ft4.*; -- can be pushed down +UPDATE ft2 SET c3 = 'foo' + FROM ft4 INNER JOIN ft5 ON (ft4.c1 = ft5.c1) + WHERE ft2.c1 > 1200 AND ft2.c2 = ft4.c1 + RETURNING ft2, ft2.*, ft4, ft4.*; +EXPLAIN (verbose, costs off) +DELETE FROM ft2 + USING ft4 LEFT JOIN ft5 ON (ft4.c1 = ft5.c1) + WHERE ft2.c1 > 1200 AND ft2.c1 % 10 = 0 AND ft2.c2 = ft4.c1 + RETURNING 100; -- can be pushed down +DELETE FROM ft2 + USING ft4 LEFT JOIN ft5 ON (ft4.c1 = ft5.c1) + WHERE ft2.c1 > 1200 AND ft2.c1 % 10 = 0 AND ft2.c2 = ft4.c1 + RETURNING 100; +DELETE FROM ft2 WHERE ft2.c1 > 1200; + +-- Test UPDATE/DELETE with WHERE or JOIN/ON conditions containing +-- user-defined operators/functions +ALTER SERVER loopback OPTIONS (DROP extensions); +INSERT INTO ft2 (c1,c2,c3) + SELECT id, id % 10, to_char(id, 'FM00000') FROM generate_series(2001, 2010) id; EXPLAIN (verbose, costs off) -UPDATE ft2 SET c3 = 'bar' WHERE c1 = 9999 RETURNING tableoid::regclass; -- can be pushed down -UPDATE ft2 SET c3 = 'bar' WHERE c1 = 9999 RETURNING tableoid::regclass; +UPDATE ft2 SET c3 = 'bar' WHERE postgres_fdw_abs(c1) > 2000 RETURNING *; -- can't be pushed down +UPDATE ft2 SET c3 = 'bar' WHERE 
postgres_fdw_abs(c1) > 2000 RETURNING *; EXPLAIN (verbose, costs off) -DELETE FROM ft2 WHERE c1 = 9999 RETURNING tableoid::regclass; -- can be pushed down -DELETE FROM ft2 WHERE c1 = 9999 RETURNING tableoid::regclass; +UPDATE ft2 SET c3 = 'baz' + FROM ft4 INNER JOIN ft5 ON (ft4.c1 = ft5.c1) + WHERE ft2.c1 > 2000 AND ft2.c2 === ft4.c1 + RETURNING ft2.*, ft4.*, ft5.*; -- can't be pushed down +UPDATE ft2 SET c3 = 'baz' + FROM ft4 INNER JOIN ft5 ON (ft4.c1 = ft5.c1) + WHERE ft2.c1 > 2000 AND ft2.c2 === ft4.c1 + RETURNING ft2.*, ft4.*, ft5.*; +EXPLAIN (verbose, costs off) +DELETE FROM ft2 + USING ft4 INNER JOIN ft5 ON (ft4.c1 === ft5.c1) + WHERE ft2.c1 > 2000 AND ft2.c2 = ft4.c1 + RETURNING ft2.c1, ft2.c2, ft2.c3; -- can't be pushed down +DELETE FROM ft2 + USING ft4 INNER JOIN ft5 ON (ft4.c1 === ft5.c1) + WHERE ft2.c1 > 2000 AND ft2.c2 = ft4.c1 + RETURNING ft2.c1, ft2.c2, ft2.c3; +DELETE FROM ft2 WHERE ft2.c1 > 2000; +ALTER SERVER loopback OPTIONS (ADD extensions 'postgres_fdw'); -- Test that trigger on remote table works as expected CREATE OR REPLACE FUNCTION "S 1".F_BRTRIG() RETURNS trigger AS $$ @@ -1138,6 +1214,8 @@ commit; select c2, count(*) from ft2 where c2 < 500 group by 1 order by 1; select c2, count(*) from "S 1"."T 1" where c2 < 500 group by 1 order by 1; +VACUUM ANALYZE "S 1"."T 1"; + -- Above DMLs add data with c6 as NULL in ft1, so test ORDER BY NULLS LAST and NULLs -- FIRST behavior here. -- ORDER BY DESC NULLS LAST options @@ -1184,30 +1262,79 @@ ALTER FOREIGN TABLE ft1 DROP CONSTRAINT ft1_c2negative; -- test WITH CHECK OPTION constraints -- =================================================================== +CREATE FUNCTION row_before_insupd_trigfunc() RETURNS trigger AS $$BEGIN NEW.a := NEW.a + 10; RETURN NEW; END$$ LANGUAGE plpgsql; + CREATE TABLE base_tbl (a int, b int); +ALTER TABLE base_tbl SET (autovacuum_enabled = 'false'); +CREATE TRIGGER row_before_insupd_trigger BEFORE INSERT OR UPDATE ON base_tbl FOR EACH ROW EXECUTE PROCEDURE row_before_insupd_trigfunc(); CREATE FOREIGN TABLE foreign_tbl (a int, b int) - SERVER loopback OPTIONS(table_name 'base_tbl'); + SERVER loopback OPTIONS (table_name 'base_tbl'); CREATE VIEW rw_view AS SELECT * FROM foreign_tbl WHERE a < b WITH CHECK OPTION; \d+ rw_view -INSERT INTO rw_view VALUES (0, 10); -- ok -INSERT INTO rw_view VALUES (10, 0); -- should fail EXPLAIN (VERBOSE, COSTS OFF) -UPDATE rw_view SET b = 20 WHERE a = 0; -- not pushed down -UPDATE rw_view SET b = 20 WHERE a = 0; -- ok +INSERT INTO rw_view VALUES (0, 5); +INSERT INTO rw_view VALUES (0, 5); -- should fail +EXPLAIN (VERBOSE, COSTS OFF) +INSERT INTO rw_view VALUES (0, 15); +INSERT INTO rw_view VALUES (0, 15); -- ok +SELECT * FROM foreign_tbl; + +EXPLAIN (VERBOSE, COSTS OFF) +UPDATE rw_view SET b = b + 5; +UPDATE rw_view SET b = b + 5; -- should fail EXPLAIN (VERBOSE, COSTS OFF) -UPDATE rw_view SET b = -20 WHERE a = 0; -- not pushed down -UPDATE rw_view SET b = -20 WHERE a = 0; -- should fail +UPDATE rw_view SET b = b + 15; +UPDATE rw_view SET b = b + 15; -- ok SELECT * FROM foreign_tbl; DROP FOREIGN TABLE foreign_tbl CASCADE; +DROP TRIGGER row_before_insupd_trigger ON base_tbl; DROP TABLE base_tbl; +-- test WCO for partitions + +CREATE TABLE child_tbl (a int, b int); +ALTER TABLE child_tbl SET (autovacuum_enabled = 'false'); +CREATE TRIGGER row_before_insupd_trigger BEFORE INSERT OR UPDATE ON child_tbl FOR EACH ROW EXECUTE PROCEDURE row_before_insupd_trigfunc(); +CREATE FOREIGN TABLE foreign_tbl (a int, b int) + SERVER loopback OPTIONS (table_name 'child_tbl'); + +CREATE 
TABLE parent_tbl (a int, b int) PARTITION BY RANGE(a); +ALTER TABLE parent_tbl ATTACH PARTITION foreign_tbl FOR VALUES FROM (0) TO (100); + +CREATE VIEW rw_view AS SELECT * FROM parent_tbl + WHERE a < b WITH CHECK OPTION; +\d+ rw_view + +EXPLAIN (VERBOSE, COSTS OFF) +INSERT INTO rw_view VALUES (0, 5); +INSERT INTO rw_view VALUES (0, 5); -- should fail +EXPLAIN (VERBOSE, COSTS OFF) +INSERT INTO rw_view VALUES (0, 15); +INSERT INTO rw_view VALUES (0, 15); -- ok +SELECT * FROM foreign_tbl; + +EXPLAIN (VERBOSE, COSTS OFF) +UPDATE rw_view SET b = b + 5; +UPDATE rw_view SET b = b + 5; -- should fail +EXPLAIN (VERBOSE, COSTS OFF) +UPDATE rw_view SET b = b + 15; +UPDATE rw_view SET b = b + 15; -- ok +SELECT * FROM foreign_tbl; + +DROP FOREIGN TABLE foreign_tbl CASCADE; +DROP TRIGGER row_before_insupd_trigger ON child_tbl; +DROP TABLE parent_tbl CASCADE; + +DROP FUNCTION row_before_insupd_trigfunc; + -- =================================================================== -- test serial columns (ie, sequence-based defaults) -- =================================================================== create table loc1 (f1 serial, f2 text); +alter table loc1 set (autovacuum_enabled = 'false'); create foreign table rem1 (f1 serial, f2 text) server loopback options(table_name 'loc1'); select pg_catalog.setval('rem1_f1_seq', 10, false); @@ -1524,6 +1651,8 @@ DROP TRIGGER trig_row_after_delete ON rem1; CREATE TABLE a (aa TEXT); CREATE TABLE loct (aa TEXT, bb TEXT); +ALTER TABLE a SET (autovacuum_enabled = 'false'); +ALTER TABLE loct SET (autovacuum_enabled = 'false'); CREATE FOREIGN TABLE b (bb TEXT) INHERITS (a) SERVER loopback OPTIONS (table_name 'loct'); @@ -1570,6 +1699,9 @@ DROP TABLE loct; create table loct1 (f1 int, f2 int, f3 int); create table loct2 (f1 int, f2 int, f3 int); +alter table loct1 set (autovacuum_enabled = 'false'); +alter table loct2 set (autovacuum_enabled = 'false'); + create table foo (f1 int, f2 int); create foreign table foo2 (f3 int) inherits (foo) server loopback options (table_name 'loct1'); @@ -1577,6 +1709,9 @@ create table bar (f1 int, f2 int); create foreign table bar2 (f3 int) inherits (bar) server loopback options (table_name 'loct2'); +alter table foo set (autovacuum_enabled = 'false'); +alter table bar set (autovacuum_enabled = 'false'); + insert into foo values(1,1); insert into foo values(3,3); insert into foo2 values(2,2,2); @@ -1656,11 +1791,347 @@ explain (verbose, costs off) update bar set f2 = f2 + 100 returning *; update bar set f2 = f2 + 100 returning *; +-- Test that UPDATE/DELETE with inherited target works with row-level triggers +CREATE TRIGGER trig_row_before +BEFORE UPDATE OR DELETE ON bar2 +FOR EACH ROW EXECUTE PROCEDURE trigger_data(23,'skidoo'); + +CREATE TRIGGER trig_row_after +AFTER UPDATE OR DELETE ON bar2 +FOR EACH ROW EXECUTE PROCEDURE trigger_data(23,'skidoo'); + +explain (verbose, costs off) +update bar set f2 = f2 + 100; +update bar set f2 = f2 + 100; + +explain (verbose, costs off) +delete from bar where f2 < 400; +delete from bar where f2 < 400; + +-- cleanup drop table foo cascade; drop table bar cascade; drop table loct1; drop table loct2; +-- Test pushing down UPDATE/DELETE joins to the remote server +create table parent (a int, b text); +create table loct1 (a int, b text); +create table loct2 (a int, b text); +create foreign table remt1 (a int, b text) + server loopback options (table_name 'loct1'); +create foreign table remt2 (a int, b text) + server loopback options (table_name 'loct2'); +alter foreign table remt1 inherit parent; + +insert 
into remt1 values (1, 'foo'); +insert into remt1 values (2, 'bar'); +insert into remt2 values (1, 'foo'); +insert into remt2 values (2, 'bar'); + +analyze remt1; +analyze remt2; + +explain (verbose, costs off) +update parent set b = parent.b || remt2.b from remt2 where parent.a = remt2.a returning *; +update parent set b = parent.b || remt2.b from remt2 where parent.a = remt2.a returning *; +explain (verbose, costs off) +delete from parent using remt2 where parent.a = remt2.a returning parent; +delete from parent using remt2 where parent.a = remt2.a returning parent; + +-- cleanup +drop foreign table remt1; +drop foreign table remt2; +drop table loct1; +drop table loct2; +drop table parent; + +-- =================================================================== +-- test tuple routing for foreign-table partitions +-- =================================================================== + +-- Test insert tuple routing +create table itrtest (a int, b text) partition by list (a); +create table loct1 (a int check (a in (1)), b text); +create foreign table remp1 (a int check (a in (1)), b text) server loopback options (table_name 'loct1'); +create table loct2 (a int check (a in (2)), b text); +create foreign table remp2 (b text, a int check (a in (2))) server loopback options (table_name 'loct2'); +alter table itrtest attach partition remp1 for values in (1); +alter table itrtest attach partition remp2 for values in (2); + +insert into itrtest values (1, 'foo'); +insert into itrtest values (1, 'bar') returning *; +insert into itrtest values (2, 'baz'); +insert into itrtest values (2, 'qux') returning *; +insert into itrtest values (1, 'test1'), (2, 'test2') returning *; + +select tableoid::regclass, * FROM itrtest; +select tableoid::regclass, * FROM remp1; +select tableoid::regclass, * FROM remp2; + +delete from itrtest; + +create unique index loct1_idx on loct1 (a); + +-- DO NOTHING without an inference specification is supported +insert into itrtest values (1, 'foo') on conflict do nothing returning *; +insert into itrtest values (1, 'foo') on conflict do nothing returning *; + +-- But other cases are not supported +insert into itrtest values (1, 'bar') on conflict (a) do nothing; +insert into itrtest values (1, 'bar') on conflict (a) do update set b = excluded.b; + +select tableoid::regclass, * FROM itrtest; + +delete from itrtest; + +drop index loct1_idx; + +-- Test that remote triggers work with insert tuple routing +create function br_insert_trigfunc() returns trigger as $$ +begin + new.b := new.b || ' triggered !'; + return new; +end +$$ language plpgsql; +create trigger loct1_br_insert_trigger before insert on loct1 + for each row execute procedure br_insert_trigfunc(); +create trigger loct2_br_insert_trigger before insert on loct2 + for each row execute procedure br_insert_trigfunc(); + +-- The new values are concatenated with ' triggered !' 
+insert into itrtest values (1, 'foo') returning *; +insert into itrtest values (2, 'qux') returning *; +insert into itrtest values (1, 'test1'), (2, 'test2') returning *; +with result as (insert into itrtest values (1, 'test1'), (2, 'test2') returning *) select * from result; + +drop trigger loct1_br_insert_trigger on loct1; +drop trigger loct2_br_insert_trigger on loct2; + +drop table itrtest; +drop table loct1; +drop table loct2; + +-- Test update tuple routing +create table utrtest (a int, b text) partition by list (a); +create table loct (a int check (a in (1)), b text); +create foreign table remp (a int check (a in (1)), b text) server loopback options (table_name 'loct'); +create table locp (a int check (a in (2)), b text); +alter table utrtest attach partition remp for values in (1); +alter table utrtest attach partition locp for values in (2); + +insert into utrtest values (1, 'foo'); +insert into utrtest values (2, 'qux'); + +select tableoid::regclass, * FROM utrtest; +select tableoid::regclass, * FROM remp; +select tableoid::regclass, * FROM locp; + +-- It's not allowed to move a row from a partition that is foreign to another +update utrtest set a = 2 where b = 'foo' returning *; + +-- But the reverse is allowed +update utrtest set a = 1 where b = 'qux' returning *; + +select tableoid::regclass, * FROM utrtest; +select tableoid::regclass, * FROM remp; +select tableoid::regclass, * FROM locp; + +-- The executor should not let unexercised FDWs shut down +update utrtest set a = 1 where b = 'foo'; + +-- Test that remote triggers work with update tuple routing +create trigger loct_br_insert_trigger before insert on loct + for each row execute procedure br_insert_trigfunc(); + +delete from utrtest; +insert into utrtest values (2, 'qux'); + +-- Check case where the foreign partition is a subplan target rel +explain (verbose, costs off) +update utrtest set a = 1 where a = 1 or a = 2 returning *; +-- The new values are concatenated with ' triggered !' +update utrtest set a = 1 where a = 1 or a = 2 returning *; + +delete from utrtest; +insert into utrtest values (2, 'qux'); + +-- Check case where the foreign partition isn't a subplan target rel +explain (verbose, costs off) +update utrtest set a = 1 where a = 2 returning *; +-- The new values are concatenated with ' triggered !' +update utrtest set a = 1 where a = 2 returning *; + +drop trigger loct_br_insert_trigger on loct; + +drop table utrtest; +drop table loct; + +-- Test copy tuple routing +create table ctrtest (a int, b text) partition by list (a); +create table loct1 (a int check (a in (1)), b text); +create foreign table remp1 (a int check (a in (1)), b text) server loopback options (table_name 'loct1'); +create table loct2 (a int check (a in (2)), b text); +create foreign table remp2 (b text, a int check (a in (2))) server loopback options (table_name 'loct2'); +alter table ctrtest attach partition remp1 for values in (1); +alter table ctrtest attach partition remp2 for values in (2); + +copy ctrtest from stdin; +1 foo +2 qux +\. + +select tableoid::regclass, * FROM ctrtest; +select tableoid::regclass, * FROM remp1; +select tableoid::regclass, * FROM remp2; + +-- Copying into foreign partitions directly should work as well +copy remp1 from stdin; +1 bar +\. 
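(The SELECTs that follow verify where each routed row ended up.) All of the tuple-routing tests in this file point their foreign tables at a server named loopback with a matching user mapping; both are created near the top of postgres_fdw.sql, outside this excerpt. For readers looking at this hunk in isolation, a minimal sketch of that assumed setup — the dbname shown here is illustrative, not taken from the patch:

CREATE EXTENSION postgres_fdw;
CREATE SERVER loopback FOREIGN DATA WRAPPER postgres_fdw
    OPTIONS (dbname 'contrib_regression');   -- loops back into the same database (assumed name)
CREATE USER MAPPING FOR CURRENT_USER SERVER loopback;

Because the server points back at the same database, a row routed to a foreign partition such as remp1 is ultimately stored in its local counterpart (loct1 above), which is what the checks against both tables confirm.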
+ +select tableoid::regclass, * FROM remp1; + +drop table ctrtest; +drop table loct1; +drop table loct2; + +-- =================================================================== +-- test COPY FROM +-- =================================================================== + +create table loc2 (f1 int, f2 text); +alter table loc2 set (autovacuum_enabled = 'false'); +create foreign table rem2 (f1 int, f2 text) server loopback options(table_name 'loc2'); + +-- Test basic functionality +copy rem2 from stdin; +1 foo +2 bar +\. +select * from rem2; + +delete from rem2; + +-- Test check constraints +alter table loc2 add constraint loc2_f1positive check (f1 >= 0); +alter foreign table rem2 add constraint rem2_f1positive check (f1 >= 0); + +-- check constraint is enforced on the remote side, not locally +copy rem2 from stdin; +1 foo +2 bar +\. +copy rem2 from stdin; -- ERROR +-1 xyzzy +\. +select * from rem2; + +alter foreign table rem2 drop constraint rem2_f1positive; +alter table loc2 drop constraint loc2_f1positive; + +delete from rem2; + +-- Test local triggers +create trigger trig_stmt_before before insert on rem2 + for each statement execute procedure trigger_func(); +create trigger trig_stmt_after after insert on rem2 + for each statement execute procedure trigger_func(); +create trigger trig_row_before before insert on rem2 + for each row execute procedure trigger_data(23,'skidoo'); +create trigger trig_row_after after insert on rem2 + for each row execute procedure trigger_data(23,'skidoo'); + +copy rem2 from stdin; +1 foo +2 bar +\. +select * from rem2; + +drop trigger trig_row_before on rem2; +drop trigger trig_row_after on rem2; +drop trigger trig_stmt_before on rem2; +drop trigger trig_stmt_after on rem2; + +delete from rem2; + +create trigger trig_row_before_insert before insert on rem2 + for each row execute procedure trig_row_before_insupdate(); + +-- The new values are concatenated with ' triggered !' +copy rem2 from stdin; +1 foo +2 bar +\. +select * from rem2; + +drop trigger trig_row_before_insert on rem2; + +delete from rem2; + +create trigger trig_null before insert on rem2 + for each row execute procedure trig_null(); + +-- Nothing happens +copy rem2 from stdin; +1 foo +2 bar +\. +select * from rem2; + +drop trigger trig_null on rem2; + +delete from rem2; + +-- Test remote triggers +create trigger trig_row_before_insert before insert on loc2 + for each row execute procedure trig_row_before_insupdate(); + +-- The new values are concatenated with ' triggered !' +copy rem2 from stdin; +1 foo +2 bar +\. +select * from rem2; + +drop trigger trig_row_before_insert on loc2; + +delete from rem2; + +create trigger trig_null before insert on loc2 + for each row execute procedure trig_null(); + +-- Nothing happens +copy rem2 from stdin; +1 foo +2 bar +\. +select * from rem2; + +drop trigger trig_null on loc2; + +delete from rem2; + +-- Test a combination of local and remote triggers +create trigger rem2_trig_row_before before insert on rem2 + for each row execute procedure trigger_data(23,'skidoo'); +create trigger rem2_trig_row_after after insert on rem2 + for each row execute procedure trigger_data(23,'skidoo'); +create trigger loc2_trig_row_before_insert before insert on loc2 + for each row execute procedure trig_row_before_insupdate(); + +copy rem2 from stdin; +1 foo +2 bar +\. 
+select * from rem2; + +drop trigger rem2_trig_row_before on rem2; +drop trigger rem2_trig_row_after on rem2; +drop trigger loc2_trig_row_before_insert on loc2; + +delete from rem2; + -- =================================================================== -- test IMPORT FOREIGN SCHEMA -- =================================================================== @@ -1764,3 +2235,122 @@ WHERE ftrelid = 'table30000'::regclass AND ftoptions @> array['fetch_size=60000']; ROLLBACK; + +-- =================================================================== +-- test partitionwise joins +-- =================================================================== +SET enable_partitionwise_join=on; + +CREATE TABLE fprt1 (a int, b int, c varchar) PARTITION BY RANGE(a); +CREATE TABLE fprt1_p1 (LIKE fprt1); +CREATE TABLE fprt1_p2 (LIKE fprt1); +ALTER TABLE fprt1_p1 SET (autovacuum_enabled = 'false'); +ALTER TABLE fprt1_p2 SET (autovacuum_enabled = 'false'); +INSERT INTO fprt1_p1 SELECT i, i, to_char(i/50, 'FM0000') FROM generate_series(0, 249, 2) i; +INSERT INTO fprt1_p2 SELECT i, i, to_char(i/50, 'FM0000') FROM generate_series(250, 499, 2) i; +CREATE FOREIGN TABLE ftprt1_p1 PARTITION OF fprt1 FOR VALUES FROM (0) TO (250) + SERVER loopback OPTIONS (table_name 'fprt1_p1', use_remote_estimate 'true'); +CREATE FOREIGN TABLE ftprt1_p2 PARTITION OF fprt1 FOR VALUES FROM (250) TO (500) + SERVER loopback OPTIONS (TABLE_NAME 'fprt1_p2'); +ANALYZE fprt1; +ANALYZE fprt1_p1; +ANALYZE fprt1_p2; + +CREATE TABLE fprt2 (a int, b int, c varchar) PARTITION BY RANGE(b); +CREATE TABLE fprt2_p1 (LIKE fprt2); +CREATE TABLE fprt2_p2 (LIKE fprt2); +ALTER TABLE fprt2_p1 SET (autovacuum_enabled = 'false'); +ALTER TABLE fprt2_p2 SET (autovacuum_enabled = 'false'); +INSERT INTO fprt2_p1 SELECT i, i, to_char(i/50, 'FM0000') FROM generate_series(0, 249, 3) i; +INSERT INTO fprt2_p2 SELECT i, i, to_char(i/50, 'FM0000') FROM generate_series(250, 499, 3) i; +CREATE FOREIGN TABLE ftprt2_p1 (b int, c varchar, a int) + SERVER loopback OPTIONS (table_name 'fprt2_p1', use_remote_estimate 'true'); +ALTER TABLE fprt2 ATTACH PARTITION ftprt2_p1 FOR VALUES FROM (0) TO (250); +CREATE FOREIGN TABLE ftprt2_p2 PARTITION OF fprt2 FOR VALUES FROM (250) TO (500) + SERVER loopback OPTIONS (table_name 'fprt2_p2', use_remote_estimate 'true'); +ANALYZE fprt2; +ANALYZE fprt2_p1; +ANALYZE fprt2_p2; + +-- inner join three tables +EXPLAIN (COSTS OFF) +SELECT t1.a,t2.b,t3.c FROM fprt1 t1 INNER JOIN fprt2 t2 ON (t1.a = t2.b) INNER JOIN fprt1 t3 ON (t2.b = t3.a) WHERE t1.a % 25 =0 ORDER BY 1,2,3; +SELECT t1.a,t2.b,t3.c FROM fprt1 t1 INNER JOIN fprt2 t2 ON (t1.a = t2.b) INNER JOIN fprt1 t3 ON (t2.b = t3.a) WHERE t1.a % 25 =0 ORDER BY 1,2,3; + +-- left outer join + nullable clasue +EXPLAIN (COSTS OFF) +SELECT t1.a,t2.b,t2.c FROM fprt1 t1 LEFT JOIN (SELECT * FROM fprt2 WHERE a < 10) t2 ON (t1.a = t2.b and t1.b = t2.a) WHERE t1.a < 10 ORDER BY 1,2,3; +SELECT t1.a,t2.b,t2.c FROM fprt1 t1 LEFT JOIN (SELECT * FROM fprt2 WHERE a < 10) t2 ON (t1.a = t2.b and t1.b = t2.a) WHERE t1.a < 10 ORDER BY 1,2,3; + +-- with whole-row reference; partitionwise join does not apply +EXPLAIN (COSTS OFF) +SELECT t1.wr, t2.wr FROM (SELECT t1 wr, a FROM fprt1 t1 WHERE t1.a % 25 = 0) t1 FULL JOIN (SELECT t2 wr, b FROM fprt2 t2 WHERE t2.b % 25 = 0) t2 ON (t1.a = t2.b) ORDER BY 1,2; +SELECT t1.wr, t2.wr FROM (SELECT t1 wr, a FROM fprt1 t1 WHERE t1.a % 25 = 0) t1 FULL JOIN (SELECT t2 wr, b FROM fprt2 t2 WHERE t2.b % 25 = 0) t2 ON (t1.a = t2.b) ORDER BY 1,2; + +-- join with lateral reference +EXPLAIN (COSTS 
OFF) +SELECT t1.a,t1.b FROM fprt1 t1, LATERAL (SELECT t2.a, t2.b FROM fprt2 t2 WHERE t1.a = t2.b AND t1.b = t2.a) q WHERE t1.a%25 = 0 ORDER BY 1,2; +SELECT t1.a,t1.b FROM fprt1 t1, LATERAL (SELECT t2.a, t2.b FROM fprt2 t2 WHERE t1.a = t2.b AND t1.b = t2.a) q WHERE t1.a%25 = 0 ORDER BY 1,2; + +-- with PHVs, partitionwise join selected but no join pushdown +EXPLAIN (COSTS OFF) +SELECT t1.a, t1.phv, t2.b, t2.phv FROM (SELECT 't1_phv' phv, * FROM fprt1 WHERE a % 25 = 0) t1 FULL JOIN (SELECT 't2_phv' phv, * FROM fprt2 WHERE b % 25 = 0) t2 ON (t1.a = t2.b) ORDER BY t1.a, t2.b; +SELECT t1.a, t1.phv, t2.b, t2.phv FROM (SELECT 't1_phv' phv, * FROM fprt1 WHERE a % 25 = 0) t1 FULL JOIN (SELECT 't2_phv' phv, * FROM fprt2 WHERE b % 25 = 0) t2 ON (t1.a = t2.b) ORDER BY t1.a, t2.b; + +-- test FOR UPDATE; partitionwise join does not apply +EXPLAIN (COSTS OFF) +SELECT t1.a, t2.b FROM fprt1 t1 INNER JOIN fprt2 t2 ON (t1.a = t2.b) WHERE t1.a % 25 = 0 ORDER BY 1,2 FOR UPDATE OF t1; +SELECT t1.a, t2.b FROM fprt1 t1 INNER JOIN fprt2 t2 ON (t1.a = t2.b) WHERE t1.a % 25 = 0 ORDER BY 1,2 FOR UPDATE OF t1; + +RESET enable_partitionwise_join; + + +-- =================================================================== +-- test partitionwise aggregates +-- =================================================================== + +CREATE TABLE pagg_tab (a int, b int, c text) PARTITION BY RANGE(a); + +CREATE TABLE pagg_tab_p1 (LIKE pagg_tab); +CREATE TABLE pagg_tab_p2 (LIKE pagg_tab); +CREATE TABLE pagg_tab_p3 (LIKE pagg_tab); + +INSERT INTO pagg_tab_p1 SELECT i % 30, i % 50, to_char(i/30, 'FM0000') FROM generate_series(1, 3000) i WHERE (i % 30) < 10; +INSERT INTO pagg_tab_p2 SELECT i % 30, i % 50, to_char(i/30, 'FM0000') FROM generate_series(1, 3000) i WHERE (i % 30) < 20 and (i % 30) >= 10; +INSERT INTO pagg_tab_p3 SELECT i % 30, i % 50, to_char(i/30, 'FM0000') FROM generate_series(1, 3000) i WHERE (i % 30) < 30 and (i % 30) >= 20; + +-- Create foreign partitions +CREATE FOREIGN TABLE fpagg_tab_p1 PARTITION OF pagg_tab FOR VALUES FROM (0) TO (10) SERVER loopback OPTIONS (table_name 'pagg_tab_p1'); +CREATE FOREIGN TABLE fpagg_tab_p2 PARTITION OF pagg_tab FOR VALUES FROM (10) TO (20) SERVER loopback OPTIONS (table_name 'pagg_tab_p2');; +CREATE FOREIGN TABLE fpagg_tab_p3 PARTITION OF pagg_tab FOR VALUES FROM (20) TO (30) SERVER loopback OPTIONS (table_name 'pagg_tab_p3');; + +ANALYZE pagg_tab; +ANALYZE fpagg_tab_p1; +ANALYZE fpagg_tab_p2; +ANALYZE fpagg_tab_p3; + +-- When GROUP BY clause matches with PARTITION KEY. +-- Plan with partitionwise aggregates is disabled +SET enable_partitionwise_aggregate TO false; +EXPLAIN (COSTS OFF) +SELECT a, sum(b), min(b), count(*) FROM pagg_tab GROUP BY a HAVING avg(b) < 22 ORDER BY 1; + +-- Plan with partitionwise aggregates is enabled +SET enable_partitionwise_aggregate TO true; +EXPLAIN (COSTS OFF) +SELECT a, sum(b), min(b), count(*) FROM pagg_tab GROUP BY a HAVING avg(b) < 22 ORDER BY 1; +SELECT a, sum(b), min(b), count(*) FROM pagg_tab GROUP BY a HAVING avg(b) < 22 ORDER BY 1; + +-- Check with whole-row reference +-- Should have all the columns in the target list for the given relation +EXPLAIN (VERBOSE, COSTS OFF) +SELECT a, count(t1) FROM pagg_tab t1 GROUP BY a HAVING avg(b) < 22 ORDER BY 1; +SELECT a, count(t1) FROM pagg_tab t1 GROUP BY a HAVING avg(b) < 22 ORDER BY 1; + +-- When GROUP BY clause does not match with PARTITION KEY. 
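Both enable_partitionwise_join and enable_partitionwise_aggregate default to off, hence the explicit SETs in these tests. When the GROUP BY clause matches the partition key, as in the cases above, partitionwise aggregation lets each foreign partition be aggregated on the remote server; the EXPLAIN that follows covers the opposite case, where the aggregation is not pushed down. As a rough illustration only (not plan output captured from the patch; the deparsed query may differ in detail), the query shipped for a partition such as fpagg_tab_p1 in the matching case would look something like:

SELECT a, sum(b), min(b), count(*)
FROM public.pagg_tab_p1      -- remote table named by the table_name option
GROUP BY a
HAVING avg(b) < 22;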
+EXPLAIN (COSTS OFF) +SELECT b, avg(a), max(a), count(*) FROM pagg_tab GROUP BY b HAVING sum(a) < 700 ORDER BY 1; + + +-- Clean-up +RESET enable_partitionwise_aggregate; diff --git a/contrib/seg/Makefile b/contrib/seg/Makefile index c8f0f8b9a2..62b658e724 100644 --- a/contrib/seg/Makefile +++ b/contrib/seg/Makefile @@ -4,9 +4,12 @@ MODULE_big = seg OBJS = seg.o segparse.o $(WIN32RES) EXTENSION = seg -DATA = seg--1.1.sql seg--1.0--1.1.sql seg--unpackaged--1.0.sql +DATA = seg--1.1.sql seg--1.1--1.2.sql seg--1.2--1.3.sql \ + seg--1.0--1.1.sql seg--unpackaged--1.0.sql PGFILEDESC = "seg - line segment data type" +HEADERS = segdata.h + REGRESS = seg EXTRA_CLEAN = y.tab.c y.tab.h diff --git a/contrib/seg/expected/seg.out b/contrib/seg/expected/seg.out index 18010c4d5c..a289dbe5f9 100644 --- a/contrib/seg/expected/seg.out +++ b/contrib/seg/expected/seg.out @@ -930,12 +930,40 @@ SELECT '1'::seg <@ '-1 .. 1'::seg AS bool; CREATE TABLE test_seg (s seg); \copy test_seg from 'data/test_seg.data' CREATE INDEX test_seg_ix ON test_seg USING gist (s); +EXPLAIN (COSTS OFF) +SELECT count(*) FROM test_seg WHERE s @> '11..11.3'; + QUERY PLAN +------------------------------------------------------- + Aggregate + -> Bitmap Heap Scan on test_seg + Recheck Cond: (s @> '1.1e1 .. 11.3'::seg) + -> Bitmap Index Scan on test_seg_ix + Index Cond: (s @> '1.1e1 .. 11.3'::seg) +(5 rows) + +SELECT count(*) FROM test_seg WHERE s @> '11..11.3'; + count +------- + 143 +(1 row) + +SET enable_bitmapscan = false; +EXPLAIN (COSTS OFF) +SELECT count(*) FROM test_seg WHERE s @> '11..11.3'; + QUERY PLAN +----------------------------------------------------- + Aggregate + -> Index Only Scan using test_seg_ix on test_seg + Index Cond: (s @> '1.1e1 .. 11.3'::seg) +(3 rows) + SELECT count(*) FROM test_seg WHERE s @> '11..11.3'; count ------- 143 (1 row) +RESET enable_bitmapscan; -- Test sorting SELECT * FROM test_seg WHERE s @> '11..11.3' GROUP BY s; s diff --git a/contrib/seg/expected/seg_1.out b/contrib/seg/expected/seg_1.out deleted file mode 100644 index 566ce394ed..0000000000 --- a/contrib/seg/expected/seg_1.out +++ /dev/null @@ -1,1238 +0,0 @@ --- --- Test seg datatype --- -CREATE EXTENSION seg; --- Check whether any of our opclasses fail amvalidate -SELECT amname, opcname -FROM pg_opclass opc LEFT JOIN pg_am am ON am.oid = opcmethod -WHERE opc.oid >= 16384 AND NOT amvalidate(opc.oid); - amname | opcname ---------+--------- -(0 rows) - --- --- testing the input and output functions --- --- Any number -SELECT '1'::seg AS seg; - seg ------ - 1 -(1 row) - -SELECT '-1'::seg AS seg; - seg ------ - -1 -(1 row) - -SELECT '1.0'::seg AS seg; - seg ------ - 1.0 -(1 row) - -SELECT '-1.0'::seg AS seg; - seg ------- - -1.0 -(1 row) - -SELECT '1e7'::seg AS seg; - seg --------- - 1e+007 -(1 row) - -SELECT '-1e7'::seg AS seg; - seg ---------- - -1e+007 -(1 row) - -SELECT '1.0e7'::seg AS seg; - seg ----------- - 1.0e+007 -(1 row) - -SELECT '-1.0e7'::seg AS seg; - seg ------------ - -1.0e+007 -(1 row) - -SELECT '1e+7'::seg AS seg; - seg --------- - 1e+007 -(1 row) - -SELECT '-1e+7'::seg AS seg; - seg ---------- - -1e+007 -(1 row) - -SELECT '1.0e+7'::seg AS seg; - seg ----------- - 1.0e+007 -(1 row) - -SELECT '-1.0e+7'::seg AS seg; - seg ------------ - -1.0e+007 -(1 row) - -SELECT '1e-7'::seg AS seg; - seg --------- - 1e-007 -(1 row) - -SELECT '-1e-7'::seg AS seg; - seg ---------- - -1e-007 -(1 row) - -SELECT '1.0e-7'::seg AS seg; - seg ----------- - 1.0e-007 -(1 row) - -SELECT '-1.0e-7'::seg AS seg; - seg ------------ - -1.0e-007 -(1 row) - -SELECT 
'2e-6'::seg AS seg; - seg --------- - 2e-006 -(1 row) - -SELECT '2e-5'::seg AS seg; - seg --------- - 2e-005 -(1 row) - -SELECT '2e-4'::seg AS seg; - seg --------- - 0.0002 -(1 row) - -SELECT '2e-3'::seg AS seg; - seg -------- - 0.002 -(1 row) - -SELECT '2e-2'::seg AS seg; - seg ------- - 0.02 -(1 row) - -SELECT '2e-1'::seg AS seg; - seg ------ - 0.2 -(1 row) - -SELECT '2e-0'::seg AS seg; - seg ------ - 2 -(1 row) - -SELECT '2e+0'::seg AS seg; - seg ------ - 2 -(1 row) - -SELECT '2e+1'::seg AS seg; - seg ------ - 2e1 -(1 row) - -SELECT '2e+2'::seg AS seg; - seg ------ - 2e2 -(1 row) - -SELECT '2e+3'::seg AS seg; - seg ------ - 2e3 -(1 row) - -SELECT '2e+4'::seg AS seg; - seg ------ - 2e4 -(1 row) - -SELECT '2e+5'::seg AS seg; - seg --------- - 2e+005 -(1 row) - -SELECT '2e+6'::seg AS seg; - seg --------- - 2e+006 -(1 row) - --- Significant digits preserved -SELECT '1'::seg AS seg; - seg ------ - 1 -(1 row) - -SELECT '1.0'::seg AS seg; - seg ------ - 1.0 -(1 row) - -SELECT '1.00'::seg AS seg; - seg ------- - 1.00 -(1 row) - -SELECT '1.000'::seg AS seg; - seg -------- - 1.000 -(1 row) - -SELECT '1.0000'::seg AS seg; - seg --------- - 1.0000 -(1 row) - -SELECT '1.00000'::seg AS seg; - seg ---------- - 1.00000 -(1 row) - -SELECT '1.000000'::seg AS seg; - seg ---------- - 1.00000 -(1 row) - -SELECT '0.000000120'::seg AS seg; - seg ------------ - 1.20e-007 -(1 row) - -SELECT '3.400e5'::seg AS seg; - seg ------------- - 3.400e+005 -(1 row) - --- Digits truncated -SELECT '12.34567890123456'::seg AS seg; - seg ---------- - 12.3457 -(1 row) - --- Numbers with certainty indicators -SELECT '~6.5'::seg AS seg; - seg ------- - ~6.5 -(1 row) - -SELECT '<6.5'::seg AS seg; - seg ------- - <6.5 -(1 row) - -SELECT '>6.5'::seg AS seg; - seg ------- - >6.5 -(1 row) - -SELECT '~ 6.5'::seg AS seg; - seg ------- - ~6.5 -(1 row) - -SELECT '< 6.5'::seg AS seg; - seg ------- - <6.5 -(1 row) - -SELECT '> 6.5'::seg AS seg; - seg ------- - >6.5 -(1 row) - --- Open intervals -SELECT '0..'::seg AS seg; - seg ------- - 0 .. -(1 row) - -SELECT '0...'::seg AS seg; - seg ------- - 0 .. -(1 row) - -SELECT '0 ..'::seg AS seg; - seg ------- - 0 .. -(1 row) - -SELECT '0 ...'::seg AS seg; - seg ------- - 0 .. -(1 row) - -SELECT '..0'::seg AS seg; - seg ------- - .. 0 -(1 row) - -SELECT '...0'::seg AS seg; - seg ------- - .. 0 -(1 row) - -SELECT '.. 0'::seg AS seg; - seg ------- - .. 0 -(1 row) - -SELECT '... 0'::seg AS seg; - seg ------- - .. 0 -(1 row) - --- Finite intervals -SELECT '0 .. 1'::seg AS seg; - seg --------- - 0 .. 1 -(1 row) - -SELECT '-1 .. 0'::seg AS seg; - seg ---------- - -1 .. 0 -(1 row) - -SELECT '-1 .. 1'::seg AS seg; - seg ---------- - -1 .. 1 -(1 row) - --- (+/-) intervals -SELECT '0(+-)1'::seg AS seg; - seg ---------- - -1 .. 1 -(1 row) - -SELECT '0(+-)1.0'::seg AS seg; - seg -------------- - -1.0 .. 1.0 -(1 row) - -SELECT '1.0(+-)0.005'::seg AS seg; - seg ----------------- - 0.995 .. 1.005 -(1 row) - -SELECT '101(+-)1'::seg AS seg; - seg ------------------- - 1.00e2 .. 1.02e2 -(1 row) - --- incorrect number of significant digits in 99.0: -SELECT '100(+-)1'::seg AS seg; - seg ----------------- - 99.0 .. 
1.01e2 -(1 row) - --- invalid input -SELECT ''::seg AS seg; -ERROR: bad seg representation -LINE 1: SELECT ''::seg AS seg; - ^ -DETAIL: syntax error at end of input -SELECT 'ABC'::seg AS seg; -ERROR: bad seg representation -LINE 1: SELECT 'ABC'::seg AS seg; - ^ -DETAIL: syntax error at or near "A" -SELECT '1ABC'::seg AS seg; -ERROR: bad seg representation -LINE 1: SELECT '1ABC'::seg AS seg; - ^ -DETAIL: syntax error at or near "A" -SELECT '1.'::seg AS seg; -ERROR: bad seg representation -LINE 1: SELECT '1.'::seg AS seg; - ^ -DETAIL: syntax error at or near "." -SELECT '1.....'::seg AS seg; -ERROR: bad seg representation -LINE 1: SELECT '1.....'::seg AS seg; - ^ -DETAIL: syntax error at or near ".." -SELECT '.1'::seg AS seg; -ERROR: bad seg representation -LINE 1: SELECT '.1'::seg AS seg; - ^ -DETAIL: syntax error at or near "." -SELECT '1..2.'::seg AS seg; -ERROR: bad seg representation -LINE 1: SELECT '1..2.'::seg AS seg; - ^ -DETAIL: syntax error at or near "." -SELECT '1 e7'::seg AS seg; -ERROR: bad seg representation -LINE 1: SELECT '1 e7'::seg AS seg; - ^ -DETAIL: syntax error at or near "e" -SELECT '1e700'::seg AS seg; -ERROR: "1e700" is out of range for type real -LINE 1: SELECT '1e700'::seg AS seg; - ^ --- --- testing the operators --- --- equality/inequality: --- -SELECT '24 .. 33.20'::seg = '24 .. 33.20'::seg AS bool; - bool ------- - t -(1 row) - -SELECT '24 .. 33.20'::seg = '24 .. 33.21'::seg AS bool; - bool ------- - f -(1 row) - -SELECT '24 .. 33.20'::seg != '24 .. 33.20'::seg AS bool; - bool ------- - f -(1 row) - -SELECT '24 .. 33.20'::seg != '24 .. 33.21'::seg AS bool; - bool ------- - t -(1 row) - --- overlap --- -SELECT '1'::seg && '1'::seg AS bool; - bool ------- - t -(1 row) - -SELECT '1'::seg && '2'::seg AS bool; - bool ------- - f -(1 row) - -SELECT '0 ..'::seg && '0 ..'::seg AS bool; - bool ------- - t -(1 row) - -SELECT '0 .. 1'::seg && '0 .. 1'::seg AS bool; - bool ------- - t -(1 row) - -SELECT '..0'::seg && '0..'::seg AS bool; - bool ------- - t -(1 row) - -SELECT '-1 .. 0.1'::seg && '0 .. 1'::seg AS bool; - bool ------- - t -(1 row) - -SELECT '-1 .. 0'::seg && '0 .. 1'::seg AS bool; - bool ------- - t -(1 row) - -SELECT '-1 .. -0.0001'::seg && '0 .. 1'::seg AS bool; - bool ------- - f -(1 row) - -SELECT '0 ..'::seg && '1'::seg AS bool; - bool ------- - t -(1 row) - -SELECT '0 .. 1'::seg && '1'::seg AS bool; - bool ------- - t -(1 row) - -SELECT '0 .. 1'::seg && '2'::seg AS bool; - bool ------- - f -(1 row) - -SELECT '0 .. 2'::seg && '1'::seg AS bool; - bool ------- - t -(1 row) - -SELECT '1'::seg && '0 .. 1'::seg AS bool; - bool ------- - t -(1 row) - -SELECT '2'::seg && '0 .. 1'::seg AS bool; - bool ------- - f -(1 row) - -SELECT '1'::seg && '0 .. 2'::seg AS bool; - bool ------- - t -(1 row) - --- overlap on the left --- -SELECT '1'::seg &< '0'::seg AS bool; - bool ------- - f -(1 row) - -SELECT '1'::seg &< '1'::seg AS bool; - bool ------- - t -(1 row) - -SELECT '1'::seg &< '2'::seg AS bool; - bool ------- - t -(1 row) - -SELECT '0 .. 1'::seg &< '0'::seg AS bool; - bool ------- - f -(1 row) - -SELECT '0 .. 1'::seg &< '1'::seg AS bool; - bool ------- - t -(1 row) - -SELECT '0 .. 1'::seg &< '2'::seg AS bool; - bool ------- - t -(1 row) - -SELECT '0 .. 1'::seg &< '0 .. 0.5'::seg AS bool; - bool ------- - f -(1 row) - -SELECT '0 .. 1'::seg &< '0 .. 1'::seg AS bool; - bool ------- - t -(1 row) - -SELECT '0 .. 1'::seg &< '0 .. 2'::seg AS bool; - bool ------- - t -(1 row) - -SELECT '0 .. 1'::seg &< '1 .. 2'::seg AS bool; - bool ------- - t -(1 row) - -SELECT '0 .. 
1'::seg &< '2 .. 3'::seg AS bool; - bool ------- - t -(1 row) - --- overlap on the right --- -SELECT '0'::seg &> '1'::seg AS bool; - bool ------- - f -(1 row) - -SELECT '1'::seg &> '1'::seg AS bool; - bool ------- - t -(1 row) - -SELECT '2'::seg &> '1'::seg AS bool; - bool ------- - t -(1 row) - -SELECT '0'::seg &> '0 .. 1'::seg AS bool; - bool ------- - t -(1 row) - -SELECT '1'::seg &> '0 .. 1'::seg AS bool; - bool ------- - t -(1 row) - -SELECT '2'::seg &> '0 .. 1'::seg AS bool; - bool ------- - t -(1 row) - -SELECT '0 .. 0.5'::seg &> '0 .. 1'::seg AS bool; - bool ------- - t -(1 row) - -SELECT '0 .. 1'::seg &> '0 .. 1'::seg AS bool; - bool ------- - t -(1 row) - -SELECT '0 .. 2'::seg &> '0 .. 2'::seg AS bool; - bool ------- - t -(1 row) - -SELECT '1 .. 2'::seg &> '0 .. 1'::seg AS bool; - bool ------- - t -(1 row) - -SELECT '2 .. 3'::seg &> '0 .. 1'::seg AS bool; - bool ------- - t -(1 row) - --- left --- -SELECT '1'::seg << '0'::seg AS bool; - bool ------- - f -(1 row) - -SELECT '1'::seg << '1'::seg AS bool; - bool ------- - f -(1 row) - -SELECT '1'::seg << '2'::seg AS bool; - bool ------- - t -(1 row) - -SELECT '0 .. 1'::seg << '0'::seg AS bool; - bool ------- - f -(1 row) - -SELECT '0 .. 1'::seg << '1'::seg AS bool; - bool ------- - f -(1 row) - -SELECT '0 .. 1'::seg << '2'::seg AS bool; - bool ------- - t -(1 row) - -SELECT '0 .. 1'::seg << '0 .. 0.5'::seg AS bool; - bool ------- - f -(1 row) - -SELECT '0 .. 1'::seg << '0 .. 1'::seg AS bool; - bool ------- - f -(1 row) - -SELECT '0 .. 1'::seg << '0 .. 2'::seg AS bool; - bool ------- - f -(1 row) - -SELECT '0 .. 1'::seg << '1 .. 2'::seg AS bool; - bool ------- - f -(1 row) - -SELECT '0 .. 1'::seg << '2 .. 3'::seg AS bool; - bool ------- - t -(1 row) - --- right --- -SELECT '0'::seg >> '1'::seg AS bool; - bool ------- - f -(1 row) - -SELECT '1'::seg >> '1'::seg AS bool; - bool ------- - f -(1 row) - -SELECT '2'::seg >> '1'::seg AS bool; - bool ------- - t -(1 row) - -SELECT '0'::seg >> '0 .. 1'::seg AS bool; - bool ------- - f -(1 row) - -SELECT '1'::seg >> '0 .. 1'::seg AS bool; - bool ------- - f -(1 row) - -SELECT '2'::seg >> '0 .. 1'::seg AS bool; - bool ------- - t -(1 row) - -SELECT '0 .. 0.5'::seg >> '0 .. 1'::seg AS bool; - bool ------- - f -(1 row) - -SELECT '0 .. 1'::seg >> '0 .. 1'::seg AS bool; - bool ------- - f -(1 row) - -SELECT '0 .. 2'::seg >> '0 .. 2'::seg AS bool; - bool ------- - f -(1 row) - -SELECT '1 .. 2'::seg >> '0 .. 1'::seg AS bool; - bool ------- - f -(1 row) - -SELECT '2 .. 3'::seg >> '0 .. 1'::seg AS bool; - bool ------- - t -(1 row) - --- "contained in" (the left value belongs within the interval specified in the right value): --- -SELECT '0'::seg <@ '0'::seg AS bool; - bool ------- - t -(1 row) - -SELECT '0'::seg <@ '0 ..'::seg AS bool; - bool ------- - t -(1 row) - -SELECT '0'::seg <@ '.. 0'::seg AS bool; - bool ------- - t -(1 row) - -SELECT '0'::seg <@ '-1 .. 1'::seg AS bool; - bool ------- - t -(1 row) - -SELECT '0'::seg <@ '-1 .. 1'::seg AS bool; - bool ------- - t -(1 row) - -SELECT '-1'::seg <@ '-1 .. 1'::seg AS bool; - bool ------- - t -(1 row) - -SELECT '1'::seg <@ '-1 .. 1'::seg AS bool; - bool ------- - t -(1 row) - -SELECT '-1 .. 1'::seg <@ '-1 .. 1'::seg AS bool; - bool ------- - t -(1 row) - --- "contains" (the left value contains the interval specified in the right value): --- -SELECT '0'::seg @> '0'::seg AS bool; - bool ------- - t -(1 row) - -SELECT '0 .. '::seg <@ '0'::seg AS bool; - bool ------- - f -(1 row) - -SELECT '.. 
0'::seg <@ '0'::seg AS bool; - bool ------- - f -(1 row) - -SELECT '-1 .. 1'::seg <@ '0'::seg AS bool; - bool ------- - f -(1 row) - -SELECT '0'::seg <@ '-1 .. 1'::seg AS bool; - bool ------- - t -(1 row) - -SELECT '-1'::seg <@ '-1 .. 1'::seg AS bool; - bool ------- - t -(1 row) - -SELECT '1'::seg <@ '-1 .. 1'::seg AS bool; - bool ------- - t -(1 row) - --- Load some example data and build the index --- -CREATE TABLE test_seg (s seg); -\copy test_seg from 'data/test_seg.data' -CREATE INDEX test_seg_ix ON test_seg USING gist (s); -SELECT count(*) FROM test_seg WHERE s @> '11..11.3'; - count -------- - 143 -(1 row) - --- Test sorting -SELECT * FROM test_seg WHERE s @> '11..11.3' GROUP BY s; - s ------------------ - .. 4.0e1 - .. >8.2e1 - .. 9.0e1 - <1.0 .. >13.0 - 1.3 .. 12.0 - 2.0 .. 11.5 - 2.1 .. 11.8 - <2.3 .. - >2.3 .. - 2.4 .. 11.3 - 2.5 .. 11.5 - 2.5 .. 11.8 - 2.6 .. - 2.7 .. 12.0 - <3.0 .. - 3 .. 5.8e1 - 3.1 .. 11.5 - 3.5 .. 11.5 - 3.5 .. 12.2 - <4.0 .. >1.2e1 - <4.0 .. - 4 .. 1.2e1 - 4.0 .. 11.7 - 4.0 .. 12.5 - 4.0 .. 13.0 - 4.0 .. 6.0e1 - 4.0 .. - 4.2 .. 11.5 - 4.2 .. 11.7 - <4.5 .. >1.2e1 - 4.5 .. 11.5 - 4.5 .. <1.2e1 - 4.5 .. >1.2e1 - 4.5 .. 12.5 - 4.5 .. 1.15e2 - 4.7 .. 11.8 - 4.8 .. 11.5 - 4.8 .. 11.6 - 4.8 .. 12.5 - 4.8 .. - 4.9 .. >1.2e1 - 4.9 .. - 5 .. 11.5 - 5 .. 1.2e1 - 5 .. 3.0e1 - 5.0 .. 11.4 - 5.0 .. 11.5 - 5.0 .. 11.6 - 5.0 .. 11.7 - 5.0 .. 12.0 - 5.0 .. >12.0 - 5.0 .. >1.2e1 - 5.2 .. 11.5 - 5.2 .. >1.2e1 - 5.25 .. >1.2e1 - 5.3 .. 11.5 - 5.3 .. 1.3e1 - 5.3 .. >9.0e1 - 5.3 .. - 5.4 .. - 5.5 .. 11.5 - 5.5 .. 11.7 - 5.5 .. 1.2e1 - 5.5 .. >1.2e1 - 5.5 .. 12.5 - 5.5 .. 13.5 - 5.5 .. - >5.5 .. - 5.7 .. - 5.9 .. - 6 .. 11.5 - 6 .. >1.2e1 - 6.0 .. 11.5 - 6.0 .. 1.3e1 - >6.0 .. <11.5 - 6.1 .. >1.2e1 - 6.1 .. - 6.2 .. >11.5 - 6.3 .. - 6.5 .. 11.5 - 6.5 .. 12.0 - 6.5 .. >12.0 - 6.5 .. - 6.6 .. - 6.7 .. 11.5 - 6.7 .. - 6.75 .. - 6.8 .. - 6.9 .. 12.2 - 6.9 .. >9.0e1 - 6.9 .. - <7.0 .. >11.5 - 7.0 .. 11.5 - 7.0 .. >11.5 - 7.0 .. - >7.15 .. - 7.2 .. 13.5 - 7.3 .. >9.0e1 - 7.3 .. - >7.3 .. - 7.4 .. 12.1 - 7.4 .. - 7.5 .. 11.5 - 7.5 .. 12.0 - 7.5 .. - 7.7 .. 11.5 - 7.7 .. - 7.75 .. - 8.0 .. 11.7 - 8.0 .. 12.0 - 8.0 .. >13.0 - 8.2 .. - 8.3 .. - 8.5 .. >11.5 - 8.5 .. 12.5 - 8.5 .. - 8.6 .. >9.9e1 - 8.7 .. 11.3 - 8.7 .. 11.7 - 8.9 .. 11.5 - 9 .. >1.2e1 - 9.0 .. 11.3 - 9.0 .. 11.5 - 9.0 .. 1.2e1 - 9.0 .. - 9.2 .. 1.2e1 - 9.4 .. 12.2 - <9.5 .. 1.2e1 - <9.5 .. >12.2 - 9.5 .. - 9.6 .. 11.5 - 9.7 .. 11.5 - 9.7 .. >1.2e1 - 9.8 .. >12.5 - <1.0e1 .. >11.6 - 10.0 .. 11.5 - 10.0 .. 12.5 - 10.0 .. >12.5 - 10.2 .. 11.8 - <10.5 .. 11.5 - 10.5 .. 11.5 - 10.5 .. <13.5 - 10.7 .. 
12.3 -(143 rows) - --- Test functions -SELECT seg_lower(s), seg_center(s), seg_upper(s) -FROM test_seg WHERE s @> '11.2..11.3' OR s IS NULL ORDER BY s; - seg_lower | seg_center | seg_upper ------------+------------+----------- - -Infinity | -Infinity | 40 - -Infinity | -Infinity | 82 - -Infinity | -Infinity | 90 - 1 | 7 | 13 - 1.3 | 6.65 | 12 - 2 | 6.75 | 11.5 - 2.1 | 6.95 | 11.8 - 2.3 | Infinity | Infinity - 2.3 | Infinity | Infinity - 2.4 | 6.85 | 11.3 - 2.5 | 7 | 11.5 - 2.5 | 7.15 | 11.8 - 2.6 | Infinity | Infinity - 2.7 | 7.35 | 12 - 3 | Infinity | Infinity - 3 | 30.5 | 58 - 3.1 | 7.3 | 11.5 - 3.5 | 7.5 | 11.5 - 3.5 | 7.85 | 12.2 - 4 | 8 | 12 - 4 | Infinity | Infinity - 4 | 8 | 12 - 4 | 7.85 | 11.7 - 4 | 8.25 | 12.5 - 4 | 8.5 | 13 - 4 | 32 | 60 - 4 | Infinity | Infinity - 4.2 | 7.85 | 11.5 - 4.2 | 7.95 | 11.7 - 4.5 | 8.25 | 12 - 4.5 | 8 | 11.5 - 4.5 | 8.25 | 12 - 4.5 | 8.25 | 12 - 4.5 | 8.5 | 12.5 - 4.5 | 59.75 | 115 - 4.7 | 8.25 | 11.8 - 4.8 | 8.15 | 11.5 - 4.8 | 8.2 | 11.6 - 4.8 | 8.65 | 12.5 - 4.8 | Infinity | Infinity - 4.9 | 8.45 | 12 - 4.9 | Infinity | Infinity - 5 | 8.25 | 11.5 - 5 | 8.5 | 12 - 5 | 17.5 | 30 - 5 | 8.2 | 11.4 - 5 | 8.25 | 11.5 - 5 | 8.3 | 11.6 - 5 | 8.35 | 11.7 - 5 | 8.5 | 12 - 5 | 8.5 | 12 - 5 | 8.5 | 12 - 5.2 | 8.35 | 11.5 - 5.2 | 8.6 | 12 - 5.25 | 8.625 | 12 - 5.3 | 8.4 | 11.5 - 5.3 | 9.15 | 13 - 5.3 | 47.65 | 90 - 5.3 | Infinity | Infinity - 5.4 | Infinity | Infinity - 5.5 | 8.5 | 11.5 - 5.5 | 8.6 | 11.7 - 5.5 | 8.75 | 12 - 5.5 | 8.75 | 12 - 5.5 | 9 | 12.5 - 5.5 | 9.5 | 13.5 - 5.5 | Infinity | Infinity - 5.5 | Infinity | Infinity - 5.7 | Infinity | Infinity - 5.9 | Infinity | Infinity - 6 | 8.75 | 11.5 - 6 | 9 | 12 - 6 | 8.75 | 11.5 - 6 | 9.5 | 13 - 6 | 8.75 | 11.5 - 6.1 | 9.05 | 12 - 6.1 | Infinity | Infinity - 6.2 | 8.85 | 11.5 - 6.3 | Infinity | Infinity - 6.5 | 9 | 11.5 - 6.5 | 9.25 | 12 - 6.5 | 9.25 | 12 - 6.5 | Infinity | Infinity - 6.6 | Infinity | Infinity - 6.7 | 9.1 | 11.5 - 6.7 | Infinity | Infinity - 6.75 | Infinity | Infinity - 6.8 | Infinity | Infinity - 6.9 | 9.55 | 12.2 - 6.9 | 48.45 | 90 - 6.9 | Infinity | Infinity - 7 | 9.25 | 11.5 - 7 | 9.25 | 11.5 - 7 | 9.25 | 11.5 - 7 | Infinity | Infinity - 7.15 | Infinity | Infinity - 7.2 | 10.35 | 13.5 - 7.3 | 48.65 | 90 - 7.3 | Infinity | Infinity - 7.3 | Infinity | Infinity - 7.4 | 9.75 | 12.1 - 7.4 | Infinity | Infinity - 7.5 | 9.5 | 11.5 - 7.5 | 9.75 | 12 - 7.5 | Infinity | Infinity - 7.7 | 9.6 | 11.5 - 7.7 | Infinity | Infinity - 7.75 | Infinity | Infinity - 8 | 9.85 | 11.7 - 8 | 10 | 12 - 8 | 10.5 | 13 - 8.2 | Infinity | Infinity - 8.3 | Infinity | Infinity - 8.5 | 10 | 11.5 - 8.5 | 10.5 | 12.5 - 8.5 | Infinity | Infinity - 8.6 | 53.8 | 99 - 8.7 | 10 | 11.3 - 8.7 | 10.2 | 11.7 - 8.9 | 10.2 | 11.5 - 9 | 10.5 | 12 - 9 | 10.15 | 11.3 - 9 | 10.25 | 11.5 - 9 | 10.5 | 12 - 9 | Infinity | Infinity - 9.2 | 10.6 | 12 - 9.4 | 10.8 | 12.2 - 9.5 | 10.75 | 12 - 9.5 | 10.85 | 12.2 - 9.5 | Infinity | Infinity - 9.6 | 10.55 | 11.5 - 9.7 | 10.6 | 11.5 - 9.7 | 10.85 | 12 - 9.8 | 11.15 | 12.5 - 10 | 10.8 | 11.6 - 10 | 10.75 | 11.5 - 10 | 11.25 | 12.5 - 10 | 11.25 | 12.5 - 10.2 | 11 | 11.8 - 10.5 | 11 | 11.5 - 10.5 | 11 | 11.5 - 10.5 | 12 | 13.5 - 10.7 | 11.5 | 12.3 - | | -(144 rows) - diff --git a/contrib/seg/seg--1.1--1.2.sql b/contrib/seg/seg--1.1--1.2.sql new file mode 100644 index 0000000000..a6e4456f07 --- /dev/null +++ b/contrib/seg/seg--1.1--1.2.sql @@ -0,0 +1,14 @@ +/* contrib/seg/seg--1.1--1.2.sql */ + +-- complain if script is sourced in psql, rather than via ALTER EXTENSION +\echo Use "ALTER EXTENSION 
seg UPDATE TO '1.2'" to load this file. \quit + +ALTER OPERATOR <= (seg, seg) SET ( + RESTRICT = scalarlesel, + JOIN = scalarlejoinsel +); + +ALTER OPERATOR >= (seg, seg) SET ( + RESTRICT = scalargesel, + JOIN = scalargejoinsel +); diff --git a/contrib/seg/seg--1.2--1.3.sql b/contrib/seg/seg--1.2--1.3.sql new file mode 100644 index 0000000000..cd71a300f6 --- /dev/null +++ b/contrib/seg/seg--1.2--1.3.sql @@ -0,0 +1,45 @@ +/* contrib/seg/seg--1.2--1.3.sql */ + +-- complain if script is sourced in psql, rather than via ALTER EXTENSION +\echo Use "ALTER EXTENSION seg UPDATE TO '1.3'" to load this file. \quit + +-- +-- Get rid of unnecessary compress and decompress support functions. +-- +-- To be allowed to drop the opclass entry for a support function, +-- we must change the entry's dependency type from 'internal' to 'auto', +-- as though it were a loose member of the opfamily rather than being +-- bound into a particular opclass. There's no SQL command for that, +-- so fake it with a manual update on pg_depend. +-- +UPDATE pg_catalog.pg_depend +SET deptype = 'a' +WHERE classid = 'pg_catalog.pg_amproc'::pg_catalog.regclass + AND objid = + (SELECT objid + FROM pg_catalog.pg_depend + WHERE classid = 'pg_catalog.pg_amproc'::pg_catalog.regclass + AND refclassid = 'pg_catalog.pg_proc'::pg_catalog.regclass + AND (refobjid = 'gseg_compress(pg_catalog.internal)'::pg_catalog.regprocedure)) + AND refclassid = 'pg_catalog.pg_opclass'::pg_catalog.regclass + AND deptype = 'i'; + +ALTER OPERATOR FAMILY gist_seg_ops USING gist drop function 3 (seg); +ALTER EXTENSION seg DROP function gseg_compress(pg_catalog.internal); +DROP function gseg_compress(pg_catalog.internal); + +UPDATE pg_catalog.pg_depend +SET deptype = 'a' +WHERE classid = 'pg_catalog.pg_amproc'::pg_catalog.regclass + AND objid = + (SELECT objid + FROM pg_catalog.pg_depend + WHERE classid = 'pg_catalog.pg_amproc'::pg_catalog.regclass + AND refclassid = 'pg_catalog.pg_proc'::pg_catalog.regclass + AND (refobjid = 'gseg_decompress(pg_catalog.internal)'::pg_catalog.regprocedure)) + AND refclassid = 'pg_catalog.pg_opclass'::pg_catalog.regclass + AND deptype = 'i'; + +ALTER OPERATOR FAMILY gist_seg_ops USING gist drop function 4 (seg); +ALTER EXTENSION seg DROP function gseg_decompress(pg_catalog.internal); +DROP function gseg_decompress(pg_catalog.internal); diff --git a/contrib/seg/seg.c b/contrib/seg/seg.c index 4fc18130e1..4e34fba7c7 100644 --- a/contrib/seg/seg.c +++ b/contrib/seg/seg.c @@ -188,7 +188,7 @@ seg_upper(PG_FUNCTION_ARGS) /* ** The GiST Consistent method for segments ** Should return false if for all data items x below entry, -** the predicate x op query == FALSE, where op is the oper +** the predicate x op query == false, where op is the oper ** corresponding to strategy in the pg_amop table. */ Datum @@ -413,9 +413,9 @@ gseg_same(PG_FUNCTION_ARGS) bool *result = (bool *) PG_GETARG_POINTER(2); if (DirectFunctionCall2(seg_same, PG_GETARG_DATUM(0), PG_GETARG_DATUM(1))) - *result = TRUE; + *result = true; else - *result = FALSE; + *result = false; #ifdef GIST_DEBUG fprintf(stderr, "same: %s\n", (*result ? 
"TRUE" : "FALSE")); @@ -465,7 +465,7 @@ gseg_leaf_consistent(Datum key, Datum query, StrategyNumber strategy) retval = DirectFunctionCall2(seg_contained, key, query); break; default: - retval = FALSE; + retval = false; } PG_RETURN_DATUM(retval); @@ -514,7 +514,7 @@ gseg_internal_consistent(Datum key, Datum query, StrategyNumber strategy) DatumGetBool(DirectFunctionCall2(seg_overlap, key, query)); break; default: - retval = FALSE; + retval = false; } PG_RETURN_BOOL(retval); @@ -528,7 +528,7 @@ gseg_binary_union(Datum r1, Datum r2, int *sizep) retval = DirectFunctionCall2(seg_union, r1, r2); *sizep = sizeof(SEG); - return (retval); + return retval; } @@ -1040,7 +1040,7 @@ restore(char *result, float val, int n) /* ... this is not done yet. */ } - return (strlen(result)); + return strlen(result); } @@ -1052,9 +1052,9 @@ restore(char *result, float val, int n) * a floating point number */ int -significant_digits(char *s) +significant_digits(const char *s) { - char *p = s; + const char *p = s; int n, c, zeroes; @@ -1080,7 +1080,7 @@ significant_digits(char *s) } if (!n) - return (zeroes); + return zeroes; - return (n); + return n; } diff --git a/contrib/seg/seg.control b/contrib/seg/seg.control index f210cf5e04..d697cd6c2a 100644 --- a/contrib/seg/seg.control +++ b/contrib/seg/seg.control @@ -1,5 +1,5 @@ # seg extension comment = 'data type for representing line segments or floating-point intervals' -default_version = '1.1' +default_version = '1.3' module_pathname = '$libdir/seg' relocatable = true diff --git a/contrib/seg/segdata.h b/contrib/seg/segdata.h index cac68ee2b2..9488bf3a81 100644 --- a/contrib/seg/segdata.h +++ b/contrib/seg/segdata.h @@ -12,7 +12,7 @@ typedef struct SEG } SEG; /* in seg.c */ -extern int significant_digits(char *str); +extern int significant_digits(const char *str); /* in segscan.l */ extern int seg_yylex(void); diff --git a/contrib/seg/segparse.y b/contrib/seg/segparse.y index 045ff91f3e..040cab3904 100644 --- a/contrib/seg/segparse.y +++ b/contrib/seg/segparse.y @@ -21,7 +21,7 @@ #define YYMALLOC palloc #define YYFREE pfree -static float seg_atof(char *value); +static float seg_atof(const char *value); static char strbuf[25] = { '0', '0', '0', '0', '0', @@ -151,7 +151,7 @@ deviation: SEGFLOAT static float -seg_atof(char *value) +seg_atof(const char *value) { Datum datum; diff --git a/contrib/seg/segscan.l b/contrib/seg/segscan.l index 6db24fdd1f..5f6595e9eb 100644 --- a/contrib/seg/segscan.l +++ b/contrib/seg/segscan.l @@ -3,6 +3,8 @@ * A scanner for EMP-style numeric ranges */ +/* LCOV_EXCL_START */ + /* No reason to constrain amount of data slurped */ #define YY_READ_BUF_SIZE 16777216 @@ -51,6 +53,8 @@ float ({integer}|{real})([eE]{integer})? 
%% +/* LCOV_EXCL_STOP */ + void yyerror(SEG *result, const char *message) { diff --git a/contrib/seg/sql/seg.sql b/contrib/seg/sql/seg.sql index aa91931474..1d7bad7c37 100644 --- a/contrib/seg/sql/seg.sql +++ b/contrib/seg/sql/seg.sql @@ -216,7 +216,16 @@ CREATE TABLE test_seg (s seg); \copy test_seg from 'data/test_seg.data' CREATE INDEX test_seg_ix ON test_seg USING gist (s); + +EXPLAIN (COSTS OFF) +SELECT count(*) FROM test_seg WHERE s @> '11..11.3'; +SELECT count(*) FROM test_seg WHERE s @> '11..11.3'; + +SET enable_bitmapscan = false; +EXPLAIN (COSTS OFF) +SELECT count(*) FROM test_seg WHERE s @> '11..11.3'; SELECT count(*) FROM test_seg WHERE s @> '11..11.3'; +RESET enable_bitmapscan; -- Test sorting SELECT * FROM test_seg WHERE s @> '11..11.3' GROUP BY s; diff --git a/contrib/sepgsql/database.c b/contrib/sepgsql/database.c index 8fc5a87e00..c641ec3565 100644 --- a/contrib/sepgsql/database.c +++ b/contrib/sepgsql/database.c @@ -4,7 +4,7 @@ * * Routines corresponding to database objects * - * Copyright (c) 2010-2017, PostgreSQL Global Development Group + * Copyright (c) 2010-2018, PostgreSQL Global Development Group * * ------------------------------------------------------------------------- */ diff --git a/contrib/sepgsql/dml.c b/contrib/sepgsql/dml.c index b643720e36..9bdbd7b60f 100644 --- a/contrib/sepgsql/dml.c +++ b/contrib/sepgsql/dml.c @@ -4,7 +4,7 @@ * * Routines to handle DML permission checks * - * Copyright (c) 2010-2017, PostgreSQL Global Development Group + * Copyright (c) 2010-2018, PostgreSQL Global Development Group * * ------------------------------------------------------------------------- */ @@ -18,7 +18,7 @@ #include "catalog/dependency.h" #include "catalog/pg_attribute.h" #include "catalog/pg_class.h" -#include "catalog/pg_inherits_fn.h" +#include "catalog/pg_inherits.h" #include "commands/seclabel.h" #include "commands/tablecmds.h" #include "executor/executor.h" @@ -118,10 +118,7 @@ fixup_inherited_columns(Oid parentId, Oid childId, Bitmapset *columns) continue; } - attname = get_attname(parentId, attno); - if (!attname) - elog(ERROR, "cache lookup failed for attribute %d of relation %u", - attno, parentId); + attname = get_attname(parentId, attno, false); attno = get_attnum(childId, attname); if (attno == InvalidAttrNumber) elog(ERROR, "cache lookup failed for attribute %s of relation %u", diff --git a/contrib/sepgsql/expected/alter.out b/contrib/sepgsql/expected/alter.out index 0948139f93..e1d31e5b2f 100644 --- a/contrib/sepgsql/expected/alter.out +++ b/contrib/sepgsql/expected/alter.out @@ -179,9 +179,9 @@ LOG: SELinux: allowed { search } scontext=unconfined_u:unconfined_r:sepgsql_reg LOG: SELinux: allowed { search } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0 tcontext=system_u:object_r:sepgsql_schema_t:s0 tclass=db_schema name="pg_catalog" LOG: SELinux: allowed { search } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0 tcontext=system_u:object_r:sepgsql_schema_t:s0 tclass=db_schema name="pg_catalog" LOG: SELinux: allowed { select } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_table name="regtest_schema_2.regtest_table" -LOG: SELinux: allowed { select } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="table regtest_table column a" +LOG: SELinux: allowed { select } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0 
tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="column a of table regtest_table" LOG: SELinux: allowed { select } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_table name="regtest_schema.regtest_table_3" -LOG: SELinux: allowed { select } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="table regtest_table_3 column x" +LOG: SELinux: allowed { select } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="column x of table regtest_table_3" LOG: SELinux: allowed { search } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0 tcontext=unconfined_u:object_r:sepgsql_schema_t:s0 tclass=db_schema name="regtest_schema_2" LINE 1: SELECT fk."a" FROM ONLY "regtest_schema_2"."regtest_table" f... ^ @@ -196,9 +196,9 @@ LINE 1: ..."regtest_schema"."regtest_table_3" pk ON ( pk."x" OPERATOR(p... QUERY: SELECT fk."a" FROM ONLY "regtest_schema_2"."regtest_table" fk LEFT OUTER JOIN ONLY "regtest_schema"."regtest_table_3" pk ON ( pk."x" OPERATOR(pg_catalog.=) fk."a") WHERE pk."x" IS NULL AND (fk."a" IS NOT NULL) LOG: SELinux: allowed { search } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0 tcontext=system_u:object_r:sepgsql_schema_t:s0 tclass=db_schema name="pg_catalog" LOG: SELinux: allowed { select } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_table name="regtest_schema_2.regtest_table" -LOG: SELinux: allowed { select } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="table regtest_table column a" +LOG: SELinux: allowed { select } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="column a of table regtest_table" LOG: SELinux: allowed { select } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_table name="regtest_schema.regtest_table_3" -LOG: SELinux: allowed { select } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="table regtest_table_3 column x" +LOG: SELinux: allowed { select } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="column x of table regtest_table_3" LOG: SELinux: allowed { execute } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0 tcontext=system_u:object_r:sepgsql_proc_exec_t:s0 tclass=db_procedure name="pg_catalog.int4eq(integer,integer)" ALTER TABLE regtest_table ADD CONSTRAINT test_ck CHECK (b like '%abc%') NOT VALID; -- not supported ALTER TABLE regtest_table VALIDATE CONSTRAINT test_ck; -- not supported diff --git a/contrib/sepgsql/expected/misc.out b/contrib/sepgsql/expected/misc.out index 98f8005a60..f37d98154f 100644 --- a/contrib/sepgsql/expected/misc.out +++ b/contrib/sepgsql/expected/misc.out @@ -17,8 +17,8 @@ SET client_min_messages = log; -- regular function and operators SELECT * FROM t1 WHERE x > 50 AND y like '%64%'; LOG: SELinux: allowed { select } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0-s0:c0.c255 
tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_table name="public.t1" -LOG: SELinux: allowed { select } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0-s0:c0.c255 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="table t1 column x" -LOG: SELinux: allowed { select } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0-s0:c0.c255 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="table t1 column y" +LOG: SELinux: allowed { select } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0-s0:c0.c255 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="column x of table t1" +LOG: SELinux: allowed { select } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0-s0:c0.c255 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="column y of table t1" LOG: SELinux: allowed { execute } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0-s0:c0.c255 tcontext=system_u:object_r:sepgsql_proc_exec_t:s0 tclass=db_procedure name="pg_catalog.int4gt(integer,integer)" LOG: SELinux: allowed { execute } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0-s0:c0.c255 tcontext=system_u:object_r:sepgsql_proc_exec_t:s0 tclass=db_procedure name="pg_catalog.textlike(pg_catalog.text,pg_catalog.text)" x | y @@ -32,18 +32,15 @@ LOG: SELinux: allowed { execute } scontext=unconfined_u:unconfined_r:sepgsql_re (6 rows) SELECT * FROM t1p WHERE o > 50 AND p like '%64%'; -LOG: SELinux: allowed { execute } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0-s0:c0.c255 tcontext=system_u:object_r:sepgsql_proc_exec_t:s0 tclass=db_procedure name="pg_catalog.int4le(integer,integer)" -LOG: SELinux: allowed { execute } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0-s0:c0.c255 tcontext=system_u:object_r:sepgsql_proc_exec_t:s0 tclass=db_procedure name="pg_catalog.int4le(integer,integer)" -LOG: SELinux: allowed { execute } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0-s0:c0.c255 tcontext=system_u:object_r:sepgsql_proc_exec_t:s0 tclass=db_procedure name="pg_catalog.int4le(integer,integer)" LOG: SELinux: allowed { select } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0-s0:c0.c255 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_table name="public.t1p" -LOG: SELinux: allowed { select } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0-s0:c0.c255 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="table t1p column o" -LOG: SELinux: allowed { select } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0-s0:c0.c255 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="table t1p column p" +LOG: SELinux: allowed { select } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0-s0:c0.c255 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="column o of table t1p" +LOG: SELinux: allowed { select } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0-s0:c0.c255 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="column p of table t1p" LOG: SELinux: allowed { select } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0-s0:c0.c255 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_table name="public.t1p_ones" -LOG: SELinux: allowed { select } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0-s0:c0.c255 
tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="table t1p_ones column o" -LOG: SELinux: allowed { select } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0-s0:c0.c255 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="table t1p_ones column p" +LOG: SELinux: allowed { select } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0-s0:c0.c255 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="column o of table t1p_ones" +LOG: SELinux: allowed { select } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0-s0:c0.c255 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="column p of table t1p_ones" LOG: SELinux: allowed { select } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0-s0:c0.c255 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_table name="public.t1p_tens" -LOG: SELinux: allowed { select } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0-s0:c0.c255 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="table t1p_tens column o" -LOG: SELinux: allowed { select } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0-s0:c0.c255 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="table t1p_tens column p" +LOG: SELinux: allowed { select } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0-s0:c0.c255 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="column o of table t1p_tens" +LOG: SELinux: allowed { select } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0-s0:c0.c255 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="column p of table t1p_tens" LOG: SELinux: allowed { execute } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0-s0:c0.c255 tcontext=system_u:object_r:sepgsql_proc_exec_t:s0 tclass=db_procedure name="pg_catalog.int4gt(integer,integer)" LOG: SELinux: allowed { execute } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0-s0:c0.c255 tcontext=system_u:object_r:sepgsql_proc_exec_t:s0 tclass=db_procedure name="pg_catalog.textlike(pg_catalog.text,pg_catalog.text)" o | p @@ -57,8 +54,8 @@ LOG: SELinux: allowed { execute } scontext=unconfined_u:unconfined_r:sepgsql_re SELECT * FROM t1p_ones WHERE o > 50 AND p like '%64%'; LOG: SELinux: allowed { select } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0-s0:c0.c255 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_table name="public.t1p_ones" -LOG: SELinux: allowed { select } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0-s0:c0.c255 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="table t1p_ones column o" -LOG: SELinux: allowed { select } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0-s0:c0.c255 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="table t1p_ones column p" +LOG: SELinux: allowed { select } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0-s0:c0.c255 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="column o of table t1p_ones" +LOG: SELinux: allowed { select } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0-s0:c0.c255 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="column p of table t1p_ones" LOG: SELinux: allowed { execute } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0-s0:c0.c255 
tcontext=system_u:object_r:sepgsql_proc_exec_t:s0 tclass=db_procedure name="pg_catalog.int4gt(integer,integer)" LOG: SELinux: allowed { execute } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0-s0:c0.c255 tcontext=system_u:object_r:sepgsql_proc_exec_t:s0 tclass=db_procedure name="pg_catalog.textlike(pg_catalog.text,pg_catalog.text)" o | p @@ -67,8 +64,8 @@ LOG: SELinux: allowed { execute } scontext=unconfined_u:unconfined_r:sepgsql_re SELECT * FROM t1p_tens WHERE o > 50 AND p like '%64%'; LOG: SELinux: allowed { select } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0-s0:c0.c255 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_table name="public.t1p_tens" -LOG: SELinux: allowed { select } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0-s0:c0.c255 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="table t1p_tens column o" -LOG: SELinux: allowed { select } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0-s0:c0.c255 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="table t1p_tens column p" +LOG: SELinux: allowed { select } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0-s0:c0.c255 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="column o of table t1p_tens" +LOG: SELinux: allowed { select } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0-s0:c0.c255 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="column p of table t1p_tens" LOG: SELinux: allowed { execute } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0-s0:c0.c255 tcontext=system_u:object_r:sepgsql_proc_exec_t:s0 tclass=db_procedure name="pg_catalog.int4gt(integer,integer)" LOG: SELinux: allowed { execute } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0-s0:c0.c255 tcontext=system_u:object_r:sepgsql_proc_exec_t:s0 tclass=db_procedure name="pg_catalog.textlike(pg_catalog.text,pg_catalog.text)" o | p @@ -83,7 +80,7 @@ LOG: SELinux: allowed { execute } scontext=unconfined_u:unconfined_r:sepgsql_re -- aggregate function SELECT MIN(x), AVG(x) FROM t1; LOG: SELinux: allowed { select } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0-s0:c0.c255 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_table name="public.t1" -LOG: SELinux: allowed { select } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0-s0:c0.c255 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="table t1 column x" +LOG: SELinux: allowed { select } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0-s0:c0.c255 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="column x of table t1" LOG: SELinux: allowed { execute } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0-s0:c0.c255 tcontext=system_u:object_r:sepgsql_proc_exec_t:s0 tclass=db_procedure name="pg_catalog.avg(integer)" LOG: SELinux: allowed { execute } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0-s0:c0.c255 tcontext=system_u:object_r:sepgsql_proc_exec_t:s0 tclass=db_procedure name="pg_catalog.int4_avg_accum(bigint[],integer)" LOG: SELinux: allowed { execute } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0-s0:c0.c255 tcontext=system_u:object_r:sepgsql_proc_exec_t:s0 tclass=db_procedure name="pg_catalog.int8_avg(bigint[])" @@ -96,11 +93,11 @@ LOG: SELinux: allowed { execute } scontext=unconfined_u:unconfined_r:sepgsql_re SELECT MIN(o), 
AVG(o) FROM t1p; LOG: SELinux: allowed { select } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0-s0:c0.c255 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_table name="public.t1p" -LOG: SELinux: allowed { select } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0-s0:c0.c255 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="table t1p column o" +LOG: SELinux: allowed { select } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0-s0:c0.c255 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="column o of table t1p" LOG: SELinux: allowed { select } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0-s0:c0.c255 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_table name="public.t1p_ones" -LOG: SELinux: allowed { select } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0-s0:c0.c255 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="table t1p_ones column o" +LOG: SELinux: allowed { select } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0-s0:c0.c255 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="column o of table t1p_ones" LOG: SELinux: allowed { select } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0-s0:c0.c255 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_table name="public.t1p_tens" -LOG: SELinux: allowed { select } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0-s0:c0.c255 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="table t1p_tens column o" +LOG: SELinux: allowed { select } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0-s0:c0.c255 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="column o of table t1p_tens" LOG: SELinux: allowed { execute } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0-s0:c0.c255 tcontext=system_u:object_r:sepgsql_proc_exec_t:s0 tclass=db_procedure name="pg_catalog.avg(integer)" LOG: SELinux: allowed { execute } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0-s0:c0.c255 tcontext=system_u:object_r:sepgsql_proc_exec_t:s0 tclass=db_procedure name="pg_catalog.int4_avg_accum(bigint[],integer)" LOG: SELinux: allowed { execute } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0-s0:c0.c255 tcontext=system_u:object_r:sepgsql_proc_exec_t:s0 tclass=db_procedure name="pg_catalog.int8_avg(bigint[])" @@ -113,7 +110,7 @@ LOG: SELinux: allowed { execute } scontext=unconfined_u:unconfined_r:sepgsql_re SELECT MIN(o), AVG(o) FROM t1p_ones; LOG: SELinux: allowed { select } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0-s0:c0.c255 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_table name="public.t1p_ones" -LOG: SELinux: allowed { select } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0-s0:c0.c255 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="table t1p_ones column o" +LOG: SELinux: allowed { select } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0-s0:c0.c255 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="column o of table t1p_ones" LOG: SELinux: allowed { execute } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0-s0:c0.c255 tcontext=system_u:object_r:sepgsql_proc_exec_t:s0 tclass=db_procedure name="pg_catalog.avg(integer)" LOG: SELinux: allowed { execute } 
scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0-s0:c0.c255 tcontext=system_u:object_r:sepgsql_proc_exec_t:s0 tclass=db_procedure name="pg_catalog.int4_avg_accum(bigint[],integer)" LOG: SELinux: allowed { execute } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0-s0:c0.c255 tcontext=system_u:object_r:sepgsql_proc_exec_t:s0 tclass=db_procedure name="pg_catalog.int8_avg(bigint[])" @@ -126,7 +123,7 @@ LOG: SELinux: allowed { execute } scontext=unconfined_u:unconfined_r:sepgsql_re SELECT MIN(o), AVG(o) FROM t1p_tens; LOG: SELinux: allowed { select } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0-s0:c0.c255 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_table name="public.t1p_tens" -LOG: SELinux: allowed { select } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0-s0:c0.c255 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="table t1p_tens column o" +LOG: SELinux: allowed { select } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0-s0:c0.c255 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="column o of table t1p_tens" LOG: SELinux: allowed { execute } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0-s0:c0.c255 tcontext=system_u:object_r:sepgsql_proc_exec_t:s0 tclass=db_procedure name="pg_catalog.avg(integer)" LOG: SELinux: allowed { execute } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0-s0:c0.c255 tcontext=system_u:object_r:sepgsql_proc_exec_t:s0 tclass=db_procedure name="pg_catalog.int4_avg_accum(bigint[],integer)" LOG: SELinux: allowed { execute } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0-s0:c0.c255 tcontext=system_u:object_r:sepgsql_proc_exec_t:s0 tclass=db_procedure name="pg_catalog.int8_avg(bigint[])" @@ -140,9 +137,10 @@ LOG: SELinux: allowed { execute } scontext=unconfined_u:unconfined_r:sepgsql_re -- window function SELECT row_number() OVER (order by x), * FROM t1 WHERE y like '%86%'; LOG: SELinux: allowed { select } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0-s0:c0.c255 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_table name="public.t1" -LOG: SELinux: allowed { select } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0-s0:c0.c255 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="table t1 column x" -LOG: SELinux: allowed { select } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0-s0:c0.c255 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="table t1 column y" +LOG: SELinux: allowed { select } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0-s0:c0.c255 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="column x of table t1" +LOG: SELinux: allowed { select } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0-s0:c0.c255 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="column y of table t1" LOG: SELinux: allowed { execute } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0-s0:c0.c255 tcontext=system_u:object_r:sepgsql_proc_exec_t:s0 tclass=db_procedure name="pg_catalog.textlike(pg_catalog.text,pg_catalog.text)" +LOG: SELinux: allowed { execute } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0-s0:c0.c255 tcontext=system_u:object_r:sepgsql_proc_exec_t:s0 tclass=db_procedure name="pg_catalog.int4eq(integer,integer)" LOG: SELinux: allowed { execute } 
scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0-s0:c0.c255 tcontext=system_u:object_r:sepgsql_proc_exec_t:s0 tclass=db_procedure name="pg_catalog.row_number()" row_number | x | y ------------+----+---------------------------------- @@ -162,16 +160,17 @@ LOG: SELinux: allowed { execute } scontext=unconfined_u:unconfined_r:sepgsql_re SELECT row_number() OVER (order by o), * FROM t1p WHERE p like '%86%'; LOG: SELinux: allowed { select } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0-s0:c0.c255 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_table name="public.t1p" -LOG: SELinux: allowed { select } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0-s0:c0.c255 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="table t1p column o" -LOG: SELinux: allowed { select } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0-s0:c0.c255 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="table t1p column p" +LOG: SELinux: allowed { select } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0-s0:c0.c255 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="column o of table t1p" +LOG: SELinux: allowed { select } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0-s0:c0.c255 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="column p of table t1p" LOG: SELinux: allowed { select } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0-s0:c0.c255 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_table name="public.t1p_ones" -LOG: SELinux: allowed { select } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0-s0:c0.c255 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="table t1p_ones column o" -LOG: SELinux: allowed { select } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0-s0:c0.c255 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="table t1p_ones column p" +LOG: SELinux: allowed { select } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0-s0:c0.c255 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="column o of table t1p_ones" +LOG: SELinux: allowed { select } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0-s0:c0.c255 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="column p of table t1p_ones" LOG: SELinux: allowed { select } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0-s0:c0.c255 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_table name="public.t1p_tens" -LOG: SELinux: allowed { select } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0-s0:c0.c255 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="table t1p_tens column o" -LOG: SELinux: allowed { select } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0-s0:c0.c255 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="table t1p_tens column p" +LOG: SELinux: allowed { select } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0-s0:c0.c255 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="column o of table t1p_tens" +LOG: SELinux: allowed { select } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0-s0:c0.c255 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="column p of table t1p_tens" LOG: SELinux: allowed { execute } 
scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0-s0:c0.c255 tcontext=system_u:object_r:sepgsql_proc_exec_t:s0 tclass=db_procedure name="pg_catalog.textlike(pg_catalog.text,pg_catalog.text)" LOG: SELinux: allowed { execute } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0-s0:c0.c255 tcontext=system_u:object_r:sepgsql_proc_exec_t:s0 tclass=db_procedure name="pg_catalog.textlike(pg_catalog.text,pg_catalog.text)" +LOG: SELinux: allowed { execute } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0-s0:c0.c255 tcontext=system_u:object_r:sepgsql_proc_exec_t:s0 tclass=db_procedure name="pg_catalog.int4eq(integer,integer)" LOG: SELinux: allowed { execute } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0-s0:c0.c255 tcontext=system_u:object_r:sepgsql_proc_exec_t:s0 tclass=db_procedure name="pg_catalog.row_number()" row_number | o | p ------------+----+---------------------------------- @@ -191,9 +190,10 @@ LOG: SELinux: allowed { execute } scontext=unconfined_u:unconfined_r:sepgsql_re SELECT row_number() OVER (order by o), * FROM t1p_ones WHERE p like '%86%'; LOG: SELinux: allowed { select } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0-s0:c0.c255 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_table name="public.t1p_ones" -LOG: SELinux: allowed { select } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0-s0:c0.c255 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="table t1p_ones column o" -LOG: SELinux: allowed { select } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0-s0:c0.c255 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="table t1p_ones column p" +LOG: SELinux: allowed { select } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0-s0:c0.c255 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="column o of table t1p_ones" +LOG: SELinux: allowed { select } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0-s0:c0.c255 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="column p of table t1p_ones" LOG: SELinux: allowed { execute } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0-s0:c0.c255 tcontext=system_u:object_r:sepgsql_proc_exec_t:s0 tclass=db_procedure name="pg_catalog.textlike(pg_catalog.text,pg_catalog.text)" +LOG: SELinux: allowed { execute } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0-s0:c0.c255 tcontext=system_u:object_r:sepgsql_proc_exec_t:s0 tclass=db_procedure name="pg_catalog.int4eq(integer,integer)" LOG: SELinux: allowed { execute } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0-s0:c0.c255 tcontext=system_u:object_r:sepgsql_proc_exec_t:s0 tclass=db_procedure name="pg_catalog.row_number()" row_number | o | p ------------+---+---------------------------------- @@ -202,9 +202,10 @@ LOG: SELinux: allowed { execute } scontext=unconfined_u:unconfined_r:sepgsql_re SELECT row_number() OVER (order by o), * FROM t1p_tens WHERE p like '%86%'; LOG: SELinux: allowed { select } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0-s0:c0.c255 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_table name="public.t1p_tens" -LOG: SELinux: allowed { select } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0-s0:c0.c255 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="table t1p_tens column o" -LOG: SELinux: allowed { select } 
scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0-s0:c0.c255 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="table t1p_tens column p" +LOG: SELinux: allowed { select } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0-s0:c0.c255 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="column o of table t1p_tens" +LOG: SELinux: allowed { select } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0-s0:c0.c255 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="column p of table t1p_tens" LOG: SELinux: allowed { execute } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0-s0:c0.c255 tcontext=system_u:object_r:sepgsql_proc_exec_t:s0 tclass=db_procedure name="pg_catalog.textlike(pg_catalog.text,pg_catalog.text)" +LOG: SELinux: allowed { execute } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0-s0:c0.c255 tcontext=system_u:object_r:sepgsql_proc_exec_t:s0 tclass=db_procedure name="pg_catalog.int4eq(integer,integer)" LOG: SELinux: allowed { execute } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0-s0:c0.c255 tcontext=system_u:object_r:sepgsql_proc_exec_t:s0 tclass=db_procedure name="pg_catalog.row_number()" row_number | o | p ------------+----+---------------------------------- diff --git a/contrib/sepgsql/hooks.c b/contrib/sepgsql/hooks.c index 5daa60c412..4249ed552c 100644 --- a/contrib/sepgsql/hooks.c +++ b/contrib/sepgsql/hooks.c @@ -4,7 +4,7 @@ * * Entrypoints of the hooks in PostgreSQL, and dispatches the callbacks. * - * Copyright (c) 2010-2017, PostgreSQL Global Development Group + * Copyright (c) 2010-2018, PostgreSQL Global Development Group * * ------------------------------------------------------------------------- */ diff --git a/contrib/sepgsql/label.c b/contrib/sepgsql/label.c index cbb9249be7..dba0986e02 100644 --- a/contrib/sepgsql/label.c +++ b/contrib/sepgsql/label.c @@ -4,7 +4,7 @@ * * Routines to support SELinux labels (security context) * - * Copyright (c) 2010-2017, PostgreSQL Global Development Group + * Copyright (c) 2010-2018, PostgreSQL Global Development Group * * ------------------------------------------------------------------------- */ @@ -12,14 +12,6 @@ #include -/* - * includes , which creates an incompatible - * #define for bool. Get rid of that so we can use our own typedef. - * (We don't care if redefines "true"/"false"; those are close - * enough.) 
- */ -#undef bool - #include "access/heapam.h" #include "access/htup_details.h" #include "access/genam.h" diff --git a/contrib/sepgsql/launcher b/contrib/sepgsql/launcher index 0fc96ea0d4..45139f3750 100755 --- a/contrib/sepgsql/launcher +++ b/contrib/sepgsql/launcher @@ -2,7 +2,7 @@ # # A wrapper script to launch psql command in regression test # -# Copyright (c) 2010-2017, PostgreSQL Global Development Group +# Copyright (c) 2010-2018, PostgreSQL Global Development Group # # ------------------------------------------------------------------------- diff --git a/contrib/sepgsql/proc.c b/contrib/sepgsql/proc.c index 14faa5fac6..c6a817d7c5 100644 --- a/contrib/sepgsql/proc.c +++ b/contrib/sepgsql/proc.c @@ -4,7 +4,7 @@ * * Routines corresponding to procedure objects * - * Copyright (c) 2010-2017, PostgreSQL Global Development Group + * Copyright (c) 2010-2018, PostgreSQL Global Development Group * * ------------------------------------------------------------------------- */ diff --git a/contrib/sepgsql/relation.c b/contrib/sepgsql/relation.c index 228869a520..f0c22715aa 100644 --- a/contrib/sepgsql/relation.c +++ b/contrib/sepgsql/relation.c @@ -4,7 +4,7 @@ * * Routines corresponding to relation/attribute objects * - * Copyright (c) 2010-2017, PostgreSQL Global Development Group + * Copyright (c) 2010-2018, PostgreSQL Global Development Group * * ------------------------------------------------------------------------- */ diff --git a/contrib/sepgsql/schema.c b/contrib/sepgsql/schema.c index d418577b75..bc15a36a45 100644 --- a/contrib/sepgsql/schema.c +++ b/contrib/sepgsql/schema.c @@ -4,7 +4,7 @@ * * Routines corresponding to schema objects * - * Copyright (c) 2010-2017, PostgreSQL Global Development Group + * Copyright (c) 2010-2018, PostgreSQL Global Development Group * * ------------------------------------------------------------------------- */ diff --git a/contrib/sepgsql/selinux.c b/contrib/sepgsql/selinux.c index bf89e83dd6..47def00a46 100644 --- a/contrib/sepgsql/selinux.c +++ b/contrib/sepgsql/selinux.c @@ -5,7 +5,7 @@ * Interactions between userspace and selinux in kernelspace, * using libselinux api. * - * Copyright (c) 2010-2017, PostgreSQL Global Development Group + * Copyright (c) 2010-2018, PostgreSQL Global Development Group * * ------------------------------------------------------------------------- */ diff --git a/contrib/sepgsql/sepgsql.h b/contrib/sepgsql/sepgsql.h index d4bf0cd14a..99adfc522a 100644 --- a/contrib/sepgsql/sepgsql.h +++ b/contrib/sepgsql/sepgsql.h @@ -4,7 +4,7 @@ * * Definitions corresponding to SE-PostgreSQL * - * Copyright (c) 2010-2017, PostgreSQL Global Development Group + * Copyright (c) 2010-2018, PostgreSQL Global Development Group * * ------------------------------------------------------------------------- */ diff --git a/contrib/sepgsql/uavc.c b/contrib/sepgsql/uavc.c index f0915918db..ea276ee0cc 100644 --- a/contrib/sepgsql/uavc.c +++ b/contrib/sepgsql/uavc.c @@ -6,7 +6,7 @@ * access control decisions recently used, and reduce number of kernel * invocations to avoid unnecessary performance hit. 
* - * Copyright (c) 2011-2017, PostgreSQL Global Development Group + * Copyright (c) 2011-2018, PostgreSQL Global Development Group * * ------------------------------------------------------------------------- */ diff --git a/contrib/spi/Makefile b/contrib/spi/Makefile index 10ab5bb5fe..6bc2318e0a 100644 --- a/contrib/spi/Makefile +++ b/contrib/spi/Makefile @@ -1,14 +1,13 @@ # contrib/spi/Makefile -MODULES = autoinc insert_username moddatetime refint timetravel +MODULES = autoinc insert_username moddatetime refint -EXTENSION = autoinc insert_username moddatetime refint timetravel +EXTENSION = autoinc insert_username moddatetime refint DATA = autoinc--1.0.sql autoinc--unpackaged--1.0.sql \ insert_username--1.0.sql insert_username--unpackaged--1.0.sql \ moddatetime--1.0.sql moddatetime--unpackaged--1.0.sql \ - refint--1.0.sql refint--unpackaged--1.0.sql \ - timetravel--1.0.sql timetravel--unpackaged--1.0.sql + refint--1.0.sql refint--unpackaged--1.0.sql PGFILEDESC = "spi - examples of using SPI and triggers" DOCS = $(addsuffix .example, $(MODULES)) @@ -17,8 +16,6 @@ DOCS = $(addsuffix .example, $(MODULES)) # comment out if you want a quieter refint package for other uses PG_CPPFLAGS = -DREFINT_VERBOSE -LDFLAGS_SL += -L$(top_builddir)/src/port -lpgport - ifdef USE_PGXS PG_CONFIG = pg_config PGXS := $(shell $(PG_CONFIG) --pgxs) diff --git a/contrib/spi/refint.c b/contrib/spi/refint.c index 46205c7613..f90f2bce0e 100644 --- a/contrib/spi/refint.c +++ b/contrib/spi/refint.c @@ -182,7 +182,7 @@ check_primary_key(PG_FUNCTION_ARGS) pplan = SPI_prepare(sql, nkeys, argtypes); if (pplan == NULL) /* internal error */ - elog(ERROR, "check_primary_key: SPI_prepare returned %d", SPI_result); + elog(ERROR, "check_primary_key: SPI_prepare returned %s", SPI_result_code_string(SPI_result)); /* * Remember that SPI_prepare places plan in current memory context - @@ -306,7 +306,7 @@ check_foreign_key(PG_FUNCTION_ARGS) /* internal error */ elog(ERROR, "check_foreign_key: too short %d (< 5) list of arguments", nargs); - nrefs = pg_atoi(args[0], sizeof(int), 0); + nrefs = pg_strtoint32(args[0]); if (nrefs < 1) /* internal error */ elog(ERROR, "check_foreign_key: %d (< 1) number of references specified", nrefs); @@ -395,7 +395,7 @@ check_foreign_key(PG_FUNCTION_ARGS) /* this shouldn't happen! SPI_ERROR_NOOUTFUNC ? */ if (oldval == NULL) /* internal error */ - elog(ERROR, "check_foreign_key: SPI_getvalue returned %d", SPI_result); + elog(ERROR, "check_foreign_key: SPI_getvalue returned %s", SPI_result_code_string(SPI_result)); newval = SPI_getvalue(newtuple, tupdesc, fnumber); if (newval == NULL || strcmp(oldval, newval) != 0) isequal = false; @@ -489,7 +489,6 @@ check_foreign_key(PG_FUNCTION_ARGS) " %s = %s%s%s %s ", args2[k], (is_char_type > 0) ? "'" : "", nv, (is_char_type > 0) ? "'" : "", (k < nkeys) ? 
", " : ""); - is_char_type = 0; } strcat(sql, " where "); @@ -529,7 +528,7 @@ check_foreign_key(PG_FUNCTION_ARGS) pplan = SPI_prepare(sql, nkeys, argtypes); if (pplan == NULL) /* internal error */ - elog(ERROR, "check_foreign_key: SPI_prepare returned %d", SPI_result); + elog(ERROR, "check_foreign_key: SPI_prepare returned %s", SPI_result_code_string(SPI_result)); /* * Remember that SPI_prepare places plan in current memory context @@ -636,5 +635,5 @@ find_plan(char *ident, EPlan **eplan, int *nplans) newp->splan = NULL; (*nplans)++; - return (newp); + return newp; } diff --git a/contrib/spi/timetravel--1.0.sql b/contrib/spi/timetravel--1.0.sql deleted file mode 100644 index c34ca09965..0000000000 --- a/contrib/spi/timetravel--1.0.sql +++ /dev/null @@ -1,19 +0,0 @@ -/* contrib/spi/timetravel--1.0.sql */ - --- complain if script is sourced in psql, rather than via CREATE EXTENSION -\echo Use "CREATE EXTENSION timetravel" to load this file. \quit - -CREATE FUNCTION timetravel() -RETURNS trigger -AS 'MODULE_PATHNAME' -LANGUAGE C; - -CREATE FUNCTION set_timetravel(name, int4) -RETURNS int4 -AS 'MODULE_PATHNAME' -LANGUAGE C RETURNS NULL ON NULL INPUT; - -CREATE FUNCTION get_timetravel(name) -RETURNS int4 -AS 'MODULE_PATHNAME' -LANGUAGE C RETURNS NULL ON NULL INPUT; diff --git a/contrib/spi/timetravel--unpackaged--1.0.sql b/contrib/spi/timetravel--unpackaged--1.0.sql deleted file mode 100644 index 121bceba9b..0000000000 --- a/contrib/spi/timetravel--unpackaged--1.0.sql +++ /dev/null @@ -1,8 +0,0 @@ -/* contrib/spi/timetravel--unpackaged--1.0.sql */ - --- complain if script is sourced in psql, rather than via CREATE EXTENSION -\echo Use "CREATE EXTENSION timetravel FROM unpackaged" to load this file. \quit - -ALTER EXTENSION timetravel ADD function timetravel(); -ALTER EXTENSION timetravel ADD function set_timetravel(name,integer); -ALTER EXTENSION timetravel ADD function get_timetravel(name); diff --git a/contrib/spi/timetravel.c b/contrib/spi/timetravel.c deleted file mode 100644 index f7905e20db..0000000000 --- a/contrib/spi/timetravel.c +++ /dev/null @@ -1,553 +0,0 @@ -/* - * contrib/spi/timetravel.c - * - * - * timetravel.c -- function to get time travel feature - * using general triggers. - * - * Modified by BÖJTHE Zoltán, Hungary, mailto:urdesobt@axelero.hu - */ -#include "postgres.h" - -#include - -#include "access/htup_details.h" -#include "catalog/pg_type.h" -#include "commands/trigger.h" -#include "executor/spi.h" -#include "miscadmin.h" -#include "utils/builtins.h" -#include "utils/nabstime.h" -#include "utils/rel.h" - -PG_MODULE_MAGIC; - -/* AbsoluteTime currabstime(void); */ - -typedef struct -{ - char *ident; - SPIPlanPtr splan; -} EPlan; - -static EPlan *Plans = NULL; /* for UPDATE/DELETE */ -static int nPlans = 0; - -typedef struct _TTOffList -{ - struct _TTOffList *next; - char name[FLEXIBLE_ARRAY_MEMBER]; -} TTOffList; - -static TTOffList *TTOff = NULL; - -static int findTTStatus(char *name); -static EPlan *find_plan(char *ident, EPlan **eplan, int *nplans); - -/* - * timetravel () -- - * 1. IF an update affects tuple with stop_date eq INFINITY - * then form (and return) new tuple with start_date eq current date - * and stop_date eq INFINITY [ and update_user eq current user ] - * and all other column values as in new tuple, and insert tuple - * with old data and stop_date eq current date - * ELSE - skip updating of tuple. - * 2. 
IF a delete affects tuple with stop_date eq INFINITY - * then insert the same tuple with stop_date eq current date - * [ and delete_user eq current user ] - * ELSE - skip deletion of tuple. - * 3. On INSERT, if start_date is NULL then current date will be - * inserted, if stop_date is NULL then INFINITY will be inserted. - * [ and insert_user eq current user, update_user and delete_user - * eq NULL ] - * - * In CREATE TRIGGER you are to specify start_date and stop_date column - * names: - * EXECUTE PROCEDURE - * timetravel ('date_on', 'date_off' [,'insert_user', 'update_user', 'delete_user' ] ). - */ - -#define MaxAttrNum 5 -#define MinAttrNum 2 - -#define a_time_on 0 -#define a_time_off 1 -#define a_ins_user 2 -#define a_upd_user 3 -#define a_del_user 4 - -PG_FUNCTION_INFO_V1(timetravel); - -Datum /* have to return HeapTuple to Executor */ -timetravel(PG_FUNCTION_ARGS) -{ - TriggerData *trigdata = (TriggerData *) fcinfo->context; - Trigger *trigger; /* to get trigger name */ - int argc; - char **args; /* arguments */ - int attnum[MaxAttrNum]; /* fnumbers of start/stop columns */ - Datum oldtimeon, - oldtimeoff; - Datum newtimeon, - newtimeoff, - newuser, - nulltext; - Datum *cvals; /* column values */ - char *cnulls; /* column nulls */ - char *relname; /* triggered relation name */ - Relation rel; /* triggered relation */ - HeapTuple trigtuple; - HeapTuple newtuple = NULL; - HeapTuple rettuple; - TupleDesc tupdesc; /* tuple description */ - int natts; /* # of attributes */ - EPlan *plan; /* prepared plan */ - char ident[2 * NAMEDATALEN]; - bool isnull; /* to know is some column NULL or not */ - bool isinsert = false; - int ret; - int i; - - /* - * Some checks first... - */ - - /* Called by trigger manager ? */ - if (!CALLED_AS_TRIGGER(fcinfo)) - elog(ERROR, "timetravel: not fired by trigger manager"); - - /* Should be called for ROW trigger */ - if (!TRIGGER_FIRED_FOR_ROW(trigdata->tg_event)) - elog(ERROR, "timetravel: must be fired for row"); - - /* Should be called BEFORE */ - if (!TRIGGER_FIRED_BEFORE(trigdata->tg_event)) - elog(ERROR, "timetravel: must be fired before event"); - - /* INSERT ? */ - if (TRIGGER_FIRED_BY_INSERT(trigdata->tg_event)) - isinsert = true; - - if (TRIGGER_FIRED_BY_UPDATE(trigdata->tg_event)) - newtuple = trigdata->tg_newtuple; - - trigtuple = trigdata->tg_trigtuple; - - rel = trigdata->tg_relation; - relname = SPI_getrelname(rel); - - /* check if TT is OFF for this relation */ - if (0 == findTTStatus(relname)) - { - /* OFF - nothing to do */ - pfree(relname); - return PointerGetDatum((newtuple != NULL) ? 
newtuple : trigtuple); - } - - trigger = trigdata->tg_trigger; - - argc = trigger->tgnargs; - if (argc != MinAttrNum && argc != MaxAttrNum) - elog(ERROR, "timetravel (%s): invalid (!= %d or %d) number of arguments %d", - relname, MinAttrNum, MaxAttrNum, trigger->tgnargs); - - args = trigger->tgargs; - tupdesc = rel->rd_att; - natts = tupdesc->natts; - - for (i = 0; i < MinAttrNum; i++) - { - attnum[i] = SPI_fnumber(tupdesc, args[i]); - if (attnum[i] <= 0) - elog(ERROR, "timetravel (%s): there is no attribute %s", relname, args[i]); - if (SPI_gettypeid(tupdesc, attnum[i]) != ABSTIMEOID) - elog(ERROR, "timetravel (%s): attribute %s must be of abstime type", - relname, args[i]); - } - for (; i < argc; i++) - { - attnum[i] = SPI_fnumber(tupdesc, args[i]); - if (attnum[i] <= 0) - elog(ERROR, "timetravel (%s): there is no attribute %s", relname, args[i]); - if (SPI_gettypeid(tupdesc, attnum[i]) != TEXTOID) - elog(ERROR, "timetravel (%s): attribute %s must be of text type", - relname, args[i]); - } - - /* create fields containing name */ - newuser = CStringGetTextDatum(GetUserNameFromId(GetUserId(), false)); - - nulltext = (Datum) NULL; - - if (isinsert) - { /* INSERT */ - int chnattrs = 0; - int chattrs[MaxAttrNum]; - Datum newvals[MaxAttrNum]; - bool newnulls[MaxAttrNum]; - - oldtimeon = SPI_getbinval(trigtuple, tupdesc, attnum[a_time_on], &isnull); - if (isnull) - { - newvals[chnattrs] = GetCurrentAbsoluteTime(); - newnulls[chnattrs] = false; - chattrs[chnattrs] = attnum[a_time_on]; - chnattrs++; - } - - oldtimeoff = SPI_getbinval(trigtuple, tupdesc, attnum[a_time_off], &isnull); - if (isnull) - { - if ((chnattrs == 0 && DatumGetInt32(oldtimeon) >= NOEND_ABSTIME) || - (chnattrs > 0 && DatumGetInt32(newvals[a_time_on]) >= NOEND_ABSTIME)) - elog(ERROR, "timetravel (%s): %s is infinity", relname, args[a_time_on]); - newvals[chnattrs] = NOEND_ABSTIME; - newnulls[chnattrs] = false; - chattrs[chnattrs] = attnum[a_time_off]; - chnattrs++; - } - else - { - if ((chnattrs == 0 && DatumGetInt32(oldtimeon) > DatumGetInt32(oldtimeoff)) || - (chnattrs > 0 && DatumGetInt32(newvals[a_time_on]) > DatumGetInt32(oldtimeoff))) - elog(ERROR, "timetravel (%s): %s gt %s", relname, args[a_time_on], args[a_time_off]); - } - - pfree(relname); - if (chnattrs <= 0) - return PointerGetDatum(trigtuple); - - if (argc == MaxAttrNum) - { - /* clear update_user value */ - newvals[chnattrs] = nulltext; - newnulls[chnattrs] = true; - chattrs[chnattrs] = attnum[a_upd_user]; - chnattrs++; - /* clear delete_user value */ - newvals[chnattrs] = nulltext; - newnulls[chnattrs] = true; - chattrs[chnattrs] = attnum[a_del_user]; - chnattrs++; - /* set insert_user value */ - newvals[chnattrs] = newuser; - newnulls[chnattrs] = false; - chattrs[chnattrs] = attnum[a_ins_user]; - chnattrs++; - } - rettuple = heap_modify_tuple_by_cols(trigtuple, tupdesc, - chnattrs, chattrs, - newvals, newnulls); - return PointerGetDatum(rettuple); - /* end of INSERT */ - } - - /* UPDATE/DELETE: */ - oldtimeon = SPI_getbinval(trigtuple, tupdesc, attnum[a_time_on], &isnull); - if (isnull) - elog(ERROR, "timetravel (%s): %s must be NOT NULL", relname, args[a_time_on]); - - oldtimeoff = SPI_getbinval(trigtuple, tupdesc, attnum[a_time_off], &isnull); - if (isnull) - elog(ERROR, "timetravel (%s): %s must be NOT NULL", relname, args[a_time_off]); - - /* - * If DELETE/UPDATE of tuple with stop_date neq INFINITY then say upper - * Executor to skip operation for this tuple - */ - if (newtuple != NULL) - { /* UPDATE */ - newtimeon = SPI_getbinval(newtuple, tupdesc, 
attnum[a_time_on], &isnull); - if (isnull) - elog(ERROR, "timetravel (%s): %s must be NOT NULL", relname, args[a_time_on]); - - newtimeoff = SPI_getbinval(newtuple, tupdesc, attnum[a_time_off], &isnull); - if (isnull) - elog(ERROR, "timetravel (%s): %s must be NOT NULL", relname, args[a_time_off]); - - if (oldtimeon != newtimeon || oldtimeoff != newtimeoff) - elog(ERROR, "timetravel (%s): you cannot change %s and/or %s columns (use set_timetravel)", - relname, args[a_time_on], args[a_time_off]); - } - if (oldtimeoff != NOEND_ABSTIME) - { /* current record is a deleted/updated record */ - pfree(relname); - return PointerGetDatum(NULL); - } - - newtimeoff = GetCurrentAbsoluteTime(); - - /* Connect to SPI manager */ - if ((ret = SPI_connect()) < 0) - elog(ERROR, "timetravel (%s): SPI_connect returned %d", relname, ret); - - /* Fetch tuple values and nulls */ - cvals = (Datum *) palloc(natts * sizeof(Datum)); - cnulls = (char *) palloc(natts * sizeof(char)); - for (i = 0; i < natts; i++) - { - cvals[i] = SPI_getbinval(trigtuple, tupdesc, i + 1, &isnull); - cnulls[i] = (isnull) ? 'n' : ' '; - } - - /* change date column(s) */ - cvals[attnum[a_time_off] - 1] = newtimeoff; /* stop_date eq current date */ - cnulls[attnum[a_time_off] - 1] = ' '; - - if (!newtuple) - { /* DELETE */ - if (argc == MaxAttrNum) - { - cvals[attnum[a_del_user] - 1] = newuser; /* set delete user */ - cnulls[attnum[a_del_user] - 1] = ' '; - } - } - - /* - * Construct ident string as TriggerName $ TriggeredRelationId and try to - * find prepared execution plan. - */ - snprintf(ident, sizeof(ident), "%s$%u", trigger->tgname, rel->rd_id); - plan = find_plan(ident, &Plans, &nPlans); - - /* if there is no plan ... */ - if (plan->splan == NULL) - { - SPIPlanPtr pplan; - Oid *ctypes; - char sql[8192]; - char separ = ' '; - - /* allocate ctypes for preparation */ - ctypes = (Oid *) palloc(natts * sizeof(Oid)); - - /* - * Construct query: INSERT INTO _relation_ VALUES ($1, ...) - */ - snprintf(sql, sizeof(sql), "INSERT INTO %s VALUES (", relname); - for (i = 1; i <= natts; i++) - { - ctypes[i - 1] = SPI_gettypeid(tupdesc, i); - if (!(tupdesc->attrs[i - 1]->attisdropped)) /* skip dropped columns */ - { - snprintf(sql + strlen(sql), sizeof(sql) - strlen(sql), "%c$%d", separ, i); - separ = ','; - } - } - snprintf(sql + strlen(sql), sizeof(sql) - strlen(sql), ")"); - - elog(DEBUG4, "timetravel (%s) update: sql: %s", relname, sql); - - /* Prepare plan for query */ - pplan = SPI_prepare(sql, natts, ctypes); - if (pplan == NULL) - elog(ERROR, "timetravel (%s): SPI_prepare returned %d", relname, SPI_result); - - /* - * Remember that SPI_prepare places plan in current memory context - - * so, we have to save plan in Top memory context for later use. - */ - if (SPI_keepplan(pplan)) - elog(ERROR, "timetravel (%s): SPI_keepplan failed", relname); - - plan->splan = pplan; - } - - /* - * Ok, execute prepared plan. - */ - ret = SPI_execp(plan->splan, cvals, cnulls, 0); - - if (ret < 0) - elog(ERROR, "timetravel (%s): SPI_execp returned %d", relname, ret); - - /* Tuple to return to upper Executor ... 
*/ - if (newtuple) - { /* UPDATE */ - int chnattrs = 0; - int chattrs[MaxAttrNum]; - Datum newvals[MaxAttrNum]; - char newnulls[MaxAttrNum]; - - newvals[chnattrs] = newtimeoff; - newnulls[chnattrs] = ' '; - chattrs[chnattrs] = attnum[a_time_on]; - chnattrs++; - - newvals[chnattrs] = NOEND_ABSTIME; - newnulls[chnattrs] = ' '; - chattrs[chnattrs] = attnum[a_time_off]; - chnattrs++; - - if (argc == MaxAttrNum) - { - /* set update_user value */ - newvals[chnattrs] = newuser; - newnulls[chnattrs] = ' '; - chattrs[chnattrs] = attnum[a_upd_user]; - chnattrs++; - /* clear delete_user value */ - newvals[chnattrs] = nulltext; - newnulls[chnattrs] = 'n'; - chattrs[chnattrs] = attnum[a_del_user]; - chnattrs++; - /* set insert_user value */ - newvals[chnattrs] = nulltext; - newnulls[chnattrs] = 'n'; - chattrs[chnattrs] = attnum[a_ins_user]; - chnattrs++; - } - - /* - * Use SPI_modifytuple() here because we are inside SPI environment - * but rettuple must be allocated in caller's context. - */ - rettuple = SPI_modifytuple(rel, newtuple, chnattrs, chattrs, newvals, newnulls); - } - else - /* DELETE case */ - rettuple = trigtuple; - - SPI_finish(); /* don't forget say Bye to SPI mgr */ - - pfree(relname); - return PointerGetDatum(rettuple); -} - -/* - * set_timetravel (relname, on) -- - * turn timetravel for specified relation ON/OFF - */ -PG_FUNCTION_INFO_V1(set_timetravel); - -Datum -set_timetravel(PG_FUNCTION_ARGS) -{ - Name relname = PG_GETARG_NAME(0); - int32 on = PG_GETARG_INT32(1); - char *rname; - char *d; - char *s; - int32 ret; - TTOffList *prev, - *pp; - - prev = NULL; - for (pp = TTOff; pp; prev = pp, pp = pp->next) - { - if (namestrcmp(relname, pp->name) == 0) - break; - } - if (pp) - { - /* OFF currently */ - if (on != 0) - { - /* turn ON */ - if (prev) - prev->next = pp->next; - else - TTOff = pp->next; - free(pp); - } - ret = 0; - } - else - { - /* ON currently */ - if (on == 0) - { - /* turn OFF */ - s = rname = DatumGetCString(DirectFunctionCall1(nameout, NameGetDatum(relname))); - if (s) - { - pp = malloc(offsetof(TTOffList, name) + strlen(rname) + 1); - if (pp) - { - pp->next = NULL; - d = pp->name; - while (*s) - *d++ = tolower((unsigned char) *s++); - *d = '\0'; - if (prev) - prev->next = pp; - else - TTOff = pp; - } - pfree(rname); - } - } - ret = 1; - } - PG_RETURN_INT32(ret); -} - -/* - * get_timetravel (relname) -- - * get timetravel status for specified relation (ON/OFF) - */ -PG_FUNCTION_INFO_V1(get_timetravel); - -Datum -get_timetravel(PG_FUNCTION_ARGS) -{ - Name relname = PG_GETARG_NAME(0); - TTOffList *pp; - - for (pp = TTOff; pp; pp = pp->next) - { - if (namestrcmp(relname, pp->name) == 0) - PG_RETURN_INT32(0); - } - PG_RETURN_INT32(1); -} - -static int -findTTStatus(char *name) -{ - TTOffList *pp; - - for (pp = TTOff; pp; pp = pp->next) - if (pg_strcasecmp(name, pp->name) == 0) - return 0; - return 1; -} - -/* -AbsoluteTime -currabstime() -{ - return (GetCurrentAbsoluteTime()); -} -*/ - -static EPlan * -find_plan(char *ident, EPlan **eplan, int *nplans) -{ - EPlan *newp; - int i; - - if (*nplans > 0) - { - for (i = 0; i < *nplans; i++) - { - if (strcmp((*eplan)[i].ident, ident) == 0) - break; - } - if (i != *nplans) - return (*eplan + i); - *eplan = (EPlan *) realloc(*eplan, (i + 1) * sizeof(EPlan)); - newp = *eplan + i; - } - else - { - newp = *eplan = (EPlan *) malloc(sizeof(EPlan)); - (*nplans) = i = 0; - } - - newp->ident = strdup(ident); - newp->splan = NULL; - (*nplans)++; - - return (newp); -} diff --git a/contrib/spi/timetravel.control 
b/contrib/spi/timetravel.control deleted file mode 100644 index 9b4bb6ba04..0000000000 --- a/contrib/spi/timetravel.control +++ /dev/null @@ -1,5 +0,0 @@ -# timetravel extension -comment = 'functions for implementing time travel' -default_version = '1.0' -module_pathname = '$libdir/timetravel' -relocatable = true diff --git a/contrib/spi/timetravel.example b/contrib/spi/timetravel.example deleted file mode 100644 index 35a7f65408..0000000000 --- a/contrib/spi/timetravel.example +++ /dev/null @@ -1,81 +0,0 @@ -drop table tttest; - -create table tttest ( - price_id int4, - price_val int4, - price_on abstime, - price_off abstime -); - -create unique index tttest_idx on tttest (price_id,price_off); -alter table tttest add column q1 text; -alter table tttest add column q2 int; -alter table tttest drop column q1; - -create trigger timetravel - before insert or delete or update on tttest - for each row - execute procedure - timetravel (price_on, price_off); - -insert into tttest values (1, 1, null, null); -insert into tttest(price_id, price_val) values (2, 2); -insert into tttest(price_id, price_val,price_off) values (3, 3, 'infinity'); - -insert into tttest(price_id, price_val,price_off) values (4, 4, - abstime('now'::timestamp - '100 days'::interval)); -insert into tttest(price_id, price_val,price_on) values (3, 3, 'infinity'); -- duplicate key - -select * from tttest; -delete from tttest where price_id = 2; -select * from tttest; --- what do we see ? - --- get current prices -select * from tttest where price_off = 'infinity'; - --- change price for price_id == 3 -update tttest set price_val = 30 where price_id = 3; -select * from tttest; - --- now we want to change price_id from 3 to 5 in ALL tuples --- but this gets us not what we need -update tttest set price_id = 5 where price_id = 3; -select * from tttest; - --- restore data as before last update: -select set_timetravel('tttest', 0); -- turn TT OFF! - -select get_timetravel('tttest'); -- check status - -delete from tttest where price_id = 5; -update tttest set price_off = 'infinity' where price_val = 30; -select * from tttest; - --- and try change price_id now! -update tttest set price_id = 5 where price_id = 3; -select * from tttest; --- isn't it what we need ? - -select set_timetravel('tttest', 1); -- turn TT ON! - -select get_timetravel('tttest'); -- check status - --- we want to correct some date -update tttest set price_on = 'Jan-01-1990 00:00:01' where price_id = 5 and - price_off <> 'infinity'; --- but this doesn't work - --- try in this way -select set_timetravel('tttest', 0); -- turn TT OFF! - -select get_timetravel('tttest'); -- check status - -update tttest set price_on = '01-Jan-1990 00:00:01' where price_id = 5 and - price_off <> 'infinity'; -select * from tttest; --- isn't it what we need ? 
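Editorial aside, not part of the patch: with the abstime-based timetravel module and its example being removed here, the same validity-window idea can be sketched with a standard range type. The table and column names below are hypothetical, chosen only to mirror the example being deleted.

-- Hypothetical sketch: one tstzrange column replaces the price_on/price_off pair.
CREATE TABLE price_history (
    price_id  integer,
    price_val integer,
    valid     tstzrange NOT NULL DEFAULT tstzrange(now(), NULL)
);

-- Instead of updating in place, close the current row and insert the new version.
UPDATE price_history
   SET valid = tstzrange(lower(valid), now())
 WHERE price_id = 3 AND upper_inf(valid);
INSERT INTO price_history (price_id, price_val) VALUES (3, 30);

-- "As of" lookup, analogous to the price_on <= d AND price_off > d query below.
SELECT * FROM price_history
 WHERE price_id = 3 AND valid @> '1990-01-10 00:00:00+00'::timestamptz;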
- --- get price for price_id == 5 as it was '10-Jan-1990' -select * from tttest where price_id = 5 and - price_on <= '10-Jan-1990' and price_off > '10-Jan-1990'; diff --git a/contrib/start-scripts/freebsd b/contrib/start-scripts/freebsd index c6ac8cd47a..3323237a54 100644 --- a/contrib/start-scripts/freebsd +++ b/contrib/start-scripts/freebsd @@ -43,7 +43,7 @@ test -x $DAEMON || case $1 in start) - su -l $PGUSER -c "$DAEMON -D '$PGDATA' &" >>$PGLOG 2>&1 + su -l $PGUSER -c "$DAEMON -D '$PGDATA' >>$PGLOG 2>&1 &" echo -n ' postgresql' ;; stop) @@ -51,7 +51,7 @@ case $1 in ;; restart) su -l $PGUSER -c "$PGCTL stop -D '$PGDATA' -s" - su -l $PGUSER -c "$DAEMON -D '$PGDATA' &" >>$PGLOG 2>&1 + su -l $PGUSER -c "$DAEMON -D '$PGDATA' >>$PGLOG 2>&1 &" ;; status) su -l $PGUSER -c "$PGCTL status -D '$PGDATA'" diff --git a/contrib/start-scripts/linux b/contrib/start-scripts/linux index 44a775b030..a7757162fc 100644 --- a/contrib/start-scripts/linux +++ b/contrib/start-scripts/linux @@ -91,7 +91,7 @@ case $1 in start) echo -n "Starting PostgreSQL: " test -e "$PG_OOM_ADJUST_FILE" && echo "$PG_MASTER_OOM_SCORE_ADJ" > "$PG_OOM_ADJUST_FILE" - su - $PGUSER -c "$DAEMON_ENV $DAEMON -D '$PGDATA' &" >>$PGLOG 2>&1 + su - $PGUSER -c "$DAEMON_ENV $DAEMON -D '$PGDATA' >>$PGLOG 2>&1 &" echo "ok" ;; stop) @@ -103,7 +103,7 @@ case $1 in echo -n "Restarting PostgreSQL: " su - $PGUSER -c "$PGCTL stop -D '$PGDATA' -s" test -e "$PG_OOM_ADJUST_FILE" && echo "$PG_MASTER_OOM_SCORE_ADJ" > "$PG_OOM_ADJUST_FILE" - su - $PGUSER -c "$DAEMON_ENV $DAEMON -D '$PGDATA' &" >>$PGLOG 2>&1 + su - $PGUSER -c "$DAEMON_ENV $DAEMON -D '$PGDATA' >>$PGLOG 2>&1 &" echo "ok" ;; reload) diff --git a/contrib/start-scripts/macos/README b/contrib/start-scripts/macos/README new file mode 100644 index 0000000000..c4f2d9a270 --- /dev/null +++ b/contrib/start-scripts/macos/README @@ -0,0 +1,24 @@ +To make macOS automatically launch your PostgreSQL server at system start, +do the following: + +1. Edit the postgres-wrapper.sh script and adjust the file path +variables at its start to reflect where you have installed Postgres, +if that's not /usr/local/pgsql. + +2. Copy the modified postgres-wrapper.sh script into some suitable +installation directory. It can be, but doesn't have to be, where +you keep the Postgres executables themselves. + +3. Edit the org.postgresql.postgres.plist file and adjust its path +for postgres-wrapper.sh to match what you did in step 2. Also, +if you plan to run the Postgres server under some user name other +than "postgres", adjust the UserName parameter value for that. + +4. Copy the modified org.postgresql.postgres.plist file into +/Library/LaunchDaemons/. You must do this as root: + sudo cp org.postgresql.postgres.plist /Library/LaunchDaemons +because the file will be ignored if it is not root-owned. + +At this point a reboot should launch the server. 
But if you want +to test it without rebooting, you can do + sudo launchctl load /Library/LaunchDaemons/org.postgresql.postgres.plist diff --git a/contrib/start-scripts/macos/org.postgresql.postgres.plist b/contrib/start-scripts/macos/org.postgresql.postgres.plist new file mode 100644 index 0000000000..fdbd74f27d --- /dev/null +++ b/contrib/start-scripts/macos/org.postgresql.postgres.plist @@ -0,0 +1,17 @@ + + + + + Label + org.postgresql.postgres + ProgramArguments + + /bin/sh + /usr/local/pgsql/bin/postgres-wrapper.sh + + UserName + postgres + KeepAlive + + + diff --git a/contrib/start-scripts/macos/postgres-wrapper.sh b/contrib/start-scripts/macos/postgres-wrapper.sh new file mode 100644 index 0000000000..3a4ebdaf0f --- /dev/null +++ b/contrib/start-scripts/macos/postgres-wrapper.sh @@ -0,0 +1,25 @@ +#!/bin/sh + +# PostgreSQL server start script (launched by org.postgresql.postgres.plist) + +# edit these as needed: + +# directory containing postgres executable: +PGBINDIR="/usr/local/pgsql/bin" +# data directory: +PGDATA="/usr/local/pgsql/data" +# file to receive postmaster's initial log messages: +PGLOGFILE="${PGDATA}/pgstart.log" + +# (it's recommendable to enable the Postgres logging_collector feature +# so that PGLOGFILE doesn't grow without bound) + + +# set umask to ensure PGLOGFILE is not created world-readable +umask 077 + +# wait for networking to be up (else server may not bind to desired ports) +/usr/sbin/ipconfig waitall + +# and launch the server +exec "$PGBINDIR"/postgres -D "$PGDATA" >>"$PGLOGFILE" 2>&1 diff --git a/contrib/start-scripts/osx/PostgreSQL b/contrib/start-scripts/osx/PostgreSQL deleted file mode 100755 index 7ff1d0e377..0000000000 --- a/contrib/start-scripts/osx/PostgreSQL +++ /dev/null @@ -1,111 +0,0 @@ -#!/bin/sh - -## -# PostgreSQL RDBMS Server -## - -# PostgreSQL boot time startup script for OS X. To install, change -# the "prefix", "PGDATA", "PGUSER", and "PGLOG" variables below as -# necessary. Next, create a new directory, "/Library/StartupItems/PostgreSQL". -# Then copy this script and the accompanying "StartupParameters.plist" file -# into that directory. The name of this script file *must* be the same as the -# directory it is in. So you'll end up with these two files: -# -# /Library/StartupItems/PostgreSQL/PostgreSQL -# /Library/StartupItems/PostgreSQL/StartupParameters.plist -# -# Next, add this line to the /etc/hostconfig file: -# -# POSTGRESQL=-YES- -# -# The startup bundle will now be ready to go. To prevent this script from -# starting PostgreSQL at system startup, simply change that line in -# /etc/hostconfig back to: -# -# POSTGRESQL=-NO- -# -# Created by David Wheeler, 2002 - -# modified by Ray Aspeitia 12-03-2003 : -# added log rotation script to db startup -# modified StartupParameters.plist "Provides" parameter to make it easier to -# start and stop with the SystemStarter utility - -# use the below command in order to correctly start/stop/restart PG with log rotation script: -# SystemStarter [start|stop|restart] PostgreSQL - -################################################################################ -## EDIT FROM HERE -################################################################################ - -# Installation prefix -prefix="/usr/local/pgsql" - -# Data directory -PGDATA="/usr/local/pgsql/data" - -# Who to run the postmaster as, usually "postgres". 
(NOT "root") -PGUSER="postgres" - -# the logfile path and name (NEEDS to be writeable by PGUSER) -PGLOG="${PGDATA}/logs/logfile" - -# do you want to rotate the log files, 1=true 0=false -ROTATELOGS=1 - -# logfile rotate in seconds -ROTATESEC="604800" - - -################################################################################ -## STOP EDITING HERE -################################################################################ - -# The path that is to be used for the script -PATH="$prefix/bin:/usr/local/sbin:/usr/local/bin:/sbin:/bin:/usr/sbin:/usr/bin" - -# What to use to start up the postmaster. (If you want the script to wait -# until the server has started, you could use "pg_ctl start" here.) -DAEMON="$prefix/bin/postmaster" - -# What to use to shut down the postmaster -PGCTL="$prefix/bin/pg_ctl" - -# The apache log rotation utility -LOGUTIL="/usr/sbin/rotatelogs" - -. /etc/rc.common - -StartService () { - if [ "${POSTGRESQL:=-NO-}" = "-YES-" ]; then - ConsoleMessage "Starting PostgreSQL database server" - if [ "${ROTATELOGS}" = "1" ]; then - sudo -u $PGUSER sh -c "${DAEMON} -D '${PGDATA}' &" 2>&1 | ${LOGUTIL} "${PGLOG}" ${ROTATESEC} & - else - sudo -u $PGUSER sh -c "${DAEMON} -D '${PGDATA}' &" >>"$PGLOG" 2>&1 - fi - fi -} - -StopService () { - ConsoleMessage "Stopping PostgreSQL database server" - sudo -u $PGUSER sh -c "$PGCTL stop -D '${PGDATA}' -s" -} - -RestartService () { - if [ "${POSTGRESQL:=-NO-}" = "-YES-" ]; then - ConsoleMessage "Restarting PostgreSQL database server" - # should match StopService: - sudo -u $PGUSER sh -c "$PGCTL stop -D '${PGDATA}' -s" - # should match StartService: - if [ "${ROTATELOGS}" = "1" ]; then - sudo -u $PGUSER sh -c "${DAEMON} -D '${PGDATA}' &" 2>&1 | ${LOGUTIL} "${PGLOG}" ${ROTATESEC} & - else - sudo -u $PGUSER sh -c "${DAEMON} -D '${PGDATA}' &" >>"$PGLOG" 2>&1 - fi - else - StopService - fi -} - -RunService "$1" diff --git a/contrib/start-scripts/osx/README b/contrib/start-scripts/osx/README deleted file mode 100644 index 97e299f7da..0000000000 --- a/contrib/start-scripts/osx/README +++ /dev/null @@ -1,3 +0,0 @@ -To install execute the following: - -sudo /bin/sh ./install.sh diff --git a/contrib/start-scripts/osx/StartupParameters.plist b/contrib/start-scripts/osx/StartupParameters.plist deleted file mode 100644 index 6c788d0dda..0000000000 --- a/contrib/start-scripts/osx/StartupParameters.plist +++ /dev/null @@ -1,33 +0,0 @@ - - - - - Description - PostgreSQL Database Server - Messages - - start - Starting PostgreSQL database server - stop - Stopping PostgreSQL database server - restart - Restarting PostgreSQL database server - - OrderPreference - Late - Provides - - PostgreSQL - - Requires - - Disks - Resolver - - Uses - - NFS - NetworkTime - - - diff --git a/contrib/start-scripts/osx/install.sh b/contrib/start-scripts/osx/install.sh deleted file mode 100755 index bbc5ee3926..0000000000 --- a/contrib/start-scripts/osx/install.sh +++ /dev/null @@ -1,10 +0,0 @@ -sudo sh -c 'echo "POSTGRESQL=-YES-" >> /etc/hostconfig' -sudo mkdir /Library/StartupItems/PostgreSQL -sudo cp PostgreSQL /Library/StartupItems/PostgreSQL -sudo cp StartupParameters.plist /Library/StartupItems/PostgreSQL -if [ -e /Library/StartupItems/PostgreSQL/PostgreSQL ] -then - echo "Startup Item Installed Successfully . . . " - echo "Starting PostgreSQL Server . . . 
" - SystemStarter restart PostgreSQL -fi diff --git a/contrib/tablefunc/tablefunc.c b/contrib/tablefunc/tablefunc.c index 0bc8177b61..59f90dc947 100644 --- a/contrib/tablefunc/tablefunc.c +++ b/contrib/tablefunc/tablefunc.c @@ -10,7 +10,7 @@ * And contributors: * Nabil Sayegh * - * Copyright (c) 2002-2017, PostgreSQL Global Development Group + * Copyright (c) 2002-2018, PostgreSQL Global Development Group * * Permission to use, copy, modify, and distribute this software and its * documentation for any purpose, without fee, and without a written agreement @@ -1421,7 +1421,7 @@ build_tuplestore_recursively(char *key_fld, * Check expected (query runtime) tupdesc suitable for Connectby */ static void -validateConnectbyTupleDesc(TupleDesc tupdesc, bool show_branch, bool show_serial) +validateConnectbyTupleDesc(TupleDesc td, bool show_branch, bool show_serial) { int serial_column = 0; @@ -1431,7 +1431,7 @@ validateConnectbyTupleDesc(TupleDesc tupdesc, bool show_branch, bool show_serial /* are there the correct number of columns */ if (show_branch) { - if (tupdesc->natts != (CONNECTBY_NCOLS + serial_column)) + if (td->natts != (CONNECTBY_NCOLS + serial_column)) ereport(ERROR, (errcode(ERRCODE_DATATYPE_MISMATCH), errmsg("invalid return type"), @@ -1440,7 +1440,7 @@ validateConnectbyTupleDesc(TupleDesc tupdesc, bool show_branch, bool show_serial } else { - if (tupdesc->natts != CONNECTBY_NCOLS_NOBRANCH + serial_column) + if (td->natts != CONNECTBY_NCOLS_NOBRANCH + serial_column) ereport(ERROR, (errcode(ERRCODE_DATATYPE_MISMATCH), errmsg("invalid return type"), @@ -1449,14 +1449,14 @@ validateConnectbyTupleDesc(TupleDesc tupdesc, bool show_branch, bool show_serial } /* check that the types of the first two columns match */ - if (tupdesc->attrs[0]->atttypid != tupdesc->attrs[1]->atttypid) + if (TupleDescAttr(td, 0)->atttypid != TupleDescAttr(td, 1)->atttypid) ereport(ERROR, (errcode(ERRCODE_DATATYPE_MISMATCH), errmsg("invalid return type"), errdetail("First two columns must be the same type."))); /* check that the type of the third column is INT4 */ - if (tupdesc->attrs[2]->atttypid != INT4OID) + if (TupleDescAttr(td, 2)->atttypid != INT4OID) ereport(ERROR, (errcode(ERRCODE_DATATYPE_MISMATCH), errmsg("invalid return type"), @@ -1464,7 +1464,7 @@ validateConnectbyTupleDesc(TupleDesc tupdesc, bool show_branch, bool show_serial format_type_be(INT4OID)))); /* check that the type of the fourth column is TEXT if applicable */ - if (show_branch && tupdesc->attrs[3]->atttypid != TEXTOID) + if (show_branch && TupleDescAttr(td, 3)->atttypid != TEXTOID) ereport(ERROR, (errcode(ERRCODE_DATATYPE_MISMATCH), errmsg("invalid return type"), @@ -1472,7 +1472,8 @@ validateConnectbyTupleDesc(TupleDesc tupdesc, bool show_branch, bool show_serial format_type_be(TEXTOID)))); /* check that the type of the fifth column is INT4 */ - if (show_branch && show_serial && tupdesc->attrs[4]->atttypid != INT4OID) + if (show_branch && show_serial && + TupleDescAttr(td, 4)->atttypid != INT4OID) ereport(ERROR, (errcode(ERRCODE_DATATYPE_MISMATCH), errmsg("query-specified return tuple not valid for Connectby: " @@ -1480,7 +1481,8 @@ validateConnectbyTupleDesc(TupleDesc tupdesc, bool show_branch, bool show_serial format_type_be(INT4OID)))); /* check that the type of the fifth column is INT4 */ - if (!show_branch && show_serial && tupdesc->attrs[3]->atttypid != INT4OID) + if (!show_branch && show_serial && + TupleDescAttr(td, 3)->atttypid != INT4OID) ereport(ERROR, (errcode(ERRCODE_DATATYPE_MISMATCH), errmsg("query-specified return tuple 
not valid for Connectby: " @@ -1514,10 +1516,10 @@ compatConnectbyTupleDescs(TupleDesc ret_tupdesc, TupleDesc sql_tupdesc) * These columns must match the result type indicated by the calling * query. */ - ret_atttypid = ret_tupdesc->attrs[0]->atttypid; - sql_atttypid = sql_tupdesc->attrs[0]->atttypid; - ret_atttypmod = ret_tupdesc->attrs[0]->atttypmod; - sql_atttypmod = sql_tupdesc->attrs[0]->atttypmod; + ret_atttypid = TupleDescAttr(ret_tupdesc, 0)->atttypid; + sql_atttypid = TupleDescAttr(sql_tupdesc, 0)->atttypid; + ret_atttypmod = TupleDescAttr(ret_tupdesc, 0)->atttypmod; + sql_atttypmod = TupleDescAttr(sql_tupdesc, 0)->atttypmod; if (ret_atttypid != sql_atttypid || (ret_atttypmod >= 0 && ret_atttypmod != sql_atttypmod)) ereport(ERROR, @@ -1528,10 +1530,10 @@ compatConnectbyTupleDescs(TupleDesc ret_tupdesc, TupleDesc sql_tupdesc) format_type_with_typemod(ret_atttypid, ret_atttypmod), format_type_with_typemod(sql_atttypid, sql_atttypmod)))); - ret_atttypid = ret_tupdesc->attrs[1]->atttypid; - sql_atttypid = sql_tupdesc->attrs[1]->atttypid; - ret_atttypmod = ret_tupdesc->attrs[1]->atttypmod; - sql_atttypmod = sql_tupdesc->attrs[1]->atttypmod; + ret_atttypid = TupleDescAttr(ret_tupdesc, 1)->atttypid; + sql_atttypid = TupleDescAttr(sql_tupdesc, 1)->atttypid; + ret_atttypmod = TupleDescAttr(ret_tupdesc, 1)->atttypmod; + sql_atttypmod = TupleDescAttr(sql_tupdesc, 1)->atttypmod; if (ret_atttypid != sql_atttypid || (ret_atttypmod >= 0 && ret_atttypmod != sql_atttypmod)) ereport(ERROR, @@ -1562,8 +1564,8 @@ compatCrosstabTupleDescs(TupleDesc ret_tupdesc, TupleDesc sql_tupdesc) return false; /* check the rowid types match */ - ret_atttypid = ret_tupdesc->attrs[0]->atttypid; - sql_atttypid = sql_tupdesc->attrs[0]->atttypid; + ret_atttypid = TupleDescAttr(ret_tupdesc, 0)->atttypid; + sql_atttypid = TupleDescAttr(sql_tupdesc, 0)->atttypid; if (ret_atttypid != sql_atttypid) ereport(ERROR, (errcode(ERRCODE_DATATYPE_MISMATCH), @@ -1576,10 +1578,10 @@ compatCrosstabTupleDescs(TupleDesc ret_tupdesc, TupleDesc sql_tupdesc) * attribute [2] of the sql tuple should match attributes [1] to [natts] * of the return tuple */ - sql_attr = sql_tupdesc->attrs[2]; + sql_attr = TupleDescAttr(sql_tupdesc, 2); for (i = 1; i < ret_tupdesc->natts; i++) { - ret_attr = ret_tupdesc->attrs[i]; + ret_attr = TupleDescAttr(ret_tupdesc, i); if (ret_attr->atttypid != sql_attr->atttypid) return false; diff --git a/contrib/tablefunc/tablefunc.h b/contrib/tablefunc/tablefunc.h index e88a5720fa..7d0773f82f 100644 --- a/contrib/tablefunc/tablefunc.h +++ b/contrib/tablefunc/tablefunc.h @@ -10,7 +10,7 @@ * And contributors: * Nabil Sayegh * - * Copyright (c) 2002-2017, PostgreSQL Global Development Group + * Copyright (c) 2002-2018, PostgreSQL Global Development Group * * Permission to use, copy, modify, and distribute this software and its * documentation for any purpose, without fee, and without a written agreement diff --git a/contrib/tcn/tcn.c b/contrib/tcn/tcn.c index 0b9acbf848..0c274322bd 100644 --- a/contrib/tcn/tcn.c +++ b/contrib/tcn/tcn.c @@ -3,7 +3,7 @@ * tcn.c * triggered change notification support for PostgreSQL * - * Portions Copyright (c) 2011-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 2011-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * @@ -138,9 +138,9 @@ triggered_change_notification(PG_FUNCTION_ARGS) /* we're only interested if it is the primary key and valid */ if (index->indisprimary && IndexIsValid(index)) { - int 
numatts = index->indnatts; + int indnkeyatts = index->indnkeyatts; - if (numatts > 0) + if (indnkeyatts > 0) { int i; @@ -150,12 +150,13 @@ triggered_change_notification(PG_FUNCTION_ARGS) appendStringInfoCharMacro(payload, ','); appendStringInfoCharMacro(payload, operation); - for (i = 0; i < numatts; i++) + for (i = 0; i < indnkeyatts; i++) { int colno = index->indkey.values[i]; + Form_pg_attribute attr = TupleDescAttr(tupdesc, colno - 1); appendStringInfoCharMacro(payload, ','); - strcpy_quoted(payload, NameStr((tupdesc->attrs[colno - 1])->attname), '"'); + strcpy_quoted(payload, NameStr(attr->attname), '"'); appendStringInfoCharMacro(payload, '='); strcpy_quoted(payload, SPI_getvalue(trigtuple, tupdesc, colno), '\''); } diff --git a/contrib/test_decoding/Makefile b/contrib/test_decoding/Makefile index 6c18189d9d..afcab930f7 100644 --- a/contrib/test_decoding/Makefile +++ b/contrib/test_decoding/Makefile @@ -39,7 +39,7 @@ submake-test_decoding: REGRESSCHECKS=ddl xact rewrite toast permissions decoding_in_xact \ decoding_into_rel binary prepared replorigin time messages \ - spill slot + spill slot truncate regresscheck: | submake-regress submake-test_decoding temp-install $(pg_regress_check) \ @@ -50,7 +50,8 @@ regresscheck-install-force: | submake-regress submake-test_decoding temp-install $(pg_regress_installcheck) \ $(REGRESSCHECKS) -ISOLATIONCHECKS=mxact delayed_startup ondisk_startup concurrent_ddl_dml +ISOLATIONCHECKS=mxact delayed_startup ondisk_startup concurrent_ddl_dml \ + oldest_xmin snapshot_transfer isolationcheck: | submake-isolation submake-test_decoding temp-install $(pg_isolation_regress_check) \ diff --git a/contrib/test_decoding/expected/concurrent_ddl_dml.out b/contrib/test_decoding/expected/concurrent_ddl_dml.out index a15bfa292e..1f9e7661b7 100644 --- a/contrib/test_decoding/expected/concurrent_ddl_dml.out +++ b/contrib/test_decoding/expected/concurrent_ddl_dml.out @@ -10,7 +10,7 @@ step s1_insert_tbl1: INSERT INTO tbl1 (val1, val2) VALUES (1, 1); step s2_alter_tbl2_float: ALTER TABLE tbl2 ALTER COLUMN val2 TYPE float; step s1_insert_tbl2: INSERT INTO tbl2 (val1, val2) VALUES (1, 1); step s1_commit: COMMIT; -step s2_get_changes: SELECT regexp_replace(data, 'temp_\d+', 'temp') AS data FROM pg_logical_slot_get_changes('isolation_slot', NULL, NULL, 'include-xids', '0', 'skip-empty-xacts', '1'); +step s2_get_changes: SELECT data FROM pg_logical_slot_get_changes('isolation_slot', NULL, NULL, 'include-xids', '0', 'skip-empty-xacts', '1'); data BEGIN @@ -32,16 +32,13 @@ step s2_alter_tbl1_float: ALTER TABLE tbl1 ALTER COLUMN val2 TYPE float; -step s2_get_changes: SELECT regexp_replace(data, 'temp_\d+', 'temp') AS data FROM pg_logical_slot_get_changes('isolation_slot', NULL, NULL, 'include-xids', '0', 'skip-empty-xacts', '1'); +step s2_get_changes: SELECT data FROM pg_logical_slot_get_changes('isolation_slot', NULL, NULL, 'include-xids', '0', 'skip-empty-xacts', '1'); data BEGIN table public.tbl1: INSERT: val1[integer]:1 val2[integer]:1 table public.tbl2: INSERT: val1[integer]:1 val2[integer]:1 COMMIT -BEGIN -table public.pg_temp: INSERT: val1[integer]:1 val2[double precision]:1 -COMMIT ?column? 
stop @@ -56,7 +53,7 @@ step s1_insert_tbl1: INSERT INTO tbl1 (val1, val2) VALUES (1, 1); step s2_alter_tbl2_char: ALTER TABLE tbl2 ALTER COLUMN val2 TYPE character varying; step s1_insert_tbl2: INSERT INTO tbl2 (val1, val2) VALUES (1, 1); step s1_commit: COMMIT; -step s2_get_changes: SELECT regexp_replace(data, 'temp_\d+', 'temp') AS data FROM pg_logical_slot_get_changes('isolation_slot', NULL, NULL, 'include-xids', '0', 'skip-empty-xacts', '1'); +step s2_get_changes: SELECT data FROM pg_logical_slot_get_changes('isolation_slot', NULL, NULL, 'include-xids', '0', 'skip-empty-xacts', '1'); data BEGIN @@ -78,16 +75,13 @@ step s2_alter_tbl1_char: ALTER TABLE tbl1 ALTER COLUMN val2 TYPE character varyi step s1_insert_tbl2: INSERT INTO tbl2 (val1, val2) VALUES (1, 1); step s1_commit: COMMIT; step s2_alter_tbl1_char: <... completed> -step s2_get_changes: SELECT regexp_replace(data, 'temp_\d+', 'temp') AS data FROM pg_logical_slot_get_changes('isolation_slot', NULL, NULL, 'include-xids', '0', 'skip-empty-xacts', '1'); +step s2_get_changes: SELECT data FROM pg_logical_slot_get_changes('isolation_slot', NULL, NULL, 'include-xids', '0', 'skip-empty-xacts', '1'); data BEGIN table public.tbl1: INSERT: val1[integer]:1 val2[integer]:1 table public.tbl2: INSERT: val1[integer]:1 val2[integer]:1 COMMIT -BEGIN -table public.pg_temp: INSERT: val1[integer]:1 val2[character varying]:'1' -COMMIT ?column? stop @@ -103,16 +97,13 @@ step s1_insert_tbl2: INSERT INTO tbl2 (val1, val2) VALUES (1, 1); step s2_alter_tbl1_float: ALTER TABLE tbl1 ALTER COLUMN val2 TYPE float; step s1_commit: COMMIT; step s2_alter_tbl1_float: <... completed> -step s2_get_changes: SELECT regexp_replace(data, 'temp_\d+', 'temp') AS data FROM pg_logical_slot_get_changes('isolation_slot', NULL, NULL, 'include-xids', '0', 'skip-empty-xacts', '1'); +step s2_get_changes: SELECT data FROM pg_logical_slot_get_changes('isolation_slot', NULL, NULL, 'include-xids', '0', 'skip-empty-xacts', '1'); data BEGIN table public.tbl1: INSERT: val1[integer]:1 val2[integer]:1 table public.tbl2: INSERT: val1[integer]:1 val2[integer]:1 COMMIT -BEGIN -table public.pg_temp: INSERT: val1[integer]:1 val2[double precision]:1 -COMMIT ?column? stop @@ -128,16 +119,13 @@ step s1_insert_tbl2: INSERT INTO tbl2 (val1, val2) VALUES (1, 1); step s2_alter_tbl1_char: ALTER TABLE tbl1 ALTER COLUMN val2 TYPE character varying; step s1_commit: COMMIT; step s2_alter_tbl1_char: <... completed> -step s2_get_changes: SELECT regexp_replace(data, 'temp_\d+', 'temp') AS data FROM pg_logical_slot_get_changes('isolation_slot', NULL, NULL, 'include-xids', '0', 'skip-empty-xacts', '1'); +step s2_get_changes: SELECT data FROM pg_logical_slot_get_changes('isolation_slot', NULL, NULL, 'include-xids', '0', 'skip-empty-xacts', '1'); data BEGIN table public.tbl1: INSERT: val1[integer]:1 val2[integer]:1 table public.tbl2: INSERT: val1[integer]:1 val2[integer]:1 COMMIT -BEGIN -table public.pg_temp: INSERT: val1[integer]:1 val2[character varying]:'1' -COMMIT ?column? stop @@ -154,16 +142,13 @@ step s1_insert_tbl2: INSERT INTO tbl2 (val1, val2) VALUES (1, 1); step s2_alter_tbl1_float: ALTER TABLE tbl1 ALTER COLUMN val2 TYPE float; step s1_commit: COMMIT; step s2_alter_tbl1_float: <... 
completed> -step s2_get_changes: SELECT regexp_replace(data, 'temp_\d+', 'temp') AS data FROM pg_logical_slot_get_changes('isolation_slot', NULL, NULL, 'include-xids', '0', 'skip-empty-xacts', '1'); +step s2_get_changes: SELECT data FROM pg_logical_slot_get_changes('isolation_slot', NULL, NULL, 'include-xids', '0', 'skip-empty-xacts', '1'); data BEGIN table public.tbl1: INSERT: val1[integer]:1 val2[integer]:1 table public.tbl2: INSERT: val1[integer]:1 val2[double precision]:1 COMMIT -BEGIN -table public.pg_temp: INSERT: val1[integer]:1 val2[double precision]:1 -COMMIT ?column? stop @@ -180,16 +165,13 @@ step s1_insert_tbl2: INSERT INTO tbl2 (val1, val2) VALUES (1, 1); step s2_alter_tbl1_char: ALTER TABLE tbl1 ALTER COLUMN val2 TYPE character varying; step s1_commit: COMMIT; step s2_alter_tbl1_char: <... completed> -step s2_get_changes: SELECT regexp_replace(data, 'temp_\d+', 'temp') AS data FROM pg_logical_slot_get_changes('isolation_slot', NULL, NULL, 'include-xids', '0', 'skip-empty-xacts', '1'); +step s2_get_changes: SELECT data FROM pg_logical_slot_get_changes('isolation_slot', NULL, NULL, 'include-xids', '0', 'skip-empty-xacts', '1'); data BEGIN table public.tbl1: INSERT: val1[integer]:1 val2[integer]:1 table public.tbl2: INSERT: val1[integer]:1 val2[character varying]:'1' COMMIT -BEGIN -table public.pg_temp: INSERT: val1[integer]:1 val2[character varying]:'1' -COMMIT ?column? stop @@ -205,7 +187,7 @@ step s1_insert_tbl1: INSERT INTO tbl1 (val1, val2) VALUES (1, 1); step s2_alter_tbl2_text: ALTER TABLE tbl2 ALTER COLUMN val2 TYPE text; step s1_insert_tbl2: INSERT INTO tbl2 (val1, val2) VALUES (1, 1); step s1_commit: COMMIT; -step s2_get_changes: SELECT regexp_replace(data, 'temp_\d+', 'temp') AS data FROM pg_logical_slot_get_changes('isolation_slot', NULL, NULL, 'include-xids', '0', 'skip-empty-xacts', '1'); +step s2_get_changes: SELECT data FROM pg_logical_slot_get_changes('isolation_slot', NULL, NULL, 'include-xids', '0', 'skip-empty-xacts', '1'); data BEGIN @@ -229,16 +211,13 @@ step s1_insert_tbl2: INSERT INTO tbl2 (val1, val2) VALUES (1, 1); step s2_alter_tbl1_char: ALTER TABLE tbl1 ALTER COLUMN val2 TYPE character varying; step s1_commit: COMMIT; step s2_alter_tbl1_char: <... completed> -step s2_get_changes: SELECT regexp_replace(data, 'temp_\d+', 'temp') AS data FROM pg_logical_slot_get_changes('isolation_slot', NULL, NULL, 'include-xids', '0', 'skip-empty-xacts', '1'); +step s2_get_changes: SELECT data FROM pg_logical_slot_get_changes('isolation_slot', NULL, NULL, 'include-xids', '0', 'skip-empty-xacts', '1'); data BEGIN table public.tbl1: INSERT: val1[integer]:1 val2[integer]:1 table public.tbl2: INSERT: val1[integer]:1 val2[text]:'1' COMMIT -BEGIN -table public.pg_temp: INSERT: val1[integer]:1 val2[character varying]:'1' -COMMIT ?column? 
stop @@ -254,7 +233,7 @@ step s2_alter_tbl2_boolean: ALTER TABLE tbl2 ALTER COLUMN val2 TYPE boolean; ERROR: column "val2" cannot be cast automatically to type boolean step s1_insert_tbl2: INSERT INTO tbl2 (val1, val2) VALUES (1, 1); step s1_commit: COMMIT; -step s2_get_changes: SELECT regexp_replace(data, 'temp_\d+', 'temp') AS data FROM pg_logical_slot_get_changes('isolation_slot', NULL, NULL, 'include-xids', '0', 'skip-empty-xacts', '1'); +step s2_get_changes: SELECT data FROM pg_logical_slot_get_changes('isolation_slot', NULL, NULL, 'include-xids', '0', 'skip-empty-xacts', '1'); data BEGIN @@ -279,7 +258,7 @@ step s2_alter_tbl1_boolean: ALTER TABLE tbl1 ALTER COLUMN val2 TYPE boolean; error in steps s1_commit s2_alter_tbl1_boolean: ERROR: column "val2" cannot be cast automatically to type boolean -step s2_get_changes: SELECT regexp_replace(data, 'temp_\d+', 'temp') AS data FROM pg_logical_slot_get_changes('isolation_slot', NULL, NULL, 'include-xids', '0', 'skip-empty-xacts', '1'); +step s2_get_changes: SELECT data FROM pg_logical_slot_get_changes('isolation_slot', NULL, NULL, 'include-xids', '0', 'skip-empty-xacts', '1'); data BEGIN @@ -300,7 +279,7 @@ step s1_insert_tbl1: INSERT INTO tbl1 (val1, val2) VALUES (1, 1); step s2_alter_tbl2_add_int: ALTER TABLE tbl2 ADD COLUMN val3 INTEGER; step s1_insert_tbl2_3col: INSERT INTO tbl2 (val1, val2, val3) VALUES (1, 1, 1); step s1_commit: COMMIT; -step s2_get_changes: SELECT regexp_replace(data, 'temp_\d+', 'temp') AS data FROM pg_logical_slot_get_changes('isolation_slot', NULL, NULL, 'include-xids', '0', 'skip-empty-xacts', '1'); +step s2_get_changes: SELECT data FROM pg_logical_slot_get_changes('isolation_slot', NULL, NULL, 'include-xids', '0', 'skip-empty-xacts', '1'); data BEGIN @@ -324,7 +303,7 @@ step s1_begin: BEGIN; step s2_alter_tbl2_add_int: ALTER TABLE tbl2 ADD COLUMN val3 INTEGER; step s1_insert_tbl2_3col: INSERT INTO tbl2 (val1, val2, val3) VALUES (1, 1, 1); step s1_commit: COMMIT; -step s2_get_changes: SELECT regexp_replace(data, 'temp_\d+', 'temp') AS data FROM pg_logical_slot_get_changes('isolation_slot', NULL, NULL, 'include-xids', '0', 'skip-empty-xacts', '1'); +step s2_get_changes: SELECT data FROM pg_logical_slot_get_changes('isolation_slot', NULL, NULL, 'include-xids', '0', 'skip-empty-xacts', '1'); data BEGIN @@ -348,7 +327,7 @@ step s1_insert_tbl1: INSERT INTO tbl1 (val1, val2) VALUES (1, 1); step s2_alter_tbl2_add_float: ALTER TABLE tbl2 ADD COLUMN val3 FLOAT; step s1_insert_tbl2_3col: INSERT INTO tbl2 (val1, val2, val3) VALUES (1, 1, 1); step s1_commit: COMMIT; -step s2_get_changes: SELECT regexp_replace(data, 'temp_\d+', 'temp') AS data FROM pg_logical_slot_get_changes('isolation_slot', NULL, NULL, 'include-xids', '0', 'skip-empty-xacts', '1'); +step s2_get_changes: SELECT data FROM pg_logical_slot_get_changes('isolation_slot', NULL, NULL, 'include-xids', '0', 'skip-empty-xacts', '1'); data BEGIN @@ -372,7 +351,7 @@ step s1_begin: BEGIN; step s2_alter_tbl2_add_float: ALTER TABLE tbl2 ADD COLUMN val3 FLOAT; step s1_insert_tbl2_3col: INSERT INTO tbl2 (val1, val2, val3) VALUES (1, 1, 1); step s1_commit: COMMIT; -step s2_get_changes: SELECT regexp_replace(data, 'temp_\d+', 'temp') AS data FROM pg_logical_slot_get_changes('isolation_slot', NULL, NULL, 'include-xids', '0', 'skip-empty-xacts', '1'); +step s2_get_changes: SELECT data FROM pg_logical_slot_get_changes('isolation_slot', NULL, NULL, 'include-xids', '0', 'skip-empty-xacts', '1'); data BEGIN @@ -396,7 +375,7 @@ step s1_insert_tbl1: INSERT INTO tbl1 (val1, val2) 
VALUES (1, 1); step s2_alter_tbl2_add_char: ALTER TABLE tbl2 ADD COLUMN val3 character varying; step s1_insert_tbl2_3col: INSERT INTO tbl2 (val1, val2, val3) VALUES (1, 1, 1); step s1_commit: COMMIT; -step s2_get_changes: SELECT regexp_replace(data, 'temp_\d+', 'temp') AS data FROM pg_logical_slot_get_changes('isolation_slot', NULL, NULL, 'include-xids', '0', 'skip-empty-xacts', '1'); +step s2_get_changes: SELECT data FROM pg_logical_slot_get_changes('isolation_slot', NULL, NULL, 'include-xids', '0', 'skip-empty-xacts', '1'); data BEGIN @@ -420,7 +399,7 @@ step s1_begin: BEGIN; step s2_alter_tbl2_add_char: ALTER TABLE tbl2 ADD COLUMN val3 character varying; step s1_insert_tbl2_3col: INSERT INTO tbl2 (val1, val2, val3) VALUES (1, 1, 1); step s1_commit: COMMIT; -step s2_get_changes: SELECT regexp_replace(data, 'temp_\d+', 'temp') AS data FROM pg_logical_slot_get_changes('isolation_slot', NULL, NULL, 'include-xids', '0', 'skip-empty-xacts', '1'); +step s2_get_changes: SELECT data FROM pg_logical_slot_get_changes('isolation_slot', NULL, NULL, 'include-xids', '0', 'skip-empty-xacts', '1'); data BEGIN @@ -445,7 +424,7 @@ step s1_insert_tbl2_3col: INSERT INTO tbl2 (val1, val2, val3) VALUES (1, 1, 1); step s2_alter_tbl2_drop_3rd_col: ALTER TABLE tbl2 DROP COLUMN val3; step s1_commit: COMMIT; step s2_alter_tbl2_drop_3rd_col: <... completed> -step s2_get_changes: SELECT regexp_replace(data, 'temp_\d+', 'temp') AS data FROM pg_logical_slot_get_changes('isolation_slot', NULL, NULL, 'include-xids', '0', 'skip-empty-xacts', '1'); +step s2_get_changes: SELECT data FROM pg_logical_slot_get_changes('isolation_slot', NULL, NULL, 'include-xids', '0', 'skip-empty-xacts', '1'); data BEGIN @@ -468,7 +447,7 @@ step s1_insert_tbl2: INSERT INTO tbl2 (val1, val2) VALUES (1, 1); step s1_commit: COMMIT; step s2_alter_tbl2_drop_3rd_col: <... completed> step s1_insert_tbl2: INSERT INTO tbl2 (val1, val2) VALUES (1, 1); -step s2_get_changes: SELECT regexp_replace(data, 'temp_\d+', 'temp') AS data FROM pg_logical_slot_get_changes('isolation_slot', NULL, NULL, 'include-xids', '0', 'skip-empty-xacts', '1'); +step s2_get_changes: SELECT data FROM pg_logical_slot_get_changes('isolation_slot', NULL, NULL, 'include-xids', '0', 'skip-empty-xacts', '1'); data BEGIN @@ -493,7 +472,7 @@ step s1_insert_tbl2_3col: INSERT INTO tbl2 (val1, val2, val3) VALUES (1, 1, 1); step s2_alter_tbl2_drop_3rd_col: ALTER TABLE tbl2 DROP COLUMN val3; step s1_commit: COMMIT; step s2_alter_tbl2_drop_3rd_col: <... completed> -step s2_get_changes: SELECT regexp_replace(data, 'temp_\d+', 'temp') AS data FROM pg_logical_slot_get_changes('isolation_slot', NULL, NULL, 'include-xids', '0', 'skip-empty-xacts', '1'); +step s2_get_changes: SELECT data FROM pg_logical_slot_get_changes('isolation_slot', NULL, NULL, 'include-xids', '0', 'skip-empty-xacts', '1'); data BEGIN @@ -506,7 +485,7 @@ step s2_alter_tbl2_3rd_char: ALTER TABLE tbl2 ALTER COLUMN val3 TYPE character v step s1_insert_tbl2_3col: INSERT INTO tbl2 (val1, val2, val3) VALUES (1, 1, 1); step s1_commit: COMMIT; step s2_alter_tbl2_3rd_char: <... 
completed> -step s2_get_changes: SELECT regexp_replace(data, 'temp_\d+', 'temp') AS data FROM pg_logical_slot_get_changes('isolation_slot', NULL, NULL, 'include-xids', '0', 'skip-empty-xacts', '1'); +step s2_get_changes: SELECT data FROM pg_logical_slot_get_changes('isolation_slot', NULL, NULL, 'include-xids', '0', 'skip-empty-xacts', '1'); data BEGIN @@ -515,14 +494,9 @@ table public.tbl2: INSERT: val1[integer]:1 val2[integer]:1 val3[text]:'1' COMMIT step s2_alter_tbl2_3rd_int: ALTER TABLE tbl2 ALTER COLUMN val3 TYPE int USING val3::integer; step s1_insert_tbl2_3col: INSERT INTO tbl2 (val1, val2, val3) VALUES (1, 1, 1); -step s2_get_changes: SELECT regexp_replace(data, 'temp_\d+', 'temp') AS data FROM pg_logical_slot_get_changes('isolation_slot', NULL, NULL, 'include-xids', '0', 'skip-empty-xacts', '1'); +step s2_get_changes: SELECT data FROM pg_logical_slot_get_changes('isolation_slot', NULL, NULL, 'include-xids', '0', 'skip-empty-xacts', '1'); data -BEGIN -table public.pg_temp: INSERT: val1[integer]:1 val2[integer]:1 val3[integer]:null -table public.pg_temp: INSERT: val1[integer]:1 val2[integer]:1 val3[integer]:1 -table public.pg_temp: INSERT: val1[integer]:1 val2[integer]:1 val3[integer]:1 -COMMIT BEGIN table public.tbl2: INSERT: val1[integer]:1 val2[integer]:1 val3[integer]:1 COMMIT @@ -544,7 +518,7 @@ step s1_insert_tbl2_3col: INSERT INTO tbl2 (val1, val2, val3) VALUES (1, 1, 1); step s1_commit: COMMIT; step s2_alter_tbl2_3rd_text: <... completed> step s1_insert_tbl2_3col: INSERT INTO tbl2 (val1, val2, val3) VALUES (1, 1, 1); -step s2_get_changes: SELECT regexp_replace(data, 'temp_\d+', 'temp') AS data FROM pg_logical_slot_get_changes('isolation_slot', NULL, NULL, 'include-xids', '0', 'skip-empty-xacts', '1'); +step s2_get_changes: SELECT data FROM pg_logical_slot_get_changes('isolation_slot', NULL, NULL, 'include-xids', '0', 'skip-empty-xacts', '1'); data BEGIN @@ -573,7 +547,7 @@ step s1_insert_tbl2_3col: INSERT INTO tbl2 (val1, val2, val3) VALUES (1, 1, 1); step s1_commit: COMMIT; step s2_alter_tbl2_3rd_char: <... 
completed> step s1_insert_tbl2_3col: INSERT INTO tbl2 (val1, val2, val3) VALUES (1, 1, 1); -step s2_get_changes: SELECT regexp_replace(data, 'temp_\d+', 'temp') AS data FROM pg_logical_slot_get_changes('isolation_slot', NULL, NULL, 'include-xids', '0', 'skip-empty-xacts', '1'); +step s2_get_changes: SELECT data FROM pg_logical_slot_get_changes('isolation_slot', NULL, NULL, 'include-xids', '0', 'skip-empty-xacts', '1'); data BEGIN @@ -601,7 +575,7 @@ step s1_insert_tbl2_3col: INSERT INTO tbl2 (val1, val2, val3) VALUES (1, 1, 1); step s1_commit: COMMIT; step s2_alter_tbl2_drop_3rd_col: ALTER TABLE tbl2 DROP COLUMN val3; step s1_insert_tbl2: INSERT INTO tbl2 (val1, val2) VALUES (1, 1); -step s2_get_changes: SELECT regexp_replace(data, 'temp_\d+', 'temp') AS data FROM pg_logical_slot_get_changes('isolation_slot', NULL, NULL, 'include-xids', '0', 'skip-empty-xacts', '1'); +step s2_get_changes: SELECT data FROM pg_logical_slot_get_changes('isolation_slot', NULL, NULL, 'include-xids', '0', 'skip-empty-xacts', '1'); data BEGIN @@ -628,7 +602,7 @@ step s1_insert_tbl2_3col: INSERT INTO tbl2 (val1, val2, val3) VALUES (1, 1, 1); step s1_commit: COMMIT; step s2_alter_tbl2_drop_3rd_col: ALTER TABLE tbl2 DROP COLUMN val3; step s1_insert_tbl2: INSERT INTO tbl2 (val1, val2) VALUES (1, 1); -step s2_get_changes: SELECT regexp_replace(data, 'temp_\d+', 'temp') AS data FROM pg_logical_slot_get_changes('isolation_slot', NULL, NULL, 'include-xids', '0', 'skip-empty-xacts', '1'); +step s2_get_changes: SELECT data FROM pg_logical_slot_get_changes('isolation_slot', NULL, NULL, 'include-xids', '0', 'skip-empty-xacts', '1'); data BEGIN @@ -653,7 +627,7 @@ step s1_insert_tbl1: INSERT INTO tbl1 (val1, val2) VALUES (1, 1); step s2_alter_tbl2_drop_3rd_col: ALTER TABLE tbl2 DROP COLUMN val3; step s1_insert_tbl1: INSERT INTO tbl1 (val1, val2) VALUES (1, 1); step s1_commit: COMMIT; -step s2_get_changes: SELECT regexp_replace(data, 'temp_\d+', 'temp') AS data FROM pg_logical_slot_get_changes('isolation_slot', NULL, NULL, 'include-xids', '0', 'skip-empty-xacts', '1'); +step s2_get_changes: SELECT data FROM pg_logical_slot_get_changes('isolation_slot', NULL, NULL, 'include-xids', '0', 'skip-empty-xacts', '1'); data BEGIN diff --git a/contrib/test_decoding/expected/ddl.out b/contrib/test_decoding/expected/ddl.out index 1e22c1eefc..b7c76469fc 100644 --- a/contrib/test_decoding/expected/ddl.out +++ b/contrib/test_decoding/expected/ddl.out @@ -117,11 +117,11 @@ SELECT data FROM pg_logical_slot_get_changes('regression_slot', NULL, NULL, 'inc (22 rows) ALTER TABLE replication_example ALTER COLUMN somenum TYPE int4 USING (somenum::int4); --- throw away changes, they contain oids +-- check that this doesn't produce any changes from the heap rewrite SELECT count(data) FROM pg_logical_slot_get_changes('regression_slot', NULL, NULL, 'include-xids', '0', 'skip-empty-xacts', '1'); count ------- - 12 + 0 (1 row) INSERT INTO replication_example(somedata, somenum) VALUES (5, 1); @@ -192,16 +192,20 @@ SELECT data FROM pg_logical_slot_get_changes('regression_slot', NULL, NULL, 'inc COMMIT (33 rows) --- hide changes bc of oid visible in full table rewrites CREATE TABLE tr_unique(id2 serial unique NOT NULL, data int); INSERT INTO tr_unique(data) VALUES(10); ALTER TABLE tr_unique RENAME TO tr_pkey; ALTER TABLE tr_pkey ADD COLUMN id serial primary key; -SELECT count(data) FROM pg_logical_slot_get_changes('regression_slot', NULL, NULL, 'include-xids', '0', 'skip-empty-xacts', '1'); - count -------- - 6 -(1 row) +SELECT data FROM 
pg_logical_slot_get_changes('regression_slot', NULL, NULL, 'include-xids', '0', 'skip-empty-xacts', '1', 'include-rewrites', '1'); + data +----------------------------------------------------------------------------- + BEGIN + table public.tr_unique: INSERT: id2[integer]:1 data[integer]:10 + COMMIT + BEGIN + table public.tr_pkey: INSERT: id2[integer]:1 data[integer]:10 id[integer]:1 + COMMIT +(6 rows) INSERT INTO tr_pkey(data) VALUES(1); --show deletion with primary key diff --git a/contrib/test_decoding/expected/decoding_into_rel.out b/contrib/test_decoding/expected/decoding_into_rel.out index be759caa31..8fd3390066 100644 --- a/contrib/test_decoding/expected/decoding_into_rel.out +++ b/contrib/test_decoding/expected/decoding_into_rel.out @@ -59,6 +59,31 @@ SELECT * FROM changeresult; DROP TABLE changeresult; DROP TABLE somechange; +-- check calling logical decoding from pl/pgsql +CREATE FUNCTION slot_changes_wrapper(slot_name name) RETURNS SETOF TEXT AS $$ +BEGIN + RETURN QUERY + SELECT data FROM pg_logical_slot_peek_changes(slot_name, NULL, NULL, 'include-xids', '0', 'skip-empty-xacts', '1'); +END$$ LANGUAGE plpgsql; +SELECT * FROM slot_changes_wrapper('regression_slot'); + slot_changes_wrapper +-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + BEGIN + table public.changeresult: INSERT: data[text]:'BEGIN' + table public.changeresult: INSERT: data[text]:'table public.changeresult: INSERT: data[text]:''BEGIN''' + table public.changeresult: INSERT: data[text]:'table public.changeresult: INSERT: data[text]:''table public.somechange: INSERT: id[integer]:1''' + table public.changeresult: INSERT: data[text]:'table public.changeresult: INSERT: data[text]:''COMMIT''' + table public.changeresult: INSERT: data[text]:'COMMIT' + table public.changeresult: INSERT: data[text]:'BEGIN' + table public.changeresult: INSERT: data[text]:'table public.changeresult: INSERT: data[text]:''BEGIN''' + table public.changeresult: INSERT: data[text]:'table public.changeresult: INSERT: data[text]:''table public.changeresult: INSERT: data[text]:''''BEGIN''''''' + table public.changeresult: INSERT: data[text]:'table public.changeresult: INSERT: data[text]:''table public.changeresult: INSERT: data[text]:''''table public.somechange: INSERT: id[integer]:1''''''' + table public.changeresult: INSERT: data[text]:'table public.changeresult: INSERT: data[text]:''table public.changeresult: INSERT: data[text]:''''COMMIT''''''' + table public.changeresult: INSERT: data[text]:'table public.changeresult: INSERT: data[text]:''COMMIT''' + table public.changeresult: INSERT: data[text]:'COMMIT' + COMMIT +(14 rows) + SELECT data FROM pg_logical_slot_get_changes('regression_slot', NULL, NULL, 'include-xids', '0', 'skip-empty-xacts', '1'); data -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- diff --git a/contrib/test_decoding/expected/oldest_xmin.out b/contrib/test_decoding/expected/oldest_xmin.out new file mode 100644 index 0000000000..d1b4f17e3a --- /dev/null +++ b/contrib/test_decoding/expected/oldest_xmin.out @@ -0,0 +1,30 @@ +Parsed test spec with 2 sessions + +starting permutation: s0_begin s0_getxid s1_begin s1_insert s0_alter s0_commit s0_checkpoint s0_get_changes s0_get_changes s1_commit s0_vacuum s0_get_changes +step s0_begin: 
BEGIN; +step s0_getxid: SELECT txid_current() IS NULL; +?column? + +f +step s1_begin: BEGIN; +step s1_insert: INSERT INTO harvest VALUES ((1, 2, 3)); +step s0_alter: ALTER TYPE basket DROP ATTRIBUTE mangos; +step s0_commit: COMMIT; +step s0_checkpoint: CHECKPOINT; +step s0_get_changes: SELECT data FROM pg_logical_slot_get_changes('isolation_slot', NULL, NULL, 'include-xids', '0', 'skip-empty-xacts', '1'); +data + +step s0_get_changes: SELECT data FROM pg_logical_slot_get_changes('isolation_slot', NULL, NULL, 'include-xids', '0', 'skip-empty-xacts', '1'); +data + +step s1_commit: COMMIT; +step s0_vacuum: VACUUM pg_attribute; +step s0_get_changes: SELECT data FROM pg_logical_slot_get_changes('isolation_slot', NULL, NULL, 'include-xids', '0', 'skip-empty-xacts', '1'); +data + +BEGIN +table public.harvest: INSERT: fruits[basket]:'(1,2,3)' +COMMIT +?column? + +stop diff --git a/contrib/test_decoding/expected/permissions.out b/contrib/test_decoding/expected/permissions.out index 7175dcd5f6..ed97f81dda 100644 --- a/contrib/test_decoding/expected/permissions.out +++ b/contrib/test_decoding/expected/permissions.out @@ -38,7 +38,7 @@ SELECT 'init' FROM pg_create_logical_replication_slot('regression_slot', 'test_d (1 row) INSERT INTO lr_test VALUES('lr_superuser_init'); -ERROR: permission denied for relation lr_test +ERROR: permission denied for table lr_test SELECT data FROM pg_logical_slot_get_changes('regression_slot', NULL, NULL, 'include-xids', '0', 'skip-empty-xacts', '1'); data ------ @@ -56,7 +56,7 @@ SET ROLE regress_lr_normal; SELECT 'init' FROM pg_create_logical_replication_slot('regression_slot', 'test_decoding'); ERROR: must be superuser or replication role to use replication slots INSERT INTO lr_test VALUES('lr_superuser_init'); -ERROR: permission denied for relation lr_test +ERROR: permission denied for table lr_test SELECT data FROM pg_logical_slot_get_changes('regression_slot', NULL, NULL, 'include-xids', '0', 'skip-empty-xacts', '1'); ERROR: must be superuser or replication role to use replication slots SELECT pg_drop_replication_slot('regression_slot'); diff --git a/contrib/test_decoding/expected/replorigin.out b/contrib/test_decoding/expected/replorigin.out index 76d4ea986d..8ea4ddda97 100644 --- a/contrib/test_decoding/expected/replorigin.out +++ b/contrib/test_decoding/expected/replorigin.out @@ -26,7 +26,14 @@ SELECT pg_replication_origin_drop('test_decoding: temp'); (1 row) SELECT pg_replication_origin_drop('test_decoding: temp'); -ERROR: cache lookup failed for replication origin 'test_decoding: temp' +ERROR: replication origin "test_decoding: temp" does not exist +-- various failure checks for undefined slots +select pg_replication_origin_advance('test_decoding: temp', '0/1'); +ERROR: replication origin "test_decoding: temp" does not exist +select pg_replication_origin_session_setup('test_decoding: temp'); +ERROR: replication origin "test_decoding: temp" does not exist +select pg_replication_origin_progress('test_decoding: temp', true); +ERROR: replication origin "test_decoding: temp" does not exist SELECT 'init' FROM pg_create_logical_replication_slot('regression_slot', 'test_decoding'); ?column? 
---------- diff --git a/contrib/test_decoding/expected/rewrite.out b/contrib/test_decoding/expected/rewrite.out index 4dcd489543..3bf2afa931 100644 --- a/contrib/test_decoding/expected/rewrite.out +++ b/contrib/test_decoding/expected/rewrite.out @@ -1,6 +1,61 @@ -- predictability SET synchronous_commit = on; DROP TABLE IF EXISTS replication_example; +-- Ensure there's tables with toast datums. To do so, we dynamically +-- create a function returning a large textblob. We want tables of +-- different kinds: mapped catalog table, unmapped catalog table, +-- shared catalog table and usertable. +CREATE FUNCTION exec(text) returns void language plpgsql volatile + AS $f$ + BEGIN + EXECUTE $1; + END; +$f$; +CREATE ROLE justforcomments NOLOGIN; +SELECT exec( + format($outer$CREATE FUNCTION iamalongfunction() RETURNS TEXT IMMUTABLE LANGUAGE SQL AS $f$SELECT text %L$f$$outer$, + (SELECT repeat(string_agg(to_char(g.i, 'FM0000'), ''), 50) FROM generate_series(1, 500) g(i)))); + exec +------ + +(1 row) + +SELECT exec( + format($outer$COMMENT ON FUNCTION iamalongfunction() IS %L$outer$, + iamalongfunction())); + exec +------ + +(1 row) + +SELECT exec( + format($outer$COMMENT ON ROLE JUSTFORCOMMENTS IS %L$outer$, + iamalongfunction())); + exec +------ + +(1 row) + +CREATE TABLE iamalargetable AS SELECT iamalongfunction() longfunctionoutput; +-- verify toast usage +SELECT pg_relation_size((SELECT reltoastrelid FROM pg_class WHERE oid = 'pg_proc'::regclass)) > 0; + ?column? +---------- + t +(1 row) + +SELECT pg_relation_size((SELECT reltoastrelid FROM pg_class WHERE oid = 'pg_description'::regclass)) > 0; + ?column? +---------- + t +(1 row) + +SELECT pg_relation_size((SELECT reltoastrelid FROM pg_class WHERE oid = 'pg_shdescription'::regclass)) > 0; + ?column? +---------- + t +(1 row) + SELECT 'init' FROM pg_create_logical_replication_slot('regression_slot', 'test_decoding'); ?column? 
---------- @@ -76,6 +131,23 @@ SELECT data FROM pg_logical_slot_get_changes('regression_slot', NULL, NULL, 'inc COMMIT (15 rows) +-- trigger repeated rewrites of a system catalog with a toast table, +-- that previously was buggy: 20180914021046.oi7dm4ra3ot2g2kt@alap3.anarazel.de +VACUUM FULL pg_proc; VACUUM FULL pg_description; VACUUM FULL pg_shdescription; VACUUM FULL iamalargetable; +INSERT INTO replication_example(somedata, testcolumn1, testcolumn3) VALUES (8, 6, 1); +VACUUM FULL pg_proc; VACUUM FULL pg_description; VACUUM FULL pg_shdescription; VACUUM FULL iamalargetable; +INSERT INTO replication_example(somedata, testcolumn1, testcolumn3) VALUES (9, 7, 1); +SELECT data FROM pg_logical_slot_get_changes('regression_slot', NULL, NULL, 'include-xids', '0', 'skip-empty-xacts', '1'); + data +----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + BEGIN + table public.replication_example: INSERT: id[integer]:9 somedata[integer]:8 text[character varying]:null testcolumn1[integer]:6 testcolumn2[integer]:null testcolumn3[integer]:1 + COMMIT + BEGIN + table public.replication_example: INSERT: id[integer]:10 somedata[integer]:9 text[character varying]:null testcolumn1[integer]:7 testcolumn2[integer]:null testcolumn3[integer]:1 + COMMIT +(6 rows) + SELECT pg_drop_replication_slot('regression_slot'); pg_drop_replication_slot -------------------------- @@ -83,3 +155,6 @@ SELECT pg_drop_replication_slot('regression_slot'); (1 row) DROP TABLE IF EXISTS replication_example; +DROP FUNCTION iamalongfunction(); +DROP FUNCTION exec(text); +DROP ROLE justforcomments; diff --git a/contrib/test_decoding/expected/slot.out b/contrib/test_decoding/expected/slot.out index 9f5f8a9b76..523621a705 100644 --- a/contrib/test_decoding/expected/slot.out +++ b/contrib/test_decoding/expected/slot.out @@ -30,6 +30,8 @@ SELECT 'init' FROM pg_create_logical_replication_slot('regression_slot_t2', 'tes init (1 row) +SELECT pg_create_logical_replication_slot('foo', 'nonexistent'); +ERROR: could not access file "nonexistent": No such file or directory -- here we want to start a new session and wait till old one is gone select pg_backend_pid() as oldpid \gset \c - @@ -92,6 +94,36 @@ SELECT data FROM pg_logical_slot_get_changes('regression_slot2', NULL, NULL, 'in COMMIT (3 rows) +INSERT INTO replication_example(somedata, text) VALUES (1, 4); +INSERT INTO replication_example(somedata, text) VALUES (1, 5); +SELECT pg_current_wal_lsn() AS wal_lsn \gset +INSERT INTO replication_example(somedata, text) VALUES (1, 6); +SELECT end_lsn FROM pg_replication_slot_advance('regression_slot1', :'wal_lsn') \gset +SELECT slot_name FROM pg_replication_slot_advance('regression_slot2', pg_current_wal_lsn()); + slot_name +------------------ + regression_slot2 +(1 row) + +SELECT :'wal_lsn' = :'end_lsn'; + ?column? 
+---------- + t +(1 row) + +SELECT data FROM pg_logical_slot_get_changes('regression_slot1', NULL, NULL, 'include-xids', '0', 'skip-empty-xacts', '1'); + data +--------------------------------------------------------------------------------------------------------- + BEGIN + table public.replication_example: INSERT: id[integer]:6 somedata[integer]:1 text[character varying]:'6' + COMMIT +(3 rows) + +SELECT data FROM pg_logical_slot_get_changes('regression_slot2', NULL, NULL, 'include-xids', '0', 'skip-empty-xacts', '1'); + data +------ +(0 rows) + DROP TABLE replication_example; -- error SELECT 'init' FROM pg_create_logical_replication_slot('regression_slot1', 'test_decoding', true); @@ -101,3 +133,20 @@ SELECT pg_drop_replication_slot('regression_slot1'); ERROR: replication slot "regression_slot1" does not exist SELECT pg_drop_replication_slot('regression_slot2'); ERROR: replication slot "regression_slot2" does not exist +-- slot advance with physical slot, error with non-reserved slot +SELECT slot_name FROM pg_create_physical_replication_slot('regression_slot3'); + slot_name +------------------ + regression_slot3 +(1 row) + +SELECT pg_replication_slot_advance('regression_slot3', '0/0'); -- invalid LSN +ERROR: invalid target wal lsn +SELECT pg_replication_slot_advance('regression_slot3', '0/1'); -- error +ERROR: cannot advance replication slot that has not previously reserved WAL +SELECT pg_drop_replication_slot('regression_slot3'); + pg_drop_replication_slot +-------------------------- + +(1 row) + diff --git a/contrib/test_decoding/expected/snapshot_transfer.out b/contrib/test_decoding/expected/snapshot_transfer.out new file mode 100644 index 0000000000..87bed03f76 --- /dev/null +++ b/contrib/test_decoding/expected/snapshot_transfer.out @@ -0,0 +1,49 @@ +Parsed test spec with 2 sessions + +starting permutation: s0_begin s0_begin_sub0 s0_log_assignment s0_sub_get_base_snap s1_produce_new_snap s0_insert s0_end_sub0 s0_commit s0_get_changes +step s0_begin: BEGIN; +step s0_begin_sub0: SAVEPOINT s0; +step s0_log_assignment: SELECT txid_current() IS NULL; +?column? + +f +step s0_sub_get_base_snap: INSERT INTO dummy VALUES (0); +step s1_produce_new_snap: ALTER TABLE harvest ADD COLUMN mangos int; +step s0_insert: INSERT INTO harvest VALUES (1, 2, 3); +step s0_end_sub0: RELEASE SAVEPOINT s0; +step s0_commit: COMMIT; +step s0_get_changes: SELECT data FROM pg_logical_slot_get_changes('isolation_slot', NULL, NULL, 'include-xids', '0', 'skip-empty-xacts', '1'); +data + +BEGIN +table public.dummy: INSERT: i[integer]:0 +table public.harvest: INSERT: apples[integer]:1 pears[integer]:2 mangos[integer]:3 +COMMIT +?column? + +stop + +starting permutation: s0_begin s0_begin_sub0 s0_log_assignment s0_begin_sub1 s0_sub_get_base_snap s1_produce_new_snap s0_insert s0_end_sub1 s0_end_sub0 s0_commit s0_get_changes +step s0_begin: BEGIN; +step s0_begin_sub0: SAVEPOINT s0; +step s0_log_assignment: SELECT txid_current() IS NULL; +?column? 
+ +f +step s0_begin_sub1: SAVEPOINT s1; +step s0_sub_get_base_snap: INSERT INTO dummy VALUES (0); +step s1_produce_new_snap: ALTER TABLE harvest ADD COLUMN mangos int; +step s0_insert: INSERT INTO harvest VALUES (1, 2, 3); +step s0_end_sub1: RELEASE SAVEPOINT s1; +step s0_end_sub0: RELEASE SAVEPOINT s0; +step s0_commit: COMMIT; +step s0_get_changes: SELECT data FROM pg_logical_slot_get_changes('isolation_slot', NULL, NULL, 'include-xids', '0', 'skip-empty-xacts', '1'); +data + +BEGIN +table public.dummy: INSERT: i[integer]:0 +table public.harvest: INSERT: apples[integer]:1 pears[integer]:2 mangos[integer]:3 +COMMIT +?column? + +stop diff --git a/contrib/test_decoding/expected/truncate.out b/contrib/test_decoding/expected/truncate.out new file mode 100644 index 0000000000..1cf2ae835c --- /dev/null +++ b/contrib/test_decoding/expected/truncate.out @@ -0,0 +1,27 @@ +-- predictability +SET synchronous_commit = on; +SELECT 'init' FROM pg_create_logical_replication_slot('regression_slot', 'test_decoding'); + ?column? +---------- + init +(1 row) + +CREATE TABLE tab1 (id serial unique, data int); +CREATE TABLE tab2 (a int primary key, b int); +TRUNCATE tab1; +TRUNCATE tab1, tab1 RESTART IDENTITY CASCADE; +TRUNCATE tab1, tab2; +SELECT data FROM pg_logical_slot_get_changes('regression_slot', NULL, NULL, 'include-xids', '0', 'skip-empty-xacts', '1'); + data +------------------------------------------------------ + BEGIN + table public.tab1: TRUNCATE: (no-flags) + COMMIT + BEGIN + table public.tab1: TRUNCATE: restart_seqs cascade + COMMIT + BEGIN + table public.tab1, public.tab2: TRUNCATE: (no-flags) + COMMIT +(9 rows) + diff --git a/contrib/test_decoding/specs/concurrent_ddl_dml.spec b/contrib/test_decoding/specs/concurrent_ddl_dml.spec index 4a76532402..e7cea37d30 100644 --- a/contrib/test_decoding/specs/concurrent_ddl_dml.spec +++ b/contrib/test_decoding/specs/concurrent_ddl_dml.spec @@ -53,7 +53,7 @@ step "s2_alter_tbl2_3rd_char" { ALTER TABLE tbl2 ALTER COLUMN val3 TYPE characte step "s2_alter_tbl2_3rd_text" { ALTER TABLE tbl2 ALTER COLUMN val3 TYPE text; } step "s2_alter_tbl2_3rd_int" { ALTER TABLE tbl2 ALTER COLUMN val3 TYPE int USING val3::integer; } -step "s2_get_changes" { SELECT regexp_replace(data, 'temp_\d+', 'temp') AS data FROM pg_logical_slot_get_changes('isolation_slot', NULL, NULL, 'include-xids', '0', 'skip-empty-xacts', '1'); } +step "s2_get_changes" { SELECT data FROM pg_logical_slot_get_changes('isolation_slot', NULL, NULL, 'include-xids', '0', 'skip-empty-xacts', '1'); } diff --git a/contrib/test_decoding/specs/oldest_xmin.spec b/contrib/test_decoding/specs/oldest_xmin.spec new file mode 100644 index 0000000000..6cb13e85ce --- /dev/null +++ b/contrib/test_decoding/specs/oldest_xmin.spec @@ -0,0 +1,42 @@ +# Test advancement of the slot's oldest xmin + +setup +{ + SELECT 'init' FROM pg_create_logical_replication_slot('isolation_slot', 'test_decoding'); -- must be first write in xact + DROP TYPE IF EXISTS basket; + CREATE TYPE basket AS (apples integer, pears integer, mangos integer); + DROP TABLE IF EXISTS harvest; + CREATE TABLE harvest(fruits basket); +} + +teardown +{ + DROP TABLE IF EXISTS harvest; + DROP TYPE IF EXISTS basket; + SELECT 'stop' FROM pg_drop_replication_slot('isolation_slot'); +} + +session "s0" +setup { SET synchronous_commit=on; } +step "s0_begin" { BEGIN; } +step "s0_getxid" { SELECT txid_current() IS NULL; } +step "s0_alter" { ALTER TYPE basket DROP ATTRIBUTE mangos; } +step "s0_commit" { COMMIT; } +step "s0_checkpoint" { CHECKPOINT; } +step "s0_vacuum" { 
VACUUM pg_attribute; } +step "s0_get_changes" { SELECT data FROM pg_logical_slot_get_changes('isolation_slot', NULL, NULL, 'include-xids', '0', 'skip-empty-xacts', '1'); } + +session "s1" +setup { SET synchronous_commit=on; } +step "s1_begin" { BEGIN; } +step "s1_insert" { INSERT INTO harvest VALUES ((1, 2, 3)); } +step "s1_commit" { COMMIT; } + +# Checkpoint with following get_changes forces xmin advancement. We do +# get_changes twice because if one more xl_running_xacts record had slipped +# before our CHECKPOINT, xmin will be advanced only on this record, thus not +# reaching value needed for vacuuming corresponding pg_attribute entry. ALTER of +# composite type is a rare form of DDL which allows T1 to see the tuple which +# will be removed (xmax set) before T1 commits. That is, interlocking doesn't +# forbid modifying catalog after someone read it (and didn't commit yet). +permutation "s0_begin" "s0_getxid" "s1_begin" "s1_insert" "s0_alter" "s0_commit" "s0_checkpoint" "s0_get_changes" "s0_get_changes""s1_commit" "s0_vacuum" "s0_get_changes" diff --git a/contrib/test_decoding/specs/snapshot_transfer.spec b/contrib/test_decoding/specs/snapshot_transfer.spec new file mode 100644 index 0000000000..8fb70e6567 --- /dev/null +++ b/contrib/test_decoding/specs/snapshot_transfer.spec @@ -0,0 +1,44 @@ +# Test snapshot transfer from subxact to top-level and receival of later snaps. + +setup +{ + SELECT 'init' FROM pg_create_logical_replication_slot('isolation_slot', 'test_decoding'); -- must be first write in xact + DROP TABLE IF EXISTS dummy; + CREATE TABLE dummy(i int); + DROP TABLE IF EXISTS harvest; + CREATE TABLE harvest(apples int, pears int); +} + +teardown +{ + DROP TABLE IF EXISTS harvest; + DROP TABLE IF EXISTS dummy; + SELECT 'stop' FROM pg_drop_replication_slot('isolation_slot'); +} + +session "s0" +setup { SET synchronous_commit=on; } +step "s0_begin" { BEGIN; } +step "s0_begin_sub0" { SAVEPOINT s0; } +step "s0_log_assignment" { SELECT txid_current() IS NULL; } +step "s0_begin_sub1" { SAVEPOINT s1; } +step "s0_sub_get_base_snap" { INSERT INTO dummy VALUES (0); } +step "s0_insert" { INSERT INTO harvest VALUES (1, 2, 3); } +step "s0_end_sub0" { RELEASE SAVEPOINT s0; } +step "s0_end_sub1" { RELEASE SAVEPOINT s1; } +step "s0_insert2" { INSERT INTO harvest VALUES (1, 2, 3, 4); } +step "s0_commit" { COMMIT; } +step "s0_get_changes" { SELECT data FROM pg_logical_slot_get_changes('isolation_slot', NULL, NULL, 'include-xids', '0', 'skip-empty-xacts', '1'); } + +session "s1" +setup { SET synchronous_commit=on; } +step "s1_produce_new_snap" { ALTER TABLE harvest ADD COLUMN mangos int; } + +# start top-level without base snap, get base snap in subxact, then create new +# snap and make sure it is queued. +permutation "s0_begin" "s0_begin_sub0" "s0_log_assignment" "s0_sub_get_base_snap" "s1_produce_new_snap" "s0_insert" "s0_end_sub0" "s0_commit" "s0_get_changes" + +# In previous test, we firstly associated subxact with xact and only then got +# base snap; now nest one more subxact to get snap first and only then (at +# commit) associate it with toplevel. 
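+# That is, the base snapshot is taken inside the nested subxact s1 and only
+# becomes associated with the top-level transaction at commit; the snapshot
+# created by the concurrent s1_produce_new_snap step must still be queued so
+# that the added "mangos" column appears when s0_insert is decoded.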
+permutation "s0_begin" "s0_begin_sub0" "s0_log_assignment" "s0_begin_sub1" "s0_sub_get_base_snap" "s1_produce_new_snap" "s0_insert" "s0_end_sub1" "s0_end_sub0" "s0_commit" "s0_get_changes" diff --git a/contrib/test_decoding/sql/ddl.sql b/contrib/test_decoding/sql/ddl.sql index 057dae056b..c4b10a4cf9 100644 --- a/contrib/test_decoding/sql/ddl.sql +++ b/contrib/test_decoding/sql/ddl.sql @@ -67,7 +67,7 @@ INSERT INTO replication_example(somedata, somenum) VALUES (4, 1); SELECT data FROM pg_logical_slot_get_changes('regression_slot', NULL, NULL, 'include-xids', '0', 'skip-empty-xacts', '1'); ALTER TABLE replication_example ALTER COLUMN somenum TYPE int4 USING (somenum::int4); --- throw away changes, they contain oids +-- check that this doesn't produce any changes from the heap rewrite SELECT count(data) FROM pg_logical_slot_get_changes('regression_slot', NULL, NULL, 'include-xids', '0', 'skip-empty-xacts', '1'); INSERT INTO replication_example(somedata, somenum) VALUES (5, 1); @@ -93,12 +93,11 @@ COMMIT; /* display results */ SELECT data FROM pg_logical_slot_get_changes('regression_slot', NULL, NULL, 'include-xids', '0', 'skip-empty-xacts', '1'); --- hide changes bc of oid visible in full table rewrites CREATE TABLE tr_unique(id2 serial unique NOT NULL, data int); INSERT INTO tr_unique(data) VALUES(10); ALTER TABLE tr_unique RENAME TO tr_pkey; ALTER TABLE tr_pkey ADD COLUMN id serial primary key; -SELECT count(data) FROM pg_logical_slot_get_changes('regression_slot', NULL, NULL, 'include-xids', '0', 'skip-empty-xacts', '1'); +SELECT data FROM pg_logical_slot_get_changes('regression_slot', NULL, NULL, 'include-xids', '0', 'skip-empty-xacts', '1', 'include-rewrites', '1'); INSERT INTO tr_pkey(data) VALUES(1); --show deletion with primary key diff --git a/contrib/test_decoding/sql/decoding_into_rel.sql b/contrib/test_decoding/sql/decoding_into_rel.sql index 54670fd39e..1068cec588 100644 --- a/contrib/test_decoding/sql/decoding_into_rel.sql +++ b/contrib/test_decoding/sql/decoding_into_rel.sql @@ -27,5 +27,16 @@ INSERT INTO changeresult SELECT * FROM changeresult; DROP TABLE changeresult; DROP TABLE somechange; + +-- check calling logical decoding from pl/pgsql +CREATE FUNCTION slot_changes_wrapper(slot_name name) RETURNS SETOF TEXT AS $$ +BEGIN + RETURN QUERY + SELECT data FROM pg_logical_slot_peek_changes(slot_name, NULL, NULL, 'include-xids', '0', 'skip-empty-xacts', '1'); +END$$ LANGUAGE plpgsql; + +SELECT * FROM slot_changes_wrapper('regression_slot'); + SELECT data FROM pg_logical_slot_get_changes('regression_slot', NULL, NULL, 'include-xids', '0', 'skip-empty-xacts', '1'); + SELECT 'stop' FROM pg_drop_replication_slot('regression_slot'); diff --git a/contrib/test_decoding/sql/replorigin.sql b/contrib/test_decoding/sql/replorigin.sql index 7870f0ea32..451cd4bc3b 100644 --- a/contrib/test_decoding/sql/replorigin.sql +++ b/contrib/test_decoding/sql/replorigin.sql @@ -13,6 +13,11 @@ SELECT pg_replication_origin_create('test_decoding: temp'); SELECT pg_replication_origin_drop('test_decoding: temp'); SELECT pg_replication_origin_drop('test_decoding: temp'); +-- various failure checks for undefined slots +select pg_replication_origin_advance('test_decoding: temp', '0/1'); +select pg_replication_origin_session_setup('test_decoding: temp'); +select pg_replication_origin_progress('test_decoding: temp', true); + SELECT 'init' FROM pg_create_logical_replication_slot('regression_slot', 'test_decoding'); -- origin tx diff --git a/contrib/test_decoding/sql/rewrite.sql 
b/contrib/test_decoding/sql/rewrite.sql index 8a7329423d..4271b82bea 100644 --- a/contrib/test_decoding/sql/rewrite.sql +++ b/contrib/test_decoding/sql/rewrite.sql @@ -3,6 +3,35 @@ SET synchronous_commit = on; DROP TABLE IF EXISTS replication_example; +-- Ensure there's tables with toast datums. To do so, we dynamically +-- create a function returning a large textblob. We want tables of +-- different kinds: mapped catalog table, unmapped catalog table, +-- shared catalog table and usertable. +CREATE FUNCTION exec(text) returns void language plpgsql volatile + AS $f$ + BEGIN + EXECUTE $1; + END; +$f$; +CREATE ROLE justforcomments NOLOGIN; + +SELECT exec( + format($outer$CREATE FUNCTION iamalongfunction() RETURNS TEXT IMMUTABLE LANGUAGE SQL AS $f$SELECT text %L$f$$outer$, + (SELECT repeat(string_agg(to_char(g.i, 'FM0000'), ''), 50) FROM generate_series(1, 500) g(i)))); +SELECT exec( + format($outer$COMMENT ON FUNCTION iamalongfunction() IS %L$outer$, + iamalongfunction())); +SELECT exec( + format($outer$COMMENT ON ROLE JUSTFORCOMMENTS IS %L$outer$, + iamalongfunction())); +CREATE TABLE iamalargetable AS SELECT iamalongfunction() longfunctionoutput; + +-- verify toast usage +SELECT pg_relation_size((SELECT reltoastrelid FROM pg_class WHERE oid = 'pg_proc'::regclass)) > 0; +SELECT pg_relation_size((SELECT reltoastrelid FROM pg_class WHERE oid = 'pg_description'::regclass)) > 0; +SELECT pg_relation_size((SELECT reltoastrelid FROM pg_class WHERE oid = 'pg_shdescription'::regclass)) > 0; + + SELECT 'init' FROM pg_create_logical_replication_slot('regression_slot', 'test_decoding'); CREATE TABLE replication_example(id SERIAL PRIMARY KEY, somedata int, text varchar(120)); INSERT INTO replication_example(somedata) VALUES (1); @@ -57,6 +86,17 @@ COMMIT; CHECKPOINT; SELECT data FROM pg_logical_slot_get_changes('regression_slot', NULL, NULL, 'include-xids', '0', 'skip-empty-xacts', '1'); -SELECT pg_drop_replication_slot('regression_slot'); +-- trigger repeated rewrites of a system catalog with a toast table, +-- that previously was buggy: 20180914021046.oi7dm4ra3ot2g2kt@alap3.anarazel.de +VACUUM FULL pg_proc; VACUUM FULL pg_description; VACUUM FULL pg_shdescription; VACUUM FULL iamalargetable; +INSERT INTO replication_example(somedata, testcolumn1, testcolumn3) VALUES (8, 6, 1); +VACUUM FULL pg_proc; VACUUM FULL pg_description; VACUUM FULL pg_shdescription; VACUUM FULL iamalargetable; +INSERT INTO replication_example(somedata, testcolumn1, testcolumn3) VALUES (9, 7, 1); +SELECT data FROM pg_logical_slot_get_changes('regression_slot', NULL, NULL, 'include-xids', '0', 'skip-empty-xacts', '1'); + +SELECT pg_drop_replication_slot('regression_slot'); DROP TABLE IF EXISTS replication_example; +DROP FUNCTION iamalongfunction(); +DROP FUNCTION exec(text); +DROP ROLE justforcomments; diff --git a/contrib/test_decoding/sql/slot.sql b/contrib/test_decoding/sql/slot.sql index fa9561f54e..c8d08f8541 100644 --- a/contrib/test_decoding/sql/slot.sql +++ b/contrib/test_decoding/sql/slot.sql @@ -9,6 +9,8 @@ SELECT 'init' FROM pg_create_logical_replication_slot('regression_slot_p', 'test SELECT 'init' FROM pg_create_logical_replication_slot('regression_slot_t2', 'test_decoding', true); +SELECT pg_create_logical_replication_slot('foo', 'nonexistent'); + -- here we want to start a new session and wait till old one is gone select pg_backend_pid() as oldpid \gset \c - @@ -45,6 +47,21 @@ INSERT INTO replication_example(somedata, text) VALUES (1, 3); SELECT data FROM pg_logical_slot_get_changes('regression_slot1', NULL, NULL, 
'include-xids', '0', 'skip-empty-xacts', '1'); SELECT data FROM pg_logical_slot_get_changes('regression_slot2', NULL, NULL, 'include-xids', '0', 'skip-empty-xacts', '1'); +INSERT INTO replication_example(somedata, text) VALUES (1, 4); +INSERT INTO replication_example(somedata, text) VALUES (1, 5); + +SELECT pg_current_wal_lsn() AS wal_lsn \gset + +INSERT INTO replication_example(somedata, text) VALUES (1, 6); + +SELECT end_lsn FROM pg_replication_slot_advance('regression_slot1', :'wal_lsn') \gset +SELECT slot_name FROM pg_replication_slot_advance('regression_slot2', pg_current_wal_lsn()); + +SELECT :'wal_lsn' = :'end_lsn'; + +SELECT data FROM pg_logical_slot_get_changes('regression_slot1', NULL, NULL, 'include-xids', '0', 'skip-empty-xacts', '1'); +SELECT data FROM pg_logical_slot_get_changes('regression_slot2', NULL, NULL, 'include-xids', '0', 'skip-empty-xacts', '1'); + DROP TABLE replication_example; -- error @@ -53,3 +70,9 @@ SELECT 'init' FROM pg_create_logical_replication_slot('regression_slot1', 'test_ -- both should error as they should be dropped on error SELECT pg_drop_replication_slot('regression_slot1'); SELECT pg_drop_replication_slot('regression_slot2'); + +-- slot advance with physical slot, error with non-reserved slot +SELECT slot_name FROM pg_create_physical_replication_slot('regression_slot3'); +SELECT pg_replication_slot_advance('regression_slot3', '0/0'); -- invalid LSN +SELECT pg_replication_slot_advance('regression_slot3', '0/1'); -- error +SELECT pg_drop_replication_slot('regression_slot3'); diff --git a/contrib/test_decoding/sql/truncate.sql b/contrib/test_decoding/sql/truncate.sql new file mode 100644 index 0000000000..5aecdf0881 --- /dev/null +++ b/contrib/test_decoding/sql/truncate.sql @@ -0,0 +1,13 @@ +-- predictability +SET synchronous_commit = on; + +SELECT 'init' FROM pg_create_logical_replication_slot('regression_slot', 'test_decoding'); + +CREATE TABLE tab1 (id serial unique, data int); +CREATE TABLE tab2 (a int primary key, b int); + +TRUNCATE tab1; +TRUNCATE tab1, tab1 RESTART IDENTITY CASCADE; +TRUNCATE tab1, tab2; + +SELECT data FROM pg_logical_slot_get_changes('regression_slot', NULL, NULL, 'include-xids', '0', 'skip-empty-xacts', '1'); diff --git a/contrib/test_decoding/test_decoding.c b/contrib/test_decoding/test_decoding.c index a1a7c2ae0c..1c439b57b0 100644 --- a/contrib/test_decoding/test_decoding.c +++ b/contrib/test_decoding/test_decoding.c @@ -3,7 +3,7 @@ * test_decoding.c * example logical decoding output plugin * - * Copyright (c) 2012-2017, PostgreSQL Global Development Group + * Copyright (c) 2012-2018, PostgreSQL Global Development Group * * IDENTIFICATION * contrib/test_decoding/test_decoding.c @@ -12,25 +12,15 @@ */ #include "postgres.h" -#include "access/sysattr.h" - -#include "catalog/pg_class.h" #include "catalog/pg_type.h" -#include "nodes/parsenodes.h" - -#include "replication/output_plugin.h" #include "replication/logical.h" -#include "replication/message.h" #include "replication/origin.h" #include "utils/builtins.h" #include "utils/lsyscache.h" #include "utils/memutils.h" #include "utils/rel.h" -#include "utils/relcache.h" -#include "utils/syscache.h" -#include "utils/typcache.h" PG_MODULE_MAGIC; @@ -62,6 +52,10 @@ static void pg_decode_commit_txn(LogicalDecodingContext *ctx, static void pg_decode_change(LogicalDecodingContext *ctx, ReorderBufferTXN *txn, Relation rel, ReorderBufferChange *change); +static void pg_decode_truncate(LogicalDecodingContext *ctx, + ReorderBufferTXN *txn, + int nrelations, Relation relations[], + 
ReorderBufferChange *change); static bool pg_decode_filter(LogicalDecodingContext *ctx, RepOriginId origin_id); static void pg_decode_message(LogicalDecodingContext *ctx, @@ -84,6 +78,7 @@ _PG_output_plugin_init(OutputPluginCallbacks *cb) cb->startup_cb = pg_decode_startup; cb->begin_cb = pg_decode_begin_txn; cb->change_cb = pg_decode_change; + cb->truncate_cb = pg_decode_truncate; cb->commit_cb = pg_decode_commit_txn; cb->filter_by_origin_cb = pg_decode_filter; cb->shutdown_cb = pg_decode_shutdown; @@ -111,6 +106,7 @@ pg_decode_startup(LogicalDecodingContext *ctx, OutputPluginOptions *opt, ctx->output_plugin_private = data; opt->output_type = OUTPUT_PLUGIN_TEXTUAL_OUTPUT; + opt->receive_rewrites = false; foreach(option, ctx->output_plugin_options) { @@ -176,6 +172,17 @@ pg_decode_startup(LogicalDecodingContext *ctx, OutputPluginOptions *opt, errmsg("could not parse value \"%s\" for parameter \"%s\"", strVal(elem->arg), elem->defname))); } + else if (strcmp(elem->defname, "include-rewrites") == 0) + { + + if (elem->arg == NULL) + continue; + else if (!parse_bool(strVal(elem->arg), &opt->receive_rewrites)) + ereport(ERROR, + (errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("could not parse value \"%s\" for parameter \"%s\"", + strVal(elem->arg), elem->defname))); + } else { ereport(ERROR, @@ -330,7 +337,7 @@ tuple_to_stringinfo(StringInfo s, TupleDesc tupdesc, HeapTuple tuple, bool skip_ Datum origval; /* possibly toasted Datum */ bool isnull; /* column is null? */ - attr = tupdesc->attrs[natt]; + attr = TupleDescAttr(tupdesc, natt); /* * don't print dropped columns, we can't be sure everything is @@ -422,6 +429,8 @@ pg_decode_change(LogicalDecodingContext *ctx, ReorderBufferTXN *txn, quote_qualified_identifier( get_namespace_name( get_rel_namespace(RelationGetRelid(relation))), + class_form->relrewrite ? 
+ get_rel_name(class_form->relrewrite) : NameStr(class_form->relname))); appendStringInfoChar(ctx->out, ':'); @@ -476,6 +485,59 @@ pg_decode_change(LogicalDecodingContext *ctx, ReorderBufferTXN *txn, OutputPluginWrite(ctx, true); } +static void +pg_decode_truncate(LogicalDecodingContext *ctx, ReorderBufferTXN *txn, + int nrelations, Relation relations[], ReorderBufferChange *change) +{ + TestDecodingData *data; + MemoryContext old; + int i; + + data = ctx->output_plugin_private; + + /* output BEGIN if we haven't yet */ + if (data->skip_empty_xacts && !data->xact_wrote_changes) + { + pg_output_begin(ctx, data, txn, false); + } + data->xact_wrote_changes = true; + + /* Avoid leaking memory by using and resetting our own context */ + old = MemoryContextSwitchTo(data->context); + + OutputPluginPrepareWrite(ctx, true); + + appendStringInfoString(ctx->out, "table "); + + for (i = 0; i < nrelations; i++) + { + if (i > 0) + appendStringInfoString(ctx->out, ", "); + + appendStringInfoString(ctx->out, + quote_qualified_identifier(get_namespace_name(relations[i]->rd_rel->relnamespace), + NameStr(relations[i]->rd_rel->relname))); + } + + appendStringInfoString(ctx->out, ": TRUNCATE:"); + + if (change->data.truncate.restart_seqs + || change->data.truncate.cascade) + { + if (change->data.truncate.restart_seqs) + appendStringInfo(ctx->out, " restart_seqs"); + if (change->data.truncate.cascade) + appendStringInfo(ctx->out, " cascade"); + } + else + appendStringInfoString(ctx->out, " (no-flags)"); + + MemoryContextSwitchTo(old); + MemoryContextReset(data->context); + + OutputPluginWrite(ctx, true); +} + static void pg_decode_message(LogicalDecodingContext *ctx, ReorderBufferTXN *txn, XLogRecPtr lsn, bool transactional, diff --git a/contrib/tsm_system_rows/tsm_system_rows.c b/contrib/tsm_system_rows/tsm_system_rows.c index 544458ec91..83f841f0c2 100644 --- a/contrib/tsm_system_rows/tsm_system_rows.c +++ b/contrib/tsm_system_rows/tsm_system_rows.c @@ -17,7 +17,7 @@ * won't visit blocks added after the first scan, but that is fine since * such blocks shouldn't contain any visible tuples anyway. * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * IDENTIFICATION diff --git a/contrib/tsm_system_time/tsm_system_time.c b/contrib/tsm_system_time/tsm_system_time.c index af8d025414..249d6f4d46 100644 --- a/contrib/tsm_system_time/tsm_system_time.c +++ b/contrib/tsm_system_time/tsm_system_time.c @@ -13,7 +13,7 @@ * However, we do what we can to reduce surprising behavior by selecting * the sampling pattern just once per query, much as in tsm_system_rows. * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * IDENTIFICATION @@ -24,9 +24,6 @@ #include "postgres.h" -#ifdef _MSC_VER -#include /* for _isnan */ -#endif #include #include "access/relscan.h" diff --git a/contrib/unaccent/generate_unaccent_rules.py b/contrib/unaccent/generate_unaccent_rules.py index 4b1b011861..859cac40fa 100644 --- a/contrib/unaccent/generate_unaccent_rules.py +++ b/contrib/unaccent/generate_unaccent_rules.py @@ -29,6 +29,15 @@ import sys import xml.etree.ElementTree as ET +# The ranges of Unicode characters that we consider to be "plain letters". 
+# For now we are being conservative by including only Latin and Greek. This +# could be extended in future based on feedback from people with relevant +# language knowledge. +PLAIN_LETTER_RANGES = ((ord('a'), ord('z')), # Latin lower case + (ord('A'), ord('Z')), # Latin upper case + (0x03b1, 0x03c9), # GREEK SMALL LETTER ALPHA, GREEK SMALL LETTER OMEGA + (0x0391, 0x03a9)) # GREEK CAPITAL LETTER ALPHA, GREEK CAPITAL LETTER OMEGA + def print_record(codepoint, letter): print (unichr(codepoint) + "\t" + letter).encode("UTF-8") @@ -39,9 +48,11 @@ def __init__(self, id, general_category, combining_ids): self.combining_ids = combining_ids def is_plain_letter(codepoint): - """Return true if codepoint represents a plain ASCII letter.""" - return (codepoint.id >= ord('a') and codepoint.id <= ord('z')) or \ - (codepoint.id >= ord('A') and codepoint.id <= ord('Z')) + """Return true if codepoint represents a "plain letter".""" + for begin, end in PLAIN_LETTER_RANGES: + if codepoint.id >= begin and codepoint.id <= end: + return True + return False def is_mark(codepoint): """Returns true for diacritical marks (combining codepoints).""" @@ -184,7 +195,7 @@ def main(args): len(codepoint.combining_ids) > 1: if is_letter_with_marks(codepoint, table): charactersSet.add((codepoint.id, - chr(get_plain_letter(codepoint, table).id))) + unichr(get_plain_letter(codepoint, table).id))) elif args.noLigaturesExpansion is False and is_ligature(codepoint, table): charactersSet.add((codepoint.id, "".join(unichr(combining_codepoint.id) diff --git a/contrib/unaccent/unaccent.c b/contrib/unaccent/unaccent.c index e08cca1707..dbf2bb9602 100644 --- a/contrib/unaccent/unaccent.c +++ b/contrib/unaccent/unaccent.c @@ -3,7 +3,7 @@ * unaccent.c * Text search unaccent dictionary * - * Copyright (c) 2009-2017, PostgreSQL Global Development Group + * Copyright (c) 2009-2018, PostgreSQL Global Development Group * * IDENTIFICATION * contrib/unaccent/unaccent.c @@ -20,7 +20,9 @@ #include "tsearch/ts_locale.h" #include "tsearch/ts_public.h" #include "utils/builtins.h" +#include "utils/lsyscache.h" #include "utils/regproc.h" +#include "utils/syscache.h" PG_MODULE_MAGIC; @@ -90,7 +92,7 @@ placeChar(TrieChar *node, const unsigned char *str, int lenstr, * Function converts UTF8-encoded file into current encoding. */ static TrieChar * -initTrie(char *filename) +initTrie(const char *filename) { TrieChar *volatile rootTrie = NULL; MemoryContext ccxt = CurrentMemoryContext; @@ -276,7 +278,7 @@ unaccent_init(PG_FUNCTION_ARGS) { DefElem *defel = (DefElem *) lfirst(l); - if (pg_strcasecmp("Rules", defel->defname) == 0) + if (strcmp(defel->defname, "rules") == 0) { if (fileloaded) ereport(ERROR, @@ -376,7 +378,21 @@ unaccent_dict(PG_FUNCTION_ARGS) if (PG_NARGS() == 1) { - dictOid = get_ts_dict_oid(stringToQualifiedNameList("unaccent"), false); + /* + * Use the "unaccent" dictionary that is in the same schema that this + * function is in. 
+ */ + Oid procnspid = get_func_namespace(fcinfo->flinfo->fn_oid); + const char *dictname = "unaccent"; + + dictOid = GetSysCacheOid2(TSDICTNAMENSP, + PointerGetDatum(dictname), + ObjectIdGetDatum(procnspid)); + if (!OidIsValid(dictOid)) + ereport(ERROR, + (errcode(ERRCODE_UNDEFINED_OBJECT), + errmsg("text search dictionary \"%s.%s\" does not exist", + get_namespace_name(procnspid), dictname))); strArg = 0; } else diff --git a/contrib/unaccent/unaccent.rules b/contrib/unaccent/unaccent.rules index 97f9ed47cf..76e4e69beb 100644 --- a/contrib/unaccent/unaccent.rules +++ b/contrib/unaccent/unaccent.rules @@ -399,6 +399,26 @@ ʦ ts ʪ ls ʫ lz +Ά Α +Έ Ε +Ή Η +Ί Ι +Ό Ο +Ύ Υ +Ώ Ω +ΐ ι +Ϊ Ι +Ϋ Υ +ά α +έ ε +ή η +ί ι +ΰ υ +ϊ ι +ϋ υ +ό ο +ύ υ +ώ ω Ё Е ё е ᴀ A @@ -709,6 +729,207 @@ ỽ v Ỿ Y ỿ y +ἀ α +ἁ α +ἂ α +ἃ α +ἄ α +ἅ α +ἆ α +ἇ α +Ἀ Α +Ἁ Α +Ἂ Α +Ἃ Α +Ἄ Α +Ἅ Α +Ἆ Α +Ἇ Α +ἐ ε +ἑ ε +ἒ ε +ἓ ε +ἔ ε +ἕ ε +Ἐ Ε +Ἑ Ε +Ἒ Ε +Ἓ Ε +Ἔ Ε +Ἕ Ε +ἠ η +ἡ η +ἢ η +ἣ η +ἤ η +ἥ η +ἦ η +ἧ η +Ἠ Η +Ἡ Η +Ἢ Η +Ἣ Η +Ἤ Η +Ἥ Η +Ἦ Η +Ἧ Η +ἰ ι +ἱ ι +ἲ ι +ἳ ι +ἴ ι +ἵ ι +ἶ ι +ἷ ι +Ἰ Ι +Ἱ Ι +Ἲ Ι +Ἳ Ι +Ἴ Ι +Ἵ Ι +Ἶ Ι +Ἷ Ι +ὀ ο +ὁ ο +ὂ ο +ὃ ο +ὄ ο +ὅ ο +Ὀ Ο +Ὁ Ο +Ὂ Ο +Ὃ Ο +Ὄ Ο +Ὅ Ο +ὐ υ +ὑ υ +ὒ υ +ὓ υ +ὔ υ +ὕ υ +ὖ υ +ὗ υ +Ὑ Υ +Ὓ Υ +Ὕ Υ +Ὗ Υ +ὠ ω +ὡ ω +ὢ ω +ὣ ω +ὤ ω +ὥ ω +ὦ ω +ὧ ω +Ὠ Ω +Ὡ Ω +Ὢ Ω +Ὣ Ω +Ὤ Ω +Ὥ Ω +Ὦ Ω +Ὧ Ω +ὰ α +ὲ ε +ὴ η +ὶ ι +ὸ ο +ὺ υ +ὼ ω +ᾀ α +ᾁ α +ᾂ α +ᾃ α +ᾄ α +ᾅ α +ᾆ α +ᾇ α +ᾈ Α +ᾉ Α +ᾊ Α +ᾋ Α +ᾌ Α +ᾍ Α +ᾎ Α +ᾏ Α +ᾐ η +ᾑ η +ᾒ η +ᾓ η +ᾔ η +ᾕ η +ᾖ η +ᾗ η +ᾘ Η +ᾙ Η +ᾚ Η +ᾛ Η +ᾜ Η +ᾝ Η +ᾞ Η +ᾟ Η +ᾠ ω +ᾡ ω +ᾢ ω +ᾣ ω +ᾤ ω +ᾥ ω +ᾦ ω +ᾧ ω +ᾨ Ω +ᾩ Ω +ᾪ Ω +ᾫ Ω +ᾬ Ω +ᾭ Ω +ᾮ Ω +ᾯ Ω +ᾰ α +ᾱ α +ᾲ α +ᾳ α +ᾴ α +ᾶ α +ᾷ α +Ᾰ Α +Ᾱ Α +Ὰ Α +ᾼ Α +ῂ η +ῃ η +ῄ η +ῆ η +ῇ η +Ὲ Ε +Ὴ Η +ῌ Η +ῐ ι +ῑ ι +ῒ ι +ῖ ι +ῗ ι +Ῐ Ι +Ῑ Ι +Ὶ Ι +ῠ υ +ῡ υ +ῢ υ +ῤ ρ +ῥ ρ +ῦ υ +ῧ υ +Ῠ Υ +Ῡ Υ +Ὺ Υ +Ῥ Ρ +ῲ ω +ῳ ω +ῴ ω +ῶ ω +ῷ ω +Ὸ Ο +Ὼ Ω +ῼ Ω ‐ - ‑ - ‒ - diff --git a/contrib/uuid-ossp/uuid-ossp.c b/contrib/uuid-ossp/uuid-ossp.c index db1f6b2b2f..ce96c7c64f 100644 --- a/contrib/uuid-ossp/uuid-ossp.c +++ b/contrib/uuid-ossp/uuid-ossp.c @@ -2,7 +2,7 @@ * * UUID generation functions using the BSD, E2FS or OSSP UUID library * - * Copyright (c) 2007-2017, PostgreSQL Global Development Group + * Copyright (c) 2007-2018, PostgreSQL Global Development Group * * Portions Copyright (c) 2009 Andrew Gierth * @@ -14,13 +14,10 @@ #include "postgres.h" #include "fmgr.h" +#include "port/pg_bswap.h" #include "utils/builtins.h" #include "utils/uuid.h" -/* for ntohl/htonl */ -#include -#include - /* * It's possible that there's more than one uuid.h header file present. * We expect configure to set the HAVE_ symbol for only the one we want. 
@@ -81,16 +78,16 @@ typedef struct #define UUID_TO_NETWORK(uu) \ do { \ - uu.time_low = htonl(uu.time_low); \ - uu.time_mid = htons(uu.time_mid); \ - uu.time_hi_and_version = htons(uu.time_hi_and_version); \ + uu.time_low = pg_hton32(uu.time_low); \ + uu.time_mid = pg_hton16(uu.time_mid); \ + uu.time_hi_and_version = pg_hton16(uu.time_hi_and_version); \ } while (0) #define UUID_TO_LOCAL(uu) \ do { \ - uu.time_low = ntohl(uu.time_low); \ - uu.time_mid = ntohs(uu.time_mid); \ - uu.time_hi_and_version = ntohs(uu.time_hi_and_version); \ + uu.time_low = pg_ntoh32(uu.time_low); \ + uu.time_mid = pg_ntoh16(uu.time_mid); \ + uu.time_hi_and_version = pg_ntoh16(uu.time_hi_and_version); \ } while (0) #define UUID_V3_OR_V5(uu, v) \ @@ -247,7 +244,7 @@ uuid_generate_v35_internal(int mode, pg_uuid_t *ns, text *name) #else /* !HAVE_UUID_OSSP */ static Datum -uuid_generate_internal(int v, unsigned char *ns, char *ptr, int len) +uuid_generate_internal(int v, unsigned char *ns, const char *ptr, int len) { char strbuf[40]; diff --git a/contrib/vacuumlo/.gitignore b/contrib/vacuumlo/.gitignore index 07f6ab4fd7..f3f0ce3d80 100644 --- a/contrib/vacuumlo/.gitignore +++ b/contrib/vacuumlo/.gitignore @@ -1 +1,3 @@ /vacuumlo + +/tmp_check/ diff --git a/contrib/vacuumlo/Makefile b/contrib/vacuumlo/Makefile index b4ba896fba..5de506151e 100644 --- a/contrib/vacuumlo/Makefile +++ b/contrib/vacuumlo/Makefile @@ -7,7 +7,9 @@ PROGRAM = vacuumlo OBJS = vacuumlo.o $(WIN32RES) PG_CPPFLAGS = -I$(libpq_srcdir) -PG_LIBS = $(libpq_pgport) +PG_LIBS_INTERNAL = $(libpq_pgport) + +EXTRA_CLEAN = tmp_check ifdef USE_PGXS PG_CONFIG = pg_config @@ -19,3 +21,9 @@ top_builddir = ../.. include $(top_builddir)/src/Makefile.global include $(top_srcdir)/contrib/contrib-global.mk endif + +check: + $(prove_check) + +installcheck: + $(prove_installcheck) diff --git a/contrib/vacuumlo/t/001_basic.pl b/contrib/vacuumlo/t/001_basic.pl new file mode 100644 index 0000000000..2bfb6ce17d --- /dev/null +++ b/contrib/vacuumlo/t/001_basic.pl @@ -0,0 +1,9 @@ +use strict; +use warnings; + +use TestLib; +use Test::More tests => 8; + +program_help_ok('vacuumlo'); +program_version_ok('vacuumlo'); +program_options_handling_ok('vacuumlo'); diff --git a/contrib/vacuumlo/vacuumlo.c b/contrib/vacuumlo/vacuumlo.c index a4d4553303..3075781abe 100644 --- a/contrib/vacuumlo/vacuumlo.c +++ b/contrib/vacuumlo/vacuumlo.c @@ -3,7 +3,7 @@ * vacuumlo.c * This removes orphaned large objects from a database. 
* - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * @@ -21,10 +21,12 @@ #include #endif -#include "catalog/pg_class.h" +#include "catalog/pg_class_d.h" +#include "fe_utils/connect.h" #include "libpq-fe.h" #include "pg_getopt.h" +#include "getopt_long.h" #define BUFSIZE 1024 @@ -140,11 +142,8 @@ vacuumlo(const char *database, const struct _param *param) fprintf(stdout, "Test run: no large objects will be removed!\n"); } - /* - * Don't get fooled by any non-system catalogs - */ - res = PQexec(conn, "SET search_path = pg_catalog"); - if (PQresultStatus(res) != PGRES_COMMAND_OK) + res = PQexec(conn, ALWAYS_SECURE_SEARCH_PATH_SQL); + if (PQresultStatus(res) != PGRES_TUPLES_OK) { fprintf(stderr, "Failed to set search_path:\n"); fprintf(stderr, "%s", PQerrorMessage(conn)); @@ -436,17 +435,17 @@ usage(const char *progname) printf("%s removes unreferenced large objects from databases.\n\n", progname); printf("Usage:\n %s [OPTION]... DBNAME...\n\n", progname); printf("Options:\n"); - printf(" -l LIMIT commit after removing each LIMIT large objects\n"); - printf(" -n don't remove large objects, just show what would be done\n"); - printf(" -v write a lot of progress messages\n"); - printf(" -V, --version output version information, then exit\n"); - printf(" -?, --help show this help, then exit\n"); + printf(" -l, --limit=LIMIT commit after removing each LIMIT large objects\n"); + printf(" -n, --dry-run don't remove large objects, just show what would be done\n"); + printf(" -v, --verbose write a lot of progress messages\n"); + printf(" -V, --version output version information, then exit\n"); + printf(" -?, --help show this help, then exit\n"); printf("\nConnection options:\n"); - printf(" -h HOSTNAME database server host or socket directory\n"); - printf(" -p PORT database server port\n"); - printf(" -U USERNAME user name to connect as\n"); - printf(" -w never prompt for password\n"); - printf(" -W force password prompt\n"); + printf(" -h, --host=HOSTNAME database server host or socket directory\n"); + printf(" -p, --port=PORT database server port\n"); + printf(" -U, --username=USERNAME user name to connect as\n"); + printf(" -w, --no-password never prompt for password\n"); + printf(" -W, --password force password prompt\n"); printf("\n"); printf("Report bugs to .\n"); } @@ -455,11 +454,26 @@ usage(const char *progname) int main(int argc, char **argv) { + static struct option long_options[] = { + {"host", required_argument, NULL, 'h'}, + {"limit", required_argument, NULL, 'l'}, + {"dry-run", no_argument, NULL, 'n'}, + {"port", required_argument, NULL, 'p'}, + {"username", required_argument, NULL, 'U'}, + {"verbose", no_argument, NULL, 'v'}, + {"version", no_argument, NULL, 'V'}, + {"no-password", no_argument, NULL, 'w'}, + {"password", no_argument, NULL, 'W'}, + {"help", no_argument, NULL, '?'}, + {NULL, 0, NULL, 0} + }; + int rc = 0; struct _param param; int c; int port; const char *progname; + int optindex; progname = get_progname(argv[0]); @@ -488,25 +502,15 @@ main(int argc, char **argv) } } - while (1) + while ((c = getopt_long(argc, argv, "h:l:np:U:vwW", long_options, &optindex)) != -1) { - c = getopt(argc, argv, "h:l:U:p:vnwW"); - if (c == -1) - break; - switch (c) { case '?': fprintf(stderr, _("Try \"%s --help\" for more information.\n"), progname); exit(1); - case ':': - exit(1); - case 'v': - param.verbose = 1; - break; - 
case 'n': - param.dry_run = 1; - param.verbose = 1; + case 'h': + param.pg_host = pg_strdup(optarg); break; case 'l': param.transaction_limit = strtol(optarg, NULL, 10); @@ -518,14 +522,9 @@ main(int argc, char **argv) exit(1); } break; - case 'U': - param.pg_user = pg_strdup(optarg); - break; - case 'w': - param.pg_prompt = TRI_NO; - break; - case 'W': - param.pg_prompt = TRI_YES; + case 'n': + param.dry_run = 1; + param.verbose = 1; break; case 'p': port = strtol(optarg, NULL, 10); @@ -536,9 +535,21 @@ main(int argc, char **argv) } param.pg_port = pg_strdup(optarg); break; - case 'h': - param.pg_host = pg_strdup(optarg); + case 'U': + param.pg_user = pg_strdup(optarg); break; + case 'v': + param.verbose = 1; + break; + case 'w': + param.pg_prompt = TRI_NO; + break; + case 'W': + param.pg_prompt = TRI_YES; + break; + default: + fprintf(stderr, _("Try \"%s --help\" for more information.\n"), progname); + exit(1); } } diff --git a/doc/bug.template b/doc/bug.template index 4d767bfd51..53e490433d 100644 --- a/doc/bug.template +++ b/doc/bug.template @@ -27,7 +27,7 @@ System Configuration: Operating System (example: Linux 2.4.18) : - PostgreSQL version (example: PostgreSQL 11devel): PostgreSQL 11devel + PostgreSQL version (example: PostgreSQL 12devel): PostgreSQL 12devel Compiler used (example: gcc 3.3.5) : diff --git a/doc/src/sgml/Makefile b/doc/src/sgml/Makefile index 8a73cc796f..74aac01c39 100644 --- a/doc/src/sgml/Makefile +++ b/doc/src/sgml/Makefile @@ -17,6 +17,8 @@ # to want to use. html: +# We don't need the tree-wide headers or install support here. +NO_GENERATED_HEADERS=yes NO_TEMP_INSTALL=yes subdir = doc/src/sgml @@ -37,15 +39,7 @@ ifndef FOP FOP = $(missing) fop endif -SGMLINCLUDE = -D . -D $(srcdir) - -ifndef NSGMLS -NSGMLS = $(missing) nsgmls -endif - -ifndef OSX -OSX = $(missing) osx -endif +XMLINCLUDE = --path . ifndef XMLLINT XMLLINT = $(missing) xmllint @@ -63,14 +57,6 @@ GENERATED_SGML = version.sgml \ ALLSGML := $(wildcard $(srcdir)/*.sgml $(srcdir)/ref/*.sgml) $(GENERATED_SGML) -# Enable some extra warnings -# -wfully-tagged needed to throw a warning on missing tags -# for older tool chains, 2007-08-31 -# Note: try "make SPFLAGS=-wxml" to catch a lot of other dubious constructs, -# in particular < and & that haven't been made into entities. It's far too -# noisy to turn on by default, unfortunately. -override SPFLAGS += -wall -wno-unused-param -wno-empty -wfully-tagged - ## ## Man pages @@ -78,9 +64,9 @@ override SPFLAGS += -wall -wno-unused-param -wno-empty -wfully-tagged man distprep-man: man-stamp -man-stamp: stylesheet-man.xsl postgres.xml - $(XMLLINT) --noout --valid postgres.xml - $(XSLTPROC) $(XSLTPROCFLAGS) $(XSLTPROC_MAN_FLAGS) $^ +man-stamp: stylesheet-man.xsl postgres.sgml $(ALLSGML) + $(XMLLINT) $(XMLINCLUDE) --noout --valid $(word 2,$^) + $(XSLTPROC) $(XMLINCLUDE) $(XSLTPROCFLAGS) $(XSLTPROC_MAN_FLAGS) $(wordlist 1,2,$^) touch $@ @@ -131,28 +117,8 @@ INSTALL.html: %.html : stylesheet-text.xsl %.xml $(XMLLINT) --noout --valid $*.xml $(XSLTPROC) $(XSLTPROCFLAGS) $(XSLTPROC_HTML_FLAGS) $^ >$@ -INSTALL.xml: standalone-install.sgml installation.sgml version.sgml - $(OSX) $(SPFLAGS) $(SGMLINCLUDE) -x lower $(filter-out version.sgml,$^) >$@.tmp - $(call mangle-xml,chapter) - - -## -## SGML->XML conversion -## - -# For obscure reasons, GNU make 3.81 complains about circular dependencies -# if we try to do "make all" in a VPATH build without the explicit -# $(srcdir) on the postgres.sgml dependency in this rule. GNU make bug? 
-postgres.xml: $(srcdir)/postgres.sgml $(ALLSGML) - $(OSX) $(SPFLAGS) $(SGMLINCLUDE) -x lower $< >$@.tmp - $(call mangle-xml,book) - -define mangle-xml -$(PERL) -p -e 's/\[(aacute|acirc|aelig|agrave|amp|aring|atilde|auml|bull|copy|eacute|egrave|gt|iacute|lt|mdash|nbsp|ntilde|oacute|ocirc|oslash|ouml|pi|quot|scaron|uuml) *\]/\&\1;/gi;' \ - -e '$$_ .= qq{\n} if $$. == 1;' \ - <$@.tmp > $@ -rm $@.tmp -endef +INSTALL.xml: standalone-profile.xsl standalone-install.xml postgres.sgml $(ALLSGML) + $(XSLTPROC) $(XMLINCLUDE) $(XSLTPROCFLAGS) --xinclude $(wordlist 1,2,$^) >$@ ## @@ -165,20 +131,20 @@ endif html: html-stamp -html-stamp: stylesheet.xsl postgres.xml - $(XMLLINT) --noout --valid postgres.xml - $(XSLTPROC) $(XSLTPROCFLAGS) $(XSLTPROC_HTML_FLAGS) $^ +html-stamp: stylesheet.xsl postgres.sgml $(ALLSGML) + $(XMLLINT) $(XMLINCLUDE) --noout --valid $(word 2,$^) + $(XSLTPROC) $(XMLINCLUDE) $(XSLTPROCFLAGS) $(XSLTPROC_HTML_FLAGS) $(wordlist 1,2,$^) cp $(srcdir)/stylesheet.css html/ touch $@ -htmlhelp: stylesheet-hh.xsl postgres.xml - $(XMLLINT) --noout --valid postgres.xml - $(XSLTPROC) $(XSLTPROCFLAGS) $^ +htmlhelp: stylesheet-hh.xsl postgres.sgml $(ALLSGML) + $(XMLLINT) $(XMLINCLUDE) --noout --valid $(word 2,$^) + $(XSLTPROC) $(XMLINCLUDE) $(XSLTPROCFLAGS) $(wordlist 1,2,$^) # single-page HTML -postgres.html: stylesheet-html-nochunk.xsl postgres.xml - $(XMLLINT) --noout --valid postgres.xml - $(XSLTPROC) $(XSLTPROCFLAGS) $(XSLTPROC_HTML_FLAGS) -o $@ $^ +postgres.html: stylesheet-html-nochunk.xsl postgres.sgml $(ALLSGML) + $(XMLLINT) $(XMLINCLUDE) --noout --valid $(word 2,$^) + $(XSLTPROC) $(XMLINCLUDE) $(XSLTPROCFLAGS) $(XSLTPROC_HTML_FLAGS) -o $@ $(wordlist 1,2,$^) # single-page text postgres.txt: postgres.html @@ -192,13 +158,13 @@ postgres.txt: postgres.html postgres.pdf: $(error Invalid target; use postgres-A4.pdf or postgres-US.pdf as targets) -%-A4.fo: stylesheet-fo.xsl %.xml - $(XMLLINT) --noout --valid $*.xml - $(XSLTPROC) $(XSLTPROCFLAGS) --stringparam paper.type A4 -o $@ $^ +%-A4.fo: stylesheet-fo.xsl %.sgml $(ALLSGML) + $(XMLLINT) $(XMLINCLUDE) --noout --valid $(word 2,$^) + $(XSLTPROC) $(XMLINCLUDE) $(XSLTPROCFLAGS) --stringparam paper.type A4 -o $@ $(wordlist 1,2,$^) -%-US.fo: stylesheet-fo.xsl %.xml - $(XMLLINT) --noout --valid $*.xml - $(XSLTPROC) $(XSLTPROCFLAGS) --stringparam paper.type USletter -o $@ $^ +%-US.fo: stylesheet-fo.xsl %.sgml $(ALLSGML) + $(XMLLINT) $(XMLINCLUDE) --noout --valid $(word 2,$^) + $(XSLTPROC) $(XMLINCLUDE) $(XSLTPROCFLAGS) --stringparam paper.type USletter -o $@ $(wordlist 1,2,$^) %.pdf: %.fo $(FOP) -fo $< -pdf $@ @@ -209,7 +175,7 @@ postgres.pdf: ## epub: postgres.epub -postgres.epub: postgres.xml +postgres.epub: postgres.sgml $(ALLSGML) $(XMLLINT) --noout --valid $< $(DBTOEPUB) $< @@ -222,7 +188,8 @@ DB2X_TEXIXML = db2x_texixml DB2X_XSLTPROC = db2x_xsltproc MAKEINFO = makeinfo -%.texixml: %.xml +%.texixml: %.sgml $(ALLSGML) + $(XMLLINT) --noout --valid $< $(DB2X_XSLTPROC) -s texi -g output-file=$(basename $@) $< -o $@ %.texi: %.texixml @@ -238,7 +205,7 @@ MAKEINFO = makeinfo # Quick syntax check without style processing check: postgres.sgml $(ALLSGML) check-tabs - $(NSGMLS) $(SPFLAGS) $(SGMLINCLUDE) -s $< + $(XMLLINT) $(XMLINCLUDE) --noout --valid $< ## @@ -300,7 +267,7 @@ endif # sqlmansectnum != 7 # tabs are harmless, but it is best to avoid them in SGML files check-tabs: - @( ! grep ' ' $(wildcard $(srcdir)/*.sgml $(srcdir)/ref/*.sgml $(srcdir)/*.dsl $(srcdir)/*.xsl) ) || (echo "Tabs appear in SGML/XML files" 1>&2; exit 1) + @( ! 
grep ' ' $(wildcard $(srcdir)/*.sgml $(srcdir)/ref/*.sgml $(srcdir)/*.xsl) ) || (echo "Tabs appear in SGML/XML files" 1>&2; exit 1) ## ## Clean @@ -308,7 +275,7 @@ check-tabs: # This allows removing some files from the distribution tarballs while # keeping the dependencies satisfied. -.SECONDARY: postgres.xml $(GENERATED_SGML) HTML.index +.SECONDARY: $(GENERATED_SGML) .SECONDARY: INSTALL.html INSTALL.xml .SECONDARY: postgres-A4.fo postgres-US.fo @@ -322,8 +289,6 @@ clean: rm -f *.fo *.pdf # generated SGML files rm -f $(GENERATED_SGML) -# SGML->XML conversion - rm -f postgres.xml *.tmp # HTML Help rm -f htmlhelp.hhp toc.hhc index.hhk # EPUB diff --git a/doc/src/sgml/acronyms.sgml b/doc/src/sgml/acronyms.sgml index 29f85e0846..411e368a9c 100644 --- a/doc/src/sgml/acronyms.sgml +++ b/doc/src/sgml/acronyms.sgml @@ -4,8 +4,8 @@ Acronyms - This is a list of acronyms commonly used in the PostgreSQL - documentation and in discussions about PostgreSQL. + This is a list of acronyms commonly used in the PostgreSQL + documentation and in discussions about PostgreSQL. @@ -13,7 +13,7 @@ ANSI - + American National Standards Institute @@ -23,7 +23,7 @@ API - Application Programming Interface + Application Programming Interface @@ -32,7 +32,7 @@ ASCII - American Standard + American Standard Code for Information Interchange @@ -51,7 +51,7 @@ CA - Certificate Authority + Certificate Authority @@ -61,7 +61,7 @@ Classless + url="https://en.wikipedia.org/wiki/Classless_Inter-Domain_Routing">Classless Inter-Domain Routing @@ -71,7 +71,7 @@ CPAN - Comprehensive Perl Archive Network + Comprehensive Perl Archive Network @@ -81,7 +81,7 @@ Certificate + url="https://en.wikipedia.org/wiki/Certificate_revocation_list">Certificate Revocation List @@ -92,7 +92,7 @@ Comma + url="https://en.wikipedia.org/wiki/Comma-separated_values">Comma Separated Values @@ -121,7 +121,7 @@ Database + url="https://en.wikipedia.org/wiki/Database_administrator">Database Administrator @@ -131,7 +131,7 @@ DBI - Database Interface (Perl) + Database Interface (Perl) @@ -140,7 +140,7 @@ DBMS - Database Management + Database Management System @@ -151,9 +151,9 @@ Data + url="https://en.wikipedia.org/wiki/Data_Definition_Language">Data Definition Language, SQL commands such as CREATE - TABLE, ALTER USER + TABLE, ALTER USER @@ -163,9 +163,9 @@ Data - Manipulation Language, SQL commands such as INSERT, - UPDATE, DELETE + url="https://en.wikipedia.org/wiki/Data_Manipulation_Language">Data + Manipulation Language, SQL commands such as INSERT, + UPDATE, DELETE @@ -175,7 +175,7 @@ Daylight + url="https://en.wikipedia.org/wiki/Daylight_saving_time">Daylight Saving Time @@ -194,7 +194,7 @@ ESQL - Embedded + Embedded SQL @@ -204,7 +204,7 @@ FAQ - Frequently Asked + Frequently Asked Questions @@ -232,7 +232,7 @@ GIN - Generalized Inverted Index + Generalized Inverted Index @@ -241,7 +241,7 @@ GiST - Generalized Search Tree + Generalized Search Tree @@ -251,7 +251,7 @@ Git + url="https://en.wikipedia.org/wiki/Git_(software)">Git @@ -260,7 +260,7 @@ GMT - Greenwich Mean Time + Greenwich Mean Time @@ -270,7 +270,7 @@ Generic + url="https://en.wikipedia.org/wiki/Generic_Security_Services_Application_Program_Interface">Generic Security Services Application Programming Interface @@ -281,7 +281,7 @@ Grand Unified Configuration, - the PostgreSQL subsystem that handles server configuration + the PostgreSQL subsystem that handles server configuration @@ -300,7 +300,7 @@ Heap-Only + 
url="https://git.postgresql.org/gitweb/?p=postgresql.git;a=blob;f=src/backend/access/heap/README.HOT;hb=HEAD">Heap-Only Tuples @@ -311,7 +311,7 @@ International + url="https://en.wikipedia.org/wiki/International_Electrotechnical_Commission">International Electrotechnical Commission @@ -332,7 +332,7 @@ Inter-Process + url="https://en.wikipedia.org/wiki/Inter-process_communication">Inter-Process Communication @@ -342,7 +342,7 @@ ISO - International Organization for + International Organization for Standardization @@ -352,7 +352,7 @@ ISSN - International Standard + International Standard Serial Number @@ -363,18 +363,38 @@ Java + url="https://en.wikipedia.org/wiki/Java_Database_Connectivity">Java Database Connectivity + + JIT + + + Just-in-Time + compilation + + + + + + JSON + + + JavaScript Object Notation + + + + LDAP Lightweight + url="https://en.wikipedia.org/wiki/Lightweight_Directory_Access_Protocol">Lightweight Directory Access Protocol @@ -384,7 +404,7 @@ LSN - Log Sequence Number, see pg_lsn + Log Sequence Number, see pg_lsn and WAL Internals. @@ -395,7 +415,7 @@ Microsoft + url="https://en.wikipedia.org/wiki/Visual_C++">Microsoft Visual C @@ -415,7 +435,7 @@ National + url="https://en.wikipedia.org/wiki/Internationalization_and_localization">National Language Support @@ -426,7 +446,7 @@ Open + url="https://en.wikipedia.org/wiki/Open_Database_Connectivity">Open Database Connectivity @@ -445,7 +465,7 @@ OLAP - Online Analytical + Online Analytical Processing @@ -455,7 +475,7 @@ OLTP - Online Transaction + Online Transaction Processing @@ -465,7 +485,7 @@ ORDBMS - Object-Relational + Object-Relational Database Management System @@ -476,7 +496,7 @@ Pluggable + url="https://en.wikipedia.org/wiki/Pluggable_Authentication_Modules">Pluggable Authentication Modules @@ -486,7 +506,7 @@ PGSQL - PostgreSQL + PostgreSQL @@ -495,7 +515,7 @@ PGXS - PostgreSQL Extension System + PostgreSQL Extension System @@ -504,7 +524,7 @@ PID - Process Identifier + Process Identifier @@ -532,7 +552,7 @@ POSIX - Portable Operating + Portable Operating System Interface @@ -543,7 +563,7 @@ Relational + url="https://en.wikipedia.org/wiki/Relational_database_management_system">Relational Database Management System @@ -554,7 +574,7 @@ Request For + url="https://en.wikipedia.org/wiki/Request_for_Comments">Request For Comments @@ -564,7 +584,7 @@ SGML - Standard Generalized + Standard Generalized Markup Language @@ -583,7 +603,7 @@ SP-GiST - Space-Partitioned Generalized Search Tree + Space-Partitioned Generalized Search Tree @@ -592,7 +612,7 @@ SQL - Structured Query Language + Structured Query Language @@ -610,7 +630,7 @@ SSH - Secure + Secure Shell @@ -620,7 +640,7 @@ SSL - Secure Sockets Layer + Secure Sockets Layer @@ -629,7 +649,7 @@ SSPI - Security + Security Support Provider Interface @@ -639,7 +659,7 @@ SYSV - Unix System V + Unix System V @@ -649,7 +669,7 @@ Transmission + url="https://en.wikipedia.org/wiki/Transmission_Control_Protocol">Transmission Control Protocol (TCP) / Internet Protocol (IP) @@ -687,7 +707,7 @@ URL - Uniform Resource + Uniform Resource Locator @@ -698,7 +718,7 @@ Coordinated + url="https://en.wikipedia.org/wiki/Coordinated_Universal_Time">Coordinated Universal Time @@ -718,7 +738,7 @@ UTF8 - Eight-Bit Unicode + Eight-Bit Unicode Transformation Format @@ -755,7 +775,7 @@ XML - Extensible Markup + Extensible Markup Language diff --git a/doc/src/sgml/adminpack.sgml b/doc/src/sgml/adminpack.sgml index fddf90c4a5..2655417366 100644 --- a/doc/src/sgml/adminpack.sgml +++ 
b/doc/src/sgml/adminpack.sgml @@ -8,24 +8,27 @@ - adminpack provides a number of support functions which - pgAdmin and other administration and management tools can + adminpack provides a number of support functions which + pgAdmin and other administration and management tools can use to provide additional functionality, such as remote management of server log files. - Use of all these functions is restricted to superusers. + Use of all these functions is only allowed to the superuser by default but may be + allowed to other users by using the GRANT command. - The functions shown in provide + The functions shown in provide write access to files on the machine hosting the server. (See also the - functions in , which + functions in , which provide read-only access.) - Only files within the database cluster directory can be accessed, but - either a relative or absolute path is allowable. + Only files within the database cluster directory can be accessed, unless the + user is a superuser or given one of the pg_read_server_files, or pg_write_server_files + roles, as appropriate for the function, but either a relative or absolute path is + allowable. - <filename>adminpack</> Functions + <filename>adminpack</filename> Functions Name Return Type Description @@ -58,7 +61,7 @@ pg_catalog.pg_logdir_ls() setof record - List the log files in the log_directory directory + List the log files in the log_directory directory @@ -69,9 +72,9 @@ pg_file_write - pg_file_write writes the specified data into - the file named by filename. If append is - false, the file must not already exist. If append is true, + pg_file_write writes the specified data into + the file named by filename. If append is + false, the file must not already exist. If append is true, the file can already exist, and will be appended to if so. Returns the number of bytes written. @@ -80,15 +83,15 @@ pg_file_rename - pg_file_rename renames a file. If archivename - is omitted or NULL, it simply renames oldname - to newname (which must not already exist). - If archivename is provided, it first - renames newname to archivename (which must - not already exist), and then renames oldname - to newname. In event of failure of the second rename step, - it will try to rename archivename back - to newname before reporting the error. + pg_file_rename renames a file. If archivename + is omitted or NULL, it simply renames oldname + to newname (which must not already exist). + If archivename is provided, it first + renames newname to archivename (which must + not already exist), and then renames oldname + to newname. In event of failure of the second rename step, + it will try to rename archivename back + to newname before reporting the error. Returns true on success, false if the source file(s) are not present or not writable; other cases throw errors. @@ -97,66 +100,20 @@ pg_file_unlink - pg_file_unlink removes the specified file. + pg_file_unlink removes the specified file. Returns true on success, false if the specified file is not present - or the unlink() call fails; other cases throw errors. + or the unlink() call fails; other cases throw errors. pg_logdir_ls - pg_logdir_ls returns the start timestamps and path - names of all the log files in the - directory. The parameter must have its - default setting (postgresql-%Y-%m-%d_%H%M%S.log) to use this + pg_logdir_ls returns the start timestamps and path + names of all the log files in the + directory. The parameter must have its + default setting (postgresql-%Y-%m-%d_%H%M%S.log) to use this function. 
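+
+  As a minimal usage sketch (assuming the adminpack extension is
+  installed and the caller has been granted EXECUTE on these functions;
+  the file name below is an arbitrary placeholder, resolved relative to
+  the database cluster directory):
+
+    SELECT pg_catalog.pg_file_write('adminpack_demo.txt', 'hello', false);
+    SELECT pg_catalog.pg_file_rename('adminpack_demo.txt', 'adminpack_demo.bak');
+    SELECT pg_catalog.pg_file_unlink('adminpack_demo.bak');
+
+  pg_file_write here returns the number of bytes written, and the other
+  two calls return true on success, as described above.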
- - The functions shown - in are deprecated - and should not be used in new applications; instead use those shown - in - and . These functions are - provided in adminpack only for compatibility with old - versions of pgAdmin. - - -
- Deprecated <filename>adminpack</> Functions - - - Name Return Type Description - - - - - - pg_catalog.pg_file_read(filename text, offset bigint, nbytes bigint) - text - - Alternate name for pg_read_file() - - - - pg_catalog.pg_file_length(filename text) - bigint - - Same as size column returned - by pg_stat_file() - - - - pg_catalog.pg_logfile_rotate() - integer - - Alternate name for pg_rotate_logfile(), but note that it - returns integer 0 or 1 rather than boolean - - - - -
- diff --git a/doc/src/sgml/advanced.sgml b/doc/src/sgml/advanced.sgml index f47c01987b..ae5f3fac75 100644 --- a/doc/src/sgml/advanced.sgml +++ b/doc/src/sgml/advanced.sgml @@ -18,12 +18,12 @@ This chapter will on occasion refer to examples found in to change or improve them, so it will be + linkend="tutorial-sql"/> to change or improve them, so it will be useful to have read that chapter. Some examples from this chapter can also be found in advanced.sql in the tutorial directory. This file also contains some sample data to load, which is not - repeated here. (Refer to for + repeated here. (Refer to for how to use the file.) @@ -37,7 +37,7 @@ - Refer back to the queries in . + Refer back to the queries in . Suppose the combined listing of weather records and city location is of particular interest to your application, but you do not want to type the query each time you need it. You can create a @@ -82,7 +82,7 @@ SELECT * FROM myview; Recall the weather and cities tables from . Consider the following problem: You + linkend="tutorial-sql"/>. Consider the following problem: You want to make sure that no one can insert rows in the weather table that do not have a matching entry in the cities table. This is called @@ -129,7 +129,7 @@ DETAIL: Key (city)=(Berkeley) is not present in table "cities". The behavior of foreign keys can be finely tuned to your application. We will not go beyond this simple example in this - tutorial, but just refer you to + tutorial, but just refer you to for more information. Making correct use of foreign keys will definitely improve the quality of your database applications, so you are strongly encouraged to learn about them. @@ -145,7 +145,7 @@ DETAIL: Key (city)=(Berkeley) is not present in table "cities". - Transactions are a fundamental concept of all database + Transactions are a fundamental concept of all database systems. The essential point of a transaction is that it bundles multiple steps into a single, all-or-nothing operation. The intermediate states between the steps are not visible to other concurrent transactions, @@ -182,8 +182,8 @@ UPDATE branches SET balance = balance + 100.00 remain a happy customer if she was debited without Bob being credited. We need a guarantee that if something goes wrong partway through the operation, none of the steps executed so far will take effect. Grouping - the updates into a transaction gives us this guarantee. - A transaction is said to be atomic: from the point of + the updates into a transaction gives us this guarantee. + A transaction is said to be atomic: from the point of view of other transactions, it either happens completely or not at all. @@ -216,9 +216,9 @@ UPDATE branches SET balance = balance + 100.00 - In PostgreSQL, a transaction is set up by surrounding + In PostgreSQL, a transaction is set up by surrounding the SQL commands of the transaction with - BEGIN and COMMIT commands. So our banking + BEGIN and COMMIT commands. So our banking transaction would actually look like: @@ -233,23 +233,23 @@ COMMIT; If, partway through the transaction, we decide we do not want to commit (perhaps we just noticed that Alice's balance went negative), - we can issue the command ROLLBACK instead of - COMMIT, and all our updates so far will be canceled. + we can issue the command ROLLBACK instead of + COMMIT, and all our updates so far will be canceled. - PostgreSQL actually treats every SQL statement as being - executed within a transaction. 
If you do not issue a BEGIN + PostgreSQL actually treats every SQL statement as being + executed within a transaction. If you do not issue a BEGIN command, - then each individual statement has an implicit BEGIN and - (if successful) COMMIT wrapped around it. A group of - statements surrounded by BEGIN and COMMIT - is sometimes called a transaction block. + then each individual statement has an implicit BEGIN and + (if successful) COMMIT wrapped around it. A group of + statements surrounded by BEGIN and COMMIT + is sometimes called a transaction block. - Some client libraries issue BEGIN and COMMIT + Some client libraries issue BEGIN and COMMIT commands automatically, so that you might get the effect of transaction blocks without asking. Check the documentation for the interface you are using. @@ -258,11 +258,11 @@ COMMIT; It's possible to control the statements in a transaction in a more - granular fashion through the use of savepoints. Savepoints + granular fashion through the use of savepoints. Savepoints allow you to selectively discard parts of the transaction, while committing the rest. After defining a savepoint with - SAVEPOINT, you can if needed roll back to the savepoint - with ROLLBACK TO. All the transaction's database changes + SAVEPOINT, you can if needed roll back to the savepoint + with ROLLBACK TO. All the transaction's database changes between defining the savepoint and rolling back to it are discarded, but changes earlier than the savepoint are kept. @@ -308,7 +308,7 @@ COMMIT; This example is, of course, oversimplified, but there's a lot of control possible in a transaction block through the use of savepoints. - Moreover, ROLLBACK TO is the only way to regain control of a + Moreover, ROLLBACK TO is the only way to regain control of a transaction block that was put in aborted state by the system due to an error, short of rolling it back completely and starting again. @@ -325,7 +325,7 @@ COMMIT; - A window function performs a calculation across a set of + A window function performs a calculation across a set of table rows that are somehow related to the current row. This is comparable to the type of calculation that can be done with an aggregate function. However, window functions do not cause rows to become grouped into a single @@ -360,31 +360,31 @@ SELECT depname, empno, salary, avg(salary) OVER (PARTITION BY depname) FROM emps The first three output columns come directly from the table - empsalary, and there is one output row for each row in the + empsalary, and there is one output row for each row in the table. The fourth column represents an average taken across all the table - rows that have the same depname value as the current row. - (This actually is the same function as the non-window avg - aggregate, but the OVER clause causes it to be + rows that have the same depname value as the current row. + (This actually is the same function as the non-window avg + aggregate, but the OVER clause causes it to be treated as a window function and computed across the window frame.) - A window function call always contains an OVER clause + A window function call always contains an OVER clause directly following the window function's name and argument(s). This is what syntactically distinguishes it from a normal function or non-window - aggregate. The OVER clause determines exactly how the + aggregate. The OVER clause determines exactly how the rows of the query are split up for processing by the window function. 
- The PARTITION BY clause within OVER + The PARTITION BY clause within OVER divides the rows into groups, or partitions, that share the same - values of the PARTITION BY expression(s). For each row, + values of the PARTITION BY expression(s). For each row, the window function is computed across the rows that fall into the same partition as the current row. You can also control the order in which rows are processed by - window functions using ORDER BY within OVER. - (The window ORDER BY does not even have to match the + window functions using ORDER BY within OVER. + (The window ORDER BY does not even have to match the order in which the rows are output.) Here is an example: @@ -409,48 +409,48 @@ FROM empsalary; (10 rows) - As shown here, the rank function produces a numerical rank - for each distinct ORDER BY value in the current row's - partition, using the order defined by the ORDER BY clause. - rank needs no explicit parameter, because its behavior - is entirely determined by the OVER clause. + As shown here, the rank function produces a numerical rank + for each distinct ORDER BY value in the current row's + partition, using the order defined by the ORDER BY clause. + rank needs no explicit parameter, because its behavior + is entirely determined by the OVER clause. The rows considered by a window function are those of the virtual - table produced by the query's FROM clause as filtered by its - WHERE, GROUP BY, and HAVING clauses + table produced by the query's FROM clause as filtered by its + WHERE, GROUP BY, and HAVING clauses if any. For example, a row removed because it does not meet the - WHERE condition is not seen by any window function. + WHERE condition is not seen by any window function. A query can contain multiple window functions that slice up the data - in different ways using different OVER clauses, but + in different ways using different OVER clauses, but they all act on the same collection of rows defined by this virtual table. - We already saw that ORDER BY can be omitted if the ordering + We already saw that ORDER BY can be omitted if the ordering of rows is not important. It is also possible to omit PARTITION - BY, in which case there is a single partition containing all rows. + BY, in which case there is a single partition containing all rows. There is another important concept associated with window functions: for each row, there is a set of rows within its partition called its - window frame. Some window functions act only + window frame. Some window functions act only on the rows of the window frame, rather than of the whole partition. - By default, if ORDER BY is supplied then the frame consists of + By default, if ORDER BY is supplied then the frame consists of all rows from the start of the partition up through the current row, plus any following rows that are equal to the current row according to the - ORDER BY clause. When ORDER BY is omitted the + ORDER BY clause. When ORDER BY is omitted the default frame consists of all rows in the partition. There are options to define the window frame in other ways, but this tutorial does not cover them. See - for details. + for details. 
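As a hedged sketch of the frame options not covered here, an explicit ROWS clause replaces the default frame described above and counts physical rows only, so following peers with equal salaries are excluded from each running total:

-- assumes the empsalary table from the surrounding examples
SELECT salary,
       sum(salary) OVER (ORDER BY salary
                         ROWS BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW)
FROM empsalary;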
- Here is an example using sum: + Here is an example using sum: @@ -474,11 +474,11 @@ SELECT salary, sum(salary) OVER () FROM empsalary; - Above, since there is no ORDER BY in the OVER + Above, since there is no ORDER BY in the OVER clause, the window frame is the same as the partition, which for lack of - PARTITION BY is the whole table; in other words each sum is + PARTITION BY is the whole table; in other words each sum is taken over the whole table and so we get the same result for each output - row. But if we add an ORDER BY clause, we get very different + row. But if we add an ORDER BY clause, we get very different results: @@ -510,8 +510,8 @@ SELECT salary, sum(salary) OVER (ORDER BY salary) FROM empsalary; Window functions are permitted only in the SELECT list - and the ORDER BY clause of the query. They are forbidden - elsewhere, such as in GROUP BY, HAVING + and the ORDER BY clause of the query. They are forbidden + elsewhere, such as in GROUP BY, HAVING and WHERE clauses. This is because they logically execute after the processing of those clauses. Also, window functions execute after non-window aggregate functions. This means it is valid to @@ -534,15 +534,15 @@ WHERE pos < 3; The above query only shows the rows from the inner query having - rank less than 3. + rank less than 3. When a query involves multiple window functions, it is possible to write - out each one with a separate OVER clause, but this is + out each one with a separate OVER clause, but this is duplicative and error-prone if the same windowing behavior is wanted for several functions. Instead, each windowing behavior can be named - in a WINDOW clause and then referenced in OVER. + in a WINDOW clause and then referenced in OVER. For example: @@ -554,10 +554,10 @@ SELECT sum(salary) OVER w, avg(salary) OVER w More details about window functions can be found in - , - , - , and the - reference page. + , + , + , and the + reference page. @@ -623,13 +623,13 @@ CREATE TABLE capitals ( In this case, a row of capitals - inherits all columns (name, - population, and altitude) from its + inherits all columns (name, + population, and altitude) from its parent, cities. The type of the column name is text, a native PostgreSQL type for variable length character strings. State capitals have - an extra column, state, that shows their state. In + an extra column, state, that shows their state. In PostgreSQL, a table can inherit from zero or more other tables. @@ -692,7 +692,7 @@ SELECT name, altitude Although inheritance is frequently useful, it has not been integrated with unique constraints or foreign keys, which limits its usefulness. - See for more detail. + See for more detail. diff --git a/doc/src/sgml/amcheck.sgml b/doc/src/sgml/amcheck.sgml index dd71dbd679..8bb60d5c2d 100644 --- a/doc/src/sgml/amcheck.sgml +++ b/doc/src/sgml/amcheck.sgml @@ -8,19 +8,19 @@ - The amcheck module provides functions that allow you to - verify the logical consistency of the structure of indexes. If the + The amcheck module provides functions that allow you to + verify the logical consistency of the structure of relations. If the structure appears to be valid, no error is raised. - The functions verify various invariants in the - structure of the representation of particular indexes. The + The functions verify various invariants in the + structure of the representation of particular relations. The correctness of the access method functions behind index scans and other important operations relies on these invariants always holding. 
For example, certain functions verify, among other things, - that all B-Tree pages have items in logical order (e.g., - for B-Tree indexes on text, index tuples should be in + that all B-Tree pages have items in logical order (e.g., + for B-Tree indexes on text, index tuples should be in collated lexical order). If that particular invariant somehow fails to hold, we can expect binary searches on the affected page to incorrectly guide index scans, resulting in wrong answers to SQL @@ -31,11 +31,11 @@ index scans themselves, which may be user-defined operator class code. For example, B-Tree index verification relies on comparisons made with one or more B-Tree support function 1 routines. See for details of operator class support + linkend="xindex-support"/> for details of operator class support functions. - amcheck functions may be used only by superusers. + amcheck functions may only be used by superusers. @@ -44,7 +44,7 @@ - bt_index_check(index regclass) returns void + bt_index_check(index regclass, heapallindexed boolean) returns void bt_index_check @@ -55,7 +55,9 @@ bt_index_check tests that its target, a B-Tree index, respects a variety of invariants. Example usage: -test=# SELECT bt_index_check(c.oid), c.relname, c.relpages +test=# SELECT bt_index_check(index => c.oid, heapallindexed => i.indisunique), + c.relname, + c.relpages FROM pg_index i JOIN pg_opclass op ON i.indclass[0] = op.oid JOIN pg_am am ON op.opcmethod = am.oid @@ -65,7 +67,7 @@ WHERE am.amname = 'btree' AND n.nspname = 'pg_catalog' -- Don't check temp tables, which may be from another session: AND c.relpersistence != 't' -- Function may throw an error when this is omitted: -AND i.indisready AND i.indisvalid +AND c.relkind = 'i' AND i.indisready AND i.indisvalid ORDER BY c.relpages DESC LIMIT 10; bt_index_check | relname | relpages ----------------+---------------------------------+---------- @@ -81,24 +83,26 @@ ORDER BY c.relpages DESC LIMIT 10; | pg_amop_fam_strat_index | 5 (10 rows) - This example shows a session that performs verification of every - catalog index in the database test. Details of just - the 10 largest indexes verified are displayed. Since no error - is raised, all indexes tested appear to be logically consistent. - Naturally, this query could easily be changed to call - bt_index_check for every index in the + This example shows a session that performs verification of the + 10 largest catalog indexes in the database test. + Verification of the presence of heap tuples as index tuples is + requested for the subset that are unique indexes. Since no + error is raised, all indexes tested appear to be logically + consistent. Naturally, this query could easily be changed to + call bt_index_check for every index in the database where verification is supported. - bt_index_check acquires an AccessShareLock + bt_index_check acquires an AccessShareLock on the target index and the heap relation it belongs to. This lock mode is the same lock mode acquired on relations by simple - SELECT statements. + SELECT statements. bt_index_check does not verify invariants - that span child/parent relationships, nor does it verify that - the target index is consistent with its heap relation. When a - routine, lightweight test for corruption is required in a live - production environment, using + that span child/parent relationships, but will verify the + presence of all heap tuples as index tuples within the index + when heapallindexed is + true. 
When a routine, lightweight test for + corruption is required in a live production environment, using bt_index_check often provides the best trade-off between thoroughness of verification and limiting the impact on application performance and availability. @@ -108,7 +112,7 @@ ORDER BY c.relpages DESC LIMIT 10; - bt_index_parent_check(index regclass) returns void + bt_index_parent_check(index regclass, heapallindexed boolean) returns void bt_index_parent_check @@ -117,28 +121,31 @@ ORDER BY c.relpages DESC LIMIT 10; bt_index_parent_check tests that its - target, a B-Tree index, respects a variety of invariants. The - checks performed by bt_index_parent_check - are a superset of the checks performed by - bt_index_check. + target, a B-Tree index, respects a variety of invariants. + Optionally, when the heapallindexed + argument is true, the function verifies the + presence of all heap tuples that should be found within the + index, and that there are no missing downlinks in the index + structure. The checks that can be performed by + bt_index_parent_check are a superset of the + checks that can be performed by bt_index_check. bt_index_parent_check can be thought of as a more thorough variant of bt_index_check: unlike bt_index_check, bt_index_parent_check also checks - invariants that span parent/child relationships. However, it - does not verify that the target index is consistent with its - heap relation. bt_index_parent_check - follows the general convention of raising an error if it finds a - logical inconsistency or other problem. + invariants that span parent/child relationships. + bt_index_parent_check follows the general + convention of raising an error if it finds a logical + inconsistency or other problem. - A ShareLock is required on the target index by + A ShareLock is required on the target index by bt_index_parent_check (a - ShareLock is also acquired on the heap relation). + ShareLock is also acquired on the heap relation). These locks prevent concurrent data modification from - INSERT, UPDATE, and DELETE + INSERT, UPDATE, and DELETE commands. The locks also prevent the underlying relation from - being concurrently processed by VACUUM, as well as + being concurrently processed by VACUUM, as well as all other utility commands. Note that the function holds locks only while running, not for the entire transaction. @@ -159,13 +166,54 @@ ORDER BY c.relpages DESC LIMIT 10; - Using <filename>amcheck</> effectively + Optional <parameter>heapallindexed</parameter> verification + + When the heapallindexed argument to + verification functions is true, an additional + phase of verification is performed against the table associated with + the target index relation. This consists of a dummy + CREATE INDEX operation, which checks for the + presence of all hypothetical new index tuples against a temporary, + in-memory summarizing structure (this is built when needed during + the basic first phase of verification). The summarizing structure + fingerprints every tuple found within the target + index. The high level principle behind + heapallindexed verification is that a new + index that is equivalent to the existing, target index must only + have entries that can be found in the existing structure. + + + The additional heapallindexed phase adds + significant overhead: verification will typically take several times + longer. However, there is no change to the relation-level locks + acquired when heapallindexed verification is + performed. 
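A minimal sketch of requesting the optional phase directly; the index name is hypothetical and stands in for any B-Tree index of interest:

-- hypothetical index name; heapallindexed => true enables the extra phase
SELECT bt_index_check('some_unique_index'::regclass, heapallindexed => true);
SELECT bt_index_parent_check('some_unique_index'::regclass, heapallindexed => true);

The second call acquires the stronger ShareLock described above, so it is better suited to a maintenance window than to routine checks on a busy server.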
+ + + The summarizing structure is bound in size by + maintenance_work_mem. In order to ensure that + there is no more than a 2% probability of failure to detect an + inconsistency for each heap tuple that should be represented in the + index, approximately 2 bytes of memory are needed per tuple. As + less memory is made available per tuple, the probability of missing + an inconsistency slowly increases. This approach limits the + overhead of verification significantly, while only slightly reducing + the probability of detecting a problem, especially for installations + where verification is treated as a routine maintenance task. Any + single absent or malformed tuple has a new opportunity to be + detected with each new verification attempt. + + + + + + Using <filename>amcheck</filename> effectively - amcheck can be effective at detecting various types of + amcheck can be effective at detecting various types of failure modes that data page - checksums will always fail to catch. These include: + checksums will always fail to catch. These include: @@ -176,13 +224,13 @@ ORDER BY c.relpages DESC LIMIT 10; This includes issues caused by the comparison rules of operating system collations changing. Comparisons of datums of a collatable - type like text must be immutable (just as all + type like text must be immutable (just as all comparisons used for B-Tree index scans must be immutable), which implies that operating system collation rules must never change. Though rare, updates to operating system collation rules can cause these issues. More commonly, an inconsistency in the collation order between a master server and a standby server is - implicated, possibly because the major operating + implicated, possibly because the major operating system version in use is inconsistent. Such inconsistencies will generally only arise on standby servers, and so can generally only be detected on standby servers. @@ -190,27 +238,40 @@ ORDER BY c.relpages DESC LIMIT 10; If a problem like this arises, it may not affect each individual index that is ordered using an affected collation, simply because - indexed values might happen to have the same + indexed values might happen to have the same absolute ordering regardless of the behavioral inconsistency. See - and for - further details about how PostgreSQL uses + and for + further details about how PostgreSQL uses operating system locales and collations. + + + Structural inconsistencies between indexes and the heap relations + that are indexed (when heapallindexed + verification is performed). + + + There is no cross-checking of indexes against their heap relation + during normal operation. Symptoms of heap corruption can be subtle. + + Corruption caused by hypothetical undiscovered bugs in the - underlying PostgreSQL access method code or sort - code. + underlying PostgreSQL access method + code, sort code, or transaction management code. Automatic verification of the structural integrity of indexes plays a role in the general testing of new or proposed - PostgreSQL features that could plausibly allow a - logical inconsistency to be introduced. One obvious testing - strategy is to call amcheck functions continuously + PostgreSQL features that could plausibly allow a + logical inconsistency to be introduced. Verification of table + structure and associated visibility and transaction status + information plays a similar role. One obvious testing strategy + is to call amcheck functions continuously when running the standard regression tests. 
See for details on running the tests. + linkend="regress-run"/> for details on running the tests. @@ -219,22 +280,21 @@ ORDER BY c.relpages DESC LIMIT 10; simply not be enabled. - Note that amcheck examines a page as represented in some + Note that amcheck examines a page as represented in some shared memory buffer at the time of verification if there is only a shared buffer hit when accessing the block. Consequently, - amcheck does not necessarily examine data read from the + amcheck does not necessarily examine data read from the file system at the time of verification. Note that when checksums are - enabled, amcheck may raise an error due to a checksum + enabled, amcheck may raise an error due to a checksum failure when a corrupt block is read into a buffer. - Corruption caused by faulty RAM, and the broader memory subsystem - and operating system. + Corruption caused by faulty RAM, or the broader memory subsystem. - PostgreSQL does not protect against correctable + PostgreSQL does not protect against correctable memory errors and it is assumed you will operate using RAM that uses industry standard Error Correcting Codes (ECC) or better protection. However, ECC memory is typically only immune to @@ -242,9 +302,15 @@ ORDER BY c.relpages DESC LIMIT 10; absolute protection against failures that result in memory corruption. + + When heapallindexed verification is + performed, there is generally a greatly increased chance of + detecting single-bit errors, since strict binary equality is + tested, and the indexed attributes within the heap are tested. + - In general, amcheck can only prove the presence of + In general, amcheck can only prove the presence of corruption; it cannot prove its absence. @@ -252,19 +318,18 @@ ORDER BY c.relpages DESC LIMIT 10; Repairing corruption - No error concerning corruption raised by amcheck should - ever be a false positive. In practice, amcheck is more - likely to find software bugs than problems with hardware. - amcheck raises errors in the event of conditions that, - by definition, should never happen, and so careful analysis of - amcheck errors is often required. + No error concerning corruption raised by amcheck should + ever be a false positive. amcheck raises + errors in the event of conditions that, by definition, should never + happen, and so careful analysis of amcheck + errors is often required. There is no general method of repairing problems that - amcheck detects. An explanation for the root cause of + amcheck detects. An explanation for the root cause of an invariant violation should be sought. may play a useful role in diagnosing - corruption that amcheck detects. A REINDEX + linkend="pageinspect"/> may play a useful role in diagnosing + corruption that amcheck detects. A REINDEX may not be effective in repairing corruption. diff --git a/doc/src/sgml/arch-dev.sgml b/doc/src/sgml/arch-dev.sgml index c835e87215..53f8049df3 100644 --- a/doc/src/sgml/arch-dev.sgml +++ b/doc/src/sgml/arch-dev.sgml @@ -7,7 +7,7 @@ Author This chapter originated as part of - , Stefan Simkovics' + , Stefan Simkovics' Master's Thesis prepared at Vienna University of Technology under the direction of O.Univ.Prof.Dr. Georg Gottlob and Univ.Ass. Mag. Katrin Seyr. @@ -118,7 +118,7 @@ PostgreSQL is implemented using a - simple process per user client/server model. In this model + simple process per user client/server model. In this model there is one client process connected to exactly one server process. 
As we do not know ahead of time how many connections will be made, we have to @@ -136,10 +136,10 @@ The client process can be any program that understands the PostgreSQL protocol described in - . Many clients are based on the - C-language library libpq, but several independent + . Many clients are based on the + C-language library libpq, but several independent implementations of the protocol exist, such as the Java - JDBC driver. + JDBC driver. @@ -184,8 +184,8 @@ text) for valid syntax. If the syntax is correct a parse tree is built up and handed back; otherwise an error is returned. The parser and lexer are - implemented using the well-known Unix tools bison - and flex. + implemented using the well-known Unix tools bison + and flex. @@ -251,7 +251,7 @@ back by the parser as input and does the semantic interpretation needed to understand which tables, functions, and operators are referenced by the query. The data structure that is built to represent this - information is called the query tree. + information is called the query tree. @@ -259,10 +259,10 @@ system catalog lookups can only be done within a transaction, and we do not wish to start a transaction immediately upon receiving a query string. The raw parsing stage is sufficient to identify the transaction - control commands (BEGIN, ROLLBACK, etc), and + control commands (BEGIN, ROLLBACK, etc), and these can then be correctly executed without any further analysis. Once we know that we are dealing with an actual query (such as - SELECT or UPDATE), it is okay to + SELECT or UPDATE), it is okay to start a transaction if we're not already in one. Only then can the transformation process be invoked. @@ -270,10 +270,10 @@ The query tree created by the transformation process is structurally similar to the raw parse tree in most places, but it has many differences - in detail. For example, a FuncCall node in the + in detail. For example, a FuncCall node in the parse tree represents something that looks syntactically like a function - call. This might be transformed to either a FuncExpr - or Aggref node depending on whether the referenced + call. This might be transformed to either a FuncExpr + or Aggref node depending on whether the referenced name turns out to be an ordinary function or an aggregate function. Also, information about the actual data types of columns and expression results is added to the query tree. @@ -317,7 +317,7 @@ The query rewriter is discussed in some detail in - , so there is no need to cover it here. + , so there is no need to cover it here. We will only point out that both the input and the output of the rewriter are query trees, that is, there is no change in the representation or level of semantic detail in the trees. Rewriting @@ -347,17 +347,17 @@ involving large numbers of join operations. In order to determine a reasonable (not necessarily optimal) query plan in a reasonable amount of time, PostgreSQL uses a Genetic - Query Optimizer (see ) when the number of joins - exceeds a threshold (see ). + Query Optimizer (see ) when the number of joins + exceeds a threshold (see ). The planner's search procedure actually works with data structures - called paths, which are simply cut-down representations of + called paths, which are simply cut-down representations of plans containing only as much information as the planner needs to make its decisions. After the cheapest path is determined, a full-fledged - plan tree is built to pass to the executor. This represents + plan tree is built to pass to the executor. 
This represents the desired execution plan in sufficient detail for the executor to run it. In the rest of this section we'll ignore the distinction between paths and plans. @@ -378,12 +378,12 @@ relation.attribute OPR constant. If relation.attribute happens to match the key of the B-tree index and OPR is one of the operators listed in - the index's operator class, another plan is created using + the index's operator class, another plan is created using the B-tree index to scan the relation. If there are further indexes present and the restrictions in the query happen to match a key of an index, further plans will be considered. Index scan plans are also generated for indexes that have a sort ordering that can match the - query's ORDER BY clause (if any), or a sort ordering that + query's ORDER BY clause (if any), or a sort ordering that might be useful for merge joining (see below). @@ -438,7 +438,7 @@ - If the query uses fewer than + If the query uses fewer than relations, a near-exhaustive search is conducted to find the best join sequence. The planner preferentially considers joins between any two relations for which there exist a corresponding join clause in the @@ -454,7 +454,7 @@ When geqo_threshold is exceeded, the join sequences considered are determined by heuristics, as described - in . Otherwise the process is the same. + in . Otherwise the process is the same. @@ -462,9 +462,9 @@ the base relations, plus nested-loop, merge, or hash join nodes as needed, plus any auxiliary steps needed, such as sort nodes or aggregate-function calculation nodes. Most of these plan node - types have the additional ability to do selection + types have the additional ability to do selection (discarding rows that do not meet a specified Boolean condition) - and projection (computation of a derived column set + and projection (computation of a derived column set based on given column values, that is, evaluation of scalar expressions where needed). One of the responsibilities of the planner is to attach selection conditions from the @@ -496,7 +496,7 @@ subplan) is, let's say, a Sort node and again recursion is needed to obtain an input row. The child node of the Sort might - be a SeqScan node, representing actual reading of a table. + be a SeqScan node, representing actual reading of a table. Execution of this node causes the executor to fetch a row from the table and return it up to the calling node. The Sort node will repeatedly call its child to obtain all the rows to be sorted. @@ -529,24 +529,24 @@ The executor mechanism is used to evaluate all four basic SQL query types: - SELECT, INSERT, UPDATE, and - DELETE. For SELECT, the top-level executor + SELECT, INSERT, UPDATE, and + DELETE. For SELECT, the top-level executor code only needs to send each row returned by the query plan tree off - to the client. For INSERT, each returned row is inserted - into the target table specified for the INSERT. This is - done in a special top-level plan node called ModifyTable. + to the client. For INSERT, each returned row is inserted + into the target table specified for the INSERT. This is + done in a special top-level plan node called ModifyTable. (A simple - INSERT ... VALUES command creates a trivial plan tree - consisting of a single Result node, which computes just one - result row, and ModifyTable above it to perform the insertion. - But INSERT ... SELECT can demand the full power - of the executor mechanism.) For UPDATE, the planner arranges + INSERT ... 
VALUES command creates a trivial plan tree + consisting of a single Result node, which computes just one + result row, and ModifyTable above it to perform the insertion. + But INSERT ... SELECT can demand the full power + of the executor mechanism.) For UPDATE, the planner arranges that each computed row includes all the updated column values, plus - the TID (tuple ID, or row ID) of the original target row; - this data is fed into a ModifyTable node, which uses the + the TID (tuple ID, or row ID) of the original target row; + this data is fed into a ModifyTable node, which uses the information to create a new updated row and mark the old row deleted. - For DELETE, the only column that is actually returned by the - plan is the TID, and the ModifyTable node simply uses the TID + For DELETE, the only column that is actually returned by the + plan is the TID, and the ModifyTable node simply uses the TID to visit each target row and mark it deleted. diff --git a/doc/src/sgml/array.sgml b/doc/src/sgml/array.sgml index 58878451f0..a473fa8ee8 100644 --- a/doc/src/sgml/array.sgml +++ b/doc/src/sgml/array.sgml @@ -10,9 +10,8 @@ PostgreSQL allows columns of a table to be defined as variable-length multidimensional arrays. Arrays of any - built-in or user-defined base type, enum type, or composite type - can be created. - Arrays of domains are not yet supported. + built-in or user-defined base type, enum type, composite type, range type, + or domain can be created. @@ -33,7 +32,7 @@ CREATE TABLE sal_emp ( ); As shown, an array data type is named by appending square brackets - ([]) to the data type name of the array elements. The + ([]) to the data type name of the array elements. The above command will create a table named sal_emp with a column of type text (name), a @@ -70,7 +69,7 @@ CREATE TABLE tictactoe ( An alternative syntax, which conforms to the SQL standard by using - the keyword ARRAY, can be used for one-dimensional arrays. + the keyword ARRAY, can be used for one-dimensional arrays. pay_by_quarter could have been defined as: @@ -80,7 +79,7 @@ CREATE TABLE tictactoe ( pay_by_quarter integer ARRAY, - As before, however, PostgreSQL does not enforce the + As before, however, PostgreSQL does not enforce the size restriction in any case. @@ -108,8 +107,8 @@ CREATE TABLE tictactoe ( for the type, as recorded in its pg_type entry. Among the standard data types provided in the PostgreSQL distribution, all use a comma - (,), except for type box which uses a semicolon - (;). Each val is + (,), except for type box which uses a semicolon + (;). Each val is either a constant of the array element type, or a subarray. An example of an array constant is: @@ -120,16 +119,16 @@ CREATE TABLE tictactoe ( - To set an element of an array constant to NULL, write NULL + To set an element of an array constant to NULL, write NULL for the element value. (Any upper- or lower-case variant of - NULL will do.) If you want an actual string value - NULL, you must put double quotes around it. + NULL will do.) If you want an actual string value + NULL, you must put double quotes around it. (These kinds of array constants are actually only a special case of the generic type constants discussed in . The constant is initially + linkend="sql-syntax-constants-generic"/>. The constant is initially treated as a string and passed to the array input conversion routine. An explicit type specification might be necessary.) 
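For instance, as a minimal sketch, an explicit cast supplies the element type when nothing else in the statement does:

SELECT '{10000, 10000, 10000, 10000}'::integer[] AS pay_by_quarter;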
@@ -177,7 +176,7 @@ ERROR: multidimensional arrays must have array expressions with matching dimens - The ARRAY constructor syntax can also be used: + The ARRAY constructor syntax can also be used: INSERT INTO sal_emp VALUES ('Bill', @@ -191,9 +190,9 @@ INSERT INTO sal_emp Notice that the array elements are ordinary SQL constants or expressions; for instance, string literals are single quoted, instead of - double quoted as they would be in an array literal. The ARRAY + double quoted as they would be in an array literal. The ARRAY constructor syntax is discussed in more detail in - . + . @@ -223,8 +222,8 @@ SELECT name FROM sal_emp WHERE pay_by_quarter[1] <> pay_by_quarter[2]; The array subscript numbers are written within square brackets. By default PostgreSQL uses a one-based numbering convention for arrays, that is, - an array of n elements starts with array[1] and - ends with array[n]. + an array of n elements starts with array[1] and + ends with array[n]. @@ -260,8 +259,8 @@ SELECT schedule[1:2][1:1] FROM sal_emp WHERE name = 'Bill'; If any dimension is written as a slice, i.e., contains a colon, then all dimensions are treated as slices. Any dimension that has only a single number (no colon) is treated as being from 1 - to the number specified. For example, [2] is treated as - [1:2], as in this example: + to the number specified. For example, [2] is treated as + [1:2], as in this example: SELECT schedule[1:2][2] FROM sal_emp WHERE name = 'Bill'; @@ -273,7 +272,7 @@ SELECT schedule[1:2][2] FROM sal_emp WHERE name = 'Bill'; To avoid confusion with the non-slice case, it's best to use slice syntax - for all dimensions, e.g., [1:2][1:1], not [2][1:1]. + for all dimensions, e.g., [1:2][1:1], not [2][1:1]. @@ -303,9 +302,9 @@ SELECT schedule[:][1:1] FROM sal_emp WHERE name = 'Bill'; An array subscript expression will return null if either the array itself or any of the subscript expressions are null. Also, null is returned if a subscript is outside the array bounds (this case does not raise an error). - For example, if schedule - currently has the dimensions [1:3][1:2] then referencing - schedule[3][3] yields NULL. Similarly, an array reference + For example, if schedule + currently has the dimensions [1:3][1:2] then referencing + schedule[3][3] yields NULL. Similarly, an array reference with the wrong number of subscripts yields a null rather than an error. @@ -424,16 +423,16 @@ UPDATE sal_emp SET pay_by_quarter[1:2] = '{27000,27000}' A stored array value can be enlarged by assigning to elements not already present. Any positions between those previously present and the newly assigned elements will be filled with nulls. For example, if array - myarray currently has 4 elements, it will have six - elements after an update that assigns to myarray[6]; - myarray[5] will contain null. + myarray currently has 4 elements, it will have six + elements after an update that assigns to myarray[6]; + myarray[5] will contain null. Currently, enlargement in this fashion is only allowed for one-dimensional arrays, not multidimensional arrays. Subscripted assignment allows creation of arrays that do not use one-based - subscripts. For example one might assign to myarray[-2:7] to + subscripts. For example one might assign to myarray[-2:7] to create an array with subscript values from -2 to 7. @@ -458,8 +457,8 @@ SELECT ARRAY[5,6] || ARRAY[[1,2],[3,4]]; The concatenation operator allows a single element to be pushed onto the beginning or end of a one-dimensional array. 
It also accepts two - N-dimensional arrays, or an N-dimensional - and an N+1-dimensional array. + N-dimensional arrays, or an N-dimensional + and an N+1-dimensional array. @@ -502,10 +501,10 @@ SELECT array_dims(ARRAY[[1,2],[3,4]] || ARRAY[[5,6],[7,8],[9,0]]); - When an N-dimensional array is pushed onto the beginning - or end of an N+1-dimensional array, the result is - analogous to the element-array case above. Each N-dimensional - sub-array is essentially an element of the N+1-dimensional + When an N-dimensional array is pushed onto the beginning + or end of an N+1-dimensional array, the result is + analogous to the element-array case above. Each N-dimensional + sub-array is essentially an element of the N+1-dimensional array's outer dimension. For example: SELECT array_dims(ARRAY[1,2] || ARRAY[[3,4],[5,6]]); @@ -588,9 +587,9 @@ SELECT array_append(ARRAY[1, 2], NULL); -- this might have been meant The heuristic it uses to resolve the constant's type is to assume it's of the same type as the operator's other input — in this case, integer array. So the concatenation operator is presumed to - represent array_cat, not array_append. When + represent array_cat, not array_append. When that's the wrong choice, it could be fixed by casting the constant to the - array's element type; but explicit use of array_append might + array's element type; but explicit use of array_append might be a preferable solution. @@ -617,7 +616,7 @@ SELECT * FROM sal_emp WHERE pay_by_quarter[1] = 10000 OR However, this quickly becomes tedious for large arrays, and is not helpful if the size of the array is unknown. An alternative method is - described in . The above + described in . The above query could be replaced by: @@ -634,7 +633,7 @@ SELECT * FROM sal_emp WHERE 10000 = ALL (pay_by_quarter);
- Alternatively, the generate_subscripts function can be used. + Alternatively, the generate_subscripts function can be used. For example: @@ -645,26 +644,26 @@ SELECT * FROM WHERE pay_by_quarter[s] = 10000; - This function is described in . + This function is described in . - You can also search an array using the && operator, + You can also search an array using the && operator, which checks whether the left operand overlaps with the right operand. For instance: -SELECT * FROM sal_emp WHERE pay_by_quarter && ARRAY[10000]; +SELECT * FROM sal_emp WHERE pay_by_quarter && ARRAY[10000]; This and other array operators are further described in - . It can be accelerated by an appropriate - index, as described in . + . It can be accelerated by an appropriate + index, as described in . - You can also search for specific values in an array using the array_position - and array_positions functions. The former returns the subscript of + You can also search for specific values in an array using the array_position + and array_positions functions. The former returns the subscript of the first occurrence of a value in an array; the latter returns an array with the subscripts of all occurrences of the value in the array. For example: @@ -704,13 +703,13 @@ SELECT array_positions(ARRAY[1, 4, 3, 1, 3, 4, 2, 1], 1); The external text representation of an array value consists of items that are interpreted according to the I/O conversion rules for the array's element type, plus decoration that indicates the array structure. - The decoration consists of curly braces ({ and }) + The decoration consists of curly braces ({ and }) around the array value plus delimiter characters between adjacent items. - The delimiter character is usually a comma (,) but can be - something else: it is determined by the typdelim setting + The delimiter character is usually a comma (,) but can be + something else: it is determined by the typdelim setting for the array's element type. Among the standard data types provided in the PostgreSQL distribution, all use a comma, - except for type box, which uses a semicolon (;). + except for type box, which uses a semicolon (;). In a multidimensional array, each dimension (row, plane, cube, etc.) gets its own level of curly braces, and delimiters must be written between adjacent curly-braced entities of the same level. @@ -720,7 +719,7 @@ SELECT array_positions(ARRAY[1, 4, 3, 1, 3, 4, 2, 1], 1); The array output routine will put double quotes around element values if they are empty strings, contain curly braces, delimiter characters, double quotes, backslashes, or white space, or match the word - NULL. Double quotes and backslashes + NULL. Double quotes and backslashes embedded in element values will be backslash-escaped. For numeric data types it is safe to assume that double quotes will never appear, but for textual data types one should be prepared to cope with either the presence @@ -732,10 +731,10 @@ SELECT array_positions(ARRAY[1, 4, 3, 1, 3, 4, 2, 1], 1); set to one. To represent arrays with other lower bounds, the array subscript ranges can be specified explicitly before writing the array contents. - This decoration consists of square brackets ([]) + This decoration consists of square brackets ([]) around each array dimension's lower and upper bounds, with - a colon (:) delimiter character in between. The - array dimension decoration is followed by an equal sign (=). + a colon (:) delimiter character in between. The + array dimension decoration is followed by an equal sign (=). 
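As a small sketch of that representation, a two-dimensional value uses one level of curly braces per dimension, with the delimiter between entities at each level:

SELECT ARRAY[[1,2,3],[4,5,6]];

The result is displayed as {{1,2,3},{4,5,6}}.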
For example: SELECT f1[1][-2][3] AS e1, f1[1][-1][5] AS e2 @@ -751,25 +750,25 @@ SELECT f1[1][-2][3] AS e1, f1[1][-1][5] AS e2 - If the value written for an element is NULL (in any case + If the value written for an element is NULL (in any case variant), the element is taken to be NULL. The presence of any quotes or backslashes disables this and allows the literal string value - NULL to be entered. Also, for backward compatibility with - pre-8.2 versions of PostgreSQL, the configuration parameter can be turned - off to suppress recognition of NULL as a NULL. + NULL to be entered. Also, for backward compatibility with + pre-8.2 versions of PostgreSQL, the configuration parameter can be turned + off to suppress recognition of NULL as a NULL. As shown previously, when writing an array value you can use double - quotes around any individual array element. You must do so + quotes around any individual array element. You must do so if the element value would otherwise confuse the array-value parser. For example, elements containing curly braces, commas (or the data type's delimiter character), double quotes, backslashes, or leading or trailing whitespace must be double-quoted. Empty strings and strings matching the - word NULL must be quoted, too. To put a double quote or - backslash in a quoted array element value, use escape string syntax - and precede it with a backslash. Alternatively, you can avoid quotes and use + word NULL must be quoted, too. To put a double + quote or backslash in a quoted array element value, precede it + with a backslash. Alternatively, you can avoid quotes and use backslash-escaping to protect all data characters that would otherwise be taken as array syntax. @@ -782,33 +781,12 @@ SELECT f1[1][-2][3] AS e1, f1[1][-1][5] AS e2 non-whitespace characters of an element, is not ignored. - - - Remember that what you write in an SQL command will first be interpreted - as a string literal, and then as an array. This doubles the number of - backslashes you need. For example, to insert a text array - value containing a backslash and a double quote, you'd need to write: - -INSERT ... VALUES (E'{"\\\\","\\""}'); - - The escape string processor removes one level of backslashes, so that - what arrives at the array-value parser looks like {"\\","\""}. - In turn, the strings fed to the text data type's input routine - become \ and " respectively. (If we were working - with a data type whose input routine also treated backslashes specially, - bytea for example, we might need as many as eight backslashes - in the command to get one backslash into the stored array element.) - Dollar quoting (see ) can be - used to avoid the need to double backslashes. - - - - The ARRAY constructor syntax (see - ) is often easier to work + The ARRAY constructor syntax (see + ) is often easier to work with than the array-literal syntax when writing array values in SQL - commands. In ARRAY, individual element values are written the + commands. In ARRAY, individual element values are written the same way they would be written when not members of an array. diff --git a/doc/src/sgml/auth-delay.sgml b/doc/src/sgml/auth-delay.sgml index 9a6e3e9bb4..bd3ef7128d 100644 --- a/doc/src/sgml/auth-delay.sgml +++ b/doc/src/sgml/auth-delay.sgml @@ -18,7 +18,7 @@ In order to function, this module must be loaded via - in postgresql.conf. + in postgresql.conf. 
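Assuming the parameter referred to here is shared_preload_libraries (the usual mechanism for modules that must be preloaded), one way to arrange this from SQL is sketched below; a server restart is still required, and ALTER SYSTEM overwrites any existing list:

-- assumes auth_delay is the only library being preloaded; takes effect after restart
ALTER SYSTEM SET shared_preload_libraries = 'auth_delay';

Editing postgresql.conf directly, as the text says, is equivalent.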
@@ -29,7 +29,7 @@ auth_delay.milliseconds (int) - auth_delay.milliseconds configuration parameter + auth_delay.milliseconds configuration parameter @@ -42,7 +42,7 @@ - These parameters must be set in postgresql.conf. + These parameters must be set in postgresql.conf. Typical usage might be: diff --git a/doc/src/sgml/auto-explain.sgml b/doc/src/sgml/auto-explain.sgml index 38e6f50c80..120b168d45 100644 --- a/doc/src/sgml/auto-explain.sgml +++ b/doc/src/sgml/auto-explain.sgml @@ -10,7 +10,7 @@ The auto_explain module provides a means for logging execution plans of slow statements automatically, without - having to run + having to run by hand. This is especially helpful for tracking down un-optimized queries in large applications. @@ -24,10 +24,10 @@ LOAD 'auto_explain'; (You must be superuser to do that.) More typical usage is to preload - it into some or all sessions by including auto_explain in - or - in - postgresql.conf. Then you can track unexpectedly slow queries + it into some or all sessions by including auto_explain in + or + in + postgresql.conf. Then you can track unexpectedly slow queries no matter when they happen. Of course there is a price in overhead for that. @@ -47,7 +47,7 @@ LOAD 'auto_explain'; auto_explain.log_min_duration (integer) - auto_explain.log_min_duration configuration parameter + auto_explain.log_min_duration configuration parameter @@ -66,13 +66,13 @@ LOAD 'auto_explain'; auto_explain.log_analyze (boolean) - auto_explain.log_analyze configuration parameter + auto_explain.log_analyze configuration parameter - auto_explain.log_analyze causes EXPLAIN ANALYZE - output, rather than just EXPLAIN output, to be printed + auto_explain.log_analyze causes EXPLAIN ANALYZE + output, rather than just EXPLAIN output, to be printed when an execution plan is logged. This parameter is off by default. Only superusers can change this setting. @@ -92,14 +92,14 @@ LOAD 'auto_explain'; auto_explain.log_buffers (boolean) - auto_explain.log_buffers configuration parameter + auto_explain.log_buffers configuration parameter auto_explain.log_buffers controls whether buffer usage statistics are printed when an execution plan is logged; it's - equivalent to the BUFFERS option of EXPLAIN. + equivalent to the BUFFERS option of EXPLAIN. This parameter has no effect unless auto_explain.log_analyze is enabled. This parameter is off by default. @@ -112,14 +112,14 @@ LOAD 'auto_explain'; auto_explain.log_timing (boolean) - auto_explain.log_timing configuration parameter + auto_explain.log_timing configuration parameter auto_explain.log_timing controls whether per-node timing information is printed when an execution plan is logged; it's - equivalent to the TIMING option of EXPLAIN. + equivalent to the TIMING option of EXPLAIN. The overhead of repeatedly reading the system clock can slow down queries significantly on some systems, so it may be useful to set this parameter to off when only actual row counts, and not exact times, are @@ -136,7 +136,7 @@ LOAD 'auto_explain'; auto_explain.log_triggers (boolean) - auto_explain.log_triggers configuration parameter + auto_explain.log_triggers configuration parameter @@ -155,14 +155,14 @@ LOAD 'auto_explain'; auto_explain.log_verbose (boolean) - auto_explain.log_verbose configuration parameter + auto_explain.log_verbose configuration parameter auto_explain.log_verbose controls whether verbose details are printed when an execution plan is logged; it's - equivalent to the VERBOSE option of EXPLAIN. + equivalent to the VERBOSE option of EXPLAIN. 
This parameter is off by default. Only superusers can change this setting. @@ -173,13 +173,13 @@ LOAD 'auto_explain'; auto_explain.log_format (enum) - auto_explain.log_format configuration parameter + auto_explain.log_format configuration parameter auto_explain.log_format selects the - EXPLAIN output format to be used. + EXPLAIN output format to be used. The allowed values are text, xml, json, and yaml. The default is text. Only superusers can change this setting. @@ -187,11 +187,32 @@ LOAD 'auto_explain'; + + + auto_explain.log_level (enum) + + auto_explain.log_level configuration parameter + + + + + auto_explain.log_level selects the log level at which + auto_explain will log the query plan. + Valid values are DEBUG5, DEBUG4, + DEBUG3, DEBUG2, + DEBUG1, INFO, + NOTICE, WARNING, + and LOG. The default is LOG. + Only superusers can change this setting. + + + + auto_explain.log_nested_statements (boolean) - auto_explain.log_nested_statements configuration parameter + auto_explain.log_nested_statements configuration parameter @@ -208,7 +229,7 @@ LOAD 'auto_explain'; auto_explain.sample_rate (real) - auto_explain.sample_rate configuration parameter + auto_explain.sample_rate configuration parameter @@ -224,7 +245,7 @@ LOAD 'auto_explain'; In ordinary usage, these parameters are set - in postgresql.conf, although superusers can alter them + in postgresql.conf, although superusers can alter them on-the-fly within their own sessions. Typical usage might be: diff --git a/doc/src/sgml/backup.sgml b/doc/src/sgml/backup.sgml index 0e7c6e2051..3fa5efdd78 100644 --- a/doc/src/sgml/backup.sgml +++ b/doc/src/sgml/backup.sgml @@ -3,10 +3,10 @@ Backup and Restore - backup + backup - As with everything that contains valuable data, PostgreSQL + As with everything that contains valuable data, PostgreSQL databases should be backed up regularly. While the procedure is essentially simple, it is important to have a clear understanding of the underlying techniques and assumptions. @@ -14,9 +14,9 @@ There are three fundamentally different approaches to backing up - PostgreSQL data: + PostgreSQL data: - SQL dump + SQL dump File system level backup Continuous archiving @@ -25,30 +25,30 @@ - <acronym>SQL</> Dump + <acronym>SQL</acronym> Dump The idea behind this dump method is to generate a file with SQL commands that, when fed back to the server, will recreate the database in the same state as it was at the time of the dump. - PostgreSQL provides the utility program - for this purpose. The basic usage of this + PostgreSQL provides the utility program + for this purpose. The basic usage of this command is: -pg_dump dbname > outfile +pg_dump dbname > dumpfile - As you see, pg_dump writes its result to the + As you see, pg_dump writes its result to the standard output. We will see below how this can be useful. - While the above command creates a text file, pg_dump + While the above command creates a text file, pg_dump can create files in other formats that allow for parallelism and more fine-grained control of object restoration. - pg_dump is a regular PostgreSQL + pg_dump is a regular PostgreSQL client application (albeit a particularly clever one). This means that you can perform this backup procedure from any remote host that has - access to the database. But remember that pg_dump + access to the database. But remember that pg_dump does not operate with special permissions. 
In particular, it must have read access to all tables that you want to back up, so in order to back up the entire database you almost always have to run it as a @@ -60,9 +60,9 @@ pg_dump dbname > - To specify which database server pg_dump should + To specify which database server pg_dump should contact, use the command line options ). psql + supports options similar to pg_dump for specifying the database server to connect to and the user name to use. See - the reference page for more information. + the reference page for more information. Non-text file dumps are restored using the utility. + linkend="app-pgrestore"/> utility. @@ -134,21 +134,21 @@ psql dbname < - By default, the psql script will continue to + By default, the psql script will continue to execute after an SQL error is encountered. You might wish to run psql with - the ON_ERROR_STOP variable set to alter that + the ON_ERROR_STOP variable set to alter that behavior and have psql exit with an exit status of 3 if an SQL error occurs: -psql --set ON_ERROR_STOP=on dbname < infile +psql --set ON_ERROR_STOP=on dbname < dumpfile Either way, you will only have a partially restored database. Alternatively, you can specify that the whole dump should be restored as a single transaction, so the restore is either fully completed or fully rolled back. This mode can be specified by - passing the - The ability of pg_dump and psql to + The ability of pg_dump and psql to write to or read from pipes makes it possible to dump a database directly from one server to another, for example: -pg_dump -h host1 dbname | psql -h host2 dbname +pg_dump -h host1 dbname | psql -h host2 dbname - The dumps produced by pg_dump are relative to - template0. This means that any languages, procedures, - etc. added via template1 will also be dumped by - pg_dump. As a result, when restoring, if you are - using a customized template1, you must create the - empty database from template0, as in the example + The dumps produced by pg_dump are relative to + template0. This means that any languages, procedures, + etc. added via template1 will also be dumped by + pg_dump. As a result, when restoring, if you are + using a customized template1, you must create the + empty database from template0, as in the example above. After restoring a backup, it is wise to run on each + linkend="sql-analyze"/> on each database so the query optimizer has useful statistics; - see - and for more information. + see + and for more information. For more advice on how to load large amounts of data - into PostgreSQL efficiently, refer to . + into PostgreSQL efficiently, refer to . - Using <application>pg_dumpall</> + Using <application>pg_dumpall</application> - pg_dump dumps only a single database at a time, + pg_dump dumps only a single database at a time, and it does not dump information about roles or tablespaces (because those are cluster-wide rather than per-database). To support convenient dumping of the entire contents of a database - cluster, the program is provided. - pg_dumpall backs up each database in a given + cluster, the program is provided. + pg_dumpall backs up each database in a given cluster, and also preserves cluster-wide data such as role and tablespace definitions. 
The basic usage of this command is: -pg_dumpall > outfile +pg_dumpall > dumpfile - The resulting dump can be restored with psql: + The resulting dump can be restored with psql: -psql -f infile postgres +psql -f dumpfile postgres (Actually, you can specify any existing database name to start from, - but if you are loading into an empty cluster then postgres + but if you are loading into an empty cluster then postgres should usually be used.) It is always necessary to have - database superuser access when restoring a pg_dumpall + database superuser access when restoring a pg_dumpall dump, as that is required to restore the role and tablespace information. If you use tablespaces, make sure that the tablespace paths in the dump are appropriate for the new installation. - pg_dumpall works by emitting commands to re-create + pg_dumpall works by emitting commands to re-create roles, tablespaces, and empty databases, then invoking - pg_dump for each database. This means that while + pg_dump for each database. This means that while each database will be internally consistent, the snapshots of different databases are not synchronized. Cluster-wide data can be dumped alone using the - pg_dumpall option. This is necessary to fully backup the cluster if running the - pg_dump command on individual databases. + pg_dump command on individual databases. @@ -237,8 +237,8 @@ psql -f infile postgres Some operating systems have maximum file size limits that cause - problems when creating large pg_dump output files. - Fortunately, pg_dump can write to the standard + problems when creating large pg_dump output files. + Fortunately, pg_dump can write to the standard output, so you can use standard Unix tools to work around this potential problem. There are several possible methods: @@ -268,7 +268,7 @@ cat filename.gz | gunzip | psql - Use <command>split</>. + Use <command>split</command>. The split command allows you to split the output into smaller files that are @@ -288,10 +288,10 @@ cat filename* | psql - Use <application>pg_dump</>'s custom dump format. + Use <application>pg_dump</application>'s custom dump format. If PostgreSQL was built on a system with the - zlib compression library installed, the custom dump + zlib compression library installed, the custom dump format will compress data as it writes it to the output file. This will produce dump file sizes similar to using gzip, but it has the added advantage that tables can be restored selectively. The @@ -301,25 +301,25 @@ cat filename* | psql dbname > filename - A custom-format dump is not a script for psql, but - instead must be restored with pg_restore, for example: + A custom-format dump is not a script for psql, but + instead must be restored with pg_restore, for example: pg_restore -d dbname filename - See the and reference pages for details. + See the and reference pages for details. - For very large databases, you might need to combine split + For very large databases, you might need to combine split with one of the other two approaches. - Use <application>pg_dump</>'s parallel dump feature. + Use <application>pg_dump</application>'s parallel dump feature. To speed up the dump of a large database, you can use pg_dump's parallel mode. This will dump @@ -344,8 +344,8 @@ pg_dump -j num -F d -f An alternative backup strategy is to directly copy the files that - PostgreSQL uses to store the data in the database; - explains where these files + PostgreSQL uses to store the data in the database; + explains where these files are located. 
You can use whatever method you prefer for doing file system backups; for example: @@ -356,20 +356,20 @@ tar -cf backup.tar /usr/local/pgsql/data There are two restrictions, however, which make this method - impractical, or at least inferior to the pg_dump + impractical, or at least inferior to the pg_dump method: - The database server must be shut down in order to + The database server must be shut down in order to get a usable backup. Half-way measures such as disallowing all connections will not work (in part because tar and similar tools do not take an atomic snapshot of the state of the file system, but also because of internal buffering within the server). Information about stopping the server can be found in - . Needless to say, you + . Needless to say, you also need to shut down the server before restoring the data. @@ -379,7 +379,7 @@ tar -cf backup.tar /usr/local/pgsql/data If you have dug into the details of the file system layout of the database, you might be tempted to try to back up or restore only certain individual tables or databases from their respective files or - directories. This will not work because the + directories. This will not work because the information contained in these files is not usable without the commit log files, pg_xact/*, which contain the commit status of @@ -399,7 +399,7 @@ tar -cf backup.tar /usr/local/pgsql/data consistent snapshot of the data directory, if the file system supports that functionality (and you are willing to trust that it is implemented correctly). The typical procedure is - to make a frozen snapshot of the volume containing the + to make a frozen snapshot of the volume containing the database, then copy the whole data directory (not just parts, see above) from the snapshot to a backup device, then release the frozen snapshot. This will work even while the database server is running. @@ -419,7 +419,7 @@ tar -cf backup.tar /usr/local/pgsql/data the volumes. For example, if your data files and WAL log are on different disks, or if tablespaces are on different file systems, it might not be possible to use snapshot backup because the snapshots - must be simultaneous. + must be simultaneous. Read your file system documentation very carefully before trusting the consistent-snapshot technique in such situations. @@ -428,20 +428,20 @@ tar -cf backup.tar /usr/local/pgsql/data If simultaneous snapshots are not possible, one option is to shut down the database server long enough to establish all the frozen snapshots. Another option is to perform a continuous archiving base backup () because such backups are immune to file + linkend="backup-base-backup"/>) because such backups are immune to file system changes during the backup. This requires enabling continuous archiving just during the backup process; restore is done using - continuous archive recovery (). + continuous archive recovery (). - Another option is to use rsync to perform a file - system backup. This is done by first running rsync + Another option is to use rsync to perform a file + system backup. This is done by first running rsync while the database server is running, then shutting down the database - server long enough to do an rsync --checksum. 
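A minimal sketch of that two-pass rsync procedure, assuming the data directory used in the tar example above and an arbitrary destination path:

rsync -a /usr/local/pgsql/data/ /backups/pgdata/              # first pass, server still running
pg_ctl stop -D /usr/local/pgsql/data
rsync -a --checksum /usr/local/pgsql/data/ /backups/pgdata/   # second, much faster pass
pg_ctl start -D /usr/local/pgsql/data

The first pass copies the bulk of the data while the server is up; the brief shutdown is only needed for the final --checksum pass that reconciles anything that changed in the meantime.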
- ( @@ -517,7 +517,7 @@ tar -cf backup.tar /usr/local/pgsql/data If we continuously feed the series of WAL files to another machine that has been loaded with the same base backup file, we - have a warm standby system: at any point we can bring up + have a warm standby system: at any point we can bring up the second machine and it will have a nearly-current copy of the database. @@ -530,7 +530,7 @@ tar -cf backup.tar /usr/local/pgsql/data pg_dump and pg_dumpall do not produce file-system-level backups and cannot be used as part of a continuous-archiving solution. - Such dumps are logical and do not contain enough + Such dumps are logical and do not contain enough information to be used by WAL replay. @@ -546,10 +546,10 @@ tar -cf backup.tar /usr/local/pgsql/data To recover successfully using continuous archiving (also called - online backup by many database vendors), you need a continuous + online backup by many database vendors), you need a continuous sequence of archived WAL files that extends back at least as far as the start time of your backup. So to get started, you should set up and test - your procedure for archiving WAL files before you take your + your procedure for archiving WAL files before you take your first base backup. Accordingly, we first discuss the mechanics of archiving WAL files. @@ -558,17 +558,17 @@ tar -cf backup.tar /usr/local/pgsql/data Setting Up WAL Archiving - In an abstract sense, a running PostgreSQL system + In an abstract sense, a running PostgreSQL system produces an indefinitely long sequence of WAL records. The system physically divides this sequence into WAL segment - files, which are normally 16MB apiece (although the segment size - can be altered when building PostgreSQL). The segment + files, which are normally 16MB apiece (although the segment size + can be altered during initdb). The segment files are given numeric names that reflect their position in the abstract WAL sequence. When not using WAL archiving, the system normally creates just a few segment files and then - recycles them by renaming no-longer-needed segment files + recycles them by renaming no-longer-needed segment files to higher segment numbers. It's assumed that segment files whose - contents precede the checkpoint-before-last are no longer of + contents precede the last checkpoint are no longer of interest and can be recycled. @@ -577,33 +577,33 @@ tar -cf backup.tar /usr/local/pgsql/data file once it is filled, and save that data somewhere before the segment file is recycled for reuse. Depending on the application and the available hardware, there could be many different ways of saving - the data somewhere: we could copy the segment files to an NFS-mounted + the data somewhere: we could copy the segment files to an NFS-mounted directory on another machine, write them onto a tape drive (ensuring that you have a way of identifying the original name of each file), or batch them together and burn them onto CDs, or something else entirely. To provide the database administrator with flexibility, - PostgreSQL tries not to make any assumptions about how - the archiving will be done. Instead, PostgreSQL lets + PostgreSQL tries not to make any assumptions about how + the archiving will be done. Instead, PostgreSQL lets the administrator specify a shell command to be executed to copy a completed segment file to wherever it needs to go. 
The command could be - as simple as a cp, or it could invoke a complex shell + as simple as a cp, or it could invoke a complex shell script — it's all up to you. - To enable WAL archiving, set the - configuration parameter to replica or higher, - to on, + To enable WAL archiving, set the + configuration parameter to replica or higher, + to on, and specify the shell command to use in the configuration parameter. In practice + linkend="guc-archive-command"/> configuration parameter. In practice these settings will always be placed in the postgresql.conf file. - In archive_command, - %p is replaced by the path name of the file to - archive, while %f is replaced by only the file name. + In archive_command, + %p is replaced by the path name of the file to + archive, while %f is replaced by only the file name. (The path name is relative to the current working directory, i.e., the cluster's data directory.) - Use %% if you need to embed an actual % + Use %% if you need to embed an actual % character in the command. The simplest useful command is something like: @@ -611,9 +611,9 @@ archive_command = 'test ! -f /mnt/server/archivedir/%f && cp %p /mnt/ser archive_command = 'copy "%p" "C:\\server\\archivedir\\%f"' # Windows which will copy archivable WAL segments to the directory - /mnt/server/archivedir. (This is an example, not a + /mnt/server/archivedir. (This is an example, not a recommendation, and might not work on all platforms.) After the - %p and %f parameters have been replaced, + %p and %f parameters have been replaced, the actual command executed might look like this: test ! -f /mnt/server/archivedir/00000001000000A900000065 && cp pg_wal/00000001000000A900000065 /mnt/server/archivedir/00000001000000A900000065 @@ -623,7 +623,7 @@ test ! -f /mnt/server/archivedir/00000001000000A900000065 && cp pg_wal/0 The archive command will be executed under the ownership of the same - user that the PostgreSQL server is running as. Since + user that the PostgreSQL server is running as. Since the series of WAL files being archived contains effectively everything in your database, you will want to be sure that the archived data is protected from prying eyes; for example, archive into a directory that @@ -633,9 +633,9 @@ test ! -f /mnt/server/archivedir/00000001000000A900000065 && cp pg_wal/0 It is important that the archive command return zero exit status if and only if it succeeds. Upon getting a zero result, - PostgreSQL will assume that the file has been + PostgreSQL will assume that the file has been successfully archived, and will remove or recycle it. However, a nonzero - status tells PostgreSQL that the file was not archived; + status tells PostgreSQL that the file was not archived; it will try again periodically until it succeeds. @@ -650,14 +650,14 @@ test ! -f /mnt/server/archivedir/00000001000000A900000065 && cp pg_wal/0 It is advisable to test your proposed archive command to ensure that it indeed does not overwrite an existing file, and that it returns - nonzero status in this case. + nonzero status in this case. The example command above for Unix ensures this by including a separate - test step. On some Unix platforms, cp has - switches such as that can be used to do the same thing less verbosely, but you should not rely on these without verifying that - the right exit status is returned. (In particular, GNU cp - will return status zero when @@ -668,10 +668,10 @@ test ! 
-f /mnt/server/archivedir/00000001000000A900000065 && cp pg_wal/0 fills, nothing further can be archived until the tape is swapped. You should ensure that any error condition or request to a human operator is reported appropriately so that the situation can be - resolved reasonably quickly. The pg_wal/ directory will + resolved reasonably quickly. The pg_wal/ directory will continue to fill with WAL segment files until the situation is resolved. - (If the file system containing pg_wal/ fills up, - PostgreSQL will do a PANIC shutdown. No committed + (If the file system containing pg_wal/ fills up, + PostgreSQL will do a PANIC shutdown. No committed transactions will be lost, but the database will remain offline until you free some space.) @@ -682,7 +682,7 @@ test ! -f /mnt/server/archivedir/00000001000000A900000065 && cp pg_wal/0 operation continues even if the archiving process falls a little behind. If archiving falls significantly behind, this will increase the amount of data that would be lost in the event of a disaster. It will also mean that - the pg_wal/ directory will contain large numbers of + the pg_wal/ directory will contain large numbers of not-yet-archived segment files, which could eventually exceed available disk space. You are advised to monitor the archiving process to ensure that it is working as you intend. @@ -692,20 +692,20 @@ test ! -f /mnt/server/archivedir/00000001000000A900000065 && cp pg_wal/0 In writing your archive command, you should assume that the file names to be archived can be up to 64 characters long and can contain any combination of ASCII letters, digits, and dots. It is not necessary to - preserve the original relative path (%p) but it is necessary to - preserve the file name (%f). + preserve the original relative path (%p) but it is necessary to + preserve the file name (%f). Note that although WAL archiving will allow you to restore any - modifications made to the data in your PostgreSQL database, + modifications made to the data in your PostgreSQL database, it will not restore changes made to configuration files (that is, - postgresql.conf, pg_hba.conf and - pg_ident.conf), since those are edited manually rather + postgresql.conf, pg_hba.conf and + pg_ident.conf), since those are edited manually rather than through SQL operations. You might wish to keep the configuration files in a location that will be backed up by your regular file system backup procedures. See - for how to relocate the + for how to relocate the configuration files. @@ -715,36 +715,36 @@ test ! -f /mnt/server/archivedir/00000001000000A900000065 && cp pg_wal/0 where it does so), there could be a long delay between the completion of a transaction and its safe recording in archive storage. To put a limit on how old unarchived data can be, you can set - to force the server to switch + to force the server to switch to a new WAL segment file at least that often. Note that archived files that are archived early due to a forced switch are still the same length as completely full files. It is therefore unwise to set a very - short archive_timeout — it will bloat your archive - storage. archive_timeout settings of a minute or so are + short archive_timeout — it will bloat your archive + storage. archive_timeout settings of a minute or so are usually reasonable. Also, you can force a segment switch manually with - pg_switch_wal if you want to ensure that a + pg_switch_wal if you want to ensure that a just-finished transaction is archived as soon as possible. 
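For example, a manual segment switch issued right after an important transaction could be as simple as the following sketch (the function is restricted to superusers by default, although EXECUTE can be granted to other roles):

psql -c "SELECT pg_switch_wal();"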
Other utility functions related to WAL management are listed in . + linkend="functions-admin-backup-table"/>. - When wal_level is minimal some SQL commands + When wal_level is minimal some SQL commands are optimized to avoid WAL logging, as described in . If archiving or streaming replication were + linkend="populate-pitr"/>. If archiving or streaming replication were turned on during execution of one of these statements, WAL would not contain enough information for archive recovery. (Crash recovery is - unaffected.) For this reason, wal_level can only be changed at - server start. However, archive_command can be changed with a + unaffected.) For this reason, wal_level can only be changed at + server start. However, archive_command can be changed with a configuration file reload. If you wish to temporarily stop archiving, - one way to do it is to set archive_command to the empty - string (''). - This will cause WAL files to accumulate in pg_wal/ until a - working archive_command is re-established. + one way to do it is to set archive_command to the empty + string (''). + This will cause WAL files to accumulate in pg_wal/ until a + working archive_command is re-established. @@ -753,18 +753,18 @@ test ! -f /mnt/server/archivedir/00000001000000A900000065 && cp pg_wal/0 The easiest way to perform a base backup is to use the - tool. It can create + tool. It can create a base backup either as regular files or as a tar archive. If more - flexibility than can provide is + flexibility than can provide is required, you can also make a base backup using the low level API - (see ). + (see ). It is not necessary to be concerned about the amount of time it takes to make a base backup. However, if you normally run the - server with full_page_writes disabled, you might notice a drop - in performance while the backup runs since full_page_writes is + server with full_page_writes disabled, you might notice a drop + in performance while the backup runs since full_page_writes is effectively forced on during backup mode. @@ -772,13 +772,13 @@ test ! -f /mnt/server/archivedir/00000001000000A900000065 && cp pg_wal/0 To make use of the backup, you will need to keep all the WAL segment files generated during and after the file system backup. To aid you in doing this, the base backup process - creates a backup history file that is immediately + creates a backup history file that is immediately stored into the WAL archive area. This file is named after the first WAL segment file that you need for the file system backup. For example, if the starting WAL file is - 0000000100001234000055CD the backup history file will be + 0000000100001234000055CD the backup history file will be named something like - 0000000100001234000055CD.007C9330.backup. (The second + 0000000100001234000055CD.007C9330.backup. (The second part of the file name stands for an exact position within the WAL file, and can ordinarily be ignored.) Once you have safely archived the file system backup and the WAL segment files used during the @@ -791,7 +791,7 @@ test ! -f /mnt/server/archivedir/00000001000000A900000065 && cp pg_wal/0 The backup history file is just a small text file. It contains the - label string you gave to , as well as + label string you gave to , as well as the starting and ending times and WAL segments of the backup. If you used the label to identify the associated dump file, then the archived history file is enough to tell you which dump file to @@ -814,7 +814,7 @@ test ! 
-f /mnt/server/archivedir/00000001000000A900000065 && cp pg_wal/0 The procedure for making a base backup using the low level APIs contains a few more steps than - the method, but is relatively + the method, but is relatively simple. It is very important that these steps are executed in sequence, and that the success of a step is verified before proceeding to the next step. @@ -830,7 +830,7 @@ test ! -f /mnt/server/archivedir/00000001000000A900000065 && cp pg_wal/0 A non-exclusive low level backup is one that allows other concurrent backups to be running (both those started using the same backup API and those started using - ). + ). @@ -847,39 +847,39 @@ test ! -f /mnt/server/archivedir/00000001000000A900000065 && cp pg_wal/0 SELECT pg_start_backup('label', false, false); - where label is any string you want to use to uniquely + where label is any string you want to use to uniquely identify this backup operation. The connection - calling pg_start_backup must be maintained until the end of + calling pg_start_backup must be maintained until the end of the backup, or the backup will be automatically aborted. - By default, pg_start_backup can take a long time to finish. + By default, pg_start_backup can take a long time to finish. This is because it performs a checkpoint, and the I/O required for the checkpoint will be spread out over a significant period of time, by default half your inter-checkpoint interval (see the configuration parameter - ). This is + ). This is usually what you want, because it minimizes the impact on query processing. If you want to start the backup as soon as - possible, change the second parameter to true, which will + possible, change the second parameter to true, which will issue an immediate checkpoint using as much I/O as available. - The third parameter being false tells - pg_start_backup to initiate a non-exclusive base backup. + The third parameter being false tells + pg_start_backup to initiate a non-exclusive base backup. Perform the backup, using any convenient file-system-backup tool - such as tar or cpio (not + such as tar or cpio (not pg_dump or pg_dumpall). It is neither necessary nor desirable to stop normal operation of the database while you do this. See - for things to + for things to consider during this backup. @@ -889,47 +889,54 @@ SELECT pg_start_backup('label', false, false); SELECT * FROM pg_stop_backup(false, true); - This terminates the backup mode and performs an automatic switch to - the next WAL segment. The reason for the switch is to arrange for + This terminates backup mode. On a primary, it also performs an automatic + switch to the next WAL segment. On a standby, it is not possible to + automatically switch WAL segments, so you may wish to run + pg_switch_wal on the primary to perform a manual + switch. The reason for the switch is to arrange for the last WAL segment file written during the backup interval to be ready to archive. - The pg_stop_backup will return one row with three + The pg_stop_backup will return one row with three values. The second of these fields should be written to a file named - backup_label in the root directory of the backup. The + backup_label in the root directory of the backup. The third field should be written to a file named - tablespace_map unless the field is empty. These files are + tablespace_map unless the field is empty. These files are vital to the backup working, and must be written without modification. Once the WAL segment files active during the backup are archived, you are - done. 
The file identified by pg_stop_backup's first return + done. The file identified by pg_stop_backup's first return value is the last segment that is required to form a complete set of - backup files. If archive_mode is enabled, - pg_stop_backup does not return until the last segment has + backup files. On a primary, if archive_mode is enabled and the + wait_for_archive parameter is true, + pg_stop_backup does not return until the last segment has been archived. + On a standby, archive_mode must be always in order + for pg_stop_backup to wait. Archiving of these files happens automatically since you have - already configured archive_command. In most cases this + already configured archive_command. In most cases this happens quickly, but you are advised to monitor your archive system to ensure there are no delays. If the archive process has fallen behind because of failures of the archive command, it will keep retrying until the archive succeeds and the backup is complete. If you wish to place a time limit on the execution of - pg_stop_backup, set an appropriate + pg_stop_backup, set an appropriate statement_timeout value, but make note that if - pg_stop_backup terminates because of this your backup + pg_stop_backup terminates because of this your backup may not be valid. If the backup process monitors and ensures that all WAL segment files - required for the backup are successfully archived then the second - parameter (which defaults to true) can be set to false to have - pg_stop_backup return as soon as the stop backup record is - written to the WAL. By default, pg_stop_backup will wait + required for the backup are successfully archived then the + wait_for_archive parameter (which defaults to true) can be set + to false to have + pg_stop_backup return as soon as the stop backup record is + written to the WAL. By default, pg_stop_backup will wait until all WAL has been archived, which can take some time. This option must be used with caution: if WAL archiving is not monitored correctly then the backup might not include all of the WAL files and will @@ -943,9 +950,9 @@ SELECT * FROM pg_stop_backup(false, true); Making an exclusive low level backup The process for an exclusive backup is mostly the same as for a - non-exclusive one, but it differs in a few key steps. It does not allow - more than one concurrent backup to run, and there can be some issues on - the server if it crashes during the backup. Prior to PostgreSQL 9.6, this + non-exclusive one, but it differs in a few key steps. This type of backup + can only be taken on a primary and does not allow concurrent backups. + Prior to PostgreSQL 9.6, this was the only low-level method available, but it is now recommended that all users upgrade their scripts to use non-exclusive backups if possible. @@ -964,25 +971,25 @@ SELECT * FROM pg_stop_backup(false, true); SELECT pg_start_backup('label'); - where label is any string you want to use to uniquely + where label is any string you want to use to uniquely identify this backup operation. - pg_start_backup creates a backup label file, - called backup_label, in the cluster directory with + pg_start_backup creates a backup label file, + called backup_label, in the cluster directory with information about your backup, including the start time and label string. 
- The function also creates a tablespace map file, - called tablespace_map, in the cluster directory with - information about tablespace symbolic links in pg_tblspc/ if + The function also creates a tablespace map file, + called tablespace_map, in the cluster directory with + information about tablespace symbolic links in pg_tblspc/ if one or more such link is present. Both files are critical to the integrity of the backup, should you need to restore from it. - By default, pg_start_backup can take a long time to finish. + By default, pg_start_backup can take a long time to finish. This is because it performs a checkpoint, and the I/O required for the checkpoint will be spread out over a significant period of time, by default half your inter-checkpoint interval (see the configuration parameter - ). This is + ). This is usually what you want, because it minimizes the impact on query processing. If you want to start the backup as soon as possible, use: @@ -995,14 +1002,19 @@ SELECT pg_start_backup('label', true); Perform the backup, using any convenient file-system-backup tool - such as tar or cpio (not + such as tar or cpio (not pg_dump or pg_dumpall). It is neither necessary nor desirable to stop normal operation of the database while you do this. See - for things to + for things to consider during this backup. + + Note that if the server crashes during the backup it may not be + possible to restart until the backup_label file has been + manually deleted from the PGDATA directory. + @@ -1012,36 +1024,31 @@ SELECT pg_start_backup('label', true); SELECT pg_stop_backup(); - This function, when called on a primary, terminates the backup mode and + This function terminates backup mode and performs an automatic switch to the next WAL segment. The reason for the switch is to arrange for the last WAL segment written during the backup - interval to be ready to archive. When called on a standby, this function - only terminates backup mode. A subsequent WAL segment switch will be - needed in order to ensure that all WAL files needed to restore the backup - can be archived; if the primary does not have sufficient write activity - to trigger one, pg_switch_wal should be executed on - the primary. + interval to be ready to archive. Once the WAL segment files active during the backup are archived, you are - done. The file identified by pg_stop_backup's result is + done. The file identified by pg_stop_backup's result is the last segment that is required to form a complete set of backup files. - If archive_mode is enabled, - pg_stop_backup does not return until the last segment has + If archive_mode is enabled, + pg_stop_backup does not return until the last segment has been archived. Archiving of these files happens automatically since you have - already configured archive_command. In most cases this + already configured archive_command. In most cases this happens quickly, but you are advised to monitor your archive system to ensure there are no delays. If the archive process has fallen behind because of failures of the archive command, it will keep retrying until the archive succeeds and the backup is complete. If you wish to place a time limit on the execution of - pg_stop_backup, set an appropriate + pg_stop_backup, set an appropriate statement_timeout value, but make note that if - pg_stop_backup terminates because of this your backup + pg_stop_backup terminates because of this your backup may not be valid. 
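A sketch of that statement_timeout advice, with both statements issued in the same session (the one-hour value is an arbitrary illustration, not a recommendation):

SET statement_timeout = '1h';
SELECT pg_stop_backup();

If the timeout fires and pg_stop_backup is cancelled, treat the backup as suspect, as noted above.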
@@ -1056,21 +1063,21 @@ SELECT pg_stop_backup(); When taking a base backup of an active database, this situation is normal and not an error. However, you need to ensure that you can distinguish complaints of this sort from real errors. For example, some versions - of rsync return a separate exit code for - vanished source files, and you can write a driver script to + of rsync return a separate exit code for + vanished source files, and you can write a driver script to accept this exit code as a non-error case. Also, some versions of - GNU tar return an error code indistinguishable from - a fatal error if a file was truncated while tar was - copying it. Fortunately, GNU tar versions 1.16 and + GNU tar return an error code indistinguishable from + a fatal error if a file was truncated while tar was + copying it. Fortunately, GNU tar versions 1.16 and later exit with 1 if a file was changed during the backup, - and 2 for other errors. With GNU tar version 1.23 and + and 2 for other errors. With GNU tar version 1.23 and later, you can use the warning options --warning=no-file-changed --warning=no-file-removed to hide the related warning messages. Be certain that your backup includes all of the files under - the database cluster directory (e.g., /usr/local/pgsql/data). + the database cluster directory (e.g., /usr/local/pgsql/data). If you are using tablespaces that do not reside underneath this directory, be careful to include them as well (and be sure that your backup archives symbolic links as links, otherwise the restore will corrupt @@ -1079,21 +1086,21 @@ SELECT pg_stop_backup(); You should, however, omit from the backup the files within the - cluster's pg_wal/ subdirectory. This + cluster's pg_wal/ subdirectory. This slight adjustment is worthwhile because it reduces the risk of mistakes when restoring. This is easy to arrange if - pg_wal/ is a symbolic link pointing to someplace outside + pg_wal/ is a symbolic link pointing to someplace outside the cluster directory, which is a common setup anyway for performance - reasons. You might also want to exclude postmaster.pid - and postmaster.opts, which record information - about the running postmaster, not about the - postmaster which will eventually use this backup. - (These files can confuse pg_ctl.) + reasons. You might also want to exclude postmaster.pid + and postmaster.opts, which record information + about the running postmaster, not about the + postmaster which will eventually use this backup. + (These files can confuse pg_ctl.) It is often a good idea to also omit from the backup the files - within the cluster's pg_replslot/ directory, so that + within the cluster's pg_replslot/ directory, so that replication slots that exist on the master do not become part of the backup. Otherwise, the subsequent use of the backup to create a standby may result in indefinite retention of WAL files on the standby, and @@ -1107,12 +1114,12 @@ SELECT pg_stop_backup(); - The contents of the directories pg_dynshmem/, - pg_notify/, pg_serial/, - pg_snapshots/, pg_stat_tmp/, - and pg_subtrans/ (but not the directories themselves) can be + The contents of the directories pg_dynshmem/, + pg_notify/, pg_serial/, + pg_snapshots/, pg_stat_tmp/, + and pg_subtrans/ (but not the directories themselves) can be omitted from the backup as they will be initialized on postmaster startup. - If is set and is under the data + If is set and is under the data directory then the contents of that directory can also be omitted. 
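Putting those exclusions together, the file-system copy step of a base backup might look like the following sketch; the paths match the earlier tar example and the exclusion list is an assumption to adapt to your installation (an excluded pg_wal/ must be recreated, with proper permissions, before the restored cluster is started):

tar -cf backup.tar \
    --exclude=pg_wal --exclude=pg_replslot \
    --exclude=postmaster.pid --exclude=postmaster.opts \
    /usr/local/pgsql/data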
@@ -1122,15 +1129,21 @@ SELECT pg_stop_backup(); the directories will be recreated as needed. + + pg_internal.init files can be omitted from the + backup whenever a file of that name is found. These files contain + relation cache data that is always rebuilt when recovering. + + The backup label - file includes the label string you gave to pg_start_backup, - as well as the time at which pg_start_backup was run, and + file includes the label string you gave to pg_start_backup, + as well as the time at which pg_start_backup was run, and the name of the starting WAL file. In case of confusion it is therefore possible to look inside a backup file and determine exactly which backup session the dump file came from. The tablespace map file includes the symbolic link names as they exist in the directory - pg_tblspc/ and the full path of each symbolic link. + pg_tblspc/ and the full path of each symbolic link. These files are not merely for your information; their presence and contents are critical to the proper operation of the system's recovery process. @@ -1139,7 +1152,7 @@ SELECT pg_stop_backup(); It is also possible to make a backup while the server is stopped. In this case, you obviously cannot use - pg_start_backup or pg_stop_backup, and + pg_start_backup or pg_stop_backup, and you will therefore be left to your own devices to keep track of which backup is which and how far back the associated WAL files go. It is generally better to follow the continuous archiving procedure above. @@ -1166,7 +1179,7 @@ SELECT pg_stop_backup(); location in case you need them later. Note that this precaution will require that you have enough free space on your system to hold two copies of your existing database. If you do not have enough space, - you should at least save the contents of the cluster's pg_wal + you should at least save the contents of the cluster's pg_wal subdirectory, as it might contain logs which were not archived before the system went down. @@ -1181,17 +1194,17 @@ SELECT pg_stop_backup(); Restore the database files from your file system backup. Be sure that they are restored with the right ownership (the database system user, not - root!) and with the right permissions. If you are using + root!) and with the right permissions. If you are using tablespaces, - you should verify that the symbolic links in pg_tblspc/ + you should verify that the symbolic links in pg_tblspc/ were correctly restored. - Remove any files present in pg_wal/; these came from the + Remove any files present in pg_wal/; these came from the file system backup and are therefore probably obsolete rather than current. - If you didn't archive pg_wal/ at all, then recreate + If you didn't archive pg_wal/ at all, then recreate it with proper permissions, being careful to ensure that you re-establish it as a symbolic link if you had it set up that way before. @@ -1200,16 +1213,16 @@ SELECT pg_stop_backup(); If you have unarchived WAL segment files that you saved in step 2, - copy them into pg_wal/. (It is best to copy them, + copy them into pg_wal/. (It is best to copy them, not move them, so you still have the unmodified files if a problem occurs and you have to start over.) - Create a recovery command file recovery.conf in the cluster - data directory (see ). You might - also want to temporarily modify pg_hba.conf to prevent + Create a recovery command file recovery.conf in the cluster + data directory (see ). 
You might + also want to temporarily modify pg_hba.conf to prevent ordinary users from connecting until you are sure the recovery was successful. @@ -1220,7 +1233,7 @@ SELECT pg_stop_backup(); recovery be terminated because of an external error, the server can simply be restarted and it will continue recovery. Upon completion of the recovery process, the server will rename - recovery.conf to recovery.done (to prevent + recovery.conf to recovery.done (to prevent accidentally re-entering recovery mode later) and then commence normal database operations. @@ -1229,7 +1242,7 @@ SELECT pg_stop_backup(); Inspect the contents of the database to ensure you have recovered to the desired state. If not, return to step 1. If all is well, - allow your users to connect by restoring pg_hba.conf to normal. + allow your users to connect by restoring pg_hba.conf to normal. @@ -1238,32 +1251,32 @@ SELECT pg_stop_backup(); The key part of all this is to set up a recovery configuration file that describes how you want to recover and how far the recovery should - run. You can use recovery.conf.sample (normally - located in the installation's share/ directory) as a + run. You can use recovery.conf.sample (normally + located in the installation's share/ directory) as a prototype. The one thing that you absolutely must specify in - recovery.conf is the restore_command, - which tells PostgreSQL how to retrieve archived - WAL file segments. Like the archive_command, this is - a shell command string. It can contain %f, which is - replaced by the name of the desired log file, and %p, + recovery.conf is the restore_command, + which tells PostgreSQL how to retrieve archived + WAL file segments. Like the archive_command, this is + a shell command string. It can contain %f, which is + replaced by the name of the desired log file, and %p, which is replaced by the path name to copy the log file to. (The path name is relative to the current working directory, i.e., the cluster's data directory.) - Write %% if you need to embed an actual % + Write %% if you need to embed an actual % character in the command. The simplest useful command is something like: restore_command = 'cp /mnt/server/archivedir/%f %p' which will copy previously archived WAL segments from the directory - /mnt/server/archivedir. Of course, you can use something + /mnt/server/archivedir. Of course, you can use something much more complicated, perhaps even a shell script that requests the operator to mount an appropriate tape. It is important that the command return nonzero exit status on failure. - The command will be called requesting files that are not + The command will be called requesting files that are not present in the archive; it must return nonzero when so asked. This is not an error condition. An exception is that if the command was terminated by a signal (other than SIGTERM, which is used as @@ -1275,36 +1288,36 @@ restore_command = 'cp /mnt/server/archivedir/%f %p' Not all of the requested files will be WAL segment files; you should also expect requests for files with a suffix of - .backup or .history. Also be aware that - the base name of the %p path will be different from - %f; do not expect them to be interchangeable. + .history. Also be aware that + the base name of the %p path will be different from + %f; do not expect them to be interchangeable. WAL segments that cannot be found in the archive will be sought in - pg_wal/; this allows use of recent un-archived segments. + pg_wal/; this allows use of recent un-archived segments. 
However, segments that are available from the archive will be used in - preference to files in pg_wal/. + preference to files in pg_wal/. Normally, recovery will proceed through all available WAL segments, thereby restoring the database to the current point in time (or as close as possible given the available WAL segments). Therefore, a normal - recovery will end with a file not found message, the exact text + recovery will end with a file not found message, the exact text of the error message depending upon your choice of - restore_command. You may also see an error message + restore_command. You may also see an error message at the start of recovery for a file named something like - 00000001.history. This is also normal and does not + 00000001.history. This is also normal and does not indicate a problem in simple recovery situations; see - for discussion. + for discussion. If you want to recover to some previous point in time (say, right before the junior DBA dropped your main transaction table), just specify the - required stopping point in recovery.conf. You can specify - the stop point, known as the recovery target, either by + required stopping point in recovery.conf. You can specify + the stop point, known as the recovery target, either by date/time, named restore point or by completion of a specific transaction ID. As of this writing only the date/time and named restore point options are very usable, since there are no tools to help you identify with any @@ -1314,7 +1327,7 @@ restore_command = 'cp /mnt/server/archivedir/%f %p' The stop point must be after the ending time of the base backup, i.e., - the end time of pg_stop_backup. You cannot use a base backup + the end time of pg_stop_backup. You cannot use a base backup to recover to a time when that backup was in progress. (To recover to such a time, you must go back to your previous base backup and roll forward from there.) @@ -1325,14 +1338,14 @@ restore_command = 'cp /mnt/server/archivedir/%f %p' If recovery finds corrupted WAL data, recovery will halt at that point and the server will not start. In such a case the recovery process could be re-run from the beginning, specifying a - recovery target before the point of corruption so that recovery + recovery target before the point of corruption so that recovery can complete normally. If recovery fails for an external reason, such as a system crash or if the WAL archive has become inaccessible, then the recovery can simply be restarted and it will restart almost from where it failed. Recovery restart works much like checkpointing in normal operation: the server periodically forces all its state to disk, and then updates - the pg_control file to indicate that the already-processed + the pg_control file to indicate that the already-processed WAL data need not be scanned again. @@ -1352,7 +1365,7 @@ restore_command = 'cp /mnt/server/archivedir/%f %p' suppose you dropped a critical table at 5:15PM on Tuesday evening, but didn't realize your mistake until Wednesday noon. Unfazed, you get out your backup, restore to the point-in-time 5:14PM - Tuesday evening, and are up and running. In this history of + Tuesday evening, and are up and running. In this history of the database universe, you never dropped the table. But suppose you later realize this wasn't such a great idea, and would like to return to sometime Wednesday morning in the original history. 
@@ -1365,8 +1378,8 @@ restore_command = 'cp /mnt/server/archivedir/%f %p' - To deal with this problem, PostgreSQL has a notion - of timelines. Whenever an archive recovery completes, + To deal with this problem, PostgreSQL has a notion + of timelines. Whenever an archive recovery completes, a new timeline is created to identify the series of WAL records generated after that recovery. The timeline ID number is part of WAL segment file names so a new timeline does @@ -1377,13 +1390,13 @@ restore_command = 'cp /mnt/server/archivedir/%f %p' and so have to do several point-in-time recoveries by trial and error until you find the best place to branch off from the old history. Without timelines this process would soon generate an unmanageable mess. With - timelines, you can recover to any prior state, including + timelines, you can recover to any prior state, including states in timeline branches that you abandoned earlier. - Every time a new timeline is created, PostgreSQL creates - a timeline history file that shows which timeline it branched + Every time a new timeline is created, PostgreSQL creates + a timeline history file that shows which timeline it branched off from and when. These history files are necessary to allow the system to pick the right WAL segment files when recovering from an archive that contains multiple timelines. Therefore, they are archived into the WAL @@ -1401,7 +1414,7 @@ restore_command = 'cp /mnt/server/archivedir/%f %p' that was current when the base backup was taken. If you wish to recover into some child timeline (that is, you want to return to some state that was itself generated after a recovery attempt), you need to specify the - target timeline ID in recovery.conf. You cannot recover into + target timeline ID in recovery.conf. You cannot recover into timelines that branched off earlier than the base backup. @@ -1417,18 +1430,18 @@ restore_command = 'cp /mnt/server/archivedir/%f %p' Standalone Hot Backups - It is possible to use PostgreSQL's backup facilities to + It is possible to use PostgreSQL's backup facilities to produce standalone hot backups. These are backups that cannot be used for point-in-time recovery, yet are typically much faster to backup and - restore than pg_dump dumps. (They are also much larger - than pg_dump dumps, so in some cases the speed advantage + restore than pg_dump dumps. (They are also much larger + than pg_dump dumps, so in some cases the speed advantage might be negated.) As with base backups, the easiest way to produce a standalone - hot backup is to use the - tool. If you include the -X parameter when calling + hot backup is to use the + tool. If you include the -X parameter when calling it, all the write-ahead log required to use the backup will be included in the backup automatically, and no special action is required to restore the backup. @@ -1438,16 +1451,16 @@ restore_command = 'cp /mnt/server/archivedir/%f %p' If more flexibility in copying the backup files is needed, a lower level process can be used for standalone hot backups as well. To prepare for low level standalone hot backups, make sure - wal_level is set to - replica or higher, archive_mode to - on, and set up an archive_command that performs - archiving only when a switch file exists. For example: + wal_level is set to + replica or higher, archive_mode to + on, and set up an archive_command that performs + archiving only when a switch file exists. For example: archive_command = 'test ! -f /var/lib/pgsql/backup_in_progress || (test ! 
-f /var/lib/pgsql/archive/%f && cp %p /var/lib/pgsql/archive/%f)' This command will perform archiving when - /var/lib/pgsql/backup_in_progress exists, and otherwise - silently return zero exit status (allowing PostgreSQL + /var/lib/pgsql/backup_in_progress exists, and otherwise + silently return zero exit status (allowing PostgreSQL to recycle the unwanted WAL file). @@ -1462,11 +1475,11 @@ psql -c "select pg_stop_backup();" rm /var/lib/pgsql/backup_in_progress tar -rf /var/lib/pgsql/backup.tar /var/lib/pgsql/archive/ - The switch file /var/lib/pgsql/backup_in_progress is + The switch file /var/lib/pgsql/backup_in_progress is created first, enabling archiving of completed WAL files to occur. After the backup the switch file is removed. Archived WAL files are then added to the backup so that both base backup and all required - WAL files are part of the same tar file. + WAL files are part of the same tar file. Please remember to add error handling to your backup scripts. @@ -1481,7 +1494,7 @@ tar -rf /var/lib/pgsql/backup.tar /var/lib/pgsql/archive/ archive_command = 'gzip < %p > /var/lib/pgsql/archive/%f' - You will then need to use gunzip during recovery: + You will then need to use gunzip during recovery: restore_command = 'gunzip < /mnt/server/archivedir/%f > %p' @@ -1494,7 +1507,7 @@ restore_command = 'gunzip < /mnt/server/archivedir/%f > %p' Many people choose to use scripts to define their archive_command, so that their - postgresql.conf entry looks very simple: + postgresql.conf entry looks very simple: archive_command = 'local_backup_script.sh "%p" "%f"' @@ -1502,7 +1515,7 @@ archive_command = 'local_backup_script.sh "%p" "%f"' more than a single command in the archiving process. This allows all complexity to be managed within the script, which can be written in a popular scripting language such as - bash or perl. + bash or perl. @@ -1535,8 +1548,8 @@ archive_command = 'local_backup_script.sh "%p" "%f"' When using an archive_command script, it's desirable - to enable . - Any messages written to stderr from the script will then + to enable . + Any messages written to stderr from the script will then appear in the database server log, allowing complex configurations to be diagnosed easily if they fail. @@ -1554,9 +1567,9 @@ archive_command = 'local_backup_script.sh "%p" "%f"' - If a + If a command is executed while a base backup is being taken, and then - the template database that the CREATE DATABASE copied + the template database that the CREATE DATABASE copied is modified while the base backup is still in progress, it is possible that recovery will cause those modifications to be propagated into the created database as well. This is of course @@ -1567,7 +1580,7 @@ archive_command = 'local_backup_script.sh "%p" "%f"' - + commands are WAL-logged with the literal absolute path, and will therefore be replayed as tablespace creations with the same absolute path. This might be undesirable if the log is being @@ -1590,12 +1603,12 @@ archive_command = 'local_backup_script.sh "%p" "%f"' your system hardware and software, the risk of partial writes might be small enough to ignore, in which case you can significantly reduce the total volume of archived logs by turning off page - snapshots using the - parameter. (Read the notes and warnings in + snapshots using the + parameter. (Read the notes and warnings in before you do so.) Turning off page snapshots does not prevent use of the logs for PITR operations. 
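Returning to the wrapper-script idea mentioned above for archive_command, a hypothetical local_backup_script.sh (both the script name and the archive directory are assumptions) could be as small as the following; like the inline example earlier, it refuses to overwrite an existing file and exits nonzero on any failure so that archiving is retried:

#!/bin/sh
# called as: local_backup_script.sh "%p" "%f"
p="$1"                              # %p: path of the completed WAL file
f="$2"                              # %f: its file name only
archivedir=/var/lib/pgsql/archive   # assumed archive location
test ! -f "$archivedir/$f" && cp "$p" "$archivedir/$f"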
An area for future development is to compress archived WAL data by removing - unnecessary page copies even when full_page_writes is + unnecessary page copies even when full_page_writes is on. In the meantime, administrators might wish to reduce the number of page snapshots included in WAL by increasing the checkpoint interval parameters as much as feasible. diff --git a/doc/src/sgml/bgworker.sgml b/doc/src/sgml/bgworker.sgml index b422323081..bc5a52584b 100644 --- a/doc/src/sgml/bgworker.sgml +++ b/doc/src/sgml/bgworker.sgml @@ -11,17 +11,17 @@ PostgreSQL can be extended to run user-supplied code in separate processes. Such processes are started, stopped and monitored by postgres, which permits them to have a lifetime closely linked to the server's status. - These processes have the option to attach to PostgreSQL's + These processes have the option to attach to PostgreSQL's shared memory area and to connect to databases internally; they can also run multiple transactions serially, just like a regular client-connected server - process. Also, by linking to libpq they can connect to the + process. Also, by linking to libpq they can connect to the server and behave like a regular client application. There are considerable robustness and security risks in using background - worker processes because, being written in the C language, + worker processes because, being written in the C language, they have unrestricted access to data. Administrators wishing to enable modules that include background worker process should exercise extreme caution. Only carefully audited modules should be permitted to run @@ -31,17 +31,17 @@ Background workers can be initialized at the time that - PostgreSQL is started by including the module name in - shared_preload_libraries. A module wishing to run a background + PostgreSQL is started by including the module name in + shared_preload_libraries. A module wishing to run a background worker can register it by calling RegisterBackgroundWorker(BackgroundWorker *worker) - from its _PG_init(). Background workers can also be started + from its _PG_init(). Background workers can also be started after the system is up and running by calling the function RegisterDynamicBackgroundWorker(BackgroundWorker *worker, BackgroundWorkerHandle **handle). Unlike - RegisterBackgroundWorker, which can only be called from within + RegisterBackgroundWorker, which can only be called from within the postmaster, RegisterDynamicBackgroundWorker must be - called from a regular backend. + called from a regular backend or another background worker. @@ -51,6 +51,7 @@ typedef void (*bgworker_main_type)(Datum main_arg); typedef struct BackgroundWorker { char bgw_name[BGW_MAXLEN]; + char bgw_type[BGW_MAXLEN]; int bgw_flags; BgWorkerStartTime bgw_start_time; int bgw_restart_time; /* in seconds, or BGW_NEVER_RESTART */ @@ -64,12 +65,18 @@ typedef struct BackgroundWorker - bgw_name is a string to be used in log messages, process - listings and similar contexts. + bgw_name and bgw_type are + strings to be used in log messages, process listings and similar contexts. + bgw_type should be the same for all background + workers of the same type, so that it is possible to group such workers in a + process listing, for example. bgw_name on the + other hand can contain additional information about the specific process. + (Typically, the string for bgw_name will contain + the type somehow, but that is not strictly required.) 
- bgw_flags is a bitwise-or'd bit mask indicating the + bgw_flags is a bitwise-or'd bit mask indicating the capabilities that the module wants. Possible values are: @@ -107,14 +114,14 @@ typedef struct BackgroundWorker bgw_start_time is the server state during which - postgres should start the process; it can be one of - BgWorkerStart_PostmasterStart (start as soon as - postgres itself has finished its own initialization; processes + postgres should start the process; it can be one of + BgWorkerStart_PostmasterStart (start as soon as + postgres itself has finished its own initialization; processes requesting this are not eligible for database connections), - BgWorkerStart_ConsistentState (start as soon as a consistent state + BgWorkerStart_ConsistentState (start as soon as a consistent state has been reached in a hot standby, allowing processes to connect to databases and run read-only queries), and - BgWorkerStart_RecoveryFinished (start as soon as the system has + BgWorkerStart_RecoveryFinished (start as soon as the system has entered normal read-write state). Note the last two values are equivalent in a server that's not a hot standby. Note that this setting only indicates when the processes are to be started; they do not stop when a different state @@ -145,9 +152,9 @@ typedef struct BackgroundWorker - bgw_main_arg is the Datum argument + bgw_main_arg is the Datum argument to the background worker main function. This main function should take a - single argument of type Datum and return void. + single argument of type Datum and return void. bgw_main_arg will be passed as the argument. In addition, the global variable MyBgworkerEntry points to a copy of the BackgroundWorker structure @@ -158,39 +165,41 @@ typedef struct BackgroundWorker On Windows (and anywhere else where EXEC_BACKEND is defined) or in dynamic background workers it is not safe to pass a - Datum by reference, only by value. If an argument is required, it + Datum by reference, only by value. If an argument is required, it is safest to pass an int32 or other small value and use that as an index - into an array allocated in shared memory. If a value like a cstring + into an array allocated in shared memory. If a value like a cstring or text is passed then the pointer won't be valid from the new background worker process. bgw_extra can contain extra data to be passed - to the background worker. Unlike bgw_main_arg, this data + to the background worker. Unlike bgw_main_arg, this data is not passed as an argument to the worker's main function, but it can be accessed via MyBgworkerEntry, as discussed above. bgw_notify_pid is the PID of a PostgreSQL - backend process to which the postmaster should send SIGUSR1 + backend process to which the postmaster should send SIGUSR1 when the process is started or exits. It should be 0 for workers registered at postmaster startup time, or when the backend registering the worker does not wish to wait for the worker to start up. Otherwise, it should be - initialized to MyProcPid. + initialized to MyProcPid. Once running, the process can connect to a database by calling - BackgroundWorkerInitializeConnection(char *dbname, char *username) or - BackgroundWorkerInitializeConnectionByOid(Oid dboid, Oid useroid). + BackgroundWorkerInitializeConnection(char *dbname, char *username, uint32 flags) or + BackgroundWorkerInitializeConnectionByOid(Oid dboid, Oid useroid, uint32 flags). This allows the process to run transactions and queries using the - SPI interface. 
If dbname is NULL or - dboid is InvalidOid, the session is not connected + SPI interface. If dbname is NULL or + dboid is InvalidOid, the session is not connected to any particular database, but shared catalogs can be accessed. - If username is NULL or useroid is - InvalidOid, the process will run as the superuser created - during initdb. + If username is NULL or useroid is + InvalidOid, the process will run as the superuser created + during initdb. If BGWORKER_BYPASS_ALLOWCONN + is specified as flags it is possible to bypass the restriction + to connect to databases not allowing user connections. A background worker can only call one of these two functions, and only once. It is not possible to switch databases. @@ -200,24 +209,24 @@ typedef struct BackgroundWorker background worker's main function, and must be unblocked by it; this is to allow the process to customize its signal handlers, if necessary. Signals can be unblocked in the new process by calling - BackgroundWorkerUnblockSignals and blocked by calling - BackgroundWorkerBlockSignals. + BackgroundWorkerUnblockSignals and blocked by calling + BackgroundWorkerBlockSignals. If bgw_restart_time for a background worker is - configured as BGW_NEVER_RESTART, or if it exits with an exit - code of 0 or is terminated by TerminateBackgroundWorker, + configured as BGW_NEVER_RESTART, or if it exits with an exit + code of 0 or is terminated by TerminateBackgroundWorker, it will be automatically unregistered by the postmaster on exit. Otherwise, it will be restarted after the time period configured via - bgw_restart_time, or immediately if the postmaster + bgw_restart_time, or immediately if the postmaster reinitializes the cluster due to a backend failure. Backends which need to suspend execution only temporarily should use an interruptible sleep rather than exiting; this can be achieved by calling WaitLatch(). Make sure the - WL_POSTMASTER_DEATH flag is set when calling that function, and + WL_POSTMASTER_DEATH flag is set when calling that function, and verify the return code for a prompt exit in the emergency case that - postgres itself has terminated. + postgres itself has terminated. @@ -231,34 +240,45 @@ typedef struct BackgroundWorker opaque handle that can subsequently be passed to GetBackgroundWorkerPid(BackgroundWorkerHandle *, pid_t *) or TerminateBackgroundWorker(BackgroundWorkerHandle *). - GetBackgroundWorkerPid can be used to poll the status of the - worker: a return value of BGWH_NOT_YET_STARTED indicates that + GetBackgroundWorkerPid can be used to poll the status of the + worker: a return value of BGWH_NOT_YET_STARTED indicates that the worker has not yet been started by the postmaster; BGWH_STOPPED indicates that it has been started but is no longer running; and BGWH_STARTED indicates that it is currently running. In this last case, the PID will also be returned via the second argument. - TerminateBackgroundWorker causes the postmaster to send - SIGTERM to the worker if it is running, and to unregister it + TerminateBackgroundWorker causes the postmaster to send + SIGTERM to the worker if it is running, and to unregister it as soon as it is not. In some cases, a process which registers a background worker may wish to wait for the worker to start up. This can be accomplished by initializing - bgw_notify_pid to MyProcPid and + bgw_notify_pid to MyProcPid and then passing the BackgroundWorkerHandle * obtained at registration time to WaitForBackgroundWorkerStartup(BackgroundWorkerHandle *handle, pid_t *) function. 
This function will block until the postmaster has attempted to start the - background worker, or until the postmaster dies. If the background runner - is running, the return value will BGWH_STARTED, and + background worker, or until the postmaster dies. If the background worker + is running, the return value will be BGWH_STARTED, and the PID will be written to the provided address. Otherwise, the return value will be BGWH_STOPPED or BGWH_POSTMASTER_DIED. + + A process can also wait for a background worker to shut down, by using the + WaitForBackgroundWorkerShutdown(BackgroundWorkerHandle + *handle) function and passing the + BackgroundWorkerHandle * obtained at registration. This + function will block until the background worker exits, or postmaster dies. + When the background worker exits, the return value is + BGWH_STOPPED, if postmaster dies it will return + BGWH_POSTMASTER_DIED. + + If a background worker sends asynchronous notifications with the NOTIFY command via the Server Programming Interface @@ -272,13 +292,13 @@ typedef struct BackgroundWorker - The src/test/modules/worker_spi module + The src/test/modules/worker_spi module contains a working example, which demonstrates some useful techniques. The maximum number of registered background workers is limited by - . + .
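As a minimal sketch tying the pieces above together (this is not the worker_spi example), the following C module registers a dynamic background worker and waits for it to start. The library name my_extension, the function names my_worker_main and start_my_worker, the target database postgres, and the ten-second wakeup interval are assumptions made for illustration; the flag values and API calls are the ones described above, including the three-argument form of BackgroundWorkerInitializeConnection.

#include "postgres.h"

#include "fmgr.h"
#include "miscadmin.h"
#include "pgstat.h"
#include "postmaster/bgworker.h"
#include "storage/ipc.h"
#include "storage/latch.h"

PG_MODULE_MAGIC;

void my_worker_main(Datum main_arg);
void start_my_worker(void);

/* Worker entry point: takes a single Datum argument and returns void. */
void
my_worker_main(Datum main_arg)
{
    /* Signals start out blocked; unblock once handlers are in place. */
    BackgroundWorkerUnblockSignals();

    /* Three-argument form described above; 0 means no special flags. */
    BackgroundWorkerInitializeConnection("postgres", NULL, 0);

    for (;;)
    {
        int         rc;

        /* Interruptible sleep; include WL_POSTMASTER_DEATH as advised. */
        rc = WaitLatch(MyLatch,
                       WL_LATCH_SET | WL_TIMEOUT | WL_POSTMASTER_DEATH,
                       10000L, PG_WAIT_EXTENSION);
        ResetLatch(MyLatch);
        if (rc & WL_POSTMASTER_DEATH)
            proc_exit(1);

        /* ... periodic work would go here ... */
    }
}

/* Called from a regular backend to launch the worker and wait for it. */
void
start_my_worker(void)
{
    BackgroundWorker worker;
    BackgroundWorkerHandle *handle;
    pid_t       pid;

    memset(&worker, 0, sizeof(worker));
    worker.bgw_flags = BGWORKER_SHMEM_ACCESS |
        BGWORKER_BACKEND_DATABASE_CONNECTION;
    worker.bgw_start_time = BgWorkerStart_RecoveryFinished;
    worker.bgw_restart_time = BGW_NEVER_RESTART;
    snprintf(worker.bgw_library_name, BGW_MAXLEN, "my_extension");
    snprintf(worker.bgw_function_name, BGW_MAXLEN, "my_worker_main");
    snprintf(worker.bgw_name, BGW_MAXLEN, "my example worker");
    worker.bgw_main_arg = Int32GetDatum(0);     /* pass small values only */
    worker.bgw_notify_pid = MyProcPid;          /* we intend to wait */

    if (!RegisterDynamicBackgroundWorker(&worker, &handle))
        ereport(ERROR,
                (errmsg("could not register background worker")));

    if (WaitForBackgroundWorkerStartup(handle, &pid) != BGWH_STARTED)
        ereport(ERROR,
                (errmsg("background worker did not start")));
}

Because bgw_notify_pid is set to MyProcPid, the registering backend can block in WaitForBackgroundWorkerStartup; a worker registered at postmaster startup would instead leave it zero, as noted above.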
diff --git a/doc/src/sgml/biblio.sgml b/doc/src/sgml/biblio.sgml index 5462bc38e4..4953024162 100644 --- a/doc/src/sgml/biblio.sgml +++ b/doc/src/sgml/biblio.sgml @@ -18,7 +18,7 @@ <acronym>SQL</acronym> Reference Books - + The Practical <acronym>SQL</acronym> Handbook Using SQL Variants Fourth Edition @@ -43,7 +43,7 @@ 2001 - + A Guide to the <acronym>SQL</acronym> Standard A user's guide to the standard database language SQL Fourth Edition @@ -64,7 +64,7 @@ 1997 - + An Introduction to Database Systems Eighth Edition @@ -80,7 +80,7 @@ 2003 - + Fundamentals of Database Systems Fourth Edition @@ -100,7 +100,7 @@ 2003 - + Understanding the New <acronym>SQL</acronym> A complete guide @@ -120,7 +120,7 @@ 1993 - + Principles of Database and Knowledge Base Systems @@ -141,7 +141,7 @@ PostgreSQL-specific Documentation - + Enhancement of the ANSI SQL Implementation of PostgreSQL @@ -171,7 +171,7 @@ ssimkovi@ag.or.at Discusses SQL history and syntax, and describes the addition of - INTERSECT and EXCEPT constructs into + INTERSECT and EXCEPT constructs into PostgreSQL. Prepared as a Master's Thesis with the support of O. Univ. Prof. Dr. Georg Gottlob and Univ. Ass. Mag. Katrin Seyr at Vienna University of Technology. @@ -185,7 +185,7 @@ ssimkovi@ag.or.at November 29, 1998 - + The <productname>Postgres95</productname> User Manual @@ -204,7 +204,7 @@ ssimkovi@ag.or.at Sept. 5, 1995 - + <ulink url="http://db.cs.berkeley.edu/papers/UCB-MS-zfong.pdf">The design and implementation of the <productname>POSTGRES</productname> query optimizer</ulink> @@ -222,7 +222,7 @@ ssimkovi@ag.or.at Proceedings and Articles - + Partial indexing in POSTGRES: research project @@ -238,7 +238,7 @@ ssimkovi@ag.or.at 1993 - + A Unified Framework for Version Modeling Using Production Rules in a Database System @@ -262,7 +262,7 @@ ssimkovi@ag.or.at - + <ulink url="http://db.cs.berkeley.edu/papers/ERL-M87-13.pdf">The <productname>POSTGRES</productname> data model</ulink> @@ -284,7 +284,7 @@ ssimkovi@ag.or.at - + <ulink url="http://citeseer.ist.psu.edu/seshadri95generalized.html">Generalized Partial Indexes</ulink> @@ -313,7 +313,7 @@ ssimkovi@ag.or.at 420-7 - + <ulink url="http://db.cs.berkeley.edu/papers/ERL-M85-95.pdf">The design of <productname>POSTGRES</productname></ulink> @@ -335,7 +335,7 @@ ssimkovi@ag.or.at - + The design of the <productname>POSTGRES</productname> rules system @@ -360,7 +360,7 @@ ssimkovi@ag.or.at - + <ulink url="http://db.cs.berkeley.edu/papers/ERL-M87-06.pdf">The design of the <productname>POSTGRES</productname> storage @@ -379,7 +379,7 @@ ssimkovi@ag.or.at </confgroup> </biblioentry> - <biblioentry id="STON89"> + <biblioentry id="ston89"> <biblioset relation="article"> <title><ulink url="http://db.cs.berkeley.edu/papers/ERL-M89-82.pdf">A commentary on the <productname>POSTGRES</productname> rules @@ -405,7 +405,7 @@ ssimkovi@ag.or.at </biblioset> </biblioentry> - <biblioentry id="STON89b"> + <biblioentry id="ston89b"> <biblioset relation="article"> <title><ulink url="http://db.cs.berkeley.edu/papers/ERL-M89-17.pdf">The case for partial indexes</ulink> @@ -423,7 +423,7 @@ ssimkovi@ag.or.at - + <ulink url="http://db.cs.berkeley.edu/papers/ERL-M90-34.pdf">The implementation of <productname>POSTGRES</productname></ulink> @@ -451,7 +451,7 @@ ssimkovi@ag.or.at - + <ulink url="http://db.cs.berkeley.edu/papers/ERL-M90-36.pdf">On Rules, Procedures, Caching and Views in Database Systems</ulink> diff --git a/doc/src/sgml/bki.sgml b/doc/src/sgml/bki.sgml index af6d8d1d2a..0fb309a1bd 100644 --- 
a/doc/src/sgml/bki.sgml +++ b/doc/src/sgml/bki.sgml @@ -1,38 +1,680 @@ - <acronym>BKI</acronym> Backend Interface + System Catalog Declarations and Initial Contents - Backend Interface (BKI) files are scripts in a - special language that is understood by the - PostgreSQL backend when running in the - bootstrap mode. The bootstrap mode allows system catalogs - to be created and filled from scratch, whereas ordinary SQL commands - require the catalogs to exist already. - BKI files can therefore be used to create the - database system in the first place. (And they are probably not - useful for anything else.) + PostgreSQL uses many different system catalogs + to keep track of the existence and properties of database objects, such as + tables and functions. Physically there is no difference between a system + catalog and a plain user table, but the backend C code knows the structure + and properties of each catalog, and can manipulate it directly at a low + level. Thus, for example, it is inadvisable to attempt to alter the + structure of a catalog on-the-fly; that would break assumptions built into + the C code about how rows of the catalog are laid out. But the structure + of the catalogs can change between major versions. - initdb uses a BKI file - to do part of its job when creating a new database cluster. The - input file used by initdb is created as - part of building and installing PostgreSQL - by a program named genbki.pl, which reads some - specially formatted C header files in the src/include/catalog/ - directory of the source tree. The created BKI file - is called postgres.bki and is - normally installed in the - share subdirectory of the installation tree. + The structures of the catalogs are declared in specially formatted C + header files in the src/include/catalog/ directory of + the source tree. In particular, for each catalog there is a header file + named after the catalog (e.g., pg_class.h + for pg_class), which defines the set of columns + the catalog has, as well as some other basic properties such as its OID. + Other critical files defining the catalog structure + include indexing.h, which defines the indexes present + on all the system catalogs, and toasting.h, which + defines TOAST tables for catalogs that need one. - Related information can be found in the documentation for - initdb. + Many of the catalogs have initial data that must be loaded into them + during the bootstrap phase + of initdb, to bring the system up to a point + where it is capable of executing SQL commands. (For + example, pg_class.h must contain an entry for itself, + as well as one for each other system catalog and index.) This + initial data is kept in editable form in data files that are also stored + in the src/include/catalog/ directory. For example, + pg_proc.dat describes all the initial rows that must + be inserted into the pg_proc catalog. + + To create the catalog files and load this initial data into them, a + backend running in bootstrap mode reads a BKI + (Backend Interface) file containing commands and initial data. + The postgres.bki file used in this mode is prepared + from the aforementioned header and data files, while building + a PostgreSQL distribution, by a Perl script + named genbki.pl. + Although it's specific to a particular PostgreSQL + release, postgres.bki is platform-independent and is + installed in the share subdirectory of the + installation tree. 
+ + + + genbki.pl also produces a derived header file for + each catalog, for example pg_class_d.h for + the pg_class catalog. This file contains + automatically-generated macro definitions, and may contain other macros, + enum declarations, and so on that can be useful for client C code that + reads a particular catalog. + + + + Most Postgres developers don't need to be directly concerned with + the BKI file, but almost any nontrivial feature + addition in the backend will require modifying the catalog header files + and/or initial data files. The rest of this chapter gives some + information about that, and for completeness describes + the BKI file format. + + + + System Catalog Declaration Rules + + + The key part of a catalog header file is a C structure definition + describing the layout of each row of the catalog. This begins with + a CATALOG macro, which so far as the C compiler is + concerned is just shorthand for typedef struct + FormData_catalogname. + Each field in the struct gives rise to a catalog column. + Fields can be annotated using the BKI property macros described + in genbki.h, for example to define a default value + for a field or mark it as nullable or not nullable. + The CATALOG line can also be annotated, with some + other BKI property macros described in genbki.h, to + define other properties of the catalog as a whole, such as whether + it has OIDs (by default, it does). + + + + The system catalog cache code (and most catalog-munging code in general) + assumes that the fixed-length portions of all system catalog tuples are + in fact present, because it maps this C struct declaration onto them. + Thus, all variable-length fields and nullable fields must be placed at + the end, and they cannot be accessed as struct fields. + For example, if you tried to + set pg_type.typrelid + to be NULL, it would fail when some piece of code tried to reference + typetup->typrelid (or worse, + typetup->typelem, because that follows + typrelid). This would result in + random errors or even segmentation violations. + + + + As a partial guard against this type of error, variable-length or + nullable fields should not be made directly visible to the C compiler. + This is accomplished by wrapping them in #ifdef + CATALOG_VARLEN ... #endif (where + CATALOG_VARLEN is a symbol that is never defined). + This prevents C code from carelessly trying to access fields that might + not be there or might be at some other offset. + As an independent guard against creating incorrect rows, we + require all columns that should be non-nullable to be marked so + in pg_attribute. The bootstrap code will + automatically mark catalog columns as NOT NULL + if they are fixed-width and are not preceded by any nullable column. + Where this rule is inadequate, you can force correct marking by using + BKI_FORCE_NOT_NULL + and BKI_FORCE_NULL annotations as needed. But note + that NOT NULL constraints are only enforced in the + executor, not against tuples that are generated by random C code, + so care is still needed when manually creating or updating catalog rows. + + + + Frontend code should not include any pg_xxx.h + catalog header file, as these files may contain C code that won't compile + outside the backend. (Typically, that happens because these files also + contain declarations for functions + in src/backend/catalog/ files.) + Instead, frontend code may include the corresponding + generated pg_xxx_d.h header, which will contain + OID #defines and any other data that might be of use + on the client side. 
If you want macros or other code in a catalog header + to be visible to frontend code, write #ifdef + EXPOSE_TO_CLIENT_CODE ... #endif around that + section to instruct genbki.pl to copy that section + to the pg_xxx_d.h header. + + + + A few of the catalogs are so fundamental that they can't even be created + by the BKI create command that's + used for most catalogs, because that command needs to write information + into these catalogs to describe the new catalog. These are + called bootstrap catalogs, and defining one takes + a lot of extra work: you have to manually prepare appropriate entries for + them in the pre-loaded contents of pg_class + and pg_type, and those entries will need to be + updated for subsequent changes to the catalog's structure. + (Bootstrap catalogs also need pre-loaded entries + in pg_attribute, but + fortunately genbki.pl handles that chore nowadays.) + Avoid making new catalogs be bootstrap catalogs if at all possible. + + + + + System Catalog Initial Data + + + Each catalog that has any manually-created initial data (some do not) + has a corresponding .dat file that contains its + initial data in an editable format. + + + + Data File Format + + + Each .dat file contains Perl data structure literals + that are simply eval'd to produce an in-memory data structure consisting + of an array of hash references, one per catalog row. + A slightly modified excerpt from pg_database.dat + will demonstrate the key features: + + + +[ + +# A comment could appear here. +{ oid => '1', oid_symbol => 'TemplateDbOid', + descr => 'database\'s default template', + datname => 'template1', datdba => 'PGUID', encoding => 'ENCODING', + datcollate => 'LC_COLLATE', datctype => 'LC_CTYPE', datistemplate => 't', + datallowconn => 't', datconnlimit => '-1', datlastsysoid => '0', + datfrozenxid => '0', datminmxid => '1', dattablespace => '1663', + datacl => '_null_' }, + +] + + + + Points to note: + + + + + + + The overall file layout is: open square bracket, one or more sets of + curly braces each of which represents a catalog row, close square + bracket. Write a comma after each closing curly brace. + + + + + + Within each catalog row, write comma-separated + key => + value pairs. The + allowed keys are the names of the catalog's + columns, plus the metadata keys oid, + oid_symbol, + array_type_oid, and descr. + (The use of oid and oid_symbol + is described in below, + while array_type_oid is described in + . + descr supplies a description string for the object, + which will be inserted into pg_description + or pg_shdescription as appropriate.) + While the metadata keys are optional, the catalog's defined columns + must all be provided, except when the catalog's .h + file specifies a default value for the column. + + + + + + All values must be single-quoted. Escape single quotes used within a + value with a backslash. Backslashes meant as data can, but need not, + be doubled; this follows Perl's rules for simple quoted literals. + Note that backslashes appearing as data will be treated as escapes by + the bootstrap scanner, according to the same rules as for escape string + constants (see ); for + example \t converts to a tab character. If you + actually want a backslash in the final value, you will need to write + four of them: Perl strips two, leaving \\ for the + bootstrap scanner to see. + + + + + + Null values are represented by _null_. + (Note that there is no way to create a value that is just that + string.) + + + + + + Comments are preceded by #, and must be on their + own lines. 
+ + + + + + To aid readability, field values that are OIDs of other catalog + entries can be represented by names rather than numeric OIDs. + This is described in + below. + + + + + + Since hashes are unordered data structures, field order and line + layout aren't semantically significant. However, to maintain a + consistent appearance, we set a few rules that are applied by the + formatting script reformat_dat_file.pl: + + + + + + Within each pair of curly braces, the metadata + fields oid, oid_symbol, + array_type_oid, and descr + (if present) come first, in that order, then the catalog's own + fields appear in their defined order. + + + + + + Newlines are inserted between fields as needed to limit line length + to 80 characters, if possible. A newline is also inserted between + the metadata fields and the regular fields. + + + + + + If the catalog's .h file specifies a default + value for a column, and a data entry has that same + value, reformat_dat_file.pl will omit it from + the data file. This keeps the data representation compact. + + + + + + reformat_dat_file.pl preserves blank lines + and comment lines as-is. + + + + + + It's recommended to run reformat_dat_file.pl + before submitting catalog data patches. For convenience, you can + simply change to src/include/catalog/ and + run make reformat-dat-files. + + + + + + If you want to add a new method of making the data representation + smaller, you must implement it + in reformat_dat_file.pl and also + teach Catalog::ParseData() how to expand the + data back into the full representation. + + + + + + + + OID Assignment + + + A catalog row appearing in the initial data can be given a + manually-assigned OID by writing an oid + => nnnn metadata field. + Furthermore, if an OID is assigned, a C macro for that OID can be + created by writing an oid_symbol + => name metadata field. + + + + Pre-loaded catalog rows must have preassigned OIDs if there are OID + references to them in other pre-loaded rows. A preassigned OID is + also needed if the row's OID must be referenced from C code. + If neither case applies, the oid metadata field can + be omitted, in which case the bootstrap code assigns an OID + automatically, or leaves it zero in a catalog that has no OIDs. + In practice we usually preassign OIDs for all or none of the pre-loaded + rows in a given catalog, even if only some of them are actually + cross-referenced. + + + + Writing the actual numeric value of any OID in C code is considered + very bad form; always use a macro, instead. Direct references + to pg_proc OIDs are common enough that there's + a special mechanism to create the necessary macros automatically; + see src/backend/utils/Gen_fmgrtab.pl. Similarly + — but, for historical reasons, not done the same way — + there's an automatic method for creating macros + for pg_type + OIDs. oid_symbol entries are therefore not + necessary in those two catalogs. Likewise, macros for + the pg_class OIDs of system catalogs and + indexes are set up automatically. For all other system catalogs, you + have to manually specify any macros you need + via oid_symbol entries. + + + + To find an available OID for a new pre-loaded row, run the + script src/include/catalog/unused_oids. + It prints inclusive ranges of unused OIDs (e.g., the output + line 45-900 means OIDs 45 through 900 have not been + allocated yet). Currently, OIDs 1-9999 are reserved for manual + assignment; the unused_oids script simply looks + through the catalog headers and .dat files + to see which ones do not appear. 
You can also use + the duplicate_oids script to check for mistakes. + (genbki.pl will also detect duplicate OIDs + at compile time.) + + + + The OID counter starts at 10000 at the beginning of a bootstrap run. + If a catalog row is in a table that requires OIDs, but no OID was + preassigned by an oid field, then it will + receive an OID of 10000 or above. + + + + + OID Reference Lookup + + + Cross-references from one initial catalog row to another can be written + by just writing the preassigned OID of the referenced row. But + that's error-prone and hard to understand, so for frequently-referenced + catalogs, genbki.pl provides mechanisms to write + symbolic references instead. Currently this is possible for references + to access methods, functions, operators, opclasses, opfamilies, and + types. The rules are as follows: + + + + + + + Use of symbolic references is enabled in a particular catalog column + by attaching BKI_LOOKUP(lookuprule) + to the column's definition, where lookuprule + is pg_am, pg_proc, + pg_operator, pg_opclass, + pg_opfamily, or pg_type. + BKI_LOOKUP can be attached to columns of + type Oid, regproc, oidvector, + or Oid[]; in the latter two cases it implies performing a + lookup on each element of the array. + + + + + + In such a column, all entries must use the symbolic format except + when writing 0 for InvalidOid. (If the column is + declared regproc, you can optionally + write - instead of 0.) + genbki.pl will warn about unrecognized names. + + + + + + Access methods are just represented by their names, as are types. + Type names must match the referenced pg_type + entry's typname; you do not get to use any + aliases such as integer + for int4. + + + + + + A function can be represented by + its proname, if that is unique among + the pg_proc.dat entries (this works like regproc + input). Otherwise, write it + as proname(argtypename,argtypename,...), + like regprocedure. The argument type names must be spelled exactly as + they are in the pg_proc.dat entry's + proargtypes field. Do not insert any + spaces. + + + + + + Operators are represented + by oprname(lefttype,righttype), + writing the type names exactly as they appear in + the pg_operator.dat + entry's oprleft + and oprright fields. + (Write 0 for the omitted operand of a unary + operator.) + + + + + + The names of opclasses and opfamilies are only unique within an + access method, so they are represented + by access_method_name/object_name. + + + + + + In none of these cases is there any provision for + schema-qualification; all objects created during bootstrap are + expected to be in the pg_catalog schema. + + + + + + genbki.pl resolves all symbolic references while it + runs, and puts simple numeric OIDs into the emitted BKI file. There is + therefore no need for the bootstrap backend to deal with symbolic + references. + + + + + Automatic Creation of Array Types + + + Most scalar data types should have a corresponding array type (that is, + a standard varlena array type whose element type is the scalar type, and + which is referenced by the typarray field of + the scalar type's pg_type + entry). genbki.pl is able to generate + the pg_type entry for the array type + automatically in most cases. + + + + To use this facility, just write an array_type_oid + => nnnn metadata field in the + scalar type's pg_type entry, specifying the OID + to use for the array type. You may then omit + the typarray field, since it will be filled + automatically with that OID. 
+ + + + The generated array type's name is the scalar type's name with an + underscore prepended. The array entry's other fields are filled from + BKI_ARRAY_DEFAULT(value) + annotations in pg_type.h, or if there isn't one, + copied from the scalar type. (There's also a special case + for typalign.) Then + the typelem + and typarray fields of the two entries are + set to cross-reference each other. + + + + + Recipes for Editing Data Files + + + Here are some suggestions about the easiest ways to perform common tasks + when updating catalog data files. + + + + Add a new column with a default to a catalog: + + Add the column to the header file with + a BKI_DEFAULT(value) + annotation. The data file need only be adjusted by adding the field + in existing rows where a non-default value is needed. + + + + + Add a default value to an existing column that doesn't have + one: + + Add a BKI_DEFAULT annotation to the header file, + then run make reformat-dat-files to remove + now-redundant field entries. + + + + + Remove a column, whether it has a default or not: + + Remove the column from the header, then run make + reformat-dat-files to remove now-useless field entries. + + + + + Change or remove an existing default value: + + You cannot simply change the header file, since that will cause the + current data to be interpreted incorrectly. First run make + expand-dat-files to rewrite the data files with all + default values inserted explicitly, then change or remove + the BKI_DEFAULT annotation, then run make + reformat-dat-files to remove superfluous fields again. + + + + + Ad-hoc bulk editing: + + reformat_dat_file.pl can be adapted to perform + many kinds of bulk changes. Look for its block comments showing where + one-off code can be inserted. In the following example, we are going + to consolidate two boolean fields in pg_proc + into a char field: + + + + + Add the new column, with a default, + to pg_proc.h: + ++ /* see PROKIND_ categories below */ ++ char prokind BKI_DEFAULT(f); + + + + + + + Create a new script based on reformat_dat_file.pl + to insert appropriate values on-the-fly: + +- # At this point we have the full row in memory as a hash +- # and can do any operations we want. As written, it only +- # removes default values, but this script can be adapted to +- # do one-off bulk-editing. ++ # One-off change to migrate to prokind ++ # Default has already been filled in by now, so change to other ++ # values as appropriate ++ if ($values{proisagg} eq 't') ++ { ++ $values{prokind} = 'a'; ++ } ++ elsif ($values{proiswindow} eq 't') ++ { ++ $values{prokind} = 'w'; ++ } + + + + + + + Run the new script: + +$ cd src/include/catalog +$ perl rewrite_dat_with_prokind.pl pg_proc.dat + + At this point pg_proc.dat has all three + columns, prokind, + proisagg, + and proiswindow, though they will appear + only in rows where they have non-default values. + + + + + + Remove the old columns from pg_proc.h: + +- /* is it an aggregate? */ +- bool proisagg BKI_DEFAULT(f); +- +- /* is it a window function? */ +- bool proiswindow BKI_DEFAULT(f); + + + + + + + Finally, run make reformat-dat-files to remove + the useless old entries from pg_proc.dat. 
+ + + + + For further examples of scripts used for bulk editing, see + convert_oid2name.pl + and remove_pg_type_oid_symbols.pl attached to this + message: + + + + + + <acronym>BKI</acronym> File Format @@ -67,19 +709,19 @@ - create + create tablename tableoid - bootstrap - shared_relation - without_oids - rowtype_oid oid + bootstrap + shared_relation + without_oids + rowtype_oid oid (name1 = type1 - FORCE NOT NULL | FORCE NULL , + FORCE NOT NULL | FORCE NULL , name2 = type2 - FORCE NOT NULL | FORCE NULL , + FORCE NOT NULL | FORCE NULL , ...) @@ -93,7 +735,7 @@ The following column types are supported directly by - bootstrap.c: bool, + bootstrap.c: bool, bytea, char (1 byte), name, int2, int4, regproc, regclass, @@ -104,31 +746,31 @@ _oid (array), _char (array), _aclitem (array). Although it is possible to create tables containing columns of other types, this cannot be done until - after pg_type has been created and filled with + after pg_type has been created and filled with appropriate entries. (That effectively means that only these - column types can be used in bootstrapped tables, but non-bootstrap + column types can be used in bootstrap catalogs, but non-bootstrap catalogs can contain any built-in type.) - When bootstrap is specified, + When bootstrap is specified, the table will only be created on disk; nothing is entered into pg_class, pg_attribute, etc, for it. Thus the table will not be accessible by ordinary SQL operations until - such entries are made the hard way (with insert + such entries are made the hard way (with insert commands). This option is used for creating pg_class etc themselves. - The table is created as shared if shared_relation is + The table is created as shared if shared_relation is specified. - It will have OIDs unless without_oids is specified. - The table's row type OID (pg_type OID) can optionally - be specified via the rowtype_oid clause; if not specified, - an OID is automatically generated for it. (The rowtype_oid - clause is useless if bootstrap is specified, but it can be + It will have OIDs unless without_oids is specified. + The table's row type OID (pg_type OID) can optionally + be specified via the rowtype_oid clause; if not specified, + an OID is automatically generated for it. (The rowtype_oid + clause is useless if bootstrap is specified, but it can be provided anyway for documentation.) @@ -136,7 +778,7 @@ - open tablename + open tablename @@ -150,20 +792,20 @@ - close tablename + close tablename - Close the open table. The name of the table can be given as a - cross-check, but this is not required. + Close the open table. The name of the table must be given as a + cross-check. - insert OID = oid_value ( value1 value2 ... ) + insert OID = oid_value ( value1 value2 ... ) @@ -180,22 +822,22 @@ NULL values can be specified using the special key word - _null_. Values containing spaces must be - double quoted. + _null_. Values that do not look like + identifiers or digit strings must be double quoted. - declare unique - index indexname + declare unique + index indexname indexoid - on tablename - using amname - ( opclass1 + on tablename + using amname + ( opclass1 name1 - , ... ) + , ... ) @@ -220,10 +862,10 @@ - declare toast + declare toast toasttableoid toastindexoid - on tablename + on tablename @@ -234,14 +876,14 @@ toasttableoid and its index is assigned OID toastindexoid. - As with declare index, filling of the index + As with declare index, filling of the index is postponed. 
- build indices + build indices @@ -257,17 +899,17 @@ Structure of the Bootstrap <acronym>BKI</acronym> File - The open command cannot be used until the tables it uses + The open command cannot be used until the tables it uses exist and have entries for the table that is to be opened. - (These minimum tables are pg_class, - pg_attribute, pg_proc, and - pg_type.) To allow those tables themselves to be filled, - create with the bootstrap option implicitly opens + (These minimum tables are pg_class, + pg_attribute, pg_proc, and + pg_type.) To allow those tables themselves to be filled, + create with the bootstrap option implicitly opens the created table for data insertion. - Also, the declare index and declare toast + Also, the declare index and declare toast commands cannot be used until the system catalogs they need have been created and filled in. @@ -278,17 +920,17 @@ - create bootstrap one of the critical tables + create bootstrap one of the critical tables - insert data describing at least the critical tables + insert data describing at least the critical tables - close + close @@ -298,22 +940,22 @@ - create (without bootstrap) a noncritical table + create (without bootstrap) a noncritical table - open + open - insert desired data + insert desired data - close + close @@ -328,7 +970,7 @@ - build indices + build indices @@ -340,7 +982,7 @@ - Example + BKI Example The following sequence of commands will create the diff --git a/doc/src/sgml/bloom.sgml b/doc/src/sgml/bloom.sgml index 396348c523..6eeaddee09 100644 --- a/doc/src/sgml/bloom.sgml +++ b/doc/src/sgml/bloom.sgml @@ -8,8 +8,8 @@ - bloom provides an index access method based on - Bloom filters. + bloom provides an index access method based on + Bloom filters. @@ -42,29 +42,30 @@ Parameters - A bloom index accepts the following parameters in its - WITH clause: + A bloom index accepts the following parameters in its + WITH clause: - length + length - Length of each signature (index entry) in bits. The default - is 80 bits and maximum is 4096. + Length of each signature (index entry) in bits. It is rounded up to the + nearest multiple of 16. The default is + 80 bits and the maximum is 4096. - col1 — col32 + col1 — col32 Number of bits generated for each index column. Each parameter's name refers to the number of the index column that it controls. The default - is 2 bits and maximum is 4095. Parameters for + is 2 bits and maximum is 4095. Parameters for index columns not actually used are ignored. @@ -87,8 +88,8 @@ CREATE INDEX bloomidx ON tbloom USING bloom (i1,i2,i3) The index is created with a signature length of 80 bits, with attributes i1 and i2 mapped to 2 bits, and attribute i3 mapped to 4 bits. We could - have omitted the length, col1, - and col2 specifications since those have the default values. + have omitted the length, col1, + and col2 specifications since those have the default values. @@ -175,7 +176,7 @@ CREATE INDEX Note the relatively large number of false positives: 2439 rows were selected to be visited in the heap, but none actually matched the query. We could reduce that by specifying a larger signature length. - In this example, creating the index with length=200 + In this example, creating the index with length=200 reduced the number of false positives to 55; but it doubled the index size (to 306 MB) and ended up being slower for this query (125 ms overall). 
@@ -213,7 +214,7 @@ CREATE INDEX An operator class for bloom indexes requires only a hash function for the indexed data type and an equality operator for searching. This example - shows the operator class definition for the text data type: + shows the operator class definition for the text data type: @@ -230,7 +231,7 @@ DEFAULT FOR TYPE text USING bloom AS - Only operator classes for int4 and text are + Only operator classes for int4 and text are included with the module. @@ -242,6 +243,20 @@ DEFAULT FOR TYPE text USING bloom AS operations in the future. + + + + bloom access method doesn't support + UNIQUE indexes. + + + + + + bloom access method doesn't support searching for + NULL values. + + diff --git a/doc/src/sgml/brin.sgml b/doc/src/sgml/brin.sgml index 8dcc29925b..da0c911153 100644 --- a/doc/src/sgml/brin.sgml +++ b/doc/src/sgml/brin.sgml @@ -1,6 +1,6 @@ - + BRIN Indexes @@ -16,7 +16,7 @@ BRIN is designed for handling very large tables in which certain columns have some natural correlation with their physical location within the table. - A block range is a group of pages that are physically + A block range is a group of pages that are physically adjacent in the table; for each block range, some summary info is stored by the index. For example, a table storing a store's sale orders might have @@ -29,7 +29,7 @@ BRIN indexes can satisfy queries via regular bitmap index scans, and will return all tuples in all pages within each range if - the summary info stored by the index is consistent with the + the summary info stored by the index is consistent with the query conditions. The query executor is in charge of rechecking these tuples and discarding those that do not match the query conditions — in other words, these @@ -51,9 +51,9 @@ The size of the block range is determined at index creation time by - the pages_per_range storage parameter. The number of index + the pages_per_range storage parameter. The number of index entries will be equal to the size of the relation in pages divided by - the selected value for pages_per_range. Therefore, the smaller + the selected value for pages_per_range. Therefore, the smaller the number, the larger the index becomes (because of the need to store more index entries), but at the same time the summary data stored can be more precise and more data blocks can be skipped during an index scan. @@ -86,6 +86,18 @@ representation because the existing values have changed. + + When autosummarization is enabled, each time a page range is filled a + request is sent to autovacuum for it to execute a targeted summarization + for that range, to be fulfilled at the end of the next worker run on the + same database. If the request queue is full, the request is not recorded + and a message is sent to the server log: + +LOG: request for BRIN range summarization for index "brin_wi_idx" page 128 was not recorded + + When this happens, the range will be summarized normally during the next + regular vacuum of the table. + @@ -95,13 +107,13 @@ The core PostgreSQL distribution includes the BRIN operator classes shown in - . + . - The minmax + The minmax operator classes store the minimum and the maximum values appearing - in the indexed column within the range. The inclusion + in the indexed column within the range. The inclusion operator classes store a value which includes the values in the indexed column within the range. 
@@ -117,17 +129,6 @@ - - abstime_minmax_ops - abstime - - < - <= - = - >= - > - - int8_minmax_ops bigint @@ -162,21 +163,21 @@ - box_inclusion_ops + box_inclusion_ops box - << - &< - && - &> - >> - ~= - @> - <@ - &<| - <<| + << + &< + && + &> + >> + ~= + @> + <@ + &<| + <<| |>> - |&> + |&> @@ -249,11 +250,11 @@ network_inclusion_ops inet - && - >>= + && + >>= <<= = - >> + >> << @@ -346,18 +347,18 @@ - range_inclusion_ops + range_inclusion_ops any range type - << - &< - && - &> - >> - @> - <@ - -|- - = + << + &< + && + &> + >> + @> + <@ + -|- + = < <= = @@ -376,17 +377,6 @@ > - - reltime_minmax_ops - reltime - - < - <= - = - >= - > - - int2_minmax_ops smallint @@ -505,11 +495,11 @@ - BrinOpcInfo *opcInfo(Oid type_oid) + BrinOpcInfo *opcInfo(Oid type_oid) Returns internal information about the indexed columns' summary data. - The return value must point to a palloc'd BrinOpcInfo, + The return value must point to a palloc'd BrinOpcInfo, which has this definition: typedef struct BrinOpcInfo @@ -524,8 +514,8 @@ typedef struct BrinOpcInfo TypeCacheEntry *oi_typcache[FLEXIBLE_ARRAY_MEMBER]; } BrinOpcInfo; - BrinOpcInfo.oi_opaque can be used by the - operator class routines to pass information between support procedures + BrinOpcInfo.oi_opaque can be used by the + operator class routines to pass information between support functions during an index scan. @@ -575,27 +565,27 @@ typedef struct BrinOpcInfo defined by the user for other data types using equivalent definitions, without having to write any source code; appropriate catalog entries being declared is enough. Note that assumptions about the semantics of operator - strategies are embedded in the support procedures' source code. + strategies are embedded in the support functions' source code. Operator classes that implement completely different semantics are also - possible, provided implementations of the four main support procedures + possible, provided implementations of the four main support functions described above are written. Note that backwards compatibility across major - releases is not guaranteed: for example, additional support procedures might + releases is not guaranteed: for example, additional support functions might be required in later releases. To write an operator class for a data type that implements a totally - ordered set, it is possible to use the minmax support procedures + ordered set, it is possible to use the minmax support functions alongside the corresponding operators, as shown in - . - All operator class members (procedures and operators) are mandatory. + . + All operator class members (functions and operators) are mandatory. - Procedure and Support Numbers for Minmax Operator Classes + Function and Support Numbers for Minmax Operator Classes @@ -605,19 +595,19 @@ typedef struct BrinOpcInfo - Support Procedure 1 + Support Function 1 internal function brin_minmax_opcinfo() - Support Procedure 2 + Support Function 2 internal function brin_minmax_add_value() - Support Procedure 3 + Support Function 3 internal function brin_minmax_consistent() - Support Procedure 4 + Support Function 4 internal function brin_minmax_union() @@ -647,8 +637,8 @@ typedef struct BrinOpcInfo To write an operator class for a complex data type which has values included within another type, it's possible to use the inclusion support - procedures alongside the corresponding operators, as shown - in . It requires + functions alongside the corresponding operators, as shown + in . 
It requires only a single additional function, which can be written in any language. More functions can be defined for additional functionality. All operators are optional. Some operators require other operators, as shown as @@ -656,7 +646,7 @@ typedef struct BrinOpcInfo
- Procedure and Support Numbers for Inclusion Operator Classes + Function and Support Numbers for Inclusion Operator Classes @@ -667,42 +657,42 @@ typedef struct BrinOpcInfo - Support Procedure 1 + Support Function 1 internal function brin_inclusion_opcinfo() - Support Procedure 2 + Support Function 2 internal function brin_inclusion_add_value() - Support Procedure 3 + Support Function 3 internal function brin_inclusion_consistent() - Support Procedure 4 + Support Function 4 internal function brin_inclusion_union() - Support Procedure 11 + Support Function 11 function to merge two elements - Support Procedure 12 + Support Function 12 optional function to check whether two elements are mergeable - Support Procedure 13 + Support Function 13 optional function to check if an element is contained within another - Support Procedure 14 + Support Function 14 optional function to check whether an element is empty @@ -791,22 +781,22 @@ typedef struct BrinOpcInfo
- Support procedure numbers 1-10 are reserved for the BRIN internal + Support function numbers 1-10 are reserved for the BRIN internal functions, so the SQL level functions start with number 11. Support function number 11 is the main function required to build the index. It should accept two arguments with the same data type as the operator class, and return the union of them. The inclusion operator class can store union values with different data types if it is defined with the - STORAGE parameter. The return value of the union - function should match the STORAGE data type. + STORAGE parameter. The return value of the union + function should match the STORAGE data type. - Support procedure numbers 12 and 14 are provided to support - irregularities of built-in data types. Procedure number 12 + Support function numbers 12 and 14 are provided to support + irregularities of built-in data types. Function number 12 is used to support network addresses from different families which - are not mergeable. Procedure number 14 is used to support - empty ranges. Procedure number 13 is an optional but + are not mergeable. Function number 14 is used to support + empty ranges. Function number 13 is an optional but recommended one, which allows the new value to be checked before it is passed to the union function. As the BRIN framework can shortcut some operations when the union is not changed, using this @@ -821,13 +811,13 @@ typedef struct BrinOpcInfo additional data types to be supported by defining extra sets of operators. Inclusion operator class operator strategies are dependent on another operator strategy as shown in - , or the same + , or the same operator strategy as themselves. They require the dependency - operator to be defined with the STORAGE data type as the + operator to be defined with the STORAGE data type as the left-hand-side argument and the other supported data type to be the right-hand-side argument of the supported operator. See - float4_minmax_ops as an example of minmax, and - box_inclusion_ops as an example of inclusion. + float4_minmax_ops as an example of minmax, and + box_inclusion_ops as an example of inclusion.
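To show the shape of support function 11 described above, here is a hedged C sketch for a hypothetical fixed-length type IntSpan (a closed integer interval). The type, the function name intspan_merge, and the operator class it would belong to are invented for the example; a real inclusion operator class would also need the brin_inclusion_* framework functions (numbers 1 through 4) and the operator entries discussed in this section.

#include "postgres.h"
#include "fmgr.h"

/* Hypothetical fixed-length indexed type: a closed integer interval. */
typedef struct IntSpan
{
    int32       lo;
    int32       hi;
} IntSpan;

PG_FUNCTION_INFO_V1(intspan_merge);

/*
 * Inclusion support function 11: given two IntSpan values, return the
 * smallest IntSpan that contains both.  The result matches the opclass
 * data type (or its STORAGE type, if one were declared).
 */
Datum
intspan_merge(PG_FUNCTION_ARGS)
{
    IntSpan    *a = (IntSpan *) PG_GETARG_POINTER(0);
    IntSpan    *b = (IntSpan *) PG_GETARG_POINTER(1);
    IntSpan    *result = (IntSpan *) palloc(sizeof(IntSpan));

    result->lo = Min(a->lo, b->lo);
    result->hi = Max(a->hi, b->hi);

    PG_RETURN_POINTER(result);
}

Such a function would be attached to the operator class with a FUNCTION 11 entry in its CREATE OPERATOR CLASS definition, alongside the framework functions for numbers 1 through 4.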
diff --git a/doc/src/sgml/btree-gin.sgml b/doc/src/sgml/btree-gin.sgml index 375e7ec4be..314e001fef 100644 --- a/doc/src/sgml/btree-gin.sgml +++ b/doc/src/sgml/btree-gin.sgml @@ -8,16 +8,17 @@ - btree_gin provides sample GIN operator classes that + btree_gin provides sample GIN operator classes that implement B-tree equivalent behavior for the data types - int2, int4, int8, float4, - float8, timestamp with time zone, - timestamp without time zone, time with time zone, - time without time zone, date, interval, - oid, money, "char", - varchar, text, bytea, bit, - varbit, macaddr, macaddr8, inet, - cidr, and all enum types. + int2, int4, int8, float4, + float8, timestamp with time zone, + timestamp without time zone, time with time zone, + time without time zone, date, interval, + oid, money, "char", + varchar, text, bytea, bit, + varbit, macaddr, macaddr8, inet, + cidr, uuid, name, bool, + bpchar, and all enum types. diff --git a/doc/src/sgml/btree-gist.sgml b/doc/src/sgml/btree-gist.sgml index f3c639c2f3..774442feee 100644 --- a/doc/src/sgml/btree-gist.sgml +++ b/doc/src/sgml/btree-gist.sgml @@ -8,16 +8,16 @@ - btree_gist provides GiST index operator classes that + btree_gist provides GiST index operator classes that implement B-tree equivalent behavior for the data types - int2, int4, int8, float4, - float8, numeric, timestamp with time zone, - timestamp without time zone, time with time zone, - time without time zone, date, interval, - oid, money, char, - varchar, text, bytea, bit, - varbit, macaddr, macaddr8, inet, - cidr, uuid, and all enum types. + int2, int4, int8, float4, + float8, numeric, timestamp with time zone, + timestamp without time zone, time with time zone, + time without time zone, date, interval, + oid, money, char, + varchar, text, bytea, bit, + varbit, macaddr, macaddr8, inet, + cidr, uuid, and all enum types. @@ -33,23 +33,23 @@ - In addition to the typical B-tree search operators, btree_gist + In addition to the typical B-tree search operators, btree_gist also provides index support for <> (not equals). This may be useful in combination with an - exclusion constraint, + exclusion constraint, as described below. Also, for data types for which there is a natural distance metric, - btree_gist defines a distance operator <->, + btree_gist defines a distance operator <->, and provides GiST index support for nearest-neighbor searches using this operator. Distance operators are provided for - int2, int4, int8, float4, - float8, timestamp with time zone, - timestamp without time zone, - time without time zone, date, interval, - oid, and money. + int2, int4, int8, float4, + float8, timestamp with time zone, + timestamp without time zone, + time without time zone, date, interval, + oid, and money. @@ -70,7 +70,7 @@ SELECT *, a <-> 42 AS dist FROM test ORDER BY a <-> 42 LIMIT 10; - Use an exclusion + Use an exclusion constraint to enforce the rule that a cage at a zoo can contain only one kind of animal: diff --git a/doc/src/sgml/btree.sgml b/doc/src/sgml/btree.sgml new file mode 100644 index 0000000000..c16825e2ea --- /dev/null +++ b/doc/src/sgml/btree.sgml @@ -0,0 +1,443 @@ + + + +B-Tree Indexes + + + index + B-Tree + + + + Introduction + + + PostgreSQL includes an implementation of the + standard btree (multi-way binary tree) index data + structure. Any data type that can be sorted into a well-defined linear + order can be indexed by a btree index. 
The only limitation is that an + index entry cannot exceed approximately one-third of a page (after TOAST + compression, if applicable). + + + + Because each btree operator class imposes a sort order on its data type, + btree operator classes (or, really, operator families) have come to be + used as PostgreSQL's general representation + and understanding of sorting semantics. Therefore, they've acquired + some features that go beyond what would be needed just to support btree + indexes, and parts of the system that are quite distant from the + btree AM make use of them. + + + + + + Behavior of B-Tree Operator Classes + + + As shown in , a btree operator + class must provide five comparison operators, + <, + <=, + =, + >= and + >. + One might expect that <> should also be part of + the operator class, but it is not, because it would almost never be + useful to use a <> WHERE clause in an index + search. (For some purposes, the planner treats <> + as associated with a btree operator class; but it finds that operator via + the = operator's negator link, rather than + from pg_amop.) + + + + When several data types share near-identical sorting semantics, their + operator classes can be grouped into an operator family. Doing so is + advantageous because it allows the planner to make deductions about + cross-type comparisons. Each operator class within the family should + contain the single-type operators (and associated support functions) + for its input data type, while cross-type comparison operators and + support functions are loose in the family. It is + recommendable that a complete set of cross-type operators be included + in the family, thus ensuring that the planner can represent any + comparison conditions that it deduces from transitivity. + + + + There are some basic assumptions that a btree operator family must + satisfy: + + + + + + An = operator must be an equivalence relation; that + is, for all non-null values A, + B, C of the + data type: + + + + + A = + A is true + (reflexive law) + + + + + if A = + B, + then B = + A + (symmetric law) + + + + + if A = + B and B + = C, + then A = + C + (transitive law) + + + + + + + + + A < operator must be a strong ordering relation; + that is, for all non-null values A, + B, C: + + + + + A < + A is false + (irreflexive law) + + + + + if A < + B + and B < + C, + then A < + C + (transitive law) + + + + + + + + + Furthermore, the ordering is total; that is, for all non-null + values A, B: + + + + + exactly one of A < + B, A + = B, and + B < + A is true + (trichotomy law) + + + + + (The trichotomy law justifies the definition of the comparison support + function, of course.) + + + + + + The other three operators are defined in terms of = + and < in the obvious way, and must act consistently + with them. + + + + For an operator family supporting multiple data types, the above laws must + hold when A, B, + C are taken from any data types in the family. + The transitive laws are the trickiest to ensure, as in cross-type + situations they represent statements that the behaviors of two or three + different operators are consistent. + As an example, it would not work to put float8 + and numeric into the same operator family, at least not with + the current semantics that numeric values are converted + to float8 for comparison to a float8. Because + of the limited accuracy of float8, this means there are + distinct numeric values that will compare equal to the + same float8 value, and thus the transitive law would fail. 
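Written out symbolically, the float8/numeric argument above amounts to the following (a restatement of the reasoning, not an additional rule): if comparison of a numeric against a float8 is performed by first rounding the numeric operand, then

\[
  n_1 \neq n_2,\quad
  \mathrm{round}_{\mathtt{float8}}(n_1) = \mathrm{round}_{\mathtt{float8}}(n_2) = f
  \;\Longrightarrow\;
  n_1 = f \ \text{and}\ f = n_2
  \;\Longrightarrow\;
  n_1 = n_2 \ \text{(by transitivity)},
\]

which contradicts the assumption that the two numeric values are distinct, so the transitive law cannot hold for such a family.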
+ + + + Another requirement for a multiple-data-type family is that any implicit + or binary-coercion casts that are defined between data types included in + the operator family must not change the associated sort ordering. + + + + It should be fairly clear why a btree index requires these laws to hold + within a single data type: without them there is no ordering to arrange + the keys with. Also, index searches using a comparison key of a + different data type require comparisons to behave sanely across two + data types. The extensions to three or more data types within a family + are not strictly required by the btree index mechanism itself, but the + planner relies on them for optimization purposes. + + + + + + B-Tree Support Functions + + + As shown in , btree defines + one required and two optional support functions. + + + + For each combination of data types that a btree operator family provides + comparison operators for, it must provide a comparison support function, + registered in pg_amproc with support function + number 1 and + amproclefttype/amprocrighttype + equal to the left and right data types for the comparison (i.e., the + same data types that the matching operators are registered with + in pg_amop). + The comparison function must take two non-null values + A and B and + return an int32 value that + is < 0, 0, + or > 0 + when A < + B, A + = B, + or A > + B, respectively. + A null result is disallowed: all values of the data type must be comparable. + See src/backend/access/nbtree/nbtcompare.c for + examples. + + + + If the compared values are of a collatable data type, the appropriate + collation OID will be passed to the comparison support function, using + the standard PG_GET_COLLATION() mechanism. + + + + Optionally, a btree operator family may provide sort + support function(s), registered under support function number + 2. These functions allow implementing comparisons for sorting purposes + in a more efficient way than naively calling the comparison support + function. The APIs involved in this are defined in + src/include/utils/sortsupport.h. + + + + in_range support functions + + + + support functions + in_range + + + + Optionally, a btree operator family may + provide in_range support function(s), registered + under support function number 3. These are not used during btree index + operations; rather, they extend the semantics of the operator family so + that it can support window clauses containing + the RANGE offset + PRECEDING + and RANGE offset + FOLLOWING frame bound types (see + ). Fundamentally, the extra + information provided is how to add or subtract + an offset value in a way that is compatible + with the family's data ordering. + + + + An in_range function must have the signature + +in_range(val type1, base type1, offset type2, sub bool, less bool) +returns bool + + val and base must be + of the same type, which is one of the types supported by the operator + family (i.e., a type for which it provides an ordering). + However, offset could be of a different type, + which might be one otherwise unsupported by the family. An example is + that the built-in time_ops family provides + an in_range function that + has offset of type interval. + A family can provide in_range functions for any of + its supported types and one or more offset + types. Each in_range function should be entered + in pg_amproc + with amproclefttype equal to type1 + and amprocrighttype equal to type2. 
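As a concrete illustration, here is a hedged C sketch of support functions 1 and 3 for a hypothetical type whose values are plain int4 integers; the names mytype_cmp and mytype_in_range are invented for the example. The comparison function shows the three-way, never-null contract of support function 1, and the in_range function follows the signature just given, with flag handling that matches the semantics spelled out in the next paragraphs.

#include "postgres.h"
#include "fmgr.h"

PG_MODULE_MAGIC;

PG_FUNCTION_INFO_V1(mytype_cmp);
PG_FUNCTION_INFO_V1(mytype_in_range);

/* Support function 1: strict three-way comparison, never returns NULL. */
Datum
mytype_cmp(PG_FUNCTION_ARGS)
{
    int32       a = PG_GETARG_INT32(0);
    int32       b = PG_GETARG_INT32(1);

    PG_RETURN_INT32((a < b) ? -1 : ((a > b) ? 1 : 0));
}

/* Support function 3: in_range(val, base, offset, sub, less) */
Datum
mytype_in_range(PG_FUNCTION_ARGS)
{
    int32       val = PG_GETARG_INT32(0);
    int32       base = PG_GETARG_INT32(1);
    int32       offset = PG_GETARG_INT32(2);
    bool        sub = PG_GETARG_BOOL(3);
    bool        less = PG_GETARG_BOOL(4);
    int64       bound;

    /* Negative offsets must be rejected, per the rules below. */
    if (offset < 0)
        ereport(ERROR,
                (errcode(ERRCODE_INVALID_PRECEDING_OR_FOLLOWING_SIZE),
                 errmsg("invalid preceding or following size in window function")));

    /* Widen to 64 bits so base +/- offset cannot overflow. */
    bound = sub ? (int64) base - offset : (int64) base + offset;

    /* less selects <=, otherwise >=, exactly as described below. */
    PG_RETURN_BOOL(less ? (int64) val <= bound : (int64) val >= bound);
}

These would be registered as FUNCTION 1 and FUNCTION 3 entries of the operator family, with amproclefttype and amprocrighttype set as described above.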
+ + + + The essential semantics of an in_range function + depend on the two boolean flag parameters. It should add or + subtract base + and offset, then + compare val to the result, as follows: + + + + if !sub and + !less, + return val >= + (base + + offset) + + + + + if !sub + and less, + return val <= + (base + + offset) + + + + + if sub + and !less, + return val >= + (base - + offset) + + + + + if sub and less, + return val <= + (base - + offset) + + + + Before doing so, the function should check the sign + of offset: if it is less than zero, raise + error ERRCODE_INVALID_PRECEDING_OR_FOLLOWING_SIZE (22013) + with error text like invalid preceding or following size in window + function. (This is required by the SQL standard, although + nonstandard operator families might perhaps choose to ignore this + restriction, since there seems to be little semantic necessity for it.) + This requirement is delegated to the in_range + function so that the core code needn't understand what less than + zero means for a particular data type. + + + + An additional expectation is that in_range functions + should, if practical, avoid throwing an error + if base + + offset + or base - + offset would overflow. + The correct comparison result can be determined even if that value would + be out of the data type's range. Note that if the data type includes + concepts such as infinity or NaN, extra care + may be needed to ensure that in_range's results agree + with the normal sort order of the operator family. + + + + The results of the in_range function must be + consistent with the sort ordering imposed by the operator family. + To be precise, given any fixed values of offset + and sub, then: + + + + If in_range with less = + true is true for some val1 + and base, it must be true for + every val2 <= + val1 with the + same base. + + + + + If in_range with less = + true is false for some val1 + and base, it must be false for + every val2 >= + val1 with the + same base. + + + + + If in_range with less = + true is true for some val + and base1, it must be true for + every base2 >= + base1 with the + same val. + + + + + If in_range with less = + true is false for some val + and base1, it must be false for + every base2 <= + base1 with the + same val. + + + + Analogous statements with inverted conditions hold + when less = false. + + + + If the type being ordered (type1) is collatable, + the appropriate collation OID will be passed to + the in_range function, using the standard + PG_GET_COLLATION() mechanism. + + + + in_range functions need not handle NULL inputs, and + typically will be marked strict. + + + + + + Implementation + + + An introduction to the btree index implementation can be found in + src/backend/access/nbtree/README. + + + + + diff --git a/doc/src/sgml/catalogs.sgml b/doc/src/sgml/catalogs.sgml index ef7054cf26..8b7f169d50 100644 --- a/doc/src/sgml/catalogs.sgml +++ b/doc/src/sgml/catalogs.sgml @@ -27,7 +27,7 @@ Overview - lists the system catalogs. + lists the system catalogs. More detailed documentation of each catalog follows below. 
@@ -67,7 +67,7 @@ pg_amproc - access method support procedures + access method support functions @@ -387,7 +387,7 @@ - <structname>pg_aggregate</> Columns + <structname>pg_aggregate</structname> Columns @@ -410,9 +410,9 @@ charAggregate kind: - n for normal aggregates, - o for ordered-set aggregates, or - h for hypothetical-set aggregates + n for normal aggregates, + o for ordered-set aggregates, or + h for hypothetical-set aggregates @@ -421,7 +421,7 @@ Number of direct (non-aggregated) arguments of an ordered-set or hypothetical-set aggregate, counting a variadic array as one argument. - If equal to pronargs, the aggregate must be variadic + If equal to pronargs, the aggregate must be variadic and the variadic array describes the aggregated arguments as well as the final direct arguments. Always zero for normal aggregates. @@ -486,6 +486,26 @@ True to pass extra dummy arguments to aggmfinalfn + + aggfinalmodify + char + + Whether aggfinalfn modifies the + transition state value: + r if it is read-only, + s if the aggtransfn + cannot be applied after the aggfinalfn, or + w if it writes on the value + + + + aggmfinalmodify + char + + Like aggfinalmodify, but for + the aggmfinalfn + + aggsortop oid @@ -547,8 +567,8 @@ New aggregate functions are registered with the - command. See for more information about + linkend="sql-createaggregate"/> + command. See for more information about writing aggregate functions and the meaning of the transition functions, etc. @@ -568,11 +588,11 @@ relation access methods. There is one row for each access method supported by the system. Currently, only indexes have access methods. The requirements for index - access methods are discussed in detail in . + access methods are discussed in detail in .
- <structname>pg_am</> Columns + <structname>pg_am</structname> Columns @@ -624,12 +644,12 @@ - Before PostgreSQL 9.6, pg_am + Before PostgreSQL 9.6, pg_am contained many additional columns representing properties of index access methods. That data is now only directly visible at the C code level. However, pg_index_column_has_property() and related functions have been added to allow SQL queries to inspect index access - method properties; see . + method properties; see . @@ -647,8 +667,8 @@ The catalog pg_amop stores information about operators associated with access method operator families. There is one row for each operator that is a member of an operator family. A family - member can be either a search operator or an - ordering operator. An operator + member can be either a search operator or an + ordering operator. An operator can appear in more than one family, but cannot appear in more than one search position nor more than one ordering position within a family. (It is allowed, though unlikely, for an operator to be used for both @@ -656,7 +676,7 @@
- <structname>pg_amop</> Columns + <structname>pg_amop</structname> Columns @@ -708,8 +728,8 @@ amoppurposechar - Operator purpose, either s for search or - o for ordering + Operator purpose, either s for search or + o for ordering @@ -739,26 +759,26 @@
- A search operator entry indicates that an index of this operator + A search operator entry indicates that an index of this operator family can be searched to find all rows satisfying - WHERE - indexed_column - operator - constant. + WHERE + indexed_column + operator + constant. Obviously, such an operator must return boolean, and its left-hand input type must match the index's column data type. - An ordering operator entry indicates that an index of this + An ordering operator entry indicates that an index of this operator family can be scanned to return rows in the order represented by - ORDER BY - indexed_column - operator - constant. + ORDER BY + indexed_column + operator + constant. Such an operator could return any sortable data type, though again its left-hand input type must match the index's column data type. - The exact semantics of the ORDER BY are specified by the + The exact semantics of the ORDER BY are specified by the amopsortfamily column, which must reference a B-tree operator family for the operator's result type. @@ -767,19 +787,19 @@ At present, it's assumed that the sort order for an ordering operator is the default for the referenced operator family, i.e., ASC NULLS - LAST. This might someday be relaxed by adding additional columns + LAST. This might someday be relaxed by adding additional columns to specify sort options explicitly. - An entry's amopmethod must match the - opfmethod of its containing operator family (including - amopmethod here is an intentional denormalization of the + An entry's amopmethod must match the + opfmethod of its containing operator family (including + amopmethod here is an intentional denormalization of the catalog structure for performance reasons). Also, - amoplefttype and amoprighttype must match - the oprleft and oprright fields of the - referenced pg_operator entry. + amoplefttype and amoprighttype must match + the oprleft and oprright fields of the + referenced pg_operator entry. @@ -794,8 +814,8 @@ The catalog pg_amproc stores information about - support procedures associated with access method operator families. There - is one row for each support procedure belonging to an operator family. + support functions associated with access method operator families. There + is one row for each support function belonging to an operator family. @@ -844,14 +864,14 @@ amprocnumint2 - Support procedure number + Support function number amproc regproc pg_proc.oid - OID of the procedure + OID of the function @@ -860,14 +880,14 @@ The usual interpretation of the - amproclefttype and amprocrighttype fields + amproclefttype and amprocrighttype fields is that they identify the left and right input types of the operator(s) - that a particular support procedure supports. For some access methods - these match the input data type(s) of the support procedure itself, for - others not. There is a notion of default support procedures for - an index, which are those with amproclefttype and - amprocrighttype both equal to the index operator class's - opcintype. + that a particular support function supports. For some access methods + these match the input data type(s) of the support function itself, for + others not. There is a notion of default support functions for + an index, which are those with amproclefttype and + amprocrighttype both equal to the index operator class's + opcintype. @@ -881,15 +901,15 @@ - The catalog pg_attrdef stores column default values. The main information - about columns is stored in pg_attribute - (see below). 
Only columns that explicitly specify a default value - (when the table is created or the column is added) will have an - entry here. + The catalog pg_attrdef stores column default + values. The main information about columns is stored in + pg_attribute. + Only columns for which a default value has been explicitly set will have + an entry here.
- <structname>pg_attrdef</> Columns + <structname>pg_attrdef</structname> Columns @@ -927,27 +947,13 @@ adbinpg_node_tree - The internal representation of the column default value - - - - adsrc - text - - A human-readable representation of the default value + The column default value, in nodeToString() + representation. Use pg_get_expr(adbin, adrelid) to + convert it to an SQL expression.
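A minimal sketch (hypothetical table) of the recommended way to read a default back as SQL via pg_get_expr():
<programlisting>
CREATE TEMP TABLE items (id int, created timestamptz DEFAULT now());

SELECT a.attname, pg_get_expr(d.adbin, d.adrelid) AS default_expr
FROM pg_attrdef d
JOIN pg_attribute a ON a.attrelid = d.adrelid AND a.attnum = d.adnum
WHERE d.adrelid = 'items'::regclass;
</programlisting>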
- - - The adsrc field is historical, and is best - not used, because it does not track outside changes that might affect - the representation of the default value. Reverse-compiling the - adbin field (with pg_get_expr for - example) is a better way to display the default value. - - @@ -973,7 +979,7 @@ - <structname>pg_attribute</> Columns + <structname>pg_attribute</structname> Columns @@ -1014,7 +1020,7 @@ attstattarget controls the level of detail of statistics accumulated for this column by - . + . A zero value indicates that no statistics should be collected. A negative value says to use the system default statistics target. The exact meaning of positive values is data type-dependent. @@ -1052,7 +1058,7 @@ Number of dimensions, if the column is an array type; otherwise 0. (Presently, the number of dimensions of an array is not enforced, - so any nonzero value effectively means it's an array.) + so any nonzero value effectively means it's an array.) @@ -1076,7 +1082,7 @@ supplied at table creation time (for example, the maximum length of a varchar column). It is passed to type-specific input functions and length coercion functions. - The value will generally be -1 for types that do not need atttypmod. + The value will generally be -1 for types that do not need atttypmod. @@ -1085,7 +1091,7 @@ bool - A copy of pg_type.typbyval of this column's type + A copy of pg_type.typbyval of this column's type @@ -1094,7 +1100,7 @@ char - Normally a copy of pg_type.typstorage of this + Normally a copy of pg_type.typstorage of this column's type. For TOAST-able data types, this can be altered after column creation to control storage policy. @@ -1105,7 +1111,7 @@ char - A copy of pg_type.typalign of this column's type + A copy of pg_type.typalign of this column's type @@ -1129,6 +1135,19 @@ + + atthasmissing + bool + + + This column has a value which is used where the column is entirely + missing from the row, as happens when a column is added with a + non-volatile DEFAULT value after the row is created. + The actual value used is stored in the + attmissingval column. + + + attidentity char @@ -1196,7 +1215,7 @@ text[] - Attribute-level options, as keyword=value strings + Attribute-level options, as keyword=value strings @@ -1205,7 +1224,21 @@ text[] - Attribute-level foreign data wrapper options, as keyword=value strings + Attribute-level foreign data wrapper options, as keyword=value strings + + + + + attmissingval + anyarray + + + This column has a one element array containing the value used when the + column is entirely missing from the row, as happens when the column is + added with a non-volatile DEFAULT value after the + row is created. The value is only used when + atthasmissing is true. If there is no value + the column is null. @@ -1217,9 +1250,9 @@ In a dropped column's pg_attribute entry, atttypid is reset to zero, but attlen and the other fields copied from - pg_type are still valid. This arrangement is needed + pg_type are still valid. This arrangement is needed to cope with the situation where the dropped column's data type was - later dropped, and so there is no pg_type row anymore. + later dropped, and so there is no pg_type row anymore. attlen and the other fields can be used to interpret the contents of a row of the table. @@ -1236,9 +1269,9 @@ The catalog pg_authid contains information about database authorization identifiers (roles). A role subsumes the concepts - of users and groups. A user is essentially just a - role with the rolcanlogin flag set. 
Any role (with or - without rolcanlogin) can have other roles as members; see + of users and groups. A user is essentially just a + role with the rolcanlogin flag set. Any role (with or + without rolcanlogin) can have other roles as members; see pg_auth_members. @@ -1250,7 +1283,7 @@ - contains detailed information about user and + contains detailed information about user and privilege management. @@ -1263,7 +1296,7 @@
- <structname>pg_authid</> Columns + <structname>pg_authid</structname> Columns @@ -1336,7 +1369,7 @@ bool Role bypasses every row level security policy, see - for more information. + for more information. @@ -1370,20 +1403,20 @@ For an MD5 encrypted password, rolpassword - column will begin with the string md5 followed by a + column will begin with the string md5 followed by a 32-character hexadecimal MD5 hash. The MD5 hash will be of the user's password concatenated to their user name. For example, if user - joe has password xyzzy, PostgreSQL - will store the md5 hash of xyzzyjoe. + joe has password xyzzy, PostgreSQL + will store the md5 hash of xyzzyjoe. If the password is encrypted with SCRAM-SHA-256, it has the format: -SCRAM-SHA-256$<iteration count>:<salt>$<StoredKey>:<ServerKey> +SCRAM-SHA-256$<iteration count>:<salt>$<StoredKey>:<ServerKey> - where salt, StoredKey and - ServerKey are in Base64 encoded format. This format is + where salt, StoredKey and + ServerKey are in Base64 encoded format. This format is the same as that specified by RFC 5803. @@ -1415,7 +1448,7 @@ SCRAM-SHA-256$<iteration count>:<salt><
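To make the MD5 format described above concrete, this sketch computes what would be stored in rolpassword for a hypothetical role joe with password xyzzy:
<programlisting>
-- md5 hash of the password concatenated with the user name
SELECT 'md5' || md5('xyzzy' || 'joe') AS rolpassword_value;
</programlisting>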
- <structname>pg_auth_members</> Columns + <structname>pg_auth_members</structname> Columns @@ -1439,7 +1472,7 @@ SCRAM-SHA-256$<iteration count>:<salt>< member oid pg_authid.oid - ID of a role that is a member of roleid + ID of a role that is a member of roleid @@ -1453,8 +1486,8 @@ SCRAM-SHA-256$<iteration count>:<salt>< admin_option bool - True if member can grant membership in - roleid to others + True if member can grant membership in + roleid to others @@ -1481,14 +1514,14 @@ SCRAM-SHA-256$<iteration count>:<salt>< cannot be deduced from some generic rule. For example, casting between a domain and its base type is not explicitly represented in pg_cast. Another important exception is that - automatic I/O conversion casts, those performed using a data - type's own I/O functions to convert to or from text or other + automatic I/O conversion casts, those performed using a data + type's own I/O functions to convert to or from text or other string types, are not explicitly represented in pg_cast.
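The explicitly represented casts, together with the context and method codes documented below, can be listed with a simple query, for example:
<programlisting>
SELECT castsource::regtype, casttarget::regtype, castcontext, castmethod
FROM pg_cast
ORDER BY castsource, casttarget
LIMIT 5;
</programlisting>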
- <structname>pg_cast</> Columns + <structname>pg_cast</structname> Columns @@ -1538,11 +1571,11 @@ SCRAM-SHA-256$<iteration count>:<salt>< Indicates what contexts the cast can be invoked in. - e means only as an explicit cast (using - CAST or :: syntax). - a means implicitly in assignment + e means only as an explicit cast (using + CAST or :: syntax). + a means implicitly in assignment to a target column, as well as explicitly. - i means implicitly in expressions, as well as the + i means implicitly in expressions, as well as the other cases. @@ -1552,9 +1585,9 @@ SCRAM-SHA-256$<iteration count>:<salt>< Indicates how the cast is performed. - f means that the function specified in the castfunc field is used. - i means that the input/output functions are used. - b means that the types are binary-coercible, thus no conversion is required. + f means that the function specified in the castfunc field is used. + i means that the input/output functions are used. + b means that the types are binary-coercible, thus no conversion is required. @@ -1566,18 +1599,18 @@ SCRAM-SHA-256$<iteration count>:<salt>< always take the cast source type as their first argument type, and return the cast destination type as their result type. A cast function can have up to three arguments. The second argument, - if present, must be type integer; it receives the type + if present, must be type integer; it receives the type modifier associated with the destination type, or -1 if there is none. The third argument, - if present, must be type boolean; it receives true - if the cast is an explicit cast, false otherwise. + if present, must be type boolean; it receives true + if the cast is an explicit cast, false otherwise. It is legitimate to create a pg_cast entry in which the source and target types are the same, if the associated function takes more than one argument. Such entries represent - length coercion functions that coerce values of the type + length coercion functions that coerce values of the type to be legal for a particular type modifier value. @@ -1604,14 +1637,14 @@ SCRAM-SHA-256$<iteration count>:<salt>< table. This includes indexes (but see also pg_index), sequences (but see also pg_sequence), views, materialized - views, composite types, and TOAST tables; see relkind. + views, composite types, and TOAST tables; see relkind. Below, when we mean all of these kinds of objects we speak of relations. Not all columns are meaningful for all relation types.
- <structname>pg_class</> Columns + <structname>pg_class</structname> Columns @@ -1653,7 +1686,7 @@ SCRAM-SHA-256$<iteration count>:<salt>< pg_type.oid The OID of the data type that corresponds to this table's row type, - if any (zero for indexes, which have no pg_type entry) + if any (zero for indexes, which have no pg_type entry) @@ -1686,7 +1719,7 @@ SCRAM-SHA-256$<iteration count>:<salt>< oid Name of the on-disk file of this relation; zero means this - is a mapped relation whose disk file name is determined + is a mapped relation whose disk file name is determined by low-level state @@ -1719,8 +1752,8 @@ SCRAM-SHA-256$<iteration count>:<salt>< float4 - Number of rows in the table. This is only an estimate used by the - planner. It is updated by VACUUM, + Number of live rows in the table. This is only an estimate used by + the planner. It is updated by VACUUM, ANALYZE, and a few DDL commands such as CREATE INDEX. @@ -1775,8 +1808,8 @@ SCRAM-SHA-256$<iteration count>:<salt>< char - p = permanent table, u = unlogged table, - t = temporary table + p = permanent table, u = unlogged table, + t = temporary table @@ -1785,15 +1818,16 @@ SCRAM-SHA-256$<iteration count>:<salt>< char - r = ordinary table, - i = index, - S = sequence, - t = TOAST table, - v = view, - m = materialized view, - c = composite type, - f = foreign table, - p = partitioned table + r = ordinary table, + i = index, + S = sequence, + t = TOAST table, + v = view, + m = materialized view, + c = composite type, + f = foreign table, + p = partitioned table, + I = partitioned index @@ -1814,7 +1848,7 @@ SCRAM-SHA-256$<iteration count>:<salt>< int2 - Number of CHECK constraints on the table; see + Number of CHECK constraints on the table; see pg_constraint catalog @@ -1828,15 +1862,6 @@ SCRAM-SHA-256$<iteration count>:<salt>< - - relhaspkey - bool - - - True if the table has (or once had) a primary key - - - relhasrules bool @@ -1861,7 +1886,9 @@ SCRAM-SHA-256$<iteration count>:<salt>< relhassubclass bool - True if table has (or once had) any inheritance children + + True if table or index has (or once had) any inheritance children + @@ -1897,11 +1924,11 @@ SCRAM-SHA-256$<iteration count>:<salt>< char - Columns used to form replica identity for rows: - d = default (primary key, if any), - n = nothing, - f = all columns - i = index with indisreplident set, or default + Columns used to form replica identity for rows: + d = default (primary key, if any), + n = nothing, + f = all columns + i = index with indisreplident set, or default @@ -1909,7 +1936,19 @@ SCRAM-SHA-256$<iteration count>:<salt>< relispartition bool - True if table is a partition + True if table or index is a partition + + + + relrewrite + oid + pg_class.oid + + For new relations being written during a DDL operation that requires a + table rewrite, this contains the OID of the original relation; + otherwise 0. That state is only visible internally; this field should + never contain anything other than 0 for a user-visible relation. + @@ -1918,9 +1957,9 @@ SCRAM-SHA-256$<iteration count>:<salt>< All transaction IDs before this one have been replaced with a permanent - (frozen) transaction ID in this table. This is used to track + (frozen) transaction ID in this table. This is used to track whether the table needs to be vacuumed in order to prevent transaction - ID wraparound or to allow pg_xact to be shrunk. Zero + ID wraparound or to allow pg_xact to be shrunk. Zero (InvalidTransactionId) if the relation is not a table. 
@@ -1933,7 +1972,7 @@ SCRAM-SHA-256$<iteration count>:<salt>< All multixact IDs before this one have been replaced by a transaction ID in this table. This is used to track whether the table needs to be vacuumed in order to prevent multixact ID - wraparound or to allow pg_multixact to be shrunk. Zero + wraparound or to allow pg_multixact to be shrunk. Zero (InvalidMultiXactId) if the relation is not a table. @@ -1944,8 +1983,8 @@ SCRAM-SHA-256$<iteration count>:<salt>< Access privileges; see - and - + and + for details @@ -1955,7 +1994,7 @@ SCRAM-SHA-256$<iteration count>:<salt>< text[] - Access-method-specific options, as keyword=value strings + Access-method-specific options, as keyword=value strings @@ -1973,13 +2012,13 @@ SCRAM-SHA-256$<iteration count>:<salt><
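As a rough sketch, the relkind codes listed above can be tallied for the current database:
<programlisting>
SELECT relkind, count(*) AS relations
FROM pg_class
GROUP BY relkind
ORDER BY relkind;
</programlisting>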
- Several of the Boolean flags in pg_class are maintained + Several of the Boolean flags in pg_class are maintained lazily: they are guaranteed to be true if that's the correct state, but may not be reset to false immediately when the condition is no longer - true. For example, relhasindex is set by + true. For example, relhasindex is set by CREATE INDEX, but it is never cleared by DROP INDEX. Instead, VACUUM clears - relhasindex if it finds the table has no indexes. This + relhasindex if it finds the table has no indexes. This arrangement avoids race conditions and improves concurrency. @@ -1995,11 +2034,11 @@ SCRAM-SHA-256$<iteration count>:<salt>< The catalog pg_collation describes the available collations, which are essentially mappings from an SQL name to operating system locale categories. - See for more information. + See for more information. - <structname>pg_collation</> Columns + <structname>pg_collation</structname> Columns @@ -2062,14 +2101,14 @@ SCRAM-SHA-256$<iteration count>:<salt>< collcollate name - LC_COLLATE for this collation object + LC_COLLATE for this collation object collctype name - LC_CTYPE for this collation object + LC_CTYPE for this collation object @@ -2087,27 +2126,27 @@ SCRAM-SHA-256$<iteration count>:<salt><
- Note that the unique key on this catalog is (collname, - collencoding, collnamespace) not just - (collname, collnamespace). + Note that the unique key on this catalog is (collname, + collencoding, collnamespace) not just + (collname, collnamespace). PostgreSQL generally ignores all - collations that do not have collencoding equal to + collations that do not have collencoding equal to either the current database's encoding or -1, and creation of new entries - with the same name as an entry with collencoding = -1 + with the same name as an entry with collencoding = -1 is forbidden. Therefore it is sufficient to use a qualified SQL name - (schema.name) to identify a collation, + (schema.name) to identify a collation, even though this is not unique according to the catalog definition. The reason for defining the catalog this way is that - initdb fills it in at cluster initialization time with + initdb fills it in at cluster initialization time with entries for all locales available on the system, so it must be able to hold entries for all encodings that might ever be used in the cluster. - In the template0 database, it could be useful to create + In the template0 database, it could be useful to create collations whose encoding does not match the database encoding, since they could match the encodings of databases later cloned from - template0. This would currently have to be done manually. + template0. This would currently have to be done manually. @@ -2123,13 +2162,13 @@ SCRAM-SHA-256$<iteration count>:<salt>< key, unique, foreign key, and exclusion constraints on tables. (Column constraints are not treated specially. Every column constraint is equivalent to some table constraint.) - Not-null constraints are represented in the pg_attribute + Not-null constraints are represented in the pg_attribute catalog, not here. User-defined constraint triggers (created with CREATE CONSTRAINT - TRIGGER) also give rise to an entry in this table. + TRIGGER) also give rise to an entry in this table. 
@@ -2137,7 +2176,7 @@ SCRAM-SHA-256$<iteration count>:<salt>< - <structname>pg_constraint</> Columns + <structname>pg_constraint</structname> Columns @@ -2178,12 +2217,12 @@ SCRAM-SHA-256$<iteration count>:<salt>< char - c = check constraint, - f = foreign key constraint, - p = primary key constraint, - u = unique constraint, - t = constraint trigger, - x = exclusion constraint + c = check constraint, + f = foreign key constraint, + p = primary key constraint, + u = unique constraint, + t = constraint trigger, + x = exclusion constraint @@ -2231,6 +2270,14 @@ SCRAM-SHA-256$<iteration count>:<salt>< key, foreign key, or exclusion constraint; else 0 + + conparentid + oid + pg_constraint.oid + The corresponding constraint in the parent partitioned table, + if this is a constraint in a partition; else 0 + + confrelid oid @@ -2243,11 +2290,11 @@ SCRAM-SHA-256$<iteration count>:<salt>< char Foreign key update action code: - a = no action, - r = restrict, - c = cascade, - n = set null, - d = set default + a = no action, + r = restrict, + c = cascade, + n = set null, + d = set default @@ -2256,11 +2303,11 @@ SCRAM-SHA-256$<iteration count>:<salt>< char Foreign key deletion action code: - a = no action, - r = restrict, - c = cascade, - n = set null, - d = set default + a = no action, + r = restrict, + c = cascade, + n = set null, + d = set default @@ -2269,9 +2316,9 @@ SCRAM-SHA-256$<iteration count>:<salt>< char Foreign key match type: - f = full, - p = partial, - s = simple + f = full, + p = partial, + s = simple @@ -2309,7 +2356,7 @@ SCRAM-SHA-256$<iteration count>:<salt>< conkey int2[] - pg_attribute.attnum + pg_attribute.attnum If a table constraint (including foreign keys, but not constraint triggers), list of the constrained columns @@ -2317,35 +2364,35 @@ SCRAM-SHA-256$<iteration count>:<salt>< confkey int2[] - pg_attribute.attnum + pg_attribute.attnum If a foreign key, list of the referenced columns conpfeqop oid[] - pg_operator.oid + pg_operator.oid If a foreign key, list of the equality operators for PK = FK comparisons conppeqop oid[] - pg_operator.oid + pg_operator.oid If a foreign key, list of the equality operators for PK = PK comparisons conffeqop oid[] - pg_operator.oid + pg_operator.oid If a foreign key, list of the equality operators for FK = FK comparisons conexclop oid[] - pg_operator.oid + pg_operator.oid If an exclusion constraint, list of the per-column exclusion operators @@ -2353,14 +2400,10 @@ SCRAM-SHA-256$<iteration count>:<salt>< conbin pg_node_tree - If a check constraint, an internal representation of the expression - - - - consrc - text - - If a check constraint, a human-readable representation of the expression + If a check constraint, an internal representation of the + expression. (It's recommended to use + pg_get_constraintdef() to extract the definition of + a check constraint.) @@ -2372,19 +2415,10 @@ SCRAM-SHA-256$<iteration count>:<salt>< For other cases, a zero appears in conkey and the associated index must be consulted to discover the expression that is constrained. (conkey thus has the - same contents as pg_index.indkey for the + same contents as pg_index.indkey for the index.) - - - consrc is not updated when referenced objects - change; for example, it won't track renaming of columns. Rather than - relying on this field, it's best to use pg_get_constraintdef() - to extract the definition of a check constraint. 
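A minimal sketch (hypothetical table and constraint name) of the recommended pg_get_constraintdef() approach:
<programlisting>
CREATE TEMP TABLE prices (
    price numeric CONSTRAINT price_positive CHECK (price > 0)
);

SELECT conname, contype, pg_get_constraintdef(oid) AS definition
FROM pg_constraint
WHERE conrelid = 'prices'::regclass;
</programlisting>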
- - - pg_class.relchecks needs to agree with the @@ -2404,12 +2438,12 @@ SCRAM-SHA-256$<iteration count>:<salt>< The catalog pg_conversion describes - encoding conversion procedures. See + encoding conversion functions. See for more information.
- <structname>pg_conversion</> Columns + <structname>pg_conversion</structname> Columns @@ -2470,7 +2504,7 @@ SCRAM-SHA-256$<iteration count>:<salt>< conproc regproc pg_proc.oid - Conversion procedure + Conversion function @@ -2496,8 +2530,8 @@ SCRAM-SHA-256$<iteration count>:<salt>< The catalog pg_database stores information about the available databases. Databases are created with the command. - Consult for details about the meaning + linkend="sql-createdatabase"/> command. + Consult for details about the meaning of some of the parameters. @@ -2509,7 +2543,7 @@ SCRAM-SHA-256$<iteration count>:<salt><
- <structname>pg_database</> Columns + <structname>pg_database</structname> Columns @@ -2572,7 +2606,7 @@ SCRAM-SHA-256$<iteration count>:<salt>< If true, then this database can be cloned by - any user with CREATEDB privileges; + any user with CREATEDB privileges; if false, then only superusers or the owner of the database can clone it. @@ -2584,7 +2618,7 @@ SCRAM-SHA-256$<iteration count>:<salt>< If false then no one can connect to this database. This is - used to protect the template0 database from being altered. + used to protect the template0 database from being altered. @@ -2614,11 +2648,11 @@ SCRAM-SHA-256$<iteration count>:<salt>< All transaction IDs before this one have been replaced with a permanent - (frozen) transaction ID in this database. This is used to + (frozen) transaction ID in this database. This is used to track whether the database needs to be vacuumed in order to prevent - transaction ID wraparound or to allow pg_xact to be shrunk. + transaction ID wraparound or to allow pg_xact to be shrunk. It is the minimum of the per-table - pg_class.relfrozenxid values. + pg_class.relfrozenxid values. @@ -2630,9 +2664,9 @@ SCRAM-SHA-256$<iteration count>:<salt>< All multixact IDs before this one have been replaced with a transaction ID in this database. This is used to track whether the database needs to be vacuumed in order to prevent - multixact ID wraparound or to allow pg_multixact to be shrunk. + multixact ID wraparound or to allow pg_multixact to be shrunk. It is the minimum of the per-table - pg_class.relminmxid values. + pg_class.relminmxid values. @@ -2643,7 +2677,7 @@ SCRAM-SHA-256$<iteration count>:<salt>< The default tablespace for the database. Within this database, all tables for which - pg_class.reltablespace is zero + pg_class.reltablespace is zero will be stored in this tablespace; in particular, all the non-shared system catalogs will be there. @@ -2655,8 +2689,8 @@ SCRAM-SHA-256$<iteration count>:<salt>< Access privileges; see - and - + and + for details @@ -2687,7 +2721,7 @@ SCRAM-SHA-256$<iteration count>:<salt><
- <structname>pg_db_role_setting</> Columns + <structname>pg_db_role_setting</structname> Columns @@ -2734,12 +2768,12 @@ SCRAM-SHA-256$<iteration count>:<salt>< - The catalog pg_default_acl stores initial + The catalog pg_default_acl stores initial privileges to be assigned to newly created objects.
- <structname>pg_default_acl</> Columns + <structname>pg_default_acl</structname> Columns @@ -2780,10 +2814,11 @@ SCRAM-SHA-256$<iteration count>:<salt>< Type of object this entry is for: - r = relation (table, view), - S = sequence, - f = function, - T = type + r = relation (table, view), + S = sequence, + f = function, + T = type, + n = schema @@ -2800,21 +2835,21 @@ SCRAM-SHA-256$<iteration count>:<salt><
- A pg_default_acl entry shows the initial privileges to + A pg_default_acl entry shows the initial privileges to be assigned to an object belonging to the indicated user. There are - currently two types of entry: global entries with - defaclnamespace = 0, and per-schema entries + currently two types of entry: global entries with + defaclnamespace = 0, and per-schema entries that reference a particular schema. If a global entry is present then - it overrides the normal hard-wired default privileges + it overrides the normal hard-wired default privileges for the object type. A per-schema entry, if present, represents privileges - to be added to the global or hard-wired default privileges. + to be added to the global or hard-wired default privileges. Note that when an ACL entry in another catalog is null, it is taken to represent the hard-wired default privileges for its object, - not whatever might be in pg_default_acl - at the moment. pg_default_acl is only consulted during + not whatever might be in pg_default_acl + at the moment. pg_default_acl is only consulted during object creation. @@ -2831,9 +2866,9 @@ SCRAM-SHA-256$<iteration count>:<salt>< The catalog pg_depend records the dependency relationships between database objects. This information allows - DROP commands to find which other objects must be dropped - by DROP CASCADE or prevent dropping in the DROP - RESTRICT case. + DROP commands to find which other objects must be dropped + by DROP CASCADE or prevent dropping in the DROP + RESTRICT case. @@ -2843,7 +2878,7 @@ SCRAM-SHA-256$<iteration count>:<salt>< - <structname>pg_depend</> Columns + <structname>pg_depend</structname> Columns @@ -2876,7 +2911,7 @@ SCRAM-SHA-256$<iteration count>:<salt>< For a table column, this is the column number (the - objid and classid refer to the + objid and classid refer to the table itself). For all other object types, this column is zero. @@ -2902,7 +2937,7 @@ SCRAM-SHA-256$<iteration count>:<salt>< For a table column, this is the column number (the - refobjid and refclassid refer + refobjid and refclassid refer to the table itself). For all other object types, this column is zero. @@ -2925,17 +2960,17 @@ SCRAM-SHA-256$<iteration count>:<salt>< In all cases, a pg_depend entry indicates that the referenced object cannot be dropped without also dropping the dependent object. However, there are several subflavors identified by - deptype: + deptype: - DEPENDENCY_NORMAL (n) + DEPENDENCY_NORMAL (n) A normal relationship between separately-created objects. The dependent object can be dropped without affecting the referenced object. The referenced object can only be dropped - by specifying CASCADE, in which case the dependent + by specifying CASCADE, in which case the dependent object is dropped, too. Example: a table column has a normal dependency on its data type. @@ -2943,12 +2978,12 @@ SCRAM-SHA-256$<iteration count>:<salt>< - DEPENDENCY_AUTO (a) + DEPENDENCY_AUTO (a) The dependent object can be dropped separately from the referenced object, and should be automatically dropped - (regardless of RESTRICT or CASCADE + (regardless of RESTRICT or CASCADE mode) if the referenced object is dropped. Example: a named constraint on a table is made autodependent on the table, so that it will go away if the table is dropped. 
@@ -2957,41 +2992,64 @@ SCRAM-SHA-256$<iteration count>:<salt>< - DEPENDENCY_INTERNAL (i) + DEPENDENCY_INTERNAL (i) The dependent object was created as part of creation of the referenced object, and is really just a part of its internal - implementation. A DROP of the dependent object + implementation. A DROP of the dependent object will be disallowed outright (we'll tell the user to issue a - DROP against the referenced object, instead). A - DROP of the referenced object will be propagated + DROP against the referenced object, instead). A + DROP of the referenced object will be propagated through to drop the dependent object whether - CASCADE is specified or not. Example: a trigger + CASCADE is specified or not. Example: a trigger that's created to enforce a foreign-key constraint is made internally dependent on the constraint's - pg_constraint entry. + pg_constraint entry. - DEPENDENCY_EXTENSION (e) + DEPENDENCY_INTERNAL_AUTO (I) - The dependent object is a member of the extension that is + The dependent object was created as part of creation of the + referenced object, and is really just a part of its internal + implementation. A DROP of the dependent object + will be disallowed outright (we'll tell the user to issue a + DROP against the referenced object, instead). + While a regular internal dependency will prevent + the dependent object from being dropped while any such dependencies + remain, DEPENDENCY_INTERNAL_AUTO will allow such + a drop as long as the object can be found by following any of such + dependencies. + Example: an index on a partition is made internal-auto-dependent on + both the partition itself as well as on the index on the parent + partitioned table; so the partition index is dropped together with + either the partition it indexes, or with the parent index it is + attached to. + + + + + + DEPENDENCY_EXTENSION (e) + + + The dependent object is a member of the extension that is the referenced object (see pg_extension). The dependent object can be dropped only via - DROP EXTENSION on the referenced object. Functionally + DROP EXTENSION on the referenced object. Functionally this dependency type acts the same as an internal dependency, but - it's kept separate for clarity and to simplify pg_dump. + it's kept separate for clarity and to simplify pg_dump. - DEPENDENCY_AUTO_EXTENSION (x) + DEPENDENCY_AUTO_EXTENSION (x) The dependent object is not a member of the extension that is the @@ -3004,7 +3062,7 @@ SCRAM-SHA-256$<iteration count>:<salt>< - DEPENDENCY_PIN (p) + DEPENDENCY_PIN (p) There is no dependent object; this type of entry is a signal @@ -3031,9 +3089,9 @@ SCRAM-SHA-256$<iteration count>:<salt>< - The catalog pg_description stores optional descriptions + The catalog pg_description stores optional descriptions (comments) for each database object. Descriptions can be manipulated - with the command and viewed with + with the command and viewed with psql's \d commands. Descriptions of many built-in system objects are provided in the initial contents of pg_description. @@ -3046,7 +3104,7 @@ SCRAM-SHA-256$<iteration count>:<salt><
- <structname>pg_description</> Columns + <structname>pg_description</structname> Columns @@ -3079,7 +3137,7 @@ SCRAM-SHA-256$<iteration count>:<salt>< For a comment on a table column, this is the column number (the - objoid and classoid refer to + objoid and classoid refer to the table itself). For all other object types, this column is zero. @@ -3113,7 +3171,7 @@ SCRAM-SHA-256$<iteration count>:<salt><
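The fractional enumsortorder values described above can be seen with a hypothetical enum type that gains a label after creation:
<programlisting>
CREATE TYPE mood AS ENUM ('sad', 'ok', 'happy');
ALTER TYPE mood ADD VALUE 'meh' BEFORE 'ok';

-- 'meh' receives a sort-order value between those of 'sad' and 'ok'
SELECT enumlabel, enumsortorder
FROM pg_enum
WHERE enumtypid = 'mood'::regtype
ORDER BY enumsortorder;
</programlisting>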
- <structname>pg_enum</> Columns + <structname>pg_enum</structname> Columns @@ -3137,7 +3195,7 @@ SCRAM-SHA-256$<iteration count>:<salt>< enumtypid oid pg_type.oid - The OID of the pg_type entry owning this enum value + The OID of the pg_type entry owning this enum value @@ -3171,7 +3229,7 @@ SCRAM-SHA-256$<iteration count>:<salt>< When an enum type is created, its members are assigned sort-order - positions 1..n. But members added later might be given + positions 1..n. But members added later might be given negative or fractional values of enumsortorder. The only requirement on these values is that they be correctly ordered and unique within each enum type. @@ -3188,11 +3246,11 @@ SCRAM-SHA-256$<iteration count>:<salt>< The catalog pg_event_trigger stores event triggers. - See for more information. + See for more information.
- <structname>pg_event_trigger</> Columns + <structname>pg_event_trigger</structname> Columns @@ -3238,12 +3296,12 @@ SCRAM-SHA-256$<iteration count>:<salt>< char - Controls in which modes + Controls in which modes the event trigger fires. - O = trigger fires in origin and local modes, - D = trigger is disabled, - R = trigger fires in replica mode, - A = trigger fires always. + O = trigger fires in origin and local modes, + D = trigger is disabled, + R = trigger fires in replica mode, + A = trigger fires always. @@ -3271,12 +3329,12 @@ SCRAM-SHA-256$<iteration count>:<salt>< The catalog pg_extension stores information - about the installed extensions. See + about the installed extensions. See for details about extensions.
- <structname>pg_extension</> Columns + <structname>pg_extension</structname> Columns @@ -3335,16 +3393,16 @@ SCRAM-SHA-256$<iteration count>:<salt>< extconfig oid[] pg_class.oid - Array of regclass OIDs for the extension's configuration - table(s), or NULL if none + Array of regclass OIDs for the extension's configuration + table(s), or NULL if none extcondition text[] - Array of WHERE-clause filter conditions for the - extension's configuration table(s), or NULL if none + Array of WHERE-clause filter conditions for the + extension's configuration table(s), or NULL if none @@ -3352,7 +3410,7 @@ SCRAM-SHA-256$<iteration count>:<salt><
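For example, the installed extensions and the columns just described can be inspected with:
<programlisting>
SELECT extname, extversion, extrelocatable, extnamespace::regnamespace AS schema
FROM pg_extension;
</programlisting>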
- Note that unlike most catalogs with a namespace column, + Note that unlike most catalogs with a namespace column, extnamespace is not meant to imply that the extension belongs to that schema. Extension names are never schema-qualified. Rather, extnamespace @@ -3379,7 +3437,7 @@ SCRAM-SHA-256$<iteration count>:<salt>< - <structname>pg_foreign_data_wrapper</> Columns + <structname>pg_foreign_data_wrapper</structname> Columns @@ -3443,8 +3501,8 @@ SCRAM-SHA-256$<iteration count>:<salt>< Access privileges; see - and - + and + for details @@ -3454,7 +3512,7 @@ SCRAM-SHA-256$<iteration count>:<salt>< text[] - Foreign-data wrapper specific options, as keyword=value strings + Foreign-data wrapper specific options, as keyword=value strings @@ -3478,7 +3536,7 @@ SCRAM-SHA-256$<iteration count>:<salt><
- <structname>pg_foreign_server</> Columns + <structname>pg_foreign_server</structname> Columns @@ -3539,8 +3597,8 @@ SCRAM-SHA-256$<iteration count>:<salt>< Access privileges; see - and - + and + for details @@ -3550,7 +3608,7 @@ SCRAM-SHA-256$<iteration count>:<salt>< text[] - Foreign server specific options, as keyword=value strings + Foreign server specific options, as keyword=value strings @@ -3576,7 +3634,7 @@ SCRAM-SHA-256$<iteration count>:<salt><
- <structname>pg_foreign_table</> Columns + <structname>pg_foreign_table</structname> Columns @@ -3593,7 +3651,7 @@ SCRAM-SHA-256$<iteration count>:<salt>< ftrelid oid pg_class.oid - OID of the pg_class entry for this foreign table + OID of the pg_class entry for this foreign table @@ -3608,7 +3666,7 @@ SCRAM-SHA-256$<iteration count>:<salt>< text[] - Foreign table options, as keyword=value strings + Foreign table options, as keyword=value strings @@ -3631,7 +3689,7 @@ SCRAM-SHA-256$<iteration count>:<salt><
- <structname>pg_index</> Columns + <structname>pg_index</structname> Columns @@ -3648,22 +3706,31 @@ SCRAM-SHA-256$<iteration count>:<salt>< indexrelid oid pg_class.oid - The OID of the pg_class entry for this index + The OID of the pg_class entry for this index indrelid oid pg_class.oid - The OID of the pg_class entry for the table this index is for + The OID of the pg_class entry for the table this index is for indnatts int2 - The number of columns in the index (duplicates - pg_class.relnatts) + The total number of columns in the index (duplicates + pg_class.relnatts); this number includes both key and included attributes + + + + indnkeyatts + int2 + + The number of key columns in the index, + not counting any included columns, which are + merely stored and do not participate in the index semantics @@ -3678,7 +3745,7 @@ SCRAM-SHA-256$<iteration count>:<salt>< bool If true, this index represents the primary key of the table - (indisunique should always be true when this is true) + (indisunique should always be true when this is true) @@ -3694,7 +3761,7 @@ SCRAM-SHA-256$<iteration count>:<salt>< If true, the uniqueness check is enforced immediately on insertion - (irrelevant if indisunique is not true) + (irrelevant if indisunique is not true) @@ -3711,7 +3778,7 @@ SCRAM-SHA-256$<iteration count>:<salt>< If true, the index is currently valid for queries. False means the index is possibly incomplete: it must still be modified by - INSERT/UPDATE operations, but it cannot safely + INSERT/UPDATE operations, but it cannot safely be used for queries. If it is unique, the uniqueness property is not guaranteed true either. @@ -3722,8 +3789,8 @@ SCRAM-SHA-256$<iteration count>:<salt>< bool - If true, queries must not use the index until the xmin - of this pg_index row is below their TransactionXmin + If true, queries must not use the index until the xmin + of this pg_index row is below their TransactionXmin event horizon, because the table may contain broken HOT chains with incompatible rows that they can see @@ -3735,7 +3802,7 @@ SCRAM-SHA-256$<iteration count>:<salt>< If true, the index is currently ready for inserts. False means the - index must be ignored by INSERT/UPDATE + index must be ignored by INSERT/UPDATE operations. @@ -3755,9 +3822,9 @@ SCRAM-SHA-256$<iteration count>:<salt>< bool - If true this index has been chosen as replica identity + If true this index has been chosen as replica identity using ALTER TABLE ... REPLICA IDENTITY USING INDEX - ... + ... @@ -3769,7 +3836,8 @@ SCRAM-SHA-256$<iteration count>:<salt>< This is an array of indnatts values that indicate which table columns this index indexes. For example a value of 1 3 would mean that the first and the third table - columns make up the index key. A zero in this array indicates that the + columns make up the index entries. Key columns come before non-key + (included) columns. A zero in this array indicates that the corresponding index attribute is an expression over the table columns, rather than a simple column reference. @@ -3780,9 +3848,10 @@ SCRAM-SHA-256$<iteration count>:<salt>< oidvector pg_collation.oid - For each column in the index key, this contains the OID of the - collation to use for the index, or zero if the column is not - of a collatable data type. + For each column in the index key + (indnkeyatts values), this contains the OID + of the collation to use for the index, or zero if the column is not of + a collatable data type. 
@@ -3791,8 +3860,9 @@ SCRAM-SHA-256$<iteration count>:<salt>< oidvector pg_opclass.oid - For each column in the index key, this contains the OID of - the operator class to use. See + For each column in the index key + (indnkeyatts values), this contains the OID + of the operator class to use. See pg_opclass for details. @@ -3802,7 +3872,7 @@ SCRAM-SHA-256$<iteration count>:<salt>< int2vector - This is an array of indnatts values that + This is an array of indnkeyatts values that store per-column flag bits. The meaning of the bits is defined by the index's access method. @@ -3816,7 +3886,7 @@ SCRAM-SHA-256$<iteration count>:<salt>< Expression trees (in nodeToString() representation) for index attributes that are not simple column references. This is a list with one element for each zero - entry in indkey. Null if all index attributes + entry in indkey. Null if all index attributes are simple references. @@ -3846,14 +3916,14 @@ SCRAM-SHA-256$<iteration count>:<salt>< - The catalog pg_inherits records information about + The catalog pg_inherits records information about table inheritance hierarchies. There is one entry for each direct - child table in the database. (Indirect inheritance can be determined + parent-child table relationship in the database. (Indirect inheritance can be determined by following chains of entries.)
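A small sketch (hypothetical table and index) showing how indnatts and indnkeyatts differ for an index with included columns, as described above:
<programlisting>
CREATE TEMP TABLE orders (id int, customer int, total numeric);
CREATE INDEX orders_id_incl_total ON orders (id) INCLUDE (total);

-- indnatts counts key plus included columns; indnkeyatts counts only key columns
SELECT indexrelid::regclass, indnatts, indnkeyatts, indkey
FROM pg_index
WHERE indrelid = 'orders'::regclass;
</programlisting>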
- <structname>pg_inherits</> Columns + <structname>pg_inherits</structname> Columns @@ -3908,7 +3978,7 @@ SCRAM-SHA-256$<iteration count>:<salt>< - The catalog pg_init_privs records information about + The catalog pg_init_privs records information about the initial privileges of objects in the system. There is one entry for each object in the database which has a non-default (non-NULL) initial set of privileges. @@ -3916,7 +3986,7 @@ SCRAM-SHA-256$<iteration count>:<salt>< Objects can have initial privileges either by having those privileges set - when the system is initialized (by initdb) or when the + when the system is initialized (by initdb) or when the object is created during a CREATE EXTENSION and the extension script sets initial privileges using the GRANT system. Note that the system will automatically handle recording of the @@ -3924,12 +3994,12 @@ SCRAM-SHA-256$<iteration count>:<salt>< only use the GRANT and REVOKE statements in their script to have the privileges recorded. The privtype column indicates if the initial privilege was - set by initdb or during a + set by initdb or during a CREATE EXTENSION command. - Objects which have initial privileges set by initdb will + Objects which have initial privileges set by initdb will have entries where privtype is 'i', while objects which have initial privileges set by CREATE EXTENSION will have entries where @@ -3937,7 +4007,7 @@ SCRAM-SHA-256$<iteration count>:<salt><
- <structname>pg_init_privs</> Columns + <structname>pg_init_privs</structname> Columns @@ -3970,7 +4040,7 @@ SCRAM-SHA-256$<iteration count>:<salt>< For a table column, this is the column number (the - objoid and classoid refer to the + objoid and classoid refer to the table itself). For all other object types, this column is zero. @@ -3991,8 +4061,8 @@ SCRAM-SHA-256$<iteration count>:<salt>< The initial access privileges; see - and - + and + for details @@ -4014,12 +4084,12 @@ SCRAM-SHA-256$<iteration count>:<salt>< The catalog pg_language registers languages in which you can write functions or stored procedures. - See - and for more information about language handlers. + See + and for more information about language handlers.
- <structname>pg_language</> Columns + <structname>pg_language</structname> Columns @@ -4096,8 +4166,8 @@ SCRAM-SHA-256$<iteration count>:<salt>< pg_proc.oid This references a function that is responsible for executing - inline anonymous code blocks - ( blocks). + inline anonymous code blocks + ( blocks). Zero if inline blocks are not supported. @@ -4119,8 +4189,8 @@ SCRAM-SHA-256$<iteration count>:<salt>< Access privileges; see - and - + and + for details @@ -4142,24 +4212,24 @@ SCRAM-SHA-256$<iteration count>:<salt>< The catalog pg_largeobject holds the data making up large objects. A large object is identified by an OID assigned when it is created. Each large object is broken into - segments or pages small enough to be conveniently stored as rows + segments or pages small enough to be conveniently stored as rows in pg_largeobject. - The amount of data per page is defined to be LOBLKSIZE (which is currently - BLCKSZ/4, or typically 2 kB). + The amount of data per page is defined to be LOBLKSIZE (which is currently + BLCKSZ/4, or typically 2 kB). - Prior to PostgreSQL 9.0, there was no permission structure + Prior to PostgreSQL 9.0, there was no permission structure associated with large objects. As a result, pg_largeobject was publicly readable and could be used to obtain the OIDs (and contents) of all large objects in the system. This is no longer the case; use - pg_largeobject_metadata + pg_largeobject_metadata to obtain a list of large object OIDs.
- <structname>pg_largeobject</> Columns + <structname>pg_largeobject</structname> Columns @@ -4193,7 +4263,7 @@ SCRAM-SHA-256$<iteration count>:<salt>< Actual data stored in the large object. - This will never be more than LOBLKSIZE bytes and might be less. + This will never be more than LOBLKSIZE bytes and might be less. @@ -4203,9 +4273,9 @@ SCRAM-SHA-256$<iteration count>:<salt>< Each row of pg_largeobject holds data for one page of a large object, beginning at - byte offset (pageno * LOBLKSIZE) within the object. The implementation + byte offset (pageno * LOBLKSIZE) within the object. The implementation allows sparse storage: pages might be missing, and might be shorter than - LOBLKSIZE bytes even if they are not the last page of the object. + LOBLKSIZE bytes even if they are not the last page of the object. Missing regions within a large object read as zeroes. @@ -4222,11 +4292,11 @@ SCRAM-SHA-256$<iteration count>:<salt>< The catalog pg_largeobject_metadata holds metadata associated with large objects. The actual large object data is stored in - pg_largeobject. + pg_largeobject.
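A minimal sketch of the page layout described above; note that, as mentioned, reading pg_largeobject itself normally requires superuser privileges:
<programlisting>
SELECT lo_from_bytea(0, 'hello world'::bytea) AS loid;

-- one row per LOBLKSIZE-sized page of each large object
SELECT loid, pageno, octet_length(data) AS bytes
FROM pg_largeobject;

SELECT oid, lomowner::regrole AS owner
FROM pg_largeobject_metadata;
</programlisting>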
- <structname>pg_largeobject_metadata</> Columns + <structname>pg_largeobject_metadata</structname> Columns @@ -4259,8 +4329,8 @@ SCRAM-SHA-256$<iteration count>:<salt>< Access privileges; see - and - + and + for details @@ -4279,14 +4349,14 @@ SCRAM-SHA-256$<iteration count>:<salt>< - The catalog pg_namespace stores namespaces. + The catalog pg_namespace stores namespaces. A namespace is the structure underlying SQL schemas: each namespace can have a separate collection of relations, types, etc. without name conflicts.
- <structname>pg_namespace</> Columns + <structname>pg_namespace</structname> Columns @@ -4326,8 +4396,8 @@ SCRAM-SHA-256$<iteration count>:<salt>< Access privileges; see - and - + and + for details @@ -4357,11 +4427,11 @@ SCRAM-SHA-256$<iteration count>:<salt>< - Operator classes are described at length in . + Operator classes are described at length in .
- <structname>pg_opclass</> Columns + <structname>pg_opclass</structname> Columns @@ -4427,14 +4497,14 @@ SCRAM-SHA-256$<iteration count>:<salt>< opcdefault bool - True if this operator class is the default for opcintype + True if this operator class is the default for opcintype opckeytype oid pg_type.oid - Type of data stored in index, or zero if same as opcintype + Type of data stored in index, or zero if same as opcintype @@ -4442,11 +4512,11 @@ SCRAM-SHA-256$<iteration count>:<salt><
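For example, the default btree operator class for int4, and whether it really is the default, can be looked up as follows:
<programlisting>
SELECT opcname, opcintype::regtype, opcdefault, opckeytype
FROM pg_opclass
WHERE opcmethod = (SELECT oid FROM pg_am WHERE amname = 'btree')
  AND opcintype = 'int4'::regtype;
</programlisting>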
- An operator class's opcmethod must match the - opfmethod of its containing operator family. + An operator class's opcmethod must match the + opfmethod of its containing operator family. Also, there must be no more than one pg_opclass - row having opcdefault true for any given combination of - opcmethod and opcintype. + row having opcdefault true for any given combination of + opcmethod and opcintype. @@ -4460,13 +4530,13 @@ SCRAM-SHA-256$<iteration count>:<salt>< - The catalog pg_operator stores information about operators. - See - and for more information. + The catalog pg_operator stores information about operators. + See + and for more information. - <structname>pg_operator</> Columns + <structname>pg_operator</structname> Columns @@ -4514,8 +4584,8 @@ SCRAM-SHA-256$<iteration count>:<salt>< char - b = infix (both), l = prefix - (left), r = postfix (right) + b = infix (both), l = prefix + (left), r = postfix (right) @@ -4612,18 +4682,18 @@ SCRAM-SHA-256$<iteration count>:<salt>< Each operator family is a collection of operators and associated support routines that implement the semantics specified for a particular index access method. Furthermore, the operators in a family are all - compatible, in a way that is specified by the access method. + compatible, in a way that is specified by the access method. The operator family concept allows cross-data-type operators to be used with indexes and to be reasoned about using knowledge of access method semantics. - Operator families are described at length in . + Operator families are described at length in .
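One way to make the cross-data-type aspect concrete is to list the members of the built-in btree integer_ops family by joining pg_opfamily with pg_amop; this is only a sketch of such an inspection query:
<programlisting>
SELECT f.opfname,
       a.amoplefttype::regtype,
       a.amoprighttype::regtype,
       a.amopopr::regoperator,
       a.amopstrategy
FROM pg_opfamily f
JOIN pg_amop a ON a.amopfamily = f.oid
WHERE f.opfname = 'integer_ops'
  AND f.opfmethod = (SELECT oid FROM pg_am WHERE amname = 'btree')
ORDER BY a.amoplefttype, a.amoprighttype, a.amopstrategy;
</programlisting>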
- <structname>pg_opfamily</> Columns + <structname>pg_opfamily</structname> Columns @@ -4700,7 +4770,7 @@ SCRAM-SHA-256$<iteration count>:<salt><
- <structname>pg_partitioned_table</> Columns + <structname>pg_partitioned_table</structname> Columns @@ -4718,7 +4788,7 @@ SCRAM-SHA-256$<iteration count>:<salt>< partrelid oid pg_class.oid - The OID of the pg_class entry for this partitioned table + The OID of the pg_class entry for this partitioned table @@ -4726,8 +4796,8 @@ SCRAM-SHA-256$<iteration count>:<salt>< char - Partitioning strategy; l = list partitioned table, - r = range partitioned table + Partitioning strategy; h = hash partitioned table, + l = list partitioned table, r = range partitioned table @@ -4738,6 +4808,17 @@ SCRAM-SHA-256$<iteration count>:<salt>< The number of columns in partition key + + partdefid + oid + pg_class.oid + + The OID of the pg_class entry for the default partition + of this partitioned table, or zero if this partitioned table does not + have a default partition. + + + partattrs int2vector @@ -4782,7 +4863,7 @@ SCRAM-SHA-256$<iteration count>:<salt>< Expression trees (in nodeToString() representation) for partition key columns that are not simple column references. This is a list with one element for each zero - entry in partattrs. Null if all partition key columns + entry in partattrs. Null if all partition key columns are simple references. @@ -4802,9 +4883,9 @@ SCRAM-SHA-256$<iteration count>:<salt>< The catalog pg_pltemplate stores - template information for procedural languages. + template information for procedural languages. A template for a language allows the language to be created in a - particular database by a simple CREATE LANGUAGE command, + particular database by a simple CREATE LANGUAGE command, with no need to specify implementation details. @@ -4817,7 +4898,7 @@ SCRAM-SHA-256$<iteration count>:<salt><
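A sketch (hypothetical table names) showing the partitioning strategy code and the new partdefid column described above:
<programlisting>
CREATE TABLE measurements (city_id int, logdate date)
    PARTITION BY RANGE (logdate);
CREATE TABLE measurements_default PARTITION OF measurements DEFAULT;

SELECT partrelid::regclass, partstrat, partnatts,
       partdefid::regclass AS default_partition
FROM pg_partitioned_table
WHERE partrelid = 'measurements'::regclass;
</programlisting>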
- <structname>pg_pltemplate</> Columns + <structname>pg_pltemplate</structname> Columns @@ -4890,7 +4971,7 @@ SCRAM-SHA-256$<iteration count>:<salt>< - It is likely that pg_pltemplate will be removed in some + It is likely that pg_pltemplate will be removed in some future release of PostgreSQL, in favor of keeping this knowledge about procedural languages in their respective extension installation scripts. @@ -4913,7 +4994,7 @@ SCRAM-SHA-256$<iteration count>:<salt>< command that it applies to (possibly all commands), the roles that it applies to, the expression to be added as a security-barrier qualification to queries that include the table, and the expression - to be added as a WITH CHECK option for queries that attempt to + to be added as a WITH CHECK option for queries that attempt to add new records to the table. @@ -4951,11 +5032,11 @@ SCRAM-SHA-256$<iteration count>:<salt>< char The command type to which the policy is applied: - r for SELECT, - a for INSERT, - w for UPDATE, - d for DELETE, - or * for all + r for SELECT, + a for INSERT, + w for UPDATE, + d for DELETE, + or * for all @@ -4992,8 +5073,8 @@ SCRAM-SHA-256$<iteration count>:<salt>< - Policies stored in pg_policy are applied only when - pg_class.relrowsecurity is set for + Policies stored in pg_policy are applied only when + pg_class.relrowsecurity is set for their table. @@ -5008,19 +5089,21 @@ SCRAM-SHA-256$<iteration count>:<salt>< - The catalog pg_proc stores information about functions (or procedures). - See - and for more information. + The catalog pg_proc stores information about + functions, procedures, aggregate functions, and window functions + (collectively also known as routines). See , , and + for more information. - The table contains data for aggregate functions as well as plain functions. - If proisagg is true, there should be a matching - row in pg_aggregate. + If prokind indicates that the entry is for an + aggregate function, there should be a matching row in + pg_aggregate.
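Since proisagg and proiswindow are folded into the single prokind column, a quick way to see the distribution of routine kinds (codes as documented below) is:
<programlisting>
-- f = normal function, p = procedure, a = aggregate, w = window function
SELECT prokind, count(*) AS routines
FROM pg_proc
GROUP BY prokind
ORDER BY prokind;
</programlisting>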
- <structname>pg_proc</> Columns + <structname>pg_proc</structname> Columns @@ -5075,7 +5158,7 @@ SCRAM-SHA-256$<iteration count>:<salt>< float4 Estimated execution cost (in units of - ); if proretset, + ); if proretset, this is cost per row returned @@ -5083,7 +5166,7 @@ SCRAM-SHA-256$<iteration count>:<salt>< prorows float4 - Estimated number of result rows (zero if not proretset) + Estimated number of result rows (zero if not proretset) @@ -5099,28 +5182,23 @@ SCRAM-SHA-256$<iteration count>:<salt>< regproc pg_proc.oid Calls to this function can be simplified by this other function - (see ) + (see ) - proisagg - bool - - Function is an aggregate function - - - - proiswindow - bool + prokind + char - Function is a window function + f for a normal function, p + for a procedure, a for an aggregate function, or + w for a window function prosecdef bool - Function is a security definer (i.e., a setuid + Function is a security definer (i.e., a setuid function) @@ -5164,11 +5242,11 @@ SCRAM-SHA-256$<iteration count>:<salt>< provolatile tells whether the function's result depends only on its input arguments, or is affected by outside factors. - It is i for immutable functions, + It is i for immutable functions, which always deliver the same result for the same inputs. - It is s for stable functions, + It is s for stable functions, whose results (for fixed inputs) do not change within a scan. - It is v for volatile functions, + It is v for volatile functions, whose results might change at any time. (Use v also for functions with side-effects, so that calls to them cannot get optimized away.) @@ -5220,7 +5298,7 @@ SCRAM-SHA-256$<iteration count>:<salt>< An array with the data types of the function arguments. This includes only input arguments (including INOUT and - VARIADIC arguments), and thus represents + VARIADIC arguments), and thus represents the call signature of the function. @@ -5235,7 +5313,7 @@ SCRAM-SHA-256$<iteration count>:<salt>< INOUT arguments); however, if all the arguments are IN arguments, this field will be null. Note that subscripting is 1-based, whereas for historical reasons - proargtypes is subscripted from 0. + proargtypes is subscripted from 0. @@ -5245,15 +5323,15 @@ SCRAM-SHA-256$<iteration count>:<salt>< An array with the modes of the function arguments, encoded as - i for IN arguments, - o for OUT arguments, - b for INOUT arguments, - v for VARIADIC arguments, - t for TABLE arguments. + i for IN arguments, + o for OUT arguments, + b for INOUT arguments, + v for VARIADIC arguments, + t for TABLE arguments. If all the arguments are IN arguments, this field will be null. Note that subscripts correspond to positions of - proallargtypes not proargtypes. + proallargtypes not proargtypes. @@ -5266,7 +5344,7 @@ SCRAM-SHA-256$<iteration count>:<salt>< Arguments without a name are set to empty strings in the array. If none of the arguments have a name, this field will be null. Note that subscripts correspond to positions of - proallargtypes not proargtypes. + proallargtypes not proargtypes. @@ -5277,9 +5355,9 @@ SCRAM-SHA-256$<iteration count>:<salt>< Expression trees (in nodeToString() representation) for default values. This is a list with - pronargdefaults elements, corresponding to the last - N input arguments (i.e., the last - N proargtypes positions). + pronargdefaults elements, corresponding to the last + N input arguments (i.e., the last + N proargtypes positions). If none of the arguments have defaults, this field will be null. 
@@ -5328,8 +5406,8 @@ SCRAM-SHA-256$<iteration count>:<salt>< Access privileges; see - and - + and + for details @@ -5359,7 +5437,7 @@ SCRAM-SHA-256$<iteration count>:<salt>< The catalog pg_publication contains all publications created in the database. For more on publications see - . + .
@@ -5385,7 +5463,7 @@ SCRAM-SHA-256$<iteration count>:<salt>< pubname - Name + name Name of the publication @@ -5429,6 +5507,14 @@ SCRAM-SHA-256$<iteration count>:<salt>< If true, DELETE operations are replicated for tables in the publication. + + + pubtruncate + bool + + If true, TRUNCATE operations are replicated for + tables in the publication. +
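As a hedged illustration of the flags above (pubtruncate exists only in releases that include this change), the operations replicated by each publication can be inspected with a simple query:

-- which DML operations each publication replicates
SELECT pubname, puballtables, pubinsert, pubupdate, pubdelete, pubtruncate
FROM pg_publication;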
@@ -5444,7 +5530,7 @@ SCRAM-SHA-256$<iteration count>:<salt>< The catalog pg_publication_rel contains the mapping between relations and publications in the database. This is a - many-to-many mapping. See also + many-to-many mapping. See also for a more user-friendly view of this information. @@ -5494,7 +5580,7 @@ SCRAM-SHA-256$<iteration count>:<salt>< - <structname>pg_range</> Columns + <structname>pg_range</structname> Columns @@ -5555,10 +5641,10 @@ SCRAM-SHA-256$<iteration count>:<salt><
- rngsubopc (plus rngcollation, if the + rngsubopc (plus rngcollation, if the element type is collatable) determines the sort ordering used by the range - type. rngcanonical is used when the element type is - discrete. rngsubdiff is optional but should be supplied to + type. rngcanonical is used when the element type is + discrete. rngsubdiff is optional but should be supplied to improve performance of GiST indexes on the range type. @@ -5574,7 +5660,14 @@ SCRAM-SHA-256$<iteration count>:<salt>< The pg_replication_origin catalog contains all replication origins created. For more on replication origins - see . + see . + + + + Unlike most system catalogs, pg_replication_origin + is shared across all databases of a cluster: there is only one copy + of pg_replication_origin per cluster, not one per + database. @@ -5624,7 +5717,7 @@ SCRAM-SHA-256$<iteration count>:<salt><
- <structname>pg_rewrite</> Columns + <structname>pg_rewrite</structname> Columns @@ -5663,9 +5756,9 @@ SCRAM-SHA-256$<iteration count>:<salt>< char - Event type that the rule is for: 1 = SELECT, 2 = - UPDATE, 3 = INSERT, 4 = - DELETE + Event type that the rule is for: 1 = SELECT, 2 = + UPDATE, 3 = INSERT, 4 = + DELETE @@ -5674,12 +5767,12 @@ SCRAM-SHA-256$<iteration count>:<salt>< char - Controls in which modes + Controls in which modes the rule fires. - O = rule fires in origin and local modes, - D = rule is disabled, - R = rule fires in replica mode, - A = rule fires always. + O = rule fires in origin and local modes, + D = rule is disabled, + R = rule fires in replica mode, + A = rule fires always. @@ -5734,8 +5827,8 @@ SCRAM-SHA-256$<iteration count>:<salt>< The catalog pg_seclabel stores security labels on database objects. Security labels can be manipulated - with the command. For an easier - way to view security labels, see . + with the command. For an easier + way to view security labels, see . @@ -5778,7 +5871,7 @@ SCRAM-SHA-256$<iteration count>:<salt>< For a security label on a table column, this is the column number (the - objoid and classoid refer to + objoid and classoid refer to the table itself). For all other object types, this column is zero. @@ -5816,7 +5909,7 @@ SCRAM-SHA-256$<iteration count>:<salt><
- <structname>pg_sequence</> Columns + <structname>pg_sequence</structname> Columns @@ -5833,7 +5926,7 @@ SCRAM-SHA-256$<iteration count>:<salt>< seqrelid oid pg_class.oid - The OID of the pg_class entry for this sequence + The OID of the pg_class entry for this sequence @@ -5918,7 +6011,7 @@ SCRAM-SHA-256$<iteration count>:<salt><
- <structname>pg_shdepend</> Columns + <structname>pg_shdepend</structname> Columns @@ -5959,7 +6052,7 @@ SCRAM-SHA-256$<iteration count>:<salt>< For a table column, this is the column number (the - objid and classid refer to the + objid and classid refer to the table itself). For all other object types, this column is zero. @@ -5996,11 +6089,11 @@ SCRAM-SHA-256$<iteration count>:<salt>< In all cases, a pg_shdepend entry indicates that the referenced object cannot be dropped without also dropping the dependent object. However, there are several subflavors identified by - deptype: + deptype: - SHARED_DEPENDENCY_OWNER (o) + SHARED_DEPENDENCY_OWNER (o) The referenced object (which must be a role) is the owner of the @@ -6010,20 +6103,20 @@ SCRAM-SHA-256$<iteration count>:<salt>< - SHARED_DEPENDENCY_ACL (a) + SHARED_DEPENDENCY_ACL (a) The referenced object (which must be a role) is mentioned in the ACL (access control list, i.e., privileges list) of the - dependent object. (A SHARED_DEPENDENCY_ACL entry is + dependent object. (A SHARED_DEPENDENCY_ACL entry is not made for the owner of the object, since the owner will have - a SHARED_DEPENDENCY_OWNER entry anyway.) + a SHARED_DEPENDENCY_OWNER entry anyway.) - SHARED_DEPENDENCY_POLICY (r) + SHARED_DEPENDENCY_POLICY (r) The referenced object (which must be a role) is mentioned as the @@ -6033,7 +6126,7 @@ SCRAM-SHA-256$<iteration count>:<salt>< - SHARED_DEPENDENCY_PIN (p) + SHARED_DEPENDENCY_PIN (p) There is no dependent object; this type of entry is a signal @@ -6062,7 +6155,7 @@ SCRAM-SHA-256$<iteration count>:<salt>< The catalog pg_shdescription stores optional descriptions (comments) for shared database objects. Descriptions can be - manipulated with the command and viewed with + manipulated with the command and viewed with psql's \d commands. @@ -6080,7 +6173,7 @@ SCRAM-SHA-256$<iteration count>:<salt><
- <structname>pg_shdescription</> Columns + <structname>pg_shdescription</structname> Columns @@ -6129,8 +6222,8 @@ SCRAM-SHA-256$<iteration count>:<salt>< The catalog pg_shseclabel stores security labels on shared database objects. Security labels can be manipulated - with the command. For an easier - way to view security labels, see . + with the command. For an easier + way to view security labels, see . @@ -6197,23 +6290,23 @@ SCRAM-SHA-256$<iteration count>:<salt>< The catalog pg_statistic stores statistical data about the contents of the database. Entries are - created by + created by and subsequently used by the query planner. Note that all the statistical data is inherently approximate, even assuming that it is up-to-date. - Normally there is one entry, with stainherit = - false, for each table column that has been analyzed. + Normally there is one entry, with stainherit = + false, for each table column that has been analyzed. If the table has inheritance children, a second entry with - stainherit = true is also created. This row + stainherit = true is also created. This row represents the column's statistics over the inheritance tree, i.e., statistics for the data you'd see with - SELECT column FROM table*, - whereas the stainherit = false row represents + SELECT column FROM table*, + whereas the stainherit = false row represents the results of - SELECT column FROM ONLY table. + SELECT column FROM ONLY table. @@ -6223,7 +6316,7 @@ SCRAM-SHA-256$<iteration count>:<salt>< references the index. No entry is made for an ordinary non-expression index column, however, since it would be redundant with the entry for the underlying table column. Currently, entries for index expressions - always have stainherit = false. + always have stainherit = false. @@ -6250,7 +6343,7 @@ SCRAM-SHA-256$<iteration count>:<salt><
- <structname>pg_statistic</> Columns + <structname>pg_statistic</structname> Columns @@ -6308,56 +6401,56 @@ SCRAM-SHA-256$<iteration count>:<salt>< A value less than zero is the negative of a multiplier for the number of rows in the table; for example, a column in which about 80% of the values are nonnull and each nonnull value appears about twice on - average could be represented by stadistinct = -0.4. + average could be represented by stadistinct = -0.4. A zero value means the number of distinct values is unknown. - stakindN + stakindN int2 A code number indicating the kind of statistics stored in the - Nth slot of the + Nth slot of the pg_statistic row. - staopN + staopN oid pg_operator.oid An operator used to derive the statistics stored in the - Nth slot. For example, a + Nth slot. For example, a histogram slot would show the < operator that defines the sort order of the data. - stanumbersN + stanumbersN float4[] Numerical statistics of the appropriate kind for the - Nth slot, or null if the slot + Nth slot, or null if the slot kind does not involve numerical values - stavaluesN + stavaluesN anyarray Column data values of the appropriate kind for the - Nth slot, or null if the slot + Nth slot, or null if the slot kind does not store any data values. Each array's element values are actually of the specific column's data type, or a related type such as an array's element type, so there is no way to define - these columns' type more specifically than anyarray. + these columns' type more specifically than anyarray. @@ -6376,12 +6469,12 @@ SCRAM-SHA-256$<iteration count>:<salt>< The catalog pg_statistic_ext holds extended planner statistics. - Each row in this catalog corresponds to a statistics object - created with . + Each row in this catalog corresponds to a statistics object + created with .
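As a rough sketch of how entries get into this catalog (the statistics object name, table, and columns below are hypothetical), an extended-statistics object is created, populated by ANALYZE, and then becomes visible here:

-- create an extended-statistics object on two presumably correlated columns
CREATE STATISTICS sketch_stats (ndistinct, dependencies) ON city, zip FROM addresses;
ANALYZE addresses;
-- the computed statistics appear in the catalog
SELECT stxname, stxkind, stxndistinct, stxdependencies
FROM pg_statistic_ext
WHERE stxname = 'sketch_stats';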
- <structname>pg_statistic_ext</> Columns + <structname>pg_statistic_ext</structname> Columns @@ -6442,7 +6535,7 @@ SCRAM-SHA-256$<iteration count>:<salt>< char[] - An array containing codes for the enabled statistic types; + An array containing codes for the enabled statistic kinds; valid values are: d for n-distinct statistics, f for functional dependency statistics @@ -6454,7 +6547,7 @@ SCRAM-SHA-256$<iteration count>:<salt>< pg_ndistinct - N-distinct counts, serialized as pg_ndistinct type + N-distinct counts, serialized as pg_ndistinct type @@ -6464,7 +6557,7 @@ SCRAM-SHA-256$<iteration count>:<salt>< Functional dependency statistics, serialized - as pg_dependencies type + as pg_dependencies type @@ -6476,7 +6569,7 @@ SCRAM-SHA-256$<iteration count>:<salt>< The stxkind field is filled at creation of the statistics object, indicating which statistic type(s) are desired. The fields after it are initially NULL and are filled only when the - corresponding statistic has been computed by ANALYZE. + corresponding statistic has been computed by ANALYZE. @@ -6490,12 +6583,12 @@ SCRAM-SHA-256$<iteration count>:<salt>< The catalog pg_subscription contains all existing logical replication subscriptions. For more information about logical - replication see . + replication see . Unlike most system catalogs, pg_subscription is - shared across all databases of a cluster: There is only one copy + shared across all databases of a cluster: there is only one copy of pg_subscription per cluster, not one per database. @@ -6585,7 +6678,7 @@ SCRAM-SHA-256$<iteration count>:<salt>< Array of subscribed publication names. These reference the publications on the publisher server. For more on publications - see . + see . @@ -6646,10 +6739,10 @@ SCRAM-SHA-256$<iteration count>:<salt>< State code: - i = initialize, - d = data is being copied, - s = synchronized, - r = ready (normal replication) + i = initialize, + d = data is being copied, + s = synchronized, + r = ready (normal replication) @@ -6658,7 +6751,7 @@ SCRAM-SHA-256$<iteration count>:<salt>< pg_lsn - End LSN for s and r states. + End LSN for s and r states. @@ -6687,7 +6780,7 @@ SCRAM-SHA-256$<iteration count>:<salt><
- <structname>pg_tablespace</> Columns + <structname>pg_tablespace</structname> Columns @@ -6727,8 +6820,8 @@ SCRAM-SHA-256$<iteration count>:<salt>< Access privileges; see - and - + and + for details @@ -6738,7 +6831,7 @@ SCRAM-SHA-256$<iteration count>:<salt>< text[] - Tablespace-level options, as keyword=value strings + Tablespace-level options, as keyword=value strings @@ -6757,11 +6850,11 @@ SCRAM-SHA-256$<iteration count>:<salt>< The catalog pg_transform stores information about transforms, which are a mechanism to adapt data types to procedural - languages. See for more information. + languages. See for more information.
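As an illustrative query only (no particular transforms are assumed to be installed), the transforms present in a database and the functions they rely on can be listed roughly like this:

-- type/language pairs with transforms, and their conversion functions
SELECT t.trftype::regtype AS type,
       l.lanname AS language,
       t.trffromsql, t.trftosql
FROM pg_transform t
     JOIN pg_language l ON l.oid = t.trflang;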
- <structname>pg_transform</> Columns + <structname>pg_transform</structname> Columns @@ -6825,12 +6918,12 @@ SCRAM-SHA-256$<iteration count>:<salt>< The catalog pg_trigger stores triggers on tables and views. - See + See for more information.
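For example (an editor's sketch; filtering on tgisinternal hides the triggers that implement foreign-key constraints), user-created triggers can be listed with a query along these lines:

-- user-level triggers, with the table and function they are attached to
SELECT tgrelid::regclass AS table_name,
       tgname,
       tgfoid::regproc AS trigger_function,
       tgenabled
FROM pg_trigger
WHERE NOT tgisinternal;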
- <structname>pg_trigger</> Columns + <structname>pg_trigger</structname> Columns @@ -6883,12 +6976,12 @@ SCRAM-SHA-256$<iteration count>:<salt>< char - Controls in which modes + Controls in which modes the trigger fires. - O = trigger fires in origin and local modes, - D = trigger is disabled, - R = trigger fires in replica mode, - A = trigger fires always. + O = trigger fires in origin and local modes, + D = trigger is disabled, + R = trigger fires in replica mode, + A = trigger fires always. @@ -6897,7 +6990,7 @@ SCRAM-SHA-256$<iteration count>:<salt>< bool True if trigger is internally generated (usually, to enforce - the constraint identified by tgconstraint) + the constraint identified by tgconstraint) @@ -6919,7 +7012,7 @@ SCRAM-SHA-256$<iteration count>:<salt>< tgconstraint oid pg_constraint.oid - The pg_constraint entry associated with the trigger, if any + The pg_constraint entry associated with the trigger, if any @@ -6963,7 +7056,7 @@ SCRAM-SHA-256$<iteration count>:<salt>< pg_node_tree Expression tree (in nodeToString() - representation) for the trigger's WHEN condition, or null + representation) for the trigger's WHEN condition, or null if none @@ -6971,7 +7064,7 @@ SCRAM-SHA-256$<iteration count>:<salt>< tgoldtable name - REFERENCING clause name for OLD TABLE, + REFERENCING clause name for OLD TABLE, or null if none @@ -6979,7 +7072,7 @@ SCRAM-SHA-256$<iteration count>:<salt>< tgnewtable name - REFERENCING clause name for NEW TABLE, + REFERENCING clause name for NEW TABLE, or null if none @@ -6988,18 +7081,18 @@ SCRAM-SHA-256$<iteration count>:<salt>< Currently, column-specific triggering is supported only for - UPDATE events, and so tgattr is relevant + UPDATE events, and so tgattr is relevant only for that event type. tgtype might contain bits for other event types as well, but those are presumed - to be table-wide regardless of what is in tgattr. + to be table-wide regardless of what is in tgattr. - When tgconstraint is nonzero, - tgconstrrelid, tgconstrindid, - tgdeferrable, and tginitdeferred are - largely redundant with the referenced pg_constraint entry. + When tgconstraint is nonzero, + tgconstrrelid, tgconstrindid, + tgdeferrable, and tginitdeferred are + largely redundant with the referenced pg_constraint entry. However, it is possible for a non-deferrable trigger to be associated with a deferrable constraint: foreign key constraints can have some deferrable and some non-deferrable triggers. @@ -7035,11 +7128,11 @@ SCRAM-SHA-256$<iteration count>:<salt>< PostgreSQL's text search features are - described at length in . + described at length in .
- <structname>pg_ts_config</> Columns + <structname>pg_ts_config</structname> Columns @@ -7110,11 +7203,11 @@ SCRAM-SHA-256$<iteration count>:<salt>< PostgreSQL's text search features are - described at length in . + described at length in .
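As a sketch of how this catalog ties a configuration, its token types, and dictionaries together (the 'english' configuration is assumed to exist, as it does in a standard installation; the casts are only for readable output):

-- token-to-dictionary mappings of the 'english' configuration
SELECT mapcfg::regconfig AS config,
       maptokentype,
       mapseqno,
       mapdict::regdictionary AS dictionary
FROM pg_ts_config_map
WHERE mapcfg = 'english'::regconfig
ORDER BY maptokentype, mapseqno;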
- <structname>pg_ts_config_map</> Columns + <structname>pg_ts_config_map</structname> Columns @@ -7131,7 +7224,7 @@ SCRAM-SHA-256$<iteration count>:<salt>< mapcfg oid pg_ts_config.oid - The OID of the pg_ts_config entry owning this map entry + The OID of the pg_ts_config entry owning this map entry @@ -7146,7 +7239,7 @@ SCRAM-SHA-256$<iteration count>:<salt>< integer Order in which to consult this entry (lower - mapseqnos first) + mapseqnos first) @@ -7175,17 +7268,17 @@ SCRAM-SHA-256$<iteration count>:<salt>< needed; the dictionary itself provides values for the user-settable parameters supported by the template. This division of labor allows dictionaries to be created by unprivileged users. The parameters - are specified by a text string dictinitoption, + are specified by a text string dictinitoption, whose format and meaning vary depending on the template. PostgreSQL's text search features are - described at length in . + described at length in .
- <structname>pg_ts_dict</> Columns + <structname>pg_ts_dict</structname> Columns @@ -7264,11 +7357,11 @@ SCRAM-SHA-256$<iteration count>:<salt>< PostgreSQL's text search features are - described at length in . + described at length in .
- <structname>pg_ts_parser</> Columns + <structname>pg_ts_parser</structname> Columns @@ -7361,11 +7454,11 @@ SCRAM-SHA-256$<iteration count>:<salt>< PostgreSQL's text search features are - described at length in . + described at length in .
- <structname>pg_ts_template</> Columns + <structname>pg_ts_template</structname> Columns @@ -7430,16 +7523,16 @@ SCRAM-SHA-256$<iteration count>:<salt>< The catalog pg_type stores information about data types. Base types and enum types (scalar types) are created with - , and + , and domains with - . + . A composite type is automatically created for each table in the database, to represent the row structure of the table. It is also possible to create composite types with CREATE TYPE AS.
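As a small illustration (type names chosen arbitrarily), the typtype, typcategory, and typlen columns described below can be inspected directly; _int4 is the array type over int4, and pg_class is a table row type:

SELECT typname, typtype, typcategory, typlen
FROM pg_type
WHERE typname IN ('int4', '_int4', 'pg_class');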
- <structname>pg_type</> Columns + <structname>pg_type</structname> Columns @@ -7490,7 +7583,7 @@ SCRAM-SHA-256$<iteration count>:<salt>< For a fixed-size type, typlen is the number of bytes in the internal representation of the type. But for a variable-length type, typlen is negative. - -1 indicates a varlena type (one that has a length word), + -1 indicates a varlena type (one that has a length word), -2 indicates a null-terminated C string. @@ -7535,8 +7628,8 @@ SCRAM-SHA-256$<iteration count>:<salt>< typcategory is an arbitrary classification of data types that is used by the parser to determine which implicit - casts should be preferred. - See . + casts should be preferred. + See . @@ -7680,7 +7773,7 @@ SCRAM-SHA-256$<iteration count>:<salt>< typalign is the alignment required when storing a value of this type. It applies to storage on disk as well as most representations of the value inside - PostgreSQL. + PostgreSQL. When multiple values are stored consecutively, such as in the representation of a complete row on disk, padding is inserted before a datum of this type so that it begins on the @@ -7692,16 +7785,16 @@ SCRAM-SHA-256$<iteration count>:<salt>< Possible values are: - c = char alignment, i.e., no alignment needed. + c = char alignment, i.e., no alignment needed. - s = short alignment (2 bytes on most machines). + s = short alignment (2 bytes on most machines). - i = int alignment (4 bytes on most machines). + i = int alignment (4 bytes on most machines). - d = double alignment (8 bytes on many machines, but by no means all). + d = double alignment (8 bytes on many machines, but by no means all). @@ -7726,24 +7819,24 @@ SCRAM-SHA-256$<iteration count>:<salt>< Possible values are - p: Value must always be stored plain. + p: Value must always be stored plain. - e: Value can be stored in a secondary + e: Value can be stored in a secondary relation (if relation has one, see pg_class.reltoastrelid). - m: Value can be stored compressed inline. + m: Value can be stored compressed inline. - x: Value can be stored compressed inline or stored in secondary storage. + x: Value can be stored compressed inline or stored in secondary storage. - Note that m columns can also be moved out to secondary - storage, but only as a last resort (e and x columns are + Note that m columns can also be moved out to secondary + storage, but only as a last resort (e and x columns are moved first). @@ -7774,9 +7867,9 @@ SCRAM-SHA-256$<iteration count>:<salt>< int4 - Domains use typtypmod to record the typmod + Domains use typtypmod to record the typmod to be applied to their base type (-1 if base type does not use a - typmod). -1 if this type is not a domain. + typmod). -1 if this type is not a domain. @@ -7786,7 +7879,7 @@ SCRAM-SHA-256$<iteration count>:<salt>< typndims is the number of array dimensions - for a domain over an array (that is, typbasetype is + for a domain over an array (that is, typbasetype is an array type). Zero for types other than domains over array types. @@ -7811,7 +7904,7 @@ SCRAM-SHA-256$<iteration count>:<salt>< pg_node_tree - If typdefaultbin is not null, it is the + If typdefaultbin is not null, it is the nodeToString() representation of a default expression for the type. This is only used for domains. @@ -7823,12 +7916,12 @@ SCRAM-SHA-256$<iteration count>:<salt>< text - typdefault is null if the type has no associated - default value. If typdefaultbin is not null, - typdefault must contain a human-readable version of the - default expression represented by typdefaultbin. 
If - typdefaultbin is null and typdefault is - not, then typdefault is the external representation of + typdefault is null if the type has no associated + default value. If typdefaultbin is not null, + typdefault must contain a human-readable version of the + default expression represented by typdefaultbin. If + typdefaultbin is null and typdefault is + not, then typdefault is the external representation of the type's default value, which can be fed to the type's input converter to produce a constant. @@ -7840,8 +7933,8 @@ SCRAM-SHA-256$<iteration count>:<salt>< Access privileges; see - and - + and + for details @@ -7850,14 +7943,14 @@ SCRAM-SHA-256$<iteration count>:<salt><
- lists the system-defined values - of typcategory. Any future additions to this list will + lists the system-defined values + of typcategory. Any future additions to this list will also be upper-case ASCII letters. All other ASCII characters are reserved for user-defined categories. - <structfield>typcategory</> Codes + <structfield>typcategory</structfield> Codes @@ -7926,7 +8019,7 @@ SCRAM-SHA-256$<iteration count>:<salt>< X - unknown type + unknown type @@ -7951,7 +8044,7 @@ SCRAM-SHA-256$<iteration count>:<salt><
- <structname>pg_user_mapping</> Columns + <structname>pg_user_mapping</structname> Columns @@ -7992,7 +8085,7 @@ SCRAM-SHA-256$<iteration count>:<salt>< text[] - User mapping specific options, as keyword=value strings + User mapping specific options, as keyword=value strings @@ -8012,7 +8105,7 @@ SCRAM-SHA-256$<iteration count>:<salt>< - The information schema () provides + The information schema () provides an alternative set of views which overlap the functionality of the system views. Since the information schema is SQL-standard whereas the views described here are PostgreSQL-specific, @@ -8021,11 +8114,11 @@ SCRAM-SHA-256$<iteration count>:<salt>< - lists the system views described here. + lists the system views described here. More detailed documentation of each view follows below. There are some additional views that provide access to the results of the statistics collector; they are described in . + linkend="monitoring-stats-views-table"/>. @@ -8210,7 +8303,7 @@ SCRAM-SHA-256$<iteration count>:<salt><
- <structname>pg_available_extensions</> Columns + <structname>pg_available_extensions</structname> Columns @@ -8272,7 +8365,7 @@ SCRAM-SHA-256$<iteration count>:<salt><
- <structname>pg_available_extension_versions</> Columns + <structname>pg_available_extension_versions</structname> Columns @@ -8354,11 +8447,11 @@ SCRAM-SHA-256$<iteration count>:<salt>< The view pg_config describes the compile-time configuration parameters of the currently installed - version of PostgreSQL. It is intended, for example, to + version of PostgreSQL. It is intended, for example, to be used by software packages that want to interface to - PostgreSQL to facilitate finding the required header + PostgreSQL to facilitate finding the required header files and libraries. It provides the same basic information as the - PostgreSQL client + PostgreSQL client application. @@ -8368,7 +8461,7 @@ SCRAM-SHA-256$<iteration count>:<salt><
- <structname>pg_config</> Columns + <structname>pg_config</structname> Columns @@ -8409,7 +8502,7 @@ SCRAM-SHA-256$<iteration count>:<salt>< - via the + via the statement in SQL @@ -8417,14 +8510,14 @@ SCRAM-SHA-256$<iteration count>:<salt>< via the Bind message in the frontend/backend protocol, as - described in + described in via the Server Programming Interface (SPI), as described in - + @@ -8439,15 +8532,15 @@ SCRAM-SHA-256$<iteration count>:<salt>< Cursors are used internally to implement some of the components - of PostgreSQL, such as procedural languages. - Therefore, the pg_cursors view might include cursors + of PostgreSQL, such as procedural languages. + Therefore, the pg_cursors view might include cursors that have not been explicitly created by the user.
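A minimal sketch (the cursor name is arbitrary; DECLARE without WITH HOLD must run inside a transaction block):

BEGIN;
DECLARE sketch_cur CURSOR FOR SELECT relname FROM pg_class;
-- the cursor is now listed in the view
SELECT name, statement, is_holdable, is_binary, is_scrollable
FROM pg_cursors;
COMMIT;  -- the non-holdable cursor disappears from the view again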
- <structname>pg_cursors</> Columns + <structname>pg_cursors</structname> Columns @@ -8495,7 +8588,7 @@ SCRAM-SHA-256$<iteration count>:<salt>< is_scrollable boolean - true if the cursor is scrollable (that is, it + true if the cursor is scrollable (that is, it allows rows to be retrieved in a nonsequential manner); false otherwise @@ -8526,16 +8619,16 @@ SCRAM-SHA-256$<iteration count>:<salt>< The view pg_file_settings provides a summary of the contents of the server's configuration file(s). A row appears in - this view for each name = value entry appearing in the files, + this view for each name = value entry appearing in the files, with annotations indicating whether the value could be applied successfully. Additional row(s) may appear for problems not linked to - a name = value entry, such as syntax errors in the files. + a name = value entry, such as syntax errors in the files. This view is helpful for checking whether planned changes in the configuration files will work, or for diagnosing a previous failure. - Note that this view reports on the current contents of the + Note that this view reports on the current contents of the files, not on what was last applied by the server. (The pg_settings view is usually sufficient to determine that.) @@ -8547,7 +8640,7 @@ SCRAM-SHA-256$<iteration count>:<salt><
- <structname>pg_file_settings</> Columns + <structname>pg_file_settings</structname> Columns @@ -8573,7 +8666,7 @@ SCRAM-SHA-256$<iteration count>:<salt>< seqno integer - Order in which the entries are processed (1..n) + Order in which the entries are processed (1..n) name @@ -8603,21 +8696,21 @@ SCRAM-SHA-256$<iteration count>:<salt>< If the configuration file contains syntax errors or invalid parameter names, the server will not attempt to apply any settings from it, and - therefore all the applied fields will read as false. + therefore all the applied fields will read as false. In such a case there will be one or more rows with non-null error fields indicating the problem(s). Otherwise, individual settings will be applied if possible. If an individual setting cannot be applied (e.g., invalid value, or the setting cannot be changed after server start) it will have an appropriate message in the error field. Another way that - an entry might have applied = false is that it is + an entry might have applied = false is that it is overridden by a later entry for the same parameter name; this case is not considered an error so nothing appears in the error field. - See for more information about the various + See for more information about the various ways to change run-time parameters. @@ -8635,12 +8728,12 @@ SCRAM-SHA-256$<iteration count>:<salt>< compatibility: it emulates a catalog that existed in PostgreSQL before version 8.1. It shows the names and members of all roles that are marked as not - rolcanlogin, which is an approximation to the set + rolcanlogin, which is an approximation to the set of roles that are being used as groups.
- <structname>pg_group</> Columns + <structname>pg_group</structname> Columns @@ -8689,7 +8782,7 @@ SCRAM-SHA-256$<iteration count>:<salt>< The view pg_hba_file_rules provides a summary of the contents of the client authentication configuration - file, pg_hba.conf. A row appears in this view for each + file, pg_hba.conf. A row appears in this view for each non-empty, non-comment line in the file, with annotations indicating whether the rule could be applied successfully. @@ -8697,7 +8790,7 @@ SCRAM-SHA-256$<iteration count>:<salt>< This view can be helpful for checking whether planned changes in the authentication configuration file will work, or for diagnosing a previous - failure. Note that this view reports on the current contents + failure. Note that this view reports on the current contents of the file, not on what was last loaded by the server. @@ -8707,7 +8800,7 @@ SCRAM-SHA-256$<iteration count>:<salt><
- <structname>pg_hba_file_rules</> Columns + <structname>pg_hba_file_rules</structname> Columns @@ -8722,7 +8815,7 @@ SCRAM-SHA-256$<iteration count>:<salt>< line_number integer - Line number of this rule in pg_hba.conf + Line number of this rule in pg_hba.conf @@ -8778,11 +8871,11 @@ SCRAM-SHA-256$<iteration count>:<salt>< Usually, a row reflecting an incorrect entry will have values for only - the line_number and error fields. + the line_number and error fields. - See for more information about + See for more information about client authentication configuration. @@ -8800,7 +8893,7 @@ SCRAM-SHA-256$<iteration count>:<salt><
- <structname>pg_indexes</> Columns + <structname>pg_indexes</structname> Columns @@ -8859,7 +8952,7 @@ SCRAM-SHA-256$<iteration count>:<salt>< The view pg_locks provides access to information about the locks held by active processes within the - database server. See for more discussion + database server. See for more discussion of locking. @@ -8881,12 +8974,12 @@ SCRAM-SHA-256$<iteration count>:<salt>< in the same way as in pg_description or pg_depend). Also, the right to extend a relation is represented as a separate lockable object. - Also, advisory locks can be taken on numbers that have + Also, advisory locks can be taken on numbers that have user-defined meanings.
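As a brief illustration of advisory locks showing up in this view (the key 42 is arbitrary):

SELECT pg_advisory_lock(42);
-- the session-level advisory lock is now visible
SELECT locktype, classid, objid, objsubid, mode, granted
FROM pg_locks
WHERE locktype = 'advisory';
SELECT pg_advisory_unlock(42);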
- <structname>pg_locks</> Columns + <structname>pg_locks</structname> Columns @@ -8904,15 +8997,15 @@ SCRAM-SHA-256$<iteration count>:<salt>< Type of the lockable object: - relation, - extend, - page, - tuple, - transactionid, - virtualxid, - object, - userlock, or - advisory + relation, + extend, + page, + tuple, + transactionid, + virtualxid, + object, + userlock, or + advisory @@ -8994,7 +9087,7 @@ SCRAM-SHA-256$<iteration count>:<salt>< Column number targeted by the lock (the - classid and objid refer to the + classid and objid refer to the table itself), or zero if the target is some other general database object, or null if the target is not a general database object @@ -9022,7 +9115,7 @@ SCRAM-SHA-256$<iteration count>:<salt>< text Name of the lock mode held or desired by this process (see and ) + linkend="locking-tables"/> and ) granted @@ -9076,23 +9169,23 @@ SCRAM-SHA-256$<iteration count>:<salt>< Advisory locks can be acquired on keys consisting of either a single bigint value or two integer values. A bigint key is displayed with its - high-order half in the classid column, its low-order half - in the objid column, and objsubid equal + high-order half in the classid column, its low-order half + in the objid column, and objsubid equal to 1. The original bigint value can be reassembled with the expression (classid::bigint << 32) | objid::bigint. Integer keys are displayed with the first key in the - classid column, the second key in the objid - column, and objsubid equal to 2. The actual meaning of + classid column, the second key in the objid + column, and objsubid equal to 2. The actual meaning of the keys is up to the user. Advisory locks are local to each database, - so the database column is meaningful for an advisory lock. + so the database column is meaningful for an advisory lock. pg_locks provides a global view of all locks in the database cluster, not only those relevant to the current database. Although its relation column can be joined - against pg_class.oid to identify locked + against pg_class.oid to identify locked relations, this will only work correctly for relations in the current database (those for which the database column is either the current database's OID or zero). @@ -9110,7 +9203,7 @@ SELECT * FROM pg_locks pl LEFT JOIN pg_stat_activity psa ON pl.pid = psa.pid; Also, if you are using prepared transactions, the - virtualtransaction column can be joined to the + virtualtransaction column can be joined to the transaction column of the pg_prepared_xacts view to get more information on prepared transactions that hold locks. @@ -9132,8 +9225,8 @@ SELECT * FROM pg_locks pl LEFT JOIN pg_prepared_xacts ppx information about which processes are ahead of which others in lock wait queues, nor information about which processes are parallel workers running on behalf of which other client sessions. It is better to use - the pg_blocking_pids() function - (see ) to identify which + the pg_blocking_pids() function + (see ) to identify which process(es) a waiting process is blocked behind. @@ -9141,10 +9234,10 @@ SELECT * FROM pg_locks pl LEFT JOIN pg_prepared_xacts ppx The pg_locks view displays data from both the regular lock manager and the predicate lock manager, which are separate systems; in addition, the regular lock manager subdivides its - locks into regular and fast-path locks. + locks into regular and fast-path locks. This data is not guaranteed to be entirely consistent. 
When the view is queried, - data on fast-path locks (with fastpath = true) + data on fast-path locks (with fastpath = true) is gathered from each backend one at a time, without freezing the state of the entire lock manager, so it is possible for locks to be taken or released while information is gathered. Note, however, that these locks are @@ -9187,7 +9280,7 @@ SELECT * FROM pg_locks pl LEFT JOIN pg_prepared_xacts ppx
- <structname>pg_matviews</> Columns + <structname>pg_matviews</structname> Columns @@ -9260,7 +9353,7 @@ SELECT * FROM pg_locks pl LEFT JOIN pg_prepared_xacts ppx
- <structname>pg_policies</> Columns + <structname>pg_policies</structname> Columns @@ -9338,7 +9431,7 @@ SELECT * FROM pg_locks pl LEFT JOIN pg_prepared_xacts ppx The pg_prepared_statements view displays all the prepared statements that are available in the current - session. See for more information about prepared + session. See for more information about prepared statements. @@ -9346,11 +9439,11 @@ SELECT * FROM pg_locks pl LEFT JOIN pg_prepared_xacts ppx pg_prepared_statements contains one row for each prepared statement. Rows are added to the view when a new prepared statement is created and removed when a prepared statement - is released (for example, via the command). + is released (for example, via the command).
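A minimal sketch (the statement name is arbitrary):

PREPARE sketch_stmt (int) AS SELECT $1 + 1;
SELECT name, statement, parameter_types, from_sql
FROM pg_prepared_statements;
DEALLOCATE sketch_stmt;  -- removes the row again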
- <structname>pg_prepared_statements</> Columns + <structname>pg_prepared_statements</structname> Columns @@ -9426,7 +9519,7 @@ SELECT * FROM pg_locks pl LEFT JOIN pg_prepared_xacts ppx The view pg_prepared_xacts displays information about transactions that are currently prepared for two-phase - commit (see for details). + commit (see for details). @@ -9436,7 +9529,7 @@ SELECT * FROM pg_locks pl LEFT JOIN pg_prepared_xacts ppx
- <structname>pg_prepared_xacts</> Columns + <structname>pg_prepared_xacts</structname> Columns @@ -9570,7 +9663,7 @@ SELECT * FROM pg_locks pl LEFT JOIN pg_prepared_xacts ppx The pg_replication_origin_status view contains information about how far replay for a certain origin has progressed. For more on replication origins - see . + see .
@@ -9639,7 +9732,7 @@ SELECT * FROM pg_locks pl LEFT JOIN pg_prepared_xacts ppx For more on replication slots, - see and . + see and .
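For illustration (this assumes at least one slot has been created; several of the columns can be NULL, as described below):

-- existing slots and how far their consumers have progressed
SELECT slot_name, plugin, slot_type, database, active,
       restart_lsn, confirmed_flush_lsn
FROM pg_replication_slots;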
@@ -9675,7 +9768,7 @@ SELECT * FROM pg_locks pl LEFT JOIN pg_prepared_xacts ppx slot_typetext - The slot type - physical or logical + The slot type - physical or logical @@ -9746,7 +9839,8 @@ SELECT * FROM pg_locks pl LEFT JOIN pg_prepared_xacts ppx The address (LSN) of oldest WAL which still might be required by the consumer of this slot and thus won't be - automatically removed during checkpoints. + automatically removed during checkpoints. NULL + if the LSN of this slot has never been reserved. @@ -9756,7 +9850,7 @@ SELECT * FROM pg_locks pl LEFT JOIN pg_prepared_xacts ppx The address (LSN) up to which the logical slot's consumer has confirmed receiving data. Data older than this is - not available anymore. NULL for physical slots. + not available anymore. NULL for physical slots. @@ -9786,7 +9880,7 @@ SELECT * FROM pg_locks pl LEFT JOIN pg_prepared_xacts ppx
- <structname>pg_roles</> Columns + <structname>pg_roles</structname> Columns @@ -9869,7 +9963,7 @@ SELECT * FROM pg_locks pl LEFT JOIN pg_prepared_xacts ppx rolpasswordtext - Not the password (always reads as ********) + Not the password (always reads as ********) @@ -9886,7 +9980,7 @@ SELECT * FROM pg_locks pl LEFT JOIN pg_prepared_xacts ppx Role bypasses every row level security policy, see - for more information. + for more information. @@ -9922,7 +10016,7 @@ SELECT * FROM pg_locks pl LEFT JOIN pg_prepared_xacts ppx
- <structname>pg_rules</> Columns + <structname>pg_rules</structname> Columns @@ -9963,9 +10057,9 @@ SELECT * FROM pg_locks pl LEFT JOIN pg_prepared_xacts ppx
- The pg_rules view excludes the ON SELECT rules + The pg_rules view excludes the ON SELECT rules of views and materialized views; those can be seen in - pg_views and pg_matviews. + pg_views and pg_matviews. @@ -9980,11 +10074,11 @@ SELECT * FROM pg_locks pl LEFT JOIN pg_prepared_xacts ppx The view pg_seclabels provides information about security labels. It is an easier-to-query version of the - pg_seclabel catalog. + pg_seclabel catalog. - <structname>pg_seclabels</> Columns + <structname>pg_seclabels</structname> Columns @@ -10014,7 +10108,7 @@ SELECT * FROM pg_locks pl LEFT JOIN pg_prepared_xacts ppx For a security label on a table column, this is the column number (the - objoid and classoid refer to + objoid and classoid refer to the table itself). For all other object types, this column is zero. @@ -10074,7 +10168,7 @@ SELECT * FROM pg_locks pl LEFT JOIN pg_prepared_xacts ppx
- <structname>pg_sequences</> Columns + <structname>pg_sequences</structname> Columns @@ -10172,15 +10266,15 @@ SELECT * FROM pg_locks pl LEFT JOIN pg_prepared_xacts ppx The view pg_settings provides access to run-time parameters of the server. It is essentially an alternative - interface to the - and commands. + interface to the + and commands. It also provides access to some facts about each parameter that are - not directly available from SHOW, such as minimum and + not directly available from SHOW, such as minimum and maximum values.
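As an example of information that SHOW alone does not expose (the parameter name is chosen arbitrarily), together with the UPDATE equivalence described further below:

SELECT name, setting, unit, context, min_val, max_val, boot_val
FROM pg_settings
WHERE name = 'work_mem';
-- equivalent to SET work_mem = '128MB' for the current session
UPDATE pg_settings SET setting = '128MB' WHERE name = 'work_mem';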
- <structname>pg_settings</> Columns + <structname>pg_settings</structname> Columns @@ -10229,8 +10323,8 @@ SELECT * FROM pg_locks pl LEFT JOIN pg_prepared_xacts ppx vartype text - Parameter type (bool, enum, - integer, real, or string) + Parameter type (bool, enum, + integer, real, or string) @@ -10275,7 +10369,7 @@ SELECT * FROM pg_locks pl LEFT JOIN pg_prepared_xacts ppx values set from sources other than configuration files, or when examined by a user who is neither a superuser or a member of pg_read_all_settings); helpful when using - include directives in configuration files + include directives in configuration files sourceline @@ -10353,7 +10447,7 @@ SELECT * FROM pg_locks pl LEFT JOIN pg_prepared_xacts ppx Changes to these settings can be made in postgresql.conf without restarting the server. They can also be set for a particular session in the connection request - packet (for example, via libpq's PGOPTIONS + packet (for example, via libpq's PGOPTIONS environment variable), but only if the connecting user is a superuser. However, these settings never change in a session after it is started. If you change them in postgresql.conf, send a @@ -10371,7 +10465,7 @@ SELECT * FROM pg_locks pl LEFT JOIN pg_prepared_xacts ppx Changes to these settings can be made in postgresql.conf without restarting the server. They can also be set for a particular session in the connection request - packet (for example, via libpq's PGOPTIONS + packet (for example, via libpq's PGOPTIONS environment variable); any user can make such a change for their session. However, these settings never change in a session after it is started. If you change them in postgresql.conf, send a @@ -10387,10 +10481,10 @@ SELECT * FROM pg_locks pl LEFT JOIN pg_prepared_xacts ppx These settings can be set from postgresql.conf, - or within a session via the SET command; but only superusers - can change them via SET. Changes in + or within a session via the SET command; but only superusers + can change them via SET. Changes in postgresql.conf will affect existing sessions - only if no session-local value has been established with SET. + only if no session-local value has been established with SET. @@ -10400,17 +10494,17 @@ SELECT * FROM pg_locks pl LEFT JOIN pg_prepared_xacts ppx These settings can be set from postgresql.conf, - or within a session via the SET command. Any user is + or within a session via the SET command. Any user is allowed to change their session-local value. Changes in postgresql.conf will affect existing sessions - only if no session-local value has been established with SET. + only if no session-local value has been established with SET. - See for more information about the various + See for more information about the various ways to change these parameters. @@ -10418,7 +10512,7 @@ SELECT * FROM pg_locks pl LEFT JOIN pg_prepared_xacts ppx The pg_settings view cannot be inserted into or deleted from, but it can be updated. An UPDATE applied to a row of pg_settings is equivalent to executing - the command on that named + the command on that named parameter. The change only affects the value used by the current session. If an UPDATE is issued within a transaction that is later aborted, the effects of the UPDATE command @@ -10442,7 +10536,7 @@ SELECT * FROM pg_locks pl LEFT JOIN pg_prepared_xacts ppx compatibility: it emulates a catalog that existed in PostgreSQL before version 8.1. It shows properties of all roles that are marked as - rolcanlogin in + rolcanlogin in pg_authid. 
@@ -10455,7 +10549,7 @@ SELECT * FROM pg_locks pl LEFT JOIN pg_prepared_xacts ppx
- <structname>pg_shadow</> Columns + <structname>pg_shadow</structname> Columns @@ -10512,7 +10606,7 @@ SELECT * FROM pg_locks pl LEFT JOIN pg_prepared_xacts ppx User bypasses every row level security policy, see - for more information. + for more information. @@ -10527,7 +10621,7 @@ SELECT * FROM pg_locks pl LEFT JOIN pg_prepared_xacts ppx valuntil - abstime + timestamptz Password expiry time (only used for password authentication) @@ -10569,7 +10663,7 @@ SELECT * FROM pg_locks pl LEFT JOIN pg_prepared_xacts ppx
- <structname>pg_stats</> Columns + <structname>pg_stats</structname> Columns @@ -10632,7 +10726,7 @@ SELECT * FROM pg_locks pl LEFT JOIN pg_prepared_xacts ppx If greater than zero, the estimated number of distinct values in the column. If less than zero, the negative of the number of distinct values divided by the number of rows. (The negated form is used when - ANALYZE believes that the number of distinct values is + ANALYZE believes that the number of distinct values is likely to increase as the table grows; the positive form is used when the column seems to have a fixed number of possible values.) For example, -1 indicates a unique column in which the number of distinct @@ -10668,10 +10762,10 @@ SELECT * FROM pg_locks pl LEFT JOIN pg_prepared_xacts ppx A list of values that divide the column's values into groups of approximately equal population. The values in - most_common_vals, if present, are omitted from this + most_common_vals, if present, are omitted from this histogram calculation. (This column is null if the column data type - does not have a < operator or if the - most_common_vals list accounts for the entire + does not have a < operator or if the + most_common_vals list accounts for the entire population.) @@ -10686,7 +10780,7 @@ SELECT * FROM pg_locks pl LEFT JOIN pg_prepared_xacts ppx When the value is near -1 or +1, an index scan on the column will be estimated to be cheaper than when it is near zero, due to reduction of random access to the disk. (This column is null if the column data - type does not have a < operator.) + type does not have a < operator.) @@ -10730,9 +10824,9 @@ SELECT * FROM pg_locks pl LEFT JOIN pg_prepared_xacts ppx The maximum number of entries in the array fields can be controlled on a - column-by-column basis using the ALTER TABLE SET STATISTICS + column-by-column basis using the ALTER TABLE SET STATISTICS command, or globally by setting the - run-time parameter. + run-time parameter. @@ -10750,7 +10844,7 @@ SELECT * FROM pg_locks pl LEFT JOIN pg_prepared_xacts ppx
- <structname>pg_tables</> Columns + <structname>pg_tables</structname> Columns @@ -10827,11 +10921,11 @@ SELECT * FROM pg_locks pl LEFT JOIN pg_prepared_xacts ppx The view pg_timezone_abbrevs provides a list of time zone abbreviations that are currently recognized by the datetime input routines. The contents of this view change when the - run-time parameter is modified. + run-time parameter is modified.
- <structname>pg_timezone_abbrevs</> Columns + <structname>pg_timezone_abbrevs</structname> Columns @@ -10864,7 +10958,7 @@ SELECT * FROM pg_locks pl LEFT JOIN pg_prepared_xacts ppx While most timezone abbreviations represent fixed offsets from UTC, there are some that have historically varied in value - (see for more information). + (see for more information). In such cases this view presents their current meaning. @@ -10879,7 +10973,7 @@ SELECT * FROM pg_locks pl LEFT JOIN pg_prepared_xacts ppx The view pg_timezone_names provides a list - of time zone names that are recognized by SET TIMEZONE, + of time zone names that are recognized by SET TIMEZONE, along with their associated abbreviations, UTC offsets, and daylight-savings status. (Technically, PostgreSQL does not use UTC because leap @@ -10888,11 +10982,11 @@ SELECT * FROM pg_locks pl LEFT JOIN pg_prepared_xacts ppx linkend="view-pg-timezone-abbrevs">pg_timezone_abbrevs, many of these names imply a set of daylight-savings transition date rules. Therefore, the associated information changes across local DST boundaries. The displayed information is computed based on the current - value of CURRENT_TIMESTAMP. + value of CURRENT_TIMESTAMP.
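For example (the zone name is arbitrary; the offset and is_dst values depend on the date of the query):

SELECT name, abbrev, utc_offset, is_dst
FROM pg_timezone_names
WHERE name = 'Europe/Stockholm';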
- <structname>pg_timezone_names</> Columns + <structname>pg_timezone_names</structname> Columns @@ -10945,7 +11039,7 @@ SELECT * FROM pg_locks pl LEFT JOIN pg_prepared_xacts ppx
- <structname>pg_user</> Columns + <structname>pg_user</structname> Columns @@ -10994,19 +11088,19 @@ SELECT * FROM pg_locks pl LEFT JOIN pg_prepared_xacts ppx bool User bypasses every row level security policy, see - for more information. + for more information. passwd text - Not the password (always reads as ********) + Not the password (always reads as ********) valuntil - abstime + timestamptz Password expiry time (only used for password authentication) @@ -11038,7 +11132,7 @@ SELECT * FROM pg_locks pl LEFT JOIN pg_prepared_xacts ppx
- <structname>pg_user_mappings</> Columns + <structname>pg_user_mappings</structname> Columns @@ -11095,7 +11189,7 @@ SELECT * FROM pg_locks pl LEFT JOIN pg_prepared_xacts ppx text[] - User mapping specific options, as keyword=value strings + User mapping specific options, as keyword=value strings @@ -11110,12 +11204,12 @@ SELECT * FROM pg_locks pl LEFT JOIN pg_prepared_xacts ppx current user is the user being mapped, and owns the server or - holds USAGE privilege on it + holds USAGE privilege on it - current user is the server owner and mapping is for PUBLIC + current user is the server owner and mapping is for PUBLIC @@ -11142,7 +11236,7 @@ SELECT * FROM pg_locks pl LEFT JOIN pg_prepared_xacts ppx
- <structname>pg_views</> Columns + <structname>pg_views</structname> Columns diff --git a/doc/src/sgml/charset.sgml b/doc/src/sgml/charset.sgml index 48ecfc5f48..a6143ef8a7 100644 --- a/doc/src/sgml/charset.sgml +++ b/doc/src/sgml/charset.sgml @@ -15,8 +15,8 @@ Using the locale features of the operating system to provide locale-specific collation order, number formatting, translated messages, and other aspects. - This is covered in and - . + This is covered in and + . @@ -25,7 +25,7 @@ Providing a number of different character sets to support storing text in all kinds of languages, and providing character set translation between client and server. - This is covered in . + This is covered in . @@ -35,12 +35,12 @@ Locale Support - locale + locale - Locale support refers to an application respecting + Locale support refers to an application respecting cultural preferences regarding alphabets, sorting, number - formatting, etc. PostgreSQL uses the standard ISO + formatting, etc. PostgreSQL uses the standard ISO C and POSIX locale facilities provided by the server operating system. For additional information refer to the documentation of your system. @@ -67,14 +67,14 @@ initdb --locale=sv_SE This example for Unix systems sets the locale to Swedish - (sv) as spoken - in Sweden (SE). Other possibilities might include - en_US (U.S. English) and fr_CA (French + (sv) as spoken + in Sweden (SE). Other possibilities might include + en_US (U.S. English) and fr_CA (French Canadian). If more than one character set can be used for a locale then the specifications can take the form - language_territory.codeset. For example, - fr_BE.UTF-8 represents the French language (fr) as - spoken in Belgium (BE), with a UTF-8 character set + language_territory.codeset. For example, + fr_BE.UTF-8 represents the French language (fr) as + spoken in Belgium (BE), with a UTF-8 character set encoding. @@ -82,9 +82,9 @@ initdb --locale=sv_SE What locales are available on your system under what names depends on what was provided by the operating system vendor and what was installed. On most Unix systems, the command - locale -a will provide a list of available locales. - Windows uses more verbose locale names, such as German_Germany - or Swedish_Sweden.1252, but the principles are the same. + locale -a will provide a list of available locales. + Windows uses more verbose locale names, such as German_Germany + or Swedish_Sweden.1252, but the principles are the same. @@ -97,28 +97,28 @@ initdb --locale=sv_SE - LC_COLLATE - String sort order + LC_COLLATE + String sort order - LC_CTYPE - Character classification (What is a letter? Its upper-case equivalent?) + LC_CTYPE + Character classification (What is a letter? Its upper-case equivalent?) - LC_MESSAGES - Language of messages + LC_MESSAGES + Language of messages - LC_MONETARY - Formatting of currency amounts + LC_MONETARY + Formatting of currency amounts - LC_NUMERIC - Formatting of numbers + LC_NUMERIC + Formatting of numbers - LC_TIME - Formatting of dates and times + LC_TIME + Formatting of dates and times @@ -133,8 +133,8 @@ initdb --locale=sv_SE If you want the system to behave as if it had no locale support, - use the special locale name C, or equivalently - POSIX. + use the special locale name C, or equivalently + POSIX. @@ -146,7 +146,7 @@ initdb --locale=sv_SE the sort order of indexes, so they must be kept fixed, or indexes on text columns would become corrupt. (But you can alleviate this restriction using collations, as discussed - in .) + in .) 
The default values for these categories are determined when initdb is run, and those values are used when new databases are created, unless @@ -157,7 +157,7 @@ initdb --locale=sv_SE The other locale categories can be changed whenever desired by setting the server configuration parameters that have the same name as the locale categories (see for details). The values + linkend="runtime-config-client-format"/> for details). The values that are chosen by initdb are actually only written into the configuration file postgresql.conf to serve as defaults when the server is started. If you remove these @@ -192,14 +192,14 @@ initdb --locale=sv_SE settings for the purpose of setting the language of messages. If in doubt, please refer to the documentation of your operating system, in particular the documentation about - gettext. + gettext. To enable messages to be translated to the user's preferred language, NLS must have been selected at build time - (configure --enable-nls). All other locale support is + (configure --enable-nls). All other locale support is built in automatically. @@ -213,64 +213,64 @@ initdb --locale=sv_SE - Sort order in queries using ORDER BY or the standard + Sort order in queries using ORDER BY or the standard comparison operators on textual data - ORDER BYand locales + ORDER BYand locales - The upper, lower, and initcap + The upper, lower, and initcap functions - upperand locales - lowerand locales + upperand locales + lowerand locales - Pattern matching operators (LIKE, SIMILAR TO, + Pattern matching operators (LIKE, SIMILAR TO, and POSIX-style regular expressions); locales affect both case insensitive matching and the classification of characters by character-class regular expressions - LIKEand locales - regular expressionsand locales + LIKEand locales + regular expressionsand locales - The to_char family of functions - to_charand locales + The to_char family of functions + to_charand locales - The ability to use indexes with LIKE clauses + The ability to use indexes with LIKE clauses - The drawback of using locales other than C or - POSIX in PostgreSQL is its performance + The drawback of using locales other than C or + POSIX in PostgreSQL is its performance impact. It slows character handling and prevents ordinary indexes - from being used by LIKE. For this reason use locales + from being used by LIKE. For this reason use locales only if you actually need them. - As a workaround to allow PostgreSQL to use indexes - with LIKE clauses under a non-C locale, several custom + As a workaround to allow PostgreSQL to use indexes + with LIKE clauses under a non-C locale, several custom operator classes exist. These allow the creation of an index that performs a strict character-by-character comparison, ignoring - locale comparison rules. Refer to + locale comparison rules. Refer to for more information. Another approach is to create indexes using - the C collation, as discussed in - . + the C collation, as discussed in + . @@ -286,20 +286,20 @@ initdb --locale=sv_SE - Check that PostgreSQL is actually using the locale - that you think it is. The LC_COLLATE and LC_CTYPE + Check that PostgreSQL is actually using the locale + that you think it is. The LC_COLLATE and LC_CTYPE settings are determined when a database is created, and cannot be changed except by creating a new database. 
Other locale - settings including LC_MESSAGES and LC_MONETARY + settings including LC_MESSAGES and LC_MONETARY are initially determined by the environment the server is started in, but can be changed on-the-fly. You can check the active locale - settings using the SHOW command. + settings using the SHOW command. - The directory src/test/locale in the source + The directory src/test/locale in the source distribution contains a test suite for - PostgreSQL's locale support. + PostgreSQL's locale support. @@ -313,10 +313,10 @@ initdb --locale=sv_SE Maintaining catalogs of message translations requires the on-going efforts of many volunteers that want to see - PostgreSQL speak their preferred language well. + PostgreSQL speak their preferred language well. If messages in your language are currently not available or not fully translated, your assistance would be appreciated. If you want to - help, refer to or write to the developers' + help, refer to or write to the developers' mailing list. @@ -326,7 +326,7 @@ initdb --locale=sv_SE Collation Support - collation + collation The collation feature allows specifying the sort order and character @@ -370,9 +370,9 @@ initdb --locale=sv_SE function or operator call is derived from the arguments, as described below. In addition to comparison operators, collations are taken into account by functions that convert between lower and upper case - letters, such as lower, upper, and - initcap; by pattern matching operators; and by - to_char and related functions. + letters, such as lower, upper, and + initcap; by pattern matching operators; and by + to_char and related functions. @@ -452,7 +452,7 @@ SELECT a < ('foo' COLLATE "fr_FR") FROM test1; SELECT a < b FROM test1; the parser cannot determine which collation to apply, since the - a and b columns have conflicting + a and b columns have conflicting implicit collations. Since the < operator does need to know which collation to use, this will result in an error. The error can be resolved by attaching an explicit collation @@ -468,7 +468,7 @@ SELECT a COLLATE "de_DE" < b FROM test1; SELECT a || b FROM test1; - does not result in an error, because the || operator + does not result in an error, because the || operator does not care about collations: its result is the same regardless of the collation. @@ -486,8 +486,8 @@ SELECT * FROM test1 ORDER BY a || 'foo'; SELECT * FROM test1 ORDER BY a || b; - results in an error, because even though the || operator - doesn't need to know a collation, the ORDER BY clause does. + results in an error, because even though the || operator + doesn't need to know a collation, the ORDER BY clause does. As before, the conflict can be resolved with an explicit collation specifier: @@ -508,14 +508,14 @@ SELECT * FROM test1 ORDER BY a || b COLLATE "fr_FR"; operating system C library. These are the locales that most tools provided by the operating system use. Another provider is icu, which uses the external - ICUICU library. ICU locales can only be + ICUICU library. ICU locales can only be used if support for ICU was configured when PostgreSQL was built. A collation object provided by libc maps to a combination of LC_COLLATE and LC_CTYPE - settings. (As + settings, as accepted by the setlocale() system library call. (As the name would suggest, the main purpose of a collation is to set LC_COLLATE, which controls the sort order. 
But it is rarely necessary in practice to have an @@ -524,7 +524,7 @@ SELECT * FROM test1 ORDER BY a || b COLLATE "fr_FR"; these under one concept than to create another infrastructure for setting LC_CTYPE per expression.) Also, a libc collation - is tied to a character set encoding (see ). + is tied to a character set encoding (see ). The same collation name may exist for different encodings. @@ -541,14 +541,14 @@ SELECT * FROM test1 ORDER BY a || b COLLATE "fr_FR"; Standard Collations - On all platforms, the collations named default, - C, and POSIX are available. Additional + On all platforms, the collations named default, + C, and POSIX are available. Additional collations may be available depending on operating system support. - The default collation selects the LC_COLLATE + The default collation selects the LC_COLLATE and LC_CTYPE values specified at database creation time. - The C and POSIX collations both specify - traditional C behavior, in which only the ASCII letters - A through Z + The C and POSIX collations both specify + traditional C behavior, in which only the ASCII letters + A through Z are treated as letters, and sorting is done strictly by character code byte values. @@ -565,7 +565,7 @@ SELECT * FROM test1 ORDER BY a || b COLLATE "fr_FR"; If the operating system provides support for using multiple locales - within a single program (newlocale and related functions), + within a single program (newlocale and related functions), or if support for ICU is configured, then when a database cluster is initialized, initdb populates the system catalog pg_collation with @@ -605,7 +605,7 @@ SELECT * FROM test1 ORDER BY a || b COLLATE "fr_FR"; for LC_COLLATE and LC_CTYPE, or if new locales are installed in the operating system after the database system was initialized, then a new collation may be created using - the command. + the command. New operating system locales can also be imported en masse using the pg_import_system_collations() function. @@ -618,8 +618,8 @@ SELECT * FROM test1 ORDER BY a || b COLLATE "fr_FR"; within a given database even though it would not be unique globally. Use of the stripped collation names is recommended, since it will make one less thing you need to change if you decide to change to - another database encoding. Note however that the default, - C, and POSIX collations can be used regardless of + another database encoding. Note however that the default, + C, and POSIX collations can be used regardless of the database encoding. @@ -630,7 +630,7 @@ SELECT * FROM test1 ORDER BY a || b COLLATE "fr_FR"; SELECT a COLLATE "C" < b COLLATE "POSIX" FROM test1; - will draw an error even though the C and POSIX + will draw an error even though the C and POSIX collations have identical behaviors. Mixing stripped and non-stripped collation names is therefore not recommended. @@ -640,22 +640,19 @@ SELECT a COLLATE "C" < b COLLATE "POSIX" FROM test1; ICU collations - Collations provided by ICU are created with names in BCP 47 language tag + With ICU, it is not sensible to enumerate all possible locale names. ICU + uses a particular naming system for locales, but there are many more ways + to name a locale than there are actually distinct locales. + initdb uses the ICU APIs to extract a set of distinct + locales to populate the initial set of collations. Collations provided by + ICU are created in the SQL environment with names in BCP 47 language tag format, with a private use extension -x-icu appended, to distinguish them from - libc locales. 
So de-x-icu would be an example name. + libc locales. - With ICU, it is not sensible to enumerate all possible locale names. ICU - uses a particular naming system for locales, but there are many more ways - to name a locale than there are actually distinct locales. (In fact, any - string will be accepted as a locale name.) - See for - information on ICU locale naming. initdb uses the ICU - APIs to extract a set of locales with distinct collation rules to populate - the initial set of collations. Here are some example collations that - might be created: + Here are some example collations that might be created: @@ -665,31 +662,18 @@ SELECT a COLLATE "C" < b COLLATE "POSIX" FROM test1; - - de-u-co-phonebk-x-icu - - German collation, phone book variant - - - de-AT-x-icu German collation for Austria, default variant - (As of this writing, there is no, - say, de-DE-x-icu or de-CH-x-icu, - because those are equivalent to de-x-icu.) + (There are also, say, de-DE-x-icu + or de-CH-x-icu, but as of this writing, they are + equivalent to de-x-icu.) - - de-AT-u-co-phonebk-x-icu - - German collation for Austria, phone book variant - - und-x-icu (for undefined) @@ -707,16 +691,187 @@ SELECT a COLLATE "C" < b COLLATE "POSIX" FROM test1; database encoding is one of these, ICU collation entries in pg_collation are ignored. Attempting to use one will draw an error along the lines of collation "de-x-icu" for - encoding "WIN874" does not exist. + encoding "WIN874" does not exist. + + + + + Creating New Collation Objects + + + If the standard and predefined collations are not sufficient, users can + create their own collation objects using the SQL + command . + + + + The standard and predefined collations are in the + schema pg_catalog, like all predefined objects. + User-defined collations should be created in user schemas. This also + ensures that they are saved by pg_dump. + + + + libc collations + + + New libc collations can be created like this: + +CREATE COLLATION german (provider = libc, locale = 'de_DE'); + + The exact values that are acceptable for the locale + clause in this command depend on the operating system. On Unix-like + systems, the command locale -a will show a list. + + + + Since the predefined libc collations already include all collations + defined in the operating system when the database instance is + initialized, it is not often necessary to manually create new ones. + Reasons might be if a different naming system is desired (in which case + see also ) or if the operating system has + been upgraded to provide new locale definitions (in which case see + also pg_import_system_collations()). + - - + + ICU collations + + + ICU allows collations to be customized beyond the basic language+country + set that is preloaded by initdb. Users are encouraged + to define their own collation objects that make use of these facilities to + suit the sorting behavior to their requirements. + See + and for + information on ICU locale naming. The set of acceptable names and + attributes depends on the particular ICU version. + + + + Here are some examples: + + + + CREATE COLLATION "de-u-co-phonebk-x-icu" (provider = icu, locale = 'de-u-co-phonebk'); + CREATE COLLATION "de-u-co-phonebk-x-icu" (provider = icu, locale = 'de@collation=phonebook'); + + German collation with phone book collation type + + The first example selects the ICU locale using a language + tag per BCP 47. The second example uses the traditional + ICU-specific locale syntax. 
The first style is preferred going + forward, but it is not supported by older ICU versions. + + + Note that you can name the collation objects in the SQL environment + anything you want. In this example, we follow the naming style that + the predefined collations use, which in turn also follow BCP 47, but + that is not required for user-defined collations. + + + + + + CREATE COLLATION "und-u-co-emoji-x-icu" (provider = icu, locale = 'und-u-co-emoji'); + CREATE COLLATION "und-u-co-emoji-x-icu" (provider = icu, locale = '@collation=emoji'); + + + Root collation with Emoji collation type, per Unicode Technical Standard #51 + + + Observe how in the traditional ICU locale naming system, the root + locale is selected by an empty string. + + + + + + CREATE COLLATION digitslast (provider = icu, locale = 'en-u-kr-latn-digit'); + CREATE COLLATION digitslast (provider = icu, locale = 'en@colReorder=latn-digit'); + + + Sort digits after Latin letters. (The default is digits before letters.) + + + + + + CREATE COLLATION upperfirst (provider = icu, locale = 'en-u-kf-upper'); + CREATE COLLATION upperfirst (provider = icu, locale = 'en@colCaseFirst=upper'); + + + Sort upper-case letters before lower-case letters. (The default is + lower-case letters first.) + + + + + + CREATE COLLATION special (provider = icu, locale = 'en-u-kf-upper-kr-latn-digit'); + CREATE COLLATION special (provider = icu, locale = 'en@colCaseFirst=upper;colReorder=latn-digit'); + + + Combines both of the above options. + + + + + + CREATE COLLATION numeric (provider = icu, locale = 'en-u-kn-true'); + CREATE COLLATION numeric (provider = icu, locale = 'en@colNumeric=yes'); + + + Numeric ordering, sorts sequences of digits by their numeric value, + for example: A-21 < A-123 + (also known as natural sort). + + + + + + See Unicode + Technical Standard #35 + and BCP 47 for + details. The list of possible collation types (co + subtag) can be found in + the CLDR + repository. + The ICU Locale + Explorer can be used to check the details of a particular locale + definition. The examples using the k* subtags require + at least ICU version 54. + + + + Note that while this system allows creating collations that ignore + case or ignore accents or similar (using + the ks key), PostgreSQL does not at the moment allow + such collations to act in a truly case- or accent-insensitive manner. Any + strings that compare equal according to the collation but are not + byte-wise equal will be sorted according to their byte values. + + + + + By design, ICU will accept almost any string as a locale name and match + it to the closest locale it can provide, using the fallback procedure + described in its documentation. Thus, there will be no direct feedback + if a collation specification is composed using features that the given + ICU installation does not actually support. It is therefore recommended + to create application-level test cases to check that the collation + definitions satisfy one's requirements. 
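As a minimal sketch of such a test (assuming an ICU-enabled build; the collation name testcoll_num is arbitrary), the expected ordering can be asserted directly in SQL:

CREATE COLLATION testcoll_num (provider = icu, locale = 'en-u-kn-true');
SELECT s FROM (VALUES ('A-123'), ('A-21')) AS v(s)
    ORDER BY s COLLATE testcoll_num;    -- expected order: A-21 before A-123

If the installed ICU version does not support the requested attributes, the ordering will silently differ from the expectation, which is exactly what such a test is meant to catch.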
+ + + + + Copying Collations - The command can also be used to + The command can also be used to create a new collation from an existing collation, which can be useful to be able to use operating-system-independent collation names in applications, create compatibility names, or use an ICU-provided collation @@ -724,16 +879,9 @@ SELECT a COLLATE "C" < b COLLATE "POSIX" FROM test1; CREATE COLLATION german FROM "de_DE"; CREATE COLLATION french FROM "fr-x-icu"; -CREATE COLLATION "de-DE-x-icu" FROM "de-x-icu"; - - - The standard and predefined collations are in the - schema pg_catalog, like all predefined objects. - User-defined collations should be created in user schemas. This also - ensures that they are saved by pg_dump. - + @@ -741,30 +889,30 @@ CREATE COLLATION "de-DE-x-icu" FROM "de-x-icu"; Character Set Support - character set + character set The character set support in PostgreSQL allows you to store text in a variety of character sets (also called encodings), including single-byte character sets such as the ISO 8859 series and - multiple-byte character sets such as EUC (Extended Unix + multiple-byte character sets such as EUC (Extended Unix Code), UTF-8, and Mule internal code. All supported character sets can be used transparently by clients, but a few are not supported for use within the server (that is, as a server-side encoding). The default character set is selected while initializing your PostgreSQL database - cluster using initdb. It can be overridden when you + cluster using initdb. It can be overridden when you create a database, so you can have multiple databases each with a different character set. An important restriction, however, is that each database's character set - must be compatible with the database's LC_CTYPE (character - classification) and LC_COLLATE (string sort order) locale - settings. For C or - POSIX locale, any character set is allowed, but for other + must be compatible with the database's LC_CTYPE (character + classification) and LC_COLLATE (string sort order) locale + settings. For C or + POSIX locale, any character set is allowed, but for other libc-provided locales there is only one character set that will work correctly. (On Windows, however, UTF-8 encoding can be used with any locale.) @@ -776,7 +924,7 @@ CREATE COLLATION "de-DE-x-icu" FROM "de-x-icu"; Supported Character Sets - shows the character sets available + shows the character sets available for use in PostgreSQL. 
@@ -806,7 +954,7 @@ CREATE COLLATION "de-DE-x-icu" FROM "de-x-icu"; No No 1-2 - WIN950, Windows950 + WIN950, Windows950 EUC_CN @@ -869,11 +1017,11 @@ CREATE COLLATION "de-DE-x-icu" FROM "de-x-icu"; No No 1-2 - WIN936, Windows936 + WIN936, Windows936 ISO_8859_5 - ISO 8859-5, ECMA 113 + ISO 8859-5, ECMA 113 Latin/Cyrillic Yes Yes @@ -882,7 +1030,7 @@ CREATE COLLATION "de-DE-x-icu" FROM "de-x-icu"; ISO_8859_6 - ISO 8859-6, ECMA 114 + ISO 8859-6, ECMA 114 Latin/Arabic Yes Yes @@ -891,7 +1039,7 @@ CREATE COLLATION "de-DE-x-icu" FROM "de-x-icu"; ISO_8859_7 - ISO 8859-7, ECMA 118 + ISO 8859-7, ECMA 118 Latin/Greek Yes Yes @@ -900,7 +1048,7 @@ CREATE COLLATION "de-DE-x-icu" FROM "de-x-icu"; ISO_8859_8 - ISO 8859-8, ECMA 121 + ISO 8859-8, ECMA 121 Latin/Hebrew Yes Yes @@ -909,7 +1057,7 @@ CREATE COLLATION "de-DE-x-icu" FROM "de-x-icu"; JOHAB - JOHAB + JOHAB Korean (Hangul) No No @@ -923,7 +1071,7 @@ CREATE COLLATION "de-DE-x-icu" FROM "de-x-icu"; Yes Yes 1 - KOI8 + KOI8 KOI8U @@ -936,57 +1084,57 @@ CREATE COLLATION "de-DE-x-icu" FROM "de-x-icu"; LATIN1 - ISO 8859-1, ECMA 94 + ISO 8859-1, ECMA 94 Western European Yes Yes 1 - ISO88591 + ISO88591 LATIN2 - ISO 8859-2, ECMA 94 + ISO 8859-2, ECMA 94 Central European Yes Yes 1 - ISO88592 + ISO88592 LATIN3 - ISO 8859-3, ECMA 94 + ISO 8859-3, ECMA 94 South European Yes Yes 1 - ISO88593 + ISO88593 LATIN4 - ISO 8859-4, ECMA 94 + ISO 8859-4, ECMA 94 North European Yes Yes 1 - ISO88594 + ISO88594 LATIN5 - ISO 8859-9, ECMA 128 + ISO 8859-9, ECMA 128 Turkish Yes Yes 1 - ISO88599 + ISO88599 LATIN6 - ISO 8859-10, ECMA 144 + ISO 8859-10, ECMA 144 Nordic Yes Yes 1 - ISO885910 + ISO885910 LATIN7 @@ -995,7 +1143,7 @@ CREATE COLLATION "de-DE-x-icu" FROM "de-x-icu"; Yes Yes 1 - ISO885913 + ISO885913 LATIN8 @@ -1004,7 +1152,7 @@ CREATE COLLATION "de-DE-x-icu" FROM "de-x-icu"; Yes Yes 1 - ISO885914 + ISO885914 LATIN9 @@ -1013,16 +1161,16 @@ CREATE COLLATION "de-DE-x-icu" FROM "de-x-icu"; Yes Yes 1 - ISO885915 + ISO885915 LATIN10 - ISO 8859-16, ASRO SR 14111 + ISO 8859-16, ASRO SR 14111 Romanian Yes No 1 - ISO885916 + ISO885916 MULE_INTERNAL @@ -1040,7 +1188,7 @@ CREATE COLLATION "de-DE-x-icu" FROM "de-x-icu"; No No 1-2 - Mskanji, ShiftJIS, WIN932, Windows932 + Mskanji, ShiftJIS, WIN932, Windows932 SHIFT_JIS_2004 @@ -1054,7 +1202,7 @@ CREATE COLLATION "de-DE-x-icu" FROM "de-x-icu"; SQL_ASCII unspecified (see text) - any + any Yes No 1 @@ -1067,16 +1215,16 @@ CREATE COLLATION "de-DE-x-icu" FROM "de-x-icu"; No No 1-2 - WIN949, Windows949 + WIN949, Windows949 UTF8 Unicode, 8-bit - all + all Yes Yes 1-4 - Unicode + Unicode WIN866 @@ -1085,7 +1233,7 @@ CREATE COLLATION "de-DE-x-icu" FROM "de-x-icu"; Yes Yes 1 - ALT + ALT WIN874 @@ -1112,7 +1260,7 @@ CREATE COLLATION "de-DE-x-icu" FROM "de-x-icu"; Yes Yes 1 - WIN + WIN WIN1252 @@ -1175,30 +1323,30 @@ CREATE COLLATION "de-DE-x-icu" FROM "de-x-icu"; Yes Yes 1 - ABC, TCVN, TCVN5712, VSCII + ABC, TCVN, TCVN5712, VSCII
- Not all client APIs support all the listed character sets. For example, the - PostgreSQL - JDBC driver does not support MULE_INTERNAL, LATIN6, - LATIN8, and LATIN10. + Not all client APIs support all the listed character sets. For example, the + PostgreSQL + JDBC driver does not support MULE_INTERNAL, LATIN6, + LATIN8, and LATIN10. - The SQL_ASCII setting behaves considerably differently + The SQL_ASCII setting behaves considerably differently from the other settings. When the server character set is - SQL_ASCII, the server interprets byte values 0-127 + SQL_ASCII, the server interprets byte values 0-127 according to the ASCII standard, while byte values 128-255 are taken as uninterpreted characters. No encoding conversion will be done when - the setting is SQL_ASCII. Thus, this setting is not so + the setting is SQL_ASCII. Thus, this setting is not so much a declaration that a specific encoding is in use, as a declaration of ignorance about the encoding. In most cases, if you are working with any non-ASCII data, it is unwise to use the - SQL_ASCII setting because + SQL_ASCII setting because PostgreSQL will be unable to help you by converting or validating non-ASCII characters. @@ -1208,7 +1356,7 @@ CREATE COLLATION "de-DE-x-icu" FROM "de-x-icu"; Setting the Character Set - initdb defines the default character set (encoding) + initdb defines the default character set (encoding) for a PostgreSQL cluster. For example, @@ -1219,8 +1367,8 @@ initdb -E EUC_JP EUC_JP (Extended Unix Code for Japanese). You can use instead of if you prefer longer option strings. - If no option is - given, initdb attempts to determine the appropriate + If no or option is + given, initdb attempts to determine the appropriate encoding to use based on the specified or default locale. @@ -1240,11 +1388,11 @@ createdb -E EUC_KR -T template0 --lc-collate=ko_KR.euckr --lc-ctype=ko_KR.euckr CREATE DATABASE korean WITH ENCODING 'EUC_KR' LC_COLLATE='ko_KR.euckr' LC_CTYPE='ko_KR.euckr' TEMPLATE=template0; - Notice that the above commands specify copying the template0 + Notice that the above commands specify copying the template0 database. When copying any other database, the encoding and locale settings cannot be changed from those of the source database, because that might result in corrupt data. For more information see - . + . @@ -1272,7 +1420,7 @@ $ psql -l On most modern operating systems, PostgreSQL - can determine which character set is implied by the LC_CTYPE + can determine which character set is implied by the LC_CTYPE setting, and it will enforce that only the matching database encoding is used. On older systems it is your responsibility to ensure that you use the encoding expected by the locale you have selected. A mistake in @@ -1282,9 +1430,9 @@ $ psql -l PostgreSQL will allow superusers to create - databases with SQL_ASCII encoding even when - LC_CTYPE is not C or POSIX. As noted - above, SQL_ASCII does not enforce that the data stored in + databases with SQL_ASCII encoding even when + LC_CTYPE is not C or POSIX. As noted + above, SQL_ASCII does not enforce that the data stored in the database has any particular encoding, and so this choice poses risks of locale-dependent misbehavior. Using this combination of settings is deprecated and may someday be forbidden altogether. @@ -1299,9 +1447,9 @@ $ psql -l PostgreSQL supports automatic character set conversion between server and client for certain character set combinations. The conversion information is stored in the - pg_conversion system catalog. 
PostgreSQL + pg_conversion system catalog. PostgreSQL comes with some predefined conversions, as shown in . You can create a new + linkend="multibyte-translation-table"/>. You can create a new conversion using the SQL command CREATE CONVERSION. @@ -1335,6 +1483,13 @@ $ psql -l UTF8 + + EUC_JIS_2004 + EUC_JIS_2004, + SHIFT_JIS_2004, + UTF8 + + EUC_KR EUC_KR, @@ -1390,8 +1545,7 @@ $ psql -l JOHAB - JOHAB, - UTF8 + not supported as a server encoding @@ -1497,6 +1651,11 @@ $ psql -l not supported as a server encoding + + SHIFT_JIS_2004 + not supported as a server encoding + + SQL_ASCII any (no conversion will be performed) @@ -1615,7 +1774,7 @@ $ psql -l - libpq () has functions to control the client encoding. + libpq () has functions to control the client encoding. @@ -1626,14 +1785,14 @@ $ psql -l Setting the client encoding can be done with this SQL command: -SET CLIENT_ENCODING TO 'value'; +SET CLIENT_ENCODING TO 'value'; Also you can use the standard SQL syntax SET NAMES for this purpose: -SET NAMES 'value'; +SET NAMES 'value'; To query the current client encoding: @@ -1664,8 +1823,8 @@ RESET client_encoding; Using the configuration variable . If the - client_encoding variable is set, that client + linkend="guc-client-encoding"/>. If the + client_encoding variable is set, that client encoding is automatically selected when a connection to the server is made. (This can subsequently be overridden using any of the other methods mentioned above.) @@ -1684,9 +1843,9 @@ RESET client_encoding; - If the client character set is defined as SQL_ASCII, + If the client character set is defined as SQL_ASCII, encoding conversion is disabled, regardless of the server's character - set. Just as for the server, use of SQL_ASCII is unwise + set. Just as for the server, use of SQL_ASCII is unwise unless you are working with all-ASCII data. diff --git a/doc/src/sgml/chkpass.sgml b/doc/src/sgml/chkpass.sgml deleted file mode 100644 index 9f682d8981..0000000000 --- a/doc/src/sgml/chkpass.sgml +++ /dev/null @@ -1,95 +0,0 @@ - - - - chkpass - - - chkpass - - - - This module implements a data type chkpass that is - designed for storing encrypted passwords. - Each password is automatically converted to encrypted form upon entry, - and is always stored encrypted. To compare, simply compare against a clear - text password and the comparison function will encrypt it before comparing. - - - - There are provisions in the code to report an error if the password is - determined to be easily crackable. However, this is currently just - a stub that does nothing. - - - - If you precede an input string with a colon, it is assumed to be an - already-encrypted password, and is stored without further encryption. - This allows entry of previously-encrypted passwords. - - - - On output, a colon is prepended. This makes it possible to dump and reload - passwords without re-encrypting them. If you want the encrypted password - without the colon then use the raw() function. - This allows you to use the - type with things like Apache's Auth_PostgreSQL module. - - - - The encryption uses the standard Unix function crypt(), - and so it suffers - from all the usual limitations of that function; notably that only the - first eight characters of a password are considered. - - - - Note that the chkpass data type is not indexable. 
- - - - - Sample usage: - - - -test=# create table test (p chkpass); -CREATE TABLE -test=# insert into test values ('hello'); -INSERT 0 1 -test=# select * from test; - p ----------------- - :dVGkpXdOrE3ko -(1 row) - -test=# select raw(p) from test; - raw ---------------- - dVGkpXdOrE3ko -(1 row) - -test=# select p = 'hello' from test; - ?column? ----------- - t -(1 row) - -test=# select p = 'goodbye' from test; - ?column? ----------- - f -(1 row) - - - - Author - - - D'Arcy J.M. Cain (darcy@druid.net) - - - - diff --git a/doc/src/sgml/citext.sgml b/doc/src/sgml/citext.sgml index 9b4c68f7d4..b1fe7101b2 100644 --- a/doc/src/sgml/citext.sgml +++ b/doc/src/sgml/citext.sgml @@ -8,10 +8,10 @@ - The citext module provides a case-insensitive - character string type, citext. Essentially, it internally calls - lower when comparing values. Otherwise, it behaves almost - exactly like text. + The citext module provides a case-insensitive + character string type, citext. Essentially, it internally calls + lower when comparing values. Otherwise, it behaves almost + exactly like text. @@ -19,7 +19,7 @@ The standard approach to doing case-insensitive matches - in PostgreSQL has been to use the lower + in PostgreSQL has been to use the lower function when comparing values, for example @@ -35,19 +35,19 @@ SELECT * FROM tab WHERE lower(col) = LOWER(?); It makes your SQL statements verbose, and you always have to remember to - use lower on both the column and the query value. + use lower on both the column and the query value. It won't use an index, unless you create a functional index using - lower. + lower. - If you declare a column as UNIQUE or PRIMARY - KEY, the implicitly generated index is case-sensitive. So it's + If you declare a column as UNIQUE or PRIMARY + KEY, the implicitly generated index is case-sensitive. So it's useless for case-insensitive searches, and it won't enforce uniqueness case-insensitively. @@ -55,13 +55,13 @@ SELECT * FROM tab WHERE lower(col) = LOWER(?); - The citext data type allows you to eliminate calls - to lower in SQL queries, and allows a primary key to - be case-insensitive. citext is locale-aware, just - like text, which means that the matching of upper case and + The citext data type allows you to eliminate calls + to lower in SQL queries, and allows a primary key to + be case-insensitive. citext is locale-aware, just + like text, which means that the matching of upper case and lower case characters is dependent on the rules of - the database's LC_CTYPE setting. Again, this behavior is - identical to the use of lower in queries. But because it's + the database's LC_CTYPE setting. Again, this behavior is + identical to the use of lower in queries. But because it's done transparently by the data type, you don't have to remember to do anything special in your queries. 
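For instance, a minimal sketch of the case-insensitive key mentioned above (the accounts table is hypothetical; the citext extension must be installed):

CREATE EXTENSION IF NOT EXISTS citext;
CREATE TABLE accounts (nick citext PRIMARY KEY);
INSERT INTO accounts VALUES ('Larry');
INSERT INTO accounts VALUES ('larry');   -- fails: duplicate key

The second insert is rejected because the implicitly created unique index compares the two spellings as equal.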
@@ -80,18 +80,18 @@ CREATE TABLE users ( pass TEXT NOT NULL ); -INSERT INTO users VALUES ( 'larry', md5(random()::text) ); -INSERT INTO users VALUES ( 'Tom', md5(random()::text) ); -INSERT INTO users VALUES ( 'Damian', md5(random()::text) ); -INSERT INTO users VALUES ( 'NEAL', md5(random()::text) ); -INSERT INTO users VALUES ( 'Bjørn', md5(random()::text) ); +INSERT INTO users VALUES ( 'larry', sha256(random()::text::bytea) ); +INSERT INTO users VALUES ( 'Tom', sha256(random()::text::bytea) ); +INSERT INTO users VALUES ( 'Damian', sha256(random()::text::bytea) ); +INSERT INTO users VALUES ( 'NEAL', sha256(random()::text::bytea) ); +INSERT INTO users VALUES ( 'Bjørn', sha256(random()::text::bytea) ); SELECT * FROM users WHERE nick = 'Larry'; - The SELECT statement will return one tuple, even though - the nick column was set to larry and the query - was for Larry. + The SELECT statement will return one tuple, even though + the nick column was set to larry and the query + was for Larry. @@ -99,82 +99,82 @@ SELECT * FROM users WHERE nick = 'Larry'; String Comparison Behavior - citext performs comparisons by converting each string to lower - case (as though lower were called) and then comparing the + citext performs comparisons by converting each string to lower + case (as though lower were called) and then comparing the results normally. Thus, for example, two strings are considered equal - if lower would produce identical results for them. + if lower would produce identical results for them. In order to emulate a case-insensitive collation as closely as possible, - there are citext-specific versions of a number of string-processing + there are citext-specific versions of a number of string-processing operators and functions. So, for example, the regular expression - operators ~ and ~* exhibit the same behavior when - applied to citext: they both match case-insensitively. + operators ~ and ~* exhibit the same behavior when + applied to citext: they both match case-insensitively. The same is true - for !~ and !~*, as well as for the - LIKE operators ~~ and ~~*, and - !~~ and !~~*. If you'd like to match - case-sensitively, you can cast the operator's arguments to text. + for !~ and !~*, as well as for the + LIKE operators ~~ and ~~*, and + !~~ and !~~*. If you'd like to match + case-sensitively, you can cast the operator's arguments to text. Similarly, all of the following functions perform matching - case-insensitively if their arguments are citext: + case-insensitively if their arguments are citext: - regexp_match() + regexp_match() - regexp_matches() + regexp_matches() - regexp_replace() + regexp_replace() - regexp_split_to_array() + regexp_split_to_array() - regexp_split_to_table() + regexp_split_to_table() - replace() + replace() - split_part() + split_part() - strpos() + strpos() - translate() + translate() For the regexp functions, if you want to match case-sensitively, you can - specify the c flag to force a case-sensitive match. Otherwise, - you must cast to text before using one of these functions if + specify the c flag to force a case-sensitive match. Otherwise, + you must cast to text before using one of these functions if you want case-sensitive behavior. @@ -186,13 +186,13 @@ SELECT * FROM users WHERE nick = 'Larry'; - citext's case-folding behavior depends on - the LC_CTYPE setting of your database. How it compares + citext's case-folding behavior depends on + the LC_CTYPE setting of your database. How it compares values is therefore determined when the database is created. 
It is not truly case-insensitive in the terms defined by the Unicode standard. Effectively, what this means is that, as long as you're happy with your - collation, you should be happy with citext's comparisons. But + collation, you should be happy with citext's comparisons. But if you have data in different languages stored in your database, users of one language may find their query results are not as expected if the collation is for another language. @@ -201,38 +201,38 @@ SELECT * FROM users WHERE nick = 'Larry'; - As of PostgreSQL 9.1, you can attach a - COLLATE specification to citext columns or data - values. Currently, citext operators will honor a non-default - COLLATE specification while comparing case-folded strings, + As of PostgreSQL 9.1, you can attach a + COLLATE specification to citext columns or data + values. Currently, citext operators will honor a non-default + COLLATE specification while comparing case-folded strings, but the initial folding to lower case is always done according to the - database's LC_CTYPE setting (that is, as though - COLLATE "default" were given). This may be changed in a - future release so that both steps follow the input COLLATE + database's LC_CTYPE setting (that is, as though + COLLATE "default" were given). This may be changed in a + future release so that both steps follow the input COLLATE specification. - citext is not as efficient as text because the + citext is not as efficient as text because the operator functions and the B-tree comparison functions must make copies of the data and convert it to lower case for comparisons. It is, - however, slightly more efficient than using lower to get + however, slightly more efficient than using lower to get case-insensitive matching. - citext doesn't help much if you need data to compare + citext doesn't help much if you need data to compare case-sensitively in some contexts and case-insensitively in other - contexts. The standard answer is to use the text type and - manually use the lower function when you need to compare + contexts. The standard answer is to use the text type and + manually use the lower function when you need to compare case-insensitively; this works all right if case-insensitive comparison is needed only infrequently. If you need case-insensitive behavior most of the time and case-sensitive infrequently, consider storing the data - as citext and explicitly casting the column to text + as citext and explicitly casting the column to text when you want case-sensitive comparison. In either situation, you will need two indexes if you want both types of searches to be fast. @@ -240,9 +240,9 @@ SELECT * FROM users WHERE nick = 'Larry'; - The schema containing the citext operators must be - in the current search_path (typically public); - if it is not, the normal case-sensitive text operators + The schema containing the citext operators must be + in the current search_path (typically public); + if it is not, the normal case-sensitive text operators will be invoked instead. @@ -257,7 +257,7 @@ SELECT * FROM users WHERE nick = 'Larry'; - Inspired by the original citext module by Donald Fraser. + Inspired by the original citext module by Donald Fraser. diff --git a/doc/src/sgml/client-auth.sgml b/doc/src/sgml/client-auth.sgml index 819db811b2..c2114021c3 100644 --- a/doc/src/sgml/client-auth.sgml +++ b/doc/src/sgml/client-auth.sgml @@ -13,17 +13,17 @@ wants to connect as, much the same way one logs into a Unix computer as a particular user. 
Within the SQL environment the active database user name determines access privileges to database objects — see - for more information. Therefore, it is + for more information. Therefore, it is essential to restrict which database users can connect. - As explained in , + As explained in , PostgreSQL actually does privilege - management in terms of roles. In this chapter, we - consistently use database user to mean role with the - LOGIN privilege. + management in terms of roles. In this chapter, we + consistently use database user to mean role with the + LOGIN privilege. @@ -66,11 +66,11 @@ which traditionally is named pg_hba.conf and is stored in the database cluster's data directory. - (HBA stands for host-based authentication.) A default + (HBA stands for host-based authentication.) A default pg_hba.conf file is installed when the data directory is initialized by initdb. It is possible to place the authentication configuration file elsewhere, - however; see the configuration parameter. + however; see the configuration parameter. @@ -82,7 +82,7 @@ up of a number of fields which are separated by spaces and/or tabs. Fields can contain white space if the field value is double-quoted. Quoting one of the keywords in a database, user, or address field (e.g., - all or replication) makes the word lose its special + all or replication) makes the word lose its special meaning, and just match a database, user, or host with that name. @@ -92,8 +92,8 @@ and the authentication method to be used for connections matching these parameters. The first record with a matching connection type, client address, requested database, and user name is used to perform - authentication. There is no fall-through or - backup: if one record is chosen and the authentication + authentication. There is no fall-through or + backup: if one record is chosen and the authentication fails, subsequent records are not considered. If no record matches, access is denied. @@ -136,9 +136,9 @@ hostnossl database user Remote TCP/IP connections will not be possible unless the server is started with an appropriate value for the - configuration parameter, + configuration parameter, since the default behavior is to listen for TCP/IP connections - only on the local loopback address localhost. + only on the local loopback address localhost. @@ -157,8 +157,8 @@ hostnossl database user To make use of this option the server must be built with SSL support. Furthermore, SSL must be enabled - by setting the configuration parameter (see - for more information). + by setting the configuration parameter (see + for more information). Otherwise, the hostssl record is ignored except for logging a warning that it cannot match any connections. @@ -169,7 +169,7 @@ hostnossl database user hostnossl - This record type has the opposite behavior of hostssl; + This record type has the opposite behavior of hostssl; it only matches connection attempts made over TCP/IP that do not use SSL. @@ -182,24 +182,24 @@ hostnossl database user Specifies which database name(s) this record matches. The value all specifies that it matches all databases. - The value sameuser specifies that the record + The value sameuser specifies that the record matches if the requested database has the same name as the - requested user. The value samerole specifies that + requested user. The value samerole specifies that the requested user must be a member of the role with the same - name as the requested database. (samegroup is an - obsolete but still accepted spelling of samerole.) 
+ name as the requested database. (samegroup is an + obsolete but still accepted spelling of samerole.) Superusers are not considered to be members of a role for the - purposes of samerole unless they are explicitly + purposes of samerole unless they are explicitly members of the role, directly or indirectly, and not just by virtue of being a superuser. - The value replication specifies that the record + The value replication specifies that the record matches if a physical replication connection is requested (note that replication connections do not specify any particular database). Otherwise, this is the name of a specific PostgreSQL database. Multiple database names can be supplied by separating them with commas. A separate file containing database names can be specified by - preceding the file name with @. + preceding the file name with @. @@ -211,18 +211,18 @@ hostnossl database user Specifies which database user name(s) this record matches. The value all specifies that it matches all users. Otherwise, this is either the name of a specific - database user, or a group name preceded by +. + database user, or a group name preceded by +. (Recall that there is no real distinction between users and groups - in PostgreSQL; a + mark really means + in PostgreSQL; a + mark really means match any of the roles that are directly or indirectly members - of this role, while a name without a + mark matches + of this role, while a name without a + mark matches only that specific role.) For this purpose, a superuser is only considered to be a member of a role if they are explicitly a member of the role, directly or indirectly, and not just by virtue of being a superuser. Multiple user names can be supplied by separating them with commas. A separate file containing user names can be specified by preceding the - file name with @. + file name with @. @@ -239,7 +239,7 @@ hostnossl database user An IP address range is specified using standard numeric notation for the range's starting address, then a slash (/) - and a CIDR mask length. The mask + and a CIDR mask length. The mask length indicates the number of high-order bits of the client IP address that must match. Bits to the right of this should be zero in the given IP address. @@ -317,10 +317,10 @@ hostnossl database user This field only applies to host, - hostssl, and hostnossl records. + hostssl, and hostnossl records. - + Users sometimes wonder why host names are handled in this seemingly complicated way, with two name resolutions @@ -350,7 +350,7 @@ hostnossl database user implementations of host name-based access control, such as the Apache HTTP Server and TCP Wrappers. - + @@ -360,17 +360,17 @@ hostnossl database user These two fields can be used as an alternative to the - IP-address/mask-length + IP-address/mask-length notation. Instead of specifying the mask length, the actual mask is specified in a - separate column. For example, 255.0.0.0 represents an IPv4 - CIDR mask length of 8, and 255.255.255.255 represents a + separate column. For example, 255.0.0.0 represents an IPv4 + CIDR mask length of 8, and 255.255.255.255 represents a CIDR mask length of 32. These fields only apply to host, - hostssl, and hostnossl records. + hostssl, and hostnossl records. @@ -381,11 +381,11 @@ hostnossl database user Specifies the authentication method to use when a connection matches this record. The possible choices are summarized here; details - are in . + are in . - trust + trust Allow the connection unconditionally. 
This method @@ -393,18 +393,18 @@ hostnossl database user PostgreSQL database server to login as any PostgreSQL user they wish, without the need for a password or any other authentication. See for details. + linkend="auth-trust"/> for details. - reject + reject Reject the connection unconditionally. This is useful for - filtering out certain hosts from a group, for example a - reject line could block a specific host from connecting, + filtering out certain hosts from a group, for example a + reject line could block a specific host from connecting, while a later line allows the remaining hosts in a specific network to connect. @@ -412,63 +412,63 @@ hostnossl database user - scram-sha-256 + scram-sha-256 Perform SCRAM-SHA-256 authentication to verify the user's - password. See for details. + password. See for details. - md5 + md5 Perform SCRAM-SHA-256 or MD5 authentication to verify the - user's password. See + user's password. See for details. - password + password Require the client to supply an unencrypted password for authentication. Since the password is sent in clear text over the network, this should not be used on untrusted networks. - See for details. + See for details. - gss + gss Use GSSAPI to authenticate the user. This is only available for TCP/IP connections. See for details. + linkend="gssapi-auth"/> for details. - sspi + sspi Use SSPI to authenticate the user. This is only available on Windows. See for details. + linkend="sspi-auth"/> for details. - ident + ident Obtain the operating system user name of the client @@ -477,70 +477,70 @@ hostnossl database user Ident authentication can only be used on TCP/IP connections. When specified for local connections, peer authentication will be used instead. - See for details. + See for details. - peer + peer Obtain the client's operating system user name from the operating system and check if it matches the requested database user name. This is only available for local connections. - See for details. + See for details. - ldap + ldap - Authenticate using an LDAP server. See for details. + Authenticate using an LDAP server. See for details. - radius + radius Authenticate using a RADIUS server. See for details. + linkend="auth-radius"/> for details. - cert + cert Authenticate using SSL client certificates. See - for details. + for details. - pam + pam Authenticate using the Pluggable Authentication Modules (PAM) service provided by the operating system. See for details. + linkend="auth-pam"/> for details. - bsd + bsd Authenticate using the BSD Authentication service provided by the - operating system. See for details. + operating system. See for details. @@ -554,17 +554,17 @@ hostnossl database user auth-options - After the auth-method field, there can be field(s) of - the form name=value that + After the auth-method field, there can be field(s) of + the form name=value that specify options for the authentication method. Details about which options are available for which authentication methods appear below. In addition to the method-specific options listed below, there is one - method-independent authentication option clientcert, which - can be specified in any hostssl record. When set - to 1, this option requires the client to present a valid + method-independent authentication option clientcert, which + can be specified in any hostssl record. When set + to 1, this option requires the client to present a valid (trusted) SSL certificate, in addition to the other requirements of the authentication method. 
@@ -574,11 +574,11 @@ hostnossl database user - Files included by @ constructs are read as lists of names, + Files included by @ constructs are read as lists of names, which can be separated by either whitespace or commas. Comments are introduced by #, just as in - pg_hba.conf, and nested @ constructs are - allowed. Unless the file name following @ is an absolute + pg_hba.conf, and nested @ constructs are + allowed. Unless the file name following @ is an absolute path, it is taken to be relative to the directory containing the referencing file. @@ -589,10 +589,10 @@ hostnossl database user significant. Typically, earlier records will have tight connection match parameters and weaker authentication methods, while later records will have looser match parameters and stronger authentication - methods. For example, one might wish to use trust + methods. For example, one might wish to use trust authentication for local TCP/IP connections but require a password for remote TCP/IP connections. In this case a record specifying - trust authentication for connections from 127.0.0.1 would + trust authentication for connections from 127.0.0.1 would appear before a record specifying password authentication for a wider range of allowed client IP addresses. @@ -603,7 +603,7 @@ hostnossl database user SIGHUPSIGHUP signal. If you edit the file on an active system, you will need to signal the postmaster - (using pg_ctl reload or kill -HUP) to make it + (using pg_ctl reload or kill -HUP) to make it re-read the file. @@ -618,7 +618,7 @@ hostnossl database user The system view pg_hba_file_rules - can be helpful for pre-testing changes to the pg_hba.conf + can be helpful for pre-testing changes to the pg_hba.conf file, or for diagnosing problems if loading of the file did not have the desired effects. Rows in the view with non-null error fields indicate problems in the @@ -629,16 +629,16 @@ hostnossl database user To connect to a particular database, a user must not only pass the pg_hba.conf checks, but must have the - CONNECT privilege for the database. If you wish to + CONNECT privilege for the database. If you wish to restrict which users can connect to which databases, it's usually - easier to control this by granting/revoking CONNECT privilege + easier to control this by granting/revoking CONNECT privilege than to put the rules in pg_hba.conf entries. Some examples of pg_hba.conf entries are shown in - . See the next section for details on the + . See the next section for details on the different authentication methods. @@ -760,21 +760,21 @@ local db1,db2,@demodbs all md5 User name maps are defined in the ident map file, which by default is named - pg_ident.confpg_ident.conf + pg_ident.confpg_ident.conf and is stored in the cluster's data directory. (It is possible to place the map file - elsewhere, however; see the + elsewhere, however; see the configuration parameter.) The ident map file contains lines of the general form: -map-name system-username database-username +map-name system-username database-username Comments and whitespace are handled in the same way as in - pg_hba.conf. The - map-name is an arbitrary name that will be used to + pg_hba.conf. The + map-name is an arbitrary name that will be used to refer to this mapping in pg_hba.conf. The other two fields specify an operating system user name and a matching - database user name. The same map-name can be + database user name. The same map-name can be used repeatedly to specify multiple user-mappings within a single map. 
@@ -788,13 +788,13 @@ local db1,db2,@demodbs all md5 user has requested to connect as. - If the system-username field starts with a slash (/), + If the system-username field starts with a slash (/), the remainder of the field is treated as a regular expression. - (See for details of - PostgreSQL's regular expression syntax.) The regular + (See for details of + PostgreSQL's regular expression syntax.) The regular expression can include a single capture, or parenthesized subexpression, - which can then be referenced in the database-username - field as \1 (backslash-one). This allows the mapping of + which can then be referenced in the database-username + field as \1 (backslash-one). This allows the mapping of multiple user names in a single line, which is particularly useful for simple syntax substitutions. For example, these entries @@ -802,14 +802,14 @@ mymap /^(.*)@mydomain\.com$ \1 mymap /^(.*)@otherdomain\.com$ guest will remove the domain part for users with system user names that end with - @mydomain.com, and allow any user whose system name ends with - @otherdomain.com to log in as guest. + @mydomain.com, and allow any user whose system name ends with + @otherdomain.com to log in as guest. Keep in mind that by default, a regular expression can match just part of - a string. It's usually wise to use ^ and $, as + a string. It's usually wise to use ^ and $, as shown in the above example, to force the match to be to the entire system user name. @@ -821,28 +821,28 @@ mymap /^(.*)@otherdomain\.com$ guest SIGHUPSIGHUP signal. If you edit the file on an active system, you will need to signal the postmaster - (using pg_ctl reload or kill -HUP) to make it + (using pg_ctl reload or kill -HUP) to make it re-read the file. A pg_ident.conf file that could be used in - conjunction with the pg_hba.conf file in is shown in . In this example, anyone + conjunction with the pg_hba.conf file in is shown in . In this example, anyone logged in to a machine on the 192.168 network that does not have the - operating system user name bryanh, ann, or - robert would not be granted access. Unix user - robert would only be allowed access when he tries to - connect as PostgreSQL user bob, not - as robert or anyone else. ann would - only be allowed to connect as ann. User - bryanh would be allowed to connect as either - bryanh or as guest1. + operating system user name bryanh, ann, or + robert would not be granted access. Unix user + robert would only be allowed access when he tries to + connect as PostgreSQL user bob, not + as robert or anyone else. ann would + only be allowed to connect as ann. User + bryanh would be allowed to connect as either + bryanh or as guest1. - An Example <filename>pg_ident.conf</> File + An Example <filename>pg_ident.conf</filename> File # MAPNAME SYSTEM-USERNAME PG-USERNAME @@ -859,33 +859,34 @@ omicron bryanh guest1 Authentication Methods - The following subsections describe the authentication methods in more detail. + The following sections describe the authentication methods in more detail. + - + Trust Authentication - When trust authentication is specified, + When trust authentication is specified, PostgreSQL assumes that anyone who can connect to the server is authorized to access the database with whatever database user name they specify (even superuser names). - Of course, restrictions made in the database and - user columns still apply. + Of course, restrictions made in the database and + user columns still apply. 
This method should only be used when there is adequate operating-system-level protection on connections to the server. - trust authentication is appropriate and very + trust authentication is appropriate and very convenient for local connections on a single-user workstation. It - is usually not appropriate by itself on a multiuser - machine. However, you might be able to use trust even + is usually not appropriate by itself on a multiuser + machine. However, you might be able to use trust even on a multiuser machine, if you restrict access to the server's Unix-domain socket file using file-system permissions. To do this, set the unix_socket_permissions (and possibly unix_socket_group) configuration parameters as - described in . Or you + described in . Or you could set the unix_socket_directories configuration parameter to place the socket file in a suitably restricted directory. @@ -895,26 +896,29 @@ omicron bryanh guest1 Setting file-system permissions only helps for Unix-socket connections. Local TCP/IP connections are not restricted by file-system permissions. Therefore, if you want to use file-system permissions for local security, - remove the host ... 127.0.0.1 ... line from - pg_hba.conf, or change it to a - non-trust authentication method. + remove the host ... 127.0.0.1 ... line from + pg_hba.conf, or change it to a + non-trust authentication method. - trust authentication is only suitable for TCP/IP connections + trust authentication is only suitable for TCP/IP connections if you trust every user on every machine that is allowed to connect - to the server by the pg_hba.conf lines that specify - trust. It is seldom reasonable to use trust - for any TCP/IP connections other than those from localhost (127.0.0.1). + to the server by the pg_hba.conf lines that specify + trust. It is seldom reasonable to use trust + for any TCP/IP connections other than those from localhost (127.0.0.1). - + - + Password Authentication - MD5 + MD5 + + + SCRAM password @@ -922,56 +926,122 @@ omicron bryanh guest1 - The password-based authentication methods are scram-sha-256, - md5, and password. These methods operate - similarly except for the way that the password is sent across the + There are several password-based authentication methods. These methods + operate similarly but differ in how the users' passwords are stored on the + server and how the password provided by a client is sent across the connection. - - Plain password sends the password in clear-text, and is - therefore vulnerable to password sniffing attacks. It should - always be avoided if possible. If the connection is protected by SSL - encryption then password can be used safely, though. - (Though SSL certificate authentication might be a better choice if one - is depending on using SSL). - + + + scram-sha-256 + + + The method scram-sha-256 performs SCRAM-SHA-256 + authentication, as described in + RFC 7677. It + is a challenge-response scheme that prevents password sniffing on + untrusted connections and supports storing passwords on the server in a + cryptographically hashed form that is thought to be secure. + + + This is the most secure of the currently provided methods, but it is + not supported by older client libraries. + + + - - scram-sha-256 performs SCRAM-SHA-256 authentication, as - described in - RFC5802. It - is a challenge-response scheme, that prevents password sniffing on - untrusted connections. It is more secure than the md5 - method, but might not be supported by older clients. 
- + + md5 + + + The method md5 uses a custom less secure challenge-response + mechanism. It prevents password sniffing and avoids storing passwords + on the server in plain text but provides no protection if an attacker + manages to steal the password hash from the server. Also, the MD5 hash + algorithm is nowadays no longer considered secure against determined + attacks. + - - md5 allows falling back to a less secure challenge-response - mechanism for those users with an MD5 hashed password. - The fallback mechanism also prevents password sniffing, but provides no - protection if an attacker manages to steal the password hash from the - server, and it cannot be used with the feature. For all other users, - md5 works the same as scram-sha-256. - + + The md5 method cannot be used with + the feature. + + + + To ease transition from the md5 method to the newer + SCRAM method, if md5 is specified as a method + in pg_hba.conf but the user's password on the + server is encrypted for SCRAM (see below), then SCRAM-based + authentication will automatically be chosen instead. + + + + + + password + + + The method password sends the password in clear-text and is + therefore vulnerable to password sniffing attacks. It should + always be avoided if possible. If the connection is protected by SSL + encryption then password can be used safely, though. + (Though SSL certificate authentication might be a better choice if one + is depending on using SSL). + + + + PostgreSQL database passwords are separate from operating system user passwords. The password for - each database user is stored in the pg_authid system + each database user is stored in the pg_authid system catalog. Passwords can be managed with the SQL commands - and - , - e.g., CREATE USER foo WITH PASSWORD 'secret'. + and + , + e.g., CREATE ROLE foo WITH LOGIN PASSWORD 'secret', + or the psql + command \password. If no password has been set up for a user, the stored password is null and password authentication will always fail for that user. - + + The availability of the different password-based authentication methods + depends on how a user's password on the server is encrypted (or hashed, + more accurately). This is controlled by the configuration + parameter at the time the + password is set. If a password was encrypted using + the scram-sha-256 setting, then it can be used for the + authentication methods scram-sha-256 + and password (but password transmission will be in + plain text in the latter case). The authentication method + specification md5 will automatically switch to using + the scram-sha-256 method in this case, as explained + above, so it will also work. If a password was encrypted using + the md5 setting, then it can be used only for + the md5 and password authentication + method specifications (again, with the password transmitted in plain text + in the latter case). (Previous PostgreSQL releases supported storing the + password on the server in plain text. This is no longer possible.) To + check the currently stored password hashes, see the system + catalog pg_authid. + + + + To upgrade an existing installation from md5 + to scram-sha-256, after having ensured that all client + libraries in use are new enough to support SCRAM, + set password_encryption = 'scram-sha-256' + in postgresql.conf, make all users set new passwords, + and change the authentication method specifications + in pg_hba.conf to scram-sha-256. 
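As a minimal sketch of that upgrade path (the role name foo is reused from the example above, and 'new-secret' is a placeholder):

SELECT rolname, left(rolpassword, 14) AS hash_prefix
    FROM pg_authid
    WHERE rolpassword IS NOT NULL;          -- 'md5...' vs. 'SCRAM-SHA-256$'
SET password_encryption = 'scram-sha-256';  -- session-level; the cluster-wide default goes in postgresql.conf
ALTER ROLE foo WITH PASSWORD 'new-secret';

The catalog query requires superuser access. Using psql's \password instead of a literal ALTER ROLE avoids sending the new password in clear text as part of the SQL statement.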
+ + - + GSSAPI Authentication @@ -991,21 +1061,21 @@ omicron bryanh guest1 - GSSAPI support has to be enabled when PostgreSQL is built; - see for more information. + GSSAPI support has to be enabled when PostgreSQL is built; + see for more information. When GSSAPI uses Kerberos, it uses a standard principal in the format - servicename/hostname@realm. + servicename/hostname@realm. The PostgreSQL server will accept any principal that is included in the keytab used by the server, but care needs to be taken to specify the correct principal details when - making the connection from the client using the krbsrvname connection parameter. (See - also .) The installation default can be + making the connection from the client using the krbsrvname connection parameter. (See + also .) The installation default can be changed from the default postgres at build time using - ./configure --with-krb-srvnam=whatever. + ./configure --with-krb-srvnam=whatever. In most environments, this parameter never needs to be changed. Some Kerberos implementations might require a different service name, @@ -1013,42 +1083,42 @@ omicron bryanh guest1 to be in upper case (POSTGRES). - hostname is the fully qualified host name of the + hostname is the fully qualified host name of the server machine. The service principal's realm is the preferred realm of the server machine. - Client principals can be mapped to different PostgreSQL - database user names with pg_ident.conf. For example, - pgusername@realm could be mapped to just pgusername. - Alternatively, you can use the full username@realm principal as - the role name in PostgreSQL without any mapping. + Client principals can be mapped to different PostgreSQL + database user names with pg_ident.conf. For example, + pgusername@realm could be mapped to just pgusername. + Alternatively, you can use the full username@realm principal as + the role name in PostgreSQL without any mapping. - PostgreSQL also supports a parameter to strip the realm from + PostgreSQL also supports a parameter to strip the realm from the principal. This method is supported for backwards compatibility and is strongly discouraged as it is then impossible to distinguish different users with the same user name but coming from different realms. To enable this, - set include_realm to 0. For simple single-realm + set include_realm to 0. For simple single-realm installations, doing that combined with setting the - krb_realm parameter (which checks that the principal's realm + krb_realm parameter (which checks that the principal's realm matches exactly what is in the krb_realm parameter) is still secure; but this is a less capable approach compared to specifying an explicit mapping in - pg_ident.conf. + pg_ident.conf. Make sure that your server keytab file is readable (and preferably only readable, not writable) by the PostgreSQL - server account. (See also .) The location + server account. (See also .) The location of the key file is specified by the configuration + linkend="guc-krb-server-keyfile"/> configuration parameter. The default is - /usr/local/pgsql/etc/krb5.keytab (or whatever - directory was specified as sysconfdir at build time). + /usr/local/pgsql/etc/krb5.keytab (or whatever + directory was specified as sysconfdir at build time). For security reasons, it is recommended to use a separate keytab just for the PostgreSQL server rather than opening up permissions on the system keytab file. @@ -1058,18 +1128,18 @@ omicron bryanh guest1 Kerberos documentation for details. 
The following example is for MIT-compatible Kerberos 5 implementations: -kadmin% ank -randkey postgres/server.my.domain.org -kadmin% ktadd -k krb5.keytab postgres/server.my.domain.org +kadmin% ank -randkey postgres/server.my.domain.org +kadmin% ktadd -k krb5.keytab postgres/server.my.domain.org When connecting to the database make sure you have a ticket for a principal matching the requested database user name. For example, for - database user name fred, principal - fred@EXAMPLE.COM would be able to connect. To also allow - principal fred/users.example.com@EXAMPLE.COM, use a user name - map, as described in . + database user name fred, principal + fred@EXAMPLE.COM would be able to connect. To also allow + principal fred/users.example.com@EXAMPLE.COM, use a user name + map, as described in . @@ -1081,13 +1151,13 @@ omicron bryanh guest1 If set to 0, the realm name from the authenticated user principal is stripped off before being passed through the user name mapping - (). This is discouraged and is + (). This is discouraged and is primarily available for backwards compatibility, as it is not secure in multi-realm environments unless krb_realm is also used. It is recommended to leave include_realm set to the default (1) and to - provide an explicit mapping in pg_ident.conf to convert - principal names to PostgreSQL user names. + provide an explicit mapping in pg_ident.conf to convert + principal names to PostgreSQL user names. @@ -1097,7 +1167,7 @@ omicron bryanh guest1 Allows for mapping between system and database user names. See - for details. For a GSSAPI/Kerberos + for details. For a GSSAPI/Kerberos principal, such as username@EXAMPLE.COM (or, less commonly, username/hostbased@EXAMPLE.COM), the user name used for mapping is @@ -1123,9 +1193,9 @@ omicron bryanh guest1 - + - + SSPI Authentication @@ -1148,7 +1218,7 @@ omicron bryanh guest1 When using Kerberos authentication, SSPI works the same way - GSSAPI does; see + GSSAPI does; see for details. @@ -1162,13 +1232,13 @@ omicron bryanh guest1 If set to 0, the realm name from the authenticated user principal is stripped off before being passed through the user name mapping - (). This is discouraged and is + (). This is discouraged and is primarily available for backwards compatibility, as it is not secure in multi-realm environments unless krb_realm is also used. It is recommended to leave include_realm set to the default (1) and to - provide an explicit mapping in pg_ident.conf to convert - principal names to PostgreSQL user names. + provide an explicit mapping in pg_ident.conf to convert + principal names to PostgreSQL user names. @@ -1201,9 +1271,9 @@ omicron bryanh guest1 By default, these two names are identical for new user accounts. - Note that libpq uses the SAM-compatible name if no + Note that libpq uses the SAM-compatible name if no explicit user name is specified. If you use - libpq or a driver based on it, you should + libpq or a driver based on it, you should leave this option disabled or explicitly specify user name in the connection string. @@ -1215,7 +1285,7 @@ omicron bryanh guest1 Allows for mapping between system and database user names. See - for details. For a SSPI/Kerberos + for details. 
For a SSPI/Kerberos principal, such as username@EXAMPLE.COM (or, less commonly, username/hostbased@EXAMPLE.COM), the user name used for mapping is @@ -1241,9 +1311,9 @@ omicron bryanh guest1 - + - + Ident Authentication @@ -1260,7 +1330,7 @@ omicron bryanh guest1 When ident is specified for a local (non-TCP/IP) connection, - peer authentication (see ) will be + peer authentication (see ) will be used instead. @@ -1273,7 +1343,7 @@ omicron bryanh guest1 Allows for mapping between system and database user names. See - for details. + for details. @@ -1288,8 +1358,8 @@ omicron bryanh guest1 is to answer questions like What user initiated the connection that goes out of your port X and connects to my port Y?. - Since PostgreSQL knows both X and - Y when a physical connection is established, it + Since PostgreSQL knows both X and + Y when a physical connection is established, it can interrogate the ident server on the host of the connecting client and can theoretically determine the operating system user for any given connection. @@ -1317,14 +1387,14 @@ omicron bryanh guest1 Some ident servers have a nonstandard option that causes the returned user name to be encrypted, using a key that only the originating - machine's administrator knows. This option must not be - used when using the ident server with PostgreSQL, - since PostgreSQL does not have any way to decrypt the + machine's administrator knows. This option must not be + used when using the ident server with PostgreSQL, + since PostgreSQL does not have any way to decrypt the returned string to determine the actual user name. - + - + Peer Authentication @@ -1346,7 +1416,7 @@ omicron bryanh guest1 Allows for mapping between system and database user names. See - for details. + for details. @@ -1355,17 +1425,17 @@ omicron bryanh guest1 Peer authentication is only available on operating systems providing - the getpeereid() function, the SO_PEERCRED + the getpeereid() function, the SO_PEERCRED socket parameter, or similar mechanisms. Currently that includes - Linux, - most flavors of BSD including - macOS, + Linux, + most flavors of BSD including + macOS, and Solaris. - + - + LDAP Authentication @@ -1385,23 +1455,23 @@ omicron bryanh guest1 LDAP authentication can operate in two modes. In the first mode, which we will call the simple bind mode, the server will bind to the distinguished name constructed as - prefix username suffix. - Typically, the prefix parameter is used to specify - cn=, or DOMAIN\ in an Active - Directory environment. suffix is used to specify the + prefix username suffix. + Typically, the prefix parameter is used to specify + cn=, or DOMAIN\ in an Active + Directory environment. suffix is used to specify the remaining part of the DN in a non-Active Directory environment. In the second mode, which we will call the search+bind mode, the server first binds to the LDAP directory with - a fixed user name and password, specified with ldapbinddn - and ldapbindpasswd, and performs a search for the user trying + a fixed user name and password, specified with ldapbinddn + and ldapbindpasswd, and performs a search for the user trying to log in to the database. If no user and password is configured, an anonymous bind will be attempted to the directory. The search will be - performed over the subtree at ldapbasedn, and will try to + performed over the subtree at ldapbasedn, and will try to do an exact match of the attribute specified in - ldapsearchattribute. + ldapsearchattribute. 
Once the user has been found in this search, the server disconnects and re-binds to the directory as this user, using the password specified by the client, to verify that the @@ -1433,19 +1503,40 @@ omicron bryanh guest1 + + ldapscheme + + + Set to ldaps to use LDAPS. This is a non-standard + way of using LDAP over SSL, supported by some LDAP server + implementations. See also the ldaptls option for + an alternative. + + + ldaptls - Set to 1 to make the connection between PostgreSQL and the - LDAP server use TLS encryption. Note that this only encrypts - the traffic to the LDAP server — the connection to the client - will still be unencrypted unless SSL is used. + Set to 1 to make the connection between PostgreSQL and the LDAP server + use TLS encryption. This uses the StartTLS + operation per RFC 4513. See also the ldapscheme + option for an alternative. + + + Note that using ldapscheme or + ldaptls only encrypts the traffic between the + PostgreSQL server and the LDAP server. The connection between the + PostgreSQL server and the PostgreSQL client will still be unencrypted + unless SSL is used there as well. + + + The following options are used in simple bind mode only: @@ -1467,7 +1558,9 @@ omicron bryanh guest1 + + The following options are used in search+bind mode only: @@ -1503,7 +1596,18 @@ omicron bryanh guest1 Attribute to match against the user name in the search when doing search+bind authentication. If no attribute is specified, the - uid attribute will be used. + uid attribute will be used. + + + + + ldapsearchfilter + + + The search filter to use when doing search+bind authentication. + Occurrences of $username will be replaced with the + user name. This allows for more flexible search filters than + ldapsearchattribute. @@ -1514,26 +1618,33 @@ omicron bryanh guest1 An RFC 4516 LDAP URL. This is an alternative way to write some of the other LDAP options in a more compact and standard form. The format is -ldap://host[:port]/basedn[?[attribute][?[scope]]] +ldap[s]://host[:port]/basedn[?[attribute][?[scope][?[filter]]]] scope must be one of base, one, sub, - typically the latter. Only one attribute is used, and some other - components of standard LDAP URLs such as filters and extensions are - not supported. + typically the last. (The default is base, which + is normally not useful in this application.) attribute can + nominate a single attribute, in which case it is used as a value for + ldapsearchattribute. If + attribute is empty then + filter can be used as a value for + ldapsearchfilter. - For non-anonymous binds, ldapbinddn - and ldapbindpasswd must be specified as separate - options. + The URL scheme ldaps chooses the LDAPS method for + making LDAP connections over SSL, equivalent to using + ldapscheme=ldaps. To use encrypted LDAP + connections using the StartTLS operation, use the + normal URL scheme ldap and specify the + ldaptls option in addition to + ldapurl. - To use encrypted LDAP connections, the ldaptls - option has to be used in addition to ldapurl. - The ldaps URL scheme (direct SSL connection) is not - supported. + For non-anonymous binds, ldapbinddn + and ldapbindpasswd must be specified as separate + options. @@ -1549,6 +1660,17 @@ ldap://host[:port]/ + + When using search+bind mode, the search can be performed using a single + attribute specified with ldapsearchattribute, or using + a custom search filter specified with + ldapsearchfilter. + Specifying ldapsearchattribute=foo is equivalent to + specifying ldapsearchfilter="(foo=$username)". 
If neither + option is specified the default is + ldapsearchattribute=uid. + + Here is an example for a simple-bind LDAP configuration: @@ -1584,6 +1706,16 @@ host ... ldap ldapurl="ldap://ldap.example.net/dc=example,dc=net?uid?sub" same URL format, so it will be easier to share the configuration. + + Here is an example for a search+bind configuration that uses + ldapsearchfilter instead of + ldapsearchattribute to allow authentication by + user ID or email address: + +host ... ldap ldapserver=ldap.example.net ldapbasedn="dc=example, dc=net" ldapsearchfilter="(|(uid=$username)(mail=$username))" + + + Since LDAP often uses commas and spaces to separate the different @@ -1592,9 +1724,9 @@ host ... ldap ldapurl="ldap://ldap.example.net/dc=example,dc=net?uid?sub" - + - + RADIUS Authentication @@ -1614,11 +1746,11 @@ host ... ldap ldapurl="ldap://ldap.example.net/dc=example,dc=net?uid?sub" When using RADIUS authentication, an Access Request message will be sent to the configured RADIUS server. This request will be of type Authenticate Only, and include parameters for - user name, password (encrypted) and - NAS Identifier. The request will be encrypted using + user name, password (encrypted) and + NAS Identifier. The request will be encrypted using a secret shared with the server. The RADIUS server will respond to - this server with either Access Accept or - Access Reject. There is no support for RADIUS accounting. + this server with either Access Accept or + Access Reject. There is no support for RADIUS accounting. @@ -1657,8 +1789,8 @@ host ... ldap ldapurl="ldap://ldap.example.net/dc=example,dc=net?uid?sub" The encryption vector used will only be cryptographically - strong if PostgreSQL is built with support for - OpenSSL. In other cases, the transmission to the + strong if PostgreSQL is built with support for + OpenSSL. In other cases, the transmission to the RADIUS server should only be considered obfuscated, not secured, and external security measures should be applied if necessary. @@ -1672,7 +1804,7 @@ host ... ldap ldapurl="ldap://ldap.example.net/dc=example,dc=net?uid?sub" The port number on the RADIUS servers to connect to. If no port - is specified, the default port 1812 will be used. + is specified, the default port 1812 will be used. @@ -1681,21 +1813,21 @@ host ... ldap ldapurl="ldap://ldap.example.net/dc=example,dc=net?uid?sub" radiusidentifiers - The string used as NAS Identifier in the RADIUS + The string used as NAS Identifier in the RADIUS requests. This parameter can be used as a second parameter identifying for example which database user the user is attempting to authenticate as, which can be used for policy matching on the RADIUS server. If no identifier is specified, the default - postgresql will be used. + postgresql will be used. - + - + Certificate Authentication @@ -1723,7 +1855,7 @@ host ... ldap ldapurl="ldap://ldap.example.net/dc=example,dc=net?uid?sub" Allows for mapping between system and database user names. See - for details. + for details. @@ -1731,17 +1863,17 @@ host ... ldap ldapurl="ldap://ldap.example.net/dc=example,dc=net?uid?sub" - In a pg_hba.conf record specifying certificate - authentication, the authentication option clientcert is - assumed to be 1, and it cannot be turned off since a client - certificate is necessary for this method. 
What the cert - method adds to the basic clientcert certificate validity test + In a pg_hba.conf record specifying certificate + authentication, the authentication option clientcert is + assumed to be 1, and it cannot be turned off since a client + certificate is necessary for this method. What the cert + method adds to the basic clientcert certificate validity test is a check that the cn attribute matches the database user name. - + - + PAM Authentication @@ -1757,8 +1889,8 @@ host ... ldap ldapurl="ldap://ldap.example.net/dc=example,dc=net?uid?sub" connected remote host name or IP address. Therefore the user must already exist in the database before PAM can be used for authentication. For more information about PAM, please read the - - Linux-PAM Page. + + Linux-PAM Page. @@ -1791,15 +1923,15 @@ host ... ldap ldapurl="ldap://ldap.example.net/dc=example,dc=net?uid?sub" - If PAM is set up to read /etc/shadow, authentication + If PAM is set up to read /etc/shadow, authentication will fail because the PostgreSQL server is started by a non-root user. However, this is not an issue when PAM is configured to use LDAP or other authentication methods. - + - + BSD Authentication @@ -1817,11 +1949,11 @@ host ... ldap ldapurl="ldap://ldap.example.net/dc=example,dc=net?uid?sub" - BSD Authentication in PostgreSQL uses + BSD Authentication in PostgreSQL uses the auth-postgresql login type and authenticates with the postgresql login class if that's defined in login.conf. By default that login class does not - exist, and PostgreSQL will use the default login class. + exist, and PostgreSQL will use the default login class. @@ -1832,8 +1964,7 @@ host ... ldap ldapurl="ldap://ldap.example.net/dc=example,dc=net?uid?sub" exists by default on OpenBSD systems. - - + Authentication Problems diff --git a/doc/src/sgml/config.sgml b/doc/src/sgml/config.sgml index 2b6255ed95..0f8f2ef920 100644 --- a/doc/src/sgml/config.sgml +++ b/doc/src/sgml/config.sgml @@ -66,13 +66,13 @@ Numeric with Unit: Some numeric parameters have an implicit unit, because they describe - quantities of memory or time. The unit might be kilobytes, blocks + quantities of memory or time. The unit might be bytes, kilobytes, blocks (typically eight kilobytes), milliseconds, seconds, or minutes. An unadorned numeric value for one of these settings will use the setting's default unit, which can be learned from - pg_settings.unit. + pg_settings.unit. For convenience, settings can be given with a unit specified explicitly, - for example '120 ms' for a time value, and they will be + for example '120 ms' for a time value, and they will be converted to whatever the parameter's actual unit is. Note that the value must be written as a string (with quotes) to use this feature. The unit name is case-sensitive, and there can be whitespace between @@ -81,7 +81,8 @@ - Valid memory units are kB (kilobytes), + Valid memory units are B (bytes), + kB (kilobytes), MB (megabytes), GB (gigabytes), and TB (terabytes). The multiplier for memory units is 1024, not 1000. @@ -105,7 +106,7 @@ Enumerated-type parameters are written in the same way as string parameters, but are restricted to have one of a limited set of values. The values allowable for such a parameter can be found from - pg_settings.enumvals. + pg_settings.enumvals. Enum parameter values are case-insensitive. @@ -117,7 +118,7 @@ The most fundamental way to set these parameters is to edit the file - postgresql.confpostgresql.conf, + postgresql.confpostgresql.conf, which is normally kept in the data directory. 
A default copy is installed when the database cluster directory is initialized. An example of what this file might look like is: @@ -150,8 +151,8 @@ shared_buffers = 128MB SIGHUP The configuration file is reread whenever the main server process - receives a SIGHUP signal; this signal is most easily - sent by running pg_ctl reload from the command line or by + receives a SIGHUP signal; this signal is most easily + sent by running pg_ctl reload from the command line or by calling the SQL function pg_reload_conf(). The main server process also propagates this signal to all currently running server processes, so that existing sessions also adopt the new values @@ -161,26 +162,26 @@ shared_buffers = 128MB can only be set at server start; any changes to their entries in the configuration file will be ignored until the server is restarted. Invalid parameter settings in the configuration file are likewise - ignored (but logged) during SIGHUP processing. + ignored (but logged) during SIGHUP processing. - In addition to postgresql.conf, + In addition to postgresql.conf, a PostgreSQL data directory contains a file - postgresql.auto.confpostgresql.auto.conf, - which has the same format as postgresql.conf but should + postgresql.auto.confpostgresql.auto.conf, + which has the same format as postgresql.conf but should never be edited manually. This file holds settings provided through - the command. This file is automatically - read whenever postgresql.conf is, and its settings take - effect in the same way. Settings in postgresql.auto.conf - override those in postgresql.conf. + the command. This file is automatically + read whenever postgresql.conf is, and its settings take + effect in the same way. Settings in postgresql.auto.conf + override those in postgresql.conf. The system view pg_file_settings can be helpful for pre-testing changes to the configuration file, or for - diagnosing problems if a SIGHUP signal did not have the + diagnosing problems if a SIGHUP signal did not have the desired effects. @@ -191,9 +192,9 @@ shared_buffers = 128MB PostgreSQL provides three SQL commands to establish configuration defaults. - The already-mentioned command + The already-mentioned command provides a SQL-accessible means of changing global defaults; it is - functionally equivalent to editing postgresql.conf. + functionally equivalent to editing postgresql.conf. In addition, there are two commands that allow setting of defaults on a per-database or per-role basis: @@ -201,21 +202,21 @@ shared_buffers = 128MB - The command allows global + The command allows global settings to be overridden on a per-database basis. - The command allows both global and + The command allows both global and per-database settings to be overridden with user-specific values. - Values set with ALTER DATABASE and ALTER ROLE + Values set with ALTER DATABASE and ALTER ROLE are applied only when starting a fresh database session. They override values obtained from the configuration files or server command line, and constitute defaults for the rest of the session. @@ -224,7 +225,7 @@ shared_buffers = 128MB - Once a client is connected to the database, PostgreSQL + Once a client is connected to the database, PostgreSQL provides two additional SQL commands (and equivalent functions) to interact with session-local configuration settings: @@ -232,7 +233,7 @@ shared_buffers = 128MB - The command allows inspection of the + The command allows inspection of the current value of all parameters. 
The corresponding function is current_setting(setting_name text). @@ -240,7 +241,7 @@ shared_buffers = 128MB - The command allows modification of the + The command allows modification of the current value of those parameters that can be set locally to a session; it has no effect on other sessions. The corresponding function is @@ -251,14 +252,14 @@ shared_buffers = 128MB In addition, the system view pg_settings can be + linkend="view-pg-settings">pg_settings can be used to view and change session-local values: - Querying this view is similar to using SHOW ALL but + Querying this view is similar to using SHOW ALL but provides more detail. It is also more flexible, since it's possible to specify filter conditions or join against other relations. @@ -266,9 +267,9 @@ shared_buffers = 128MB - Using on this view, specifically - updating the setting column, is the equivalent - of issuing SET commands. For example, the equivalent of + Using on this view, specifically + updating the setting column, is the equivalent + of issuing SET commands. For example, the equivalent of SET configuration_parameter TO DEFAULT; @@ -289,7 +290,7 @@ UPDATE pg_settings SET setting = reset_val WHERE name = 'configuration_parameter In addition to setting global defaults or attaching overrides at the database or role level, you can pass settings to PostgreSQL via shell facilities. - Both the server and libpq client library + Both the server and libpq client library accept parameter values via the shell. @@ -298,26 +299,26 @@ UPDATE pg_settings SET setting = reset_val WHERE name = 'configuration_parameter During server startup, parameter settings can be passed to the postgres command via the - command-line parameter. For example, postgres -c log_connections=yes -c log_destination='syslog' Settings provided in this way override those set via - postgresql.conf or ALTER SYSTEM, + postgresql.conf or ALTER SYSTEM, so they cannot be changed globally without restarting the server. - When starting a client session via libpq, + When starting a client session via libpq, parameter settings can be specified using the PGOPTIONS environment variable. Settings established in this way constitute defaults for the life of the session, but do not affect other sessions. For historical reasons, the format of PGOPTIONS is similar to that used when launching the postgres - command; specifically, the flag must be specified. For example, env PGOPTIONS="-c geqo=off -c statement_timeout=5min" psql @@ -338,20 +339,20 @@ env PGOPTIONS="-c geqo=off -c statement_timeout=5min" psql Managing Configuration File Contents - PostgreSQL provides several features for breaking - down complex postgresql.conf files into sub-files. + PostgreSQL provides several features for breaking + down complex postgresql.conf files into sub-files. These features are especially useful when managing multiple servers with related, but not identical, configurations. - include + include in configuration file In addition to individual parameter settings, - the postgresql.conf file can contain include - directives, which specify another file to read and process as if + the postgresql.conf file can contain include + directives, which specify another file to read and process as if it were inserted into the configuration file at this point. This feature allows a configuration file to be divided into physically separate parts. 
Include directives simply look like: @@ -365,23 +366,23 @@ include 'filename' - include_if_exists + include_if_exists in configuration file - There is also an include_if_exists directive, which acts - the same as the include directive, except + There is also an include_if_exists directive, which acts + the same as the include directive, except when the referenced file does not exist or cannot be read. A regular - include will consider this an error condition, but - include_if_exists merely logs a message and continues + include will consider this an error condition, but + include_if_exists merely logs a message and continues processing the referencing configuration file. - include_dir + include_dir in configuration file - The postgresql.conf file can also contain + The postgresql.conf file can also contain include_dir directives, which specify an entire directory of configuration files to include. These look like @@ -401,36 +402,36 @@ include_dir 'directory' Include files or directories can be used to logically separate portions of the database configuration, rather than having a single large - postgresql.conf file. Consider a company that has two + postgresql.conf file. Consider a company that has two database servers, each with a different amount of memory. There are likely elements of the configuration both will share, for things such as logging. But memory-related parameters on the server will vary between the two. And there might be server specific customizations, too. One way to manage this situation is to break the custom configuration changes for your site into three files. You could add - this to the end of your postgresql.conf file to include + this to the end of your postgresql.conf file to include them: include 'shared.conf' include 'memory.conf' include 'server.conf' - All systems would have the same shared.conf. Each + All systems would have the same shared.conf. Each server with a particular amount of memory could share the - same memory.conf; you might have one for all servers + same memory.conf; you might have one for all servers with 8GB of RAM, another for those having 16GB. And - finally server.conf could have truly server-specific + finally server.conf could have truly server-specific configuration information in it. Another possibility is to create a configuration file directory and - put this information into files there. For example, a conf.d - directory could be referenced at the end of postgresql.conf: + put this information into files there. For example, a conf.d + directory could be referenced at the end of postgresql.conf: include_dir 'conf.d' - Then you could name the files in the conf.d directory + Then you could name the files in the conf.d directory like this: 00shared.conf @@ -441,8 +442,8 @@ include_dir 'conf.d' files will be loaded. This is important because only the last setting encountered for a particular parameter while the server is reading configuration files will be used. In this example, - something set in conf.d/02server.conf would override a - value set in conf.d/01memory.conf. + something set in conf.d/02server.conf would override a + value set in conf.d/01memory.conf. @@ -470,7 +471,7 @@ include_dir 'conf.d' already mentioned, PostgreSQL uses two other manually-edited configuration files, which control client authentication (their use is discussed in ). By default, all three + linkend="client-authentication"/>). By default, all three configuration files are stored in the database cluster's data directory. 
The parameters described in this section allow the configuration files to be placed elsewhere. (Doing so can ease @@ -483,7 +484,7 @@ include_dir 'conf.d' data_directory (string) - data_directory configuration parameter + data_directory configuration parameter @@ -497,13 +498,13 @@ include_dir 'conf.d' config_file (string) - config_file configuration parameter + config_file configuration parameter Specifies the main server configuration file - (customarily called postgresql.conf). + (customarily called postgresql.conf). This parameter can only be set on the postgres command line. @@ -512,13 +513,13 @@ include_dir 'conf.d' hba_file (string) - hba_file configuration parameter + hba_file configuration parameter Specifies the configuration file for host-based authentication - (customarily called pg_hba.conf). + (customarily called pg_hba.conf). This parameter can only be set at server start. @@ -527,15 +528,15 @@ include_dir 'conf.d' ident_file (string) - ident_file configuration parameter + ident_file configuration parameter Specifies the configuration file for user name mapping - (customarily called pg_ident.conf). + (customarily called pg_ident.conf). This parameter can only be set at server start. - See also . + See also . @@ -543,7 +544,7 @@ include_dir 'conf.d' external_pid_file (string) - external_pid_file configuration parameter + external_pid_file configuration parameter @@ -569,10 +570,10 @@ include_dir 'conf.d' data directory, the postgres command-line option or PGDATA environment variable must point to the directory containing the configuration files, - and the data_directory parameter must be set in + and the data_directory parameter must be set in postgresql.conf (or on the command line) to show where the data directory is actually located. Notice that - data_directory overrides and + data_directory overrides and PGDATA for the location of the data directory, but not for the location of the configuration files. @@ -580,12 +581,12 @@ include_dir 'conf.d' If you wish, you can specify the configuration file names and locations - individually using the parameters config_file, - hba_file and/or ident_file. - config_file can only be specified on the + individually using the parameters config_file, + hba_file and/or ident_file. + config_file can only be specified on the postgres command line, but the others can be set within the main configuration file. If all three parameters plus - data_directory are explicitly set, then it is not necessary + data_directory are explicitly set, then it is not necessary to specify or PGDATA. @@ -607,7 +608,7 @@ include_dir 'conf.d' listen_addresses (string) - listen_addresses configuration parameter + listen_addresses configuration parameter @@ -615,17 +616,17 @@ include_dir 'conf.d' Specifies the TCP/IP address(es) on which the server is to listen for connections from client applications. The value takes the form of a comma-separated list of host names - and/or numeric IP addresses. The special entry * + and/or numeric IP addresses. The special entry * corresponds to all available IP interfaces. The entry - 0.0.0.0 allows listening for all IPv4 addresses and - :: allows listening for all IPv6 addresses. + 0.0.0.0 allows listening for all IPv4 addresses and + :: allows listening for all IPv6 addresses. If the list is empty, the server does not listen on any IP interface at all, in which case only Unix-domain sockets can be used to connect to it. 
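 As a sketch of the list syntax just described (the address 192.168.12.10 is a made-up example, not a recommendation), a server that should accept loopback connections plus connections arriving on one specific network interface could set:

listen_addresses = 'localhost, 192.168.12.10'   # comma-separated host names and/or IP addresses
#listen_addresses = '*'                         # alternatively, listen on every available interface
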
- The default value is localhost, - which allows only local TCP/IP loopback connections to be + The default value is localhost, + which allows only local TCP/IP loopback connections to be made. While client authentication () allows fine-grained control + linkend="client-authentication"/>) allows fine-grained control over who can access the server, listen_addresses controls which interfaces accept connection attempts, which can help prevent repeated malicious connection requests on @@ -638,7 +639,7 @@ include_dir 'conf.d' port (integer) - port configuration parameter + port configuration parameter @@ -653,7 +654,7 @@ include_dir 'conf.d' max_connections (integer) - max_connections configuration parameter + max_connections configuration parameter @@ -661,7 +662,7 @@ include_dir 'conf.d' Determines the maximum number of concurrent connections to the database server. The default is typically 100 connections, but might be less if your kernel settings will not support it (as - determined during initdb). This parameter can + determined during initdb). This parameter can only be set at server start. @@ -678,17 +679,17 @@ include_dir 'conf.d' superuser_reserved_connections (integer) - superuser_reserved_connections configuration parameter + superuser_reserved_connections configuration parameter Determines the number of connection slots that - are reserved for connections by PostgreSQL - superusers. At most + are reserved for connections by PostgreSQL + superusers. At most connections can ever be active simultaneously. Whenever the number of active concurrent connections is at least - max_connections minus + max_connections minus superuser_reserved_connections, new connections will be accepted only for superusers, and no new replication connections will be accepted. @@ -696,8 +697,9 @@ include_dir 'conf.d' The default value is three connections. The value must be less - than the value of max_connections. This - parameter can only be set at server start. + than max_connections minus + . + This parameter can only be set at server start. @@ -705,7 +707,7 @@ include_dir 'conf.d' unix_socket_directories (string) - unix_socket_directories configuration parameter + unix_socket_directories configuration parameter @@ -726,10 +728,10 @@ include_dir 'conf.d' In addition to the socket file itself, which is named - .s.PGSQL.nnnn where - nnnn is the server's port number, an ordinary file - named .s.PGSQL.nnnn.lock will be - created in each of the unix_socket_directories directories. + .s.PGSQL.nnnn where + nnnn is the server's port number, an ordinary file + named .s.PGSQL.nnnn.lock will be + created in each of the unix_socket_directories directories. Neither file should ever be removed manually. @@ -743,7 +745,7 @@ include_dir 'conf.d' unix_socket_group (string) - unix_socket_group configuration parameter + unix_socket_group configuration parameter @@ -768,7 +770,7 @@ include_dir 'conf.d' unix_socket_permissions (integer) - unix_socket_permissions configuration parameter + unix_socket_permissions configuration parameter @@ -794,7 +796,7 @@ include_dir 'conf.d' This access control mechanism is independent of the one - described in . + described in . @@ -804,7 +806,7 @@ include_dir 'conf.d' This parameter is irrelevant on systems, notably Solaris as of Solaris 10, that ignore socket permissions entirely. There, one can achieve a - similar effect by pointing unix_socket_directories to a + similar effect by pointing unix_socket_directories to a directory having search permission limited to the desired audience. 
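 A sketch of that directory-based workaround might look as follows; the directory name is purely illustrative and would have to be created separately (for example owned by the server account, group-readable only by the intended audience, mode 0750):

unix_socket_directories = '/var/run/postgresql-local'  # access is gated by the directory's own permissions
unix_socket_permissions = 0777                         # the socket mode itself is ignored on such systems
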
This parameter is also irrelevant on Windows, which does not have Unix-domain sockets. @@ -815,7 +817,7 @@ include_dir 'conf.d' bonjour (boolean) - bonjour configuration parameter + bonjour configuration parameter @@ -830,14 +832,14 @@ include_dir 'conf.d' bonjour_name (string) - bonjour_name configuration parameter + bonjour_name configuration parameter Specifies the Bonjour service name. The computer name is used if this parameter is set to the - empty string '' (which is the default). This parameter is + empty string '' (which is the default). This parameter is ignored if the server was not compiled with Bonjour support. This parameter can only be set at server start. @@ -848,7 +850,7 @@ include_dir 'conf.d' tcp_keepalives_idle (integer) - tcp_keepalives_idle configuration parameter + tcp_keepalives_idle configuration parameter @@ -857,7 +859,7 @@ include_dir 'conf.d' should send a keepalive message to the client. A value of 0 uses the system default. This parameter is supported only on systems that support - TCP_KEEPIDLE or an equivalent socket option, and on + TCP_KEEPIDLE or an equivalent socket option, and on Windows; on other systems, it must be zero. In sessions connected via a Unix-domain socket, this parameter is ignored and always reads as zero. @@ -874,7 +876,7 @@ include_dir 'conf.d' tcp_keepalives_interval (integer) - tcp_keepalives_interval configuration parameter + tcp_keepalives_interval configuration parameter @@ -883,7 +885,7 @@ include_dir 'conf.d' that is not acknowledged by the client should be retransmitted. A value of 0 uses the system default. This parameter is supported only on systems that support - TCP_KEEPINTVL or an equivalent socket option, and on + TCP_KEEPINTVL or an equivalent socket option, and on Windows; on other systems, it must be zero. In sessions connected via a Unix-domain socket, this parameter is ignored and always reads as zero. @@ -900,7 +902,7 @@ include_dir 'conf.d' tcp_keepalives_count (integer) - tcp_keepalives_count configuration parameter + tcp_keepalives_count configuration parameter @@ -909,7 +911,7 @@ include_dir 'conf.d' the server's connection to the client is considered dead. A value of 0 uses the system default. This parameter is supported only on systems that support - TCP_KEEPCNT or an equivalent socket option; + TCP_KEEPCNT or an equivalent socket option; on other systems, it must be zero. In sessions connected via a Unix-domain socket, this parameter is ignored and always reads as zero. @@ -924,16 +926,17 @@ include_dir 'conf.d' - - Security and Authentication + + + Authentication authentication_timeout (integer) - timeoutclient authentication - client authenticationtimeout during + timeoutclient authentication + client authenticationtimeout during - authentication_timeout configuration parameter + authentication_timeout configuration parameter @@ -943,26 +946,142 @@ include_dir 'conf.d' would-be client has not completed the authentication protocol in this much time, the server closes the connection. This prevents hung clients from occupying a connection indefinitely. - The default is one minute (1m). - This parameter can only be set in the postgresql.conf + The default is one minute (1m). + This parameter can only be set in the postgresql.conf + file or on the server command line. + + + + + + password_encryption (enum) + + password_encryption configuration parameter + + + + + When a password is specified in or + , this parameter determines the algorithm + to use to encrypt the password. 
The default value is md5, + which stores the password as an MD5 hash (on is also + accepted, as alias for md5). Setting this parameter to + scram-sha-256 will encrypt the password with SCRAM-SHA-256. + + + Note that older clients might lack support for the SCRAM authentication + mechanism, and hence not work with passwords encrypted with + SCRAM-SHA-256. See for more details. + + + + + + krb_server_keyfile (string) + + krb_server_keyfile configuration parameter + + + + + Sets the location of the Kerberos server key file. See + + for details. This parameter can only be set in the + postgresql.conf file or on the server command line. + + + + + + krb_caseins_users (boolean) + + krb_caseins_users configuration parameter + + + + + Sets whether GSSAPI user names should be treated + case-insensitively. + The default is off (case sensitive). This parameter can only be + set in the postgresql.conf file or on the server command line. + + + + + + db_user_namespace (boolean) + + db_user_namespace configuration parameter + + + + + This parameter enables per-database user names. It is off by default. + This parameter can only be set in the postgresql.conf file or on the server command line. + + + If this is on, you should create users as username@dbname. + When username is passed by a connecting client, + @ and the database name are appended to the user + name and that database-specific user name is looked up by the + server. Note that when you create users with names containing + @ within the SQL environment, you will need to + quote the user name. + + + + With this parameter enabled, you can still create ordinary global + users. Simply append @ when specifying the user + name in the client, e.g. joe@. The @ + will be stripped off before the user name is looked up by the + server. + + + + db_user_namespace causes the client's and + server's user name representation to differ. + Authentication checks are always done with the server's user name + so authentication methods must be configured for the + server's user name, not the client's. Because + md5 uses the user name as salt on both the + client and server, md5 cannot be used with + db_user_namespace. + + + + + This feature is intended as a temporary measure until a + complete solution is found. At that time, this option will + be removed. + + + + + + + SSL + + + See for more information about setting up SSL. + + ssl (boolean) - ssl configuration parameter + ssl configuration parameter - Enables SSL connections. Please read - before using this. - This parameter can only be set in the postgresql.conf + Enables SSL connections. + This parameter can only be set in the postgresql.conf file or on the server command line. - The default is off. + The default is off. @@ -970,7 +1089,7 @@ include_dir 'conf.d' ssl_ca_file (string) - ssl_ca_file configuration parameter + ssl_ca_file configuration parameter @@ -978,29 +1097,25 @@ include_dir 'conf.d' Specifies the name of the file containing the SSL server certificate authority (CA). Relative paths are relative to the data directory. - This parameter can only be set in the postgresql.conf + This parameter can only be set in the postgresql.conf file or on the server command line. The default is empty, meaning no CA file is loaded, and client certificate verification is not performed. - - In previous releases of PostgreSQL, the name of this file was - hard-coded as root.crt. 
- ssl_cert_file (string) - ssl_cert_file configuration parameter + ssl_cert_file configuration parameter Specifies the name of the file containing the SSL server certificate. Relative paths are relative to the data directory. - This parameter can only be set in the postgresql.conf + This parameter can only be set in the postgresql.conf file or on the server command line. The default is server.crt. @@ -1010,7 +1125,7 @@ include_dir 'conf.d' ssl_crl_file (string) - ssl_crl_file configuration parameter + ssl_crl_file configuration parameter @@ -1018,28 +1133,24 @@ include_dir 'conf.d' Specifies the name of the file containing the SSL server certificate revocation list (CRL). Relative paths are relative to the data directory. - This parameter can only be set in the postgresql.conf + This parameter can only be set in the postgresql.conf file or on the server command line. The default is empty, meaning no CRL file is loaded. - - In previous releases of PostgreSQL, the name of this file was - hard-coded as root.crl. - ssl_key_file (string) - ssl_key_file configuration parameter + ssl_key_file configuration parameter Specifies the name of the file containing the SSL server private key. Relative paths are relative to the data directory. - This parameter can only be set in the postgresql.conf + This parameter can only be set in the postgresql.conf file or on the server command line. The default is server.key. @@ -1049,19 +1160,19 @@ include_dir 'conf.d' ssl_ciphers (string) - ssl_ciphers configuration parameter + ssl_ciphers configuration parameter - Specifies a list of SSL cipher suites that are allowed to be + Specifies a list of SSL cipher suites that are allowed to be used on secure connections. See - the ciphers manual page - in the OpenSSL package for the syntax of this setting + the ciphers manual page + in the OpenSSL package for the syntax of this setting and a list of supported values. - This parameter can only be set in the postgresql.conf + This parameter can only be set in the postgresql.conf file or on the server command line. - The default value is HIGH:MEDIUM:+3DES:!aNULL. The + The default value is HIGH:MEDIUM:+3DES:!aNULL. The default is usually a reasonable choice unless you have specific security requirements. @@ -1073,7 +1184,7 @@ include_dir 'conf.d' HIGH - Cipher suites that use ciphers from HIGH group (e.g., + Cipher suites that use ciphers from HIGH group (e.g., AES, Camellia, 3DES) @@ -1083,7 +1194,7 @@ include_dir 'conf.d' MEDIUM - Cipher suites that use ciphers from MEDIUM group + Cipher suites that use ciphers from MEDIUM group (e.g., RC4, SEED) @@ -1093,11 +1204,11 @@ include_dir 'conf.d' +3DES - The OpenSSL default order for HIGH is problematic + The OpenSSL default order for HIGH is problematic because it orders 3DES higher than AES128. This is wrong because 3DES offers less security than AES128, and it is also much - slower. +3DES reorders it after all other - HIGH and MEDIUM ciphers. + slower. +3DES reorders it after all other + HIGH and MEDIUM ciphers. @@ -1119,7 +1230,7 @@ include_dir 'conf.d' Available cipher suite details will vary across OpenSSL versions. Use the command openssl ciphers -v 'HIGH:MEDIUM:+3DES:!aNULL' to - see actual details for the currently installed OpenSSL + see actual details for the currently installed OpenSSL version. Note that this list is filtered at run time based on the server key type. 
@@ -1129,16 +1240,16 @@ include_dir 'conf.d' ssl_prefer_server_ciphers (boolean) - ssl_prefer_server_ciphers configuration parameter + ssl_prefer_server_ciphers configuration parameter Specifies whether to use the server's SSL cipher preferences, rather than the client's. - This parameter can only be set in the postgresql.conf + This parameter can only be set in the postgresql.conf file or on the server command line. - The default is true. + The default is true. @@ -1154,51 +1265,28 @@ include_dir 'conf.d' ssl_ecdh_curve (string) - ssl_ecdh_curve configuration parameter + ssl_ecdh_curve configuration parameter - Specifies the name of the curve to use in ECDH key + Specifies the name of the curve to use in ECDH key exchange. It needs to be supported by all clients that connect. It does not need to be the same curve used by the server's Elliptic Curve key. - This parameter can only be set in the postgresql.conf + This parameter can only be set in the postgresql.conf file or on the server command line. - The default is prime256v1. + The default is prime256v1. OpenSSL names for the most common curves are: - prime256v1 (NIST P-256), - secp384r1 (NIST P-384), - secp521r1 (NIST P-521). + prime256v1 (NIST P-256), + secp384r1 (NIST P-384), + secp521r1 (NIST P-521). The full list of available curves can be shown with the command openssl ecparam -list_curves. Not all of them - are usable in TLS though. - - - - - - password_encryption (enum) - - password_encryption configuration parameter - - - - - When a password is specified in or - , this parameter determines the algorithm - to use to encrypt the password. The default value is md5, - which stores the password as an MD5 hash (on is also - accepted, as alias for md5). Setting this parameter to - scram-sha-256 will encrypt the password with SCRAM-SHA-256. - - - Note that older clients might lack support for the SCRAM authentication - mechanism, and hence not work with passwords encrypted with - SCRAM-SHA-256. + are usable in TLS though. @@ -1206,7 +1294,7 @@ include_dir 'conf.d' ssl_dh_params_file (string) - ssl_dh_params_file configuration parameter + ssl_dh_params_file configuration parameter @@ -1221,96 +1309,71 @@ include_dir 'conf.d' - This parameter can only be set in the postgresql.conf + This parameter can only be set in the postgresql.conf file or on the server command line. - - krb_server_keyfile (string) + + ssl_passphrase_command (string) - krb_server_keyfile configuration parameter + ssl_passphrase_command configuration parameter - Sets the location of the Kerberos server key file. See - - for details. This parameter can only be set in the - postgresql.conf file or on the server command line. + Sets an external command to be invoked when a passphrase for + decrypting an SSL file such as a private key needs to be obtained. By + default, this parameter is empty, which means the built-in prompting + mechanism is used. - - - - - krb_caseins_users (boolean) - - krb_caseins_users configuration parameter - - - - Sets whether GSSAPI user names should be treated - case-insensitively. - The default is off (case sensitive). This parameter can only be - set in the postgresql.conf file or on the server command line. + The command must print the passphrase to the standard output and exit + with code 0. In the parameter value, %p is + replaced by a prompt string. (Write %% for a + literal %.) Note that the prompt string will + probably contain whitespace, so be sure to quote adequately. 
A single + newline is stripped from the end of the output if present. + + + The command does not actually have to prompt the user for a + passphrase. It can read it from a file, obtain it from a keychain + facility, or similar. It is up to the user to make sure the chosen + mechanism is adequately secure. + + + This parameter can only be set in the postgresql.conf + file or on the server command line. - - db_user_namespace (boolean) + + ssl_passphrase_command_supports_reload (boolean) - db_user_namespace configuration parameter + ssl_passphrase_command_supports_reload configuration parameter - This parameter enables per-database user names. It is off by default. - This parameter can only be set in the postgresql.conf - file or on the server command line. + This parameter determines whether the passphrase command set by + ssl_passphrase_command will also be called during a + configuration reload if a key file needs a passphrase. If this + parameter is false (the default), then + ssl_passphrase_command will be ignored during a + reload and the SSL configuration will not be reloaded if a passphrase + is needed. That setting is appropriate for a command that requires a + TTY for prompting, which might not be available when the server is + running. Setting this parameter to true might be appropriate if the + passphrase is obtained from a file, for example. - - If this is on, you should create users as username@dbname. - When username is passed by a connecting client, - @ and the database name are appended to the user - name and that database-specific user name is looked up by the - server. Note that when you create users with names containing - @ within the SQL environment, you will need to - quote the user name. - - - - With this parameter enabled, you can still create ordinary global - users. Simply append @ when specifying the user - name in the client, e.g. joe@. The @ - will be stripped off before the user name is looked up by the - server. - - - - db_user_namespace causes the client's and - server's user name representation to differ. - Authentication checks are always done with the server's user name - so authentication methods must be configured for the - server's user name, not the client's. Because - md5 uses the user name as salt on both the - client and server, md5 cannot be used with - db_user_namespace. + This parameter can only be set in the postgresql.conf + file or on the server command line. - - - - This feature is intended as a temporary measure until a - complete solution is found. At that time, this option will - be removed. - - - @@ -1325,15 +1388,15 @@ include_dir 'conf.d' shared_buffers (integer) - shared_buffers configuration parameter + shared_buffers configuration parameter Sets the amount of memory the database server uses for shared memory buffers. The default is typically 128 megabytes - (128MB), but might be less if your kernel settings will - not support it (as determined during initdb). + (128MB), but might be less if your kernel settings will + not support it (as determined during initdb). This setting must be at least 128 kilobytes. (Non-default values of BLCKSZ change the minimum.) However, settings significantly higher than the minimum are usually needed @@ -1366,33 +1429,58 @@ include_dir 'conf.d' huge_pages (enum) - huge_pages configuration parameter + huge_pages configuration parameter - Enables/disables the use of huge memory pages. Valid values are - try (the default), on, - and off. 
+ Controls whether huge pages are requested for the main shared memory + area. Valid values are try (the default), + on, and off. With + huge_pages set to try, the + server will try to request huge pages, but fall back to the default if + that fails. With on, failure to request huge pages + will prevent the server from starting up. With off, + huge pages will not be requested. - At present, this feature is supported only on Linux. The setting is - ignored on other systems when set to try. + At present, this setting is supported only on Linux and Windows. The + setting is ignored on other systems when set to + try. The use of huge pages results in smaller page tables and less CPU time - spent on memory management, increasing performance. For more details, - see . + spent on memory management, increasing performance. For more details about + using huge pages on Linux, see . + + + + Huge pages are known as large pages on Windows. To use them, you need to + assign the user right Lock Pages in Memory to the Windows user account + that runs PostgreSQL. + You can use Windows Group Policy tool (gpedit.msc) to assign the user right + Lock Pages in Memory. + To start the database server on the command prompt as a standalone process, + not as a Windows service, the command prompt must be run as an administrator or + User Access Control (UAC) must be disabled. When the UAC is enabled, the normal + command prompt revokes the user right Lock Pages in Memory when started. - With huge_pages set to try, - the server will try to use huge pages, but fall back to using - normal allocation if that fails. With on, failure - to use huge pages will prevent the server from starting up. With - off, huge pages will not be used. + Note that this setting only affects the main shared memory area. + Operating systems such as Linux, FreeBSD, and Illumos can also use + huge pages (also known as super pages or + large pages) automatically for normal memory + allocation, without an explicit request from + PostgreSQL. On Linux, this is called + transparent huge pagestransparent + huge pages (THP). That feature has been known to + cause performance degradation with + PostgreSQL for some users on some Linux + versions, so its use is currently discouraged (unlike explicit use of + huge_pages). @@ -1400,7 +1488,7 @@ include_dir 'conf.d' temp_buffers (integer) - temp_buffers configuration parameter + temp_buffers configuration parameter @@ -1408,7 +1496,7 @@ include_dir 'conf.d' Sets the maximum number of temporary buffers used by each database session. These are session-local buffers used only for access to temporary tables. The default is eight megabytes - (8MB). The setting can be changed within individual + (8MB). The setting can be changed within individual sessions, but only before the first use of temporary tables within the session; subsequent attempts to change the value will have no effect on that session. @@ -1416,10 +1504,10 @@ include_dir 'conf.d' A session will allocate temporary buffers as needed up to the limit - given by temp_buffers. The cost of setting a large + given by temp_buffers. The cost of setting a large value in sessions that do not actually need many temporary buffers is only a buffer descriptor, or about 64 bytes, per - increment in temp_buffers. However if a buffer is + increment in temp_buffers. However if a buffer is actually used an additional 8192 bytes will be consumed for it (or in general, BLCKSZ bytes). 
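 Because the setting takes effect only if changed before the first use of a temporary table in the session, a session that expects to work with large temporary tables could, as a sketch (the table name scratch is a placeholder), raise the limit up front:

SET temp_buffers = '64MB';      -- must be issued before the session touches any temporary table
CREATE TEMP TABLE scratch AS
  SELECT g AS i FROM generate_series(1, 100000) AS g;
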
@@ -1429,14 +1517,14 @@ include_dir 'conf.d' max_prepared_transactions (integer) - max_prepared_transactions configuration parameter + max_prepared_transactions configuration parameter Sets the maximum number of transactions that can be in the - prepared state simultaneously (see ). + prepared state simultaneously (see ). Setting this parameter to zero (which is the default) disables the prepared-transaction feature. This parameter can only be set at server start. @@ -1447,7 +1535,7 @@ include_dir 'conf.d' should be set to zero to prevent accidental creation of prepared transactions. If you are using prepared transactions, you will probably want max_prepared_transactions to be at - least as large as , so that every + least as large as , so that every session can have a prepared transaction pending. @@ -1462,14 +1550,14 @@ include_dir 'conf.d' work_mem (integer) - work_mem configuration parameter + work_mem configuration parameter Specifies the amount of memory to be used by internal sort operations and hash tables before writing to temporary disk files. The value - defaults to four megabytes (4MB). + defaults to four megabytes (4MB). Note that for a complex query, several sort or hash operations might be running in parallel; each operation will be allowed to use as much memory as this value specifies before it starts to write data into temporary @@ -1477,10 +1565,10 @@ include_dir 'conf.d' concurrently. Therefore, the total memory used could be many times the value of work_mem; it is necessary to keep this fact in mind when choosing the value. Sort operations are - used for ORDER BY, DISTINCT, and + used for ORDER BY, DISTINCT, and merge joins. Hash tables are used in hash joins, hash-based aggregation, and - hash-based processing of IN subqueries. + hash-based processing of IN subqueries. @@ -1488,15 +1576,15 @@ include_dir 'conf.d' maintenance_work_mem (integer) - maintenance_work_mem configuration parameter + maintenance_work_mem configuration parameter Specifies the maximum amount of memory to be used by maintenance operations, such as VACUUM, CREATE - INDEX, and ALTER TABLE ADD FOREIGN KEY. It defaults - to 64 megabytes (64MB). Since only one of these + INDEX, and ALTER TABLE ADD FOREIGN KEY. It defaults + to 64 megabytes (64MB). Since only one of these operations can be executed at a time by a database session, and an installation normally doesn't have many of them running concurrently, it's safe to set this value significantly larger @@ -1505,49 +1593,10 @@ include_dir 'conf.d' Note that when autovacuum runs, up to - times this memory + times this memory may be allocated, so be careful not to set the default value too high. It may be useful to control for this by separately - setting . - - - - - - replacement_sort_tuples (integer) - - replacement_sort_tuples configuration parameter - - - - - When the number of tuples to be sorted is smaller than this number, - a sort will produce its first output run using replacement selection - rather than quicksort. This may be useful in memory-constrained - environments where tuples that are input into larger sort operations - have a strong physical-to-logical correlation. Note that this does - not include input tuples with an inverse - correlation. It is possible for the replacement selection algorithm - to generate one long run that requires no merging, where use of the - default strategy would result in many runs that must be merged - to produce a final sorted output. This may allow sort - operations to complete sooner. 
- - - The default is 150,000 tuples. Note that higher values are typically - not much more effective, and may be counter-productive, since the - priority queue is sensitive to the size of available CPU cache, whereas - the default strategy sorts runs using a cache - oblivious algorithm. This property allows the default sort - strategy to automatically and transparently make effective use - of available CPU cache. - - - Setting maintenance_work_mem to its default - value usually prevents utility command external sorts (e.g., - sorts used by CREATE INDEX to build B-Tree - indexes) from ever using replacement selection sort, unless the - input tuples are quite wide. + setting . @@ -1555,14 +1604,14 @@ include_dir 'conf.d' autovacuum_work_mem (integer) - autovacuum_work_mem configuration parameter + autovacuum_work_mem configuration parameter Specifies the maximum amount of memory to be used by each autovacuum worker process. It defaults to -1, indicating that - the value of should + the value of should be used instead. The setting has no effect on the behavior of VACUUM when run in other contexts. @@ -1572,26 +1621,26 @@ include_dir 'conf.d' max_stack_depth (integer) - max_stack_depth configuration parameter + max_stack_depth configuration parameter Specifies the maximum safe depth of the server's execution stack. The ideal setting for this parameter is the actual stack size limit - enforced by the kernel (as set by ulimit -s or local + enforced by the kernel (as set by ulimit -s or local equivalent), less a safety margin of a megabyte or so. The safety margin is needed because the stack depth is not checked in every routine in the server, but only in key potentially-recursive routines such as expression evaluation. The default setting is two - megabytes (2MB), which is conservatively small and + megabytes (2MB), which is conservatively small and unlikely to risk crashes. However, it might be too small to allow execution of complex functions. Only superusers can change this setting. - Setting max_stack_depth higher than + Setting max_stack_depth higher than the actual kernel limit will mean that a runaway recursive function can crash an individual backend process. On platforms where PostgreSQL can determine the kernel limit, @@ -1605,25 +1654,25 @@ include_dir 'conf.d' dynamic_shared_memory_type (enum) - dynamic_shared_memory_type configuration parameter + dynamic_shared_memory_type configuration parameter Specifies the dynamic shared memory implementation that the server - should use. Possible values are posix (for POSIX shared - memory allocated using shm_open), sysv - (for System V shared memory allocated via shmget), - windows (for Windows shared memory), mmap - (to simulate shared memory using memory-mapped files stored in the - data directory), and none (to disable this feature). + should use. Possible values are posix (for POSIX shared + memory allocated using shm_open), sysv + (for System V shared memory allocated via shmget), + windows (for Windows shared memory), + and mmap (to simulate shared memory using + memory-mapped files stored in the data directory). Not all values are supported on all platforms; the first supported option is the default for that platform. 
The use of the - mmap option, which is not the default on any platform, + mmap option, which is not the default on any platform, is generally discouraged because the operating system may write modified pages back to disk repeatedly, increasing system I/O load; however, it may be useful for debugging, when the - pg_dynshmem directory is stored on a RAM disk, or when + pg_dynshmem directory is stored on a RAM disk, or when other shared memory facilities are not available. @@ -1639,7 +1688,7 @@ include_dir 'conf.d' temp_file_limit (integer) - temp_file_limit configuration parameter + temp_file_limit configuration parameter @@ -1648,13 +1697,13 @@ include_dir 'conf.d' for temporary files, such as sort and hash temporary files, or the storage file for a held cursor. A transaction attempting to exceed this limit will be canceled. - The value is specified in kilobytes, and -1 (the + The value is specified in kilobytes, and -1 (the default) means no limit. Only superusers can change this setting. This setting constrains the total space used at any instant by all - temporary files used by a given PostgreSQL process. + temporary files used by a given PostgreSQL process. It should be noted that disk space used for explicit temporary tables, as opposed to temporary files used behind-the-scenes in query execution, does not count against this limit. @@ -1672,7 +1721,7 @@ include_dir 'conf.d' max_files_per_process (integer) - max_files_per_process configuration parameter + max_files_per_process configuration parameter @@ -1684,7 +1733,7 @@ include_dir 'conf.d' allow individual processes to open many more files than the system can actually support if many processes all try to open that many files. If you find yourself seeing Too many open - files failures, try reducing this setting. + files failures, try reducing this setting. This parameter can only be set at server start. @@ -1696,8 +1745,8 @@ include_dir 'conf.d' Cost-based Vacuum Delay - During the execution of - and + During the execution of + and commands, the system maintains an internal counter that keeps track of the estimated cost of the various I/O operations that are performed. When the accumulated @@ -1731,7 +1780,7 @@ include_dir 'conf.d' vacuum_cost_delay (integer) - vacuum_cost_delay configuration parameter + vacuum_cost_delay configuration parameter @@ -1749,7 +1798,7 @@ include_dir 'conf.d' When using cost-based vacuuming, appropriate values for - vacuum_cost_delay are usually quite small, perhaps + vacuum_cost_delay are usually quite small, perhaps 10 or 20 milliseconds. Adjusting vacuum's resource consumption is best done by changing the other vacuum cost parameters. 
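      For example, a manual VACUUM could be throttled by giving the delay a
      small nonzero value while leaving the per-page cost parameters described
      next at their defaults; this excerpt is purely illustrative:

          vacuum_cost_delay = 20ms     # 0, the default, disables cost-based delay for manual VACUUM
          vacuum_cost_limit = 200      # accumulated cost at which the sleep is taken (the default value)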
@@ -1759,7 +1808,7 @@ include_dir 'conf.d' vacuum_cost_page_hit (integer) - vacuum_cost_page_hit configuration parameter + vacuum_cost_page_hit configuration parameter @@ -1775,7 +1824,7 @@ include_dir 'conf.d' vacuum_cost_page_miss (integer) - vacuum_cost_page_miss configuration parameter + vacuum_cost_page_miss configuration parameter @@ -1791,7 +1840,7 @@ include_dir 'conf.d' vacuum_cost_page_dirty (integer) - vacuum_cost_page_dirty configuration parameter + vacuum_cost_page_dirty configuration parameter @@ -1807,7 +1856,7 @@ include_dir 'conf.d' vacuum_cost_limit (integer) - vacuum_cost_limit configuration parameter + vacuum_cost_limit configuration parameter @@ -1839,8 +1888,8 @@ include_dir 'conf.d' There is a separate server - process called the background writer, whose function - is to issue writes of dirty (new or modified) shared + process called the background writer, whose function + is to issue writes of dirty (new or modified) shared buffers. It writes shared buffers so server processes handling user queries seldom or never need to wait for a write to occur. However, the background writer does cause a net overall @@ -1855,7 +1904,7 @@ include_dir 'conf.d' bgwriter_delay (integer) - bgwriter_delay configuration parameter + bgwriter_delay configuration parameter @@ -1863,16 +1912,16 @@ include_dir 'conf.d' Specifies the delay between activity rounds for the background writer. In each round the writer issues writes for some number of dirty buffers (controllable by the - following parameters). It then sleeps for bgwriter_delay + following parameters). It then sleeps for bgwriter_delay milliseconds, and repeats. When there are no dirty buffers in the buffer pool, though, it goes into a longer sleep regardless of - bgwriter_delay. The default value is 200 - milliseconds (200ms). Note that on many systems, the + bgwriter_delay. The default value is 200 + milliseconds (200ms). Note that on many systems, the effective resolution of sleep delays is 10 milliseconds; setting - bgwriter_delay to a value that is not a multiple of 10 + bgwriter_delay to a value that is not a multiple of 10 might have the same results as setting it to the next higher multiple of 10. This parameter can only be set in the - postgresql.conf file or on the server command line. + postgresql.conf file or on the server command line. @@ -1880,7 +1929,7 @@ include_dir 'conf.d' bgwriter_lru_maxpages (integer) - bgwriter_lru_maxpages configuration parameter + bgwriter_lru_maxpages configuration parameter @@ -1890,7 +1939,7 @@ include_dir 'conf.d' background writing. (Note that checkpoints, which are managed by a separate, dedicated auxiliary process, are unaffected.) The default value is 100 buffers. - This parameter can only be set in the postgresql.conf + This parameter can only be set in the postgresql.conf file or on the server command line. @@ -1899,7 +1948,7 @@ include_dir 'conf.d' bgwriter_lru_multiplier (floating point) - bgwriter_lru_multiplier configuration parameter + bgwriter_lru_multiplier configuration parameter @@ -1907,18 +1956,18 @@ include_dir 'conf.d' The number of dirty buffers written in each round is based on the number of new buffers that have been needed by server processes during recent rounds. The average recent need is multiplied by - bgwriter_lru_multiplier to arrive at an estimate of the + bgwriter_lru_multiplier to arrive at an estimate of the number of buffers that will be needed during the next round. 
Dirty buffers are written until there are that many clean, reusable buffers - available. (However, no more than bgwriter_lru_maxpages + available. (However, no more than bgwriter_lru_maxpages buffers will be written per round.) - Thus, a setting of 1.0 represents a just in time policy + Thus, a setting of 1.0 represents a just in time policy of writing exactly the number of buffers predicted to be needed. Larger values provide some cushion against spikes in demand, while smaller values intentionally leave writes to be done by server processes. The default is 2.0. - This parameter can only be set in the postgresql.conf + This parameter can only be set in the postgresql.conf file or on the server command line. @@ -1927,7 +1976,7 @@ include_dir 'conf.d' bgwriter_flush_after (integer) - bgwriter_flush_after configuration parameter + bgwriter_flush_after configuration parameter @@ -1940,14 +1989,14 @@ include_dir 'conf.d' the OS writes data back in larger batches in the background. Often that will result in greatly reduced transaction latency, but there also are some cases, especially with workloads that are bigger than - , but smaller than the OS's page + , but smaller than the OS's page cache, where performance might degrade. This setting may have no effect on some platforms. The valid range is between 0, which disables forced writeback, and - 2MB. The default is 512kB on Linux, - 0 elsewhere. (If BLCKSZ is not 8kB, + 2MB. The default is 512kB on Linux, + 0 elsewhere. (If BLCKSZ is not 8kB, the default and maximum values scale proportionally to it.) - This parameter can only be set in the postgresql.conf + This parameter can only be set in the postgresql.conf file or on the server command line. @@ -1970,15 +2019,15 @@ include_dir 'conf.d' effective_io_concurrency (integer) - effective_io_concurrency configuration parameter + effective_io_concurrency configuration parameter Sets the number of concurrent disk I/O operations that - PostgreSQL expects can be executed + PostgreSQL expects can be executed simultaneously. Raising this value will increase the number of I/O - operations that any individual PostgreSQL session + operations that any individual PostgreSQL session attempts to initiate in parallel. The allowed range is 1 to 1000, or zero to disable issuance of asynchronous I/O requests. Currently, this setting only affects bitmap heap scans. @@ -1998,7 +2047,7 @@ include_dir 'conf.d' - Asynchronous I/O depends on an effective posix_fadvise + Asynchronous I/O depends on an effective posix_fadvise function, which some operating systems lack. If the function is not present then setting this parameter to anything but zero will result in an error. On some operating systems (e.g., Solaris), the function @@ -2009,7 +2058,7 @@ include_dir 'conf.d' The default is 1 on supported systems, otherwise 0. This value can be overridden for tables in a particular tablespace by setting the tablespace parameter of the same name (see - ). + ). @@ -2017,7 +2066,7 @@ include_dir 'conf.d' max_worker_processes (integer) - max_worker_processes configuration parameter + max_worker_processes configuration parameter @@ -2035,8 +2084,9 @@ include_dir 'conf.d' When changing this value, consider also adjusting - and - . + , + , and + . 
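      A sketch of enlarging the pool of background workers, from which the
      parallel-query limits described next are drawn (an example value only;
      changing it requires a server restart):

          max_worker_processes = 16    # the default is 8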
@@ -2044,7 +2094,7 @@ include_dir 'conf.d' max_parallel_workers_per_gather (integer) - max_parallel_workers_per_gather configuration parameter + max_parallel_workers_per_gather configuration parameter @@ -2052,8 +2102,8 @@ include_dir 'conf.d' Sets the maximum number of workers that can be started by a single Gather or Gather Merge node. Parallel workers are taken from the pool of processes established by - , limited by - . Note that the requested + , limited by + . Note that the requested number of workers may not actually be available at run time. If this occurs, the plan will run with fewer workers than expected, which may be inefficient. The default value is 2. Setting this value to 0 @@ -2067,8 +2117,8 @@ include_dir 'conf.d' system as an additional user session. This should be taken into account when choosing a value for this setting, as well as when configuring other settings that control resource utilization, such - as . Resource limits such as - work_mem are applied individually to each worker, + as . Resource limits such as + work_mem are applied individually to each worker, which means the total utilization may be much higher across all processes than it would normally be for any single process. For example, a parallel query using 4 workers may use up to 5 times @@ -2078,7 +2128,45 @@ include_dir 'conf.d' For more information on parallel query, see - . + . + + + + + + max_parallel_maintenance_workers (integer) + + max_parallel_maintenance_workers configuration parameter + + + + + Sets the maximum number of parallel workers that can be + started by a single utility command. Currently, the only + parallel utility command that supports the use of parallel + workers is CREATE INDEX, and only when + building a B-tree index. Parallel workers are taken from the + pool of processes established by , limited by . Note that the requested + number of workers may not actually be available at run time. + If this occurs, the utility operation will run with fewer + workers than expected. The default value is 2. Setting this + value to 0 disables the use of parallel workers by utility + commands. + + + + Note that parallel utility commands should not consume + substantially more memory than equivalent non-parallel + operations. This strategy differs from that of parallel + query, where resource limits generally apply per worker + process. Parallel utility commands treat the resource limit + maintenance_work_mem as a limit to be applied to + the entire utility command, regardless of the number of + parallel worker processes. However, parallel utility + commands may still consume substantially more CPU resources + and I/O bandwidth. @@ -2086,17 +2174,18 @@ include_dir 'conf.d' max_parallel_workers (integer) - max_parallel_workers configuration parameter + max_parallel_workers configuration parameter Sets the maximum number of workers that the system can support for - parallel queries. The default value is 8. When increasing or + parallel operations. The default value is 8. When increasing or decreasing this value, consider also adjusting - . + and + . Also, note that a setting for this value which is higher than - will have no effect, + will have no effect, since parallel workers are taken from the pool of worker processes established by that setting. 
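      Continuing that sketch, the parallel-worker limits could then be raised
      together; all of these workers still come out of the
      max_worker_processes pool, and the values below are examples only:

          max_parallel_workers = 12               # cap for workers used by parallel operations
          max_parallel_workers_per_gather = 4     # per Gather / Gather Merge node
          max_parallel_maintenance_workers = 4    # e.g. parallel B-tree builds by CREATE INDEX

      Remember that each parallel worker is charged its own work_mem and
      similar per-process limits, so raising these caps multiplies the
      possible resource usage of a single query.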
@@ -2106,7 +2195,7 @@ include_dir 'conf.d' backend_flush_after (integer) - backend_flush_after configuration parameter + backend_flush_after configuration parameter @@ -2119,11 +2208,11 @@ include_dir 'conf.d' checkpoint, or when the OS writes data back in larger batches in the background. Often that will result in greatly reduced transaction latency, but there also are some cases, especially with workloads - that are bigger than , but smaller + that are bigger than , but smaller than the OS's page cache, where performance might degrade. This setting may have no effect on some platforms. The valid range is between 0, which disables forced writeback, - and 2MB. The default is 0, i.e., no + and 2MB. The default is 0, i.e., no forced writeback. (If BLCKSZ is not 8kB, the maximum value scales proportionally to it.) @@ -2133,13 +2222,13 @@ include_dir 'conf.d' old_snapshot_threshold (integer) - old_snapshot_threshold configuration parameter + old_snapshot_threshold configuration parameter Sets the minimum time that a snapshot can be used without risk of a - snapshot too old error occurring when using the snapshot. + snapshot too old error occurring when using the snapshot. This parameter can only be set at server start. @@ -2154,12 +2243,12 @@ include_dir 'conf.d' - A value of -1 disables this feature, and is the default. + A value of -1 disables this feature, and is the default. Useful values for production work probably range from a small number of hours to a few days. The setting will be coerced to a granularity - of minutes, and small numbers (such as 0 or - 1min) are only allowed because they may sometimes be - useful for testing. While a setting as high as 60d is + of minutes, and small numbers (such as 0 or + 1min) are only allowed because they may sometimes be + useful for testing. While a setting as high as 60d is allowed, please note that in many workloads extreme bloat or transaction ID wraparound may occur in much shorter time frames. @@ -2167,10 +2256,10 @@ include_dir 'conf.d' When this feature is enabled, freed space at the end of a relation cannot be released to the operating system, since that could remove - information needed to detect the snapshot too old + information needed to detect the snapshot too old condition. All space allocated to a relation remains associated with that relation for reuse only within that relation unless explicitly - freed (for example, with VACUUM FULL). + freed (for example, with VACUUM FULL). @@ -2182,7 +2271,7 @@ include_dir 'conf.d' Some tables cannot safely be vacuumed early, and so will not be affected by this setting, such as system catalogs. For such tables this setting will neither reduce bloat nor create a possibility - of a snapshot too old error on scanning. + of a snapshot too old error on scanning. @@ -2195,7 +2284,7 @@ include_dir 'conf.d' For additional information on tuning these settings, - see . + see . @@ -2205,45 +2294,45 @@ include_dir 'conf.d' wal_level (enum) - wal_level configuration parameter + wal_level configuration parameter - wal_level determines how much information is written to - the WAL. The default value is replica, which writes enough + wal_level determines how much information is written to + the WAL. The default value is replica, which writes enough data to support WAL archiving and replication, including running - read-only queries on a standby server. minimal removes all + read-only queries on a standby server. 
minimal removes all logging except the information required to recover from a crash or immediate shutdown. Finally, - logical adds information necessary to support logical + logical adds information necessary to support logical decoding. Each level includes the information logged at all lower levels. This parameter can only be set at server start. - In minimal level, WAL-logging of some bulk + In minimal level, WAL-logging of some bulk operations can be safely skipped, which can make those - operations much faster (see ). + operations much faster (see ). Operations in which this optimization can be applied include: - CREATE TABLE AS - CREATE INDEX - CLUSTER - COPY into tables that were created or truncated in the same + CREATE TABLE AS + CREATE INDEX + CLUSTER + COPY into tables that were created or truncated in the same transaction But minimal WAL does not contain enough information to reconstruct the - data from a base backup and the WAL logs, so replica or + data from a base backup and the WAL logs, so replica or higher must be used to enable WAL archiving - () and streaming replication. + () and streaming replication. - In logical level, the same information is logged as - with replica, plus information needed to allow + In logical level, the same information is logged as + with replica, plus information needed to allow extracting logical change sets from the WAL. Using a level of - logical will increase the WAL volume, particularly if many + logical will increase the WAL volume, particularly if many tables are configured for REPLICA IDENTITY FULL and - many UPDATE and DELETE statements are + many UPDATE and DELETE statements are executed. @@ -2257,15 +2346,15 @@ include_dir 'conf.d' fsync (boolean) - fsync configuration parameter + fsync configuration parameter - If this parameter is on, the PostgreSQL server + If this parameter is on, the PostgreSQL server will try to make sure that updates are physically written to - disk, by issuing fsync() system calls or various - equivalent methods (see ). + disk, by issuing fsync() system calls or various + equivalent methods (see ). This ensures that the database cluster can recover to a consistent state after an operating system or hardware crash. @@ -2296,22 +2385,22 @@ include_dir 'conf.d' off to on, it is necessary to force all modified buffers in the kernel to durable storage. This can be done while the cluster is shutdown or while fsync is on by running initdb - --sync-only, running sync, unmounting the + --sync-only, running sync, unmounting the file system, or rebooting the server. - In many situations, turning off + In many situations, turning off for noncritical transactions can provide much of the potential performance benefit of turning off fsync, without the attendant risks of data corruption. - fsync can only be set in the postgresql.conf + fsync can only be set in the postgresql.conf file or on the server command line. If you turn this parameter off, also consider turning off - . + . @@ -2319,60 +2408,60 @@ include_dir 'conf.d' synchronous_commit (enum) - synchronous_commit configuration parameter + synchronous_commit configuration parameter Specifies whether transaction commit will wait for WAL records - to be written to disk before the command returns a success - indication to the client. Valid values are on, - remote_apply, remote_write, local, - and off. The default, and safe, setting - is on. When off, there can be a delay between + to be written to disk before the command returns a success + indication to the client. 
Valid values are on, + remote_apply, remote_write, local, + and off. The default, and safe, setting + is on. When off, there can be a delay between when success is reported to the client and when the transaction is really guaranteed to be safe against a server crash. (The maximum - delay is three times .) Unlike - , setting this parameter to off + delay is three times .) Unlike + , setting this parameter to off does not create any risk of database inconsistency: an operating system or database crash might result in some recent allegedly-committed transactions being lost, but the database state will be just the same as if those transactions had - been aborted cleanly. So, turning synchronous_commit off + been aborted cleanly. So, turning synchronous_commit off can be a useful alternative when performance is more important than exact certainty about the durability of a transaction. For more - discussion see . + discussion see . - If is non-empty, this + If is non-empty, this parameter also controls whether or not transaction commits will wait for their WAL records to be replicated to the standby server(s). - When set to on, commits will wait until replies + When set to on, commits will wait until replies from the current synchronous standby(s) indicate they have received the commit record of the transaction and flushed it to disk. This ensures the transaction will not be lost unless both the primary and all synchronous standbys suffer corruption of their database storage. - When set to remote_apply, commits will wait until replies + When set to remote_apply, commits will wait until replies from the current synchronous standby(s) indicate they have received the commit record of the transaction and applied it, so that it has become visible to queries on the standby(s). - When set to remote_write, commits will wait until replies + When set to remote_write, commits will wait until replies from the current synchronous standby(s) indicate they have received the commit record of the transaction and written it out to their operating system. This setting is sufficient to ensure data preservation even if a standby instance of - PostgreSQL were to crash, but not if the standby + PostgreSQL were to crash, but not if the standby suffers an operating-system-level crash, since the data has not necessarily reached stable storage on the standby. - Finally, the setting local causes commits to wait for + Finally, the setting local causes commits to wait for local flush to disk, but not for replication. This is not usually desirable when synchronous replication is in use, but is provided for completeness. - If synchronous_standby_names is empty, the settings - on, remote_apply, remote_write - and local all provide the same synchronization level: + If synchronous_standby_names is empty, the settings + on, remote_apply, remote_write + and local all provide the same synchronization level: transaction commits only wait for local flush to disk. @@ -2382,7 +2471,7 @@ include_dir 'conf.d' transactions commit synchronously and others asynchronously. For example, to make a single multistatement transaction commit asynchronously when the default is the opposite, issue SET - LOCAL synchronous_commit TO OFF within the transaction. + LOCAL synchronous_commit TO OFF within the transaction. 
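      As an example, a primary that lists synchronous standbys might
      strengthen the commit guarantee as follows (illustrative only;
      remote_apply behaves just like on unless
      synchronous_standby_names is non-empty):

          synchronous_commit = remote_apply   # wait until synchronous standbys have applied the commit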
@@ -2390,7 +2479,7 @@ include_dir 'conf.d' wal_sync_method (enum) - wal_sync_method configuration parameter + wal_sync_method configuration parameter @@ -2403,41 +2492,41 @@ include_dir 'conf.d' - open_datasync (write WAL files with open() option O_DSYNC) + open_datasync (write WAL files with open() option O_DSYNC) - fdatasync (call fdatasync() at each commit) + fdatasync (call fdatasync() at each commit) - fsync (call fsync() at each commit) + fsync (call fsync() at each commit) - fsync_writethrough (call fsync() at each commit, forcing write-through of any disk write cache) + fsync_writethrough (call fsync() at each commit, forcing write-through of any disk write cache) - open_sync (write WAL files with open() option O_SYNC) + open_sync (write WAL files with open() option O_SYNC) - The open_* options also use O_DIRECT if available. + The open_* options also use O_DIRECT if available. Not all of these choices are available on all platforms. The default is the first method in the above list that is supported - by the platform, except that fdatasync is the default on + by the platform, except that fdatasync is the default on Linux. The default is not necessarily ideal; it might be necessary to change this setting or other aspects of your system configuration in order to create a crash-safe configuration or achieve optimal performance. - These aspects are discussed in . - This parameter can only be set in the postgresql.conf + These aspects are discussed in . + This parameter can only be set in the postgresql.conf file or on the server command line. @@ -2446,12 +2535,12 @@ include_dir 'conf.d' full_page_writes (boolean) - full_page_writes configuration parameter + full_page_writes configuration parameter - When this parameter is on, the PostgreSQL server + When this parameter is on, the PostgreSQL server writes the entire content of each disk page to WAL during the first modification of that page after a checkpoint. This is needed because @@ -2479,13 +2568,13 @@ include_dir 'conf.d' Turning off this parameter does not affect use of WAL archiving for point-in-time recovery (PITR) - (see ). + (see ). - This parameter can only be set in the postgresql.conf + This parameter can only be set in the postgresql.conf file or on the server command line. - The default is on. + The default is on. @@ -2493,12 +2582,12 @@ include_dir 'conf.d' wal_log_hints (boolean) - wal_log_hints configuration parameter + wal_log_hints configuration parameter - When this parameter is on, the PostgreSQL + When this parameter is on, the PostgreSQL server writes the entire content of each disk page to WAL during the first modification of that page after a checkpoint, even for non-critical modifications of so-called hint bits. @@ -2512,7 +2601,7 @@ include_dir 'conf.d' - This parameter can only be set at server start. The default value is off. + This parameter can only be set at server start. The default value is off. @@ -2520,16 +2609,16 @@ include_dir 'conf.d' wal_compression (boolean) - wal_compression configuration parameter + wal_compression configuration parameter - When this parameter is on, the PostgreSQL + When this parameter is on, the PostgreSQL server compresses a full page image written to WAL when - is on or during a base backup. + is on or during a base backup. A compressed page image will be decompressed during WAL replay. - The default value is off. + The default value is off. Only superusers can change this setting. 
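      For illustration, the page-image-related settings might be combined like
      this; the values simply restate the behaviors described above and are
      not tuning advice:

          full_page_writes = on      # the default; guards against partially written pages after a crash
          wal_log_hints = off        # the default; 'on' also logs pages whose only change is a hint bit
          wal_compression = on       # compress full-page images in WAL; only superusers can change it at run time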
@@ -2545,14 +2634,14 @@ include_dir 'conf.d' wal_buffers (integer) - wal_buffers configuration parameter + wal_buffers configuration parameter The amount of shared memory used for WAL data that has not yet been written to disk. The default setting of -1 selects a size equal to - 1/32nd (about 3%) of , but not less + 1/32nd (about 3%) of , but not less than 64kB nor more than the size of one WAL segment, typically 16MB. This value can be set manually if the automatic choice is too large or too small, @@ -2577,24 +2666,24 @@ include_dir 'conf.d' wal_writer_delay (integer) - wal_writer_delay configuration parameter + wal_writer_delay configuration parameter Specifies how often the WAL writer flushes WAL. After flushing WAL it - sleeps for wal_writer_delay milliseconds, unless woken up + sleeps for wal_writer_delay milliseconds, unless woken up by an asynchronously committing transaction. If the last flush - happened less than wal_writer_delay milliseconds ago and - less than wal_writer_flush_after bytes of WAL have been + happened less than wal_writer_delay milliseconds ago and + less than wal_writer_flush_after bytes of WAL have been produced since, then WAL is only written to the operating system, not flushed to disk. - The default value is 200 milliseconds (200ms). Note that + The default value is 200 milliseconds (200ms). Note that on many systems, the effective resolution of sleep delays is 10 - milliseconds; setting wal_writer_delay to a value that is + milliseconds; setting wal_writer_delay to a value that is not a multiple of 10 might have the same results as setting it to the next higher multiple of 10. This parameter can only be set in the - postgresql.conf file or on the server command line. + postgresql.conf file or on the server command line. @@ -2602,19 +2691,19 @@ include_dir 'conf.d' wal_writer_flush_after (integer) - wal_writer_flush_after configuration parameter + wal_writer_flush_after configuration parameter Specifies how often the WAL writer flushes WAL. If the last flush - happened less than wal_writer_delay milliseconds ago and - less than wal_writer_flush_after bytes of WAL have been + happened less than wal_writer_delay milliseconds ago and + less than wal_writer_flush_after bytes of WAL have been produced since, then WAL is only written to the operating system, not - flushed to disk. If wal_writer_flush_after is set - to 0 then WAL data is flushed immediately. The default is + flushed to disk. If wal_writer_flush_after is set + to 0 then WAL data is flushed immediately. The default is 1MB. This parameter can only be set in the - postgresql.conf file or on the server command line. + postgresql.conf file or on the server command line. @@ -2622,7 +2711,7 @@ include_dir 'conf.d' commit_delay (integer) - commit_delay configuration parameter + commit_delay configuration parameter @@ -2639,15 +2728,15 @@ include_dir 'conf.d' commit_siblings other transactions are active when a flush is about to be initiated. Also, no delays are performed if fsync is disabled. - The default commit_delay is zero (no delay). + The default commit_delay is zero (no delay). Only superusers can change this setting. - In PostgreSQL releases prior to 9.3, + In PostgreSQL releases prior to 9.3, commit_delay behaved differently and was much less effective: it affected only commits, rather than all WAL flushes, and waited for the entire configured delay even if the WAL flush - was completed sooner. Beginning in PostgreSQL 9.3, + was completed sooner. 
Beginning in PostgreSQL 9.3, the first process that becomes ready to flush waits for the configured interval, while subsequent processes wait only until the leader completes the flush operation. @@ -2658,13 +2747,13 @@ include_dir 'conf.d' commit_siblings (integer) - commit_siblings configuration parameter + commit_siblings configuration parameter Minimum number of concurrent open transactions to require - before performing the commit_delay delay. A larger + before performing the commit_delay delay. A larger value makes it more probable that at least one other transaction will become ready to commit during the delay interval. The default is five transactions. @@ -2681,17 +2770,17 @@ include_dir 'conf.d' checkpoint_timeout (integer) - checkpoint_timeout configuration parameter + checkpoint_timeout configuration parameter Maximum time between automatic WAL checkpoints, in seconds. The valid range is between 30 seconds and one day. - The default is five minutes (5min). + The default is five minutes (5min). Increasing this parameter can increase the amount of time needed for crash recovery. - This parameter can only be set in the postgresql.conf + This parameter can only be set in the postgresql.conf file or on the server command line. @@ -2700,14 +2789,14 @@ include_dir 'conf.d' checkpoint_completion_target (floating point) - checkpoint_completion_target configuration parameter + checkpoint_completion_target configuration parameter Specifies the target of checkpoint completion, as a fraction of total time between checkpoints. The default is 0.5. - This parameter can only be set in the postgresql.conf + This parameter can only be set in the postgresql.conf file or on the server command line. @@ -2716,7 +2805,7 @@ include_dir 'conf.d' checkpoint_flush_after (integer) - checkpoint_flush_after configuration parameter + checkpoint_flush_after configuration parameter @@ -2729,14 +2818,14 @@ include_dir 'conf.d' checkpoint, or when the OS writes data back in larger batches in the background. Often that will result in greatly reduced transaction latency, but there also are some cases, especially with workloads - that are bigger than , but smaller + that are bigger than , but smaller than the OS's page cache, where performance might degrade. This setting may have no effect on some platforms. The valid range is between 0, which disables forced writeback, - and 2MB. The default is 256kB on - Linux, 0 elsewhere. (If BLCKSZ is not + and 2MB. The default is 256kB on + Linux, 0 elsewhere. (If BLCKSZ is not 8kB, the default and maximum values scale proportionally to it.) - This parameter can only be set in the postgresql.conf + This parameter can only be set in the postgresql.conf file or on the server command line. @@ -2745,19 +2834,19 @@ include_dir 'conf.d' checkpoint_warning (integer) - checkpoint_warning configuration parameter + checkpoint_warning configuration parameter Write a message to the server log if checkpoints caused by - the filling of checkpoint segment files happen closer together + the filling of WAL segment files happen closer together than this many seconds (which suggests that - max_wal_size ought to be raised). The default is - 30 seconds (30s). Zero disables the warning. + max_wal_size ought to be raised). The default is + 30 seconds (30s). Zero disables the warning. No warnings will be generated if checkpoint_timeout is less than checkpoint_warning. 
- This parameter can only be set in the postgresql.conf + This parameter can only be set in the postgresql.conf file or on the server command line. @@ -2766,19 +2855,19 @@ include_dir 'conf.d' max_wal_size (integer) - max_wal_size configuration parameter + max_wal_size configuration parameter Maximum size to let the WAL grow to between automatic WAL checkpoints. This is a soft limit; WAL size can exceed - max_wal_size under special circumstances, like - under heavy load, a failing archive_command, or a high - wal_keep_segments setting. The default is 1 GB. + max_wal_size under special circumstances, like + under heavy load, a failing archive_command, or a high + wal_keep_segments setting. The default is 1 GB. Increasing this parameter can increase the amount of time needed for crash recovery. - This parameter can only be set in the postgresql.conf + This parameter can only be set in the postgresql.conf file or on the server command line. @@ -2787,7 +2876,7 @@ include_dir 'conf.d' min_wal_size (integer) - min_wal_size configuration parameter + min_wal_size configuration parameter @@ -2797,7 +2886,7 @@ include_dir 'conf.d' This can be used to ensure that enough WAL space is reserved to handle spikes in WAL usage, for example when running large batch jobs. The default is 80 MB. - This parameter can only be set in the postgresql.conf + This parameter can only be set in the postgresql.conf file or on the server command line. @@ -2812,29 +2901,29 @@ include_dir 'conf.d' archive_mode (enum) - archive_mode configuration parameter + archive_mode configuration parameter - When archive_mode is enabled, completed WAL segments + When archive_mode is enabled, completed WAL segments are sent to archive storage by setting - . In addition to off, - to disable, there are two modes: on, and - always. During normal operation, there is no - difference between the two modes, but when set to always + . In addition to off, + to disable, there are two modes: on, and + always. During normal operation, there is no + difference between the two modes, but when set to always the WAL archiver is enabled also during archive recovery or standby - mode. In always mode, all files restored from the archive + mode. In always mode, all files restored from the archive or streamed with streaming replication will be archived (again). See - for details. + for details. - archive_mode and archive_command are - separate variables so that archive_command can be + archive_mode and archive_command are + separate variables so that archive_command can be changed without leaving archiving mode. This parameter can only be set at server start. - archive_mode cannot be enabled when - wal_level is set to minimal. + archive_mode cannot be enabled when + wal_level is set to minimal. @@ -2842,32 +2931,32 @@ include_dir 'conf.d' archive_command (string) - archive_command configuration parameter + archive_command configuration parameter The local shell command to execute to archive a completed WAL file - segment. Any %p in the string is + segment. Any %p in the string is replaced by the path name of the file to archive, and any - %f is replaced by only the file name. + %f is replaced by only the file name. (The path name is relative to the working directory of the server, i.e., the cluster's data directory.) - Use %% to embed an actual % character in the + Use %% to embed an actual % character in the command. It is important for the command to return a zero exit status only if it succeeds. For more information see - . + . 
- This parameter can only be set in the postgresql.conf + This parameter can only be set in the postgresql.conf file or on the server command line. It is ignored unless - archive_mode was enabled at server start. - If archive_command is an empty string (the default) while - archive_mode is enabled, WAL archiving is temporarily + archive_mode was enabled at server start. + If archive_command is an empty string (the default) while + archive_mode is enabled, WAL archiving is temporarily disabled, but the server continues to accumulate WAL segment files in the expectation that a command will soon be provided. Setting - archive_command to a command that does nothing but - return true, e.g. /bin/true (REM on + archive_command to a command that does nothing but + return true, e.g. /bin/true (REM on Windows), effectively disables archiving, but also breaks the chain of WAL files needed for archive recovery, so it should only be used in unusual circumstances. @@ -2878,17 +2967,17 @@ include_dir 'conf.d' archive_timeout (integer) - archive_timeout configuration parameter + archive_timeout configuration parameter - The is only invoked for + The is only invoked for completed WAL segments. Hence, if your server generates little WAL traffic (or has slack periods where it does so), there could be a long delay between the completion of a transaction and its safe recording in archive storage. To limit how old unarchived - data can be, you can set archive_timeout to force the + data can be, you can set archive_timeout to force the server to switch to a new WAL segment file periodically. When this parameter is greater than zero, the server will switch to a new segment file whenever this many seconds have elapsed since the last @@ -2897,13 +2986,13 @@ include_dir 'conf.d' no database activity). Note that archived files that are closed early due to a forced switch are still the same length as completely full files. Therefore, it is unwise to use a very short - archive_timeout — it will bloat your archive - storage. archive_timeout settings of a minute or so are + archive_timeout — it will bloat your archive + storage. archive_timeout settings of a minute or so are usually reasonable. You should consider using streaming replication, instead of archiving, if you want data to be copied off the master server more quickly than that. This parameter can only be set in the - postgresql.conf file or on the server command line. + postgresql.conf file or on the server command line. @@ -2918,19 +3007,19 @@ include_dir 'conf.d' These settings control the behavior of the built-in - streaming replication feature (see - ). Servers will be either a - Master or a Standby server. Masters can send data, while Standby(s) + streaming replication feature (see + ). Servers will be either a + master or a standby server. Masters can send data, while standbys are always receivers of replicated data. When cascading replication - (see ) is used, Standby server(s) + (see ) is used, standby servers can also be senders, as well as receivers. - Parameters are mainly for Sending and Standby servers, though some - parameters have meaning only on the Master server. Settings may vary + Parameters are mainly for sending and standby servers, though some + parameters have meaning only on the master server. Settings may vary across the cluster without problems if that is required. 
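      Before turning to the sending- and standby-server parameters below, a
      hypothetical archiving setup on the primary could look like the
      following; the copy command and the /mnt/archive path are placeholders
      rather than recommendations:

          wal_level = replica                         # minimal would prevent archive_mode from being enabled
          archive_mode = on
          archive_command = 'cp %p /mnt/archive/%f'   # must return a zero exit status only on success
          archive_timeout = 60s                       # force a segment switch at least once a minute

      A very small archive_timeout bloats the archive, as noted above, since
      segments switched early are still full length.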
- Sending Server(s) + Sending Servers These parameters can be set on any server that is @@ -2945,7 +3034,7 @@ include_dir 'conf.d' max_wal_senders (integer) - max_wal_senders configuration parameter + max_wal_senders configuration parameter @@ -2955,14 +3044,17 @@ include_dir 'conf.d' maximum number of simultaneously running WAL sender processes). The default is 10. The value 0 means replication is disabled. WAL sender processes count towards the total number - of connections, so the parameter cannot be set higher than - . Abrupt streaming client - disconnection might cause an orphaned connection slot until + of connections, so this parameter's value must be less than + minus + . + Abrupt streaming client disconnection might leave an orphaned + connection slot behind until a timeout is reached, so this parameter should be set slightly higher than the maximum number of expected clients so disconnected clients can immediately reconnect. This parameter can only - be set at server start. wal_level must be set to - replica or higher to allow connections from standby + be set at server start. + Also, wal_level must be set to + replica or higher to allow connections from standby servers. @@ -2971,19 +3063,20 @@ include_dir 'conf.d' max_replication_slots (integer) - max_replication_slots configuration parameter + max_replication_slots configuration parameter Specifies the maximum number of replication slots - (see ) that the server + (see ) that the server can support. The default is 10. This parameter can only be set at server start. - wal_level must be set - to replica or higher to allow replication slots to - be used. Setting it to a lower value than the number of currently + Setting it to a lower value than the number of currently existing replication slots will prevent the server from starting. + Also, wal_level must be set + to replica or higher to allow replication slots to + be used. @@ -2991,17 +3084,17 @@ include_dir 'conf.d' wal_keep_segments (integer) - wal_keep_segments configuration parameter + wal_keep_segments configuration parameter Specifies the minimum number of past log file segments kept in the - pg_wal + pg_wal directory, in case a standby server needs to fetch them for streaming replication. Each segment is normally 16 megabytes. If a standby server connected to the sending server falls behind by more than - wal_keep_segments segments, the sending server might remove + wal_keep_segments segments, the sending server might remove a WAL segment still needed by the standby, in which case the replication connection will be terminated. Downstream connections will also eventually fail as a result. (However, the standby @@ -3011,15 +3104,15 @@ include_dir 'conf.d' This sets only the minimum number of segments retained in - pg_wal; the system might need to retain more segments + pg_wal; the system might need to retain more segments for WAL archival or to recover from a checkpoint. If - wal_keep_segments is zero (the default), the system + wal_keep_segments is zero (the default), the system doesn't keep any extra segments for standby purposes, so the number of old WAL segments available to standby servers is a function of the location of the previous checkpoint and status of WAL archiving. This parameter can only be set in the - postgresql.conf file or on the server command line. + postgresql.conf file or on the server command line. 
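      As a sketch, a sending server expecting a handful of standbys might be
      configured like this (example values):

          max_wal_senders = 10          # the default; keep it below max_connections minus superuser_reserved_connections
          max_replication_slots = 10    # the default; fewer slots than currently exist prevents startup
          wal_keep_segments = 64        # roughly 1 GB of 16 MB segments retained for lagging standbys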
@@ -3027,7 +3120,7 @@ include_dir 'conf.d' wal_sender_timeout (integer) - wal_sender_timeout configuration parameter + wal_sender_timeout configuration parameter @@ -3035,10 +3128,14 @@ include_dir 'conf.d' Terminate replication connections that are inactive longer than the specified number of milliseconds. This is useful for the sending server to detect a standby crash or network outage. - A value of zero disables the timeout mechanism. This parameter - can only be set in - the postgresql.conf file or on the server command line. - The default value is 60 seconds. + A value of zero disables the timeout mechanism. The default value + is 60 seconds. With a cluster distributed across multiple geographic + locations, using different values per location brings more flexibility + in the cluster management. A smaller value is useful for faster + failure detection with a standby having a low-latency network + connection, and a larger value helps in judging better the health + of a standby if located on a remote location, with a high-latency + network connection. @@ -3046,13 +3143,13 @@ include_dir 'conf.d' track_commit_timestamp (boolean) - track_commit_timestamp configuration parameter + track_commit_timestamp configuration parameter Record commit time of transactions. This parameter - can only be set in postgresql.conf file or on the server + can only be set in postgresql.conf file or on the server command line. The default value is off. @@ -3068,9 +3165,9 @@ include_dir 'conf.d' These parameters can be set on the master/primary server that is to send replication data to one or more standby servers. Note that in addition to these parameters, - must be set appropriately on the master + must be set appropriately on the master server, and optionally WAL archiving can be enabled as - well (see ). + well (see ). The values of these parameters on standby servers are irrelevant, although you may wish to set them there in preparation for the possibility of a standby becoming the master. @@ -3081,31 +3178,31 @@ include_dir 'conf.d' synchronous_standby_names (string) - synchronous_standby_names configuration parameter + synchronous_standby_names configuration parameter Specifies a list of standby servers that can support - synchronous replication, as described in - . + synchronous replication, as described in + . There will be one or more active synchronous standbys; transactions waiting for commit will be allowed to proceed after these standby servers confirm receipt of their data. The synchronous standbys will be those whose names appear in this list, and that are both currently connected and streaming data in real-time - (as shown by a state of streaming in the - - pg_stat_replication view). + (as shown by a state of streaming in the pg_stat_replication + view). Specifying more than one synchronous standby can allow for very high availability and protection against data loss. The name of a standby server for this purpose is the - application_name setting of the standby, as set in the + application_name setting of the standby, as set in the standby's connection information. In case of a physical replication - standby, this should be set in the primary_conninfo + standby, this should be set in the primary_conninfo setting in recovery.conf; the default is walreceiver. For logical replication, this can be set in the connection information of the subscription, and it @@ -3125,54 +3222,54 @@ ANY num_sync ( standby_name is the name of a standby server. 
- FIRST and ANY specify the method to choose + FIRST and ANY specify the method to choose synchronous standbys from the listed servers. - The keyword FIRST, coupled with + The keyword FIRST, coupled with num_sync, specifies a priority-based synchronous replication and makes transaction commits wait until their WAL records are replicated to num_sync synchronous standbys chosen based on their priorities. For example, a setting of - FIRST 3 (s1, s2, s3, s4) will cause each commit to wait for + FIRST 3 (s1, s2, s3, s4) will cause each commit to wait for replies from three higher-priority standbys chosen from standby servers - s1, s2, s3 and s4. + s1, s2, s3 and s4. The standbys whose names appear earlier in the list are given higher priority and will be considered as synchronous. Other standby servers appearing later in this list represent potential synchronous standbys. If any of the current synchronous standbys disconnects for whatever reason, it will be replaced immediately with the next-highest-priority - standby. The keyword FIRST is optional. + standby. The keyword FIRST is optional. - The keyword ANY, coupled with + The keyword ANY, coupled with num_sync, specifies a quorum-based synchronous replication and makes transaction commits - wait until their WAL records are replicated to at least + wait until their WAL records are replicated to at least num_sync listed standbys. - For example, a setting of ANY 3 (s1, s2, s3, s4) will cause + For example, a setting of ANY 3 (s1, s2, s3, s4) will cause each commit to proceed as soon as at least any three standbys of - s1, s2, s3 and s4 + s1, s2, s3 and s4 reply. - FIRST and ANY are case-insensitive. If these + FIRST and ANY are case-insensitive. If these keywords are used as the name of a standby server, its standby_name must be double-quoted. - The third syntax was used before PostgreSQL + The third syntax was used before PostgreSQL version 9.6 and is still supported. It's the same as the first syntax - with FIRST and + with FIRST and num_sync equal to 1. - For example, FIRST 1 (s1, s2) and s1, s2 have - the same meaning: either s1 or s2 is chosen + For example, FIRST 1 (s1, s2) and s1, s2 have + the same meaning: either s1 or s2 is chosen as a synchronous standby. - The special entry * matches any standby name. + The special entry * matches any standby name. There is no mechanism to enforce uniqueness of standby names. In case @@ -3183,7 +3280,7 @@ ANY num_sync ( standby_name should have the form of a valid SQL identifier, unless it - is *. You can use double-quoting if necessary. But note + is *. You can use double-quoting if necessary. But note that standby_names are compared to standby application names case-insensitively, whether double-quoted or not. @@ -3195,11 +3292,11 @@ ANY num_sync ( parameter to - local or off. + parameter to + local or off. - This parameter can only be set in the postgresql.conf + This parameter can only be set in the postgresql.conf file or on the server command line. @@ -3208,33 +3305,33 @@ ANY num_sync ( vacuum_defer_cleanup_age (integer) - vacuum_defer_cleanup_age configuration parameter + vacuum_defer_cleanup_age configuration parameter - Specifies the number of transactions by which VACUUM and - HOT updates will defer cleanup of dead row versions. The + Specifies the number of transactions by which VACUUM and + HOT updates will defer cleanup of dead row versions. 
The default is zero transactions, meaning that dead row versions can be removed as soon as possible, that is, as soon as they are no longer visible to any open transaction. You may wish to set this to a non-zero value on a primary server that is supporting hot standby - servers, as described in . This allows + servers, as described in . This allows more time for queries on the standby to complete without incurring conflicts due to early cleanup of rows. However, since the value is measured in terms of number of write transactions occurring on the primary server, it is difficult to predict just how much additional grace time will be made available to standby queries. - This parameter can only be set in the postgresql.conf + This parameter can only be set in the postgresql.conf file or on the server command line. - You should also consider setting hot_standby_feedback + You should also consider setting hot_standby_feedback on standby server(s) as an alternative to using this parameter. This does not prevent cleanup of dead rows which have reached the age - specified by old_snapshot_threshold. + specified by old_snapshot_threshold. @@ -3256,13 +3353,13 @@ ANY num_sync ( hot_standby (boolean) - hot_standby configuration parameter + hot_standby configuration parameter Specifies whether or not you can connect and run queries during - recovery, as described in . + recovery, as described in . The default value is on. This parameter can only be set at server start. It only has effect during archive recovery or in standby mode. @@ -3273,7 +3370,7 @@ ANY num_sync ( max_standby_archive_delay (integer) - max_standby_archive_delay configuration parameter + max_standby_archive_delay configuration parameter @@ -3281,17 +3378,17 @@ ANY num_sync ( . - max_standby_archive_delay applies when WAL data is + . + max_standby_archive_delay applies when WAL data is being read from WAL archive (and is therefore not current). The default is 30 seconds. Units are milliseconds if not specified. A value of -1 allows the standby to wait forever for conflicting queries to complete. - This parameter can only be set in the postgresql.conf + This parameter can only be set in the postgresql.conf file or on the server command line. - Note that max_standby_archive_delay is not the same as the + Note that max_standby_archive_delay is not the same as the maximum length of time a query can run before cancellation; rather it is the maximum total time allowed to apply any one WAL segment's data. Thus, if one query has resulted in significant delay earlier in the @@ -3304,7 +3401,7 @@ ANY num_sync ( max_standby_streaming_delay (integer) - max_standby_streaming_delay configuration parameter + max_standby_streaming_delay configuration parameter @@ -3312,17 +3409,17 @@ ANY num_sync ( . - max_standby_streaming_delay applies when WAL data is + . + max_standby_streaming_delay applies when WAL data is being received via streaming replication. The default is 30 seconds. Units are milliseconds if not specified. A value of -1 allows the standby to wait forever for conflicting queries to complete. - This parameter can only be set in the postgresql.conf + This parameter can only be set in the postgresql.conf file or on the server command line. - Note that max_standby_streaming_delay is not the same as + Note that max_standby_streaming_delay is not the same as the maximum length of time a query can run before cancellation; rather it is the maximum total time allowed to apply WAL data once it has been received from the primary server. 
Thus, if one query has @@ -3336,7 +3433,7 @@ ANY num_sync ( wal_receiver_status_interval (integer) - wal_receiver_status_interval configuration parameter + wal_receiver_status_interval configuration parameter @@ -3344,8 +3441,9 @@ ANY num_sync ( - pg_stat_replication view. The standby will report + pg_stat_replication + view. The standby will report the last write-ahead log location it has written, the last position it has flushed to disk, and the last position it has applied. This parameter's @@ -3354,7 +3452,7 @@ ANY num_sync ( num_sync ( hot_standby_feedback (boolean) - hot_standby_feedback configuration parameter + hot_standby_feedback configuration parameter @@ -3374,9 +3472,9 @@ ANY num_sync ( ( num_sync ( wal_receiver_timeout (integer) - wal_receiver_timeout configuration parameter + wal_receiver_timeout configuration parameter @@ -3410,7 +3508,7 @@ ANY num_sync ( num_sync ( wal_retrieve_retry_interval (integer) - wal_retrieve_retry_interval configuration parameter + wal_retrieve_retry_interval configuration parameter Specify how long the standby server should wait when WAL data is not available from any sources (streaming replication, - local pg_wal or WAL archive) before retrying to + local pg_wal or WAL archive) before retrying to retrieve WAL data. This parameter can only be set in the - postgresql.conf file or on the server command line. + postgresql.conf file or on the server command line. The default value is 5 seconds. Units are milliseconds if not specified. @@ -3457,7 +3555,8 @@ ANY num_sync ( num_sync ( max_logical_replication_workers (int) - max_logical_replication_workers configuration parameter + max_logical_replication_workers configuration parameter @@ -3488,7 +3587,7 @@ ANY num_sync ( max_sync_workers_per_subscription (integer) - max_sync_workers_per_subscription configuration parameter + max_sync_workers_per_subscription configuration parameter @@ -3525,15 +3624,15 @@ ANY num_sync ( ), - running manually, increasing + constants (see ), + running manually, increasing the value of the configuration parameter, + linkend="guc-default-statistics-target"/> configuration parameter, and increasing the amount of statistics collected for specific columns using ALTER TABLE SET STATISTICS. @@ -3546,13 +3645,13 @@ ANY num_sync ( num_sync ( enable_gathermerge (boolean) - enable_gathermerge configuration parameter + enable_gathermerge configuration parameter Enables or disables the query planner's use of gather - merge plan types. The default is on. + merge plan types. The default is on. @@ -3574,13 +3673,13 @@ ANY num_sync ( enable_hashagg (boolean) - enable_hashagg configuration parameter + enable_hashagg configuration parameter Enables or disables the query planner's use of hashed - aggregation plan types. The default is on. + aggregation plan types. The default is on. @@ -3588,13 +3687,13 @@ ANY num_sync ( enable_hashjoin (boolean) - enable_hashjoin configuration parameter + enable_hashjoin configuration parameter Enables or disables the query planner's use of hash-join plan - types. The default is on. + types. The default is on. @@ -3605,13 +3704,13 @@ ANY num_sync ( num_sync ( enable_indexonlyscan (boolean) - enable_indexonlyscan configuration parameter + enable_indexonlyscan configuration parameter Enables or disables the query planner's use of index-only-scan plan - types (see ). - The default is on. + types (see ). + The default is on. 
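    These enable_* settings are normally used to diagnose planner choices rather
    than as permanent tuning, so they are typically changed only for the current
    session. For illustration, a minimal sketch (the table name
    measurements is hypothetical):

SET enable_indexonlyscan = off;   -- discourage index-only scans for this session only
EXPLAIN SELECT count(*) FROM measurements;
RESET enable_indexonlyscan;

    Running the same EXPLAIN before and after the SET shows which plan type the
    planner falls back to when its preferred one is discouraged.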
@@ -3634,7 +3733,7 @@ ANY num_sync ( enable_material (boolean) - enable_material configuration parameter + enable_material configuration parameter @@ -3643,7 +3742,7 @@ ANY num_sync ( num_sync ( enable_mergejoin (boolean) - enable_mergejoin configuration parameter + enable_mergejoin configuration parameter Enables or disables the query planner's use of merge-join plan - types. The default is on. + types. The default is on. @@ -3665,7 +3764,7 @@ ANY num_sync ( enable_nestloop (boolean) - enable_nestloop configuration parameter + enable_nestloop configuration parameter @@ -3674,7 +3773,94 @@ ANY num_sync ( + enable_parallel_append (boolean) + + enable_parallel_append configuration parameter + + + + + Enables or disables the query planner's use of parallel-aware + append plan types. The default is on. + + + + + + enable_parallel_hash (boolean) + + enable_parallel_hash configuration parameter + + + + + Enables or disables the query planner's use of hash-join plan + types with parallel hash. Has no effect if hash-join plans are not + also enabled. The default is on. + + + + + + enable_partition_pruning (boolean) + + enable_partition_pruning configuration parameter + + + + + Enables or disables the query planner's ability to eliminate a + partitioned table's partitions from query plans. This also controls + the planner's ability to generate query plans which allow the query + executor to remove (ignore) partitions during query execution. The + default is on. + See for details. + + + + + + enable_partitionwise_join (boolean) + + enable_partitionwise_join configuration parameter + + + + + Enables or disables the query planner's use of partitionwise join, + which allows a join between partitioned tables to be performed by + joining the matching partitions. Partitionwise join currently applies + only when the join conditions include all the partition keys, which + must be of the same data type and have exactly matching sets of child + partitions. Because partitionwise join planning can use significantly + more CPU time and memory during planning, the default is + off. + + + + + + enable_partitionwise_aggregate (boolean) + + enable_partitionwise_aggregate configuration parameter + + + + + Enables or disables the query planner's use of partitionwise grouping + or aggregation, which allows grouping or aggregation on a partitioned + tables performed separately for each partition. If the GROUP + BY clause does not include the partition keys, only partial + aggregation can be performed on a per-partition basis, and + finalization must be performed later. Because partitionwise grouping + or aggregation can use significantly more CPU time and memory during + planning, the default is off. @@ -3685,7 +3871,7 @@ ANY num_sync ( num_sync ( num_sync ( enable_sort (boolean) - enable_sort configuration parameter + enable_sort configuration parameter @@ -3711,7 +3897,7 @@ ANY num_sync ( num_sync ( enable_tidscan (boolean) - enable_tidscan configuration parameter + enable_tidscan configuration parameter - Enables or disables the query planner's use of TID - scan plan types. The default is on. + Enables or disables the query planner's use of TID + scan plan types. The default is on. @@ -3736,12 +3922,12 @@ ANY num_sync ( num_sync ( seq_page_cost (floating point) - seq_page_cost configuration parameter + seq_page_cost configuration parameter @@ -3771,7 +3957,7 @@ ANY num_sync ( ). + (see ). 
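    For illustration, the current page-cost values can be inspected through
    pg_settings, and the per-tablespace override mentioned above is applied with
    ALTER TABLESPACE; this is only a sketch, and the tablespace name
    fast_ssd is hypothetical:

SELECT name, setting FROM pg_settings WHERE name LIKE '%page_cost';

ALTER TABLESPACE fast_ssd SET (seq_page_cost = 0.5, random_page_cost = 0.5);

    The ALTER TABLESPACE form affects only tables and indexes stored in that
    tablespace; random_page_cost, described next, is the usual companion setting.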
@@ -3779,7 +3965,7 @@ ANY num_sync ( random_page_cost (floating point) - random_page_cost configuration parameter + random_page_cost configuration parameter @@ -3788,11 +3974,11 @@ ANY num_sync ( ). + (see ). - Reducing this value relative to seq_page_cost + Reducing this value relative to seq_page_cost will cause the system to prefer index scans; raising it will make index scans look relatively more expensive. You can raise or lower both values together to change the importance of disk I/O @@ -3822,8 +4008,8 @@ ANY num_sync ( num_sync ( cpu_tuple_cost (floating point) - cpu_tuple_cost configuration parameter + cpu_tuple_cost configuration parameter @@ -3853,7 +4039,7 @@ ANY num_sync ( cpu_index_tuple_cost (floating point) - cpu_index_tuple_cost configuration parameter + cpu_index_tuple_cost configuration parameter @@ -3868,7 +4054,7 @@ ANY num_sync ( cpu_operator_cost (floating point) - cpu_operator_cost configuration parameter + cpu_operator_cost configuration parameter @@ -3883,7 +4069,7 @@ ANY num_sync ( parallel_setup_cost (floating point) - parallel_setup_cost configuration parameter + parallel_setup_cost configuration parameter @@ -3898,7 +4084,7 @@ ANY num_sync ( parallel_tuple_cost (floating point) - parallel_tuple_cost configuration parameter + parallel_tuple_cost configuration parameter @@ -3913,7 +4099,7 @@ ANY num_sync ( min_parallel_table_scan_size (integer) - min_parallel_table_scan_size configuration parameter + min_parallel_table_scan_size configuration parameter @@ -3923,7 +4109,7 @@ ANY num_sync ( num_sync ( min_parallel_index_scan_size (integer) - min_parallel_index_scan_size configuration parameter + min_parallel_index_scan_size configuration parameter @@ -3940,7 +4126,7 @@ ANY num_sync ( num_sync ( effective_cache_size (integer) - effective_cache_size configuration parameter + effective_cache_size configuration parameter @@ -3961,7 +4147,8 @@ ANY num_sync ( ( + jit_above_cost (floating point) + + jit_above_cost configuration parameter + + + + + Sets the query cost above which JIT compilation is activated, if + enabled (see ). + Performing JIT costs planning time but can + accelerate query execution. + Setting this to -1 disables JIT compilation. + The default is 100000. + + + + + + jit_inline_above_cost (floating point) + + jit_inline_above_cost configuration parameter + + + + + Sets the query cost above which JIT compilation attempts to inline + functions and operators. Inlining adds planning time, but can + improve execution speed. It is not meaningful to set this to less + than jit_above_cost. + Setting this to -1 disables inlining. + The default is 500000. + + + + + + jit_optimize_above_cost (floating point) + + jit_optimize_above_cost configuration parameter + + + + + Sets the query cost above which JIT compilation applies expensive + optimizations. Such optimization adds planning time, but can improve + execution speed. It is not meaningful to set this to less + than jit_above_cost, and it is unlikely to be + beneficial to set it to more + than jit_inline_above_cost. + Setting this to -1 disables expensive optimizations. + The default is 500000. @@ -3986,7 +4229,7 @@ ANY num_sync ( . + For more information see . @@ -4001,7 +4244,7 @@ ANY num_sync ( num_sync ( geqo_threshold (integer) - geqo_threshold configuration parameter + geqo_threshold configuration parameter Use genetic query optimization to plan queries with at least - this many FROM items involved. (Note that a - FULL OUTER JOIN construct counts as only one FROM + this many FROM items involved. 
(Note that a + FULL OUTER JOIN construct counts as only one FROM item.) The default is 12. For simpler queries it is usually best to use the regular, exhaustive-search planner, but for queries with many tables the exhaustive search takes too long, often @@ -4038,7 +4281,7 @@ ANY num_sync ( geqo_effort (integer) - geqo_effort configuration parameter + geqo_effort configuration parameter @@ -4064,7 +4307,7 @@ ANY num_sync ( geqo_pool_size (integer) - geqo_pool_size configuration parameter + geqo_pool_size configuration parameter @@ -4082,7 +4325,7 @@ ANY num_sync ( geqo_generations (integer) - geqo_generations configuration parameter + geqo_generations configuration parameter @@ -4100,7 +4343,7 @@ ANY num_sync ( geqo_selection_bias (floating point) - geqo_selection_bias configuration parameter + geqo_selection_bias configuration parameter @@ -4115,7 +4358,7 @@ ANY num_sync ( geqo_seed (floating point) - geqo_seed configuration parameter + geqo_seed configuration parameter @@ -4139,18 +4382,18 @@ ANY num_sync ( default_statistics_target (integer) - default_statistics_target configuration parameter + default_statistics_target configuration parameter Sets the default statistics target for table columns without a column-specific target set via ALTER TABLE - SET STATISTICS. Larger values increase the time needed to - do ANALYZE, but might improve the quality of the + SET STATISTICS. Larger values increase the time needed to + do ANALYZE, but might improve the quality of the planner's estimates. The default is 100. For more information - on the use of statistics by the PostgreSQL - query planner, refer to . + on the use of statistics by the PostgreSQL + query planner, refer to . @@ -4161,26 +4404,25 @@ ANY num_sync ( for + Refer to for more information on using constraint exclusion and partitioning. @@ -4215,14 +4457,14 @@ SELECT * FROM parent WHERE key = 2400; cursor_tuple_fraction (floating point) - cursor_tuple_fraction configuration parameter + cursor_tuple_fraction configuration parameter Sets the planner's estimate of the fraction of a cursor's rows that will be retrieved. The default is 0.1. Smaller values of this - setting bias the planner towards using fast start plans + setting bias the planner towards using fast start plans for cursors, which will retrieve the first few rows quickly while perhaps taking a long time to fetch all rows. Larger values put more emphasis on the total estimated time. At the maximum @@ -4236,7 +4478,7 @@ SELECT * FROM parent WHERE key = 2400; from_collapse_limit (integer) - from_collapse_limit configuration parameter + from_collapse_limit configuration parameter @@ -4245,13 +4487,29 @@ SELECT * FROM parent WHERE key = 2400; resulting FROM list would have no more than this many items. Smaller values reduce planning time but might yield inferior query plans. The default is eight. - For more information see . + For more information see . - Setting this value to or more + Setting this value to or more may trigger use of the GEQO planner, resulting in non-optimal - plans. See . + plans. See . + + + + + + jit (boolean) + + jit configuration parameter + + + + + Determines whether JIT compilation may be used by + PostgreSQL, if available (see ). + The default is on. 
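    Whether JIT actually fires for a given query depends on the cost thresholds
    described above, and it requires a server built with JIT support. A quick way
    to see it in action is to lower jit_above_cost for the current session and
    look for the JIT summary in EXPLAIN ANALYZE output; this is a demonstration
    sketch, not a recommended production setting:

SET jit = on;
SET jit_above_cost = 0;            -- force JIT even for cheap queries, for demonstration
EXPLAIN (ANALYZE) SELECT sum(x) FROM generate_series(1, 1000000) AS t(x);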
@@ -4259,14 +4517,14 @@ SELECT * FROM parent WHERE key = 2400; join_collapse_limit (integer) - join_collapse_limit configuration parameter + join_collapse_limit configuration parameter - The planner will rewrite explicit JOIN - constructs (except FULL JOINs) into lists of - FROM items whenever a list of no more than this many items + The planner will rewrite explicit JOIN + constructs (except FULL JOINs) into lists of + FROM items whenever a list of no more than this many items would result. Smaller values reduce planning time but might yield inferior query plans. @@ -4275,19 +4533,45 @@ SELECT * FROM parent WHERE key = 2400; By default, this variable is set the same as from_collapse_limit, which is appropriate for most uses. Setting it to 1 prevents any reordering of - explicit JOINs. Thus, the explicit join order + explicit JOINs. Thus, the explicit join order specified in the query will be the actual order in which the relations are joined. Because the query planner does not always choose the optimal join order, advanced users can elect to temporarily set this variable to 1, and then specify the join order they desire explicitly. - For more information see . + For more information see . - Setting this value to or more + Setting this value to or more may trigger use of the GEQO planner, resulting in non-optimal - plans. See . + plans. See . + + + + + + + parallel_leader_participation (boolean) + + + parallel_leader_participation configuration + parameter + + + + + + Allows the leader process to execute the query plan under + Gather and Gather Merge nodes + instead of waiting for worker processes. The default is + on. Setting this value to off + reduces the likelihood that workers will become blocked because the + leader is not reading tuples fast enough, but requires the leader + process to wait for worker processes to start up before the first + tuples can be produced. The degree to which the leader can help or + hinder performance depends on the plan type, number of workers and + query duration. @@ -4295,24 +4579,24 @@ SELECT * FROM parent WHERE key = 2400; force_parallel_mode (enum) - force_parallel_mode configuration parameter + force_parallel_mode configuration parameter Allows the use of parallel queries for testing purposes even in cases where no performance benefit is expected. - The allowed values of force_parallel_mode are - off (use parallel mode only when it is expected to improve - performance), on (force parallel query for all queries - for which it is thought to be safe), and regress (like - on, but with additional behavior changes as explained + The allowed values of force_parallel_mode are + off (use parallel mode only when it is expected to improve + performance), on (force parallel query for all queries + for which it is thought to be safe), and regress (like + on, but with additional behavior changes as explained below). - More specifically, setting this value to on will add - a Gather node to the top of any query plan for which this + More specifically, setting this value to on will add + a Gather node to the top of any query plan for which this appears to be safe, so that the query runs inside of a parallel worker. 
Even when a parallel worker is not available or cannot be used, operations such as starting a subtransaction that would be prohibited @@ -4324,15 +4608,45 @@ SELECT * FROM parent WHERE key = 2400; - Setting this value to regress has all of the same effects - as setting it to on plus some additional effects that are + Setting this value to regress has all of the same effects + as setting it to on plus some additional effects that are intended to facilitate automated regression testing. Normally, messages from a parallel worker include a context line indicating that, - but a setting of regress suppresses this line so that the + but a setting of regress suppresses this line so that the output is the same as in non-parallel execution. Also, - the Gather nodes added to plans by this setting are hidden - in EXPLAIN output so that the output matches what - would be obtained if this setting were turned off. + the Gather nodes added to plans by this setting are hidden + in EXPLAIN output so that the output matches what + would be obtained if this setting were turned off. + + + + + + plan_cache_mode (enum) + + plan_cache_mode configuration parameter + + + + + Prepared statements (either explicitly prepared or implicitly + generated, for example in PL/pgSQL) can be executed using custom or + generic plans. A custom plan is replanned for a new parameter value, + a generic plan is reused for repeated executions of the prepared + statement. The choice between them is normally made automatically. + This setting overrides the default behavior and forces either a custom + or a generic plan. This can be used to work around performance + problems in specific cases. Note, however, that the plan cache + behavior is subject to change, so this setting, like all settings that + force the planner's hand, should be reevaluated regularly. + + + + The allowed values are auto, + force_custom_plan and + force_generic_plan. The default value is + auto. The setting is applied when a cached plan is + to be executed, not when it is prepared. @@ -4365,7 +4679,7 @@ SELECT * FROM parent WHERE key = 2400; log_destination (string) - log_destination configuration parameter + log_destination configuration parameter @@ -4378,22 +4692,22 @@ SELECT * FROM parent WHERE key = 2400; parameter to a list of desired log destinations separated by commas. The default is to log to stderr only. - This parameter can only be set in the postgresql.conf + This parameter can only be set in the postgresql.conf file or on the server command line. - If csvlog is included in log_destination, + If csvlog is included in log_destination, log entries are output in comma separated - value (CSV) format, which is convenient for + value (CSV) format, which is convenient for loading logs into programs. - See for details. - must be enabled to generate + See for details. + must be enabled to generate CSV-format log output. When either stderr or csvlog are included, the file - current_logfiles is created to record the location + current_logfiles is created to record the location of the log file(s) currently in use by the logging collector and the associated logging destination. This provides a convenient way to find the logs currently in use by the instance. Here is an example of @@ -4405,10 +4719,10 @@ csvlog log/postgresql.csv current_logfiles is recreated when a new log file is created as an effect of rotation, and - when log_destination is reloaded. It is removed when + when log_destination is reloaded. 
It is removed when neither stderr nor csvlog are included - in log_destination, and when the logging collector is + in log_destination, and when the logging collector is disabled. @@ -4417,10 +4731,10 @@ csvlog log/postgresql.csv On most Unix systems, you will need to alter the configuration of your system's syslog daemon in order to make use of the syslog option for - log_destination. PostgreSQL + log_destination. PostgreSQL can log to syslog facilities - LOCAL0 through LOCAL7 (see ), but the default + LOCAL0 through LOCAL7 (see ), but the default syslog configuration on most platforms will discard all such messages. You will need to add something like: @@ -4431,11 +4745,11 @@ local0.* /var/log/postgresql On Windows, when you use the eventlog - option for log_destination, you should + option for log_destination, you should register an event source and its library with the operating system so that the Windows Event Viewer can display event log messages cleanly. - See for details. + See for details. @@ -4444,27 +4758,27 @@ local0.* /var/log/postgresql logging_collector (boolean) - logging_collector configuration parameter + logging_collector configuration parameter - This parameter enables the logging collector, which + This parameter enables the logging collector, which is a background process that captures log messages - sent to stderr and redirects them into log files. + sent to stderr and redirects them into log files. This approach is often more useful than - logging to syslog, since some types of messages - might not appear in syslog output. (One common + logging to syslog, since some types of messages + might not appear in syslog output. (One common example is dynamic-linker failure messages; another is error messages - produced by scripts such as archive_command.) + produced by scripts such as archive_command.) This parameter can only be set at server start. - It is possible to log to stderr without using the + It is possible to log to stderr without using the logging collector; the log messages will just go to wherever the - server's stderr is directed. However, that method is + server's stderr is directed. However, that method is only suitable for low log volumes, since it provides no convenient way to rotate log files. Also, on some platforms not using the logging collector can result in lost or garbled log output, because @@ -4478,7 +4792,7 @@ local0.* /var/log/postgresql The logging collector is designed to never lose messages. This means that in case of extremely high load, server processes could be blocked while trying to send additional log messages when the - collector has fallen behind. In contrast, syslog + collector has fallen behind. In contrast, syslog prefers to drop messages if it cannot write them, which means it may fail to log some messages in such cases but it will not block the rest of the system. @@ -4491,16 +4805,16 @@ local0.* /var/log/postgresql log_directory (string) - log_directory configuration parameter + log_directory configuration parameter - When logging_collector is enabled, + When logging_collector is enabled, this parameter determines the directory in which log files will be created. It can be specified as an absolute path, or relative to the cluster data directory. - This parameter can only be set in the postgresql.conf + This parameter can only be set in the postgresql.conf file or on the server command line. The default is log. 
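    As a sketch, the collector can be enabled and pointed at a directory with
    ALTER SYSTEM (which writes postgresql.auto.conf) instead of editing
    postgresql.conf by hand; note that logging_collector itself only takes effect
    after a server restart:

ALTER SYSTEM SET logging_collector = on;     -- requires a server restart
ALTER SYSTEM SET log_directory = 'log';      -- relative to the data directory
SELECT pg_reload_conf();                     -- picks up reloadable settings such as log_directory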
@@ -4510,7 +4824,7 @@ local0.* /var/log/postgresql log_filename (string) - log_filename configuration parameter + log_filename configuration parameter @@ -4522,7 +4836,7 @@ local0.* /var/log/postgresql file names. (Note that if there are any time-zone-dependent %-escapes, the computation is done in the zone specified - by .) + by .) The supported %-escapes are similar to those listed in the Open Group's strftime @@ -4541,14 +4855,14 @@ local0.* /var/log/postgresql longer the case. - If CSV-format output is enabled in log_destination, - .csv will be appended to the timestamped + If CSV-format output is enabled in log_destination, + .csv will be appended to the timestamped log file name to create the file name for CSV-format output. - (If log_filename ends in .log, the suffix is + (If log_filename ends in .log, the suffix is replaced instead.) - This parameter can only be set in the postgresql.conf + This parameter can only be set in the postgresql.conf file or on the server command line. @@ -4557,7 +4871,7 @@ local0.* /var/log/postgresql log_file_mode (integer) - log_file_mode configuration parameter + log_file_mode configuration parameter @@ -4572,17 +4886,17 @@ local0.* /var/log/postgresql must start with a 0 (zero).) - The default permissions are 0600, meaning only the + The default permissions are 0600, meaning only the server owner can read or write the log files. The other commonly - useful setting is 0640, allowing members of the owner's + useful setting is 0640, allowing members of the owner's group to read the files. Note however that to make use of such a - setting, you'll need to alter to + setting, you'll need to alter to store the files somewhere outside the cluster data directory. In any case, it's unwise to make the log files world-readable, since they might contain sensitive data. - This parameter can only be set in the postgresql.conf + This parameter can only be set in the postgresql.conf file or on the server command line. @@ -4591,7 +4905,7 @@ local0.* /var/log/postgresql log_rotation_age (integer) - log_rotation_age configuration parameter + log_rotation_age configuration parameter @@ -4601,7 +4915,7 @@ local0.* /var/log/postgresql After this many minutes have elapsed, a new log file will be created. Set to zero to disable time-based creation of new log files. - This parameter can only be set in the postgresql.conf + This parameter can only be set in the postgresql.conf file or on the server command line. @@ -4610,7 +4924,7 @@ local0.* /var/log/postgresql log_rotation_size (integer) - log_rotation_size configuration parameter + log_rotation_size configuration parameter @@ -4620,7 +4934,7 @@ local0.* /var/log/postgresql After this many kilobytes have been emitted into a log file, a new log file will be created. Set to zero to disable size-based creation of new log files. - This parameter can only be set in the postgresql.conf + This parameter can only be set in the postgresql.conf file or on the server command line. @@ -4629,7 +4943,7 @@ local0.* /var/log/postgresql log_truncate_on_rotation (boolean) - log_truncate_on_rotation configuration parameter + log_truncate_on_rotation configuration parameter @@ -4644,7 +4958,7 @@ local0.* /var/log/postgresql a log_filename like postgresql-%H.log would result in generating twenty-four hourly log files and then cyclically overwriting them. - This parameter can only be set in the postgresql.conf + This parameter can only be set in the postgresql.conf file or on the server command line. 
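    A sketch of the 24-hourly-file arrangement described above, using
    ALTER SYSTEM rather than editing postgresql.conf directly:

ALTER SYSTEM SET log_filename = 'postgresql-%H.log';
ALTER SYSTEM SET log_rotation_age = '60min';
ALTER SYSTEM SET log_truncate_on_rotation = on;
SELECT pg_reload_conf();

    With these settings, each hour's file is reused (and truncated) on the
    following day rather than accumulating indefinitely.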
@@ -4662,7 +4976,7 @@ local0.* /var/log/postgresql log_truncate_on_rotation to on, log_rotation_age to 60, and log_rotation_size to 1000000. - Including %M in log_filename allows + Including %M in log_filename allows any size-driven rotations that might occur to select a file name different from the hour's initial file name. @@ -4672,21 +4986,21 @@ local0.* /var/log/postgresql syslog_facility (enum) - syslog_facility configuration parameter + syslog_facility configuration parameter - When logging to syslog is enabled, this parameter + When logging to syslog is enabled, this parameter determines the syslog facility to be used. You can choose - from LOCAL0, LOCAL1, - LOCAL2, LOCAL3, LOCAL4, - LOCAL5, LOCAL6, LOCAL7; - the default is LOCAL0. See also the + from LOCAL0, LOCAL1, + LOCAL2, LOCAL3, LOCAL4, + LOCAL5, LOCAL6, LOCAL7; + the default is LOCAL0. See also the documentation of your system's syslog daemon. - This parameter can only be set in the postgresql.conf + This parameter can only be set in the postgresql.conf file or on the server command line. @@ -4695,17 +5009,17 @@ local0.* /var/log/postgresql syslog_ident (string) - syslog_ident configuration parameter + syslog_ident configuration parameter - When logging to syslog is enabled, this parameter + When logging to syslog is enabled, this parameter determines the program name used to identify PostgreSQL messages in syslog logs. The default is postgres. - This parameter can only be set in the postgresql.conf + This parameter can only be set in the postgresql.conf file or on the server command line. @@ -4714,7 +5028,7 @@ local0.* /var/log/postgresql syslog_sequence_numbers (boolean) - syslog_sequence_numbers configuration parameter + syslog_sequence_numbers configuration parameter @@ -4733,7 +5047,7 @@ local0.* /var/log/postgresql - This parameter can only be set in the postgresql.conf + This parameter can only be set in the postgresql.conf file or on the server command line. @@ -4742,12 +5056,12 @@ local0.* /var/log/postgresql syslog_split_messages (boolean) - syslog_split_messages configuration parameter + syslog_split_messages configuration parameter - When logging to syslog is enabled, this parameter + When logging to syslog is enabled, this parameter determines how messages are delivered to syslog. When on (the default), messages are split by lines, and long lines are split so that they will fit into 1024 bytes, which is a typical size limit for @@ -4766,7 +5080,7 @@ local0.* /var/log/postgresql - This parameter can only be set in the postgresql.conf + This parameter can only be set in the postgresql.conf file or on the server command line. @@ -4775,16 +5089,16 @@ local0.* /var/log/postgresql event_source (string) - event_source configuration parameter + event_source configuration parameter - When logging to event log is enabled, this parameter + When logging to event log is enabled, this parameter determines the program name used to identify PostgreSQL messages in the log. The default is PostgreSQL. - This parameter can only be set in the postgresql.conf + This parameter can only be set in the postgresql.conf file or on the server command line. @@ -4797,46 +5111,24 @@ local0.* /var/log/postgresql - - client_min_messages (enum) - - client_min_messages configuration parameter - - - - - Controls which message levels are sent to the client. - Valid values are DEBUG5, - DEBUG4, DEBUG3, DEBUG2, - DEBUG1, LOG, NOTICE, - WARNING, ERROR, FATAL, - and PANIC. Each level - includes all the levels that follow it. 
The later the level, - the fewer messages are sent. The default is - NOTICE. Note that LOG has a different - rank here than in log_min_messages. - - - - log_min_messages (enum) - log_min_messages configuration parameter + log_min_messages configuration parameter Controls which message levels are written to the server log. - Valid values are DEBUG5, DEBUG4, - DEBUG3, DEBUG2, DEBUG1, - INFO, NOTICE, WARNING, - ERROR, LOG, FATAL, and - PANIC. Each level includes all the levels that + Valid values are DEBUG5, DEBUG4, + DEBUG3, DEBUG2, DEBUG1, + INFO, NOTICE, WARNING, + ERROR, LOG, FATAL, and + PANIC. Each level includes all the levels that follow it. The later the level, the fewer messages are sent - to the log. The default is WARNING. Note that - LOG has a different rank here than in - client_min_messages. + to the log. The default is WARNING. Note that + LOG has a different rank here than in + . Only superusers can change this setting. @@ -4845,7 +5137,7 @@ local0.* /var/log/postgresql log_min_error_statement (enum) - log_min_error_statement configuration parameter + log_min_error_statement configuration parameter @@ -4873,7 +5165,7 @@ local0.* /var/log/postgresql log_min_duration_statement (integer) - log_min_duration_statement configuration parameter + log_min_duration_statement configuration parameter @@ -4897,13 +5189,13 @@ local0.* /var/log/postgresql When using this option together with - , + , the text of statements that are logged because of - log_statement will not be repeated in the + log_statement will not be repeated in the duration log message. - If you are not using syslog, it is recommended + If you are not using syslog, it is recommended that you log the PID or session ID using - + so that you can link the statement message to the later duration message using the process ID or session ID. @@ -4914,8 +5206,8 @@ local0.* /var/log/postgresql - explains the message - severity levels used by PostgreSQL. If logging output + explains the message + severity levels used by PostgreSQL. If logging output is sent to syslog or Windows' eventlog, the severity levels are translated as shown in the table. @@ -4928,73 +5220,73 @@ local0.* /var/log/postgresql Severity Usage - syslog - eventlog + syslog + eventlog - DEBUG1..DEBUG5 + DEBUG1..DEBUG5 Provides successively-more-detailed information for use by developers. - DEBUG - INFORMATION + DEBUG + INFORMATION - INFO + INFO Provides information implicitly requested by the user, - e.g., output from VACUUM VERBOSE. - INFO - INFORMATION + e.g., output from VACUUM VERBOSE. + INFO + INFORMATION - NOTICE + NOTICE Provides information that might be helpful to users, e.g., notice of truncation of long identifiers. - NOTICE - INFORMATION + NOTICE + INFORMATION - WARNING - Provides warnings of likely problems, e.g., COMMIT + WARNING + Provides warnings of likely problems, e.g., COMMIT outside a transaction block. - NOTICE - WARNING + NOTICE + WARNING - ERROR + ERROR Reports an error that caused the current command to abort. - WARNING - ERROR + WARNING + ERROR - LOG + LOG Reports information of interest to administrators, e.g., checkpoint activity. - INFO - INFORMATION + INFO + INFORMATION - FATAL + FATAL Reports an error that caused the current session to abort. - ERR - ERROR + ERR + ERROR - PANIC + PANIC Reports an error that caused all database sessions to abort. 
- CRIT - ERROR + CRIT + ERROR @@ -5009,17 +5301,17 @@ local0.* /var/log/postgresql application_name (string) - application_name configuration parameter + application_name configuration parameter The application_name can be any string of less than - NAMEDATALEN characters (64 characters in a standard build). + NAMEDATALEN characters (64 characters in a standard build). It is typically set by an application upon connection to the server. - The name will be displayed in the pg_stat_activity view + The name will be displayed in the pg_stat_activity view and included in CSV log entries. It can also be included in regular - log entries via the parameter. + log entries via the parameter. Only printable ASCII characters may be used in the application_name value. Other characters will be replaced with question marks (?). @@ -5030,17 +5322,17 @@ local0.* /var/log/postgresql debug_print_parse (boolean) - debug_print_parse configuration parameter + debug_print_parse configuration parameter debug_print_rewritten (boolean) - debug_print_rewritten configuration parameter + debug_print_rewritten configuration parameter debug_print_plan (boolean) - debug_print_plan configuration parameter + debug_print_plan configuration parameter @@ -5048,11 +5340,11 @@ local0.* /var/log/postgresql These parameters enable various debugging output to be emitted. When set, they print the resulting parse tree, the query rewriter output, or the execution plan for each executed query. - These messages are emitted at LOG message level, so by + These messages are emitted at LOG message level, so by default they will appear in the server log but will not be sent to the client. You can change that by adjusting - and/or - . + and/or + . These parameters are off by default. @@ -5061,7 +5353,7 @@ local0.* /var/log/postgresql debug_pretty_print (boolean) - debug_pretty_print configuration parameter + debug_pretty_print configuration parameter @@ -5070,7 +5362,7 @@ local0.* /var/log/postgresql produced by debug_print_parse, debug_print_rewritten, or debug_print_plan. This results in more readable - but much longer output than the compact format used when + but much longer output than the compact format used when it is off. It is on by default. @@ -5079,7 +5371,7 @@ local0.* /var/log/postgresql log_checkpoints (boolean) - log_checkpoints configuration parameter + log_checkpoints configuration parameter @@ -5087,7 +5379,7 @@ local0.* /var/log/postgresql Causes checkpoints and restartpoints to be logged in the server log. Some statistics are included in the log messages, including the number of buffers written and the time spent writing them. - This parameter can only be set in the postgresql.conf + This parameter can only be set in the postgresql.conf file or on the server command line. The default is off. @@ -5096,7 +5388,7 @@ local0.* /var/log/postgresql log_connections (boolean) - log_connections configuration parameter + log_connections configuration parameter @@ -5105,14 +5397,14 @@ local0.* /var/log/postgresql as well as successful completion of client authentication. Only superusers can change this parameter at session start, and it cannot be changed at all within a session. - The default is off. + The default is off. - Some client programs, like psql, attempt + Some client programs, like psql, attempt to connect twice while determining if a password is required, so - duplicate connection received messages do not + duplicate connection received messages do not necessarily indicate a problem. 
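    For illustration, the two boolean settings just described can be turned on and
    the configuration reloaded; new values of log_connections apply to sessions
    started after the reload:

ALTER SYSTEM SET log_checkpoints = on;
ALTER SYSTEM SET log_connections = on;
SELECT pg_reload_conf();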
@@ -5122,7 +5414,7 @@ local0.* /var/log/postgresql log_disconnections (boolean) - log_disconnections configuration parameter + log_disconnections configuration parameter @@ -5132,7 +5424,7 @@ local0.* /var/log/postgresql plus the duration of the session. Only superusers can change this parameter at session start, and it cannot be changed at all within a session. - The default is off. + The default is off. @@ -5141,13 +5433,13 @@ local0.* /var/log/postgresql log_duration (boolean) - log_duration configuration parameter + log_duration configuration parameter Causes the duration of every completed statement to be logged. - The default is off. + The default is off. Only superusers can change this setting. @@ -5159,11 +5451,11 @@ local0.* /var/log/postgresql The difference between setting this option and setting - to zero is that - exceeding log_min_duration_statement forces the text of + to zero is that + exceeding log_min_duration_statement forces the text of the query to be logged, but this option doesn't. Thus, if - log_duration is on and - log_min_duration_statement has a positive value, all + log_duration is on and + log_min_duration_statement has a positive value, all durations are logged but the query text is included only for statements exceeding the threshold. This behavior can be useful for gathering statistics in high-load installations. @@ -5175,19 +5467,19 @@ local0.* /var/log/postgresql log_error_verbosity (enum) - log_error_verbosity configuration parameter + log_error_verbosity configuration parameter Controls the amount of detail written in the server log for each - message that is logged. Valid values are TERSE, - DEFAULT, and VERBOSE, each adding more - fields to displayed messages. TERSE excludes - the logging of DETAIL, HINT, - QUERY, and CONTEXT error information. - VERBOSE output includes the SQLSTATE error - code (see also ) and the source code file name, function name, + message that is logged. Valid values are TERSE, + DEFAULT, and VERBOSE, each adding more + fields to displayed messages. TERSE excludes + the logging of DETAIL, HINT, + QUERY, and CONTEXT error information. + VERBOSE output includes the SQLSTATE error + code (see also ) and the source code file name, function name, and line number that generated the error. Only superusers can change this setting. @@ -5197,7 +5489,7 @@ local0.* /var/log/postgresql log_hostname (boolean) - log_hostname configuration parameter + log_hostname configuration parameter @@ -5206,7 +5498,7 @@ local0.* /var/log/postgresql connecting host. Turning this parameter on causes logging of the host name as well. Note that depending on your host name resolution setup this might impose a non-negligible performance penalty. - This parameter can only be set in the postgresql.conf + This parameter can only be set in the postgresql.conf file or on the server command line. @@ -5215,14 +5507,14 @@ local0.* /var/log/postgresql log_line_prefix (string) - log_line_prefix configuration parameter + log_line_prefix configuration parameter - This is a printf-style string that is output at the + This is a printf-style string that is output at the beginning of each log line. - % characters begin escape sequences + % characters begin escape sequences that are replaced with status information as outlined below. Unrecognized escapes are ignored. Other characters are copied straight to the log line. Some escapes are @@ -5234,9 +5526,9 @@ local0.* /var/log/postgresql right with spaces to give it a minimum width, whereas a positive value will pad on the left. 
Padding can be useful to aid human readability in log files. - This parameter can only be set in the postgresql.conf + This parameter can only be set in the postgresql.conf file or on the server command line. The default is - '%m [%p] ' which logs a time stamp and the process ID. + '%m [%p] ' which logs a time stamp and the process ID. @@ -5337,19 +5629,19 @@ local0.* /var/log/postgresql %% - Literal % + Literal % no - The %c escape prints a quasi-unique session identifier, + The %c escape prints a quasi-unique session identifier, consisting of two 4-byte hexadecimal numbers (without leading zeros) separated by a dot. The numbers are the process start time and the - process ID, so %c can also be used as a space saving way + process ID, so %c can also be used as a space saving way of printing those items. For example, to generate the session - identifier from pg_stat_activity, use this query: + identifier from pg_stat_activity, use this query: SELECT to_hex(trunc(EXTRACT(EPOCH FROM backend_start))::integer) || '.' || to_hex(pid) @@ -5360,7 +5652,7 @@ FROM pg_stat_activity; - If you set a nonempty value for log_line_prefix, + If you set a nonempty value for log_line_prefix, you should usually make its last character be a space, to provide visual separation from the rest of the log line. A punctuation character can be used too. @@ -5369,15 +5661,15 @@ FROM pg_stat_activity; - Syslog produces its own + Syslog produces its own time stamp and process ID information, so you probably do not want to - include those escapes if you are logging to syslog. + include those escapes if you are logging to syslog. - The %q escape is useful when including information that is + The %q escape is useful when including information that is only available in session (backend) context like user or database name. For example: @@ -5391,15 +5683,15 @@ log_line_prefix = '%m [%p] %q%u@%d/%a ' log_lock_waits (boolean) - log_lock_waits configuration parameter + log_lock_waits configuration parameter Controls whether a log message is produced when a session waits - longer than to acquire a + longer than to acquire a lock. This is useful in determining if lock waits are causing - poor performance. The default is off. + poor performance. The default is off. Only superusers can change this setting. @@ -5408,22 +5700,22 @@ log_line_prefix = '%m [%p] %q%u@%d/%a ' log_statement (enum) - log_statement configuration parameter + log_statement configuration parameter Controls which SQL statements are logged. Valid values are - none (off), ddl, mod, and - all (all statements). ddl logs all data definition - statements, such as CREATE, ALTER, and - DROP statements. mod logs all - ddl statements, plus data-modifying statements - such as INSERT, - UPDATE, DELETE, TRUNCATE, - and COPY FROM. - PREPARE, EXECUTE, and - EXPLAIN ANALYZE statements are also logged if their + none (off), ddl, mod, and + all (all statements). ddl logs all data definition + statements, such as CREATE, ALTER, and + DROP statements. mod logs all + ddl statements, plus data-modifying statements + such as INSERT, + UPDATE, DELETE, TRUNCATE, + and COPY FROM. + PREPARE, EXECUTE, and + EXPLAIN ANALYZE statements are also logged if their contained command is of an appropriate type. For clients using extended query protocol, logging occurs when an Execute message is received, and values of the Bind parameters are included @@ -5431,20 +5723,20 @@ log_line_prefix = '%m [%p] %q%u@%d/%a ' - The default is none. Only superusers can change this + The default is none. 
Only superusers can change this setting. Statements that contain simple syntax errors are not logged - even by the log_statement = all setting, + even by the log_statement = all setting, because the log message is emitted only after basic parsing has been done to determine the statement type. In the case of extended query protocol, this setting likewise does not log statements that fail before the Execute phase (i.e., during parse analysis or - planning). Set log_min_error_statement to - ERROR (or lower) to log such statements. + planning). Set log_min_error_statement to + ERROR (or lower) to log such statements. @@ -5453,14 +5745,14 @@ log_line_prefix = '%m [%p] %q%u@%d/%a ' log_replication_commands (boolean) - log_replication_commands configuration parameter + log_replication_commands configuration parameter Causes each replication command to be logged in the server log. - See for more information about - replication command. The default value is off. + See for more information about + replication command. The default value is off. Only superusers can change this setting. @@ -5469,7 +5761,7 @@ log_line_prefix = '%m [%p] %q%u@%d/%a ' log_temp_files (integer) - log_temp_files configuration parameter + log_temp_files configuration parameter @@ -5490,19 +5782,19 @@ log_line_prefix = '%m [%p] %q%u@%d/%a ' log_timezone (string) - log_timezone configuration parameter + log_timezone configuration parameter Sets the time zone used for timestamps written in the server log. - Unlike , this value is cluster-wide, + Unlike , this value is cluster-wide, so that all sessions will report timestamps consistently. - The built-in default is GMT, but that is typically - overridden in postgresql.conf; initdb + The built-in default is GMT, but that is typically + overridden in postgresql.conf; initdb will install a setting there corresponding to its system environment. - See for more information. - This parameter can only be set in the postgresql.conf + See for more information. + This parameter can only be set in the postgresql.conf file or on the server command line. @@ -5514,10 +5806,10 @@ log_line_prefix = '%m [%p] %q%u@%d/%a ' Using CSV-Format Log Output - Including csvlog in the log_destination list + Including csvlog in the log_destination list provides a convenient way to import log files into a database table. This option emits log lines in comma-separated-values - (CSV) format, + (CSV) format, with these columns: time stamp with milliseconds, user name, @@ -5539,10 +5831,10 @@ log_line_prefix = '%m [%p] %q%u@%d/%a ' character count of the error position therein, error context, user query that led to the error (if any and enabled by - log_min_error_statement), + log_min_error_statement), character count of the error position therein, location of the error in the PostgreSQL source code - (if log_error_verbosity is set to verbose), + (if log_error_verbosity is set to verbose), and application name. Here is a sample table definition for storing CSV-format log output: @@ -5578,7 +5870,7 @@ CREATE TABLE postgres_log - To import a log file into this table, use the COPY FROM + To import a log file into this table, use the COPY FROM command: @@ -5594,7 +5886,7 @@ COPY postgres_log FROM '/full/path/to/logfile.csv' WITH csv; Set log_filename and - log_rotation_age to provide a consistent, + log_rotation_age to provide a consistent, predictable naming scheme for your log files. This lets you predict what the file name will be and know when an individual log file is complete and therefore ready to be imported. 
@@ -5611,7 +5903,7 @@ COPY postgres_log FROM '/full/path/to/logfile.csv' WITH csv; - Set log_truncate_on_rotation to on so + Set log_truncate_on_rotation to on so that old log data isn't mixed with the new in the same file. @@ -5620,14 +5912,14 @@ COPY postgres_log FROM '/full/path/to/logfile.csv' WITH csv; The table definition above includes a primary key specification. This is useful to protect against accidentally importing the same - information twice. The COPY command commits all of the + information twice. The COPY command commits all of the data it imports at one time, so any error will cause the entire import to fail. If you import a partial log file and later import the file again when it is complete, the primary key violation will cause the import to fail. Wait until the log is complete and closed before importing. This procedure will also protect against accidentally importing a partial line that hasn't been completely - written, which would also cause COPY to fail. + written, which would also cause COPY to fail. @@ -5640,26 +5932,26 @@ COPY postgres_log FROM '/full/path/to/logfile.csv' WITH csv; These settings control how process titles of server processes are modified. Process titles are typically viewed using programs like - ps or, on Windows, Process Explorer. - See for details. + ps or, on Windows, Process Explorer. + See for details. cluster_name (string) - cluster_name configuration parameter + cluster_name configuration parameter Sets the cluster name that appears in the process title for all server processes in this cluster. The name can be any string of less - than NAMEDATALEN characters (64 characters in a standard + than NAMEDATALEN characters (64 characters in a standard build). Only printable ASCII characters may be used in the cluster_name value. Other characters will be replaced with question marks (?). No name is shown - if this parameter is set to the empty string '' (which is + if this parameter is set to the empty string '' (which is the default). This parameter can only be set at server start. @@ -5668,15 +5960,15 @@ COPY postgres_log FROM '/full/path/to/logfile.csv' WITH csv; update_process_title (boolean) - update_process_title configuration parameter + update_process_title configuration parameter Enables updating of the process title every time a new SQL command is received by the server. - This setting defaults to on on most platforms, but it - defaults to off on Windows due to that platform's larger + This setting defaults to on on most platforms, but it + defaults to off on Windows due to that platform's larger overhead for updating the process title. Only superusers can change this setting. @@ -5697,7 +5989,7 @@ COPY postgres_log FROM '/full/path/to/logfile.csv' WITH csv; When statistics collection is enabled, the data that is produced can be accessed via the pg_stat and pg_statio family of system views. - Refer to for more information. + Refer to for more information. @@ -5705,7 +5997,7 @@ COPY postgres_log FROM '/full/path/to/logfile.csv' WITH csv; track_activities (boolean) - track_activities configuration parameter + track_activities configuration parameter @@ -5725,14 +6017,14 @@ COPY postgres_log FROM '/full/path/to/logfile.csv' WITH csv; track_activity_query_size (integer) - track_activity_query_size configuration parameter + track_activity_query_size configuration parameter Specifies the number of bytes reserved to track the currently executing command for each active session, for the - pg_stat_activity.query field. 
+ pg_stat_activity.query field. The default value is 1024. This parameter can only be set at server start. @@ -5742,7 +6034,7 @@ COPY postgres_log FROM '/full/path/to/logfile.csv' WITH csv; track_counts (boolean) - track_counts configuration parameter + track_counts configuration parameter @@ -5758,7 +6050,7 @@ COPY postgres_log FROM '/full/path/to/logfile.csv' WITH csv; track_io_timing (boolean) - track_io_timing configuration parameter + track_io_timing configuration parameter @@ -5766,12 +6058,12 @@ COPY postgres_log FROM '/full/path/to/logfile.csv' WITH csv; Enables timing of database I/O calls. This parameter is off by default, because it will repeatedly query the operating system for the current time, which may cause significant overhead on some - platforms. You can use the tool to + platforms. You can use the tool to measure the overhead of timing on your system. I/O timing information is - displayed in , in the output of - when the BUFFERS option is - used, and by . Only superusers can + displayed in , in the output of + when the BUFFERS option is + used, and by . Only superusers can change this setting. @@ -5780,7 +6072,7 @@ COPY postgres_log FROM '/full/path/to/logfile.csv' WITH csv; track_functions (enum) - track_functions configuration parameter + track_functions configuration parameter @@ -5794,7 +6086,7 @@ COPY postgres_log FROM '/full/path/to/logfile.csv' WITH csv; - SQL-language functions that are simple enough to be inlined + SQL-language functions that are simple enough to be inlined into the calling query will not be tracked, regardless of this setting. @@ -5805,7 +6097,7 @@ COPY postgres_log FROM '/full/path/to/logfile.csv' WITH csv; stats_temp_directory (string) - stats_temp_directory configuration parameter + stats_temp_directory configuration parameter @@ -5815,7 +6107,7 @@ COPY postgres_log FROM '/full/path/to/logfile.csv' WITH csv; is pg_stat_tmp. Pointing this at a RAM-based file system will decrease physical I/O requirements and can lead to improved performance. - This parameter can only be set in the postgresql.conf + This parameter can only be set in the postgresql.conf file or on the server command line. @@ -5831,29 +6123,29 @@ COPY postgres_log FROM '/full/path/to/logfile.csv' WITH csv; log_statement_stats (boolean) - log_statement_stats configuration parameter + log_statement_stats configuration parameter log_parser_stats (boolean) - log_parser_stats configuration parameter + log_parser_stats configuration parameter log_planner_stats (boolean) - log_planner_stats configuration parameter + log_planner_stats configuration parameter log_executor_stats (boolean) - log_executor_stats configuration parameter + log_executor_stats configuration parameter For each query, output performance statistics of the respective module to the server log. This is a crude profiling - instrument, similar to the Unix getrusage() operating + instrument, similar to the Unix getrusage() operating system facility. log_statement_stats reports total statement statistics, while the others report per-module statistics. log_statement_stats cannot be enabled together with @@ -5877,11 +6169,11 @@ COPY postgres_log FROM '/full/path/to/logfile.csv' WITH csv; - These settings control the behavior of the autovacuum - feature. Refer to for more information. + These settings control the behavior of the autovacuum + feature. Refer to for more information. Note that many of these settings can be overridden on a per-table basis; see . + endterm="sql-createtable-storage-parameters-title"/>. 
@@ -5889,16 +6181,16 @@ COPY postgres_log FROM '/full/path/to/logfile.csv' WITH csv; autovacuum (boolean) - autovacuum configuration parameter + autovacuum configuration parameter Controls whether the server should run the autovacuum launcher daemon. This is on by default; however, - must also be enabled for + must also be enabled for autovacuum to work. - This parameter can only be set in the postgresql.conf + This parameter can only be set in the postgresql.conf file or on the server command line; however, autovacuuming can be disabled for individual tables by changing table storage parameters. @@ -5906,7 +6198,7 @@ COPY postgres_log FROM '/full/path/to/logfile.csv' WITH csv; Note that even when this parameter is disabled, the system will launch autovacuum processes if necessary to prevent transaction ID wraparound. See for more information. + linkend="vacuum-for-wraparound"/> for more information. @@ -5914,7 +6206,7 @@ COPY postgres_log FROM '/full/path/to/logfile.csv' WITH csv; log_autovacuum_min_duration (integer) - log_autovacuum_min_duration configuration parameter + log_autovacuum_min_duration configuration parameter @@ -5926,10 +6218,10 @@ COPY postgres_log FROM '/full/path/to/logfile.csv' WITH csv; 250ms then all automatic vacuums and analyzes that run 250ms or longer will be logged. In addition, when this parameter is set to any value other than -1, a message will be - logged if an autovacuum action is skipped due to the existence of a - conflicting lock. Enabling this parameter can be helpful + logged if an autovacuum action is skipped due to a conflicting lock or a + concurrently dropped relation. Enabling this parameter can be helpful in tracking autovacuum activity. This parameter can only be set in - the postgresql.conf file or on the server command line; + the postgresql.conf file or on the server command line; but the setting can be overridden for individual tables by changing table storage parameters. @@ -5939,7 +6231,7 @@ COPY postgres_log FROM '/full/path/to/logfile.csv' WITH csv; autovacuum_max_workers (integer) - autovacuum_max_workers configuration parameter + autovacuum_max_workers configuration parameter @@ -5954,17 +6246,17 @@ COPY postgres_log FROM '/full/path/to/logfile.csv' WITH csv; autovacuum_naptime (integer) - autovacuum_naptime configuration parameter + autovacuum_naptime configuration parameter Specifies the minimum delay between autovacuum runs on any given database. In each round the daemon examines the - database and issues VACUUM and ANALYZE commands + database and issues VACUUM and ANALYZE commands as needed for tables in that database. The delay is measured - in seconds, and the default is one minute (1min). - This parameter can only be set in the postgresql.conf + in seconds, and the default is one minute (1min). + This parameter can only be set in the postgresql.conf file or on the server command line. @@ -5973,15 +6265,15 @@ COPY postgres_log FROM '/full/path/to/logfile.csv' WITH csv; autovacuum_vacuum_threshold (integer) - autovacuum_vacuum_threshold configuration parameter + autovacuum_vacuum_threshold configuration parameter Specifies the minimum number of updated or deleted tuples needed - to trigger a VACUUM in any one table. + to trigger a VACUUM in any one table. The default is 50 tuples. - This parameter can only be set in the postgresql.conf + This parameter can only be set in the postgresql.conf file or on the server command line; but the setting can be overridden for individual tables by changing table storage parameters. 
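    To judge whether the default threshold suits a given workload, it can help to
    watch dead-tuple accumulation and the last autovacuum times in the statistics
    views; a minimal monitoring sketch:

SELECT relname, n_dead_tup, last_autovacuum
FROM pg_stat_user_tables
ORDER BY n_dead_tup DESC
LIMIT 10;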
@@ -5992,15 +6284,15 @@ COPY postgres_log FROM '/full/path/to/logfile.csv' WITH csv; autovacuum_analyze_threshold (integer) - autovacuum_analyze_threshold configuration parameter + autovacuum_analyze_threshold configuration parameter Specifies the minimum number of inserted, updated or deleted tuples - needed to trigger an ANALYZE in any one table. + needed to trigger an ANALYZE in any one table. The default is 50 tuples. - This parameter can only be set in the postgresql.conf + This parameter can only be set in the postgresql.conf file or on the server command line; but the setting can be overridden for individual tables by changing table storage parameters. @@ -6011,16 +6303,16 @@ COPY postgres_log FROM '/full/path/to/logfile.csv' WITH csv; autovacuum_vacuum_scale_factor (floating point) - autovacuum_vacuum_scale_factor configuration parameter + autovacuum_vacuum_scale_factor configuration parameter Specifies a fraction of the table size to add to autovacuum_vacuum_threshold - when deciding whether to trigger a VACUUM. + when deciding whether to trigger a VACUUM. The default is 0.2 (20% of table size). - This parameter can only be set in the postgresql.conf + This parameter can only be set in the postgresql.conf file or on the server command line; but the setting can be overridden for individual tables by changing table storage parameters. @@ -6031,16 +6323,16 @@ COPY postgres_log FROM '/full/path/to/logfile.csv' WITH csv; autovacuum_analyze_scale_factor (floating point) - autovacuum_analyze_scale_factor configuration parameter + autovacuum_analyze_scale_factor configuration parameter Specifies a fraction of the table size to add to autovacuum_analyze_threshold - when deciding whether to trigger an ANALYZE. + when deciding whether to trigger an ANALYZE. The default is 0.1 (10% of table size). - This parameter can only be set in the postgresql.conf + This parameter can only be set in the postgresql.conf file or on the server command line; but the setting can be overridden for individual tables by changing table storage parameters. @@ -6051,14 +6343,14 @@ COPY postgres_log FROM '/full/path/to/logfile.csv' WITH csv; autovacuum_freeze_max_age (integer) - autovacuum_freeze_max_age configuration parameter + autovacuum_freeze_max_age configuration parameter Specifies the maximum age (in transactions) that a table's - pg_class.relfrozenxid field can - attain before a VACUUM operation is forced + pg_class.relfrozenxid field can + attain before a VACUUM operation is forced to prevent transaction ID wraparound within the table. Note that the system will launch autovacuum processes to prevent wraparound even when autovacuum is otherwise disabled. @@ -6066,12 +6358,12 @@ COPY postgres_log FROM '/full/path/to/logfile.csv' WITH csv; Vacuum also allows removal of old files from the - pg_xact subdirectory, which is why the default + pg_xact subdirectory, which is why the default is a relatively low 200 million transactions. This parameter can only be set at server start, but the setting can be reduced for individual tables by changing table storage parameters. - For more information see . + For more information see . @@ -6085,8 +6377,8 @@ COPY postgres_log FROM '/full/path/to/logfile.csv' WITH csv; Specifies the maximum age (in multixacts) that a table's - pg_class.relminmxid field can - attain before a VACUUM operation is forced to + pg_class.relminmxid field can + attain before a VACUUM operation is forced to prevent multixact ID wraparound within the table. 
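Taken together, the thresholds and scale factors above form the trigger rule used for routine vacuuming; a worked example for a hypothetical table of one million rows, using the default settings:

-- vacuum threshold  = autovacuum_vacuum_threshold
--                     + autovacuum_vacuum_scale_factor * reltuples
--                   = 50 + 0.2 * 1000000 = 200050 dead tuples
-- analyze threshold = 50 + 0.1 * 1000000 = 100050 changed tuples
-- The collector's current counts can be compared against these numbers:
SELECT relname, n_live_tup, n_dead_tup
FROM pg_stat_user_tables
ORDER BY n_dead_tup DESC;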
Note that the system will launch autovacuum processes to prevent wraparound even when autovacuum is otherwise disabled. @@ -6094,12 +6386,12 @@ COPY postgres_log FROM '/full/path/to/logfile.csv' WITH csv; Vacuuming multixacts also allows removal of old files from the - pg_multixact/members and pg_multixact/offsets + pg_multixact/members and pg_multixact/offsets subdirectories, which is why the default is a relatively low 400 million multixacts. This parameter can only be set at server start, but the setting can be reduced for individual tables by changing table storage parameters. - For more information see . + For more information see . @@ -6107,16 +6399,16 @@ COPY postgres_log FROM '/full/path/to/logfile.csv' WITH csv; autovacuum_vacuum_cost_delay (integer) - autovacuum_vacuum_cost_delay configuration parameter + autovacuum_vacuum_cost_delay configuration parameter Specifies the cost delay value that will be used in automatic - VACUUM operations. If -1 is specified, the regular - value will be used. + VACUUM operations. If -1 is specified, the regular + value will be used. The default value is 20 milliseconds. - This parameter can only be set in the postgresql.conf + This parameter can only be set in the postgresql.conf file or on the server command line; but the setting can be overridden for individual tables by changing table storage parameters. @@ -6127,19 +6419,19 @@ COPY postgres_log FROM '/full/path/to/logfile.csv' WITH csv; autovacuum_vacuum_cost_limit (integer) - autovacuum_vacuum_cost_limit configuration parameter + autovacuum_vacuum_cost_limit configuration parameter Specifies the cost limit value that will be used in automatic - VACUUM operations. If -1 is specified (which is the + VACUUM operations. If -1 is specified (which is the default), the regular - value will be used. Note that + value will be used. Note that the value is distributed proportionally among the running autovacuum workers, if there is more than one, so that the sum of the limits for each worker does not exceed the value of this variable. - This parameter can only be set in the postgresql.conf + This parameter can only be set in the postgresql.conf file or on the server command line; but the setting can be overridden for individual tables by changing table storage parameters. @@ -6157,12 +6449,33 @@ COPY postgres_log FROM '/full/path/to/logfile.csv' WITH csv; Statement Behavior + + client_min_messages (enum) + + client_min_messages configuration parameter + + + + + Controls which message levels are sent to the client. + Valid values are DEBUG5, + DEBUG4, DEBUG3, DEBUG2, + DEBUG1, LOG, NOTICE, + WARNING, and ERROR. + Each level includes all the levels that follow it. The later the level, + the fewer messages are sent. The default is + NOTICE. Note that LOG has a different + rank here than in . + + + + search_path (string) - search_path configuration parameter + search_path configuration parameter - pathfor schemas + pathfor schemas @@ -6178,32 +6491,32 @@ COPY postgres_log FROM '/full/path/to/logfile.csv' WITH csv; The value for search_path must be a comma-separated list of schema names. Any name that is not an existing schema, or is - a schema for which the user does not have USAGE + a schema for which the user does not have USAGE permission, is silently ignored. If one of the list items is the special name $user, then the schema having the name returned by - SESSION_USER is substituted, if there is such a schema - and the user has USAGE permission for it. 
+ SESSION_USER is substituted, if there is such a schema + and the user has USAGE permission for it. (If not, $user is ignored.) - The system catalog schema, pg_catalog, is always + The system catalog schema, pg_catalog, is always searched, whether it is mentioned in the path or not. If it is mentioned in the path then it will be searched in the specified - order. If pg_catalog is not in the path then it will - be searched before searching any of the path items. + order. If pg_catalog is not in the path then it will + be searched before searching any of the path items. Likewise, the current session's temporary-table schema, - pg_temp_nnn, is always searched if it + pg_temp_nnn, is always searched if it exists. It can be explicitly listed in the path by using the - alias pg_temppg_temp. If it is not listed in the path then - it is searched first (even before pg_catalog). However, + alias pg_temppg_temp. If it is not listed in the path then + it is searched first (even before pg_catalog). However, the temporary schema is only searched for relation (table, view, sequence, etc) and data type names. It is never searched for function or operator names. @@ -6220,51 +6533,55 @@ COPY postgres_log FROM '/full/path/to/logfile.csv' WITH csv; The default value for this parameter is "$user", public. This setting supports shared use of a database (where no users - have private schemas, and all share use of public), + have private schemas, and all share use of public), private per-user schemas, and combinations of these. Other effects can be obtained by altering the default search path setting, either globally or per-user. + + For more information on schema handling, see + . In particular, the default + configuration is suitable only when the database has a single user or + a few mutually-trusting users. + + The current effective value of the search path can be examined via the SQL function - current_schemas - (see ). + current_schemas + (see ). This is not quite the same as examining the value of search_path, since - current_schemas shows how the items + current_schemas shows how the items appearing in search_path were resolved. - - For more information on schema handling, see . - row_security (boolean) - row_security configuration parameter + row_security configuration parameter This variable controls whether to raise an error in lieu of applying a - row security policy. When set to on, policies apply - normally. When set to off, queries fail which would - otherwise apply at least one policy. The default is on. - Change to off where limited row visibility could cause - incorrect results; for example, pg_dump makes that + row security policy. When set to on, policies apply + normally. When set to off, queries fail which would + otherwise apply at least one policy. The default is on. + Change to off where limited row visibility could cause + incorrect results; for example, pg_dump makes that change by default. This variable has no effect on roles which bypass every row security policy, to wit, superusers and roles with - the BYPASSRLS attribute. + the BYPASSRLS attribute. For more information on row security policies, - see . + see . 
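A short sketch of the path resolution described above; myschema is a hypothetical schema name, and a nonexistent schema in the list is simply ignored:

SET search_path TO myschema, "$user", public;
SHOW search_path;
-- current_schemas(true) also shows the implicitly searched schemas,
-- such as pg_catalog and the session's temporary schema.
SELECT current_schemas(true);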
@@ -6272,14 +6589,14 @@ COPY postgres_log FROM '/full/path/to/logfile.csv' WITH csv; default_tablespace (string) - default_tablespace configuration parameter + default_tablespace configuration parameter - tablespacedefault + tablespacedefault This variable specifies the default tablespace in which to create - objects (tables and indexes) when a CREATE command does + objects (tables and indexes) when a CREATE command does not explicitly specify a tablespace. @@ -6287,15 +6604,15 @@ COPY postgres_log FROM '/full/path/to/logfile.csv' WITH csv; The value is either the name of a tablespace, or an empty string to specify using the default tablespace of the current database. If the value does not match the name of any existing tablespace, - PostgreSQL will automatically use the default + PostgreSQL will automatically use the default tablespace of the current database. If a nondefault tablespace - is specified, the user must have CREATE privilege + is specified, the user must have CREATE privilege for it, or creation attempts will fail. This variable is not used for temporary tables; for them, - is consulted instead. + is consulted instead. @@ -6306,7 +6623,7 @@ COPY postgres_log FROM '/full/path/to/logfile.csv' WITH csv; For more information on tablespaces, - see . + see . @@ -6314,38 +6631,38 @@ COPY postgres_log FROM '/full/path/to/logfile.csv' WITH csv; temp_tablespaces (string) - temp_tablespaces configuration parameter + temp_tablespaces configuration parameter - tablespacetemporary + tablespacetemporary This variable specifies tablespaces in which to create temporary objects (temp tables and indexes on temp tables) when a - CREATE command does not explicitly specify a tablespace. + CREATE command does not explicitly specify a tablespace. Temporary files for purposes such as sorting large data sets are also created in these tablespaces. The value is a list of names of tablespaces. When there is more than - one name in the list, PostgreSQL chooses a random + one name in the list, PostgreSQL chooses a random member of the list each time a temporary object is to be created; except that within a transaction, successively created temporary objects are placed in successive tablespaces from the list. If the selected element of the list is an empty string, - PostgreSQL will automatically use the default + PostgreSQL will automatically use the default tablespace of the current database instead. - When temp_tablespaces is set interactively, specifying a + When temp_tablespaces is set interactively, specifying a nonexistent tablespace is an error, as is specifying a tablespace for - which the user does not have CREATE privilege. However, + which the user does not have CREATE privilege. However, when using a previously set value, nonexistent tablespaces are ignored, as are tablespaces for which the user lacks - CREATE privilege. In particular, this rule applies when - using a value set in postgresql.conf. + CREATE privilege. In particular, this rule applies when + using a value set in postgresql.conf. @@ -6355,7 +6672,7 @@ COPY postgres_log FROM '/full/path/to/logfile.csv' WITH csv; - See also . + See also . @@ -6363,18 +6680,18 @@ COPY postgres_log FROM '/full/path/to/logfile.csv' WITH csv; check_function_bodies (boolean) - check_function_bodies configuration parameter + check_function_bodies configuration parameter - This parameter is normally on. When set to off, it + This parameter is normally on. When set to off, it disables validation of the function body string during . 
Disabling validation avoids side + linkend="sql-createfunction"/>. Disabling validation avoids side effects of the validation process and avoids false positives due to problems such as forward references. Set this parameter - to off before loading functions on behalf of other - users; pg_dump does so automatically. + to off before loading functions on behalf of other + users; pg_dump does so automatically. @@ -6386,7 +6703,7 @@ COPY postgres_log FROM '/full/path/to/logfile.csv' WITH csv; setting default - default_transaction_isolation configuration parameter + default_transaction_isolation configuration parameter @@ -6400,8 +6717,8 @@ COPY postgres_log FROM '/full/path/to/logfile.csv' WITH csv; - Consult and for more information. + Consult and for more information. @@ -6413,18 +6730,18 @@ COPY postgres_log FROM '/full/path/to/logfile.csv' WITH csv; setting default - default_transaction_read_only configuration parameter + default_transaction_read_only configuration parameter A read-only SQL transaction cannot alter non-temporary tables. This parameter controls the default read-only status of each new - transaction. The default is off (read/write). + transaction. The default is off (read/write). - Consult for more information. + Consult for more information. @@ -6436,12 +6753,12 @@ COPY postgres_log FROM '/full/path/to/logfile.csv' WITH csv; setting default - default_transaction_deferrable configuration parameter + default_transaction_deferrable configuration parameter - When running at the serializable isolation level, + When running at the serializable isolation level, a deferrable read-only SQL transaction may be delayed before it is allowed to proceed. However, once it begins executing it does not incur any of the overhead required to ensure @@ -6454,11 +6771,11 @@ COPY postgres_log FROM '/full/path/to/logfile.csv' WITH csv; This parameter controls the default deferrable status of each new transaction. It currently has no effect on read-write transactions or those operating at isolation levels lower - than serializable. The default is off. + than serializable. The default is off. - Consult for more information. + Consult for more information. @@ -6467,7 +6784,7 @@ COPY postgres_log FROM '/full/path/to/logfile.csv' WITH csv; session_replication_role (enum) - session_replication_role configuration parameter + session_replication_role configuration parameter @@ -6475,10 +6792,32 @@ COPY postgres_log FROM '/full/path/to/logfile.csv' WITH csv; Controls firing of replication-related triggers and rules for the current session. Setting this variable requires superuser privilege and results in discarding any previously cached - query plans. Possible values are origin (the default), - replica and local. - See for - more information. + query plans. Possible values are origin (the default), + replica and local. + + + + The intended use of this setting is that logical replication systems + set it to replica when they are applying replicated + changes. The effect of that will be that triggers and rules (that + have not been altered from their default configuration) will not fire + on the replica. See the clauses + ENABLE TRIGGER and ENABLE RULE + for more information. + + + + PostgreSQL treats the settings origin and + local the same internally. Third-party replication + systems may use these two values for their internal purposes, for + example using local to designate a session whose + changes should not be replicated. 
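The typical apply-side usage described above might look like the following sketch (changing the setting requires superuser privilege, and the applied changes themselves are only indicated by a placeholder comment):

-- An apply session of a logical replication system suppresses ordinary
-- triggers and rules while it writes replicated changes.
SET session_replication_role = replica;
-- ... apply replicated rows here ...
SET session_replication_role = DEFAULT;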
+ + + + Since foreign keys are implemented as triggers, setting this parameter + to replica also disables all foreign key checks, + which can leave data in an inconsistent state if improperly used. @@ -6486,21 +6825,21 @@ COPY postgres_log FROM '/full/path/to/logfile.csv' WITH csv; statement_timeout (integer) - statement_timeout configuration parameter + statement_timeout configuration parameter Abort any statement that takes more than the specified number of milliseconds, starting from the time the command arrives at the server - from the client. If log_min_error_statement is set to - ERROR or lower, the statement that timed out will also be + from the client. If log_min_error_statement is set to + ERROR or lower, the statement that timed out will also be logged. A value of zero (the default) turns this off. - Setting statement_timeout in - postgresql.conf is not recommended because it would + Setting statement_timeout in + postgresql.conf is not recommended because it would affect all sessions. @@ -6509,7 +6848,7 @@ COPY postgres_log FROM '/full/path/to/logfile.csv' WITH csv; lock_timeout (integer) - lock_timeout configuration parameter + lock_timeout configuration parameter @@ -6518,24 +6857,24 @@ COPY postgres_log FROM '/full/path/to/logfile.csv' WITH csv; milliseconds while attempting to acquire a lock on a table, index, row, or other database object. The time limit applies separately to each lock acquisition attempt. The limit applies both to explicit - locking requests (such as LOCK TABLE, or SELECT - FOR UPDATE without NOWAIT) and to implicitly-acquired - locks. If log_min_error_statement is set to - ERROR or lower, the statement that timed out will be + locking requests (such as LOCK TABLE, or SELECT + FOR UPDATE without NOWAIT) and to implicitly-acquired + locks. If log_min_error_statement is set to + ERROR or lower, the statement that timed out will be logged. A value of zero (the default) turns this off. - Unlike statement_timeout, this timeout can only occur - while waiting for locks. Note that if statement_timeout - is nonzero, it is rather pointless to set lock_timeout to + Unlike statement_timeout, this timeout can only occur + while waiting for locks. Note that if statement_timeout + is nonzero, it is rather pointless to set lock_timeout to the same or larger value, since the statement timeout would always trigger first. - Setting lock_timeout in - postgresql.conf is not recommended because it would + Setting lock_timeout in + postgresql.conf is not recommended because it would affect all sessions. @@ -6544,7 +6883,7 @@ COPY postgres_log FROM '/full/path/to/logfile.csv' WITH csv; idle_in_transaction_session_timeout (integer) - idle_in_transaction_session_timeout configuration parameter + idle_in_transaction_session_timeout configuration parameter @@ -6553,7 +6892,7 @@ COPY postgres_log FROM '/full/path/to/logfile.csv' WITH csv; longer than the specified duration in milliseconds. This allows any locks held by that session to be released and the connection slot to be reused; it also allows tuples visible only to this transaction to be vacuumed. See - for more details about this. + for more details about this. The default value of 0 disables this feature. 
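A per-session sketch of the three timeout settings just described; the values are only illustrations, and setting them per session with SET avoids the caveat about placing the timeouts in postgresql.conf:

SET statement_timeout = '30s';
SET lock_timeout = '2s';
SET idle_in_transaction_session_timeout = '10min';
SHOW statement_timeout;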
@@ -6564,24 +6903,24 @@ COPY postgres_log FROM '/full/path/to/logfile.csv' WITH csv; vacuum_freeze_table_age (integer) - vacuum_freeze_table_age configuration parameter + vacuum_freeze_table_age configuration parameter - VACUUM performs an aggressive scan if the table's - pg_class.relfrozenxid field has reached + VACUUM performs an aggressive scan if the table's + pg_class.relfrozenxid field has reached the age specified by this setting. An aggressive scan differs from - a regular VACUUM in that it visits every page that might + a regular VACUUM in that it visits every page that might contain unfrozen XIDs or MXIDs, not just those that might contain dead tuples. The default is 150 million transactions. Although users can - set this value anywhere from zero to two billions, VACUUM + set this value anywhere from zero to two billions, VACUUM will silently limit the effective value to 95% of - , so that a - periodical manual VACUUM has a chance to run before an + , so that a + periodical manual VACUUM has a chance to run before an anti-wraparound autovacuum is launched for the table. For more information see - . + . @@ -6589,21 +6928,21 @@ COPY postgres_log FROM '/full/path/to/logfile.csv' WITH csv; vacuum_freeze_min_age (integer) - vacuum_freeze_min_age configuration parameter + vacuum_freeze_min_age configuration parameter - Specifies the cutoff age (in transactions) that VACUUM + Specifies the cutoff age (in transactions) that VACUUM should use to decide whether to freeze row versions while scanning a table. The default is 50 million transactions. Although users can set this value anywhere from zero to one billion, - VACUUM will silently limit the effective value to half - the value of , so + VACUUM will silently limit the effective value to half + the value of , so that there is not an unreasonably short time between forced autovacuums. For more information see . + linkend="vacuum-for-wraparound"/>. @@ -6611,23 +6950,23 @@ COPY postgres_log FROM '/full/path/to/logfile.csv' WITH csv; vacuum_multixact_freeze_table_age (integer) - vacuum_multixact_freeze_table_age configuration parameter + vacuum_multixact_freeze_table_age configuration parameter - VACUUM performs an aggressive scan if the table's - pg_class.relminmxid field has reached + VACUUM performs an aggressive scan if the table's + pg_class.relminmxid field has reached the age specified by this setting. An aggressive scan differs from - a regular VACUUM in that it visits every page that might + a regular VACUUM in that it visits every page that might contain unfrozen XIDs or MXIDs, not just those that might contain dead tuples. The default is 150 million multixacts. Although users can set this value anywhere from zero to two billions, - VACUUM will silently limit the effective value to 95% of - , so that a - periodical manual VACUUM has a chance to run before an + VACUUM will silently limit the effective value to 95% of + , so that a + periodical manual VACUUM has a chance to run before an anti-wraparound is launched for the table. - For more information see . + For more information see . 
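A monitoring query related to the freeze settings above; it is only a sketch showing how far each table is from the forced anti-wraparound VACUUM:

SELECT c.relname,
       age(c.relfrozenxid) AS xid_age,
       current_setting('autovacuum_freeze_max_age')::int AS force_vacuum_at
FROM pg_class c
WHERE c.relkind = 'r'
ORDER BY xid_age DESC
LIMIT 10;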
@@ -6635,29 +6974,69 @@ COPY postgres_log FROM '/full/path/to/logfile.csv' WITH csv; vacuum_multixact_freeze_min_age (integer) - vacuum_multixact_freeze_min_age configuration parameter + vacuum_multixact_freeze_min_age configuration parameter - Specifies the cutoff age (in multixacts) that VACUUM + Specifies the cutoff age (in multixacts) that VACUUM should use to decide whether to replace multixact IDs with a newer transaction ID or multixact ID while scanning a table. The default is 5 million multixacts. Although users can set this value anywhere from zero to one billion, - VACUUM will silently limit the effective value to half - the value of , + VACUUM will silently limit the effective value to half + the value of , so that there is not an unreasonably short time between forced autovacuums. - For more information see . + For more information see . + + + + + + vacuum_cleanup_index_scale_factor (floating point) + + vacuum_cleanup_index_scale_factor configuration parameter + + + + + Specifies the fraction of the total number of heap tuples counted in + the previous statistics collection that can be inserted without + incurring an index scan at the VACUUM cleanup stage. + This setting currently applies to B-tree indexes only. + + + If no tuples were deleted from the heap, B-tree indexes are still + scanned at the VACUUM cleanup stage when at least one + of the following conditions is met: the index statistics are stale, or + the index contains deleted pages that can be recycled during cleanup. + Index statistics are considered to be stale if the number of newly + inserted tuples exceeds the vacuum_cleanup_index_scale_factor + fraction of the total number of heap tuples detected by the previous + statistics collection. The total number of heap tuples is stored in + the index meta-page. Note that the meta-page does not include this data + until VACUUM finds no dead tuples, so B-tree index + scan at the cleanup stage can only be skipped if the second and + subsequent VACUUM cycles detect no dead tuples. + + + + The value can range from 0 to + 10000000000. + When vacuum_cleanup_index_scale_factor is set to + 0, index scans are never skipped during + VACUUM cleanup. The default value is 0.1. + + bytea_output (enum) - bytea_output configuration parameter + bytea_output configuration parameter @@ -6665,7 +7044,7 @@ COPY postgres_log FROM '/full/path/to/logfile.csv' WITH csv; Sets the output format for values of type bytea. Valid values are hex (the default) and escape (the traditional PostgreSQL - format). See for more + format). See for more information. The bytea type always accepts both formats on input, regardless of this setting. @@ -6675,7 +7054,7 @@ COPY postgres_log FROM '/full/path/to/logfile.csv' WITH csv; xmlbinary (enum) - xmlbinary configuration parameter + xmlbinary configuration parameter @@ -6687,7 +7066,7 @@ COPY postgres_log FROM '/full/path/to/logfile.csv' WITH csv; base64 and hex, which are both defined in the XML Schema standard. The default is base64. For further information about - XML-related functions, see . + XML-related functions, see . @@ -6703,10 +7082,10 @@ COPY postgres_log FROM '/full/path/to/logfile.csv' WITH csv; xmloption (enum) - xmloption configuration parameter + xmloption configuration parameter - SET XML OPTION + SET XML OPTION XML option @@ -6717,7 +7096,7 @@ COPY postgres_log FROM '/full/path/to/logfile.csv' WITH csv; Sets whether DOCUMENT or CONTENT is implicit when converting between XML and character string values. See for a description of this. 
Valid + linkend="datatype-xml"/> for a description of this. Valid values are DOCUMENT and CONTENT. The default is CONTENT. @@ -6736,19 +7115,19 @@ SET XML OPTION { DOCUMENT | CONTENT }; gin_pending_list_limit (integer) - gin_pending_list_limit configuration parameter + gin_pending_list_limit configuration parameter Sets the maximum size of the GIN pending list which is used - when fastupdate is enabled. If the list grows + when fastupdate is enabled. If the list grows larger than this maximum size, it is cleaned up by moving the entries in it to the main GIN data structure in bulk. - The default is four megabytes (4MB). This setting + The default is four megabytes (4MB). This setting can be overridden for individual GIN indexes by changing index storage parameters. - See and + See and for more information. @@ -6764,7 +7143,7 @@ SET XML OPTION { DOCUMENT | CONTENT }; DateStyle (string) - DateStyle configuration parameter + DateStyle configuration parameter @@ -6772,16 +7151,16 @@ SET XML OPTION { DOCUMENT | CONTENT }; Sets the display format for date and time values, as well as the rules for interpreting ambiguous date input values. For historical reasons, this variable contains two independent - components: the output format specification (ISO, - Postgres, SQL, or German) + components: the output format specification (ISO, + Postgres, SQL, or German) and the input/output specification for year/month/day ordering - (DMY, MDY, or YMD). These - can be set separately or together. The keywords Euro - and European are synonyms for DMY; the - keywords US, NonEuro, and - NonEuropean are synonyms for MDY. See - for more information. The - built-in default is ISO, MDY, but + (DMY, MDY, or YMD). These + can be set separately or together. The keywords Euro + and European are synonyms for DMY; the + keywords US, NonEuro, and + NonEuropean are synonyms for MDY. See + for more information. The + built-in default is ISO, MDY, but initdb will initialize the configuration file with a setting that corresponds to the behavior of the chosen lc_time locale. @@ -6792,30 +7171,30 @@ SET XML OPTION { DOCUMENT | CONTENT }; IntervalStyle (enum) - IntervalStyle configuration parameter + IntervalStyle configuration parameter Sets the display format for interval values. - The value sql_standard will produce + The value sql_standard will produce output matching SQL standard interval literals. - The value postgres (which is the default) will produce - output matching PostgreSQL releases prior to 8.4 - when the - parameter was set to ISO. - The value postgres_verbose will produce output - matching PostgreSQL releases prior to 8.4 - when the DateStyle - parameter was set to non-ISO output. - The value iso_8601 will produce output matching the time - interval format with designators defined in section + The value postgres (which is the default) will produce + output matching PostgreSQL releases prior to 8.4 + when the + parameter was set to ISO. + The value postgres_verbose will produce output + matching PostgreSQL releases prior to 8.4 + when the DateStyle + parameter was set to non-ISO output. + The value iso_8601 will produce output matching the time + interval format with designators defined in section 4.4.3.2 of ISO 8601. - The IntervalStyle parameter also affects the + The IntervalStyle parameter also affects the interpretation of ambiguous interval input. See - for more information. + for more information. 
@@ -6823,17 +7202,17 @@ SET XML OPTION { DOCUMENT | CONTENT }; TimeZone (string) - TimeZone configuration parameter + TimeZone configuration parameter - time zone + time zone Sets the time zone for displaying and interpreting time stamps. - The built-in default is GMT, but that is typically - overridden in postgresql.conf; initdb + The built-in default is GMT, but that is typically + overridden in postgresql.conf; initdb will install a setting there corresponding to its system environment. - See for more information. + See for more information. @@ -6841,18 +7220,18 @@ SET XML OPTION { DOCUMENT | CONTENT }; timezone_abbreviations (string) - timezone_abbreviations configuration parameter + timezone_abbreviations configuration parameter - time zone names + time zone names Sets the collection of time zone abbreviations that will be accepted - by the server for datetime input. The default is 'Default', + by the server for datetime input. The default is 'Default', which is a collection that works in most of the world; there are also 'Australia' and 'India', and other collections can be defined for a particular installation. - See for more information. + See for more information. @@ -6867,20 +7246,20 @@ SET XML OPTION { DOCUMENT | CONTENT }; display - extra_float_digits configuration parameter + extra_float_digits configuration parameter This parameter adjusts the number of digits displayed for - floating-point values, including float4, float8, + floating-point values, including float4, float8, and geometric data types. The parameter value is added to the - standard number of digits (FLT_DIG or DBL_DIG + standard number of digits (FLT_DIG or DBL_DIG as appropriate). The value can be set as high as 3, to include partially-significant digits; this is especially useful for dumping float data that needs to be restored exactly. Or it can be set negative to suppress unwanted digits. - See also . + See also . @@ -6888,16 +7267,16 @@ SET XML OPTION { DOCUMENT | CONTENT }; client_encoding (string) - client_encoding configuration parameter + client_encoding configuration parameter - character set + character set Sets the client-side encoding (character set). The default is to use the database encoding. The character sets supported by the PostgreSQL - server are described in . + server are described in . @@ -6905,13 +7284,13 @@ SET XML OPTION { DOCUMENT | CONTENT }; lc_messages (string) - lc_messages configuration parameter + lc_messages configuration parameter Sets the language in which messages are displayed. Acceptable - values are system-dependent; see for + values are system-dependent; see for more information. If this variable is set to the empty string (which is the default) then the value is inherited from the execution environment of the server in a system-dependent way. @@ -6937,7 +7316,7 @@ SET XML OPTION { DOCUMENT | CONTENT }; lc_monetary (string) - lc_monetary configuration parameter + lc_monetary configuration parameter @@ -6945,7 +7324,7 @@ SET XML OPTION { DOCUMENT | CONTENT }; Sets the locale to use for formatting monetary amounts, for example with the to_char family of functions. Acceptable values are system-dependent; see for more information. If this variable is + linkend="locale"/> for more information. If this variable is set to the empty string (which is the default) then the value is inherited from the execution environment of the server in a system-dependent way. 
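A hedged example of the locale-dependent formatting just described; locale names are platform-dependent, and en_US.UTF-8 is only an assumption about what is installed:

SET lc_monetary = 'en_US.UTF-8';
-- L inserts the locale's currency symbol, G its grouping separator,
-- D its decimal point.
SELECT to_char(12345.67, 'L99G999D99');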
@@ -6956,7 +7335,7 @@ SET XML OPTION { DOCUMENT | CONTENT }; lc_numeric (string) - lc_numeric configuration parameter + lc_numeric configuration parameter @@ -6964,7 +7343,7 @@ SET XML OPTION { DOCUMENT | CONTENT }; Sets the locale to use for formatting numbers, for example with the to_char family of functions. Acceptable values are system-dependent; see for more information. If this variable is + linkend="locale"/> for more information. If this variable is set to the empty string (which is the default) then the value is inherited from the execution environment of the server in a system-dependent way. @@ -6975,7 +7354,7 @@ SET XML OPTION { DOCUMENT | CONTENT }; lc_time (string) - lc_time configuration parameter + lc_time configuration parameter @@ -6983,7 +7362,7 @@ SET XML OPTION { DOCUMENT | CONTENT }; Sets the locale to use for formatting dates and times, for example with the to_char family of functions. Acceptable values are system-dependent; see for more information. If this variable is + linkend="locale"/> for more information. If this variable is set to the empty string (which is the default) then the value is inherited from the execution environment of the server in a system-dependent way. @@ -6994,7 +7373,7 @@ SET XML OPTION { DOCUMENT | CONTENT }; default_text_search_config (string) - default_text_search_config configuration parameter + default_text_search_config configuration parameter @@ -7002,8 +7381,8 @@ SET XML OPTION { DOCUMENT | CONTENT }; Selects the text search configuration that is used by those variants of the text search functions that do not have an explicit argument specifying the configuration. - See for further information. - The built-in default is pg_catalog.simple, but + See for further information. + The built-in default is pg_catalog.simple, but initdb will initialize the configuration file with a setting that corresponds to the chosen lc_ctype locale, if a configuration @@ -7024,8 +7403,8 @@ SET XML OPTION { DOCUMENT | CONTENT }; server, in order to load additional functionality or achieve performance benefits. For example, a setting of '$libdir/mylib' would cause - mylib.so (or on some platforms, - mylib.sl) to be preloaded from the installation's standard + mylib.so (or on some platforms, + mylib.sl) to be preloaded from the installation's standard library directory. The differences between the settings are when they take effect and what privileges are required to change them. @@ -7034,14 +7413,14 @@ SET XML OPTION { DOCUMENT | CONTENT }; PostgreSQL procedural language libraries can be preloaded in this way, typically by using the syntax '$libdir/plXXX' where - XXX is pgsql, perl, - tcl, or python. + XXX is pgsql, perl, + tcl, or python. Only shared libraries specifically intended to be used with PostgreSQL can be loaded this way. Every PostgreSQL-supported library has - a magic block that is checked to guarantee compatibility. For + a magic block that is checked to guarantee compatibility. For this reason, non-PostgreSQL libraries cannot be loaded in this way. You might be able to use operating-system facilities such as LD_PRELOAD for that. @@ -7056,10 +7435,10 @@ SET XML OPTION { DOCUMENT | CONTENT }; local_preload_libraries (string) - local_preload_libraries configuration parameter + local_preload_libraries configuration parameter - $libdir/plugins + $libdir/plugins @@ -7067,7 +7446,7 @@ SET XML OPTION { DOCUMENT | CONTENT }; This variable specifies one or more shared libraries that are to be preloaded at connection start. 
It contains a comma-separated list of library names, where each name - is interpreted as for the command. + is interpreted as for the command. Whitespace between entries is ignored; surround a library name with double quotes if you need to include whitespace or commas in the name. The parameter value only takes effect at the start of the connection. @@ -7078,10 +7457,10 @@ SET XML OPTION { DOCUMENT | CONTENT }; This option can be set by any user. Because of that, the libraries that can be loaded are restricted to those appearing in the - plugins subdirectory of the installation's + plugins subdirectory of the installation's standard library directory. (It is the database administrator's - responsibility to ensure that only safe libraries - are installed there.) Entries in local_preload_libraries + responsibility to ensure that only safe libraries + are installed there.) Entries in local_preload_libraries can specify this directory explicitly, for example $libdir/plugins/mylib, or just specify the library name — mylib would have @@ -7091,17 +7470,17 @@ SET XML OPTION { DOCUMENT | CONTENT }; The intent of this feature is to allow unprivileged users to load debugging or performance-measurement libraries into specific sessions - without requiring an explicit LOAD command. To that end, + without requiring an explicit LOAD command. To that end, it would be typical to set this parameter using the PGOPTIONS environment variable on the client or by using - ALTER ROLE SET. + ALTER ROLE SET. However, unless a module is specifically designed to be used in this way by non-superusers, this is usually not the right setting to use. Look - at instead. + at instead. @@ -7110,7 +7489,7 @@ SET XML OPTION { DOCUMENT | CONTENT }; session_preload_libraries (string) - session_preload_libraries configuration parameter + session_preload_libraries configuration parameter @@ -7118,7 +7497,7 @@ SET XML OPTION { DOCUMENT | CONTENT }; This variable specifies one or more shared libraries that are to be preloaded at connection start. It contains a comma-separated list of library names, where each name - is interpreted as for the command. + is interpreted as for the command. Whitespace between entries is ignored; surround a library name with double quotes if you need to include whitespace or commas in the name. The parameter value only takes effect at the start of the connection. @@ -7131,17 +7510,17 @@ SET XML OPTION { DOCUMENT | CONTENT }; The intent of this feature is to allow debugging or performance-measurement libraries to be loaded into specific sessions without an explicit - LOAD command being given. For - example, could be enabled for all + LOAD command being given. For + example, could be enabled for all sessions under a given user name by setting this parameter - with ALTER ROLE SET. Also, this parameter can be changed + with ALTER ROLE SET. Also, this parameter can be changed without restarting the server (but changes only take effect when a new session is started), so it is easier to add new modules this way, even if they should apply to all sessions. - Unlike , there is no large + Unlike , there is no large performance advantage to loading a library at session start rather than when it is first used. There is some advantage, however, when connection pooling is used. 
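The typical use described above might look like the following sketch; the role name app_user is hypothetical, and auto_explain is used only as an example of a module suited to per-session preloading:

-- Preload auto_explain for every new session of one role, without touching
-- the server-wide configuration.
ALTER ROLE app_user SET session_preload_libraries = 'auto_explain';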
@@ -7152,7 +7531,7 @@ SET XML OPTION { DOCUMENT | CONTENT }; shared_preload_libraries (string) - shared_preload_libraries configuration parameter + shared_preload_libraries configuration parameter @@ -7160,7 +7539,7 @@ SET XML OPTION { DOCUMENT | CONTENT }; This variable specifies one or more shared libraries to be preloaded at server start. It contains a comma-separated list of library names, where each name - is interpreted as for the command. + is interpreted as for the command. Whitespace between entries is ignored; surround a library name with double quotes if you need to include whitespace or commas in the name. This parameter can only be set at server start. If a specified @@ -7183,7 +7562,7 @@ SET XML OPTION { DOCUMENT | CONTENT }; parameter is recommended only for libraries that will be used in most sessions. Also, changing this parameter requires a server restart, so this is not the right setting to use for short-term debugging tasks, - say. Use for that + say. Use for that instead. @@ -7198,6 +7577,30 @@ SET XML OPTION { DOCUMENT | CONTENT }; + + + jit_provider (string) + + jit_provider configuration parameter + + + + + This variable is the name of the JIT provider library to be used + (see ). + The default is llvmjit. + This parameter can only be set at server start. + + + + If set to a non-existent library, JIT will not be + available, but no error will be raised. This allows JIT support to be + installed separately from the main + PostgreSQL package. + + + + @@ -7209,9 +7612,9 @@ SET XML OPTION { DOCUMENT | CONTENT }; dynamic_library_path (string) - dynamic_library_path configuration parameter + dynamic_library_path configuration parameter - dynamic loading + dynamic loading @@ -7263,13 +7666,13 @@ dynamic_library_path = 'C:\tools\postgresql;H:\my_project\lib;$libdir' gin_fuzzy_search_limit (integer) - gin_fuzzy_search_limit configuration parameter + gin_fuzzy_search_limit configuration parameter Soft upper limit of the size of the set returned by GIN index scans. For more - information see . + information see . @@ -7294,7 +7697,7 @@ dynamic_library_path = 'C:\tools\postgresql;H:\my_project\lib;$libdir' deadlock - deadlock_timeout configuration parameter + deadlock_timeout configuration parameter @@ -7307,7 +7710,7 @@ dynamic_library_path = 'C:\tools\postgresql;H:\my_project\lib;$libdir' just wait on the lock for a while before checking for a deadlock. Increasing this value reduces the amount of time wasted in needless deadlock checks, but slows down reporting of - real deadlock errors. The default is one second (1s), + real deadlock errors. The default is one second (1s), which is probably about the smallest value you would want in practice. On a heavily loaded server you might want to raise it. Ideally the setting should exceed your typical transaction time, @@ -7317,7 +7720,7 @@ dynamic_library_path = 'C:\tools\postgresql;H:\my_project\lib;$libdir' - When is set, + When is set, this parameter also determines the length of time to wait before a log message is issued about the lock wait. 
If you are trying to investigate locking delays you might want to set a shorter than @@ -7329,20 +7732,20 @@ dynamic_library_path = 'C:\tools\postgresql;H:\my_project\lib;$libdir' max_locks_per_transaction (integer) - max_locks_per_transaction configuration parameter + max_locks_per_transaction configuration parameter The shared lock table tracks locks on max_locks_per_transaction * ( + ) objects (e.g., tables); + linkend="guc-max-connections"/> + ) objects (e.g., tables); hence, no more than this many distinct objects can be locked at any one time. This parameter controls the average number of object locks allocated for each transaction; individual transactions can lock more objects as long as the locks of all transactions - fit in the lock table. This is not the number of + fit in the lock table. This is not the number of rows that can be locked; that value is unlimited. The default, 64, has historically proven sufficient, but you might need to raise this value if you have queries that touch many different @@ -7361,20 +7764,20 @@ dynamic_library_path = 'C:\tools\postgresql;H:\my_project\lib;$libdir' max_pred_locks_per_transaction (integer) - max_pred_locks_per_transaction configuration parameter + max_pred_locks_per_transaction configuration parameter The shared predicate lock table tracks locks on max_pred_locks_per_transaction * ( + ) objects (e.g., tables); + linkend="guc-max-connections"/> + ) objects (e.g., tables); hence, no more than this many distinct objects can be locked at any one time. This parameter controls the average number of object locks allocated for each transaction; individual transactions can lock more objects as long as the locks of all transactions - fit in the lock table. This is not the number of + fit in the lock table. This is not the number of rows that can be locked; that value is unlimited. The default, 64, has generally been sufficient in testing, but you might need to raise this value if you have clients that touch many different @@ -7387,7 +7790,7 @@ dynamic_library_path = 'C:\tools\postgresql;H:\my_project\lib;$libdir' max_pred_locks_per_relation (integer) - max_pred_locks_per_relation configuration parameter + max_pred_locks_per_relation configuration parameter @@ -7396,10 +7799,10 @@ dynamic_library_path = 'C:\tools\postgresql;H:\my_project\lib;$libdir' predicate-locked before the lock is promoted to covering the whole relation. Values greater than or equal to zero mean an absolute limit, while negative values - mean divided by + mean divided by the absolute value of this setting. The default is -2, which keeps - the behavior from previous versions of PostgreSQL. - This parameter can only be set in the postgresql.conf + the behavior from previous versions of PostgreSQL. + This parameter can only be set in the postgresql.conf file or on the server command line. @@ -7408,7 +7811,7 @@ dynamic_library_path = 'C:\tools\postgresql;H:\my_project\lib;$libdir' max_pred_locks_per_page (integer) - max_pred_locks_per_page configuration parameter + max_pred_locks_per_page configuration parameter @@ -7416,7 +7819,7 @@ dynamic_library_path = 'C:\tools\postgresql;H:\my_project\lib;$libdir' This controls how many rows on a single page can be predicate-locked before the lock is promoted to covering the whole page. The default is 2. This parameter can only be set in - the postgresql.conf file or on the server command line. + the postgresql.conf file or on the server command line. 
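A worked sketch of the lock-table sizing rule described above, using the built-in defaults (64 locks per transaction, 100 connections, 0 prepared transactions):

-- max_locks_per_transaction * (max_connections + max_prepared_transactions)
--   = 64 * (100 + 0) = 6400 lockable objects in the shared lock table
SHOW max_locks_per_transaction;
SHOW max_connections;
SHOW max_prepared_transactions;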
@@ -7435,62 +7838,62 @@ dynamic_library_path = 'C:\tools\postgresql;H:\my_project\lib;$libdir' array_nulls (boolean) - array_nulls configuration parameter + array_nulls configuration parameter This controls whether the array input parser recognizes - unquoted NULL as specifying a null array element. - By default, this is on, allowing array values containing - null values to be entered. However, PostgreSQL versions + unquoted NULL as specifying a null array element. + By default, this is on, allowing array values containing + null values to be entered. However, PostgreSQL versions before 8.2 did not support null values in arrays, and therefore would - treat NULL as specifying a normal array element with - the string value NULL. For backward compatibility with + treat NULL as specifying a normal array element with + the string value NULL. For backward compatibility with applications that require the old behavior, this variable can be - turned off. + turned off. Note that it is possible to create array values containing null values - even when this variable is off. + even when this variable is off. backslash_quote (enum) - stringsbackslash quotes + stringsbackslash quotes - backslash_quote configuration parameter + backslash_quote configuration parameter This controls whether a quote mark can be represented by - \' in a string literal. The preferred, SQL-standard way - to represent a quote mark is by doubling it ('') but - PostgreSQL has historically also accepted - \'. However, use of \' creates security risks + \' in a string literal. The preferred, SQL-standard way + to represent a quote mark is by doubling it ('') but + PostgreSQL has historically also accepted + \'. However, use of \' creates security risks because in some client character set encodings, there are multibyte characters in which the last byte is numerically equivalent to ASCII - \. If client-side code does escaping incorrectly then a + \. If client-side code does escaping incorrectly then a SQL-injection attack is possible. This risk can be prevented by making the server reject queries in which a quote mark appears to be escaped by a backslash. - The allowed values of backslash_quote are - on (allow \' always), - off (reject always), and - safe_encoding (allow only if client encoding does not - allow ASCII \ within a multibyte character). - safe_encoding is the default setting. + The allowed values of backslash_quote are + on (allow \' always), + off (reject always), and + safe_encoding (allow only if client encoding does not + allow ASCII \ within a multibyte character). + safe_encoding is the default setting. - Note that in a standard-conforming string literal, \ just - means \ anyway. This parameter only affects the handling of + Note that in a standard-conforming string literal, \ just + means \ anyway. This parameter only affects the handling of non-standard-conforming literals, including - escape string syntax (E'...'). + escape string syntax (E'...'). @@ -7498,7 +7901,7 @@ dynamic_library_path = 'C:\tools\postgresql;H:\my_project\lib;$libdir' default_with_oids (boolean) - default_with_oids configuration parameter + default_with_oids configuration parameter @@ -7508,9 +7911,9 @@ dynamic_library_path = 'C:\tools\postgresql;H:\my_project\lib;$libdir' newly-created tables, if neither WITH OIDS nor WITHOUT OIDS is specified. It also determines whether OIDs will be included in tables created by - SELECT INTO. The parameter is off - by default; in PostgreSQL 8.0 and earlier, it - was on by default. + SELECT INTO. 
The parameter is off + by default; in PostgreSQL 8.0 and earlier, it + was on by default. @@ -7526,21 +7929,21 @@ dynamic_library_path = 'C:\tools\postgresql;H:\my_project\lib;$libdir' escape_string_warning (boolean) - stringsescape warning + stringsescape warning - escape_string_warning configuration parameter + escape_string_warning configuration parameter - When on, a warning is issued if a backslash (\) - appears in an ordinary string literal ('...' + When on, a warning is issued if a backslash (\) + appears in an ordinary string literal ('...' syntax) and standard_conforming_strings is off. - The default is on. + The default is on. Applications that wish to use backslash as escape should be - modified to use escape string syntax (E'...'), + modified to use escape string syntax (E'...'), because the default behavior of ordinary strings is now to treat backslash as an ordinary character, per SQL standard. This variable can be enabled to help locate code that needs to be changed. @@ -7551,25 +7954,22 @@ dynamic_library_path = 'C:\tools\postgresql;H:\my_project\lib;$libdir' lo_compat_privileges (boolean) - lo_compat_privileges configuration parameter + lo_compat_privileges configuration parameter - In PostgreSQL releases prior to 9.0, large objects + In PostgreSQL releases prior to 9.0, large objects did not have access privileges and were, therefore, always readable - and writable by all users. Setting this variable to on + and writable by all users. Setting this variable to on disables the new privilege checks, for compatibility with prior - releases. The default is off. + releases. The default is off. Only superusers can change this setting. Setting this variable does not disable all security checks related to large objects — only those for which the default behavior has - changed in PostgreSQL 9.0. - For example, lo_import() and - lo_export() need superuser privileges regardless - of this setting. + changed in PostgreSQL 9.0. @@ -7577,22 +7977,22 @@ dynamic_library_path = 'C:\tools\postgresql;H:\my_project\lib;$libdir' operator_precedence_warning (boolean) - operator_precedence_warning configuration parameter + operator_precedence_warning configuration parameter When on, the parser will emit a warning for any construct that might - have changed meanings since PostgreSQL 9.4 as a result + have changed meanings since PostgreSQL 9.4 as a result of changes in operator precedence. This is useful for auditing applications to see if precedence changes have broken anything; but it is not meant to be kept turned on in production, since it will warn about some perfectly valid, standard-compliant SQL code. - The default is off. + The default is off. - See for more information. + See for more information. @@ -7600,40 +8000,40 @@ dynamic_library_path = 'C:\tools\postgresql;H:\my_project\lib;$libdir' quote_all_identifiers (boolean) - quote_all_identifiers configuration parameter + quote_all_identifiers configuration parameter When the database generates SQL, force all identifiers to be quoted, even if they are not (currently) keywords. This will affect the - output of EXPLAIN as well as the results of functions - like pg_get_viewdef. See also the + output of EXPLAIN as well as the results of functions + like pg_get_viewdef. See also the option of - and . + and . 
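A brief illustration of the effect on deparsed output; pg_stat_activity is used only because it is always present:

SET quote_all_identifiers = on;
-- The deparsed view definition now quotes every identifier it emits.
SELECT pg_get_viewdef('pg_stat_activity'::regclass, true);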
standard_conforming_strings (boolean) - stringsstandard conforming + stringsstandard conforming - standard_conforming_strings configuration parameter + standard_conforming_strings configuration parameter This controls whether ordinary string literals - ('...') treat backslashes literally, as specified in + ('...') treat backslashes literally, as specified in the SQL standard. Beginning in PostgreSQL 9.1, the default is - on (prior releases defaulted to off). + on (prior releases defaulted to off). Applications can check this parameter to determine how string literals will be processed. The presence of this parameter can also be taken as an indication - that the escape string syntax (E'...') is supported. - Escape string syntax () + that the escape string syntax (E'...') is supported. + Escape string syntax () should be used if an application desires backslashes to be treated as escape characters. @@ -7643,7 +8043,7 @@ dynamic_library_path = 'C:\tools\postgresql;H:\my_project\lib;$libdir' synchronize_seqscans (boolean) - synchronize_seqscans configuration parameter + synchronize_seqscans configuration parameter @@ -7652,13 +8052,13 @@ dynamic_library_path = 'C:\tools\postgresql;H:\my_project\lib;$libdir' other, so that concurrent scans read the same block at about the same time and hence share the I/O workload. When this is enabled, a scan might start in the middle of the table and then wrap - around the end to cover all rows, so as to synchronize with the + around the end to cover all rows, so as to synchronize with the activity of scans already in progress. This can result in unpredictable changes in the row ordering returned by queries that - have no ORDER BY clause. Setting this parameter to - off ensures the pre-8.3 behavior in which a sequential + have no ORDER BY clause. Setting this parameter to + off ensures the pre-8.3 behavior in which a sequential scan always starts from the beginning of the table. The default - is on. + is on. @@ -7672,31 +8072,31 @@ dynamic_library_path = 'C:\tools\postgresql;H:\my_project\lib;$libdir' transform_null_equals (boolean) - IS NULL + IS NULL - transform_null_equals configuration parameter + transform_null_equals configuration parameter - When on, expressions of the form expr = + When on, expressions of the form expr = NULL (or NULL = - expr) are treated as - expr IS NULL, that is, they - return true if expr evaluates to the null value, + expr) are treated as + expr IS NULL, that is, they + return true if expr evaluates to the null value, and false otherwise. The correct SQL-spec-compliant behavior of - expr = NULL is to always + expr = NULL is to always return null (unknown). Therefore this parameter defaults to - off. + off. However, filtered forms in Microsoft Access generate queries that appear to use - expr = NULL to test for + expr = NULL to test for null values, so if you use that interface to access the database you might want to turn this option on. Since expressions of the - form expr = NULL always + form expr = NULL always return the null value (using the SQL standard interpretation), they are not very useful and do not appear often in normal applications so this option does little harm in practice. 
But new users are @@ -7705,7 +8105,7 @@ dynamic_library_path = 'C:\tools\postgresql;H:\my_project\lib;$libdir' - Note that this option only affects the exact form = NULL, + Note that this option only affects the exact form = NULL, not other comparison operators or other expressions that are computationally equivalent to some expression involving the equals operator (such as IN). @@ -7713,7 +8113,7 @@ dynamic_library_path = 'C:\tools\postgresql;H:\my_project\lib;$libdir' - Refer to for related information. + Refer to for related information. @@ -7730,7 +8130,7 @@ dynamic_library_path = 'C:\tools\postgresql;H:\my_project\lib;$libdir' exit_on_error (boolean) - exit_on_error configuration parameter + exit_on_error configuration parameter @@ -7745,16 +8145,16 @@ dynamic_library_path = 'C:\tools\postgresql;H:\my_project\lib;$libdir' restart_after_crash (boolean) - restart_after_crash configuration parameter + restart_after_crash configuration parameter - When set to true, which is the default, PostgreSQL + When set to true, which is the default, PostgreSQL will automatically reinitialize after a backend crash. Leaving this value set to true is normally the best way to maximize the availability of the database. However, in some circumstances, such as when - PostgreSQL is being invoked by clusterware, it may be + PostgreSQL is being invoked by clusterware, it may be useful to disable the restart so that the clusterware can gain control and take any actions it deems appropriate. @@ -7769,10 +8169,10 @@ dynamic_library_path = 'C:\tools\postgresql;H:\my_project\lib;$libdir' Preset Options - The following parameters are read-only, and are determined + The following parameters are read-only, and are determined when PostgreSQL is compiled or when it is installed. As such, they have been excluded from the sample - postgresql.conf file. These options report + postgresql.conf file. These options report various aspects of PostgreSQL behavior that might be of interest to certain applications, particularly administrative front-ends. @@ -7783,17 +8183,17 @@ dynamic_library_path = 'C:\tools\postgresql;H:\my_project\lib;$libdir' block_size (integer) - block_size configuration parameter + block_size configuration parameter Reports the size of a disk block. It is determined by the value - of BLCKSZ when building the server. The default + of BLCKSZ when building the server. The default value is 8192 bytes. The meaning of some configuration - variables (such as ) is + variables (such as ) is influenced by block_size. See for information. + linkend="runtime-config-resource"/> for information. @@ -7801,13 +8201,30 @@ dynamic_library_path = 'C:\tools\postgresql;H:\my_project\lib;$libdir' data_checksums (boolean) - data_checksums configuration parameter + data_checksums configuration parameter Reports whether data checksums are enabled for this cluster. - See for more information. + See for more information. + + + + + + data_directory_mode (integer) + + data_directory_mode configuration parameter + + + + + On Unix systems this parameter reports the permissions of the data + directory defined by () at startup. + (On Microsoft Windows this parameter will always display + 0700). See + for more information. 
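
       The read-only presets in this section can be inspected like any other
       parameter; for example, a minimal sketch (the values shown in the
       comment are merely typical, not guaranteed):

SHOW data_directory_mode;    -- e.g. 0700 on a default Unix installation
SELECT name, setting
FROM pg_settings
WHERE name IN ('data_directory_mode', 'block_size', 'data_checksums');
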
@@ -7815,7 +8232,7 @@ dynamic_library_path = 'C:\tools\postgresql;H:\my_project\lib;$libdir' debug_assertions (boolean) - debug_assertions configuration parameter + debug_assertions configuration parameter @@ -7835,13 +8252,13 @@ dynamic_library_path = 'C:\tools\postgresql;H:\my_project\lib;$libdir' integer_datetimes (boolean) - integer_datetimes configuration parameter + integer_datetimes configuration parameter - Reports whether PostgreSQL was built with support for - 64-bit-integer dates and times. As of PostgreSQL 10, + Reports whether PostgreSQL was built with support for + 64-bit-integer dates and times. As of PostgreSQL 10, this is always on. @@ -7850,13 +8267,13 @@ dynamic_library_path = 'C:\tools\postgresql;H:\my_project\lib;$libdir' lc_collate (string) - lc_collate configuration parameter + lc_collate configuration parameter Reports the locale in which sorting of textual data is done. - See for more information. + See for more information. This value is determined when a database is created. @@ -7865,13 +8282,13 @@ dynamic_library_path = 'C:\tools\postgresql;H:\my_project\lib;$libdir' lc_ctype (string) - lc_ctype configuration parameter + lc_ctype configuration parameter Reports the locale that determines character classifications. - See for more information. + See for more information. This value is determined when a database is created. Ordinarily this will be the same as lc_collate, but for special applications it might be set differently. @@ -7882,13 +8299,13 @@ dynamic_library_path = 'C:\tools\postgresql;H:\my_project\lib;$libdir' max_function_args (integer) - max_function_args configuration parameter + max_function_args configuration parameter Reports the maximum number of function arguments. It is determined by - the value of FUNC_MAX_ARGS when building the server. The + the value of FUNC_MAX_ARGS when building the server. The default value is 100 arguments. @@ -7897,14 +8314,14 @@ dynamic_library_path = 'C:\tools\postgresql;H:\my_project\lib;$libdir' max_identifier_length (integer) - max_identifier_length configuration parameter + max_identifier_length configuration parameter Reports the maximum identifier length. It is determined as one - less than the value of NAMEDATALEN when building - the server. The default value of NAMEDATALEN is + less than the value of NAMEDATALEN when building + the server. The default value of NAMEDATALEN is 64; therefore the default max_identifier_length is 63 bytes, which can be less than 63 characters when using multibyte encodings. @@ -7915,13 +8332,13 @@ dynamic_library_path = 'C:\tools\postgresql;H:\my_project\lib;$libdir' max_index_keys (integer) - max_index_keys configuration parameter + max_index_keys configuration parameter Reports the maximum number of index keys. It is determined by - the value of INDEX_MAX_KEYS when building the server. The + the value of INDEX_MAX_KEYS when building the server. The default value is 32 keys. @@ -7930,16 +8347,16 @@ dynamic_library_path = 'C:\tools\postgresql;H:\my_project\lib;$libdir' segment_size (integer) - segment_size configuration parameter + segment_size configuration parameter Reports the number of blocks (pages) that can be stored within a file - segment. It is determined by the value of RELSEG_SIZE + segment. It is determined by the value of RELSEG_SIZE when building the server. The maximum size of a segment file in bytes - is equal to segment_size multiplied by - block_size; by default this is 1GB. + is equal to segment_size multiplied by + block_size; by default this is 1GB. 
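
       Since both factors are exposed as presets, the derived maximum segment
       file size can be checked directly from SQL; a small sketch, assuming a
       server built with the default options:

SELECT (SELECT setting::bigint FROM pg_settings WHERE name = 'segment_size')
     * (SELECT setting::bigint FROM pg_settings WHERE name = 'block_size')
       AS max_segment_bytes;
-- With the defaults, 131072 blocks * 8192 bytes = 1073741824 bytes, i.e. 1GB.
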
@@ -7947,16 +8364,16 @@ dynamic_library_path = 'C:\tools\postgresql;H:\my_project\lib;$libdir' server_encoding (string) - server_encoding configuration parameter + server_encoding configuration parameter - character set + character set Reports the database encoding (character set). It is determined when the database is created. Ordinarily, clients need only be concerned with the value of . + linkend="guc-client-encoding"/>. @@ -7964,13 +8381,13 @@ dynamic_library_path = 'C:\tools\postgresql;H:\my_project\lib;$libdir' server_version (string) - server_version configuration parameter + server_version configuration parameter Reports the version number of the server. It is determined by the - value of PG_VERSION when building the server. + value of PG_VERSION when building the server. @@ -7978,13 +8395,29 @@ dynamic_library_path = 'C:\tools\postgresql;H:\my_project\lib;$libdir' server_version_num (integer) - server_version_num configuration parameter + server_version_num configuration parameter Reports the version number of the server as an integer. It is determined - by the value of PG_VERSION_NUM when building the server. + by the value of PG_VERSION_NUM when building the server. + + + + + + ssl_library (string) + + ssl_library configuration parameter + + + + + Reports the name of the SSL library that this PostgreSQL server was + built with (even if SSL is not currently configured or in use on this + instance), for example OpenSSL, or an empty string + if none. @@ -7992,13 +8425,13 @@ dynamic_library_path = 'C:\tools\postgresql;H:\my_project\lib;$libdir' wal_block_size (integer) - wal_block_size configuration parameter + wal_block_size configuration parameter Reports the size of a WAL disk block. It is determined by the value - of XLOG_BLCKSZ when building the server. The default value + of XLOG_BLCKSZ when building the server. The default value is 8192 bytes. @@ -8007,16 +8440,13 @@ dynamic_library_path = 'C:\tools\postgresql;H:\my_project\lib;$libdir' wal_segment_size (integer) - wal_segment_size configuration parameter + wal_segment_size configuration parameter - Reports the number of blocks (pages) in a WAL segment file. - The total size of a WAL segment file in bytes is equal to - wal_segment_size multiplied by wal_block_size; - by default this is 16MB. See for - more information. + Reports the size of write ahead log segments. The default value is + 16MB. See for more information. @@ -8037,12 +8467,12 @@ dynamic_library_path = 'C:\tools\postgresql;H:\my_project\lib;$libdir' Custom options have two-part names: an extension name, then a dot, then the parameter name proper, much like qualified names in SQL. An example - is plpgsql.variable_conflict. + is plpgsql.variable_conflict. Because custom options may need to be set in processes that have not - loaded the relevant extension module, PostgreSQL + loaded the relevant extension module, PostgreSQL will accept a setting for any two-part parameter name. Such variables are treated as placeholders and have no function until the module that defines them is loaded. When an extension module is loaded, it will add @@ -8061,7 +8491,7 @@ dynamic_library_path = 'C:\tools\postgresql;H:\my_project\lib;$libdir' to assist with recovery of severely damaged databases. There should be no reason to use them on a production database. As such, they have been excluded from the sample - postgresql.conf file. Note that many of these + postgresql.conf file. Note that many of these parameters require special source compilation flags to work at all. 
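
    Before looking at the individual parameters below, note that the set of
    developer options available on a given server can also be listed from
    pg_settings; a hedged sketch (the category label used in the filter is an
    assumption and may differ between releases):

SELECT name, setting, short_desc
FROM pg_settings
WHERE category LIKE 'Developer%'
ORDER BY name;
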
@@ -8100,7 +8530,7 @@ dynamic_library_path = 'C:\tools\postgresql;H:\my_project\lib;$libdir' post_auth_delay (integer) - post_auth_delay configuration parameter + post_auth_delay configuration parameter @@ -8117,7 +8547,7 @@ dynamic_library_path = 'C:\tools\postgresql;H:\my_project\lib;$libdir' pre_auth_delay (integer) - pre_auth_delay configuration parameter + pre_auth_delay configuration parameter @@ -8127,7 +8557,7 @@ dynamic_library_path = 'C:\tools\postgresql;H:\my_project\lib;$libdir' authentication procedure. This is intended to give developers an opportunity to attach to the server process with a debugger to trace down misbehavior in authentication. - This parameter can only be set in the postgresql.conf + This parameter can only be set in the postgresql.conf file or on the server command line. @@ -8136,15 +8566,15 @@ dynamic_library_path = 'C:\tools\postgresql;H:\my_project\lib;$libdir' trace_notify (boolean) - trace_notify configuration parameter + trace_notify configuration parameter Generates a great amount of debugging output for the LISTEN and NOTIFY - commands. or - must be + commands. or + must be DEBUG1 or lower to send this output to the client or server logs, respectively. @@ -8154,24 +8584,24 @@ dynamic_library_path = 'C:\tools\postgresql;H:\my_project\lib;$libdir' trace_recovery_messages (enum) - trace_recovery_messages configuration parameter + trace_recovery_messages configuration parameter Enables logging of recovery-related debugging output that otherwise would not be logged. This parameter allows the user to override the - normal setting of , but only for + normal setting of , but only for specific messages. This is intended for use in debugging Hot Standby. - Valid values are DEBUG5, DEBUG4, - DEBUG3, DEBUG2, DEBUG1, and - LOG. The default, LOG, does not affect + Valid values are DEBUG5, DEBUG4, + DEBUG3, DEBUG2, DEBUG1, and + LOG. The default, LOG, does not affect logging decisions at all. The other values cause recovery-related debug messages of that priority or higher to be logged as though they - had LOG priority; for common settings of - log_min_messages this results in unconditionally sending + had LOG priority; for common settings of + log_min_messages this results in unconditionally sending them to the server log. - This parameter can only be set in the postgresql.conf + This parameter can only be set in the postgresql.conf file or on the server command line. 
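
       As a sketch of how the trace_notify parameter described above is
       typically exercised in an interactive session (the channel name is made
       up, and the exact wording of the debug output varies):

SET client_min_messages = DEBUG1;
SET trace_notify = on;
LISTEN my_channel;
NOTIFY my_channel, 'ping';
-- DEBUG-level messages describing how the notification is queued and
-- delivered are now reported to the client.
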
@@ -8180,7 +8610,7 @@ dynamic_library_path = 'C:\tools\postgresql;H:\my_project\lib;$libdir' trace_sort (boolean) - trace_sort configuration parameter + trace_sort configuration parameter @@ -8196,7 +8626,7 @@ dynamic_library_path = 'C:\tools\postgresql;H:\my_project\lib;$libdir' trace_locks (boolean) - trace_locks configuration parameter + trace_locks configuration parameter @@ -8237,7 +8667,7 @@ LOG: CleanUpLock: deleting: lock(0xb7acd844) id(24688,24696,0,0,0,1) trace_lwlocks (boolean) - trace_lwlocks configuration parameter + trace_lwlocks configuration parameter @@ -8257,7 +8687,7 @@ LOG: CleanUpLock: deleting: lock(0xb7acd844) id(24688,24696,0,0,0,1) trace_userlocks (boolean) - trace_userlocks configuration parameter + trace_userlocks configuration parameter @@ -8276,7 +8706,7 @@ LOG: CleanUpLock: deleting: lock(0xb7acd844) id(24688,24696,0,0,0,1) trace_lock_oidmin (integer) - trace_lock_oidmin configuration parameter + trace_lock_oidmin configuration parameter @@ -8295,7 +8725,7 @@ LOG: CleanUpLock: deleting: lock(0xb7acd844) id(24688,24696,0,0,0,1) trace_lock_table (integer) - trace_lock_table configuration parameter + trace_lock_table configuration parameter @@ -8313,7 +8743,7 @@ LOG: CleanUpLock: deleting: lock(0xb7acd844) id(24688,24696,0,0,0,1) debug_deadlocks (boolean) - debug_deadlocks configuration parameter + debug_deadlocks configuration parameter @@ -8332,7 +8762,7 @@ LOG: CleanUpLock: deleting: lock(0xb7acd844) id(24688,24696,0,0,0,1) log_btree_build_stats (boolean) - log_btree_build_stats configuration parameter + log_btree_build_stats configuration parameter @@ -8351,7 +8781,7 @@ LOG: CleanUpLock: deleting: lock(0xb7acd844) id(24688,24696,0,0,0,1) wal_consistency_checking (string) - wal_consistency_checking configuration parameter + wal_consistency_checking configuration parameter @@ -8371,10 +8801,10 @@ LOG: CleanUpLock: deleting: lock(0xb7acd844) id(24688,24696,0,0,0,1) the feature. It can be set to all to check all records, or to a comma-separated list of resource managers to check only records originating from those resource managers. Currently, - the supported resource managers are heap, - heap2, btree, hash, - gin, gist, sequence, - spgist, brin, and generic. Only + the supported resource managers are heap, + heap2, btree, hash, + gin, gist, sequence, + spgist, brin, and generic. Only superusers can change this setting. @@ -8383,7 +8813,7 @@ LOG: CleanUpLock: deleting: lock(0xb7acd844) id(24688,24696,0,0,0,1) wal_debug (boolean) - wal_debug configuration parameter + wal_debug configuration parameter @@ -8399,24 +8829,24 @@ LOG: CleanUpLock: deleting: lock(0xb7acd844) id(24688,24696,0,0,0,1) ignore_checksum_failure (boolean) - ignore_checksum_failure configuration parameter + ignore_checksum_failure configuration parameter - Only has effect if are enabled. + Only has effect if are enabled. Detection of a checksum failure during a read normally causes - PostgreSQL to report an error, aborting the current - transaction. Setting ignore_checksum_failure to on causes + PostgreSQL to report an error, aborting the current + transaction. Setting ignore_checksum_failure to on causes the system to ignore the failure (but still report a warning), and continue processing. This behavior may cause crashes, propagate - or hide corruption, or other serious problems. However, it may allow + or hide corruption, or other serious problems. 
However, it may allow you to get past the error and retrieve undamaged tuples that might still be present in the table if the block header is still sane. If the header is corrupt an error will be reported even if this option is enabled. The - default setting is off, and it can only be changed by a superuser. + default setting is off, and it can only be changed by a superuser. @@ -8424,16 +8854,16 @@ LOG: CleanUpLock: deleting: lock(0xb7acd844) id(24688,24696,0,0,0,1) zero_damaged_pages (boolean) - zero_damaged_pages configuration parameter + zero_damaged_pages configuration parameter Detection of a damaged page header normally causes - PostgreSQL to report an error, aborting the current - transaction. Setting zero_damaged_pages to on causes + PostgreSQL to report an error, aborting the current + transaction. Setting zero_damaged_pages to on causes the system to instead report a warning, zero out the damaged - page in memory, and continue processing. This behavior will destroy data, + page in memory, and continue processing. This behavior will destroy data, namely all the rows on the damaged page. However, it does allow you to get past the error and retrieve rows from any undamaged pages that might be present in the table. It is useful for recovering data if @@ -8442,12 +8872,94 @@ LOG: CleanUpLock: deleting: lock(0xb7acd844) id(24688,24696,0,0,0,1) data from the damaged pages of a table. Zeroed-out pages are not forced to disk so it is recommended to recreate the table or the index before turning this parameter off again. The - default setting is off, and it can only be changed + default setting is off, and it can only be changed by a superuser. - + + + jit_debugging_support (boolean) + + jit_debugging_support configuration parameter + + + + + If LLVM has the required functionality, register generated functions + with GDB. This makes debugging easier. + The default setting is off. + This parameter can only be set at server start. + + + + + + jit_dump_bitcode (boolean) + + jit_dump_bitcode configuration parameter + + + + + Writes the generated LLVM IR out to the + file system, inside . This is only + useful for working on the internals of the JIT implementation. + The default setting is off. + This parameter can only be changed by a superuser. + + + + + + jit_expressions (boolean) + + jit_expressions configuration parameter + + + + + Determines whether expressions are JIT compiled, when JIT compilation + is activated (see ). The default is + on. + + + + + + jit_profiling_support (boolean) + + jit_profiling_support configuration parameter + + + + + If LLVM has the required functionality, emit the data needed to allow + perf to profile functions generated by JIT. + This writes out files to $HOME/.debug/jit/; the + user is responsible for performing cleanup when desired. + The default setting is off. + This parameter can only be set at server start. + + + + + + jit_tuple_deforming (boolean) + + jit_tuple_deforming configuration parameter + + + + + Determines whether tuple deforming is JIT compiled, when JIT + compilation is activated (see ). + The default is on. + + + + + Short Options @@ -8455,7 +8967,7 @@ LOG: CleanUpLock: deleting: lock(0xb7acd844) id(24688,24696,0,0,0,1) For convenience there are also single letter command-line option switches available for some parameters. They are described in - . Some of these + . Some of these options exist for historical reasons, and their presence as a single-letter option does not necessarily indicate an endorsement to use the option heavily. 
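
    For instance, the two server invocations below are equivalent ways of
    setting one of the parameters listed in the table that follows; the data
    directory path is, of course, hypothetical:

postgres -D /srv/pgdata -B 4096
postgres -D /srv/pgdata -c shared_buffers=4096
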
@@ -8474,15 +8986,15 @@ LOG: CleanUpLock: deleting: lock(0xb7acd844) id(24688,24696,0,0,0,1) - shared_buffers = x + shared_buffers = x - log_min_messages = DEBUGx + log_min_messages = DEBUGx - datestyle = euro + datestyle = euro @@ -8491,69 +9003,69 @@ LOG: CleanUpLock: deleting: lock(0xb7acd844) id(24688,24696,0,0,0,1) , - enable_bitmapscan = off, - enable_hashjoin = off, - enable_indexscan = off, - enable_mergejoin = off, - enable_nestloop = off, - enable_indexonlyscan = off, - enable_seqscan = off, - enable_tidscan = off + enable_bitmapscan = off, + enable_hashjoin = off, + enable_indexscan = off, + enable_mergejoin = off, + enable_nestloop = off, + enable_indexonlyscan = off, + enable_seqscan = off, + enable_tidscan = off - fsync = off + fsync = off - listen_addresses = x + listen_addresses = x - listen_addresses = '*' + listen_addresses = '*' - unix_socket_directories = x + unix_socket_directories = x - ssl = on + ssl = on - max_connections = x + max_connections = x - allow_system_table_mods = on + allow_system_table_mods = on - port = x + port = x - ignore_system_indexes = on + ignore_system_indexes = on - log_statement_stats = on + log_statement_stats = on - work_mem = x + work_mem = x , , - log_parser_stats = on, - log_planner_stats = on, - log_executor_stats = on + log_parser_stats = on, + log_planner_stats = on, + log_executor_stats = on - post_auth_delay = x + post_auth_delay = x diff --git a/doc/src/sgml/contacts.sgml b/doc/src/sgml/contacts.sgml deleted file mode 100644 index 308eb418a5..0000000000 --- a/doc/src/sgml/contacts.sgml +++ /dev/null @@ -1,26 +0,0 @@ - - - -Contacts - - - diff --git a/doc/src/sgml/contrib-spi.sgml b/doc/src/sgml/contrib-spi.sgml index 3287c18d27..fed6f24932 100644 --- a/doc/src/sgml/contrib-spi.sgml +++ b/doc/src/sgml/contrib-spi.sgml @@ -9,8 +9,10 @@ - The spi module provides several workable examples - of using SPI and triggers. While these functions are of some value in + The spi module provides several workable examples + of using the Server Programming Interface + (SPI) and triggers. While these functions are of + some value in their own right, they are even more useful as examples to modify for your own purposes. The functions are general enough to be used with any table, but you have to specify table and field names (as described @@ -26,15 +28,15 @@ refint — Functions for Implementing Referential Integrity - check_primary_key() and - check_foreign_key() are used to check foreign key constraints. + check_primary_key() and + check_foreign_key() are used to check foreign key constraints. (This functionality is long since superseded by the built-in foreign key mechanism, of course, but the module is still useful as an example.) - check_primary_key() checks the referencing table. - To use, create a BEFORE INSERT OR UPDATE trigger using this + check_primary_key() checks the referencing table. + To use, create a BEFORE INSERT OR UPDATE trigger using this function on a table referencing another table. Specify as the trigger arguments: the referencing table's column name(s) which form the foreign key, the referenced table name, and the column names in the referenced table @@ -43,14 +45,14 @@ - check_foreign_key() checks the referenced table. - To use, create a BEFORE DELETE OR UPDATE trigger using this + check_foreign_key() checks the referenced table. + To use, create a BEFORE DELETE OR UPDATE trigger using this function on a table referenced by other table(s). 
Specify as the trigger arguments: the number of referencing tables for which the function has to perform checking, the action if a referencing key is found - (cascade — to delete the referencing row, - restrict — to abort transaction if referencing keys - exist, setnull — to set referencing key fields to null), + (cascade — to delete the referencing row, + restrict — to abort transaction if referencing keys + exist, setnull — to set referencing key fields to null), the triggered table's column names which form the primary/unique key, then the referencing table name and column names (repeated for as many referencing tables as were specified by first argument). Note that the @@ -59,100 +61,7 @@ - There are examples in refint.example. - - - - - timetravel — Functions for Implementing Time Travel - - - Long ago, PostgreSQL had a built-in time travel feature - that kept the insert and delete times for each tuple. This can be - emulated using these functions. To use these functions, - you must add to a table two columns of abstime type to store - the date when a tuple was inserted (start_date) and changed/deleted - (stop_date): - - -CREATE TABLE mytab ( - ... ... - start_date abstime, - stop_date abstime - ... ... -); - - - The columns can be named whatever you like, but in this discussion - we'll call them start_date and stop_date. - - - - When a new row is inserted, start_date should normally be set to - current time, and stop_date to infinity. The trigger - will automatically substitute these values if the inserted data - contains nulls in these columns. Generally, inserting explicit - non-null data in these columns should only be done when re-loading - dumped data. - - - - Tuples with stop_date equal to infinity are valid - now, and can be modified. Tuples with a finite stop_date cannot - be modified anymore — the trigger will prevent it. (If you need - to do that, you can turn off time travel as shown below.) - - - - For a modifiable row, on update only the stop_date in the tuple being - updated will be changed (to current time) and a new tuple with the modified - data will be inserted. Start_date in this new tuple will be set to current - time and stop_date to infinity. - - - - A delete does not actually remove the tuple but only sets its stop_date - to current time. - - - - To query for tuples valid now, include - stop_date = 'infinity' in the query's WHERE condition. - (You might wish to incorporate that in a view.) Similarly, you can - query for tuples valid at any past time with suitable conditions on - start_date and stop_date. - - - - timetravel() is the general trigger function that supports - this behavior. Create a BEFORE INSERT OR UPDATE OR DELETE - trigger using this function on each time-traveled table. Specify two - trigger arguments: the actual - names of the start_date and stop_date columns. - Optionally, you can specify one to three more arguments, which must refer - to columns of type text. The trigger will store the name of - the current user into the first of these columns during INSERT, the - second column during UPDATE, and the third during DELETE. - - - - set_timetravel() allows you to turn time-travel on or off for - a table. - set_timetravel('mytab', 1) will turn TT ON for table mytab. - set_timetravel('mytab', 0) will turn TT OFF for table mytab. - In both cases the old status is reported. While TT is off, you can modify - the start_date and stop_date columns freely. 
Note that the on/off status - is local to the current database session — fresh sessions will - always start out with TT ON for all tables. - - - - get_timetravel() returns the TT state for a table without - changing it. - - - - There is an example in timetravel.example. + There are examples in refint.example. @@ -160,17 +69,17 @@ CREATE TABLE mytab ( autoinc — Functions for Autoincrementing Fields - autoinc() is a trigger that stores the next value of + autoinc() is a trigger that stores the next value of a sequence into an integer field. This has some overlap with the - built-in serial column feature, but it is not the same: - autoinc() will override attempts to substitute a + built-in serial column feature, but it is not the same: + autoinc() will override attempts to substitute a different field value during inserts, and optionally it can be used to increment the field during updates, too. - To use, create a BEFORE INSERT (or optionally BEFORE - INSERT OR UPDATE) trigger using this function. Specify two + To use, create a BEFORE INSERT (or optionally BEFORE + INSERT OR UPDATE) trigger using this function. Specify two trigger arguments: the name of the integer column to be modified, and the name of the sequence object that will supply values. (Actually, you can specify any number of pairs of such names, if @@ -178,7 +87,7 @@ CREATE TABLE mytab ( - There is an example in autoinc.example. + There is an example in autoinc.example. @@ -187,19 +96,19 @@ CREATE TABLE mytab ( insert_username — Functions for Tracking Who Changed a Table - insert_username() is a trigger that stores the current + insert_username() is a trigger that stores the current user's name into a text field. This can be useful for tracking who last modified a particular row within a table. - To use, create a BEFORE INSERT and/or UPDATE + To use, create a BEFORE INSERT and/or UPDATE trigger using this function. Specify a single trigger argument: the name of the text column to be modified. - There is an example in insert_username.example. + There is an example in insert_username.example. @@ -208,21 +117,21 @@ CREATE TABLE mytab ( moddatetime — Functions for Tracking Last Modification Time - moddatetime() is a trigger that stores the current - time into a timestamp field. This can be useful for tracking + moddatetime() is a trigger that stores the current + time into a timestamp field. This can be useful for tracking the last modification time of a particular row within a table. - To use, create a BEFORE UPDATE + To use, create a BEFORE UPDATE trigger using this function. Specify a single trigger argument: the name of the column to be modified. - The column must be of type timestamp or timestamp with - time zone. + The column must be of type timestamp or timestamp with + time zone. - There is an example in moddatetime.example. + There is an example in moddatetime.example. diff --git a/doc/src/sgml/contrib.sgml b/doc/src/sgml/contrib.sgml index eaaa36cb87..b626a345f3 100644 --- a/doc/src/sgml/contrib.sgml +++ b/doc/src/sgml/contrib.sgml @@ -6,7 +6,7 @@ This appendix and the next one contain information regarding the modules that can be found in the contrib directory of the - PostgreSQL distribution. + PostgreSQL distribution. These include porting tools, analysis utilities, and plug-in features that are not part of the core PostgreSQL system, mainly because they address a limited audience or are too experimental @@ -16,14 +16,14 @@ This appendix covers extensions and other server plug-in modules found in - contrib. 
covers utility + contrib. covers utility programs. When building from the source distribution, these components are not built automatically, unless you build the "world" target - (see ). + (see ). You can build and install all of them by running: make @@ -41,64 +41,64 @@ make installcheck - once you have a PostgreSQL server running. + once you have a PostgreSQL server running. - If you are using a pre-packaged version of PostgreSQL, + If you are using a pre-packaged version of PostgreSQL, these modules are typically made available as a separate subpackage, - such as postgresql-contrib. + such as postgresql-contrib. Many modules supply new user-defined functions, operators, or types. To make use of one of these modules, after you have installed the code you need to register the new SQL objects in the database system. - In PostgreSQL 9.1 and later, this is done by executing - a command. In a fresh database, + In PostgreSQL 9.1 and later, this is done by executing + a command. In a fresh database, you can simply do -CREATE EXTENSION module_name; +CREATE EXTENSION module_name; This command must be run by a database superuser. This registers the new SQL objects in the current database only, so you need to run this command in each database that you want the module's facilities to be available in. Alternatively, run it in - database template1 so that the extension will be copied into + database template1 so that the extension will be copied into subsequently-created databases by default. Many modules allow you to install their objects in a schema of your choice. To do that, add SCHEMA - schema_name to the CREATE EXTENSION + schema_name to the CREATE EXTENSION command. By default, the objects will be placed in your current creation - target schema, typically public. + target schema, which in turn defaults to public. If your database was brought forward by dump and reload from a pre-9.1 - version of PostgreSQL, and you had been using the pre-9.1 + version of PostgreSQL, and you had been using the pre-9.1 version of the module in it, you should instead do -CREATE EXTENSION module_name FROM unpackaged; +CREATE EXTENSION module_name FROM unpackaged; This will update the pre-9.1 objects of the module into a proper - extension object. Future updates to the module will be - managed by . + extension object. Future updates to the module will be + managed by . For more information about extension updates, see - . + . Note, however, that some of these modules are not extensions in this sense, but are loaded into the server in some other way, for instance by way of - . See the documentation of each + . See the documentation of each module for details. @@ -109,7 +109,6 @@ CREATE EXTENSION module_name FROM unpackaged; &bloom; &btree-gin; &btree-gist; - &chkpass; &citext; &cube; &dblink; @@ -164,7 +163,7 @@ pages. This appendix and the previous one contain information regarding the modules that can be found in the contrib directory of the - PostgreSQL distribution. See for + PostgreSQL distribution. See for more information about the contrib section in general and server extensions and plug-ins found in contrib specifically. @@ -185,7 +184,7 @@ pages. This section covers PostgreSQL client applications in contrib. They can be run from anywhere, independent of where the database server resides. See - also for information about client + also for information about client applications that part of the core PostgreSQL distribution. @@ -201,7 +200,7 @@ pages. 
This section covers PostgreSQL server-related applications in contrib. They are typically run on the host where the database server resides. See also for information about server applications that + linkend="reference-server"/> for information about server applications that part of the core PostgreSQL distribution. diff --git a/doc/src/sgml/cube.sgml b/doc/src/sgml/cube.sgml index 1ffc40f1a5..c6e586270a 100644 --- a/doc/src/sgml/cube.sgml +++ b/doc/src/sgml/cube.sgml @@ -8,7 +8,7 @@ - This module implements a data type cube for + This module implements a data type cube for representing multidimensional cubes. @@ -16,9 +16,9 @@ Syntax - shows the valid external - representations for the cube - type. x, y, etc. denote + shows the valid external + representations for the cube + type. x, y, etc. denote floating-point numbers. @@ -34,43 +34,43 @@ - x + x A one-dimensional point (or, zero-length one-dimensional interval) - (x) + (x) Same as above - x1,x2,...,xn + x1,x2,...,xn A point in n-dimensional space, represented internally as a zero-volume cube - (x1,x2,...,xn) + (x1,x2,...,xn) Same as above - (x),(y) - A one-dimensional interval starting at x and ending at y or vice versa; the + (x),(y) + A one-dimensional interval starting at x and ending at y or vice versa; the order does not matter - [(x),(y)] + [(x),(y)] Same as above - (x1,...,xn),(y1,...,yn) + (x1,...,xn),(y1,...,yn) An n-dimensional cube represented by a pair of its diagonally opposite corners - [(x1,...,xn),(y1,...,yn)] + [(x1,...,xn),(y1,...,yn)] Same as above @@ -79,17 +79,17 @@ It does not matter which order the opposite corners of a cube are - entered in. The cube functions + entered in. The cube functions automatically swap values if needed to create a uniform - lower left — upper right internal representation. - When the corners coincide, cube stores only one corner - along with an is point flag to avoid wasting space. + lower left — upper right internal representation. + When the corners coincide, cube stores only one corner + along with an is point flag to avoid wasting space. White space is ignored on input, so - [(x),(y)] is the same as - [ ( x ), ( y ) ]. + [(x),(y)] is the same as + [ ( x ), ( y ) ]. @@ -106,8 +106,8 @@ Usage - shows the operators provided for - type cube. + shows the operators provided for + type cube. @@ -123,91 +123,93 @@ - a = b - boolean + a = b + boolean The cubes a and b are identical. - a && b - boolean + a && b + boolean The cubes a and b overlap. - a @> b - boolean + a @> b + boolean The cube a contains the cube b. - a <@ b - boolean + a <@ b + boolean The cube a is contained in the cube b. - a < b - boolean + a < b + boolean The cube a is less than the cube b. - a <= b - boolean + a <= b + boolean The cube a is less than or equal to the cube b. - a > b - boolean + a > b + boolean The cube a is greater than the cube b. - a >= b - boolean + a >= b + boolean The cube a is greater than or equal to the cube b. - a <> b - boolean + a <> b + boolean The cube a is not equal to the cube b. - a -> n - float8 - Get n-th coordinate of cube (counting from 1). + a -> n + float8 + Get n-th coordinate of cube (counting from 1). - a ~> n - float8 + a ~> n + float8 - Get n-th coordinate in normalized cube - representation, in which the coordinates have been rearranged into - the form lower left — upper right; that is, the - smaller endpoint along each dimension appears first. 
+ Get n-th coordinate of cube in following way: + n = 2 * k - 1 means lower bound of k-th + dimension, n = 2 * k means upper bound of + k-th dimension. Negative + n denotes the inverse value of the corresponding + positive coordinate. This operator is designed for KNN-GiST support. - a <-> b - float8 + a <-> b + float8 Euclidean distance between a and b. - a <#> b - float8 + a <#> b + float8 Taxicab (L-1 metric) distance between a and b. - a <=> b - float8 + a <=> b + float8 Chebyshev (L-inf metric) distance between a and b. @@ -216,35 +218,35 @@
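
  To make the redefined ~> semantics concrete, a brief sketch (the cube
  literal is arbitrary sample data):

SELECT c ~> 1 AS lower_dim1, c ~> 2 AS upper_dim1,
       c ~> 3 AS lower_dim2, c ~> 4 AS upper_dim2
FROM (SELECT '(1,2),(3,4)'::cube AS c) AS t;
-- lower_dim1 = 1, upper_dim1 = 3, lower_dim2 = 2, upper_dim2 = 4
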
- (Before PostgreSQL 8.2, the containment operators @> and <@ were - respectively called @ and ~. These names are still available, but are + (Before PostgreSQL 8.2, the containment operators @> and <@ were + respectively called @ and ~. These names are still available, but are deprecated and will eventually be retired. Notice that the old names are reversed from the convention formerly followed by the core geometric data types!) - The scalar ordering operators (<, >=, etc) + The scalar ordering operators (<, >=, etc) do not make a lot of sense for any practical purpose but sorting. These operators first compare the first coordinates, and if those are equal, compare the second coordinates, etc. They exist mainly to support the - b-tree index operator class for cube, which can be useful for - example if you would like a UNIQUE constraint on a cube column. + b-tree index operator class for cube, which can be useful for + example if you would like a UNIQUE constraint on a cube column. - The cube module also provides a GiST index operator class for - cube values. - A cube GiST index can be used to search for values using the - =, &&, @>, and - <@ operators in WHERE clauses. + The cube module also provides a GiST index operator class for + cube values. + A cube GiST index can be used to search for values using the + =, &&, @>, and + <@ operators in WHERE clauses. - In addition, a cube GiST index can be used to find nearest + In addition, a cube GiST index can be used to find nearest neighbors using the metric operators - <->, <#>, and - <=> in ORDER BY clauses. + <->, <#>, and + <=> in ORDER BY clauses. For example, the nearest neighbor of the 3-D point (0.5, 0.5, 0.5) could be found efficiently with: @@ -253,7 +255,7 @@ SELECT c FROM test ORDER BY c <-> cube(array[0.5,0.5,0.5]) LIMIT 1; - The ~> operator can also be used in this way to + The ~> operator can also be used in this way to efficiently retrieve the first few values sorted by a selected coordinate. For example, to get the first few cubes ordered by the first coordinate (lower left corner) ascending one could use the following query: @@ -268,7 +270,7 @@ SELECT c FROM test ORDER BY c ~> 3 DESC LIMIT 5; - shows the available functions. + shows the available functions. @@ -365,7 +367,7 @@ SELECT c FROM test ORDER BY c ~> 3 DESC LIMIT 5; cube_ll_coord(cube, integer) float8 - Returns the n-th coordinate value for the lower + Returns the n-th coordinate value for the lower left corner of the cube. @@ -376,7 +378,7 @@ SELECT c FROM test ORDER BY c ~> 3 DESC LIMIT 5; cube_ur_coord(cube, integer) float8 - Returns the n-th coordinate value for the + Returns the n-th coordinate value for the upper right corner of the cube. @@ -412,9 +414,9 @@ SELECT c FROM test ORDER BY c ~> 3 DESC LIMIT 5; desired. - cube_subset(cube('(1,3,5),(6,7,8)'), ARRAY[2]) == '(3),(7)' + cube_subset(cube('(1,3,5),(6,7,8)'), ARRAY[2]) == '(3),(7)' cube_subset(cube('(1,3,5),(6,7,8)'), ARRAY[3,2,1,1]) == - '(5,3,1,1),(8,7,6,6)' + '(5,3,1,1),(8,7,6,6)' @@ -440,24 +442,24 @@ SELECT c FROM test ORDER BY c ~> 3 DESC LIMIT 5; cube_enlarge(c cube, r double, n integer) cube Increases the size of the cube by the specified - radius r in at least n dimensions. + radius r in at least n dimensions. If the radius is negative the cube is shrunk instead. - All defined dimensions are changed by the radius r. - Lower-left coordinates are decreased by r and - upper-right coordinates are increased by r. If a + All defined dimensions are changed by the radius r. 
+ Lower-left coordinates are decreased by r and + upper-right coordinates are increased by r. If a lower-left coordinate is increased to more than the corresponding - upper-right coordinate (this can only happen when r + upper-right coordinate (this can only happen when r < 0) than both coordinates are set to their average. - If n is greater than the number of defined dimensions - and the cube is being enlarged (r > 0), then extra - dimensions are added to make n altogether; + If n is greater than the number of defined dimensions + and the cube is being enlarged (r > 0), then extra + dimensions are added to make n altogether; 0 is used as the initial value for the extra coordinates. This function is useful for creating bounding boxes around a point for searching for nearby points. cube_enlarge('(1,2),(3,4)', 0.5, 3) == - '(0.5,1.5,-0.5),(3.5,4.5,0.5)' + '(0.5,1.5,-0.5),(3.5,4.5,0.5)' @@ -523,13 +525,13 @@ t Notes - For examples of usage, see the regression test sql/cube.sql. + For examples of usage, see the regression test sql/cube.sql. To make it harder for people to break things, there is a limit of 100 on the number of dimensions of cubes. This is set - in cubedata.h if you need something bigger. + in cubedata.h if you need something bigger. diff --git a/doc/src/sgml/custom-scan.sgml b/doc/src/sgml/custom-scan.sgml index 6159c3a24e..24631f5f40 100644 --- a/doc/src/sgml/custom-scan.sgml +++ b/doc/src/sgml/custom-scan.sgml @@ -9,9 +9,9 @@ - PostgreSQL supports a set of experimental facilities which + PostgreSQL supports a set of experimental facilities which are intended to allow extension modules to add new scan types to the system. - Unlike a foreign data wrapper, which is only + Unlike a foreign data wrapper, which is only responsible for knowing how to scan its own foreign tables, a custom scan provider can provide an alternative method of scanning any relation in the system. Typically, the motivation for writing a custom scan provider will @@ -51,9 +51,9 @@ extern PGDLLIMPORT set_rel_pathlist_hook_type set_rel_pathlist_hook; Although this hook function can be used to examine, modify, or remove paths generated by the core system, a custom scan provider will typically - confine itself to generating CustomPath objects and adding - them to rel using add_path. The custom scan - provider is responsible for initializing the CustomPath + confine itself to generating CustomPath objects and adding + them to rel using add_path. The custom scan + provider is responsible for initializing the CustomPath object, which is declared like this: typedef struct CustomPath @@ -68,22 +68,22 @@ typedef struct CustomPath - path must be initialized as for any other path, including + path must be initialized as for any other path, including the row-count estimate, start and total cost, and sort ordering provided - by this path. flags is a bit mask, which should include - CUSTOMPATH_SUPPORT_BACKWARD_SCAN if the custom path can support - a backward scan and CUSTOMPATH_SUPPORT_MARK_RESTORE if it + by this path. flags is a bit mask, which should include + CUSTOMPATH_SUPPORT_BACKWARD_SCAN if the custom path can support + a backward scan and CUSTOMPATH_SUPPORT_MARK_RESTORE if it can support mark and restore. Both capabilities are optional. - An optional custom_paths is a list of Path + An optional custom_paths is a list of Path nodes used by this custom-path node; these will be transformed into - Plan nodes by planner. - custom_private can be used to store the custom path's + Plan nodes by planner. 
+ custom_private can be used to store the custom path's private data. Private data should be stored in a form that can be handled - by nodeToString, so that debugging routines that attempt to - print the custom path will work as designed. methods must + by nodeToString, so that debugging routines that attempt to + print the custom path will work as designed. methods must point to a (usually statically allocated) object implementing the required custom path methods, of which there is currently only one. The - LibraryName and SymbolName fields must also + LibraryName and SymbolName fields must also be initialized so that the dynamic loader can resolve them to locate the method table. @@ -93,7 +93,7 @@ typedef struct CustomPath relations, such a path must produce the same output as would normally be produced by the join it replaces. To do this, the join provider should set the following hook, and then within the hook function, - create CustomPath path(s) for the join relation. + create CustomPath path(s) for the join relation. typedef void (*set_join_pathlist_hook_type) (PlannerInfo *root, RelOptInfo *joinrel, @@ -122,8 +122,8 @@ Plan *(*PlanCustomPath) (PlannerInfo *root, List *custom_plans); Convert a custom path to a finished plan. The return value will generally - be a CustomScan object, which the callback must allocate and - initialize. See for more details. + be a CustomScan object, which the callback must allocate and + initialize. See for more details. @@ -150,45 +150,45 @@ typedef struct CustomScan - scan must be initialized as for any other scan, including + scan must be initialized as for any other scan, including estimated costs, target lists, qualifications, and so on. - flags is a bit mask with the same meaning as in - CustomPath. - custom_plans can be used to store child - Plan nodes. - custom_exprs should be used to + flags is a bit mask with the same meaning as in + CustomPath. + custom_plans can be used to store child + Plan nodes. + custom_exprs should be used to store expression trees that will need to be fixed up by - setrefs.c and subselect.c, while - custom_private should be used to store other private data + setrefs.c and subselect.c, while + custom_private should be used to store other private data that is only used by the custom scan provider itself. - custom_scan_tlist can be NIL when scanning a base + custom_scan_tlist can be NIL when scanning a base relation, indicating that the custom scan returns scan tuples that match the base relation's row type. Otherwise it is a target list describing - the actual scan tuples. custom_scan_tlist must be + the actual scan tuples. custom_scan_tlist must be provided for joins, and could be provided for scans if the custom scan provider can compute some non-Var expressions. - custom_relids is set by the core code to the set of + custom_relids is set by the core code to the set of relations (range table indexes) that this scan node handles; except when this scan is replacing a join, it will have only one member. - methods must point to a (usually statically allocated) + methods must point to a (usually statically allocated) object implementing the required custom scan methods, which are further detailed below. - When a CustomScan scans a single relation, - scan.scanrelid must be the range table index of the table - to be scanned. When it replaces a join, scan.scanrelid + When a CustomScan scans a single relation, + scan.scanrelid must be the range table index of the table + to be scanned. 
When it replaces a join, scan.scanrelid should be zero. - Plan trees must be able to be duplicated using copyObject, - so all the data stored within the custom fields must consist of + Plan trees must be able to be duplicated using copyObject, + so all the data stored within the custom fields must consist of nodes that that function can handle. Furthermore, custom scan providers cannot substitute a larger structure that embeds - a CustomScan for the structure itself, as would be possible - for a CustomPath or CustomScanState. + a CustomScan for the structure itself, as would be possible + for a CustomPath or CustomScanState. @@ -197,14 +197,14 @@ typedef struct CustomScan Node *(*CreateCustomScanState) (CustomScan *cscan); - Allocate a CustomScanState for this - CustomScan. The actual allocation will often be larger than - required for an ordinary CustomScanState, because many + Allocate a CustomScanState for this + CustomScan. The actual allocation will often be larger than + required for an ordinary CustomScanState, because many providers will wish to embed that as the first field of a larger structure. - The value returned must have the node tag and methods + The value returned must have the node tag and methods set appropriately, but other fields should be left as zeroes at this - stage; after ExecInitCustomScan performs basic initialization, - the BeginCustomScan callback will be invoked to give the + stage; after ExecInitCustomScan performs basic initialization, + the BeginCustomScan callback will be invoked to give the custom scan provider a chance to do whatever else is needed. @@ -214,8 +214,8 @@ Node *(*CreateCustomScanState) (CustomScan *cscan); Executing Custom Scans - When a CustomScan is executed, its execution state is - represented by a CustomScanState, which is declared as + When a CustomScan is executed, its execution state is + represented by a CustomScanState, which is declared as follows: typedef struct CustomScanState @@ -228,15 +228,15 @@ typedef struct CustomScanState - ss is initialized as for any other scan state, + ss is initialized as for any other scan state, except that if the scan is for a join rather than a base relation, - ss.ss_currentRelation is left NULL. - flags is a bit mask with the same meaning as in - CustomPath and CustomScan. - methods must point to a (usually statically allocated) + ss.ss_currentRelation is left NULL. + flags is a bit mask with the same meaning as in + CustomPath and CustomScan. + methods must point to a (usually statically allocated) object implementing the required custom scan state methods, which are - further detailed below. Typically, a CustomScanState, which - need not support copyObject, will actually be a larger + further detailed below. Typically, a CustomScanState, which + need not support copyObject, will actually be a larger structure embedding the above as its first member. @@ -249,8 +249,8 @@ void (*BeginCustomScan) (CustomScanState *node, EState *estate, int eflags); - Complete initialization of the supplied CustomScanState. - Standard fields have been initialized by ExecInitCustomScan, + Complete initialization of the supplied CustomScanState. + Standard fields have been initialized by ExecInitCustomScan, but any private fields should be initialized here. @@ -259,16 +259,16 @@ void (*BeginCustomScan) (CustomScanState *node, TupleTableSlot *(*ExecCustomScan) (CustomScanState *node); Fetch the next scan tuple. 
If any tuples remain, it should fill - ps_ResultTupleSlot with the next tuple in the current scan + ps_ResultTupleSlot with the next tuple in the current scan direction, and then return the tuple slot. If not, - NULL or an empty slot should be returned. + NULL or an empty slot should be returned. void (*EndCustomScan) (CustomScanState *node); - Clean up any private data associated with the CustomScanState. + Clean up any private data associated with the CustomScanState. This method is required, but it does not need to do anything if there is no associated data or it will be cleaned up automatically. @@ -286,9 +286,9 @@ void (*ReScanCustomScan) (CustomScanState *node); void (*MarkPosCustomScan) (CustomScanState *node); Save the current scan position so that it can subsequently be restored - by the RestrPosCustomScan callback. This callback is + by the RestrPosCustomScan callback. This callback is optional, and need only be supplied if the - CUSTOMPATH_SUPPORT_MARK_RESTORE flag is set. + CUSTOMPATH_SUPPORT_MARK_RESTORE flag is set. @@ -296,9 +296,9 @@ void (*MarkPosCustomScan) (CustomScanState *node); void (*RestrPosCustomScan) (CustomScanState *node); Restore the previous scan position as saved by the - MarkPosCustomScan callback. This callback is optional, + MarkPosCustomScan callback. This callback is optional, and need only be supplied if the - CUSTOMPATH_SUPPORT_MARK_RESTORE flag is set. + CUSTOMPATH_SUPPORT_MARK_RESTORE flag is set. @@ -320,22 +320,39 @@ void (*InitializeDSMCustomScan) (CustomScanState *node, void *coordinate); Initialize the dynamic shared memory that will be required for parallel - operation; coordinate points to an amount of allocated space - equal to the return value of EstimateDSMCustomScan. + operation. coordinate points to a shared memory area of + size equal to the return value of EstimateDSMCustomScan. This callback is optional, and need only be supplied if this custom scan provider supports parallel execution. +void (*ReInitializeDSMCustomScan) (CustomScanState *node, + ParallelContext *pcxt, + void *coordinate); + + Re-initialize the dynamic shared memory required for parallel operation + when the custom-scan plan node is about to be re-scanned. + This callback is optional, and need only be supplied if this custom + scan provider supports parallel execution. + Recommended practice is that this callback reset only shared state, + while the ReScanCustomScan callback resets only local + state. Currently, this callback will be called + before ReScanCustomScan, but it's best not to rely on + that ordering. + + + + void (*InitializeWorkerCustomScan) (CustomScanState *node, shm_toc *toc, void *coordinate); - Initialize a parallel worker's custom state based on the shared state - set up in the leader by InitializeDSMCustomScan. - This callback is optional, and needs only be supplied if this - custom path supports parallel execution. + Initialize a parallel worker's local state based on the shared state + set up by the leader during InitializeDSMCustomScan. + This callback is optional, and need only be supplied if this custom + scan provider supports parallel execution. @@ -344,7 +361,7 @@ void (*ShutdownCustomScan) (CustomScanState *node); Release resources when it is anticipated the node will not be executed to completion. This is not called in all cases; sometimes, - EndCustomScan may be called without this function having + EndCustomScan may be called without this function having been called first. 
Since the DSM segment used by parallel query is destroyed just after this callback is invoked, custom scan providers that wish to take some action before the DSM segment goes away should implement @@ -357,9 +374,9 @@ void (*ExplainCustomScan) (CustomScanState *node, List *ancestors, ExplainState *es); - Output additional information for EXPLAIN of a custom-scan + Output additional information for EXPLAIN of a custom-scan plan node. This callback is optional. Common data stored in the - ScanState, such as the target list and scan relation, will + ScanState, such as the target list and scan relation, will be shown even without this callback, but the callback allows the display of additional, private state. diff --git a/doc/src/sgml/datatype.sgml b/doc/src/sgml/datatype.sgml index 5f881a0b74..8c38dde8fb 100644 --- a/doc/src/sgml/datatype.sgml +++ b/doc/src/sgml/datatype.sgml @@ -16,11 +16,11 @@ PostgreSQL has a rich set of native data types available to users. Users can add new types to PostgreSQL using the command. + linkend="sql-createtype"/> command. - shows all the built-in general-purpose data + shows all the built-in general-purpose data types. Most of the alternative names listed in the Aliases column are the names used internally by PostgreSQL for historical reasons. In @@ -60,7 +60,7 @@ bit varying [ (n) ] - varbit + varbit [ (n) ] variable-length bit string @@ -79,7 +79,7 @@ bytea - binary data (byte array) + binary data (byte array) @@ -336,7 +336,7 @@ Numeric types consist of two-, four-, and eight-byte integers, four- and eight-byte floating-point numbers, and selectable-precision - decimals. lists the + decimals. lists the available types. @@ -354,45 +354,45 @@ - smallint + smallint 2 bytes small-range integer -32768 to +32767 - integer + integer 4 bytes typical choice for integer -2147483648 to +2147483647 - bigint + bigint 8 bytes large-range integer -9223372036854775808 to +9223372036854775807 - decimal + decimal variable user-specified precision, exact up to 131072 digits before the decimal point; up to 16383 digits after the decimal point - numeric + numeric variable user-specified precision, exact up to 131072 digits before the decimal point; up to 16383 digits after the decimal point - real + real 4 bytes variable-precision, inexact 6 decimal digits precision - double precision + double precision 8 bytes variable-precision, inexact 15 decimal digits precision @@ -406,7 +406,7 @@ - serial + serial 4 bytes autoincrementing integer 1 to 2147483647 @@ -424,9 +424,9 @@ The syntax of constants for the numeric types is described in - . The numeric types have a + . The numeric types have a full set of corresponding arithmetic operators and - functions. Refer to for more + functions. Refer to for more information. The following sections describe the types in detail. @@ -515,14 +515,13 @@ We use the following terms below: The - scale of a numeric is the - count of decimal digits in the fractional part, to the right of - the decimal point. The precision of a - numeric is the total count of significant digits in - the whole number, that is, the number of digits to both sides of - the decimal point. So the number 23.5141 has a precision of 6 - and a scale of 4. Integers can be considered to have a scale of - zero. + precision of a numeric + is the total count of significant digits in the whole number, + that is, the number of digits to both sides of the decimal point. + The scale of a numeric is the + count of decimal digits in the fractional part, to the right of the + decimal point. 
So the number 23.5141 has a precision of 6 and a + scale of 4. Integers can be considered to have a scale of zero. @@ -559,7 +558,7 @@ NUMERIC The maximum allowed precision when explicitly specified in the type declaration is 1000; NUMERIC without a specified precision is subject to the limits described in . + linkend="datatype-numeric-table"/>. @@ -574,9 +573,9 @@ NUMERIC Numeric values are physically stored without any extra leading or trailing zeroes. Thus, the declared precision and scale of a column - are maximums, not fixed allocations. (In this sense the numeric - type is more akin to varchar(n) - than to char(n).) The actual storage + are maximums, not fixed allocations. (In this sense the numeric + type is more akin to varchar(n) + than to char(n).) The actual storage requirement is two bytes for each group of four decimal digits, plus three to eight bytes overhead. @@ -593,22 +592,22 @@ NUMERIC In addition to ordinary numeric values, the numeric - type allows the special value NaN, meaning - not-a-number. Any operation on NaN - yields another NaN. When writing this value + type allows the special value NaN, meaning + not-a-number. Any operation on NaN + yields another NaN. When writing this value as a constant in an SQL command, you must put quotes around it, - for example UPDATE table SET x = 'NaN'. On input, - the string NaN is recognized in a case-insensitive manner. + for example UPDATE table SET x = 'NaN'. On input, + the string NaN is recognized in a case-insensitive manner. - In most implementations of the not-a-number concept, - NaN is not considered equal to any other numeric - value (including NaN). In order to allow - numeric values to be sorted and used in tree-based - indexes, PostgreSQL treats NaN - values as equal, and greater than all non-NaN + In most implementations of the not-a-number concept, + NaN is not considered equal to any other numeric + value (including NaN). In order to allow + numeric values to be sorted and used in tree-based + indexes, PostgreSQL treats NaN + values as equal, and greater than all non-NaN values. @@ -728,7 +727,7 @@ FROM generate_series(-3.5, 3.5, 1) as x; - The setting controls the + The setting controls the number of extra significant digits included when a floating point value is converted to text for output. With the default value of 0, the output is the same on every platform @@ -756,18 +755,18 @@ FROM generate_series(-3.5, 3.5, 1) as x; floating-point arithmetic does not follow IEEE 754, these values will probably not work as expected.) When writing these values as constants in an SQL command, you must put quotes around them, - for example UPDATE table SET x = '-Infinity'. On input, + for example UPDATE table SET x = '-Infinity'. On input, these strings are recognized in a case-insensitive manner. - IEEE754 specifies that NaN should not compare equal - to any other floating-point value (including NaN). + IEEE754 specifies that NaN should not compare equal + to any other floating-point value (including NaN). In order to allow floating-point values to be sorted and used - in tree-based indexes, PostgreSQL treats - NaN values as equal, and greater than all - non-NaN values. + in tree-based indexes, PostgreSQL treats + NaN values as equal, and greater than all + non-NaN values. @@ -776,7 +775,7 @@ FROM generate_series(-3.5, 3.5, 1) as x; notations float and float(p) for specifying inexact numeric types. Here, p specifies - the minimum acceptable precision in binary digits. + the minimum acceptable precision in binary digits. 
PostgreSQL accepts float(1) to float(24) as selecting the real type, while @@ -837,6 +836,14 @@ FROM generate_series(-3.5, 3.5, 1) as x; and serial type + + + This section describes a PostgreSQL-specific way to create an + autoincrementing column. Another way is to use the SQL-standard + identity column feature, described at . + + + The data types smallserial, serial and bigserial are not true types, but merely @@ -862,12 +869,12 @@ ALTER SEQUENCE tablename_ Thus, we have created an integer column and arranged for its default - values to be assigned from a sequence generator. A NOT NULL + values to be assigned from a sequence generator. A NOT NULL constraint is applied to ensure that a null value cannot be inserted. (In most cases you would also want to attach a - UNIQUE or PRIMARY KEY constraint to prevent + UNIQUE or PRIMARY KEY constraint to prevent duplicate values from being inserted by accident, but this is - not automatic.) Lastly, the sequence is marked as owned by + not automatic.) Lastly, the sequence is marked as owned by the column, so that it will be dropped if the column or table is dropped. @@ -880,7 +887,7 @@ ALTER SEQUENCE tablename_nextval() in + See nextval() in for details. @@ -900,7 +907,7 @@ ALTER SEQUENCE tablename_bigserial and serial8 work the same way, except that they create a bigint column. bigserial should be used if you anticipate - the use of more than 231 identifiers over the + the use of more than 231 identifiers over the lifetime of the table. The type names smallserial and serial2 also work the same way, except that they create a smallint column. @@ -921,8 +928,8 @@ ALTER SEQUENCE tablename_ The money type stores a currency amount with a fixed fractional precision; see . The fractional precision is - determined by the database's setting. + linkend="datatype-money-table"/>. The fractional precision is + determined by the database's setting. The range shown in the table assumes there are two fractional digits. Input is accepted in a variety of formats, including integer and floating-point literals, as well as typical @@ -954,9 +961,9 @@ ALTER SEQUENCE tablename_ Since the output of this data type is locale-sensitive, it might not - work to load money data into a database that has a different - setting of lc_monetary. To avoid problems, before - restoring a dump into a new database make sure lc_monetary has + work to load money data into a database that has a different + setting of lc_monetary. To avoid problems, before + restoring a dump into a new database make sure lc_monetary has the same or equivalent value as in the database that was dumped. @@ -986,7 +993,7 @@ SELECT '52093.89'::money::numeric::float8; Division of a money value by an integer value is performed with truncation of the fractional part towards zero. To get a rounded result, divide by a floating-point value, or cast the money - value to numeric before dividing and back to money + value to numeric before dividing and back to money afterwards. (The latter is preferable to avoid risking precision loss.) When a money value is divided by another money value, the result is double precision (i.e., a pure number, @@ -1039,11 +1046,11 @@ SELECT '52093.89'::money::numeric::float8; - character varying(n), varchar(n) + character varying(n), varchar(n) variable-length with limit - character(n), char(n) + character(n), char(n) fixed-length, blank padded @@ -1055,17 +1062,17 @@ SELECT '52093.89'::money::numeric::float8;
- shows the + shows the general-purpose character types available in PostgreSQL. SQL defines two primary character types: - character varying(n) and - character(n), where n + character varying(n) and + character(n), where n is a positive integer. Both of these types can store strings up to - n characters (not bytes) in length. An attempt to store a + n characters (not bytes) in length. An attempt to store a longer string into a column of these types will result in an error, unless the excess characters are all spaces, in which case the string will be truncated to the maximum length. (This somewhat @@ -1079,22 +1086,22 @@ SELECT '52093.89'::money::numeric::float8; If one explicitly casts a value to character - varying(n) or - character(n), then an over-length - value will be truncated to n characters without + varying(n) or + character(n), then an over-length + value will be truncated to n characters without raising an error. (This too is required by the SQL standard.) - The notations varchar(n) and - char(n) are aliases for character - varying(n) and - character(n), respectively. + The notations varchar(n) and + char(n) are aliases for character + varying(n) and + character(n), respectively. character without length specifier is equivalent to character(1). If character varying is used without length specifier, the type accepts strings of any size. The - latter is a PostgreSQL extension. + latter is a PostgreSQL extension. @@ -1107,19 +1114,19 @@ SELECT '52093.89'::money::numeric::float8; Values of type character are physically padded - with spaces to the specified width n, and are + with spaces to the specified width n, and are stored and displayed that way. However, trailing spaces are treated as semantically insignificant and disregarded when comparing two values of type character. In collations where whitespace is significant, this behavior can produce unexpected results; for example SELECT 'a '::CHAR(2) collate "C" < - E'a\n'::CHAR(2) returns true, even though C + E'a\n'::CHAR(2) returns true, even though C locale would consider a space to be greater than a newline. Trailing spaces are removed when converting a character value to one of the other string types. Note that trailing spaces - are semantically significant in + are semantically significant in character varying and text values, and - when using pattern matching, that is LIKE and + when using pattern matching, that is LIKE and regular expressions. @@ -1132,7 +1139,7 @@ SELECT '52093.89'::money::numeric::float8; stored in background tables so that they do not interfere with rapid access to shorter column values. In any case, the longest possible character string that can be stored is about 1 GB. (The - maximum value that will be allowed for n in the data + maximum value that will be allowed for n in the data type declaration is less than that. It wouldn't be useful to change this because with multibyte character encodings the number of characters and bytes can be quite different. If you desire to @@ -1147,10 +1154,10 @@ SELECT '52093.89'::money::numeric::float8; apart from increased storage space when using the blank-padded type, and a few extra CPU cycles to check the length when storing into a length-constrained column. While - character(n) has performance + character(n) has performance advantages in some other database systems, there is no such advantage in PostgreSQL; in fact - character(n) is usually the slowest of + character(n) is usually the slowest of the three because of its additional storage costs. 
In most situations text or character varying should be used instead. @@ -1158,12 +1165,12 @@ SELECT '52093.89'::money::numeric::float8; - Refer to for information about - the syntax of string literals, and to + Refer to for information about + the syntax of string literals, and to for information about available operators and functions. The database character set determines the character set used to store textual values; for more information on character set support, - refer to . + refer to . @@ -1172,7 +1179,7 @@ SELECT '52093.89'::money::numeric::float8; CREATE TABLE test1 (a character(4)); INSERT INTO test1 VALUES ('ok'); -SELECT a, char_length(a) FROM test1; -- +SELECT a, char_length(a) FROM test1; -- a | char_length ------+------------- @@ -1198,7 +1205,7 @@ SELECT b, char_length(b) FROM test2; The char_length function is discussed in - . + . @@ -1207,12 +1214,12 @@ SELECT b, char_length(b) FROM test2; There are two other fixed-length character types in PostgreSQL, shown in . The name + linkend="datatype-character-special-table"/>. The name type exists only for the storage of identifiers in the internal system catalogs and is not intended for use by the general user. Its length is currently defined as 64 bytes (63 usable characters plus terminator) but should be referenced using the constant - NAMEDATALEN in C source code. + NAMEDATALEN in C source code. The length is set at compile time (and is therefore adjustable for special uses); the default maximum length might change in a future release. The type "char" @@ -1261,7 +1268,7 @@ SELECT b, char_length(b) FROM test2; The bytea data type allows storage of binary strings; - see . + see . @@ -1289,23 +1296,24 @@ SELECT b, char_length(b) FROM test2; strings are distinguished from character strings in two ways. First, binary strings specifically allow storing octets of value zero and other non-printable - octets (usually, octets outside the range 32 to 126). + octets (usually, octets outside the decimal range 32 to 126). Character strings disallow zero octets, and also disallow any other octet values and sequences of octet values that are invalid according to the database's selected character set encoding. Second, operations on binary strings process the actual bytes, whereas the processing of character strings depends on locale settings. In short, binary strings are appropriate for storing data that the - programmer thinks of as raw bytes, whereas character + programmer thinks of as raw bytes, whereas character strings are appropriate for storing text. - The bytea type supports two external formats for - input and output: PostgreSQL's historical - escape format, and hex format. Both + The bytea type supports two + formats for input and output: hex format + and PostgreSQL's historical + escape format. Both of these are always accepted on input. The output format depends - on the configuration parameter ; + on the configuration parameter ; the default is hex. (Note that the hex format was introduced in PostgreSQL 9.0; earlier versions and some tools don't understand it.) @@ -1320,10 +1328,10 @@ SELECT b, char_length(b) FROM test2; - <type>bytea</> Hex Format + <type>bytea</type> Hex Format - The hex format encodes binary data as 2 hexadecimal digits + The hex format encodes binary data as 2 hexadecimal digits per byte, most significant nibble first. The entire string is preceded by the sequence \x (to distinguish it from the escape format). 
In some contexts, the initial backslash may @@ -1341,13 +1349,13 @@ SELECT b, char_length(b) FROM test2; Example: -SELECT E'\\xDEADBEEF'; +SELECT '\xDEADBEEF'; - <type>bytea</> Escape Format + <type>bytea</type> Escape Format The escape format is the traditional @@ -1361,7 +1369,7 @@ SELECT E'\\xDEADBEEF'; convenient. But in practice it is usually confusing because it fuzzes up the distinction between binary strings and character strings, and also the particular escape mechanism that was chosen is - somewhat unwieldy. So this format should probably be avoided + somewhat unwieldy. Therefore, this format should probably be avoided for most new applications. @@ -1374,15 +1382,15 @@ SELECT E'\\xDEADBEEF'; octal value and precede it by a backslash (or two backslashes, if writing the value as a literal using escape string syntax). - Backslash itself (octet value 92) can alternatively be represented by + Backslash itself (octet decimal value 92) can alternatively be represented by double backslashes. - + shows the characters that must be escaped, and gives the alternative escape sequences where applicable.
- <type>bytea</> Literal Escaped Octets + <type>bytea</type> Literal Escaped Octets @@ -1398,33 +1406,33 @@ SELECT E'\\xDEADBEEF'; 0 zero octet - E'\\000' - SELECT E'\\000'::bytea; - \000 + '\000' + SELECT '\000'::bytea; + \x00 39 single quote - '''' or E'\\047' - SELECT E'\''::bytea; - ' + '''' or '\047' + SELECT ''''::bytea; + \x27 92 backslash - E'\\\\' or E'\\134' - SELECT E'\\\\'::bytea; - \\ + '\' or '\\134' + SELECT '\\'::bytea; + \x5c 0 to 31 and 127 to 255 non-printable octets - E'\\xxx' (octal value) - SELECT E'\\001'::bytea; - \001 + '\xxx' (octal value) + SELECT '\001'::bytea; + \x01 @@ -1435,14 +1443,14 @@ SELECT E'\\xDEADBEEF'; The requirement to escape non-printable octets varies depending on locale settings. In some instances you can get away with leaving them unescaped. Note that the result in each of the examples - in was exactly one octet in + in was exactly one octet in length, even though the output representation is sometimes more than one character. The reason multiple backslashes are required, as shown - in , is that an input + in , is that an input string written as a string literal must pass through two parse phases in the PostgreSQL server. The first backslash of each pair is interpreted as an escape @@ -1452,28 +1460,40 @@ SELECT E'\\xDEADBEEF'; of escaping.) The remaining backslash is then recognized by the bytea input function as starting either a three digit octal value or escaping another backslash. For example, - a string literal passed to the server as E'\\001' + a string literal passed to the server as '\001' becomes \001 after passing through the escape string parser. The \001 is then sent to the bytea input function, where it is converted to a single octet with a decimal value of 1. Note that the single-quote character is not treated specially by bytea, so it follows the normal rules for string literals. (See also - .) + .) - Bytea octets are sometimes escaped when output. In general, each - non-printable octet is converted into - its equivalent three-digit octal value and preceded by one backslash. - Most printable octets are represented by their standard - representation in the client character set. The octet with decimal - value 92 (backslash) is doubled in the output. - Details are in . + Bytea octets are output in hex + format by default. If you change + to escape, + non-printable octet are converted to + equivalent three-digit octal value and preceded by one backslash. + Most printable octets are output by their standard + representation in the client character set, e.g.: + + +SET bytea_output = 'escape'; + +SELECT 'abc \153\154\155 \052\251\124'::bytea; + bytea +---------------- + abc klm *\251T + + + The octet with decimal value 92 (backslash) is doubled in the output. + Details are in .
- <type>bytea</> Output Escaped Octets + <type>bytea</type> Output Escaped Octets @@ -1491,15 +1511,15 @@ SELECT E'\\xDEADBEEF'; 92 backslash \\ - SELECT E'\\134'::bytea; + SELECT '\134'::bytea; \\ 0 to 31 and 127 to 255 non-printable octets - \xxx (octal value) - SELECT E'\\001'::bytea; + \xxx (octal value) + SELECT '\001'::bytea; \001 @@ -1507,7 +1527,7 @@ SELECT E'\\xDEADBEEF'; 32 to 126printable octetsclient character set representation - SELECT E'\\176'::bytea; + SELECT '\176'::bytea;~ @@ -1516,7 +1536,7 @@ SELECT E'\\xDEADBEEF';
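     As a quick cross-check of the two formats (a minimal sketch; the literal value is
     arbitrary), the same four octets can be written in either input form and compare
     equal, and are printed in hex under the default output setting:

SELECT '\xDEADBEEF'::bytea = '\336\255\276\357'::bytea;   -- true: both literals denote the same four octets
SELECT '\336\255\276\357'::bytea;                          -- displayed as \xdeadbeef with the default bytea_output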
- Depending on the front end to PostgreSQL you use, + Depending on the front end to PostgreSQL you use, you might have additional work to do in terms of escaping and unescaping bytea strings. For example, you might also have to escape line feeds and carriage returns if your interface @@ -1563,12 +1583,12 @@ SELECT E'\\xDEADBEEF'; PostgreSQL supports the full set of SQL date and time types, shown in . The operations available + linkend="datatype-datetime-table"/>. The operations available on these data types are described in - . + . Dates are counted according to the Gregorian calendar, even in years before that calendar was introduced (see for more information). + linkend="datetime-units-history"/> for more information). @@ -1677,7 +1697,7 @@ MINUTE TO SECOND Note that if both fields and p are specified, the - fields must include SECOND, + fields must include SECOND, since the precision applies only to the seconds. @@ -1691,14 +1711,6 @@ MINUTE TO SECOND any application. - - The types abstime - and reltime are lower precision types which are used internally. - You are discouraged from using these types in - applications; these internal types - might disappear in a future release. - - Date/Time Input @@ -1708,17 +1720,17 @@ MINUTE TO SECOND traditional POSTGRES, and others. For some formats, ordering of day, month, and year in date input is ambiguous and there is support for specifying the expected - ordering of these fields. Set the parameter - to MDY to select month-day-year interpretation, - DMY to select day-month-year interpretation, or - YMD to select year-month-day interpretation. + ordering of these fields. Set the parameter + to MDY to select month-day-year interpretation, + DMY to select day-month-year interpretation, or + YMD to select year-month-day interpretation. PostgreSQL is more flexible in handling date/time input than the SQL standard requires. - See + See for the exact parsing rules of date/time input and for the recognized text fields including months, days of the week, and time zones. @@ -1727,7 +1739,7 @@ MINUTE TO SECOND Remember that any date or time literal input needs to be enclosed in single quotes, like text strings. Refer to - for more + for more information. SQL requires the following syntax @@ -1751,7 +1763,7 @@ MINUTE TO SECOND - shows some possible + shows some possible inputs for the date type. @@ -1776,19 +1788,19 @@ MINUTE TO SECOND 1/8/1999 - January 8 in MDY mode; - August 1 in DMY mode + January 8 in MDY mode; + August 1 in DMY mode 1/18/1999 - January 18 in MDY mode; + January 18 in MDY mode; rejected in other modes 01/02/03 - January 2, 2003 in MDY mode; - February 1, 2003 in DMY mode; - February 3, 2001 in YMD mode + January 2, 2003 in MDY mode; + February 1, 2003 in DMY mode; + February 3, 2001 in YMD mode @@ -1805,15 +1817,15 @@ MINUTE TO SECOND 99-Jan-08 - January 8 in YMD mode, else error + January 8 in YMD mode, else error 08-Jan-99 - January 8, except error in YMD mode + January 8, except error in YMD mode Jan-08-99 - January 8, except error in YMD mode + January 8, except error in YMD mode 19990108 @@ -1864,8 +1876,8 @@ MINUTE TO SECOND Valid input for these types consists of a time of day followed by an optional time zone. (See - and .) If a time zone is + linkend="datatype-datetime-time-table"/> + and .) If a time zone is specified in the input for time without time zone, it is silently ignored. You can also specify a date but it will be ignored, except when you use a time zone name that involves a @@ -1985,7 +1997,7 @@ MINUTE TO SECOND
- Refer to for more information on how + Refer to for more information on how to specify time zones. @@ -2062,29 +2074,29 @@ January 8 04:05:06 1999 PST For timestamp with time zone, the internally stored value is always in UTC (Universal Coordinated Time, traditionally known as Greenwich Mean Time, - GMT). An input value that has an explicit + GMT). An input value that has an explicit time zone specified is converted to UTC using the appropriate offset for that time zone. If no time zone is stated in the input string, then it is assumed to be in the time zone indicated by the system's - parameter, and is converted to UTC using the - offset for the timezone zone. + parameter, and is converted to UTC using the + offset for the timezone zone.
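     For example (a minimal sketch; the session time zone America/New_York is only an
     illustrative choice, not a default):

SET TIME ZONE 'America/New_York';
SELECT TIMESTAMP WITH TIME ZONE '2004-10-19 10:23:54+02';  -- explicit offset: converted to UTC on input
SELECT TIMESTAMP WITH TIME ZONE '2004-10-19 10:23:54';     -- no zone given: assumed to be America/New_York local time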
When a timestamp with time zone value is output, it is always converted from UTC to the - current timezone zone, and displayed as local time in that + current timezone zone, and displayed as local time in that zone. To see the time in another time zone, either change - timezone or use the AT TIME ZONE construct - (see ). + timezone or use the AT TIME ZONE construct + (see ). Conversions between timestamp without time zone and timestamp with time zone normally assume that the timestamp without time zone value should be taken or given - as timezone local time. A different time zone can - be specified for the conversion using AT TIME ZONE. + as timezone local time. A different time zone can + be specified for the conversion using AT TIME ZONE. @@ -2104,12 +2116,12 @@ January 8 04:05:06 1999 PST PostgreSQL supports several special date/time input values for convenience, as shown in . The values + linkend="datatype-datetime-special-table"/>. The values infinity and -infinity are specially represented inside the system and will be displayed unchanged; but the others are simply notational shorthands that will be converted to ordinary date/time values when read. - (In particular, now and related strings are converted + (In particular, now and related strings are converted to a specific time value as soon as they are read.) All of these values need to be enclosed in single quotes when used as constants in SQL commands. @@ -2178,8 +2190,8 @@ January 8 04:05:06 1999 PST CURRENT_TIMESTAMP, LOCALTIME, LOCALTIMESTAMP. The latter four accept an optional subsecond precision specification. (See .) Note that these are - SQL functions and are not recognized in data input strings. + linkend="functions-datetime-current"/>.) Note that these are + SQL functions and are not recognized in data input strings. @@ -2203,18 +2215,18 @@ January 8 04:05:06 1999 PST The output format of the date/time types can be set to one of the four styles ISO 8601, - SQL (Ingres), traditional POSTGRES - (Unix date format), or + SQL (Ingres), traditional POSTGRES + (Unix date format), or German. The default is the ISO format. (The SQL standard requires the use of the ISO 8601 format. The name of the SQL output format is a historical accident.) shows examples of each + linkend="datatype-datetime-output-table"/> shows examples of each output style. The output of the date and time types is generally only the date or time part in accordance with the given examples. However, the - POSTGRES style outputs date-only values in + POSTGRES style outputs date-only values in ISO format. @@ -2255,9 +2267,9 @@ January 8 04:05:06 1999 PST - ISO 8601 specifies the use of uppercase letter T to separate - the date and time. PostgreSQL accepts that format on - input, but on output it uses a space rather than T, as shown + ISO 8601 specifies the use of uppercase letter T to separate + the date and time. PostgreSQL accepts that format on + input, but on output it uses a space rather than T, as shown above. This is for readability and for consistency with RFC 3339 as well as some other database systems. @@ -2267,9 +2279,9 @@ January 8 04:05:06 1999 PST In the SQL and POSTGRES styles, day appears before month if DMY field ordering has been specified, otherwise month appears before day. - (See + (See for how this setting also affects interpretation of input values.) - shows examples. + shows examples.
@@ -2284,17 +2296,17 @@ January 8 04:05:06 1999 PST - SQL, DMY + SQL, DMY day/month/year 17/12/1997 15:37:16.00 CET - SQL, MDY + SQL, MDY month/day/year 12/17/1997 07:37:16.00 PST - Postgres, DMY + Postgres, DMY day/month/year Wed 17 Dec 07:37:16 1997 PST @@ -2305,7 +2317,7 @@ January 8 04:05:06 1999 PST The date/time style can be selected by the user using the SET datestyle command, the parameter in the + linkend="guc-datestyle"/> parameter in the postgresql.conf configuration file, or the PGDATESTYLE environment variable on the server or client. @@ -2313,7 +2325,7 @@ January 8 04:05:06 1999 PST The formatting function to_char - (see ) is also available as + (see ) is also available as a more flexible way to format date/time output. @@ -2360,7 +2372,7 @@ January 8 04:05:06 1999 PST The default time zone is specified as a constant numeric offset - from UTC. It is therefore impossible to adapt to + from UTC. It is therefore impossible to adapt to daylight-saving time when doing date/time arithmetic across DST boundaries. @@ -2372,7 +2384,7 @@ January 8 04:05:06 1999 PST To address these difficulties, we recommend using date/time types that contain both date and time when using time zones. We - do not recommend using the type time with + do not recommend using the type time with time zone (though it is supported by PostgreSQL for legacy applications and for compliance with the SQL standard). @@ -2383,7 +2395,7 @@ January 8 04:05:06 1999 PST All timezone-aware dates and times are stored internally in UTC. They are converted to local time - in the zone specified by the configuration + in the zone specified by the configuration parameter before being displayed to the client. @@ -2393,10 +2405,10 @@ January 8 04:05:06 1999 PST - A full time zone name, for example America/New_York. + A full time zone name, for example America/New_York. The recognized time zone names are listed in the pg_timezone_names view (see ). + linkend="view-pg-timezone-names"/>). PostgreSQL uses the widely-used IANA time zone data for this purpose, so the same time zone names are also recognized by much other software. @@ -2404,16 +2416,16 @@ January 8 04:05:06 1999 PST - A time zone abbreviation, for example PST. Such a + A time zone abbreviation, for example PST. Such a specification merely defines a particular offset from UTC, in contrast to full time zone names which can imply a set of daylight savings transition-date rules as well. The recognized abbreviations - are listed in the pg_timezone_abbrevs view (see ). You cannot set the - configuration parameters or - to a time + are listed in the pg_timezone_abbrevs view (see ). You cannot set the + configuration parameters or + to a time zone abbreviation, but you can use abbreviations in - date/time input values and with the AT TIME ZONE + date/time input values and with the AT TIME ZONE operator. @@ -2421,25 +2433,25 @@ January 8 04:05:06 1999 PST In addition to the timezone names and abbreviations, PostgreSQL will accept POSIX-style time zone - specifications of the form STDoffset or - STDoffsetDST, where - STD is a zone abbreviation, offset is a - numeric offset in hours west from UTC, and DST is an + specifications of the form STDoffset or + STDoffsetDST, where + STD is a zone abbreviation, offset is a + numeric offset in hours west from UTC, and DST is an optional daylight-savings zone abbreviation, assumed to stand for one - hour ahead of the given offset. For example, if EST5EDT + hour ahead of the given offset. 
For example, if EST5EDT were not already a recognized zone name, it would be accepted and would be functionally equivalent to United States East Coast time. In this syntax, a zone abbreviation can be a string of letters, or an - arbitrary string surrounded by angle brackets (<>). + arbitrary string surrounded by angle brackets (<>). When a daylight-savings zone abbreviation is present, it is assumed to be used according to the same daylight-savings transition rules used in the - IANA time zone database's posixrules entry. + IANA time zone database's posixrules entry. In a standard PostgreSQL installation, - posixrules is the same as US/Eastern, so + posixrules is the same as US/Eastern, so that POSIX-style time zone specifications follow USA daylight-savings rules. If needed, you can adjust this behavior by replacing the - posixrules file. + posixrules file. @@ -2448,10 +2460,10 @@ January 8 04:05:06 1999 PST and full names: abbreviations represent a specific offset from UTC, whereas many of the full names imply a local daylight-savings time rule, and so have two possible UTC offsets. As an example, - 2014-06-04 12:00 America/New_York represents noon local + 2014-06-04 12:00 America/New_York represents noon local time in New York, which for this particular date was Eastern Daylight - Time (UTC-4). So 2014-06-04 12:00 EDT specifies that - same time instant. But 2014-06-04 12:00 EST specifies + Time (UTC-4). So 2014-06-04 12:00 EDT specifies that + same time instant. But 2014-06-04 12:00 EST specifies noon Eastern Standard Time (UTC-5), regardless of whether daylight savings was nominally in effect on that date. @@ -2459,10 +2471,10 @@ January 8 04:05:06 1999 PST To complicate matters, some jurisdictions have used the same timezone abbreviation to mean different UTC offsets at different times; for - example, in Moscow MSK has meant UTC+3 in some years and - UTC+4 in others. PostgreSQL interprets such + example, in Moscow MSK has meant UTC+3 in some years and + UTC+4 in others. PostgreSQL interprets such abbreviations according to whatever they meant (or had most recently - meant) on the specified date; but, as with the EST example + meant) on the specified date; but, as with the EST example above, this is not necessarily the same as local civil time on that date. @@ -2470,18 +2482,18 @@ January 8 04:05:06 1999 PST One should be wary that the POSIX-style time zone feature can lead to silently accepting bogus input, since there is no check on the reasonableness of the zone abbreviations. For example, SET - TIMEZONE TO FOOBAR0 will work, leaving the system effectively using + TIMEZONE TO FOOBAR0 will work, leaving the system effectively using a rather peculiar abbreviation for UTC. Another issue to keep in mind is that in POSIX time zone names, - positive offsets are used for locations west of Greenwich. + positive offsets are used for locations west of Greenwich. Everywhere else, PostgreSQL follows the - ISO-8601 convention that positive timezone offsets are east + ISO-8601 convention that positive timezone offsets are east of Greenwich. In all cases, timezone names and abbreviations are recognized - case-insensitively. (This is a change from PostgreSQL + case-insensitively. (This is a change from PostgreSQL versions prior to 8.2, which were case-sensitive in some contexts but not others.) 
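     For instance, reusing the 2014-06-04 example above (a minimal sketch):

SELECT '2014-06-04 12:00 edt'::timestamptz = '2014-06-04 12:00 EDT'::timestamptz;               -- true: abbreviation matched case-insensitively
SELECT '2014-06-04 12:00 edt'::timestamptz = '2014-06-04 12:00 America/New_York'::timestamptz;  -- true on this date, when EDT was in effect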
@@ -2489,15 +2501,15 @@ January 8 04:05:06 1999 PST Neither timezone names nor abbreviations are hard-wired into the server; they are obtained from configuration files stored under - .../share/timezone/ and .../share/timezonesets/ + .../share/timezone/ and .../share/timezonesets/ of the installation directory - (see ). + (see ). - The configuration parameter can - be set in the file postgresql.conf, or in any of the - other standard ways described in . + The configuration parameter can + be set in the file postgresql.conf, or in any of the + other standard ways described in . There are also some special ways to set it: @@ -2505,7 +2517,7 @@ January 8 04:05:06 1999 PST The SQL command SET TIME ZONE sets the time zone for the session. This is an alternative spelling - of SET TIMEZONE TO with a more SQL-spec-compatible syntax. + of SET TIMEZONE TO with a more SQL-spec-compatible syntax. @@ -2533,52 +2545,52 @@ January 8 04:05:06 1999 PST verbose syntax: -@ quantity unit quantity unit... direction +@ quantity unit quantity unit... direction - where quantity is a number (possibly signed); - unit is microsecond, + where quantity is a number (possibly signed); + unit is microsecond, millisecond, second, minute, hour, day, week, month, year, decade, century, millennium, or abbreviations or plurals of these units; - direction can be ago or - empty. The at sign (@) is optional noise. The amounts + direction can be ago or + empty. The at sign (@) is optional noise. The amounts of the different units are implicitly added with appropriate sign accounting. ago negates all the fields. This syntax is also used for interval output, if - is set to - postgres_verbose. + is set to + postgres_verbose. Quantities of days, hours, minutes, and seconds can be specified without - explicit unit markings. For example, '1 12:59:10' is read - the same as '1 day 12 hours 59 min 10 sec'. Also, + explicit unit markings. For example, '1 12:59:10' is read + the same as '1 day 12 hours 59 min 10 sec'. Also, a combination of years and months can be specified with a dash; - for example '200-10' is read the same as '200 years - 10 months'. (These shorter forms are in fact the only ones allowed + for example '200-10' is read the same as '200 years + 10 months'. (These shorter forms are in fact the only ones allowed by the SQL standard, and are used for output when - IntervalStyle is set to sql_standard.) + IntervalStyle is set to sql_standard.) Interval values can also be written as ISO 8601 time intervals, using - either the format with designators of the standard's section - 4.4.3.2 or the alternative format of section 4.4.3.3. The + either the format with designators of the standard's section + 4.4.3.2 or the alternative format of section 4.4.3.3. The format with designators looks like this: -P quantity unit quantity unit ... T quantity unit ... +P quantity unit quantity unit ... T quantity unit ... - The string must start with a P, and may include a - T that introduces the time-of-day units. The + The string must start with a P, and may include a + T that introduces the time-of-day units. The available unit abbreviations are given in . Units may be + linkend="datatype-interval-iso8601-units"/>. Units may be omitted, and may be specified in any order, but units smaller than - a day must appear after T. In particular, the meaning of - M depends on whether it is before or after - T. + a day must appear after T. In particular, the meaning of + M depends on whether it is before or after + T.
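     For example (a minimal sketch):

SELECT 'P1M'::interval AS months, 'PT1M'::interval AS minutes;
-- months | minutes
-- 1 mon  | 00:01:00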
@@ -2626,70 +2638,57 @@ P quantity unit quantity In the alternative format: -P years-months-days T hours:minutes:seconds +P years-months-days T hours:minutes:seconds the string must begin with P, and a - T separates the date and time parts of the interval. + T separates the date and time parts of the interval. The values are given as numbers similar to ISO 8601 dates. - When writing an interval constant with a fields + When writing an interval constant with a fields specification, or when assigning a string to an interval column that was - defined with a fields specification, the interpretation of - unmarked quantities depends on the fields. For - example INTERVAL '1' YEAR is read as 1 year, whereas - INTERVAL '1' means 1 second. Also, field values - to the right of the least significant field allowed by the - fields specification are silently discarded. For - example, writing INTERVAL '1 day 2:03:04' HOUR TO MINUTE + defined with a fields specification, the interpretation of + unmarked quantities depends on the fields. For + example INTERVAL '1' YEAR is read as 1 year, whereas + INTERVAL '1' means 1 second. Also, field values + to the right of the least significant field allowed by the + fields specification are silently discarded. For + example, writing INTERVAL '1 day 2:03:04' HOUR TO MINUTE results in dropping the seconds field, but not the day field. - According to the SQL standard all fields of an interval + According to the SQL standard all fields of an interval value must have the same sign, so a leading negative sign applies to all fields; for example the negative sign in the interval literal - '-1 2:03:04' applies to both the days and hour/minute/second - parts. PostgreSQL allows the fields to have different + '-1 2:03:04' applies to both the days and hour/minute/second + parts. PostgreSQL allows the fields to have different signs, and traditionally treats each field in the textual representation as independently signed, so that the hour/minute/second part is - considered positive in this example. If IntervalStyle is + considered positive in this example. If IntervalStyle is set to sql_standard then a leading sign is considered to apply to all fields (but only if no additional signs appear). - Otherwise the traditional PostgreSQL interpretation is + Otherwise the traditional PostgreSQL interpretation is used. To avoid ambiguity, it's recommended to attach an explicit sign to each field if any field is negative. - - Internally interval values are stored as months, days, - and seconds. This is done because the number of days in a month - varies, and a day can have 23 or 25 hours if a daylight savings - time adjustment is involved. The months and days fields are integers - while the seconds field can store fractions. Because intervals are - usually created from constant strings or timestamp subtraction, - this storage method works well in most cases. Functions - justify_days and justify_hours are - available for adjusting days and hours that overflow their normal - ranges. - - In the verbose input format, and in some fields of the more compact input formats, field values can have fractional parts; for example - '1.5 week' or '01:02:03.45'. Such input is + '1.5 week' or '01:02:03.45'. Such input is converted to the appropriate number of months, days, and seconds for storage. When this would result in a fractional number of months or days, the fraction is added to the lower-order fields using the conversion factors 1 month = 30 days and 1 day = 24 hours. 
- For example, '1.5 month' becomes 1 month and 15 days. + For example, '1.5 month' becomes 1 month and 15 days. Only seconds will ever be shown as fractional on output. - shows some examples - of valid interval input. + shows some examples + of valid interval input.
@@ -2716,16 +2715,43 @@ P years-months-days < P1Y2M3DT4H5M6S - ISO 8601 format with designators: same meaning as above + ISO 8601 format with designators: same meaning as above P0001-02-03T04:05:06 - ISO 8601 alternative format: same meaning as above + ISO 8601 alternative format: same meaning as above
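     The equivalence of these spellings can be checked directly (a minimal sketch):

SELECT 'P1Y2M3DT4H5M6S'::interval = '1 year 2 months 3 days 4 hours 5 minutes 6 seconds'::interval;  -- true
SELECT 'P0001-02-03T04:05:06'::interval = 'P1Y2M3DT4H5M6S'::interval;                                -- true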
+ + Internally interval values are stored as months, days, + and seconds. This is done because the number of days in a month + varies, and a day can have 23 or 25 hours if a daylight savings + time adjustment is involved. The months and days fields are integers + while the seconds field can store fractions. Because intervals are + usually created from constant strings or timestamp subtraction, + this storage method works well in most cases, but can cause unexpected + results: + + +SELECT EXTRACT(hours from '80 minutes'::interval); + date_part +----------- + 1 + +SELECT EXTRACT(days from '80 hours'::interval); + date_part +----------- + 0 + + + Functions justify_days and + justify_hours are available for adjusting days + and hours that overflow their normal ranges. + + @@ -2739,16 +2765,16 @@ P years-months-days < The output format of the interval type can be set to one of the - four styles sql_standard, postgres, - postgres_verbose, or iso_8601, + four styles sql_standard, postgres, + postgres_verbose, or iso_8601, using the command SET intervalstyle. - The default is the postgres format. - shows examples of each + The default is the postgres format. + shows examples of each output style. - The sql_standard style produces output that conforms to + The sql_standard style produces output that conforms to the SQL standard's specification for interval literal strings, if the interval value meets the standard's restrictions (either year-month only or day-time only, with no mixing of positive @@ -2758,20 +2784,20 @@ P years-months-days < - The output of the postgres style matches the output of - PostgreSQL releases prior to 8.4 when the - parameter was set to ISO. + The output of the postgres style matches the output of + PostgreSQL releases prior to 8.4 when the + parameter was set to ISO. - The output of the postgres_verbose style matches the output of - PostgreSQL releases prior to 8.4 when the - DateStyle parameter was set to non-ISO output. + The output of the postgres_verbose style matches the output of + PostgreSQL releases prior to 8.4 when the + DateStyle parameter was set to non-ISO output. - The output of the iso_8601 style matches the format - with designators described in section 4.4.3.2 of the + The output of the iso_8601 style matches the format + with designators described in section 4.4.3.2 of the ISO 8601 standard. @@ -2788,25 +2814,25 @@ P years-months-days < - sql_standard + sql_standard 1-2 3 4:05:06 -1-2 +3 -4:05:06 - postgres + postgres 1 year 2 mons 3 days 04:05:06 -1 year -2 mons +3 days -04:05:06 - postgres_verbose + postgres_verbose @ 1 year 2 mons @ 3 days 4 hours 5 mins 6 secs @ 1 year 2 mons -3 days 4 hours 5 mins 6 secs ago - iso_8601 + iso_8601 P1Y2M P3DT4H5M6S P-1Y-2M3DT-4H-5M-6S @@ -2838,7 +2864,7 @@ P years-months-days < PostgreSQL provides the standard SQL type boolean; - see . + see . The boolean type can have several states: true, false, and a third state, unknown, which is represented by the @@ -2894,7 +2920,7 @@ P years-months-days < - shows that + shows that boolean values are output using the letters t and f. @@ -2946,7 +2972,7 @@ SELECT * FROM test1 WHERE a; Enum types are created using the command, + linkend="sql-createtype"/> command, for example: @@ -3051,6 +3077,20 @@ SELECT person.name, holidays.num_weeks FROM person, holidays Implementation Details + + Enum labels are case sensitive, so + 'happy' is not the same as 'HAPPY'. + White space in the labels is significant too. 
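     A minimal sketch, assuming the mood type created in the example earlier in this section:

SELECT 'happy'::mood;   -- accepted
SELECT 'HAPPY'::mood;   -- error: invalid input value for enum mood: "HAPPY"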
+ + + + Although enum types are primarily intended for static sets of values, + there is support for adding new values to an existing enum type, and for + renaming values (see ). Existing values + cannot be removed from an enum type, nor can the sort ordering of such + values be changed, short of dropping and re-creating the enum type. + + An enum value occupies four bytes on disk. The length of an enum value's textual label is limited by the NAMEDATALEN @@ -3058,12 +3098,6 @@ SELECT person.name, holidays.num_weeks FROM person, holidays builds this means at most 63 bytes. - - Enum labels are case sensitive, so - 'happy' is not the same as 'HAPPY'. - White space in the labels is significant too. - - The translations from internal enum values to textual labels are kept in the system catalog @@ -3079,7 +3113,7 @@ SELECT person.name, holidays.num_weeks FROM person, holidays Geometric data types represent two-dimensional spatial - objects. shows the geometric + objects. shows the geometric types available in PostgreSQL. @@ -3150,7 +3184,7 @@ SELECT person.name, holidays.num_weeks FROM person, holidays A rich set of functions and operators is available to perform various geometric operations such as scaling, translation, rotation, and determining - intersections. They are explained in . + intersections. They are explained in . @@ -3170,7 +3204,7 @@ SELECT person.name, holidays.num_weeks FROM person, holidays x , y - where x and y are the respective + where x and y are the respective coordinates, as floating-point numbers. @@ -3188,8 +3222,8 @@ SELECT person.name, holidays.num_weeks FROM person, holidays Lines are represented by the linear - equation Ax + By + C = 0, - where A and B are not both zero. Values + equation Ax + By + C = 0, + where A and B are not both zero. Values of type line are input and output in the following form: { A, B, C } @@ -3316,8 +3350,8 @@ SELECT person.name, holidays.num_weeks FROM person, holidays where the points are the end points of the line segments - comprising the path. Square brackets ([]) indicate - an open path, while parentheses (()) indicate a + comprising the path. Square brackets ([]) indicate + an open path, while parentheses (()) indicate a closed path. When the outermost parentheses are omitted, as in the third through fifth syntaxes, a closed path is assumed. @@ -3380,7 +3414,7 @@ SELECT person.name, holidays.num_weeks FROM person, holidays where - (x,y) + (x,y) is the center point and r is the radius of the circle. @@ -3401,12 +3435,12 @@ SELECT person.name, holidays.num_weeks FROM person, holidays - PostgreSQL offers data types to store IPv4, IPv6, and MAC - addresses, as shown in . It + PostgreSQL offers data types to store IPv4, IPv6, and MAC + addresses, as shown in . It is better to use these types instead of plain text types to store network addresses, because these types offer input error checking and specialized - operators and functions (see ). + operators and functions (see ). @@ -3495,7 +3529,7 @@ SELECT person.name, holidays.num_weeks FROM person, holidays - <type>cidr</> + <type>cidr</type> cidr @@ -3506,11 +3540,11 @@ SELECT person.name, holidays.num_weeks FROM person, holidays Input and output formats follow Classless Internet Domain Routing conventions. The format for specifying networks is address/y where address is the network represented as an + class="parameter">address/y where address is the network represented as an IPv4 or IPv6 address, and y is the number of bits in the netmask. 
If - y is omitted, it is calculated + class="parameter">y is the number of bits in the netmask. If + y is omitted, it is calculated using assumptions from the older classful network numbering system, except it will be at least large enough to include all of the octets written in the input. It is an error to specify a network address @@ -3518,11 +3552,11 @@ SELECT person.name, holidays.num_weeks FROM person, holidays - shows some examples. + shows some examples.
- <type>cidr</> Type Input Examples + <type>cidr</type> Type Input Examples @@ -3631,8 +3665,8 @@ SELECT person.name, holidays.num_weeks FROM person, holidays If you do not like the output format for inet or - cidr values, try the functions host, - text, and abbrev. + cidr values, try the functions host, + text, and abbrev. @@ -3650,24 +3684,24 @@ SELECT person.name, holidays.num_weeks FROM person, holidays - The macaddr type stores MAC addresses, known for example + The macaddr type stores MAC addresses, known for example from Ethernet card hardware addresses (although MAC addresses are used for other purposes as well). Input is accepted in the following formats: - '08:00:2b:01:02:03' - '08-00-2b-01-02-03' - '08002b:010203' - '08002b-010203' - '0800.2b01.0203' - '0800-2b01-0203' - '08002b010203' + '08:00:2b:01:02:03' + '08-00-2b-01-02-03' + '08002b:010203' + '08002b-010203' + '0800.2b01.0203' + '0800-2b01-0203' + '08002b010203' These examples would all specify the same address. Upper and lower case is accepted for the digits - a through f. Output is always in the + a through f. Output is always in the first of the forms shown. @@ -3700,7 +3734,7 @@ SELECT person.name, holidays.num_weeks FROM person, holidays - The macaddr8 type stores MAC addresses in EUI-64 + The macaddr8 type stores MAC addresses in EUI-64 format, known for example from Ethernet card hardware addresses (although MAC addresses are used for other purposes as well). This type can accept both 6 and 8 byte length MAC addresses @@ -3710,31 +3744,31 @@ SELECT person.name, holidays.num_weeks FROM person, holidays Note that IPv6 uses a modified EUI-64 format where the 7th bit should be set to one after the conversion from EUI-48. The - function macaddr8_set7bit is provided to make this + function macaddr8_set7bit is provided to make this change. Generally speaking, any input which is comprised of pairs of hex digits (on byte boundaries), optionally separated consistently by - one of ':', '-' or '.', is + one of ':', '-' or '.', is accepted. The number of hex digits must be either 16 (8 bytes) or 12 (6 bytes). Leading and trailing whitespace is ignored. The following are examples of input formats that are accepted: - '08:00:2b:01:02:03:04:05' - '08-00-2b-01-02-03-04-05' - '08002b:0102030405' - '08002b-0102030405' - '0800.2b01.0203.0405' - '0800-2b01-0203-0405' - '08002b01:02030405' - '08002b0102030405' + '08:00:2b:01:02:03:04:05' + '08-00-2b-01-02-03-04-05' + '08002b:0102030405' + '08002b-0102030405' + '0800.2b01.0203.0405' + '0800-2b01-0203-0405' + '08002b01:02030405' + '08002b0102030405' These examples would all specify the same address. Upper and lower case is accepted for the digits - a through f. Output is always in the + a through f. Output is always in the first of the forms shown. The last six input formats that are mentioned above are not part @@ -3742,7 +3776,7 @@ SELECT person.name, holidays.num_weeks FROM person, holidays To convert a traditional 48 bit MAC address in EUI-48 format to modified EUI-64 format to be included as the host portion of an - IPv6 address, use macaddr8_set7bit as shown: + IPv6 address, use macaddr8_set7bit as shown: SELECT macaddr8_set7bit('08:00:2b:01:02:03'); @@ -3790,21 +3824,21 @@ SELECT macaddr8_set7bit('08:00:2b:01:02:03'); If one explicitly casts a bit-string value to - bit(n), it will be truncated or - zero-padded on the right to be exactly n bits, + bit(n), it will be truncated or + zero-padded on the right to be exactly n bits, without raising an error. 
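     For example (a minimal sketch):

SELECT B'101'::bit(5);       -- 10100: zero-padded on the right
SELECT B'1011011'::bit(5);   -- 10110: truncated on the right without an error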
Similarly, if one explicitly casts a bit-string value to - bit varying(n), it will be truncated - on the right if it is more than n bits. + bit varying(n), it will be truncated + on the right if it is more than n bits. Refer to for information about the syntax + linkend="sql-syntax-bit-strings"/> for information about the syntax of bit string constants. Bit-logical operators and string manipulation functions are available; see . + linkend="functions-bitstring"/>. @@ -3832,7 +3866,7 @@ SELECT * FROM test; A bit string value requires 1 byte for each group of 8 bits, plus 5 or 8 bytes overhead depending on the length of the string (but long values may be compressed or moved out-of-line, as explained - in for character strings). + in for character strings). @@ -3852,13 +3886,13 @@ SELECT * FROM test; PostgreSQL provides two data types that are designed to support full text search, which is the activity of - searching through a collection of natural-language documents - to locate those that best match a query. + searching through a collection of natural-language documents + to locate those that best match a query. The tsvector type represents a document in a form optimized for text search; the tsquery type similarly represents a text query. - provides a detailed explanation of this - facility, and summarizes the + provides a detailed explanation of this + facility, and summarizes the related functions and operators. @@ -3871,9 +3905,9 @@ SELECT * FROM test; A tsvector value is a sorted list of distinct - lexemes, which are words that have been - normalized to merge different variants of the same word - (see for details). Sorting and + lexemes, which are words that have been + normalized to merge different variants of the same word + (see for details). Sorting and duplicate-elimination are done automatically during input, as shown in this example: @@ -3905,7 +3939,7 @@ SELECT $$the lexeme 'Joe''s' contains a quote$$::tsvector; 'Joe''s' 'a' 'contains' 'lexeme' 'quote' 'the' - Optionally, integer positions + Optionally, integer positions can be attached to lexemes: @@ -3924,7 +3958,7 @@ SELECT 'a:1 fat:2 cat:3 sat:4 on:5 a:6 mat:7 and:8 ate:9 a:10 fat:11 rat:12'::ts Lexemes that have positions can further be labeled with a - weight, which can be A, + weight, which can be A, B, C, or D. D is the default and hence is not shown on output: @@ -3957,7 +3991,7 @@ SELECT 'The Fat Rats'::tsvector; For most English-text-searching applications the above words would be considered non-normalized, but tsvector doesn't care. Raw document text should usually be passed through - to_tsvector to normalize the words appropriately + to_tsvector to normalize the words appropriately for searching: @@ -3967,7 +4001,7 @@ SELECT to_tsvector('english', 'The Fat Rats'); 'fat':2 'rat':3 - Again, see for more detail. + Again, see for more detail. @@ -3983,17 +4017,17 @@ SELECT to_tsvector('english', 'The Fat Rats'); A tsquery value stores lexemes that are to be searched for, and can combine them using the Boolean operators & (AND), | (OR), and - ! (NOT), as well as the phrase search operator - <-> (FOLLOWED BY). There is also a variant - <N> of the FOLLOWED BY - operator, where N is an integer constant that + ! (NOT), as well as the phrase search operator + <-> (FOLLOWED BY). There is also a variant + <N> of the FOLLOWED BY + operator, where N is an integer constant that specifies the distance between the two lexemes being searched - for. <-> is equivalent to <1>. + for. <-> is equivalent to <1>. 
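     For example (a minimal sketch; the simple configuration is used here only so that
     the words are not stemmed):

SELECT to_tsvector('simple', 'the fat black rat') @@ 'fat <-> rat'::tsquery;  -- false: the lexemes are not adjacent
SELECT to_tsvector('simple', 'the fat black rat') @@ 'fat <2> rat'::tsquery;  -- true: rat appears two positions after fat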
Parentheses can be used to enforce grouping of these operators. - In the absence of parentheses, ! (NOT) binds most tightly, + In the absence of parentheses, ! (NOT) binds most tightly, <-> (FOLLOWED BY) next most tightly, then & (AND), with | (OR) binding the least tightly. @@ -4023,7 +4057,7 @@ SELECT 'fat & rat & ! cat'::tsquery; Optionally, lexemes in a tsquery can be labeled with one or more weight letters, which restricts them to match only - tsvector lexemes with one of those weights: + tsvector lexemes with one of those weights: SELECT 'fat:ab & cat'::tsquery; @@ -4034,7 +4068,7 @@ SELECT 'fat:ab & cat'::tsquery; - Also, lexemes in a tsquery can be labeled with * + Also, lexemes in a tsquery can be labeled with * to specify prefix matching: SELECT 'super:*'::tsquery; @@ -4042,15 +4076,15 @@ SELECT 'super:*'::tsquery; ----------- 'super':* - This query will match any word in a tsvector that begins - with super. + This query will match any word in a tsvector that begins + with super. Quoting rules for lexemes are the same as described previously for - lexemes in tsvector; and, as with tsvector, + lexemes in tsvector; and, as with tsvector, any required normalization of words must be done before converting - to the tsquery type. The to_tsquery + to the tsquery type. The to_tsquery function is convenient for performing such normalization: @@ -4060,7 +4094,7 @@ SELECT to_tsquery('Fat:ab & Cats'); 'fat':AB & 'cat' - Note that to_tsquery will process prefixes in the same way + Note that to_tsquery will process prefixes in the same way as other words, which means this comparison returns true: @@ -4069,14 +4103,14 @@ SELECT to_tsvector( 'postgraduate' ) @@ to_tsquery( 'postgres:*' ); ---------- t - because postgres gets stemmed to postgr: + because postgres gets stemmed to postgr: SELECT to_tsvector( 'postgraduate' ), to_tsquery( 'postgres:*' ); to_tsvector | to_tsquery ---------------+------------ 'postgradu':1 | 'postgr':* - which will match the stemmed form of postgraduate. + which will match the stemmed form of postgraduate. @@ -4132,9 +4166,9 @@ a0ee-bc99-9c0b-4ef8-bb6d-6bb9-bd38-0a11 functions for UUIDs, but the core database does not include any function for generating UUIDs, because no single algorithm is well suited for every application. The module + linkend="uuid-ossp"/> module provides functions that implement several standard algorithms. - The module also provides a generation + The module also provides a generation function for random UUIDs. Alternatively, UUIDs could be generated by client applications or other libraries invoked through a server-side function. @@ -4142,7 +4176,7 @@ a0ee-bc99-9c0b-4ef8-bb6d-6bb9-bd38-0a11 - <acronym>XML</> Type + <acronym>XML</acronym> Type XML @@ -4153,9 +4187,9 @@ a0ee-bc99-9c0b-4ef8-bb6d-6bb9-bd38-0a11 advantage over storing XML data in a text field is that it checks the input values for well-formedness, and there are support functions to perform type-safe operations on it; see . Use of this data type requires the + linkend="functions-xml"/>. Use of this data type requires the installation to have been built with configure - --with-libxml. + --with-libxml. @@ -4259,7 +4293,7 @@ SET xmloption TO { DOCUMENT | CONTENT }; results to the client (which is the normal mode), PostgreSQL converts all character data passed between the client and the server and vice versa to the character encoding of the respective - end; see . This includes string + end; see . This includes string representations of XML values, such as in the above examples. 
This would ordinarily mean that encoding declarations contained in XML data can become invalid as the character data is converted @@ -4303,7 +4337,7 @@ SET xmloption TO { DOCUMENT | CONTENT }; Some XML-related functions may not work at all on non-ASCII data when the server encoding is not UTF-8. This is known to be an - issue for xmltable() and xpath() in particular. + issue for xmltable() and xpath() in particular. @@ -4351,6 +4385,59 @@ SET xmloption TO { DOCUMENT | CONTENT }; &rangetypes; + + Domain Types + + + domain + + + + data type + domain + + + + A domain is a user-defined data type that is + based on another underlying type. Optionally, + it can have constraints that restrict its valid values to a subset of + what the underlying type would allow. Otherwise it behaves like the + underlying type — for example, any operator or function that + can be applied to the underlying type will work on the domain type. + The underlying type can be any built-in or user-defined base type, + enum type, array type, composite type, range type, or another domain. + + + + For example, we could create a domain over integers that accepts only + positive integers: + +CREATE DOMAIN posint AS integer CHECK (VALUE > 0); +CREATE TABLE mytable (id posint); +INSERT INTO mytable VALUES(1); -- works +INSERT INTO mytable VALUES(-1); -- fails + + + + + When an operator or function of the underlying type is applied to a + domain value, the domain is automatically down-cast to the underlying + type. Thus, for example, the result of mytable.id - 1 + is considered to be of type integer not posint. + We could write (mytable.id - 1)::posint to cast the + result back to posint, causing the domain's constraints + to be rechecked. In this case, that would result in an error if the + expression had been applied to an id value of + 1. Assigning a value of the underlying type to a field or variable of + the domain type is allowed without writing an explicit cast, but the + domain's constraints will be checked. + + + + For additional information see . + + + Object Identifier Types @@ -4412,18 +4499,18 @@ SET xmloption TO { DOCUMENT | CONTENT }; PostgreSQL as primary keys for various system tables. OIDs are not added to user-created tables, unless WITH OIDS is specified when the table is - created, or the - configuration variable is enabled. Type oid represents + created, or the + configuration variable is enabled. Type oid represents an object identifier. There are also several alias types for - oid: regproc, regprocedure, - regoper, regoperator, regclass, - regtype, regrole, regnamespace, - regconfig, and regdictionary. - shows an overview. + oid: regproc, regprocedure, + regoper, regoperator, regclass, + regtype, regrole, regnamespace, + regconfig, and regdictionary. + shows an overview. - The oid type is currently implemented as an unsigned + The oid type is currently implemented as an unsigned four-byte integer. Therefore, it is not large enough to provide database-wide uniqueness in large databases, or even in large individual tables. So, using a user-created table's OID column as @@ -4432,7 +4519,7 @@ SET xmloption TO { DOCUMENT | CONTENT }; - The oid type itself has few operations beyond comparison. + The oid type itself has few operations beyond comparison. It can be cast to integer, however, and then manipulated using the standard integer operators. (Beware of possible signed-versus-unsigned confusion if you do this.) 
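 As a small sketch of that (the catalog pg_class is used here only because it is
 always present; any other OID value would serve):

SELECT oid, oid::integer AS as_integer
  FROM pg_class WHERE relname = 'pg_class';

 The integer form can then be used with the ordinary arithmetic and comparison
 operators, subject to the signed-versus-unsigned caveat above.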
@@ -4442,10 +4529,10 @@ SET xmloption TO { DOCUMENT | CONTENT }; The OID alias types have no operations of their own except for specialized input and output routines. These routines are able to accept and display symbolic names for system objects, rather than - the raw numeric value that type oid would use. The alias + the raw numeric value that type oid would use. The alias types allow simplified lookup of OID values for objects. For example, - to examine the pg_attribute rows related to a table - mytable, one could write: + to examine the pg_attribute rows related to a table + mytable, one could write: SELECT * FROM pg_attribute WHERE attrelid = 'mytable'::regclass; @@ -4457,11 +4544,11 @@ SELECT * FROM pg_attribute While that doesn't look all that bad by itself, it's still oversimplified. A far more complicated sub-select would be needed to select the right OID if there are multiple tables named - mytable in different schemas. - The regclass input converter handles the table lookup according - to the schema path setting, and so it does the right thing + mytable in different schemas. + The regclass input converter handles the table lookup according + to the schema path setting, and so it does the right thing automatically. Similarly, casting a table's OID to - regclass is handy for symbolic display of a numeric OID. + regclass is handy for symbolic display of a numeric OID.
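 A minimal sketch of both directions (mytable is the example table from above, so
 the numeric OID returned for it is whatever the installation happened to assign):

SELECT 'mytable'::regclass::oid;   -- table name to numeric OID
SELECT 1259::regclass;             -- numeric OID to name; 1259 is the pg_class catalog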
@@ -4479,80 +4566,80 @@ SELECT * FROM pg_attribute - oid + oid any numeric object identifier - 564182 + 564182 - regproc - pg_proc + regproc + pg_proc function name - sum + sum - regprocedure - pg_proc + regprocedure + pg_proc function with argument types - sum(int4) + sum(int4) - regoper - pg_operator + regoper + pg_operator operator name - + + + - regoperator - pg_operator + regoperator + pg_operator operator with argument types - *(integer,integer) or -(NONE,integer) + *(integer,integer) or -(NONE,integer) - regclass - pg_class + regclass + pg_class relation name - pg_type + pg_type - regtype - pg_type + regtype + pg_type data type name - integer + integer - regrole - pg_authid + regrole + pg_authid role name - smithee + smithee - regnamespace - pg_namespace + regnamespace + pg_namespace namespace name - pg_catalog + pg_catalog - regconfig - pg_ts_config + regconfig + pg_ts_config text search configuration - english + english - regdictionary - pg_ts_dict + regdictionary + pg_ts_dict text search dictionary - simple + simple @@ -4563,11 +4650,11 @@ SELECT * FROM pg_attribute schema-qualified names, and will display schema-qualified names on output if the object would not be found in the current search path without being qualified. - The regproc and regoper alias types will only + The regproc and regoper alias types will only accept input names that are unique (not overloaded), so they are - of limited use; for most uses regprocedure or - regoperator are more appropriate. For regoperator, - unary operators are identified by writing NONE for the unused + of limited use; for most uses regprocedure or + regoperator are more appropriate. For regoperator, + unary operators are identified by writing NONE for the unused operand. @@ -4577,12 +4664,12 @@ SELECT * FROM pg_attribute constant of one of these types appears in a stored expression (such as a column default expression or view), it creates a dependency on the referenced object. For example, if a column has a default - expression nextval('my_seq'::regclass), + expression nextval('my_seq'::regclass), PostgreSQL understands that the default expression depends on the sequence - my_seq; the system will not let the sequence be dropped + my_seq; the system will not let the sequence be dropped without first removing the default expression. - regrole is the only exception for the property. Constants of this + regrole is the only exception for the property. Constants of this type are not allowed in such expressions. @@ -4595,28 +4682,28 @@ SELECT * FROM pg_attribute - Another identifier type used by the system is xid, or transaction - (abbreviated xact) identifier. This is the data type of the system columns - xmin and xmax. Transaction identifiers are 32-bit quantities. + Another identifier type used by the system is xid, or transaction + (abbreviated xact) identifier. This is the data type of the system columns + xmin and xmax. Transaction identifiers are 32-bit quantities. - A third identifier type used by the system is cid, or + A third identifier type used by the system is cid, or command identifier. This is the data type of the system columns - cmin and cmax. Command identifiers are also 32-bit quantities. + cmin and cmax. Command identifiers are also 32-bit quantities. - A final identifier type used by the system is tid, or tuple + A final identifier type used by the system is tid, or tuple identifier (row identifier). This is the data type of the system column - ctid. A tuple ID is a pair + ctid. 
A tuple ID is a pair (block number, tuple index within block) that identifies the physical location of the row within its table. (The system columns are further explained in .) + linkend="ddl-system-columns"/>.) @@ -4638,7 +4725,7 @@ SELECT * FROM pg_attribute Internally, an LSN is a 64-bit integer, representing a byte position in the write-ahead log stream. It is printed as two hexadecimal numbers of up to 8 digits each, separated by a slash; for example, - 16/B374D848. The pg_lsn type supports the + 16/B374D848. The pg_lsn type supports the standard comparison operators, like = and >. Two LSNs can be subtracted using the - operator; the result is the number of bytes separating @@ -4728,13 +4815,13 @@ SELECT * FROM pg_attribute The PostgreSQL type system contains a number of special-purpose entries that are collectively called - pseudo-types. A pseudo-type cannot be used as a + pseudo-types. A pseudo-type cannot be used as a column data type, but it can be used to declare a function's argument or result type. Each of the available pseudo-types is useful in situations where a function's behavior does not correspond to simply taking or returning a value of a specific SQL data type. lists the existing + linkend="datatype-pseudotypes-table"/> lists the existing pseudo-types. @@ -4750,106 +4837,106 @@ SELECT * FROM pg_attribute - any + any Indicates that a function accepts any input data type. - anyelement + anyelement Indicates that a function accepts any data type - (see ). + (see ). - anyarray + anyarray Indicates that a function accepts any array data type - (see ). + (see ). - anynonarray + anynonarray Indicates that a function accepts any non-array data type - (see ). + (see ). - anyenum + anyenum Indicates that a function accepts any enum data type - (see and - ). + (see and + ). - anyrange + anyrange Indicates that a function accepts any range data type - (see and - ). + (see and + ). - cstring + cstring Indicates that a function accepts or returns a null-terminated C string. - internal + internal Indicates that a function accepts or returns a server-internal data type. - language_handler - A procedural language call handler is declared to return language_handler. + language_handler + A procedural language call handler is declared to return language_handler. - fdw_handler - A foreign-data wrapper handler is declared to return fdw_handler. + fdw_handler + A foreign-data wrapper handler is declared to return fdw_handler. - index_am_handler - An index access method handler is declared to return index_am_handler. + index_am_handler + An index access method handler is declared to return index_am_handler. - tsm_handler - A tablesample method handler is declared to return tsm_handler. + tsm_handler + A tablesample method handler is declared to return tsm_handler. - record + record Identifies a function taking or returning an unspecified row type. - trigger - A trigger function is declared to return trigger. + trigger + A trigger function is declared to return trigger. - event_trigger - An event trigger function is declared to return event_trigger. + event_trigger + An event trigger function is declared to return event_trigger. - pg_ddl_command + pg_ddl_command Identifies a representation of DDL commands that is available to event triggers. - void + void Indicates that a function returns no value. - unknown + unknown Identifies a not-yet-resolved type, e.g. of an undecorated string literal. - opaque + opaque An obsolete type name that formerly served many of the above purposes. 
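 To make the polymorphic entries in the table above concrete, here is a minimal
 sketch of a function declared with anyarray and anyelement (the
 function name is made up for illustration):

CREATE FUNCTION first_element(anyarray) RETURNS anyelement
    AS 'SELECT $1[1]' LANGUAGE SQL;

SELECT first_element(ARRAY[3,2,1]);         -- 3, returned as integer
SELECT first_element(ARRAY['a','b','c']);   -- a, returned as text

 The same function body serves any array type, with the result type resolved from
 the argument at call time.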
@@ -4868,24 +4955,24 @@ SELECT * FROM pg_attribute Functions coded in procedural languages can use pseudo-types only as allowed by their implementation languages. At present most procedural languages forbid use of a pseudo-type as an argument type, and allow - only void and record as a result type (plus - trigger or event_trigger when the function is used + only void and record as a result type (plus + trigger or event_trigger when the function is used as a trigger or event trigger). Some also - support polymorphic functions using the types anyelement, - anyarray, anynonarray, anyenum, and - anyrange. + support polymorphic functions using the types anyelement, + anyarray, anynonarray, anyenum, and + anyrange. - The internal pseudo-type is used to declare functions + The internal pseudo-type is used to declare functions that are meant only to be called internally by the database system, and not by direct invocation in an SQL - query. If a function has at least one internal-type + query. If a function has at least one internal-type argument then it cannot be called from SQL. To preserve the type safety of this restriction it is important to follow this coding rule: do not create any function that is - declared to return internal unless it has at least one - internal argument. + declared to return internal unless it has at least one + internal argument. diff --git a/doc/src/sgml/datetime.sgml b/doc/src/sgml/datetime.sgml index ef9139f9e3..d269aa4cc5 100644 --- a/doc/src/sgml/datetime.sgml +++ b/doc/src/sgml/datetime.sgml @@ -37,18 +37,18 @@ - If the numeric token contains a colon (:), this is + If the numeric token contains a colon (:), this is a time string. Include all subsequent digits and colons. - If the numeric token contains a dash (-), slash - (/), or two or more dots (.), this is + If the numeric token contains a dash (-), slash + (/), or two or more dots (.), this is a date string which might have a text month. If a date token has already been seen, it is instead interpreted as a time zone - name (e.g., America/New_York). + name (e.g., America/New_York). @@ -63,8 +63,8 @@ - If the token starts with a plus (+) or minus - (-), then it is either a numeric time zone or a special + If the token starts with a plus (+) or minus + (-), then it is either a numeric time zone or a special field. @@ -114,7 +114,7 @@ and if no other date fields have been previously read, then interpret as a concatenated date (e.g., 19990118 or 990118). - The interpretation is YYYYMMDD or YYMMDD. + The interpretation is YYYYMMDD or YYMMDD. @@ -128,7 +128,7 @@ If four or six digits and a year has already been read, then - interpret as a time (HHMM or HHMMSS). + interpret as a time (HHMM or HHMMSS). @@ -143,7 +143,7 @@ Otherwise the date field ordering is assumed to follow the - DateStyle setting: mm-dd-yy, dd-mm-yy, or yy-mm-dd. + DateStyle setting: mm-dd-yy, dd-mm-yy, or yy-mm-dd. Throw an error if a month or day field is found to be out of range. @@ -167,7 +167,7 @@ Gregorian years AD 1-99 can be entered by using 4 digits with leading - zeros (e.g., 0099 is AD 99). + zeros (e.g., 0099 is AD 99). @@ -180,7 +180,7 @@ Date/Time Key Words - shows the tokens that are + shows the tokens that are recognized as names of months. @@ -247,7 +247,7 @@
- shows the tokens that are + shows the tokens that are recognized as names of days of the week. @@ -294,7 +294,7 @@ - shows the tokens that serve + shows the tokens that serve various modifier purposes. @@ -317,7 +317,7 @@ Ignored
- JULIAN, JD, J + JULIAN, JD, J Next field is Julian Date @@ -349,28 +349,28 @@ Since timezone abbreviations are not well standardized, PostgreSQL provides a means to customize the set of abbreviations accepted by the server. The - run-time parameter + run-time parameter determines the active set of abbreviations. While this parameter can be altered by any database user, the possible values for it are under the control of the database administrator — they are in fact names of configuration files stored in - .../share/timezonesets/ of the installation directory. + .../share/timezonesets/ of the installation directory. By adding or altering files in that directory, the administrator can set local policy for timezone abbreviations.
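 For example, a session can select an abbreviation set explicitly with an ordinary
 SET command (a sketch; the displayed timestamp depends on the session's
 TimeZone and DateStyle settings, so no output is shown):

SET timezone_abbreviations = 'Default';
SELECT timestamptz '2018-11-11 10:00 EST';   -- EST is accepted because it appears in the Default file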
- timezone_abbreviations can be set to any file name - found in .../share/timezonesets/, if the file's name + timezone_abbreviations can be set to any file name + found in .../share/timezonesets/, if the file's name is entirely alphabetic. (The prohibition against non-alphabetic - characters in timezone_abbreviations prevents reading + characters in timezone_abbreviations prevents reading files outside the intended directory, as well as reading editor backup files and other extraneous files.) A timezone abbreviation file can contain blank lines and comments - beginning with #. Non-comment lines must have one of + beginning with #. Non-comment lines must have one of these formats: @@ -388,12 +388,12 @@ the equivalent offset in seconds from UTC, positive being east from Greenwich and negative being west. For example, -18000 would be five hours west of Greenwich, or North American east coast standard time. - D indicates that the zone name represents local + D indicates that the zone name represents local daylight-savings time rather than standard time. - Alternatively, a time_zone_name can be given, referencing + Alternatively, a time_zone_name can be given, referencing a zone name defined in the IANA timezone database. The zone's definition is consulted to see whether the abbreviation is or has been in use in that zone, and if so, the appropriate meaning is used — that is, @@ -417,34 +417,34 @@ - The @INCLUDE syntax allows inclusion of another file in the - .../share/timezonesets/ directory. Inclusion can be nested, + The @INCLUDE syntax allows inclusion of another file in the + .../share/timezonesets/ directory. Inclusion can be nested, to a limited depth. - The @OVERRIDE syntax indicates that subsequent entries in the + The @OVERRIDE syntax indicates that subsequent entries in the file can override previous entries (typically, entries obtained from included files). Without this, conflicting definitions of the same timezone abbreviation are considered an error. - In an unmodified installation, the file Default contains + In an unmodified installation, the file Default contains all the non-conflicting time zone abbreviations for most of the world. - Additional files Australia and India are + Additional files Australia and India are provided for those regions: these files first include the - Default file and then add or modify abbreviations as needed. + Default file and then add or modify abbreviations as needed. For reference purposes, a standard installation also contains files - Africa.txt, America.txt, etc, containing + Africa.txt, America.txt, etc, containing information about every time zone abbreviation known to be in use according to the IANA timezone database. The zone name definitions found in these files can be copied and pasted into a custom configuration file as needed. Note that these files cannot be directly - referenced as timezone_abbreviations settings, because of + referenced as timezone_abbreviations settings, because of the dot embedded in their names. @@ -460,16 +460,16 @@ Time zone abbreviations defined in the configuration file override non-timezone meanings built into PostgreSQL. - For example, the Australia configuration file defines - SAT (for South Australian Standard Time). When this - file is active, SAT will not be recognized as an abbreviation + For example, the Australia configuration file defines + SAT (for South Australian Standard Time). When this + file is active, SAT will not be recognized as an abbreviation for Saturday. 
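 Purely as an illustration of the formats described above, a hypothetical custom file
 (say, my_abbrevs) placed in that directory might contain:

@INCLUDE Default
@OVERRIDE
NST  -12600                 # Newfoundland Standard Time, 3.5 hours west of Greenwich
NDT  -9000 D                # Newfoundland Daylight Time
ACDT Australia/Adelaide     # take the meaning from the IANA zone definition

 The file name, abbreviations, and offsets here are only examples; the shipped
 Default file already covers the common cases.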
- If you modify files in .../share/timezonesets/, + If you modify files in .../share/timezonesets/, it is up to you to make backups — a normal database dump will not include this directory. @@ -492,10 +492,10 @@ datetime literal, the datetime values are constrained by the natural rules for dates and times according to the Gregorian calendar
. - PostgreSQL follows the SQL + PostgreSQL follows the SQL standard's lead by counting dates exclusively in the Gregorian calendar, even for years before that calendar was in use. - This rule is known as the proleptic Gregorian calendar. + This rule is known as the proleptic Gregorian calendar. @@ -569,7 +569,7 @@ $ cal 9 1752 dominions, not other places. Since it would be difficult and confusing to try to track the actual calendars that were in use in various places at various times, - PostgreSQL does not try, but rather follows the Gregorian + PostgreSQL does not try, but rather follows the Gregorian calendar rules for all dates, even though this method is not historically accurate. @@ -597,7 +597,7 @@ $ cal 9 1752 and probably takes its name from Scaliger's father, the Italian scholar Julius Caesar Scaliger (1484-1558). In the Julian Date system, each day has a sequential number, starting - from JD 0 (which is sometimes called the Julian Date). + from JD 0 (which is sometimes called the Julian Date). JD 0 corresponds to 1 January 4713 BC in the Julian calendar, or 24 November 4714 BC in the Gregorian calendar. Julian Date counting is most often used by astronomers for labeling their nightly observations, @@ -607,10 +607,10 @@ $ cal 9 1752 - Although PostgreSQL supports Julian Date notation for + Although PostgreSQL supports Julian Date notation for input and output of dates (and also uses Julian dates for some internal datetime calculations), it does not observe the nicety of having dates - run from noon to noon. PostgreSQL treats a Julian Date + run from noon to noon. PostgreSQL treats a Julian Date as running from midnight to midnight. diff --git a/doc/src/sgml/dblink.sgml b/doc/src/sgml/dblink.sgml index f19c6b19f5..87e14ea093 100644 --- a/doc/src/sgml/dblink.sgml +++ b/doc/src/sgml/dblink.sgml @@ -8,17 +8,17 @@ - dblink is a module that supports connections to - other PostgreSQL databases from within a database + dblink is a module that supports connections to + other PostgreSQL databases from within a database session. - See also , which provides roughly the same + See also , which provides roughly the same functionality using a more modern and standards-compliant infrastructure. - + dblink_connect @@ -44,9 +44,9 @@ dblink_connect(text connname, text connstr) returns text Description - dblink_connect() establishes a connection to a remote - PostgreSQL database. The server and database to - be contacted are identified through a standard libpq + dblink_connect() establishes a connection to a remote + PostgreSQL database. The server and database to + be contacted are identified through a standard libpq connection string. Optionally, a name can be assigned to the connection. Multiple named connections can be open at once, but only one unnamed connection is permitted at a time. The connection @@ -58,8 +58,8 @@ dblink_connect(text connname, text connstr) returns text server. It is recommended to use the foreign-data wrapper dblink_fdw when defining the foreign server. See the example below, as well as - and - . + and + . @@ -81,10 +81,10 @@ dblink_connect(text connname, text connstr) returns text connstr - libpq-style connection info string, for example + libpq-style connection info string, for example hostaddr=127.0.0.1 port=5432 dbname=mydb user=postgres - password=mypasswd. - For details see . + password=mypasswd options=-csearch_path=. + For details see . Alternatively, the name of a foreign server. 
@@ -96,7 +96,7 @@ dblink_connect(text connname, text connstr) returns text Return Value - Returns status, which is always OK (since any error + Returns status, which is always OK (since any error causes the function to throw an error instead of returning). @@ -105,15 +105,26 @@ dblink_connect(text connname, text connstr) returns text Notes - Only superusers may use dblink_connect to create + If untrusted users have access to a database that has not adopted a + secure schema usage pattern, + begin each session by removing publicly-writable schemas from + search_path. One could, for example, + add options=-csearch_path= to + connstr. This consideration is not specific + to dblink; it applies to every interface for + executing arbitrary SQL commands. + + + + Only superusers may use dblink_connect to create non-password-authenticated connections. If non-superusers need this - capability, use dblink_connect_u instead. + capability, use dblink_connect_u instead. It is unwise to choose connection names that contain equal signs, as this opens a risk of confusion with connection info strings - in other dblink functions. + in other dblink functions. @@ -121,13 +132,13 @@ dblink_connect(text connname, text connstr) returns text Examples -SELECT dblink_connect('dbname=postgres'); +SELECT dblink_connect('dbname=postgres options=-csearch_path='); dblink_connect ---------------- OK (1 row) -SELECT dblink_connect('myconn', 'dbname=postgres'); +SELECT dblink_connect('myconn', 'dbname=postgres options=-csearch_path='); dblink_connect ---------------- OK @@ -182,7 +193,7 @@ DROP SERVER fdtest; - + dblink_connect_u @@ -208,8 +219,8 @@ dblink_connect_u(text connname, text connstr) returns text Description - dblink_connect_u() is identical to - dblink_connect(), except that it will allow non-superusers + dblink_connect_u() is identical to + dblink_connect(), except that it will allow non-superusers to connect using any authentication method. @@ -217,29 +228,29 @@ dblink_connect_u(text connname, text connstr) returns text If the remote server selects an authentication method that does not involve a password, then impersonation and subsequent escalation of privileges can occur, because the session will appear to have - originated from the user as which the local PostgreSQL + originated from the user as which the local PostgreSQL server runs. Also, even if the remote server does demand a password, it is possible for the password to be supplied from the server - environment, such as a ~/.pgpass file belonging to the + environment, such as a ~/.pgpass file belonging to the server's user. This opens not only a risk of impersonation, but the possibility of exposing a password to an untrustworthy remote server. - Therefore, dblink_connect_u() is initially - installed with all privileges revoked from PUBLIC, + Therefore, dblink_connect_u() is initially + installed with all privileges revoked from PUBLIC, making it un-callable except by superusers. In some situations - it may be appropriate to grant EXECUTE permission for - dblink_connect_u() to specific users who are considered + it may be appropriate to grant EXECUTE permission for + dblink_connect_u() to specific users who are considered trustworthy, but this should be done with care. It is also recommended - that any ~/.pgpass file belonging to the server's user - not contain any records specifying a wildcard host name. + that any ~/.pgpass file belonging to the server's user + not contain any records specifying a wildcard host name. 
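 Such a grant might look like this (trusted_user is a hypothetical role; both
 signatures of the function are covered):

GRANT EXECUTE ON FUNCTION dblink_connect_u(text) TO trusted_user;
GRANT EXECUTE ON FUNCTION dblink_connect_u(text, text) TO trusted_user;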
- For further details see dblink_connect(). + For further details see dblink_connect(). - + dblink_disconnect @@ -265,8 +276,8 @@ dblink_disconnect(text connname) returns text Description - dblink_disconnect() closes a connection previously opened - by dblink_connect(). The form with no arguments closes + dblink_disconnect() closes a connection previously opened + by dblink_connect(). The form with no arguments closes an unnamed connection. @@ -290,7 +301,7 @@ dblink_disconnect(text connname) returns text Return Value - Returns status, which is always OK (since any error + Returns status, which is always OK (since any error causes the function to throw an error instead of returning). @@ -314,7 +325,7 @@ SELECT dblink_disconnect('myconn'); - + dblink @@ -341,15 +352,15 @@ dblink(text sql [, bool fail_on_error]) returns setof record Description - dblink executes a query (usually a SELECT, + dblink executes a query (usually a SELECT, but it can be any SQL statement that returns rows) in a remote database. - When two text arguments are given, the first one is first + When two text arguments are given, the first one is first looked up as a persistent connection's name; if found, the command is executed on that connection. If not found, the first argument - is treated as a connection info string as for dblink_connect, + is treated as a connection info string as for dblink_connect, and the indicated connection is made just for the duration of this command. @@ -373,7 +384,7 @@ dblink(text sql [, bool fail_on_error]) returns setof record A connection info string, as previously described for - dblink_connect. + dblink_connect. @@ -383,7 +394,7 @@ dblink(text sql [, bool fail_on_error]) returns setof record The SQL query that you wish to execute in the remote database, - for example select * from foo. + for example select * from foo. @@ -407,34 +418,35 @@ dblink(text sql [, bool fail_on_error]) returns setof record The function returns the row(s) produced by the query. Since - dblink can be used with any query, it is declared - to return record, rather than specifying any particular + dblink can be used with any query, it is declared + to return record, rather than specifying any particular set of columns. This means that you must specify the expected set of columns in the calling query — otherwise - PostgreSQL would not know what to expect. + PostgreSQL would not know what to expect. Here is an example: SELECT * - FROM dblink('dbname=mydb', 'select proname, prosrc from pg_proc') + FROM dblink('dbname=mydb options=-csearch_path=', + 'select proname, prosrc from pg_proc') AS t1(proname name, prosrc text) WHERE proname LIKE 'bytea%'; - The alias part of the FROM clause must + The alias part of the FROM clause must specify the column names and types that the function will return. (Specifying column names in an alias is actually standard SQL - syntax, but specifying column types is a PostgreSQL + syntax, but specifying column types is a PostgreSQL extension.) This allows the system to understand what - * should expand to, and what proname - in the WHERE clause refers to, in advance of trying + * should expand to, and what proname + in the WHERE clause refers to, in advance of trying to execute the function. At run time, an error will be thrown if the actual query result from the remote database does not - have the same number of columns shown in the FROM clause. - The column names need not match, however, and dblink + have the same number of columns shown in the FROM clause. 
+ The column names need not match, however, and dblink does not insist on exact type matches either. It will succeed so long as the returned data strings are valid input for the - column type declared in the FROM clause. + column type declared in the FROM clause. @@ -442,7 +454,7 @@ SELECT * Notes - A convenient way to use dblink with predetermined + A convenient way to use dblink with predetermined queries is to create a view. This allows the column type information to be buried in the view, instead of having to spell it out in every query. For example, @@ -450,7 +462,8 @@ SELECT * CREATE VIEW myremote_pg_proc AS SELECT * - FROM dblink('dbname=postgres', 'select proname, prosrc from pg_proc') + FROM dblink('dbname=postgres options=-csearch_path=', + 'select proname, prosrc from pg_proc') AS t1(proname name, prosrc text); SELECT * FROM myremote_pg_proc WHERE proname LIKE 'bytea%'; @@ -461,7 +474,8 @@ SELECT * FROM myremote_pg_proc WHERE proname LIKE 'bytea%'; Examples -SELECT * FROM dblink('dbname=postgres', 'select proname, prosrc from pg_proc') +SELECT * FROM dblink('dbname=postgres options=-csearch_path=', + 'select proname, prosrc from pg_proc') AS t1(proname name, prosrc text) WHERE proname LIKE 'bytea%'; proname | prosrc ------------+------------ @@ -479,7 +493,7 @@ SELECT * FROM dblink('dbname=postgres', 'select proname, prosrc from pg_proc') byteaout | byteaout (12 rows) -SELECT dblink_connect('dbname=postgres'); +SELECT dblink_connect('dbname=postgres options=-csearch_path='); dblink_connect ---------------- OK @@ -503,7 +517,7 @@ SELECT * FROM dblink('select proname, prosrc from pg_proc') byteaout | byteaout (12 rows) -SELECT dblink_connect('myconn', 'dbname=regression'); +SELECT dblink_connect('myconn', 'dbname=regression options=-csearch_path='); dblink_connect ---------------- OK @@ -532,7 +546,7 @@ SELECT * FROM dblink('myconn', 'select proname, prosrc from pg_proc') - + dblink_exec @@ -559,15 +573,15 @@ dblink_exec(text sql [, bool fail_on_error]) returns text Description - dblink_exec executes a command (that is, any SQL statement + dblink_exec executes a command (that is, any SQL statement that doesn't return rows) in a remote database. - When two text arguments are given, the first one is first + When two text arguments are given, the first one is first looked up as a persistent connection's name; if found, the command is executed on that connection. If not found, the first argument - is treated as a connection info string as for dblink_connect, + is treated as a connection info string as for dblink_connect, and the indicated connection is made just for the duration of this command. @@ -591,7 +605,7 @@ dblink_exec(text sql [, bool fail_on_error]) returns text A connection info string, as previously described for - dblink_connect. + dblink_connect. @@ -602,7 +616,7 @@ dblink_exec(text sql [, bool fail_on_error]) returns text The SQL command that you wish to execute in the remote database, for example - insert into foo values(0,'a','{"a0","b0","c0"}'). + insert into foo values(0,'a','{"a0","b0","c0"}'). @@ -614,7 +628,7 @@ dblink_exec(text sql [, bool fail_on_error]) returns text If true (the default when omitted) then an error thrown on the remote side of the connection causes an error to also be thrown locally. If false, the remote error is locally reported as a NOTICE, - and the function's return value is set to ERROR. + and the function's return value is set to ERROR. 
@@ -625,7 +639,7 @@ dblink_exec(text sql [, bool fail_on_error]) returns text Return Value - Returns status, either the command's status string or ERROR. + Returns status, either the command's status string or ERROR. @@ -669,7 +683,7 @@ DETAIL: ERROR: null value in column "relnamespace" violates not-null constrain - + dblink_open @@ -695,9 +709,9 @@ dblink_open(text connname, text cursorname, text sql [, bool fail_on_error]) ret Description - dblink_open() opens a cursor in a remote database. + dblink_open() opens a cursor in a remote database. The cursor can subsequently be manipulated with - dblink_fetch() and dblink_close(). + dblink_fetch() and dblink_close(). @@ -728,8 +742,8 @@ dblink_open(text connname, text cursorname, text sql [, bool fail_on_error]) ret sql - The SELECT statement that you wish to execute in the remote - database, for example select * from pg_class. + The SELECT statement that you wish to execute in the remote + database, for example select * from pg_class. @@ -741,7 +755,7 @@ dblink_open(text connname, text cursorname, text sql [, bool fail_on_error]) ret If true (the default when omitted) then an error thrown on the remote side of the connection causes an error to also be thrown locally. If false, the remote error is locally reported as a NOTICE, - and the function's return value is set to ERROR. + and the function's return value is set to ERROR. @@ -752,7 +766,7 @@ dblink_open(text connname, text cursorname, text sql [, bool fail_on_error]) ret Return Value - Returns status, either OK or ERROR. + Returns status, either OK or ERROR. @@ -761,16 +775,16 @@ dblink_open(text connname, text cursorname, text sql [, bool fail_on_error]) ret Since a cursor can only persist within a transaction, - dblink_open starts an explicit transaction block - (BEGIN) on the remote side, if the remote side was + dblink_open starts an explicit transaction block + (BEGIN) on the remote side, if the remote side was not already within a transaction. This transaction will be - closed again when the matching dblink_close is + closed again when the matching dblink_close is executed. Note that if - you use dblink_exec to change data between - dblink_open and dblink_close, - and then an error occurs or you use dblink_disconnect before - dblink_close, your change will be - lost because the transaction will be aborted. + you use dblink_exec to change data between + dblink_open and dblink_close, + and then an error occurs or you use dblink_disconnect before + dblink_close, your change will be + lost because the transaction will be aborted. @@ -778,7 +792,7 @@ dblink_open(text connname, text cursorname, text sql [, bool fail_on_error]) ret Examples -SELECT dblink_connect('dbname=postgres'); +SELECT dblink_connect('dbname=postgres options=-csearch_path='); dblink_connect ---------------- OK @@ -793,7 +807,7 @@ SELECT dblink_open('foo', 'select proname, prosrc from pg_proc'); - + dblink_fetch @@ -819,8 +833,8 @@ dblink_fetch(text connname, text cursorname, int howmany [, bool fail_on_error]) Description - dblink_fetch fetches rows from a cursor previously - established by dblink_open. + dblink_fetch fetches rows from a cursor previously + established by dblink_open. @@ -851,7 +865,7 @@ dblink_fetch(text connname, text cursorname, int howmany [, bool fail_on_error]) howmany - The maximum number of rows to retrieve. The next howmany + The maximum number of rows to retrieve. The next howmany rows are fetched, starting at the current cursor position, moving forward. 
Once the cursor has reached its end, no more rows are produced. @@ -878,7 +892,7 @@ dblink_fetch(text connname, text cursorname, int howmany [, bool fail_on_error]) The function returns the row(s) fetched from the cursor. To use this function, you will need to specify the expected set of columns, - as previously discussed for dblink. + as previously discussed for dblink. @@ -887,11 +901,11 @@ dblink_fetch(text connname, text cursorname, int howmany [, bool fail_on_error]) On a mismatch between the number of return columns specified in the - FROM clause, and the actual number of columns returned by the + FROM clause, and the actual number of columns returned by the remote cursor, an error will be thrown. In this event, the remote cursor is still advanced by as many rows as it would have been if the error had not occurred. The same is true for any other error occurring in the local - query after the remote FETCH has been done. + query after the remote FETCH has been done. @@ -899,7 +913,7 @@ dblink_fetch(text connname, text cursorname, int howmany [, bool fail_on_error]) Examples -SELECT dblink_connect('dbname=postgres'); +SELECT dblink_connect('dbname=postgres options=-csearch_path='); dblink_connect ---------------- OK @@ -946,7 +960,7 @@ SELECT * FROM dblink_fetch('foo', 5) AS (funcname name, source text); - + dblink_close @@ -972,8 +986,8 @@ dblink_close(text connname, text cursorname [, bool fail_on_error]) returns text Description - dblink_close closes a cursor previously opened with - dblink_open. + dblink_close closes a cursor previously opened with + dblink_open. @@ -1007,7 +1021,7 @@ dblink_close(text connname, text cursorname [, bool fail_on_error]) returns text If true (the default when omitted) then an error thrown on the remote side of the connection causes an error to also be thrown locally. If false, the remote error is locally reported as a NOTICE, - and the function's return value is set to ERROR. + and the function's return value is set to ERROR. @@ -1018,7 +1032,7 @@ dblink_close(text connname, text cursorname [, bool fail_on_error]) returns text Return Value - Returns status, either OK or ERROR. + Returns status, either OK or ERROR. @@ -1026,9 +1040,9 @@ dblink_close(text connname, text cursorname [, bool fail_on_error]) returns text Notes - If dblink_open started an explicit transaction block, + If dblink_open started an explicit transaction block, and this is the last remaining open cursor in this connection, - dblink_close will issue the matching COMMIT. + dblink_close will issue the matching COMMIT. @@ -1036,7 +1050,7 @@ dblink_close(text connname, text cursorname [, bool fail_on_error]) returns text Examples -SELECT dblink_connect('dbname=postgres'); +SELECT dblink_connect('dbname=postgres options=-csearch_path='); dblink_connect ---------------- OK @@ -1057,7 +1071,7 @@ SELECT dblink_close('foo'); - + dblink_get_connections @@ -1082,8 +1096,8 @@ dblink_get_connections() returns text[] Description - dblink_get_connections returns an array of the names - of all open named dblink connections. + dblink_get_connections returns an array of the names + of all open named dblink connections. @@ -1102,7 +1116,7 @@ SELECT dblink_get_connections(); - + dblink_error_message @@ -1127,7 +1141,7 @@ dblink_error_message(text connname) returns text Description - dblink_error_message fetches the most recent remote + dblink_error_message fetches the most recent remote error message for a given connection. 
@@ -1165,7 +1179,7 @@ SELECT dblink_error_message('dtest1'); - + dblink_send_query @@ -1190,7 +1204,7 @@ dblink_send_query(text connname, text sql) returns int Description - dblink_send_query sends a query to be executed + dblink_send_query sends a query to be executed asynchronously, that is, without immediately waiting for the result. There must not be an async query already in progress on the connection. @@ -1198,10 +1212,10 @@ dblink_send_query(text connname, text sql) returns int After successfully dispatching an async query, completion status - can be checked with dblink_is_busy, and the results - are ultimately collected with dblink_get_result. + can be checked with dblink_is_busy, and the results + are ultimately collected with dblink_get_result. It is also possible to attempt to cancel an active async query - using dblink_cancel_query. + using dblink_cancel_query. @@ -1223,7 +1237,7 @@ dblink_send_query(text connname, text sql) returns int The SQL statement that you wish to execute in the remote database, - for example select * from pg_class. + for example select * from pg_class. @@ -1247,7 +1261,7 @@ SELECT dblink_send_query('dtest1', 'SELECT * FROM foo WHERE f1 < 3'); - + dblink_is_busy @@ -1272,7 +1286,7 @@ dblink_is_busy(text connname) returns int Description - dblink_is_busy tests whether an async query is in progress. + dblink_is_busy tests whether an async query is in progress. @@ -1297,7 +1311,7 @@ dblink_is_busy(text connname) returns int Returns 1 if connection is busy, 0 if it is not busy. If this function returns 0, it is guaranteed that - dblink_get_result will not block. + dblink_get_result will not block. @@ -1310,7 +1324,7 @@ SELECT dblink_is_busy('dtest1'); - + dblink_get_notify @@ -1336,11 +1350,11 @@ dblink_get_notify(text connname) returns setof (notify_name text, be_pid int, ex Description - dblink_get_notify retrieves notifications on either + dblink_get_notify retrieves notifications on either the unnamed connection, or on a named connection if specified. - To receive notifications via dblink, LISTEN must - first be issued, using dblink_exec. - For details see and . + To receive notifications via dblink, LISTEN must + first be issued, using dblink_exec. + For details see and . @@ -1392,7 +1406,7 @@ SELECT * FROM dblink_get_notify(); - + dblink_get_result @@ -1417,9 +1431,9 @@ dblink_get_result(text connname [, bool fail_on_error]) returns setof record Description - dblink_get_result collects the results of an - asynchronous query previously sent with dblink_send_query. - If the query is not already completed, dblink_get_result + dblink_get_result collects the results of an + asynchronous query previously sent with dblink_send_query. + If the query is not already completed, dblink_get_result will wait until it is. @@ -1458,14 +1472,14 @@ dblink_get_result(text connname [, bool fail_on_error]) returns setof record For an async query (that is, a SQL statement returning rows), the function returns the row(s) produced by the query. To use this function, you will need to specify the expected set of columns, - as previously discussed for dblink. + as previously discussed for dblink. For an async command (that is, a SQL statement not returning rows), the function returns a single row with a single text column containing the command's status string. It is still necessary to specify that - the result will have a single text column in the calling FROM + the result will have a single text column in the calling FROM clause. 
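 For instance, collecting the result of an async command (a statement returning no rows)
 might look like this, assuming an already-open connection named dtest1 and a
 made-up table name:

SELECT dblink_send_query('dtest1', 'CREATE TABLE async_demo (a int)');
SELECT * FROM dblink_get_result('dtest1') AS t(status text);
    status
--------------
 CREATE TABLE
(1 row)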
@@ -1474,22 +1488,22 @@ dblink_get_result(text connname [, bool fail_on_error]) returns setof record Notes - This function must be called if - dblink_send_query returned 1. + This function must be called if + dblink_send_query returned 1. It must be called once for each query sent, and one additional time to obtain an empty set result, before the connection can be used again. - When using dblink_send_query and - dblink_get_result, dblink fetches the entire + When using dblink_send_query and + dblink_get_result, dblink fetches the entire remote query result before returning any of it to the local query processor. If the query returns a large number of rows, this can result in transient memory bloat in the local session. It may be better to open - such a query as a cursor with dblink_open and then fetch a + such a query as a cursor with dblink_open and then fetch a manageable number of rows at a time. Alternatively, use plain - dblink(), which avoids memory bloat by spooling large result + dblink(), which avoids memory bloat by spooling large result sets to disk. @@ -1556,7 +1570,7 @@ contrib_regression=# SELECT * FROM dblink_get_result('dtest1') AS t1(f1 int, f2 - + dblink_cancel_query @@ -1581,13 +1595,13 @@ dblink_cancel_query(text connname) returns text Description - dblink_cancel_query attempts to cancel any query that + dblink_cancel_query attempts to cancel any query that is in progress on the named connection. Note that this is not certain to succeed (since, for example, the remote query might already have finished). A cancel request simply improves the odds that the query will fail soon. You must still complete the normal query protocol, for example by calling - dblink_get_result. + dblink_get_result. @@ -1610,7 +1624,7 @@ dblink_cancel_query(text connname) returns text Return Value - Returns OK if the cancel request has been sent, or + Returns OK if the cancel request has been sent, or the text of an error message on failure. @@ -1624,7 +1638,7 @@ SELECT dblink_cancel_query('dtest1'); - + dblink_get_pkey @@ -1651,7 +1665,7 @@ dblink_get_pkey(text relname) returns setof dblink_pkey_results Description - dblink_get_pkey provides information about the primary + dblink_get_pkey provides information about the primary key of a relation in the local database. This is sometimes useful in generating queries to be sent to remote databases. @@ -1665,10 +1679,10 @@ dblink_get_pkey(text relname) returns setof dblink_pkey_results relname - Name of a local relation, for example foo or - myschema.mytab. Include double quotes if the + Name of a local relation, for example foo or + myschema.mytab. Include double quotes if the name is mixed-case or contains special characters, for - example "FooBar"; without quotes, the string + example "FooBar"; without quotes, the string will be folded to lower case. @@ -1687,7 +1701,7 @@ dblink_get_pkey(text relname) returns setof dblink_pkey_results CREATE TYPE dblink_pkey_results AS (position int, colname text); - The position column simply runs from 1 to N; + The position column simply runs from 1 to N; it is the number of the field within the primary key, not the number within the table's columns. @@ -1716,7 +1730,7 @@ SELECT * FROM dblink_get_pkey('foobar'); - + dblink_build_sql_insert @@ -1748,10 +1762,10 @@ dblink_build_sql_insert(text relname, Description - dblink_build_sql_insert can be useful in doing selective + dblink_build_sql_insert can be useful in doing selective replication of a local table to a remote database. 
It selects a row from the local table based on primary key, and then builds a SQL - INSERT command that will duplicate that row, but with + INSERT command that will duplicate that row, but with the primary key values replaced by the values in the last argument. (To make an exact copy of the row, just specify the same values for the last two arguments.) @@ -1766,10 +1780,10 @@ dblink_build_sql_insert(text relname, relname - Name of a local relation, for example foo or - myschema.mytab. Include double quotes if the + Name of a local relation, for example foo or + myschema.mytab. Include double quotes if the name is mixed-case or contains special characters, for - example "FooBar"; without quotes, the string + example "FooBar"; without quotes, the string will be folded to lower case. @@ -1780,7 +1794,7 @@ dblink_build_sql_insert(text relname, Attribute numbers (1-based) of the primary key fields, - for example 1 2. + for example 1 2. @@ -1811,7 +1825,7 @@ dblink_build_sql_insert(text relname, Values of the primary key fields to be placed in the resulting - INSERT command. Each field is represented in text form. + INSERT command. Each field is represented in text form. @@ -1828,10 +1842,10 @@ dblink_build_sql_insert(text relname, Notes - As of PostgreSQL 9.0, the attribute numbers in + As of PostgreSQL 9.0, the attribute numbers in primary_key_attnums are interpreted as logical column numbers, corresponding to the column's position in - SELECT * FROM relname. Previous versions interpreted the + SELECT * FROM relname. Previous versions interpreted the numbers as physical column positions. There is a difference if any column(s) to the left of the indicated column have been dropped during the lifetime of the table. @@ -1851,7 +1865,7 @@ SELECT dblink_build_sql_insert('foo', '1 2', 2, '{"1", "a"}', '{"1", "b''a"}'); - + dblink_build_sql_delete @@ -1881,9 +1895,9 @@ dblink_build_sql_delete(text relname, Description - dblink_build_sql_delete can be useful in doing selective + dblink_build_sql_delete can be useful in doing selective replication of a local table to a remote database. It builds a SQL - DELETE command that will delete the row with the given + DELETE command that will delete the row with the given primary key values. @@ -1896,10 +1910,10 @@ dblink_build_sql_delete(text relname, relname - Name of a local relation, for example foo or - myschema.mytab. Include double quotes if the + Name of a local relation, for example foo or + myschema.mytab. Include double quotes if the name is mixed-case or contains special characters, for - example "FooBar"; without quotes, the string + example "FooBar"; without quotes, the string will be folded to lower case. @@ -1910,7 +1924,7 @@ dblink_build_sql_delete(text relname, Attribute numbers (1-based) of the primary key fields, - for example 1 2. + for example 1 2. @@ -1929,7 +1943,7 @@ dblink_build_sql_delete(text relname, Values of the primary key fields to be used in the resulting - DELETE command. Each field is represented in text form. + DELETE command. Each field is represented in text form. @@ -1946,10 +1960,10 @@ dblink_build_sql_delete(text relname, Notes - As of PostgreSQL 9.0, the attribute numbers in + As of PostgreSQL 9.0, the attribute numbers in primary_key_attnums are interpreted as logical column numbers, corresponding to the column's position in - SELECT * FROM relname. Previous versions interpreted the + SELECT * FROM relname. Previous versions interpreted the numbers as physical column positions. 
There is a difference if any column(s) to the left of the indicated column have been dropped during the lifetime of the table. @@ -1969,7 +1983,7 @@ SELECT dblink_build_sql_delete('"MyFoo"', '1 2', 2, '{"1", "b"}'); - + dblink_build_sql_update @@ -2000,15 +2014,15 @@ dblink_build_sql_update(text relname, Description - dblink_build_sql_update can be useful in doing selective + dblink_build_sql_update can be useful in doing selective replication of a local table to a remote database. It selects a row from the local table based on primary key, and then builds a SQL - UPDATE command that will duplicate that row, but with + UPDATE command that will duplicate that row, but with the primary key values replaced by the values in the last argument. (To make an exact copy of the row, just specify the same values for - the last two arguments.) The UPDATE command always assigns + the last two arguments.) The UPDATE command always assigns all fields of the row — the main difference between this and - dblink_build_sql_insert is that it's assumed that + dblink_build_sql_insert is that it's assumed that the target row already exists in the remote table. @@ -2021,10 +2035,10 @@ dblink_build_sql_update(text relname, relname - Name of a local relation, for example foo or - myschema.mytab. Include double quotes if the + Name of a local relation, for example foo or + myschema.mytab. Include double quotes if the name is mixed-case or contains special characters, for - example "FooBar"; without quotes, the string + example "FooBar"; without quotes, the string will be folded to lower case. @@ -2035,7 +2049,7 @@ dblink_build_sql_update(text relname, Attribute numbers (1-based) of the primary key fields, - for example 1 2. + for example 1 2. @@ -2066,7 +2080,7 @@ dblink_build_sql_update(text relname, Values of the primary key fields to be placed in the resulting - UPDATE command. Each field is represented in text form. + UPDATE command. Each field is represented in text form. @@ -2083,10 +2097,10 @@ dblink_build_sql_update(text relname, Notes - As of PostgreSQL 9.0, the attribute numbers in + As of PostgreSQL 9.0, the attribute numbers in primary_key_attnums are interpreted as logical column numbers, corresponding to the column's position in - SELECT * FROM relname. Previous versions interpreted the + SELECT * FROM relname. Previous versions interpreted the numbers as physical column positions. There is a difference if any column(s) to the left of the indicated column have been dropped during the lifetime of the table. diff --git a/doc/src/sgml/ddl.sgml b/doc/src/sgml/ddl.sgml index b05a9c2150..b5ed1b7939 100644 --- a/doc/src/sgml/ddl.sgml +++ b/doc/src/sgml/ddl.sgml @@ -39,7 +39,7 @@ SQL does not make any guarantees about the order of the rows in a table. When a table is read, the rows will appear in an unspecified order, unless sorting is explicitly requested. This is covered in . Furthermore, SQL does not assign unique + linkend="queries"/>. Furthermore, SQL does not assign unique identifiers to rows, so it is possible to have several completely identical rows in a table. This is a consequence of the mathematical model that underlies SQL but is usually not desirable. @@ -64,7 +64,7 @@ built-in data types that fit many applications. Users can also define their own data types. Most built-in data types have obvious names and semantics, so we defer a detailed explanation to . Some of the frequently used data types are + linkend="datatype"/>. 
Some of the frequently used data types are integer for whole numbers, numeric for possibly fractional numbers, text for character strings, date for dates, time for @@ -79,7 +79,7 @@ To create a table, you use the aptly named command. + linkend="sql-createtable"/> command. In this command you specify at least a name for the new table, the names of the columns and the data type of each column. For example: @@ -95,7 +95,7 @@ CREATE TABLE my_first_table ( text; the second column has the name second_column and the type integer. The table and column names follow the identifier syntax explained - in . The type names are + in . The type names are usually also identifiers, but there are some exceptions. Note that the column list is comma-separated and surrounded by parentheses. @@ -139,7 +139,7 @@ CREATE TABLE products ( If you no longer need a table, you can remove it using the command. + linkend="sql-droptable"/> command. For example: DROP TABLE my_first_table; @@ -149,13 +149,13 @@ DROP TABLE products; Nevertheless, it is common in SQL script files to unconditionally try to drop each table before creating it, ignoring any error messages, so that the script works whether or not the table exists. - (If you like, you can use the DROP TABLE IF EXISTS variant + (If you like, you can use the DROP TABLE IF EXISTS variant to avoid the error messages, but this is not standard SQL.) If you need to modify a table that already exists, see later in this chapter. + linkend="ddl-alter"/> later in this chapter. @@ -163,7 +163,7 @@ DROP TABLE products; tables. The remainder of this chapter is concerned with adding features to the table definition to ensure data integrity, security, or convenience. If you are eager to fill your tables with - data now you can skip ahead to and read the + data now you can skip ahead to and read the rest of this chapter later. @@ -181,7 +181,7 @@ DROP TABLE products; columns will be filled with their respective default values. A data manipulation command can also request explicitly that a column be set to its default value, without having to know what that value is. - (Details about data manipulation commands are in .) + (Details about data manipulation commands are in .) @@ -207,9 +207,9 @@ CREATE TABLE products ( The default value can be an expression, which will be evaluated whenever the default value is inserted (not when the table is created). A common example - is for a timestamp column to have a default of CURRENT_TIMESTAMP, + is for a timestamp column to have a default of CURRENT_TIMESTAMP, so that it gets set to the time of row insertion. Another common - example is generating a serial number for each row. + example is generating a serial number for each row. In PostgreSQL this is typically done by something like: @@ -218,9 +218,9 @@ CREATE TABLE products ( ... ); - where the nextval() function supplies successive values - from a sequence object (see ). This arrangement is sufficiently common + where the nextval() function supplies successive values + from a sequence object (see ). This arrangement is sufficiently common that there's a special shorthand for it: CREATE TABLE products ( @@ -228,8 +228,8 @@ CREATE TABLE products ( ... ); - The SERIAL shorthand is discussed further in . + The SERIAL shorthand is discussed further in . 
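 A column default based on the current time, as mentioned above, could look like this
 (added_at is an illustrative column name):

CREATE TABLE products (
    product_no integer,
    name text,
    price numeric,
    added_at timestamp with time zone DEFAULT CURRENT_TIMESTAMP
);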
@@ -385,7 +385,7 @@ CREATE TABLE products ( CHECK (price > 0), discounted_price numeric, CHECK (discounted_price > 0), - CONSTRAINT valid_discount CHECK (price > discounted_price) + CONSTRAINT valid_discount CHECK (price > discounted_price) ); @@ -623,7 +623,7 @@ CREATE TABLE example ( Adding a primary key will automatically create a unique B-tree index on the column or group of columns listed in the primary key, and will - force the column(s) to be marked NOT NULL. + force the column(s) to be marked NOT NULL. @@ -828,7 +828,7 @@ CREATE TABLE order_items ( (The essential difference between these two choices is that NO ACTION allows the check to be deferred until later in the transaction, whereas RESTRICT does not.) - CASCADE specifies that when a referenced row is deleted, + CASCADE specifies that when a referenced row is deleted, row(s) referencing it should be automatically deleted as well. There are two other options: SET NULL and SET DEFAULT. @@ -845,19 +845,19 @@ CREATE TABLE order_items ( Analogous to ON DELETE there is also ON UPDATE which is invoked when a referenced column is changed (updated). The possible actions are the same. - In this case, CASCADE means that the updated values of the + In this case, CASCADE means that the updated values of the referenced column(s) should be copied into the referencing row(s). Normally, a referencing row need not satisfy the foreign key constraint - if any of its referencing columns are null. If MATCH FULL + if any of its referencing columns are null. If MATCH FULL is added to the foreign key declaration, a referencing row escapes satisfying the constraint only if all its referencing columns are null (so a mix of null and non-null values is guaranteed to fail a - MATCH FULL constraint). If you don't want referencing rows + MATCH FULL constraint). If you don't want referencing rows to be able to avoid satisfying the foreign key constraint, declare the - referencing column(s) as NOT NULL. + referencing column(s) as NOT NULL. @@ -876,9 +876,9 @@ CREATE TABLE order_items ( More information about updating and deleting data is in . Also see the description of foreign key constraint + linkend="dml"/>. Also see the description of foreign key constraint syntax in the reference documentation for - . + . @@ -908,8 +908,8 @@ CREATE TABLE circles ( - See also CREATE - TABLE ... CONSTRAINT ... EXCLUDE for details. + See also CREATE + TABLE ... CONSTRAINT ... EXCLUDE for details. @@ -923,7 +923,7 @@ CREATE TABLE circles ( System Columns - Every table has several system columns that are + Every table has several system columns that are implicitly defined by the system. Therefore, these names cannot be used as names of user-defined columns. (Note that these restrictions are separate from whether the name is a key word or @@ -939,7 +939,7 @@ CREATE TABLE circles ( - oid + oid @@ -948,16 +948,16 @@ CREATE TABLE circles ( The object identifier (object ID) of a row. This column is only present if the table was created using WITH - OIDS, or if the + OIDS
, or if the configuration variable was set at the time. This column is of type oid (same name as the column); see for more information about the type. + linkend="datatype-oid"/> for more information about the type. - tableoid + tableoid tableoid @@ -966,7 +966,7 @@ CREATE TABLE circles ( The OID of the table containing this row. This column is particularly handy for queries that select from inheritance - hierarchies (see ), since without it, + hierarchies (see ), since without it, it's difficult to tell which individual table a row came from. The tableoid can be joined against the oid column of @@ -976,7 +976,7 @@ CREATE TABLE circles ( - xmin + xmin xmin @@ -992,7 +992,7 @@ CREATE TABLE circles ( - cmin + cmin cmin @@ -1006,7 +1006,7 @@ CREATE TABLE circles ( - xmax + xmax xmax @@ -1023,7 +1023,7 @@ CREATE TABLE circles ( - cmax + cmax cmax @@ -1036,7 +1036,7 @@ CREATE TABLE circles ( - ctid + ctid ctid @@ -1047,7 +1047,7 @@ CREATE TABLE circles ( although the ctid can be used to locate the row version very quickly, a row's ctid will change if it is - updated or moved by VACUUM FULL. Therefore + updated or moved by VACUUM FULL. Therefore ctid is useless as a long-term row identifier. The OID, or even better a user-defined serial number, should be used to identify logical rows. @@ -1074,7 +1074,7 @@ CREATE TABLE circles ( a unique constraint (or unique index) exists, the system takes care not to generate an OID matching an already-existing row. (Of course, this is only possible if the table contains fewer - than 232 (4 billion) rows, and in practice the + than 232 (4 billion) rows, and in practice the table size had better be much less than that, or performance might suffer.) @@ -1082,7 +1082,7 @@ CREATE TABLE circles ( OIDs should never be assumed to be unique across tables; use - the combination of tableoid and row OID if you + the combination of tableoid and row OID if you need a database-wide identifier. @@ -1090,7 +1090,7 @@ CREATE TABLE circles ( Of course, the tables in question must be created WITH OIDS. As of PostgreSQL 8.1, - WITHOUT OIDS is the default. + WITHOUT OIDS is the default. @@ -1100,14 +1100,14 @@ CREATE TABLE circles ( Transaction identifiers are also 32-bit quantities. In a long-lived database it is possible for transaction IDs to wrap around. This is not a fatal problem given appropriate maintenance - procedures; see for details. It is + procedures; see for details. It is unwise, however, to depend on the uniqueness of transaction IDs over the long term (more than one billion transactions). Command identifiers are also 32-bit quantities. This creates a hard limit - of 232 (4 billion) SQL commands + of 232 (4 billion) SQL commands within a single transaction. In practice this limit is not a problem — note that the limit is on the number of SQL commands, not the number of rows processed. @@ -1167,7 +1167,7 @@ CREATE TABLE circles ( All these actions are performed using the - + command, whose reference page contains details beyond those given here. @@ -1186,7 +1186,7 @@ CREATE TABLE circles ( ALTER TABLE products ADD COLUMN description text; The new column is initially filled with whatever default - value is given (null if you don't specify a DEFAULT clause). + value is given (null if you don't specify a DEFAULT clause). @@ -1196,9 +1196,9 @@ ALTER TABLE products ADD COLUMN description text; ALTER TABLE products ADD COLUMN description text CHECK (description <> ''); In fact all the options that can be applied to a column description - in CREATE TABLE can be used here. 
Keep in mind however + in CREATE TABLE can be used here. Keep in mind however that the default value must satisfy the given constraints, or the - ADD will fail. Alternatively, you can add + ADD will fail. Alternatively, you can add constraints later (see below) after you've filled in the new column correctly. @@ -1210,7 +1210,7 @@ ALTER TABLE products ADD COLUMN description text CHECK (description <> '') specified, PostgreSQL is able to avoid the physical update. So if you intend to fill the column with mostly nondefault values, it's best to add the column with no default, - insert the correct values using UPDATE, and then add any + insert the correct values using UPDATE, and then add any desired default as described below. @@ -1234,11 +1234,11 @@ ALTER TABLE products DROP COLUMN description; foreign key constraint of another table, PostgreSQL will not silently drop that constraint. You can authorize dropping everything that depends on - the column by adding CASCADE: + the column by adding CASCADE: ALTER TABLE products DROP COLUMN description CASCADE; - See for a description of the general + See for a description of the general mechanism behind this. @@ -1290,13 +1290,13 @@ ALTER TABLE products ALTER COLUMN product_no SET NOT NULL; ALTER TABLE products DROP CONSTRAINT some_name; - (If you are dealing with a generated constraint name like $2, + (If you are dealing with a generated constraint name like $2, don't forget that you'll need to double-quote it to make it a valid identifier.) - As with dropping a column, you need to add CASCADE if you + As with dropping a column, you need to add CASCADE if you want to drop a constraint that something else depends on. An example is that a foreign key constraint depends on a unique or primary key constraint on the referenced column(s). @@ -1326,7 +1326,7 @@ ALTER TABLE products ALTER COLUMN product_no DROP NOT NULL; ALTER TABLE products ALTER COLUMN price SET DEFAULT 7.77; Note that this doesn't affect any existing rows in the table, it - just changes the default for future INSERT commands. + just changes the default for future INSERT commands. @@ -1356,12 +1356,12 @@ ALTER TABLE products ALTER COLUMN price TYPE numeric(10,2); This will succeed only if each existing entry in the column can be converted to the new type by an implicit cast. If a more complex - conversion is needed, you can add a USING clause that + conversion is needed, you can add a USING clause that specifies how to compute the new values from the old. - PostgreSQL will attempt to convert the column's + PostgreSQL will attempt to convert the column's default value (if any) to the new type, as well as any constraints that involve the column. But these conversions might fail, or might produce surprising results. It's often best to drop any constraints @@ -1437,16 +1437,16 @@ ALTER TABLE products RENAME TO items; - There are different kinds of privileges: SELECT, - INSERT, UPDATE, DELETE, - TRUNCATE, REFERENCES, TRIGGER, - CREATE, CONNECT, TEMPORARY, - EXECUTE, and USAGE. + There are different kinds of privileges: SELECT, + INSERT, UPDATE, DELETE, + TRUNCATE, REFERENCES, TRIGGER, + CREATE, CONNECT, TEMPORARY, + EXECUTE, and USAGE. The privileges applicable to a particular object vary depending on the object's type (table, function, etc). For complete information on the different types of privileges supported by PostgreSQL, refer to the - reference + reference page. The following sections and chapters will also show you how those privileges are used. 
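As a side note on the USING clause mentioned above, here is a minimal sketch of a type change whose conversion must be spelled out explicitly (the price_text column is hypothetical):

ALTER TABLE products
    ALTER COLUMN price_text TYPE numeric(10,2)
    USING price_text::numeric(10,2);
-- Without the USING clause this change would fail, because text cannot be
-- cast automatically to numeric.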
@@ -1459,7 +1459,7 @@ ALTER TABLE products RENAME TO items; An object can be assigned to a new owner with an ALTER command of the appropriate kind for the object, e.g. . Superusers can always do + linkend="sql-altertable"/>. Superusers can always do this; ordinary roles can only do it if they are both the current owner of the object (or a member of the owning role) and a member of the new owning role. @@ -1480,9 +1480,9 @@ GRANT UPDATE ON accounts TO joe; The special role name PUBLIC can be used to grant a privilege to every role on the system. Also, - group roles can be set up to help manage privileges when + group roles can be set up to help manage privileges when there are many users of a database — for details see - . + . @@ -1492,7 +1492,7 @@ GRANT UPDATE ON accounts TO joe; REVOKE ALL ON accounts FROM PUBLIC; The special privileges of the object owner (i.e., the right to do - DROP, GRANT, REVOKE, etc.) + DROP, GRANT, REVOKE, etc.) are always implicit in being the owner, and cannot be granted or revoked. But the object owner can choose to revoke their own ordinary privileges, for example to make a @@ -1502,12 +1502,12 @@ REVOKE ALL ON accounts FROM PUBLIC; Ordinarily, only the object's owner (or a superuser) can grant or revoke privileges on an object. However, it is possible to grant a - privilege with grant option, which gives the recipient + privilege with grant option, which gives the recipient the right to grant it in turn to others. If the grant option is subsequently revoked then all who received the privilege from that recipient (directly or through a chain of grants) will lose the - privilege. For details see the and - reference pages. + privilege. For details see the and + reference pages. @@ -1524,11 +1524,11 @@ REVOKE ALL ON accounts FROM PUBLIC; In addition to the SQL-standard privilege - system available through , - tables can have row security policies that restrict, + system available through , + tables can have row security policies that restrict, on a per-user basis, which rows can be returned by normal queries or inserted, updated, or deleted by data modification commands. - This feature is also known as Row-Level Security. + This feature is also known as Row-Level Security. By default, tables do not have any policies, so that if a user has access privileges to a table according to the SQL privilege system, all rows within it are equally available for querying or updating. @@ -1537,20 +1537,20 @@ REVOKE ALL ON accounts FROM PUBLIC; When row security is enabled on a table (with ALTER TABLE ... ENABLE ROW LEVEL - SECURITY), all normal access to the table for selecting rows or + SECURITY), all normal access to the table for selecting rows or modifying rows must be allowed by a row security policy. (However, the table's owner is typically not subject to row security policies.) If no policy exists for the table, a default-deny policy is used, meaning that no rows are visible or can be modified. Operations that apply to the - whole table, such as TRUNCATE and REFERENCES, + whole table, such as TRUNCATE and REFERENCES, are not subject to row security. Row security policies can be specific to commands, or to roles, or to both. A policy can be specified to apply to ALL - commands, or to SELECT, INSERT, UPDATE, - or DELETE. Multiple roles can be assigned to a given + commands, or to SELECT, INSERT, UPDATE, + or DELETE. Multiple roles can be assigned to a given policy, and normal role membership and inheritance rules apply. 
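As a brief illustration of the grant option described above (the role bill is hypothetical; joe and accounts come from the surrounding examples):

GRANT SELECT ON accounts TO joe WITH GRANT OPTION;
-- joe can now pass the privilege on, e.g. (run as joe):
--   GRANT SELECT ON accounts TO bill;
REVOKE GRANT OPTION FOR SELECT ON accounts FROM joe CASCADE;
-- bill loses SELECT as well, because his privilege was received from joe.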
@@ -1562,7 +1562,7 @@ REVOKE ALL ON accounts FROM PUBLIC; rule are leakproof functions, which are guaranteed to not leak information; the optimizer may choose to apply such functions ahead of the row-security check.) Rows for which the expression does - not return true will not be processed. Separate expressions + not return true will not be processed. Separate expressions may be specified to provide independent control over the rows which are visible and the rows which are allowed to be modified. Policy expressions are run as part of the query and with the privileges of the @@ -1571,11 +1571,11 @@ REVOKE ALL ON accounts FROM PUBLIC; - Superusers and roles with the BYPASSRLS attribute always + Superusers and roles with the BYPASSRLS attribute always bypass the row security system when accessing a table. Table owners normally bypass row security as well, though a table owner can choose to be subject to row security with ALTER - TABLE ... FORCE ROW LEVEL SECURITY. + TABLE ... FORCE ROW LEVEL SECURITY. @@ -1584,11 +1584,11 @@ REVOKE ALL ON accounts FROM PUBLIC; - Policies are created using the - command, altered using the command, - and dropped using the command. To + Policies are created using the + command, altered using the command, + and dropped using the command. To enable and disable row security for a given table, use the - command. + command. @@ -1609,8 +1609,8 @@ REVOKE ALL ON accounts FROM PUBLIC; As a simple example, here is how to create a policy on - the account relation to allow only members of - the managers role to access rows, and only rows of their + the account relation to allow only members of + the managers role to access rows, and only rows of their accounts: @@ -1623,11 +1623,22 @@ CREATE POLICY account_managers ON accounts TO managers USING (manager = current_user); + + The policy above implicitly provides a WITH CHECK + clause identical to its USING clause, so that the + constraint applies both to rows selected by a command (so a manager + cannot SELECT, UPDATE, + or DELETE existing rows belonging to a different + manager) and to rows modified by a command (so rows belonging to a + different manager cannot be created via INSERT + or UPDATE). + + If no role is specified, or the special user name PUBLIC is used, then the policy applies to all - users on the system. To allow all users to access their own row in - a users table, a simple policy can be used: + users on the system. To allow all users to access only their own row in + a users table, a simple policy can be used: @@ -1635,21 +1646,34 @@ CREATE POLICY user_policy ON users USING (user_name = current_user); + + This works similarly to the previous example. + + To use a different policy for rows that are being added to the table - compared to those rows that are visible, the WITH CHECK - clause can be used. This policy would allow all users to view all rows - in the users table, but only modify their own: + compared to those rows that are visible, multiple policies can be + combined. This pair of policies would allow all users to view all rows + in the users table, but only modify their own: -CREATE POLICY user_policy ON users - USING (true) - WITH CHECK (user_name = current_user); +CREATE POLICY user_sel_policy ON users + FOR SELECT + USING (true); +CREATE POLICY user_mod_policy ON users + USING (user_name = current_user); - Row security can also be disabled with the ALTER TABLE + In a SELECT command, these two policies are combined + using OR, with the net effect being that all rows + can be selected. 
In other command types, only the second policy applies, + so that the effects are the same as before. + + + + Row security can also be disabled with the ALTER TABLE command. Disabling row security does not remove any policies that are defined on the table; they are simply ignored. Then all rows in the table are visible and modifiable, subject to the standard SQL privileges @@ -1658,7 +1682,7 @@ CREATE POLICY user_policy ON users Below is a larger example of how this feature can be used in production - environments. The table passwd emulates a Unix password + environments. The table passwd emulates a Unix password file: @@ -1820,7 +1844,7 @@ UPDATE 0 Referential integrity checks, such as unique or primary key constraints and foreign key references, always bypass row security to ensure that data integrity is maintained. Care must be taken when developing - schemas and row level policies to avoid covert channel leaks of + schemas and row level policies to avoid covert channel leaks of information through such referential integrity checks. @@ -1829,8 +1853,8 @@ UPDATE 0 not being applied. For example, when taking a backup, it could be disastrous if row security silently caused some rows to be omitted from the backup. In such a situation, you can set the - configuration parameter - to off. This does not in itself bypass row security; + configuration parameter + to off. This does not in itself bypass row security; what it does is throw an error if any query's results would get filtered by a policy. The reason for the error can then be investigated and fixed. @@ -1842,7 +1866,7 @@ UPDATE 0 best-performing case; when possible, it's best to design row security applications to work this way. If it is necessary to consult other rows or other tables to make a policy decision, that can be accomplished using - sub-SELECTs, or functions that contain SELECTs, + sub-SELECTs, or functions that contain SELECTs, in the policy expressions. Be aware however that such accesses can create race conditions that could allow information leakage if care is not taken. As an example, consider the following table design: @@ -1896,8 +1920,8 @@ GRANT ALL ON information TO public; - Now suppose that alice wishes to change the slightly - secret information, but decides that mallory should not + Now suppose that alice wishes to change the slightly + secret information, but decides that mallory should not be trusted with the new content of that row, so she does: @@ -1909,36 +1933,36 @@ COMMIT; - That looks safe; there is no window wherein mallory should be - able to see the secret from mallory string. However, there is - a race condition here. If mallory is concurrently doing, + That looks safe; there is no window wherein mallory should be + able to see the secret from mallory string. However, there is + a race condition here. If mallory is concurrently doing, say, SELECT * FROM information WHERE group_id = 2 FOR UPDATE; - and her transaction is in READ COMMITTED mode, it is possible - for her to see secret from mallory. That happens if her - transaction reaches the information row just - after alice's does. It blocks waiting - for alice's transaction to commit, then fetches the updated - row contents thanks to the FOR UPDATE clause. 
However, it - does not fetch an updated row for the - implicit SELECT from users, because that - sub-SELECT did not have FOR UPDATE; instead - the users row is read with the snapshot taken at the start + and her transaction is in READ COMMITTED mode, it is possible + for her to see secret from mallory. That happens if her + transaction reaches the information row just + after alice's does. It blocks waiting + for alice's transaction to commit, then fetches the updated + row contents thanks to the FOR UPDATE clause. However, it + does not fetch an updated row for the + implicit SELECT from users, because that + sub-SELECT did not have FOR UPDATE; instead + the users row is read with the snapshot taken at the start of the query. Therefore, the policy expression tests the old value - of mallory's privilege level and allows her to see the + of mallory's privilege level and allows her to see the updated row. There are several ways around this problem. One simple answer is to use - SELECT ... FOR SHARE in sub-SELECTs in row - security policies. However, that requires granting UPDATE - privilege on the referenced table (here users) to the + SELECT ... FOR SHARE in sub-SELECTs in row + security policies. However, that requires granting UPDATE + privilege on the referenced table (here users) to the affected users, which might be undesirable. (But another row security policy could be applied to prevent them from actually exercising that - privilege; or the sub-SELECT could be embedded into a security + privilege; or the sub-SELECT could be embedded into a security definer function.) Also, heavy concurrent use of row share locks on the referenced table could pose a performance problem, especially if updates of it are frequent. Another solution, practical if updates of the @@ -1951,8 +1975,8 @@ SELECT * FROM information WHERE group_id = 2 FOR UPDATE; - For additional details see - and . + For additional details see + and . @@ -1977,19 +2001,19 @@ SELECT * FROM information WHERE group_id = 2 FOR UPDATE; Users of a cluster do not necessarily have the privilege to access every database in the cluster. Sharing of user names means that there - cannot be different users named, say, joe in two databases + cannot be different users named, say, joe in two databases in the same cluster; but the system can be configured to allow - joe access to only some of the databases. + joe access to only some of the databases. - A database contains one or more named schemas, which + A database contains one or more named schemas, which in turn contain tables. Schemas also contain other kinds of named objects, including data types, functions, and operators. The same object name can be used in different schemas without conflict; for - example, both schema1 and myschema can - contain tables named mytable. Unlike databases, + example, both schema1 and myschema can + contain tables named mytable. Unlike databases, schemas are not rigidly separated: a user can access objects in any of the schemas in the database they are connected to, if they have privileges to do so. @@ -2034,7 +2058,7 @@ SELECT * FROM information WHERE group_id = 2 FOR UPDATE; - To create a schema, use the + To create a schema, use the command. Give the schema a name of your choice. 
For example: @@ -2053,10 +2077,10 @@ CREATE SCHEMA myschema; To create or access objects in a schema, write a - qualified name consisting of the schema name and + qualified name consisting of the schema name and table name separated by a dot: -schema.table +schema.table This works anywhere a table name is expected, including the table modification commands and the data access commands discussed in @@ -2068,10 +2092,10 @@ CREATE SCHEMA myschema; Actually, the even more general syntax -database.schema.table +database.schema.table can be used too, but at present this is just for pro - forma compliance with the SQL standard. If you write a database name, + forma compliance with the SQL standard. If you write a database name, it must be the same as the database you are connected to. @@ -2099,7 +2123,7 @@ DROP SCHEMA myschema; DROP SCHEMA myschema CASCADE; - See for a description of the general + See for a description of the general mechanism behind this. @@ -2112,11 +2136,11 @@ CREATE SCHEMA schema_name AUTHORIZATION You can even omit the schema name, in which case the schema name will be the same as the user name. See for how this can be useful. + linkend="ddl-schemas-patterns"/> for how this can be useful. - Schema names beginning with pg_ are reserved for + Schema names beginning with pg_ are reserved for system purposes and cannot be created by users. @@ -2163,15 +2187,29 @@ CREATE TABLE public.products ( ... ); Qualified names are tedious to write, and it's often best not to wire a particular schema name into applications anyway. Therefore - tables are often referred to by unqualified names, + tables are often referred to by unqualified names, which consist of just the table name. The system determines which table - is meant by following a search path, which is a list + is meant by following a search path, which is a list of schemas to look in. The first matching table in the search path is taken to be the one wanted. If there is no match in the search path, an error is reported, even if matching table names exist in other schemas in the database. + + The ability to create like-named objects in different schemas complicates + writing a query that references precisely the same objects every time. It + also opens up the potential for users to change the behavior of other + users' queries, maliciously or accidentally. Due to the prevalence of + unqualified names in queries and their use + in PostgreSQL internals, adding a schema + to search_path effectively trusts all users having + CREATE privilege on that schema. When you run an + ordinary query, a malicious user able to create objects in a schema of + your search path can take control and execute arbitrary SQL functions as + though you executed them. + + schema current @@ -2180,7 +2218,7 @@ CREATE TABLE public.products ( ... ); The first schema named in the search path is called the current schema. Aside from being the first schema searched, it is also the schema in - which new tables will be created if the CREATE TABLE + which new tables will be created if the CREATE TABLE command does not specify a schema name. @@ -2242,7 +2280,7 @@ SET search_path TO myschema; - See also for other ways to manipulate + See also for other ways to manipulate the schema search path. @@ -2253,7 +2291,7 @@ SET search_path TO myschema; need to write a qualified operator name in an expression, there is a special provision: you must write -OPERATOR(schema.operator) +OPERATOR(schema.operator) This is needed to avoid syntactic ambiguity. 
An example is: @@ -2288,8 +2326,9 @@ SELECT 3 OPERATOR(pg_catalog.+) 4; the schema public. This allows all users that are able to connect to a given database to create objects in its - public schema. If you do - not want to allow that, you can revoke that privilege: + public schema. + Some usage patterns call for + revoking that privilege: REVOKE CREATE ON SCHEMA public FROM PUBLIC; @@ -2297,7 +2336,7 @@ REVOKE CREATE ON SCHEMA public FROM PUBLIC; public means every user. In the first sense it is an identifier, in the second sense it is a key word, hence the different capitalization; recall the - guidelines from .) + guidelines from .) @@ -2310,28 +2349,28 @@ REVOKE CREATE ON SCHEMA public FROM PUBLIC; - In addition to public and user-created schemas, each - database contains a pg_catalog schema, which contains + In addition to public and user-created schemas, each + database contains a pg_catalog schema, which contains the system tables and all the built-in data types, functions, and - operators. pg_catalog is always effectively part of + operators. pg_catalog is always effectively part of the search path. If it is not named explicitly in the path then - it is implicitly searched before searching the path's + it is implicitly searched before searching the path's schemas. This ensures that built-in names will always be findable. However, you can explicitly place - pg_catalog at the end of your search path if you + pg_catalog at the end of your search path if you prefer to have user-defined names override built-in names. - Since system table names begin with pg_, it is best to + Since system table names begin with pg_, it is best to avoid such names to ensure that you won't suffer a conflict if some future version defines a system table named the same as your table. (With the default search path, an unqualified reference to your table name would then be resolved as the system table instead.) System tables will continue to follow the convention of having - names beginning with pg_, so that they will not + names beginning with pg_, so that they will not conflict with unqualified user-table names so long as users avoid - the pg_ prefix. + the pg_ prefix. @@ -2339,50 +2378,80 @@ REVOKE CREATE ON SCHEMA public FROM PUBLIC; Usage Patterns - Schemas can be used to organize your data in many ways. There are - a few usage patterns that are recommended and are easily supported by - the default configuration: + Schemas can be used to organize your data in many ways. There are a few + usage patterns easily supported by the default configuration, only one of + which suffices when database users mistrust other database users: + - If you do not create any schemas then all users access the - public schema implicitly. This simulates the situation where - schemas are not available at all. This setup is mainly - recommended when there is only a single user or a few cooperating - users in a database. This setup also allows smooth transition - from the non-schema-aware world. + Constrain ordinary users to user-private schemas. To implement this, + issue REVOKE CREATE ON SCHEMA public FROM PUBLIC, + and create a schema for each user with the same name as that user. If + affected users had logged in before this, consider auditing the public + schema for objects named like objects in + schema pg_catalog. Recall that the default search + path starts with $user, which resolves to the user + name. Therefore, if each user has a separate schema, they access their + own schemas by default. 
- You can create a schema for each user with the same name as - that user. Recall that the default search path starts with - $user, which resolves to the user name. - Therefore, if each user has a separate schema, they access their - own schemas by default. + Remove the public schema from each user's default search path + using ALTER ROLE user SET + search_path = "$user". Everyone retains the ability to + create objects in the public schema, but only qualified names will + choose those objects. While qualified table references are fine, calls + to functions in the public schema will be + unsafe or unreliable. Also, a user holding + the CREATEROLE privilege can undo this setting and + issue arbitrary queries under the identity of users relying on the + setting. If you create functions or extensions in the public schema or + grant CREATEROLE to users not warranting this + almost-superuser ability, use the first pattern instead. + + - If you use this setup then you might also want to revoke access - to the public schema (or drop it altogether), so users are - truly constrained to their own schemas. + Remove the public schema from search_path in + postgresql.conf. + The ensuing user experience matches the previous pattern. In addition + to that pattern's implications for functions + and CREATEROLE, this trusts database owners + like CREATEROLE. If you create functions or + extensions in the public schema or assign + the CREATEROLE + privilege, CREATEDB privilege or individual database + ownership to users not warranting almost-superuser access, use the + first pattern instead. - To install shared applications (tables to be used by everyone, - additional functions provided by third parties, etc.), put them - into separate schemas. Remember to grant appropriate - privileges to allow the other users to access them. Users can - then refer to these additional objects by qualifying the names - with a schema name, or they can put the additional schemas into - their search path, as they choose. + Keep the default. All users access the public schema implicitly. This + simulates the situation where schemas are not available at all, giving + a smooth transition from the non-schema-aware world. However, any user + can issue arbitrary queries under the identity of any user not electing + to protect itself individually. This pattern is acceptable only when + the database has a single user or a few mutually-trusting users. + + + For any pattern, to install shared applications (tables to be used by + everyone, additional functions provided by third parties, etc.), put them + into separate schemas. Remember to grant appropriate privileges to allow + the other users to access them. Users can then refer to these additional + objects by qualifying the names with a schema name, or they can put the + additional schemas into their search path, as they choose. + @@ -2397,15 +2466,15 @@ REVOKE CREATE ON SCHEMA public FROM PUBLIC; implements only the basic schema support specified in the standard. Therefore, many users consider qualified names to really consist of - user_name.table_name. + user_name.table_name. This is how PostgreSQL will effectively behave if you create a per-user schema for every user. - Also, there is no concept of a public schema in the + Also, there is no concept of a public schema in the SQL standard. For maximum conformance to the standard, you should - not use (perhaps even remove) the public schema. + not use the public schema. 
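A minimal sketch of the first two usage patterns listed above, assuming a role named alice (the role name is illustrative):

-- Pattern 1: constrain ordinary users to user-private schemas
REVOKE CREATE ON SCHEMA public FROM PUBLIC;
CREATE SCHEMA alice AUTHORIZATION alice;
-- With the default search_path ("$user", public), alice's unqualified
-- names now resolve to her own schema first.

-- Pattern 2 (a separate alternative): leave public creatable, but drop it
-- from the role's search path so only qualified names reach it
ALTER ROLE alice SET search_path = "$user";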
@@ -2461,9 +2530,9 @@ CREATE TABLE capitals ( ) INHERITS (cities); - In this case, the capitals table inherits - all the columns of its parent table, cities. State - capitals also have an extra column, state, that shows + In this case, the capitals table inherits + all the columns of its parent table, cities. State + capitals also have an extra column, state, that shows their state. @@ -2483,7 +2552,7 @@ SELECT name, altitude Given the sample data from the PostgreSQL - tutorial (see ), this returns: + tutorial (see ), this returns: name | altitude @@ -2521,7 +2590,7 @@ SELECT name, altitude - You can also write the table name with a trailing * + You can also write the table name with a trailing * to explicitly specify that descendant tables are included: @@ -2530,7 +2599,7 @@ SELECT name, altitude WHERE altitude > 500; - Writing * is not necessary, since this behavior is always + Writing * is not necessary, since this behavior is always the default. However, this syntax is still supported for compatibility with older releases where the default could be changed. @@ -2559,7 +2628,7 @@ WHERE c.altitude > 500; (If you try to reproduce this example, you will probably get different numeric OIDs.) By doing a join with - pg_class you can see the actual table names: + pg_class you can see the actual table names: SELECT p.relname, c.name, c.altitude @@ -2579,7 +2648,7 @@ WHERE c.altitude > 500 AND c.tableoid = p.oid; - Another way to get the same effect is to use the regclass + Another way to get the same effect is to use the regclass alias type, which will print the table OID symbolically: @@ -2602,16 +2671,16 @@ VALUES ('Albany', NULL, NULL, 'NY'); capitals table, but this does not happen: INSERT always inserts into exactly the table specified. In some cases it is possible to redirect the insertion - using a rule (see ). However that does not - help for the above case because the cities table - does not contain the column state, and so the + using a rule (see ). However that does not + help for the above case because the cities table + does not contain the column state, and so the command will be rejected before the rule can be applied. All check constraints and not-null constraints on a parent table are automatically inherited by its children, unless explicitly specified - otherwise with NO INHERIT clauses. Other types of constraints + otherwise with NO INHERIT clauses. Other types of constraints (unique, primary key, and foreign key constraints) are not inherited. @@ -2620,7 +2689,7 @@ VALUES ('Albany', NULL, NULL, 'NY'); the union of the columns defined by the parent tables. Any columns declared in the child table's definition are added to these. If the same column name appears in multiple parent tables, or in both a parent - table and the child's definition, then these columns are merged + table and the child's definition, then these columns are merged so that there is only one such column in the child table. To be merged, columns must have the same data types, else an error is raised. Inheritable check constraints and not-null constraints are merged in a @@ -2632,20 +2701,20 @@ VALUES ('Albany', NULL, NULL, 'NY'); Table inheritance is typically established when the child table is - created, using the INHERITS clause of the - + created, using the INHERITS clause of the + statement. Alternatively, a table which is already defined in a compatible way can have a new parent relationship added, using the INHERIT - variant of . + variant of . 
To do this the new child table must already include columns with the same names and types as the columns of the parent. It must also include check constraints with the same names and check expressions as those of the parent. Similarly an inheritance link can be removed from a child using the - NO INHERIT variant of ALTER TABLE. + NO INHERIT variant of ALTER TABLE. Dynamically adding and removing inheritance links like this can be useful when the inheritance relationship is being used for table - partitioning (see ). + partitioning (see ). @@ -2665,11 +2734,11 @@ VALUES ('Albany', NULL, NULL, 'NY'); if they are inherited from any parent tables. If you wish to remove a table and all of its descendants, one easy way is to drop the parent table with the - CASCADE option (see ). + CASCADE option (see ). - will + will propagate any changes in column data definitions and check constraints down the inheritance hierarchy. Again, dropping columns that are depended on by other tables is only possible when using @@ -2680,14 +2749,14 @@ VALUES ('Albany', NULL, NULL, 'NY'); Inherited queries perform access permission checks on the parent table - only. Thus, for example, granting UPDATE permission on - the cities table implies permission to update rows in + only. Thus, for example, granting UPDATE permission on + the cities table implies permission to update rows in the capitals table as well, when they are - accessed through cities. This preserves the appearance + accessed through cities. This preserves the appearance that the data is (also) in the parent table. But the capitals table could not be updated directly without an additional grant. In a similar way, the parent table's row - security policies (see ) are applied to + security policies (see ) are applied to rows coming from child tables during an inherited query. A child table's policies, if any, are applied only when it is the table explicitly named in the query; and in that case, any policies attached to its parent(s) are @@ -2695,7 +2764,7 @@ VALUES ('Albany', NULL, NULL, 'NY'); - Foreign tables (see ) can also + Foreign tables (see ) can also be part of inheritance hierarchies, either as parent or child tables, just as regular tables can be. If a foreign table is part of an inheritance hierarchy then any operations not supported by @@ -2719,7 +2788,7 @@ VALUES ('Albany', NULL, NULL, 'NY'); typically only work on individual, physical tables and do not support recursing over inheritance hierarchies. The respective behavior of each individual command is documented in its reference - page (). + page (). @@ -2732,33 +2801,33 @@ VALUES ('Albany', NULL, NULL, 'NY'); - If we declared cities.name to be - UNIQUE or a PRIMARY KEY, this would not stop the - capitals table from having rows with names duplicating - rows in cities. And those duplicate rows would by - default show up in queries from cities. In fact, by - default capitals would have no unique constraint at all, + If we declared cities.name to be + UNIQUE or a PRIMARY KEY, this would not stop the + capitals table from having rows with names duplicating + rows in cities. And those duplicate rows would by + default show up in queries from cities. In fact, by + default capitals would have no unique constraint at all, and so could contain multiple rows with the same name. - You could add a unique constraint to capitals, but this - would not prevent duplication compared to cities. + You could add a unique constraint to capitals, but this + would not prevent duplication compared to cities. 
Similarly, if we were to specify that - cities.name REFERENCES some + cities.name REFERENCES some other table, this constraint would not automatically propagate to - capitals. In this case you could work around it by - manually adding the same REFERENCES constraint to - capitals. + capitals. In this case you could work around it by + manually adding the same REFERENCES constraint to + capitals. Specifying that another table's column REFERENCES - cities(name) would allow the other table to contain city names, but + cities(name) would allow the other table to contain city names, but not capital names. There is no good workaround for this case. @@ -2825,10 +2894,10 @@ VALUES ('Albany', NULL, NULL, 'NY'); Bulk loads and deletes can be accomplished by adding or removing partitions, if that requirement is planned into the partitioning design. - Doing ALTER TABLE DETACH PARTITION or dropping an individual - partition using DROP TABLE is far faster than a bulk + Doing ALTER TABLE DETACH PARTITION or dropping an individual + partition using DROP TABLE is far faster than a bulk operation. These commands also entirely avoid the - VACUUM overhead caused by a bulk DELETE. + VACUUM overhead caused by a bulk DELETE. @@ -2875,6 +2944,19 @@ VALUES ('Albany', NULL, NULL, 'NY'); + + + Hash Partitioning + + + + The table is partitioned by specifying a modulus and a remainder for + each partition. Each partition will hold the rows for which the hash + value of the partition key divided by the specified modulus will + produce the specified remainder. + + + If your application needs to use other forms of partitioning not listed @@ -2893,7 +2975,7 @@ VALUES ('Albany', NULL, NULL, 'NY'); divide a table into pieces called partitions. The table that is divided is referred to as a partitioned table. The specification consists of the partitioning method - and a list of columns or expressions to be used as the + and a list of columns or expressions to be used as the partition key. @@ -2901,18 +2983,16 @@ VALUES ('Albany', NULL, NULL, 'NY'); All rows inserted into a partitioned table will be routed to one of the partitions based on the value of the partition key. Each partition has a subset of the data defined by its - partition bounds. Currently supported - partitioning methods include range and list, where each partition is - assigned a range of keys and a list of keys, respectively. + partition bounds. The currently supported + partitioning methods are range, list, and hash. Partitions may themselves be defined as partitioned tables, using what is called sub-partitioning. Partitions may have their own indexes, constraints and default values, distinct from those of other - partitions. Indexes must be created separately for each partition. See - for more details on creating partitioned - tables and partitions. + partitions. See for more details on + creating partitioned tables and partitions. @@ -2920,23 +3000,24 @@ VALUES ('Albany', NULL, NULL, 'NY'); vice versa. However, it is possible to add a regular or partitioned table containing data as a partition of a partitioned table, or remove a partition from a partitioned table turning it into a standalone table; - see to learn more about the - ATTACH PARTITION and DETACH PARTITION + see to learn more about the + ATTACH PARTITION and DETACH PARTITION sub-commands. 
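A minimal sketch of the hash partitioning method described above (the clicks table and its column names are illustrative):

CREATE TABLE clicks (
    user_id    bigint,
    clicked_at timestamp with time zone
) PARTITION BY HASH (user_id);

CREATE TABLE clicks_p0 PARTITION OF clicks FOR VALUES WITH (MODULUS 4, REMAINDER 0);
CREATE TABLE clicks_p1 PARTITION OF clicks FOR VALUES WITH (MODULUS 4, REMAINDER 1);
CREATE TABLE clicks_p2 PARTITION OF clicks FOR VALUES WITH (MODULUS 4, REMAINDER 2);
CREATE TABLE clicks_p3 PARTITION OF clicks FOR VALUES WITH (MODULUS 4, REMAINDER 3);
-- Each row is routed to the partition whose remainder matches the hash of
-- user_id modulo 4.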
Individual partitions are linked to the partitioned table with inheritance behind-the-scenes; however, it is not possible to use some of the - inheritance features discussed in the previous section with partitioned - tables and partitions. For example, a partition cannot have any parents - other than the partitioned table it is a partition of, nor can a regular - table inherit from a partitioned table making the latter its parent. - That means partitioned tables and partitions do not participate in - inheritance with regular tables. Since a partition hierarchy consisting - of the partitioned table and its partitions is still an inheritance - hierarchy, all the normal rules of inheritance apply as described in - with some exceptions, most notably: + generic features of inheritance (discussed below) with declaratively + partitioned tables or their partitions. For example, a partition + cannot have any parents other than the partitioned table it is a + partition of, nor can a regular table inherit from a partitioned table + making the latter its parent. That means partitioned tables and their + partitions do not participate in inheritance with regular tables. + Since a partition hierarchy consisting of the partitioned table and its + partitions is still an inheritance hierarchy, all the normal rules of + inheritance apply as described in with + some exceptions, most notably: @@ -2952,25 +3033,30 @@ VALUES ('Albany', NULL, NULL, 'NY'); Using ONLY to add or drop a constraint on only the - partitioned table is supported when there are no partitions. Once + partitioned table is supported as long as there are no partitions. Once partitions exist, using ONLY will result in an error as adding or dropping constraints on only the partitioned table, when - partitions exist, is not supported. Instead, constraints can be added - or dropped, when they are not present in the parent table, directly on - the partitions. As a partitioned table does not have any data - directly, attempts to use TRUNCATE - ONLY on a partitioned table will always return an - error. + partitions exist, is not supported. Instead, constraints on the + partitions themselves can be added and (if they are not present in the + parent table) dropped. + + + + + + As a partitioned table does not have any data directly, attempts to use + TRUNCATE ONLY on a partitioned + table will always return an error. Partitions cannot have columns that are not present in the parent. It - is neither possible to specify columns when creating partitions with - CREATE TABLE nor is it possible to add columns to - partitions after-the-fact using ALTER TABLE. Tables may be - added as a partition with ALTER TABLE ... ATTACH PARTITION + is not possible to specify columns when creating partitions with + CREATE TABLE, nor is it possible to add columns to + partitions after-the-fact using ALTER TABLE. Tables may be + added as a partition with ALTER TABLE ... ATTACH PARTITION only if their columns exactly match the parent, including any oid column. @@ -2986,11 +3072,14 @@ VALUES ('Albany', NULL, NULL, 'NY'); - Partitions can also be foreign tables - (see ), - although these have some limitations that normal tables do not. For - example, data inserted into the partitioned table is not routed to - foreign table partitions. + Partitions can also be foreign tables, although they have some limitations + that normal tables do not; see for + more information. 
+ + + + Updating the partition key of a row might cause it to be moved into a + different partition where this row satisfies the partition bounds. @@ -3049,7 +3138,7 @@ CREATE TABLE measurement ( accessing the partitioned table will have to scan fewer partitions if the conditions involve some or all of these columns. For example, consider a table range partitioned using columns - lastname and firstname (in that order) + lastname and firstname (in that order) as the partition key. @@ -3061,13 +3150,13 @@ CREATE TABLE measurement ( parent. Note that specifying bounds such that the new partition's values will overlap with those in one or more existing partitions will cause an error. Inserting data into the parent table that does not map - to one of the existing partitions will cause an error; appropriate + to one of the existing partitions will cause an error; an appropriate partition must be added manually. Partitions thus created are in every way normal - PostgreSQL + PostgreSQL tables (or, possibly, foreign tables). It is possible to specify a tablespace and storage parameters for each partition separately. @@ -3080,14 +3169,14 @@ CREATE TABLE measurement ( CREATE TABLE measurement_y2006m02 PARTITION OF measurement - FOR VALUES FROM ('2006-02-01') TO ('2006-03-01') + FOR VALUES FROM ('2006-02-01') TO ('2006-03-01'); CREATE TABLE measurement_y2006m03 PARTITION OF measurement - FOR VALUES FROM ('2006-03-01') TO ('2006-04-01') + FOR VALUES FROM ('2006-03-01') TO ('2006-04-01'); ... CREATE TABLE measurement_y2007m11 PARTITION OF measurement - FOR VALUES FROM ('2007-11-01') TO ('2007-12-01') + FOR VALUES FROM ('2007-11-01') TO ('2007-12-01'); CREATE TABLE measurement_y2007m12 PARTITION OF measurement FOR VALUES FROM ('2007-12-01') TO ('2008-01-01') @@ -3095,8 +3184,8 @@ CREATE TABLE measurement_y2007m12 PARTITION OF measurement CREATE TABLE measurement_y2008m01 PARTITION OF measurement FOR VALUES FROM ('2008-01-01') TO ('2008-02-01') - TABLESPACE fasttablespace - WITH (parallel_workers = 4); + WITH (parallel_workers = 4) + TABLESPACE fasttablespace; @@ -3111,12 +3200,12 @@ CREATE TABLE measurement_y2006m02 PARTITION OF measurement PARTITION BY RANGE (peaktemp); - After creating partitions of measurement_y2006m02, - any data inserted into measurement that is mapped to - measurement_y2006m02 (or data that is directly inserted - into measurement_y2006m02, provided it satisfies its + After creating partitions of measurement_y2006m02, + any data inserted into measurement that is mapped to + measurement_y2006m02 (or data that is directly inserted + into measurement_y2006m02, provided it satisfies its partition constraint) will be further redirected to one of its - partitions based on the peaktemp column. The partition + partitions based on the peaktemp column. The partition key specified may overlap with the parent's partition key, although care should be taken when specifying the bounds of a sub-partition such that the set of data it accepts constitutes a subset of what @@ -3128,26 +3217,22 @@ CREATE TABLE measurement_y2006m02 PARTITION OF measurement Create an index on the key column(s), as well as any other indexes you - might want for every partition. (The key index is not strictly - necessary, but in most scenarios it is helpful. If you intend the key - values to be unique then you should always create a unique or - primary-key constraint for each partition.) + might want, on the partitioned table. (The key index is not strictly + necessary, but in most scenarios it is helpful.) 
+ This automatically creates + one index on each partition, and any partitions you create or attach + later will also contain the index. -CREATE INDEX ON measurement_y2006m02 (logdate); -CREATE INDEX ON measurement_y2006m03 (logdate); -... -CREATE INDEX ON measurement_y2007m11 (logdate); -CREATE INDEX ON measurement_y2007m12 (logdate); -CREATE INDEX ON measurement_y2008m01 (logdate); +CREATE INDEX ON measurement (logdate); - Ensure that the - configuration parameter is not disabled in postgresql.conf. + Ensure that the + configuration parameter is not disabled in postgresql.conf. If it is, queries will not be optimized as desired. @@ -3197,7 +3282,7 @@ ALTER TABLE measurement DETACH PARTITION measurement_y2006m02; This allows further operations to be performed on the data before it is dropped. For example, this is often a useful time to back up - the data using COPY, pg_dump, or + the data using COPY, pg_dump, or similar tools. It might also be a useful time to aggregate data into smaller formats, perform other data manipulations, or run reports. @@ -3236,14 +3321,14 @@ ALTER TABLE measurement ATTACH PARTITION measurement_y2008m02 - Before running the ATTACH PARTITION command, it is - recommended to create a CHECK constraint on the table to + Before running the ATTACH PARTITION command, it is + recommended to create a CHECK constraint on the table to be attached describing the desired partition constraint. That way, the system will be able to skip the scan to validate the implicit partition constraint. Without such a constraint, the table will be scanned to validate the partition constraint while holding an ACCESS EXCLUSIVE lock on the parent table. - One may then drop the constraint after ATTACH PARTITION + One may then drop the constraint after ATTACH PARTITION is finished, because it is no longer necessary. @@ -3256,45 +3341,55 @@ ALTER TABLE measurement ATTACH PARTITION measurement_y2008m02 - There is no facility available to create the matching indexes on all - partitions automatically. Indexes must be added to each partition with - separate commands. This also means that there is no way to create a - primary key, unique constraint, or exclusion constraint spanning all - partitions; it is only possible to constrain each leaf partition - individually. + There is no way to create an + exclusion constraint spanning all partitions; it is only possible + to constrain each leaf partition individually. - Since primary keys are not supported on partitioned tables, foreign - keys referencing partitioned tables are not supported, nor are foreign - key references from a partitioned table to some other table. + While primary keys are supported on partitioned tables, foreign + keys referencing partitioned tables are not supported. (Foreign key + references from a partitioned table to some other table are supported.) - Using the ON CONFLICT clause with partitioned tables - will cause an error, because unique or exclusion constraints can only be - created on individual partitions. There is no support for enforcing - uniqueness (or an exclusion constraint) across an entire partitioning - hierarchy. + When an UPDATE causes a row to move from one + partition to another, there is a chance that another concurrent + UPDATE or DELETE misses this row. + Suppose session 1 is performing an UPDATE on a + partition key, and meanwhile a concurrent session 2 for which this row + is visible performs an UPDATE or + DELETE operation on this row. 
Session 2 can silently + miss the row if the row is deleted from the partition due to session + 1's activity. In such case, session 2's + UPDATE or DELETE, being unaware of + the row movement thinks that the row has just been deleted and concludes + that there is nothing to be done for this row. In the usual case where + the table is not partitioned, or where there is no row movement, + session 2 would have identified the newly updated row and carried out + the UPDATE/DELETE on this new row + version. - An UPDATE that causes a row to move from one partition to - another fails, because the new value of the row fails to satisfy the - implicit partition constraint of the original partition. + BEFORE ROW triggers, if necessary, must be defined + on individual partitions, not the partitioned table. - Row triggers, if necessary, must be defined on individual partitions, - not the partitioned table. + Mixing temporary and permanent relations in the same partition tree is + not allowed. Hence, if the partitioned table is permanent, so must be + its partitions and likewise if the partitioned table is temporary. When + using temporary relations, all members of the partition tree have to be + from the same session. @@ -3308,15 +3403,15 @@ ALTER TABLE measurement ATTACH PARTITION measurement_y2008m02 While the built-in declarative partitioning is suitable for most common use cases, there are some circumstances where a more flexible approach may be useful. Partitioning can be implemented using table - inheritance, which allows for several features which are not supported + inheritance, which allows for several features not supported by declarative partitioning, such as: - Partitioning enforces a rule that all partitions must have exactly - the same set of columns as the parent, but table inheritance allows - children to have extra columns not present in the parent. + For declarative partitioning, partitions must have exactly the same set + of columns as the partitioned table, whereas with table inheritance, + child tables may have extra columns not present in the parent. @@ -3328,11 +3423,11 @@ ALTER TABLE measurement ATTACH PARTITION measurement_y2008m02 - Declarative partitioning only supports list and range partitioning, - whereas table inheritance allows data to be divided in a manner of - the user's choosing. (Note, however, that if constraint exclusion is - unable to prune partitions effectively, query performance will be very - poor.) + Declarative partitioning only supports range, list and hash + partitioning, whereas table inheritance allows data to be divided in a + manner of the user's choosing. (Note, however, that if constraint + exclusion is unable to prune child tables effectively, query performance + might be poor.) @@ -3354,18 +3449,18 @@ ALTER TABLE measurement ATTACH PARTITION measurement_y2008m02 We use the same measurement table we used - above. To implement it as a partitioned table using inheritance, use + above. To implement partitioning using inheritance, use the following steps: Create the master table, from which all of the - partitions will inherit. This table will contain no data. Do not + child tables will inherit. This table will contain no data. Do not define any check constraints on this table, unless you intend them - to be applied equally to all partitions. There is no point in + to be applied equally to all child tables. There is no point in defining any indexes or unique constraints on it, either. 
For our - example, master table is the measurement + example, the master table is the measurement table as originally defined. @@ -3375,8 +3470,8 @@ ALTER TABLE measurement ATTACH PARTITION measurement_y2008m02 Create several child tables that each inherit from the master table. Normally, these tables will not add any columns to the set inherited from the master. Just as with declarative - partitioning, these partitions are in every way normal - PostgreSQL tables (or foreign tables). + partitioning, these tables are in every way normal + PostgreSQL tables (or foreign tables). @@ -3393,8 +3488,8 @@ CREATE TABLE measurement_y2008m01 () INHERITS (measurement); - Add non-overlapping table constraints to the partition tables to - define the allowed key values in each partition. + Add non-overlapping table constraints to the child tables to + define the allowed key values in each. @@ -3405,18 +3500,18 @@ CHECK ( county IN ( 'Oxfordshire', 'Buckinghamshire', 'Warwickshire' )) CHECK ( outletID >= 100 AND outletID < 200 ) Ensure that the constraints guarantee that there is no overlap - between the key values permitted in different partitions. A common + between the key values permitted in different child tables. A common mistake is to set up range constraints like: CHECK ( outletID BETWEEN 100 AND 200 ) CHECK ( outletID BETWEEN 200 AND 300 ) - This is wrong since it is not clear which partition the key value - 200 belongs in. + This is wrong since it is not clear which child table the key + value 200 belongs in. - It would be better to instead create partitions as follows: + It would be better to instead create child tables as follows: CREATE TABLE measurement_y2006m02 ( @@ -3445,7 +3540,7 @@ CREATE TABLE measurement_y2008m01 ( - For each partition, create an index on the key column(s), + For each child table, create an index on the key column(s), as well as any other indexes you might want. CREATE INDEX measurement_y2006m02_logdate ON measurement_y2006m02 (logdate); @@ -3460,10 +3555,10 @@ CREATE INDEX measurement_y2008m01_logdate ON measurement_y2008m01 (logdate); We want our application to be able to say INSERT INTO - measurement ... and have the data be redirected into the - appropriate partition table. We can arrange that by attaching + measurement ... and have the data be redirected into the + appropriate child table. We can arrange that by attaching a suitable trigger function to the master table. - If data will be added only to the latest partition, we can + If data will be added only to the latest child, we can use a very simple trigger function: @@ -3485,17 +3580,17 @@ LANGUAGE plpgsql; CREATE TRIGGER insert_measurement_trigger BEFORE INSERT ON measurement - FOR EACH ROW EXECUTE PROCEDURE measurement_insert_trigger(); + FOR EACH ROW EXECUTE FUNCTION measurement_insert_trigger(); We must redefine the trigger function each month so that it always - points to the current partition. The trigger definition does + points to the current child table. The trigger definition does not need to be updated, however. We might want to insert data and have the server automatically - locate the partition into which the row should be added. We + locate the child table into which the row should be added. We could do this with a more complex trigger function, for example: @@ -3523,7 +3618,7 @@ LANGUAGE plpgsql; The trigger definition is the same as before. Note that each IF test must exactly match the - CHECK constraint for its partition. + CHECK constraint for its child table. 
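 To make the preceding point concrete, the body of such a routing trigger might look like the following sketch (the child table names and logdate ranges are the ones assumed throughout this example; adjust the branches so that each one mirrors the CHECK constraint of its child table exactly):

CREATE OR REPLACE FUNCTION measurement_insert_trigger()
RETURNS TRIGGER AS $$
BEGIN
    -- Each branch must match the corresponding child table's CHECK constraint.
    IF ( NEW.logdate >= DATE '2008-01-01' AND
         NEW.logdate < DATE '2008-02-01' ) THEN
        INSERT INTO measurement_y2008m01 VALUES (NEW.*);
    ELSIF ( NEW.logdate >= DATE '2007-12-01' AND
            NEW.logdate < DATE '2008-01-01' ) THEN
        INSERT INTO measurement_y2007m12 VALUES (NEW.*);
    ELSE
        RAISE EXCEPTION 'Date out of range.  Fix the measurement_insert_trigger() function!';
    END IF;
    RETURN NULL;  -- prevent the row from also being stored in the master table
END;
$$
LANGUAGE plpgsql;

 A function written this way must be kept in sync with the set of child tables: adding a new month's table means adding a matching branch.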
@@ -3534,8 +3629,8 @@ LANGUAGE plpgsql; - In practice it might be best to check the newest partition first, - if most inserts go into that partition. For simplicity we have + In practice, it might be best to check the newest child first, + if most inserts go into that child. For simplicity, we have shown the trigger's tests in the same order as in other parts of this example. @@ -3543,7 +3638,7 @@ LANGUAGE plpgsql; A different approach to redirecting inserts into the appropriate - partition table is to set up rules, instead of a trigger, on the + child table is to set up rules, instead of a trigger, on the master table. For example: @@ -3567,9 +3662,9 @@ DO INSTEAD - Be aware that COPY ignores rules. If you want to - use COPY to insert data, you'll need to copy into the - correct partition table rather than into the master. COPY + Be aware that COPY ignores rules. If you want to + use COPY to insert data, you'll need to copy into the + correct child table rather than directly into the master. COPY does fire triggers, so you can use it normally if you use the trigger approach. @@ -3583,27 +3678,27 @@ DO INSTEAD - Ensure that the + Ensure that the configuration parameter is not disabled in - postgresql.conf. - If it is, queries will not be optimized as desired. + postgresql.conf; otherwise + child tables may be accessed unnecessarily. - As we can see, a complex partitioning scheme could require a + As we can see, a complex table hierarchy could require a substantial amount of DDL. In the above example we would be creating - a new partition each month, so it might be wise to write a script that + a new child table each month, so it might be wise to write a script that generates the required DDL automatically. - Partition Maintenance + Maintenance for Inheritance Partitioning - To remove old data quickly, simply drop the partition that is no longer + To remove old data quickly, simply drop the child table that is no longer necessary: DROP TABLE measurement_y2006m02; @@ -3611,7 +3706,7 @@ DROP TABLE measurement_y2006m02; - To remove the partition from the partitioned table but retain access to + To remove the child table from the inheritance hierarchy table but retain access to it as a table in its own right: @@ -3620,8 +3715,8 @@ ALTER TABLE measurement_y2006m02 NO INHERIT measurement; - To add a new partition to handle new data, create an empty partition - just as the original partitions were created above: + To add a new child table to handle new data, create an empty child table + just as the original children were created above: CREATE TABLE measurement_y2008m02 ( @@ -3629,9 +3724,10 @@ CREATE TABLE measurement_y2008m02 ( ) INHERITS (measurement); - Alternatively, one may want to create the new table outside the partition - structure, and make it a partition after the data is loaded, checked, - and transformed. + Alternatively, one may want to create and populate the new child table + before adding it to the table hierarchy. This could allow data to be + loaded, checked, and transformed before being made visible to queries on + the parent table. CREATE TABLE measurement_y2008m02 @@ -3649,7 +3745,7 @@ ALTER TABLE measurement_y2008m02 INHERIT measurement; Caveats - The following caveats apply to partitioned tables implemented using + The following caveats apply to partitioning implemented using inheritance: @@ -3657,19 +3753,19 @@ ALTER TABLE measurement_y2008m02 INHERIT measurement; There is no automatic way to verify that all of the CHECK constraints are mutually exclusive. 
It is safer to create code that generates - partitions and creates and/or modifies associated objects than + child tables and creates and/or modifies associated objects than to write each by hand. - The schemes shown here assume that the partition key column(s) - of a row never change, or at least do not change enough to require - it to move to another partition. An UPDATE that attempts - to do that will fail because of the CHECK constraints. + The schemes shown here assume that the values of a row's key column(s) + never change, or at least do not change enough to require it to move to another partition. + An UPDATE that attempts + to do that will fail because of the CHECK constraints. If you need to handle such cases, you can put suitable update triggers - on the partition tables, but it makes management of the structure + on the child tables, but it makes management of the structure much more complicated. @@ -3678,7 +3774,7 @@ ALTER TABLE measurement_y2008m02 INHERIT measurement; If you are using manual VACUUM or ANALYZE commands, don't forget that - you need to run them on each partition individually. A command like: + you need to run them on each child table individually. A command like: ANALYZE measurement; @@ -3688,8 +3784,8 @@ ANALYZE measurement; - INSERT statements with ON CONFLICT - clauses are unlikely to work as expected, as the ON CONFLICT + INSERT statements with ON CONFLICT + clauses are unlikely to work as expected, as the ON CONFLICT action is only taken in case of unique violations on the specified target relation, not its child relations. @@ -3698,7 +3794,7 @@ ANALYZE measurement; Triggers or rules will be needed to route rows to the desired - partition, unless the application is explicitly aware of the + child table, unless the application is explicitly aware of the partitioning scheme. Triggers may be complicated to write, and will be much slower than the tuple routing performed internally by declarative partitioning. @@ -3709,112 +3805,218 @@ ANALYZE measurement; - - Partitioning and Constraint Exclusion + + Partition Pruning - constraint exclusion + partition pruning - Constraint exclusion is a query optimization technique - that improves performance for partitioned tables defined in the - fashion described above (both declaratively partitioned tables and those - implemented using inheritance). As an example: + Partition pruning is a query optimization technique + that improves performance for declaratively partitioned tables. + As an example: -SET constraint_exclusion = on; +SET enable_partition_pruning = on; -- the default SELECT count(*) FROM measurement WHERE logdate >= DATE '2008-01-01'; - Without constraint exclusion, the above query would scan each of - the partitions of the measurement table. With constraint - exclusion enabled, the planner will examine the constraints of each - partition and try to prove that the partition need not + Without partition pruning, the above query would scan each of the + partitions of the measurement table. With + partition pruning enabled, the planner will examine the definition + of each partition and prove that the partition need not be scanned because it could not contain any rows meeting the query's - WHERE clause. When the planner can prove this, it - excludes the partition from the query plan. + WHERE clause. When the planner can prove this, it + excludes (prunes) the partition from the query + plan. 
- You can use the EXPLAIN command to show the difference - between a plan with constraint_exclusion on and a plan - with it off. A typical unoptimized plan for this type of table setup is: - + By using the EXPLAIN command and the configuration parameter, it's + possible to show the difference between a plan for which partitions have + been pruned and one for which they have not. A typical unoptimized + plan for this type of table setup is: -SET constraint_exclusion = off; +SET enable_partition_pruning = off; EXPLAIN SELECT count(*) FROM measurement WHERE logdate >= DATE '2008-01-01'; - - QUERY PLAN ------------------------------------------------------------------------------------------------ - Aggregate (cost=158.66..158.68 rows=1 width=0) - -> Append (cost=0.00..151.88 rows=2715 width=0) - -> Seq Scan on measurement (cost=0.00..30.38 rows=543 width=0) - Filter: (logdate >= '2008-01-01'::date) - -> Seq Scan on measurement_y2006m02 measurement (cost=0.00..30.38 rows=543 width=0) + QUERY PLAN +----------------------------------------------------------------------------------- + Aggregate (cost=188.76..188.77 rows=1 width=8) + -> Append (cost=0.00..181.05 rows=3085 width=0) + -> Seq Scan on measurement_y2006m02 (cost=0.00..33.12 rows=617 width=0) Filter: (logdate >= '2008-01-01'::date) - -> Seq Scan on measurement_y2006m03 measurement (cost=0.00..30.38 rows=543 width=0) + -> Seq Scan on measurement_y2006m03 (cost=0.00..33.12 rows=617 width=0) Filter: (logdate >= '2008-01-01'::date) ... - -> Seq Scan on measurement_y2007m12 measurement (cost=0.00..30.38 rows=543 width=0) + -> Seq Scan on measurement_y2007m11 (cost=0.00..33.12 rows=617 width=0) Filter: (logdate >= '2008-01-01'::date) - -> Seq Scan on measurement_y2008m01 measurement (cost=0.00..30.38 rows=543 width=0) + -> Seq Scan on measurement_y2007m12 (cost=0.00..33.12 rows=617 width=0) + Filter: (logdate >= '2008-01-01'::date) + -> Seq Scan on measurement_y2008m01 (cost=0.00..33.12 rows=617 width=0) Filter: (logdate >= '2008-01-01'::date) Some or all of the partitions might use index scans instead of full-table sequential scans, but the point here is that there is no need to scan the older partitions at all to answer this query. - When we enable constraint exclusion, we get a significantly + When we enable partition pruning, we get a significantly cheaper plan that will deliver the same answer: - -SET constraint_exclusion = on; +SET enable_partition_pruning = on; EXPLAIN SELECT count(*) FROM measurement WHERE logdate >= DATE '2008-01-01'; - QUERY PLAN ------------------------------------------------------------------------------------------------ - Aggregate (cost=63.47..63.48 rows=1 width=0) - -> Append (cost=0.00..60.75 rows=1086 width=0) - -> Seq Scan on measurement (cost=0.00..30.38 rows=543 width=0) - Filter: (logdate >= '2008-01-01'::date) - -> Seq Scan on measurement_y2008m01 measurement (cost=0.00..30.38 rows=543 width=0) + QUERY PLAN +----------------------------------------------------------------------------------- + Aggregate (cost=37.75..37.76 rows=1 width=8) + -> Append (cost=0.00..36.21 rows=617 width=0) + -> Seq Scan on measurement_y2008m01 (cost=0.00..33.12 rows=617 width=0) Filter: (logdate >= '2008-01-01'::date) - Note that constraint exclusion is driven only by CHECK - constraints, not by the presence of indexes. Therefore it isn't - necessary to define indexes on the key columns. 
Whether an index - needs to be created for a given partition depends on whether you - expect that queries that scan the partition will generally scan - a large part of the partition or just a small part. An index will - be helpful in the latter case but not the former. + Note that partition pruning is driven only by the constraints defined + implicitly by the partition keys, not by the presence of indexes. + Therefore it isn't necessary to define indexes on the key columns. + Whether an index needs to be created for a given partition depends on + whether you expect that queries that scan the partition will + generally scan a large part of the partition or just a small part. + An index will be helpful in the latter case but not the former. + + + + Partition pruning can be performed not only during the planning of a + given query, but also during its execution. This is useful as it can + allow more partitions to be pruned when clauses contain expressions + whose values are not known at query planning time; for example, + parameters defined in a PREPARE statement, using a + value obtained from a subquery or using a parameterized value on the + inner side of a nested loop join. Partition pruning during execution + can be performed at any of the following times: + + + + + During initialization of the query plan. Partition pruning can be + performed here for parameter values which are known during the + initialization phase of execution. Partitions which are pruned + during this stage will not show up in the query's + EXPLAIN or EXPLAIN ANALYZE. + It is possible to determine the number of partitions which were + removed during this phase by observing the + Subplans Removed property in the + EXPLAIN output. + + + + + + During actual execution of the query plan. Partition pruning may + also be performed here to remove partitions using values which are + only known during actual query execution. This includes values + from subqueries and values from execution-time parameters such as + those from parameterized nested loop joins. Since the value of + these parameters may change many times during the execution of the + query, partition pruning is performed whenever one of the + execution parameters being used by partition pruning changes. + Determining if partitions were pruned during this phase requires + careful inspection of the nloops property in + the EXPLAIN ANALYZE output. + + + + + + + Partition pruning can be disabled using the + setting. + + + + + Currently, pruning of partitions during the planning of an + UPDATE or DELETE command is + implemented using the constraint exclusion method (however, it is + controlled by the enable_partition_pruning rather than + constraint_exclusion) — see the following section + for details and caveats that apply. + + + + Execution-time partition pruning currently occurs for the + Append and MergeAppend node types. + + + + Both of these behaviors are likely to be changed in a future release + of PostgreSQL. + + + + + + Partitioning and Constraint Exclusion + + + constraint exclusion + + + + Constraint exclusion is a query optimization + technique similar to partition pruning. While it is primarily used + for partitioning implemented using the legacy inheritance method, it can be + used for other purposes, including with declarative partitioning. 
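 As a brief illustration (a sketch reusing the inheritance-based measurement setup from the previous section, not a complete example), constraint exclusion is controlled by its own configuration parameter rather than by enable_partition_pruning:

SET constraint_exclusion = partition;  -- the default; 'on' examines CHECK constraints for all queries

EXPLAIN SELECT count(*) FROM measurement WHERE logdate >= DATE '2008-01-01';
-- Child tables whose CHECK constraints contradict the WHERE clause
-- should no longer appear in the resulting plan.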
+ + + + Constraint exclusion works in a very similar way to partition + pruning, except that it uses each table's CHECK + constraints — which gives it its name — whereas partition + pruning uses the table's partition bounds, which exist only in the + case of declarative partitioning. Another difference is that + constraint exclusion is only applied at plan time; there is no attempt + to remove partitions at execution time. + + + + The fact that constraint exclusion uses CHECK + constraints, which makes it slow compared to partition pruning, can + sometimes be used as an advantage: because constraints can be defined + even on declaratively-partitioned tables, in addition to their internal + partition bounds, constraint exclusion may be able + to elide additional partitions from the query plan. The default (and recommended) setting of - is actually neither - on nor off, but an intermediate setting - called partition, which causes the technique to be - applied only to queries that are likely to be working on partitioned - tables. The on setting causes the planner to examine - CHECK constraints in all queries, even simple ones that + is neither + on nor off, but an intermediate setting + called partition, which causes the technique to be + applied only to queries that are likely to be working on inheritance partitioned + tables. The on setting causes the planner to examine + CHECK constraints in all queries, even simple ones that are unlikely to benefit. - The following caveats apply to constraint exclusion, which is used by - both inheritance and partitioned tables: + The following caveats apply to constraint exclusion: - Constraint exclusion only works when the query's WHERE + Constraint exclusion is only applied during query planning; unlike + partition pruning, it cannot be applied during query execution. + + + + + + Constraint exclusion only works when the query's WHERE clause contains constants (or externally supplied parameters). For example, a comparison against a non-immutable function such as CURRENT_TIMESTAMP cannot be optimized, since the - planner cannot know which partition the function value might fall + planner cannot know which child table the function's value might fall into at run time. @@ -3822,26 +4024,23 @@ EXPLAIN SELECT count(*) FROM measurement WHERE logdate >= DATE '2008-01-01'; Keep the partitioning constraints simple, else the planner may not be - able to prove that partitions don't need to be visited. Use simple + able to prove that child tables might not need to be visited. Use simple equality conditions for list partitioning, or simple range tests for range partitioning, as illustrated in the preceding examples. A good rule of thumb is that partitioning constraints should contain only comparisons of the partitioning column(s) to constants - using B-tree-indexable operators, which applies even to partitioned - tables, because only B-tree-indexable column(s) are allowed in the - partition key. (This is not a problem when using declarative - partitioning, since the automatically generated constraints are simple - enough to be understood by the planner.) + using B-tree-indexable operators, because only B-tree-indexable + column(s) are allowed in the partition key. - All constraints on all partitions of the master table are examined - during constraint exclusion, so large numbers of partitions are likely - to increase query planning time considerably. 
Partitioning using - these techniques will work well with up to perhaps a hundred partitions; - don't try to use many thousands of partitions. + All constraints on all children of the parent table are examined + during constraint exclusion, so large numbers of children are likely + to increase query planning time considerably. So the legacy + inheritance based partitioning will work well with up to perhaps a + hundred child tables; don't try to use many thousands of children. @@ -3867,7 +4066,7 @@ EXPLAIN SELECT count(*) FROM measurement WHERE logdate >= DATE '2008-01-01'; PostgreSQL implements portions of the SQL/MED specification, allowing you to access data that resides outside PostgreSQL using regular SQL queries. Such data is referred to as - foreign data. (Note that this usage is not to be confused + foreign data. (Note that this usage is not to be confused with foreign keys, which are a type of constraint within the database.) @@ -3876,15 +4075,15 @@ EXPLAIN SELECT count(*) FROM measurement WHERE logdate >= DATE '2008-01-01'; foreign data wrapper. A foreign data wrapper is a library that can communicate with an external data source, hiding the details of connecting to the data source and obtaining data from it. - There are some foreign data wrappers available as contrib - modules; see . Other kinds of foreign data + There are some foreign data wrappers available as contrib + modules; see . Other kinds of foreign data wrappers might be found as third party products. If none of the existing foreign data wrappers suit your needs, you can write your own; see . + linkend="fdwhandler"/>. - To access foreign data, you need to create a foreign server + To access foreign data, you need to create a foreign server object, which defines how to connect to a particular external data source according to the set of options used by its supporting foreign data wrapper. Then you need to create one or more foreign @@ -3899,18 +4098,18 @@ EXPLAIN SELECT count(*) FROM measurement WHERE logdate >= DATE '2008-01-01'; Accessing remote data may require authenticating to the external data source. This information can be provided by a - user mapping, which can provide additional data + user mapping, which can provide additional data such as user names and passwords based on the current PostgreSQL role. For additional information, see - , - , - , - , and - . + , + , + , + , and + . @@ -3935,7 +4134,7 @@ EXPLAIN SELECT count(*) FROM measurement WHERE logdate >= DATE '2008-01-01'; - Functions and operators + Functions, procedures, and operators @@ -3954,7 +4153,7 @@ EXPLAIN SELECT count(*) FROM measurement WHERE logdate >= DATE '2008-01-01'; Detailed information on - these topics appears in . + these topics appears in . @@ -3984,7 +4183,7 @@ EXPLAIN SELECT count(*) FROM measurement WHERE logdate >= DATE '2008-01-01'; PostgreSQL makes sure that you cannot drop objects that other objects still depend on. For example, attempting to drop the products table we considered in , with the orders table depending on + linkend="ddl-constraints-fk"/>, with the orders table depending on it, would result in an error message like this: DROP TABLE products; @@ -4002,13 +4201,13 @@ DROP TABLE products CASCADE; that depend on them, recursively. In this case, it doesn't remove the orders table, it only removes the foreign key constraint. It stops there because nothing depends on the foreign key constraint. - (If you want to check what DROP ... CASCADE will do, - run DROP without CASCADE and read the - DETAIL output.) 
+ (If you want to check what DROP ... CASCADE will do, + run DROP without CASCADE and read the + DETAIL output.) - Almost all DROP commands in PostgreSQL support + Almost all DROP commands in PostgreSQL support specifying CASCADE. Of course, the nature of the possible dependencies varies with the type of the object. You can also write RESTRICT instead of @@ -4020,7 +4219,7 @@ DROP TABLE products CASCADE; According to the SQL standard, specifying either RESTRICT or CASCADE is - required in a DROP command. No database system actually + required in a DROP command. No database system actually enforces that rule, but whether the default behavior is RESTRICT or CASCADE varies across systems. @@ -4028,18 +4227,18 @@ DROP TABLE products CASCADE; - If a DROP command lists multiple + If a DROP command lists multiple objects, CASCADE is only required when there are dependencies outside the specified group. For example, when saying DROP TABLE tab1, tab2 the existence of a foreign - key referencing tab1 from tab2 would not mean + key referencing tab1 from tab2 would not mean that CASCADE is needed to succeed. For user-defined functions, PostgreSQL tracks dependencies associated with a function's externally-visible properties, - such as its argument and result types, but not dependencies + such as its argument and result types, but not dependencies that could only be known by examining the function body. As an example, consider this situation: @@ -4054,13 +4253,13 @@ CREATE FUNCTION get_color_note (rainbow) RETURNS text AS LANGUAGE SQL; - (See for an explanation of SQL-language + (See for an explanation of SQL-language functions.) PostgreSQL will be aware that - the get_color_note function depends on the rainbow + the get_color_note function depends on the rainbow type: dropping the type would force dropping the function, because its - argument type would no longer be defined. But PostgreSQL - will not consider get_color_note to depend on - the my_colors table, and so will not drop the function if + argument type would no longer be defined. But PostgreSQL + will not consider get_color_note to depend on + the my_colors table, and so will not drop the function if the table is dropped. While there are disadvantages to this approach, there are also benefits. The function is still valid in some sense if the table is missing, though executing it would cause an error; creating a new diff --git a/doc/src/sgml/dfunc.sgml b/doc/src/sgml/dfunc.sgml index 23af270e32..dfefa9e686 100644 --- a/doc/src/sgml/dfunc.sgml +++ b/doc/src/sgml/dfunc.sgml @@ -9,7 +9,7 @@ C, they must be compiled and linked in a special way to produce a file that can be dynamically loaded by the server. To be precise, a shared library needs to be - created.shared library + created.shared library @@ -30,7 +30,7 @@ executables: first the source files are compiled into object files, then the object files are linked together. The object files need to be created as position-independent code - (PIC),PIC which + (PIC),PIC which conceptually means that they can be placed at an arbitrary location in memory when they are loaded by the executable. (Object files intended for executables are usually not compiled that way.) The @@ -57,8 +57,8 @@ - FreeBSD - FreeBSDshared library + FreeBSD + FreeBSDshared library @@ -70,15 +70,15 @@ gcc -fPIC -c foo.c gcc -shared -o foo.so foo.o This is applicable as of version 3.0 of - FreeBSD. + FreeBSD. 
- HP-UX - HP-UXshared library + HP-UX + HP-UXshared library @@ -97,7 +97,7 @@ gcc -fPIC -c foo.c ld -b -o foo.sl foo.o - HP-UX uses the extension + HP-UX uses the extension .sl for shared libraries, unlike most other systems. @@ -106,8 +106,8 @@ ld -b -o foo.sl foo.o - Linux - Linuxshared library + Linux + Linuxshared library @@ -125,8 +125,8 @@ cc -shared -o foo.so foo.o - macOS - macOSshared library + macOS + macOSshared library @@ -141,8 +141,8 @@ cc -bundle -flat_namespace -undefined suppress -o foo.so foo.o - NetBSD - NetBSDshared library + NetBSD + NetBSDshared library @@ -161,8 +161,8 @@ gcc -shared -o foo.so foo.o - OpenBSD - OpenBSDshared library + OpenBSD + OpenBSDshared library @@ -179,17 +179,17 @@ ld -Bshareable -o foo.so foo.o - Solaris - Solarisshared library + Solaris + Solarisshared library The compiler flag to create PIC is with the Sun compiler and - with GCC. To + with GCC. To link shared libraries, the compiler option is with either compiler or alternatively - with GCC. + with GCC. cc -KPIC -c foo.c cc -G -o foo.so foo.o @@ -226,7 +226,7 @@ gcc -G -o foo.so foo.o - Refer back to about where the + Refer back to about where the server expects to find the shared library files. diff --git a/doc/src/sgml/dict-int.sgml b/doc/src/sgml/dict-int.sgml index d49f3e2a3a..c15cbd0e4d 100644 --- a/doc/src/sgml/dict-int.sgml +++ b/doc/src/sgml/dict-int.sgml @@ -8,7 +8,7 @@ - dict_int is an example of an add-on dictionary template + dict_int is an example of an add-on dictionary template for full-text search. The motivation for this example dictionary is to control the indexing of integers (signed and unsigned), allowing such numbers to be indexed while preventing excessive growth in the number of @@ -25,17 +25,17 @@ - The maxlen parameter specifies the maximum number of + The maxlen parameter specifies the maximum number of digits allowed in an integer word. The default value is 6. - The rejectlong parameter specifies whether an overlength - integer should be truncated or ignored. If rejectlong is - false (the default), the dictionary returns the first - maxlen digits of the integer. If rejectlong is - true, the dictionary treats an overlength integer as a stop + The rejectlong parameter specifies whether an overlength + integer should be truncated or ignored. If rejectlong is + false (the default), the dictionary returns the first + maxlen digits of the integer. If rejectlong is + true, the dictionary treats an overlength integer as a stop word, so that it will not be indexed. Note that this also means that such an integer cannot be searched for. @@ -47,8 +47,8 @@ Usage - Installing the dict_int extension creates a text search - template intdict_template and a dictionary intdict + Installing the dict_int extension creates a text search + template intdict_template and a dictionary intdict based on it, with the default parameters. You can alter the parameters, for example @@ -71,7 +71,7 @@ mydb# select ts_lexize('intdict', '12345678'); but real-world usage will involve including it in a text search - configuration as described in . + configuration as described in . That might look like this: diff --git a/doc/src/sgml/dict-xsyn.sgml b/doc/src/sgml/dict-xsyn.sgml index 42362ffbc8..256aff7c58 100644 --- a/doc/src/sgml/dict-xsyn.sgml +++ b/doc/src/sgml/dict-xsyn.sgml @@ -8,7 +8,7 @@ - dict_xsyn (Extended Synonym Dictionary) is an example of an + dict_xsyn (Extended Synonym Dictionary) is an example of an add-on dictionary template for full-text search. 
This dictionary type replaces words with groups of their synonyms, and so makes it possible to search for a word using any of its synonyms. @@ -18,41 +18,41 @@ Configuration - A dict_xsyn dictionary accepts the following options: + A dict_xsyn dictionary accepts the following options: - matchorig controls whether the original word is accepted by - the dictionary. Default is true. + matchorig controls whether the original word is accepted by + the dictionary. Default is true. - matchsynonyms controls whether the synonyms are - accepted by the dictionary. Default is false. + matchsynonyms controls whether the synonyms are + accepted by the dictionary. Default is false. - keeporig controls whether the original word is included in - the dictionary's output. Default is true. + keeporig controls whether the original word is included in + the dictionary's output. Default is true. - keepsynonyms controls whether the synonyms are included in - the dictionary's output. Default is true. + keepsynonyms controls whether the synonyms are included in + the dictionary's output. Default is true. - rules is the base name of the file containing the list of + rules is the base name of the file containing the list of synonyms. This file must be stored in - $SHAREDIR/tsearch_data/ (where $SHAREDIR means - the PostgreSQL installation's shared-data directory). - Its name must end in .rules (which is not to be included in - the rules parameter). + $SHAREDIR/tsearch_data/ (where $SHAREDIR means + the PostgreSQL installation's shared-data directory). + Its name must end in .rules (which is not to be included in + the rules parameter). @@ -71,15 +71,15 @@ word syn1 syn2 syn3 - The sharp (#) sign is a comment delimiter. It may appear at + The sharp (#) sign is a comment delimiter. It may appear at any position in a line. The rest of the line will be skipped. - Look at xsyn_sample.rules, which is installed in - $SHAREDIR/tsearch_data/, for an example. + Look at xsyn_sample.rules, which is installed in + $SHAREDIR/tsearch_data/, for an example. @@ -87,8 +87,8 @@ word syn1 syn2 syn3 Usage - Installing the dict_xsyn extension creates a text search - template xsyn_template and a dictionary xsyn + Installing the dict_xsyn extension creates a text search + template xsyn_template and a dictionary xsyn based on it, with default parameters. You can alter the parameters, for example @@ -135,7 +135,7 @@ mydb=# SELECT ts_lexize('xsyn', 'syn1'); Real-world usage will involve including it in a text search - configuration as described in . + configuration as described in . That might look like this: diff --git a/doc/src/sgml/diskusage.sgml b/doc/src/sgml/diskusage.sgml index 461deb9dba..3708e5f3d8 100644 --- a/doc/src/sgml/diskusage.sgml +++ b/doc/src/sgml/diskusage.sgml @@ -5,7 +5,7 @@ This chapter discusses how to monitor the disk usage of a - PostgreSQL database system. + PostgreSQL database system. @@ -18,20 +18,20 @@ Each table has a primary heap disk file where most of the data is stored. If the table has any columns with potentially-wide values, - there also might be a TOAST file associated with the table, + there also might be a TOAST file associated with the table, which is used to store values too wide to fit comfortably in the main - table (see ). There will be one valid index - on the TOAST table, if present. There also might be indexes + table (see ). There will be one valid index + on the TOAST table, if present. There also might be indexes associated with the base table. 
Each table and index is stored in a separate disk file — possibly more than one file, if the file would exceed one gigabyte. Naming conventions for these files are described - in . + in . You can monitor disk space in three ways: - using the SQL functions listed in , - using the module, or + using the SQL functions listed in , + using the module, or using manual inspection of the system catalogs. The SQL functions are the easiest to use and are generally recommended. The remainder of this section shows how to do it by inspection of the @@ -39,7 +39,7 @@ - Using psql on a recently vacuumed or analyzed database, + Using psql on a recently vacuumed or analyzed database, you can issue queries to see the disk usage of any table: SELECT pg_relation_filepath(oid), relpages FROM pg_class WHERE relname = 'customer'; @@ -49,14 +49,14 @@ SELECT pg_relation_filepath(oid), relpages FROM pg_class WHERE relname = 'custom base/16384/16806 | 60 (1 row) - Each page is typically 8 kilobytes. (Remember, relpages - is only updated by VACUUM, ANALYZE, and - a few DDL commands such as CREATE INDEX.) The file path name + Each page is typically 8 kilobytes. (Remember, relpages + is only updated by VACUUM, ANALYZE, and + a few DDL commands such as CREATE INDEX.) The file path name is of interest if you want to examine the table's disk file directly. - To show the space used by TOAST tables, use a query + To show the space used by TOAST tables, use a query like the following: SELECT relname, relpages @@ -124,7 +124,7 @@ ORDER BY relpages DESC; If you cannot free up additional space on the disk by deleting other things, you can move some of the database files to other file systems by making use of tablespaces. See for more information about that. + linkend="manage-ag-tablespaces"/> for more information about that. diff --git a/doc/src/sgml/dml.sgml b/doc/src/sgml/dml.sgml index 071cdb610f..97a7730955 100644 --- a/doc/src/sgml/dml.sgml +++ b/doc/src/sgml/dml.sgml @@ -33,10 +33,10 @@ - To create a new row, use the + To create a new row, use the command. The command requires the table name and column values. For - example, consider the products table from : + example, consider the products table from : CREATE TABLE products ( product_no integer, @@ -107,16 +107,16 @@ INSERT INTO products (product_no, name, price) WHERE release_date = 'today'; This provides the full power of the SQL query mechanism () for computing the rows to be inserted. + linkend="queries"/>) for computing the rows to be inserted. - When inserting a lot of data at the same time, considering using - the command. - It is not as flexible as the + When inserting a lot of data at the same time, consider using + the command. + It is not as flexible as the command, but is more efficient. Refer - to for more information on improving + to for more information on improving bulk loading performance. @@ -141,7 +141,7 @@ INSERT INTO products (product_no, name, price) - To update existing rows, use the + To update existing rows, use the command. This requires three pieces of information: @@ -160,7 +160,7 @@ INSERT INTO products (product_no, name, price) - Recall from that SQL does not, in general, + Recall from that SQL does not, in general, provide a unique identifier for rows. Therefore it is not always possible to directly specify which row to update. Instead, you specify which conditions a row must meet in order to @@ -203,7 +203,7 @@ UPDATE products SET price = price * 1.10; this does not create any ambiguity. 
Of course, the WHERE condition does not have to be an equality test. Many other operators are - available (see ). But the expression + available (see ). But the expression needs to evaluate to a Boolean result. @@ -243,7 +243,7 @@ UPDATE mytable SET a = 5, b = 3, c = 1 WHERE a > 0; - You use the + You use the command to remove rows; the syntax is very similar to the UPDATE command. For instance, to remove all rows from the products table that have a price of 10, use: @@ -285,42 +285,42 @@ DELETE FROM products; Sometimes it is useful to obtain data from modified rows while they are - being manipulated. The INSERT, UPDATE, - and DELETE commands all have an - optional RETURNING clause that supports this. Use - of RETURNING avoids performing an extra database query to + being manipulated. The INSERT, UPDATE, + and DELETE commands all have an + optional RETURNING clause that supports this. Use + of RETURNING avoids performing an extra database query to collect the data, and is especially valuable when it would otherwise be difficult to identify the modified rows reliably. - The allowed contents of a RETURNING clause are the same as - a SELECT command's output list - (see ). It can contain column + The allowed contents of a RETURNING clause are the same as + a SELECT command's output list + (see ). It can contain column names of the command's target table, or value expressions using those - columns. A common shorthand is RETURNING *, which selects + columns. A common shorthand is RETURNING *, which selects all columns of the target table in order. - In an INSERT, the data available to RETURNING is + In an INSERT, the data available to RETURNING is the row as it was inserted. This is not so useful in trivial inserts, since it would just repeat the data provided by the client. But it can be very handy when relying on computed default values. For example, - when using a serial - column to provide unique identifiers, RETURNING can return + when using a serial + column to provide unique identifiers, RETURNING can return the ID assigned to a new row: CREATE TABLE users (firstname text, lastname text, id serial primary key); INSERT INTO users (firstname, lastname) VALUES ('Joe', 'Cool') RETURNING id; - The RETURNING clause is also very useful - with INSERT ... SELECT. + The RETURNING clause is also very useful + with INSERT ... SELECT. - In an UPDATE, the data available to RETURNING is + In an UPDATE, the data available to RETURNING is the new content of the modified row. For example: UPDATE products SET price = price * 1.10 @@ -330,7 +330,7 @@ UPDATE products SET price = price * 1.10 - In a DELETE, the data available to RETURNING is + In a DELETE, the data available to RETURNING is the content of the deleted row. For example: DELETE FROM products @@ -340,10 +340,10 @@ DELETE FROM products - If there are triggers () on the target table, - the data available to RETURNING is the row as modified by + If there are triggers () on the target table, + the data available to RETURNING is the row as modified by the triggers. Thus, inspecting columns computed by triggers is another - common use-case for RETURNING. + common use-case for RETURNING. diff --git a/doc/src/sgml/docguide.sgml b/doc/src/sgml/docguide.sgml index ff58a17335..420a2f9d6e 100644 --- a/doc/src/sgml/docguide.sgml +++ b/doc/src/sgml/docguide.sgml @@ -47,35 +47,23 @@ The documentation sources are written in DocBook, which is a markup language - superficially similar to HTML. 
Both of these - languages are applications of the Standard Generalized - Markup Language, SGML, which is - essentially a language for describing other languages. In what - follows, the terms DocBook and SGML are both + defined in XML. In what + follows, the terms DocBook and XML are both used, but technically they are not interchangeable. - - - The PostgreSQL documentation is currently being transitioned from DocBook - SGML and DSSSL style sheets to DocBook XML and XSLT style sheets. Be - careful to look at the instructions relating to the PostgreSQL version you - are dealing with, as the procedures and required tools will change. - - - DocBook allows an author to specify the structure and content of a technical document without worrying about presentation details. A document style defines how that content is rendered into one of several final forms. DocBook is - maintained by the - OASIS group. The + maintained by the + OASIS group. The official DocBook site has good introductory and reference documentation and a complete O'Reilly book for your online reading pleasure. The NewbieDoc Docbook Guide is very helpful for beginners. - The + The FreeBSD Documentation Project also uses DocBook and has some good information, including a number of style guidelines that might be worth considering. @@ -97,19 +85,8 @@ This is the definition of DocBook itself. We currently use version 4.2; you cannot use later or earlier versions. You need - the SGML and the XML variant of - the DocBook DTD of the same version. These will usually be in separate - packages. - - - - - - ISO 8879 character entities - - - These are required by DocBook SGML but are distributed separately - because they are maintained by ISO. + the XML variant of the DocBook DTD, not + the SGML variant. @@ -130,17 +107,6 @@ - - OpenSP - - - This is the base package of SGML processing. Note - that we no longer need OpenJade, the DSSSL - processor, only the OpenSP package for converting SGML to XML. - - - - Libxml2 for xmllint @@ -201,7 +167,7 @@ To install the required packages, use: -yum install docbook-dtds docbook-style-xsl fop libxslt opensp +yum install docbook-dtds docbook-style-xsl fop libxslt @@ -209,41 +175,10 @@ yum install docbook-dtds docbook-style-xsl fop libxslt opensp Installation on FreeBSD - - The FreeBSD Documentation Project is itself a heavy user of - DocBook, so it comes as no surprise that there is a full set of - ports of the documentation tools available on - FreeBSD. The following ports need to be installed to build the - documentation on FreeBSD. - - - textproc/docbook-sgml - - - textproc/docbook-xml - - - textproc/docbook-xsl - - - textproc/dsssl-docbook-modular - - - textproc/libxslt - - - textproc/fop - - - textproc/opensp - - - - To install the required packages with pkg, use: -pkg install docbook-sgml docbook-xml docbook-xsl fop libxslt opensp +pkg install docbook-xml docbook-xsl fop libxslt @@ -252,12 +187,6 @@ pkg install docbook-sgml docbook-xml docbook-xsl fop libxslt opensp directory you'll need to use gmake, because the makefile provided is not suitable for FreeBSD's make. - - - More information about the FreeBSD documentation tools can be - found in the - FreeBSD Documentation Project's instructions. - @@ -268,7 +197,7 @@ pkg install docbook-sgml docbook-xml docbook-xsl fop libxslt opensp available for Debian GNU/Linux. 
To install, simply use: -apt-get install docbook docbook-xml docbook-xsl fop libxml2-utils opensp xsltproc +apt-get install docbook-xml docbook-xsl fop libxml2-utils xsltproc @@ -277,117 +206,21 @@ apt-get install docbook docbook-xml docbook-xsl fop libxml2-utils opensp xsltpro macOS - If you use MacPorts, the following will get you set up: - -sudo port install docbook-sgml-4.2 docbook-xml-4.2 docbook-xsl fop libxslt opensp - + On macOS, you can build the HTML and man documentation without installing + anything extra. If you want to build PDFs or want to install a local copy + of DocBook, you can get those from your preferred package manager. - - - - Manual Installation from Source - The manual installation process of the DocBook tools is somewhat - complex, so if you have pre-built packages available, use them. - We describe here only a standard setup, with reasonably standard - installation paths, and no fancy features. For - details, you should study the documentation of the respective - package, and read SGML introductory material. - - - - Installing OpenSP - - - The installation of OpenSP offers a GNU-style - ./configure; make; make install build process. - Details can be found in the OpenSP source distribution. In a nutshell: - -./configure --enable-default-catalog=/usr/local/etc/sgml/catalog -make -make install - - Be sure to remember where you put the default catalog; you - will need it below. You can also leave it off, but then you will have to - set the environment variable SGML_CATALOG_FILES to point - to the file whenever you use any programs from OpenSP later on. (This - method is also an option if OpenSP is already installed and you want to - install the rest of the toolchain locally.) - - - - - Installing the <productname>DocBook</productname> <acronym>DTD</acronym> Kit - - - - - Obtain the - DocBook V4.2 distribution. - - - - - - Create the directory - /usr/local/share/sgml/docbook-4.2 and change - to it. (The exact location is irrelevant, but this one is - reasonable within the layout we are following here.) - -$ mkdir /usr/local/share/sgml/docbook-4.2 -$ cd /usr/local/share/sgml/docbook-4.2 - - - - - - - Unpack the archive: - -$ unzip -a ...../docbook-4.2.zip - - (The archive will unpack its files into the current directory.) - - - - - - Edit the file - /usr/local/share/sgml/catalog (or whatever - you told jade during installation) and put a line like this - into it: + If you use MacPorts, the following will get you set up: -CATALOG "docbook-4.2/docbook.cat" +sudo port install docbook-xml-4.2 docbook-xsl fop - - - - - - Download the - ISO 8879 character entities archive, unpack it, and put the - files in the same directory you put the DocBook files in: - -$ cd /usr/local/share/sgml/docbook-4.2 -$ unzip ...../ISOEnts.zip - - - - - - - Run the following command in the directory with the DocBook and ISO files: + If you use Homebrew, use this: -perl -pi -e 's/iso-(.*).gml/ISO\1/g' docbook.cat +brew install docbook docbook-xsl fop - (This fixes a mixup between the names used in the DocBook - catalog file and the actual names of the ISO character entity - files.) - - - - + @@ -400,26 +233,14 @@ perl -pi -e 's/iso-(.*).gml/ISO\1/g' docbook.cat Check the output near the end of the run, it should look something like this: - -checking for onsgmls... onsgmls -checking for DocBook V4.2... yes -checking for dbtoepub... dbtoepub checking for xmllint... xmllint +checking for DocBook XML V4.2... yes +checking for dbtoepub... dbtoepub checking for xsltproc... xsltproc -checking for osx... 
osx checking for fop... fop - - If neither onsgmls nor - nsgmls were found then some of the following tests - will be skipped. nsgmls is part of the OpenSP - package. You can pass the environment variable - NSGMLS to configure to point - to the programs if they are not found automatically. If - DocBook V4.2 was not found then you did not install - the DocBook DTD kit in a place where OpenSP can find it, or you have - not set up the catalog files correctly. See the installation hints - above. + If xmllint was not found then some of the following + tests will be skipped. @@ -449,7 +270,7 @@ checking for fop... fop To produce HTML documentation with the stylesheet used on postgresql.org instead of the + url="https://www.postgresql.org/docs/current/">postgresql.org instead of the default simple style use: doc/src/sgml$ make STYLE=website html @@ -464,9 +285,7 @@ checking for fop... fop We use the DocBook XSL stylesheets to convert DocBook refentry pages to *roff output suitable for man - pages. The man pages are also distributed as a tar archive, - similar to the HTML version. To create the man - pages, use the commands: + pages. To create the man pages, use the command: doc/src/sgml$ make man @@ -511,11 +330,11 @@ checking for fop... fop file ~/.foprc, for example: # FOP binary distribution -FOP_OPTS='-Xmx1000m' +FOP_OPTS='-Xmx1500m' # Debian -JAVA_ARGS='-Xmx1000m' +JAVA_ARGS='-Xmx1500m' # Red Hat -ADDITIONAL_FLAGS='-Xmx1000m' +ADDITIONAL_FLAGS='-Xmx1500m' There is a minimum amount of memory that is required, and to some extent more memory appears to make things a bit faster. On systems with very @@ -536,7 +355,7 @@ ADDITIONAL_FLAGS='-Xmx1000m' The installation instructions are also distributed as plain text, in case they are needed in a situation where better reading tools are not available. The INSTALL file - corresponds to , with some minor + corresponds to , with some minor changes to account for the different context. To recreate the file, change to the directory doc/src/sgml and enter make INSTALL. @@ -568,112 +387,36 @@ ADDITIONAL_FLAGS='-Xmx1000m' Documentation Authoring - SGML and DocBook do - not suffer from an oversupply of open-source authoring tools. The - most common tool set is the - Emacs/XEmacs - editor with appropriate editing mode. On some systems - these tools are provided in a typical full installation. + The documentation sources are most conveniently modified with an editor + that has a mode for editing XML, and even more so if it has some awareness + of XML schema languages so that it can know about + DocBook syntax specifically. - - Emacs/PSGML - - - PSGML is the most common and most - powerful mode for editing SGML documents. - When properly configured, it will allow you to use - Emacs to insert tags and check markup - consistency. You could use it for HTML as - well. Check the - PSGML web site for downloads, installation instructions, and - detailed documentation. - - - - There is one important thing to note with - PSGML: its author assumed that your - main SGML DTD directory - would be /usr/local/lib/sgml. If, as in the - examples in this chapter, you use - /usr/local/share/sgml, you have to - compensate for this, either by setting - SGML_CATALOG_FILES environment variable, or you - can customize your PSGML installation - (its manual tells you how). 
- - - - Put the following in your ~/.emacs - environment file (adjusting the path names to be appropriate for - your system): - - -; ********** for SGML mode (psgml) - -(setq sgml-omittag t) -(setq sgml-shorttag t) -(setq sgml-minimize-attributes nil) -(setq sgml-always-quote-attributes t) -(setq sgml-indent-step 1) -(setq sgml-indent-data t) -(setq sgml-parent-document nil) -(setq sgml-exposed-tags nil) -(setq sgml-catalog-files '("/usr/local/share/sgml/catalog")) - -(autoload 'sgml-mode "psgml" "Major mode to edit SGML files." t ) - - - and in the same file add an entry for SGML - into the (existing) definition for - auto-mode-alist: - -(setq - auto-mode-alist - '(("\\.sgml$" . sgml-mode) - )) - - - - - You might find that when using PSGML, a - comfortable way of working with these separate files of book - parts is to insert a proper DOCTYPE - declaration while you're editing them. If you are working on - this source, for instance, it is an appendix chapter, so you - would specify the document as an appendix instance - of a DocBook document by making the first line look like this: - - -<!DOCTYPE appendix PUBLIC "-//OASIS//DTD DocBook V4.2//EN"> - - - This means that anything and everything that reads - SGML will get it right, and I can verify the - document with nsgmls -s docguide.sgml. (But - you need to take out that line before building the entire - documentation set.) - - + + Note that for historical reasons the documentation source files are named + with an extension .sgml even though they are now XML + files. So you might need to adjust your editor configuration to set the + correct mode. + - Other Emacs Modes + Emacs - GNU Emacs ships with a different - SGML mode, which is not quite as powerful as - PSGML, but it's less confusing and - lighter weight. Also, it offers syntax highlighting (font lock), - which can be very helpful. - src/tools/editors/emacs.samples contains - sample settings for this mode. + nXML Mode, which ships with + Emacs, is the most common mode for editing + XML documents with Emacs. + It will allow you to use Emacs to insert tags + and check markup consistency, and it supports + DocBook out of the box. Check the + nXML manual for detailed documentation. - Norm Walsh offers a - major mode - specifically for DocBook which also has font-lock and a number of features to - reduce typing. + src/tools/editors/emacs.samples contains + recommended settings for this mode. diff --git a/doc/src/sgml/earthdistance.sgml b/doc/src/sgml/earthdistance.sgml index 6dedc4a5f4..1f3ea6aa6e 100644 --- a/doc/src/sgml/earthdistance.sgml +++ b/doc/src/sgml/earthdistance.sgml @@ -8,18 +8,18 @@ - The earthdistance module provides two different approaches to + The earthdistance module provides two different approaches to calculating great circle distances on the surface of the Earth. The one - described first depends on the cube module (which - must be installed before earthdistance can be - installed). The second one is based on the built-in point data type, + described first depends on the cube module (which + must be installed before earthdistance can be + installed). The second one is based on the built-in point data type, using longitude and latitude for the coordinates. In this module, the Earth is assumed to be perfectly spherical. (If that's too inaccurate for you, you might want to look at the - PostGIS + PostGIS project.) 
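 As a minimal installation sketch (assuming both extensions are available in your installation), cube must be created before earthdistance:

CREATE EXTENSION cube;           -- required dependency
CREATE EXTENSION earthdistance;  -- provides the earth domain, functions, and the <@> operator

-- alternatively, CASCADE installs the dependency automatically:
-- CREATE EXTENSION earthdistance CASCADE;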
@@ -29,13 +29,13 @@ Data is stored in cubes that are points (both corners are the same) using 3 coordinates representing the x, y, and z distance from the center of the - Earth. A domain earth over cube is provided, which + Earth. A domain earth over cube is provided, which includes constraint checks that the value meets these restrictions and is reasonably close to the actual surface of the Earth. - The radius of the Earth is obtained from the earth() + The radius of the Earth is obtained from the earth() function. It is given in meters. But by changing this one function you can change the module to use some other units, or to use a different value of the radius that you feel is more appropriate. @@ -43,8 +43,8 @@ This package has applications to astronomical databases as well. - Astronomers will probably want to change earth() to return a - radius of 180/pi() so that distances are in degrees. + Astronomers will probably want to change earth() to return a + radius of 180/pi() so that distances are in degrees. @@ -56,7 +56,7 @@ The provided functions are shown - in . + in . @@ -123,11 +123,11 @@ earth_box(earth, float8)earth_boxcubeReturns a box suitable for an indexed search using the cube - @> + @> operator for points within a given great circle distance of a location. Some points in this box are further than the specified great circle distance from the location, so a second check using - earth_distance should be included in the query. + earth_distance should be included in the query. @@ -141,7 +141,7 @@ The second part of the module relies on representing Earth locations as - values of type point, in which the first component is taken to + values of type point, in which the first component is taken to represent longitude in degrees, and the second component is taken to represent latitude in degrees. Points are taken as (longitude, latitude) and not vice versa because longitude is closer to the intuitive idea of @@ -150,7 +150,7 @@ A single operator is provided, shown - in . + in .
@@ -165,7 +165,7 @@ - point <@> point + point <@> point float8 Gives the distance in statute miles between two points on the Earth's surface. @@ -176,15 +176,15 @@
- Note that unlike the cube-based part of the module, units - are hardwired here: changing the earth() function will + Note that unlike the cube-based part of the module, units + are hardwired here: changing the earth() function will not affect the results of this operator. One disadvantage of the longitude/latitude representation is that you need to be careful about the edge conditions near the poles - and near +/- 180 degrees of longitude. The cube-based + and near +/- 180 degrees of longitude. The cube-based representation avoids these discontinuities. diff --git a/doc/src/sgml/ecpg.sgml b/doc/src/sgml/ecpg.sgml index f13a0e999f..fac45400b0 100644 --- a/doc/src/sgml/ecpg.sgml +++ b/doc/src/sgml/ecpg.sgml @@ -31,7 +31,7 @@ specially marked sections. To build the program, the source code (*.pgc) is first passed through the embedded SQL preprocessor, which converts it to an ordinary C program (*.c), and afterwards it can be processed by a C - compiler. (For details about the compiling and linking see ). + compiler. (For details about the compiling and linking see ). Converted ECPG applications call functions in the libpq library through the embedded SQL library (ecpglib), and communicate with the PostgreSQL server using the normal frontend-backend protocol. @@ -46,7 +46,7 @@ correctness. Third, embedded SQL in C is specified in the SQL standard and supported by many other SQL database systems. The - PostgreSQL implementation is designed to match this + PostgreSQL implementation is designed to match this standard as much as possible, and it is usually possible to port embedded SQL programs written for other SQL databases to PostgreSQL with relative @@ -97,19 +97,19 @@ EXEC SQL CONNECT TO target AS - dbname@hostname:port + dbname@hostname:port - tcp:postgresql://hostname:port/dbname?options + tcp:postgresql://hostname:port/dbname?options - unix:postgresql://hostname:port/dbname?options + unix:postgresql://hostname:port/dbname?options @@ -186,6 +186,18 @@ EXEC SQL CONNECT TO target AS chapter).
+ + If untrusted users have access to a database that has not adopted a + secure schema usage pattern, + begin each session by removing publicly-writable schemas + from search_path. For example, + add options=-csearch_path= + to options, or + issue EXEC SQL SELECT pg_catalog.set_config('search_path', '', + false); after connecting. This consideration is not specific to + ECPG; it applies to every interface for executing arbitrary SQL commands. + + Here are some examples of CONNECT statements: @@ -266,8 +278,11 @@ int main() { EXEC SQL CONNECT TO testdb1 AS con1 USER testuser; + EXEC SQL SELECT pg_catalog.set_config('search_path', '', false); EXEC SQL COMMIT; EXEC SQL CONNECT TO testdb2 AS con2 USER testuser; + EXEC SQL SELECT pg_catalog.set_config('search_path', '', false); EXEC SQL COMMIT; EXEC SQL CONNECT TO testdb3 AS con3 USER testuser; + EXEC SQL SELECT pg_catalog.set_config('search_path', '', false); EXEC SQL COMMIT; /* This query would be executed in the last opened database "testdb3". */ EXEC SQL SELECT current_database() INTO :dbname; @@ -397,9 +412,9 @@ EXEC SQL COMMIT; row can also be executed using EXEC SQL directly. To handle result sets with multiple rows, an application has to use a cursor; - see below. (As a special case, an + see below. (As a special case, an application can fetch multiple rows at once into an array host - variable; see .) + variable; see .) @@ -422,7 +437,7 @@ EXEC SQL SHOW search_path INTO :var; :something are host variables, that is, they refer to variables in the C program. They are explained in . + linkend="ecpg-variables"/>. @@ -452,8 +467,8 @@ EXEC SQL COMMIT; For more details about declaration of the cursor, - see , and - see for FETCH command + see , and + see for FETCH command details. @@ -475,9 +490,9 @@ EXEC SQL COMMIT; In the default mode, statements are committed only when EXEC SQL COMMIT is issued. The embedded SQL interface also supports autocommit of transactions (similar to - psql's default behavior) via the + psql's default behavior) via the command-line option to ecpg (see ) or via the EXEC SQL SET AUTOCOMMIT TO + linkend="app-ecpg"/>) or via the EXEC SQL SET AUTOCOMMIT TO ON statement. In autocommit mode, each command is automatically committed unless it is inside an explicit transaction block. This mode can be explicitly turned off using EXEC @@ -507,7 +522,7 @@ EXEC SQL COMMIT; - EXEC SQL PREPARE TRANSACTION transaction_id + EXEC SQL PREPARE TRANSACTION transaction_id Prepare the current transaction for two-phase commit. @@ -516,7 +531,7 @@ EXEC SQL COMMIT; - EXEC SQL COMMIT PREPARED transaction_id + EXEC SQL COMMIT PREPARED transaction_id Commit a transaction that is in prepared state. @@ -525,7 +540,7 @@ EXEC SQL COMMIT; - EXEC SQL ROLLBACK PREPARED transaction_id + EXEC SQL ROLLBACK PREPARED transaction_id Roll back a transaction that is in prepared state. @@ -617,8 +632,8 @@ EXEC SQL DEALLOCATE PREPARE name; For more details about PREPARE, - see . Also - see for more details about using + see . Also + see for more details about using placeholders and input parameters. @@ -628,7 +643,7 @@ EXEC SQL DEALLOCATE PREPARE name; Using Host Variables - In you saw how you can execute SQL + In you saw how you can execute SQL statements from an embedded SQL program. 
Some of those statements only used fixed values and did not provide a way to insert user-supplied values into statements or have the program process @@ -646,7 +661,7 @@ EXEC SQL DEALLOCATE PREPARE name; Another way to exchange values between PostgreSQL backends and ECPG applications is the use of SQL descriptors, described - in . + in . @@ -720,7 +735,7 @@ EXEC SQL int i = 4; The definition of a structure or union also must be listed inside - a DECLARE section. Otherwise the preprocessor cannot + a DECLARE section. Otherwise the preprocessor cannot handle these types since it does not know the definition. @@ -812,11 +827,11 @@ do directly. Other PostgreSQL data types, such as timestamp and numeric can only be accessed through special library functions; see - . + . - shows which PostgreSQL + shows which PostgreSQL data types correspond to which C data types. When you wish to send or receive a value of a given PostgreSQL data type, you should declare a C variable of the corresponding C data type in @@ -851,12 +866,12 @@ do decimal - decimalThis type can only be accessed through special library functions; see . + decimalThis type can only be accessed through special library functions; see . numeric - numeric + numeric @@ -890,8 +905,8 @@ do - character(n), varchar(n), text - char[n+1], VARCHAR[n+1]declared in ecpglib.h + character(n), varchar(n), text + char[n+1], VARCHAR[n+1]declared in ecpglib.h @@ -901,17 +916,17 @@ do timestamp - timestamp + timestamp interval - interval + interval date - date + date @@ -955,7 +970,7 @@ EXEC SQL END DECLARE SECTION; The other way is using the VARCHAR type, which is a special type provided by ECPG. The definition on an array of type VARCHAR is converted into a - named struct for every variable. A declaration like: + named struct for every variable. A declaration like: VARCHAR var[180]; @@ -994,15 +1009,15 @@ struct varchar_var { int len; char arr[180]; } var; ECPG contains some special types that help you to interact easily with some special data types from the PostgreSQL server. In particular, it has implemented support for the - numeric, decimal, date, timestamp, - and interval types. These data types cannot usefully be + numeric, decimal, date, timestamp, + and interval types. These data types cannot usefully be mapped to primitive host variable types (such - as int, long long int, + as int, long long int, or char[]), because they have a complex internal structure. Applications deal with these types by declaring host variables in special types and accessing them using functions in the pgtypes library. The pgtypes library, described in detail - in contains basic functions to deal + in contains basic functions to deal with those types, such that you do not need to send a query to the SQL server just for adding an interval to a time stamp for example. @@ -1011,7 +1026,7 @@ struct varchar_var { int len; char arr[180]; } var; The follow subsections describe these special data types. For more details about pgtypes library functions, - see . + see . @@ -1062,7 +1077,7 @@ ts = 2010-06-27 18:03:56.949343 program has to include pgtypes_date.h, declare a host variable as the date type and convert a DATE value into a text form using PGTYPESdate_to_asc() function. For more details about the - pgtypes library functions, see . + pgtypes library functions, see . 
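 A minimal sketch of that pattern (the literal date value and the variable names are
 only illustrative, and a program using these calls is linked with libpgtypes;
 PGTYPESchar_free() is the release function described further down):

#include <stdio.h>
#include <pgtypes_date.h>

int
main(void)
{
    date  d;
    char *text;

    /* endptr may safely be passed as NULL, as noted above */
    d = PGTYPESdate_from_asc("1999-01-18", NULL);

    /* convert back into the YYYY-MM-DD textual form */
    text = PGTYPESdate_to_asc(d);
    printf("date: %s\n", text);

    /* strings returned by the pgtypes functions are released with PGTYPESchar_free */
    PGTYPESchar_free(text);
    return 0;
}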
@@ -1093,6 +1108,7 @@ EXEC SQL BEGIN DECLARE SECTION; EXEC SQL END DECLARE SECTION; EXEC SQL CONNECT TO testdb; + EXEC SQL SELECT pg_catalog.set_config('search_path', '', false); EXEC SQL COMMIT; in = PGTYPESinterval_new(); EXEC SQL SELECT '1 min'::interval INTO :in; @@ -1117,7 +1133,7 @@ EXEC SQL END DECLARE SECTION; allocating some memory space on the heap, and accessing the variable using the pgtypes library functions. For more details about the pgtypes library functions, - see . + see . @@ -1147,6 +1163,7 @@ EXEC SQL BEGIN DECLARE SECTION; EXEC SQL END DECLARE SECTION; EXEC SQL CONNECT TO testdb; + EXEC SQL SELECT pg_catalog.set_config('search_path', '', false); EXEC SQL COMMIT; num = PGTYPESnumeric_new(); dec = PGTYPESdecimal_new(); @@ -1193,7 +1210,7 @@ EXEC SQL END DECLARE SECTION; There are two use cases for arrays as host variables. The first is a way to store some text string in char[] or VARCHAR[], as - explained in . The second use case is to + explained in . The second use case is to retrieve multiple rows from a query result without using a cursor. Without an array, to process a query result consisting of multiple rows, it is required to use a cursor and @@ -1221,6 +1238,7 @@ EXEC SQL END DECLARE SECTION; memset(dbid, 0, sizeof(int) * 8); EXEC SQL CONNECT TO testdb; + EXEC SQL SELECT pg_catalog.set_config('search_path', '', false); EXEC SQL COMMIT; /* Retrieve multiple rows into arrays at once. */ EXEC SQL SELECT oid,datname INTO :dbid, :dbname FROM pg_database; @@ -1378,7 +1396,7 @@ EXEC SQL TYPE serial_t IS long; You can declare pointers to the most common types. Note however that you cannot use pointers as target variables of queries - without auto-allocation. See + without auto-allocation. See for more information on auto-allocation. @@ -1520,7 +1538,7 @@ while (1) Another workaround is to store arrays in their external string representation in host variables of type char[] or VARCHAR[]. For more details about this - representation, see . Note that + representation, see . Note that this means that the array cannot be accessed naturally as an array in the host program (without further processing that parses the text representation). @@ -1578,7 +1596,7 @@ EXEC SQL CLOSE cur1; To enhance this example, the host variables to store values in the FETCH command can be gathered into one structure. For more details about the host variable in the - structure form, see . + structure form, see . To switch to the structure, the example can be modified as below. The two host variables, intval and textval, become members of @@ -1659,12 +1677,12 @@ while (1) Here is an example using the data type complex from - the example in . The external string - representation of that type is (%lf,%lf), + the example in . The external string + representation of that type is (%f,%f), which is defined in the functions complex_in() and complex_out() functions - in . The following example inserts the + in . The following example inserts the complex type values (1,1) and (3,3) into the columns a and b, and select @@ -1875,7 +1893,7 @@ EXEC SQL EXECUTE mystmt INTO :v1, :v2, :v3 USING 37; If a query is expected to return more than one result row, a cursor should be used, as in the following example. - (See for more details about the + (See for more details about the cursor.) 
EXEC SQL BEGIN DECLARE SECTION; @@ -1887,6 +1905,7 @@ char *stmt = "SELECT u.usename as dbaname, d.datname " EXEC SQL END DECLARE SECTION; EXEC SQL CONNECT TO testdb AS con1 USER testuser; +EXEC SQL SELECT pg_catalog.set_config('search_path', '', false); EXEC SQL COMMIT; EXEC SQL PREPARE stmt1 FROM :stmt; @@ -1932,20 +1951,32 @@ EXEC SQL SELECT started, duration INTO :ts1, :iv1 FROM datetbl WHERE d=:date1; PGTYPEStimestamp_add_interval(&ts1, &iv1, &tsout); out = PGTYPEStimestamp_to_asc(&tsout); printf("Started + duration: %s\n", out); -free(out); +PGTYPESchar_free(out); ]]> + + Character Strings + + Some functions such as PGTYPESnumeric_to_asc return + a pointer to a freshly allocated character string. These results should be + freed with PGTYPESchar_free instead of + free. (This is important only on Windows, where + memory allocation and release sometimes need to be done by the same + library.) + + + The numeric Type The numeric type offers to do calculations with arbitrary precision. See - for the equivalent type in the - PostgreSQL server. Because of the arbitrary precision this + for the equivalent type in the + PostgreSQL server. Because of the arbitrary precision this variable needs to be able to expand and shrink dynamically. That's why you can only create numeric variables on the heap, by means of the - PGTYPESnumeric_new and PGTYPESnumeric_free + PGTYPESnumeric_new and PGTYPESnumeric_free functions. The decimal type, which is similar but limited in precision, can be created on the stack as well as on the heap. @@ -2010,6 +2041,7 @@ char *PGTYPESnumeric_to_asc(numeric *num, int dscale); The numeric value will be printed with dscale decimal digits, with rounding applied if necessary. + The result must be freed with PGTYPESchar_free(). @@ -2092,17 +2124,17 @@ int PGTYPESnumeric_cmp(numeric *var1, numeric *var2) - 1, if var1 is bigger than var2 + 1, if var1 is bigger than var2 - -1, if var1 is smaller than var2 + -1, if var1 is smaller than var2 - 0, if var1 and var2 are equal + 0, if var1 and var2 are equal @@ -2119,7 +2151,7 @@ int PGTYPESnumeric_cmp(numeric *var1, numeric *var2) int PGTYPESnumeric_from_int(signed int int_val, numeric *var); This function accepts a variable of type signed int and stores it - in the numeric variable var. Upon success, 0 is returned and + in the numeric variable var. Upon success, 0 is returned and -1 in case of a failure.
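 For example, the basic allocate/convert/print/free cycle looks roughly like this
 (the value is arbitrary and error handling is reduced to the bare minimum):

#include <stdio.h>
#include <pgtypes_numeric.h>

int
main(void)
{
    numeric *num = PGTYPESnumeric_new();
    char    *text;

    /* store the C int 1407 in the heap-allocated numeric variable */
    if (PGTYPESnumeric_from_int(1407, num) < 0)
        return 1;

    /* render with two decimal digits; the returned string is freshly allocated */
    text = PGTYPESnumeric_to_asc(num, 2);
    printf("numeric: %s\n", text);

    PGTYPESchar_free(text);
    PGTYPESnumeric_free(num);
    return 0;
}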
@@ -2134,7 +2166,7 @@ int PGTYPESnumeric_from_int(signed int int_val, numeric *var); int PGTYPESnumeric_from_long(signed long int long_val, numeric *var); This function accepts a variable of type signed long int and stores it - in the numeric variable var. Upon success, 0 is returned and + in the numeric variable var. Upon success, 0 is returned and -1 in case of a failure.
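 Continuing in the same vein, a fragment that builds two numeric values from C long
 integers and compares them with PGTYPESnumeric_cmp (same headers as in the previous
 sketch; the constants are arbitrary):

numeric *a = PGTYPESnumeric_new();
numeric *b = PGTYPESnumeric_new();

PGTYPESnumeric_from_long(123456789L, a);
PGTYPESnumeric_from_long(987654321L, b);

/* returns 1, -1 or 0 as documented above */
if (PGTYPESnumeric_cmp(a, b) < 0)
    printf("a is smaller than b\n");

PGTYPESnumeric_free(a);
PGTYPESnumeric_free(b);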
@@ -2149,7 +2181,7 @@ int PGTYPESnumeric_from_long(signed long int long_val, numeric *var); int PGTYPESnumeric_copy(numeric *src, numeric *dst); This function copies over the value of the variable that - src points to into the variable that dst + src points to into the variable that dst points to. It returns 0 on success and -1 if an error occurs. @@ -2164,7 +2196,7 @@ int PGTYPESnumeric_copy(numeric *src, numeric *dst); int PGTYPESnumeric_from_double(double d, numeric *dst); This function accepts a variable of type double and stores the result - in the variable that dst points to. It returns 0 on success + in the variable that dst points to. It returns 0 on success and -1 if an error occurs. @@ -2179,10 +2211,10 @@ int PGTYPESnumeric_from_double(double d, numeric *dst); int PGTYPESnumeric_to_double(numeric *nv, double *dp) The function converts the numeric value from the variable that - nv points to into the double variable that dp points + nv points to into the double variable that dp points to. It returns 0 on success and -1 if an error occurs, including - overflow. On overflow, the global variable errno will be set - to PGTYPES_NUM_OVERFLOW additionally. + overflow. On overflow, the global variable errno will be set + to PGTYPES_NUM_OVERFLOW additionally. @@ -2196,10 +2228,10 @@ int PGTYPESnumeric_to_double(numeric *nv, double *dp) int PGTYPESnumeric_to_int(numeric *nv, int *ip); The function converts the numeric value from the variable that - nv points to into the integer variable that ip + nv points to into the integer variable that ip points to. It returns 0 on success and -1 if an error occurs, including - overflow. On overflow, the global variable errno will be set - to PGTYPES_NUM_OVERFLOW additionally. + overflow. On overflow, the global variable errno will be set + to PGTYPES_NUM_OVERFLOW additionally. @@ -2213,10 +2245,10 @@ int PGTYPESnumeric_to_int(numeric *nv, int *ip); int PGTYPESnumeric_to_long(numeric *nv, long *lp); The function converts the numeric value from the variable that - nv points to into the long integer variable that - lp points to. It returns 0 on success and -1 if an error + nv points to into the long integer variable that + lp points to. It returns 0 on success and -1 if an error occurs, including overflow. On overflow, the global variable - errno will be set to PGTYPES_NUM_OVERFLOW + errno will be set to PGTYPES_NUM_OVERFLOW additionally. @@ -2231,10 +2263,10 @@ int PGTYPESnumeric_to_long(numeric *nv, long *lp); int PGTYPESnumeric_to_decimal(numeric *src, decimal *dst); The function converts the numeric value from the variable that - src points to into the decimal variable that - dst points to. It returns 0 on success and -1 if an error + src points to into the decimal variable that + dst points to. It returns 0 on success and -1 if an error occurs, including overflow. On overflow, the global variable - errno will be set to PGTYPES_NUM_OVERFLOW + errno will be set to PGTYPES_NUM_OVERFLOW additionally. @@ -2249,8 +2281,8 @@ int PGTYPESnumeric_to_decimal(numeric *src, decimal *dst); int PGTYPESnumeric_from_decimal(decimal *src, numeric *dst); The function converts the decimal value from the variable that - src points to into the numeric variable that - dst points to. It returns 0 on success and -1 if an error + src points to into the numeric variable that + dst points to. It returns 0 on success and -1 if an error occurs. Since the decimal type is implemented as a limited version of the numeric type, overflow cannot occur with this conversion. 
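 The conversion functions back into C types report overflow through errno, so a
 cautious caller checks both the return value and the error code. A sketch of that
 pattern, assuming stdio.h, errno.h and pgtypes_numeric.h are included and that the
 PGTYPES_NUM_OVERFLOW constant is provided by pgtypes_error.h (the input value is
 arbitrary):

numeric *n = PGTYPESnumeric_new();
double   d;

PGTYPESnumeric_from_double(3.14159, n);

errno = 0;
if (PGTYPESnumeric_to_double(n, &d) < 0)
{
    if (errno == PGTYPES_NUM_OVERFLOW)
        fprintf(stderr, "numeric value does not fit into a double\n");
}
else
    printf("round trip: %g\n", d);

PGTYPESnumeric_free(n);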
@@ -2264,13 +2296,13 @@ int PGTYPESnumeric_from_decimal(decimal *src, numeric *dst); The date Type The date type in C enables your programs to deal with data of the SQL type - date. See for the equivalent type in the - PostgreSQL server. + date. See for the equivalent type in the + PostgreSQL server. The following functions can be used to work with the date type: - + PGTYPESdate_from_timestamp @@ -2284,7 +2316,7 @@ date PGTYPESdate_from_timestamp(timestamp dt); - + PGTYPESdate_from_asc @@ -2292,8 +2324,8 @@ date PGTYPESdate_from_timestamp(timestamp dt); date PGTYPESdate_from_asc(char *str, char **endptr); - The function receives a C char* string str and a pointer to - a C char* string endptr. At the moment ECPG always parses + The function receives a C char* string str and a pointer to + a C char* string endptr. At the moment ECPG always parses the complete string and so it currently does not support to store the address of the first invalid character in *endptr. You can safely set endptr to NULL. @@ -2303,7 +2335,7 @@ date PGTYPESdate_from_asc(char *str, char **endptr); currently no variable to change that within ECPG. - shows the allowed input formats. + shows the allowed input formats. Valid Input Formats for <function>PGTYPESdate_from_asc</function> @@ -2389,7 +2421,7 @@ date PGTYPESdate_from_asc(char *str, char **endptr); - + PGTYPESdate_to_asc @@ -2397,14 +2429,15 @@ date PGTYPESdate_from_asc(char *str, char **endptr); char *PGTYPESdate_to_asc(date dDate); - The function receives the date dDate as its only parameter. - It will output the date in the form 1999-01-18, i.e., in the - YYYY-MM-DD format. + The function receives the date dDate as its only parameter. + It will output the date in the form 1999-01-18, i.e., in the + YYYY-MM-DD format. + The result must be freed with PGTYPESchar_free(). - + PGTYPESdate_julmdy @@ -2414,16 +2447,16 @@ char *PGTYPESdate_to_asc(date dDate); void PGTYPESdate_julmdy(date d, int *mdy); - The function receives the date d and a pointer to an array - of 3 integer values mdy. The variable name indicates - the sequential order: mdy[0] will be set to contain the - number of the month, mdy[1] will be set to the value of the - day and mdy[2] will contain the year. + The function receives the date d and a pointer to an array + of 3 integer values mdy. The variable name indicates + the sequential order: mdy[0] will be set to contain the + number of the month, mdy[1] will be set to the value of the + day and mdy[2] will contain the year. - + PGTYPESdate_mdyjul @@ -2432,14 +2465,14 @@ void PGTYPESdate_julmdy(date d, int *mdy); void PGTYPESdate_mdyjul(int *mdy, date *jdate); - The function receives the array of the 3 integers (mdy) as + The function receives the array of the 3 integers (mdy) as its first argument and as its second argument a pointer to a variable of type date that should hold the result of the operation. - + PGTYPESdate_dayofweek @@ -2447,7 +2480,7 @@ void PGTYPESdate_mdyjul(int *mdy, date *jdate); int PGTYPESdate_dayofweek(date d); - The function receives the date variable d as its only + The function receives the date variable d as its only argument and returns an integer that indicates the day of the week for this date. @@ -2491,7 +2524,7 @@ int PGTYPESdate_dayofweek(date d); - + PGTYPESdate_today @@ -2499,13 +2532,13 @@ int PGTYPESdate_dayofweek(date d); void PGTYPESdate_today(date *d); - The function receives a pointer to a date variable (d) + The function receives a pointer to a date variable (d) that it sets to the current date. 
- + PGTYPESdate_fmt_asc @@ -2514,9 +2547,9 @@ void PGTYPESdate_today(date *d); int PGTYPESdate_fmt_asc(date dDate, char *fmtstring, char *outbuf); - The function receives the date to convert (dDate), the - format mask (fmtstring) and the string that will hold the - textual representation of the date (outbuf). + The function receives the date to convert (dDate), the + format mask (fmtstring) and the string that will hold the + textual representation of the date (outbuf). On success, 0 is returned and a negative value if an error occurred. @@ -2558,7 +2591,7 @@ int PGTYPESdate_fmt_asc(date dDate, char *fmtstring, char *outbuf); All other characters are copied 1:1 to the output string. - indicates a few possible formats. This will give + indicates a few possible formats. This will give you an idea of how to use this function. All output lines are based on the same date: November 23, 1959. @@ -2626,7 +2659,7 @@ int PGTYPESdate_fmt_asc(date dDate, char *fmtstring, char *outbuf); - + PGTYPESdate_defmt_asc @@ -2637,9 +2670,9 @@ int PGTYPESdate_defmt_asc(date *d, char *fmt, char *str); The function receives a pointer to the date value that should hold the - result of the operation (d), the format mask to use for - parsing the date (fmt) and the C char* string containing - the textual representation of the date (str). The textual + result of the operation (d), the format mask to use for + parsing the date (fmt) and the C char* string containing + the textual representation of the date (str). The textual representation is expected to match the format mask. However you do not need to have a 1:1 mapping of the string to the format mask. The function only analyzes the sequential order and looks for the literals @@ -2649,7 +2682,7 @@ int PGTYPESdate_defmt_asc(date *d, char *fmt, char *str); day. - indicates a few possible formats. This will give + indicates a few possible formats. This will give you an idea of how to use this function.
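 A short fragment tying the two directions together (the format mask and input string
 are only examples; PGTYPESchar_free is the release function described earlier):

date  d;
char *text;

/* mainly the order of dd, mm and yy(yy) in the mask matters,
   not a character-by-character match with the input */
PGTYPESdate_defmt_asc(&d, "dd-mm-yyyy", "23-11-1959");

text = PGTYPESdate_to_asc(d);
printf("parsed: %s\n", text);       /* 1959-11-23 */
PGTYPESchar_free(text);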
@@ -2741,13 +2774,13 @@ int PGTYPESdate_defmt_asc(date *d, char *fmt, char *str); The timestamp Type The timestamp type in C enables your programs to deal with data of the SQL - type timestamp. See for the equivalent - type in the PostgreSQL server. + type timestamp. See for the equivalent + type in the PostgreSQL server. The following functions can be used to work with the timestamp type: - + PGTYPEStimestamp_from_asc @@ -2756,8 +2789,8 @@ int PGTYPESdate_defmt_asc(date *d, char *fmt, char *str); timestamp PGTYPEStimestamp_from_asc(char *str, char **endptr); - The function receives the string to parse (str) and a - pointer to a C char* (endptr). + The function receives the string to parse (str) and a + pointer to a C char* (endptr). At the moment ECPG always parses the complete string and so it currently does not support to store the address of the first invalid character in *endptr. @@ -2765,19 +2798,19 @@ timestamp PGTYPEStimestamp_from_asc(char *str, char **endptr); The function returns the parsed timestamp on success. On error, - PGTYPESInvalidTimestamp is returned and errno is - set to PGTYPES_TS_BAD_TIMESTAMP. See for important notes on this value. + PGTYPESInvalidTimestamp is returned and errno is + set to PGTYPES_TS_BAD_TIMESTAMP. See for important notes on this value. In general, the input string can contain any combination of an allowed date specification, a whitespace character and an allowed time specification. Note that time zones are not supported by ECPG. It can parse them but does not apply any calculation as the - PostgreSQL server does for example. Timezone + PostgreSQL server does for example. Timezone specifiers are silently discarded. - contains a few examples for input strings. + contains a few examples for input strings.
Valid Input Formats for <function>PGTYPEStimestamp_from_asc</function> @@ -2811,7 +2844,7 @@ timestamp PGTYPEStimestamp_from_asc(char *str, char **endptr); - + PGTYPEStimestamp_to_asc @@ -2819,14 +2852,15 @@ timestamp PGTYPEStimestamp_from_asc(char *str, char **endptr); char *PGTYPEStimestamp_to_asc(timestamp tstamp); - The function receives the timestamp tstamp as + The function receives the timestamp tstamp as its only argument and returns an allocated string that contains the textual representation of the timestamp. + The result must be freed with PGTYPESchar_free(). - + PGTYPEStimestamp_current @@ -2835,12 +2869,12 @@ char *PGTYPEStimestamp_to_asc(timestamp tstamp); void PGTYPEStimestamp_current(timestamp *ts); The function retrieves the current timestamp and saves it into the - timestamp variable that ts points to. + timestamp variable that ts points to. - + PGTYPEStimestamp_fmt_asc @@ -2849,8 +2883,8 @@ void PGTYPEStimestamp_current(timestamp *ts); int PGTYPEStimestamp_fmt_asc(timestamp *ts, char *output, int str_len, char *fmtstr); The function receives a pointer to the timestamp to convert as its - first argument (ts), a pointer to the output buffer - (output), the maximal length that has been allocated for + first argument (ts), a pointer to the output buffer + (output), the maximal length that has been allocated for the output buffer (str_len) and the format mask to use for the conversion (fmtstr). @@ -2861,7 +2895,7 @@ int PGTYPEStimestamp_fmt_asc(timestamp *ts, char *output, int str_len, char *fmt You can use the following format specifiers for the format mask. The format specifiers are the same ones that are used in the - strftime function in libc. Any + strftime function in libc. Any non-format specifier will be copied into the output buffer. -216 (ECPG_ARRAY_INSERT) @@ -5275,7 +5323,6 @@ while (1) -]]> -220 (ECPG_NO_CONN) @@ -5430,8 +5477,8 @@ while (1) - + -602 (ECPG_WARNING_UNKNOWN_PORTAL) @@ -5567,8 +5614,8 @@ EXEC SQL INCLUDE "filename"; Similar to the directive #define that is known from C, embedded SQL has a similar concept: -EXEC SQL DEFINE name; -EXEC SQL DEFINE name value; +EXEC SQL DEFINE name; +EXEC SQL DEFINE name value; So you can define a name: @@ -5579,7 +5626,7 @@ EXEC SQL DEFINE HAVE_FEATURE; EXEC SQL DEFINE MYNUMBER 12; EXEC SQL DEFINE MYSTRING 'abc'; - Use undef to remove a previous definition: + Use undef to remove a previous definition: EXEC SQL UNDEF MYNUMBER; @@ -5589,15 +5636,15 @@ EXEC SQL UNDEF MYNUMBER; Of course you can continue to use the C versions #define and #undef in your embedded SQL program. The difference is where your defined values get evaluated. If you use EXEC SQL - DEFINE then the ecpg preprocessor evaluates the defines and substitutes + DEFINE then the ecpg preprocessor evaluates the defines and substitutes the values. For example if you write: EXEC SQL DEFINE MYNUMBER 12; ... EXEC SQL UPDATE Tbl SET col = MYNUMBER; - then ecpg will already do the substitution and your C compiler will never - see any name or identifier MYNUMBER. Note that you cannot use + then ecpg will already do the substitution and your C compiler will never + see any name or identifier MYNUMBER. Note that you cannot use #define for a constant that you are going to use in an embedded SQL query because in this case the embedded SQL precompiler is not able to see this declaration. 
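 A minimal illustration of the difference just described (the define name and the
 statement are arbitrary):

EXEC SQL BEGIN DECLARE SECTION;
int val;
EXEC SQL END DECLARE SECTION;

EXEC SQL DEFINE MYLIMIT 10;

/* ecpg substitutes MYLIMIT before the C compiler ever runs, so the
   generated C file contains the literal 10 inside the statement text;
   a #define'd name in the same position would be left untouched */
EXEC SQL SELECT MYLIMIT + 1 INTO :val;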
@@ -5611,23 +5658,23 @@ EXEC SQL UPDATE Tbl SET col = MYNUMBER; - EXEC SQL ifdef name; + EXEC SQL ifdef name; - Checks a name and processes subsequent lines if - name has been created with EXEC SQL define - name. + Checks a name and processes subsequent lines if + name has been created with EXEC SQL define + name. - EXEC SQL ifndef name; + EXEC SQL ifndef name; - Checks a name and processes subsequent lines if - name has not been created with - EXEC SQL define name. + Checks a name and processes subsequent lines if + name has not been created with + EXEC SQL define name. @@ -5637,19 +5684,19 @@ EXEC SQL UPDATE Tbl SET col = MYNUMBER; Starts processing an alternative section to a section introduced by - either EXEC SQL ifdef name or - EXEC SQL ifndef name. + either EXEC SQL ifdef name or + EXEC SQL ifndef name. - EXEC SQL elif name; + EXEC SQL elif name; - Checks name and starts an alternative section if - name has been created with EXEC SQL define - name. + Checks name and starts an alternative section if + name has been created with EXEC SQL define + name. @@ -5699,7 +5746,7 @@ EXEC SQL endif; The preprocessor program is called ecpg and is - included in a normal PostgreSQL installation. + included in a normal PostgreSQL installation. Embedded SQL programs are typically named with an extension .pgc. If you have a program file called prog1.pgc, you can preprocess it by simply @@ -5719,8 +5766,8 @@ ecpg prog1.pgc cc -c prog1.c The generated C source files include header files from the - PostgreSQL installation, so if you installed - PostgreSQL in a location that is not searched by + PostgreSQL installation, so if you installed + PostgreSQL in a location that is not searched by default, you have to add an option such as -I/usr/local/pgsql/include to the compilation command line. @@ -5759,7 +5806,7 @@ ECPG = ecpg The complete syntax of the ecpg command is - detailed in . + detailed in . @@ -5795,10 +5842,10 @@ ECPG = ecpg - On Windows, if the ecpg libraries and an application are + On Windows, if the ecpg libraries and an application are compiled with different flags, this function call will crash the application because the internal representation of the - FILE pointers differ. Specifically, + FILE pointers differ. Specifically, multithreaded/single-threaded, release/debug, and static/dynamic flags should be the same for the library and all applications using that library. @@ -5827,7 +5874,7 @@ ECPG = ecpg ECPGtransactionStatus(const char *connection_name) returns the current transaction status of the given connection identified by connection_name. - See and libpq's PQtransactionStatus() for details about the returned status codes. + See and libpq's PQtransactionStatus() for details about the returned status codes. @@ -5836,7 +5883,7 @@ ECPG = ecpg ECPGstatus(int lineno, const char* connection_name) returns true if you are connected to a database and false if not. - connection_name can be NULL + connection_name can be NULL if a single connection is being used. @@ -5859,8 +5906,8 @@ ECPG = ecpg For more details about the ECPGget_PGconn(), see - . For information about the large - object function interface, see . + . For information about the large + object function interface, see . @@ -5870,7 +5917,7 @@ ECPG = ecpg - shows an example program that + shows an example program that illustrates how to create, write, and read a large object in an ECPG application. 
@@ -5899,6 +5946,7 @@ main(void) memset(buf, 1, buflen); EXEC SQL CONNECT TO testdb AS con1; + EXEC SQL SELECT pg_catalog.set_config('search_path', '', false); EXEC SQL COMMIT; conn = ECPGget_PGconn("con1"); printf("conn = %p\n", conn); @@ -5989,7 +6037,7 @@ main(void) A safe way to use the embedded SQL code in a C++ application is hiding the ECPG calls in a C module, which the C++ application code calls into to access the database, and linking that together with - the rest of the C++ code. See + the rest of the C++ code. See about that. @@ -6028,6 +6076,7 @@ class TestCpp TestCpp::TestCpp() { EXEC SQL CONNECT TO testdb1; + EXEC SQL SELECT pg_catalog.set_config('search_path', '', false); EXEC SQL COMMIT; } void Test::test() @@ -6107,6 +6156,7 @@ void db_connect() { EXEC SQL CONNECT TO testdb1; + EXEC SQL SELECT pg_catalog.set_config('search_path', '', false); EXEC SQL COMMIT; } void @@ -6209,10 +6259,10 @@ main(void) To build the application, proceed as follows. Convert - test_mod.pgc into test_mod.c by + test_mod.pgc into test_mod.c by running ecpg, and generate - test_mod.o by compiling - test_mod.c with the C compiler: + test_mod.o by compiling + test_mod.c with the C compiler: ecpg -o test_mod.c test_mod.pgc cc -c test_mod.c -o test_mod.o @@ -6220,16 +6270,16 @@ cc -c test_mod.c -o test_mod.o - Next, generate test_cpp.o by compiling - test_cpp.cpp with the C++ compiler: + Next, generate test_cpp.o by compiling + test_cpp.cpp with the C++ compiler: c++ -c test_cpp.cpp -o test_cpp.o - Finally, link these object files, test_cpp.o - and test_mod.o, into one executable, using the C++ + Finally, link these object files, test_cpp.o + and test_mod.o, into one executable, using the C++ compiler driver: c++ test_cpp.o test_mod.o -lecpg -o test_cpp @@ -6244,7 +6294,7 @@ c++ test_cpp.o test_mod.o -lecpg -o test_cpp This section describes all SQL commands that are specific to embedded SQL. Also refer to the SQL commands listed - in , which can also be used in + in , which can also be used in embedded SQL, unless stated otherwise. @@ -6256,7 +6306,7 @@ c++ test_cpp.o test_mod.o -lecpg -o test_cpp -ALLOCATE DESCRIPTOR name +ALLOCATE DESCRIPTOR name @@ -6280,7 +6330,7 @@ ALLOCATE DESCRIPTOR name - name + name A name of SQL descriptor, case sensitive. This can be an SQL @@ -6312,9 +6362,9 @@ EXEC SQL ALLOCATE DESCRIPTOR mydesc; See Also - - - + + + @@ -6348,10 +6398,10 @@ DATABASE connection_target - connection_target + connection_target - connection_target + connection_target specifies the target server of the connection on one of several forms. @@ -6408,7 +6458,7 @@ DATABASE connection_target - connection_object + connection_object An optional identifier for the connection, so that it can be @@ -6419,7 +6469,7 @@ DATABASE connection_target - connection_user + connection_user The user name for the database connection. 
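 For instance, the URI-style target forms listed above can also be written out
 literally (host, port, database and user names are placeholders; the set_config call
 follows the search_path advice added elsewhere in this patch):

/* TCP form, with an explicit connection name and user */
EXEC SQL CONNECT TO tcp:postgresql://localhost:5432/testdb AS con1 USER testuser;
EXEC SQL SELECT pg_catalog.set_config('search_path', '', false);
EXEC SQL COMMIT;

/* Unix-domain socket form */
EXEC SQL CONNECT TO unix:postgresql://localhost/testdb AS con2 USER testuser;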
@@ -6500,12 +6550,14 @@ EXEC SQL END DECLARE SECTION; ECPGdebug(1, stderr); EXEC SQL CONNECT TO :dbname USER :user; + EXEC SQL SELECT pg_catalog.set_config('search_path', '', false); EXEC SQL COMMIT; EXEC SQL SELECT version() INTO :ver; EXEC SQL DISCONNECT; printf("version: %s\n", ver); EXEC SQL CONNECT TO :connection USER :user; + EXEC SQL SELECT pg_catalog.set_config('search_path', '', false); EXEC SQL COMMIT; EXEC SQL SELECT version() INTO :ver; EXEC SQL DISCONNECT; @@ -6531,8 +6583,8 @@ EXEC SQL END DECLARE SECTION; See Also - - + + @@ -6545,7 +6597,7 @@ EXEC SQL END DECLARE SECTION; -DEALLOCATE DESCRIPTOR name +DEALLOCATE DESCRIPTOR name @@ -6563,7 +6615,7 @@ DEALLOCATE DESCRIPTOR name - name + name The name of the descriptor which is going to be deallocated. @@ -6596,9 +6648,9 @@ EXEC SQL DEALLOCATE DESCRIPTOR mydesc; See Also - - - + + + @@ -6611,8 +6663,8 @@ EXEC SQL DEALLOCATE DESCRIPTOR mydesc; -DECLARE cursor_name [ BINARY ] [ INSENSITIVE ] [ [ NO ] SCROLL ] CURSOR [ { WITH | WITHOUT } HOLD ] FOR prepared_name -DECLARE cursor_name [ BINARY ] [ INSENSITIVE ] [ [ NO ] SCROLL ] CURSOR [ { WITH | WITHOUT } HOLD ] FOR query +DECLARE cursor_name [ BINARY ] [ INSENSITIVE ] [ [ NO ] SCROLL ] CURSOR [ { WITH | WITHOUT } HOLD ] FOR prepared_name +DECLARE cursor_name [ BINARY ] [ INSENSITIVE ] [ [ NO ] SCROLL ] CURSOR [ { WITH | WITHOUT } HOLD ] FOR query @@ -6637,7 +6689,7 @@ DECLARE cursor_name [ BINARY ] [ IN - cursor_name + cursor_name A cursor name, case sensitive. This can be an SQL identifier @@ -6647,7 +6699,7 @@ DECLARE cursor_name [ BINARY ] [ IN - prepared_name + prepared_name The name of a prepared query, either as an SQL identifier or a @@ -6660,8 +6712,8 @@ DECLARE cursor_name [ BINARY ] [ IN query - A or - command which will provide the + A or + command which will provide the rows to be returned by the cursor. @@ -6670,7 +6722,7 @@ DECLARE cursor_name [ BINARY ] [ IN For the meaning of the cursor options, - see . + see . @@ -6707,9 +6759,9 @@ EXEC SQL DECLARE cur1 CURSOR FOR stmt1; See Also - - - + + + @@ -6722,9 +6774,9 @@ EXEC SQL DECLARE cur1 CURSOR FOR stmt1; -DESCRIBE [ OUTPUT ] prepared_name USING [ SQL ] DESCRIPTOR descriptor_name -DESCRIBE [ OUTPUT ] prepared_name INTO [ SQL ] DESCRIPTOR descriptor_name -DESCRIBE [ OUTPUT ] prepared_name INTO sqlda_name +DESCRIBE [ OUTPUT ] prepared_name USING [ SQL ] DESCRIPTOR descriptor_name +DESCRIBE [ OUTPUT ] prepared_name INTO [ SQL ] DESCRIPTOR descriptor_name +DESCRIBE [ OUTPUT ] prepared_name INTO sqlda_name @@ -6743,7 +6795,7 @@ DESCRIBE [ OUTPUT ] prepared_name I - prepared_name + prepared_name The name of a prepared statement. This can be an SQL @@ -6753,7 +6805,7 @@ DESCRIBE [ OUTPUT ] prepared_name I - descriptor_name + descriptor_name A descriptor name. It is case sensitive. It can be an SQL @@ -6763,7 +6815,7 @@ DESCRIBE [ OUTPUT ] prepared_name I - sqlda_name + sqlda_name The name of an SQLDA variable. 
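 A compact usage sketch using the SQL descriptor form (the statement text and names
 are illustrative; the available descriptor items are covered under GET DESCRIPTOR
 below):

EXEC SQL BEGIN DECLARE SECTION;
int   ncols;
char *stmt = "SELECT datname, oid FROM pg_database";
EXEC SQL END DECLARE SECTION;

EXEC SQL ALLOCATE DESCRIPTOR mydesc;
EXEC SQL PREPARE stmt1 FROM :stmt;

/* ask the server to describe the output columns of the prepared query */
EXEC SQL DESCRIBE stmt1 INTO SQL DESCRIPTOR mydesc;
EXEC SQL GET DESCRIPTOR mydesc :ncols = COUNT;

EXEC SQL DEALLOCATE DESCRIPTOR mydesc;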
@@ -6797,8 +6849,8 @@ EXEC SQL DEALLOCATE DESCRIPTOR mydesc; See Also - - + + @@ -6811,7 +6863,7 @@ EXEC SQL DEALLOCATE DESCRIPTOR mydesc; -DISCONNECT connection_name +DISCONNECT connection_name DISCONNECT [ CURRENT ] DISCONNECT DEFAULT DISCONNECT ALL @@ -6832,7 +6884,7 @@ DISCONNECT ALL - connection_name + connection_name A database connection name established by @@ -6907,8 +6959,8 @@ main(void) See Also - - + + @@ -6921,7 +6973,7 @@ main(void) -EXECUTE IMMEDIATE string +EXECUTE IMMEDIATE string @@ -6940,7 +6992,7 @@ EXECUTE IMMEDIATE string - string + string A literal C string or a host variable containing the SQL @@ -6982,8 +7034,8 @@ EXEC SQL EXECUTE IMMEDIATE :command; -GET DESCRIPTOR descriptor_name :cvariable = descriptor_header_item [, ... ] -GET DESCRIPTOR descriptor_name VALUE column_number :cvariable = descriptor_item [, ... ] +GET DESCRIPTOR descriptor_name :cvariable = descriptor_header_item [, ... ] +GET DESCRIPTOR descriptor_name VALUE column_number :cvariable = descriptor_item [, ... ] @@ -7014,7 +7066,7 @@ GET DESCRIPTOR descriptor_name VALU - descriptor_name + descriptor_name A descriptor name. @@ -7023,7 +7075,7 @@ GET DESCRIPTOR descriptor_name VALU - descriptor_header_item + descriptor_header_item A token identifying which header information item to retrieve. @@ -7034,7 +7086,7 @@ GET DESCRIPTOR descriptor_name VALU - column_number + column_number The number of the column about which information is to be @@ -7044,18 +7096,18 @@ GET DESCRIPTOR descriptor_name VALU - descriptor_item + descriptor_item A token identifying which item of information about a column - to retrieve. See for + to retrieve. See for a list of supported items. - cvariable + cvariable A host variable that will receive the data retrieved from the @@ -7093,7 +7145,7 @@ EXEC SQL GET DESCRIPTOR d VALUE 2 :d_data = DATA; Here is an example for a whole procedure of - executing SELECT current_database(); and showing the number of + executing SELECT current_database(); and showing the number of columns, the column data length, and the column data: int @@ -7106,6 +7158,7 @@ EXEC SQL BEGIN DECLARE SECTION; EXEC SQL END DECLARE SECTION; EXEC SQL CONNECT TO testdb AS con1 USER testuser; + EXEC SQL SELECT pg_catalog.set_config('search_path', '', false); EXEC SQL COMMIT; EXEC SQL ALLOCATE DESCRIPTOR d; /* Declare, open a cursor, and assign a descriptor to the cursor */ @@ -7156,8 +7209,8 @@ d_data = testdb See Also - - + + @@ -7170,9 +7223,9 @@ d_data = testdb -OPEN cursor_name -OPEN cursor_name USING value [, ... ] -OPEN cursor_name USING SQL DESCRIPTOR descriptor_name +OPEN cursor_name +OPEN cursor_name USING value [, ... ] +OPEN cursor_name USING SQL DESCRIPTOR descriptor_name @@ -7194,7 +7247,7 @@ OPEN cursor_name USING SQL DESCRIPT - cursor_name + cursor_name The name of the cursor to be opened. This can be an SQL @@ -7204,7 +7257,7 @@ OPEN cursor_name USING SQL DESCRIPT - value + value A value to be bound to a placeholder in the cursor. This can @@ -7215,7 +7268,7 @@ OPEN cursor_name USING SQL DESCRIPT - descriptor_name + descriptor_name The name of a descriptor containing values to be bound to the @@ -7250,8 +7303,8 @@ EXEC SQL OPEN :curname1; See Also - - + + @@ -7264,7 +7317,7 @@ EXEC SQL OPEN :curname1; -PREPARE name FROM string +PREPARE name FROM string @@ -7274,8 +7327,8 @@ PREPARE name FROM PREPARE prepares a statement dynamically specified as a string for execution. This is different from the - direct SQL statement , which can also - be used in embedded programs. 
The + direct SQL statement , which can also + be used in embedded programs. The command is used to execute either kind of prepared statement. @@ -7285,7 +7338,7 @@ PREPARE name FROM - prepared_name + prepared_name An identifier for the prepared query. @@ -7294,7 +7347,7 @@ PREPARE name FROM - string + string A literal C string or a host variable containing a preparable @@ -7330,7 +7383,7 @@ EXEC SQL EXECUTE foo USING SQL DESCRIPTOR indesc INTO SQL DESCRIPTOR outdesc; See Also - + @@ -7377,7 +7430,7 @@ SET AUTOCOMMIT { = | TO } { ON | OFF } -SET CONNECTION [ TO | = ] connection_name +SET CONNECTION [ TO | = ] connection_name @@ -7396,7 +7449,7 @@ SET CONNECTION [ TO | = ] connection_name - connection_name + connection_name A database connection name established by @@ -7437,8 +7490,8 @@ EXEC SQL SET CONNECTION = con1; See Also - - + + @@ -7451,8 +7504,8 @@ EXEC SQL SET CONNECTION = con1; -SET DESCRIPTOR descriptor_name descriptor_header_item = value [, ... ] -SET DESCRIPTOR descriptor_name VALUE number descriptor_item = value [, ...] +SET DESCRIPTOR descriptor_name descriptor_header_item = value [, ... ] +SET DESCRIPTOR descriptor_name VALUE number descriptor_item = value [, ...] @@ -7478,7 +7531,7 @@ SET DESCRIPTOR descriptor_name VALU - descriptor_name + descriptor_name A descriptor name. @@ -7487,7 +7540,7 @@ SET DESCRIPTOR descriptor_name VALU - descriptor_header_item + descriptor_header_item A token identifying which header information item to set. @@ -7498,7 +7551,7 @@ SET DESCRIPTOR descriptor_name VALU - number + number The number of the descriptor item to set. The count starts at @@ -7508,18 +7561,18 @@ SET DESCRIPTOR descriptor_name VALU - descriptor_item + descriptor_item A token identifying which item of information to set in the - descriptor. See for a + descriptor. See for a list of supported items. - value + value A value to store into the descriptor item. This can be an SQL @@ -7553,8 +7606,8 @@ EXEC SQL SET DESCRIPTOR indesc VALUE 2 INDICATOR = :val2null, DATA = :val2; See Also - - + + @@ -7567,7 +7620,7 @@ EXEC SQL SET DESCRIPTOR indesc VALUE 2 INDICATOR = :val2null, DATA = :val2; -TYPE type_name IS ctype +TYPE type_name IS ctype @@ -7591,7 +7644,7 @@ TYPE type_name IS - type_name + type_name The name for the new type. It must be a valid C type name. @@ -7600,7 +7653,7 @@ TYPE type_name IS - ctype + ctype A C type specification. @@ -7663,6 +7716,7 @@ EXEC SQL BEGIN DECLARE SECTION; EXEC SQL END DECLARE SECTION; EXEC SQL CONNECT TO testdb AS con1; + EXEC SQL SELECT pg_catalog.set_config('search_path', '', false); EXEC SQL COMMIT; EXEC SQL SELECT current_database(), 256 INTO :t:t_ind LIMIT 1; @@ -7724,7 +7778,7 @@ VAR varname IS ctype - varname + varname A C variable name. @@ -7733,7 +7787,7 @@ VAR varname IS ctype - ctype + ctype A C type specification. @@ -7771,7 +7825,7 @@ EXEC SQL VAR a IS int; -WHENEVER { NOT FOUND | SQLERROR | SQLWARNING } action +WHENEVER { NOT FOUND | SQLERROR | SQLWARNING } action @@ -7788,7 +7842,7 @@ WHENEVER { NOT FOUND | SQLERROR | SQLWARNING } ac Parameters - See for a description of the + See for a description of the parameters. 
@@ -7799,6 +7853,7 @@ WHENEVER { NOT FOUND | SQLERROR | SQLWARNING } ac EXEC SQL WHENEVER NOT FOUND CONTINUE; EXEC SQL WHENEVER NOT FOUND DO BREAK; +EXEC SQL WHENEVER NOT FOUND DO CONTINUE; EXEC SQL WHENEVER SQLWARNING SQLPRINT; EXEC SQL WHENEVER SQLWARNING DO warn(); EXEC SQL WHENEVER SQLERROR sqlprint; @@ -7818,6 +7873,7 @@ int main(void) { EXEC SQL CONNECT TO testdb AS con1; + EXEC SQL SELECT pg_catalog.set_config('search_path', '', false); EXEC SQL COMMIT; EXEC SQL ALLOCATE DESCRIPTOR d; EXEC SQL DECLARE cur CURSOR FOR SELECT current_database(), 'hoge', 256; EXEC SQL OPEN cur; @@ -7857,10 +7913,10 @@ main(void) <productname>Informix</productname> Compatibility Mode - ecpg can be run in a so-called Informix compatibility mode. If + ecpg can be run in a so-called Informix compatibility mode. If this mode is active, it tries to behave as if it were the Informix precompiler for Informix E/SQL. Generally spoken this will allow you to use - the dollar sign instead of the EXEC SQL primitive to introduce + the dollar sign instead of the EXEC SQL primitive to introduce embedded SQL commands: $int j = 3; @@ -7882,11 +7938,11 @@ $COMMIT; - There are two compatibility modes: INFORMIX, INFORMIX_SE + There are two compatibility modes: INFORMIX, INFORMIX_SE When linking programs that use this compatibility mode, remember to link - against libcompat that is shipped with ECPG. + against libcompat that is shipped with ECPG. Besides the previously explained syntactic sugar, the Informix compatibility @@ -7904,7 +7960,7 @@ $COMMIT; no drop-in replacement if you are using Informix at the moment. Moreover, some of the data types are different. For example, PostgreSQL's datetime and interval types do not - know about ranges like for example YEAR TO MINUTE so you won't + know about ranges like for example YEAR TO MINUTE so you won't find support in ECPG for that either. @@ -7929,11 +7985,11 @@ EXEC SQL FETCH MYCUR INTO :userid; - CLOSE DATABASE + CLOSE DATABASE This statement closes the current connection. In fact, this is a - synonym for ECPG's DISCONNECT CURRENT: + synonym for ECPG's DISCONNECT CURRENT: $CLOSE DATABASE; /* close the current connection */ EXEC SQL CLOSE DATABASE; @@ -7942,12 +7998,12 @@ EXEC SQL CLOSE DATABASE; - FREE cursor_name + FREE cursor_name Due to the differences how ECPG works compared to Informix's ESQL/C (i.e. which steps are purely grammar transformations and which steps rely on the underlying run-time library) - there is no FREE cursor_name statement in ECPG. This is because in ECPG, + there is no FREE cursor_name statement in ECPG. This is because in ECPG, DECLARE CURSOR doesn't translate to a function call into the run-time library that uses to the cursor name. This means that there's no run-time bookkeeping of SQL cursors in the ECPG run-time library, only in the PostgreSQL server. @@ -7955,10 +8011,10 @@ EXEC SQL CLOSE DATABASE; - FREE statement_name + FREE statement_name - FREE statement_name is a synonym for DEALLOCATE PREPARE statement_name. + FREE statement_name is a synonym for DEALLOCATE PREPARE statement_name. @@ -7970,7 +8026,7 @@ EXEC SQL CLOSE DATABASE; Informix-compatible SQLDA Descriptor Areas Informix-compatible mode supports a different structure than the one described in - . See below: + . See below: struct sqlvar_compat { @@ -8015,16 +8071,16 @@ typedef struct sqlda_compat sqlda_t; - sqld + sqld - The number of fields in the SQLDA descriptor. + The number of fields in the SQLDA descriptor. - sqlvar + sqlvar Pointer to the per-field properties. 
@@ -8033,7 +8089,7 @@ typedef struct sqlda_compat sqlda_t; - desc_name + desc_name Unused, filled with zero-bytes. @@ -8042,7 +8098,7 @@ typedef struct sqlda_compat sqlda_t; - desc_occ + desc_occ Size of the allocated structure. @@ -8051,7 +8107,7 @@ typedef struct sqlda_compat sqlda_t; - desc_next + desc_next Pointer to the next SQLDA structure if the result set contains more than one record. @@ -8060,7 +8116,7 @@ typedef struct sqlda_compat sqlda_t; - reserved + reserved Unused pointer, contains NULL. Kept for Informix-compatibility. @@ -8075,7 +8131,7 @@ typedef struct sqlda_compat sqlda_t; - sqltype + sqltype Type of the field. Constants are in sqltypes.h @@ -8084,7 +8140,7 @@ typedef struct sqlda_compat sqlda_t; - sqllen + sqllen Length of the field data. @@ -8093,7 +8149,7 @@ typedef struct sqlda_compat sqlda_t; - sqldata + sqldata Pointer to the field data. The pointer is of char * type, @@ -8114,7 +8170,7 @@ switch (sqldata->sqlvar[i].sqltype) - sqlind + sqlind Pointer to the NULL indicator. If returned by DESCRIBE or FETCH then it's always a valid pointer. @@ -8130,7 +8186,7 @@ if (*(int2 *)sqldata->sqlvar[i].sqlind != 0) - sqlname + sqlname Name of the field. 0-terminated string. @@ -8139,16 +8195,16 @@ if (*(int2 *)sqldata->sqlvar[i].sqlind != 0) - sqlformat + sqlformat - Reserved in Informix, value of PQfformat() for the field. + Reserved in Informix, value of PQfformat() for the field. - sqlitype + sqlitype Type of the NULL indicator data. It's always SQLSMINT when returning data from the server. @@ -8159,7 +8215,7 @@ if (*(int2 *)sqldata->sqlvar[i].sqlind != 0) - sqlilen + sqlilen Length of the NULL indicator data. @@ -8168,23 +8224,23 @@ if (*(int2 *)sqldata->sqlvar[i].sqlind != 0) - sqlxid + sqlxid - Extended type of the field, result of PQftype(). + Extended type of the field, result of PQftype(). - sqltypename - sqltypelen - sqlownerlen - sqlsourcetype - sqlownername - sqlsourceid - sqlflags - sqlreserved + sqltypename + sqltypelen + sqlownerlen + sqlsourcetype + sqlownername + sqlsourceid + sqlflags + sqlreserved Unused. @@ -8193,7 +8249,7 @@ if (*(int2 *)sqldata->sqlvar[i].sqlind != 0) - sqlilongdata + sqlilongdata It equals to sqldata if sqllen is larger than 32kB. @@ -8238,7 +8294,7 @@ EXEC SQL INCLUDE sqlda.h; free(sqlda); /* The main structure is all to be free(), * sqlda and sqlda->sqlvar is in one allocated area */ - For more information, see the sqlda.h header and the + For more information, see the sqlda.h header and the src/interfaces/ecpg/test/compat_informix/sqlda.pgc regression test. @@ -8248,7 +8304,7 @@ EXEC SQL INCLUDE sqlda.h; - decadd + decadd Add two decimal type values. @@ -8256,19 +8312,19 @@ EXEC SQL INCLUDE sqlda.h; int decadd(decimal *arg1, decimal *arg2, decimal *sum); The function receives a pointer to the first operand of type decimal - (arg1), a pointer to the second operand of type decimal - (arg2) and a pointer to a value of type decimal that will - contain the sum (sum). On success, the function returns 0. - ECPG_INFORMIX_NUM_OVERFLOW is returned in case of overflow and - ECPG_INFORMIX_NUM_UNDERFLOW in case of underflow. -1 is returned for - other failures and errno is set to the respective errno number of the + (arg1), a pointer to the second operand of type decimal + (arg2) and a pointer to a value of type decimal that will + contain the sum (sum). On success, the function returns 0. + ECPG_INFORMIX_NUM_OVERFLOW is returned in case of overflow and + ECPG_INFORMIX_NUM_UNDERFLOW in case of underflow. 
-1 is returned for + other failures and errno is set to the respective errno number of the pgtypeslib. - deccmp + deccmp Compare two variables of type decimal. @@ -8276,25 +8332,25 @@ int decadd(decimal *arg1, decimal *arg2, decimal *sum); int deccmp(decimal *arg1, decimal *arg2); The function receives a pointer to the first decimal value - (arg1), a pointer to the second decimal value - (arg2) and returns an integer value that indicates which is + (arg1), a pointer to the second decimal value + (arg2) and returns an integer value that indicates which is the bigger value. - 1, if the value that arg1 points to is bigger than the - value that var2 points to + 1, if the value that arg1 points to is bigger than the + value that var2 points to - -1, if the value that arg1 points to is smaller than the - value that arg2 points to + -1, if the value that arg1 points to is smaller than the + value that arg2 points to - 0, if the value that arg1 points to and the value that - arg2 points to are equal + 0, if the value that arg1 points to and the value that + arg2 points to are equal @@ -8303,7 +8359,7 @@ int deccmp(decimal *arg1, decimal *arg2); - deccopy + deccopy Copy a decimal value. @@ -8311,15 +8367,15 @@ int deccmp(decimal *arg1, decimal *arg2); void deccopy(decimal *src, decimal *target); The function receives a pointer to the decimal value that should be - copied as the first argument (src) and a pointer to the - target structure of type decimal (target) as the second + copied as the first argument (src) and a pointer to the + target structure of type decimal (target) as the second argument. - deccvasc + deccvasc Convert a value from its ASCII representation into a decimal type. @@ -8327,8 +8383,8 @@ void deccopy(decimal *src, decimal *target); int deccvasc(char *cp, int len, decimal *np); The function receives a pointer to string that contains the string - representation of the number to be converted (cp) as well - as its length len. np is a pointer to the + representation of the number to be converted (cp) as well + as its length len. np is a pointer to the decimal value that saves the result of the operation. @@ -8341,18 +8397,18 @@ int deccvasc(char *cp, int len, decimal *np); The function returns 0 on success. If overflow or underflow occurred, - ECPG_INFORMIX_NUM_OVERFLOW or - ECPG_INFORMIX_NUM_UNDERFLOW is returned. If the ASCII + ECPG_INFORMIX_NUM_OVERFLOW or + ECPG_INFORMIX_NUM_UNDERFLOW is returned. If the ASCII representation could not be parsed, - ECPG_INFORMIX_BAD_NUMERIC is returned or - ECPG_INFORMIX_BAD_EXPONENT if this problem occurred while + ECPG_INFORMIX_BAD_NUMERIC is returned or + ECPG_INFORMIX_BAD_EXPONENT if this problem occurred while parsing the exponent. - deccvdbl + deccvdbl Convert a value of type double to a value of type decimal. @@ -8360,8 +8416,8 @@ int deccvasc(char *cp, int len, decimal *np); int deccvdbl(double dbl, decimal *np); The function receives the variable of type double that should be - converted as its first argument (dbl). As the second - argument (np), the function receives a pointer to the + converted as its first argument (dbl). As the second + argument (np), the function receives a pointer to the decimal variable that should hold the result of the operation. @@ -8372,7 +8428,7 @@ int deccvdbl(double dbl, decimal *np); - deccvint + deccvint Convert a value of type int to a value of type decimal. 
@@ -8380,8 +8436,8 @@ int deccvdbl(double dbl, decimal *np); int deccvint(int in, decimal *np); The function receives the variable of type int that should be - converted as its first argument (in). As the second - argument (np), the function receives a pointer to the + converted as its first argument (in). As the second + argument (np), the function receives a pointer to the decimal variable that should hold the result of the operation. @@ -8392,7 +8448,7 @@ int deccvint(int in, decimal *np); - deccvlong + deccvlong Convert a value of type long to a value of type decimal. @@ -8400,8 +8456,8 @@ int deccvint(int in, decimal *np); int deccvlong(long lng, decimal *np); The function receives the variable of type long that should be - converted as its first argument (lng). As the second - argument (np), the function receives a pointer to the + converted as its first argument (lng). As the second + argument (np), the function receives a pointer to the decimal variable that should hold the result of the operation. @@ -8412,7 +8468,7 @@ int deccvlong(long lng, decimal *np); - decdiv + decdiv Divide two variables of type decimal. @@ -8420,15 +8476,15 @@ int deccvlong(long lng, decimal *np); int decdiv(decimal *n1, decimal *n2, decimal *result); The function receives pointers to the variables that are the first - (n1) and the second (n2) operands and - calculates n1/n2. result is a + (n1) and the second (n2) operands and + calculates n1/n2. result is a pointer to the variable that should hold the result of the operation. On success, 0 is returned and a negative value if the division fails. If overflow or underflow occurred, the function returns - ECPG_INFORMIX_NUM_OVERFLOW or - ECPG_INFORMIX_NUM_UNDERFLOW respectively. If an attempt to + ECPG_INFORMIX_NUM_OVERFLOW or + ECPG_INFORMIX_NUM_UNDERFLOW respectively. If an attempt to divide by zero is observed, the function returns ECPG_INFORMIX_DIVIDE_ZERO. @@ -8436,7 +8492,7 @@ int decdiv(decimal *n1, decimal *n2, decimal *result); - decmul + decmul Multiply two decimal values. @@ -8444,21 +8500,21 @@ int decdiv(decimal *n1, decimal *n2, decimal *result); int decmul(decimal *n1, decimal *n2, decimal *result); The function receives pointers to the variables that are the first - (n1) and the second (n2) operands and - calculates n1*n2. result is a + (n1) and the second (n2) operands and + calculates n1*n2. result is a pointer to the variable that should hold the result of the operation. On success, 0 is returned and a negative value if the multiplication fails. If overflow or underflow occurred, the function returns - ECPG_INFORMIX_NUM_OVERFLOW or - ECPG_INFORMIX_NUM_UNDERFLOW respectively. + ECPG_INFORMIX_NUM_OVERFLOW or + ECPG_INFORMIX_NUM_UNDERFLOW respectively. - decsub + decsub Subtract one decimal value from another. @@ -8466,21 +8522,21 @@ int decmul(decimal *n1, decimal *n2, decimal *result); int decsub(decimal *n1, decimal *n2, decimal *result); The function receives pointers to the variables that are the first - (n1) and the second (n2) operands and - calculates n1-n2. result is a + (n1) and the second (n2) operands and + calculates n1-n2. result is a pointer to the variable that should hold the result of the operation. On success, 0 is returned and a negative value if the subtraction fails. If overflow or underflow occurred, the function returns - ECPG_INFORMIX_NUM_OVERFLOW or - ECPG_INFORMIX_NUM_UNDERFLOW respectively. + ECPG_INFORMIX_NUM_OVERFLOW or + ECPG_INFORMIX_NUM_UNDERFLOW respectively. 
- dectoasc + dectoasc Convert a variable of type decimal to its ASCII representation in a C @@ -8489,28 +8545,28 @@ int decsub(decimal *n1, decimal *n2, decimal *result); int dectoasc(decimal *np, char *cp, int len, int right) The function receives a pointer to a variable of type decimal - (np) that it converts to its textual representation. - cp is the buffer that should hold the result of the - operation. The parameter right specifies, how many digits + (np) that it converts to its textual representation. + cp is the buffer that should hold the result of the + operation. The parameter right specifies, how many digits right of the decimal point should be included in the output. The result will be rounded to this number of decimal digits. Setting - right to -1 indicates that all available decimal digits + right to -1 indicates that all available decimal digits should be included in the output. If the length of the output buffer, - which is indicated by len is not sufficient to hold the + which is indicated by len is not sufficient to hold the textual representation including the trailing zero byte, only a - single * character is stored in the result and -1 is + single * character is stored in the result and -1 is returned. - The function returns either -1 if the buffer cp was too - small or ECPG_INFORMIX_OUT_OF_MEMORY if memory was + The function returns either -1 if the buffer cp was too + small or ECPG_INFORMIX_OUT_OF_MEMORY if memory was exhausted. - dectodbl + dectodbl Convert a variable of type decimal to a double. @@ -8518,8 +8574,8 @@ int dectoasc(decimal *np, char *cp, int len, int right) int dectodbl(decimal *np, double *dblp); The function receives a pointer to the decimal value to convert - (np) and a pointer to the double variable that - should hold the result of the operation (dblp). + (np) and a pointer to the double variable that + should hold the result of the operation (dblp). On success, 0 is returned and a negative value if the conversion @@ -8529,7 +8585,7 @@ int dectodbl(decimal *np, double *dblp); - dectoint + dectoint Convert a variable to type decimal to an integer. @@ -8537,25 +8593,25 @@ int dectodbl(decimal *np, double *dblp); int dectoint(decimal *np, int *ip); The function receives a pointer to the decimal value to convert - (np) and a pointer to the integer variable that - should hold the result of the operation (ip). + (np) and a pointer to the integer variable that + should hold the result of the operation (ip). On success, 0 is returned and a negative value if the conversion - failed. If an overflow occurred, ECPG_INFORMIX_NUM_OVERFLOW + failed. If an overflow occurred, ECPG_INFORMIX_NUM_OVERFLOW is returned. Note that the ECPG implementation differs from the Informix implementation. Informix limits an integer to the range from -32767 to 32767, while the limits in the ECPG implementation depend on the - architecture (-INT_MAX .. INT_MAX). + architecture (-INT_MAX .. INT_MAX). - dectolong + dectolong Convert a variable to type decimal to a long integer. @@ -8563,12 +8619,12 @@ int dectoint(decimal *np, int *ip); int dectolong(decimal *np, long *lngp); The function receives a pointer to the decimal value to convert - (np) and a pointer to the long variable that - should hold the result of the operation (lngp). + (np) and a pointer to the long variable that + should hold the result of the operation (lngp). On success, 0 is returned and a negative value if the conversion - failed. If an overflow occurred, ECPG_INFORMIX_NUM_OVERFLOW + failed. 
If an overflow occurred, ECPG_INFORMIX_NUM_OVERFLOW is returned. @@ -8576,13 +8632,13 @@ int dectolong(decimal *np, long *lngp); implementation. Informix limits a long integer to the range from -2,147,483,647 to 2,147,483,647, while the limits in the ECPG implementation depend on the architecture (-LONG_MAX .. - LONG_MAX). + LONG_MAX). - rdatestr + rdatestr Converts a date to a C char* string. @@ -8590,8 +8646,8 @@ int dectolong(decimal *np, long *lngp); int rdatestr(date d, char *str); The function receives two arguments, the first one is the date to - convert (d) and the second one is a pointer to the target - string. The output format is always yyyy-mm-dd, so you need + convert (d) and the second one is a pointer to the target + string. The output format is always yyyy-mm-dd, so you need to allocate at least 11 bytes (including the zero-byte terminator) for the string. @@ -8609,7 +8665,7 @@ int rdatestr(date d, char *str); - rstrdate + rstrdate Parse the textual representation of a date. @@ -8617,41 +8673,41 @@ int rdatestr(date d, char *str); int rstrdate(char *str, date *d); The function receives the textual representation of the date to convert - (str) and a pointer to a variable of type date - (d). This function does not allow you to specify a format + (str) and a pointer to a variable of type date + (d). This function does not allow you to specify a format mask. It uses the default format mask of Informix which is - mm/dd/yyyy. Internally, this function is implemented by - means of rdefmtdate. Therefore, rstrdate is + mm/dd/yyyy. Internally, this function is implemented by + means of rdefmtdate. Therefore, rstrdate is not faster and if you have the choice you should opt for - rdefmtdate which allows you to specify the format mask + rdefmtdate which allows you to specify the format mask explicitly. - The function returns the same values as rdefmtdate. + The function returns the same values as rdefmtdate. - rtoday + rtoday Get the current date. void rtoday(date *d); - The function receives a pointer to a date variable (d) + The function receives a pointer to a date variable (d) that it sets to the current date. - Internally this function uses the + Internally this function uses the function. - rjulmdy + rjulmdy Extract the values for the day, the month and the year from a variable @@ -8659,24 +8715,24 @@ void rtoday(date *d); int rjulmdy(date d, short mdy[3]); - The function receives the date d and a pointer to an array - of 3 short integer values mdy. The variable name indicates - the sequential order: mdy[0] will be set to contain the - number of the month, mdy[1] will be set to the value of the - day and mdy[2] will contain the year. + The function receives the date d and a pointer to an array + of 3 short integer values mdy. The variable name indicates + the sequential order: mdy[0] will be set to contain the + number of the month, mdy[1] will be set to the value of the + day and mdy[2] will contain the year. The function always returns 0 at the moment. - Internally the function uses the + Internally the function uses the function. - rdefmtdate + rdefmtdate Use a format mask to convert a character string to a value of type @@ -8685,9 +8741,9 @@ int rjulmdy(date d, short mdy[3]); int rdefmtdate(date *d, char *fmt, char *str); The function receives a pointer to the date value that should hold the - result of the operation (d), the format mask to use for - parsing the date (fmt) and the C char* string containing - the textual representation of the date (str). 
The textual + result of the operation (d), the format mask to use for + parsing the date (fmt) and the C char* string containing + the textual representation of the date (str). The textual representation is expected to match the format mask. However you do not need to have a 1:1 mapping of the string to the format mask. The function only analyzes the sequential order and looks for the literals @@ -8706,32 +8762,32 @@ int rdefmtdate(date *d, char *fmt, char *str); - ECPG_INFORMIX_ENOSHORTDATE - The date does not contain + ECPG_INFORMIX_ENOSHORTDATE - The date does not contain delimiters between day, month and year. In this case the input string must be exactly 6 or 8 bytes long but isn't. - ECPG_INFORMIX_ENOTDMY - The format string did not + ECPG_INFORMIX_ENOTDMY - The format string did not correctly indicate the sequential order of year, month and day. - ECPG_INFORMIX_BAD_DAY - The input string does not + ECPG_INFORMIX_BAD_DAY - The input string does not contain a valid day. - ECPG_INFORMIX_BAD_MONTH - The input string does not + ECPG_INFORMIX_BAD_MONTH - The input string does not contain a valid month. - ECPG_INFORMIX_BAD_YEAR - The input string does not + ECPG_INFORMIX_BAD_YEAR - The input string does not contain a valid year. @@ -8739,14 +8795,14 @@ int rdefmtdate(date *d, char *fmt, char *str); Internally this function is implemented to use the function. See the reference there for a + linkend="pgtypesdatedefmtasc"/> function. See the reference there for a table of example input. - rfmtdate + rfmtdate Convert a variable of type date to its textual representation using a @@ -8754,22 +8810,22 @@ int rdefmtdate(date *d, char *fmt, char *str); int rfmtdate(date d, char *fmt, char *str); - The function receives the date to convert (d), the format - mask (fmt) and the string that will hold the textual - representation of the date (str). + The function receives the date to convert (d), the format + mask (fmt) and the string that will hold the textual + representation of the date (str). On success, 0 is returned and a negative value if an error occurred. - Internally this function uses the + Internally this function uses the function, see the reference there for examples. - rmdyjul + rmdyjul Create a date value from an array of 3 short integers that specify the @@ -8778,7 +8834,7 @@ int rfmtdate(date d, char *fmt, char *str); int rmdyjul(short mdy[3], date *d); The function receives the array of the 3 short integers - (mdy) and a pointer to a variable of type date that should + (mdy) and a pointer to a variable of type date that should hold the result of the operation. @@ -8786,20 +8842,20 @@ int rmdyjul(short mdy[3], date *d); Internally the function is implemented to use the function . + linkend="pgtypesdatemdyjul"/>. - rdayofweek + rdayofweek Return a number representing the day of the week for a date value. int rdayofweek(date d); - The function receives the date variable d as its only + The function receives the date variable d as its only argument and returns an integer that indicates the day of the week for this date. @@ -8842,13 +8898,13 @@ int rdayofweek(date d); Internally the function is implemented to use the function . + linkend="pgtypesdatedayofweek"/>. - dtcurrent + dtcurrent Retrieve the current timestamp. @@ -8856,13 +8912,13 @@ int rdayofweek(date d); void dtcurrent(timestamp *ts); The function retrieves the current timestamp and saves it into the - timestamp variable that ts points to. + timestamp variable that ts points to. 
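[Editor's note, not part of the upstream patch: a short example can likewise tie the date routines together. The sketch below uses rtoday, rjulmdy, rdefmtdate, rfmtdate and rdayofweek with the signatures documented above. The header name sqltypes.h is an assumption about the Informix-compatibility headers; programs preprocessed with ecpg -C INFORMIX typically have the declarations pulled in for them.]

/*
 * Hedged sketch: obtaining, parsing, splitting and re-formatting dates
 * with the functions described above.  The header name (sqltypes.h) is
 * an assumption about the Informix-compatibility installation.
 */
#include <stdio.h>
#include <sqltypes.h>

int
main(void)
{
    date  d;
    short mdy[3];
    char  buf[32];              /* dd.mm.yyyy needs at least 11 bytes */

    rtoday(&d);                 /* current date */
    rjulmdy(d, mdy);            /* mdy[0] = month, mdy[1] = day, mdy[2] = year */
    printf("today: %02d/%02d/%d\n", mdy[0], mdy[1], mdy[2]);

    /* parse with an explicit format mask, then re-format and inspect */
    if (rdefmtdate(&d, "yyyy-mm-dd", "2018-11-11") == 0)
    {
        rfmtdate(d, "dd.mm.yyyy", buf);
        printf("reformatted: %s\n", buf);            /* 11.11.2018 */
        printf("day of week: %d\n", rdayofweek(d));  /* 0 = Sunday .. 6 = Saturday */
    }

    return 0;
}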
- dtcvasc + dtcvasc Parses a timestamp from its textual representation @@ -8870,9 +8926,9 @@ void dtcurrent(timestamp *ts); int dtcvasc(char *str, timestamp *ts); - The function receives the string to parse (str) and a + The function receives the string to parse (str) and a pointer to the timestamp variable that should hold the result of the - operation (ts). + operation (ts). The function returns 0 on success and a negative value in case of @@ -8880,14 +8936,14 @@ int dtcvasc(char *str, timestamp *ts); Internally this function uses the function. See the reference there + linkend="pgtypestimestampfromasc"/> function. See the reference there for a table with example inputs. - dtcvfmtasc + dtcvfmtasc Parses a timestamp from its textual representation @@ -8895,14 +8951,14 @@ int dtcvasc(char *str, timestamp *ts); dtcvfmtasc(char *inbuf, char *fmtstr, timestamp *dtvalue) - The function receives the string to parse (inbuf), the - format mask to use (fmtstr) and a pointer to the timestamp + The function receives the string to parse (inbuf), the + format mask to use (fmtstr) and a pointer to the timestamp variable that should hold the result of the operation - (dtvalue). + (dtvalue). This function is implemented by means of the function. See the documentation + linkend="pgtypestimestampdefmtasc"/> function. See the documentation there for a list of format specifiers that can be used. @@ -8913,7 +8969,7 @@ dtcvfmtasc(char *inbuf, char *fmtstr, timestamp *dtvalue) - dtsub + dtsub Subtract one timestamp from another and return a variable of type @@ -8921,9 +8977,9 @@ dtcvfmtasc(char *inbuf, char *fmtstr, timestamp *dtvalue) int dtsub(timestamp *ts1, timestamp *ts2, interval *iv); - The function will subtract the timestamp variable that ts2 - points to from the timestamp variable that ts1 points to - and will store the result in the interval variable that iv + The function will subtract the timestamp variable that ts2 + points to from the timestamp variable that ts1 points to + and will store the result in the interval variable that iv points to. @@ -8934,7 +8990,7 @@ int dtsub(timestamp *ts1, timestamp *ts2, interval *iv); - dttoasc + dttoasc Convert a timestamp variable to a C char* string. @@ -8942,8 +8998,8 @@ int dtsub(timestamp *ts1, timestamp *ts2, interval *iv); int dttoasc(timestamp *ts, char *output); The function receives a pointer to the timestamp variable to convert - (ts) and the string that should hold the result of the - operation (output). It converts ts to its + (ts) and the string that should hold the result of the + operation (output). It converts ts to its textual representation according to the SQL standard, which is be YYYY-MM-DD HH:MM:SS. @@ -8955,7 +9011,7 @@ int dttoasc(timestamp *ts, char *output); - dttofmtasc + dttofmtasc Convert a timestamp variable to a C char* using a format mask. @@ -8963,8 +9019,8 @@ int dttoasc(timestamp *ts, char *output); int dttofmtasc(timestamp *ts, char *output, int str_len, char *fmtstr); The function receives a pointer to the timestamp to convert as its - first argument (ts), a pointer to the output buffer - (output), the maximal length that has been allocated for + first argument (ts), a pointer to the output buffer + (output), the maximal length that has been allocated for the output buffer (str_len) and the format mask to use for the conversion (fmtstr). @@ -8974,14 +9030,14 @@ int dttofmtasc(timestamp *ts, char *output, int str_len, char *fmtstr); Internally, this function uses the function. 
See the reference there for + linkend="pgtypestimestampfmtasc"/> function. See the reference there for information on what format mask specifiers can be used. - intoasc + intoasc Convert an interval variable to a C char* string. @@ -8989,8 +9045,8 @@ int dttofmtasc(timestamp *ts, char *output, int str_len, char *fmtstr); int intoasc(interval *i, char *str); The function receives a pointer to the interval variable to convert - (i) and the string that should hold the result of the - operation (str). It converts i to its + (i) and the string that should hold the result of the + operation (str). It converts i to its textual representation according to the SQL standard, which is be YYYY-MM-DD HH:MM:SS. @@ -9002,7 +9058,7 @@ int intoasc(interval *i, char *str); - rfmtlong + rfmtlong Convert a long integer value to its textual representation using a @@ -9010,9 +9066,9 @@ int intoasc(interval *i, char *str); int rfmtlong(long lng_val, char *fmt, char *outbuf); - The function receives the long value lng_val, the format - mask fmt and a pointer to the output buffer - outbuf. It converts the long value according to the format + The function receives the long value lng_val, the format + mask fmt and a pointer to the output buffer + outbuf. It converts the long value according to the format mask to its textual representation. @@ -9088,7 +9144,7 @@ int rfmtlong(long lng_val, char *fmt, char *outbuf); - rupshift + rupshift Convert a string to upper case. @@ -9102,7 +9158,7 @@ void rupshift(char *str); - byleng + byleng Return the number of characters in a string without counting trailing @@ -9111,15 +9167,15 @@ void rupshift(char *str); int byleng(char *str, int len); The function expects a fixed-length string as its first argument - (str) and its length as its second argument - (len). It returns the number of significant characters, + (str) and its length as its second argument + (len). It returns the number of significant characters, that is the length of the string without trailing blanks. - ldchar + ldchar Copy a fixed-length string into a null-terminated string. @@ -9127,10 +9183,10 @@ int byleng(char *str, int len); void ldchar(char *src, int len, char *dest); The function receives the fixed-length string to copy - (src), its length (len) and a pointer to the - destination memory (dest). Note that you need to reserve at - least len+1 bytes for the string that dest - points to. The function copies at most len bytes to the new + (src), its length (len) and a pointer to the + destination memory (dest). Note that you need to reserve at + least len+1 bytes for the string that dest + points to. The function copies at most len bytes to the new location (less if the source string has trailing blanks) and adds the null-terminator. @@ -9138,7 +9194,7 @@ void ldchar(char *src, int len, char *dest); - rgetmsg + rgetmsg @@ -9150,7 +9206,7 @@ int rgetmsg(int msgnum, char *s, int maxsize); - rtypalign + rtypalign @@ -9162,7 +9218,7 @@ int rtypalign(int offset, int type); - rtypmsize + rtypmsize @@ -9174,7 +9230,7 @@ int rtypmsize(int type, int len); - rtypwidth + rtypwidth @@ -9186,7 +9242,7 @@ int rtypwidth(int sqltype, int sqllen); - rsetnull + rsetnull Set a variable to NULL. @@ -9270,17 +9326,17 @@ rsetnull(CINTTYPE, (char *) &i); - risnull + risnull Test if a variable is NULL. int risnull(int t, char *ptr); - The function receives the type of the variable to test (t) - as well a pointer to this variable (ptr). 
Note that the + The function receives the type of the variable to test (t) + as well a pointer to this variable (ptr). Note that the latter needs to be cast to a char*. See the function for a list of possible variable types. + linkend="rsetnull"/> for a list of possible variable types. Here is an example of how to use this function: @@ -9312,7 +9368,7 @@ risnull(CINTTYPE, (char *) &i); values. - ECPG_INFORMIX_NUM_OVERFLOW + ECPG_INFORMIX_NUM_OVERFLOW Functions return this value if an overflow occurred in a @@ -9323,7 +9379,7 @@ risnull(CINTTYPE, (char *) &i); - ECPG_INFORMIX_NUM_UNDERFLOW + ECPG_INFORMIX_NUM_UNDERFLOW Functions return this value if an underflow occurred in a calculation. @@ -9333,7 +9389,7 @@ risnull(CINTTYPE, (char *) &i); - ECPG_INFORMIX_DIVIDE_ZERO + ECPG_INFORMIX_DIVIDE_ZERO Functions return this value if an attempt to divide by zero is @@ -9343,7 +9399,7 @@ risnull(CINTTYPE, (char *) &i); - ECPG_INFORMIX_BAD_YEAR + ECPG_INFORMIX_BAD_YEAR Functions return this value if a bad value for a year was found while @@ -9354,7 +9410,7 @@ risnull(CINTTYPE, (char *) &i); - ECPG_INFORMIX_BAD_MONTH + ECPG_INFORMIX_BAD_MONTH Functions return this value if a bad value for a month was found while @@ -9365,7 +9421,7 @@ risnull(CINTTYPE, (char *) &i); - ECPG_INFORMIX_BAD_DAY + ECPG_INFORMIX_BAD_DAY Functions return this value if a bad value for a day was found while @@ -9376,7 +9432,7 @@ risnull(CINTTYPE, (char *) &i); - ECPG_INFORMIX_ENOSHORTDATE + ECPG_INFORMIX_ENOSHORTDATE Functions return this value if a parsing routine needs a short date @@ -9387,7 +9443,7 @@ risnull(CINTTYPE, (char *) &i); - ECPG_INFORMIX_DATE_CONVERT + ECPG_INFORMIX_DATE_CONVERT Functions return this value if an error occurred during date @@ -9398,7 +9454,7 @@ risnull(CINTTYPE, (char *) &i); - ECPG_INFORMIX_OUT_OF_MEMORY + ECPG_INFORMIX_OUT_OF_MEMORY Functions return this value if memory was exhausted during @@ -9409,18 +9465,18 @@ risnull(CINTTYPE, (char *) &i); - ECPG_INFORMIX_ENOTDMY + ECPG_INFORMIX_ENOTDMY Functions return this value if a parsing routine was supposed to get a - format mask (like mmddyy) but not all fields were listed + format mask (like mmddyy) but not all fields were listed correctly. Internally it is defined as -1212 (the Informix definition). - ECPG_INFORMIX_BAD_NUMERIC + ECPG_INFORMIX_BAD_NUMERIC Functions return this value either if a parsing routine cannot parse @@ -9433,7 +9489,7 @@ risnull(CINTTYPE, (char *) &i); - ECPG_INFORMIX_BAD_EXPONENT + ECPG_INFORMIX_BAD_EXPONENT Functions return this value if a parsing routine cannot parse @@ -9444,7 +9500,7 @@ risnull(CINTTYPE, (char *) &i); - ECPG_INFORMIX_BAD_DATE + ECPG_INFORMIX_BAD_DATE Functions return this value if a parsing routine cannot parse @@ -9455,7 +9511,7 @@ risnull(CINTTYPE, (char *) &i); - ECPG_INFORMIX_EXTRA_CHARS + ECPG_INFORMIX_EXTRA_CHARS Functions return this value if a parsing routine is passed extra @@ -9498,7 +9554,7 @@ risnull(CINTTYPE, (char *) &i); Variable substitution occurs when a symbol starts with a colon (:). The variable with that name is looked up among the variables that were previously declared within a - EXEC SQL DECLARE section. + EXEC SQL DECLARE section. @@ -9546,10 +9602,10 @@ risnull(CINTTYPE, (char *) &i); - ECPGt_EOIT + ECPGt_EOIT - An enum telling that there are no more input + An enum telling that there are no more input variables. @@ -9566,10 +9622,10 @@ risnull(CINTTYPE, (char *) &i); - ECPGt_EORT + ECPGt_EORT - An enum telling that there are no more variables. 
+ An enum telling that there are no more variables. @@ -9651,7 +9707,7 @@ risnull(CINTTYPE, (char *) &i); EXEC SQL OPEN cursor; is not copied to the output. Instead, the cursor's - DECLARE command is used at the position of the OPEN command + DECLARE command is used at the position of the OPEN command because it indeed opens the cursor. diff --git a/doc/src/sgml/errcodes.sgml b/doc/src/sgml/errcodes.sgml index 40b4191c10..6fd16f643e 100644 --- a/doc/src/sgml/errcodes.sgml +++ b/doc/src/sgml/errcodes.sgml @@ -11,13 +11,13 @@ All messages emitted by the PostgreSQL server are assigned five-character error codes that follow the SQL - standard's conventions for SQLSTATE codes. Applications + standard's conventions for SQLSTATE codes. Applications that need to know which error condition has occurred should usually test the error code, rather than looking at the textual error message. The error codes are less likely to change across - PostgreSQL releases, and also are not subject to + PostgreSQL releases, and also are not subject to change due to localization of error messages. Note that some, but - not all, of the error codes produced by PostgreSQL + not all, of the error codes produced by PostgreSQL are defined by the SQL standard; some additional error codes for conditions not defined by the standard have been invented or borrowed from other databases. @@ -32,20 +32,20 @@ - lists all the error codes defined in + lists all the error codes defined in PostgreSQL &version;. (Some are not actually used at present, but are defined by the SQL standard.) The error classes are also shown. For each error class there is a - standard error code having the last three characters - 000. This code is used only for error conditions that fall + standard error code having the last three characters + 000. This code is used only for error conditions that fall within the class but do not have any more-specific code assigned. The symbol shown in the column Condition Name is - the condition name to use in PL/pgSQL. Condition + the condition name to use in PL/pgSQL. Condition names can be written in either upper or lower case. (Note that - PL/pgSQL does not recognize warning, as opposed to error, + PL/pgSQL does not recognize warning, as opposed to error, condition names; those are classes 00, 01, and 02.) @@ -53,10 +53,10 @@ For some types of errors, the server reports the name of a database object (a table, table column, data type, or constraint) associated with the error; for example, the name of the unique constraint that caused a - unique_violation error. Such names are supplied in separate + unique_violation error. Such names are supplied in separate fields of the error report message so that applications need not try to extract them from the possibly-localized human-readable text of the message. - As of PostgreSQL 9.3, complete coverage for this feature + As of PostgreSQL 9.3, complete coverage for this feature exists only for errors in SQLSTATE class 23 (integrity constraint violation), but this is likely to be expanded in future. @@ -66,9 +66,9 @@ <productname>PostgreSQL</productname> Error Codes - - - + + + diff --git a/doc/src/sgml/event-trigger.sgml b/doc/src/sgml/event-trigger.sgml index 3ed14f08c0..d273dc5b58 100644 --- a/doc/src/sgml/event-trigger.sgml +++ b/doc/src/sgml/event-trigger.sgml @@ -8,8 +8,8 @@ - To supplement the trigger mechanism discussed in , - PostgreSQL also provides event triggers. 
Unlike regular + To supplement the trigger mechanism discussed in , + PostgreSQL also provides event triggers. Unlike regular triggers, which are attached to a single table and capture only DML events, event triggers are global to a particular database and are capable of capturing DDL events. @@ -28,67 +28,67 @@ An event trigger fires whenever the event with which it is associated occurs in the database in which it is defined. Currently, the only supported events are - ddl_command_start, - ddl_command_end, - table_rewrite - and sql_drop. + ddl_command_start, + ddl_command_end, + table_rewrite + and sql_drop. Support for additional events may be added in future releases. - The ddl_command_start event occurs just before the - execution of a CREATE, ALTER, DROP, - SECURITY LABEL, - COMMENT, GRANT or REVOKE + The ddl_command_start event occurs just before the + execution of a CREATE, ALTER, DROP, + SECURITY LABEL, + COMMENT, GRANT or REVOKE command. No check whether the affected object exists or doesn't exist is performed before the event trigger fires. As an exception, however, this event does not occur for DDL commands targeting shared objects — databases, roles, and tablespaces — or for commands targeting event triggers themselves. The event trigger mechanism does not support these object types. - ddl_command_start also occurs just before the execution of a + ddl_command_start also occurs just before the execution of a SELECT INTO command, since this is equivalent to CREATE TABLE AS. - The ddl_command_end event occurs just after the execution of - this same set of commands. To obtain more details on the DDL + The ddl_command_end event occurs just after the execution of + this same set of commands. To obtain more details on the DDL operations that took place, use the set-returning function - pg_event_trigger_ddl_commands() from the - ddl_command_end event trigger code (see - ). Note that the trigger fires + pg_event_trigger_ddl_commands() from the + ddl_command_end event trigger code (see + ). Note that the trigger fires after the actions have taken place (but before the transaction commits), and thus the system catalogs can be read as already changed. - The sql_drop event occurs just before the - ddl_command_end event trigger for any operation that drops + The sql_drop event occurs just before the + ddl_command_end event trigger for any operation that drops database objects. To list the objects that have been dropped, use the - set-returning function pg_event_trigger_dropped_objects() from the - sql_drop event trigger code (see - ). Note that + set-returning function pg_event_trigger_dropped_objects() from the + sql_drop event trigger code (see + ). Note that the trigger is executed after the objects have been deleted from the system catalogs, so it's not possible to look them up anymore. - The table_rewrite event occurs just before a table is - rewritten by some actions of the commands ALTER TABLE and - ALTER TYPE. While other + The table_rewrite event occurs just before a table is + rewritten by some actions of the commands ALTER TABLE and + ALTER TYPE. While other control statements are available to rewrite a table, like CLUSTER and VACUUM, - the table_rewrite event is not triggered by them. + the table_rewrite event is not triggered by them. Event triggers (like other functions) cannot be executed in an aborted transaction. Thus, if a DDL command fails with an error, any associated - ddl_command_end triggers will not be executed. 
Conversely, - if a ddl_command_start trigger fails with an error, no + ddl_command_end triggers will not be executed. Conversely, + if a ddl_command_start trigger fails with an error, no further event triggers will fire, and no attempt will be made to execute - the command itself. Similarly, if a ddl_command_end trigger + the command itself. Similarly, if a ddl_command_end trigger fails with an error, the effects of the DDL statement will be rolled back, just as they would be in any other case where the containing transaction aborts. @@ -96,11 +96,11 @@ For a complete list of commands supported by the event trigger mechanism, - see . + see . - Event triggers are created using the command . + Event triggers are created using the command . In order to create an event trigger, you must first create a function with the special return type event_trigger. This function need not (and may not) return a value; the return type serves merely as @@ -125,7 +125,7 @@ Event Trigger Firing Matrix - lists all commands + lists all commands for which event triggers are supported. @@ -503,6 +503,14 @@ - + + CREATE STATISTICS + X + X + - + - + + CREATE TABLE X @@ -565,6 +573,7 @@ X - - + CREATE USER MAPPING @@ -742,6 +751,14 @@ - + + DROP STATISTICS + X + X + X + - + + DROP TABLE X @@ -878,14 +895,14 @@ - Event trigger functions must use the version 1 function + Event trigger functions must use the version 1 function manager interface. When a function is called by the event trigger manager, it is not passed - any normal arguments, but it is passed a context pointer - pointing to a EventTriggerData structure. C functions can + any normal arguments, but it is passed a context pointer + pointing to a EventTriggerData structure. C functions can check whether they were called from the event trigger manager or not by executing the macro: @@ -896,10 +913,10 @@ CALLED_AS_EVENT_TRIGGER(fcinfo) ((fcinfo)->context != NULL && IsA((fcinfo)->context, EventTriggerData)) If this returns true, then it is safe to cast - fcinfo->context to type EventTriggerData + fcinfo->context to type EventTriggerData * and make use of the pointed-to - EventTriggerData structure. The function must - not alter the EventTriggerData + EventTriggerData structure. The function must + not alter the EventTriggerData structure or any of the data it points to. @@ -921,7 +938,7 @@ typedef struct EventTriggerData - type + type Always T_EventTriggerData. @@ -930,20 +947,20 @@ typedef struct EventTriggerData - event + event Describes the event for which the function is called, one of "ddl_command_start", "ddl_command_end", "sql_drop", "table_rewrite". - See for the meaning of these + See for the meaning of these events. - parsetree + parsetree A pointer to the parse tree of the command. Check the PostgreSQL @@ -954,7 +971,7 @@ typedef struct EventTriggerData - tag + tag The command tag associated with the event for which the event trigger @@ -966,8 +983,8 @@ typedef struct EventTriggerData - An event trigger function must return a NULL pointer - (not an SQL null value, that is, do not + An event trigger function must return a NULL pointer + (not an SQL null value, that is, do not set isNull true). @@ -982,11 +999,11 @@ typedef struct EventTriggerData - The function noddl raises an exception each time it is called. + The function noddl raises an exception each time it is called. The event trigger definition associated the function with the ddl_command_start event. 
The effect is that all DDL commands (with the exceptions mentioned - in ) are prevented from running. + in ) are prevented from running. @@ -1020,14 +1037,14 @@ noddl(PG_FUNCTION_ARGS) - After you have compiled the source code (see ), + After you have compiled the source code (see ), declare the function and the triggers: CREATE FUNCTION noddl() RETURNS event_trigger AS 'noddl' LANGUAGE C; CREATE EVENT TRIGGER noddl ON ddl_command_start - EXECUTE PROCEDURE noddl(); + EXECUTE FUNCTION noddl(); @@ -1036,9 +1053,9 @@ CREATE EVENT TRIGGER noddl ON ddl_command_start =# \dy List of event triggers - Name | Event | Owner | Enabled | Procedure | Tags --------+-------------------+-------+---------+-----------+------ - noddl | ddl_command_start | dim | enabled | noddl | + Name | Event | Owner | Enabled | Function | Tags +-------+-------------------+-------+---------+----------+------ + noddl | ddl_command_start | dim | enabled | noddl | (1 row) =# CREATE TABLE foo(id serial); @@ -1067,7 +1084,7 @@ COMMIT; A Table Rewrite Event Trigger Example - Thanks to the table_rewrite event, it is possible to implement + Thanks to the table_rewrite event, it is possible to implement a table rewriting policy only allowing the rewrite in maintenance windows. @@ -1112,7 +1129,7 @@ $$; CREATE EVENT TRIGGER no_rewrite_allowed ON table_rewrite - EXECUTE PROCEDURE no_rewrite(); + EXECUTE FUNCTION no_rewrite(); diff --git a/doc/src/sgml/extend.sgml b/doc/src/sgml/extend.sgml index b96ef389a2..695e07fb38 100644 --- a/doc/src/sgml/extend.sgml +++ b/doc/src/sgml/extend.sgml @@ -15,32 +15,32 @@ - functions (starting in ) + functions (starting in ) - aggregates (starting in ) + aggregates (starting in ) - data types (starting in ) + data types (starting in ) - operators (starting in ) + operators (starting in ) - operator classes for indexes (starting in ) + operator classes for indexes (starting in ) - packages of related objects (starting in ) + packages of related objects (starting in ) @@ -106,42 +106,77 @@ composite + + container type + + + + data type + container + + - PostgreSQL data types are divided into base - types, composite types, domains, and pseudo-types. + PostgreSQL data types can be divided into base + types, container types, domains, and pseudo-types. Base Types - Base types are those, like int4, that are - implemented below the level of the SQL language + Base types are those, like integer, that are + implemented below the level of the SQL language (typically in a low-level language such as C). They generally correspond to what are often known as abstract data types. PostgreSQL can only operate on such types through functions provided by the user and only understands the behavior of such types to the extent that the user describes - them. Base types are further subdivided into scalar and array - types. For each scalar type, a corresponding array type is - automatically created that can hold variable-size arrays of that - scalar type. + them. + The built-in base types are described in . + + + + Enumerated (enum) types can be considered as a subcategory of base + types. The main difference is that they can be created using + just SQL commands, without any low-level programming. + Refer to for more information. - Composite Types + Container Types + + + PostgreSQL has three kinds + of container types, which are types that contain multiple + values of other types. These are arrays, composites, and ranges. + + + + Arrays can hold multiple values that are all of the same type. 
An array + type is automatically created for each base type, composite type, range + type, and domain type. But there are no arrays of arrays. So far as + the type system is concerned, multi-dimensional arrays are the same as + one-dimensional arrays. Refer to for more + information. + Composite types, or row types, are created whenever the user creates a table. It is also possible to use to - define a stand-alone composite type with no associated + linkend="sql-createtype"/> to + define a stand-alone composite type with no associated table. A composite type is simply a list of types with associated field names. A value of a composite type is a row or - record of field values. The user can access the component fields - from SQL queries. Refer to - for more information on composite types. + record of field values. Refer to + for more information. + + + + A range type can hold two values of the same type, which are the lower + and upper bounds of the range. Range types are user-created, although + a few built-in ones exist. Refer to + for more information. @@ -149,16 +184,12 @@ Domains - A domain is based on a particular base type and for many purposes - is interchangeable with its base type. However, a domain can - have constraints that restrict its valid values to a subset of - what the underlying base type would allow. - - - - Domains can be created using the SQL command - . - Their creation and use is not discussed in this chapter. + A domain is based on a particular underlying type and for many purposes + is interchangeable with its underlying type. However, a domain can have + constraints that restrict its valid values to a subset of what the + underlying type would allow. Domains are created using + the SQL command . + Refer to for more information. @@ -166,12 +197,12 @@ Pseudo-Types - There are a few pseudo-types for special purposes. - Pseudo-types cannot appear as columns of tables or attributes of - composite types, but they can be used to declare the argument and + There are a few pseudo-types for special purposes. + Pseudo-types cannot appear as columns of tables or components of + container types, but they can be used to declare the argument and result types of functions. This provides a mechanism within the type system to identify special classes of functions. lists the existing + linkend="datatype-pseudotypes-table"/> lists the existing pseudo-types. @@ -188,7 +219,7 @@ - type + data type polymorphic @@ -198,12 +229,12 @@ - Five pseudo-types of special interest are anyelement, - anyarray, anynonarray, anyenum, - and anyrange, - which are collectively called polymorphic types. + Five pseudo-types of special interest are anyelement, + anyarray, anynonarray, anyenum, + and anyrange, + which are collectively called polymorphic types. Any function declared using these types is said to be - a polymorphic function. A polymorphic function can + a polymorphic function. A polymorphic function can operate on many different data types, with the specific data type(s) being determined by the data types actually passed to it in a particular call. @@ -228,10 +259,10 @@ and others declared anyelement, the actual range type in the anyrange positions must be a range whose subtype is the same type appearing in the anyelement positions. - anynonarray is treated exactly the same as anyelement, + anynonarray is treated exactly the same as anyelement, but adds the additional constraint that the actual type must not be an array type. 
- anyenum is treated exactly the same as anyelement, + anyenum is treated exactly the same as anyelement, but adds the additional constraint that the actual type must be an enum type. @@ -240,7 +271,7 @@ Thus, when more than one argument position is declared with a polymorphic type, the net effect is that only certain combinations of actual argument types are allowed. For example, a function declared as - equal(anyelement, anyelement) will take any two input values, + equal(anyelement, anyelement) will take any two input values, so long as they are of the same data type. @@ -251,30 +282,30 @@ result type for that call. For example, if there were not already an array subscripting mechanism, one could define a function that implements subscripting as subscript(anyarray, integer) - returns anyelement. This declaration constrains the actual first + returns anyelement. This declaration constrains the actual first argument to be an array type, and allows the parser to infer the correct result type from the actual first argument's type. Another example - is that a function declared as f(anyarray) returns anyenum + is that a function declared as f(anyarray) returns anyenum will only accept arrays of enum types. - Note that anynonarray and anyenum do not represent + Note that anynonarray and anyenum do not represent separate type variables; they are the same type as anyelement, just with an additional constraint. For - example, declaring a function as f(anyelement, anyenum) - is equivalent to declaring it as f(anyenum, anyenum): + example, declaring a function as f(anyelement, anyenum) + is equivalent to declaring it as f(anyenum, anyenum): both actual arguments have to be the same enum type. A variadic function (one taking a variable number of arguments, as in - ) can be + ) can be polymorphic: this is accomplished by declaring its last parameter as - VARIADIC anyarray. For purposes of argument + VARIADIC anyarray. For purposes of argument matching and determining the actual result type, such a function behaves the same as if you had written the appropriate number of - anynonarray parameters. + anynonarray parameters. @@ -294,32 +325,32 @@ - A useful extension to PostgreSQL typically includes + A useful extension to PostgreSQL typically includes multiple SQL objects; for example, a new data type will require new functions, new operators, and probably new index operator classes. It is helpful to collect all these objects into a single package - to simplify database management. PostgreSQL calls - such a package an extension. To define an extension, - you need at least a script file that contains the - SQL commands to create the extension's objects, and a - control file that specifies a few basic properties + to simplify database management. PostgreSQL calls + such a package an extension. To define an extension, + you need at least a script file that contains the + SQL commands to create the extension's objects, and a + control file that specifies a few basic properties of the extension itself. If the extension includes C code, there will typically also be a shared library file into which the C code has been built. Once you have these files, a simple - command loads the objects into + command loads the objects into your database. 
The main advantage of using an extension, rather than just running the - SQL script to load a bunch of loose objects - into your database, is that PostgreSQL will then + SQL script to load a bunch of loose objects + into your database, is that PostgreSQL will then understand that the objects of the extension go together. You can - drop all the objects with a single - command (no need to maintain a separate uninstall script). - Even more useful, pg_dump knows that it should not + drop all the objects with a single + command (no need to maintain a separate uninstall script). + Even more useful, pg_dump knows that it should not dump the individual member objects of the extension — it will - just include a CREATE EXTENSION command in dumps, instead. + just include a CREATE EXTENSION command in dumps, instead. This vastly simplifies migration to a new version of the extension that might contain more or different objects than the old version. Note however that you must have the extension's control, script, and @@ -327,15 +358,15 @@ - PostgreSQL will not let you drop an individual object + PostgreSQL will not let you drop an individual object contained in an extension, except by dropping the whole extension. Also, while you can change the definition of an extension member object (for example, via CREATE OR REPLACE FUNCTION for a function), bear in mind that the modified definition will not be dumped - by pg_dump. Such a change is usually only sensible if + by pg_dump. Such a change is usually only sensible if you concurrently make the same change in the extension's script file. (But there are special provisions for tables containing configuration - data; see .) + data; see .) In production situations, it's generally better to create an extension update script to perform changes to extension member objects. @@ -346,19 +377,19 @@ statements. The final set of privileges for each object (if any are set) will be stored in the pg_init_privs - system catalog. When pg_dump is used, the - CREATE EXTENSION command will be included in the dump, followed + system catalog. When pg_dump is used, the + CREATE EXTENSION command will be included in the dump, followed by the set of GRANT and REVOKE statements necessary to set the privileges on the objects to what they were at the time the dump was taken. - PostgreSQL does not currently support extension scripts + PostgreSQL does not currently support extension scripts issuing CREATE POLICY or SECURITY LABEL statements. These are expected to be set after the extension has been created. All RLS policies and security labels on extension objects will be - included in dumps created by pg_dump. + included in dumps created by pg_dump. @@ -366,15 +397,15 @@ scripts that adjust the definitions of the SQL objects contained in an extension. For example, if version 1.1 of an extension adds one function and changes the body of another function compared to 1.0, the extension - author can provide an update script that makes just those - two changes. The ALTER EXTENSION UPDATE command can then + author can provide an update script that makes just those + two changes. The ALTER EXTENSION UPDATE command can then be used to apply these changes and track which version of the extension is actually installed in a given database. The kinds of SQL objects that can be members of an extension are shown in - the description of . Notably, objects + the description of . 
Notably, objects that are database-cluster-wide, such as databases, roles, and tablespaces, cannot be extension members since an extension is only known within one database. (Although an extension script is not prohibited from creating @@ -384,7 +415,7 @@ considered members of the extension. Another important point is that schemas can belong to extensions, but not vice versa: an extension as such has an unqualified name and does not - exist within any schema. The extension's member objects, + exist within any schema. The extension's member objects, however, will belong to schemas whenever appropriate for their object types. It may or may not be appropriate for an extension to own the schema(s) its member objects are within. @@ -399,6 +430,32 @@ dropping the whole extension. + + Defining Extension Objects + + + + Widely-distributed extensions should assume little about the database + they occupy. In particular, unless you issued SET search_path = + pg_temp, assume each unqualified name could resolve to an + object that a malicious user has defined. Beware of constructs that + depend on search_path implicitly: IN + and CASE expression WHEN + always select an operator using the search path. In their place, use + OPERATOR(schema.=) ANY + and CASE WHEN expression. + + + + Extension Files @@ -407,25 +464,25 @@ - The command relies on a control + The command relies on a control file for each extension, which must be named the same as the extension - with a suffix of .control, and must be placed in the + with a suffix of .control, and must be placed in the installation's SHAREDIR/extension directory. There - must also be at least one SQL script file, which follows the + must also be at least one SQL script file, which follows the naming pattern - extension--version.sql - (for example, foo--1.0.sql for version 1.0 of - extension foo). By default, the script file(s) are also + extension--version.sql + (for example, foo--1.0.sql for version 1.0 of + extension foo). By default, the script file(s) are also placed in the SHAREDIR/extension directory; but the control file can specify a different directory for the script file(s). The file format for an extension control file is the same as for the - postgresql.conf file, namely a list of - parameter_name = value + postgresql.conf file, namely a list of + parameter_name = value assignments, one per line. Blank lines and comments introduced by - # are allowed. Be sure to quote any value that is not + # are allowed. Be sure to quote any value that is not a single word or number. @@ -438,11 +495,11 @@ directory (string) - The directory containing the extension's SQL script + The directory containing the extension's SQL script file(s). Unless an absolute path is given, the name is relative to the installation's SHAREDIR directory. The default behavior is equivalent to specifying - directory = 'extension'. + directory = 'extension'. @@ -452,9 +509,9 @@ The default version of the extension (the one that will be installed - if no version is specified in CREATE EXTENSION). Although - this can be omitted, that will result in CREATE EXTENSION - failing if no VERSION option appears, so you generally + if no version is specified in CREATE EXTENSION). Although + this can be omitted, that will result in CREATE EXTENSION + failing if no VERSION option appears, so you generally don't want to do that. @@ -468,7 +525,7 @@ when initially creating an extension, but not during extension updates (since that might override user-added comments). 
Alternatively, the extension's comment can be set by writing - a command in the script file. + a command in the script file. @@ -489,11 +546,11 @@ The value of this parameter will be substituted for each occurrence - of MODULE_PATHNAME in the script file(s). If it is not + of MODULE_PATHNAME in the script file(s). If it is not set, no substitution is made. Typically, this is set to - $libdir/shared_library_name and - then MODULE_PATHNAME is used in CREATE - FUNCTION commands for C-language functions, so that the script + $libdir/shared_library_name and + then MODULE_PATHNAME is used in CREATE + FUNCTION commands for C-language functions, so that the script files do not need to hard-wire the name of the shared library. @@ -514,9 +571,9 @@ superuser (boolean) - If this parameter is true (which is the default), + If this parameter is true (which is the default), only superusers can create the extension or update it to a new - version. If it is set to false, just the privileges + version. If it is set to false, just the privileges required to execute the commands in the installation or update script are required. @@ -527,11 +584,11 @@ relocatable (boolean) - An extension is relocatable if it is possible to move + An extension is relocatable if it is possible to move its contained objects into a different schema after initial creation - of the extension. The default is false, i.e. the + of the extension. The default is false, i.e. the extension is not relocatable. - See for more information. + See for more information. @@ -545,7 +602,7 @@ and not any other. The schema parameter is consulted only when initially creating an extension, not during extension updates. - See for more information. + See for more information. @@ -553,45 +610,45 @@ In addition to the primary control file - extension.control, + extension.control, an extension can have secondary control files named in the style - extension--version.control. + extension--version.control. If supplied, these must be located in the script file directory. Secondary control files follow the same format as the primary control file. Any parameters set in a secondary control file override the primary control file when installing or updating to that version of - the extension. However, the parameters directory and - default_version cannot be set in a secondary control file. + the extension. However, the parameters directory and + default_version cannot be set in a secondary control file. - An extension's SQL script files can contain any SQL commands, - except for transaction control commands (BEGIN, - COMMIT, etc) and commands that cannot be executed inside a - transaction block (such as VACUUM). This is because the + An extension's SQL script files can contain any SQL commands, + except for transaction control commands (BEGIN, + COMMIT, etc) and commands that cannot be executed inside a + transaction block (such as VACUUM). This is because the script files are implicitly executed within a transaction block. - An extension's SQL script files can also contain lines - beginning with \echo, which will be ignored (treated as + An extension's SQL script files can also contain lines + beginning with \echo, which will be ignored (treated as comments) by the extension mechanism. This provision is commonly used - to throw an error if the script file is fed to psql - rather than being loaded via CREATE EXTENSION (see example - script in ). 
+ to throw an error if the script file is fed to psql + rather than being loaded via CREATE EXTENSION (see example + script in ). Without that, users might accidentally load the - extension's contents as loose objects rather than as an + extension's contents as loose objects rather than as an extension, a state of affairs that's a bit tedious to recover from. While the script files can contain any characters allowed by the specified encoding, control files should contain only plain ASCII, because there - is no way for PostgreSQL to know what encoding a + is no way for PostgreSQL to know what encoding a control file is in. In practice this is only an issue if you want to use non-ASCII characters in the extension's comment. Recommended - practice in that case is to not use the control file comment - parameter, but instead use COMMENT ON EXTENSION + practice in that case is to not use the control file comment + parameter, but instead use COMMENT ON EXTENSION within a script file to set the comment. @@ -611,14 +668,14 @@ A fully relocatable extension can be moved into another schema at any time, even after it's been loaded into a database. - This is done with the ALTER EXTENSION SET SCHEMA + This is done with the ALTER EXTENSION SET SCHEMA command, which automatically renames all the member objects into the new schema. Normally, this is only possible if the extension contains no internal assumptions about what schema any of its objects are in. Also, the extension's objects must all be in one schema to begin with (ignoring objects that do not belong to any schema, such as procedural languages). Mark a fully relocatable - extension by setting relocatable = true in its control + extension by setting relocatable = true in its control file. @@ -628,26 +685,26 @@ An extension might be relocatable during installation but not afterwards. This is typically the case if the extension's script file needs to reference the target schema explicitly, for example - in setting search_path properties for SQL functions. - For such an extension, set relocatable = false in its - control file, and use @extschema@ to refer to the target + in setting search_path properties for SQL functions. + For such an extension, set relocatable = false in its + control file, and use @extschema@ to refer to the target schema in the script file. All occurrences of this string will be replaced by the actual target schema's name before the script is executed. The user can set the target schema using the - SCHEMA option of CREATE EXTENSION. + SCHEMA option of CREATE EXTENSION. If the extension does not support relocation at all, set - relocatable = false in its control file, and also set - schema to the name of the intended target schema. This - will prevent use of the SCHEMA option of CREATE - EXTENSION, unless it specifies the same schema named in the control + relocatable = false in its control file, and also set + schema to the name of the intended target schema. This + will prevent use of the SCHEMA option of CREATE + EXTENSION, unless it specifies the same schema named in the control file. This choice is typically necessary if the extension contains internal assumptions about schema names that can't be replaced by - uses of @extschema@. The @extschema@ + uses of @extschema@. The @extschema@ substitution mechanism is available in this case too, although it is of limited use since the schema name is determined by the control file. 
@@ -656,24 +713,24 @@ In all cases, the script file will be executed with - initially set to point to the target - schema; that is, CREATE EXTENSION does the equivalent of + initially set to point to the target + schema; that is, CREATE EXTENSION does the equivalent of this: SET LOCAL search_path TO @extschema@; This allows the objects created by the script file to go into the target - schema. The script file can change search_path if it wishes, - but that is generally undesirable. search_path is restored - to its previous setting upon completion of CREATE EXTENSION. + schema. The script file can change search_path if it wishes, + but that is generally undesirable. search_path is restored + to its previous setting upon completion of CREATE EXTENSION. - The target schema is determined by the schema parameter in - the control file if that is given, otherwise by the SCHEMA - option of CREATE EXTENSION if that is given, otherwise the + The target schema is determined by the schema parameter in + the control file if that is given, otherwise by the SCHEMA + option of CREATE EXTENSION if that is given, otherwise the current default object creation schema (the first one in the caller's - search_path). When the control file schema + search_path). When the control file schema parameter is used, the target schema will be created if it doesn't already exist, but in the other two cases it must already exist. @@ -681,7 +738,7 @@ SET LOCAL search_path TO @extschema@; If any prerequisite extensions are listed in requires in the control file, their target schemas are appended to the initial - setting of search_path. This allows their objects to be + setting of search_path. This allows their objects to be visible to the new extension's script file. @@ -690,7 +747,7 @@ SET LOCAL search_path TO @extschema@; multiple schemas, it is usually desirable to place all the objects meant for external use into a single schema, which is considered the extension's target schema. Such an arrangement works conveniently with the default - setting of search_path during creation of dependent + setting of search_path during creation of dependent extensions. @@ -703,7 +760,7 @@ SET LOCAL search_path TO @extschema@; might be added or changed by the user after installation of the extension. Ordinarily, if a table is part of an extension, neither the table's definition nor its content will be dumped by - pg_dump. But that behavior is undesirable for a + pg_dump. But that behavior is undesirable for a configuration table; any data changes made by the user need to be included in dumps, or the extension will behave differently after a dump and reload. @@ -716,9 +773,9 @@ SET LOCAL search_path TO @extschema@; To solve this problem, an extension's script file can mark a table or a sequence it has created as a configuration relation, which will - cause pg_dump to include the table's or the sequence's + cause pg_dump to include the table's or the sequence's contents (not its definition) in dumps. To do that, call the function - pg_extension_config_dump(regclass, text) after creating the + pg_extension_config_dump(regclass, text) after creating the table or the sequence, for example CREATE TABLE my_config (key text, value text); @@ -728,30 +785,30 @@ SELECT pg_catalog.pg_extension_config_dump('my_config', ''); SELECT pg_catalog.pg_extension_config_dump('my_config_seq', ''); Any number of tables or sequences can be marked this way. 
Sequences - associated with serial or bigserial columns can + associated with serial or bigserial columns can be marked as well. - When the second argument of pg_extension_config_dump is + When the second argument of pg_extension_config_dump is an empty string, the entire contents of the table are dumped by - pg_dump. This is usually only correct if the table + pg_dump. This is usually only correct if the table is initially empty as created by the extension script. If there is a mixture of initial data and user-provided data in the table, - the second argument of pg_extension_config_dump provides - a WHERE condition that selects the data to be dumped. + the second argument of pg_extension_config_dump provides + a WHERE condition that selects the data to be dumped. For example, you might do CREATE TABLE my_config (key text, value text, standard_entry boolean); SELECT pg_catalog.pg_extension_config_dump('my_config', 'WHERE NOT standard_entry'); - and then make sure that standard_entry is true only + and then make sure that standard_entry is true only in the rows created by the extension's script. - For sequences, the second argument of pg_extension_config_dump + For sequences, the second argument of pg_extension_config_dump has no effect. @@ -763,10 +820,10 @@ SELECT pg_catalog.pg_extension_config_dump('my_config', 'WHERE NOT standard_entr You can alter the filter condition associated with a configuration table - by calling pg_extension_config_dump again. (This would + by calling pg_extension_config_dump again. (This would typically be useful in an extension update script.) The only way to mark a table as no longer a configuration table is to dissociate it from the - extension with ALTER EXTENSION ... DROP TABLE. + extension with ALTER EXTENSION ... DROP TABLE. @@ -781,7 +838,7 @@ SELECT pg_catalog.pg_extension_config_dump('my_config', 'WHERE NOT standard_entr - Sequences associated with serial or bigserial columns + Sequences associated with serial or bigserial columns need to be directly marked to dump their state. Marking their parent relation is not enough for this purpose. @@ -797,20 +854,20 @@ SELECT pg_catalog.pg_extension_config_dump('my_config', 'WHERE NOT standard_entr each released version of the extension's installation script. In addition, if you want users to be able to update their databases dynamically from one version to the next, you should provide - update scripts that make the necessary changes to go from + update scripts that make the necessary changes to go from one version to the next. Update scripts have names following the pattern - extension--oldversion--newversion.sql - (for example, foo--1.0--1.1.sql contains the commands to modify - version 1.0 of extension foo into version - 1.1). + extension--oldversion--newversion.sql + (for example, foo--1.0--1.1.sql contains the commands to modify + version 1.0 of extension foo into version + 1.1). Given that a suitable update script is available, the command - ALTER EXTENSION UPDATE will update an installed extension + ALTER EXTENSION UPDATE will update an installed extension to the specified new version. The update script is run in the same - environment that CREATE EXTENSION provides for installation - scripts: in particular, search_path is set up in the same + environment that CREATE EXTENSION provides for installation + scripts: in particular, search_path is set up in the same way, and any new objects created by the script are automatically added to the extension. 
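    For instance, once foo--1.0--1.1.sql is installed alongside the 1.0 script
    (foo and the version numbers being the documentation's running example,
    not a real extension), an existing 1.0 installation is brought forward
    with a single command, which runs that update script in the environment
    just described:

-- Runs foo--1.0--1.1.sql with search_path pointed at the extension's schema.
ALTER EXTENSION foo UPDATE TO '1.1';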
Also, if the script chooses to drop extension member objects, they are automatically dissociated from the extension. @@ -824,56 +881,56 @@ SELECT pg_catalog.pg_extension_config_dump('my_config', 'WHERE NOT standard_entr The update mechanism can be used to solve an important special case: - converting a loose collection of objects into an extension. + converting a loose collection of objects into an extension. Before the extension mechanism was added to PostgreSQL (in 9.1), many people wrote extension modules that simply created assorted unpackaged objects. Given an existing database containing such objects, how can we convert the objects into a properly packaged extension? Dropping them and then - doing a plain CREATE EXTENSION is one way, but it's not + doing a plain CREATE EXTENSION is one way, but it's not desirable if the objects have dependencies (for example, if there are table columns of a data type created by the extension). The way to fix this situation is to create an empty extension, then use ALTER - EXTENSION ADD to attach each pre-existing object to the extension, + EXTENSION ADD to attach each pre-existing object to the extension, then finally create any new objects that are in the current extension version but were not in the unpackaged release. CREATE - EXTENSION supports this case with its FROM old_version option, which causes it to not run the + EXTENSION supports this case with its FROM old_version option, which causes it to not run the normal installation script for the target version, but instead the update script named - extension--old_version--target_version.sql. + extension--old_version--target_version.sql. The choice of the dummy version name to use as old_version is up to the extension author, though - unpackaged is a common convention. If you have multiple + class="parameter">old_version is up to the extension author, though + unpackaged is a common convention. If you have multiple prior versions you need to be able to update into extension style, use multiple dummy version names to identify them. - ALTER EXTENSION is able to execute sequences of update + ALTER EXTENSION is able to execute sequences of update script files to achieve a requested update. For example, if only - foo--1.0--1.1.sql and foo--1.1--2.0.sql are - available, ALTER EXTENSION will apply them in sequence if an - update to version 2.0 is requested when 1.0 is + foo--1.0--1.1.sql and foo--1.1--2.0.sql are + available, ALTER EXTENSION will apply them in sequence if an + update to version 2.0 is requested when 1.0 is currently installed. - PostgreSQL doesn't assume anything about the properties - of version names: for example, it does not know whether 1.1 - follows 1.0. It just matches up the available version names + PostgreSQL doesn't assume anything about the properties + of version names: for example, it does not know whether 1.1 + follows 1.0. It just matches up the available version names and follows the path that requires applying the fewest update scripts. (A version name can actually be any string that doesn't contain - -- or leading or trailing -.) + -- or leading or trailing -.) - Sometimes it is useful to provide downgrade scripts, for - example foo--1.1--1.0.sql to allow reverting the changes - associated with version 1.1. If you do that, be careful + Sometimes it is useful to provide downgrade scripts, for + example foo--1.1--1.0.sql to allow reverting the changes + associated with version 1.1. 
If you do that, be careful of the possibility that a downgrade script might unexpectedly get applied because it yields a shorter path. The risky case is where - there is a fast path update script that jumps ahead several + there is a fast path update script that jumps ahead several versions as well as a downgrade script to the fast path's start point. It might take fewer steps to apply the downgrade and then the fast path than to move ahead one version at a time. If the downgrade script @@ -883,14 +940,14 @@ SELECT pg_catalog.pg_extension_config_dump('my_config', 'WHERE NOT standard_entr To check for unexpected update paths, use this command: -SELECT * FROM pg_extension_update_paths('extension_name'); +SELECT * FROM pg_extension_update_paths('extension_name'); This shows each pair of distinct known version names for the specified extension, together with the update path sequence that would be taken to - get from the source version to the target version, or NULL if + get from the source version to the target version, or NULL if there is no available update path. The path is shown in textual form - with -- separators. You can use - regexp_split_to_array(path,'--') if you prefer an array + with -- separators. You can use + regexp_split_to_array(path,'--') if you prefer an array format. @@ -901,24 +958,24 @@ SELECT * FROM pg_extension_update_paths('extension_name'); An extension that has been around for awhile will probably exist in several versions, for which the author will need to write update scripts. - For example, if you have released a foo extension in - versions 1.0, 1.1, and 1.2, there - should be update scripts foo--1.0--1.1.sql - and foo--1.1--1.2.sql. - Before PostgreSQL 10, it was necessary to also create - new script files foo--1.1.sql and foo--1.2.sql + For example, if you have released a foo extension in + versions 1.0, 1.1, and 1.2, there + should be update scripts foo--1.0--1.1.sql + and foo--1.1--1.2.sql. + Before PostgreSQL 10, it was necessary to also create + new script files foo--1.1.sql and foo--1.2.sql that directly build the newer extension versions, or else the newer versions could not be installed directly, only by - installing 1.0 and then updating. That was tedious and + installing 1.0 and then updating. That was tedious and duplicative, but now it's unnecessary, because CREATE - EXTENSION can follow update chains automatically. + EXTENSION can follow update chains automatically. For example, if only the script - files foo--1.0.sql, foo--1.0--1.1.sql, - and foo--1.1--1.2.sql are available then a request to - install version 1.2 is honored by running those three + files foo--1.0.sql, foo--1.0--1.1.sql, + and foo--1.1--1.2.sql are available then a request to + install version 1.2 is honored by running those three scripts in sequence. The processing is the same as if you'd first - installed 1.0 and then updated to 1.2. - (As with ALTER EXTENSION UPDATE, if multiple pathways are + installed 1.0 and then updated to 1.2. + (As with ALTER EXTENSION UPDATE, if multiple pathways are available then the shortest is preferred.) Arranging an extension's script files in this style can reduce the amount of maintenance effort needed to produce small updates. @@ -929,10 +986,10 @@ SELECT * FROM pg_extension_update_paths('extension_name'); maintained in this style, keep in mind that each version needs a control file even if it has no stand-alone installation script, as that control file will determine how the implicit update to that version is performed. 
- For example, if foo--1.0.control specifies requires - = 'bar' but foo's other control files do not, the - extension's dependency on bar will be dropped when updating - from 1.0 to another version. + For example, if foo--1.0.control specifies requires + = 'bar' but foo's other control files do not, the + extension's dependency on bar will be dropped when updating + from 1.0 to another version. @@ -940,14 +997,14 @@ SELECT * FROM pg_extension_update_paths('extension_name'); Extension Example - Here is a complete example of an SQL-only + Here is a complete example of an SQL-only extension, a two-element composite type that can store any type of value - in its slots, which are named k and v. Non-text + in its slots, which are named k and v. Non-text values are automatically coerced to text for storage. - The script file pair--1.0.sql looks like this: + The script file pair--1.0.sql looks like this: extension_name'); CREATE TYPE pair AS ( k text, v text ); -CREATE OR REPLACE FUNCTION pair(anyelement, text) -RETURNS pair LANGUAGE SQL AS 'SELECT ROW($1, $2)::pair'; +CREATE OR REPLACE FUNCTION pair(text, text) +RETURNS pair LANGUAGE SQL AS 'SELECT ROW($1, $2)::@extschema@.pair;'; -CREATE OR REPLACE FUNCTION pair(text, anyelement) -RETURNS pair LANGUAGE SQL AS 'SELECT ROW($1, $2)::pair'; +CREATE OPERATOR ~> (LEFTARG = text, RIGHTARG = text, FUNCTION = pair); -CREATE OR REPLACE FUNCTION pair(anyelement, anyelement) -RETURNS pair LANGUAGE SQL AS 'SELECT ROW($1, $2)::pair'; +-- "SET search_path" is easy to get right, but qualified names perform better. +CREATE OR REPLACE FUNCTION lower(pair) +RETURNS pair LANGUAGE SQL +AS 'SELECT ROW(lower($1.k), lower($1.v))::@extschema@.pair;' +SET search_path = pg_temp; -CREATE OR REPLACE FUNCTION pair(text, text) -RETURNS pair LANGUAGE SQL AS 'SELECT ROW($1, $2)::pair;'; - -CREATE OPERATOR ~> (LEFTARG = text, RIGHTARG = anyelement, PROCEDURE = pair); -CREATE OPERATOR ~> (LEFTARG = anyelement, RIGHTARG = text, PROCEDURE = pair); -CREATE OPERATOR ~> (LEFTARG = anyelement, RIGHTARG = anyelement, PROCEDURE = pair); -CREATE OPERATOR ~> (LEFTARG = text, RIGHTARG = text, PROCEDURE = pair); +CREATE OR REPLACE FUNCTION pair_concat(pair, pair) +RETURNS pair LANGUAGE SQL +AS 'SELECT ROW($1.k OPERATOR(pg_catalog.||) $2.k, + $1.v OPERATOR(pg_catalog.||) $2.v)::@extschema@.pair;'; ]]> - The control file pair.control looks like this: + The control file pair.control looks like this: # pair extension comment = 'A key/value pair data type' default_version = '1.0' -relocatable = true +relocatable = false While you hardly need a makefile to install these two files into the - correct directory, you could use a Makefile containing this: + correct directory, you could use a Makefile containing this: EXTENSION = pair @@ -1000,14 +1056,14 @@ include $(PGXS) This makefile relies on PGXS, which is described - in . The command make install + in . The command make install will install the control and script files into the correct - directory as reported by pg_config. + directory as reported by pg_config. Once the files are installed, use the - command to load the objects into + command to load the objects into any particular database. @@ -1022,16 +1078,16 @@ include $(PGXS) If you are thinking about distributing your - PostgreSQL extension modules, setting up a + PostgreSQL extension modules, setting up a portable build system for them can be fairly difficult. 
Therefore - the PostgreSQL installation provides a build + the PostgreSQL installation provides a build infrastructure for extensions, called PGXS, so that simple extension modules can be built simply against an already installed server. PGXS is mainly intended for extensions that include C code, although it can be used for pure-SQL extensions too. Note that PGXS is not intended to be a universal build system framework that can be used - to build any software interfacing to PostgreSQL; + to build any software interfacing to PostgreSQL; it simply automates common build rules for simple server extension modules. For more complicated packages, you might need to write your own build system. @@ -1044,13 +1100,15 @@ include $(PGXS) and include the global PGXS makefile. Here is an example that builds an extension module named isbn_issn, consisting of a shared library containing - some C code, an extension control file, a SQL script, and a documentation - text file: + some C code, an extension control file, a SQL script, an include file + (only needed if other modules might need to access the extension functions + without going via SQL), and a documentation text file: MODULES = isbn_issn EXTENSION = isbn_issn DATA = isbn_issn--1.0.sql DOCS = README.isbn_issn +HEADERS_isbn_issn = isbn_issn.h PG_CONFIG = pg_config PGXS := $(shell $(PG_CONFIG) --pgxs) @@ -1115,7 +1173,7 @@ include $(PGXS) MODULEDIR - subdirectory of prefix/share + subdirectory of prefix/share into which DATA and DOCS files should be installed (if not set, default is extension if EXTENSION is set, @@ -1164,6 +1222,48 @@ include $(PGXS) + + HEADERS + HEADERS_built + + + Files to (optionally build and) install under + prefix/include/server/$MODULEDIR/$MODULE_big. + + + Unlike DATA_built, files in HEADERS_built + are not removed by the clean target; if you want them removed, + also add them to EXTRA_CLEAN or add your own rules to do it. + + + + + + HEADERS_$MODULE + HEADERS_built_$MODULE + + + Files to install (after building if specified) under + prefix/include/server/$MODULEDIR/$MODULE, + where $MODULE must be a module name used + in MODULES or MODULE_big. + + + Unlike DATA_built, files in HEADERS_built_$MODULE + are not removed by the clean target; if you want them removed, + also add them to EXTRA_CLEAN or add your own rules to do it. + + + It is legal to use both variables for the same module, or any + combination, unless you have two module names in the + MODULES list that differ only by the presence of a + prefix built_, which would cause ambiguity. In + that (hopefully unlikely) case, you should use only the + HEADERS_built_$MODULE variables. + + + + SCRIPTS @@ -1198,7 +1298,7 @@ include $(PGXS) REGRESS_OPTS - additional switches to pass to pg_regress + additional switches to pass to pg_regress @@ -1252,10 +1352,10 @@ include $(PGXS) PG_CONFIG - path to pg_config program for the + path to pg_config program for the PostgreSQL installation to build against - (typically just pg_config to use the first one in your - PATH) + (typically just pg_config to use the first one in your + PATH) @@ -1270,7 +1370,7 @@ include $(PGXS) compiled and installed for the PostgreSQL installation that corresponds to the first pg_config program - found in your PATH. You can use a different installation by + found in your PATH. You can use a different installation by setting PG_CONFIG to point to its pg_config program, either within the makefile or on the make command line. 
@@ -1293,7 +1393,7 @@ make -f /path/to/extension/source/tree/Makefile install Alternatively, you can set up a directory for a VPATH build in a similar way to how it is done for the core code. One way to do this is using the - core script config/prep_buildtree. Once this has been done + core script config/prep_buildtree. Once this has been done you can build by setting the make variable VPATH like this: @@ -1304,18 +1404,18 @@ make VPATH=/path/to/extension/source/tree install - The scripts listed in the REGRESS variable are used for + The scripts listed in the REGRESS variable are used for regression testing of your module, which can be invoked by make - installcheck after doing make install. For this to + installcheck after doing make install. For this to work you must have a running PostgreSQL server. - The script files listed in REGRESS must appear in a + The script files listed in REGRESS must appear in a subdirectory named sql/ in your extension's directory. These files must have extension .sql, which must not be included in the REGRESS list in the makefile. For each test there should also be a file containing the expected output in a subdirectory named expected/, with the same stem and extension .out. make installcheck - executes each test script with psql, and compares the + executes each test script with psql, and compares the resulting output to the matching expected file. Any differences will be written to the file regression.diffs in diff -c format. Note that trying to run a test that is missing its diff --git a/doc/src/sgml/external-projects.sgml b/doc/src/sgml/external-projects.sgml index 82eaf4a355..81742d227b 100644 --- a/doc/src/sgml/external-projects.sgml +++ b/doc/src/sgml/external-projects.sgml @@ -40,9 +40,9 @@ All other language interfaces are external projects and are distributed - separately. includes a list of + separately. includes a list of some of these projects. Note that some of these packages might not be - released under the same license as PostgreSQL. For more + released under the same license as PostgreSQL. For more information on each language interface, including licensing terms, refer to its website and documentation. @@ -65,7 +65,7 @@ DBD::Pg Perl Perl DBI driver - + @@ -78,7 +78,7 @@ libpqxx C++ - New-style C++ interface + C++ interface @@ -107,7 +107,7 @@ pgtclng Tcl - + @@ -145,8 +145,8 @@ There are several administration tools available for - PostgreSQL. The most popular is - pgAdmin III, + PostgreSQL. The most popular is + pgAdmin, and there are several commercially available ones as well. @@ -170,9 +170,9 @@ In addition, there are a number of procedural languages that are developed and maintained outside the core PostgreSQL - distribution. lists some of these + distribution. lists some of these packages. Note that some of these projects might not be released under the same - license as PostgreSQL. For more information on each + license as PostgreSQL. For more information on each procedural language, including licensing information, refer to its website and documentation. @@ -233,17 +233,17 @@ - PostgreSQL is designed to be easily extensible. For + PostgreSQL is designed to be easily extensible. For this reason, extensions loaded into the database can function just like features that are built in. The - contrib/ directory shipped with the source code + contrib/ directory shipped with the source code contains several extensions, which are described in - . Other extensions are developed + . Other extensions are developed independently, like PostGIS. 
Even - PostgreSQL replication solutions can be developed + url="http://postgis.net/">PostGIS. Even + PostgreSQL replication solutions can be developed externally. For example, Slony-I is a popular + url="http://www.slony.info">Slony-I is a popular master/standby replication solution that is developed independently from the core project. diff --git a/doc/src/sgml/fdwhandler.sgml b/doc/src/sgml/fdwhandler.sgml index dbeaab555d..4ce88dd77c 100644 --- a/doc/src/sgml/fdwhandler.sgml +++ b/doc/src/sgml/fdwhandler.sgml @@ -21,8 +21,8 @@ The foreign data wrappers included in the standard distribution are good references when trying to write your own. Look into the - contrib subdirectory of the source tree. - The reference page also has + contrib subdirectory of the source tree. + The reference page also has some useful details. @@ -43,7 +43,7 @@ a validator function. Both functions must be written in a compiled language such as C, using the version-1 interface. For details on C language calling conventions and dynamic loading, - see . + see . @@ -57,7 +57,7 @@ returning the special pseudo-type fdw_handler. The callback functions are plain C functions and are not visible or callable at the SQL level. The callback functions are described in - . + . @@ -70,10 +70,10 @@ representing the type of object the options are associated with (in the form of the OID of the system catalog the object would be stored in, either - ForeignDataWrapperRelationId, - ForeignServerRelationId, - UserMappingRelationId, - or ForeignTableRelationId). + ForeignDataWrapperRelationId, + ForeignServerRelationId, + UserMappingRelationId, + or ForeignTableRelationId). If no validator function is supplied, options are not checked at object creation time or object alteration time. @@ -84,14 +84,14 @@ Foreign Data Wrapper Callback Routines - The FDW handler function returns a palloc'd FdwRoutine + The FDW handler function returns a palloc'd FdwRoutine struct containing pointers to the callback functions described below. The scan-related functions are required, the rest are optional. - The FdwRoutine struct type is declared in - src/include/foreign/fdwapi.h, which see for additional + The FdwRoutine struct type is declared in + src/include/foreign/fdwapi.h, which see for additional details. @@ -101,152 +101,152 @@ void -GetForeignRelSize (PlannerInfo *root, - RelOptInfo *baserel, - Oid foreigntableid); +GetForeignRelSize(PlannerInfo *root, + RelOptInfo *baserel, + Oid foreigntableid); Obtain relation size estimates for a foreign table. This is called at the beginning of planning for a query that scans a foreign table. - root is the planner's global information about the query; - baserel is the planner's information about this table; and - foreigntableid is the pg_class OID of the - foreign table. (foreigntableid could be obtained from the + root is the planner's global information about the query; + baserel is the planner's information about this table; and + foreigntableid is the pg_class OID of the + foreign table. (foreigntableid could be obtained from the planner data structures, but it's passed explicitly to save effort.) - This function should update baserel->rows to be the + This function should update baserel->rows to be the expected number of rows returned by the table scan, after accounting for the filtering done by the restriction quals. The initial value of - baserel->rows is just a constant default estimate, which + baserel->rows is just a constant default estimate, which should be replaced if at all possible. 
The function may also choose to - update baserel->width if it can compute a better estimate + update baserel->width if it can compute a better estimate of the average result row width. - See for additional information. + See for additional information. void -GetForeignPaths (PlannerInfo *root, - RelOptInfo *baserel, - Oid foreigntableid); +GetForeignPaths(PlannerInfo *root, + RelOptInfo *baserel, + Oid foreigntableid); Create possible access paths for a scan on a foreign table. This is called during query planning. - The parameters are the same as for GetForeignRelSize, + The parameters are the same as for GetForeignRelSize, which has already been called. This function must generate at least one access path - (ForeignPath node) for a scan on the foreign table and - must call add_path to add each such path to - baserel->pathlist. It's recommended to use - create_foreignscan_path to build the - ForeignPath nodes. The function can generate multiple - access paths, e.g., a path which has valid pathkeys to + (ForeignPath node) for a scan on the foreign table and + must call add_path to add each such path to + baserel->pathlist. It's recommended to use + create_foreignscan_path to build the + ForeignPath nodes. The function can generate multiple + access paths, e.g., a path which has valid pathkeys to represent a pre-sorted result. Each access path must contain cost estimates, and can contain any FDW-private information that is needed to identify the specific scan method intended. - See for additional information. + See for additional information. ForeignScan * -GetForeignPlan (PlannerInfo *root, - RelOptInfo *baserel, - Oid foreigntableid, - ForeignPath *best_path, - List *tlist, - List *scan_clauses, - Plan *outer_plan); +GetForeignPlan(PlannerInfo *root, + RelOptInfo *baserel, + Oid foreigntableid, + ForeignPath *best_path, + List *tlist, + List *scan_clauses, + Plan *outer_plan); - Create a ForeignScan plan node from the selected foreign + Create a ForeignScan plan node from the selected foreign access path. This is called at the end of query planning. - The parameters are as for GetForeignRelSize, plus - the selected ForeignPath (previously produced by - GetForeignPaths, GetForeignJoinPaths, - or GetForeignUpperPaths), + The parameters are as for GetForeignRelSize, plus + the selected ForeignPath (previously produced by + GetForeignPaths, GetForeignJoinPaths, + or GetForeignUpperPaths), the target list to be emitted by the plan node, the restriction clauses to be enforced by the plan node, - and the outer subplan of the ForeignScan, - which is used for rechecks performed by RecheckForeignScan. + and the outer subplan of the ForeignScan, + which is used for rechecks performed by RecheckForeignScan. (If the path is for a join rather than a base - relation, foreigntableid is InvalidOid.) + relation, foreigntableid is InvalidOid.) - This function must create and return a ForeignScan plan - node; it's recommended to use make_foreignscan to build the - ForeignScan node. + This function must create and return a ForeignScan plan + node; it's recommended to use make_foreignscan to build the + ForeignScan node. - See for additional information. + See for additional information. void -BeginForeignScan (ForeignScanState *node, - int eflags); +BeginForeignScan(ForeignScanState *node, + int eflags); Begin executing a foreign scan. This is called during executor startup. 
It should perform any initialization needed before the scan can start, but not start executing the actual scan (that should be done upon the - first call to IterateForeignScan). - The ForeignScanState node has already been created, but - its fdw_state field is still NULL. Information about + first call to IterateForeignScan). + The ForeignScanState node has already been created, but + its fdw_state field is still NULL. Information about the table to scan is accessible through the - ForeignScanState node (in particular, from the underlying - ForeignScan plan node, which contains any FDW-private - information provided by GetForeignPlan). - eflags contains flag bits describing the executor's + ForeignScanState node (in particular, from the underlying + ForeignScan plan node, which contains any FDW-private + information provided by GetForeignPlan). + eflags contains flag bits describing the executor's operating mode for this plan node. - Note that when (eflags & EXEC_FLAG_EXPLAIN_ONLY) is + Note that when (eflags & EXEC_FLAG_EXPLAIN_ONLY) is true, this function should not perform any externally-visible actions; it should only do the minimum required to make the node state valid - for ExplainForeignScan and EndForeignScan. + for ExplainForeignScan and EndForeignScan. TupleTableSlot * -IterateForeignScan (ForeignScanState *node); +IterateForeignScan(ForeignScanState *node); Fetch one row from the foreign source, returning it in a tuple table slot - (the node's ScanTupleSlot should be used for this + (the node's ScanTupleSlot should be used for this purpose). Return NULL if no more rows are available. The tuple table slot infrastructure allows either a physical or virtual tuple to be returned; in most cases the latter choice is preferable from a performance standpoint. Note that this is called in a short-lived memory context that will be reset between invocations. Create a memory context - in BeginForeignScan if you need longer-lived storage, or use - the es_query_cxt of the node's EState. + in BeginForeignScan if you need longer-lived storage, or use + the es_query_cxt of the node's EState. - The rows returned must match the fdw_scan_tlist target + The rows returned must match the fdw_scan_tlist target list if one was supplied, otherwise they must match the row type of the foreign table being scanned. If you choose to optimize away fetching columns that are not needed, you should insert nulls in those column - positions, or else generate a fdw_scan_tlist list with + positions, or else generate a fdw_scan_tlist list with those columns omitted. @@ -264,7 +264,7 @@ IterateForeignScan (ForeignScanState *node); void -ReScanForeignScan (ForeignScanState *node); +ReScanForeignScan(ForeignScanState *node); Restart the scan from the beginning. Note that any parameters the @@ -275,7 +275,7 @@ ReScanForeignScan (ForeignScanState *node); void -EndForeignScan (ForeignScanState *node); +EndForeignScan(ForeignScanState *node); End the scan and release resources. It is normally not important @@ -297,21 +297,21 @@ EndForeignScan (ForeignScanState *node); void -GetForeignJoinPaths (PlannerInfo *root, - RelOptInfo *joinrel, - RelOptInfo *outerrel, - RelOptInfo *innerrel, - JoinType jointype, - JoinPathExtraData *extra); +GetForeignJoinPaths(PlannerInfo *root, + RelOptInfo *joinrel, + RelOptInfo *outerrel, + RelOptInfo *innerrel, + JoinType jointype, + JoinPathExtraData *extra); Create possible access paths for a join of two (or more) foreign tables that all belong to the same foreign server. 
This optional function is called during query planning. As - with GetForeignPaths, this function should - generate ForeignPath path(s) for the - supplied joinrel, and call add_path to add these + with GetForeignPaths, this function should + generate ForeignPath path(s) for the + supplied joinrel, and call add_path to add these paths to the set of paths considered for the join. But unlike - GetForeignPaths, it is not necessary that this function + GetForeignPaths, it is not necessary that this function succeed in creating at least one path, since paths involving local joining are always possible. @@ -323,25 +323,25 @@ GetForeignJoinPaths (PlannerInfo *root, - If a ForeignPath path is chosen for the join, it will + If a ForeignPath path is chosen for the join, it will represent the entire join process; paths generated for the component tables and subsidiary joins will not be used. Subsequent processing of the join path proceeds much as it does for a path scanning a single - foreign table. One difference is that the scanrelid of - the resulting ForeignScan plan node should be set to zero, + foreign table. One difference is that the scanrelid of + the resulting ForeignScan plan node should be set to zero, since there is no single relation that it represents; instead, - the fs_relids field of the ForeignScan + the fs_relids field of the ForeignScan node represents the set of relations that were joined. (The latter field is set up automatically by the core planner code, and need not be filled by the FDW.) Another difference is that, because the column list for a remote join cannot be found from the system catalogs, the FDW must - fill fdw_scan_tlist with an appropriate list - of TargetEntry nodes, representing the set of columns + fill fdw_scan_tlist with an appropriate list + of TargetEntry nodes, representing the set of columns it will supply at run time in the tuples it returns. - See for additional information. + See for additional information. @@ -356,39 +356,44 @@ GetForeignJoinPaths (PlannerInfo *root, void -GetForeignUpperPaths (PlannerInfo *root, - UpperRelationKind stage, - RelOptInfo *input_rel, - RelOptInfo *output_rel); +GetForeignUpperPaths(PlannerInfo *root, + UpperRelationKind stage, + RelOptInfo *input_rel, + RelOptInfo *output_rel, + void *extra); - Create possible access paths for upper relation processing, + Create possible access paths for upper relation processing, which is the planner's term for all post-scan/join query processing, such as aggregation, window functions, sorting, and table updates. This optional function is called during query planning. Currently, it is called only if all base relation(s) involved in the query belong to the - same FDW. This function should generate ForeignPath + same FDW. This function should generate ForeignPath path(s) for any post-scan/join processing that the FDW knows how to - perform remotely, and call add_path to add these paths to - the indicated upper relation. As with GetForeignJoinPaths, + perform remotely, and call add_path to add these paths to + the indicated upper relation. As with GetForeignJoinPaths, it is not necessary that this function succeed in creating any paths, since paths involving local processing are always possible. - The stage parameter identifies which post-scan/join step is - currently being considered. output_rel is the upper relation + The stage parameter identifies which post-scan/join step is + currently being considered. 
output_rel is the upper relation that should receive paths representing computation of this step, - and input_rel is the relation representing the input to this - step. (Note that ForeignPath paths added - to output_rel would typically not have any direct dependency - on paths of the input_rel, since their processing is expected + and input_rel is the relation representing the input to this + step. The extra parameter provides additional details, + currently, it is set only for UPPERREL_PARTIAL_GROUP_AGG + or UPPERREL_GROUP_AGG, in which case it points to a + GroupPathExtraData structure. + (Note that ForeignPath paths added + to output_rel would typically not have any direct dependency + on paths of the input_rel, since their processing is expected to be done externally. However, examining paths previously generated for the previous processing step can be useful to avoid redundant planning work.) - See for additional information. + See for additional information. @@ -404,155 +409,158 @@ GetForeignUpperPaths (PlannerInfo *root, void -AddForeignUpdateTargets (Query *parsetree, - RangeTblEntry *target_rte, - Relation target_relation); +AddForeignUpdateTargets(Query *parsetree, + RangeTblEntry *target_rte, + Relation target_relation); - UPDATE and DELETE operations are performed + UPDATE and DELETE operations are performed against rows previously fetched by the table-scanning functions. The FDW may need extra information, such as a row ID or the values of primary-key columns, to ensure that it can identify the exact row to update or delete. To support that, this function can add extra hidden, - or junk, target columns to the list of columns that are to be - retrieved from the foreign table during an UPDATE or - DELETE. + or junk, target columns to the list of columns that are to be + retrieved from the foreign table during an UPDATE or + DELETE. - To do that, add TargetEntry items to - parsetree->targetList, containing expressions for the + To do that, add TargetEntry items to + parsetree->targetList, containing expressions for the extra values to be fetched. Each such entry must be marked - resjunk = true, and must have a distinct - resname that will identify it at execution time. - Avoid using names matching ctidN, + resjunk = true, and must have a distinct + resname that will identify it at execution time. + Avoid using names matching ctidN, wholerow, or - wholerowN, as the core system can + wholerowN, as the core system can generate junk columns of these names. + If the extra expressions are more complex than simple Vars, they + must be run through eval_const_expressions + before adding them to the targetlist. - This function is called in the rewriter, not the planner, so the - information available is a bit different from that available to the + Although this function is called during planning, the + information provided is a bit different from that available to other planning routines. - parsetree is the parse tree for the UPDATE or - DELETE command, while target_rte and - target_relation describe the target foreign table. + parsetree is the parse tree for the UPDATE or + DELETE command, while target_rte and + target_relation describe the target foreign table. - If the AddForeignUpdateTargets pointer is set to - NULL, no extra target expressions are added. - (This will make it impossible to implement DELETE - operations, though UPDATE may still be feasible if the FDW + If the AddForeignUpdateTargets pointer is set to + NULL, no extra target expressions are added. 
+ (This will make it impossible to implement DELETE + operations, though UPDATE may still be feasible if the FDW relies on an unchanging primary key to identify rows.) List * -PlanForeignModify (PlannerInfo *root, - ModifyTable *plan, - Index resultRelation, - int subplan_index); +PlanForeignModify(PlannerInfo *root, + ModifyTable *plan, + Index resultRelation, + int subplan_index); Perform any additional planning actions needed for an insert, update, or delete on a foreign table. This function generates the FDW-private - information that will be attached to the ModifyTable plan + information that will be attached to the ModifyTable plan node that performs the update action. This private information must - have the form of a List, and will be delivered to - BeginForeignModify during the execution stage. + have the form of a List, and will be delivered to + BeginForeignModify during the execution stage. - root is the planner's global information about the query. - plan is the ModifyTable plan node, which is - complete except for the fdwPrivLists field. - resultRelation identifies the target foreign table by its - range table index. subplan_index identifies which target of - the ModifyTable plan node this is, counting from zero; - use this if you want to index into plan->plans or other - substructure of the plan node. + root is the planner's global information about the query. + plan is the ModifyTable plan node, which is + complete except for the fdwPrivLists field. + resultRelation identifies the target foreign table by its + range table index. subplan_index identifies which target of + the ModifyTable plan node this is, counting from zero; + use this if you want to index into plan->plans or other + substructure of the plan node. - See for additional information. + See for additional information. - If the PlanForeignModify pointer is set to - NULL, no additional plan-time actions are taken, and the - fdw_private list delivered to - BeginForeignModify will be NIL. + If the PlanForeignModify pointer is set to + NULL, no additional plan-time actions are taken, and the + fdw_private list delivered to + BeginForeignModify will be NIL. void -BeginForeignModify (ModifyTableState *mtstate, - ResultRelInfo *rinfo, - List *fdw_private, - int subplan_index, - int eflags); +BeginForeignModify(ModifyTableState *mtstate, + ResultRelInfo *rinfo, + List *fdw_private, + int subplan_index, + int eflags); Begin executing a foreign table modification operation. This routine is called during executor startup. It should perform any initialization needed prior to the actual table modifications. Subsequently, - ExecForeignInsert, ExecForeignUpdate or - ExecForeignDelete will be called for each tuple to be + ExecForeignInsert, ExecForeignUpdate or + ExecForeignDelete will be called for each tuple to be inserted, updated, or deleted. - mtstate is the overall state of the - ModifyTable plan node being executed; global data about + mtstate is the overall state of the + ModifyTable plan node being executed; global data about the plan and execution state is available via this structure. - rinfo is the ResultRelInfo struct describing - the target foreign table. (The ri_FdwState field of - ResultRelInfo is available for the FDW to store any + rinfo is the ResultRelInfo struct describing + the target foreign table. (The ri_FdwState field of + ResultRelInfo is available for the FDW to store any private state it needs for this operation.) - fdw_private contains the private data generated by - PlanForeignModify, if any. 
- subplan_index identifies which target of - the ModifyTable plan node this is. - eflags contains flag bits describing the executor's + fdw_private contains the private data generated by + PlanForeignModify, if any. + subplan_index identifies which target of + the ModifyTable plan node this is. + eflags contains flag bits describing the executor's operating mode for this plan node. - Note that when (eflags & EXEC_FLAG_EXPLAIN_ONLY) is + Note that when (eflags & EXEC_FLAG_EXPLAIN_ONLY) is true, this function should not perform any externally-visible actions; it should only do the minimum required to make the node state valid - for ExplainForeignModify and EndForeignModify. + for ExplainForeignModify and EndForeignModify. - If the BeginForeignModify pointer is set to - NULL, no action is taken during executor startup. + If the BeginForeignModify pointer is set to + NULL, no action is taken during executor startup. TupleTableSlot * -ExecForeignInsert (EState *estate, - ResultRelInfo *rinfo, - TupleTableSlot *slot, - TupleTableSlot *planSlot); +ExecForeignInsert(EState *estate, + ResultRelInfo *rinfo, + TupleTableSlot *slot, + TupleTableSlot *planSlot); Insert one tuple into the foreign table. - estate is global execution state for the query. - rinfo is the ResultRelInfo struct describing + estate is global execution state for the query. + rinfo is the ResultRelInfo struct describing the target foreign table. - slot contains the tuple to be inserted; it will match the + slot contains the tuple to be inserted; it will match the row-type definition of the foreign table. - planSlot contains the tuple that was generated by the - ModifyTable plan node's subplan; it differs from - slot in possibly containing additional junk - columns. (The planSlot is typically of little interest - for INSERT cases, but is provided for completeness.) + planSlot contains the tuple that was generated by the + ModifyTable plan node's subplan; it differs from + slot in possibly containing additional junk + columns. (The planSlot is typically of little interest + for INSERT cases, but is provided for completeness.) @@ -560,45 +568,47 @@ ExecForeignInsert (EState *estate, inserted (this might differ from the data supplied, for example as a result of trigger actions), or NULL if no row was actually inserted (again, typically as a result of triggers). The passed-in - slot can be re-used for this purpose. + slot can be re-used for this purpose. - The data in the returned slot is used only if the INSERT - query has a RETURNING clause or the foreign table has - an AFTER ROW trigger. Triggers require all columns, but the - FDW could choose to optimize away returning some or all columns depending - on the contents of the RETURNING clause. Regardless, some - slot must be returned to indicate success, or the query's reported row - count will be wrong. + The data in the returned slot is used only if the INSERT + statement has a RETURNING clause or involves a view + WITH CHECK OPTION; or if the foreign table has + an AFTER ROW trigger. Triggers require all columns, + but the FDW could choose to optimize away returning some or all columns + depending on the contents of the RETURNING clause or + WITH CHECK OPTION constraints. Regardless, some slot + must be returned to indicate success, or the query's reported row count + will be wrong. 
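    To make the above concrete (all object names here are hypothetical), these
    are the kinds of statements whose behavior depends on the slot that
    ExecForeignInsert returns:

-- The RETURNING list is computed from the slot the FDW hands back.
INSERT INTO remote_items (id, note) VALUES (1, 'hello') RETURNING id, note;

-- An AFTER ROW trigger on the foreign table also sees the returned row
-- (log_remote_insert is a hypothetical trigger function).
CREATE TRIGGER remote_items_audit
    AFTER INSERT ON remote_items
    FOR EACH ROW EXECUTE PROCEDURE log_remote_insert();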
- If the ExecForeignInsert pointer is set to - NULL, attempts to insert into the foreign table will fail + If the ExecForeignInsert pointer is set to + NULL, attempts to insert into the foreign table will fail with an error message. TupleTableSlot * -ExecForeignUpdate (EState *estate, - ResultRelInfo *rinfo, - TupleTableSlot *slot, - TupleTableSlot *planSlot); +ExecForeignUpdate(EState *estate, + ResultRelInfo *rinfo, + TupleTableSlot *slot, + TupleTableSlot *planSlot); Update one tuple in the foreign table. - estate is global execution state for the query. - rinfo is the ResultRelInfo struct describing + estate is global execution state for the query. + rinfo is the ResultRelInfo struct describing the target foreign table. - slot contains the new data for the tuple; it will match the + slot contains the new data for the tuple; it will match the row-type definition of the foreign table. - planSlot contains the tuple that was generated by the - ModifyTable plan node's subplan; it differs from - slot in possibly containing additional junk + planSlot contains the tuple that was generated by the + ModifyTable plan node's subplan; it differs from + slot in possibly containing additional junk columns. In particular, any junk columns that were requested by - AddForeignUpdateTargets will be available from this slot. + AddForeignUpdateTargets will be available from this slot. @@ -606,74 +616,76 @@ ExecForeignUpdate (EState *estate, updated (this might differ from the data supplied, for example as a result of trigger actions), or NULL if no row was actually updated (again, typically as a result of triggers). The passed-in - slot can be re-used for this purpose. + slot can be re-used for this purpose. - The data in the returned slot is used only if the UPDATE - query has a RETURNING clause or the foreign table has - an AFTER ROW trigger. Triggers require all columns, but the - FDW could choose to optimize away returning some or all columns depending - on the contents of the RETURNING clause. Regardless, some - slot must be returned to indicate success, or the query's reported row - count will be wrong. + The data in the returned slot is used only if the UPDATE + statement has a RETURNING clause or involves a view + WITH CHECK OPTION; or if the foreign table has + an AFTER ROW trigger. Triggers require all columns, + but the FDW could choose to optimize away returning some or all columns + depending on the contents of the RETURNING clause or + WITH CHECK OPTION constraints. Regardless, some slot + must be returned to indicate success, or the query's reported row count + will be wrong. - If the ExecForeignUpdate pointer is set to - NULL, attempts to update the foreign table will fail + If the ExecForeignUpdate pointer is set to + NULL, attempts to update the foreign table will fail with an error message. TupleTableSlot * -ExecForeignDelete (EState *estate, - ResultRelInfo *rinfo, - TupleTableSlot *slot, - TupleTableSlot *planSlot); +ExecForeignDelete(EState *estate, + ResultRelInfo *rinfo, + TupleTableSlot *slot, + TupleTableSlot *planSlot); Delete one tuple from the foreign table. - estate is global execution state for the query. - rinfo is the ResultRelInfo struct describing + estate is global execution state for the query. + rinfo is the ResultRelInfo struct describing the target foreign table. - slot contains nothing useful upon call, but can be used to + slot contains nothing useful upon call, but can be used to hold the returned tuple. 
- planSlot contains the tuple that was generated by the - ModifyTable plan node's subplan; in particular, it will + planSlot contains the tuple that was generated by the + ModifyTable plan node's subplan; in particular, it will carry any junk columns that were requested by - AddForeignUpdateTargets. The junk column(s) must be used + AddForeignUpdateTargets. The junk column(s) must be used to identify the tuple to be deleted. The return value is either a slot containing the row that was deleted, or NULL if no row was deleted (typically as a result of triggers). The - passed-in slot can be used to hold the tuple to be returned. + passed-in slot can be used to hold the tuple to be returned. - The data in the returned slot is used only if the DELETE - query has a RETURNING clause or the foreign table has - an AFTER ROW trigger. Triggers require all columns, but the + The data in the returned slot is used only if the DELETE + query has a RETURNING clause or the foreign table has + an AFTER ROW trigger. Triggers require all columns, but the FDW could choose to optimize away returning some or all columns depending - on the contents of the RETURNING clause. Regardless, some + on the contents of the RETURNING clause. Regardless, some slot must be returned to indicate success, or the query's reported row count will be wrong. - If the ExecForeignDelete pointer is set to - NULL, attempts to delete from the foreign table will fail + If the ExecForeignDelete pointer is set to + NULL, attempts to delete from the foreign table will fail with an error message. void -EndForeignModify (EState *estate, - ResultRelInfo *rinfo); +EndForeignModify(EState *estate, + ResultRelInfo *rinfo); End the table update and release resources. It is normally not important @@ -682,35 +694,101 @@ EndForeignModify (EState *estate, - If the EndForeignModify pointer is set to - NULL, no action is taken during executor shutdown. + If the EndForeignModify pointer is set to + NULL, no action is taken during executor shutdown. + + + + Tuples inserted into a partitioned table by INSERT or + COPY FROM are routed to partitions. If an FDW + supports routable foreign-table partitions, it should also provide the + following callback functions. These functions are also called when + COPY FROM is executed on a foreign table. + + + + +void +BeginForeignInsert(ModifyTableState *mtstate, + ResultRelInfo *rinfo); + + + Begin executing an insert operation on a foreign table. This routine is + called right before the first tuple is inserted into the foreign table + in both cases when it is the partition chosen for tuple routing and the + target specified in a COPY FROM command. It should + perform any initialization needed prior to the actual insertion. + Subsequently, ExecForeignInsert will be called for + each tuple to be inserted into the foreign table. + + + + mtstate is the overall state of the + ModifyTable plan node being executed; global data about + the plan and execution state is available via this structure. + rinfo is the ResultRelInfo struct describing + the target foreign table. (The ri_FdwState field of + ResultRelInfo is available for the FDW to store any + private state it needs for this operation.) + + + + When this is called by a COPY FROM command, the + plan-related global data in mtstate is not provided + and the planSlot parameter of + ExecForeignInsert subsequently called for each + inserted tuple is NULL, whether the foreign table is + the partition chosen for tuple routing or the target specified in the + command. 
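    Roughly, the two situations in which BeginForeignInsert is invoked can be
    reproduced with SQL like the following (the server, tables, and file path
    are hypothetical, and the foreign table's FDW must support insertion):

-- COPY FROM with a foreign table as the direct target.
COPY remote_events FROM '/tmp/events.csv' WITH (FORMAT csv);

-- Tuple routing: an INSERT into the partitioned parent is routed to the
-- foreign-table partition, which is where BeginForeignInsert is called.
CREATE TABLE events (id int, logdate date) PARTITION BY RANGE (logdate);
CREATE FOREIGN TABLE events_2018
    PARTITION OF events
    FOR VALUES FROM ('2018-01-01') TO ('2019-01-01')
    SERVER remote_server;
INSERT INTO events VALUES (1, '2018-06-01');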
+ + + + If the BeginForeignInsert pointer is set to + NULL, no action is taken for the initialization. + + + + +void +EndForeignInsert(EState *estate, + ResultRelInfo *rinfo); + + + End the insert operation and release resources. It is normally not important + to release palloc'd memory, but for example open files and connections + to remote servers should be cleaned up. + + + + If the EndForeignInsert pointer is set to + NULL, no action is taken for the termination. int -IsForeignRelUpdatable (Relation rel); +IsForeignRelUpdatable(Relation rel); Report which update operations the specified foreign table supports. The return value should be a bit mask of rule event numbers indicating which operations are supported by the foreign table, using the - CmdType enumeration; that is, - (1 << CMD_UPDATE) = 4 for UPDATE, - (1 << CMD_INSERT) = 8 for INSERT, and - (1 << CMD_DELETE) = 16 for DELETE. + CmdType enumeration; that is, + (1 << CMD_UPDATE) = 4 for UPDATE, + (1 << CMD_INSERT) = 8 for INSERT, and + (1 << CMD_DELETE) = 16 for DELETE. - If the IsForeignRelUpdatable pointer is set to - NULL, foreign tables are assumed to be insertable, updatable, - or deletable if the FDW provides ExecForeignInsert, - ExecForeignUpdate, or ExecForeignDelete + If the IsForeignRelUpdatable pointer is set to + NULL, foreign tables are assumed to be insertable, updatable, + or deletable if the FDW provides ExecForeignInsert, + ExecForeignUpdate, or ExecForeignDelete respectively. This function is only needed if the FDW supports some tables that are updatable and some that are not. (Even then, it's permissible to throw an error in the execution routine instead of checking in this function. However, this function is used to determine - updatability for display in the information_schema views.) + updatability for display in the information_schema views.) @@ -729,129 +807,129 @@ IsForeignRelUpdatable (Relation rel); bool -PlanDirectModify (PlannerInfo *root, - ModifyTable *plan, - Index resultRelation, - int subplan_index); +PlanDirectModify(PlannerInfo *root, + ModifyTable *plan, + Index resultRelation, + int subplan_index); Decide whether it is safe to execute a direct modification - on the remote server. If so, return true after performing - planning actions needed for that. Otherwise, return false. + on the remote server. If so, return true after performing + planning actions needed for that. Otherwise, return false. This optional function is called during query planning. - If this function succeeds, BeginDirectModify, - IterateDirectModify and EndDirectModify will + If this function succeeds, BeginDirectModify, + IterateDirectModify and EndDirectModify will be called at the execution stage, instead. Otherwise, the table modification will be executed using the table-updating functions described above. - The parameters are the same as for PlanForeignModify. + The parameters are the same as for PlanForeignModify. To execute the direct modification on the remote server, this function - must rewrite the target subplan with a ForeignScan plan + must rewrite the target subplan with a ForeignScan plan node that executes the direct modification on the remote server. The - operation field of the ForeignScan must - be set to the CmdType enumeration appropriately; that is, - CMD_UPDATE for UPDATE, - CMD_INSERT for INSERT, and - CMD_DELETE for DELETE. 
+ operation field of the ForeignScan must + be set to the CmdType enumeration appropriately; that is, + CMD_UPDATE for UPDATE, + CMD_INSERT for INSERT, and + CMD_DELETE for DELETE. - See for additional information. + See for additional information. - If the PlanDirectModify pointer is set to - NULL, no attempts to execute a direct modification on the + If the PlanDirectModify pointer is set to + NULL, no attempts to execute a direct modification on the remote server are taken. void -BeginDirectModify (ForeignScanState *node, - int eflags); +BeginDirectModify(ForeignScanState *node, + int eflags); Prepare to execute a direct modification on the remote server. This is called during executor startup. It should perform any initialization needed prior to the direct modification (that should be - done upon the first call to IterateDirectModify). - The ForeignScanState node has already been created, but - its fdw_state field is still NULL. Information about + done upon the first call to IterateDirectModify). + The ForeignScanState node has already been created, but + its fdw_state field is still NULL. Information about the table to modify is accessible through the - ForeignScanState node (in particular, from the underlying - ForeignScan plan node, which contains any FDW-private - information provided by PlanDirectModify). - eflags contains flag bits describing the executor's + ForeignScanState node (in particular, from the underlying + ForeignScan plan node, which contains any FDW-private + information provided by PlanDirectModify). + eflags contains flag bits describing the executor's operating mode for this plan node. - Note that when (eflags & EXEC_FLAG_EXPLAIN_ONLY) is + Note that when (eflags & EXEC_FLAG_EXPLAIN_ONLY) is true, this function should not perform any externally-visible actions; it should only do the minimum required to make the node state valid - for ExplainDirectModify and EndDirectModify. + for ExplainDirectModify and EndDirectModify. - If the BeginDirectModify pointer is set to - NULL, no attempts to execute a direct modification on the + If the BeginDirectModify pointer is set to + NULL, no attempts to execute a direct modification on the remote server are taken. TupleTableSlot * -IterateDirectModify (ForeignScanState *node); +IterateDirectModify(ForeignScanState *node); - When the INSERT, UPDATE or DELETE - query doesn't have a RETURNING clause, just return NULL + When the INSERT, UPDATE or DELETE + query doesn't have a RETURNING clause, just return NULL after a direct modification on the remote server. When the query has the clause, fetch one result containing the data - needed for the RETURNING calculation, returning it in a - tuple table slot (the node's ScanTupleSlot should be + needed for the RETURNING calculation, returning it in a + tuple table slot (the node's ScanTupleSlot should be used for this purpose). The data that was actually inserted, updated or deleted must be stored in the - es_result_relation_info->ri_projectReturning->pi_exprContext->ecxt_scantuple - of the node's EState. + es_result_relation_info->ri_projectReturning->pi_exprContext->ecxt_scantuple + of the node's EState. Return NULL if no more rows are available. Note that this is called in a short-lived memory context that will be reset between invocations. Create a memory context in - BeginDirectModify if you need longer-lived storage, or use - the es_query_cxt of the node's EState. + BeginDirectModify if you need longer-lived storage, or use + the es_query_cxt of the node's EState. 
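    For orientation (remote_accounts is a hypothetical foreign table), this is
    the sort of statement whose RETURNING data IterateDirectModify has to
    supply, assuming PlanDirectModify accepted it for direct execution on the
    remote server:

-- Each returned row feeds the RETURNING computation; without a RETURNING
-- clause the callback would simply return NULL after the remote update.
UPDATE remote_accounts
   SET balance = balance - 100.00
 WHERE id = 42
RETURNING id, balance;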
- The rows returned must match the fdw_scan_tlist target + The rows returned must match the fdw_scan_tlist target list if one was supplied, otherwise they must match the row type of the foreign table being updated. If you choose to optimize away fetching - columns that are not needed for the RETURNING calculation, + columns that are not needed for the RETURNING calculation, you should insert nulls in those column positions, or else generate a - fdw_scan_tlist list with those columns omitted. + fdw_scan_tlist list with those columns omitted. Whether the query has the clause or not, the query's reported row count must be incremented by the FDW itself. When the query doesn't have the clause, the FDW must also increment the row count for the - ForeignScanState node in the EXPLAIN ANALYZE + ForeignScanState node in the EXPLAIN ANALYZE case. - If the IterateDirectModify pointer is set to - NULL, no attempts to execute a direct modification on the + If the IterateDirectModify pointer is set to + NULL, no attempts to execute a direct modification on the remote server are taken. void -EndDirectModify (ForeignScanState *node); +EndDirectModify(ForeignScanState *node); Clean up following a direct modification on the remote server. It is @@ -860,8 +938,8 @@ EndDirectModify (ForeignScanState *node); - If the EndDirectModify pointer is set to - NULL, no attempts to execute a direct modification on the + If the EndDirectModify pointer is set to + NULL, no attempts to execute a direct modification on the remote server are taken. @@ -871,117 +949,118 @@ EndDirectModify (ForeignScanState *node); FDW Routines For Row Locking - If an FDW wishes to support late row locking (as described - in ), it must provide the following + If an FDW wishes to support late row locking (as described + in ), it must provide the following callback functions: RowMarkType -GetForeignRowMarkType (RangeTblEntry *rte, - LockClauseStrength strength); +GetForeignRowMarkType(RangeTblEntry *rte, + LockClauseStrength strength); Report which row-marking option to use for a foreign table. - rte is the RangeTblEntry node for the table - and strength describes the lock strength requested by the - relevant FOR UPDATE/SHARE clause, if any. The result must be - a member of the RowMarkType enum type. + rte is the RangeTblEntry node for the table + and strength describes the lock strength requested by the + relevant FOR UPDATE/SHARE clause, if any. The result must be + a member of the RowMarkType enum type. This function is called during query planning for each foreign table that - appears in an UPDATE, DELETE, or SELECT - FOR UPDATE/SHARE query and is not the target of UPDATE - or DELETE. + appears in an UPDATE, DELETE, or SELECT + FOR UPDATE/SHARE query and is not the target of UPDATE + or DELETE. - If the GetForeignRowMarkType pointer is set to - NULL, the ROW_MARK_COPY option is always used. - (This implies that RefetchForeignRow will never be called, + If the GetForeignRowMarkType pointer is set to + NULL, the ROW_MARK_COPY option is always used. + (This implies that RefetchForeignRow will never be called, so it need not be provided either.) - See for more information. + See for more information. HeapTuple -RefetchForeignRow (EState *estate, - ExecRowMark *erm, - Datum rowid, - bool *updated); +RefetchForeignRow(EState *estate, + ExecRowMark *erm, + Datum rowid, + bool *updated); Re-fetch one tuple from the foreign table, after locking it if required. - estate is global execution state for the query. 
- erm is the ExecRowMark struct describing + estate is global execution state for the query. + erm is the ExecRowMark struct describing the target foreign table and the row lock type (if any) to acquire. - rowid identifies the tuple to be fetched. - updated is an output parameter. + rowid identifies the tuple to be fetched. + updated is an output parameter. This function should return a palloc'ed copy of the fetched tuple, - or NULL if the row lock couldn't be obtained. The row lock - type to acquire is defined by erm->markType, which is the - value previously returned by GetForeignRowMarkType. - (ROW_MARK_REFERENCE means to just re-fetch the tuple without - acquiring any lock, and ROW_MARK_COPY will never be seen by + or NULL if the row lock couldn't be obtained. The row lock + type to acquire is defined by erm->markType, which is the + value previously returned by GetForeignRowMarkType. + (ROW_MARK_REFERENCE means to just re-fetch the tuple without + acquiring any lock, and ROW_MARK_COPY will never be seen by this routine.) - In addition, *updated should be set to true + In addition, *updated should be set to true if what was fetched was an updated version of the tuple rather than the same version previously obtained. (If the FDW cannot be sure about - this, always returning true is recommended.) + this, always returning true is recommended.) Note that by default, failure to acquire a row lock should result in - raising an error; a NULL return is only appropriate if - the SKIP LOCKED option is specified - by erm->waitPolicy. + raising an error; a NULL return is only appropriate if + the SKIP LOCKED option is specified + by erm->waitPolicy. - The rowid is the ctid value previously read - for the row to be re-fetched. Although the rowid value is - passed as a Datum, it can currently only be a tid. The + The rowid is the ctid value previously read + for the row to be re-fetched. Although the rowid value is + passed as a Datum, it can currently only be a tid. The function API is chosen in hopes that it may be possible to allow other data types for row IDs in future. - If the RefetchForeignRow pointer is set to - NULL, attempts to re-fetch rows will fail + If the RefetchForeignRow pointer is set to + NULL, attempts to re-fetch rows will fail with an error message. - See for more information. + See for more information. bool -RecheckForeignScan (ForeignScanState *node, TupleTableSlot *slot); +RecheckForeignScan(ForeignScanState *node, + TupleTableSlot *slot); Recheck that a previously-returned tuple still matches the relevant scan and join qualifiers, and possibly provide a modified version of the tuple. For foreign data wrappers which do not perform join pushdown, - it will typically be more convenient to set this to NULL and + it will typically be more convenient to set this to NULL and instead set fdw_recheck_quals appropriately. When outer joins are pushed down, however, it isn't sufficient to reapply the checks relevant to all the base tables to the result tuple, even if all needed attributes are present, because failure to match some qualifier might result in some attributes going to NULL, rather than in - no tuple being returned. RecheckForeignScan can recheck + no tuple being returned. RecheckForeignScan can recheck qualifiers and return true if they are still satisfied and false otherwise, but it can also store a replacement tuple into the supplied slot. 
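
For the late row locking interface described in this section, a GetForeignRowMarkType implementation might map the requested lock strength to a row-mark option along the following lines. This is only a sketch of one reasonable policy; as noted above, the core code treats the four locking options alike.

#include "postgres.h"
#include "nodes/lockoptions.h"      /* LockClauseStrength, LCS_* */
#include "nodes/parsenodes.h"       /* RangeTblEntry */
#include "nodes/plannodes.h"        /* RowMarkType */

static RowMarkType
my_GetForeignRowMarkType(RangeTblEntry *rte, LockClauseStrength strength)
{
    switch (strength)
    {
        case LCS_NONE:
            /* no row lock requested: just re-fetch by row ID if rechecked */
            return ROW_MARK_REFERENCE;
        case LCS_FORKEYSHARE:
            return ROW_MARK_KEYSHARE;
        case LCS_FORSHARE:
            return ROW_MARK_SHARE;
        case LCS_FORNOKEYUPDATE:
            return ROW_MARK_NOKEYEXCLUSIVE;
        case LCS_FORUPDATE:
            return ROW_MARK_EXCLUSIVE;
    }
    return ROW_MARK_COPY;           /* keep the compiler quiet */
}
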
@@ -991,13 +1070,13 @@ RecheckForeignScan (ForeignScanState *node, TupleTableSlot *slot); To implement join pushdown, a foreign data wrapper will typically construct an alternative local join plan which is used only for rechecks; this will become the outer subplan of the - ForeignScan. When a recheck is required, this subplan + ForeignScan. When a recheck is required, this subplan can be executed and the resulting tuple can be stored in the slot. This plan need not be efficient since no base table will return more than one row; for example, it may implement all joins as nested loops. - The function GetExistingLocalJoinPath may be used to search + The function GetExistingLocalJoinPath may be used to search existing paths for a suitable local join path, which can be used as the - alternative local join plan. GetExistingLocalJoinPath + alternative local join plan. GetExistingLocalJoinPath searches for an unparameterized path in the path list of the specified join relation. (If it does not find such a path, it returns NULL, in which case a foreign data wrapper may build the local path by itself or @@ -1006,182 +1085,184 @@ RecheckForeignScan (ForeignScanState *node, TupleTableSlot *slot); - FDW Routines for <command>EXPLAIN</> + FDW Routines for <command>EXPLAIN</command> void -ExplainForeignScan (ForeignScanState *node, - ExplainState *es); +ExplainForeignScan(ForeignScanState *node, + ExplainState *es); - Print additional EXPLAIN output for a foreign table scan. - This function can call ExplainPropertyText and - related functions to add fields to the EXPLAIN output. - The flag fields in es can be used to determine what to - print, and the state of the ForeignScanState node + Print additional EXPLAIN output for a foreign table scan. + This function can call ExplainPropertyText and + related functions to add fields to the EXPLAIN output. + The flag fields in es can be used to determine what to + print, and the state of the ForeignScanState node can be inspected to provide run-time statistics in the EXPLAIN - ANALYZE case. + ANALYZE case. - If the ExplainForeignScan pointer is set to - NULL, no additional information is printed during - EXPLAIN. + If the ExplainForeignScan pointer is set to + NULL, no additional information is printed during + EXPLAIN. void -ExplainForeignModify (ModifyTableState *mtstate, - ResultRelInfo *rinfo, - List *fdw_private, - int subplan_index, - struct ExplainState *es); +ExplainForeignModify(ModifyTableState *mtstate, + ResultRelInfo *rinfo, + List *fdw_private, + int subplan_index, + struct ExplainState *es); - Print additional EXPLAIN output for a foreign table update. - This function can call ExplainPropertyText and - related functions to add fields to the EXPLAIN output. - The flag fields in es can be used to determine what to - print, and the state of the ModifyTableState node + Print additional EXPLAIN output for a foreign table update. + This function can call ExplainPropertyText and + related functions to add fields to the EXPLAIN output. + The flag fields in es can be used to determine what to + print, and the state of the ModifyTableState node can be inspected to provide run-time statistics in the EXPLAIN - ANALYZE case. The first four arguments are the same as for - BeginForeignModify. + ANALYZE case. The first four arguments are the same as for + BeginForeignModify. - If the ExplainForeignModify pointer is set to - NULL, no additional information is printed during - EXPLAIN. 
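
A minimal ExplainForeignScan sketch is shown below; the "Remote SQL" label and the hard-wired query text are placeholders for whatever the FDW actually keeps in its fdw_state.

#include "postgres.h"
#include "commands/explain.h"       /* ExplainState, ExplainPropertyText */
#include "nodes/execnodes.h"        /* ForeignScanState */

static void
my_ExplainForeignScan(ForeignScanState *node, ExplainState *es)
{
    /* normally this would be retrieved from node->fdw_state */
    const char *sql = "SELECT id, val FROM remote_tab";

    if (es->verbose)
        ExplainPropertyText("Remote SQL", sql, es);
}
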
+ If the ExplainForeignModify pointer is set to + NULL, no additional information is printed during + EXPLAIN. void -ExplainDirectModify (ForeignScanState *node, - ExplainState *es); +ExplainDirectModify(ForeignScanState *node, + ExplainState *es); - Print additional EXPLAIN output for a direct modification + Print additional EXPLAIN output for a direct modification on the remote server. - This function can call ExplainPropertyText and - related functions to add fields to the EXPLAIN output. - The flag fields in es can be used to determine what to - print, and the state of the ForeignScanState node + This function can call ExplainPropertyText and + related functions to add fields to the EXPLAIN output. + The flag fields in es can be used to determine what to + print, and the state of the ForeignScanState node can be inspected to provide run-time statistics in the EXPLAIN - ANALYZE case. + ANALYZE case. - If the ExplainDirectModify pointer is set to - NULL, no additional information is printed during - EXPLAIN. + If the ExplainDirectModify pointer is set to + NULL, no additional information is printed during + EXPLAIN. - FDW Routines for <command>ANALYZE</> + FDW Routines for <command>ANALYZE</command> bool -AnalyzeForeignTable (Relation relation, - AcquireSampleRowsFunc *func, - BlockNumber *totalpages); +AnalyzeForeignTable(Relation relation, + AcquireSampleRowsFunc *func, + BlockNumber *totalpages); - This function is called when is executed on + This function is called when is executed on a foreign table. If the FDW can collect statistics for this - foreign table, it should return true, and provide a pointer + foreign table, it should return true, and provide a pointer to a function that will collect sample rows from the table in - func, plus the estimated size of the table in pages in - totalpages. Otherwise, return false. + func, plus the estimated size of the table in pages in + totalpages. Otherwise, return false. If the FDW does not support collecting statistics for any tables, the - AnalyzeForeignTable pointer can be set to NULL. + AnalyzeForeignTable pointer can be set to NULL. If provided, the sample collection function must have the signature int -AcquireSampleRowsFunc (Relation relation, int elevel, - HeapTuple *rows, int targrows, - double *totalrows, - double *totaldeadrows); +AcquireSampleRowsFunc(Relation relation, + int elevel, + HeapTuple *rows, + int targrows, + double *totalrows, + double *totaldeadrows); - A random sample of up to targrows rows should be collected - from the table and stored into the caller-provided rows + A random sample of up to targrows rows should be collected + from the table and stored into the caller-provided rows array. The actual number of rows collected must be returned. In addition, store estimates of the total numbers of live and dead rows in - the table into the output parameters totalrows and - totaldeadrows. (Set totaldeadrows to zero + the table into the output parameters totalrows and + totaldeadrows. (Set totaldeadrows to zero if the FDW does not have any concept of dead rows.) - FDW Routines For <command>IMPORT FOREIGN SCHEMA</> + FDW Routines For <command>IMPORT FOREIGN SCHEMA</command> List * -ImportForeignSchema (ImportForeignSchemaStmt *stmt, Oid serverOid); +ImportForeignSchema(ImportForeignSchemaStmt *stmt, Oid serverOid); Obtain a list of foreign table creation commands. 
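
A sketch of the ANALYZE support described above might look as follows; my_AcquireSampleRows and the fixed page-count estimate are placeholders for this illustration.

#include "postgres.h"
#include "access/htup.h"            /* HeapTuple */
#include "foreign/fdwapi.h"         /* AcquireSampleRowsFunc */
#include "storage/block.h"          /* BlockNumber */
#include "utils/rel.h"              /* Relation */

/* sample collector with the signature shown above; body not shown here */
extern int my_AcquireSampleRows(Relation relation, int elevel,
                                HeapTuple *rows, int targrows,
                                double *totalrows, double *totaldeadrows);

static bool
my_AnalyzeForeignTable(Relation relation,
                       AcquireSampleRowsFunc *func,
                       BlockNumber *totalpages)
{
    *func = my_AcquireSampleRows;
    *totalpages = 1000;             /* placeholder; ask the remote side instead */
    return true;
}
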
This function is - called when executing , and is + called when executing , and is passed the parse tree for that statement, as well as the OID of the foreign server to use. It should return a list of C strings, each of - which must contain a command. + which must contain a command. These strings will be parsed and executed by the core server. - Within the ImportForeignSchemaStmt struct, - remote_schema is the name of the remote schema from + Within the ImportForeignSchemaStmt struct, + remote_schema is the name of the remote schema from which tables are to be imported. - list_type identifies how to filter table names: - FDW_IMPORT_SCHEMA_ALL means that all tables in the remote - schema should be imported (in this case table_list is - empty), FDW_IMPORT_SCHEMA_LIMIT_TO means to include only - tables listed in table_list, - and FDW_IMPORT_SCHEMA_EXCEPT means to exclude the tables - listed in table_list. - options is a list of options used for the import process. + list_type identifies how to filter table names: + FDW_IMPORT_SCHEMA_ALL means that all tables in the remote + schema should be imported (in this case table_list is + empty), FDW_IMPORT_SCHEMA_LIMIT_TO means to include only + tables listed in table_list, + and FDW_IMPORT_SCHEMA_EXCEPT means to exclude the tables + listed in table_list. + options is a list of options used for the import process. The meanings of the options are up to the FDW. For example, an FDW could use an option to define whether the - NOT NULL attributes of columns should be imported. + NOT NULL attributes of columns should be imported. These options need not have anything to do with those supported by the FDW as database object options. - The FDW may ignore the local_schema field of - the ImportForeignSchemaStmt, because the core server + The FDW may ignore the local_schema field of + the ImportForeignSchemaStmt, because the core server will automatically insert that name into the parsed CREATE - FOREIGN TABLE commands. + FOREIGN TABLE commands. The FDW does not have to concern itself with implementing the filtering - specified by list_type and table_list, + specified by list_type and table_list, either, as the core server will automatically skip any returned commands for tables excluded according to those options. However, it's often useful to avoid the work of creating commands for excluded tables in the - first place. The function IsImportableForeignTable() may be + first place. The function IsImportableForeignTable() may be useful to test whether a given foreign-table name will pass the filter. If the FDW does not support importing table definitions, the - ImportForeignSchema pointer can be set to NULL. + ImportForeignSchema pointer can be set to NULL. @@ -1189,14 +1270,14 @@ ImportForeignSchema (ImportForeignSchemaStmt *stmt, Oid serverOid); FDW Routines for Parallel Execution - A ForeignScan node can, optionally, support parallel - execution. A parallel ForeignScan will be executed - in multiple processes and should return each row only once across + A ForeignScan node can, optionally, support parallel + execution. A parallel ForeignScan will be executed + in multiple processes and must return each row exactly once across all cooperating processes. To do this, processes can coordinate through - fixed size chunks of dynamic shared memory. This shared memory is not - guaranteed to be mapped at the same address in every process, so pointers - may not be used. 
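
The sketch below returns a single hand-built command to illustrate what ImportForeignSchema is expected to produce. A real FDW would query the remote catalog for the tables in stmt->remote_schema, honor list_type and table_list as described above, and quote identifiers properly; the hard-wired table definition here is purely hypothetical.

#include "postgres.h"
#include "nodes/parsenodes.h"       /* ImportForeignSchemaStmt */
#include "nodes/pg_list.h"
#include "utils/psprintf.h"

static List *
my_ImportForeignSchema(ImportForeignSchemaStmt *stmt, Oid serverOid)
{
    List       *commands = NIL;

    commands = lappend(commands,
                       psprintf("CREATE FOREIGN TABLE t1 (id integer, val text) "
                                "SERVER %s OPTIONS (table_name 't1')",
                                stmt->server_name));
    return commands;
}
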
The following callbacks are all optional in general, - but required if parallel execution is to be supported. + fixed-size chunks of dynamic shared memory. This shared memory is not + guaranteed to be mapped at the same address in every process, so it + must not contain pointers. The following functions are all optional, + but most are required if parallel execution is to be supported. @@ -1215,7 +1296,7 @@ IsForeignScanParallelSafe(PlannerInfo *root, RelOptInfo *rel, - If this callback is not defined, it is assumed that the scan must take + If this function is not defined, it is assumed that the scan must take place within the parallel leader. Note that returning true does not mean that the scan itself can be done in parallel, only that the scan can be performed within a parallel worker. Therefore, it can be useful to define @@ -1230,6 +1311,9 @@ EstimateDSMForeignScan(ForeignScanState *node, ParallelContext *pcxt); Estimate the amount of dynamic shared memory that will be required for parallel operation. This may be higher than the amount that will actually be used, but it must not be lower. The return value is in bytes. + This function is optional, and can be omitted if not needed; but if it + is omitted, the next three functions must be omitted as well, because + no shared memory will be allocated for the FDW's use. @@ -1239,8 +1323,25 @@ InitializeDSMForeignScan(ForeignScanState *node, ParallelContext *pcxt, void *coordinate); Initialize the dynamic shared memory that will be required for parallel - operation; coordinate points to an amount of allocated space - equal to the return value of EstimateDSMForeignScan. + operation. coordinate points to a shared memory area of + size equal to the return value of EstimateDSMForeignScan. + This function is optional, and can be omitted if not needed. + + + + +void +ReInitializeDSMForeignScan(ForeignScanState *node, ParallelContext *pcxt, + void *coordinate); + + Re-initialize the dynamic shared memory required for parallel operation + when the foreign-scan plan node is about to be re-scanned. + This function is optional, and can be omitted if not needed. + Recommended practice is that this function reset only shared state, + while the ReScanForeignScan function resets only local + state. Currently, this function will be called + before ReScanForeignScan, but it's best not to rely on + that ordering. @@ -1249,10 +1350,9 @@ void InitializeWorkerForeignScan(ForeignScanState *node, shm_toc *toc, void *coordinate); - Initialize a parallel worker's custom state based on the shared state - set up in the leader by InitializeDSMForeignScan. - This callback is optional, and needs only be supplied if this - custom path supports parallel execution. + Initialize a parallel worker's local state based on the shared state + set up by the leader during InitializeDSMForeignScan. + This function is optional, and can be omitted if not needed. @@ -1262,7 +1362,7 @@ ShutdownForeignScan(ForeignScanState *node); Release resources when it is anticipated the node will not be executed to completion. This is not called in all cases; sometimes, - EndForeignScan may be called without this function having + EndForeignScan may be called without this function having been called first. 
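
The estimate/initialize pair for the parallel-execution shared memory might look like the sketch below, assuming the FDW shares nothing more than a single work counter between processes (the MySharedScanState struct is hypothetical). Note that the shared area holds plain values, not pointers.

#include "postgres.h"
#include "access/parallel.h"        /* ParallelContext */
#include "nodes/execnodes.h"        /* ForeignScanState */
#include "port/atomics.h"

typedef struct MySharedScanState
{
    pg_atomic_uint64 next_chunk;    /* next unit of work to hand out */
} MySharedScanState;

static Size
my_EstimateDSMForeignScan(ForeignScanState *node, ParallelContext *pcxt)
{
    return sizeof(MySharedScanState);
}

static void
my_InitializeDSMForeignScan(ForeignScanState *node, ParallelContext *pcxt,
                            void *coordinate)
{
    MySharedScanState *shared = (MySharedScanState *) coordinate;

    pg_atomic_init_u64(&shared->next_chunk, 0);
}
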
Since the DSM segment used by parallel query is destroyed just after this callback is invoked, foreign data wrappers that wish to take some action before the DSM segment goes away should implement @@ -1270,6 +1370,26 @@ ShutdownForeignScan(ForeignScanState *node); + + FDW Routines For reparameterization of paths + + + +List * +ReparameterizeForeignPathByChild(PlannerInfo *root, List *fdw_private, + RelOptInfo *child_rel); + + This function is called while converting a path parameterized by the + top-most parent of the given child relation child_rel to be + parameterized by the child relation. The function is used to reparameterize + any paths or translate any expression nodes saved in the given + fdw_private member of a ForeignPath. The + callback may use reparameterize_path_by_child, + adjust_appendrel_attrs or + adjust_appendrel_attrs_multilevel as required. + + + @@ -1318,7 +1438,7 @@ GetUserMapping(Oid userid, Oid serverid); This function returns a UserMapping object for the user mapping of the given role on the given server. (If there is no mapping for the specific user, it will return the mapping for - PUBLIC, or throw error if there is none.) A + PUBLIC, or throw error if there is none.) A UserMapping object contains properties of the user mapping (see foreign/foreign.h for details). @@ -1381,25 +1501,25 @@ GetForeignServerByName(const char *name, bool missing_ok); Foreign Data Wrapper Query Planning - The FDW callback functions GetForeignRelSize, - GetForeignPaths, GetForeignPlan, - PlanForeignModify, GetForeignJoinPaths, - GetForeignUpperPaths, and PlanDirectModify - must fit into the workings of the PostgreSQL planner. + The FDW callback functions GetForeignRelSize, + GetForeignPaths, GetForeignPlan, + PlanForeignModify, GetForeignJoinPaths, + GetForeignUpperPaths, and PlanDirectModify + must fit into the workings of the PostgreSQL planner. Here are some notes about what they must do. - The information in root and baserel can be used + The information in root and baserel can be used to reduce the amount of information that has to be fetched from the foreign table (and therefore reduce the cost). - baserel->baserestrictinfo is particularly interesting, as - it contains restriction quals (WHERE clauses) that should be + baserel->baserestrictinfo is particularly interesting, as + it contains restriction quals (WHERE clauses) that should be used to filter the rows to be fetched. (The FDW itself is not required to enforce these quals, as the core executor can check them instead.) - baserel->reltarget->exprs can be used to determine which + baserel->reltarget->exprs can be used to determine which columns need to be fetched; but note that it only lists columns that - have to be emitted by the ForeignScan plan node, not + have to be emitted by the ForeignScan plan node, not columns that are used in qual evaluation but not output by the query. @@ -1410,49 +1530,49 @@ GetForeignServerByName(const char *name, bool missing_ok); - baserel->fdw_private is a void pointer that is + baserel->fdw_private is a void pointer that is available for FDW planning functions to store information relevant to the particular foreign table. The core planner does not touch it except - to initialize it to NULL when the RelOptInfo node is created. + to initialize it to NULL when the RelOptInfo node is created. 
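
The helper below shows a common pattern built on GetForeignTable, GetForeignServer and GetUserMapping: gathering the option lists attached at each level so later code can search them. The function name and the decision to merge all three lists are assumptions of this sketch, not part of the patch.

#include "postgres.h"
#include "foreign/foreign.h"    /* GetForeignTable, GetForeignServer, GetUserMapping */
#include "nodes/pg_list.h"

static List *
my_collect_options(Oid foreigntableid, Oid userid)
{
    ForeignTable   *table = GetForeignTable(foreigntableid);
    ForeignServer  *server = GetForeignServer(table->serverid);
    UserMapping    *user = GetUserMapping(userid, server->serverid);
    List           *options = NIL;

    /* copy before concatenating so the cached catalog lists stay intact */
    options = list_concat(options, list_copy(table->options));
    options = list_concat(options, list_copy(server->options));
    options = list_concat(options, list_copy(user->options));
    return options;             /* a list of DefElem nodes */
}
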
It is useful for passing information forward from - GetForeignRelSize to GetForeignPaths and/or - GetForeignPaths to GetForeignPlan, thereby + GetForeignRelSize to GetForeignPaths and/or + GetForeignPaths to GetForeignPlan, thereby avoiding recalculation. - GetForeignPaths can identify the meaning of different + GetForeignPaths can identify the meaning of different access paths by storing private information in the - fdw_private field of ForeignPath nodes. - fdw_private is declared as a List pointer, but + fdw_private field of ForeignPath nodes. + fdw_private is declared as a List pointer, but could actually contain anything since the core planner does not touch it. However, best practice is to use a representation that's dumpable - by nodeToString, for use with debugging support available + by nodeToString, for use with debugging support available in the backend. - GetForeignPlan can examine the fdw_private - field of the selected ForeignPath node, and can generate - fdw_exprs and fdw_private lists to be - placed in the ForeignScan plan node, where they will be + GetForeignPlan can examine the fdw_private + field of the selected ForeignPath node, and can generate + fdw_exprs and fdw_private lists to be + placed in the ForeignScan plan node, where they will be available at execution time. Both of these lists must be - represented in a form that copyObject knows how to copy. - The fdw_private list has no other restrictions and is + represented in a form that copyObject knows how to copy. + The fdw_private list has no other restrictions and is not interpreted by the core backend in any way. The - fdw_exprs list, if not NIL, is expected to contain + fdw_exprs list, if not NIL, is expected to contain expression trees that are intended to be executed at run time. These trees will undergo post-processing by the planner to make them fully executable. - In GetForeignPlan, generally the passed-in target list can - be copied into the plan node as-is. The passed scan_clauses list - contains the same clauses as baserel->baserestrictinfo, + In GetForeignPlan, generally the passed-in target list can + be copied into the plan node as-is. The passed scan_clauses list + contains the same clauses as baserel->baserestrictinfo, but may be re-ordered for better execution efficiency. In simple cases - the FDW can just strip RestrictInfo nodes from the - scan_clauses list (using extract_actual_clauses) and put + the FDW can just strip RestrictInfo nodes from the + scan_clauses list (using extract_actual_clauses) and put all the clauses into the plan node's qual list, which means that all the clauses will be checked by the executor at run time. More complex FDWs may be able to check some of the clauses internally, in which case those @@ -1462,54 +1582,54 @@ GetForeignServerByName(const char *name, bool missing_ok); As an example, the FDW might identify some restriction clauses of the - form foreign_variable = - sub_expression, which it determines can be executed on + form foreign_variable = + sub_expression, which it determines can be executed on the remote server given the locally-evaluated value of the - sub_expression. The actual identification of such a - clause should happen during GetForeignPaths, since it would + sub_expression. The actual identification of such a + clause should happen during GetForeignPaths, since it would affect the cost estimate for the path. The path's - fdw_private field would probably include a pointer to - the identified clause's RestrictInfo node. 
Then - GetForeignPlan would remove that clause from scan_clauses, - but add the sub_expression to fdw_exprs + fdw_private field would probably include a pointer to + the identified clause's RestrictInfo node. Then + GetForeignPlan would remove that clause from scan_clauses, + but add the sub_expression to fdw_exprs to ensure that it gets massaged into executable form. It would probably also put control information into the plan node's - fdw_private field to tell the execution functions what + fdw_private field to tell the execution functions what to do at run time. The query transmitted to the remote server would - involve something like WHERE foreign_variable = + involve something like WHERE foreign_variable = $1, with the parameter value obtained at run time from - evaluation of the fdw_exprs expression tree. + evaluation of the fdw_exprs expression tree. Any clauses removed from the plan node's qual list must instead be added - to fdw_recheck_quals or rechecked by - RecheckForeignScan in order to ensure correct behavior - at the READ COMMITTED isolation level. When a concurrent + to fdw_recheck_quals or rechecked by + RecheckForeignScan in order to ensure correct behavior + at the READ COMMITTED isolation level. When a concurrent update occurs for some other table involved in the query, the executor may need to verify that all of the original quals are still satisfied for the tuple, possibly against a different set of parameter values. Using - fdw_recheck_quals is typically easier than implementing checks - inside RecheckForeignScan, but this method will be + fdw_recheck_quals is typically easier than implementing checks + inside RecheckForeignScan, but this method will be insufficient when outer joins have been pushed down, since the join tuples in that case might have some fields go to NULL without rejecting the tuple entirely. - Another ForeignScan field that can be filled by FDWs - is fdw_scan_tlist, which describes the tuples returned by + Another ForeignScan field that can be filled by FDWs + is fdw_scan_tlist, which describes the tuples returned by the FDW for this plan node. For simple foreign table scans this can be - set to NIL, implying that the returned tuples have the + set to NIL, implying that the returned tuples have the row type declared for the foreign table. A non-NIL value must be a - target list (list of TargetEntrys) containing Vars and/or + target list (list of TargetEntrys) containing Vars and/or expressions representing the returned columns. This might be used, for example, to show that the FDW has omitted some columns that it noticed won't be needed for the query. Also, if the FDW can compute expressions used by the query more cheaply than can be done locally, it could add - those expressions to fdw_scan_tlist. Note that join - plans (created from paths made by GetForeignJoinPaths) must - always supply fdw_scan_tlist to describe the set of + those expressions to fdw_scan_tlist. Note that join + plans (created from paths made by GetForeignJoinPaths) must + always supply fdw_scan_tlist to describe the set of columns they will return. @@ -1517,87 +1637,87 @@ GetForeignServerByName(const char *name, bool missing_ok); The FDW should always construct at least one path that depends only on the table's restriction clauses. In join queries, it might also choose to construct path(s) that depend on join clauses, for example - foreign_variable = - local_variable. 
Such clauses will not be found in - baserel->baserestrictinfo but must be sought in the + foreign_variable = + local_variable. Such clauses will not be found in + baserel->baserestrictinfo but must be sought in the relation's join lists. A path using such a clause is called a - parameterized path. It must identify the other relations + parameterized path. It must identify the other relations used in the selected join clause(s) with a suitable value of - param_info; use get_baserel_parampathinfo - to compute that value. In GetForeignPlan, the - local_variable portion of the join clause would be added - to fdw_exprs, and then at run time the case works the + param_info; use get_baserel_parampathinfo + to compute that value. In GetForeignPlan, the + local_variable portion of the join clause would be added + to fdw_exprs, and then at run time the case works the same as for an ordinary restriction clause. - If an FDW supports remote joins, GetForeignJoinPaths should - produce ForeignPaths for potential remote joins in much - the same way as GetForeignPaths works for base tables. + If an FDW supports remote joins, GetForeignJoinPaths should + produce ForeignPaths for potential remote joins in much + the same way as GetForeignPaths works for base tables. Information about the intended join can be passed forward - to GetForeignPlan in the same ways described above. - However, baserestrictinfo is not relevant for join + to GetForeignPlan in the same ways described above. + However, baserestrictinfo is not relevant for join relations; instead, the relevant join clauses for a particular join are - passed to GetForeignJoinPaths as a separate parameter - (extra->restrictlist). + passed to GetForeignJoinPaths as a separate parameter + (extra->restrictlist). An FDW might additionally support direct execution of some plan actions that are above the level of scans and joins, such as grouping or aggregation. To offer such options, the FDW should generate paths and - insert them into the appropriate upper relation. For + insert them into the appropriate upper relation. For example, a path representing remote aggregation should be inserted into - the UPPERREL_GROUP_AGG relation, using add_path. + the UPPERREL_GROUP_AGG relation, using add_path. This path will be compared on a cost basis with local aggregation performed by reading a simple scan path for the foreign relation (note that such a path must also be supplied, else there will be an error at plan time). If the remote-aggregation path wins, which it usually would, it will be converted into a plan in the usual way, by - calling GetForeignPlan. The recommended place to generate - such paths is in the GetForeignUpperPaths + calling GetForeignPlan. The recommended place to generate + such paths is in the GetForeignUpperPaths callback function, which is called for each upper relation (i.e., each post-scan/join processing step), if all the base relations of the query come from the same FDW. - PlanForeignModify and the other callbacks described in - are designed around the assumption + PlanForeignModify and the other callbacks described in + are designed around the assumption that the foreign relation will be scanned in the usual way and then - individual row updates will be driven by a local ModifyTable + individual row updates will be driven by a local ModifyTable plan node. This approach is necessary for the general case where an update requires reading local tables as well as foreign tables. 
However, if the operation could be executed entirely by the foreign server, the FDW could generate a path representing that and insert it - into the UPPERREL_FINAL upper relation, where it would - compete against the ModifyTable approach. This approach - could also be used to implement remote SELECT FOR UPDATE, + into the UPPERREL_FINAL upper relation, where it would + compete against the ModifyTable approach. This approach + could also be used to implement remote SELECT FOR UPDATE, rather than using the row locking callbacks described in - . Keep in mind that a path - inserted into UPPERREL_FINAL is responsible for - implementing all behavior of the query. + . Keep in mind that a path + inserted into UPPERREL_FINAL is responsible for + implementing all behavior of the query. - When planning an UPDATE or DELETE, - PlanForeignModify and PlanDirectModify - can look up the RelOptInfo + When planning an UPDATE or DELETE, + PlanForeignModify and PlanDirectModify + can look up the RelOptInfo struct for the foreign table and make use of the - baserel->fdw_private data previously created by the - scan-planning functions. However, in INSERT the target - table is not scanned so there is no RelOptInfo for it. - The List returned by PlanForeignModify has - the same restrictions as the fdw_private list of a - ForeignScan plan node, that is it must contain only - structures that copyObject knows how to copy. + baserel->fdw_private data previously created by the + scan-planning functions. However, in INSERT the target + table is not scanned so there is no RelOptInfo for it. + The List returned by PlanForeignModify has + the same restrictions as the fdw_private list of a + ForeignScan plan node, that is it must contain only + structures that copyObject knows how to copy. - INSERT with an ON CONFLICT clause does not + INSERT with an ON CONFLICT clause does not support specifying the conflict target, as unique constraints or exclusion constraints on remote tables are not locally known. This - in turn implies that ON CONFLICT DO UPDATE is not supported, + in turn implies that ON CONFLICT DO UPDATE is not supported, since the specification is mandatory there. @@ -1611,13 +1731,13 @@ GetForeignServerByName(const char *name, bool missing_ok); individual rows to prevent concurrent updates of those rows, it is usually worthwhile for the FDW to perform row-level locking with as close an approximation as practical to the semantics used in - ordinary PostgreSQL tables. There are multiple + ordinary PostgreSQL tables. There are multiple considerations involved in this. One key decision to be made is whether to perform early - locking or late locking. In early locking, a row is + locking or late locking. In early locking, a row is locked when it is first retrieved from the underlying store, while in late locking, the row is locked only when it is known that it needs to be locked. (The difference arises because some rows may be discarded by @@ -1627,25 +1747,25 @@ GetForeignServerByName(const char *name, bool missing_ok); concurrency or even unexpected deadlocks. Also, late locking is only possible if the row to be locked can be uniquely re-identified later. Preferably the row identifier should identify a specific version of the - row, as PostgreSQL TIDs do. + row, as PostgreSQL TIDs do. - By default, PostgreSQL ignores locking considerations + By default, PostgreSQL ignores locking considerations when interfacing to FDWs, but an FDW can perform early locking without any explicit support from the core code. 
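
Because the list returned by PlanForeignModify must contain only nodes that copyObject can handle, FDWs typically package their private planning data as strings and integers. A minimal sketch, with a hard-wired statement standing in for whatever SQL the FDW would deparse, follows:

#include "postgres.h"
#include "nodes/pg_list.h"
#include "nodes/plannodes.h"        /* ModifyTable */
#include "nodes/relation.h"         /* PlannerInfo */
#include "nodes/value.h"            /* makeString, makeInteger */

static List *
my_PlanForeignModify(PlannerInfo *root, ModifyTable *plan,
                     Index resultRelation, int subplan_index)
{
    const char *sql = "UPDATE remote_tab SET val = $2 WHERE id = $1";
    bool        has_returning = false;

    /* strings and integers copy fine; raw C pointers or structs do not */
    return list_make2(makeString(pstrdup(sql)),
                      makeInteger(has_returning ? 1 : 0));
}
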
The API functions described - in , which were added - in PostgreSQL 9.5, allow an FDW to use late locking if + in , which were added + in PostgreSQL 9.5, allow an FDW to use late locking if it wishes. - An additional consideration is that in READ COMMITTED - isolation mode, PostgreSQL may need to re-check + An additional consideration is that in READ COMMITTED + isolation mode, PostgreSQL may need to re-check restriction and join conditions against an updated version of some target tuple. Rechecking join conditions requires re-obtaining copies of the non-target rows that were previously joined to the target tuple. - When working with standard PostgreSQL tables, this is + When working with standard PostgreSQL tables, this is done by including the TIDs of the non-target tables in the column list projected through the join, and then re-fetching non-target rows when required. This approach keeps the join data set compact, but it @@ -1660,56 +1780,56 @@ GetForeignServerByName(const char *name, bool missing_ok); - For an UPDATE or DELETE on a foreign table, it - is recommended that the ForeignScan operation on the target + For an UPDATE or DELETE on a foreign table, it + is recommended that the ForeignScan operation on the target table perform early locking on the rows that it fetches, perhaps via the - equivalent of SELECT FOR UPDATE. An FDW can detect whether - a table is an UPDATE/DELETE target at plan time - by comparing its relid to root->parse->resultRelation, - or at execution time by using ExecRelationIsTargetRelation(). + equivalent of SELECT FOR UPDATE. An FDW can detect whether + a table is an UPDATE/DELETE target at plan time + by comparing its relid to root->parse->resultRelation, + or at execution time by using ExecRelationIsTargetRelation(). An alternative possibility is to perform late locking within the - ExecForeignUpdate or ExecForeignDelete + ExecForeignUpdate or ExecForeignDelete callback, but no special support is provided for this. For foreign tables that are specified to be locked by a SELECT - FOR UPDATE/SHARE command, the ForeignScan operation can + FOR UPDATE/SHARE command, the ForeignScan operation can again perform early locking by fetching tuples with the equivalent - of SELECT FOR UPDATE/SHARE. To perform late locking + of SELECT FOR UPDATE/SHARE. To perform late locking instead, provide the callback functions defined - in . - In GetForeignRowMarkType, select rowmark option - ROW_MARK_EXCLUSIVE, ROW_MARK_NOKEYEXCLUSIVE, - ROW_MARK_SHARE, or ROW_MARK_KEYSHARE depending + in . + In GetForeignRowMarkType, select rowmark option + ROW_MARK_EXCLUSIVE, ROW_MARK_NOKEYEXCLUSIVE, + ROW_MARK_SHARE, or ROW_MARK_KEYSHARE depending on the requested lock strength. (The core code will act the same regardless of which of these four options you choose.) Elsewhere, you can detect whether a foreign table was specified to be - locked by this type of command by using get_plan_rowmark at - plan time, or ExecFindRowMark at execution time; you must + locked by this type of command by using get_plan_rowmark at + plan time, or ExecFindRowMark at execution time; you must check not only whether a non-null rowmark struct is returned, but that - its strength field is not LCS_NONE. + its strength field is not LCS_NONE. 
- Lastly, for foreign tables that are used in an UPDATE, - DELETE or SELECT FOR UPDATE/SHARE command but + Lastly, for foreign tables that are used in an UPDATE, + DELETE or SELECT FOR UPDATE/SHARE command but are not specified to be row-locked, you can override the default choice - to copy entire rows by having GetForeignRowMarkType select - option ROW_MARK_REFERENCE when it sees lock strength - LCS_NONE. This will cause RefetchForeignRow to - be called with that value for markType; it should then + to copy entire rows by having GetForeignRowMarkType select + option ROW_MARK_REFERENCE when it sees lock strength + LCS_NONE. This will cause RefetchForeignRow to + be called with that value for markType; it should then re-fetch the row without acquiring any new lock. (If you have - a GetForeignRowMarkType function but don't wish to re-fetch - unlocked rows, select option ROW_MARK_COPY - for LCS_NONE.) + a GetForeignRowMarkType function but don't wish to re-fetch + unlocked rows, select option ROW_MARK_COPY + for LCS_NONE.) - See src/include/nodes/lockoptions.h, the comments - for RowMarkType and PlanRowMark - in src/include/nodes/plannodes.h, and the comments for - ExecRowMark in src/include/nodes/execnodes.h for + See src/include/nodes/lockoptions.h, the comments + for RowMarkType and PlanRowMark + in src/include/nodes/plannodes.h, and the comments for + ExecRowMark in src/include/nodes/execnodes.h for additional information. diff --git a/doc/src/sgml/file-fdw.sgml b/doc/src/sgml/file-fdw.sgml index 74941a6f1e..955a13ab7d 100644 --- a/doc/src/sgml/file-fdw.sgml +++ b/doc/src/sgml/file-fdw.sgml @@ -8,12 +8,12 @@ - The file_fdw module provides the foreign-data wrapper + The file_fdw module provides the foreign-data wrapper file_fdw, which can be used to access data files in the server's file system, or to execute programs on the server and read their output. The data file or program output must be in a format that can be read by COPY FROM; - see for details. + see for details. Access to data files is currently read-only. @@ -41,7 +41,7 @@ Specifies the command to be executed. The standard output of this - command will be read as though COPY FROM PROGRAM were used. + command will be read as though COPY FROM PROGRAM were used. Either program or filename must be specified, but not both. @@ -54,7 +54,7 @@ Specifies the data format, - the same as COPY's FORMAT option. + the same as COPY's FORMAT option. @@ -65,7 +65,7 @@ Specifies whether the data has a header line, - the same as COPY's HEADER option. + the same as COPY's HEADER option. @@ -76,7 +76,7 @@ Specifies the data delimiter character, - the same as COPY's DELIMITER option. + the same as COPY's DELIMITER option. @@ -87,7 +87,7 @@ Specifies the data quote character, - the same as COPY's QUOTE option. + the same as COPY's QUOTE option. @@ -98,7 +98,7 @@ Specifies the data escape character, - the same as COPY's ESCAPE option. + the same as COPY's ESCAPE option. @@ -109,7 +109,7 @@ Specifies the data null string, - the same as COPY's NULL option. + the same as COPY's NULL option. @@ -120,7 +120,7 @@ Specifies the data encoding, - the same as COPY's ENCODING option. + the same as COPY's ENCODING option. @@ -128,10 +128,10 @@ - Note that while COPY allows options such as HEADER + Note that while COPY allows options such as HEADER to be specified without a corresponding value, the foreign table option syntax requires a value to be present in all cases. 
To activate - COPY options typically written without a value, you can pass + COPY options typically written without a value, you can pass the value TRUE, since all such options are Booleans. @@ -150,7 +150,7 @@ This is a Boolean option. If true, it specifies that values of the column should not be matched against the null string (that is, the table-level null option). This has the same effect - as listing the column in COPY's + as listing the column in COPY's FORCE_NOT_NULL option. @@ -162,11 +162,11 @@ This is a Boolean option. If true, it specifies that values of the - column which match the null string are returned as NULL + column which match the null string are returned as NULL even if the value is quoted. Without this option, only unquoted - values matching the null string are returned as NULL. + values matching the null string are returned as NULL. This has the same effect as listing the column in - COPY's FORCE_NULL option. + COPY's FORCE_NULL option. @@ -174,26 +174,28 @@ - COPY's OIDS and + COPY's OIDS and FORCE_QUOTE options are currently not supported by - file_fdw. + file_fdw. These options can only be specified for a foreign table or its columns, not - in the options of the file_fdw foreign-data wrapper, nor in the + in the options of the file_fdw foreign-data wrapper, nor in the options of a server or user mapping using the wrapper. - Changing table-level options requires superuser privileges, for security - reasons: only a superuser should be able to control which file is read - or which program is run. In principle non-superusers could be allowed to + Changing table-level options requires being a superuser or having the privileges + of the default role pg_read_server_files (to use a filename) or + the default role pg_execute_server_programs (to use a program), + for security reasons: only certain users should be able to control which file is + read or which program is run. In principle regular users could be allowed to change the other options, but that's not supported at present. - When specifying the program option, keep in mind that the option + When specifying the program option, keep in mind that the option string is executed by the shell. If you need to pass any arguments to the command that come from an untrusted source, you must be careful to strip or escape any characters that might have special meaning to the shell. @@ -202,9 +204,9 @@ - For a foreign table using file_fdw, EXPLAIN shows + For a foreign table using file_fdw, EXPLAIN shows the name of the file to be read or program to be run. - For a file, unless COSTS OFF is + For a file, unless COSTS OFF is specified, the file size (in bytes) is shown as well. @@ -212,10 +214,10 @@ Create a Foreign Table for PostgreSQL CSV Logs - One of the obvious uses for file_fdw is to make + One of the obvious uses for file_fdw is to make the PostgreSQL activity log available as a table for querying. To do this, first you must be logging to a CSV file, which here we - will call pglog.csv. First, install file_fdw + will call pglog.csv. First, install file_fdw as an extension: @@ -233,7 +235,7 @@ CREATE SERVER pglog FOREIGN DATA WRAPPER file_fdw; Now you are ready to create the foreign data table. 
Using the - CREATE FOREIGN TABLE command, you will need to define + CREATE FOREIGN TABLE command, you will need to define the columns for the table, the CSV file name, and its format: diff --git a/doc/src/sgml/filelist.sgml b/doc/src/sgml/filelist.sgml index b914086009..48ac14a838 100644 --- a/doc/src/sgml/filelist.sgml +++ b/doc/src/sgml/filelist.sgml @@ -48,6 +48,7 @@ + @@ -83,6 +84,7 @@ + @@ -110,7 +112,6 @@ - @@ -157,7 +158,6 @@ - @@ -166,6 +166,8 @@ + + @@ -191,12 +193,3 @@ - - - - diff --git a/doc/src/sgml/func.sgml b/doc/src/sgml/func.sgml index b43ec30a4e..1678c8cbac 100644 --- a/doc/src/sgml/func.sgml +++ b/doc/src/sgml/func.sgml @@ -15,7 +15,7 @@ PostgreSQL provides a large number of functions and operators for the built-in data types. Users can also define their own functions and operators, as described in - . The + . The psql commands \df and \do can be used to list all available functions and operators, respectively. @@ -77,13 +77,13 @@ - AND - OR - NOT + AND + OR + NOT SQL uses a three-valued logic system with true, - false, and null, which represents unknown. + false, and null, which represents unknown. Observe the following truth tables: @@ -176,7 +176,7 @@ The operators AND and OR are commutative, that is, you can switch the left and right operand without affecting the result. But see for more information about the + linkend="syntax-express-eval"/> for more information about the order of evaluation of subexpressions. @@ -191,7 +191,7 @@ The usual comparison operators are available, as shown in . + linkend="functions-comparison-op-table"/>.
@@ -258,7 +258,7 @@ There are also some comparison predicates, as shown in . These behave much like + linkend="functions-comparison-pred-table"/>. These behave much like operators, but have special syntax mandated by the SQL standard. @@ -274,82 +274,82 @@ - a BETWEEN x AND y + a BETWEEN x AND y between - a NOT BETWEEN x AND y + a NOT BETWEEN x AND y not between - a BETWEEN SYMMETRIC x AND y + a BETWEEN SYMMETRIC x AND y between, after sorting the comparison values - a NOT BETWEEN SYMMETRIC x AND y + a NOT BETWEEN SYMMETRIC x AND y not between, after sorting the comparison values - a IS DISTINCT FROM b + a IS DISTINCT FROM b not equal, treating null like an ordinary value - a IS NOT DISTINCT FROM b + a IS NOT DISTINCT FROM b equal, treating null like an ordinary value - expression IS NULL + expression IS NULL is null - expression IS NOT NULL + expression IS NOT NULL is not null - expression ISNULL + expression ISNULL is null (nonstandard syntax) - expression NOTNULL + expression NOTNULL is not null (nonstandard syntax) - boolean_expression IS TRUE + boolean_expression IS TRUE is true - boolean_expression IS NOT TRUE + boolean_expression IS NOT TRUE is false or unknown - boolean_expression IS FALSE + boolean_expression IS FALSE is false - boolean_expression IS NOT FALSE + boolean_expression IS NOT FALSE is true or unknown - boolean_expression IS UNKNOWN + boolean_expression IS UNKNOWN is unknown - boolean_expression IS NOT UNKNOWN + boolean_expression IS NOT UNKNOWN is true or false @@ -381,9 +381,9 @@ BETWEEN SYMMETRIC - BETWEEN SYMMETRIC is like BETWEEN + BETWEEN SYMMETRIC is like BETWEEN except there is no requirement that the argument to the left of - AND be less than or equal to the argument on the right. + AND be less than or equal to the argument on the right. If it is not, those two arguments are automatically swapped, so that a nonempty range is always implied. @@ -395,23 +395,23 @@ IS NOT DISTINCT FROM - Ordinary comparison operators yield null (signifying unknown), + Ordinary comparison operators yield null (signifying unknown), not true or false, when either input is null. For example, - 7 = NULL yields null, as does 7 <> NULL. When + 7 = NULL yields null, as does 7 <> NULL. When this behavior is not suitable, use the - IS NOT DISTINCT FROM predicates: + IS NOT DISTINCT FROM predicates: a IS DISTINCT FROM b a IS NOT DISTINCT FROM b For non-null inputs, IS DISTINCT FROM is - the same as the <> operator. However, if both + the same as the <> operator. However, if both inputs are null it returns false, and if only one input is null it returns true. Similarly, IS NOT DISTINCT FROM is identical to = for non-null inputs, but it returns true when both inputs are null, and false when only one input is null. Thus, these predicates effectively act as though null - were a normal data value, rather than unknown. + were a normal data value, rather than unknown. @@ -443,8 +443,8 @@ Do not write expression = NULL - because NULL is not equal to - NULL. (The null value represents an unknown value, + because NULL is not equal to + NULL. (The null value represents an unknown value, and it is not known whether two unknown values are equal.) @@ -455,7 +455,7 @@ returns true if expression evaluates to the null value. It is highly recommended that these applications be modified to comply with the SQL standard. However, if that - cannot be done the + cannot be done the configuration variable is available. If it is enabled, PostgreSQL will convert x = NULL clauses to x IS NULL. 
@@ -464,16 +464,16 @@ If the expression is row-valued, then - IS NULL is true when the row expression itself is null + IS NULL is true when the row expression itself is null or when all the row's fields are null, while - IS NOT NULL is true when the row expression itself is non-null + IS NOT NULL is true when the row expression itself is non-null and all the row's fields are non-null. Because of this behavior, - IS NULL and IS NOT NULL do not always return + IS NULL and IS NOT NULL do not always return inverse results for row-valued expressions; in particular, a row-valued expression that contains both null and non-null fields will return false for both tests. In some cases, it may be preferable to - write row IS DISTINCT FROM NULL - or row IS NOT DISTINCT FROM NULL, + write row IS DISTINCT FROM NULL + or row IS NOT DISTINCT FROM NULL, which will simply check whether the overall row value is null without any additional tests on the row fields. @@ -508,8 +508,8 @@ These will always return true or false, never a null value, even when the operand is null. - A null input is treated as the logical value unknown. - Notice that IS UNKNOWN and IS NOT UNKNOWN are + A null input is treated as the logical value unknown. + Notice that IS UNKNOWN and IS NOT UNKNOWN are effectively the same as IS NULL and IS NOT NULL, respectively, except that the input expression must be of Boolean type. @@ -536,7 +536,7 @@ Some comparison-related functions are also available, as shown in . + linkend="functions-comparison-func-table"/>.
@@ -591,7 +591,7 @@ - shows the available mathematical operators. + shows the available mathematical operators.
@@ -736,11 +736,11 @@ the others are available for all numeric data types. The bitwise operators are also available for the bit string types bit and bit varying, as - shown in . + shown in . - shows the available + shows the available mathematical functions. In the table, dp indicates double precision. Many of these functions are provided in multiple forms with different argument types. @@ -835,10 +835,10 @@ div - div(y numeric, - x numeric) + div(y numeric, + x numeric) - numeric + numeric integer quotient of y/x div(9,4) 2 @@ -941,7 +941,7 @@ b dp) dp - a raised to the power of b + a raised to the power of b power(9.0, 3.0) 729 @@ -950,7 +950,7 @@ power(a numeric, b numeric) numeric - a raised to the power of b + a raised to the power of b power(9.0, 3.0) 729 @@ -1056,10 +1056,10 @@ width_bucket(operand dp, b1 dp, b2 dp, count int) int - return the bucket number to which operand would - be assigned in a histogram having count equal-width - buckets spanning the range b1 to b2; - returns 0 or count+1 for + return the bucket number to which operand would + be assigned in a histogram having count equal-width + buckets spanning the range b1 to b2; + returns 0 or count+1 for an input outside the range width_bucket(5.35, 0.024, 10.06, 5) 3 @@ -1068,10 +1068,10 @@ width_bucket(operand numeric, b1 numeric, b2 numeric, count int) int - return the bucket number to which operand would - be assigned in a histogram having count equal-width - buckets spanning the range b1 to b2; - returns 0 or count+1 for + return the bucket number to which operand would + be assigned in a histogram having count equal-width + buckets spanning the range b1 to b2; + returns 0 or count+1 for an input outside the range width_bucket(5.35, 0.024, 10.06, 5) 3 @@ -1080,10 +1080,10 @@ width_bucket(operand anyelement, thresholds anyarray) int - return the bucket number to which operand would + return the bucket number to which operand would be assigned given an array listing the lower bounds of the buckets; - returns 0 for an input less than the first lower bound; - the thresholds array must be sorted, + returns 0 for an input less than the first lower bound; + the thresholds array must be sorted, smallest first, or unexpected results will be obtained width_bucket(now(), array['yesterday', 'today', 'tomorrow']::timestamptz[]) 2 @@ -1093,7 +1093,7 @@
- shows functions for + shows functions for generating random numbers. @@ -1139,11 +1139,11 @@ The characteristics of the values returned by random() depend on the system implementation. It is not suitable for cryptographic - applications; see module for an alternative. + applications; see module for an alternative.
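For instance, a reproducible draw in a fixed range can be sketched as follows (the seed value and range are arbitrary; setseed is assumed here, it seeds the generator used by random):

SELECT setseed(0.5);                    -- fix the seed so the next call is repeatable
SELECT floor(random() * 10 + 1)::int;   -- an integer between 1 and 10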
- Finally, shows the + Finally, shows the available trigonometric functions. All trigonometric functions take arguments and return values of type double precision. Each of the trigonometric functions comes in @@ -1303,7 +1303,7 @@ and degrees() shown earlier. However, using the degree-based trigonometric functions is preferred, as that way avoids round-off error for special cases such - as sind(30). + as sind(30). @@ -1328,10 +1328,10 @@ SQL defines some string functions that use key words, rather than commas, to separate arguments. Details are in - . - PostgreSQL also provides versions of these functions + . + PostgreSQL also provides versions of these functions that use the regular function invocation syntax - (see ). + (see ). @@ -1339,12 +1339,12 @@ Before PostgreSQL 8.3, these functions would silently accept values of several non-string data types as well, due to the presence of implicit coercions from those data types to - text. Those coercions have been removed because they frequently + text. Those coercions have been removed because they frequently caused surprising behaviors. However, the string concatenation operator - (||) still accepts non-string input, so long as at least one + (||) still accepts non-string input, so long as at least one input is of a string type, as shown in . For other cases, insert an explicit - coercion to text if you need to duplicate the previous behavior. + linkend="functions-string-sql"/>. For other cases, insert an explicit + coercion to text if you need to duplicate the previous behavior. @@ -1504,7 +1504,7 @@ text Extract substring matching POSIX regular expression. See - for more information on pattern + for more information on pattern matching. substring('Thomas' from '...$') @@ -1516,7 +1516,7 @@ text Extract substring matching SQL regular expression. - See for more information on + See for more information on pattern matching. substring('Thomas' from '%#"o_a#"_' for '#') @@ -1536,7 +1536,7 @@ Remove the longest string containing only characters from characters (a space by default) from the - start, end, or both ends (both is the default) + start, end, or both ends (both is the default) of string trim(both 'xyz' from 'yxTomxx') @@ -1553,7 +1553,7 @@ text - Non-standard syntax for trim() + Non-standard syntax for trim() trim(both from 'yxTomxx', 'xyz') Tom @@ -1577,8 +1577,8 @@ Additional string manipulation functions are available and are - listed in . Some of them are used internally to implement the - SQL-standard string functions listed in . + listed in . Some of them are used internally to implement the + SQL-standard string functions listed in . @@ -1702,7 +1702,7 @@ string must be valid in this encoding. Conversions can be defined by CREATE CONVERSION. Also there are some predefined conversions. See for available conversions. + linkend="conversion-names"/> for available conversions. convert('text_in_utf8', 'UTF8', 'LATIN1') text_in_utf8 represented in Latin-1 @@ -1753,8 +1753,8 @@ bytea - Decode binary data from textual representation in string. - Options for format are same as in encode. + Decode binary data from textual representation in string. + Options for format are same as in encode. decode('MTIzAAE=', 'base64') \x3132330001 @@ -1771,12 +1771,12 @@ text Encode binary data into a textual representation. Supported - formats are: base64, hex, escape. - escape converts zero bytes and high-bit-set bytes to - octal sequences (\nnn) and + formats are: base64, hex, escape. 
+ escape converts zero bytes and high-bit-set bytes to + octal sequences (\nnn) and doubles backslashes. - encode(E'123\\000\\001', 'base64') + encode('123\000\001', 'base64') MTIzAAE= @@ -1791,8 +1791,8 @@ text Format arguments according to a format string. - This function is similar to the C function sprintf. - See . + This function is similar to the C function sprintf. + See . format('Hello %s, %1$s', 'World') Hello World, World @@ -1825,8 +1825,8 @@ text - Return first n characters in the string. When n - is negative, return all but last |n| characters. + Return first n characters in the string. When n + is negative, return all but last |n| characters. left('abcde', 2) ab @@ -1929,11 +1929,11 @@ Split qualified_identifier into an array of identifiers, removing any quoting of individual identifiers. By default, extra characters after the last identifier are considered an - error; but if the second parameter is false, then such + error; but if the second parameter is false, then such extra characters are ignored. (This behavior is useful for parsing names for objects like functions.) Note that this function does not truncate over-length identifiers. If you want truncation you can cast - the result to name[]. + the result to name[]. parse_ident('"SomeSchema".someTable') {SomeSchema,sometable} @@ -1968,7 +1968,7 @@ Quotes are added only if necessary (i.e., if the string contains non-identifier characters or would be case-folded). Embedded quotes are properly doubled. - See also . + See also . quote_ident('Foo bar') "Foo bar" @@ -1989,7 +1989,7 @@ Note that quote_literal returns null on null input; if the argument might be null, quote_nullable is often more suitable. - See also . + See also . quote_literal(E'O\'Reilly') 'O''Reilly' @@ -2017,9 +2017,9 @@ Return the given string suitably quoted to be used as a string literal in an SQL statement string; or, if the argument - is null, return NULL. + is null, return NULL. Embedded single-quotes and backslashes are properly doubled. - See also . + See also . quote_nullable(NULL) NULL @@ -2030,7 +2030,7 @@ text Coerce the given value to text and then quote it as a literal; - or, if the argument is null, return NULL. + or, if the argument is null, return NULL. Embedded single-quotes and backslashes are properly doubled. quote_nullable(42.5) @@ -2048,7 +2048,7 @@ Return captured substring(s) resulting from the first match of a POSIX regular expression to the string. See - for more information. + for more information. regexp_match('foobarbequebaz', '(bar)(beque)') {bar,beque} @@ -2065,7 +2065,7 @@ Return captured substring(s) resulting from matching a POSIX regular expression to the string. See - for more information. + for more information. regexp_matches('foobarbequebaz', 'ba.', 'g') {bar}{baz} (2 rows) @@ -2081,7 +2081,7 @@ text Replace substring(s) matching a POSIX regular expression. See - for more information. + for more information. regexp_replace('Thomas', '.[mN]a.', 'M') ThM @@ -2097,10 +2097,10 @@ text[] Split string using a POSIX regular expression as - the delimiter. See for more + the delimiter. See for more information. - regexp_split_to_array('hello world', E'\\s+') + regexp_split_to_array('hello world', '\s+') {hello,world} @@ -2114,10 +2114,10 @@ setof text Split string using a POSIX regular expression as - the delimiter. See for more + the delimiter. See for more information. 
- regexp_split_to_table('hello world', E'\\s+') + regexp_split_to_table('hello world', '\s+') helloworld (2 rows) @@ -2177,8 +2177,8 @@ text - Return last n characters in the string. When n - is negative, return all but first |n| characters. + Return last n characters in the string. When n + is negative, return all but first |n| characters. right('abcde', 2) de @@ -2274,6 +2274,21 @@ ph + + + + starts_with + + starts_with(string, prefix) + + bool + + Returns true if string starts with prefix. + + starts_with('alphabet', 'alph') + t + + @@ -2285,8 +2300,8 @@ text Convert string to ASCII from another encoding - (only supports conversion from LATIN1, LATIN2, LATIN9, - and WIN1250 encodings) + (only supports conversion from LATIN1, LATIN2, LATIN9, + and WIN1250 encodings) to_ascii('Karel') Karel @@ -2339,7 +2354,7 @@ format functions are variadic, so it is possible to pass the values to be concatenated or formatted as an array marked with the VARIADIC keyword (see ). The array's elements are + linkend="xfunc-sql-variadic-functions"/>). The array's elements are treated as if they were separate ordinary arguments to the function. If the variadic array argument is NULL, concat and concat_ws return NULL, but @@ -2348,7 +2363,7 @@ See also the aggregate function string_agg in - . + .
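A short sketch of the VARIADIC form described above (the array contents are arbitrary; concat_ws ignores NULL elements, and a NULL variadic array yields NULL as stated in the preceding paragraph):

SELECT concat_ws(',', VARIADIC ARRAY['a', 'b', NULL, 'c']);
Result: a,b,c

SELECT concat_ws(',', VARIADIC NULL::text[]);
Result: NULL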
@@ -3154,30 +3169,30 @@ - The function format produces output formatted according to + The function format produces output formatted according to a format string, in a style similar to the C function - sprintf. + sprintf. -format(formatstr text [, formatarg "any" [, ...] ]) +format(formatstr text [, formatarg "any" [, ...] ]) - formatstr is a format string that specifies how the + formatstr is a format string that specifies how the result should be formatted. Text in the format string is copied - directly to the result, except where format specifiers are + directly to the result, except where format specifiers are used. Format specifiers act as placeholders in the string, defining how subsequent function arguments should be formatted and inserted into the - result. Each formatarg argument is converted to text + result. Each formatarg argument is converted to text according to the usual output rules for its data type, and then formatted and inserted into the result string according to the format specifier(s). - Format specifiers are introduced by a % character and have + Format specifiers are introduced by a % character and have the form -%[position][flags][width]type +%[position][flags][width]type where the component fields are: @@ -3186,10 +3201,10 @@ position (optional) - A string of the form n$ where - n is the index of the argument to print. + A string of the form n$ where + n is the index of the argument to print. Index 1 means the first argument after - formatstr. If the position is + formatstr. If the position is omitted, the default is to use the next argument in sequence. @@ -3201,8 +3216,8 @@ Additional options controlling how the format specifier's output is formatted. Currently the only supported flag is a minus sign - (-) which will cause the format specifier's output to be - left-justified. This has no effect unless the width + (-) which will cause the format specifier's output to be + left-justified. This has no effect unless the width field is also specified. @@ -3212,23 +3227,23 @@ width (optional) - Specifies the minimum number of characters to use to + Specifies the minimum number of characters to use to display the format specifier's output. The output is padded on the - left or right (depending on the - flag) with spaces as + left or right (depending on the - flag) with spaces as needed to fill the width. A too-small width does not cause truncation of the output, but is simply ignored. The width may be specified using any of the following: a positive integer; an - asterisk (*) to use the next function argument as the - width; or a string of the form *n$ to - use the nth function argument as the width. + asterisk (*) to use the next function argument as the + width; or a string of the form *n$ to + use the nth function argument as the width. If the width comes from a function argument, that argument is consumed before the argument that is used for the format specifier's value. If the width argument is negative, the result is left - aligned (as if the - flag had been specified) within a - field of length abs(width). + aligned (as if the - flag had been specified) within a + field of length abs(width). @@ -3251,13 +3266,13 @@ I treats the argument value as an SQL identifier, double-quoting it if necessary. It is an error for the value to be null (equivalent to - quote_ident). + quote_ident). L quotes the argument value as an SQL literal. 
- A null value is displayed as the string NULL, without + A null value is displayed as the string NULL, without quotes (equivalent to quote_nullable). @@ -3270,7 +3285,7 @@ In addition to the format specifiers described above, the special sequence - %% may be used to output a literal % character. + %% may be used to output a literal % character. @@ -3281,77 +3296,77 @@ SELECT format('Hello %s', 'World'); Result: Hello World SELECT format('Testing %s, %s, %s, %%', 'one', 'two', 'three'); -Result: Testing one, two, three, % +Result: Testing one, two, three, % SELECT format('INSERT INTO %I VALUES(%L)', 'Foo bar', E'O\'Reilly'); Result: INSERT INTO "Foo bar" VALUES('O''Reilly') -SELECT format('INSERT INTO %I VALUES(%L)', 'locations', E'C:\\Program Files'); -Result: INSERT INTO locations VALUES(E'C:\\Program Files') +SELECT format('INSERT INTO %I VALUES(%L)', 'locations', 'C:\Program Files'); +Result: INSERT INTO locations VALUES('C:\Program Files') Here are examples using width fields - and the - flag: + and the - flag: SELECT format('|%10s|', 'foo'); -Result: | foo| +Result: | foo| SELECT format('|%-10s|', 'foo'); -Result: |foo | +Result: |foo | SELECT format('|%*s|', 10, 'foo'); -Result: | foo| +Result: | foo| SELECT format('|%*s|', -10, 'foo'); -Result: |foo | +Result: |foo | SELECT format('|%-*s|', 10, 'foo'); -Result: |foo | +Result: |foo | SELECT format('|%-*s|', -10, 'foo'); -Result: |foo | +Result: |foo | - These examples show use of position fields: + These examples show use of position fields: SELECT format('Testing %3$s, %2$s, %1$s', 'one', 'two', 'three'); -Result: Testing three, two, one +Result: Testing three, two, one SELECT format('|%*2$s|', 'foo', 10, 'bar'); -Result: | bar| +Result: | bar| SELECT format('|%1$*2$s|', 'foo', 10, 'bar'); -Result: | foo| +Result: | foo| - Unlike the standard C function sprintf, - PostgreSQL's format function allows format - specifiers with and without position fields to be mixed + Unlike the standard C function sprintf, + PostgreSQL's format function allows format + specifiers with and without position fields to be mixed in the same format string. A format specifier without a - position field always uses the next argument after the + position field always uses the next argument after the last argument consumed. - In addition, the format function does not require all + In addition, the format function does not require all function arguments to be used in the format string. For example: SELECT format('Testing %3$s, %2$s, %s', 'one', 'two', 'three'); -Result: Testing three, two, three +Result: Testing three, two, three - The %I and %L format specifiers are particularly + The %I and %L format specifiers are particularly useful for safely constructing dynamic SQL statements. See - . + . @@ -3375,16 +3390,16 @@ SELECT format('Testing %3$s, %2$s, %s', 'one', 'two', 'three'); SQL defines some string functions that use key words, rather than commas, to separate arguments. Details are in - . - PostgreSQL also provides versions of these functions + . + PostgreSQL also provides versions of these functions that use the regular function invocation syntax - (see ). + (see ). The sample results shown on this page assume that the server parameter - bytea_output is set + bytea_output is set to escape (the traditional PostgreSQL format). 
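To reproduce the escape-format sample outputs, the parameter can be set for the current session (a sketch; hex is the default in recent releases):

SET bytea_output = 'escape';
SHOW bytea_output;
Result: escape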
@@ -3414,7 +3429,7 @@ SELECT format('Testing %3$s, %2$s, %s', 'one', 'two', 'three'); concatenation - E'\\\\Post'::bytea || E'\\047gres\\000'::bytea + '\\Post'::bytea || '\047gres\000'::bytea \\Post'gres\000 @@ -3427,7 +3442,7 @@ SELECT format('Testing %3$s, %2$s, %s', 'one', 'two', 'three'); int Number of bytes in binary string - octet_length(E'jo\\000se'::bytea) + octet_length('jo\000se'::bytea) 5 @@ -3442,7 +3457,7 @@ SELECT format('Testing %3$s, %2$s, %s', 'one', 'two', 'three'); Replace substring - overlay(E'Th\\000omas'::bytea placing E'\\002\\003'::bytea from 2 for 3) + overlay('Th\000omas'::bytea placing '\002\003'::bytea from 2 for 3) T\\002\\003mas @@ -3455,7 +3470,7 @@ SELECT format('Testing %3$s, %2$s, %s', 'one', 'two', 'three'); int Location of specified substring - position(E'\\000om'::bytea in E'Th\\000omas'::bytea) + position('\000om'::bytea in 'Th\000omas'::bytea) 3 @@ -3470,7 +3485,7 @@ SELECT format('Testing %3$s, %2$s, %s', 'one', 'two', 'three'); Extract substring - substring(E'Th\\000omas'::bytea from 2 for 3) + substring('Th\000omas'::bytea from 2 for 3) h\000o @@ -3489,7 +3504,7 @@ SELECT format('Testing %3$s, %2$s, %s', 'one', 'two', 'three'); bytes from the start and end of string - trim(E'\\000\\001'::bytea from E'\\000Tom\\001'::bytea) + trim('\000\001'::bytea from '\000Tom\001'::bytea) Tom @@ -3498,10 +3513,10 @@ SELECT format('Testing %3$s, %2$s, %s', 'one', 'two', 'three'); Additional binary string manipulation functions are available and - are listed in . Some + are listed in . Some of them are used internally to implement the SQL-standard string functions listed in . + linkend="functions-binarystring-sql"/>.
@@ -3532,7 +3547,7 @@ SELECT format('Testing %3$s, %2$s, %s', 'one', 'two', 'three'); bytes from the start and end of string - btrim(E'\\000trim\\001'::bytea, E'\\000\\001'::bytea) + btrim('\000trim\001'::bytea, '\000\001'::bytea)trim @@ -3546,10 +3561,10 @@ SELECT format('Testing %3$s, %2$s, %s', 'one', 'two', 'three'); bytea - Decode binary data from textual representation in string. - Options for format are same as in encode. + Decode binary data from textual representation in string. + Options for format are same as in encode. - decode(E'123\\000456', 'escape') + decode('123\000456', 'escape')123\000456 @@ -3564,12 +3579,12 @@ SELECT format('Testing %3$s, %2$s, %s', 'one', 'two', 'three'); text Encode binary data into a textual representation. Supported - formats are: base64, hex, escape. - escape converts zero bytes and high-bit-set bytes to - octal sequences (\nnn) and + formats are: base64, hex, escape. + escape converts zero bytes and high-bit-set bytes to + octal sequences (\nnn) and doubles backslashes. - encode(E'123\\000456'::bytea, 'escape') + encode('123\000456'::bytea, 'escape')123\000456 @@ -3584,7 +3599,7 @@ SELECT format('Testing %3$s, %2$s, %s', 'one', 'two', 'three'); Extract bit from string - get_bit(E'Th\\000omas'::bytea, 45) + get_bit('Th\000omas'::bytea, 45)1 @@ -3599,7 +3614,7 @@ SELECT format('Testing %3$s, %2$s, %s', 'one', 'two', 'three'); Extract byte from string - get_byte(E'Th\\000omas'::bytea, 4) + get_byte('Th\000omas'::bytea, 4)109 @@ -3623,7 +3638,7 @@ SELECT format('Testing %3$s, %2$s, %s', 'one', 'two', 'three'); binary strings, length - length(E'jo\\000se'::bytea) + length('jo\000se'::bytea)5 @@ -3639,8 +3654,8 @@ SELECT format('Testing %3$s, %2$s, %s', 'one', 'two', 'three'); Calculates the MD5 hash of string, returning the result in hexadecimal - md5(E'Th\\000omas'::bytea) - 8ab2d3c9689aaf18 b4958c334c82d8b1 + md5('Th\000omas'::bytea) + 8ab2d3c9689aaf18​b4958c334c82d8b1 @@ -3649,13 +3664,13 @@ SELECT format('Testing %3$s, %2$s, %s', 'one', 'two', 'three'); set_bit set_bit(string, - offset, newvalue) + offset, newvalue) bytea Set bit in string - set_bit(E'Th\\000omas'::bytea, 45, 0) + set_bit('Th\000omas'::bytea, 45, 0) Th\000omAs @@ -3665,31 +3680,100 @@ SELECT format('Testing %3$s, %2$s, %s', 'one', 'two', 'three'); set_byteset_byte(string, - offset, newvalue) + offset, newvalue)bytea Set byte in string - set_byte(E'Th\\000omas'::bytea, 4, 64) + set_byte('Th\000omas'::bytea, 4, 64)Th\000o@as + + + + + sha224 + + sha224(bytea) + + bytea + + SHA-224 hash + + sha224('abc') + \x23097d223405d8228642a477bda2​55b32aadbce4bda0b3f7e36c9da7 + + + + + + sha256 + + sha256(bytea) + + bytea + + SHA-256 hash + + sha256('abc') + \xba7816bf8f01cfea414140de5dae2223​b00361a396177a9cb410ff61f20015ad + + + + + + sha384 + + sha384(bytea) + + bytea + + SHA-384 hash + + sha384('abc') + \xcb00753f45a35e8bb5a03d699ac65007​272c32ab0eded1631a8b605a43ff5bed​8086072ba1e7cc2358baeca134c825a7 + + + + + + sha512 + + sha512(bytea) + + bytea + + SHA-512 hash + + sha512('abc') + \xddaf35a193617abacc417349ae204131​12e6fa4e89a97ea20a9eeee64b55d39a​2192992a274fc1a836ba3c23a3feebbd​454d4423643ce80e2a9ac94fa54ca49f +
- get_byte and set_byte number the first byte + get_byte and set_byte number the first byte of a binary string as byte 0. - get_bit and set_bit number bits from the + get_bit and set_bit number bits from the right within each byte; for example bit 0 is the least significant bit of the first byte, and bit 15 is the most significant bit of the second byte. + + Note that for historic reasons, the function md5 + returns a hex-encoded value of type text whereas the SHA-2 + functions return type bytea. Use the functions + encode and decode to convert + between the two, for example encode(sha256('abc'), + 'hex') to get a hex-encoded text representation. + + See also the aggregate function string_agg in - and the large object functions - in . + and the large object functions + in . @@ -3707,7 +3791,7 @@ SELECT format('Testing %3$s, %2$s, %s', 'one', 'two', 'three'); manipulating bit strings, that is values of the types bit and bit varying. Aside from the usual comparison operators, the operators - shown in can be used. + shown in can be used. Bit string operands of &, |, and # must be of equal length. When bit shifting, the original length of the string is preserved, as shown @@ -3802,7 +3886,7 @@ SELECT format('Testing %3$s, %2$s, %s', 'one', 'two', 'three'); In addition, it is possible to cast integral values to and from type - bit. + bit. Some examples: 44::bit(10) 0000101100 @@ -3810,15 +3894,15 @@ SELECT format('Testing %3$s, %2$s, %s', 'one', 'two', 'three'); cast(-44 as bit(12)) 111111010100 '1110'::bit(4)::integer 14 - Note that casting to just bit means casting to - bit(1), and so will deliver only the least significant + Note that casting to just bit means casting to + bit(1), and so will deliver only the least significant bit of the integer. - Casting an integer to bit(n) copies the rightmost - n bits. Casting an integer to a bit string width wider + Casting an integer to bit(n) copies the rightmost + n bits. Casting an integer to a bit string width wider than the integer itself will sign-extend on the left. @@ -3840,7 +3924,7 @@ cast(-44 as bit(12)) 111111010100 more recent SIMILAR TO operator (added in SQL:1999), and POSIX-style regular expressions. Aside from the basic does this string match - this pattern? operators, functions are available to extract + this pattern? operators, functions are available to extract or replace matching substrings and to split a string at matching locations. @@ -3935,9 +4019,9 @@ cast(-44 as bit(12)) 111111010100 - If you have turned off, + If you have turned off, any backslashes you write in literal string constants will need to be - doubled. See for more information. + doubled. See for more information. @@ -3964,6 +4048,12 @@ cast(-44 as bit(12)) 111111010100 ILIKE, respectively. All of these operators are PostgreSQL-specific. + + + There is also the prefix operator ^@ and corresponding + starts_with function which covers cases when only + searching by beginning of the string is needed. + @@ -4004,9 +4094,9 @@ cast(-44 as bit(12)) 111111010100 can match any part of the string. Also like LIKE, SIMILAR TO uses - _ and % as wildcard characters denoting + _ and % as wildcard characters denoting any single character and any string, respectively (these are - comparable to . and .* in POSIX regular + comparable to . and .* in POSIX regular expressions). @@ -4041,21 +4131,21 @@ cast(-44 as bit(12)) 111111010100 - {m} denotes repetition - of the previous item exactly m times. + {m} denotes repetition + of the previous item exactly m times. 
- {m,} denotes repetition - of the previous item m or more times. + {m,} denotes repetition + of the previous item m or more times. - {m,n} - denotes repetition of the previous item at least m and - not more than n times. + {m,n} + denotes repetition of the previous item at least m and + not more than n times. @@ -4072,14 +4162,14 @@ cast(-44 as bit(12)) 111111010100
- Notice that the period (.) is not a metacharacter - for SIMILAR TO. + Notice that the period (.) is not a metacharacter + for SIMILAR TO. - As with LIKE, a backslash disables the special meaning + As with LIKE, a backslash disables the special meaning of any of these metacharacters; or a different escape character can - be specified with ESCAPE. + be specified with ESCAPE. @@ -4093,23 +4183,23 @@ cast(-44 as bit(12)) 111111010100 - The substring function with three parameters, + The substring function with three parameters, substring(string from pattern for escape-character), provides extraction of a substring that matches an SQL - regular expression pattern. As with SIMILAR TO, the + regular expression pattern. As with SIMILAR TO, the specified pattern must match the entire data string, or else the function fails and returns null. To indicate the part of the pattern that should be returned on success, the pattern must contain two occurrences of the escape character followed by a double quote - ("). + ("). The text matching the portion of the pattern between these markers is returned. - Some examples, with #" delimiting the return string: + Some examples, with #" delimiting the return string: substring('foobar' from '%#"o_b#"%' for '#') oob substring('foobar' from '#"o_b#"%' for '#') NULL @@ -4144,7 +4234,7 @@ substring('foobar' from '#"o_b#"%' for '#') NULL - lists the available + lists the available operators for pattern matching using POSIX regular expressions. @@ -4191,7 +4281,7 @@ substring('foobar' from '#"o_b#"%' for '#') NULL POSIX regular expressions provide a more powerful means for pattern matching than the LIKE and - SIMILAR TO operators. + SIMILAR TO operators. Many Unix tools such as egrep, sed, or awk use a pattern matching language that is similar to the one described here. @@ -4228,7 +4318,7 @@ substring('foobar' from '#"o_b#"%' for '#') NULL - The substring function with two parameters, + The substring function with two parameters, substring(string from pattern), provides extraction of a substring @@ -4253,31 +4343,31 @@ substring('foobar' from 'o(.)b') o - The regexp_replace function provides substitution of + The regexp_replace function provides substitution of new text for substrings that match POSIX regular expression patterns. It has the syntax - regexp_replace(source, - pattern, replacement - , flags ). - The source string is returned unchanged if - there is no match to the pattern. If there is a - match, the source string is returned with the - replacement string substituted for the matching - substring. The replacement string can contain - \n, where n is 1 + regexp_replace(source, + pattern, replacement + , flags ). + The source string is returned unchanged if + there is no match to the pattern. If there is a + match, the source string is returned with the + replacement string substituted for the matching + substring. The replacement string can contain + \n, where n is 1 through 9, to indicate that the source substring matching the - n'th parenthesized subexpression of the pattern should be - inserted, and it can contain \& to indicate that the + n'th parenthesized subexpression of the pattern should be + inserted, and it can contain \& to indicate that the substring matching the entire pattern should be inserted. Write - \\ if you need to put a literal backslash in the replacement + \\ if you need to put a literal backslash in the replacement text. 
- The flags parameter is an optional text + The flags parameter is an optional text string containing zero or more single-letter flags that change the - function's behavior. Flag i specifies case-insensitive - matching, while flag g specifies replacement of each matching + function's behavior. Flag i specifies case-insensitive + matching, while flag g specifies replacement of each matching substring rather than only the first one. Supported flags (though - not g) are - described in . + not g) are + described in . @@ -4287,31 +4377,31 @@ regexp_replace('foobarbaz', 'b..', 'X') fooXbaz regexp_replace('foobarbaz', 'b..', 'X', 'g') fooXX -regexp_replace('foobarbaz', 'b(..)', E'X\\1Y', 'g') +regexp_replace('foobarbaz', 'b(..)', 'X\1Y', 'g') fooXarYXazY - The regexp_match function returns a text array of + The regexp_match function returns a text array of captured substring(s) resulting from the first match of a POSIX regular expression pattern to a string. It has the syntax - regexp_match(string, - pattern , flags ). - If there is no match, the result is NULL. - If a match is found, and the pattern contains no + regexp_match(string, + pattern , flags ). + If there is no match, the result is NULL. + If a match is found, and the pattern contains no parenthesized subexpressions, then the result is a single-element text array containing the substring matching the whole pattern. - If a match is found, and the pattern contains + If a match is found, and the pattern contains parenthesized subexpressions, then the result is a text array - whose n'th element is the substring matching - the n'th parenthesized subexpression of - the pattern (not counting non-capturing + whose n'th element is the substring matching + the n'th parenthesized subexpression of + the pattern (not counting non-capturing parentheses; see below for details). - The flags parameter is an optional text string + The flags parameter is an optional text string containing zero or more single-letter flags that change the function's behavior. Supported flags are described - in . + in . @@ -4330,7 +4420,7 @@ SELECT regexp_match('foobarbequebaz', '(bar)(beque)'); (1 row) In the common case where you just want the whole matching substring - or NULL for no match, write something like + or NULL for no match, write something like SELECT (regexp_match('foobarbequebaz', 'bar.*que'))[1]; regexp_match @@ -4341,20 +4431,20 @@ SELECT (regexp_match('foobarbequebaz', 'bar.*que'))[1]; - The regexp_matches function returns a set of text arrays + The regexp_matches function returns a set of text arrays of captured substring(s) resulting from matching a POSIX regular expression pattern to a string. It has the same syntax as regexp_match. This function returns no rows if there is no match, one row if there is - a match and the g flag is not given, or N - rows if there are N matches and the g flag + a match and the g flag is not given, or N + rows if there are N matches and the g flag is given. Each returned row is a text array containing the whole matched substring or the substrings matching parenthesized - subexpressions of the pattern, just as described above + subexpressions of the pattern, just as described above for regexp_match. - regexp_matches accepts all the flags shown - in , plus - the g flag which commands it to return all matches, not + regexp_matches accepts all the flags shown + in , plus + the g flag which commands it to return all matches, not just the first one. 
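A brief sketch of these flags in action (the sample strings are arbitrary; i makes the match case-insensitive):

SELECT regexp_replace('Thomas', 'thomas', 'Tom', 'i');
Result: Tom

SELECT regexp_match('foobar', 'O.B', 'i');
Result: {oob}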
@@ -4377,53 +4467,53 @@ SELECT regexp_matches('foobarbequebazilbarfbonk', '(b[^b]+)(b[^b]+)', 'g'); - In most cases regexp_matches() should be used with - the g flag, since if you only want the first match, it's - easier and more efficient to use regexp_match(). - However, regexp_match() only exists - in PostgreSQL version 10 and up. When working in older - versions, a common trick is to place a regexp_matches() + In most cases regexp_matches() should be used with + the g flag, since if you only want the first match, it's + easier and more efficient to use regexp_match(). + However, regexp_match() only exists + in PostgreSQL version 10 and up. When working in older + versions, a common trick is to place a regexp_matches() call in a sub-select, for example: SELECT col1, (SELECT regexp_matches(col2, '(bar)(beque)')) FROM tab; - This produces a text array if there's a match, or NULL if - not, the same as regexp_match() would do. Without the + This produces a text array if there's a match, or NULL if + not, the same as regexp_match() would do. Without the sub-select, this query would produce no output at all for table rows without a match, which is typically not the desired behavior. - The regexp_split_to_table function splits a string using a POSIX + The regexp_split_to_table function splits a string using a POSIX regular expression pattern as a delimiter. It has the syntax - regexp_split_to_table(string, pattern - , flags ). - If there is no match to the pattern, the function returns the - string. If there is at least one match, for each match it returns + regexp_split_to_table(string, pattern + , flags ). + If there is no match to the pattern, the function returns the + string. If there is at least one match, for each match it returns the text from the end of the last match (or the beginning of the string) to the beginning of the match. When there are no more matches, it returns the text from the end of the last match to the end of the string. - The flags parameter is an optional text string containing + The flags parameter is an optional text string containing zero or more single-letter flags that change the function's behavior. regexp_split_to_table supports the flags described in - . + . - The regexp_split_to_array function behaves the same as - regexp_split_to_table, except that regexp_split_to_array - returns its result as an array of text. It has the syntax - regexp_split_to_array(string, pattern - , flags ). - The parameters are the same as for regexp_split_to_table. + The regexp_split_to_array function behaves the same as + regexp_split_to_table, except that regexp_split_to_array + returns its result as an array of text. It has the syntax + regexp_split_to_array(string, pattern + , flags ). + The parameters are the same as for regexp_split_to_table. 
Some examples: -SELECT foo FROM regexp_split_to_table('the quick brown fox jumps over the lazy dog', E'\\s+') AS foo; +SELECT foo FROM regexp_split_to_table('the quick brown fox jumps over the lazy dog', '\s+') AS foo; foo ------- the @@ -4437,13 +4527,13 @@ SELECT foo FROM regexp_split_to_table('the quick brown fox jumps over the lazy d dog (9 rows) -SELECT regexp_split_to_array('the quick brown fox jumps over the lazy dog', E'\\s+'); +SELECT regexp_split_to_array('the quick brown fox jumps over the lazy dog', '\s+'); regexp_split_to_array ----------------------------------------------- {the,quick,brown,fox,jumps,over,the,lazy,dog} (1 row) -SELECT foo FROM regexp_split_to_table('the quick brown fox', E'\\s*') AS foo; +SELECT foo FROM regexp_split_to_table('the quick brown fox', '\s*') AS foo; foo ----- t @@ -4471,8 +4561,8 @@ SELECT foo FROM regexp_split_to_table('the quick brown fox', E'\\s*') AS foo; zero-length matches that occur at the start or end of the string or immediately after a previous match. This is contrary to the strict definition of regexp matching that is implemented by - regexp_match and - regexp_matches, but is usually the most convenient behavior + regexp_match and + regexp_matches, but is usually the most convenient behavior in practice. Other software systems such as Perl use similar definitions. @@ -4491,16 +4581,16 @@ SELECT foo FROM regexp_split_to_table('the quick brown fox', E'\\s*') AS foo; Regular expressions (REs), as defined in POSIX 1003.2, come in two forms: - extended REs or EREs + extended REs or EREs (roughly those of egrep), and - basic REs or BREs + basic REs or BREs (roughly those of ed). PostgreSQL supports both forms, and also implements some extensions that are not in the POSIX standard, but have become widely used due to their availability in programming languages such as Perl and Tcl. REs using these non-POSIX extensions are called - advanced REs or AREs + advanced REs or AREs in this documentation. AREs are almost an exact superset of EREs, but BREs have several notational incompatibilities (as well as being much more limited). @@ -4510,10 +4600,10 @@ SELECT foo FROM regexp_split_to_table('the quick brown fox', E'\\s*') AS foo; - PostgreSQL always initially presumes that a regular + PostgreSQL always initially presumes that a regular expression follows the ARE rules. However, the more limited ERE or - BRE rules can be chosen by prepending an embedded option - to the RE pattern, as described in . + BRE rules can be chosen by prepending an embedded option + to the RE pattern, as described in . This can be useful for compatibility with applications that expect exactly the POSIX 1003.2 rules. @@ -4527,29 +4617,29 @@ SELECT foo FROM regexp_split_to_table('the quick brown fox', E'\\s*') AS foo; - A branch is zero or more quantified atoms or - constraints, concatenated. + A branch is zero or more quantified atoms or + constraints, concatenated. It matches a match for the first, followed by a match for the second, etc; an empty branch matches the empty string. - A quantified atom is an atom possibly followed - by a single quantifier. + A quantified atom is an atom possibly followed + by a single quantifier. Without a quantifier, it matches a match for the atom. With a quantifier, it can match some number of matches of the atom. An atom can be any of the possibilities - shown in . + shown in . The possible quantifiers and their meanings are shown in - . + . 
- A constraint matches an empty string, but matches only when + A constraint matches an empty string, but matches only when specific conditions are met. A constraint can be used where an atom could be used, except it cannot be followed by a quantifier. The simple constraints are shown in - ; + ; some more constraints are described later. @@ -4567,57 +4657,57 @@ SELECT foo FROM regexp_split_to_table('the quick brown fox', E'\\s*') AS foo; - (re) - (where re is any regular expression) + (re) + (where re is any regular expression) matches a match for - re, with the match noted for possible reporting + re, with the match noted for possible reporting - (?:re) + (?:re) as above, but the match is not noted for reporting - (a non-capturing set of parentheses) + (a non-capturing set of parentheses) (AREs only) - . + . matches any single character - [chars] - a bracket expression, - matching any one of the chars (see - for more detail) + [chars] + a bracket expression, + matching any one of the chars (see + for more detail) - \k - (where k is a non-alphanumeric character) + \k + (where k is a non-alphanumeric character) matches that character taken as an ordinary character, - e.g., \\ matches a backslash character + e.g., \\ matches a backslash character - \c - where c is alphanumeric + \c + where c is alphanumeric (possibly followed by other characters) - is an escape, see - (AREs only; in EREs and BREs, this matches c) + is an escape, see + (AREs only; in EREs and BREs, this matches c) - { + { when followed by a character other than a digit, - matches the left-brace character {; + matches the left-brace character {; when followed by a digit, it is the beginning of a - bound (see below) + bound (see below) - x - where x is a single character with no other + x + where x is a single character with no other significance, matches that character @@ -4625,14 +4715,14 @@ SELECT foo FROM regexp_split_to_table('the quick brown fox', E'\\s*') AS foo; - An RE cannot end with a backslash (\). + An RE cannot end with a backslash (\). - If you have turned off, + If you have turned off, any backslashes you write in literal string constants will need to be - doubled. See for more information. + doubled. See for more information. @@ -4649,91 +4739,91 @@ SELECT foo FROM regexp_split_to_table('the quick brown fox', E'\\s*') AS foo; - * + * a sequence of 0 or more matches of the atom - + + + a sequence of 1 or more matches of the atom - ? + ? a sequence of 0 or 1 matches of the atom - {m} - a sequence of exactly m matches of the atom + {m} + a sequence of exactly m matches of the atom - {m,} - a sequence of m or more matches of the atom + {m,} + a sequence of m or more matches of the atom - {m,n} - a sequence of m through n - (inclusive) matches of the atom; m cannot exceed - n + {m,n} + a sequence of m through n + (inclusive) matches of the atom; m cannot exceed + n - *? - non-greedy version of * + *? + non-greedy version of * - +? - non-greedy version of + + +? + non-greedy version of + - ?? - non-greedy version of ? + ?? + non-greedy version of ? - {m}? - non-greedy version of {m} + {m}? + non-greedy version of {m} - {m,}? - non-greedy version of {m,} + {m,}? + non-greedy version of {m,} - {m,n}? - non-greedy version of {m,n} + {m,n}? + non-greedy version of {m,n} - The forms using {...} - are known as bounds. - The numbers m and n within a bound are + The forms using {...} + are known as bounds. + The numbers m and n within a bound are unsigned decimal integers with permissible values from 0 to 255 inclusive. 
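For example, a bound constrains the repetition count exactly as described (the test strings are arbitrary):

SELECT 'aaa' ~ '^a{2,3}$', 'a' ~ '^a{2,3}$';
Result: t | f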
- Non-greedy quantifiers (available in AREs only) match the - same possibilities as their corresponding normal (greedy) + Non-greedy quantifiers (available in AREs only) match the + same possibilities as their corresponding normal (greedy) counterparts, but prefer the smallest number rather than the largest number of matches. - See for more detail. + See for more detail. A quantifier cannot immediately follow another quantifier, e.g., - ** is invalid. + ** is invalid. A quantifier cannot begin an expression or subexpression or follow ^ or |. @@ -4753,40 +4843,40 @@ SELECT foo FROM regexp_split_to_table('the quick brown fox', E'\\s*') AS foo; - ^ + ^ matches at the beginning of the string - $ + $ matches at the end of the string - (?=re) - positive lookahead matches at any point - where a substring matching re begins + (?=re) + positive lookahead matches at any point + where a substring matching re begins (AREs only) - (?!re) - negative lookahead matches at any point - where no substring matching re begins + (?!re) + negative lookahead matches at any point + where no substring matching re begins (AREs only) - (?<=re) - positive lookbehind matches at any point - where a substring matching re ends + (?<=re) + positive lookbehind matches at any point + where a substring matching re ends (AREs only) - (?<!re) - negative lookbehind matches at any point - where no substring matching re ends + (?<!re) + negative lookbehind matches at any point + where no substring matching re ends (AREs only) @@ -4795,7 +4885,7 @@ SELECT foo FROM regexp_split_to_table('the quick brown fox', E'\\s*') AS foo; Lookahead and lookbehind constraints cannot contain back - references (see ), + references (see ), and all parentheses within them are considered non-capturing. @@ -4808,7 +4898,7 @@ SELECT foo FROM regexp_split_to_table('the quick brown fox', E'\\s*') AS foo; characters enclosed in []. It normally matches any single character from the list (but see below). If the list begins with ^, it matches any single character - not from the rest of the list. + not from the rest of the list. If two characters in the list are separated by -, this is shorthand for the full range of characters between those two @@ -4853,7 +4943,7 @@ SELECT foo FROM regexp_split_to_table('the quick brown fox', E'\\s*') AS foo; - PostgreSQL currently does not support multi-character collating + PostgreSQL currently does not support multi-character collating elements. This information describes possible future behavior. @@ -4861,7 +4951,7 @@ SELECT foo FROM regexp_split_to_table('the quick brown fox', E'\\s*') AS foo; Within a bracket expression, a collating element enclosed in [= and =] is an equivalence - class, standing for the sequences of characters of all collating + class, standing for the sequences of characters of all collating elements equivalent to that one, including itself. (If there are no other equivalent collating elements, the treatment is as if the enclosing delimiters were [. and @@ -4896,7 +4986,7 @@ SELECT foo FROM regexp_split_to_table('the quick brown fox', E'\\s*') AS foo; matching empty strings at the beginning and end of a word respectively. A word is defined as a sequence of word characters that is neither preceded nor followed by word - characters. A word character is an alnum character (as + characters. A word character is an alnum character (as defined by ctype3) or an underscore. 
This is an extension, compatible with but not @@ -4911,44 +5001,44 @@ SELECT foo FROM regexp_split_to_table('the quick brown fox', E'\\s*') AS foo; Regular Expression Escapes - Escapes are special sequences beginning with \ + Escapes are special sequences beginning with \ followed by an alphanumeric character. Escapes come in several varieties: character entry, class shorthands, constraint escapes, and back references. - A \ followed by an alphanumeric character but not constituting + A \ followed by an alphanumeric character but not constituting a valid escape is illegal in AREs. In EREs, there are no escapes: outside a bracket expression, - a \ followed by an alphanumeric character merely stands for + a \ followed by an alphanumeric character merely stands for that character as an ordinary character, and inside a bracket expression, - \ is an ordinary character. + \ is an ordinary character. (The latter is the one actual incompatibility between EREs and AREs.) - Character-entry escapes exist to make it easier to specify + Character-entry escapes exist to make it easier to specify non-printing and other inconvenient characters in REs. They are - shown in . + shown in . - Class-shorthand escapes provide shorthands for certain + Class-shorthand escapes provide shorthands for certain commonly-used character classes. They are - shown in . + shown in . - A constraint escape is a constraint, + A constraint escape is a constraint, matching the empty string if specific conditions are met, written as an escape. They are - shown in . + shown in . - A back reference (\n) matches the + A back reference (\n) matches the same string matched by the previous parenthesized subexpression specified - by the number n - (see ). For example, - ([bc])\1 matches bb or cc - but not bc or cb. + by the number n + (see ). For example, + ([bc])\1 matches bb or cc + but not bc or cb. The subexpression must entirely precede the back reference in the RE. Subexpressions are numbered in the order of their leading parentheses. Non-capturing parentheses do not define subexpressions. 
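A small sketch of a back reference, in the spirit of the ([bc])\1 example above (the test strings are arbitrary):

SELECT 'abcabc' ~ '^(.{3})\1$', 'abcabd' ~ '^(.{3})\1$';
Result: t | f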
@@ -4967,122 +5057,122 @@ SELECT foo FROM regexp_split_to_table('the quick brown fox', E'\\s*') AS foo; - \a + \a alert (bell) character, as in C - \b + \b backspace, as in C - \B - synonym for backslash (\) to help reduce the need for backslash + \B + synonym for backslash (\) to help reduce the need for backslash doubling - \cX - (where X is any character) the character whose + \cX + (where X is any character) the character whose low-order 5 bits are the same as those of - X, and whose other bits are all zero + X, and whose other bits are all zero - \e + \e the character whose collating-sequence name - is ESC, - or failing that, the character with octal value 033 + is ESC, + or failing that, the character with octal value 033 - \f + \f form feed, as in C - \n + \n newline, as in C - \r + \r carriage return, as in C - \t + \t horizontal tab, as in C - \uwxyz - (where wxyz is exactly four hexadecimal digits) + \uwxyz + (where wxyz is exactly four hexadecimal digits) the character whose hexadecimal value is - 0xwxyz + 0xwxyz - \Ustuvwxyz - (where stuvwxyz is exactly eight hexadecimal + \Ustuvwxyz + (where stuvwxyz is exactly eight hexadecimal digits) the character whose hexadecimal value is - 0xstuvwxyz + 0xstuvwxyz - \v + \v vertical tab, as in C - \xhhh - (where hhh is any sequence of hexadecimal + \xhhh + (where hhh is any sequence of hexadecimal digits) the character whose hexadecimal value is - 0xhhh + 0xhhh (a single character no matter how many hexadecimal digits are used) - \0 - the character whose value is 0 (the null byte) + \0 + the character whose value is 0 (the null byte) - \xy - (where xy is exactly two octal digits, - and is not a back reference) + \xy + (where xy is exactly two octal digits, + and is not a back reference) the character whose octal value is - 0xy + 0xy - \xyz - (where xyz is exactly three octal digits, - and is not a back reference) + \xyz + (where xyz is exactly three octal digits, + and is not a back reference) the character whose octal value is - 0xyz + 0xyz - Hexadecimal digits are 0-9, - a-f, and A-F. - Octal digits are 0-7. + Hexadecimal digits are 0-9, + a-f, and A-F. + Octal digits are 0-7. Numeric character-entry escapes specifying values outside the ASCII range (0-127) have meanings dependent on the database encoding. When the encoding is UTF-8, escape values are equivalent to Unicode code points, - for example \u1234 means the character U+1234. + for example \u1234 means the character U+1234. For other multibyte encodings, character-entry escapes usually just specify the concatenation of the byte values for the character. If the escape value does not correspond to any legal character in the database @@ -5091,8 +5181,8 @@ SELECT foo FROM regexp_split_to_table('the quick brown fox', E'\\s*') AS foo; The character-entry escapes are always taken as ordinary characters. - For example, \135 is ] in ASCII, but - \135 does not terminate a bracket expression. + For example, \135 is ] in ASCII, but + \135 does not terminate a bracket expression. @@ -5108,34 +5198,34 @@ SELECT foo FROM regexp_split_to_table('the quick brown fox', E'\\s*') AS foo; - \d - [[:digit:]] + \d + [[:digit:]] - \s - [[:space:]] + \s + [[:space:]] - \w - [[:alnum:]_] + \w + [[:alnum:]_] (note underscore is included) - \D - [^[:digit:]] + \D + [^[:digit:]] - \S - [^[:space:]] + \S + [^[:space:]] - \W - [^[:alnum:]_] + \W + [^[:alnum:]_] (note underscore is included) @@ -5143,13 +5233,13 @@ SELECT foo FROM regexp_split_to_table('the quick brown fox', E'\\s*') AS foo;
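For instance, the \d shorthand can be used directly in a pattern (the sample string is arbitrary):

SELECT regexp_replace('card: 1234-5678', '\d', 'X', 'g');
Result: card: XXXX-XXXX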
- Within bracket expressions, \d, \s, - and \w lose their outer brackets, - and \D, \S, and \W are illegal. - (So, for example, [a-c\d] is equivalent to - [a-c[:digit:]]. - Also, [a-c\D], which is equivalent to - [a-c^[:digit:]], is illegal.) + Within bracket expressions, \d, \s, + and \w lose their outer brackets, + and \D, \S, and \W are illegal. + (So, for example, [a-c\d] is equivalent to + [a-c[:digit:]]. + Also, [a-c\D], which is equivalent to + [a-c^[:digit:]], is illegal.) @@ -5165,38 +5255,38 @@ SELECT foo FROM regexp_split_to_table('the quick brown fox', E'\\s*') AS foo; - \A + \A matches only at the beginning of the string - (see for how this differs from - ^) + (see for how this differs from + ^) - \m + \m matches only at the beginning of a word - \M + \M matches only at the end of a word - \y + \y matches only at the beginning or end of a word - \Y + \Y matches only at a point that is not the beginning or end of a word - \Z + \Z matches only at the end of the string - (see for how this differs from - $) + (see for how this differs from + $) @@ -5204,7 +5294,7 @@ SELECT foo FROM regexp_split_to_table('the quick brown fox', E'\\s*') AS foo; A word is defined as in the specification of - [[:<:]] and [[:>:]] above. + [[:<:]] and [[:>:]] above. Constraint escapes are illegal within bracket expressions. @@ -5221,18 +5311,18 @@ SELECT foo FROM regexp_split_to_table('the quick brown fox', E'\\s*') AS foo; - \m - (where m is a nonzero digit) - a back reference to the m'th subexpression + \m + (where m is a nonzero digit) + a back reference to the m'th subexpression - \mnn - (where m is a nonzero digit, and - nn is some more digits, and the decimal value - mnn is not greater than the number of closing capturing + \mnn + (where m is a nonzero digit, and + nn is some more digits, and the decimal value + mnn is not greater than the number of closing capturing parentheses seen so far) - a back reference to the mnn'th subexpression + a back reference to the mnn'th subexpression @@ -5263,29 +5353,29 @@ SELECT foo FROM regexp_split_to_table('the quick brown fox', E'\\s*') AS foo; - An RE can begin with one of two special director prefixes. - If an RE begins with ***:, + An RE can begin with one of two special director prefixes. + If an RE begins with ***:, the rest of the RE is taken as an ARE. (This normally has no effect in - PostgreSQL, since REs are assumed to be AREs; + PostgreSQL, since REs are assumed to be AREs; but it does have an effect if ERE or BRE mode had been specified by - the flags parameter to a regex function.) - If an RE begins with ***=, + the flags parameter to a regex function.) + If an RE begins with ***=, the rest of the RE is taken to be a literal string, with all characters considered ordinary characters. - An ARE can begin with embedded options: - a sequence (?xyz) - (where xyz is one or more alphabetic characters) + An ARE can begin with embedded options: + a sequence (?xyz) + (where xyz is one or more alphabetic characters) specifies options affecting the rest of the RE. These options override any previously determined options — in particular, they can override the case-sensitivity behavior implied by - a regex operator, or the flags parameter to a regex + a regex operator, or the flags parameter to a regex function. The available option letters are - shown in . - Note that these same option letters are used in the flags + shown in . + Note that these same option letters are used in the flags parameters of regex functions. 
@@ -5302,67 +5392,67 @@ SELECT foo FROM regexp_split_to_table('the quick brown fox', E'\\s*') AS foo; - b + b rest of RE is a BRE - c + c case-sensitive matching (overrides operator type) - e + e rest of RE is an ERE - i + i case-insensitive matching (see - ) (overrides operator type) + ) (overrides operator type) - m - historical synonym for n + m + historical synonym for n - n + n newline-sensitive matching (see - ) + ) - p + p partial newline-sensitive matching (see - ) + ) - q - rest of RE is a literal (quoted) string, all ordinary + q + rest of RE is a literal (quoted) string, all ordinary characters - s + s non-newline-sensitive matching (default) - t + t tight syntax (default; see below) - w - inverse partial newline-sensitive (weird) matching - (see ) + w + inverse partial newline-sensitive (weird) matching + (see ) - x + x expanded syntax (see below) @@ -5370,18 +5460,18 @@ SELECT foo FROM regexp_split_to_table('the quick brown fox', E'\\s*') AS foo;
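For example, the i option letter from the table above can be embedded in the pattern itself to override the case sensitivity implied by the operator (the sample strings are arbitrary):

SELECT 'THOMAS' ~ '(?i)thomas', 'THOMAS' ~ 'thomas';
Result: t | f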
- Embedded options take effect at the ) terminating the sequence. + Embedded options take effect at the ) terminating the sequence. They can appear only at the start of an ARE (after the - ***: director if any). + ***: director if any). - In addition to the usual (tight) RE syntax, in which all - characters are significant, there is an expanded syntax, - available by specifying the embedded x option. + In addition to the usual (tight) RE syntax, in which all + characters are significant, there is an expanded syntax, + available by specifying the embedded x option. In the expanded syntax, white-space characters in the RE are ignored, as are - all characters between a # + all characters between a # and the following newline (or the end of the RE). This permits paragraphing and commenting a complex RE. There are three exceptions to that basic rule: @@ -5389,41 +5479,41 @@ SELECT foo FROM regexp_split_to_table('the quick brown fox', E'\\s*') AS foo; - a white-space character or # preceded by \ is + a white-space character or # preceded by \ is retained - white space or # within a bracket expression is retained + white space or # within a bracket expression is retained white space and comments cannot appear within multi-character symbols, - such as (?: + such as (?: For this purpose, white-space characters are blank, tab, newline, and - any character that belongs to the space character class. + any character that belongs to the space character class. Finally, in an ARE, outside bracket expressions, the sequence - (?#ttt) - (where ttt is any text not containing a )) + (?#ttt) + (where ttt is any text not containing a )) is a comment, completely ignored. Again, this is not allowed between the characters of - multi-character symbols, like (?:. + multi-character symbols, like (?:. Such comments are more a historical artifact than a useful facility, and their use is deprecated; use the expanded syntax instead. - None of these metasyntax extensions is available if - an initial ***= director + None of these metasyntax extensions is available if + an initial ***= director has specified that the user's input be treated as a literal string rather than as an RE. @@ -5437,8 +5527,8 @@ SELECT foo FROM regexp_split_to_table('the quick brown fox', E'\\s*') AS foo; string, the RE matches the one starting earliest in the string. If the RE could match more than one substring starting at that point, either the longest possible match or the shortest possible match will - be taken, depending on whether the RE is greedy or - non-greedy. + be taken, depending on whether the RE is greedy or + non-greedy.
@@ -5458,39 +5548,39 @@ SELECT foo FROM regexp_split_to_table('the quick brown fox', E'\\s*') AS foo; A quantified atom with a fixed-repetition quantifier - ({m} + ({m} or - {m}?) + {m}?) has the same greediness (possibly none) as the atom itself. A quantified atom with other normal quantifiers (including - {m,n} - with m equal to n) + {m,n} + with m equal to n) is greedy (prefers longest match). A quantified atom with a non-greedy quantifier (including - {m,n}? - with m equal to n) + {m,n}? + with m equal to n) is non-greedy (prefers shortest match). A branch — that is, an RE that has no top-level - | operator — has the same greediness as the first + | operator — has the same greediness as the first quantified atom in it that has a greediness attribute. An RE consisting of two or more branches connected by the - | operator is always greedy. + | operator is always greedy. @@ -5501,7 +5591,7 @@ SELECT foo FROM regexp_split_to_table('the quick brown fox', E'\\s*') AS foo; quantified atoms, but with branches and entire REs that contain quantified atoms. What that means is that the matching is done in such a way that the branch, or whole RE, matches the longest or shortest possible - substring as a whole. Once the length of the entire match + substring as a whole. Once the length of the entire match is determined, the part of it that matches any particular subexpression is determined on the basis of the greediness attribute of that subexpression, with subexpressions starting earlier in the RE taking @@ -5516,16 +5606,16 @@ SELECT SUBSTRING('XY1234Z', 'Y*([0-9]{1,3})'); SELECT SUBSTRING('XY1234Z', 'Y*?([0-9]{1,3})'); Result: 1 - In the first case, the RE as a whole is greedy because Y* - is greedy. It can match beginning at the Y, and it matches - the longest possible string starting there, i.e., Y123. - The output is the parenthesized part of that, or 123. - In the second case, the RE as a whole is non-greedy because Y*? - is non-greedy. It can match beginning at the Y, and it matches - the shortest possible string starting there, i.e., Y1. - The subexpression [0-9]{1,3} is greedy but it cannot change + In the first case, the RE as a whole is greedy because Y* + is greedy. It can match beginning at the Y, and it matches + the longest possible string starting there, i.e., Y123. + The output is the parenthesized part of that, or 123. + In the second case, the RE as a whole is non-greedy because Y*? + is non-greedy. It can match beginning at the Y, and it matches + the shortest possible string starting there, i.e., Y1. + The subexpression [0-9]{1,3} is greedy but it cannot change the decision as to the overall match length; so it is forced to match - just 1. + just 1. @@ -5533,11 +5623,11 @@ SELECT SUBSTRING('XY1234Z', 'Y*?([0-9]{1,3})'); the total match length is either as long as possible or as short as possible, according to the attribute assigned to the whole RE. The attributes assigned to the subexpressions only affect how much of that - match they are allowed to eat relative to each other. + match they are allowed to eat relative to each other. - The quantifiers {1,1} and {1,1}? + The quantifiers {1,1} and {1,1}? can be used to force greediness or non-greediness, respectively, on a subexpression or a whole RE. 
This is useful when you need the whole RE to have a greediness attribute @@ -5549,8 +5639,8 @@ SELECT SUBSTRING('XY1234Z', 'Y*?([0-9]{1,3})'); SELECT regexp_match('abc01234xyz', '(.*)(\d+)(.*)'); Result: {abc0123,4,xyz} - That didn't work: the first .* is greedy so - it eats as much as it can, leaving the \d+ to + That didn't work: the first .* is greedy so + it eats as much as it can, leaving the \d+ to match at the last possible place, the last digit. We might try to fix that by making it non-greedy: @@ -5573,14 +5663,14 @@ SELECT regexp_match('abc01234xyz', '(?:(.*?)(\d+)(.*)){1,1}'); match lengths are measured in characters, not collating elements. An empty string is considered longer than no match at all. For example: - bb* - matches the three middle characters of abbbc; - (week|wee)(night|knights) - matches all ten characters of weeknights; - when (.*).* - is matched against abc the parenthesized subexpression + bb* + matches the three middle characters of abbbc; + (week|wee)(night|knights) + matches all ten characters of weeknights; + when (.*).* + is matched against abc the parenthesized subexpression matches all three characters; and when - (a*)* is matched against bc + (a*)* is matched against bc both the whole RE and the parenthesized subexpression match an empty string. @@ -5592,38 +5682,38 @@ SELECT regexp_match('abc01234xyz', '(?:(.*?)(\d+)(.*)){1,1}'); When an alphabetic that exists in multiple cases appears as an ordinary character outside a bracket expression, it is effectively transformed into a bracket expression containing both cases, - e.g., x becomes [xX]. + e.g., x becomes [xX]. When it appears inside a bracket expression, all case counterparts of it are added to the bracket expression, e.g., - [x] becomes [xX] - and [^x] becomes [^xX]. + [x] becomes [xX] + and [^x] becomes [^xX].
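As a small illustration of this case-folding behavior (assuming the i flag is used to request case-insensitive matching, as described earlier in this section), one would expect:

SELECT regexp_match('PostgreSQL', 'postgres', 'i');
Result: {Postgres}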
- If newline-sensitive matching is specified, . - and bracket expressions using ^ + If newline-sensitive matching is specified, . + and bracket expressions using ^ will never match the newline character (so that matches will never cross newlines unless the RE explicitly arranges it) - and ^ and $ + and ^ and $ will match the empty string after and before a newline respectively, in addition to matching at beginning and end of string respectively. - But the ARE escapes \A and \Z - continue to match beginning or end of string only. + But the ARE escapes \A and \Z + continue to match beginning or end of string only. If partial newline-sensitive matching is specified, - this affects . and bracket expressions - as with newline-sensitive matching, but not ^ - and $. + this affects . and bracket expressions + as with newline-sensitive matching, but not ^ + and $. If inverse partial newline-sensitive matching is specified, - this affects ^ and $ - as with newline-sensitive matching, but not . + this affects ^ and $ + as with newline-sensitive matching, but not . and bracket expressions. This isn't very useful but is provided for symmetry. @@ -5642,18 +5732,18 @@ SELECT regexp_match('abc01234xyz', '(?:(.*?)(\d+)(.*)){1,1}'); The only feature of AREs that is actually incompatible with - POSIX EREs is that \ does not lose its special + POSIX EREs is that \ does not lose its special significance inside bracket expressions. All other ARE features use syntax which is illegal or has undefined or unspecified effects in POSIX EREs; - the *** syntax of directors likewise is outside the POSIX + the *** syntax of directors likewise is outside the POSIX syntax for both BREs and EREs. Many of the ARE extensions are borrowed from Perl, but some have been changed to clean them up, and a few Perl extensions are not present. - Incompatibilities of note include \b, \B, + Incompatibilities of note include \b, \B, the lack of special treatment for a trailing newline, the addition of complemented bracket expressions to the things affected by newline-sensitive matching, @@ -5664,12 +5754,12 @@ SELECT regexp_match('abc01234xyz', '(?:(.*?)(\d+)(.*)){1,1}'); Two significant incompatibilities exist between AREs and the ERE syntax - recognized by pre-7.4 releases of PostgreSQL: + recognized by pre-7.4 releases of PostgreSQL: - In AREs, \ followed by an alphanumeric character is either + In AREs, \ followed by an alphanumeric character is either an escape or an error, while in previous releases, it was just another way of writing the alphanumeric. This should not be much of a problem because there was no reason to @@ -5678,9 +5768,9 @@ SELECT regexp_match('abc01234xyz', '(?:(.*?)(\d+)(.*)){1,1}'); - In AREs, \ remains a special character within - [], so a literal \ within a bracket - expression must be written \\. + In AREs, \ remains a special character within + [], so a literal \ within a bracket + expression must be written \\. @@ -5692,27 +5782,27 @@ SELECT regexp_match('abc01234xyz', '(?:(.*?)(\d+)(.*)){1,1}'); BREs differ from EREs in several respects. - In BREs, |, +, and ? + In BREs, |, +, and ? are ordinary characters and there is no equivalent for their functionality. The delimiters for bounds are - \{ and \}, - with { and } + \{ and \}, + with { and } by themselves ordinary characters. The parentheses for nested subexpressions are - \( and \), - with ( and ) by themselves ordinary characters. - ^ is an ordinary character except at the beginning of the + \( and \), + with ( and ) by themselves ordinary characters. 
+ ^ is an ordinary character except at the beginning of the RE or the beginning of a parenthesized subexpression, - $ is an ordinary character except at the end of the + $ is an ordinary character except at the end of the RE or the end of a parenthesized subexpression, - and * is an ordinary character if it appears at the beginning + and * is an ordinary character if it appears at the beginning of the RE or the beginning of a parenthesized subexpression - (after a possible leading ^). + (after a possible leading ^). Finally, single-digit back references are available, and - \< and \> + \< and \> are synonyms for - [[:<:]] and [[:>:]] + [[:<:]] and [[:>:]] respectively; no other escapes are available in BREs. @@ -5735,7 +5825,7 @@ SELECT regexp_match('abc01234xyz', '(?:(.*?)(\d+)(.*)){1,1}'); provide a powerful set of tools for converting various data types (date/time, integer, floating point, numeric) to formatted strings and for converting from formatted strings to specific data types. - lists them. + lists them. These functions all follow a common calling convention: the first argument is the value to be formatted and the second argument is a template that defines the output or input format. @@ -5829,7 +5919,7 @@ SELECT regexp_match('abc01234xyz', '(?:(.*?)(\d+)(.*)){1,1}'); There is also a single-argument to_timestamp - function; see . + function; see . @@ -5839,22 +5929,25 @@ SELECT regexp_match('abc01234xyz', '(?:(.*?)(\d+)(.*)){1,1}'); exist to handle input formats that cannot be converted by simple casting. For most standard date/time formats, simply casting the source string to the required data type works, and is much easier. - Similarly, to_number is unnecessary for standard numeric + Similarly, to_number is unnecessary for standard numeric representations. - In a to_char output template string, there are certain + In a to_char output template string, there are certain patterns that are recognized and replaced with appropriately-formatted data based on the given value. Any text that is not a template pattern is simply copied verbatim. Similarly, in an input template string (for the other functions), template patterns identify the values to be supplied by - the input data string. + the input data string. If there are characters in the template string + that are not template patterns, the corresponding characters in the input + data string are simply skipped over (whether or not they are equal to the + template string characters). - shows the + shows the template patterns available for formatting date and time values. @@ -6022,11 +6115,11 @@ SELECT regexp_match('abc01234xyz', '(?:(.*?)(\d+)(.*)){1,1}'); D - day of the week, Sunday (1) to Saturday (7) + day of the week, Sunday (1) to Saturday (7) ID - ISO 8601 day of the week, Monday (1) to Sunday (7) + ISO 8601 day of the week, Monday (1) to Sunday (7) W @@ -6063,17 +6156,25 @@ SELECT regexp_match('abc01234xyz', '(?:(.*?)(\d+)(.*)){1,1}'); TZ upper case time-zone abbreviation - (only supported in to_char) + (only supported in to_char) tz lower case time-zone abbreviation - (only supported in to_char) + (only supported in to_char) + + + TZH + time-zone hours + + + TZM + time-zone minutes OF time-zone offset from UTC - (only supported in to_char) + (only supported in to_char) @@ -6084,7 +6185,7 @@ SELECT regexp_match('abc01234xyz', '(?:(.*?)(\d+)(.*)){1,1}'); behavior. For example, FMMonth is the Month pattern with the FM modifier. - shows the + shows the modifier patterns for date/time formatting. 
@@ -6107,12 +6208,12 @@ SELECT regexp_match('abc01234xyz', '(?:(.*?)(\d+)(.*)){1,1}'); TH suffix upper case ordinal number suffix - DDTH, e.g., 12TH + DDTH, e.g., 12TH th suffix lower case ordinal number suffix - DDth, e.g., 12th + DDth, e.g., 12th FX prefix @@ -6122,7 +6223,7 @@ SELECT regexp_match('abc01234xyz', '(?:(.*?)(\d+)(.*)){1,1}'); TM prefix translation mode (print localized day and month names based on - ) + ) TMMonth @@ -6153,7 +6254,7 @@ SELECT regexp_match('abc01234xyz', '(?:(.*?)(\d+)(.*)){1,1}'); TM does not include trailing blanks. - to_timestamp and to_date ignore + to_timestamp and to_date ignore the TM modifier. @@ -6161,29 +6262,85 @@ SELECT regexp_match('abc01234xyz', '(?:(.*?)(\d+)(.*)){1,1}'); to_timestamp and to_date - skip multiple blank spaces in the input string unless the - FX option is used. For example, - to_timestamp('2000    JUN', 'YYYY MON') works, but + skip multiple blank spaces at the beginning of the input string and + around date and time values unless the FX option is used. For example, + to_timestamp(' 2000    JUN', 'YYYY MON') and + to_timestamp('2000 - JUN', 'YYYY-MON') work, but to_timestamp('2000    JUN', 'FXYYYY MON') returns an error - because to_timestamp expects one space only. + because to_timestamp expects a single space only. FX must be specified as the first item in the template. + + + A separator (a space or a non-letter/non-digit character) in the template string of + to_timestamp and to_date + matches any single separator in the input string or is skipped, + unless the FX option is used. + For example, to_timestamp('2000JUN', 'YYYY///MON') and + to_timestamp('2000/JUN', 'YYYY MON') work, but + to_timestamp('2000//JUN', 'YYYY/MON') + returns an error because the number of separators in the input string + exceeds the number of separators in the template. + + + If FX is specified, a separator in the template string + matches exactly one character in the input string. Note that the input string + character is not required to be the same as the template string separator. + For example, to_timestamp('2000/JUN', 'FXYYYY MON') + works, but to_timestamp('2000/JUN', 'FXYYYY  MON') + returns an error because the second space in the template string consumes + the letter J from the input string. + + + + + + A TZH template pattern can match a signed number. + Without the FX option, this can lead to ambiguity in the + interpretation of the minus sign, which can also be interpreted as a separator. + This ambiguity is resolved as follows. If the number of separators before + TZH in the template string is less than the number of + separators before the minus sign in the input string, the minus sign + is interpreted as part of TZH. + Otherwise, the minus sign is considered to be a separator between values. + For example, to_timestamp('2000 -10', 'YYYY TZH') matches + -10 to TZH, but + to_timestamp('2000 -10', 'YYYY  TZH') + matches 10 to TZH. + + + Ordinary text is allowed in to_char templates and will be output literally. You can put a substring in double quotes to force it to be interpreted as literal text - even if it contains pattern key words. For example, in + even if it contains template patterns. For example, in '"Hello Year "YYYY', the YYYY will be replaced by the year data, but the single Y in Year - will not be. In to_date, to_number, - and to_timestamp, double-quoted strings skip the number of - input characters contained in the string, e.g. "XX" - skips two input characters. + will not be. 
+ In to_date, to_number, + and to_timestamp, literal text and double-quoted + strings result in skipping the number of characters contained in the + string; for example "XX" skips two input characters + (whether or not they are XX). + + + Prior to PostgreSQL 12, it was possible to + skip arbitrary text in the input string using non-letter or non-digit + characters. For example, + to_timestamp('2000y6m1d', 'yyyy-MM-DD') used to + work. Now you can only use letter characters for this purpose. For example, + to_timestamp('2000y6m1d', 'yyyytMMtDDt') and + to_timestamp('2000y6m1d', 'yyyy"y"MM"m"DD"d"') + skip y, m, and + d. + + @@ -6191,6 +6348,11 @@ SELECT regexp_match('abc01234xyz', '(?:(.*?)(\d+)(.*)){1,1}'); If you want to have a double quote in the output you must precede it with a backslash, for example '\"YYYY Month\"'. + Backslashes are not otherwise special outside of double-quoted + strings. Within a double-quoted string, a backslash causes the + next character to be taken literally, whatever it is (but this + has no special effect unless the next character is a double quote + or another backslash).
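For instance, given the skipping rule described above, the double-quoted literal in the following template should consume two input characters regardless of what they are, so one would expect:

SELECT to_date('2000--JUN', 'YYYY"XX"MON');
Result: 2000-06-01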
@@ -6198,9 +6360,9 @@ SELECT regexp_match('abc01234xyz', '(?:(.*?)(\d+)(.*)){1,1}'); In to_timestamp and to_date, if the year format specification is less than four digits, e.g. - YYY, and the supplied year is less than four digits, + YYY, and the supplied year is less than four digits, the year will be adjusted to be nearest to the year 2020, e.g. - 95 becomes 1995. + 95 becomes 1995. @@ -6269,7 +6431,7 @@ SELECT regexp_match('abc01234xyz', '(?:(.*?)(\d+)(.*)){1,1}'); Attempting to enter a date using a mixture of ISO 8601 week-numbering fields and Gregorian date fields is nonsensical, and will cause an error. In the context of an ISO 8601 week-numbering year, the - concept of a month or day of month has no + concept of a month or day of month has no meaning. In the context of a Gregorian year, the ISO week has no meaning.
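For instance, following the nearest-to-2020 rule described above, two-digit years should resolve like this:

SELECT to_date('95-06-01', 'YY-MM-DD');
Result: 1995-06-01
SELECT to_date('05-06-01', 'YY-MM-DD');
Result: 2005-06-01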
@@ -6278,10 +6440,10 @@ SELECT regexp_match('abc01234xyz', '(?:(.*?)(\d+)(.*)){1,1}'); While to_date will reject a mixture of Gregorian and ISO week-numbering date fields, to_char will not, since output format - specifications like YYYY-MM-DD (IYYY-IDDD) can be - useful. But avoid writing something like IYYY-MM-DD; + specifications like YYYY-MM-DD (IYYY-IDDD) can be + useful. But avoid writing something like IYYY-MM-DD; that would yield surprising results near the start of the year. - (See for more + (See for more information.) @@ -6323,11 +6485,11 @@ SELECT regexp_match('abc01234xyz', '(?:(.*?)(\d+)(.*)){1,1}'); - to_char(interval) formats HH and - HH12 as shown on a 12-hour clock, for example zero hours - and 36 hours both output as 12, while HH24 + to_char(interval) formats HH and + HH12 as shown on a 12-hour clock, for example zero hours + and 36 hours both output as 12, while HH24 outputs the full hour value, which can exceed 23 in - an interval value. + an interval value. @@ -6335,7 +6497,7 @@ SELECT regexp_match('abc01234xyz', '(?:(.*?)(\d+)(.*)){1,1}'); - shows the + shows the template patterns available for formatting numeric values. @@ -6351,11 +6513,11 @@ SELECT regexp_match('abc01234xyz', '(?:(.*?)(\d+)(.*)){1,1}'); 9 - value with the specified number of digits + digit position (can be dropped if insignificant) 0 - value with leading zeros + digit position (will not be dropped, even if insignificant) . (period) @@ -6363,7 +6525,7 @@ SELECT regexp_match('abc01234xyz', '(?:(.*?)(\d+)(.*)){1,1}'); , (comma) - group (thousand) separator + group (thousands) separator PR @@ -6421,6 +6583,39 @@ SELECT regexp_match('abc01234xyz', '(?:(.*?)(\d+)(.*)){1,1}'); Usage notes for numeric formatting: + + + 0 specifies a digit position that will always be printed, + even if it contains a leading/trailing zero. 9 also + specifies a digit position, but if it is a leading zero then it will + be replaced by a space, while if it is a trailing zero and fill mode + is specified then it will be deleted. (For to_number(), + these two pattern characters are equivalent.) + + + + + + The pattern characters S, L, D, + and G represent the sign, currency symbol, decimal point, + and thousands separator characters defined by the current locale + (see + and ). The pattern characters period + and comma represent those exact characters, with the meanings of + decimal point and thousands separator, regardless of locale. + + + + + + If no explicit provision is made for a sign + in to_char()'s pattern, one column will be reserved for + the sign, and it will be anchored to (appear just left of) the + number. If S appears just left of some 9's, + it will likewise be anchored to the number. + + + A sign formatted using SG, PL, or @@ -6428,18 +6623,10 @@ SELECT regexp_match('abc01234xyz', '(?:(.*?)(\d+)(.*)){1,1}'); the number; for example, to_char(-12, 'MI9999') produces '-  12' but to_char(-12, 'S9999') produces '  -12'. - The Oracle implementation does not allow the use of + (The Oracle implementation does not allow the use of MI before 9, but rather requires that 9 precede - MI. - - - - - - 9 results in a value with the same number of - digits as there are 9s. If a digit is - not available it outputs a space. + MI.) 
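To make the difference between the 0 and 9 digit positions (and the implicitly reserved sign column) described above concrete, output along these lines would be expected, with quotes added here only to show the padding:

SELECT to_char(12, '9999');
Result: '   12'
SELECT to_char(12, '0999');
Result: ' 0012'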
@@ -6458,6 +6645,17 @@ SELECT regexp_match('abc01234xyz', '(?:(.*?)(\d+)(.*)){1,1}'); + + + In to_number, if non-data template patterns such + as L or TH are used, the + corresponding number of input characters are skipped, whether or not + they match the template pattern, unless they are data characters + (that is, digits, sign, decimal point, or comma). For + example, TH would skip two non-data characters. + + + V with to_char @@ -6486,10 +6684,10 @@ SELECT regexp_match('abc01234xyz', '(?:(.*?)(\d+)(.*)){1,1}'); Certain modifiers can be applied to any template pattern to alter its - behavior. For example, FM9999 - is the 9999 pattern with the + behavior. For example, FM99.99 + is the 99.99 pattern with the FM modifier. - shows the + shows the modifier patterns for numeric formatting. @@ -6506,8 +6704,8 @@ SELECT regexp_match('abc01234xyz', '(?:(.*?)(\d+)(.*)){1,1}'); FM prefix - fill mode (suppress leading zeroes and padding blanks) - FM9999 + fill mode (suppress trailing zeroes and padding blanks) + FM99.99 TH suffix @@ -6524,7 +6722,7 @@ SELECT regexp_match('abc01234xyz', '(?:(.*?)(\d+)(.*)){1,1}'); - shows some + shows some examples of the use of the to_char function. @@ -6554,6 +6752,10 @@ SELECT regexp_match('abc01234xyz', '(?:(.*?)(\d+)(.*)){1,1}'); to_char(-0.1, 'FM9.99') '-.1' + + to_char(-0.1, 'FM90.99') + '-0.1' + to_char(0.1, '0.9') ' 0.1' @@ -6697,15 +6899,15 @@ SELECT regexp_match('abc01234xyz', '(?:(.*?)(\d+)(.*)){1,1}'); Date/Time Functions and Operators - shows the available + shows the available functions for date/time value processing, with details appearing in the following subsections. illustrates the behaviors of + linkend="operators-datetime-table"/> illustrates the behaviors of the basic arithmetic operators (+, *, etc.). For formatting functions, refer to - . You should be familiar with + . You should be familiar with the background information on date/time data types from . + linkend="datatype-datetime"/>. @@ -6713,7 +6915,7 @@ SELECT regexp_match('abc01234xyz', '(?:(.*?)(\d+)(.*)){1,1}'); inputs actually come in two variants: one that takes time with time zone or timestamp with time zone, and one that takes time without time zone or timestamp without time zone. For brevity, these variants are not shown separately. Also, the - + and * operators come in commutative pairs (for + + and * operators come in commutative pairs (for example both date + integer and integer + date); we show only one of each such pair. 
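For example, the commutative pair just mentioned should give the same result in either order:

SELECT date '2001-09-28' + 7;
Result: 2001-10-05
SELECT 7 + date '2001-09-28';
Result: 2001-10-05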
@@ -6870,7 +7072,7 @@ SELECT regexp_match('abc01234xyz', '(?:(.*?)(\d+)(.*)){1,1}'); age(timestamp, timestamp) interval - Subtract arguments, producing a symbolic result that + Subtract arguments, producing a symbolic result that uses years and months, rather than just days age(timestamp '2001-04-10', timestamp '1957-06-13') 43 years 9 mons 27 days @@ -6893,7 +7095,7 @@ SELECT regexp_match('abc01234xyz', '(?:(.*?)(\d+)(.*)){1,1}'); timestamp with time zone Current date and time (changes during statement execution); - see + see @@ -6908,7 +7110,7 @@ SELECT regexp_match('abc01234xyz', '(?:(.*?)(\d+)(.*)){1,1}'); date Current date; - see + see @@ -6923,7 +7125,7 @@ SELECT regexp_match('abc01234xyz', '(?:(.*?)(\d+)(.*)){1,1}'); time with time zone Current time of day; - see + see @@ -6938,7 +7140,7 @@ SELECT regexp_match('abc01234xyz', '(?:(.*?)(\d+)(.*)){1,1}'); timestamp with time zone Current date and time (start of current transaction); - see + see @@ -6953,7 +7155,7 @@ SELECT regexp_match('abc01234xyz', '(?:(.*?)(\d+)(.*)){1,1}'); double precision Get subfield (equivalent to extract); - see + see date_part('hour', timestamp '2001-02-16 20:38:40') 20 @@ -6963,7 +7165,7 @@ SELECT regexp_match('abc01234xyz', '(?:(.*?)(\d+)(.*)){1,1}'); date_part(text, interval) double precision Get subfield (equivalent to - extract); see + extract); see date_part('month', interval '2 years 3 months') 3 @@ -6977,7 +7179,7 @@ SELECT regexp_match('abc01234xyz', '(?:(.*?)(\d+)(.*)){1,1}'); date_trunc(text, timestamp) timestamp - Truncate to specified precision; see also + Truncate to specified precision; see also date_trunc('hour', timestamp '2001-02-16 20:38:40') 2001-02-16 20:00:00 @@ -6986,7 +7188,7 @@ SELECT regexp_match('abc01234xyz', '(?:(.*?)(\d+)(.*)){1,1}'); date_trunc(text, interval) interval - Truncate to specified precision; see also + Truncate to specified precision; see also date_trunc('hour', interval '2 days 3 hours 40 minutes') 2 days 03:00:00 @@ -7001,7 +7203,7 @@ SELECT regexp_match('abc01234xyz', '(?:(.*?)(\d+)(.*)){1,1}'); timestamp) double precision - Get subfield; see + Get subfield; see extract(hour from timestamp '2001-02-16 20:38:40') 20 @@ -7011,7 +7213,7 @@ SELECT regexp_match('abc01234xyz', '(?:(.*?)(\d+)(.*)){1,1}'); extract(field from interval) double precision - Get subfield; see + Get subfield; see extract(month from interval '2 years 3 months') 3 @@ -7080,7 +7282,7 @@ SELECT regexp_match('abc01234xyz', '(?:(.*?)(\d+)(.*)){1,1}'); justify_interval(interval) interval - Adjust interval using justify_days and justify_hours, with additional sign adjustments + Adjust interval using justify_days and justify_hours, with additional sign adjustments justify_interval(interval '1 mon -1 hour') 29 days 23:00:00 @@ -7094,7 +7296,7 @@ SELECT regexp_match('abc01234xyz', '(?:(.*?)(\d+)(.*)){1,1}'); time Current time of day; - see + see @@ -7109,7 +7311,7 @@ SELECT regexp_match('abc01234xyz', '(?:(.*?)(\d+)(.*)){1,1}'); timestamp Current date and time (start of current transaction); - see + see @@ -7243,7 +7445,7 @@ SELECT regexp_match('abc01234xyz', '(?:(.*?)(\d+)(.*)){1,1}'); timestamp with time zone Current date and time (start of current transaction); - see + see @@ -7258,7 +7460,7 @@ SELECT regexp_match('abc01234xyz', '(?:(.*?)(\d+)(.*)){1,1}'); timestamp with time zone Current date and time (start of current statement); - see + see @@ -7273,8 +7475,8 @@ SELECT regexp_match('abc01234xyz', '(?:(.*?)(\d+)(.*)){1,1}'); text Current date and time - (like clock_timestamp, but as a text string); - 
see + (like clock_timestamp, but as a text string); + see @@ -7289,7 +7491,7 @@ SELECT regexp_match('abc01234xyz', '(?:(.*?)(\d+)(.*)){1,1}'); timestamp with time zone Current date and time (start of current transaction); - see + see @@ -7315,7 +7517,7 @@ SELECT regexp_match('abc01234xyz', '(?:(.*?)(\d+)(.*)){1,1}'); OVERLAPS - In addition to these functions, the SQL OVERLAPS operator is + In addition to these functions, the SQL OVERLAPS operator is supported: (start1, end1) OVERLAPS (start2, end2) @@ -7326,11 +7528,11 @@ SELECT regexp_match('abc01234xyz', '(?:(.*?)(\d+)(.*)){1,1}'); can be specified as pairs of dates, times, or time stamps; or as a date, time, or time stamp followed by an interval. When a pair of values is provided, either the start or the end can be written - first; OVERLAPS automatically takes the earlier value + first; OVERLAPS automatically takes the earlier value of the pair as the start. Each time period is considered to - represent the half-open interval start <= - time < end, unless - start and end are equal in which case it + represent the half-open interval start <= + time < end, unless + start and end are equal in which case it represents that single time instant. This means for instance that two time periods with only an endpoint in common do not overlap. @@ -7369,31 +7571,31 @@ SELECT (DATE '2001-10-30', DATE '2001-10-30') OVERLAPS - Note there can be ambiguity in the months field returned by - age because different months have different numbers of - days. PostgreSQL's approach uses the month from the + Note there can be ambiguity in the months field returned by + age because different months have different numbers of + days. PostgreSQL's approach uses the month from the earlier of the two dates when calculating partial months. For example, - age('2004-06-01', '2004-04-30') uses April to yield - 1 mon 1 day, while using May would yield 1 mon 2 - days because May has 31 days, while April has only 30. + age('2004-06-01', '2004-04-30') uses April to yield + 1 mon 1 day, while using May would yield 1 mon 2 + days because May has 31 days, while April has only 30. Subtraction of dates and timestamps can also be complex. One conceptually simple way to perform subtraction is to convert each value to a number - of seconds using EXTRACT(EPOCH FROM ...), then subtract the + of seconds using EXTRACT(EPOCH FROM ...), then subtract the results; this produces the - number of seconds between the two values. This will adjust + number of seconds between the two values. This will adjust for the number of days in each month, timezone changes, and daylight saving time adjustments. Subtraction of date or timestamp - values with the - operator + values with the - operator returns the number of days (24-hours) and hours/minutes/seconds - between the values, making the same adjustments. The age + between the values, making the same adjustments. The age function returns years, months, days, and hours/minutes/seconds, performing field-by-field subtraction and then adjusting for negative field values. The following queries illustrate the differences in these approaches. 
The sample results were produced with timezone - = 'US/Eastern'; there is a daylight saving time change between the + = 'US/Eastern'; there is a daylight saving time change between the two dates used: @@ -7505,8 +7707,8 @@ SELECT EXTRACT(DECADE FROM TIMESTAMP '2001-02-16 20:38:40'); dow - The day of the week as Sunday (0) to - Saturday (6) + The day of the week as Sunday (0) to + Saturday (6) @@ -7558,7 +7760,7 @@ SELECT EXTRACT(EPOCH FROM INTERVAL '5 days 3 hours'); You can convert an epoch value back to a time stamp - with to_timestamp: + with to_timestamp: SELECT to_timestamp(982384720.12); @@ -7585,8 +7787,8 @@ SELECT EXTRACT(HOUR FROM TIMESTAMP '2001-02-16 20:38:40'); isodow - The day of the week as Monday (1) to - Sunday (7) + The day of the week as Monday (1) to + Sunday (7) @@ -7594,8 +7796,8 @@ SELECT EXTRACT(ISODOW FROM TIMESTAMP '2001-02-18 20:38:40'); Result: 7 - This is identical to dow except for Sunday. This - matches the ISO 8601 day of the week numbering. + This is identical to dow except for Sunday. This + matches the ISO 8601 day of the week numbering. @@ -7790,11 +7992,11 @@ SELECT EXTRACT(SECOND FROM TIME '17:12:28.5'); In the ISO week-numbering system, it is possible for early-January dates to be part of the 52nd or 53rd week of the previous year, and for late-December dates to be part of the first week of the next year. - For example, 2005-01-01 is part of the 53rd week of year - 2004, and 2006-01-01 is part of the 52nd week of year - 2005, while 2012-12-31 is part of the first week of 2013. - It's recommended to use the isoyear field together with - week to get consistent results. + For example, 2005-01-01 is part of the 53rd week of year + 2004, and 2006-01-01 is part of the 52nd week of year + 2005, while 2012-12-31 is part of the first week of 2013. + It's recommended to use the isoyear field together with + week to get consistent results. @@ -7808,8 +8010,8 @@ SELECT EXTRACT(WEEK FROM TIMESTAMP '2001-02-16 20:38:40'); year - The year field. Keep in mind there is no 0 AD, so subtracting - BC years from AD years should be done with care. + The year field. Keep in mind there is no 0 AD, so subtracting + BC years from AD years should be done with care. @@ -7824,11 +8026,11 @@ SELECT EXTRACT(YEAR FROM TIMESTAMP '2001-02-16 20:38:40'); - When the input value is +/-Infinity, extract returns - +/-Infinity for monotonically-increasing fields (epoch, - julian, year, isoyear, - decade, century, and millennium). - For other fields, NULL is returned. PostgreSQL + When the input value is +/-Infinity, extract returns + +/-Infinity for monotonically-increasing fields (epoch, + julian, year, isoyear, + decade, century, and millennium). + For other fields, NULL is returned. PostgreSQL versions before 9.6 returned zero for all cases of infinite input. @@ -7836,7 +8038,7 @@ SELECT EXTRACT(YEAR FROM TIMESTAMP '2001-02-16 20:38:40'); The extract function is primarily intended for computational processing. For formatting date/time values for - display, see . + display, see . @@ -7879,13 +8081,13 @@ SELECT date_part('hour', INTERVAL '4 hours 3 minutes'); date_trunc('field', source) source is a value expression of type - timestamp or interval. + timestamp or interval. (Values of type date and time are cast automatically to timestamp or - interval, respectively.) + interval, respectively.) field selects to which precision to truncate the input value. 
The return value is of type - timestamp or interval + timestamp or interval with all fields that are less significant than the selected one set to zero (or one, for day and month). @@ -7934,10 +8136,11 @@ SELECT date_trunc('year', TIMESTAMP '2001-02-16 20:38:40'); - The AT TIME ZONE construct allows conversions - of time stamps to different time zones. shows its - variants. + The AT TIME ZONE converts time + stamp without time zone to/from + time stamp with time zone, and + time values to different time zones. shows its variants. @@ -7954,59 +8157,68 @@ SELECT date_trunc('year', TIMESTAMP '2001-02-16 20:38:40'); - timestamp without time zone AT TIME ZONE zone + timestamp without time zone AT TIME ZONE zone timestamp with time zone - Treat given time stamp without time zone as located in the specified time zone + Treat given time stamp without time zone as located in the specified time zone - timestamp with time zone AT TIME ZONE zone + timestamp with time zone AT TIME ZONE zone timestamp without time zone - Convert given time stamp with time zone to the new time + Convert given time stamp with time zone to the new time zone, with no time zone designation - time with time zone AT TIME ZONE zone + time with time zone AT TIME ZONE zone time with time zone - Convert given time with time zone to the new time zone + Convert given time with time zone to the new time zone
- In these expressions, the desired time zone zone can be - specified either as a text string (e.g., 'PST') + In these expressions, the desired time zone zone can be + specified either as a text string (e.g., 'America/Los_Angeles') or as an interval (e.g., INTERVAL '-08:00'). In the text case, a time zone name can be specified in any of the ways - described in . + described in . - Examples (assuming the local time zone is PST8PDT): + Examples (assuming the local time zone is America/Los_Angeles): -SELECT TIMESTAMP '2001-02-16 20:38:40' AT TIME ZONE 'MST'; +SELECT TIMESTAMP '2001-02-16 20:38:40' AT TIME ZONE 'America/Denver'; Result: 2001-02-16 19:38:40-08 -SELECT TIMESTAMP WITH TIME ZONE '2001-02-16 20:38:40-05' AT TIME ZONE 'MST'; +SELECT TIMESTAMP WITH TIME ZONE '2001-02-16 20:38:40-05' AT TIME ZONE 'America/Denver'; Result: 2001-02-16 18:38:40 + +SELECT TIMESTAMP '2001-02-16 20:38:40-05' AT TIME ZONE 'Asia/Tokyo' AT TIME ZONE 'America/Chicago'; +Result: 2001-02-16 05:38:40 - The first example takes a time stamp without time zone and interprets it as MST time - (UTC-7), which is then converted to PST (UTC-8) for display. The second example takes - a time stamp specified in EST (UTC-5) and converts it to local time in MST (UTC-7). + The first example adds a time zone to a value that lacks it, and + displays the value using the current TimeZone + setting. The second example shifts the time stamp with time zone value + to the specified time zone, and returns the value without a time zone. + This allows storage and display of values different from the current + TimeZone setting. The third example converts + Tokyo time to Chicago time. Converting time + values to other time zones uses the currently active time zone rules + since no date is supplied. - The function timezone(zone, - timestamp) is equivalent to the SQL-conforming construct - timestamp AT TIME ZONE - zone. + The function timezone(zone, + timestamp) is equivalent to the SQL-conforming construct + timestamp AT TIME ZONE + zone. @@ -8111,23 +8323,23 @@ now() - transaction_timestamp() is equivalent to + transaction_timestamp() is equivalent to CURRENT_TIMESTAMP, but is named to clearly reflect what it returns. - statement_timestamp() returns the start time of the current + statement_timestamp() returns the start time of the current statement (more specifically, the time of receipt of the latest command message from the client). - statement_timestamp() and transaction_timestamp() + statement_timestamp() and transaction_timestamp() return the same value during the first command of a transaction, but might differ during subsequent commands. - clock_timestamp() returns the actual current time, and + clock_timestamp() returns the actual current time, and therefore its value changes even within a single SQL command. - timeofday() is a historical + timeofday() is a historical PostgreSQL function. Like - clock_timestamp(), it returns the actual current time, - but as a formatted text string rather than a timestamp - with time zone value. - now() is a traditional PostgreSQL + clock_timestamp(), it returns the actual current time, + but as a formatted text string rather than a timestamp + with time zone value. + now() is a traditional PostgreSQL equivalent to transaction_timestamp(). @@ -8145,7 +8357,7 @@ SELECT TIMESTAMP 'now'; -- incorrect for use with DEFAULT - You do not want to use the third form when specifying a DEFAULT + You do not want to use the third form when specifying a DEFAULT clause while creating a table. 
The system will convert now to a timestamp as soon as the constant is parsed, so that when the default value is needed, @@ -8181,16 +8393,16 @@ SELECT TIMESTAMP 'now'; -- incorrect for use with DEFAULT process: pg_sleep(seconds) -pg_sleep_for(interval) -pg_sleep_until(timestamp with time zone) +pg_sleep_for(interval) +pg_sleep_until(timestamp with time zone) pg_sleep makes the current session's process sleep until seconds seconds have elapsed. seconds is a value of type - double precision, so fractional-second delays can be specified. + double precision, so fractional-second delays can be specified. pg_sleep_for is a convenience function for larger - sleep times specified as an interval. + sleep times specified as an interval. pg_sleep_until is a convenience function for when a specific wake-up time is desired. For example: @@ -8229,10 +8441,10 @@ SELECT pg_sleep_until('tomorrow 03:00'); Enum Support Functions - For enum types (described in ), + For enum types (described in ), there are several functions that allow cleaner programming without hard-coding particular values of an enum type. - These are listed in . The examples + These are listed in . The examples assume an enum type created as: @@ -8312,7 +8524,7 @@ CREATE TYPE rainbow AS ENUM ('red', 'orange', 'yellow', 'green', 'blue', 'purple - Notice that except for the two-argument form of enum_range, + Notice that except for the two-argument form of enum_range, these functions disregard the specific value passed to them; they care only about its declared data type. Either null or a specific value of the type can be passed, with the same result. It is more common to @@ -8329,20 +8541,20 @@ CREATE TYPE rainbow AS ENUM ('red', 'orange', 'yellow', 'green', 'blue', 'purple lseg, line, path, polygon, and circle have a large set of native support functions and operators, shown in , , and . + linkend="functions-geometry-op-table"/>, , and . - Note that the same as operator, ~=, represents + Note that the same as operator, ~=, represents the usual notion of equality for the point, box, polygon, and circle types. - Some of these types also have an = operator, but - = compares - for equal areas only. The other scalar comparison operators - (<= and so on) likewise compare areas for these types. + Some of these types also have an = operator, but + = compares + for equal areas only. The other scalar comparison operators + (<= and so on) likewise compare areas for these types. @@ -8519,8 +8731,8 @@ CREATE TYPE rainbow AS ENUM ('red', 'orange', 'yellow', 'green', 'blue', 'purple Before PostgreSQL 8.2, the containment - operators @> and <@ were respectively - called ~ and @. These names are still + operators @> and <@ were respectively + called ~ and @. These names are still available, but are deprecated and will eventually be removed. @@ -8575,76 +8787,67 @@ CREATE TYPE rainbow AS ENUM ('red', 'orange', 'yellow', 'green', 'blue', 'purple - area(object) + area(object) double precision area area(box '((0,0),(1,1))') - center(object) + center(object) point center center(box '((0,0),(1,2))') - diameter(circle) + diameter(circle) double precision diameter of circle diameter(circle '((0,0),2.0)') - height(box) + height(box) double precision vertical size of box height(box '((0,0),(1,1))') - isclosed(path) + isclosed(path) boolean a closed path? isclosed(path '((0,0),(1,1),(2,0))') - isopen(path) + isopen(path) boolean an open path? 
isopen(path '[(0,0),(1,1),(2,0)]') - length(object) + length(object) double precision length length(path '((-1,0),(1,0))') - npoints(path) + npoints(path) int number of points npoints(path '[(0,0),(1,1),(2,0)]') - npoints(polygon) + npoints(polygon) int number of points npoints(polygon '((1,1),(0,0))') - pclose(path) + pclose(path) path convert path to closed pclose(path '[(0,0),(1,1),(2,0)]') - - - point(lseg, lseg) - point - intersection - point(lseg '((-1,0),(1,0))',lseg '((-2,-2),(2,2))') - -]]> - popen(path) + popen(path) path convert path to open popen(path '((0,0),(1,1),(2,0))') @@ -8656,7 +8859,7 @@ CREATE TYPE rainbow AS ENUM ('red', 'orange', 'yellow', 'green', 'blue', 'purple radius(circle '((0,0),2.0)') - width(box) + width(box) double precision horizontal size of box width(box '((0,0),(1,1))') @@ -8839,13 +9042,13 @@ CREATE TYPE rainbow AS ENUM ('red', 'orange', 'yellow', 'green', 'blue', 'purple - It is possible to access the two component numbers of a point + It is possible to access the two component numbers of a point as though the point were an array with indexes 0 and 1. For example, if - t.p is a point column then - SELECT p[0] FROM t retrieves the X coordinate and - UPDATE t SET p[1] = ... changes the Y coordinate. - In the same way, a value of type box or lseg can be treated - as an array of two point values. + t.p is a point column then + SELECT p[0] FROM t retrieves the X coordinate and + UPDATE t SET p[1] = ... changes the Y coordinate. + In the same way, a value of type box or lseg can be treated + as an array of two point values. @@ -8871,7 +9074,7 @@ CREATE TYPE rainbow AS ENUM ('red', 'orange', 'yellow', 'green', 'blue', 'purple Network Address Functions and Operators - shows the operators + shows the operators available for the cidr and inet types. The operators <<, <<=, >>, @@ -8983,7 +9186,7 @@ CREATE TYPE rainbow AS ENUM ('red', 'orange', 'yellow', 'green', 'blue', 'purple - shows the functions + shows the functions available for use with the cidr and inet types. The abbrev, host, and text @@ -9168,23 +9371,23 @@ CREATE TYPE rainbow AS ENUM ('red', 'orange', 'yellow', 'green', 'blue', 'purple - Any cidr value can be cast to inet implicitly + Any cidr value can be cast to inet implicitly or explicitly; therefore, the functions shown above as operating on - inet also work on cidr values. (Where there are - separate functions for inet and cidr, it is because + inet also work on cidr values. (Where there are + separate functions for inet and cidr, it is because the behavior should be different for the two cases.) - Also, it is permitted to cast an inet value to cidr. + Also, it is permitted to cast an inet value to cidr. When this is done, any bits to the right of the netmask are silently zeroed - to create a valid cidr value. + to create a valid cidr value. In addition, - you can cast a text value to inet or cidr + you can cast a text value to inet or cidr using normal casting syntax: for example, - inet(expression) or - colname::cidr. + inet(expression) or + colname::cidr. - shows the functions + shows the functions available for use with the macaddr type. The function trunc(macaddr) returns a MAC address with the last 3 bytes set to zero. This can be used to @@ -9229,7 +9432,7 @@ CREATE TYPE rainbow AS ENUM ('red', 'orange', 'yellow', 'green', 'blue', 'purple - shows the functions + shows the functions available for use with the macaddr8 type. The function trunc(macaddr8) returns a MAC address with the last 5 bytes set to zero. 
This can be used to @@ -9301,11 +9504,11 @@ CREATE TYPE rainbow AS ENUM ('red', 'orange', 'yellow', 'green', 'blue', 'purple - , - and - + , + and + summarize the functions and operators that are provided - for full text searching. See for a detailed + for full text searching. See for a detailed explanation of PostgreSQL's text search facility. @@ -9325,64 +9528,64 @@ CREATE TYPE rainbow AS ENUM ('red', 'orange', 'yellow', 'green', 'blue', 'purple @@ - boolean - tsvector matches tsquery ? + boolean + tsvector matches tsquery ? to_tsvector('fat cats ate rats') @@ to_tsquery('cat & rat') t @@@ - boolean - deprecated synonym for @@ + boolean + deprecated synonym for @@ to_tsvector('fat cats ate rats') @@@ to_tsquery('cat & rat') t || - tsvector - concatenate tsvectors + tsvector + concatenate tsvectors 'a:1 b:2'::tsvector || 'c:1 d:2 b:3'::tsvector 'a':1 'b':2,5 'c':3 'd':4 && - tsquery - AND tsquerys together + tsquery + AND tsquerys together 'fat | rat'::tsquery && 'cat'::tsquery ( 'fat' | 'rat' ) & 'cat' || - tsquery - OR tsquerys together + tsquery + OR tsquerys together 'fat | rat'::tsquery || 'cat'::tsquery ( 'fat' | 'rat' ) | 'cat' !! - tsquery - negate a tsquery + tsquery + negate a tsquery !! 'cat'::tsquery !'cat' <-> - tsquery - tsquery followed by tsquery + tsquery + tsquery followed by tsquery to_tsquery('fat') <-> to_tsquery('rat') 'fat' <-> 'rat' @> - boolean - tsquery contains another ? + boolean + tsquery contains another ? 'cat'::tsquery @> 'cat & rat'::tsquery f <@ - boolean - tsquery is contained in ? + boolean + tsquery is contained in ? 'cat'::tsquery <@ 'cat & rat'::tsquery t @@ -9392,15 +9595,15 @@ CREATE TYPE rainbow AS ENUM ('red', 'orange', 'yellow', 'green', 'blue', 'purple - The tsquery containment operators consider only the lexemes + The tsquery containment operators consider only the lexemes listed in the two queries, ignoring the combining operators. In addition to the operators shown in the table, the ordinary B-tree - comparison operators (=, <, etc) are defined - for types tsvector and tsquery. These are not very + comparison operators (=, <, etc) are defined + for types tsvector and tsquery. These are not very useful for text searching but allow, for example, unique indexes to be built on columns of these types. 
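As a minimal illustration of the @@ match operator from the table above (using the english configuration; the stemming shown is what that configuration is expected to produce):

SELECT to_tsvector('english', 'The fat cats sat on a mat') @@ to_tsquery('english', 'cat & mat');
Result: t
SELECT to_tsvector('english', 'The fat cats sat on a mat') @@ to_tsquery('english', 'cat & dog');
Result: f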
@@ -9423,7 +9626,7 @@ CREATE TYPE rainbow AS ENUM ('red', 'orange', 'yellow', 'green', 'blue', 'purple array_to_tsvector - array_to_tsvector(text[]) + array_to_tsvector(text[]) tsvector convert array of lexemes to tsvector @@ -9447,10 +9650,10 @@ CREATE TYPE rainbow AS ENUM ('red', 'orange', 'yellow', 'green', 'blue', 'purple length - length(tsvector) + length(tsvector) integer - number of lexemes in tsvector + number of lexemes in tsvector length('fat:2,4 cat:3 rat:5A'::tsvector) 3 @@ -9459,10 +9662,10 @@ CREATE TYPE rainbow AS ENUM ('red', 'orange', 'yellow', 'green', 'blue', 'purple numnode - numnode(tsquery) + numnode(tsquery) integer - number of lexemes plus operators in tsquery + number of lexemes plus operators in tsquery numnode('(fat & rat) | cat'::tsquery) 5 @@ -9471,10 +9674,10 @@ CREATE TYPE rainbow AS ENUM ('red', 'orange', 'yellow', 'green', 'blue', 'purple plainto_tsquery - plainto_tsquery( config regconfig , query text) + plainto_tsquery( config regconfig , query text) tsquery - produce tsquery ignoring punctuation + produce tsquery ignoring punctuation plainto_tsquery('english', 'The Fat Rats') 'fat' & 'rat' @@ -9483,23 +9686,35 @@ CREATE TYPE rainbow AS ENUM ('red', 'orange', 'yellow', 'green', 'blue', 'purple phraseto_tsquery - phraseto_tsquery( config regconfig , query text) + phraseto_tsquery( config regconfig , query text) tsquery - produce tsquery that searches for a phrase, + produce tsquery that searches for a phrase, ignoring punctuation phraseto_tsquery('english', 'The Fat Rats') 'fat' <-> 'rat' + + + + websearch_to_tsquery + + websearch_to_tsquery( config regconfig , query text) + + tsquery + produce tsquery from a web search style query + websearch_to_tsquery('english', '"fat rat" or rat') + 'fat' <-> 'rat' | 'rat' + querytree - querytree(query tsquery) + querytree(query tsquery) text - get indexable part of a tsquery + get indexable part of a tsquery querytree('foo & ! 
bar'::tsquery) 'foo' @@ -9508,10 +9723,10 @@ CREATE TYPE rainbow AS ENUM ('red', 'orange', 'yellow', 'green', 'blue', 'purple setweight - setweight(vector tsvector, weight "char") + setweight(vector tsvector, weight "char") tsvector - assign weight to each element of vector + assign weight to each element of vector setweight('fat:2,4 cat:3 rat:5B'::tsvector, 'A') 'cat':3A 'fat':2A,4A 'rat':5A @@ -9521,10 +9736,10 @@ CREATE TYPE rainbow AS ENUM ('red', 'orange', 'yellow', 'green', 'blue', 'purple setweight setweight for specific lexeme(s) - setweight(vector tsvector, weight "char", lexemes text[]) + setweight(vector tsvector, weight "char", lexemes text[]) tsvector - assign weight to elements of vector that are listed in lexemes + assign weight to elements of vector that are listed in lexemes setweight('fat:2,4 cat:3 rat:5B'::tsvector, 'A', '{cat,rat}') 'cat':3A 'fat':2,4 'rat':5A @@ -9533,10 +9748,10 @@ CREATE TYPE rainbow AS ENUM ('red', 'orange', 'yellow', 'green', 'blue', 'purple strip - strip(tsvector) + strip(tsvector) tsvector - remove positions and weights from tsvector + remove positions and weights from tsvector strip('fat:2,4 cat:3 rat:5A'::tsvector) 'cat' 'fat' 'rat' @@ -9545,10 +9760,10 @@ CREATE TYPE rainbow AS ENUM ('red', 'orange', 'yellow', 'green', 'blue', 'purple to_tsquery - to_tsquery( config regconfig , query text) + to_tsquery( config regconfig , query text) tsquery - normalize words and convert to tsquery + normalize words and convert to tsquery to_tsquery('english', 'The & Fat & Rats') 'fat' & 'rat' @@ -9557,44 +9772,64 @@ CREATE TYPE rainbow AS ENUM ('red', 'orange', 'yellow', 'green', 'blue', 'purple to_tsvector - to_tsvector( config regconfig , document text) + to_tsvector( config regconfig , document text) tsvector - reduce document text to tsvector + reduce document text to tsvector to_tsvector('english', 'The Fat Rats') 'fat':2 'rat':3 - to_tsvector( config regconfig , document json(b)) + to_tsvector( config regconfig , document json(b)) tsvector - reduce each string value in the document to a tsvector, and then - concatenate those in document order to produce a single tsvector + reduce each string value in the document to a tsvector, and then + concatenate those in document order to produce a single tsvector to_tsvector('english', '{"a": "The Fat Rats"}'::json) 'fat':2 'rat':3 + + + json(b)_to_tsvector( config regconfig, + document json(b), + filter json(b)) + + tsvector + + reduce each value in the document, as specified by filter, to a tsvector, + and then concatenate those in document order to produce a single tsvector. + filter is a jsonb array that enumerates which kinds of elements need to be included + in the resulting tsvector. Possible values for filter are + "string" (to include all string values), "numeric" (to include all numeric values in string format), + "boolean" (to include all Boolean values in the string format "true"/"false"), + "key" (to include all keys), or "all" (to include all of the above). These values + can be combined to include, e.g., all string and numeric values. 
+ + json_to_tsvector('english', '{"a": "The Fat Rats", "b": 123}'::json, '["string", "numeric"]') + '123':5 'fat':2 'rat':3 + ts_delete - ts_delete(vector tsvector, lexeme text) + ts_delete(vector tsvector, lexeme text) tsvector - remove given lexeme from vector + remove given lexeme from vector ts_delete('fat:2,4 cat:3 rat:5A'::tsvector, 'fat') 'cat':3 'rat':5A - ts_delete(vector tsvector, lexemes text[]) + ts_delete(vector tsvector, lexemes text[]) tsvector - remove any occurrence of lexemes in lexemes from vector + remove any occurrence of lexemes in lexemes from vector ts_delete('fat:2,4 cat:3 rat:5A'::tsvector, ARRAY['fat','rat']) 'cat':3 @@ -9603,10 +9838,10 @@ CREATE TYPE rainbow AS ENUM ('red', 'orange', 'yellow', 'green', 'blue', 'purple ts_filter - ts_filter(vector tsvector, weights "char"[]) + ts_filter(vector tsvector, weights "char"[]) tsvector - select only elements with given weights from vector + select only elements with given weights from vector ts_filter('fat:2,4 cat:3b rat:5A'::tsvector, '{a,b}') 'cat':3B 'rat':5A @@ -9615,7 +9850,7 @@ CREATE TYPE rainbow AS ENUM ('red', 'orange', 'yellow', 'green', 'blue', 'purple ts_headline - ts_headline( config regconfig, document text, query tsquery , options text ) + ts_headline( config regconfig, document text, query tsquery , options text ) text display a query match @@ -9624,7 +9859,7 @@ CREATE TYPE rainbow AS ENUM ('red', 'orange', 'yellow', 'green', 'blue', 'purple - ts_headline( config regconfig, document json(b), query tsquery , options text ) + ts_headline( config regconfig, document json(b), query tsquery , options text ) text display a query match @@ -9636,7 +9871,7 @@ CREATE TYPE rainbow AS ENUM ('red', 'orange', 'yellow', 'green', 'blue', 'purple ts_rank - ts_rank( weights float4[], vector tsvector, query tsquery , normalization integer ) + ts_rank( weights float4[], vector tsvector, query tsquery , normalization integer ) float4 rank document for query @@ -9648,7 +9883,7 @@ CREATE TYPE rainbow AS ENUM ('red', 'orange', 'yellow', 'green', 'blue', 'purple ts_rank_cd - ts_rank_cd( weights float4[], vector tsvector, query tsquery , normalization integer ) + ts_rank_cd( weights float4[], vector tsvector, query tsquery , normalization integer ) float4 rank document for query using cover density @@ -9660,18 +9895,18 @@ CREATE TYPE rainbow AS ENUM ('red', 'orange', 'yellow', 'green', 'blue', 'purple ts_rewrite - ts_rewrite(query tsquery, target tsquery, substitute tsquery) + ts_rewrite(query tsquery, target tsquery, substitute tsquery) tsquery - replace target with substitute + replace target with substitute within query ts_rewrite('a & b'::tsquery, 'a'::tsquery, 'foo|bar'::tsquery) 'b' & ( 'foo' | 'bar' ) - ts_rewrite(query tsquery, select text) + ts_rewrite(query tsquery, select text) tsquery - replace using targets and substitutes from a SELECT command + replace using targets and substitutes from a SELECT command SELECT ts_rewrite('a & b'::tsquery, 'SELECT t,s FROM aliases') 'b' & ( 'foo' | 'bar' ) @@ -9680,22 +9915,22 @@ CREATE TYPE rainbow AS ENUM ('red', 'orange', 'yellow', 'green', 'blue', 'purple tsquery_phrase - tsquery_phrase(query1 tsquery, query2 tsquery) + tsquery_phrase(query1 tsquery, query2 tsquery) tsquery - make query that searches for query1 followed - by query2 (same as <-> + make query that searches for query1 followed + by query2 (same as <-> operator) tsquery_phrase(to_tsquery('fat'), to_tsquery('cat')) 'fat' <-> 'cat' - tsquery_phrase(query1 tsquery, query2 tsquery, distance integer) + 
tsquery_phrase(query1 tsquery, query2 tsquery, distance integer) tsquery - make query that searches for query1 followed by - query2 at distance distance + make query that searches for query1 followed by + query2 at distance distance tsquery_phrase(to_tsquery('fat'), to_tsquery('cat'), 10) 'fat' <10> 'cat' @@ -9704,10 +9939,10 @@ CREATE TYPE rainbow AS ENUM ('red', 'orange', 'yellow', 'green', 'blue', 'purple tsvector_to_array - tsvector_to_array(tsvector) + tsvector_to_array(tsvector) text[] - convert tsvector to array of lexemes + convert tsvector to array of lexemes tsvector_to_array('fat:2,4 cat:3 rat:5A'::tsvector) {cat,fat,rat} @@ -9719,7 +9954,7 @@ CREATE TYPE rainbow AS ENUM ('red', 'orange', 'yellow', 'green', 'blue', 'purple tsvector_update_trigger() trigger - trigger function for automatic tsvector column update + trigger function for automatic tsvector column update CREATE TRIGGER ... tsvector_update_trigger(tsvcol, 'pg_catalog.swedish', title, body) @@ -9731,7 +9966,7 @@ CREATE TYPE rainbow AS ENUM ('red', 'orange', 'yellow', 'green', 'blue', 'purple tsvector_update_trigger_column() trigger - trigger function for automatic tsvector column update + trigger function for automatic tsvector column update CREATE TRIGGER ... tsvector_update_trigger_column(tsvcol, configcol, title, body) @@ -9741,7 +9976,7 @@ CREATE TYPE rainbow AS ENUM ('red', 'orange', 'yellow', 'green', 'blue', 'purple unnest for tsvector - unnest(tsvector, OUT lexeme text, OUT positions smallint[], OUT weights text) + unnest(tsvector, OUT lexeme text, OUT positions smallint[], OUT weights text) setof record expand a tsvector to a set of rows @@ -9754,16 +9989,16 @@ CREATE TYPE rainbow AS ENUM ('red', 'orange', 'yellow', 'green', 'blue', 'purple - All the text search functions that accept an optional regconfig + All the text search functions that accept an optional regconfig argument will use the configuration specified by - + when that argument is omitted. The functions in - + are listed separately because they are not usually used in everyday text searching operations. They are helpful for development and debugging of new text search configurations. @@ -9787,7 +10022,7 @@ CREATE TYPE rainbow AS ENUM ('red', 'orange', 'yellow', 'green', 'blue', 'purple ts_debug - ts_debug( config regconfig, document text, OUT alias text, OUT description text, OUT token text, OUT dictionaries regdictionary[], OUT dictionary regdictionary, OUT lexemes text[]) + ts_debug( config regconfig, document text, OUT alias text, OUT description text, OUT token text, OUT dictionaries regdictionary[], OUT dictionary regdictionary, OUT lexemes text[]) setof record test a configuration @@ -9799,7 +10034,7 @@ CREATE TYPE rainbow AS ENUM ('red', 'orange', 'yellow', 'green', 'blue', 'purple ts_lexize - ts_lexize(dict regdictionary, token text) + ts_lexize(dict regdictionary, token text) text[] test a dictionary @@ -9811,7 +10046,7 @@ CREATE TYPE rainbow AS ENUM ('red', 'orange', 'yellow', 'green', 'blue', 'purple ts_parse - ts_parse(parser_name text, document text, OUT tokid integer, OUT token text) + ts_parse(parser_name text, document text, OUT tokid integer, OUT token text) setof record test a parser @@ -9819,7 +10054,7 @@ CREATE TYPE rainbow AS ENUM ('red', 'orange', 'yellow', 'green', 'blue', 'purple (1,foo) ... 
- ts_parse(parser_oid oid, document text, OUT tokid integer, OUT token text) + ts_parse(parser_oid oid, document text, OUT tokid integer, OUT token text) setof record test a parser ts_parse(3722, 'foo - bar') @@ -9830,7 +10065,7 @@ CREATE TYPE rainbow AS ENUM ('red', 'orange', 'yellow', 'green', 'blue', 'purple ts_token_type - ts_token_type(parser_name text, OUT tokid integer, OUT alias text, OUT description text) + ts_token_type(parser_name text, OUT tokid integer, OUT alias text, OUT description text) setof record get token types defined by parser @@ -9838,7 +10073,7 @@ CREATE TYPE rainbow AS ENUM ('red', 'orange', 'yellow', 'green', 'blue', 'purple (1,asciiword,"Word, all ASCII") ... - ts_token_type(parser_oid oid, OUT tokid integer, OUT alias text, OUT description text) + ts_token_type(parser_oid oid, OUT tokid integer, OUT alias text, OUT description text) setof record get token types defined by parser ts_token_type(3722) @@ -9849,10 +10084,10 @@ CREATE TYPE rainbow AS ENUM ('red', 'orange', 'yellow', 'green', 'blue', 'purple ts_stat - ts_stat(sqlquery text, weights text, OUT word text, OUT ndoc integer, OUT nentry integer) + ts_stat(sqlquery text, weights text, OUT word text, OUT ndoc integer, OUT nentry integer) setof record - get statistics of a tsvector column + get statistics of a tsvector column ts_stat('SELECT vector from apod') (foo,10,15) ... @@ -9869,12 +10104,12 @@ CREATE TYPE rainbow AS ENUM ('red', 'orange', 'yellow', 'green', 'blue', 'purple The functions and function-like expressions described in this section operate on values of type xml. Check for information about the xml + linkend="datatype-xml"/> for information about the xml type. The function-like expressions xmlparse and xmlserialize for converting to and from type xml are not repeated here. Use of most of these functions requires the installation to have been built - with configure --with-libxml. + with configure --with-libxml. @@ -10066,7 +10301,7 @@ SELECT xmlelement(name foo, xmlattributes('xyz' as bar), and & will be converted to entities. Binary data (data type bytea) will be represented in base64 or hex encoding, depending on the setting of the configuration parameter - . The particular behavior for + . The particular behavior for individual data types is expected to evolve in order to align the SQL and PostgreSQL data types with the XML Schema specification, at which point a more precise description will appear. @@ -10208,7 +10443,7 @@ SELECT xmlroot(xmlparse(document 'abc'), input values to the aggregate function call, much like xmlconcat does, except that concatenation occurs across rows rather than across expressions in a single row. - See for additional information + See for additional information about aggregate functions. @@ -10226,9 +10461,9 @@ SELECT xmlagg(x) FROM test; - To determine the order of the concatenation, an ORDER BY + To determine the order of the concatenation, an ORDER BY clause may be added to the aggregate call as described in - . For example: + . For example: IS DOCUMENT returns true if the argument XML value is a proper XML document, false if it is not (that is, it is a content fragment), or null if the argument is - null. See about the difference + null. See about the difference between documents and content fragments. 
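For instance (assuming the server was built with libxml support, as noted at the start of this section), a value with a single root element should qualify as a document, while a bare content fragment should not:

SELECT xml '<book><title>Manual</title></book>' IS DOCUMENT;
Result: t
SELECT xml 'some text <foo/>' IS DOCUMENT;
Result: f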
+ + <literal>IS NOT DOCUMENT</literal> + + + IS NOT DOCUMENT + + + +xml IS NOT DOCUMENT + + + + The expression IS NOT DOCUMENT returns false if the + argument XML value is a proper XML document, true if it is not (that is, + it is a content fragment), or null if the argument is null. + + + <literal>XMLEXISTS</literal> @@ -10345,18 +10598,18 @@ SELECT xmlexists('//town[text() = ''Toronto'']' PASSING BY REF 'Tor - These functions check whether a text string is well-formed XML, + These functions check whether a text string is well-formed XML, returning a Boolean result. xml_is_well_formed_document checks for a well-formed document, while xml_is_well_formed_content checks for well-formed content. xml_is_well_formed does - the former if the configuration - parameter is set to DOCUMENT, or the latter if it is set to - CONTENT. This means that + the former if the configuration + parameter is set to DOCUMENT, or the latter if it is set to + CONTENT. This means that xml_is_well_formed is useful for seeing whether - a simple cast to type xml will succeed, whereas the other two + a simple cast to type xml will succeed, whereas the other two functions are useful for seeing whether the corresponding variants of - XMLPARSE will succeed. + XMLPARSE will succeed. @@ -10426,7 +10679,7 @@ SELECT xml_is_well_formed_document(' The optional third argument of the function is an array of namespace - mappings. This array should be a two-dimensional text array with + mappings. This array should be a two-dimensional text array with the length of the second axis being equal to 2 (i.e., it should be an array of arrays, each of which consists of exactly 2 elements). The first element of each array entry is the namespace name (alias), the second the namespace URI. It is not required that aliases provided in this array be the same as those being used in the XML document itself (in other words, both in the XML document and in the xpath - function context, aliases are local). + function context, aliases are local). @@ -10494,7 +10747,7 @@ SELECT xpath('//mydefns:b/text()', 'testxpath
function. Instead of returning the individual XML values that satisfy the XPath, this function returns a Boolean indicating whether the query was satisfied or not. This - function is equivalent to the standard XMLEXISTS predicate, + function is equivalent to the standard XMLEXISTS predicate, except that it also offers support for a namespace mapping argument. @@ -10540,21 +10793,21 @@ SELECT xpath_exists('/my:a/text()', 'test - The optional XMLNAMESPACES clause is a comma-separated + The optional XMLNAMESPACES clause is a comma-separated list of namespaces. It specifies the XML namespaces used in the document and their aliases. A default namespace specification is not currently supported. - The required row_expression argument is an XPath + The required row_expression argument is an XPath expression that is evaluated against the supplied XML document to obtain an ordered sequence of XML nodes. This sequence is what - xmltable transforms into output rows. + xmltable transforms into output rows. - document_expression provides the XML document to + document_expression provides the XML document to operate on. The BY REF clauses have no effect in PostgreSQL, but are allowed for SQL conformance and compatibility with other @@ -10566,9 +10819,9 @@ SELECT xpath_exists('/my:a/text()', 'test The mandatory COLUMNS clause specifies the list of columns in the output table. - If the COLUMNS clause is omitted, the rows in the result - set contain a single column of type xml containing the - data matched by row_expression. + If the COLUMNS clause is omitted, the rows in the result + set contain a single column of type xml containing the + data matched by row_expression. If COLUMNS is specified, each entry describes a single column. See the syntax summary above for the format. @@ -10584,10 +10837,10 @@ SELECT xpath_exists('/my:a/text()', 'test - The column_expression for a column is an XPath expression + The column_expression for a column is an XPath expression that is evaluated for each row, relative to the result of the - row_expression, to find the value of the column. - If no column_expression is given, then the column name + row_expression, to find the value of the column. + If no column_expression is given, then the column name is used as an implicit path. @@ -10595,55 +10848,55 @@ SELECT xpath_exists('/my:a/text()', 'testNULL). - Any xsi:nil attributes are ignored. + empty string (not NULL). + Any xsi:nil attributes are ignored. - The text body of the XML matched by the column_expression + The text body of the XML matched by the column_expression is used as the column value. Multiple text() nodes within an element are concatenated in order. Any child elements, processing instructions, and comments are ignored, but the text contents of child elements are concatenated to the result. - Note that the whitespace-only text() node between two non-text - elements is preserved, and that leading whitespace on a text() + Note that the whitespace-only text() node between two non-text + elements is preserved, and that leading whitespace on a text() node is not flattened. If the path expression does not match for a given row but - default_expression is specified, the value resulting + default_expression is specified, the value resulting from evaluating that expression is used. - If no DEFAULT clause is given for the column, - the field will be set to NULL. - It is possible for a default_expression to reference + If no DEFAULT clause is given for the column, + the field will be set to NULL. 
+ It is possible for a default_expression to reference the value of output columns that appear prior to it in the column list, so the default of one column may be based on the value of another column. - Columns may be marked NOT NULL. If the - column_expression for a NOT NULL column - does not match anything and there is no DEFAULT or the - default_expression also evaluates to null, an error + Columns may be marked NOT NULL. If the + column_expression for a NOT NULL column + does not match anything and there is no DEFAULT or the + default_expression also evaluates to null, an error is reported. - Unlike regular PostgreSQL functions, column_expression - and default_expression are not evaluated to a simple + Unlike regular PostgreSQL functions, column_expression + and default_expression are not evaluated to a simple value before calling the function. - column_expression is normally evaluated - exactly once per input row, and default_expression + column_expression is normally evaluated + exactly once per input row, and default_expression is evaluated each time a default is needed for a field. If the expression qualifies as stable or immutable the repeat evaluation may be skipped. - Effectively xmltable behaves more like a subquery than a + Effectively xmltable behaves more like a subquery than a function call. This means that you can usefully use volatile functions like - nextval in default_expression, and - column_expression may depend on other parts of the + nextval in default_expression, and + column_expression may depend on other parts of the XML document. @@ -10714,7 +10967,7 @@ SELECT xmltable.* The following example illustrates how the XMLNAMESPACES clause can be used to specify - the default namespace, and a list of additional namespaces + a list of namespaces used in the XML document as well as in the XPath expressions: As an example of using the output produced by these functions, - shows an XSLT stylesheet that + shows an XSLT stylesheet that converts the output of table_to_xml_and_xmlschema to an HTML document containing a tabular rendition of the table data. In a @@ -11003,13 +11256,13 @@ table2-mapping - shows the operators that + shows the operators that are available for use with the two JSON data types (see ). + linkend="datatype-json"/>). - <type>json</> and <type>jsonb</> Operators + <type>json</type> and <type>jsonb</type> Operators @@ -11039,14 +11292,14 @@ table2-mapping ->> int - Get JSON array element as text + Get JSON array element as text '[1,2,3]'::json->>2 3 ->> text - Get JSON object field as text + Get JSON object field as text '{"a":1,"b":2}'::json->>'b' 2 @@ -11060,7 +11313,7 @@ table2-mapping #>> text[] - Get JSON object at specified path as text + Get JSON object at specified path as text '{"a":[1,2,3],"b":[4,5,6]}'::json#>>'{a,2}' 3 @@ -11075,7 +11328,7 @@ table2-mapping The field/element/path extraction operators return the same type as their left-hand input (either json or jsonb), except for those specified as - returning text, which coerce the value to text. + returning text, which coerce the value to text. The field/element/path extraction operators return NULL, rather than failing, if the JSON input does not have the right structure to match the request; for example if no such element exists. The @@ -11086,23 +11339,23 @@ table2-mapping The standard comparison operators shown in are available for + linkend="functions-comparison-op-table"/> are available for jsonb, but not for json. They follow the ordering rules for B-tree operations outlined at . 
+ linkend="json-indexing"/>. Some further operators also exist only for jsonb, as shown - in . + in . Many of these operators can be indexed by - jsonb operator classes. For a full description of - jsonb containment and existence semantics, see . + jsonb operator classes. For a full description of + jsonb containment and existence semantics, see . describes how these operators can be used to effectively index - jsonb data. + jsonb data.
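    For example, the jsonb-only containment and existence operators behave as
    follows (a brief sketch with literal values):

SELECT '{"product": "beer", "tags": ["ale", "ipa"]}'::jsonb @> '{"product": "beer"}'::jsonb;  -- containment: true
SELECT '{"a": 1, "b": 2}'::jsonb ? 'b';                                                       -- top-level key exists: true
SELECT '{"a": 1, "b": 2}'::jsonb ?| array['b', 'c'];                                          -- any listed key exists: true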
- Additional <type>jsonb</> Operators + Additional <type>jsonb</type> Operators @@ -11191,7 +11444,7 @@ table2-mapping - The || operator concatenates the elements at the top level of + The || operator concatenates the elements at the top level of each of its operands. It does not operate recursively. For example, if both operands are objects with a common key field name, the value of the field in the result will just be the value from the right hand operand. @@ -11199,10 +11452,10 @@ table2-mapping - shows the functions that are + shows the functions that are available for creating json and jsonb values. - (There are no equivalent functions for jsonb, of the row_to_json - and array_to_json functions. However, the to_jsonb + (There are no equivalent functions for jsonb, of the row_to_json + and array_to_json functions. However, the to_jsonb function supplies much the same functionality as these functions would.) @@ -11254,14 +11507,14 @@ table2-mapping to_jsonb(anyelement) - Returns the value as json or jsonb. + Returns the value as json or jsonb. Arrays and composites are converted (recursively) to arrays and objects; otherwise, if there is a cast from the type to json, the cast function will be used to perform the conversion; otherwise, a scalar value is produced. For any scalar type other than a number, a Boolean, or a null value, the text representation will be used, in such a fashion that it is a - valid json or jsonb value. + valid json or jsonb value. to_json('Fred said "Hi."'::text) "Fred said \"Hi.\"" @@ -11323,8 +11576,8 @@ table2-mapping such that each inner array has exactly two elements, which are taken as a key/value pair. - json_object('{a, 1, b, "def", c, 3.5}') - json_object('{{a, 1},{b, "def"},{c, 3.5}}') + json_object('{a, 1, b, "def", c, 3.5}') + json_object('{{a, 1},{b, "def"},{c, 3.5}}') {"a": "1", "b": "def", "c": "3.5"} @@ -11332,7 +11585,7 @@ table2-mapping jsonb_object(keys text[], values text[]) - This form of json_object takes keys and values pairwise from two separate + This form of json_object takes keys and values pairwise from two separate arrays. In all other respects it is identical to the one-argument form. json_object('{a, b}', '{1,2}') @@ -11344,16 +11597,16 @@ table2-mapping - array_to_json and row_to_json have the same - behavior as to_json except for offering a pretty-printing - option. The behavior described for to_json likewise applies + array_to_json and row_to_json have the same + behavior as to_json except for offering a pretty-printing + option. The behavior described for to_json likewise applies to each individual value converted by the other JSON creation functions. - The extension has a cast + The extension has a cast from hstore to json, so that hstore values converted via the JSON creation functions will be represented as JSON objects, not as primitive string values. @@ -11361,7 +11614,7 @@ table2-mapping - shows the functions that + shows the functions that are available for processing json and jsonb values. @@ -11510,7 +11763,7 @@ table2-mapping setof key text, value text Expands the outermost JSON object into a set of key/value pairs. The - returned values will be of type text. + returned values will be of type text. select * from json_each_text('{"a":"foo", "b":"bar"}') @@ -11542,7 +11795,7 @@ table2-mapping text Returns JSON value pointed to by path_elems - as text + as text (equivalent to #>> operator). 
json_extract_path_text('{"f2":{"f3":1},"f4":{"f5":99,"f6":"foo"}}','f4', 'f6') @@ -11573,7 +11826,7 @@ table2-mapping anyelement Expands the object in from_json to a row - whose columns match the record type defined by base + whose columns match the record type defined by base (see note below). select * from json_populate_record(null::myrowtype, '{"a": 1, "b": ["2", "a b"], "c": {"d": 4, "e": "a b c"}}') @@ -11593,7 +11846,7 @@ table2-mapping Expands the outermost array of objects in from_json to a set of rows whose - columns match the record type defined by base (see + columns match the record type defined by base (see note below). select * from json_populate_recordset(null::myrowtype, '[{"a":1,"b":2},{"a":3,"b":4}]') @@ -11633,7 +11886,7 @@ table2-mapping setof text - Expands a JSON array to a set of text values. + Expands a JSON array to a set of text values. select * from json_array_elements_text('["foo", "bar"]') @@ -11653,8 +11906,8 @@ table2-mapping Returns the type of the outermost JSON value as a text string. Possible types are - object, array, string, number, - boolean, and null. + object, array, string, number, + boolean, and null. json_typeof('-123.4') number @@ -11666,8 +11919,8 @@ table2-mapping record Builds an arbitrary record from a JSON object (see note below). As - with all functions returning record, the caller must - explicitly define the structure of the record with an AS + with all functions returning record, the caller must + explicitly define the structure of the record with an AS clause. select * from json_to_record('{"a":1,"b":[1,2,3],"c":[1,2,3],"e":"bar","r": {"a": 123, "b": "a b c"}}') as x(a int, b text, c int[], d text, r myrowtype) @@ -11686,9 +11939,9 @@ table2-mapping setof record Builds an arbitrary set of records from a JSON array of objects (see - note below). As with all functions returning record, the + note below). As with all functions returning record, the caller must explicitly define the structure of the record with - an AS clause. + an AS clause. select * from json_to_recordset('[{"a":1,"b":"foo"},{"a":"2","c":"bar"}]') as x(a int, b text); @@ -11723,7 +11976,7 @@ table2-mapping replaced by new_value, or with new_value added if create_missing is true ( default is - true) and the item + true) and the item designated by path does not exist. As with the path orientated operators, negative integers that appear in path count from the end @@ -11750,7 +12003,7 @@ table2-mapping path is in a JSONB array, new_value will be inserted before target or after if insert_after is true (default is - false). If target section + false). If target section designated by path is in JSONB object, new_value will be inserted only if target does not exist. As with the path @@ -11800,38 +12053,52 @@ table2-mapping Many of these functions and operators will convert Unicode escapes in JSON strings to the appropriate single character. This is a non-issue - if the input is type jsonb, because the conversion was already - done; but for json input, this may result in throwing an error, - as noted in . + if the input is type jsonb, because the conversion was already + done; but for json input, this may result in throwing an error, + as noted in . - In json_populate_record, json_populate_recordset, - json_to_record and json_to_recordset, - type coercion from the JSON is best effort and may not result - in desired values for some types. JSON keys are matched to - identical column names in the target row type. 
JSON fields that do not - appear in the target row type will be omitted from the output, and - target columns that do not match any JSON field will simply be NULL. + While the examples for the functions + json_populate_record, + json_populate_recordset, + json_to_record and + json_to_recordset use constants, the typical use + would be to reference a table in the FROM clause + and use one of its json or jsonb columns + as an argument to the function. Extracted key values can then be + referenced in other parts of the query, like WHERE + clauses and target lists. Extracting multiple values in this + way can improve performance over extracting them separately with + per-key operators. + + + + JSON keys are matched to identical column names in the target + row type. JSON type coercion for these functions is best + effort and may not result in desired values for some types. + JSON fields that do not appear in the target row type will be + omitted from the output, and target columns that do not match any + JSON field will simply be NULL. - All the items of the path parameter of jsonb_set - as well as jsonb_insert except the last item must be present - in the target. If create_missing is false, all - items of the path parameter of jsonb_set must be - present. If these conditions are not met the target is + All the items of the path parameter of jsonb_set + as well as jsonb_insert except the last item must be present + in the target. If create_missing is false, all + items of the path parameter of jsonb_set must be + present. If these conditions are not met the target is returned unchanged. If the last path item is an object key, it will be created if it is absent and given the new value. If the last path item is an array index, if it is positive the item to set is found by counting from - the left, and if negative by counting from the right - -1 + the left, and if negative by counting from the right - -1 designates the rightmost element, and so on. If the item is out of the range -array_length .. array_length -1, and create_missing is true, the new value is added at the beginning @@ -11842,31 +12109,31 @@ table2-mapping - The json_typeof function's null return value + The json_typeof function's null return value should not be confused with a SQL NULL. While - calling json_typeof('null'::json) will - return null, calling json_typeof(NULL::json) + calling json_typeof('null'::json) will + return null, calling json_typeof(NULL::json) will return a SQL NULL. - If the argument to json_strip_nulls contains duplicate + If the argument to json_strip_nulls contains duplicate field names in any object, the result could be semantically somewhat different, depending on the order in which they occur. This is not an - issue for jsonb_strip_nulls since jsonb values never have + issue for jsonb_strip_nulls since jsonb values never have duplicate object field names. - See also for the aggregate + See also for the aggregate function json_agg which aggregates record values as JSON, and the aggregate function json_object_agg which aggregates pairs of values into a JSON object, and their jsonb equivalents, - jsonb_agg and jsonb_object_agg. + jsonb_agg and jsonb_object_agg. @@ -11894,10 +12161,10 @@ table2-mapping This section describes functions for operating on sequence objects, also called sequence generators or just sequences. Sequence objects are special single-row tables created with . + linkend="sql-createsequence"/>. Sequence objects are commonly used to generate unique identifiers for rows of a table. 
The sequence functions, listed in , provide simple, multiuser-safe + linkend="functions-sequence-table"/>, provide simple, multiuser-safe methods for obtaining successive sequence values from sequence objects. @@ -11943,52 +12210,52 @@ table2-mapping The sequence to be operated on by a sequence function is specified by - a regclass argument, which is simply the OID of the sequence in the - pg_class system catalog. You do not have to look up the - OID by hand, however, since the regclass data type's input + a regclass argument, which is simply the OID of the sequence in the + pg_class system catalog. You do not have to look up the + OID by hand, however, since the regclass data type's input converter will do the work for you. Just write the sequence name enclosed in single quotes so that it looks like a literal constant. For compatibility with the handling of ordinary SQL names, the string will be converted to lower case unless it contains double quotes around the sequence name. Thus: -nextval('foo') operates on sequence foo -nextval('FOO') operates on sequence foo -nextval('"Foo"') operates on sequence Foo +nextval('foo') operates on sequence foo +nextval('FOO') operates on sequence foo +nextval('"Foo"') operates on sequence Foo The sequence name can be schema-qualified if necessary: -nextval('myschema.foo') operates on myschema.foo +nextval('myschema.foo') operates on myschema.foo nextval('"myschema".foo') same as above -nextval('foo') searches search path for foo +nextval('foo') searches search path for foo - See for more information about - regclass. + See for more information about + regclass. Before PostgreSQL 8.1, the arguments of the - sequence functions were of type text, not regclass, and + sequence functions were of type text, not regclass, and the above-described conversion from a text string to an OID value would happen at run time during each call. For backward compatibility, this facility still exists, but internally it is now handled as an implicit - coercion from text to regclass before the function is + coercion from text to regclass before the function is invoked. When you write the argument of a sequence function as an unadorned - literal string, it becomes a constant of type regclass. + literal string, it becomes a constant of type regclass. Since this is really just an OID, it will track the originally identified sequence despite later renaming, schema reassignment, - etc. This early binding behavior is usually desirable for + etc. This early binding behavior is usually desirable for sequence references in column defaults and views. But sometimes you might - want late binding where the sequence reference is resolved + want late binding where the sequence reference is resolved at run time. To get late-binding behavior, force the constant to be - stored as a text constant instead of regclass: + stored as a text constant instead of regclass: -nextval('foo'::text) foo is looked up at runtime +nextval('foo'::text) foo is looked up at runtime Note that late binding was the only behavior supported in PostgreSQL releases before 8.1, so you @@ -12020,7 +12287,7 @@ nextval('foo'::text) foo is looked up at If a sequence object has been created with default parameters, successive nextval calls will return successive values beginning with 1. Other behaviors can be obtained by using - special parameters in the command; + special parameters in the command; see its command reference page for more information. 
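    A minimal sketch of typical usage, assuming a hypothetical sequence named
    serial_demo created with default parameters:

CREATE SEQUENCE serial_demo;
SELECT nextval('serial_demo');        -- 1
SELECT nextval('serial_demo');        -- 2
SELECT currval('serial_demo');        -- 2, the value most recently obtained in this session
SELECT nextval('serial_demo'::text);  -- 3, late binding: the name is looked up at call time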
@@ -12031,14 +12298,14 @@ nextval('foo'::text) foo is looked up at rolled back; that is, once a value has been fetched it is considered used and will not be returned again. This is true even if the surrounding transaction later aborts, or if the calling query ends - up not using the value. For example an INSERT with - an ON CONFLICT clause will compute the to-be-inserted + up not using the value. For example an INSERT with + an ON CONFLICT clause will compute the to-be-inserted tuple, including doing any required nextval calls, before detecting any conflict that would cause it to follow - the ON CONFLICT rule instead. Such cases will leave + the ON CONFLICT rule instead. Such cases will leave unused holes in the sequence of assigned values. - Thus, PostgreSQL sequence objects cannot - be used to obtain gapless sequences. + Thus, PostgreSQL sequence objects cannot + be used to obtain gapless sequences. @@ -12074,7 +12341,7 @@ nextval('foo'::text) foo is looked up at Return the value most recently returned by - nextval in the current session. This function is + nextval in the current session. This function is identical to currval, except that instead of taking the sequence name as an argument it refers to whichever sequence nextval was most recently applied to @@ -12099,20 +12366,20 @@ nextval('foo'::text) foo is looked up at specified value and sets its is_called field to true, meaning that the next nextval will advance the sequence before - returning a value. The value reported by currval is + returning a value. The value reported by currval is also set to the specified value. In the three-parameter form, is_called can be set to either true - or false. true has the same effect as + or false. true has the same effect as the two-parameter form. If it is set to false, the next nextval will return exactly the specified value, and sequence advancement commences with the following nextval. Furthermore, the value reported by - currval is not changed in this case. For example, + currval is not changed in this case. For example, -SELECT setval('foo', 42); Next nextval will return 43 +SELECT setval('foo', 42); Next nextval will return 43 SELECT setval('foo', 42, true); Same as above -SELECT setval('foo', 42, false); Next nextval will return 42 +SELECT setval('foo', 42, false); Next nextval will return 42 The result returned by setval is just the value of its @@ -12157,13 +12424,13 @@ SELECT setval('foo', 42, false); Next nextval wi If your needs go beyond the capabilities of these conditional - expressions, you might want to consider writing a stored procedure + expressions, you might want to consider writing a server-side function in a more expressive programming language. - <literal>CASE</> + <literal>CASE</literal> The SQL CASE expression is a @@ -12186,7 +12453,7 @@ END condition's result is not true, any subsequent WHEN clauses are examined in the same manner. If no WHEN condition yields true, the value of the - CASE expression is the result of the + CASE expression is the result of the ELSE clause. If the ELSE clause is omitted and no condition is true, the result is null. @@ -12221,11 +12488,11 @@ SELECT a, The data types of all the result expressions must be convertible to a single output type. - See for more details. + See for more details. - There is a simple form of CASE expression + There is a simple form of CASE expression that is a variant of the general form above: @@ -12275,11 +12542,11 @@ SELECT ... 
WHERE CASE WHEN x <> 0 THEN y/x > 1.5 ELSE false END; - As described in , there are various + As described in , there are various situations in which subexpressions of an expression are evaluated at different times, so that the principle that CASE evaluates only necessary subexpressions is not ironclad. For - example a constant 1/0 subexpression will usually result in + example a constant 1/0 subexpression will usually result in a division-by-zero failure at planning time, even if it's within a CASE arm that would never be entered at run time. @@ -12287,7 +12554,7 @@ SELECT ... WHERE CASE WHEN x <> 0 THEN y/x > 1.5 ELSE false END; - <literal>COALESCE</> + <literal>COALESCE</literal> COALESCE @@ -12313,8 +12580,8 @@ SELECT ... WHERE CASE WHEN x <> 0 THEN y/x > 1.5 ELSE false END; SELECT COALESCE(description, short_description, '(none)') ... - This returns description if it is not null, otherwise - short_description if it is not null, otherwise (none). + This returns description if it is not null, otherwise + short_description if it is not null, otherwise (none). @@ -12322,13 +12589,13 @@ SELECT COALESCE(description, short_description, '(none)') ... evaluates the arguments that are needed to determine the result; that is, arguments to the right of the first non-null argument are not evaluated. This SQL-standard function provides capabilities similar - to NVL and IFNULL, which are used in some other + to NVL and IFNULL, which are used in some other database systems. - <literal>NULLIF</> + <literal>NULLIF</literal> NULLIF @@ -12349,7 +12616,7 @@ SELECT NULLIF(value, '(none)') ... - In this example, if value is (none), + In this example, if value is (none), null is returned, otherwise the value of value is returned. @@ -12374,17 +12641,17 @@ SELECT NULLIF(value, '(none)') ... - The GREATEST and LEAST functions select the + The GREATEST and LEAST functions select the largest or smallest value from a list of any number of expressions. The expressions must all be convertible to a common data type, which will be the type of the result - (see for details). NULL values + (see for details). NULL values in the list are ignored. The result will be NULL only if all the expressions evaluate to NULL. - Note that GREATEST and LEAST are not in + Note that GREATEST and LEAST are not in the SQL standard, but are a common extension. Some other databases make them return NULL if any argument is NULL, rather than only when all are NULL. @@ -12396,7 +12663,7 @@ SELECT NULLIF(value, '(none)') ... Array Functions and Operators - shows the operators + shows the operators available for array types. @@ -12514,20 +12781,20 @@ SELECT NULLIF(value, '(none)') ... If the contents of two arrays are equal but the dimensionality is different, the first difference in the dimensionality information determines the sort order. (This is a change from versions of - PostgreSQL prior to 8.2: older versions would claim + PostgreSQL prior to 8.2: older versions would claim that two arrays with the same contents were equal, even if the number of dimensions or subscript ranges were different.) - See for more details about array operator - behavior. See for more details about + See for more details about array operator + behavior. See for more details about which operators support indexed operations. - shows the functions - available for use with array types. See + shows the functions + available for use with array types. See for more information and examples of the use of these functions. 
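    A few of the array operators and functions in action (an illustrative
    sketch using literal arrays):

SELECT ARRAY[1,4,3] @> ARRAY[3,1];               -- containment: true
SELECT ARRAY[1,2] || ARRAY[3,4];                 -- concatenation: {1,2,3,4}
SELECT array_length(ARRAY[1,2,3], 1);            -- 3
SELECT array_position(ARRAY['a','b','c'], 'b');  -- 2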
@@ -12643,7 +12910,7 @@ SELECT NULLIF(value, '(none)') ... - array_fill(anyelement, int[], + array_fill(anyelement, int[] , int[]) @@ -12802,7 +13069,7 @@ SELECT NULLIF(value, '(none)') ... setof anyelement, anyelement [, ...] expand multiple arrays (possibly of different types) to a set of rows. This is only allowed in the FROM clause; see - + unnest(ARRAY[1,2],ARRAY['foo','bar','baz']) 1 foo 2 bar @@ -12813,7 +13080,7 @@ NULL baz(3 rows)
- In array_position and array_positions, + In array_position and array_positions, each array element is compared to the searched value using IS NOT DISTINCT FROM semantics. @@ -12848,8 +13115,8 @@ NULL baz(3 rows) - There are two differences in the behavior of string_to_array - from pre-9.1 versions of PostgreSQL. + There are two differences in the behavior of string_to_array + from pre-9.1 versions of PostgreSQL. First, it will return an empty (zero-element) array rather than NULL when the input string is of zero length. Second, if the delimiter string is NULL, the function splits the input into individual characters, rather @@ -12858,7 +13125,7 @@ NULL baz(3 rows) - See also about the aggregate + See also about the aggregate function array_agg for use with arrays. @@ -12867,11 +13134,11 @@ NULL baz(3 rows) Range Functions and Operators - See for an overview of range types. + See for an overview of range types. - shows the operators + shows the operators available for range types. @@ -13046,7 +13313,7 @@ NULL baz(3 rows)
- shows the functions + shows the functions available for use with range types. @@ -13178,7 +13445,7 @@ NULL baz(3 rows) - The lower and upper functions return null + The lower and upper functions return null if the range is empty or the requested bound is infinite. The lower_inc, upper_inc, lower_inf, and upper_inf @@ -13197,18 +13464,18 @@ NULL baz(3 rows) Aggregate functions compute a single result from a set of input values. The built-in general-purpose aggregate - functions are listed in + functions are listed in and statistical aggregates in . + linkend="functions-aggregate-statistics-table"/>. The built-in within-group ordered-set aggregate functions - are listed in + are listed in while the built-in within-group hypothetical-set ones are in . Grouping operations, + linkend="functions-hypothetical-table"/>. Grouping operations, which are closely related to aggregate functions, are listed in - . + . The special syntax considerations for aggregate - functions are explained in . - Consult for additional introductory + functions are explained in . + Consult for additional introductory information. @@ -13530,7 +13797,7 @@ NULL baz(3 rows) smallint, int, bigint, real, double precision, numeric, - interval, or money + interval, or money bigint for smallint or @@ -13556,7 +13823,7 @@ NULL baz(3 rows) xml No - concatenation of XML values (see also ) + concatenation of XML values (see also ) @@ -13627,8 +13894,8 @@ SELECT count(*) FROM sometable; aggregate functions, produce meaningfully different result values depending on the order of the input values. This ordering is unspecified by default, but can be controlled by writing an - ORDER BY clause within the aggregate call, as shown in - . + ORDER BY clause within the aggregate call, as shown in + . Alternatively, supplying the input values from a sorted subquery will usually work. For example: @@ -13642,7 +13909,7 @@ SELECT xmlagg(x) FROM (SELECT x FROM test ORDER BY y DESC) AS tab; - shows + shows aggregate functions typically used in statistical analysis. (These are separated out merely to avoid cluttering the listing of more-commonly-used aggregates.) Where the description mentions @@ -14061,10 +14328,10 @@ SELECT xmlagg(x) FROM (SELECT x FROM test ORDER BY y DESC) AS tab; - shows some - aggregate functions that use the ordered-set aggregate + shows some + aggregate functions that use the ordered-set aggregate syntax. These functions are sometimes referred to as inverse - distribution functions. + distribution functions. @@ -14211,7 +14478,7 @@ SELECT xmlagg(x) FROM (SELECT x FROM test ORDER BY y DESC) AS tab; - All the aggregates listed in + All the aggregates listed in ignore null values in their sorted input. For those that take a fraction parameter, the fraction value must be between 0 and 1; an error is thrown if not. However, a null fraction value @@ -14225,11 +14492,11 @@ SELECT xmlagg(x) FROM (SELECT x FROM test ORDER BY y DESC) AS tab; Each of the aggregates listed in - is associated with a + is associated with a window function of the same name defined in - . In each case, the aggregate result + . In each case, the aggregate result is the value that the associated window function would have - returned for the hypothetical row constructed from + returned for the hypothetical row constructed from args, if such a row had been added to the sorted group of rows computed from the sorted_args. 
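    For instance, rank used as a hypothetical-set aggregate reports the rank a
    candidate value would receive if it were added to the sorted input (a
    sketch over an inline VALUES list):

SELECT rank(35) WITHIN GROUP (ORDER BY x)
FROM (VALUES (10), (20), (40), (50)) AS t(x);    -- 3: the value 35 would sort third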
@@ -14260,10 +14527,10 @@ SELECT xmlagg(x) FROM (SELECT x FROM test ORDER BY y DESC) AS tab; rank(args) WITHIN GROUP (ORDER BY sorted_args) - VARIADIC "any" + VARIADIC "any" - VARIADIC "any" + VARIADIC "any" bigint @@ -14283,10 +14550,10 @@ SELECT xmlagg(x) FROM (SELECT x FROM test ORDER BY y DESC) AS tab; dense_rank(args) WITHIN GROUP (ORDER BY sorted_args) - VARIADIC "any" + VARIADIC "any" - VARIADIC "any" + VARIADIC "any" bigint @@ -14306,10 +14573,10 @@ SELECT xmlagg(x) FROM (SELECT x FROM test ORDER BY y DESC) AS tab; percent_rank(args) WITHIN GROUP (ORDER BY sorted_args) - VARIADIC "any" + VARIADIC "any" - VARIADIC "any" + VARIADIC "any" double precision @@ -14329,10 +14596,10 @@ SELECT xmlagg(x) FROM (SELECT x FROM test ORDER BY y DESC) AS tab; cume_dist(args) WITHIN GROUP (ORDER BY sorted_args) - VARIADIC "any" + VARIADIC "any" - VARIADIC "any" + VARIADIC "any" double precision @@ -14340,7 +14607,7 @@ SELECT xmlagg(x) FROM (SELECT x FROM test ORDER BY y DESC) AS tab; No relative rank of the hypothetical row, ranging from - 1/N to 1 + 1/N to 1 @@ -14354,7 +14621,7 @@ SELECT xmlagg(x) FROM (SELECT x FROM test ORDER BY y DESC) AS tab; the aggregated arguments given in sorted_args. Unlike most built-in aggregates, these aggregates are not strict, that is they do not drop input rows containing nulls. Null values sort according - to the rule specified in the ORDER BY clause. + to the rule specified in the ORDER BY clause. @@ -14392,15 +14659,15 @@ SELECT xmlagg(x) FROM (SELECT x FROM test ORDER BY y DESC) AS tab; Grouping operations are used in conjunction with grouping sets (see - ) to distinguish result rows. The - arguments to the GROUPING operation are not actually evaluated, - but they must match exactly expressions given in the GROUP BY + ) to distinguish result rows. The + arguments to the GROUPING operation are not actually evaluated, + but they must match exactly expressions given in the GROUP BY clause of the associated query level. Bits are assigned with the rightmost argument being the least-significant bit; each bit is 0 if the corresponding expression is included in the grouping criteria of the grouping set generating the result row, and 1 if it is not. For example: -=> SELECT * FROM items_sold; +=> SELECT * FROM items_sold; make | model | sales -------+-------+------- Foo | GT | 10 @@ -14409,7 +14676,7 @@ SELECT xmlagg(x) FROM (SELECT x FROM test ORDER BY y DESC) AS tab; Bar | Sport | 5 (4 rows) -=> SELECT make, model, GROUPING(make,model), sum(sales) FROM items_sold GROUP BY ROLLUP(make,model); +=> SELECT make, model, GROUPING(make,model), sum(sales) FROM items_sold GROUP BY ROLLUP(make,model); make | model | grouping | sum -------+-------+----------+----- Foo | GT | 0 | 10 @@ -14436,16 +14703,16 @@ SELECT xmlagg(x) FROM (SELECT x FROM test ORDER BY y DESC) AS tab; Window functions provide the ability to perform calculations across sets of rows that are related to the current query - row. See for an introduction to this - feature, and for syntax + row. See for an introduction to this + feature, and for syntax details. The built-in window functions are listed in - . Note that these functions - must be invoked using window function syntax, i.e., an - OVER clause is required. + . Note that these functions + must be invoked using window function syntax, i.e., an + OVER clause is required. 
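    For example, invoking ranking functions with the required OVER clause (a
    sketch over an inline VALUES list):

SELECT x,
       row_number() OVER (ORDER BY x) AS rn,
       rank()       OVER (ORDER BY x) AS rnk
FROM (VALUES (10), (20), (20), (30)) AS t(x);
-- rn: 1,2,3,4   rnk: 1,2,2,4 (equal values are peers and share a rank)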
@@ -14453,8 +14720,8 @@ SELECT xmlagg(x) FROM (SELECT x FROM test ORDER BY y DESC) AS tab; general-purpose or statistical aggregate (i.e., not ordered-set or hypothetical-set aggregates) can be used as a window function; see - for a list of the built-in aggregates. - Aggregate functions act as window functions only when an OVER + for a list of the built-in aggregates. + Aggregate functions act as window functions only when an OVER clause follows the call; otherwise they act as non-window aggregates and return a single row for the entire set. @@ -14495,7 +14762,7 @@ SELECT xmlagg(x) FROM (SELECT x FROM test ORDER BY y DESC) AS tab; bigint - rank of the current row with gaps; same as row_number of its first peer + rank of the current row with gaps; same as row_number of its first peer @@ -14521,7 +14788,7 @@ SELECT xmlagg(x) FROM (SELECT x FROM test ORDER BY y DESC) AS tab; double precision - relative rank of the current row: (rank - 1) / (total partition rows - 1) + relative rank of the current row: (rank - 1) / (total partition rows - 1) @@ -14542,7 +14809,7 @@ SELECT xmlagg(x) FROM (SELECT x FROM test ORDER BY y DESC) AS tab; ntile - ntile(num_buckets integer) + ntile(num_buckets integer) integer @@ -14557,9 +14824,9 @@ SELECT xmlagg(x) FROM (SELECT x FROM test ORDER BY y DESC) AS tab; lag - lag(value anyelement - [, offset integer - [, default anyelement ]]) + lag(value anyelement + [, offset integer + [, default anyelement ]]) @@ -14586,9 +14853,9 @@ SELECT xmlagg(x) FROM (SELECT x FROM test ORDER BY y DESC) AS tab; lead - lead(value anyelement - [, offset integer - [, default anyelement ]]) + lead(value anyelement + [, offset integer + [, default anyelement ]]) @@ -14614,7 +14881,7 @@ SELECT xmlagg(x) FROM (SELECT x FROM test ORDER BY y DESC) AS tab; first_value - first_value(value any) + first_value(value any) same type as value @@ -14630,7 +14897,7 @@ SELECT xmlagg(x) FROM (SELECT x FROM test ORDER BY y DESC) AS tab; last_value - last_value(value any) + last_value(value any) same type as value @@ -14647,7 +14914,7 @@ SELECT xmlagg(x) FROM (SELECT x FROM test ORDER BY y DESC) AS tab; nth_value - nth_value(value any, nth integer) + nth_value(value any, nth integer) @@ -14665,58 +14932,59 @@ SELECT xmlagg(x) FROM (SELECT x FROM test ORDER BY y DESC) AS tab; All of the functions listed in - depend on the sort ordering - specified by the ORDER BY clause of the associated window + depend on the sort ordering + specified by the ORDER BY clause of the associated window definition. Rows that are not distinct when considering only the - ORDER BY columns are said to be peers. - The four ranking functions (including cume_dist) are + ORDER BY columns are said to be peers. + The four ranking functions (including cume_dist) are defined so that they give the same answer for all peer rows. - Note that first_value, last_value, and - nth_value consider only the rows within the window - frame, which by default contains the rows from the start of the + Note that first_value, last_value, and + nth_value consider only the rows within the window + frame, which by default contains the rows from the start of the partition through the last peer of the current row. This is - likely to give unhelpful results for last_value and - sometimes also nth_value. You can redefine the frame by - adding a suitable frame specification (RANGE or - ROWS) to the OVER clause. - See for more information + likely to give unhelpful results for last_value and + sometimes also nth_value. 
You can redefine the frame by + adding a suitable frame specification (RANGE, + ROWS or GROUPS) to + the OVER clause. + See for more information about frame specifications. When an aggregate function is used as a window function, it aggregates over the rows within the current row's window frame. - An aggregate used with ORDER BY and the default window frame - definition produces a running sum type of behavior, which may or + An aggregate used with ORDER BY and the default window frame + definition produces a running sum type of behavior, which may or may not be what's wanted. To obtain - aggregation over the whole partition, omit ORDER BY or use - ROWS BETWEEN UNBOUNDED PRECEDING AND UNBOUNDED FOLLOWING. + aggregation over the whole partition, omit ORDER BY or use + ROWS BETWEEN UNBOUNDED PRECEDING AND UNBOUNDED FOLLOWING. Other frame specifications can be used to obtain other effects. - The SQL standard defines a RESPECT NULLS or - IGNORE NULLS option for lead, lag, - first_value, last_value, and - nth_value. This is not implemented in + The SQL standard defines a RESPECT NULLS or + IGNORE NULLS option for lead, lag, + first_value, last_value, and + nth_value. This is not implemented in PostgreSQL: the behavior is always the - same as the standard's default, namely RESPECT NULLS. - Likewise, the standard's FROM FIRST or FROM LAST - option for nth_value is not implemented: only the - default FROM FIRST behavior is supported. (You can achieve - the result of FROM LAST by reversing the ORDER BY + same as the standard's default, namely RESPECT NULLS. + Likewise, the standard's FROM FIRST or FROM LAST + option for nth_value is not implemented: only the + default FROM FIRST behavior is supported. (You can achieve + the result of FROM LAST by reversing the ORDER BY ordering.) - cume_dist computes the fraction of partition rows that + cume_dist computes the fraction of partition rows that are less than or equal to the current row and its peers, while - percent_rank computes the fraction of partition rows that + percent_rank computes the fraction of partition rows that are less than the current row, assuming the current row does not exist in the partition. @@ -14769,12 +15037,12 @@ EXISTS (subquery) - The argument of EXISTS is an arbitrary SELECT statement, + The argument of EXISTS is an arbitrary SELECT statement, or subquery. The subquery is evaluated to determine whether it returns any rows. If it returns at least one row, the result of EXISTS is - true; if the subquery returns no rows, the result of EXISTS - is false. + true; if the subquery returns no rows, the result of EXISTS + is false. @@ -14794,15 +15062,15 @@ EXISTS (subquery) Since the result depends only on whether any rows are returned, and not on the contents of those rows, the output list of the subquery is normally unimportant. A common coding convention is - to write all EXISTS tests in the form + to write all EXISTS tests in the form EXISTS(SELECT 1 WHERE ...). There are exceptions to this rule however, such as subqueries that use INTERSECT. 
- This simple example is like an inner join on col2, but - it produces at most one output row for each tab1 row, - even if there are several matching tab2 rows: + This simple example is like an inner join on col2, but + it produces at most one output row for each tab1 row, + even if there are several matching tab2 rows: SELECT col1 FROM tab1 @@ -14822,8 +15090,8 @@ WHERE EXISTS (SELECT 1 FROM tab2 WHERE col2 = tab1.col2); The right-hand side is a parenthesized subquery, which must return exactly one column. The left-hand expression is evaluated and compared to each row of the subquery result. - The result of IN is true if any equal subquery row is found. - The result is false if no equal row is found (including the + The result of IN is true if any equal subquery row is found. + The result is false if no equal row is found (including the case where the subquery returns no rows). @@ -14846,13 +15114,13 @@ WHERE EXISTS (SELECT 1 FROM tab2 WHERE col2 = tab1.col2); The left-hand side of this form of IN is a row constructor, - as described in . + as described in . The right-hand side is a parenthesized subquery, which must return exactly as many columns as there are expressions in the left-hand row. The left-hand expressions are evaluated and compared row-wise to each row of the subquery result. - The result of IN is true if any equal subquery row is found. - The result is false if no equal row is found (including the + The result of IN is true if any equal subquery row is found. + The result is false if no equal row is found (including the case where the subquery returns no rows). @@ -14878,9 +15146,9 @@ WHERE EXISTS (SELECT 1 FROM tab2 WHERE col2 = tab1.col2); The right-hand side is a parenthesized subquery, which must return exactly one column. The left-hand expression is evaluated and compared to each row of the subquery result. - The result of NOT IN is true if only unequal subquery rows + The result of NOT IN is true if only unequal subquery rows are found (including the case where the subquery returns no rows). - The result is false if any equal row is found. + The result is false if any equal row is found. @@ -14902,14 +15170,14 @@ WHERE EXISTS (SELECT 1 FROM tab2 WHERE col2 = tab1.col2); The left-hand side of this form of NOT IN is a row constructor, - as described in . + as described in . The right-hand side is a parenthesized subquery, which must return exactly as many columns as there are expressions in the left-hand row. The left-hand expressions are evaluated and compared row-wise to each row of the subquery result. - The result of NOT IN is true if only unequal subquery rows + The result of NOT IN is true if only unequal subquery rows are found (including the case where the subquery returns no rows). - The result is false if any equal row is found. + The result is false if any equal row is found. @@ -14937,8 +15205,8 @@ WHERE EXISTS (SELECT 1 FROM tab2 WHERE col2 = tab1.col2); is evaluated and compared to each row of the subquery result using the given operator, which must yield a Boolean result. - The result of ANY is true if any true result is obtained. - The result is false if no true result is found (including the + The result of ANY is true if any true result is obtained. + The result is false if no true result is found (including the case where the subquery returns no rows). 
@@ -14961,29 +15229,29 @@ WHERE EXISTS (SELECT 1 FROM tab2 WHERE col2 = tab1.col2); -row_constructor operator ANY (subquery) -row_constructor operator SOME (subquery) +row_constructor operator ANY (subquery) +row_constructor operator SOME (subquery) The left-hand side of this form of ANY is a row constructor, - as described in . + as described in . The right-hand side is a parenthesized subquery, which must return exactly as many columns as there are expressions in the left-hand row. The left-hand expressions are evaluated and compared row-wise to each row of the subquery result, using the given operator. - The result of ANY is true if the comparison + The result of ANY is true if the comparison returns true for any subquery row. - The result is false if the comparison returns false for every + The result is false if the comparison returns false for every subquery row (including the case where the subquery returns no rows). - The result is NULL if the comparison does not return true for any row, - and it returns NULL for at least one row. + The result is NULL if no comparison with a subquery row returns true, + and at least one comparison returns NULL. - See for details about the meaning + See for details about the meaning of a row constructor comparison. @@ -15001,11 +15269,11 @@ WHERE EXISTS (SELECT 1 FROM tab2 WHERE col2 = tab1.col2); is evaluated and compared to each row of the subquery result using the given operator, which must yield a Boolean result. - The result of ALL is true if all rows yield true + The result of ALL is true if all rows yield true (including the case where the subquery returns no rows). - The result is false if any false result is found. - The result is NULL if the comparison does not return false for any row, - and it returns NULL for at least one row. + The result is false if any false result is found. + The result is NULL if no comparison with a subquery row returns false, + and at least one comparison returns NULL. @@ -15023,23 +15291,23 @@ WHERE EXISTS (SELECT 1 FROM tab2 WHERE col2 = tab1.col2); The left-hand side of this form of ALL is a row constructor, - as described in . + as described in . The right-hand side is a parenthesized subquery, which must return exactly as many columns as there are expressions in the left-hand row. The left-hand expressions are evaluated and compared row-wise to each row of the subquery result, using the given operator. - The result of ALL is true if the comparison + The result of ALL is true if the comparison returns true for all subquery rows (including the case where the subquery returns no rows). - The result is false if the comparison returns false for any + The result is false if the comparison returns false for any subquery row. - The result is NULL if the comparison does not return false for any - subquery row, and it returns NULL for at least one row. + The result is NULL if no comparison with a subquery row returns false, + and at least one comparison returns NULL. - See for details about the meaning + See for details about the meaning of a row constructor comparison. @@ -15058,7 +15326,7 @@ WHERE EXISTS (SELECT 1 FROM tab2 WHERE col2 = tab1.col2); The left-hand side is a row constructor, - as described in . + as described in . The right-hand side is a parenthesized subquery, which must return exactly as many columns as there are expressions in the left-hand row. Furthermore, the subquery cannot return more than one row. 
(If it returns zero rows, @@ -15067,7 +15335,7 @@ WHERE EXISTS (SELECT 1 FROM tab2 WHERE col2 = tab1.col2); - See for details about the meaning + See for details about the meaning of a row constructor comparison. @@ -15145,7 +15413,7 @@ WHERE EXISTS (SELECT 1 FROM tab2 WHERE col2 = tab1.col2); The right-hand side is a parenthesized list - of scalar expressions. The result is true if the left-hand expression's + of scalar expressions. The result is true if the left-hand expression's result is equal to any of the right-hand expressions. This is a shorthand notation for @@ -15223,8 +15491,8 @@ AND is evaluated and compared to each element of the array using the given operator, which must yield a Boolean result. - The result of ANY is true if any true result is obtained. - The result is false if no true result is found (including the + The result of ANY is true if any true result is obtained. + The result is false if no true result is found (including the case where the array has zero elements). @@ -15259,9 +15527,9 @@ AND is evaluated and compared to each element of the array using the given operator, which must yield a Boolean result. - The result of ALL is true if all comparisons yield true + The result of ALL is true if all comparisons yield true (including the case where the array has zero elements). - The result is false if any false result is found. + The result is false if any false result is found. @@ -15286,16 +15554,16 @@ AND Each side is a row constructor, - as described in . + as described in . The two row values must have the same number of fields. Each side is evaluated and they are compared row-wise. Row constructor comparisons are allowed when the operator is - =, - <>, - <, - <=, - > or - >=. + =, + <>, + <, + <=, + > or + >=. Every row element must be of a type which has a default B-tree operator class or the attempted comparison may generate an error. @@ -15308,7 +15576,7 @@ AND - The = and <> cases work slightly differently + The = and <> cases work slightly differently from the others. Two rows are considered equal if all their corresponding members are non-null and equal; the rows are unequal if any corresponding members are non-null and unequal; @@ -15316,13 +15584,13 @@ AND - For the <, <=, > and - >= cases, the row elements are compared left-to-right, + For the <, <=, > and + >= cases, the row elements are compared left-to-right, stopping as soon as an unequal or null pair of elements is found. If either of this pair of elements is null, the result of the row comparison is unknown (null); otherwise comparison of this pair of elements determines the result. For example, - ROW(1,2,NULL) < ROW(1,3,0) + ROW(1,2,NULL) < ROW(1,3,0) yields true, not null, because the third pair of elements are not considered. @@ -15330,13 +15598,13 @@ AND Prior to PostgreSQL 8.2, the - <, <=, > and >= + <, <=, > and >= cases were not handled per SQL specification. A comparison like - ROW(a,b) < ROW(c,d) + ROW(a,b) < ROW(c,d) was implemented as - a < c AND b < d + a < c AND b < d whereas the correct behavior is equivalent to - a < c OR (a = c AND b < d). + a < c OR (a = c AND b < d). @@ -15378,8 +15646,8 @@ AND result depends on comparing two NULL values or a NULL and a non-NULL. PostgreSQL does this only when comparing the results of two row constructors (as in - ) or comparing a row constructor - to the output of a subquery (as in ). + ) or comparing a row constructor + to the output of a subquery (as in ). 
In other contexts where two composite-type values are compared, two NULL field values are considered equal, and a NULL is considered larger than a non-NULL. This is necessary in order to have consistent sorting @@ -15389,30 +15657,30 @@ AND Each side is evaluated and they are compared row-wise. Composite type comparisons are allowed when the operator is - =, - <>, - <, - <=, - > or - >=, + =, + <>, + <, + <=, + > or + >=, or has semantics similar to one of these. (To be specific, an operator can be a row comparison operator if it is a member of a B-tree operator - class, or is the negator of the = member of a B-tree operator + class, or is the negator of the = member of a B-tree operator class.) The default behavior of the above operators is the same as for IS [ NOT ] DISTINCT FROM for row constructors (see - ). + ). To support matching of rows which include elements without a default B-tree operator class, the following operators are defined for composite type comparison: - *=, - *<>, - *<, - *<=, - *>, and - *>=. + *=, + *<>, + *<, + *<=, + *>, and + *>=. These operators compare the internal binary representation of the two rows. Two rows might have a different binary representation even though comparisons of the two rows with the equality operator is true. @@ -15440,10 +15708,10 @@ AND This section describes functions that possibly return more than one row. The most widely used functions in this class are series generating - functions, as detailed in and - . Other, more specialized + functions, as detailed in and + . Other, more specialized set-returning functions are described elsewhere in this manual. - See for ways to combine multiple + See for ways to combine multiple set-returning functions. @@ -15481,7 +15749,7 @@ AND - generate_series(start, stop, step interval) + generate_series(start, stop, step interval) timestamp or timestamp with time zone setof timestamp or setof timestamp with time zone (same as argument type) @@ -15596,7 +15864,7 @@ SELECT * FROM generate_series('2008-03-01 00:00'::timestamp, - generate_subscripts is a convenience function that generates + generate_subscripts is a convenience function that generates the set of valid subscripts for the specified dimension of the given array. Zero rows are returned for arrays that do not have the requested dimension, @@ -15661,7 +15929,7 @@ SELECT * FROM unnest2(ARRAY[[1,2],[3,4]]); by WITH ORDINALITY, a bigint column is appended to the output which starts from 1 and increments by 1 for each row of the function's output. This is most useful in the case of set returning - functions such as unnest(). + functions such as unnest(). -- set returning function WITH ORDINALITY @@ -15694,17 +15962,17 @@ SELECT * FROM pg_ls_dir('.') WITH ORDINALITY AS t(ls,n); - System Information Functions + System Information Functions and Operators - shows several + shows several functions that extract session and system information. In addition to the functions listed in this section, there are a number of functions related to the statistics system that also provide system - information. See for more + information. See for more information. @@ -15805,7 +16073,7 @@ SELECT * FROM pg_ls_dir('.') WITH ORDINALITY AS t(ls,n); - pg_current_logfile(text) + pg_current_logfile(text) text Primary log file name, or log in the requested format, currently in use by the logging collector @@ -15823,6 +16091,14 @@ SELECT * FROM pg_ls_dir('.') WITH ORDINALITY AS t(ls,n); is schema another session's temporary schema? 
+ + pg_jit_available() + boolean + is JIT compilation available in this session + (see )? Returns false if is set to false. + + pg_listening_channels() setof text @@ -15850,7 +16126,7 @@ SELECT * FROM pg_ls_dir('.') WITH ORDINALITY AS t(ls,n); pg_trigger_depth() int - current nesting level of PostgreSQL triggers + current nesting level of PostgreSQL triggers (0 if not called, directly or indirectly, from inside a trigger) @@ -15869,7 +16145,7 @@ SELECT * FROM pg_ls_dir('.') WITH ORDINALITY AS t(ls,n); version() text - PostgreSQL version information. See also for a machine-readable version. + PostgreSQL version information. See also for a machine-readable version. @@ -15947,11 +16223,11 @@ SELECT * FROM pg_ls_dir('.') WITH ORDINALITY AS t(ls,n); The session_user is normally the user who initiated the current database connection; but superusers can change this setting - with . + with . The current_user is the user identifier that is applicable for permission checking. Normally it is equal to the session user, but it can be changed with - . + . It also changes during the execution of functions with the attribute SECURITY DEFINER. In Unix parlance, the session user is the real user and @@ -15959,7 +16235,7 @@ SELECT * FROM pg_ls_dir('.') WITH ORDINALITY AS t(ls,n); current_role and user are synonyms for current_user. (The SQL standard draws a distinction between current_role - and current_user, but PostgreSQL + and current_user, but PostgreSQL does not, since it unifies users and roles into a single kind of entity.) @@ -15970,7 +16246,7 @@ SELECT * FROM pg_ls_dir('.') WITH ORDINALITY AS t(ls,n); other named objects that are created without specifying a target schema. current_schemas(boolean) returns an array of the names of all schemas presently in the search path. The Boolean option determines whether or not - implicitly included system schemas such as pg_catalog are included in the + implicitly included system schemas such as pg_catalog are included in the returned search path. @@ -15978,7 +16254,7 @@ SELECT * FROM pg_ls_dir('.') WITH ORDINALITY AS t(ls,n); The search path can be altered at run time. The command is: -SET search_path TO schema , schema, ... +SET search_path TO schema , schema, ... @@ -16023,7 +16299,7 @@ SET search_path TO schema , schema, .. waiting for a lock that would conflict with the blocked process's lock request and is ahead of it in the wait queue (soft block). When using parallel queries the result always lists client-visible process IDs (that - is, pg_backend_pid results) even if the actual lock is held + is, pg_backend_pid results) even if the actual lock is held or awaited by a child worker process. As a result of that, there may be duplicated PIDs in the result. Also note that when a prepared transaction holds a conflicting lock, it will be represented by a zero process ID in @@ -16070,20 +16346,20 @@ SET search_path TO schema , schema, .. pg_current_logfile returns, as text, the path of the log file(s) currently in use by the logging collector. - The path includes the directory + The path includes the directory and the log file name. Log collection must be enabled or the return value is NULL. When multiple log files exist, each in a different format, pg_current_logfile called without arguments returns the path of the file having the first format - found in the ordered list: stderr, csvlog. + found in the ordered list: stderr, csvlog. NULL is returned when no log file has any of these formats. 
To request a specific file format supply, as text, - either csvlog or stderr as the value of the + either csvlog or stderr as the value of the optional parameter. The return value is NULL when the log format requested is not a configured - . The + . The pg_current_logfiles reflects the contents of the - current_logfiles file. + current_logfiles file. @@ -16119,7 +16395,7 @@ SET search_path TO schema , schema, .. fraction of the total available space for notifications currently occupied by notifications that are waiting to be processed, as a double in the range 0-1. - See and + See and for more information. @@ -16145,7 +16421,7 @@ SET search_path TO schema , schema, .. running a SERIALIZABLE transaction blocks a SERIALIZABLE READ ONLY DEFERRABLE transaction from acquiring a snapshot until the latter determines that it is safe to avoid - taking any predicate locks. See for + taking any predicate locks. See for more information about serializable and deferrable transactions. Frequent calls to this function could have some impact on database performance, because it needs access to the predicate lock manager's shared @@ -16159,10 +16435,10 @@ SET search_path TO schema , schema, .. version returns a string describing the PostgreSQL server's version. You can also - get this information from or - for a machine-readable version, . + get this information from or + for a machine-readable version, . Software developers should use server_version_num - (available since 8.2) or instead + (available since 8.2) or instead of parsing the text version. @@ -16172,9 +16448,9 @@ SET search_path TO schema , schema, .. - lists functions that + lists functions that allow the user to query object access privileges programmatically. - See for more information about + See for more information about privileges. @@ -16440,7 +16716,7 @@ SET search_path TO schema , schema, .. has_table_privilege checks whether a user can access a table in a particular way. The user can be specified by name, by OID (pg_authid.oid), - public to indicate the PUBLIC pseudo-role, or if the argument is + public to indicate the PUBLIC pseudo-role, or if the argument is omitted current_user is assumed. The table can be specified by name or by OID. (Thus, there are actually six variants of @@ -16450,12 +16726,12 @@ SET search_path TO schema , schema, .. The desired access privilege type is specified by a text string, which must evaluate to one of the values SELECT, INSERT, - UPDATE, DELETE, TRUNCATE, + UPDATE, DELETE, TRUNCATE, REFERENCES, or TRIGGER. Optionally, - WITH GRANT OPTION can be added to a privilege type to test + WITH GRANT OPTION can be added to a privilege type to test whether the privilege is held with grant option. Also, multiple privilege types can be listed separated by commas, in which case the result will - be true if any of the listed privileges is held. + be true if any of the listed privileges is held. (Case of the privilege string is not significant, and extra whitespace is allowed between but not within privilege names.) Some examples: @@ -16479,7 +16755,7 @@ SELECT has_table_privilege('joe', 'mytable', 'INSERT, SELECT WITH GRANT OPTION') has_any_column_privilege checks whether a user can access any column of a table in a particular way. 
Its argument possibilities - are analogous to has_table_privilege, + are analogous to has_table_privilege, except that the desired access privilege type must evaluate to some combination of SELECT, @@ -16488,8 +16764,8 @@ SELECT has_table_privilege('joe', 'mytable', 'INSERT, SELECT WITH GRANT OPTION') REFERENCES. Note that having any of these privileges at the table level implicitly grants it for each column of the table, so has_any_column_privilege will always return - true if has_table_privilege does for the same - arguments. But has_any_column_privilege also succeeds if + true if has_table_privilege does for the same + arguments. But has_any_column_privilege also succeeds if there is a column-level grant of the privilege for at least one column. @@ -16527,8 +16803,8 @@ SELECT has_table_privilege('joe', 'mytable', 'INSERT, SELECT WITH GRANT OPTION') Its argument possibilities are analogous to has_table_privilege. When specifying a function by a text string rather than by OID, - the allowed input is the same as for the regprocedure data type - (see ). + the allowed input is the same as for the regprocedure data type + (see ). The desired access privilege type must evaluate to EXECUTE. An example is: @@ -16589,8 +16865,8 @@ SELECT has_function_privilege('joeuser', 'myfunc(int, text)', 'execute'); Its argument possibilities are analogous to has_table_privilege. When specifying a type by a text string rather than by OID, - the allowed input is the same as for the regtype data type - (see ). + the allowed input is the same as for the regtype data type + (see ). The desired access privilege type must evaluate to USAGE. @@ -16600,14 +16876,14 @@ SELECT has_function_privilege('joeuser', 'myfunc(int, text)', 'execute'); can access a role in a particular way. Its argument possibilities are analogous to has_table_privilege, - except that public is not allowed as a user name. + except that public is not allowed as a user name. The desired access privilege type must evaluate to some combination of MEMBER or USAGE. MEMBER denotes direct or indirect membership in - the role (that is, the right to do SET ROLE), while + the role (that is, the right to do SET ROLE), while USAGE denotes whether the privileges of the role - are immediately available without doing SET ROLE. + are immediately available without doing SET ROLE. @@ -16618,8 +16894,143 @@ SELECT has_function_privilege('joeuser', 'myfunc(int, text)', 'execute'); - shows functions that - determine whether a certain object is visible in the + shows the operators + available for the aclitem type, which is the internal + representation of access privileges. An aclitem entry + describes the permissions of a grantee, whether they are grantable + or not, and which grantor granted them. For instance, + calvin=r*w/hobbes specifies that the role + calvin has the grantable privilege + SELECT (r*) and the non-grantable + privilege UPDATE (w), granted by + the role hobbes. An empty grantee stands for + PUBLIC. + + + + aclitem + + + acldefault + + + aclitemeq + + + aclcontains + + + aclexplode + + + makeaclitem + + +
+ <type>aclitem</type> Operators + + + + Operator + Description + Example + Result + + + + + + = + equal + 'calvin=r*w/hobbes'::aclitem = 'calvin=r*w*/hobbes'::aclitem + f + + + + @> + contains element + '{calvin=r*w/hobbes,hobbes=r*w*/postgres}'::aclitem[] @> 'calvin=r*w/hobbes'::aclitem + t + + + + ~ + contains element + '{calvin=r*w/hobbes,hobbes=r*w*/postgres}'::aclitem[] ~ 'calvin=r*w/hobbes'::aclitem + t + + + + +
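As a usage sketch of the operators above, written out as runnable statements; note that aclitem input resolves role names, so these assume roles named calvin and hobbes actually exist:

SELECT 'calvin=r*w/hobbes'::aclitem = 'calvin=r*w*/hobbes'::aclitem;                          -- f
SELECT '{calvin=r*w/hobbes,hobbes=r*w*/postgres}'::aclitem[] @> 'calvin=r*w/hobbes'::aclitem; -- t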
+ + + shows some additional + functions to manage the aclitem type. + + + + <type>aclitem</type> Functions + + + Name Return Type Description + + + + acldefault(type, + ownerId) + aclitem[] + get the hardcoded default access privileges for an object belonging to ownerId + + + aclexplode(aclitem[]) + setof record + get aclitem array as tuples + + + makeaclitem(grantee, grantor, privilege, grantable) + aclitem + build an aclitem from input + + + +
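As a rough usage sketch, the functions above can be combined with the catalogs to expand a relation's ACL; mytable is only a placeholder name here, and a NULL relacl (meaning just the built-in default privileges apply) simply yields no rows:

SELECT a.grantor::regrole, a.grantee::regrole, a.privilege_type, a.is_grantable
FROM pg_class c, LATERAL aclexplode(c.relacl) AS a
WHERE c.relname = 'mytable';   -- a grantee OID of 0 stands for PUBLIC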
+ + acldefault returns the hardcoded default access privileges + for an object of type belonging to role ownerId. + Notice that these are used in the absence of any pg_default_acl + () entry. Default access privileges are described in + and can be overwritten with + . In other words, this function will return + results which may be misleading when the defaults have been overridden. + Type is a CHAR: use + 'c' for COLUMN, + 'r' for relation-like objects such as TABLE or VIEW, + 's' for SEQUENCE, + 'd' for DATABASE, + 'f' for FUNCTION or PROCEDURE, + 'l' for LANGUAGE, + 'L' for LARGE OBJECT, + 'n' for SCHEMA, + 't' for TABLESPACE, + 'F' for FOREIGN DATA WRAPPER, + 'S' for FOREIGN SERVER, + 'T' for TYPE or DOMAIN. + + + + aclexplode returns an aclitem array + as a set of rows. Output columns are grantor oid, + grantee oid (0 for PUBLIC), + granted privilege as text (SELECT, ...) + and whether the privilege is grantable as boolean. + makeaclitem performs the inverse operation. + + + + shows functions that + determine whether a certain object is visible in the + current schema search path. For example, a table is said to be visible if its containing schema is in the search path and no table of the same @@ -16770,19 +17181,21 @@ SELECT relname FROM pg_class WHERE pg_table_is_visible(oid); Each function performs the visibility check for one type of database object. Note that pg_table_is_visible can also be used with views, materialized views, indexes, sequences and foreign tables; + pg_function_is_visible can also be used with + procedures and aggregates; pg_type_is_visible can also be used with domains. For functions and operators, an object in the search path is visible if there is no object of the same name - and argument data type(s) earlier in the path. For operator + and argument data type(s) earlier in the path. For operator classes, both name and associated index access method are considered. All these functions require object OIDs to identify the object to be checked. If you want to test an object by name, it is convenient to use - the OID alias types (regclass, regtype, - regprocedure, regoperator, regconfig, - or regdictionary), + the OID alias types (regclass, regtype, + regprocedure, regoperator, regconfig, + or regdictionary), for example: SELECT pg_type_is_visible('myschema.widget'::regtype); @@ -16916,7 +17329,7 @@ SELECT pg_type_is_visible('myschema.widget'::regtype);
- lists functions that + lists functions that extract information from the system catalogs. @@ -16929,7 +17342,7 @@ SELECT pg_type_is_visible('myschema.widget'::regtype); - format_type(type_oid, typemod) + format_type(type_oid, typemod) text get SQL name of a data type @@ -16939,18 +17352,18 @@ SELECT pg_type_is_visible('myschema.widget'::regtype); get definition of a constraint - pg_get_constraintdef(constraint_oid, pretty_bool) + pg_get_constraintdef(constraint_oid, pretty_bool) text get definition of a constraint - pg_get_expr(pg_node_tree, relation_oid) + pg_get_expr(pg_node_tree, relation_oid) text decompile internal form of an expression, assuming that any Vars in it refer to the relation indicated by the second parameter - pg_get_expr(pg_node_tree, relation_oid, pretty_bool) + pg_get_expr(pg_node_tree, relation_oid, pretty_bool) text decompile internal form of an expression, assuming that any Vars in it refer to the relation indicated by the second parameter @@ -16958,34 +17371,34 @@ SELECT pg_type_is_visible('myschema.widget'::regtype); pg_get_functiondef(func_oid) text - get definition of a function + get definition of a function or procedure pg_get_function_arguments(func_oid) text - get argument list of function's definition (with default values) + get argument list of function's or procedure's definition (with default values) pg_get_function_identity_arguments(func_oid) text - get argument list to identify a function (without default values) + get argument list to identify a function or procedure (without default values) pg_get_function_result(func_oid) text - get RETURNS clause for function + get RETURNS clause for function (returns null for a procedure) pg_get_indexdef(index_oid) text - get CREATE INDEX command for index + get CREATE INDEX command for index - pg_get_indexdef(index_oid, column_no, pretty_bool) + pg_get_indexdef(index_oid, column_no, pretty_bool) text - get CREATE INDEX command for index, + get CREATE INDEX command for index, or definition of just one index column when - column_no is not zero + column_no is not zero pg_get_keywords() @@ -16995,33 +17408,32 @@ SELECT pg_type_is_visible('myschema.widget'::regtype); pg_get_ruledef(rule_oid) text - get CREATE RULE command for rule + get CREATE RULE command for rule - pg_get_ruledef(rule_oid, pretty_bool) + pg_get_ruledef(rule_oid, pretty_bool) text - get CREATE RULE command for rule + get CREATE RULE command for rule pg_get_serial_sequence(table_name, column_name) text - get name of the sequence that a serial, smallserial or bigserial column - uses + get name of the sequence that a serial or identity column uses pg_get_statisticsobjdef(statobj_oid) text - get CREATE STATISTICS command for extended statistics object + get CREATE STATISTICS command for extended statistics object pg_get_triggerdef(trigger_oid) text - get CREATE [ CONSTRAINT ] TRIGGER command for trigger + get CREATE [ CONSTRAINT ] TRIGGER command for trigger - pg_get_triggerdef(trigger_oid, pretty_bool) + pg_get_triggerdef(trigger_oid, pretty_bool) text - get CREATE [ CONSTRAINT ] TRIGGER command for trigger + get CREATE [ CONSTRAINT ] TRIGGER command for trigger pg_get_userbyid(role_oid) @@ -17034,7 +17446,7 @@ SELECT pg_type_is_visible('myschema.widget'::regtype); get underlying SELECT command for view or materialized view (deprecated) - pg_get_viewdef(view_name, pretty_bool) + pg_get_viewdef(view_name, pretty_bool) text get underlying SELECT command for view or materialized view (deprecated) @@ -17044,29 +17456,29 @@ SELECT 
pg_type_is_visible('myschema.widget'::regtype); get underlying SELECT command for view or materialized view - pg_get_viewdef(view_oid, pretty_bool) + pg_get_viewdef(view_oid, pretty_bool) text get underlying SELECT command for view or materialized view - pg_get_viewdef(view_oid, wrap_column_int) + pg_get_viewdef(view_oid, wrap_column_int) text get underlying SELECT command for view or materialized view; lines with fields are wrapped to specified number of columns, pretty-printing is implied - pg_index_column_has_property(index_oid, column_no, prop_name) + pg_index_column_has_property(index_oid, column_no, prop_name) boolean test whether an index column has a specified property - pg_index_has_property(index_oid, prop_name) + pg_index_has_property(index_oid, prop_name) boolean test whether an index has a specified property - pg_indexam_has_property(am_oid, prop_name) + pg_indexam_has_property(am_oid, prop_name) boolean test whether an index access method has a specified property @@ -17147,11 +17559,11 @@ SELECT pg_type_is_visible('myschema.widget'::regtype); pg_get_keywords returns a set of records describing - the SQL keywords recognized by the server. The word column - contains the keyword. The catcode column contains a - category code: U for unreserved, C for column name, - T for type or function name, or R for reserved. - The catdesc column contains a possibly-localized string + the SQL keywords recognized by the server. The word column + contains the keyword. The catcode column contains a + category code: U for unreserved, C for column name, + T for type or function name, or R for reserved. + The catdesc column contains a possibly-localized string describing the category. @@ -17168,45 +17580,53 @@ SELECT pg_type_is_visible('myschema.widget'::regtype); catalogs. If the expression might contain Vars, specify the OID of the relation they refer to as the second parameter; if no Vars are expected, zero is sufficient. pg_get_viewdef reconstructs the - SELECT query that defines a view. Most of these functions come - in two variants, one of which can optionally pretty-print the + SELECT query that defines a view. Most of these functions come + in two variants, one of which can optionally pretty-print the result. The pretty-printed format is more readable, but the default format is more likely to be interpreted the same way by future versions of - PostgreSQL; avoid using pretty-printed output for dump - purposes. Passing false for the pretty-print parameter yields + PostgreSQL; avoid using pretty-printed output for dump + purposes. Passing false for the pretty-print parameter yields the same result as the variant that does not have the parameter at all.
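For instance, a minimal sketch of the two pretty-printing variants, run against the built-in pg_tables view (any view's OID or regclass name would do):

SELECT pg_get_viewdef('pg_tables'::regclass, true);   -- pretty-printed, for human reading
SELECT pg_get_viewdef('pg_tables'::regclass, false);  -- default format, preferable for dump purposes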
- pg_get_functiondef returns a complete - CREATE OR REPLACE FUNCTION statement for a function. + pg_get_functiondef returns a complete + CREATE OR REPLACE FUNCTION statement for a function. pg_get_function_arguments returns the argument list of a function, in the form it would need to appear in within - CREATE FUNCTION. + CREATE FUNCTION. pg_get_function_result similarly returns the - appropriate RETURNS clause for the function. + appropriate RETURNS clause for the function. pg_get_function_identity_arguments returns the argument list necessary to identify a function, in the form it - would need to appear in within ALTER FUNCTION, for + would need to appear in within ALTER FUNCTION, for instance. This form omits default values. pg_get_serial_sequence returns the name of the sequence associated with a column, or NULL if no sequence is associated - with the column. The first input parameter is a table name with - optional schema, and the second parameter is a column name. Because - the first parameter is potentially a schema and table, it is not treated - as a double-quoted identifier, meaning it is lower cased by default, - while the second parameter, being just a column name, is treated as - double-quoted and has its case preserved. The function returns a value - suitably formatted for passing to sequence functions (see ). This association can be modified or - removed with ALTER SEQUENCE OWNED BY. (The function - probably should have been called - pg_get_owned_sequence; its current name reflects the fact - that it's typically used with serial or bigserial - columns.) + with the column. If the column is an identity column, the associated + sequence is the sequence internally created for the identity column. For + columns created using one of the serial types + (serial, smallserial, bigserial), it + is the sequence created for that serial column definition. In the latter + case, this association can be modified or removed with ALTER + SEQUENCE OWNED BY. (The function probably should have been called + pg_get_owned_sequence; its current name reflects the + fact that it has typically been used with serial + or bigserial columns.) The first input parameter is a table name + with optional schema, and the second parameter is a column name. Because + the first parameter is potentially a schema and table, it is not treated as + a double-quoted identifier, meaning it is lower cased by default, while the + second parameter, being just a column name, is treated as double-quoted and + has its case preserved. The function returns a value suitably formatted + for passing to sequence functions + (see ). A typical use is in reading the + current value of a sequence for an identity or serial column, for example: + +SELECT currval(pg_get_serial_sequence('sometable', 'id')); + @@ -17222,9 +17642,9 @@ SELECT pg_type_is_visible('myschema.widget'::regtype); property. NULL is returned if the property name is not known or does not apply to the particular object, or if the OID or column number does not identify a valid object. Refer to - for column properties, - for index properties, and - for access method properties. + for column properties, + for index properties, and + for access method properties. (Note that extension access methods can define additional property names for their indexes.) @@ -17263,8 +17683,8 @@ SELECT pg_type_is_visible('myschema.widget'::regtype); distance_orderable - Can the column be scanned in order by a distance - operator, for example ORDER BY col <-> constant ? 
+ Can the column be scanned in order by a distance + operator, for example ORDER BY col <-> constant ? @@ -17274,14 +17694,14 @@ SELECT pg_type_is_visible('myschema.widget'::regtype); search_array - Does the column natively support col = ANY(array) + Does the column natively support col = ANY(array) searches? search_nulls - Does the column support IS NULL and - IS NOT NULL searches? + Does the column support IS NULL and + IS NOT NULL searches? @@ -17297,7 +17717,7 @@ SELECT pg_type_is_visible('myschema.widget'::regtype); clusterable - Can the index be used in a CLUSTER command? + Can the index be used in a CLUSTER command? @@ -17312,7 +17732,9 @@ SELECT pg_type_is_visible('myschema.widget'::regtype); backward_scan - Can the index be scanned backwards? + Can the scan direction be changed in mid-scan (to + support FETCH BACKWARD on a cursor without + needing materialization)? @@ -17328,9 +17750,9 @@ SELECT pg_type_is_visible('myschema.widget'::regtype); can_order - Does the access method support ASC, - DESC and related keywords in - CREATE INDEX? + Does the access method support ASC, + DESC and related keywords in + CREATE INDEX? @@ -17348,6 +17770,12 @@ SELECT pg_type_is_visible('myschema.widget'::regtype); Does the access method support exclusion constraints? + + can_include + Does the access method support the INCLUDE + clause of CREATE INDEX? + + @@ -17355,9 +17783,9 @@ SELECT pg_type_is_visible('myschema.widget'::regtype); pg_options_to_table returns the set of storage option name/value pairs - (option_name/option_value) when passed - pg_class.reloptions or - pg_attribute.attoptions. + (option_name/option_value) when passed + pg_class.reloptions or + pg_attribute.attoptions. @@ -17367,15 +17795,15 @@ SELECT pg_type_is_visible('myschema.widget'::regtype); empty and cannot be dropped. To display the specific objects populating the tablespace, you will need to connect to the databases identified by pg_tablespace_databases and query their - pg_class catalogs. + pg_class catalogs. pg_typeof returns the OID of the data type of the value that is passed to it. This can be helpful for troubleshooting or dynamically constructing SQL queries. The function is declared as - returning regtype, which is an OID alias type (see - ); this means that it is the same as an + returning regtype, which is an OID alias type (see + ); this means that it is the same as an OID for comparison purposes but displays as a type name. For example: SELECT pg_typeof(33); @@ -17420,10 +17848,10 @@ SELECT collation for ('foo' COLLATE "de_DE"); to_regoperator, to_regtype, to_regnamespace, and to_regrole functions translate relation, function, operator, type, schema, and role - names (given as text) to objects of - type regclass, regproc, regprocedure, - regoper, regoperator, regtype, - regnamespace, and regrole + names (given as text) to objects of + type regclass, regproc, regprocedure, + regoper, regoperator, regtype, + regnamespace, and regrole respectively. These functions differ from a cast from text in that they don't accept a numeric OID, and that they return null rather than throwing an error if the name is not found (or, for @@ -17448,7 +17876,7 @@ SELECT collation for ('foo' COLLATE "de_DE"); - lists functions related to + lists functions related to database object identification and addressing. 
@@ -17461,24 +17889,24 @@ SELECT collation for ('foo' COLLATE "de_DE"); - pg_describe_object(catalog_id, object_id, object_sub_id) + pg_describe_object(classid oid, objid oid, objsubid integer) text get description of a database object - pg_identify_object(catalog_id oid, object_id oid, object_sub_id integer) - type text, schema text, name text, identity text + pg_identify_object(classid oid, objid oid, objsubid integer) + type text, schema text, name text, identity text get identity of a database object - pg_identify_object_as_address(catalog_id oid, object_id oid, object_sub_id integer) - type text, name text[], args text[] + pg_identify_object_as_address(classid oid, objid oid, objsubid integer) + type text, object_names text[], object_args text[] get external representation of a database object's address - pg_get_object_address(type text, name text[], args text[]) - catalog_id oid, object_id oid, object_sub_id int32 - get address of a database object, from its external representation + pg_get_object_address(type text, object_names text[], object_args text[]) + classid oid, objid oid, objsubid integer + get address of a database object from its external representation @@ -17486,7 +17914,9 @@ SELECT collation for ('foo' COLLATE "de_DE"); pg_describe_object returns a textual description of a database - object specified by catalog OID, object OID and a (possibly zero) sub-object ID. + object specified by catalog OID, object OID, and sub-object ID (such as + a column number within a table; the sub-object ID is zero when referring + to a whole object). This description is intended to be human-readable, and might be translated, depending on server configuration. This is useful to determine the identity of an object as stored in the @@ -17495,30 +17925,31 @@ SELECT collation for ('foo' COLLATE "de_DE"); pg_identify_object returns a row containing enough information - to uniquely identify the database object specified by catalog OID, object OID and a - (possibly zero) sub-object ID. This information is intended to be machine-readable, + to uniquely identify the database object specified by catalog OID, object OID and + sub-object ID. This information is intended to be machine-readable, and is never translated. - type identifies the type of database object; - schema is the schema name that the object belongs in, or - NULL for object types that do not belong to schemas; - name is the name of the object, quoted if necessary, only - present if it can be used (alongside schema name, if pertinent) as a unique - identifier of the object, otherwise NULL; - identity is the complete object identity, with the precise format - depending on object type, and each part within the format being - schema-qualified and quoted as necessary. + type identifies the type of database object; + schema is the schema name that the object belongs in, or + NULL for object types that do not belong to schemas; + name is the name of the object, quoted if necessary, + if the name (along with schema name, if pertinent) is sufficient to + uniquely identify the object, otherwise NULL; + identity is the complete object identity, with the + precise format depending on object type, and each name within the format + being schema-qualified and quoted as necessary. pg_identify_object_as_address returns a row containing enough information to uniquely identify the database object specified by - catalog OID, object OID and a (possibly zero) sub-object ID. The returned + catalog OID, object OID and sub-object ID. 
The returned information is independent of the current server, that is, it could be used to identify an identically named object in another server. - type identifies the type of database object; - name and args are text arrays that together - form a reference to the object. These three columns can be passed to - pg_get_object_address to obtain the internal address + type identifies the type of database object; + object_names and object_args + are text arrays that together form a reference to the object. + These three values can be passed to + pg_get_object_address to obtain the internal address of the object. This function is the inverse of pg_get_object_address. @@ -17527,13 +17958,13 @@ SELECT collation for ('foo' COLLATE "de_DE"); pg_get_object_address returns a row containing enough information to uniquely identify the database object specified by its type and object name and argument arrays. The returned values are the - ones that would be used in system catalogs such as pg_depend + ones that would be used in system catalogs such as pg_depend and can be passed to other system functions such as - pg_identify_object or pg_describe_object. - catalog_id is the OID of the system catalog containing the + pg_identify_object or pg_describe_object. + classid is the OID of the system catalog containing the object; - object_id is the OID of the object itself, and - object_sub_id is the object sub-ID, or zero if none. + objid is the OID of the object itself, and + objsubid is the sub-object ID, or zero if none. This function is the inverse of pg_identify_object_as_address. @@ -17555,8 +17986,8 @@ SELECT collation for ('foo' COLLATE "de_DE"); - The functions shown in - extract comments previously stored with the + The functions shown in + extract comments previously stored with the command. A null value is returned if no comment could be found for the specified parameters. @@ -17653,7 +18084,7 @@ SELECT collation for ('foo' COLLATE "de_DE"); - The functions shown in + The functions shown in provide server transaction information in an exportable form. The main use of these functions is to determine which transactions were committed between two snapshots. @@ -17704,7 +18135,7 @@ SELECT collation for ('foo' COLLATE "de_DE"); txid_status(bigint) - txid_status + text report the status of the given transaction: committed, aborted, in progress, or null if the transaction ID is too old @@ -17712,14 +18143,14 @@ SELECT collation for ('foo' COLLATE "de_DE"); - The internal transaction ID type (xid) is 32 bits wide and + The internal transaction ID type (xid) is 32 bits wide and wraps around every 4 billion transactions. However, these functions - export a 64-bit format that is extended with an epoch counter + export a 64-bit format that is extended with an epoch counter so it will not wrap around during the life of an installation. The data type used by these functions, txid_snapshot, stores information about transaction ID visibility at a particular moment in time. Its components are - described in . + described in . @@ -17755,9 +18186,9 @@ SELECT collation for ('foo' COLLATE "de_DE"); xip_list Active txids at the time of the snapshot. The list - includes only those active txids between xmin - and xmax; there might be active txids higher - than xmax. A txid that is xmin <= txid < + includes only those active txids between xmin + and xmax; there might be active txids higher + than xmax. 
A txid that is xmin <= txid < xmax and not in this list was already completed at the time of the snapshot, and thus either visible or dead according to its commit status. The list does not @@ -17770,36 +18201,36 @@ SELECT collation for ('foo' COLLATE "de_DE");
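A quick way to inspect these components in a live session; the values depend entirely on cluster state, and txid_current assigns a transaction ID to the current transaction if it does not already have one:

SELECT txid_current(), txid_current_snapshot();
SELECT txid_snapshot_xmin(txid_current_snapshot()) AS xmin,
       txid_snapshot_xmax(txid_current_snapshot()) AS xmax;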
- txid_snapshot's textual representation is - xmin:xmax:xip_list. + txid_snapshot's textual representation is + xmin:xmax:xip_list. For example 10:20:10,14,15 means xmin=10, xmax=20, xip_list=10, 14, 15. - txid_status(bigint) reports the commit status of a recent + txid_status(bigint) reports the commit status of a recent transaction. Applications may use it to determine whether a transaction committed or aborted when the application and database server become disconnected while a COMMIT is in progress. The status of a transaction will be reported as either - in progress, - committed, or aborted, provided that the + in progress, + committed, or aborted, provided that the transaction is recent enough that the system retains the commit status of that transaction. If is old enough that no references to that transaction survive in the system and the commit status information has been discarded, this function will return NULL. Note that prepared - transactions are reported as in progress; applications must + transactions are reported as in progress; applications must check pg_prepared_xacts if they + linkend="view-pg-prepared-xacts">pg_prepared_xacts if they need to determine whether the txid is a prepared transaction. - The functions shown in + The functions shown in provide information about transactions that have been already committed. These functions mainly provide information about when the transactions were committed. They only provide useful data when - configuration option is enabled + configuration option is enabled and only for transactions that were committed after it was enabled. @@ -17825,7 +18256,7 @@ SELECT collation for ('foo' COLLATE "de_DE"); pg_last_committed_xact pg_last_committed_xact() - xid xid, timestamp timestamp with time zone + xid xid, timestamp timestamp with time zone get transaction ID and commit timestamp of latest committed transaction @@ -17833,13 +18264,13 @@ SELECT collation for ('foo' COLLATE "de_DE"); - The functions shown in - print information initialized during initdb, such + The functions shown in + print information initialized during initdb, such as the catalog version. They also show information about write-ahead logging and checkpoint processing. This information is cluster-wide, and not specific to any one database. They provide most of the same information, from the same source, as - , although in a form better suited + , although in a form better suited to SQL functions. @@ -17900,12 +18331,12 @@ SELECT collation for ('foo' COLLATE "de_DE"); - pg_control_checkpoint returns a record, shown in - + pg_control_checkpoint returns a record, shown in + - <function>pg_control_checkpoint</> Columns + <function>pg_control_checkpoint</function> Columns @@ -17921,11 +18352,6 @@ SELECT collation for ('foo' COLLATE "de_DE"); pg_lsn - - prior_lsn - pg_lsn - - redo_lsn pg_lsn @@ -18016,12 +18442,12 @@ SELECT collation for ('foo' COLLATE "de_DE");
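As a small usage sketch, the returned record can be expanded like any other composite-returning function:

SELECT checkpoint_lsn, redo_lsn, checkpoint_time
FROM pg_control_checkpoint();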
- pg_control_system returns a record, shown in - + pg_control_system returns a record, shown in + - <function>pg_control_system</> Columns + <function>pg_control_system</function> Columns @@ -18057,12 +18483,12 @@ SELECT collation for ('foo' COLLATE "de_DE");
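For example, the cluster's system identifier, which a standby built from a base backup shares with its primary, can be read with:

SELECT system_identifier FROM pg_control_system();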
- pg_control_init returns a record, shown in - + pg_control_init returns a record, shown in + - <function>pg_control_init</> Columns + <function>pg_control_init</function> Columns @@ -18138,12 +18564,12 @@ SELECT collation for ('foo' COLLATE "de_DE");
- pg_control_recovery returns a record, shown in - + pg_control_recovery returns a record, shown in + - <function>pg_control_recovery</> Columns + <function>pg_control_recovery</function> Columns @@ -18190,14 +18616,14 @@ SELECT collation for ('foo' COLLATE "de_DE"); The functions described in this section are used to control and - monitor a PostgreSQL installation. + monitor a PostgreSQL installation. Configuration Settings Functions - shows the functions + shows the functions available to query and alter run-time configuration parameters. @@ -18313,7 +18739,7 @@ SELECT set_config('log_statement_stats', 'off', false); The functions shown in send control signals to + linkend="functions-admin-signal-table"/> send control signals to other server processes. Use of these functions is restricted to superusers by default but access may be granted to others using GRANT, with noted exceptions. @@ -18330,7 +18756,7 @@ SELECT set_config('log_statement_stats', 'off', false); - pg_cancel_backend(pid int) + pg_cancel_backend(pid int) boolean Cancel a backend's current query. This is also allowed if the @@ -18355,7 +18781,7 @@ SELECT set_config('log_statement_stats', 'off', false); - pg_terminate_backend(pid int) + pg_terminate_backend(pid int) boolean Terminate a backend. This is also allowed if the calling role @@ -18374,28 +18800,28 @@ SELECT set_config('log_statement_stats', 'off', false); - pg_cancel_backend and pg_terminate_backend - send signals (SIGINT or SIGTERM + pg_cancel_backend and pg_terminate_backend + send signals (SIGINT or SIGTERM respectively) to backend processes identified by process ID. The process ID of an active backend can be found from the pid column of the pg_stat_activity view, or by listing the postgres processes on the server (using - ps on Unix or the Task - Manager on Windows). + ps on Unix or the Task + Manager on Windows). The role of an active backend can be found from the usename column of the pg_stat_activity view. - pg_reload_conf sends a SIGHUP signal + pg_reload_conf sends a SIGHUP signal to the server, causing configuration files to be reloaded by all server processes. - pg_rotate_logfile signals the log-file manager to switch + pg_rotate_logfile signals the log-file manager to switch to a new output file immediately. This works only when the built-in log collector is running, since otherwise there is no log-file manager subprocess. @@ -18448,7 +18874,7 @@ SELECT set_config('log_statement_stats', 'off', false); The functions shown in assist in making on-line backups. + linkend="functions-admin-backup-table"/> assist in making on-line backups. These functions cannot be executed during recovery (except pg_is_in_backup, pg_backup_start_time and pg_wal_lsn_diff). 
@@ -18465,7 +18891,7 @@ SELECT set_config('log_statement_stats', 'off', false); - pg_create_restore_point(name text) + pg_create_restore_point(name text) pg_lsn Create a named point for performing restore (restricted to superusers by default, but other users can be granted EXECUTE to run the function) @@ -18493,7 +18919,7 @@ SELECT set_config('log_statement_stats', 'off', false); - pg_start_backup(label text , fast boolean , exclusive boolean ) + pg_start_backup(label text , fast boolean , exclusive boolean ) pg_lsn Prepare for performing on-line backup (restricted to superusers by default, but other users can be granted EXECUTE to run the function) @@ -18507,7 +18933,7 @@ SELECT set_config('log_statement_stats', 'off', false); - pg_stop_backup(exclusive boolean , wait_for_archive boolean ) + pg_stop_backup(exclusive boolean , wait_for_archive boolean ) setof record Finish performing exclusive or non-exclusive on-line backup (restricted to superusers by default, but other users can be granted EXECUTE to run the function) @@ -18535,23 +18961,23 @@ SELECT set_config('log_statement_stats', 'off', false); - pg_walfile_name(lsn pg_lsn) + pg_walfile_name(lsn pg_lsn) text Convert write-ahead log location to file name - pg_walfile_name_offset(lsn pg_lsn) + pg_walfile_name_offset(lsn pg_lsn) - text, integer + text, integer Convert write-ahead log location to file name and decimal byte offset within file - pg_wal_lsn_diff(lsn pg_lsn, lsn pg_lsn) + pg_wal_lsn_diff(lsn pg_lsn, lsn pg_lsn) - numeric + numeric Calculate the difference between two write-ahead log locations @@ -18559,17 +18985,17 @@ SELECT set_config('log_statement_stats', 'off', false);
- pg_start_backup accepts an arbitrary user-defined label for + pg_start_backup accepts an arbitrary user-defined label for the backup. (Typically this would be the name under which the backup dump file will be stored.) When used in exclusive mode, the function writes a - backup label file (backup_label) and, if there are any links - in the pg_tblspc/ directory, a tablespace map file - (tablespace_map) into the database cluster's data directory, + backup label file (backup_label) and, if there are any links + in the pg_tblspc/ directory, a tablespace map file + (tablespace_map) into the database cluster's data directory, performs a checkpoint, and then returns the backup's starting write-ahead log location as text. The user can ignore this result value, but it is provided in case it is useful. When used in non-exclusive mode, the contents of these files are instead returned by the - pg_stop_backup function, and should be written to the backup + pg_stop_backup function, and should be written to the backup by the caller. @@ -18579,36 +19005,37 @@ postgres=# select pg_start_backup('label_goes_here'); 0/D4445B8 (1 row) - There is an optional second parameter of type boolean. If true, - it specifies executing pg_start_backup as quickly as + There is an optional second parameter of type boolean. If true, + it specifies executing pg_start_backup as quickly as possible. This forces an immediate checkpoint which will cause a spike in I/O operations, slowing any concurrently executing queries. - In an exclusive backup, pg_stop_backup removes the label file - and, if it exists, the tablespace_map file created by - pg_start_backup. In a non-exclusive backup, the contents of - the backup_label and tablespace_map are returned + In an exclusive backup, pg_stop_backup removes the label file + and, if it exists, the tablespace_map file created by + pg_start_backup. In a non-exclusive backup, the contents of + the backup_label and tablespace_map are returned in the result of the function, and should be written to files in the backup (and not in the data directory). There is an optional second - parameter of type boolean. If false, the pg_stop_backup + parameter of type boolean. If false, the pg_stop_backup will return immediately after the backup is completed without waiting for WAL to be archived. This behavior is only useful for backup software which independently monitors WAL archiving. Otherwise, WAL required to make the backup consistent might be missing and make the backup - useless. When this parameter is set to true, pg_stop_backup + useless. When this parameter is set to true, pg_stop_backup will wait for WAL to be archived when archiving is enabled; on the standby, - this means that it will wait only when archive_mode = always. + this means that it will wait only when archive_mode = always. If write activity on the primary is low, it may be useful to run - pg_switch_wal on the primary in order to trigger + pg_switch_wal on the primary in order to trigger an immediate segment switch. - The function also creates a backup history file in the write-ahead log + When executed on a primary, the function also creates a backup history file + in the write-ahead log archive area. The history file includes the label given to - pg_start_backup, the starting and ending write-ahead log locations for + pg_start_backup, the starting and ending write-ahead log locations for the backup, and the starting and ending times of the backup. 
The return value is the backup's ending write-ahead log location (which again can be ignored). After recording the ending location, the current @@ -18618,30 +19045,30 @@ postgres=# select pg_start_backup('label_goes_here'); - pg_switch_wal moves to the next write-ahead log file, allowing the + pg_switch_wal moves to the next write-ahead log file, allowing the current file to be archived (assuming you are using continuous archiving). The return value is the ending write-ahead log location + 1 within the just-completed write-ahead log file. If there has been no write-ahead log activity since the last write-ahead log switch, - pg_switch_wal does nothing and returns the start location + pg_switch_wal does nothing and returns the start location of the write-ahead log file currently in use. - pg_create_restore_point creates a named write-ahead log + pg_create_restore_point creates a named write-ahead log record that can be used as recovery target, and returns the corresponding write-ahead log location. The given name can then be used with - to specify the point up to which + to specify the point up to which recovery will proceed. Avoid creating multiple restore points with the same name, since recovery will stop at the first one whose name matches the recovery target. - pg_current_wal_lsn displays the current write-ahead log write + pg_current_wal_lsn displays the current write-ahead log write location in the same format used by the above functions. Similarly, - pg_current_wal_insert_lsn displays the current write-ahead log - insertion location and pg_current_wal_flush_lsn displays the - current write-ahead log flush location. The insertion location is the logical + pg_current_wal_insert_lsn displays the current write-ahead log + insertion location and pg_current_wal_flush_lsn displays the + current write-ahead log flush location. The insertion location is the logical end of the write-ahead log at any instant, while the write location is the end of what has actually been written out from the server's internal buffers and flush location is the location guaranteed to be written to durable storage. The write @@ -18653,7 +19080,7 @@ postgres=# select pg_start_backup('label_goes_here'); - You can use pg_walfile_name_offset to extract the + You can use pg_walfile_name_offset to extract the corresponding write-ahead log file name and byte offset from the results of any of the above functions. For example: @@ -18663,7 +19090,7 @@ postgres=# SELECT * FROM pg_walfile_name_offset(pg_stop_backup()); 00000001000000000000000D | 4039624 (1 row) - Similarly, pg_walfile_name extracts just the write-ahead log file name. + Similarly, pg_walfile_name extracts just the write-ahead log file name. When the given write-ahead log location is exactly at a write-ahead log file boundary, both these functions return the name of the preceding write-ahead log file. This is usually the desired behavior for managing write-ahead log archiving @@ -18672,15 +19099,15 @@ postgres=# SELECT * FROM pg_walfile_name_offset(pg_stop_backup()); - pg_wal_lsn_diff calculates the difference in bytes + pg_wal_lsn_diff calculates the difference in bytes between two write-ahead log locations. It can be used with pg_stat_replication or some functions shown in - to get the replication lag. + to get the replication lag. For details about proper usage of these functions, see - . + . 
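As a sketch of the kind of monitoring this enables, the byte lag of each connected standby can be computed on the primary from the pg_stat_replication view (no rows are returned if no standbys are attached):

SELECT application_name,
       pg_wal_lsn_diff(pg_current_wal_lsn(), replay_lsn) AS replay_lag_bytes
FROM pg_stat_replication;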
@@ -18703,7 +19130,7 @@ postgres=# SELECT * FROM pg_walfile_name_offset(pg_stop_backup()); The functions shown in provide information + linkend="functions-recovery-info-table"/> provide information about the current status of the standby. These functions may be executed both during recovery and in normal running. @@ -18775,6 +19202,9 @@ postgres=# SELECT * FROM pg_walfile_name_offset(pg_stop_backup()); pg_is_wal_replay_paused + + pg_promote + pg_wal_replay_pause @@ -18784,7 +19214,7 @@ postgres=# SELECT * FROM pg_walfile_name_offset(pg_stop_backup()); The functions shown in control the progress of recovery. + linkend="functions-recovery-control-table"/> control the progress of recovery. These functions may be executed only during recovery. @@ -18805,6 +19235,22 @@ postgres=# SELECT * FROM pg_walfile_name_offset(pg_stop_backup()); True if recovery is paused. + + + pg_promote(wait boolean DEFAULT true, wait_seconds integer DEFAULT 60) + + boolean + + Promotes a physical standby server. Returns true + if promotion is successful and false otherwise. + With wait set to true, the + default, the function waits until promotion is completed or + wait_seconds seconds have passed, otherwise the + function returns immediately after sending the promotion signal to the + postmaster. This function is restricted to superusers by default, but + other users can be granted EXECUTE to run the function. + + pg_wal_replay_pause() @@ -18850,21 +19296,21 @@ postgres=# SELECT * FROM pg_walfile_name_offset(pg_stop_backup()); - PostgreSQL allows database sessions to synchronize their - snapshots. A snapshot determines which data is visible to the + PostgreSQL allows database sessions to synchronize their + snapshots. A snapshot determines which data is visible to the transaction that is using the snapshot. Synchronized snapshots are necessary when two or more sessions need to see identical content in the database. If two sessions just start their transactions independently, there is always a possibility that some third transaction commits - between the executions of the two START TRANSACTION commands, + between the executions of the two START TRANSACTION commands, so that one session sees the effects of that transaction and the other does not. - To solve this problem, PostgreSQL allows a transaction to - export the snapshot it is using. As long as the exporting - transaction remains open, other transactions can import its + To solve this problem, PostgreSQL allows a transaction to + export the snapshot it is using. As long as the exporting + transaction remains open, other transactions can import its snapshot, and thereby be guaranteed that they see exactly the same view of the database that the first transaction sees. But note that any database changes made by any one of these transactions remain invisible @@ -18874,9 +19320,9 @@ postgres=# SELECT * FROM pg_walfile_name_offset(pg_stop_backup()); - Snapshots are exported with the pg_export_snapshot function, - shown in , and - imported with the command. + Snapshots are exported with the pg_export_snapshot function, + shown in , and + imported with the command. @@ -18900,20 +19346,20 @@ postgres=# SELECT * FROM pg_walfile_name_offset(pg_stop_backup());
- The function pg_export_snapshot saves the current snapshot - and returns a text string identifying the snapshot. This string + The function pg_export_snapshot saves the current snapshot + and returns a text string identifying the snapshot. This string must be passed (outside the database) to clients that want to import the snapshot. The snapshot is available for import only until the end of the transaction that exported it. A transaction can export more than one snapshot, if needed. Note that doing so is only useful in READ - COMMITTED transactions, since in REPEATABLE READ and + COMMITTED transactions, since in REPEATABLE READ and higher isolation levels, transactions use the same snapshot throughout their lifetime. Once a transaction has exported any snapshots, it cannot - be prepared with . + be prepared with . - See for details of how to use an + See for details of how to use an exported snapshot. @@ -18923,25 +19369,25 @@ postgres=# SELECT * FROM pg_walfile_name_offset(pg_stop_backup()); The functions shown - in are for + in are for controlling and interacting with replication features. - See , - , and - + See , + , and + for information about the underlying features. Use of these functions is restricted to superusers. Many of these functions have equivalent commands in the replication - protocol; see . + protocol; see . The functions described in - , - , and - + , + , and + are also relevant for replication. @@ -18961,7 +19407,7 @@ postgres=# SELECT * FROM pg_walfile_name_offset(pg_stop_backup()); pg_create_physical_replication_slot - pg_create_physical_replication_slot(slot_name name , immediately_reserve boolean, temporary boolean) + pg_create_physical_replication_slot(slot_name name , immediately_reserve boolean, temporary boolean)
(slot_name name, lsn pg_lsn) @@ -18969,13 +19415,13 @@ postgres=# SELECT * FROM pg_walfile_name_offset(pg_stop_backup()); Creates a new physical replication slot named slot_name. The optional second parameter, - when true, specifies that the LSN for this + when true, specifies that the LSN for this replication slot be reserved immediately; otherwise - the LSN is reserved on first connection from a streaming + the LSN is reserved on first connection from a streaming replication client. Streaming changes from a physical slot is only possible with the streaming-replication protocol — - see . The optional third - parameter, temporary, when set to true, specifies that + see . The optional third + parameter, temporary, when set to true, specifies that the slot should not be permanently stored to disk and is only meant for use by current session. Temporary slots are also released upon any error. This function corresponds @@ -18996,7 +19442,7 @@ postgres=# SELECT * FROM pg_walfile_name_offset(pg_stop_backup()); Drops the physical or logical replication slot named slot_name. Same as replication protocol - command DROP_REPLICATION_SLOT. For logical slots, this must + command DROP_REPLICATION_SLOT. For logical slots, this must be called when connected to the same database the slot was created on.
@@ -19006,7 +19452,7 @@ postgres=# SELECT * FROM pg_walfile_name_offset(pg_stop_backup()); pg_create_logical_replication_slot - pg_create_logical_replication_slot(slot_name name, plugin name , temporary boolean) + pg_create_logical_replication_slot(slot_name name, plugin name , temporary boolean) (slot_name name, lsn pg_lsn) @@ -19015,7 +19461,7 @@ postgres=# SELECT * FROM pg_walfile_name_offset(pg_stop_backup()); Creates a new logical (decoding) replication slot named slot_name using the output plugin plugin. The optional third - parameter, temporary, when set to true, specifies that + parameter, temporary, when set to true, specifies that the slot should not be permanently stored to disk and is only meant for use by current session. Temporary slots are also released upon any error. A call to this function has the same @@ -19037,9 +19483,9 @@ postgres=# SELECT * FROM pg_walfile_name_offset(pg_stop_backup()); Returns changes in the slot slot_name, starting from the point at which since changes have been consumed last. If - upto_lsn and upto_nchanges are NULL, + upto_lsn and upto_nchanges are NULL, logical decoding will continue until end of WAL. If - upto_lsn is non-NULL, decoding will include only + upto_lsn is non-NULL, decoding will include only those transactions which commit prior to the specified LSN. If upto_nchanges is non-NULL, decoding will stop when the number of rows produced by decoding exceeds @@ -19103,6 +19549,25 @@ postgres=# SELECT * FROM pg_walfile_name_offset(pg_stop_backup()); + + + + pg_replication_slot_advance + + pg_replication_slot_advance(slot_name name, upto_lsn pg_lsn) + + + (slot_name name, end_lsn pg_lsn) + bool + + + Advances the current confirmed position of a replication slot named + slot_name. The slot will not be moved backwards, + and it will not be moved beyond the current insert location. Returns + name of the slot and real position to which it was advanced to. + + + @@ -19127,7 +19592,7 @@ postgres=# SELECT * FROM pg_walfile_name_offset(pg_stop_backup()); pg_replication_origin_drop(node_name text) - void + void Delete a previously created replication origin, including any @@ -19159,7 +19624,7 @@ postgres=# SELECT * FROM pg_walfile_name_offset(pg_stop_backup()); pg_replication_origin_session_setup(node_name text) - void + void Mark the current session as replaying from the given @@ -19177,7 +19642,7 @@ postgres=# SELECT * FROM pg_walfile_name_offset(pg_stop_backup()); pg_replication_origin_session_reset() - void + void Cancel the effects @@ -19226,7 +19691,7 @@ postgres=# SELECT * FROM pg_walfile_name_offset(pg_stop_backup()); pg_replication_origin_xact_setup(origin_lsn pg_lsn, origin_timestamp timestamptz) - void + void Mark the current transaction as replaying a transaction that has @@ -19245,7 +19710,7 @@ postgres=# SELECT * FROM pg_walfile_name_offset(pg_stop_backup()); pg_replication_origin_xact_reset() - void + void Cancel the effects of @@ -19261,7 +19726,7 @@ postgres=# SELECT * FROM pg_walfile_name_offset(pg_stop_backup()); pg_replication_origin_advance(node_name text, lsn pg_lsn) - void + void Set replication progress for the given node to the given @@ -19342,7 +19807,7 @@ postgres=# SELECT * FROM pg_walfile_name_offset(pg_stop_backup()); Database Object Management Functions - The functions shown in calculate + The functions shown in calculate the disk space usage of database objects. 
@@ -19418,7 +19883,7 @@ postgres=# SELECT * FROM pg_walfile_name_offset(pg_stop_backup()); bigint Disk space used by the specified fork ('main', - 'fsm', 'vm', or 'init') + 'fsm', 'vm', or 'init') of the specified table or index @@ -19491,7 +19956,7 @@ postgres=# SELECT * FROM pg_walfile_name_offset(pg_stop_backup()); bigint Total disk space used by the specified table, - including all indexes and TOAST data + including all indexes and TOAST data @@ -19499,48 +19964,48 @@ postgres=# SELECT * FROM pg_walfile_name_offset(pg_stop_backup()); - pg_column_size shows the space used to store any individual + pg_column_size shows the space used to store any individual data value. - pg_total_relation_size accepts the OID or name of a + pg_total_relation_size accepts the OID or name of a table or toast table, and returns the total on-disk space used for that table, including all associated indexes. This function is equivalent to pg_table_size - + pg_indexes_size. + + pg_indexes_size. - pg_table_size accepts the OID or name of a table and + pg_table_size accepts the OID or name of a table and returns the disk space needed for that table, exclusive of indexes. (TOAST space, free space map, and visibility map are included.) - pg_indexes_size accepts the OID or name of a table and + pg_indexes_size accepts the OID or name of a table and returns the total disk space used by all the indexes attached to that table. - pg_database_size and pg_tablespace_size + pg_database_size and pg_tablespace_size accept the OID or name of a database or tablespace, and return the total disk space used therein. To use pg_database_size, - you must have CONNECT permission on the specified database - (which is granted by default), or be a member of the pg_read_all_stats - role. To use pg_tablespace_size, you must have - CREATE permission on the specified tablespace, or be a member - of the pg_read_all_stats role unless it is the default tablespace for + you must have CONNECT permission on the specified database + (which is granted by default), or be a member of the pg_read_all_stats + role. To use pg_tablespace_size, you must have + CREATE permission on the specified tablespace, or be a member + of the pg_read_all_stats role unless it is the default tablespace for the current database. - pg_relation_size accepts the OID or name of a table, index + pg_relation_size accepts the OID or name of a table, index or toast table, and returns the on-disk size in bytes of one fork of that relation. (Note that for most purposes it is more convenient to - use the higher-level functions pg_total_relation_size - or pg_table_size, which sum the sizes of all forks.) + use the higher-level functions pg_total_relation_size + or pg_table_size, which sum the sizes of all forks.) With one argument, it returns the size of the main data fork of the relation. The second argument can be provided to specify which fork to examine: @@ -19554,13 +20019,13 @@ postgres=# SELECT * FROM pg_walfile_name_offset(pg_stop_backup()); 'fsm' returns the size of the Free Space Map - (see ) associated with the relation. + (see ) associated with the relation. 'vm' returns the size of the Visibility Map - (see ) associated with the relation. + (see ) associated with the relation. 
@@ -19573,13 +20038,13 @@ postgres=# SELECT * FROM pg_walfile_name_offset(pg_stop_backup()); - pg_size_pretty can be used to format the result of one of + pg_size_pretty can be used to format the result of one of the other functions in a human-readable way, using bytes, kB, MB, GB or TB as appropriate. - pg_size_bytes can be used to get the size in bytes from a + pg_size_bytes can be used to get the size in bytes from a string in human-readable format. The input may have units of bytes, kB, MB, GB or TB, and is parsed case-insensitively. If no units are specified, bytes are assumed. @@ -19588,17 +20053,17 @@ postgres=# SELECT * FROM pg_walfile_name_offset(pg_stop_backup()); The units kB, MB, GB and TB used by the functions - pg_size_pretty and pg_size_bytes are defined + pg_size_pretty and pg_size_bytes are defined using powers of 2 rather than powers of 10, so 1kB is 1024 bytes, 1MB is - 10242 = 1048576 bytes, and so on. + 10242 = 1048576 bytes, and so on. The functions above that operate on tables or indexes accept a - regclass argument, which is simply the OID of the table or index - in the pg_class system catalog. You do not have to look up - the OID by hand, however, since the regclass data type's input + regclass argument, which is simply the OID of the table or index + in the pg_class system catalog. You do not have to look up + the OID by hand, however, since the regclass data type's input converter will do the work for you. Just write the table name enclosed in single quotes so that it looks like a literal constant. For compatibility with the handling of ordinary SQL names, the string @@ -19612,7 +20077,7 @@ postgres=# SELECT * FROM pg_walfile_name_offset(pg_stop_backup()); - The functions shown in assist + The functions shown in assist in identifying the specific disk files associated with database objects. @@ -19667,33 +20132,33 @@ postgres=# SELECT * FROM pg_walfile_name_offset(pg_stop_backup()); - pg_relation_filenode accepts the OID or name of a table, - index, sequence, or toast table, and returns the filenode number + pg_relation_filenode accepts the OID or name of a table, + index, sequence, or toast table, and returns the filenode number currently assigned to it. The filenode is the base component of the file - name(s) used for the relation (see + name(s) used for the relation (see for more information). For most tables the result is the same as - pg_class.relfilenode, but for certain - system catalogs relfilenode is zero and this function must + pg_class.relfilenode, but for certain + system catalogs relfilenode is zero and this function must be used to get the correct value. The function returns NULL if passed a relation that does not have storage, such as a view. - pg_relation_filepath is similar to - pg_relation_filenode, but it returns the entire file path name - (relative to the database cluster's data directory PGDATA) of + pg_relation_filepath is similar to + pg_relation_filenode, but it returns the entire file path name + (relative to the database cluster's data directory PGDATA) of the relation. - pg_filenode_relation is the reverse of - pg_relation_filenode. Given a tablespace OID and - a filenode, it returns the associated relation's OID. For a table + pg_filenode_relation is the reverse of + pg_relation_filenode. Given a tablespace OID and + a filenode, it returns the associated relation's OID. For a table in the database's default tablespace, the tablespace can be specified as 0. - lists functions used to manage + lists functions used to manage collations. 
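    For example (a sketch assuming superuser privileges and newly installed
    operating system locales), the import function described below can be called
    directly to pick up the new locales:

SELECT pg_import_system_collations('pg_catalog');  -- returns the number of new collation objects created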
@@ -19708,7 +20173,7 @@ postgres=# SELECT * FROM pg_walfile_name_offset(pg_stop_backup()); pg_collation_actual_version - pg_collation_actual_version(oid) + pg_collation_actual_version(oid) text Return actual version of collation from operating system @@ -19716,7 +20181,7 @@ postgres=# SELECT * FROM pg_walfile_name_offset(pg_stop_backup()); pg_import_system_collations - pg_import_system_collations(schema regnamespace) + pg_import_system_collations(schema regnamespace) integer Import operating system collations @@ -19731,15 +20196,15 @@ postgres=# SELECT * FROM pg_walfile_name_offset(pg_stop_backup()); operating system. If this is different from the value in pg_collation.collversion, then objects depending on the collation might need to be rebuilt. See also - . + . - pg_import_system_collations adds collations to the system + pg_import_system_collations adds collations to the system catalog pg_collation based on all the locales it finds in the operating system. This is what initdb uses; - see for more details. If additional + see for more details. If additional locales are installed into the operating system later on, this function can be run again to add collations for the new locales. Locales that match existing entries in pg_collation will be skipped. @@ -19751,6 +20216,49 @@ postgres=# SELECT * FROM pg_walfile_name_offset(pg_stop_backup()); The function returns the number of new collation objects it created. + + Partitioning Information Functions + + + Name Return Type Description + + + + + pg_partition_tree(regclass) + setof record + + List information about tables or indexes in a partition tree for a + given partitioned table or partitioned index, with one row for each + partition. Information provided includes the name of the partition, + the name of its immediate parent, a boolean value telling if the + partition is a leaf, and an integer telling its level in the hierarchy. + The value of level begins at 0 for the input table + or index in its role as the root of the partition tree, + 1 for its partitions, 2 for + their partitions, and so on. + + + + +
+ + + To check the total size of the data contained in + measurement table described in + , one could use the + following query: + + + +=# SELECT pg_size_pretty(sum(pg_relation_size(relid))) AS total_size + FROM pg_partition_tree('measurement'); + total_size +------------ + 24 kB +(1 row) + + @@ -19773,7 +20281,7 @@ postgres=# SELECT * FROM pg_walfile_name_offset(pg_stop_backup()); - shows the functions + shows the functions available for index maintenance tasks. These functions cannot be executed during recovery. Use of these functions is restricted to superusers and the owner @@ -19790,28 +20298,28 @@ postgres=# SELECT * FROM pg_walfile_name_offset(pg_stop_backup()); - brin_summarize_new_values(index regclass) + brin_summarize_new_values(index regclass) integer summarize page ranges not already summarized - brin_summarize_range(index regclass, blockNumber bigint) + brin_summarize_range(index regclass, blockNumber bigint) integer summarize the page range covering the given block, if not already summarized - brin_desummarize_range(index regclass, blockNumber bigint) + brin_desummarize_range(index regclass, blockNumber bigint) integer de-summarize the page range covering the given block, if summarized - gin_clean_pending_list(index regclass) + gin_clean_pending_list(index regclass) bigint move GIN pending list entries into main index structure @@ -19821,25 +20329,25 @@ postgres=# SELECT * FROM pg_walfile_name_offset(pg_stop_backup()); - brin_summarize_new_values accepts the OID or name of a + brin_summarize_new_values accepts the OID or name of a BRIN index and inspects the index to find page ranges in the base table that are not currently summarized by the index; for any such range it creates a new summary index tuple by scanning the table pages. It returns the number of new page range summaries that were inserted - into the index. brin_summarize_range does the same, except + into the index. brin_summarize_range does the same, except it only summarizes the range that covers the given block number. - gin_clean_pending_list accepts the OID or name of + gin_clean_pending_list accepts the OID or name of a GIN index and cleans up the pending list of the specified index by moving entries in it to the main GIN data structure in bulk. It returns the number of pages removed from the pending list. Note that if the argument is a GIN index built with - the fastupdate option disabled, no cleanup happens and the + the fastupdate option disabled, no cleanup happens and the return value is 0, because the index doesn't have a pending list. - Please see and - for details of the pending list and fastupdate option. + Please see and + for details of the pending list and fastupdate option. @@ -19849,13 +20357,24 @@ postgres=# SELECT * FROM pg_walfile_name_offset(pg_stop_backup()); The functions shown in provide native access to + linkend="functions-admin-genfile-table"/> provide native access to files on the machine hosting the server. Only files within the - database cluster directory and the log_directory can be - accessed. Use a relative path for files in the cluster directory, - and a path matching the log_directory configuration setting - for log files. Use of these functions is restricted to superusers - except where stated otherwise. + database cluster directory and the log_directory can be + accessed unless the user is granted the role + pg_read_server_files. Use a relative path for files in + the cluster directory, and a path matching the log_directory + configuration setting for log files. 
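    For example (a sketch, assuming a role named auditor exists), access beyond
    the cluster and log directories can be permitted by granting membership in
    the pg_read_server_files role:

GRANT pg_read_server_files TO auditor;  -- auditor can now read server files outside the cluster and log directories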
+ + + + Note that granting users the EXECUTE privilege on the + pg_read_file(), or related, functions allows them the + ability to read any file on the server which the database can read and + that those reads bypass all in-database privilege checks. This means that, + among other things, a user with this access is able to read the contents of the + pg_authid table where authentication information is contained, + as well as read any file in the database. Therefore, granting access to these + functions should be carefully considered. @@ -19869,11 +20388,11 @@ postgres=# SELECT * FROM pg_walfile_name_offset(pg_stop_backup()); - pg_ls_dir(dirname text [, missing_ok boolean, include_dot_dirs boolean]) + pg_ls_dir(dirname text [, missing_ok boolean, include_dot_dirs boolean]) setof text - List the contents of a directory. + List the contents of a directory. Restricted to superusers by default, but other users can be granted EXECUTE to run the function. @@ -19883,7 +20402,7 @@ postgres=# SELECT * FROM pg_walfile_name_offset(pg_stop_backup()); setof record List the name, size, and last modification time of files in the log - directory. Access is granted to members of the pg_monitor + directory. Access is granted to members of the pg_monitor role and may be granted to other non-superuser roles. @@ -19894,35 +20413,61 @@ postgres=# SELECT * FROM pg_walfile_name_offset(pg_stop_backup()); setof record List the name, size, and last modification time of files in the WAL - directory. Access is granted to members of the pg_monitor + directory. Access is granted to members of the pg_monitor role and may be granted to other non-superuser roles. - pg_read_file(filename text [, offset bigint, length bigint [, missing_ok boolean] ]) + pg_ls_archive_statusdir() + + setof record + + List the name, size, and last modification time of files in the WAL + archive status directory. Access is granted to members of the + pg_monitor role and may be granted to other + non-superuser roles. + + + + + pg_ls_tmpdir(tablespace oid) + + setof record + + List the name, size, and last modification time of files in the + temporary directory for tablespace. If + tablespace is not provided, the + pg_default tablespace is used. Access is granted + to members of the pg_monitor role and may be + granted to other non-superuser roles. + + + + + pg_read_file(filename text [, offset bigint, length bigint [, missing_ok boolean] ]) text - Return the contents of a text file. + Return the contents of a text file. Restricted to superusers by default, but other users can be granted EXECUTE to run the function. - pg_read_binary_file(filename text [, offset bigint, length bigint [, missing_ok boolean] ]) + pg_read_binary_file(filename text [, offset bigint, length bigint [, missing_ok boolean] ]) bytea - Return the contents of a file. + Return the contents of a file. Restricted to superusers by default, but other users can be granted EXECUTE to run the function. - pg_stat_file(filename text[, missing_ok boolean]) + pg_stat_file(filename text[, missing_ok boolean]) record - Return information about a file. + Return information about a file. Restricted to superusers by default, but other users can be granted EXECUTE to run the function. @@ -19930,23 +20475,23 @@ postgres=# SELECT * FROM pg_walfile_name_offset(pg_stop_backup());
- Some of these functions take an optional missing_ok parameter, + Some of these functions take an optional missing_ok parameter, which specifies the behavior when the file or directory does not exist. If true, the function returns NULL (except - pg_ls_dir, which returns an empty result set). If - false, an error is raised. The default is false. + pg_ls_dir, which returns an empty result set). If + false, an error is raised. The default is false. pg_ls_dir - pg_ls_dir returns the names of all files (and directories + pg_ls_dir returns the names of all files (and directories and other special files) in the specified directory. The - include_dot_dirs indicates whether . and .. are + include_dot_dirs indicates whether . and .. are included in the result set. The default is to exclude them - (false), but including them can be useful when - missing_ok is true, to distinguish an + (false), but including them can be useful when + missing_ok is true, to distinguish an empty directory from an non-existent directory. @@ -19954,9 +20499,9 @@ postgres=# SELECT * FROM pg_walfile_name_offset(pg_stop_backup()); pg_ls_logdir - pg_ls_logdir returns the name, size, and last modified time + pg_ls_logdir returns the name, size, and last modified time (mtime) of each file in the log directory. By default, only superusers - and members of the pg_monitor role can use this function. + and members of the pg_monitor role can use this function. Access may be granted to others using GRANT. @@ -19964,22 +20509,47 @@ postgres=# SELECT * FROM pg_walfile_name_offset(pg_stop_backup()); pg_ls_waldir - pg_ls_waldir returns the name, size, and last modified time + pg_ls_waldir returns the name, size, and last modified time (mtime) of each file in the write ahead log (WAL) directory. By - default only superusers and members of the pg_monitor role + default only superusers and members of the pg_monitor role can use this function. Access may be granted to others using GRANT. + + pg_ls_archive_statusdir + + + pg_ls_archive_statusdir returns the name, size, and + last modified time (mtime) of each file in the WAL archive status + directory pg_wal/archive_status. By default only + superusers and members of the pg_monitor role can + use this function. Access may be granted to others using + GRANT. + + + + pg_ls_tmpdir + + + pg_ls_tmpdir returns the name, size, and last modified + time (mtime) of each file in the temporary file directory for the specified + tablespace. If tablespace is + not provided, the pg_default tablespace is used. By + default only superusers and members of the pg_monitor + role can use this function. Access may be granted to others using + GRANT. + + pg_read_file - pg_read_file returns part of a text file, starting - at the given offset, returning at most length - bytes (less if the end of file is reached first). If offset + pg_read_file returns part of a text file, starting + at the given offset, returning at most length + bytes (less if the end of file is reached first). If offset is negative, it is relative to the end of the file. - If offset and length are omitted, the entire + If offset and length are omitted, the entire file is returned. The bytes read from the file are interpreted as a string in the server encoding; an error is thrown if they are not valid in that encoding. 
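    As a brief sketch (the file name is only an example; any readable file under
    the data directory works for a superuser), part or all of a server-side text
    file can be read like this:

SELECT pg_read_file('postgresql.auto.conf');         -- entire file, returned as text in the server encoding
SELECT pg_read_file('postgresql.auto.conf', 0, 64);  -- at most the first 64 bytes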
@@ -19989,10 +20559,10 @@ postgres=# SELECT * FROM pg_walfile_name_offset(pg_stop_backup()); pg_read_binary_file - pg_read_binary_file is similar to - pg_read_file, except that the result is a bytea value; + pg_read_binary_file is similar to + pg_read_file, except that the result is a bytea value; accordingly, no encoding checks are performed. - In combination with the convert_from function, this function + In combination with the convert_from function, this function can be used to read a file in a specified encoding: SELECT convert_from(pg_read_binary_file('file_in_utf8.txt'), 'UTF8'); @@ -20003,7 +20573,7 @@ SELECT convert_from(pg_read_binary_file('file_in_utf8.txt'), 'UTF8'); pg_stat_file - pg_stat_file returns a record containing the file + pg_stat_file returns a record containing the file size, last accessed time stamp, last modified time stamp, last file status change time stamp (Unix platforms only), file creation time stamp (Windows only), and a boolean @@ -20020,9 +20590,9 @@ SELECT (pg_stat_file('filename')).modification; Advisory Lock Functions - The functions shown in + The functions shown in manage advisory locks. For details about proper use of these functions, - see . + see . @@ -20036,42 +20606,42 @@ SELECT (pg_stat_file('filename')).modification; - pg_advisory_lock(key bigint) + pg_advisory_lock(key bigint) void Obtain exclusive session level advisory lock - pg_advisory_lock(key1 int, key2 int) + pg_advisory_lock(key1 int, key2 int) void Obtain exclusive session level advisory lock - pg_advisory_lock_shared(key bigint) + pg_advisory_lock_shared(key bigint) void Obtain shared session level advisory lock - pg_advisory_lock_shared(key1 int, key2 int) + pg_advisory_lock_shared(key1 int, key2 int) void Obtain shared session level advisory lock - pg_advisory_unlock(key bigint) + pg_advisory_unlock(key bigint) boolean Release an exclusive session level advisory lock - pg_advisory_unlock(key1 int, key2 int) + pg_advisory_unlock(key1 int, key2 int) boolean Release an exclusive session level advisory lock @@ -20085,98 +20655,98 @@ SELECT (pg_stat_file('filename')).modification; - pg_advisory_unlock_shared(key bigint) + pg_advisory_unlock_shared(key bigint) boolean Release a shared session level advisory lock - pg_advisory_unlock_shared(key1 int, key2 int) + pg_advisory_unlock_shared(key1 int, key2 int) boolean Release a shared session level advisory lock - pg_advisory_xact_lock(key bigint) + pg_advisory_xact_lock(key bigint) void Obtain exclusive transaction level advisory lock - pg_advisory_xact_lock(key1 int, key2 int) + pg_advisory_xact_lock(key1 int, key2 int) void Obtain exclusive transaction level advisory lock - pg_advisory_xact_lock_shared(key bigint) + pg_advisory_xact_lock_shared(key bigint) void Obtain shared transaction level advisory lock - pg_advisory_xact_lock_shared(key1 int, key2 int) + pg_advisory_xact_lock_shared(key1 int, key2 int) void Obtain shared transaction level advisory lock - pg_try_advisory_lock(key bigint) + pg_try_advisory_lock(key bigint) boolean Obtain exclusive session level advisory lock if available - pg_try_advisory_lock(key1 int, key2 int) + pg_try_advisory_lock(key1 int, key2 int) boolean Obtain exclusive session level advisory lock if available - pg_try_advisory_lock_shared(key bigint) + pg_try_advisory_lock_shared(key bigint) boolean Obtain shared session level advisory lock if available - pg_try_advisory_lock_shared(key1 int, key2 int) + pg_try_advisory_lock_shared(key1 int, key2 int) boolean Obtain shared session level advisory lock if 
available - pg_try_advisory_xact_lock(key bigint) + pg_try_advisory_xact_lock(key bigint) boolean Obtain exclusive transaction level advisory lock if available - pg_try_advisory_xact_lock(key1 int, key2 int) + pg_try_advisory_xact_lock(key1 int, key2 int) boolean Obtain exclusive transaction level advisory lock if available - pg_try_advisory_xact_lock_shared(key bigint) + pg_try_advisory_xact_lock_shared(key bigint) boolean Obtain shared transaction level advisory lock if available - pg_try_advisory_xact_lock_shared(key1 int, key2 int) + pg_try_advisory_xact_lock_shared(key1 int, key2 int) boolean Obtain shared transaction level advisory lock if available @@ -20189,7 +20759,7 @@ SELECT (pg_stat_file('filename')).modification; pg_advisory_lock - pg_advisory_lock locks an application-defined resource, + pg_advisory_lock locks an application-defined resource, which can be identified either by a single 64-bit key value or two 32-bit key values (note that these two key spaces do not overlap). If another session already holds a lock on the same resource identifier, @@ -20203,8 +20773,8 @@ SELECT (pg_stat_file('filename')).modification; pg_advisory_lock_shared - pg_advisory_lock_shared works the same as - pg_advisory_lock, + pg_advisory_lock_shared works the same as + pg_advisory_lock, except the lock can be shared with other sessions requesting shared locks. Only would-be exclusive lockers are locked out. @@ -20213,10 +20783,10 @@ SELECT (pg_stat_file('filename')).modification; pg_try_advisory_lock - pg_try_advisory_lock is similar to - pg_advisory_lock, except the function will not wait for the + pg_try_advisory_lock is similar to + pg_advisory_lock, except the function will not wait for the lock to become available. It will either obtain the lock immediately and - return true, or return false if the lock cannot be + return true, or return false if the lock cannot be acquired immediately. @@ -20224,8 +20794,8 @@ SELECT (pg_stat_file('filename')).modification; pg_try_advisory_lock_shared - pg_try_advisory_lock_shared works the same as - pg_try_advisory_lock, except it attempts to acquire + pg_try_advisory_lock_shared works the same as + pg_try_advisory_lock, except it attempts to acquire a shared rather than an exclusive lock. @@ -20233,10 +20803,10 @@ SELECT (pg_stat_file('filename')).modification; pg_advisory_unlock - pg_advisory_unlock will release a previously-acquired + pg_advisory_unlock will release a previously-acquired exclusive session level advisory lock. It - returns true if the lock is successfully released. - If the lock was not held, it will return false, + returns true if the lock is successfully released. + If the lock was not held, it will return false, and in addition, an SQL warning will be reported by the server. @@ -20244,8 +20814,8 @@ SELECT (pg_stat_file('filename')).modification; pg_advisory_unlock_shared - pg_advisory_unlock_shared works the same as - pg_advisory_unlock, + pg_advisory_unlock_shared works the same as + pg_advisory_unlock, except it releases a shared session level advisory lock. @@ -20253,7 +20823,7 @@ SELECT (pg_stat_file('filename')).modification; pg_advisory_unlock_all - pg_advisory_unlock_all will release all session level advisory + pg_advisory_unlock_all will release all session level advisory locks held by the current session. (This function is implicitly invoked at session end, even if the client disconnects ungracefully.) 
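    A minimal sketch of the session-level forms (the key 12345 is arbitrary; any
    application-defined 64-bit value works):

SELECT pg_try_advisory_lock(12345);  -- true if the exclusive session-level lock was obtained, false otherwise
SELECT pg_advisory_unlock(12345);    -- returns false and raises a warning if the lock was not held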
@@ -20262,8 +20832,8 @@ SELECT (pg_stat_file('filename')).modification; pg_advisory_xact_lock - pg_advisory_xact_lock works the same as - pg_advisory_lock, except the lock is automatically released + pg_advisory_xact_lock works the same as + pg_advisory_lock, except the lock is automatically released at the end of the current transaction and cannot be released explicitly. @@ -20271,8 +20841,8 @@ SELECT (pg_stat_file('filename')).modification; pg_advisory_xact_lock_shared - pg_advisory_xact_lock_shared works the same as - pg_advisory_lock_shared, except the lock is automatically released + pg_advisory_xact_lock_shared works the same as + pg_advisory_lock_shared, except the lock is automatically released at the end of the current transaction and cannot be released explicitly. @@ -20280,8 +20850,8 @@ SELECT (pg_stat_file('filename')).modification; pg_try_advisory_xact_lock - pg_try_advisory_xact_lock works the same as - pg_try_advisory_lock, except the lock, if acquired, + pg_try_advisory_xact_lock works the same as + pg_try_advisory_lock, except the lock, if acquired, is automatically released at the end of the current transaction and cannot be released explicitly. @@ -20290,8 +20860,8 @@ SELECT (pg_stat_file('filename')).modification; pg_try_advisory_xact_lock_shared - pg_try_advisory_xact_lock_shared works the same as - pg_try_advisory_lock_shared, except the lock, if acquired, + pg_try_advisory_xact_lock_shared works the same as + pg_try_advisory_lock_shared, except the lock, if acquired, is automatically released at the end of the current transaction and cannot be released explicitly. @@ -20308,8 +20878,8 @@ SELECT (pg_stat_file('filename')).modification; - Currently PostgreSQL provides one built in trigger - function, suppress_redundant_updates_trigger, + Currently PostgreSQL provides one built in trigger + function, suppress_redundant_updates_trigger, which will prevent any update that does not actually change the data in the row from taking place, in contrast to the normal behavior which always performs the update @@ -20326,7 +20896,7 @@ SELECT (pg_stat_file('filename')).modification; However, detecting such situations in client code is not always easy, or even possible, and writing expressions to detect them can be error-prone. An alternative is to use - suppress_redundant_updates_trigger, which will skip + suppress_redundant_updates_trigger, which will skip updates that don't change the data. You should use this with care, however. The trigger takes a small but non-trivial time for each record, so if most of the records affected by an update are actually changed, @@ -20334,12 +20904,12 @@ SELECT (pg_stat_file('filename')).modification; - The suppress_redundant_updates_trigger function can be + The suppress_redundant_updates_trigger function can be added to a table like this: CREATE TRIGGER z_min_update BEFORE UPDATE ON tablename -FOR EACH ROW EXECUTE PROCEDURE suppress_redundant_updates_trigger(); +FOR EACH ROW EXECUTE FUNCTION suppress_redundant_updates_trigger(); In most cases, you would want to fire this trigger last for each row. Bearing in mind that triggers fire in name order, you would then @@ -20348,7 +20918,7 @@ FOR EACH ROW EXECUTE PROCEDURE suppress_redundant_updates_trigger(); For more information about creating triggers, see - . + . 
@@ -20356,13 +20926,13 @@ FOR EACH ROW EXECUTE PROCEDURE suppress_redundant_updates_trigger(); Event Trigger Functions - PostgreSQL provides these helper functions + PostgreSQL provides these helper functions to retrieve information from event triggers. For more information about event triggers, - see . + see . @@ -20373,12 +20943,12 @@ FOR EACH ROW EXECUTE PROCEDURE suppress_redundant_updates_trigger(); - pg_event_trigger_ddl_commands returns a list of + pg_event_trigger_ddl_commands returns a list of DDL commands executed by each user action, when invoked in a function attached to a - ddl_command_end event trigger. If called in any other + ddl_command_end event trigger. If called in any other context, an error is raised. - pg_event_trigger_ddl_commands returns one row for each + pg_event_trigger_ddl_commands returns one row for each base command executed; some commands that are a single SQL sentence may return more than one row. This function returns the following columns: @@ -20396,23 +20966,23 @@ FOR EACH ROW EXECUTE PROCEDURE suppress_redundant_updates_trigger(); classid - Oid + oid OID of catalog the object belongs in objid - Oid - OID of the object in the catalog + oid + OID of the object itself objsubid integer - Object sub-id (e.g. attribute number for columns) + Sub-object ID (e.g. attribute number for a column) command_tag text - command tag + Command tag object_type @@ -20423,7 +20993,7 @@ FOR EACH ROW EXECUTE PROCEDURE suppress_redundant_updates_trigger(); schema_name text - Name of the schema the object belongs in, if any; otherwise NULL. + Name of the schema the object belongs in, if any; otherwise NULL. No quoting is applied. @@ -20431,14 +21001,14 @@ FOR EACH ROW EXECUTE PROCEDURE suppress_redundant_updates_trigger(); object_identitytext - Text rendering of the object identity, schema-qualified. Each and every - identifier present in the identity is quoted if necessary. + Text rendering of the object identity, schema-qualified. Each + identifier included in the identity is quoted if necessary. in_extension bool - whether the command is part of an extension script + True if the command is part of an extension script command @@ -20464,11 +21034,11 @@ FOR EACH ROW EXECUTE PROCEDURE suppress_redundant_updates_trigger(); - pg_event_trigger_dropped_objects returns a list of all objects - dropped by the command in whose sql_drop event it is called. + pg_event_trigger_dropped_objects returns a list of all objects + dropped by the command in whose sql_drop event it is called. If called in any other context, - pg_event_trigger_dropped_objects raises an error. - pg_event_trigger_dropped_objects returns the following columns: + pg_event_trigger_dropped_objects raises an error. + pg_event_trigger_dropped_objects returns the following columns: @@ -20483,29 +21053,29 @@ FOR EACH ROW EXECUTE PROCEDURE suppress_redundant_updates_trigger(); classid - Oid + oid OID of catalog the object belonged in objid - Oid - OID the object had within the catalog + oid + OID of the object itself objsubid - int32 - Object sub-id (e.g. attribute number for columns) + integer + Sub-object ID (e.g. 
attribute number for a column) original bool - Flag used to identify the root object(s) of the deletion + True if this was one of the root object(s) of the deletion normal bool - Flag indicating that there's a normal dependency relationship + True if there was a normal dependency relationship in the dependency graph leading to this object @@ -20513,7 +21083,7 @@ FOR EACH ROW EXECUTE PROCEDURE suppress_redundant_updates_trigger(); is_temporarybool - Flag indicating that the object was a temporary object. + True if this was a temporary object @@ -20525,7 +21095,7 @@ FOR EACH ROW EXECUTE PROCEDURE suppress_redundant_updates_trigger(); schema_name text - Name of the schema the object belonged in, if any; otherwise NULL. + Name of the schema the object belonged in, if any; otherwise NULL. No quoting is applied. @@ -20534,7 +21104,7 @@ FOR EACH ROW EXECUTE PROCEDURE suppress_redundant_updates_trigger(); text Name of the object, if the combination of schema and name can be - used as a unique identifier for the object; otherwise NULL. + used as a unique identifier for the object; otherwise NULL. No quoting is applied, and name is never schema-qualified. @@ -20542,8 +21112,8 @@ FOR EACH ROW EXECUTE PROCEDURE suppress_redundant_updates_trigger(); object_identitytext - Text rendering of the object identity, schema-qualified. Each and every - identifier present in the identity is quoted if necessary. + Text rendering of the object identity, schema-qualified. Each + identifier included in the identity is quoted if necessary. @@ -20551,17 +21121,17 @@ FOR EACH ROW EXECUTE PROCEDURE suppress_redundant_updates_trigger(); text[] An array that, together with object_type and - address_args, - can be used by the pg_get_object_address() to + address_args, can be used by + the pg_get_object_address() function to recreate the object address in a remote server containing an - identically named object of the same kind. + identically named object of the same kind address_args text[] - Complement for address_names above. + Complement for address_names @@ -20570,7 +21140,7 @@ FOR EACH ROW EXECUTE PROCEDURE suppress_redundant_updates_trigger(); - The pg_event_trigger_dropped_objects function can be used + The pg_event_trigger_dropped_objects function can be used in an event trigger like this: CREATE FUNCTION test_event_trigger_for_drops() @@ -20591,7 +21161,7 @@ END $$; CREATE EVENT TRIGGER test_event_trigger_for_drops ON sql_drop - EXECUTE PROCEDURE test_event_trigger_for_drops(); + EXECUTE FUNCTION test_event_trigger_for_drops(); @@ -20601,9 +21171,9 @@ CREATE EVENT TRIGGER test_event_trigger_for_drops The functions shown in - + provide information about a table for which a - table_rewrite event has just been called. + table_rewrite event has just been called. If called in any other context, an error is raised. @@ -20640,7 +21210,7 @@ CREATE EVENT TRIGGER test_event_trigger_for_drops
- The pg_event_trigger_table_rewrite_oid function can be used + The pg_event_trigger_table_rewrite_oid function can be used in an event trigger like this: CREATE FUNCTION test_event_trigger_table_rewrite_oid() @@ -20656,7 +21226,7 @@ $$; CREATE EVENT TRIGGER test_table_rewrite_oid ON table_rewrite - EXECUTE PROCEDURE test_event_trigger_table_rewrite_oid(); + EXECUTE FUNCTION test_event_trigger_table_rewrite_oid(); diff --git a/doc/src/sgml/fuzzystrmatch.sgml b/doc/src/sgml/fuzzystrmatch.sgml index feb06861da..373ac4891d 100644 --- a/doc/src/sgml/fuzzystrmatch.sgml +++ b/doc/src/sgml/fuzzystrmatch.sgml @@ -8,14 +8,14 @@ - The fuzzystrmatch module provides several + The fuzzystrmatch module provides several functions to determine similarities and distance between strings. - At present, the soundex, metaphone, - dmetaphone, and dmetaphone_alt functions do + At present, the soundex, metaphone, + dmetaphone, and dmetaphone_alt functions do not work well with multibyte encodings (such as UTF-8). @@ -31,7 +31,7 @@
- The fuzzystrmatch module provides two functions + The fuzzystrmatch module provides two functions for working with Soundex codes: @@ -49,12 +49,12 @@ difference(text, text) returns int - The soundex function converts a string to its Soundex code. - The difference function converts two strings to their Soundex + The soundex function converts a string to its Soundex code. + The difference function converts two strings to their Soundex codes and then reports the number of matching code positions. Since Soundex codes have four characters, the result ranges from zero to four, with zero being no match and four being an exact match. (Thus, the - function is misnamed — similarity would have been + function is misnamed — similarity would have been a better name.) @@ -115,10 +115,10 @@ levenshtein_less_equal(text source, text target, int max_d) returns int levenshtein_less_equal is an accelerated version of the Levenshtein function for use when only small distances are of interest. - If the actual distance is less than or equal to max_d, + If the actual distance is less than or equal to max_d, then levenshtein_less_equal returns the correct - distance; otherwise it returns some value greater than max_d. - If max_d is negative then the behavior is the same as + distance; otherwise it returns some value greater than max_d. + If max_d is negative then the behavior is the same as levenshtein. @@ -133,19 +133,19 @@ test=# SELECT levenshtein('GUMBO', 'GAMBOL'); 2 (1 row) -test=# SELECT levenshtein('GUMBO', 'GAMBOL', 2,1,1); +test=# SELECT levenshtein('GUMBO', 'GAMBOL', 2, 1, 1); levenshtein ------------- 3 (1 row) -test=# SELECT levenshtein_less_equal('extensive', 'exhaustive',2); +test=# SELECT levenshtein_less_equal('extensive', 'exhaustive', 2); levenshtein_less_equal ------------------------ 3 (1 row) -test=# SELECT levenshtein_less_equal('extensive', 'exhaustive',4); +test=# SELECT levenshtein_less_equal('extensive', 'exhaustive', 4); levenshtein_less_equal ------------------------ 4 @@ -198,9 +198,9 @@ test=# SELECT metaphone('GUMBO', 4); Double Metaphone - The Double Metaphone system computes two sounds like strings - for a given input string — a primary and an - alternate. In most cases they are the same, but for non-English + The Double Metaphone system computes two sounds like strings + for a given input string — a primary and an + alternate. In most cases they are the same, but for non-English names especially they can be a bit different, depending on pronunciation. These functions compute the primary and alternate codes: @@ -227,7 +227,7 @@ dmetaphone_alt(text source) returns text
-test=# select dmetaphone('gumbo'); +test=# SELECT dmetaphone('gumbo'); dmetaphone ------------ KMP diff --git a/doc/src/sgml/generate-errcodes-table.pl b/doc/src/sgml/generate-errcodes-table.pl index 01fc6166bf..ebec43159e 100644 --- a/doc/src/sgml/generate-errcodes-table.pl +++ b/doc/src/sgml/generate-errcodes-table.pl @@ -1,7 +1,7 @@ #!/usr/bin/perl # # Generate the errcodes-table.sgml file from errcodes.txt -# Copyright (c) 2000-2017, PostgreSQL Global Development Group +# Copyright (c) 2000-2018, PostgreSQL Global Development Group use warnings; use strict; @@ -30,12 +30,12 @@ s/-/—/; # Wrap PostgreSQL in - s/PostgreSQL/PostgreSQL<\/>/g; + s/PostgreSQL/PostgreSQL<\/productname>/g; print "\n\n"; print "\n"; print ""; - print "$_\n"; + print "$_
\n"; print "\n"; next; diff --git a/doc/src/sgml/generic-wal.sgml b/doc/src/sgml/generic-wal.sgml index dfa78c5ca2..7a0284994c 100644 --- a/doc/src/sgml/generic-wal.sgml +++ b/doc/src/sgml/generic-wal.sgml @@ -13,8 +13,8 @@ The API for constructing generic WAL records is defined in - access/generic_xlog.h and implemented - in access/transam/generic_xlog.c. + access/generic_xlog.h and implemented + in access/transam/generic_xlog.c. @@ -24,24 +24,24 @@ - state = GenericXLogStart(relation) — start + state = GenericXLogStart(relation) — start construction of a generic WAL record for the given relation. - page = GenericXLogRegisterBuffer(state, buffer, flags) + page = GenericXLogRegisterBuffer(state, buffer, flags) — register a buffer to be modified within the current generic WAL record. This function returns a pointer to a temporary copy of the buffer's page, where modifications should be made. (Do not modify the buffer's contents directly.) The third argument is a bit mask of flags applicable to the operation. Currently the only such flag is - GENERIC_XLOG_FULL_IMAGE, which indicates that a full-page + GENERIC_XLOG_FULL_IMAGE, which indicates that a full-page image rather than a delta update should be included in the WAL record. Typically this flag would be set if the page is new or has been rewritten completely. - GenericXLogRegisterBuffer can be repeated if the + GenericXLogRegisterBuffer can be repeated if the WAL-logged action needs to modify multiple pages. @@ -54,7 +54,7 @@ - GenericXLogFinish(state) — apply the changes to + GenericXLogFinish(state) — apply the changes to the buffers and emit the generic WAL record. @@ -63,7 +63,7 @@ WAL record construction can be canceled between any of the above steps by - calling GenericXLogAbort(state). This will discard all + calling GenericXLogAbort(state). This will discard all changes to the page image copies. @@ -75,13 +75,13 @@ No direct modifications of buffers are allowed! All modifications must - be done in copies acquired from GenericXLogRegisterBuffer(). + be done in copies acquired from GenericXLogRegisterBuffer(). In other words, code that makes generic WAL records should never call - BufferGetPage() for itself. However, it remains the + BufferGetPage() for itself. However, it remains the caller's responsibility to pin/unpin and lock/unlock the buffers at appropriate times. Exclusive lock must be held on each target buffer - from before GenericXLogRegisterBuffer() until after - GenericXLogFinish(). + from before GenericXLogRegisterBuffer() until after + GenericXLogFinish(). @@ -97,7 +97,7 @@ The maximum number of buffers that can be registered for a generic WAL - record is MAX_GENERIC_XLOG_PAGES. An error will be thrown + record is MAX_GENERIC_XLOG_PAGES. An error will be thrown if this limit is exceeded. @@ -106,26 +106,26 @@ Generic WAL assumes that the pages to be modified have standard layout, and in particular that there is no useful data between - pd_lower and pd_upper. + pd_lower and pd_upper. Since you are modifying copies of buffer - pages, GenericXLogStart() does not start a critical + pages, GenericXLogStart() does not start a critical section. Thus, you can safely do memory allocation, error throwing, - etc. between GenericXLogStart() and - GenericXLogFinish(). The only actual critical section is - present inside GenericXLogFinish(). There is no need to - worry about calling GenericXLogAbort() during an error + etc. between GenericXLogStart() and + GenericXLogFinish(). 
The only actual critical section is + present inside GenericXLogFinish(). There is no need to + worry about calling GenericXLogAbort() during an error exit, either. - GenericXLogFinish() takes care of marking buffers dirty + GenericXLogFinish() takes care of marking buffers dirty and setting their LSNs. You do not need to do this explicitly. @@ -148,7 +148,7 @@ - If GENERIC_XLOG_FULL_IMAGE is not specified for a + If GENERIC_XLOG_FULL_IMAGE is not specified for a registered buffer, the generic WAL record contains a delta between the old and the new page images. This delta is based on byte-by-byte comparison. This is not very compact for the case of moving data diff --git a/doc/src/sgml/geqo.sgml b/doc/src/sgml/geqo.sgml index e0f8adcd6e..5120dfbb42 100644 --- a/doc/src/sgml/geqo.sgml +++ b/doc/src/sgml/geqo.sgml @@ -88,7 +88,7 @@ - According to the comp.ai.genetic FAQ it cannot be stressed too + According to the comp.ai.genetic FAQ it cannot be stressed too strongly that a GA is not a pure random search for a solution to a problem. A GA uses stochastic processes, but the result is distinctly non-random (better than random). @@ -222,7 +222,7 @@ are considered; and all the initially-determined relation scan plans are available. The estimated cost is the cheapest of these possibilities.) Join sequences with lower estimated cost are considered - more fit than those with higher cost. The genetic algorithm + more fit than those with higher cost. The genetic algorithm discards the least fit candidates. Then new candidates are generated by combining genes of more-fit candidates — that is, by using randomly-chosen portions of known low-cost join sequences to create @@ -235,20 +235,20 @@ This process is inherently nondeterministic, because of the randomized choices made during both the initial population selection and subsequent - mutation of the best candidates. To avoid surprising changes + mutation of the best candidates. To avoid surprising changes of the selected plan, each run of the GEQO algorithm restarts its - random number generator with the current - parameter setting. As long as geqo_seed and the other + random number generator with the current + parameter setting. As long as geqo_seed and the other GEQO parameters are kept fixed, the same plan will be generated for a given query (and other planner inputs such as statistics). To experiment - with different search paths, try changing geqo_seed. + with different search paths, try changing geqo_seed. Future Implementation Tasks for - <productname>PostgreSQL</> <acronym>GEQO</acronym> + PostgreSQL GEQO Work is still needed to improve the genetic algorithm parameter @@ -320,13 +320,13 @@ - + - + diff --git a/doc/src/sgml/gin.sgml b/doc/src/sgml/gin.sgml index 7c2321ec3c..cc7cd1ed2c 100644 --- a/doc/src/sgml/gin.sgml +++ b/doc/src/sgml/gin.sgml @@ -1,6 +1,6 @@ - + GIN Indexes @@ -21,15 +21,15 @@ - We use the word item to refer to a composite value that - is to be indexed, and the word key to refer to an element + We use the word item to refer to a composite value that + is to be indexed, and the word key to refer to an element value. GIN always stores and searches for keys, not item values per se. A GIN index stores a set of (key, posting list) pairs, - where a posting list is a set of row IDs in which the key + where a posting list is a set of row IDs in which the key occurs. The same row ID can appear in multiple posting lists, since an item can contain more than one key. 
Each key value is stored only once, so a GIN index is very compact for cases @@ -66,10 +66,10 @@ Built-in Operator Classes - The core PostgreSQL distribution + The core PostgreSQL distribution includes the GIN operator classes shown in - . - (Some of the optional modules described in + . + (Some of the optional modules described in provide additional GIN operator classes.) @@ -85,38 +85,38 @@ - array_ops - anyarray + array_ops + anyarray - && - <@ - = - @> + && + <@ + = + @> - jsonb_ops - jsonb + jsonb_ops + jsonb - ? - ?& - ?| - @> + ? + ?& + ?| + @> - jsonb_path_ops - jsonb + jsonb_path_ops + jsonb - @> + @> - tsvector_ops - tsvector + tsvector_ops + tsvector - @@ - @@@ + @@ + @@@ @@ -124,10 +124,10 @@ - Of the two operator classes for type jsonb, jsonb_ops - is the default. jsonb_path_ops supports fewer operators but + Of the two operator classes for type jsonb, jsonb_ops + is the default. jsonb_path_ops supports fewer operators but offers better performance for those operators. - See for details. + See for details. @@ -157,15 +157,15 @@ Datum *extractValue(Datum itemValue, int32 *nkeys, - bool **nullFlags) + bool **nullFlags) Returns a palloc'd array of keys given an item to be indexed. The - number of returned keys must be stored into *nkeys. + number of returned keys must be stored into *nkeys. If any of the keys can be null, also palloc an array of - *nkeys bool fields, store its address at - *nullFlags, and set these null flags as needed. - *nullFlags can be left NULL (its initial value) + *nkeys bool fields, store its address at + *nullFlags, and set these null flags as needed. + *nullFlags can be left NULL (its initial value) if all keys are non-null. The return value can be NULL if the item contains no keys. @@ -175,40 +175,40 @@ Datum *extractQuery(Datum query, int32 *nkeys, StrategyNumber n, bool **pmatch, Pointer **extra_data, - bool **nullFlags, int32 *searchMode) + bool **nullFlags, int32 *searchMode) Returns a palloc'd array of keys given a value to be queried; that is, - query is the value on the right-hand side of an + query is the value on the right-hand side of an indexable operator whose left-hand side is the indexed column. - n is the strategy number of the operator within the - operator class (see ). - Often, extractQuery will need - to consult n to determine the data type of - query and the method it should use to extract key values. - The number of returned keys must be stored into *nkeys. + n is the strategy number of the operator within the + operator class (see ). + Often, extractQuery will need + to consult n to determine the data type of + query and the method it should use to extract key values. + The number of returned keys must be stored into *nkeys. If any of the keys can be null, also palloc an array of - *nkeys bool fields, store its address at - *nullFlags, and set these null flags as needed. - *nullFlags can be left NULL (its initial value) + *nkeys bool fields, store its address at + *nullFlags, and set these null flags as needed. + *nullFlags can be left NULL (its initial value) if all keys are non-null. - The return value can be NULL if the query contains no keys. + The return value can be NULL if the query contains no keys. - searchMode is an output argument that allows - extractQuery to specify details about how the search + searchMode is an output argument that allows + extractQuery to specify details about how the search will be done. 
- If *searchMode is set to - GIN_SEARCH_MODE_DEFAULT (which is the value it is + If *searchMode is set to + GIN_SEARCH_MODE_DEFAULT (which is the value it is initialized to before call), only items that match at least one of the returned keys are considered candidate matches. - If *searchMode is set to - GIN_SEARCH_MODE_INCLUDE_EMPTY, then in addition to items + If *searchMode is set to + GIN_SEARCH_MODE_INCLUDE_EMPTY, then in addition to items containing at least one matching key, items that contain no keys at all are considered candidate matches. (This mode is useful for implementing is-subset-of operators, for example.) - If *searchMode is set to GIN_SEARCH_MODE_ALL, + If *searchMode is set to GIN_SEARCH_MODE_ALL, then all non-null items in the index are considered candidate matches, whether they match any of the returned keys or not. (This mode is much slower than the other two choices, since it requires @@ -217,33 +217,33 @@ in most cases is probably not a good candidate for a GIN operator class.) The symbols to use for setting this mode are defined in - access/gin.h. + access/gin.h. - pmatch is an output argument for use when partial match - is supported. To use it, extractQuery must allocate - an array of *nkeys booleans and store its address at - *pmatch. Each element of the array should be set to TRUE - if the corresponding key requires partial match, FALSE if not. - If *pmatch is set to NULL then GIN assumes partial match + pmatch is an output argument for use when partial match + is supported. To use it, extractQuery must allocate + an array of *nkeys bools and store its address at + *pmatch. Each element of the array should be set to true + if the corresponding key requires partial match, false if not. + If *pmatch is set to NULL then GIN assumes partial match is not required. The variable is initialized to NULL before call, so this argument can simply be ignored by operator classes that do not support partial match. - extra_data is an output argument that allows - extractQuery to pass additional data to the - consistent and comparePartial methods. - To use it, extractQuery must allocate - an array of *nkeys pointers and store its address at - *extra_data, then store whatever it wants to into the + extra_data is an output argument that allows + extractQuery to pass additional data to the + consistent and comparePartial methods. + To use it, extractQuery must allocate + an array of *nkeys pointers and store its address at + *extra_data, then store whatever it wants to into the individual pointers. The variable is initialized to NULL before call, so this argument can simply be ignored by operator classes that - do not require extra data. If *extra_data is set, the - whole array is passed to the consistent method, and - the appropriate element to the comparePartial method. + do not require extra data. If *extra_data is set, the + whole array is passed to the consistent method, and + the appropriate element to the comparePartial method. @@ -251,10 +251,10 @@ An operator class must also provide a function to check if an indexed item - matches the query. It comes in two flavors, a boolean consistent - function, and a ternary triConsistent function. - triConsistent covers the functionality of both, so providing - triConsistent alone is sufficient. However, if the boolean + matches the query. It comes in two flavors, a boolean consistent + function, and a ternary triConsistent function. + triConsistent covers the functionality of both, so providing + triConsistent alone is sufficient. 
However, if the boolean variant is significantly cheaper to calculate, it can be advantageous to provide both. If only the boolean variant is provided, some optimizations that depend on refuting index items before fetching all the keys are @@ -264,48 +264,48 @@ bool consistent(bool check[], StrategyNumber n, Datum query, int32 nkeys, Pointer extra_data[], bool *recheck, - Datum queryKeys[], bool nullFlags[]) + Datum queryKeys[], bool nullFlags[]) - Returns TRUE if an indexed item satisfies the query operator with - strategy number n (or might satisfy it, if the recheck + Returns true if an indexed item satisfies the query operator with + strategy number n (or might satisfy it, if the recheck indication is returned). This function does not have direct access to the indexed item's value, since GIN does not store items explicitly. Rather, what is available is knowledge about which key values extracted from the query appear in a given - indexed item. The check array has length - nkeys, which is the same as the number of keys previously - returned by extractQuery for this query datum. + indexed item. The check array has length + nkeys, which is the same as the number of keys previously + returned by extractQuery for this query datum. Each element of the - check array is TRUE if the indexed item contains the - corresponding query key, i.e., if (check[i] == TRUE) the i-th key of the - extractQuery result array is present in the indexed item. - The original query datum is - passed in case the consistent method needs to consult it, - and so are the queryKeys[] and nullFlags[] - arrays previously returned by extractQuery. - extra_data is the extra-data array returned by - extractQuery, or NULL if none. + check array is true if the indexed item contains the + corresponding query key, i.e., if (check[i] == true) the i-th key of the + extractQuery result array is present in the indexed item. + The original query datum is + passed in case the consistent method needs to consult it, + and so are the queryKeys[] and nullFlags[] + arrays previously returned by extractQuery. + extra_data is the extra-data array returned by + extractQuery, or NULL if none. - When extractQuery returns a null key in - queryKeys[], the corresponding check[] element - is TRUE if the indexed item contains a null key; that is, the - semantics of check[] are like IS NOT DISTINCT - FROM. The consistent function can examine the - corresponding nullFlags[] element if it needs to tell + When extractQuery returns a null key in + queryKeys[], the corresponding check[] element + is true if the indexed item contains a null key; that is, the + semantics of check[] are like IS NOT DISTINCT + FROM. The consistent function can examine the + corresponding nullFlags[] element if it needs to tell the difference between a regular value match and a null match. - On success, *recheck should be set to TRUE if the heap - tuple needs to be rechecked against the query operator, or FALSE if - the index test is exact. That is, a FALSE return value guarantees - that the heap tuple does not match the query; a TRUE return value with - *recheck set to FALSE guarantees that the heap tuple does - match the query; and a TRUE return value with - *recheck set to TRUE means that the heap tuple might match + On success, *recheck should be set to true if the heap + tuple needs to be rechecked against the query operator, or false if + the index test is exact. 
That is, a false return value guarantees + that the heap tuple does not match the query; a true return value with + *recheck set to false guarantees that the heap tuple does + match the query; and a true return value with + *recheck set to true means that the heap tuple might match the query, so it needs to be fetched and rechecked by evaluating the query operator directly against the originally indexed item. @@ -315,30 +315,30 @@ GinTernaryValue triConsistent(GinTernaryValue check[], StrategyNumber n, Datum query, int32 nkeys, Pointer extra_data[], - Datum queryKeys[], bool nullFlags[]) + Datum queryKeys[], bool nullFlags[]) - triConsistent is similar to consistent, - but instead of booleans in the check vector, there are + triConsistent is similar to consistent, + but instead of booleans in the check vector, there are three possible values for each - key: GIN_TRUE, GIN_FALSE and - GIN_MAYBE. GIN_FALSE and GIN_TRUE + key: GIN_TRUE, GIN_FALSE and + GIN_MAYBE. GIN_FALSE and GIN_TRUE have the same meaning as regular boolean values, while - GIN_MAYBE means that the presence of that key is not known. - When GIN_MAYBE values are present, the function should only - return GIN_TRUE if the item certainly matches whether or + GIN_MAYBE means that the presence of that key is not known. + When GIN_MAYBE values are present, the function should only + return GIN_TRUE if the item certainly matches whether or not the index item contains the corresponding query keys. Likewise, the - function must return GIN_FALSE only if the item certainly - does not match, whether or not it contains the GIN_MAYBE - keys. If the result depends on the GIN_MAYBE entries, i.e., + function must return GIN_FALSE only if the item certainly + does not match, whether or not it contains the GIN_MAYBE + keys. If the result depends on the GIN_MAYBE entries, i.e., the match cannot be confirmed or refuted based on the known query keys, - the function must return GIN_MAYBE. + the function must return GIN_MAYBE. - When there are no GIN_MAYBE values in the check - vector, a GIN_MAYBE return value is the equivalent of - setting the recheck flag in the - boolean consistent function. + When there are no GIN_MAYBE values in the check + vector, a GIN_MAYBE return value is the equivalent of + setting the recheck flag in the + boolean consistent function. @@ -352,7 +352,7 @@ - int compare(Datum a, Datum b) + int compare(Datum a, Datum b) Compares two keys (not indexed items!) and returns an integer less than @@ -364,13 +364,13 @@ - Alternatively, if the operator class does not provide a compare + Alternatively, if the operator class does not provide a compare method, GIN will look up the default btree operator class for the index key data type, and use its comparison function. It is recommended to specify the comparison function in a GIN operator class that is meant for just one data type, as looking up the btree operator class costs a few cycles. However, polymorphic GIN operator classes (such - as array_ops) typically cannot specify a single comparison + as array_ops) typically cannot specify a single comparison function. @@ -381,7 +381,7 @@ int comparePartial(Datum partial_key, Datum key, StrategyNumber n, - Pointer extra_data) + Pointer extra_data) Compare a partial-match query key to an index key. 
Returns an integer @@ -389,11 +389,11 @@ does not match the query, but the index scan should continue; zero means that the index key does match the query; greater than zero indicates that the index scan should stop because no more matches - are possible. The strategy number n of the operator + are possible. The strategy number n of the operator that generated the partial match query is provided, in case its semantics are needed to determine when to end the scan. Also, - extra_data is the corresponding element of the extra-data - array made by extractQuery, or NULL if none. + extra_data is the corresponding element of the extra-data + array made by extractQuery, or NULL if none. Null keys are never passed to this function. @@ -402,25 +402,25 @@ - To support partial match queries, an operator class must - provide the comparePartial method, and its - extractQuery method must set the pmatch + To support partial match queries, an operator class must + provide the comparePartial method, and its + extractQuery method must set the pmatch parameter when a partial-match query is encountered. See - for details. + for details. - The actual data types of the various Datum values mentioned + The actual data types of the various Datum values mentioned above vary depending on the operator class. The item values passed to - extractValue are always of the operator class's input type, and - all key values must be of the class's STORAGE type. The type of - the query argument passed to extractQuery, - consistent and triConsistent is whatever is the + extractValue are always of the operator class's input type, and + all key values must be of the class's STORAGE type. The type of + the query argument passed to extractQuery, + consistent and triConsistent is whatever is the right-hand input type of the class member operator identified by the strategy number. This need not be the same as the indexed type, so long as key values of the correct type can be extracted from it. However, it is recommended that the SQL declarations of these three support functions use - the opclass's indexed data type for the query argument, even + the opclass's indexed data type for the query argument, even though the actual type might be something else depending on the operator. @@ -434,8 +434,8 @@ constructed over keys, where each key is an element of one or more indexed items (a member of an array, for example) and where each tuple in a leaf page contains either a pointer to a B-tree of heap pointers (a - posting tree), or a simple list of heap pointers (a posting - list) when the list is small enough to fit into a single index tuple along + posting tree), or a simple list of heap pointers (a posting + list) when the list is small enough to fit into a single index tuple along with the key value. @@ -443,7 +443,7 @@ As of PostgreSQL 9.1, null key values can be included in the index. Also, placeholder nulls are included in the index for indexed items that are null or contain no keys according to - extractValue. This allows searches that should find empty + extractValue. This allows searches that should find empty items to do so.
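   As a concrete illustration of the interface described above, here is a
   minimal SQL sketch using the built-in array_ops GIN operator class; the
   table and index names are illustrative.  The <@ (is-subset-of) query is
   the kind of search for which the GIN_SEARCH_MODE_INCLUDE_EMPTY mode and
   the placeholder entries just described exist:

-- Minimal sketch; "docs" and its columns are illustrative.
CREATE TABLE docs (id serial PRIMARY KEY, tags text[]);
CREATE INDEX docs_tags_gin ON docs USING gin (tags);

-- Is-subset-of search: matches rows whose tags are all contained in the
-- given list, including rows whose tags array is empty.
SELECT id FROM docs WHERE tags <@ ARRAY['postgres', 'gin'];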
@@ -461,12 +461,12 @@ intrinsic nature of inverted indexes: inserting or updating one heap row can cause many inserts into the index (one for each key extracted from the indexed item). As of PostgreSQL 8.4, - GIN is capable of postponing much of this work by inserting + GIN is capable of postponing much of this work by inserting new tuples into a temporary, unsorted list of pending entries. When the table is vacuumed or autoanalyzed, or when gin_clean_pending_list function is called, or if the pending list becomes larger than - , the entries are moved to the + , the entries are moved to the main GIN data structure using the same bulk insert techniques used during initial index creation. This greatly improves GIN index update speed, even counting the additional @@ -479,7 +479,7 @@ of pending entries in addition to searching the regular index, and so a large list of pending entries will slow searches significantly. Another disadvantage is that, while most updates are fast, an update - that causes the pending list to become too large will incur an + that causes the pending list to become too large will incur an immediate cleanup cycle and thus be much slower than other updates. Proper use of autovacuum can minimize both of these problems.
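   The pending list can also be inspected and flushed by hand.  A hedged
   sketch, reusing the illustrative index name from above and assuming the
   pgstattuple extension is available:

-- pgstatginindex() (from the pgstattuple extension) reports the current
-- size of the pending list.
CREATE EXTENSION IF NOT EXISTS pgstattuple;
SELECT pending_pages, pending_tuples FROM pgstatginindex('docs_tags_gin');

-- Merge the pending entries into the main GIN structure now, rather than
-- waiting for (auto)vacuum or for the list to reach gin_pending_list_limit:
SELECT gin_clean_pending_list('docs_tags_gin');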
@@ -488,7 +488,7 @@ If consistent response time is more important than update speed, use of pending entries can be disabled by turning off the fastupdate storage parameter for a - GIN index. See + GIN index. See for details. @@ -497,15 +497,15 @@ Partial Match Algorithm - GIN can support partial match queries, in which the query + GIN can support partial match queries, in which the query does not determine an exact match for one or more keys, but the possible matches fall within a reasonably narrow range of key values (within the - key sorting order determined by the compare support method). - The extractQuery method, instead of returning a key value + key sorting order determined by the compare support method). + The extractQuery method, instead of returning a key value to be matched exactly, returns a key value that is the lower bound of - the range to be searched, and sets the pmatch flag true. - The key range is then scanned using the comparePartial - method. comparePartial must return zero for a matching + the range to be searched, and sets the pmatch flag true. + The key range is then scanned using the comparePartial + method. comparePartial must return zero for a matching index key, less than zero for a non-match that is still within the range to be searched, or greater than zero if the index key is past the range that could match. @@ -531,40 +531,40 @@ As of PostgreSQL 8.4, this advice is less necessary since delayed indexing is used (see for details). But for very large updates + linkend="gin-fast-update"/> for details). But for very large updates it may still be best to drop and recreate the index. - + Build time for a GIN index is very sensitive to - the maintenance_work_mem setting; it doesn't pay to + the maintenance_work_mem setting; it doesn't pay to skimp on work memory during index creation. - + During a series of insertions into an existing GIN - index that has fastupdate enabled, the system will clean up + index that has fastupdate enabled, the system will clean up the pending-entry list whenever the list grows larger than - gin_pending_list_limit. To avoid fluctuations in observed + gin_pending_list_limit. To avoid fluctuations in observed response time, it's desirable to have pending-list cleanup occur in the background (i.e., via autovacuum). Foreground cleanup operations - can be avoided by increasing gin_pending_list_limit + can be avoided by increasing gin_pending_list_limit or making autovacuum more aggressive. However, enlarging the threshold of the cleanup operation means that if a foreground cleanup does occur, it will take even longer. - gin_pending_list_limit can be overridden for individual + gin_pending_list_limit can be overridden for individual GIN indexes by changing storage parameters, and which allows each GIN index to have its own cleanup threshold. For example, it's possible to increase the threshold only for the GIN @@ -574,7 +574,7 @@ - + The primary goal of developing GIN indexes was @@ -616,7 +616,7 @@ GIN assumes that indexable operators are strict. This - means that extractValue will not be called at all on a null + means that extractValue will not be called at all on a null item value (instead, a placeholder index entry is created automatically), and extractQuery will not be called on a null query value either (instead, the query is presumed to be unsatisfiable). Note @@ -629,36 +629,36 @@ Examples - The core PostgreSQL distribution + The core PostgreSQL distribution includes the GIN operator classes previously shown in - . 
- The following contrib modules also contain + . + The following contrib modules also contain GIN operator classes: - btree_gin + btree_gin B-tree equivalent functionality for several data types - hstore + hstore Module for storing (key, value) pairs - intarray + intarray Enhanced support for int[] - pg_trgm + pg_trgm Text similarity using trigram matching diff --git a/doc/src/sgml/gist.sgml b/doc/src/sgml/gist.sgml index b3cc347e5c..44a3b2c03c 100644 --- a/doc/src/sgml/gist.sgml +++ b/doc/src/sgml/gist.sgml @@ -1,6 +1,6 @@ - + GiST Indexes @@ -44,10 +44,10 @@ Built-in Operator Classes - The core PostgreSQL distribution + The core PostgreSQL distribution includes the GiST operator classes shown in - . - (Some of the optional modules described in + . + (Some of the optional modules described in provide additional GiST operator classes.) @@ -64,142 +64,142 @@ - box_ops - box + box_ops + box - && - &> - &< - &<| - >> - << - <<| - <@ - @> - @ - |&> - |>> - ~ - ~= + && + &> + &< + &<| + >> + << + <<| + <@ + @> + @ + |&> + |>> + ~ + ~= - circle_ops - circle + circle_ops + circle - && - &> - &< - &<| - >> - << - <<| - <@ - @> - @ - |&> - |>> - ~ - ~= + && + &> + &< + &<| + >> + << + <<| + <@ + @> + @ + |&> + |>> + ~ + ~= - <-> + <-> - inet_ops - inet, cidr + inet_ops + inet, cidr - && - >> - >>= - > - >= - <> - << - <<= - < - <= - = + && + >> + >>= + > + >= + <> + << + <<= + < + <= + = - point_ops - point + point_ops + point - >> - >^ - << - <@ - <@ - <@ - <^ - ~= + >> + >^ + << + <@ + <@ + <@ + <^ + ~= - <-> + <-> - poly_ops - polygon + poly_ops + polygon - && - &> - &< - &<| - >> - << - <<| - <@ - @> - @ - |&> - |>> - ~ - ~= + && + &> + &< + &<| + >> + << + <<| + <@ + @> + @ + |&> + |>> + ~ + ~= - <-> + <-> - range_ops + range_ops any range type - && - &> - &< - >> - << - <@ - -|- - = - @> - @> + && + &> + &< + >> + << + <@ + -|- + = + @> + @> - tsquery_ops - tsquery + tsquery_ops + tsquery - <@ - @> + <@ + @> - tsvector_ops - tsvector + tsvector_ops + tsvector - @@ + @@ @@ -209,9 +209,9 @@ - For historical reasons, the inet_ops operator class is - not the default class for types inet and cidr. - To use it, mention the class name in CREATE INDEX, + For historical reasons, the inet_ops operator class is + not the default class for types inet and cidr. + To use it, mention the class name in CREATE INDEX, for example CREATE INDEX ON my_table USING GIST (my_inet_column inet_ops); @@ -267,55 +267,56 @@ CREATE INDEX ON my_table USING GIST (my_inet_column inet_ops); - There are seven methods that an index operator class for - GiST must provide, and two that are optional. + There are five methods that an index operator class for + GiST must provide, and four that are optional. Correctness of the index is ensured - by proper implementation of the same, consistent - and union methods, while efficiency (size and speed) of the - index will depend on the penalty and picksplit + by proper implementation of the same, consistent + and union methods, while efficiency (size and speed) of the + index will depend on the penalty and picksplit methods. - The remaining two basic methods are compress and - decompress, which allow an index to have internal tree data of + Two optional methods are compress and + decompress, which allow an index to have internal tree data of a different type than the data it indexes. The leaves are to be of the indexed data type, while the other tree nodes can be of any C struct (but - you still have to follow PostgreSQL data type rules here, - see about varlena for variable sized data). 
If the tree's - internal data type exists at the SQL level, the STORAGE option - of the CREATE OPERATOR CLASS command can be used. - The optional eighth method is distance, which is needed + you still have to follow PostgreSQL data type rules here, + see about varlena for variable sized data). If the tree's + internal data type exists at the SQL level, the STORAGE option + of the CREATE OPERATOR CLASS command can be used. + The optional eighth method is distance, which is needed if the operator class wishes to support ordered scans (nearest-neighbor - searches). The optional ninth method fetch is needed if the - operator class wishes to support index-only scans. + searches). The optional ninth method fetch is needed if the + operator class wishes to support index-only scans, except when the + compress method is omitted. - consistent + consistent - Given an index entry p and a query value q, + Given an index entry p and a query value q, this function determines whether the index entry is - consistent with the query; that is, could the predicate - indexed_column - indexable_operator q be true for + consistent with the query; that is, could the predicate + indexed_column + indexable_operator q be true for any row represented by the index entry? For a leaf index entry this is equivalent to testing the indexable condition, while for an internal tree node this determines whether it is necessary to scan the subtree of the index represented by the tree node. When the result is - true, a recheck flag must also be returned. + true, a recheck flag must also be returned. This indicates whether the predicate is certainly true or only possibly - true. If recheck = false then the index has - tested the predicate condition exactly, whereas if recheck - = true the row is only a candidate match. In that case the + true. If recheck = false then the index has + tested the predicate condition exactly, whereas if recheck + = true the row is only a candidate match. In that case the system will automatically evaluate the - indexable_operator against the actual row value to see + indexable_operator against the actual row value to see if it is really a match. This convention allows GiST to support both lossless and lossy index structures. - The SQL declaration of the function must look like this: + The SQL declaration of the function must look like this: CREATE OR REPLACE FUNCTION my_consistent(internal, data_type, smallint, oid, internal) @@ -355,23 +356,23 @@ my_consistent(PG_FUNCTION_ARGS) } - Here, key is an element in the index and query - the value being looked up in the index. The StrategyNumber + Here, key is an element in the index and query + the value being looked up in the index. The StrategyNumber parameter indicates which operator of your operator class is being applied — it matches one of the operator numbers in the - CREATE OPERATOR CLASS command. + CREATE OPERATOR CLASS command. Depending on which operators you have included in the class, the data - type of query could vary with the operator, since it will + type of query could vary with the operator, since it will be whatever type is on the righthand side of the operator, which might be different from the indexed data type appearing on the lefthand side. (The above code skeleton assumes that only one type is possible; if - not, fetching the query argument value would have to depend + not, fetching the query argument value would have to depend on the operator.) 
It is recommended that the SQL declaration of - the consistent function use the opclass's indexed data - type for the query argument, even though the actual type + the consistent function use the opclass's indexed data + type for the query argument, even though the actual type might be something else depending on the operator. @@ -379,7 +380,7 @@ my_consistent(PG_FUNCTION_ARGS) - union + union This method consolidates information in the tree. Given a set of @@ -388,7 +389,7 @@ my_consistent(PG_FUNCTION_ARGS) - The SQL declaration of the function must look like this: + The SQL declaration of the function must look like this: CREATE OR REPLACE FUNCTION my_union(internal, internal) @@ -438,42 +439,44 @@ my_union(PG_FUNCTION_ARGS) As you can see, in this skeleton we're dealing with a data type - where union(X, Y, Z) = union(union(X, Y), Z). It's easy + where union(X, Y, Z) = union(union(X, Y), Z). It's easy enough to support data types where this is not the case, by implementing the proper union algorithm in this - GiST support method. + GiST support method. - The result of the union function must be a value of the + The result of the union function must be a value of the index's storage type, whatever that is (it might or might not be - different from the indexed column's type). The union - function should return a pointer to newly palloc()ed + different from the indexed column's type). The union + function should return a pointer to newly palloc()ed memory. You can't just return the input value as-is, even if there is no type change. - As shown above, the union function's - first internal argument is actually - a GistEntryVector pointer. The second argument is a + As shown above, the union function's + first internal argument is actually + a GistEntryVector pointer. The second argument is a pointer to an integer variable, which can be ignored. (It used to be - required that the union function store the size of its + required that the union function store the size of its result value into that variable, but this is no longer necessary.) - compress + compress - Converts the data item into a format suitable for physical storage in + Converts a data item into a format suitable for physical storage in an index page. + If the compress method is omitted, data items are stored + in the index without modification. - The SQL declaration of the function must look like this: + The SQL declaration of the function must look like this: CREATE OR REPLACE FUNCTION my_compress(internal) @@ -516,7 +519,7 @@ my_compress(PG_FUNCTION_ARGS) - You have to adapt compressed_data_type to the specific + You have to adapt compressed_data_type to the specific type you're converting to in order to compress your leaf nodes, of course. @@ -524,16 +527,24 @@ my_compress(PG_FUNCTION_ARGS) - decompress + decompress - The reverse of the compress method. Converts the - index representation of the data item into a format that can be - manipulated by the other GiST methods in the operator class. + Converts the stored representation of a data item into a format that + can be manipulated by the other GiST methods in the operator class. + If the decompress method is omitted, it is assumed that + the other GiST methods can work directly on the stored data format. + (decompress is not necessarily the reverse of + the compress method; in particular, + if compress is lossy then it's impossible + for decompress to exactly reconstruct the original + data. 
decompress is not necessarily equivalent + to fetch, either, since the other GiST methods might not + require full reconstruction of the data.) - The SQL declaration of the function must look like this: + The SQL declaration of the function must look like this: CREATE OR REPLACE FUNCTION my_decompress(internal) @@ -555,13 +566,14 @@ my_decompress(PG_FUNCTION_ARGS) The above skeleton is suitable for the case where no decompression - is needed. + is needed. (But, of course, omitting the method altogether is even + easier, and is recommended in such cases.) - penalty + penalty Returns a value indicating the cost of inserting the new @@ -572,7 +584,7 @@ my_decompress(PG_FUNCTION_ARGS) - The SQL declaration of the function must look like this: + The SQL declaration of the function must look like this: CREATE OR REPLACE FUNCTION my_penalty(internal, internal, internal) @@ -600,15 +612,15 @@ my_penalty(PG_FUNCTION_ARGS) } - For historical reasons, the penalty function doesn't - just return a float result; instead it has to store the value + For historical reasons, the penalty function doesn't + just return a float result; instead it has to store the value at the location indicated by the third argument. The return value per se is ignored, though it's conventional to pass back the address of that argument. - The penalty function is crucial to good performance of + The penalty function is crucial to good performance of the index. It'll get used at insertion time to determine which branch to follow when choosing where to add the new entry in the tree. At query time, the more balanced the index, the quicker the lookup. @@ -617,7 +629,7 @@ my_penalty(PG_FUNCTION_ARGS) - picksplit + picksplit When an index page split is necessary, this function decides which @@ -626,7 +638,7 @@ my_penalty(PG_FUNCTION_ARGS) - The SQL declaration of the function must look like this: + The SQL declaration of the function must look like this: CREATE OR REPLACE FUNCTION my_picksplit(internal, internal) @@ -713,33 +725,33 @@ my_picksplit(PG_FUNCTION_ARGS) } - Notice that the picksplit function's result is delivered - by modifying the passed-in v structure. The return + Notice that the picksplit function's result is delivered + by modifying the passed-in v structure. The return value per se is ignored, though it's conventional to pass back the - address of v. + address of v. - Like penalty, the picksplit function + Like penalty, the picksplit function is crucial to good performance of the index. Designing suitable - penalty and picksplit implementations + penalty and picksplit implementations is where the challenge of implementing well-performing - GiST indexes lies. + GiST indexes lies. - same + same Returns true if two index entries are identical, false otherwise. - (An index entry is a value of the index's storage type, + (An index entry is a value of the index's storage type, not necessarily the original indexed column's type.) - The SQL declaration of the function must look like this: + The SQL declaration of the function must look like this: CREATE OR REPLACE FUNCTION my_same(storage_type, storage_type, internal) @@ -765,7 +777,7 @@ my_same(PG_FUNCTION_ARGS) } - For historical reasons, the same function doesn't + For historical reasons, the same function doesn't just return a Boolean result; instead it has to store the flag at the location indicated by the third argument. 
The return value per se is ignored, though it's conventional to pass back the @@ -775,15 +787,15 @@ my_same(PG_FUNCTION_ARGS) - distance + distance - Given an index entry p and a query value q, + Given an index entry p and a query value q, this function determines the index entry's - distance from the query value. This function must be + distance from the query value. This function must be supplied if the operator class contains any ordering operators. A query using the ordering operator will be implemented by returning - index entries with the smallest distance values first, + index entries with the smallest distance values first, so the results must be consistent with the operator's semantics. For a leaf index entry the result just represents the distance to the index entry; for an internal tree node, the result must be the @@ -791,7 +803,7 @@ my_same(PG_FUNCTION_ARGS) - The SQL declaration of the function must look like this: + The SQL declaration of the function must look like this: CREATE OR REPLACE FUNCTION my_distance(internal, data_type, smallint, oid, internal) @@ -824,8 +836,8 @@ my_distance(PG_FUNCTION_ARGS) } - The arguments to the distance function are identical to - the arguments of the consistent function. + The arguments to the distance function are identical to + the arguments of the consistent function. @@ -835,31 +847,31 @@ my_distance(PG_FUNCTION_ARGS) geometric applications. For an internal tree node, the distance returned must not be greater than the distance to any of the child nodes. If the returned distance is not exact, the function must set - *recheck to true. (This is not necessary for internal tree + *recheck to true. (This is not necessary for internal tree nodes; for them, the calculation is always assumed to be inexact.) In this case the executor will calculate the accurate distance after fetching the tuple from the heap, and reorder the tuples if necessary. - If the distance function returns *recheck = true for any + If the distance function returns *recheck = true for any leaf node, the original ordering operator's return type must - be float8 or float4, and the distance function's + be float8 or float4, and the distance function's result values must be comparable to those of the original ordering operator, since the executor will sort using both distance function results and recalculated ordering-operator results. Otherwise, the - distance function's result values can be any finite float8 + distance function's result values can be any finite float8 values, so long as the relative order of the result values matches the order returned by the ordering operator. (Infinity and minus infinity are used internally to handle cases such as nulls, so it is not - recommended that distance functions return these values.) + recommended that distance functions return these values.) - fetch + fetch Converts the compressed index representation of a data item into the @@ -868,7 +880,7 @@ my_distance(PG_FUNCTION_ARGS) - The SQL declaration of the function must look like this: + The SQL declaration of the function must look like this: CREATE OR REPLACE FUNCTION my_fetch(internal) @@ -877,13 +889,15 @@ AS 'MODULE_PATHNAME' LANGUAGE C STRICT; - The argument is a pointer to a GISTENTRY struct. On - entry, its key field contains a non-NULL leaf datum in - compressed form. The return value is another GISTENTRY - struct, whose key field contains the same datum in its + The argument is a pointer to a GISTENTRY struct. 
On + entry, its key field contains a non-NULL leaf datum in + compressed form. The return value is another GISTENTRY + struct, whose key field contains the same datum in its original, uncompressed form. If the opclass's compress function does - nothing for leaf entries, the fetch method can return the - argument as-is. + nothing for leaf entries, the fetch method can return the + argument as-is. Or, if the opclass does not have a compress function, + the fetch method can be omitted as well, since it would + necessarily be a no-op. @@ -919,7 +933,7 @@ my_fetch(PG_FUNCTION_ARGS) If the compress method is lossy for leaf entries, the operator class cannot support index-only scans, and must not define - a fetch function. + a fetch function. @@ -928,15 +942,15 @@ my_fetch(PG_FUNCTION_ARGS) All the GiST support methods are normally called in short-lived memory - contexts; that is, CurrentMemoryContext will get reset after + contexts; that is, CurrentMemoryContext will get reset after each tuple is processed. It is therefore not very important to worry about pfree'ing everything you palloc. However, in some cases it's useful for a support method to cache data across repeated calls. To do that, allocate - the longer-lived data in fcinfo->flinfo->fn_mcxt, and - keep a pointer to it in fcinfo->flinfo->fn_extra. Such + the longer-lived data in fcinfo->flinfo->fn_mcxt, and + keep a pointer to it in fcinfo->flinfo->fn_extra. Such data will survive for the life of the index operation (e.g., a single GiST index scan, index build, or index tuple insertion). Be careful to pfree - the previous value when replacing a fn_extra value, or the leak + the previous value when replacing a fn_extra value, or the leak will accumulate for the duration of the operation. @@ -960,7 +974,7 @@ my_fetch(PG_FUNCTION_ARGS) - However, buffering index build needs to call the penalty + However, buffering index build needs to call the penalty function more often, which consumes some extra CPU resources. Also, the buffers used in the buffering build need temporary disk space, up to the size of the resulting index. Buffering can also influence the quality @@ -971,7 +985,7 @@ my_fetch(PG_FUNCTION_ARGS) By default, a GiST index build switches to the buffering method when the - index size reaches . It can + index size reaches . It can be manually turned on or off by the buffering parameter to the CREATE INDEX command. The default behavior is good for most cases, but turning buffering off might speed up the build somewhat if the input @@ -988,57 +1002,57 @@ my_fetch(PG_FUNCTION_ARGS) The PostgreSQL source distribution includes several examples of index methods implemented using GiST. The core system currently provides text search - support (indexing for tsvector and tsquery) as well as + support (indexing for tsvector and tsquery) as well as R-Tree equivalent functionality for some of the built-in geometric data types - (see src/backend/access/gist/gistproc.c). The following - contrib modules also contain GiST + (see src/backend/access/gist/gistproc.c). 
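   Before the contrib examples, here is a hedged sketch of how the support
   methods described earlier in this chapter are tied together at the SQL
   level.  All names (mytype, the && operator, and the my_* functions) are
   illustrative, and the optional compress, decompress, distance and fetch
   methods (support functions 3, 4, 8 and 9) are omitted:

-- Illustrative only: registers the five required GiST support methods
-- (consistent, union, penalty, picksplit, same) for a hypothetical type.
CREATE OPERATOR CLASS my_ops
    DEFAULT FOR TYPE mytype USING gist AS
        OPERATOR  3  && ,
        FUNCTION  1  my_consistent (internal, mytype, smallint, oid, internal),
        FUNCTION  2  my_union (internal, internal),
        FUNCTION  5  my_penalty (internal, internal, internal),
        FUNCTION  6  my_picksplit (internal, internal),
        FUNCTION  7  my_same (mytype, mytype, internal);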
The following + contrib modules also contain GiST operator classes: - btree_gist + btree_gist B-tree equivalent functionality for several data types - cube + cube Indexing for multidimensional cubes - hstore + hstore Module for storing (key, value) pairs - intarray + intarray RD-Tree for one-dimensional array of int4 values - ltree + ltree Indexing for tree-like structures - pg_trgm + pg_trgm Text similarity using trigram matching - seg + seg Indexing for float ranges diff --git a/doc/src/sgml/high-availability.sgml b/doc/src/sgml/high-availability.sgml index 1a152cf118..faf8e71854 100644 --- a/doc/src/sgml/high-availability.sgml +++ b/doc/src/sgml/high-availability.sgml @@ -3,12 +3,12 @@ High Availability, Load Balancing, and Replication - high availability - failover - replication - load balancing - clustering - data partitioning + high availability + failover + replication + load balancing + clustering + data partitioning Database servers can work together to allow a second server to @@ -38,12 +38,12 @@ Some solutions deal with synchronization by allowing only one server to modify the data. Servers that can modify data are - called read/write, master or primary servers. - Servers that track changes in the master are called standby - or secondary servers. A standby server that cannot be connected + called read/write, master or primary servers. + Servers that track changes in the master are called standby + or secondary servers. A standby server that cannot be connected to until it is promoted to a master server is called a warm - standby server, and one that can accept connections and serves read-only - queries is called a hot standby server. + standby server, and one that can accept connections and serves read-only + queries is called a hot standby server. @@ -99,8 +99,8 @@ Shared hardware functionality is common in network storage devices. Using a network file system is also possible, though care must be - taken that the file system has full POSIX behavior (see ). One significant limitation of this + taken that the file system has full POSIX behavior (see ). One significant limitation of this method is that if the shared disk array fails or becomes corrupt, the primary and standby servers are both nonfunctional. Another issue is that the standby server should never access the shared storage while @@ -121,7 +121,7 @@ the mirroring must be done in a way that ensures the standby server has a consistent copy of the file system — specifically, writes to the standby must be done in the same order as those on the master. - DRBD is a popular file system replication solution + DRBD is a popular file system replication solution for Linux. @@ -143,7 +143,7 @@ protocol to make nodes agree on a serializable transactional order. Warm and hot standby servers can be kept current by reading a - stream of write-ahead log (WAL) + stream of write-ahead log (WAL) records. If the main server fails, the standby contains almost all of the data of the main server, and can be quickly made the new master database server. This can be synchronous or @@ -151,9 +151,9 @@ protocol to make nodes agree on a serializable transactional order. A standby server can be implemented using file-based log shipping - () or streaming replication (see - ), or a combination of both. For - information on hot standby, see . + () or streaming replication (see + ), or a combination of both. For + information on hot standby, see . @@ -169,8 +169,8 @@ protocol to make nodes agree on a serializable transactional order. 
individual tables to be replicated. Logical replication doesn't require a particular server to be designated as a master or a replica but allows data to flow in multiple directions. For more information on logical - replication, see . Through the - logical decoding interface (), + replication, see . Through the + logical decoding interface (), third-party extensions can also provide similar functionality. @@ -189,7 +189,7 @@ protocol to make nodes agree on a serializable transactional order. - Slony-I is an example of this type of replication, with per-table + Slony-I is an example of this type of replication, with per-table granularity, and support for multiple standby servers. Because it updates the standby server asynchronously (in batches), there is possible data loss during fail over. @@ -212,7 +212,7 @@ protocol to make nodes agree on a serializable transactional order. If queries are simply broadcast unmodified, functions like - random(), CURRENT_TIMESTAMP, and + random(), CURRENT_TIMESTAMP, and sequences can have different values on different servers. This is because each server operates independently, and because SQL queries are broadcast (and not actual modified rows). If @@ -224,9 +224,9 @@ protocol to make nodes agree on a serializable transactional order. standby servers via master-standby replication, not by the replication middleware. Care must also be taken that all transactions either commit or abort on all servers, perhaps - using two-phase commit ( - and ). - Pgpool-II and Continuent Tungsten + using two-phase commit ( + and ). + Pgpool-II and Continuent Tungsten are examples of this type of replication. @@ -266,14 +266,14 @@ protocol to make nodes agree on a serializable transactional order. there is no need to partition workloads between master and standby servers, and because the data changes are sent from one server to another, there is no problem with non-deterministic - functions like random(). + functions like random(). - PostgreSQL does not offer this type of replication, - though PostgreSQL two-phase commit ( and ) + PostgreSQL does not offer this type of replication, + though PostgreSQL two-phase commit ( and ) can be used to implement this in application code or middleware. @@ -284,8 +284,8 @@ protocol to make nodes agree on a serializable transactional order. - Because PostgreSQL is open source and easily - extended, a number of companies have taken PostgreSQL + Because PostgreSQL is open source and easily + extended, a number of companies have taken PostgreSQL and created commercial closed-source solutions with unique failover, replication, and load balancing capabilities. @@ -295,13 +295,13 @@ protocol to make nodes agree on a serializable transactional order. - summarizes + summarizes the capabilities of the various solutions listed above. High Availability, Load Balancing, and Replication Feature Matrix - + Feature @@ -475,9 +475,9 @@ protocol to make nodes agree on a serializable transactional order. concurrently on a single query. It is usually accomplished by splitting the data among servers and having each server execute its part of the query and return results to a central server where they - are combined and returned to the user. Pgpool-II + are combined and returned to the user. Pgpool-II has this capability. Also, this can be implemented using the - PL/Proxy tool set. + PL/Proxy tool set. @@ -494,10 +494,10 @@ protocol to make nodes agree on a serializable transactional order. 
Continuous archiving can be used to create a high - availability (HA) cluster configuration with one or more - standby servers ready to take over operations if the + availability (HA) cluster configuration with one or more + standby servers ready to take over operations if the primary server fails. This capability is widely referred to as - warm standby or log shipping. + warm standby or log shipping. @@ -513,7 +513,7 @@ protocol to make nodes agree on a serializable transactional order. Directly moving WAL records from one database server to another - is typically described as log shipping. PostgreSQL + is typically described as log shipping. PostgreSQL implements file-based log shipping by transferring WAL records one file (WAL segment) at a time. WAL files (16MB) can be shipped easily and cheaply over any distance, whether it be to an @@ -522,7 +522,7 @@ protocol to make nodes agree on a serializable transactional order. varies according to the transaction rate of the primary server. Record-based log shipping is more granular and streams WAL changes incrementally over a network connection (see ). + linkend="streaming-replication"/>). @@ -534,7 +534,7 @@ protocol to make nodes agree on a serializable transactional order. archive_timeout parameter, which can be set as low as a few seconds. However such a low setting will substantially increase the bandwidth required for file shipping. - Streaming replication (see ) + Streaming replication (see ) allows a much smaller window of data loss. @@ -547,7 +547,7 @@ protocol to make nodes agree on a serializable transactional order. rollforward will take considerably longer, so that technique only offers a solution for disaster recovery, not high availability. A standby server can also be used for read-only queries, in which case - it is called a Hot Standby server. See for + it is called a Hot Standby server. See for more information. @@ -585,7 +585,7 @@ protocol to make nodes agree on a serializable transactional order. associated with tablespaces will be passed across unmodified, so both primary and standby servers must have the same mount paths for tablespaces if that feature is used. Keep in mind that if - + is executed on the primary, any new mount point needed for it must be created on the primary and all standby servers before the command is executed. Hardware need not be exactly the same, but experience shows @@ -597,7 +597,7 @@ protocol to make nodes agree on a serializable transactional order. In general, log shipping between servers running different major - PostgreSQL release + PostgreSQL release levels is not possible. It is the policy of the PostgreSQL Global Development Group not to make changes to disk formats during minor release upgrades, so it is likely that running different minor release levels @@ -618,35 +618,35 @@ protocol to make nodes agree on a serializable transactional order. In standby mode, the server continuously applies WAL received from the master server. The standby server can read WAL from a WAL archive - (see ) or directly from the master + (see ) or directly from the master over a TCP connection (streaming replication). The standby server will also attempt to restore any WAL found in the standby cluster's - pg_wal directory. That typically happens after a server + pg_wal directory. That typically happens after a server restart, when the standby replays again WAL that was streamed from the master before the restart, but you can also manually copy files to - pg_wal at any time to have them replayed. 
+ pg_wal at any time to have them replayed. At startup, the standby begins by restoring all WAL available in the - archive location, calling restore_command. Once it - reaches the end of WAL available there and restore_command - fails, it tries to restore any WAL available in the pg_wal directory. + archive location, calling restore_command. Once it + reaches the end of WAL available there and restore_command + fails, it tries to restore any WAL available in the pg_wal directory. If that fails, and streaming replication has been configured, the standby tries to connect to the primary server and start streaming WAL - from the last valid record found in archive or pg_wal. If that fails + from the last valid record found in archive or pg_wal. If that fails or streaming replication is not configured, or if the connection is later disconnected, the standby goes back to step 1 and tries to restore the file from the archive again. This loop of retries from the - archive, pg_wal, and via streaming replication goes on until the server + archive, pg_wal, and via streaming replication goes on until the server is stopped or failover is triggered by a trigger file. Standby mode is exited and the server switches to normal operation - when pg_ctl promote is run or a trigger file is found - (trigger_file). Before failover, - any WAL immediately available in the archive or in pg_wal will be + when pg_ctl promote is run or a trigger file is found + (trigger_file). Before failover, + any WAL immediately available in the archive or in pg_wal will be restored, but no attempt is made to connect to the master. @@ -657,7 +657,7 @@ protocol to make nodes agree on a serializable transactional order. Set up continuous archiving on the primary to an archive directory accessible from the standby, as described - in . The archive location should be + in . The archive location should be accessible from the standby even when the master is down, i.e. it should reside on the standby server itself or another trusted server, not on the master server. @@ -667,8 +667,8 @@ protocol to make nodes agree on a serializable transactional order. If you want to use streaming replication, set up authentication on the primary server to allow replication connections from the standby server(s); that is, create a role and provide a suitable entry or - entries in pg_hba.conf with the database field set to - replication. Also ensure max_wal_senders is set + entries in pg_hba.conf with the database field set to + replication. Also ensure max_wal_senders is set to a sufficiently large value in the configuration file of the primary server. If replication slots will be used, ensure that max_replication_slots is set sufficiently @@ -676,7 +676,7 @@ protocol to make nodes agree on a serializable transactional order. - Take a base backup as described in + Take a base backup as described in to bootstrap the standby server. @@ -686,33 +686,33 @@ protocol to make nodes agree on a serializable transactional order. To set up the standby server, restore the base backup taken from primary - server (see ). Create a recovery - command file recovery.conf in the standby's cluster data - directory, and turn on standby_mode. Set - restore_command to a simple command to copy files from + server (see ). Create a recovery + command file recovery.conf in the standby's cluster data + directory, and turn on standby_mode. Set + restore_command to a simple command to copy files from the WAL archive. 
If you plan to have multiple standby servers for high - availability purposes, set recovery_target_timeline to - latest, to make the standby server follow the timeline change + availability purposes, set recovery_target_timeline to + latest, to make the standby server follow the timeline change that occurs at failover to another standby. Do not use pg_standby or similar tools with the built-in standby mode - described here. restore_command should return immediately + described here. restore_command should return immediately if the file does not exist; the server will retry the command again if - necessary. See + necessary. See for using tools like pg_standby. If you want to use streaming replication, fill in - primary_conninfo with a libpq connection string, including + primary_conninfo with a libpq connection string, including the host name (or IP address) and any additional details needed to connect to the primary server. If the primary needs a password for authentication, the password needs to be specified in - primary_conninfo as well. + primary_conninfo as well. @@ -724,21 +724,21 @@ protocol to make nodes agree on a serializable transactional order. If you're using a WAL archive, its size can be minimized using the parameter to remove files that are no + linkend="archive-cleanup-command"/> parameter to remove files that are no longer required by the standby server. - The pg_archivecleanup utility is designed specifically to - be used with archive_cleanup_command in typical single-standby - configurations, see . + The pg_archivecleanup utility is designed specifically to + be used with archive_cleanup_command in typical single-standby + configurations, see . Note however, that if you're using the archive for backup purposes, you need to retain files needed to recover from at least the latest base backup, even if they're no longer needed by the standby. - A simple example of a recovery.conf is: + A simple example of a recovery.conf is: standby_mode = 'on' -primary_conninfo = 'host=192.168.1.50 port=5432 user=foo password=foopass' +primary_conninfo = 'host=192.168.1.50 port=5432 user=foo password=foopass options=''-c wal_sender_timeout=5000''' restore_command = 'cp /path/to/archive/%f %p' archive_cleanup_command = 'pg_archivecleanup /path/to/archive %r' @@ -746,7 +746,7 @@ archive_cleanup_command = 'pg_archivecleanup /path/to/archive %r' You can have any number of standby servers, but if you use streaming - replication, make sure you set max_wal_senders high enough in + replication, make sure you set max_wal_senders high enough in the primary to allow them to be connected simultaneously. @@ -768,12 +768,12 @@ archive_cleanup_command = 'pg_archivecleanup /path/to/archive %r' Streaming replication is asynchronous by default - (see ), in which case there is + (see ), in which case there is a small delay between committing a transaction in the primary and the changes becoming visible in the standby. This delay is however much smaller than with file-based log shipping, typically under one second assuming the standby is powerful enough to keep up with the load. With - streaming replication, archive_timeout is not required to + streaming replication, archive_timeout is not required to reduce the data loss window. @@ -782,7 +782,7 @@ archive_cleanup_command = 'pg_archivecleanup /path/to/archive %r' archiving, the server might recycle old WAL segments before the standby has received them. If this occurs, the standby will need to be reinitialized from a new base backup. 
You can avoid this by setting - wal_keep_segments to a value large enough to ensure that + wal_keep_segments to a value large enough to ensure that WAL segments are not recycled too early, or by configuring a replication slot for the standby. If you set up a WAL archive that's accessible from the standby, these solutions are not required, since the standby can @@ -791,31 +791,31 @@ archive_cleanup_command = 'pg_archivecleanup /path/to/archive %r' To use streaming replication, set up a file-based log-shipping standby - server as described in . The step that + server as described in . The step that turns a file-based log-shipping standby into streaming replication - standby is setting primary_conninfo setting in the - recovery.conf file to point to the primary server. Set - and authentication options - (see pg_hba.conf) on the primary so that the standby server - can connect to the replication pseudo-database on the primary - server (see ). + standby is setting primary_conninfo setting in the + recovery.conf file to point to the primary server. Set + and authentication options + (see pg_hba.conf) on the primary so that the standby server + can connect to the replication pseudo-database on the primary + server (see ). On systems that support the keepalive socket option, setting - , - and - helps the primary promptly + , + and + helps the primary promptly notice a broken connection. Set the maximum number of concurrent connections from the standby servers - (see for details). + (see for details). - When the standby is started and primary_conninfo is set + When the standby is started and primary_conninfo is set correctly, the standby will connect to the primary after replaying all WAL files available in the archive. If the connection is established successfully, you will see a walreceiver process in the standby, and @@ -829,20 +829,20 @@ archive_cleanup_command = 'pg_archivecleanup /path/to/archive %r' so that only trusted users can read the WAL stream, because it is easy to extract privileged information from it. Standby servers must authenticate to the primary as a superuser or an account that has the - REPLICATION privilege. It is recommended to create a - dedicated user account with REPLICATION and LOGIN - privileges for replication. While REPLICATION privilege gives + REPLICATION privilege. It is recommended to create a + dedicated user account with REPLICATION and LOGIN + privileges for replication. While REPLICATION privilege gives very high permissions, it does not allow the user to modify any data on - the primary system, which the SUPERUSER privilege does. + the primary system, which the SUPERUSER privilege does. Client authentication for replication is controlled by a - pg_hba.conf record specifying replication in the - database field. For example, if the standby is running on - host IP 192.168.1.100 and the account name for replication - is foo, the administrator can add the following line to the - pg_hba.conf file on the primary: + pg_hba.conf record specifying replication in the + database field. For example, if the standby is running on + host IP 192.168.1.100 and the account name for replication + is foo, the administrator can add the following line to the + pg_hba.conf file on the primary: # Allow the user "foo" from host 192.168.1.100 to connect to the primary @@ -854,14 +854,14 @@ host replication foo 192.168.1.100/32 md5 The host name and port number of the primary, connection user name, - and password are specified in the recovery.conf file. 
- The password can also be set in the ~/.pgpass file on the - standby (specify replication in the database + and password are specified in the recovery.conf file. + The password can also be set in the ~/.pgpass file on the + standby (specify replication in the database field). - For example, if the primary is running on host IP 192.168.1.50, + For example, if the primary is running on host IP 192.168.1.50, port 5432, the account name for replication is - foo, and the password is foopass, the administrator - can add the following line to the recovery.conf file on the + foo, and the password is foopass, the administrator + can add the following line to the recovery.conf file on the standby: @@ -880,24 +880,30 @@ primary_conninfo = 'host=192.168.1.50 port=5432 user=foo password=foopass' standby. You can calculate this lag by comparing the current WAL write location on the primary with the last WAL location received by the standby. These locations can be retrieved using - pg_current_wal_lsn on the primary and - pg_last_wal_receive_lsn on the standby, - respectively (see and - for details). + pg_current_wal_lsn on the primary and + pg_last_wal_receive_lsn on the standby, + respectively (see and + for details). The last WAL receive location in the standby is also displayed in the process status of the WAL receiver process, displayed using the - ps command (see for details). + ps command (see for details). You can retrieve a list of WAL sender processes via the - - pg_stat_replication view. Large differences between - pg_current_wal_lsn and the view's sent_lsn field + view. Large differences between + pg_current_wal_lsn and the view's sent_lsn field might indicate that the master server is under heavy load, while - differences between sent_lsn and - pg_last_wal_receive_lsn on the standby might indicate + differences between sent_lsn and + pg_last_wal_receive_lsn on the standby might indicate network delay, or that the standby is under heavy load. + + On a hot standby, the status of the WAL receiver process can be retrieved + via the view. A large + difference between pg_last_wal_replay_lsn and the + view's received_lsn indicates that WAL is being + received faster than it can be replayed. + @@ -911,23 +917,23 @@ primary_conninfo = 'host=192.168.1.50 port=5432 user=foo password=foopass' Replication slots provide an automated way to ensure that the master does not remove WAL segments until they have been received by all standbys, and that the master does not remove rows which could cause a - recovery conflict even when the + recovery conflict even when the standby is disconnected. In lieu of using replication slots, it is possible to prevent the removal - of old WAL segments using , or by + of old WAL segments using , or by storing the segments in an archive using - . + . However, these methods often result in retaining more WAL segments than required, whereas replication slots retain only the number of segments known to be needed. An advantage of these methods is that they bound - the space requirement for pg_wal; there is currently no way + the space requirement for pg_wal; there is currently no way to do this using replication slots. 
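   Because a slot retains WAL for as long as its standby has not received it,
   it is worth watching how much WAL each slot is holding back.  A hedged
   monitoring sketch, to be run on the primary (pg_wal_lsn_diff reports the
   difference in bytes):

-- How much WAL each slot still retains, measured from the slot's
-- restart_lsn to the current write position.
SELECT slot_name,
       active,
       pg_wal_lsn_diff(pg_current_wal_lsn(), restart_lsn) AS retained_bytes
  FROM pg_replication_slots;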
- Similarly, - and provide protection against + Similarly, + and provide protection against relevant rows being removed by vacuum, but the former provides no protection during any time period when the standby is not connected, and the latter often needs to be set to a high value to provide adequate @@ -946,8 +952,8 @@ primary_conninfo = 'host=192.168.1.50 port=5432 user=foo password=foopass' Slots can be created and dropped either via the streaming replication - protocol (see ) or via SQL - functions (see ). + protocol (see ) or via SQL + functions (see ). @@ -960,14 +966,14 @@ postgres=# SELECT * FROM pg_create_physical_replication_slot('node_a_slot'); -------------+----- node_a_slot | -postgres=# SELECT * FROM pg_replication_slots; - slot_name | slot_type | datoid | database | active | xmin | restart_lsn | confirmed_flush_lsn --------------+-----------+--------+----------+--------+------+-------------+--------------------- - node_a_slot | physical | | | f | | | +postgres=# SELECT slot_name, slot_type, active FROM pg_replication_slots; + slot_name | slot_type | active +-------------+-----------+-------- + node_a_slot | physical | f (1 row) - To configure the standby to use this slot, primary_slot_name - should be configured in the standby's recovery.conf. + To configure the standby to use this slot, primary_slot_name + should be configured in the standby's recovery.conf. Here is a simple example: standby_mode = 'on' @@ -1011,7 +1017,7 @@ primary_slot_name = 'node_a_slot' Cascading replication is currently asynchronous. Synchronous replication - (see ) settings have no effect on + (see ) settings have no effect on cascading replication at present. @@ -1022,16 +1028,16 @@ primary_slot_name = 'node_a_slot' If an upstream standby server is promoted to become new master, downstream servers will continue to stream from the new master if - recovery_target_timeline is set to 'latest'. + recovery_target_timeline is set to 'latest'. To use cascading replication, set up the cascading standby so that it can accept replication connections (that is, set - and , + and , and configure host-based authentication). - You will also need to set primary_conninfo in the downstream + You will also need to set primary_conninfo in the downstream standby to point to the cascading standby. @@ -1044,7 +1050,7 @@ primary_slot_name = 'node_a_slot' - PostgreSQL streaming replication is asynchronous by + PostgreSQL streaming replication is asynchronous by default. If the primary server crashes then some transactions that were committed may not have been replicated to the standby server, causing data loss. The amount @@ -1058,8 +1064,8 @@ primary_slot_name = 'node_a_slot' standby servers. This extends that standard level of durability offered by a transaction commit. This level of protection is referred to as 2-safe replication in computer science theory, and group-1-safe - (group-safe and 1-safe) when synchronous_commit is set to - remote_write. + (group-safe and 1-safe) when synchronous_commit is set to + remote_write. @@ -1103,15 +1109,15 @@ primary_slot_name = 'node_a_slot' Once streaming replication has been configured, configuring synchronous replication requires only one additional configuration step: - must be set to - a non-empty value. synchronous_commit must also be set to - on, but since this is the default value, typically no change is - required. (See and - .) + must be set to + a non-empty value. 
synchronous_commit must also be set to + on, but since this is the default value, typically no change is + required. (See and + .) This configuration will cause each commit to wait for confirmation that the standby has written the commit record to durable storage. - synchronous_commit can be set by individual + synchronous_commit can be set by individual users, so it can be configured in the configuration file, for particular users or databases, or dynamically by applications, in order to control the durability guarantee on a per-transaction basis. @@ -1121,12 +1127,12 @@ primary_slot_name = 'node_a_slot' After a commit record has been written to disk on the primary, the WAL record is then sent to the standby. The standby sends reply messages each time a new batch of WAL data is written to disk, unless - wal_receiver_status_interval is set to zero on the standby. - In the case that synchronous_commit is set to - remote_apply, the standby sends reply messages when the commit + wal_receiver_status_interval is set to zero on the standby. + In the case that synchronous_commit is set to + remote_apply, the standby sends reply messages when the commit record is replayed, making the transaction visible. If the standby is chosen as a synchronous standby, according to the setting - of synchronous_standby_names on the primary, the reply + of synchronous_standby_names on the primary, the reply messages from that standby will be considered along with those from other synchronous standbys to decide when to release transactions waiting for confirmation that the commit record has been received. These parameters @@ -1138,13 +1144,13 @@ primary_slot_name = 'node_a_slot' - Setting synchronous_commit to remote_write will + Setting synchronous_commit to remote_write will cause each commit to wait for confirmation that the standby has received the commit record and written it out to its own operating system, but not for the data to be flushed to disk on the standby. This - setting provides a weaker guarantee of durability than on + setting provides a weaker guarantee of durability than on does: the standby could lose the data in the event of an operating system - crash, though not a PostgreSQL crash. + crash, though not a PostgreSQL crash. However, it's a useful setting in practice because it can decrease the response time for the transaction. Data loss could only occur if both the primary and the standby crash and @@ -1152,7 +1158,7 @@ primary_slot_name = 'node_a_slot' - Setting synchronous_commit to remote_apply will + Setting synchronous_commit to remote_apply will cause each commit to wait until the current synchronous standbys report that they have replayed the transaction, making it visible to user queries. In simple cases, this allows for load balancing with causal @@ -1176,12 +1182,12 @@ primary_slot_name = 'node_a_slot' transactions will wait until all the standby servers which are considered as synchronous confirm receipt of their data. The number of synchronous standbys that transactions must wait for replies from is specified in - synchronous_standby_names. This parameter also specifies - a list of standby names and the method (FIRST and - ANY) to choose synchronous standbys from the listed ones. + synchronous_standby_names. This parameter also specifies + a list of standby names and the method (FIRST and + ANY) to choose synchronous standbys from the listed ones. 
- The method FIRST specifies a priority-based synchronous + The method FIRST specifies a priority-based synchronous replication and makes transaction commits wait until their WAL records are replicated to the requested number of synchronous standbys chosen based on their priorities. The standbys whose names appear earlier in the list are @@ -1192,36 +1198,36 @@ primary_slot_name = 'node_a_slot' next-highest-priority standby. - An example of synchronous_standby_names for + An example of synchronous_standby_names for a priority-based multiple synchronous standbys is: synchronous_standby_names = 'FIRST 2 (s1, s2, s3)' - In this example, if four standby servers s1, s2, - s3 and s4 are running, the two standbys - s1 and s2 will be chosen as synchronous standbys + In this example, if four standby servers s1, s2, + s3 and s4 are running, the two standbys + s1 and s2 will be chosen as synchronous standbys because their names appear early in the list of standby names. - s3 is a potential synchronous standby and will take over - the role of synchronous standby when either of s1 or - s2 fails. s4 is an asynchronous standby since + s3 is a potential synchronous standby and will take over + the role of synchronous standby when either of s1 or + s2 fails. s4 is an asynchronous standby since its name is not in the list. - The method ANY specifies a quorum-based synchronous + The method ANY specifies a quorum-based synchronous replication and makes transaction commits wait until their WAL records - are replicated to at least the requested number of + are replicated to at least the requested number of synchronous standbys in the list. - An example of synchronous_standby_names for + An example of synchronous_standby_names for a quorum-based multiple synchronous standbys is: synchronous_standby_names = 'ANY 2 (s1, s2, s3)' - In this example, if four standby servers s1, s2, - s3 and s4 are running, transaction commits will - wait for replies from at least any two standbys of s1, - s2 and s3. s4 is an asynchronous + In this example, if four standby servers s1, s2, + s3 and s4 are running, transaction commits will + wait for replies from at least any two standbys of s1, + s2 and s3. s4 is an asynchronous standby since its name is not in the list. @@ -1243,7 +1249,7 @@ synchronous_standby_names = 'ANY 2 (s1, s2, s3)' - PostgreSQL allows the application developer + PostgreSQL allows the application developer to specify the durability level required via replication. This can be specified for the system overall, though it can also be specified for specific users or connections, or even individual transactions. @@ -1275,10 +1281,10 @@ synchronous_standby_names = 'ANY 2 (s1, s2, s3)' Planning for High Availability - synchronous_standby_names specifies the number and + synchronous_standby_names specifies the number and names of synchronous standbys that transaction commits made when - synchronous_commit is set to on, - remote_apply or remote_write will wait for + synchronous_commit is set to on, + remote_apply or remote_write will wait for responses from. Such transaction commits may never be completed if any one of synchronous standbys should crash. @@ -1286,7 +1292,7 @@ synchronous_standby_names = 'ANY 2 (s1, s2, s3)' The best solution for high availability is to ensure you keep as many synchronous standbys as requested. This can be achieved by naming multiple - potential synchronous standbys using synchronous_standby_names. + potential synchronous standbys using synchronous_standby_names. 
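Which of the named standbys are currently acting as synchronous, which are merely potential, and which are asynchronous can be checked from the primary at any time; a small sketch against the pg_stat_replication view mentioned earlier:

    SELECT application_name, state, sync_priority, sync_state
    FROM pg_stat_replication
    ORDER BY sync_priority, application_name;

Standbys reported with sync_state = 'sync' are the ones commits currently wait for; those reported as 'potential' take over that role automatically if a synchronous standby disconnects.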
@@ -1305,27 +1311,27 @@ synchronous_standby_names = 'ANY 2 (s1, s2, s3)' When a standby first attaches to the primary, it will not yet be properly - synchronized. This is described as catchup mode. Once + synchronized. This is described as catchup mode. Once the lag between standby and primary reaches zero for the first time - we move to real-time streaming state. + we move to real-time streaming state. The catch-up duration may be long immediately after the standby has been created. If the standby is shut down, then the catch-up period will increase according to the length of time the standby has been down. The standby is only able to become a synchronous standby - once it has reached streaming state. + once it has reached streaming state. This state can be viewed using the pg_stat_replication view. - If primary restarts while commits are waiting for acknowledgement, those + If primary restarts while commits are waiting for acknowledgment, those waiting transactions will be marked fully committed once the primary database recovers. There is no way to be certain that all standbys have received all outstanding WAL data at time of the crash of the primary. Some transactions may not show as committed on the standby, even though they show as committed on the primary. The guarantee we offer is that - the application will not receive explicit acknowledgement of the + the application will not receive explicit acknowledgment of the successful commit of a transaction until the WAL data is known to be safely received by all the synchronous standbys. @@ -1334,7 +1340,7 @@ synchronous_standby_names = 'ANY 2 (s1, s2, s3)' If you really cannot keep as many synchronous standbys as requested then you should decrease the number of synchronous standbys that transaction commits must wait for responses from - in synchronous_standby_names (or disable it) and + in synchronous_standby_names (or disable it) and reload the configuration file on the primary server. @@ -1347,7 +1353,7 @@ synchronous_standby_names = 'ANY 2 (s1, s2, s3)' If you need to re-create a standby server while transactions are waiting, make sure that the commands pg_start_backup() and pg_stop_backup() are run in a session with - synchronous_commit = off, otherwise those + synchronous_commit = off, otherwise those requests will wait forever for the standby to appear. @@ -1381,7 +1387,7 @@ synchronous_standby_names = 'ANY 2 (s1, s2, s3)' - If archive_mode is set to on, the + If archive_mode is set to on, the archiver is not enabled during recovery or standby mode. If the standby server is promoted, it will start archiving after the promotion, but will not archive any WAL it did not generate itself. To get a complete @@ -1415,7 +1421,7 @@ synchronous_standby_names = 'ANY 2 (s1, s2, s3)' If the primary server fails and the standby server becomes the new primary, and then the old primary restarts, you must have a mechanism for informing the old primary that it is no longer the primary. This is - sometimes known as STONITH (Shoot The Other Node In The Head), which is + sometimes known as STONITH (Shoot The Other Node In The Head), which is necessary to avoid situations where both systems think they are the primary, which will lead to confusion and ultimately data loss. @@ -1445,7 +1451,7 @@ synchronous_standby_names = 'ANY 2 (s1, s2, s3)' and might stay down. To return to normal operation, a standby server must be recreated, either on the former primary system when it comes up, or on a third, - possibly new, system. 
The utility can be + possibly new, system. The utility can be used to speed up this process on large clusters. Once complete, the primary and standby can be considered to have switched roles. Some people choose to use a third @@ -1465,14 +1471,17 @@ synchronous_standby_names = 'ANY 2 (s1, s2, s3)' - To trigger failover of a log-shipping standby server, - run pg_ctl promote or create a trigger - file with the file name and path specified by the trigger_file - setting in recovery.conf. If you're planning to use - pg_ctl promote to fail over, trigger_file is - not required. If you're setting up the reporting servers that are - only used to offload read-only queries from the primary, not for high - availability purposes, you don't need to promote it. + To trigger failover of a log-shipping standby server, run + pg_ctl promote, call pg_promote, + or create a trigger file with the file name and path specified by the + trigger_file setting in + recovery.conf. If you're planning to use + pg_ctl promote or to call + pg_promote to fail over, + trigger_file is not required. If you're + setting up the reporting servers that are only used to offload read-only + queries from the primary, not for high availability purposes, you don't + need to promote it. @@ -1481,11 +1490,11 @@ synchronous_standby_names = 'ANY 2 (s1, s2, s3)' An alternative to the built-in standby mode described in the previous - sections is to use a restore_command that polls the archive location. + sections is to use a restore_command that polls the archive location. This was the only option available in versions 8.4 and below. In this - setup, set standby_mode off, because you are implementing + setup, set standby_mode off, because you are implementing the polling required for standby operation yourself. See the - module for a reference + module for a reference implementation of this. @@ -1494,7 +1503,7 @@ synchronous_standby_names = 'ANY 2 (s1, s2, s3)' time, so if you use the standby server for queries (see Hot Standby), there is a delay between an action in the master and when the action becomes visible in the standby, corresponding the time it takes - to fill up the WAL file. archive_timeout can be used to make that delay + to fill up the WAL file. archive_timeout can be used to make that delay shorter. Also note that you can't combine streaming replication with this method. @@ -1511,25 +1520,25 @@ synchronous_standby_names = 'ANY 2 (s1, s2, s3)' The magic that makes the two loosely coupled servers work together is - simply a restore_command used on the standby that, + simply a restore_command used on the standby that, when asked for the next WAL file, waits for it to become available from - the primary. The restore_command is specified in the - recovery.conf file on the standby server. Normal recovery + the primary. The restore_command is specified in the + recovery.conf file on the standby server. Normal recovery processing would request a file from the WAL archive, reporting failure if the file was unavailable. For standby processing it is normal for the next WAL file to be unavailable, so the standby must wait for - it to appear. For files ending in .backup or - .history there is no need to wait, and a non-zero return - code must be returned. A waiting restore_command can be + it to appear. For files ending in + .history there is no need to wait, and a non-zero return + code must be returned. A waiting restore_command can be written as a custom script that loops after polling for the existence of the next WAL file. 
There must also be some way to trigger failover, which - should interrupt the restore_command, break the loop and + should interrupt the restore_command, break the loop and return a file-not-found error to the standby server. This ends recovery and the standby will then come up as a normal server. - Pseudocode for a suitable restore_command is: + Pseudocode for a suitable restore_command is: triggered = false; while (!NextWALFileReady() && !triggered) @@ -1544,8 +1553,8 @@ if (!triggered) - A working example of a waiting restore_command is provided - in the module. It + A working example of a waiting restore_command is provided + in the module. It should be used as a reference on how to correctly implement the logic described above. It can also be extended as needed to support specific configurations and environments. @@ -1553,14 +1562,14 @@ if (!triggered) The method for triggering failover is an important part of planning - and design. One potential option is the restore_command + and design. One potential option is the restore_command command. It is executed once for each WAL file, but the process - running the restore_command is created and dies for + running the restore_command is created and dies for each file, so there is no daemon or server process, and signals or a signal handler cannot be used. Therefore, the - restore_command is not suitable to trigger failover. + restore_command is not suitable to trigger failover. It is possible to use a simple timeout facility, especially if - used in conjunction with a known archive_timeout + used in conjunction with a known archive_timeout setting on the primary. However, this is somewhat error prone since a network problem or busy primary server might be sufficient to initiate failover. A notification mechanism such as the explicit @@ -1579,32 +1588,32 @@ if (!triggered) Set up primary and standby systems as nearly identical as possible, including two identical copies of - PostgreSQL at the same release level. + PostgreSQL at the same release level. Set up continuous archiving from the primary to a WAL archive directory on the standby server. Ensure that - , - and - + , + and + are set appropriately on the primary - (see ). + (see ). Make a base backup of the primary server (see ), and load this data onto the standby. + linkend="backup-base-backup"/>), and load this data onto the standby. Begin recovery on the standby server from the local WAL - archive, using a recovery.conf that specifies a - restore_command that waits as described - previously (see ). + archive, using a recovery.conf that specifies a + restore_command that waits as described + previously (see ). @@ -1637,8 +1646,8 @@ if (!triggered) - An external program can call the pg_walfile_name_offset() - function (see ) + An external program can call the pg_walfile_name_offset() + function (see ) to find out the file name and the exact byte offset within it of the current end of WAL. It can then access the WAL file directly and copy the data from the last known end of WAL through the current end @@ -1646,18 +1655,18 @@ if (!triggered) loss is the polling cycle time of the copying program, which can be very small, and there is no wasted bandwidth from forcing partially-used segment files to be archived. Note that the standby servers' - restore_command scripts can only deal with whole WAL files, + restore_command scripts can only deal with whole WAL files, so the incrementally copied data is not ordinarily made available to the standby servers. 
It is of use only when the primary dies — then the last partial WAL file is fed to the standby before allowing it to come up. The correct implementation of this process requires - cooperation of the restore_command script with the data + cooperation of the restore_command script with the data copying program. - Starting with PostgreSQL version 9.0, you can use - streaming replication (see ) to + Starting with PostgreSQL version 9.0, you can use + streaming replication (see ) to achieve the same benefits with less effort. @@ -1691,7 +1700,7 @@ if (!triggered) User's Overview - When the parameter is set to true on a + When the parameter is set to true on a standby server, it will begin accepting connections once the recovery has brought the system to a consistent state. All such connections are strictly read-only; not even temporary tables may be written. @@ -1707,7 +1716,7 @@ if (!triggered) made by that transaction will be visible to any new snapshots taken on the standby. Snapshots may be taken at the start of each query or at the start of each transaction, depending on the current transaction isolation - level. For more details, see . + level. For more details, see . @@ -1716,17 +1725,17 @@ if (!triggered) - Query access - SELECT, COPY TO + Query access - SELECT, COPY TO - Cursor commands - DECLARE, FETCH, CLOSE + Cursor commands - DECLARE, FETCH, CLOSE - Parameters - SHOW, SET, RESET + Parameters - SHOW, SET, RESET @@ -1735,17 +1744,17 @@ if (!triggered) - BEGIN, END, ABORT, START TRANSACTION + BEGIN, END, ABORT, START TRANSACTION - SAVEPOINT, RELEASE, ROLLBACK TO SAVEPOINT + SAVEPOINT, RELEASE, ROLLBACK TO SAVEPOINT - EXCEPTION blocks and other internal subtransactions + EXCEPTION blocks and other internal subtransactions @@ -1753,19 +1762,19 @@ if (!triggered) - LOCK TABLE, though only when explicitly in one of these modes: - ACCESS SHARE, ROW SHARE or ROW EXCLUSIVE. + LOCK TABLE, though only when explicitly in one of these modes: + ACCESS SHARE, ROW SHARE or ROW EXCLUSIVE. - Plans and resources - PREPARE, EXECUTE, - DEALLOCATE, DISCARD + Plans and resources - PREPARE, EXECUTE, + DEALLOCATE, DISCARD - Plugins and extensions - LOAD + Plugins and extensions - LOAD @@ -1779,9 +1788,9 @@ if (!triggered) - Data Manipulation Language (DML) - INSERT, - UPDATE, DELETE, COPY FROM, - TRUNCATE. + Data Manipulation Language (DML) - INSERT, + UPDATE, DELETE, COPY FROM, + TRUNCATE. Note that there are no allowed actions that result in a trigger being executed during recovery. This restriction applies even to temporary tables, because table rows cannot be read or written without @@ -1791,31 +1800,31 @@ if (!triggered) - Data Definition Language (DDL) - CREATE, - DROP, ALTER, COMMENT. + Data Definition Language (DDL) - CREATE, + DROP, ALTER, COMMENT. This restriction applies even to temporary tables, because carrying out these operations would require updating the system catalog tables. - SELECT ... FOR SHARE | UPDATE, because row locks cannot be + SELECT ... FOR SHARE | UPDATE, because row locks cannot be taken without updating the underlying data files. - Rules on SELECT statements that generate DML commands. + Rules on SELECT statements that generate DML commands. - LOCK that explicitly requests a mode higher than ROW EXCLUSIVE MODE. + LOCK that explicitly requests a mode higher than ROW EXCLUSIVE MODE. - LOCK in short default form, since it requests ACCESS EXCLUSIVE MODE. + LOCK in short default form, since it requests ACCESS EXCLUSIVE MODE. 
@@ -1824,19 +1833,19 @@ if (!triggered) - BEGIN READ WRITE, - START TRANSACTION READ WRITE + BEGIN READ WRITE, + START TRANSACTION READ WRITE - SET TRANSACTION READ WRITE, - SET SESSION CHARACTERISTICS AS TRANSACTION READ WRITE + SET TRANSACTION READ WRITE, + SET SESSION CHARACTERISTICS AS TRANSACTION READ WRITE - SET transaction_read_only = off + SET transaction_read_only = off @@ -1844,35 +1853,35 @@ if (!triggered) - Two-phase commit commands - PREPARE TRANSACTION, - COMMIT PREPARED, ROLLBACK PREPARED + Two-phase commit commands - PREPARE TRANSACTION, + COMMIT PREPARED, ROLLBACK PREPARED because even read-only transactions need to write WAL in the prepare phase (the first phase of two phase commit). - Sequence updates - nextval(), setval() + Sequence updates - nextval(), setval() - LISTEN, UNLISTEN, NOTIFY + LISTEN, UNLISTEN, NOTIFY - In normal operation, read-only transactions are allowed to - use LISTEN, UNLISTEN, and - NOTIFY, so Hot Standby sessions operate under slightly tighter + In normal operation, read-only transactions are allowed to + use LISTEN, UNLISTEN, and + NOTIFY, so Hot Standby sessions operate under slightly tighter restrictions than ordinary read-only sessions. It is possible that some of these restrictions might be loosened in a future release. - During hot standby, the parameter transaction_read_only is always + During hot standby, the parameter transaction_read_only is always true and may not be changed. But as long as no attempt is made to modify the database, connections during hot standby will act much like any other database connection. If failover or switchover occurs, the database will @@ -1884,8 +1893,8 @@ if (!triggered) Users will be able to tell whether their session is read-only by - issuing SHOW transaction_read_only. In addition, a set of - functions () allow users to + issuing SHOW transaction_read_only. In addition, a set of + functions () allow users to access information about the standby server. These allow you to write programs that are aware of the current state of the database. These can be used to monitor the progress of recovery, or to allow you to @@ -1907,7 +1916,7 @@ if (!triggered) There are also additional types of conflict that can occur with Hot Standby. - These conflicts are hard conflicts in the sense that queries + These conflicts are hard conflicts in the sense that queries might need to be canceled and, in some cases, sessions disconnected to resolve them. The user is provided with several ways to handle these conflicts. Conflict cases include: @@ -1916,7 +1925,7 @@ if (!triggered) Access Exclusive locks taken on the primary server, including both - explicit LOCK commands and various DDL + explicit LOCK commands and various DDL actions, conflict with table accesses in standby queries. @@ -1935,7 +1944,7 @@ if (!triggered) Application of a vacuum cleanup record from WAL conflicts with - standby transactions whose snapshots can still see any of + standby transactions whose snapshots can still see any of the rows to be removed. @@ -1962,31 +1971,31 @@ if (!triggered) An example of the problem situation is an administrator on the primary - server running DROP TABLE on a table that is currently being + server running DROP TABLE on a table that is currently being queried on the standby server. Clearly the standby query cannot continue - if the DROP TABLE is applied on the standby. If this situation - occurred on the primary, the DROP TABLE would wait until the - other query had finished. 
But when DROP TABLE is run on the + if the DROP TABLE is applied on the standby. If this situation + occurred on the primary, the DROP TABLE would wait until the + other query had finished. But when DROP TABLE is run on the primary, the primary doesn't have information about what queries are running on the standby, so it will not wait for any such standby queries. The WAL change records come through to the standby while the standby query is still running, causing a conflict. The standby server must either delay application of the WAL records (and everything after them, too) or else cancel the conflicting query so that the DROP - TABLE can be applied. + TABLE can be applied. When a conflicting query is short, it's typically desirable to allow it to complete by delaying WAL application for a little bit; but a long delay in WAL application is usually not desirable. So the cancel mechanism has - parameters, and , that define the maximum + parameters, and , that define the maximum allowed delay in WAL application. Conflicting queries will be canceled once it has taken longer than the relevant delay setting to apply any newly-received WAL data. There are two parameters so that different delay values can be specified for the case of reading WAL data from an archive - (i.e., initial recovery from a base backup or catching up a + (i.e., initial recovery from a base backup or catching up a standby server that has fallen far behind) versus reading WAL data via streaming replication. @@ -2003,10 +2012,10 @@ if (!triggered) - Once the delay specified by max_standby_archive_delay or - max_standby_streaming_delay has been exceeded, conflicting + Once the delay specified by max_standby_archive_delay or + max_standby_streaming_delay has been exceeded, conflicting queries will be canceled. This usually results just in a cancellation - error, although in the case of replaying a DROP DATABASE + error, although in the case of replaying a DROP DATABASE the entire conflicting session will be terminated. Also, if the conflict is over a lock held by an idle transaction, the conflicting session is terminated (this behavior might change in the future). @@ -2030,7 +2039,7 @@ if (!triggered) The most common reason for conflict between standby queries and WAL replay - is early cleanup. Normally, PostgreSQL allows + is early cleanup. Normally, PostgreSQL allows cleanup of old row versions when there are no transactions that need to see them to ensure correct visibility of data according to MVCC rules. However, this rule can only be applied for transactions executing on the @@ -2041,7 +2050,7 @@ if (!triggered) Experienced users should note that both row version cleanup and row version freezing will potentially conflict with standby queries. Running a manual - VACUUM FREEZE is likely to cause conflicts even on tables with + VACUUM FREEZE is likely to cause conflicts even on tables with no updated or deleted rows. @@ -2049,15 +2058,15 @@ if (!triggered) Users should be clear that tables that are regularly and heavily updated on the primary server will quickly cause cancellation of longer running queries on the standby. In such cases the setting of a finite value for - max_standby_archive_delay or - max_standby_streaming_delay can be considered similar to - setting statement_timeout. + max_standby_archive_delay or + max_standby_streaming_delay can be considered similar to + setting statement_timeout. Remedial possibilities exist if the number of standby-query cancellations is found to be unacceptable. 
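Before reaching for the remedies described next, it is worth measuring the problem. The per-database conflict counters can be inspected on the standby with a query along these lines (a monitoring sketch only):

    SELECT datname, confl_snapshot, confl_lock, confl_bufferpin, confl_deadlock
    FROM pg_stat_database_conflicts;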
The first option is to set the parameter - hot_standby_feedback, which prevents VACUUM from + hot_standby_feedback, which prevents VACUUM from removing recently-dead rows and so cleanup conflicts do not occur. If you do this, you should note that this will delay cleanup of dead rows on the primary, @@ -2067,29 +2076,29 @@ if (!triggered) off-loading execution onto the standby. If standby servers connect and disconnect frequently, you might want to make adjustments to handle the period when - hot_standby_feedback feedback is not being provided. - For example, consider increasing max_standby_archive_delay + hot_standby_feedback feedback is not being provided. + For example, consider increasing max_standby_archive_delay so that queries are not rapidly canceled by conflicts in WAL archive files during disconnected periods. You should also consider increasing - max_standby_streaming_delay to avoid rapid cancellations + max_standby_streaming_delay to avoid rapid cancellations by newly-arrived streaming WAL entries after reconnection. - Another option is to increase + Another option is to increase on the primary server, so that dead rows will not be cleaned up as quickly as they normally would be. This will allow more time for queries to execute before they are canceled on the standby, without having to set - a high max_standby_streaming_delay. However it is + a high max_standby_streaming_delay. However it is difficult to guarantee any specific execution-time window with this - approach, since vacuum_defer_cleanup_age is measured in + approach, since vacuum_defer_cleanup_age is measured in transactions executed on the primary server. The number of query cancels and the reason for them can be viewed using - the pg_stat_database_conflicts system view on the standby - server. The pg_stat_database system view also contains + the pg_stat_database_conflicts system view on the standby + server. The pg_stat_database system view also contains summary information. @@ -2098,8 +2107,8 @@ if (!triggered) Administrator's Overview - If hot_standby is on in postgresql.conf - (the default value) and there is a recovery.conf + If hot_standby is on in postgresql.conf + (the default value) and there is a recovery.conf file present, the server will run in Hot Standby mode. However, it may take some time for Hot Standby connections to be allowed, because the server will not accept connections until it has completed @@ -2120,8 +2129,8 @@ LOG: database system is ready to accept read only connections Consistency information is recorded once per checkpoint on the primary. It is not possible to enable hot standby when reading WAL - written during a period when wal_level was not set to - replica or logical on the primary. Reaching + written during a period when wal_level was not set to + replica or logical on the primary. Reaching a consistent state can also be delayed in the presence of both of these conditions: @@ -2140,14 +2149,19 @@ LOG: database system is ready to accept read only connections If you are running file-based log shipping ("warm standby"), you might need to wait until the next WAL file arrives, which could be as long as the - archive_timeout setting on the primary. + archive_timeout setting on the primary. The setting of some parameters on the standby will need reconfiguration if they have been changed on the primary. For these parameters, the value on the standby must - be equal to or greater than the value on the primary. If these parameters + be equal to or greater than the value on the primary. 
+ Therefore, if you want to increase these values, you should do so on all + standby servers first, before applying the changes to the primary server. + Conversely, if you want to decrease these values, you should do so on the + primary server first, before applying the changes to all standby servers. + If these parameters are not set high enough then the standby will refuse to start. Higher values can then be supplied and the server restarted to begin recovery again. These parameters are: @@ -2155,22 +2169,22 @@ LOG: database system is ready to accept read only connections - max_connections + max_connections - max_prepared_transactions + max_prepared_transactions - max_locks_per_transaction + max_locks_per_transaction - max_worker_processes + max_worker_processes @@ -2178,8 +2192,8 @@ LOG: database system is ready to accept read only connections It is important that the administrator select appropriate settings for - and . The best choices vary + and . The best choices vary depending on business priorities. For example if the server is primarily tasked as a High Availability server, then you will want low delay settings, perhaps even zero, though that is a very aggressive setting. If @@ -2209,19 +2223,19 @@ LOG: database system is ready to accept read only connections - Data Definition Language (DDL) - e.g. CREATE INDEX + Data Definition Language (DDL) - e.g. CREATE INDEX - Privilege and Ownership - GRANT, REVOKE, - REASSIGN + Privilege and Ownership - GRANT, REVOKE, + REASSIGN - Maintenance commands - ANALYZE, VACUUM, - CLUSTER, REINDEX + Maintenance commands - ANALYZE, VACUUM, + CLUSTER, REINDEX @@ -2241,14 +2255,14 @@ LOG: database system is ready to accept read only connections - pg_cancel_backend() - and pg_terminate_backend() will work on user backends, + pg_cancel_backend() + and pg_terminate_backend() will work on user backends, but not the Startup process, which performs recovery. pg_stat_activity does not show recovering transactions as active. As a result, pg_prepared_xacts is always empty during recovery. If you wish to resolve in-doubt prepared transactions, view - pg_prepared_xacts on the primary and issue commands to + pg_prepared_xacts on the primary and issue commands to resolve transactions there or resolve them after the end of recovery. @@ -2256,17 +2270,17 @@ LOG: database system is ready to accept read only connections pg_locks will show locks held by backends, as normal. pg_locks also shows a virtual transaction managed by the Startup process that owns all - AccessExclusiveLocks held by transactions being replayed by recovery. + AccessExclusiveLocks held by transactions being replayed by recovery. Note that the Startup process does not acquire locks to - make database changes, and thus locks other than AccessExclusiveLocks + make database changes, and thus locks other than AccessExclusiveLocks do not show in pg_locks for the Startup process; they are just presumed to exist. - The Nagios plugin check_pgsql will + The Nagios plugin check_pgsql will work, because the simple information it checks for exists. - The check_postgres monitoring script will also work, + The check_postgres monitoring script will also work, though some reported values could give different or confusing results. For example, last vacuum time will not be maintained, since no vacuum occurs on the standby. Vacuums running on the primary @@ -2275,11 +2289,11 @@ LOG: database system is ready to accept read only connections WAL file control commands will not work during recovery, - e.g. 
pg_start_backup, pg_switch_wal etc. + e.g. pg_start_backup, pg_switch_wal etc. - Dynamically loadable modules work, including pg_stat_statements. + Dynamically loadable modules work, including pg_stat_statements. @@ -2292,8 +2306,8 @@ LOG: database system is ready to accept read only connections - Trigger-based replication systems such as Slony, - Londiste and Bucardo won't run on the + Trigger-based replication systems such as Slony, + Londiste and Bucardo won't run on the standby at all, though they will run happily on the primary server as long as the changes are not sent to standby servers to be applied. WAL replay is not trigger-based so you cannot relay from the @@ -2302,7 +2316,7 @@ LOG: database system is ready to accept read only connections - New OIDs cannot be assigned, though some UUID generators may still + New OIDs cannot be assigned, though some UUID generators may still work as long as they do not rely on writing new status to the database. @@ -2314,32 +2328,32 @@ LOG: database system is ready to accept read only connections - DROP TABLESPACE can only succeed if the tablespace is empty. + DROP TABLESPACE can only succeed if the tablespace is empty. Some standby users may be actively using the tablespace via their - temp_tablespaces parameter. If there are temporary files in the + temp_tablespaces parameter. If there are temporary files in the tablespace, all active queries are canceled to ensure that temporary files are removed, so the tablespace can be removed and WAL replay can continue. - Running DROP DATABASE or ALTER DATABASE ... SET - TABLESPACE on the primary + Running DROP DATABASE or ALTER DATABASE ... SET + TABLESPACE on the primary will generate a WAL entry that will cause all users connected to that database on the standby to be forcibly disconnected. This action occurs immediately, whatever the setting of - max_standby_streaming_delay. Note that - ALTER DATABASE ... RENAME does not disconnect users, which + max_standby_streaming_delay. Note that + ALTER DATABASE ... RENAME does not disconnect users, which in most cases will go unnoticed, though might in some cases cause a program confusion if it depends in some way upon database name. - In normal (non-recovery) mode, if you issue DROP USER or DROP ROLE + In normal (non-recovery) mode, if you issue DROP USER or DROP ROLE for a role with login capability while that user is still connected then nothing happens to the connected user - they remain connected. The user cannot reconnect however. This behavior applies in recovery also, so a - DROP USER on the primary does not disconnect that user on the standby. + DROP USER on the primary does not disconnect that user on the standby. @@ -2361,7 +2375,7 @@ LOG: database system is ready to accept read only connections restartpoints (similar to checkpoints on the primary) and normal block cleaning activities. This can include updates of the hint bit information stored on the standby server. - The CHECKPOINT command is accepted during recovery, + The CHECKPOINT command is accepted during recovery, though it performs a restartpoint rather than a new checkpoint. @@ -2371,23 +2385,23 @@ LOG: database system is ready to accept read only connections Various parameters have been mentioned above in - and - . + and + . - On the primary, parameters and - can be used. - and - have no effect if set on + On the primary, parameters and + can be used. + and + have no effect if set on the primary. - On the standby, parameters , - and - can be used. 
- has no effect + On the standby, parameters , + and + can be used. + has no effect as long as the server remains in standby mode, though it will become relevant if the standby becomes primary. @@ -2427,22 +2441,22 @@ LOG: database system is ready to accept read only connections - At the end of recovery, AccessExclusiveLocks held by prepared transactions + At the end of recovery, AccessExclusiveLocks held by prepared transactions will require twice the normal number of lock table entries. If you plan on running either a large number of concurrent prepared transactions - that normally take AccessExclusiveLocks, or you plan on having one - large transaction that takes many AccessExclusiveLocks, you are - advised to select a larger value of max_locks_per_transaction, + that normally take AccessExclusiveLocks, or you plan on having one + large transaction that takes many AccessExclusiveLocks, you are + advised to select a larger value of max_locks_per_transaction, perhaps as much as twice the value of the parameter on the primary server. You need not consider this at all if - your setting of max_prepared_transactions is 0. + your setting of max_prepared_transactions is 0. The Serializable transaction isolation level is not yet available in hot - standby. (See and - for details.) + standby. (See and + for details.) An attempt to set a transaction to the serializable isolation level in hot standby mode will generate an error. diff --git a/doc/src/sgml/history.sgml b/doc/src/sgml/history.sgml index a7f4b701ea..180695afd9 100644 --- a/doc/src/sgml/history.sgml +++ b/doc/src/sgml/history.sgml @@ -31,12 +31,12 @@ Office (ARO), the National Science Foundation (NSF), and ESL, Inc. The implementation of POSTGRES began in 1986. The initial - concepts for the system were presented in , + concepts for the system were presented in , and the definition of the initial data model appeared in . The design of the rule system at that time was - described in . The rationale and + linkend="rowe87"/>. The design of the rule system at that time was + described in . The rationale and architecture of the storage manager were detailed in . + linkend="ston87b"/>. @@ -44,10 +44,10 @@ releases since then. The first demoware system became operational in 1987 and was shown at the 1988 ACM-SIGMOD Conference. Version 1, described in - , was released to a few external users in + , was released to a few external users in June 1989. In response to a critique of the first rule system - (), the rule system was redesigned (), and Version 2 was released in June 1990 with + (), the rule system was redesigned (), and Version 2 was released in June 1990 with the new rule system. Version 3 appeared in 1991 and added support for multiple storage managers, an improved query executor, and a rewritten rule system. For the most part, subsequent releases @@ -64,9 +64,9 @@ POSTGRES has also been used as an educational tool at several universities. Finally, Illustra Information Technologies (later merged into - Informix, + Informix, which is now owned by IBM) picked up the code and + url="https://www.ibm.com/">IBM) picked up the code and commercialized it. In late 1992, POSTGRES became the primary data manager for the @@ -132,7 +132,7 @@ (psql) was provided for interactive SQL queries, which used GNU Readline. This largely superseded - the old monitor program. + the old monitor program. @@ -215,8 +215,8 @@ - Details about what has happened in PostgreSQL since - then can be found in . 
+ Details about what has happened in PostgreSQL since + then can be found in . diff --git a/doc/src/sgml/hstore.sgml b/doc/src/sgml/hstore.sgml index db5d4409a6..94ccd1201e 100644 --- a/doc/src/sgml/hstore.sgml +++ b/doc/src/sgml/hstore.sgml @@ -8,21 +8,21 @@ - This module implements the hstore data type for storing sets of - key/value pairs within a single PostgreSQL value. + This module implements the hstore data type for storing sets of + key/value pairs within a single PostgreSQL value. This can be useful in various scenarios, such as rows with many attributes that are rarely examined, or semi-structured data. Keys and values are simply text strings. - <type>hstore</> External Representation + <type>hstore</type> External Representation - The text representation of an hstore, used for input and output, - includes zero or more key => - value pairs separated by commas. Some examples: + The text representation of an hstore, used for input and output, + includes zero or more key => + value pairs separated by commas. Some examples: k => v @@ -31,15 +31,15 @@ foo => bar, baz => whatever The order of the pairs is not significant (and may not be reproduced on - output). Whitespace between pairs or around the => sign is + output). Whitespace between pairs or around the => sign is ignored. Double-quote keys and values that include whitespace, commas, - =s or >s. To include a double quote or a + =s or >s. To include a double quote or a backslash in a key or value, escape it with a backslash. - Each key in an hstore is unique. If you declare an hstore - with duplicate keys, only one will be stored in the hstore and + Each key in an hstore is unique. If you declare an hstore + with duplicate keys, only one will be stored in the hstore and there is no guarantee as to which will be kept: @@ -51,26 +51,26 @@ SELECT 'a=>1,a=>2'::hstore; - A value (but not a key) can be an SQL NULL. For example: + A value (but not a key) can be an SQL NULL. For example: key => NULL - The NULL keyword is case-insensitive. Double-quote the - NULL to treat it as the ordinary string NULL. + The NULL keyword is case-insensitive. Double-quote the + NULL to treat it as the ordinary string NULL. - Keep in mind that the hstore text format, when used for input, - applies before any required quoting or escaping. If you are - passing an hstore literal via a parameter, then no additional + Keep in mind that the hstore text format, when used for input, + applies before any required quoting or escaping. If you are + passing an hstore literal via a parameter, then no additional processing is needed. But if you're passing it as a quoted literal constant, then any single-quote characters and (depending on the setting of - the standard_conforming_strings configuration parameter) + the standard_conforming_strings configuration parameter) backslash characters need to be escaped correctly. See - for more on the handling of string + for more on the handling of string constants. @@ -83,16 +83,16 @@ key => NULL - <type>hstore</> Operators and Functions + <type>hstore</type> Operators and Functions The operators provided by the hstore module are - shown in , the functions - in . + shown in , the functions + in .
- <type>hstore</> Operators + <type>hstore</type> Operators @@ -106,99 +106,99 @@ key => NULL - hstore -> text - get value for key (NULL if not present) + hstore -> text + get value for key (NULL if not present) 'a=>x, b=>y'::hstore -> 'a' x - hstore -> text[] - get values for keys (NULL if not present) + hstore -> text[] + get values for keys (NULL if not present) 'a=>x, b=>y, c=>z'::hstore -> ARRAY['c','a'] {"z","x"} - hstore || hstore - concatenate hstores + hstore || hstore + concatenate hstores 'a=>b, c=>d'::hstore || 'c=>x, d=>q'::hstore "a"=>"b", "c"=>"x", "d"=>"q" - hstore ? text - does hstore contain key? + hstore ? text + does hstore contain key? 'a=>1'::hstore ? 'a' t - hstore ?& text[] - does hstore contain all specified keys? + hstore ?& text[] + does hstore contain all specified keys? 'a=>1,b=>2'::hstore ?& ARRAY['a','b'] t - hstore ?| text[] - does hstore contain any of the specified keys? + hstore ?| text[] + does hstore contain any of the specified keys? 'a=>1,b=>2'::hstore ?| ARRAY['b','c'] t - hstore @> hstore + hstore @> hstore does left operand contain right? 'a=>b, b=>1, c=>NULL'::hstore @> 'b=>1' t - hstore <@ hstore + hstore <@ hstore is left operand contained in right? 'a=>c'::hstore <@ 'a=>b, b=>1, c=>NULL' f - hstore - text + hstore - text delete key from left operand 'a=>1, b=>2, c=>3'::hstore - 'b'::text "a"=>"1", "c"=>"3" - hstore - text[] + hstore - text[] delete keys from left operand 'a=>1, b=>2, c=>3'::hstore - ARRAY['a','b'] "c"=>"3" - hstore - hstore + hstore - hstore delete matching pairs from left operand 'a=>1, b=>2, c=>3'::hstore - 'a=>4, b=>2'::hstore "a"=>"1", "c"=>"3" - record #= hstore - replace fields in record with matching values from hstore + record #= hstore + replace fields in record with matching values from hstore see Examples section - %% hstore - convert hstore to array of alternating keys and values + %% hstore + convert hstore to array of alternating keys and values %% 'a=>foo, b=>bar'::hstore {a,foo,b,bar} - %# hstore - convert hstore to two-dimensional key/value array + %# hstore + convert hstore to two-dimensional key/value array %# 'a=>foo, b=>bar'::hstore {{a,foo},{b,bar}} @@ -209,8 +209,8 @@ key => NULL - Prior to PostgreSQL 8.2, the containment operators @> - and <@ were called @ and ~, + Prior to PostgreSQL 8.2, the containment operators @> + and <@ were called @ and ~, respectively. These names are still available, but are deprecated and will eventually be removed. Notice that the old names are reversed from the convention formerly followed by the core geometric data types! @@ -218,7 +218,7 @@ key => NULL
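Before moving on to the functions, here is a short end-to-end sketch showing a few of the operators above in ordinary SQL; the table and column names are invented for illustration, and the hstore extension is assumed to be installed:

    -- a throwaway table with an hstore column
    CREATE TABLE books (id serial PRIMARY KEY, attrs hstore);
    INSERT INTO books (attrs) VALUES
      ('author=>"Jane Doe", pages=>250, format=>paperback'),
      ('author=>"John Roe", format=>hardcover');

    -- -> extracts a value, @> tests containment
    SELECT id, attrs -> 'author' AS author
    FROM books
    WHERE attrs @> 'format=>paperback';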
- <type>hstore</> Functions + <type>hstore</type> Functions @@ -235,7 +235,7 @@ key => NULL hstore(record)hstore hstore - construct an hstore from a record or row + construct an hstore from a record or row hstore(ROW(1,2)) f1=>1,f2=>2 @@ -243,7 +243,7 @@ key => NULL hstore(text[]) hstore - construct an hstore from an array, which may be either + construct an hstore from an array, which may be either a key/value array, or a two-dimensional array hstore(ARRAY['a','1','b','2']) || hstore(ARRAY[['c','3'],['d','4']]) a=>1, b=>2, c=>3, d=>4 @@ -252,7 +252,7 @@ key => NULL hstore(text[], text[]) hstore - construct an hstore from separate key and value arrays + construct an hstore from separate key and value arrays hstore(ARRAY['a','b'], ARRAY['1','2']) "a"=>"1","b"=>"2" @@ -260,7 +260,7 @@ key => NULL hstore(text, text) hstore - make single-item hstore + make single-item hstore hstore('a', 'b') "a"=>"b" @@ -268,7 +268,7 @@ key => NULL akeys(hstore)akeys text[] - get hstore's keys as an array + get hstore's keys as an array akeys('a=>1,b=>2') {a,b} @@ -276,7 +276,7 @@ key => NULL skeys(hstore)skeys setof text - get hstore's keys as a set + get hstore's keys as a set skeys('a=>1,b=>2') @@ -288,7 +288,7 @@ b avals(hstore)avals text[] - get hstore's values as an array + get hstore's values as an array avals('a=>1,b=>2') {1,2} @@ -296,7 +296,7 @@ b svals(hstore)svals setof text - get hstore's values as a set + get hstore's values as a set svals('a=>1,b=>2') @@ -308,7 +308,7 @@ b hstore_to_array(hstore)hstore_to_array text[] - get hstore's keys and values as an array of alternating + get hstore's keys and values as an array of alternating keys and values hstore_to_array('a=>1,b=>2') {a,1,b,2} @@ -317,7 +317,7 @@ b hstore_to_matrix(hstore)hstore_to_matrix text[] - get hstore's keys and values as a two-dimensional array + get hstore's keys and values as a two-dimensional array hstore_to_matrix('a=>1,b=>2') {{a,1},{b,2}} @@ -359,7 +359,7 @@ b slice(hstore, text[])slice hstore - extract a subset of an hstore + extract a subset of an hstore slice('a=>1,b=>2,c=>3'::hstore, ARRAY['b','c','x']) "b"=>"2", "c"=>"3" @@ -367,7 +367,7 @@ b each(hstore)each setof(key text, value text) - get hstore's keys and values as a set + get hstore's keys and values as a set select * from each('a=>1,b=>2') @@ -381,7 +381,7 @@ b exist(hstore,text)exist boolean - does hstore contain key? + does hstore contain key? exist('a=>1','a') t @@ -389,7 +389,7 @@ b defined(hstore,text)defined boolean - does hstore contain non-NULL value for key? + does hstore contain non-NULL value for key? defined('a=>NULL','a') f @@ -421,7 +421,7 @@ b populate_record(record,hstore)populate_record record - replace fields in record with matching values from hstore + replace fields in record with matching values from hstore see Examples section @@ -442,7 +442,7 @@ b The function populate_record is actually declared - with anyelement, not record, as its first argument, + with anyelement, not record, as its first argument, but it will reject non-record types with a run-time error. @@ -452,8 +452,8 @@ b Indexes - hstore has GiST and GIN index support for the @>, - ?, ?& and ?| operators. For example: + hstore has GiST and GIN index support for the @>, + ?, ?& and ?| operators. For example: CREATE INDEX hidx ON testhstore USING GIST (h); @@ -462,12 +462,12 @@ CREATE INDEX hidx ON testhstore USING GIN (h); - hstore also supports btree or hash indexes for - the = operator. 
This allows hstore columns to be - declared UNIQUE, or to be used in GROUP BY, - ORDER BY or DISTINCT expressions. The sort ordering - for hstore values is not particularly useful, but these indexes - may be useful for equivalence lookups. Create indexes for = + hstore also supports btree or hash indexes for + the = operator. This allows hstore columns to be + declared UNIQUE, or to be used in GROUP BY, + ORDER BY or DISTINCT expressions. The sort ordering + for hstore values is not particularly useful, but these indexes + may be useful for equivalence lookups. Create indexes for = comparisons as follows: @@ -495,7 +495,7 @@ UPDATE tab SET h = delete(h, 'k1'); - Convert a record to an hstore: + Convert a record to an hstore: CREATE TABLE test (col1 integer, col2 text, col3 text); INSERT INTO test VALUES (123, 'foo', 'bar'); @@ -509,7 +509,7 @@ SELECT hstore(t) FROM test AS t; - Convert an hstore to a predefined record type: + Convert an hstore to a predefined record type: CREATE TABLE test (col1 integer, col2 text, col3 text); @@ -523,7 +523,7 @@ SELECT * FROM populate_record(null::test, - Modify an existing record using the values from an hstore: + Modify an existing record using the values from an hstore: CREATE TABLE test (col1 integer, col2 text, col3 text); INSERT INTO test VALUES (123, 'foo', 'bar'); @@ -541,7 +541,7 @@ SELECT (r).* FROM (SELECT t #= '"col3"=>"baz"' AS r FROM test t) s; Statistics - The hstore type, because of its intrinsic liberality, could + The hstore type, because of its intrinsic liberality, could contain a lot of different keys. Checking for valid keys is the task of the application. The following examples demonstrate several techniques for checking keys and obtaining statistics. @@ -588,7 +588,7 @@ SELECT key, count(*) FROM Compatibility - As of PostgreSQL 9.0, hstore uses a different internal + As of PostgreSQL 9.0, hstore uses a different internal representation than previous versions. This presents no obstacle for dump/restore upgrades since the text representation (used in the dump) is unchanged. @@ -599,7 +599,7 @@ SELECT key, count(*) FROM having the new code recognize old-format data. This will entail a slight performance penalty when processing data that has not yet been modified by the new code. It is possible to force an upgrade of all values in a table - column by doing an UPDATE statement as follows: + column by doing an UPDATE statement as follows: UPDATE tablename SET hstorecol = hstorecol || ''; @@ -610,7 +610,7 @@ UPDATE tablename SET hstorecol = hstorecol || ''; ALTER TABLE tablename ALTER hstorecol TYPE hstore USING hstorecol || ''; - The ALTER TABLE method requires an exclusive lock on the table, + The ALTER TABLE method requires an exclusive lock on the table, but does not result in bloating the table with old row versions. @@ -629,7 +629,7 @@ ALTER TABLE tablename ALTER hstorecol TYPE hstore USING hstorecol || ''; extensions for PL/Python are called hstore_plpythonu, hstore_plpython2u, and hstore_plpython3u - (see for the PL/Python naming + (see for the PL/Python naming convention). If you use them, hstore values are mapped to Python dictionaries. diff --git a/doc/src/sgml/indexam.sgml b/doc/src/sgml/indexam.sgml index ac512588e2..d758a4987d 100644 --- a/doc/src/sgml/indexam.sgml +++ b/doc/src/sgml/indexam.sgml @@ -6,32 +6,32 @@ This chapter defines the interface between the core PostgreSQL system and index access - methods, which manage individual index types. The core system + methods, which manage individual index types. 
The core system knows nothing about indexes beyond what is specified here, so it is possible to develop entirely new index types by writing add-on code. All indexes in PostgreSQL are what are known - technically as secondary indexes; that is, the index is + technically as secondary indexes; that is, the index is physically separate from the table file that it describes. Each index - is stored as its own physical relation and so is described - by an entry in the pg_class catalog. The contents of an + is stored as its own physical relation and so is described + by an entry in the pg_class catalog. The contents of an index are entirely under the control of its index access method. In practice, all index access methods divide indexes into standard-size pages so that they can use the regular storage manager and buffer manager to access the index contents. (All the existing index access methods furthermore use the standard page layout described in , and most use the same format for index + linkend="storage-page-layout"/>, and most use the same format for index tuple headers; but these decisions are not forced on an access method.) An index is effectively a mapping from some data key values to - tuple identifiers, or TIDs, of row versions + tuple identifiers, or TIDs, of row versions (tuples) in the index's parent table. A TID consists of a block number and an item number within that block (see ). This is sufficient + linkend="storage-page-layout"/>). This is sufficient information to fetch a particular row version from the table. Indexes are not directly aware that under MVCC, there might be multiple extant versions of the same logical row; to an index, each tuple is @@ -50,28 +50,28 @@ Each index access method is described by a row in the pg_am system catalog. The pg_am entry - specifies a name and a handler function for the access + specifies a name and a handler function for the access method. These entries can be created and deleted using the - and - SQL commands. + and + SQL commands. An index access method handler function must be declared to accept a - single argument of type internal and to return the - pseudo-type index_am_handler. The argument is a dummy value that + single argument of type internal and to return the + pseudo-type index_am_handler. The argument is a dummy value that simply serves to prevent handler functions from being called directly from SQL commands. The result of the function must be a palloc'd struct of type IndexAmRoutine, which contains everything that the core code needs to know to make use of the index access method. The IndexAmRoutine struct, also called the access - method's API struct, includes fields specifying assorted + method's API struct, includes fields specifying assorted fixed properties of the access method, such as whether it can support multicolumn indexes. More importantly, it contains pointers to support functions for the access method, which do all of the real work to access indexes. These support functions are plain C functions and are not visible or callable at the SQL level. The support functions are described - in . + in . @@ -112,6 +112,8 @@ typedef struct IndexAmRoutine bool ampredlocks; /* does AM support parallel scan? */ bool amcanparallel; + /* does AM support columns included with clause INCLUDE? 
*/ + bool amcaninclude; /* type of data stored in index, or InvalidOid if variable */ Oid amkeytype; @@ -144,8 +146,8 @@ typedef struct IndexAmRoutine To be useful, an index access method must also have one or more - operator families and - operator classes defined in + operator families and + operator classes defined in pg_opfamily, pg_opclass, pg_amop, and @@ -153,7 +155,7 @@ typedef struct IndexAmRoutine These entries allow the planner to determine what kinds of query qualifications can be used with indexes of this access method. Operator families and classes are described - in , which is prerequisite material for reading + in , which is prerequisite material for reading this chapter. @@ -170,14 +172,14 @@ typedef struct IndexAmRoutine key values come from (it is always handed precomputed key values) but it will be very interested in the operator class information in pg_index. Both of these catalog entries can be - accessed as part of the Relation data structure that is + accessed as part of the Relation data structure that is passed to all operations on the index. - Some of the flag fields of IndexAmRoutine have nonobvious + Some of the flag fields of IndexAmRoutine have nonobvious implications. The requirements of amcanunique - are discussed in . + are discussed in . The amcanmulticol flag asserts that the access method supports multicolumn indexes, while amoptionalkey asserts that it allows scans @@ -185,7 +187,7 @@ typedef struct IndexAmRoutine When amcanmulticol is false, amoptionalkey essentially says whether the access method supports full-index scans without any restriction clause. - Access methods that support multiple index columns must + Access methods that support multiple index columns must support scans that omit restrictions on any or all of the columns after the first; however they are permitted to require some restriction to appear for the first index column, and this is signaled by setting @@ -201,17 +203,17 @@ typedef struct IndexAmRoutine indexes that have amoptionalkey true must index nulls, since the planner might decide to use such an index with no scan keys at all. A related restriction is that an index - access method that supports multiple index columns must + access method that supports multiple index columns must support indexing null values in columns after the first, because the planner will assume the index can be used for queries that do not restrict these columns. For example, consider an index on (a,b) and a query with WHERE a = 4. The system will assume the index can be used to scan for rows with a = 4, which is wrong if the - index omits rows where b is null. + index omits rows where b is null. It is, however, OK to omit rows where the first indexed column is null. An index access method that does index nulls may also set amsearchnulls, indicating that it supports - IS NULL and IS NOT NULL clauses as search + IS NULL and IS NOT NULL clauses as search conditions. @@ -235,8 +237,8 @@ ambuild (Relation heapRelation, Build a new index. The index relation has been physically created, but is empty. It must be filled in with whatever fixed data the access method requires, plus entries for all tuples already existing - in the table. Ordinarily the ambuild function will call - IndexBuildHeapScan() to scan the table for existing tuples + in the table. Ordinarily the ambuild function will call + IndexBuildHeapScan() to scan the table for existing tuples and compute the keys that need to be inserted into the index. 
The function must return a palloc'd struct containing statistics about the new index. @@ -264,30 +266,30 @@ aminsert (Relation indexRelation, IndexUniqueCheck checkUnique, IndexInfo *indexInfo); - Insert a new tuple into an existing index. The values and - isnull arrays give the key values to be indexed, and - heap_tid is the TID to be indexed. + Insert a new tuple into an existing index. The values and + isnull arrays give the key values to be indexed, and + heap_tid is the TID to be indexed. If the access method supports unique indexes (its - amcanunique flag is true) then - checkUnique indicates the type of uniqueness check to + amcanunique flag is true) then + checkUnique indicates the type of uniqueness check to perform. This varies depending on whether the unique constraint is - deferrable; see for details. - Normally the access method only needs the heapRelation + deferrable; see for details. + Normally the access method only needs the heapRelation parameter when performing uniqueness checking (since then it will have to look into the heap to verify tuple liveness). The function's Boolean result value is significant only when - checkUnique is UNIQUE_CHECK_PARTIAL. - In this case a TRUE result means the new entry is known unique, whereas - FALSE means it might be non-unique (and a deferred uniqueness check must - be scheduled). For other cases a constant FALSE result is recommended. + checkUnique is UNIQUE_CHECK_PARTIAL. + In this case a true result means the new entry is known unique, whereas + false means it might be non-unique (and a deferred uniqueness check must + be scheduled). For other cases a constant false result is recommended. Some indexes might not index all tuples. If the tuple is not to be - indexed, aminsert should just return without doing anything. + indexed, aminsert should just return without doing anything. @@ -306,26 +308,26 @@ ambulkdelete (IndexVacuumInfo *info, IndexBulkDeleteCallback callback, void *callback_state); - Delete tuple(s) from the index. This is a bulk delete operation + Delete tuple(s) from the index. This is a bulk delete operation that is intended to be implemented by scanning the whole index and checking each entry to see if it should be deleted. - The passed-in callback function must be called, in the style - callback(TID, callback_state) returns bool, + The passed-in callback function must be called, in the style + callback(TID, callback_state) returns bool, to determine whether any particular index entry, as identified by its referenced TID, is to be deleted. Must return either NULL or a palloc'd struct containing statistics about the effects of the deletion operation. It is OK to return NULL if no information needs to be passed on to - amvacuumcleanup. + amvacuumcleanup. - Because of limited maintenance_work_mem, - ambulkdelete might need to be called more than once when many - tuples are to be deleted. The stats argument is the result + Because of limited maintenance_work_mem, + ambulkdelete might need to be called more than once when many + tuples are to be deleted. The stats argument is the result of the previous call for this index (it is NULL for the first call within a - VACUUM operation). This allows the AM to accumulate statistics - across the whole operation. Typically, ambulkdelete will - modify and return the same struct if the passed stats is not + VACUUM operation). This allows the AM to accumulate statistics + across the whole operation. 
Typically, ambulkdelete will + modify and return the same struct if the passed stats is not null. @@ -336,14 +338,14 @@ amvacuumcleanup (IndexVacuumInfo *info, IndexBulkDeleteResult *stats); Clean up after a VACUUM operation (zero or more - ambulkdelete calls). This does not have to do anything + ambulkdelete calls). This does not have to do anything beyond returning index statistics, but it might perform bulk cleanup - such as reclaiming empty index pages. stats is whatever the - last ambulkdelete call returned, or NULL if - ambulkdelete was not called because no tuples needed to be + such as reclaiming empty index pages. stats is whatever the + last ambulkdelete call returned, or NULL if + ambulkdelete was not called because no tuples needed to be deleted. If the result is not NULL it must be a palloc'd struct. - The statistics it contains will be used to update pg_class, - and will be reported by VACUUM if VERBOSE is given. + The statistics it contains will be used to update pg_class, + and will be reported by VACUUM if VERBOSE is given. It is OK to return NULL if the index was not changed at all during the VACUUM operation, but otherwise correct stats should be returned. @@ -351,8 +353,8 @@ amvacuumcleanup (IndexVacuumInfo *info, As of PostgreSQL 8.4, - amvacuumcleanup will also be called at completion of an - ANALYZE operation. In this case stats is always + amvacuumcleanup will also be called at completion of an + ANALYZE operation. In this case stats is always NULL and any return value will be ignored. This case can be distinguished by checking info->analyze_only. It is recommended that the access method do nothing except post-insert cleanup in such a @@ -365,12 +367,12 @@ bool amcanreturn (Relation indexRelation, int attno); Check whether the index can support index-only scans on + linkend="indexes-index-only-scans">index-only scans on the given column, by returning the indexed column values for an index entry in the form of an IndexTuple. The attribute number - is 1-based, i.e. the first column's attno is 1. Returns TRUE if supported, - else FALSE. If the access method does not support index-only scans at all, - the amcanreturn field in its IndexAmRoutine + is 1-based, i.e. the first column's attno is 1. Returns true if supported, + else false. If the access method does not support index-only scans at all, + the amcanreturn field in its IndexAmRoutine struct can be set to NULL. @@ -383,10 +385,11 @@ amcostestimate (PlannerInfo *root, Cost *indexStartupCost, Cost *indexTotalCost, Selectivity *indexSelectivity, - double *indexCorrelation); + double *indexCorrelation, + double *indexPages); Estimate the costs of an index scan. This function is described fully - in , below. + in , below. @@ -397,18 +400,18 @@ amoptions (ArrayType *reloptions, Parse and validate the reloptions array for an index. This is called only when a non-null reloptions array exists for the index. - reloptions is a text array containing entries of the - form name=value. - The function should construct a bytea value, which will be copied - into the rd_options field of the index's relcache entry. - The data contents of the bytea value are open for the access + reloptions is a text array containing entries of the + form name=value. + The function should construct a bytea value, which will be copied + into the rd_options field of the index's relcache entry. + The data contents of the bytea value are open for the access method to define; most of the standard access methods use struct - StdRdOptions. 
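    The reloptions array that amoptions parses is populated from the
    WITH clause of CREATE INDEX and, once validated, is stored in
    pg_class.reloptions.  A minimal sketch of that round trip, assuming a
    hypothetical table opts_demo (fillfactor is a standard b-tree storage
    parameter):

CREATE TABLE opts_demo (id integer);
CREATE INDEX opts_demo_id_idx ON opts_demo (id) WITH (fillfactor = 70);
-- the validated name=value entries end up in the catalog
SELECT reloptions FROM pg_class WHERE relname = 'opts_demo_id_idx';

    An unrecognized or out-of-range option in the WITH clause is rejected
    through the validate-true code path described here.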
- When validate is true, the function should report a suitable + StdRdOptions. + When validate is true, the function should report a suitable error message if any of the options are unrecognized or have invalid - values; when validate is false, invalid entries should be - silently ignored. (validate is false when loading options - already stored in pg_catalog; an invalid entry could only + values; when validate is false, invalid entries should be + silently ignored. (validate is false when loading options + already stored in pg_catalog; an invalid entry could only be found if the access method has changed its rules for options, and in that case ignoring obsolete entries is appropriate.) It is OK to return NULL if default behavior is wanted. @@ -421,44 +424,44 @@ amproperty (Oid index_oid, int attno, IndexAMProperty prop, const char *propname, bool *res, bool *isnull); - The amproperty method allows index access methods to override + The amproperty method allows index access methods to override the default behavior of pg_index_column_has_property and related functions. If the access method does not have any special behavior for index property - inquiries, the amproperty field in - its IndexAmRoutine struct can be set to NULL. - Otherwise, the amproperty method will be called with - index_oid and attno both zero for + inquiries, the amproperty field in + its IndexAmRoutine struct can be set to NULL. + Otherwise, the amproperty method will be called with + index_oid and attno both zero for pg_indexam_has_property calls, - or with index_oid valid and attno zero for + or with index_oid valid and attno zero for pg_index_has_property calls, - or with index_oid valid and attno greater than + or with index_oid valid and attno greater than zero for pg_index_column_has_property calls. - prop is an enum value identifying the property being tested, - while propname is the original property name string. + prop is an enum value identifying the property being tested, + while propname is the original property name string. If the core code does not recognize the property name - then prop is AMPROP_UNKNOWN. + then prop is AMPROP_UNKNOWN. Access methods can define custom property names by - checking propname for a match (use pg_strcasecmp + checking propname for a match (use pg_strcasecmp to match, for consistency with the core code); for names known to the core - code, it's better to inspect prop. - If the amproperty method returns true then - it has determined the property test result: it must set *res - to the boolean value to return, or set *isnull - to true to return a NULL. (Both of the referenced variables - are initialized to false before the call.) - If the amproperty method returns false then + code, it's better to inspect prop. + If the amproperty method returns true then + it has determined the property test result: it must set *res + to the boolean value to return, or set *isnull + to true to return a NULL. (Both of the referenced variables + are initialized to false before the call.) + If the amproperty method returns false then the core code will proceed with its normal logic for determining the property test result. Access methods that support ordering operators should - implement AMPROP_DISTANCE_ORDERABLE property testing, as the + implement AMPROP_DISTANCE_ORDERABLE property testing, as the core code does not know how to do that and will return NULL. 
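    These property tests are reachable from SQL through
    pg_indexam_has_property, pg_index_has_property, and
    pg_index_column_has_property.  A sketch, assuming a hypothetical table
    pts with a point column loc and a GiST index pts_loc_idx on it
    (the can_include property name is assumed to be the one exposed for the
    new amcaninclude flag):

CREATE TABLE pts (loc point);
CREATE INDEX pts_loc_idx ON pts USING gist (loc);
SELECT amname,
       pg_indexam_has_property(oid, 'can_unique')  AS can_unique,
       pg_indexam_has_property(oid, 'can_include') AS can_include
FROM pg_am WHERE amtype = 'i';
SELECT pg_index_column_has_property('pts_loc_idx'::regclass, 1, 'distance_orderable');
SELECT pg_index_column_has_property('pts_loc_idx'::regclass, 1, 'returnable');

    A GiST operator class whose amproperty implementation handles
    AMPROP_DISTANCE_ORDERABLE is what lets the distance_orderable query
    return a definite answer; without it the core code falls back to
    returning NULL as described above.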
It may - also be advantageous to implement AMPROP_RETURNABLE testing, + also be advantageous to implement AMPROP_RETURNABLE testing, if that can be done more cheaply than by opening the index and calling - amcanreturn, which is the core code's default behavior. + amcanreturn, which is the core code's default behavior. The default behavior should be satisfactory for all other standard properties. @@ -471,18 +474,18 @@ amvalidate (Oid opclassoid); Validate the catalog entries for the specified operator class, so far as the access method can reasonably do that. For example, this might include testing that all required support functions are provided. - The amvalidate function must return false if the opclass is - invalid. Problems should be reported with ereport messages. + The amvalidate function must return false if the opclass is + invalid. Problems should be reported with ereport messages. The purpose of an index, of course, is to support scans for tuples matching - an indexable WHERE condition, often called a - qualifier or scan key. The semantics of - index scanning are described more fully in , - below. An index access method can support plain index scans, - bitmap index scans, or both. The scan-related functions that an + an indexable WHERE condition, often called a + qualifier or scan key. The semantics of + index scanning are described more fully in , + below. An index access method can support plain index scans, + bitmap index scans, or both. The scan-related functions that an index access method must or may provide are: @@ -493,17 +496,17 @@ ambeginscan (Relation indexRelation, int nkeys, int norderbys); - Prepare for an index scan. The nkeys and norderbys + Prepare for an index scan. The nkeys and norderbys parameters indicate the number of quals and ordering operators that will be used in the scan; these may be useful for space allocation purposes. Note that the actual values of the scan keys aren't provided yet. The result must be a palloc'd struct. For implementation reasons the index access method - must create this struct by calling - RelationGetIndexScan(). In most cases - ambeginscan does little beyond making that call and perhaps + must create this struct by calling + RelationGetIndexScan(). In most cases + ambeginscan does little beyond making that call and perhaps acquiring locks; - the interesting parts of index-scan startup are in amrescan. + the interesting parts of index-scan startup are in amrescan. @@ -516,10 +519,10 @@ amrescan (IndexScanDesc scan, int norderbys); Start or restart an index scan, possibly with new scan keys. (To restart - using previously-passed keys, NULL is passed for keys and/or - orderbys.) Note that it is not allowed for + using previously-passed keys, NULL is passed for keys and/or + orderbys.) Note that it is not allowed for the number of keys or order-by operators to be larger than - what was passed to ambeginscan. In practice the restart + what was passed to ambeginscan. In practice the restart feature is used when a new outer tuple is selected by a nested-loop join and so a new key comparison value is needed, but the scan key structure remains the same. @@ -532,44 +535,44 @@ amgettuple (IndexScanDesc scan, ScanDirection direction); Fetch the next tuple in the given scan, moving in the given - direction (forward or backward in the index). Returns TRUE if a tuple was - obtained, FALSE if no matching tuples remain. In the TRUE case the tuple - TID is stored into the scan structure. 
Note that - success means only that the index contains an entry that matches + direction (forward or backward in the index). Returns true if a tuple was + obtained, false if no matching tuples remain. In the true case the tuple + TID is stored into the scan structure. Note that + success means only that the index contains an entry that matches the scan keys, not that the tuple necessarily still exists in the heap or - will pass the caller's snapshot test. On success, amgettuple - must also set scan->xs_recheck to TRUE or FALSE. - FALSE means it is certain that the index entry matches the scan keys. - TRUE means this is not certain, and the conditions represented by the + will pass the caller's snapshot test. On success, amgettuple + must also set scan->xs_recheck to true or false. + False means it is certain that the index entry matches the scan keys. + true means this is not certain, and the conditions represented by the scan keys must be rechecked against the heap tuple after fetching it. - This provision supports lossy index operators. + This provision supports lossy index operators. Note that rechecking will extend only to the scan conditions; a partial - index predicate (if any) is never rechecked by amgettuple + index predicate (if any) is never rechecked by amgettuple callers. If the index supports index-only - scans (i.e., amcanreturn returns TRUE for it), - then on success the AM must also check scan->xs_want_itup, + scans (i.e., amcanreturn returns true for it), + then on success the AM must also check scan->xs_want_itup, and if that is true it must return the originally indexed data for the index entry. The data can be returned in the form of an - IndexTuple pointer stored at scan->xs_itup, - with tuple descriptor scan->xs_itupdesc; or in the form of - a HeapTuple pointer stored at scan->xs_hitup, - with tuple descriptor scan->xs_hitupdesc. (The latter + IndexTuple pointer stored at scan->xs_itup, + with tuple descriptor scan->xs_itupdesc; or in the form of + a HeapTuple pointer stored at scan->xs_hitup, + with tuple descriptor scan->xs_hitupdesc. (The latter format should be used when reconstructing data that might possibly not fit - into an IndexTuple.) In either case, + into an IndexTuple.) In either case, management of the data referenced by the pointer is the access method's responsibility. The data must remain good at least until the next - amgettuple, amrescan, or amendscan + amgettuple, amrescan, or amendscan call for the scan. - The amgettuple function need only be provided if the access - method supports plain index scans. If it doesn't, the - amgettuple field in its IndexAmRoutine + The amgettuple function need only be provided if the access + method supports plain index scans. If it doesn't, the + amgettuple field in its IndexAmRoutine struct must be set to NULL. @@ -583,24 +586,24 @@ amgetbitmap (IndexScanDesc scan, TIDBitmap (that is, OR the set of tuple IDs into whatever set is already in the bitmap). The number of tuples fetched is returned (this might be just an approximate count, for instance some AMs do not detect duplicates). - While inserting tuple IDs into the bitmap, amgetbitmap can + While inserting tuple IDs into the bitmap, amgetbitmap can indicate that rechecking of the scan conditions is required for specific - tuple IDs. This is analogous to the xs_recheck output parameter - of amgettuple. Note: in the current implementation, support + tuple IDs. This is analogous to the xs_recheck output parameter + of amgettuple. 
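    At the SQL level this rechecking is most easily observed in bitmap
    plans.  A sketch, assuming a hypothetical table docs with a GIN index
    on a tsvector column body_tsv:

CREATE TABLE docs (body text, body_tsv tsvector);
CREATE INDEX docs_tsv_idx ON docs USING gin (body_tsv);
EXPLAIN (ANALYZE)
SELECT * FROM docs WHERE body_tsv @@ to_tsquery('english', 'index & scan');

    A Bitmap Heap Scan node in the resulting plan carries a Recheck Cond
    line, and when the scan conditions really are re-evaluated against the
    heap tuples, EXPLAIN ANALYZE additionally reports how many rows the
    recheck removed.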
Note: in the current implementation, support for this feature is conflated with support for lossy storage of the bitmap itself, and therefore callers recheck both the scan conditions and the partial index predicate (if any) for recheckable tuples. That might not always be true, however. - amgetbitmap and - amgettuple cannot be used in the same index scan; there - are other restrictions too when using amgetbitmap, as explained - in . + amgetbitmap and + amgettuple cannot be used in the same index scan; there + are other restrictions too when using amgetbitmap, as explained + in . - The amgetbitmap function need only be provided if the access - method supports bitmap index scans. If it doesn't, the - amgetbitmap field in its IndexAmRoutine + The amgetbitmap function need only be provided if the access + method supports bitmap index scans. If it doesn't, the + amgetbitmap field in its IndexAmRoutine struct must be set to NULL. @@ -609,9 +612,10 @@ amgetbitmap (IndexScanDesc scan, void amendscan (IndexScanDesc scan); - End a scan and release resources. The scan struct itself + End a scan and release resources. The scan struct itself should not be freed, but any locks or pins taken internally by the - access method must be released. + access method must be released, as well as any other memory allocated + by ambeginscan and other scan-related functions. @@ -624,9 +628,9 @@ ammarkpos (IndexScanDesc scan); - The ammarkpos function need only be provided if the access + The ammarkpos function need only be provided if the access method supports ordered scans. If it doesn't, - the ammarkpos field in its IndexAmRoutine + the ammarkpos field in its IndexAmRoutine struct may be set to NULL. @@ -639,15 +643,15 @@ amrestrpos (IndexScanDesc scan); - The amrestrpos function need only be provided if the access + The amrestrpos function need only be provided if the access method supports ordered scans. If it doesn't, - the amrestrpos field in its IndexAmRoutine + the amrestrpos field in its IndexAmRoutine struct may be set to NULL. In addition to supporting ordinary index scans, some types of index - may wish to support parallel index scans, which allow + may wish to support parallel index scans, which allow multiple backends to cooperate in performing an index scan. The index access method should arrange things so that each cooperating process returns a subset of the tuples that would be performed by @@ -668,7 +672,7 @@ amestimateparallelscan (void); Estimate and return the number of bytes of dynamic shared memory which the access method will be needed to perform a parallel scan. (This number is in addition to, not in lieu of, the amount of space needed for - AM-independent data in ParallelIndexScanDescData.) + AM-independent data in ParallelIndexScanDescData.) @@ -683,9 +687,9 @@ void aminitparallelscan (void *target); This function will be called to initialize dynamic shared memory at the - beginning of a parallel scan. target will point to at least + beginning of a parallel scan. target will point to at least the number of bytes previously returned by - amestimateparallelscan, and this function may use that + amestimateparallelscan, and this function may use that amount of space to store whatever data it wishes. @@ -702,7 +706,7 @@ amparallelrescan (IndexScanDesc scan); This function, if implemented, will be called when a parallel index scan must be restarted. 
It should reset any shared state set up by - aminitparallelscan such that the scan will be restarted from + aminitparallelscan such that the scan will be restarted from the beginning. @@ -714,16 +718,16 @@ amparallelrescan (IndexScanDesc scan); In an index scan, the index access method is responsible for regurgitating the TIDs of all the tuples it has been told about that match the - scan keys. The access method is not involved in + scan keys. The access method is not involved in actually fetching those tuples from the index's parent table, nor in determining whether they pass the scan's time qualification test or other conditions. - A scan key is the internal representation of a WHERE clause of - the form index_key operator - constant, where the index key is one of the columns of the + A scan key is the internal representation of a WHERE clause of + the form index_key operator + constant, where the index key is one of the columns of the index and the operator is one of the members of the operator family associated with that index column. An index scan has zero or more scan keys, which are implicitly ANDed — the returned tuples are expected @@ -731,7 +735,7 @@ amparallelrescan (IndexScanDesc scan); - The access method can report that the index is lossy, or + The access method can report that the index is lossy, or requires rechecks, for a particular query. This implies that the index scan will return all the entries that pass the scan key, plus possibly additional entries that do not. The core system's index-scan machinery @@ -743,16 +747,16 @@ amparallelrescan (IndexScanDesc scan); Note that it is entirely up to the access method to ensure that it correctly finds all and only the entries passing all the given scan keys. - Also, the core system will simply hand off all the WHERE + Also, the core system will simply hand off all the WHERE clauses that match the index keys and operator families, without any semantic analysis to determine whether they are redundant or contradictory. As an example, given - WHERE x > 4 AND x > 14 where x is a b-tree - indexed column, it is left to the b-tree amrescan function + WHERE x > 4 AND x > 14 where x is a b-tree + indexed column, it is left to the b-tree amrescan function to realize that the first scan key is redundant and can be discarded. - The extent of preprocessing needed during amrescan will + The extent of preprocessing needed during amrescan will depend on the extent to which the index access method needs to reduce - the scan keys to a normalized form. + the scan keys to a normalized form. @@ -765,7 +769,7 @@ amparallelrescan (IndexScanDesc scan); Access methods that always return entries in the natural ordering of their data (such as btree) should set - amcanorder to true. + amcanorder to true. Currently, such access methods must use btree-compatible strategy numbers for their equality and ordering operators. @@ -773,11 +777,11 @@ amparallelrescan (IndexScanDesc scan); Access methods that support ordering operators should set - amcanorderbyop to true. + amcanorderbyop to true. This indicates that the index is capable of returning entries in - an order satisfying ORDER BY index_key - operator constant. Scan modifiers - of that form can be passed to amrescan as described + an order satisfying ORDER BY index_key + operator constant. Scan modifiers + of that form can be passed to amrescan as described previously. 
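    At the SQL level, an ordering-operator scan is what serves a
    nearest-neighbor query without an explicit sort step.  A sketch,
    assuming a hypothetical table places with a point column location:

CREATE TABLE places (name text, location point);
CREATE INDEX places_location_idx ON places USING gist (location);
SELECT * FROM places
ORDER BY location <-> point '(101,456)'
LIMIT 10;

    Because the GiST index can return entries already ordered by distance,
    the ORDER BY ... LIMIT is satisfied directly from the index; this is
    precisely the capability that amcanorderbyop advertises.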
@@ -785,29 +789,29 @@ amparallelrescan (IndexScanDesc scan); - The amgettuple function has a direction argument, - which can be either ForwardScanDirection (the normal case) - or BackwardScanDirection. If the first call after - amrescan specifies BackwardScanDirection, then the + The amgettuple function has a direction argument, + which can be either ForwardScanDirection (the normal case) + or BackwardScanDirection. If the first call after + amrescan specifies BackwardScanDirection, then the set of matching index entries is to be scanned back-to-front rather than in - the normal front-to-back direction, so amgettuple must return + the normal front-to-back direction, so amgettuple must return the last matching tuple in the index, rather than the first one as it normally would. (This will only occur for access - methods that set amcanorder to true.) After the - first call, amgettuple must be prepared to advance the scan in + methods that set amcanorder to true.) After the + first call, amgettuple must be prepared to advance the scan in either direction from the most recently returned entry. (But if - amcanbackward is false, all subsequent + amcanbackward is false, all subsequent calls will have the same direction as the first one.) - Access methods that support ordered scans must support marking a + Access methods that support ordered scans must support marking a position in a scan and later returning to the marked position. The same position might be restored multiple times. However, only one position need - be remembered per scan; a new ammarkpos call overrides the + be remembered per scan; a new ammarkpos call overrides the previously marked position. An access method that does not support ordered - scans need not provide ammarkpos and amrestrpos - functions in IndexAmRoutine; set those pointers to NULL + scans need not provide ammarkpos and amrestrpos + functions in IndexAmRoutine; set those pointers to NULL instead. @@ -835,29 +839,29 @@ amparallelrescan (IndexScanDesc scan); - Instead of using amgettuple, an index scan can be done with - amgetbitmap to fetch all tuples in one call. This can be - noticeably more efficient than amgettuple because it allows + Instead of using amgettuple, an index scan can be done with + amgetbitmap to fetch all tuples in one call. This can be + noticeably more efficient than amgettuple because it allows avoiding lock/unlock cycles within the access method. In principle - amgetbitmap should have the same effects as repeated - amgettuple calls, but we impose several restrictions to - simplify matters. First of all, amgetbitmap returns all + amgetbitmap should have the same effects as repeated + amgettuple calls, but we impose several restrictions to + simplify matters. First of all, amgetbitmap returns all tuples at once and marking or restoring scan positions isn't supported. Secondly, the tuples are returned in a bitmap which doesn't - have any specific ordering, which is why amgetbitmap doesn't - take a direction argument. (Ordering operators will never be + have any specific ordering, which is why amgetbitmap doesn't + take a direction argument. (Ordering operators will never be supplied for such a scan, either.) Also, there is no provision for index-only scans with - amgetbitmap, since there is no way to return the contents of + amgetbitmap, since there is no way to return the contents of index tuples. - Finally, amgetbitmap + Finally, amgetbitmap does not guarantee any locking of the returned tuples, with implications - spelled out in . 
+ spelled out in . Note that it is permitted for an access method to implement only - amgetbitmap and not amgettuple, or vice versa, + amgetbitmap and not amgettuple, or vice versa, if its internal implementation is unsuited to one API or the other. @@ -870,26 +874,26 @@ amparallelrescan (IndexScanDesc scan); Index access methods must handle concurrent updates of the index by multiple processes. The core PostgreSQL system obtains - AccessShareLock on the index during an index scan, and - RowExclusiveLock when updating the index (including plain - VACUUM). Since these lock types do not conflict, the access + AccessShareLock on the index during an index scan, and + RowExclusiveLock when updating the index (including plain + VACUUM). Since these lock types do not conflict, the access method is responsible for handling any fine-grained locking it might need. An exclusive lock on the index as a whole will be taken only during index - creation, destruction, or REINDEX. + creation, destruction, or REINDEX. Building an index type that supports concurrent updates usually requires extensive and subtle analysis of the required behavior. For the b-tree and hash index types, you can read about the design decisions involved in - src/backend/access/nbtree/README and - src/backend/access/hash/README. + src/backend/access/nbtree/README and + src/backend/access/hash/README. Aside from the index's own internal consistency requirements, concurrent updates create issues about consistency between the parent table (the - heap) and the index. Because + heap) and the index. Because PostgreSQL separates accesses and updates of the heap from those of the index, there are windows in which the index might be inconsistent with the heap. We handle this problem @@ -901,12 +905,12 @@ amparallelrescan (IndexScanDesc scan); A new heap entry is made before making its index entries. (Therefore a concurrent index scan is likely to fail to see the heap entry. This is okay because the index reader would be uninterested in an - uncommitted row anyway. But see .) + uncommitted row anyway. But see .) - When a heap entry is to be deleted (by VACUUM), all its + When a heap entry is to be deleted (by VACUUM), all its index entries must be removed first. @@ -914,7 +918,7 @@ amparallelrescan (IndexScanDesc scan); An index scan must maintain a pin on the index page holding the item last returned by - amgettuple, and ambulkdelete cannot delete + amgettuple, and ambulkdelete cannot delete entries from pages that are pinned by other backends. The need for this rule is explained below. @@ -922,33 +926,33 @@ amparallelrescan (IndexScanDesc scan); Without the third rule, it is possible for an index reader to - see an index entry just before it is removed by VACUUM, and + see an index entry just before it is removed by VACUUM, and then to arrive at the corresponding heap entry after that was removed by - VACUUM. + VACUUM. This creates no serious problems if that item number is still unused when the reader reaches it, since an empty - item slot will be ignored by heap_fetch(). But what if a + item slot will be ignored by heap_fetch(). But what if a third backend has already re-used the item slot for something else? When using an MVCC-compliant snapshot, there is no problem because the new occupant of the slot is certain to be too new to pass the snapshot test. 
However, with a non-MVCC-compliant snapshot (such as - SnapshotAny), it would be possible to accept and return + SnapshotAny), it would be possible to accept and return a row that does not in fact match the scan keys. We could defend against this scenario by requiring the scan keys to be rechecked against the heap row in all cases, but that is too expensive. Instead, we use a pin on an index page as a proxy to indicate that the reader - might still be in flight from the index entry to the matching - heap entry. Making ambulkdelete block on such a pin ensures - that VACUUM cannot delete the heap entry before the reader + might still be in flight from the index entry to the matching + heap entry. Making ambulkdelete block on such a pin ensures + that VACUUM cannot delete the heap entry before the reader is done with it. This solution costs little in run time, and adds blocking overhead only in the rare cases where there actually is a conflict. - This solution requires that index scans be synchronous: we have + This solution requires that index scans be synchronous: we have to fetch each heap tuple immediately after scanning the corresponding index entry. This is expensive for a number of reasons. An - asynchronous scan in which we collect many TIDs from the index, + asynchronous scan in which we collect many TIDs from the index, and only visit the heap tuples sometime later, requires much less index locking overhead and can allow a more efficient heap access pattern. Per the above analysis, we must use the synchronous approach for @@ -957,13 +961,13 @@ amparallelrescan (IndexScanDesc scan); - In an amgetbitmap index scan, the access method does not + In an amgetbitmap index scan, the access method does not keep an index pin on any of the returned tuples. Therefore it is only safe to use such scans with MVCC-compliant snapshots. - When the ampredlocks flag is not set, any scan using that + When the ampredlocks flag is not set, any scan using that index access method within a serializable transaction will acquire a nonblocking predicate lock on the full index. This will generate a read-write conflict with the insert of any tuple into that index by a @@ -982,10 +986,12 @@ amparallelrescan (IndexScanDesc scan); PostgreSQL enforces SQL uniqueness constraints - using unique indexes, which are indexes that disallow + using unique indexes, which are indexes that disallow multiple entries with identical keys. An access method that supports this - feature sets amcanunique true. - (At present, only b-tree supports it.) + feature sets amcanunique true. + (At present, only b-tree supports it.) Columns listed in the + INCLUDE clause are not considered when enforcing + uniqueness. @@ -1032,7 +1038,7 @@ amparallelrescan (IndexScanDesc scan); no violation should be reported. (This case cannot occur during the ordinary scenario of inserting a row that's just been created by the current transaction. It can happen during - CREATE UNIQUE INDEX CONCURRENTLY, however.) + CREATE UNIQUE INDEX CONCURRENTLY, however.) @@ -1057,33 +1063,33 @@ amparallelrescan (IndexScanDesc scan); are done. Otherwise, we schedule a recheck to occur when it is time to enforce the constraint. If, at the time of the recheck, both the inserted tuple and some other tuple with the same key are live, then the error - must be reported. (Note that for this purpose, live actually - means any tuple in the index entry's HOT chain is live.) 
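    This deferred style of checking is what a deferrable unique constraint
    exercises at the SQL level.  A minimal sketch, assuming a hypothetical
    table t (the checkUnique values named in the comments are described
    just below):

CREATE TABLE t (id integer UNIQUE DEFERRABLE INITIALLY DEFERRED);
BEGIN;
INSERT INTO t VALUES (1);
INSERT INTO t VALUES (1);  -- accepted for now: aminsert ran with UNIQUE_CHECK_PARTIAL
                           -- and reported a potential duplicate
COMMIT;                    -- the deferred recheck (UNIQUE_CHECK_EXISTING) finds both
                           -- rows live and raises the unique-violation error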
- To implement this, the aminsert function is passed a - checkUnique parameter having one of the following values: + must be reported. (Note that for this purpose, live actually + means any tuple in the index entry's HOT chain is live.) + To implement this, the aminsert function is passed a + checkUnique parameter having one of the following values: - UNIQUE_CHECK_NO indicates that no uniqueness checking + UNIQUE_CHECK_NO indicates that no uniqueness checking should be done (this is not a unique index). - UNIQUE_CHECK_YES indicates that this is a non-deferrable + UNIQUE_CHECK_YES indicates that this is a non-deferrable unique index, and the uniqueness check must be done immediately, as described above. - UNIQUE_CHECK_PARTIAL indicates that the unique + UNIQUE_CHECK_PARTIAL indicates that the unique constraint is deferrable. PostgreSQL will use this mode to insert each row's index entry. The access method must allow duplicate entries into the index, and report any - potential duplicates by returning FALSE from aminsert. - For each row for which FALSE is returned, a deferred recheck will + potential duplicates by returning false from aminsert. + For each row for which false is returned, a deferred recheck will be scheduled. @@ -1098,21 +1104,21 @@ amparallelrescan (IndexScanDesc scan); - UNIQUE_CHECK_EXISTING indicates that this is a deferred + UNIQUE_CHECK_EXISTING indicates that this is a deferred recheck of a row that was reported as a potential uniqueness violation. - Although this is implemented by calling aminsert, the - access method must not insert a new index entry in this + Although this is implemented by calling aminsert, the + access method must not insert a new index entry in this case. The index entry is already present. Rather, the access method must check to see if there is another live index entry. If so, and if the target row is also still live, report error. - It is recommended that in a UNIQUE_CHECK_EXISTING call, + It is recommended that in a UNIQUE_CHECK_EXISTING call, the access method further verify that the target row actually does have an existing entry in the index, and report error if not. This is a good idea because the index tuple values passed to - aminsert will have been recomputed. If the index + aminsert will have been recomputed. If the index definition involves functions that are not really immutable, we might be checking the wrong area of the index. Checking that the target row is found in the recheck verifies that we are scanning @@ -1128,20 +1134,20 @@ amparallelrescan (IndexScanDesc scan); Index Cost Estimation Functions - The amcostestimate function is given information describing + The amcostestimate function is given information describing a possible index scan, including lists of WHERE and ORDER BY clauses that have been determined to be usable with the index. It must return estimates of the cost of accessing the index and the selectivity of the WHERE clauses (that is, the fraction of parent-table rows that will be retrieved during the index scan). For simple cases, nearly all the work of the cost estimator can be done by calling standard routines - in the optimizer; the point of having an amcostestimate function is + in the optimizer; the point of having an amcostestimate function is to allow index access methods to provide index-type-specific knowledge, in case it is possible to improve on the standard estimates. 
- Each amcostestimate function must have the signature: + Each amcostestimate function must have the signature: void @@ -1151,14 +1157,15 @@ amcostestimate (PlannerInfo *root, Cost *indexStartupCost, Cost *indexTotalCost, Selectivity *indexSelectivity, - double *indexCorrelation); + double *indexCorrelation, + double *indexPages); The first three parameters are inputs: - root + root The planner's information about the query being processed. @@ -1167,7 +1174,7 @@ amcostestimate (PlannerInfo *root, - path + path The index access path being considered. All fields except cost and @@ -1177,14 +1184,14 @@ amcostestimate (PlannerInfo *root, - loop_count + loop_count The number of repetitions of the index scan that should be factored into the cost estimates. This will typically be greater than one when considering a parameterized scan for use in the inside of a nestloop join. Note that the cost estimates should still be for just one scan; - a larger loop_count means that it may be appropriate + a larger loop_count means that it may be appropriate to allow for some caching effects across multiple scans. @@ -1193,11 +1200,11 @@ amcostestimate (PlannerInfo *root, - The last four parameters are pass-by-reference outputs: + The last five parameters are pass-by-reference outputs: - *indexStartupCost + *indexStartupCost Set to cost of index start-up processing @@ -1206,7 +1213,7 @@ amcostestimate (PlannerInfo *root, - *indexTotalCost + *indexTotalCost Set to total cost of index processing @@ -1215,7 +1222,7 @@ amcostestimate (PlannerInfo *root, - *indexSelectivity + *indexSelectivity Set to index selectivity @@ -1224,7 +1231,7 @@ amcostestimate (PlannerInfo *root, - *indexCorrelation + *indexCorrelation Set to correlation coefficient between index scan order and @@ -1232,6 +1239,15 @@ amcostestimate (PlannerInfo *root, + + + *indexPages + + + Set to number of index leaf pages + + + @@ -1244,17 +1260,17 @@ amcostestimate (PlannerInfo *root, The index access costs should be computed using the parameters used by src/backend/optimizer/path/costsize.c: a sequential - disk block fetch has cost seq_page_cost, a nonsequential fetch - has cost random_page_cost, and the cost of processing one index - row should usually be taken as cpu_index_tuple_cost. In - addition, an appropriate multiple of cpu_operator_cost should + disk block fetch has cost seq_page_cost, a nonsequential fetch + has cost random_page_cost, and the cost of processing one index + row should usually be taken as cpu_index_tuple_cost. In + addition, an appropriate multiple of cpu_operator_cost should be charged for any comparison operators invoked during index processing (especially evaluation of the indexquals themselves). The access costs should include all disk and CPU costs associated with - scanning the index itself, but not the costs of retrieving or + scanning the index itself, but not the costs of retrieving or processing the parent-table rows that are identified by the index. @@ -1266,21 +1282,26 @@ amcostestimate (PlannerInfo *root, - The indexSelectivity should be set to the estimated fraction of the parent + The indexSelectivity should be set to the estimated fraction of the parent table rows that will be retrieved during the index scan. In the case of a lossy query, this will typically be higher than the fraction of rows that actually pass the given qual conditions. 
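    These outputs feed into the figures that EXPLAIN displays for an index
    scan.  A sketch, assuming a hypothetical table big with a b-tree index
    on an integer column id:

CREATE TABLE big (id integer);
CREATE INDEX big_id_idx ON big (id);
EXPLAIN SELECT * FROM big WHERE id < 1000;

    In the Index Scan node of the resulting plan, the cost numbers
    incorporate the startup and total costs returned by amcostestimate
    (plus the heap-access charges added by the core code), and the row
    estimate is, roughly, indexSelectivity applied to the table's row
    count.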
- The indexCorrelation should be set to the correlation (ranging between + The indexCorrelation should be set to the correlation (ranging between -1.0 and 1.0) between the index order and the table order. This is used to adjust the estimate for the cost of fetching rows from the parent table. - When loop_count is greater than one, the returned numbers + The indexPages should be set to the number of leaf pages. + This is used to estimate the number of workers for parallel index scan. + + + + When loop_count is greater than one, the returned numbers should be averages expected for any one scan of the index. @@ -1307,17 +1328,17 @@ amcostestimate (PlannerInfo *root, Estimate the number of index rows that will be visited during the - scan. For many index types this is the same as indexSelectivity times + scan. For many index types this is the same as indexSelectivity times the number of rows in the index, but it might be more. (Note that the index's size in pages and rows is available from the - path->indexinfo struct.) + path->indexinfo struct.) Estimate the number of index pages that will be retrieved during the scan. - This might be just indexSelectivity times the index's size in pages. + This might be just indexSelectivity times the index's size in pages. diff --git a/doc/src/sgml/indices.sgml b/doc/src/sgml/indices.sgml index e40750e8ec..df7d16ff68 100644 --- a/doc/src/sgml/indices.sgml +++ b/doc/src/sgml/indices.sgml @@ -77,7 +77,7 @@ CREATE INDEX test1_id_index ON test1 (id); than a sequential table scan. But you might have to run the ANALYZE command regularly to update statistics to allow the query planner to make educated decisions. - See for information about + See for information about how to find out whether an index is used and when and why the planner might choose not to use an index. @@ -98,8 +98,8 @@ CREATE INDEX test1_id_index ON test1 (id); In production environments this is often unacceptable. It is possible to allow writes to occur in parallel with index creation, but there are several caveats to be aware of — - for more information see . + for more information see . @@ -147,21 +147,21 @@ CREATE INDEX test1_id_index ON test1 (id); Constructs equivalent to combinations of these operators, such as - BETWEEN and IN, can also be implemented with - a B-tree index search. Also, an IS NULL or IS NOT - NULL condition on an index column can be used with a B-tree index. + BETWEEN and IN, can also be implemented with + a B-tree index search. Also, an IS NULL or IS NOT + NULL condition on an index column can be used with a B-tree index. The optimizer can also use a B-tree index for queries involving the - pattern matching operators LIKE and ~ + pattern matching operators LIKE and ~ if the pattern is a constant and is anchored to the beginning of the string — for example, col LIKE 'foo%' or col ~ '^foo', but not col LIKE '%bar'. However, if your database does not use the C locale you will need to create the index with a special operator class to support indexing of pattern-matching queries; see - below. It is also possible to use + below. It is also possible to use B-tree indexes for ILIKE and ~*, but only if the pattern starts with non-alphabetic characters, i.e., characters that are not affected by @@ -206,7 +206,7 @@ CREATE INDEX name ON table within which many different indexing strategies can be implemented. Accordingly, the particular operators with which a GiST index can be used vary depending on the indexing strategy (the operator - class). 
As an example, the standard distribution of + class). As an example, the standard distribution of PostgreSQL includes GiST operator classes for several two-dimensional geometric data types, which support indexed queries using these operators: @@ -226,17 +226,17 @@ CREATE INDEX name ON table && - (See for the meaning of + (See for the meaning of these operators.) The GiST operator classes included in the standard distribution are - documented in . + documented in . Many other GiST operator - classes are available in the contrib collection or as separate - projects. For more information see . + classes are available in the contrib collection or as separate + projects. For more information see . - GiST indexes are also capable of optimizing nearest-neighbor + GiST indexes are also capable of optimizing nearest-neighbor searches, such as point '(101,456)' LIMIT 10; @@ -244,8 +244,8 @@ SELECT * FROM places ORDER BY location <-> point '(101,456)' LIMIT 10; which finds the ten places closest to a given target point. The ability to do this is again dependent on the particular operator class being used. - In , operators that can be - used in this way are listed in the column Ordering Operators. + In , operators that can be + used in this way are listed in the column Ordering Operators. @@ -274,11 +274,18 @@ SELECT * FROM places ORDER BY location <-> point '(101,456)' LIMIT 10; >^ - (See for the meaning of + (See for the meaning of these operators.) The SP-GiST operator classes included in the standard distribution are - documented in . - For more information see . + documented in . + For more information see . + + + + Like GiST, SP-GiST supports nearest-neighbor searches. + For SP-GiST operator classes that support distance ordering, the + corresponding operator is specified in the Ordering Operators + column in . @@ -290,7 +297,7 @@ SELECT * FROM places ORDER BY location <-> point '(101,456)' LIMIT 10; GIN index - GIN indexes are inverted indexes which are appropriate for + GIN indexes are inverted indexes which are appropriate for data values that contain multiple component values, such as arrays. An inverted index contains a separate entry for each component value, and can efficiently handle queries that test for the presence of specific @@ -313,13 +320,13 @@ SELECT * FROM places ORDER BY location <-> point '(101,456)' LIMIT 10; && - (See for the meaning of + (See for the meaning of these operators.) The GIN operator classes included in the standard distribution are - documented in . + documented in . Many other GIN operator - classes are available in the contrib collection or as separate - projects. For more information see . + classes are available in the contrib collection or as separate + projects. For more information see . @@ -351,8 +358,8 @@ SELECT * FROM places ORDER BY location <-> point '(101,456)' LIMIT 10; The BRIN operator classes included in the standard distribution are - documented in . - For more information see . + documented in . + For more information see . @@ -407,13 +414,13 @@ CREATE INDEX test2_mm_idx ON test2 (major, minor); are checked in the index, so they save visits to the table proper, but they do not reduce the portion of the index that has to be scanned. For example, given an index on (a, b, c) and a - query condition WHERE a = 5 AND b >= 42 AND c < 77, + query condition WHERE a = 5 AND b >= 42 AND c < 77, the index would have to be scanned from the first entry with - a = 5 and b = 42 up through the last entry with - a = 5. 
Index entries with c >= 77 would be + a = 5 and b = 42 up through the last entry with + a = 5. Index entries with c >= 77 would be skipped, but they'd still have to be scanned through. This index could in principle be used for queries that have constraints - on b and/or c with no constraint on a + on b and/or c with no constraint on a — but the entire index would have to be scanned, so in most cases the planner would prefer a sequential table scan over using the index. @@ -454,25 +461,25 @@ CREATE INDEX test2_mm_idx ON test2 (major, minor); an index on a single column is sufficient and saves space and time. Indexes with more than three columns are unlikely to be helpful unless the usage of the table is extremely stylized. See also - and - for some discussion of the + and + for some discussion of the merits of different index configurations. - Indexes and <literal>ORDER BY</> + Indexes and <literal>ORDER BY</literal> index - and ORDER BY + and ORDER BY In addition to simply finding the rows to be returned by a query, an index may be able to deliver them in a specific sorted order. - This allows a query's ORDER BY specification to be honored + This allows a query's ORDER BY specification to be honored without a separate sorting step. Of the index types currently supported by PostgreSQL, only B-tree can produce sorted output — the other index types return @@ -480,7 +487,7 @@ CREATE INDEX test2_mm_idx ON test2 (major, minor); - The planner will consider satisfying an ORDER BY specification + The planner will consider satisfying an ORDER BY specification either by scanning an available index that matches the specification, or by scanning the table in physical order and doing an explicit sort. For a query that requires scanning a large fraction of the @@ -488,50 +495,50 @@ CREATE INDEX test2_mm_idx ON test2 (major, minor); because it requires less disk I/O due to following a sequential access pattern. Indexes are more useful when only a few rows need be fetched. An important - special case is ORDER BY in combination with - LIMIT n: an explicit sort will have to process - all the data to identify the first n rows, but if there is - an index matching the ORDER BY, the first n + special case is ORDER BY in combination with + LIMIT n: an explicit sort will have to process + all the data to identify the first n rows, but if there is + an index matching the ORDER BY, the first n rows can be retrieved directly, without scanning the remainder at all. By default, B-tree indexes store their entries in ascending order with nulls last. This means that a forward scan of an index on - column x produces output satisfying ORDER BY x - (or more verbosely, ORDER BY x ASC NULLS LAST). The + column x produces output satisfying ORDER BY x + (or more verbosely, ORDER BY x ASC NULLS LAST). The index can also be scanned backward, producing output satisfying - ORDER BY x DESC - (or more verbosely, ORDER BY x DESC NULLS FIRST, since - NULLS FIRST is the default for ORDER BY DESC). + ORDER BY x DESC + (or more verbosely, ORDER BY x DESC NULLS FIRST, since + NULLS FIRST is the default for ORDER BY DESC). 
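    A backward scan of a default (ASC NULLS LAST) index is therefore
    enough to satisfy a descending sort.  A sketch, assuming a hypothetical
    table events with a timestamp column created_at:

CREATE TABLE events (created_at timestamptz, payload text);
CREATE INDEX events_created_at_idx ON events (created_at);
SELECT * FROM events ORDER BY created_at DESC LIMIT 20;

    The planner can read events_created_at_idx back-to-front to return the
    newest rows with no sort step; only an ordering such as
    ORDER BY created_at DESC NULLS LAST would call for the explicit index
    options shown next.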
You can adjust the ordering of a B-tree index by including the - options ASC, DESC, NULLS FIRST, - and/or NULLS LAST when creating the index; for example: + options ASC, DESC, NULLS FIRST, + and/or NULLS LAST when creating the index; for example: CREATE INDEX test2_info_nulls_low ON test2 (info NULLS FIRST); CREATE INDEX test3_desc_index ON test3 (id DESC NULLS LAST); An index stored in ascending order with nulls first can satisfy - either ORDER BY x ASC NULLS FIRST or - ORDER BY x DESC NULLS LAST depending on which direction + either ORDER BY x ASC NULLS FIRST or + ORDER BY x DESC NULLS LAST depending on which direction it is scanned in. You might wonder why bother providing all four options, when two options together with the possibility of backward scan would cover - all the variants of ORDER BY. In single-column indexes + all the variants of ORDER BY. In single-column indexes the options are indeed redundant, but in multicolumn indexes they can be - useful. Consider a two-column index on (x, y): this can - satisfy ORDER BY x, y if we scan forward, or - ORDER BY x DESC, y DESC if we scan backward. + useful. Consider a two-column index on (x, y): this can + satisfy ORDER BY x, y if we scan forward, or + ORDER BY x DESC, y DESC if we scan backward. But it might be that the application frequently needs to use - ORDER BY x ASC, y DESC. There is no way to get that + ORDER BY x ASC, y DESC. There is no way to get that ordering from a plain index, but it is possible if the index is defined - as (x ASC, y DESC) or (x DESC, y ASC). + as (x ASC, y DESC) or (x DESC, y ASC). @@ -559,38 +566,38 @@ CREATE INDEX test3_desc_index ON test3 (id DESC NULLS LAST); A single index scan can only use query clauses that use the index's columns with operators of its operator class and are joined with - AND. For example, given an index on (a, b) - a query condition like WHERE a = 5 AND b = 6 could - use the index, but a query like WHERE a = 5 OR b = 6 could not + AND. For example, given an index on (a, b) + a query condition like WHERE a = 5 AND b = 6 could + use the index, but a query like WHERE a = 5 OR b = 6 could not directly use the index. Fortunately, - PostgreSQL has the ability to combine multiple indexes + PostgreSQL has the ability to combine multiple indexes (including multiple uses of the same index) to handle cases that cannot - be implemented by single index scans. The system can form AND - and OR conditions across several index scans. For example, - a query like WHERE x = 42 OR x = 47 OR x = 53 OR x = 99 - could be broken down into four separate scans of an index on x, + be implemented by single index scans. The system can form AND + and OR conditions across several index scans. For example, + a query like WHERE x = 42 OR x = 47 OR x = 53 OR x = 99 + could be broken down into four separate scans of an index on x, each scan using one of the query clauses. The results of these scans are then ORed together to produce the result. Another example is that if we - have separate indexes on x and y, one possible - implementation of a query like WHERE x = 5 AND y = 6 is to + have separate indexes on x and y, one possible + implementation of a query like WHERE x = 5 AND y = 6 is to use each index with the appropriate query clause and then AND together the index results to identify the result rows. 
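    Both situations can be reproduced with a hypothetical table demo that
    has integer columns x and y:

CREATE TABLE demo (x integer, y integer);
CREATE INDEX demo_x_idx ON demo (x);
CREATE INDEX demo_y_idx ON demo (y);
EXPLAIN SELECT * FROM demo WHERE x = 5 AND y = 6;
EXPLAIN SELECT * FROM demo WHERE x = 42 OR x = 47 OR x = 53 OR x = 99;

    When the planner chooses the combination strategy, the plans show
    BitmapAnd or BitmapOr nodes feeding a Bitmap Heap Scan; depending on
    the row estimates it may instead settle for a single index or a
    sequential scan.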
To combine multiple indexes, the system scans each needed index and - prepares a bitmap in memory giving the locations of + prepares a bitmap in memory giving the locations of table rows that are reported as matching that index's conditions. The bitmaps are then ANDed and ORed together as needed by the query. Finally, the actual table rows are visited and returned. The table rows are visited in physical order, because that is how the bitmap is laid out; this means that any ordering of the original indexes is lost, and so a separate sort step will be needed if the query has an ORDER - BY clause. For this reason, and because each additional index scan + BY clause. For this reason, and because each additional index scan adds extra time, the planner will sometimes choose to use a simple index scan even though additional indexes are available that could have been used as well. @@ -603,19 +610,19 @@ CREATE INDEX test3_desc_index ON test3 (id DESC NULLS LAST); indexes are best, but sometimes it's better to create separate indexes and rely on the index-combination feature. For example, if your workload includes a mix of queries that sometimes involve only column - x, sometimes only column y, and sometimes both + x, sometimes only column y, and sometimes both columns, you might choose to create two separate indexes on - x and y, relying on index combination to + x and y, relying on index combination to process the queries that use both columns. You could also create a - multicolumn index on (x, y). This index would typically be + multicolumn index on (x, y). This index would typically be more efficient than index combination for queries involving both - columns, but as discussed in , it - would be almost useless for queries involving only y, so it + columns, but as discussed in , it + would be almost useless for queries involving only y, so it should not be the only index. A combination of the multicolumn index - and a separate index on y would serve reasonably well. For - queries involving only x, the multicolumn index could be + and a separate index on y would serve reasonably well. For + queries involving only x, the multicolumn index could be used, though it would be larger and hence slower than an index on - x alone. The last alternative is to create all three + x alone. The last alternative is to create all three indexes, but this is probably only reasonable if the table is searched much more often than it is updated and all three types of query are common. If one of the types of query is much less common than the @@ -638,7 +645,8 @@ CREATE INDEX test3_desc_index ON test3 (id DESC NULLS LAST); Indexes can also be used to enforce uniqueness of a column's value, or the uniqueness of the combined values of more than one column. -CREATE UNIQUE INDEX name ON table (column , ...); +CREATE UNIQUE INDEX name ON table (column , ...) + INCLUDE (column , ...) ; Currently, only B-tree indexes can be declared unique. @@ -647,7 +655,9 @@ CREATE UNIQUE INDEX name ON tableINCLUDE clause, if any, aren't considered when + determining whether index entries are equal. @@ -698,9 +708,9 @@ CREATE INDEX test1_lower_col1_idx ON test1 (lower(col1)); - If we were to declare this index UNIQUE, it would prevent - creation of rows whose col1 values differ only in case, - as well as rows whose col1 values are actually identical. + If we were to declare this index UNIQUE, it would prevent + creation of rows whose col1 values differ only in case, + as well as rows whose col1 values are actually identical. 
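As a minimal sketch of the point just made, declaring the expression index from the example above as UNIQUE (the index name here is invented) enforces case-insensitive uniqueness of col1:
<programlisting>
-- Rejects two rows whose col1 values differ only in case,
-- as well as exact duplicates.
CREATE UNIQUE INDEX test1_lower_col1_key ON test1 (lower(col1));
</programlisting>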
Thus, indexes on expressions can be used to enforce constraints that are not definable as simple unique constraints. @@ -717,7 +727,7 @@ CREATE INDEX people_names ON people ((first_name || ' ' || last_name)); - The syntax of the CREATE INDEX command normally requires + The syntax of the CREATE INDEX command normally requires writing parentheses around index expressions, as shown in the second example. The parentheses can be omitted when the expression is just a function call, as in the first example. @@ -727,9 +737,9 @@ CREATE INDEX people_names ON people ((first_name || ' ' || last_name)); Index expressions are relatively expensive to maintain, because the derived expression(s) must be computed for each row upon insertion and whenever it is updated. However, the index expressions are - not recomputed during an indexed search, since they are + not recomputed during an indexed search, since they are already stored in the index. In both examples above, the system - sees the query as just WHERE indexedcolumn = 'constant' + sees the query as just WHERE indexedcolumn = 'constant' and so the speed of the search is equivalent to any other simple index query. Thus, indexes on expressions are useful when retrieval speed is more important than insertion and update speed. @@ -762,7 +772,7 @@ CREATE INDEX people_names ON people ((first_name || ' ' || last_name)); index at all. This reduces the size of the index, which will speed up those queries that do use the index. It will also speed up many table update operations because the index does not need to be - updated in all cases. shows a + updated in all cases. shows a possible application of this idea. @@ -827,7 +837,7 @@ WHERE client_ip = inet '192.168.100.23'; Another possible use for a partial index is to exclude values from the index that the typical query workload is not interested in; this is shown in . This results in the same + linkend="indexes-partial-ex2"/>. This results in the same advantages as listed above, but it prevents the uninteresting values from being accessed via that index, even if an index scan might be profitable in that @@ -856,12 +866,12 @@ CREATE INDEX orders_unbilled_index ON orders (order_nr) SELECT * FROM orders WHERE billed is not true AND order_nr < 10000; However, the index can also be used in queries that do not involve - order_nr at all, e.g.: + order_nr at all, e.g.: SELECT * FROM orders WHERE billed is not true AND amount > 5000.00; This is not as efficient as a partial index on the - amount column would be, since the system has to + amount column would be, since the system has to scan the entire index. Yet, if there are relatively few unbilled orders, using this partial index just to find the unbilled orders could be a win. @@ -878,7 +888,7 @@ SELECT * FROM orders WHERE order_nr = 3501; - also illustrates that the + also illustrates that the indexed column and the column used in the predicate do not need to match. PostgreSQL supports partial indexes with arbitrary predicates, so long as only columns of the @@ -886,7 +896,7 @@ SELECT * FROM orders WHERE order_nr = 3501; predicate must match the conditions used in the queries that are supposed to benefit from the index. To be precise, a partial index can be used in a query only if the system can recognize that - the WHERE condition of the query mathematically implies + the WHERE condition of the query mathematically implies the predicate of the index. 
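A minimal sketch of what implies means here, using invented table and column names; the limits of this recognition are described next:
<programlisting>
CREATE TABLE measurements (sensor_id integer, reading numeric);
CREATE INDEX measurements_low_idx ON measurements (sensor_id)
    WHERE reading < 2;

-- Usable: "reading < 1" implies the index predicate "reading < 2"
SELECT * FROM measurements WHERE sensor_id = 7 AND reading < 1;

-- Not usable through predicate implication: "reading < 3" does not
-- imply "reading < 2"
SELECT * FROM measurements WHERE sensor_id = 7 AND reading < 3;
</programlisting>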
PostgreSQL does not have a sophisticated theorem prover that can recognize mathematically equivalent @@ -896,7 +906,7 @@ SELECT * FROM orders WHERE order_nr = 3501; The system can recognize simple inequality implications, for example x < 1 implies x < 2; otherwise the predicate condition must exactly match part of the query's - WHERE condition + WHERE condition or the index will not be recognized as usable. Matching takes place at query planning time, not at run time. As a result, parameterized query clauses do not work with a partial index. For @@ -909,7 +919,7 @@ SELECT * FROM orders WHERE order_nr = 3501; A third possible use for partial indexes does not require the index to be used in queries at all. The idea here is to create a unique index over a subset of a table, as in . This enforces uniqueness + linkend="indexes-partial-ex3"/>. This enforces uniqueness among the rows that satisfy the index predicate, without constraining those that do not. @@ -919,9 +929,9 @@ SELECT * FROM orders WHERE order_nr = 3501; Suppose that we have a table describing test outcomes. We wish - to ensure that there is only one successful entry for + to ensure that there is only one successful entry for a given subject and target combination, but there might be any number of - unsuccessful entries. Here is one way to do it: + unsuccessful entries. Here is one way to do it: CREATE TABLE tests ( subject text, @@ -944,7 +954,7 @@ CREATE UNIQUE INDEX tests_success_constraint ON tests (subject, target) distributions might cause the system to use an index when it really should not. In that case the index can be set up so that it is not available for the offending query. Normally, - PostgreSQL makes reasonable choices about index + PostgreSQL makes reasonable choices about index usage (e.g., it avoids them when retrieving common values, so the earlier example really only saves index size, it is not required to avoid index usage), and grossly incorrect plan choices are cause @@ -956,14 +966,14 @@ CREATE UNIQUE INDEX tests_success_constraint ON tests (subject, target) know at least as much as the query planner knows, in particular you know when an index might be profitable. Forming this knowledge requires experience and understanding of how indexes in - PostgreSQL work. In most cases, the advantage of a + PostgreSQL work. In most cases, the advantage of a partial index over a regular index will be minimal. More information about partial indexes can be found in , , and . + linkend="ston89b"/>, , and . @@ -998,8 +1008,8 @@ CREATE INDEX name ON table the proper class when making an index. The operator class determines the basic sort ordering (which can then be modified by adding sort options COLLATE, - ASC/DESC and/or - NULLS FIRST/NULLS LAST). + ASC/DESC and/or + NULLS FIRST/NULLS LAST). @@ -1025,8 +1035,8 @@ CREATE INDEX name ON table CREATE INDEX test_index ON test_table (col varchar_pattern_ops); Note that you should also create an index with the default operator - class if you want queries involving ordinary <, - <=, >, or >= comparisons + class if you want queries involving ordinary <, + <=, >, or >= comparisons to use an index. Such queries cannot use the xxx_pattern_ops operator classes. (Ordinary equality comparisons can use these @@ -1057,7 +1067,7 @@ SELECT am.amname AS index_method, An operator class is actually just a subset of a larger structure called an - operator family. In cases where several data types have + operator family. 
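Tying together the pattern-operator discussion above, a sketch using the test_table example (the second index name is invented): both indexes may be needed if the workload mixes pattern matches and ordinary comparisons:
<programlisting>
CREATE INDEX test_index_pattern ON test_table (col varchar_pattern_ops);
CREATE INDEX test_index_plain   ON test_table (col);

-- A left-anchored pattern can use the pattern_ops index
SELECT * FROM test_table WHERE col LIKE 'foo%';

-- An ordinary range comparison needs the default-operator-class index
SELECT * FROM test_table WHERE col >= 'foo';
</programlisting>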
In cases where several data types have similar behaviors, it is frequently useful to define cross-data-type operators and allow these to work with indexes. To do this, the operator classes for each of the types must be grouped into the same operator @@ -1147,24 +1157,24 @@ CREATE INDEX test1c_content_y_index ON test1c (content COLLATE "y"); - All indexes in PostgreSQL are secondary + All indexes in PostgreSQL are secondary indexes, meaning that each index is stored separately from the table's - main data area (which is called the table's heap - in PostgreSQL terminology). This means that in an + main data area (which is called the table's heap + in PostgreSQL terminology). This means that in an ordinary index scan, each row retrieval requires fetching data from both the index and the heap. Furthermore, while the index entries that match a - given indexable WHERE condition are usually close together in + given indexable WHERE condition are usually close together in the index, the table rows they reference might be anywhere in the heap. The heap-access portion of an index scan thus involves a lot of random access into the heap, which can be slow, particularly on traditional - rotating media. (As described in , + rotating media. (As described in , bitmap scans try to alleviate this cost by doing the heap accesses in sorted order, but that only goes so far.) - To solve this performance problem, PostgreSQL - supports index-only scans, which can answer queries from an + To solve this performance problem, PostgreSQL + supports index-only scans, which can answer queries from an index alone without any heap access. The basic idea is to return values directly out of each index entry instead of consulting the associated heap entry. There are two fundamental restrictions on when this method can be @@ -1187,8 +1197,8 @@ CREATE INDEX test1c_content_y_index ON test1c (content COLLATE "y"); The query must reference only columns stored in the index. For - example, given an index on columns x and y of a - table that also has a column z, these queries could use + example, given an index on columns x and y of a + table that also has a column z, these queries could use index-only scans: SELECT x, y FROM tab WHERE x = 'key'; @@ -1210,17 +1220,17 @@ SELECT x FROM tab WHERE x = 'key' AND z < 42; If these two fundamental requirements are met, then all the data values required by the query are available from the index, so an index-only scan is physically possible. But there is an additional requirement for any - table scan in PostgreSQL: it must verify that each - retrieved row be visible to the query's MVCC snapshot, as - discussed in . Visibility information is not stored + table scan in PostgreSQL: it must verify that each + retrieved row be visible to the query's MVCC snapshot, as + discussed in . Visibility information is not stored in index entries, only in heap entries; so at first glance it would seem that every row retrieval would require a heap access anyway. And this is indeed the case, if the table row has been modified recently. However, for seldom-changing data there is a way around this - problem. PostgreSQL tracks, for each page in a table's + problem. PostgreSQL tracks, for each page in a table's heap, whether all rows stored in that page are old enough to be visible to all current and future transactions. This information is stored in a bit - in the table's visibility map. An index-only scan, after + in the table's visibility map. 
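As a brief, hedged aside: the visibility map is maintained by VACUUM (including autovacuum), so on a mostly static table a vacuum pass is what lets index-only scans skip the heap visibility check for all-visible pages. Using the tab example from above:
<programlisting>
-- Sets the all-visible bits for pages whose rows are visible to all
-- transactions, which is the information an index-only scan consults.
VACUUM tab;
</programlisting>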
An index-only scan, after finding a candidate index entry, checks the visibility map bit for the corresponding heap page. If it's set, the row is known visible and so the data can be returned with no further work. If it's not set, the heap @@ -1243,53 +1253,53 @@ SELECT x FROM tab WHERE x = 'key' AND z < 42; To make effective use of the index-only scan feature, you might choose to create indexes in which only the leading columns are meant to - match WHERE clauses, while the trailing columns - hold payload data to be returned by a query. For example, if + match WHERE clauses, while the trailing columns + hold payload data to be returned by a query. For example, if you commonly run queries like SELECT y FROM tab WHERE x = 'key'; the traditional approach to speeding up such queries would be to create an - index on x only. However, an index on (x, y) + index on x only. However, an index on (x, y) would offer the possibility of implementing this query as an index-only scan. As previously discussed, such an index would be larger and hence - more expensive than an index on x alone, so this is attractive + more expensive than an index on x alone, so this is attractive only if the table is known to be mostly static. Note it's important that - the index be declared on (x, y) not (y, x), as for + the index be declared on (x, y) not (y, x), as for most index types (particularly B-trees) searches that do not constrain the leading index columns are not very efficient. In principle, index-only scans can be used with expression indexes. - For example, given an index on f(x) where x is a + For example, given an index on f(x) where x is a table column, it should be possible to execute SELECT f(x) FROM tab WHERE f(x) < 1; - as an index-only scan; and this is very attractive if f() is - an expensive-to-compute function. However, PostgreSQL's + as an index-only scan; and this is very attractive if f() is + an expensive-to-compute function. However, PostgreSQL's planner is currently not very smart about such cases. It considers a query to be potentially executable by index-only scan only when - all columns needed by the query are available from the index. - In this example, x is not needed except in the - context f(x), but the planner does not notice that and + all columns needed by the query are available from the index. + In this example, x is not needed except in the + context f(x), but the planner does not notice that and concludes that an index-only scan is not possible. If an index-only scan seems sufficiently worthwhile, this can be worked around by declaring the - index to be on (f(x), x), where the second column is not + index to be on (f(x), x), where the second column is not expected to be used in practice but is just there to convince the planner that an index-only scan is possible. An additional caveat, if the goal is - to avoid recalculating f(x), is that the planner won't - necessarily match uses of f(x) that aren't in - indexable WHERE clauses to the index column. It will usually + to avoid recalculating f(x), is that the planner won't + necessarily match uses of f(x) that aren't in + indexable WHERE clauses to the index column. It will usually get this right in simple queries such as shown above, but not in queries that involve joins. These deficiencies may be remedied in future versions - of PostgreSQL. + of PostgreSQL. Partial indexes also have interesting interactions with index-only scans. 
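Before turning to partial indexes, here is a sketch of the covering-index idea just described, using the tab example; the second form assumes a release that supports the INCLUDE clause shown earlier in this chapter, which keeps y out of the search key while still making it available to index-only scans:
<programlisting>
-- Two ways to let "SELECT y FROM tab WHERE x = 'key'" use an index-only scan
CREATE INDEX tab_x_y_idx     ON tab (x, y);           -- y as a trailing key column
CREATE INDEX tab_x_inc_y_idx ON tab (x) INCLUDE (y);  -- y as non-key payload

SELECT y FROM tab WHERE x = 'key';
</programlisting>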
- Consider the partial index shown in : + Consider the partial index shown in : CREATE UNIQUE INDEX tests_success_constraint ON tests (subject, target) WHERE success; @@ -1299,13 +1309,13 @@ CREATE UNIQUE INDEX tests_success_constraint ON tests (subject, target) SELECT target FROM tests WHERE subject = 'some-subject' AND success; - But there's a problem: the WHERE clause refers - to success which is not available as a result column of the + But there's a problem: the WHERE clause refers + to success which is not available as a result column of the index. Nonetheless, an index-only scan is possible because the plan does - not need to recheck that part of the WHERE clause at run time: - all entries found in the index necessarily have success = true + not need to recheck that part of the WHERE clause at run time: + all entries found in the index necessarily have success = true so this need not be explicitly checked in the - plan. PostgreSQL versions 9.6 and later will recognize + plan. PostgreSQL versions 9.6 and later will recognize such cases and allow index-only scans to be generated, but older versions will not. @@ -1321,15 +1331,15 @@ SELECT target FROM tests WHERE subject = 'some-subject' AND success; - Although indexes in PostgreSQL do not need + Although indexes in PostgreSQL do not need maintenance or tuning, it is still important to check which indexes are actually used by the real-life query workload. Examining index usage for an individual query is done with the - + command; its application for this purpose is - illustrated in . + illustrated in . It is also possible to gather overall statistics about index usage - in a running server, as described in . + in a running server, as described in . @@ -1343,7 +1353,7 @@ SELECT target FROM tests WHERE subject = 'some-subject' AND success; - Always run + Always run first. This command collects statistics about the distribution of the values in the table. This information is required to estimate the number of rows @@ -1353,8 +1363,8 @@ SELECT target FROM tests WHERE subject = 'some-subject' AND success; almost certain to be inaccurate. Examining an application's index usage without having run ANALYZE is therefore a lost cause. - See - and for more information. + See + and for more information. @@ -1386,10 +1396,10 @@ SELECT target FROM tests WHERE subject = 'some-subject' AND success; When indexes are not used, it can be useful for testing to force their use. There are run-time parameters that can turn off - various plan types (see ). + various plan types (see ). For instance, turning off sequential scans - (enable_seqscan) and nested-loop joins - (enable_nestloop), which are the most basic plans, + (enable_seqscan) and nested-loop joins + (enable_nestloop), which are the most basic plans, will force the system to use a different plan. If the system still chooses a sequential scan or nested-loop join then there is probably a more fundamental reason why the index is not being @@ -1417,18 +1427,18 @@ SELECT target FROM tests WHERE subject = 'some-subject' AND success; per-row costs of each plan node times the selectivity estimate of the plan node. The costs estimated for the plan nodes can be adjusted via run-time parameters (described in ). + linkend="runtime-config-query-constants"/>). An inaccurate selectivity estimate is due to insufficient statistics. It might be possible to improve this by tuning the statistics-gathering parameters (see - ). + ). 
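The forcing technique mentioned above can be sketched as follows for a single session (the query reuses the tab example from earlier; the settings affect only the current session and should be reset afterwards):
<programlisting>
SET enable_seqscan = off;
SET enable_nestloop = off;

-- Compare this plan and timing with the ones obtained under default settings
EXPLAIN ANALYZE SELECT * FROM tab WHERE x = 'key';

RESET enable_seqscan;
RESET enable_nestloop;
</programlisting>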
If you do not succeed in adjusting the costs to be more appropriate, then you might have to resort to forcing index usage explicitly. You might also want to contact the - PostgreSQL developers to examine the issue. + PostgreSQL developers to examine the issue. diff --git a/doc/src/sgml/info.sgml b/doc/src/sgml/info.sgml index 233ba0e668..6b9f1b5d81 100644 --- a/doc/src/sgml/info.sgml +++ b/doc/src/sgml/info.sgml @@ -15,9 +15,9 @@ The PostgreSQL wiki contains the project's FAQ + url="https://wiki.postgresql.org/wiki/Frequently_Asked_Questions">FAQ (Frequently Asked Questions) list, TODO list, and + url="https://wiki.postgresql.org/wiki/Todo">TODO list, and detailed information about many more topics. @@ -42,7 +42,7 @@ The mailing lists are a good place to have your questions answered, to share experiences with other users, and to contact - the developers. Consult the PostgreSQL web site + the developers. Consult the PostgreSQL web site for details. diff --git a/doc/src/sgml/information_schema.sgml b/doc/src/sgml/information_schema.sgml index e07ff35bca..b13700da92 100644 --- a/doc/src/sgml/information_schema.sgml +++ b/doc/src/sgml/information_schema.sgml @@ -35,12 +35,12 @@ This problem can appear when querying information schema views such - as check_constraint_routine_usage, - check_constraints, domain_constraints, and - referential_constraints. Some other views have similar + as check_constraint_routine_usage, + check_constraints, domain_constraints, and + referential_constraints. Some other views have similar issues but contain the table name to help distinguish duplicate - rows, e.g., constraint_column_usage, - constraint_table_usage, table_constraints. + rows, e.g., constraint_column_usage, + constraint_table_usage, table_constraints. @@ -384,19 +384,19 @@ character_set_catalog sql_identifier - Applies to a feature not available in PostgreSQL + Applies to a feature not available in PostgreSQL character_set_schema sql_identifier - Applies to a feature not available in PostgreSQL + Applies to a feature not available in PostgreSQL character_set_name sql_identifier - Applies to a feature not available in PostgreSQL + Applies to a feature not available in PostgreSQL @@ -535,25 +535,25 @@ scope_catalog sql_identifier - Applies to a feature not available in PostgreSQL + Applies to a feature not available in PostgreSQL scope_schema sql_identifier - Applies to a feature not available in PostgreSQL + Applies to a feature not available in PostgreSQL scope_name sql_identifier - Applies to a feature not available in PostgreSQL + Applies to a feature not available in PostgreSQL maximum_cardinality cardinal_number - Always null, because arrays always have unlimited maximum cardinality in PostgreSQL + Always null, because arrays always have unlimited maximum cardinality in PostgreSQL @@ -572,14 +572,14 @@ is_derived_reference_attribute yes_or_no - Applies to a feature not available in PostgreSQL + Applies to a feature not available in PostgreSQL
- See also under , a similarly + See also under , a similarly structured view, for further information on some of the columns. @@ -776,7 +776,7 @@ sql_identifier The specific name of the function. See for more information. + linkend="infoschema-routines"/> for more information. @@ -895,7 +895,7 @@ identifies which character set the available collations are applicable to. In PostgreSQL, there is only one character set per database (see explanation - in ), so this view does + in ), so this view does not provide much useful information. @@ -1178,7 +1178,7 @@ that use data types owned by a currently enabled role. Note that in PostgreSQL, built-in data types behave like user-defined types, so they are included here as well. See - also for details. + also for details. @@ -1256,7 +1256,7 @@ The view columns contains information about all table columns (or view columns) in the database. System columns - (oid, etc.) are not included. Only those columns are + (oid, etc.) are not included. Only those columns are shown that the current user has access to (by way of being the owner or having some privilege). @@ -1441,19 +1441,19 @@ character_set_catalog sql_identifier - Applies to a feature not available in PostgreSQL + Applies to a feature not available in PostgreSQL character_set_schema sql_identifier - Applies to a feature not available in PostgreSQL + Applies to a feature not available in PostgreSQL character_set_name sql_identifier - Applies to a feature not available in PostgreSQL + Applies to a feature not available in PostgreSQL @@ -1540,25 +1540,25 @@ scope_catalog sql_identifier - Applies to a feature not available in PostgreSQL + Applies to a feature not available in PostgreSQL scope_schema sql_identifier - Applies to a feature not available in PostgreSQL + Applies to a feature not available in PostgreSQL scope_name sql_identifier - Applies to a feature not available in PostgreSQL + Applies to a feature not available in PostgreSQL maximum_cardinality cardinal_number - Always null, because arrays always have unlimited maximum cardinality in PostgreSQL + Always null, because arrays always have unlimited maximum cardinality in PostgreSQL @@ -1577,7 +1577,7 @@ is_self_referencing yes_or_no - Applies to a feature not available in PostgreSQL + Applies to a feature not available in PostgreSQL @@ -1648,13 +1648,13 @@ is_generated character_data - Applies to a feature not available in PostgreSQL + Applies to a feature not available in PostgreSQL generation_expression character_data - Applies to a feature not available in PostgreSQL + Applies to a feature not available in PostgreSQL @@ -2152,19 +2152,19 @@ character_set_catalog sql_identifier - Applies to a feature not available in PostgreSQL + Applies to a feature not available in PostgreSQL character_set_schema sql_identifier - Applies to a feature not available in PostgreSQL + Applies to a feature not available in PostgreSQL character_set_name sql_identifier - Applies to a feature not available in PostgreSQL + Applies to a feature not available in PostgreSQL @@ -2300,25 +2300,25 @@ scope_catalog sql_identifier - Applies to a feature not available in PostgreSQL + Applies to a feature not available in PostgreSQL scope_schema sql_identifier - Applies to a feature not available in PostgreSQL + Applies to a feature not available in PostgreSQL scope_name sql_identifier - Applies to a feature not available in PostgreSQL + Applies to a feature not available in PostgreSQL maximum_cardinality cardinal_number - Always null, because arrays always have 
unlimited maximum cardinality in PostgreSQL + Always null, because arrays always have unlimited maximum cardinality in PostgreSQL @@ -2442,31 +2442,31 @@ ORDER BY c.ordinal_position; character_maximum_length cardinal_number - Always null, since this information is not applied to array element data types in PostgreSQL + Always null, since this information is not applied to array element data types in PostgreSQL character_octet_length cardinal_number - Always null, since this information is not applied to array element data types in PostgreSQL + Always null, since this information is not applied to array element data types in PostgreSQL character_set_catalog sql_identifier - Applies to a feature not available in PostgreSQL + Applies to a feature not available in PostgreSQL character_set_schema sql_identifier - Applies to a feature not available in PostgreSQL + Applies to a feature not available in PostgreSQL character_set_name sql_identifier - Applies to a feature not available in PostgreSQL + Applies to a feature not available in PostgreSQL @@ -2501,37 +2501,37 @@ ORDER BY c.ordinal_position; numeric_precision cardinal_number - Always null, since this information is not applied to array element data types in PostgreSQL + Always null, since this information is not applied to array element data types in PostgreSQL numeric_precision_radix cardinal_number - Always null, since this information is not applied to array element data types in PostgreSQL + Always null, since this information is not applied to array element data types in PostgreSQL numeric_scale cardinal_number - Always null, since this information is not applied to array element data types in PostgreSQL + Always null, since this information is not applied to array element data types in PostgreSQL datetime_precision cardinal_number - Always null, since this information is not applied to array element data types in PostgreSQL + Always null, since this information is not applied to array element data types in PostgreSQL interval_type character_data - Always null, since this information is not applied to array element data types in PostgreSQL + Always null, since this information is not applied to array element data types in PostgreSQL interval_precision cardinal_number - Always null, since this information is not applied to array element data types in PostgreSQL + Always null, since this information is not applied to array element data types in PostgreSQL @@ -2569,25 +2569,25 @@ ORDER BY c.ordinal_position; scope_catalog sql_identifier - Applies to a feature not available in PostgreSQL + Applies to a feature not available in PostgreSQL scope_schema sql_identifier - Applies to a feature not available in PostgreSQL + Applies to a feature not available in PostgreSQL scope_name sql_identifier - Applies to a feature not available in PostgreSQL + Applies to a feature not available in PostgreSQL maximum_cardinality cardinal_number - Always null, because arrays always have unlimited maximum cardinality in PostgreSQL + Always null, because arrays always have unlimited maximum cardinality in PostgreSQL @@ -2621,8 +2621,9 @@ ORDER BY c.ordinal_position; For permission checking, the set of applicable roles is applied, which can be broader than the set of enabled roles. So generally, it is better to use the view - applicable_roles instead of this one; see also - there. + applicable_roles instead of this one; See + for details on + applicable_roles view.
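A hedged usage sketch for the recommendation just made: querying applicable_roles lists every role whose privileges the current user can make use of, directly or indirectly:
<programlisting>
SELECT grantee, role_name, is_grantable
FROM information_schema.applicable_roles;
</programlisting>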
@@ -3134,7 +3135,7 @@ ORDER BY c.ordinal_position; sql_identifier The specific name of the function. See for more information. + linkend="infoschema-routines"/> for more information. @@ -3160,13 +3161,13 @@ ORDER BY c.ordinal_position; is_result yes_or_no - Applies to a feature not available in PostgreSQL + Applies to a feature not available in PostgreSQL as_locator yes_or_no - Applies to a feature not available in PostgreSQL + Applies to a feature not available in PostgreSQL @@ -3191,85 +3192,85 @@ ORDER BY c.ordinal_position; character_maximum_length cardinal_number - Always null, since this information is not applied to parameter data types in PostgreSQL + Always null, since this information is not applied to parameter data types in PostgreSQL character_octet_length cardinal_number - Always null, since this information is not applied to parameter data types in PostgreSQL + Always null, since this information is not applied to parameter data types in PostgreSQL character_set_catalog sql_identifier - Applies to a feature not available in PostgreSQL + Applies to a feature not available in PostgreSQL character_set_schema sql_identifier - Applies to a feature not available in PostgreSQL + Applies to a feature not available in PostgreSQL character_set_name sql_identifier - Applies to a feature not available in PostgreSQL + Applies to a feature not available in PostgreSQL collation_catalog sql_identifier - Always null, since this information is not applied to parameter data types in PostgreSQL + Always null, since this information is not applied to parameter data types in PostgreSQL collation_schema sql_identifier - Always null, since this information is not applied to parameter data types in PostgreSQL + Always null, since this information is not applied to parameter data types in PostgreSQL collation_name sql_identifier - Always null, since this information is not applied to parameter data types in PostgreSQL + Always null, since this information is not applied to parameter data types in PostgreSQL numeric_precision cardinal_number - Always null, since this information is not applied to parameter data types in PostgreSQL + Always null, since this information is not applied to parameter data types in PostgreSQL numeric_precision_radix cardinal_number - Always null, since this information is not applied to parameter data types in PostgreSQL + Always null, since this information is not applied to parameter data types in PostgreSQL numeric_scale cardinal_number - Always null, since this information is not applied to parameter data types in PostgreSQL + Always null, since this information is not applied to parameter data types in PostgreSQL datetime_precision cardinal_number - Always null, since this information is not applied to parameter data types in PostgreSQL + Always null, since this information is not applied to parameter data types in PostgreSQL interval_type character_data - Always null, since this information is not applied to parameter data types in PostgreSQL + Always null, since this information is not applied to parameter data types in PostgreSQL interval_precision cardinal_number - Always null, since this information is not applied to parameter data types in PostgreSQL + Always null, since this information is not applied to parameter data types in PostgreSQL @@ -3301,25 +3302,25 @@ ORDER BY c.ordinal_position; scope_catalog sql_identifier - Applies to a feature not available in PostgreSQL + Applies to a feature not available in PostgreSQL scope_schema sql_identifier - Applies to a 
feature not available in PostgreSQL + Applies to a feature not available in PostgreSQL scope_name sql_identifier - Applies to a feature not available in PostgreSQL + Applies to a feature not available in PostgreSQL maximum_cardinality cardinal_number - Always null, because arrays always have unlimited maximum cardinality in PostgreSQL + Always null, because arrays always have unlimited maximum cardinality in PostgreSQL @@ -3594,7 +3595,7 @@ ORDER BY c.ordinal_position; sql_identifier The specific name of the function. See for more information. + linkend="infoschema-routines"/> for more information. @@ -3930,7 +3931,7 @@ ORDER BY c.ordinal_position; sql_identifier The specific name of the function. See for more information. + linkend="infoschema-routines"/> for more information. @@ -3972,8 +3973,8 @@ ORDER BY c.ordinal_position; <literal>routines</literal> - The view routines contains all functions in the - current database. Only those functions are shown that the current + The view routines contains all functions and procedures in the + current database. Only those functions and procedures are shown that the current user has access to (by way of being the owner or having some privilege). @@ -4037,45 +4038,45 @@ ORDER BY c.ordinal_position; routine_type character_data - Always FUNCTION (In the future there might - be other types of routines.) + FUNCTION for a + function, PROCEDURE for a procedure module_catalog sql_identifier - Applies to a feature not available in PostgreSQL + Applies to a feature not available in PostgreSQL module_schema sql_identifier - Applies to a feature not available in PostgreSQL + Applies to a feature not available in PostgreSQL module_name sql_identifier - Applies to a feature not available in PostgreSQL + Applies to a feature not available in PostgreSQL udt_catalog sql_identifier - Applies to a feature not available in PostgreSQL + Applies to a feature not available in PostgreSQL udt_schema sql_identifier - Applies to a feature not available in PostgreSQL + Applies to a feature not available in PostgreSQL udt_name sql_identifier - Applies to a feature not available in PostgreSQL + Applies to a feature not available in PostgreSQL @@ -4087,92 +4088,92 @@ ORDER BY c.ordinal_position; the view element_types), else USER-DEFINED (in that case, the type is identified in type_udt_name and associated - columns). + columns). Null for a procedure. 
character_maximum_length cardinal_number - Always null, since this information is not applied to return data types in PostgreSQL + Always null, since this information is not applied to return data types in PostgreSQL character_octet_length cardinal_number - Always null, since this information is not applied to return data types in PostgreSQL + Always null, since this information is not applied to return data types in PostgreSQL character_set_catalog sql_identifier - Applies to a feature not available in PostgreSQL + Applies to a feature not available in PostgreSQL character_set_schema sql_identifier - Applies to a feature not available in PostgreSQL + Applies to a feature not available in PostgreSQL character_set_name sql_identifier - Applies to a feature not available in PostgreSQL + Applies to a feature not available in PostgreSQL collation_catalog sql_identifier - Always null, since this information is not applied to return data types in PostgreSQL + Always null, since this information is not applied to return data types in PostgreSQL collation_schema sql_identifier - Always null, since this information is not applied to return data types in PostgreSQL + Always null, since this information is not applied to return data types in PostgreSQL collation_name sql_identifier - Always null, since this information is not applied to return data types in PostgreSQL + Always null, since this information is not applied to return data types in PostgreSQL numeric_precision cardinal_number - Always null, since this information is not applied to return data types in PostgreSQL + Always null, since this information is not applied to return data types in PostgreSQL numeric_precision_radix cardinal_number - Always null, since this information is not applied to return data types in PostgreSQL + Always null, since this information is not applied to return data types in PostgreSQL numeric_scale cardinal_number - Always null, since this information is not applied to return data types in PostgreSQL + Always null, since this information is not applied to return data types in PostgreSQL datetime_precision cardinal_number - Always null, since this information is not applied to return data types in PostgreSQL + Always null, since this information is not applied to return data types in PostgreSQL interval_type character_data - Always null, since this information is not applied to return data types in PostgreSQL + Always null, since this information is not applied to return data types in PostgreSQL interval_precision cardinal_number - Always null, since this information is not applied to return data types in PostgreSQL + Always null, since this information is not applied to return data types in PostgreSQL @@ -4180,7 +4181,7 @@ ORDER BY c.ordinal_position; sql_identifier Name of the database that the return data type of the function - is defined in (always the current database) + is defined in (always the current database). Null for a procedure. @@ -4189,7 +4190,7 @@ ORDER BY c.ordinal_position; sql_identifier Name of the schema that the return data type of the function is - defined in + defined in. Null for a procedure. @@ -4197,32 +4198,32 @@ ORDER BY c.ordinal_position; type_udt_namesql_identifier - Name of the return data type of the function + Name of the return data type of the function. Null for a procedure. 
scope_catalog sql_identifier - Applies to a feature not available in PostgreSQL + Applies to a feature not available in PostgreSQL scope_schema sql_identifier - Applies to a feature not available in PostgreSQL + Applies to a feature not available in PostgreSQL scope_name sql_identifier - Applies to a feature not available in PostgreSQL + Applies to a feature not available in PostgreSQL maximum_cardinality cardinal_number - Always null, because arrays always have unlimited maximum cardinality in PostgreSQL + Always null, because arrays always have unlimited maximum cardinality in PostgreSQL @@ -4283,7 +4284,7 @@ ORDER BY c.ordinal_position; character_data Always GENERAL (The SQL standard defines - other parameter styles, which are not available in PostgreSQL.) + other parameter styles, which are not available in PostgreSQL.) @@ -4294,7 +4295,7 @@ ORDER BY c.ordinal_position; If the function is declared immutable (called deterministic in the SQL standard), then YES, else NO. (You cannot query the other volatility - levels available in PostgreSQL through the information schema.) + levels available in PostgreSQL through the information schema.) @@ -4304,7 +4305,7 @@ ORDER BY c.ordinal_position; Always MODIFIES, meaning that the function possibly modifies SQL data. This information is not useful for - PostgreSQL. + PostgreSQL. @@ -4314,14 +4315,14 @@ ORDER BY c.ordinal_position; If the function automatically returns null if any of its arguments are null, then YES, else - NO. + NO. Null for a procedure. sql_path character_data - Applies to a feature not available in PostgreSQL + Applies to a feature not available in PostgreSQL @@ -4330,26 +4331,26 @@ ORDER BY c.ordinal_position; Always YES (The opposite would be a method of a user-defined type, which is a feature not available in - PostgreSQL.) + PostgreSQL.) max_dynamic_result_sets cardinal_number - Applies to a feature not available in PostgreSQL + Applies to a feature not available in PostgreSQL is_user_defined_cast yes_or_no - Applies to a feature not available in PostgreSQL + Applies to a feature not available in PostgreSQL is_implicitly_invocable yes_or_no - Applies to a feature not available in PostgreSQL + Applies to a feature not available in PostgreSQL @@ -4366,43 +4367,43 @@ ORDER BY c.ordinal_position; to_sql_specific_catalog sql_identifier - Applies to a feature not available in PostgreSQL + Applies to a feature not available in PostgreSQL to_sql_specific_schema sql_identifier - Applies to a feature not available in PostgreSQL + Applies to a feature not available in PostgreSQL to_sql_specific_name sql_identifier - Applies to a feature not available in PostgreSQL + Applies to a feature not available in PostgreSQL as_locator yes_or_no - Applies to a feature not available in PostgreSQL + Applies to a feature not available in PostgreSQL created time_stamp - Applies to a feature not available in PostgreSQL + Applies to a feature not available in PostgreSQL last_altered time_stamp - Applies to a feature not available in PostgreSQL + Applies to a feature not available in PostgreSQL new_savepoint_level yes_or_no - Applies to a feature not available in PostgreSQL + Applies to a feature not available in PostgreSQL @@ -4411,152 +4412,152 @@ ORDER BY c.ordinal_position; Currently always NO. The alternative YES applies to a feature not available in - PostgreSQL. + PostgreSQL. 
result_cast_from_data_type character_data - Applies to a feature not available in PostgreSQL + Applies to a feature not available in PostgreSQL result_cast_as_locator yes_or_no - Applies to a feature not available in PostgreSQL + Applies to a feature not available in PostgreSQL result_cast_char_max_length cardinal_number - Applies to a feature not available in PostgreSQL + Applies to a feature not available in PostgreSQL result_cast_char_octet_length character_data - Applies to a feature not available in PostgreSQL + Applies to a feature not available in PostgreSQL result_cast_char_set_catalog sql_identifier - Applies to a feature not available in PostgreSQL + Applies to a feature not available in PostgreSQL result_cast_char_set_schema sql_identifier - Applies to a feature not available in PostgreSQL + Applies to a feature not available in PostgreSQL result_cast_char_set_name sql_identifier - Applies to a feature not available in PostgreSQL + Applies to a feature not available in PostgreSQL result_cast_collation_catalog sql_identifier - Applies to a feature not available in PostgreSQL + Applies to a feature not available in PostgreSQL result_cast_collation_schema sql_identifier - Applies to a feature not available in PostgreSQL + Applies to a feature not available in PostgreSQL result_cast_collation_name sql_identifier - Applies to a feature not available in PostgreSQL + Applies to a feature not available in PostgreSQL result_cast_numeric_precision cardinal_number - Applies to a feature not available in PostgreSQL + Applies to a feature not available in PostgreSQL result_cast_numeric_precision_radix cardinal_number - Applies to a feature not available in PostgreSQL + Applies to a feature not available in PostgreSQL result_cast_numeric_scale cardinal_number - Applies to a feature not available in PostgreSQL + Applies to a feature not available in PostgreSQL result_cast_datetime_precision character_data - Applies to a feature not available in PostgreSQL + Applies to a feature not available in PostgreSQL result_cast_interval_type character_data - Applies to a feature not available in PostgreSQL + Applies to a feature not available in PostgreSQL result_cast_interval_precision cardinal_number - Applies to a feature not available in PostgreSQL + Applies to a feature not available in PostgreSQL result_cast_type_udt_catalog sql_identifier - Applies to a feature not available in PostgreSQL + Applies to a feature not available in PostgreSQL result_cast_type_udt_schema sql_identifier - Applies to a feature not available in PostgreSQL + Applies to a feature not available in PostgreSQL result_cast_type_udt_name sql_identifier - Applies to a feature not available in PostgreSQL + Applies to a feature not available in PostgreSQL result_cast_scope_catalog sql_identifier - Applies to a feature not available in PostgreSQL + Applies to a feature not available in PostgreSQL result_cast_scope_schema sql_identifier - Applies to a feature not available in PostgreSQL + Applies to a feature not available in PostgreSQL result_cast_scope_name sql_identifier - Applies to a feature not available in PostgreSQL + Applies to a feature not available in PostgreSQL result_cast_maximum_cardinality cardinal_number - Applies to a feature not available in PostgreSQL + Applies to a feature not available in PostgreSQL result_cast_dtd_identifier sql_identifier - Applies to a feature not available in PostgreSQL + Applies to a feature not available in PostgreSQL @@ -4606,25 +4607,25 @@ ORDER BY c.ordinal_position; 
default_character_set_catalog sql_identifier - Applies to a feature not available in PostgreSQL + Applies to a feature not available in PostgreSQL default_character_set_schema sql_identifier - Applies to a feature not available in PostgreSQL + Applies to a feature not available in PostgreSQL default_character_set_name sql_identifier - Applies to a feature not available in PostgreSQL + Applies to a feature not available in PostgreSQL sql_path character_data - Applies to a feature not available in PostgreSQL + Applies to a feature not available in PostgreSQL @@ -4762,7 +4763,7 @@ ORDER BY c.ordinal_position; The table sql_features contains information about which formal features defined in the SQL standard are supported by PostgreSQL. This is the - same information that is presented in . + same information that is presented in . There you can also find some additional background information. @@ -4808,7 +4809,7 @@ ORDER BY c.ordinal_position; yes_or_no YES if the feature is fully supported by the - current version of PostgreSQL, NO if not + current version of PostgreSQL, NO if not @@ -4816,7 +4817,7 @@ ORDER BY c.ordinal_position; is_verified_by character_data - Always null, since the PostgreSQL development group does not + Always null, since the PostgreSQL development group does not perform formal testing of feature conformance @@ -4982,7 +4983,7 @@ ORDER BY c.ordinal_position; character_data The programming language, if the binding style is - EMBEDDED, else null. PostgreSQL only + EMBEDDED, else null. PostgreSQL only supports the language C. @@ -4998,7 +4999,7 @@ ORDER BY c.ordinal_position; The table sql_packages contains information about which feature packages defined in the SQL standard are supported by PostgreSQL. Refer to for background information on feature packages. + linkend="features"/> for background information on feature packages.
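As a usage sketch for these conformance tables, either can simply be queried to see what the running server reports as supported:
<programlisting>
SELECT feature_id, feature_name, is_supported
FROM information_schema.sql_features
ORDER BY feature_id;

SELECT feature_id, feature_name, is_supported
FROM information_schema.sql_packages;
</programlisting>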
@@ -5031,7 +5032,7 @@ ORDER BY c.ordinal_position; yes_or_no YES if the package is fully supported by the - current version of PostgreSQL, NO if not + current version of PostgreSQL, NO if not @@ -5039,7 +5040,7 @@ ORDER BY c.ordinal_position; is_verified_bycharacter_data - Always null, since the PostgreSQL development group does not + Always null, since the PostgreSQL development group does not perform formal testing of feature conformance @@ -5093,7 +5094,7 @@ ORDER BY c.ordinal_position; yes_or_no YES if the part is fully supported by the - current version of PostgreSQL, + current version of PostgreSQL, NO if not @@ -5102,7 +5103,7 @@ ORDER BY c.ordinal_position; is_verified_bycharacter_data - Always null, since the PostgreSQL development group does not + Always null, since the PostgreSQL development group does not perform formal testing of feature conformance @@ -5182,7 +5183,7 @@ ORDER BY c.ordinal_position; The table sql_sizing_profiles contains information about the sql_sizing values that are - required by various profiles of the SQL standard. PostgreSQL does + required by various profiles of the SQL standard. PostgreSQL does not track any SQL profiles, so this table is empty. @@ -5317,6 +5318,13 @@ ORDER BY c.ordinal_position; yes_or_noYES if the constraint is deferrable and initially deferred, NO if not + + enforced + yes_or_no + Applies to a feature not available in + PostgreSQL (currently always + YES) +
@@ -5465,13 +5473,13 @@ ORDER BY c.ordinal_position; self_referencing_column_name sql_identifier - Applies to a feature not available in PostgreSQL + Applies to a feature not available in PostgreSQL reference_generation character_data - Applies to a feature not available in PostgreSQL + Applies to a feature not available in PostgreSQL @@ -5586,7 +5594,7 @@ ORDER BY c.ordinal_position; sql_identifier The specific name of the function. See for more information. + linkend="infoschema-routines"/> for more information. @@ -5761,7 +5769,14 @@ ORDER BY c.ordinal_position; action_order cardinal_number - Not yet implemented + + Firing order among triggers on the same table having the same + event_manipulation, + action_timing, and + action_orientation. In + PostgreSQL, triggers are fired in name + order, so this column reflects that. + @@ -5779,7 +5794,7 @@ ORDER BY c.ordinal_position; character_data Statement that is executed by the trigger (currently always - EXECUTE PROCEDURE + EXECUTE FUNCTION function(...)) @@ -5806,31 +5821,31 @@ ORDER BY c.ordinal_position; action_reference_old_table sql_identifier - Applies to a feature not available in PostgreSQL + Name of the old transition table, or null if none action_reference_new_table sql_identifier - Applies to a feature not available in PostgreSQL + Name of the new transition table, or null if none action_reference_old_row sql_identifier - Applies to a feature not available in PostgreSQL + Applies to a feature not available in PostgreSQL action_reference_new_row sql_identifier - Applies to a feature not available in PostgreSQL + Applies to a feature not available in PostgreSQL created time_stamp - Applies to a feature not available in PostgreSQL + Applies to a feature not available in PostgreSQL @@ -5864,7 +5879,7 @@ ORDER BY c.ordinal_position; - Prior to PostgreSQL 9.1, this view's columns + Prior to PostgreSQL 9.1, this view's columns action_timing, action_reference_old_table, action_reference_new_table, @@ -5891,9 +5906,9 @@ ORDER BY c.ordinal_position; USAGE privileges granted on user-defined types to a currently enabled role or by a currently enabled role. There is one row for each combination of type, grantor, and grantee. This view shows only - composite types (see under + composite types (see under for why); see - for domain privileges. + for domain privileges. @@ -6068,7 +6083,7 @@ ORDER BY c.ordinal_position; differentiate between these. Other user-defined types such as base types and enums, which are PostgreSQL extensions, are not shown here. For domains, - see instead. + see instead.
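To illustrate the updated triggers view columns described above, a hedged sketch (the trigger and trigger-function names are invented, and orders_audit_fn() must already exist as a trigger function): the REFERENCING clause is what populates action_reference_old_table and action_reference_new_table, and, per the change above, action_statement now reports the EXECUTE FUNCTION spelling:
<programlisting>
CREATE TRIGGER orders_audit
    AFTER UPDATE ON orders
    REFERENCING OLD TABLE AS old_rows NEW TABLE AS new_rows
    FOR EACH STATEMENT
    EXECUTE FUNCTION orders_audit_fn();

SELECT trigger_name, action_order, action_statement,
       action_reference_old_table, action_reference_new_table
FROM information_schema.triggers
WHERE event_object_table = 'orders';
</programlisting>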
@@ -6113,151 +6128,151 @@ ORDER BY c.ordinal_position; is_instantiable yes_or_no - Applies to a feature not available in PostgreSQL + Applies to a feature not available in PostgreSQL is_final yes_or_no - Applies to a feature not available in PostgreSQL + Applies to a feature not available in PostgreSQL ordering_form character_data - Applies to a feature not available in PostgreSQL + Applies to a feature not available in PostgreSQL ordering_category character_data - Applies to a feature not available in PostgreSQL + Applies to a feature not available in PostgreSQL ordering_routine_catalog sql_identifier - Applies to a feature not available in PostgreSQL + Applies to a feature not available in PostgreSQL ordering_routine_schema sql_identifier - Applies to a feature not available in PostgreSQL + Applies to a feature not available in PostgreSQL ordering_routine_name sql_identifier - Applies to a feature not available in PostgreSQL + Applies to a feature not available in PostgreSQL reference_type character_data - Applies to a feature not available in PostgreSQL + Applies to a feature not available in PostgreSQL data_type character_data - Applies to a feature not available in PostgreSQL + Applies to a feature not available in PostgreSQL character_maximum_length cardinal_number - Applies to a feature not available in PostgreSQL + Applies to a feature not available in PostgreSQL character_octet_length cardinal_number - Applies to a feature not available in PostgreSQL + Applies to a feature not available in PostgreSQL character_set_catalog sql_identifier - Applies to a feature not available in PostgreSQL + Applies to a feature not available in PostgreSQL character_set_schema sql_identifier - Applies to a feature not available in PostgreSQL + Applies to a feature not available in PostgreSQL character_set_name sql_identifier - Applies to a feature not available in PostgreSQL + Applies to a feature not available in PostgreSQL collation_catalog sql_identifier - Applies to a feature not available in PostgreSQL + Applies to a feature not available in PostgreSQL collation_schema sql_identifier - Applies to a feature not available in PostgreSQL + Applies to a feature not available in PostgreSQL collation_name sql_identifier - Applies to a feature not available in PostgreSQL + Applies to a feature not available in PostgreSQL numeric_precision cardinal_number - Applies to a feature not available in PostgreSQL + Applies to a feature not available in PostgreSQL numeric_precision_radix cardinal_number - Applies to a feature not available in PostgreSQL + Applies to a feature not available in PostgreSQL numeric_scale cardinal_number - Applies to a feature not available in PostgreSQL + Applies to a feature not available in PostgreSQL datetime_precision cardinal_number - Applies to a feature not available in PostgreSQL + Applies to a feature not available in PostgreSQL interval_type character_data - Applies to a feature not available in PostgreSQL + Applies to a feature not available in PostgreSQL interval_precision cardinal_number - Applies to a feature not available in PostgreSQL + Applies to a feature not available in PostgreSQL source_dtd_identifier sql_identifier - Applies to a feature not available in PostgreSQL + Applies to a feature not available in PostgreSQL ref_dtd_identifier sql_identifier - Applies to a feature not available in PostgreSQL + Applies to a feature not available in PostgreSQL @@ -6522,7 +6537,7 @@ ORDER BY c.ordinal_position; sql_identifier The specific name of the function. 
See for more information. + linkend="infoschema-routines"/> for more information. @@ -6660,7 +6675,7 @@ ORDER BY c.ordinal_position; check_option character_data - Applies to a feature not available in PostgreSQL + Applies to a feature not available in PostgreSQL @@ -6686,8 +6701,8 @@ ORDER BY c.ordinal_position; is_trigger_updatable yes_or_no - YES if the view has an INSTEAD OF - UPDATE trigger defined on it, NO if not + YES if the view has an INSTEAD OF + UPDATE trigger defined on it, NO if not @@ -6695,8 +6710,8 @@ ORDER BY c.ordinal_position; is_trigger_deletableyes_or_no - YES if the view has an INSTEAD OF - DELETE trigger defined on it, NO if not + YES if the view has an INSTEAD OF + DELETE trigger defined on it, NO if not @@ -6704,8 +6719,8 @@ ORDER BY c.ordinal_position; is_trigger_insertable_intoyes_or_no - YES if the view has an INSTEAD OF - INSERT trigger defined on it, NO if not + YES if the view has an INSTEAD OF + INSERT trigger defined on it, NO if not diff --git a/doc/src/sgml/install-windows.sgml b/doc/src/sgml/install-windows.sgml index 1861e7e2f7..22a2ffd55e 100644 --- a/doc/src/sgml/install-windows.sgml +++ b/doc/src/sgml/install-windows.sgml @@ -19,10 +19,10 @@ There are several different ways of building PostgreSQL on Windows. The simplest way to build with - Microsoft tools is to install Visual Studio Express 2015 + Microsoft tools is to install Visual Studio Express 2017 for Windows Desktop and use the included compiler. It is also possible to build with the full - Microsoft Visual C++ 2005 to 2015. + Microsoft Visual C++ 2013 to 2017. In some cases that requires the installation of the Windows SDK in addition to the compiler. @@ -37,8 +37,8 @@ Building using MinGW or Cygwin uses the normal build system, see - and the specific notes in - and . + and the specific notes in + and . To produce native 64 bit binaries in these environments, use the tools from MinGW-w64. These tools can also be used to cross-compile for 32 bit and 64 bit Windows @@ -69,26 +69,32 @@ Visual Studio Express or some versions of the Microsoft Windows SDK. If you do not already have a Visual Studio environment set up, the easiest - ways are to use the compilers from Visual Studio Express 2015 + ways are to use the compilers from Visual Studio Express 2017 for Windows Desktop or those in the Windows SDK - 7.1, which are both free downloads from Microsoft. + 8.1, which are both free downloads from Microsoft. Both 32-bit and 64-bit builds are possible with the Microsoft Compiler suite. 32-bit PostgreSQL builds are possible with - Visual Studio 2005 to - Visual Studio 2015 (including Express editions), - as well as standalone Windows SDK releases 6.0 to 7.1. + Visual Studio 2013 to + Visual Studio 2017 (including Express editions), + as well as standalone Windows SDK releases 6.0 to 8.1. 64-bit PostgreSQL builds are supported with - Microsoft Windows SDK version 6.0a to 7.1 or - Visual Studio 2008 and above. Compilation - is supported down to Windows XP and - Windows Server 2003 when building with - Visual Studio 2005 to - Visual Studio 2013. Building with - Visual Studio 2015 is supported down to - Windows Vista and Windows Server 2008. + Microsoft Windows SDK version 6.0a to 8.1 or + Visual Studio 2013 and above. Compilation + is supported down to Windows 7 and + Windows Server 2008 R2 SP1 when building with + Visual Studio 2013 to + Visual Studio 2017. 
+ @@ -161,7 +167,7 @@ $ENV{MSBFLAGS}="/m"; Microsoft Windows SDK it is recommended that you upgrade to the latest version (currently version 7.1), available for download from - . + . You must always include the @@ -180,7 +186,7 @@ $ENV{MSBFLAGS}="/m"; ActiveState Perl is required to run the build generation scripts. MinGW or Cygwin Perl will not work. It must also be present in the PATH. Binaries can be downloaded from - + (Note: version 5.8.3 or later is required, the free Standard Distribution is sufficient). @@ -217,7 +223,7 @@ $ENV{MSBFLAGS}="/m"; Both Bison and Flex are included in the msys tool suite, available - from as part of the + from as part of the MinGW compiler suite. @@ -257,7 +263,7 @@ $ENV{MSBFLAGS}="/m"; Diff Diff is required to run the regression tests, and can be downloaded - from . + from . @@ -265,7 +271,7 @@ $ENV{MSBFLAGS}="/m"; Gettext Gettext is required to build with NLS support, and can be downloaded - from . Note that binaries, + from . Note that binaries, dependencies and developer files are all needed. @@ -275,7 +281,7 @@ $ENV{MSBFLAGS}="/m"; Required for GSSAPI authentication support. MIT Kerberos can be downloaded from - . + . @@ -284,18 +290,18 @@ $ENV{MSBFLAGS}="/m"; libxslt Required for XML support. Binaries can be downloaded from - or source from - . Note that libxml2 requires iconv, + or source from + . Note that libxml2 requires iconv, which is available from the same download location. - openssl + OpenSSL Required for SSL support. Binaries can be downloaded from - - or source from . + + or source from . @@ -304,7 +310,7 @@ $ENV{MSBFLAGS}="/m"; Required for UUID-OSSP support (contrib only). Source can be downloaded from - . + . @@ -312,7 +318,7 @@ $ENV{MSBFLAGS}="/m"; Python Required for building PL/Python. Binaries can - be downloaded from . + be downloaded from . @@ -321,7 +327,7 @@ $ENV{MSBFLAGS}="/m"; Required for compression support in pg_dump and pg_restore. Binaries can be downloaded - from . + from . @@ -345,8 +351,8 @@ $ENV{MSBFLAGS}="/m"; - To use a server-side third party library such as python or - openssl, this library must also be + To use a server-side third party library such as python or + OpenSSL, this library must also be 64-bit. There is no support for loading a 32-bit library in a 64-bit server. Several of the third party libraries that PostgreSQL supports may only be available in 32-bit versions, in which case they cannot be used with @@ -455,25 +461,25 @@ $ENV{CONFIG}="Debug"; For more information about the regression tests, see - . + . Running the regression tests on client programs, with - vcregress bincheck, or on recovery tests, with - vcregress recoverycheck, requires an additional Perl module + vcregress bincheck, or on recovery tests, with + vcregress recoverycheck, requires an additional Perl module to be installed: IPC::Run - As of this writing, IPC::Run is not included in the + As of this writing, IPC::Run is not included in the ActiveState Perl installation, nor in the ActiveState Perl Package Manager (PPM) library. To install, download the - IPC-Run-<version>.tar.gz source archive from CPAN, - at , and - uncompress. Edit the buildenv.pl file, and add a PERL5LIB - variable to point to the lib subdirectory from the + IPC-Run-<version>.tar.gz source archive from CPAN, + at , and + uncompress. Edit the buildenv.pl file, and add a PERL5LIB + variable to point to the lib subdirectory from the extracted archive. For example: $ENV{PERL5LIB}=$ENV{PERL5LIB} . 
';c:\IPC-Run-0.94\lib'; @@ -484,52 +490,5 @@ $ENV{PERL5LIB}=$ENV{PERL5LIB} . ';c:\IPC-Run-0.94\lib'; - - Building the Documentation - - - Building the PostgreSQL documentation in HTML format requires several tools - and files. Create a root directory for all these files, and store them - in the subdirectories in the list below. - - - OpenJade 1.3.1-2 - - Download from - - and uncompress in the subdirectory openjade-1.3.1. - - - - - DocBook DTD 4.2 - - Download from - - and uncompress in the subdirectory docbook. - - - - - ISO character entities - - Download from - and - uncompress in the subdirectory docbook. - - - - Edit the buildenv.pl file, and add a variable for the - location of the root directory, for example: - -$ENV{DOCROOT}='c:\docbook'; - - To build the documentation, run the command - builddoc.bat. Note that this will actually run the - build twice, in order to generate the indexes. The generated HTML files - will be in doc\src\sgml. - - - diff --git a/doc/src/sgml/installation.sgml b/doc/src/sgml/installation.sgml index 12866b4bf7..d3326ce182 100644 --- a/doc/src/sgml/installation.sgml +++ b/doc/src/sgml/installation.sgml @@ -1,28 +1,25 @@ - <![%standalone-include[<productname>PostgreSQL</>]]> - Installation from Source Code + Installation from Source Code installation - This - describes the installation of + This chapter describes the installation of PostgreSQL using the source code distribution. (If you are installing a pre-packaged distribution, - such as an RPM or Debian package, ignore this - - + such as an RPM or Debian package, ignore this chapter and read the packager's instructions instead.) @@ -45,8 +42,7 @@ su - postgres /usr/local/pgsql/bin/psql test The long version is the rest of this - - + chapter. @@ -56,17 +52,17 @@ su - postgres In general, a modern Unix-compatible platform should be able to run - PostgreSQL. + PostgreSQL. The platforms that had received specific testing at the - time of release are listed in - below. In the doc subdirectory of the distribution - there are several platform-specific FAQ documents you + time of release are listed in + below. In the doc subdirectory of the distribution + there are several platform-specific FAQ documents you might wish to consult if you are having trouble. The following software packages are required for building - PostgreSQL: + PostgreSQL: @@ -75,9 +71,9 @@ su - postgres make - GNU make version 3.80 or newer is required; other - make programs or older GNU make versions will not work. - (GNU make is sometimes installed under + GNU make version 3.80 or newer is required; other + make programs or older GNU make versions will not work. + (GNU make is sometimes installed under the name gmake.) To test for GNU make enter: @@ -88,19 +84,19 @@ su - postgres - You need an ISO/ANSI C compiler (at least - C89-compliant). Recent - versions of GCC are recommended, but - PostgreSQL is known to build using a wide variety + You need an ISO/ANSI C compiler (at least + C99-compliant). Recent + versions of GCC are recommended, but + PostgreSQL is known to build using a wide variety of compilers from different vendors. - tar is required to unpack the source + tar is required to unpack the source distribution, in addition to either - gzip or bzip2. + gzip or bzip2. @@ -113,23 +109,23 @@ su - postgres libedit - The GNU Readline library is used by + The GNU Readline library is used by default. 
It allows psql (the PostgreSQL command line SQL interpreter) to remember each command you type, and allows you to use arrow keys to recall and edit previous commands. This is very helpful and is strongly recommended. If you don't want to use it then you must specify the option to - configure. As an alternative, you can often use the + configure. As an alternative, you can often use the BSD-licensed libedit library, originally developed on NetBSD. The libedit library is GNU Readline-compatible and is used if libreadline is not found, or if is used as an - option to configure. If you are using a package-based + option to configure. If you are using a package-based Linux distribution, be aware that you need both the - readline and readline-devel packages, if + readline and readline-devel packages, if those are separate in your distribution. @@ -144,8 +140,8 @@ su - postgres used by default. If you don't want to use it then you must specify the option to configure. Using this option disables - support for compressed archives in pg_dump and - pg_restore. + support for compressed archives in pg_dump and + pg_restore. @@ -183,22 +179,21 @@ su - postgres If you intend to make more than incidental use of PL/Perl, you should ensure that the Perl installation was built with the - usemultiplicity option enabled (perl -V + usemultiplicity option enabled (perl -V will show whether this is the case). - To build the PL/Python server programming + To build the PL/Python server programming language, you need a Python installation with the header files and the distutils module. The minimum required version is Python 2.4. Python 3 is supported if it's version 3.1 or later; but see - PL/Python documentation]]> - ]]> + when using Python 3. @@ -214,15 +209,15 @@ su - postgres find a shared libpython. That might mean that you either have to install additional packages or rebuild (part of) your Python installation to provide this shared - library. When building from source, run Python's - configure with the --enable-shared flag. + library. When building from source, run Python's + configure with the --enable-shared flag. To build the PL/Tcl - procedural language, you of course need a Tcl + procedural language, you of course need a Tcl installation. The minimum required version is Tcl 8.4. @@ -233,13 +228,13 @@ su - postgres To enable Native Language Support (NLS), that is, the ability to display a program's messages in a language other than English, you need an implementation of the - Gettext API. Some operating + Gettext API. Some operating systems have this built-in (e.g., Linux, NetBSD, - Solaris), for other systems you + class="osname">Linux, NetBSD, + Solaris), for other systems you can download an add-on package from . - If you are using the Gettext implementation in + If you are using the Gettext implementation in the GNU C library then you will additionally need the GNU Gettext package for some utility programs. For any of the other implementations you will @@ -249,7 +244,7 @@ su - postgres - You need OpenSSL, if you want to support + You need OpenSSL, if you want to support encrypted client connections. The minimum required version is 0.9.8. @@ -257,8 +252,8 @@ su - postgres - You need Kerberos, OpenLDAP, - and/or PAM, if you want to support authentication + You need Kerberos, OpenLDAP, + and/or PAM, if you want to support authentication using those services. @@ -267,9 +262,7 @@ su - postgres To build the PostgreSQL documentation, there is a separate set of requirements; see - .]]> - + . 
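To connect the optional packages above with the build itself, a hypothetical configure invocation that enables the corresponding features (assuming the Perl, Python, Tcl, Gettext, OpenSSL, OpenLDAP and PAM development files are installed in their default locations) might look like:

    # each switch needs the matching library and headers described above
    ./configure --with-perl --with-python --with-tcl \
                --enable-nls --with-openssl --with-ldap --with-pam

The individual options are described in the installation procedure below.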
@@ -296,12 +289,12 @@ su - postgres yacc - GNU Flex and Bison + Flex and Bison are needed to build from a Git checkout, or if you changed the actual scanner and parser definition files. If you need them, be sure - to get Flex 2.5.31 or later and - Bison 1.875 or later. Other lex - and yacc programs cannot be used. + to get Flex 2.5.31 or later and + Bison 1.875 or later. Other lex + and yacc programs cannot be used. @@ -310,10 +303,10 @@ su - postgres perl - Perl 5.8.3 or later is needed to build from a Git checkout, + Perl 5.8.3 or later is needed to build from a Git checkout, or if you changed the input files for any of the build steps that use Perl scripts. If building on Windows you will need - Perl in any case. Perl is + Perl in any case. Perl is also required to run some test suites. @@ -323,7 +316,7 @@ su - postgres If you need to get a GNU package, you can find it at your local GNU mirror site (see + url="https://www.gnu.org/prep/ftp"> for a list) or at . @@ -340,12 +333,11 @@ su - postgres - Getting The Source - The PostgreSQL &version; sources can be obtained from the + The PostgreSQL &version; sources can be obtained from the download section of our website: . You should get a file named postgresql-&version;.tar.gz @@ -359,17 +351,16 @@ su - postgres have the .bz2 file.) This will create a directory postgresql-&version; under the current directory - with the PostgreSQL sources. + with the PostgreSQL sources. Change into that directory for the rest of the installation procedure. You can also get the source directly from the version control repository, see - . + . -]]> Installation Procedure @@ -386,7 +377,7 @@ su - postgres The first step of the installation procedure is to configure the source tree for your system and choose the options you would like. - This is done by running the configure script. For a + This is done by running the configure script. For a default installation simply enter: ./configure @@ -412,7 +403,7 @@ su - postgres The default configuration will build the server and utilities, as well as all client applications and interfaces that require only a C compiler. All files will be installed under - /usr/local/pgsql by default. + /usr/local/pgsql by default. @@ -422,14 +413,14 @@ su - postgres - + - Install all files under the directory PREFIX + Install all files under the directory PREFIX instead of /usr/local/pgsql. The actual files will be installed into various subdirectories; no files will ever be installed directly into the - PREFIX directory. + PREFIX directory. @@ -437,13 +428,13 @@ su - postgres individual subdirectories with the following options. However, if you leave these with their defaults, the installation will be relocatable, meaning you can move the directory after - installation. (The man and doc + installation. (The man and doc locations are not affected by this.) For relocatable installs, you might want to use - configure's --disable-rpath + configure's --disable-rpath option. Also, you will need to tell the operating system how to find the shared libraries. @@ -451,15 +442,15 @@ su - postgres - + You can install architecture-dependent files under a - different prefix, EXEC-PREFIX, than what - PREFIX was set to. This can be useful to + different prefix, EXEC-PREFIX, than what + PREFIX was set to. This can be useful to share architecture-independent files between hosts. 
If you - omit this, then EXEC-PREFIX is set equal to - PREFIX and both architecture-dependent and + omit this, then EXEC-PREFIX is set equal to + PREFIX and both architecture-dependent and independent files will be installed under the same tree, which is probably what you want. @@ -467,114 +458,114 @@ su - postgres - + Specifies the directory for executable programs. The default - is EXEC-PREFIX/bin, which - normally means /usr/local/pgsql/bin. + is EXEC-PREFIX/bin, which + normally means /usr/local/pgsql/bin. - + Sets the directory for various configuration files, - PREFIX/etc by default. + PREFIX/etc by default. - + Sets the location to install libraries and dynamically loadable modules. The default is - EXEC-PREFIX/lib. + EXEC-PREFIX/lib. - + Sets the directory for installing C and C++ header files. The - default is PREFIX/include. + default is PREFIX/include. - + Sets the root directory for various types of read-only data files. This only sets the default for some of the following options. The default is - PREFIX/share. + PREFIX/share. - + Sets the directory for read-only data files used by the installed programs. The default is - DATAROOTDIR. Note that this has + DATAROOTDIR. Note that this has nothing to do with where your database files will be placed. - + Sets the directory for installing locale data, in particular message translation catalog files. The default is - DATAROOTDIR/locale. + DATAROOTDIR/locale. - + - The man pages that come with PostgreSQL will be installed under + The man pages that come with PostgreSQL will be installed under this directory, in their respective - manx subdirectories. - The default is DATAROOTDIR/man. + manx subdirectories. + The default is DATAROOTDIR/man. - + Sets the root directory for installing documentation files, - except man pages. This only sets the default for + except man pages. This only sets the default for the following options. The default value for this option is - DATAROOTDIR/doc/postgresql. + DATAROOTDIR/doc/postgresql. - + The HTML-formatted documentation for PostgreSQL will be installed under this directory. The default is - DATAROOTDIR. + DATAROOTDIR. @@ -583,15 +574,15 @@ su - postgres Care has been taken to make it possible to install - PostgreSQL into shared installation locations + PostgreSQL into shared installation locations (such as /usr/local/include) without interfering with the namespace of the rest of the system. First, the string /postgresql is automatically appended to datadir, sysconfdir, and docdir, unless the fully expanded directory name already contains the - string postgres or - pgsql. For example, if you choose + string postgres or + pgsql. For example, if you choose /usr/local as prefix, the documentation will be installed in /usr/local/doc/postgresql, but if the prefix is /opt/postgres, then it @@ -611,10 +602,10 @@ su - postgres - + - Append STRING to the PostgreSQL version number. You + Append STRING to the PostgreSQL version number. You can use this, for example, to mark binaries built from unreleased Git snapshots or containing custom patches with an extra version string such as a git describe identifier or a @@ -624,35 +615,35 @@ su - postgres - + - DIRECTORIES is a colon-separated list of + DIRECTORIES is a colon-separated list of directories that will be added to the list the compiler searches for header files. 
If you have optional packages - (such as GNU Readline) installed in a non-standard + (such as GNU Readline) installed in a non-standard location, you have to use this option and probably also the corresponding - option. - Example: --with-includes=/opt/gnu/include:/usr/sup/include. + Example: --with-includes=/opt/gnu/include:/usr/sup/include. - + - DIRECTORIES is a colon-separated list of + DIRECTORIES is a colon-separated list of directories to search for libraries. You will probably have to use this option (and the corresponding - option) if you have packages installed in non-standard locations. - Example: --with-libraries=/opt/gnu/lib:/usr/sup/lib. + Example: --with-libraries=/opt/gnu/lib:/usr/sup/lib. @@ -666,7 +657,7 @@ su - postgres language other than English. LANGUAGES is an optional space-separated list of codes of the languages that you want supported, for - example --enable-nls='de fr'. (The intersection + example --enable-nls='de fr'. (The intersection between your list and the set of actually provided translations will be computed automatically.) If you do not specify a list, then all available translations are @@ -675,22 +666,22 @@ su - postgres To use this option, you will need an implementation of the - Gettext API; see above. + Gettext API; see above. - + - Set NUMBER as the default port number for + Set NUMBER as the default port number for server and clients. The default is 5432. The port can always be changed later on, but if you specify it here then both server and clients will have the same default compiled in, which can be very convenient. Usually the only good reason to select a non-default value is if you intend to run multiple - PostgreSQL servers on the same machine. + PostgreSQL servers on the same machine. @@ -699,7 +690,7 @@ su - postgres - Build the PL/Perl server-side language. + Build the PL/Perl server-side language. @@ -708,7 +699,7 @@ su - postgres - Build the PL/Python server-side language. + Build the PL/Python server-side language. @@ -717,7 +708,7 @@ su - postgres - Build the PL/Tcl server-side language. + Build the PL/Tcl server-side language. @@ -743,10 +734,10 @@ su - postgres Build with support for GSSAPI authentication. On many systems, the GSSAPI (usually a part of the Kerberos installation) system is not installed in a location - that is searched by default (e.g., /usr/include, - /usr/lib), so you must use the options - @@ -754,7 +745,7 @@ su - postgres - + The default name of the Kerberos service principal used @@ -767,12 +758,47 @@ su - postgres + + + + + Build with support for LLVM based + JIT compilation (see ). This + requires the LLVM library to be installed. + The minimum required version of LLVM is + currently 3.9. + + + llvm-configllvm-config + will be used to find the required compilation options. + llvm-config, and then + llvm-config-$major-$minor for all supported + versions, will be searched on PATH. If that would not + yield the correct binary, use LLVM_CONFIG to specify a + path to the correct llvm-config. For example + +./configure ... --with-llvm LLVM_CONFIG='/path/to/llvm/bin/llvm-config' + + + + + LLVM support requires a compatible + clang compiler (specified, if necessary, using the + CLANG environment variable), and a working C++ + compiler (specified, if necessary, using the CXX + environment variable). + + + + Build with support for - the ICUICU + the ICUICU library. This requires the ICU4C package to be installed. The minimum required version of ICU4C is currently 4.2. 
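As a concrete illustration of the two options just described, a hypothetical invocation enabling both JIT support and ICU might look like this (the llvm-config and clang paths are only examples); how the ICU compilation options are located is explained next:

    # paths are illustrative; point LLVM_CONFIG and CLANG at your LLVM installation
    ./configure --with-llvm --with-icu \
                LLVM_CONFIG=/usr/lib/llvm-6.0/bin/llvm-config \
                CLANG=/usr/lib/llvm-6.0/bin/clang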
@@ -780,7 +806,7 @@ su - postgres By default, - pkg-configpkg-config + pkg-configpkg-config will be used to find the required compilation options. This is supported for ICU4C version 4.6 and later. For older versions, or if pkg-config is @@ -807,11 +833,11 @@ su - postgres - Build with support for SSL (encrypted) - connections. This requires the OpenSSL - package to be installed. configure will check + Build with support for SSL (encrypted) + connections. This requires the OpenSSL + package to be installed. configure will check for the required header files and libraries to make sure that - your OpenSSL installation is sufficient + your OpenSSL installation is sufficient before proceeding. @@ -821,7 +847,7 @@ su - postgres - Build with PAMPAM + Build with PAMPAM (Pluggable Authentication Modules) support. @@ -842,16 +868,15 @@ su - postgres - Build with LDAPLDAP + Build with LDAPLDAP support for authentication and connection parameter lookup (see - and - ]]> for more information). On Unix, - this requires the OpenLDAP package to be - installed. On Windows, the default WinLDAP - library is used. configure will check for the required + and + for more information). On Unix, + this requires the OpenLDAP package to be + installed. On Windows, the default WinLDAP + library is used. configure will check for the required header files and libraries to make sure that your - OpenLDAP installation is sufficient before + OpenLDAP installation is sufficient before proceeding. @@ -865,8 +890,8 @@ su - postgres for systemdsystemd service notifications. This improves integration if the server binary is started under systemd but has no impact - otherwise for more - information]]>. libsystemd and the + otherwise; see for more + information. libsystemd and the associated header files need to be installed to be able to use this option. @@ -877,8 +902,8 @@ su - postgres - Prevents use of the Readline library - (and libedit as well). This option disables + Prevents use of the Readline library + (and libedit as well). This option disables command-line editing and history in psql, so it is not recommended. @@ -889,10 +914,10 @@ su - postgres - Favors the use of the BSD-licensed libedit library - rather than GPL-licensed Readline. This option + Favors the use of the BSD-licensed libedit library + rather than GPL-licensed Readline. This option is significant only if you have both libraries installed; the - default in that case is to use Readline. + default in that case is to use Readline. @@ -911,8 +936,7 @@ su - postgres - Build the - ]]> module + Build the module (which provides functions to generate UUIDs), using the specified UUID library.UUID LIBRARY must be one of: @@ -920,21 +944,21 @@ su - postgres - to use the UUID functions found in FreeBSD, NetBSD, and some other BSD-derived systems - - to use the OSSP UUID library @@ -979,9 +1003,8 @@ su - postgres Use libxslt when building the - - ]]> - module. xml2 relies on this library + + module. xml2 relies on this library to perform XSL transformations of XML. @@ -991,13 +1014,13 @@ su - postgres - Disable passing float4 values by value, causing them - to be passed by reference instead. This option costs + Disable passing float4 values by value, causing them + to be passed by reference instead. This option costs performance, but may be needed for compatibility with old user-defined functions that are written in C and use the - version 0 calling convention. A better long-term + version 0 calling convention. 
A better long-term solution is to update any such functions to use the - version 1 calling convention. + version 1 calling convention. @@ -1006,17 +1029,17 @@ su - postgres - Disable passing float8 values by value, causing them - to be passed by reference instead. This option costs + Disable passing float8 values by value, causing them + to be passed by reference instead. This option costs performance, but may be needed for compatibility with old user-defined functions that are written in C and use the - version 0 calling convention. A better long-term + version 0 calling convention. A better long-term solution is to update any such functions to use the - version 1 calling convention. + version 1 calling convention. Note that this option affects not only float8, but also int8 and some related types such as timestamp. - On 32-bit platforms, is the default + and it is not allowed to select . @@ -1025,17 +1048,17 @@ su - postgres - Set the segment size, in gigabytes. Large tables are + Set the segment size, in gigabytes. Large tables are divided into multiple operating-system files, each of size equal to the segment size. This avoids problems with file size limits that exist on many platforms. The default segment size, 1 gigabyte, is safe on all supported platforms. If your operating system has - largefile support (which most do, nowadays), you can use + largefile support (which most do, nowadays), you can use a larger segment size. This can be helpful to reduce the number of file descriptors consumed when working with very large tables. But be careful not to select a value larger than is supported by your platform and the file systems you intend to use. Other - tools you might wish to use, such as tar, could + tools you might wish to use, such as tar, could also set limits on the usable file size. It is recommended, though not absolutely required, that this value be a power of 2. @@ -1048,7 +1071,7 @@ su - postgres - Set the block size, in kilobytes. This is the unit + Set the block size, in kilobytes. This is the unit of storage and I/O within tables. The default, 8 kilobytes, is suitable for most situations; but other values may be useful in special cases. @@ -1058,25 +1081,11 @@ su - postgres - - - - - Set the WAL segment size, in megabytes. This is - the size of each individual file in the WAL log. It may be useful - to adjust this size to control the granularity of WAL log shipping. - The default size is 16 megabytes. - The value must be a power of 2 between 1 and 1024 (megabytes). - Note that changing this value requires an initdb. - - - - - Set the WAL block size, in kilobytes. This is the unit + Set the WAL block size, in kilobytes. This is the unit of storage and I/O within the WAL log. The default, 8 kilobytes, is suitable for most situations; but other values may be useful in special cases. @@ -1090,14 +1099,14 @@ su - postgres - Allow the build to succeed even if PostgreSQL + Allow the build to succeed even if PostgreSQL has no CPU spinlock support for the platform. The lack of spinlock support will result in poor performance; therefore, this option should only be used if the build aborts and informs you that the platform lacks spinlock support. If this - option is required to build PostgreSQL on + option is required to build PostgreSQL on your platform, please report the problem to the - PostgreSQL developers. + PostgreSQL developers. 
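The segment and block sizes discussed above are fixed at configure time. Assuming the usual option names of a standard source tree (--with-segsize, --with-blocksize and --with-wal-blocksize), a hypothetical non-default build could be configured as:

    # 4 GB table segments, 32 kB table blocks, 16 kB WAL blocks;
    # a cluster initialized with different sizes cannot be reused, so plan for a fresh initdb
    ./configure --with-segsize=4 --with-blocksize=32 --with-wal-blocksize=16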
@@ -1106,12 +1115,11 @@ su - postgres - Allow the build to succeed even if PostgreSQL + Allow the build to succeed even if PostgreSQL has no support for strong random numbers on the platform. A source of random numbers is needed for some authentication protocols, as well as some routines in the - - ]]> + module. disables functionality that requires cryptographically strong random numbers, and substitutes a weak pseudo-random-number-generator for the generation of @@ -1141,7 +1149,7 @@ su - postgres - PostgreSQL includes its own time zone database, + PostgreSQL includes its own time zone database, which it requires for date and time operations. This time zone database is in fact compatible with the IANA time zone database provided by many operating systems such as FreeBSD, @@ -1155,7 +1163,7 @@ su - postgres installation routine will not detect mismatching or erroneous time zone data. If you use this option, you are advised to run the regression tests to verify that the time zone data you have - pointed to works correctly with PostgreSQL. + pointed to works correctly with PostgreSQL. cross compilation @@ -1180,7 +1188,7 @@ su - postgres zlib - Prevents use of the Zlib library. This disables + Prevents use of the Zlib library. This disables support for compressed archives in pg_dump and pg_restore. This option is only intended for those rare systems where this @@ -1215,8 +1223,8 @@ su - postgres code coverage testing instrumentation. When run, they generate files in the build directory with code coverage metrics. - - for more information.]]> This option is for use only with GCC + See + for more information. This option is for use only with GCC and when doing development work. @@ -1228,7 +1236,7 @@ su - postgres If using GCC, all programs and libraries are compiled so they can be profiled. On backend exit, a subdirectory will be created - that contains the gmon.out file for use in profiling. + that contains the gmon.out file for use in profiling. This option is for use only with GCC and when doing development work. @@ -1238,8 +1246,8 @@ su - postgres - Enables assertion checks in the server, which test for - many cannot happen conditions. This is invaluable for + Enables assertion checks in the server, which test for + many cannot happen conditions. This is invaluable for code development purposes, but the tests can slow down the server significantly. Also, having the tests turned on won't necessarily enhance the @@ -1276,8 +1284,8 @@ su - postgres Compiles PostgreSQL with support for the dynamic tracing tool DTrace. - - for more information.]]> + See + for more information. @@ -1293,7 +1301,7 @@ su - postgres can be specified in the environment variable DTRACEFLAGS. On Solaris, to include DTrace support in a 64-bit binary, you must specify - DTRACEFLAGS="-64" to configure. For example, + DTRACEFLAGS="-64" to configure. For example, using the GCC compiler: ./configure CC='gcc -m64' --enable-dtrace DTRACEFLAGS='-64' ... @@ -1312,7 +1320,7 @@ su - postgres Enable tests using the Perl TAP tools. This requires a Perl installation and the Perl module IPC::Run. - for more information.]]> + See for more information. @@ -1322,10 +1330,10 @@ su - postgres If you prefer a C compiler different from the one configure picks, you can set the - environment variable CC to the program of your choice. + environment variable CC to the program of your choice. By default, configure will pick gcc if available, else the platform's - default (usually cc). Similarly, you can override the + default (usually cc). 
Similarly, you can override the default compiler flags if needed with the CFLAGS variable. @@ -1333,7 +1341,7 @@ su - postgres You can specify environment variables on the configure command line, for example: -./configure CC=/opt/bin/gcc CFLAGS='-O2 -pipe' +./configure CC=/opt/bin/gcc CFLAGS='-O2 -pipe' @@ -1369,6 +1377,16 @@ su - postgres + + CLANG + + + path to clang program used to process source code + for inlining when compiling with --with-llvm + + + + CPP @@ -1387,6 +1405,24 @@ su - postgres + + CXX + + + C++ compiler + + + + + + CXXFLAGS + + + options to pass to the C++ compiler + + + + DTRACE @@ -1441,6 +1477,16 @@ su - postgres + + LLVM_CONFIG + + + llvm-config program used to locate the + LLVM installation. + + + + MSGFMT @@ -1469,9 +1515,7 @@ su - postgres whether Python 2 or 3 is specified here (or otherwise implicitly chosen) determines which variant of the PL/Python language becomes available. See - PL/Python - documentation]]> - ]]> + for more information. @@ -1502,51 +1546,51 @@ su - postgres Sometimes it is useful to add compiler flags after-the-fact to the set - that were chosen by configure. An important example is - that gcc's When developing code inside the server, it is recommended to - use the configure options (which + turns on many run-time error checks) and (which improves the usefulness of debugging tools). If using GCC, it is best to build with an optimization level of - at least , because using no optimization + () disables some important compiler warnings (such as the use of uninitialized variables). However, non-zero optimization levels can complicate debugging because stepping through compiled code will usually not match up one-to-one with source code lines. If you get confused while trying to debug optimized code, recompile the specific files of interest with - - The COPT and PROFILE environment variables are - actually handled identically by the PostgreSQL + The COPT and PROFILE environment variables are + actually handled identically by the PostgreSQL makefiles. Which to use is a matter of preference, but a common habit - among developers is to use PROFILE for one-time flag - adjustments, while COPT might be kept set all the time. + among developers is to use PROFILE for one-time flag + adjustments, while COPT might be kept set all the time. @@ -1555,12 +1599,13 @@ su - postgres Build - To start the build, type: + To start the build, type either of: make +make all - (Remember to use GNU make.) The build - will take a few minutes depending on your + (Remember to use GNU make.) + The build will take a few minutes depending on your hardware. The last line displayed should be: All of PostgreSQL successfully made. Ready to install. @@ -1579,6 +1624,18 @@ All of PostgreSQL successfully made. Ready to install. PostgreSQL, contrib, and documentation successfully made. Ready to install. + + + If you want to invoke the build from another makefile rather than + manually, you must unset MAKELEVEL or set it to zero, + for instance like this: + +build-postgresql: + $(MAKE) -C postgresql MAKELEVEL=0 all + + Failure to do that can lead to strange error messages, typically about + missing header files. + @@ -1591,17 +1648,14 @@ PostgreSQL, contrib, and documentation successfully made. Ready to install. If you want to test the newly built server before you install it, you can run the regression tests at this point. 
The regression - tests are a test suite to verify that PostgreSQL + tests are a test suite to verify that PostgreSQL runs on your machine in the way the developers expected it to. Type: make check (This won't work as root; do it as an unprivileged user.) - src/test/regress/README and the - documentation contain]]> - contains]]> + See for detailed information about interpreting the test results. You can repeat this test at any later time by issuing the same command. @@ -1613,20 +1667,19 @@ PostgreSQL, contrib, and documentation successfully made. Ready to install. If you are upgrading an existing system be sure to read - - ]]> + , which has instructions about upgrading a cluster. - To install PostgreSQL enter: + To install PostgreSQL enter: make install This will install files into the directories that were specified - in . Make sure that you have appropriate + in . Make sure that you have appropriate permissions to write into that area. Normally you need to do this step as root. Alternatively, you can create the target directories in advance and arrange for appropriate permissions to @@ -1665,8 +1718,8 @@ PostgreSQL, contrib, and documentation successfully made. Ready to install. The standard installation provides all the header files needed for client application development as well as for server-side program development, such as custom functions or data types written in C. - (Prior to PostgreSQL 8.0, a separate make - install-all-headers command was needed for the latter, but this + (Prior to PostgreSQL 8.0, a separate make + install-all-headers command was needed for the latter, but this step has been folded into the standard install.) @@ -1676,12 +1729,12 @@ PostgreSQL, contrib, and documentation successfully made. Ready to install. If you want to install only the client applications and interface libraries, then you can use these commands: -make -C src/bin install -make -C src/include install -make -C src/interfaces install -make -C doc install +make -C src/bin install +make -C src/include install +make -C src/interfaces install +make -C doc install - src/bin has a few binaries for server-only use, + src/bin has a few binaries for server-only use, but they are small. @@ -1692,7 +1745,7 @@ PostgreSQL, contrib, and documentation successfully made. Ready to install. Uninstallation: To undo the installation use the command make - uninstall. However, this will not remove any created directories. + uninstall. However, this will not remove any created directories. @@ -1702,10 +1755,10 @@ PostgreSQL, contrib, and documentation successfully made. Ready to install. After the installation you can free disk space by removing the built files from the source tree with the command make - clean. This will preserve the files made by the configure - program, so that you can rebuild everything with make + clean. This will preserve the files made by the configure + program, so that you can rebuild everything with make later on. To reset the source tree to the state in which it was - distributed, use make distclean. If you are going to + distributed, use make distclean. If you are going to build for several platforms within the same source tree you must do this and re-configure for each platform. (Alternatively, use a separate build tree for each platform, so that the source tree @@ -1714,10 +1767,10 @@ PostgreSQL, contrib, and documentation successfully made. Ready to install. 
- If you perform a build and then discover that your configure - options were wrong, or if you change anything that configure + If you perform a build and then discover that your configure + options were wrong, or if you change anything that configure investigates (for example, software upgrades), then it's a good - idea to do make distclean before reconfiguring and + idea to do make distclean before reconfiguring and rebuilding. Without this, your changes in configuration choices might not propagate everywhere they need to. @@ -1738,34 +1791,34 @@ PostgreSQL, contrib, and documentation successfully made. Ready to install. you need to tell the system how to find the newly installed shared libraries. The systems on which this is not necessary include - FreeBSD, - HP-UX, - Linux, - NetBSD, OpenBSD, and - Solaris. + FreeBSD, + HP-UX, + Linux, + NetBSD, OpenBSD, and + Solaris. The method to set the shared library search path varies between platforms, but the most widely-used method is to set the - environment variable LD_LIBRARY_PATH like so: In Bourne - shells (sh, ksh, bash, zsh): + environment variable LD_LIBRARY_PATH like so: In Bourne + shells (sh, ksh, bash, zsh): LD_LIBRARY_PATH=/usr/local/pgsql/lib export LD_LIBRARY_PATH - or in csh or tcsh: + or in csh or tcsh: setenv LD_LIBRARY_PATH /usr/local/pgsql/lib - Replace /usr/local/pgsql/lib with whatever you set - @@ -1796,17 +1849,17 @@ libpq.so.2.1: cannot open shared object file: No such file or directory ldconfig - If you are on Linux and you have root + If you are on Linux and you have root access, you can run: /sbin/ldconfig /usr/local/pgsql/lib (or equivalent directory) after installation to enable the run-time linker to find the shared libraries faster. Refer to the - manual page of ldconfig for more information. On - FreeBSD, NetBSD, and OpenBSD the command is: + manual page of ldconfig for more information. On + FreeBSD, NetBSD, and OpenBSD the command is: /sbin/ldconfig -m /usr/local/pgsql/lib @@ -1823,24 +1876,24 @@ libpq.so.2.1: cannot open shared object file: No such file or directory - If you installed into /usr/local/pgsql or some other + If you installed into /usr/local/pgsql or some other location that is not searched for programs by default, you should - add /usr/local/pgsql/bin (or whatever you set - To do this, add the following to your shell start-up file, such as - ~/.bash_profile (or /etc/profile, if you + ~/.bash_profile (or /etc/profile, if you want it to affect all users): PATH=/usr/local/pgsql/bin:$PATH export PATH - If you are using csh or tcsh, then use this command: + If you are using csh or tcsh, then use this command: set path = ( /usr/local/pgsql/bin $path ) @@ -1850,7 +1903,7 @@ set path = ( /usr/local/pgsql/bin $path ) MANPATH - To enable your system to find the man + To enable your system to find the man documentation, you need to add lines like the following to a shell start-up file unless you installed into a location that is searched by default: @@ -1861,197 +1914,36 @@ export MANPATH - The environment variables PGHOST and PGPORT + The environment variables PGHOST and PGPORT specify to client applications the host and port of the database server, overriding the compiled-in defaults. If you are going to run client applications remotely then it is convenient if every - user that plans to use the database sets PGHOST. This + user that plans to use the database sets PGHOST. This is not required, however; the settings can be communicated via command line options to most client programs. 
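For example, a user who always works against one particular remote server might add something like the following to a shell start-up file (the host name and port are purely illustrative):

    # make client programs such as psql connect to this server by default
    PGHOST=db.example.com
    PGPORT=5433
    export PGHOST PGPORT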
- - - Getting Started - - - The following is a quick summary of how to get PostgreSQL up and - running once installed. The main documentation contains more information. - - - - - - Create a user account for the PostgreSQL - server. This is the user the server will run as. For production - use you should create a separate, unprivileged account - (postgres is commonly used). If you do not have root - access or just want to play around, your own user account is - enough, but running the server as root is a security risk and - will not work. - -adduser postgres - - - - - - - Create a database installation with the initdb - command. To run initdb you must be logged in to your - PostgreSQL server account. It will not work as - root. - -root# mkdir /usr/local/pgsql/data -root# chown postgres /usr/local/pgsql/data -root# su - postgres -postgres$ /usr/local/pgsql/bin/initdb -D /usr/local/pgsql/data - - - - - The - - - - - At this point, if you did not use the initdb -A - option, you might want to modify pg_hba.conf to control - local access to the server before you start it. The default is to - trust all local users. - - - - - - The previous initdb step should have told you how to - start up the database server. Do so now. The command should look - something like: - -/usr/local/pgsql/bin/postgres -D /usr/local/pgsql/data - - This will start the server in the foreground. To put the server - in the background use something like: - -nohup /usr/local/pgsql/bin/postgres -D /usr/local/pgsql/data \ - </dev/null >>server.log 2>&1 </dev/null & - - - - - To stop a server running in the background you can type: - -kill `cat /usr/local/pgsql/data/postmaster.pid` - - - - - - - Create a database: - -createdb testdb - - Then enter: - -psql testdb - - to connect to that database. At the prompt you can enter SQL - commands and start experimenting. - - - - - - - What Now? - - - - - - The PostgreSQL distribution contains a - comprehensive documentation set, which you should read sometime. - After installation, the documentation can be accessed by - pointing your browser to - /usr/local/pgsql/doc/html/index.html, unless you - changed the installation directories. - - - - The first few chapters of the main documentation are the Tutorial, - which should be your first reading if you are completely new to - SQL databases. If you are familiar with database - concepts then you want to proceed with part on server - administration, which contains information about how to set up - the database server, database users, and authentication. - - - - - - Usually, you will want to modify your computer so that it will - automatically start the database server whenever it boots. Some - suggestions for this are in the documentation. - - - - - - Run the regression tests against the installed server (using - make installcheck). If you didn't run the - tests before installation, you should definitely do it now. This - is also explained in the documentation. - - - - - - By default, PostgreSQL is configured to run on - minimal hardware. This allows it to start up with almost any - hardware configuration. The default configuration is, however, - not designed for optimum performance. To achieve optimum - performance, several server parameters must be adjusted, the two - most common being shared_buffers and - work_mem. - Other parameters mentioned in the documentation also affect - performance. 
- - - - - -]]> - - Supported Platforms A platform (that is, a CPU architecture and operating system combination) - is considered supported by the PostgreSQL development + is considered supported by the PostgreSQL development community if the code contains provisions to work on that platform and it has recently been verified to build and pass its regression tests on that platform. Currently, most testing of platform compatibility is done automatically by test machines in the PostgreSQL Build Farm. - If you are interested in using PostgreSQL on a platform + If you are interested in using PostgreSQL on a platform that is not represented in the build farm, but on which the code works or can be made to work, you are strongly encouraged to set up a build farm member machine so that continued compatibility can be assured. - In general, PostgreSQL can be expected to work on + In general, PostgreSQL can be expected to work on these CPU architectures: x86, x86_64, IA64, PowerPC, PowerPC 64, S/390, S/390x, Sparc, Sparc 64, ARM, MIPS, MIPSEL, and PA-RISC. Code support exists for M68K, M32R, and VAX, but these @@ -2061,13 +1953,13 @@ kill `cat /usr/local/pgsql/data/postmaster.pid` - PostgreSQL can be expected to work on these operating + PostgreSQL can be expected to work on these operating systems: Linux (all recent distributions), Windows (Win2000 SP4 and later), FreeBSD, OpenBSD, NetBSD, macOS, AIX, HP/UX, and Solaris. Other Unix-like systems may also work but are not currently being tested. In most cases, all CPU architectures supported by a given operating system will work. Look in - below to see if + below to see if there is information specific to your operating system, particularly if using an older system. @@ -2076,7 +1968,7 @@ kill `cat /usr/local/pgsql/data/postmaster.pid` If you have installation problems on a platform that is known to be supported according to recent build farm results, please report it to pgsql-bugs@postgresql.org. If you are interested - in porting PostgreSQL to a new platform, + in porting PostgreSQL to a new platform, pgsql-hackers@postgresql.org is the appropriate place to discuss that. @@ -2089,10 +1981,8 @@ kill `cat /usr/local/pgsql/data/postmaster.pid` This section documents additional platform-specific issues regarding the installation and setup of PostgreSQL. Be sure to read the installation instructions, and in - particular as well. Also, - check src/test/regress/README and the documentation]]> - ]]> regarding the + particular as well. Also, + check regarding the interpretation of regression test results. @@ -2158,7 +2048,7 @@ kill `cat /usr/local/pgsql/data/postmaster.pid` Use the following configure flags in addition to your own if you have installed Readline or libz in - /usr/local: + /usr/local: --with-includes=/usr/local/include --with-libraries=/usr/local/lib. @@ -2211,9 +2101,9 @@ kill `cat /usr/local/pgsql/data/postmaster.pid` Internet Address Issues - PostgreSQL relies on the system's getaddrinfo function - to parse IP addresses in listen_addresses, - pg_hba.conf, etc. Older versions of AIX have assorted + PostgreSQL relies on the system's getaddrinfo function + to parse IP addresses in listen_addresses, + pg_hba.conf, etc. Older versions of AIX have assorted bugs in this function. If you have problems related to these settings, updating to the appropriate AIX fix level shown above should take care of it. @@ -2359,7 +2249,7 @@ ERROR: could not load library "/opt/dbs/pgsql/lib/plperl.so": Bad address xlc might differ.) 
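Before configuring for a 64-bit AIX build, the environment is typically prepared along these lines (the value shown is illustrative of a 64-bit tool chain):

    # have AIX's build utilities default to 64-bit objects
    export OBJECT_MODE=64
    ./configure ...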
If you omit the export of OBJECT_MODE, your build may fail with linker errors. When OBJECT_MODE is set, it tells AIX's build utilities - such as ar, as, and ld what + such as ar, as, and ld what type of objects to default to handling. @@ -2443,7 +2333,7 @@ ERROR: could not load library "/opt/dbs/pgsql/lib/plperl.so": Bad address PostgreSQL can be built using Cygwin, a Linux-like environment for Windows, but that method is inferior to the native Windows build - )]]> and + (see ) and running a server under Cygwin is no longer recommended. @@ -2567,17 +2457,13 @@ PHSS_30966 s700_800 ld(1) and linker tools cumulative patch On general principles you should be current on libc and ld/dld patches, as well as compiler patches if you are using HP's C compiler. See HP's support sites such - as and - for free + as for free copies of their latest patches. If you are building on a PA-RISC 2.0 machine and want to have - 64-bit binaries using GCC, you must use GCC 64-bit version. GCC - binaries for HP-UX PA-RISC and Itanium are available from - . Don't forget to - get and install binutils at the same time. + 64-bit binaries using GCC, you must use a GCC 64-bit version. @@ -2627,6 +2513,57 @@ PHSS_30849 s700_800 u2comp/be/plugin library Patch + + macOS + + + macOS + installation on + + + + On recent macOS releases, it's necessary to + embed the sysroot path in the include switches used to + find some system header files. This results in the outputs of + the configure script varying depending on + which SDK version was used during configure. + That shouldn't pose any problem in simple scenarios, but if you are + trying to do something like building an extension on a different machine + than the server code was built on, you may need to force use of a + different sysroot path. To do that, set PG_SYSROOT, + for example + +make PG_SYSROOT=/desired/path all + + To find out the appropriate path on your machine, run + +xcodebuild -version -sdk macosx Path + + Note that building an extension using a different sysroot version than + was used to build the core server is not really recommended; in the + worst case it could result in hard-to-debug ABI inconsistencies. + + + + You can also select a non-default sysroot path when configuring, by + specifying PG_SYSROOT + to configure: + +./configure ... PG_SYSROOT=/desired/path + + + + + macOS's System Integrity + Protection (SIP) feature breaks make check, + because it prevents passing the needed setting + of DYLD_LIBRARY_PATH down to the executables being + tested. You can work around that by doing make + install before make check. + Most Postgres developers just turn off SIP, though. + + + MinGW/Native Windows @@ -2641,8 +2578,7 @@ PHSS_30849 s700_800 u2comp/be/plugin library Patch Microsoft's Visual C++ compiler suite. The MinGW build variant uses the normal build system described in this chapter; the Visual C++ build works completely differently - and is described in ]]>. + and is described in . It is a fully native build and uses no additional software like MinGW. A ready-made installer is available on the main PostgreSQL web site. @@ -2662,7 +2598,7 @@ PHSS_30849 s700_800 u2comp/be/plugin library Patch To build 64 bit binaries using MinGW, install the 64 bit tool set - from , put its bin + from , put its bin directory in the PATH, and run configure with the --host=x86_64-w64-mingw32 option. 
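Assuming the MinGW-w64 tool set has been unpacked under an illustrative path such as c:\mingw64 and the build is run from an MSYS shell, that amounts to roughly:

    # make the 64-bit tools visible, then configure a native 64-bit Windows build
    PATH=/c/mingw64/bin:$PATH
    export PATH
    ./configure --host=x86_64-w64-mingw32
    make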
@@ -2680,10 +2616,10 @@ PHSS_30849 s700_800 u2comp/be/plugin library Patch If PostgreSQL on Windows crashes, it has the ability to generate - minidumps that can be used to track down the cause + minidumps that can be used to track down the cause for the crash, similar to core dumps on Unix. These dumps can be - read using the Windows Debugger Tools or using - Visual Studio. To enable the generation of dumps + read using the Windows Debugger Tools or using + Visual Studio. To enable the generation of dumps on Windows, create a subdirectory named crashdumps inside the cluster data directory. The dumps will then be written into this directory with a unique name based on the identifier of @@ -2721,14 +2657,14 @@ PHSS_30849 s700_800 u2comp/be/plugin library Patch You can download Sun Studio - from . + from . Many of GNU tools are integrated into Solaris 10, or they are present on the Solaris companion CD. If you like packages for older version of Solaris, you can find these tools at . If you prefer sources, look - at . + at . @@ -2746,35 +2682,11 @@ PHSS_30849 s700_800 u2comp/be/plugin library Patch configure ... LDFLAGS="-R /usr/sfw/lib:/opt/sfw/lib:/usr/local/lib" See - the ld1 + the ld1 man page for more information. - - 64-bit Build Sometimes Crashes - - - On Solaris 7 and older, the 64-bit version of libc has a buggy - vsnprintf routine, which leads to erratic - core dumps in PostgreSQL. The simplest known workaround is to - force PostgreSQL to use its own version of vsnprintf rather than - the library copy. To do this, after you - run configure edit a file produced by - configure: - In src/Makefile.global, change the line - -LIBOBJS = - - to read - -LIBOBJS = snprintf.o - - (There might be other files already listed in this variable. - Order does not matter.) Then build as usual. - - - Compiling for Optimal Performance @@ -2803,11 +2715,8 @@ LIBOBJS = snprintf.o Using DTrace for Tracing PostgreSQL - Yes, using DTrace is possible. See - ]]> for further - information. You can also find more information in this - article: . + Yes, using DTrace is possible. See for + further information. diff --git a/doc/src/sgml/intagg.sgml b/doc/src/sgml/intagg.sgml index 669c901764..c410f64f3e 100644 --- a/doc/src/sgml/intagg.sgml +++ b/doc/src/sgml/intagg.sgml @@ -28,10 +28,10 @@ The aggregator is an aggregate function - int_array_aggregate(integer) + int_array_aggregate(integer) that produces an integer array containing exactly the integers it is fed. - This is a wrapper around array_agg, + This is a wrapper around array_agg, which does the same thing for any array type. @@ -41,10 +41,10 @@ The enumerator is a function - int_array_enum(integer[]) - that returns setof integer. It is essentially the reverse + int_array_enum(integer[]) + that returns setof integer. It is essentially the reverse operation of the aggregator: given an array of integers, expand it - into a set of rows. This is a wrapper around unnest, + into a set of rows. This is a wrapper around unnest, which does the same thing for any array type. 
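As a quick, self-contained illustration of these two functions (the database name test is purely illustrative, and the intagg extension must first be created in it):

    # aggregate a set of integers into an array, then expand an array back into rows
    psql -d test -c "CREATE EXTENSION intagg"
    psql -d test -c "SELECT int_array_aggregate(x) FROM generate_series(1, 5) AS g(x)"
    psql -d test -c "SELECT int_array_enum('{1,2,3}'::int[])"

The second query expands the array back into three rows; the worked example below shows how these functions are typically combined with a summary table.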
@@ -67,7 +67,7 @@ CREATE TABLE one_to_many(left INT REFERENCES left, right INT REFERENCES right); SELECT right.* from right JOIN one_to_many ON (right.id = one_to_many.right) - WHERE one_to_many.left = item; + WHERE one_to_many.left = item; This will return all the items in the right hand table for an entry @@ -76,7 +76,7 @@ SELECT right.* from right JOIN one_to_many ON (right.id = one_to_many.right) Now, this methodology can be cumbersome with a very large number of - entries in the one_to_many table. Often, + entries in the one_to_many table. Often, a join like this would result in an index scan and a fetch for each right hand entry in the table for a particular left hand entry. If you have a very dynamic system, there is not much you @@ -95,30 +95,30 @@ CREATE TABLE summary AS the array; that's why there is an array enumerator. You can do -SELECT left, int_array_enum(right) FROM summary WHERE left = item; +SELECT left, int_array_enum(right) FROM summary WHERE left = item; - The above query using int_array_enum produces the same results + The above query using int_array_enum produces the same results as -SELECT left, right FROM one_to_many WHERE left = item; +SELECT left, right FROM one_to_many WHERE left = item; The difference is that the query against the summary table has to get only one row from the table, whereas the direct query against - one_to_many must index scan and fetch a row for each entry. + one_to_many must index scan and fetch a row for each entry. - On one system, an EXPLAIN showed a query with a cost of 8488 was + On one system, an EXPLAIN showed a query with a cost of 8488 was reduced to a cost of 329. The original query was a join involving the - one_to_many table, which was replaced by: + one_to_many table, which was replaced by: SELECT right, count(right) FROM ( SELECT left, int_array_enum(right) AS right - FROM summary JOIN (SELECT left FROM left_table WHERE left = item) AS lefts + FROM summary JOIN (SELECT left FROM left_table WHERE left = item) AS lefts ON (summary.left = lefts.left) ) AS list GROUP BY right diff --git a/doc/src/sgml/intarray.sgml b/doc/src/sgml/intarray.sgml index ccb1fdecea..b633cf3677 100644 --- a/doc/src/sgml/intarray.sgml +++ b/doc/src/sgml/intarray.sgml @@ -8,7 +8,7 @@ - The intarray module provides a number of useful functions + The intarray module provides a number of useful functions and operators for manipulating null-free arrays of integers. There is also support for indexed searches using some of the operators. @@ -25,16 +25,16 @@ - <filename>intarray</> Functions and Operators + <filename>intarray</filename> Functions and Operators The functions provided by the intarray module - are shown in , the operators - in . + are shown in , the operators + in .
- <filename>intarray</> Functions + <filename>intarray</filename> Functions @@ -59,7 +59,7 @@ sort(int[], text dir)sort int[] - sort array — dir must be asc or desc + sort array — dir must be asc or desc sort('{1,2,3}'::int[], 'desc') {3,2,1} @@ -99,7 +99,7 @@ idx(int[], int item)idx int - index of first element matching item (0 if none) + index of first element matching item (0 if none) idx(array[11,22,33,22,11], 22) 2 @@ -107,7 +107,7 @@ subarray(int[], int start, int len)subarray int[] - portion of array starting at position start, len elements + portion of array starting at position start, len elements subarray('{1,2,3,2,1}'::int[], 2, 3) {2,3,2} @@ -115,7 +115,7 @@ subarray(int[], int start) int[] - portion of array starting at position start + portion of array starting at position start subarray('{1,2,3,2,1}'::int[], 2) {2,3,2,1} @@ -133,7 +133,7 @@
- <filename>intarray</> Operators + <filename>intarray</filename> Operators @@ -148,17 +148,17 @@ int[] && int[] boolean - overlap — true if arrays have at least one common element + overlap — true if arrays have at least one common element int[] @> int[] boolean - contains — true if left array contains right array + contains — true if left array contains right array int[] <@ int[] boolean - contained — true if left array is contained in right array + contained — true if left array is contained in right array # int[] @@ -168,7 +168,7 @@ int[] # int int - index (same as idx function) + index (same as idx function) int[] + int @@ -208,28 +208,28 @@ int[] @@ query_int boolean - true if array satisfies query (see below) + true if array satisfies query (see below) query_int ~~ int[] boolean - true if array satisfies query (commutator of @@) + true if array satisfies query (commutator of @@)
- (Before PostgreSQL 8.2, the containment operators @> and - <@ were respectively called @ and ~. + (Before PostgreSQL 8.2, the containment operators @> and + <@ were respectively called @ and ~. These names are still available, but are deprecated and will eventually be retired. Notice that the old names are reversed from the convention formerly followed by the core geometric data types!) - The operators &&, @> and - <@ are equivalent to PostgreSQL's built-in + The operators &&, @> and + <@ are equivalent to PostgreSQL's built-in operators of the same names, except that they work only on integer arrays that do not contain nulls, while the built-in operators work for any array type. This restriction makes them faster than the built-in operators @@ -237,14 +237,14 @@ - The @@ and ~~ operators test whether an array - satisfies a query, which is expressed as a value of a - specialized data type query_int. A query + The @@ and ~~ operators test whether an array + satisfies a query, which is expressed as a value of a + specialized data type query_int. A query consists of integer values that are checked against the elements of - the array, possibly combined using the operators & - (AND), | (OR), and ! (NOT). Parentheses + the array, possibly combined using the operators & + (AND), | (OR), and ! (NOT). Parentheses can be used as needed. For example, - the query 1&(2|3) matches arrays that contain 1 + the query 1&(2|3) matches arrays that contain 1 and also contain either 2 or 3. @@ -253,16 +253,16 @@ Index Support - intarray provides index support for the - &&, @>, <@, - and @@ operators, as well as regular array equality. + intarray provides index support for the + &&, @>, <@, + and @@ operators, as well as regular array equality. Two GiST index operator classes are provided: - gist__int_ops (used by default) is suitable for + gist__int_ops (used by default) is suitable for small- to medium-size data sets, while - gist__intbig_ops uses a larger signature and is more + gist__intbig_ops uses a larger signature and is more suitable for indexing large data sets (i.e., columns containing a large number of distinct array values). The implementation uses an RD-tree data structure with @@ -271,7 +271,7 @@ There is also a non-default GIN operator class - gin__int_ops supporting the same operators. + gin__int_ops supporting the same operators. @@ -284,7 +284,7 @@ Example --- a message can be in one or more sections +-- a message can be in one or more sections CREATE TABLE message (mid INT PRIMARY KEY, sections INT[], ...); -- create specialized index @@ -305,9 +305,9 @@ SELECT message.mid FROM message WHERE message.sections @@ '1&2'::query_int; Benchmark - The source directory contrib/intarray/bench contains a + The source directory contrib/intarray/bench contains a benchmark test suite, which can be run against an installed - PostgreSQL server. (It also requires DBD::Pg + PostgreSQL server. (It also requires DBD::Pg to be installed.) To run: @@ -320,7 +320,7 @@ psql -c "CREATE EXTENSION intarray" TEST - The bench.pl script has numerous options, which + The bench.pl script has numerous options, which are displayed when it is run without any arguments. diff --git a/doc/src/sgml/intro.sgml b/doc/src/sgml/intro.sgml index f0dba6f56f..3038826311 100644 --- a/doc/src/sgml/intro.sgml +++ b/doc/src/sgml/intro.sgml @@ -23,22 +23,22 @@ - is an informal introduction for new users. + is an informal introduction for new users. 
- documents the SQL query + documents the SQL query language environment, including data types and functions, as well as user-level performance tuning. Every - PostgreSQL user should read this. + PostgreSQL user should read this. - describes the installation and + describes the installation and administration of the server. Everyone who runs a PostgreSQL server, be it for private use or for others, should read this part. @@ -47,7 +47,7 @@ - describes the programming + describes the programming interfaces for PostgreSQL client programs. @@ -56,7 +56,7 @@ - contains information for + contains information for advanced users about the extensibility capabilities of the server. Topics include user-defined data types and functions. @@ -65,7 +65,7 @@ - contains reference information about + contains reference information about SQL commands, client and server programs. This part supports the other parts with structured information sorted by command or program. @@ -74,8 +74,8 @@ - contains assorted information that might be of - use to PostgreSQL developers. + contains assorted information that might be of + use to PostgreSQL developers. diff --git a/doc/src/sgml/isn.sgml b/doc/src/sgml/isn.sgml index c1da702df6..598dda2e9a 100644 --- a/doc/src/sgml/isn.sgml +++ b/doc/src/sgml/isn.sgml @@ -25,7 +25,7 @@ Data Types - shows the data types provided by + shows the data types provided by the isn module. @@ -123,7 +123,7 @@
UPC numbers are a subset of the EAN13 numbers (they are basically - EAN13 without the first 0 digit). + EAN13 without the first 0 digit). All UPC, ISBN, ISMN and ISSN numbers can be represented as EAN13 @@ -139,7 +139,7 @@ - The ISBN, ISMN, and ISSN types will display the + The ISBN, ISMN, and ISSN types will display the short version of the number (ISxN 10) whenever it's possible, and will show ISxN 13 format for numbers that do not fit in the short version. The EAN13, ISBN13, ISMN13 and @@ -152,7 +152,7 @@ Casts - The isn module provides the following pairs of type casts: + The isn module provides the following pairs of type casts: @@ -209,7 +209,7 @@ - When casting from EAN13 to another type, there is a run-time + When casting from EAN13 to another type, there is a run-time check that the value is within the domain of the other type, and an error is thrown if not. The other casts are simply relabelings that will always succeed. @@ -220,15 +220,15 @@ Functions and Operators - The isn module provides the standard comparison operators, + The isn module provides the standard comparison operators, plus B-tree and hash indexing support for all these data types. In - addition there are several specialized functions; shown in . + addition there are several specialized functions; shown in . In this table, - isn means any one of the module's data types. + isn means any one of the module's data types. - <filename>isn</> Functions + <filename>isn</filename> Functions @@ -285,21 +285,21 @@ When you insert invalid numbers in a table using the weak mode, the number will be inserted with the corrected check digit, but it will be displayed - with an exclamation mark (!) at the end, for example - 0-11-000322-5!. This invalid marker can be checked with - the is_valid function and cleared with the - make_valid function. + with an exclamation mark (!) at the end, for example + 0-11-000322-5!. This invalid marker can be checked with + the is_valid function and cleared with the + make_valid function. You can also force the insertion of invalid numbers even when not in the - weak mode, by appending the ! character at the end of the + weak mode, by appending the ! character at the end of the number. Another special feature is that during input, you can write - ? in place of the check digit, and the correct check digit + ? in place of the check digit, and the correct check digit will be inserted automatically. @@ -355,19 +355,19 @@ SELECT isbn13(id) FROM test; The information to implement this module was collected from several sites, including: - + - - + + The prefixes used for hyphenation were also compiled from: - - + + - - + + Care was taken during the creation of the algorithms and they @@ -384,7 +384,7 @@ SELECT isbn13(id) FROM test; This module was inspired by Garrett A. Wollman's - isbn_issn code. + isbn_issn code. diff --git a/doc/src/sgml/jit.sgml b/doc/src/sgml/jit.sgml new file mode 100644 index 0000000000..a21a07ef71 --- /dev/null +++ b/doc/src/sgml/jit.sgml @@ -0,0 +1,285 @@ + + + + Just-in-Time Compilation (<acronym>JIT</acronym>) + + + JIT + + + + Just-In-Time compilation + JIT + + + + This chapter explains what just-in-time compilation is, and how it can be + configured in PostgreSQL. + + + + What is <acronym>JIT</acronym> compilation? + + + Just-in-Time (JIT) compilation is the process of turning + some form of interpreted program evaluation into a native program, and + doing so at run time. 
+ For example, instead of using general-purpose code that can evaluate + arbitrary SQL expressions to evaluate a particular SQL predicate + like WHERE a.col = 3, it is possible to generate a + function that is specific to that expression and can be natively executed + by the CPU, yielding a speedup. + + + + PostgreSQL has builtin support to perform + JIT compilation using LLVM when + PostgreSQL is built with + --with-llvm. + + + + See src/backend/jit/README for further details. + + + + <acronym>JIT</acronym> Accelerated Operations + + Currently PostgreSQL's JIT + implementation has support for accelerating expression evaluation and + tuple deforming. Several other operations could be accelerated in the + future. + + + Expression evaluation is used to evaluate WHERE + clauses, target lists, aggregates and projections. It can be accelerated + by generating code specific to each case. + + + Tuple deforming is the process of transforming an on-disk tuple (see ) into its in-memory representation. + It can be accelerated by creating a function specific to the table layout + and the number of columns to be extracted. + + + + + Inlining + + PostgreSQL is very extensible and allows new + data types, functions, operators and other database objects to be defined; + see . In fact the built-in objects are implemented + using nearly the same mechanisms. This extensibility implies some + overhead, for example due to function calls (see ). + To reduce that overhead, JIT compilation can inline the + bodies of small functions into the expressions using them. That allows a + significant percentage of the overhead to be optimized away. + + + + + Optimization + + LLVM has support for optimizing generated + code. Some of the optimizations are cheap enough to be performed whenever + JIT is used, while others are only beneficial for + longer-running queries. + See for + more details about optimizations. + + + + + + + When to <acronym>JIT</acronym>? + + + JIT compilation is beneficial primarily for long-running + CPU-bound queries. Frequently these will be analytical queries. For short + queries the added overhead of performing JIT compilation + will often be higher than the time it can save. + + + + To determine whether JIT compilation should be used, + the total estimated cost of a query (see + and + ) is used. + The estimated cost of the query will be compared with the setting of . If the cost is higher, + JIT compilation will be performed. + Two further decisions are then needed. + Firstly, if the estimated cost is more + than the setting of , short + functions and operators used in the query will be inlined. + Secondly, if the estimated cost is more than the setting of , expensive optimizations are + applied to improve the generated code. + Each of these options increases the JIT compilation + overhead, but can reduce query execution time considerably. + + + + These cost-based decisions will be made at plan time, not execution + time. This means that when prepared statements are in use, and a generic + plan is used (see ), the values of the + configuration parameters in effect at prepare time control the decisions, + not the settings at execution time. + + + + + If is set to off, or if no + JIT implementation is available (for example because + the server was compiled without --with-llvm), + JIT will not be performed, even if it would be + beneficial based on the above criteria. Setting + to off has effects at both plan and execution time. + + + + + can be used to see whether + JIT is used or not. 
As an example, here is a query that + is not using JIT: + +=# EXPLAIN ANALYZE SELECT SUM(relpages) FROM pg_class; + QUERY PLAN +------------------------------------------------------------------------------------------------------------- + Aggregate (cost=16.27..16.29 rows=1 width=8) (actual time=0.303..0.303 rows=1 loops=1) + -> Seq Scan on pg_class (cost=0.00..15.42 rows=342 width=4) (actual time=0.017..0.111 rows=356 loops=1) + Planning Time: 0.116 ms + Execution Time: 0.365 ms +(4 rows) + + Given the cost of the plan, it is entirely reasonable that no + JIT was used; the cost of JIT would + have been bigger than the potential savings. Adjusting the cost limits + will lead to JIT use: + +=# SET jit_above_cost = 10; +SET +=# EXPLAIN ANALYZE SELECT SUM(relpages) FROM pg_class; + QUERY PLAN +------------------------------------------------------------------------------------------------------------- + Aggregate (cost=16.27..16.29 rows=1 width=8) (actual time=6.049..6.049 rows=1 loops=1) + -> Seq Scan on pg_class (cost=0.00..15.42 rows=342 width=4) (actual time=0.019..0.052 rows=356 loops=1) + Planning Time: 0.133 ms + JIT: + Functions: 3 + Options: Inlining false, Optimization false, Expressions true, Deforming true + Timing: Generation 1.259 ms, Inlining 0.000 ms, Optimization 0.797 ms, Emission 5.048 ms, Total 7.104 ms + Execution Time: 7.416 ms + + As visible here, JIT was used, but inlining and + expensive optimization were not. If or were also lowered, + that would change. + + + + + Configuration + + + The configuration variable + determines whether JIT + compilation is enabled or disabled. + If it is enabled, the configuration variables + , , and determine + whether JIT compilation is performed for a query, + and how much effort is spent doing so. + + + + determines which JIT + implementation is used. It is rarely required to be changed. See . + + + + For development and debugging purposes a few additional configuration + parameters exist, as described in + . + + + + + Extensibility + + + Inlining Support for Extensions + + PostgreSQL's JIT + implementation can inline the bodies of functions + of types C and internal, as well as + operators based on such functions. To do so for functions in extensions, + the definitions of those functions need to be made available. + When using PGXS to build an extension + against a server that has been compiled with LLVM JIT support, the + relevant files will be built and installed automatically. + + + + The relevant files have to be installed into + $pkglibdir/bitcode/$extension/ and a summary of them + into $pkglibdir/bitcode/$extension.index.bc, where + $pkglibdir is the directory returned by + pg_config --pkglibdir and $extension + is the base name of the extension's shared library. + + + + For functions built into PostgreSQL itself, + the bitcode is installed into + $pkglibdir/bitcode/postgres. + + + + + + + Pluggable <acronym>JIT</acronym> Providers + + + PostgreSQL provides a JIT + implementation based on LLVM. The interface to + the JIT provider is pluggable and the provider can be + changed without recompiling (although currently, the build process only + provides inlining support data for LLVM). + The active provider is chosen via the setting + . + + + + <acronym>JIT</acronym> Provider Interface + + A JIT provider is loaded by dynamically loading the + named shared library. The normal library search path is used to locate + the library. 
To provide the required JIT provider + callbacks and to indicate that the library is actually a + JIT provider, it needs to provide a C function named + _PG_jit_provider_init. This function is passed a + struct that needs to be filled with the callback function pointers for + individual actions: + +struct JitProviderCallbacks +{ + JitProviderResetAfterErrorCB reset_after_error; + JitProviderReleaseContextCB release_context; + JitProviderCompileExprCB compile_expr; +}; + +extern void _PG_jit_provider_init(JitProviderCallbacks *cb); + + + + + + + diff --git a/doc/src/sgml/json.sgml b/doc/src/sgml/json.sgml index 3cf78d6394..e7b68fa0d2 100644 --- a/doc/src/sgml/json.sgml +++ b/doc/src/sgml/json.sgml @@ -1,7 +1,7 @@ - <acronym>JSON</> Types + <acronym>JSON</acronym> Types JSON @@ -13,34 +13,34 @@ JSON data types are for storing JSON (JavaScript Object Notation) - data, as specified in RFC + data, as specified in RFC 7159. Such data can also be stored as text, but the JSON data types have the advantage of enforcing that each stored value is valid according to the JSON rules. There are also assorted JSON-specific functions and operators available for data stored - in these data types; see . + in these data types; see . - There are two JSON data types: json and jsonb. - They accept almost identical sets of values as + There are two JSON data types: json and jsonb. + They accept almost identical sets of values as input. The major practical difference is one of efficiency. The - json data type stores an exact copy of the input text, + json data type stores an exact copy of the input text, which processing functions must reparse on each execution; while - jsonb data is stored in a decomposed binary format that + jsonb data is stored in a decomposed binary format that makes it slightly slower to input due to added conversion overhead, but significantly faster to process, since no reparsing - is needed. jsonb also supports indexing, which can be a + is needed. jsonb also supports indexing, which can be a significant advantage. - Because the json type stores an exact copy of the input text, it + Because the json type stores an exact copy of the input text, it will preserve semantically-insignificant white space between tokens, as well as the order of keys within JSON objects. Also, if a JSON object within the value contains the same key more than once, all the key/value pairs are kept. (The processing functions consider the last value as the - operative one.) By contrast, jsonb does not preserve white + operative one.) By contrast, jsonb does not preserve white space, does not preserve the order of object keys, and does not keep duplicate object keys. If duplicate keys are specified in the input, only the last value is kept. @@ -48,7 +48,7 @@ In general, most applications should prefer to store JSON data as - jsonb, unless there are quite specialized needs, such as + jsonb, unless there are quite specialized needs, such as legacy assumptions about ordering of object keys. @@ -64,15 +64,15 @@ RFC 7159 permits JSON strings to contain Unicode escape sequences - denoted by \uXXXX. In the input - function for the json type, Unicode escapes are allowed + denoted by \uXXXX. In the input + function for the json type, Unicode escapes are allowed regardless of the database encoding, and are checked only for syntactic - correctness (that is, that four hex digits follow \u). 
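Rounding off the JIT provider interface shown above, here is a minimal, non-authoritative
sketch of a provider module. The JitProviderCallbacks fields and the
_PG_jit_provider_init entry point come from the text above; the callback parameter types
(void, JitContext *, ExprState *), the header names, and the noop_* function names are
assumptions based on the server's jit/jit.h and are illustrative only.
<programlisting>
#include "postgres.h"
#include "fmgr.h"
#include "jit/jit.h"            /* assumed to declare JitProviderCallbacks */
#include "nodes/execnodes.h"    /* assumed to declare ExprState */

PG_MODULE_MAGIC;

/* Reset provider-local state after an error (assumed signature). */
static void
noop_reset_after_error(void)
{
}

/* Release resources tied to one JIT context (assumed signature). */
static void
noop_release_context(JitContext *context)
{
}

/*
 * Try to compile one expression; returning false makes the executor fall
 * back to interpreted evaluation (assumed signature).
 */
static bool
noop_compile_expr(ExprState *state)
{
    return false;
}

void
_PG_jit_provider_init(JitProviderCallbacks *cb)
{
    cb->reset_after_error = noop_reset_after_error;
    cb->release_context = noop_release_context;
    cb->compile_expr = noop_compile_expr;
}
</programlisting>
A real provider would, of course, perform actual code generation in compile_expr;
this skeleton merely shows how the callback struct is filled in.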
- However, the input function for jsonb is stricter: it disallows - Unicode escapes for non-ASCII characters (those above U+007F) - unless the database encoding is UTF8. The jsonb type also - rejects \u0000 (because that cannot be represented in - PostgreSQL's text type), and it insists + correctness (that is, that four hex digits follow \u). + However, the input function for jsonb is stricter: it disallows + Unicode escapes for non-ASCII characters (those above U+007F) + unless the database encoding is UTF8. The jsonb type also + rejects \u0000 (because that cannot be represented in + PostgreSQL's text type), and it insists that any use of Unicode surrogate pairs to designate characters outside the Unicode Basic Multilingual Plane be correct. Valid Unicode escapes are converted to the equivalent ASCII or UTF8 character for storage; @@ -82,10 +82,10 @@ Many of the JSON processing functions described - in will convert Unicode escapes to + in will convert Unicode escapes to regular characters, and will therefore throw the same types of errors - just described even if their input is of type json - not jsonb. The fact that the json input function does + just described even if their input is of type json + not jsonb. The fact that the json input function does not make these checks may be considered a historical artifact, although it does allow for simple storage (without processing) of JSON Unicode escapes in a non-UTF8 database encoding. In general, it is best to @@ -95,22 +95,22 @@ - When converting textual JSON input into jsonb, the primitive - types described by RFC 7159 are effectively mapped onto + When converting textual JSON input into jsonb, the primitive + types described by RFC 7159 are effectively mapped onto native PostgreSQL types, as shown - in . + in . Therefore, there are some minor additional constraints on what constitutes valid jsonb data that do not apply to the json type, nor to JSON in the abstract, corresponding to limits on what can be represented by the underlying data type. - Notably, jsonb will reject numbers that are outside the - range of the PostgreSQL numeric data - type, while json will not. Such implementation-defined - restrictions are permitted by RFC 7159. However, in + Notably, jsonb will reject numbers that are outside the + range of the PostgreSQL numeric data + type, while json will not. Such implementation-defined + restrictions are permitted by RFC 7159. However, in practice such problems are far more likely to occur in other - implementations, as it is common to represent JSON's number + implementations, as it is common to represent JSON's number primitive type as IEEE 754 double precision floating point - (which RFC 7159 explicitly anticipates and allows for). + (which RFC 7159 explicitly anticipates and allows for). When using JSON as an interchange format with such systems, the danger of losing numeric precision compared to data originally stored by PostgreSQL should be considered. @@ -134,23 +134,23 @@ - string - text - \u0000 is disallowed, as are non-ASCII Unicode + string + text + \u0000 is disallowed, as are non-ASCII Unicode escapes if database encoding is not UTF8 - number - numeric + number + numeric NaN and infinity values are disallowed - boolean - boolean + boolean + boolean Only lowercase true and false spellings are accepted - null + null (none) SQL NULL is a different concept @@ -162,10 +162,10 @@ JSON Input and Output Syntax The input/output syntax for the JSON data types is as specified in - RFC 7159. + RFC 7159. 
- The following are all valid json (or jsonb) expressions: + The following are all valid json (or jsonb) expressions: -- Simple scalar/primitive value -- Primitive values can be numbers, quoted strings, true, false, or null @@ -185,8 +185,8 @@ SELECT '{"foo": [true, "bar"], "tags": {"a": 1, "b": null}}'::json; As previously stated, when a JSON value is input and then printed without - any additional processing, json outputs the same text that was - input, while jsonb does not preserve semantically-insignificant + any additional processing, json outputs the same text that was + input, while jsonb does not preserve semantically-insignificant details such as whitespace. For example, note the differences here: SELECT '{"bar": "baz", "balance": 7.77, "active":false}'::json; @@ -202,9 +202,9 @@ SELECT '{"bar": "baz", "balance": 7.77, "active":false}'::jsonb; (1 row) One semantically-insignificant detail worth noting is that - in jsonb, numbers will be printed according to the behavior of the - underlying numeric type. In practice this means that numbers - entered with E notation will be printed without it, for + in jsonb, numbers will be printed according to the behavior of the + underlying numeric type. In practice this means that numbers + entered with E notation will be printed without it, for example: SELECT '{"reading": 1.230e-5}'::json, '{"reading": 1.230e-5}'::jsonb; @@ -213,7 +213,7 @@ SELECT '{"reading": 1.230e-5}'::json, '{"reading": 1.230e-5}'::jsonb; {"reading": 1.230e-5} | {"reading": 0.00001230} (1 row) - However, jsonb will preserve trailing fractional zeroes, as seen + However, jsonb will preserve trailing fractional zeroes, as seen in this example, even though those are semantically insignificant for purposes such as equality checks. @@ -231,7 +231,7 @@ SELECT '{"reading": 1.230e-5}'::json, '{"reading": 1.230e-5}'::jsonb; have a somewhat fixed structure. The structure is typically unenforced (though enforcing some business rules declaratively is possible), but having a predictable structure makes it easier to write - queries that usefully summarize a set of documents (datums) + queries that usefully summarize a set of documents (datums) in a table. @@ -249,7 +249,7 @@ SELECT '{"reading": 1.230e-5}'::json, '{"reading": 1.230e-5}'::jsonb; - <type>jsonb</> Containment and Existence + <type>jsonb</type> Containment and Existence jsonb containment @@ -259,10 +259,10 @@ SELECT '{"reading": 1.230e-5}'::json, '{"reading": 1.230e-5}'::jsonb; existence - Testing containment is an important capability of - jsonb. There is no parallel set of facilities for the - json type. Containment tests whether - one jsonb document has contained within it another one. + Testing containment is an important capability of + jsonb. There is no parallel set of facilities for the + json type. Containment tests whether + one jsonb document has contained within it another one. 
These examples return true except as noted: @@ -282,7 +282,7 @@ SELECT '[1, 2, 3]'::jsonb @> '[1, 2, 2]'::jsonb; -- within the object on the left side: SELECT '{"product": "PostgreSQL", "version": 9.4, "jsonb": true}'::jsonb @> '{"version": 9.4}'::jsonb; --- The array on the right side is not considered contained within the +-- The array on the right side is not considered contained within the -- array on the left, even though a similar array is nested within it: SELECT '[1, 2, [1, 3]]'::jsonb @> '[1, 3]'::jsonb; -- yields false @@ -319,10 +319,10 @@ SELECT '"bar"'::jsonb @> '["bar"]'::jsonb; -- yields false - jsonb also has an existence operator, which is + jsonb also has an existence operator, which is a variation on the theme of containment: it tests whether a string - (given as a text value) appears as an object key or array - element at the top level of the jsonb value. + (given as a text value) appears as an object key or array + element at the top level of the jsonb value. These examples return true except as noted: @@ -353,11 +353,11 @@ SELECT '"foo"'::jsonb ? 'foo'; Because JSON containment is nested, an appropriate query can skip explicit selection of sub-objects. As an example, suppose that we have - a doc column containing objects at the top level, with - most objects containing tags fields that contain arrays of + a doc column containing objects at the top level, with + most objects containing tags fields that contain arrays of sub-objects. This query finds entries in which sub-objects containing - both "term":"paris" and "term":"food" appear, - while ignoring any such keys outside the tags array: + both "term":"paris" and "term":"food" appear, + while ignoring any such keys outside the tags array: SELECT doc->'site_name' FROM websites WHERE doc @> '{"tags":[{"term":"paris"}, {"term":"food"}]}'; @@ -380,12 +380,12 @@ SELECT doc->'site_name' FROM websites The various containment and existence operators, along with all other JSON operators and functions are documented - in . + in . - <type>jsonb</> Indexing + <type>jsonb</type> Indexing jsonb indexes on @@ -394,23 +394,23 @@ SELECT doc->'site_name' FROM websites GIN indexes can be used to efficiently search for keys or key/value pairs occurring within a large number of - jsonb documents (datums). - Two GIN operator classes are provided, offering different + jsonb documents (datums). + Two GIN operator classes are provided, offering different performance and flexibility trade-offs. - The default GIN operator class for jsonb supports queries with - top-level key-exists operators ?, ?& - and ?| operators and path/value-exists operator - @>. + The default GIN operator class for jsonb supports queries with + top-level key-exists operators ?, ?& + and ?| operators and path/value-exists operator + @>. (For details of the semantics that these operators - implement, see .) + implement, see .) An example of creating an index with this operator class is: CREATE INDEX idxgin ON api USING GIN (jdoc); - The non-default GIN operator class jsonb_path_ops - supports indexing the @> operator only. + The non-default GIN operator class jsonb_path_ops + supports indexing the @> operator only. An example of creating an index with this operator class is: CREATE INDEX idxginp ON api USING GIN (jdoc jsonb_path_ops); @@ -438,8 +438,8 @@ CREATE INDEX idxginp ON api USING GIN (jdoc jsonb_path_ops); ] } - We store these documents in a table named api, - in a jsonb column named jdoc. 
+ We store these documents in a table named api, + in a jsonb column named jdoc. If a GIN index is created on this column, queries like the following can make use of the index: @@ -447,25 +447,25 @@ CREATE INDEX idxginp ON api USING GIN (jdoc jsonb_path_ops); SELECT jdoc->'guid', jdoc->'name' FROM api WHERE jdoc @> '{"company": "Magnafone"}'; However, the index could not be used for queries like the - following, because though the operator ? is indexable, - it is not applied directly to the indexed column jdoc: + following, because though the operator ? is indexable, + it is not applied directly to the indexed column jdoc: -- Find documents in which the key "tags" contains key or array element "qui" SELECT jdoc->'guid', jdoc->'name' FROM api WHERE jdoc -> 'tags' ? 'qui'; Still, with appropriate use of expression indexes, the above query can use an index. If querying for particular items within - the "tags" key is common, defining an index like this + the "tags" key is common, defining an index like this may be worthwhile: CREATE INDEX idxgintags ON api USING GIN ((jdoc -> 'tags')); - Now, the WHERE clause jdoc -> 'tags' ? 'qui' + Now, the WHERE clause jdoc -> 'tags' ? 'qui' will be recognized as an application of the indexable - operator ? to the indexed - expression jdoc -> 'tags'. + operator ? to the indexed + expression jdoc -> 'tags'. (More information on expression indexes can be found in .) + linkend="indexes-expressional"/>.) Another approach to querying is to exploit containment, for example: @@ -473,11 +473,11 @@ CREATE INDEX idxgintags ON api USING GIN ((jdoc -> 'tags')); -- Find documents in which the key "tags" contains array element "qui" SELECT jdoc->'guid', jdoc->'name' FROM api WHERE jdoc @> '{"tags": ["qui"]}'; - A simple GIN index on the jdoc column can support this + A simple GIN index on the jdoc column can support this query. But note that such an index will store copies of every key and - value in the jdoc column, whereas the expression index + value in the jdoc column, whereas the expression index of the previous example stores only data found under - the tags key. While the simple-index approach is far more + the tags key. While the simple-index approach is far more flexible (since it supports queries about any key), targeted expression indexes are likely to be smaller and faster to search than a simple index. @@ -485,7 +485,7 @@ SELECT jdoc->'guid', jdoc->'name' FROM api WHERE jdoc @> '{"tags": ["qu Although the jsonb_path_ops operator class supports - only queries with the @> operator, it has notable + only queries with the @> operator, it has notable performance advantages over the default operator class jsonb_ops. A jsonb_path_ops index is usually much smaller than a jsonb_ops @@ -503,7 +503,7 @@ SELECT jdoc->'guid', jdoc->'name' FROM api WHERE jdoc @> '{"tags": ["qu data. - For this purpose, the term value includes array elements, + For this purpose, the term value includes array elements, though JSON terminology sometimes considers array elements distinct from values within objects. @@ -511,13 +511,13 @@ SELECT jdoc->'guid', jdoc->'name' FROM api WHERE jdoc @> '{"tags": ["qu Basically, each jsonb_path_ops index item is a hash of the value and the key(s) leading to it; for example to index {"foo": {"bar": "baz"}}, a single index item would - be created incorporating all three of foo, bar, - and baz into the hash value. Thus a containment query + be created incorporating all three of foo, bar, + and baz into the hash value. 
Thus a containment query looking for this structure would result in an extremely specific index - search; but there is no way at all to find out whether foo + search; but there is no way at all to find out whether foo appears as a key. On the other hand, a jsonb_ops - index would create three index items representing foo, - bar, and baz separately; then to do the + index would create three index items representing foo, + bar, and baz separately; then to do the containment query, it would look for rows containing all three of these items. While GIN indexes can perform such an AND search fairly efficiently, it will still be less specific and slower than the @@ -531,15 +531,15 @@ SELECT jdoc->'guid', jdoc->'name' FROM api WHERE jdoc @> '{"tags": ["qu that it produces no index entries for JSON structures not containing any values, such as {"a": {}}. If a search for documents containing such a structure is requested, it will require a - full-index scan, which is quite slow. jsonb_path_ops is + full-index scan, which is quite slow. jsonb_path_ops is therefore ill-suited for applications that often perform such searches. - jsonb also supports btree and hash + jsonb also supports btree and hash indexes. These are usually useful only if it's important to check equality of complete JSON documents. - The btree ordering for jsonb datums is seldom + The btree ordering for jsonb datums is seldom of great interest, but for completeness it is: Object > Array > Boolean > Number > String > Null @@ -569,4 +569,28 @@ SELECT jdoc->'guid', jdoc->'name' FROM api WHERE jdoc @> '{"tags": ["qu compared using the default database collation. + + + Transforms + + + Additional extensions are available that implement transforms for the + jsonb type for different procedural languages. + + + + The extensions for PL/Perl are called jsonb_plperl and + jsonb_plperlu. If you use them, jsonb + values are mapped to Perl arrays, hashes, and scalars, as appropriate. + + + + The extensions for PL/Python are called jsonb_plpythonu, + jsonb_plpython2u, and + jsonb_plpython3u (see for the PL/Python naming convention). If you + use them, jsonb values are mapped to Python dictionaries, + lists, and scalars, as appropriate. + + diff --git a/doc/src/sgml/keywords.sgml b/doc/src/sgml/keywords.sgml index 01bc9b47b1..a37d0b756b 100644 --- a/doc/src/sgml/keywords.sgml +++ b/doc/src/sgml/keywords.sgml @@ -9,10 +9,10 @@ - lists all tokens that are key words + lists all tokens that are key words in the SQL standard and in PostgreSQL &version;. Background information can be found in . + linkend="sql-syntax-identifiers"/>. (For space reasons, only the latest two versions of the SQL standard, and SQL-92 for historical comparison, are included. The differences between those and the other intermediate standard versions are small.) @@ -45,7 +45,7 @@ - In in the column for + In in the column for PostgreSQL we classify as non-reserved those key words that are explicitly known to the parser but are allowed as column or table names. @@ -69,7 +69,7 @@ It is important to understand before studying that the fact that a key word is not + linkend="keywords-table"/> that the fact that a key word is not reserved in PostgreSQL does not mean that the feature related to the word is not implemented. Conversely, the presence of a key word does not indicate the existence of a feature. 
@@ -519,7 +519,7 @@ CALL - + non-reserved reserved reserved @@ -2059,7 +2059,7 @@ GROUPS - + non-reserved reserved @@ -2197,6 +2197,13 @@ reserved reserved + + INCLUDE + non-reserved + + + + INCLUDING non-reserved @@ -3200,7 +3207,7 @@ OTHERS - + non-reserved non-reserved non-reserved @@ -3569,6 +3576,13 @@ reserved reserved + + PROCEDURES + non-reserved + + + + PROGRAM non-reserved @@ -3935,9 +3949,16 @@ ROUTINE - non-reserved non-reserved + non-reserved + + + + ROUTINES + non-reserved + + @@ -4614,7 +4635,7 @@ TIES - + non-reserved non-reserved non-reserved diff --git a/doc/src/sgml/legal.sgml b/doc/src/sgml/legal.sgml index 67ef88b2ff..fd5cda30b7 100644 --- a/doc/src/sgml/legal.sgml +++ b/doc/src/sgml/legal.sgml @@ -1,9 +1,9 @@ -2017 +2018 - 1996-2017 + 1996-2018 The PostgreSQL Global Development Group @@ -11,7 +11,7 @@ Legal Notice - PostgreSQL is Copyright © 1996-2017 + PostgreSQL is Copyright © 1996-2018 by the PostgreSQL Global Development Group. diff --git a/doc/src/sgml/libpq.sgml b/doc/src/sgml/libpq.sgml index ad5e9b95b4..601091c570 100644 --- a/doc/src/sgml/libpq.sgml +++ b/doc/src/sgml/libpq.sgml @@ -13,27 +13,27 @@ libpq is the C - application programmer's interface to PostgreSQL. - libpq is a set of library functions that allow - client programs to pass queries to the PostgreSQL + application programmer's interface to PostgreSQL. + libpq is a set of library functions that allow + client programs to pass queries to the PostgreSQL backend server and to receive the results of these queries. - libpq is also the underlying engine for several - other PostgreSQL application interfaces, including - those written for C++, Perl, Python, Tcl and ECPG. - So some aspects of libpq's behavior will be + libpq is also the underlying engine for several + other PostgreSQL application interfaces, including + those written for C++, Perl, Python, Tcl and ECPG. + So some aspects of libpq's behavior will be important to you if you use one of those packages. In particular, - , - and - + , + and + describe behavior that is visible to the user of any application - that uses libpq. + that uses libpq. - Some short programs are included at the end of this chapter () to show how + Some short programs are included at the end of this chapter () to show how to write programs that use libpq. There are also several complete examples of libpq applications in the directory src/test/examples in the source code distribution. @@ -42,7 +42,7 @@ Client programs that use libpq must include the header file - libpq-fe.hlibpq-fe.h + libpq-fe.hlibpq-fe.h and must link with the libpq library. @@ -55,22 +55,38 @@ application program can have several backend connections open at one time. (One reason to do that is to access more than one database.) Each connection is represented by a - PGconnPGconn object, which - is obtained from the function PQconnectdb, - PQconnectdbParams, or - PQsetdbLogin. Note that these functions will always + PGconnPGconn object, which + is obtained from the function PQconnectdb, + PQconnectdbParams, or + PQsetdbLogin. Note that these functions will always return a non-null object pointer, unless perhaps there is too - little memory even to allocate the PGconn object. - The PQstatus function should be called to check + little memory even to allocate the PGconn object. + The PQstatus function should be called to check the return value for a successful connection before queries are sent via the connection object. 
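A minimal sketch of the checking pattern just described, using an illustrative
conninfo string: PQconnectdb returns a non-null pointer except on out-of-memory,
so the result must be tested with PQstatus and released with PQfinish even when
the connection attempt failed.
<programlisting>
#include &lt;stdio.h&gt;
#include &lt;stdlib.h&gt;
#include &lt;libpq-fe.h&gt;

int
main(void)
{
    /* The connection string here is purely illustrative. */
    PGconn *conn = PQconnectdb("dbname=mydb connect_timeout=10");

    if (conn == NULL)
    {
        fprintf(stderr, "out of memory\n");
        return EXIT_FAILURE;
    }

    if (PQstatus(conn) != CONNECTION_OK)
    {
        /* PQerrorMessage's result already ends with a newline. */
        fprintf(stderr, "connection failed: %s", PQerrorMessage(conn));
        PQfinish(conn);         /* still required after a failed attempt */
        return EXIT_FAILURE;
    }

    /* ... issue queries with PQexec() here ... */

    PQfinish(conn);
    return EXIT_SUCCESS;
}
</programlisting>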
+ + + If untrusted users have access to a database that has not adopted a + secure schema usage pattern, + begin each session by removing publicly-writable schemas from + search_path. One can set parameter key + word options to + value -csearch_path=. Alternately, one can + issue PQexec(conn, "SELECT + pg_catalog.set_config('search_path', '', false)") after + connecting. This consideration is not specific + to libpq; it applies to every interface for + executing arbitrary SQL commands. + + + On Unix, forking a process with open libpq connections can lead to unpredictable results because the parent and child processes share the same sockets and operating system resources. For this reason, - such usage is not recommended, though doing an exec from + such usage is not recommended, though doing an exec from the child process to load a new executable is safe. @@ -79,20 +95,20 @@ On Windows, there is a way to improve performance if a single database connection is repeatedly started and shutdown. Internally, - libpq calls WSAStartup() and WSACleanup() for connection startup - and shutdown, respectively. WSAStartup() increments an internal - Windows library reference count which is decremented by WSACleanup(). - When the reference count is just one, calling WSACleanup() frees + libpq calls WSAStartup() and WSACleanup() for connection startup + and shutdown, respectively. WSAStartup() increments an internal + Windows library reference count which is decremented by WSACleanup(). + When the reference count is just one, calling WSACleanup() frees all resources and all DLLs are unloaded. This is an expensive operation. To avoid this, an application can manually call - WSAStartup() so resources will not be freed when the last database + WSAStartup() so resources will not be freed when the last database connection is closed. - PQconnectdbParamsPQconnectdbParams + PQconnectdbParamsPQconnectdbParams Makes a new connection to the database server. @@ -109,16 +125,16 @@ PGconn *PQconnectdbParams(const char * const *keywords, from two NULL-terminated arrays. The first, keywords, is defined as an array of strings, each one being a key word. The second, values, gives the value - for each key word. Unlike PQsetdbLogin below, the parameter + for each key word. Unlike PQsetdbLogin below, the parameter set can be extended without changing the function signature, so use of - this function (or its nonblocking analogs PQconnectStartParams + this function (or its nonblocking analogs PQconnectStartParams and PQconnectPoll) is preferred for new application programming. The currently recognized parameter key words are listed in - . + . @@ -128,7 +144,7 @@ PGconn *PQconnectdbParams(const char * const *keywords, dbname is expanded this way, any subsequent dbname value is processed as plain database name. More details on the possible connection string formats appear in - . + . @@ -140,7 +156,7 @@ PGconn *PQconnectdbParams(const char * const *keywords, If any parameter is NULL or an empty string, the corresponding - environment variable (see ) is checked. + environment variable (see ) is checked. If the environment variable is not set either, then the indicated built-in defaults are used. @@ -157,7 +173,7 @@ PGconn *PQconnectdbParams(const char * const *keywords, - PQconnectdbPQconnectdb + PQconnectdbPQconnectdb Makes a new connection to the database server. 
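To illustrate the two parallel, NULL-terminated arrays that PQconnectdbParams accepts
(described above), here is a hedged sketch; the keyword names are standard connection
key words, while the values and the helper function name are illustrative only.
<programlisting>
#include &lt;stdio.h&gt;
#include &lt;libpq-fe.h&gt;

/* Hypothetical helper: connect using explicit keyword/value arrays. */
static PGconn *
connect_with_params(void)
{
    const char *keywords[] = {"host", "dbname", "connect_timeout", NULL};
    const char *values[]   = {"localhost", "mydb", "10", NULL};

    /* Third argument (expand_dbname) is 0: take dbname literally. */
    PGconn *conn = PQconnectdbParams(keywords, values, 0);

    if (conn != NULL &amp;&amp; PQstatus(conn) != CONNECTION_OK)
        fprintf(stderr, "connection failed: %s", PQerrorMessage(conn));

    return conn;
}
</programlisting>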
@@ -176,7 +192,7 @@ PGconn *PQconnectdb(const char *conninfo); The passed string can be empty to use all default parameters, or it can contain one or more parameter settings separated by whitespace, or it can contain a URI. - See for details. + See for details. @@ -184,7 +200,7 @@ PGconn *PQconnectdb(const char *conninfo); - PQsetdbLoginPQsetdbLogin + PQsetdbLoginPQsetdbLogin Makes a new connection to the database server. @@ -211,13 +227,13 @@ PGconn *PQsetdbLogin(const char *pghost, an = sign or has a valid connection URI prefix, it is taken as a conninfo string in exactly the same way as if it had been passed to PQconnectdb, and the remaining - parameters are then applied as specified for PQconnectdbParams. + parameters are then applied as specified for PQconnectdbParams. - PQsetdbPQsetdb + PQsetdbPQsetdb Makes a new connection to the database server. @@ -232,16 +248,16 @@ PGconn *PQsetdb(char *pghost, This is a macro that calls PQsetdbLogin with null pointers - for the login and pwd parameters. It is provided + for the login and pwd parameters. It is provided for backward compatibility with very old programs. - PQconnectStartParamsPQconnectStartParams - PQconnectStartPQconnectStart - PQconnectPollPQconnectPoll + PQconnectStartParamsPQconnectStartParams + PQconnectStartPQconnectStart + PQconnectPollPQconnectPoll nonblocking connection @@ -263,7 +279,7 @@ PostgresPollingStatusType PQconnectPoll(PGconn *conn); that your application's thread of execution is not blocked on remote I/O whilst doing so. The point of this approach is that the waits for I/O to complete can occur in the application's main loop, rather than down inside - PQconnectdbParams or PQconnectdb, and so the + PQconnectdbParams or PQconnectdb, and so the application can manage this operation in parallel with other activities. @@ -287,9 +303,9 @@ PostgresPollingStatusType PQconnectPoll(PGconn *conn); - The hostaddr and host parameters are used appropriately to ensure that - name and reverse name queries are not made. See the documentation of - these parameters in for details. + The hostaddr parameter must be used appropriately + to prevent DNS queries from being made. See the documentation of + this parameter in for details. @@ -302,7 +318,7 @@ PostgresPollingStatusType PQconnectPoll(PGconn *conn); - You ensure that the socket is in the appropriate state + You must ensure that the socket is in the appropriate state before calling PQconnectPoll, as described below. @@ -310,35 +326,37 @@ PostgresPollingStatusType PQconnectPoll(PGconn *conn); - Note: use of PQconnectStartParams is analogous to - PQconnectStart shown below. + To begin a nonblocking connection request, + call PQconnectStart + or PQconnectStartParams. If the result is null, + then libpq has been unable to allocate a + new PGconn structure. Otherwise, a + valid PGconn pointer is returned (though not + yet representing a valid connection to the database). Next + call PQstatus(conn). If the result + is CONNECTION_BAD, the connection attempt has already + failed, typically because of invalid connection parameters. - To begin a nonblocking connection request, call conn = PQconnectStart("connection_info_string"). - If conn is null, then libpq has been unable to allocate a new PGconn - structure. Otherwise, a valid PGconn pointer is returned (though not yet - representing a valid connection to the database). On return from - PQconnectStart, call status = PQstatus(conn). If status equals - CONNECTION_BAD, PQconnectStart has failed. 
- - - - If PQconnectStart succeeds, the next stage is to poll - libpq so that it can proceed with the connection sequence. + If PQconnectStart + or PQconnectStartParams succeeds, the next stage + is to poll libpq so that it can proceed with + the connection sequence. Use PQsocket(conn) to obtain the descriptor of the socket underlying the database connection. + (Caution: do not assume that the socket remains the same + across PQconnectPoll calls.) Loop thus: If PQconnectPoll(conn) last returned PGRES_POLLING_READING, wait until the socket is ready to - read (as indicated by select(), poll(), or + read (as indicated by select(), poll(), or similar system function). Then call PQconnectPoll(conn) again. Conversely, if PQconnectPoll(conn) last returned PGRES_POLLING_WRITING, wait until the socket is ready to write, then call PQconnectPoll(conn) again. - If you have yet to call - PQconnectPoll, i.e., just after the call to - PQconnectStart, behave as if it last returned + On the first iteration, i.e. if you have yet to call + PQconnectPoll, behave as if it last returned PGRES_POLLING_WRITING. Continue this loop until PQconnectPoll(conn) returns PGRES_POLLING_FAILED, indicating the connection procedure @@ -348,10 +366,10 @@ PostgresPollingStatusType PQconnectPoll(PGconn *conn); At any time during connection, the status of the connection can be - checked by calling PQstatus. If this call returns CONNECTION_BAD, then the - connection procedure has failed; if the call returns CONNECTION_OK, then the + checked by calling PQstatus. If this call returns CONNECTION_BAD, then the + connection procedure has failed; if the call returns CONNECTION_OK, then the connection is ready. Both of these states are equally detectable - from the return value of PQconnectPoll, described above. Other states might also occur + from the return value of PQconnectPoll, described above. Other states might also occur during (and only during) an asynchronous connection procedure. These indicate the current stage of the connection procedure and might be useful to provide feedback to the user for example. These statuses are: @@ -463,16 +481,18 @@ switch(PQstatus(conn)) - Note that if PQconnectStart returns a non-null pointer, you must call - PQfinish when you are finished with it, in order to dispose of - the structure and any associated memory blocks. This must be done even if - the connection attempt fails or is abandoned. + Note that when PQconnectStart + or PQconnectStartParams returns a non-null + pointer, you must call PQfinish when you are + finished with it, in order to dispose of the structure and any + associated memory blocks. This must be done even if the connection + attempt fails or is abandoned. - PQconndefaultsPQconndefaults + PQconndefaultsPQconndefaults Returns the default connection options. @@ -501,7 +521,7 @@ typedef struct all possible PQconnectdb options and their current default values. The return value points to an array of PQconninfoOption structures, which ends - with an entry having a null keyword pointer. The + with an entry having a null keyword pointer. The null pointer is returned if memory could not be allocated. Note that the current default values (val fields) will depend on environment variables and other context. A @@ -519,7 +539,7 @@ typedef struct - PQconninfoPQconninfo + PQconninfoPQconninfo Returns the connection options used by a live connection. 
@@ -533,7 +553,7 @@ PQconninfoOption *PQconninfo(PGconn *conn); all possible PQconnectdb options and the values that were used to connect to the server. The return value points to an array of PQconninfoOption - structures, which ends with an entry having a null keyword + structures, which ends with an entry having a null keyword pointer. All notes above for PQconndefaults also apply to the result of PQconninfo. @@ -543,7 +563,7 @@ PQconninfoOption *PQconninfo(PGconn *conn); - PQconninfoParsePQconninfoParse + PQconninfoParsePQconninfoParse Returns parsed connection options from the provided connection string. @@ -555,12 +575,12 @@ PQconninfoOption *PQconninfoParse(const char *conninfo, char **errmsg); Parses a connection string and returns the resulting options as an - array; or returns NULL if there is a problem with the connection + array; or returns NULL if there is a problem with the connection string. This function can be used to extract the PQconnectdb options in the provided connection string. The return value points to an array of PQconninfoOption structures, which ends - with an entry having a null keyword pointer. + with an entry having a null keyword pointer. @@ -571,10 +591,10 @@ PQconninfoOption *PQconninfoParse(const char *conninfo, char **errmsg); - If errmsg is not NULL, then *errmsg is set - to NULL on success, else to a malloc'd error string explaining - the problem. (It is also possible for *errmsg to be - set to NULL and the function to return NULL; + If errmsg is not NULL, then *errmsg is set + to NULL on success, else to a malloc'd error string explaining + the problem. (It is also possible for *errmsg to be + set to NULL and the function to return NULL; this indicates an out-of-memory condition.) @@ -582,15 +602,15 @@ PQconninfoOption *PQconninfoParse(const char *conninfo, char **errmsg); After processing the options array, free it by passing it to PQconninfoFree. If this is not done, some memory is leaked for each call to PQconninfoParse. - Conversely, if an error occurs and errmsg is not NULL, - be sure to free the error string using PQfreemem. + Conversely, if an error occurs and errmsg is not NULL, + be sure to free the error string using PQfreemem. - PQfinishPQfinish + PQfinishPQfinish Closes the connection to the server. Also frees @@ -604,14 +624,14 @@ void PQfinish(PGconn *conn); Note that even if the server connection attempt fails (as indicated by PQstatus), the application should call PQfinish to free the memory used by the PGconn object. - The PGconn pointer must not be used again after + The PGconn pointer must not be used again after PQfinish has been called. - PQresetPQreset + PQresetPQreset Resets the communication channel to the server. @@ -631,8 +651,8 @@ void PQreset(PGconn *conn); - PQresetStartPQresetStart - PQresetPollPQresetPoll + PQresetStartPQresetStart + PQresetPollPQresetPoll Reset the communication channel to the server, in a nonblocking manner. @@ -650,8 +670,8 @@ PostgresPollingStatusType PQresetPoll(PGconn *conn); parameters previously used. This can be useful for error recovery if a working connection is lost. They differ from PQreset (above) in that they act in a nonblocking manner. These functions suffer from the same - restrictions as PQconnectStartParams, PQconnectStart - and PQconnectPoll. + restrictions as PQconnectStartParams, PQconnectStart + and PQconnectPoll. 
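As a sketch of the nonblocking procedure described above (PQconnectStart followed by a
PQconnectPoll loop driven by socket readiness), assuming a POSIX select()-based main
loop and with error handling kept minimal:
<programlisting>
#include &lt;stdio.h&gt;
#include &lt;sys/select.h&gt;
#include &lt;libpq-fe.h&gt;

static PGconn *
connect_nonblocking(const char *conninfo)
{
    PGconn *conn = PQconnectStart(conninfo);
    /* On the first iteration, behave as if PGRES_POLLING_WRITING was returned. */
    PostgresPollingStatusType pstat = PGRES_POLLING_WRITING;

    if (conn == NULL || PQstatus(conn) == CONNECTION_BAD)
        goto fail;

    while (pstat != PGRES_POLLING_OK &amp;&amp; pstat != PGRES_POLLING_FAILED)
    {
        /* Re-fetch the descriptor each time; it can change between calls. */
        int    sock = PQsocket(conn);
        fd_set fds;

        if (sock &lt; 0)
            goto fail;

        FD_ZERO(&amp;fds);
        FD_SET(sock, &amp;fds);

        /* Wait for readability or writability, as PQconnectPoll requested. */
        if (pstat == PGRES_POLLING_READING)
            select(sock + 1, &amp;fds, NULL, NULL, NULL);
        else
            select(sock + 1, NULL, &amp;fds, NULL, NULL);

        pstat = PQconnectPoll(conn);
    }

    if (pstat == PGRES_POLLING_OK &amp;&amp; PQstatus(conn) == CONNECTION_OK)
        return conn;

fail:
    if (conn != NULL)
    {
        fprintf(stderr, "connection failed: %s", PQerrorMessage(conn));
        PQfinish(conn);          /* required even after a failed attempt */
    }
    return NULL;
}
</programlisting>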
@@ -665,12 +685,12 @@ PostgresPollingStatusType PQresetPoll(PGconn *conn); - PQpingParamsPQpingParams + PQpingParamsPQpingParams PQpingParams reports the status of the server. It accepts connection parameters identical to those of - PQconnectdbParams, described above. It is not + PQconnectdbParams, described above. It is not necessary to supply correct user name, password, or database name values to obtain the server status; however, if incorrect values are provided, the server will log a failed connection attempt. @@ -734,12 +754,12 @@ PGPing PQpingParams(const char * const *keywords, - PQpingPQping + PQpingPQping PQping reports the status of the server. It accepts connection parameters identical to those of - PQconnectdb, described above. It is not + PQconnectdb, described above. It is not necessary to supply correct user name, password, or database name values to obtain the server status; however, if incorrect values are provided, the server will log a failed connection attempt. @@ -750,7 +770,7 @@ PGPing PQping(const char *conninfo); - The return values are the same as for PQpingParams. + The return values are the same as for PQpingParams. @@ -771,11 +791,11 @@ PGPing PQping(const char *conninfo); - Several libpq functions parse a user-specified string to obtain + Several libpq functions parse a user-specified string to obtain connection parameters. There are two accepted formats for these strings: plain keyword = value strings and URIs. URIs generally follow - RFC + RFC 3986, except that multi-host connection strings are allowed as further described below. @@ -802,7 +822,7 @@ host=localhost port=5432 dbname=mydb connect_timeout=10 The recognized parameter key words are listed in . + linkend="libpq-paramkeywords"/>. @@ -840,12 +860,14 @@ postgresql:///mydb?host=localhost&port=5433 Percent-encoding may be used to include symbols with special meaning in any - of the URI parts. + of the URI parts, e.g. replace = with + %3D. + Any connection parameters not corresponding to key words listed in are ignored and a warning message about them + linkend="libpq-paramkeywords"/> are ignored and a warning message about them is sent to stderr. @@ -865,7 +887,7 @@ postgresql://[2001:db8::1234]/database The host component is interpreted as described for the parameter . In particular, a Unix-domain socket + linkend="libpq-connect-host"/>. In particular, a Unix-domain socket connection is chosen if the host part is either empty or starts with a slash, otherwise a TCP/IP connection is initiated. Note, however, that the slash is a reserved character in the hierarchical part of the URI. So, to @@ -893,24 +915,29 @@ postgresql://%2Fvar%2Flib%2Fpostgresql/dbname It is possible to specify multiple hosts to connect to, so that they are - tried in the given order. In the Keyword/Value format, the host, - hostaddr, and port options accept a comma-separated - list of values. The same number of elements must be given in each option, such - that e.g. the first hostaddr corresponds to the first host name, - the second hostaddr corresponds to the second host name, and so + tried in the given order. In the Keyword/Value format, the host, + hostaddr, and port options accept a comma-separated + list of values. The same number of elements must be given in each + option that is specified, such + that e.g. the first hostaddr corresponds to the first host name, + the second hostaddr corresponds to the second host name, and so forth. As an exception, if only one port is specified, it applies to all the hosts. 
- In the connection URI format, you can list multiple host:port pairs - separated by commas, in the host component of the URI. In either - format, a single hostname can also translate to multiple network addresses. A - common example of this is a host that has both an IPv4 and an IPv6 address. + In the connection URI format, you can list multiple host:port pairs + separated by commas, in the host component of the URI. - When multiple hosts are specified, or when a single hostname is + In either format, a single host name can translate to multiple network + addresses. A common example of this is a host that has both an IPv4 and + an IPv6 address. + + + + When multiple hosts are specified, or when a single host name is translated to multiple addresses, all the hosts and addresses will be tried in order, until one succeeds. If none of the hosts can be reached, the connection fails. If a connection is established successfully, but @@ -920,8 +947,8 @@ postgresql://%2Fvar%2Flib%2Fpostgresql/dbname If a password file is used, you can have different passwords for different hosts. All the other connection options are the same for every - host, it is not possible to e.g. specify a different username for - different hosts. + host in the list; it is not possible to e.g. specify different + usernames for different hosts. @@ -937,22 +964,22 @@ postgresql://%2Fvar%2Flib%2Fpostgresql/dbname host - Name of host to connect to.host name + Name of host to connect to.host name If a host name begins with a slash, it specifies Unix-domain communication rather than TCP/IP communication; the value is the - name of the directory in which the socket file is stored. If - multiple host names are specified, each will be tried in turn in - the order given. The default behavior when host is - not specified is to connect to a Unix-domain - socketUnix domain socket in + name of the directory in which the socket file is stored. + The default behavior when host is + not specified, or is empty, is to connect to a Unix-domain + socketUnix domain socket in /tmp (or whatever socket directory was specified - when PostgreSQL was built). On machines without - Unix-domain sockets, the default is to connect to localhost. + when PostgreSQL was built). On machines without + Unix-domain sockets, the default is to connect to localhost. A comma-separated list of host names is also accepted, in which case - each host name in the list is tried in order. See - for details. + each host name in the list is tried in order; an empty item in the + list selects the default behavior as explained above. See + for details. @@ -962,61 +989,68 @@ postgresql://%2Fvar%2Flib%2Fpostgresql/dbname Numeric IP address of host to connect to. This should be in the - standard IPv4 address format, e.g., 172.28.40.9. If + standard IPv4 address format, e.g., 172.28.40.9. If your machine supports IPv6, you can also use those addresses. TCP/IP communication is always used when a nonempty string is specified for this parameter. - Using hostaddr instead of host allows the + Using hostaddr instead of host allows the application to avoid a host name look-up, which might be important in applications with time constraints. However, a host name is required for GSSAPI or SSPI authentication - methods, as well as for verify-full SSL + methods, as well as for verify-full SSL certificate verification. The following rules are used: - If host is specified without hostaddr, - a host name lookup occurs. + If host is specified + without hostaddr, a host name lookup occurs. 
+ (When using PQconnectPoll, the lookup occurs + when PQconnectPoll first considers this host + name, and it may cause PQconnectPoll to block + for a significant amount of time.) - If hostaddr is specified without host, - the value for hostaddr gives the server network address. + If hostaddr is specified without host, + the value for hostaddr gives the server network address. The connection attempt will fail if the authentication method requires a host name. - If both host and hostaddr are specified, - the value for hostaddr gives the server network address. - The value for host is ignored unless the + If both host and hostaddr are specified, + the value for hostaddr gives the server network address. + The value for host is ignored unless the authentication method requires it, in which case it will be used as the host name. - Note that authentication is likely to fail if host - is not the name of the server at network address hostaddr. - Also, note that host rather than hostaddr + Note that authentication is likely to fail if host + is not the name of the server at network address hostaddr. + Also, when both host and hostaddr + are specified, host is used to identify the connection in a password file (see - ). + ). - A comma-separated list of hostaddrs is also accepted, in - which case each host in the list is tried in order. See - for details. + A comma-separated list of hostaddr values is also + accepted, in which case each host in the list is tried in order. + An empty item in the list causes the corresponding host name to be + used, or the default host name if that is empty as well. See + for details. Without either a host name or host address, libpq will connect using a local Unix-domain socket; or on machines without Unix-domain - sockets, it will attempt to connect to localhost. + sockets, it will attempt to connect to localhost. @@ -1027,11 +1061,14 @@ postgresql://%2Fvar%2Flib%2Fpostgresql/dbname Port number to connect to at the server host, or socket file name extension for Unix-domain - connections.port + connections.port If multiple hosts were given in the host or - hostaddr parameters, this parameter may specify a list - of ports of equal length, or it may specify a single port number to - be used for all hosts. + hostaddr parameters, this parameter may specify a + comma-separated list of ports of the same length as the host list, or + it may specify a single port number to be used for all hosts. + An empty string, or an empty item in a comma-separated list, + specifies the default port number established + when PostgreSQL was built. @@ -1042,7 +1079,7 @@ postgresql://%2Fvar%2Flib%2Fpostgresql/dbname The database name. Defaults to be the same as the user name. In certain contexts, the value is checked for extended - formats; see for more details on + formats; see for more details on those. @@ -1073,9 +1110,9 @@ postgresql://%2Fvar%2Flib%2Fpostgresql/dbname Specifies the name of the file used to store passwords - (see ). + (see ). Defaults to ~/.pgpass, or - %APPDATA%\postgresql\pgpass.conf on Microsoft Windows. + %APPDATA%\postgresql\pgpass.conf on Microsoft Windows. (No error is reported if this file does not exist.) @@ -1085,11 +1122,12 @@ postgresql://%2Fvar%2Flib%2Fpostgresql/dbname connect_timeout - Maximum wait for connection, in seconds (write as a decimal integer - string). Zero or not specified means wait indefinitely. It is not - recommended to use a timeout of less than 2 seconds. - This timeout applies separately to each connection attempt. 
- For example, if you specify two hosts and connect_timeout + Maximum wait for connection, in seconds (write as a decimal integer, + e.g. 10). Zero, negative, or not specified means + wait indefinitely. The minimum allowed timeout is 2 seconds, therefore + a value of 1 is interpreted as 2. + This timeout applies separately to each host name or IP address. + For example, if you specify two hosts and connect_timeout is 5, each host will time out if no connection is made within 5 seconds, so the total time spent waiting for a connection might be up to 10 seconds. @@ -1117,13 +1155,13 @@ postgresql://%2Fvar%2Flib%2Fpostgresql/dbname Specifies command-line options to send to the server at connection - start. For example, setting this to -c geqo=off sets the - session's value of the geqo parameter to - off. Spaces within this string are considered to + start. For example, setting this to -c geqo=off sets the + session's value of the geqo parameter to + off. Spaces within this string are considered to separate command-line arguments, unless escaped with a backslash - (\); write \\ to represent a literal + (\); write \\ to represent a literal backslash. For a detailed discussion of the available - options, consult . + options, consult . @@ -1132,7 +1170,7 @@ postgresql://%2Fvar%2Flib%2Fpostgresql/dbname application_name - Specifies a value for the + Specifies a value for the configuration parameter. @@ -1143,9 +1181,9 @@ postgresql://%2Fvar%2Flib%2Fpostgresql/dbname Specifies a fallback value for the configuration parameter. + linkend="guc-application-name"/> configuration parameter. This value will be used if no value has been given for - application_name via a connection parameter or the + application_name via a connection parameter or the PGAPPNAME environment variable. Specifying a fallback name is useful in generic utility programs that wish to set a default application name but allow it to be @@ -1174,7 +1212,7 @@ postgresql://%2Fvar%2Flib%2Fpostgresql/dbname send a keepalive message to the server. A value of zero uses the system default. This parameter is ignored for connections made via a Unix-domain socket, or if keepalives are disabled. - It is only supported on systems where TCP_KEEPIDLE or + It is only supported on systems where TCP_KEEPIDLE or an equivalent socket option is available, and on Windows; on other systems, it has no effect. @@ -1189,7 +1227,7 @@ postgresql://%2Fvar%2Flib%2Fpostgresql/dbname that is not acknowledged by the server should be retransmitted. A value of zero uses the system default. This parameter is ignored for connections made via a Unix-domain socket, or if keepalives are disabled. - It is only supported on systems where TCP_KEEPINTVL or + It is only supported on systems where TCP_KEEPINTVL or an equivalent socket option is available, and on Windows; on other systems, it has no effect. @@ -1204,7 +1242,7 @@ postgresql://%2Fvar%2Flib%2Fpostgresql/dbname client's connection to the server is considered dead. A value of zero uses the system default. This parameter is ignored for connections made via a Unix-domain socket, or if keepalives are disabled. - It is only supported on systems where TCP_KEEPCNT or + It is only supported on systems where TCP_KEEPCNT or an equivalent socket option is available; on other systems, it has no effect. @@ -1220,12 +1258,70 @@ postgresql://%2Fvar%2Flib%2Fpostgresql/dbname + + replication + + + This option determines whether the connection should use the + replication protocol instead of the normal protocol. 
This is what + PostgreSQL replication connections as well as tools such as + pg_basebackup use internally, but it can + also be used by third-party applications. For a description of the + replication protocol, consult . + + + + The following values, which are case-insensitive, are supported: + + + + true, on, + yes, 1 + + + + The connection goes into physical replication mode. + + + + + + database + + + The connection goes into logical replication mode, connecting to + the database specified in the dbname parameter. + + + + + + + false, off, + no, 0 + + + + The connection is a regular one, which is the default behavior. + + + + + + + + In physical or logical replication mode, only the simple query protocol + can be used. + + + + sslmode This option determines whether or with what priority a secure - SSL TCP/IP connection will be negotiated with the + SSL TCP/IP connection will be negotiated with the server. There are six modes: @@ -1233,7 +1329,7 @@ postgresql://%2Fvar%2Flib%2Fpostgresql/dbname disable - only try a non-SSL connection + only try a non-SSL connection @@ -1242,8 +1338,8 @@ postgresql://%2Fvar%2Flib%2Fpostgresql/dbname allow - first try a non-SSL connection; if that - fails, try an SSL connection + first try a non-SSL connection; if that + fails, try an SSL connection @@ -1252,8 +1348,8 @@ postgresql://%2Fvar%2Flib%2Fpostgresql/dbname prefer (default) - first try an SSL connection; if that fails, - try a non-SSL connection + first try an SSL connection; if that fails, + try a non-SSL connection @@ -1262,7 +1358,7 @@ postgresql://%2Fvar%2Flib%2Fpostgresql/dbname require - only try an SSL connection. If a root CA + only try an SSL connection. If a root CA file is present, verify the certificate in the same way as if verify-ca was specified @@ -1273,9 +1369,9 @@ postgresql://%2Fvar%2Flib%2Fpostgresql/dbname verify-ca - only try an SSL connection, and verify that + only try an SSL connection, and verify that the server certificate is issued by a trusted - certificate authority (CA) + certificate authority (CA) @@ -1284,30 +1380,30 @@ postgresql://%2Fvar%2Flib%2Fpostgresql/dbname verify-full - only try an SSL connection, verify that the + only try an SSL connection, verify that the server certificate is issued by a - trusted CA and that the requested server host name + trusted CA and that the requested server host name matches that in the certificate - See for a detailed description of how + See for a detailed description of how these options work. - sslmode is ignored for Unix domain socket + sslmode is ignored for Unix domain socket communication. - If PostgreSQL is compiled without SSL support, - using options require, verify-ca, or - verify-full will cause an error, while - options allow and prefer will be - accepted but libpq will not actually attempt - an SSL - connection.SSLwith libpq + If PostgreSQL is compiled without SSL support, + using options require, verify-ca, or + verify-full will cause an error, while + options allow and prefer will be + accepted but libpq will not actually attempt + an SSL + connection.SSLwith libpq @@ -1316,20 +1412,20 @@ postgresql://%2Fvar%2Flib%2Fpostgresql/dbname requiressl - This option is deprecated in favor of the sslmode + This option is deprecated in favor of the sslmode setting. If set to 1, an SSL connection to the server - is required (this is equivalent to sslmode - require). libpq will then refuse + is required (this is equivalent to sslmode + require). 
libpq will then refuse to connect if the server does not accept an SSL connection. If set to 0 (default), - libpq will negotiate the connection type with - the server (equivalent to sslmode - prefer). This option is only available if - PostgreSQL is compiled with SSL support. + libpq will negotiate the connection type with + the server (equivalent to sslmode + prefer). This option is only available if + PostgreSQL is compiled with SSL support. @@ -1338,19 +1434,28 @@ postgresql://%2Fvar%2Flib%2Fpostgresql/dbname sslcompression - If set to 1 (default), data sent over SSL connections will be - compressed. - If set to 0, compression will be disabled (this requires - OpenSSL 1.0.0 or later). - This parameter is ignored if a connection without SSL is made, - or if the version of OpenSSL used does not support - it. + If set to 1, data sent over SSL connections will be compressed. If + set to 0, compression will be disabled. The default is 0. This + parameter is ignored if a connection without SSL is made. + - Compression uses CPU time, but can improve throughput if - the network is the bottleneck. - Disabling compression can improve response time and throughput - if CPU performance is the limiting factor. + SSL compression is nowadays considered insecure and its use is no + longer recommended. OpenSSL 1.1.0 disables + compression by default, and many operating system distributions + disable it in prior versions as well, so setting this parameter to on + will not have any effect if the server does not accept compression. + On the other hand, OpenSSL before 1.0.0 + does not support disabling compression, so this parameter is ignored + with those versions, and whether compression is used depends on the + server. + + + + If security is not a primary concern, compression can improve + throughput if the network is the bottleneck. Disabling compression + can improve response time and throughput if CPU performance is the + limiting factor. @@ -1361,7 +1466,7 @@ postgresql://%2Fvar%2Flib%2Fpostgresql/dbname This parameter specifies the file name of the client SSL certificate, replacing the default - ~/.postgresql/postgresql.crt. + ~/.postgresql/postgresql.crt. This parameter is ignored if an SSL connection is not made. @@ -1374,9 +1479,9 @@ postgresql://%2Fvar%2Flib%2Fpostgresql/dbname This parameter specifies the location for the secret key used for the client certificate. It can either specify a file name that will be used instead of the default - ~/.postgresql/postgresql.key, or it can specify a key - obtained from an external engine (engines are - OpenSSL loadable modules). An external engine + ~/.postgresql/postgresql.key, or it can specify a key + obtained from an external engine (engines are + OpenSSL loadable modules). An external engine specification should consist of a colon-separated engine name and an engine-specific key identifier. This parameter is ignored if an SSL connection is not made. @@ -1389,10 +1494,10 @@ postgresql://%2Fvar%2Flib%2Fpostgresql/dbname This parameter specifies the name of a file containing SSL - certificate authority (CA) certificate(s). + certificate authority (CA) certificate(s). If the file exists, the server's certificate will be verified to be signed by one of these authorities. The default is - ~/.postgresql/root.crt. + ~/.postgresql/root.crt. @@ -1405,7 +1510,7 @@ postgresql://%2Fvar%2Flib%2Fpostgresql/dbname revocation list (CRL). Certificates listed in this file, if it exists, will be rejected while attempting to authenticate the server's certificate. 
The default is - ~/.postgresql/root.crl. + ~/.postgresql/root.crl. @@ -1427,8 +1532,8 @@ postgresql://%2Fvar%2Flib%2Fpostgresql/dbname any user could start a server listening there. Use this parameter to ensure that you are connected to a server run by a trusted user.) This option is only supported on platforms for which the - peer authentication method is implemented; see - . + peer authentication method is implemented; see + . @@ -1440,7 +1545,7 @@ postgresql://%2Fvar%2Flib%2Fpostgresql/dbname Kerberos service name to use when authenticating with GSSAPI. This must match the service name specified in the server configuration for Kerberos authentication to succeed. (See also - .) + .) @@ -1463,7 +1568,7 @@ postgresql://%2Fvar%2Flib%2Fpostgresql/dbname Service name to use for additional parameters. It specifies a service name in pg_service.conf that holds additional connection parameters. This allows applications to specify only a service name so connection parameters - can be centrally maintained. See . + can be centrally maintained. See . @@ -1476,11 +1581,11 @@ postgresql://%2Fvar%2Flib%2Fpostgresql/dbname connection in which read-write transactions are accepted by default is considered acceptable. The query SHOW transaction_read_only will be sent upon any - successful connection; if it returns on, the connection + successful connection; if it returns on, the connection will be closed. If multiple hosts were specified in the connection string, any remaining servers will be tried just as if the connection attempt had failed. The default value of this parameter, - any, regards all connections as acceptable. + any, regards all connections as acceptable. @@ -1499,13 +1604,13 @@ postgresql://%2Fvar%2Flib%2Fpostgresql/dbname - libpq-fe.h - libpq-int.h + libpq-fe.h + libpq-int.h libpq application programmers should be careful to maintain the PGconn abstraction. Use the accessor functions described below to get at the contents of PGconn. Reference to internal PGconn fields using - libpq-int.h is not recommended because they are subject to change + libpq-int.h is not recommended because they are subject to change in the future. @@ -1513,10 +1618,10 @@ postgresql://%2Fvar%2Flib%2Fpostgresql/dbname The following functions return parameter values established at connection. These values are fixed for the life of the connection. If a multi-host - connection string is used, the values of PQhost, - PQport, and PQpass can change if a new connection - is established using the same PGconn object. Other values - are fixed for the lifetime of the PGconn object. + connection string is used, the values of PQhost, + PQport, and PQpass can change if a new connection + is established using the same PGconn object. Other values + are fixed for the lifetime of the PGconn object. @@ -1570,6 +1675,17 @@ char *PQuser(const PGconn *conn); char *PQpass(const PGconn *conn); + + + PQpass will return either the password specified + in the connection parameters, or if there was none and the password + was obtained from the password + file, it will return that. In the latter case, + if multiple hosts were specified in the connection parameters, it is + not possible to rely on the result of PQpass until + the connection is established. The status of the connection can be + checked using the function PQstatus. + @@ -1583,15 +1699,39 @@ char *PQpass(const PGconn *conn); - Returns the server host name of the connection. + Returns the server host name of the active connection. 
This can be a host name, an IP address, or a directory path if the connection is via Unix socket. (The path case can be distinguished because it will always be an absolute path, beginning - with /.) + with /.) char *PQhost(const PGconn *conn); + + + If the connection parameters specified both host and + hostaddr, then PQhost will + return the host information. If only + hostaddr was specified, then that is returned. + If multiple hosts were specified in the connection parameters, + PQhost returns the host actually connected to. + + + + PQhost returns NULL if the + conn argument is NULL. + Otherwise, if there is an error producing the host information (perhaps + if the connection has not been fully established or there was an + error), it returns an empty string. + + + + If multiple hosts were specified in the connection parameters, it is + not possible to rely on the result of PQhost until + the connection is established. The status of the connection can be + checked using the function PQstatus. + @@ -1605,12 +1745,32 @@ char *PQhost(const PGconn *conn); - Returns the port of the connection. + Returns the port of the active connection. char *PQport(const PGconn *conn); + + + If multiple ports were specified in the connection parameters, + PQport returns the port actually connected to. + + + + PQport returns NULL if the + conn argument is NULL. + Otherwise, if there is an error producing the port information (perhaps + if the connection has not been fully established or there was an + error), it returns an empty string. + + + + If multiple ports were specified in the connection parameters, it is + not possible to rely on the result of PQport until + the connection is established. The status of the connection can be + checked using the function PQstatus. + @@ -1658,7 +1818,7 @@ char *PQoptions(const PGconn *conn); The following functions return status data that can change as operations - are executed on the PGconn object. + are executed on the PGconn object. @@ -1693,8 +1853,8 @@ ConnStatusType PQstatus(const PGconn *conn); - See the entry for PQconnectStartParams, PQconnectStart - and PQconnectPoll with regards to other status codes that + See the entry for PQconnectStartParams, PQconnectStart + and PQconnectPoll with regards to other status codes that might be returned. @@ -1745,62 +1905,62 @@ const char *PQparameterStatus(const PGconn *conn, const char *paramName); Certain parameter values are reported by the server automatically at connection startup or whenever their values change. - PQparameterStatus can be used to interrogate these settings. + PQparameterStatus can be used to interrogate these settings. It returns the current value of a parameter if known, or NULL if the parameter is not known. Parameters reported as of the current release include - server_version, - server_encoding, - client_encoding, - application_name, - is_superuser, - session_authorization, - DateStyle, - IntervalStyle, - TimeZone, - integer_datetimes, and - standard_conforming_strings. - (server_encoding, TimeZone, and - integer_datetimes were not reported by releases before 8.0; - standard_conforming_strings was not reported by releases + server_version, + server_encoding, + client_encoding, + application_name, + is_superuser, + session_authorization, + DateStyle, + IntervalStyle, + TimeZone, + integer_datetimes, and + standard_conforming_strings. 
+ (server_encoding, TimeZone, and + integer_datetimes were not reported by releases before 8.0; + standard_conforming_strings was not reported by releases before 8.1; - IntervalStyle was not reported by releases before 8.4; - application_name was not reported by releases before 9.0.) + IntervalStyle was not reported by releases before 8.4; + application_name was not reported by releases before 9.0.) Note that - server_version, - server_encoding and - integer_datetimes + server_version, + server_encoding and + integer_datetimes cannot change after startup. Pre-3.0-protocol servers do not report parameter settings, but - libpq includes logic to obtain values for - server_version and client_encoding anyway. - Applications are encouraged to use PQparameterStatus - rather than ad hoc code to determine these values. + libpq includes logic to obtain values for + server_version and client_encoding anyway. + Applications are encouraged to use PQparameterStatus + rather than ad hoc code to determine these values. (Beware however that on a pre-3.0 connection, changing - client_encoding via SET after connection - startup will not be reflected by PQparameterStatus.) - For server_version, see also - PQserverVersion, which returns the information in a + client_encoding via SET after connection + startup will not be reflected by PQparameterStatus.) + For server_version, see also + PQserverVersion, which returns the information in a numeric form that is much easier to compare against. - If no value for standard_conforming_strings is reported, - applications can assume it is off, that is, backslashes + If no value for standard_conforming_strings is reported, + applications can assume it is off, that is, backslashes are treated as escapes in string literals. Also, the presence of this parameter can be taken as an indication that the escape string - syntax (E'...') is accepted. + syntax (E'...') is accepted. - Although the returned pointer is declared const, it in fact - points to mutable storage associated with the PGconn structure. + Although the returned pointer is declared const, it in fact + points to mutable storage associated with the PGconn structure. It is unwise to assume the pointer will remain valid across queries. @@ -1827,7 +1987,7 @@ int PQprotocolVersion(const PGconn *conn); not change after connection startup is complete, but it could theoretically change during a connection reset. The 3.0 protocol will normally be used when communicating with - PostgreSQL 7.4 or later servers; pre-7.4 servers + PostgreSQL 7.4 or later servers; pre-7.4 servers support only protocol 2.0. (Protocol 1.0 is obsolete and not supported by libpq.) @@ -1860,17 +2020,17 @@ int PQserverVersion(const PGconn *conn); - Prior to major version 10, PostgreSQL used + Prior to major version 10, PostgreSQL used three-part version numbers in which the first two parts together represented the major version. For those - versions, PQserverVersion uses two digits for each + versions, PQserverVersion uses two digits for each part; for example version 9.1.5 will be returned as 90105, and version 9.2.0 will be returned as 90200. Therefore, for purposes of determining feature compatibility, - applications should divide the result of PQserverVersion + applications should divide the result of PQserverVersion by 100 not 10000 to determine a logical major version number. In all release series, only the last two digits differ between minor releases (bug-fix releases). 
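A minimal sketch of that comparison, assuming conn is an established PGconn:

    int sver = PQserverVersion(conn);   /* e.g. 90605 for 9.6.5, 110001 for 11.1 */

    if (sver >= 100000)
        printf("server major version: %d\n", sver / 10000);
    else
        printf("server major version: %d.%d\n",
               sver / 10000, (sver / 100) % 100);

    /* a feature check works unchanged under both numbering schemes */
    if (sver >= 90600)
    {
        /* server is 9.6 or later */
    }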
@@ -1888,7 +2048,7 @@ int PQserverVersion(const PGconn *conn); - error message Returns the error message + error message Returns the error message most recently generated by an operation on the connection. @@ -1898,22 +2058,22 @@ char *PQerrorMessage(const PGconn *conn); - Nearly all libpq functions will set a message for + Nearly all libpq functions will set a message for PQerrorMessage if they fail. Note that by libpq convention, a nonempty PQerrorMessage result can consist of multiple lines, and will include a trailing newline. The caller should not free the result directly. It will be freed when the associated - PGconn handle is passed to + PGconn handle is passed to PQfinish. The result string should not be expected to remain the same across operations on the - PGconn structure. + PGconn structure. - PQsocketPQsocket + PQsocketPQsocket Obtains the file descriptor number of the connection socket to @@ -1931,13 +2091,13 @@ int PQsocket(const PGconn *conn); - PQbackendPIDPQbackendPID + PQbackendPIDPQbackendPID Returns the process ID (PID) - PID - determining PID of server process - in libpq + PID + determining PID of server process + in libpq of the backend process handling this connection. @@ -1958,7 +2118,7 @@ int PQbackendPID(const PGconn *conn); - PQconnectionNeedsPasswordPQconnectionNeedsPassword + PQconnectionNeedsPasswordPQconnectionNeedsPassword Returns true (1) if the connection authentication method @@ -1978,7 +2138,7 @@ int PQconnectionNeedsPassword(const PGconn *conn); - PQconnectionUsedPasswordPQconnectionUsedPassword + PQconnectionUsedPasswordPQconnectionUsedPassword Returns true (1) if the connection authentication method @@ -2004,7 +2164,7 @@ int PQconnectionUsedPassword(const PGconn *conn); - PQsslInUsePQsslInUse + PQsslInUsePQsslInUse Returns true (1) if the connection uses SSL, false (0) if not. @@ -2018,7 +2178,7 @@ int PQsslInUse(const PGconn *conn); - PQsslAttributePQsslAttribute + PQsslAttributePQsslAttribute Returns SSL-related information about the connection. @@ -2091,7 +2251,7 @@ const char *PQsslAttribute(const PGconn *conn, const char *attribute_name); - PQsslAttributeNamesPQsslAttributeNames + PQsslAttributeNamesPQsslAttributeNames Return an array of SSL attribute names available. The array is terminated by a NULL pointer. @@ -2103,7 +2263,7 @@ const char * const * PQsslAttributeNames(const PGconn *conn); - PQsslStructPQsslStruct + PQsslStructPQsslStruct Return a pointer to an SSL-implementation-specific object describing @@ -2137,17 +2297,17 @@ void *PQsslStruct(const PGconn *conn, const char *struct_name); This structure can be used to verify encryption levels, check server - certificates, and more. Refer to the OpenSSL + certificates, and more. Refer to the OpenSSL documentation for information about this structure. - PQgetsslPQgetssl + PQgetsslPQgetssl - SSLin libpq + SSLin libpq Returns the SSL structure used in the connection, or null if SSL is not in use. @@ -2161,8 +2321,8 @@ void *PQgetssl(const PGconn *conn); not be used in new applications, because the returned struct is specific to OpenSSL and will not be available if another SSL implementation is used. To check if a connection uses SSL, call - PQsslInUse instead, and for more details about the - connection, use PQsslAttribute. + PQsslInUse instead, and for more details about the + connection, use PQsslAttribute. @@ -2207,7 +2367,7 @@ PGresult *PQexec(PGconn *conn, const char *command); Returns a PGresult pointer or possibly a null pointer. 
A non-null pointer will generally be returned except in out-of-memory conditions or serious errors such as inability to send - the command to the server. The PQresultStatus function + the command to the server. The PQresultStatus function should be called to check the return value for any errors (including the value of a null pointer, in which case it will return PGRES_FATAL_ERROR). Use @@ -2220,10 +2380,12 @@ PGresult *PQexec(PGconn *conn, const char *command); The command string can include multiple SQL commands (separated by semicolons). Multiple queries sent in a single - PQexec call are processed in a single transaction, unless + PQexec call are processed in a single transaction, unless there are explicit BEGIN/COMMIT commands included in the query string to divide it into multiple - transactions. Note however that the returned + transactions. (See + for more details about how the server handles multi-query strings.) + Note however that the returned PGresult structure describes only the result of the last command executed from the string. Should one of the commands fail, processing of the string stops with it and the returned @@ -2259,10 +2421,10 @@ PGresult *PQexecParams(PGconn *conn, - PQexecParams is like PQexec, but offers additional + PQexecParams is like PQexec, but offers additional functionality: parameter values can be specified separately from the command string proper, and query results can be requested in either text or binary - format. PQexecParams is supported only in protocol 3.0 and later + format. PQexecParams is supported only in protocol 3.0 and later connections; it will fail when using protocol 2.0. @@ -2285,8 +2447,8 @@ PGresult *PQexecParams(PGconn *conn, The SQL command string to be executed. If parameters are used, - they are referred to in the command string as $1, - $2, etc. + they are referred to in the command string as $1, + $2, etc. @@ -2296,9 +2458,9 @@ PGresult *PQexecParams(PGconn *conn, The number of parameters supplied; it is the length of the arrays - paramTypes[], paramValues[], - paramLengths[], and paramFormats[]. (The - array pointers can be NULL when nParams + paramTypes[], paramValues[], + paramLengths[], and paramFormats[]. (The + array pointers can be NULL when nParams is zero.) @@ -2309,7 +2471,7 @@ PGresult *PQexecParams(PGconn *conn, Specifies, by OID, the data types to be assigned to the - parameter symbols. If paramTypes is + parameter symbols. If paramTypes is NULL, or any particular element in the array is zero, the server infers a data type for the parameter symbol in the same way it would do for an untyped literal string. @@ -2355,11 +2517,11 @@ PGresult *PQexecParams(PGconn *conn, Values passed in binary format require knowledge of the internal representation expected by the backend. For example, integers must be passed in network byte - order. Passing numeric values requires + order. Passing numeric values requires knowledge of the server storage format, as implemented in - src/backend/utils/adt/numeric.c::numeric_send() and - src/backend/utils/adt/numeric.c::numeric_recv(). + src/backend/utils/adt/numeric.c::numeric_send() and + src/backend/utils/adt/numeric.c::numeric_recv(). 
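Tying these parameters together, here is a minimal sketch of a text-format call (conn is assumed to be an established PGconn; the widgets table and its columns are hypothetical). The parameter type is left for the server to infer, and the result is checked with PQresultStatus as described later in this chapter:

    const char *paramValues[1];
    PGresult   *res;

    paramValues[0] = "42";
    res = PQexecParams(conn,
                       "SELECT name FROM widgets WHERE id = $1::int",
                       1,           /* one parameter */
                       NULL,        /* let the server infer the parameter type */
                       paramValues,
                       NULL,        /* text format, so lengths are not required */
                       NULL,        /* all parameters are in text format */
                       0);          /* ask for text-format results */

    if (PQresultStatus(res) != PGRES_TUPLES_OK)
        fprintf(stderr, "SELECT failed: %s", PQerrorMessage(conn));
    PQclear(res);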
@@ -2383,14 +2545,14 @@ PGresult *PQexecParams(PGconn *conn, - The primary advantage of PQexecParams over - PQexec is that parameter values can be separated from the + The primary advantage of PQexecParams over + PQexec is that parameter values can be separated from the command string, thus avoiding the need for tedious and error-prone quoting and escaping. - Unlike PQexec, PQexecParams allows at most + Unlike PQexec, PQexecParams allows at most one SQL command in the given string. (There can be semicolons in it, but not more than one nonempty command.) This is a limitation of the underlying protocol, but has some usefulness as an extra defense against @@ -2408,8 +2570,8 @@ PGresult *PQexecParams(PGconn *conn, SELECT * FROM mytable WHERE x = $1::bigint; - This forces parameter $1 to be treated as bigint, whereas - by default it would be assigned the same type as x. Forcing the + This forces parameter $1 to be treated as bigint, whereas + by default it would be assigned the same type as x. Forcing the parameter type decision, either this way or by specifying a numeric type OID, is strongly recommended when sending parameter values in binary format, because binary format has less redundancy than text format and so there is less chance @@ -2440,40 +2602,40 @@ PGresult *PQprepare(PGconn *conn, - PQprepare creates a prepared statement for later - execution with PQexecPrepared. This feature allows + PQprepare creates a prepared statement for later + execution with PQexecPrepared. This feature allows commands to be executed repeatedly without being parsed and - planned each time; see for details. - PQprepare is supported only in protocol 3.0 and later + planned each time; see for details. + PQprepare is supported only in protocol 3.0 and later connections; it will fail when using protocol 2.0. The function creates a prepared statement named - stmtName from the query string, which - must contain a single SQL command. stmtName can be - "" to create an unnamed statement, in which case any + stmtName from the query string, which + must contain a single SQL command. stmtName can be + "" to create an unnamed statement, in which case any pre-existing unnamed statement is automatically replaced; otherwise it is an error if the statement name is already defined in the current session. If any parameters are used, they are referred - to in the query as $1, $2, etc. - nParams is the number of parameters for which types - are pre-specified in the array paramTypes[]. (The + to in the query as $1, $2, etc. + nParams is the number of parameters for which types + are pre-specified in the array paramTypes[]. (The array pointer can be NULL when - nParams is zero.) paramTypes[] + nParams is zero.) paramTypes[] specifies, by OID, the data types to be assigned to the parameter - symbols. If paramTypes is NULL, + symbols. If paramTypes is NULL, or any particular element in the array is zero, the server assigns a data type to the parameter symbol in the same way it would do for an untyped literal string. Also, the query can use parameter - symbols with numbers higher than nParams; data types + symbols with numbers higher than nParams; data types will be inferred for these symbols as well. (See PQdescribePrepared for a means to find out what data types were inferred.) - As with PQexec, the result is normally a + As with PQexec, the result is normally a PGresult object whose contents indicate server-side success or failure. A null result indicates out-of-memory or inability to send the command at all. 
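A minimal sketch of preparing once and executing repeatedly (conn is assumed to be an established PGconn; the statement, table, and column names are hypothetical). PQexecPrepared itself is described just below:

    const char *ids[] = {"42", "43"};
    const char *paramValues[1];
    PGresult   *res;
    int         i;

    res = PQprepare(conn, "fetch_widget",
                    "SELECT name FROM widgets WHERE id = $1::int",
                    0, NULL);            /* parameter type is inferred by the server */
    if (PQresultStatus(res) != PGRES_COMMAND_OK)
        fprintf(stderr, "PQprepare failed: %s", PQerrorMessage(conn));
    PQclear(res);

    for (i = 0; i < 2; i++)
    {
        paramValues[0] = ids[i];
        res = PQexecPrepared(conn, "fetch_widget",
                             1, paramValues,
                             NULL, NULL,   /* text-format parameters */
                             0);           /* text-format results */
        if (PQresultStatus(res) != PGRES_TUPLES_OK)
            fprintf(stderr, "PQexecPrepared failed: %s", PQerrorMessage(conn));
        PQclear(res);
    }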
Use @@ -2484,11 +2646,11 @@ PGresult *PQprepare(PGconn *conn, - Prepared statements for use with PQexecPrepared can also - be created by executing SQL - statements. Also, although there is no libpq + Prepared statements for use with PQexecPrepared can also + be created by executing SQL + statements. Also, although there is no libpq function for deleting a prepared statement, the SQL statement + linkend="sql-deallocate"/> statement can be used for that purpose. @@ -2518,21 +2680,21 @@ PGresult *PQexecPrepared(PGconn *conn, - PQexecPrepared is like PQexecParams, + PQexecPrepared is like PQexecParams, but the command to be executed is specified by naming a previously-prepared statement, instead of giving a query string. This feature allows commands that will be used repeatedly to be parsed and planned just once, rather than each time they are executed. The statement must have been prepared previously in - the current session. PQexecPrepared is supported + the current session. PQexecPrepared is supported only in protocol 3.0 and later connections; it will fail when using protocol 2.0. - The parameters are identical to PQexecParams, except that the + The parameters are identical to PQexecParams, except that the name of a prepared statement is given instead of a query string, and the - paramTypes[] parameter is not present (it is not needed since + paramTypes[] parameter is not present (it is not needed since the prepared statement's parameter types were determined when it was created). @@ -2556,20 +2718,20 @@ PGresult *PQdescribePrepared(PGconn *conn, const char *stmtName); - PQdescribePrepared allows an application to obtain + PQdescribePrepared allows an application to obtain information about a previously prepared statement. - PQdescribePrepared is supported only in protocol 3.0 + PQdescribePrepared is supported only in protocol 3.0 and later connections; it will fail when using protocol 2.0. - stmtName can be "" or NULL to reference + stmtName can be "" or NULL to reference the unnamed statement, otherwise it must be the name of an existing - prepared statement. On success, a PGresult with + prepared statement. On success, a PGresult with status PGRES_COMMAND_OK is returned. The functions PQnparams and PQparamtype can be applied to this - PGresult to obtain information about the parameters + PGresult to obtain information about the parameters of the prepared statement, and the functions PQnfields, PQfname, PQftype, etc provide information about the @@ -2596,23 +2758,23 @@ PGresult *PQdescribePortal(PGconn *conn, const char *portalName); - PQdescribePortal allows an application to obtain + PQdescribePortal allows an application to obtain information about a previously created portal. - (libpq does not provide any direct access to + (libpq does not provide any direct access to portals, but you can use this function to inspect the properties - of a cursor created with a DECLARE CURSOR SQL command.) - PQdescribePortal is supported only in protocol 3.0 + of a cursor created with a DECLARE CURSOR SQL command.) + PQdescribePortal is supported only in protocol 3.0 and later connections; it will fail when using protocol 2.0. - portalName can be "" or NULL to reference + portalName can be "" or NULL to reference the unnamed portal, otherwise it must be the name of an existing - portal. On success, a PGresult with status + portal. On success, a PGresult with status PGRES_COMMAND_OK is returned. 
The functions PQnfields, PQfname, PQftype, etc can be applied to the - PGresult to obtain information about the result + PGresult to obtain information about the result columns (if any) of the portal. @@ -2621,7 +2783,7 @@ PGresult *PQdescribePortal(PGconn *conn, const char *portalName); - The PGresultPGresult + The PGresultPGresult structure encapsulates the result returned by the server. libpq application programmers should be careful to maintain the PGresult abstraction. @@ -2674,7 +2836,7 @@ ExecStatusType PQresultStatus(const PGresult *res); Successful completion of a command returning data (such as - a SELECT or SHOW). + a SELECT or SHOW). @@ -2739,10 +2901,10 @@ ExecStatusType PQresultStatus(const PGresult *res); PGRES_SINGLE_TUPLE - The PGresult contains a single result tuple + The PGresult contains a single result tuple from the current command. This status occurs only when single-row mode has been selected for the query - (see ). + (see ). @@ -2766,7 +2928,7 @@ ExecStatusType PQresultStatus(const PGresult *res); never be returned directly by PQexec or other query execution functions; results of this kind are instead passed to the notice processor (see ). + linkend="libpq-notice-processing"/>). @@ -2782,7 +2944,7 @@ ExecStatusType PQresultStatus(const PGresult *res); Converts the enumerated type returned by - PQresultStatus into a string constant describing the + PQresultStatus into a string constant describing the status code. The caller should not free the result. @@ -2809,7 +2971,7 @@ char *PQresultErrorMessage(const PGresult *res); If there was an error, the returned string will include a trailing newline. The caller should not free the result directly. It will - be freed when the associated PGresult handle is + be freed when the associated PGresult handle is passed to PQclear. @@ -2841,7 +3003,7 @@ char *PQresultErrorMessage(const PGresult *res); Returns a reformatted version of the error message associated with - a PGresult object. + a PGresult object. char *PQresultVerboseErrorMessage(const PGresult *res, PGVerbosity verbosity, @@ -2853,17 +3015,17 @@ char *PQresultVerboseErrorMessage(const PGresult *res, by computing the message that would have been produced by PQresultErrorMessage if the specified verbosity settings had been in effect for the connection when the - given PGresult was generated. If - the PGresult is not an error result, - PGresult is not an error result is reported instead. + given PGresult was generated. If + the PGresult is not an error result, + PGresult is not an error result is reported instead. The returned string includes a trailing newline. Unlike most other functions for extracting data from - a PGresult, the result of this function is a freshly + a PGresult, the result of this function is a freshly allocated string. The caller must free it - using PQfreemem() when the string is no longer needed. + using PQfreemem() when the string is no longer needed. @@ -2873,20 +3035,20 @@ char *PQresultVerboseErrorMessage(const PGresult *res, - PQresultErrorFieldPQresultErrorField + PQresultErrorFieldPQresultErrorField Returns an individual field of an error report. char *PQresultErrorField(const PGresult *res, int fieldcode); - fieldcode is an error field identifier; see the symbols + fieldcode is an error field identifier; see the symbols listed below. NULL is returned if the PGresult is not an error or warning result, or does not include the specified field. Field values will normally not include a trailing newline. 
The caller should not free the result directly. It will be freed when the - associated PGresult handle is passed to + associated PGresult handle is passed to PQclear. @@ -2894,29 +3056,29 @@ char *PQresultErrorField(const PGresult *res, int fieldcode); The following field codes are available: - PG_DIAG_SEVERITY + PG_DIAG_SEVERITY - The severity; the field contents are ERROR, - FATAL, or PANIC (in an error message), - or WARNING, NOTICE, DEBUG, - INFO, or LOG (in a notice message), or + The severity; the field contents are ERROR, + FATAL, or PANIC (in an error message), + or WARNING, NOTICE, DEBUG, + INFO, or LOG (in a notice message), or a localized translation of one of these. Always present. - PG_DIAG_SEVERITY_NONLOCALIZED + PG_DIAG_SEVERITY_NONLOCALIZED - The severity; the field contents are ERROR, - FATAL, or PANIC (in an error message), - or WARNING, NOTICE, DEBUG, - INFO, or LOG (in a notice message). - This is identical to the PG_DIAG_SEVERITY field except + The severity; the field contents are ERROR, + FATAL, or PANIC (in an error message), + or WARNING, NOTICE, DEBUG, + INFO, or LOG (in a notice message). + This is identical to the PG_DIAG_SEVERITY field except that the contents are never localized. This is present only in - reports generated by PostgreSQL versions 9.6 + reports generated by PostgreSQL versions 9.6 and later. @@ -2924,7 +3086,7 @@ char *PQresultErrorField(const PGresult *res, int fieldcode); - PG_DIAG_SQLSTATE + PG_DIAG_SQLSTATE error codes libpq @@ -2937,14 +3099,14 @@ char *PQresultErrorField(const PGresult *res, int fieldcode); front-end applications to perform specific operations (such as error handling) in response to a particular database error. For a list of the possible SQLSTATE codes, see . This field is not localizable, + linkend="errcodes-appendix"/>. This field is not localizable, and is always present. - PG_DIAG_MESSAGE_PRIMARY + PG_DIAG_MESSAGE_PRIMARY The primary human-readable error message (typically one line). @@ -2954,7 +3116,7 @@ char *PQresultErrorField(const PGresult *res, int fieldcode); - PG_DIAG_MESSAGE_DETAIL + PG_DIAG_MESSAGE_DETAIL Detail: an optional secondary error message carrying more @@ -2964,7 +3126,7 @@ char *PQresultErrorField(const PGresult *res, int fieldcode); - PG_DIAG_MESSAGE_HINT + PG_DIAG_MESSAGE_HINT Hint: an optional suggestion what to do about the problem. @@ -2976,7 +3138,7 @@ char *PQresultErrorField(const PGresult *res, int fieldcode); - PG_DIAG_STATEMENT_POSITION + PG_DIAG_STATEMENT_POSITION A string containing a decimal integer indicating an error cursor @@ -2988,21 +3150,21 @@ char *PQresultErrorField(const PGresult *res, int fieldcode); - PG_DIAG_INTERNAL_POSITION + PG_DIAG_INTERNAL_POSITION This is defined the same as the - PG_DIAG_STATEMENT_POSITION field, but it is used + PG_DIAG_STATEMENT_POSITION field, but it is used when the cursor position refers to an internally generated command rather than the one submitted by the client. The - PG_DIAG_INTERNAL_QUERY field will always appear when + PG_DIAG_INTERNAL_QUERY field will always appear when this field appears. - PG_DIAG_INTERNAL_QUERY + PG_DIAG_INTERNAL_QUERY The text of a failed internally-generated command. This could @@ -3012,7 +3174,7 @@ char *PQresultErrorField(const PGresult *res, int fieldcode); - PG_DIAG_CONTEXT + PG_DIAG_CONTEXT An indication of the context in which the error occurred. 
@@ -3024,7 +3186,7 @@ char *PQresultErrorField(const PGresult *res, int fieldcode); - PG_DIAG_SCHEMA_NAME + PG_DIAG_SCHEMA_NAME If the error was associated with a specific database object, @@ -3034,7 +3196,7 @@ char *PQresultErrorField(const PGresult *res, int fieldcode); - PG_DIAG_TABLE_NAME + PG_DIAG_TABLE_NAME If the error was associated with a specific table, the name of the @@ -3045,7 +3207,7 @@ char *PQresultErrorField(const PGresult *res, int fieldcode); - PG_DIAG_COLUMN_NAME + PG_DIAG_COLUMN_NAME If the error was associated with a specific table column, the name @@ -3056,7 +3218,7 @@ char *PQresultErrorField(const PGresult *res, int fieldcode); - PG_DIAG_DATATYPE_NAME + PG_DIAG_DATATYPE_NAME If the error was associated with a specific data type, the name of @@ -3067,7 +3229,7 @@ char *PQresultErrorField(const PGresult *res, int fieldcode); - PG_DIAG_CONSTRAINT_NAME + PG_DIAG_CONSTRAINT_NAME If the error was associated with a specific constraint, the name @@ -3080,7 +3242,7 @@ char *PQresultErrorField(const PGresult *res, int fieldcode); - PG_DIAG_SOURCE_FILE + PG_DIAG_SOURCE_FILE The file name of the source-code location where the error was @@ -3090,7 +3252,7 @@ char *PQresultErrorField(const PGresult *res, int fieldcode); - PG_DIAG_SOURCE_LINE + PG_DIAG_SOURCE_LINE The line number of the source-code location where the error @@ -3100,7 +3262,7 @@ char *PQresultErrorField(const PGresult *res, int fieldcode); - PG_DIAG_SOURCE_FUNCTION + PG_DIAG_SOURCE_FUNCTION The name of the source-code function reporting the error. @@ -3114,7 +3276,7 @@ char *PQresultErrorField(const PGresult *res, int fieldcode); The fields for schema name, table name, column name, data type name, and constraint name are supplied only for a limited number of error - types; see . Do not assume that + types; see . Do not assume that the presence of any of these fields guarantees the presence of another field. Core error sources observe the interrelationships noted above, but user-defined functions may use these fields in other @@ -3147,7 +3309,7 @@ char *PQresultErrorField(const PGresult *res, int fieldcode); - PQclearPQclear + PQclearPQclear Frees the storage associated with a @@ -3180,7 +3342,7 @@ void PQclear(PGresult *res); These functions are used to extract information from a PGresult object that represents a successful query result (that is, one that has status - PGRES_TUPLES_OK or PGRES_SINGLE_TUPLE). + PGRES_TUPLES_OK or PGRES_SINGLE_TUPLE). They can also be used to extract information from a successful Describe operation: a Describe's result has all the same column information that actual execution of the query @@ -3199,9 +3361,10 @@ void PQclear(PGresult *res); - Returns the number of rows (tuples) in the query result. Because - it returns an integer result, large result sets might overflow the - return value on 32-bit operating systems. + Returns the number of rows (tuples) in the query result. + (Note that PGresult objects are limited to no more + than INT_MAX rows, so an int result is + sufficient.) int PQntuples(const PGresult *res); @@ -3244,7 +3407,7 @@ int PQnfields(const PGresult *res); Returns the column name associated with the given column number. Column numbers start at 0. The caller should not free the result directly. It will be freed when the associated - PGresult handle is passed to + PGresult handle is passed to PQclear. 
char *PQfname(const PGresult *res, @@ -3318,7 +3481,7 @@ Oid PQftable(const PGresult *res, - InvalidOid is returned if the column number is out of range, + InvalidOid is returned if the column number is out of range, or if the specified column is not a simple reference to a table column, or when using pre-3.0 protocol. You can query the system table pg_class to determine @@ -3410,7 +3573,7 @@ Oid PQftype(const PGresult *res, You can query the system table pg_type to obtain the names and properties of the various data types. The OIDs of the built-in data types are defined - in the file src/include/catalog/pg_type.h + in the file src/include/catalog/pg_type_d.h in the source tree. @@ -3437,7 +3600,7 @@ int PQfmod(const PGresult *res, The interpretation of modifier values is type-specific; they typically indicate precision or size limits. The value -1 is - used to indicate no information available. Most data + used to indicate no information available. Most data types do not use modifiers, in which case the value is always -1. @@ -3463,7 +3626,7 @@ int PQfsize(const PGresult *res, - PQfsize returns the space allocated for this column + PQfsize returns the space allocated for this column in a database row, in other words the size of the server's internal representation of the data type. (Accordingly, it is not really very useful to clients.) A negative value indicates @@ -3482,7 +3645,7 @@ int PQfsize(const PGresult *res, - Returns 1 if the PGresult contains binary data + Returns 1 if the PGresult contains binary data and 0 if it contains text data. int PQbinaryTuples(const PGresult *res); @@ -3491,10 +3654,10 @@ int PQbinaryTuples(const PGresult *res); This function is deprecated (except for its use in connection with - COPY), because it is possible for a single - PGresult to contain text data in some columns and - binary data in others. PQfformat is preferred. - PQbinaryTuples returns 1 only if all columns of the + COPY), because it is possible for a single + PGresult to contain text data in some columns and + binary data in others. PQfformat is preferred. + PQbinaryTuples returns 1 only if all columns of the result are binary (format 1). @@ -3513,7 +3676,7 @@ int PQbinaryTuples(const PGresult *res); Returns a single field value of one row of a PGresult. Row and column numbers start at 0. The caller should not free the result directly. It will - be freed when the associated PGresult handle is + be freed when the associated PGresult handle is passed to PQclear. char *PQgetvalue(const PGresult *res, @@ -3527,7 +3690,7 @@ char *PQgetvalue(const PGresult *res, PQgetvalue is a null-terminated character string representation of the field value. For data in binary format, the value is in the binary representation determined by - the data type's typsend and typreceive + the data type's typsend and typreceive functions. (The value is actually followed by a zero byte in this case too, but that is not ordinarily useful, since the value is likely to contain embedded nulls.) @@ -3535,7 +3698,7 @@ char *PQgetvalue(const PGresult *res, An empty string is returned if the field value is null. See - PQgetisnull to distinguish null values from + PQgetisnull to distinguish null values from empty-string values. @@ -3604,8 +3767,8 @@ int PQgetlength(const PGresult *res, This is the actual data length for the particular data value, that is, the size of the object pointed to by PQgetvalue. For text data format this is - the same as strlen(). For binary format this is - essential information. 
Note that one should not + the same as strlen(). For binary format this is + essential information. Note that one should not rely on PQfsize to obtain the actual data length. @@ -3630,7 +3793,7 @@ int PQnparams(const PGresult *res); This function is only useful when inspecting the result of - PQdescribePrepared. For other types of queries it + PQdescribePrepared. For other types of queries it will return zero. @@ -3655,7 +3818,7 @@ Oid PQparamtype(const PGresult *res, int param_number); This function is only useful when inspecting the result of - PQdescribePrepared. For other types of queries it + PQdescribePrepared. For other types of queries it will return zero. @@ -3733,7 +3896,7 @@ char *PQcmdStatus(PGresult *res); Commonly this is just the name of the command, but it might include additional data such as the number of rows processed. The caller should not free the result directly. It will be freed when the - associated PGresult handle is passed to + associated PGresult handle is passed to PQclear. @@ -3757,17 +3920,17 @@ char *PQcmdTuples(PGresult *res); This function returns a string containing the number of rows - affected by the SQL statement that generated the - PGresult. This function can only be used following - the execution of a SELECT, CREATE TABLE AS, - INSERT, UPDATE, DELETE, - MOVE, FETCH, or COPY statement, - or an EXECUTE of a prepared query that contains an - INSERT, UPDATE, or DELETE statement. - If the command that generated the PGresult was anything - else, PQcmdTuples returns an empty string. The caller + affected by the SQL statement that generated the + PGresult. This function can only be used following + the execution of a SELECT, CREATE TABLE AS, + INSERT, UPDATE, DELETE, + MOVE, FETCH, or COPY statement, + or an EXECUTE of a prepared query that contains an + INSERT, UPDATE, or DELETE statement. + If the command that generated the PGresult was anything + else, PQcmdTuples returns an empty string. The caller should not free the return value directly. It will be freed when - the associated PGresult handle is passed to + the associated PGresult handle is passed to PQclear. @@ -3783,14 +3946,14 @@ char *PQcmdTuples(PGresult *res); - Returns the OIDOIDin libpq - of the inserted row, if the SQL command was an - INSERT that inserted exactly one row into a table that - has OIDs, or a EXECUTE of a prepared query containing - a suitable INSERT statement. Otherwise, this function + Returns the OIDOIDin libpq + of the inserted row, if the SQL command was an + INSERT that inserted exactly one row into a table that + has OIDs, or a EXECUTE of a prepared query containing + a suitable INSERT statement. Otherwise, this function returns InvalidOid. This function will also return InvalidOid if the table affected by the - INSERT statement does not contain OIDs. + INSERT statement does not contain OIDs. Oid PQoidValue(const PGresult *res); @@ -3853,19 +4016,19 @@ char *PQescapeLiteral(PGconn *conn, const char *str, size_t length); values as literal constants in SQL commands. Certain characters (such as quotes and backslashes) must be escaped to prevent them from being interpreted specially by the SQL parser. - PQescapeLiteral performs this operation. + PQescapeLiteral performs this operation. - PQescapeLiteral returns an escaped version of the + PQescapeLiteral returns an escaped version of the str parameter in memory allocated with - malloc(). This memory should be freed using - PQfreemem() when the result is no longer needed. + malloc(). 
This memory should be freed using + PQfreemem() when the result is no longer needed. A terminating zero byte is not required, and should not be - counted in length. (If a terminating zero byte is found - before length bytes are processed, - PQescapeLiteral stops at the zero; the behavior is - thus rather like strncpy.) The + counted in length. (If a terminating zero byte is found + before length bytes are processed, + PQescapeLiteral stops at the zero; the behavior is + thus rather like strncpy.) The return string has all special characters replaced so that they can be properly processed by the PostgreSQL string literal parser. A terminating zero byte is also added. The @@ -3874,8 +4037,8 @@ char *PQescapeLiteral(PGconn *conn, const char *str, size_t length); - On error, PQescapeLiteral returns NULL and a suitable - message is stored in the conn object. + On error, PQescapeLiteral returns NULL and a suitable + message is stored in the conn object. @@ -3883,14 +4046,14 @@ char *PQescapeLiteral(PGconn *conn, const char *str, size_t length); It is especially important to do proper escaping when handling strings that were received from an untrustworthy source. Otherwise there is a security risk: you are vulnerable to - SQL injection attacks wherein unwanted SQL commands are + SQL injection attacks wherein unwanted SQL commands are fed to your database. Note that it is not necessary nor correct to do escaping when a data - value is passed as a separate parameter in PQexecParams or + value is passed as a separate parameter in PQexecParams or its sibling routines. @@ -3921,15 +4084,15 @@ char *PQescapeIdentifier(PGconn *conn, const char *str, size_t length); - PQescapeIdentifier returns a version of the + PQescapeIdentifier returns a version of the str parameter escaped as an SQL identifier - in memory allocated with malloc(). This memory must be - freed using PQfreemem() when the result is no longer + in memory allocated with malloc(). This memory must be + freed using PQfreemem() when the result is no longer needed. A terminating zero byte is not required, and should not be - counted in length. (If a terminating zero byte is found - before length bytes are processed, - PQescapeIdentifier stops at the zero; the behavior is - thus rather like strncpy.) The + counted in length. (If a terminating zero byte is found + before length bytes are processed, + PQescapeIdentifier stops at the zero; the behavior is + thus rather like strncpy.) The return string has all special characters replaced so that it will be properly processed as an SQL identifier. A terminating zero byte is also added. The return string will also be surrounded by double @@ -3937,8 +4100,8 @@ char *PQescapeIdentifier(PGconn *conn, const char *str, size_t length); - On error, PQescapeIdentifier returns NULL and a suitable - message is stored in the conn object. + On error, PQescapeIdentifier returns NULL and a suitable + message is stored in the conn object. @@ -3969,39 +4132,39 @@ size_t PQescapeStringConn(PGconn *conn, - PQescapeStringConn escapes string literals, much like - PQescapeLiteral. Unlike PQescapeLiteral, + PQescapeStringConn escapes string literals, much like + PQescapeLiteral. Unlike PQescapeLiteral, the caller is responsible for providing an appropriately sized buffer. 
- Furthermore, PQescapeStringConn does not generate the - single quotes that must surround PostgreSQL string + Furthermore, PQescapeStringConn does not generate the + single quotes that must surround PostgreSQL string literals; they should be provided in the SQL command that the - result is inserted into. The parameter from points to + result is inserted into. The parameter from points to the first character of the string that is to be escaped, and the - length parameter gives the number of bytes in this + length parameter gives the number of bytes in this string. A terminating zero byte is not required, and should not be - counted in length. (If a terminating zero byte is found - before length bytes are processed, - PQescapeStringConn stops at the zero; the behavior is - thus rather like strncpy.) to shall point + counted in length. (If a terminating zero byte is found + before length bytes are processed, + PQescapeStringConn stops at the zero; the behavior is + thus rather like strncpy.) to shall point to a buffer that is able to hold at least one more byte than twice - the value of length, otherwise the behavior is undefined. - Behavior is likewise undefined if the to and - from strings overlap. + the value of length, otherwise the behavior is undefined. + Behavior is likewise undefined if the to and + from strings overlap. - If the error parameter is not NULL, then - *error is set to zero on success, nonzero on error. + If the error parameter is not NULL, then + *error is set to zero on success, nonzero on error. Presently the only possible error conditions involve invalid multibyte encoding in the source string. The output string is still generated on error, but it can be expected that the server will reject it as malformed. On error, a suitable message is stored in the - conn object, whether or not error is NULL. + conn object, whether or not error is NULL. - PQescapeStringConn returns the number of bytes written - to to, not including the terminating zero byte. + PQescapeStringConn returns the number of bytes written + to to, not including the terminating zero byte. @@ -4016,30 +4179,30 @@ size_t PQescapeStringConn(PGconn *conn, - PQescapeString is an older, deprecated version of - PQescapeStringConn. + PQescapeString is an older, deprecated version of + PQescapeStringConn. size_t PQescapeString (char *to, const char *from, size_t length); - The only difference from PQescapeStringConn is that - PQescapeString does not take PGconn - or error parameters. + The only difference from PQescapeStringConn is that + PQescapeString does not take PGconn + or error parameters. Because of this, it cannot adjust its behavior depending on the connection properties (such as character encoding) and therefore - it might give the wrong results. Also, it has no way + it might give the wrong results. Also, it has no way to report error conditions. - PQescapeString can be used safely in - client programs that work with only one PostgreSQL + PQescapeString can be used safely in + client programs that work with only one PostgreSQL connection at a time (in this case it can find out what it needs to - know behind the scenes). In other contexts it is a security + know behind the scenes). In other contexts it is a security hazard and should be avoided in favor of - PQescapeStringConn. + PQescapeStringConn. @@ -4070,7 +4233,7 @@ unsigned char *PQescapeByteaConn(PGconn *conn, bytea literal in an SQL statement. PQescapeByteaConn escapes bytes using either hex encoding or backslash escaping. 
See for more information. + linkend="datatype-binary"/> for more information. @@ -4085,10 +4248,10 @@ unsigned char *PQescapeByteaConn(PGconn *conn, - PQescapeByteaConn returns an escaped version of the + PQescapeByteaConn returns an escaped version of the from parameter binary string in memory - allocated with malloc(). This memory should be freed using - PQfreemem() when the result is no longer needed. The + allocated with malloc(). This memory should be freed using + PQfreemem() when the result is no longer needed. The return string has all special characters replaced so that they can be properly processed by the PostgreSQL string literal parser, and the bytea input function. A @@ -4099,7 +4262,7 @@ unsigned char *PQescapeByteaConn(PGconn *conn, On error, a null pointer is returned, and a suitable error message - is stored in the conn object. Currently, the only + is stored in the conn object. Currently, the only possible error is insufficient memory for the result string. @@ -4115,8 +4278,8 @@ unsigned char *PQescapeByteaConn(PGconn *conn, - PQescapeBytea is an older, deprecated version of - PQescapeByteaConn. + PQescapeBytea is an older, deprecated version of + PQescapeByteaConn. unsigned char *PQescapeBytea(const unsigned char *from, size_t from_length, @@ -4125,15 +4288,15 @@ unsigned char *PQescapeBytea(const unsigned char *from, - The only difference from PQescapeByteaConn is that - PQescapeBytea does not take a PGconn - parameter. Because of this, PQescapeBytea can + The only difference from PQescapeByteaConn is that + PQescapeBytea does not take a PGconn + parameter. Because of this, PQescapeBytea can only be used safely in client programs that use a single - PostgreSQL connection at a time (in this case + PostgreSQL connection at a time (in this case it can find out what it needs to know behind the - scenes). It might give the wrong results if + scenes). It might give the wrong results if used in programs that use multiple database connections (use - PQescapeByteaConn in such cases). + PQescapeByteaConn in such cases). @@ -4164,17 +4327,17 @@ unsigned char *PQunescapeBytea(const unsigned char *from, size_t *to_length); to a bytea column. PQunescapeBytea converts this string representation into its binary representation. It returns a pointer to a buffer allocated with - malloc(), or NULL on error, and puts the size of + malloc(), or NULL on error, and puts the size of the buffer in to_length. The result must be - freed using PQfreemem when it is no longer needed. + freed using PQfreemem when it is no longer needed. This conversion is not exactly the inverse of PQescapeBytea, because the string is not expected - to be escaped when received from PQgetvalue. + to be escaped when received from PQgetvalue. In particular this means there is no need for string quoting considerations, - and so no need for a PGconn parameter. + and so no need for a PGconn parameter. @@ -4268,7 +4431,7 @@ unsigned char *PQunescapeBytea(const unsigned char *from, size_t *to_length); Submits a command to the server without waiting for the result(s). 1 is returned if the command was successfully dispatched and 0 if - not (in which case, use PQerrorMessage to get more + not (in which case, use PQerrorMessage to get more information about the failure). 
int PQsendQuery(PGconn *conn, const char *command); @@ -4318,7 +4481,7 @@ int PQsendQueryParams(PGconn *conn, - PQsendPrepare + PQsendPrepare PQsendPrepare @@ -4336,7 +4499,7 @@ int PQsendPrepare(PGconn *conn, const Oid *paramTypes); - This is an asynchronous version of PQprepare: it + This is an asynchronous version of PQprepare: it returns 1 if it was able to dispatch the request, and 0 if not. After a successful call, call PQgetResult to determine whether the server successfully created the prepared @@ -4383,7 +4546,7 @@ int PQsendQueryPrepared(PGconn *conn, - PQsendDescribePrepared + PQsendDescribePrepared PQsendDescribePrepared @@ -4397,7 +4560,7 @@ int PQsendQueryPrepared(PGconn *conn, int PQsendDescribePrepared(PGconn *conn, const char *stmtName); - This is an asynchronous version of PQdescribePrepared: + This is an asynchronous version of PQdescribePrepared: it returns 1 if it was able to dispatch the request, and 0 if not. After a successful call, call PQgetResult to obtain the results. The function's parameters are handled @@ -4410,7 +4573,7 @@ int PQsendDescribePrepared(PGconn *conn, const char *stmtName); - PQsendDescribePortal + PQsendDescribePortal PQsendDescribePortal @@ -4424,7 +4587,7 @@ int PQsendDescribePrepared(PGconn *conn, const char *stmtName); int PQsendDescribePortal(PGconn *conn, const char *portalName); - This is an asynchronous version of PQdescribePortal: + This is an asynchronous version of PQdescribePortal: it returns 1 if it was able to dispatch the request, and 0 if not. After a successful call, call PQgetResult to obtain the results. The function's parameters are handled @@ -4467,7 +4630,7 @@ PGresult *PQgetResult(PGconn *conn); PQgetResult will just return a null pointer at once.) Each non-null result from PQgetResult should be processed using the - same PGresult accessor functions previously + same PGresult accessor functions previously described. Don't forget to free each result object with PQclear when done with it. Note that PQgetResult will block only if a command is @@ -4479,7 +4642,7 @@ PGresult *PQgetResult(PGconn *conn); Even when PQresultStatus indicates a fatal error, PQgetResult should be called until it - returns a null pointer, to allow libpq to + returns a null pointer, to allow libpq to process the error information completely. @@ -4503,7 +4666,7 @@ PGresult *PQgetResult(PGconn *conn); Another frequently-desired feature that can be obtained with PQsendQuery and PQgetResult is retrieving large query results a row at a time. This is discussed - in . + in . @@ -4584,7 +4747,7 @@ int PQisBusy(PGconn *conn); A typical application using these functions will have a main loop that - uses select() or poll() to wait for + uses select() or poll() to wait for all the conditions that it must respond to. One of the conditions will be input available from the server, which in terms of select() means readable data on the file @@ -4594,15 +4757,15 @@ int PQisBusy(PGconn *conn); call PQisBusy, followed by PQgetResult if PQisBusy returns false (0). It can also call PQnotifies - to detect NOTIFY messages (see ). + to detect NOTIFY messages (see ). A client that uses PQsendQuery/PQgetResult can also attempt to cancel a command that is still being processed - by the server; see . But regardless of + by the server; see . But regardless of the return value of PQcancel, the application must continue with the normal result-reading sequence using PQgetResult. 
A successful cancellation will @@ -4732,12 +4895,12 @@ int PQflush(PGconn *conn); - Ordinarily, libpq collects a SQL command's + Ordinarily, libpq collects a SQL command's entire result and returns it to the application as a single PGresult. This can be unworkable for commands that return a large number of rows. For such cases, applications can use PQsendQuery and PQgetResult in - single-row mode. In this mode, the result row(s) are + single-row mode. In this mode, the result row(s) are returned to the application one at a time, as they are received from the server. @@ -4748,7 +4911,7 @@ int PQflush(PGconn *conn); (or a sibling function). This mode selection is effective only for the currently executing query. Then call PQgetResult repeatedly, until it returns null, as documented in . If the query returns any rows, they are returned + linkend="libpq-async"/>. If the query returns any rows, they are returned as individual PGresult objects, which look like normal query results except for having status code PGRES_SINGLE_TUPLE instead of @@ -4802,7 +4965,7 @@ int PQsetSingleRowMode(PGconn *conn); While processing a query, the server may return some rows and then encounter an error, causing the query to be aborted. Ordinarily, - libpq discards any such rows and reports only the + libpq discards any such rows and reports only the error. But in single-row mode, those rows will have already been returned to the application. Hence, the application will see some PGRES_SINGLE_TUPLE PGresult @@ -4848,10 +5011,10 @@ PGcancel *PQgetCancel(PGconn *conn); PQgetCancel creates a - PGcancelPGcancel object - given a PGconn connection object. It will return - NULL if the given conn is NULL or an invalid - connection. The PGcancel object is an opaque + PGcancelPGcancel object + given a PGconn connection object. It will return + NULL if the given conn is NULL or an invalid + connection. The PGcancel object is an opaque structure that is not meant to be accessed directly by the application; it can only be passed to PQcancel or PQfreeCancel. @@ -4900,9 +5063,9 @@ int PQcancel(PGcancel *cancel, char *errbuf, int errbufsize); The return value is 1 if the cancel request was successfully - dispatched and 0 if not. If not, errbuf is filled - with an explanatory error message. errbuf - must be a char array of size errbufsize (the + dispatched and 0 if not. If not, errbuf is filled + with an explanatory error message. errbuf + must be a char array of size errbufsize (the recommended size is 256 bytes). @@ -4917,11 +5080,11 @@ int PQcancel(PGcancel *cancel, char *errbuf, int errbufsize); PQcancel can safely be invoked from a signal - handler, if the errbuf is a local variable in the - signal handler. The PGcancel object is read-only + handler, if the errbuf is a local variable in the + signal handler. The PGcancel object is read-only as far as PQcancel is concerned, so it can also be invoked from a thread that is separate from the one - manipulating the PGconn object. + manipulating the PGconn object. @@ -4948,12 +5111,12 @@ int PQrequestCancel(PGconn *conn); Requests that the server abandon processing of the current command. It operates directly on the - PGconn object, and in case of failure stores the - error message in the PGconn object (whence it can + PGconn object, and in case of failure stores the + error message in the PGconn object (whence it can be retrieved by PQerrorMessage). 
Although the functionality is the same, this approach creates hazards for multiple-thread programs and signal handlers, since it is possible - that overwriting the PGconn's error message will + that overwriting the PGconn's error message will mess up the operation currently in progress on the connection. @@ -4986,7 +5149,7 @@ int PQrequestCancel(PGconn *conn); - The function PQfnPQfn + The function PQfnPQfn requests execution of a server function via the fast-path interface: PGresult *PQfn(PGconn *conn, @@ -5011,19 +5174,19 @@ typedef struct - The fnid argument is the OID of the function to be - executed. args and nargs define the + The fnid argument is the OID of the function to be + executed. args and nargs define the parameters to be passed to the function; they must match the declared - function argument list. When the isint field of a - parameter structure is true, the u.integer value is sent + function argument list. When the isint field of a + parameter structure is true, the u.integer value is sent to the server as an integer of the indicated length (this must be - 2 or 4 bytes); proper byte-swapping occurs. When isint - is false, the indicated number of bytes at *u.ptr are + 2 or 4 bytes); proper byte-swapping occurs. When isint + is false, the indicated number of bytes at *u.ptr are sent with no processing; the data must be in the format expected by the server for binary transmission of the function's argument data - type. (The declaration of u.ptr as being of - type int * is historical; it would be better to consider - it void *.) + type. (The declaration of u.ptr as being of + type int * is historical; it would be better to consider + it void *.) result_buf points to the buffer in which to place the function's return value. The caller must have allocated sufficient space to store the return value. (There is no check!) The actual result @@ -5031,14 +5194,14 @@ typedef struct result_len. If a 2- or 4-byte integer result is expected, set result_is_int to 1, otherwise set it to 0. Setting result_is_int to 1 causes - libpq to byte-swap the value if necessary, so that it + libpq to byte-swap the value if necessary, so that it is delivered as a proper int value for the client machine; - note that a 4-byte integer is delivered into *result_buf + note that a 4-byte integer is delivered into *result_buf for either allowed result size. - When result_is_int is 0, the binary-format byte string + When result_is_int is 0, the binary-format byte string sent by the server is returned unmodified. (In this case it's better to consider result_buf as being of - type void *.) + type void *.) @@ -5072,7 +5235,7 @@ typedef struct can stop listening with the UNLISTEN command). All sessions listening on a particular channel will be notified asynchronously when a NOTIFY command with that - channel name is executed by any session. A payload string can + channel name is executed by any session. A payload string can be passed to communicate additional data to the listeners. @@ -5082,14 +5245,14 @@ typedef struct and NOTIFY commands as ordinary SQL commands. The arrival of NOTIFY messages can subsequently be detected by calling - PQnotifies.PQnotifies + PQnotifies.PQnotifies The function PQnotifies returns the next notification from a list of unhandled notification messages received from the server. It returns a null pointer if there are no pending notifications. 
Once a - notification is returned from PQnotifies, it is considered + notification is returned from PQnotifies, it is considered handled and will be removed from the list of notifications. @@ -5114,23 +5277,23 @@ typedef struct pgNotify - gives a sample program that illustrates + gives a sample program that illustrates the use of asynchronous notification. PQnotifies does not actually read data from the server; it just returns messages previously absorbed by another - libpq function. In prior releases of + libpq function. In ancient releases of libpq, the only way to ensure timely receipt - of NOTIFY messages was to constantly submit commands, even + of NOTIFY messages was to constantly submit commands, even empty ones, and then check PQnotifies after each PQexec. While this still works, it is deprecated as a waste of processing power. - A better way to check for NOTIFY messages when you have no + A better way to check for NOTIFY messages when you have no useful commands to execute is to call PQconsumeInput, then check PQnotifies. You can use @@ -5168,12 +5331,12 @@ typedef struct pgNotify The overall process is that the application first issues the SQL COPY command via PQexec or one of the equivalent functions. The response to this (if there is no - error in the command) will be a PGresult object bearing + error in the command) will be a PGresult object bearing a status code of PGRES_COPY_OUT or PGRES_COPY_IN (depending on the specified copy direction). The application should then use the functions of this section to receive or transmit data rows. When the data transfer is - complete, another PGresult object is returned to indicate + complete, another PGresult object is returned to indicate success or failure of the transfer. Its status will be PGRES_COMMAND_OK for success or PGRES_FATAL_ERROR if some problem was encountered. @@ -5187,8 +5350,8 @@ typedef struct pgNotify If a COPY command is issued via PQexec in a string that could contain additional commands, the application must continue fetching results via - PQgetResult after completing the COPY - sequence. Only when PQgetResult returns + PQgetResult after completing the COPY + sequence. Only when PQgetResult returns NULL is it certain that the PQexec command string is done and it is safe to issue more commands. @@ -5201,7 +5364,7 @@ typedef struct pgNotify - A PGresult object bearing one of these status values + A PGresult object bearing one of these status values carries some additional data about the COPY operation that is starting. This additional data is available using functions that are also used in connection with query results: @@ -5237,7 +5400,7 @@ typedef struct pgNotify 0 indicates the overall copy format is textual (rows separated by newlines, columns separated by separator characters, etc). 1 indicates the overall copy format is binary. See for more information. + linkend="sql-copy"/> for more information. @@ -5257,7 +5420,7 @@ typedef struct pgNotify each column of the copy operation. The per-column format codes will always be zero when the overall copy format is textual, but the binary format can support both text and binary columns. - (However, as of the current implementation of COPY, + (However, as of the current implementation of COPY, only binary columns appear in a binary copy; so the per-column formats always match the overall format at present.) @@ -5278,8 +5441,8 @@ typedef struct pgNotify These functions are used to send data during COPY FROM - STDIN. 
They will fail if called when the connection is not in - COPY_IN state. + STDIN. They will fail if called when the connection is not in + COPY_IN state. @@ -5293,7 +5456,7 @@ typedef struct pgNotify - Sends data to the server during COPY_IN state. + Sends data to the server during COPY_IN state. int PQputCopyData(PGconn *conn, const char *buffer, @@ -5303,7 +5466,7 @@ int PQputCopyData(PGconn *conn, Transmits the COPY data in the specified - buffer, of length nbytes, to the server. + buffer, of length nbytes, to the server. The result is 1 if the data was queued, zero if it was not queued because of full buffers (this will only happen in nonblocking mode), or -1 if an error occurred. @@ -5317,7 +5480,7 @@ int PQputCopyData(PGconn *conn, into buffer loads of any convenient size. Buffer-load boundaries have no semantic significance when sending. The contents of the data stream must match the data format expected by the - COPY command; see for details. + COPY command; see for details. @@ -5332,7 +5495,7 @@ int PQputCopyData(PGconn *conn, - Sends end-of-data indication to the server during COPY_IN state. + Sends end-of-data indication to the server during COPY_IN state. int PQputCopyEnd(PGconn *conn, const char *errormsg); @@ -5340,14 +5503,14 @@ int PQputCopyEnd(PGconn *conn, - Ends the COPY_IN operation successfully if - errormsg is NULL. If - errormsg is not NULL then the - COPY is forced to fail, with the string pointed to by - errormsg used as the error message. (One should not + Ends the COPY_IN operation successfully if + errormsg is NULL. If + errormsg is not NULL then the + COPY is forced to fail, with the string pointed to by + errormsg used as the error message. (One should not assume that this exact error message will come back from the server, however, as the server might have already failed the - COPY for its own reasons. Also note that the option + COPY for its own reasons. Also note that the option to force failure does not work when using pre-3.0-protocol connections.) @@ -5357,19 +5520,19 @@ int PQputCopyEnd(PGconn *conn, nonblocking mode, this may only indicate that the termination message was successfully queued. (In nonblocking mode, to be certain that the data has been sent, you should next wait for - write-ready and call PQflush, repeating until it + write-ready and call PQflush, repeating until it returns zero.) Zero indicates that the function could not queue the termination message because of full buffers; this will only happen in nonblocking mode. (In this case, wait for - write-ready and try the PQputCopyEnd call + write-ready and try the PQputCopyEnd call again.) If a hard error occurs, -1 is returned; you can use PQerrorMessage to retrieve details. - After successfully calling PQputCopyEnd, call - PQgetResult to obtain the final result status of the - COPY command. One can wait for this result to be + After successfully calling PQputCopyEnd, call + PQgetResult to obtain the final result status of the + COPY command. One can wait for this result to be available in the usual way. Then return to normal operation. @@ -5383,8 +5546,8 @@ int PQputCopyEnd(PGconn *conn, These functions are used to receive data during COPY TO - STDOUT. They will fail if called when the connection is not in - COPY_OUT state. + STDOUT. They will fail if called when the connection is not in + COPY_OUT state. @@ -5398,7 +5561,7 @@ int PQputCopyEnd(PGconn *conn, - Receives data from the server during COPY_OUT state. + Receives data from the server during COPY_OUT state. 
int PQgetCopyData(PGconn *conn, char **buffer, @@ -5411,11 +5574,11 @@ int PQgetCopyData(PGconn *conn, COPY. Data is always returned one data row at a time; if only a partial row is available, it is not returned. Successful return of a data row involves allocating a chunk of - memory to hold the data. The buffer parameter must - be non-NULL. *buffer is set to + memory to hold the data. The buffer parameter must + be non-NULL. *buffer is set to point to the allocated memory, or to NULL in cases where no buffer is returned. A non-NULL result - buffer should be freed using PQfreemem when no longer + buffer should be freed using PQfreemem when no longer needed. @@ -5426,26 +5589,26 @@ int PQgetCopyData(PGconn *conn, probably only useful for textual COPY. A result of zero indicates that the COPY is still in progress, but no row is yet available (this is only possible when - async is true). A result of -1 indicates that the + async is true). A result of -1 indicates that the COPY is done. A result of -2 indicates that an - error occurred (consult PQerrorMessage for the reason). + error occurred (consult PQerrorMessage for the reason). - When async is true (not zero), - PQgetCopyData will not block waiting for input; it + When async is true (not zero), + PQgetCopyData will not block waiting for input; it will return zero if the COPY is still in progress but no complete row is available. (In this case wait for read-ready - and then call PQconsumeInput before calling - PQgetCopyData again.) When async is - false (zero), PQgetCopyData will block until data is + and then call PQconsumeInput before calling + PQgetCopyData again.) When async is + false (zero), PQgetCopyData will block until data is available or the operation completes. - After PQgetCopyData returns -1, call - PQgetResult to obtain the final result status of the - COPY command. One can wait for this result to be + After PQgetCopyData returns -1, call + PQgetResult to obtain the final result status of the + COPY command. One can wait for this result to be available in the usual way. Then return to normal operation. @@ -5458,7 +5621,7 @@ int PQgetCopyData(PGconn *conn, Obsolete Functions for <command>COPY</command> - These functions represent older methods of handling COPY. + These functions represent older methods of handling COPY. Although they still work, they are deprecated due to poor error handling, inconvenient methods of detecting end-of-data, and lack of support for binary or nonblocking transfers. @@ -5476,7 +5639,7 @@ int PQgetCopyData(PGconn *conn, Reads a newline-terminated line of characters (transmitted - by the server) into a buffer string of size length. + by the server) into a buffer string of size length. int PQgetline(PGconn *conn, char *buffer, @@ -5485,7 +5648,7 @@ int PQgetline(PGconn *conn, - This function copies up to length-1 characters into + This function copies up to length-1 characters into the buffer and converts the terminating newline into a zero byte. PQgetline returns EOF at the end of input, 0 if the entire line has been read, and 1 if the @@ -5496,7 +5659,7 @@ int PQgetline(PGconn *conn, of the two characters \., which indicates that the server has finished sending the results of the COPY command. If the application might receive - lines that are more than length-1 characters long, + lines that are more than length-1 characters long, care is needed to be sure it recognizes the \. line correctly (and does not, for example, mistake the end of a long data line for a terminator line). 
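
       For the non-obsolete interface described above,
       PQgetCopyData, a minimal synchronous COPY TO STDOUT
       loop might look like the following sketch. The helper name, the table
       name mytable, and the error handling are illustrative assumptions,
       not part of the documented API.

#include &lt;stdio.h&gt;
#include &lt;libpq-fe.h&gt;

/* Hypothetical helper: stream the output of COPY ... TO STDOUT into a
 * stdio stream using PQgetCopyData.  Returns 0 on success, -1 on failure. */
static int
copy_table_to_stream(PGconn *conn, FILE *out)
{
    PGresult *res;
    char     *buf;
    int       len;
    int       ok;

    res = PQexec(conn, "COPY mytable TO STDOUT");
    if (PQresultStatus(res) != PGRES_COPY_OUT)
    {
        fprintf(stderr, "COPY failed: %s", PQerrorMessage(conn));
        PQclear(res);
        return -1;
    }
    PQclear(res);

    /* async = 0: block until a complete row (or end of data) is available */
    while ((len = PQgetCopyData(conn, &amp;buf, 0)) > 0)
    {
        fwrite(buf, 1, len, out);      /* exactly one data row per call */
        PQfreemem(buf);                /* each row buffer is malloc'd by libpq */
    }
    if (len == -2)
        fprintf(stderr, "COPY error: %s", PQerrorMessage(conn));

    /* len == -1 marks end of data; drain the remaining result(s) as usual */
    ok = (len == -1);
    while ((res = PQgetResult(conn)) != NULL)
    {
        if (PQresultStatus(res) != PGRES_COMMAND_OK)
            ok = 0;
        PQclear(res);
    }
    return ok ? 0 : -1;
}

       Using the blocking form (async = 0) keeps the control flow simple; an
       application that must remain responsive would instead pass a nonzero
       async flag and combine the call with
       PQconsumeInput as described above.
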
@@ -5540,7 +5703,7 @@ int PQgetlineAsync(PGconn *conn, On each call, PQgetlineAsync will return data if a - complete data row is available in libpq's input buffer. + complete data row is available in libpq's input buffer. Otherwise, no data is returned until the rest of the row arrives. The function returns -1 if the end-of-copy-data marker has been recognized, or 0 if no data is available, or a positive number giving the number of @@ -5554,7 +5717,7 @@ int PQgetlineAsync(PGconn *conn, the caller is too small to hold a row sent by the server, then a partial data row will be returned. With textual data this can be detected by testing whether the last returned byte is \n or not. (In a binary - COPY, actual parsing of the COPY data format will be needed to make the + COPY, actual parsing of the COPY data format will be needed to make the equivalent determination.) The returned string is not null-terminated. (If you want to add a terminating null, be sure to pass a bufsize one smaller @@ -5595,7 +5758,7 @@ int PQputline(PGconn *conn, Before PostgreSQL protocol 3.0, it was necessary for the application to explicitly send the two characters \. as a final line to indicate to the server that it had - finished sending COPY data. While this still works, it is deprecated and the + finished sending COPY data. While this still works, it is deprecated and the special meaning of \. can be expected to be removed in a future release. It is sufficient to call PQendcopy after having sent the actual data. @@ -5691,7 +5854,7 @@ int PQendcopy(PGconn *conn); Control Functions - These functions control miscellaneous details of libpq's + These functions control miscellaneous details of libpq's behavior. @@ -5742,7 +5905,7 @@ int PQsetClientEncoding(PGconn *conn, const char *encoding is the encoding you want to use. If the function successfully sets the encoding, it returns 0, otherwise -1. The current encoding for this connection can be - determined by using PQclientEncoding. + determined by using PQclientEncoding. @@ -5758,7 +5921,7 @@ int PQsetClientEncoding(PGconn *conn, const char * Determines the verbosity of messages returned by - PQerrorMessage and PQresultErrorMessage. + PQerrorMessage and PQresultErrorMessage. typedef enum { @@ -5770,15 +5933,15 @@ typedef enum PGVerbosity PQsetErrorVerbosity(PGconn *conn, PGVerbosity verbosity); - PQsetErrorVerbosity sets the verbosity mode, returning - the connection's previous setting. In TERSE mode, + PQsetErrorVerbosity sets the verbosity mode, returning + the connection's previous setting. In TERSE mode, returned messages include severity, primary text, and position only; this will normally fit on a single line. The default mode produces messages that include the above plus any detail, hint, or context - fields (these might span multiple lines). The VERBOSE + fields (these might span multiple lines). The VERBOSE mode includes all available fields. Changing the verbosity does not affect the messages available from already-existing - PGresult objects, only subsequently-created ones. + PGresult objects, only subsequently-created ones. (But see PQresultVerboseErrorMessage if you want to print a previous error with a different verbosity.) @@ -5795,9 +5958,9 @@ PGVerbosity PQsetErrorVerbosity(PGconn *conn, PGVerbosity verbosity); - Determines the handling of CONTEXT fields in messages - returned by PQerrorMessage - and PQresultErrorMessage. + Determines the handling of CONTEXT fields in messages + returned by PQerrorMessage + and PQresultErrorMessage. 
typedef enum { @@ -5809,17 +5972,17 @@ typedef enum PGContextVisibility PQsetErrorContextVisibility(PGconn *conn, PGContextVisibility show_context); - PQsetErrorContextVisibility sets the context display mode, + PQsetErrorContextVisibility sets the context display mode, returning the connection's previous setting. This mode controls whether the CONTEXT field is included in messages - (unless the verbosity setting is TERSE, in which - case CONTEXT is never shown). The NEVER mode - never includes CONTEXT, while ALWAYS always - includes it if available. In ERRORS mode (the - default), CONTEXT fields are included only for error + (unless the verbosity setting is TERSE, in which + case CONTEXT is never shown). The NEVER mode + never includes CONTEXT, while ALWAYS always + includes it if available. In ERRORS mode (the + default), CONTEXT fields are included only for error messages, not for notices and warnings. Changing this mode does not affect the messages available from - already-existing PGresult objects, only + already-existing PGresult objects, only subsequently-created ones. (But see PQresultVerboseErrorMessage if you want to print a previous error with a different display mode.) @@ -5845,9 +6008,9 @@ void PQtrace(PGconn *conn, FILE *stream); - On Windows, if the libpq library and an application are + On Windows, if the libpq library and an application are compiled with different flags, this function call will crash the - application because the internal representation of the FILE + application because the internal representation of the FILE pointers differ. Specifically, multithreaded/single-threaded, release/debug, and static/dynamic flags should be the same for the library and all applications using that library. @@ -5896,25 +6059,25 @@ void PQuntrace(PGconn *conn); - Frees memory allocated by libpq. + Frees memory allocated by libpq. void PQfreemem(void *ptr); - Frees memory allocated by libpq, particularly + Frees memory allocated by libpq, particularly PQescapeByteaConn, PQescapeBytea, PQunescapeBytea, and PQnotifies. It is particularly important that this function, rather than - free(), be used on Microsoft Windows. This is because + free(), be used on Microsoft Windows. This is because allocating memory in a DLL and releasing it in the application works only if multithreaded/single-threaded, release/debug, and static/dynamic flags are the same for the DLL and the application. On non-Microsoft Windows platforms, this function is the same as the standard library - function free(). + function free(). @@ -5930,7 +6093,7 @@ void PQfreemem(void *ptr); Frees the data structures allocated by - PQconndefaults or PQconninfoParse. + PQconndefaults or PQconninfoParse. void PQconninfoFree(PQconninfoOption *connOptions); @@ -5953,44 +6116,44 @@ void PQconninfoFree(PQconninfoOption *connOptions); - Prepares the encrypted form of a PostgreSQL password. + Prepares the encrypted form of a PostgreSQL password. char *PQencryptPasswordConn(PGconn *conn, const char *passwd, const char *user, const char *algorithm); This function is intended to be used by client applications that wish to send commands like ALTER USER joe PASSWORD - 'pwd'. It is good practice not to send the original cleartext + 'pwd'. It is good practice not to send the original cleartext password in such a command, because it might be exposed in command logs, activity displays, and so on. Instead, use this function to convert the password to encrypted form before it is sent. 
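
       A minimal sketch of that pattern is shown below; the role name
       joe, the helper name, and the buffer size are hypothetical, and
       the function's parameters are described in detail just after this
       example.

#include &lt;stdio.h&gt;
#include &lt;libpq-fe.h&gt;

/* Hypothetical helper: change role "joe"'s password without sending the
 * cleartext password in the ALTER USER command itself. */
static int
set_password(PGconn *conn, const char *newpass)
{
    char      cmd[512];
    char     *encrypted;
    PGresult *res;
    int       ok;

    /* algorithm = NULL: libpq asks the server which algorithm to use */
    encrypted = PQencryptPasswordConn(conn, newpass, "joe", NULL);
    if (encrypted == NULL)
    {
        fprintf(stderr, "password encryption failed: %s", PQerrorMessage(conn));
        return -1;
    }

    /* the encrypted string contains no characters that need escaping
     * (see the return-value notes below) */
    snprintf(cmd, sizeof(cmd), "ALTER USER joe PASSWORD '%s'", encrypted);
    PQfreemem(encrypted);

    res = PQexec(conn, cmd);
    ok = (PQresultStatus(res) == PGRES_COMMAND_OK);
    if (!ok)
        fprintf(stderr, "ALTER USER failed: %s", PQerrorMessage(conn));
    PQclear(res);
    return ok ? 0 : -1;
}
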
- The passwd and user arguments + The passwd and user arguments are the cleartext password, and the SQL name of the user it is for. - algorithm specifies the encryption algorithm + algorithm specifies the encryption algorithm to use to encrypt the password. Currently supported algorithms are - md5 and scram-sha-256 (on and - off are also accepted as aliases for md5, for + md5 and scram-sha-256 (on and + off are also accepted as aliases for md5, for compatibility with older server versions). Note that support for - scram-sha-256 was introduced in PostgreSQL + scram-sha-256 was introduced in PostgreSQL version 10, and will not work correctly with older server versions. If - algorithm is NULL, this function will query + algorithm is NULL, this function will query the server for the current value of the - setting. That can block, and + setting. That can block, and will fail if the current transaction is aborted, or if the connection is busy executing another query. If you wish to use the default algorithm for the server but want to avoid blocking, query - password_encryption yourself before calling - PQencryptPasswordConn, and pass that value as the - algorithm. + password_encryption yourself before calling + PQencryptPasswordConn, and pass that value as the + algorithm. - The return value is a string allocated by malloc. + The return value is a string allocated by malloc. The caller can assume the string doesn't contain any special characters - that would require escaping. Use PQfreemem to free the - result when done with it. On error, returns NULL, and + that would require escaping. Use PQfreemem to free the + result when done with it. On error, returns NULL, and a suitable message is stored in the connection object. @@ -6007,14 +6170,14 @@ char *PQencryptPasswordConn(PGconn *conn, const char *passwd, const char *user, - Prepares the md5-encrypted form of a PostgreSQL password. + Prepares the md5-encrypted form of a PostgreSQL password. char *PQencryptPassword(const char *passwd, const char *user); - PQencryptPassword is an older, deprecated version of - PQencryptPasswodConn. The difference is that - PQencryptPassword does not - require a connection object, and md5 is always used as the + PQencryptPassword is an older, deprecated version of + PQencryptPasswodConn. The difference is that + PQencryptPassword does not + require a connection object, and md5 is always used as the encryption algorithm. @@ -6037,18 +6200,18 @@ PGresult *PQmakeEmptyPGresult(PGconn *conn, ExecStatusType status); - This is libpq's internal function to allocate and + This is libpq's internal function to allocate and initialize an empty PGresult object. This - function returns NULL if memory could not be allocated. It is + function returns NULL if memory could not be allocated. It is exported because some applications find it useful to generate result objects (particularly objects with error status) themselves. If - conn is not null and status + conn is not null and status indicates an error, the current error message of the specified connection is copied into the PGresult. Also, if conn is not null, any event procedures registered in the connection are copied into the PGresult. (They do not get - PGEVT_RESULTCREATE calls, but see + PGEVT_RESULTCREATE calls, but see PQfireResultCreateEvents.) 
Note that PQclear should eventually be called on the object, just as with a PGresult @@ -6067,7 +6230,7 @@ PGresult *PQmakeEmptyPGresult(PGconn *conn, ExecStatusType status); Fires a PGEVT_RESULTCREATE event (see ) for each event procedure registered in the + linkend="libpq-events"/>) for each event procedure registered in the PGresult object. Returns non-zero for success, zero if any event procedure fails. @@ -6077,14 +6240,14 @@ int PQfireResultCreateEvents(PGconn *conn, PGresult *res); - The conn argument is passed through to event procedures - but not used directly. It can be NULL if the event + The conn argument is passed through to event procedures + but not used directly. It can be NULL if the event procedures won't use it. Event procedures that have already received a - PGEVT_RESULTCREATE or PGEVT_RESULTCOPY event + PGEVT_RESULTCREATE or PGEVT_RESULTCOPY event for this object are not fired again. @@ -6110,7 +6273,7 @@ int PQfireResultCreateEvents(PGconn *conn, PGresult *res); Makes a copy of a PGresult object. The copy is not linked to the source result in any way and PQclear must be called when the copy is no longer - needed. If the function fails, NULL is returned. + needed. If the function fails, NULL is returned. PGresult *PQcopyResult(const PGresult *src, int flags); @@ -6154,7 +6317,7 @@ int PQsetResultAttrs(PGresult *res, int numAttributes, PGresAttDesc *attDescs); The provided attDescs are copied into the result. - If the attDescs pointer is NULL or + If the attDescs pointer is NULL or numAttributes is less than one, the request is ignored and the function succeeds. If res already contains attributes, the function will fail. If the function @@ -6188,7 +6351,7 @@ int PQsetvalue(PGresult *res, int tup_num, int field_num, char *value, int len); field of any existing tuple can be modified in any order. If a value at field_num already exists, it will be overwritten. If len is -1 or - value is NULL, the field value + value is NULL, the field value will be set to an SQL null value. The value is copied into the result's private storage, thus is no longer needed after the function @@ -6217,9 +6380,35 @@ void *PQresultAlloc(PGresult *res, size_t nBytes); Any memory allocated with this function will be freed when res is cleared. If the function fails, - the return value is NULL. The result is + the return value is NULL. The result is guaranteed to be adequately aligned for any type of data, - just as for malloc. + just as for malloc. + + + + + + + PQresultMemorySize + + PQresultMemorySize + + + + + + Retrieves the number of bytes allocated for + a PGresult object. + +size_t PQresultMemorySize(const PGresult *res); + + + + + This value is the sum of all malloc requests + associated with the PGresult object, that is, + all the space that will be freed by PQclear. + This information can be useful for managing memory consumption. @@ -6235,7 +6424,7 @@ void *PQresultAlloc(PGresult *res, size_t nBytes); - Return the version of libpq that is being used. + Return the version of libpq that is being used. int PQlibVersion(void); @@ -6246,7 +6435,7 @@ int PQlibVersion(void); run time, whether specific functionality is available in the currently loaded version of libpq. The function can be used, for example, to determine which connection options are available in - PQconnectdb. + PQconnectdb. 
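
       A minimal sketch of interpreting the returned value is shown here; the
       exact numbering rules are spelled out in the note that follows, and the
       printed wording is only illustrative.

#include &lt;stdio.h&gt;
#include &lt;libpq-fe.h&gt;

int
main(void)
{
    int version = PQlibVersion();
    /* 10.0 and later: major * 10000 + minor (e.g. 110001);
     * 9.x and earlier: two digits per part (e.g. 90105 for 9.1.5),
     * so divide by 100 to get the logical major version (901, 902, ...). */
    int major = (version >= 100000) ? version / 10000 : version / 100;

    printf("libpq version %d, logical major version %d\n", version, major);
    return 0;
}
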
@@ -6257,17 +6446,17 @@ int PQlibVersion(void); - Prior to major version 10, PostgreSQL used + Prior to major version 10, PostgreSQL used three-part version numbers in which the first two parts together represented the major version. For those - versions, PQlibVersion uses two digits for each + versions, PQlibVersion uses two digits for each part; for example version 9.1.5 will be returned as 90105, and version 9.2.0 will be returned as 90200. Therefore, for purposes of determining feature compatibility, - applications should divide the result of PQlibVersion + applications should divide the result of PQlibVersion by 100 not 10000 to determine a logical major version number. In all release series, only the last two digits differ between minor releases (bug-fix releases). @@ -6275,7 +6464,7 @@ int PQlibVersion(void); - This function appeared in PostgreSQL version 9.1, so + This function appeared in PostgreSQL version 9.1, so it cannot be used to detect required functionality in earlier versions, since calling it will create a link dependency on version 9.1 or later. @@ -6317,12 +6506,12 @@ int PQlibVersion(void); The function PQsetNoticeReceiver - notice receiver - PQsetNoticeReceiver sets or + notice receiver + PQsetNoticeReceiver sets or examines the current notice receiver for a connection object. Similarly, PQsetNoticeProcessor - notice processor - PQsetNoticeProcessor sets or + notice processor + PQsetNoticeProcessor sets or examines the current notice processor. @@ -6353,9 +6542,9 @@ PQsetNoticeProcessor(PGconn *conn, receiver function is called. It is passed the message in the form of a PGRES_NONFATAL_ERROR PGresult. (This allows the receiver to extract - individual fields using PQresultErrorField, or obtain a - complete preformatted message using PQresultErrorMessage - or PQresultVerboseErrorMessage.) The same + individual fields using PQresultErrorField, or obtain a + complete preformatted message using PQresultErrorMessage + or PQresultVerboseErrorMessage.) The same void pointer passed to PQsetNoticeReceiver is also passed. (This pointer can be used to access application-specific state if needed.) @@ -6363,7 +6552,7 @@ PQsetNoticeProcessor(PGconn *conn, The default notice receiver simply extracts the message (using - PQresultErrorMessage) and passes it to the notice + PQresultErrorMessage) and passes it to the notice processor. @@ -6389,10 +6578,10 @@ defaultNoticeProcessor(void *arg, const char *message) Once you have set a notice receiver or processor, you should expect that that function could be called as long as either the - PGconn object or PGresult objects made - from it exist. At creation of a PGresult, the - PGconn's current notice handling pointers are copied - into the PGresult for possible use by functions like + PGconn object or PGresult objects made + from it exist. At creation of a PGresult, the + PGconn's current notice handling pointers are copied + into the PGresult for possible use by functions like PQgetvalue. @@ -6414,21 +6603,21 @@ defaultNoticeProcessor(void *arg, const char *message) Each registered event handler is associated with two pieces of data, - known to libpq only as opaque void * - pointers. There is a passthrough pointer that is provided + known to libpq only as opaque void * + pointers. There is a passthrough pointer that is provided by the application when the event handler is registered with a - PGconn. The passthrough pointer never changes for the - life of the PGconn and all PGresults + PGconn. 
The passthrough pointer never changes for the + life of the PGconn and all PGresults generated from it; so if used, it must point to long-lived data. - In addition there is an instance data pointer, which starts - out NULL in every PGconn and PGresult. + In addition there is an instance data pointer, which starts + out NULL in every PGconn and PGresult. This pointer can be manipulated using the PQinstanceData, PQsetInstanceData, PQresultInstanceData and PQsetResultInstanceData functions. Note that - unlike the passthrough pointer, instance data of a PGconn - is not automatically inherited by PGresults created from + unlike the passthrough pointer, instance data of a PGconn + is not automatically inherited by PGresults created from it. libpq does not know what passthrough and instance data pointers point to (if anything) and will never attempt to free them — that is the responsibility of the event handler. @@ -6438,7 +6627,7 @@ defaultNoticeProcessor(void *arg, const char *message) Event Types - The enum PGEventId names the types of events handled by + The enum PGEventId names the types of events handled by the event system. All its values have names beginning with PGEVT. For each event type, there is a corresponding event info structure that carries the parameters passed to the event @@ -6502,8 +6691,8 @@ typedef struct PGconn was just reset, all event data remains unchanged. This event should be used to reset/reload/requery any associated instanceData. Note that even if the - event procedure fails to process PGEVT_CONNRESET, it will - still receive a PGEVT_CONNDESTROY event when the connection + event procedure fails to process PGEVT_CONNRESET, it will + still receive a PGEVT_CONNDESTROY event when the connection is closed. @@ -6563,7 +6752,7 @@ typedef struct instanceData that needs to be associated with the result. If the event procedure fails, the result will be cleared and the failure will be propagated. The event procedure must not try to - PQclear the result object for itself. When returning a + PQclear the result object for itself. When returning a failure code, all cleanup must be performed as no PGEVT_RESULTDESTROY event will be sent. @@ -6670,7 +6859,7 @@ int eventproc(PGEventId evtId, void *evtInfo, void *passThrough) A particular event procedure can be registered only once in any - PGconn. This is because the address of the procedure + PGconn. This is because the address of the procedure is used as a lookup key to identify the associated instance data. @@ -6679,9 +6868,9 @@ int eventproc(PGEventId evtId, void *evtInfo, void *passThrough) On Windows, functions can have two different addresses: one visible from outside a DLL and another visible from inside the DLL. One should be careful that only one of these addresses is used with - libpq's event-procedure functions, else confusion will + libpq's event-procedure functions, else confusion will result. The simplest rule for writing code that will work is to - ensure that event procedures are declared static. If the + ensure that event procedures are declared static. If the procedure's address must be available outside its own source file, expose a separate function to return the address. @@ -6715,7 +6904,7 @@ int PQregisterEventProc(PGconn *conn, PGEventProc proc, An event procedure must be registered once on each - PGconn you want to receive events about. There is no + PGconn you want to receive events about. There is no limit, other than memory, on the number of event procedures that can be registered with a connection. 
The function returns a non-zero value if it succeeds and zero if it fails. @@ -6726,11 +6915,11 @@ int PQregisterEventProc(PGconn *conn, PGEventProc proc, event is fired. Its memory address is also used to lookup instanceData. The name argument is used to refer to the event procedure in error messages. - This value cannot be NULL or a zero-length string. The name string is - copied into the PGconn, so what is passed need not be + This value cannot be NULL or a zero-length string. The name string is + copied into the PGconn, so what is passed need not be long-lived. The passThrough pointer is passed to the proc whenever an event occurs. This - argument can be NULL. + argument can be NULL. @@ -6744,11 +6933,11 @@ int PQregisterEventProc(PGconn *conn, PGEventProc proc, - Sets the connection conn's instanceData - for procedure proc to data. This + Sets the connection conn's instanceData + for procedure proc to data. This returns non-zero for success and zero for failure. (Failure is - only possible if proc has not been properly - registered in conn.) + only possible if proc has not been properly + registered in conn.) int PQsetInstanceData(PGconn *conn, PGEventProc proc, void *data); @@ -6767,8 +6956,8 @@ int PQsetInstanceData(PGconn *conn, PGEventProc proc, void *data); Returns the - connection conn's instanceData - associated with procedure proc, + connection conn's instanceData + associated with procedure proc, or NULL if there is none. @@ -6787,16 +6976,24 @@ void *PQinstanceData(const PGconn *conn, PGEventProc proc); - Sets the result's instanceData - for proc to data. This returns + Sets the result's instanceData + for proc to data. This returns non-zero for success and zero for failure. (Failure is only - possible if proc has not been properly registered + possible if proc has not been properly registered in the result.) int PQresultSetInstanceData(PGresult *res, PGEventProc proc, void *data); + + + Beware that any storage represented by data + will not be accounted for by PQresultMemorySize, + unless it is allocated using PQresultAlloc. + (Doing so is recommendable because it eliminates the need to free + such storage explicitly when the result is destroyed.) + @@ -6809,7 +7006,7 @@ int PQresultSetInstanceData(PGresult *res, PGEventProc proc, void *data); - Returns the result's instanceData associated with proc, or NULL + Returns the result's instanceData associated with proc, or NULL if there is none. @@ -6849,7 +7046,8 @@ main(void) { mydata *data; PGresult *res; - PGconn *conn = PQconnectdb("dbname = postgres"); + PGconn *conn = + PQconnectdb("dbname=postgres options=-csearch_path="); if (PQstatus(conn) != CONNECTION_OK) { @@ -6965,12 +7163,12 @@ myEventProc(PGEventId evtId, void *evtInfo, void *passThrough) break; } - /* unknown event ID, just return TRUE. */ + /* unknown event ID, just return true. */ default: break; } - return TRUE; /* event processing succeeded */ + return true; /* event processing succeeded */ } ]]> @@ -6987,8 +7185,8 @@ myEventProc(PGEventId evtId, void *evtInfo, void *passThrough) The following environment variables can be used to select default connection parameter values, which will be used by - PQconnectdb, PQsetdbLogin and - PQsetdb if no value is directly specified by the calling + PQconnectdb, PQsetdbLogin and + PQsetdb if no value is directly specified by the calling code. These are useful to avoid hard-coding database connection information into simple client applications, for example. 
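
   As an illustration, the following sketch connects using only
   environment-variable defaults; the variable values shown in the comment and
   the program name are hypothetical.

#include &lt;stdio.h&gt;
#include &lt;libpq-fe.h&gt;

/* Run as, e.g. (all values hypothetical):
 *   PGHOST=db.example.com PGDATABASE=testdb PGUSER=joe ./envconnect
 */
int
main(void)
{
    /* an empty conninfo string: every parameter falls back to its
     * environment variable and then to the built-in default */
    PGconn *conn = PQconnectdb("");

    if (PQstatus(conn) != CONNECTION_OK)
    {
        fprintf(stderr, "connection failed: %s", PQerrorMessage(conn));
        PQfinish(conn);
        return 1;
    }
    printf("connected to database \"%s\"\n", PQdb(conn));
    PQfinish(conn);
    return 0;
}

   Passing an empty conninfo string to PQconnectdb means that
   every parameter is taken from the corresponding environment variable if it
   is set, and from the built-in default otherwise.
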
@@ -6999,7 +7197,7 @@ myEventProc(PGEventId evtId, void *evtInfo, void *passThrough) PGHOST PGHOST behaves the same as the connection parameter. + linkend="libpq-connect-host"/> connection parameter. @@ -7009,7 +7207,7 @@ myEventProc(PGEventId evtId, void *evtInfo, void *passThrough) PGHOSTADDR PGHOSTADDR behaves the same as the connection parameter. + linkend="libpq-connect-hostaddr"/> connection parameter. This can be set instead of or in addition to PGHOST to avoid DNS lookup overhead. @@ -7021,7 +7219,7 @@ myEventProc(PGEventId evtId, void *evtInfo, void *passThrough) PGPORT PGPORT behaves the same as the connection parameter. + linkend="libpq-connect-port"/> connection parameter. @@ -7031,7 +7229,7 @@ myEventProc(PGEventId evtId, void *evtInfo, void *passThrough) PGDATABASE PGDATABASE behaves the same as the connection parameter. + linkend="libpq-connect-dbname"/> connection parameter. @@ -7041,7 +7239,7 @@ myEventProc(PGEventId evtId, void *evtInfo, void *passThrough) PGUSER PGUSER behaves the same as the connection parameter. + linkend="libpq-connect-user"/> connection parameter. @@ -7051,12 +7249,12 @@ myEventProc(PGEventId evtId, void *evtInfo, void *passThrough) PGPASSWORD PGPASSWORD behaves the same as the connection parameter. + linkend="libpq-connect-password"/> connection parameter. Use of this environment variable is not recommended for security reasons, as some operating systems allow non-root users to see process environment variables via - ps; instead consider using a password file - (see ). + ps; instead consider using a password file + (see ). @@ -7066,7 +7264,7 @@ myEventProc(PGEventId evtId, void *evtInfo, void *passThrough) PGPASSFILE PGPASSFILE behaves the same as the connection parameter. + linkend="libpq-connect-passfile"/> connection parameter. @@ -7076,7 +7274,7 @@ myEventProc(PGEventId evtId, void *evtInfo, void *passThrough) PGSERVICE PGSERVICE behaves the same as the connection parameter. + linkend="libpq-connect-service"/> connection parameter. @@ -7087,8 +7285,8 @@ myEventProc(PGEventId evtId, void *evtInfo, void *passThrough) PGSERVICEFILE specifies the name of the per-user connection service file. If not set, it defaults - to ~/.pg_service.conf - (see ). + to ~/.pg_service.conf + (see ). @@ -7098,7 +7296,7 @@ myEventProc(PGEventId evtId, void *evtInfo, void *passThrough) PGOPTIONS PGOPTIONS behaves the same as the connection parameter. + linkend="libpq-connect-options"/> connection parameter. @@ -7108,7 +7306,7 @@ myEventProc(PGEventId evtId, void *evtInfo, void *passThrough) PGAPPNAME PGAPPNAME behaves the same as the connection parameter. + linkend="libpq-connect-application-name"/> connection parameter. @@ -7118,7 +7316,7 @@ myEventProc(PGEventId evtId, void *evtInfo, void *passThrough) PGSSLMODE PGSSLMODE behaves the same as the connection parameter. + linkend="libpq-connect-sslmode"/> connection parameter. @@ -7128,7 +7326,7 @@ myEventProc(PGEventId evtId, void *evtInfo, void *passThrough) PGREQUIRESSL PGREQUIRESSL behaves the same as the connection parameter. + linkend="libpq-connect-requiressl"/> connection parameter. This environment variable is deprecated in favor of the PGSSLMODE variable; setting both variables suppresses the effect of this one. @@ -7141,7 +7339,7 @@ myEventProc(PGEventId evtId, void *evtInfo, void *passThrough) PGSSLCOMPRESSION PGSSLCOMPRESSION behaves the same as the connection parameter. + linkend="libpq-connect-sslcompression"/> connection parameter. 
@@ -7151,7 +7349,7 @@ myEventProc(PGEventId evtId, void *evtInfo, void *passThrough) PGSSLCERT PGSSLCERT behaves the same as the connection parameter. + linkend="libpq-connect-sslcert"/> connection parameter. @@ -7161,7 +7359,7 @@ myEventProc(PGEventId evtId, void *evtInfo, void *passThrough) PGSSLKEY PGSSLKEY behaves the same as the connection parameter. + linkend="libpq-connect-sslkey"/> connection parameter. @@ -7171,7 +7369,7 @@ myEventProc(PGEventId evtId, void *evtInfo, void *passThrough) PGSSLROOTCERT PGSSLROOTCERT behaves the same as the connection parameter. + linkend="libpq-connect-sslrootcert"/> connection parameter. @@ -7181,7 +7379,7 @@ myEventProc(PGEventId evtId, void *evtInfo, void *passThrough) PGSSLCRL PGSSLCRL behaves the same as the connection parameter. + linkend="libpq-connect-sslcrl"/> connection parameter. @@ -7191,7 +7389,7 @@ myEventProc(PGEventId evtId, void *evtInfo, void *passThrough) PGREQUIREPEER PGREQUIREPEER behaves the same as the connection parameter. + linkend="libpq-connect-requirepeer"/> connection parameter. @@ -7201,7 +7399,7 @@ myEventProc(PGEventId evtId, void *evtInfo, void *passThrough) PGKRBSRVNAME PGKRBSRVNAME behaves the same as the connection parameter. + linkend="libpq-connect-krbsrvname"/> connection parameter. @@ -7211,7 +7409,7 @@ myEventProc(PGEventId evtId, void *evtInfo, void *passThrough) PGGSSLIB PGGSSLIB behaves the same as the connection parameter. + linkend="libpq-connect-gsslib"/> connection parameter. @@ -7221,7 +7419,7 @@ myEventProc(PGEventId evtId, void *evtInfo, void *passThrough) PGCONNECT_TIMEOUT PGCONNECT_TIMEOUT behaves the same as the connection parameter. + linkend="libpq-connect-connect-timeout"/> connection parameter. @@ -7231,7 +7429,7 @@ myEventProc(PGEventId evtId, void *evtInfo, void *passThrough) PGCLIENTENCODING PGCLIENTENCODING behaves the same as the connection parameter. + linkend="libpq-connect-client-encoding"/> connection parameter. @@ -7241,7 +7439,7 @@ myEventProc(PGEventId evtId, void *evtInfo, void *passThrough) PGTARGETSESSIONATTRS PGTARGETSESSIONATTRS behaves the same as the connection parameter. + linkend="libpq-connect-target-session-attrs"/> connection parameter. @@ -7250,8 +7448,8 @@ myEventProc(PGEventId evtId, void *evtInfo, void *passThrough) The following environment variables can be used to specify default behavior for each PostgreSQL session. (See - also the - and + also the + and commands for ways to set default behavior on a per-user or per-database basis.) @@ -7288,7 +7486,7 @@ myEventProc(PGEventId evtId, void *evtInfo, void *passThrough) - Refer to the SQL command + Refer to the SQL command for information on correct values for these environment variables. @@ -7304,7 +7502,7 @@ myEventProc(PGEventId evtId, void *evtInfo, void *passThrough) PGSYSCONFDIR PGSYSCONFDIR sets the directory containing the - pg_service.conf file and in a future version + pg_service.conf file and in a future version possibly other system-wide configuration files. @@ -7315,7 +7513,7 @@ myEventProc(PGEventId evtId, void *evtInfo, void *passThrough) PGLOCALEDIR PGLOCALEDIR sets the directory containing the - locale files for message localization. + locale files for message localization. @@ -7339,11 +7537,11 @@ myEventProc(PGEventId evtId, void *evtInfo, void *passThrough) contain passwords to be used if the connection requires a password (and no password has been specified otherwise). 
On Microsoft Windows the file is named - %APPDATA%\postgresql\pgpass.conf (where - %APPDATA% refers to the Application Data subdirectory in + %APPDATA%\postgresql\pgpass.conf (where + %APPDATA% refers to the Application Data subdirectory in the user's profile). Alternatively, a password file can be specified - using the connection parameter + using the connection parameter or the environment variable PGPASSFILE. @@ -7353,20 +7551,25 @@ myEventProc(PGEventId evtId, void *evtInfo, void *passThrough) hostname:port:database:username:password (You can add a reminder comment to the file by copying the line above and - preceding it with #.) + preceding it with #.) Each of the first four fields can be a literal value, or *, which matches anything. The password field from the first line that matches the current connection parameters will be used. (Therefore, put more-specific entries first when you are using wildcards.) If an entry needs to contain : or \, escape this character with \. - A host name of localhost matches both TCP (host name - localhost) and Unix domain socket (pghost empty - or the default socket directory) connections coming from the local - machine. In a standby server, a database name of replication + The host name field is matched to the host connection + parameter if that is specified, otherwise to + the hostaddr parameter if that is specified; if neither + are given then the host name localhost is searched for. + The host name localhost is also searched for when + the connection is a Unix-domain socket connection and + the host parameter + matches libpq's default socket directory path. + In a standby server, a database field of replication matches streaming replication connections made to the master server. - The database field is of limited usefulness because - users have the same password for all databases in the same cluster. + The database field is of limited usefulness otherwise, because users have + the same password for all databases in the same cluster. @@ -7417,7 +7620,7 @@ myEventProc(PGEventId evtId, void *evtInfo, void *passThrough) The file uses an INI file format where the section name is the service name and the parameters are connection - parameters; see for a list. For + parameters; see for a list. For example: # comment @@ -7451,7 +7654,7 @@ user=admin LDAP connection parameter lookup uses the connection service file pg_service.conf (see ). A line in a + linkend="libpq-pgservice"/>). A line in a pg_service.conf stanza that starts with ldap:// will be recognized as an LDAP URL and an LDAP query will be performed. The result must be a list of @@ -7521,17 +7724,17 @@ ldap://ldap.acme.com/cn=dbserver,cn=hosts?pgconnectinfo?base?(objectclass=*) - PostgreSQL has native support for using SSL + PostgreSQL has native support for using SSL connections to encrypt client/server communications for increased - security. See for details about the server-side - SSL functionality. + security. See for details about the server-side + SSL functionality. libpq reads the system-wide OpenSSL configuration file. By default, this file is named openssl.cnf and is located in the - directory reported by openssl version -d. This default + directory reported by openssl version -d. This default can be overridden by setting environment variable OPENSSL_CONF to the name of the desired configuration file. 
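   Returning to the password file and connection service file described above,
   the following sketch shows how a client can reduce its conninfo string to a
   single service name. The stanza in the comment and the service name
   mydb are illustrations only; if a password is required, it is
   then looked up in the password file as usual.

/*
 * Illustrative ~/.pg_service.conf stanza (the service name "mydb" and the
 * parameter values are placeholders):
 *
 *     [mydb]
 *     host=somehost
 *     port=5433
 *     user=admin
 */
#include <stdio.h>
#include "libpq-fe.h"

int
main(void)
{
	PGconn	   *conn = PQconnectdb("service=mydb");

	if (PQstatus(conn) != CONNECTION_OK)
		fprintf(stderr, "Connection failed: %s", PQerrorMessage(conn));
	PQfinish(conn);
	return 0;
}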
@@ -7541,44 +7744,63 @@ ldap://ldap.acme.com/cn=dbserver,cn=hosts?pgconnectinfo?base?(objectclass=*) Client Verification of Server Certificates - By default, PostgreSQL will not perform any verification of + By default, PostgreSQL will not perform any verification of the server certificate. This means that it is possible to spoof the server identity (for example by modifying a DNS record or by taking over the server IP address) without the client knowing. In order to prevent spoofing, - SSL certificate verification must be used. + the client must be able to verify the server's identity via a chain of + trust. A chain of trust is established by placing a root (self-signed) + certificate authority (CA) certificate on one + computer and a leaf certificate signed by the + root certificate on another computer. It is also possible to use an + intermediate certificate which is signed by the root + certificate and signs leaf certificates. + + + + To allow the client to verify the identity of the server, place a root + certificate on the client and a leaf certificate signed by the root + certificate on the server. To allow the server to verify the identity + of the client, place a root certificate on the server and a leaf + certificate signed by the root certificate on the client. One or more + intermediate certificates (usually stored with the leaf certificate) + can also be used to link the leaf certificate to the root certificate. - If the parameter sslmode is set to verify-ca, + Once a chain of trust has been established, there are two ways for + the client to validate the leaf certificate sent by the server. + If the parameter sslmode is set to verify-ca, libpq will verify that the server is trustworthy by checking the - certificate chain up to a trusted certificate authority - (CA). If sslmode is set to verify-full, - libpq will also verify that the server host name matches its - certificate. The SSL connection will fail if the server certificate cannot - be verified. verify-full is recommended in most + certificate chain up to the root certificate stored on the client. + If sslmode is set to verify-full, + libpq will also verify that the server host + name matches the name stored in the server certificate. The + SSL connection will fail if the server certificate cannot be + verified. verify-full is recommended in most security-sensitive environments. - In verify-full mode, the host name is matched against the + In verify-full mode, the host name is matched against the certificate's Subject Alternative Name attribute(s), or against the Common Name attribute if no Subject Alternative Name of type dNSName is present. If the certificate's name attribute starts with an asterisk - (*), the asterisk will be treated as - a wildcard, which will match all characters except a dot - (.). This means the certificate will not match subdomains. + (*), the asterisk will be treated as + a wildcard, which will match all characters except a dot + (.). This means the certificate will not match subdomains. If the connection is made using an IP address instead of a host name, the IP address will be matched (without doing any DNS lookups). - To allow server certificate verification, the certificate(s) of one or more - trusted CAs must be - placed in the file ~/.postgresql/root.crt in the user's home - directory. If intermediate CAs appear in - root.crt, the file must also contain certificate - chains to their root CAs. (On Microsoft Windows the file is named - %APPDATA%\postgresql\root.crt.) 
+ To allow server certificate verification, one or more root certificates + must be placed in the file ~/.postgresql/root.crt + in the user's home directory. (On Microsoft Windows the file is named + %APPDATA%\postgresql\root.crt.) Intermediate + certificates should also be added to the file if they are needed to link + the certificate chain sent by the server to the root certificates + stored on the client. @@ -7591,8 +7813,8 @@ ldap://ldap.acme.com/cn=dbserver,cn=hosts?pgconnectinfo?base?(objectclass=*) The location of the root certificate file and the CRL can be changed by setting - the connection parameters sslrootcert and sslcrl - or the environment variables PGSSLROOTCERT and PGSSLCRL. + the connection parameters sslrootcert and sslcrl + or the environment variables PGSSLROOTCERT and PGSSLCRL. @@ -7612,12 +7834,13 @@ ldap://ldap.acme.com/cn=dbserver,cn=hosts?pgconnectinfo?base?(objectclass=*) Client Certificates - If the server requests a trusted client certificate, - libpq will send the certificate stored in - file ~/.postgresql/postgresql.crt in the user's home - directory. The certificate must be signed by one of the certificate - authorities (CA) trusted by the server. A matching - private key file ~/.postgresql/postgresql.key must also + If the server attempts to verify the identity of the + client by requesting the client's leaf certificate, + libpq will send the certificates stored in + file ~/.postgresql/postgresql.crt in the user's home + directory. The certificates must chain to the root certificate trusted + by the server. A matching + private key file ~/.postgresql/postgresql.key must also be present. The private key file must not allow any access to world or group; achieve this by the command chmod 0600 ~/.postgresql/postgresql.key. @@ -7626,35 +7849,29 @@ ldap://ldap.acme.com/cn=dbserver,cn=hosts?pgconnectinfo?base?(objectclass=*) %APPDATA%\postgresql\postgresql.key, and there is no special permissions check since the directory is presumed secure. The location of the certificate and key files can be overridden by the - connection parameters sslcert and sslkey or the - environment variables PGSSLCERT and PGSSLKEY. + connection parameters sslcert and sslkey or the + environment variables PGSSLCERT and PGSSLKEY. - In some cases, the client certificate might be signed by an - intermediate certificate authority, rather than one that is - directly trusted by the server. To use such a certificate, append the - certificate of the signing authority to the postgresql.crt - file, then its parent authority's certificate, and so on up to a certificate - authority, root or intermediate, that is trusted by - the server, i.e. signed by a certificate in the server's - root.crt file. + The first certificate in postgresql.crt must be the + client's certificate because it must match the client's private key. + Intermediate certificates can be optionally appended + to the file — doing so avoids requiring storage of intermediate + certificates on the server (). - Note that the client's ~/.postgresql/root.crt lists the top-level CAs - that are considered trusted for signing server certificates. In principle it need - not list the CA that signed the client's certificate, though in most cases - that CA would also be trusted for server certificates. + For instructions on creating certificates, see . - Protection Provided in Different Modes - The different values for the sslmode parameter provide different + The different values for the sslmode parameter provide different levels of protection. 
SSL can provide protection against three types of attacks: @@ -7664,23 +7881,23 @@ ldap://ldap.acme.com/cn=dbserver,cn=hosts?pgconnectinfo?base?(objectclass=*) If a third party can examine the network traffic between the client and the server, it can read both connection information (including - the user name and password) and the data that is passed. SSL + the user name and password) and the data that is passed. SSL uses encryption to prevent this. - Man in the middle (MITM) + Man in the middle (MITM) If a third party can modify the data while passing between the client and server, it can pretend to be the server and therefore see and - modify data even if it is encrypted. The third party can then + modify data even if it is encrypted. The third party can then forward the connection information and data to the original server, making it impossible to detect this attack. Common vectors to do this include DNS poisoning and address hijacking, whereby the client is directed to a different server than intended. There are also several other - attack methods that can accomplish this. SSL uses certificate + attack methods that can accomplish this. SSL uses certificate verification to prevent this, by authenticating the server to the client. @@ -7691,7 +7908,7 @@ ldap://ldap.acme.com/cn=dbserver,cn=hosts?pgconnectinfo?base?(objectclass=*) If a third party can pretend to be an authorized client, it can simply access data it should not have access to. Typically this can - happen through insecure password management. SSL uses + happen through insecure password management. SSL uses client certificates to prevent this, by making sure that only holders of valid certificates can access the server. @@ -7702,15 +7919,15 @@ ldap://ldap.acme.com/cn=dbserver,cn=hosts?pgconnectinfo?base?(objectclass=*) For a connection to be known secure, SSL usage must be configured - on both the client and the server before the connection + on both the client and the server before the connection is made. If it is only configured on the server, the client may end up sending sensitive information (e.g. passwords) before it knows that the server requires high security. In libpq, secure connections can be ensured - by setting the sslmode parameter to verify-full or - verify-ca, and providing the system with a root certificate to - verify against. This is analogous to using an https - URL for encrypted web browsing. + by setting the sslmode parameter to verify-full or + verify-ca, and providing the system with a root certificate to + verify against. This is analogous to using an https + URL for encrypted web browsing. @@ -7721,10 +7938,10 @@ ldap://ldap.acme.com/cn=dbserver,cn=hosts?pgconnectinfo?base?(objectclass=*) - All SSL options carry overhead in the form of encryption and + All SSL options carry overhead in the form of encryption and key-exchange, so there is a trade-off that has to be made between performance - and security. - illustrates the risks the different sslmode values + and security. + illustrates the risks the different sslmode values protect against, and what statement they make about security and overhead. 
@@ -7733,16 +7950,16 @@ ldap://ldap.acme.com/cn=dbserver,cn=hosts?pgconnectinfo?base?(objectclass=*) - sslmode + sslmode Eavesdropping protection - MITM protection + MITM protection Statement - disable + disable No No I don't care about security, and I don't want to pay the overhead @@ -7751,7 +7968,7 @@ ldap://ldap.acme.com/cn=dbserver,cn=hosts?pgconnectinfo?base?(objectclass=*) - allow + allow Maybe No I don't care about security, but I will pay the overhead of @@ -7760,7 +7977,7 @@ ldap://ldap.acme.com/cn=dbserver,cn=hosts?pgconnectinfo?base?(objectclass=*) - prefer + prefer Maybe No I don't care about encryption, but I wish to pay the overhead of @@ -7769,7 +7986,7 @@ ldap://ldap.acme.com/cn=dbserver,cn=hosts?pgconnectinfo?base?(objectclass=*) - require + require Yes No I want my data to be encrypted, and I accept the overhead. I trust @@ -7778,16 +7995,16 @@ ldap://ldap.acme.com/cn=dbserver,cn=hosts?pgconnectinfo?base?(objectclass=*) - verify-ca + verify-ca Yes - Depends on CA-policy + Depends on CA-policy I want my data encrypted, and I accept the overhead. I want to be sure that I connect to a server that I trust. - verify-full + verify-full Yes Yes I want my data encrypted, and I accept the overhead. I want to be @@ -7801,17 +8018,17 @@ ldap://ldap.acme.com/cn=dbserver,cn=hosts?pgconnectinfo?base?(objectclass=*)
- The difference between verify-ca and verify-full - depends on the policy of the root CA. If a public - CA is used, verify-ca allows connections to a server - that somebody else may have registered with the CA. - In this case, verify-full should always be used. If - a local CA is used, or even a self-signed certificate, using - verify-ca often provides enough protection. + The difference between verify-ca and verify-full + depends on the policy of the root CA. If a public + CA is used, verify-ca allows connections to a server + that somebody else may have registered with the CA. + In this case, verify-full should always be used. If + a local CA is used, or even a self-signed certificate, using + verify-ca often provides enough protection. - The default value for sslmode is prefer. As is shown + The default value for sslmode is prefer. As is shown in the table, this makes no sense from a security point of view, and it only promises performance overhead if possible. It is only provided as the default for backward compatibility, and is not recommended in secure deployments. @@ -7823,7 +8040,7 @@ ldap://ldap.acme.com/cn=dbserver,cn=hosts?pgconnectinfo?base?(objectclass=*) SSL Client File Usage - summarizes the files that are + summarizes the files that are relevant to the SSL setup on the client. @@ -7841,27 +8058,27 @@ ldap://ldap.acme.com/cn=dbserver,cn=hosts?pgconnectinfo?base?(objectclass=*) - ~/.postgresql/postgresql.crt + ~/.postgresql/postgresql.crt client certificate requested by server - ~/.postgresql/postgresql.key + ~/.postgresql/postgresql.key client private key proves client certificate sent by owner; does not indicate certificate owner is trustworthy - ~/.postgresql/root.crt + ~/.postgresql/root.crt trusted certificate authorities checks that server certificate is signed by a trusted certificate authority - ~/.postgresql/root.crl + ~/.postgresql/root.crl certificates revoked by certificate authorities server certificate must not be on this list @@ -7875,16 +8092,15 @@ ldap://ldap.acme.com/cn=dbserver,cn=hosts?pgconnectinfo?base?(objectclass=*) SSL Library Initialization - If your application initializes libssl and/or - libcrypto libraries and libpq - is built with SSL support, you should call - PQinitOpenSSL to tell libpq - that the libssl and/or libcrypto libraries + If your application initializes libssl and/or + libcrypto libraries and libpq + is built with SSL support, you should call + PQinitOpenSSL to tell libpq + that the libssl and/or libcrypto libraries have been initialized by your application, so that libpq will not also initialize those libraries. - See + url="http://h41379.www4.hpe.com/doc/83final/ba554_90007/ch04.html"> for details on the SSL API. @@ -7907,18 +8123,18 @@ void PQinitOpenSSL(int do_ssl, int do_crypto); - When do_ssl is non-zero, libpq - will initialize the OpenSSL library before first - opening a database connection. When do_crypto is - non-zero, the libcrypto library will be initialized. By - default (if PQinitOpenSSL is not called), both libraries + When do_ssl is non-zero, libpq + will initialize the OpenSSL library before first + opening a database connection. When do_crypto is + non-zero, the libcrypto library will be initialized. By + default (if PQinitOpenSSL is not called), both libraries are initialized. When SSL support is not compiled in, this function is present but does nothing. 
- If your application uses and initializes either OpenSSL - or its underlying libcrypto library, you must + If your application uses and initializes either OpenSSL + or its underlying libcrypto library, you must call this function with zeroes for the appropriate parameter(s) before first opening a database connection. Also be sure that you have done that initialization before opening a database connection. @@ -7944,15 +8160,15 @@ void PQinitSSL(int do_ssl); This function is equivalent to - PQinitOpenSSL(do_ssl, do_ssl). + PQinitOpenSSL(do_ssl, do_ssl). It is sufficient for applications that initialize both or neither - of OpenSSL and libcrypto. + of OpenSSL and libcrypto. - PQinitSSL has been present since - PostgreSQL 8.0, while PQinitOpenSSL - was added in PostgreSQL 8.4, so PQinitSSL + PQinitSSL has been present since + PostgreSQL 8.0, while PQinitOpenSSL + was added in PostgreSQL 8.4, so PQinitSSL might be preferable for applications that need to work with older versions of libpq. @@ -7979,8 +8195,8 @@ void PQinitSSL(int do_ssl); options when you compile your application code. Refer to your system's documentation for information about how to build thread-enabled applications, or look in - src/Makefile.global for PTHREAD_CFLAGS - and PTHREAD_LIBS. This function allows the querying of + src/Makefile.global for PTHREAD_CFLAGS + and PTHREAD_LIBS. This function allows the querying of libpq's thread-safe status: @@ -8012,18 +8228,18 @@ int PQisthreadsafe(); One thread restriction is that no two threads attempt to manipulate - the same PGconn object at the same time. In particular, + the same PGconn object at the same time. In particular, you cannot issue concurrent commands from different threads through the same connection object. (If you need to run concurrent commands, use multiple connections.) - PGresult objects are normally read-only after creation, + PGresult objects are normally read-only after creation, and so can be passed around freely between threads. However, if you use - any of the PGresult-modifying functions described in - or , it's up - to you to avoid concurrent operations on the same PGresult, + any of the PGresult-modifying functions described in + or , it's up + to you to avoid concurrent operations on the same PGresult, too. @@ -8040,14 +8256,14 @@ int PQisthreadsafe(); If you are using Kerberos inside your application (in addition to inside libpq), you will need to do locking around Kerberos calls because Kerberos functions are not thread-safe. See - function PQregisterThreadLock in the + function PQregisterThreadLock in the libpq source code for a way to do cooperative locking between libpq and your application.
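   Beyond such locking, the practical consequence of the rules above is one
   PGconn per thread (or a pool managed by the application). A
   minimal sketch using POSIX threads follows; the connection string and the
   query are placeholders, and real code would check each result status.

#include <stdio.h>
#include <pthread.h>
#include "libpq-fe.h"

static char conninfo[] = "dbname=postgres";	/* placeholder */

static void *
worker(void *arg)
{
	/* Each thread uses its own PGconn; connections are never shared. */
	PGconn	   *conn = PQconnectdb(conninfo);

	(void) arg;
	if (PQstatus(conn) == CONNECTION_OK)
	{
		PGresult   *res = PQexec(conn, "SELECT 1");	/* placeholder query */

		PQclear(res);
	}
	PQfinish(conn);
	return NULL;
}

int
main(void)
{
	pthread_t	threads[4];
	int			i;

	if (!PQisthreadsafe())
	{
		fprintf(stderr, "this libpq build is not thread-safe\n");
		return 1;
	}
	for (i = 0; i < 4; i++)
		pthread_create(&threads[i], NULL, worker, NULL);
	for (i = 0; i < 4; i++)
		pthread_join(threads[i], NULL);
	return 0;
}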
If you experience problems with threaded applications, run the program - in src/tools/thread to see if your platform has + in src/tools/thread to see if your platform has thread-unsafe functions. This program is run by configure, but for binary distributions your library might not match the library used to build the binaries. @@ -8090,7 +8306,7 @@ foo.c:95: `PGRES_TUPLES_OK' undeclared (first use in this function) - Point your compiler to the directory where the PostgreSQL header + Point your compiler to the directory where the PostgreSQL header files were installed, by supplying the -Idirectory option to your compiler. (In some cases the compiler will look into @@ -8111,8 +8327,8 @@ CPPFLAGS += -I/usr/local/pgsql/include If there is any chance that your program might be compiled by other users then you should not hardcode the directory location like that. Instead, you can run the utility - pg_configpg_configwith libpq to find out where the header + pg_configpg_configwith libpq to find out where the header files are on the local system: $ pg_config --includedir @@ -8213,13 +8429,16 @@ testlibpq.o(.text+0xa4): undefined reference to `PQerrorMessage' #include -#include +#include "libpq-fe.h" static void exit_nicely(PGconn *conn) @@ -8259,6 +8478,22 @@ main(int argc, char **argv) exit_nicely(conn); } + /* Set always-secure search path, so malicious users can't take control. */ + res = PQexec(conn, + "SELECT pg_catalog.set_config('search_path', '', false)"); + if (PQresultStatus(res) != PGRES_TUPLES_OK) + { + fprintf(stderr, "SET failed: %s", PQerrorMessage(conn)); + PQclear(res); + exit_nicely(conn); + } + + /* + * Should PQclear PGresult whenever it is no longer needed to avoid memory + * leaks + */ + PQclear(res); + /* * Our test case here involves using a cursor, for which we must be inside * a transaction block. We could do the whole thing with a single @@ -8274,11 +8509,6 @@ main(int argc, char **argv) PQclear(res); exit_nicely(conn); } - - /* - * Should PQclear PGresult whenever it is no longer needed to avoid memory - * leaks - */ PQclear(res); /* @@ -8340,6 +8570,9 @@ main(int argc, char **argv) #include #include +#ifdef HAVE_SYS_SELECT_H +#include +#endif + #include "libpq-fe.h" static void @@ -8411,6 +8648,22 @@ main(int argc, char **argv) exit_nicely(conn); } + /* Set always-secure search path, so malicious users can't take control. */ + res = PQexec(conn, + "SELECT pg_catalog.set_config('search_path', '', false)"); + if (PQresultStatus(res) != PGRES_TUPLES_OK) + { + fprintf(stderr, "SET failed: %s", PQerrorMessage(conn)); + PQclear(res); + exit_nicely(conn); + } + + /* + * Should PQclear PGresult whenever it is no longer needed to avoid memory + * leaks + */ + PQclear(res); + /* * Issue LISTEN command to enable notifications from the rule's NOTIFY. */ @@ -8421,11 +8674,6 @@ main(int argc, char **argv) PQclear(res); exit_nicely(conn); } - - /* - * should PQclear PGresult whenever it is no longer needed to avoid memory - * leaks - */ PQclear(res); /* Quit after four notifies are received. */ @@ -8463,6 +8711,7 @@ main(int argc, char **argv) notify->relname, notify->be_pid); PQfreemem(notify); nnotifies++; + PQconsumeInput(conn); } } @@ -8483,14 +8732,18 @@ main(int argc, char **argv) - The lo module provides support for managing Large Objects - (also called LOs or BLOBs). This includes a data type lo - and a trigger lo_manage. + The lo module provides support for managing Large Objects + (also called LOs or BLOBs). This includes a data type lo + and a trigger lo_manage. 
@@ -24,7 +24,7 @@ - As PostgreSQL stands, this doesn't occur. Large objects + As PostgreSQL stands, this doesn't occur. Large objects are treated as objects in their own right; a table entry can reference a large object by OID, but there can be multiple table entries referencing the same large object OID, so the system doesn't delete the large object @@ -32,30 +32,30 @@ - Now this is fine for PostgreSQL-specific applications, but + Now this is fine for PostgreSQL-specific applications, but standard code using JDBC or ODBC won't delete the objects, resulting in orphan objects — objects that are not referenced by anything, and simply occupy disk space. - The lo module allows fixing this by attaching a trigger + The lo module allows fixing this by attaching a trigger to tables that contain LO reference columns. The trigger essentially just - does a lo_unlink whenever you delete or modify a value + does a lo_unlink whenever you delete or modify a value referencing a large object. When you use this trigger, you are assuming that there is only one database reference to any large object that is referenced in a trigger-controlled column! - The module also provides a data type lo, which is really just - a domain of the oid type. This is useful for differentiating + The module also provides a data type lo, which is really just + a domain of the oid type. This is useful for differentiating database columns that hold large object references from those that are - OIDs of other things. You don't have to use the lo type to + OIDs of other things. You don't have to use the lo type to use the trigger, but it may be convenient to use it to keep track of which columns in your database represent large objects that you are managing with the trigger. It is also rumored that the ODBC driver gets confused if you - don't use lo for BLOB columns. + don't use lo for BLOB columns. @@ -67,19 +67,19 @@ -CREATE TABLE image (title TEXT, raster lo); +CREATE TABLE image (title text, raster lo); CREATE TRIGGER t_raster BEFORE UPDATE OR DELETE ON image - FOR EACH ROW EXECUTE PROCEDURE lo_manage(raster); + FOR EACH ROW EXECUTE FUNCTION lo_manage(raster); For each column that will contain unique references to large objects, - create a BEFORE UPDATE OR DELETE trigger, and give the column + create a BEFORE UPDATE OR DELETE trigger, and give the column name as the sole trigger argument. You can also restrict the trigger to only execute on updates to the column by using BEFORE UPDATE OF column_name. - If you need multiple lo + If you need multiple lo columns in the same table, create a separate trigger for each one, remembering to give a different name to each trigger on the same table. @@ -93,18 +93,18 @@ CREATE TRIGGER t_raster BEFORE UPDATE OR DELETE ON image Dropping a table will still orphan any objects it contains, as the trigger is not executed. You can avoid this by preceding the DROP - TABLE with DELETE FROM table. + TABLE with DELETE FROM table. - TRUNCATE has the same hazard. + TRUNCATE has the same hazard. If you already have, or suspect you have, orphaned large objects, see the - module to help - you clean them up. It's a good idea to run vacuumlo - occasionally as a back-stop to the lo_manage trigger. + module to help + you clean them up. It's a good idea to run vacuumlo + occasionally as a back-stop to the lo_manage trigger.
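   As a small illustration of the DELETE-before-DROP advice above, a libpq
   client could proceed as in the following sketch. The table name
   image matches the earlier example, the connection string is a
   placeholder, and error handling is reduced to a minimum.

#include <stdio.h>
#include "libpq-fe.h"

static int
run(PGconn *conn, const char *sql)
{
	PGresult   *res = PQexec(conn, sql);
	int			ok = (PQresultStatus(res) == PGRES_COMMAND_OK);

	if (!ok)
		fprintf(stderr, "\"%s\" failed: %s", sql, PQerrorMessage(conn));
	PQclear(res);
	return ok;
}

int
main(void)
{
	PGconn	   *conn = PQconnectdb("dbname=postgres");	/* placeholder */

	if (PQstatus(conn) == CONNECTION_OK)
	{
		/*
		 * Deleting the rows first lets the lo_manage trigger run lo_unlink
		 * for each referenced large object; only then is the table dropped.
		 */
		if (run(conn, "DELETE FROM image"))
			run(conn, "DROP TABLE image");
	}
	PQfinish(conn);
	return 0;
}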
diff --git a/doc/src/sgml/lobj.sgml b/doc/src/sgml/lobj.sgml index 7757e1e441..4bcce4f5ed 100644 --- a/doc/src/sgml/lobj.sgml +++ b/doc/src/sgml/lobj.sgml @@ -1,13 +1,13 @@ - + Large Objects - large object - BLOBlarge object + large object + BLOBlarge object - PostgreSQL has a large object + PostgreSQL has a large object facility, which provides stream-style access to user data that is stored in a special large-object structure. Streaming access is useful when working with data values that are too large to manipulate @@ -76,15 +76,15 @@ of 1000000 bytes worth of storage; only of chunks covering the range of data bytes actually written. A read operation will, however, read out zeroes for any unallocated locations preceding the last existing chunk. - This corresponds to the common behavior of sparsely allocated + This corresponds to the common behavior of sparsely allocated files in Unix file systems. - As of PostgreSQL 9.0, large objects have an owner + As of PostgreSQL 9.0, large objects have an owner and a set of access permissions, which can be managed using - and - . + and + . SELECT privileges are required to read a large object, and UPDATE privileges are required to write or @@ -92,7 +92,7 @@ Only the large object's owner (or a database superuser) can delete, comment on, or change the owner of a large object. To adjust this behavior for compatibility with prior releases, see the - run-time parameter. + run-time parameter. @@ -101,7 +101,7 @@ This section describes the facilities that - PostgreSQL's libpq + PostgreSQL's libpq client interface library provides for accessing large objects. The PostgreSQL large object interface is modeled after the Unix file-system interface, with @@ -121,7 +121,7 @@ If an error occurs while executing any one of these functions, the function will return an otherwise-impossible value, typically 0 or -1. A message describing the error is stored in the connection object and - can be retrieved with PQerrorMessage. + can be retrieved with PQerrorMessage. @@ -134,7 +134,7 @@ Creating a Large Object - lo_creat + lo_creat The function Oid lo_creat(PGconn *conn, int mode); @@ -147,7 +147,7 @@ Oid lo_creat(PGconn *conn, int mode); ignored as of PostgreSQL 8.1; however, for backward compatibility with earlier releases it is best to set it to INV_READ, INV_WRITE, - or INV_READ | INV_WRITE. + or INV_READ | INV_WRITE. (These symbolic constants are defined in the header file libpq/libpq-fs.h.) @@ -160,7 +160,7 @@ inv_oid = lo_creat(conn, INV_READ|INV_WRITE); - lo_create + lo_create The function Oid lo_create(PGconn *conn, Oid lobjId); @@ -169,14 +169,14 @@ Oid lo_create(PGconn *conn, Oid lobjId); specified by lobjId; if so, failure occurs if that OID is already in use for some large object. If lobjId - is InvalidOid (zero) then lo_create assigns an unused - OID (this is the same behavior as lo_creat). + is InvalidOid (zero) then lo_create assigns an unused + OID (this is the same behavior as lo_creat). The return value is the OID that was assigned to the new large object, or InvalidOid (zero) on failure. - lo_create is new as of PostgreSQL + lo_create is new as of PostgreSQL 8.1; if this function is run against an older server version, it will fail and return InvalidOid. 
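   A minimal client-side sketch of the creation functions described above; as
   in the testlo.c example later in this chapter, the calls are
   wrapped in a transaction block. The connection string is a placeholder.

#include <stdio.h>
#include "libpq-fe.h"
#include "libpq/libpq-fs.h"		/* for INV_READ / INV_WRITE */

int
main(void)
{
	PGconn	   *conn = PQconnectdb("dbname=postgres");	/* placeholder */
	PGresult   *res;
	Oid			lobj_oid;

	if (PQstatus(conn) != CONNECTION_OK)
	{
		fprintf(stderr, "%s", PQerrorMessage(conn));
		return 1;
	}

	/* Large object operations are done inside a transaction block. */
	res = PQexec(conn, "begin");
	PQclear(res);

	/* Let the server pick the OID for the new large object. */
	lobj_oid = lo_creat(conn, INV_READ | INV_WRITE);
	if (lobj_oid == InvalidOid)
		fprintf(stderr, "lo_creat failed: %s", PQerrorMessage(conn));
	else
		printf("created large object %u\n", lobj_oid);

	res = PQexec(conn, "end");
	PQclear(res);
	PQfinish(conn);
	return 0;
}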
@@ -193,7 +193,7 @@ inv_oid = lo_create(conn, desired_oid); Importing a Large Object - lo_import + lo_import To import an operating system file as a large object, call Oid lo_import(PGconn *conn, const char *filename); @@ -209,7 +209,7 @@ Oid lo_import(PGconn *conn, const char *filename); - lo_import_with_oid + lo_import_with_oid The function Oid lo_import_with_oid(PGconn *conn, const char *filename, Oid lobjId); @@ -218,14 +218,14 @@ Oid lo_import_with_oid(PGconn *conn, const char *filename, Oid lobjId); specified by lobjId; if so, failure occurs if that OID is already in use for some large object. If lobjId - is InvalidOid (zero) then lo_import_with_oid assigns an unused - OID (this is the same behavior as lo_import). + is InvalidOid (zero) then lo_import_with_oid assigns an unused + OID (this is the same behavior as lo_import). The return value is the OID that was assigned to the new large object, or InvalidOid (zero) on failure. - lo_import_with_oid is new as of PostgreSQL + lo_import_with_oid is new as of PostgreSQL 8.4 and uses lo_create internally which is new in 8.1; if this function is run against 8.0 or before, it will fail and return InvalidOid. @@ -235,7 +235,7 @@ Oid lo_import_with_oid(PGconn *conn, const char *filename, Oid lobjId); Exporting a Large Object - lo_export + lo_export To export a large object into an operating system file, call @@ -253,14 +253,14 @@ int lo_export(PGconn *conn, Oid lobjId, const char *filename); Opening an Existing Large Object - lo_open + lo_open To open an existing large object for reading or writing, call int lo_open(PGconn *conn, Oid lobjId, int mode); The lobjId argument specifies the OID of the large object to open. The mode bits control whether the - object is opened for reading (INV_READ), writing + object is opened for reading (INV_READ), writing (INV_WRITE), or both. (These symbolic constants are defined in the header file libpq/libpq-fs.h.) @@ -277,19 +277,31 @@ int lo_open(PGconn *conn, Oid lobjId, int mode); The server currently does not distinguish between modes - INV_WRITE and INV_READ | + INV_WRITE and INV_READ | INV_WRITE: you are allowed to read from the descriptor in either case. However there is a significant difference between - these modes and INV_READ alone: with INV_READ + these modes and INV_READ alone: with INV_READ you cannot write on the descriptor, and the data read from it will reflect the contents of the large object at the time of the transaction - snapshot that was active when lo_open was executed, + snapshot that was active when lo_open was executed, regardless of later writes by this or other transactions. Reading from a descriptor opened with INV_WRITE returns data that reflects all writes of other committed transactions as well as writes of the current transaction. This is similar to the behavior - of REPEATABLE READ versus READ COMMITTED transaction - modes for ordinary SQL SELECT commands. + of REPEATABLE READ versus READ COMMITTED transaction + modes for ordinary SQL SELECT commands. + + + + lo_open will fail if SELECT + privilege is not available for the large object, or + if INV_WRITE is specified and UPDATE + privilege is not available. + (Prior to PostgreSQL 11, these privilege + checks were instead performed at the first actual read or write call + using the descriptor.) + These privilege checks can be disabled with the + run-time parameter. 
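   For instance, a round trip using the client-side import and export functions
   described above might look like the following sketch; the file paths and the
   connection string are placeholders.

#include <stdio.h>
#include "libpq-fe.h"

int
main(void)
{
	PGconn	   *conn = PQconnectdb("dbname=postgres");	/* placeholder */
	PGresult   *res;
	Oid			lobj_oid;

	if (PQstatus(conn) != CONNECTION_OK)
		return 1;

	res = PQexec(conn, "begin");
	PQclear(res);

	/* Import a client-side file as a new large object... */
	lobj_oid = lo_import(conn, "/tmp/motd");	/* placeholder path */
	if (lobj_oid == InvalidOid)
		fprintf(stderr, "lo_import failed: %s", PQerrorMessage(conn));
	/* ...and write it back out to another client-side file. */
	else if (lo_export(conn, lobj_oid, "/tmp/motd.out") < 0)
		fprintf(stderr, "lo_export failed: %s", PQerrorMessage(conn));

	res = PQexec(conn, "end");
	PQclear(res);
	PQfinish(conn);
	return 0;
}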
@@ -304,14 +316,14 @@ inv_fd = lo_open(conn, inv_oid, INV_READ|INV_WRITE); Writing Data to a Large Object - lo_write + lo_write The function int lo_write(PGconn *conn, int fd, const char *buf, size_t len); writes len bytes from buf (which must be of size len) to large object - descriptor fd. The fd argument must + descriptor fd. The fd argument must have been returned by a previous lo_open. The number of bytes actually written is returned (in the current implementation, this will always equal len unless @@ -320,8 +332,8 @@ int lo_write(PGconn *conn, int fd, const char *buf, size_t len); Although the len parameter is declared as - size_t, this function will reject length values larger than - INT_MAX. In practice, it's best to transfer data in chunks + size_t, this function will reject length values larger than + INT_MAX. In practice, it's best to transfer data in chunks of at most a few megabytes anyway. @@ -330,7 +342,7 @@ int lo_write(PGconn *conn, int fd, const char *buf, size_t len); Reading Data from a Large Object - lo_read + lo_read The function int lo_read(PGconn *conn, int fd, char *buf, size_t len); @@ -347,8 +359,8 @@ int lo_read(PGconn *conn, int fd, char *buf, size_t len); Although the len parameter is declared as - size_t, this function will reject length values larger than - INT_MAX. In practice, it's best to transfer data in chunks + size_t, this function will reject length values larger than + INT_MAX. In practice, it's best to transfer data in chunks of at most a few megabytes anyway. @@ -357,7 +369,7 @@ int lo_read(PGconn *conn, int fd, char *buf, size_t len); Seeking in a Large Object - lo_lseek + lo_lseek To change the current read or write location associated with a large object descriptor, call @@ -365,16 +377,16 @@ int lo_lseek(PGconn *conn, int fd, int offset, int whence); This function moves the current location pointer for the large object descriptor identified by - fd to the new location specified by - offset. The valid values for whence - are SEEK_SET (seek from object start), - SEEK_CUR (seek from current position), and - SEEK_END (seek from object end). The return value is + fd to the new location specified by + offset. The valid values for whence + are SEEK_SET (seek from object start), + SEEK_CUR (seek from current position), and + SEEK_END (seek from object end). The return value is the new location pointer, or -1 on error. - lo_lseek64 + lo_lseek64 When dealing with large objects that might exceed 2GB in size, instead use @@ -382,14 +394,14 @@ pg_int64 lo_lseek64(PGconn *conn, int fd, pg_int64 offset, int whence); This function has the same behavior as lo_lseek, but it can accept an - offset larger than 2GB and/or deliver a result larger + offset larger than 2GB and/or deliver a result larger than 2GB. Note that lo_lseek will fail if the new location pointer would be greater than 2GB. - lo_lseek64 is new as of PostgreSQL + lo_lseek64 is new as of PostgreSQL 9.3. If this function is run against an older server version, it will fail and return -1. 
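   Putting the descriptor-oriented functions together, a sketch such as the
   following could be used. It assumes the caller is already inside a
   transaction block and that lobj_oid names an existing large
   object; error handling is abbreviated.

#include <stdio.h>
#include <string.h>
#include "libpq-fe.h"
#include "libpq/libpq-fs.h"

/*
 * Write a short string into an existing large object, then read it back.
 * "conn" must be inside a transaction block and "lobj_oid" must name an
 * existing large object; both are assumptions of this sketch.
 */
static void
roundtrip(PGconn *conn, Oid lobj_oid)
{
	char		buf[64];
	int			fd = lo_open(conn, lobj_oid, INV_READ | INV_WRITE);

	if (fd < 0)
	{
		fprintf(stderr, "lo_open failed: %s", PQerrorMessage(conn));
		return;
	}
	if (lo_write(conn, fd, "hello", 5) != 5)
		fprintf(stderr, "lo_write failed: %s", PQerrorMessage(conn));

	/* Rewind to the start before reading the bytes back. */
	if (lo_lseek(conn, fd, 0, SEEK_SET) < 0)
		fprintf(stderr, "lo_lseek failed: %s", PQerrorMessage(conn));

	memset(buf, 0, sizeof(buf));
	if (lo_read(conn, fd, buf, 5) == 5)
		printf("read back: %s\n", buf);

	lo_close(conn, fd);
}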
@@ -400,7 +412,7 @@ pg_int64 lo_lseek64(PGconn *conn, int fd, pg_int64 offset, int whence); Obtaining the Seek Position of a Large Object - lo_tell + lo_tell To obtain the current read or write location of a large object descriptor, call @@ -410,7 +422,7 @@ int lo_tell(PGconn *conn, int fd); - lo_tell64 + lo_tell64 When dealing with large objects that might exceed 2GB in size, instead use @@ -424,7 +436,7 @@ pg_int64 lo_tell64(PGconn *conn, int fd); - lo_tell64 is new as of PostgreSQL + lo_tell64 is new as of PostgreSQL 9.3. If this function is run against an older server version, it will fail and return -1. @@ -434,15 +446,15 @@ pg_int64 lo_tell64(PGconn *conn, int fd); Truncating a Large Object - lo_truncate + lo_truncate To truncate a large object to a given length, call int lo_truncate(PGcon *conn, int fd, size_t len); This function truncates the large object - descriptor fd to length len. The + descriptor fd to length len. The fd argument must have been returned by a - previous lo_open. If len is + previous lo_open. If len is greater than the large object's current length, the large object is extended to the specified length with null bytes ('\0'). On success, lo_truncate returns @@ -456,12 +468,12 @@ int lo_truncate(PGcon *conn, int fd, size_t len); Although the len parameter is declared as - size_t, lo_truncate will reject length - values larger than INT_MAX. + size_t, lo_truncate will reject length + values larger than INT_MAX. - lo_truncate64 + lo_truncate64 When dealing with large objects that might exceed 2GB in size, instead use @@ -469,17 +481,17 @@ int lo_truncate64(PGcon *conn, int fd, pg_int64 len); This function has the same behavior as lo_truncate, but it can accept a - len value exceeding 2GB. + len value exceeding 2GB. - lo_truncate is new as of PostgreSQL + lo_truncate is new as of PostgreSQL 8.3; if this function is run against an older server version, it will fail and return -1. - lo_truncate64 is new as of PostgreSQL + lo_truncate64 is new as of PostgreSQL 9.3; if this function is run against an older server version, it will fail and return -1. @@ -489,12 +501,12 @@ int lo_truncate64(PGcon *conn, int fd, pg_int64 len); Closing a Large Object Descriptor - lo_close + lo_close A large object descriptor can be closed by calling int lo_close(PGconn *conn, int fd); - where fd is a + where fd is a large object descriptor returned by lo_open. On success, lo_close returns zero. On error, the return value is -1. @@ -510,7 +522,7 @@ int lo_close(PGconn *conn, int fd); Removing a Large Object - lo_unlink + lo_unlink To remove a large object from the database, call int lo_unlink(PGconn *conn, Oid lobjId); @@ -527,7 +539,7 @@ int lo_unlink(PGconn *conn, Oid lobjId); Server-side functions tailored for manipulating large objects from SQL are - listed in . + listed in . @@ -554,9 +566,9 @@ int lo_unlink(PGconn *conn, Oid lobjId); oid Create a large object and store data there, returning its OID. - Pass 0 to have the system choose an OID. + Pass 0 to have the system choose an OID. - lo_from_bytea(0, E'\\xffffff00') + lo_from_bytea(0, '\xffffff00')24528 @@ -571,7 +583,7 @@ int lo_unlink(PGconn *conn, Oid lobjId); Write data at the given offset. - lo_put(24528, 1, E'\\xaa') + lo_put(24528, 1, '\xaa') @@ -599,11 +611,11 @@ int lo_unlink(PGconn *conn, Oid lobjId); client-side functions described earlier; indeed, for the most part the client-side functions are simply interfaces to the equivalent server-side functions. 
The ones just as convenient to call via SQL commands are - lo_creatlo_creat, + lo_creatlo_creat, lo_create, - lo_unlinklo_unlink, - lo_importlo_import, and - lo_exportlo_export. + lo_unlinklo_unlink, + lo_importlo_import, and + lo_exportlo_export. Here are examples of their use: @@ -634,18 +646,40 @@ SELECT lo_export(image.raster, '/tmp/motd') FROM image lo_export functions behave considerably differently from their client-side analogs. These two functions read and write files in the server's file system, using the permissions of the database's - owning user. Therefore, their use is restricted to superusers. In - contrast, the client-side import and export functions read and write files - in the client's file system, using the permissions of the client program. - The client-side functions do not require superuser privilege. + owning user. Therefore, by default their use is restricted to superusers. + In contrast, the client-side import and export functions read and write + files in the client's file system, using the permissions of the client + program. The client-side functions do not require any database + privileges, except the privilege to read or write the large object in + question. + + + It is possible to use of the + server-side lo_import + and lo_export functions to non-superusers, but + careful consideration of the security implications is required. A + malicious user of such privileges could easily parlay them into becoming + superuser (for example by rewriting server configuration files), or could + attack the rest of the server's file system without bothering to obtain + database superuser privileges as such. Access to roles having + such privilege must therefore be guarded just as carefully as access to + superuser roles. Nonetheless, if use of + server-side lo_import + or lo_export is needed for some routine task, it's + safer to use a role with such privileges than one with full superuser + privileges, as that helps to reduce the risk of damage from accidental + errors. + + + The functionality of lo_read and lo_write is also available via server-side calls, but the names of the server-side functions differ from the client side interfaces in that they do not contain underscores. You must call - these functions as loread and lowrite. + these functions as loread and lowrite. @@ -654,9 +688,9 @@ SELECT lo_export(image.raster, '/tmp/motd') FROM image Example Program - is a sample program which shows how the large object + is a sample program which shows how the large object interface - in libpq can be used. Parts of the program are + in libpq can be used. Parts of the program are commented out but are left in the source for the reader's benefit. This program can also be found in src/test/examples/testlo.c in the source distribution. @@ -670,7 +704,7 @@ SELECT lo_export(image.raster, '/tmp/motd') FROM image * testlo.c * test using large objects with libpq * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * @@ -899,6 +933,17 @@ main(int argc, char **argv) exit_nicely(conn); } + /* Set always-secure search path, so malicious users can't take control. 
*/ + res = PQexec(conn, + "SELECT pg_catalog.set_config('search_path', '', false)"); + if (PQresultStatus(res) != PGRES_TUPLES_OK) + { + fprintf(stderr, "SET failed: %s", PQerrorMessage(conn)); + PQclear(res); + exit_nicely(conn); + } + PQclear(res); + res = PQexec(conn, "begin"); PQclear(res); printf("importing file \"%s\" ...\n", in_filename); diff --git a/doc/src/sgml/logical-replication.sgml b/doc/src/sgml/logical-replication.sgml index fa0bb56b7b..3f2f674a1a 100644 --- a/doc/src/sgml/logical-replication.sgml +++ b/doc/src/sgml/logical-replication.sgml @@ -8,7 +8,7 @@ changes, based upon their replication identity (usually a primary key). We use the term logical in contrast to physical replication, which uses exact block addresses and byte-by-byte replication. PostgreSQL supports both - mechanisms concurrently, see . Logical + mechanisms concurrently, see . Logical replication allows fine-grained control over both data replication and security. @@ -64,6 +64,13 @@ + + + Replicating between PostgreSQL instances on different platforms (for + example Linux to Windows) + + + Giving access to replicated data to different groups of users. @@ -108,8 +115,8 @@ Publications can choose to limit the changes they produce to - any combination of INSERT, UPDATE, and - DELETE, similar to how triggers are fired by + any combination of INSERT, UPDATE, + DELETE, and TRUNCATE, similar to how triggers are fired by particular event types. By default, all operation types are replicated. @@ -126,7 +133,7 @@ fallback if no other solution is possible. If a replica identity other than full is set on the publisher side, a replica identity comprising the same or fewer columns must also be set on the subscriber - side. See for details on + side. See for details on how to set the replica identity. If a table without a replica identity is added to a publication that replicates UPDATE or DELETE operations then @@ -140,13 +147,13 @@ - A publication is created using the + A publication is created using the command and may later be altered or dropped using corresponding commands. The individual tables can be added and removed dynamically using - . Both the ADD + . Both the ADD TABLE and DROP TABLE operations are transactional; so the table will start or stop replicating at the correct snapshot once the transaction has committed. @@ -179,14 +186,14 @@ Each subscription will receive changes via one replication slot (see - ). Additional temporary + ). Additional temporary replication slots may be required for the initial data synchronization of pre-existing table data. A logical replication subscription can be a standby for synchronous - replication (see ). The standby + replication (see ). The standby name is by default the subscription name. An alternative name can be specified as application_name in the connection information of the subscription. @@ -200,10 +207,10 @@ - The subscription is added using and + The subscription is added using and can be stopped/resumed at any time using the - command and removed using - . + command and removed using + . @@ -366,16 +373,23 @@ - TRUNCATE commands are not replicated. This can, of - course, be worked around by using DELETE instead. To - avoid accidental TRUNCATE invocations, you can revoke - the TRUNCATE privilege from tables. + Replication of TRUNCATE commands is supported, but + some care must be taken when truncating groups of tables connected by + foreign keys. 
When replicating a truncate action, the subscriber will + truncate the same group of tables that was truncated on the publisher, + either explicitly specified or implicitly collected via + CASCADE, minus tables that are not part of the + subscription. This will work correctly if all affected tables are part + of the same subscription. But if some tables to be truncated on the + subscriber have foreign-key links to tables that are not part of the same + (or any) subscription, then the application of the truncate action on the + subscriber will fail. - Large objects (see ) are not replicated. + Large objects (see ) are not replicated. There is no workaround for that, other than storing data in normal tables. @@ -409,13 +423,13 @@ Logical replication is built with an architecture similar to physical - streaming replication (see ). It is + streaming replication (see ). It is implemented by walsender and apply processes. The walsender process starts logical decoding (described - in ) of the WAL and loads the standard + in ) of the WAL and loads the standard logical decoding plugin (pgoutput). The plugin transforms the changes read from WAL to the logical replication protocol - (see ) and filters the data + (see ) and filters the data according to the publication specification. The data is then continuously transferred using the streaming replication protocol to the apply worker, which maps the data to local tables and applies the individual changes as @@ -461,7 +475,7 @@ physical streaming replication, the monitoring on a publication node is similar to monitoring of a physical replication master - (see ). + (see ). @@ -486,7 +500,14 @@ The role used for the replication connection must have the REPLICATION attribute (or be a superuser). Access for the role must be - configured in pg_hba.conf. + configured in pg_hba.conf and it must have the + LOGIN attribute. + + + + In order to be able to copy the initial table data, the role used for the + replication connection must have the SELECT privilege on + a published table (or be a superuser). diff --git a/doc/src/sgml/logicaldecoding.sgml b/doc/src/sgml/logicaldecoding.sgml index 8dcfc6c742..8db968641e 100644 --- a/doc/src/sgml/logicaldecoding.sgml +++ b/doc/src/sgml/logicaldecoding.sgml @@ -24,17 +24,17 @@ by INSERT and the new row version created by UPDATE. Availability of old row versions for UPDATE and DELETE depends on - the configured replica identity (see ). + the configured replica identity (see ). Changes can be consumed either using the streaming replication protocol - (see and - ), or by calling functions - via SQL (see ). It is also possible + (see and + ), or by calling functions + via SQL (see ). It is also possible to write additional methods of consuming the output of a replication slot without modifying core code - (see ). + (see ). @@ -47,8 +47,8 @@ Before you can use logical decoding, you must set - to logical and - to at least 1. Then, you + to logical and + to at least 1. Then, you should connect to the target database (in the example below, postgres) as a superuser. @@ -146,24 +146,24 @@ postgres=# SELECT pg_drop_replication_slot('regression_slot'); The following example shows how logical decoding is controlled over the streaming replication protocol, using the - program included in the PostgreSQL + program included in the PostgreSQL distribution. 
This requires that client authentication is set up to allow replication connections - (see ) and + (see ) and that max_wal_senders is set sufficiently high to allow an additional connection. -$ pg_recvlogical -d postgres --slot test --create-slot -$ pg_recvlogical -d postgres --slot test --start -f - -ControlZ +$ pg_recvlogical -d postgres --slot=test --create-slot +$ pg_recvlogical -d postgres --slot=test --start -f - +ControlZ $ psql -d postgres -c "INSERT INTO data(data) VALUES('4');" $ fg BEGIN 693 table public.data: INSERT: id[integer]:4 data[text]:'4' COMMIT 693 -ControlC -$ pg_recvlogical -d postgres --slot test --drop-slot +ControlC +$ pg_recvlogical -d postgres --slot=test --drop-slot @@ -208,7 +208,7 @@ $ pg_recvlogical -d postgres --slot test --drop-slot PostgreSQL also has streaming replication slots - (see ), but they are used somewhat + (see ), but they are used somewhat differently there. @@ -248,16 +248,18 @@ $ pg_recvlogical -d postgres --slot test --drop-slot may consume changes from a slot at any given time. - + Replication slots persist across crashes and know nothing about the state of their consumer(s). They will prevent removal of required resources even when there is no connection using them. This consumes storage because neither required WAL nor required rows from the system catalogs can be removed by VACUUM as long as they are required by a replication - slot. So if a slot is no longer required it should be dropped. + slot. In extreme cases this could cause the database to shut down to prevent + transaction ID wraparound (see ). + So if a slot is no longer required it should be dropped. - + @@ -272,9 +274,9 @@ $ pg_recvlogical -d postgres --slot test --drop-slot Exported Snapshots When a new replication slot is created using the streaming replication - interface (see ), a + interface (see ), a snapshot is exported - (see ), which will show + (see ), which will show exactly the state of the database after which all changes will be included in the change stream. This can be used to create a new replica by using SET TRANSACTION @@ -286,7 +288,7 @@ $ pg_recvlogical -d postgres --slot test --drop-slot Creation of a snapshot is not always possible. In particular, it will fail when connected to a hot standby. Applications that do not require - snapshot export may suppress it with the NOEXPORT_SNAPSHOT + snapshot export may suppress it with the NOEXPORT_SNAPSHOT option. @@ -303,7 +305,7 @@ $ pg_recvlogical -d postgres --slot test --drop-slot - DROP_REPLICATION_SLOT slot_name + DROP_REPLICATION_SLOT slot_name WAIT @@ -313,11 +315,11 @@ $ pg_recvlogical -d postgres --slot test --drop-slot are used to create, drop, and stream changes from a replication slot, respectively. These commands are only available over a replication connection; they cannot be used via SQL. - See for details on these commands. + See for details on these commands. - The command can be used to control + The command can be used to control logical decoding over a streaming replication connection. (It uses these commands internally.) @@ -327,12 +329,12 @@ $ pg_recvlogical -d postgres --slot test --drop-slot Logical Decoding <acronym>SQL</acronym> Interface - See for detailed documentation on + See for detailed documentation on the SQL-level API for interacting with logical decoding. - Synchronous replication (see ) is + Synchronous replication (see ) is only supported on replication slots used over the streaming replication interface. 
The function interface and additional, non-core interfaces do not support synchronous replication. @@ -345,7 +347,7 @@ $ pg_recvlogical -d postgres --slot test --drop-slot The pg_replication_slots view and the - pg_stat_replication + pg_stat_replication view provide information about the current state of replication slots and streaming replication connections respectively. These views apply to both physical and logical replication. @@ -363,7 +365,7 @@ $ pg_recvlogical -d postgres --slot test --drop-slot Initialization Function - + _PG_output_plugin_init @@ -381,6 +383,7 @@ typedef struct OutputPluginCallbacks LogicalDecodeStartupCB startup_cb; LogicalDecodeBeginCB begin_cb; LogicalDecodeChangeCB change_cb; + LogicalDecodeTruncateCB truncate_cb; LogicalDecodeCommitCB commit_cb; LogicalDecodeMessageCB message_cb; LogicalDecodeFilterByOriginCB filter_by_origin_cb; @@ -392,8 +395,10 @@ typedef void (*LogicalOutputPluginInit) (struct OutputPluginCallbacks *cb); The begin_cb, change_cb and commit_cb callbacks are required, while startup_cb, - filter_by_origin_cb + filter_by_origin_cb, truncate_cb, and shutdown_cb are optional. + If truncate_cb is not set but a + TRUNCATE is to be decoded, the action will be ignored. @@ -426,12 +431,12 @@ CREATE TABLE another_catalog_table(data text) WITH (user_catalog_table = true); data in a data type that can contain arbitrary data (e.g., bytea) is cumbersome. If the output plugin only outputs textual data in the server's encoding, it can declare that by - setting OutputPluginOptions.output_type - to OUTPUT_PLUGIN_TEXTUAL_OUTPUT instead - of OUTPUT_PLUGIN_BINARY_OUTPUT in + setting OutputPluginOptions.output_type + to OUTPUT_PLUGIN_TEXTUAL_OUTPUT instead + of OUTPUT_PLUGIN_BINARY_OUTPUT in the startup - callback. In that case, all the data has to be in the server's encoding - so that a text datum can contain it. This is checked in assertion-enabled + callback. In that case, all the data has to be in the server's encoding + so that a text datum can contain it. This is checked in assertion-enabled builds. @@ -484,12 +489,17 @@ typedef void (*LogicalDecodeStartupCB) (struct LogicalDecodingContext *ctx, typedef struct OutputPluginOptions { OutputPluginOutputType output_type; + bool receive_rewrites; } OutputPluginOptions; output_type has to either be set to OUTPUT_PLUGIN_TEXTUAL_OUTPUT or OUTPUT_PLUGIN_BINARY_OUTPUT. See also - . + . + If receive_rewrites is true, the output plugin will + also be called for changes made by heap rewrites during certain DDL + operations. These are of interest to plugins that handle DDL + replication, but they require special handling. @@ -576,13 +586,35 @@ typedef void (*LogicalDecodeChangeCB) (struct LogicalDecodingContext *ctx, Only changes in user defined tables that are not unlogged - (see ) and not temporary - (see ) can be extracted using + (see ) and not temporary + (see ) can be extracted using logical decoding. + + Truncate Callback + + + The truncate_cb callback is called for a + TRUNCATE command. + +typedef void (*LogicalDecodeTruncateCB) (struct LogicalDecodingContext *ctx, + ReorderBufferTXN *txn, + int nrelations, + Relation relations[], + ReorderBufferChange *change); + + The parameters are analogous to the change_cb + callback. However, because TRUNCATE actions on + tables connected by foreign keys need to be executed together, this + callback receives an array of relations instead of just a single one. + See the description of the statement for + details. 
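As an illustrative sketch (assuming the test_decoding example plugin and the data table from the example above; the slot name is a placeholder), a decoded TRUNCATE can be observed through the SQL interface. An output plugin without a truncate_cb would simply skip the action:

SELECT pg_create_logical_replication_slot('truncate_slot', 'test_decoding');
TRUNCATE data;
SELECT * FROM pg_logical_slot_get_changes('truncate_slot', NULL, NULL);
SELECT pg_drop_replication_slot('truncate_slot');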
+ + + Origin Filter Callback @@ -685,7 +717,7 @@ OutputPluginWrite(ctx, true); src/backend/replication/logical/logicalfuncs.c. Essentially, three functions need to be provided: one to read WAL, one to prepare writing output, and one to write the output - (see ). + (see ). @@ -698,9 +730,9 @@ OutputPluginWrite(ctx, true); replication solutions with the same user interface as synchronous replication for streaming replication. To do this, the streaming replication interface - (see ) must be used to stream out + (see ) must be used to stream out data. Clients have to send Standby status update (F) - (see ) messages, just like streaming + (see ) messages, just like streaming replication clients do. diff --git a/doc/src/sgml/ltree.sgml b/doc/src/sgml/ltree.sgml index fccfd320f5..3ddd335b8c 100644 --- a/doc/src/sgml/ltree.sgml +++ b/doc/src/sgml/ltree.sgml @@ -8,7 +8,7 @@ - This module implements a data type ltree for representing + This module implements a data type ltree for representing labels of data stored in a hierarchical tree-like structure. Extensive facilities for searching through label trees are provided. @@ -19,22 +19,20 @@ A label is a sequence of alphanumeric characters and underscores (for example, in C locale the characters - A-Za-z0-9_ are allowed). Labels must be less than 256 bytes + A-Za-z0-9_ are allowed). Labels must be less than 256 bytes long. - Examples: 42, Personal_Services + Examples: 42, Personal_Services A label path is a sequence of zero or more - labels separated by dots, for example L1.L2.L3, representing + labels separated by dots, for example L1.L2.L3, representing a path from the root of a hierarchical tree to a particular node. The length of a label path must be less than 65kB, but keeping it under 2kB is - preferable. In practice this is not a major limitation; for example, - the longest label path in the DMOZ catalog () is about 240 bytes. + preferable. @@ -42,7 +40,7 @@ - The ltree module provides several data types: + The ltree module provides several data types: @@ -55,13 +53,13 @@ lquery represents a regular-expression-like pattern - for matching ltree values. A simple word matches that - label within a path. A star symbol (*) matches zero + for matching ltree values. A simple word matches that + label within a path. A star symbol (*) matches zero or more labels. For example: -foo Match the exact label path foo -*.foo.* Match any label path containing the label foo -*.foo Match any label path whose last label is foo +foo Match the exact label path foo +*.foo.* Match any label path containing the label foo +*.foo Match any label path whose last label is foo @@ -69,34 +67,34 @@ foo Match the exact label path foo -*{n} Match exactly n labels -*{n,} Match at least n labels -*{n,m} Match at least n but not more than m labels -*{,m} Match at most m labels — same as *{0,m} +*{n} Match exactly n labels +*{n,} Match at least n labels +*{n,m} Match at least n but not more than m labels +*{,m} Match at most m labels — same as *{0,m} There are several modifiers that can be put at the end of a non-star - label in lquery to make it match more than just the exact match: + label in lquery to make it match more than just the exact match: -@ Match case-insensitively, for example a@ matches A -* Match any label with this prefix, for example foo* matches foobar +@ Match case-insensitively, for example a@ matches A +* Match any label with this prefix, for example foo* matches foobar % Match initial underscore-separated words - The behavior of % is a bit complicated. 
It tries to match + The behavior of % is a bit complicated. It tries to match words rather than the entire label. For example - foo_bar% matches foo_bar_baz but not - foo_barbaz. If combined with *, prefix + foo_bar% matches foo_bar_baz but not + foo_barbaz. If combined with *, prefix matching applies to each word separately, for example - foo_bar%* matches foo1_bar2_baz but - not foo1_br2_baz. + foo_bar%* matches foo1_bar2_baz but + not foo1_br2_baz. Also, you can write several possibly-modified labels separated with - | (OR) to match any of those labels, and you can put - ! (NOT) at the start to match any label that doesn't + | (OR) to match any of those labels, and you can put + ! (NOT) at the start to match any label that doesn't match any of the alternatives. @@ -141,14 +139,14 @@ a. b. c. d. e. ltxtquery represents a full-text-search-like - pattern for matching ltree values. An + pattern for matching ltree values. An ltxtquery value contains words, possibly with the - modifiers @, *, % at the end; - the modifiers have the same meanings as in lquery. - Words can be combined with & (AND), - | (OR), ! (NOT), and parentheses. + modifiers @, *, % at the end; + the modifiers have the same meanings as in lquery. + Words can be combined with & (AND), + | (OR), ! (NOT), and parentheses. The key difference from - lquery is that ltxtquery matches words without + lquery is that ltxtquery matches words without regard to their position in the label path. @@ -161,7 +159,7 @@ Europe & Russia*@ & !Transportation any label beginning with Russia (case-insensitive), but not paths containing the label Transportation. The location of these words within the path is not important. - Also, when % is used, the word can be matched to any + Also, when % is used, the word can be matched to any underscore-separated word within a label, regardless of position. @@ -169,8 +167,8 @@ Europe & Russia*@ & !Transportation - Note: ltxtquery allows whitespace between symbols, but - ltree and lquery do not. + Note: ltxtquery allows whitespace between symbols, but + ltree and lquery do not. @@ -178,16 +176,16 @@ Europe & Russia*@ & !Transportation Operators and Functions - Type ltree has the usual comparison operators - =, <>, - <, >, <=, >=. + Type ltree has the usual comparison operators + =, <>, + <, >, <=, >=. Comparison sorts in the order of a tree traversal, with the children of a node sorted by label text. In addition, the specialized - operators shown in are available. + operators shown in are available.
- <type>ltree</> Operators + <type>ltree</type> Operators @@ -200,153 +198,153 @@ Europe & Russia*@ & !Transportation - ltree @> ltree + ltree @> ltree boolean is left argument an ancestor of right (or equal)? - ltree <@ ltree + ltree <@ ltree boolean is left argument a descendant of right (or equal)? - ltree ~ lquery + ltree ~ lquery boolean - does ltree match lquery? + does ltree match lquery? - lquery ~ ltree + lquery ~ ltree boolean - does ltree match lquery? + does ltree match lquery? - ltree ? lquery[] + ltree ? lquery[] boolean - does ltree match any lquery in array? + does ltree match any lquery in array? - lquery[] ? ltree + lquery[] ? ltree boolean - does ltree match any lquery in array? + does ltree match any lquery in array? - ltree @ ltxtquery + ltree @ ltxtquery boolean - does ltree match ltxtquery? + does ltree match ltxtquery? - ltxtquery @ ltree + ltxtquery @ ltree boolean - does ltree match ltxtquery? + does ltree match ltxtquery? - ltree || ltree + ltree || ltree ltree - concatenate ltree paths + concatenate ltree paths - ltree || text + ltree || text ltree - convert text to ltree and concatenate + convert text to ltree and concatenate - text || ltree + text || ltree ltree - convert text to ltree and concatenate + convert text to ltree and concatenate - ltree[] @> ltree + ltree[] @> ltree boolean - does array contain an ancestor of ltree? + does array contain an ancestor of ltree? - ltree <@ ltree[] + ltree <@ ltree[] boolean - does array contain an ancestor of ltree? + does array contain an ancestor of ltree? - ltree[] <@ ltree + ltree[] <@ ltree boolean - does array contain a descendant of ltree? + does array contain a descendant of ltree? - ltree @> ltree[] + ltree @> ltree[] boolean - does array contain a descendant of ltree? + does array contain a descendant of ltree? - ltree[] ~ lquery + ltree[] ~ lquery boolean - does array contain any path matching lquery? + does array contain any path matching lquery? - lquery ~ ltree[] + lquery ~ ltree[] boolean - does array contain any path matching lquery? + does array contain any path matching lquery? - ltree[] ? lquery[] + ltree[] ? lquery[] boolean - does ltree array contain any path matching any lquery? + does ltree array contain any path matching any lquery? - lquery[] ? ltree[] + lquery[] ? ltree[] boolean - does ltree array contain any path matching any lquery? + does ltree array contain any path matching any lquery? - ltree[] @ ltxtquery + ltree[] @ ltxtquery boolean - does array contain any path matching ltxtquery? + does array contain any path matching ltxtquery? - ltxtquery @ ltree[] + ltxtquery @ ltree[] boolean - does array contain any path matching ltxtquery? + does array contain any path matching ltxtquery? 
- ltree[] ?@> ltree + ltree[] ?@> ltree ltree - first array entry that is an ancestor of ltree; NULL if none + first array entry that is an ancestor of ltree; NULL if none - ltree[] ?<@ ltree + ltree[] ?<@ ltree ltree - first array entry that is a descendant of ltree; NULL if none + first array entry that is a descendant of ltree; NULL if none - ltree[] ?~ lquery + ltree[] ?~ lquery ltree - first array entry that matches lquery; NULL if none + first array entry that matches lquery; NULL if none - ltree[] ?@ ltxtquery + ltree[] ?@ ltxtquery ltree - first array entry that matches ltxtquery; NULL if none + first array entry that matches ltxtquery; NULL if none @@ -356,17 +354,17 @@ Europe & Russia*@ & !Transportation The operators <@, @>, @ and ~ have analogues - ^<@, ^@>, ^@, + ^<@, ^@>, ^@, ^~, which are the same except they do not use indexes. These are useful only for testing purposes. - The available functions are shown in . + The available functions are shown in .
- <type>ltree</> Functions + <type>ltree</type> Functions @@ -383,8 +381,8 @@ Europe & Russia*@ & !Transportation subltree(ltree, int start, int end)subltree ltree - subpath of ltree from position start to - position end-1 (counting from 0) + subpath of ltree from position start to + position end-1 (counting from 0) subltree('Top.Child1.Child2',1,2) Child1 @@ -392,10 +390,10 @@ Europe & Russia*@ & !Transportation subpath(ltree, int offset, int len)subpath ltree - subpath of ltree starting at position - offset, length len. - If offset is negative, subpath starts that far from the - end of the path. If len is negative, leaves that many + subpath of ltree starting at position + offset, length len. + If offset is negative, subpath starts that far from the + end of the path. If len is negative, leaves that many labels off the end of the path. subpath('Top.Child1.Child2',0,2) Top.Child1 @@ -404,9 +402,9 @@ Europe & Russia*@ & !Transportation subpath(ltree, int offset) ltree - subpath of ltree starting at position - offset, extending to end of path. - If offset is negative, subpath starts that far from the + subpath of ltree starting at position + offset, extending to end of path. + If offset is negative, subpath starts that far from the end of the path. subpath('Top.Child1.Child2',1) Child1.Child2 @@ -423,8 +421,8 @@ Europe & Russia*@ & !Transportation index(ltree a, ltree b)index integer - position of first occurrence of b in - a; -1 if not found + position of first occurrence of b in + a; -1 if not found index('0.1.2.3.5.4.5.6.8.5.6.8','5.6') 6 @@ -432,9 +430,9 @@ Europe & Russia*@ & !Transportation index(ltree a, ltree b, int offset) integer - position of first occurrence of b in - a, searching starting at offset; - negative offset means start -offset + position of first occurrence of b in + a, searching starting at offset; + negative offset means start -offset labels from the end of the path index('0.1.2.3.5.4.5.6.8.5.6.8','5.6',-4) 9 @@ -443,7 +441,7 @@ Europe & Russia*@ & !Transportation text2ltree(text)text2ltree ltree - cast text to ltree + cast text to ltree @@ -451,7 +449,7 @@ Europe & Russia*@ & !Transportation ltree2text(ltree)ltree2text text - cast ltree to text + cast ltree to text @@ -459,17 +457,17 @@ Europe & Russia*@ & !Transportation lca(ltree, ltree, ...)lca ltree - lowest common ancestor, i.e., longest common prefix of paths + longest common ancestor of paths (up to 8 arguments supported) - lca('1.2.2.3','1.2.3.4.5.6') + lca('1.2.3','1.2.3.4.5.6') 1.2 lca(ltree[]) ltree - lowest common ancestor, i.e., longest common prefix of paths - lca(array['1.2.2.3'::ltree,'1.2.3']) + longest common ancestor of paths in array + lca(array['1.2.3'::ltree,'1.2.3.4']) 1.2 @@ -481,25 +479,25 @@ Europe & Russia*@ & !Transportation Indexes - ltree supports several types of indexes that can speed + ltree supports several types of indexes that can speed up the indicated operators: - B-tree index over ltree: - <, <=, =, - >=, > + B-tree index over ltree: + <, <=, =, + >=, > - GiST index over ltree: - <, <=, =, - >=, >, - @>, <@, - @, ~, ? + GiST index over ltree: + <, <=, =, + >=, >, + @>, <@, + @, ~, ? Example of creating such an index: @@ -510,9 +508,9 @@ CREATE INDEX path_gist_idx ON test USING GIST (path); - GiST index over ltree[]: - ltree[] <@ ltree, ltree @> ltree[], - @, ~, ? + GiST index over ltree[]: + ltree[] <@ ltree, ltree @> ltree[], + @, ~, ? 
Example of creating such an index: @@ -532,7 +530,7 @@ CREATE INDEX path_gist_idx ON test USING GIST (array_path); This example uses the following data (also available in file - contrib/ltree/ltreetest.sql in the source distribution): + contrib/ltree/ltreetest.sql in the source distribution): @@ -555,7 +553,7 @@ CREATE INDEX path_idx ON test USING BTREE (path); - Now, we have a table test populated with data describing + Now, we have a table test populated with data describing the hierarchy shown below: @@ -672,7 +670,7 @@ ltreetest=> SELECT ins_label(path,2,'Space') FROM test WHERE path <@ 'Top. the ltree type for PL/Python. The extensions are called ltree_plpythonu, ltree_plpython2u, and ltree_plpython3u - (see for the PL/Python naming + (see for the PL/Python naming convention). If you install these transforms and specify them when creating a function, ltree values are mapped to Python lists. (The reverse is currently not supported, however.) diff --git a/doc/src/sgml/maintenance.sgml b/doc/src/sgml/maintenance.sgml index fe1e0ed2b3..02c512f8bc 100644 --- a/doc/src/sgml/maintenance.sgml +++ b/doc/src/sgml/maintenance.sgml @@ -12,12 +12,12 @@ - PostgreSQL, like any database software, requires that certain tasks + PostgreSQL, like any database software, requires that certain tasks be performed regularly to achieve optimum performance. The tasks discussed here are required, but they are repetitive in nature and can easily be automated using standard tools such as cron scripts or - Windows' Task Scheduler. It is the database + Windows' Task Scheduler. It is the database administrator's responsibility to set up appropriate scripts, and to check that they execute successfully. @@ -28,27 +28,27 @@ after a catastrophe (disk failure, fire, mistakenly dropping a critical table, etc.). The backup and recovery mechanisms available in PostgreSQL are discussed at length in - . + . - The other main category of maintenance task is periodic vacuuming + The other main category of maintenance task is periodic vacuuming of the database. This activity is discussed in - . Closely related to this is updating + . Closely related to this is updating the statistics that will be used by the query planner, as discussed in - . + . Another task that might need periodic attention is log file management. - This is discussed in . + This is discussed in . check_postgres + url="https://bucardo.org/check_postgres/">check_postgres is available for monitoring database health and reporting unusual - conditions. check_postgres integrates with + conditions. check_postgres integrates with Nagios and MRTG, but can be run standalone too. @@ -68,15 +68,15 @@ PostgreSQL databases require periodic - maintenance known as vacuuming. For many installations, it + maintenance known as vacuuming. For many installations, it is sufficient to let vacuuming be performed by the autovacuum - daemon, which is described in . You might + daemon, which is described in . You might need to adjust the autovacuuming parameters described there to obtain best results for your situation. Some database administrators will want to supplement or replace the daemon's activities with manually-managed - VACUUM commands, which typically are executed according to a + VACUUM commands, which typically are executed according to a schedule by cron or Task - Scheduler scripts. To set up manually-managed vacuuming properly, + Scheduler scripts. To set up manually-managed vacuuming properly, it is essential to understand the issues discussed in the next few subsections. 
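For instance, a manually-managed nightly job run from cron through psql or vacuumdb often reduces to a single command such as this minimal sketch:

VACUUM (VERBOSE, ANALYZE);   -- vacuums and analyzes every table in the current database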
Administrators who rely on autovacuuming may still wish to skim this material to help them understand and adjust autovacuuming. @@ -87,7 +87,7 @@ PostgreSQL's - command has to + command has to process each table on a regular basis for several reasons: @@ -109,30 +109,30 @@ To protect against loss of very old data due to - transaction ID wraparound or - multixact ID wraparound. + transaction ID wraparound or + multixact ID wraparound. - Each of these reasons dictates performing VACUUM operations + Each of these reasons dictates performing VACUUM operations of varying frequency and scope, as explained in the following subsections. - There are two variants of VACUUM: standard VACUUM - and VACUUM FULL. VACUUM FULL can reclaim more + There are two variants of VACUUM: standard VACUUM + and VACUUM FULL. VACUUM FULL can reclaim more disk space but runs much more slowly. Also, - the standard form of VACUUM can run in parallel with production + the standard form of VACUUM can run in parallel with production database operations. (Commands such as SELECT, INSERT, UPDATE, and DELETE will continue to function normally, though you will not be able to modify the definition of a table with commands such as ALTER TABLE while it is being vacuumed.) - VACUUM FULL requires exclusive lock on the table it is + VACUUM FULL requires exclusive lock on the table it is working on, and therefore cannot be done in parallel with other use of the table. Generally, therefore, - administrators should strive to use standard VACUUM and - avoid VACUUM FULL. + administrators should strive to use standard VACUUM and + avoid VACUUM FULL. @@ -140,7 +140,7 @@ traffic, which can cause poor performance for other active sessions. There are configuration parameters that can be adjusted to reduce the performance impact of background vacuuming — see - . + . @@ -153,15 +153,15 @@ In PostgreSQL, an - UPDATE or DELETE of a row does not + UPDATE or DELETE of a row does not immediately remove the old version of the row. This approach is necessary to gain the benefits of multiversion - concurrency control (MVCC, see ): the row version + concurrency control (MVCC, see ): the row version must not be deleted while it is still potentially visible to other transactions. But eventually, an outdated or deleted row version is no longer of interest to any transaction. The space it occupies must then be reclaimed for reuse by new rows, to avoid unbounded growth of disk - space requirements. This is done by running VACUUM. + space requirements. This is done by running VACUUM. @@ -170,7 +170,7 @@ future reuse. However, it will not return the space to the operating system, except in the special case where one or more pages at the end of a table become entirely free and an exclusive table lock can be - easily obtained. In contrast, VACUUM FULL actively compacts + easily obtained. In contrast, VACUUM FULL actively compacts tables by writing a complete new version of the table file with no dead space. This minimizes the size of the table, but can take a long time. It also requires extra disk space for the new copy of the table, until @@ -178,18 +178,18 @@ - The usual goal of routine vacuuming is to do standard VACUUMs - often enough to avoid needing VACUUM FULL. The + The usual goal of routine vacuuming is to do standard VACUUMs + often enough to avoid needing VACUUM FULL. The autovacuum daemon attempts to work this way, and in fact will - never issue VACUUM FULL. In this approach, the idea + never issue VACUUM FULL. 
In this approach, the idea is not to keep tables at their minimum size, but to maintain steady-state usage of disk space: each table occupies space equivalent to its minimum size plus however much space gets used up between vacuumings. - Although VACUUM FULL can be used to shrink a table back + Although VACUUM FULL can be used to shrink a table back to its minimum size and return the disk space to the operating system, there is not much point in this if the table will just grow again in the - future. Thus, moderately-frequent standard VACUUM runs are a - better approach than infrequent VACUUM FULL runs for + future. Thus, moderately-frequent standard VACUUM runs are a + better approach than infrequent VACUUM FULL runs for maintaining heavily-updated tables. @@ -198,38 +198,38 @@ doing all the work at night when load is low. The difficulty with doing vacuuming according to a fixed schedule is that if a table has an unexpected spike in update activity, it may - get bloated to the point that VACUUM FULL is really necessary + get bloated to the point that VACUUM FULL is really necessary to reclaim space. Using the autovacuum daemon alleviates this problem, since the daemon schedules vacuuming dynamically in response to update activity. It is unwise to disable the daemon completely unless you have an extremely predictable workload. One possible compromise is to set the daemon's parameters so that it will only react to unusually heavy update activity, thus keeping things from getting out of hand, - while scheduled VACUUMs are expected to do the bulk of the + while scheduled VACUUMs are expected to do the bulk of the work when the load is typical. For those not using autovacuum, a typical approach is to schedule a - database-wide VACUUM once a day during a low-usage period, + database-wide VACUUM once a day during a low-usage period, supplemented by more frequent vacuuming of heavily-updated tables as necessary. (Some installations with extremely high update rates vacuum their busiest tables as often as once every few minutes.) If you have multiple databases in a cluster, don't forget to VACUUM each one; the program might be helpful. + linkend="app-vacuumdb"/> might be helpful. - Plain VACUUM may not be satisfactory when + Plain VACUUM may not be satisfactory when a table contains large numbers of dead row versions as a result of massive update or delete activity. If you have such a table and you need to reclaim the excess disk space it occupies, you will need - to use VACUUM FULL, or alternatively - + to use VACUUM FULL, or alternatively + or one of the table-rewriting variants of - . + . These commands rewrite an entire new copy of the table and build new indexes for it. All these options require exclusive lock. Note that they also temporarily use extra disk space approximately equal to the size @@ -242,7 +242,7 @@ If you have a table whose entire contents are deleted on a periodic basis, consider doing it with - rather + rather than using DELETE followed by VACUUM. TRUNCATE removes the entire content of the table immediately, without requiring a @@ -269,21 +269,21 @@ The PostgreSQL query planner relies on statistical information about the contents of tables in order to generate good plans for queries. These statistics are gathered by - the command, + the command, which can be invoked by itself or - as an optional step in VACUUM. It is important to have + as an optional step in VACUUM. 
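For example (table and column names are placeholders), statistics can be refreshed for the whole database, for a single table, or only for selected columns:

ANALYZE;                                    -- the whole current database
ANALYZE mytable;                            -- one table
ANALYZE mytable (customer_id, created_at);  -- selected columns only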
It is important to have reasonably accurate statistics, otherwise poor choices of plans might degrade database performance. The autovacuum daemon, if enabled, will automatically issue - ANALYZE commands whenever the content of a table has + ANALYZE commands whenever the content of a table has changed sufficiently. However, administrators might prefer to rely - on manually-scheduled ANALYZE operations, particularly + on manually-scheduled ANALYZE operations, particularly if it is known that update activity on a table will not affect the - statistics of interesting columns. The daemon schedules - ANALYZE strictly as a function of the number of rows + statistics of interesting columns. The daemon schedules + ANALYZE strictly as a function of the number of rows inserted or updated; it has no knowledge of whether that will lead to meaningful statistical changes. @@ -305,25 +305,25 @@ - It is possible to run ANALYZE on specific tables and even + It is possible to run ANALYZE on specific tables and even just specific columns of a table, so the flexibility exists to update some statistics more frequently than others if your application requires it. In practice, however, it is usually best to just analyze the entire - database, because it is a fast operation. ANALYZE uses a + database, because it is a fast operation. ANALYZE uses a statistically random sampling of the rows of a table rather than reading every single row. - Although per-column tweaking of ANALYZE frequency might not be + Although per-column tweaking of ANALYZE frequency might not be very productive, you might find it worthwhile to do per-column adjustment of the level of detail of the statistics collected by - ANALYZE. Columns that are heavily used in WHERE + ANALYZE. Columns that are heavily used in WHERE clauses and have highly irregular data distributions might require a finer-grain data histogram than other columns. See ALTER TABLE - SET STATISTICS, or change the database-wide default using the configuration parameter. + SET STATISTICS, or change the database-wide default using the configuration parameter. @@ -337,11 +337,11 @@ - The autovacuum daemon does not issue ANALYZE commands for + The autovacuum daemon does not issue ANALYZE commands for foreign tables, since it has no means of determining how often that might be useful. If your queries require statistics on foreign tables for proper planning, it's a good idea to run manually-managed - ANALYZE commands on those tables on a suitable schedule. + ANALYZE commands on those tables on a suitable schedule. @@ -350,7 +350,7 @@ Updating The Visibility Map - Vacuum maintains a visibility map for each + Vacuum maintains a visibility map for each table to keep track of which pages contain only tuples that are known to be visible to all active transactions (and all future transactions, until the page is again modified). This has two purposes. First, vacuum @@ -366,7 +366,7 @@ matching index entry, to check whether it should be seen by the current transaction. An index-only - scan, on the other hand, checks the visibility map first. + scan, on the other hand, checks the visibility map first. If it's known that all tuples on the page are visible, the heap fetch can be skipped. This is most useful on large data sets where the visibility map can prevent disk accesses. 
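As a rough monitoring sketch (the table name is a placeholder), the fraction of a table's pages currently marked all-visible, and therefore eligible to skip heap fetches during index-only scans, can be estimated from pg_class; both counters are estimates maintained by VACUUM and ANALYZE:

SELECT relname, relpages, relallvisible,
       round(100.0 * relallvisible / greatest(relpages, 1), 1) AS pct_all_visible
FROM pg_class
WHERE relname = 'mytable';   -- placeholder table name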
@@ -391,13 +391,13 @@ PostgreSQL's MVCC transaction semantics - depend on being able to compare transaction ID (XID) + depend on being able to compare transaction ID (XID) numbers: a row version with an insertion XID greater than the current - transaction's XID is in the future and should not be visible + transaction's XID is in the future and should not be visible to the current transaction. But since transaction IDs have limited size (32 bits) a cluster that runs for a long time (more than 4 billion transactions) would suffer transaction ID - wraparound: the XID counter wraps around to zero, and all of a sudden + wraparound: the XID counter wraps around to zero, and all of a sudden transactions that were in the past appear to be in the future — which means their output become invisible. In short, catastrophic data loss. (Actually the data is still there, but that's cold comfort if you cannot @@ -407,53 +407,53 @@ The reason that periodic vacuuming solves the problem is that - VACUUM will mark rows as frozen, indicating that + VACUUM will mark rows as frozen, indicating that they were inserted by a transaction that committed sufficiently far in the past that the effects of the inserting transaction are certain to be visible to all current and future transactions. Normal XIDs are - compared using modulo-232 arithmetic. This means + compared using modulo-232 arithmetic. This means that for every normal XID, there are two billion XIDs that are - older and two billion that are newer; another + older and two billion that are newer; another way to say it is that the normal XID space is circular with no endpoint. Therefore, once a row version has been created with a particular - normal XID, the row version will appear to be in the past for + normal XID, the row version will appear to be in the past for the next two billion transactions, no matter which normal XID we are talking about. If the row version still exists after more than two billion transactions, it will suddenly appear to be in the future. To - prevent this, PostgreSQL reserves a special XID, - FrozenTransactionId, which does not follow the normal XID + prevent this, PostgreSQL reserves a special XID, + FrozenTransactionId, which does not follow the normal XID comparison rules and is always considered older than every normal XID. Frozen row versions are treated as if the inserting XID were - FrozenTransactionId, so that they will appear to be - in the past to all normal transactions regardless of wraparound + FrozenTransactionId, so that they will appear to be + in the past to all normal transactions regardless of wraparound issues, and so such row versions will be valid until deleted, no matter how long that is. - In PostgreSQL versions before 9.4, freezing was + In PostgreSQL versions before 9.4, freezing was implemented by actually replacing a row's insertion XID - with FrozenTransactionId, which was visible in the - row's xmin system column. Newer versions just set a flag - bit, preserving the row's original xmin for possible - forensic use. However, rows with xmin equal - to FrozenTransactionId (2) may still be found - in databases pg_upgrade'd from pre-9.4 versions. + with FrozenTransactionId, which was visible in the + row's xmin system column. Newer versions just set a flag + bit, preserving the row's original xmin for possible + forensic use. However, rows with xmin equal + to FrozenTransactionId (2) may still be found + in databases pg_upgrade'd from pre-9.4 versions. 
- Also, system catalogs may contain rows with xmin equal - to BootstrapTransactionId (1), indicating that they were - inserted during the first phase of initdb. - Like FrozenTransactionId, this special XID is treated as + Also, system catalogs may contain rows with xmin equal + to BootstrapTransactionId (1), indicating that they were + inserted during the first phase of initdb. + Like FrozenTransactionId, this special XID is treated as older than every normal XID. - + controls how old an XID value has to be before rows bearing that XID will be frozen. Increasing this setting may avoid unnecessary work if the rows that would otherwise be frozen will soon be modified again, @@ -463,61 +463,61 @@ - VACUUM uses the visibility map + VACUUM uses the visibility map to determine which pages of a table must be scanned. Normally, it will skip pages that don't have any dead row versions even if those pages might still have row versions with old XID values. Therefore, normal - VACUUMs won't always freeze every old row version in the table. - Periodically, VACUUM will perform an aggressive - vacuum, skipping only those pages which contain neither dead rows nor + VACUUMs won't always freeze every old row version in the table. + Periodically, VACUUM will perform an aggressive + vacuum, skipping only those pages which contain neither dead rows nor any unfrozen XID or MXID values. - - controls when VACUUM does that: all-visible but not all-frozen + + controls when VACUUM does that: all-visible but not all-frozen pages are scanned if the number of transactions that have passed since the - last such scan is greater than vacuum_freeze_table_age minus - vacuum_freeze_min_age. Setting - vacuum_freeze_table_age to 0 forces VACUUM to + last such scan is greater than vacuum_freeze_table_age minus + vacuum_freeze_min_age. Setting + vacuum_freeze_table_age to 0 forces VACUUM to use this more aggressive strategy for all scans. The maximum time that a table can go unvacuumed is two billion - transactions minus the vacuum_freeze_min_age value at + transactions minus the vacuum_freeze_min_age value at the time of the last aggressive vacuum. If it were to go unvacuumed for longer than that, data loss could result. To ensure that this does not happen, autovacuum is invoked on any table that might contain unfrozen rows with XIDs older than the age specified by the configuration parameter . (This will happen even if + linkend="guc-autovacuum-freeze-max-age"/>. (This will happen even if autovacuum is disabled.) This implies that if a table is not otherwise vacuumed, autovacuum will be invoked on it approximately once every - autovacuum_freeze_max_age minus - vacuum_freeze_min_age transactions. + autovacuum_freeze_max_age minus + vacuum_freeze_min_age transactions. For tables that are regularly vacuumed for space reclamation purposes, this is of little importance. However, for static tables (including tables that receive inserts, but no updates or deletes), there is no need to vacuum for space reclamation, so it can be useful to try to maximize the interval between forced autovacuums on very large static tables. Obviously one can do this either by - increasing autovacuum_freeze_max_age or decreasing - vacuum_freeze_min_age. + increasing autovacuum_freeze_max_age or decreasing + vacuum_freeze_min_age. 
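A sketch of both approaches for a hypothetical large static table named archive_2017: freeze it once up front, and tell future autovacuums of that table to freeze eagerly whenever they do run:

VACUUM (FREEZE, VERBOSE) archive_2017;                         -- freeze all existing rows now
ALTER TABLE archive_2017 SET (autovacuum_freeze_min_age = 0);  -- per-table equivalent of lowering vacuum_freeze_min_age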
- The effective maximum for vacuum_freeze_table_age is 0.95 * - autovacuum_freeze_max_age; a setting higher than that will be + The effective maximum for vacuum_freeze_table_age is 0.95 * + autovacuum_freeze_max_age; a setting higher than that will be capped to the maximum. A value higher than - autovacuum_freeze_max_age wouldn't make sense because an + autovacuum_freeze_max_age wouldn't make sense because an anti-wraparound autovacuum would be triggered at that point anyway, and the 0.95 multiplier leaves some breathing room to run a manual - VACUUM before that happens. As a rule of thumb, - vacuum_freeze_table_age should be set to a value somewhat - below autovacuum_freeze_max_age, leaving enough gap so that - a regularly scheduled VACUUM or an autovacuum triggered by + VACUUM before that happens. As a rule of thumb, + vacuum_freeze_table_age should be set to a value somewhat + below autovacuum_freeze_max_age, leaving enough gap so that + a regularly scheduled VACUUM or an autovacuum triggered by normal delete and update activity is run in that window. Setting it too close could lead to anti-wraparound autovacuums, even though the table was recently vacuumed to reclaim space, whereas lower values lead to more @@ -525,25 +525,29 @@ - The sole disadvantage of increasing autovacuum_freeze_max_age - (and vacuum_freeze_table_age along with it) - is that the pg_xact subdirectory of the database cluster - will take more space, because it must store the commit status of all - transactions back to the autovacuum_freeze_max_age horizon. - The commit status uses two bits per transaction, so if - autovacuum_freeze_max_age is set to its maximum allowed - value of two billion, pg_xact can be expected to - grow to about half a gigabyte. If this is trivial compared to your - total database size, setting autovacuum_freeze_max_age to - its maximum allowed value is recommended. Otherwise, set it depending - on what you are willing to allow for pg_xact storage. - (The default, 200 million transactions, translates to about 50MB of - pg_xact storage.) + The sole disadvantage of increasing autovacuum_freeze_max_age + (and vacuum_freeze_table_age along with it) is that + the pg_xact and pg_commit_ts + subdirectories of the database cluster will take more space, because it + must store the commit status and (if track_commit_timestamp is + enabled) timestamp of all transactions back to + the autovacuum_freeze_max_age horizon. The commit status uses + two bits per transaction, so if + autovacuum_freeze_max_age is set to its maximum allowed value + of two billion, pg_xact can be expected to grow to about half + a gigabyte and pg_commit_ts to about 20GB. If this + is trivial compared to your total database size, + setting autovacuum_freeze_max_age to its maximum allowed value + is recommended. Otherwise, set it depending on what you are willing to + allow for pg_xact and pg_commit_ts storage. + (The default, 200 million transactions, translates to about 50MB + of pg_xact storage and about 2GB of pg_commit_ts + storage.) - One disadvantage of decreasing vacuum_freeze_min_age is that - it might cause VACUUM to do useless work: freezing a row + One disadvantage of decreasing vacuum_freeze_min_age is that + it might cause VACUUM to do useless work: freezing a row version is a waste of time if the row is modified soon thereafter (causing it to acquire a new XID). 
So the setting should be large enough that rows are not frozen until they are unlikely to change @@ -552,18 +556,18 @@ To track the age of the oldest unfrozen XIDs in a database, - VACUUM stores XID - statistics in the system tables pg_class and - pg_database. In particular, - the relfrozenxid column of a table's - pg_class row contains the freeze cutoff XID that was used - by the last aggressive VACUUM for that table. All rows + VACUUM stores XID + statistics in the system tables pg_class and + pg_database. In particular, + the relfrozenxid column of a table's + pg_class row contains the freeze cutoff XID that was used + by the last aggressive VACUUM for that table. All rows inserted by transactions with XIDs older than this cutoff XID are guaranteed to have been frozen. Similarly, - the datfrozenxid column of a database's - pg_database row is a lower bound on the unfrozen XIDs + the datfrozenxid column of a database's + pg_database row is a lower bound on the unfrozen XIDs appearing in that database — it is just the minimum of the - per-table relfrozenxid values within the database. + per-table relfrozenxid values within the database. A convenient way to examine this information is to execute queries such as: @@ -577,27 +581,27 @@ WHERE c.relkind IN ('r', 'm'); SELECT datname, age(datfrozenxid) FROM pg_database; - The age column measures the number of transactions from the + The age column measures the number of transactions from the cutoff XID to the current transaction's XID. - VACUUM normally only scans pages that have been modified - since the last vacuum, but relfrozenxid can only be + VACUUM normally only scans pages that have been modified + since the last vacuum, but relfrozenxid can only be advanced when every page of the table that might contain unfrozen XIDs is scanned. This happens when - relfrozenxid is more than - vacuum_freeze_table_age transactions old, when - VACUUM's FREEZE option is used, or when all + relfrozenxid is more than + vacuum_freeze_table_age transactions old, when + VACUUM's FREEZE option is used, or when all pages that are not already all-frozen happen to - require vacuuming to remove dead row versions. When VACUUM + require vacuuming to remove dead row versions. When VACUUM scans every page in the table that is not already all-frozen, it should - set age(relfrozenxid) to a value just a little more than the - vacuum_freeze_min_age setting + set age(relfrozenxid) to a value just a little more than the + vacuum_freeze_min_age setting that was used (more by the number of transactions started since the - VACUUM started). If no relfrozenxid-advancing - VACUUM is issued on the table until - autovacuum_freeze_max_age is reached, an autovacuum will soon + VACUUM started). If no relfrozenxid-advancing + VACUUM is issued on the table until + autovacuum_freeze_max_age is reached, an autovacuum will soon be forced for the table. @@ -612,10 +616,10 @@ WARNING: database "mydb" must be vacuumed within 177009986 transactions HINT: To avoid a database shutdown, execute a database-wide VACUUM in "mydb". - (A manual VACUUM should fix the problem, as suggested by the - hint; but note that the VACUUM must be performed by a + (A manual VACUUM should fix the problem, as suggested by the + hint; but note that the VACUUM must be performed by a superuser, else it will fail to process system catalogs and thus not - be able to advance the database's datfrozenxid.) + be able to advance the database's datfrozenxid.) 
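A minimal sketch of that manual fix, run as a superuser while connected to the database named in the warning:

VACUUM (FREEZE, VERBOSE);   -- database-wide; run by a superuser it includes the system catalogs, so datfrozenxid can advance

(A plain VACUUM is normally sufficient; FREEZE simply leaves the least work for the future.)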
If these warnings are ignored, the system will shut down and refuse to start any new transactions once there are fewer than 1 million transactions left @@ -628,11 +632,11 @@ HINT: Stop the postmaster and vacuum that database in single-user mode. The 1-million-transaction safety margin exists to let the administrator recover without data loss, by manually executing the - required VACUUM commands. However, since the system will not + required VACUUM commands. However, since the system will not execute commands once it has gone into the safety shutdown mode, the only way to do this is to stop the server and start the server in single-user - mode to execute VACUUM. The shutdown mode is not enforced - in single-user mode. See the reference + mode to execute VACUUM. The shutdown mode is not enforced + in single-user mode. See the reference page for details about using single-user mode. @@ -649,15 +653,15 @@ HINT: Stop the postmaster and vacuum that database in single-user mode. - Multixact IDs are used to support row locking by + Multixact IDs are used to support row locking by multiple transactions. Since there is only limited space in a tuple header to store lock information, that information is encoded as - a multiple transaction ID, or multixact ID for short, + a multiple transaction ID, or multixact ID for short, whenever there is more than one transaction concurrently locking a row. Information about which transaction IDs are included in any particular multixact ID is stored separately in - the pg_multixact subdirectory, and only the multixact ID - appears in the xmax field in the tuple header. + the pg_multixact subdirectory, and only the multixact ID + appears in the xmax field in the tuple header. Like transaction IDs, multixact IDs are implemented as a 32-bit counter and corresponding storage, all of which requires careful aging management, storage cleanup, and wraparound handling. @@ -667,23 +671,23 @@ HINT: Stop the postmaster and vacuum that database in single-user mode. - Whenever VACUUM scans any part of a table, it will replace + Whenever VACUUM scans any part of a table, it will replace any multixact ID it encounters which is older than - + by a different value, which can be the zero value, a single transaction ID, or a newer multixact ID. For each table, - pg_class.relminmxid stores the oldest + pg_class.relminmxid stores the oldest possible multixact ID still appearing in any tuple of that table. If this value is older than - , an aggressive + , an aggressive vacuum is forced. As discussed in the previous section, an aggressive vacuum means that only those pages which are known to be all-frozen will - be skipped. mxid_age() can be used on - pg_class.relminmxid to find its age. + be skipped. mxid_age() can be used on + pg_class.relminmxid to find its age. - Aggressive VACUUM scans, regardless of + Aggressive VACUUM scans, regardless of what causes them, enable advancing the value for that table. Eventually, as all tables in all databases are scanned and their oldest multixact values are advanced, on-disk storage for older @@ -693,7 +697,7 @@ HINT: Stop the postmaster and vacuum that database in single-user mode. As a safety device, an aggressive vacuum scan will occur for any table whose multixact-age is greater than - . Aggressive + . Aggressive vacuum scans will also occur progressively for all tables, starting with those that have the oldest multixact-age, if the amount of used member storage space exceeds the amount 50% of the addressable storage space. 
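A monitoring sketch along the same lines as the XID-age queries shown earlier; mxid_age() reports how old a multixact ID is:

SELECT oid::regclass AS relation, mxid_age(relminmxid) AS mxid_age
FROM pg_class
WHERE relkind IN ('r', 'm')
ORDER BY mxid_age(relminmxid) DESC
LIMIT 10;

SELECT datname, mxid_age(datminmxid) FROM pg_database;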
@@ -719,28 +723,28 @@ HINT: Stop the postmaster and vacuum that database in single-user mode. tables that have had a large number of inserted, updated or deleted tuples. These checks use the statistics collection facility; therefore, autovacuum cannot be used unless is set to true. + linkend="guc-track-counts"/> is set to true. In the default configuration, autovacuuming is enabled and the related configuration parameters are appropriately set. - The autovacuum daemon actually consists of multiple processes. + The autovacuum daemon actually consists of multiple processes. There is a persistent daemon process, called the autovacuum launcher, which is in charge of starting autovacuum worker processes for all databases. The launcher will distribute the work across time, attempting to start one - worker within each database every - seconds. (Therefore, if the installation has N databases, + worker within each database every + seconds. (Therefore, if the installation has N databases, a new worker will be launched every - autovacuum_naptime/N seconds.) - A maximum of worker processes + autovacuum_naptime/N seconds.) + A maximum of worker processes are allowed to run at the same time. If there are more than - autovacuum_max_workers databases to be processed, + autovacuum_max_workers databases to be processed, the next database will be processed as soon as the first worker finishes. Each worker process will check each table within its database and - execute VACUUM and/or ANALYZE as needed. - can be set to monitor + execute VACUUM and/or ANALYZE as needed. + can be set to monitor autovacuum workers' activity. @@ -752,13 +756,13 @@ HINT: Stop the postmaster and vacuum that database in single-user mode. available. There is no limit on how many workers might be in a single database, but workers do try to avoid repeating work that has already been done by other workers. Note that the number of running - workers does not count towards or - limits. + workers does not count towards or + limits. - Tables whose relfrozenxid value is more than - transactions old are always + Tables whose relfrozenxid value is more than + transactions old are always vacuumed (this also applies to those tables whose freeze max age has been modified via storage parameters; see below). Otherwise, if the number of tuples obsoleted since the last @@ -768,19 +772,19 @@ HINT: Stop the postmaster and vacuum that database in single-user mode. vacuum threshold = vacuum base threshold + vacuum scale factor * number of tuples where the vacuum base threshold is - , + , the vacuum scale factor is - , + , and the number of tuples is pg_class.reltuples. The number of obsolete tuples is obtained from the statistics collector; it is a semi-accurate count updated by each UPDATE and DELETE operation. (It is only semi-accurate because some information might be lost under heavy - load.) If the relfrozenxid value of the table is more - than vacuum_freeze_table_age transactions old, an aggressive + load.) If the relfrozenxid value of the table is more + than vacuum_freeze_table_age transactions old, an aggressive vacuum is performed to freeze old tuples and advance - relfrozenxid; otherwise, only pages that have been modified + relfrozenxid; otherwise, only pages that have been modified since the last vacuum are scanned. 
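As a worked example with the default settings (autovacuum_vacuum_threshold = 50 and autovacuum_vacuum_scale_factor = 0.2), a table with 10,000 rows is autovacuumed once roughly 50 + 0.2 * 10000 = 2050 tuples have been obsoleted. A hypothetical high-churn table can be tuned per-table so it is vacuumed far more often:

ALTER TABLE busy_table SET (                  -- placeholder table name
    autovacuum_vacuum_scale_factor = 0.02,    -- react to 2% churn instead of 20%
    autovacuum_vacuum_threshold    = 500
);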
@@ -804,21 +808,21 @@ analyze threshold = analyze base threshold + analyze scale factor * number of tu postgresql.conf, but it is possible to override them (and many other autovacuum control parameters) on a per-table basis; see for more information. + endterm="sql-createtable-storage-parameters-title"/> for more information. If a setting has been changed via a table's storage parameters, that value is used when processing that table; otherwise the global settings are - used. See for more details on + used. See for more details on the global settings. When multiple workers are running, the autovacuum cost delay parameters - (see ) are + (see ) are balanced among all the running workers, so that the total I/O impact on the system is the same regardless of the number of workers actually running. However, any workers processing tables whose - per-table autovacuum_vacuum_cost_delay or - autovacuum_vacuum_cost_limit storage parameters have been set + per-table autovacuum_vacuum_cost_delay or + autovacuum_vacuum_cost_limit storage parameters have been set are not considered in the balancing algorithm. @@ -834,7 +838,7 @@ analyze threshold = analyze base threshold + analyze scale factor * number of tu In some situations it is worthwhile to rebuild indexes periodically - with the command or a series of individual + with the command or a series of individual rebuilding steps. @@ -864,16 +868,16 @@ analyze threshold = analyze base threshold + analyze scale factor * number of tu - can be used safely and easily in all cases. + can be used safely and easily in all cases. But since the command requires an exclusive table lock, it is often preferable to execute an index rebuild with a sequence of creation and replacement steps. Index types that support - with the CONCURRENTLY + with the CONCURRENTLY option can instead be recreated that way. If that is successful and the resulting index is valid, the original index can then be replaced by - the newly built one using a combination of - and . When an index is used to enforce - uniqueness or other constraints, might + the newly built one using a combination of + and . When an index is used to enforce + uniqueness or other constraints, might be necessary to swap the existing constraint with one enforced by the new index. Review this alternate multistep rebuild approach carefully before using it as there are limitations on which @@ -892,17 +896,17 @@ analyze threshold = analyze base threshold + analyze scale factor * number of tu It is a good idea to save the database server's log output - somewhere, rather than just discarding it via /dev/null. + somewhere, rather than just discarding it via /dev/null. The log output is invaluable when diagnosing problems. However, the log output tends to be voluminous (especially at higher debug levels) so you won't want to save it - indefinitely. You need to rotate the log files so that + indefinitely. You need to rotate the log files so that new log files are started and old ones removed after a reasonable period of time. - If you simply direct the stderr of + If you simply direct the stderr of postgres into a file, you will have log output, but the only way to truncate the log file is to stop and restart @@ -913,13 +917,13 @@ analyze threshold = analyze base threshold + analyze scale factor * number of tu A better approach is to send the server's - stderr output to some type of log rotation program. + stderr output to some type of log rotation program. 
There is a built-in log rotation facility, which you can use by - setting the configuration parameter logging_collector to - true in postgresql.conf. The control + setting the configuration parameter logging_collector to + true in postgresql.conf. The control parameters for this program are described in . You can also use this approach - to capture the log data in machine readable CSV + linkend="runtime-config-logging-where"/>. You can also use this approach + to capture the log data in machine readable CSV (comma-separated values) format. @@ -928,12 +932,12 @@ analyze threshold = analyze base threshold + analyze scale factor * number of tu program if you have one that you are already using with other server software. For example, the rotatelogs tool included in the Apache distribution - can be used with PostgreSQL. To do this, - just pipe the server's - stderr output to the desired program. + can be used with PostgreSQL. One way to + do this is to pipe the server's + stderr output to the desired program. If you start the server with - pg_ctl, then stderr - is already redirected to stdout, so you just need a + pg_ctl, then stderr + is already redirected to stdout, so you just need a pipe command, for example: @@ -941,14 +945,44 @@ pg_ctl start | rotatelogs /var/log/pgsql_log 86400 + + You can combine these approaches by setting up logrotate + to collect log files produced by PostgreSQL built-in + logging collector. In this case, the logging collector defines the names and + location of the log files, while logrotate + periodically archives these files. When initiating log rotation, + logrotate must ensure that the application + sends further output to the new file. This is commonly done with a + postrotate script that sends a SIGHUP + signal to the application, which then reopens the log file. + In PostgreSQL, you can run pg_ctl + with the logrotate option instead. When the server receives + this command, the server either switches to a new log file or reopens the + existing file, depending on the logging configuration + (see ). + + + + + When using static log file names, the server might fail to reopen the log + file if the max open file limit is reached or a file table overflow occurs. + In this case, log messages are sent to the old log file until a + successful log rotation. If logrotate is + configured to compress the log file and delete it, the server may lose + the messages logged in this timeframe. To avoid this issue, you can + configure the logging collector to dynamically assign log file names + and use a prerotate script to ignore open log files. + + + Another production-grade approach to managing log output is to - send it to syslog and let - syslog deal with file rotation. To do this, set the - configuration parameter log_destination to syslog - (to log to syslog only) in - postgresql.conf. Then you can send a SIGHUP - signal to the syslog daemon whenever you want to force it + send it to syslog and let + syslog deal with file rotation. To do this, set the + configuration parameter log_destination to syslog + (to log to syslog only) in + postgresql.conf. Then you can send a SIGHUP + signal to the syslog daemon whenever you want to force it to start writing a new log file. 
If you want to automate log rotation, the logrotate program can be configured to work with log files from @@ -956,12 +990,12 @@ pg_ctl start | rotatelogs /var/log/pgsql_log 86400 - On many systems, however, syslog is not very reliable, + On many systems, however, syslog is not very reliable, particularly with large log messages; it might truncate or drop messages - just when you need them the most. Also, on Linux, - syslog will flush each message to disk, yielding poor - performance. (You can use a - at the start of the file name - in the syslog configuration file to disable syncing.) + just when you need them the most. Also, on Linux, + syslog will flush each message to disk, yielding poor + performance. (You can use a - at the start of the file name + in the syslog configuration file to disable syncing.) @@ -977,7 +1011,7 @@ pg_ctl start | rotatelogs /var/log/pgsql_log 86400 pgBadger is an external project that does sophisticated log file analysis. check_postgres + url="https://bucardo.org/check_postgres/">check_postgres provides Nagios alerts when important messages appear in the log files, as well as detection of many other extraordinary conditions. diff --git a/doc/src/sgml/manage-ag.sgml b/doc/src/sgml/manage-ag.sgml index fe1a6355c4..0154064e50 100644 --- a/doc/src/sgml/manage-ag.sgml +++ b/doc/src/sgml/manage-ag.sgml @@ -3,7 +3,7 @@ Managing Databases - database + database Every instance of a running PostgreSQL @@ -26,7 +26,7 @@ (database objects). Generally, every database object (tables, functions, etc.) belongs to one and only one database. (However there are a few system catalogs, for example - pg_database, that belong to a whole cluster and + pg_database, that belong to a whole cluster and are accessible from each database within the cluster.) More accurately, a database is a collection of schemas and the schemas contain the tables, functions, etc. So the full hierarchy is: @@ -41,7 +41,7 @@ connection. However, an application is not restricted in the number of connections it opens to the same or other databases. Databases are physically separated and access control is managed at the - connection level. If one PostgreSQL server + connection level. If one PostgreSQL server instance is to house projects or users that should be separate and for the most part unaware of each other, it is therefore recommended to put them into separate databases. If the projects @@ -49,27 +49,27 @@ resources, they should be put in the same database but possibly into separate schemas. Schemas are a purely logical structure and who can access what is managed by the privilege system. More information about - managing schemas is in . + managing schemas is in . - Databases are created with the CREATE DATABASE command - (see ) and destroyed with the - DROP DATABASE command - (see ). + Databases are created with the CREATE DATABASE command + (see ) and destroyed with the + DROP DATABASE command + (see ). To determine the set of existing databases, examine the - pg_database system catalog, for example + pg_database system catalog, for example SELECT datname FROM pg_database; - The program's \l meta-command - and - The SQL standard calls databases catalogs, but there + The SQL standard calls databases catalogs, but there is no difference in practice. @@ -78,21 +78,21 @@ SELECT datname FROM pg_database; Creating a Database - CREATE DATABASE + CREATE DATABASE - In order to create a database, the PostgreSQL + In order to create a database, the PostgreSQL server must be up and running (see ). 
+ linkend="server-start"/>). Databases are created with the SQL command - : + : -CREATE DATABASE name; +CREATE DATABASE name; - where name follows the usual rules for + where name follows the usual rules for SQL identifiers. The current role automatically becomes the owner of the new database. It is the privilege of the owner of a database to remove it later (which also removes all @@ -101,55 +101,55 @@ CREATE DATABASE name; The creation of databases is a restricted operation. See for how to grant permission. + linkend="role-attributes"/> for how to grant permission. Since you need to be connected to the database server in order to execute the CREATE DATABASE command, the - question remains how the first database at any given + question remains how the first database at any given site can be created. The first database is always created by the - initdb command when the data storage area is - initialized. (See .) This + initdb command when the data storage area is + initialized. (See .) This database is called - postgres.postgres So to - create the first ordinary database you can connect to - postgres. + postgres.postgres So to + create the first ordinary database you can connect to + postgres. A second database, - template1,template1 + template1,template1 is also created during database cluster initialization. Whenever a new database is created within the cluster, template1 is essentially cloned. - This means that any changes you make in template1 are + This means that any changes you make in template1 are propagated to all subsequently created databases. Because of this, - avoid creating objects in template1 unless you want them + avoid creating objects in template1 unless you want them propagated to every newly created database. More details - appear in . + appear in . As a convenience, there is a program you can execute from the shell to create new databases, - createdb.createdb + createdb.createdb createdb dbname - createdb does no magic. It connects to the postgres - database and issues the CREATE DATABASE command, + createdb does no magic. It connects to the postgres + database and issues the CREATE DATABASE command, exactly as described above. - The reference page contains the invocation - details. Note that createdb without any arguments will create + The reference page contains the invocation + details. Note that createdb without any arguments will create a database with the current user name. - contains information about + contains information about how to restrict who can connect to a given database. @@ -160,11 +160,11 @@ createdb dbname configure and manage it themselves. To achieve that, use one of the following commands: -CREATE DATABASE dbname OWNER rolename; +CREATE DATABASE dbname OWNER rolename; from the SQL environment, or: -createdb -O rolename dbname +createdb -O rolename dbname from the shell. Only the superuser is allowed to create a database for @@ -176,55 +176,55 @@ createdb -O rolename dbname Template Databases - CREATE DATABASE actually works by copying an existing + CREATE DATABASE actually works by copying an existing database. By default, it copies the standard system database named - template1.template1 Thus that - database is the template from which new databases are - made. If you add objects to template1, these objects + template1.template1 Thus that + database is the template from which new databases are + made. If you add objects to template1, these objects will be copied into subsequently created user databases. 
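   For instance, a sketch of this cloning behavior (the extension name is
   purely illustrative and assumes it is installed on the system):

     \c template1
     CREATE EXTENSION pg_trgm;
     \c postgres
     CREATE DATABASE mydb;    -- mydb now contains the pg_trgm extension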
This behavior allows site-local modifications to the standard set of objects in databases. For example, if you install the procedural - language PL/Perl in template1, it will + language PL/Perl in template1, it will automatically be available in user databases without any extra action being taken when those databases are created. There is a second standard system database named - template0.template0 This + template0.template0 This database contains the same data as the initial contents of - template1, that is, only the standard objects + template1, that is, only the standard objects predefined by your version of - PostgreSQL. template0 + PostgreSQL. template0 should never be changed after the database cluster has been initialized. By instructing - CREATE DATABASE to copy template0 instead - of template1, you can create a virgin user + CREATE DATABASE to copy template0 instead + of template1, you can create a virgin user database that contains none of the site-local additions in - template1. This is particularly handy when restoring a - pg_dump dump: the dump script should be restored in a + template1. This is particularly handy when restoring a + pg_dump dump: the dump script should be restored in a virgin database to ensure that one recreates the correct contents of the dumped database, without conflicting with objects that - might have been added to template1 later on. + might have been added to template1 later on. - Another common reason for copying template0 instead - of template1 is that new encoding and locale settings - can be specified when copying template0, whereas a copy - of template1 must use the same settings it does. - This is because template1 might contain encoding-specific - or locale-specific data, while template0 is known not to. + Another common reason for copying template0 instead + of template1 is that new encoding and locale settings + can be specified when copying template0, whereas a copy + of template1 must use the same settings it does. + This is because template1 might contain encoding-specific + or locale-specific data, while template0 is known not to. To create a database by copying template0, use: -CREATE DATABASE dbname TEMPLATE template0; +CREATE DATABASE dbname TEMPLATE template0; from the SQL environment, or: -createdb -T template0 dbname +createdb -T template0 dbname from the shell. @@ -232,49 +232,49 @@ createdb -T template0 dbname It is possible to create additional template databases, and indeed one can copy any database in a cluster by specifying its name - as the template for CREATE DATABASE. It is important to + as the template for CREATE DATABASE. It is important to understand, however, that this is not (yet) intended as a general-purpose COPY DATABASE facility. The principal limitation is that no other sessions can be connected to the source database while it is being copied. CREATE - DATABASE will fail if any other connection exists when it starts; + DATABASE will fail if any other connection exists when it starts; during the copy operation, new connections to the source database are prevented. - Two useful flags exist in pg_databasepg_database for each + Two useful flags exist in pg_databasepg_database for each database: the columns datistemplate and datallowconn. datistemplate can be set to indicate that a database is intended as a template for - CREATE DATABASE. If this flag is set, the database can be - cloned by any user with CREATEDB privileges; if it is not set, + CREATE DATABASE. 
If this flag is set, the database can be + cloned by any user with CREATEDB privileges; if it is not set, only superusers and the owner of the database can clone it. If datallowconn is false, then no new connections to that database will be allowed (but existing sessions are not terminated simply by setting the flag false). The template0 - database is normally marked datallowconn = false to prevent its modification. + database is normally marked datallowconn = false to prevent its modification. Both template0 and template1 - should always be marked with datistemplate = true. + should always be marked with datistemplate = true. - template1 and template0 do not have any special - status beyond the fact that the name template1 is the default - source database name for CREATE DATABASE. - For example, one could drop template1 and recreate it from - template0 without any ill effects. This course of action + template1 and template0 do not have any special + status beyond the fact that the name template1 is the default + source database name for CREATE DATABASE. + For example, one could drop template1 and recreate it from + template0 without any ill effects. This course of action might be advisable if one has carelessly added a bunch of junk in - template1. (To delete template1, - it must have pg_database.datistemplate = false.) + template1. (To delete template1, + it must have pg_database.datistemplate = false.) - The postgres database is also created when a database + The postgres database is also created when a database cluster is initialized. This database is meant as a default database for users and applications to connect to. It is simply a copy of - template1 and can be dropped and recreated if necessary. + template1 and can be dropped and recreated if necessary. @@ -283,8 +283,8 @@ createdb -T template0 dbname Database Configuration - Recall from that the - PostgreSQL server provides a large number of + Recall from that the + PostgreSQL server provides a large number of run-time configuration variables. You can set database-specific default values for many of these settings. @@ -305,8 +305,8 @@ ALTER DATABASE mydb SET geqo TO off; session started. Note that users can still alter this setting during their sessions; it will only be the default. To undo any such setting, use - ALTER DATABASE dbname RESET - varname. + ALTER DATABASE dbname RESET + varname. @@ -315,9 +315,9 @@ ALTER DATABASE mydb SET geqo TO off; Databases are destroyed with the command - :DROP DATABASE + :DROP DATABASE -DROP DATABASE name; +DROP DATABASE name; Only the owner of the database, or a superuser, can drop a database. Dropping a database removes all objects @@ -329,19 +329,19 @@ DROP DATABASE name; You cannot execute the DROP DATABASE command while connected to the victim database. You can, however, be - connected to any other database, including the template1 + connected to any other database, including the template1 database. - template1 would be the only option for dropping the last user database of a + template1 would be the only option for dropping the last user database of a given cluster. For convenience, there is also a shell program to drop - databases, :dropdb + databases, :dropdb dropdb dbname - (Unlike createdb, it is not the default action to drop + (Unlike createdb, it is not the default action to drop the database with the current user name.) 
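   Because DROP DATABASE also fails while other sessions are connected to the
   target database, it can be convenient to disconnect them first. A sketch
   (the database name is illustrative):

     SELECT pg_terminate_backend(pid)
       FROM pg_stat_activity
      WHERE datname = 'mydb'
        AND pid <> pg_backend_pid();
     DROP DATABASE mydb;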
@@ -354,7 +354,7 @@ dropdb dbname - Tablespaces in PostgreSQL allow database administrators to + Tablespaces in PostgreSQL allow database administrators to define locations in the file system where the files representing database objects can be stored. Once created, a tablespace can be referred to by name when creating database objects. @@ -362,7 +362,7 @@ dropdb dbname By using tablespaces, an administrator can control the disk layout - of a PostgreSQL installation. This is useful in at + of a PostgreSQL installation. This is useful in at least two ways. First, if the partition or volume on which the cluster was initialized runs out of space and cannot be extended, a tablespace can be created on a different partition and used @@ -396,13 +396,13 @@ dropdb dbname To define a tablespace, use the - command, for example:CREATE TABLESPACE: + linkend="sql-createtablespace"/> + command, for example:CREATE TABLESPACE: CREATE TABLESPACE fastspace LOCATION '/ssd1/postgresql/data'; The location must be an existing, empty directory that is owned by - the PostgreSQL operating system user. All objects subsequently + the PostgreSQL operating system user. All objects subsequently created within the tablespace will be stored in files underneath this directory. The location must not be on removable or transient storage, as the cluster might fail to function if the tablespace is missing @@ -414,7 +414,7 @@ CREATE TABLESPACE fastspace LOCATION '/ssd1/postgresql/data'; There is usually not much point in making more than one tablespace per logical file system, since you cannot control the location of individual files within a logical file system. However, - PostgreSQL does not enforce any such limitation, and + PostgreSQL does not enforce any such limitation, and indeed it is not directly aware of the file system boundaries on your system. It just stores files in the directories you tell it to use. @@ -423,34 +423,34 @@ CREATE TABLESPACE fastspace LOCATION '/ssd1/postgresql/data'; Creation of the tablespace itself must be done as a database superuser, but after that you can allow ordinary database users to use it. - To do that, grant them the CREATE privilege on it. + To do that, grant them the CREATE privilege on it. Tables, indexes, and entire databases can be assigned to - particular tablespaces. To do so, a user with the CREATE + particular tablespaces. To do so, a user with the CREATE privilege on a given tablespace must pass the tablespace name as a parameter to the relevant command. For example, the following creates - a table in the tablespace space1: + a table in the tablespace space1: CREATE TABLE foo(i int) TABLESPACE space1; - Alternatively, use the parameter: + Alternatively, use the parameter: SET default_tablespace = space1; CREATE TABLE foo(i int); - When default_tablespace is set to anything but an empty - string, it supplies an implicit TABLESPACE clause for - CREATE TABLE and CREATE INDEX commands that + When default_tablespace is set to anything but an empty + string, it supplies an implicit TABLESPACE clause for + CREATE TABLE and CREATE INDEX commands that do not have an explicit one. - There is also a parameter, which + There is also a parameter, which determines the placement of temporary tables and indexes, as well as temporary files that are used for purposes such as sorting large data sets. 
This can be a list of tablespace names, rather than only one, @@ -463,9 +463,9 @@ CREATE TABLE foo(i int); The tablespace associated with a database is used to store the system catalogs of that database. Furthermore, it is the default tablespace used for tables, indexes, and temporary files created within the database, - if no TABLESPACE clause is given and no other selection is - specified by default_tablespace or - temp_tablespaces (as appropriate). + if no TABLESPACE clause is given and no other selection is + specified by default_tablespace or + temp_tablespaces (as appropriate). If a database is created without specifying a tablespace for it, it uses the same tablespace as the template database it is copied from. @@ -473,12 +473,12 @@ CREATE TABLE foo(i int); Two tablespaces are automatically created when the database cluster is initialized. The - pg_global tablespace is used for shared system catalogs. The - pg_default tablespace is the default tablespace of the - template1 and template0 databases (and, therefore, + pg_global tablespace is used for shared system catalogs. The + pg_default tablespace is the default tablespace of the + template1 and template0 databases (and, therefore, will be the default tablespace for other databases as well, unless - overridden by a TABLESPACE clause in CREATE - DATABASE). + overridden by a TABLESPACE clause in CREATE + DATABASE). @@ -490,7 +490,7 @@ CREATE TABLE foo(i int); To remove an empty tablespace, use the + linkend="sql-droptablespace"/> command. @@ -501,25 +501,25 @@ CREATE TABLE foo(i int); SELECT spcname FROM pg_tablespace; - The program's \db meta-command + The program's \db meta-command is also useful for listing the existing tablespaces. - PostgreSQL makes use of symbolic links + PostgreSQL makes use of symbolic links to simplify the implementation of tablespaces. This - means that tablespaces can be used only on systems + means that tablespaces can be used only on systems that support symbolic links. - The directory $PGDATA/pg_tblspc contains symbolic links that + The directory $PGDATA/pg_tblspc contains symbolic links that point to each of the non-built-in tablespaces defined in the cluster. Although not recommended, it is possible to adjust the tablespace layout by hand by redefining these links. Under no circumstances perform this operation while the server is running. Note that in PostgreSQL 9.1 - and earlier you will also need to update the pg_tablespace - catalog with the new locations. (If you do not, pg_dump will + and earlier you will also need to update the pg_tablespace + catalog with the new locations. (If you do not, pg_dump will continue to output the old tablespace locations.) diff --git a/doc/src/sgml/mk_feature_tables.pl b/doc/src/sgml/mk_feature_tables.pl index 9b111b8b40..476e50e66d 100644 --- a/doc/src/sgml/mk_feature_tables.pl +++ b/doc/src/sgml/mk_feature_tables.pl @@ -38,8 +38,8 @@ $is_supported eq $yesno || next; - $feature_name =~ s//>/g; + $feature_name =~ s//>/g; $subfeature_name =~ s//>/g; diff --git a/doc/src/sgml/monitoring.sgml b/doc/src/sgml/monitoring.sgml index 5575c2c837..add71458e2 100644 --- a/doc/src/sgml/monitoring.sgml +++ b/doc/src/sgml/monitoring.sgml @@ -24,11 +24,11 @@ analyzing performance. Most of this chapter is devoted to describing PostgreSQL's statistics collector, but one should not neglect regular Unix monitoring programs such as - ps, top, iostat, and vmstat. + ps, top, iostat, and vmstat. 
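   For example, a quick look at system-wide I/O and memory pressure while the
   database is under load (the invocations shown are typical for Linux; option
   names differ on other platforms):

     iostat -x 5
     vmstat 5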
Also, once one has identified a poorly-performing query, further investigation might be needed using - PostgreSQL's command. - discusses EXPLAIN + PostgreSQL's command. + discusses EXPLAIN and other methods for understanding the behavior of an individual query. @@ -43,45 +43,45 @@ On most Unix platforms, PostgreSQL modifies its - command title as reported by ps, so that individual server + command title as reported by ps, so that individual server processes can readily be identified. A sample display is $ ps auxww | grep ^postgres postgres 15551 0.0 0.1 57536 7132 pts/0 S 18:02 0:00 postgres -i -postgres 15554 0.0 0.0 57536 1184 ? Ss 18:02 0:00 postgres: writer process -postgres 15555 0.0 0.0 57536 916 ? Ss 18:02 0:00 postgres: checkpointer process -postgres 15556 0.0 0.0 57536 916 ? Ss 18:02 0:00 postgres: wal writer process -postgres 15557 0.0 0.0 58504 2244 ? Ss 18:02 0:00 postgres: autovacuum launcher process -postgres 15558 0.0 0.0 17512 1068 ? Ss 18:02 0:00 postgres: stats collector process +postgres 15554 0.0 0.0 57536 1184 ? Ss 18:02 0:00 postgres: background writer +postgres 15555 0.0 0.0 57536 916 ? Ss 18:02 0:00 postgres: checkpointer +postgres 15556 0.0 0.0 57536 916 ? Ss 18:02 0:00 postgres: walwriter +postgres 15557 0.0 0.0 58504 2244 ? Ss 18:02 0:00 postgres: autovacuum launcher +postgres 15558 0.0 0.0 17512 1068 ? Ss 18:02 0:00 postgres: stats collector postgres 15582 0.0 0.0 58772 3080 ? Ss 18:04 0:00 postgres: joe runbug 127.0.0.1 idle postgres 15606 0.0 0.0 58772 3052 ? Ss 18:07 0:00 postgres: tgl regression [local] SELECT waiting postgres 15610 0.0 0.0 58772 3056 ? Ss 18:07 0:00 postgres: tgl regression [local] idle in transaction - (The appropriate invocation of ps varies across different + (The appropriate invocation of ps varies across different platforms, as do the details of what is shown. This example is from a recent Linux system.) The first process listed here is the master server process. The command arguments shown for it are the same ones used when it was launched. The next five processes are background worker processes automatically launched by the - master process. (The stats collector process will not be present + master process. (The stats collector process will not be present if you have set the system not to start the statistics collector; likewise - the autovacuum launcher process can be disabled.) + the autovacuum launcher process can be disabled.) Each of the remaining processes is a server process handling one client connection. Each such process sets its command line display in the form -postgres: user database host activity +postgres: user database host activity The user, database, and (client) host items remain the same for the life of the client connection, but the activity indicator changes. - The activity can be idle (i.e., waiting for a client command), - idle in transaction (waiting for client inside a BEGIN block), - or a command type name such as SELECT. Also, - waiting is appended if the server process is presently waiting + The activity can be idle (i.e., waiting for a client command), + idle in transaction (waiting for client inside a BEGIN block), + or a command type name such as SELECT. Also, + waiting is appended if the server process is presently waiting on a lock held by another session. In the above example we can infer that process 15606 is waiting for process 15610 to complete its transaction and thereby release some lock. 
(Process 15610 must be the blocker, because @@ -92,8 +92,8 @@ postgres: user database host - If has been configured the - cluster name will also be shown in ps output: + If has been configured the + cluster name will also be shown in ps output: $ psql -c 'SHOW cluster_name' cluster_name @@ -102,13 +102,13 @@ $ psql -c 'SHOW cluster_name' (1 row) $ ps aux|grep server1 -postgres 27093 0.0 0.0 30096 2752 ? Ss 11:34 0:00 postgres: server1: writer process +postgres 27093 0.0 0.0 30096 2752 ? Ss 11:34 0:00 postgres: server1: background writer ... - If you have turned off then the + If you have turned off then the activity indicator is not updated; the process title is set only once when a new process is launched. On some platforms this saves a measurable amount of per-command overhead; on others it's insignificant. @@ -122,8 +122,8 @@ postgres 27093 0.0 0.0 30096 2752 ? Ss 11:34 0:00 postgres: ser flags, not just one. In addition, your original invocation of the postgres command must have a shorter ps status display than that provided by each - server process. If you fail to do all three things, the ps - output for each server process will be the original postgres + server process. If you fail to do all three things, the ps + output for each server process will be the original postgres command line. @@ -137,7 +137,7 @@ postgres 27093 0.0 0.0 30096 2752 ? Ss 11:34 0:00 postgres: ser - PostgreSQL's statistics collector + PostgreSQL's statistics collector is a subsystem that supports collection and reporting of information about server activity. Presently, the collector can count accesses to tables and indexes in both disk-block and individual-row terms. It also tracks @@ -161,47 +161,47 @@ postgres 27093 0.0 0.0 30096 2752 ? Ss 11:34 0:00 postgres: ser Since collection of statistics adds some overhead to query execution, the system can be configured to collect or not collect information. This is controlled by configuration parameters that are normally set in - postgresql.conf. (See for + postgresql.conf. (See for details about setting configuration parameters.) - The parameter enables monitoring + The parameter enables monitoring of the current command being executed by any server process. - The parameter controls whether + The parameter controls whether statistics are collected about table and index accesses. - The parameter enables tracking of + The parameter enables tracking of usage of user-defined functions. - The parameter enables monitoring + The parameter enables monitoring of block read and write times. - Normally these parameters are set in postgresql.conf so + Normally these parameters are set in postgresql.conf so that they apply to all server processes, but it is possible to turn them on or off in individual sessions using the command. (To prevent + linkend="sql-set"/> command. (To prevent ordinary users from hiding their activity from the administrator, only superusers are allowed to change these parameters with - SET.) + SET.) The statistics collector transmits the collected information to other PostgreSQL processes through temporary files. These files are stored in the directory named by the - parameter, + parameter, pg_stat_tmp by default. - For better performance, stats_temp_directory can be + For better performance, stats_temp_directory can be pointed at a RAM-based file system, decreasing physical I/O requirements. 
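   For example, a sketch of such a configuration (the tmpfs mount point is an
   assumption; the directory must exist and be writable by the server's
   operating system user):

     # postgresql.conf
     stats_temp_directory = '/run/postgresql/pg_stat_tmp'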
When the server shuts down cleanly, a permanent copy of the statistics data is stored in the pg_stat subdirectory, so that @@ -217,13 +217,13 @@ postgres 27093 0.0 0.0 30096 2752 ? Ss 11:34 0:00 postgres: ser Several predefined views, listed in , are available to show + linkend="monitoring-stats-dynamic-views-table"/>, are available to show the current state of the system. There are also several other views, listed in , available to show the results + linkend="monitoring-stats-views-table"/>, available to show the results of statistics collection. Alternatively, one can build custom views using the underlying statistics functions, as discussed - in . + in . @@ -261,10 +261,10 @@ postgres 27093 0.0 0.0 30096 2752 ? Ss 11:34 0:00 postgres: ser A transaction can also see its own statistics (as yet untransmitted to the - collector) in the views pg_stat_xact_all_tables, - pg_stat_xact_sys_tables, - pg_stat_xact_user_tables, and - pg_stat_xact_user_functions. These numbers do not act as + collector) in the views pg_stat_xact_all_tables, + pg_stat_xact_sys_tables, + pg_stat_xact_user_tables, and + pg_stat_xact_user_functions. These numbers do not act as stated above; instead they update continuously throughout the transaction. @@ -288,47 +288,47 @@ postgres 27093 0.0 0.0 30096 2752 ? Ss 11:34 0:00 postgres: ser One row per server process, showing information related to the current activity of that process, such as state and current query. - See for details. + See for details. - pg_stat_replicationpg_stat_replication + pg_stat_replicationpg_stat_replication One row per WAL sender process, showing statistics about replication to that sender's connected standby server. - See for details. + See for details. - pg_stat_wal_receiverpg_stat_wal_receiver + pg_stat_wal_receiverpg_stat_wal_receiver Only one row, showing statistics about the WAL receiver from that receiver's connected server. - See for details. + See for details. - pg_stat_subscriptionpg_stat_subscription + pg_stat_subscriptionpg_stat_subscription At least one row per subscription, showing information about the subscription workers. - See for details. + See for details. - pg_stat_sslpg_stat_ssl + pg_stat_sslpg_stat_ssl One row per connection (regular and replication), showing information about SSL used on this connection. - See for details. + See for details. - pg_stat_progress_vacuumpg_stat_progress_vacuum + pg_stat_progress_vacuumpg_stat_progress_vacuum One row for each backend (including autovacuum worker processes) running - VACUUM, showing current progress. - See . + VACUUM, showing current progress. + See . @@ -349,178 +349,178 @@ postgres 27093 0.0 0.0 30096 2752 ? Ss 11:34 0:00 postgres: ser - pg_stat_archiverpg_stat_archiver + pg_stat_archiverpg_stat_archiver One row only, showing statistics about the WAL archiver process's activity. See - for details. + for details. - pg_stat_bgwriterpg_stat_bgwriter + pg_stat_bgwriterpg_stat_bgwriter One row only, showing statistics about the background writer process's activity. See - for details. + for details. - pg_stat_databasepg_stat_database + pg_stat_databasepg_stat_database One row per database, showing database-wide statistics. See - for details. + for details. - pg_stat_database_conflictspg_stat_database_conflicts + pg_stat_database_conflictspg_stat_database_conflicts One row per database, showing database-wide statistics about query cancels due to conflict with recovery on standby servers. - See for details. + See for details. 
- pg_stat_all_tablespg_stat_all_tables + pg_stat_all_tablespg_stat_all_tables One row for each table in the current database, showing statistics about accesses to that specific table. - See for details. + See for details. - pg_stat_sys_tablespg_stat_sys_tables - Same as pg_stat_all_tables, except that only + pg_stat_sys_tablespg_stat_sys_tables + Same as pg_stat_all_tables, except that only system tables are shown. - pg_stat_user_tablespg_stat_user_tables - Same as pg_stat_all_tables, except that only user + pg_stat_user_tablespg_stat_user_tables + Same as pg_stat_all_tables, except that only user tables are shown. - pg_stat_xact_all_tablespg_stat_xact_all_tables - Similar to pg_stat_all_tables, but counts actions - taken so far within the current transaction (which are not - yet included in pg_stat_all_tables and related views). + pg_stat_xact_all_tablespg_stat_xact_all_tables + Similar to pg_stat_all_tables, but counts actions + taken so far within the current transaction (which are not + yet included in pg_stat_all_tables and related views). The columns for numbers of live and dead rows and vacuum and analyze actions are not present in this view. - pg_stat_xact_sys_tablespg_stat_xact_sys_tables - Same as pg_stat_xact_all_tables, except that only + pg_stat_xact_sys_tablespg_stat_xact_sys_tables + Same as pg_stat_xact_all_tables, except that only system tables are shown. - pg_stat_xact_user_tablespg_stat_xact_user_tables - Same as pg_stat_xact_all_tables, except that only + pg_stat_xact_user_tablespg_stat_xact_user_tables + Same as pg_stat_xact_all_tables, except that only user tables are shown. - pg_stat_all_indexespg_stat_all_indexes + pg_stat_all_indexespg_stat_all_indexes One row for each index in the current database, showing statistics about accesses to that specific index. - See for details. + See for details. - pg_stat_sys_indexespg_stat_sys_indexes - Same as pg_stat_all_indexes, except that only + pg_stat_sys_indexespg_stat_sys_indexes + Same as pg_stat_all_indexes, except that only indexes on system tables are shown. - pg_stat_user_indexespg_stat_user_indexes - Same as pg_stat_all_indexes, except that only + pg_stat_user_indexespg_stat_user_indexes + Same as pg_stat_all_indexes, except that only indexes on user tables are shown. - pg_statio_all_tablespg_statio_all_tables + pg_statio_all_tablespg_statio_all_tables One row for each table in the current database, showing statistics about I/O on that specific table. - See for details. + See for details. - pg_statio_sys_tablespg_statio_sys_tables - Same as pg_statio_all_tables, except that only + pg_statio_sys_tablespg_statio_sys_tables + Same as pg_statio_all_tables, except that only system tables are shown. - pg_statio_user_tablespg_statio_user_tables - Same as pg_statio_all_tables, except that only + pg_statio_user_tablespg_statio_user_tables + Same as pg_statio_all_tables, except that only user tables are shown. - pg_statio_all_indexespg_statio_all_indexes + pg_statio_all_indexespg_statio_all_indexes One row for each index in the current database, showing statistics about I/O on that specific index. - See for details. + See for details. - pg_statio_sys_indexespg_statio_sys_indexes - Same as pg_statio_all_indexes, except that only + pg_statio_sys_indexespg_statio_sys_indexes + Same as pg_statio_all_indexes, except that only indexes on system tables are shown. 
- pg_statio_user_indexespg_statio_user_indexes - Same as pg_statio_all_indexes, except that only + pg_statio_user_indexespg_statio_user_indexes + Same as pg_statio_all_indexes, except that only indexes on user tables are shown. - pg_statio_all_sequencespg_statio_all_sequences + pg_statio_all_sequencespg_statio_all_sequences One row for each sequence in the current database, showing statistics about I/O on that specific sequence. - See for details. + See for details. - pg_statio_sys_sequencespg_statio_sys_sequences - Same as pg_statio_all_sequences, except that only + pg_statio_sys_sequencespg_statio_sys_sequences + Same as pg_statio_all_sequences, except that only system sequences are shown. (Presently, no system sequences are defined, so this view is always empty.) - pg_statio_user_sequencespg_statio_user_sequences - Same as pg_statio_all_sequences, except that only + pg_statio_user_sequencespg_statio_user_sequences + Same as pg_statio_all_sequences, except that only user sequences are shown. - pg_stat_user_functionspg_stat_user_functions + pg_stat_user_functionspg_stat_user_functions One row for each tracked function, showing statistics about executions of that function. See - for details. + for details. - pg_stat_xact_user_functionspg_stat_xact_user_functions - Similar to pg_stat_user_functions, but counts only - calls during the current transaction (which are not - yet included in pg_stat_user_functions). + pg_stat_xact_user_functionspg_stat_xact_user_functions + Similar to pg_stat_user_functions, but counts only + calls during the current transaction (which are not + yet included in pg_stat_user_functions). @@ -533,18 +533,18 @@ postgres 27093 0.0 0.0 30096 2752 ? Ss 11:34 0:00 postgres: ser - The pg_statio_ views are primarily useful to + The pg_statio_ views are primarily useful to determine the effectiveness of the buffer cache. When the number of actual disk reads is much smaller than the number of buffer hits, then the cache is satisfying most read requests without invoking a kernel call. However, these statistics do not give the - entire story: due to the way in which PostgreSQL + entire story: due to the way in which PostgreSQL handles disk I/O, data that is not in the - PostgreSQL buffer cache might still reside in the + PostgreSQL buffer cache might still reside in the kernel's I/O cache, and might therefore still be fetched without requiring a physical read. Users interested in obtaining more - detailed information on PostgreSQL I/O behavior are - advised to use the PostgreSQL statistics collector + detailed information on PostgreSQL I/O behavior are + advised to use the PostgreSQL statistics collector in combination with operating system utilities that allow insight into the kernel's handling of I/O. @@ -564,39 +564,39 @@ postgres 27093 0.0 0.0 30096 2752 ? Ss 11:34 0:00 postgres: ser - datid - oid + datid + oid OID of the database this backend is connected to - datname - name + datname + name Name of the database this backend is connected to - pid - integer + pid + integer Process ID of this backend - usesysid - oid + usesysid + oid OID of the user logged into this backend - usename - name + usename + name Name of the user logged into this backend - application_name - text + application_name + text Name of the application that is connected to this backend - client_addr - inet + client_addr + inet IP address of the client connected to this backend. 
If this field is null, it indicates either that the client is connected via a Unix socket on the server machine or that this is an @@ -604,78 +604,78 @@ postgres 27093 0.0 0.0 30096 2752 ? Ss 11:34 0:00 postgres: ser - client_hostname - text + client_hostname + text Host name of the connected client, as reported by a - reverse DNS lookup of client_addr. This field will + reverse DNS lookup of client_addr. This field will only be non-null for IP connections, and only when is enabled. + linkend="guc-log-hostname"/> is enabled. - client_port - integer + client_port + integer TCP port number that the client is using for communication - with this backend, or -1 if a Unix socket is used + with this backend, or -1 if a Unix socket is used - backend_start - timestamp with time zone + backend_start + timestamp with time zone Time when this process was started. For client backends, this is the time the client connected to the server. - xact_start - timestamp with time zone + xact_start + timestamp with time zone Time when this process' current transaction was started, or null if no transaction is active. If the current query is the first of its transaction, this column is equal to the - query_start column. + query_start column. - query_start - timestamp with time zone + query_start + timestamp with time zone Time when the currently active query was started, or if - state is not active, when the last query + state is not active, when the last query was started - state_change - timestamp with time zone - Time when the state was last changed + state_change + timestamp with time zone + Time when the state was last changed - wait_event_type - text + wait_event_type + text The type of event for which the backend is waiting, if any; otherwise NULL. Possible values are: - LWLock: The backend is waiting for a lightweight lock. + LWLock: The backend is waiting for a lightweight lock. Each such lock protects a particular data structure in shared memory. - wait_event will contain a name identifying the purpose + wait_event will contain a name identifying the purpose of the lightweight lock. (Some locks have specific names; others are part of a group of locks each with a similar purpose.) - Lock: The backend is waiting for a heavyweight lock. + Lock: The backend is waiting for a heavyweight lock. Heavyweight locks, also known as lock manager locks or simply locks, primarily protect SQL-visible objects such as tables. However, they are also used to ensure mutual exclusion for certain internal - operations such as relation extension. wait_event will + operations such as relation extension. wait_event will identify the type of lock awaited. - BufferPin: The server process is waiting to access to + BufferPin: The server process is waiting to access to a data buffer during a period when no other process can be examining that buffer. Buffer pin waits can be protracted if another process holds an open cursor which last read data from the @@ -684,95 +684,95 @@ postgres 27093 0.0 0.0 30096 2752 ? Ss 11:34 0:00 postgres: ser - Activity: The server process is idle. This is used by + Activity: The server process is idle. This is used by system processes waiting for activity in their main processing loop. - wait_event will identify the specific wait point. + wait_event will identify the specific wait point. - Extension: The server process is waiting for activity + Extension: The server process is waiting for activity in an extension module. This category is useful for modules to track custom waiting points. 
- Client: The server process is waiting for some activity + Client: The server process is waiting for some activity on a socket from user applications, and that the server expects something to happen that is independent from its internal processes. - wait_event will identify the specific wait point. + wait_event will identify the specific wait point. - IPC: The server process is waiting for some activity - from another process in the server. wait_event will + IPC: The server process is waiting for some activity + from another process in the server. wait_event will identify the specific wait point. - Timeout: The server process is waiting for a timeout - to expire. wait_event will identify the specific wait + Timeout: The server process is waiting for a timeout + to expire. wait_event will identify the specific wait point. - IO: The server process is waiting for a IO to complete. - wait_event will identify the specific wait point. + IO: The server process is waiting for a IO to complete. + wait_event will identify the specific wait point. - wait_event - text + wait_event + text Wait event name if backend is currently waiting, otherwise NULL. - See for details. + See for details. - state - text + state + text Current overall state of this backend. Possible values are: - active: The backend is executing a query. + active: The backend is executing a query. - idle: The backend is waiting for a new client command. + idle: The backend is waiting for a new client command. - idle in transaction: The backend is in a transaction, + idle in transaction: The backend is in a transaction, but is not currently executing a query. - idle in transaction (aborted): This state is similar to - idle in transaction, except one of the statements in + idle in transaction (aborted): This state is similar to + idle in transaction, except one of the statements in the transaction caused an error. - fastpath function call: The backend is executing a + fastpath function call: The backend is executing a fast-path function. - disabled: This state is reported if is disabled in this backend. + disabled: This state is reported if is disabled in this backend. @@ -786,28 +786,28 @@ postgres 27093 0.0 0.0 30096 2752 ? Ss 11:34 0:00 postgres: ser backend_xmin xid - The current backend's xmin horizon. + The current backend's xmin horizon. - query - text + query + text Text of this backend's most recent query. If - state is active this field shows the + state is active this field shows the currently executing query. In all other states, it shows the last query that was executed. By default the query text is truncated at 1024 characters; this value can be changed via the parameter - . + . backend_type text Type of current backend. Possible types are - autovacuum launcher, autovacuum worker, - background worker, background writer, - client backend, checkpointer, - startup, walreceiver, - walsender and walwriter. + autovacuum launcher, autovacuum worker, + background worker, background writer, + client backend, checkpointer, + startup, walreceiver, + walsender and walwriter. @@ -822,10 +822,10 @@ postgres 27093 0.0 0.0 30096 2752 ? Ss 11:34 0:00 postgres: ser - The wait_event and state columns are - independent. If a backend is in the active state, - it may or may not be waiting on some event. If the state - is active and wait_event is non-null, it + The wait_event and state columns are + independent. If a backend is in the active state, + it may or may not be waiting on some event. 
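   For example, the backends that are both active and currently waiting can be
   listed with a query such as this sketch, using columns of this view:

     SELECT pid, state, wait_event_type, wait_event, query
       FROM pg_stat_activity
      WHERE state = 'active' AND wait_event IS NOT NULL;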
If the state + is active and wait_event is non-null, it means that a query is being executed, but is being blocked somewhere in the system. @@ -845,755 +845,849 @@ postgres 27093 0.0 0.0 30096 2752 ? Ss 11:34 0:00 postgres: ser - LWLock - ShmemIndexLock + LWLock + ShmemIndexLock Waiting to find or allocate space in shared memory. - OidGenLock + OidGenLock Waiting to allocate or assign an OID. - XidGenLock + XidGenLock Waiting to allocate or assign a transaction id. - ProcArrayLock + ProcArrayLock Waiting to get a snapshot or clearing a transaction id at transaction end. - SInvalReadLock + SInvalReadLock Waiting to retrieve or remove messages from shared invalidation queue. - SInvalWriteLock + SInvalWriteLock Waiting to add a message in shared invalidation queue. - WALBufMappingLock + WALBufMappingLock Waiting to replace a page in WAL buffers. - WALWriteLock + WALWriteLock Waiting for WAL buffers to be written to disk. - ControlFileLock + ControlFileLock Waiting to read or update the control file or creation of a new WAL file. - CheckpointLock + CheckpointLock Waiting to perform checkpoint. - CLogControlLock + CLogControlLock Waiting to read or update transaction status. - SubtransControlLock + SubtransControlLock Waiting to read or update sub-transaction information. - MultiXactGenLock + MultiXactGenLock Waiting to read or update shared multixact state. - MultiXactOffsetControlLock + MultiXactOffsetControlLock Waiting to read or update multixact offset mappings. - MultiXactMemberControlLock + MultiXactMemberControlLock Waiting to read or update multixact member mappings. - RelCacheInitLock + RelCacheInitLock Waiting to read or write relation cache initialization file. - CheckpointerCommLock + CheckpointerCommLock Waiting to manage fsync requests. - TwoPhaseStateLock + TwoPhaseStateLock Waiting to read or update the state of prepared transactions. - TablespaceCreateLock + TablespaceCreateLock Waiting to create or drop the tablespace. - BtreeVacuumLock + BtreeVacuumLock Waiting to read or update vacuum-related information for a B-tree index. - AddinShmemInitLock + AddinShmemInitLock Waiting to manage space allocation in shared memory. - AutovacuumLock + AutovacuumLock Autovacuum worker or launcher waiting to update or read the current state of autovacuum workers. - AutovacuumScheduleLock + AutovacuumScheduleLock Waiting to ensure that the table it has selected for a vacuum still needs vacuuming. - SyncScanLock + SyncScanLock Waiting to get the start location of a scan on a table for synchronized scans. - RelationMappingLock + RelationMappingLock Waiting to update the relation map file used to store catalog to filenode mapping. - AsyncCtlLock + AsyncCtlLock Waiting to read or update shared notification state. - AsyncQueueLock + AsyncQueueLock Waiting to read or update notification messages. - SerializableXactHashLock + SerializableXactHashLock Waiting to retrieve or store information about serializable transactions. - SerializableFinishedListLock + SerializableFinishedListLock Waiting to access the list of finished serializable transactions. - SerializablePredicateLockListLock + SerializablePredicateLockListLock Waiting to perform an operation on a list of locks held by serializable transactions. - OldSerXidLock + OldSerXidLock Waiting to read or record conflicting serializable transactions. - SyncRepLock + SyncRepLock Waiting to read or update information about synchronous replicas. - BackgroundWorkerLock + BackgroundWorkerLock Waiting to read or update background worker state. 
- DynamicSharedMemoryControlLock + DynamicSharedMemoryControlLock Waiting to read or update dynamic shared memory state. - AutoFileLock - Waiting to update the postgresql.auto.conf file. + AutoFileLock + Waiting to update the postgresql.auto.conf file. - ReplicationSlotAllocationLock + ReplicationSlotAllocationLock Waiting to allocate or free a replication slot. - ReplicationSlotControlLock + ReplicationSlotControlLock Waiting to read or update replication slot state. - CommitTsControlLock + CommitTsControlLock Waiting to read or update transaction commit timestamps. - CommitTsLock + CommitTsLock Waiting to read or update the last value set for the transaction timestamp. - ReplicationOriginLock + ReplicationOriginLock Waiting to setup, drop or use replication origin. - MultiXactTruncationLock + MultiXactTruncationLock Waiting to read or truncate multixact information. - OldSnapshotTimeMapLock + OldSnapshotTimeMapLock Waiting to read or update old snapshot control information. - CLogTruncationLock + BackendRandomLock + Waiting to generate a random number. + + + LogicalRepWorkerLock + Waiting for action on logical replication worker to finish. + + + CLogTruncationLock Waiting to truncate the write-ahead log or waiting for write-ahead log truncation to finish. - clog + clog Waiting for I/O on a clog (transaction status) buffer. - commit_timestamp + commit_timestamp Waiting for I/O on commit timestamp buffer. - subtrans + subtrans Waiting for I/O a subtransaction buffer. - multixact_offset + multixact_offset Waiting for I/O on a multixact offset buffer. - multixact_member + multixact_member Waiting for I/O on a multixact_member buffer. - async + async Waiting for I/O on an async (notify) buffer. - oldserxid - Waiting to I/O on an oldserxid buffer. + oldserxid + Waiting for I/O on an oldserxid buffer. - wal_insert + wal_insert Waiting to insert WAL into a memory buffer. - buffer_content + buffer_content Waiting to read or write a data page in memory. - buffer_io + buffer_io Waiting for I/O on a data page. - replication_origin + replication_origin Waiting to read or update the replication progress. - replication_slot_io + replication_slot_io Waiting for I/O on a replication slot. - proc + proc Waiting to read or update the fast-path lock information. - buffer_mapping + buffer_mapping Waiting to associate a data block with a buffer in the buffer pool. - lock_manager + lock_manager Waiting to add or examine locks for backends, or waiting to join or exit a locking group (used by parallel query). - predicate_lock_manager + predicate_lock_manager Waiting to add or examine predicate lock information. - parallel_query_dsa + parallel_query_dsa Waiting for parallel query dynamic shared memory allocation lock. - tbm + tbm Waiting for TBM shared iterator lock. - Lock - relation + parallel_append + Waiting to choose the next subplan during Parallel Append plan + execution. + + + parallel_hash_join + Waiting to allocate or exchange a chunk of memory or update + counters during Parallel Hash plan execution. + + + Lock + relation Waiting to acquire a lock on a relation. - extend + extend Waiting to extend a relation. - page + page Waiting to acquire a lock on page of a relation. - tuple + tuple Waiting to acquire a lock on a tuple. - transactionid + transactionid Waiting for a transaction to finish. - virtualxid + virtualxid Waiting to acquire a virtual xid lock. - speculative token + speculative token Waiting to acquire a speculative insertion lock. 
- object + object Waiting to acquire a lock on a non-relation database object. - userlock + userlock Waiting to acquire a user lock. - advisory + advisory Waiting to acquire an advisory user lock. - BufferPin - BufferPin + BufferPin + BufferPin Waiting to acquire a pin on a buffer. - Activity - ArchiverMain + Activity + ArchiverMain Waiting in main loop of the archiver process. - AutoVacuumMain + AutoVacuumMain Waiting in main loop of autovacuum launcher process. - BgWriterHibernate + BgWriterHibernate Waiting in background writer process, hibernating. - BgWriterMain + BgWriterMain Waiting in main loop of background writer process background worker. - CheckpointerMain + CheckpointerMain Waiting in main loop of checkpointer process. - LogicalLauncherMain - Waiting in main loop of logical launcher process. + LogicalApplyMain + Waiting in main loop of logical apply process. - LogicalApplyMain - Waiting in main loop of logical apply process. + LogicalLauncherMain + Waiting in main loop of logical launcher process. - PgStatMain + PgStatMain Waiting in main loop of the statistics collector process. - RecoveryWalAll + RecoveryWalAll Waiting for WAL from any kind of source (local, archive or stream) at recovery. - RecoveryWalStream + RecoveryWalStream Waiting for WAL from a stream at recovery. - SysLoggerMain + SysLoggerMain Waiting in main loop of syslogger process. - WalReceiverMain + WalReceiverMain Waiting in main loop of WAL receiver process. - WalSenderMain + WalSenderMain Waiting in main loop of WAL sender process. - WalWriterMain + WalWriterMain Waiting in main loop of WAL writer process. - Client - ClientRead + Client + ClientRead Waiting to read data from the client. - ClientWrite - Waiting to write data from the client. + ClientWrite + Waiting to write data to the client. - LibPQWalReceiverConnect + LibPQWalReceiverConnect Waiting in WAL receiver to establish connection to remote server. - LibPQWalReceiverReceive + LibPQWalReceiverReceive Waiting in WAL receiver to receive data from remote server. - SSLOpenServer + SSLOpenServer Waiting for SSL while attempting connection. - WalReceiverWaitStart + WalReceiverWaitStart Waiting for startup process to send initial data for streaming replication. - WalSenderWaitForWAL + WalSenderWaitForWAL Waiting for WAL to be flushed in WAL sender process. - WalSenderWriteData + WalSenderWriteData Waiting for any activity when processing replies from WAL receiver in WAL sender process. - Extension - Extension + Extension + Extension Waiting in an extension. - IPC - BgWorkerShutdown + IPC + BgWorkerShutdown Waiting for background worker to shut down. - BgWorkerStartup + BgWorkerStartup Waiting for background worker to start up. - BtreePage + BtreePage Waiting for the page number needed to continue a parallel B-tree scan to become available. - ExecuteGather - Waiting for activity from child process when executing Gather node. + ClogGroupUpdate + Waiting for group leader to update transaction status at transaction end. + + + ExecuteGather + Waiting for activity from child process when executing Gather node. + + + Hash/Batch/Allocating + Waiting for an elected Parallel Hash participant to allocate a hash table. + + + Hash/Batch/Electing + Electing a Parallel Hash participant to allocate a hash table. + + + Hash/Batch/Loading + Waiting for other Parallel Hash participants to finish loading a hash table. + + + Hash/Build/Allocating + Waiting for an elected Parallel Hash participant to allocate the initial hash table. 
+ + + Hash/Build/Electing + Electing a Parallel Hash participant to allocate the initial hash table. + + + Hash/Build/HashingInner + Waiting for other Parallel Hash participants to finish hashing the inner relation. + + + Hash/Build/HashingOuter + Waiting for other Parallel Hash participants to finish partitioning the outer relation. + + + Hash/GrowBatches/Allocating + Waiting for an elected Parallel Hash participant to allocate more batches. + + + Hash/GrowBatches/Deciding + Electing a Parallel Hash participant to decide on future batch growth. + + + Hash/GrowBatches/Electing + Electing a Parallel Hash participant to allocate more batches. + + + Hash/GrowBatches/Finishing + Waiting for an elected Parallel Hash participant to decide on future batch growth. + + + Hash/GrowBatches/Repartitioning + Waiting for other Parallel Hash participants to finishing repartitioning. - LogicalSyncData + Hash/GrowBuckets/Allocating + Waiting for an elected Parallel Hash participant to finish allocating more buckets. + + + Hash/GrowBuckets/Electing + Electing a Parallel Hash participant to allocate more buckets. + + + Hash/GrowBuckets/Reinserting + Waiting for other Parallel Hash participants to finish inserting tuples into new buckets. + + + LogicalSyncData Waiting for logical replication remote server to send data for initial table synchronization. - LogicalSyncStateChange + LogicalSyncStateChange Waiting for logical replication remote server to change state. - MessageQueueInternal + MessageQueueInternal Waiting for other process to be attached in shared message queue. - MessageQueuePutMessage + MessageQueuePutMessage Waiting to write a protocol message to a shared message queue. - MessageQueueReceive + MessageQueueReceive Waiting to receive bytes from a shared message queue. - MessageQueueSend + MessageQueueSend Waiting to send bytes to a shared message queue. - ParallelFinish - Waiting for parallel workers to finish computing. + ParallelBitmapScan + Waiting for parallel bitmap scan to become initialized. - ParallelBitmapScan - Waiting for parallel bitmap scan to become initialized. + ParallelCreateIndexScan + Waiting for parallel CREATE INDEX workers to finish heap scan. - ProcArrayGroupUpdate + ParallelFinish + Waiting for parallel workers to finish computing. + + + ProcArrayGroupUpdate Waiting for group leader to clear transaction id at transaction end. - ReplicationOriginDrop + Promote + Waiting for standby promotion. + + + ReplicationOriginDrop Waiting for a replication origin to become inactive to be dropped. - ReplicationSlotDrop + ReplicationSlotDrop Waiting for a replication slot to become inactive to be dropped. - SafeSnapshot - Waiting for a snapshot for a READ ONLY DEFERRABLE transaction. + SafeSnapshot + Waiting for a snapshot for a READ ONLY DEFERRABLE transaction. - SyncRep + SyncRep Waiting for confirmation from remote server during synchronous replication. - Timeout - BaseBackupThrottle + Timeout + BaseBackupThrottle Waiting during base backup when throttling activity. - PgSleep - Waiting in process that called pg_sleep. + PgSleep + Waiting in process that called pg_sleep. - RecoveryApplyDelay + RecoveryApplyDelay Waiting to apply WAL at recovery because it is delayed. - IO - BufFileRead + IO + BufFileRead Waiting for a read from a buffered file. - BufFileWrite + BufFileWrite Waiting for a write to a buffered file. - ControlFileRead + ControlFileRead Waiting for a read from the control file. - ControlFileSync + ControlFileSync Waiting for the control file to reach stable storage. 
- ControlFileSyncUpdate + ControlFileSyncUpdate Waiting for an update to the control file to reach stable storage. - ControlFileWrite + ControlFileWrite Waiting for a write to the control file. - ControlFileWriteUpdate + ControlFileWriteUpdate Waiting for a write to update the control file. - CopyFileRead + CopyFileRead Waiting for a read during a file copy operation. - CopyFileWrite + CopyFileWrite Waiting for a write during a file copy operation. - DataFileExtend + DataFileExtend Waiting for a relation data file to be extended. - DataFileFlush + DataFileFlush Waiting for a relation data file to reach stable storage. - DataFileImmediateSync + DataFileImmediateSync Waiting for an immediate synchronization of a relation data file to stable storage. - DataFilePrefetch + DataFilePrefetch Waiting for an asynchronous prefetch from a relation data file. - DataFileRead + DataFileRead Waiting for a read from a relation data file. - DataFileSync + DataFileSync Waiting for changes to a relation data file to reach stable storage. - DataFileTruncate + DataFileTruncate Waiting for a relation data file to be truncated. - DataFileWrite + DataFileWrite Waiting for a write to a relation data file. - DSMFillZeroWrite + DSMFillZeroWrite Waiting to write zero bytes to a dynamic shared memory backing file. - LockFileAddToDataDirRead + LockFileAddToDataDirRead Waiting for a read while adding a line to the data directory lock file. - LockFileAddToDataDirSync + LockFileAddToDataDirSync Waiting for data to reach stable storage while adding a line to the data directory lock file. - LockFileAddToDataDirWrite + LockFileAddToDataDirWrite Waiting for a write while adding a line to the data directory lock file. - LockFileCreateRead + LockFileCreateRead Waiting to read while creating the data directory lock file. - LockFileCreateSync + LockFileCreateSync Waiting for data to reach stable storage while creating the data directory lock file. - LockFileCreateWrite + LockFileCreateWrite Waiting for a write while creating the data directory lock file. - LockFileReCheckDataDirRead + LockFileReCheckDataDirRead Waiting for a read during recheck of the data directory lock file. - LogicalRewriteCheckpointSync + LogicalRewriteCheckpointSync Waiting for logical rewrite mappings to reach stable storage during a checkpoint. - LogicalRewriteMappingSync + LogicalRewriteMappingSync Waiting for mapping data to reach stable storage during a logical rewrite. - LogicalRewriteMappingWrite + LogicalRewriteMappingWrite Waiting for a write of mapping data during a logical rewrite. - LogicalRewriteSync + LogicalRewriteSync Waiting for logical rewrite mappings to reach stable storage. - LogicalRewriteWrite + LogicalRewriteWrite Waiting for a write of logical rewrite mappings. - RelationMapRead + RelationMapRead Waiting for a read of the relation map file. - RelationMapSync + RelationMapSync Waiting for the relation map file to reach stable storage. - RelationMapWrite + RelationMapWrite Waiting for a write to the relation map file. - ReorderBufferRead + ReorderBufferRead Waiting for a read during reorder buffer management. - ReorderBufferWrite + ReorderBufferWrite Waiting for a write during reorder buffer management. - ReorderLogicalMappingRead + ReorderLogicalMappingRead Waiting for a read of a logical mapping during reorder buffer management. - ReplicationSlotRead + ReplicationSlotRead Waiting for a read from a replication slot control file. 
- ReplicationSlotRestoreSync + ReplicationSlotRestoreSync Waiting for a replication slot control file to reach stable storage while restoring it to memory. - ReplicationSlotSync + ReplicationSlotSync Waiting for a replication slot control file to reach stable storage. - ReplicationSlotWrite + ReplicationSlotWrite Waiting for a write to a replication slot control file. - SLRUFlushSync + SLRUFlushSync Waiting for SLRU data to reach stable storage during a checkpoint or database shutdown. - SLRURead + SLRURead Waiting for a read of an SLRU page. - SLRUSync + SLRUSync Waiting for SLRU data to reach stable storage following a page write. - SLRUWrite + SLRUWrite Waiting for a write of an SLRU page. - SnapbuildRead + SnapbuildRead Waiting for a read of a serialized historical catalog snapshot. - SnapbuildSync + SnapbuildSync Waiting for a serialized historical catalog snapshot to reach stable storage. - SnapbuildWrite + SnapbuildWrite Waiting for a write of a serialized historical catalog snapshot. - TimelineHistoryFileSync + TimelineHistoryFileSync Waiting for a timeline history file received via streaming replication to reach stable storage. - TimelineHistoryFileWrite + TimelineHistoryFileWrite Waiting for a write of a timeline history file received via streaming replication. - TimelineHistoryRead + TimelineHistoryRead Waiting for a read of a timeline history file. - TimelineHistorySync + TimelineHistorySync Waiting for a newly created timeline history file to reach stable storage. - TimelineHistoryWrite + TimelineHistoryWrite Waiting for a write of a newly created timeline history file. - TwophaseFileRead + TwophaseFileRead Waiting for a read of a two phase state file. - TwophaseFileSync + TwophaseFileSync Waiting for a two phase state file to reach stable storage. - TwophaseFileWrite + TwophaseFileWrite Waiting for a write of a two phase state file. - WALBootstrapSync + WALBootstrapSync Waiting for WAL to reach stable storage during bootstrapping. - WALBootstrapWrite + WALBootstrapWrite Waiting for a write of a WAL page during bootstrapping. - WALCopyRead + WALCopyRead Waiting for a read when creating a new WAL segment by copying an existing one. - WALCopySync + WALCopySync Waiting a new WAL segment created by copying an existing one to reach stable storage. - WALCopyWrite + WALCopyWrite Waiting for a write when creating a new WAL segment by copying an existing one. - WALInitSync + WALInitSync Waiting for a newly initialized WAL file to reach stable storage. - WALInitWrite + WALInitWrite Waiting for a write while initializing a new WAL file. - WALRead + WALRead Waiting for a read from a WAL file. - WALSenderTimelineHistoryRead + WALSenderTimelineHistoryRead Waiting for a read from a timeline history file during walsender timeline command. - WALSyncMethodAssign + WALSync + Waiting for a WAL file to reach stable storage. + + + WALSyncMethodAssign Waiting for data to reach stable storage while assigning WAL sync method. - WALWrite + WALWrite Waiting for a write to a WAL file. @@ -1603,10 +1697,10 @@ postgres 27093 0.0 0.0 30096 2752 ? Ss 11:34 0:00 postgres: ser For tranches registered by extensions, the name is specified by extension - and this will be displayed as wait_event. It is quite + and this will be displayed as wait_event. 
It is quite possible that user has registered the tranche in one of the backends (by having allocation in dynamic shared memory) in which case other backends - won't have that information, so we display extension for such + won't have that information, so we display extension for such cases. @@ -1637,53 +1731,53 @@ SELECT pid, wait_event_type, wait_event FROM pg_stat_activity WHERE wait_event i - pid - integer + pid + integer Process ID of a WAL sender process - usesysid - oid + usesysid + oid OID of the user logged into this WAL sender process - usename - name + usename + name Name of the user logged into this WAL sender process - application_name - text + application_name + text Name of the application that is connected to this WAL sender - client_addr - inet + client_addr + inet IP address of the client connected to this WAL sender. If this field is null, it indicates that the client is connected via a Unix socket on the server machine. - client_hostname - text + client_hostname + text Host name of the connected client, as reported by a - reverse DNS lookup of client_addr. This field will + reverse DNS lookup of client_addr. This field will only be non-null for IP connections, and only when is enabled. + linkend="guc-log-hostname"/> is enabled. - client_port - integer + client_port + integer TCP port number that the client is using for communication - with this WAL sender, or -1 if a Unix socket is used + with this WAL sender, or -1 if a Unix socket is used - backend_start - timestamp with time zone + backend_start + timestamp with time zone Time when this process was started, i.e., when the client connected to this WAL sender @@ -1691,71 +1785,71 @@ SELECT pid, wait_event_type, wait_event FROM pg_stat_activity WHERE wait_event i backend_xmin xid - This standby's xmin horizon reported - by . + This standby's xmin horizon reported + by . - state - text + state + text Current WAL sender state. Possible values are: - startup: This WAL sender is starting up. + startup: This WAL sender is starting up. - catchup: This WAL sender's connected standby is + catchup: This WAL sender's connected standby is catching up with the primary. - streaming: This WAL sender is streaming changes + streaming: This WAL sender is streaming changes after its connected standby server has caught up with the primary. - backup: This WAL sender is sending a backup. + backup: This WAL sender is sending a backup. - stopping: This WAL sender is stopping. + stopping: This WAL sender is stopping. - sent_lsn - pg_lsn + sent_lsn + pg_lsn Last write-ahead log location sent on this connection - write_lsn - pg_lsn + write_lsn + pg_lsn Last write-ahead log location written to disk by this standby server - flush_lsn - pg_lsn + flush_lsn + pg_lsn Last write-ahead log location flushed to disk by this standby server - replay_lsn - pg_lsn + replay_lsn + pg_lsn Last write-ahead log location replayed into the database on this standby server - write_lag - interval + write_lag + interval Time elapsed between flushing recent WAL locally and receiving notification that this standby server has written it (but not yet flushed it or applied it). This can be used to gauge the delay that @@ -1764,8 +1858,8 @@ SELECT pid, wait_event_type, wait_event FROM pg_stat_activity WHERE wait_event i server was configured as a synchronous standby. - flush_lag - interval + flush_lag + interval Time elapsed between flushing recent WAL locally and receiving notification that this standby server has written and flushed it (but not yet applied it). 
This can be used to gauge the delay that @@ -1774,8 +1868,8 @@ SELECT pid, wait_event_type, wait_event FROM pg_stat_activity WHERE wait_event i server was configured as a synchronous standby. - replay_lag - interval + replay_lag + interval Time elapsed between flushing recent WAL locally and receiving notification that this standby server has written, flushed and applied it. This can be used to gauge the delay that @@ -1784,38 +1878,38 @@ SELECT pid, wait_event_type, wait_event FROM pg_stat_activity WHERE wait_event i server was configured as a synchronous standby. - sync_priority - integer + sync_priority + integer Priority of this standby server for being chosen as the synchronous standby in a priority-based synchronous replication. This has no effect in a quorum-based synchronous replication. - sync_state - text + sync_state + text Synchronous state of this standby server. Possible values are: - async: This standby server is asynchronous. + async: This standby server is asynchronous. - potential: This standby server is now asynchronous, + potential: This standby server is now asynchronous, but can potentially become synchronous if one of current synchronous ones fails. - sync: This standby server is synchronous. + sync: This standby server is synchronous. - quorum: This standby server is considered as a candidate + quorum: This standby server is considered as a candidate for quorum standbys. @@ -1885,69 +1979,88 @@ SELECT pid, wait_event_type, wait_event FROM pg_stat_activity WHERE wait_event i - pid - integer + pid + integer Process ID of the WAL receiver process - status - text + status + text Activity status of the WAL receiver process - receive_start_lsn - pg_lsn + receive_start_lsn + pg_lsn First write-ahead log location used when WAL receiver is started - receive_start_tli - integer + receive_start_tli + integer First timeline number used when WAL receiver is started - received_lsn - pg_lsn + received_lsn + pg_lsn Last write-ahead log location already received and flushed to disk, the initial value of this field being the first log location used when WAL receiver is started - received_tli - integer + received_tli + integer Timeline number of last write-ahead log location received and flushed to disk, the initial value of this field being the timeline number of the first log location used when WAL receiver is started - last_msg_send_time - timestamp with time zone + last_msg_send_time + timestamp with time zone Send time of last message received from origin WAL sender - last_msg_receipt_time - timestamp with time zone + last_msg_receipt_time + timestamp with time zone Receipt time of last message received from origin WAL sender - latest_end_lsn - pg_lsn + latest_end_lsn + pg_lsn Last write-ahead log location reported to origin WAL sender - latest_end_time - timestamp with time zone + latest_end_time + timestamp with time zone Time of last write-ahead log location reported to origin WAL sender - slot_name - text + slot_name + text Replication slot name used by this WAL receiver - conninfo - text + sender_host + text + + Host of the PostgreSQL instance + this WAL receiver is connected to. This can be a host name, + an IP address, or a directory path if the connection is via + Unix socket. (The path case can be distinguished because it + will always be an absolute path, beginning with /.) + + + + sender_port + integer + + Port number of the PostgreSQL instance + this WAL receiver is connected to. 
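As a quick, hedged illustration of the sender_host and sender_port columns introduced above (a minimal sketch to be run on a standby; it assumes a WAL receiver is currently active):

SELECT status, sender_host, sender_port, received_lsn
  FROM pg_stat_wal_receiver;

This reports where the standby is streaming from and how far it has received, using only columns documented in this view.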
+ + + + conninfo + text Connection string used by this WAL receiver, with security-sensitive fields obfuscated. @@ -1976,52 +2089,52 @@ SELECT pid, wait_event_type, wait_event FROM pg_stat_activity WHERE wait_event i - subid - oid + subid + oid OID of the subscription - subname - text + subname + text Name of the subscription - pid - integer + pid + integer Process ID of the subscription worker process - relid - Oid + relid + Oid OID of the relation that the worker is synchronizing; null for the main apply worker - received_lsn - pg_lsn + received_lsn + pg_lsn Last write-ahead log location received, the initial value of this field being 0 - last_msg_send_time - timestamp with time zone + last_msg_send_time + timestamp with time zone Send time of last message received from origin WAL sender - last_msg_receipt_time - timestamp with time zone + last_msg_receipt_time + timestamp with time zone Receipt time of last message received from origin WAL sender - latest_end_lsn - pg_lsn + latest_end_lsn + pg_lsn Last write-ahead log location reported to origin WAL sender - latest_end_time - timestamp with time zone + latest_end_time + timestamp with time zone Time of last write-ahead log location reported to origin WAL sender @@ -2049,42 +2162,42 @@ SELECT pid, wait_event_type, wait_event FROM pg_stat_activity WHERE wait_event i - pid - integer + pid + integer Process ID of a backend or WAL sender process - ssl - boolean + ssl + boolean True if SSL is used on this connection - version - text + version + text Version of SSL in use, or NULL if SSL is not in use on this connection - cipher - text + cipher + text Name of SSL cipher in use, or NULL if SSL is not in use on this connection - bits - integer + bits + integer Number of bits in the encryption algorithm used, or NULL if SSL is not used on this connection - compression - boolean + compression + boolean True if SSL compression is in use, false if not, or NULL if SSL is not in use on this connection - clientdn - text + clientdn + text Distinguished Name (DN) field from the client certificate used, or NULL if no client certificate was supplied or if SSL is not in use on this connection. 
This field is truncated if the @@ -2120,37 +2233,37 @@ SELECT pid, wait_event_type, wait_event FROM pg_stat_activity WHERE wait_event i - archived_count + archived_count bigint Number of WAL files that have been successfully archived - last_archived_wal + last_archived_wal text Name of the last WAL file successfully archived - last_archived_time + last_archived_time timestamp with time zone Time of the last successful archive operation - failed_count + failed_count bigint Number of failed attempts for archiving WAL files - last_failed_wal + last_failed_wal text Name of the WAL file of the last failed archival operation - last_failed_time + last_failed_time timestamp with time zone Time of the last failed archival operation - stats_reset + stats_reset timestamp with time zone Time at which these statistics were last reset @@ -2177,17 +2290,17 @@ SELECT pid, wait_event_type, wait_event FROM pg_stat_activity WHERE wait_event i - checkpoints_timed + checkpoints_timed bigint Number of scheduled checkpoints that have been performed - checkpoints_req + checkpoints_req bigint Number of requested checkpoints that have been performed - checkpoint_write_time + checkpoint_write_time double precision Total amount of time that has been spent in the portion of @@ -2195,7 +2308,7 @@ SELECT pid, wait_event_type, wait_event FROM pg_stat_activity WHERE wait_event i - checkpoint_sync_time + checkpoint_sync_time double precision Total amount of time that has been spent in the portion of @@ -2204,40 +2317,40 @@ SELECT pid, wait_event_type, wait_event FROM pg_stat_activity WHERE wait_event i - buffers_checkpoint + buffers_checkpoint bigint Number of buffers written during checkpoints - buffers_clean + buffers_clean bigint Number of buffers written by the background writer - maxwritten_clean + maxwritten_clean bigint Number of times the background writer stopped a cleaning scan because it had written too many buffers - buffers_backend + buffers_backend bigint Number of buffers written directly by a backend - buffers_backend_fsync + buffers_backend_fsync bigint Number of times a backend had to execute its own - fsync call (normally the background writer handles those + fsync call (normally the background writer handles those even when the backend does its own write) - buffers_alloc + buffers_alloc bigint Number of buffers allocated - stats_reset + stats_reset timestamp with time zone Time at which these statistics were last reset @@ -2263,119 +2376,119 @@ SELECT pid, wait_event_type, wait_event FROM pg_stat_activity WHERE wait_event i - datid - oid + datid + oid OID of a database - datname - name + datname + name Name of this database - numbackends - integer + numbackends + integer Number of backends currently connected to this database. This is the only column in this view that returns a value reflecting current state; all other columns return the accumulated values since the last reset. 
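For example, the numbackends column just described can give a rough, point-in-time view of connection pressure per database (a minimal sketch, not an official recipe):

SELECT datname, numbackends
  FROM pg_stat_database
 ORDER BY numbackends DESC;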
- xact_commit - bigint + xact_commit + bigint Number of transactions in this database that have been committed - xact_rollback - bigint + xact_rollback + bigint Number of transactions in this database that have been rolled back - blks_read - bigint + blks_read + bigint Number of disk blocks read in this database - blks_hit - bigint + blks_hit + bigint Number of times disk blocks were found already in the buffer cache, so that a read was not necessary (this only includes hits in the PostgreSQL buffer cache, not the operating system's file system cache) - tup_returned - bigint + tup_returned + bigint Number of rows returned by queries in this database - tup_fetched - bigint + tup_fetched + bigint Number of rows fetched by queries in this database - tup_inserted - bigint + tup_inserted + bigint Number of rows inserted by queries in this database - tup_updated - bigint + tup_updated + bigint Number of rows updated by queries in this database - tup_deleted - bigint + tup_deleted + bigint Number of rows deleted by queries in this database - conflicts - bigint + conflicts + bigint Number of queries canceled due to conflicts with recovery in this database. (Conflicts occur only on standby servers; see - for details.) + for details.) - temp_files - bigint + temp_files + bigint Number of temporary files created by queries in this database. All temporary files are counted, regardless of why the temporary file was created (e.g., sorting or hashing), and regardless of the - setting. + setting. - temp_bytes - bigint + temp_bytes + bigint Total amount of data written to temporary files by queries in this database. All temporary files are counted, regardless of why the temporary file was created, and - regardless of the setting. + regardless of the setting. - deadlocks - bigint + deadlocks + bigint Number of deadlocks detected in this database - blk_read_time - double precision + blk_read_time + double precision Time spent reading data file blocks by backends in this database, in milliseconds - blk_write_time - double precision + blk_write_time + double precision Time spent writing data file blocks by backends in this database, in milliseconds - stats_reset - timestamp with time zone + stats_reset + timestamp with time zone Time at which these statistics were last reset @@ -2400,42 +2513,42 @@ SELECT pid, wait_event_type, wait_event FROM pg_stat_activity WHERE wait_event i - datid - oid + datid + oid OID of a database - datname - name + datname + name Name of this database - confl_tablespace - bigint + confl_tablespace + bigint Number of queries in this database that have been canceled due to dropped tablespaces - confl_lock - bigint + confl_lock + bigint Number of queries in this database that have been canceled due to lock timeouts - confl_snapshot - bigint + confl_snapshot + bigint Number of queries in this database that have been canceled due to old snapshots - confl_bufferpin - bigint + confl_bufferpin + bigint Number of queries in this database that have been canceled due to pinned buffers - confl_deadlock - bigint + confl_deadlock + bigint Number of queries in this database that have been canceled due to deadlocks @@ -2464,119 +2577,119 @@ SELECT pid, wait_event_type, wait_event FROM pg_stat_activity WHERE wait_event i - relid - oid + relid + oid OID of a table - schemaname - name + schemaname + name Name of the schema that this table is in - relname - name + relname + name Name of this table - seq_scan - bigint + seq_scan + bigint Number of sequential scans initiated on this table - seq_tup_read - 
bigint + seq_tup_read + bigint Number of live rows fetched by sequential scans - idx_scan - bigint + idx_scan + bigint Number of index scans initiated on this table - idx_tup_fetch - bigint + idx_tup_fetch + bigint Number of live rows fetched by index scans - n_tup_ins - bigint + n_tup_ins + bigint Number of rows inserted - n_tup_upd - bigint + n_tup_upd + bigint Number of rows updated (includes HOT updated rows) - n_tup_del - bigint + n_tup_del + bigint Number of rows deleted - n_tup_hot_upd - bigint + n_tup_hot_upd + bigint Number of rows HOT updated (i.e., with no separate index update required) - n_live_tup - bigint + n_live_tup + bigint Estimated number of live rows - n_dead_tup - bigint + n_dead_tup + bigint Estimated number of dead rows - n_mod_since_analyze - bigint + n_mod_since_analyze + bigint Estimated number of rows modified since this table was last analyzed - last_vacuum - timestamp with time zone + last_vacuum + timestamp with time zone Last time at which this table was manually vacuumed - (not counting VACUUM FULL) + (not counting VACUUM FULL) - last_autovacuum - timestamp with time zone + last_autovacuum + timestamp with time zone Last time at which this table was vacuumed by the autovacuum daemon - last_analyze - timestamp with time zone + last_analyze + timestamp with time zone Last time at which this table was manually analyzed - last_autoanalyze - timestamp with time zone + last_autoanalyze + timestamp with time zone Last time at which this table was analyzed by the autovacuum daemon - vacuum_count - bigint + vacuum_count + bigint Number of times this table has been manually vacuumed - (not counting VACUUM FULL) + (not counting VACUUM FULL) - autovacuum_count - bigint + autovacuum_count + bigint Number of times this table has been vacuumed by the autovacuum daemon - analyze_count - bigint + analyze_count + bigint Number of times this table has been manually analyzed - autoanalyze_count - bigint + autoanalyze_count + bigint Number of times this table has been analyzed by the autovacuum daemon @@ -2607,43 +2720,43 @@ SELECT pid, wait_event_type, wait_event FROM pg_stat_activity WHERE wait_event i - relid - oid + relid + oid OID of the table for this index - indexrelid - oid + indexrelid + oid OID of this index - schemaname - name + schemaname + name Name of the schema this index is in - relname - name + relname + name Name of the table for this index - indexrelname - name + indexrelname + name Name of this index - idx_scan - bigint + idx_scan + bigint Number of index scans initiated on this index - idx_tup_read - bigint + idx_tup_read + bigint Number of index entries returned by scans on this index - idx_tup_fetch - bigint + idx_tup_fetch + bigint Number of live table rows fetched by simple index scans using this index @@ -2662,17 +2775,17 @@ SELECT pid, wait_event_type, wait_event FROM pg_stat_activity WHERE wait_event i - Indexes can be used by simple index scans, bitmap index scans, + Indexes can be used by simple index scans, bitmap index scans, and the optimizer. In a bitmap scan the output of several indexes can be combined via AND or OR rules, so it is difficult to associate individual heap row fetches with specific indexes when a bitmap scan is used. 
Therefore, a bitmap scan increments the - pg_stat_all_indexes.idx_tup_read + pg_stat_all_indexes.idx_tup_read count(s) for the index(es) it uses, and it increments the - pg_stat_all_tables.idx_tup_fetch + pg_stat_all_tables.idx_tup_fetch count for the table, but it does not affect - pg_stat_all_indexes.idx_tup_fetch. + pg_stat_all_indexes.idx_tup_fetch. The optimizer also accesses indexes to check for supplied constants whose values are outside the recorded range of the optimizer statistics because the optimizer statistics might be stale. @@ -2680,10 +2793,10 @@ SELECT pid, wait_event_type, wait_event FROM pg_stat_activity WHERE wait_event i - The idx_tup_read and idx_tup_fetch counts + The idx_tup_read and idx_tup_fetch counts can be different even without any use of bitmap scans, - because idx_tup_read counts - index entries retrieved from the index while idx_tup_fetch + because idx_tup_read counts + index entries retrieved from the index while idx_tup_fetch counts live rows fetched from the table. The latter will be less if any dead or not-yet-committed rows are fetched using the index, or if any heap fetches are avoided by means of an index-only scan. @@ -2703,58 +2816,58 @@ SELECT pid, wait_event_type, wait_event FROM pg_stat_activity WHERE wait_event i - relid - oid + relid + oid OID of a table - schemaname - name + schemaname + name Name of the schema that this table is in - relname - name + relname + name Name of this table - heap_blks_read - bigint + heap_blks_read + bigint Number of disk blocks read from this table - heap_blks_hit - bigint + heap_blks_hit + bigint Number of buffer hits in this table - idx_blks_read - bigint + idx_blks_read + bigint Number of disk blocks read from all indexes on this table - idx_blks_hit - bigint + idx_blks_hit + bigint Number of buffer hits in all indexes on this table - toast_blks_read - bigint + toast_blks_read + bigint Number of disk blocks read from this table's TOAST table (if any) - toast_blks_hit - bigint + toast_blks_hit + bigint Number of buffer hits in this table's TOAST table (if any) - tidx_blks_read - bigint + tidx_blks_read + bigint Number of disk blocks read from this table's TOAST table indexes (if any) - tidx_blks_hit - bigint + tidx_blks_hit + bigint Number of buffer hits in this table's TOAST table indexes (if any) @@ -2784,38 +2897,38 @@ SELECT pid, wait_event_type, wait_event FROM pg_stat_activity WHERE wait_event i - relid - oid + relid + oid OID of the table for this index - indexrelid - oid + indexrelid + oid OID of this index - schemaname - name + schemaname + name Name of the schema this index is in - relname - name + relname + name Name of the table for this index - indexrelname - name + indexrelname + name Name of this index - idx_blks_read - bigint + idx_blks_read + bigint Number of disk blocks read from this index - idx_blks_hit - bigint + idx_blks_hit + bigint Number of buffer hits in this index @@ -2845,28 +2958,28 @@ SELECT pid, wait_event_type, wait_event FROM pg_stat_activity WHERE wait_event i - relid - oid + relid + oid OID of a sequence - schemaname - name + schemaname + name Name of the schema this sequence is in - relname - name + relname + name Name of this sequence - blks_read - bigint + blks_read + bigint Number of disk blocks read from this sequence - blks_hit - bigint + blks_hit + bigint Number of buffer hits in this sequence @@ -2892,34 +3005,34 @@ SELECT pid, wait_event_type, wait_event FROM pg_stat_activity WHERE wait_event i - funcid - oid + funcid + oid OID of a function - schemaname - name + 
schemaname + name Name of the schema this function is in - funcname - name + funcname + name Name of this function - calls - bigint + calls + bigint Number of times this function has been called - total_time - double precision + total_time + double precision Total time spent in this function and all other functions called by it, in milliseconds - self_time - double precision + self_time + double precision Total time spent in this function itself, not including other functions called by it, in milliseconds @@ -2930,7 +3043,7 @@ SELECT pid, wait_event_type, wait_event FROM pg_stat_activity WHERE wait_event i The pg_stat_user_functions view will contain one row for each tracked function, showing statistics about executions of - that function. The parameter + that function. The parameter controls exactly which functions are tracked. @@ -2944,7 +3057,7 @@ SELECT pid, wait_event_type, wait_event FROM pg_stat_activity WHERE wait_event i queries that use the same underlying statistics access functions used by the standard views shown above. For details such as the functions' names, consult the definitions of the standard views. (For example, in - psql you could issue \d+ pg_stat_activity.) + psql you could issue \d+ pg_stat_activity.) The access functions for per-database statistics take a database OID as an argument to identify which database to report on. The per-table and per-index functions take a table or index OID. @@ -2955,7 +3068,7 @@ SELECT pid, wait_event_type, wait_event FROM pg_stat_activity WHERE wait_event i Additional functions related to statistics collection are listed in . + linkend="monitoring-stats-funcs-table"/>.
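As a hedged illustration of the pg_stat_user_functions view described above (it assumes track_functions has been enabled, which requires appropriate privileges, and that some tracked functions have already been executed):

SET track_functions = 'all';  -- 'pl' would track only procedural-language functions
SELECT funcname, calls, total_time, self_time
  FROM pg_stat_user_functions
 ORDER BY total_time DESC
 LIMIT 10;

Ordering by total_time is only one reasonable choice; self_time excludes time spent in callees, as noted above.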
@@ -3025,10 +3138,10 @@ SELECT pid, wait_event_type, wait_event FROM pg_stat_activity WHERE wait_event i Reset some cluster-wide statistics counters to zero, depending on the argument (requires superuser privileges by default, but EXECUTE for this function can be granted to others). - Calling pg_stat_reset_shared('bgwriter') will zero all the - counters shown in the pg_stat_bgwriter view. - Calling pg_stat_reset_shared('archiver') will zero all the - counters shown in the pg_stat_archiver view. + Calling pg_stat_reset_shared('bgwriter') will zero all the + counters shown in the pg_stat_bgwriter view. + Calling pg_stat_reset_shared('archiver') will zero all the + counters shown in the pg_stat_archiver view. @@ -3057,17 +3170,17 @@ SELECT pid, wait_event_type, wait_event FROM pg_stat_activity WHERE wait_event i pg_stat_get_activity, the underlying function of - the pg_stat_activity view, returns a set of records + the pg_stat_activity view, returns a set of records containing all the available information about each backend process. Sometimes it may be more convenient to obtain just a subset of this information. In such cases, an older set of per-backend statistics access functions can be used; these are shown in . + linkend="monitoring-stats-backend-funcs-table"/>. These access functions use a backend ID number, which ranges from one to the number of currently active backends. The function pg_stat_get_backend_idset provides a convenient way to generate one row for each active backend for - invoking these functions. For example, to show the PIDs and + invoking these functions. For example, to show the PIDs and current queries of all backends: @@ -3101,7 +3214,7 @@ SELECT pg_stat_get_backend_pid(s.backendid) AS pid, pg_stat_get_backend_activity(integer) text - Text of this backend's most recent query + Text of this backend's most recent query @@ -3150,7 +3263,7 @@ SELECT pg_stat_get_backend_pid(s.backendid) AS pid, pg_stat_get_backend_wait_event_type(integer) text Wait event type name if backend is currently waiting, otherwise NULL. - See for details. + See for details. @@ -3158,7 +3271,7 @@ SELECT pg_stat_get_backend_pid(s.backendid) AS pid, pg_stat_get_backend_wait_event(integer) text Wait event name if backend is currently waiting, otherwise NULL. - See for details. + See for details. @@ -3218,9 +3331,9 @@ SELECT pg_stat_get_backend_pid(s.backendid) AS pid, Details of the pg_locks view appear in - . + . For more information on locking and managing concurrency with - PostgreSQL, refer to . + PostgreSQL, refer to . @@ -3228,9 +3341,9 @@ SELECT pg_stat_get_backend_pid(s.backendid) AS pid, Progress Reporting - PostgreSQL has the ability to report the progress of + PostgreSQL has the ability to report the progress of certain commands during command execution. Currently, the only command - which supports progress reporting is VACUUM. This may be + which supports progress reporting is VACUUM. This may be expanded in the future. @@ -3238,13 +3351,13 @@ SELECT pg_stat_get_backend_pid(s.backendid) AS pid, VACUUM Progress Reporting - Whenever VACUUM is running, the + Whenever VACUUM is running, the pg_stat_progress_vacuum view will contain one row for each backend (including autovacuum worker processes) that is currently vacuuming. The tables below describe the information that will be reported and provide information about how to interpret it. 
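For instance, a rough percentage-complete figure can be computed from the columns described below (a minimal sketch; it assumes at least one VACUUM is currently running, and heap_blks_total can legitimately be zero for an empty table, hence the NULLIF):

SELECT p.pid, p.relid::regclass AS table_name, p.phase,
       round(100.0 * p.heap_blks_scanned / nullif(p.heap_blks_total, 0), 1)
         AS heap_scanned_pct
  FROM pg_stat_progress_vacuum AS p;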
- Progress reporting is not currently supported for VACUUM FULL - and backends running VACUUM FULL will not be listed in this + Progress reporting is not currently supported for VACUUM FULL + and backends running VACUUM FULL will not be listed in this view. @@ -3261,82 +3374,82 @@ SELECT pg_stat_get_backend_pid(s.backendid) AS pid, - pid - integer + pid + integer Process ID of backend. - datid - oid + datid + oid OID of the database to which this backend is connected. - datname - name + datname + name Name of the database to which this backend is connected. - relid - oid + relid + oid OID of the table being vacuumed. - phase - text + phase + text - Current processing phase of vacuum. See . + Current processing phase of vacuum. See . - heap_blks_total - bigint + heap_blks_total + bigint Total number of heap blocks in the table. This number is reported as of the beginning of the scan; blocks added later will not be (and - need not be) visited by this VACUUM. + need not be) visited by this VACUUM. - heap_blks_scanned - bigint + heap_blks_scanned + bigint Number of heap blocks scanned. Because the - visibility map is used to optimize scans, + visibility map is used to optimize scans, some blocks will be skipped without inspection; skipped blocks are included in this total, so that this number will eventually become - equal to heap_blks_total when the vacuum is complete. - This counter only advances when the phase is scanning heap. + equal to heap_blks_total when the vacuum is complete. + This counter only advances when the phase is scanning heap. - heap_blks_vacuumed - bigint + heap_blks_vacuumed + bigint Number of heap blocks vacuumed. Unless the table has no indexes, this - counter only advances when the phase is vacuuming heap. + counter only advances when the phase is vacuuming heap. Blocks that contain no dead tuples are skipped, so the counter may sometimes skip forward in large increments. - index_vacuum_count - bigint + index_vacuum_count + bigint Number of completed index vacuum cycles. - max_dead_tuples - bigint + max_dead_tuples + bigint Number of dead tuples that we can store before needing to perform an index vacuum cycle, based on - . + . - num_dead_tuples - bigint + num_dead_tuples + bigint Number of dead tuples collected since the last index vacuum cycle. @@ -3359,36 +3472,36 @@ SELECT pg_stat_get_backend_pid(s.backendid) AS pid, initializing - VACUUM is preparing to begin scanning the heap. This + VACUUM is preparing to begin scanning the heap. This phase is expected to be very brief. scanning heap - VACUUM is currently scanning the heap. It will prune and + VACUUM is currently scanning the heap. It will prune and defragment each page if required, and possibly perform freezing - activity. The heap_blks_scanned column can be used + activity. The heap_blks_scanned column can be used to monitor the progress of the scan. vacuuming indexes - VACUUM is currently vacuuming the indexes. If a table has + VACUUM is currently vacuuming the indexes. If a table has any indexes, this will happen at least once per vacuum, after the heap has been completely scanned. It may happen multiple times per vacuum - if is insufficient to + if is insufficient to store the number of dead tuples found. vacuuming heap - VACUUM is currently vacuuming the heap. Vacuuming the heap + VACUUM is currently vacuuming the heap. Vacuuming the heap is distinct from scanning the heap, and occurs after each instance of - vacuuming indexes. 
If heap_blks_scanned is less than - heap_blks_total, the system will return to scanning + vacuuming indexes. If heap_blks_scanned is less than + heap_blks_total, the system will return to scanning the heap after this phase is completed; otherwise, it will begin cleaning up indexes after this phase is completed. @@ -3396,7 +3509,7 @@ SELECT pg_stat_get_backend_pid(s.backendid) AS pid, cleaning up indexes - VACUUM is currently cleaning up indexes. This occurs after + VACUUM is currently cleaning up indexes. This occurs after the heap has been completely scanned and all vacuuming of the indexes and the heap has been completed. @@ -3404,7 +3517,7 @@ SELECT pg_stat_get_backend_pid(s.backendid) AS pid, truncating heap - VACUUM is currently truncating the heap so as to return + VACUUM is currently truncating the heap so as to return empty pages at the end of the relation to the operating system. This occurs after cleaning up indexes. @@ -3412,10 +3525,10 @@ SELECT pg_stat_get_backend_pid(s.backendid) AS pid, performing final cleanup - VACUUM is performing final cleanup. During this phase, - VACUUM will vacuum the free space map, update statistics - in pg_class, and report statistics to the statistics - collector. When this phase is completed, VACUUM will end. + VACUUM is performing final cleanup. During this phase, + VACUUM will vacuum the free space map, update statistics + in pg_class, and report statistics to the statistics + collector. When this phase is completed, VACUUM will end. @@ -3455,7 +3568,7 @@ SELECT pg_stat_get_backend_pid(s.backendid) AS pid, SystemTap project for Linux provides a DTrace equivalent and can also be used. Supporting other dynamic tracing utilities is theoretically possible by changing the definitions for - the macros in src/include/utils/probes.h. + the macros in src/include/utils/probes.h. @@ -3465,8 +3578,8 @@ SELECT pg_stat_get_backend_pid(s.backendid) AS pid, By default, probes are not available, so you will need to explicitly tell the configure script to make the probes available in PostgreSQL. To include DTrace support - specify @@ -3475,10 +3588,10 @@ SELECT pg_stat_get_backend_pid(s.backendid) AS pid, A number of standard probes are provided in the source code, - as shown in ; - + as shown in ; + shows the types used in the probes. More probes can certainly be - added to enhance PostgreSQL's observability. + added to enhance PostgreSQL's observability.
@@ -3572,7 +3685,7 @@ SELECT pg_stat_get_backend_pid(s.backendid) AS pid, statement-status(const char *)Probe that fires anytime the server process updates its - pg_stat_activity.status. + pg_stat_activity.status. arg0 is the new status string. @@ -3740,7 +3853,7 @@ SELECT pg_stat_get_backend_pid(s.backendid) AS pid, (ForkNumber, BlockNumber, Oid, Oid, Oid) Probe that fires when a server process begins to write a dirty buffer. (If this happens often, it implies that - is too + is too small or the background writer control parameters need adjustment.) arg0 and arg1 contain the fork and block numbers of the page. arg2, arg3, and arg4 contain the tablespace, database, and relation OIDs @@ -3758,7 +3871,7 @@ SELECT pg_stat_get_backend_pid(s.backendid) AS pid, Probe that fires when a server process begins to write a dirty WAL buffer because no more WAL buffer space is available. (If this happens often, it implies that - is too small.) + is too small.) wal-buffer-write-dirty-done @@ -3823,13 +3936,15 @@ SELECT pg_stat_get_backend_pid(s.backendid) AS pid, sort-start - (int, bool, int, int, bool) + (int, bool, int, int, bool, int) Probe that fires when a sort operation is started. arg0 indicates heap, index or datum sort. arg1 is true for unique-value enforcement. arg2 is the number of key columns. arg3 is the number of kilobytes of work memory allowed. - arg4 is true if random access to the sort result is required. + arg4 is true if random access to the sort result is required. + arg5 indicates serial when 0, parallel worker when + 1, or parallel leader when 2. sort-done @@ -3966,7 +4081,7 @@ SELECT pg_stat_get_backend_pid(s.backendid) AS pid, The example below shows a DTrace script for analyzing transaction counts in the system, as an alternative to snapshotting - pg_stat_database before and after a performance test: + pg_stat_database before and after a performance test: #!/usr/sbin/dtrace -qs @@ -4038,15 +4153,15 @@ Total time (ns) 2312105013 - Add the probe definitions to src/backend/utils/probes.d + Add the probe definitions to src/backend/utils/probes.d - Include pg_trace.h if it is not already present in the + Include pg_trace.h if it is not already present in the module(s) containing the probe points, and insert - TRACE_POSTGRESQL probe macros at the desired locations + TRACE_POSTGRESQL probe macros at the desired locations in the source code @@ -4069,30 +4184,30 @@ Total time (ns) 2312105013 - Decide that the probe will be named transaction-start and + Decide that the probe will be named transaction-start and requires a parameter of type LocalTransactionId - Add the probe definition to src/backend/utils/probes.d: + Add the probe definition to src/backend/utils/probes.d: probe transaction__start(LocalTransactionId); Note the use of the double underline in the probe name. In a DTrace script using the probe, the double underline needs to be replaced with a - hyphen, so transaction-start is the name to document for + hyphen, so transaction-start is the name to document for users. - At compile time, transaction__start is converted to a macro - called TRACE_POSTGRESQL_TRANSACTION_START (notice the + At compile time, transaction__start is converted to a macro + called TRACE_POSTGRESQL_TRANSACTION_START (notice the underscores are single here), which is available by including - pg_trace.h. Add the macro call to the appropriate location + pg_trace.h. Add the macro call to the appropriate location in the source code. 
In this case, it looks like the following: @@ -4136,9 +4251,9 @@ TRACE_POSTGRESQL_TRANSACTION_START(vxid.localTransactionId); On most platforms, if PostgreSQL is - built with , the arguments to a trace macro will be evaluated whenever control passes through the - macro, even if no tracing is being done. This is + macro, even if no tracing is being done. This is usually not worth worrying about if you are just reporting the values of a few local variables. But beware of putting expensive function calls into the arguments. If you need to do that, @@ -4150,7 +4265,7 @@ if (TRACE_POSTGRESQL_TRANSACTION_START_ENABLED()) TRACE_POSTGRESQL_TRANSACTION_START(some_function(...)); - Each trace macro has a corresponding ENABLED macro. + Each trace macro has a corresponding ENABLED macro. diff --git a/doc/src/sgml/mvcc.sgml b/doc/src/sgml/mvcc.sgml index dda0170886..73934e5cf3 100644 --- a/doc/src/sgml/mvcc.sgml +++ b/doc/src/sgml/mvcc.sgml @@ -165,7 +165,7 @@ transaction isolation level The SQL standard and PostgreSQL-implemented transaction isolation levels - are described in . + are described in .
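As a brief, hedged example of choosing an isolation level for a single transaction (the orders table is purely illustrative):

BEGIN;
SET TRANSACTION ISOLATION LEVEL REPEATABLE READ;
SELECT count(*) FROM orders;  -- this and later statements in the transaction share one snapshot
COMMIT;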
@@ -279,14 +279,14 @@ The table also shows that PostgreSQL's Repeatable Read implementation does not allow phantom reads. Stricter behavior is permitted by the SQL standard: the four isolation levels only define which phenomena - must not happen, not which phenomena must happen. + must not happen, not which phenomena must happen. The behavior of the available isolation levels is detailed in the following subsections. To set the transaction isolation level of a transaction, use the - command . + command . @@ -296,8 +296,8 @@ made to a sequence (and therefore the counter of a column declared using serial) are immediately visible to all other transactions and are not rolled back if the transaction - that made the changes aborts. See - and . + that made the changes aborts. See + and . @@ -317,7 +317,7 @@ Read Committed is the default isolation level in PostgreSQL. When a transaction uses this isolation level, a SELECT query - (without a FOR UPDATE/SHARE clause) sees only data + (without a FOR UPDATE/SHARE clause) sees only data committed before the query began; it never sees either uncommitted data or changes committed during query execution by concurrent transactions. In effect, a SELECT query sees @@ -345,7 +345,7 @@ updating the originally found row. If the first updater commits, the second updater will ignore the row if the first updater deleted it, otherwise it will attempt to apply its operation to the updated version of - the row. The search condition of the command (the WHERE clause) is + the row. The search condition of the command (the WHERE clause) is re-evaluated to see if the updated version of the row still matches the search condition. If so, the second updater proceeds with its operation using the updated version of the row. In the case of @@ -355,19 +355,19 @@ - INSERT with an ON CONFLICT DO UPDATE clause + INSERT with an ON CONFLICT DO UPDATE clause behaves similarly. In Read Committed mode, each row proposed for insertion will either insert or update. Unless there are unrelated errors, one of those two outcomes is guaranteed. If a conflict originates in another transaction whose effects are not yet visible to the INSERT , the UPDATE clause will affect that row, - even though possibly no version of that row is + even though possibly no version of that row is conventionally visible to the command. INSERT with an ON CONFLICT DO - NOTHING clause may have insertion not proceed for a row due to + NOTHING clause may have insertion not proceed for a row due to the outcome of another transaction whose effects are not visible to the INSERT snapshot. Again, this is only the case in Read Committed mode. @@ -416,10 +416,10 @@ COMMIT; The DELETE will have no effect even though there is a website.hits = 10 row before and after the UPDATE. This occurs because the - pre-update row value 9 is skipped, and when the + pre-update row value 9 is skipped, and when the UPDATE completes and DELETE - obtains a lock, the new row value is no longer 10 but - 11, which no longer matches the criteria. + obtains a lock, the new row value is no longer 10 but + 11, which no longer matches the criteria. @@ -427,7 +427,7 @@ COMMIT; that includes all transactions committed up to that instant, subsequent commands in the same transaction will see the effects of the committed concurrent transaction in any case. The point - at issue above is whether or not a single command + at issue above is whether or not a single command sees an absolutely consistent view of the database. 
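A small sketch of the Read Committed ON CONFLICT behavior described above (the counters table and its unique constraint on name are hypothetical):

INSERT INTO counters (name, hits)
VALUES ('home', 1)
ON CONFLICT (name) DO UPDATE
   SET hits = counters.hits + EXCLUDED.hits;
-- Either the row is inserted, or the existing row is updated, even when
-- the conflicting row was committed by a concurrent transaction and is
-- not visible to this command's snapshot.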
@@ -461,7 +461,7 @@ COMMIT; even though they are not yet committed.) This is a stronger guarantee than is required by the SQL standard for this isolation level, and prevents all of the phenomena described - in except for serialization + in except for serialization anomalies. As mentioned above, this is specifically allowed by the standard, which only describes the minimum protections each isolation level must @@ -472,9 +472,9 @@ COMMIT; This level is different from Read Committed in that a query in a repeatable read transaction sees a snapshot as of the start of the first non-transaction-control statement in the - transaction, not as of the start + transaction, not as of the start of the current statement within the transaction. Thus, successive - SELECT commands within a single + SELECT commands within a single transaction see the same data, i.e., they do not see changes made by other transactions that committed after their own transaction started. @@ -587,7 +587,7 @@ ERROR: could not serialize access due to concurrent update As an example, - consider a table mytab, initially containing: + consider a table mytab, initially containing: class | value -------+------- @@ -600,14 +600,14 @@ ERROR: could not serialize access due to concurrent update SELECT SUM(value) FROM mytab WHERE class = 1; - and then inserts the result (30) as the value in a - new row with class = 2. Concurrently, serializable + and then inserts the result (30) as the value in a + new row with class = 2. Concurrently, serializable transaction B computes: SELECT SUM(value) FROM mytab WHERE class = 2; and obtains the result 300, which it inserts in a new row with - class = 1. Then both transactions try to commit. + class = 1. Then both transactions try to commit. If either transaction were running at the Repeatable Read isolation level, both would be allowed to commit; but since there is no serial order of execution consistent with the result, using Serializable transactions will allow one @@ -639,11 +639,11 @@ ERROR: could not serialize access due to read/write dependencies among transact To guarantee true serializability PostgreSQL - uses predicate locking, which means that it keeps locks + uses predicate locking, which means that it keeps locks which allow it to determine when a write would have had an impact on the result of a previous read from a concurrent transaction, had it run first. In PostgreSQL these locks do not - cause any blocking and therefore can not play any part in + cause any blocking and therefore can not play any part in causing a deadlock. They are used to identify and flag dependencies among concurrent Serializable transactions which in certain combinations can lead to serialization anomalies. In contrast, a Read Committed or @@ -659,20 +659,20 @@ ERROR: could not serialize access due to read/write dependencies among transact other database systems, are based on data actually accessed by a transaction. These will show up in the pg_locks - system view with a mode of SIReadLock. The + system view with a mode of SIReadLock. The particular locks acquired during execution of a query will depend on the plan used by the query, and multiple finer-grained locks (e.g., tuple locks) may be combined into fewer coarser-grained locks (e.g., page locks) during the course of the transaction to prevent exhaustion of the memory used to - track the locks. A READ ONLY transaction may be able to + track the locks. 
A READ ONLY transaction may be able to release its SIRead locks before completion, if it detects that no conflicts can still occur which could lead to a serialization anomaly. - In fact, READ ONLY transactions will often be able to + In fact, READ ONLY transactions will often be able to establish that fact at startup and avoid taking any predicate locks. - If you explicitly request a SERIALIZABLE READ ONLY DEFERRABLE + If you explicitly request a SERIALIZABLE READ ONLY DEFERRABLE transaction, it will block until it can establish this fact. (This is - the only case where Serializable transactions block but + the only case where Serializable transactions block but Repeatable Read transactions don't.) On the other hand, SIRead locks often need to be kept past transaction commit, until overlapping read write transactions complete. @@ -695,13 +695,13 @@ ERROR: could not serialize access due to read/write dependencies among transact anomalies. The monitoring of read/write dependencies has a cost, as does the restart of transactions which are terminated with a serialization failure, but balanced against the cost and blocking involved in use of - explicit locks and SELECT FOR UPDATE or SELECT FOR - SHARE, Serializable transactions are the best performance choice + explicit locks and SELECT FOR UPDATE or SELECT FOR + SHARE, Serializable transactions are the best performance choice for some environments. - While PostgreSQL's Serializable transaction isolation + While PostgreSQL's Serializable transaction isolation level only allows concurrent transactions to commit if it can prove there is a serial order of execution that would produce the same effect, it doesn't always prevent errors from being raised that would not occur in @@ -709,7 +709,7 @@ ERROR: could not serialize access due to read/write dependencies among transact constraint violations caused by conflicts with overlapping Serializable transactions even after explicitly checking that the key isn't present before attempting to insert it. This can be avoided by making sure - that all Serializable transactions that insert potentially + that all Serializable transactions that insert potentially conflicting keys explicitly check if they can do so first. For example, imagine an application that asks the user for a new key and then checks that it doesn't exist already by trying to select it first, or generates @@ -727,7 +727,7 @@ ERROR: could not serialize access due to read/write dependencies among transact - Declare transactions as READ ONLY when possible. + Declare transactions as READ ONLY when possible. @@ -748,14 +748,14 @@ ERROR: could not serialize access due to read/write dependencies among transact Don't leave connections dangling idle in transaction longer than necessary. The configuration parameter - may be used to + may be used to automatically disconnect lingering sessions. - Eliminate explicit locks, SELECT FOR UPDATE, and - SELECT FOR SHARE where no longer needed due to the + Eliminate explicit locks, SELECT FOR UPDATE, and + SELECT FOR SHARE where no longer needed due to the protections automatically provided by Serializable transactions. @@ -765,9 +765,9 @@ ERROR: could not serialize access due to read/write dependencies among transact locks into a single relation-level predicate lock because the predicate lock table is short of memory, an increase in the rate of serialization failures may occur. You can avoid this by increasing - , - , and/or - . + , + , and/or + . 
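Complementing the suggestions in this list, the SIRead locks mentioned earlier can be inspected directly to see whether predicate locks are being promoted to coarser granularity (a minimal sketch):

SELECT locktype, relation::regclass AS relation, page, tuple
  FROM pg_locks
 WHERE mode = 'SIReadLock';

Rows with locktype relation and empty page/tuple columns indicate relation-level predicate locks.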
@@ -775,8 +775,8 @@ ERROR: could not serialize access due to read/write dependencies among transact A sequential scan will always necessitate a relation-level predicate lock. This can result in an increased rate of serialization failures. It may be helpful to encourage the use of index scans by reducing - and/or increasing - . Be sure to weigh any decrease + and/or increasing + . Be sure to weigh any decrease in transaction rollbacks and restarts against any overall change in query execution time. @@ -801,7 +801,7 @@ ERROR: could not serialize access due to read/write dependencies among transact most PostgreSQL commands automatically acquire locks of appropriate modes to ensure that referenced tables are not dropped or modified in incompatible ways while the - command executes. (For example, TRUNCATE cannot safely be + command executes. (For example, TRUNCATE cannot safely be executed concurrently with other operations on the same table, so it obtains an exclusive lock on the table to enforce that.) @@ -811,7 +811,7 @@ ERROR: could not serialize access due to read/write dependencies among transact server, use the pg_locks system view. For more information on monitoring the status of the lock - manager subsystem, refer to . + manager subsystem, refer to . @@ -826,14 +826,14 @@ ERROR: could not serialize access due to read/write dependencies among transact which they are used automatically by PostgreSQL. You can also acquire any of these locks explicitly with the command . + linkend="sql-lock"/>. Remember that all of these lock modes are table-level locks, even if the name contains the word row; the names of the lock modes are historical. To some extent the names reflect the typical usage of each lock mode — but the semantics are all the same. The only real difference between one lock mode and another is the set of lock modes with - which each conflicts (see ). + which each conflicts (see ). Two transactions cannot hold locks of conflicting modes on the same table at the same time. (However, a transaction never conflicts with itself. For example, it might acquire @@ -860,7 +860,7 @@ ERROR: could not serialize access due to read/write dependencies among transact The SELECT command acquires a lock of this mode on - referenced tables. In general, any query that only reads a table + referenced tables. In general, any query that only reads a table and does not modify it will acquire this lock mode. @@ -904,7 +904,7 @@ ERROR: could not serialize access due to read/write dependencies among transact acquire this lock mode on the target table (in addition to ACCESS SHARE locks on any other referenced tables). In general, this lock mode will be acquired by any - command that modifies data in a table. + command that modifies data in a table. @@ -920,16 +920,16 @@ ERROR: could not serialize access due to read/write dependencies among transact EXCLUSIVE, EXCLUSIVE, and ACCESS EXCLUSIVE lock modes. This mode protects a table against - concurrent schema changes and VACUUM runs. + concurrent schema changes and VACUUM runs. Acquired by VACUUM (without ), - ANALYZE, CREATE INDEX CONCURRENTLY, - CREATE STATISTICS and + ANALYZE, CREATE INDEX CONCURRENTLY, + CREATE STATISTICS and ALTER TABLE VALIDATE and other ALTER TABLE variants (for full details see - ). + ). @@ -970,9 +970,8 @@ ERROR: could not serialize access due to read/write dependencies among transact - Acquired by CREATE COLLATION, - CREATE TRIGGER, and many forms of - ALTER TABLE (see ). 
+ Acquired by CREATE TRIGGER and many forms of + ALTER TABLE (see ). @@ -1016,12 +1015,12 @@ ERROR: could not serialize access due to read/write dependencies among transact - Acquired by the DROP TABLE, + Acquired by the DROP TABLE, TRUNCATE, REINDEX, CLUSTER, VACUUM FULL, and REFRESH MATERIALIZED VIEW (without ) - commands. Many forms of ALTER TABLE also acquire + commands. Many forms of ALTER TABLE also acquire a lock at this level. This is also the default lock mode for LOCK TABLE statements that do not specify a mode explicitly. @@ -1042,9 +1041,9 @@ ERROR: could not serialize access due to read/write dependencies among transact Once acquired, a lock is normally held till end of transaction. But if a lock is acquired after establishing a savepoint, the lock is released immediately if the savepoint is rolled back to. This is consistent with - the principle that ROLLBACK cancels all effects of the + the principle that ROLLBACK cancels all effects of the commands since the savepoint. The same holds for locks acquired within a - PL/pgSQL exception block: an error escape from the block + PL/pgSQL exception block: an error escape from the block releases locks acquired within it. @@ -1053,9 +1052,9 @@ ERROR: could not serialize access due to read/write dependencies among transact
Conflicting Lock Modes - - - + + + Requested Lock Mode @@ -1173,7 +1172,7 @@ ERROR: could not serialize access due to read/write dependencies among transact In addition to table-level locks, there are row-level locks, which are listed as below with the contexts in which they are used automatically by PostgreSQL. See - for a complete table of + for a complete table of row-level lock conflicts. Note that a transaction can hold conflicting locks on the same row, even in different subtransactions; but other than that, two transactions can never hold conflicting locks @@ -1204,17 +1203,17 @@ ERROR: could not serialize access due to read/write dependencies among transact concurrent transaction that has run any of those commands on the same row, and will then lock and return the updated row (or no row, if the - row was deleted). Within a REPEATABLE READ or - SERIALIZABLE transaction, + row was deleted). Within a REPEATABLE READ or + SERIALIZABLE transaction, however, an error will be thrown if a row to be locked has changed since the transaction started. For further discussion see - . + . - The FOR UPDATE lock mode - is also acquired by any DELETE on a row, and also by an - UPDATE that modifies the values on certain columns. Currently, - the set of columns considered for the UPDATE case are those that + The FOR UPDATE lock mode + is also acquired by any DELETE on a row, and also by an + UPDATE that modifies the values on certain columns. Currently, + the set of columns considered for the UPDATE case are those that have a unique index on them that can be used in a foreign key (so partial indexes and expressional indexes are not considered), but this may change in the future. @@ -1228,11 +1227,11 @@ ERROR: could not serialize access due to read/write dependencies among transact - Behaves similarly to FOR UPDATE, except that the lock + Behaves similarly to FOR UPDATE, except that the lock acquired is weaker: this lock will not block - SELECT FOR KEY SHARE commands that attempt to acquire + SELECT FOR KEY SHARE commands that attempt to acquire a lock on the same rows. This lock mode is also acquired by any - UPDATE that does not acquire a FOR UPDATE lock. + UPDATE that does not acquire a FOR UPDATE lock. @@ -1243,12 +1242,12 @@ ERROR: could not serialize access due to read/write dependencies among transact - Behaves similarly to FOR NO KEY UPDATE, except that it + Behaves similarly to FOR NO KEY UPDATE, except that it acquires a shared lock rather than exclusive lock on each retrieved row. A shared lock blocks other transactions from performing UPDATE, DELETE, SELECT FOR UPDATE or - SELECT FOR NO KEY UPDATE on these rows, but it does not + SELECT FOR NO KEY UPDATE on these rows, but it does not prevent them from performing SELECT FOR SHARE or SELECT FOR KEY SHARE. @@ -1262,13 +1261,13 @@ ERROR: could not serialize access due to read/write dependencies among transact Behaves similarly to FOR SHARE, except that the - lock is weaker: SELECT FOR UPDATE is blocked, but not - SELECT FOR NO KEY UPDATE. A key-shared lock blocks + lock is weaker: SELECT FOR UPDATE is blocked, but not + SELECT FOR NO KEY UPDATE. A key-shared lock blocks other transactions from performing DELETE or any UPDATE that changes the key values, but not - other UPDATE, and neither does it prevent - SELECT FOR NO KEY UPDATE, SELECT FOR SHARE, - or SELECT FOR KEY SHARE. + other UPDATE, and neither does it prevent + SELECT FOR NO KEY UPDATE, SELECT FOR SHARE, + or SELECT FOR KEY SHARE. 
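   For illustration, the four row-level locking clauses described above can be requested
   explicitly in a query; a minimal sketch against a hypothetical accounts table
   (the key value is arbitrary):

BEGIN;
-- Strongest row lock: conflicts with UPDATE, DELETE and every other FOR ... lock
SELECT * FROM accounts WHERE acctnum = 11111 FOR UPDATE;

-- Weaker: still allows concurrent SELECT ... FOR KEY SHARE (as used by foreign-key checks)
SELECT * FROM accounts WHERE acctnum = 11111 FOR NO KEY UPDATE;

-- Shared lock: other transactions may also take FOR SHARE or FOR KEY SHARE locks
SELECT * FROM accounts WHERE acctnum = 11111 FOR SHARE;

-- Weakest: only conflicts with DELETE, key-changing UPDATE, and FOR UPDATE lockers
SELECT * FROM accounts WHERE acctnum = 11111 FOR KEY SHARE;
COMMIT;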
@@ -1286,9 +1285,9 @@ ERROR: could not serialize access due to read/write dependencies among transact
Conflicting Row-level Locks - - - + + + Requested Lock Mode @@ -1357,7 +1356,7 @@ ERROR: could not serialize access due to read/write dependencies among transact The use of explicit locking can increase the likelihood of - deadlocks, wherein two (or more) transactions each + deadlocks, wherein two (or more) transactions each hold locks that the other wants. For example, if transaction 1 acquires an exclusive lock on table A and then tries to acquire an exclusive lock on table B, while transaction 2 has already @@ -1447,12 +1446,12 @@ UPDATE accounts SET balance = balance - 100.00 WHERE acctnum = 22222; PostgreSQL provides a means for creating locks that have application-defined meanings. These are - called advisory locks, because the system does not + called advisory locks, because the system does not enforce their use — it is up to the application to use them correctly. Advisory locks can be useful for locking strategies that are an awkward fit for the MVCC model. For example, a common use of advisory locks is to emulate pessimistic - locking strategies typical of so-called flat file data + locking strategies typical of so-called flat file data management systems. While a flag stored in a table could be used for the same purpose, advisory locks are faster, avoid table bloat, and are automatically @@ -1495,8 +1494,8 @@ UPDATE accounts SET balance = balance - 100.00 WHERE acctnum = 22222; Both advisory locks and regular locks are stored in a shared memory pool whose size is defined by the configuration variables - and - . + and + . Care must be taken not to exhaust this memory or the server will be unable to grant any locks at all. This imposes an upper limit on the number of advisory locks @@ -1506,7 +1505,7 @@ UPDATE accounts SET balance = balance - 100.00 WHERE acctnum = 22222; In certain cases using advisory locking methods, especially in queries - involving explicit ordering and LIMIT clauses, care must be + involving explicit ordering and LIMIT clauses, care must be taken to control the locks acquired because of the order in which SQL expressions are evaluated. For example: @@ -1518,7 +1517,7 @@ SELECT pg_advisory_lock(q.id) FROM ) q; -- ok In the above queries, the second form is dangerous because the - LIMIT is not guaranteed to be applied before the locking + LIMIT is not guaranteed to be applied before the locking function is executed. This might cause some locks to be acquired that the application was not expecting, and hence would fail to release (until it ends the session). @@ -1529,7 +1528,7 @@ SELECT pg_advisory_lock(q.id) FROM The functions provided to manipulate advisory locks are described in - . + . @@ -1565,7 +1564,7 @@ SELECT pg_advisory_lock(q.id) FROM - As mentioned in , Serializable + As mentioned in , Serializable transactions are just Repeatable Read transactions which add nonblocking monitoring for dangerous patterns of read/write conflicts. When a pattern is detected which could cause a cycle in the apparent @@ -1590,7 +1589,7 @@ SELECT pg_advisory_lock(q.id) FROM for application programmers if the application software goes through a framework which automatically retries transactions which are rolled back with a serialization failure. It may be a good idea to set - default_transaction_isolation to serializable. + default_transaction_isolation to serializable. 
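   For example, the default can be changed cluster-wide or per session; a sketch
   (the cluster-wide change needs only a configuration reload):

-- Cluster-wide default:
ALTER SYSTEM SET default_transaction_isolation = 'serializable';
SELECT pg_reload_conf();

-- Or just for the current session, and verify what is in effect:
SET default_transaction_isolation = 'serializable';
SHOW transaction_isolation;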
It would also be wise to take some action to ensure that no other transaction isolation level is used, either inadvertently or to subvert integrity checks, through checks of the transaction isolation @@ -1598,13 +1597,13 @@ SELECT pg_advisory_lock(q.id) FROM - See for performance suggestions. + See for performance suggestions. This level of integrity protection using Serializable transactions - does not yet extend to hot standby mode (). + does not yet extend to hot standby mode (). Because of that, those using hot standby may want to use Repeatable Read and explicit locking on the master. @@ -1660,7 +1659,7 @@ SELECT pg_advisory_lock(q.id) FROM includes some but not all post-transaction-start changes. In such cases a careful person might wish to lock all tables needed for the check, in order to get an indisputable picture of current reality. A - SHARE mode (or higher) lock guarantees that there are no + SHARE mode (or higher) lock guarantees that there are no uncommitted changes in the locked table, other than those of the current transaction. @@ -1675,8 +1674,8 @@ SELECT pg_advisory_lock(q.id) FROM transaction predates obtaining the lock, it might predate some now-committed changes in the table. A repeatable read transaction's snapshot is actually frozen at the start of its first query or data-modification command - (SELECT, INSERT, - UPDATE, or DELETE), so + (SELECT, INSERT, + UPDATE, or DELETE), so it is possible to obtain locks explicitly before the snapshot is frozen. @@ -1687,8 +1686,8 @@ SELECT pg_advisory_lock(q.id) FROM Caveats - Some DDL commands, currently only and the - table-rewriting forms of , are not + Some DDL commands, currently only and the + table-rewriting forms of , are not MVCC-safe. This means that after the truncation or rewrite commits, the table will appear empty to concurrent transactions, if they are using a snapshot taken before the DDL command committed. This will only be an @@ -1705,7 +1704,7 @@ SELECT pg_advisory_lock(q.id) FROM Support for the Serializable transaction isolation level has not yet been added to Hot Standby replication targets (described in - ). The strictest isolation level currently + ). The strictest isolation level currently supported in hot standby mode is Repeatable Read. While performing all permanent database writes within Serializable transactions on the master will ensure that all standbys will eventually reach a consistent diff --git a/doc/src/sgml/nls.sgml b/doc/src/sgml/nls.sgml index 1d331473af..0035746845 100644 --- a/doc/src/sgml/nls.sgml +++ b/doc/src/sgml/nls.sgml @@ -7,12 +7,12 @@ For the Translator - PostgreSQL + PostgreSQL programs (server and client) can issue their messages in your favorite language — if the messages have been translated. Creating and maintaining translated message sets needs the help of people who speak their own language well and want to contribute to - the PostgreSQL effort. You do not have to be a + the PostgreSQL effort. You do not have to be a programmer at all to do this. This section explains how to help. @@ -153,7 +153,7 @@ msgstr "another translated" can also be named language_region.po where region is the - + ISO 3166-1 two-letter country code (in upper case), e.g., pt_BR.po for Portuguese in Brazil. If you @@ -170,8 +170,8 @@ make init-po This will create a file progname.pot. (.pot to distinguish it from PO files that - are in production. The T stands for - template.) + are in production. The T stands for + template.) Copy this file to language.po and edit it. 
To make it known that the new language is available, @@ -234,7 +234,7 @@ make update-po - If the original is a printf format string, the translation + If the original is a printf format string, the translation also needs to be. The translation also needs to have the same format specifiers in the same order. Sometimes the natural rules of the language make this impossible or at least awkward. @@ -272,7 +272,7 @@ msgstr "Die Datei %2$s hat %1$u Zeichen." open file %s) should probably not start with a capital letter (if your language distinguishes letter case) or end with a period (if your language uses punctuation marks). - It might help to read . + It might help to read . @@ -301,7 +301,7 @@ msgstr "Die Datei %2$s hat %1$u Zeichen." This section describes how to implement native language support in a program or library that is part of the - PostgreSQL distribution. + PostgreSQL distribution. Currently, it only applies to C programs. @@ -447,7 +447,7 @@ fprintf(stderr, gettext("panic level %d\n"), lvl); printf("Files were %s.\n", flag ? "copied" : "removed"); The word order within the sentence might be different in other - languages. Also, even if you remember to call gettext() on + languages. Also, even if you remember to call gettext() on each fragment, the fragments might not translate well separately. It's better to duplicate a little code so that each message to be translated is a coherent whole. Only numbers, file names, and @@ -481,7 +481,7 @@ printf("number of copied files: %d", n); If you really want to construct a properly pluralized message, there is support for this, but it's a bit awkward. When generating - a primary or detail error message in ereport(), you can + a primary or detail error message in ereport(), you can write something like this: errmsg_plural("copied %d file", @@ -496,17 +496,17 @@ errmsg_plural("copied %d file", are formatted per the format string as usual. (Normally, the pluralization control value will also be one of the values to be formatted, so it has to be written twice.) In English it only - matters whether n is 1 or not 1, but in other + matters whether n is 1 or not 1, but in other languages there can be many different plural forms. The translator sees the two English forms as a group and has the opportunity to supply multiple substitute strings, with the appropriate one being - selected based on the run-time value of n. + selected based on the run-time value of n. If you need to pluralize a message that isn't going directly to an - errmsg or errdetail report, you have to use - the underlying function ngettext. See the gettext + errmsg or errdetail report, you have to use + the underlying function ngettext. See the gettext documentation. diff --git a/doc/src/sgml/notation.sgml b/doc/src/sgml/notation.sgml index 2f350a329d..bd1e8f629a 100644 --- a/doc/src/sgml/notation.sgml +++ b/doc/src/sgml/notation.sgml @@ -7,17 +7,17 @@ The following conventions are used in the synopsis of a command: brackets ([ and ]) indicate optional parts. (In the synopsis of a Tcl command, question marks - (?) are used instead, as is usual in Tcl.) Braces + (?) are used instead, as is usual in Tcl.) Braces ({ and }) and vertical lines (|) indicate that you must choose one - alternative. Dots (...) mean that the preceding element + alternative. Dots (...) mean that the preceding element can be repeated. Where it enhances the clarity, SQL commands are preceded by the - prompt =>, and shell commands are preceded by the - prompt $. Normally, prompts are not shown, though. 
+ prompt =>, and shell commands are preceded by the + prompt $. Normally, prompts are not shown, though. diff --git a/doc/src/sgml/oid2name.sgml b/doc/src/sgml/oid2name.sgml index 97b170a23f..c7ebd61c6b 100644 --- a/doc/src/sgml/oid2name.sgml +++ b/doc/src/sgml/oid2name.sgml @@ -27,15 +27,15 @@ Description - oid2name is a utility program that helps administrators to + oid2name is a utility program that helps administrators to examine the file structure used by PostgreSQL. To make use of it, you need to be familiar with the database file structure, which is described in - . + . - The name oid2name is historical, and is actually rather + The name oid2name is historical, and is actually rather misleading, since most of the time when you use it, you will really be concerned with tables' filenode numbers (which are the file names visible in the database directories). Be sure you understand the @@ -60,46 +60,53 @@ - filenode - show info for table with filenode filenode + + + show info for table with filenode filenode. - include indexes and sequences in the listing + + include indexes and sequences in the listing. - oid - show info for table with OID oid + + + show info for table with OID oid. - omit headers (useful for scripting) + + omit headers (useful for scripting). - show tablespace OIDs + + show tablespace OIDs. + include system objects (those in , - and schemas) + and schemas). - tablename_pattern - show info for table(s) matching tablename_pattern + + + show info for table(s) matching tablename_pattern. - - + + Print the oid2name version and exit. @@ -109,14 +116,15 @@ + display more information about each object shown: tablespace name, - schema name, and OID + schema name, and OID. - - + + Show help about oid2name command line @@ -133,29 +141,34 @@ - database - database to connect to + + + database to connect to. - host - database server's host + + + database server's host. - port - database server's port + + database server's host. Use of this parameter is + deprecated as of + PostgreSQL 12. - username - user name to connect as + + + database server's port. - password - password (deprecated — putting this on the command line - is a security hazard) + + + user name to connect as. @@ -163,36 +176,60 @@ To display specific tables, select which tables to show by - using - If you don't give any of , or , + but do give , it will list all tables in the database + named by . In this mode, the and + options control what gets listed. - If you don't give either, it will show a listing of database + OIDs. Alternatively you can give to get a tablespace listing. + + Environment + + + + PGHOST + PGPORT + PGUSER + + + + Default connection parameters. + + + + + + + This utility, like most other PostgreSQL + utilities, also uses the environment variables supported by + libpq (see ). + + + Notes - oid2name requires a running database server with + oid2name requires a running database server with non-corrupt system catalogs. It is therefore of only limited use for recovering from catastrophic database corruption situations. diff --git a/doc/src/sgml/pageinspect.sgml b/doc/src/sgml/pageinspect.sgml index e46f5ca6bc..4d5da186bb 100644 --- a/doc/src/sgml/pageinspect.sgml +++ b/doc/src/sgml/pageinspect.sgml @@ -8,7 +8,7 @@ - The pageinspect module provides functions that allow you to + The pageinspect module provides functions that allow you to inspect the contents of database pages at a low level, which is useful for debugging purposes. All of these functions may be used only by superusers. 
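   The module is packaged as an extension, so it must be installed before the functions
   described below are available; a minimal sketch, run as a superuser:

CREATE EXTENSION pageinspect;
-- Fetch the first block of pg_class's main fork and check its size in bytes:
SELECT octet_length(get_raw_page('pg_class', 'main', 0));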
@@ -28,7 +28,7 @@ get_raw_page reads the specified block of the named - relation and returns a copy as a bytea value. This allows a + relation and returns a copy as a bytea value. This allows a single time-consistent copy of the block to be obtained. fork should be 'main' for the main data fork, 'fsm' for the free space map, @@ -63,7 +63,7 @@ page_header shows fields that are common to all - PostgreSQL heap and index pages. + PostgreSQL heap and index pages. @@ -76,8 +76,8 @@ test=# SELECT * FROM page_header(get_raw_page('pg_class', 0)); 0/24A1B50 | 0 | 1 | 232 | 368 | 8192 | 8192 | 4 | 0 The returned columns correspond to the fields in the - PageHeaderData struct. - See src/include/storage/bufpage.h for details. + PageHeaderData struct. + See src/include/storage/bufpage.h for details. @@ -147,8 +147,8 @@ test=# SELECT page_checksum(get_raw_page('pg_class', 0), 0); test=# SELECT * FROM heap_page_items(get_raw_page('pg_class', 0)); - See src/include/storage/itemid.h and - src/include/access/htup_details.h for explanations of the fields + See src/include/storage/itemid.h and + src/include/access/htup_details.h for explanations of the fields returned. @@ -221,7 +221,7 @@ test=# SELECT * FROM heap_page_item_attrs(get_raw_page('pg_class', 0), 'pg_class next slot to be returned from the page, is also printed. - See src/backend/storage/freespace/README for more + See src/backend/storage/freespace/README for more information on the structure of an FSM page. @@ -247,13 +247,15 @@ test=# SELECT * FROM heap_page_item_attrs(get_raw_page('pg_class', 0), 'pg_class index's metapage. For example: test=# SELECT * FROM bt_metap('pg_cast_oid_index'); --[ RECORD 1 ]----- -magic | 340322 -version | 2 -root | 1 -level | 0 -fastroot | 1 -fastlevel | 0 +-[ RECORD 1 ]-----------+------- +magic | 340322 +version | 3 +root | 1 +level | 0 +fastroot | 1 +fastlevel | 0 +oldest_xact | 582 +last_cleanup_num_tuples | 1000 @@ -315,21 +317,21 @@ test=# SELECT * FROM bt_page_items('pg_cast_oid_index', 1); 7 | (0,7) | 12 | f | f | 29 27 00 00 8 | (0,8) | 12 | f | f | 2a 27 00 00 - In a B-tree leaf page, ctid points to a heap tuple. - In an internal page, the block number part of ctid + In a B-tree leaf page, ctid points to a heap tuple. + In an internal page, the block number part of ctid points to another page in the index itself, while the offset part (the second number) is ignored and is usually 1. Note that the first item on any non-rightmost page (any page with - a non-zero value in the btpo_next field) is the - page's high key, meaning its data + a non-zero value in the btpo_next field) is the + page's high key, meaning its data serves as an upper bound on all items appearing on the page, while - its ctid field is meaningless. Also, on non-leaf + its ctid field is meaningless. Also, on non-leaf pages, the first real data item (the first item that is not a high key) is a minus infinity item, with no actual value - in its data field. Such an item does have a valid - downlink in its ctid field, however. + in its data field. Such an item does have a valid + downlink in its ctid field, however. @@ -345,7 +347,7 @@ test=# SELECT * FROM bt_page_items('pg_cast_oid_index', 1); It is also possible to pass a page to bt_page_items - as a bytea value. A page image obtained + as a bytea value. A page image obtained with get_raw_page should be passed as argument. So the last example could also be rewritten like this: @@ -470,8 +472,8 @@ test=# SELECT * FROM brin_page_items(get_raw_page('brinidx', 5), 139 | 8 | 2 | f | f | f | {177 .. 
264} The returned columns correspond to the fields in the - BrinMemTuple and BrinValues structs. - See src/include/access/brin_tuple.h for details. + BrinMemTuple and BrinValues structs. + See src/include/access/brin_tuple.h for details. diff --git a/doc/src/sgml/parallel.sgml b/doc/src/sgml/parallel.sgml index 2a25f21eb4..1005e9fef4 100644 --- a/doc/src/sgml/parallel.sgml +++ b/doc/src/sgml/parallel.sgml @@ -8,7 +8,7 @@ - PostgreSQL can devise query plans which can leverage + PostgreSQL can devise query plans which can leverage multiple CPUs in order to answer queries faster. This feature is known as parallel query. Many queries cannot benefit from parallel query, either due to limitations of the current implementation or because there is no @@ -47,32 +47,32 @@ EXPLAIN SELECT * FROM pgbench_accounts WHERE filler LIKE '%x%'; In all cases, the Gather or Gather Merge node will have exactly one child plan, which is the portion of the plan that will be executed in - parallel. If the Gather or Gather Merge node is + parallel. If the Gather or Gather Merge node is at the very top of the plan tree, then the entire query will execute in parallel. If it is somewhere else in the plan tree, then only the portion of the plan below it will run in parallel. In the example above, the query accesses only one table, so there is only one plan node other than - the Gather node itself; since that plan node is a child of the - Gather node, it will run in parallel. + the Gather node itself; since that plan node is a child of the + Gather node, it will run in parallel. - Using EXPLAIN, you can see the number of - workers chosen by the planner. When the Gather node is reached + Using EXPLAIN, you can see the number of + workers chosen by the planner. When the Gather node is reached during query execution, the process which is implementing the user's session will request a number of background worker processes equal to the number of workers chosen by the planner. The number of background workers that the planner will consider using is limited to at most - . The total number + . The total number of background workers that can exist at any one time is limited by both - and - . Therefore, it is possible for a + and + . Therefore, it is possible for a parallel query to run with fewer workers than planned, or even with no workers at all. The optimal plan may depend on the number of workers that are available, so this can result in poor query performance. If this occurrence is frequent, consider increasing - max_worker_processes and max_parallel_workers + max_worker_processes and max_parallel_workers so that more workers can be run simultaneously or alternatively reducing max_parallel_workers_per_gather so that the planner requests fewer workers. @@ -96,10 +96,10 @@ EXPLAIN SELECT * FROM pgbench_accounts WHERE filler LIKE '%x%'; When the node at the top of the parallel portion of the plan is - Gather Merge rather than Gather, it indicates that + Gather Merge rather than Gather, it indicates that each process executing the parallel portion of the plan is producing tuples in sorted order, and that the leader is performing an - order-preserving merge. In contrast, Gather reads tuples + order-preserving merge. In contrast, Gather reads tuples from the workers in whatever order is convenient, destroying any sort order that may have existed. @@ -118,20 +118,12 @@ EXPLAIN SELECT * FROM pgbench_accounts WHERE filler LIKE '%x%'; - must be set to a + must be set to a value which is greater than zero. 
This is a special case of the more general principle that no more workers should be used than the number configured via max_parallel_workers_per_gather. - - - - must be set to a - value other than none. Parallel query requires dynamic - shared memory in order to pass data between cooperating processes. - - @@ -151,9 +143,10 @@ EXPLAIN SELECT * FROM pgbench_accounts WHERE filler LIKE '%x%'; The query writes any data or locks any database rows. If a query contains a data-modifying operation either at the top level or within - a CTE, no parallel plans for that query will be generated. This is a - limitation of the current implementation which could be lifted in a - future release. + a CTE, no parallel plans for that query will be generated. As an + exception, the commands CREATE TABLE ... AS, SELECT + INTO, and CREATE MATERIALIZED VIEW which create a new + table and populate it can use a parallel plan. @@ -177,7 +170,7 @@ EXPLAIN SELECT * FROM pgbench_accounts WHERE filler LIKE '%x%'; Most system-defined functions are PARALLEL SAFE, but user-defined functions are marked PARALLEL UNSAFE by default. See the discussion of - . + . @@ -204,8 +197,8 @@ EXPLAIN SELECT * FROM pgbench_accounts WHERE filler LIKE '%x%'; Even when parallel query plan is generated for a particular query, there are several circumstances under which it will be impossible to execute that plan in parallel at execution time. If this occurs, the leader - will execute the portion of the plan below the Gather - node entirely by itself, almost as if the Gather node were + will execute the portion of the plan below the Gather + node entirely by itself, almost as if the Gather node were not present. This will happen if any of the following conditions are met: @@ -214,7 +207,7 @@ EXPLAIN SELECT * FROM pgbench_accounts WHERE filler LIKE '%x%'; No background workers can be obtained because of the limitation that the total number of background workers cannot exceed - . + . @@ -222,7 +215,7 @@ EXPLAIN SELECT * FROM pgbench_accounts WHERE filler LIKE '%x%'; No background workers can be obtained because of the limitation that the total number of background workers launched for purposes of - parallel query cannot exceed . + parallel query cannot exceed . @@ -235,21 +228,12 @@ EXPLAIN SELECT * FROM pgbench_accounts WHERE filler LIKE '%x%'; send such a message, this can only occur when using a client that does not rely on libpq. If this is a frequent occurrence, it may be a good idea to set - to zero in + to zero in sessions where it is likely, so as to avoid generating query plans that may be suboptimal when run serially. - - - A prepared statement is executed using a CREATE TABLE .. AS - EXECUTE .. statement. This construct converts what otherwise - would have been a read-only operation into a read-write operation, - making it ineligible for parallel query. - - - The transaction isolation level is serializable. This situation @@ -272,7 +256,7 @@ EXPLAIN SELECT * FROM pgbench_accounts WHERE filler LIKE '%x%'; copy of the output result set, so the query would not run any faster than normal but would produce incorrect results. Instead, the parallel portion of the plan must be what is known internally to the query - optimizer as a partial plan; that is, it must be constructed + optimizer as a partial plan; that is, it must be constructed so that each process which executes the plan will generate only a subset of the output rows in such a way that each required output row is guaranteed to be generated by exactly one of the cooperating processes. 
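   To see whether the planner produces such a partial plan for a given query, an
   EXPLAIN is usually enough; a sketch reusing the pgbench_accounts query from the
   beginning of this chapter (the setting shown is illustrative, and whether a
   Gather node appears depends on the data and configuration):

SET max_parallel_workers_per_gather = 4;
EXPLAIN SELECT count(*) FROM pgbench_accounts WHERE filler LIKE '%x%';
-- A parallel plan will contain a Gather (or Gather Merge) node; everything
-- below it is the partial plan executed by the cooperating processes.
RESET max_parallel_workers_per_gather;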
@@ -289,14 +273,14 @@ EXPLAIN SELECT * FROM pgbench_accounts WHERE filler LIKE '%x%'; - In a parallel sequential scan, the table's blocks will + In a parallel sequential scan, the table's blocks will be divided among the cooperating processes. Blocks are handed out one at a time, so that access to the table remains sequential. - In a parallel bitmap heap scan, one process is chosen + In a parallel bitmap heap scan, one process is chosen as the leader. That process performs a scan of one or more indexes and builds a bitmap indicating which table blocks need to be visited. These blocks are then divided among the cooperating processes as in @@ -306,8 +290,8 @@ EXPLAIN SELECT * FROM pgbench_accounts WHERE filler LIKE '%x%'; - In a parallel index scan or parallel index-only - scan, the cooperating processes take turns reading data from the + In a parallel index scan or parallel index-only + scan, the cooperating processes take turns reading data from the index. Currently, parallel index scans are supported only for btree indexes. Each process will claim a single index block and will scan and return all tuples referenced by that block; other process can @@ -331,47 +315,64 @@ EXPLAIN SELECT * FROM pgbench_accounts WHERE filler LIKE '%x%'; more other tables using a nested loop, hash join, or merge join. The inner side of the join may be any kind of non-parallel plan that is otherwise supported by the planner provided that it is safe to run within - a parallel worker. For example, if a nested loop join is chosen, the - inner plan may be an index scan which looks up a value taken from the outer - side of the join. + a parallel worker. Depending on the join type, the inner side may also be + a parallel plan. - - Each worker will execute the inner side of the join in full. This is - typically not a problem for nested loops, but may be inefficient for - cases involving hash or merge joins. For example, for a hash join, this - restriction means that an identical hash table is built in each worker - process, which works fine for joins against small tables but may not be - efficient when the inner table is large. For a merge join, it might mean - that each worker performs a separate sort of the inner relation, which - could be slow. Of course, in cases where a parallel plan of this type - would be inefficient, the query planner will normally choose some other - plan (possibly one which does not use parallelism) instead. - + + + + In a nested loop join, the inner side is always + non-parallel. Although it is executed in full, this is efficient if + the inner side is an index scan, because the outer tuples and thus + the loops that look up values in the index are divided over the + cooperating processes. + + + + + In a merge join, the inner side is always + a non-parallel plan and therefore executed in full. This may be + inefficient, especially if a sort must be performed, because the work + and resulting data are duplicated in every cooperating process. + + + + + In a hash join (without the "parallel" prefix), + the inner side is executed in full by every cooperating process + to build identical copies of the hash table. This may be inefficient + if the hash table is large or the plan is expensive. In a + parallel hash join, the inner side is a + parallel hash that divides the work of building + a shared hash table over the cooperating processes. + + + Parallel Aggregation - PostgreSQL supports parallel aggregation by aggregating in + PostgreSQL supports parallel aggregation by aggregating in two stages. 
First, each process participating in the parallel portion of the query performs an aggregation step, producing a partial result for each group of which that process is aware. This is reflected in the plan - as a Partial Aggregate node. Second, the partial results are - transferred to the leader via Gather or Gather - Merge. Finally, the leader re-aggregates the results across all + as a Partial Aggregate node. Second, the partial results are + transferred to the leader via Gather or Gather + Merge. Finally, the leader re-aggregates the results across all workers in order to produce the final result. This is reflected in the - plan as a Finalize Aggregate node. + plan as a Finalize Aggregate node. - Because the Finalize Aggregate node runs on the leader + Because the Finalize Aggregate node runs on the leader process, queries which produce a relatively large number of groups in comparison to the number of input rows will appear less favorable to the query planner. For example, in the worst-case scenario the number of - groups seen by the Finalize Aggregate node could be as many as + groups seen by the Finalize Aggregate node could be as many as the number of input rows which were seen by all worker processes in the - Partial Aggregate stage. For such cases, there is clearly + Partial Aggregate stage. For such cases, there is clearly going to be no performance benefit to using parallel aggregation. The query planner takes this into account during the planning process and is unlikely to choose parallel aggregate in this scenario. @@ -379,33 +380,81 @@ EXPLAIN SELECT * FROM pgbench_accounts WHERE filler LIKE '%x%'; Parallel aggregation is not supported in all situations. Each aggregate - must be safe for parallelism and must + must be safe for parallelism and must have a combine function. If the aggregate has a transition state of type - internal, it must have serialization and deserialization - functions. See for more details. + internal, it must have serialization and deserialization + functions. See for more details. Parallel aggregation is not supported if any aggregate function call - contains DISTINCT or ORDER BY clause and is also + contains DISTINCT or ORDER BY clause and is also not supported for ordered set aggregates or when the query involves - GROUPING SETS. It can only be used when all joins involved in + GROUPING SETS. It can only be used when all joins involved in the query are also part of the parallel portion of the plan. + + Parallel Append + + + Whenever PostgreSQL needs to combine rows + from multiple sources into a single result set, it uses an + Append or MergeAppend plan node. + This commonly happens when implementing UNION ALL or + when scanning a partitioned table. Such nodes can be used in parallel + plans just as they can in any other plan. However, in a parallel plan, + the planner may instead use a Parallel Append node. + + + + When an Append node is used in a parallel plan, each + process will execute the child plans in the order in which they appear, + so that all participating processes cooperate to execute the first child + plan until it is complete and then move to the second plan at around the + same time. When a Parallel Append is used instead, the + executor will instead spread out the participating processes as evenly as + possible across its child plans, so that multiple child plans are executed + simultaneously. This avoids contention, and also avoids paying the startup + cost of a child plan in those processes that never execute it. 
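   As an illustration, a UNION ALL of two separate scans is the kind of query that can
   produce such a node; a sketch in which the table names are hypothetical and the
   planner remains free to choose a different plan:

EXPLAIN
SELECT * FROM events_2017 WHERE status = 'error'
UNION ALL
SELECT * FROM events_2018 WHERE status = 'error';
-- With parallelism enabled, the plan may contain a Parallel Append node whose
-- children are (possibly partial) scans of the two tables.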
+ + + + Also, unlike a regular Append node, which can only have + partial children when used within a parallel plan, a Parallel + Append node can have both partial and non-partial child plans. + Non-partial children will be scanned by only a single process, since + scanning them more than once would produce duplicate results. Plans that + involve appending multiple results sets can therefore achieve + coarse-grained parallelism even when efficient partial plans are not + available. For example, consider a query against a partitioned table + which can be only be implemented efficiently by using an index that does + not support parallel scans. The planner might choose a Parallel + Append of regular Index Scan plans; each + individual index scan would have to be executed to completion by a single + process, but different scans could be performed at the same time by + different processes. + + + + can be used to disable + this feature. + + + Parallel Plan Tips If a query that is expected to do so does not produce a parallel plan, - you can try reducing or - . Of course, this plan may turn + you can try reducing or + . Of course, this plan may turn out to be slower than the serial plan which the planner preferred, but this will not always be the case. If you don't get a parallel plan even with very small values of these settings (e.g. after setting them both to zero), there may be some reason why the query planner is unable to generate a parallel plan for your query. See - and - for information on why this may be + and + for information on why this may be the case. @@ -425,13 +474,13 @@ EXPLAIN SELECT * FROM pgbench_accounts WHERE filler LIKE '%x%'; The planner classifies operations involved in a query as either - parallel safe, parallel restricted, - or parallel unsafe. A parallel safe operation is one which + parallel safe, parallel restricted, + or parallel unsafe. A parallel safe operation is one which does not conflict with the use of parallel query. A parallel restricted operation is one which cannot be performed in a parallel worker, but which can be performed in the leader while parallel query is in use. Therefore, - parallel restricted operations can never occur below a Gather - or Gather Merge node, but can occur elsewhere in a plan which + parallel restricted operations can never occur below a Gather + or Gather Merge node, but can occur elsewhere in a plan which contains such a node. A parallel unsafe operation is one which cannot be performed while parallel query is in use, not even in the leader. When a query contains anything which is parallel unsafe, parallel query @@ -458,13 +507,19 @@ EXPLAIN SELECT * FROM pgbench_accounts WHERE filler LIKE '%x%'; Scans of foreign tables, unless the foreign data wrapper has - an IsForeignScanParallelSafe API which indicates otherwise. + an IsForeignScanParallelSafe API which indicates otherwise. + + + + + + Plan nodes to which an InitPlan is attached. - Access to an InitPlan or correlated SubPlan. + Plan nodes which reference a correlated SubPlan. @@ -478,28 +533,28 @@ EXPLAIN SELECT * FROM pgbench_accounts WHERE filler LIKE '%x%'; unsafe, because this would require predicting every operation which the function could possibly perform. In general, this is equivalent to the Halting Problem and therefore impossible. Even for simple functions - where it conceivably be done, we do not try, since this would be expensive + where it could conceivably be done, we do not try, since this would be expensive and error-prone. 
Instead, all user-defined functions are assumed to be parallel unsafe unless otherwise marked. When using - or - , markings can be set by specifying - PARALLEL SAFE, PARALLEL RESTRICTED, or - PARALLEL UNSAFE as appropriate. When using - , the - PARALLEL option can be specified with SAFE, - RESTRICTED, or UNSAFE as the corresponding value. + or + , markings can be set by specifying + PARALLEL SAFE, PARALLEL RESTRICTED, or + PARALLEL UNSAFE as appropriate. When using + , the + PARALLEL option can be specified with SAFE, + RESTRICTED, or UNSAFE as the corresponding value. - Functions and aggregates must be marked PARALLEL UNSAFE if + Functions and aggregates must be marked PARALLEL UNSAFE if they write to the database, access sequences, change the transaction state even temporarily (e.g. a PL/pgSQL function which establishes an - EXCEPTION block to catch errors), or make persistent changes to + EXCEPTION block to catch errors), or make persistent changes to settings. Similarly, functions must be marked PARALLEL - RESTRICTED if they access temporary tables, client connection state, + RESTRICTED if they access temporary tables, client connection state, cursors, prepared statements, or miscellaneous backend-local state which the system cannot synchronize across workers. For example, - setseed and random are parallel restricted for + setseed and random are parallel restricted for this last reason. @@ -511,7 +566,7 @@ EXPLAIN SELECT * FROM pgbench_accounts WHERE filler LIKE '%x%'; mislabeled, since there is no way for the system to protect itself against arbitrary C code, but in most likely cases the result will be no worse than for any other function. If in doubt, it is probably best to label functions - as UNSAFE. + as UNSAFE. @@ -527,13 +582,13 @@ EXPLAIN SELECT * FROM pgbench_accounts WHERE filler LIKE '%x%'; Note that the query planner does not consider deferring the evaluation of parallel-restricted functions or aggregates involved in the query in - order to obtain a superior plan. So, for example, if a WHERE + order to obtain a superior plan. So, for example, if a WHERE clause applied to a particular table is parallel restricted, the query planner will not consider performing a scan of that table in the parallel portion of a plan. In some cases, it would be possible (and perhaps even efficient) to include the scan of that table in the parallel portion of the query and defer the evaluation of the - WHERE clause so that it happens above the Gather + WHERE clause so that it happens above the Gather node. However, the planner does not do this. diff --git a/doc/src/sgml/passwordcheck.sgml b/doc/src/sgml/passwordcheck.sgml index 6e6e4ef435..4128b6cc4f 100644 --- a/doc/src/sgml/passwordcheck.sgml +++ b/doc/src/sgml/passwordcheck.sgml @@ -10,22 +10,22 @@ The passwordcheck module checks users' passwords whenever they are set with - or - . + or + . If a password is considered too weak, it will be rejected and the command will terminate with an error. To enable this module, add '$libdir/passwordcheck' - to in + to in postgresql.conf, then restart the server. You can adapt this module to your needs by changing the source code. For example, you can use - CrackLib + CrackLib to check passwords — this only requires uncommenting two lines in the Makefile and rebuilding the module. (We cannot include CrackLib @@ -49,7 +49,7 @@ For this reason, passwordcheck is not recommended if your security requirements are high. 
It is more secure to use an external authentication method such as GSSAPI - (see ) than to rely on + (see ) than to rely on passwords within the database. diff --git a/doc/src/sgml/perform.sgml b/doc/src/sgml/perform.sgml index 924f6091ba..262f30fd4c 100644 --- a/doc/src/sgml/perform.sgml +++ b/doc/src/sgml/perform.sgml @@ -30,8 +30,8 @@ plan for each query it receives. Choosing the right plan to match the query structure and the properties of the data is absolutely critical for good performance, so the system includes - a complex planner that tries to choose good plans. - You can use the command + a complex planner that tries to choose good plans. + You can use the command to see what query plan the planner creates for any query. Plan-reading is an art that requires some experience to master, but this section attempts to cover the basics. @@ -39,17 +39,17 @@ Examples in this section are drawn from the regression test database - after doing a VACUUM ANALYZE, using 9.3 development sources. + after doing a VACUUM ANALYZE, using 9.3 development sources. You should be able to get similar results if you try the examples yourself, but your estimated costs and row counts might vary slightly - because ANALYZE's statistics are random samples rather + because ANALYZE's statistics are random samples rather than exact, and because costs are inherently somewhat platform-dependent. - The examples use EXPLAIN's default text output + The examples use EXPLAIN's default text output format, which is compact and convenient for humans to read. - If you want to feed EXPLAIN's output to a program for further + If you want to feed EXPLAIN's output to a program for further analysis, you should use one of its machine-readable output formats (XML, JSON, or YAML) instead. @@ -58,12 +58,12 @@ <command>EXPLAIN</command> Basics - The structure of a query plan is a tree of plan nodes. + The structure of a query plan is a tree of plan nodes. Nodes at the bottom level of the tree are scan nodes: they return raw rows from a table. There are different types of scan nodes for different table access methods: sequential scans, index scans, and bitmap index - scans. There are also non-table row sources, such as VALUES - clauses and set-returning functions in FROM, which have their + scans. There are also non-table row sources, such as VALUES + clauses and set-returning functions in FROM, which have their own scan node types. If the query requires joining, aggregation, sorting, or other operations on the raw rows, then there will be additional nodes @@ -93,7 +93,7 @@ EXPLAIN SELECT * FROM tenk1; - Since this query has no WHERE clause, it must scan all the + Since this query has no WHERE clause, it must scan all the rows of the table, so the planner has chosen to use a simple sequential scan plan. The numbers that are quoted in parentheses are (left to right): @@ -111,7 +111,7 @@ EXPLAIN SELECT * FROM tenk1; Estimated total cost. This is stated on the assumption that the plan node is run to completion, i.e., all available rows are retrieved. In practice a node's parent node might stop short of reading all - available rows (see the LIMIT example below). + available rows (see the LIMIT example below). @@ -132,10 +132,10 @@ EXPLAIN SELECT * FROM tenk1; The costs are measured in arbitrary units determined by the planner's - cost parameters (see ). + cost parameters (see ). 
Traditional practice is to measure the costs in units of disk page - fetches; that is, is conventionally - set to 1.0 and the other cost parameters are set relative + fetches; that is, is conventionally + set to 1.0 and the other cost parameters are set relative to that. The examples in this section are run with the default cost parameters. @@ -152,11 +152,11 @@ EXPLAIN SELECT * FROM tenk1; - The rows value is a little tricky because it is + The rows value is a little tricky because it is not the number of rows processed or scanned by the plan node, but rather the number emitted by the node. This is often less than the number scanned, as a result of filtering by any - WHERE-clause conditions that are being applied at the node. + WHERE-clause conditions that are being applied at the node. Ideally the top-level rows estimate will approximate the number of rows actually returned, updated, or deleted by the query. @@ -182,14 +182,14 @@ SELECT relpages, reltuples FROM pg_class WHERE relname = 'tenk1'; you will find that tenk1 has 358 disk pages and 10000 rows. The estimated cost is computed as (disk pages read * - ) + (rows scanned * - ). By default, - seq_page_cost is 1.0 and cpu_tuple_cost is 0.01, + ) + (rows scanned * + ). By default, + seq_page_cost is 1.0 and cpu_tuple_cost is 0.01, so the estimated cost is (358 * 1.0) + (10000 * 0.01) = 458. - Now let's modify the query to add a WHERE condition: + Now let's modify the query to add a WHERE condition: EXPLAIN SELECT * FROM tenk1 WHERE unique1 < 7000; @@ -200,21 +200,21 @@ EXPLAIN SELECT * FROM tenk1 WHERE unique1 < 7000; Filter: (unique1 < 7000) - Notice that the EXPLAIN output shows the WHERE - clause being applied as a filter condition attached to the Seq + Notice that the EXPLAIN output shows the WHERE + clause being applied as a filter condition attached to the Seq Scan plan node. This means that the plan node checks the condition for each row it scans, and outputs only the ones that pass the condition. The estimate of output rows has been reduced because of the - WHERE clause. + WHERE clause. However, the scan will still have to visit all 10000 rows, so the cost hasn't decreased; in fact it has gone up a bit (by 10000 * , to be exact) to reflect the extra CPU - time spent checking the WHERE condition. + linkend="guc-cpu-operator-cost"/>, to be exact) to reflect the extra CPU + time spent checking the WHERE condition. - The actual number of rows this query would select is 7000, but the rows + The actual number of rows this query would select is 7000, but the rows estimate is only approximate. If you try to duplicate this experiment, you will probably get a slightly different estimate; moreover, it can change after each ANALYZE command, because the @@ -245,12 +245,12 @@ EXPLAIN SELECT * FROM tenk1 WHERE unique1 < 100; scan. (The reason for using two plan levels is that the upper plan node sorts the row locations identified by the index into physical order before reading them, to minimize the cost of separate fetches. - The bitmap mentioned in the node names is the mechanism that + The bitmap mentioned in the node names is the mechanism that does the sorting.) 
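   To see what the planner would do without the bitmap machinery, that plan type can be
   disabled temporarily, in the same spirit as the enable_sort experiment shown later in
   this section; a sketch:

SET enable_bitmapscan = off;
EXPLAIN SELECT * FROM tenk1 WHERE unique1 < 100;
RESET enable_bitmapscan;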
- Now let's add another condition to the WHERE clause: + Now let's add another condition to the WHERE clause: EXPLAIN SELECT * FROM tenk1 WHERE unique1 < 100 AND stringu1 = 'xxx'; @@ -266,15 +266,15 @@ EXPLAIN SELECT * FROM tenk1 WHERE unique1 < 100 AND stringu1 = 'xxx'; The added condition stringu1 = 'xxx' reduces the output row count estimate, but not the cost because we still have to visit - the same set of rows. Notice that the stringu1 clause + the same set of rows. Notice that the stringu1 clause cannot be applied as an index condition, since this index is only on - the unique1 column. Instead it is applied as a filter on + the unique1 column. Instead it is applied as a filter on the rows retrieved by the index. Thus the cost has actually gone up slightly to reflect this extra checking. - In some cases the planner will prefer a simple index scan plan: + In some cases the planner will prefer a simple index scan plan: EXPLAIN SELECT * FROM tenk1 WHERE unique1 = 42; @@ -289,14 +289,14 @@ EXPLAIN SELECT * FROM tenk1 WHERE unique1 = 42; makes them even more expensive to read, but there are so few that the extra cost of sorting the row locations is not worth it. You'll most often see this plan type for queries that fetch just a single row. It's - also often used for queries that have an ORDER BY condition + also often used for queries that have an ORDER BY condition that matches the index order, because then no extra sorting step is needed - to satisfy the ORDER BY. + to satisfy the ORDER BY. If there are separate indexes on several of the columns referenced - in WHERE, the planner might choose to use an AND or OR + in WHERE, the planner might choose to use an AND or OR combination of the indexes: @@ -320,7 +320,7 @@ EXPLAIN SELECT * FROM tenk1 WHERE unique1 < 100 AND unique2 > 9000; - Here is an example showing the effects of LIMIT: + Here is an example showing the effects of LIMIT: EXPLAIN SELECT * FROM tenk1 WHERE unique1 < 100 AND unique2 > 9000 LIMIT 2; @@ -335,7 +335,7 @@ EXPLAIN SELECT * FROM tenk1 WHERE unique1 < 100 AND unique2 > 9000 LIMIT 2 - This is the same query as above, but we added a LIMIT so that + This is the same query as above, but we added a LIMIT so that not all the rows need be retrieved, and the planner changed its mind about what to do. Notice that the total cost and row count of the Index Scan node are shown as if it were run to completion. However, the Limit node @@ -370,32 +370,32 @@ WHERE t1.unique1 < 10 AND t1.unique2 = t2.unique2; In this plan, we have a nested-loop join node with two table scans as inputs, or children. The indentation of the node summary lines reflects - the plan tree structure. The join's first, or outer, child + the plan tree structure. The join's first, or outer, child is a bitmap scan similar to those we saw before. Its cost and row count - are the same as we'd get from SELECT ... WHERE unique1 < 10 + are the same as we'd get from SELECT ... WHERE unique1 < 10 because we are - applying the WHERE clause unique1 < 10 + applying the WHERE clause unique1 < 10 at that node. The t1.unique2 = t2.unique2 clause is not relevant yet, so it doesn't affect the row count of the outer scan. The nested-loop join node will run its second, - or inner child once for each row obtained from the outer child. + or inner child once for each row obtained from the outer child. 
Column values from the current outer row can be plugged into the inner - scan; here, the t1.unique2 value from the outer row is available, + scan; here, the t1.unique2 value from the outer row is available, so we get a plan and costs similar to what we saw above for a simple - SELECT ... WHERE t2.unique2 = constant case. + SELECT ... WHERE t2.unique2 = constant case. (The estimated cost is actually a bit lower than what was seen above, as a result of caching that's expected to occur during the repeated - index scans on t2.) The + index scans on t2.) The costs of the loop node are then set on the basis of the cost of the outer - scan, plus one repetition of the inner scan for each outer row (10 * 7.87, + scan, plus one repetition of the inner scan for each outer row (10 * 7.91, here), plus a little CPU time for join processing. In this example the join's output row count is the same as the product of the two scans' row counts, but that's not true in all cases because - there can be additional WHERE clauses that mention both tables + there can be additional WHERE clauses that mention both tables and so can only be applied at the join point, not to either input scan. Here's an example: @@ -418,15 +418,15 @@ WHERE t1.unique1 < 10 AND t2.unique2 < 10 AND t1.hundred < t2.hundred; The condition t1.hundred < t2.hundred can't be - tested in the tenk2_unique2 index, so it's applied at the + tested in the tenk2_unique2 index, so it's applied at the join node. This reduces the estimated output row count of the join node, but does not change either input scan. - Notice that here the planner has chosen to materialize the inner + Notice that here the planner has chosen to materialize the inner relation of the join, by putting a Materialize plan node atop it. This - means that the t2 index scan will be done just once, even + means that the t2 index scan will be done just once, even though the nested-loop join node needs to read that data ten times, once for each row from the outer relation. The Materialize node saves the data in memory as it's read, and then returns the data from memory on each @@ -435,8 +435,8 @@ WHERE t1.unique1 < 10 AND t2.unique2 < 10 AND t1.hundred < t2.hundred; When dealing with outer joins, you might see join plan nodes with both - Join Filter and plain Filter conditions attached. - Join Filter conditions come from the outer join's ON clause, + Join Filter and plain Filter conditions attached. + Join Filter conditions come from the outer join's ON clause, so a row that fails the Join Filter condition could still get emitted as a null-extended row. But a plain Filter condition is applied after the outer-join rules and so acts to remove rows unconditionally. In an inner @@ -470,7 +470,7 @@ WHERE t1.unique1 < 100 AND t1.unique2 = t2.unique2; table are entered into an in-memory hash table, after which the other table is scanned and the hash table is probed for matches to each row. Again note how the indentation reflects the plan structure: the bitmap - scan on tenk1 is the input to the Hash node, which constructs + scan on tenk1 is the input to the Hash node, which constructs the hash table. That's then returned to the Hash Join node, which reads rows from its outer child plan and searches the hash table for each one. @@ -497,9 +497,9 @@ WHERE t1.unique1 < 100 AND t1.unique2 = t2.unique2; Merge join requires its input data to be sorted on the join keys. 
In this - plan the tenk1 data is sorted by using an index scan to visit + plan the tenk1 data is sorted by using an index scan to visit the rows in the correct order, but a sequential scan and sort is preferred - for onek, because there are many more rows to be visited in + for onek, because there are many more rows to be visited in that table. (Sequential-scan-and-sort frequently beats an index scan for sorting many rows, because of the nonsequential disk access required by the index scan.) @@ -508,11 +508,11 @@ WHERE t1.unique1 < 100 AND t1.unique2 = t2.unique2; One way to look at variant plans is to force the planner to disregard whatever strategy it thought was the cheapest, using the enable/disable - flags described in . + flags described in . (This is a crude tool, but useful. See - also .) + also .) For example, if we're unconvinced that sequential-scan-and-sort is the best way to - deal with table onek in the previous example, we could try + deal with table onek in the previous example, we could try SET enable_sort = off; @@ -530,10 +530,10 @@ WHERE t1.unique1 < 100 AND t1.unique2 = t2.unique2; -> Index Scan using onek_unique2 on onek t2 (cost=0.28..224.79 rows=1000 width=244) - which shows that the planner thinks that sorting onek by + which shows that the planner thinks that sorting onek by index-scanning is about 12% more expensive than sequential-scan-and-sort. Of course, the next question is whether it's right about that. - We can investigate that using EXPLAIN ANALYZE, as discussed + We can investigate that using EXPLAIN ANALYZE, as discussed below. @@ -544,8 +544,8 @@ WHERE t1.unique1 < 100 AND t1.unique2 = t2.unique2; It is possible to check the accuracy of the planner's estimates - by using EXPLAIN's ANALYZE option. With this - option, EXPLAIN actually executes the query, and then displays + by using EXPLAIN's ANALYZE option. With this + option, EXPLAIN actually executes the query, and then displays the true row counts and true run time accumulated within each plan node, along with the same estimates that a plain EXPLAIN shows. For example, we might get a result like this: @@ -569,7 +569,7 @@ WHERE t1.unique1 < 10 AND t1.unique2 = t2.unique2; Note that the actual time values are in milliseconds of - real time, whereas the cost estimates are expressed in + real time, whereas the cost estimates are expressed in arbitrary units; so they are unlikely to match up. The thing that's usually most important to look for is whether the estimated row counts are reasonably close to reality. In this example @@ -580,17 +580,17 @@ WHERE t1.unique1 < 10 AND t1.unique2 = t2.unique2; In some query plans, it is possible for a subplan node to be executed more than once. For example, the inner index scan will be executed once per outer row in the above nested-loop plan. In such cases, the - loops value reports the + loops value reports the total number of executions of the node, and the actual time and rows values shown are averages per-execution. This is done to make the numbers comparable with the way that the cost estimates are shown. Multiply by - the loops value to get the total time actually spent in + the loops value to get the total time actually spent in the node. In the above example, we spent a total of 0.220 milliseconds - executing the index scans on tenk2. + executing the index scans on tenk2. - In some cases EXPLAIN ANALYZE shows additional execution + In some cases EXPLAIN ANALYZE shows additional execution statistics beyond the plan node execution times and row counts. 
For example, Sort and Hash nodes provide extra information: @@ -642,13 +642,13 @@ EXPLAIN ANALYZE SELECT * FROM tenk1 WHERE ten < 7; These counts can be particularly valuable for filter conditions applied at - join nodes. The Rows Removed line only appears when at least + join nodes. The Rows Removed line only appears when at least one scanned row, or potential join pair in the case of a join node, is rejected by the filter condition. - A case similar to filter conditions occurs with lossy + A case similar to filter conditions occurs with lossy index scans. For example, consider this search for polygons containing a specific point: @@ -685,14 +685,14 @@ EXPLAIN ANALYZE SELECT * FROM polygon_tbl WHERE f1 @> polygon '(0.5,2.0)'; Here we can see that the index returned one candidate row, which was then rejected by a recheck of the index condition. This happens because a - GiST index is lossy for polygon containment tests: it actually + GiST index is lossy for polygon containment tests: it actually returns the rows with polygons that overlap the target, and then we have to do the exact containment test on those rows. - EXPLAIN has a BUFFERS option that can be used with - ANALYZE to get even more run time statistics: + EXPLAIN has a BUFFERS option that can be used with + ANALYZE to get even more run time statistics: EXPLAIN (ANALYZE, BUFFERS) SELECT * FROM tenk1 WHERE unique1 < 100 AND unique2 > 9000; @@ -714,7 +714,7 @@ EXPLAIN (ANALYZE, BUFFERS) SELECT * FROM tenk1 WHERE unique1 < 100 AND unique Execution time: 0.423 ms - The numbers provided by BUFFERS help to identify which parts + The numbers provided by BUFFERS help to identify which parts of the query are the most I/O-intensive. @@ -722,7 +722,7 @@ EXPLAIN (ANALYZE, BUFFERS) SELECT * FROM tenk1 WHERE unique1 < 100 AND unique Keep in mind that because EXPLAIN ANALYZE actually runs the query, any side-effects will happen as usual, even though whatever results the query might output are discarded in favor of - printing the EXPLAIN data. If you want to analyze a + printing the EXPLAIN data. If you want to analyze a data-modifying query without changing your tables, you can roll the command back afterwards, for example: @@ -746,8 +746,8 @@ ROLLBACK; - As seen in this example, when the query is an INSERT, - UPDATE, or DELETE command, the actual work of + As seen in this example, when the query is an INSERT, + UPDATE, or DELETE command, the actual work of applying the table changes is done by a top-level Insert, Update, or Delete plan node. The plan nodes underneath this node perform the work of locating the old rows and/or computing the new data. @@ -762,7 +762,7 @@ ROLLBACK; - When an UPDATE or DELETE command affects an + When an UPDATE or DELETE command affects an inheritance hierarchy, the output might look like this: @@ -789,7 +789,7 @@ EXPLAIN UPDATE parent SET f2 = f2 + 1 WHERE f1 = 101; scanning subplans, one per table. For clarity, the Update node is annotated to show the specific target tables that will be updated, in the same order as the corresponding subplans. (These annotations are new as - of PostgreSQL 9.5; in prior versions the reader had to + of PostgreSQL 9.5; in prior versions the reader had to intuit the target tables by inspecting the subplans.) @@ -804,12 +804,12 @@ EXPLAIN UPDATE parent SET f2 = f2 + 1 WHERE f1 = 101; ANALYZE includes executor start-up and shut-down time, as well as the time to run any triggers that are fired, but it does not include parsing, rewriting, or planning time. 
- Time spent executing BEFORE triggers, if any, is included in + Time spent executing BEFORE triggers, if any, is included in the time for the related Insert, Update, or Delete node; but time - spent executing AFTER triggers is not counted there because - AFTER triggers are fired after completion of the whole plan. + spent executing AFTER triggers is not counted there because + AFTER triggers are fired after completion of the whole plan. The total time spent in each trigger - (either BEFORE or AFTER) is also shown separately. + (either BEFORE or AFTER) is also shown separately. Note that deferred constraint triggers will not be executed until end of transaction and are thus not considered at all by EXPLAIN ANALYZE. @@ -827,13 +827,13 @@ EXPLAIN UPDATE parent SET f2 = f2 + 1 WHERE f1 = 101; network transmission costs and I/O conversion costs are not included. Second, the measurement overhead added by EXPLAIN ANALYZE can be significant, especially on machines with slow - gettimeofday() operating-system calls. You can use the - tool to measure the overhead of timing + gettimeofday() operating-system calls. You can use the + tool to measure the overhead of timing on your system. - EXPLAIN results should not be extrapolated to situations + EXPLAIN results should not be extrapolated to situations much different from the one you are actually testing; for example, results on a toy-sized table cannot be assumed to apply to large tables. The planner's cost estimates are not linear and so it might choose @@ -843,14 +843,14 @@ EXPLAIN UPDATE parent SET f2 = f2 + 1 WHERE f1 = 101; The planner realizes that it's going to take one disk page read to process the table in any case, so there's no value in expending additional page reads to look at an index. (We saw this happening in the - polygon_tbl example above.) + polygon_tbl example above.) There are cases in which the actual and estimated values won't match up well, but nothing is really wrong. One such case occurs when - plan node execution is stopped short by a LIMIT or similar - effect. For example, in the LIMIT query we used before, + plan node execution is stopped short by a LIMIT or similar + effect. For example, in the LIMIT query we used before, EXPLAIN ANALYZE SELECT * FROM tenk1 WHERE unique1 < 100 AND unique2 > 9000 LIMIT 2; @@ -880,10 +880,10 @@ EXPLAIN ANALYZE SELECT * FROM tenk1 WHERE unique1 < 100 AND unique2 > 9000 and the next key value in the one input is greater than the last key value of the other input; in such a case there can be no more matches and so no need to scan the rest of the first input. This results in not reading all - of one child, with results like those mentioned for LIMIT. + of one child, with results like those mentioned for LIMIT. Also, if the outer (first) child contains rows with duplicate key values, the inner (second) child is backed up and rescanned for the portion of its - rows matching that key value. EXPLAIN ANALYZE counts these + rows matching that key value. EXPLAIN ANALYZE counts these repeated emissions of the same inner rows as if they were real additional rows. When there are many outer duplicates, the reported actual row count for the inner child plan node can be significantly larger than the number @@ -894,6 +894,18 @@ EXPLAIN ANALYZE SELECT * FROM tenk1 WHERE unique1 < 100 AND unique2 > 9000 BitmapAnd and BitmapOr nodes always report their actual row counts as zero, due to implementation limitations. 
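A quick way to observe that last limitation is to re-run the two-condition query from earlier in the section without the LIMIT:

EXPLAIN ANALYZE SELECT * FROM tenk1 WHERE unique1 < 100 AND unique2 > 9000;

Assuming the planner still chooses the bitmap AND combination of the two indexes for this query, the BitmapAnd line in the output reports zero actual rows even though the node clearly feeds row locations to its parent Bitmap Heap Scan.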
+ + + Generally, the EXPLAIN output will display details for + every plan node which was generated by the query planner. However, there + are cases where the executor is able to determine that certain nodes are + not required; currently, the only node types to support this are the + Append and MergeAppend nodes. These + node types have the ability to discard subnodes which they are able to + determine won't contain any records required by the query. It is possible + to determine that nodes have been removed in this way by the presence of a + "Subplans Removed" property in the EXPLAIN output. + @@ -948,9 +960,9 @@ WHERE relname LIKE 'tenk1%'; For efficiency reasons, reltuples and relpages are not updated on-the-fly, and so they usually contain somewhat out-of-date values. - They are updated by VACUUM, ANALYZE, and a - few DDL commands such as CREATE INDEX. A VACUUM - or ANALYZE operation that does not scan the entire table + They are updated by VACUUM, ANALYZE, and a + few DDL commands such as CREATE INDEX. A VACUUM + or ANALYZE operation that does not scan the entire table (which is commonly the case) will incrementally update the reltuples count on the basis of the part of the table it did scan, resulting in an approximate value. @@ -966,16 +978,16 @@ WHERE relname LIKE 'tenk1%'; Most queries retrieve only a fraction of the rows in a table, due - to WHERE clauses that restrict the rows to be + to WHERE clauses that restrict the rows to be examined. The planner thus needs to make an estimate of the - selectivity of WHERE clauses, that is, + selectivity of WHERE clauses, that is, the fraction of rows that match each condition in the - WHERE clause. The information used for this task is + WHERE clause. The information used for this task is stored in the pg_statistic system catalog. Entries in pg_statistic - are updated by the ANALYZE and VACUUM - ANALYZE commands, and are always approximate even when freshly + are updated by the ANALYZE and VACUUM + ANALYZE commands, and are always approximate even when freshly updated. @@ -1020,19 +1032,19 @@ WHERE tablename = 'road'; Note that two rows are displayed for the same column, one corresponding to the complete inheritance hierarchy starting at the - road table (inherited=t), + road table (inherited=t), and another one including only the road table itself - (inherited=f). + (inherited=f). The amount of information stored in pg_statistic - by ANALYZE, in particular the maximum number of entries in the - most_common_vals and histogram_bounds + by ANALYZE, in particular the maximum number of entries in the + most_common_vals and histogram_bounds arrays for each column, can be set on a - column-by-column basis using the ALTER TABLE SET STATISTICS + column-by-column basis using the ALTER TABLE SET STATISTICS command, or globally by setting the - configuration variable. + configuration variable. The default limit is presently 100 entries. Raising the limit might allow more accurate planner estimates to be made, particularly for columns with irregular data distributions, at the price of consuming @@ -1043,7 +1055,7 @@ WHERE tablename = 'road'; Further details about the planner's use of statistics can be found in - . + . @@ -1072,7 +1084,7 @@ WHERE tablename = 'road'; an assumption that does not hold when column values are correlated. Regular statistics, because of their per-individual-column nature, cannot capture any knowledge about cross-column correlation. 
- However, PostgreSQL has the ability to compute + However, PostgreSQL has the ability to compute multivariate statistics, which can capture such information. @@ -1081,13 +1093,13 @@ WHERE tablename = 'road'; Because the number of possible column combinations is very large, it's impractical to compute multivariate statistics automatically. Instead, extended statistics objects, more often - called just statistics objects, can be created to instruct + called just statistics objects, can be created to instruct the server to obtain statistics across interesting sets of columns. Statistics objects are created using - , which see for more details. + , which see for more details. Creation of such an object merely creates a catalog entry expressing interest in the statistics. Actual data collection is performed by ANALYZE (either a manual command, or background @@ -1107,7 +1119,7 @@ WHERE tablename = 'road'; - The following subsections describe the types of extended statistics + The following subsections describe the kinds of extended statistics that are currently supported. @@ -1115,13 +1127,13 @@ WHERE tablename = 'road'; Functional Dependencies - The simplest type of extended statistics tracks functional - dependencies, a concept used in definitions of database normal forms. - We say that column b is functionally dependent on - column a if knowledge of the value of - a is sufficient to determine the value - of b, that is there are no two rows having the same value - of a but different values of b. + The simplest kind of extended statistics tracks functional + dependencies, a concept used in definitions of database normal forms. + We say that column b is functionally dependent on + column a if knowledge of the value of + a is sufficient to determine the value + of b, that is there are no two rows having the same value + of a but different values of b. In a fully normalized database, functional dependencies should exist only on primary keys and superkeys. However, in practice many data sets are not fully normalized for various reasons; intentional @@ -1142,15 +1154,15 @@ WHERE tablename = 'road'; - To inform the planner about functional dependencies, ANALYZE + To inform the planner about functional dependencies, ANALYZE can collect measurements of cross-column dependency. Assessing the degree of dependency between all sets of columns would be prohibitively expensive, so data collection is limited to those groups of columns appearing together in a statistics object defined with - the dependencies option. It is advisable to create - dependencies statistics only for column groups that are + the dependencies option. It is advisable to create + dependencies statistics only for column groups that are strongly correlated, to avoid unnecessary overhead in both - ANALYZE and later query planning. + ANALYZE and later query planning. @@ -1189,7 +1201,7 @@ SELECT stxname, stxkeys, stxdependencies simple equality conditions that compare columns to constant values. They are not used to improve estimates for equality conditions comparing two columns or comparing a column to an expression, nor for - range clauses, LIKE or any other type of condition. + range clauses, LIKE or any other type of condition. @@ -1200,7 +1212,7 @@ SELECT stxname, stxkeys, stxdependencies SELECT * FROM zipcodes WHERE city = 'San Francisco' AND zip = '94105'; - the planner will disregard the city clause as not + the planner will disregard the city clause as not changing the selectivity, which is correct. 
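The statement that creates such a dependencies statistics object is not visible in these hunks; for the zipcodes example used here it would look roughly like this (stts_zip is an arbitrary name chosen for illustration):

CREATE STATISTICS stts_zip (dependencies) ON city, zip FROM zipcodes;
ANALYZE zipcodes;  -- collects the dependency data for the new object

With that object populated, the planner behaves as just described for matching city/zip pairs.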
However, it will make the same assumption about @@ -1233,11 +1245,11 @@ SELECT * FROM zipcodes WHERE city = 'San Francisco' AND zip = '90210'; - To improve such estimates, ANALYZE can collect n-distinct + To improve such estimates, ANALYZE can collect n-distinct statistics for groups of columns. As before, it's impractical to do this for every possible column grouping, so data is collected only for those groups of columns appearing together in a statistics object - defined with the ndistinct option. Data will be collected + defined with the ndistinct option. Data will be collected for each possible combination of two or more columns from the set of listed columns. @@ -1267,17 +1279,17 @@ nd | {"1, 2": 33178, "1, 5": 33178, "2, 5": 27435, "1, 2, 5": 33178} - It's advisable to create ndistinct statistics objects only + It's advisable to create ndistinct statistics objects only on combinations of columns that are actually used for grouping, and for which misestimation of the number of groups is resulting in bad - plans. Otherwise, the ANALYZE cycles are just wasted. + plans. Otherwise, the ANALYZE cycles are just wasted. - Controlling the Planner with Explicit <literal>JOIN</> Clauses + Controlling the Planner with Explicit <literal>JOIN</literal> Clauses join @@ -1286,7 +1298,7 @@ nd | {"1, 2": 33178, "1, 5": 33178, "2, 5": 27435, "1, 2, 5": 33178} It is possible - to control the query planner to some extent by using the explicit JOIN + to control the query planner to some extent by using the explicit JOIN syntax. To see why this matters, we first need some background. @@ -1297,13 +1309,13 @@ SELECT * FROM a, b, c WHERE a.id = b.id AND b.ref = c.id; the planner is free to join the given tables in any order. For example, it could generate a query plan that joins A to B, using - the WHERE condition a.id = b.id, and then - joins C to this joined table, using the other WHERE + the WHERE condition a.id = b.id, and then + joins C to this joined table, using the other WHERE condition. Or it could join B to C and then join A to that result. Or it could join A to C and then join them with B — but that would be inefficient, since the full Cartesian product of A and C would have to be formed, there being no applicable condition in the - WHERE clause to allow optimization of the join. (All + WHERE clause to allow optimization of the join. (All joins in the PostgreSQL executor happen between two input tables, so it's necessary to build up the result in one or another of these fashions.) The important point is that @@ -1323,7 +1335,7 @@ SELECT * FROM a, b, c WHERE a.id = b.id AND b.ref = c.id; PostgreSQL planner will switch from exhaustive search to a genetic probabilistic search through a limited number of possibilities. (The switch-over threshold is - set by the run-time + set by the run-time parameter.) The genetic search takes less time, but it won't necessarily find the best possible plan. @@ -1347,30 +1359,30 @@ SELECT * FROM a LEFT JOIN (b JOIN c ON (b.ref = c.id)) ON (a.id = b.id); SELECT * FROM a LEFT JOIN b ON (a.bid = b.id) LEFT JOIN c ON (a.cid = c.id); it is valid to join A to either B or C first. Currently, only - FULL JOIN completely constrains the join order. Most - practical cases involving LEFT JOIN or RIGHT JOIN + FULL JOIN completely constrains the join order. Most + practical cases involving LEFT JOIN or RIGHT JOIN can be rearranged to some extent. 
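To check which of the permitted orders the planner actually settled on, it is enough to EXPLAIN the query:

-- a, b and c are the schematic tables used in this example
EXPLAIN SELECT * FROM a LEFT JOIN b ON (a.bid = b.id) LEFT JOIN c ON (a.cid = c.id);

The nesting of the join nodes in the resulting plan tree shows the order that was chosen.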
- Explicit inner join syntax (INNER JOIN, CROSS - JOIN, or unadorned JOIN) is semantically the same as - listing the input relations in FROM, so it does not + Explicit inner join syntax (INNER JOIN, CROSS + JOIN, or unadorned JOIN) is semantically the same as + listing the input relations in FROM, so it does not constrain the join order. - Even though most kinds of JOIN don't completely constrain + Even though most kinds of JOIN don't completely constrain the join order, it is possible to instruct the PostgreSQL query planner to treat all - JOIN clauses as constraining the join order anyway. + JOIN clauses as constraining the join order anyway. For example, these three queries are logically equivalent: SELECT * FROM a, b, c WHERE a.id = b.id AND b.ref = c.id; SELECT * FROM a CROSS JOIN b CROSS JOIN c WHERE a.id = b.id AND b.ref = c.id; SELECT * FROM a JOIN (b JOIN c ON (b.ref = c.id)) ON (a.id = b.id); - But if we tell the planner to honor the JOIN order, + But if we tell the planner to honor the JOIN order, the second and third take less time to plan than the first. This effect is not worth worrying about for only three tables, but it can be a lifesaver with many tables. @@ -1378,19 +1390,19 @@ SELECT * FROM a JOIN (b JOIN c ON (b.ref = c.id)) ON (a.id = b.id); To force the planner to follow the join order laid out by explicit - JOINs, - set the run-time parameter to 1. + JOINs, + set the run-time parameter to 1. (Other possible values are discussed below.) You do not need to constrain the join order completely in order to - cut search time, because it's OK to use JOIN operators - within items of a plain FROM list. For example, consider: + cut search time, because it's OK to use JOIN operators + within items of a plain FROM list. For example, consider: SELECT * FROM a CROSS JOIN b, c, d, e WHERE ...; - With join_collapse_limit = 1, this + With join_collapse_limit = 1, this forces the planner to join A to B before joining them to other tables, but doesn't constrain its choices otherwise. In this example, the number of possible join orders is reduced by a factor of 5. @@ -1400,7 +1412,7 @@ SELECT * FROM a CROSS JOIN b, c, d, e WHERE ...; Constraining the planner's search in this way is a useful technique both for reducing planning time and for directing the planner to a good query plan. If the planner chooses a bad join order by default, - you can force it to choose a better order via JOIN syntax + you can force it to choose a better order via JOIN syntax — assuming that you know of a better order, that is. Experimentation is recommended. @@ -1415,35 +1427,35 @@ FROM x, y, WHERE somethingelse; This situation might arise from use of a view that contains a join; - the view's SELECT rule will be inserted in place of the view + the view's SELECT rule will be inserted in place of the view reference, yielding a query much like the above. Normally, the planner will try to collapse the subquery into the parent, yielding: SELECT * FROM x, y, a, b, c WHERE something AND somethingelse; This usually results in a better plan than planning the subquery - separately. (For example, the outer WHERE conditions might be such that + separately. (For example, the outer WHERE conditions might be such that joining X to A first eliminates many rows of A, thus avoiding the need to form the full logical output of the subquery.) But at the same time, we have increased the planning time; here, we have a five-way join problem replacing two separate three-way join problems. 
Because of the exponential growth of the number of possibilities, this makes a big difference. The planner tries to avoid getting stuck in huge join search - problems by not collapsing a subquery if more than from_collapse_limit - FROM items would result in the parent + problems by not collapsing a subquery if more than from_collapse_limit + FROM items would result in the parent query. You can trade off planning time against quality of plan by adjusting this run-time parameter up or down. - and + and are similarly named because they do almost the same thing: one controls - when the planner will flatten out subqueries, and the + when the planner will flatten out subqueries, and the other controls when it will flatten out explicit joins. Typically - you would either set join_collapse_limit equal to - from_collapse_limit (so that explicit joins and subqueries - act similarly) or set join_collapse_limit to 1 (if you want + you would either set join_collapse_limit equal to + from_collapse_limit (so that explicit joins and subqueries + act similarly) or set join_collapse_limit to 1 (if you want to control join order with explicit joins). But you might set them differently if you are trying to fine-tune the trade-off between planning time and run time. @@ -1468,7 +1480,7 @@ SELECT * FROM x, y, a, b, c WHERE something AND somethingelse; - When using multiple INSERTs, turn off autocommit and just do + When using multiple INSERTs, turn off autocommit and just do one commit at the end. (In plain SQL, this means issuing BEGIN at the start and COMMIT at the end. Some client libraries might @@ -1488,7 +1500,7 @@ SELECT * FROM x, y, a, b, c WHERE something AND somethingelse; Use <command>COPY</command> - Use to load + Use to load all the rows in one command, instead of using a series of INSERT commands. The COPY command is optimized for loading large numbers of rows; it is less @@ -1500,19 +1512,19 @@ SELECT * FROM x, y, a, b, c WHERE something AND somethingelse; If you cannot use COPY, it might help to use to create a + linkend="sql-prepare"/> to create a prepared INSERT statement, and then use EXECUTE as many times as required. This avoids some of the overhead of repeatedly parsing and planning INSERT. Different interfaces provide this facility - in different ways; look for prepared statements in the interface + in different ways; look for prepared statements in the interface documentation. Note that loading a large number of rows using COPY is almost always faster than using - INSERT, even if PREPARE is used and + INSERT, even if PREPARE is used and multiple insertions are batched into a single transaction. @@ -1523,7 +1535,7 @@ SELECT * FROM x, y, a, b, c WHERE something AND somethingelse; needs to be written, because in case of an error, the files containing the newly loaded data will be removed anyway. However, this consideration only applies when - is minimal as all commands + is minimal as all commands must write WAL otherwise. @@ -1557,7 +1569,7 @@ SELECT * FROM x, y, a, b, c WHERE something AND somethingelse; Just as with indexes, a foreign key constraint can be checked - in bulk more efficiently than row-by-row. So it might be + in bulk more efficiently than row-by-row. So it might be useful to drop foreign key constraints, load data, and re-create the constraints. Again, there is a trade-off between data load speed and loss of error checking while the constraint is missing. @@ -1570,7 +1582,7 @@ SELECT * FROM x, y, a, b, c WHERE something AND somethingelse; the row's foreign key constraint). 
Loading many millions of rows can cause the trigger event queue to overflow available memory, leading to intolerable swapping or even outright failure of the command. Therefore - it may be necessary, not just desirable, to drop and re-apply + it may be necessary, not just desirable, to drop and re-apply foreign keys when loading large amounts of data. If temporarily removing the constraint isn't acceptable, the only other recourse may be to split up the load operation into smaller transactions. @@ -1581,11 +1593,11 @@ SELECT * FROM x, y, a, b, c WHERE something AND somethingelse; Increase <varname>maintenance_work_mem</varname> - Temporarily increasing the + Temporarily increasing the configuration variable when loading large amounts of data can lead to improved performance. This will help to speed up CREATE - INDEX commands and ALTER TABLE ADD FOREIGN KEY commands. - It won't do much for COPY itself, so this advice is + INDEX commands and ALTER TABLE ADD FOREIGN KEY commands. + It won't do much for COPY itself, so this advice is only useful when you are using one or both of the above techniques. @@ -1594,7 +1606,7 @@ SELECT * FROM x, y, a, b, c WHERE something AND somethingelse; Increase <varname>max_wal_size</varname> - Temporarily increasing the + Temporarily increasing the configuration variable can also make large data loads faster. This is because loading a large amount of data into PostgreSQL will @@ -1617,9 +1629,9 @@ SELECT * FROM x, y, a, b, c WHERE something AND somethingelse; new base backup after the load has completed than to process a large amount of incremental WAL data. To prevent incremental WAL logging while loading, disable archiving and streaming replication, by setting - to minimal, - to off, and - to zero. + to minimal, + to off, and + to zero. But note that changing these settings requires a server restart. @@ -1628,8 +1640,8 @@ SELECT * FROM x, y, a, b, c WHERE something AND somethingelse; process the WAL data, doing this will actually make certain commands faster, because they are designed not to write WAL at all if wal_level - is minimal. (They can guarantee crash safety more cheaply - by doing an fsync at the end than by writing WAL.) + is minimal. (They can guarantee crash safety more cheaply + by doing an fsync at the end than by writing WAL.) This applies to the following commands: @@ -1668,7 +1680,7 @@ SELECT * FROM x, y, a, b, c WHERE something AND somethingelse; Whenever you have significantly altered the distribution of data - within a table, running is strongly recommended. This + within a table, running is strongly recommended. This includes bulk loading large amounts of data into the table. Running ANALYZE (or VACUUM ANALYZE) ensures that the planner has up-to-date statistics about the @@ -1677,27 +1689,27 @@ SELECT * FROM x, y, a, b, c WHERE something AND somethingelse; performance on any tables with inaccurate or nonexistent statistics. Note that if the autovacuum daemon is enabled, it might run ANALYZE automatically; see - - and for more information. + + and for more information. - Some Notes About <application>pg_dump</> + Some Notes About <application>pg_dump</application> - Dump scripts generated by pg_dump automatically apply + Dump scripts generated by pg_dump automatically apply several, but not all, of the above guidelines. To reload a - pg_dump dump as quickly as possible, you need to + pg_dump dump as quickly as possible, you need to do a few extra things manually. 
(Note that these points apply while - restoring a dump, not while creating it. + restoring a dump, not while creating it. The same points apply whether loading a text dump with - psql or using pg_restore to load - from a pg_dump archive file.) + psql or using pg_restore to load + from a pg_dump archive file.) - By default, pg_dump uses COPY, and when + By default, pg_dump uses COPY, and when it is generating a complete schema-and-data dump, it is careful to load data before creating indexes and foreign keys. So in this case several guidelines are handled automatically. What is left @@ -1713,10 +1725,10 @@ SELECT * FROM x, y, a, b, c WHERE something AND somethingelse; If using WAL archiving or streaming replication, consider disabling - them during the restore. To do that, set archive_mode - to off, - wal_level to minimal, and - max_wal_senders to zero before loading the dump. + them during the restore. To do that, set archive_mode + to off, + wal_level to minimal, and + max_wal_senders to zero before loading the dump. Afterwards, set them back to the right values and take a fresh base backup. @@ -1724,49 +1736,49 @@ SELECT * FROM x, y, a, b, c WHERE something AND somethingelse; Experiment with the parallel dump and restore modes of both - pg_dump and pg_restore and find the + pg_dump and pg_restore and find the optimal number of concurrent jobs to use. Dumping and restoring in - parallel by means of the option should give you a significantly higher performance over the serial mode. Consider whether the whole dump should be restored as a single - transaction. To do that, pass the If multiple CPUs are available in the database server, consider using - pg_restore's option. This allows concurrent data loading and index creation. - Run ANALYZE afterwards. + Run ANALYZE afterwards. - A data-only dump will still use COPY, but it does not + A data-only dump will still use COPY, but it does not drop or recreate indexes, and it does not normally touch foreign keys. You can get the effect of disabling foreign keys by using - the option — but realize that that eliminates, rather than just postpones, foreign key validation, and so it is possible to insert bad data if you use it. @@ -1778,9 +1790,9 @@ SELECT * FROM x, y, a, b, c WHERE something AND somethingelse; while loading the data, but don't bother increasing maintenance_work_mem; rather, you'd do that while manually recreating indexes and foreign keys afterwards. - And don't forget to ANALYZE when you're done; see - - and for more information. + And don't forget to ANALYZE when you're done; see + + and for more information. @@ -1808,7 +1820,7 @@ SELECT * FROM x, y, a, b, c WHERE something AND somethingelse; Place the database cluster's data directory in a memory-backed - file system (i.e. RAM disk). This eliminates all + file system (i.e. RAM disk). This eliminates all database disk I/O, but limits data storage to the amount of available memory (and perhaps swap). @@ -1816,39 +1828,39 @@ SELECT * FROM x, y, a, b, c WHERE something AND somethingelse; - Turn off ; there is no need to flush + Turn off ; there is no need to flush data to disk. - Turn off ; there might be no + Turn off ; there might be no need to force WAL writes to disk on every commit. This setting does risk transaction loss (though not data - corruption) in case of a crash of the database. + corruption) in case of a crash of the database. - Turn off ; there is no need + Turn off ; there is no need to guard against partial page writes. 
- Increase and ; this reduces the frequency + Increase and ; this reduces the frequency of checkpoints, but increases the storage requirements of - /pg_wal. + /pg_wal. - Create unlogged + Create unlogged tables to avoid WAL writes, though it makes the tables non-crash-safe. diff --git a/doc/src/sgml/pgbuffercache.sgml b/doc/src/sgml/pgbuffercache.sgml index 4e53009ae0..faf5a3115d 100644 --- a/doc/src/sgml/pgbuffercache.sgml +++ b/doc/src/sgml/pgbuffercache.sgml @@ -33,11 +33,11 @@ The <structname>pg_buffercache</structname> View - The definitions of the columns exposed by the view are shown in . + The definitions of the columns exposed by the view are shown in .
- <structname>pg_buffercache</> Columns + <structname>pg_buffercache</structname> Columns @@ -54,7 +54,7 @@ bufferidinteger - ID, in the range 1..shared_buffers + ID, in the range 1..shared_buffers @@ -83,7 +83,7 @@ smallint Fork number within the relation; see - include/common/relpath.h + include/common/relpath.h @@ -120,22 +120,22 @@ There is one row for each buffer in the shared cache. Unused buffers are - shown with all fields null except bufferid. Shared system + shown with all fields null except bufferid. Shared system catalogs are shown as belonging to database zero. Because the cache is shared by all the databases, there will normally be pages from relations not belonging to the current database. This means - that there may not be matching join rows in pg_class for + that there may not be matching join rows in pg_class for some rows, or that there could even be incorrect joins. If you are - trying to join against pg_class, it's a good idea to - restrict the join to rows having reldatabase equal to + trying to join against pg_class, it's a good idea to + restrict the join to rows having reldatabase equal to the current database's OID or zero. - When the pg_buffercache view is accessed, internal buffer + When the pg_buffercache view is accessed, internal buffer manager locks are taken for long enough to copy all the buffer state data that the view will display. This ensures that the view produces a consistent set of results, while not diff --git a/doc/src/sgml/pgcrypto.sgml b/doc/src/sgml/pgcrypto.sgml index bf514aacf3..5c79666654 100644 --- a/doc/src/sgml/pgcrypto.sgml +++ b/doc/src/sgml/pgcrypto.sgml @@ -13,8 +13,8 @@ - The pgcrypto module provides cryptographic functions for - PostgreSQL. + The pgcrypto module provides cryptographic functions for + PostgreSQL. @@ -33,19 +33,19 @@ digest(data bytea, type text) returns bytea - Computes a binary hash of the given data. - type is the algorithm to use. + Computes a binary hash of the given data. + type is the algorithm to use. Standard algorithms are md5, sha1, sha224, sha256, sha384 and sha512. - If pgcrypto was built with + If pgcrypto was built with OpenSSL, more algorithms are available, as detailed in - . + . If you want the digest as a hexadecimal string, use - encode() on the result. For example: + encode() on the result. For example: CREATE OR REPLACE FUNCTION sha1(bytea) returns text AS $$ SELECT encode(digest($1, 'sha1'), 'hex') @@ -63,16 +63,16 @@ $$ LANGUAGE SQL STRICT IMMUTABLE; hmac(data text, key text, type text) returns bytea -hmac(data bytea, key text, type text) returns bytea +hmac(data bytea, key bytea, type text) returns bytea - Calculates hashed MAC for data with key key. - type is the same as in digest(). + Calculates hashed MAC for data with key key. + type is the same as in digest(). - This is similar to digest() but the hash can only be + This is similar to digest() but the hash can only be recalculated knowing the key. This prevents the scenario of someone altering data and also changing the hash to match. @@ -88,14 +88,14 @@ hmac(data bytea, key text, type text) returns bytea Password Hashing Functions - The functions crypt() and gen_salt() + The functions crypt() and gen_salt() are specifically designed for hashing passwords. - crypt() does the hashing and gen_salt() + crypt() does the hashing and gen_salt() prepares algorithm parameters for it. 
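A minimal illustration of the digest() and hmac() functions described above (the input strings and key are arbitrary):

SELECT encode(digest('hello world', 'sha256'), 'hex');
SELECT encode(hmac('hello world', 'a-secret-key', 'sha256'), 'hex');

Both functions return bytea, hence the encode(..., 'hex') wrapper; the hmac() result changes whenever the key does, which is what makes it tamper-evident.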
- The algorithms in crypt() differ from the usual + The algorithms in crypt() differ from the usual MD5 or SHA1 hashing algorithms in the following respects: @@ -108,7 +108,7 @@ hmac(data bytea, key text, type text) returns bytea - They use a random value, called the salt, so that users + They use a random value, called the salt, so that users having the same password will have different encrypted passwords. This is also an additional defense against reversing the algorithm. @@ -129,12 +129,12 @@ hmac(data bytea, key text, type text) returns bytea - lists the algorithms + lists the algorithms supported by the crypt() function.
- Supported Algorithms for <function>crypt()</> + Supported Algorithms for <function>crypt()</function> @@ -148,7 +148,7 @@ hmac(data bytea, key text, type text) returns bytea - bf + bf 72 yes 128 @@ -156,7 +156,7 @@ hmac(data bytea, key text, type text) returns bytea Blowfish-based, variant 2a - md5 + md5 unlimited no 48 @@ -164,7 +164,7 @@ hmac(data bytea, key text, type text) returns bytea MD5-based crypt - xdes + xdes 8 yes 24 @@ -172,7 +172,7 @@ hmac(data bytea, key text, type text) returns bytea Extended DES - des + des 8 no 12 @@ -184,7 +184,7 @@ hmac(data bytea, key text, type text) returns bytea
- <function>crypt()</> + <function>crypt()</function> crypt @@ -195,10 +195,10 @@ crypt(password text, salt text) returns text
- Calculates a crypt(3)-style hash of password. + Calculates a crypt(3)-style hash of password. When storing a new password, you need to use - gen_salt() to generate a new salt value. - To check a password, pass the stored hash value as salt, + gen_salt() to generate a new salt value. + To check a password, pass the stored hash value as salt, and test whether the result matches the stored value. @@ -212,12 +212,12 @@ UPDATE ... SET pswhash = crypt('new password', gen_salt('md5')); SELECT (pswhash = crypt('entered password', pswhash)) AS pswmatch FROM ... ; - This returns true if the entered password is correct. + This returns true if the entered password is correct. - <function>gen_salt()</> + <function>gen_salt()</function> gen_salt @@ -228,30 +228,30 @@ gen_salt(type text [, iter_count integer ]) returns text
- Generates a new random salt string for use in crypt(). - The salt string also tells crypt() which algorithm to use. + Generates a new random salt string for use in crypt(). + The salt string also tells crypt() which algorithm to use. - The type parameter specifies the hashing algorithm. + The type parameter specifies the hashing algorithm. The accepted types are: des, xdes, md5 and bf. - The iter_count parameter lets the user specify the iteration + The iter_count parameter lets the user specify the iteration count, for algorithms that have one. The higher the count, the more time it takes to hash the password and therefore the more time to break it. Although with too high a count the time to calculate a hash may be several years - — which is somewhat impractical. If the iter_count + — which is somewhat impractical. If the iter_count parameter is omitted, the default iteration count is used. - Allowed values for iter_count depend on the algorithm and - are shown in . + Allowed values for iter_count depend on the algorithm and + are shown in . - Iteration Counts for <function>crypt()</> + Iteration Counts for <function>crypt()</function> @@ -263,13 +263,13 @@ gen_salt(type text [, iter_count integer ]) returns text - xdes + xdes 725 1 16777215 - bf + bf 6 4 31 @@ -292,7 +292,7 @@ gen_salt(type text [, iter_count integer ]) returns text - gives an overview of the relative slowness + gives an overview of the relative slowness of different hashing algorithms. The table shows how much time it would take to try all combinations of characters in an 8-character password, assuming @@ -310,63 +310,63 @@ gen_salt(type text [, iter_count integer ]) returns text Algorithm Hashes/sec - For [a-z] - For [A-Za-z0-9] - Duration relative to md5 hash + For [a-z] + For [A-Za-z0-9] + Duration relative to md5 hash - crypt-bf/8 + crypt-bf/8 1792 4 years 3927 years 100k - crypt-bf/7 + crypt-bf/7 3648 2 years 1929 years 50k - crypt-bf/6 + crypt-bf/6 7168 1 year 982 years 25k - crypt-bf/5 + crypt-bf/5 13504 188 days 521 years 12.5k - crypt-md5 + crypt-md5 171584 15 days 41 years 1k - crypt-des + crypt-des 23221568 157.5 minutes 108 days 7 - sha1 + sha1 37774272 90 minutes 68 days 4 - md5 (hash) + md5 (hash) 150085504 22.5 minutes 17 days @@ -388,18 +388,18 @@ gen_salt(type text [, iter_count integer ]) returns text - crypt-des and crypt-md5 algorithm numbers are - taken from John the Ripper v1.6.38 -test output. + crypt-des and crypt-md5 algorithm numbers are + taken from John the Ripper v1.6.38 -test output. - md5 hash numbers are from mdcrack 1.2. + md5 hash numbers are from mdcrack 1.2. - sha1 numbers are from lcrack-20031130-beta. + sha1 numbers are from lcrack-20031130-beta. @@ -407,10 +407,10 @@ gen_salt(type text [, iter_count integer ]) returns text crypt-bf numbers are taken using a simple program that loops over 1000 8-character passwords. That way I can show the speed with different numbers of iterations. For reference: john - -test shows 13506 loops/sec for crypt-bf/5. + -test shows 13506 loops/sec for crypt-bf/5. (The very small difference in results is in accordance with the fact that the - crypt-bf implementation in pgcrypto + crypt-bf implementation in pgcrypto is the same one used in John the Ripper.) 
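As a minimal sketch of an explicit iteration count, assuming the pgcrypto extension is installed (10 is within the bf range of 4 to 31 given above):

SELECT crypt('new password', gen_salt('bf', 10));

Raising the second gen_salt() argument makes each hash computation, and therefore each brute-force guess, slower, as the timing table above illustrates.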
@@ -436,7 +436,7 @@ gen_salt(type text [, iter_count integer ]) returns text - An encrypted PGP message consists of 2 parts, or packets: + An encrypted PGP message consists of 2 parts, or packets: @@ -459,7 +459,7 @@ gen_salt(type text [, iter_count integer ]) returns text The given password is hashed using a String2Key (S2K) algorithm. This is - rather similar to crypt() algorithms — purposefully + rather similar to crypt() algorithms — purposefully slow and with random salt — but it produces a full-length binary key. @@ -540,8 +540,8 @@ pgp_sym_encrypt(data text, psw text [, options text ]) returns bytea pgp_sym_encrypt_bytea(data bytea, psw text [, options text ]) returns bytea - Encrypt data with a symmetric PGP key psw. - The options parameter can contain option settings, + Encrypt data with a symmetric PGP key psw. + The options parameter can contain option settings, as described below. @@ -565,12 +565,12 @@ pgp_sym_decrypt_bytea(msg bytea, psw text [, options text ]) returns bytea Decrypt a symmetric-key-encrypted PGP message. - Decrypting bytea data with pgp_sym_decrypt is disallowed. + Decrypting bytea data with pgp_sym_decrypt is disallowed. This is to avoid outputting invalid character data. Decrypting - originally textual data with pgp_sym_decrypt_bytea is fine. + originally textual data with pgp_sym_decrypt_bytea is fine. - The options parameter can contain option settings, + The options parameter can contain option settings, as described below. @@ -591,11 +591,11 @@ pgp_pub_encrypt(data text, key bytea [, options text ]) returns bytea pgp_pub_encrypt_bytea(data bytea, key bytea [, options text ]) returns bytea - Encrypt data with a public PGP key key. + Encrypt data with a public PGP key key. Giving this function a secret key will produce an error. - The options parameter can contain option settings, + The options parameter can contain option settings, as described below. @@ -616,19 +616,19 @@ pgp_pub_decrypt(msg bytea, key bytea [, psw text [, options text ]]) returns tex pgp_pub_decrypt_bytea(msg bytea, key bytea [, psw text [, options text ]]) returns bytea - Decrypt a public-key-encrypted message. key must be the + Decrypt a public-key-encrypted message. key must be the secret key corresponding to the public key that was used to encrypt. If the secret key is password-protected, you must give the password in - psw. If there is no password, but you want to specify + psw. If there is no password, but you want to specify options, you need to give an empty password. - Decrypting bytea data with pgp_pub_decrypt is disallowed. + Decrypting bytea data with pgp_pub_decrypt is disallowed. This is to avoid outputting invalid character data. Decrypting - originally textual data with pgp_pub_decrypt_bytea is fine. + originally textual data with pgp_pub_decrypt_bytea is fine. - The options parameter can contain option settings, + The options parameter can contain option settings, as described below. @@ -644,7 +644,7 @@ pgp_pub_decrypt_bytea(msg bytea, key bytea [, psw text [, options text ]]) retur pgp_key_id(bytea) returns text - pgp_key_id extracts the key ID of a PGP public or secret key. + pgp_key_id extracts the key ID of a PGP public or secret key. Or it gives the key ID that was used for encrypting the data, if given an encrypted message. @@ -654,7 +654,7 @@ pgp_key_id(bytea) returns text - SYMKEY + SYMKEY The message is encrypted with a symmetric key. 
@@ -662,12 +662,12 @@ pgp_key_id(bytea) returns text - ANYKEY + ANYKEY The message is public-key encrypted, but the key ID has been removed. That means you will need to try all your secret keys on it to see - which one decrypts it. pgcrypto itself does not produce + which one decrypts it. pgcrypto itself does not produce such messages. @@ -675,7 +675,7 @@ pgp_key_id(bytea) returns text Note that different keys may have the same ID. This is rare but a normal event. The client application should then try to decrypt with each one, - to see which fits — like handling ANYKEY. + to see which fits — like handling ANYKEY. @@ -700,8 +700,8 @@ dearmor(data text) returns bytea - If the keys and values arrays are specified, - an armor header is added to the armored format for each + If the keys and values arrays are specified, + an armor header is added to the armored format for each key/value pair. Both arrays must be single-dimensional, and they must be of the same length. The keys and values cannot contain any non-ASCII characters. @@ -719,8 +719,8 @@ dearmor(data text) returns bytea pgp_armor_headers(data text, key out text, value out text) returns setof record - pgp_armor_headers() extracts the armor headers from - data. The return value is a set of rows with two columns, + pgp_armor_headers() extracts the armor headers from + data. The return value is a set of rows with two columns, key and value. If the keys or values contain any non-ASCII characters, they are treated as UTF-8. @@ -924,7 +924,7 @@ gpg --gen-key - The preferred key type is DSA and Elgamal. + The preferred key type is DSA and Elgamal. For RSA encryption you must create either DSA or RSA sign-only key @@ -950,15 +950,15 @@ gpg -a --export-secret-keys KEYID > secret.key - You need to use dearmor() on these keys before giving them to + You need to use dearmor() on these keys before giving them to the PGP functions. Or if you can handle binary data, you can drop -a from the command. For more details see man gpg, - The GNU + The GNU Privacy Handbook and other documentation on - . + . @@ -982,7 +982,7 @@ gpg -a --export-secret-keys KEYID > secret.key No support for several subkeys. This may seem like a problem, as this is common practice. On the other hand, you should not use your regular - GPG/PGP keys with pgcrypto, but create new ones, + GPG/PGP keys with pgcrypto, but create new ones, as the usage scenario is rather different. @@ -1056,15 +1056,15 @@ decrypt_iv(data bytea, key bytea, iv bytea, type text) returns bytea type string is: -algorithm - mode /pad: padding +algorithm - mode /pad: padding - where algorithm is one of: + where algorithm is one of: bf — Blowfish - aes — AES (Rijndael-128) + aes — AES (Rijndael-128, -192 or -256) - and mode is one of: + and mode is one of: @@ -1078,7 +1078,7 @@ decrypt_iv(data bytea, key bytea, iv bytea, type text) returns bytea - and padding is one of: + and padding is one of: @@ -1100,8 +1100,8 @@ encrypt(data, 'fooz', 'bf-cbc/pad:pkcs') - In encrypt_iv and decrypt_iv, the - iv parameter is the initial value for the CBC mode; + In encrypt_iv and decrypt_iv, the + iv parameter is the initial value for the CBC mode; it is ignored for ECB. It is clipped or padded with zeroes if not exactly block size. It defaults to all zeroes in the functions without this parameter. @@ -1119,7 +1119,7 @@ encrypt(data, 'fooz', 'bf-cbc/pad:pkcs') gen_random_bytes(count integer) returns bytea - Returns count cryptographically strong random bytes. + Returns count cryptographically strong random bytes. 
At most 1024 bytes can be extracted at a time. This is to avoid draining the randomness generator pool. @@ -1143,7 +1143,7 @@ gen_random_uuid() returns uuid Configuration - pgcrypto configures itself according to the findings of the + pgcrypto configures itself according to the findings of the main PostgreSQL configure script. The options that affect it are --with-zlib and --with-openssl. @@ -1253,9 +1253,9 @@ gen_random_uuid() returns uuid Security Limitations - All pgcrypto functions run inside the database server. + All pgcrypto functions run inside the database server. That means that all - the data and passwords move between pgcrypto and client + the data and passwords move between pgcrypto and client applications in clear text. Thus you must: @@ -1274,9 +1274,9 @@ gen_random_uuid() returns uuid The implementation does not resist - side-channel + side-channel attacks. For example, the time required for - a pgcrypto decryption function to complete varies among + a pgcrypto decryption function to complete varies among ciphertexts of a given size. @@ -1286,7 +1286,7 @@ gen_random_uuid() returns uuid - + The GNU Privacy Handbook. @@ -1295,7 +1295,7 @@ gen_random_uuid() returns uuid - + How to choose a good password. @@ -1317,36 +1317,32 @@ gen_random_uuid() returns uuid - + OpenPGP message format. - + The MD5 Message-Digest Algorithm. - + HMAC: Keyed-Hashing for Message Authentication. - + Comparison of crypt-des, crypt-md5 and bcrypt algorithms. - + Description of Fortuna CSPRNG. - Jean-Luc Cooke Fortuna-based /dev/random driver for Linux. - - - - Collection of cryptology pointers. + Jean-Luc Cooke Fortuna-based /dev/random driver for Linux. diff --git a/doc/src/sgml/pgfreespacemap.sgml b/doc/src/sgml/pgfreespacemap.sgml index 43e154a2f3..0122d278e3 100644 --- a/doc/src/sgml/pgfreespacemap.sgml +++ b/doc/src/sgml/pgfreespacemap.sgml @@ -8,7 +8,7 @@ - The pg_freespacemap module provides a means for examining the + The pg_freespacemap module provides a means for examining the free space map (FSM). It provides a function called pg_freespace, or two overloaded functions, to be precise. The functions show the value recorded in the free space map for @@ -36,7 +36,7 @@ Returns the amount of free space on the page of the relation, specified - by blkno, according to the FSM. + by blkno, according to the FSM. @@ -50,7 +50,7 @@ Displays the amount of free space on each page of the relation, - according to the FSM. A set of (blkno bigint, avail int2) + according to the FSM. A set of (blkno bigint, avail int2) tuples is returned, one tuple for each page in the relation. @@ -59,7 +59,7 @@ The values stored in the free space map are not exact. They're rounded - to precision of 1/256th of BLCKSZ (32 bytes with default BLCKSZ), and + to precision of 1/256th of BLCKSZ (32 bytes with default BLCKSZ), and they're not kept fully up-to-date as tuples are inserted and updated. diff --git a/doc/src/sgml/pgprewarm.sgml b/doc/src/sgml/pgprewarm.sgml index c090401eca..51afc5df3f 100644 --- a/doc/src/sgml/pgprewarm.sgml +++ b/doc/src/sgml/pgprewarm.sgml @@ -10,7 +10,13 @@ The pg_prewarm module provides a convenient way to load relation data into either the operating system buffer cache - or the PostgreSQL buffer cache. + or the PostgreSQL buffer cache. Prewarming + can be performed manually using the pg_prewarm function, + or can be performed automatically by including pg_prewarm in + . 
In the latter case, the + system will run a background worker which periodically records the contents + of shared buffers in a file called autoprewarm.blocks and + will, using 2 background workers, reload those same blocks after a restart. @@ -55,6 +61,67 @@ pg_prewarm(regclass, mode text default 'buffer', fork text default 'main', cache. For these reasons, prewarming is typically most useful at startup, when caches are largely empty. + + +autoprewarm_start_worker() RETURNS void + + + + Launch the main autoprewarm worker. This will normally happen + automatically, but is useful if automatic prewarm was not configured at + server startup time and you wish to start up the worker at a later time. + + + +autoprewarm_dump_now() RETURNS int8 + + + + Update autoprewarm.blocks immediately. This may be useful + if the autoprewarm worker is not running but you anticipate running it + after the next restart. The return value is the number of records written + to autoprewarm.blocks. + + + + + Configuration Parameters + + + + + pg_prewarm.autoprewarm (boolean) + + pg_prewarm.autoprewarm configuration parameter + + + + + Controls whether the server should run the autoprewarm worker. This is + on by default. This parameter can only be set at server start. + + + + + + + + + pg_prewarm.autoprewarm_interval (int) + + pg_prewarm.autoprewarm_interval configuration parameter + + + + + This is the interval between updates to autoprewarm.blocks. + The default is 300 seconds. If set to 0, the file will not be + dumped at regular intervals, but only when the server is shut down. + + + + + diff --git a/doc/src/sgml/pgrowlocks.sgml b/doc/src/sgml/pgrowlocks.sgml index 65d532e081..60e13393ea 100644 --- a/doc/src/sgml/pgrowlocks.sgml +++ b/doc/src/sgml/pgrowlocks.sgml @@ -33,11 +33,11 @@ pgrowlocks(text) returns setof record The parameter is the name of a table. The result is a set of records, with one row for each locked row within the table. The output columns - are shown in . + are shown in .
- <function>pgrowlocks</> Output Columns + <function>pgrowlocks</function> Output Columns @@ -70,12 +70,12 @@ pgrowlocks(text) returns setof record Transaction IDs of lockers (more than one if multitransaction) - lock_type + modes text[] Lock mode of lockers (more than one if multitransaction), - an array of Key Share, Share, - For No Key Update, No Key Update, - For Update, Update. + an array of Key Share, Share, + For No Key Update, No Key Update, + For Update, Update. @@ -89,7 +89,7 @@ pgrowlocks(text) returns setof record
- pgrowlocks takes AccessShareLock for the + pgrowlocks takes AccessShareLock for the target table and reads each row one by one to collect the row locking information. This is not very speedy for a large table. Note that: @@ -127,14 +127,14 @@ SELECT * FROM accounts AS a, pgrowlocks('accounts') AS p Sample Output - -test=# SELECT * FROM pgrowlocks('t1'); - locked_row | lock_type | locker | multi | xids | pids -------------+-----------+--------+-------+-----------+--------------- - (0,1) | Shared | 19 | t | {804,805} | {29066,29068} - (0,2) | Shared | 19 | t | {804,805} | {29066,29068} - (0,3) | Exclusive | 804 | f | {804} | {29066} - (0,4) | Exclusive | 804 | f | {804} | {29066} + +=# SELECT * FROM pgrowlocks('t1'); + locked_row | locker | multi | xids | modes | pids +------------+--------+-------+-------+----------------+-------- + (0,1) | 609 | f | {609} | {"For Share"} | {3161} + (0,2) | 609 | f | {609} | {"For Share"} | {3161} + (0,3) | 607 | f | {607} | {"For Update"} | {3107} + (0,4) | 607 | f | {607} | {"For Update"} | {3107} (4 rows) diff --git a/doc/src/sgml/pgstandby.sgml b/doc/src/sgml/pgstandby.sgml index bf4edea9f1..2cc58fe356 100644 --- a/doc/src/sgml/pgstandby.sgml +++ b/doc/src/sgml/pgstandby.sgml @@ -31,48 +31,48 @@ Description - pg_standby supports creation of a warm standby + pg_standby supports creation of a warm standby database server. It is designed to be a production-ready program, as well as a customizable template should you require specific modifications. - pg_standby is designed to be a waiting - restore_command, which is needed to turn a standard + pg_standby is designed to be a waiting + restore_command, which is needed to turn a standard archive recovery into a warm standby operation. Other configuration is required as well, all of which is described in the main - server manual (see ). + server manual (see ). To configure a standby - server to use pg_standby, put this into its + server to use pg_standby, put this into its recovery.conf configuration file: -restore_command = 'pg_standby archiveDir %f %p %r' +restore_command = 'pg_standby archiveDir %f %p %r' - where archiveDir is the directory from which WAL segment + where archiveDir is the directory from which WAL segment files should be restored. - If restartwalfile is specified, normally by using the + If restartwalfile is specified, normally by using the %r macro, then all WAL files logically preceding this - file will be removed from archivelocation. This minimizes + file will be removed from archivelocation. This minimizes the number of files that need to be retained, while preserving crash-restart capability. Use of this parameter is appropriate if the - archivelocation is a transient staging area for this - particular standby server, but not when the - archivelocation is intended as a long-term WAL archive area. + archivelocation is a transient staging area for this + particular standby server, but not when the + archivelocation is intended as a long-term WAL archive area. pg_standby assumes that - archivelocation is a directory readable by the - server-owning user. If restartwalfile (or -k) + archivelocation is a directory readable by the + server-owning user. If restartwalfile (or -k) is specified, - the archivelocation directory must be writable too. + the archivelocation directory must be writable too. 
- There are two ways to fail over to a warm standby database server + There are two ways to fail over to a warm standby database server when the master server fails: @@ -85,7 +85,7 @@ restore_command = 'pg_standby archiveDir %f %p %r' the standby server has fallen behind, but if there is a lot of unapplied WAL it can be a long time before the standby server becomes ready. To trigger a smart failover, create a trigger file containing - the word smart, or just create it and leave it empty. + the word smart, or just create it and leave it empty. @@ -96,8 +96,8 @@ restore_command = 'pg_standby archiveDir %f %p %r' In fast failover, the server is brought up immediately. Any WAL files in the archive that have not yet been applied will be ignored, and all transactions in those files are lost. To trigger a fast failover, - create a trigger file and write the word fast into it. - pg_standby can also be configured to execute a fast + create a trigger file and write the word fast into it. + pg_standby can also be configured to execute a fast failover automatically if no new WAL file appears within a defined interval.
@@ -120,7 +120,7 @@ restore_command = 'pg_standby archiveDir %f %p %r' - Use cp or copy command to restore WAL files + Use cp or copy command to restore WAL files from archive. This is the only supported behavior so this option is useless. @@ -130,7 +130,7 @@ restore_command = 'pg_standby archiveDir %f %p %r' - Print lots of debug logging output on stderr. + Print lots of debug logging output on stderr. @@ -147,8 +147,8 @@ restore_command = 'pg_standby archiveDir %f %p %r' restartwalfile is specified, since that specification method is more accurate in determining the correct archive cut-off point. - Use of this parameter is deprecated as of - PostgreSQL 8.3; it is safer and more efficient to + Use of this parameter is deprecated as of + PostgreSQL 8.3; it is safer and more efficient to specify a restartwalfile parameter. A too small setting could result in removal of files that are still needed for a restart of the standby server, while a too large setting wastes @@ -158,12 +158,12 @@ restore_command = 'pg_standby archiveDir %f %p %r' - maxretries + maxretries Set the maximum number of times to retry the copy command if it fails (default 3). After each failure, we wait for - sleeptime * num_retries + sleeptime * num_retries so that the wait time increases progressively. So by default, we will wait 5 secs, 10 secs, then 15 secs before reporting the failure back to the standby server. This will be @@ -174,33 +174,33 @@ restore_command = 'pg_standby archiveDir %f %p %r' - sleeptime + sleeptime Set the number of seconds (up to 60, default 5) to sleep between tests to see if the WAL file to be restored is available in the archive yet. The default setting is not necessarily - recommended; consult for discussion. + recommended; consult for discussion. - triggerfile + triggerfile Specify a trigger file whose presence should cause failover. It is recommended that you use a structured file name to avoid confusion as to which server is being triggered when multiple servers exist on the same system; for example - /tmp/pgsql.trigger.5432. + /tmp/pgsql.trigger.5432. - - + + Print the pg_standby version and exit. @@ -209,21 +209,21 @@ restore_command = 'pg_standby archiveDir %f %p %r' - maxwaittime + maxwaittime Set the maximum number of seconds to wait for the next WAL file, after which a fast failover will be performed. A setting of zero (the default) means wait forever. The default setting is not necessarily recommended; - consult for discussion. + consult for discussion. - - + + Show help about pg_standby command line @@ -241,18 +241,18 @@ restore_command = 'pg_standby archiveDir %f %p %r' pg_standby is designed to work with - PostgreSQL 8.2 and later. + PostgreSQL 8.2 and later. - PostgreSQL 8.3 provides the %r macro, + PostgreSQL 8.3 provides the %r macro, which is designed to let pg_standby know the - last file it needs to keep. With PostgreSQL 8.2, the + last file it needs to keep. With PostgreSQL 8.2, the -k option must be used if archive cleanup is required. This option remains available in 8.3, but its use is deprecated. - PostgreSQL 8.4 provides the - recovery_end_command option. Without this option + PostgreSQL 8.4 provides the + recovery_end_command option. Without this option a leftover trigger file can be hazardous. 
@@ -276,13 +276,13 @@ restore_command = 'pg_standby -d -s 2 -t /tmp/pgsql.trigger.5442 .../archive %f recovery_end_command = 'rm -f /tmp/pgsql.trigger.5442' where the archive directory is physically located on the standby server, - so that the archive_command is accessing it across NFS, - but the files are local to the standby (enabling use of ln). + so that the archive_command is accessing it across NFS, + but the files are local to the standby (enabling use of ln). This will: - produce debugging output in standby.log + produce debugging output in standby.log @@ -293,7 +293,7 @@ recovery_end_command = 'rm -f /tmp/pgsql.trigger.5442' stop waiting only when a trigger file called - /tmp/pgsql.trigger.5442 appears, + /tmp/pgsql.trigger.5442 appears, and perform failover according to its content @@ -320,18 +320,18 @@ restore_command = 'pg_standby -d -s 5 -t C:\pgsql.trigger.5442 ...\archive %f %p recovery_end_command = 'del C:\pgsql.trigger.5442' Note that backslashes need to be doubled in the - archive_command, but not in the - restore_command or recovery_end_command. + archive_command, but not in the + restore_command or recovery_end_command. This will: - use the copy command to restore WAL files from archive + use the copy command to restore WAL files from archive - produce debugging output in standby.log + produce debugging output in standby.log @@ -342,7 +342,7 @@ recovery_end_command = 'del C:\pgsql.trigger.5442' stop waiting only when a trigger file called - C:\pgsql.trigger.5442 appears, + C:\pgsql.trigger.5442 appears, and perform failover according to its content @@ -360,16 +360,16 @@ recovery_end_command = 'del C:\pgsql.trigger.5442' - The copy command on Windows sets the final file size + The copy command on Windows sets the final file size before the file is completely copied, which would ordinarily confuse pg_standby. Therefore - pg_standby waits sleeptime - seconds once it sees the proper file size. GNUWin32's cp + pg_standby waits sleeptime + seconds once it sees the proper file size. GNUWin32's cp sets the file size only after the file copy is complete. - Since the Windows example uses copy at both ends, either + Since the Windows example uses copy at both ends, either or both servers might be accessing the archive directory across the network. @@ -388,7 +388,7 @@ recovery_end_command = 'del C:\pgsql.trigger.5442' See Also - + diff --git a/doc/src/sgml/pgstatstatements.sgml b/doc/src/sgml/pgstatstatements.sgml index f9dd43e891..c0217ed485 100644 --- a/doc/src/sgml/pgstatstatements.sgml +++ b/doc/src/sgml/pgstatstatements.sgml @@ -13,20 +13,20 @@
- The module must be loaded by adding pg_stat_statements to - in - postgresql.conf, because it requires additional shared memory. + The module must be loaded by adding pg_stat_statements to + in + postgresql.conf, because it requires additional shared memory. This means that a server restart is needed to add or remove the module. When pg_stat_statements is loaded, it tracks statistics across all databases of the server. To access and manipulate - these statistics, the module provides a view, pg_stat_statements, - and the utility functions pg_stat_statements_reset and - pg_stat_statements. These are not available globally but + these statistics, the module provides a view, pg_stat_statements, + and the utility functions pg_stat_statements_reset and + pg_stat_statements. These are not available globally but can be enabled for a specific database with - CREATE EXTENSION pg_stat_statements. + CREATE EXTENSION pg_stat_statements. @@ -34,15 +34,15 @@ The statistics gathered by the module are made available via a - view named pg_stat_statements. This view + view named pg_stat_statements. This view contains one row for each distinct database ID, user ID and query ID (up to the maximum number of distinct statements that the module can track). The columns of the view are shown in - . + . - <structname>pg_stat_statements</> Columns + <structname>pg_stat_statements</structname> Columns @@ -207,7 +207,7 @@ Total time the statement spent reading blocks, in milliseconds - (if is enabled, otherwise zero) + (if is enabled, otherwise zero) @@ -217,7 +217,7 @@ Total time the statement spent writing blocks, in milliseconds - (if is enabled, otherwise zero) + (if is enabled, otherwise zero) @@ -234,9 +234,9 @@ - Plannable queries (that is, SELECT, INSERT, - UPDATE, and DELETE) are combined into a single - pg_stat_statements entry whenever they have identical query + Plannable queries (that is, SELECT, INSERT, + UPDATE, and DELETE) are combined into a single + pg_stat_statements entry whenever they have identical query structures according to an internal hash calculation. Typically, two queries will be considered the same for this purpose if they are semantically equivalent except for the values of literal constants @@ -247,16 +247,16 @@ When a constant's value has been ignored for purposes of matching the query to other queries, the constant is replaced by a parameter symbol, such - as $1, in the pg_stat_statements + as $1, in the pg_stat_statements display. The rest of the query text is that of the first query that had the - particular queryid hash value associated with the - pg_stat_statements entry. + particular queryid hash value associated with the + pg_stat_statements entry. In some cases, queries with visibly different texts might get merged into a - single pg_stat_statements entry. Normally this will happen + single pg_stat_statements entry. Normally this will happen only for semantically equivalent queries, but there is a small chance of hash collisions causing unrelated queries to be merged into one entry. (This cannot happen for queries belonging to different users or databases, @@ -264,41 +264,41 @@ - Since the queryid hash value is computed on the + Since the queryid hash value is computed on the post-parse-analysis representation of the queries, the opposite is also possible: queries with identical texts might appear as separate entries, if they have different meanings as a result of - factors such as different search_path settings. + factors such as different search_path settings. 
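 As a rough illustration of how the normalized texts and their queryid values appear, one might query the view like this (a sketch; the view must already be created in the current database, and total_time is used only for ordering):

SELECT queryid, calls, rows, left(query, 60) AS query
  FROM pg_stat_statements
 ORDER BY total_time DESC
 LIMIT 5;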
- Consumers of pg_stat_statements may wish to use - queryid (perhaps in combination with - dbid and userid) as a more stable + Consumers of pg_stat_statements may wish to use + queryid (perhaps in combination with + dbid and userid) as a more stable and reliable identifier for each entry than its query text. However, it is important to understand that there are only limited - guarantees around the stability of the queryid hash + guarantees around the stability of the queryid hash value. Since the identifier is derived from the post-parse-analysis tree, its value is a function of, among other things, the internal object identifiers appearing in this representation. This has some counterintuitive implications. For example, - pg_stat_statements will consider two apparently-identical + pg_stat_statements will consider two apparently-identical queries to be distinct, if they reference a table that was dropped and recreated between the executions of the two queries. The hashing process is also sensitive to differences in machine architecture and other facets of the platform. - Furthermore, it is not safe to assume that queryid - will be stable across major versions of PostgreSQL. + Furthermore, it is not safe to assume that queryid + will be stable across major versions of PostgreSQL. - As a rule of thumb, queryid values can be assumed to be + As a rule of thumb, queryid values can be assumed to be stable and comparable only so long as the underlying server version and catalog metadata details stay exactly the same. Two servers participating in replication based on physical WAL replay can be expected - to have identical queryid values for the same query. + to have identical queryid values for the same query. However, logical replication schemes do not promise to keep replicas - identical in all relevant details, so queryid will + identical in all relevant details, so queryid will not be a useful identifier for accumulating costs across a set of logical replicas. If in doubt, direct testing is recommended. @@ -306,13 +306,13 @@ The parameter symbols used to replace constants in representative query texts start from the next number after the - highest $n parameter in the original query - text, or $1 if there was none. It's worth noting that in + highest $n parameter in the original query + text, or $1 if there was none. It's worth noting that in some cases there may be hidden parameter symbols that affect this - numbering. For example, PL/pgSQL uses hidden parameter + numbering. For example, PL/pgSQL uses hidden parameter symbols to insert values of function local variables into queries, so that - a PL/pgSQL statement like SELECT i + 1 INTO j - would have representative text like SELECT i + $2. + a PL/pgSQL statement like SELECT i + 1 INTO j + would have representative text like SELECT i + $2. @@ -320,11 +320,11 @@ not consume shared memory. Therefore, even very lengthy query texts can be stored successfully. However, if many long query texts are accumulated, the external file might grow unmanageably large. As a - recovery method if that happens, pg_stat_statements may + recovery method if that happens, pg_stat_statements may choose to discard the query texts, whereupon all existing entries in - the pg_stat_statements view will show - null query fields, though the statistics associated with - each queryid are preserved. If this happens, consider + the pg_stat_statements view will show + null query fields, though the statistics associated with + each queryid are preserved. 
If this happens, consider reducing pg_stat_statements.max to prevent recurrences. @@ -345,7 +345,7 @@ pg_stat_statements_reset discards all statistics - gathered so far by pg_stat_statements. + gathered so far by pg_stat_statements. By default, this function can only be executed by superusers. @@ -363,17 +363,17 @@ The pg_stat_statements view is defined in - terms of a function also named pg_stat_statements. + terms of a function also named pg_stat_statements. It is possible for clients to call the pg_stat_statements function directly, and by specifying showtext := false have query text be omitted (that is, the OUT argument that corresponds - to the view's query column will return nulls). This + to the view's query column will return nulls). This feature is intended to support external tools that might wish to avoid the overhead of repeatedly retrieving query texts of indeterminate length. Such tools can instead cache the first query text observed for each entry themselves, since that is - all pg_stat_statements itself does, and then retrieve + all pg_stat_statements itself does, and then retrieve query texts only as needed. Since the server stores query texts in a file, this approach may reduce physical I/O for repeated examination of the pg_stat_statements data. @@ -396,7 +396,7 @@ pg_stat_statements.max is the maximum number of statements tracked by the module (i.e., the maximum number of rows - in the pg_stat_statements view). If more distinct + in the pg_stat_statements view). If more distinct statements than that are observed, information about the least-executed statements is discarded. The default value is 5000. @@ -414,11 +414,11 @@ pg_stat_statements.track controls which statements are counted by the module. - Specify top to track top-level statements (those issued - directly by clients), all to also track nested statements - (such as statements invoked within functions), or none to + Specify top to track top-level statements (those issued + directly by clients), all to also track nested statements + (such as statements invoked within functions), or none to disable statement statistics collection. - The default value is top. + The default value is top. Only superusers can change this setting. @@ -433,9 +433,9 @@ pg_stat_statements.track_utility controls whether utility commands are tracked by the module. Utility commands are - all those other than SELECT, INSERT, - UPDATE and DELETE. - The default value is on. + all those other than SELECT, INSERT, + UPDATE and DELETE. + The default value is on. Only superusers can change this setting. @@ -450,10 +450,10 @@ pg_stat_statements.save specifies whether to save statement statistics across server shutdowns. - If it is off then statistics are not saved at + If it is off then statistics are not saved at shutdown nor reloaded at server start. - The default value is on. - This parameter can only be set in the postgresql.conf + The default value is on. + This parameter can only be set in the postgresql.conf file or on the server command line. @@ -464,11 +464,11 @@ The module requires additional shared memory proportional to pg_stat_statements.max. Note that this memory is consumed whenever the module is loaded, even if - pg_stat_statements.track is set to none. + pg_stat_statements.track is set to none. - These parameters must be set in postgresql.conf. + These parameters must be set in postgresql.conf. 
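 The values in effect can be inspected from SQL at any time via the standard pg_settings view, for example (a sketch; the parameters only appear once the module has been loaded):

SELECT name, setting
  FROM pg_settings
 WHERE name LIKE 'pg_stat_statements.%';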
Typical usage might be: diff --git a/doc/src/sgml/pgstattuple.sgml b/doc/src/sgml/pgstattuple.sgml index a7c67ae645..b17b3c59e0 100644 --- a/doc/src/sgml/pgstattuple.sgml +++ b/doc/src/sgml/pgstattuple.sgml @@ -13,12 +13,14 @@ - As these functions return detailed page-level information, only the superuser - has EXECUTE privileges on them upon installation. After the functions have - been installed, users may issue GRANT commands to change - the privileges on the functions to allow non-superusers to execute them. Members - of the pg_stat_scan_tables role are granted access by default. See - the description of the command for specifics. + Because these functions return detailed page-level information, access is + restricted by default. By default, only the + role pg_stat_scan_tables has EXECUTE + privilege. Superusers of course bypass this restriction. After the + extension has been installed, users may issue GRANT + commands to change the privileges on the functions to allow others to + execute them. However, it might be preferable to add those users to + the pg_stat_scan_tables role instead. @@ -30,13 +32,13 @@ pgstattuple - pgstattuple(regclass) returns record + pgstattuple(regclass) returns record pgstattuple returns a relation's physical length, - percentage of dead tuples, and other info. This may help users + percentage of dead tuples, and other info. This may help users to determine whether vacuum is necessary or not. The argument is the target relation's name (optionally schema-qualified) or OID. For example: @@ -53,7 +55,7 @@ dead_tuple_percent | 0.69 free_space | 8932 free_percent | 1.95 - The output columns are described in . + The output columns are described in .
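 Returning to the access rules described above, the simplest way to let a non-superuser call these functions is to grant membership in the built-in role; a narrower per-function grant is also possible (the user name is hypothetical):

GRANT pg_stat_scan_tables TO monitoring_user;

-- or, more selectively:
GRANT EXECUTE ON FUNCTION pgstattuple(regclass) TO monitoring_user;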
@@ -135,15 +137,15 @@ free_percent | 1.95 - pgstattuple judges a tuple is dead if - HeapTupleSatisfiesDirty returns false. + pgstattuple judges a tuple is dead if + HeapTupleSatisfiesDirty returns false. - pgstattuple(text) returns record + pgstattuple(text) returns record @@ -161,7 +163,7 @@ free_percent | 1.95 pgstatindex - pgstatindex(regclass) returns record + pgstatindex(regclass) returns record @@ -225,7 +227,7 @@ leaf_fragmentation | 0 internal_pages bigint - Number of internal (upper-level) pages + Number of internal (upper-level) pages @@ -264,14 +266,14 @@ leaf_fragmentation | 0 - The reported index_size will normally correspond to one more + The reported index_size will normally correspond to one more page than is accounted for by internal_pages + leaf_pages + empty_pages + deleted_pages, because it also includes the index's metapage. - As with pgstattuple, the results are accumulated + As with pgstattuple, the results are accumulated page-by-page, and should not be expected to represent an instantaneous snapshot of the whole index. @@ -280,7 +282,7 @@ leaf_fragmentation | 0 - pgstatindex(text) returns record + pgstatindex(text) returns record @@ -298,7 +300,7 @@ leaf_fragmentation | 0 pgstatginindex - pgstatginindex(regclass) returns record + pgstatginindex(regclass) returns record @@ -358,7 +360,7 @@ pending_tuples | 0 pgstathashindex - pgstathashindex(regclass) returns record + pgstathashindex(regclass) returns record @@ -453,7 +455,7 @@ free_percent | 61.8005949100872 pg_relpages - pg_relpages(regclass) returns bigint + pg_relpages(regclass) returns bigint @@ -466,7 +468,7 @@ free_percent | 61.8005949100872 - pg_relpages(text) returns bigint + pg_relpages(text) returns bigint @@ -484,7 +486,7 @@ free_percent | 61.8005949100872 pgstattuple_approx - pgstattuple_approx(regclass) returns record + pgstattuple_approx(regclass) returns record @@ -507,7 +509,7 @@ dead_tuple_percent | 0 approx_free_space | 11996 approx_free_percent | 2.09 - The output columns are described in . + The output columns are described in . diff --git a/doc/src/sgml/pgtrgm.sgml b/doc/src/sgml/pgtrgm.sgml index 775a7b8be7..83b0033d7a 100644 --- a/doc/src/sgml/pgtrgm.sgml +++ b/doc/src/sgml/pgtrgm.sgml @@ -58,8 +58,8 @@ The functions provided by the pg_trgm module - are shown in , the operators - in . + are shown in , the operators + in .
@@ -99,19 +99,31 @@ real - Returns a number that indicates how similar the first string - to the most similar word of the second string. The function searches in - the second string a most similar word not a most similar substring. The - range of the result is zero (indicating that the two strings are - completely dissimilar) to one (indicating that the first string is - identical to one of the words of the second string). + Returns a number that indicates the greatest similarity between + the set of trigrams in the first string and any continuous extent + of an ordered set of trigrams in the second string. For details, see + the explanation below. + + + + + strict_word_similarity(text, text) + strict_word_similarity + + real + + Same as word_similarity(text, text), but forces + extent boundaries to match word boundaries. Since we don't have + cross-word trigrams, this function actually returns greatest similarity + between first string and any continuous extent of words of the second + string. show_limit()show_limit real - Returns the current similarity threshold used by the % + Returns the current similarity threshold used by the % operator. This sets the minimum similarity between two words for them to be considered similar enough to be misspellings of each other, for example @@ -122,7 +134,7 @@ set_limit(real)set_limit real - Sets the current similarity threshold that is used by the % + Sets the current similarity threshold that is used by the % operator. The threshold must be between 0 and 1 (default is 0.3). Returns the same value passed in (deprecated). @@ -131,6 +143,57 @@
+ + Consider the following example: + + +# SELECT word_similarity('word', 'two words'); + word_similarity +----------------- + 0.8 +(1 row) + + + In the first string, the set of trigrams is + {" w"," wo","wor","ord","rd "}. + In the second string, the ordered set of trigrams is + {" t"," tw","two","wo "," w"," wo","wor","ord","rds","ds "}. + The most similar extent of an ordered set of trigrams in the second string + is {" w"," wo","wor","ord"}, and the similarity is + 0.8. + + + + This function returns a value that can be approximately understood as the + greatest similarity between the first string and any substring of the second + string. However, this function does not add padding to the boundaries of + the extent. Thus, the number of additional characters present in the + second string is not considered, except for the mismatched word boundaries. + + + + At the same time, strict_word_similarity(text, text) + selects an extent of words in the second string. In the example above, + strict_word_similarity(text, text) would select the + extent of a single word 'words', whose set of trigrams is + {" w"," wo","wor","ord","rds","ds "}. + + +# SELECT strict_word_similarity('word', 'two words'), similarity('word', 'words'); + strict_word_similarity | similarity +------------------------+------------ + 0.571429 | 0.571429 +(1 row) + + + + + Thus, the strict_word_similarity(text, text) function + is useful for finding the similarity to whole words, while + word_similarity(text, text) is more suitable for + finding the similarity for parts of words. + + <filename>pg_trgm</filename> Operators @@ -144,56 +207,94 @@ - text % text + text % text boolean - Returns true if its arguments have a similarity that is + Returns true if its arguments have a similarity that is greater than the current similarity threshold set by - pg_trgm.similarity_threshold. + pg_trgm.similarity_threshold. - text <% text + text <% text boolean - Returns true if its first argument has the similar word in - the second argument and they have a similarity that is greater than the - current word similarity threshold set by - pg_trgm.word_similarity_threshold parameter. + Returns true if the similarity between the trigram + set in the first argument and a continuous extent of an ordered trigram + set in the second argument is greater than the current word similarity + threshold set by pg_trgm.word_similarity_threshold + parameter. - text %> text + text %> text boolean - Commutator of the <% operator. + Commutator of the <% operator. - text <-> text + text <<% text + boolean + + Returns true if its second argument has a continuous + extent of an ordered trigram set that matches word boundaries, + and its similarity to the trigram set of the first argument is greater + than the current strict word similarity threshold set by the + pg_trgm.strict_word_similarity_threshold parameter. + + + + text %>> text + boolean + + Commutator of the <<% operator. + + + + text <-> text real - Returns the distance between the arguments, that is - one minus the similarity() value. + Returns the distance between the arguments, that is + one minus the similarity() value. - text <<-> text + text <<-> text real - Returns the distance between the arguments, that is - one minus the word_similarity() value. + Returns the distance between the arguments, that is + one minus the word_similarity() value. - text <->> text + text <->> text real - Commutator of the <<-> operator. + Commutator of the <<-> operator. 
+ + + + + text <<<-> text + + real + + Returns the distance between the arguments, that is + one minus the strict_word_similarity() value. + + + + + text <->>> text + + real + + Commutator of the <<<-> operator. @@ -207,31 +308,31 @@ - pg_trgm.similarity_threshold (real) + pg_trgm.similarity_threshold (real) - pg_trgm.similarity_threshold configuration parameter + pg_trgm.similarity_threshold configuration parameter - Sets the current similarity threshold that is used by the % + Sets the current similarity threshold that is used by the % operator. The threshold must be between 0 and 1 (default is 0.3). - pg_trgm.word_similarity_threshold (real) + pg_trgm.word_similarity_threshold (real) - pg_trgm.word_similarity_threshold configuration parameter + pg_trgm.word_similarity_threshold configuration parameter Sets the current word similarity threshold that is used by - <% and %> operators. The threshold + <% and %> operators. The threshold must be between 0 and 1 (default is 0.6). @@ -247,8 +348,8 @@ operator classes that allow you to create an index over a text column for the purpose of very fast similarity searches. These index types support the above-described similarity operators, and additionally support - trigram-based index searches for LIKE, ILIKE, - ~ and ~* queries. (These indexes do not + trigram-based index searches for LIKE, ILIKE, + ~ and ~* queries. (These indexes do not support equality nor simple comparison operators, so you may need a regular B-tree index too.) @@ -267,16 +368,16 @@ CREATE INDEX trgm_idx ON test_trgm USING GIN (t gin_trgm_ops); - At this point, you will have an index on the t column that + At this point, you will have an index on the t column that you can use for similarity searching. A typical query is -SELECT t, similarity(t, 'word') AS sml +SELECT t, similarity(t, 'word') AS sml FROM test_trgm - WHERE t % 'word' + WHERE t % 'word' ORDER BY sml DESC, t; This will return all values in the text column that are sufficiently - similar to word, sorted from best match to worst. The + similar to word, sorted from best match to worst. The index will be used to make this a fast operation even over very large data sets. @@ -284,7 +385,7 @@ SELECT t, similarity(t, 'word') AS sml A variant of the above query is -SELECT t, t <-> 'word' AS dist +SELECT t, t <-> 'word' AS dist FROM test_trgm ORDER BY dist LIMIT 10; @@ -294,24 +395,38 @@ SELECT t, t <-> 'word' AS dist - Also you can use an index on the t column for word - similarity. For example: + Also you can use an index on the t column for word + similarity or strict word similarity. Typical queries are: + +SELECT t, word_similarity('word', t) AS sml + FROM test_trgm + WHERE 'word' <% t + ORDER BY sml DESC, t; + + and -SELECT t, word_similarity('word', t) AS sml +SELECT t, strict_word_similarity('word', t) AS sml FROM test_trgm - WHERE 'word' <% t + WHERE 'word' <<% t ORDER BY sml DESC, t; - This will return all values in the text column that have a word - which sufficiently similar to word, sorted from best - match to worst. The index will be used to make this a fast operation - even over very large data sets. + This will return all values in the text column for which there is a + continuous extent in the corresponding ordered trigram set that is + sufficiently similar to the trigram set of word, + sorted from best match to worst. The index will be used to make this + a fast operation even over very large data sets. 
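 That the index is in fact chosen can be confirmed with EXPLAIN, for instance by wrapping one of the queries above (a sketch reusing the test_trgm table and its GIN index; the exact plan shape depends on data volume and settings):

EXPLAIN
SELECT t, word_similarity('word', t) AS sml
  FROM test_trgm
 WHERE 'word' <% t
 ORDER BY sml DESC, t;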
- A variant of the above query is + Possible variants of the above queries are: + +SELECT t, 'word' <<-> t AS dist + FROM test_trgm + ORDER BY dist LIMIT 10; + + and -SELECT t, 'word' <<-> t AS dist +SELECT t, 'word' <<<-> t AS dist FROM test_trgm ORDER BY dist LIMIT 10; @@ -321,8 +436,8 @@ SELECT t, 'word' <<-> t AS dist - Beginning in PostgreSQL 9.1, these index types also support - index searches for LIKE and ILIKE, for example + Beginning in PostgreSQL 9.1, these index types also support + index searches for LIKE and ILIKE, for example SELECT * FROM test_trgm WHERE t LIKE '%foo%bar'; @@ -333,9 +448,9 @@ SELECT * FROM test_trgm WHERE t LIKE '%foo%bar'; - Beginning in PostgreSQL 9.3, these index types also support + Beginning in PostgreSQL 9.3, these index types also support index searches for regular-expression matches - (~ and ~* operators), for example + (~ and ~* operators), for example SELECT * FROM test_trgm WHERE t ~ '(foo|bar)'; @@ -347,7 +462,7 @@ SELECT * FROM test_trgm WHERE t ~ '(foo|bar)'; - For both LIKE and regular-expression searches, keep in mind + For both LIKE and regular-expression searches, keep in mind that a pattern with no extractable trigrams will degenerate to a full-index scan. @@ -377,9 +492,9 @@ CREATE TABLE words AS SELECT word FROM ts_stat('SELECT to_tsvector(''simple'', bodytext) FROM documents'); - where documents is a table that has a text field - bodytext that we wish to search. The reason for using - the simple configuration with the to_tsvector + where documents is a table that has a text field + bodytext that we wish to search. The reason for using + the simple configuration with the to_tsvector function, instead of using a language-specific configuration, is that we want a list of the original (unstemmed) words. @@ -399,7 +514,7 @@ CREATE INDEX words_idx ON words USING GIN (word gin_trgm_ops); - Since the words table has been generated as a separate, + Since the words table has been generated as a separate, static table, it will need to be periodically regenerated so that it remains reasonably up-to-date with the document collection. Keeping it exactly current is usually unnecessary. diff --git a/doc/src/sgml/pgvisibility.sgml b/doc/src/sgml/pgvisibility.sgml index d466a3bce8..75336946a6 100644 --- a/doc/src/sgml/pgvisibility.sgml +++ b/doc/src/sgml/pgvisibility.sgml @@ -8,7 +8,7 @@ - The pg_visibility module provides a means for examining the + The pg_visibility module provides a means for examining the visibility map (VM) and page-level visibility information of a table. It also provides functions to check the integrity of a visibility map and to force it to be rebuilt. @@ -28,13 +28,13 @@ These two bits will normally agree, but the page's all-visible bit can sometimes be set while the visibility map bit is clear after a crash recovery. The reported values can also disagree because of a change that - occurs after pg_visibility examines the visibility map and + occurs after pg_visibility examines the visibility map and before it examines the data page. Any event that causes data corruption can also cause these bits to disagree. - Functions that display information about PD_ALL_VISIBLE bits + Functions that display information about PD_ALL_VISIBLE bits are much more costly than those that only consult the visibility map, because they must read the relation's data blocks rather than only the (much smaller) visibility map. 
Functions that check the relation's @@ -61,7 +61,7 @@ Returns the all-visible and all-frozen bits in the visibility map for the given block of the given relation, plus the - PD_ALL_VISIBLE bit of that block. + PD_ALL_VISIBLE bit of that block. @@ -82,7 +82,7 @@ Returns the all-visible and all-frozen bits in the visibility map for - each block of the given relation, plus the PD_ALL_VISIBLE + each block of the given relation, plus the PD_ALL_VISIBLE bit of each block. @@ -130,7 +130,7 @@ Truncates the visibility map for the given relation. This function is useful if you believe that the visibility map for the relation is - corrupt and wish to force rebuilding it. The first VACUUM + corrupt and wish to force rebuilding it. The first VACUUM executed on the given relation after this function is executed will scan every page in the relation and rebuild the visibility map. (Until that is done, queries will treat the visibility map as containing all zeroes.) diff --git a/doc/src/sgml/planstats.sgml b/doc/src/sgml/planstats.sgml index 838fcda6d2..ef643ad064 100644 --- a/doc/src/sgml/planstats.sgml +++ b/doc/src/sgml/planstats.sgml @@ -5,7 +5,7 @@ This chapter builds on the material covered in and to show some + linkend="using-explain"/> and to show some additional details about how the planner uses the system statistics to estimate the number of rows each part of a query might return. This is a significant part of the planning process, @@ -28,13 +28,13 @@ - The examples shown below use tables in the PostgreSQL + The examples shown below use tables in the PostgreSQL regression test database. The outputs shown are taken from version 8.3. The behavior of earlier (or later) versions might vary. - Note also that since ANALYZE uses random sampling + Note also that since ANALYZE uses random sampling while producing statistics, the results will change slightly after - any new ANALYZE. + any new ANALYZE. @@ -49,7 +49,7 @@ EXPLAIN SELECT * FROM tenk1; How the planner determines the cardinality of tenk1 - is covered in , but is repeated here for + is covered in , but is repeated here for completeness. The number of pages and rows is looked up in pg_class: @@ -61,8 +61,8 @@ SELECT relpages, reltuples FROM pg_class WHERE relname = 'tenk1'; 358 | 10000 - These numbers are current as of the last VACUUM or - ANALYZE on the table. The planner then fetches the + These numbers are current as of the last VACUUM or + ANALYZE on the table. The planner then fetches the actual current number of pages in the table (this is a cheap operation, not requiring a table scan). If that is different from relpages then @@ -150,7 +150,7 @@ EXPLAIN SELECT * FROM tenk1 WHERE stringu1 = 'CRAAAA'; and looks up the selectivity function for =, which is eqsel. For equality estimation the histogram is not useful; instead the list of most - common values (MCVs) is used to determine the + common values (MCVs) is used to determine the selectivity. 
Let's have a look at the MCVs, with some additional columns that will be useful later: @@ -165,7 +165,7 @@ most_common_freqs | {0.00333333,0.003,0.003,0.003,0.003,0.003,0.003,0.003,0.003, - Since CRAAAA appears in the list of MCVs, the selectivity is + Since CRAAAA appears in the list of MCVs, the selectivity is merely the corresponding entry in the list of most common frequencies (MCFs): @@ -225,18 +225,18 @@ rows = 10000 * 0.0014559 - The previous example with unique1 < 1000 was an + The previous example with unique1 < 1000 was an oversimplification of what scalarltsel really does; now that we have seen an example of the use of MCVs, we can fill in some more detail. The example was correct as far as it went, because since - unique1 is a unique column it has no MCVs (obviously, no + unique1 is a unique column it has no MCVs (obviously, no value is any more common than any other value). For a non-unique column, there will normally be both a histogram and an MCV list, and the histogram does not include the portion of the column - population represented by the MCVs. We do things this way because + population represented by the MCVs. We do things this way because it allows more precise estimation. In this situation scalarltsel directly applies the condition (e.g., - < 1000) to each value of the MCV list, and adds up the + < 1000) to each value of the MCV list, and adds up the frequencies of the MCVs for which the condition is true. This gives an exact estimate of the selectivity within the portion of the table that is MCVs. The histogram is then used in the same way as above @@ -253,7 +253,7 @@ EXPLAIN SELECT * FROM tenk1 WHERE stringu1 < 'IAAAAA'; Filter: (stringu1 < 'IAAAAA'::name) - We already saw the MCV information for stringu1, + We already saw the MCV information for stringu1, and here is its histogram: @@ -266,7 +266,7 @@ WHERE tablename='tenk1' AND attname='stringu1'; Checking the MCV list, we find that the condition stringu1 < - 'IAAAAA' is satisfied by the first six entries and not the last four, + 'IAAAAA' is satisfied by the first six entries and not the last four, so the selectivity within the MCV part of the population is @@ -279,11 +279,11 @@ selectivity = sum(relevant mvfs) population represented by MCVs is 0.03033333, and therefore the fraction represented by the histogram is 0.96966667 (again, there are no nulls, else we'd have to exclude them here). We can see - that the value IAAAAA falls nearly at the end of the + that the value IAAAAA falls nearly at the end of the third histogram bucket. Using some rather cheesy assumptions about the frequency of different characters, the planner arrives at the estimate 0.298387 for the portion of the histogram population - that is less than IAAAAA. We then combine the estimates + that is less than IAAAAA. We then combine the estimates for the MCV and non-MCV populations: @@ -372,7 +372,7 @@ rows = 10000 * 0.005035 = 50 (rounding off) - The restriction for the join is t2.unique2 = t1.unique2. + The restriction for the join is t2.unique2 = t1.unique2. The operator is just our familiar =, however the selectivity function is obtained from the oprjoin column of @@ -424,12 +424,12 @@ rows = (outer_cardinality * inner_cardinality) * selectivity - Notice that we showed inner_cardinality as 10000, that is, - the unmodified size of tenk2. It might appear from - inspection of the EXPLAIN output that the estimate of + Notice that we showed inner_cardinality as 10000, that is, + the unmodified size of tenk2. 
It might appear from + inspection of the EXPLAIN output that the estimate of join rows comes from 50 * 1, that is, the number of outer rows times the estimated number of rows obtained by each inner index scan on - tenk2. But this is not the case: the join relation size + tenk2. But this is not the case: the join relation size is estimated before any particular join plan has been considered. If everything is working well then the two ways of estimating the join size will produce about the same answer, but due to round-off error and @@ -438,7 +438,7 @@ rows = (outer_cardinality * inner_cardinality) * selectivity For those interested in further details, estimation of the size of - a table (before any WHERE clauses) is done in + a table (before any WHERE clauses) is done in src/backend/optimizer/util/plancat.c. The generic logic for clause selectivities is in src/backend/optimizer/path/clausesel.c. The @@ -468,7 +468,7 @@ INSERT INTO t SELECT i % 100, i % 100 FROM generate_series(1, 10000) s(i); ANALYZE t; - As explained in , the planner can determine + As explained in , the planner can determine cardinality of t using the number of pages and rows obtained from pg_class: @@ -485,8 +485,8 @@ SELECT relpages, reltuples FROM pg_class WHERE relname = 't'; - The following example shows the result of estimating a WHERE - condition on the a column: + The following example shows the result of estimating a WHERE + condition on the a column: EXPLAIN (ANALYZE, TIMING OFF) SELECT * FROM t WHERE a = 1; @@ -501,9 +501,9 @@ EXPLAIN (ANALYZE, TIMING OFF) SELECT * FROM t WHERE a = 1; of this clause to be 1%. By comparing this estimate and the actual number of rows, we see that the estimate is very accurate (in fact exact, as the table is very small). Changing the - WHERE condition to use the b column, an + WHERE condition to use the b column, an identical plan is generated. But observe what happens if we apply the same - condition on both columns, combining them with AND: + condition on both columns, combining them with AND: EXPLAIN (ANALYZE, TIMING OFF) SELECT * FROM t WHERE a = 1 AND b = 1; @@ -524,7 +524,7 @@ EXPLAIN (ANALYZE, TIMING OFF) SELECT * FROM t WHERE a = 1 AND b = 1; This problem can be fixed by creating a statistics object that - directs ANALYZE to calculate functional-dependency + directs ANALYZE to calculate functional-dependency multivariate statistics on the two columns: diff --git a/doc/src/sgml/plhandler.sgml b/doc/src/sgml/plhandler.sgml index 57a2a05ed2..73cd7d1387 100644 --- a/doc/src/sgml/plhandler.sgml +++ b/doc/src/sgml/plhandler.sgml @@ -11,9 +11,8 @@ All calls to functions that are written in a language other than the current version 1 interface for compiled - languages (this includes functions in user-defined procedural languages, - functions written in SQL, and functions using the version 0 compiled - language interface) go through a call handler + languages (this includes functions in user-defined procedural languages + and functions written in SQL) go through a call handler function for the specific language. It is the responsibility of the call handler to execute the function in a meaningful way, such as by interpreting the supplied source text. This chapter outlines @@ -29,13 +28,13 @@ special pseudo-type identifies the function as a call handler and prevents it from being called directly in SQL commands. For more details on C language calling conventions and dynamic loading, - see . + see . 
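 To get a feel for this arrangement, one can look at how the languages already installed in a database are wired to their support functions through the pg_language catalog (a read-only illustration; the output varies by installation):

SELECT lanname,
       lanplcallfoid::regproc AS call_handler,
       laninline::regproc     AS inline_handler,
       lanvalidator::regproc  AS validator
  FROM pg_language;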
The call handler is called in the same way as any other function: It receives a pointer to a - FunctionCallInfoData struct containing + FunctionCallInfoData struct containing argument values and information about the called function, and it is expected to return a Datum result (and possibly set the isnull field of the @@ -54,7 +53,7 @@ It's up to the call handler to fetch the entry of the function from the pg_proc system catalog and to analyze the argument - and return types of the called function. The AS clause from the + and return types of the called function. The AS clause from the CREATE FUNCTION command for the function will be found in the prosrc column of the pg_proc row. This is commonly source @@ -68,9 +67,9 @@ A call handler can avoid repeated lookups of information about the called function by using the flinfo->fn_extra field. This will - initially be NULL, but can be set by the call handler to point at + initially be NULL, but can be set by the call handler to point at information about the called function. On subsequent calls, if - flinfo->fn_extra is already non-NULL + flinfo->fn_extra is already non-NULL then it can be used and the information lookup step skipped. The call handler must make sure that flinfo->fn_extra is made to point at @@ -90,7 +89,7 @@ are passed in the usual way, but the FunctionCallInfoData's context field points at a - TriggerData structure, rather than being NULL + TriggerData structure, rather than being NULL as it is in a plain function call. A language handler should provide mechanisms for procedural-language functions to get at the trigger information. @@ -108,9 +107,7 @@ #include "catalog/pg_proc.h" #include "catalog/pg_type.h" -#ifdef PG_MODULE_MAGIC PG_MODULE_MAGIC; -#endif PG_FUNCTION_INFO_V1(plsample_call_handler); @@ -122,7 +119,7 @@ plsample_call_handler(PG_FUNCTION_ARGS) if (CALLED_AS_TRIGGER(fcinfo)) { /* - * Called as a trigger procedure + * Called as a trigger function */ TriggerData *trigdata = (TriggerData *) fcinfo->context; @@ -146,7 +143,7 @@ plsample_call_handler(PG_FUNCTION_ARGS) After having compiled the handler function into a loadable module - (see ), the following commands then + (see ), the following commands then register the sample procedural language: CREATE FUNCTION plsample_call_handler() RETURNS language_handler @@ -164,83 +161,83 @@ CREATE LANGUAGE plsample are a validator and an inline handler. A validator can be provided to allow language-specific checking to be done during - . + . An inline handler can be provided to allow the language to support - anonymous code blocks executed via the command. + anonymous code blocks executed via the command. If a validator is provided by a procedural language, it must be declared as a function taking a single parameter of type - oid. The validator's result is ignored, so it is customarily - declared to return void. The validator will be called at - the end of a CREATE FUNCTION command that has created + oid. The validator's result is ignored, so it is customarily + declared to return void. The validator will be called at + the end of a CREATE FUNCTION command that has created or updated a function written in the procedural language. - The passed-in OID is the OID of the function's pg_proc + The passed-in OID is the OID of the function's pg_proc row. The validator must fetch this row in the usual way, and do whatever checking is appropriate. 
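 At the SQL level, a validator for the hypothetical plsample language from the example above would be declared like this (a sketch; the C function is assumed to live in the same loadable module) and then attached with a VALIDATOR clause when the language is registered, as shown in the complete registration sketch further below:

CREATE FUNCTION plsample_validator(oid) RETURNS void
    AS '$libdir/plsample' LANGUAGE C;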
- First, call CheckFunctionValidatorAccess() to diagnose + First, call CheckFunctionValidatorAccess() to diagnose explicit calls to the validator that the user could not achieve through - CREATE FUNCTION. Typical checks then include verifying + CREATE FUNCTION. Typical checks then include verifying that the function's argument and result types are supported by the language, and that the function's body is syntactically correct in the language. If the validator finds the function to be okay, it should just return. If it finds an error, it should report that - via the normal ereport() error reporting mechanism. + via the normal ereport() error reporting mechanism. Throwing an error will force a transaction rollback and thus prevent the incorrect function definition from being committed. Validator functions should typically honor the parameter: if it is turned off then + linkend="guc-check-function-bodies"/> parameter: if it is turned off then any expensive or context-sensitive checking should be skipped. If the language provides for code execution at compilation time, the validator must suppress checks that would induce such execution. In particular, - this parameter is turned off by pg_dump so that it can + this parameter is turned off by pg_dump so that it can load procedural language functions without worrying about side effects or dependencies of the function bodies on other database objects. (Because of this requirement, the call handler should avoid assuming that the validator has fully checked the function. The point of having a validator is not to let the call handler omit checks, but to notify the user immediately if there are obvious errors in a - CREATE FUNCTION command.) + CREATE FUNCTION command.) While the choice of exactly what to check is mostly left to the discretion of the validator function, note that the core - CREATE FUNCTION code only executes SET clauses - attached to a function when check_function_bodies is on. + CREATE FUNCTION code only executes SET clauses + attached to a function when check_function_bodies is on. Therefore, checks whose results might be affected by GUC parameters - definitely should be skipped when check_function_bodies is + definitely should be skipped when check_function_bodies is off, to avoid false failures when reloading a dump. If an inline handler is provided by a procedural language, it must be declared as a function taking a single parameter of type - internal. The inline handler's result is ignored, so it is - customarily declared to return void. The inline handler - will be called when a DO statement is executed specifying + internal. The inline handler's result is ignored, so it is + customarily declared to return void. The inline handler + will be called when a DO statement is executed specifying the procedural language. The parameter actually passed is a pointer - to an InlineCodeBlock struct, which contains information - about the DO statement's parameters, in particular the + to an InlineCodeBlock struct, which contains information + about the DO statement's parameters, in particular the text of the anonymous code block to be executed. The inline handler should execute this code and return. It's recommended that you wrap all these function declarations, - as well as the CREATE LANGUAGE command itself, into - an extension so that a simple CREATE EXTENSION + as well as the CREATE LANGUAGE command itself, into + an extension so that a simple CREATE EXTENSION command is sufficient to install the language. 
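 For instance, the extension's SQL script for the hypothetical plsample language might contain little more than the declarations already seen, now including an inline handler (again assuming the C functions exist in the module):

CREATE FUNCTION plsample_call_handler() RETURNS language_handler
    AS '$libdir/plsample' LANGUAGE C;

CREATE FUNCTION plsample_inline_handler(internal) RETURNS void
    AS '$libdir/plsample' LANGUAGE C;

CREATE FUNCTION plsample_validator(oid) RETURNS void
    AS '$libdir/plsample' LANGUAGE C;

CREATE LANGUAGE plsample
    HANDLER plsample_call_handler
    INLINE plsample_inline_handler
    VALIDATOR plsample_validator;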
See - for information about writing + for information about writing extensions. The procedural languages included in the standard distribution are good references when trying to write your own language handler. - Look into the src/pl subdirectory of the source tree. - The + Look into the src/pl subdirectory of the source tree. + The reference page also has some useful details. diff --git a/doc/src/sgml/plperl.sgml b/doc/src/sgml/plperl.sgml index 37a3557d61..967efba3b5 100644 --- a/doc/src/sgml/plperl.sgml +++ b/doc/src/sgml/plperl.sgml @@ -27,12 +27,12 @@ To install PL/Perl in a particular database, use - CREATE EXTENSION plperl. + CREATE EXTENSION plperl. - If a language is installed into template1, all subsequently + If a language is installed into template1, all subsequently created databases will have the language installed automatically. @@ -41,7 +41,7 @@ Users of source packages must specially enable the build of PL/Perl during the installation process. (Refer to for more information.) Users of + linkend="installation"/> for more information.) Users of binary packages might find PL/Perl in a separate subpackage. @@ -51,7 +51,7 @@ To create a function in the PL/Perl language, use the standard - + syntax: @@ -67,9 +67,13 @@ $$ LANGUAGE plperl; as discussed below. + + In a PL/Perl procedure, any return value from the Perl code is ignored. + + PL/Perl also supports anonymous code blocks called with the - statement: + statement: DO $$ @@ -90,8 +94,8 @@ $$ LANGUAGE plperl; subroutines which you call via a coderef. For more information, see the entries for Variable "%s" will not stay shared and Variable "%s" is not available in the - perldiag man page, or - search the Internet for perl nested named subroutine. + perldiag man page, or + search the Internet for perl nested named subroutine. @@ -99,17 +103,17 @@ $$ LANGUAGE plperl; The syntax of the CREATE FUNCTION command requires the function body to be written as a string constant. It is usually most convenient to use dollar quoting (see ) for the string constant. - If you choose to use escape string syntax E'', - you must double any single quote marks (') and backslashes - (\) used in the body of the function - (see ). + linkend="sql-syntax-dollar-quoting"/>) for the string constant. + If you choose to use escape string syntax E'', + you must double any single quote marks (') and backslashes + (\) used in the body of the function + (see ). Arguments and results are handled as in any other Perl subroutine: arguments are passed in @_, and a result value - is returned with return or as the last expression + is returned with return or as the last expression evaluated in the function. @@ -134,12 +138,12 @@ $$ LANGUAGE plperl; - If an SQL null valuenull valuein PL/Perl is passed to a function, - the argument value will appear as undefined in Perl. The + If an SQL null valuenull valuein PL/Perl is passed to a function, + the argument value will appear as undefined in Perl. The above function definition will not behave very nicely with null inputs (in fact, it will act as though they are zeroes). We could - add STRICT to the function definition to make + add STRICT to the function definition to make PostgreSQL do something more reasonable: if a null value is passed, the function will not be called at all, but will just return a null result automatically. Alternatively, @@ -174,14 +178,14 @@ $$ LANGUAGE plperl; other cases the argument will need to be converted into a form that is more usable in Perl. 
For example, the decode_bytea function can be used to convert an argument of - type bytea into unescaped binary. + type bytea into unescaped binary. Similarly, values passed back to PostgreSQL must be in the external text representation format. For example, the encode_bytea function can be used to - escape binary data for a return value of type bytea. + escape binary data for a return value of type bytea. @@ -274,6 +278,20 @@ SELECT * FROM perl_row(); hash will be returned as null values. + + Similarly, output arguments of procedures can be returned as a hash + reference: + + +CREATE PROCEDURE perl_triple(INOUT a integer, INOUT b integer) AS $$ + my ($a, $b) = @_; + return {a => $a * 3, b => $b * 3}; +$$ LANGUAGE plperl; + +CALL perl_triple(5, 10); + + + PL/Perl functions can also return sets of either scalar or composite types. Usually you'll want to return rows one at a @@ -330,10 +348,10 @@ SELECT * FROM perl_set(); - If you wish to use the strict pragma with your code you - have a few options. For temporary global use you can SET + If you wish to use the strict pragma with your code you + have a few options. For temporary global use you can SET plperl.use_strict to true. - This will affect subsequent compilations of PL/Perl + This will affect subsequent compilations of PL/Perl functions, but not functions already compiled in the current session. For permanent global use you can set plperl.use_strict to true in the postgresql.conf file. @@ -348,7 +366,7 @@ use strict; - The feature pragma is also available to use if your Perl is version 5.10.0 or higher. + The feature pragma is also available to use if your Perl is version 5.10.0 or higher. @@ -380,7 +398,7 @@ use strict; - spi_exec_query(query [, max-rows]) + spi_exec_query(query [, max-rows]) spi_exec_query in PL/Perl @@ -524,13 +542,13 @@ SELECT * from lotsa_md5(500); - Normally, spi_fetchrow should be repeated until it + Normally, spi_fetchrow should be repeated until it returns undef, indicating that there are no more rows to read. The cursor returned by spi_query is automatically freed when - spi_fetchrow returns undef. + spi_fetchrow returns undef. If you do not wish to read all the rows, instead call - spi_cursor_close to free the cursor. + spi_cursor_close to free the cursor. Failure to do so will result in memory leaks. @@ -657,6 +675,55 @@ SELECT release_hosts_query(); + + + + spi_commit() + + spi_commit + in PL/Perl + + + + spi_rollback() + + spi_rollback + in PL/Perl + + + + + Commit or roll back the current transaction. This can only be called + in a procedure or anonymous code block (DO command) + called from the top level. (Note that it is not possible to run the + SQL commands COMMIT or ROLLBACK + via spi_exec_query or similar. It has to be done + using these functions.) After a transaction is ended, a new + transaction is automatically started, so there is no separate function + for that. + + + + Here is an example: + +CREATE PROCEDURE transaction_test1() +LANGUAGE plperl +AS $$ +foreach my $i (0..9) { + spi_exec_query("INSERT INTO test1 (a) VALUES ($i)"); + if ($i % 2 == 0) { + spi_commit(); + } else { + spi_rollback(); + } +} +$$; + +CALL transaction_test1(); + + + + @@ -675,20 +742,20 @@ SELECT release_hosts_query(); Emit a log or error message. Possible levels are - DEBUG, LOG, INFO, - NOTICE, WARNING, and ERROR. - ERROR + DEBUG, LOG, INFO, + NOTICE, WARNING, and ERROR. 
+ ERROR raises an error condition; if this is not trapped by the surrounding Perl code, the error propagates out to the calling query, causing the current transaction or subtransaction to be aborted. This - is effectively the same as the Perl die command. + is effectively the same as the Perl die command. The other levels only generate messages of different priority levels. Whether messages of a particular priority are reported to the client, written to the server log, or both is controlled by the - and - configuration - variables. See for more + and + configuration + variables. See for more information. @@ -706,8 +773,8 @@ SELECT release_hosts_query(); Return the given string suitably quoted to be used as a string literal in an SQL statement string. Embedded single-quotes and backslashes are properly doubled. - Note that quote_literal returns undef on undef input; if the argument - might be undef, quote_nullable is often more suitable. + Note that quote_literal returns undef on undef input; if the argument + might be undef, quote_nullable is often more suitable. @@ -792,7 +859,7 @@ SELECT release_hosts_query(); Returns the contents of the referenced array as a string in array literal format - (see ). + (see ). Returns the argument value unaltered if it's not a reference to an array. The delimiter used between elements of the array literal defaults to ", " if a delimiter is not specified or is undef. @@ -828,7 +895,7 @@ SELECT release_hosts_query(); Returns the contents of the referenced array as a string in array constructor format - (see ). + (see ). Individual values are quoted using quote_nullable. Returns the argument value, quoted using quote_nullable, if it's not a reference to an array. @@ -849,7 +916,7 @@ SELECT release_hosts_query(); Returns a true value if the content of the given string looks like a number, according to Perl, returns false otherwise. Returns undef if the argument is undef. Leading and trailing space is - ignored. Inf and Infinity are regarded as numbers. + ignored. Inf and Infinity are regarded as numbers. @@ -865,8 +932,8 @@ SELECT release_hosts_query(); Returns a true value if the given argument may be treated as an - array reference, that is, if ref of the argument is ARRAY or - PostgreSQL::InServer::ARRAY. Returns false otherwise. + array reference, that is, if ref of the argument is ARRAY or + PostgreSQL::InServer::ARRAY. Returns false otherwise. @@ -941,11 +1008,11 @@ $$ LANGUAGE plperl; PL/Perl functions will share the same value of %_SHARED if and only if they are executed by the same SQL role. In an application wherein a single session executes code under multiple SQL roles (via - SECURITY DEFINER functions, use of SET ROLE, etc) + SECURITY DEFINER functions, use of SET ROLE, etc) you may need to take explicit steps to ensure that PL/Perl functions can share data via %_SHARED. To do that, make sure that functions that should communicate are owned by the same user, and mark - them SECURITY DEFINER. You must of course take care that + them SECURITY DEFINER. You must of course take care that such functions can't be used to do anything unintended. @@ -959,8 +1026,8 @@ $$ LANGUAGE plperl; - Normally, PL/Perl is installed as a trusted programming - language named plperl. In this setup, certain Perl + Normally, PL/Perl is installed as a trusted programming + language named plperl. In this setup, certain Perl operations are disabled to preserve security. In general, the operations that are restricted are those that interact with the environment. 
This includes file handle operations, @@ -993,15 +1060,15 @@ $$ LANGUAGE plperl; Sometimes it is desirable to write Perl functions that are not restricted. For example, one might want a Perl function that sends mail. To handle these cases, PL/Perl can also be installed as an - untrusted language (usually called - PL/PerlUPL/PerlU). + untrusted language (usually called + PL/PerlUPL/PerlU). In this case the full Perl language is available. When installing the language, the language name plperlu will select the untrusted PL/Perl variant. - The writer of a PL/PerlU function must take care that the function + The writer of a PL/PerlU function must take care that the function cannot be used to do anything unwanted, since it will be able to do anything that could be done by a user logged in as the database administrator. Note that the database system allows only database @@ -1010,25 +1077,25 @@ $$ LANGUAGE plperl; If the above function was created by a superuser using the language - plperlu, execution would succeed. + plperlu, execution would succeed. In the same way, anonymous code blocks written in Perl can use restricted operations if the language is specified as - plperlu rather than plperl, but the caller + plperlu rather than plperl, but the caller must be a superuser. - While PL/Perl functions run in a separate Perl - interpreter for each SQL role, all PL/PerlU functions + While PL/Perl functions run in a separate Perl + interpreter for each SQL role, all PL/PerlU functions executed in a given session run in a single Perl interpreter (which is - not any of the ones used for PL/Perl functions). - This allows PL/PerlU functions to share data freely, - but no communication can occur between PL/Perl and - PL/PerlU functions. + not any of the ones used for PL/Perl functions). + This allows PL/PerlU functions to share data freely, + but no communication can occur between PL/Perl and + PL/PerlU functions. @@ -1036,14 +1103,14 @@ $$ LANGUAGE plperl; Perl cannot support multiple interpreters within one process unless it was built with the appropriate flags, namely either - usemultiplicity or useithreads. - (usemultiplicity is preferred unless you actually need + usemultiplicity or useithreads. + (usemultiplicity is preferred unless you actually need to use threads. For more details, see the - perlembed man page.) - If PL/Perl is used with a copy of Perl that was not built + perlembed man page.) + If PL/Perl is used with a copy of Perl that was not built this way, then it is only possible to have one Perl interpreter per session, and so any one session can only execute either - PL/PerlU functions, or PL/Perl functions + PL/PerlU functions, or PL/Perl functions that are all called by the same SQL role. @@ -1056,7 +1123,7 @@ $$ LANGUAGE plperl; PL/Perl can be used to write trigger functions. In a trigger function, the hash reference $_TD contains information about the - current trigger event. $_TD is a global variable, + current trigger event. $_TD is a global variable, which gets a separate local value for each invocation of the trigger. 
The fields of the $_TD hash reference are: @@ -1092,8 +1159,8 @@ $$ LANGUAGE plperl; $_TD->{event} - Trigger event: INSERT, UPDATE, - DELETE, TRUNCATE, or UNKNOWN + Trigger event: INSERT, UPDATE, + DELETE, TRUNCATE, or UNKNOWN @@ -1233,7 +1300,7 @@ $$ LANGUAGE plperl; CREATE TRIGGER test_valid_id_trig BEFORE INSERT OR UPDATE ON test - FOR EACH ROW EXECUTE PROCEDURE valid_id(); + FOR EACH ROW EXECUTE FUNCTION valid_id(); @@ -1244,7 +1311,7 @@ CREATE TRIGGER test_valid_id_trig PL/Perl can be used to write event trigger functions. In an event trigger function, the hash reference $_TD contains information - about the current trigger event. $_TD is a global variable, + about the current trigger event. $_TD is a global variable, which gets a separate local value for each invocation of the trigger. The fields of the $_TD hash reference are: @@ -1270,7 +1337,7 @@ CREATE TRIGGER test_valid_id_trig - The return value of the trigger procedure is ignored. + The return value of the trigger function is ignored. @@ -1283,7 +1350,7 @@ $$ LANGUAGE plperl; CREATE EVENT TRIGGER perl_a_snitch ON ddl_command_start - EXECUTE PROCEDURE perlsnitch(); + EXECUTE FUNCTION perlsnitch(); @@ -1295,7 +1362,7 @@ CREATE EVENT TRIGGER perl_a_snitch Configuration - This section lists configuration parameters that affect PL/Perl. + This section lists configuration parameters that affect PL/Perl. @@ -1304,14 +1371,14 @@ CREATE EVENT TRIGGER perl_a_snitch plperl.on_init (string) - plperl.on_init configuration parameter + plperl.on_init configuration parameter Specifies Perl code to be executed when a Perl interpreter is first - initialized, before it is specialized for use by plperl or - plperlu. + initialized, before it is specialized for use by plperl or + plperlu. The SPI functions are not available when this code is executed. If the code fails with an error it will abort the initialization of the interpreter and propagate out to the calling query, causing the @@ -1319,7 +1386,7 @@ CREATE EVENT TRIGGER perl_a_snitch The Perl code is limited to a single string. Longer code can be placed - into a module and loaded by the on_init string. + into a module and loaded by the on_init string. Examples: plperl.on_init = 'require "plperlinit.pl"' @@ -1327,8 +1394,8 @@ plperl.on_init = 'use lib "/my/app"; use MyApp::PgInit;' - Any modules loaded by plperl.on_init, either directly or - indirectly, will be available for use by plperl. This may + Any modules loaded by plperl.on_init, either directly or + indirectly, will be available for use by plperl. This may create a security risk. To see what modules have been loaded you can use: DO 'elog(WARNING, join ", ", sort keys %INC)' LANGUAGE plperl; @@ -1336,17 +1403,17 @@ DO 'elog(WARNING, join ", ", sort keys %INC)' LANGUAGE plperl; Initialization will happen in the postmaster if the plperl library is - included in , in which + included in , in which case extra consideration should be given to the risk of destabilizing the postmaster. The principal reason for making use of this feature - is that Perl modules loaded by plperl.on_init need be + is that Perl modules loaded by plperl.on_init need be loaded only at postmaster start, and will be instantly available without loading overhead in individual database sessions. However, keep in mind that the overhead is avoided only for the first Perl interpreter used by a database session — either PL/PerlU, or PL/Perl for the first SQL role that calls a PL/Perl function. 
Any additional Perl interpreters created in a database session will have - to execute plperl.on_init afresh. Also, on Windows there + to execute plperl.on_init afresh. Also, on Windows there will be no savings whatsoever from preloading, since the Perl interpreter created in the postmaster process does not propagate to child processes. @@ -1361,27 +1428,27 @@ DO 'elog(WARNING, join ", ", sort keys %INC)' LANGUAGE plperl; plperl.on_plperl_init (string) - plperl.on_plperl_init configuration parameter + plperl.on_plperl_init configuration parameter plperl.on_plperlu_init (string) - plperl.on_plperlu_init configuration parameter + plperl.on_plperlu_init configuration parameter These parameters specify Perl code to be executed when a Perl - interpreter is specialized for plperl or - plperlu respectively. This will happen when a PL/Perl or + interpreter is specialized for plperl or + plperlu respectively. This will happen when a PL/Perl or PL/PerlU function is first executed in a database session, or when an additional interpreter has to be created because the other language is called or a PL/Perl function is called by a new SQL role. This - follows any initialization done by plperl.on_init. + follows any initialization done by plperl.on_init. The SPI functions are not available when this code is executed. - The Perl code in plperl.on_plperl_init is executed after - locking down the interpreter, and thus it can only perform + The Perl code in plperl.on_plperl_init is executed after + locking down the interpreter, and thus it can only perform trusted operations. @@ -1404,13 +1471,13 @@ DO 'elog(WARNING, join ", ", sort keys %INC)' LANGUAGE plperl; plperl.use_strict (boolean) - plperl.use_strict configuration parameter + plperl.use_strict configuration parameter When set true subsequent compilations of PL/Perl functions will have - the strict pragma enabled. This parameter does not affect + the strict pragma enabled. This parameter does not affect functions already compiled in the current session. @@ -1459,7 +1526,7 @@ DO 'elog(WARNING, join ", ", sort keys %INC)' LANGUAGE plperl; When a session ends normally, not due to a fatal error, any - END blocks that have been defined are executed. + END blocks that have been defined are executed. Currently no other actions are performed. Specifically, file handles are not automatically flushed and objects are not automatically destroyed. diff --git a/doc/src/sgml/plpgsql.sgml b/doc/src/sgml/plpgsql.sgml index 2f166d2d59..beb7e03bbc 100644 --- a/doc/src/sgml/plpgsql.sgml +++ b/doc/src/sgml/plpgsql.sgml @@ -13,13 +13,13 @@ PL/pgSQL is a loadable procedural language for the PostgreSQL database - system. The design goals of PL/pgSQL were to create + system. The design goals of PL/pgSQL were to create a loadable procedural language that - can be used to create functions and trigger procedures, + can be used to create functions and triggers, @@ -59,7 +59,7 @@ - In PostgreSQL 9.0 and later, + In PostgreSQL 9.0 and later, PL/pgSQL is installed by default. However it is still a loadable module, so especially security-conscious administrators could choose to remove it. @@ -69,7 +69,7 @@ Advantages of Using <application>PL/pgSQL</application> - SQL is the language PostgreSQL + SQL is the language PostgreSQL and most other relational databases use as query language. It's portable and easy to learn. But every SQL statement must be executed individually by the database server. @@ -123,55 +123,58 @@ and they can return a result of any of these types. 
They can also accept or return any composite type (row type) specified by name. It is also possible to declare a PL/pgSQL - function as returning record, which means that the result + function as accepting record, which means that any + composite type will do as input, or + as returning record, which means that the result is a row type whose columns are determined by specification in the - calling query, as discussed in . + calling query, as discussed in . - PL/pgSQL functions can be declared to accept a variable - number of arguments by using the VARIADIC marker. This + PL/pgSQL functions can be declared to accept a variable + number of arguments by using the VARIADIC marker. This works exactly the same way as for SQL functions, as discussed in - . + . - PL/pgSQL functions can also be declared to accept + PL/pgSQL functions can also be declared to accept and return the polymorphic types anyelement, anyarray, anynonarray, - anyenum, and anyrange. The actual + anyenum, and anyrange. The actual data types handled by a polymorphic function can vary from call to - call, as discussed in . - An example is shown in . + call, as discussed in . + An example is shown in . - PL/pgSQL functions can also be declared to return - a set (or table) of any data type that can be returned as + PL/pgSQL functions can also be declared to return + a set (or table) of any data type that can be returned as a single instance. Such a function generates its output by executing - RETURN NEXT for each desired element of the result - set, or by using RETURN QUERY to output the result of + RETURN NEXT for each desired element of the result + set, or by using RETURN QUERY to output the result of evaluating a query. - Finally, a PL/pgSQL function can be declared to return - void if it has no useful return value. + Finally, a PL/pgSQL function can be declared to return + void if it has no useful return value. (Alternatively, it + could be written as a procedure in that case.) - PL/pgSQL functions can also be declared with output + PL/pgSQL functions can also be declared with output parameters in place of an explicit specification of the return type. This does not add any fundamental capability to the language, but it is often convenient, especially for returning multiple values. - The RETURNS TABLE notation can also be used in place - of RETURNS SETOF. + The RETURNS TABLE notation can also be used in place + of RETURNS SETOF. Specific examples appear in - and - . + and + . @@ -181,16 +184,16 @@ Functions written in PL/pgSQL are defined - to the server by executing commands. + to the server by executing commands. Such a command would normally look like, say, CREATE FUNCTION somefunc(integer, text) RETURNS integer -AS 'function body text' +AS 'function body text' LANGUAGE plpgsql; The function body is simply a string literal so far as CREATE - FUNCTION is concerned. It is often helpful to use dollar quoting - (see ) to write the function + FUNCTION is concerned. It is often helpful to use dollar quoting + (see ) to write the function body, rather than the normal single quote syntax. Without dollar quoting, any single quotes or backslashes in the function body must be escaped by doubling them. Almost all the examples in this chapter use dollar-quoted @@ -200,7 +203,7 @@ LANGUAGE plpgsql; PL/pgSQL is a block-structured language. The complete text of a function body must be a - block. A block is defined as: + block. 
A block is defined as: <<label>> @@ -223,16 +226,16 @@ END label ; A common mistake is to write a semicolon immediately after - BEGIN. This is incorrect and will result in a syntax error. + BEGIN. This is incorrect and will result in a syntax error. A label is only needed if you want to identify the block for use - in an EXIT statement, or to qualify the names of the + in an EXIT statement, or to qualify the names of the variables declared in the block. If a label is given after - END, it must match the label at the block's beginning. + END, it must match the label at the block's beginning. @@ -242,7 +245,7 @@ END label ; - Comments work the same way in PL/pgSQL code as in + Comments work the same way in PL/pgSQL code as in ordinary SQL. A double dash (--) starts a comment that extends to the end of the line. A /* starts a block comment that extends to the matching occurrence of @@ -251,7 +254,7 @@ END label ; Any statement in the statement section of a block - can be a subblock. Subblocks can be used for + can be a subblock. Subblocks can be used for logical grouping or to localize variables to a small group of statements. Variables declared in a subblock mask any similarly-named variables of outer blocks for the duration @@ -285,11 +288,11 @@ $$ LANGUAGE plpgsql; - There is actually a hidden outer block surrounding the body - of any PL/pgSQL function. This block provides the + There is actually a hidden outer block surrounding the body + of any PL/pgSQL function. This block provides the declarations of the function's parameters (if any), as well as some special variables such as FOUND (see - ). The outer block is + ). The outer block is labeled with the function's name, meaning that parameters and special variables can be qualified with the function's name. @@ -297,18 +300,17 @@ $$ LANGUAGE plpgsql; It is important not to confuse the use of - BEGIN/END for grouping statements in - PL/pgSQL with the similarly-named SQL commands + BEGIN/END for grouping statements in + PL/pgSQL with the similarly-named SQL commands for transaction - control. PL/pgSQL's BEGIN/END + control. PL/pgSQL's BEGIN/END are only for grouping; they do not start or end a transaction. - Functions and trigger procedures are always executed within a transaction - established by an outer query — they cannot start or commit that - transaction, since there would be no context for them to execute in. - However, a block containing an EXCEPTION clause effectively + See for information on managing + transactions in PL/pgSQL. + Also, a block containing an EXCEPTION clause effectively forms a subtransaction that can be rolled back without affecting the outer transaction. For more about that see . + linkend="plpgsql-error-trapping"/>. @@ -318,15 +320,15 @@ $$ LANGUAGE plpgsql; All variables used in a block must be declared in the declarations section of the block. - (The only exceptions are that the loop variable of a FOR loop + (The only exceptions are that the loop variable of a FOR loop iterating over a range of integer values is automatically declared as an - integer variable, and likewise the loop variable of a FOR loop + integer variable, and likewise the loop variable of a FOR loop iterating over a cursor's result is automatically declared as a record variable.) - PL/pgSQL variables can have any SQL data type, such as + PL/pgSQL variables can have any SQL data type, such as integer, varchar, and char. 
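As a compact illustration of the block rules just described (labels, subblocks, and variable masking), consider the following sketch; the function and variable names are invented for illustration:

CREATE FUNCTION block_demo() RETURNS integer AS $$
<< outerblock >>
DECLARE
    quantity integer := 30;
BEGIN
    DECLARE
        quantity integer := 80;   -- masks the outer variable inside the subblock
    BEGIN
        -- the masked variable is still reachable by qualifying it with the label
        RETURN quantity + outerblock.quantity;   -- 80 + 30
    END;
END outerblock;
$$ LANGUAGE plpgsql;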
@@ -348,21 +350,21 @@ arow RECORD; name CONSTANT type COLLATE collation_name NOT NULL { DEFAULT | := | = } expression ; - The DEFAULT clause, if given, specifies the initial value assigned - to the variable when the block is entered. If the DEFAULT clause + The DEFAULT clause, if given, specifies the initial value assigned + to the variable when the block is entered. If the DEFAULT clause is not given then the variable is initialized to the SQL null value. - The CONSTANT option prevents the variable from being + The CONSTANT option prevents the variable from being assigned to after initialization, so that its value will remain constant for the duration of the block. - The COLLATE option specifies a collation to use for the - variable (see ). - If NOT NULL + The COLLATE option specifies a collation to use for the + variable (see ). + If NOT NULL is specified, an assignment of a null value results in a run-time - error. All variables declared as NOT NULL + error. All variables declared as NOT NULL must have a nonnull default value specified. - Equal (=) can be used instead of PL/SQL-compliant - :=. + Equal (=) can be used instead of PL/SQL-compliant + :=. @@ -428,9 +430,9 @@ $$ LANGUAGE plpgsql; These two examples are not perfectly equivalent. In the first case, - subtotal could be referenced as - sales_tax.subtotal, but in the second case it could not. - (Had we attached a label to the inner block, subtotal could + subtotal could be referenced as + sales_tax.subtotal, but in the second case it could not. + (Had we attached a label to the inner block, subtotal could be qualified with that label, instead.) @@ -474,7 +476,7 @@ END; $$ LANGUAGE plpgsql; - Notice that we omitted RETURNS real — we could have + Notice that we omitted RETURNS real — we could have included it, but it would be redundant. @@ -491,15 +493,15 @@ END; $$ LANGUAGE plpgsql; - As discussed in , this + As discussed in , this effectively creates an anonymous record type for the function's - results. If a RETURNS clause is given, it must say - RETURNS record. + results. If a RETURNS clause is given, it must say + RETURNS record. Another way to declare a PL/pgSQL function - is with RETURNS TABLE, for example: + is with RETURNS TABLE, for example: CREATE FUNCTION extended_sales(p_itemno int) @@ -511,9 +513,9 @@ END; $$ LANGUAGE plpgsql; - This is exactly equivalent to declaring one or more OUT + This is exactly equivalent to declaring one or more OUT parameters and specifying RETURNS SETOF - sometype. + sometype. @@ -523,14 +525,14 @@ $$ LANGUAGE plpgsql; or anyrange), a special parameter $0 is created. Its data type is the actual return type of the function, as deduced from the actual input types (see ). + linkend="extend-types-polymorphic"/>). This allows the function to access its actual return type - as shown in . + as shown in . $0 is initialized to null and can be modified by the function, so it can be used to hold the return value if desired, though that is not required. $0 can also be given an alias. 
For example, this function works on any data type - that has a + operator: + that has a + operator: CREATE FUNCTION add_three_values(v1 anyelement, v2 anyelement, v3 anyelement) @@ -564,19 +566,19 @@ $$ LANGUAGE plpgsql; - <literal>ALIAS</> + <literal>ALIAS</literal> -newname ALIAS FOR oldname; +newname ALIAS FOR oldname; - The ALIAS syntax is more general than is suggested in the + The ALIAS syntax is more general than is suggested in the previous section: you can declare an alias for any variable, not just function parameters. The main practical use for this is to assign a different name for variables with predetermined names, such as NEW or OLD within - a trigger procedure. + a trigger function. @@ -589,7 +591,7 @@ DECLARE - Since ALIAS creates two different ways to name the same + Since ALIAS creates two different ways to name the same object, unrestricted use can be confusing. It's best to use it only for the purpose of overriding predetermined names. @@ -608,7 +610,7 @@ DECLARE database values. For example, let's say you have a column named user_id in your users table. To declare a variable with the same data type as - users.user_id you write: + users.user_id you write: user_id users.user_id%TYPE; @@ -618,7 +620,7 @@ user_id users.user_id%TYPE; By using %TYPE you don't need to know the data type of the structure you are referencing, and most importantly, if the data type of the referenced item changes in the future (for - instance: you change the type of user_id + instance: you change the type of user_id from integer to real), you might not need to change your function definition. @@ -642,9 +644,9 @@ user_id users.user_id%TYPE; - A variable of a composite type is called a row - variable (or row-type variable). Such a variable - can hold a whole row of a SELECT or FOR + A variable of a composite type is called a row + variable (or row-type variable). Such a variable + can hold a whole row of a SELECT or FOR query result, so long as that query's column set matches the declared type of the variable. The individual fields of the row value @@ -658,7 +660,7 @@ user_id users.user_id%TYPE; table_name%ROWTYPE notation; or it can be declared by giving a composite type's name. (Since every table has an associated composite type of the same name, - it actually does not matter in PostgreSQL whether you + it actually does not matter in PostgreSQL whether you write %ROWTYPE or not. But the form with %ROWTYPE is more portable.) @@ -666,21 +668,13 @@ user_id users.user_id%TYPE; Parameters to a function can be composite types (complete table rows). In that case, the - corresponding identifier $n will be a row variable, and fields can + corresponding identifier $n will be a row variable, and fields can be selected from it, for example $1.user_id. - Only the user-defined columns of a table row are accessible in a - row-type variable, not the OID or other system columns (because the - row could be from a view). The fields of the row type inherit the - table's field size or precision for data types such as - char(n). - - - - Here is an example of using composite types. table1 - and table2 are existing tables having at least the + Here is an example of using composite types. table1 + and table2 are existing tables having at least the mentioned fields: @@ -708,7 +702,7 @@ SELECT merge_fields(t.*) FROM table1 t WHERE ... ; Record variables are similar to row-type variables, but they have no predefined structure. 
They take on the actual row structure of the - row they are assigned during a SELECT or FOR command. The substructure + row they are assigned during a SELECT or FOR command. The substructure of a record variable can change each time it is assigned to. A consequence of this is that until a record variable is first assigned to, it has no substructure, and any attempt to access a @@ -716,13 +710,13 @@ SELECT merge_fields(t.*) FROM table1 t WHERE ... ; - Note that RECORD is not a true data type, only a placeholder. + Note that RECORD is not a true data type, only a placeholder. One should also realize that when a PL/pgSQL - function is declared to return type record, this is not quite the + function is declared to return type record, this is not quite the same concept as a record variable, even though such a function might use a record variable to hold its result. In both cases the actual row structure is unknown when the function is written, but for a function - returning record the actual structure is determined when the + returning record the actual structure is determined when the calling query is parsed, whereas a record variable can change its row structure on-the-fly. @@ -732,15 +726,15 @@ SELECT merge_fields(t.*) FROM table1 t WHERE ... ; Collation of <application>PL/pgSQL</application> Variables - collation - in PL/pgSQL + collation + in PL/pgSQL When a PL/pgSQL function has one or more parameters of collatable data types, a collation is identified for each function call depending on the collations assigned to the actual - arguments, as described in . If a collation is + arguments, as described in . If a collation is successfully identified (i.e., there are no conflicts of implicit collations among the arguments) then all the collatable parameters are treated as having that collation implicitly. This will affect the @@ -758,9 +752,9 @@ SELECT less_than(text_field_1, text_field_2) FROM table1; SELECT less_than(text_field_1, text_field_2 COLLATE "C") FROM table1; - The first use of less_than will use the common collation - of text_field_1 and text_field_2 for - the comparison, while the second use will use C collation. + The first use of less_than will use the common collation + of text_field_1 and text_field_2 for + the comparison, while the second use will use C collation. @@ -790,7 +784,7 @@ $$ LANGUAGE plpgsql; A local variable of a collatable data type can have a different collation - associated with it by including the COLLATE option in its + associated with it by including the COLLATE option in its declaration, for example @@ -803,7 +797,7 @@ DECLARE - Also, of course explicit COLLATE clauses can be written inside + Also, of course explicit COLLATE clauses can be written inside a function if it is desired to force a particular collation to be used in a particular operation. For example, @@ -838,30 +832,30 @@ IF expression THEN ... SELECT expression - to the main SQL engine. While forming the SELECT command, + to the main SQL engine. While forming the SELECT command, any occurrences of PL/pgSQL variable names are replaced by parameters, as discussed in detail in - . + . This allows the query plan for the SELECT to be prepared just once and then reused for subsequent evaluations with different values of the variables. Thus, what really happens on first use of an expression is essentially a - PREPARE command. For example, if we have declared - two integer variables x and y, and we write + PREPARE command. 
For example, if we have declared + two integer variables x and y, and we write IF x < y THEN ... what happens behind the scenes is equivalent to -PREPARE statement_name(integer, integer) AS SELECT $1 < $2; +PREPARE statement_name(integer, integer) AS SELECT $1 < $2; - and then this prepared statement is EXECUTEd for each - execution of the IF statement, with the current values + and then this prepared statement is EXECUTEd for each + execution of the IF statement, with the current values of the PL/pgSQL variables supplied as parameter values. Normally these details are not important to a PL/pgSQL user, but they are useful to know when trying to diagnose a problem. - More information appears in . + More information appears in . @@ -874,8 +868,8 @@ PREPARE statement_name(integer, integer) AS SELECT $1 < $2; PL/pgSQL. Anything not recognized as one of these statement types is presumed to be an SQL command and is sent to the main database engine to execute, - as described in - and . + as described in + and . @@ -888,20 +882,20 @@ PREPARE statement_name(integer, integer) AS SELECT $1 < $2; variable { := | = } expression; As explained previously, the expression in such a statement is evaluated - by means of an SQL SELECT command sent to the main + by means of an SQL SELECT command sent to the main database engine. The expression must yield a single value (possibly a row value, if the variable is a row or record variable). The target variable can be a simple variable (optionally qualified with a block name), a field of a row or record variable, or an element of an array - that is a simple variable or field. Equal (=) can be - used instead of PL/SQL-compliant :=. + that is a simple variable or field. Equal (=) can be + used instead of PL/SQL-compliant :=. If the expression's result data type doesn't match the variable's data type, the value will be coerced as though by an assignment cast - (see ). If no assignment cast is known - for the pair of data types involved, the PL/pgSQL + (see ). If no assignment cast is known + for the pair of data types involved, the PL/pgSQL interpreter will attempt to convert the result value textually, that is by applying the result type's output function followed by the variable type's input function. Note that this could result in run-time errors @@ -923,7 +917,7 @@ my_record.user_id := 20; For any SQL command that does not return rows, for example - INSERT without a RETURNING clause, you can + INSERT without a RETURNING clause, you can execute the command within a PL/pgSQL function just by writing the command. @@ -933,18 +927,18 @@ my_record.user_id := 20; in the command text is treated as a parameter, and then the current value of the variable is provided as the parameter value at run time. This is exactly like the processing described earlier - for expressions; for details see . + for expressions; for details see . When executing a SQL command in this way, PL/pgSQL may cache and re-use the execution plan for the command, as discussed in - . + . - Sometimes it is useful to evaluate an expression or SELECT + Sometimes it is useful to evaluate an expression or SELECT query but discard the result, for example when calling a function that has side-effects but no useful result value. To do this in PL/pgSQL, use the @@ -956,9 +950,9 @@ PERFORM query; This executes query and discards the result. Write the query the same - way you would write an SQL SELECT command, but replace the - initial keyword SELECT with PERFORM. 
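As a minimal sketch of that substitution (pg_notify is the built-in notification function; the channel name, payload, and wrapper function are invented):

CREATE FUNCTION poke_workers() RETURNS void AS $$
BEGIN
    -- pg_notify returns void; PERFORM evaluates the call and discards the result
    PERFORM pg_notify('job_queue', 'wake up');
END;
$$ LANGUAGE plpgsql;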
- For WITH queries, use PERFORM and then + way you would write an SQL SELECT command, but replace the + initial keyword SELECT with PERFORM. + For WITH queries, use PERFORM and then place the query in parentheses. (In this case, the query can only return one row.) PL/pgSQL variables will be @@ -966,7 +960,7 @@ PERFORM query; and the plan is cached in the same way. Also, the special variable FOUND is set to true if the query produced at least one row, or false if it produced no rows (see - ). + ). @@ -976,7 +970,7 @@ PERFORM query; present the only accepted way to do it is PERFORM. A SQL command that can return rows, such as SELECT, will be rejected as an error - unless it has an INTO clause as discussed in the + unless it has an INTO clause as discussed in the next section. @@ -1006,7 +1000,7 @@ PERFORM create_mv('cs_session_page_requests_mv', my_query); The result of a SQL command yielding a single row (possibly of multiple columns) can be assigned to a record variable, row-type variable, or list of scalar variables. This is done by writing the base SQL command and - adding an INTO clause. For example, + adding an INTO clause. For example, SELECT select_expressions INTO STRICT target FROM ...; @@ -1021,21 +1015,21 @@ DELETE ... RETURNING expressions INTO STRIC PL/pgSQL variables will be substituted into the rest of the query, and the plan is cached, just as described above for commands that do not return rows. - This works for SELECT, - INSERT/UPDATE/DELETE with - RETURNING, and utility commands that return row-set - results (such as EXPLAIN). - Except for the INTO clause, the SQL command is the same + This works for SELECT, + INSERT/UPDATE/DELETE with + RETURNING, and utility commands that return row-set + results (such as EXPLAIN). + Except for the INTO clause, the SQL command is the same as it would be written outside PL/pgSQL. - Note that this interpretation of SELECT with INTO - is quite different from PostgreSQL's regular - SELECT INTO command, wherein the INTO + Note that this interpretation of SELECT with INTO + is quite different from PostgreSQL's regular + SELECT INTO command, wherein the INTO target is a newly created table. If you want to create a table from a - SELECT result inside a + SELECT result inside a PL/pgSQL function, use the syntax CREATE TABLE ... AS SELECT. @@ -1050,24 +1044,24 @@ DELETE ... RETURNING expressions INTO STRIC - The INTO clause can appear almost anywhere in the SQL + The INTO clause can appear almost anywhere in the SQL command. Customarily it is written either just before or just after the list of select_expressions in a - SELECT command, or at the end of the command for other + SELECT command, or at the end of the command for other command types. It is recommended that you follow this convention in case the PL/pgSQL parser becomes stricter in future versions. - If STRICT is not specified in the INTO + If STRICT is not specified in the INTO clause, then target will be set to the first row returned by the query, or to nulls if the query returned no rows. - (Note that the first row is not - well-defined unless you've used ORDER BY.) Any result rows + (Note that the first row is not + well-defined unless you've used ORDER BY.) Any result rows after the first row are discarded. 
You can check the special FOUND variable (see - ) to + ) to determine whether a row was returned: @@ -1079,7 +1073,7 @@ END IF; If the STRICT option is specified, the query must return exactly one row or a run-time error will be reported, either - NO_DATA_FOUND (no rows) or TOO_MANY_ROWS + NO_DATA_FOUND (no rows) or TOO_MANY_ROWS (more than one row). You can use an exception block if you wish to catch the error, for example: @@ -1093,28 +1087,28 @@ BEGIN RAISE EXCEPTION 'employee % not unique', myname; END; - Successful execution of a command with STRICT + Successful execution of a command with STRICT always sets FOUND to true. - For INSERT/UPDATE/DELETE with - RETURNING, PL/pgSQL reports + For INSERT/UPDATE/DELETE with + RETURNING, PL/pgSQL reports an error for more than one returned row, even when STRICT is not specified. This is because there - is no option such as ORDER BY with which to determine + is no option such as ORDER BY with which to determine which affected row should be returned. - If print_strict_params is enabled for the function, + If print_strict_params is enabled for the function, then when an error is thrown because the requirements - of STRICT are not met, the DETAIL part of + of STRICT are not met, the DETAIL part of the error message will include information about the parameters passed to the query. - You can change the print_strict_params + You can change the print_strict_params setting for all functions by setting - plpgsql.print_strict_params, though only subsequent + plpgsql.print_strict_params, though only subsequent function compilations will be affected. You can also enable it on a per-function basis by using a compiler option, for example: @@ -1140,14 +1134,14 @@ CONTEXT: PL/pgSQL function get_userid(text) line 6 at SQL statement - The STRICT option matches the behavior of + The STRICT option matches the behavior of Oracle PL/SQL's SELECT INTO and related statements. To handle cases where you need to process multiple result rows - from a SQL query, see . + from a SQL query, see . @@ -1161,7 +1155,7 @@ CONTEXT: PL/pgSQL function get_userid(text) line 6 at SQL statement that will involve different tables or different data types each time they are executed. PL/pgSQL's normal attempts to cache plans for commands (as discussed in - ) will not work in such + ) will not work in such scenarios. To handle this sort of problem, the EXECUTE statement is provided: @@ -1174,12 +1168,12 @@ EXECUTE command-string INT command to be executed. The optional target is a record variable, a row variable, or a comma-separated list of simple variables and record/row fields, into which the results of - the command will be stored. The optional USING expressions + the command will be stored. The optional USING expressions supply values to be inserted into the command. - No substitution of PL/pgSQL variables is done on the + No substitution of PL/pgSQL variables is done on the computed command string. Any required variable values must be inserted in the command string as it is constructed; or you can use parameters as described below. @@ -1207,14 +1201,14 @@ EXECUTE command-string INT - If the STRICT option is given, an error is reported + If the STRICT option is given, an error is reported unless the query produces exactly one row. The command string can use parameter values, which are referenced - in the command as $1, $2, etc. - These symbols refer to values supplied in the USING + in the command as $1, $2, etc. + These symbols refer to values supplied in the USING clause. 
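For instance, a sketch of passing values through USING from inside a PL/pgSQL function; the table and column names are invented:

CREATE FUNCTION count_recent_orders(cust_id integer, since_date date)
RETURNS bigint AS $$
DECLARE
    c bigint;
BEGIN
    -- $1 and $2 are filled in from the USING list, not interpolated as text
    EXECUTE 'SELECT count(*) FROM orders WHERE customer_id = $1 AND placed_on >= $2'
        INTO c
        USING cust_id, since_date;
    RETURN c;
END;
$$ LANGUAGE plpgsql;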
This method is often preferable to inserting data values into the command string as text: it avoids run-time overhead of converting the values to text and back, and it is much less prone @@ -1240,7 +1234,7 @@ EXECUTE 'SELECT count(*) FROM ' INTO c USING checked_user, checked_date; - A cleaner approach is to use format()'s %I + A cleaner approach is to use format()'s %I specification for table or column names (strings separated by a newline are concatenated): @@ -1250,32 +1244,32 @@ EXECUTE format('SELECT count(*) FROM %I ' USING checked_user, checked_date; Another restriction on parameter symbols is that they only work in - SELECT, INSERT, UPDATE, and - DELETE commands. In other statement + SELECT, INSERT, UPDATE, and + DELETE commands. In other statement types (generically called utility statements), you must insert values textually even if they are just data values. - An EXECUTE with a simple constant command string and some - USING parameters, as in the first example above, is + An EXECUTE with a simple constant command string and some + USING parameters, as in the first example above, is functionally equivalent to just writing the command directly in PL/pgSQL and allowing replacement of PL/pgSQL variables to happen automatically. - The important difference is that EXECUTE will re-plan + The important difference is that EXECUTE will re-plan the command on each execution, generating a plan that is specific to the current parameter values; whereas PL/pgSQL may otherwise create a generic plan and cache it for re-use. In situations where the best plan depends strongly on the parameter values, it can be helpful to use - EXECUTE to positively ensure that a generic plan is not + EXECUTE to positively ensure that a generic plan is not selected. SELECT INTO is not currently supported within - EXECUTE; instead, execute a plain SELECT - command and specify INTO as part of the EXECUTE + EXECUTE; instead, execute a plain SELECT + command and specify INTO as part of the EXECUTE itself. @@ -1283,11 +1277,11 @@ EXECUTE format('SELECT count(*) FROM %I ' The PL/pgSQL EXECUTE statement is not related to the - SQL + SQL statement supported by the PostgreSQL server. The server's EXECUTE statement cannot be used directly within - PL/pgSQL functions (and is not needed). + PL/pgSQL functions (and is not needed). @@ -1319,14 +1313,14 @@ EXECUTE format('SELECT count(*) FROM %I ' of single quotes. The recommended method for quoting fixed text in your function body is dollar quoting. (If you have legacy code that does not use dollar quoting, please refer to the - overview in , which can save you + overview in , which can save you some effort when translating said code to a more reasonable scheme.) Dynamic values require careful handling since they might contain quote characters. - An example using format() (this assumes that you are + An example using format() (this assumes that you are dollar quoting the function body so quote marks need not be doubled): EXECUTE format('UPDATE tbl SET %I = $1 ' @@ -1347,11 +1341,11 @@ EXECUTE 'UPDATE tbl SET ' This example demonstrates the use of the quote_ident and quote_literal functions (see ). For safety, expressions containing column + linkend="functions-string"/>). For safety, expressions containing column or table identifiers should be passed through quote_ident before insertion in a dynamic query. Expressions containing values that should be literal strings in the - constructed command should be passed through quote_literal. 
+ constructed command should be passed through quote_literal. These functions take the appropriate steps to return the input text enclosed in double or single quotes respectively, with any embedded special characters properly escaped. @@ -1360,12 +1354,12 @@ EXECUTE 'UPDATE tbl SET ' Because quote_literal is labeled STRICT, it will always return null when called with a - null argument. In the above example, if newvalue or - keyvalue were null, the entire dynamic query string would + null argument. In the above example, if newvalue or + keyvalue were null, the entire dynamic query string would become null, leading to an error from EXECUTE. - You can avoid this problem by using the quote_nullable - function, which works the same as quote_literal except that - when called with a null argument it returns the string NULL. + You can avoid this problem by using the quote_nullable + function, which works the same as quote_literal except that + when called with a null argument it returns the string NULL. For example, EXECUTE 'UPDATE tbl SET ' @@ -1376,26 +1370,26 @@ EXECUTE 'UPDATE tbl SET ' || quote_nullable(keyvalue); If you are dealing with values that might be null, you should usually - use quote_nullable in place of quote_literal. + use quote_nullable in place of quote_literal. As always, care must be taken to ensure that null values in a query do - not deliver unintended results. For example the WHERE clause + not deliver unintended results. For example the WHERE clause 'WHERE key = ' || quote_nullable(keyvalue) - will never succeed if keyvalue is null, because the - result of using the equality operator = with a null operand + will never succeed if keyvalue is null, because the + result of using the equality operator = with a null operand is always null. If you wish null to work like an ordinary key value, you would need to rewrite the above as 'WHERE key IS NOT DISTINCT FROM ' || quote_nullable(keyvalue) - (At present, IS NOT DISTINCT FROM is handled much less - efficiently than =, so don't do this unless you must. - See for - more information on nulls and IS DISTINCT.) + (At present, IS NOT DISTINCT FROM is handled much less + efficiently than =, so don't do this unless you must. + See for + more information on nulls and IS DISTINCT.) @@ -1409,24 +1403,24 @@ EXECUTE 'UPDATE tbl SET ' || '$$ WHERE key = ' || quote_literal(keyvalue); - because it would break if the contents of newvalue - happened to contain $$. The same objection would + because it would break if the contents of newvalue + happened to contain $$. The same objection would apply to any other dollar-quoting delimiter you might pick. So, to safely quote text that is not known in advance, you - must use quote_literal, - quote_nullable, or quote_ident, as appropriate. + must use quote_literal, + quote_nullable, or quote_ident, as appropriate. Dynamic SQL statements can also be safely constructed using the format function (see ). For example: + linkend="functions-string"/>). For example: EXECUTE format('UPDATE tbl SET %I = %L ' 'WHERE key = %L', colname, newvalue, keyvalue); - %I is equivalent to quote_ident, and - %L is equivalent to quote_nullable. + %I is equivalent to quote_ident, and + %L is equivalent to quote_nullable. 
The format function can be used in conjunction with the USING clause: @@ -1435,15 +1429,15 @@ EXECUTE format('UPDATE tbl SET %I = $1 WHERE key = $2', colname) This form is better because the variables are handled in their native data type format, rather than unconditionally converting them to - text and quoting them via %L. It is also more efficient. + text and quoting them via %L. It is also more efficient. A much larger example of a dynamic command and EXECUTE can be seen in , which builds and executes a - CREATE FUNCTION command to define a new function. + linkend="plpgsql-porting-ex2"/>, which builds and executes a + CREATE FUNCTION command to define a new function. @@ -1460,14 +1454,14 @@ GET CURRENT DIAGNOSTICS variable This command allows retrieval of system status indicators. - CURRENT is a noise word (but see also GET STACKED - DIAGNOSTICS in ). + CURRENT is a noise word (but see also GET STACKED + DIAGNOSTICS in ). Each item is a key word identifying a status value to be assigned to the specified variable (which should be of the right data type to receive it). The currently available status items are shown - in . Colon-equal - (:=) can be used instead of the SQL-standard = + in . Colon-equal + (:=) can be used instead of the SQL-standard = token. An example: GET DIAGNOSTICS integer_var = ROW_COUNT; @@ -1487,13 +1481,13 @@ GET DIAGNOSTICS integer_var = ROW_COUNT; ROW_COUNT - bigint + bigint the number of rows processed by the most recent SQL command RESULT_OID - oid + oid the OID of the last row inserted by the most recent SQL command (only useful after an INSERT command into a table having @@ -1501,9 +1495,9 @@ GET DIAGNOSTICS integer_var = ROW_COUNT; PG_CONTEXT - text + text line(s) of text describing the current call stack - (see ) + (see ) @@ -1526,33 +1520,33 @@ GET DIAGNOSTICS integer_var = ROW_COUNT; - A PERFORM statement sets FOUND + A PERFORM statement sets FOUND true if it produces (and discards) one or more rows, false if no row is produced. - UPDATE, INSERT, and DELETE + UPDATE, INSERT, and DELETE statements set FOUND true if at least one row is affected, false if no row is affected. - A FETCH statement sets FOUND + A FETCH statement sets FOUND true if it returns a row, false if no row is returned. - A MOVE statement sets FOUND + A MOVE statement sets FOUND true if it successfully repositions the cursor, false otherwise. - A FOR or FOREACH statement sets + A FOR or FOREACH statement sets FOUND true if it iterates one or more times, else false. FOUND is set this way when the @@ -1625,7 +1619,7 @@ END; In Oracle's PL/SQL, empty statement lists are not allowed, and so - NULL statements are required for situations + NULL statements are required for situations such as this. PL/pgSQL allows you to just write nothing, instead. @@ -1639,9 +1633,9 @@ END; Control structures are probably the most useful (and - important) part of PL/pgSQL. With - PL/pgSQL's control structures, - you can manipulate PostgreSQL data in a very + important) part of PL/pgSQL. With + PL/pgSQL's control structures, + you can manipulate PostgreSQL data in a very flexible and powerful way. @@ -1655,7 +1649,7 @@ END; - <command>RETURN</> + <command>RETURN</command> RETURN expression; @@ -1665,7 +1659,7 @@ RETURN expression; RETURN with an expression terminates the function and returns the value of expression to the caller. This form - is used for PL/pgSQL functions that do + is used for PL/pgSQL functions that do not return a set. 
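Pulling together the RETURN form just shown with the GET DIAGNOSTICS and FOUND indicators described earlier, a sketch along these lines is one plausible usage; the table and function names are invented:

CREATE FUNCTION archive_done_items() RETURNS bigint AS $$
DECLARE
    n bigint;
BEGIN
    UPDATE work_items SET archived = true WHERE done;   -- hypothetical table
    GET DIAGNOSTICS n = ROW_COUNT;                       -- rows touched by the UPDATE
    IF NOT FOUND THEN
        RAISE NOTICE 'nothing to archive';
    END IF;
    RETURN n;                                            -- non-set-returning RETURN
END;
$$ LANGUAGE plpgsql;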
@@ -1716,7 +1710,7 @@ RETURN (1, 2, 'three'::text); -- must cast columns to correct types - <command>RETURN NEXT</> and <command>RETURN QUERY</command> + <command>RETURN NEXT</command> and <command>RETURN QUERY</command> RETURN NEXT in PL/pgSQL @@ -1733,8 +1727,8 @@ RETURN QUERY EXECUTE command-string < - When a PL/pgSQL function is declared to return - SETOF sometype, the procedure + When a PL/pgSQL function is declared to return + SETOF sometype, the procedure to follow is slightly different. In that case, the individual items to return are specified by a sequence of RETURN NEXT or RETURN QUERY commands, and @@ -1755,7 +1749,7 @@ RETURN QUERY EXECUTE command-string < QUERY do not actually return from the function — they simply append zero or more rows to the function's result set. Execution then continues with the next statement in the - PL/pgSQL function. As successive + PL/pgSQL function. As successive RETURN NEXT or RETURN QUERY commands are executed, the result set is built up. A final RETURN, which should have no @@ -1767,8 +1761,8 @@ RETURN QUERY EXECUTE command-string < RETURN QUERY has a variant RETURN QUERY EXECUTE, which specifies the query to be executed dynamically. Parameter expressions can - be inserted into the computed query string via USING, - in just the same way as in the EXECUTE command. + be inserted into the computed query string via USING, + in just the same way as in the EXECUTE command. @@ -1778,9 +1772,9 @@ RETURN QUERY EXECUTE command-string < variable(s) will be saved for eventual return as a row of the result. Note that you must declare the function as returning SETOF record when there are multiple output - parameters, or SETOF sometype + parameters, or SETOF sometype when there is just one output parameter of type - sometype, in order to create a set-returning + sometype, in order to create a set-returning function with output parameters. @@ -1823,8 +1817,8 @@ $BODY$ BEGIN RETURN QUERY SELECT flightid FROM flight - WHERE flightdate >= $1 - AND flightdate < ($1 + 1); + WHERE flightdate >= $1 + AND flightdate < ($1 + 1); -- Since execution is not finished, we can check whether rows were returned -- and raise exception if not. @@ -1848,15 +1842,15 @@ SELECT * FROM get_available_flightid(CURRENT_DATE); The current implementation of RETURN NEXT and RETURN QUERY stores the entire result set before returning from the function, as discussed above. That - means that if a PL/pgSQL function produces a + means that if a PL/pgSQL function produces a very large result set, performance might be poor: data will be written to disk to avoid memory exhaustion, but the function itself will not return until the entire result set has been - generated. A future version of PL/pgSQL might + generated. A future version of PL/pgSQL might allow users to define set-returning functions that do not have this limitation. Currently, the point at which data begins being written to disk is controlled by the - + configuration variable. Administrators who have sufficient memory to store larger result sets in memory should consider increasing this parameter. @@ -1865,38 +1859,86 @@ SELECT * FROM get_available_flightid(CURRENT_DATE); + + Returning From a Procedure + + + A procedure does not have a return value. A procedure can therefore end + without a RETURN statement. If you wish to use + a RETURN statement to exit the code early, write + just RETURN with no expression. 
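A sketch of the early-exit pattern just mentioned, using an invented procedure and table name:

CREATE PROCEDURE prune_log(keep_days integer)
LANGUAGE plpgsql
AS $$
BEGIN
    IF keep_days IS NULL THEN
        RETURN;                                  -- bare RETURN: leave the procedure early
    END IF;
    DELETE FROM app_log                          -- hypothetical table
        WHERE logged_at < now() - keep_days * interval '1 day';
END;
$$;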
+ + + + If the procedure has output parameters, the final values of the output + parameter variables will be returned to the caller. + + + + + Calling a Procedure + + + A PL/pgSQL function, procedure, + or DO block can call a procedure + using CALL. Output parameters are handled + differently from the way that CALL works in plain + SQL. Each INOUT parameter of the procedure must + correspond to a variable in the CALL statement, and + whatever the procedure returns is assigned back to that variable after + it returns. For example: + +CREATE PROCEDURE triple(INOUT x int) +LANGUAGE plpgsql +AS $$ +BEGIN + x := x * 3; +END; +$$; + +DO $$ +DECLARE myvar int := 5; +BEGIN + CALL triple(myvar); + RAISE NOTICE 'myvar = %', myvar; -- prints 15 +END +$$; + + + + Conditionals - IF and CASE statements let you execute + IF and CASE statements let you execute alternative commands based on certain conditions. - PL/pgSQL has three forms of IF: + PL/pgSQL has three forms of IF: - IF ... THEN ... END IF + IF ... THEN ... END IF - IF ... THEN ... ELSE ... END IF + IF ... THEN ... ELSE ... END IF - IF ... THEN ... ELSIF ... THEN ... ELSE ... END IF + IF ... THEN ... ELSIF ... THEN ... ELSE ... END IF - and two forms of CASE: + and two forms of CASE: - CASE ... WHEN ... THEN ... ELSE ... END CASE + CASE ... WHEN ... THEN ... ELSE ... END CASE - CASE WHEN ... THEN ... ELSE ... END CASE + CASE WHEN ... THEN ... ELSE ... END CASE - <literal>IF-THEN</> + <literal>IF-THEN</literal> IF boolean-expression THEN @@ -1923,7 +1965,7 @@ END IF; - <literal>IF-THEN-ELSE</> + <literal>IF-THEN-ELSE</literal> IF boolean-expression THEN @@ -1964,7 +2006,7 @@ END IF; - <literal>IF-THEN-ELSIF</> + <literal>IF-THEN-ELSIF</literal> IF boolean-expression THEN @@ -1983,15 +2025,15 @@ END IF; Sometimes there are more than just two alternatives. - IF-THEN-ELSIF provides a convenient + IF-THEN-ELSIF provides a convenient method of checking several alternatives in turn. - The IF conditions are tested successively + The IF conditions are tested successively until the first one that is true is found. Then the associated statement(s) are executed, after which control - passes to the next statement after END IF. - (Any subsequent IF conditions are not - tested.) If none of the IF conditions is true, - then the ELSE block (if any) is executed. + passes to the next statement after END IF. + (Any subsequent IF conditions are not + tested.) If none of the IF conditions is true, + then the ELSE block (if any) is executed. @@ -2012,8 +2054,8 @@ END IF; - The key word ELSIF can also be spelled - ELSEIF. + The key word ELSIF can also be spelled + ELSEIF. @@ -2033,14 +2075,14 @@ END IF; - However, this method requires writing a matching END IF - for each IF, so it is much more cumbersome than - using ELSIF when there are many alternatives. + However, this method requires writing a matching END IF + for each IF, so it is much more cumbersome than + using ELSIF when there are many alternatives. - Simple <literal>CASE</> + Simple <literal>CASE</literal> CASE search-expression @@ -2055,16 +2097,16 @@ END CASE; - The simple form of CASE provides conditional execution - based on equality of operands. The search-expression + The simple form of CASE provides conditional execution + based on equality of operands. The search-expression is evaluated (once) and successively compared to each - expression in the WHEN clauses. + expression in the WHEN clauses. 
If a match is found, then the corresponding statements are executed, and then control - passes to the next statement after END CASE. (Subsequent - WHEN expressions are not evaluated.) If no match is - found, the ELSE statements are - executed; but if ELSE is not present, then a + passes to the next statement after END CASE. (Subsequent + WHEN expressions are not evaluated.) If no match is + found, the ELSE statements are + executed; but if ELSE is not present, then a CASE_NOT_FOUND exception is raised. @@ -2083,7 +2125,7 @@ END CASE; - Searched <literal>CASE</> + Searched <literal>CASE</literal> CASE @@ -2098,16 +2140,16 @@ END CASE; - The searched form of CASE provides conditional execution - based on truth of Boolean expressions. Each WHEN clause's + The searched form of CASE provides conditional execution + based on truth of Boolean expressions. Each WHEN clause's boolean-expression is evaluated in turn, - until one is found that yields true. Then the + until one is found that yields true. Then the corresponding statements are executed, and - then control passes to the next statement after END CASE. - (Subsequent WHEN expressions are not evaluated.) - If no true result is found, the ELSE + then control passes to the next statement after END CASE. + (Subsequent WHEN expressions are not evaluated.) + If no true result is found, the ELSE statements are executed; - but if ELSE is not present, then a + but if ELSE is not present, then a CASE_NOT_FOUND exception is raised. @@ -2125,9 +2167,9 @@ END CASE; - This form of CASE is entirely equivalent to - IF-THEN-ELSIF, except for the rule that reaching - an omitted ELSE clause results in an error rather + This form of CASE is entirely equivalent to + IF-THEN-ELSIF, except for the rule that reaching + an omitted ELSE clause results in an error rather than doing nothing. @@ -2143,14 +2185,14 @@ END CASE; - With the LOOP, EXIT, - CONTINUE, WHILE, FOR, - and FOREACH statements, you can arrange for your - PL/pgSQL function to repeat a series of commands. + With the LOOP, EXIT, + CONTINUE, WHILE, FOR, + and FOREACH statements, you can arrange for your + PL/pgSQL function to repeat a series of commands. - <literal>LOOP</> + <literal>LOOP</literal> <<label>> @@ -2160,17 +2202,17 @@ END LOOP label ; - LOOP defines an unconditional loop that is repeated - indefinitely until terminated by an EXIT or + LOOP defines an unconditional loop that is repeated + indefinitely until terminated by an EXIT or RETURN statement. The optional - label can be used by EXIT + label can be used by EXIT and CONTINUE statements within nested loops to specify which loop those statements refer to. - <literal>EXIT</> + <literal>EXIT</literal> EXIT @@ -2184,21 +2226,21 @@ EXIT label WHEN If no label is given, the innermost loop is terminated and the statement following END - LOOP is executed next. If label + LOOP is executed next. If label is given, it must be the label of the current or some outer level of nested loop or block. Then the named loop or block is terminated and control continues with the statement after the - loop's/block's corresponding END. + loop's/block's corresponding END. - If WHEN is specified, the loop exit occurs only if - boolean-expression is true. Otherwise, control passes - to the statement after EXIT. + If WHEN is specified, the loop exit occurs only if + boolean-expression is true. Otherwise, control passes + to the statement after EXIT. 
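
   A self-contained sketch of EXIT ... WHEN together with a loop label
   (the counter variable and the bound of 100 are arbitrary choices for
   illustration):

DO $$
DECLARE
    counter integer := 0;
BEGIN
    <<read_loop>>
    LOOP
        counter := counter + 1;
        EXIT read_loop WHEN counter >= 100;   -- leave the labeled loop once the bound is reached
    END LOOP read_loop;
    RAISE NOTICE 'exited after % iterations', counter;
END;
$$;
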
- EXIT can be used with all types of loops; it is + EXIT can be used with all types of loops; it is not limited to use with unconditional loops. @@ -2242,7 +2284,7 @@ END; - <literal>CONTINUE</> + <literal>CONTINUE</literal> CONTINUE @@ -2254,25 +2296,25 @@ CONTINUE label WHEN - If no label is given, the next iteration of + If no label is given, the next iteration of the innermost loop is begun. That is, all statements remaining in the loop body are skipped, and control returns to the loop control expression (if any) to determine whether another loop iteration is needed. - If label is present, it + If label is present, it specifies the label of the loop whose execution will be continued. - If WHEN is specified, the next iteration of the - loop is begun only if boolean-expression is + If WHEN is specified, the next iteration of the + loop is begun only if boolean-expression is true. Otherwise, control passes to the statement after - CONTINUE. + CONTINUE. - CONTINUE can be used with all types of loops; it + CONTINUE can be used with all types of loops; it is not limited to use with unconditional loops. @@ -2291,7 +2333,7 @@ END LOOP; - <literal>WHILE</> + <literal>WHILE</literal> WHILE @@ -2306,7 +2348,7 @@ END LOOP label ; - The WHILE statement repeats a + The WHILE statement repeats a sequence of statements so long as the boolean-expression evaluates to true. The expression is checked just before @@ -2328,7 +2370,7 @@ END LOOP; - <literal>FOR</> (Integer Variant) + <literal>FOR</literal> (Integer Variant) <<label>> @@ -2338,22 +2380,22 @@ END LOOP label ; - This form of FOR creates a loop that iterates over a range + This form of FOR creates a loop that iterates over a range of integer values. The variable name is automatically defined as type - integer and exists only inside the loop (any existing + integer and exists only inside the loop (any existing definition of the variable name is ignored within the loop). The two expressions giving the lower and upper bound of the range are evaluated once when entering - the loop. If the BY clause isn't specified the iteration - step is 1, otherwise it's the value specified in the BY + the loop. If the BY clause isn't specified the iteration + step is 1, otherwise it's the value specified in the BY clause, which again is evaluated once on loop entry. - If REVERSE is specified then the step value is + If REVERSE is specified then the step value is subtracted, rather than added, after each iteration. - Some examples of integer FOR loops: + Some examples of integer FOR loops: FOR i IN 1..10 LOOP -- i will take on the values 1,2,3,4,5,6,7,8,9,10 within the loop @@ -2371,13 +2413,13 @@ END LOOP; If the lower bound is greater than the upper bound (or less than, - in the REVERSE case), the loop body is not + in the REVERSE case), the loop body is not executed at all. No error is raised. If a label is attached to the - FOR loop then the integer loop variable can be + FOR loop then the integer loop variable can be referenced with a qualified name, using that label. @@ -2388,7 +2430,7 @@ END LOOP; Looping Through Query Results - Using a different type of FOR loop, you can iterate through + Using a different type of FOR loop, you can iterate through the results of a query and manipulate that data accordingly. The syntax is: @@ -2424,28 +2466,28 @@ END; $$ LANGUAGE plpgsql; - If the loop is terminated by an EXIT statement, the last + If the loop is terminated by an EXIT statement, the last assigned row value is still accessible after the loop. 
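
   To illustrate that last point — the loop variable keeps the row that was
   assigned last, even after an EXIT — here is a minimal sketch; the table
   tasks(id integer, done boolean) is hypothetical:

DO $$
DECLARE
    t record;
BEGIN
    -- hypothetical table: tasks(id integer, done boolean)
    FOR t IN SELECT id, done FROM tasks ORDER BY id LOOP
        EXIT WHEN NOT t.done;              -- stop at the first unfinished task
    END LOOP;
    IF FOUND THEN
        -- t still holds the last row assigned before the loop ended
        RAISE NOTICE 'stopped at task %', t.id;
    END IF;
END;
$$;
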
- The query used in this type of FOR + The query used in this type of FOR statement can be any SQL command that returns rows to the caller: - SELECT is the most common case, - but you can also use INSERT, UPDATE, or - DELETE with a RETURNING clause. Some utility - commands such as EXPLAIN will work too. + SELECT is the most common case, + but you can also use INSERT, UPDATE, or + DELETE with a RETURNING clause. Some utility + commands such as EXPLAIN will work too. - PL/pgSQL variables are substituted into the query text, + PL/pgSQL variables are substituted into the query text, and the query plan is cached for possible re-use, as discussed in - detail in and - . + detail in and + . - The FOR-IN-EXECUTE statement is another way to iterate over + The FOR-IN-EXECUTE statement is another way to iterate over rows: <<label>> @@ -2455,17 +2497,17 @@ END LOOP label ; This is like the previous form, except that the source query is specified as a string expression, which is evaluated and replanned - on each entry to the FOR loop. This allows the programmer to + on each entry to the FOR loop. This allows the programmer to choose the speed of a preplanned query or the flexibility of a dynamic query, just as with a plain EXECUTE statement. As with EXECUTE, parameter values can be inserted - into the dynamic command via USING. + into the dynamic command via USING. Another way to specify the query whose results should be iterated through is to declare it as a cursor. This is described in - . + . @@ -2473,13 +2515,13 @@ END LOOP label ; Looping Through Arrays - The FOREACH loop is much like a FOR loop, + The FOREACH loop is much like a FOR loop, but instead of iterating through the rows returned by a SQL query, it iterates through the elements of an array value. - (In general, FOREACH is meant for looping through + (In general, FOREACH is meant for looping through components of a composite-valued expression; variants for looping through composites besides arrays may be added in future.) - The FOREACH statement to loop over an array is: + The FOREACH statement to loop over an array is: <<label>> @@ -2490,7 +2532,7 @@ END LOOP label ; - Without SLICE, or if SLICE 0 is specified, + Without SLICE, or if SLICE 0 is specified, the loop iterates through individual elements of the array produced by evaluating the expression. The target variable is assigned each @@ -2522,13 +2564,13 @@ $$ LANGUAGE plpgsql; - With a positive SLICE value, FOREACH + With a positive SLICE value, FOREACH iterates through slices of the array rather than single elements. - The SLICE value must be an integer constant not larger + The SLICE value must be an integer constant not larger than the number of dimensions of the array. The target variable must be an array, and it receives successive slices of the array value, where each slice - is of the number of dimensions specified by SLICE. + is of the number of dimensions specified by SLICE. Here is an example of iterating through one-dimensional slices: @@ -2562,12 +2604,12 @@ NOTICE: row = {10,11,12} - By default, any error occurring in a PL/pgSQL + By default, any error occurring in a PL/pgSQL function aborts execution of the function, and indeed of the surrounding transaction as well. You can trap errors and recover - from them by using a BEGIN block with an - EXCEPTION clause. The syntax is an extension of the - normal syntax for a BEGIN block: + from them by using a BEGIN block with an + EXCEPTION clause. 
The syntax is an extension of the + normal syntax for a BEGIN block: <<label>> @@ -2588,31 +2630,31 @@ END; If no error occurs, this form of block simply executes all the statements, and then control passes - to the next statement after END. But if an error + to the next statement after END. But if an error occurs within the statements, further processing of the statements is - abandoned, and control passes to the EXCEPTION list. + abandoned, and control passes to the EXCEPTION list. The list is searched for the first condition matching the error that occurred. If a match is found, the corresponding handler_statements are executed, and then control passes to the next statement after - END. If no match is found, the error propagates out - as though the EXCEPTION clause were not there at all: + END. If no match is found, the error propagates out + as though the EXCEPTION clause were not there at all: the error can be caught by an enclosing block with - EXCEPTION, or if there is none it aborts processing + EXCEPTION, or if there is none it aborts processing of the function. The condition names can be any of - those shown in . A category + those shown in . A category name matches any error within its category. The special - condition name OTHERS matches every error type except - QUERY_CANCELED and ASSERT_FAILURE. + condition name OTHERS matches every error type except + QUERY_CANCELED and ASSERT_FAILURE. (It is possible, but often unwise, to trap those two error types by name.) Condition names are not case-sensitive. Also, an error condition can be specified - by SQLSTATE code; for example these are equivalent: + by SQLSTATE code; for example these are equivalent: WHEN division_by_zero THEN ... WHEN SQLSTATE '22012' THEN ... @@ -2622,13 +2664,13 @@ WHEN SQLSTATE '22012' THEN ... If a new error occurs within the selected handler_statements, it cannot be caught - by this EXCEPTION clause, but is propagated out. - A surrounding EXCEPTION clause could catch it. + by this EXCEPTION clause, but is propagated out. + A surrounding EXCEPTION clause could catch it. - When an error is caught by an EXCEPTION clause, - the local variables of the PL/pgSQL function + When an error is caught by an EXCEPTION clause, + the local variables of the PL/pgSQL function remain as they were when the error occurred, but all changes to persistent database state within the block are rolled back. As an example, consider this fragment: @@ -2646,32 +2688,32 @@ EXCEPTION END; - When control reaches the assignment to y, it will - fail with a division_by_zero error. This will be caught by - the EXCEPTION clause. The value returned in the - RETURN statement will be the incremented value of - x, but the effects of the UPDATE command will - have been rolled back. The INSERT command preceding the + When control reaches the assignment to y, it will + fail with a division_by_zero error. This will be caught by + the EXCEPTION clause. The value returned in the + RETURN statement will be the incremented value of + x, but the effects of the UPDATE command will + have been rolled back. The INSERT command preceding the block is not rolled back, however, so the end result is that the database - contains Tom Jones not Joe Jones. + contains Tom Jones not Joe Jones. - A block containing an EXCEPTION clause is significantly + A block containing an EXCEPTION clause is significantly more expensive to enter and exit than a block without one. Therefore, - don't use EXCEPTION without need. + don't use EXCEPTION without need. 
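
   As a compact, self-contained illustration of the syntax above (the
   function safe_div is an invented example, not something defined
   elsewhere in this chapter):

CREATE FUNCTION safe_div(num numeric, den numeric) RETURNS numeric AS $$
BEGIN
    RETURN num / den;
EXCEPTION
    WHEN division_by_zero THEN        -- equivalently: WHEN SQLSTATE '22012' THEN
        RETURN NULL;                  -- trap the error and return a fallback value
END;
$$ LANGUAGE plpgsql;

SELECT safe_div(10, 0);   -- returns NULL instead of raising an error
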
- Exceptions with <command>UPDATE</>/<command>INSERT</> + Exceptions with <command>UPDATE</command>/<command>INSERT</command> This example uses exception handling to perform either - UPDATE or INSERT, as appropriate. It is - recommended that applications use INSERT with - ON CONFLICT DO UPDATE rather than actually using + UPDATE or INSERT, as appropriate. It is + recommended that applications use INSERT with + ON CONFLICT DO UPDATE rather than actually using this pattern. This example serves primarily to illustrate use of PL/pgSQL control flow structures: @@ -2705,8 +2747,8 @@ SELECT merge_db(1, 'david'); SELECT merge_db(1, 'dennis'); - This coding assumes the unique_violation error is caused by - the INSERT, and not by, say, an INSERT in a + This coding assumes the unique_violation error is caused by + the INSERT, and not by, say, an INSERT in a trigger function on the table. It might also misbehave if there is more than one unique index on the table, since it will retry the operation regardless of which index caused the error. @@ -2722,14 +2764,14 @@ SELECT merge_db(1, 'dennis'); Exception handlers frequently need to identify the specific error that occurred. There are two ways to get information about the current - exception in PL/pgSQL: special variables and the + exception in PL/pgSQL: special variables and the GET STACKED DIAGNOSTICS command. Within an exception handler, the special variable SQLSTATE contains the error code that corresponds to - the exception that was raised (refer to + the exception that was raised (refer to for a list of possible error codes). The special variable SQLERRM contains the error message associated with the exception. These variables are undefined outside exception handlers. @@ -2748,7 +2790,7 @@ GET STACKED DIAGNOSTICS variable { = | := } variable (which should be of the right data type to receive it). The currently available status items are shown - in . + in .
@@ -2764,54 +2806,54 @@ GET STACKED DIAGNOSTICS variable { = | := } RETURNED_SQLSTATE - text + text the SQLSTATE error code of the exception COLUMN_NAME - text + text the name of the column related to exception CONSTRAINT_NAME - text + text the name of the constraint related to exception PG_DATATYPE_NAME - text + text the name of the data type related to exception MESSAGE_TEXT - text + text the text of the exception's primary message TABLE_NAME - text + text the name of the table related to exception SCHEMA_NAME - text + text the name of the schema related to exception PG_EXCEPTION_DETAIL - text + text the text of the exception's detail message, if any PG_EXCEPTION_HINT - text + text the text of the exception's hint message, if any PG_EXCEPTION_CONTEXT - text + text line(s) of text describing the call stack at the time of the - exception (see ) + exception (see ) @@ -2847,12 +2889,12 @@ END; The GET DIAGNOSTICS command, previously described - in , retrieves information + in , retrieves information about current execution state (whereas the GET STACKED DIAGNOSTICS command discussed above reports information about - the execution state as of a previous error). Its PG_CONTEXT + the execution state as of a previous error). Its PG_CONTEXT status item is useful for identifying the current execution - location. PG_CONTEXT returns a text string with line(s) + location. PG_CONTEXT returns a text string with line(s) of text describing the call stack. The first line refers to the current function and currently executing GET DIAGNOSTICS command. The second and any subsequent lines refer to calling functions @@ -2907,11 +2949,11 @@ CONTEXT: PL/pgSQL function outer_func() line 3 at RETURN Rather than executing a whole query at once, it is possible to set - up a cursor that encapsulates the query, and then read + up a cursor that encapsulates the query, and then read the query result a few rows at a time. One reason for doing this is to avoid memory overrun when the result contains a large number of - rows. (However, PL/pgSQL users do not normally need - to worry about that, since FOR loops automatically use a cursor + rows. (However, PL/pgSQL users do not normally need + to worry about that, since FOR loops automatically use a cursor internally to avoid memory problems.) A more interesting usage is to return a reference to a cursor that a function has created, allowing the caller to read the rows. This provides an efficient way to return @@ -2922,19 +2964,19 @@ CONTEXT: PL/pgSQL function outer_func() line 3 at RETURN Declaring Cursor Variables - All access to cursors in PL/pgSQL goes through + All access to cursors in PL/pgSQL goes through cursor variables, which are always of the special data type - refcursor. One way to create a cursor variable - is just to declare it as a variable of type refcursor. + refcursor. One way to create a cursor variable + is just to declare it as a variable of type refcursor. Another way is to use the cursor declaration syntax, which in general is: name NO SCROLL CURSOR ( arguments ) FOR query; - (FOR can be replaced by IS for + (FOR can be replaced by IS for Oracle compatibility.) - If SCROLL is specified, the cursor will be capable of - scrolling backward; if NO SCROLL is specified, backward + If SCROLL is specified, the cursor will be capable of + scrolling backward; if NO SCROLL is specified, backward fetches will be rejected; if neither specification appears, it is query-dependent whether backward fetches will be allowed. 
arguments, if specified, is a @@ -2952,13 +2994,13 @@ DECLARE curs2 CURSOR FOR SELECT * FROM tenk1; curs3 CURSOR (key integer) FOR SELECT * FROM tenk1 WHERE unique1 = key; - All three of these variables have the data type refcursor, + All three of these variables have the data type refcursor, but the first can be used with any query, while the second has - a fully specified query already bound to it, and the last - has a parameterized query bound to it. (key will be + a fully specified query already bound to it, and the last + has a parameterized query bound to it. (key will be replaced by an integer parameter value when the cursor is opened.) - The variable curs1 - is said to be unbound since it is not bound to + The variable curs1 + is said to be unbound since it is not bound to any particular query. @@ -2968,17 +3010,17 @@ DECLARE Before a cursor can be used to retrieve rows, it must be - opened. (This is the equivalent action to the SQL - command DECLARE CURSOR.) PL/pgSQL has - three forms of the OPEN statement, two of which use unbound + opened. (This is the equivalent action to the SQL + command DECLARE CURSOR.) PL/pgSQL has + three forms of the OPEN statement, two of which use unbound cursor variables while the third uses a bound cursor variable. Bound cursor variables can also be used without explicitly opening the cursor, - via the FOR statement described in - . + via the FOR statement described in + . @@ -2993,18 +3035,18 @@ OPEN unbound_cursorvar NO refcursor variable). The query must be a + refcursor variable). The query must be a SELECT, or something else that returns rows - (such as EXPLAIN). The query + (such as EXPLAIN). The query is treated in the same way as other SQL commands in - PL/pgSQL: PL/pgSQL + PL/pgSQL: PL/pgSQL variable names are substituted, and the query plan is cached for - possible reuse. When a PL/pgSQL + possible reuse. When a PL/pgSQL variable is substituted into the cursor query, the value that is - substituted is the one it has at the time of the OPEN; + substituted is the one it has at the time of the OPEN; subsequent changes to the variable will not affect the cursor's behavior. - The SCROLL and NO SCROLL + The SCROLL and NO SCROLL options have the same meanings as for a bound cursor. @@ -3028,16 +3070,16 @@ OPEN unbound_cursorvar NO refcursor variable). The query is specified as a string + refcursor variable). The query is specified as a string expression, in the same way as in the EXECUTE command. As usual, this gives flexibility so the query plan can vary - from one run to the next (see ), + from one run to the next (see ), and it also means that variable substitution is not done on the command string. As with EXECUTE, parameter values can be inserted into the dynamic command via - format() and USING. - The SCROLL and - NO SCROLL options have the same meanings as for a bound + format() and USING. + The SCROLL and + NO SCROLL options have the same meanings as for a bound cursor. @@ -3047,8 +3089,8 @@ OPEN unbound_cursorvar NO In this example, the table name is inserted into the query via - format(). The comparison value for col1 - is inserted via a USING parameter, so it needs + format(). The comparison value for col1 + is inserted via a USING parameter, so it needs no quoting. @@ -3071,8 +3113,8 @@ OPEN bound_cursorvar ( The query plan for a bound cursor is always considered cacheable; there is no equivalent of EXECUTE in this case. 
- Notice that SCROLL and NO SCROLL cannot be - specified in OPEN, as the cursor's scrolling + Notice that SCROLL and NO SCROLL cannot be + specified in OPEN, as the cursor's scrolling behavior was already determined. @@ -3082,7 +3124,7 @@ OPEN bound_cursorvar ( := to separate it from the argument expression. Similar to calling - functions, described in , it + functions, described in , it is also allowed to mix positional and named notation. @@ -3098,13 +3140,13 @@ OPEN curs3(key := 42); Because variable substitution is done on a bound cursor's query, there are really two ways to pass values into the cursor: either - with an explicit argument to OPEN, or implicitly by - referencing a PL/pgSQL variable in the query. + with an explicit argument to OPEN, or implicitly by + referencing a PL/pgSQL variable in the query. However, only variables declared before the bound cursor was declared will be substituted into it. In either case the value to - be passed is determined at the time of the OPEN. + be passed is determined at the time of the OPEN. For example, another way to get the same effect as the - curs3 example above is + curs3 example above is DECLARE key integer; @@ -3127,22 +3169,22 @@ BEGIN These manipulations need not occur in the same function that - opened the cursor to begin with. You can return a refcursor + opened the cursor to begin with. You can return a refcursor value out of a function and let the caller operate on the cursor. - (Internally, a refcursor value is simply the string name + (Internally, a refcursor value is simply the string name of a so-called portal containing the active query for the cursor. This name - can be passed around, assigned to other refcursor variables, + can be passed around, assigned to other refcursor variables, and so on, without disturbing the portal.) All portals are implicitly closed at transaction end. Therefore - a refcursor value is usable to reference an open cursor + a refcursor value is usable to reference an open cursor only until the end of the transaction. - <literal>FETCH</> + <literal>FETCH</literal> FETCH direction { FROM | IN } cursor INTO target; @@ -3160,26 +3202,30 @@ FETCH direction { FROM | IN } The direction clause can be any of the - variants allowed in the SQL + variants allowed in the SQL command except the ones that can fetch more than one row; namely, it can be - NEXT, - PRIOR, - FIRST, - LAST, - ABSOLUTE count, - RELATIVE count, - FORWARD, or - BACKWARD. + NEXT, + PRIOR, + FIRST, + LAST, + ABSOLUTE count, + RELATIVE count, + FORWARD, or + BACKWARD. Omitting direction is the same - as specifying NEXT. + as specifying NEXT. + In the forms using a count, + the count can be any integer-valued + expression (unlike the SQL FETCH command, + which only allows an integer constant). direction values that require moving backward are likely to fail unless the cursor was declared or opened - with the SCROLL option. + with the SCROLL option. - cursor must be the name of a refcursor + cursor must be the name of a refcursor variable that references an open cursor portal. @@ -3195,7 +3241,7 @@ FETCH RELATIVE -2 FROM curs4 INTO x; - <literal>MOVE</> + <literal>MOVE</literal> MOVE direction { FROM | IN } cursor; @@ -3210,26 +3256,6 @@ MOVE direction { FROM | IN } < be checked to see whether there was a next row to move to. - - The direction clause can be any of the - variants allowed in the SQL - command, namely - NEXT, - PRIOR, - FIRST, - LAST, - ABSOLUTE count, - RELATIVE count, - ALL, - FORWARD count | ALL , or - BACKWARD count | ALL . 
- Omitting direction is the same - as specifying NEXT. - direction values that require moving - backward are likely to fail unless the cursor was declared or opened - with the SCROLL option. - - Examples: @@ -3242,7 +3268,7 @@ MOVE FORWARD 2 FROM curs4; - <literal>UPDATE/DELETE WHERE CURRENT OF</> + <literal>UPDATE/DELETE WHERE CURRENT OF</literal> UPDATE table SET ... WHERE CURRENT OF cursor; @@ -3253,9 +3279,9 @@ DELETE FROM table WHERE CURRENT OF curso When a cursor is positioned on a table row, that row can be updated or deleted using the cursor to identify the row. There are restrictions on what the cursor's query can be (in particular, - no grouping) and it's best to use FOR UPDATE in the + no grouping) and it's best to use FOR UPDATE in the cursor. For more information see the - + reference page. @@ -3268,7 +3294,7 @@ UPDATE foo SET dataval = myval WHERE CURRENT OF curs1; - <literal>CLOSE</> + <literal>CLOSE</literal> CLOSE cursor; @@ -3292,7 +3318,7 @@ CLOSE curs1; Returning Cursors - PL/pgSQL functions can return cursors to the + PL/pgSQL functions can return cursors to the caller. This is useful to return multiple rows or columns, especially with very large result sets. To do this, the function opens the cursor and returns the cursor name to the caller (or simply @@ -3305,13 +3331,13 @@ CLOSE curs1; The portal name used for a cursor can be specified by the programmer or automatically generated. To specify a portal name, - simply assign a string to the refcursor variable before - opening it. The string value of the refcursor variable - will be used by OPEN as the name of the underlying portal. - However, if the refcursor variable is null, - OPEN automatically generates a name that does not + simply assign a string to the refcursor variable before + opening it. The string value of the refcursor variable + will be used by OPEN as the name of the underlying portal. + However, if the refcursor variable is null, + OPEN automatically generates a name that does not conflict with any existing portal, and assigns it to the - refcursor variable. + refcursor variable. @@ -3405,7 +3431,7 @@ COMMIT; Looping Through a Cursor's Result - There is a variant of the FOR statement that allows + There is a variant of the FOR statement that allows iterating through the rows returned by a cursor. The syntax is: @@ -3416,18 +3442,18 @@ END LOOP label ; The cursor variable must have been bound to some query when it was - declared, and it cannot be open already. The - FOR statement automatically opens the cursor, and it closes + declared, and it cannot be open already. The + FOR statement automatically opens the cursor, and it closes the cursor again when the loop exits. A list of actual argument value expressions must appear if and only if the cursor was declared to take arguments. These values will be substituted in the query, in just - the same way as during an OPEN (see ). + the same way as during an OPEN (see ). The variable recordvar is automatically - defined as type record and exists only inside the loop (any + defined as type record and exists only inside the loop (any existing definition of the variable name is ignored within the loop). Each row returned by the cursor is successively assigned to this record variable and the loop body is executed. @@ -3436,6 +3462,95 @@ END LOOP label ; + + Transaction Management + + + In procedures invoked by the CALL command + as well as in anonymous code blocks (DO command), + it is possible to end transactions using the + commands COMMIT and ROLLBACK. 
A new + transaction is started automatically after a transaction is ended using + these commands, so there is no separate START + TRANSACTION command. (Note that BEGIN and + END have different meanings in PL/pgSQL.) + + + + Here is a simple example: + +CREATE PROCEDURE transaction_test1() +LANGUAGE plpgsql +AS $$ +BEGIN + FOR i IN 0..9 LOOP + INSERT INTO test1 (a) VALUES (i); + IF i % 2 = 0 THEN + COMMIT; + ELSE + ROLLBACK; + END IF; + END LOOP; +END +$$; + +CALL transaction_test1(); + + + + + Transaction control is only possible in CALL or + DO invocations from the top level or nested + CALL or DO invocations without any + other intervening command. For example, if the call stack is + CALL proc1()CALL proc2() + → CALL proc3(), then the second and third + procedures can perform transaction control actions. But if the call stack + is CALL proc1()SELECT + func2()CALL proc3(), then the last + procedure cannot do transaction control, because of the + SELECT in between. + + + + Special considerations apply to cursor loops. Consider this example: + +CREATE PROCEDURE transaction_test2() +LANGUAGE plpgsql +AS $$ +DECLARE + r RECORD; +BEGIN + FOR r IN SELECT * FROM test2 ORDER BY x LOOP + INSERT INTO test1 (a) VALUES (r.x); + COMMIT; + END LOOP; +END; +$$; + +CALL transaction_test2(); + + Normally, cursors are automatically closed at transaction commit. + However, a cursor created as part of a loop like this is automatically + converted to a holdable cursor by the first COMMIT or + ROLLBACK. That means that the cursor is fully + evaluated at the first COMMIT or + ROLLBACK rather than row by row. The cursor is still + removed automatically after the loop, so this is mostly invisible to the + user. + + + + Transaction commands are not allowed in cursor loops driven by commands + that are not read-only (for example UPDATE + ... RETURNING). + + + + A transaction cannot be ended inside a block with exception handlers. + + + Errors and Messages @@ -3458,8 +3573,8 @@ END LOOP label ; RAISE level 'format' , expression , ... USING option = expression , ... ; -RAISE level condition_name USING option = expression , ... ; -RAISE level SQLSTATE 'sqlstate' USING option = expression , ... ; +RAISE level condition_name USING option = expression , ... ; +RAISE level SQLSTATE 'sqlstate' USING option = expression , ... ; RAISE level USING option = expression , ... ; RAISE ; @@ -3475,9 +3590,9 @@ RAISE ; priority levels. Whether messages of a particular priority are reported to the client, written to the server log, or both is controlled by the - and - configuration - variables. See for more + and + configuration + variables. See for more information. @@ -3491,13 +3606,13 @@ RAISE ; Inside the format string, % is replaced by the string representation of the next optional argument's value. Write %% to emit a literal %. - The number of arguments must match the number of % + The number of arguments must match the number of % placeholders in the format string, or an error is raised during the compilation of the function. - In this example, the value of v_job_id will replace the + In this example, the value of v_job_id will replace the % in the string: RAISE NOTICE 'Calling cs_create_job(%)', v_job_id; @@ -3506,7 +3621,7 @@ RAISE NOTICE 'Calling cs_create_job(%)', v_job_id; You can attach additional information to the error report by writing - USING followed by USING followed by option = expression items. 
Each expression can be any @@ -3518,8 +3633,8 @@ RAISE NOTICE 'Calling cs_create_job(%)', v_job_id; MESSAGE Sets the error message text. This option can't be used in the - form of RAISE that includes a format string - before USING. + form of RAISE that includes a format string + before USING. @@ -3541,7 +3656,7 @@ RAISE NOTICE 'Calling cs_create_job(%)', v_job_id; ERRCODE Specifies the error code (SQLSTATE) to report, either by condition - name, as shown in , or directly as a + name, as shown in , or directly as a five-character SQLSTATE code. @@ -3577,13 +3692,13 @@ RAISE 'Duplicate user ID: %', user_id USING ERRCODE = '23505'; - There is a second RAISE syntax in which the main argument + There is a second RAISE syntax in which the main argument is the condition name or SQLSTATE to be reported, for example: RAISE division_by_zero; RAISE SQLSTATE '22012'; - In this syntax, USING can be used to supply a custom + In this syntax, USING can be used to supply a custom error message, detail, or hint. Another way to do the earlier example is @@ -3592,25 +3707,25 @@ RAISE unique_violation USING MESSAGE = 'Duplicate user ID: ' || user_id; - Still another variant is to write RAISE USING or RAISE - level USING and put - everything else into the USING list. + Still another variant is to write RAISE USING or RAISE + level USING and put + everything else into the USING list. - The last variant of RAISE has no parameters at all. - This form can only be used inside a BEGIN block's - EXCEPTION clause; + The last variant of RAISE has no parameters at all. + This form can only be used inside a BEGIN block's + EXCEPTION clause; it causes the error currently being handled to be re-thrown. - Before PostgreSQL 9.1, RAISE without + Before PostgreSQL 9.1, RAISE without parameters was interpreted as re-throwing the error from the block - containing the active exception handler. Thus an EXCEPTION + containing the active exception handler. Thus an EXCEPTION clause nested within that handler could not catch it, even if the - RAISE was within the nested EXCEPTION clause's + RAISE was within the nested EXCEPTION clause's block. This was deemed surprising as well as being incompatible with Oracle's PL/SQL. @@ -3619,7 +3734,7 @@ RAISE unique_violation USING MESSAGE = 'Duplicate user ID: ' || user_id; If no condition name nor SQLSTATE is specified in a RAISE EXCEPTION command, the default is to use - RAISE_EXCEPTION (P0001). If no message + RAISE_EXCEPTION (P0001). If no message text is specified, the default is to use the condition name or SQLSTATE as message text. @@ -3629,7 +3744,7 @@ RAISE unique_violation USING MESSAGE = 'Duplicate user ID: ' || user_id; When specifying an error code by SQLSTATE code, you are not limited to the predefined error codes, but can select any error code consisting of five digits and/or upper-case ASCII - letters, other than 00000. It is recommended that + letters, other than 00000. It is recommended that you avoid throwing error codes that end in three zeroes, because these are category codes and can only be trapped by trapping the whole category. @@ -3652,7 +3767,7 @@ RAISE unique_violation USING MESSAGE = 'Duplicate user ID: ' || user_id; - plpgsql.check_asserts configuration parameter + plpgsql.check_asserts configuration parameter @@ -3667,7 +3782,7 @@ ASSERT condition , condition is a Boolean expression that is expected to always evaluate to true; if it does, the ASSERT statement does nothing further. 
If the - result is false or null, then an ASSERT_FAILURE exception + result is false or null, then an ASSERT_FAILURE exception is raised. (If an error occurs while evaluating the condition, it is reported as a normal error.) @@ -3676,7 +3791,7 @@ ASSERT condition , If the optional message is provided, it is an expression whose result (if not null) replaces the - default error message text assertion failed, should + default error message text assertion failed, should the condition fail. The message expression is not evaluated in the normal case where the assertion succeeds. @@ -3684,15 +3799,15 @@ ASSERT condition , Testing of assertions can be enabled or disabled via the configuration - parameter plpgsql.check_asserts, which takes a Boolean - value; the default is on. If this parameter - is off then ASSERT statements do nothing. + parameter plpgsql.check_asserts, which takes a Boolean + value; the default is on. If this parameter + is off then ASSERT statements do nothing. Note that ASSERT is meant for detecting program bugs, not for reporting ordinary error conditions. Use - the RAISE statement, described above, for that. + the RAISE statement, described above, for that. @@ -3700,7 +3815,7 @@ ASSERT condition , - Trigger Procedures + Trigger Functions trigger @@ -3709,12 +3824,12 @@ ASSERT condition , PL/pgSQL can be used to define trigger - procedures on data changes or database events. - A trigger procedure is created with the CREATE FUNCTION + functions on data changes or database events. + A trigger function is created with the CREATE FUNCTION command, declaring it as a function with no arguments and a return type of - trigger (for data change triggers) or - event_trigger (for database event triggers). - Special local variables named PG_something are + trigger (for data change triggers) or + event_trigger (for database event triggers). + Special local variables named TG_something are automatically defined to describe the condition that triggered the call. @@ -3722,11 +3837,11 @@ ASSERT condition , Triggers on Data Changes - A data change trigger is declared as a - function with no arguments and a return type of trigger. + A data change trigger is declared as a + function with no arguments and a return type of trigger. Note that the function must be declared with no arguments even if it - expects to receive some arguments specified in CREATE TRIGGER - — such arguments are passed via TG_ARGV, as described + expects to receive some arguments specified in CREATE TRIGGER + — such arguments are passed via TG_ARGV, as described below. @@ -3741,7 +3856,7 @@ ASSERT condition , Data type RECORD; variable holding the new - database row for INSERT/UPDATE operations in row-level + database row for INSERT/UPDATE operations in row-level triggers. This variable is unassigned in statement-level triggers and for DELETE operations. @@ -3753,7 +3868,7 @@ ASSERT condition , Data type RECORD; variable holding the old - database row for UPDATE/DELETE operations in row-level + database row for UPDATE/DELETE operations in row-level triggers. This variable is unassigned in statement-level triggers and for INSERT operations. @@ -3798,7 +3913,7 @@ ASSERT condition , Data type text; a string of INSERT, UPDATE, - DELETE, or TRUNCATE + DELETE, or TRUNCATE telling for which operation the trigger was fired. @@ -3820,7 +3935,7 @@ ASSERT condition , Data type name; the name of the table that caused the trigger invocation. This is now deprecated, and could disappear in a future - release. Use TG_TABLE_NAME instead. 
+ release. Use TG_TABLE_NAME instead. @@ -3850,7 +3965,7 @@ ASSERT condition , Data type integer; the number of arguments given to the trigger - procedure in the CREATE TRIGGER statement. + function in the CREATE TRIGGER statement. @@ -3862,7 +3977,7 @@ ASSERT condition , text; the arguments from the CREATE TRIGGER statement. The index counts from 0. Invalid - indexes (less than 0 or greater than or equal to tg_nargs) + indexes (less than 0 or greater than or equal to tg_nargs) result in a null value. @@ -3877,20 +3992,20 @@ ASSERT condition , - Row-level triggers fired BEFORE can return null to signal the + Row-level triggers fired BEFORE can return null to signal the trigger manager to skip the rest of the operation for this row (i.e., subsequent triggers are not fired, and the - INSERT/UPDATE/DELETE does not occur + INSERT/UPDATE/DELETE does not occur for this row). If a nonnull value is returned then the operation proceeds with that row value. Returning a row value different from the original value - of NEW alters the row that will be inserted or + of NEW alters the row that will be inserted or updated. Thus, if the trigger function wants the triggering action to succeed normally without altering the row value, NEW (or a value equal thereto) has to be returned. To alter the row to be stored, it is possible to - replace single values directly in NEW and return the - modified NEW, or to build a complete new record/row to + replace single values directly in NEW and return the + modified NEW, or to build a complete new record/row to return. In the case of a before-trigger on DELETE, the returned value has no direct effect, but it has to be nonnull to allow the trigger action to @@ -3901,39 +4016,39 @@ ASSERT condition , - INSTEAD OF triggers (which are always row-level triggers, + INSTEAD OF triggers (which are always row-level triggers, and may only be used on views) can return null to signal that they did not perform any updates, and that the rest of the operation for this row should be skipped (i.e., subsequent triggers are not fired, and the row is not counted in the rows-affected status for the surrounding - INSERT/UPDATE/DELETE). + INSERT/UPDATE/DELETE). Otherwise a nonnull value should be returned, to signal that the trigger performed the requested operation. For - INSERT and UPDATE operations, the return value - should be NEW, which the trigger function may modify to - support INSERT RETURNING and UPDATE RETURNING + INSERT and UPDATE operations, the return value + should be NEW, which the trigger function may modify to + support INSERT RETURNING and UPDATE RETURNING (this will also affect the row value passed to any subsequent triggers, - or passed to a special EXCLUDED alias reference within - an INSERT statement with an ON CONFLICT DO - UPDATE clause). For DELETE operations, the return - value should be OLD. + or passed to a special EXCLUDED alias reference within + an INSERT statement with an ON CONFLICT DO + UPDATE clause). For DELETE operations, the return + value should be OLD. The return value of a row-level trigger fired AFTER or a statement-level trigger - fired BEFORE or AFTER is + fired BEFORE or AFTER is always ignored; it might as well be null. However, any of these types of triggers might still abort the entire operation by raising an error. - shows an example of a - trigger procedure in PL/pgSQL. + shows an example of a + trigger function in PL/pgSQL. 
- A <application>PL/pgSQL</application> Trigger Procedure + A <application>PL/pgSQL</application> Trigger Function This example trigger ensures that any time a row is inserted or updated @@ -3973,7 +4088,7 @@ CREATE FUNCTION emp_stamp() RETURNS trigger AS $emp_stamp$ $emp_stamp$ LANGUAGE plpgsql; CREATE TRIGGER emp_stamp BEFORE INSERT OR UPDATE ON emp - FOR EACH ROW EXECUTE PROCEDURE emp_stamp(); + FOR EACH ROW EXECUTE FUNCTION emp_stamp(); @@ -3981,12 +4096,12 @@ CREATE TRIGGER emp_stamp BEFORE INSERT OR UPDATE ON emp Another way to log changes to a table involves creating a new table that holds a row for each insert, update, or delete that occurs. This approach can be thought of as auditing changes to a table. - shows an example of an - audit trigger procedure in PL/pgSQL. + shows an example of an + audit trigger function in PL/pgSQL. - A <application>PL/pgSQL</application> Trigger Procedure For Auditing + A <application>PL/pgSQL</application> Trigger Function For Auditing This example trigger ensures that any insert, update or delete of a row @@ -4013,7 +4128,7 @@ CREATE OR REPLACE FUNCTION process_emp_audit() RETURNS TRIGGER AS $emp_audit$ BEGIN -- -- Create a row in emp_audit to reflect the operation performed on emp, - -- make use of the special variable TG_OP to work out the operation. + -- making use of the special variable TG_OP to work out the operation. -- IF (TG_OP = 'DELETE') THEN INSERT INTO emp_audit SELECT 'D', now(), user, OLD.*; @@ -4028,7 +4143,7 @@ $emp_audit$ LANGUAGE plpgsql; CREATE TRIGGER emp_audit AFTER INSERT OR UPDATE OR DELETE ON emp - FOR EACH ROW EXECUTE PROCEDURE process_emp_audit(); + FOR EACH ROW EXECUTE FUNCTION process_emp_audit(); @@ -4038,12 +4153,12 @@ AFTER INSERT OR UPDATE OR DELETE ON emp approach still records the full audit trail of changes to the table, but also presents a simplified view of the audit trail, showing just the last modified timestamp derived from the audit trail for each entry. - shows an example + shows an example of an audit trigger on a view in PL/pgSQL. - A <application>PL/pgSQL</application> View Trigger Procedure For Auditing + A <application>PL/pgSQL</application> View Trigger Function For Auditing This example uses a trigger on the view to make it updatable, and @@ -4107,7 +4222,7 @@ $$ LANGUAGE plpgsql; CREATE TRIGGER emp_audit INSTEAD OF INSERT OR UPDATE OR DELETE ON emp_view - FOR EACH ROW EXECUTE PROCEDURE update_emp_view(); + FOR EACH ROW EXECUTE FUNCTION update_emp_view(); @@ -4118,14 +4233,14 @@ INSTEAD OF INSERT OR UPDATE OR DELETE ON emp_view times. This technique is commonly used in Data Warehousing, where the tables of measured or observed data (called fact tables) might be extremely large. - shows an example of a - trigger procedure in PL/pgSQL that maintains + shows an example of a + trigger function in PL/pgSQL that maintains a summary table for a fact table in a data warehouse. 
- A <application>PL/pgSQL</application> Trigger Procedure For Maintaining A Summary Table + A <application>PL/pgSQL</application> Trigger Function For Maintaining A Summary Table The schema detailed here is partly based on the Grocery Store @@ -4252,7 +4367,7 @@ $maint_sales_summary_bytime$ LANGUAGE plpgsql; CREATE TRIGGER maint_sales_summary_bytime AFTER INSERT OR UPDATE OR DELETE ON sales_fact - FOR EACH ROW EXECUTE PROCEDURE maint_sales_summary_bytime(); + FOR EACH ROW EXECUTE FUNCTION maint_sales_summary_bytime(); INSERT INTO sales_fact VALUES(1,1,1,10,3,15); INSERT INTO sales_fact VALUES(1,2,1,20,5,35); @@ -4265,6 +4380,82 @@ UPDATE sales_fact SET units_sold = units_sold * 2; SELECT * FROM sales_summary_bytime; + + + AFTER triggers can also make use of transition + tables to inspect the entire set of rows changed by the triggering + statement. The CREATE TRIGGER command assigns names to one + or both transition tables, and then the function can refer to those names + as though they were read-only temporary tables. + shows an example. + + + + Auditing with Transition Tables + + + This example produces the same results as + , but instead of using a + trigger that fires for every row, it uses a trigger that fires once + per statement, after collecting the relevant information in a transition + table. This can be significantly faster than the row-trigger approach + when the invoking statement has modified many rows. Notice that we must + make a separate trigger declaration for each kind of event, since the + REFERENCING clauses must be different for each case. But + this does not stop us from using a single trigger function if we choose. + (In practice, it might be better to use three separate functions and + avoid the run-time tests on TG_OP.) + + + +CREATE TABLE emp ( + empname text NOT NULL, + salary integer +); + +CREATE TABLE emp_audit( + operation char(1) NOT NULL, + stamp timestamp NOT NULL, + userid text NOT NULL, + empname text NOT NULL, + salary integer +); + +CREATE OR REPLACE FUNCTION process_emp_audit() RETURNS TRIGGER AS $emp_audit$ + BEGIN + -- + -- Create rows in emp_audit to reflect the operations performed on emp, + -- making use of the special variable TG_OP to work out the operation. + -- + IF (TG_OP = 'DELETE') THEN + INSERT INTO emp_audit + SELECT 'D', now(), user, o.* FROM old_table o; + ELSIF (TG_OP = 'UPDATE') THEN + INSERT INTO emp_audit + SELECT 'U', now(), user, n.* FROM new_table n; + ELSIF (TG_OP = 'INSERT') THEN + INSERT INTO emp_audit + SELECT 'I', now(), user, n.* FROM new_table n; + END IF; + RETURN NULL; -- result is ignored since this is an AFTER trigger + END; +$emp_audit$ LANGUAGE plpgsql; + +CREATE TRIGGER emp_audit_ins + AFTER INSERT ON emp + REFERENCING NEW TABLE AS new_table + FOR EACH STATEMENT EXECUTE FUNCTION process_emp_audit(); +CREATE TRIGGER emp_audit_upd + AFTER UPDATE ON emp + REFERENCING OLD TABLE AS old_table NEW TABLE AS new_table + FOR EACH STATEMENT EXECUTE FUNCTION process_emp_audit(); +CREATE TRIGGER emp_audit_del + AFTER DELETE ON emp + REFERENCING OLD TABLE AS old_table + FOR EACH STATEMENT EXECUTE FUNCTION process_emp_audit(); + + + @@ -4272,10 +4463,10 @@ SELECT * FROM sales_summary_bytime; PL/pgSQL can be used to define - event triggers. - PostgreSQL requires that a procedure that + event triggers. + PostgreSQL requires that a function that is to be called as an event trigger must be declared as a function with - no arguments and a return type of event_trigger. + no arguments and a return type of event_trigger. 
@@ -4307,12 +4498,12 @@ SELECT * FROM sales_summary_bytime; - shows an example of an - event trigger procedure in PL/pgSQL. + shows an example of an + event trigger function in PL/pgSQL. - A <application>PL/pgSQL</application> Event Trigger Procedure + A <application>PL/pgSQL</application> Event Trigger Function This example trigger simply raises a NOTICE message @@ -4326,7 +4517,7 @@ BEGIN END; $$ LANGUAGE plpgsql; -CREATE EVENT TRIGGER snitch ON ddl_command_start EXECUTE PROCEDURE snitch(); +CREATE EVENT TRIGGER snitch ON ddl_command_start EXECUTE FUNCTION snitch(); @@ -4334,29 +4525,29 @@ CREATE EVENT TRIGGER snitch ON ddl_command_start EXECUTE PROCEDURE snitch(); - <application>PL/pgSQL</> Under the Hood + <application>PL/pgSQL</application> Under the Hood This section discusses some implementation details that are - frequently important for PL/pgSQL users to know. + frequently important for PL/pgSQL users to know. Variable Substitution - SQL statements and expressions within a PL/pgSQL function + SQL statements and expressions within a PL/pgSQL function can refer to variables and parameters of the function. Behind the scenes, - PL/pgSQL substitutes query parameters for such references. + PL/pgSQL substitutes query parameters for such references. Parameters will only be substituted in places where a parameter or column reference is syntactically allowed. As an extreme case, consider this example of poor programming style: INSERT INTO foo (foo) VALUES (foo); - The first occurrence of foo must syntactically be a table + The first occurrence of foo must syntactically be a table name, so it will not be substituted, even if the function has a variable - named foo. The second occurrence must be the name of a + named foo. The second occurrence must be the name of a column of the table, so it will not be substituted either. Only the third occurrence is a candidate to be a reference to the function's variable. @@ -4377,18 +4568,18 @@ INSERT INTO foo (foo) VALUES (foo); INSERT INTO dest (col) SELECT foo + bar FROM src; - Here, dest and src must be table names, and - col must be a column of dest, but foo - and bar might reasonably be either variables of the function - or columns of src. + Here, dest and src must be table names, and + col must be a column of dest, but foo + and bar might reasonably be either variables of the function + or columns of src. - By default, PL/pgSQL will report an error if a name + By default, PL/pgSQL will report an error if a name in a SQL statement could refer to either a variable or a table column. You can fix such a problem by renaming the variable or column, or by qualifying the ambiguous reference, or by telling - PL/pgSQL which interpretation to prefer. + PL/pgSQL which interpretation to prefer. @@ -4397,16 +4588,16 @@ INSERT INTO dest (col) SELECT foo + bar FROM src; different naming convention for PL/pgSQL variables than you use for column names. For example, if you consistently name function variables - v_something while none of your - column names start with v_, no conflicts will occur. + v_something while none of your + column names start with v_, no conflicts will occur. Alternatively you can qualify ambiguous references to make them clear. - In the above example, src.foo would be an unambiguous reference + In the above example, src.foo would be an unambiguous reference to the table column. To create an unambiguous reference to a variable, declare it in a labeled block and use the block's label - (see ). For example, + (see ). 
For example, <<block>> DECLARE @@ -4415,37 +4606,37 @@ BEGIN foo := ...; INSERT INTO dest (col) SELECT block.foo + bar FROM src; - Here block.foo means the variable even if there is a column - foo in src. Function parameters, as well as - special variables such as FOUND, can be qualified by the + Here block.foo means the variable even if there is a column + foo in src. Function parameters, as well as + special variables such as FOUND, can be qualified by the function's name, because they are implicitly declared in an outer block labeled with the function's name. Sometimes it is impractical to fix all the ambiguous references in a - large body of PL/pgSQL code. In such cases you can - specify that PL/pgSQL should resolve ambiguous references - as the variable (which is compatible with PL/pgSQL's + large body of PL/pgSQL code. In such cases you can + specify that PL/pgSQL should resolve ambiguous references + as the variable (which is compatible with PL/pgSQL's behavior before PostgreSQL 9.0), or as the table column (which is compatible with some other systems such as Oracle). - plpgsql.variable_conflict configuration parameter + plpgsql.variable_conflict configuration parameter To change this behavior on a system-wide basis, set the configuration - parameter plpgsql.variable_conflict to one of - error, use_variable, or - use_column (where error is the factory default). + parameter plpgsql.variable_conflict to one of + error, use_variable, or + use_column (where error is the factory default). This parameter affects subsequent compilations - of statements in PL/pgSQL functions, but not statements + of statements in PL/pgSQL functions, but not statements already compiled in the current session. Because changing this setting - can cause unexpected changes in the behavior of PL/pgSQL + can cause unexpected changes in the behavior of PL/pgSQL functions, it can only be changed by a superuser. @@ -4459,7 +4650,7 @@ BEGIN #variable_conflict use_column These commands affect only the function they are written in, and override - the setting of plpgsql.variable_conflict. An example is + the setting of plpgsql.variable_conflict. An example is CREATE FUNCTION stamp_user(id int, comment text) RETURNS void AS $$ #variable_conflict use_variable @@ -4471,15 +4662,15 @@ CREATE FUNCTION stamp_user(id int, comment text) RETURNS void AS $$ END; $$ LANGUAGE plpgsql; - In the UPDATE command, curtime, comment, - and id will refer to the function's variable and parameters - whether or not users has columns of those names. Notice - that we had to qualify the reference to users.id in the - WHERE clause to make it refer to the table column. - But we did not have to qualify the reference to comment - as a target in the UPDATE list, because syntactically - that must be a column of users. We could write the same - function without depending on the variable_conflict setting + In the UPDATE command, curtime, comment, + and id will refer to the function's variable and parameters + whether or not users has columns of those names. Notice + that we had to qualify the reference to users.id in the + WHERE clause to make it refer to the table column. + But we did not have to qualify the reference to comment + as a target in the UPDATE list, because syntactically + that must be a column of users. 
We could write the same + function without depending on the variable_conflict setting in this way: CREATE FUNCTION stamp_user(id int, comment text) RETURNS void AS $$ @@ -4496,19 +4687,19 @@ $$ LANGUAGE plpgsql; Variable substitution does not happen in the command string given - to EXECUTE or one of its variants. If you need to + to EXECUTE or one of its variants. If you need to insert a varying value into such a command, do so as part of - constructing the string value, or use USING, as illustrated in - . + constructing the string value, or use USING, as illustrated in + . - Variable substitution currently works only in SELECT, - INSERT, UPDATE, and DELETE commands, + Variable substitution currently works only in SELECT, + INSERT, UPDATE, and DELETE commands, because the main SQL engine allows query parameters only in these commands. To use a non-constant name or value in other statement types (generically called utility statements), you must construct - the utility statement as a string and EXECUTE it. + the utility statement as a string and EXECUTE it. @@ -4517,22 +4708,22 @@ $$ LANGUAGE plpgsql; Plan Caching - The PL/pgSQL interpreter parses the function's source + The PL/pgSQL interpreter parses the function's source text and produces an internal binary instruction tree the first time the function is called (within each session). The instruction tree fully translates the - PL/pgSQL statement structure, but individual + PL/pgSQL statement structure, but individual SQL expressions and SQL commands used in the function are not translated immediately. - preparing a query - in PL/pgSQL + preparing a query + in PL/pgSQL As each expression and SQL command is first - executed in the function, the PL/pgSQL interpreter + executed in the function, the PL/pgSQL interpreter parses and analyzes the command to create a prepared statement, using the SPI manager's SPI_prepare function. @@ -4548,19 +4739,19 @@ $$ LANGUAGE plpgsql; - PL/pgSQL (or more precisely, the SPI manager) can + PL/pgSQL (or more precisely, the SPI manager) can furthermore attempt to cache the execution plan associated with any particular prepared statement. If a cached plan is not used, then a fresh execution plan is generated on each visit to the statement, - and the current parameter values (that is, PL/pgSQL + and the current parameter values (that is, PL/pgSQL variable values) can be used to optimize the selected plan. If the statement has no parameters, or is executed many times, the SPI manager - will consider creating a generic plan that is not dependent + will consider creating a generic plan that is not dependent on specific parameter values, and caching that for re-use. Typically this will happen only if the execution plan is not very sensitive to - the values of the PL/pgSQL variables referenced in it. + the values of the PL/pgSQL variables referenced in it. If it is, generating a plan each time is a net win. See for more information about the behavior of + linkend="sql-prepare"/> for more information about the behavior of prepared statements. @@ -4594,7 +4785,7 @@ $$ LANGUAGE plpgsql; for each trigger function and table combination, not just for each function. This alleviates some of the problems with varying data types; for instance, a trigger function will be able to work - successfully with a column named key even if it happens + successfully with a column named key even if it happens to have different types in different tables. 
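To make the plan-caching discussion above a little more concrete, here is a minimal sketch (the orders table and its status column are hypothetical) of the usual workaround when a cached generic plan is a poor fit for a parameter whose value strongly affects selectivity: EXECUTE never caches a plan, so the command is planned afresh with the actual value on every call.

<programlisting>
-- Sketch only: "orders" and "status" are made-up names.
CREATE FUNCTION count_by_status(p_status text) RETURNS bigint AS $$
DECLARE
    n bigint;
BEGIN
    -- A plain query here might eventually run with a cached generic plan:
    --   SELECT count(*) INTO n FROM orders WHERE status = p_status;
    -- EXECUTE plans the command text anew on each call, using the
    -- supplied parameter value:
    EXECUTE 'SELECT count(*) FROM orders WHERE status = $1'
        INTO n USING p_status;
    RETURN n;
END;
$$ LANGUAGE plpgsql;
</programlisting>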
@@ -4644,8 +4835,8 @@ $$ LANGUAGE plpgsql; INSERT is analyzed, and then used in all invocations of logfunc1 during the lifetime of the session. Needless to say, this isn't what the programmer - wanted. A better idea is to use the now() or - current_timestamp function. + wanted. A better idea is to use the now() or + current_timestamp function. @@ -4661,7 +4852,7 @@ $$ LANGUAGE plpgsql; functions for the conversion. So, the computed time stamp is updated on each execution as the programmer expects. Even though this happens to work as expected, it's not terribly efficient, so - use of the now() function would still be a better idea. + use of the now() function would still be a better idea. @@ -4673,12 +4864,12 @@ $$ LANGUAGE plpgsql; One good way to develop in - PL/pgSQL is to use the text editor of your + PL/pgSQL is to use the text editor of your choice to create your functions, and in another window, use psql to load and test those functions. If you are doing it this way, it is a good idea to write the function using CREATE OR - REPLACE FUNCTION. That way you can just reload the file to update + REPLACE FUNCTION. That way you can just reload the file to update the function definition. For example: CREATE OR REPLACE FUNCTION testfunc(integer) RETURNS integer AS $$ @@ -4697,10 +4888,10 @@ $$ LANGUAGE plpgsql; - Another good way to develop in PL/pgSQL is with a + Another good way to develop in PL/pgSQL is with a GUI database access tool that facilitates development in a procedural language. One example of such a tool is - pgAdmin, although others exist. These tools often + pgAdmin, although others exist. These tools often provide convenient features such as escaping single quotes and making it easier to recreate and debug functions. @@ -4709,7 +4900,7 @@ $$ LANGUAGE plpgsql; Handling of Quotation Marks - The code of a PL/pgSQL function is specified in + The code of a PL/pgSQL function is specified in CREATE FUNCTION as a string literal. If you write the string literal in the ordinary way with surrounding single quotes, then any single quotes inside the function body @@ -4719,8 +4910,8 @@ $$ LANGUAGE plpgsql; the code can become downright incomprehensible, because you can easily find yourself needing half a dozen or more adjacent quote marks. It's recommended that you instead write the function body as a - dollar-quoted string literal (see ). In the dollar-quoting + dollar-quoted string literal (see ). In the dollar-quoting approach, you never double any quote marks, but instead take care to choose a different dollar-quoting delimiter for each level of nesting you need. For example, you might write the CREATE @@ -4731,9 +4922,9 @@ CREATE OR REPLACE FUNCTION testfunc(integer) RETURNS integer AS $PROC$ $PROC$ LANGUAGE plpgsql; Within this, you might use quote marks for simple literal strings in - SQL commands and $$ to delimit fragments of SQL commands + SQL commands and $$ to delimit fragments of SQL commands that you are assembling as strings. If you need to quote text that - includes $$, you could use $Q$, and so on. + includes $$, you could use $Q$, and so on. @@ -4754,7 +4945,7 @@ CREATE FUNCTION foo() RETURNS integer AS ' ' LANGUAGE plpgsql; Anywhere within a single-quoted function body, quote marks - must appear in pairs. + must appear in pairs. 
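As a small sketch of the dollar-quoting advice above (the function and view names are invented): with a distinct outer delimiter, an inner $$-quoted command can contain ordinary single-quoted literals without any quote doubling.

<programlisting>
CREATE OR REPLACE FUNCTION make_greeting_view() RETURNS void AS $func$
BEGIN
    -- The inner $$ ... $$ delimits a SQL command that itself contains a
    -- single-quoted string; no quotes need to be doubled at any level.
    EXECUTE $$CREATE OR REPLACE VIEW greeting AS SELECT 'hello, world' AS msg$$;
END;
$func$ LANGUAGE plpgsql;
</programlisting>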
@@ -4773,7 +4964,7 @@ SELECT * FROM users WHERE f_name=''foobar''; a_output := 'Blah'; SELECT * FROM users WHERE f_name='foobar'; - which is exactly what the PL/pgSQL parser would see + which is exactly what the PL/pgSQL parser would see in either case. @@ -4797,7 +4988,7 @@ a_output := a_output || '' AND name LIKE ''''foobar'''' AND xyz'' a_output := a_output || $$ AND name LIKE 'foobar' AND xyz$$ being careful that any dollar-quote delimiters around this are not - just $$. + just $$. @@ -4831,7 +5022,7 @@ a_output := a_output || $$ AND name LIKE 'foobar'$$ accounts for 8 quotation marks) and this is adjacent to the end of that string constant (2 more). You will probably only need that if you are writing a function that generates other functions, as in - . + . For example: a_output := a_output || '' if v_'' || @@ -4862,38 +5053,76 @@ a_output := a_output || $$ if v_$$ || referrer_keys.kind || $$ like '$$ - Additional Compile-time Checks + Additional Compile-time and Run-time Checks To aid the user in finding instances of simple but common problems before - they cause harm, PL/pgSQL provides additional - checks. When enabled, depending on the configuration, they - can be used to emit either a WARNING or an ERROR + they cause harm, PL/pgSQL provides additional + checks. When enabled, depending on the configuration, they + can be used to emit either a WARNING or an ERROR during the compilation of a function. A function which has received - a WARNING can be executed without producing further messages, + a WARNING can be executed without producing further messages, so you are advised to test in a separate development environment. - - These additional checks are enabled through the configuration variables - plpgsql.extra_warnings for warnings and - plpgsql.extra_errors for errors. Both can be set either to - a comma-separated list of checks, "none" or "all". - The default is "none". Currently the list of available checks - includes only one: - - - shadowed_variables - - - Checks if a declaration shadows a previously defined variable. - - - - + + Setting plpgsql.extra_warnings, or + plpgsql.extra_errors, as appropriate, to "all" + is encouraged in development and/or testing environments. + + + + These additional checks are enabled through the configuration variables + plpgsql.extra_warnings for warnings and + plpgsql.extra_errors for errors. Both can be set either to + a comma-separated list of checks, "none" or + "all". The default is "none". Currently + the list of available checks includes: + + + shadowed_variables + + + Checks if a declaration shadows a previously defined variable. + + + + + + strict_multi_assignment + + + Some PL/PgSQL commands allow assigning + values to more than one variable at a time, such as + SELECT INTO. Typically, the number of target + variables and the number of source variables should match, though + PL/PgSQL will use NULL + for missing values and extra variables are ignored. Enabling this + check will cause PL/PgSQL to throw a + WARNING or ERROR whenever the + number of target variables and the number of source variables are + different. + + + + + + too_many_rows + + + Enabling this check will cause PL/PgSQL to + check if a given query returns more than one row when an + INTO clause is used. As an INTO + statement will only ever use one row, having a query return multiple + rows is generally either inefficient and/or nondeterministic and + therefore is likely an error. 
+ + + + - The following example shows the effect of plpgsql.extra_warnings - set to shadowed_variables: + The following example shows the effect of plpgsql.extra_warnings + set to shadowed_variables: SET plpgsql.extra_warnings TO 'shadowed_variables'; @@ -4909,8 +5138,41 @@ LINE 3: f1 int; ^ CREATE FUNCTION - - + The below example shows the effects of setting + plpgsql.extra_warnings to + strict_multi_assignment: + +SET plpgsql.extra_warnings TO 'strict_multi_assignment'; + +CREATE OR REPLACE FUNCTION public.foo() + RETURNS void + LANGUAGE plpgsql +AS $$ +DECLARE + x int; + y int; +BEGIN + SELECT 1 INTO x, y; + SELECT 1, 2 INTO x, y; + SELECT 1, 2, 3 INTO x, y; +END; +$$; + +SELECT foo(); +WARNING: number of source and target fields in assignment do not match +DETAIL: strict_multi_assignment check of extra_warnings is active. +HINT: Make sure the query returns the exact list of columns. +WARNING: number of source and target fields in assignment do not match +DETAIL: strict_multi_assignment check of extra_warnings is active. +HINT: Make sure the query returns the exact list of columns. + + foo +----- + +(1 row) + + + @@ -4930,10 +5192,10 @@ CREATE FUNCTION This section explains differences between - PostgreSQL's PL/pgSQL + PostgreSQL's PL/pgSQL language and Oracle's PL/SQL language, to help developers who port applications from - Oracle to PostgreSQL. + Oracle to PostgreSQL. @@ -4941,7 +5203,7 @@ CREATE FUNCTION aspects. It is a block-structured, imperative language, and all variables have to be declared. Assignments, loops, conditionals are similar. The main differences you should keep in mind when - porting from PL/SQL to + porting from PL/SQL to PL/pgSQL are: @@ -4949,34 +5211,34 @@ CREATE FUNCTION If a name used in a SQL command could be either a column name of a table or a reference to a variable of the function, - PL/SQL treats it as a column name. This corresponds - to PL/pgSQL's - plpgsql.variable_conflict = use_column + PL/SQL treats it as a column name. This corresponds + to PL/pgSQL's + plpgsql.variable_conflict = use_column behavior, which is not the default, - as explained in . + as explained in . It's often best to avoid such ambiguities in the first place, but if you have to port a large amount of code that depends on - this behavior, setting variable_conflict may be the + this behavior, setting variable_conflict may be the best solution. - In PostgreSQL the function body must be written as + In PostgreSQL the function body must be written as a string literal. Therefore you need to use dollar quoting or escape single quotes in the function body. (See .) + linkend="plpgsql-quote-tips"/>.) Data type names often need translation. For example, in Oracle string - values are commonly declared as being of type varchar2, which + values are commonly declared as being of type varchar2, which is a non-SQL-standard type. In PostgreSQL, - use type varchar or text instead. Similarly, replace - type number with numeric, or use some other numeric + use type varchar or text instead. Similarly, replace + type number with numeric, or use some other numeric data type if there's a more appropriate one. 
@@ -4998,21 +5260,21 @@ CREATE FUNCTION - Integer FOR loops with REVERSE work - differently: PL/SQL counts down from the second - number to the first, while PL/pgSQL counts down + Integer FOR loops with REVERSE work + differently: PL/SQL counts down from the second + number to the first, while PL/pgSQL counts down from the first number to the second, requiring the loop bounds to be swapped when porting. This incompatibility is unfortunate but is unlikely to be changed. (See .) + linkend="plpgsql-integer-for"/>.) - FOR loops over queries (other than cursors) also work + FOR loops over queries (other than cursors) also work differently: the target variable(s) must have been declared, - whereas PL/SQL always declares them implicitly. + whereas PL/SQL always declares them implicitly. An advantage of this is that the variable values are still accessible after the loop exits. @@ -5032,15 +5294,15 @@ CREATE FUNCTION Porting Examples - shows how to port a simple - function from PL/SQL to PL/pgSQL. + shows how to port a simple + function from PL/SQL to PL/pgSQL. - Porting a Simple Function from <application>PL/SQL</> to <application>PL/pgSQL</> + Porting a Simple Function from <application>PL/SQL</application> to <application>PL/pgSQL</application> - Here is an Oracle PL/SQL function: + Here is an Oracle PL/SQL function: CREATE OR REPLACE FUNCTION cs_fmt_browser_version(v_name varchar2, v_version varchar2) @@ -5058,14 +5320,14 @@ show errors; Let's go through this function and see the differences compared to - PL/pgSQL: + PL/pgSQL: - The type name varchar2 has to be changed to varchar - or text. In the examples in this section, we'll - use varchar, but text is often a better choice if + The type name varchar2 has to be changed to varchar + or text. In the examples in this section, we'll + use varchar, but text is often a better choice if you do not need specific string length limits. @@ -5076,17 +5338,17 @@ show errors; prototype (not the function body) becomes RETURNS in PostgreSQL. - Also, IS becomes AS, and you need to - add a LANGUAGE clause because PL/pgSQL + Also, IS becomes AS, and you need to + add a LANGUAGE clause because PL/pgSQL is not the only possible function language. - In PostgreSQL, the function body is considered + In PostgreSQL, the function body is considered to be a string literal, so you need to use quote marks or dollar - quotes around it. This substitutes for the terminating / + quotes around it. This substitutes for the terminating / in the Oracle approach. @@ -5094,7 +5356,7 @@ show errors; The show errors command does not exist in - PostgreSQL, and is not needed since errors are + PostgreSQL, and is not needed since errors are reported automatically. @@ -5103,7 +5365,7 @@ show errors; This is how this function would look when ported to - PostgreSQL: + PostgreSQL: CREATE OR REPLACE FUNCTION cs_fmt_browser_version(v_name varchar, @@ -5121,13 +5383,13 @@ $$ LANGUAGE plpgsql; - shows how to port a + shows how to port a function that creates another function and how to handle the ensuing quoting problems. 
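Before moving on, here is a minimal sketch of the REVERSE loop difference noted in the list above: the Oracle form FOR i IN REVERSE 1..10 must have its bounds swapped when ported, because PL/pgSQL counts down from the first bound to the second.

<programlisting>
DO $$
BEGIN
    -- Oracle: FOR i IN REVERSE 1..10   counts 10, 9, ..., 1
    -- PL/pgSQL equivalent, with the bounds swapped:
    FOR i IN REVERSE 10..1 LOOP
        RAISE NOTICE 'i = %', i;        -- prints 10, 9, ..., 1
    END LOOP;
END;
$$;
</programlisting>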
- Porting a Function that Creates Another Function from <application>PL/SQL</> to <application>PL/pgSQL</> + Porting a Function that Creates Another Function from <application>PL/SQL</application> to <application>PL/pgSQL</application> The following procedure grabs rows from a @@ -5166,9 +5428,9 @@ show errors; - Here is how this function would end up in PostgreSQL: + Here is how this function would end up in PostgreSQL: -CREATE OR REPLACE FUNCTION cs_update_referrer_type_proc() RETURNS void AS $func$ +CREATE OR REPLACE PROCEDURE cs_update_referrer_type_proc() AS $func$ DECLARE referrer_keys CURSOR IS SELECT * FROM cs_referrer_keys @@ -5201,27 +5463,27 @@ END; $func$ LANGUAGE plpgsql; Notice how the body of the function is built separately and passed - through quote_literal to double any quote marks in it. This + through quote_literal to double any quote marks in it. This technique is needed because we cannot safely use dollar quoting for defining the new function: we do not know for sure what strings will - be interpolated from the referrer_key.key_string field. - (We are assuming here that referrer_key.kind can be - trusted to always be host, domain, or - url, but referrer_key.key_string might be + be interpolated from the referrer_key.key_string field. + (We are assuming here that referrer_key.kind can be + trusted to always be host, domain, or + url, but referrer_key.key_string might be anything, in particular it might contain dollar signs.) This function is actually an improvement on the Oracle original, because it will - not generate broken code when referrer_key.key_string or - referrer_key.referrer_type contain quote marks. + not generate broken code when referrer_key.key_string or + referrer_key.referrer_type contain quote marks. - shows how to port a function - with OUT parameters and string manipulation. - PostgreSQL does not have a built-in + shows how to port a function + with OUT parameters and string manipulation. + PostgreSQL does not have a built-in instr function, but you can create one using a combination of other - functions. In there is a + functions. In there is a PL/pgSQL implementation of instr that you can use to make your porting easier. @@ -5229,8 +5491,8 @@ $func$ LANGUAGE plpgsql; Porting a Procedure With String Manipulation and - <literal>OUT</> Parameters from <application>PL/SQL</> to - <application>PL/pgSQL</> + OUT Parameters from PL/SQL to + PL/pgSQL The following Oracle PL/SQL procedure is used @@ -5281,7 +5543,7 @@ show errors; - Here is a possible translation into PL/pgSQL: + Here is a possible translation into PL/pgSQL: CREATE OR REPLACE FUNCTION cs_parse_url( v_url IN VARCHAR, @@ -5330,12 +5592,12 @@ SELECT * FROM cs_parse_url('http://foobar.com/query.cgi?baz'); - shows how to port a procedure + shows how to port a procedure that uses numerous features that are specific to Oracle. 
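Before moving on to the next example, here is a tiny illustration of what quote_literal does and why the translation above relies on it (the input value is made up):

<programlisting>
-- quote_literal wraps its argument in single quotes and doubles any
-- single quotes inside it, so the result can be embedded in generated
-- SQL safely even if the value contains quotes or dollar signs.
SELECT quote_literal('key with ''quotes'' and $dollar$ signs');
-- result: 'key with ''quotes'' and $dollar$ signs'
</programlisting>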
- Porting a Procedure from <application>PL/SQL</> to <application>PL/pgSQL</> + Porting a Procedure from <application>PL/SQL</application> to <application>PL/pgSQL</application> The Oracle version: @@ -5343,14 +5605,13 @@ SELECT * FROM cs_parse_url('http://foobar.com/query.cgi?baz'); CREATE OR REPLACE PROCEDURE cs_create_job(v_job_id IN INTEGER) IS a_running_job_count INTEGER; - PRAGMA AUTONOMOUS_TRANSACTION; -- BEGIN - LOCK TABLE cs_jobs IN EXCLUSIVE MODE; -- + LOCK TABLE cs_jobs IN EXCLUSIVE MODE; SELECT count(*) INTO a_running_job_count FROM cs_jobs WHERE end_stamp IS NULL; IF a_running_job_count > 0 THEN - COMMIT; -- free lock + COMMIT; -- free lock raise_application_error(-20000, 'Unable to create a new job: a job is currently running.'); END IF; @@ -5371,44 +5632,10 @@ show errors - Procedures like this can easily be converted into PostgreSQL - functions returning void. This procedure in - particular is interesting because it can teach us some things: - - - - - There is no PRAGMA statement in PostgreSQL. - - - - - - If you do a LOCK TABLE in PL/pgSQL, - the lock will not be released until the calling transaction is - finished. - - - - - - You cannot issue COMMIT in a - PL/pgSQL function. The function is - running within some outer transaction and so COMMIT - would imply terminating the function's execution. However, in - this particular case it is not necessary anyway, because the lock - obtained by the LOCK TABLE will be released when - we raise an error. - - - - - - - This is how we could port this procedure to PL/pgSQL: + This is how we could port this procedure to PL/pgSQL: -CREATE OR REPLACE FUNCTION cs_create_job(v_job_id integer) RETURNS void AS $$ +CREATE OR REPLACE PROCEDURE cs_create_job(v_job_id integer) AS $$ DECLARE a_running_job_count integer; BEGIN @@ -5417,7 +5644,8 @@ BEGIN SELECT count(*) INTO a_running_job_count FROM cs_jobs WHERE end_stamp IS NULL; IF a_running_job_count > 0 THEN - RAISE EXCEPTION 'Unable to create a new job: a job is currently running'; -- + COMMIT; -- free lock + RAISE EXCEPTION 'Unable to create a new job: a job is currently running'; -- END IF; DELETE FROM cs_active_job; @@ -5426,9 +5654,10 @@ BEGIN BEGIN INSERT INTO cs_jobs (job_id, start_stamp) VALUES (v_job_id, now()); EXCEPTION - WHEN unique_violation THEN -- + WHEN unique_violation THEN -- -- don't worry if it already exists END; + COMMIT; END; $$ LANGUAGE plpgsql; @@ -5436,28 +5665,22 @@ $$ LANGUAGE plpgsql; - The syntax of RAISE is considerably different from - Oracle's statement, although the basic case RAISE + The syntax of RAISE is considerably different from + Oracle's statement, although the basic case RAISE exception_name works similarly. - The exception names supported by PL/pgSQL are + The exception names supported by PL/pgSQL are different from Oracle's. The set of built-in exception names - is much larger (see ). There + is much larger (see ). There is not currently a way to declare user-defined exception names, although you can throw user-chosen SQLSTATE values instead. - - The main functional difference between this procedure and the - Oracle equivalent is that the exclusive lock on the cs_jobs - table will be held until the calling transaction completes. Also, if - the caller later aborts (for example due to an error), the effects of - this procedure will be rolled back. @@ -5467,7 +5690,7 @@ $$ LANGUAGE plpgsql; This section explains a few other things to watch for when porting - Oracle PL/SQL functions to + Oracle PL/SQL functions to PostgreSQL. 
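One immediate thing to watch for, following on from the port above: because the routine is now a true procedure, it is invoked with CALL rather than with SELECT, and the COMMIT inside it is only permitted when the procedure is called from a context that allows transaction control. A brief sketch:

<programlisting>
-- Invoking the ported procedure; the job id is arbitrary.
CALL cs_create_job(42);
</programlisting>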
@@ -5475,9 +5698,9 @@ $$ LANGUAGE plpgsql; Implicit Rollback after Exceptions - In PL/pgSQL, when an exception is caught by an - EXCEPTION clause, all database changes since the block's - BEGIN are automatically rolled back. That is, the behavior + In PL/pgSQL, when an exception is caught by an + EXCEPTION clause, all database changes since the block's + BEGIN are automatically rolled back. That is, the behavior is equivalent to what you'd get in Oracle with: @@ -5495,10 +5718,10 @@ END; If you are translating an Oracle procedure that uses - SAVEPOINT and ROLLBACK TO in this style, - your task is easy: just omit the SAVEPOINT and - ROLLBACK TO. If you have a procedure that uses - SAVEPOINT and ROLLBACK TO in a different way + SAVEPOINT and ROLLBACK TO in this style, + your task is easy: just omit the SAVEPOINT and + ROLLBACK TO. If you have a procedure that uses + SAVEPOINT and ROLLBACK TO in a different way then some actual thought will be required. @@ -5507,12 +5730,12 @@ END; <command>EXECUTE</command> - The PL/pgSQL version of + The PL/pgSQL version of EXECUTE works similarly to the - PL/SQL version, but you have to remember to use + PL/SQL version, but you have to remember to use quote_literal and quote_ident as described in . Constructs of the + linkend="plpgsql-statements-executing-dyn"/>. Constructs of the type EXECUTE 'SELECT * FROM $1'; will not work reliably unless you use these functions. @@ -5522,12 +5745,12 @@ END; Optimizing <application>PL/pgSQL</application> Functions - PostgreSQL gives you two function creation - modifiers to optimize execution: volatility (whether + PostgreSQL gives you two function creation + modifiers to optimize execution: volatility (whether the function always returns the same result when given the same arguments) and strictness (whether the function returns null if any argument is null). Consult the + linkend="sql-createfunction"/> reference page for details. 
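A minimal sketch of those two modifiers (the function name and formula are just an illustration): declaring the function's true behavior lets the planner pre-evaluate calls with constant arguments and skip the call entirely when an argument is null.

<programlisting>
CREATE FUNCTION fahrenheit_to_celsius(f numeric) RETURNS numeric AS $$
BEGIN
    -- IMMUTABLE: the same input always gives the same output, so the
    -- planner may fold constant calls.  STRICT: a NULL input yields NULL
    -- without the function being called at all.
    RETURN (f - 32) * 5.0 / 9.0;
END;
$$ LANGUAGE plpgsql STRICT IMMUTABLE;
</programlisting>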
@@ -5555,30 +5778,32 @@ $$ LANGUAGE plpgsql STRICT IMMUTABLE; - instr function + instr function - + 0 THEN temp_str := substring(string FROM beg_index); - pos := position(string_to_search IN temp_str); + pos := position(string_to_search_for IN temp_str); IF pos = 0 THEN RETURN 0; ELSE RETURN pos + beg_index - 1; END IF; - ELSIF beg_index < 0 THEN - ss_length := char_length(string_to_search); + ELSIF beg_index < 0 THEN + ss_length := char_length(string_to_search_for); length := char_length(string); - beg := length + beg_index - ss_length + 2; + beg := length + 1 + beg_index; - WHILE beg > 0 LOOP + WHILE beg > 0 LOOP temp_str := substring(string FROM beg FOR ss_length); - pos := position(string_to_search IN temp_str); - - IF pos > 0 THEN + IF string_to_search_for = temp_str THEN RETURN beg; END IF; @@ -5620,7 +5843,7 @@ END; $$ LANGUAGE plpgsql STRICT IMMUTABLE; -CREATE FUNCTION instr(string varchar, string_to_search varchar, +CREATE FUNCTION instr(string varchar, string_to_search_for varchar, beg_index integer, occur_index integer) RETURNS integer AS $$ DECLARE @@ -5632,39 +5855,32 @@ DECLARE length integer; ss_length integer; BEGIN - IF beg_index > 0 THEN - beg := beg_index; - temp_str := substring(string FROM beg_index); + IF occur_index <= 0 THEN + RAISE 'argument ''%'' is out of range', occur_index + USING ERRCODE = '22003'; + END IF; + IF beg_index > 0 THEN + beg := beg_index - 1; FOR i IN 1..occur_index LOOP - pos := position(string_to_search IN temp_str); - - IF i = 1 THEN - beg := beg + pos - 1; - ELSE - beg := beg + pos; - END IF; - temp_str := substring(string FROM beg + 1); + pos := position(string_to_search_for IN temp_str); + IF pos = 0 THEN + RETURN 0; + END IF; + beg := beg + pos; END LOOP; - IF pos = 0 THEN - RETURN 0; - ELSE - RETURN beg; - END IF; - ELSIF beg_index < 0 THEN - ss_length := char_length(string_to_search); + RETURN beg; + ELSIF beg_index < 0 THEN + ss_length := char_length(string_to_search_for); length := char_length(string); - beg := length + beg_index - ss_length + 2; + beg := length + 1 + beg_index; - WHILE beg > 0 LOOP + WHILE beg > 0 LOOP temp_str := substring(string FROM beg FOR ss_length); - pos := position(string_to_search IN temp_str); - - IF pos > 0 THEN + IF string_to_search_for = temp_str THEN occur_number := occur_number + 1; - IF occur_number = occur_index THEN RETURN beg; END IF; @@ -5679,6 +5895,7 @@ BEGIN END IF; END; $$ LANGUAGE plpgsql STRICT IMMUTABLE; +]]> diff --git a/doc/src/sgml/plpython.sgml b/doc/src/sgml/plpython.sgml index 777a7ef780..60a5907464 100644 --- a/doc/src/sgml/plpython.sgml +++ b/doc/src/sgml/plpython.sgml @@ -3,33 +3,33 @@ PL/Python - Python Procedural Language - PL/Python - Python + PL/Python + Python The PL/Python procedural language allows PostgreSQL functions to be written in the - Python language. + Python language. To install PL/Python in a particular database, use - CREATE EXTENSION plpythonu (but - see also ). + CREATE EXTENSION plpythonu (but + see also ). - If a language is installed into template1, all subsequently + If a language is installed into template1, all subsequently created databases will have the language installed automatically. - PL/Python is only available as an untrusted language, meaning + PL/Python is only available as an untrusted language, meaning it does not offer any way of restricting what users can do in it and - is therefore named plpythonu. A trusted - variant plpython might become available in the future + is therefore named plpythonu. 
A trusted + variant plpython might become available in the future if a secure execution mechanism is developed in Python. The writer of a function in untrusted PL/Python must take care that the function cannot be used to do anything unwanted, since it will be @@ -89,7 +89,7 @@ This scheme is analogous to the recommendations in PEP 394 regarding the + url="https://www.python.org/dev/peps/pep-0394/">PEP 394 regarding the naming and transitioning of the python command. @@ -103,7 +103,7 @@ The built variant depends on which Python version was found during the installation or which version was explicitly set using the PYTHON environment variable; - see . To make both variants of + see . To make both variants of PL/Python available in one installation, the source tree has to be configured and built twice. @@ -165,7 +165,7 @@ See also the - document What's + document What's New In Python 3.0 for more information about porting to Python 3. @@ -186,7 +186,7 @@ Functions in PL/Python are declared via the - standard syntax: + standard syntax: CREATE FUNCTION funcname (argument-list) @@ -207,7 +207,11 @@ $$ LANGUAGE plpythonu; yield (in case of a result-set statement). If you do not provide a return value, Python returns the default None. PL/Python translates - Python's None into the SQL null value. + Python's None into the SQL null value. In a procedure, + the result from the Python code must be None (typically + achieved by ending the procedure without a return + statement or by using a return statement without + argument); otherwise, an error will be raised. @@ -383,8 +387,8 @@ $$ LANGUAGE plpythonu; For all other PostgreSQL return types, the return value is converted to a string using the Python built-in str, and the result is passed to the input function of the PostgreSQL data type. - (If the Python value is a float, it is converted using - the repr built-in instead of str, to + (If the Python value is a float, it is converted using + the repr built-in instead of str, to avoid loss of precision.) @@ -420,7 +424,7 @@ $$ LANGUAGE plpythonu; sortas="PL/Python">in PL/Python is passed to a function, the argument value will appear as None in Python. For example, the function definition of pymax - shown in will return the wrong answer for null + shown in will return the wrong answer for null inputs. We could add STRICT to the function definition to make PostgreSQL do something more reasonable: if a null value is passed, the function will not be called at all, @@ -645,6 +649,17 @@ return (1, 2) $$ LANGUAGE plpythonu; SELECT * FROM multiout_simple(); + + + + + Output parameters of procedures are passed back the same way. For example: + +CREATE PROCEDURE python_triple(INOUT a integer, INOUT b integer) AS $$ +return (a * 3, b * 3) +$$ LANGUAGE plpythonu; + +CALL python_triple(5, 10); @@ -753,11 +768,11 @@ SELECT * FROM multiout_simple_setof(3); Sharing Data The global dictionary SD is available to store - data between function calls. This variable is private static data. + private data between repeated calls to the same function. The global dictionary GD is public data, - available to all Python functions within a session. 
Use with - care.global data - in PL/Python + that is available to all Python functions within a session; use with + care.global data + in PL/Python @@ -774,7 +789,7 @@ SELECT * FROM multiout_simple_setof(3); PL/Python also supports anonymous code blocks called with the - statement: + statement: DO $$ @@ -800,38 +815,38 @@ $$ LANGUAGE plpythonu; TD contains trigger-related values: - TD["event"] + TD["event"] contains the event as a string: - INSERT, UPDATE, - DELETE, or TRUNCATE. + INSERT, UPDATE, + DELETE, or TRUNCATE. - TD["when"] + TD["when"] - contains one of BEFORE, AFTER, or - INSTEAD OF. + contains one of BEFORE, AFTER, or + INSTEAD OF. - TD["level"] + TD["level"] - contains ROW or STATEMENT. + contains ROW or STATEMENT. - TD["new"] - TD["old"] + TD["new"] + TD["old"] For a row-level trigger, one or both of these fields contain @@ -841,7 +856,7 @@ $$ LANGUAGE plpythonu; - TD["name"] + TD["name"] contains the trigger name. @@ -850,7 +865,7 @@ $$ LANGUAGE plpythonu; - TD["table_name"] + TD["table_name"] contains the name of the table on which the trigger occurred. @@ -859,7 +874,7 @@ $$ LANGUAGE plpythonu; - TD["table_schema"] + TD["table_schema"] contains the schema of the table on which the trigger occurred. @@ -868,7 +883,7 @@ $$ LANGUAGE plpythonu; - TD["relid"] + TD["relid"] contains the OID of the table on which the trigger occurred. @@ -877,12 +892,12 @@ $$ LANGUAGE plpythonu; - TD["args"] + TD["args"] - If the CREATE TRIGGER command - included arguments, they are available in TD["args"][0] to - TD["args"][n-1]. + If the CREATE TRIGGER command + included arguments, they are available in TD["args"][0] to + TD["args"][n-1]. @@ -890,14 +905,14 @@ $$ LANGUAGE plpythonu; - If TD["when"] is BEFORE or - INSTEAD OF and - TD["level"] is ROW, you can + If TD["when"] is BEFORE or + INSTEAD OF and + TD["level"] is ROW, you can return None or "OK" from the Python function to indicate the row is unmodified, - "SKIP" to abort the event, or if TD["event"] - is INSERT or UPDATE you can return - "MODIFY" to indicate you've modified the new row. + "SKIP" to abort the event, or if TD["event"] + is INSERT or UPDATE you can return + "MODIFY" to indicate you've modified the new row. Otherwise the return value is ignored. @@ -1023,7 +1038,7 @@ foo = rv[i]["my_column"] plpy.execute(plan [, arguments [, max-rows]]) - preparing a queryin PL/Python + preparing a queryin PL/Python plpy.prepare prepares the execution plan for a query. It is called with a query string and a list of parameter types, if you have parameter references in the query. For example: @@ -1056,16 +1071,16 @@ rv = plan.execute(["name"], 5) Query parameters and result row fields are converted between PostgreSQL - and Python data types as described in . + and Python data types as described in . When you prepare a plan using the PL/Python module it is automatically - saved. Read the SPI documentation () for a + saved. Read the SPI documentation () for a description of what this means. In order to make effective use of this across function calls one needs to use one of the persistent storage dictionaries SD or GD (see - ). For example: + ). For example: CREATE FUNCTION usesavedplan() RETURNS trigger AS $$ if "plan" in SD: @@ -1101,7 +1116,7 @@ $$ LANGUAGE plpythonu; batch of rows, never larger than the parameter value. Once all rows are exhausted, fetch starts returning an empty result object. Cursor objects also provide an - iterator + iterator interface, yielding one row at a time until all rows are exhausted. 
Data fetched that way is not returned as result objects, but rather as dictionaries, each dictionary corresponding to a single result @@ -1152,7 +1167,7 @@ $$ LANGUAGE plpythonu; Do not confuse objects created by plpy.cursor with DB-API cursors as defined by - the Python + the Python Database API specification. They don't have anything in common except for the name. @@ -1190,7 +1205,7 @@ $$ LANGUAGE plpythonu; The actual class of the exception being raised corresponds to the specific condition that caused the error. Refer - to for a list of possible + to for a list of possible conditions. The module plpy.spiexceptions defines an exception class for each PostgreSQL condition, deriving @@ -1241,7 +1256,7 @@ $$ LANGUAGE plpythonu; Recovering from errors caused by database access as described in - can lead to an undesirable + can lead to an undesirable situation where some operations succeed before one of them fails, and after recovering from that error the data is left in an inconsistent state. PL/Python offers a solution to this problem in @@ -1282,7 +1297,7 @@ $$ LANGUAGE plpythonu; helper object to manage explicit subtransactions that gets created with the plpy.subtransaction() function. Objects created by this function implement the - + context manager interface. Using explicit subtransactions we can rewrite our function as: @@ -1358,7 +1373,7 @@ $$ LANGUAGE plpythonu; Although context managers were implemented in Python 2.5, to use the with syntax in that version you need to use a future + url="https://docs.python.org/release/2.5/ref/future.html">future statement. Because of implementation details, however, you cannot use future statements in PL/Python functions. @@ -1366,38 +1381,77 @@ $$ LANGUAGE plpythonu; + + Transaction Management + + + In a procedure called from the top level or an anonymous code block + (DO command) called from the top level it is possible to + control transactions. To commit the current transaction, call + plpy.commit(). To roll back the current transaction, + call plpy.rollback(). (Note that it is not possible to + run the SQL commands COMMIT or + ROLLBACK via plpy.execute or + similar. It has to be done using these functions.) After a transaction is + ended, a new transaction is automatically started, so there is no separate + function for that. + + + + Here is an example: + +CREATE PROCEDURE transaction_test1() +LANGUAGE plpythonu +AS $$ +for i in range(0, 10): + plpy.execute("INSERT INTO test1 (a) VALUES (%d)" % i) + if i % 2 == 0: + plpy.commit() + else: + plpy.rollback() +$$; + +CALL transaction_test1(); + + + + + Transactions cannot be ended when an explicit subtransaction is active. + + + Utility Functions The plpy module also provides the functions - plpy.debug(msg, **kwargs) - plpy.log(msg, **kwargs) - plpy.info(msg, **kwargs) - plpy.notice(msg, **kwargs) - plpy.warning(msg, **kwargs) - plpy.error(msg, **kwargs) - plpy.fatal(msg, **kwargs) + plpy.debug(msg, **kwargs) + plpy.log(msg, **kwargs) + plpy.info(msg, **kwargs) + plpy.notice(msg, **kwargs) + plpy.warning(msg, **kwargs) + plpy.error(msg, **kwargs) + plpy.fatal(msg, **kwargs) - elogin PL/Python + elogin PL/Python plpy.error and plpy.fatal actually raise a Python exception which, if uncaught, propagates out to the calling query, causing the current transaction or subtransaction to - be aborted. raise plpy.Error(msg) and - raise plpy.Fatal(msg) are - equivalent to calling plpy.error(msg) and - plpy.fatal(msg), respectively but + be aborted. 
raise plpy.Error(msg) and + raise plpy.Fatal(msg) are + equivalent to calling plpy.error(msg) and + plpy.fatal(msg), respectively but the raise form does not allow passing keyword arguments. The other functions only generate messages of different priority levels. Whether messages of a particular priority are reported to the client, written to the server log, or both is controlled by the - and - configuration - variables. See for more information. + and + configuration + variables. See for more information. - The msg argument is given as a positional argument. For + The msg argument is given as a positional argument. For backward compatibility, more than one positional argument can be given. In that case, the string representation of the tuple of positional arguments becomes the message reported to the client. @@ -1438,13 +1492,13 @@ PL/Python function "raise_custom_exception" Another set of utility functions are - plpy.quote_literal(string), - plpy.quote_nullable(string), and - plpy.quote_ident(string). They + plpy.quote_literal(string), + plpy.quote_nullable(string), and + plpy.quote_ident(string). They are equivalent to the built-in quoting functions described in . They are useful when constructing + linkend="functions-string"/>. They are useful when constructing ad-hoc queries. A PL/Python equivalent of dynamic SQL from would be: + linkend="plpgsql-quote-literal-example"/> would be: plpy.execute("UPDATE tbl SET %s = %s WHERE key = %s" % ( plpy.quote_ident(colname), diff --git a/doc/src/sgml/pltcl.sgml b/doc/src/sgml/pltcl.sgml index acd4dd69d3..4dd6fe434f 100644 --- a/doc/src/sgml/pltcl.sgml +++ b/doc/src/sgml/pltcl.sgml @@ -15,8 +15,8 @@ PL/Tcl is a loadable procedural language for the PostgreSQL database system that enables the - Tcl language to be used to write functions and - trigger procedures. + Tcl language to be used to write + PostgreSQL functions. @@ -35,7 +35,7 @@ everything is executed from within the safety of the context of a Tcl interpreter. In addition to the limited command set of safe Tcl, only a few commands are available to access the database via - SPI and to raise messages via elog(). PL/Tcl + SPI and to raise messages via elog(). PL/Tcl provides no way to access internals of the database server or to gain OS-level access under the permissions of the PostgreSQL server process, as a C @@ -50,23 +50,23 @@ Sometimes it is desirable to write Tcl functions that are not restricted to safe Tcl. For example, one might want a Tcl function that sends - email. To handle these cases, there is a variant of PL/Tcl called PL/TclU + email. To handle these cases, there is a variant of PL/Tcl called PL/TclU (for untrusted Tcl). This is exactly the same language except that a full - Tcl interpreter is used. If PL/TclU is used, it must be + Tcl interpreter is used. If PL/TclU is used, it must be installed as an untrusted procedural language so that only - database superusers can create functions in it. The writer of a PL/TclU + database superusers can create functions in it. The writer of a PL/TclU function must take care that the function cannot be used to do anything unwanted, since it will be able to do anything that could be done by a user logged in as the database administrator. 
- The shared object code for the PL/Tcl and - PL/TclU call handlers is automatically built and + The shared object code for the PL/Tcl and + PL/TclU call handlers is automatically built and installed in the PostgreSQL library directory if Tcl support is specified in the configuration step of - the installation procedure. To install PL/Tcl - and/or PL/TclU in a particular database, use the - CREATE EXTENSION command, for example + the installation procedure. To install PL/Tcl + and/or PL/TclU in a particular database, use the + CREATE EXTENSION command, for example CREATE EXTENSION pltcl or CREATE EXTENSION pltclu. @@ -78,8 +78,8 @@ PL/Tcl Functions and Arguments - To create a function in the PL/Tcl language, use - the standard syntax: + To create a function in the PL/Tcl language, use + the standard syntax: CREATE FUNCTION funcname (argument-types) RETURNS return-type AS $$ @@ -87,8 +87,8 @@ CREATE FUNCTION funcname (argument-types $$ LANGUAGE pltcl; - PL/TclU is the same, except that the language has to be specified as - pltclu. + PL/TclU is the same, except that the language has to be specified as + pltclu. @@ -97,7 +97,8 @@ $$ LANGUAGE pltcl; Tcl script as variables named 1 ... n. The result is returned from the Tcl code in the usual way, with - a return statement. + a return statement. In a procedure, the return value + from the Tcl code is ignored. @@ -111,7 +112,7 @@ CREATE FUNCTION tcl_max(integer, integer) RETURNS integer AS $$ $$ LANGUAGE pltcl STRICT; - Note the clause STRICT, which saves us from + Note the clause STRICT, which saves us from having to think about null input values: if a null value is passed, the function will not be called at all, but will just return a null result automatically. @@ -122,7 +123,7 @@ $$ LANGUAGE pltcl STRICT; if the actual value of an argument is null, the corresponding $n variable will be set to an empty string. To detect whether a particular argument is null, use the function - argisnull. For example, suppose that we wanted tcl_max + argisnull. For example, suppose that we wanted tcl_max with one null and one nonnull argument to return the nonnull argument, rather than null: @@ -185,10 +186,22 @@ $$ LANGUAGE pltcl; + + Output arguments of procedures are returned in the same way, for example: + + +CREATE PROCEDURE tcl_triple(INOUT a integer, INOUT b integer) AS $$ + return [list a [expr {$1 * 3}] b [expr {$2 * 3}]] +$$ LANGUAGE pltcl; + +CALL tcl_triple(5, 10); + + + The result list can be made from an array representation of the - desired tuple with the array get Tcl command. For example: + desired tuple with the array get Tcl command. For example: CREATE FUNCTION raise_pay(employee, delta int) RETURNS employee AS $$ @@ -233,8 +246,8 @@ $$ LANGUAGE pltcl; The argument values supplied to a PL/Tcl function's code are simply the input arguments converted to text form (just as if they had been - displayed by a SELECT statement). Conversely, the - return and return_next commands will accept + displayed by a SELECT statement). Conversely, the + return and return_next commands will accept any string that is acceptable input format for the function's declared result type, or for the specified column of a composite result type. @@ -262,14 +275,14 @@ $$ LANGUAGE pltcl; role in a separate Tcl interpreter for that role. This prevents accidental or malicious interference by one user with the behavior of another user's PL/Tcl functions. Each such interpreter will have its own - values for any global Tcl variables. 
Thus, two PL/Tcl + values for any global Tcl variables. Thus, two PL/Tcl functions will share the same global variables if and only if they are executed by the same SQL role. In an application wherein a single session executes code under multiple SQL roles (via SECURITY - DEFINER functions, use of SET ROLE, etc) you may need to + DEFINER functions, use of SET ROLE, etc) you may need to take explicit steps to ensure that PL/Tcl functions can share data. To do that, make sure that functions that should communicate are owned by - the same user, and mark them SECURITY DEFINER. You must of + the same user, and mark them SECURITY DEFINER. You must of course take care that such functions can't be used to do anything unintended. @@ -286,19 +299,19 @@ $$ LANGUAGE pltcl; To help protect PL/Tcl functions from unintentionally interfering with each other, a global - array is made available to each function via the upvar + array is made available to each function via the upvar command. The global name of this variable is the function's internal - name, and the local name is GD. It is recommended that - GD be used + name, and the local name is GD. It is recommended that + GD be used for persistent private data of a function. Use regular Tcl global variables only for values that you specifically intend to be shared among - multiple functions. (Note that the GD arrays are only + multiple functions. (Note that the GD arrays are only global within a particular interpreter, so they do not bypass the security restrictions mentioned above.) - An example of using GD appears in the + An example of using GD appears in the spi_execp example below. @@ -320,28 +333,28 @@ $$ LANGUAGE pltcl; causes an error to be raised. Otherwise, the return value of spi_exec is the number of rows processed (selected, inserted, updated, or deleted) by the command, or zero if the command is a utility - statement. In addition, if the command is a SELECT statement, the + statement. In addition, if the command is a SELECT statement, the values of the selected columns are placed in Tcl variables as described below. - The optional -count value tells + The optional -count value tells spi_exec the maximum number of rows to process in the command. The effect of this is comparable to - setting up a query as a cursor and then saying FETCH n. + setting up a query as a cursor and then saying FETCH n. - If the command is a SELECT statement, the values of the + If the command is a SELECT statement, the values of the result columns are placed into Tcl variables named after the columns. - If the -array option is given, the column values are + If the -array option is given, the column values are instead stored into elements of the named associative array, with the column names used as array indexes. In addition, the current row number within the result (counting from zero) is stored into the array - element named .tupno, unless that name is + element named .tupno, unless that name is in use as a column name in the result. - If the command is a SELECT statement and no loop-body + If the command is a SELECT statement and no loop-body script is given, then only the first row of results are stored into Tcl variables or array elements; remaining rows, if any, are ignored. No storing occurs if the query returns no rows. (This case can be @@ -350,14 +363,14 @@ $$ LANGUAGE pltcl; spi_exec "SELECT count(*) AS cnt FROM pg_proc" - will set the Tcl variable $cnt to the number of rows in - the pg_proc system catalog. 
+ will set the Tcl variable $cnt to the number of rows in + the pg_proc system catalog. - If the optional loop-body argument is given, it is + If the optional loop-body argument is given, it is a piece of Tcl script that is executed once for each row in the - query result. (loop-body is ignored if the given - command is not a SELECT.) + query result. (loop-body is ignored if the given + command is not a SELECT.) The values of the current row's columns are stored into Tcl variables or array elements before each iteration. For example: @@ -366,14 +379,14 @@ spi_exec -array C "SELECT * FROM pg_class" { elog DEBUG "have table $C(relname)" } - will print a log message for every row of pg_class. This + will print a log message for every row of pg_class. This feature works similarly to other Tcl looping constructs; in - particular continue and break work in the + particular continue and break work in the usual way inside the loop body. If a column of a query result is null, the target - variable for it is unset rather than being set. + variable for it is unset rather than being set. @@ -384,8 +397,8 @@ spi_exec -array C "SELECT * FROM pg_class" { Prepares and saves a query plan for later execution. The saved plan will be retained for the life of the current - session.preparing a query - in PL/Tcl + session.preparing a query + in PL/Tcl The query can use parameters, that is, placeholders for @@ -405,29 +418,29 @@ spi_exec -array C "SELECT * FROM pg_class" { - spi_execp -count n -array name -nulls string queryid value-list loop-body + spi_execp -count n -array name -nulls string queryid value-list loop-body - Executes a query previously prepared with spi_prepare. + Executes a query previously prepared with spi_prepare. queryid is the ID returned by - spi_prepare. If the query references parameters, + spi_prepare. If the query references parameters, a value-list must be supplied. This is a Tcl list of actual values for the parameters. The list must be the same length as the parameter type list previously given to - spi_prepare. Omit value-list + spi_prepare. Omit value-list if the query has no parameters. - The optional value for -nulls is a string of spaces and - 'n' characters telling spi_execp + The optional value for -nulls is a string of spaces and + 'n' characters telling spi_execp which of the parameters are null values. If given, it must have exactly the same length as the value-list. If it is not given, all the parameter values are nonnull. Except for the way in which the query and its parameters are specified, - spi_execp works just like spi_exec. - The -count, -array, and + spi_execp works just like spi_exec. + The -count, -array, and loop-body options are the same, and so is the result value. @@ -448,9 +461,9 @@ $$ LANGUAGE pltcl; We need backslashes inside the query string given to - spi_prepare to ensure that the - $n markers will be passed - through to spi_prepare as-is, and not replaced by Tcl + spi_prepare to ensure that the + $n markers will be passed + through to spi_prepare as-is, and not replaced by Tcl variable substitution. @@ -459,7 +472,7 @@ $$ LANGUAGE pltcl; - spi_lastoid + spi_lastoid spi_lastoid in PL/Tcl @@ -468,8 +481,8 @@ $$ LANGUAGE pltcl; Returns the OID of the row inserted by the last - spi_exec or spi_execp, if the - command was a single-row INSERT and the modified + spi_exec or spi_execp, if the + command was a single-row INSERT and the modified table contained OIDs. (If not, you get zero.) 
@@ -483,14 +496,14 @@ $$ LANGUAGE pltcl; executed within a SQL subtransaction. If the script returns an error, that entire subtransaction is rolled back before returning the error out to the surrounding Tcl code. - See for more details and an + See for more details and an example. - quote string + quote string Doubles all occurrences of single quote and backslash characters @@ -504,7 +517,7 @@ $$ LANGUAGE pltcl; "SELECT '$val' AS ret" - where the Tcl variable val actually contains + where the Tcl variable val actually contains doesn't. This would result in the final command string: @@ -536,7 +549,7 @@ SELECT 'doesn''t' AS ret - elog level msg + elog level msg elog in PL/Tcl @@ -545,24 +558,24 @@ SELECT 'doesn''t' AS ret Emits a log or error message. Possible levels are - DEBUG, LOG, INFO, - NOTICE, WARNING, ERROR, and - FATAL. ERROR + DEBUG, LOG, INFO, + NOTICE, WARNING, ERROR, and + FATAL. ERROR raises an error condition; if this is not trapped by the surrounding Tcl code, the error propagates out to the calling query, causing the current transaction or subtransaction to be aborted. This - is effectively the same as the Tcl error command. - FATAL aborts the transaction and causes the current + is effectively the same as the Tcl error command. + FATAL aborts the transaction and causes the current session to shut down. (There is probably no good reason to use this error level in PL/Tcl functions, but it's provided for completeness.) The other levels only generate messages of different priority levels. Whether messages of a particular priority are reported to the client, written to the server log, or both is controlled by the - and - configuration - variables. See - and + and + configuration + variables. See + and for more information. @@ -574,7 +587,7 @@ SELECT 'doesn''t' AS ret - Trigger Procedures in PL/Tcl + Trigger Functions in PL/Tcl trigger @@ -582,13 +595,13 @@ SELECT 'doesn''t' AS ret - Trigger procedures can be written in PL/Tcl. - PostgreSQL requires that a procedure that is to be called + Trigger functions can be written in PL/Tcl. + PostgreSQL requires that a function that is to be called as a trigger must be declared as a function with no arguments - and a return type of trigger. + and a return type of trigger. - The information from the trigger manager is passed to the procedure body + The information from the trigger manager is passed to the function body in the following variables: @@ -606,7 +619,7 @@ SELECT 'doesn''t' AS ret $TG_relid - The object ID of the table that caused the trigger procedure + The object ID of the table that caused the trigger function to be invoked. @@ -616,7 +629,7 @@ SELECT 'doesn''t' AS ret $TG_table_name - The name of the table that caused the trigger procedure + The name of the table that caused the trigger function to be invoked. @@ -626,7 +639,7 @@ SELECT 'doesn''t' AS ret $TG_table_schema - The schema of the table that caused the trigger procedure + The schema of the table that caused the trigger function to be invoked. @@ -637,8 +650,8 @@ SELECT 'doesn''t' AS ret A Tcl list of the table column names, prefixed with an empty list - element. So looking up a column name in the list with Tcl's - lsearch command returns the element's number starting + element. So looking up a column name in the list with Tcl's + lsearch command returns the element's number starting with 1 for the first column, the same way the columns are customarily numbered in PostgreSQL. 
(Empty list elements also appear in the positions of columns that have been @@ -652,8 +665,8 @@ SELECT 'doesn''t' AS ret $TG_when - The string BEFORE, AFTER, or - INSTEAD OF, depending on the type of trigger event. + The string BEFORE, AFTER, or + INSTEAD OF, depending on the type of trigger event. @@ -662,7 +675,7 @@ SELECT 'doesn''t' AS ret $TG_level - The string ROW or STATEMENT depending on the + The string ROW or STATEMENT depending on the type of trigger event. @@ -672,8 +685,8 @@ SELECT 'doesn''t' AS ret $TG_op - The string INSERT, UPDATE, - DELETE, or TRUNCATE depending on the type of + The string INSERT, UPDATE, + DELETE, or TRUNCATE depending on the type of trigger event. @@ -684,8 +697,8 @@ SELECT 'doesn''t' AS ret An associative array containing the values of the new table - row for INSERT or UPDATE actions, or - empty for DELETE. The array is indexed by column + row for INSERT or UPDATE actions, or + empty for DELETE. The array is indexed by column name. Columns that are null will not appear in the array. This is not set for statement-level triggers. @@ -697,8 +710,8 @@ SELECT 'doesn''t' AS ret An associative array containing the values of the old table - row for UPDATE or DELETE actions, or - empty for INSERT. The array is indexed by column + row for UPDATE or DELETE actions, or + empty for INSERT. The array is indexed by column name. Columns that are null will not appear in the array. This is not set for statement-level triggers. @@ -709,9 +722,9 @@ SELECT 'doesn''t' AS ret $args - A Tcl list of the arguments to the procedure as given in the + A Tcl list of the arguments to the function as given in the CREATE TRIGGER statement. These arguments are also accessible as - $1 ... $n in the procedure body. + $1 ... $n in the function body. @@ -720,38 +733,38 @@ SELECT 'doesn''t' AS ret - The return value from a trigger procedure can be one of the strings - OK or SKIP, or a list of column name/value pairs. - If the return value is OK, - the operation (INSERT/UPDATE/DELETE) + The return value from a trigger function can be one of the strings + OK or SKIP, or a list of column name/value pairs. + If the return value is OK, + the operation (INSERT/UPDATE/DELETE) that fired the trigger will proceed - normally. SKIP tells the trigger manager to silently suppress + normally. SKIP tells the trigger manager to silently suppress the operation for this row. If a list is returned, it tells PL/Tcl to return a modified row to the trigger manager; the contents of the modified row are specified by the column names and values in the list. Any columns not mentioned in the list are set to null. Returning a modified row is only meaningful - for row-level BEFORE INSERT or UPDATE + for row-level BEFORE INSERT or UPDATE triggers, for which the modified row will be inserted instead of the one - given in $NEW; or for row-level INSTEAD OF - INSERT or UPDATE triggers where the returned row - is used as the source data for INSERT RETURNING or - UPDATE RETURNING clauses. - In row-level BEFORE DELETE or INSTEAD - OF DELETE triggers, returning a modified row has the same - effect as returning OK, that is the operation proceeds. + given in $NEW; or for row-level INSTEAD OF + INSERT or UPDATE triggers where the returned row + is used as the source data for INSERT RETURNING or + UPDATE RETURNING clauses. + In row-level BEFORE DELETE or INSTEAD + OF DELETE triggers, returning a modified row has the same + effect as returning OK, that is the operation proceeds. 
The trigger return value is ignored for all other types of triggers. The result list can be made from an array representation of the - modified tuple with the array get Tcl command. + modified tuple with the array get Tcl command. - Here's a little example trigger procedure that forces an integer value + Here's a little example trigger function that forces an integer value in a table to keep track of the number of updates that are performed on the row. For new rows inserted, the value is initialized to 0 and then incremented on every update operation. @@ -776,17 +789,17 @@ $$ LANGUAGE pltcl; CREATE TABLE mytab (num integer, description text, modcnt integer); CREATE TRIGGER trig_mytab_modcount BEFORE INSERT OR UPDATE ON mytab - FOR EACH ROW EXECUTE PROCEDURE trigfunc_modcount('modcnt'); + FOR EACH ROW EXECUTE FUNCTION trigfunc_modcount('modcnt'); - Notice that the trigger procedure itself does not know the column + Notice that the trigger function itself does not know the column name; that's supplied from the trigger arguments. This lets the - trigger procedure be reused with different tables. + trigger function be reused with different tables. - Event Trigger Procedures in PL/Tcl + Event Trigger Functions in PL/Tcl event trigger @@ -794,13 +807,13 @@ CREATE TRIGGER trig_mytab_modcount BEFORE INSERT OR UPDATE ON mytab - Event trigger procedures can be written in PL/Tcl. - PostgreSQL requires that a procedure that is + Event trigger functions can be written in PL/Tcl. + PostgreSQL requires that a function that is to be called as an event trigger must be declared as a function with no - arguments and a return type of event_trigger. + arguments and a return type of event_trigger. - The information from the trigger manager is passed to the procedure body + The information from the trigger manager is passed to the function body in the following variables: @@ -826,11 +839,11 @@ CREATE TRIGGER trig_mytab_modcount BEFORE INSERT OR UPDATE ON mytab - The return value of the trigger procedure is ignored. + The return value of the trigger function is ignored. - Here's a little example event trigger procedure that simply raises + Here's a little example event trigger function that simply raises a NOTICE message each time a supported command is executed: @@ -839,7 +852,7 @@ CREATE OR REPLACE FUNCTION tclsnitch() RETURNS event_trigger AS $$ elog NOTICE "tclsnitch: $TG_event $TG_tag" $$ LANGUAGE pltcl; -CREATE EVENT TRIGGER tcl_a_snitch ON ddl_command_start EXECUTE PROCEDURE tclsnitch(); +CREATE EVENT TRIGGER tcl_a_snitch ON ddl_command_start EXECUTE FUNCTION tclsnitch(); @@ -885,17 +898,17 @@ CREATE EVENT TRIGGER tcl_a_snitch ON ddl_command_start EXECUTE PROCEDURE tclsnit word is POSTGRES, the second word is the PostgreSQL version number, and additional words are field name/value pairs providing detailed information about the error. - Fields SQLSTATE, condition, - and message are always supplied + Fields SQLSTATE, condition, + and message are always supplied (the first two represent the error code and condition name as shown - in ). + in ). Fields that may be present include - detail, hint, context, - schema, table, column, - datatype, constraint, - statement, cursor_position, - filename, lineno, and - funcname. + detail, hint, context, + schema, table, column, + datatype, constraint, + statement, cursor_position, + filename, lineno, and + funcname. 
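
      As a sketch of how these fields can be examined in practice (the function
      and table names here are hypothetical), a database error trapped with
      Tcl's catch leaves the field name/value pairs in the global errorCode
      list, which can be loaded into an array:

CREATE FUNCTION insert_or_report(integer) RETURNS text AS $$
    if {[catch { spi_exec "INSERT INTO mytab (num) VALUES ($1)" } msg]} {
        if {[lindex $::errorCode 0] == "POSTGRES"} {
            # The list begins with POSTGRES and the server version, followed
            # by field name/value pairs such as SQLSTATE, condition, and
            # message, so it can be loaded directly into an array.
            array set errfields $::errorCode
            return "SQLSTATE $errfields(SQLSTATE): $errfields(condition)"
        }
        return "non-database error: $msg"
    }
    return "inserted"
$$ LANGUAGE pltcl;

      An error trapped this way has already had its SQL subtransaction rolled
      back, as described earlier.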
@@ -929,7 +942,7 @@ if {[catch { spi_exec $sql_command }]} { Recovering from errors caused by database access as described in - can lead to an undesirable + can lead to an undesirable situation where some operations succeed before one of them fails, and after recovering from that error the data is left in an inconsistent state. PL/Tcl offers a solution to this problem in @@ -1001,12 +1014,53 @@ $$ LANGUAGE pltcl; + + Transaction Management + + + In a procedure called from the top level or an anonymous code block + (DO command) called from the top level it is possible + to control transactions. To commit the current transaction, call the + commit command. To roll back the current transaction, + call the rollback command. (Note that it is not + possible to run the SQL commands COMMIT or + ROLLBACK via spi_exec or similar. + It has to be done using these functions.) After a transaction is ended, + a new transaction is automatically started, so there is no separate + command for that. + + + + Here is an example: + +CREATE PROCEDURE transaction_test1() +LANGUAGE pltcl +AS $$ +for {set i 0} {$i < 10} {incr i} { + spi_exec "INSERT INTO test1 (a) VALUES ($i)" + if {$i % 2 == 0} { + commit + } else { + rollback + } +} +$$; + +CALL transaction_test1(); + + + + + Transactions cannot be ended when an explicit subtransaction is active. + + + PL/Tcl Configuration This section lists configuration parameters that - affect PL/Tcl. + affect PL/Tcl. @@ -1015,7 +1069,7 @@ $$ LANGUAGE pltcl; pltcl.start_proc (string) - pltcl.start_proc configuration parameter + pltcl.start_proc configuration parameter @@ -1031,8 +1085,8 @@ $$ LANGUAGE pltcl; - The referenced function must be written in the pltcl - language, and must not be marked SECURITY DEFINER. + The referenced function must be written in the pltcl + language, and must not be marked SECURITY DEFINER. (These restrictions ensure that it runs in the interpreter it's supposed to initialize.) The current user must have permission to call it, too. @@ -1060,14 +1114,14 @@ $$ LANGUAGE pltcl; pltclu.start_proc (string) - pltclu.start_proc configuration parameter + pltclu.start_proc configuration parameter This parameter is exactly like pltcl.start_proc, except that it applies to PL/TclU. The referenced function must - be written in the pltclu language. + be written in the pltclu language. @@ -1084,7 +1138,7 @@ $$ LANGUAGE pltcl; differ. Tcl, however, requires all procedure names to be distinct. PL/Tcl deals with this by making the internal Tcl procedure names contain the object - ID of the function from the system table pg_proc as part of their name. Thus, + ID of the function from the system table pg_proc as part of their name. Thus, PostgreSQL functions with the same name and different argument types will be different Tcl procedures, too. This is not normally a concern for a PL/Tcl programmer, but it might be visible diff --git a/doc/src/sgml/postgres-fdw.sgml b/doc/src/sgml/postgres-fdw.sgml index d83fc9e52b..54b5e98a0e 100644 --- a/doc/src/sgml/postgres-fdw.sgml +++ b/doc/src/sgml/postgres-fdw.sgml @@ -8,31 +8,31 @@ - The postgres_fdw module provides the foreign-data wrapper + The postgres_fdw module provides the foreign-data wrapper postgres_fdw, which can be used to access data stored in external PostgreSQL servers. The functionality provided by this module overlaps substantially - with the functionality of the older module. - But postgres_fdw provides more transparent and + with the functionality of the older module. 
+ But postgres_fdw provides more transparent and standards-compliant syntax for accessing remote tables, and can give better performance in many cases. - To prepare for remote access using postgres_fdw: + To prepare for remote access using postgres_fdw: - Install the postgres_fdw extension using . + Install the postgres_fdw extension using . - Create a foreign server object, using , + Create a foreign server object, using , to represent each remote database you want to connect to. Specify connection information, except user and password, as options of the server object. @@ -40,7 +40,7 @@ - Create a user mapping, using , for + Create a user mapping, using , for each database user you want to allow to access each foreign server. Specify the remote user name and password to use as user and password options of the @@ -49,8 +49,8 @@ - Create a foreign table, using - or , + Create a foreign table, using + or , for each remote table you want to access. The columns of the foreign table must match the referenced remote table. You can, however, use table and/or column names different from the remote table's, if you @@ -61,17 +61,17 @@ - Now you need only SELECT from a foreign table to access + Now you need only SELECT from a foreign table to access the data stored in its underlying remote table. You can also modify - the remote table using INSERT, UPDATE, or - DELETE. (Of course, the remote user you have specified + the remote table using INSERT, UPDATE, or + DELETE. (Of course, the remote user you have specified in your user mapping must have privileges to do these things.) - Note that postgres_fdw currently lacks support for + Note that postgres_fdw currently lacks support for INSERT statements with an ON CONFLICT DO - UPDATE clause. However, the ON CONFLICT DO NOTHING + UPDATE clause. However, the ON CONFLICT DO NOTHING clause is supported, provided a unique index inference specification is omitted. @@ -79,10 +79,10 @@ It is generally recommended that the columns of a foreign table be declared with exactly the same data types, and collations if applicable, as the - referenced columns of the remote table. Although postgres_fdw + referenced columns of the remote table. Although postgres_fdw is currently rather forgiving about performing data type conversions at need, surprising semantic anomalies may arise when types or collations do - not match, due to the remote server interpreting WHERE clauses + not match, due to the remote server interpreting WHERE clauses slightly differently from the local server. @@ -99,9 +99,9 @@ Connection Options - A foreign server using the postgres_fdw foreign data wrapper - can have the same options that libpq accepts in - connection strings, as described in , + A foreign server using the postgres_fdw foreign data wrapper + can have the same options that libpq accepts in + connection strings, as described in , except that these options are not allowed: @@ -113,14 +113,14 @@ - client_encoding (this is automatically set from the local + client_encoding (this is automatically set from the local server encoding) - fallback_application_name (always set to - postgres_fdw) + fallback_application_name (always set to + postgres_fdw) @@ -186,14 +186,14 @@ Cost Estimation Options - postgres_fdw retrieves remote data by executing queries + postgres_fdw retrieves remote data by executing queries against remote servers, so ideally the estimated cost of scanning a foreign table should be whatever it costs to be done on the remote server, plus some overhead for communication. 
The most reliable way to get such an estimate is to ask the remote server and then add something for overhead — but for simple queries, it may not be worth the cost of an additional remote query to get a cost estimate. - So postgres_fdw provides the following options to control + So postgres_fdw provides the following options to control how cost estimation is done: @@ -204,7 +204,7 @@ This option, which can be specified for a foreign table or a foreign - server, controls whether postgres_fdw issues remote + server, controls whether postgres_fdw issues remote EXPLAIN commands to obtain cost estimates. A setting for a foreign table overrides any setting for its server, but only for that table. @@ -245,16 +245,16 @@ When use_remote_estimate is true, - postgres_fdw obtains row count and cost estimates from the + postgres_fdw obtains row count and cost estimates from the remote server and then adds fdw_startup_cost and fdw_tuple_cost to the cost estimates. When use_remote_estimate is false, - postgres_fdw performs local row count and cost estimation + postgres_fdw performs local row count and cost estimation and then adds fdw_startup_cost and fdw_tuple_cost to the cost estimates. This local estimation is unlikely to be very accurate unless local copies of the remote table's statistics are available. Running - on the foreign table is the way to update + on the foreign table is the way to update the local statistics; this will perform a scan of the remote table and then calculate and store statistics just as though the table were local. Keeping local statistics can be a useful way to reduce per-query planning @@ -268,12 +268,12 @@ Remote Execution Options - By default, only WHERE clauses using built-in operators and + By default, only WHERE clauses using built-in operators and functions will be considered for execution on the remote server. Clauses involving non-built-in functions are checked locally after rows are fetched. If such functions are available on the remote server and can be relied on to produce the same results as they do locally, performance can - be improved by sending such WHERE clauses for remote + be improved by sending such WHERE clauses for remote execution. This behavior can be controlled using the following option: @@ -284,7 +284,7 @@ This option is a comma-separated list of names - of PostgreSQL extensions that are installed, in + of PostgreSQL extensions that are installed, in compatible versions, on both the local and remote servers. Functions and operators that are immutable and belong to a listed extension will be considered shippable to the remote server. @@ -293,7 +293,7 @@ When using the extensions option, it is the - user's responsibility that the listed extensions exist and behave + user's responsibility that the listed extensions exist and behave identically on both the local and remote servers. Otherwise, remote queries may fail or behave unexpectedly. @@ -304,11 +304,11 @@ fetch_size - This option specifies the number of rows postgres_fdw + This option specifies the number of rows postgres_fdw should get in each fetch operation. It can be specified for a foreign table or a foreign server. The option specified on a table overrides an option specified for the server. - The default is 100. + The default is 100. @@ -321,7 +321,7 @@ Updatability Options - By default all foreign tables using postgres_fdw are assumed + By default all foreign tables using postgres_fdw are assumed to be updatable. 
This may be overridden using the following option: @@ -331,20 +331,20 @@ updatable - This option controls whether postgres_fdw allows foreign - tables to be modified using INSERT, UPDATE and - DELETE commands. It can be specified for a foreign table + This option controls whether postgres_fdw allows foreign + tables to be modified using INSERT, UPDATE and + DELETE commands. It can be specified for a foreign table or a foreign server. A table-level option overrides a server-level option. - The default is true. + The default is true. Of course, if the remote table is not in fact updatable, an error would occur anyway. Use of this option primarily allows the error to be thrown locally without querying the remote server. Note however - that the information_schema views will report a - postgres_fdw foreign table to be updatable (or not) + that the information_schema views will report a + postgres_fdw foreign table to be updatable (or not) according to the setting of this option, without any check of the remote server. @@ -358,8 +358,8 @@ Importing Options - postgres_fdw is able to import foreign table definitions - using . This command creates + postgres_fdw is able to import foreign table definitions + using . This command creates foreign table definitions on the local server that match tables or views present on the remote server. If the remote tables to be imported have columns of user-defined data types, the local server must have @@ -368,7 +368,7 @@ Importing behavior can be customized with the following options - (given in the IMPORT FOREIGN SCHEMA command): + (given in the IMPORT FOREIGN SCHEMA command): @@ -376,9 +376,9 @@ import_collate - This option controls whether column COLLATE options + This option controls whether column COLLATE options are included in the definitions of foreign tables imported - from a foreign server. The default is true. You might + from a foreign server. The default is true. You might need to turn this off if the remote server has a different set of collation names than the local server does, which is likely to be the case if it's running on a different operating system. @@ -389,13 +389,13 @@ import_default - This option controls whether column DEFAULT expressions + This option controls whether column DEFAULT expressions are included in the definitions of foreign tables imported - from a foreign server. The default is false. If you + from a foreign server. The default is false. If you enable this option, be wary of defaults that might get computed differently on the local server than they would be on the remote - server; nextval() is a common source of problems. - The IMPORT will fail altogether if an imported default + server; nextval() is a common source of problems. + The IMPORT will fail altogether if an imported default expression uses a function or operator that does not exist locally. @@ -404,26 +404,26 @@ import_not_null - This option controls whether column NOT NULL + This option controls whether column NOT NULL constraints are included in the definitions of foreign tables imported - from a foreign server. The default is true. + from a foreign server. The default is true. - Note that constraints other than NOT NULL will never be - imported from the remote tables. Although PostgreSQL - does support CHECK constraints on foreign tables, there is no + Note that constraints other than NOT NULL will never be + imported from the remote tables. 
Although PostgreSQL + does support CHECK constraints on foreign tables, there is no provision for importing them automatically, because of the risk that a constraint expression could evaluate differently on the local and remote - servers. Any such inconsistency in the behavior of a CHECK + servers. Any such inconsistency in the behavior of a CHECK constraint could lead to hard-to-detect errors in query optimization. - So if you wish to import CHECK constraints, you must do so + So if you wish to import CHECK constraints, you must do so manually, and you should verify the semantics of each one carefully. - For more detail about the treatment of CHECK constraints on - foreign tables, see . + For more detail about the treatment of CHECK constraints on + foreign tables, see . @@ -464,18 +464,18 @@ - The remote transaction uses SERIALIZABLE - isolation level when the local transaction has SERIALIZABLE - isolation level; otherwise it uses REPEATABLE READ + The remote transaction uses SERIALIZABLE + isolation level when the local transaction has SERIALIZABLE + isolation level; otherwise it uses REPEATABLE READ isolation level. This choice ensures that if a query performs multiple table scans on the remote server, it will get snapshot-consistent results for all the scans. A consequence is that successive queries within a single transaction will see the same data from the remote server, even if concurrent updates are occurring on the remote server due to other activities. That behavior would be expected anyway if the local - transaction uses SERIALIZABLE or REPEATABLE READ + transaction uses SERIALIZABLE or REPEATABLE READ isolation level, but it might be surprising for a READ - COMMITTED local transaction. A future + COMMITTED local transaction. A future PostgreSQL release might modify these rules. @@ -484,42 +484,42 @@ Remote Query Optimization - postgres_fdw attempts to optimize remote queries to reduce + postgres_fdw attempts to optimize remote queries to reduce the amount of data transferred from foreign servers. This is done by - sending query WHERE clauses to the remote server for + sending query WHERE clauses to the remote server for execution, and by not retrieving table columns that are not needed for the current query. To reduce the risk of misexecution of queries, - WHERE clauses are not sent to the remote server unless they use + WHERE clauses are not sent to the remote server unless they use only data types, operators, and functions that are built-in or belong to an - extension that's listed in the foreign server's extensions + extension that's listed in the foreign server's extensions option. Operators and functions in such clauses must - be IMMUTABLE as well. - For an UPDATE or DELETE query, - postgres_fdw attempts to optimize the query execution by + be IMMUTABLE as well. + For an UPDATE or DELETE query, + postgres_fdw attempts to optimize the query execution by sending the whole query to the remote server if there are no query - WHERE clauses that cannot be sent to the remote server, - no local joins for the query, no row-level local BEFORE or - AFTER triggers on the target table, and no - CHECK OPTION constraints from parent views. - In UPDATE, + WHERE clauses that cannot be sent to the remote server, + no local joins for the query, no row-level local BEFORE or + AFTER triggers on the target table, and no + CHECK OPTION constraints from parent views. 
+ In UPDATE, expressions to assign to target columns must use only built-in data types, - IMMUTABLE operators, or IMMUTABLE functions, + IMMUTABLE operators, or IMMUTABLE functions, to reduce the risk of misexecution of the query. - When postgres_fdw encounters a join between foreign tables on + When postgres_fdw encounters a join between foreign tables on the same foreign server, it sends the entire join to the foreign server, unless for some reason it believes that it will be more efficient to fetch rows from each table individually, or unless the table references involved - are subject to different user mappings. While sending the JOIN + are subject to different user mappings. While sending the JOIN clauses, it takes the same precautions as mentioned above for the - WHERE clauses. + WHERE clauses. The query that is actually sent to the remote server for execution can - be examined using EXPLAIN VERBOSE. + be examined using EXPLAIN VERBOSE. @@ -527,55 +527,55 @@ Remote Query Execution Environment - In the remote sessions opened by postgres_fdw, - the parameter is set to - just pg_catalog, so that only built-in objects are visible + In the remote sessions opened by postgres_fdw, + the parameter is set to + just pg_catalog, so that only built-in objects are visible without schema qualification. This is not an issue for queries - generated by postgres_fdw itself, because it always + generated by postgres_fdw itself, because it always supplies such qualification. However, this can pose a hazard for functions that are executed on the remote server via triggers or rules on remote tables. For example, if a remote table is actually a view, any functions used in that view will be executed with the restricted search path. It is recommended to schema-qualify all names in such - functions, or else attach SET search_path options - (see ) to such functions + functions, or else attach SET search_path options + (see ) to such functions to establish their expected search path environment. - postgres_fdw likewise establishes remote session settings + postgres_fdw likewise establishes remote session settings for various parameters: - is set to UTC + is set to UTC - is set to ISO + is set to ISO - is set to postgres + is set to postgres - is set to 3 for remote - servers 9.0 and newer and is set to 2 for older versions + is set to 3 for remote + servers 9.0 and newer and is set to 2 for older versions - These are less likely to be problematic than search_path, but - can be handled with function SET options if the need arises. + These are less likely to be problematic than search_path, but + can be handled with function SET options if the need arises. - It is not recommended that you override this behavior by + It is not recommended that you override this behavior by changing the session-level settings of these parameters; that is likely - to cause postgres_fdw to malfunction. + to cause postgres_fdw to malfunction. @@ -583,19 +583,19 @@ Cross-Version Compatibility - postgres_fdw can be used with remote servers dating back - to PostgreSQL 8.3. Read-only capability is available - back to 8.1. A limitation however is that postgres_fdw + postgres_fdw can be used with remote servers dating back + to PostgreSQL 8.3. Read-only capability is available + back to 8.1. A limitation however is that postgres_fdw generally assumes that immutable built-in functions and operators are safe to send to the remote server for execution, if they appear in a - WHERE clause for a foreign table. 
Thus, a built-in + WHERE clause for a foreign table. Thus, a built-in function that was added since the remote server's release might be sent - to it for execution, resulting in function does not exist or + to it for execution, resulting in function does not exist or a similar error. This type of failure can be worked around by rewriting the query, for example by embedding the foreign table - reference in a sub-SELECT with OFFSET 0 as an + reference in a sub-SELECT with OFFSET 0 as an optimization fence, and placing the problematic function or operator - outside the sub-SELECT. + outside the sub-SELECT. @@ -604,7 +604,7 @@ Here is an example of creating a foreign table with - postgres_fdw. First install the extension: + postgres_fdw. First install the extension: @@ -612,8 +612,8 @@ CREATE EXTENSION postgres_fdw; - Then create a foreign server using . - In this example we wish to connect to a PostgreSQL server + Then create a foreign server using . + In this example we wish to connect to a PostgreSQL server on host 192.83.123.89 listening on port 5432. The database to which the connection is made is named foreign_db on the remote server: @@ -626,7 +626,7 @@ CREATE SERVER foreign_server - A user mapping, defined with , is + A user mapping, defined with , is needed as well to identify the role that will be used on the remote server: @@ -639,10 +639,10 @@ CREATE USER MAPPING FOR local_user Now it is possible to create a foreign table with - . In this example we - wish to access the table named some_schema.some_table + . In this example we + wish to access the table named some_schema.some_table on the remote server. The local name for it will - be foreign_table: + be foreign_table: CREATE FOREIGN TABLE foreign_table ( @@ -654,11 +654,11 @@ CREATE FOREIGN TABLE foreign_table ( It's essential that the data types and other properties of the columns - declared in CREATE FOREIGN TABLE match the actual remote table. - Column names must match as well, unless you attach column_name + declared in CREATE FOREIGN TABLE match the actual remote table. + Column names must match as well, unless you attach column_name options to the individual columns to show how they are named in the remote table. - In many cases, use of is + In many cases, use of is preferable to constructing foreign table definitions manually. diff --git a/doc/src/sgml/postgres.sgml b/doc/src/sgml/postgres.sgml index 8a3bfc9b0d..0070603fc3 100644 --- a/doc/src/sgml/postgres.sgml +++ b/doc/src/sgml/postgres.sgml @@ -1,6 +1,8 @@ - %version; @@ -42,11 +44,11 @@ After you have worked through this tutorial you might want to move - on to reading to gain a more formal knowledge - of the SQL language, or for + on to reading to gain a more formal knowledge + of the SQL language, or for information about developing applications for PostgreSQL. Those who set up and - manage their own server should also read . + manage their own server should also read . @@ -80,16 +82,16 @@ chapters individually as they choose. The information in this part is presented in a narrative fashion in topical units. Readers looking for a complete description of a particular command - should see . + should see . Readers of this part should know how to connect to a - PostgreSQL database and issue + PostgreSQL database and issue SQL commands. Readers that are unfamiliar with - these issues are encouraged to read + these issues are encouraged to read first. 
SQL commands are typically entered - using the PostgreSQL interactive terminal + using the PostgreSQL interactive terminal psql, but other programs that have similar functionality can be used as well. @@ -116,10 +118,10 @@ This part covers topics that are of interest to a - PostgreSQL database administrator. This includes + PostgreSQL database administrator. This includes installation of the software, set up and configuration of the server, management of users and databases, and maintenance tasks. - Anyone who runs a PostgreSQL server, even for + Anyone who runs a PostgreSQL server, even for personal use, but especially in production, should be familiar with the topics covered in this part. @@ -130,7 +132,7 @@ self-contained and can be read individually as desired. The information in this part is presented in a narrative fashion in topical units. Readers looking for a complete description of a - particular command should see . + particular command should see . @@ -139,9 +141,9 @@ up their own server can begin their exploration with this part. The rest of this part is about tuning and management; that material assumes that the reader is familiar with the general use of - the PostgreSQL database system. Readers are - encouraged to look at and for additional information. + the PostgreSQL database system. Readers are + encouraged to look at and for additional information. @@ -161,6 +163,7 @@ &diskusage; &wal; &logical-replication; + &jit; ®ress; @@ -171,13 +174,13 @@ This part describes the client programming interfaces distributed - with PostgreSQL. Each of these chapters can be + with PostgreSQL. Each of these chapters can be read independently. Note that there are many other programming interfaces for client programs that are distributed separately and - contain their own documentation ( + contain their own documentation ( lists some of the more popular ones). Readers of this part should be familiar with using SQL commands to manipulate - and query the database (see ) and of course + and query the database (see ) and of course with the programming language that the interface uses. @@ -197,13 +200,13 @@ This part is about extending the server functionality with user-defined functions, data types, triggers, etc. These are advanced topics which should probably be approached only after all - the other user documentation about PostgreSQL has + the other user documentation about PostgreSQL has been understood. Later chapters in this part describe the server-side programming languages available in the PostgreSQL distribution as well as general issues concerning server-side programming languages. It is essential to read at least the earlier sections of (covering functions) before diving into the + linkend="extend"/> (covering functions) before diving into the material about server-side programming languages. @@ -234,7 +237,7 @@ This part contains assorted information that might be of use to - PostgreSQL developers. + PostgreSQL developers. @@ -250,6 +253,7 @@ &geqo; &indexam; &generic-wal; + &btree; &gist; &spgist; &gin; diff --git a/doc/src/sgml/problems.sgml b/doc/src/sgml/problems.sgml index 6bf74bb399..eced8dfdf2 100644 --- a/doc/src/sgml/problems.sgml +++ b/doc/src/sgml/problems.sgml @@ -145,7 +145,7 @@ - If your application uses some other client interface, such as PHP, then + If your application uses some other client interface, such as PHP, then please try to isolate the offending queries. We will probably not set up a web server to reproduce your problem. 
In any case remember to provide the exact input files; do not guess that the problem happens for @@ -167,10 +167,10 @@ If you are reporting an error message, please obtain the most verbose - form of the message. In psql, say \set - VERBOSITY verbose beforehand. If you are extracting the message + form of the message. In psql, say \set + VERBOSITY verbose beforehand. If you are extracting the message from the server log, set the run-time parameter - to verbose so that all + to verbose so that all details are logged. @@ -236,9 +236,9 @@ If your version is older than &version; we will almost certainly tell you to upgrade. There are many bug fixes and improvements in each new release, so it is quite possible that a bug you have - encountered in an older release of PostgreSQL + encountered in an older release of PostgreSQL has already been fixed. We can only provide limited support for - sites using older releases of PostgreSQL; if you + sites using older releases of PostgreSQL; if you require more than we can provide, consider acquiring a commercial support contract. @@ -264,7 +264,7 @@ It is better to report everything the first time than us having to squeeze the facts out of you. On the other hand, if your input files are huge, it is fair to ask first whether somebody is interested in looking into it. Here is - an article + an article that outlines some more tips on reporting bugs. @@ -283,8 +283,8 @@ are specifically talking about the backend process, mention that, do not just say PostgreSQL crashes. A crash of a single backend process is quite different from crash of the parent - postgres process; please don't say the server - crashed when you mean a single backend process went down, nor vice versa. + postgres process; please don't say the server + crashed when you mean a single backend process went down, nor vice versa. Also, client programs such as the interactive frontend psql are completely separate from the backend. Please try to be specific about whether the problem is on the client or server side. @@ -356,10 +356,10 @@ subscribed to a list to be allowed to post on it. (You need not be subscribed to use the bug-report web form, however.) If you would like to send mail but do not want to receive list traffic, - you can subscribe and set your subscription option to nomail. + you can subscribe and set your subscription option to nomail. For more information send mail to majordomo@postgresql.org - with the single word help in the body of the message. + with the single word help in the body of the message. diff --git a/doc/src/sgml/protocol.sgml b/doc/src/sgml/protocol.sgml index c8b083c29c..f0b2145208 100644 --- a/doc/src/sgml/protocol.sgml +++ b/doc/src/sgml/protocol.sgml @@ -22,20 +22,28 @@ PostgreSQL 7.4 and later. For descriptions of the earlier protocol versions, see previous releases of the PostgreSQL documentation. A single server - can support multiple protocol versions. The initial - startup-request message tells the server which protocol version the - client is attempting to use, and then the server follows that protocol - if it is able. + can support multiple protocol versions. The initial startup-request + message tells the server which protocol version the client is attempting to + use. If the major version requested by the client is not supported by + the server, the connection will be rejected (for example, this would occur + if the client requested protocol version 4.0, which does not exist as of + this writing). 
If the minor version requested by the client is not + supported by the server (e.g. the client requests version 3.1, but the + server supports only 3.0), the server may either reject the connection or + may respond with a NegotiateProtocolVersion message containing the highest + minor protocol version which it supports. The client may then choose either + to continue with the connection using the specified protocol version or + to abort the connection. In order to serve multiple clients efficiently, the server launches - a new backend process for each client. + a new backend process for each client. In the current implementation, a new child process is created immediately after an incoming connection is detected. This is transparent to the protocol, however. For purposes of the - protocol, the terms backend and server are - interchangeable; likewise frontend and client + protocol, the terms backend and server are + interchangeable; likewise frontend and client are interchangeable. @@ -56,7 +64,7 @@ During normal operation, the frontend sends queries and other commands to the backend, and the backend sends back query results - and other responses. There are a few cases (such as NOTIFY) + and other responses. There are a few cases (such as NOTIFY) wherein the backend will send unsolicited messages, but for the most part this portion of a session is driven by frontend requests. @@ -71,9 +79,9 @@ Within normal operation, SQL commands can be executed through either of - two sub-protocols. In the simple query protocol, the frontend + two sub-protocols. In the simple query protocol, the frontend just sends a textual query string, which is parsed and immediately - executed by the backend. In the extended query protocol, + executed by the backend. In the extended query protocol, processing of queries is separated into multiple steps: parsing, binding of parameter values, and execution. This offers flexibility and performance benefits, at the cost of extra complexity. @@ -81,7 +89,7 @@ Normal operation has additional sub-protocols for special operations - such as COPY. + such as COPY. @@ -123,24 +131,24 @@ In the extended-query protocol, execution of SQL commands is divided into multiple steps. The state retained between steps is represented - by two types of objects: prepared statements and - portals. A prepared statement represents the result of + by two types of objects: prepared statements and + portals. A prepared statement represents the result of parsing and semantic analysis of a textual query string. A prepared statement is not in itself ready to execute, because it might - lack specific values for parameters. A portal represents + lack specific values for parameters. A portal represents a ready-to-execute or already-partially-executed statement, with any - missing parameter values filled in. (For SELECT statements, + missing parameter values filled in. (For SELECT statements, a portal is equivalent to an open cursor, but we choose to use a different - term since cursors don't handle non-SELECT statements.) + term since cursors don't handle non-SELECT statements.) - The overall execution cycle consists of a parse step, + The overall execution cycle consists of a parse step, which creates a prepared statement from a textual query string; a - bind step, which creates a portal given a prepared + bind step, which creates a portal given a prepared statement and values for any needed parameters; and an - execute step that runs a portal's query. 
In the case of - a query that returns rows (SELECT, SHOW, etc), + execute step that runs a portal's query. In the case of + a query that returns rows (SELECT, SHOW, etc), the execute step can be told to fetch only a limited number of rows, so that multiple execute steps might be needed to complete the operation. @@ -151,7 +159,7 @@ (but note that these exist only within a session, and are never shared across sessions). Existing prepared statements and portals are referenced by names assigned when they were created. In addition, - an unnamed prepared statement and portal exist. Although these + an unnamed prepared statement and portal exist. Although these behave largely the same as named objects, operations on them are optimized for the case of executing a query only once and then discarding it, whereas operations on named objects are optimized on the expectation @@ -164,10 +172,10 @@ Data of a particular data type might be transmitted in any of several - different formats. As of PostgreSQL 7.4 - the only supported formats are text and binary, + different formats. As of PostgreSQL 7.4 + the only supported formats are text and binary, but the protocol makes provision for future extensions. The desired - format for any value is specified by a format code. + format for any value is specified by a format code. Clients can specify a format code for each transmitted parameter value and for each column of a query result. Text has format code zero, binary has format code one, and all other format codes are reserved @@ -199,7 +207,7 @@ This section describes the message flow and the semantics of each message type. (Details of the exact representation of each message - appear in .) There are + appear in .) There are several different sub-protocols depending on the state of the connection: start-up, query, function call, COPY, and termination. There are also special @@ -300,8 +308,8 @@ password, the server responds with an AuthenticationOk, otherwise it responds with an ErrorResponse. The actual PasswordMessage can be computed in SQL as concat('md5', - md5(concat(md5(concat(password, username)), random-salt))). - (Keep in mind the md5() function returns its + md5(concat(md5(concat(password, username)), random-salt))). + (Keep in mind the md5() function returns its result as a hex string.) @@ -375,7 +383,7 @@ SASLInitialResponse with the name of the selected mechanism, and the first part of the SASL data stream in response to this. If further messages are needed, the server will respond with - AuthenticationSASLContinue. See + AuthenticationSASLContinue. See for details. @@ -406,6 +414,21 @@ + + NegotiateProtocolVersion + + + The server does not support the minor protocol version requested + by the client, but does support an earlier version of the protocol; + this message indicates the highest supported minor version. This + message will also be sent if the client requested unsupported protocol + options (i.e. beginning with _pq_.) in the + startup packet. This message will be followed by an ErrorResponse or + a message indicating the success or failure of authentication. + + + + @@ -420,8 +443,10 @@ for further messages from the server. In this phase a backend process is being started, and the frontend is just an interested bystander. It is still possible for the startup attempt - to fail (ErrorResponse), but in the normal case the backend will send - some ParameterStatus messages, BackendKeyData, and finally ReadyForQuery. 
+ to fail (ErrorResponse) or the server to decline support for the requested + minor protocol version (NegotiateProtocolVersion), but in the normal case + the backend will send some ParameterStatus messages, BackendKeyData, and + finally ReadyForQuery. @@ -453,9 +478,9 @@ This message informs the frontend about the current (initial) setting of backend parameters, such as or . + linkend="guc-client-encoding"/> or . The frontend can ignore this message, or record the settings - for its future use; see for + for its future use; see for more details. The frontend should not respond to this message, but should continue listening for a ReadyForQuery message. @@ -539,7 +564,7 @@ The backend is ready to copy data from the frontend to a - table; see . + table; see . @@ -549,7 +574,7 @@ The backend is ready to copy data from a table to the - frontend; see . + frontend; see . @@ -624,12 +649,12 @@ - The response to a SELECT query (or other queries that - return row sets, such as EXPLAIN or SHOW) + The response to a SELECT query (or other queries that + return row sets, such as EXPLAIN or SHOW) normally consists of RowDescription, zero or more DataRow messages, and then CommandComplete. - COPY to or from the frontend invokes special protocol - as described in . + COPY to or from the frontend invokes special protocol + as described in . All other query types normally produce only a CommandComplete message. @@ -657,8 +682,8 @@ In simple Query mode, the format of retrieved values is always text, - except when the given command is a FETCH from a cursor - declared with the BINARY option. In that case, the + except when the given command is a FETCH from a cursor + declared with the BINARY option. In that case, the retrieved values are in binary format. The format codes given in the RowDescription message tell which format is being used. @@ -666,7 +691,7 @@ A frontend must be prepared to accept ErrorResponse and NoticeResponse messages whenever it is expecting any other type of - message. See also concerning messages + message. See also concerning messages that the backend might generate due to outside events. @@ -675,6 +700,125 @@ that will accept any message type at any time that it could make sense, rather than wiring in assumptions about the exact sequence of messages. + + + Multiple Statements in a Simple Query + + + When a simple Query message contains more than one SQL statement + (separated by semicolons), those statements are executed as a single + transaction, unless explicit transaction control commands are included + to force a different behavior. For example, if the message contains + +INSERT INTO mytable VALUES(1); +SELECT 1/0; +INSERT INTO mytable VALUES(2); + + then the divide-by-zero failure in the SELECT will force + rollback of the first INSERT. Furthermore, because + execution of the message is abandoned at the first error, the second + INSERT is never attempted at all. + + + + If instead the message contains + +BEGIN; +INSERT INTO mytable VALUES(1); +COMMIT; +INSERT INTO mytable VALUES(2); +SELECT 1/0; + + then the first INSERT is committed by the + explicit COMMIT command. The second INSERT + and the SELECT are still treated as a single transaction, + so that the divide-by-zero failure will roll back the + second INSERT, but not the first one. + + + + This behavior is implemented by running the statements in a + multi-statement Query message in an implicit transaction + block unless there is some explicit transaction block for them to + run in. 
The main difference between an implicit transaction block and + a regular one is that an implicit block is closed automatically at the + end of the Query message, either by an implicit commit if there was no + error, or an implicit rollback if there was an error. This is similar + to the implicit commit or rollback that happens for a statement + executed by itself (when not in a transaction block). + + + + If the session is already in a transaction block, as a result of + a BEGIN in some previous message, then the Query message + simply continues that transaction block, whether the message contains + one statement or several. However, if the Query message contains + a COMMIT or ROLLBACK closing the existing + transaction block, then any following statements are executed in an + implicit transaction block. + Conversely, if a BEGIN appears in a multi-statement Query + message, then it starts a regular transaction block that will only be + terminated by an explicit COMMIT or ROLLBACK, + whether that appears in this Query message or a later one. + If the BEGIN follows some statements that were executed as + an implicit transaction block, those statements are not immediately + committed; in effect, they are retroactively included into the new + regular transaction block. + + + + A COMMIT or ROLLBACK appearing in an implicit + transaction block is executed as normal, closing the implicit block; + however, a warning will be issued since a COMMIT + or ROLLBACK without a previous BEGIN might + represent a mistake. If more statements follow, a new implicit + transaction block will be started for them. + + + + Savepoints are not allowed in an implicit transaction block, since + they would conflict with the behavior of automatically closing the + block upon any error. + + + + Remember that, regardless of any transaction control commands that may + be present, execution of the Query message stops at the first error. + Thus for example given + +BEGIN; +SELECT 1/0; +ROLLBACK; + + in a single Query message, the session will be left inside a failed + regular transaction block, since the ROLLBACK is not + reached after the divide-by-zero error. Another ROLLBACK + will be needed to restore the session to a usable state. + + + + Another behavior of note is that initial lexical and syntactic + analysis is done on the entire query string before any of it is + executed. Thus simple errors (such as a misspelled keyword) in later + statements can prevent execution of any of the statements. This + is normally invisible to users since the statements would all roll + back anyway when done as an implicit transaction block. However, + it can be visible when attempting to do multiple transactions within a + multi-statement Query. For instance, if a typo turned our previous + example into + +BEGIN; +INSERT INTO mytable VALUES(1); +COMMIT; +INSERT INTO mytable VALUES(2); +SELCT 1/0; + + then none of the statements would get run, resulting in the visible + difference that the first INSERT is not committed. + Errors detected at semantic analysis or later, such as a misspelled + table or column name, do not have this effect. + + @@ -705,17 +849,17 @@ A parameter data type can be left unspecified by setting it to zero, or by making the array of parameter type OIDs shorter than the - number of parameter symbols ($n) + number of parameter symbols ($n) used in the query string. Another special case is that a parameter's - type can be specified as void (that is, the OID of the - void pseudo-type). 
This is meant to allow parameter symbols + type can be specified as void (that is, the OID of the + void pseudo-type). This is meant to allow parameter symbols to be used for function parameters that are actually OUT parameters. - Ordinarily there is no context in which a void parameter + Ordinarily there is no context in which a void parameter could be used, but if such a parameter symbol appears in a function's parameter list, it is effectively ignored. For example, a function - call such as foo($1,$2,$3,$4) could match a function with - two IN and two OUT arguments, if $3 and $4 - are specified as having type void. + call such as foo($1,$2,$3,$4) could match a function with + two IN and two OUT arguments, if $3 and $4 + are specified as having type void. @@ -739,7 +883,7 @@ statements must be explicitly closed before they can be redefined by another Parse message, but this is not required for the unnamed statement. Named prepared statements can also be created and accessed at the SQL - command level, using PREPARE and EXECUTE. + command level, using PREPARE and EXECUTE. @@ -750,7 +894,7 @@ the values to use for any parameter placeholders present in the prepared statement. The supplied parameter set must match those needed by the prepared statement. - (If you declared any void parameters in the Parse message, + (If you declared any void parameters in the Parse message, pass NULL values for them in the Bind message.) Bind also specifies the format to use for any data returned by the query; the format can be specified overall, or per-column. @@ -761,7 +905,7 @@ The choice between text and binary output is determined by the format codes given in Bind, regardless of the SQL command involved. The - BINARY attribute in cursor declarations is irrelevant when + BINARY attribute in cursor declarations is irrelevant when using extended query protocol. @@ -785,14 +929,14 @@ portals must be explicitly closed before they can be redefined by another Bind message, but this is not required for the unnamed portal. Named portals can also be created and accessed at the SQL - command level, using DECLARE CURSOR and FETCH. + command level, using DECLARE CURSOR and FETCH. Once a portal exists, it can be executed using an Execute message. The Execute message specifies the portal name (empty string denotes the unnamed portal) and - a maximum result-row count (zero meaning fetch all rows). + a maximum result-row count (zero meaning fetch all rows). The result-row count is only meaningful for portals containing commands that return row sets; in other cases the command is always executed to completion, and the row count is ignored. @@ -819,7 +963,7 @@ At completion of each series of extended-query messages, the frontend should issue a Sync message. This parameterless message causes the backend to close the current transaction if it's not inside a - BEGIN/COMMIT transaction block (close + BEGIN/COMMIT transaction block (close meaning to commit if no error, or roll back if error). Then a ReadyForQuery response is issued. The purpose of Sync is to provide a resynchronization point for error recovery. When an error is detected @@ -827,13 +971,13 @@ ErrorResponse, then reads and discards messages until a Sync is reached, then issues ReadyForQuery and returns to normal message processing. (But note that no skipping occurs if an error is detected - while processing Sync — this ensures that there is one + while processing Sync — this ensures that there is one and only one ReadyForQuery sent for each Sync.) 
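
   As noted above, the same named objects are reachable from SQL, which can
   help when reasoning about the message flow.  A rough SQL-level sketch of
   the parse/bind/execute cycle (the table and object names are hypothetical,
   and the correspondence is only approximate):

PREPARE stmt (integer) AS               -- roughly, a Parse creating a named prepared statement
    SELECT * FROM mytab WHERE num > $1;

EXECUTE stmt(10);                       -- roughly, Bind plus Execute run to completion

BEGIN;
DECLARE c CURSOR FOR                    -- roughly, Bind creating a named portal
    SELECT * FROM mytab;
FETCH 50 FROM c;                        -- roughly, an Execute step with a row limit
FETCH 50 FROM c;                        -- a further Execute step resumes the suspended portal
COMMIT;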
- Sync does not cause a transaction block opened with BEGIN + Sync does not cause a transaction block opened with BEGIN to be closed. It is possible to detect this situation since the ReadyForQuery message includes transaction status information. @@ -920,7 +1064,7 @@ The Function Call sub-protocol is a legacy feature that is probably best avoided in new code. Similar results can be accomplished by setting up - a prepared statement that does SELECT function($1, ...). + a prepared statement that does SELECT function($1, ...). The Function Call cycle can then be replaced with Bind/Execute. @@ -988,7 +1132,7 @@ COPY Operations - The COPY command allows high-speed bulk data transfer + The COPY command allows high-speed bulk data transfer to or from the server. Copy-in and copy-out operations each switch the connection into a distinct sub-protocol, which lasts until the operation is completed. @@ -996,16 +1140,16 @@ Copy-in mode (data transfer to the server) is initiated when the - backend executes a COPY FROM STDIN SQL statement. The backend + backend executes a COPY FROM STDIN SQL statement. The backend sends a CopyInResponse message to the frontend. The frontend should then send zero or more CopyData messages, forming a stream of input data. (The message boundaries are not required to have anything to do with row boundaries, although that is often a reasonable choice.) The frontend can terminate the copy-in mode by sending either a CopyDone message (allowing successful termination) or a CopyFail message (which - will cause the COPY SQL statement to fail with an + will cause the COPY SQL statement to fail with an error). The backend then reverts to the command-processing mode it was - in before the COPY started, which will be either simple or + in before the COPY started, which will be either simple or extended query protocol. It will next send either CommandComplete (if successful) or ErrorResponse (if not). @@ -1013,10 +1157,10 @@ In the event of a backend-detected error during copy-in mode (including receipt of a CopyFail message), the backend will issue an ErrorResponse - message. If the COPY command was issued via an extended-query + message. If the COPY command was issued via an extended-query message, the backend will now discard frontend messages until a Sync message is received, then it will issue ReadyForQuery and return to normal - processing. If the COPY command was issued in a simple + processing. If the COPY command was issued in a simple Query message, the rest of that message is discarded and ReadyForQuery is issued. In either case, any subsequent CopyData, CopyDone, or CopyFail messages issued by the frontend will simply be dropped. @@ -1028,16 +1172,16 @@ that will abort the copy-in state as described above. (The exception for Flush and Sync is for the convenience of client libraries that always send Flush or Sync after an Execute message, without checking whether - the command to be executed is a COPY FROM STDIN.) + the command to be executed is a COPY FROM STDIN.) Copy-out mode (data transfer from the server) is initiated when the - backend executes a COPY TO STDOUT SQL statement. The backend + backend executes a COPY TO STDOUT SQL statement. The backend sends a CopyOutResponse message to the frontend, followed by zero or more CopyData messages (always one per row), followed by CopyDone. The backend then reverts to the command-processing mode it was - in before the COPY started, and sends CommandComplete. + in before the COPY started, and sends CommandComplete. 
The frontend cannot abort the transfer (except by closing the connection or issuing a Cancel request), but it can discard unwanted CopyData and CopyDone messages. @@ -1054,13 +1198,13 @@ It is possible for NoticeResponse and ParameterStatus messages to be interspersed between CopyData messages; frontends must handle these cases, and should be prepared for other asynchronous message types as well (see - ). Otherwise, any message type other than + ). Otherwise, any message type other than CopyData or CopyDone may be treated as terminating copy-out mode. There is another Copy-related mode called copy-both, which allows - high-speed bulk data transfer to and from the server. + high-speed bulk data transfer to and from the server. Copy-both mode is initiated when a backend in walsender mode executes a START_REPLICATION statement. The backend sends a CopyBothResponse message to the frontend. Both @@ -1077,7 +1221,7 @@ until a Sync message is received, and then issue ReadyForQuery and return to normal processing. The frontend should treat receipt of ErrorResponse as terminating the copy in both directions; no CopyDone should be sent - in this case. See for more + in this case. See for more information on the subprotocol transmitted over copy-both mode. @@ -1085,7 +1229,7 @@ The CopyInResponse, CopyOutResponse and CopyBothResponse messages include fields that inform the frontend of the number of columns per row and the format codes being used for each column. (As of - the present implementation, all columns in a given COPY + the present implementation, all columns in a given COPY operation will use the same format, but the message design does not assume this.) @@ -1107,7 +1251,7 @@ It is possible for NoticeResponse messages to be generated due to outside activity; for example, if the database administrator commands - a fast database shutdown, the backend will send a NoticeResponse + a fast database shutdown, the backend will send a NoticeResponse indicating this fact before closing the connection. Accordingly, frontends should always be prepared to accept and display NoticeResponse messages, even when the connection is nominally idle. @@ -1117,7 +1261,7 @@ ParameterStatus messages will be generated whenever the active value changes for any of the parameters the backend believes the frontend should know about. Most commonly this occurs in response - to a SET SQL command executed by the frontend, and + to a SET SQL command executed by the frontend, and this case is effectively synchronous — but it is also possible for parameter status changes to occur because the administrator changed a configuration file and then sent the @@ -1130,27 +1274,27 @@ At present there is a hard-wired set of parameters for which ParameterStatus will be generated: they are - server_version, - server_encoding, - client_encoding, - application_name, - is_superuser, - session_authorization, - DateStyle, - IntervalStyle, - TimeZone, - integer_datetimes, and - standard_conforming_strings. - (server_encoding, TimeZone, and - integer_datetimes were not reported by releases before 8.0; - standard_conforming_strings was not reported by releases + server_version, + server_encoding, + client_encoding, + application_name, + is_superuser, + session_authorization, + DateStyle, + IntervalStyle, + TimeZone, + integer_datetimes, and + standard_conforming_strings. 
+ (server_encoding, TimeZone, and + integer_datetimes were not reported by releases before 8.0; + standard_conforming_strings was not reported by releases before 8.1; - IntervalStyle was not reported by releases before 8.4; - application_name was not reported by releases before 9.0.) + IntervalStyle was not reported by releases before 8.4; + application_name was not reported by releases before 9.0.) Note that - server_version, - server_encoding and - integer_datetimes + server_version, + server_encoding and + integer_datetimes are pseudo-parameters that cannot change after startup. This set might change in the future, or even become configurable. Accordingly, a frontend should simply ignore ParameterStatus for @@ -1275,7 +1419,7 @@ frontend disconnects while a non-SELECT query is being processed, the backend will probably finish the query before noticing the disconnection. If the query is outside any - transaction block (BEGIN ... COMMIT + transaction block (BEGIN ... COMMIT sequence) then its results might be committed before the disconnection is recognized. @@ -1285,30 +1429,30 @@ <acronym>SSL</acronym> Session Encryption - If PostgreSQL was built with + If PostgreSQL was built with SSL support, frontend/backend communications can be encrypted using SSL. This provides communication security in environments where attackers might be able to capture the session traffic. For more information on encrypting PostgreSQL sessions with - SSL, see . + SSL, see . To initiate an SSL-encrypted connection, the frontend initially sends an SSLRequest message rather than a StartupMessage. The server then responds with a single byte - containing S or N, indicating that it is + containing S or N, indicating that it is willing or unwilling to perform SSL, respectively. The frontend might close the connection at this point if it is dissatisfied with the response. To continue after - S, perform an SSL startup handshake + S, perform an SSL startup handshake (not described here, part of the SSL specification) with the server. If this is successful, continue with sending the usual StartupMessage. In this case the StartupMessage and all subsequent data will be SSL-encrypted. To continue after - N, send the usual StartupMessage and proceed without + N, send the usual StartupMessage and proceed without encryption. @@ -1316,7 +1460,7 @@ The frontend should also be prepared to handle an ErrorMessage response to SSLRequest from the server. This would only occur if the server predates the addition of SSL support - to PostgreSQL. (Such servers are now very ancient, + to PostgreSQL. (Such servers are now very ancient, and likely do not exist in the wild anymore.) In this case the connection must be closed, but the frontend might choose to open a fresh connection @@ -1341,11 +1485,12 @@ SASL Authentication -SASL is a framework for authentication in connection-oriented -protocols. At the moment, PostgreSQL implements only one SASL -authentication mechanism, SCRAM-SHA-256, but more might be added in the -future. The below steps illustrate how SASL authentication is performed in -general, while the next subsection gives more details on SCRAM-SHA-256. +SASL is a framework for authentication in connection-oriented +protocols. At the moment, PostgreSQL implements two SASL +authentication mechanisms, SCRAM-SHA-256 and SCRAM-SHA-256-PLUS. More +might be added in the future. 
The below steps illustrate how SASL +authentication is performed in general, while the next subsection gives +more details on SCRAM-SHA-256 and SCRAM-SHA-256-PLUS. @@ -1395,30 +1540,29 @@ On error, the server can abort the authentication at any stage, and send an ErrorMessage. - + SCRAM-SHA-256 authentication - SCRAM-SHA-256 (called just SCRAM from now on) is - the only implemented SASL mechanism, at the moment. It is described in detail - in RFC 7677 and RFC 5802. + The implemented SASL mechanisms at the moment + are SCRAM-SHA-256 and its variant with channel + binding SCRAM-SHA-256-PLUS. They are described in + detail in RFC 7677 and RFC 5802. -When SCRAM-SHA-256 is used in PostgreSQL, the server will ignore the username -that the client sends in the client-first-message. The username +When SCRAM-SHA-256 is used in PostgreSQL, the server will ignore the user name +that the client sends in the client-first-message. The user name that was already sent in the startup message is used instead. -PostgreSQL supports multiple character encodings, while SCRAM -dictates UTF-8 to be used for the username, so it might be impossible to -represent the PostgreSQL username in UTF-8. To avoid confusion, the client -should use pg_same_as_startup_message as the username in the -client-first-message. +PostgreSQL supports multiple character encodings, while SCRAM +dictates UTF-8 to be used for the user name, so it might be impossible to +represent the PostgreSQL user name in UTF-8. The SCRAM specification dictates that the password is also in UTF-8, and is -processed with the SASLprep algorithm. -PostgreSQL, however, does not require UTF-8 to be used for +processed with the SASLprep algorithm. +PostgreSQL, however, does not require UTF-8 to be used for the password. When a user's password is set, it is processed with SASLprep as if it was in UTF-8, regardless of the actual encoding used. However, if it is not a legal UTF-8 byte sequence, or it contains UTF-8 byte sequences @@ -1430,7 +1574,29 @@ the password is in. -Channel binding has not been implemented yet. +Channel binding is supported in PostgreSQL builds with +SSL support. The SASL mechanism name for SCRAM with channel binding is +SCRAM-SHA-256-PLUS. The channel binding type used by +PostgreSQL is tls-server-end-point. + + + + In SCRAM without channel binding, the server chooses + a random number that is transmitted to the client to be mixed with the + user-supplied password in the transmitted password hash. While this + prevents the password hash from being successfully retransmitted in + a later session, it does not prevent a fake server between the real + server and client from passing through the server's random value + and successfully authenticating. + + + + SCRAM with channel binding prevents such + man-in-the-middle attacks by mixing the signature of the server's + certificate into the transmitted password hash. While a fake server can + retransmit the real server's certificate, it doesn't have access to the + private key matching that certificate, and therefore cannot prove it is + the owner, causing SSL connection failure. @@ -1439,32 +1605,39 @@ the password is in. The server sends an AuthenticationSASL message. It includes a list of SASL authentication mechanisms that the server can accept. + This will be SCRAM-SHA-256-PLUS + and SCRAM-SHA-256 if the server is built with SSL + support, or else just the latter. The client responds by sending a SASLInitialResponse message, which - indicates the chosen mechanism, SCRAM-SHA-256. 
In the Initial - Client response field, the message contains the SCRAM - client-first-message. + indicates the chosen mechanism, SCRAM-SHA-256 or + SCRAM-SHA-256-PLUS. (A client is free to choose either + mechanism, but for better security it should choose the channel-binding + variant if it can support it.) In the Initial Client response field, the + message contains the SCRAM client-first-message. + The client-first-message also contains the channel + binding type chosen by the client. Server sends an AuthenticationSASLContinue message, with a SCRAM - server-first message as the content. + server-first message as the content. Client sends a SASLResponse message, with SCRAM - client-final-message as the content. + client-final-message as the content. Server sends an AuthenticationSASLFinal message, with the SCRAM - server-final-message, followed immediately by + server-final-message, followed immediately by an AuthenticationOk message. @@ -1477,16 +1650,27 @@ the password is in. To initiate streaming replication, the frontend sends the -replication parameter in the startup message. A Boolean value -of true tells the backend to go into walsender mode, wherein a -small set of replication commands can be issued instead of SQL statements. Only -the simple query protocol can be used in walsender mode. -Replication commands are logged in the server log when - is enabled. -Passing database as the value instructs walsender to connect to -the database specified in the dbname parameter, which will allow -the connection to be used for logical replication from that database. +replication parameter in the startup message. A Boolean +value of true (or on, +yes, 1) tells the backend to go into +physical replication walsender mode, wherein a small set of replication +commands, shown below, can be issued instead of SQL statements. + + +Passing database as the value for the +replication parameter instructs the backend to go into +logical replication walsender mode, connecting to the database specified in +the dbname parameter. In logical replication walsender +mode, the replication commands shown below as well as normal SQL commands can +be issued. + + + +In either physical replication or logical replication walsender mode, only the +simple query protocol can be used. + + For the purpose of testing replication commands, you can make a replication connection via psql or any other libpq-using @@ -1496,12 +1680,17 @@ the connection to be used for logical replication from that database. psql "dbname=postgres replication=database" -c "IDENTIFY_SYSTEM;" However, it is often more useful to use - (for physical replication) or - (for logical replication). + (for physical replication) or + (for logical replication). -The commands accepted in walsender mode are: +Replication commands are logged in the server log when + is enabled. + + + +The commands accepted in replication mode are: IDENTIFY_SYSTEM @@ -1575,16 +1764,16 @@ The commands accepted in walsender mode are: Requests the server to send the current setting of a run-time parameter. - This is similar to the SQL command . + This is similar to the SQL command . - name + name The name of a run-time parameter. Available parameters are documented - in . + in . @@ -1611,7 +1800,7 @@ The commands accepted in walsender mode are: - File name of the timeline history file, e.g., 00000002.history. + File name of the timeline history file, e.g., 00000002.history. 
@@ -1633,38 +1822,38 @@ The commands accepted in walsender mode are: - CREATE_REPLICATION_SLOT slot_name [ TEMPORARY ] { PHYSICAL [ RESERVE_WAL ] | LOGICAL output_plugin [ EXPORT_SNAPSHOT | NOEXPORT_SNAPSHOT | USE_SNAPSHOT ] } + CREATE_REPLICATION_SLOT slot_name [ TEMPORARY ] { PHYSICAL [ RESERVE_WAL ] | LOGICAL output_plugin [ EXPORT_SNAPSHOT | NOEXPORT_SNAPSHOT | USE_SNAPSHOT ] } CREATE_REPLICATION_SLOT Create a physical or logical replication - slot. See for more about + slot. See for more about replication slots. - slot_name + slot_name The name of the slot to create. Must be a valid replication slot - name (see ). + name (see ). - output_plugin + output_plugin The name of the output plugin used for logical decoding - (see ). + (see ). - TEMPORARY + TEMPORARY Specify that this replication slot is a temporary one. Temporary @@ -1675,30 +1864,30 @@ The commands accepted in walsender mode are: - RESERVE_WAL + RESERVE_WAL - Specify that this physical replication slot reserves WAL - immediately. Otherwise, WAL is only reserved upon + Specify that this physical replication slot reserves WAL + immediately. Otherwise, WAL is only reserved upon connection from a streaming replication client. - EXPORT_SNAPSHOT - NOEXPORT_SNAPSHOT - USE_SNAPSHOT + EXPORT_SNAPSHOT + NOEXPORT_SNAPSHOT + USE_SNAPSHOT Decides what to do with the snapshot created during logical slot - initialization. EXPORT_SNAPSHOT, which is the default, + initialization. EXPORT_SNAPSHOT, which is the default, will export the snapshot for use in other sessions. This option can't - be used inside a transaction. USE_SNAPSHOT will use the + be used inside a transaction. USE_SNAPSHOT will use the snapshot for the current transaction executing the command. This option must be used in a transaction, and CREATE_REPLICATION_SLOT must be the first command - run in that transaction. Finally, NOEXPORT_SNAPSHOT will + run in that transaction. Finally, NOEXPORT_SNAPSHOT will just use the snapshot for logical decoding as normal but won't do anything else with it. @@ -1758,15 +1947,15 @@ The commands accepted in walsender mode are: - START_REPLICATION [ SLOT slot_name ] [ PHYSICAL ] XXX/XXX [ TIMELINE tli ] + START_REPLICATION [ SLOT slot_name ] [ PHYSICAL ] XXX/XXX [ TIMELINE tli ] START_REPLICATION Instructs server to start streaming WAL, starting at - WAL location XXX/XXX. + WAL location XXX/XXX. If TIMELINE option is specified, - streaming starts on timeline tli; + streaming starts on timeline tli; otherwise, the server's current timeline is selected. The server can reply with an error, for example if the requested section of WAL has already been recycled. On success, server responds with a CopyBothResponse @@ -1775,9 +1964,9 @@ The commands accepted in walsender mode are: If a slot's name is provided - via slot_name, it will be updated + via slot_name, it will be updated as replication progresses so that the server knows which WAL segments, - and if hot_standby_feedback is on which transactions, + and if hot_standby_feedback is on which transactions, are still needed by the standby. @@ -2111,11 +2300,11 @@ The commands accepted in walsender mode are: - START_REPLICATION SLOT slot_name LOGICAL XXX/XXX [ ( option_name [ option_value ] [, ...] ) ] + START_REPLICATION SLOT slot_name LOGICAL XXX/XXX [ ( option_name [ option_value ] [, ...] ) ] Instructs server to start streaming WAL for logical replication, starting - at WAL location XXX/XXX. The server can + at WAL location XXX/XXX. 
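As a sketch only, a logical-replication client connected with replication=database might create a slot using the contrib test_decoding output plugin and then start streaming from it; the slot name is hypothetical and 0/0 is merely a placeholder start location:

CREATE_REPLICATION_SLOT mylogslot LOGICAL test_decoding
START_REPLICATION SLOT mylogslot LOGICAL 0/0

After the second command succeeds, the connection switches to copy-both mode and WAL begins streaming to the frontend.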
The server can reply with an error, for example if the requested section of WAL has already been recycled. On success, server responds with a CopyBothResponse message, and then starts to stream WAL to the frontend. @@ -2133,7 +2322,7 @@ The commands accepted in walsender mode are: - SLOT slot_name + SLOT slot_name The name of the slot to stream changes from. This parameter is required, @@ -2144,7 +2333,7 @@ The commands accepted in walsender mode are: - XXX/XXX + XXX/XXX The WAL location to begin streaming at. @@ -2152,7 +2341,7 @@ The commands accepted in walsender mode are: - option_name + option_name The name of an option passed to the slot's logical decoding plugin. @@ -2160,7 +2349,7 @@ The commands accepted in walsender mode are: - option_value + option_value Optional value, in the form of a string constant, associated with the @@ -2173,31 +2362,42 @@ The commands accepted in walsender mode are: - DROP_REPLICATION_SLOT slot_name + + DROP_REPLICATION_SLOT slot_name WAIT DROP_REPLICATION_SLOT - Drops a replication slot, freeing any reserved server-side resources. If - the slot is currently in use by an active connection, this command fails. + Drops a replication slot, freeing any reserved server-side resources. If the slot is a logical slot that was created in a database other than the database the walsender is connected to, this command fails. - slot_name + slot_name The name of the slot to drop. + + + WAIT + + + This option causes the command to wait if the slot is active until + it becomes inactive, instead of the default behavior of raising an + error. + + + - BASE_BACKUP [ LABEL 'label' ] [ PROGRESS ] [ FAST ] [ WAL ] [ NOWAIT ] [ MAX_RATE rate ] [ TABLESPACE_MAP ] + BASE_BACKUP [ LABEL 'label' ] [ PROGRESS ] [ FAST ] [ WAL ] [ NOWAIT ] [ MAX_RATE rate ] [ TABLESPACE_MAP ] [ NOVERIFY_CHECKSUMS ] BASE_BACKUP @@ -2214,13 +2414,13 @@ The commands accepted in walsender mode are: Sets the label of the backup. If none is specified, a backup label of base backup will be used. The quoting rules for the label are the same as a standard SQL string with - turned on. + turned on. - PROGRESS + PROGRESS Request information required to generate a progress report. This will @@ -2237,7 +2437,7 @@ The commands accepted in walsender mode are: - FAST + FAST Request a fast checkpoint. @@ -2271,7 +2471,7 @@ The commands accepted in walsender mode are: - MAX_RATE rate + MAX_RATE rate Limit (throttle) the maximum amount of data transferred from server @@ -2292,7 +2492,18 @@ The commands accepted in walsender mode are: pg_tblspc in a file named tablespace_map. The tablespace map file includes each symbolic link name as it exists in the directory - pg_tblspc/ and the full path of that symbolic link. + pg_tblspc/ and the full path of that symbolic link. + + + + + + NOVERIFY_CHECKSUMS + + + By default, checksums are verified during a base backup if they are + enabled. Specifying NOVERIFY_CHECKSUMS disables + this verification. @@ -2345,9 +2556,9 @@ The commands accepted in walsender mode are: After the second regular result set, one or more CopyResponse results will be sent, one for the main data directory and one for each additional tablespace other - than pg_default and pg_global. The data in + than pg_default and pg_global. 
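For reference, the command whose output is being described here might be issued by a backup client as, for example (the label and transfer rate are arbitrary):

BASE_BACKUP LABEL 'nightly' PROGRESS FAST WAL NOWAIT MAX_RATE 32768

which requests that WAL be included and limits the transfer to 32768 kB/s.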
The data in the CopyResponse results will be a tar format (following the - ustar interchange format specified in the POSIX 1003.1-2008 + ustar interchange format specified in the POSIX 1003.1-2008 standard) dump of the tablespace contents, except that the two trailing blocks of zeroes specified in the standard are omitted. After the tar data is complete, a final ordinary result set will be sent, @@ -2358,29 +2569,40 @@ The commands accepted in walsender mode are: The tar archive for the data directory and each tablespace will contain all files in the directories, regardless of whether they are - PostgreSQL files or other files added to the same + PostgreSQL files or other files added to the same directory. The only excluded files are: - postmaster.pid + postmaster.pid - postmaster.opts + postmaster.opts + + + + + pg_internal.init (found in multiple directories) Various temporary files and directories created during the operation of the PostgreSQL server, such as any file or directory beginning - with pgsql_tmp. + with pgsql_tmp and temporary relations. + + + + + Unlogged relations, except for the init fork which is required to + recreate the (empty) unlogged relation on recovery. - pg_wal, including subdirectories. If the backup is run + pg_wal, including subdirectories. If the backup is run with WAL files included, a synthesized version of pg_wal will be included, but it will only contain the files necessary for the backup to work, not the rest of the contents. @@ -2388,10 +2610,10 @@ The commands accepted in walsender mode are: - pg_dynshmem, pg_notify, - pg_replslot, pg_serial, - pg_snapshots, pg_stat_tmp, and - pg_subtrans are copied as empty directories (even if + pg_dynshmem, pg_notify, + pg_replslot, pg_serial, + pg_snapshots, pg_stat_tmp, and + pg_subtrans are copied as empty directories (even if they are symbolic links). @@ -2421,7 +2643,7 @@ The commands accepted in walsender mode are: This section describes the logical replication protocol, which is the message flow started by the START_REPLICATION - SLOT slot_name + SLOT slot_name LOGICAL replication command. @@ -2473,7 +2695,7 @@ The commands accepted in walsender mode are: The individual protocol messages are discussed in the following subsections. Individual messages are described in - . + . @@ -3291,7 +3513,7 @@ Bind (F) The number of parameter format codes that follow - (denoted C below). + (denoted C below). This can be zero to indicate that there are no parameters or that the parameters all use the default format (text); or one, in which case the specified format code is applied @@ -3302,7 +3524,7 @@ Bind (F) - Int16[C] + Int16[C] @@ -3360,7 +3582,7 @@ Bind (F) The number of result-column format codes that follow - (denoted R below). + (denoted R below). This can be zero to indicate that there are no result columns or that the result columns should all use the default format (text); @@ -3372,7 +3594,7 @@ Bind (F) - Int16[R] + Int16[R] @@ -3447,7 +3669,7 @@ CancelRequest (F) The cancel request code. The value is chosen to contain - 1234 in the most significant 16 bits, and 5678 in the + 1234 in the most significant 16 bits, and 5678 in the least significant 16 bits. (To avoid confusion, this code must not be the same as any protocol version number.) @@ -3514,8 +3736,8 @@ Close (F) - 'S' to close a prepared statement; or - 'P' to close a portal. + 'S' to close a prepared statement; or + 'P' to close a portal. @@ -3837,7 +4059,7 @@ CopyInResponse (B) characters, etc). 
1 indicates the overall copy format is binary (similar to DataRow format). - See + See for more information. @@ -3849,13 +4071,13 @@ CopyInResponse (B) The number of columns in the data to be copied - (denoted N below). + (denoted N below). - Int16[N] + Int16[N] @@ -3911,7 +4133,7 @@ CopyOutResponse (B) is textual (rows separated by newlines, columns separated by separator characters, etc). 1 indicates the overall copy format is binary (similar to DataRow - format). See for more information. + format). See for more information. @@ -3922,13 +4144,13 @@ CopyOutResponse (B) The number of columns in the data to be copied - (denoted N below). + (denoted N below). - Int16[N] + Int16[N] @@ -3984,7 +4206,7 @@ CopyBothResponse (B) is textual (rows separated by newlines, columns separated by separator characters, etc). 1 indicates the overall copy format is binary (similar to DataRow - format). See for more information. + format). See for more information. @@ -3995,13 +4217,13 @@ CopyBothResponse (B) The number of columns in the data to be copied - (denoted N below). + (denoted N below). - Int16[N] + Int16[N] @@ -4124,8 +4346,8 @@ Describe (F) - 'S' to describe a prepared statement; or - 'P' to describe a portal. + 'S' to describe a prepared statement; or + 'P' to describe a portal. @@ -4225,7 +4447,7 @@ ErrorResponse (B) A code identifying the field type; if zero, this is the message terminator and no string follows. The presently defined field types are listed in - . + . Since more field types might be added in future, frontends should silently ignore fields of unrecognized type. @@ -4296,7 +4518,7 @@ Execute (F) Maximum number of rows to return, if portal contains a query that returns rows (ignored otherwise). Zero - denotes no limit. + denotes no limit. @@ -4386,7 +4608,7 @@ FunctionCall (F) The number of argument format codes that follow - (denoted C below). + (denoted C below). This can be zero to indicate that there are no arguments or that the arguments all use the default format (text); or one, in which case the specified format code is applied @@ -4397,7 +4619,7 @@ FunctionCall (F) - Int16[C] + Int16[C] @@ -4571,6 +4793,74 @@ GSSResponse (F) + + +NegotiateProtocolVersion (B) + + + + + + + + Byte1('v') + + + + Identifies the message as a protocol version negotiation + message. + + + + + + Int32 + + + + Length of message contents in bytes, including self. + + + + + + Int32 + + + + Newest minor protocol version supported by the server + for the major protocol version requested by the client. + + + + + + Int32 + + + + Number of protocol options not recognized by the server. + + + + + Then, for protocol option not recognized by the server, there + is the following: + + + + String + + + + The option name. + + + + + + + @@ -4649,7 +4939,7 @@ NoticeResponse (B) A code identifying the field type; if zero, this is the message terminator and no string follows. The presently defined field types are listed in - . + . Since more field types might be added in future, frontends should silently ignore fields of unrecognized type. @@ -4727,7 +5017,7 @@ NotificationResponse (B) - The payload string passed from the notifying process. + The payload string passed from the notifying process. @@ -5133,9 +5423,9 @@ ReadyForQuery (B) Current backend transaction status indicator. 
- Possible values are 'I' if idle (not in - a transaction block); 'T' if in a transaction - block; or 'E' if in a failed transaction + Possible values are 'I' if idle (not in + a transaction block); 'T' if in a transaction + block; or 'E' if in a failed transaction block (queries will be rejected until block is ended). @@ -5236,7 +5526,7 @@ RowDescription (B) - The data type size (see pg_type.typlen). + The data type size (see pg_type.typlen). Note that negative values denote variable-width types. @@ -5247,7 +5537,7 @@ RowDescription (B) - The type modifier (see pg_attribute.atttypmod). + The type modifier (see pg_attribute.atttypmod). The meaning of the modifier is type-specific. @@ -5274,7 +5564,7 @@ RowDescription (B) -SASLInitialresponse (F) +SASLInitialResponse (F) @@ -5411,7 +5701,7 @@ SSLRequest (F) The SSL request code. The value is chosen to contain - 1234 in the most significant 16 bits, and 5679 in the + 1234 in the most significant 16 bits, and 5679 in the least significant 16 bits. (To avoid confusion, this code must not be the same as any protocol version number.) @@ -5460,7 +5750,7 @@ StartupMessage (F) parameter name and value strings. A zero byte is required as a terminator after the last name/value pair. Parameters can appear in any - order. user is required, others are optional. + order. user is required, others are optional. Each parameter is specified as: @@ -5474,7 +5764,7 @@ StartupMessage (F) - user + user @@ -5485,7 +5775,7 @@ StartupMessage (F) - database + database @@ -5495,7 +5785,7 @@ StartupMessage (F) - options + options @@ -5503,34 +5793,36 @@ StartupMessage (F) deprecated in favor of setting individual run-time parameters.) Spaces within this string are considered to separate arguments, unless escaped with - a backslash (\); write \\ to + a backslash (\); write \\ to represent a literal backslash. - replication + replication Used to connect in streaming replication mode, where a small set of replication commands can be issued instead of SQL statements. Value can be - true, false, or - database, and the default is - false. See - for details. + true, false, or + database, and the default is + false. See + for details. - In addition to the above, any run-time parameter that can be - set at backend start time might be listed. Such settings - will be applied during backend start (after parsing the - command-line arguments if any). The values will act as - session defaults. + In addition to the above, other parameters may be listed. + Parameter names beginning with _pq_. are + reserved for use as protocol extensions, while others are + treated as run-time parameters to be set at backend start + time. Such settings will be applied during backend start + (after parsing the command-line arguments if any) and will + act as session defaults. @@ -5640,15 +5932,15 @@ message. -S +S Severity: the field contents are - ERROR, FATAL, or - PANIC (in an error message), or - WARNING, NOTICE, DEBUG, - INFO, or LOG (in a notice message), + ERROR, FATAL, or + PANIC (in an error message), or + WARNING, NOTICE, DEBUG, + INFO, or LOG (in a notice message), or a localized translation of one of these. Always present. @@ -5656,18 +5948,18 @@ message. -V +V Severity: the field contents are - ERROR, FATAL, or - PANIC (in an error message), or - WARNING, NOTICE, DEBUG, - INFO, or LOG (in a notice message). - This is identical to the S field except + ERROR, FATAL, or + PANIC (in an error message), or + WARNING, NOTICE, DEBUG, + INFO, or LOG (in a notice message). 
+ This is identical to the S field except that the contents are never localized. This is present only in - messages generated by PostgreSQL versions 9.6 + messages generated by PostgreSQL versions 9.6 and later. @@ -5675,19 +5967,19 @@ message. -C +C Code: the SQLSTATE code for the error (see ). Not localizable. Always present. + linkend="errcodes-appendix"/>). Not localizable. Always present. -M +M @@ -5700,7 +5992,7 @@ message. -D +D @@ -5712,7 +6004,7 @@ message. -H +H @@ -5726,7 +6018,7 @@ message. -P +P @@ -5740,21 +6032,21 @@ message. -p +p - Internal position: this is defined the same as the P + Internal position: this is defined the same as the P field, but it is used when the cursor position refers to an internally generated command rather than the one submitted by the client. - The q field will always appear when this field appears. + The q field will always appear when this field appears. -q +q @@ -5766,7 +6058,7 @@ message. -W +W @@ -5780,7 +6072,7 @@ message. -s +s @@ -5792,7 +6084,7 @@ message. -t +t @@ -5805,7 +6097,7 @@ message. -c +c @@ -5818,7 +6110,7 @@ message. -d +d @@ -5831,7 +6123,7 @@ message. -n +n @@ -5846,7 +6138,7 @@ message. -F +F @@ -5858,7 +6150,7 @@ message. -L +L @@ -5870,7 +6162,7 @@ message. -R +R @@ -5885,7 +6177,7 @@ message. The fields for schema name, table name, column name, data type name, and constraint name are supplied only for a limited number of error types; - see . Frontends should not assume that + see . Frontends should not assume that the presence of any of these fields guarantees the presence of another field. Core error sources observe the interrelationships noted above, but user-defined functions may use these fields in other ways. In the same @@ -5910,7 +6202,7 @@ not line breaks. This section describes the detailed format of each logical replication message. These messages are returned either by the replication slot SQL interface or are sent by a walsender. In case of a walsender they are encapsulated inside the replication -protocol WAL messages as described in +protocol WAL messages as described in and generally obey same message flow as physical replication. @@ -6469,7 +6761,7 @@ Delete Identifies the following TupleData message as a old tuple. - This field is is present if the table in which the delete has + This field is present if the table in which the delete has happened has REPLICA IDENTITY set to FULL. @@ -6497,6 +6789,62 @@ Delete + + +Truncate + + + + + + + + Byte1('T') + + + + Identifies the message as a truncate message. + + + + + + Int32 + + + + Number of relations + + + + + + Int8 + + + + Option bits for TRUNCATE: + 1 for CASCADE, 2 for RESTART IDENTITY + + + + + + Int32 + + + + ID of the relation corresponding to the ID in the relation + message. This field is repeated for each relation. + + + + + + + + + @@ -6610,8 +6958,8 @@ developers trying to update existing client libraries to protocol 3.0. The initial startup packet uses a flexible list-of-strings format instead of a fixed format. Notice that session default values for run-time parameters can now be specified directly in the startup packet. (Actually, -you could do that before using the options field, but given the -limited width of options and the lack of any way to quote +you could do that before using the options field, but given the +limited width of options and the lack of any way to quote whitespace in the values, it wasn't a very safe technique.) @@ -6622,7 +6970,7 @@ PasswordMessage now has a type byte. 
-ErrorResponse and NoticeResponse ('E' and 'N') +ErrorResponse and NoticeResponse ('E' and 'N') messages now contain multiple fields, from which the client code can assemble an error message of the desired level of verbosity. Note that individual fields will typically not end with a newline, whereas the single @@ -6630,7 +6978,7 @@ string sent in the older protocol always did. -The ReadyForQuery ('Z') message includes a transaction status +The ReadyForQuery ('Z') message includes a transaction status indicator. @@ -6643,7 +6991,7 @@ directly tied to the server's internal representation. -There is a new extended query sub-protocol, which adds the frontend +There is a new extended query sub-protocol, which adds the frontend message types Parse, Bind, Execute, Describe, Close, Flush, and Sync, and the backend message types ParseComplete, BindComplete, PortalSuspended, ParameterDescription, NoData, and CloseComplete. Existing clients do not @@ -6654,7 +7002,7 @@ might allow improvements in performance or functionality. COPY data is now encapsulated into CopyData and CopyDone messages. There is a well-defined way to recover from errors during COPY. The special -\. last line is not needed anymore, and is not sent +\. last line is not needed anymore, and is not sent during COPY OUT. (It is still recognized as a terminator during COPY IN, but its use is deprecated and will eventually be removed.) Binary COPY is supported. @@ -6672,31 +7020,31 @@ server data representations. -The backend sends ParameterStatus ('S') messages during connection +The backend sends ParameterStatus ('S') messages during connection startup for all parameters it considers interesting to the client library. Subsequently, a ParameterStatus message is sent whenever the active value changes for any of these parameters. -The RowDescription ('T') message carries new table OID and column +The RowDescription ('T') message carries new table OID and column number fields for each column of the described row. It also shows the format code for each column. -The CursorResponse ('P') message is no longer generated by +The CursorResponse ('P') message is no longer generated by the backend. -The NotificationResponse ('A') message has an additional string -field, which can carry a payload string passed +The NotificationResponse ('A') message has an additional string +field, which can carry a payload string passed from the NOTIFY event sender. -The EmptyQueryResponse ('I') message used to include an empty +The EmptyQueryResponse ('I') message used to include an empty string parameter; this has been removed. diff --git a/doc/src/sgml/queries.sgml b/doc/src/sgml/queries.sgml index 0588da2912..88bc189646 100644 --- a/doc/src/sgml/queries.sgml +++ b/doc/src/sgml/queries.sgml @@ -24,14 +24,14 @@ The process of retrieving or the command to retrieve data from a database is called a query. In SQL the - command is + command is used to specify queries. The general syntax of the SELECT command is WITH with_queries SELECT select_list FROM table_expression sort_specification The following sections describe the details of the select list, the - table expression, and the sort specification. WITH + table expression, and the sort specification. WITH queries are treated last since they are an advanced feature. @@ -51,15 +51,15 @@ SELECT * FROM table1; expression happens to provide. A select list can also select a subset of the available columns or make calculations using the columns. 
For example, if - table1 has columns named a, - b, and c (and perhaps others) you can make + table1 has columns named a, + b, and c (and perhaps others) you can make the following query: SELECT a, b + c FROM table1; - (assuming that b and c are of a numerical + (assuming that b and c are of a numerical data type). - See for more details. + See for more details. @@ -89,19 +89,19 @@ SELECT random(); A table expression computes a table. The - table expression contains a FROM clause that is - optionally followed by WHERE, GROUP BY, and - HAVING clauses. Trivial table expressions simply refer + table expression contains a FROM clause that is + optionally followed by WHERE, GROUP BY, and + HAVING clauses. Trivial table expressions simply refer to a table on disk, a so-called base table, but more complex expressions can be used to modify or combine base tables in various ways. - The optional WHERE, GROUP BY, and - HAVING clauses in the table expression specify a + The optional WHERE, GROUP BY, and + HAVING clauses in the table expression specify a pipeline of successive transformations performed on the table - derived in the FROM clause. All these transformations + derived in the FROM clause. All these transformations produce a virtual table that provides the rows that are passed to the select list to compute the output rows of the query. @@ -110,7 +110,7 @@ SELECT random(); The <literal>FROM</literal> Clause - The derives a + The derives a table from one or more other tables given in a comma-separated table reference list. @@ -118,14 +118,14 @@ FROM table_reference , table_r A table reference can be a table name (possibly schema-qualified), - or a derived table such as a subquery, a JOIN construct, or + or a derived table such as a subquery, a JOIN construct, or complex combinations of these. If more than one table reference is - listed in the FROM clause, the tables are cross-joined + listed in the FROM clause, the tables are cross-joined (that is, the Cartesian product of their rows is formed; see below). - The result of the FROM list is an intermediate virtual + The result of the FROM list is an intermediate virtual table that can then be subject to - transformations by the WHERE, GROUP BY, - and HAVING clauses and is finally the result of the + transformations by the WHERE, GROUP BY, + and HAVING clauses and is finally the result of the overall table expression. @@ -137,14 +137,14 @@ FROM table_reference , table_r When a table reference names a table that is the parent of a table inheritance hierarchy, the table reference produces rows of not only that table but all of its descendant tables, unless the - key word ONLY precedes the table name. However, the + key word ONLY precedes the table name. However, the reference produces only the columns that appear in the named table — any columns added in subtables are ignored. - Instead of writing ONLY before the table name, you can write - * after the table name to explicitly specify that descendant + Instead of writing ONLY before the table name, you can write + * after the table name to explicitly specify that descendant tables are included. There is no real reason to use this syntax any more, because searching descendant tables is now always the default behavior. However, it is supported for compatibility with older releases. @@ -168,8 +168,8 @@ FROM table_reference , table_r Joins of all types can be chained together, or nested: either or both T1 and T2 can be joined tables. 
Parentheses - can be used around JOIN clauses to control the join - order. In the absence of parentheses, JOIN clauses + can be used around JOIN clauses to control the join + order. In the absence of parentheses, JOIN clauses nest left-to-right. @@ -215,7 +215,7 @@ FROM table_reference , table_r This latter equivalence does not hold exactly when more than two - tables appear, because JOIN binds more tightly than + tables appear, because JOIN binds more tightly than comma. For example FROM T1 CROSS JOIN T2 INNER JOIN T3 @@ -262,8 +262,8 @@ FROM table_reference , table_r The join condition is specified in the - ON or USING clause, or implicitly by - the word NATURAL. The join condition determines + ON or USING clause, or implicitly by + the word NATURAL. The join condition determines which rows from the two source tables are considered to match, as explained in detail below. @@ -273,7 +273,7 @@ FROM table_reference , table_r - INNER JOIN + INNER JOIN @@ -284,7 +284,7 @@ FROM table_reference , table_r - LEFT OUTER JOIN + LEFT OUTER JOIN join left @@ -307,7 +307,7 @@ FROM table_reference , table_r - RIGHT OUTER JOIN + RIGHT OUTER JOIN join right @@ -330,7 +330,7 @@ FROM table_reference , table_r - FULL OUTER JOIN + FULL OUTER JOIN @@ -347,35 +347,35 @@ FROM table_reference , table_r - The ON clause is the most general kind of join + The ON clause is the most general kind of join condition: it takes a Boolean value expression of the same - kind as is used in a WHERE clause. A pair of rows - from T1 and T2 match if the - ON expression evaluates to true. + kind as is used in a WHERE clause. A pair of rows + from T1 and T2 match if the + ON expression evaluates to true. - The USING clause is a shorthand that allows you to take + The USING clause is a shorthand that allows you to take advantage of the specific situation where both sides of the join use the same name for the joining column(s). It takes a comma-separated list of the shared column names and forms a join condition that includes an equality comparison - for each one. For example, joining T1 - and T2 with USING (a, b) produces - the join condition ON T1.a - = T2.a AND T1.b - = T2.b. + for each one. For example, joining T1 + and T2 with USING (a, b) produces + the join condition ON T1.a + = T2.a AND T1.b + = T2.b. - Furthermore, the output of JOIN USING suppresses + Furthermore, the output of JOIN USING suppresses redundant columns: there is no need to print both of the matched columns, since they must have equal values. While JOIN - ON produces all columns from T1 followed by all - columns from T2, JOIN USING produces one + ON produces all columns from T1 followed by all + columns from T2, JOIN USING produces one output column for each of the listed column pairs (in the listed - order), followed by any remaining columns from T1, - followed by any remaining columns from T2. + order), followed by any remaining columns from T1, + followed by any remaining columns from T2. @@ -386,10 +386,10 @@ FROM table_reference , table_r natural join - Finally, NATURAL is a shorthand form of - USING: it forms a USING list + Finally, NATURAL is a shorthand form of + USING: it forms a USING list consisting of all column names that appear in both - input tables. As with USING, these columns appear + input tables. As with USING, these columns appear only once in the output table. If there are no common column names, NATURAL JOIN behaves like JOIN ... ON TRUE, producing a cross-product join. 
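To illustrate the degenerate case just mentioned, suppose two hypothetical tables t_a and t_b have no column names in common; then the following two queries return the same Cartesian product:

SELECT * FROM t_a NATURAL JOIN t_b;
SELECT * FROM t_a CROSS JOIN t_b;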
@@ -399,7 +399,7 @@ FROM table_reference , table_r USING is reasonably safe from column changes in the joined relations since only the listed columns - are combined. NATURAL is considerably more risky since + are combined. NATURAL is considerably more risky since any schema changes to either relation that cause a new matching column name to be present will cause the join to combine that new column as well. @@ -428,7 +428,7 @@ FROM table_reference , table_r then we get the following results for the various joins: -=> SELECT * FROM t1 CROSS JOIN t2; +=> SELECT * FROM t1 CROSS JOIN t2; num | name | num | value -----+------+-----+------- 1 | a | 1 | xxx @@ -442,28 +442,28 @@ FROM table_reference , table_r 3 | c | 5 | zzz (9 rows) -=> SELECT * FROM t1 INNER JOIN t2 ON t1.num = t2.num; +=> SELECT * FROM t1 INNER JOIN t2 ON t1.num = t2.num; num | name | num | value -----+------+-----+------- 1 | a | 1 | xxx 3 | c | 3 | yyy (2 rows) -=> SELECT * FROM t1 INNER JOIN t2 USING (num); +=> SELECT * FROM t1 INNER JOIN t2 USING (num); num | name | value -----+------+------- 1 | a | xxx 3 | c | yyy (2 rows) -=> SELECT * FROM t1 NATURAL INNER JOIN t2; +=> SELECT * FROM t1 NATURAL INNER JOIN t2; num | name | value -----+------+------- 1 | a | xxx 3 | c | yyy (2 rows) -=> SELECT * FROM t1 LEFT JOIN t2 ON t1.num = t2.num; +=> SELECT * FROM t1 LEFT JOIN t2 ON t1.num = t2.num; num | name | num | value -----+------+-----+------- 1 | a | 1 | xxx @@ -471,7 +471,7 @@ FROM table_reference , table_r 3 | c | 3 | yyy (3 rows) -=> SELECT * FROM t1 LEFT JOIN t2 USING (num); +=> SELECT * FROM t1 LEFT JOIN t2 USING (num); num | name | value -----+------+------- 1 | a | xxx @@ -479,7 +479,7 @@ FROM table_reference , table_r 3 | c | yyy (3 rows) -=> SELECT * FROM t1 RIGHT JOIN t2 ON t1.num = t2.num; +=> SELECT * FROM t1 RIGHT JOIN t2 ON t1.num = t2.num; num | name | num | value -----+------+-----+------- 1 | a | 1 | xxx @@ -487,7 +487,7 @@ FROM table_reference , table_r | | 5 | zzz (3 rows) -=> SELECT * FROM t1 FULL JOIN t2 ON t1.num = t2.num; +=> SELECT * FROM t1 FULL JOIN t2 ON t1.num = t2.num; num | name | num | value -----+------+-----+------- 1 | a | 1 | xxx @@ -499,12 +499,12 @@ FROM table_reference , table_r - The join condition specified with ON can also contain + The join condition specified with ON can also contain conditions that do not relate directly to the join. This can prove useful for some queries but needs to be thought out carefully. For example: -=> SELECT * FROM t1 LEFT JOIN t2 ON t1.num = t2.num AND t2.value = 'xxx'; +=> SELECT * FROM t1 LEFT JOIN t2 ON t1.num = t2.num AND t2.value = 'xxx'; num | name | num | value -----+------+-----+------- 1 | a | 1 | xxx @@ -512,19 +512,19 @@ FROM table_reference , table_r 3 | c | | (3 rows) - Notice that placing the restriction in the WHERE clause + Notice that placing the restriction in the WHERE clause produces a different result: -=> SELECT * FROM t1 LEFT JOIN t2 ON t1.num = t2.num WHERE t2.value = 'xxx'; +=> SELECT * FROM t1 LEFT JOIN t2 ON t1.num = t2.num WHERE t2.value = 'xxx'; num | name | num | value -----+------+-----+------- 1 | a | 1 | xxx (1 row) - This is because a restriction placed in the ON - clause is processed before the join, while - a restriction placed in the WHERE clause is processed - after the join. + This is because a restriction placed in the ON + clause is processed before the join, while + a restriction placed in the WHERE clause is processed + after the join. That does not matter with inner joins, but it matters a lot with outer joins. 
@@ -589,13 +589,13 @@ SELECT * FROM my_table AS m WHERE my_table.a > 5; -- wrong SELECT * FROM people AS mother JOIN people AS child ON mother.id = child.mother_id; Additionally, an alias is required if the table reference is a - subquery (see ). + subquery (see ). Parentheses are used to resolve ambiguities. In the following example, the first statement assigns the alias b to the second - instance of my_table, but the second statement assigns the + instance of my_table, but the second statement assigns the alias to the result of the join: SELECT * FROM my_table AS a CROSS JOIN my_table AS b ... @@ -615,9 +615,9 @@ FROM table_reference AS - When an alias is applied to the output of a JOIN + When an alias is applied to the output of a JOIN clause, the alias hides the original - name(s) within the JOIN. For example: + name(s) within the JOIN. For example: SELECT a.* FROM my_table AS a JOIN your_table AS b ON ... @@ -625,8 +625,8 @@ SELECT a.* FROM my_table AS a JOIN your_table AS b ON ... SELECT a.* FROM (my_table AS a JOIN your_table AS b ON ...) AS c - is not valid; the table alias a is not visible - outside the alias c. + is not valid; the table alias a is not visible + outside the alias c. @@ -640,7 +640,7 @@ SELECT a.* FROM (my_table AS a JOIN your_table AS b ON ...) AS c Subqueries specifying a derived table must be enclosed in parentheses and must be assigned a table - alias name (as in ). For + alias name (as in ). For example: FROM (SELECT * FROM table1) AS alias_name @@ -655,39 +655,39 @@ FROM (SELECT * FROM table1) AS alias_name - A subquery can also be a VALUES list: + A subquery can also be a VALUES list: FROM (VALUES ('anne', 'smith'), ('bob', 'jones'), ('joe', 'blow')) AS names(first, last) Again, a table alias is required. Assigning alias names to the columns - of the VALUES list is optional, but is good practice. - For more information see . + of the VALUES list is optional, but is good practice. + For more information see . Table Functions - table function + table function - function - in the FROM clause + function + in the FROM clause Table functions are functions that produce a set of rows, made up of either base data types (scalar types) or composite data types (table rows). They are used like a table, view, or subquery in - the FROM clause of a query. Columns returned by table - functions can be included in SELECT, - JOIN, or WHERE clauses in the same manner + the FROM clause of a query. Columns returned by table + functions can be included in SELECT, + JOIN, or WHERE clauses in the same manner as columns of a table, view, or subquery. - Table functions may also be combined using the ROWS FROM + Table functions may also be combined using the ROWS FROM syntax, with the results returned in parallel columns; the number of result rows in this case is that of the largest function result, with smaller results padded with null values to match. @@ -704,7 +704,7 @@ ROWS FROM( function_call , ... function result columns. This column numbers the rows of the function result set, starting from 1. (This is a generalization of the SQL-standard syntax for UNNEST ... WITH ORDINALITY.) - By default, the ordinal column is called ordinality, but + By default, the ordinal column is called ordinality, but a different column name can be assigned to it using an AS clause. @@ -713,7 +713,7 @@ ROWS FROM( function_call , ... 
The special table function UNNEST may be called with any number of array parameters, and it returns a corresponding number of columns, as if UNNEST - () had been called on each parameter + () had been called on each parameter separately and combined using the ROWS FROM construct. @@ -723,7 +723,7 @@ UNNEST( array_expression , ... If no table_alias is specified, the function - name is used as the table name; in the case of a ROWS FROM() + name is used as the table name; in the case of a ROWS FROM() construct, the first function's name is used. @@ -762,7 +762,7 @@ SELECT * FROM vw_getfoo; In some cases it is useful to define table functions that can return different column sets depending on how they are invoked. To support this, the table function can be declared as returning - the pseudo-type record. When such a function is used in + the pseudo-type record. When such a function is used in a query, the expected row structure must be specified in the query itself, so that the system can know how to parse and plan the query. This syntax looks like: @@ -775,16 +775,16 @@ ROWS FROM( ... function_call AS (column_ - When not using the ROWS FROM() syntax, + When not using the ROWS FROM() syntax, the column_definition list replaces the column - alias list that could otherwise be attached to the FROM + alias list that could otherwise be attached to the FROM item; the names in the column definitions serve as column aliases. - When using the ROWS FROM() syntax, + When using the ROWS FROM() syntax, a column_definition list can be attached to each member function separately; or if there is only one member function - and no WITH ORDINALITY clause, + and no WITH ORDINALITY clause, a column_definition list can be written in - place of a column alias list following ROWS FROM(). + place of a column alias list following ROWS FROM(). @@ -795,52 +795,52 @@ SELECT * AS t1(proname name, prosrc text) WHERE proname LIKE 'bytea%'; - The function - (part of the module) executes + The function + (part of the module) executes a remote query. It is declared to return - record since it might be used for any kind of query. + record since it might be used for any kind of query. The actual column set must be specified in the calling query so - that the parser knows, for example, what * should + that the parser knows, for example, what * should expand to. - <literal>LATERAL</> Subqueries + <literal>LATERAL</literal> Subqueries - LATERAL - in the FROM clause + LATERAL + in the FROM clause - Subqueries appearing in FROM can be - preceded by the key word LATERAL. This allows them to - reference columns provided by preceding FROM items. + Subqueries appearing in FROM can be + preceded by the key word LATERAL. This allows them to + reference columns provided by preceding FROM items. (Without LATERAL, each subquery is evaluated independently and so cannot cross-reference any other - FROM item.) + FROM item.) - Table functions appearing in FROM can also be - preceded by the key word LATERAL, but for functions the + Table functions appearing in FROM can also be + preceded by the key word LATERAL, but for functions the key word is optional; the function's arguments can contain references - to columns provided by preceding FROM items in any case. + to columns provided by preceding FROM items in any case. A LATERAL item can appear at top level in the - FROM list, or within a JOIN tree. In the latter + FROM list, or within a JOIN tree. 
In the latter case it can also refer to any items that are on the left-hand side of a - JOIN that it is on the right-hand side of. + JOIN that it is on the right-hand side of. - When a FROM item contains LATERAL + When a FROM item contains LATERAL cross-references, evaluation proceeds as follows: for each row of the - FROM item providing the cross-referenced column(s), or - set of rows of multiple FROM items providing the + FROM item providing the cross-referenced column(s), or + set of rows of multiple FROM items providing the columns, the LATERAL item is evaluated using that row or row set's values of the columns. The resulting row(s) are joined as usual with the rows they were computed from. This is @@ -860,7 +860,7 @@ SELECT * FROM foo, bar WHERE bar.id = foo.bar_id; LATERAL is primarily useful when the cross-referenced column is necessary for computing the row(s) to be joined. A common application is providing an argument value for a set-returning function. - For example, supposing that vertices(polygon) returns the + For example, supposing that vertices(polygon) returns the set of vertices of a polygon, we could identify close-together vertices of polygons stored in a table with: @@ -878,15 +878,15 @@ FROM polygons p1 CROSS JOIN LATERAL vertices(p1.poly) v1, WHERE (v1 <-> v2) < 10 AND p1.id != p2.id; or in several other equivalent formulations. (As already mentioned, - the LATERAL key word is unnecessary in this example, but + the LATERAL key word is unnecessary in this example, but we use it for clarity.) - It is often particularly handy to LEFT JOIN to a + It is often particularly handy to LEFT JOIN to a LATERAL subquery, so that source rows will appear in the result even if the LATERAL subquery produces no - rows for them. For example, if get_product_names() returns + rows for them. For example, if get_product_names() returns the names of products made by a manufacturer, but some manufacturers in our table currently produce no products, we could find out which ones those are like this: @@ -908,30 +908,30 @@ WHERE pname IS NULL; The syntax of the is + endterm="sql-where-title"/> is WHERE search_condition where search_condition is any value - expression (see ) that + expression (see ) that returns a value of type boolean. - After the processing of the FROM clause is done, each + After the processing of the FROM clause is done, each row of the derived virtual table is checked against the search condition. If the result of the condition is true, the row is kept in the output table, otherwise (i.e., if the result is false or null) it is discarded. The search condition typically references at least one column of the table generated in the - FROM clause; this is not required, but otherwise the - WHERE clause will be fairly useless. + FROM clause; this is not required, but otherwise the + WHERE clause will be fairly useless. The join condition of an inner join can be written either in - the WHERE clause or in the JOIN clause. + the WHERE clause or in the JOIN clause. For example, these table expressions are equivalent: FROM a, b WHERE a.id = b.id AND b.val > 5 @@ -945,13 +945,13 @@ FROM a INNER JOIN b ON (a.id = b.id) WHERE b.val > 5 FROM a NATURAL JOIN b WHERE b.val > 5 Which one of these you use is mainly a matter of style. The - JOIN syntax in the FROM clause is + JOIN syntax in the FROM clause is probably not as portable to other SQL database management systems, even though it is in the SQL standard. For outer joins there is no choice: they must be done in - the FROM clause. 
The ON or USING - clause of an outer join is not equivalent to a - WHERE condition, because it results in the addition + the FROM clause. The ON or USING + clause of an outer join is not equivalent to a + WHERE condition, because it results in the addition of rows (for unmatched input rows) as well as the removal of rows in the final result. @@ -973,14 +973,14 @@ SELECT ... FROM fdt WHERE c1 BETWEEN (SELECT c3 FROM t2 WHERE c2 = fdt.c1 + 10) SELECT ... FROM fdt WHERE EXISTS (SELECT c1 FROM t2 WHERE c2 > fdt.c1) fdt is the table derived in the - FROM clause. Rows that do not meet the search - condition of the WHERE clause are eliminated from + FROM clause. Rows that do not meet the search + condition of the WHERE clause are eliminated from fdt. Notice the use of scalar subqueries as value expressions. Just like any other query, the subqueries can employ complex table expressions. Notice also how fdt is referenced in the subqueries. - Qualifying c1 as fdt.c1 is only necessary - if c1 is also the name of a column in the derived + Qualifying c1 as fdt.c1 is only necessary + if c1 is also the name of a column in the derived input table of the subquery. But qualifying the column name adds clarity even when it is not needed. This example shows how the column naming scope of an outer query extends into its inner queries. @@ -1000,9 +1000,9 @@ SELECT ... FROM fdt WHERE EXISTS (SELECT c1 FROM t2 WHERE c2 > fdt.c1) - After passing the WHERE filter, the derived input - table might be subject to grouping, using the GROUP BY - clause, and elimination of group rows using the HAVING + After passing the WHERE filter, the derived input + table might be subject to grouping, using the GROUP BY + clause, and elimination of group rows using the HAVING clause. @@ -1014,7 +1014,7 @@ SELECT select_list - The is + The is used to group together those rows in a table that have the same values in all the columns listed. The order in which the columns are listed does not matter. The effect is to combine each set @@ -1023,7 +1023,7 @@ SELECT select_list eliminate redundancy in the output and/or compute aggregates that apply to these groups. For instance: -=> SELECT * FROM test1; +=> SELECT * FROM test1; x | y ---+--- a | 3 @@ -1032,7 +1032,7 @@ SELECT select_list a | 1 (4 rows) -=> SELECT x FROM test1 GROUP BY x; +=> SELECT x FROM test1 GROUP BY x; x --- a @@ -1045,17 +1045,17 @@ SELECT select_list In the second query, we could not have written SELECT * FROM test1 GROUP BY x, because there is no single value - for the column y that could be associated with each + for the column y that could be associated with each group. The grouped-by columns can be referenced in the select list since they have a single value in each group. In general, if a table is grouped, columns that are not - listed in GROUP BY cannot be referenced except in aggregate + listed in GROUP BY cannot be referenced except in aggregate expressions. An example with aggregate expressions is: -=> SELECT x, sum(y) FROM test1 GROUP BY x; +=> SELECT x, sum(y) FROM test1 GROUP BY x; x | sum ---+----- a | 4 @@ -1066,15 +1066,15 @@ SELECT select_list Here sum is an aggregate function that computes a single value over the entire group. More information about the available aggregate functions can be found in . + linkend="functions-aggregate"/>. Grouping without aggregate expressions effectively calculates the set of distinct values in a column. This can also be achieved - using the DISTINCT clause (see ). + using the DISTINCT clause (see ). 
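     For instance, with the test1 table used above, these two
     queries return the same set of rows (though not necessarily in the
     same order):
SELECT x FROM test1 GROUP BY x;

SELECT DISTINCT x FROM test1;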
@@ -1088,10 +1088,10 @@ SELECT product_id, p.name, (sum(s.units) * p.price) AS sales In this example, the columns product_id, p.name, and p.price must be - in the GROUP BY clause since they are referenced in + in the GROUP BY clause since they are referenced in the query select list (but see below). The column - s.units does not have to be in the GROUP - BY list since it is only used in an aggregate expression + s.units does not have to be in the GROUP + BY list since it is only used in an aggregate expression (sum(...)), which represents the sales of a product. For each product, the query returns a summary row about all sales of the product. @@ -1110,9 +1110,9 @@ SELECT product_id, p.name, (sum(s.units) * p.price) AS sales - In strict SQL, GROUP BY can only group by columns of + In strict SQL, GROUP BY can only group by columns of the source table but PostgreSQL extends - this to also allow GROUP BY to group by columns in the + this to also allow GROUP BY to group by columns in the select list. Grouping by value expressions instead of simple column names is also allowed. @@ -1125,12 +1125,12 @@ SELECT product_id, p.name, (sum(s.units) * p.price) AS sales If a table has been grouped using GROUP BY, but only certain groups are of interest, the HAVING clause can be used, much like a - WHERE clause, to eliminate groups from the result. + WHERE clause, to eliminate groups from the result. The syntax is: SELECT select_list FROM ... WHERE ... GROUP BY ... HAVING boolean_expression - Expressions in the HAVING clause can refer both to + Expressions in the HAVING clause can refer both to grouped expressions and to ungrouped expressions (which necessarily involve an aggregate function). @@ -1138,14 +1138,14 @@ SELECT select_list FROM ... WHERE ... Example: -=> SELECT x, sum(y) FROM test1 GROUP BY x HAVING sum(y) > 3; +=> SELECT x, sum(y) FROM test1 GROUP BY x HAVING sum(y) > 3; x | sum ---+----- a | 4 b | 5 (2 rows) -=> SELECT x, sum(y) FROM test1 GROUP BY x HAVING x < 'c'; +=> SELECT x, sum(y) FROM test1 GROUP BY x HAVING x < 'c'; x | sum ---+----- a | 4 @@ -1163,26 +1163,26 @@ SELECT product_id, p.name, (sum(s.units) * (p.price - p.cost)) AS profit GROUP BY product_id, p.name, p.price, p.cost HAVING sum(p.price * s.units) > 5000; - In the example above, the WHERE clause is selecting + In the example above, the WHERE clause is selecting rows by a column that is not grouped (the expression is only true for - sales during the last four weeks), while the HAVING + sales during the last four weeks), while the HAVING clause restricts the output to groups with total gross sales over 5000. Note that the aggregate expressions do not necessarily need to be the same in all parts of the query. - If a query contains aggregate function calls, but no GROUP BY + If a query contains aggregate function calls, but no GROUP BY clause, grouping still occurs: the result is a single group row (or perhaps no rows at all, if the single row is then eliminated by - HAVING). - The same is true if it contains a HAVING clause, even - without any aggregate function calls or GROUP BY clause. + HAVING). + The same is true if it contains a HAVING clause, even + without any aggregate function calls or GROUP BY clause. 
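     For instance, with the test1 table again, the following
     query forms a single group out of the whole table; that one group row
     then fails the HAVING condition, so no rows at all are
     returned:
SELECT sum(y) FROM test1 HAVING sum(y) > 100;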
- <literal>GROUPING SETS</>, <literal>CUBE</>, and <literal>ROLLUP</> + <literal>GROUPING SETS</literal>, <literal>CUBE</literal>, and <literal>ROLLUP</literal> GROUPING SETS @@ -1196,13 +1196,13 @@ SELECT product_id, p.name, (sum(s.units) * (p.price - p.cost)) AS profit More complex grouping operations than those described above are possible - using the concept of grouping sets. The data selected by - the FROM and WHERE clauses is grouped separately + using the concept of grouping sets. The data selected by + the FROM and WHERE clauses is grouped separately by each specified grouping set, aggregates computed for each group just as - for simple GROUP BY clauses, and then the results returned. + for simple GROUP BY clauses, and then the results returned. For example: -=> SELECT * FROM items_sold; +=> SELECT * FROM items_sold; brand | size | sales -------+------+------- Foo | L | 10 @@ -1211,7 +1211,7 @@ SELECT product_id, p.name, (sum(s.units) * (p.price - p.cost)) AS profit Bar | L | 5 (4 rows) -=> SELECT brand, size, sum(sales) FROM items_sold GROUP BY GROUPING SETS ((brand), (size), ()); +=> SELECT brand, size, sum(sales) FROM items_sold GROUP BY GROUPING SETS ((brand), (size), ()); brand | size | sum -------+------+----- Foo | | 30 @@ -1224,35 +1224,35 @@ SELECT product_id, p.name, (sum(s.units) * (p.price - p.cost)) AS profit - Each sublist of GROUPING SETS may specify zero or more columns + Each sublist of GROUPING SETS may specify zero or more columns or expressions and is interpreted the same way as though it were directly - in the GROUP BY clause. An empty grouping set means that all + in the GROUP BY clause. An empty grouping set means that all rows are aggregated down to a single group (which is output even if no input rows were present), as described above for the case of aggregate - functions with no GROUP BY clause. + functions with no GROUP BY clause. References to the grouping columns or expressions are replaced by null values in result rows for grouping sets in which those columns do not appear. To distinguish which grouping a particular output - row resulted from, see . + row resulted from, see . A shorthand notation is provided for specifying two common types of grouping set. A clause of the form -ROLLUP ( e1, e2, e3, ... ) +ROLLUP ( e1, e2, e3, ... ) represents the given list of expressions and all prefixes of the list including the empty list; thus it is equivalent to GROUPING SETS ( - ( e1, e2, e3, ... ), + ( e1, e2, e3, ... ), ... - ( e1, e2 ), - ( e1 ), + ( e1, e2 ), + ( e1 ), ( ) ) @@ -1263,7 +1263,7 @@ GROUPING SETS ( A clause of the form -CUBE ( e1, e2, ... ) +CUBE ( e1, e2, ... ) represents the given list and all of its possible subsets (i.e. the power set). Thus @@ -1286,7 +1286,7 @@ GROUPING SETS ( - The individual elements of a CUBE or ROLLUP + The individual elements of a CUBE or ROLLUP clause may be either individual expressions, or sublists of elements in parentheses. In the latter case, the sublists are treated as single units for the purposes of generating the individual grouping sets. @@ -1319,15 +1319,15 @@ GROUPING SETS ( - The CUBE and ROLLUP constructs can be used either - directly in the GROUP BY clause, or nested inside a - GROUPING SETS clause. If one GROUPING SETS clause + The CUBE and ROLLUP constructs can be used either + directly in the GROUP BY clause, or nested inside a + GROUPING SETS clause. 
If one GROUPING SETS clause is nested inside another, the effect is the same as if all the elements of the inner clause had been written directly in the outer clause. - If multiple grouping items are specified in a single GROUP BY + If multiple grouping items are specified in a single GROUP BY clause, then the final list of grouping sets is the cross product of the individual items. For example: @@ -1346,12 +1346,12 @@ GROUP BY GROUPING SETS ( - The construct (a, b) is normally recognized in expressions as + The construct (a, b) is normally recognized in expressions as a row constructor. - Within the GROUP BY clause, this does not apply at the top - levels of expressions, and (a, b) is parsed as a list of - expressions as described above. If for some reason you need - a row constructor in a grouping expression, use ROW(a, b). + Within the GROUP BY clause, this does not apply at the top + levels of expressions, and (a, b) is parsed as a list of + expressions as described above. If for some reason you need + a row constructor in a grouping expression, use ROW(a, b). @@ -1361,40 +1361,40 @@ GROUP BY GROUPING SETS ( window function - order of execution + order of execution If the query contains any window functions (see - , - and - ), these functions are evaluated - after any grouping, aggregation, and HAVING filtering is + , + and + ), these functions are evaluated + after any grouping, aggregation, and HAVING filtering is performed. That is, if the query uses any aggregates, GROUP - BY, or HAVING, then the rows seen by the window functions + BY, or HAVING, then the rows seen by the window functions are the group rows instead of the original table rows from - FROM/WHERE. + FROM/WHERE. When multiple window functions are used, all the window functions having - syntactically equivalent PARTITION BY and ORDER BY + syntactically equivalent PARTITION BY and ORDER BY clauses in their window definitions are guaranteed to be evaluated in a single pass over the data. Therefore they will see the same sort ordering, - even if the ORDER BY does not uniquely determine an ordering. + even if the ORDER BY does not uniquely determine an ordering. However, no guarantees are made about the evaluation of functions having - different PARTITION BY or ORDER BY specifications. + different PARTITION BY or ORDER BY specifications. (In such cases a sort step is typically required between the passes of window function evaluations, and the sort is not guaranteed to preserve - ordering of rows that its ORDER BY sees as equivalent.) + ordering of rows that its ORDER BY sees as equivalent.) Currently, window functions always require presorted data, and so the query output will be ordered according to one or another of the window - functions' PARTITION BY/ORDER BY clauses. + functions' PARTITION BY/ORDER BY clauses. It is not recommended to rely on this, however. Use an explicit - top-level ORDER BY clause if you want to be sure the + top-level ORDER BY clause if you want to be sure the results are sorted in a particular way. @@ -1430,18 +1430,18 @@ GROUP BY GROUPING SETS ( The simplest kind of select list is * which emits all columns that the table expression produces. Otherwise, a select list is a comma-separated list of value expressions (as - defined in ). For instance, it + defined in ). For instance, it could be a list of column names: SELECT a, b, c FROM ... 
- The columns names a, b, and c + The columns names a, b, and c are either the actual names of the columns of tables referenced - in the FROM clause, or the aliases given to them as - explained in . The name + in the FROM clause, or the aliases given to them as + explained in . The name space available in the select list is the same as in the - WHERE clause, unless grouping is used, in which case - it is the same as in the HAVING clause. + WHERE clause, unless grouping is used, in which case + it is the same as in the HAVING clause. @@ -1455,8 +1455,8 @@ SELECT tbl1.a, tbl2.a, tbl1.b FROM ... SELECT tbl1.*, tbl2.a FROM ... - See for more about - the table_name.* notation. + See for more about + the table_name.* notation. @@ -1465,7 +1465,7 @@ SELECT tbl1.*, tbl2.a FROM ... value expression is evaluated once for each result row, with the row's values substituted for any column references. But the expressions in the select list do not have to reference any - columns in the table expression of the FROM clause; + columns in the table expression of the FROM clause; they can be constant arithmetic expressions, for instance. @@ -1480,7 +1480,7 @@ SELECT tbl1.*, tbl2.a FROM ... The entries in the select list can be assigned names for subsequent - processing, such as for use in an ORDER BY clause + processing, such as for use in an ORDER BY clause or for display by the client application. For example: SELECT a AS value, b + c AS sum FROM ... @@ -1488,7 +1488,7 @@ SELECT a AS value, b + c AS sum FROM ... - If no output column name is specified using AS, + If no output column name is specified using AS, the system assigns a default column name. For simple column references, this is the name of the referenced column. For function calls, this is the name of the function. For complex expressions, @@ -1496,12 +1496,12 @@ SELECT a AS value, b + c AS sum FROM ... - The AS keyword is optional, but only if the new column + The AS keyword is optional, but only if the new column name does not match any PostgreSQL keyword (see ). To avoid an accidental match to + linkend="sql-keywords-appendix"/>). To avoid an accidental match to a keyword, you can double-quote the column name. For example, - VALUE is a keyword, so this does not work: + VALUE is a keyword, so this does not work: SELECT a value, b + c AS sum FROM ... @@ -1517,8 +1517,8 @@ SELECT a "value", b + c AS sum FROM ... The naming of output columns here is different from that done in - the FROM clause (see ). It is possible + the FROM clause (see ). It is possible to rename the same column twice, but the name assigned in the select list is the one that will be passed on. @@ -1544,13 +1544,13 @@ SELECT a "value", b + c AS sum FROM ... SELECT DISTINCT select_list ... - (Instead of DISTINCT the key word ALL + (Instead of DISTINCT the key word ALL can be used to specify the default behavior of retaining all rows.) - null value - in DISTINCT + null value + in DISTINCT @@ -1571,16 +1571,16 @@ SELECT DISTINCT ON (expression , first row of a set is unpredictable unless the query is sorted on enough columns to guarantee a unique ordering - of the rows arriving at the DISTINCT filter. - (DISTINCT ON processing occurs after ORDER - BY sorting.) + of the rows arriving at the DISTINCT filter. + (DISTINCT ON processing occurs after ORDER + BY sorting.) - The DISTINCT ON clause is not part of the SQL standard + The DISTINCT ON clause is not part of the SQL standard and is sometimes considered bad style because of the potentially indeterminate nature of its results. 
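     As an illustration, given a hypothetical table
     weather_reports(location, time, report) (not one of the
     tables defined elsewhere in this chapter), a query such as:
SELECT DISTINCT ON (location) location, time, report
    FROM weather_reports
    ORDER BY location, time DESC;
     returns the most recent report for each location, because the
     ORDER BY makes that row the first one encountered in each
     set of rows sharing the same location.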
With judicious use of - GROUP BY and subqueries in FROM, this + GROUP BY and subqueries in FROM, this construct can be avoided, but it is often the most convenient alternative. @@ -1635,27 +1635,27 @@ SELECT DISTINCT ON (expression , - UNION effectively appends the result of + UNION effectively appends the result of query2 to the result of query1 (although there is no guarantee that this is the order in which the rows are actually returned). Furthermore, it eliminates duplicate rows from its result, in the same - way as DISTINCT, unless UNION ALL is used. + way as DISTINCT, unless UNION ALL is used. - INTERSECT returns all rows that are both in the result + INTERSECT returns all rows that are both in the result of query1 and in the result of query2. Duplicate rows are eliminated - unless INTERSECT ALL is used. + unless INTERSECT ALL is used. - EXCEPT returns all rows that are in the result of + EXCEPT returns all rows that are in the result of query1 but not in the result of query2. (This is sometimes called the - difference between two queries.) Again, duplicates - are eliminated unless EXCEPT ALL is used. + difference between two queries.) Again, duplicates + are eliminated unless EXCEPT ALL is used. @@ -1663,7 +1663,7 @@ SELECT DISTINCT ON (expression , union compatible, which means that they return the same number of columns and the corresponding columns have compatible data types, as - described in . + described in . @@ -1690,7 +1690,7 @@ SELECT DISTINCT ON (expression , - The ORDER BY clause specifies the sort order: + The ORDER BY clause specifies the sort order: SELECT select_list FROM table_expression @@ -1705,17 +1705,17 @@ SELECT a, b FROM table1 ORDER BY a + b, c; When more than one expression is specified, the later values are used to sort rows that are equal according to the earlier values. Each expression can be followed by an optional - ASC or DESC keyword to set the sort direction to - ascending or descending. ASC order is the default. + ASC or DESC keyword to set the sort direction to + ascending or descending. ASC order is the default. Ascending order puts smaller values first, where smaller is defined in terms of the < operator. Similarly, descending order is determined with the > operator. - Actually, PostgreSQL uses the default B-tree - operator class for the expression's data type to determine the sort - ordering for ASC and DESC. Conventionally, + Actually, PostgreSQL uses the default B-tree + operator class for the expression's data type to determine the sort + ordering for ASC and DESC. Conventionally, data types will be set up so that the < and > operators correspond to this sort ordering, but a user-defined data type's designer could choose to do something @@ -1725,22 +1725,22 @@ SELECT a, b FROM table1 ORDER BY a + b, c; - The NULLS FIRST and NULLS LAST options can be + The NULLS FIRST and NULLS LAST options can be used to determine whether nulls appear before or after non-null values in the sort ordering. By default, null values sort as if larger than any - non-null value; that is, NULLS FIRST is the default for - DESC order, and NULLS LAST otherwise. + non-null value; that is, NULLS FIRST is the default for + DESC order, and NULLS LAST otherwise. Note that the ordering options are considered independently for each - sort column. For example ORDER BY x, y DESC means - ORDER BY x ASC, y DESC, which is not the same as - ORDER BY x DESC, y DESC. + sort column. 
For example ORDER BY x, y DESC means + ORDER BY x ASC, y DESC, which is not the same as + ORDER BY x DESC, y DESC. - A sort_expression can also be the column label or number + A sort_expression can also be the column label or number of an output column, as in: SELECT a + b AS sum, c FROM table1 ORDER BY sum; @@ -1748,21 +1748,21 @@ SELECT a, max(b) FROM table1 GROUP BY a ORDER BY 1; both of which sort by the first output column. Note that an output column name has to stand alone, that is, it cannot be used in an expression - — for example, this is not correct: + — for example, this is not correct: SELECT a + b AS sum, c FROM table1 ORDER BY sum + c; -- wrong This restriction is made to reduce ambiguity. There is still - ambiguity if an ORDER BY item is a simple name that + ambiguity if an ORDER BY item is a simple name that could match either an output column name or a column from the table expression. The output column is used in such cases. This would - only cause confusion if you use AS to rename an output + only cause confusion if you use AS to rename an output column to match some other table column's name. - ORDER BY can be applied to the result of a - UNION, INTERSECT, or EXCEPT + ORDER BY can be applied to the result of a + UNION, INTERSECT, or EXCEPT combination, but in this case it is only permitted to sort by output column names or numbers, not by expressions. @@ -1781,7 +1781,7 @@ SELECT a + b AS sum, c FROM table1 ORDER BY sum + c; -- wrong - LIMIT and OFFSET allow you to retrieve just + LIMIT and OFFSET allow you to retrieve just a portion of the rows that are generated by the rest of the query: SELECT select_list @@ -1794,49 +1794,49 @@ SELECT select_list If a limit count is given, no more than that many rows will be returned (but possibly fewer, if the query itself yields fewer rows). - LIMIT ALL is the same as omitting the LIMIT - clause, as is LIMIT with a NULL argument. + LIMIT ALL is the same as omitting the LIMIT + clause, as is LIMIT with a NULL argument. - OFFSET says to skip that many rows before beginning to - return rows. OFFSET 0 is the same as omitting the - OFFSET clause, as is OFFSET with a NULL argument. + OFFSET says to skip that many rows before beginning to + return rows. OFFSET 0 is the same as omitting the + OFFSET clause, as is OFFSET with a NULL argument. - If both OFFSET - and LIMIT appear, then OFFSET rows are - skipped before starting to count the LIMIT rows that + If both OFFSET + and LIMIT appear, then OFFSET rows are + skipped before starting to count the LIMIT rows that are returned. - When using LIMIT, it is important to use an - ORDER BY clause that constrains the result rows into a + When using LIMIT, it is important to use an + ORDER BY clause that constrains the result rows into a unique order. Otherwise you will get an unpredictable subset of the query's rows. You might be asking for the tenth through twentieth rows, but tenth through twentieth in what ordering? The - ordering is unknown, unless you specified ORDER BY. + ordering is unknown, unless you specified ORDER BY. - The query optimizer takes LIMIT into account when + The query optimizer takes LIMIT into account when generating query plans, so you are very likely to get different plans (yielding different row orders) depending on what you give - for LIMIT and OFFSET. Thus, using - different LIMIT/OFFSET values to select + for LIMIT and OFFSET. 
Thus, using + different LIMIT/OFFSET values to select different subsets of a query result will give inconsistent results unless you enforce a predictable - result ordering with ORDER BY. This is not a bug; it + result ordering with ORDER BY. This is not a bug; it is an inherent consequence of the fact that SQL does not promise to deliver the results of a query in any particular order unless - ORDER BY is used to constrain the order. + ORDER BY is used to constrain the order. - The rows skipped by an OFFSET clause still have to be - computed inside the server; therefore a large OFFSET + The rows skipped by an OFFSET clause still have to be + computed inside the server; therefore a large OFFSET might be inefficient. @@ -1850,18 +1850,18 @@ SELECT select_list - VALUES provides a way to generate a constant table + VALUES provides a way to generate a constant table that can be used in a query without having to actually create and populate a table on-disk. The syntax is -VALUES ( expression [, ...] ) [, ...] +VALUES ( expression [, ...] ) [, ...] Each parenthesized list of expressions generates a row in the table. The lists must all have the same number of elements (i.e., the number of columns in the table), and corresponding entries in each list must have compatible data types. The actual data type assigned to each column - of the result is determined using the same rules as for UNION - (see ). + of the result is determined using the same rules as for UNION + (see ). @@ -1881,8 +1881,8 @@ SELECT 3, 'three'; By default, PostgreSQL assigns the names - column1, column2, etc. to the columns of a - VALUES table. The column names are not specified by the + column1, column2, etc. to the columns of a + VALUES table. The column names are not specified by the SQL standard and different database systems do it differently, so it's usually better to override the default names with a table alias list, like this: @@ -1898,21 +1898,21 @@ SELECT 3, 'three'; - Syntactically, VALUES followed by expression lists is + Syntactically, VALUES followed by expression lists is treated as equivalent to: SELECT select_list FROM table_expression - and can appear anywhere a SELECT can. For example, you can - use it as part of a UNION, or attach a - sort_specification (ORDER BY, - LIMIT, and/or OFFSET) to it. VALUES - is most commonly used as the data source in an INSERT command, + and can appear anywhere a SELECT can. For example, you can + use it as part of a UNION, or attach a + sort_specification (ORDER BY, + LIMIT, and/or OFFSET) to it. VALUES + is most commonly used as the data source in an INSERT command, and next most commonly as a subquery. - For more information see . + For more information see . @@ -1932,34 +1932,34 @@ SELECT select_list FROM table_expression - WITH provides a way to write auxiliary statements for use in a + WITH provides a way to write auxiliary statements for use in a larger query. These statements, which are often referred to as Common Table Expressions or CTEs, can be thought of as defining temporary tables that exist just for one query. Each auxiliary statement - in a WITH clause can be a SELECT, - INSERT, UPDATE, or DELETE; and the - WITH clause itself is attached to a primary statement that can - also be a SELECT, INSERT, UPDATE, or - DELETE. + in a WITH clause can be a SELECT, + INSERT, UPDATE, or DELETE; and the + WITH clause itself is attached to a primary statement that can + also be a SELECT, INSERT, UPDATE, or + DELETE. 
- <command>SELECT</> in <literal>WITH</> + <command>SELECT</command> in <literal>WITH</literal> - The basic value of SELECT in WITH is to + The basic value of SELECT in WITH is to break down complicated queries into simpler parts. An example is: WITH regional_sales AS ( - SELECT region, SUM(amount) AS total_sales - FROM orders - GROUP BY region - ), top_regions AS ( - SELECT region - FROM regional_sales - WHERE total_sales > (SELECT SUM(total_sales)/10 FROM regional_sales) - ) + SELECT region, SUM(amount) AS total_sales + FROM orders + GROUP BY region +), top_regions AS ( + SELECT region + FROM regional_sales + WHERE total_sales > (SELECT SUM(total_sales)/10 FROM regional_sales) +) SELECT region, product, SUM(quantity) AS product_units, @@ -1970,21 +1970,25 @@ GROUP BY region, product; which displays per-product sales totals in only the top sales regions. - The WITH clause defines two auxiliary statements named - regional_sales and top_regions, - where the output of regional_sales is used in - top_regions and the output of top_regions - is used in the primary SELECT query. - This example could have been written without WITH, + The WITH clause defines two auxiliary statements named + regional_sales and top_regions, + where the output of regional_sales is used in + top_regions and the output of top_regions + is used in the primary SELECT query. + This example could have been written without WITH, but we'd have needed two levels of nested sub-SELECTs. It's a bit easier to follow this way. - The optional RECURSIVE modifier changes WITH + + RECURSIVE + in common table expressions + + The optional RECURSIVE modifier changes WITH from a mere syntactic convenience into a feature that accomplishes things not otherwise possible in standard SQL. Using - RECURSIVE, a WITH query can refer to its own + RECURSIVE, a WITH query can refer to its own output. A very simple example is this query to sum the integers from 1 through 100: @@ -1997,10 +2001,10 @@ WITH RECURSIVE t(n) AS ( SELECT sum(n) FROM t; - The general form of a recursive WITH query is always a - non-recursive term, then UNION (or - UNION ALL), then a - recursive term, where only the recursive term can contain + The general form of a recursive WITH query is always a + non-recursive term, then UNION (or + UNION ALL), then a + recursive term, where only the recursive term can contain a reference to the query's own output. Such a query is executed as follows: @@ -2010,10 +2014,10 @@ SELECT sum(n) FROM t; - Evaluate the non-recursive term. For UNION (but not - UNION ALL), discard duplicate rows. Include all remaining + Evaluate the non-recursive term. For UNION (but not + UNION ALL), discard duplicate rows. Include all remaining rows in the result of the recursive query, and also place them in a - temporary working table. + temporary working table. @@ -2026,10 +2030,10 @@ SELECT sum(n) FROM t; Evaluate the recursive term, substituting the current contents of the working table for the recursive self-reference. - For UNION (but not UNION ALL), discard + For UNION (but not UNION ALL), discard duplicate rows and rows that duplicate any previous result row. Include all remaining rows in the result of the recursive query, and - also place them in a temporary intermediate table. + also place them in a temporary intermediate table. 
@@ -2046,7 +2050,7 @@ SELECT sum(n) FROM t; Strictly speaking, this process is iteration not recursion, but - RECURSIVE is the terminology chosen by the SQL standards + RECURSIVE is the terminology chosen by the SQL standards committee. @@ -2054,7 +2058,7 @@ SELECT sum(n) FROM t; In the example above, the working table has just a single row in each step, and it takes on the values from 1 through 100 in successive steps. In - the 100th step, there is no output because of the WHERE + the 100th step, there is no output because of the WHERE clause, and so the query terminates. @@ -2071,7 +2075,7 @@ WITH RECURSIVE included_parts(sub_part, part, quantity) AS ( SELECT p.sub_part, p.part, p.quantity FROM included_parts pr, parts p WHERE p.part = pr.sub_part - ) +) SELECT sub_part, SUM(quantity) as total_quantity FROM included_parts GROUP BY sub_part @@ -2082,71 +2086,71 @@ GROUP BY sub_part When working with recursive queries it is important to be sure that the recursive part of the query will eventually return no tuples, or else the query will loop indefinitely. Sometimes, using - UNION instead of UNION ALL can accomplish this + UNION instead of UNION ALL can accomplish this by discarding rows that duplicate previous output rows. However, often a cycle does not involve output rows that are completely duplicate: it may be necessary to check just one or a few fields to see if the same point has been reached before. The standard method for handling such situations is to compute an array of the already-visited values. For example, consider - the following query that searches a table graph using a - link field: + the following query that searches a table graph using a + link field: WITH RECURSIVE search_graph(id, link, data, depth) AS ( - SELECT g.id, g.link, g.data, 1 - FROM graph g - UNION ALL - SELECT g.id, g.link, g.data, sg.depth + 1 - FROM graph g, search_graph sg - WHERE g.id = sg.link + SELECT g.id, g.link, g.data, 1 + FROM graph g + UNION ALL + SELECT g.id, g.link, g.data, sg.depth + 1 + FROM graph g, search_graph sg + WHERE g.id = sg.link ) SELECT * FROM search_graph; - This query will loop if the link relationships contain - cycles. Because we require a depth output, just changing - UNION ALL to UNION would not eliminate the looping. + This query will loop if the link relationships contain + cycles. Because we require a depth output, just changing + UNION ALL to UNION would not eliminate the looping. Instead we need to recognize whether we have reached the same row again while following a particular path of links. We add two columns - path and cycle to the loop-prone query: + path and cycle to the loop-prone query: WITH RECURSIVE search_graph(id, link, data, depth, path, cycle) AS ( - SELECT g.id, g.link, g.data, 1, - ARRAY[g.id], - false - FROM graph g - UNION ALL - SELECT g.id, g.link, g.data, sg.depth + 1, - path || g.id, - g.id = ANY(path) - FROM graph g, search_graph sg - WHERE g.id = sg.link AND NOT cycle + SELECT g.id, g.link, g.data, 1, + ARRAY[g.id], + false + FROM graph g + UNION ALL + SELECT g.id, g.link, g.data, sg.depth + 1, + path || g.id, + g.id = ANY(path) + FROM graph g, search_graph sg + WHERE g.id = sg.link AND NOT cycle ) SELECT * FROM search_graph; Aside from preventing cycles, the array value is often useful in its own - right as representing the path taken to reach any particular row. + right as representing the path taken to reach any particular row. In the general case where more than one field needs to be checked to recognize a cycle, use an array of rows. 
For example, if we needed to - compare fields f1 and f2: + compare fields f1 and f2: WITH RECURSIVE search_graph(id, link, data, depth, path, cycle) AS ( - SELECT g.id, g.link, g.data, 1, - ARRAY[ROW(g.f1, g.f2)], - false - FROM graph g - UNION ALL - SELECT g.id, g.link, g.data, sg.depth + 1, - path || ROW(g.f1, g.f2), - ROW(g.f1, g.f2) = ANY(path) - FROM graph g, search_graph sg - WHERE g.id = sg.link AND NOT cycle + SELECT g.id, g.link, g.data, 1, + ARRAY[ROW(g.f1, g.f2)], + false + FROM graph g + UNION ALL + SELECT g.id, g.link, g.data, sg.depth + 1, + path || ROW(g.f1, g.f2), + ROW(g.f1, g.f2) = ANY(path) + FROM graph g, search_graph sg + WHERE g.id = sg.link AND NOT cycle ) SELECT * FROM search_graph; @@ -2154,7 +2158,7 @@ SELECT * FROM search_graph; - Omit the ROW() syntax in the common case where only one field + Omit the ROW() syntax in the common case where only one field needs to be checked to recognize a cycle. This allows a simple array rather than a composite-type array to be used, gaining efficiency. @@ -2164,16 +2168,16 @@ SELECT * FROM search_graph; The recursive query evaluation algorithm produces its output in breadth-first search order. You can display the results in depth-first - search order by making the outer query ORDER BY a - path column constructed in this way. + search order by making the outer query ORDER BY a + path column constructed in this way. A helpful trick for testing queries - when you are not certain if they might loop is to place a LIMIT + when you are not certain if they might loop is to place a LIMIT in the parent query. For example, this query would loop forever without - the LIMIT: + the LIMIT: WITH RECURSIVE t(n) AS ( @@ -2185,26 +2189,26 @@ SELECT n FROM t LIMIT 100; This works because PostgreSQL's implementation - evaluates only as many rows of a WITH query as are actually + evaluates only as many rows of a WITH query as are actually fetched by the parent query. Using this trick in production is not recommended, because other systems might work differently. Also, it usually won't work if you make the outer query sort the recursive query's results or join them to some other table, because in such cases the - outer query will usually try to fetch all of the WITH query's + outer query will usually try to fetch all of the WITH query's output anyway. - A useful property of WITH queries is that they are evaluated + A useful property of WITH queries is that they are evaluated only once per execution of the parent query, even if they are referred to - more than once by the parent query or sibling WITH queries. + more than once by the parent query or sibling WITH queries. Thus, expensive calculations that are needed in multiple places can be - placed within a WITH query to avoid redundant work. Another + placed within a WITH query to avoid redundant work. Another possible application is to prevent unwanted multiple evaluations of functions with side-effects. However, the other side of this coin is that the optimizer is less able to - push restrictions from the parent query down into a WITH query - than an ordinary subquery. The WITH query will generally be + push restrictions from the parent query down into a WITH query + than an ordinary subquery. The WITH query will generally be evaluated as written, without suppression of rows that the parent query might discard afterwards. 
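     For example, in a query shaped like the following, where
     big_table and key are only placeholder names:
WITH w AS (
    SELECT * FROM big_table
)
SELECT * FROM w WHERE key = 123;
     the restriction on key is applied to the rows already
     produced by the WITH query, rather than being used to limit
     the scan of big_table inside it.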
(But, as mentioned above, evaluation might stop early if the reference(s) to the query demand only a limited number of @@ -2212,20 +2216,20 @@ SELECT n FROM t LIMIT 100; - The examples above only show WITH being used with - SELECT, but it can be attached in the same way to - INSERT, UPDATE, or DELETE. + The examples above only show WITH being used with + SELECT, but it can be attached in the same way to + INSERT, UPDATE, or DELETE. In each case it effectively provides temporary table(s) that can be referred to in the main command. - Data-Modifying Statements in <literal>WITH</> + Data-Modifying Statements in <literal>WITH</literal> - You can use data-modifying statements (INSERT, - UPDATE, or DELETE) in WITH. This + You can use data-modifying statements (INSERT, + UPDATE, or DELETE) in WITH. This allows you to perform several different operations in the same query. An example is: @@ -2241,32 +2245,32 @@ INSERT INTO products_log SELECT * FROM moved_rows; - This query effectively moves rows from products to - products_log. The DELETE in WITH - deletes the specified rows from products, returning their - contents by means of its RETURNING clause; and then the + This query effectively moves rows from products to + products_log. The DELETE in WITH + deletes the specified rows from products, returning their + contents by means of its RETURNING clause; and then the primary query reads that output and inserts it into - products_log. + products_log. - A fine point of the above example is that the WITH clause is - attached to the INSERT, not the sub-SELECT within - the INSERT. This is necessary because data-modifying - statements are only allowed in WITH clauses that are attached - to the top-level statement. However, normal WITH visibility - rules apply, so it is possible to refer to the WITH - statement's output from the sub-SELECT. + A fine point of the above example is that the WITH clause is + attached to the INSERT, not the sub-SELECT within + the INSERT. This is necessary because data-modifying + statements are only allowed in WITH clauses that are attached + to the top-level statement. However, normal WITH visibility + rules apply, so it is possible to refer to the WITH + statement's output from the sub-SELECT. - Data-modifying statements in WITH usually have - RETURNING clauses (see ), + Data-modifying statements in WITH usually have + RETURNING clauses (see ), as shown in the example above. - It is the output of the RETURNING clause, not the + It is the output of the RETURNING clause, not the target table of the data-modifying statement, that forms the temporary table that can be referred to by the rest of the query. If a - data-modifying statement in WITH lacks a RETURNING + data-modifying statement in WITH lacks a RETURNING clause, then it forms no temporary table and cannot be referred to in the rest of the query. Such a statement will be executed nonetheless. A not-particularly-useful example is: @@ -2278,15 +2282,15 @@ WITH t AS ( DELETE FROM bar; - This example would remove all rows from tables foo and - bar. The number of affected rows reported to the client - would only include rows removed from bar. + This example would remove all rows from tables foo and + bar. The number of affected rows reported to the client + would only include rows removed from bar. Recursive self-references in data-modifying statements are not allowed. 
In some cases it is possible to work around this limitation by - referring to the output of a recursive WITH, for example: + referring to the output of a recursive WITH, for example: WITH RECURSIVE included_parts(sub_part, part) AS ( @@ -2295,7 +2299,7 @@ WITH RECURSIVE included_parts(sub_part, part) AS ( SELECT p.sub_part, p.part FROM included_parts pr, parts p WHERE p.part = pr.sub_part - ) +) DELETE FROM parts WHERE part IN (SELECT part FROM included_parts); @@ -2304,24 +2308,24 @@ DELETE FROM parts - Data-modifying statements in WITH are executed exactly once, + Data-modifying statements in WITH are executed exactly once, and always to completion, independently of whether the primary query reads all (or indeed any) of their output. Notice that this is different - from the rule for SELECT in WITH: as stated in the - previous section, execution of a SELECT is carried only as far + from the rule for SELECT in WITH: as stated in the + previous section, execution of a SELECT is carried only as far as the primary query demands its output. - The sub-statements in WITH are executed concurrently with + The sub-statements in WITH are executed concurrently with each other and with the main query. Therefore, when using data-modifying - statements in WITH, the order in which the specified updates + statements in WITH, the order in which the specified updates actually happen is unpredictable. All the statements are executed with - the same snapshot (see ), so they - cannot see one another's effects on the target tables. This + the same snapshot (see ), so they + cannot see one another's effects on the target tables. This alleviates the effects of the unpredictability of the actual order of row - updates, and means that RETURNING data is the only way to - communicate changes between different WITH sub-statements and + updates, and means that RETURNING data is the only way to + communicate changes between different WITH sub-statements and the main query. An example of this is that in @@ -2332,8 +2336,8 @@ WITH t AS ( SELECT * FROM products; - the outer SELECT would return the original prices before the - action of the UPDATE, while in + the outer SELECT would return the original prices before the + action of the UPDATE, while in WITH t AS ( @@ -2343,7 +2347,7 @@ WITH t AS ( SELECT * FROM t; - the outer SELECT would return the updated data. + the outer SELECT would return the updated data. @@ -2353,15 +2357,15 @@ SELECT * FROM t; applies to deleting a row that was already updated in the same statement: only the update is performed. Therefore you should generally avoid trying to modify a single row twice in a single statement. In particular avoid - writing WITH sub-statements that could affect the same rows + writing WITH sub-statements that could affect the same rows changed by the main statement or a sibling sub-statement. The effects of such a statement will not be predictable. At present, any table used as the target of a data-modifying statement in - WITH must not have a conditional rule, nor an ALSO - rule, nor an INSTEAD rule that expands to multiple statements. + WITH must not have a conditional rule, nor an ALSO + rule, nor an INSTEAD rule that expands to multiple statements. diff --git a/doc/src/sgml/query.sgml b/doc/src/sgml/query.sgml index 98434925df..c0889743c4 100644 --- a/doc/src/sgml/query.sgml +++ b/doc/src/sgml/query.sgml @@ -12,7 +12,7 @@ tutorial is only intended to give you an introduction and is in no way a complete tutorial on SQL. 
Numerous books have been written on SQL, including and . + linkend="melt93"/> and . You should be aware that some PostgreSQL language features are extensions to the standard. @@ -29,7 +29,7 @@ in the directory src/tutorial/. (Binary distributions of PostgreSQL might not compile these files.) To use those - files, first change to that directory and run make: + files, first change to that directory and run make: $ cd ..../src/tutorial @@ -50,7 +50,7 @@ The \i command reads in commands from the - specified file. psql's -s option puts you in + specified file. psql's -s option puts you in single step mode which pauses before sending each statement to the server. The commands used in this section are in the file basics.sql. @@ -155,8 +155,8 @@ CREATE TABLE weather ( PostgreSQL supports the standard SQL types int, smallint, real, double - precision, char(N), - varchar(N), date, + precision, char(N), + varchar(N), date, time, timestamp, and interval, as well as other types of general utility and a rich set of geometric types. @@ -211,7 +211,7 @@ INSERT INTO weather VALUES ('San Francisco', 46, 50, 0.25, '1994-11-27'); Note that all data types use rather obvious input formats. Constants that are not simple numeric values usually must be - surrounded by single quotes ('), as in the example. + surrounded by single quotes ('), as in the example. The date type is actually quite flexible in what it accepts, but for this tutorial we will stick to the unambiguous @@ -267,7 +267,7 @@ COPY weather FROM '/home/user/weather.txt'; where the file name for the source file must be available on the machine running the backend process, not the client, since the backend process reads the file directly. You can read more about the - COPY command in . + COPY command in . @@ -336,8 +336,8 @@ SELECT city, (temp_hi+temp_lo)/2 AS temp_avg, date FROM weather; - A query can be qualified by adding a WHERE - clause that specifies which rows are wanted. The WHERE + A query can be qualified by adding a WHERE + clause that specifies which rows are wanted. The WHERE clause contains a Boolean (truth value) expression, and only rows for which the Boolean expression is true are returned. The usual Boolean operators (AND, @@ -446,9 +446,9 @@ SELECT DISTINCT city of the same or different tables at one time is called a join query. As an example, say you wish to list all the weather records together with the location of the - associated city. To do that, we need to compare the city - column of each row of the weather table with the - name column of all rows in the cities + associated city. To do that, we need to compare the city + column of each row of the weather table with the + name column of all rows in the cities table, and select the pairs of rows where these values match. @@ -483,7 +483,7 @@ SELECT * There is no result row for the city of Hayward. This is because there is no matching entry in the cities table for Hayward, so the join - ignores the unmatched rows in the weather table. We will see + ignores the unmatched rows in the weather table. We will see shortly how this can be fixed. @@ -520,7 +520,7 @@ SELECT city, temp_lo, temp_hi, prcp, date, location Since the columns all had different names, the parser automatically found which table they belong to. If there were duplicate column names in the two tables you'd need to - qualify the column names to show which one you + qualify the column names to show which one you meant, as in: @@ -599,7 +599,7 @@ SELECT * self join. 
As an example, suppose we wish to find all the weather records that are in the temperature range of other weather records. So we need to compare the - temp_lo and temp_hi columns of + temp_lo and temp_hi columns of each weather row to the temp_lo and temp_hi columns of all other @@ -620,8 +620,8 @@ SELECT W1.city, W1.temp_lo AS low, W1.temp_hi AS high, (2 rows) - Here we have relabeled the weather table as W1 and - W2 to be able to distinguish the left and right side + Here we have relabeled the weather table as W1 and + W2 to be able to distinguish the left and right side of the join. You can also use these kinds of aliases in other queries to save some typing, e.g.: @@ -644,7 +644,7 @@ SELECT * Like most other relational database products, PostgreSQL supports - aggregate functions. + aggregate functions. An aggregate function computes a single result from multiple input rows. For example, there are aggregates to compute the count, sum, @@ -747,14 +747,14 @@ SELECT city, max(temp_lo) which gives us the same results for only the cities that have all - temp_lo values below 40. Finally, if we only care about + temp_lo values below 40. Finally, if we only care about cities whose names begin with S, we might do: SELECT city, max(temp_lo) FROM weather - WHERE city LIKE 'S%' -- + WHERE city LIKE 'S%' -- GROUP BY city HAVING max(temp_lo) < 40; @@ -762,7 +762,7 @@ SELECT city, max(temp_lo) The LIKE operator does pattern matching and - is explained in . + is explained in . @@ -871,7 +871,7 @@ DELETE FROM tablename; Without a qualification, DELETE will - remove all rows from the given table, leaving it + remove all rows from the given table, leaving it empty. The system will not request confirmation before doing this! diff --git a/doc/src/sgml/rangetypes.sgml b/doc/src/sgml/rangetypes.sgml index 9557c16a4d..3a034d9b06 100644 --- a/doc/src/sgml/rangetypes.sgml +++ b/doc/src/sgml/rangetypes.sgml @@ -9,7 +9,7 @@ Range types are data types representing a range of values of some - element type (called the range's subtype). + element type (called the range's subtype). For instance, ranges of timestamp might be used to represent the ranges of time that a meeting room is reserved. In this case the data type @@ -65,7 +65,7 @@ In addition, you can define your own range types; - see for more information. + see for more information. @@ -94,8 +94,8 @@ SELECT int4range(10, 20) * int4range(15, 25); SELECT isempty(numrange(1, 5)); - See - and for complete lists of + See + and for complete lists of operators and functions on range types. @@ -117,7 +117,7 @@ SELECT isempty(numrange(1, 5)); represented by (. Likewise, an inclusive upper bound is represented by ], while an exclusive upper bound is represented by ). - (See for more details.) + (See for more details.) @@ -148,12 +148,12 @@ SELECT isempty(numrange(1, 5)); - Also, some element types have a notion of infinity, but that + Also, some element types have a notion of infinity, but that is just another value so far as the range type mechanisms are concerned. - For example, in timestamp ranges, [today,] means the same - thing as [today,). But [today,infinity] means - something different from [today,infinity) — the latter - excludes the special timestamp value infinity. + For example, in timestamp ranges, [today,] means the same + thing as [today,). But [today,infinity] means + something different from [today,infinity) — the latter + excludes the special timestamp value infinity. 
@@ -214,7 +214,7 @@ empty These rules are very similar to those for writing field values in - composite-type literals. See for + composite-type literals. See for additional commentary. @@ -284,25 +284,25 @@ SELECT numrange(NULL, 2.2); no valid values between them. This contrasts with continuous ranges, where it's always (or almost always) possible to identify other element values between two given values. For example, a range over the - numeric type is continuous, as is a range over timestamp. - (Even though timestamp has limited precision, and so could + numeric type is continuous, as is a range over timestamp. + (Even though timestamp has limited precision, and so could theoretically be treated as discrete, it's better to consider it continuous since the step size is normally not of interest.) Another way to think about a discrete range type is that there is a clear - idea of a next or previous value for each element value. + idea of a next or previous value for each element value. Knowing that, it is possible to convert between inclusive and exclusive representations of a range's bounds, by choosing the next or previous element value instead of the one originally given. - For example, in an integer range type [4,8] and - (3,9) denote the same set of values; but this would not be so + For example, in an integer range type [4,8] and + (3,9) denote the same set of values; but this would not be so for a range over numeric. - A discrete range type should have a canonicalization + A discrete range type should have a canonicalization function that is aware of the desired step size for the element type. The canonicalization function is charged with converting equivalent values of the range type to have identical representations, in particular @@ -352,8 +352,8 @@ SELECT '[1.234, 5.678]'::floatrange; If the subtype is considered to have discrete rather than continuous - values, the CREATE TYPE command should specify a - canonical function. + values, the CREATE TYPE command should specify a + canonical function. The canonicalization function takes an input range value, and must return an equivalent range value that may have different bounds and formatting. The canonical output for two ranges that represent the same set of values, @@ -364,7 +364,7 @@ SELECT '[1.234, 5.678]'::floatrange; formatting. In addition to adjusting the inclusive/exclusive bounds format, a canonicalization function might round off boundary values, in case the desired step size is larger than what the subtype is capable of - storing. For instance, a range type over timestamp could be + storing. For instance, a range type over timestamp could be defined to have a step size of an hour, in which case the canonicalization function would need to round off bounds that weren't a multiple of an hour, or perhaps throw an error instead. @@ -372,25 +372,25 @@ SELECT '[1.234, 5.678]'::floatrange; In addition, any range type that is meant to be used with GiST or SP-GiST - indexes should define a subtype difference, or subtype_diff, - function. (The index will still work without subtype_diff, + indexes should define a subtype difference, or subtype_diff, + function. (The index will still work without subtype_diff, but it is likely to be considerably less efficient than if a difference function is provided.) The subtype difference function takes two input values of the subtype, and returns their difference - (i.e., X minus Y) represented as - a float8 value. 
In our example above, the - function float8mi that underlies the regular float8 + (i.e., X minus Y) represented as + a float8 value. In our example above, the + function float8mi that underlies the regular float8 minus operator can be used; but for any other subtype, some type conversion would be necessary. Some creative thought about how to represent differences as numbers might be needed, too. To the greatest - extent possible, the subtype_diff function should agree with + extent possible, the subtype_diff function should agree with the sort ordering implied by the selected operator class and collation; that is, its result should be positive whenever its first argument is greater than its second according to the sort ordering. - A less-oversimplified example of a subtype_diff function is: + A less-oversimplified example of a subtype_diff function is: @@ -406,7 +406,7 @@ SELECT '[11:10, 23:00]'::timerange; - See for more information about creating + See for more information about creating range types. @@ -426,23 +426,23 @@ SELECT '[11:10, 23:00]'::timerange; CREATE INDEX reservation_idx ON reservation USING GIST (during); A GiST or SP-GiST index can accelerate queries involving these range operators: - =, - &&, - <@, - @>, - <<, - >>, - -|-, - &<, and - &> - (see for more information). + =, + &&, + <@, + @>, + <<, + >>, + -|-, + &<, and + &> + (see for more information). In addition, B-tree and hash indexes can be created for table columns of range types. For these index types, basically the only useful range operation is equality. There is a B-tree sort ordering defined for range - values, with corresponding < and > operators, + values, with corresponding < and > operators, but the ordering is rather arbitrary and not usually useful in the real world. Range types' B-tree and hash support is primarily meant to allow sorting and hashing internally in queries, rather than creation of @@ -462,7 +462,7 @@ CREATE INDEX reservation_idx ON reservation USING GIST (during); While UNIQUE is a natural constraint for scalar values, it is usually unsuitable for range types. Instead, an exclusion constraint is often more appropriate - (see CREATE TABLE + (see CREATE TABLE ... CONSTRAINT ... EXCLUDE). Exclusion constraints allow the specification of constraints such as non-overlapping on a range type. For example: @@ -491,7 +491,7 @@ with existing key (during)=(["2010-01-01 11:30:00","2010-01-01 15:00:00")). - You can use the btree_gist + You can use the btree_gist extension to define exclusion constraints on plain scalar data types, which can then be combined with range exclusions for maximum flexibility. For example, after btree_gist is installed, the following diff --git a/doc/src/sgml/recovery-config.sgml b/doc/src/sgml/recovery-config.sgml index 0a5d086248..a2bdffda94 100644 --- a/doc/src/sgml/recovery-config.sgml +++ b/doc/src/sgml/recovery-config.sgml @@ -11,23 +11,23 @@ This chapter describes the settings available in the - recovery.confrecovery.conf + recovery.confrecovery.conf file. They apply only for the duration of the recovery. They must be reset for any subsequent recovery you wish to perform. They cannot be changed once recovery has begun. - Settings in recovery.conf are specified in the format - name = 'value'. One parameter is specified per line. + Settings in recovery.conf are specified in the format + name = 'value'. One parameter is specified per line. Hash marks (#) designate the rest of the line as a comment. To embed a single quote in a parameter - value, write two quotes (''). 
+ value, write two quotes (''). - A sample file, share/recovery.conf.sample, - is provided in the installation's share/ directory. + A sample file, share/recovery.conf.sample, + is provided in the installation's share/ directory. @@ -38,7 +38,7 @@ restore_command (string) - restore_command recovery parameter + restore_command recovery parameter @@ -46,25 +46,25 @@ The local shell command to execute to retrieve an archived segment of the WAL file series. This parameter is required for archive recovery, but optional for streaming replication. - Any %f in the string is + Any %f in the string is replaced by the name of the file to retrieve from the archive, - and any %p is replaced by the copy destination path name + and any %p is replaced by the copy destination path name on the server. (The path name is relative to the current working directory, i.e., the cluster's data directory.) - Any %r is replaced by the name of the file containing the + Any %r is replaced by the name of the file containing the last valid restart point. That is the earliest file that must be kept to allow a restore to be restartable, so this information can be used to truncate the archive to just the minimum required to support - restarting from the current restore. %r is typically only + restarting from the current restore. %r is typically only used by warm-standby configurations - (see ). - Write %% to embed an actual % character. + (see ). + Write %% to embed an actual % character. It is important for the command to return a zero exit status - only if it succeeds. The command will be asked for file + only if it succeeds. The command will be asked for file names that are not present in the archive; it must return nonzero when so asked. Examples: @@ -82,33 +82,33 @@ restore_command = 'copy "C:\\server\\archivedir\\%f" "%p"' # Windows archive_cleanup_command (string) - archive_cleanup_command recovery parameter + archive_cleanup_command recovery parameter This optional parameter specifies a shell command that will be executed at every restartpoint. The purpose of - archive_cleanup_command is to provide a mechanism for + archive_cleanup_command is to provide a mechanism for cleaning up old archived WAL files that are no longer needed by the standby server. - Any %r is replaced by the name of the file containing the + Any %r is replaced by the name of the file containing the last valid restart point. - That is the earliest file that must be kept to allow a - restore to be restartable, and so all files earlier than %r + That is the earliest file that must be kept to allow a + restore to be restartable, and so all files earlier than %r may be safely removed. This information can be used to truncate the archive to just the minimum required to support restart from the current restore. - The module - is often used in archive_cleanup_command for + The module + is often used in archive_cleanup_command for single-standby configurations, for example: archive_cleanup_command = 'pg_archivecleanup /mnt/server/archivedir %r' Note however that if multiple standby servers are restoring from the same archive directory, you will need to ensure that you do not delete WAL files until they are no longer needed by any of the servers. - archive_cleanup_command would typically be used in a - warm-standby configuration (see ). - Write %% to embed an actual % character in the + archive_cleanup_command would typically be used in a + warm-standby configuration (see ). + Write %% to embed an actual % character in the command. 
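Taken together, the two parameters above are typically paired in a single-standby archive setup. The following is a minimal sketch, assuming the archive lives in /mnt/server/archivedir and that cp returns a nonzero exit status when the requested file is absent:

    # fetch archived WAL segments; a nonzero exit status must mean "file not in archive"
    restore_command = 'cp /mnt/server/archivedir/%f %p'
    # at each restartpoint, remove archived segments older than the last restart point (%r)
    archive_cleanup_command = 'pg_archivecleanup /mnt/server/archivedir %r'

As noted above, such a cleanup command is only safe when no other standby still needs the removed segments.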
@@ -123,17 +123,17 @@ restore_command = 'copy "C:\\server\\archivedir\\%f" "%p"' # Windows recovery_end_command (string) - recovery_end_command recovery parameter + recovery_end_command recovery parameter This parameter specifies a shell command that will be executed once only at the end of recovery. This parameter is optional. The purpose of the - recovery_end_command is to provide a mechanism for cleanup + recovery_end_command is to provide a mechanism for cleanup following replication or recovery. - Any %r is replaced by the name of the file containing the - last valid restart point, like in . + Any %r is replaced by the name of the file containing the + last valid restart point, like in . If the command returns a nonzero exit status then a warning log @@ -156,9 +156,9 @@ restore_command = 'copy "C:\\server\\archivedir\\%f" "%p"' # Windows By default, recovery will recover to the end of the WAL log. The following parameters can be used to specify an earlier stopping point. - At most one of recovery_target, - recovery_target_lsn, recovery_target_name, - recovery_target_time, or recovery_target_xid + At most one of recovery_target, + recovery_target_lsn, recovery_target_name, + recovery_target_time, or recovery_target_xid can be used; if more than one of these is specified in the configuration file, the last entry will be used. @@ -167,7 +167,7 @@ restore_command = 'copy "C:\\server\\archivedir\\%f" "%p"' # Windows recovery_target = 'immediate' - recovery_target recovery parameter + recovery_target recovery parameter @@ -178,7 +178,7 @@ restore_command = 'copy "C:\\server\\archivedir\\%f" "%p"' # Windows ended. - Technically, this is a string parameter, but 'immediate' + Technically, this is a string parameter, but 'immediate' is currently the only allowed value. @@ -187,13 +187,13 @@ restore_command = 'copy "C:\\server\\archivedir\\%f" "%p"' # Windows recovery_target_name (string) - recovery_target_name recovery parameter + recovery_target_name recovery parameter This parameter specifies the named restore point (created with - pg_create_restore_point()) to which recovery will proceed. + pg_create_restore_point()) to which recovery will proceed. @@ -201,7 +201,7 @@ restore_command = 'copy "C:\\server\\archivedir\\%f" "%p"' # Windows recovery_target_time (timestamp) - recovery_target_time recovery parameter + recovery_target_time recovery parameter @@ -209,7 +209,7 @@ restore_command = 'copy "C:\\server\\archivedir\\%f" "%p"' # Windows This parameter specifies the time stamp up to which recovery will proceed. The precise stopping point is also influenced by - . + . @@ -217,7 +217,7 @@ restore_command = 'copy "C:\\server\\archivedir\\%f" "%p"' # Windows recovery_target_xid (string) - recovery_target_xid recovery parameter + recovery_target_xid recovery parameter @@ -229,7 +229,7 @@ restore_command = 'copy "C:\\server\\archivedir\\%f" "%p"' # Windows The transactions that will be recovered are those that committed before (and optionally including) the specified one. The precise stopping point is also influenced by - . + . @@ -237,16 +237,16 @@ restore_command = 'copy "C:\\server\\archivedir\\%f" "%p"' # Windows recovery_target_lsn (pg_lsn) - recovery_target_lsn recovery parameter + recovery_target_lsn recovery parameter This parameter specifies the LSN of the write-ahead log location up to which recovery will proceed. The precise stopping point is also - influenced by . This + influenced by . This parameter is parsed using the system data type - pg_lsn. + pg_lsn. 
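To make the mutually exclusive target settings concrete, here is a hedged point-in-time-recovery sketch; the timestamp, restore-point name, transaction ID, and LSN below are placeholders, and only one target parameter should be left uncommented:

    restore_command = 'cp /mnt/server/archivedir/%f %p'
    recovery_target_time = '2018-11-11 12:00:00+00'   # stop replay at this commit timestamp
    #recovery_target_name = 'before_schema_change'    # a restore point made with pg_create_restore_point()
    #recovery_target_xid = '1234567'                  # stop at a specific transaction ID
    #recovery_target_lsn = '0/70006B0'                # stop at a WAL location, parsed as pg_lsn
    #recovery_target = 'immediate'                    # or stop as soon as a consistent state is reached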
@@ -262,7 +262,7 @@ restore_command = 'copy "C:\\server\\archivedir\\%f" "%p"' # Windows xreflabel="recovery_target_inclusive"> recovery_target_inclusive (boolean) - recovery_target_inclusive recovery parameter + recovery_target_inclusive recovery parameter @@ -270,11 +270,12 @@ restore_command = 'copy "C:\\server\\archivedir\\%f" "%p"' # Windows Specifies whether to stop just after the specified recovery target (true), or just before the recovery target (false). - Applies when either - or is specified. + Applies when , + , or + is specified. This setting controls whether transactions - having exactly the target commit time or ID, respectively, will - be included in the recovery. Default is true. + having exactly the target WAL location (LSN), commit time, or transaction ID, respectively, will + be included in the recovery. Default is true. @@ -283,19 +284,19 @@ restore_command = 'copy "C:\\server\\archivedir\\%f" "%p"' # Windows xreflabel="recovery_target_timeline"> recovery_target_timeline (string) - recovery_target_timeline recovery parameter + recovery_target_timeline recovery parameter Specifies recovering into a particular timeline. The default is to recover along the same timeline that was current when the - base backup was taken. Setting this to latest recovers + base backup was taken. Setting this to latest recovers to the latest timeline found in the archive, which is useful in a standby server. Other than that you only need to set this parameter in complex re-recovery situations, where you need to return to a state that itself was reached after a point-in-time recovery. - See for discussion. + See for discussion. @@ -304,47 +305,47 @@ restore_command = 'copy "C:\\server\\archivedir\\%f" "%p"' # Windows xreflabel="recovery_target_action"> recovery_target_action (enum) - recovery_target_action recovery parameter + recovery_target_action recovery parameter Specifies what action the server should take once the recovery target is - reached. The default is pause, which means recovery will - be paused. promote means the recovery process will finish + reached. The default is pause, which means recovery will + be paused. promote means the recovery process will finish and the server will start to accept connections. - Finally shutdown will stop the server after reaching the + Finally shutdown will stop the server after reaching the recovery target. - The intended use of the pause setting is to allow queries + The intended use of the pause setting is to allow queries to be executed against the database to check if this recovery target is the most desirable point for recovery. The paused state can be resumed by - using pg_wal_replay_resume() (see - ), which then + using pg_wal_replay_resume() (see + ), which then causes recovery to end. If this recovery target is not the desired stopping point, then shut down the server, change the recovery target settings to a later target and restart to continue recovery. - The shutdown setting is useful to have the instance ready + The shutdown setting is useful to have the instance ready at the exact replay point desired. The instance will still be able to replay more WAL records (and in fact will have to replay WAL records since the last checkpoint next time it is started). 
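Combining these target-control parameters, one plausible pause-and-inspect configuration (all values are placeholders) would be:

    recovery_target_time = '2018-11-11 12:00:00+00'
    recovery_target_inclusive = false     # stop just before transactions committed at the target time
    recovery_target_timeline = 'latest'   # follow timeline switches found in the archive
    recovery_target_action = 'pause'      # inspect the result, then pg_wal_replay_resume() to finish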
- Note that because recovery.conf will not be renamed when - recovery_target_action is set to shutdown, + Note that because recovery.conf will not be renamed when + recovery_target_action is set to shutdown, any subsequent start will end with immediate shutdown unless the - configuration is changed or the recovery.conf file is + configuration is changed or the recovery.conf file is removed manually. This setting has no effect if no recovery target is set. - If is not enabled, a setting of - pause will act the same as shutdown. + If is not enabled, a setting of + pause will act the same as shutdown. @@ -360,34 +361,34 @@ restore_command = 'copy "C:\\server\\archivedir\\%f" "%p"' # Windows standby_mode (boolean) - standby_mode recovery parameter + standby_mode recovery parameter - Specifies whether to start the PostgreSQL server as - a standby. If this parameter is on, the server will + Specifies whether to start the PostgreSQL server as + a standby. If this parameter is on, the server will not stop recovery when the end of archived WAL is reached, but will keep trying to continue recovery by fetching new WAL segments - using restore_command + using restore_command and/or by connecting to the primary server as specified by the - primary_conninfo setting. + primary_conninfo setting. primary_conninfo (string) - primary_conninfo recovery parameter + primary_conninfo recovery parameter Specifies a connection string to be used for the standby server to connect with the primary. This string is in the format - described in . If any option is + described in . If any option is unspecified in this string, then the corresponding environment - variable (see ) is checked. If the + variable (see ) is checked. If the environment variable is not set either, then defaults are used. @@ -397,24 +398,24 @@ restore_command = 'copy "C:\\server\\archivedir\\%f" "%p"' # Windows the same as the standby server's default. Also specify a user name corresponding to a suitably-privileged role on the primary (see - ). + ). A password needs to be provided too, if the primary demands password authentication. It can be provided in the primary_conninfo string, or in a separate - ~/.pgpass file on the standby server (use - replication as the database name). + ~/.pgpass file on the standby server (use + replication as the database name). Do not specify a database name in the primary_conninfo string. - This setting has no effect if standby_mode is off. + This setting has no effect if standby_mode is off. primary_slot_name (string) - primary_slot_name recovery parameter + primary_slot_name recovery parameter @@ -422,8 +423,8 @@ restore_command = 'copy "C:\\server\\archivedir\\%f" "%p"' # Windows Optionally specifies an existing replication slot to be used when connecting to the primary via streaming replication to control resource removal on the upstream node - (see ). - This setting has no effect if primary_conninfo is not + (see ). + This setting has no effect if primary_conninfo is not set. @@ -431,15 +432,16 @@ restore_command = 'copy "C:\\server\\archivedir\\%f" "%p"' # Windows trigger_file (string) - trigger_file recovery parameter + trigger_file recovery parameter Specifies a trigger file whose presence ends recovery in the standby. Even if this value is not set, you can still promote - the standby using pg_ctl promote. - This setting has no effect if standby_mode is off. + the standby using pg_ctl promote or calling + pg_promote. + This setting has no effect if standby_mode is off. 
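A minimal streaming-standby recovery.conf combining the parameters above might read as follows; the host name, role, slot name, and trigger-file path are all placeholders:

    standby_mode = 'on'
    primary_conninfo = 'host=primary.example.com port=5432 user=replicator'  # password may come from ~/.pgpass
    primary_slot_name = 'standby1_slot'            # optional: an existing replication slot on the primary
    trigger_file = '/var/lib/postgresql/promote'   # touching this file (or pg_ctl promote) ends recovery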
@@ -447,7 +449,7 @@ restore_command = 'copy "C:\\server\\archivedir\\%f" "%p"' # Windows recovery_min_apply_delay (integer) - recovery_min_apply_delay recovery parameter + recovery_min_apply_delay recovery parameter @@ -488,7 +490,7 @@ restore_command = 'copy "C:\\server\\archivedir\\%f" "%p"' # Windows This parameter is intended for use with streaming replication deployments; however, if the parameter is specified it will be honored in all cases. - hot_standby_feedback will be delayed by use of this feature + hot_standby_feedback will be delayed by use of this feature which could lead to bloat on the master; use both together with care. diff --git a/doc/src/sgml/ref/abort.sgml b/doc/src/sgml/ref/abort.sgml index ed9332c395..21799d2a83 100644 --- a/doc/src/sgml/ref/abort.sgml +++ b/doc/src/sgml/ref/abort.sgml @@ -3,7 +3,7 @@ doc/src/sgml/ref/abort.sgml PostgreSQL documentation --> - + ABORT @@ -33,7 +33,7 @@ ABORT [ WORK | TRANSACTION ] all the updates made by the transaction to be discarded. This command is identical in behavior to the standard SQL command - , + , and is present only for historical reasons. @@ -58,12 +58,12 @@ ABORT [ WORK | TRANSACTION ] Notes - Use to + Use to successfully terminate a transaction. - Issuing ABORT outside of a transaction block + Issuing ABORT outside of a transaction block emits a warning and otherwise has no effect. @@ -92,9 +92,9 @@ ABORT; See Also - - - + + + diff --git a/doc/src/sgml/ref/allfiles.sgml b/doc/src/sgml/ref/allfiles.sgml index 01acc2ef9d..c81c87ef41 100644 --- a/doc/src/sgml/ref/allfiles.sgml +++ b/doc/src/sgml/ref/allfiles.sgml @@ -26,8 +26,10 @@ Complete list of usable sgml source files in this directory. + + @@ -48,6 +50,7 @@ Complete list of usable sgml source files in this directory. + @@ -75,6 +78,7 @@ Complete list of usable sgml source files in this directory. + @@ -122,8 +126,10 @@ Complete list of usable sgml source files in this directory. + + @@ -204,6 +210,7 @@ Complete list of usable sgml source files in this directory. + diff --git a/doc/src/sgml/ref/alter_aggregate.sgml b/doc/src/sgml/ref/alter_aggregate.sgml index 7b7616ca01..2ad3e0440b 100644 --- a/doc/src/sgml/ref/alter_aggregate.sgml +++ b/doc/src/sgml/ref/alter_aggregate.sgml @@ -3,7 +3,7 @@ doc/src/sgml/ref/alter_aggregate.sgml PostgreSQL documentation --> - + ALTER AGGREGATE @@ -43,7 +43,7 @@ ALTER AGGREGATE name ( aggregate_signatu - You must own the aggregate function to use ALTER AGGREGATE. + You must own the aggregate function to use ALTER AGGREGATE. To change the schema of an aggregate function, you must also have CREATE privilege on the new schema. To alter the owner, you must also be a direct or indirect member of the new @@ -73,8 +73,8 @@ ALTER AGGREGATE name ( aggregate_signatu - The mode of an argument: IN or VARIADIC. - If omitted, the default is IN. + The mode of an argument: IN or VARIADIC. + If omitted, the default is IN. @@ -97,10 +97,10 @@ ALTER AGGREGATE name ( aggregate_signatu An input data type on which the aggregate function operates. - To reference a zero-argument aggregate function, write * + To reference a zero-argument aggregate function, write * in place of the list of argument specifications. To reference an ordered-set aggregate function, write - ORDER BY between the direct and aggregated argument + ORDER BY between the direct and aggregated argument specifications. 
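As a hedged illustration of the aggregate_signature notations just described, the asterisk and ORDER BY forms are written like this (the aggregate and role names are hypothetical):

    ALTER AGGREGATE myavg(integer) RENAME TO my_average;                        -- ordinary aggregate
    ALTER AGGREGATE mycount(*) OWNER TO joe;                                    -- zero-argument aggregate
    ALTER AGGREGATE mypercentile(float8 ORDER BY integer) SET SCHEMA myschema;  -- ordered-set aggregate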
@@ -140,13 +140,13 @@ ALTER AGGREGATE name ( aggregate_signatu The recommended syntax for referencing an ordered-set aggregate - is to write ORDER BY between the direct and aggregated + is to write ORDER BY between the direct and aggregated argument specifications, in the same style as in - . However, it will also work to - omit ORDER BY and just run the direct and aggregated + . However, it will also work to + omit ORDER BY and just run the direct and aggregated argument specifications into a single list. In this abbreviated form, - if VARIADIC "any" was used in both the direct and - aggregated argument lists, write VARIADIC "any" only once. + if VARIADIC "any" was used in both the direct and + aggregated argument lists, write VARIADIC "any" only once. @@ -195,8 +195,8 @@ ALTER AGGREGATE mypercentile(float8, integer) SET SCHEMA myschema; See Also - - + + diff --git a/doc/src/sgml/ref/alter_collation.sgml b/doc/src/sgml/ref/alter_collation.sgml index 30e8c756a1..b51b3a2564 100644 --- a/doc/src/sgml/ref/alter_collation.sgml +++ b/doc/src/sgml/ref/alter_collation.sgml @@ -3,7 +3,7 @@ doc/src/sgml/ref/alter_collation.sgml PostgreSQL documentation --> - + ALTER COLLATION @@ -38,7 +38,7 @@ ALTER COLLATION name SET SCHEMA new_sche - You must own the collation to use ALTER COLLATION. + You must own the collation to use ALTER COLLATION. To alter the owner, you must also be a direct or indirect member of the new owning role, and that role must have CREATE privilege on the collation's schema. (These restrictions enforce that altering the @@ -94,7 +94,7 @@ ALTER COLLATION name SET SCHEMA new_sche Update the collation's version. See below. + endterm="sql-altercollation-notes-title"/> below. @@ -176,8 +176,8 @@ ALTER COLLATION "en_US" OWNER TO joe; See Also - - + + diff --git a/doc/src/sgml/ref/alter_conversion.sgml b/doc/src/sgml/ref/alter_conversion.sgml index 3514720d03..c42bd8b3e4 100644 --- a/doc/src/sgml/ref/alter_conversion.sgml +++ b/doc/src/sgml/ref/alter_conversion.sgml @@ -3,7 +3,7 @@ doc/src/sgml/ref/alter_conversion.sgml PostgreSQL documentation --> - + ALTER CONVERSION @@ -36,7 +36,7 @@ ALTER CONVERSION name SET SCHEMA new_sch - You must own the conversion to use ALTER CONVERSION. + You must own the conversion to use ALTER CONVERSION. To alter the owner, you must also be a direct or indirect member of the new owning role, and that role must have CREATE privilege on the conversion's schema. (These restrictions enforce that altering the @@ -120,8 +120,8 @@ ALTER CONVERSION iso_8859_1_to_utf8 OWNER TO joe; See Also - - + + diff --git a/doc/src/sgml/ref/alter_database.sgml b/doc/src/sgml/ref/alter_database.sgml index cfc28cf9a7..7db878cf53 100644 --- a/doc/src/sgml/ref/alter_database.sgml +++ b/doc/src/sgml/ref/alter_database.sgml @@ -3,7 +3,7 @@ doc/src/sgml/ref/alter_database.sgml PostgreSQL documentation --> - + ALTER DATABASE @@ -21,24 +21,24 @@ PostgreSQL documentation -ALTER DATABASE name [ [ WITH ] option [ ... ] ] +ALTER DATABASE name [ [ WITH ] option [ ... 
] ] -where option can be: +where option can be: - ALLOW_CONNECTIONS allowconn - CONNECTION LIMIT connlimit - IS_TEMPLATE istemplate + ALLOW_CONNECTIONS allowconn + CONNECTION LIMIT connlimit + IS_TEMPLATE istemplate -ALTER DATABASE name RENAME TO new_name +ALTER DATABASE name RENAME TO new_name -ALTER DATABASE name OWNER TO { new_owner | CURRENT_USER | SESSION_USER } +ALTER DATABASE name OWNER TO { new_owner | CURRENT_USER | SESSION_USER } -ALTER DATABASE name SET TABLESPACE new_tablespace +ALTER DATABASE name SET TABLESPACE new_tablespace -ALTER DATABASE name SET configuration_parameter { TO | = } { value | DEFAULT } -ALTER DATABASE name SET configuration_parameter FROM CURRENT -ALTER DATABASE name RESET configuration_parameter -ALTER DATABASE name RESET ALL +ALTER DATABASE name SET configuration_parameter { TO | = } { value | DEFAULT } +ALTER DATABASE name SET configuration_parameter FROM CURRENT +ALTER DATABASE name RESET configuration_parameter +ALTER DATABASE name RESET ALL @@ -89,7 +89,7 @@ ALTER DATABASE name RESET ALL database. Whenever a new session is subsequently started in that database, the specified value becomes the session default value. The database-specific default overrides whatever setting is present - in postgresql.conf or has been received from the + in postgresql.conf or has been received from the postgres command line. Only the database owner or a superuser can change the session defaults for a database. Certain variables cannot be set this way, or can only be @@ -102,7 +102,7 @@ ALTER DATABASE name RESET ALL - name + name The name of the database whose attributes are to be altered. @@ -164,6 +164,10 @@ ALTER DATABASE name RESET ALL The new default tablespace of the database. + + + This form of the command cannot be executed inside a transaction block. + @@ -179,12 +183,12 @@ ALTER DATABASE name RESET ALL database-specific setting is removed, so the system-wide default setting will be inherited in new sessions. Use RESET ALL to clear all database-specific settings. - SET FROM CURRENT saves the session's current value of + SET FROM CURRENT saves the session's current value of the parameter as the database-specific value. - See and + See and for more information about allowed parameter names and values. @@ -199,7 +203,7 @@ ALTER DATABASE name RESET ALL It is also possible to tie a session default to a specific role rather than to a database; see - . + . Role-specific settings override database-specific ones if there is a conflict. @@ -230,10 +234,10 @@ ALTER DATABASE test SET enable_indexscan TO off; See Also - - - - + + + + diff --git a/doc/src/sgml/ref/alter_default_privileges.sgml b/doc/src/sgml/ref/alter_default_privileges.sgml index e3363f868a..0c09f1db5c 100644 --- a/doc/src/sgml/ref/alter_default_privileges.sgml +++ b/doc/src/sgml/ref/alter_default_privileges.sgml @@ -3,7 +3,7 @@ doc/src/sgml/ref/alter_default_privileges.sgml PostgreSQL documentation --> - + ALTER DEFAULT PRIVILEGES @@ -31,55 +31,55 @@ ALTER DEFAULT PRIVILEGES GRANT { { SELECT | INSERT | UPDATE | DELETE | TRUNCATE | REFERENCES | TRIGGER } [, ...] | ALL [ PRIVILEGES ] } ON TABLES - TO { [ GROUP ] role_name | PUBLIC } [, ...] [ WITH GRANT OPTION ] + TO { [ GROUP ] role_name | PUBLIC } [, ...] [ WITH GRANT OPTION ] GRANT { { USAGE | SELECT | UPDATE } [, ...] | ALL [ PRIVILEGES ] } ON SEQUENCES - TO { [ GROUP ] role_name | PUBLIC } [, ...] [ WITH GRANT OPTION ] + TO { [ GROUP ] role_name | PUBLIC } [, ...] 
[ WITH GRANT OPTION ] GRANT { EXECUTE | ALL [ PRIVILEGES ] } - ON FUNCTIONS - TO { [ GROUP ] role_name | PUBLIC } [, ...] [ WITH GRANT OPTION ] + ON { FUNCTIONS | ROUTINES } + TO { [ GROUP ] role_name | PUBLIC } [, ...] [ WITH GRANT OPTION ] GRANT { USAGE | ALL [ PRIVILEGES ] } ON TYPES - TO { [ GROUP ] role_name | PUBLIC } [, ...] [ WITH GRANT OPTION ] + TO { [ GROUP ] role_name | PUBLIC } [, ...] [ WITH GRANT OPTION ] GRANT { USAGE | CREATE | ALL [ PRIVILEGES ] } ON SCHEMAS - TO { [ GROUP ] role_name | PUBLIC } [, ...] [ WITH GRANT OPTION ] + TO { [ GROUP ] role_name | PUBLIC } [, ...] [ WITH GRANT OPTION ] REVOKE [ GRANT OPTION FOR ] { { SELECT | INSERT | UPDATE | DELETE | TRUNCATE | REFERENCES | TRIGGER } [, ...] | ALL [ PRIVILEGES ] } ON TABLES - FROM { [ GROUP ] role_name | PUBLIC } [, ...] + FROM { [ GROUP ] role_name | PUBLIC } [, ...] [ CASCADE | RESTRICT ] REVOKE [ GRANT OPTION FOR ] { { USAGE | SELECT | UPDATE } [, ...] | ALL [ PRIVILEGES ] } ON SEQUENCES - FROM { [ GROUP ] role_name | PUBLIC } [, ...] + FROM { [ GROUP ] role_name | PUBLIC } [, ...] [ CASCADE | RESTRICT ] REVOKE [ GRANT OPTION FOR ] { EXECUTE | ALL [ PRIVILEGES ] } - ON FUNCTIONS - FROM { [ GROUP ] role_name | PUBLIC } [, ...] + ON { FUNCTIONS | ROUTINES } + FROM { [ GROUP ] role_name | PUBLIC } [, ...] [ CASCADE | RESTRICT ] REVOKE [ GRANT OPTION FOR ] { USAGE | ALL [ PRIVILEGES ] } ON TYPES - FROM { [ GROUP ] role_name | PUBLIC } [, ...] + FROM { [ GROUP ] role_name | PUBLIC } [, ...] [ CASCADE | RESTRICT ] REVOKE [ GRANT OPTION FOR ] { USAGE | CREATE | ALL [ PRIVILEGES ] } ON SCHEMAS - FROM { [ GROUP ] role_name | PUBLIC } [, ...] + FROM { [ GROUP ] role_name | PUBLIC } [, ...] [ CASCADE | RESTRICT ] @@ -88,12 +88,18 @@ REVOKE [ GRANT OPTION FOR ] Description - ALTER DEFAULT PRIVILEGES allows you to set the privileges + ALTER DEFAULT PRIVILEGES allows you to set the privileges that will be applied to objects created in the future. (It does not affect privileges assigned to already-existing objects.) Currently, only the privileges for schemas, tables (including views and foreign tables), sequences, functions, and types (including domains) can be - altered. + altered. For this command, functions include aggregates and procedures. + The words FUNCTIONS and ROUTINES are + equivalent in this command. (ROUTINES is preferred + going forward as the standard term for functions and procedures taken + together. In earlier PostgreSQL releases, only the + word FUNCTIONS was allowed. It is not possible to set + default privileges for functions and procedures separately.) @@ -106,12 +112,12 @@ REVOKE [ GRANT OPTION FOR ] - As explained under , + As explained under , the default privileges for any object type normally grant all grantable permissions to the object owner, and may grant some privileges to - PUBLIC as well. However, this behavior can be changed by + PUBLIC as well. However, this behavior can be changed by altering the global default privileges with - ALTER DEFAULT PRIVILEGES. + ALTER DEFAULT PRIVILEGES. @@ -123,7 +129,7 @@ REVOKE [ GRANT OPTION FOR ] The name of an existing role of which the current role is a member. - If FOR ROLE is omitted, the current role is assumed. + If FOR ROLE is omitted, the current role is assumed. @@ -134,9 +140,9 @@ REVOKE [ GRANT OPTION FOR ] The name of an existing schema. If specified, the default privileges are altered for objects later created in that schema. 
- If IN SCHEMA is omitted, the global default privileges + If IN SCHEMA is omitted, the global default privileges are altered. - IN SCHEMA is not allowed when using ON SCHEMAS + IN SCHEMA is not allowed when using ON SCHEMAS as schemas can't be nested. @@ -148,10 +154,10 @@ REVOKE [ GRANT OPTION FOR ] The name of an existing role to grant or revoke privileges for. This parameter, and all the other parameters in - abbreviated_grant_or_revoke, + abbreviated_grant_or_revoke, act as described under - or - , + or + , except that one is setting permissions for a whole class of objects rather than specific named objects. @@ -165,17 +171,17 @@ REVOKE [ GRANT OPTION FOR ] Notes - Use 's \ddp command + Use 's \ddp command to obtain information about existing assignments of default privileges. The meaning of the privilege values is the same as explained for \dp under - . + . If you wish to drop a role for which the default privileges have been altered, it is necessary to reverse the changes in its default privileges - or use DROP OWNED BY to get rid of the default privileges entry + or use DROP OWNED BY to get rid of the default privileges entry for the role. @@ -186,7 +192,7 @@ REVOKE [ GRANT OPTION FOR ] Grant SELECT privilege to everyone for all tables (and views) you subsequently create in schema myschema, and allow - role webuser to INSERT into them too: + role webuser to INSERT into them too: ALTER DEFAULT PRIVILEGES IN SCHEMA myschema GRANT SELECT ON TABLES TO PUBLIC; @@ -206,7 +212,7 @@ ALTER DEFAULT PRIVILEGES IN SCHEMA myschema REVOKE INSERT ON TABLES FROM webuser Remove the public EXECUTE permission that is normally granted on functions, - for all functions subsequently created by role admin: + for all functions subsequently created by role admin: ALTER DEFAULT PRIVILEGES FOR ROLE admin REVOKE EXECUTE ON FUNCTIONS FROM PUBLIC; @@ -226,8 +232,8 @@ ALTER DEFAULT PRIVILEGES FOR ROLE admin REVOKE EXECUTE ON FUNCTIONS FROM PUBLIC; See Also - - + + diff --git a/doc/src/sgml/ref/alter_domain.sgml b/doc/src/sgml/ref/alter_domain.sgml index 95a822aef6..85253e209b 100644 --- a/doc/src/sgml/ref/alter_domain.sgml +++ b/doc/src/sgml/ref/alter_domain.sgml @@ -3,7 +3,7 @@ doc/src/sgml/ref/alter_domain.sgml PostgreSQL documentation --> - + ALTER DOMAIN @@ -23,24 +23,24 @@ PostgreSQL documentation -ALTER DOMAIN name - { SET DEFAULT expression | DROP DEFAULT } -ALTER DOMAIN name +ALTER DOMAIN name + { SET DEFAULT expression | DROP DEFAULT } +ALTER DOMAIN name { SET | DROP } NOT NULL -ALTER DOMAIN name - ADD domain_constraint [ NOT VALID ] -ALTER DOMAIN name - DROP CONSTRAINT [ IF EXISTS ] constraint_name [ RESTRICT | CASCADE ] -ALTER DOMAIN name - RENAME CONSTRAINT constraint_name TO new_constraint_name -ALTER DOMAIN name - VALIDATE CONSTRAINT constraint_name -ALTER DOMAIN name - OWNER TO { new_owner | CURRENT_USER | SESSION_USER } -ALTER DOMAIN name - RENAME TO new_name -ALTER DOMAIN name - SET SCHEMA new_schema +ALTER DOMAIN name + ADD domain_constraint [ NOT VALID ] +ALTER DOMAIN name + DROP CONSTRAINT [ IF EXISTS ] constraint_name [ RESTRICT | CASCADE ] +ALTER DOMAIN name + RENAME CONSTRAINT constraint_name TO new_constraint_name +ALTER DOMAIN name + VALIDATE CONSTRAINT constraint_name +ALTER DOMAIN name + OWNER TO { new_owner | CURRENT_USER | SESSION_USER } +ALTER DOMAIN name + RENAME TO new_name +ALTER DOMAIN name + SET SCHEMA new_schema @@ -69,18 +69,18 @@ ALTER DOMAIN name These forms change whether a domain is marked to allow NULL - values or to reject NULL values. 
You can only SET NOT NULL + values or to reject NULL values. You can only SET NOT NULL when the columns using the domain contain no null values. - ADD domain_constraint [ NOT VALID ] + ADD domain_constraint [ NOT VALID ] This form adds a new constraint to a domain using the same syntax as - . + . When a new constraint is added to a domain, all columns using that domain will be checked against the newly added constraint. These checks can be suppressed by adding the new constraint using the @@ -88,7 +88,7 @@ ALTER DOMAIN name valid using ALTER DOMAIN ... VALIDATE CONSTRAINT. Newly inserted or updated rows are always checked against all constraints, even those marked NOT VALID. - NOT VALID is only accepted for CHECK constraints. + NOT VALID is only accepted for CHECK constraints. @@ -118,7 +118,7 @@ ALTER DOMAIN name This form validates a constraint previously added as - NOT VALID, that is, verify that all data in columns using the + NOT VALID, that is, verify that all data in columns using the domain satisfy the specified constraint. @@ -154,7 +154,7 @@ ALTER DOMAIN name - You must own the domain to use ALTER DOMAIN. + You must own the domain to use ALTER DOMAIN. To change the schema of a domain, you must also have CREATE privilege on the new schema. To alter the owner, you must also be a direct or indirect member of the new @@ -171,7 +171,7 @@ ALTER DOMAIN name - name + name The name (possibly schema-qualified) of an existing domain to @@ -181,7 +181,7 @@ ALTER DOMAIN name - domain_constraint + domain_constraint New domain constraint for the domain. @@ -190,7 +190,7 @@ ALTER DOMAIN name - constraint_name + constraint_name Name of an existing constraint to drop or rename. @@ -199,7 +199,7 @@ ALTER DOMAIN name - NOT VALID + NOT VALID Do not verify existing column data for constraint validity. @@ -214,7 +214,7 @@ ALTER DOMAIN name Automatically drop objects that depend on the constraint, and in turn all objects that depend on those objects - (see ). + (see ). @@ -230,7 +230,7 @@ ALTER DOMAIN name - new_name + new_name The new name for the domain. @@ -239,7 +239,7 @@ ALTER DOMAIN name - new_constraint_name + new_constraint_name The new name for the constraint. @@ -248,7 +248,7 @@ ALTER DOMAIN name - new_owner + new_owner The user name of the new owner of the domain. @@ -257,7 +257,7 @@ ALTER DOMAIN name - new_schema + new_schema The new schema for the domain. @@ -273,12 +273,12 @@ ALTER DOMAIN name Notes - Currently, ALTER DOMAIN ADD CONSTRAINT, ALTER - DOMAIN VALIDATE CONSTRAINT, and ALTER DOMAIN SET NOT NULL - will fail if the validated named domain or - any derived domain is used within a composite-type column of any - table in the database. They should eventually be improved to be - able to verify the new constraint for such nested columns. + Currently, ALTER DOMAIN ADD CONSTRAINT, ALTER + DOMAIN VALIDATE CONSTRAINT, and ALTER DOMAIN SET NOT + NULL will fail if the named domain or any derived domain is used + within a container-type column (a composite, array, or range column) in + any table in the database. They should eventually be improved to be able + to verify the new constraint for such nested values. @@ -325,25 +325,25 @@ ALTER DOMAIN zipcode SET SCHEMA customers; - + Compatibility ALTER DOMAIN conforms to the SQL - standard, except for the OWNER, RENAME, SET SCHEMA, and - VALIDATE CONSTRAINT variants, which are - PostgreSQL extensions. 
The NOT VALID - clause of the ADD CONSTRAINT variant is also a + standard, except for the OWNER, RENAME, SET SCHEMA, and + VALIDATE CONSTRAINT variants, which are + PostgreSQL extensions. The NOT VALID + clause of the ADD CONSTRAINT variant is also a PostgreSQL extension. - + See Also - - + + diff --git a/doc/src/sgml/ref/alter_event_trigger.sgml b/doc/src/sgml/ref/alter_event_trigger.sgml index 9d6c64ad52..61919f7845 100644 --- a/doc/src/sgml/ref/alter_event_trigger.sgml +++ b/doc/src/sgml/ref/alter_event_trigger.sgml @@ -3,7 +3,7 @@ doc/src/sgml/ref/alter_event_trigger.sgml PostgreSQL documentation --> - + ALTER EVENT TRIGGER @@ -21,10 +21,10 @@ PostgreSQL documentation -ALTER EVENT TRIGGER name DISABLE -ALTER EVENT TRIGGER name ENABLE [ REPLICA | ALWAYS ] -ALTER EVENT TRIGGER name OWNER TO { new_owner | CURRENT_USER | SESSION_USER } -ALTER EVENT TRIGGER name RENAME TO new_name +ALTER EVENT TRIGGER name DISABLE +ALTER EVENT TRIGGER name ENABLE [ REPLICA | ALWAYS ] +ALTER EVENT TRIGGER name OWNER TO { new_owner | CURRENT_USER | SESSION_USER } +ALTER EVENT TRIGGER name RENAME TO new_name @@ -46,7 +46,7 @@ ALTER EVENT TRIGGER name RENAME TO - name + name The name of an existing trigger to alter. @@ -55,7 +55,7 @@ ALTER EVENT TRIGGER name RENAME TO - new_owner + new_owner The user name of the new owner of the event trigger. @@ -64,7 +64,7 @@ ALTER EVENT TRIGGER name RENAME TO - new_name + new_name The new name of the event trigger. @@ -78,7 +78,7 @@ ALTER EVENT TRIGGER name RENAME TO These forms configure the firing of event triggers. A disabled trigger is still known to the system, but is not executed when its triggering - event occurs. See also . + event occurs. See also . @@ -98,8 +98,8 @@ ALTER EVENT TRIGGER name RENAME TO See Also - - + + diff --git a/doc/src/sgml/ref/alter_extension.sgml b/doc/src/sgml/ref/alter_extension.sgml index a7c0927d1c..a2d405d6cd 100644 --- a/doc/src/sgml/ref/alter_extension.sgml +++ b/doc/src/sgml/ref/alter_extension.sgml @@ -3,7 +3,7 @@ doc/src/sgml/ref/alter_extension.sgml PostgreSQL documentation --> - + ALTER EXTENSION @@ -23,39 +23,41 @@ PostgreSQL documentation -ALTER EXTENSION name UPDATE [ TO new_version ] -ALTER EXTENSION name SET SCHEMA new_schema -ALTER EXTENSION name ADD member_object -ALTER EXTENSION name DROP member_object +ALTER EXTENSION name UPDATE [ TO new_version ] +ALTER EXTENSION name SET SCHEMA new_schema +ALTER EXTENSION name ADD member_object +ALTER EXTENSION name DROP member_object -where member_object is: +where member_object is: - ACCESS METHOD object_name | - AGGREGATE aggregate_name ( aggregate_signature ) | + ACCESS METHOD object_name | + AGGREGATE aggregate_name ( aggregate_signature ) | CAST (source_type AS target_type) | - COLLATION object_name | - CONVERSION object_name | - DOMAIN object_name | - EVENT TRIGGER object_name | - FOREIGN DATA WRAPPER object_name | - FOREIGN TABLE object_name | - FUNCTION function_name [ ( [ [ argmode ] [ argname ] argtype [, ...] 
] ) ] | - MATERIALIZED VIEW object_name | - OPERATOR operator_name (left_type, right_type) | - OPERATOR CLASS object_name USING index_method | - OPERATOR FAMILY object_name USING index_method | - [ PROCEDURAL ] LANGUAGE object_name | - SCHEMA object_name | - SEQUENCE object_name | - SERVER object_name | - TABLE object_name | - TEXT SEARCH CONFIGURATION object_name | - TEXT SEARCH DICTIONARY object_name | - TEXT SEARCH PARSER object_name | - TEXT SEARCH TEMPLATE object_name | + COLLATION object_name | + CONVERSION object_name | + DOMAIN object_name | + EVENT TRIGGER object_name | + FOREIGN DATA WRAPPER object_name | + FOREIGN TABLE object_name | + FUNCTION function_name [ ( [ [ argmode ] [ argname ] argtype [, ...] ] ) ] | + MATERIALIZED VIEW object_name | + OPERATOR operator_name (left_type, right_type) | + OPERATOR CLASS object_name USING index_method | + OPERATOR FAMILY object_name USING index_method | + [ PROCEDURAL ] LANGUAGE object_name | + PROCEDURE procedure_name [ ( [ [ argmode ] [ argname ] argtype [, ...] ] ) ] | + ROUTINE routine_name [ ( [ [ argmode ] [ argname ] argtype [, ...] ] ) ] | + SCHEMA object_name | + SEQUENCE object_name | + SERVER object_name | + TABLE object_name | + TEXT SEARCH CONFIGURATION object_name | + TEXT SEARCH DICTIONARY object_name | + TEXT SEARCH PARSER object_name | + TEXT SEARCH TEMPLATE object_name | TRANSFORM FOR type_name LANGUAGE lang_name | - TYPE object_name | - VIEW object_name + TYPE object_name | + VIEW object_name and aggregate_signature is: @@ -89,14 +91,14 @@ ALTER EXTENSION name DROP This form moves the extension's objects into another schema. The - extension has to be relocatable for this command to + extension has to be relocatable for this command to succeed. - ADD member_object + ADD member_object This form adds an existing object to the extension. This is mainly @@ -108,7 +110,7 @@ ALTER EXTENSION name DROP - DROP member_object + DROP member_object This form removes a member object from the extension. This is mainly @@ -119,13 +121,13 @@ ALTER EXTENSION name DROP - See for more information about these + See for more information about these operations. You must own the extension to use ALTER EXTENSION. - The ADD/DROP forms require ownership of the + The ADD/DROP forms require ownership of the added/dropped object as well. @@ -136,7 +138,7 @@ ALTER EXTENSION name DROP - name + name The name of an installed extension. @@ -145,19 +147,19 @@ ALTER EXTENSION name DROP - new_version + new_version The desired new version of the extension. This can be written as either an identifier or a string literal. If not specified, - ALTER EXTENSION UPDATE attempts to update to whatever is + ALTER EXTENSION UPDATE attempts to update to whatever is shown as the default version in the extension's control file. - new_schema + new_schema The new schema for the extension. @@ -170,12 +172,14 @@ ALTER EXTENSION name DROP aggregate_name function_name operator_name + procedure_name + routine_name The name of an object to be added to or removed from the extension. Names of tables, aggregates, domains, foreign tables, functions, operators, - operator classes, operator families, sequences, text search objects, + operator classes, operator families, procedures, routines, sequences, text search objects, types, and views can be schema-qualified. @@ -204,15 +208,15 @@ ALTER EXTENSION name DROP - The mode of a function or aggregate - argument: IN, OUT, - INOUT, or VARIADIC. - If omitted, the default is IN. 
+ The mode of a function, procedure, or aggregate + argument: IN, OUT, + INOUT, or VARIADIC. + If omitted, the default is IN. Note that ALTER EXTENSION does not actually pay - any attention to OUT arguments, since only the input + any attention to OUT arguments, since only the input arguments are needed to determine the function's identity. - So it is sufficient to list the IN, INOUT, - and VARIADIC arguments. + So it is sufficient to list the IN, INOUT, + and VARIADIC arguments. @@ -222,7 +226,7 @@ ALTER EXTENSION name DROP - The name of a function or aggregate argument. + The name of a function, procedure, or aggregate argument. Note that ALTER EXTENSION does not actually pay any attention to argument names, since only the argument data types are needed to determine the function's identity. @@ -235,7 +239,7 @@ ALTER EXTENSION name DROP - The data type of a function or aggregate argument. + The data type of a function, procedure, or aggregate argument. @@ -246,7 +250,7 @@ ALTER EXTENSION name DROP The data type(s) of the operator's arguments (optionally - schema-qualified). Write NONE for the missing argument + schema-qualified). Write NONE for the missing argument of a prefix or postfix operator. @@ -314,17 +318,17 @@ ALTER EXTENSION hstore ADD FUNCTION populate_record(anyelement, hstore); Compatibility - ALTER EXTENSION is a PostgreSQL + ALTER EXTENSION is a PostgreSQL extension. - + See Also - - + + diff --git a/doc/src/sgml/ref/alter_foreign_data_wrapper.sgml b/doc/src/sgml/ref/alter_foreign_data_wrapper.sgml index 3f5fb0f77e..14f3d616e7 100644 --- a/doc/src/sgml/ref/alter_foreign_data_wrapper.sgml +++ b/doc/src/sgml/ref/alter_foreign_data_wrapper.sgml @@ -3,7 +3,7 @@ doc/src/sgml/ref/alter_foreign_data_wrapper.sgml PostgreSQL documentation --> - + ALTER FOREIGN DATA WRAPPER @@ -24,7 +24,7 @@ PostgreSQL documentation ALTER FOREIGN DATA WRAPPER name [ HANDLER handler_function | NO HANDLER ] [ VALIDATOR validator_function | NO VALIDATOR ] - [ OPTIONS ( [ ADD | SET | DROP ] option ['value'] [, ... ]) ] + [ OPTIONS ( [ ADD | SET | DROP ] option ['value'] [, ... ]) ] ALTER FOREIGN DATA WRAPPER name OWNER TO { new_owner | CURRENT_USER | SESSION_USER } ALTER FOREIGN DATA WRAPPER name RENAME TO new_name @@ -93,11 +93,11 @@ ALTER FOREIGN DATA WRAPPER name REN Note that it is possible that pre-existing options of the foreign-data wrapper, or of dependent servers, user mappings, or foreign tables, are - invalid according to the new validator. PostgreSQL does + invalid according to the new validator. PostgreSQL does not check for this. It is up to the user to make sure that these options are correct before using the modified foreign-data wrapper. However, any options specified in this ALTER FOREIGN DATA - WRAPPER command will be checked using the new validator. + WRAPPER command will be checked using the new validator. @@ -113,12 +113,12 @@ ALTER FOREIGN DATA WRAPPER name REN - OPTIONS ( [ ADD | SET | DROP ] option ['value'] [, ... ] ) + OPTIONS ( [ ADD | SET | DROP ] option ['value'] [, ... ] ) Change options for the foreign-data - wrapper. ADD, SET, and DROP - specify the action to be performed. ADD is assumed + wrapper. ADD, SET, and DROP + specify the action to be performed. ADD is assumed if no operation is explicitly specified. Option names must be unique; names and values are also validated using the foreign data wrapper's validator function, if any. @@ -127,7 +127,7 @@ ALTER FOREIGN DATA WRAPPER name REN - new_owner + new_owner The user name of the new owner of the foreign-data wrapper. 
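Returning briefly to the ALTER EXTENSION synopsis above, the newly added PROCEDURE and ROUTINE member forms can be exercised as below; the extension, schema, and procedure names are hypothetical, and as with functions only the input argument types are needed:

    ALTER EXTENSION myext ADD PROCEDURE myschema.refresh_caches(integer);
    ALTER EXTENSION myext DROP ROUTINE myschema.refresh_caches(integer);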
@@ -150,16 +150,16 @@ ALTER FOREIGN DATA WRAPPER name REN Examples - Change a foreign-data wrapper dbi, add - option foo, drop bar: + Change a foreign-data wrapper dbi, add + option foo, drop bar: ALTER FOREIGN DATA WRAPPER dbi OPTIONS (ADD foo '1', DROP 'bar'); - Change the foreign-data wrapper dbi validator - to bob.myvalidator: + Change the foreign-data wrapper dbi validator + to bob.myvalidator: ALTER FOREIGN DATA WRAPPER dbi VALIDATOR bob.myvalidator; @@ -171,7 +171,7 @@ ALTER FOREIGN DATA WRAPPER dbi VALIDATOR bob.myvalidator; ALTER FOREIGN DATA WRAPPER conforms to ISO/IEC 9075-9 (SQL/MED), except that the HANDLER, - VALIDATOR, OWNER TO, and RENAME + VALIDATOR, OWNER TO, and RENAME clauses are extensions. @@ -180,8 +180,8 @@ ALTER FOREIGN DATA WRAPPER dbi VALIDATOR bob.myvalidator; See Also - - + + diff --git a/doc/src/sgml/ref/alter_foreign_table.sgml b/doc/src/sgml/ref/alter_foreign_table.sgml index b1692842b2..f266be0c37 100644 --- a/doc/src/sgml/ref/alter_foreign_table.sgml +++ b/doc/src/sgml/ref/alter_foreign_table.sgml @@ -3,7 +3,7 @@ doc/src/sgml/rel/alter_foreign_table.sgml PostgreSQL documentation --> - + ALTER FOREIGN TABLE @@ -21,41 +21,41 @@ PostgreSQL documentation -ALTER FOREIGN TABLE [ IF EXISTS ] [ ONLY ] name [ * ] - action [, ... ] -ALTER FOREIGN TABLE [ IF EXISTS ] [ ONLY ] name [ * ] - RENAME [ COLUMN ] column_name TO new_column_name -ALTER FOREIGN TABLE [ IF EXISTS ] name - RENAME TO new_name -ALTER FOREIGN TABLE [ IF EXISTS ] name - SET SCHEMA new_schema - -where action is one of: - - ADD [ COLUMN ] column_name data_type [ COLLATE collation ] [ column_constraint [ ... ] ] - DROP [ COLUMN ] [ IF EXISTS ] column_name [ RESTRICT | CASCADE ] - ALTER [ COLUMN ] column_name [ SET DATA ] TYPE data_type [ COLLATE collation ] - ALTER [ COLUMN ] column_name SET DEFAULT expression - ALTER [ COLUMN ] column_name DROP DEFAULT - ALTER [ COLUMN ] column_name { SET | DROP } NOT NULL - ALTER [ COLUMN ] column_name SET STATISTICS integer - ALTER [ COLUMN ] column_name SET ( attribute_option = value [, ... ] ) - ALTER [ COLUMN ] column_name RESET ( attribute_option [, ... ] ) - ALTER [ COLUMN ] column_name SET STORAGE { PLAIN | EXTERNAL | EXTENDED | MAIN } - ALTER [ COLUMN ] column_name OPTIONS ( [ ADD | SET | DROP ] option ['value'] [, ... ]) - ADD table_constraint [ NOT VALID ] - VALIDATE CONSTRAINT constraint_name - DROP CONSTRAINT [ IF EXISTS ] constraint_name [ RESTRICT | CASCADE ] - DISABLE TRIGGER [ trigger_name | ALL | USER ] - ENABLE TRIGGER [ trigger_name | ALL | USER ] - ENABLE REPLICA TRIGGER trigger_name - ENABLE ALWAYS TRIGGER trigger_name +ALTER FOREIGN TABLE [ IF EXISTS ] [ ONLY ] name [ * ] + action [, ... ] +ALTER FOREIGN TABLE [ IF EXISTS ] [ ONLY ] name [ * ] + RENAME [ COLUMN ] column_name TO new_column_name +ALTER FOREIGN TABLE [ IF EXISTS ] name + RENAME TO new_name +ALTER FOREIGN TABLE [ IF EXISTS ] name + SET SCHEMA new_schema + +where action is one of: + + ADD [ COLUMN ] column_name data_type [ COLLATE collation ] [ column_constraint [ ... ] ] + DROP [ COLUMN ] [ IF EXISTS ] column_name [ RESTRICT | CASCADE ] + ALTER [ COLUMN ] column_name [ SET DATA ] TYPE data_type [ COLLATE collation ] + ALTER [ COLUMN ] column_name SET DEFAULT expression + ALTER [ COLUMN ] column_name DROP DEFAULT + ALTER [ COLUMN ] column_name { SET | DROP } NOT NULL + ALTER [ COLUMN ] column_name SET STATISTICS integer + ALTER [ COLUMN ] column_name SET ( attribute_option = value [, ... ] ) + ALTER [ COLUMN ] column_name RESET ( attribute_option [, ... 
] ) + ALTER [ COLUMN ] column_name SET STORAGE { PLAIN | EXTERNAL | EXTENDED | MAIN } + ALTER [ COLUMN ] column_name OPTIONS ( [ ADD | SET | DROP ] option ['value'] [, ... ]) + ADD table_constraint [ NOT VALID ] + VALIDATE CONSTRAINT constraint_name + DROP CONSTRAINT [ IF EXISTS ] constraint_name [ RESTRICT | CASCADE ] + DISABLE TRIGGER [ trigger_name | ALL | USER ] + ENABLE TRIGGER [ trigger_name | ALL | USER ] + ENABLE REPLICA TRIGGER trigger_name + ENABLE ALWAYS TRIGGER trigger_name SET WITH OIDS SET WITHOUT OIDS - INHERIT parent_table - NO INHERIT parent_table - OWNER TO { new_owner | CURRENT_USER | SESSION_USER } - OPTIONS ( [ ADD | SET | DROP ] option ['value'] [, ... ]) + INHERIT parent_table + NO INHERIT parent_table + OWNER TO { new_owner | CURRENT_USER | SESSION_USER } + OPTIONS ( [ ADD | SET | DROP ] option ['value'] [, ... ]) @@ -72,7 +72,7 @@ ALTER FOREIGN TABLE [ IF EXISTS ] name This form adds a new column to the foreign table, using the same syntax as - . + . Unlike the case when adding a column to a regular table, nothing happens to the underlying storage: this action simply declares that some new column is now accessible through the foreign table. @@ -85,7 +85,7 @@ ALTER FOREIGN TABLE [ IF EXISTS ] name This form drops a column from a foreign table. - You will need to say CASCADE if + You will need to say CASCADE if anything outside the table depends on the column; for example, views. If IF EXISTS is specified and the column @@ -101,7 +101,7 @@ ALTER FOREIGN TABLE [ IF EXISTS ] name This form changes the type of a column of a foreign table. Again, this has no effect on any underlying storage: this action simply - changes the type that PostgreSQL believes the column to + changes the type that PostgreSQL believes the column to have. @@ -113,7 +113,7 @@ ALTER FOREIGN TABLE [ IF EXISTS ] name These forms set or remove the default value for a column. Default values only apply in subsequent INSERT - or UPDATE commands; they do not cause rows already in the + or UPDATE commands; they do not cause rows already in the table to change. @@ -134,20 +134,20 @@ ALTER FOREIGN TABLE [ IF EXISTS ] name This form sets the per-column statistics-gathering target for subsequent - operations. - See the similar form of + operations. + See the similar form of for more details. - SET ( attribute_option = value [, ... ] ) - RESET ( attribute_option [, ... ] ) + SET ( attribute_option = value [, ... ] ) + RESET ( attribute_option [, ... ] ) This form sets or resets per-attribute options. - See the similar form of + See the similar form of for more details. @@ -160,7 +160,7 @@ ALTER FOREIGN TABLE [ IF EXISTS ] name This form sets the storage mode for a column. - See the similar form of + See the similar form of for more details. Note that the storage mode has no effect unless the table's foreign-data wrapper chooses to pay attention to it. @@ -169,12 +169,12 @@ ALTER FOREIGN TABLE [ IF EXISTS ] name - ADD table_constraint [ NOT VALID ] + ADD table_constraint [ NOT VALID ] This form adds a new constraint to a foreign table, using the same - syntax as . - Currently only CHECK constraints are supported. + syntax as . + Currently only CHECK constraints are supported. @@ -182,8 +182,8 @@ ALTER FOREIGN TABLE [ IF EXISTS ] name.) - If the constraint is marked NOT VALID, then it isn't + in .) + If the constraint is marked NOT VALID, then it isn't assumed to hold, but is only recorded for possible future use. 
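A hedged sketch of the column and constraint forms above follows; the table, column, and constraint names are hypothetical, and only the local declaration changes, never the remote data:

    -- several actions may be combined in one statement
    ALTER FOREIGN TABLE films
        ADD COLUMN rating text,
        ALTER COLUMN release_year SET NOT NULL;
    -- declare a CHECK constraint without verifying existing remote rows
    ALTER FOREIGN TABLE films
        ADD CONSTRAINT rating_known CHECK (rating IN ('G', 'PG', 'PG-13', 'R')) NOT VALID;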
@@ -217,7 +217,7 @@ ALTER FOREIGN TABLE [ IF EXISTS ] name These forms configure the firing of trigger(s) belonging to the foreign - table. See the similar form of for more + table. See the similar form of for more details. @@ -228,16 +228,16 @@ ALTER FOREIGN TABLE [ IF EXISTS ] name This form adds an oid system column to the - table (see ). + table (see ). It does nothing if the table already has OIDs. Unless the table's foreign-data wrapper supports OIDs, this column will simply read as zeroes. - Note that this is not equivalent to ADD COLUMN oid oid; + Note that this is not equivalent to ADD COLUMN oid oid; that would add a normal column that happened to be named - oid, not a system column. + oid, not a system column. @@ -256,19 +256,19 @@ ALTER FOREIGN TABLE [ IF EXISTS ] name - INHERIT parent_table + INHERIT parent_table This form adds the target foreign table as a new child of the specified parent table. - See the similar form of + See the similar form of for more details. - NO INHERIT parent_table + NO INHERIT parent_table This form removes the target foreign table from the list of children of @@ -288,12 +288,12 @@ ALTER FOREIGN TABLE [ IF EXISTS ] name - OPTIONS ( [ ADD | SET | DROP ] option ['value'] [, ... ] ) + OPTIONS ( [ ADD | SET | DROP ] option ['value'] [, ... ] ) Change options for the foreign table or one of its columns. - ADD, SET, and DROP - specify the action to be performed. ADD is assumed + ADD, SET, and DROP + specify the action to be performed. ADD is assumed if no operation is explicitly specified. Duplicate option names are not allowed (although it's OK for a table option and a column option to have the same name). Option names and values are also validated using the @@ -325,7 +325,7 @@ ALTER FOREIGN TABLE [ IF EXISTS ] name - All the actions except RENAME and SET SCHEMA + All the actions except RENAME and SET SCHEMA can be combined into a list of multiple alterations to apply in parallel. For example, it is possible to add several columns and/or alter the type of several @@ -333,13 +333,13 @@ ALTER FOREIGN TABLE [ IF EXISTS ] name - If the command is written as ALTER FOREIGN TABLE IF EXISTS ... + If the command is written as ALTER FOREIGN TABLE IF EXISTS ... and the foreign table does not exist, no error is thrown. A notice is issued in this case. - You must own the table to use ALTER FOREIGN TABLE. + You must own the table to use ALTER FOREIGN TABLE. To change the schema of a foreign table, you must also have CREATE privilege on the new schema. To alter the owner, you must also be a direct or indirect member of the new @@ -358,21 +358,21 @@ ALTER FOREIGN TABLE [ IF EXISTS ] name - name + name The name (possibly schema-qualified) of an existing foreign table to - alter. If ONLY is specified before the table name, only - that table is altered. If ONLY is not specified, the table + alter. If ONLY is specified before the table name, only + that table is altered. If ONLY is not specified, the table and all its descendant tables (if any) are altered. Optionally, - * can be specified after the table name to explicitly + * can be specified after the table name to explicitly indicate that descendant tables are included. - column_name + column_name Name of a new or existing column. @@ -381,7 +381,7 @@ ALTER FOREIGN TABLE [ IF EXISTS ] name - new_column_name + new_column_name New name for an existing column. @@ -390,7 +390,7 @@ ALTER FOREIGN TABLE [ IF EXISTS ] name - new_name + new_name New name for the table. 
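To illustrate the inheritance and trigger forms described above (all object names are hypothetical):

    ALTER FOREIGN TABLE measurements_2018 INHERIT measurements;      -- attach as a child of a local parent table
    ALTER FOREIGN TABLE measurements_2018 DISABLE TRIGGER ALL;       -- suppress its triggers during maintenance
    ALTER FOREIGN TABLE measurements_2018 NO INHERIT measurements;   -- detach it again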
@@ -399,7 +399,7 @@ ALTER FOREIGN TABLE [ IF EXISTS ] name - data_type + data_type Data type of the new column, or new data type for an existing @@ -409,7 +409,7 @@ ALTER FOREIGN TABLE [ IF EXISTS ] name - table_constraint + table_constraint New table constraint for the foreign table. @@ -418,7 +418,7 @@ ALTER FOREIGN TABLE [ IF EXISTS ] name - constraint_name + constraint_name Name of an existing constraint to drop. @@ -433,7 +433,7 @@ ALTER FOREIGN TABLE [ IF EXISTS ] name). + (see ). @@ -449,7 +449,7 @@ ALTER FOREIGN TABLE [ IF EXISTS ] name - trigger_name + trigger_name Name of a single trigger to disable or enable. @@ -480,7 +480,7 @@ ALTER FOREIGN TABLE [ IF EXISTS ] name - parent_table + parent_table A parent table to associate or de-associate with this foreign table. @@ -489,7 +489,7 @@ ALTER FOREIGN TABLE [ IF EXISTS ] name - new_owner + new_owner The user name of the new owner of the table. @@ -498,7 +498,7 @@ ALTER FOREIGN TABLE [ IF EXISTS ] name - new_schema + new_schema The name of the schema to which the table will be moved. @@ -518,14 +518,14 @@ ALTER FOREIGN TABLE [ IF EXISTS ] name Consistency with the foreign server is not checked when a column is added or removed with ADD COLUMN or - DROP COLUMN, a NOT NULL - or CHECK constraint is added, or a column type is changed - with SET DATA TYPE. It is the user's responsibility to ensure + DROP COLUMN, a NOT NULL + or CHECK constraint is added, or a column type is changed + with SET DATA TYPE. It is the user's responsibility to ensure that the table definition matches the remote side. - Refer to for a further description of valid + Refer to for a further description of valid parameters. @@ -552,16 +552,16 @@ ALTER FOREIGN TABLE myschema.distributors OPTIONS (ADD opt1 'value', SET opt2 'v Compatibility - The forms ADD, DROP, + The forms ADD, DROP, and SET DATA TYPE conform with the SQL standard. The other forms are PostgreSQL extensions of the SQL standard. Also, the ability to specify more than one manipulation in a single - ALTER FOREIGN TABLE command is an extension. + ALTER FOREIGN TABLE command is an extension. - ALTER FOREIGN TABLE DROP COLUMN can be used to drop the only + ALTER FOREIGN TABLE DROP COLUMN can be used to drop the only column of a foreign table, leaving a zero-column table. This is an extension of SQL, which disallows zero-column foreign tables. @@ -571,8 +571,8 @@ ALTER FOREIGN TABLE myschema.distributors OPTIONS (ADD opt1 'value', SET opt2 'v See Also - - + + diff --git a/doc/src/sgml/ref/alter_function.sgml b/doc/src/sgml/ref/alter_function.sgml index 168eeb7c52..d8747e0748 100644 --- a/doc/src/sgml/ref/alter_function.sgml +++ b/doc/src/sgml/ref/alter_function.sgml @@ -3,7 +3,7 @@ doc/src/sgml/ref/alter_function.sgml PostgreSQL documentation --> - + ALTER FUNCTION @@ -22,7 +22,7 @@ PostgreSQL documentation ALTER FUNCTION name [ ( [ [ argmode ] [ argname ] argtype [, ...] ] ) ] - action [ ... ] [ RESTRICT ] + action [ ... ] [ RESTRICT ] ALTER FUNCTION name [ ( [ [ argmode ] [ argname ] argtype [, ...] ] ) ] RENAME TO new_name ALTER FUNCTION name [ ( [ [ argmode ] [ argname ] argtype [, ...] ] ) ] @@ -32,7 +32,7 @@ ALTER FUNCTION name [ ( [ [ argmode ] [ argname ] argtype [, ...] ] ) ] DEPENDS ON EXTENSION extension_name -where action is one of: +where action is one of: CALLED ON NULL INPUT | RETURNS NULL ON NULL INPUT | STRICT IMMUTABLE | STABLE | VOLATILE | [ NOT ] LEAKPROOF @@ -56,8 +56,8 @@ ALTER FUNCTION name [ ( [ [ - The mode of an argument: IN, OUT, - INOUT, or VARIADIC. 
- If omitted, the default is IN. + The mode of an argument: IN, OUT, + INOUT, or VARIADIC. + If omitted, the default is IN. Note that ALTER FUNCTION does not actually pay - any attention to OUT arguments, since only the input + any attention to OUT arguments, since only the input arguments are needed to determine the function's identity. - So it is sufficient to list the IN, INOUT, - and VARIADIC arguments. + So it is sufficient to list the IN, INOUT, + and VARIADIC arguments. @@ -171,7 +171,7 @@ ALTER FUNCTION name [ ( [ [ + is assumed automatically. See for more information. @@ -185,7 +185,7 @@ ALTER FUNCTION name [ ( [ [ for details. + See for details. @@ -198,7 +198,7 @@ ALTER FUNCTION name [ ( [ [ for more information about + conformance. See for more information about this capability. @@ -210,7 +210,7 @@ ALTER FUNCTION name [ ( [ [ for details. + See for details. @@ -220,7 +220,7 @@ ALTER FUNCTION name [ ( [ [ for more information about + See for more information about this capability. @@ -232,7 +232,7 @@ ALTER FUNCTION name [ ( [ [ for more information. + See for more information. @@ -243,7 +243,7 @@ ALTER FUNCTION name [ ( [ [ for more information. + function. See for more information. @@ -260,14 +260,14 @@ ALTER FUNCTION name [ ( [ [ and - + See and + for more information about allowed parameter names and values. @@ -329,7 +329,7 @@ ALTER FUNCTION check_password(text) SET search_path = admin, pg_temp; - To disable automatic setting of search_path for a function: + To disable automatic setting of search_path for a function: ALTER FUNCTION check_password(text) RESET search_path; @@ -343,13 +343,13 @@ ALTER FUNCTION check_password(text) RESET search_path; This statement is partially compatible with the ALTER - FUNCTION statement in the SQL standard. The standard allows more + FUNCTION statement in the SQL standard. The standard allows more properties of a function to be modified, but does not provide the ability to rename a function, make a function a security definer, attach configuration parameter values to a function, or change the owner, schema, or volatility of a function. The standard also - requires the RESTRICT key word, which is optional in - PostgreSQL. + requires the RESTRICT key word, which is optional in + PostgreSQL. @@ -357,8 +357,10 @@ ALTER FUNCTION check_password(text) RESET search_path; See Also - - + + + + diff --git a/doc/src/sgml/ref/alter_group.sgml b/doc/src/sgml/ref/alter_group.sgml index adf6f7e932..39cc2b88cf 100644 --- a/doc/src/sgml/ref/alter_group.sgml +++ b/doc/src/sgml/ref/alter_group.sgml @@ -3,7 +3,7 @@ doc/src/sgml/ref/alter_group.sgml PostgreSQL documentation --> - + ALTER GROUP @@ -21,16 +21,16 @@ PostgreSQL documentation -ALTER GROUP role_specification ADD USER user_name [, ... ] -ALTER GROUP role_specification DROP USER user_name [, ... ] +ALTER GROUP role_specification ADD USER user_name [, ... ] +ALTER GROUP role_specification DROP USER user_name [, ... ] -where role_specification can be: +where role_specification can be: - role_name + role_name | CURRENT_USER | SESSION_USER -ALTER GROUP group_name RENAME TO new_name +ALTER GROUP group_name RENAME TO new_name @@ -46,18 +46,18 @@ ALTER GROUP group_name RENAME TO The first two variants add users to a group or remove them from a group. - (Any role can play the part of either a user or a - group for this purpose.) These variants are effectively + (Any role can play the part of either a user or a + group for this purpose.) 
These variants are effectively equivalent to granting or revoking membership in the role named as the - group; so the preferred way to do this is to use - or - . + group; so the preferred way to do this is to use + or + . The third variant changes the name of the group. This is exactly equivalent to renaming the role with - . + . @@ -66,7 +66,7 @@ ALTER GROUP group_name RENAME TO - group_name + group_name The name of the group (role) to modify. @@ -75,11 +75,11 @@ ALTER GROUP group_name RENAME TO - user_name + user_name Users (roles) that are to be added to or removed from the group. - The users must already exist; ALTER GROUP does not + The users must already exist; ALTER GROUP does not create or drop users. @@ -125,9 +125,9 @@ ALTER GROUP workers DROP USER beth; See Also - - - + + + diff --git a/doc/src/sgml/ref/alter_index.sgml b/doc/src/sgml/ref/alter_index.sgml index ad77b5743a..d0a6212358 100644 --- a/doc/src/sgml/ref/alter_index.sgml +++ b/doc/src/sgml/ref/alter_index.sgml @@ -3,7 +3,7 @@ doc/src/sgml/ref/alter_index.sgml PostgreSQL documentation --> - + ALTER INDEX @@ -21,13 +21,16 @@ PostgreSQL documentation -ALTER INDEX [ IF EXISTS ] name RENAME TO new_name -ALTER INDEX [ IF EXISTS ] name SET TABLESPACE tablespace_name -ALTER INDEX name DEPENDS ON EXTENSION extension_name -ALTER INDEX [ IF EXISTS ] name SET ( storage_parameter = value [, ... ] ) -ALTER INDEX [ IF EXISTS ] name RESET ( storage_parameter [, ... ] ) -ALTER INDEX ALL IN TABLESPACE name [ OWNED BY role_name [, ... ] ] - SET TABLESPACE new_tablespace [ NOWAIT ] +ALTER INDEX [ IF EXISTS ] name RENAME TO new_name +ALTER INDEX [ IF EXISTS ] name SET TABLESPACE tablespace_name +ALTER INDEX name ATTACH PARTITION index_name +ALTER INDEX name DEPENDS ON EXTENSION extension_name +ALTER INDEX [ IF EXISTS ] name SET ( storage_parameter = value [, ... ] ) +ALTER INDEX [ IF EXISTS ] name RESET ( storage_parameter [, ... ] ) +ALTER INDEX [ IF EXISTS ] name ALTER [ COLUMN ] column_number + SET STATISTICS integer +ALTER INDEX ALL IN TABLESPACE name [ OWNED BY role_name [, ... ] ] + SET TABLESPACE new_tablespace [ NOWAIT ] @@ -45,6 +48,9 @@ ALTER INDEX ALL IN TABLESPACE name The RENAME form changes the name of the index. + If the index is associated with a table constraint (either + UNIQUE, PRIMARY KEY, + or EXCLUDE), the constraint is renamed as well. There is no effect on the stored data. @@ -68,7 +74,20 @@ ALTER INDEX ALL IN TABLESPACE name this command, use ALTER DATABASE or explicit ALTER INDEX invocations instead if desired. See also - . + . + + + + + + ATTACH PARTITION + + + Causes the named index to become attached to the altered index. + The named index must be on a partition of the table containing the + index being altered, and have an equivalent definition. An attached + index cannot be dropped by itself, and will automatically be dropped + if its parent index is dropped. @@ -84,32 +103,51 @@ ALTER INDEX ALL IN TABLESPACE name - SET ( storage_parameter = value [, ... ] ) + SET ( storage_parameter = value [, ... ] ) This form changes one or more index-method-specific storage parameters for the index. See - + for details on the available parameters. Note that the index contents will not be modified immediately by this command; depending on the parameter you might need to rebuild the index with - + to get the desired effects. - RESET ( storage_parameter [, ... ] ) + RESET ( storage_parameter [, ... ] ) This form resets one or more index-method-specific storage parameters to - their defaults. 
As with SET, a REINDEX + their defaults. As with SET, a REINDEX might be needed to update the index entirely. + + ALTER [ COLUMN ] column_number SET STATISTICS integer + + + This form sets the per-column statistics-gathering target for + subsequent operations, though can + be used only on index columns that are defined as an expression. + Since expressions lack a unique name, we refer to them using the + ordinal number of the index column. + The target can be set in the range 0 to 10000; alternatively, set it + to -1 to revert to using the system default statistics + target (). + For more information on the use of statistics by the + PostgreSQL query planner, refer to + . + + + + @@ -131,7 +169,17 @@ ALTER INDEX ALL IN TABLESPACE name - name + column_number + + + The ordinal number refers to the ordinal (left-to-right) position + of the index column. + + + + + + name The name (possibly schema-qualified) of an existing index to @@ -141,7 +189,7 @@ ALTER INDEX ALL IN TABLESPACE name - new_name + new_name The new name for the index. @@ -150,7 +198,7 @@ ALTER INDEX ALL IN TABLESPACE name - tablespace_name + tablespace_name The tablespace to which the index will be moved. @@ -159,7 +207,7 @@ ALTER INDEX ALL IN TABLESPACE name - extension_name + extension_name The name of the extension that the index is to depend on. @@ -168,7 +216,7 @@ ALTER INDEX ALL IN TABLESPACE name - storage_parameter + storage_parameter The name of an index-method-specific storage parameter. @@ -177,7 +225,7 @@ ALTER INDEX ALL IN TABLESPACE name - value + value The new value for an index-method-specific storage parameter. @@ -194,13 +242,13 @@ ALTER INDEX ALL IN TABLESPACE name These operations are also possible using - . - ALTER INDEX is in fact just an alias for the forms - of ALTER TABLE that apply to indexes. + . + ALTER INDEX is in fact just an alias for the forms + of ALTER TABLE that apply to indexes. - There was formerly an ALTER INDEX OWNER variant, but + There was formerly an ALTER INDEX OWNER variant, but this is now ignored (with a warning). An index cannot have an owner different from its table's owner. Changing the table's owner automatically changes the index as well. @@ -233,6 +281,13 @@ ALTER INDEX distributors SET TABLESPACE fasttablespace; ALTER INDEX distributors SET (fillfactor = 75); REINDEX INDEX distributors; + + + + Set the statistics-gathering target for an expression index: + +CREATE INDEX coord_idx ON measured (x, y, (z + t)); +ALTER INDEX coord_idx ALTER COLUMN 3 SET STATISTICS 1000; @@ -241,7 +296,7 @@ REINDEX INDEX distributors; Compatibility - ALTER INDEX is a PostgreSQL + ALTER INDEX is a PostgreSQL extension. 
@@ -251,8 +306,8 @@ REINDEX INDEX distributors; See Also - - + + diff --git a/doc/src/sgml/ref/alter_language.sgml b/doc/src/sgml/ref/alter_language.sgml index 63d9ecd924..eac63dec13 100644 --- a/doc/src/sgml/ref/alter_language.sgml +++ b/doc/src/sgml/ref/alter_language.sgml @@ -3,7 +3,7 @@ doc/src/sgml/ref/alter_language.sgml PostgreSQL documentation --> - + ALTER LANGUAGE @@ -83,8 +83,8 @@ ALTER [ PROCEDURAL ] LANGUAGE name OWNER TO { See Also - - + + diff --git a/doc/src/sgml/ref/alter_large_object.sgml b/doc/src/sgml/ref/alter_large_object.sgml index 5748d52db1..f4a9c9e2a5 100644 --- a/doc/src/sgml/ref/alter_large_object.sgml +++ b/doc/src/sgml/ref/alter_large_object.sgml @@ -3,7 +3,7 @@ doc/src/sgml/ref/alter_large_object.sgml PostgreSQL documentation --> - + ALTER LARGE OBJECT @@ -21,7 +21,7 @@ PostgreSQL documentation -ALTER LARGE OBJECT large_object_oid OWNER TO { new_owner | CURRENT_USER | SESSION_USER } +ALTER LARGE OBJECT large_object_oid OWNER TO { new_owner | CURRENT_USER | SESSION_USER } @@ -73,7 +73,7 @@ ALTER LARGE OBJECT large_object_oid See Also - + diff --git a/doc/src/sgml/ref/alter_materialized_view.sgml b/doc/src/sgml/ref/alter_materialized_view.sgml index b88f5ac00f..03e3df1ffd 100644 --- a/doc/src/sgml/ref/alter_materialized_view.sgml +++ b/doc/src/sgml/ref/alter_materialized_view.sgml @@ -3,7 +3,7 @@ doc/src/sgml/ref/alter_materialized_view.sgml PostgreSQL documentation --> - + ALTER MATERIALIZED VIEW @@ -21,30 +21,30 @@ PostgreSQL documentation -ALTER MATERIALIZED VIEW [ IF EXISTS ] name - action [, ... ] -ALTER MATERIALIZED VIEW name - DEPENDS ON EXTENSION extension_name -ALTER MATERIALIZED VIEW [ IF EXISTS ] name - RENAME [ COLUMN ] column_name TO new_column_name +ALTER MATERIALIZED VIEW [ IF EXISTS ] name + action [, ... ] +ALTER MATERIALIZED VIEW name + DEPENDS ON EXTENSION extension_name +ALTER MATERIALIZED VIEW [ IF EXISTS ] name + RENAME [ COLUMN ] column_name TO new_column_name ALTER MATERIALIZED VIEW [ IF EXISTS ] name RENAME TO new_name ALTER MATERIALIZED VIEW [ IF EXISTS ] name SET SCHEMA new_schema -ALTER MATERIALIZED VIEW ALL IN TABLESPACE name [ OWNED BY role_name [, ... ] ] - SET TABLESPACE new_tablespace [ NOWAIT ] +ALTER MATERIALIZED VIEW ALL IN TABLESPACE name [ OWNED BY role_name [, ... ] ] + SET TABLESPACE new_tablespace [ NOWAIT ] -where action is one of: +where action is one of: - ALTER [ COLUMN ] column_name SET STATISTICS integer - ALTER [ COLUMN ] column_name SET ( attribute_option = value [, ... ] ) - ALTER [ COLUMN ] column_name RESET ( attribute_option [, ... ] ) - ALTER [ COLUMN ] column_name SET STORAGE { PLAIN | EXTERNAL | EXTENDED | MAIN } - CLUSTER ON index_name + ALTER [ COLUMN ] column_name SET STATISTICS integer + ALTER [ COLUMN ] column_name SET ( attribute_option = value [, ... ] ) + ALTER [ COLUMN ] column_name RESET ( attribute_option [, ... ] ) + ALTER [ COLUMN ] column_name SET STORAGE { PLAIN | EXTERNAL | EXTENDED | MAIN } + CLUSTER ON index_name SET WITHOUT CLUSTER - SET ( storage_parameter = value [, ... ] ) - RESET ( storage_parameter [, ... ] ) - OWNER TO { new_owner | CURRENT_USER | SESSION_USER } + SET ( storage_parameter = value [, ... ] ) + RESET ( storage_parameter [, ... ] ) + OWNER TO { new_owner | CURRENT_USER | SESSION_USER } @@ -58,8 +58,8 @@ ALTER MATERIALIZED VIEW ALL IN TABLESPACE name You must own the materialized view to use ALTER MATERIALIZED - VIEW. To change a materialized view's schema, you must also have - CREATE privilege on the new schema. + VIEW. 
To change a materialized view's schema, you must also have + CREATE privilege on the new schema. To alter the owner, you must also be a direct or indirect member of the new owning role, and that role must have CREATE privilege on the materialized view's schema. (These restrictions enforce that altering @@ -78,7 +78,7 @@ ALTER MATERIALIZED VIEW ALL IN TABLESPACE nameALTER MATERIALIZED VIEW are a subset of those available for ALTER TABLE, and have the same meaning when used for - materialized views. See the descriptions for + materialized views. See the descriptions for for details. @@ -98,7 +98,7 @@ ALTER MATERIALIZED VIEW ALL IN TABLESPACE name - column_name + column_name Name of a new or existing column. @@ -107,7 +107,7 @@ ALTER MATERIALIZED VIEW ALL IN TABLESPACE name - extension_name + extension_name The name of the extension that the materialized view is to depend on. @@ -116,7 +116,7 @@ ALTER MATERIALIZED VIEW ALL IN TABLESPACE name - new_column_name + new_column_name New name for an existing column. @@ -125,7 +125,7 @@ ALTER MATERIALIZED VIEW ALL IN TABLESPACE name - new_owner + new_owner The user name of the new owner of the materialized view. @@ -177,9 +177,9 @@ ALTER MATERIALIZED VIEW foo RENAME TO bar; See Also - - - + + + diff --git a/doc/src/sgml/ref/alter_opclass.sgml b/doc/src/sgml/ref/alter_opclass.sgml index 58de603aa4..59a64caa4f 100644 --- a/doc/src/sgml/ref/alter_opclass.sgml +++ b/doc/src/sgml/ref/alter_opclass.sgml @@ -3,7 +3,7 @@ doc/src/sgml/ref/alter_opclass.sgml PostgreSQL documentation --> - + ALTER OPERATOR CLASS @@ -41,7 +41,7 @@ ALTER OPERATOR CLASS name USING See Also - - - + + + diff --git a/doc/src/sgml/ref/alter_operator.sgml b/doc/src/sgml/ref/alter_operator.sgml index b2eaa7a263..b3bfa9ccbe 100644 --- a/doc/src/sgml/ref/alter_operator.sgml +++ b/doc/src/sgml/ref/alter_operator.sgml @@ -3,7 +3,7 @@ doc/src/sgml/ref/alter_operator.sgml PostgreSQL documentation --> - + ALTER OPERATOR @@ -43,7 +43,7 @@ ALTER OPERATOR name ( { left_type - You must own the operator to use ALTER OPERATOR. + You must own the operator to use ALTER OPERATOR. To alter the owner, you must also be a direct or indirect member of the new owning role, and that role must have CREATE privilege on the operator's schema. (These restrictions enforce that altering the owner @@ -134,9 +134,9 @@ ALTER OPERATOR @@ (text, text) OWNER TO joe; - Change the restriction and join selectivity estimator functions of a custom operator a && b for type int[]: + Change the restriction and join selectivity estimator functions of a custom operator a && b for type int[]: -ALTER OPERATOR && (_int4, _int4) SET (RESTRICT = _int_contsel, JOIN = _int_contjoinsel); +ALTER OPERATOR && (_int4, _int4) SET (RESTRICT = _int_contsel, JOIN = _int_contjoinsel); @@ -154,8 +154,8 @@ ALTER OPERATOR && (_int4, _int4) SET (RESTRICT = _int_contsel, JOIN = _int_contj See Also - - + + diff --git a/doc/src/sgml/ref/alter_opfamily.sgml b/doc/src/sgml/ref/alter_opfamily.sgml index 0bafe5b8f8..848156c9d7 100644 --- a/doc/src/sgml/ref/alter_opfamily.sgml +++ b/doc/src/sgml/ref/alter_opfamily.sgml @@ -3,7 +3,7 @@ doc/src/sgml/ref/alter_opfamily.sgml PostgreSQL documentation --> - + ALTER OPERATOR FAMILY @@ -57,12 +57,12 @@ ALTER OPERATOR FAMILY name USING .) + instead; see .) PostgreSQL will allow loose members of a family to be dropped from the family at any time, but members of an operator class cannot be dropped without dropping the whole class and @@ -74,7 +74,7 @@ ALTER OPERATOR FAMILY name USING - Refer to for further information. 
+ Refer to for further information. @@ -139,15 +139,15 @@ ALTER OPERATOR FAMILY name USING op_type - In an OPERATOR clause, - the operand data type(s) of the operator, or NONE to + In an OPERATOR clause, + the operand data type(s) of the operator, or NONE to signify a left-unary or right-unary operator. Unlike the comparable - syntax in CREATE OPERATOR CLASS, the operand data types + syntax in CREATE OPERATOR CLASS, the operand data types must always be specified. - In an ADD FUNCTION clause, the operand data type(s) the + In an ADD FUNCTION clause, the operand data type(s) the function is intended to support, if different from the input data type(s) of the function. For B-tree comparison functions and hash functions it is not necessary to specify name USING - If neither FOR SEARCH nor FOR ORDER BY is - specified, FOR SEARCH is the default. + If neither FOR SEARCH nor FOR ORDER BY is + specified, FOR SEARCH is the default. @@ -185,7 +185,7 @@ ALTER OPERATOR FAMILY name USING support_number - The index method's support procedure number for a + The index method's support function number for a function associated with the operator family. @@ -196,7 +196,7 @@ ALTER OPERATOR FAMILY name USING - The OPERATOR and FUNCTION + The OPERATOR and FUNCTION clauses can appear in any order. @@ -250,10 +250,10 @@ ALTER OPERATOR FAMILY name USING - Before PostgreSQL 8.4, the OPERATOR - clause could include a RECHECK option. This is no longer - supported because whether an index operator is lossy is now + Before PostgreSQL 8.4, the OPERATOR + clause could include a RECHECK option. This is no longer + supported because whether an index operator is lossy is now determined on-the-fly at run time. This allows efficient handling of cases where an operator might or might not be lossy. @@ -288,7 +288,7 @@ ALTER OPERATOR FAMILY name USING - - - - - + + + + + diff --git a/doc/src/sgml/ref/alter_policy.sgml b/doc/src/sgml/ref/alter_policy.sgml index df347d180e..a1c720a956 100644 --- a/doc/src/sgml/ref/alter_policy.sgml +++ b/doc/src/sgml/ref/alter_policy.sgml @@ -3,7 +3,7 @@ doc/src/sgml/ref/alter_policy.sgml PostgreSQL documentation --> - + ALTER POLICY @@ -21,7 +21,7 @@ PostgreSQL documentation -ALTER POLICY name ON table_name RENAME TO new_name +ALTER POLICY name ON table_name RENAME TO new_name ALTER POLICY name ON table_name [ TO { role_name | PUBLIC | CURRENT_USER | SESSION_USER } [, ...] ] @@ -105,7 +105,7 @@ ALTER POLICY name ON The USING expression for the policy. - See for details. + See for details. @@ -115,7 +115,7 @@ ALTER POLICY name ON The WITH CHECK expression for the policy. - See for details. + See for details. @@ -135,8 +135,8 @@ ALTER POLICY name ON See Also - - + + diff --git a/doc/src/sgml/ref/alter_procedure.sgml b/doc/src/sgml/ref/alter_procedure.sgml new file mode 100644 index 0000000000..dae80076d9 --- /dev/null +++ b/doc/src/sgml/ref/alter_procedure.sgml @@ -0,0 +1,281 @@ + + + + + ALTER PROCEDURE + + + + ALTER PROCEDURE + 7 + SQL - Language Statements + + + + ALTER PROCEDURE + change the definition of a procedure + + + + +ALTER PROCEDURE name [ ( [ [ argmode ] [ argname ] argtype [, ...] ] ) ] + action [ ... ] [ RESTRICT ] +ALTER PROCEDURE name [ ( [ [ argmode ] [ argname ] argtype [, ...] ] ) ] + RENAME TO new_name +ALTER PROCEDURE name [ ( [ [ argmode ] [ argname ] argtype [, ...] ] ) ] + OWNER TO { new_owner | CURRENT_USER | SESSION_USER } +ALTER PROCEDURE name [ ( [ [ argmode ] [ argname ] argtype [, ...] 
] ) ] + SET SCHEMA new_schema +ALTER PROCEDURE name [ ( [ [ argmode ] [ argname ] argtype [, ...] ] ) ] + DEPENDS ON EXTENSION extension_name + +where action is one of: + + [ EXTERNAL ] SECURITY INVOKER | [ EXTERNAL ] SECURITY DEFINER + SET configuration_parameter { TO | = } { value | DEFAULT } + SET configuration_parameter FROM CURRENT + RESET configuration_parameter + RESET ALL + + + + + Description + + + ALTER PROCEDURE changes the definition of a + procedure. + + + + You must own the procedure to use ALTER PROCEDURE. + To change a procedure's schema, you must also have CREATE + privilege on the new schema. + To alter the owner, you must also be a direct or indirect member of the new + owning role, and that role must have CREATE privilege on + the procedure's schema. (These restrictions enforce that altering the owner + doesn't do anything you couldn't do by dropping and recreating the procedure. + However, a superuser can alter ownership of any procedure anyway.) + + + + + Parameters + + + + name + + + The name (optionally schema-qualified) of an existing procedure. If no + argument list is specified, the name must be unique in its schema. + + + + + + argmode + + + + The mode of an argument: IN or VARIADIC. + If omitted, the default is IN. + + + + + + argname + + + + The name of an argument. + Note that ALTER PROCEDURE does not actually pay + any attention to argument names, since only the argument data + types are needed to determine the procedure's identity. + + + + + + argtype + + + + The data type(s) of the procedure's arguments (optionally + schema-qualified), if any. + + + + + + new_name + + + The new name of the procedure. + + + + + + new_owner + + + The new owner of the procedure. Note that if the procedure is + marked SECURITY DEFINER, it will subsequently + execute as the new owner. + + + + + + new_schema + + + The new schema for the procedure. + + + + + + extension_name + + + The name of the extension that the procedure is to depend on. + + + + + + EXTERNAL SECURITY INVOKER + EXTERNAL SECURITY DEFINER + + + + Change whether the procedure is a security definer or not. The + key word EXTERNAL is ignored for SQL + conformance. See for more information about + this capability. + + + + + + configuration_parameter + value + + + Add or change the assignment to be made to a configuration parameter + when the procedure is called. If + value is DEFAULT + or, equivalently, RESET is used, the procedure-local + setting is removed, so that the procedure executes with the value + present in its environment. Use RESET + ALL to clear all procedure-local settings. + SET FROM CURRENT saves the value of the parameter that + is current when ALTER PROCEDURE is executed as the value + to be applied when the procedure is entered. + + + + See and + + for more information about allowed parameter names and values. + + + + + + RESTRICT + + + + Ignored for conformance with the SQL standard. 
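 As a hedged sketch of the SET ... FROM CURRENT form described above (the procedure
 name and signature are hypothetical; work_mem is an ordinary configuration parameter):

ALTER PROCEDURE log_event(text) SET work_mem FROM CURRENT;

 This saves the session's work_mem value at the time the command is executed and applies
 it whenever the procedure is entered.
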
+ + + + + + + + Examples + + + To rename the procedure insert_data with two arguments + of type integer to insert_record: + +ALTER PROCEDURE insert_data(integer, integer) RENAME TO insert_record; + + + + + To change the owner of the procedure insert_data with + two arguments of type integer to joe: + +ALTER PROCEDURE insert_data(integer, integer) OWNER TO joe; + + + + + To change the schema of the procedure insert_data with + two arguments of type integer + to accounting: + +ALTER PROCEDURE insert_data(integer, integer) SET SCHEMA accounting; + + + + + To mark the procedure insert_data(integer, integer) as + being dependent on the extension myext: + +ALTER PROCEDURE insert_data(integer, integer) DEPENDS ON EXTENSION myext; + + + + + To adjust the search path that is automatically set for a procedure: + +ALTER PROCEDURE check_password(text) SET search_path = admin, pg_temp; + + + + + To disable automatic setting of search_path for a procedure: + +ALTER PROCEDURE check_password(text) RESET search_path; + + The procedure will now execute with whatever search path is used by its + caller. + + + + + Compatibility + + + This statement is partially compatible with the ALTER + PROCEDURE statement in the SQL standard. The standard allows more + properties of a procedure to be modified, but does not provide the + ability to rename a procedure, make a procedure a security definer, + attach configuration parameter values to a procedure, + or change the owner, schema, or volatility of a procedure. The standard also + requires the RESTRICT key word, which is optional in + PostgreSQL. + + + + + See Also + + + + + + + + + diff --git a/doc/src/sgml/ref/alter_publication.sgml b/doc/src/sgml/ref/alter_publication.sgml index f064ec5f32..534e598d93 100644 --- a/doc/src/sgml/ref/alter_publication.sgml +++ b/doc/src/sgml/ref/alter_publication.sgml @@ -3,7 +3,7 @@ doc/src/sgml/ref/alter_publication.sgml PostgreSQL documentation --> - + ALTER PUBLICATION @@ -21,12 +21,12 @@ PostgreSQL documentation -ALTER PUBLICATION name ADD TABLE [ ONLY ] table_name [ * ] [, ...] -ALTER PUBLICATION name SET TABLE [ ONLY ] table_name [ * ] [, ...] -ALTER PUBLICATION name DROP TABLE [ ONLY ] table_name [ * ] [, ...] -ALTER PUBLICATION name SET ( publication_parameter [= value] [, ... ] ) -ALTER PUBLICATION name OWNER TO { new_owner | CURRENT_USER | SESSION_USER } -ALTER PUBLICATION name RENAME TO new_name +ALTER PUBLICATION name ADD TABLE [ ONLY ] table_name [ * ] [, ...] +ALTER PUBLICATION name SET TABLE [ ONLY ] table_name [ * ] [, ...] +ALTER PUBLICATION name DROP TABLE [ ONLY ] table_name [ * ] [, ...] +ALTER PUBLICATION name SET ( publication_parameter [= value] [, ... ] ) +ALTER PUBLICATION name OWNER TO { new_owner | CURRENT_USER | SESSION_USER } +ALTER PUBLICATION name RENAME TO new_name @@ -52,7 +52,7 @@ ALTER PUBLICATION name RENAME TO The fourth variant of this command listed in the synopsis can change all of the publication properties specified in - . Properties not mentioned in the + . Properties not mentioned in the command retain their previous settings. @@ -87,10 +87,10 @@ ALTER PUBLICATION name RENAME TO table_name - Name of an existing table. If ONLY is specified before the - table name, only that table is affected. If ONLY is not + Name of an existing table. If ONLY is specified before the + table name, only that table is affected. If ONLY is not specified, the table and all its descendant tables (if any) are - affected. Optionally, * can be specified after the table + affected. 
Optionally, * can be specified after the table name to explicitly indicate that descendant tables are included. @@ -101,7 +101,7 @@ ALTER PUBLICATION name RENAME TO This clause alters publication parameters originally set by - . See there for more information. + . See there for more information. @@ -147,7 +147,7 @@ ALTER PUBLICATION mypublication ADD TABLE users, departments; Compatibility - ALTER PUBLICATION is a PostgreSQL + ALTER PUBLICATION is a PostgreSQL extension. @@ -156,10 +156,10 @@ ALTER PUBLICATION mypublication ADD TABLE users, departments; See Also - - - - + + + + diff --git a/doc/src/sgml/ref/alter_role.sgml b/doc/src/sgml/ref/alter_role.sgml index 8cd8602bc4..573a3e80f7 100644 --- a/doc/src/sgml/ref/alter_role.sgml +++ b/doc/src/sgml/ref/alter_role.sgml @@ -3,7 +3,7 @@ doc/src/sgml/ref/alter_role.sgml PostgreSQL documentation --> - + ALTER ROLE @@ -21,9 +21,9 @@ PostgreSQL documentation -ALTER ROLE role_specification [ WITH ] option [ ... ] +ALTER ROLE role_specification [ WITH ] option [ ... ] -where option can be: +where option can be: SUPERUSER | NOSUPERUSER | CREATEDB | NOCREATEDB @@ -32,20 +32,20 @@ ALTER ROLE role_specification [ WIT | LOGIN | NOLOGIN | REPLICATION | NOREPLICATION | BYPASSRLS | NOBYPASSRLS - | CONNECTION LIMIT connlimit - | [ ENCRYPTED ] PASSWORD 'password' - | VALID UNTIL 'timestamp' + | CONNECTION LIMIT connlimit + | [ ENCRYPTED ] PASSWORD 'password' + | VALID UNTIL 'timestamp' -ALTER ROLE name RENAME TO new_name +ALTER ROLE name RENAME TO new_name -ALTER ROLE { role_specification | ALL } [ IN DATABASE database_name ] SET configuration_parameter { TO | = } { value | DEFAULT } -ALTER ROLE { role_specification | ALL } [ IN DATABASE database_name ] SET configuration_parameter FROM CURRENT -ALTER ROLE { role_specification | ALL } [ IN DATABASE database_name ] RESET configuration_parameter -ALTER ROLE { role_specification | ALL } [ IN DATABASE database_name ] RESET ALL +ALTER ROLE { role_specification | ALL } [ IN DATABASE database_name ] SET configuration_parameter { TO | = } { value | DEFAULT } +ALTER ROLE { role_specification | ALL } [ IN DATABASE database_name ] SET configuration_parameter FROM CURRENT +ALTER ROLE { role_specification | ALL } [ IN DATABASE database_name ] RESET configuration_parameter +ALTER ROLE { role_specification | ALL } [ IN DATABASE database_name ] RESET ALL -where role_specification can be: +where role_specification can be: - [ GROUP ] role_name + role_name | CURRENT_USER | SESSION_USER @@ -62,14 +62,14 @@ ALTER ROLE { role_specification | A The first variant of this command listed in the synopsis can change many of the role attributes that can be specified in - . + . (All the possible attributes are covered, except that there are no options for adding or removing memberships; use - and - for that.) + and + for that.) Attributes not mentioned in the command retain their previous settings. Database superusers can change any of these settings for any role. - Roles having CREATEROLE privilege can change any of these + Roles having CREATEROLE privilege can change any of these settings, but only for non-superuser and non-replication roles. Ordinary roles can only change their own password. @@ -77,13 +77,13 @@ ALTER ROLE { role_specification | A The second variant changes the name of the role. Database superusers can rename any role. - Roles having CREATEROLE privilege can rename non-superuser + Roles having CREATEROLE privilege can rename non-superuser roles. The current session user cannot be renamed. 
(Connect as a different user if you need to do that.) - Because MD5-encrypted passwords use the role name as + Because MD5-encrypted passwords use the role name as cryptographic salt, renaming a role clears its password if the - password is MD5-encrypted. + password is MD5-encrypted. @@ -100,10 +100,10 @@ ALTER ROLE { role_specification | A Whenever the role subsequently starts a new session, the specified value becomes the session default, overriding whatever setting is present in - postgresql.conf or has been received from the postgres + postgresql.conf or has been received from the postgres command line. This only happens at login time; executing - or - does not cause new + or + does not cause new configuration values to be set. Settings set for all databases are overridden by database-specific settings attached to a role. Settings for specific databases or specific roles override @@ -112,7 +112,7 @@ ALTER ROLE { role_specification | A Superusers can change anyone's session defaults. Roles having - CREATEROLE privilege can change defaults for non-superuser + CREATEROLE privilege can change defaults for non-superuser roles. Ordinary roles can only set defaults for themselves. Certain configuration variables cannot be set this way, or can only be set if a superuser issues the command. Only superusers can change a setting @@ -125,7 +125,7 @@ ALTER ROLE { role_specification | A - name + name The name of the role whose attributes are to be altered. @@ -155,8 +155,8 @@ ALTER ROLE { role_specification | A SUPERUSER NOSUPERUSER - CREATEDB - NOCREATEDB + CREATEDB + NOCREATEDB CREATEROLE NOCREATEROLE INHERIT @@ -168,12 +168,12 @@ ALTER ROLE { role_specification | A BYPASSRLS NOBYPASSRLS CONNECTION LIMIT connlimit - [ ENCRYPTED ] PASSWORD password + [ ENCRYPTED ] PASSWORD password VALID UNTIL 'timestamp' These clauses alter attributes originally set by - . For more information, see the + . For more information, see the CREATE ROLE reference page. @@ -209,7 +209,7 @@ ALTER ROLE { role_specification | A role-specific variable setting is removed, so the role will inherit the system-wide default setting in new sessions. Use RESET ALL to clear all role-specific settings. - SET FROM CURRENT saves the session's current value of + SET FROM CURRENT saves the session's current value of the parameter as the role-specific value. If IN DATABASE is specified, the configuration parameter is set or removed for the given role and database only. @@ -217,14 +217,14 @@ ALTER ROLE { role_specification | A Role-specific variable settings take effect only at login; - and - + and + do not process role-specific variable settings. - See and for more information about allowed + See and for more information about allowed parameter names and values. @@ -236,14 +236,14 @@ ALTER ROLE { role_specification | A Notes - Use - to add new roles, and to remove a role. + Use + to add new roles, and to remove a role. ALTER ROLE cannot change a role's memberships. - Use and - + Use and + to do that. @@ -251,7 +251,7 @@ ALTER ROLE { role_specification | A Caution must be exercised when specifying an unencrypted password with this command. The password will be transmitted to the server in cleartext, and it might also be logged in the client's command - history or the server log. + history or the server log. contains a command \password that can be used to change a role's password without exposing the cleartext password. 
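 For instance, a minimal sketch of the safer workflow mentioned in the caution above,
 using psql's \password meta-command (the role name is hypothetical):

\password miriam

 psql prompts for the new password and sends only the encrypted form to the server.
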
@@ -260,7 +260,7 @@ ALTER ROLE { role_specification | A It is also possible to tie a session default to a specific database rather than to a role; see - . + . If there is a conflict, database-role-specific settings override role-specific ones, which in turn override database-specific ones. @@ -288,7 +288,7 @@ ALTER ROLE davide WITH PASSWORD NULL; Change a password expiration date, specifying that the password should expire at midday on 4th May 2015 using - the time zone which is one hour ahead of UTC: + the time zone which is one hour ahead of UTC: ALTER ROLE chris VALID UNTIL 'May 4 12:00:00 2015 +1'; @@ -311,7 +311,7 @@ ALTER ROLE miriam CREATEROLE CREATEDB; Give a role a non-default setting of the - parameter: + parameter: ALTER ROLE worker_bee SET maintenance_work_mem = 100000; @@ -320,7 +320,7 @@ ALTER ROLE worker_bee SET maintenance_work_mem = 100000; Give a role a non-default, database-specific setting of the - parameter: + parameter: ALTER ROLE fred IN DATABASE devel SET client_min_messages = DEBUG; @@ -340,10 +340,10 @@ ALTER ROLE fred IN DATABASE devel SET client_min_messages = DEBUG; See Also - - - - + + + + diff --git a/doc/src/sgml/ref/alter_routine.sgml b/doc/src/sgml/ref/alter_routine.sgml new file mode 100644 index 0000000000..d1699691e1 --- /dev/null +++ b/doc/src/sgml/ref/alter_routine.sgml @@ -0,0 +1,102 @@ + + + + + ALTER ROUTINE + + + + ALTER ROUTINE + 7 + SQL - Language Statements + + + + ALTER ROUTINE + change the definition of a routine + + + + +ALTER ROUTINE name [ ( [ [ argmode ] [ argname ] argtype [, ...] ] ) ] + action [ ... ] [ RESTRICT ] +ALTER ROUTINE name [ ( [ [ argmode ] [ argname ] argtype [, ...] ] ) ] + RENAME TO new_name +ALTER ROUTINE name [ ( [ [ argmode ] [ argname ] argtype [, ...] ] ) ] + OWNER TO { new_owner | CURRENT_USER | SESSION_USER } +ALTER ROUTINE name [ ( [ [ argmode ] [ argname ] argtype [, ...] ] ) ] + SET SCHEMA new_schema +ALTER ROUTINE name [ ( [ [ argmode ] [ argname ] argtype [, ...] ] ) ] + DEPENDS ON EXTENSION extension_name + +where action is one of: + + IMMUTABLE | STABLE | VOLATILE | [ NOT ] LEAKPROOF + [ EXTERNAL ] SECURITY INVOKER | [ EXTERNAL ] SECURITY DEFINER + PARALLEL { UNSAFE | RESTRICTED | SAFE } + COST execution_cost + ROWS result_rows + SET configuration_parameter { TO | = } { value | DEFAULT } + SET configuration_parameter FROM CURRENT + RESET configuration_parameter + RESET ALL + + + + + Description + + + ALTER ROUTINE changes the definition of a routine, which + can be an aggregate function, a normal function, or a procedure. See + under , , + and for the description of the + parameters, more examples, and further details. + + + + + Examples + + + To rename the routine foo for type + integer to foobar: + +ALTER ROUTINE foo(integer) RENAME TO foobar; + + This command will work independent of whether foo is an + aggregate, function, or procedure. + + + + + Compatibility + + + This statement is partially compatible with the ALTER + ROUTINE statement in the SQL standard. See + under + and for more details. Allowing + routine names to refer to aggregate functions is + a PostgreSQL extension. + + + + + See Also + + + + + + + + + + Note that there is no CREATE ROUTINE command. 
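 As one more hedged illustration, reusing the hypothetical routine name from the example
 above, an ownership change works the same way regardless of whether foo is an
 aggregate, function, or procedure:

ALTER ROUTINE foo(integer) OWNER TO joe;
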
+ + + diff --git a/doc/src/sgml/ref/alter_rule.sgml b/doc/src/sgml/ref/alter_rule.sgml index 993a0ceb83..c20bfb35e1 100644 --- a/doc/src/sgml/ref/alter_rule.sgml +++ b/doc/src/sgml/ref/alter_rule.sgml @@ -3,7 +3,7 @@ doc/src/sgml/ref/alter_rule.sgml PostgreSQL documentation --> - + ALTER RULE @@ -21,7 +21,7 @@ PostgreSQL documentation -ALTER RULE name ON table_name RENAME TO new_name +ALTER RULE name ON table_name RENAME TO new_name @@ -44,7 +44,7 @@ ALTER RULE name ON - name + name The name of an existing rule to alter. @@ -53,7 +53,7 @@ ALTER RULE name ON - table_name + table_name The name (optionally schema-qualified) of the table or view that the @@ -63,7 +63,7 @@ ALTER RULE name ON - new_name + new_name The new name for the rule. @@ -97,8 +97,8 @@ ALTER RULE notify_all ON emp RENAME TO notify_me; See Also - - + + diff --git a/doc/src/sgml/ref/alter_schema.sgml b/doc/src/sgml/ref/alter_schema.sgml index dbc5c2d45f..2937214026 100644 --- a/doc/src/sgml/ref/alter_schema.sgml +++ b/doc/src/sgml/ref/alter_schema.sgml @@ -3,7 +3,7 @@ doc/src/sgml/ref/alter_schema.sgml PostgreSQL documentation --> - + ALTER SCHEMA @@ -34,7 +34,7 @@ ALTER SCHEMA name OWNER TO { new_owner - You must own the schema to use ALTER SCHEMA. + You must own the schema to use ALTER SCHEMA. To rename a schema you must also have the CREATE privilege for the database. To alter the owner, you must also be a direct or @@ -92,8 +92,8 @@ ALTER SCHEMA name OWNER TO { new_ownerSee Also - - + + diff --git a/doc/src/sgml/ref/alter_sequence.sgml b/doc/src/sgml/ref/alter_sequence.sgml index 3a04d07ecc..bfd20af6d3 100644 --- a/doc/src/sgml/ref/alter_sequence.sgml +++ b/doc/src/sgml/ref/alter_sequence.sgml @@ -3,7 +3,7 @@ doc/src/sgml/ref/alter_sequence.sgml PostgreSQL documentation --> - + ALTER SEQUENCE @@ -31,7 +31,7 @@ ALTER SEQUENCE [ IF EXISTS ] name [ RESTART [ [ WITH ] restart ] ] [ CACHE cache ] [ [ NO ] CYCLE ] [ OWNED BY { table_name.column_name | NONE } ] -ALTER SEQUENCE [ IF EXISTS ] name OWNER TO { new_owner | CURRENT_USER | SESSION_USER } +ALTER SEQUENCE [ IF EXISTS ] name OWNER TO { new_owner | CURRENT_USER | SESSION_USER } ALTER SEQUENCE [ IF EXISTS ] name RENAME TO new_name ALTER SEQUENCE [ IF EXISTS ] name SET SCHEMA new_schema @@ -47,8 +47,8 @@ ALTER SEQUENCE [ IF EXISTS ] name S - You must own the sequence to use ALTER SEQUENCE. - To change a sequence's schema, you must also have CREATE + You must own the sequence to use ALTER SEQUENCE. + To change a sequence's schema, you must also have CREATE privilege on the new schema. To alter the owner, you must also be a direct or indirect member of the new owning role, and that role must have CREATE privilege on @@ -89,7 +89,7 @@ ALTER SEQUENCE [ IF EXISTS ] name S The optional clause AS data_type changes the data type of the sequence. Valid types are - are smallint, integer, + smallint, integer, and bigint. @@ -159,8 +159,8 @@ ALTER SEQUENCE [ IF EXISTS ] name S The optional clause START WITH start changes the recorded start value of the sequence. This has no effect on the - current sequence value; it simply sets the value - that future ALTER SEQUENCE RESTART commands will use. + current sequence value; it simply sets the value + that future ALTER SEQUENCE RESTART commands will use. @@ -172,13 +172,13 @@ ALTER SEQUENCE [ IF EXISTS ] name S The optional clause RESTART [ WITH restart ] changes the current value of the sequence. This is similar to calling the - setval function with is_called = - false: the specified value will be returned by the - next call of nextval. 
- Writing RESTART with no restart value is equivalent to supplying - the start value that was recorded by CREATE SEQUENCE - or last set by ALTER SEQUENCE START WITH. + setval function with is_called = + false: the specified value will be returned by the + next call of nextval. + Writing RESTART with no restart value is equivalent to supplying + the start value that was recorded by CREATE SEQUENCE + or last set by ALTER SEQUENCE START WITH. @@ -186,7 +186,7 @@ ALTER SEQUENCE [ IF EXISTS ] name S a RESTART operation on a sequence is transactional and blocks concurrent transactions from obtaining numbers from the same sequence. If that's not the desired mode of - operation, setval should be used. + operation, setval should be used. @@ -250,13 +250,13 @@ ALTER SEQUENCE [ IF EXISTS ] name S table must have the same owner and be in the same schema as the sequence. Specifying OWNED BY NONE removes any existing - association, making the sequence free-standing. + association, making the sequence free-standing. - new_owner + new_owner The user name of the new owner of the sequence. @@ -291,7 +291,7 @@ ALTER SEQUENCE [ IF EXISTS ] name S ALTER SEQUENCE will not immediately affect - nextval results in backends, + nextval results in backends, other than the current one, that have preallocated (cached) sequence values. They will use up all cached values prior to noticing the changed sequence generation parameters. The current backend will be affected @@ -299,7 +299,7 @@ ALTER SEQUENCE [ IF EXISTS ] name S - ALTER SEQUENCE does not affect the currval + ALTER SEQUENCE does not affect the currval status for the sequence. (Before PostgreSQL 8.3, it sometimes did.) @@ -332,8 +332,8 @@ ALTER SEQUENCE serial RESTART WITH 105; ALTER SEQUENCE conforms to the SQL - standard, except for the AS, START WITH, - OWNED BY, OWNER TO, RENAME TO, and + standard, except for the AS, START WITH, + OWNED BY, OWNER TO, RENAME TO, and SET SCHEMA clauses, which are PostgreSQL extensions. @@ -343,8 +343,8 @@ ALTER SEQUENCE serial RESTART WITH 105; See Also - - + + diff --git a/doc/src/sgml/ref/alter_server.sgml b/doc/src/sgml/ref/alter_server.sgml index e6cf511853..17e55b093e 100644 --- a/doc/src/sgml/ref/alter_server.sgml +++ b/doc/src/sgml/ref/alter_server.sgml @@ -3,7 +3,7 @@ doc/src/sgml/ref/alter_server.sgml PostgreSQL documentation --> - + ALTER SERVER @@ -22,9 +22,9 @@ PostgreSQL documentation ALTER SERVER name [ VERSION 'new_version' ] - [ OPTIONS ( [ ADD | SET | DROP ] option ['value'] [, ... ] ) ] -ALTER SERVER name OWNER TO { new_owner | CURRENT_USER | SESSION_USER } -ALTER SERVER name RENAME TO new_name + [ OPTIONS ( [ ADD | SET | DROP ] option ['value'] [, ... ] ) ] +ALTER SERVER name OWNER TO { new_owner | CURRENT_USER | SESSION_USER } +ALTER SERVER name RENAME TO new_name @@ -42,7 +42,7 @@ ALTER SERVER name RENAME TO USAGE privilege on the server's foreign-data + have USAGE privilege on the server's foreign-data wrapper. (Note that superusers satisfy all these criteria automatically.) @@ -71,12 +71,12 @@ ALTER SERVER name RENAME TO - OPTIONS ( [ ADD | SET | DROP ] option ['value'] [, ... ] ) + OPTIONS ( [ ADD | SET | DROP ] option ['value'] [, ... ] ) Change options for the - server. ADD, SET, and DROP - specify the action to be performed. ADD is assumed + server. ADD, SET, and DROP + specify the action to be performed. ADD is assumed if no operation is explicitly specified. Option names must be unique; names and values are also validated using the server's foreign-data wrapper library. 
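 For example, a sketch of dropping a previously defined option (the server and option
 names follow the hypothetical examples shown below):

ALTER SERVER foo OPTIONS (DROP dbname);
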
@@ -85,7 +85,7 @@ ALTER SERVER name RENAME TO - new_owner + new_owner The user name of the new owner of the foreign server. @@ -108,15 +108,15 @@ ALTER SERVER name RENAME TO Examples - Alter server foo, add connection options: + Alter server foo, add connection options: ALTER SERVER foo OPTIONS (host 'foo', dbname 'foodb'); - Alter server foo, change version, - change host option: + Alter server foo, change version, + change host option: ALTER SERVER foo VERSION '8.4' OPTIONS (SET host 'baz'); @@ -136,8 +136,8 @@ ALTER SERVER foo VERSION '8.4' OPTIONS (SET host 'baz'); See Also - - + + diff --git a/doc/src/sgml/ref/alter_statistics.sgml b/doc/src/sgml/ref/alter_statistics.sgml index 4f25669852..58c7ed020d 100644 --- a/doc/src/sgml/ref/alter_statistics.sgml +++ b/doc/src/sgml/ref/alter_statistics.sgml @@ -3,7 +3,7 @@ doc/src/sgml/ref/alter_statistics.sgml PostgreSQL documentation --> - + ALTER STATISTICS @@ -23,7 +23,7 @@ PostgreSQL documentation -ALTER STATISTICS name OWNER TO { new_owner | CURRENT_USER | SESSION_USER } +ALTER STATISTICS name OWNER TO { new_owner | CURRENT_USER | SESSION_USER } ALTER STATISTICS name RENAME TO new_name ALTER STATISTICS name SET SCHEMA new_schema @@ -39,9 +39,9 @@ ALTER STATISTICS name SET SCHEMA - You must own the statistics object to use ALTER STATISTICS. + You must own the statistics object to use ALTER STATISTICS. To change a statistics object's schema, you must also - have CREATE privilege on the new schema. + have CREATE privilege on the new schema. To alter the owner, you must also be a direct or indirect member of the new owning role, and that role must have CREATE privilege on the statistics object's schema. (These restrictions enforce that altering @@ -67,7 +67,7 @@ ALTER STATISTICS name SET SCHEMA - new_owner + new_owner The user name of the new owner of the statistics object. @@ -109,8 +109,8 @@ ALTER STATISTICS name SET SCHEMA See Also - - + + diff --git a/doc/src/sgml/ref/alter_subscription.sgml b/doc/src/sgml/ref/alter_subscription.sgml index b1b7765d76..6dfb2e4d3e 100644 --- a/doc/src/sgml/ref/alter_subscription.sgml +++ b/doc/src/sgml/ref/alter_subscription.sgml @@ -3,7 +3,7 @@ doc/src/sgml/ref/alter_subscription.sgml PostgreSQL documentation --> - + ALTER SUBSCRIPTION @@ -21,14 +21,14 @@ PostgreSQL documentation -ALTER SUBSCRIPTION name CONNECTION 'conninfo' -ALTER SUBSCRIPTION name SET PUBLICATION publication_name [, ...] [ WITH ( set_publication_option [= value] [, ... ] ) ] -ALTER SUBSCRIPTION name REFRESH PUBLICATION [ WITH ( refresh_option [= value] [, ... ] ) ] -ALTER SUBSCRIPTION name ENABLE -ALTER SUBSCRIPTION name DISABLE -ALTER SUBSCRIPTION name SET ( subscription_parameter [= value] [, ... ] ) -ALTER SUBSCRIPTION name OWNER TO { new_owner | CURRENT_USER | SESSION_USER } -ALTER SUBSCRIPTION name RENAME TO new_name +ALTER SUBSCRIPTION name CONNECTION 'conninfo' +ALTER SUBSCRIPTION name SET PUBLICATION publication_name [, ...] [ WITH ( set_publication_option [= value] [, ... ] ) ] +ALTER SUBSCRIPTION name REFRESH PUBLICATION [ WITH ( refresh_option [= value] [, ... ] ) ] +ALTER SUBSCRIPTION name ENABLE +ALTER SUBSCRIPTION name DISABLE +ALTER SUBSCRIPTION name SET ( subscription_parameter [= value] [, ... ] ) +ALTER SUBSCRIPTION name OWNER TO { new_owner | CURRENT_USER | SESSION_USER } +ALTER SUBSCRIPTION name RENAME TO new_name @@ -38,11 +38,11 @@ ALTER SUBSCRIPTION name RENAME TO < ALTER SUBSCRIPTION can change most of the subscription properties that can be specified - in . + in . 
- You must own the subscription to use ALTER SUBSCRIPTION. + You must own the subscription to use ALTER SUBSCRIPTION. To alter the owner, you must also be a direct or indirect member of the new owning role. The new owner has to be a superuser. (Currently, all subscription owners must be superusers, so the owner checks @@ -68,7 +68,7 @@ ALTER SUBSCRIPTION name RENAME TO < This clause alters the connection property originally set by - . See there for more + . See there for more information. @@ -79,7 +79,7 @@ ALTER SUBSCRIPTION name RENAME TO < Changes list of subscribed publications. See - for more information. + for more information. By default this command will also act like REFRESH PUBLICATION. @@ -162,7 +162,7 @@ ALTER SUBSCRIPTION name RENAME TO < This clause alters parameters originally set by - . See there for more + . See there for more information. The allowed options are slot_name and synchronous_commit @@ -211,7 +211,7 @@ ALTER SUBSCRIPTION mysub DISABLE; Compatibility - ALTER SUBSCRIPTION is a PostgreSQL + ALTER SUBSCRIPTION is a PostgreSQL extension. @@ -220,10 +220,10 @@ ALTER SUBSCRIPTION mysub DISABLE; See Also - - - - + + + + diff --git a/doc/src/sgml/ref/alter_system.sgml b/doc/src/sgml/ref/alter_system.sgml index b234793f3e..5e41f7f644 100644 --- a/doc/src/sgml/ref/alter_system.sgml +++ b/doc/src/sgml/ref/alter_system.sgml @@ -3,7 +3,7 @@ doc/src/sgml/ref/alter_system.sgml PostgreSQL documentation --> - + ALTER SYSTEM @@ -21,9 +21,9 @@ PostgreSQL documentation -ALTER SYSTEM SET configuration_parameter { TO | = } { value | 'value' | DEFAULT } +ALTER SYSTEM SET configuration_parameter { TO | = } { value | 'value' | DEFAULT } -ALTER SYSTEM RESET configuration_parameter +ALTER SYSTEM RESET configuration_parameter ALTER SYSTEM RESET ALL @@ -50,8 +50,8 @@ ALTER SYSTEM RESET ALL the next server configuration reload, or after the next server restart in the case of parameters that can only be changed at server start. A server configuration reload can be commanded by calling the SQL - function pg_reload_conf(), running pg_ctl reload, - or sending a SIGHUP signal to the main server process. + function pg_reload_conf(), running pg_ctl reload, + or sending a SIGHUP signal to the main server process. @@ -70,7 +70,7 @@ ALTER SYSTEM RESET ALL Name of a settable configuration parameter. Available parameters are - documented in . + documented in . @@ -94,13 +94,13 @@ ALTER SYSTEM RESET ALL Notes - This command can't be used to set , - nor parameters that are not allowed in postgresql.conf - (e.g., preset options). + This command can't be used to set , + nor parameters that are not allowed in postgresql.conf + (e.g., preset options). - See for other ways to set the parameters. + See for other ways to set the parameters. @@ -108,7 +108,7 @@ ALTER SYSTEM RESET ALL Examples - Set the wal_level: + Set the wal_level: ALTER SYSTEM SET wal_level = replica; @@ -116,7 +116,7 @@ ALTER SYSTEM SET wal_level = replica; Undo that, restoring whatever setting was effective - in postgresql.conf: + in postgresql.conf: ALTER SYSTEM RESET wal_level; @@ -135,8 +135,8 @@ ALTER SYSTEM RESET wal_level; See Also - - + + diff --git a/doc/src/sgml/ref/alter_table.sgml b/doc/src/sgml/ref/alter_table.sgml index 69600321e6..f13a6cd944 100644 --- a/doc/src/sgml/ref/alter_table.sgml +++ b/doc/src/sgml/ref/alter_table.sgml @@ -3,7 +3,7 @@ doc/src/sgml/ref/alter_table.sgml PostgreSQL documentation --> - + ALTER TABLE @@ -21,75 +21,117 @@ PostgreSQL documentation -ALTER TABLE [ IF EXISTS ] [ ONLY ] name [ * ] - action [, ... 
] -ALTER TABLE [ IF EXISTS ] [ ONLY ] name [ * ] - RENAME [ COLUMN ] column_name TO new_column_name -ALTER TABLE [ IF EXISTS ] [ ONLY ] name [ * ] - RENAME CONSTRAINT constraint_name TO new_constraint_name -ALTER TABLE [ IF EXISTS ] name - RENAME TO new_name -ALTER TABLE [ IF EXISTS ] name - SET SCHEMA new_schema -ALTER TABLE ALL IN TABLESPACE name [ OWNED BY role_name [, ... ] ] - SET TABLESPACE new_tablespace [ NOWAIT ] -ALTER TABLE [ IF EXISTS ] name - ATTACH PARTITION partition_name FOR VALUES partition_bound_spec -ALTER TABLE [ IF EXISTS ] name - DETACH PARTITION partition_name - -where action is one of: - - ADD [ COLUMN ] [ IF NOT EXISTS ] column_name data_type [ COLLATE collation ] [ column_constraint [ ... ] ] - DROP [ COLUMN ] [ IF EXISTS ] column_name [ RESTRICT | CASCADE ] - ALTER [ COLUMN ] column_name [ SET DATA ] TYPE data_type [ COLLATE collation ] [ USING expression ] - ALTER [ COLUMN ] column_name SET DEFAULT expression - ALTER [ COLUMN ] column_name DROP DEFAULT - ALTER [ COLUMN ] column_name { SET | DROP } NOT NULL - ALTER [ COLUMN ] column_name ADD GENERATED { ALWAYS | BY DEFAULT } AS IDENTITY [ ( sequence_options ) ] - ALTER [ COLUMN ] column_name { SET GENERATED { ALWAYS | BY DEFAULT } | SET sequence_option | RESTART [ [ WITH ] restart ] } [...] - ALTER [ COLUMN ] column_name DROP IDENTITY [ IF EXISTS ] - ALTER [ COLUMN ] column_name SET STATISTICS integer - ALTER [ COLUMN ] column_name SET ( attribute_option = value [, ... ] ) - ALTER [ COLUMN ] column_name RESET ( attribute_option [, ... ] ) - ALTER [ COLUMN ] column_name SET STORAGE { PLAIN | EXTERNAL | EXTENDED | MAIN } - ADD table_constraint [ NOT VALID ] - ADD table_constraint_using_index - ALTER CONSTRAINT constraint_name [ DEFERRABLE | NOT DEFERRABLE ] [ INITIALLY DEFERRED | INITIALLY IMMEDIATE ] - VALIDATE CONSTRAINT constraint_name - DROP CONSTRAINT [ IF EXISTS ] constraint_name [ RESTRICT | CASCADE ] - DISABLE TRIGGER [ trigger_name | ALL | USER ] - ENABLE TRIGGER [ trigger_name | ALL | USER ] - ENABLE REPLICA TRIGGER trigger_name - ENABLE ALWAYS TRIGGER trigger_name - DISABLE RULE rewrite_rule_name - ENABLE RULE rewrite_rule_name - ENABLE REPLICA RULE rewrite_rule_name - ENABLE ALWAYS RULE rewrite_rule_name +ALTER TABLE [ IF EXISTS ] [ ONLY ] name [ * ] + action [, ... ] +ALTER TABLE [ IF EXISTS ] [ ONLY ] name [ * ] + RENAME [ COLUMN ] column_name TO new_column_name +ALTER TABLE [ IF EXISTS ] [ ONLY ] name [ * ] + RENAME CONSTRAINT constraint_name TO new_constraint_name +ALTER TABLE [ IF EXISTS ] name + RENAME TO new_name +ALTER TABLE [ IF EXISTS ] name + SET SCHEMA new_schema +ALTER TABLE ALL IN TABLESPACE name [ OWNED BY role_name [, ... ] ] + SET TABLESPACE new_tablespace [ NOWAIT ] +ALTER TABLE [ IF EXISTS ] name + ATTACH PARTITION partition_name { FOR VALUES partition_bound_spec | DEFAULT } +ALTER TABLE [ IF EXISTS ] name + DETACH PARTITION partition_name + +where action is one of: + + ADD [ COLUMN ] [ IF NOT EXISTS ] column_name data_type [ COLLATE collation ] [ column_constraint [ ... 
] ] + DROP [ COLUMN ] [ IF EXISTS ] column_name [ RESTRICT | CASCADE ] + ALTER [ COLUMN ] column_name [ SET DATA ] TYPE data_type [ COLLATE collation ] [ USING expression ] + ALTER [ COLUMN ] column_name SET DEFAULT expression + ALTER [ COLUMN ] column_name DROP DEFAULT + ALTER [ COLUMN ] column_name { SET | DROP } NOT NULL + ALTER [ COLUMN ] column_name ADD GENERATED { ALWAYS | BY DEFAULT } AS IDENTITY [ ( sequence_options ) ] + ALTER [ COLUMN ] column_name { SET GENERATED { ALWAYS | BY DEFAULT } | SET sequence_option | RESTART [ [ WITH ] restart ] } [...] + ALTER [ COLUMN ] column_name DROP IDENTITY [ IF EXISTS ] + ALTER [ COLUMN ] column_name SET STATISTICS integer + ALTER [ COLUMN ] column_name SET ( attribute_option = value [, ... ] ) + ALTER [ COLUMN ] column_name RESET ( attribute_option [, ... ] ) + ALTER [ COLUMN ] column_name SET STORAGE { PLAIN | EXTERNAL | EXTENDED | MAIN } + ADD table_constraint [ NOT VALID ] + ADD table_constraint_using_index + ALTER CONSTRAINT constraint_name [ DEFERRABLE | NOT DEFERRABLE ] [ INITIALLY DEFERRED | INITIALLY IMMEDIATE ] + VALIDATE CONSTRAINT constraint_name + DROP CONSTRAINT [ IF EXISTS ] constraint_name [ RESTRICT | CASCADE ] + DISABLE TRIGGER [ trigger_name | ALL | USER ] + ENABLE TRIGGER [ trigger_name | ALL | USER ] + ENABLE REPLICA TRIGGER trigger_name + ENABLE ALWAYS TRIGGER trigger_name + DISABLE RULE rewrite_rule_name + ENABLE RULE rewrite_rule_name + ENABLE REPLICA RULE rewrite_rule_name + ENABLE ALWAYS RULE rewrite_rule_name DISABLE ROW LEVEL SECURITY ENABLE ROW LEVEL SECURITY FORCE ROW LEVEL SECURITY NO FORCE ROW LEVEL SECURITY - CLUSTER ON index_name + CLUSTER ON index_name SET WITHOUT CLUSTER SET WITH OIDS SET WITHOUT OIDS - SET TABLESPACE new_tablespace + SET TABLESPACE new_tablespace SET { LOGGED | UNLOGGED } - SET ( storage_parameter = value [, ... ] ) - RESET ( storage_parameter [, ... ] ) - INHERIT parent_table - NO INHERIT parent_table - OF type_name + SET ( storage_parameter = value [, ... ] ) + RESET ( storage_parameter [, ... ] ) + INHERIT parent_table + NO INHERIT parent_table + OF type_name NOT OF - OWNER TO { new_owner | CURRENT_USER | SESSION_USER } - REPLICA IDENTITY { DEFAULT | USING INDEX index_name | FULL | NOTHING } + OWNER TO { new_owner | CURRENT_USER | SESSION_USER } + REPLICA IDENTITY { DEFAULT | USING INDEX index_name | FULL | NOTHING } + +and partition_bound_spec is: + +IN ( { numeric_literal | string_literal | TRUE | FALSE | NULL } [, ...] ) | +FROM ( { numeric_literal | string_literal | TRUE | FALSE | MINVALUE | MAXVALUE } [, ...] ) + TO ( { numeric_literal | string_literal | TRUE | FALSE | MINVALUE | MAXVALUE } [, ...] ) | +WITH ( MODULUS numeric_literal, REMAINDER numeric_literal ) + +and column_constraint is: + +[ CONSTRAINT constraint_name ] +{ NOT NULL | + NULL | + CHECK ( expression ) [ NO INHERIT ] | + DEFAULT default_expr | + GENERATED { ALWAYS | BY DEFAULT } AS IDENTITY [ ( sequence_options ) ] | + UNIQUE index_parameters | + PRIMARY KEY index_parameters | + REFERENCES reftable [ ( refcolumn ) ] [ MATCH FULL | MATCH PARTIAL | MATCH SIMPLE ] + [ ON DELETE action ] [ ON UPDATE action ] } +[ DEFERRABLE | NOT DEFERRABLE ] [ INITIALLY DEFERRED | INITIALLY IMMEDIATE ] + +and table_constraint is: + +[ CONSTRAINT constraint_name ] +{ CHECK ( expression ) [ NO INHERIT ] | + UNIQUE ( column_name [, ... ] ) index_parameters | + PRIMARY KEY ( column_name [, ... ] ) index_parameters | + EXCLUDE [ USING index_method ] ( exclude_element WITH operator [, ... 
] ) index_parameters [ WHERE ( predicate ) ] | + FOREIGN KEY ( column_name [, ... ] ) REFERENCES reftable [ ( refcolumn [, ... ] ) ] + [ MATCH FULL | MATCH PARTIAL | MATCH SIMPLE ] [ ON DELETE action ] [ ON UPDATE action ] } +[ DEFERRABLE | NOT DEFERRABLE ] [ INITIALLY DEFERRED | INITIALLY IMMEDIATE ] + +and table_constraint_using_index is: + + [ CONSTRAINT constraint_name ] + { UNIQUE | PRIMARY KEY } USING INDEX index_name + [ DEFERRABLE | NOT DEFERRABLE ] [ INITIALLY DEFERRED | INITIALLY IMMEDIATE ] -and table_constraint_using_index is: +index_parameters in UNIQUE, PRIMARY KEY, and EXCLUDE constraints are: - [ CONSTRAINT constraint_name ] - { UNIQUE | PRIMARY KEY } USING INDEX index_name - [ DEFERRABLE | NOT DEFERRABLE ] [ INITIALLY DEFERRED | INITIALLY IMMEDIATE ] +[ INCLUDE ( column_name [, ... ] ) ] +[ WITH ( storage_parameter [= value] [, ... ] ) ] +[ USING INDEX TABLESPACE tablespace_name ] + +exclude_element in an EXCLUDE constraint is: + +{ column_name | ( expression ) } [ opclass ] [ ASC | DESC ] [ NULLS { FIRST | LAST } ] @@ -109,7 +151,7 @@ ALTER TABLE [ IF EXISTS ] name This form adds a new column to the table, using the same syntax as - . If IF NOT EXISTS + . If IF NOT EXISTS is specified and a column already exists with this name, no error is thrown. @@ -126,7 +168,7 @@ ALTER TABLE [ IF EXISTS ] name Multivariate statistics referencing the dropped column will also be removed if the removal of the column would cause the statistics to contain data for only a single column. - You will need to say CASCADE if anything outside the table + You will need to say CASCADE if anything outside the table depends on the column, for example, foreign key references or views. If IF EXISTS is specified and the column does not exist, no error is thrown. In this case a notice @@ -162,7 +204,7 @@ ALTER TABLE [ IF EXISTS ] name These forms set or remove the default value for a column. Default values only apply in subsequent INSERT - or UPDATE commands; they do not cause rows already in the + or UPDATE commands; they do not cause rows already in the table to change. @@ -174,7 +216,7 @@ ALTER TABLE [ IF EXISTS ] name These forms change whether a column is marked to allow null values or to reject null values. You can only use SET - NOT NULL when the column contains no null values. + NOT NULL when the column contains no null values. @@ -182,7 +224,7 @@ ALTER TABLE [ IF EXISTS ] name on a column if it is marked NOT NULL in the parent table. To drop the NOT NULL constraint from all the partitions, perform DROP NOT NULL on the parent - table. Even if there is no NOT NULL constraint on the + table. Even if there is no NOT NULL constraint on the parent, such a constraint can still be added to individual partitions, if desired; that is, the children can disallow nulls even if the parent allows them, but not the other way around. @@ -198,7 +240,7 @@ ALTER TABLE [ IF EXISTS ] name These forms change whether a column is an identity column or change the generation attribute of an existing identity column. - See for details. + See for details. @@ -216,7 +258,7 @@ ALTER TABLE [ IF EXISTS ] name These forms alter the sequence that underlies an existing identity column. sequence_option is an option - supported by such + supported by such as INCREMENT BY. @@ -228,13 +270,13 @@ ALTER TABLE [ IF EXISTS ] name This form sets the per-column statistics-gathering target for subsequent - operations. + operations. 
The target can be set in the range 0 to 10000; alternatively, set it to -1 to revert to using the system default statistics - target (). + target (). For more information on the use of statistics by the PostgreSQL query planner, refer to - . + . SET STATISTICS acquires a @@ -244,22 +286,22 @@ ALTER TABLE [ IF EXISTS ] name - SET ( attribute_option = value [, ... ] ) - RESET ( attribute_option [, ... ] ) + SET ( attribute_option = value [, ... ] ) + RESET ( attribute_option [, ... ] ) This form sets or resets per-attribute options. Currently, the only - defined per-attribute options are n_distinct and - n_distinct_inherited, which override the + defined per-attribute options are n_distinct and + n_distinct_inherited, which override the number-of-distinct-values estimates made by subsequent - - operations. n_distinct affects the statistics for the table - itself, while n_distinct_inherited affects the statistics + + operations. n_distinct affects the statistics for the table + itself, while n_distinct_inherited affects the statistics gathered for the table plus its inheritance children. When set to a - positive value, ANALYZE will assume that the column contains + positive value, ANALYZE will assume that the column contains exactly the specified number of distinct nonnull values. When set to a negative value, which must be greater - than or equal to -1, ANALYZE will assume that the number of + than or equal to -1, ANALYZE will assume that the number of distinct nonnull values in the column is linear in the size of the table; the exact count is to be computed by multiplying the estimated table size by the absolute value of the given number. For example, @@ -270,7 +312,7 @@ ALTER TABLE [ IF EXISTS ] name until query planning time. Specify a value of 0 to revert to estimating the number of distinct values normally. For more information on the use of statistics by the PostgreSQL query - planner, refer to . + planner, refer to . Changing per-attribute options acquires a @@ -290,7 +332,7 @@ ALTER TABLE [ IF EXISTS ] name This form sets the storage mode for a column. This controls whether this - column is held inline or in a secondary TOAST table, and + column is held inline or in a secondary TOAST table, and whether the data should be compressed or not. PLAIN must be used for fixed-length values such as integer and is @@ -302,19 +344,19 @@ ALTER TABLE [ IF EXISTS ] name Use of EXTERNAL will make substring operations on very large text and bytea values run faster, at the penalty of increased storage space. Note that - SET STORAGE doesn't itself change anything in the table, + SET STORAGE doesn't itself change anything in the table, it just sets the strategy to be pursued during future table updates. - See for more information. + See for more information. - ADD table_constraint [ NOT VALID ] + ADD table_constraint [ NOT VALID ] This form adds a new constraint to a table using the same syntax as - , plus the option NOT + , plus the option NOT VALID, which is currently only allowed for foreign key and CHECK constraints. If the constraint is marked NOT VALID, the @@ -326,16 +368,28 @@ ALTER TABLE [ IF EXISTS ] name specified check constraints). But the database will not assume that the constraint holds for all rows in the table, until it is validated by using the VALIDATE - CONSTRAINT option. + CONSTRAINT option. Foreign key constraints on partitioned + tables may not be declared NOT VALID at present. 
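As a sketch of the two-step pattern described above, assuming a hypothetical, non-partitioned orders table that references a customers table (all names here are placeholders):

ALTER TABLE orders
    ADD CONSTRAINT orders_customer_id_fkey
        FOREIGN KEY (customer_id) REFERENCES customers (id) NOT VALID;

-- New and updated rows are checked immediately; pre-existing rows are not.
-- Later, verify the pre-existing rows:
ALTER TABLE orders VALIDATE CONSTRAINT orders_customer_id_fkey;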
+ + + + The addition of a foreign key constraint requires a + SHARE ROW EXCLUSIVE lock on the referenced table. + + + + Additional restrictions apply when unique or primary key constraints + are added to partitioned tables; see . + - ADD table_constraint_using_index + ADD table_constraint_using_index - This form adds a new PRIMARY KEY or UNIQUE + This form adds a new PRIMARY KEY or UNIQUE constraint to a table based on an existing unique index. All the columns of the index will be included in the constraint. @@ -344,14 +398,14 @@ ALTER TABLE [ IF EXISTS ] name The index cannot have expression columns nor be a partial index. Also, it must be a b-tree index with default sort ordering. These restrictions ensure that the index is equivalent to one that would be - built by a regular ADD PRIMARY KEY or ADD UNIQUE + built by a regular ADD PRIMARY KEY or ADD UNIQUE command. - If PRIMARY KEY is specified, and the index's columns are not - already marked NOT NULL, then this command will attempt to - do ALTER COLUMN SET NOT NULL against each such column. + If PRIMARY KEY is specified, and the index's columns are not + already marked NOT NULL, then this command will attempt to + do ALTER COLUMN SET NOT NULL against each such column. That requires a full table scan to verify the column(s) contain no nulls. In all other cases, this is a fast operation. @@ -363,19 +417,23 @@ ALTER TABLE [ IF EXISTS ] name - After this command is executed, the index is owned by the + After this command is executed, the index is owned by the constraint, in the same way as if the index had been built by - a regular ADD PRIMARY KEY or ADD UNIQUE + a regular ADD PRIMARY KEY or ADD UNIQUE command. In particular, dropping the constraint will make the index disappear too. + + This form is not currently supported on partitioned tables. + + Adding a constraint using an existing index can be helpful in situations where a new constraint needs to be added without blocking table updates for a long time. To do that, create the index using - CREATE INDEX CONCURRENTLY, and then install it as an + CREATE INDEX CONCURRENTLY, and then install it as an official constraint using this syntax. See the example below. @@ -421,7 +479,8 @@ ALTER TABLE [ IF EXISTS ] name DROP CONSTRAINT [ IF EXISTS ] - This form drops the specified constraint on a table. + This form drops the specified constraint on a table, along with + any index underlying the constraint. If IF EXISTS is specified and the constraint does not exist, no error is thrown. In this case a notice is issued instead. @@ -445,14 +504,30 @@ ALTER TABLE [ IF EXISTS ] name requires superuser privileges; it should be done with caution since of course the integrity of the constraint cannot be guaranteed if the triggers are not executed. + + + The trigger firing mechanism is also affected by the configuration - variable . Simply enabled - triggers will fire when the replication role is origin - (the default) or local. Triggers configured as ENABLE - REPLICA will only fire if the session is in replica + variable . Simply enabled + triggers (the default) will fire when the replication role is origin + (the default) or local. Triggers configured as ENABLE + REPLICA will only fire if the session is in replica mode, and triggers configured as ENABLE ALWAYS will - fire regardless of the current replication mode. + fire regardless of the current replication role. + + + + The effect of this mechanism is that in the default configuration, + triggers do not fire on replicas. 
This is useful because if a trigger + is used on the origin to propagate data between tables, then the + replication system will also replicate the propagated data, and the + trigger should not fire a second time on the replica, because that would + lead to duplication. However, if a trigger is used for another purpose + such as creating external alerts, then it might be appropriate to set it + to ENABLE ALWAYS so that it is also fired on + replicas. + This command acquires a SHARE ROW EXCLUSIVE lock. @@ -470,6 +545,12 @@ ALTER TABLE [ IF EXISTS ] name are always applied in order to keep views working even if the current session is in a non-default replication role. + + + The rule firing mechanism is also affected by the configuration variable + , analogous to triggers as + described above. + @@ -483,7 +564,7 @@ ALTER TABLE [ IF EXISTS ] name even if row level security is disabled - in this case, the policies will NOT be applied and the policies will be ignored. See also - . + . @@ -498,7 +579,7 @@ ALTER TABLE [ IF EXISTS ] name disabled (the default) then row level security will not be applied when the user is the table owner. See also - . + . @@ -508,7 +589,7 @@ ALTER TABLE [ IF EXISTS ] name This form selects the default index for future - + operations. It does not actually re-cluster the table. @@ -522,7 +603,7 @@ ALTER TABLE [ IF EXISTS ] name This form removes the most recently used - + index specification from the table. This affects future cluster operations that don't specify an index. @@ -537,14 +618,14 @@ ALTER TABLE [ IF EXISTS ] name This form adds an oid system column to the - table (see ). + table (see ). It does nothing if the table already has OIDs. - Note that this is not equivalent to ADD COLUMN oid oid; + Note that this is not equivalent to ADD COLUMN oid oid; that would add a normal column that happened to be named - oid, not a system column. + oid, not a system column. @@ -582,7 +663,7 @@ ALTER TABLE [ IF EXISTS ] name information_schema relations are not considered part of the system catalogs and will be moved. See also - . + . @@ -592,25 +673,25 @@ ALTER TABLE [ IF EXISTS ] name This form changes the table from unlogged to logged or vice-versa - (see ). It cannot be applied + (see ). It cannot be applied to a temporary table. - SET ( storage_parameter = value [, ... ] ) + SET ( storage_parameter = value [, ... ] ) This form changes one or more storage parameters for the table. See - + for details on the available parameters. Note that the table contents will not be modified immediately by this command; depending on the parameter you might need to rewrite the table to get the desired effects. - That can be done with VACUUM - FULL, or one of the forms - of ALTER TABLE that forces a table rewrite. + That can be done with VACUUM + FULL, or one of the forms + of ALTER TABLE that forces a table rewrite. For planner related parameters, changes will take effect from the next time the table is locked so currently executing queries will not be affected. @@ -618,38 +699,38 @@ ALTER TABLE [ IF EXISTS ] name SHARE UPDATE EXCLUSIVE lock will be taken for - fillfactor and autovacuum storage parameters, as well as the + fillfactor, toast and autovacuum storage parameters, as well as the following planner related parameters: - effective_io_concurrency, parallel_workers, seq_page_cost - random_page_cost, n_distinct and n_distinct_inherited. + effective_io_concurrency, parallel_workers, seq_page_cost, + random_page_cost, n_distinct and n_distinct_inherited. 
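For illustration, a storage parameter on a hypothetical table named stock could be set and later reset. The command only records the new setting; a rewrite (for example VACUUM FULL) would still be needed before existing rows are laid out with the new fillfactor:

ALTER TABLE stock SET (fillfactor = 70);
-- rewrite the table if the new fillfactor should also apply to existing rows
ALTER TABLE stock RESET (fillfactor);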
- While CREATE TABLE allows OIDS to be specified + While CREATE TABLE allows OIDS to be specified in the WITH (storage_parameter) syntax, - ALTER TABLE does not treat OIDS as a - storage parameter. Instead use the SET WITH OIDS - and SET WITHOUT OIDS forms to change OID status. + class="parameter">storage_parameter) syntax, + ALTER TABLE does not treat OIDS as a + storage parameter. Instead use the SET WITH OIDS + and SET WITHOUT OIDS forms to change OID status. - RESET ( storage_parameter [, ... ] ) + RESET ( storage_parameter [, ... ] ) This form resets one or more storage parameters to their - defaults. As with SET, a table rewrite might be + defaults. As with SET, a table rewrite might be needed to update the table entirely. - INHERIT parent_table + INHERIT parent_table This form adds the target table as a new child of the specified parent @@ -677,7 +758,7 @@ ALTER TABLE [ IF EXISTS ] name - NO INHERIT parent_table + NO INHERIT parent_table This form removes the target table from the list of children of the @@ -689,15 +770,15 @@ ALTER TABLE [ IF EXISTS ] name - OF type_name + OF type_name This form links the table to a composite type as though CREATE - TABLE OF had formed it. The table's list of column names and types + TABLE OF had formed it. The table's list of column names and types must precisely match that of the composite type; the presence of - an oid system column is permitted to differ. The table must + an oid system column is permitted to differ. The table must not inherit from any other table. These restrictions ensure - that CREATE TABLE OF would permit an equivalent table + that CREATE TABLE OF would permit an equivalent table definition. @@ -713,7 +794,7 @@ ALTER TABLE [ IF EXISTS ] name - OWNER + OWNER TO This form changes the owner of the table, sequence, view, materialized view, @@ -722,19 +803,19 @@ ALTER TABLE [ IF EXISTS ] name - + REPLICA IDENTITY This form changes the information which is written to the write-ahead log to identify rows which are updated or deleted. This option has no effect - except when logical replication is in use. DEFAULT + except when logical replication is in use. DEFAULT (the default for non-system tables) records the - old values of the columns of the primary key, if any. USING INDEX + old values of the columns of the primary key, if any. USING INDEX records the old values of the columns covered by the named index, which must be unique, not partial, not deferrable, and include only columns marked - NOT NULL. FULL records the old values of all columns - in the row. NOTHING records no information about the old row. + NOT NULL. FULL records the old values of all columns + in the row. NOTHING records no information about the old row. (This is the default for system tables.) In all cases, no old values are logged unless at least one of the columns that would be logged differs between the old and new versions of the row. @@ -747,8 +828,10 @@ ALTER TABLE [ IF EXISTS ] name The RENAME forms change the name of a table - (or an index, sequence, view, materialized view, or foreign table), the name - of an individual column in a table, or the name of a constraint of the table. + (or an index, sequence, view, materialized view, or foreign table), the + name of an individual column in a table, or the name of a constraint of + the table. When renaming a constraint that has an underlying index, + the index is renamed as well. There is no effect on the stored data. 
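For example, with placeholder names, a column and a constraint can be renamed; if the constraint has an underlying index, that index takes the new name as well:

ALTER TABLE distributors RENAME COLUMN address TO city;
ALTER TABLE distributors RENAME CONSTRAINT zipchk TO zip_check;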
@@ -765,20 +848,32 @@ ALTER TABLE [ IF EXISTS ] name - ATTACH PARTITION partition_name FOR VALUES partition_bound_spec + ATTACH PARTITION partition_name { FOR VALUES partition_bound_spec | DEFAULT } This form attaches an existing table (which might itself be partitioned) - as a partition of the target table using the same syntax for - partition_bound_spec as - . The partition bound specification + as a partition of the target table. The table can be attached + as a partition for specific values using FOR VALUES + or as a default partition by using DEFAULT. + For each index in the target table, a corresponding + one will be created in the attached table; or, if an equivalent + index already exists, will be attached to the target table's index, + as if ALTER INDEX ATTACH PARTITION had been executed. + + + + A partition using FOR VALUES uses same syntax for + partition_bound_spec as + . The partition bound specification must correspond to the partitioning strategy and partition key of the target table. The table to be attached must have all the same columns as the target table and no more; moreover, the column types must also match. Also, it must have all the NOT NULL and CHECK constraints of the target table. Currently - UNIQUE, PRIMARY KEY, and FOREIGN KEY constraints are not considered. + UNIQUE and PRIMARY KEY constraints + from the parent table will be created in the partition, if they don't + already exist. If any of the CHECK constraints of the table being attached is marked NO INHERIT, the command will fail; such a constraint must be recreated without the NO INHERIT @@ -803,19 +898,31 @@ ALTER TABLE [ IF EXISTS ] name If the new partition is a foreign table, nothing is done to verify that all the rows in the foreign table obey the partition constraint. - (See the discussion in about + (See the discussion in about constraints on the foreign table.) + + + When a table has a default partition, defining a new partition changes + the partition constraint for the default partition. The default + partition can't contain any rows that would need to be moved to the new + partition, and will be scanned to verify that none are present. This + scan, like the scan of the new partition, can be avoided if an + appropriate CHECK constraint is present. Also like + the scan of the new partition, it is always skipped when the default + partition is a foreign table. + - DETACH PARTITION partition_name + DETACH PARTITION partition_name This form detaches specified partition of the target table. The detached partition continues to exist as a standalone table, but no longer has any - ties to the table from which it was detached. + ties to the table from which it was detached. Any indexes that were + attached to the target table's indexes are detached. @@ -835,7 +942,7 @@ ALTER TABLE [ IF EXISTS ] name - You must own the table to use ALTER TABLE. + You must own the table to use ALTER TABLE. To change the schema or tablespace of a table, you must also have CREATE privilege on the new schema or tablespace. To add the table as a new child of a parent table, you must own the parent @@ -868,21 +975,21 @@ ALTER TABLE [ IF EXISTS ] name - name + name The name (optionally schema-qualified) of an existing table to - alter. If ONLY is specified before the table name, only - that table is altered. If ONLY is not specified, the table + alter. If ONLY is specified before the table name, only + that table is altered. If ONLY is not specified, the table and all its descendant tables (if any) are altered. 
Optionally, - * can be specified after the table name to explicitly + * can be specified after the table name to explicitly indicate that descendant tables are included. - column_name + column_name Name of a new or existing column. @@ -891,7 +998,7 @@ ALTER TABLE [ IF EXISTS ] name - new_column_name + new_column_name New name for an existing column. @@ -900,7 +1007,7 @@ ALTER TABLE [ IF EXISTS ] name - new_name + new_name New name for the table. @@ -909,7 +1016,7 @@ ALTER TABLE [ IF EXISTS ] name - data_type + data_type Data type of the new column, or new data type for an existing @@ -919,7 +1026,7 @@ ALTER TABLE [ IF EXISTS ] name - table_constraint + table_constraint New table constraint for the table. @@ -928,7 +1035,7 @@ ALTER TABLE [ IF EXISTS ] name - constraint_name + constraint_name Name of a new or existing constraint. @@ -943,7 +1050,7 @@ ALTER TABLE [ IF EXISTS ] name Automatically drop objects that depend on the dropped column or constraint (for example, views referencing the column), and in turn all objects that depend on those objects - (see ). + (see ). @@ -959,7 +1066,7 @@ ALTER TABLE [ IF EXISTS ] name - trigger_name + trigger_name Name of a single trigger to disable or enable. @@ -993,7 +1100,7 @@ ALTER TABLE [ IF EXISTS ] name - index_name + index_name The name of an existing index. @@ -1002,7 +1109,7 @@ ALTER TABLE [ IF EXISTS ] name - storage_parameter + storage_parameter The name of a table storage parameter. @@ -1011,7 +1118,7 @@ ALTER TABLE [ IF EXISTS ] name - value + value The new value for a table storage parameter. @@ -1021,7 +1128,7 @@ ALTER TABLE [ IF EXISTS ] name - parent_table + parent_table A parent table to associate or de-associate with this table. @@ -1030,7 +1137,7 @@ ALTER TABLE [ IF EXISTS ] name - new_owner + new_owner The user name of the new owner of the table. @@ -1039,7 +1146,7 @@ ALTER TABLE [ IF EXISTS ] name - new_tablespace + new_tablespace The name of the tablespace to which the table will be moved. @@ -1048,7 +1155,7 @@ ALTER TABLE [ IF EXISTS ] name - new_schema + new_schema The name of the schema to which the table will be moved. @@ -1057,7 +1164,7 @@ ALTER TABLE [ IF EXISTS ] name - partition_name + partition_name The name of the table to attach as a new partition or to detach from this table. @@ -1066,11 +1173,11 @@ ALTER TABLE [ IF EXISTS ] name - partition_bound_spec + partition_bound_spec The partition bound specification for a new partition. Refer to - for more details on the syntax of the same. + for more details on the syntax of the same. @@ -1086,30 +1193,30 @@ ALTER TABLE [ IF EXISTS ] name - When a column is added with ADD COLUMN, all existing - rows in the table are initialized with the column's default value - (NULL if no DEFAULT clause is specified). - If there is no DEFAULT clause, this is merely a metadata - change and does not require any immediate update of the table's data; - the added NULL values are supplied on readout, instead. + When a column is added with ADD COLUMN and a + non-volatile DEFAULT is specified, the default is + evaluated at the time of the statement and the result stored in the + table's metadata. That value will be used for the column for all existing + rows. If no DEFAULT is specified, NULL is used. In + neither case is a rewrite of the table required. - Adding a column with a DEFAULT clause or changing the type of - an existing column will require the entire table and its indexes to be - rewritten. 
As an exception when changing the type of an existing column, - if the USING clause does not change the column - contents and the old type is either binary coercible to the new type or - an unconstrained domain over the new type, a table rewrite is not needed; - but any indexes on the affected columns must still be rebuilt. Adding or - removing a system oid column also requires rewriting the entire - table. Table and/or index rebuilds may take a significant amount of time - for a large table; and will temporarily require as much as double the disk - space. + Adding a column with a volatile DEFAULT or + changing the type of an existing column will require the entire table and + its indexes to be rewritten. As an exception, when changing the type of an + existing column, if the USING clause does not change + the column contents and the old type is either binary coercible to the new + type or an unconstrained domain over the new type, a table rewrite is not + needed; but any indexes on the affected columns must still be rebuilt. + Adding or removing a system oid column also requires + rewriting the entire table. Table and/or index rebuilds may take a + significant amount of time for a large table; and will temporarily require + as much as double the disk space. - Adding a CHECK or NOT NULL constraint requires + Adding a CHECK or NOT NULL constraint requires scanning the table to verify that existing rows meet the constraint, but does not require a table rewrite. @@ -1121,7 +1228,7 @@ ALTER TABLE [ IF EXISTS ] name The main reason for providing the option to specify multiple changes - in a single ALTER TABLE is that multiple table scans or + in a single ALTER TABLE is that multiple table scans or rewrites can thereby be combined into a single pass over the table. @@ -1133,37 +1240,37 @@ ALTER TABLE [ IF EXISTS ] name reduce the on-disk size of your table, as the space occupied by the dropped column is not reclaimed. The space will be reclaimed over time as existing rows are updated. (These statements do - not apply when dropping the system oid column; that is done + not apply when dropping the system oid column; that is done with an immediate rewrite.) To force immediate reclamation of space occupied by a dropped column, - you can execute one of the forms of ALTER TABLE that + you can execute one of the forms of ALTER TABLE that performs a rewrite of the whole table. This results in reconstructing each row with the dropped column replaced by a null value. - The rewriting forms of ALTER TABLE are not MVCC-safe. + The rewriting forms of ALTER TABLE are not MVCC-safe. After a table rewrite, the table will appear empty to concurrent transactions, if they are using a snapshot taken before the rewrite - occurred. See for more details. + occurred. See for more details. - The USING option of SET DATA TYPE can actually + The USING option of SET DATA TYPE can actually specify any expression involving the old values of the row; that is, it can refer to other columns as well as the one being converted. This allows - very general conversions to be done with the SET DATA TYPE + very general conversions to be done with the SET DATA TYPE syntax. Because of this flexibility, the USING expression is not applied to the column's default value (if any); the result might not be a constant expression as required for a default. 
This means that when there is no implicit or assignment cast from old to - new type, SET DATA TYPE might fail to convert the default even + new type, SET DATA TYPE might fail to convert the default even though a USING clause is supplied. In such cases, - drop the default with DROP DEFAULT, perform the ALTER - TYPE, and then use SET DEFAULT to add a suitable new + drop the default with DROP DEFAULT, perform the ALTER + TYPE, and then use SET DEFAULT to add a suitable new default. Similar considerations apply to indexes and constraints involving the column. @@ -1171,10 +1278,12 @@ ALTER TABLE [ IF EXISTS ] name If a table has any descendant tables, it is not permitted to add, rename, or change the type of a column in the parent table without doing - same to the descendants. This ensures that the descendants always have - columns matching the parent. Similarly, a constraint cannot be renamed - in the parent without also renaming it in all descendants, so that - constraints also match between the parent and its descendants. + the same to the descendants. This ensures that the descendants always + have columns matching the parent. Similarly, a CHECK + constraint cannot be renamed in the parent without also renaming it in + all descendants, so that CHECK constraints also match + between the parent and its descendants. (That restriction does not apply + to index-based constraints, however.) Also, because selecting from the parent also selects from its descendants, a constraint on the parent cannot be marked valid unless it is also marked valid for those descendants. In all of these cases, ALTER TABLE @@ -1198,11 +1307,11 @@ ALTER TABLE [ IF EXISTS ] name The actions for identity columns (ADD GENERATED, SET etc., DROP IDENTITY), as well as the actions - TRIGGER, CLUSTER, OWNER, - and TABLESPACE never recurse to descendant tables; - that is, they always act as though ONLY were specified. - Adding a constraint recurses only for CHECK constraints - that are not marked NO INHERIT. + TRIGGER, CLUSTER, OWNER, + and TABLESPACE never recurse to descendant tables; + that is, they always act as though ONLY were specified. + Adding a constraint recurses only for CHECK constraints + that are not marked NO INHERIT. @@ -1210,8 +1319,8 @@ ALTER TABLE [ IF EXISTS ] name - Refer to for a further description of valid - parameters. has further information on + Refer to for a further description of valid + parameters. has further information on inheritance. 
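The DROP DEFAULT / retype / SET DEFAULT sequence described above can be issued as a single command, so only one pass over the table is needed. The table, column, and target enum type below are purely illustrative and assume 'pending' is a valid value of that type:

ALTER TABLE orders
    ALTER COLUMN status DROP DEFAULT,
    ALTER COLUMN status TYPE order_status USING status::order_status,
    ALTER COLUMN status SET DEFAULT 'pending';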
@@ -1382,21 +1491,35 @@ ALTER TABLE distributors DROP CONSTRAINT distributors_pkey, - Attach a partition to range partitioned table: + To attach a partition to a range-partitioned table: ALTER TABLE measurement ATTACH PARTITION measurement_y2016m07 FOR VALUES FROM ('2016-07-01') TO ('2016-08-01'); - Attach a partition to list partitioned table: + To attach a partition to a list-partitioned table: ALTER TABLE cities ATTACH PARTITION cities_ab FOR VALUES IN ('a', 'b'); - Detach a partition from partitioned table: + To attach a partition to a hash-partitioned table: + +ALTER TABLE orders + ATTACH PARTITION orders_p4 FOR VALUES WITH (MODULUS 4, REMAINDER 3); + + + + To attach a default partition to a partitioned table: + +ALTER TABLE cities + ATTACH PARTITION cities_partdef DEFAULT; + + + + To detach a partition from a partitioned table: ALTER TABLE measurement DETACH PARTITION measurement_y2015m12; @@ -1409,17 +1532,17 @@ ALTER TABLE measurement The forms ADD (without USING INDEX), - DROP [COLUMN], DROP IDENTITY, RESTART, - SET DEFAULT, SET DATA TYPE (without USING), + DROP [COLUMN], DROP IDENTITY, RESTART, + SET DEFAULT, SET DATA TYPE (without USING), SET GENERATED, and SET sequence_option conform with the SQL standard. The other forms are PostgreSQL extensions of the SQL standard. Also, the ability to specify more than one manipulation in a single - ALTER TABLE command is an extension. + ALTER TABLE command is an extension. - ALTER TABLE DROP COLUMN can be used to drop the only + ALTER TABLE DROP COLUMN can be used to drop the only column of a table, leaving a zero-column table. This is an extension of SQL, which disallows zero-column tables. @@ -1429,7 +1552,7 @@ ALTER TABLE measurement See Also - + diff --git a/doc/src/sgml/ref/alter_tablespace.sgml b/doc/src/sgml/ref/alter_tablespace.sgml index 2f41105001..acec33469f 100644 --- a/doc/src/sgml/ref/alter_tablespace.sgml +++ b/doc/src/sgml/ref/alter_tablespace.sgml @@ -3,7 +3,7 @@ doc/src/sgml/ref/alter_tablespace.sgml PostgreSQL documentation --> - + ALTER TABLESPACE @@ -23,8 +23,8 @@ PostgreSQL documentation ALTER TABLESPACE name RENAME TO new_name ALTER TABLESPACE name OWNER TO { new_owner | CURRENT_USER | SESSION_USER } -ALTER TABLESPACE name SET ( tablespace_option = value [, ... ] ) -ALTER TABLESPACE name RESET ( tablespace_option [, ... ] ) +ALTER TABLESPACE name SET ( tablespace_option = value [, ... ] ) +ALTER TABLESPACE name RESET ( tablespace_option [, ... ] ) @@ -83,14 +83,14 @@ ALTER TABLESPACE name RESET ( , - , - ). This may be useful if + same name (see , + , + ). This may be useful if one tablespace is located on a disk which is faster or slower than the remainder of the I/O subsystem. @@ -130,8 +130,8 @@ ALTER TABLESPACE index_space OWNER TO mary; See Also - - + + diff --git a/doc/src/sgml/ref/alter_trigger.sgml b/doc/src/sgml/ref/alter_trigger.sgml index 47eef6e5e8..6cf789a67a 100644 --- a/doc/src/sgml/ref/alter_trigger.sgml +++ b/doc/src/sgml/ref/alter_trigger.sgml @@ -3,7 +3,7 @@ doc/src/sgml/ref/alter_trigger.sgml PostgreSQL documentation --> - + ALTER TRIGGER @@ -21,8 +21,8 @@ PostgreSQL documentation -ALTER TRIGGER name ON table_name RENAME TO new_name -ALTER TRIGGER name ON table_name DEPENDS ON EXTENSION extension_name +ALTER TRIGGER name ON table_name RENAME TO new_name +ALTER TRIGGER name ON table_name DEPENDS ON EXTENSION extension_name @@ -48,7 +48,7 @@ ALTER TRIGGER name ON - name + name The name of an existing trigger to alter. 
@@ -57,7 +57,7 @@ ALTER TRIGGER name ON - table_name + table_name The name of the table on which this trigger acts. @@ -66,7 +66,7 @@ ALTER TRIGGER name ON - new_name + new_name The new name for the trigger. @@ -75,7 +75,7 @@ ALTER TRIGGER name ON - extension_name + extension_name The name of the extension that the trigger is to depend on. @@ -90,8 +90,8 @@ ALTER TRIGGER name ON The ability to temporarily enable or disable a trigger is provided by - , not by - ALTER TRIGGER, because ALTER TRIGGER has no + , not by + ALTER TRIGGER, because ALTER TRIGGER has no convenient way to express the option of enabling or disabling all of a table's triggers at once. @@ -117,7 +117,7 @@ ALTER TRIGGER emp_stamp ON emp DEPENDS ON EXTENSION emplib; Compatibility - ALTER TRIGGER is a PostgreSQL + ALTER TRIGGER is a PostgreSQL extension of the SQL standard. @@ -126,7 +126,7 @@ ALTER TRIGGER emp_stamp ON emp DEPENDS ON EXTENSION emplib; See Also - + diff --git a/doc/src/sgml/ref/alter_tsconfig.sgml b/doc/src/sgml/ref/alter_tsconfig.sgml index 72a719b862..ebe0b94b27 100644 --- a/doc/src/sgml/ref/alter_tsconfig.sgml +++ b/doc/src/sgml/ref/alter_tsconfig.sgml @@ -3,7 +3,7 @@ doc/src/sgml/ref/alter_tsconfig.sgml PostgreSQL documentation --> - + ALTER TEXT SEARCH CONFIGURATION @@ -49,7 +49,7 @@ ALTER TEXT SEARCH CONFIGURATION name SET SCHEMA You must be the owner of the configuration to use - ALTER TEXT SEARCH CONFIGURATION. + ALTER TEXT SEARCH CONFIGURATION. @@ -136,20 +136,20 @@ ALTER TEXT SEARCH CONFIGURATION name SET SCHEMA - The ADD MAPPING FOR form installs a list of dictionaries to be + The ADD MAPPING FOR form installs a list of dictionaries to be consulted for the specified token type(s); it is an error if there is already a mapping for any of the token types. - The ALTER MAPPING FOR form does the same, but first removing + The ALTER MAPPING FOR form does the same, but first removing any existing mapping for those token types. - The ALTER MAPPING REPLACE forms substitute ALTER MAPPING REPLACE forms substitute new_dictionary for old_dictionary anywhere the latter appears. - This is done for only the specified token types when FOR + This is done for only the specified token types when FOR appears, or for all mappings of the configuration when it doesn't. - The DROP MAPPING form removes all dictionaries for the + The DROP MAPPING form removes all dictionaries for the specified token type(s), causing tokens of those types to be ignored by the text search configuration. It is an error if there is no mapping - for the token types, unless IF EXISTS appears. + for the token types, unless IF EXISTS appears. @@ -158,9 +158,9 @@ ALTER TEXT SEARCH CONFIGURATION name SET SCHEMA Examples - The following example replaces the english dictionary - with the swedish dictionary anywhere that english - is used within my_config. + The following example replaces the english dictionary + with the swedish dictionary anywhere that english + is used within my_config. @@ -182,8 +182,8 @@ ALTER TEXT SEARCH CONFIGURATION my_config See Also - - + + diff --git a/doc/src/sgml/ref/alter_tsdictionary.sgml b/doc/src/sgml/ref/alter_tsdictionary.sgml index 7cecabea83..b29865e11e 100644 --- a/doc/src/sgml/ref/alter_tsdictionary.sgml +++ b/doc/src/sgml/ref/alter_tsdictionary.sgml @@ -3,7 +3,7 @@ doc/src/sgml/ref/alter_tsdictionary.sgml PostgreSQL documentation --> - + ALTER TEXT SEARCH DICTIONARY @@ -41,7 +41,7 @@ ALTER TEXT SEARCH DICTIONARY name SET SCHEMA You must be the owner of the dictionary to use - ALTER TEXT SEARCH DICTIONARY. 
+ ALTER TEXT SEARCH DICTIONARY. @@ -126,7 +126,7 @@ ALTER TEXT SEARCH DICTIONARY my_dict ( StopWords = newrussian ); - The following example command changes the language option to dutch, + The following example command changes the language option to dutch, and removes the stopword option entirely. @@ -135,7 +135,7 @@ ALTER TEXT SEARCH DICTIONARY my_dict ( language = dutch, StopWords ); - The following example command updates the dictionary's + The following example command updates the dictionary's definition without actually changing anything. @@ -144,7 +144,7 @@ ALTER TEXT SEARCH DICTIONARY my_dict ( dummy ); (The reason this works is that the option removal code doesn't complain if there is no such option.) This trick is useful when changing - configuration files for the dictionary: the ALTER will + configuration files for the dictionary: the ALTER will force existing database sessions to re-read the configuration files, which otherwise they would never do if they had read them earlier. @@ -163,8 +163,8 @@ ALTER TEXT SEARCH DICTIONARY my_dict ( dummy ); See Also - - + + diff --git a/doc/src/sgml/ref/alter_tsparser.sgml b/doc/src/sgml/ref/alter_tsparser.sgml index e2b6060a17..9edff4b71a 100644 --- a/doc/src/sgml/ref/alter_tsparser.sgml +++ b/doc/src/sgml/ref/alter_tsparser.sgml @@ -3,7 +3,7 @@ doc/src/sgml/ref/alter_tsparser.sgml PostgreSQL documentation --> - + ALTER TEXT SEARCH PARSER @@ -36,7 +36,7 @@ ALTER TEXT SEARCH PARSER name SET SCHEMA - You must be a superuser to use ALTER TEXT SEARCH PARSER. + You must be a superuser to use ALTER TEXT SEARCH PARSER. @@ -86,8 +86,8 @@ ALTER TEXT SEARCH PARSER name SET SCHEMA See Also - - + + diff --git a/doc/src/sgml/ref/alter_tstemplate.sgml b/doc/src/sgml/ref/alter_tstemplate.sgml index e7ae91c0a0..5d3c826533 100644 --- a/doc/src/sgml/ref/alter_tstemplate.sgml +++ b/doc/src/sgml/ref/alter_tstemplate.sgml @@ -3,7 +3,7 @@ doc/src/sgml/ref/alter_tstemplate.sgml PostgreSQL documentation --> - + ALTER TEXT SEARCH TEMPLATE @@ -36,7 +36,7 @@ ALTER TEXT SEARCH TEMPLATE name SET SCHEMA - You must be a superuser to use ALTER TEXT SEARCH TEMPLATE. + You must be a superuser to use ALTER TEXT SEARCH TEMPLATE. @@ -86,8 +86,8 @@ ALTER TEXT SEARCH TEMPLATE name SET SCHEMA See Also - - + + diff --git a/doc/src/sgml/ref/alter_type.sgml b/doc/src/sgml/ref/alter_type.sgml index d65f70f674..67be1dd568 100644 --- a/doc/src/sgml/ref/alter_type.sgml +++ b/doc/src/sgml/ref/alter_type.sgml @@ -3,7 +3,7 @@ doc/src/sgml/ref/alter_type.sgml PostgreSQL documentation --> - + ALTER TYPE @@ -23,19 +23,19 @@ PostgreSQL documentation -ALTER TYPE name action [, ... ] -ALTER TYPE name OWNER TO { new_owner | CURRENT_USER | SESSION_USER } -ALTER TYPE name RENAME ATTRIBUTE attribute_name TO new_attribute_name [ CASCADE | RESTRICT ] -ALTER TYPE name RENAME TO new_name -ALTER TYPE name SET SCHEMA new_schema -ALTER TYPE name ADD VALUE [ IF NOT EXISTS ] new_enum_value [ { BEFORE | AFTER } neighbor_enum_value ] -ALTER TYPE name RENAME VALUE existing_enum_value TO new_enum_value - -where action is one of: - - ADD ATTRIBUTE attribute_name data_type [ COLLATE collation ] [ CASCADE | RESTRICT ] - DROP ATTRIBUTE [ IF EXISTS ] attribute_name [ CASCADE | RESTRICT ] - ALTER ATTRIBUTE attribute_name [ SET DATA ] TYPE data_type [ COLLATE collation ] [ CASCADE | RESTRICT ] +ALTER TYPE name action [, ... 
] +ALTER TYPE name OWNER TO { new_owner | CURRENT_USER | SESSION_USER } +ALTER TYPE name RENAME ATTRIBUTE attribute_name TO new_attribute_name [ CASCADE | RESTRICT ] +ALTER TYPE name RENAME TO new_name +ALTER TYPE name SET SCHEMA new_schema +ALTER TYPE name ADD VALUE [ IF NOT EXISTS ] new_enum_value [ { BEFORE | AFTER } neighbor_enum_value ] +ALTER TYPE name RENAME VALUE existing_enum_value TO new_enum_value + +where action is one of: + + ADD ATTRIBUTE attribute_name data_type [ COLLATE collation ] [ CASCADE | RESTRICT ] + DROP ATTRIBUTE [ IF EXISTS ] attribute_name [ CASCADE | RESTRICT ] + ALTER ATTRIBUTE attribute_name [ SET DATA ] TYPE data_type [ COLLATE collation ] [ CASCADE | RESTRICT ] @@ -52,7 +52,7 @@ ALTER TYPE name RENAME VALUE This form adds a new attribute to a composite type, using the same syntax as - . + . @@ -147,7 +147,7 @@ ALTER TYPE name RENAME VALUE - You must own the type to use ALTER TYPE. + You must own the type to use ALTER TYPE. To change the schema of a type, you must also have CREATE privilege on the new schema. To alter the owner, you must also be a direct or indirect member of the new @@ -166,7 +166,7 @@ ALTER TYPE name RENAME VALUE - name + name The name (possibly schema-qualified) of an existing type to @@ -176,7 +176,7 @@ ALTER TYPE name RENAME VALUE - new_name + new_name The new name for the type. @@ -185,7 +185,7 @@ ALTER TYPE name RENAME VALUE - new_owner + new_owner The user name of the new owner of the type. @@ -194,7 +194,7 @@ ALTER TYPE name RENAME VALUE - new_schema + new_schema The new schema for the type. @@ -203,7 +203,7 @@ ALTER TYPE name RENAME VALUE - attribute_name + attribute_name The name of the attribute to add, alter, or drop. @@ -212,7 +212,7 @@ ALTER TYPE name RENAME VALUE - new_attribute_name + new_attribute_name The new name of the attribute to be renamed. @@ -221,7 +221,7 @@ ALTER TYPE name RENAME VALUE - data_type + data_type The data type of the attribute to add, or the new type of the @@ -231,7 +231,7 @@ ALTER TYPE name RENAME VALUE - new_enum_value + new_enum_value The new value to be added to an enum type's list of values, @@ -242,7 +242,7 @@ ALTER TYPE name RENAME VALUE - neighbor_enum_value + neighbor_enum_value The existing enum value that the new value should be added immediately @@ -253,7 +253,7 @@ ALTER TYPE name RENAME VALUE - existing_enum_value + existing_enum_value The existing enum value that should be renamed. @@ -290,12 +290,9 @@ ALTER TYPE name RENAME VALUE Notes - If ALTER TYPE ... ADD VALUE (the form that adds a new value to - an enum type) is executed inside a transaction block, the new value cannot - be used until after the transaction has been committed, except in the case - that the enum type itself was created earlier in the same transaction. - Likewise, when a pre-existing enum value is renamed, the transaction must - be committed before the renamed value can be used. + If ALTER TYPE ... ADD VALUE (the form that adds a new + value to an enum type) is executed inside a transaction block, the new + value cannot be used until after the transaction has been committed. @@ -305,7 +302,7 @@ ALTER TYPE name RENAME VALUE wrapped - around since the original creation of the enum type). The slowdown is + around since the original creation of the enum type). The slowdown is usually insignificant; but if it matters, optimal performance can be regained by dropping and recreating the enum type, or by dumping and reloading the database. 
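For instance, assuming a hypothetical enum type colors that already contains a value 'green', a new value can be added and positioned explicitly:

ALTER TYPE colors ADD VALUE IF NOT EXISTS 'teal' AFTER 'green';
-- If this is run inside a transaction block, 'teal' cannot be used
-- until the transaction has been committed.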
@@ -368,12 +365,12 @@ ALTER TYPE colors RENAME VALUE 'purple' TO 'mauve'; - + See Also - - + + diff --git a/doc/src/sgml/ref/alter_user.sgml b/doc/src/sgml/ref/alter_user.sgml index 411a6dcc38..8f50f43089 100644 --- a/doc/src/sgml/ref/alter_user.sgml +++ b/doc/src/sgml/ref/alter_user.sgml @@ -3,7 +3,7 @@ doc/src/sgml/ref/alter_user.sgml PostgreSQL documentation --> - + ALTER USER @@ -21,9 +21,9 @@ PostgreSQL documentation -ALTER USER role_specification [ WITH ] option [ ... ] +ALTER USER role_specification [ WITH ] option [ ... ] -where option can be: +where option can be: SUPERUSER | NOSUPERUSER | CREATEDB | NOCREATEDB @@ -32,20 +32,20 @@ ALTER USER role_specification [ WIT | LOGIN | NOLOGIN | REPLICATION | NOREPLICATION | BYPASSRLS | NOBYPASSRLS - | CONNECTION LIMIT connlimit - | [ ENCRYPTED ] PASSWORD 'password' - | VALID UNTIL 'timestamp' + | CONNECTION LIMIT connlimit + | [ ENCRYPTED ] PASSWORD 'password' + | VALID UNTIL 'timestamp' -ALTER USER name RENAME TO new_name +ALTER USER name RENAME TO new_name -ALTER USER { role_specification | ALL } [ IN DATABASE database_name ] SET configuration_parameter { TO | = } { value | DEFAULT } -ALTER USER { role_specification | ALL } [ IN DATABASE database_name ] SET configuration_parameter FROM CURRENT -ALTER USER { role_specification | ALL } [ IN DATABASE database_name ] RESET configuration_parameter -ALTER USER { role_specification | ALL } [ IN DATABASE database_name ] RESET ALL +ALTER USER { role_specification | ALL } [ IN DATABASE database_name ] SET configuration_parameter { TO | = } { value | DEFAULT } +ALTER USER { role_specification | ALL } [ IN DATABASE database_name ] SET configuration_parameter FROM CURRENT +ALTER USER { role_specification | ALL } [ IN DATABASE database_name ] RESET configuration_parameter +ALTER USER { role_specification | ALL } [ IN DATABASE database_name ] RESET ALL -where role_specification can be: +where role_specification can be: - [ GROUP ] role_name + role_name | CURRENT_USER | SESSION_USER @@ -56,7 +56,7 @@ ALTER USER { role_specification | A ALTER USER is now an alias for - . + . @@ -74,7 +74,7 @@ ALTER USER { role_specification | A See Also - + diff --git a/doc/src/sgml/ref/alter_user_mapping.sgml b/doc/src/sgml/ref/alter_user_mapping.sgml index 3be54afee5..7a9b5a188a 100644 --- a/doc/src/sgml/ref/alter_user_mapping.sgml +++ b/doc/src/sgml/ref/alter_user_mapping.sgml @@ -3,7 +3,7 @@ doc/src/sgml/ref/alter_user_mapping.sgml PostgreSQL documentation --> - + ALTER USER MAPPING @@ -23,7 +23,7 @@ PostgreSQL documentation ALTER USER MAPPING FOR { user_name | USER | CURRENT_USER | SESSION_USER | PUBLIC } SERVER server_name - OPTIONS ( [ ADD | SET | DROP ] option ['value'] [, ... ] ) + OPTIONS ( [ ADD | SET | DROP ] option ['value'] [, ... ] ) @@ -38,7 +38,7 @@ ALTER USER MAPPING FOR { user_name The owner of a foreign server can alter user mappings for that server for any user. Also, a user can alter a user mapping for - their own user name if USAGE privilege on the server has + their own user name if USAGE privilege on the server has been granted to the user. @@ -51,9 +51,9 @@ ALTER USER MAPPING FOR { user_name user_name - User name of the mapping. CURRENT_USER - and USER match the name of the current - user. PUBLIC is used to match all present and future + User name of the mapping. CURRENT_USER + and USER match the name of the current + user. PUBLIC is used to match all present and future user names in the system. 
@@ -69,13 +69,13 @@ ALTER USER MAPPING FOR { user_name - OPTIONS ( [ ADD | SET | DROP ] option ['value'] [, ... ] ) + OPTIONS ( [ ADD | SET | DROP ] option ['value'] [, ... ] ) Change options for the user mapping. The new options override any previously specified - options. ADD, SET, and DROP - specify the action to be performed. ADD is assumed + options. ADD, SET, and DROP + specify the action to be performed. ADD is assumed if no operation is explicitly specified. Option names must be unique; options are also validated by the server's foreign-data wrapper. @@ -89,7 +89,7 @@ ALTER USER MAPPING FOR { user_name Examples - Change the password for user mapping bob, server foo: + Change the password for user mapping bob, server foo: ALTER USER MAPPING FOR bob SERVER foo OPTIONS (SET password 'public'); @@ -116,8 +116,8 @@ ALTER USER MAPPING FOR bob SERVER foo OPTIONS (SET password 'public'); See Also - - + + diff --git a/doc/src/sgml/ref/alter_view.sgml b/doc/src/sgml/ref/alter_view.sgml index 00f4ecb9b1..2e9edc1975 100644 --- a/doc/src/sgml/ref/alter_view.sgml +++ b/doc/src/sgml/ref/alter_view.sgml @@ -3,7 +3,7 @@ doc/src/sgml/ref/alter_view.sgml PostgreSQL documentation --> - + ALTER VIEW @@ -21,9 +21,9 @@ PostgreSQL documentation -ALTER VIEW [ IF EXISTS ] name ALTER [ COLUMN ] column_name SET DEFAULT expression -ALTER VIEW [ IF EXISTS ] name ALTER [ COLUMN ] column_name DROP DEFAULT -ALTER VIEW [ IF EXISTS ] name OWNER TO { new_owner | CURRENT_USER | SESSION_USER } +ALTER VIEW [ IF EXISTS ] name ALTER [ COLUMN ] column_name SET DEFAULT expression +ALTER VIEW [ IF EXISTS ] name ALTER [ COLUMN ] column_name DROP DEFAULT +ALTER VIEW [ IF EXISTS ] name OWNER TO { new_owner | CURRENT_USER | SESSION_USER } ALTER VIEW [ IF EXISTS ] name RENAME TO new_name ALTER VIEW [ IF EXISTS ] name SET SCHEMA new_schema ALTER VIEW [ IF EXISTS ] name SET ( view_option_name [= view_option_value] [, ... ] ) @@ -37,12 +37,12 @@ ALTER VIEW [ IF EXISTS ] name RESET ALTER VIEW changes various auxiliary properties of a view. (If you want to modify the view's defining query, - use CREATE OR REPLACE VIEW.) + use CREATE OR REPLACE VIEW.) - You must own the view to use ALTER VIEW. - To change a view's schema, you must also have CREATE + You must own the view to use ALTER VIEW. + To change a view's schema, you must also have CREATE privilege on the new schema. To alter the owner, you must also be a direct or indirect member of the new owning role, and that role must have CREATE privilege on @@ -81,7 +81,7 @@ ALTER VIEW [ IF EXISTS ] name RESET These forms set or remove the default value for a column. A view column's default value is substituted into any - INSERT or UPDATE command whose target is the + INSERT or UPDATE command whose target is the view, before applying any rules or triggers for the view. The view's default will therefore take precedence over any default values from underlying relations. @@ -90,7 +90,7 @@ ALTER VIEW [ IF EXISTS ] name RESET - new_owner + new_owner The user name of the new owner of the view. @@ -185,7 +185,7 @@ INSERT INTO a_view(id) VALUES(2); -- ts will receive the current time Compatibility - ALTER VIEW is a PostgreSQL + ALTER VIEW is a PostgreSQL extension of the SQL standard. 
@@ -194,8 +194,8 @@ INSERT INTO a_view(id) VALUES(2); -- ts will receive the current time See Also - - + + diff --git a/doc/src/sgml/ref/analyze.sgml b/doc/src/sgml/ref/analyze.sgml index 45dee101df..fea7f46521 100644 --- a/doc/src/sgml/ref/analyze.sgml +++ b/doc/src/sgml/ref/analyze.sgml @@ -3,7 +3,7 @@ doc/src/sgml/ref/analyze.sgml PostgreSQL documentation --> - + ANALYZE @@ -21,7 +21,17 @@ PostgreSQL documentation -ANALYZE [ VERBOSE ] [ table_name [ ( column_name [, ...] ) ] ] +ANALYZE [ ( option [, ...] ) ] [ table_and_columns [, ...] ] +ANALYZE [ VERBOSE ] [ table_and_columns [, ...] ] + +where option can be one of: + + VERBOSE + SKIP_LOCKED + +and table_and_columns is: + + table_name [ ( column_name [, ...] ) ] @@ -31,18 +41,27 @@ ANALYZE [ VERBOSE ] [ table_name [ ANALYZE collects statistics about the contents of tables in the database, and stores the results in the pg_statistic + linkend="catalog-pg-statistic">pg_statistic system catalog. Subsequently, the query planner uses these statistics to help determine the most efficient execution plans for queries. - With no parameter, ANALYZE examines every table in the - current database. With a parameter, ANALYZE examines - only that table. It is further possible to give a list of column names, + Without a table_and_columns + list, ANALYZE processes every table and materialized view + in the current database that the current user has permission to analyze. + With a list, ANALYZE processes only those table(s). + It is further possible to give a list of column names for a table, in which case only the statistics for those columns are collected. + + + When the option list is surrounded by parentheses, the options can be + written in any order. The parenthesized syntax was added in + PostgreSQL 11; the unparenthesized syntax + is deprecated. + @@ -59,7 +78,25 @@ ANALYZE [ VERBOSE ] [ table_name [ - table_name + SKIP_LOCKED + + + Specifies that ANALYZE should not wait for any + conflicting locks to be released when beginning work on a relation: + if a relation cannot be locked immediately without waiting, the relation + is skipped. Note that even with this option, ANALYZE + may still block when opening the relation's indexes or when acquiring + sample rows from partitions, table inheritance children, and some + types of foreign tables. Also, while ANALYZE + ordinarily processes all partitions of specified partitioned tables, + this option will cause ANALYZE to skip all + partitions if there is a conflicting lock on the partitioned table. + + + + + + table_name The name (possibly schema-qualified) of a specific table to @@ -73,7 +110,7 @@ ANALYZE [ VERBOSE ] [ table_name [ - column_name + column_name The name of a specific column to analyze. Defaults to all columns. @@ -87,7 +124,7 @@ ANALYZE [ VERBOSE ] [ table_name [ Outputs - When VERBOSE is specified, ANALYZE emits + When VERBOSE is specified, ANALYZE emits progress messages to indicate which table is currently being processed. Various statistics about the tables are printed as well. @@ -98,14 +135,14 @@ ANALYZE [ VERBOSE ] [ table_name [ Foreign tables are analyzed only when explicitly selected. Not all - foreign data wrappers support ANALYZE. If the table's - wrapper does not support ANALYZE, the command prints a + foreign data wrappers support ANALYZE. If the table's + wrapper does not support ANALYZE, the command prints a warning and does nothing. 
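A sketch of the parenthesized option syntax shown in the synopsis above, using placeholder table and column names:

ANALYZE (VERBOSE, SKIP_LOCKED) orders (customer_id, order_date);
-- Statistics are gathered only for the listed columns, and the table is
-- skipped rather than waited on if a conflicting lock is already held.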
In the default PostgreSQL configuration, - the autovacuum daemon (see ) + the autovacuum daemon (see ) takes care of automatic analyzing of tables when they are first loaded with data, and as they change throughout regular operation. When autovacuum is disabled, @@ -113,7 +150,7 @@ ANALYZE [ VERBOSE ] [ table_name [ just after making major changes in the contents of a table. Accurate statistics will help the planner to choose the most appropriate query plan, and thereby improve the speed of query processing. A common - strategy for read-mostly databases is to run + strategy for read-mostly databases is to run and ANALYZE once a day during a low-usage time of day. (This will not be sufficient if there is heavy update activity.) @@ -133,7 +170,7 @@ ANALYZE [ VERBOSE ] [ table_name [ in a unique-key column, there are no common values) or if the column data type does not support the appropriate operators. There is more information about the statistics in . + linkend="maintenance"/>. @@ -144,7 +181,7 @@ ANALYZE [ VERBOSE ] [ table_name [ will change slightly each time ANALYZE is run, even if the actual table contents did not change. This might result in small changes in the planner's estimated costs shown by - . + . In rare situations, this non-determinism will cause the planner's choices of query plans to change after ANALYZE is run. To avoid this, raise the amount of statistics collected by @@ -153,10 +190,10 @@ ANALYZE [ VERBOSE ] [ table_name [ The extent of analysis can be controlled by adjusting the - configuration variable, or + configuration variable, or on a column-by-column basis by setting the per-column statistics target with ALTER TABLE ... ALTER COLUMN ... SET - STATISTICS (see ). + STATISTICS (see ). The target value sets the maximum number of entries in the most-common-value list and the maximum number of bins in the histogram. The default target value @@ -166,8 +203,8 @@ ANALYZE [ VERBOSE ] [ table_name [ pg_statistic. In particular, setting the statistics target to zero disables collection of statistics for that column. It might be useful to do that for columns that are - never used as part of the WHERE, GROUP BY, - or ORDER BY clauses of queries, since the planner will + never used as part of the WHERE, GROUP BY, + or ORDER BY clauses of queries, since the planner will have no use for statistics on such columns. @@ -185,8 +222,8 @@ ANALYZE [ VERBOSE ] [ table_name [ with the largest possible statistics target. If this inaccuracy leads to bad query plans, a more accurate value can be determined manually and then installed with - ALTER TABLE ... ALTER COLUMN ... SET (n_distinct = ...) - (see ). + ALTER TABLE ... ALTER COLUMN ... SET (n_distinct = ...) + (see ). @@ -204,7 +241,7 @@ ANALYZE [ VERBOSE ] [ table_name [ If any of the child tables are foreign tables whose foreign data wrappers - do not support ANALYZE, those child tables are ignored while + do not support ANALYZE, those child tables are ignored while gathering inheritance statistics. 
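Tying this back to the ALTER TABLE forms mentioned above, the per-column statistics target and the n_distinct override take effect at the next ANALYZE; the names below are placeholders:

ALTER TABLE orders ALTER COLUMN customer_id SET STATISTICS 500;
ALTER TABLE orders ALTER COLUMN customer_id SET (n_distinct = -0.2);
ANALYZE orders;
-- -0.2 tells the planner to assume that 20% of the rows carry distinct values.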
@@ -227,10 +264,10 @@ ANALYZE [ VERBOSE ] [ table_name [ See Also - - - - + + + + diff --git a/doc/src/sgml/ref/begin.sgml b/doc/src/sgml/ref/begin.sgml index c04f1c8064..c23bbfb4e7 100644 --- a/doc/src/sgml/ref/begin.sgml +++ b/doc/src/sgml/ref/begin.sgml @@ -3,7 +3,7 @@ doc/src/sgml/ref/begin.sgml PostgreSQL documentation --> - + BEGIN @@ -38,8 +38,8 @@ BEGIN [ WORK | TRANSACTION ] [ transaction_modeBEGIN initiates a transaction block, that is, all statements after a BEGIN command will be executed in a single transaction until an explicit or is given. + linkend="sql-commit"/> or is given. By default (without BEGIN), PostgreSQL executes transactions in autocommit mode, that is, each @@ -60,7 +60,7 @@ BEGIN [ WORK | TRANSACTION ] [ transaction_mode If the isolation level, read/write mode, or deferrable mode is specified, the new transaction has those characteristics, as if - + was executed. @@ -81,7 +81,7 @@ BEGIN [ WORK | TRANSACTION ] [ transaction_mode - Refer to for information on the meaning + Refer to for information on the meaning of the other parameters to this statement. @@ -90,21 +90,21 @@ BEGIN [ WORK | TRANSACTION ] [ transaction_modeNotes - has the same functionality - as BEGIN. + has the same functionality + as BEGIN. - Use or - + Use or + to terminate a transaction block. - Issuing BEGIN when already inside a transaction block will + Issuing BEGIN when already inside a transaction block will provoke a warning message. The state of the transaction is not affected. To nest transactions within a transaction block, use savepoints - (see ). + (see ). @@ -131,7 +131,7 @@ BEGIN; BEGIN is a PostgreSQL language extension. It is equivalent to the SQL-standard command - , whose reference page + , whose reference page contains additional compatibility information. @@ -152,10 +152,10 @@ BEGIN; See Also - - - - + + + + diff --git a/doc/src/sgml/ref/call.sgml b/doc/src/sgml/ref/call.sgml new file mode 100644 index 0000000000..abaa81c78b --- /dev/null +++ b/doc/src/sgml/ref/call.sgml @@ -0,0 +1,115 @@ + + + + + CALL + + + + CALL + 7 + SQL - Language Statements + + + + CALL + invoke a procedure + + + + +CALL name ( [ argument ] [, ...] ) + + + + + Description + + + CALL executes a procedure. + + + + If the procedure has any output parameters, then a result row will be + returned, containing the values of those parameters. + + + + + Parameters + + + + name + + + The name (optionally schema-qualified) of the procedure. + + + + + + argument + + + An input argument for the procedure call. + See for the full details on + function and procedure call syntax, including use of named parameters. + + + + + + + + Notes + + + The user must have EXECUTE privilege on the procedure in + order to be allowed to invoke it. + + + + To call a function (not a procedure), use SELECT instead. + + + + If CALL is executed in a transaction block, then the + called procedure cannot execute transaction control statements. + Transaction control statements are only allowed if CALL + is executed in its own transaction. + + + + PL/pgSQL handles output parameters + in CALL commands differently; + see . + + + + + Examples + +CALL do_db_maintenance(); + + + + + Compatibility + + + CALL conforms to the SQL standard. 
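To make the output-parameter behavior concrete, here is a minimal sketch; the procedure name and body are illustrative and written in PL/pgSQL:

CREATE PROCEDURE triple(INOUT x integer)
LANGUAGE plpgsql
AS $$
BEGIN
    x := x * 3;   -- the INOUT parameter is returned to the caller
END;
$$;

CALL triple(5);   -- returns a one-row result with x = 15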
+ + + + + See Also + + + + + + diff --git a/doc/src/sgml/ref/checkpoint.sgml b/doc/src/sgml/ref/checkpoint.sgml index a8f3186d8c..dfcadcf402 100644 --- a/doc/src/sgml/ref/checkpoint.sgml +++ b/doc/src/sgml/ref/checkpoint.sgml @@ -29,7 +29,7 @@ CHECKPOINT A checkpoint is a point in the write-ahead log sequence at which all data files have been updated to reflect the information in the log. All data files will be flushed to disk. Refer to - for more details about what happens + for more details about what happens during a checkpoint. @@ -37,14 +37,14 @@ CHECKPOINT The CHECKPOINT command forces an immediate checkpoint when the command is issued, without waiting for a regular checkpoint scheduled by the system (controlled by the settings in - ). + ). CHECKPOINT is not intended for use during normal operation. If executed during recovery, the CHECKPOINT command - will force a restartpoint (see ) + will force a restartpoint (see ) rather than writing a new checkpoint. diff --git a/doc/src/sgml/ref/close.sgml b/doc/src/sgml/ref/close.sgml index aacc667144..e464df1965 100644 --- a/doc/src/sgml/ref/close.sgml +++ b/doc/src/sgml/ref/close.sgml @@ -3,7 +3,7 @@ doc/src/sgml/ref/close.sgml PostgreSQL documentation --> - + CLOSE @@ -26,7 +26,7 @@ PostgreSQL documentation -CLOSE { name | ALL } +CLOSE { name | ALL } @@ -57,7 +57,7 @@ CLOSE { name | ALL } - name + name The name of an open cursor to close. @@ -84,13 +84,13 @@ CLOSE { name | ALL } PostgreSQL does not have an explicit OPEN cursor statement; a cursor is considered open when it is declared. Use the - + statement to declare a cursor. You can see all available cursors by querying the pg_cursors system view. + linkend="view-pg-cursors">pg_cursors system view. @@ -115,7 +115,7 @@ CLOSE liahona; CLOSE is fully conforming with the SQL - standard. CLOSE ALL is a PostgreSQL + standard. CLOSE ALL is a PostgreSQL extension. @@ -124,9 +124,9 @@ CLOSE liahona; See Also - - - + + + diff --git a/doc/src/sgml/ref/cluster.sgml b/doc/src/sgml/ref/cluster.sgml index e6a77095ec..4da60d8d56 100644 --- a/doc/src/sgml/ref/cluster.sgml +++ b/doc/src/sgml/ref/cluster.sgml @@ -3,7 +3,7 @@ doc/src/sgml/ref/cluster.sgml PostgreSQL documentation --> - + CLUSTER @@ -21,7 +21,7 @@ PostgreSQL documentation -CLUSTER [VERBOSE] table_name [ USING index_name ] +CLUSTER [VERBOSE] table_name [ USING index_name ] CLUSTER [VERBOSE] @@ -57,7 +57,7 @@ CLUSTER [VERBOSE] CLUSTER table_name reclusters the table using the same index as before. You can also use the CLUSTER or SET WITHOUT CLUSTER - forms of to set the index to be used for + forms of to set the index to be used for future cluster operations, or to clear any previous setting. @@ -82,7 +82,7 @@ CLUSTER [VERBOSE] - table_name + table_name The name (possibly schema-qualified) of a table. @@ -91,7 +91,7 @@ CLUSTER [VERBOSE] - index_name + index_name The name of an index. @@ -128,7 +128,7 @@ CLUSTER [VERBOSE] - CLUSTER can re-sort the table using either an index scan + CLUSTER can re-sort the table using either an index scan on the specified index, or (if the index is a b-tree) a sequential scan followed by sorting. It will attempt to choose the method that will be faster, based on planner cost parameters and available statistical @@ -148,18 +148,18 @@ CLUSTER [VERBOSE] as double the table size, plus the index sizes. This method is often faster than the index scan method, but if the disk space requirement is intolerable, you can disable this choice by temporarily setting to off. + linkend="guc-enable-sort"/> to off. 
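A brief, hedged sketch of steering CLUSTER away from the sort-based method for a single session, as described above; the table and index names are hypothetical:

SET enable_sort TO off;                          -- rule out the seqscan-and-sort method
CLUSTER VERBOSE employees USING employees_pkey;  -- recluster using an index scan
RESET enable_sort;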
- It is advisable to set to + It is advisable to set to a reasonably large value (but not more than the amount of RAM you can - dedicate to the CLUSTER operation) before clustering. + dedicate to the CLUSTER operation) before clustering. Because the planner records statistics about the ordering of - tables, it is advisable to run + tables, it is advisable to run on the newly clustered table. Otherwise, the planner might make poor choices of query plans. @@ -168,7 +168,7 @@ CLUSTER [VERBOSE] Because CLUSTER remembers which indexes are clustered, one can cluster the tables one wants clustered manually the first time, then set up a periodic maintenance script that executes - CLUSTER without any parameters, so that the desired tables + CLUSTER without any parameters, so that the desired tables are periodically reclustered. @@ -210,9 +210,9 @@ CLUSTER; The syntax -CLUSTER index_name ON table_name +CLUSTER index_name ON table_name - is also supported for compatibility with pre-8.3 PostgreSQL + is also supported for compatibility with pre-8.3 PostgreSQL versions. @@ -221,7 +221,7 @@ CLUSTER index_name ON See Also - + diff --git a/doc/src/sgml/ref/clusterdb.sgml b/doc/src/sgml/ref/clusterdb.sgml index 67582fd6e6..ed343dd7da 100644 --- a/doc/src/sgml/ref/clusterdb.sgml +++ b/doc/src/sgml/ref/clusterdb.sgml @@ -3,7 +3,7 @@ doc/src/sgml/ref/clusterdb.sgml PostgreSQL documentation --> - + clusterdb @@ -60,7 +60,7 @@ PostgreSQL documentation clusterdb is a wrapper around the SQL - command . + command . There is no effective difference between clustering databases via this utility and via other methods for accessing the server. @@ -76,8 +76,8 @@ PostgreSQL documentation - - + + Cluster all databases. @@ -86,8 +86,8 @@ PostgreSQL documentation - - + + Specifies the name of the database to be clustered. @@ -101,8 +101,8 @@ PostgreSQL documentation - - + + Echo the commands that clusterdb generates @@ -112,8 +112,8 @@ PostgreSQL documentation - - + + Do not display progress messages. @@ -122,20 +122,20 @@ PostgreSQL documentation - - + + Cluster table only. Multiple tables can be clustered by writing multiple - switches. - - + + Print detailed information during processing. @@ -144,8 +144,8 @@ PostgreSQL documentation - - + + Print the clusterdb version and exit. @@ -154,8 +154,8 @@ PostgreSQL documentation - - + + Show help about clusterdb command line @@ -173,8 +173,8 @@ PostgreSQL documentation - - + + Specifies the host name of the machine on which the server is @@ -185,8 +185,8 @@ PostgreSQL documentation - - + + Specifies the TCP port or local Unix domain socket file @@ -197,8 +197,8 @@ PostgreSQL documentation - - + + User name to connect as. @@ -207,8 +207,8 @@ PostgreSQL documentation - - + + Never issue a password prompt. If the server requires @@ -222,8 +222,8 @@ PostgreSQL documentation - - + + Force clusterdb to prompt for a @@ -236,14 +236,14 @@ PostgreSQL documentation for a password if the server demands password authentication. However, clusterdb will waste a connection attempt finding out that the server wants a password. - In some cases it is worth typing to avoid the extra connection attempt. - + Specifies the name of the database to connect to discover what other @@ -277,9 +277,9 @@ PostgreSQL documentation - This utility, like most other PostgreSQL utilities, - also uses the environment variables supported by libpq - (see ). + This utility, like most other PostgreSQL utilities, + also uses the environment variables supported by libpq + (see ). 
@@ -289,8 +289,8 @@ PostgreSQL documentation Diagnostics - In case of difficulty, see - and for + In case of difficulty, see + and for discussions of potential problems and error messages. The database server must be running at the targeted host. Also, any default connection settings and environment @@ -325,7 +325,7 @@ PostgreSQL documentation See Also - + diff --git a/doc/src/sgml/ref/comment.sgml b/doc/src/sgml/ref/comment.sgml index df328117f1..965c5a40ad 100644 --- a/doc/src/sgml/ref/comment.sgml +++ b/doc/src/sgml/ref/comment.sgml @@ -3,7 +3,7 @@ doc/src/sgml/ref/comment.sgml PostgreSQL documentation --> - + COMMENT @@ -23,48 +23,50 @@ PostgreSQL documentation COMMENT ON { - ACCESS METHOD object_name | - AGGREGATE aggregate_name ( aggregate_signature ) | + ACCESS METHOD object_name | + AGGREGATE aggregate_name ( aggregate_signature ) | CAST (source_type AS target_type) | - COLLATION object_name | - COLUMN relation_name.column_name | - CONSTRAINT constraint_name ON table_name | - CONSTRAINT constraint_name ON DOMAIN domain_name | - CONVERSION object_name | - DATABASE object_name | - DOMAIN object_name | - EXTENSION object_name | - EVENT TRIGGER object_name | - FOREIGN DATA WRAPPER object_name | - FOREIGN TABLE object_name | - FUNCTION function_name [ ( [ [ argmode ] [ argname ] argtype [, ...] ] ) ] | - INDEX object_name | - LARGE OBJECT large_object_oid | - MATERIALIZED VIEW object_name | - OPERATOR operator_name (left_type, right_type) | - OPERATOR CLASS object_name USING index_method | - OPERATOR FAMILY object_name USING index_method | - POLICY policy_name ON table_name | - [ PROCEDURAL ] LANGUAGE object_name | - PUBLICATION object_name | - ROLE object_name | - RULE rule_name ON table_name | - SCHEMA object_name | - SEQUENCE object_name | - SERVER object_name | - STATISTICS object_name | - SUBSCRIPTION object_name | - TABLE object_name | - TABLESPACE object_name | - TEXT SEARCH CONFIGURATION object_name | - TEXT SEARCH DICTIONARY object_name | - TEXT SEARCH PARSER object_name | - TEXT SEARCH TEMPLATE object_name | + COLLATION object_name | + COLUMN relation_name.column_name | + CONSTRAINT constraint_name ON table_name | + CONSTRAINT constraint_name ON DOMAIN domain_name | + CONVERSION object_name | + DATABASE object_name | + DOMAIN object_name | + EXTENSION object_name | + EVENT TRIGGER object_name | + FOREIGN DATA WRAPPER object_name | + FOREIGN TABLE object_name | + FUNCTION function_name [ ( [ [ argmode ] [ argname ] argtype [, ...] ] ) ] | + INDEX object_name | + LARGE OBJECT large_object_oid | + MATERIALIZED VIEW object_name | + OPERATOR operator_name (left_type, right_type) | + OPERATOR CLASS object_name USING index_method | + OPERATOR FAMILY object_name USING index_method | + POLICY policy_name ON table_name | + [ PROCEDURAL ] LANGUAGE object_name | + PROCEDURE procedure_name [ ( [ [ argmode ] [ argname ] argtype [, ...] ] ) ] | + PUBLICATION object_name | + ROLE object_name | + ROUTINE routine_name [ ( [ [ argmode ] [ argname ] argtype [, ...] 
] ) ] | + RULE rule_name ON table_name | + SCHEMA object_name | + SEQUENCE object_name | + SERVER object_name | + STATISTICS object_name | + SUBSCRIPTION object_name | + TABLE object_name | + TABLESPACE object_name | + TEXT SEARCH CONFIGURATION object_name | + TEXT SEARCH DICTIONARY object_name | + TEXT SEARCH PARSER object_name | + TEXT SEARCH TEMPLATE object_name | TRANSFORM FOR type_name LANGUAGE lang_name | - TRIGGER trigger_name ON table_name | - TYPE object_name | - VIEW object_name -} IS 'text' + TRIGGER trigger_name ON table_name | + TYPE object_name | + VIEW object_name +} IS 'text' where aggregate_signature is: @@ -83,16 +85,16 @@ COMMENT ON Only one comment string is stored for each object, so to modify a comment, - issue a new COMMENT command for the same object. To remove a + issue a new COMMENT command for the same object. To remove a comment, write NULL in place of the text string. Comments are automatically dropped when their object is dropped. For most kinds of object, only the object's owner can set the comment. - Roles don't have owners, so the rule for COMMENT ON ROLE is + Roles don't have owners, so the rule for COMMENT ON ROLE is that you must be superuser to comment on a superuser role, or have the - CREATEROLE privilege to comment on non-superuser roles. + CREATEROLE privilege to comment on non-superuser roles. Likewise, access methods don't have owners either; you must be superuser to comment on an access method. Of course, a superuser can comment on anything. @@ -103,9 +105,9 @@ COMMENT ON \d family of commands. Other user interfaces to retrieve comments can be built atop the same built-in functions that psql uses, namely - obj_description, col_description, - and shobj_description - (see ). + obj_description, col_description, + and shobj_description + (see ). @@ -121,13 +123,15 @@ COMMENT ON function_name operator_name policy_name + procedure_name + routine_name rule_name trigger_name The name of the object to be commented. Names of tables, aggregates, collations, conversions, domains, foreign tables, functions, - indexes, operators, operator classes, operator families, sequences, + indexes, operators, operator classes, operator families, procedures, routines, sequences, statistics, text search objects, types, and views can be schema-qualified. When commenting on a column, relation_name must refer @@ -170,15 +174,15 @@ COMMENT ON argmode - The mode of a function or aggregate - argument: IN, OUT, - INOUT, or VARIADIC. - If omitted, the default is IN. + The mode of a function, procedure, or aggregate + argument: IN, OUT, + INOUT, or VARIADIC. + If omitted, the default is IN. Note that COMMENT does not actually pay - any attention to OUT arguments, since only the input + any attention to OUT arguments, since only the input arguments are needed to determine the function's identity. - So it is sufficient to list the IN, INOUT, - and VARIADIC arguments. + So it is sufficient to list the IN, INOUT, + and VARIADIC arguments. @@ -187,7 +191,7 @@ COMMENT ON argname - The name of a function or aggregate argument. + The name of a function, procedure, or aggregate argument. Note that COMMENT does not actually pay any attention to argument names, since only the argument data types are needed to determine the function's identity. @@ -199,7 +203,7 @@ COMMENT ON argtype - The data type of a function or aggregate argument. + The data type of a function, procedure, or aggregate argument. 
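Tying the argument-type rules above to a concrete (hypothetical) procedure, a hedged sketch; obj_description is one of the built-in retrieval functions mentioned earlier:

COMMENT ON PROCEDURE report_monthly (integer, integer) IS 'Runs the monthly report';

-- Only the input argument types are needed to identify the routine.
SELECT obj_description('report_monthly(integer, integer)'::regprocedure, 'pg_proc');

-- Writing NULL in place of the text string removes the comment.
COMMENT ON PROCEDURE report_monthly (integer, integer) IS NULL;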
@@ -219,7 +223,7 @@ COMMENT ON The data type(s) of the operator's arguments (optionally - schema-qualified). Write NONE for the missing argument + schema-qualified). Write NONE for the missing argument of a prefix or postfix operator. @@ -258,7 +262,7 @@ COMMENT ON text - The new comment, written as a string literal; or NULL + The new comment, written as a string literal; or NULL to drop the comment. @@ -325,6 +329,7 @@ COMMENT ON OPERATOR - (NONE, integer) IS 'Unary minus'; COMMENT ON OPERATOR CLASS int4ops USING btree IS '4 byte integer operators for btrees'; COMMENT ON OPERATOR FAMILY integer_ops USING btree IS 'all integer operators for btrees'; COMMENT ON POLICY my_policy ON mytable IS 'Filter rows by users'; +COMMENT ON PROCEDURE my_proc (integer, integer) IS 'Runs a report'; COMMENT ON ROLE my_role IS 'Administration group for finance tables'; COMMENT ON RULE my_rule ON my_table IS 'Logs updates of employee records'; COMMENT ON SCHEMA my_schema IS 'Departmental data'; diff --git a/doc/src/sgml/ref/commit.sgml b/doc/src/sgml/ref/commit.sgml index e93c216849..b2e8d5d180 100644 --- a/doc/src/sgml/ref/commit.sgml +++ b/doc/src/sgml/ref/commit.sgml @@ -3,7 +3,7 @@ doc/src/sgml/ref/commit.sgml PostgreSQL documentation --> - + COMMIT @@ -55,12 +55,12 @@ COMMIT [ WORK | TRANSACTION ] Notes - Use to + Use to abort a transaction. - Issuing COMMIT when not inside a transaction does + Issuing COMMIT when not inside a transaction does no harm, but it will provoke a warning message. @@ -89,8 +89,8 @@ COMMIT; See Also - - + + diff --git a/doc/src/sgml/ref/commit_prepared.sgml b/doc/src/sgml/ref/commit_prepared.sgml index e1988ad318..d938b65bbe 100644 --- a/doc/src/sgml/ref/commit_prepared.sgml +++ b/doc/src/sgml/ref/commit_prepared.sgml @@ -3,7 +3,7 @@ doc/src/sgml/ref/commit_prepared.sgml PostgreSQL documentation --> - + COMMIT PREPARED @@ -21,7 +21,7 @@ PostgreSQL documentation -COMMIT PREPARED transaction_id +COMMIT PREPARED transaction_id @@ -39,7 +39,7 @@ COMMIT PREPARED transaction_id - transaction_id + transaction_id The transaction identifier of the transaction that is to be @@ -75,7 +75,7 @@ COMMIT PREPARED transaction_id Examples Commit the transaction identified by the transaction - identifier foobar: + identifier foobar: COMMIT PREPARED 'foobar'; @@ -99,8 +99,8 @@ COMMIT PREPARED 'foobar'; See Also - - + + diff --git a/doc/src/sgml/ref/copy.sgml b/doc/src/sgml/ref/copy.sgml index 8de1150dfb..13a8b68d95 100644 --- a/doc/src/sgml/ref/copy.sgml +++ b/doc/src/sgml/ref/copy.sgml @@ -4,7 +4,7 @@ PostgreSQL documentation --> - + COPY @@ -54,10 +54,10 @@ COPY { table_name [ ( COPY moves data between PostgreSQL tables and standard file-system files. COPY TO copies the contents of a table - to a file, while COPY FROM copies - data from a file to a table (appending the data to + to a file, while COPY FROM copies + data from a file to a table (appending the data to whatever is in the table already). COPY TO - can also copy the results of a SELECT query. + can also copy the results of a SELECT query. @@ -112,16 +112,16 @@ COPY { table_name [ ( query - A , , - , or - command whose results are to be + A , , + , or + command whose results are to be copied. Note that parentheses are required around the query. 
- For INSERT, UPDATE and - DELETE queries a RETURNING clause must be provided, + For INSERT, UPDATE and + DELETE queries a RETURNING clause must be provided, and the target relation must not have a conditional rule, nor - an ALSO rule, nor an INSTEAD rule + an ALSO rule, nor an INSTEAD rule that expands to multiple statements. @@ -133,7 +133,7 @@ COPY { table_name [ ( The path name of the input or output file. An input file name can be an absolute or relative path, but an output file name must be an absolute - path. Windows users might need to use an E'' string and + path. Windows users might need to use an E'' string and double any backslashes used in the path name. @@ -144,7 +144,7 @@ COPY { table_name [ ( A command to execute. In COPY FROM, the input is - read from standard output of the command, and in COPY TO, + read from standard output of the command, and in COPY TO, the output is written to the standard input of the command. @@ -181,9 +181,9 @@ COPY { table_name [ ( Specifies whether the selected option should be turned on or off. - You can write TRUE, ON, or + You can write TRUE, ON, or 1 to enable the option, and FALSE, - OFF, or 0 to disable it. The + OFF, or 0 to disable it. The boolean value can also be omitted, in which case TRUE is assumed. @@ -195,10 +195,10 @@ COPY { table_name [ ( Selects the data format to be read or written: - text, - csv (Comma Separated Values), - or binary. - The default is text. + text, + csv (Comma Separated Values), + or binary. + The default is text. @@ -220,7 +220,7 @@ COPY { table_name [ ( Requests copying the data with rows already frozen, just as they - would be after running the VACUUM FREEZE command. + would be after running the VACUUM FREEZE command. This is intended as a performance option for initial data loading. Rows will be frozen only if the table being loaded has been created or truncated in the current subtransaction, there are no cursors @@ -241,9 +241,9 @@ COPY { table_name [ ( Specifies the character that separates columns within each row (line) of the file. The default is a tab character in text format, - a comma in CSV format. + a comma in CSV format. This must be a single one-byte character. - This option is not allowed when using binary format. + This option is not allowed when using binary format. @@ -254,10 +254,10 @@ COPY { table_name [ ( Specifies the string that represents a null value. The default is \N (backslash-N) in text format, and an unquoted empty - string in CSV format. You might prefer an + string in CSV format. You might prefer an empty string even in text format for cases where you don't want to distinguish nulls from empty strings. - This option is not allowed when using binary format. + This option is not allowed when using binary format. @@ -279,7 +279,7 @@ COPY { table_name [ ( CSV format. + This option is allowed only when using CSV format. @@ -291,7 +291,7 @@ COPY { table_name [ ( CSV format. + This option is allowed only when using CSV format. @@ -301,59 +301,59 @@ COPY { table_name [ ( Specifies the character that should appear before a - data character that matches the QUOTE value. - The default is the same as the QUOTE value (so that + data character that matches the QUOTE value. + The default is the same as the QUOTE value (so that the quoting character is doubled if it appears in the data). This must be a single one-byte character. - This option is allowed only when using CSV format. + This option is allowed only when using CSV format. 
- FORCE_QUOTE + FORCE_QUOTE Forces quoting to be - used for all non-NULL values in each specified column. - NULL output is never quoted. If * is specified, - non-NULL values will be quoted in all columns. - This option is allowed only in COPY TO, and only when - using CSV format. + used for all non-NULL values in each specified column. + NULL output is never quoted. If * is specified, + non-NULL values will be quoted in all columns. + This option is allowed only in COPY TO, and only when + using CSV format. - FORCE_NOT_NULL + FORCE_NOT_NULL Do not match the specified columns' values against the null string. In the default case where the null string is empty, this means that empty values will be read as zero-length strings rather than nulls, even when they are not quoted. - This option is allowed only in COPY FROM, and only when - using CSV format. + This option is allowed only in COPY FROM, and only when + using CSV format. - FORCE_NULL + FORCE_NULL Match the specified columns' values against the null string, even if it has been quoted, and if a match is found set the value to - NULL. In the default case where the null string is empty, + NULL. In the default case where the null string is empty, this converts a quoted empty string into NULL. - This option is allowed only in COPY FROM, and only when - using CSV format. + This option is allowed only in COPY FROM, and only when + using CSV format. - ENCODING + ENCODING Specifies that the file is encoded in the table_name [ ( Outputs - On successful completion, a COPY command returns a command + On successful completion, a COPY command returns a command tag of the form COPY count @@ -382,10 +382,10 @@ COPY count - psql will print this command tag only if the command - was not COPY ... TO STDOUT, or the - equivalent psql meta-command - \copy ... to stdout. This is to prevent confusing the + psql will print this command tag only if the command + was not COPY ... TO STDOUT, or the + equivalent psql meta-command + \copy ... to stdout. This is to prevent confusing the command tag with the data that was just printed. @@ -402,17 +402,18 @@ COPY count - COPY FROM can be used with plain tables and with views - that have INSTEAD OF INSERT triggers. + COPY FROM can be used with plain, foreign, or + partitioned tables or with views that have + INSTEAD OF INSERT triggers. COPY only deals with the specific table named; it does not copy data to or from child tables. Thus for example - COPY table TO + COPY table TO shows the same data as SELECT * FROM ONLY table. But COPY - (SELECT * FROM table) TO ... + class="parameter">table. But COPY + (SELECT * FROM table) TO ... can be used to dump all of the data in an inheritance hierarchy. @@ -427,7 +428,7 @@ COPY count If row-level security is enabled for the table, the relevant SELECT policies will apply to COPY - table TO statements. + table TO statements. Currently, COPY FROM is not supported for tables with row-level security. Use equivalent INSERT statements instead. @@ -444,14 +445,18 @@ COPY count by the server, not by the client application, must be executable by the PostgreSQL user. COPY naming a file or command is only allowed to - database superusers, since it allows reading or writing any file that the - server has privileges to access. + database superusers or users who are granted one of the default roles + pg_read_server_files, + pg_write_server_files, + or pg_execute_server_program, since it allows reading + or writing any file or running a program that the server has privileges to + access. 
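Combining several of the options above, a hedged sketch for a hypothetical table orders with columns id and note (server-side files require the privileges just described):

-- Export as CSV with a header line, quoting the id column even when
-- quoting is not strictly required.
COPY orders TO '/tmp/orders.csv' WITH (FORMAT csv, HEADER, FORCE_QUOTE (id));

-- Re-import, converting quoted empty strings in note back to NULL.
COPY orders FROM '/tmp/orders.csv' WITH (FORMAT csv, HEADER, FORCE_NULL (note));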
Do not confuse COPY with the psql instruction - \copy. \copy invokes + \copy. \copy invokes COPY FROM STDIN or COPY TO STDOUT, and then fetches/stores the data in a file accessible to the psql client. Thus, @@ -482,7 +487,7 @@ COPY count For identity columns, the COPY FROM command will always write the column values provided in the input data, like - the INPUT option OVERRIDING SYSTEM + the INSERT option OVERRIDING SYSTEM VALUE. @@ -491,10 +496,10 @@ COPY count DateStyle. To ensure portability to other PostgreSQL installations that might use non-default DateStyle settings, - DateStyle should be set to ISO before - using COPY TO. It is also a good idea to avoid dumping + DateStyle should be set to ISO before + using COPY TO. It is also a good idea to avoid dumping data with IntervalStyle set to - sql_standard, because negative interval values might be + sql_standard, because negative interval values might be misinterpreted by a server that has a different setting for IntervalStyle. @@ -519,7 +524,7 @@ COPY count - FORCE_NULL and FORCE_NOT_NULL can be used + FORCE_NULL and FORCE_NOT_NULL can be used simultaneously on the same column. This results in converting quoted null strings to null values and unquoted null strings to empty strings. @@ -533,7 +538,7 @@ COPY count Text Format - When the text format is used, + When the text format is used, the data read or written is a text file with one line per table row. Columns in a row are separated by the delimiter character. The column values themselves are strings generated by the @@ -548,17 +553,17 @@ COPY count End of data can be represented by a single line containing just - backslash-period (\.). An end-of-data marker is + backslash-period (\.). An end-of-data marker is not necessary when reading from a file, since the end of file serves perfectly well; it is needed only when copying data to or from client applications using pre-3.0 client protocol. - Backslash characters (\) can be used in the + Backslash characters (\) can be used in the COPY data to quote data characters that might otherwise be taken as row or column delimiters. In particular, the - following characters must be preceded by a backslash if + following characters must be preceded by a backslash if they appear as part of a column value: backslash itself, newline, carriage return, and the current delimiter character. @@ -587,37 +592,37 @@ COPY count - \b + \b Backspace (ASCII 8) - \f + \f Form feed (ASCII 12) - \n + \n Newline (ASCII 10) - \r + \r Carriage return (ASCII 13) - \t + \t Tab (ASCII 9) - \v + \v Vertical tab (ASCII 11) - \digits + \digits Backslash followed by one to three octal digits specifies the character with that numeric code - \xdigits - Backslash x followed by one or two hex digits specifies + \xdigits + Backslash x followed by one or two hex digits specifies the character with that numeric code @@ -633,15 +638,15 @@ COPY count Any other backslashed character that is not mentioned in the above table will be taken to represent itself. However, beware of adding backslashes unnecessarily, since that might accidentally produce a string matching the - end-of-data marker (\.) or the null string (\N by + end-of-data marker (\.) or the null string (\N by default). These strings will be recognized before any other backslash processing is done. It is strongly recommended that applications generating COPY data convert - data newlines and carriage returns to the \n and - \r sequences respectively. 
At present it is + data newlines and carriage returns to the \n and + \r sequences respectively. At present it is possible to represent a data carriage return by a backslash and carriage return, and to represent a data newline by a backslash and newline. However, these representations might not be accepted in future releases. @@ -652,10 +657,10 @@ COPY count COPY TO will terminate each row with a Unix-style - newline (\n). Servers running on Microsoft Windows instead - output carriage return/newline (\r\n), but only for - COPY to a server file; for consistency across platforms, - COPY TO STDOUT always sends \n + newline (\n). Servers running on Microsoft Windows instead + output carriage return/newline (\r\n), but only for + COPY to a server file; for consistency across platforms, + COPY TO STDOUT always sends \n regardless of server platform. COPY FROM can handle lines ending with newlines, carriage returns, or carriage return/newlines. To reduce the risk of @@ -670,62 +675,62 @@ COPY count This format option is used for importing and exporting the Comma - Separated Value (CSV) file format used by many other + Separated Value (CSV) file format used by many other programs, such as spreadsheets. Instead of the escaping rules used by PostgreSQL's standard text format, it produces and recognizes the common CSV escaping mechanism. - The values in each record are separated by the DELIMITER + The values in each record are separated by the DELIMITER character. If the value contains the delimiter character, the - QUOTE character, the NULL string, a carriage + QUOTE character, the NULL string, a carriage return, or line feed character, then the whole value is prefixed and - suffixed by the QUOTE character, and any occurrence - within the value of a QUOTE character or the - ESCAPE character is preceded by the escape character. - You can also use FORCE_QUOTE to force quotes when outputting - non-NULL values in specific columns. + suffixed by the QUOTE character, and any occurrence + within the value of a QUOTE character or the + ESCAPE character is preceded by the escape character. + You can also use FORCE_QUOTE to force quotes when outputting + non-NULL values in specific columns. - The CSV format has no standard way to distinguish a - NULL value from an empty string. - PostgreSQL's COPY handles this by quoting. - A NULL is output as the NULL parameter string - and is not quoted, while a non-NULL value matching the - NULL parameter string is quoted. For example, with the - default settings, a NULL is written as an unquoted empty + The CSV format has no standard way to distinguish a + NULL value from an empty string. + PostgreSQL's COPY handles this by quoting. + A NULL is output as the NULL parameter string + and is not quoted, while a non-NULL value matching the + NULL parameter string is quoted. For example, with the + default settings, a NULL is written as an unquoted empty string, while an empty string data value is written with double quotes - (""). Reading values follows similar rules. You can - use FORCE_NOT_NULL to prevent NULL input + (""). Reading values follows similar rules. You can + use FORCE_NOT_NULL to prevent NULL input comparisons for specific columns. You can also use - FORCE_NULL to convert quoted null string data values to - NULL. + FORCE_NULL to convert quoted null string data values to + NULL. - Because backslash is not a special character in the CSV - format, \., the end-of-data marker, could also appear - as a data value. To avoid any misinterpretation, a \. 
+ Because backslash is not a special character in the CSV + format, \., the end-of-data marker, could also appear + as a data value. To avoid any misinterpretation, a \. data value appearing as a lone entry on a line is automatically quoted on output, and on input, if quoted, is not interpreted as the end-of-data marker. If you are loading a file created by another application that has a single unquoted column and might have a - value of \., you might need to quote that value in the + value of \., you might need to quote that value in the input file. - In CSV format, all characters are significant. A quoted value + In CSV format, all characters are significant. A quoted value surrounded by white space, or any characters other than - DELIMITER, will include those characters. This can cause - errors if you import data from a system that pads CSV + DELIMITER, will include those characters. This can cause + errors if you import data from a system that pads CSV lines with white space out to some fixed width. If such a situation - arises you might need to preprocess the CSV file to remove + arises you might need to preprocess the CSV file to remove the trailing white space, before importing the data into - PostgreSQL. + PostgreSQL. @@ -743,7 +748,7 @@ COPY count Many programs produce strange and occasionally perverse CSV files, so the file format is more a convention than a standard. Thus you might encounter some files that cannot be imported using this - mechanism, and COPY might produce files that other + mechanism, and COPY might produce files that other programs cannot process. @@ -756,17 +761,17 @@ COPY count The binary format option causes all data to be stored/read as binary format rather than as text. It is - somewhat faster than the text and CSV formats, + somewhat faster than the text and CSV formats, but a binary-format file is less portable across machine architectures and PostgreSQL versions. Also, the binary format is very data type specific; for example - it will not work to output binary data from a smallint column - and read it into an integer column, even though that would work + it will not work to output binary data from a smallint column + and read it into an integer column, even though that would work fine in text format. - The binary file format consists + The binary file format consists of a file header, zero or more tuples containing the row data, and a file trailer. Headers and data are in network byte order. @@ -790,7 +795,7 @@ COPY count Signature -11-byte sequence PGCOPY\n\377\r\n\0 — note that the zero byte +11-byte sequence PGCOPY\n\377\r\n\0 — note that the zero byte is a required part of the signature. (The signature is designed to allow easy identification of files that have been munged by a non-8-bit-clean transfer. This signature will be changed by end-of-line-translation @@ -804,7 +809,7 @@ filters, dropped zero bytes, dropped high bits, or parity changes.) 32-bit integer bit mask to denote important aspects of the file format. Bits -are numbered from 0 (LSB) to 31 (MSB). Note that +are numbered from 0 (LSB) to 31 (MSB). Note that this field is stored in network byte order (most significant byte first), as are all the integer fields used in the file format. Bits 16-31 are reserved to denote critical file format issues; a reader @@ -880,7 +885,7 @@ to be specified. 
To determine the appropriate binary format for the actual tuple data you should consult the PostgreSQL source, in -particular the *send and *recv functions for +particular the *send and *recv functions for each column's data type (typically these functions are found in the src/backend/utils/adt/ directory of the source distribution). @@ -924,7 +929,7 @@ COPY country TO STDOUT (DELIMITER '|'); - To copy data from a file into the country table: + To copy data from a file into the country table: COPY country FROM '/usr1/proj/bray/sql/country_data'; @@ -986,7 +991,7 @@ ZW ZIMBABWE - The following syntax was used before PostgreSQL + The following syntax was used before PostgreSQL version 9.0 and is still supported: @@ -1015,13 +1020,13 @@ COPY { table_name [ ( column_name [, ...] | * } ] ] ] - Note that in this syntax, BINARY and CSV are - treated as independent keywords, not as arguments of a FORMAT + Note that in this syntax, BINARY and CSV are + treated as independent keywords, not as arguments of a FORMAT option. - The following syntax was used before PostgreSQL + The following syntax was used before PostgreSQL version 7.3 and is still supported: diff --git a/doc/src/sgml/ref/create_access_method.sgml b/doc/src/sgml/ref/create_access_method.sgml index 891926dba5..851c5e63be 100644 --- a/doc/src/sgml/ref/create_access_method.sgml +++ b/doc/src/sgml/ref/create_access_method.sgml @@ -73,12 +73,12 @@ CREATE ACCESS METHOD name handler_function is the name (possibly schema-qualified) of a previously registered function that represents the access method. The handler function must be - declared to take a single argument of type internal, + declared to take a single argument of type internal, and its return type depends on the type of access method; for INDEX access methods, it must be index_am_handler. The C-level API that the handler function must implement varies depending on the type of access method. - The index access method API is described in . + The index access method API is described in . @@ -89,8 +89,8 @@ CREATE ACCESS METHOD name Examples - Create an index access method heptree with - handler function heptree_handler: + Create an index access method heptree with + handler function heptree_handler: CREATE ACCESS METHOD heptree TYPE INDEX HANDLER heptree_handler; @@ -101,7 +101,7 @@ CREATE ACCESS METHOD heptree TYPE INDEX HANDLER heptree_handler; CREATE ACCESS METHOD is a - PostgreSQL extension. + PostgreSQL extension. @@ -109,9 +109,9 @@ CREATE ACCESS METHOD heptree TYPE INDEX HANDLER heptree_handler; See Also - - - + + + diff --git a/doc/src/sgml/ref/create_aggregate.sgml b/doc/src/sgml/ref/create_aggregate.sgml index 6a8acfb4f9..b8cd2e7af9 100644 --- a/doc/src/sgml/ref/create_aggregate.sgml +++ b/doc/src/sgml/ref/create_aggregate.sgml @@ -3,7 +3,7 @@ doc/src/sgml/ref/create_aggregate.sgml PostgreSQL documentation --> - + CREATE AGGREGATE @@ -22,59 +22,64 @@ PostgreSQL documentation CREATE AGGREGATE name ( [ argmode ] [ argname ] arg_data_type [ , ... 
] ) ( - SFUNC = sfunc, - STYPE = state_data_type - [ , SSPACE = state_data_size ] - [ , FINALFUNC = ffunc ] + SFUNC = sfunc, + STYPE = state_data_type + [ , SSPACE = state_data_size ] + [ , FINALFUNC = ffunc ] [ , FINALFUNC_EXTRA ] - [ , COMBINEFUNC = combinefunc ] - [ , SERIALFUNC = serialfunc ] - [ , DESERIALFUNC = deserialfunc ] - [ , INITCOND = initial_condition ] - [ , MSFUNC = msfunc ] - [ , MINVFUNC = minvfunc ] - [ , MSTYPE = mstate_data_type ] - [ , MSSPACE = mstate_data_size ] - [ , MFINALFUNC = mffunc ] + [ , FINALFUNC_MODIFY = { READ_ONLY | SHAREABLE | READ_WRITE } ] + [ , COMBINEFUNC = combinefunc ] + [ , SERIALFUNC = serialfunc ] + [ , DESERIALFUNC = deserialfunc ] + [ , INITCOND = initial_condition ] + [ , MSFUNC = msfunc ] + [ , MINVFUNC = minvfunc ] + [ , MSTYPE = mstate_data_type ] + [ , MSSPACE = mstate_data_size ] + [ , MFINALFUNC = mffunc ] [ , MFINALFUNC_EXTRA ] - [ , MINITCOND = minitial_condition ] - [ , SORTOP = sort_operator ] + [ , MFINALFUNC_MODIFY = { READ_ONLY | SHAREABLE | READ_WRITE } ] + [ , MINITCOND = minitial_condition ] + [ , SORTOP = sort_operator ] [ , PARALLEL = { SAFE | RESTRICTED | UNSAFE } ] ) CREATE AGGREGATE name ( [ [ argmode ] [ argname ] arg_data_type [ , ... ] ] ORDER BY [ argmode ] [ argname ] arg_data_type [ , ... ] ) ( - SFUNC = sfunc, - STYPE = state_data_type - [ , SSPACE = state_data_size ] - [ , FINALFUNC = ffunc ] + SFUNC = sfunc, + STYPE = state_data_type + [ , SSPACE = state_data_size ] + [ , FINALFUNC = ffunc ] [ , FINALFUNC_EXTRA ] - [ , INITCOND = initial_condition ] + [ , FINALFUNC_MODIFY = { READ_ONLY | SHAREABLE | READ_WRITE } ] + [ , INITCOND = initial_condition ] [ , PARALLEL = { SAFE | RESTRICTED | UNSAFE } ] [ , HYPOTHETICAL ] ) or the old syntax -CREATE AGGREGATE name ( - BASETYPE = base_type, - SFUNC = sfunc, - STYPE = state_data_type - [ , SSPACE = state_data_size ] - [ , FINALFUNC = ffunc ] +CREATE AGGREGATE name ( + BASETYPE = base_type, + SFUNC = sfunc, + STYPE = state_data_type + [ , SSPACE = state_data_size ] + [ , FINALFUNC = ffunc ] [ , FINALFUNC_EXTRA ] - [ , COMBINEFUNC = combinefunc ] - [ , SERIALFUNC = serialfunc ] - [ , DESERIALFUNC = deserialfunc ] - [ , INITCOND = initial_condition ] - [ , MSFUNC = msfunc ] - [ , MINVFUNC = minvfunc ] - [ , MSTYPE = mstate_data_type ] - [ , MSSPACE = mstate_data_size ] - [ , MFINALFUNC = mffunc ] + [ , FINALFUNC_MODIFY = { READ_ONLY | SHAREABLE | READ_WRITE } ] + [ , COMBINEFUNC = combinefunc ] + [ , SERIALFUNC = serialfunc ] + [ , DESERIALFUNC = deserialfunc ] + [ , INITCOND = initial_condition ] + [ , MSFUNC = msfunc ] + [ , MINVFUNC = minvfunc ] + [ , MSTYPE = mstate_data_type ] + [ , MSSPACE = mstate_data_size ] + [ , MFINALFUNC = mffunc ] [ , MFINALFUNC_EXTRA ] - [ , MINITCOND = minitial_condition ] - [ , SORTOP = sort_operator ] + [ , MFINALFUNC_MODIFY = { READ_ONLY | SHAREABLE | READ_WRITE } ] + [ , MINITCOND = minitial_condition ] + [ , SORTOP = sort_operator ] ) @@ -86,14 +91,14 @@ CREATE AGGREGATE name ( CREATE AGGREGATE defines a new aggregate function. Some basic and commonly-used aggregate functions are included with the distribution; they are documented in . If one defines new types or needs + linkend="functions-aggregate"/>. If one defines new types or needs an aggregate function not already provided, then CREATE AGGREGATE can be used to provide the desired features. If a schema name is given (for example, CREATE AGGREGATE - myschema.myagg ...) then the aggregate function is created in the + myschema.myagg ...) 
then the aggregate function is created in the specified schema. Otherwise it is created in the current schema. @@ -105,26 +110,26 @@ CREATE AGGREGATE name ( the name and input data type(s) of every ordinary function in the same schema. This behavior is identical to overloading of ordinary function names - (see ). + (see ). A simple aggregate function is made from one or two ordinary functions: a state transition function - sfunc, + sfunc, and an optional final calculation function - ffunc. + ffunc. These are used as follows: -sfunc( internal-state, next-data-values ) ---> next-internal-state -ffunc( internal-state ) ---> aggregate-value +sfunc( internal-state, next-data-values ) ---> next-internal-state +ffunc( internal-state ) ---> aggregate-value PostgreSQL creates a temporary variable - of data type stype + of data type stype to hold the current internal state of the aggregate. At each input row, the aggregate argument value(s) are calculated and the state transition function is invoked with the current state value @@ -155,9 +160,9 @@ CREATE AGGREGATE name ( all-nonnull input values. This is handy for implementing aggregates like max. Note that this behavior is only available when - state_data_type + state_data_type is the same as the first - arg_data_type. + arg_data_type. When these types are different, you must supply a nonnull initial condition or use a nonstrict transition function. @@ -186,56 +191,57 @@ CREATE AGGREGATE name ( is polymorphic and the state value's data type would be inadequate to pin down the result type. These extra parameters are always passed as NULL (and so the final function must not be strict when - the FINALFUNC_EXTRA option is used), but nonetheless they + the FINALFUNC_EXTRA option is used), but nonetheless they are valid parameters. The final function could for example make use - of get_fn_expr_argtype to identify the actual argument type + of get_fn_expr_argtype to identify the actual argument type in the current call. - An aggregate can optionally support moving-aggregate mode, - as described in . This requires - specifying the MSFUNC, MINVFUNC, - and MSTYPE parameters, and optionally - the MSPACE, MFINALFUNC, MFINALFUNC_EXTRA, - and MINITCOND parameters. Except for MINVFUNC, + An aggregate can optionally support moving-aggregate mode, + as described in . This requires + specifying the MSFUNC, MINVFUNC, + and MSTYPE parameters, and optionally + the MSPACE, MFINALFUNC, + MFINALFUNC_EXTRA, MFINALFUNC_MODIFY, + and MINITCOND parameters. Except for MINVFUNC, these parameters work like the corresponding simple-aggregate parameters - without M; they define a separate implementation of the + without M; they define a separate implementation of the aggregate that includes an inverse transition function. The syntax with ORDER BY in the parameter list creates a special type of aggregate called an ordered-set - aggregate; or if HYPOTHETICAL is specified, then + aggregate; or if HYPOTHETICAL is specified, then a hypothetical-set aggregate is created. These aggregates operate over groups of sorted values in order-dependent ways, so that specification of an input sort order is an essential part of a - call. Also, they can have direct arguments, which are + call. Also, they can have direct arguments, which are arguments that are evaluated only once per aggregation rather than once per input row. 
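As a minimal sketch of the sfunc/stype machinery described earlier in this section (the aggregate name is illustrative; float8mul is the built-in multiplication function for float8, i.e. double precision):

-- A running-product aggregate: the state starts at 1 and is multiplied
-- by each input value; with no FINALFUNC, the final state is the result.
CREATE AGGREGATE running_product (float8) (
    SFUNC    = float8mul,
    STYPE    = float8,
    INITCOND = '1'
);

SELECT running_product(x)
FROM (VALUES (2.0::float8), (3.0), (4.0)) AS t(x);   -- yields 24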
Hypothetical-set aggregates are a subclass of ordered-set aggregates in which some of the direct arguments are required to match, in number and data types, the aggregated argument columns. This allows the values of those direct arguments to be added to the collection of - aggregate-input rows as an additional hypothetical row. + aggregate-input rows as an additional hypothetical row. - An aggregate can optionally support partial aggregation, - as described in . - This requires specifying the COMBINEFUNC parameter. - If the state_data_type - is internal, it's usually also appropriate to provide the - SERIALFUNC and DESERIALFUNC parameters so that + An aggregate can optionally support partial aggregation, + as described in . + This requires specifying the COMBINEFUNC parameter. + If the state_data_type + is internal, it's usually also appropriate to provide the + SERIALFUNC and DESERIALFUNC parameters so that parallel aggregation is possible. Note that the aggregate must also be - marked PARALLEL SAFE to enable parallel aggregation. + marked PARALLEL SAFE to enable parallel aggregation. - Aggregates that behave like MIN or MAX can + Aggregates that behave like MIN or MAX can sometimes be optimized by looking into an index instead of scanning every input row. If this aggregate can be so optimized, indicate it by - specifying a sort operator. The basic requirement is that + specifying a sort operator. The basic requirement is that the aggregate must yield the first element in the sort ordering induced by the operator; in other words: @@ -247,9 +253,9 @@ SELECT col FROM tab ORDER BY col USING sortop LIMIT 1; Further assumptions are that the aggregate ignores null inputs, and that it delivers a null result if and only if there were no non-null inputs. - Ordinarily, a data type's < operator is the proper sort - operator for MIN, and > is the proper sort - operator for MAX. Note that the optimization will never + Ordinarily, a data type's < operator is the proper sort + operator for MIN, and > is the proper sort + operator for MAX. Note that the optimization will never actually take effect unless the specified operator is the less than or greater than strategy member of a B-tree index operator class. @@ -268,7 +274,7 @@ SELECT col FROM tab ORDER BY col USING sortop LIMIT 1; - name + name The name (optionally schema-qualified) of the aggregate function @@ -282,10 +288,10 @@ SELECT col FROM tab ORDER BY col USING sortop LIMIT 1; - The mode of an argument: IN or VARIADIC. - (Aggregate functions do not support OUT arguments.) - If omitted, the default is IN. Only the last argument - can be marked VARIADIC. + The mode of an argument: IN or VARIADIC. + (Aggregate functions do not support OUT arguments.) + If omitted, the default is IN. Only the last argument + can be marked VARIADIC. @@ -302,11 +308,11 @@ SELECT col FROM tab ORDER BY col USING sortop LIMIT 1; - arg_data_type + arg_data_type An input data type on which this aggregate function operates. - To create a zero-argument aggregate function, write * + To create a zero-argument aggregate function, write * in place of the list of argument specifications. (An example of such an aggregate is count(*).) 
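A hedged sketch of the zero-argument form just described; int8inc is the built-in function that increments a bigint and is assumed here to be suitable as a transition function, while the aggregate name is illustrative:

-- A count(*)-style aggregate: the transition function receives only the
-- state value, which it increments once per input row.
CREATE AGGREGATE row_tally (*) (
    SFUNC    = int8inc,
    STYPE    = bigint,
    INITCOND = '0'
);

SELECT row_tally(*) FROM (VALUES (1), (2), (3)) AS t(x);   -- yields 3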
@@ -314,33 +320,33 @@ SELECT col FROM tab ORDER BY col USING sortop LIMIT 1; - base_type + base_type - In the old syntax for CREATE AGGREGATE, the input data type - is specified by a basetype parameter rather than being + In the old syntax for CREATE AGGREGATE, the input data type + is specified by a basetype parameter rather than being written next to the aggregate name. Note that this syntax allows only one input parameter. To define a zero-argument aggregate function - with this syntax, specify the basetype as - "ANY" (not *). + with this syntax, specify the basetype as + "ANY" (not *). Ordered-set aggregates cannot be defined with the old syntax. - sfunc + sfunc The name of the state transition function to be called for each - input row. For a normal N-argument - aggregate function, the sfunc - must take N+1 arguments, + input row. For a normal N-argument + aggregate function, the sfunc + must take N+1 arguments, the first being of type state_data_type and the rest + class="parameter">state_data_type and the rest matching the declared input data type(s) of the aggregate. The function must return a value of type state_data_type. This function + class="parameter">state_data_type. This function takes the current state value and the current input data value(s), and returns the next state value. @@ -355,7 +361,7 @@ SELECT col FROM tab ORDER BY col USING sortop LIMIT 1; - state_data_type + state_data_type The data type for the aggregate's state value. @@ -364,35 +370,35 @@ SELECT col FROM tab ORDER BY col USING sortop LIMIT 1; - state_data_size + state_data_size The approximate average size (in bytes) of the aggregate's state value. If this parameter is omitted or is zero, a default estimate is used - based on the state_data_type. + based on the state_data_type. The planner uses this value to estimate the memory required for a grouped aggregate query. The planner will consider using hash aggregation for such a query only if the hash table is estimated to fit - in ; therefore, large values of this + in ; therefore, large values of this parameter discourage use of hash aggregation. - ffunc + ffunc The name of the final function called to compute the aggregate's result after all input rows have been traversed. For a normal aggregate, this function must take a single argument of type state_data_type. The return + class="parameter">state_data_type. The return data type of the aggregate is defined as the return type of this - function. If ffunc + function. If ffunc is not specified, then the ending state value is used as the aggregate's result, and the return type is state_data_type. + class="parameter">state_data_type. @@ -402,7 +408,7 @@ SELECT col FROM tab ORDER BY col USING sortop LIMIT 1; - If FINALFUNC_EXTRA is specified, then in addition to the + If FINALFUNC_EXTRA is specified, then in addition to the final state value and any direct arguments, the final function receives extra NULL values corresponding to the aggregate's regular (aggregated) arguments. This is mainly useful to allow correct @@ -413,30 +419,45 @@ SELECT col FROM tab ORDER BY col USING sortop LIMIT 1; - combinefunc + FINALFUNC_MODIFY = { READ_ONLY | SHAREABLE | READ_WRITE } - The combinefunc function + This option specifies whether the final function is a pure function + that does not modify its arguments. READ_ONLY indicates + it does not; the other two values indicate that it may change the + transition state value. See below for more detail. 
The + default is READ_ONLY, except for ordered-set aggregates, + for which the default is READ_WRITE. + + + + + + combinefunc + + + The combinefunc function may optionally be specified to allow the aggregate function to support partial aggregation. If provided, - the combinefunc must - combine two state_data_type + the combinefunc must + combine two state_data_type values, each containing the result of aggregation over some subset of the input values, to produce a - new state_data_type that + new state_data_type that represents the result of aggregating over both sets of inputs. This function can be thought of as - an sfunc, where instead of + an sfunc, where instead of acting upon an individual input row and adding it to the running aggregate state, it adds another aggregate state to the running state. - The combinefunc must be + The combinefunc must be declared as taking two arguments of - the state_data_type and + the state_data_type and returning a value of - the state_data_type. + the state_data_type. Optionally this function may be strict. In this case the function will not be called when either of the input states are null; the other state will be taken as the correct result. @@ -444,11 +465,11 @@ SELECT col FROM tab ORDER BY col USING sortop LIMIT 1; For aggregate functions - whose state_data_type + whose state_data_type is internal, - the combinefunc must not + the combinefunc must not be strict. In this case - the combinefunc must + the combinefunc must ensure that null states are handled correctly and that the state being returned is properly stored in the aggregate memory context. @@ -456,68 +477,68 @@ SELECT col FROM tab ORDER BY col USING sortop LIMIT 1; - serialfunc + serialfunc An aggregate function - whose state_data_type - is internal can participate in parallel aggregation only if it - has a serialfunc function, - which must serialize the aggregate state into a bytea value for + whose state_data_type + is internal can participate in parallel aggregation only if it + has a serialfunc function, + which must serialize the aggregate state into a bytea value for transmission to another process. This function must take a single - argument of type internal and return type bytea. A - corresponding deserialfunc + argument of type internal and return type bytea. A + corresponding deserialfunc is also required. - deserialfunc + deserialfunc Deserialize a previously serialized aggregate state back into - state_data_type. This - function must take two arguments of types bytea - and internal, and produce a result of type internal. - (Note: the second, internal argument is unused, but is required + state_data_type. This + function must take two arguments of types bytea + and internal, and produce a result of type internal. + (Note: the second, internal argument is unused, but is required for type safety reasons.) - initial_condition + initial_condition The initial setting for the state value. This must be a string constant in the form accepted for the data type state_data_type. If not + class="parameter">state_data_type. If not specified, the state value starts out null. - msfunc + msfunc The name of the forward state transition function to be called for each input row in moving-aggregate mode. This is exactly like the regular transition function, except that its first argument and result are of - type mstate_data_type, which might be different - from state_data_type. + type mstate_data_type, which might be different + from state_data_type. 
- minvfunc + minvfunc The name of the inverse state transition function to be used in moving-aggregate mode. This function has the same argument and - result types as msfunc, but it is used to remove + result types as msfunc, but it is used to remove a value from the current aggregate state, rather than add a value to it. The inverse transition function must have the same strictness attribute as the forward state transition function. @@ -526,7 +547,7 @@ SELECT col FROM tab ORDER BY col USING sortop LIMIT 1; - mstate_data_type + mstate_data_type The data type for the aggregate's state value, when using @@ -536,49 +557,59 @@ SELECT col FROM tab ORDER BY col USING sortop LIMIT 1; - mstate_data_size + mstate_data_size The approximate average size (in bytes) of the aggregate's state value, when using moving-aggregate mode. This works the same as - state_data_size. + state_data_size. - mffunc + mffunc The name of the final function called to compute the aggregate's result after all input rows have been traversed, when using - moving-aggregate mode. This works the same as ffunc, + moving-aggregate mode. This works the same as ffunc, except that its first argument's type - is mstate_data_type and extra dummy arguments are - specified by writing MFINALFUNC_EXTRA. - The aggregate result type determined by mffunc - or mstate_data_type must match that determined by the + is mstate_data_type and extra dummy arguments are + specified by writing MFINALFUNC_EXTRA. + The aggregate result type determined by mffunc + or mstate_data_type must match that determined by the aggregate's regular implementation. - minitial_condition + MFINALFUNC_MODIFY = { READ_ONLY | SHAREABLE | READ_WRITE } + + + This option is like FINALFUNC_MODIFY, but it describes + the behavior of the moving-aggregate final function. + + + + + + minitial_condition The initial setting for the state value, when using moving-aggregate - mode. This works the same as initial_condition. + mode. This works the same as initial_condition. - sort_operator + sort_operator - The associated sort operator for a MIN- or - MAX-like aggregate. + The associated sort operator for a MIN- or + MAX-like aggregate. This is just an operator name (possibly schema-qualified). The operator is assumed to have the same input data types as the aggregate (which must be a single-argument normal aggregate). @@ -587,14 +618,14 @@ SELECT col FROM tab ORDER BY col USING sortop LIMIT 1; - PARALLEL + PARALLEL = { SAFE | RESTRICTED | UNSAFE } - The meanings of PARALLEL SAFE, PARALLEL - RESTRICTED, and PARALLEL UNSAFE are the same as - for . An aggregate will not be + The meanings of PARALLEL SAFE, PARALLEL + RESTRICTED, and PARALLEL UNSAFE are the same as + in . An aggregate will not be considered for parallelization if it is marked PARALLEL - UNSAFE (which is the default!) or PARALLEL RESTRICTED. + UNSAFE (which is the default!) or PARALLEL RESTRICTED. Note that the parallel-safety markings of the aggregate's support functions are not consulted by the planner, only the marking of the aggregate itself. @@ -609,7 +640,7 @@ SELECT col FROM tab ORDER BY col USING sortop LIMIT 1; For ordered-set aggregates only, this flag specifies that the aggregate arguments are to be processed according to the requirements for hypothetical-set aggregates: that is, the last few direct arguments must - match the data types of the aggregated (WITHIN GROUP) + match the data types of the aggregated (WITHIN GROUP) arguments. 
The HYPOTHETICAL flag has no effect on run-time behavior, only on parse-time resolution of the data types and collations of the aggregate's arguments. @@ -624,21 +655,50 @@ SELECT col FROM tab ORDER BY col USING sortop LIMIT 1; - - Notes + + Notes In parameters that specify support function names, you can write - a schema name if needed, for example SFUNC = public.sum. + a schema name if needed, for example SFUNC = public.sum. Do not write argument types there, however — the argument types of the support functions are determined from other parameters. + + Ordinarily, PostgreSQL functions are expected to be true functions that + do not modify their input values. However, an aggregate transition + function, when used in the context of an aggregate, + is allowed to cheat and modify its transition-state argument in place. + This can provide substantial performance benefits compared to making + a fresh copy of the transition state each time. + + + + Likewise, while an aggregate final function is normally expected not to + modify its input values, sometimes it is impractical to avoid modifying + the transition-state argument. Such behavior must be declared using + the FINALFUNC_MODIFY parameter. + The READ_WRITE + value indicates that the final function modifies the transition state in + unspecified ways. This value prevents use of the aggregate as a window + function, and it also prevents merging of transition states for aggregate + calls that share the same input values and transition functions. + The SHAREABLE value indicates that the transition function + cannot be applied after the final function, but multiple final-function + calls can be performed on the ending transition state value. This value + prevents use of the aggregate as a window function, but it allows merging + of transition states. (That is, the optimization of interest here is not + applying the same final function repeatedly, but applying different final + functions to the same ending transition state value. This is allowed as + long as none of the final functions are marked READ_WRITE.) + + If an aggregate supports moving-aggregate mode, it will improve calculation efficiency when the aggregate is used as a window function for a window with moving frame start (that is, a frame start mode other - than UNBOUNDED PRECEDING). Conceptually, the forward + than UNBOUNDED PRECEDING). Conceptually, the forward transition function adds input values to the aggregate's state when they enter the window frame from the bottom, and the inverse transition function removes them again when they leave the frame at the top. So, @@ -671,27 +731,28 @@ SELECT col FROM tab ORDER BY col USING sortop LIMIT 1; Note that whether or not the aggregate supports moving-aggregate mode, PostgreSQL can handle a moving frame end without recalculation; this is done by continuing to add new values - to the aggregate's state. It is assumed that the final function does + to the aggregate's state. This is why use of an aggregate as a window + function requires that the final function be read-only: it must not damage the aggregate's state value, so that the aggregation can be continued even after an aggregate result value has been obtained for one set of frame boundaries. - The syntax for ordered-set aggregates allows VARIADIC + The syntax for ordered-set aggregates allows VARIADIC to be specified for both the last direct parameter and the last - aggregated (WITHIN GROUP) parameter. 
However, the - current implementation restricts use of VARIADIC + aggregated (WITHIN GROUP) parameter. However, the + current implementation restricts use of VARIADIC in two ways. First, ordered-set aggregates can only use - VARIADIC "any", not other variadic array types. - Second, if the last direct parameter is VARIADIC "any", + VARIADIC "any", not other variadic array types. + Second, if the last direct parameter is VARIADIC "any", then there can be only one aggregated parameter and it must also - be VARIADIC "any". (In the representation used in the + be VARIADIC "any". (In the representation used in the system catalogs, these two parameters are merged into a single - VARIADIC "any" item, since pg_proc cannot - represent functions with more than one VARIADIC parameter.) + VARIADIC "any" item, since pg_proc cannot + represent functions with more than one VARIADIC parameter.) If the aggregate is a hypothetical-set aggregate, the direct arguments - that match the VARIADIC "any" parameter are the hypothetical + that match the VARIADIC "any" parameter are the hypothetical ones; any preceding parameters represent additional direct arguments that are not constrained to match the aggregated arguments. @@ -704,7 +765,7 @@ SELECT col FROM tab ORDER BY col USING sortop LIMIT 1; Partial (including parallel) aggregation is currently not supported for ordered-set aggregates. Also, it will never be used for aggregate calls - that include DISTINCT or ORDER BY clauses, since + that include DISTINCT or ORDER BY clauses, since those semantics cannot be supported during partial aggregation. @@ -713,7 +774,7 @@ SELECT col FROM tab ORDER BY col USING sortop LIMIT 1; Examples - See . + See . @@ -731,8 +792,8 @@ SELECT col FROM tab ORDER BY col USING sortop LIMIT 1; See Also - - + + diff --git a/doc/src/sgml/ref/create_cast.sgml b/doc/src/sgml/ref/create_cast.sgml index a7d13edc22..84317047c2 100644 --- a/doc/src/sgml/ref/create_cast.sgml +++ b/doc/src/sgml/ref/create_cast.sgml @@ -1,6 +1,6 @@ - + CREATE CAST @@ -44,7 +44,7 @@ SELECT CAST(42 AS float8); converts the integer constant 42 to type float8 by invoking a previously specified function, in this case - float8(int4). (If no suitable cast has been defined, the + float8(int4). (If no suitable cast has been defined, the conversion fails.) @@ -64,7 +64,7 @@ SELECT CAST(42 AS float8); - You can define a cast as an I/O conversion cast by using + You can define a cast as an I/O conversion cast by using the WITH INOUT syntax. An I/O conversion cast is performed by invoking the output function of the source data type, and passing the resulting string to the input function of the target data type. @@ -75,14 +75,14 @@ SELECT CAST(42 AS float8); By default, a cast can be invoked only by an explicit cast request, - that is an explicit CAST(x AS - typename) or - x::typename + that is an explicit CAST(x AS + typename) or + x::typename construct. - If the cast is marked AS ASSIGNMENT then it can be invoked + If the cast is marked AS ASSIGNMENT then it can be invoked implicitly when assigning a value to a column of the target data type. For example, supposing that foo.f1 is a column of type text, then: @@ -90,13 +90,13 @@ SELECT CAST(42 AS float8); INSERT INTO foo (f1) VALUES (42); will be allowed if the cast from type integer to type - text is marked AS ASSIGNMENT, otherwise not. + text is marked AS ASSIGNMENT, otherwise not. (We generally use the term assignment cast to describe this kind of cast.) 
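 As a hedged illustration of the assignment-cast behavior just described (the type myint, its conversion function int4(myint), and the target column are hypothetical, not part of this patch), such a cast could be declared as:
     CREATE CAST (myint AS integer)
         WITH FUNCTION int4(myint)
         AS ASSIGNMENT;   -- illustrative sketch only
 With this cast in place, assigning a myint value to an integer column succeeds without an explicit CAST, because the cast is invoked implicitly during the assignment.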
- If the cast is marked AS IMPLICIT then it can be invoked + If the cast is marked AS IMPLICIT then it can be invoked implicitly in any context, whether assignment or internally in an expression. (We generally use the term implicit cast to describe this kind of cast.) @@ -104,12 +104,12 @@ INSERT INTO foo (f1) VALUES (42); SELECT 2 + 4.0; - The parser initially marks the constants as being of type integer - and numeric respectively. There is no integer - + numeric operator in the system catalogs, - but there is a numeric + numeric operator. - The query will therefore succeed if a cast from integer to - numeric is available and is marked AS IMPLICIT — + The parser initially marks the constants as being of type integer + and numeric respectively. There is no integer + + numeric operator in the system catalogs, + but there is a numeric + numeric operator. + The query will therefore succeed if a cast from integer to + numeric is available and is marked AS IMPLICIT — which in fact it is. The parser will apply the implicit cast and resolve the query as if it had been written @@ -118,17 +118,17 @@ SELECT CAST ( 2 AS numeric ) + 4.0; - Now, the catalogs also provide a cast from numeric to - integer. If that cast were marked AS IMPLICIT — + Now, the catalogs also provide a cast from numeric to + integer. If that cast were marked AS IMPLICIT — which it is not — then the parser would be faced with choosing between the above interpretation and the alternative of casting the - numeric constant to integer and applying the - integer + integer operator. Lacking any + numeric constant to integer and applying the + integer + integer operator. Lacking any knowledge of which choice to prefer, it would give up and declare the query ambiguous. The fact that only one of the two casts is implicit is the way in which we teach the parser to prefer resolution - of a mixed numeric-and-integer expression as - numeric; there is no built-in knowledge about that. + of a mixed numeric-and-integer expression as + numeric; there is no built-in knowledge about that. @@ -142,8 +142,8 @@ SELECT CAST ( 2 AS numeric ) + 4.0; general type category. For example, the cast from int2 to int4 can reasonably be implicit, but the cast from float8 to int4 should probably be - assignment-only. Cross-type-category casts, such as text - to int4, are best made explicit-only. + assignment-only. Cross-type-category casts, such as text + to int4, are best made explicit-only. @@ -151,9 +151,9 @@ SELECT CAST ( 2 AS numeric ) + 4.0; Sometimes it is necessary for usability or standards-compliance reasons to provide multiple implicit casts among a set of types, resulting in ambiguity that cannot be avoided as above. The parser has a fallback - heuristic based on type categories and preferred - types that can help to provide desired behavior in such cases. See - for + heuristic based on type categories and preferred + types that can help to provide desired behavior in such cases. See + for more information. @@ -255,11 +255,11 @@ SELECT CAST ( 2 AS numeric ) + 4.0; Cast implementation functions can have one to three arguments. The first argument type must be identical to or binary-coercible from the cast's source type. The second argument, - if present, must be type integer; it receives the type - modifier associated with the destination type, or -1 + if present, must be type integer; it receives the type + modifier associated with the destination type, or -1 if there is none. 
The third argument, - if present, must be type boolean; it receives true - if the cast is an explicit cast, false otherwise. + if present, must be type boolean; it receives true + if the cast is an explicit cast, false otherwise. (Bizarrely, the SQL standard demands different behaviors for explicit and implicit casts in some cases. This argument is supplied for functions that must implement such casts. It is not recommended that you design @@ -301,7 +301,7 @@ SELECT CAST ( 2 AS numeric ) + 4.0; Notes - Use to remove user-defined casts. + Use to remove user-defined casts. @@ -316,9 +316,9 @@ SELECT CAST ( 2 AS numeric ) + 4.0; It is normally not necessary to create casts between user-defined types - and the standard string types (text, varchar, and - char(n), as well as user-defined types that - are defined to be in the string category). PostgreSQL + and the standard string types (text, varchar, and + char(n), as well as user-defined types that + are defined to be in the string category). PostgreSQL provides automatic I/O conversion casts for that. The automatic casts to string types are treated as assignment casts, while the automatic casts from string types are @@ -338,11 +338,11 @@ SELECT CAST ( 2 AS numeric ) + 4.0; convention of naming cast implementation functions after the target data type. Many users are used to being able to cast data types using a function-style notation, that is - typename(x). This notation is in fact + typename(x). This notation is in fact nothing more nor less than a call of the cast implementation function; it is not specially treated as a cast. If your conversion functions are not named to support this convention then you will have surprised users. - Since PostgreSQL allows overloading of the same function + Since PostgreSQL allows overloading of the same function name with different argument types, there is no difficulty in having multiple conversion functions from different types that all use the target type's name. @@ -353,14 +353,14 @@ SELECT CAST ( 2 AS numeric ) + 4.0; Actually the preceding paragraph is an oversimplification: there are two cases in which a function-call construct will be treated as a cast request without having matched it to an actual function. - If a function call name(x) does not - exactly match any existing function, but name is the name - of a data type and pg_cast provides a binary-coercible cast - to this type from the type of x, then the call will be + If a function call name(x) does not + exactly match any existing function, but name is the name + of a data type and pg_cast provides a binary-coercible cast + to this type from the type of x, then the call will be construed as a binary-coercible cast. This exception is made so that binary-coercible casts can be invoked using functional syntax, even though they lack any function. Likewise, if there is no - pg_cast entry but the cast would be to or from a string + pg_cast entry but the cast would be to or from a string type, the call will be construed as an I/O conversion cast. This exception allows I/O conversion casts to be invoked using functional syntax. @@ -372,7 +372,7 @@ SELECT CAST ( 2 AS numeric ) + 4.0; There is also an exception to the exception: I/O conversion casts from composite types to string types cannot be invoked using functional syntax, but must be written in explicit cast syntax (either - CAST or :: notation). This exception was added + CAST or :: notation). 
This exception was added because after the introduction of automatically-provided I/O conversion casts, it was found too easy to accidentally invoke such a cast when a function or column reference was intended. @@ -402,7 +402,7 @@ CREATE CAST (bigint AS int4) WITH FUNCTION int4(bigint) AS ASSIGNMENT; SQL standard, except that SQL does not make provisions for binary-coercible types or extra arguments to implementation functions. - AS IMPLICIT is a PostgreSQL + AS IMPLICIT is a PostgreSQL extension, too. @@ -412,9 +412,9 @@ CREATE CAST (bigint AS int4) WITH FUNCTION int4(bigint) AS ASSIGNMENT; See Also - , - , - + , + , + diff --git a/doc/src/sgml/ref/create_collation.sgml b/doc/src/sgml/ref/create_collation.sgml index 2d3e050545..038797fce1 100644 --- a/doc/src/sgml/ref/create_collation.sgml +++ b/doc/src/sgml/ref/create_collation.sgml @@ -1,6 +1,6 @@ - + CREATE COLLATION @@ -93,10 +93,7 @@ CREATE COLLATION [ IF NOT EXISTS ] name FROM Use the specified operating system locale for - the LC_COLLATE locale category. The locale - must be applicable to the current database encoding. - (See for the precise - rules.) + the LC_COLLATE locale category. @@ -107,10 +104,7 @@ CREATE COLLATION [ IF NOT EXISTS ] name FROM Use the specified operating system locale for - the LC_CTYPE locale category. The locale - must be applicable to the current database encoding. - (See for the precise - rules.) + the LC_CTYPE locale category. @@ -122,7 +116,7 @@ CREATE COLLATION [ IF NOT EXISTS ] name FROM Specifies the provider to use for locale services associated with this collation. Possible values - are: icu,ICU + are: icu,ICU libc. libc is the default. The available choices depend on the operating system and build options. @@ -144,7 +138,7 @@ CREATE COLLATION [ IF NOT EXISTS ] name FROM - See also for how to handle + See also for how to handle collation version mismatches. @@ -168,13 +162,25 @@ CREATE COLLATION [ IF NOT EXISTS ] name FROM Notes + + CREATE COLLATION takes a SHARE ROW + EXCLUSIVE lock, which is self-conflicting, on the + pg_collation system catalog, so only one + CREATE COLLATION command can run at a time. + + Use DROP COLLATION to remove user-defined collations. - See for more information about collation - support in PostgreSQL. + See for more information on how to create collations. + + + + When using the libc collation provider, the locale must + be applicable to the current database encoding. + See for the precise rules. @@ -186,7 +192,14 @@ CREATE COLLATION [ IF NOT EXISTS ] name FROM fr_FR.utf8 (assuming the current database encoding is UTF8): -CREATE COLLATION french (LOCALE = 'fr_FR.utf8'); +CREATE COLLATION french (locale = 'fr_FR.utf8'); + + + + + To create a collation using the ICU provider using German phone book sort order: + +CREATE COLLATION german_phonebook (provider = icu, locale = 'de-u-co-phonebk'); @@ -217,8 +230,8 @@ CREATE COLLATION german FROM "de_DE"; See Also - - + + diff --git a/doc/src/sgml/ref/create_conversion.sgml b/doc/src/sgml/ref/create_conversion.sgml index d2e2c010ef..4ddbcfacef 100644 --- a/doc/src/sgml/ref/create_conversion.sgml +++ b/doc/src/sgml/ref/create_conversion.sgml @@ -1,6 +1,6 @@ - + CREATE CONVERSION @@ -29,7 +29,7 @@ CREATE [ DEFAULT ] CONVERSION name CREATE CONVERSION defines a new conversion between character set encodings. Also, conversions that - are marked DEFAULT can be used for automatic encoding + are marked DEFAULT can be used for automatic encoding conversion between client and server. 
For this purpose, two conversions, from encoding A to B and from encoding B to A, must be defined. @@ -51,7 +51,7 @@ CREATE [ DEFAULT ] CONVERSION name - The DEFAULT clause indicates that this conversion + The DEFAULT clause indicates that this conversion is the default for this particular source to destination encoding. There should be only one default encoding in a schema for the encoding pair. @@ -137,7 +137,7 @@ conv_proc( To create a conversion from encoding UTF8 to - LATIN1 using myfunc: + LATIN1 using myfunc: CREATE CONVERSION myconv FOR 'UTF8' TO 'LATIN1' FROM myfunc; @@ -161,9 +161,9 @@ CREATE CONVERSION myconv FOR 'UTF8' TO 'LATIN1' FROM myfunc; See Also - - - + + + diff --git a/doc/src/sgml/ref/create_database.sgml b/doc/src/sgml/ref/create_database.sgml index 48386a29f9..b2c9e241c2 100644 --- a/doc/src/sgml/ref/create_database.sgml +++ b/doc/src/sgml/ref/create_database.sgml @@ -3,7 +3,7 @@ doc/src/sgml/ref/create_database.sgml PostgreSQL documentation --> - + CREATE DATABASE @@ -21,7 +21,7 @@ PostgreSQL documentation -CREATE DATABASE name +CREATE DATABASE name [ [ WITH ] [ OWNER [=] user_name ] [ TEMPLATE [=] template ] [ ENCODING [=] encoding ] @@ -44,21 +44,21 @@ CREATE DATABASE name To create a database, you must be a superuser or have the special - CREATEDB privilege. - See . + CREATEDB privilege. + See . By default, the new database will be created by cloning the standard - system database template1. A different template can be + system database template1. A different template can be specified by writing TEMPLATE name. In particular, - by writing TEMPLATE template0, you can create a virgin + by writing TEMPLATE template0, you can create a virgin database containing only the standard objects predefined by your version of PostgreSQL. This is useful if you wish to avoid copying any installation-local objects that might have been added to - template1. + template1. @@ -106,7 +106,7 @@ CREATE DATABASE name to use the default encoding (namely, the encoding of the template database). The character sets supported by the PostgreSQL server are described in - . See below for + . See below for additional restrictions. @@ -115,7 +115,7 @@ CREATE DATABASE name lc_collate - Collation order (LC_COLLATE) to use in the new database. + Collation order (LC_COLLATE) to use in the new database. This affects the sort order applied to strings, e.g. in queries with ORDER BY, as well as the order used in indexes on text columns. The default is to use the collation order of the template database. @@ -127,7 +127,7 @@ CREATE DATABASE name lc_ctype - Character classification (LC_CTYPE) to use in the new + Character classification (LC_CTYPE) to use in the new database. This affects the categorization of characters, e.g. lower, upper and digit. The default is to use the character classification of the template database. See below for additional restrictions. @@ -143,7 +143,7 @@ CREATE DATABASE name template database's tablespace. This tablespace will be the default tablespace used for objects created in this database. See - + for more information. @@ -155,7 +155,7 @@ CREATE DATABASE name If false then no one can connect to this database. The default is true, allowing connections (except as restricted by other mechanisms, - such as GRANT/REVOKE CONNECT). + such as GRANT/REVOKE CONNECT). @@ -192,52 +192,52 @@ CREATE DATABASE name Notes - CREATE DATABASE cannot be executed inside a transaction + CREATE DATABASE cannot be executed inside a transaction block. 
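 A minimal sketch of this restriction (the database name newdb is arbitrary):
     BEGIN;
     CREATE DATABASE newdb;   -- rejected: cannot run inside a transaction block
     ROLLBACK;

     CREATE DATABASE newdb;   -- issued on its own, outside any transaction block, this is allowed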
- Errors along the line of could not initialize database directory + Errors along the line of could not initialize database directory are most likely related to insufficient permissions on the data directory, a full disk, or other file system problems. - Use to remove a database. + Use to remove a database. - The program is a + The program is a wrapper program around this command, provided for convenience. Database-level configuration parameters (set via ) are not copied from the template + linkend="sql-alterdatabase"/>) are not copied from the template database. - Although it is possible to copy a database other than template1 + Although it is possible to copy a database other than template1 by specifying its name as the template, this is not (yet) intended as a general-purpose COPY DATABASE facility. The principal limitation is that no other sessions can be connected to the template database while it is being copied. CREATE - DATABASE will fail if any other connection exists when it starts; + DATABASE will fail if any other connection exists when it starts; otherwise, new connections to the template database are locked out - until CREATE DATABASE completes. - See for more information. + until CREATE DATABASE completes. + See for more information. The character set encoding specified for the new database must be - compatible with the chosen locale settings (LC_COLLATE and - LC_CTYPE). If the locale is C (or equivalently - POSIX), then all encodings are allowed, but for other + compatible with the chosen locale settings (LC_COLLATE and + LC_CTYPE). If the locale is C (or equivalently + POSIX), then all encodings are allowed, but for other locale settings there is only one encoding that will work properly. (On Windows, however, UTF-8 encoding can be used with any locale.) - CREATE DATABASE will allow superusers to specify - SQL_ASCII encoding regardless of the locale settings, + CREATE DATABASE will allow superusers to specify + SQL_ASCII encoding regardless of the locale settings, but this choice is deprecated and may result in misbehavior of character-string functions if data that is not encoding-compatible with the locale is stored in the database. @@ -245,19 +245,19 @@ CREATE DATABASE name The encoding and locale settings must match those of the template database, - except when template0 is used as template. This is because + except when template0 is used as template. This is because other databases might contain data that does not match the specified encoding, or might contain indexes whose sort ordering is affected by - LC_COLLATE and LC_CTYPE. Copying such data would + LC_COLLATE and LC_CTYPE. Copying such data would result in a database that is corrupt according to the new settings. template0, however, is known to not contain any data or indexes that would be affected. - The CONNECTION LIMIT option is only enforced approximately; + The CONNECTION LIMIT option is only enforced approximately; if two new sessions start at about the same time when just one - connection slot remains for the database, it is possible that + connection slot remains for the database, it is possible that both will fail. Also, the limit is not enforced against superusers or background worker processes. 
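 A hedged sketch of declaring such a limit (the database name appdb and the limit of 25 are illustrative):
     CREATE DATABASE appdb CONNECTION LIMIT 25;   -- illustrative only
 Once roughly 25 sessions are connected, further non-superuser connection attempts to appdb are refused, subject to the approximate enforcement described above.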
@@ -275,8 +275,8 @@ CREATE DATABASE lusiadas; - To create a database sales owned by user salesapp - with a default tablespace of salesspace: + To create a database sales owned by user salesapp + with a default tablespace of salesspace: CREATE DATABASE sales OWNER salesapp TABLESPACE salesspace; @@ -284,19 +284,19 @@ CREATE DATABASE sales OWNER salesapp TABLESPACE salesspace; - To create a database music with a different locale: + To create a database music with a different locale: CREATE DATABASE music LC_COLLATE 'sv_SE.utf8' LC_CTYPE 'sv_SE.utf8' TEMPLATE template0; - In this example, the TEMPLATE template0 clause is required if - the specified locale is different from the one in template1. + In this example, the TEMPLATE template0 clause is required if + the specified locale is different from the one in template1. (If it is not, then specifying the locale explicitly is redundant.) - To create a database music2 with a different locale and a + To create a database music2 with a different locale and a different character set encoding: CREATE DATABASE music2 @@ -328,8 +328,8 @@ CREATE DATABASE music2 See Also - - + + diff --git a/doc/src/sgml/ref/create_domain.sgml b/doc/src/sgml/ref/create_domain.sgml index 3423bf9a32..49d5304330 100644 --- a/doc/src/sgml/ref/create_domain.sgml +++ b/doc/src/sgml/ref/create_domain.sgml @@ -3,7 +3,7 @@ doc/src/sgml/ref/create_domain.sgml PostgreSQL documentation --> - + CREATE DOMAIN @@ -24,12 +24,12 @@ PostgreSQL documentation CREATE DOMAIN name [ AS ] data_type [ COLLATE collation ] [ DEFAULT expression ] - [ constraint [ ... ] ] + [ constraint [ ... ] ] -where constraint is: +where constraint is: -[ CONSTRAINT constraint_name ] -{ NOT NULL | NULL | CHECK (expression) } +[ CONSTRAINT constraint_name ] +{ NOT NULL | NULL | CHECK (expression) } @@ -45,7 +45,7 @@ CREATE DOMAIN name [ AS ] If a schema name is given (for example, CREATE DOMAIN - myschema.mydomain ...) then the domain is created in the + myschema.mydomain ...) then the domain is created in the specified schema. Otherwise it is created in the current schema. The domain name must be unique among the types and domains existing in its schema. @@ -80,7 +80,7 @@ CREATE DOMAIN name [ AS ] - data_type + data_type The underlying data type of the domain. This can include array @@ -95,7 +95,7 @@ CREATE DOMAIN name [ AS ] An optional collation for the domain. If no collation is specified, the underlying data type's default collation is used. - The underlying type must be collatable if COLLATE + The underlying type must be collatable if COLLATE is specified. @@ -106,7 +106,7 @@ CREATE DOMAIN name [ AS ] - The DEFAULT clause specifies a default value for + The DEFAULT clause specifies a default value for columns of the domain data type. The value is any variable-free expression (but subqueries are not allowed). The data type of the default expression must match the data @@ -126,7 +126,7 @@ CREATE DOMAIN name [ AS ] - CONSTRAINT constraint_name + CONSTRAINT constraint_name An optional name for a constraint. If not specified, @@ -136,7 +136,7 @@ CREATE DOMAIN name [ AS ] - NOT NULL + NOT NULL Values of this domain are prevented from being null @@ -146,7 +146,7 @@ CREATE DOMAIN name [ AS ] - NULL + NULL Values of this domain are allowed to be null. This is the default. @@ -161,12 +161,12 @@ CREATE DOMAIN name [ AS ] - CHECK (expression) + CHECK (expression) - CHECK clauses specify integrity constraints or tests + CHECK clauses specify integrity constraints or tests which values of the domain must satisfy. 
Each constraint must be an expression - producing a Boolean result. It should use the key word VALUE + producing a Boolean result. It should use the key word VALUE to refer to the value being tested. Expressions evaluating to TRUE or UNKNOWN succeed. If the expression produces a FALSE result, an error is reported and the value is not allowed to be converted @@ -175,13 +175,13 @@ CREATE DOMAIN name [ AS ] Currently, CHECK expressions cannot contain - subqueries nor refer to variables other than VALUE. + subqueries nor refer to variables other than VALUE. When a domain has multiple CHECK constraints, they will be tested in alphabetical order by name. - (PostgreSQL versions before 9.5 did not honor any + (PostgreSQL versions before 9.5 did not honor any particular firing order for CHECK constraints.) @@ -193,7 +193,7 @@ CREATE DOMAIN name [ AS ] Notes - Domain constraints, particularly NOT NULL, are checked when + Domain constraints, particularly NOT NULL, are checked when converting a value to the domain type. It is possible for a column that is nominally of the domain type to read as null despite there being such a constraint. For example, this can happen in an outer-join query, if @@ -211,7 +211,7 @@ INSERT INTO tab (domcol) VALUES ((SELECT domcol FROM tab WHERE false)); It is very difficult to avoid such problems, because of SQL's general assumption that a null value is a valid value of every data type. Best practice therefore is to design a domain's constraints so that a null value is allowed, - and then to apply column NOT NULL constraints to columns of + and then to apply column NOT NULL constraints to columns of the domain type as needed, rather than directly to the domain type. @@ -242,7 +242,7 @@ CREATE TABLE us_snail_addy ( - + Compatibility @@ -251,12 +251,12 @@ CREATE TABLE us_snail_addy ( - + See Also - - + + diff --git a/doc/src/sgml/ref/create_event_trigger.sgml b/doc/src/sgml/ref/create_event_trigger.sgml index be18fc36e8..52ba746166 100644 --- a/doc/src/sgml/ref/create_event_trigger.sgml +++ b/doc/src/sgml/ref/create_event_trigger.sgml @@ -3,7 +3,7 @@ doc/src/sgml/ref/create_event_trigger.sgml PostgreSQL documentation --> - + CREATE EVENT TRIGGER @@ -21,10 +21,10 @@ PostgreSQL documentation -CREATE EVENT TRIGGER name - ON event - [ WHEN filter_variable IN (filter_value [, ... ]) [ AND ... ] ] - EXECUTE PROCEDURE function_name() +CREATE EVENT TRIGGER name + ON event + [ WHEN filter_variable IN (filter_value [, ... ]) [ AND ... ] ] + EXECUTE { FUNCTION | PROCEDURE } function_name() @@ -33,10 +33,10 @@ CREATE EVENT TRIGGER name CREATE EVENT TRIGGER creates a new event trigger. - Whenever the designated event occurs and the WHEN condition + Whenever the designated event occurs and the WHEN condition associated with the trigger, if any, is satisfied, the trigger function will be executed. For a general introduction to event triggers, see - . The user who creates an event trigger + . The user who creates an event trigger becomes its owner. @@ -60,7 +60,7 @@ CREATE EVENT TRIGGER name The name of the event that triggers a call to the given function. - See for more information + See for more information on event names. @@ -85,8 +85,8 @@ CREATE EVENT TRIGGER name A list of values for the associated filter_variable - for which the trigger should fire. For TAG, this means a - list of command tags (e.g. 'DROP FUNCTION'). + for which the trigger should fire. For TAG, this means a + list of command tags (e.g. 'DROP FUNCTION'). 
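 A hedged sketch of such a tag filter (the trigger name and the function log_dropped_objects, which must already exist and return event_trigger, are hypothetical):
     CREATE EVENT TRIGGER log_drops
         ON sql_drop
         WHEN TAG IN ('DROP TABLE', 'DROP FUNCTION')
         EXECUTE FUNCTION log_dropped_objects();   -- illustrative sketch only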
@@ -98,6 +98,14 @@ CREATE EVENT TRIGGER name A user-supplied function that is declared as taking no argument and returning type event_trigger. + + + In the syntax of CREATE EVENT TRIGGER, the keywords + FUNCTION and PROCEDURE are + equivalent, but the referenced function must in any case be a function, + not a procedure. The use of the keyword PROCEDURE + here is historical and deprecated. + @@ -113,7 +121,7 @@ CREATE EVENT TRIGGER name Event triggers are disabled in single-user mode (see ). If an erroneous event trigger disables the + linkend="app-postgres"/>). If an erroneous event trigger disables the database so much that you can't even drop the trigger, restart in single-user mode and you'll be able to do that. @@ -136,7 +144,7 @@ END; $$; CREATE EVENT TRIGGER abort_ddl ON ddl_command_start - EXECUTE PROCEDURE abort_any_command(); + EXECUTE FUNCTION abort_any_command(); @@ -154,9 +162,9 @@ CREATE EVENT TRIGGER abort_ddl ON ddl_command_start See Also - - - + + + diff --git a/doc/src/sgml/ref/create_extension.sgml b/doc/src/sgml/ref/create_extension.sgml index 14e910115a..36837f927d 100644 --- a/doc/src/sgml/ref/create_extension.sgml +++ b/doc/src/sgml/ref/create_extension.sgml @@ -3,7 +3,7 @@ doc/src/sgml/ref/create_extension.sgml PostgreSQL documentation --> - + CREATE EXTENSION @@ -39,7 +39,7 @@ CREATE EXTENSION [ IF NOT EXISTS ] extension_name Loading an extension essentially amounts to running the extension's script - file. The script will typically create new SQL objects such as + file. The script will typically create new SQL objects such as functions, data types, operators and index support methods. CREATE EXTENSION additionally records the identities of all the created objects, so that they can be dropped again if @@ -62,7 +62,7 @@ CREATE EXTENSION [ IF NOT EXISTS ] extension_name - IF NOT EXISTS + IF NOT EXISTS Do not throw an error if an extension with the same name already @@ -97,17 +97,17 @@ CREATE EXTENSION [ IF NOT EXISTS ] extension_name - If the extension specifies a schema parameter in its + If the extension specifies a schema parameter in its control file, then that schema cannot be overridden with - a SCHEMA clause. Normally, an error will be raised if - a SCHEMA clause is given and it conflicts with the - extension's schema parameter. However, if - the CASCADE clause is also given, + a SCHEMA clause. Normally, an error will be raised if + a SCHEMA clause is given and it conflicts with the + extension's schema parameter. However, if + the CASCADE clause is also given, then schema_name is ignored when it conflicts. The given schema_name will be used for installation of any needed extensions that do not - specify schema in their control files. + specify schema in their control files. @@ -134,13 +134,13 @@ CREATE EXTENSION [ IF NOT EXISTS ] extension_name old_version - FROM old_version + FROM old_version must be specified when, and only when, you are attempting to install - an extension that replaces an old style module that is just + an extension that replaces an old style module that is just a collection of objects not packaged into an extension. This option - causes CREATE EXTENSION to run an alternative installation + causes CREATE EXTENSION to run an alternative installation script that absorbs the existing objects into the extension, instead - of creating new objects. Be careful that SCHEMA specifies + of creating new objects. Be careful that SCHEMA specifies the schema containing these pre-existing objects. 
@@ -150,7 +150,7 @@ CREATE EXTENSION [ IF NOT EXISTS ] extension_name extension's author, and might vary if there is more than one version of the old-style module that can be upgraded into an extension. For the standard additional modules supplied with pre-9.1 - PostgreSQL, use unpackaged + PostgreSQL, use unpackaged for old_version when updating a module to extension style. @@ -158,12 +158,12 @@ CREATE EXTENSION [ IF NOT EXISTS ] extension_name - CASCADE + CASCADE Automatically install any extensions that this extension depends on that are not already installed. Their dependencies are likewise - automatically installed, recursively. The SCHEMA clause, + automatically installed, recursively. The SCHEMA clause, if given, applies to all extensions that get installed this way. Other options of the statement are not applied to automatically-installed extensions; in particular, their default @@ -178,7 +178,7 @@ CREATE EXTENSION [ IF NOT EXISTS ] extension_name Notes - Before you can use CREATE EXTENSION to load an extension + Before you can use CREATE EXTENSION to load an extension into a database, the extension's supporting files must be installed. Information about installing the extensions supplied with PostgreSQL can be found in @@ -195,7 +195,7 @@ CREATE EXTENSION [ IF NOT EXISTS ] extension_name For information about writing new extensions, see - . + . @@ -211,13 +211,13 @@ CREATE EXTENSION hstore; - Update a pre-9.1 installation of hstore into + Update a pre-9.1 installation of hstore into extension style: CREATE EXTENSION hstore SCHEMA public FROM unpackaged; Be careful to specify the schema in which you installed the existing - hstore objects. + hstore objects. @@ -225,7 +225,7 @@ CREATE EXTENSION hstore SCHEMA public FROM unpackaged; Compatibility - CREATE EXTENSION is a PostgreSQL + CREATE EXTENSION is a PostgreSQL extension. @@ -234,8 +234,8 @@ CREATE EXTENSION hstore SCHEMA public FROM unpackaged; See Also - - + + diff --git a/doc/src/sgml/ref/create_foreign_data_wrapper.sgml b/doc/src/sgml/ref/create_foreign_data_wrapper.sgml index a3811a3b63..0fcba18a34 100644 --- a/doc/src/sgml/ref/create_foreign_data_wrapper.sgml +++ b/doc/src/sgml/ref/create_foreign_data_wrapper.sgml @@ -3,7 +3,7 @@ doc/src/sgml/ref/create_foreign_data_wrapper.sgml PostgreSQL documentation --> - + CREATE FOREIGN DATA WRAPPER @@ -24,7 +24,7 @@ PostgreSQL documentation CREATE FOREIGN DATA WRAPPER name [ HANDLER handler_function | NO HANDLER ] [ VALIDATOR validator_function | NO VALIDATOR ] - [ OPTIONS ( option 'value' [, ... ] ) ] + [ OPTIONS ( option 'value' [, ... ] ) ] @@ -100,7 +100,7 @@ CREATE FOREIGN DATA WRAPPER name - OPTIONS ( option 'value' [, ... ] ) + OPTIONS ( option 'value' [, ... ] ) This clause specifies options for the new foreign-data wrapper. @@ -117,7 +117,7 @@ CREATE FOREIGN DATA WRAPPER name Notes - PostgreSQL's foreign-data functionality is still under + PostgreSQL's foreign-data functionality is still under active development. Optimization of queries is primitive (and mostly left to the wrapper, too). Thus, there is considerable room for future performance improvements. 
@@ -128,22 +128,22 @@ CREATE FOREIGN DATA WRAPPER name Examples - Create a useless foreign-data wrapper dummy: + Create a useless foreign-data wrapper dummy: CREATE FOREIGN DATA WRAPPER dummy; - Create a foreign-data wrapper file with - handler function file_fdw_handler: + Create a foreign-data wrapper file with + handler function file_fdw_handler: CREATE FOREIGN DATA WRAPPER file HANDLER file_fdw_handler; - Create a foreign-data wrapper mywrapper with some + Create a foreign-data wrapper mywrapper with some options: CREATE FOREIGN DATA WRAPPER mywrapper @@ -159,7 +159,7 @@ CREATE FOREIGN DATA WRAPPER mywrapper 9075-9 (SQL/MED), with the exception that the HANDLER and VALIDATOR clauses are extensions and the standard clauses LIBRARY and LANGUAGE - are not implemented in PostgreSQL. + are not implemented in PostgreSQL. @@ -172,11 +172,11 @@ CREATE FOREIGN DATA WRAPPER mywrapper See Also - - - - - + + + + + diff --git a/doc/src/sgml/ref/create_foreign_table.sgml b/doc/src/sgml/ref/create_foreign_table.sgml index 065c982082..37a45b26db 100644 --- a/doc/src/sgml/ref/create_foreign_table.sgml +++ b/doc/src/sgml/ref/create_foreign_table.sgml @@ -1,6 +1,6 @@ - + CREATE FOREIGN TABLE @@ -18,40 +18,40 @@ -CREATE FOREIGN TABLE [ IF NOT EXISTS ] table_name ( [ - { column_name data_type [ OPTIONS ( option 'value' [, ... ] ) ] [ COLLATE collation ] [ column_constraint [ ... ] ] +CREATE FOREIGN TABLE [ IF NOT EXISTS ] table_name ( [ + { column_name data_type [ OPTIONS ( option 'value' [, ... ] ) ] [ COLLATE collation ] [ column_constraint [ ... ] ] | table_constraint } [, ... ] ] ) [ INHERITS ( parent_table [, ... ] ) ] SERVER server_name -[ OPTIONS ( option 'value' [, ... ] ) ] +[ OPTIONS ( option 'value' [, ... ] ) ] -CREATE FOREIGN TABLE [ IF NOT EXISTS ] table_name - PARTITION OF parent_table [ ( - { column_name [ WITH OPTIONS ] [ column_constraint [ ... ] ] +CREATE FOREIGN TABLE [ IF NOT EXISTS ] table_name + PARTITION OF parent_table [ ( + { column_name [ WITH OPTIONS ] [ column_constraint [ ... ] ] | table_constraint } [, ... ] -) ] partition_bound_spec +) ] partition_bound_spec SERVER server_name -[ OPTIONS ( option 'value' [, ... ] ) ] +[ OPTIONS ( option 'value' [, ... ] ) ] -where column_constraint is: +where column_constraint is: -[ CONSTRAINT constraint_name ] +[ CONSTRAINT constraint_name ] { NOT NULL | NULL | - CHECK ( expression ) [ NO INHERIT ] | + CHECK ( expression ) [ NO INHERIT ] | DEFAULT default_expr } -and table_constraint is: +and table_constraint is: -[ CONSTRAINT constraint_name ] -CHECK ( expression ) [ NO INHERIT ] +[ CONSTRAINT constraint_name ] +CHECK ( expression ) [ NO INHERIT ] - + Description @@ -62,7 +62,7 @@ CHECK ( expression ) [ NO INHERIT ] If a schema name is given (for example, CREATE FOREIGN TABLE - myschema.mytable ...) then the table is created in the specified + myschema.mytable ...) then the table is created in the specified schema. Otherwise it is created in the current schema. The name of the foreign table must be distinct from the name of any other foreign table, table, sequence, index, @@ -95,7 +95,7 @@ CHECK ( expression ) [ NO INHERIT ] - IF NOT EXISTS + IF NOT EXISTS Do not throw an error if a relation with the same name already exists. @@ -107,7 +107,7 @@ CHECK ( expression ) [ NO INHERIT ] - table_name + table_name The name (optionally schema-qualified) of the table to be created. @@ -116,7 +116,7 @@ CHECK ( expression ) [ NO INHERIT ] - column_name + column_name The name of a column to be created in the new table. 
@@ -125,13 +125,13 @@ CHECK ( expression ) [ NO INHERIT ] - data_type + data_type The data type of the column. This can include array specifiers. For more information on the data types supported by PostgreSQL, refer to . + linkend="datatype"/>. @@ -140,7 +140,7 @@ CHECK ( expression ) [ NO INHERIT ] COLLATE collation - The COLLATE clause assigns a collation to + The COLLATE clause assigns a collation to the column (which must be of a collatable data type). If not specified, the column data type's default collation is used. @@ -151,22 +151,22 @@ CHECK ( expression ) [ NO INHERIT ] INHERITS ( parent_table [, ... ] ) - The optional INHERITS clause specifies a list of + The optional INHERITS clause specifies a list of tables from which the new foreign table automatically inherits all columns. Parent tables can be plain tables or foreign tables. See the similar form of - for more details. + for more details. - CONSTRAINT constraint_name + CONSTRAINT constraint_name An optional name for a column or table constraint. If the constraint is violated, the constraint name is present in error messages, - so constraint names like col must be positive can be used + so constraint names like col must be positive can be used to communicate helpful constraint information to client applications. (Double-quotes are needed to specify constraint names that contain spaces.) If a constraint name is not specified, the system generates a name. @@ -175,7 +175,7 @@ CHECK ( expression ) [ NO INHERIT ] - NOT NULL + NOT NULL The column is not allowed to contain null values. @@ -184,7 +184,7 @@ CHECK ( expression ) [ NO INHERIT ] - NULL + NULL The column is allowed to contain null values. This is the default. @@ -199,10 +199,10 @@ CHECK ( expression ) [ NO INHERIT ] - CHECK ( expression ) [ NO INHERIT ] + CHECK ( expression ) [ NO INHERIT ] - The CHECK clause specifies an expression producing a + The CHECK clause specifies an expression producing a Boolean result which each row in the foreign table is expected to satisfy; that is, the expression should produce TRUE or UNKNOWN, never FALSE, for all rows in the foreign table. @@ -219,7 +219,7 @@ CHECK ( expression ) [ NO INHERIT ] - A constraint marked with NO INHERIT will not propagate to + A constraint marked with NO INHERIT will not propagate to child tables. @@ -230,7 +230,7 @@ CHECK ( expression ) [ NO INHERIT ] default_expr - The DEFAULT clause assigns a default data value for + The DEFAULT clause assigns a default data value for the column whose column definition it appears within. The value is any variable-free expression (subqueries and cross-references to other columns in the current table are not allowed). The @@ -247,18 +247,18 @@ CHECK ( expression ) [ NO INHERIT ] - server_name + server_name The name of an existing foreign server to use for the foreign table. For details on defining a server, see . + linkend="sql-createserver"/>. - OPTIONS ( option 'value' [, ...] ) + OPTIONS ( option 'value' [, ...] ) Options to be associated with the new foreign table or one of its @@ -279,9 +279,9 @@ CHECK ( expression ) [ NO INHERIT ] Notes - Constraints on foreign tables (such as CHECK - or NOT NULL clauses) are not enforced by the - core PostgreSQL system, and most foreign data wrappers + Constraints on foreign tables (such as CHECK + or NOT NULL clauses) are not enforced by the + core PostgreSQL system, and most foreign data wrappers do not attempt to enforce them either; that is, the constraint is simply assumed to hold true. 
There would be little point in such enforcement since it would only apply to rows inserted or updated via @@ -300,7 +300,7 @@ CHECK ( expression ) [ NO INHERIT ] - Although PostgreSQL does not attempt to enforce + Although PostgreSQL does not attempt to enforce constraints on foreign tables, it does assume that they are correct for purposes of query optimization. If there are rows visible in the foreign table that do not satisfy a declared constraint, queries on @@ -310,12 +310,12 @@ CHECK ( expression ) [ NO INHERIT ] - + Examples - Create foreign table films, which will be accessed through - the server film_server: + Create foreign table films, which will be accessed through + the server film_server: CREATE FOREIGN TABLE films ( @@ -330,9 +330,9 @@ SERVER film_server; - Create foreign table measurement_y2016m07, which will be - accessed through the server server_07, as a partition - of the range partitioned table measurement: + Create foreign table measurement_y2016m07, which will be + accessed through the server server_07, as a partition + of the range partitioned table measurement: CREATE FOREIGN TABLE measurement_y2016m07 @@ -342,16 +342,16 @@ CREATE FOREIGN TABLE measurement_y2016m07 - - Compatibility + + Compatibility The CREATE FOREIGN TABLE command largely conforms to the SQL standard; however, much as with - CREATE TABLE, - NULL constraints and zero-column foreign tables are permitted. + CREATE TABLE, + NULL constraints and zero-column foreign tables are permitted. The ability to specify column default values is also - a PostgreSQL extension. Table inheritance, in the form + a PostgreSQL extension. Table inheritance, in the form defined by PostgreSQL, is nonstandard. @@ -361,11 +361,11 @@ CREATE FOREIGN TABLE measurement_y2016m07 See Also - - - - - + + + + + diff --git a/doc/src/sgml/ref/create_function.sgml b/doc/src/sgml/ref/create_function.sgml index 072e033687..06be04eb5c 100644 --- a/doc/src/sgml/ref/create_function.sgml +++ b/doc/src/sgml/ref/create_function.sgml @@ -2,7 +2,7 @@ doc/src/sgml/ref/create_function.sgml --> - + CREATE FUNCTION @@ -37,7 +37,6 @@ CREATE [ OR REPLACE ] FUNCTION | AS 'definition' | AS 'obj_file', 'link_symbol' } ... - [ WITH ( attribute [, ...] ) ] @@ -55,10 +54,10 @@ CREATE [ OR REPLACE ] FUNCTION If a schema name is included, then the function is created in the specified schema. Otherwise it is created in the current schema. - The name of the new function must not match any existing function + The name of the new function must not match any existing function or procedure with the same input argument types in the same schema. However, - functions of different argument types can share a name (this is - called overloading). + functions and procedures of different argument types can share a name (this is + called overloading). @@ -68,13 +67,13 @@ CREATE [ OR REPLACE ] FUNCTION tried, you would actually be creating a new, distinct function). Also, CREATE OR REPLACE FUNCTION will not let you change the return type of an existing function. To do that, - you must drop and recreate the function. (When using OUT + you must drop and recreate the function. (When using OUT parameters, that means you cannot change the types of any - OUT parameters except by dropping the function.) + OUT parameters except by dropping the function.) - When CREATE OR REPLACE FUNCTION is used to replace an + When CREATE OR REPLACE FUNCTION is used to replace an existing function, the ownership and permissions of the function do not change. 
All other function properties are assigned the values specified or implied in the command. You must own the function @@ -87,7 +86,7 @@ CREATE [ OR REPLACE ] FUNCTION triggers, etc. that refer to the old function. Use CREATE OR REPLACE FUNCTION to change a function definition without breaking objects that refer to the function. - Also, ALTER FUNCTION can be used to change most of the + Also, ALTER FUNCTION can be used to change most of the auxiliary properties of an existing function. @@ -121,12 +120,12 @@ CREATE [ OR REPLACE ] FUNCTION - The mode of an argument: IN, OUT, - INOUT, or VARIADIC. - If omitted, the default is IN. - Only OUT arguments can follow a VARIADIC one. - Also, OUT and INOUT arguments cannot be used - together with the RETURNS TABLE notation. + The mode of an argument: IN, OUT, + INOUT, or VARIADIC. + If omitted, the default is IN. + Only OUT arguments can follow a VARIADIC one. + Also, OUT and INOUT arguments cannot be used + together with the RETURNS TABLE notation. @@ -141,7 +140,7 @@ CREATE [ OR REPLACE ] FUNCTION name of an input argument is just extra documentation, so far as the function itself is concerned; but you can use input argument names when calling a function to improve readability (see ). In any case, the name + linkend="sql-syntax-calling-funcs"/>). In any case, the name of an output argument is significant, because it defines the column name in the result row type. (If you omit the name for an output argument, the system will choose a default column name.) @@ -160,7 +159,7 @@ CREATE [ OR REPLACE ] FUNCTION Depending on the implementation language it might also be allowed - to specify pseudo-types such as cstring. + to specify pseudo-types such as cstring. Pseudo-types indicate that the actual argument type is either incompletely specified, or outside the set of ordinary SQL data types. @@ -183,7 +182,7 @@ CREATE [ OR REPLACE ] FUNCTION An expression to be used as default value if the parameter is not specified. The expression has to be coercible to the argument type of the parameter. - Only input (including INOUT) parameters can have a default + Only input (including INOUT) parameters can have a default value. All input parameters following a parameter with a default value must have default values as well. @@ -199,15 +198,15 @@ CREATE [ OR REPLACE ] FUNCTION can be a base, composite, or domain type, or can reference the type of a table column. Depending on the implementation language it might also be allowed - to specify pseudo-types such as cstring. + to specify pseudo-types such as cstring. If the function is not supposed to return a value, specify - void as the return type. + void as the return type. - When there are OUT or INOUT parameters, - the RETURNS clause can be omitted. If present, it + When there are OUT or INOUT parameters, + the RETURNS clause can be omitted. If present, it must agree with the result type implied by the output parameters: - RECORD if there are multiple output parameters, or + RECORD if there are multiple output parameters, or the same type as the single output parameter. @@ -229,10 +228,10 @@ CREATE [ OR REPLACE ] FUNCTION - The name of an output column in the RETURNS TABLE + The name of an output column in the RETURNS TABLE syntax. This is effectively another way of declaring a named - OUT parameter, except that RETURNS TABLE - also implies RETURNS SETOF. + OUT parameter, except that RETURNS TABLE + also implies RETURNS SETOF. 
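 A hedged sketch of this form (the function name, its parameter, and the orders table it queries are hypothetical):
     CREATE FUNCTION recent_orders(since date)
         RETURNS TABLE (order_id int, customer text)   -- named output columns; implies SETOF
         AS 'SELECT id, customer_name FROM orders WHERE placed_at >= since'
         LANGUAGE SQL STABLE;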
@@ -242,7 +241,7 @@ CREATE [ OR REPLACE ] FUNCTION - The data type of an output column in the RETURNS TABLE + The data type of an output column in the RETURNS TABLE syntax. @@ -269,7 +268,7 @@ CREATE [ OR REPLACE ] FUNCTION Lists which transforms a call to the function should apply. Transforms convert between SQL types and language-specific data types; - see . Procedural language + see . Procedural language implementations usually have hardcoded knowledge of the built-in types, so those don't need to be listed here. If a procedural language implementation does not know how to handle a type and no transform is @@ -284,9 +283,9 @@ CREATE [ OR REPLACE ] FUNCTION WINDOW indicates that the function is a - window function rather than a plain function. + window function rather than a plain function. This is currently only useful for functions written in C. - The WINDOW attribute cannot be changed when + The WINDOW attribute cannot be changed when replacing an existing function definition. @@ -321,24 +320,24 @@ CREATE [ OR REPLACE ] FUNCTION result could change across SQL statements. This is the appropriate selection for functions whose results depend on database lookups, parameter variables (such as the current time zone), etc. (It is - inappropriate for AFTER triggers that wish to + inappropriate for AFTER triggers that wish to query rows modified by the current command.) Also note - that the current_timestamp family of functions qualify + that the current_timestamp family of functions qualify as stable, since their values do not change within a transaction. VOLATILE indicates that the function value can change even within a single table scan, so no optimizations can be made. Relatively few database functions are volatile in this sense; - some examples are random(), currval(), - timeofday(). But note that any function that has + some examples are random(), currval(), + timeofday(). But note that any function that has side-effects must be classified volatile, even if its result is quite predictable, to prevent calls from being optimized away; an example is - setval(). + setval(). - For additional details see . + For additional details see . @@ -363,7 +362,7 @@ CREATE [ OR REPLACE ] FUNCTION In addition, functions which do not take arguments or which are not passed any arguments from the security barrier view or table do not have to be marked as leakproof to be executed before security conditions. See - and . + and . This option can only be set by the superuser. @@ -430,11 +429,11 @@ CREATE [ OR REPLACE ] FUNCTION Functions should be labeled parallel unsafe if they modify any database state, or if they make changes to the transaction such as using sub-transactions, or if they access sequences or attempt to make - persistent changes to settings (e.g. setval). They should + persistent changes to settings (e.g. setval). They should be labeled as parallel restricted if they access temporary tables, client connection state, cursors, prepared statements, or miscellaneous backend-local state which the system cannot synchronize in parallel mode - (e.g. setseed cannot be executed other than by the group + (e.g. setseed cannot be executed other than by the group leader because a change made by another process would not be reflected in the leader). 
In general, if a function is labeled as being safe when it is restricted or unsafe, or if it is labeled as being restricted when @@ -443,19 +442,19 @@ CREATE [ OR REPLACE ] FUNCTION exhibit totally undefined behavior if mislabeled, since there is no way for the system to protect itself against arbitrary C code, but in most likely cases the result will be no worse than for any other function. - If in doubt, functions should be labeled as UNSAFE, which is + If in doubt, functions should be labeled as UNSAFE, which is the default. - execution_cost + COST execution_cost A positive number giving the estimated execution cost for the function, - in units of . If the function + in units of . If the function returns a set, this is the cost per returned row. If the cost is not specified, 1 unit is assumed for C-language and internal functions, and 100 units for functions in all other languages. Larger values @@ -466,7 +465,7 @@ CREATE [ OR REPLACE ] FUNCTION - result_rows + ROWS result_rows @@ -483,29 +482,29 @@ CREATE [ OR REPLACE ] FUNCTION value - The SET clause causes the specified configuration + The SET clause causes the specified configuration parameter to be set to the specified value when the function is entered, and then restored to its prior value when the function exits. - SET FROM CURRENT saves the value of the parameter that - is current when CREATE FUNCTION is executed as the value + SET FROM CURRENT saves the value of the parameter that + is current when CREATE FUNCTION is executed as the value to be applied when the function is entered. - If a SET clause is attached to a function, then - the effects of a SET LOCAL command executed inside the + If a SET clause is attached to a function, then + the effects of a SET LOCAL command executed inside the function for the same variable are restricted to the function: the configuration parameter's prior value is still restored at function exit. However, an ordinary - SET command (without LOCAL) overrides the - SET clause, much as it would do for a previous SET - LOCAL command: the effects of such a command will persist after + SET command (without LOCAL) overrides the + SET clause, much as it would do for a previous SET + LOCAL command: the effects of such a command will persist after function exit, unless the current transaction is rolled back. - See and - + See and + for more information about allowed parameter names and values. @@ -523,7 +522,7 @@ CREATE [ OR REPLACE ] FUNCTION It is often helpful to use dollar quoting (see ) to write the function definition + linkend="sql-syntax-dollar-quoting"/>) to write the function definition string, rather than the normal single quote syntax. Without dollar quoting, any single quotes or backslashes in the function definition must be escaped by doubling them. @@ -543,11 +542,14 @@ CREATE [ OR REPLACE ] FUNCTION the SQL function. The string obj_file is the name of the shared library file containing the compiled C function, and is interpreted - as for the command. The string + as for the command. The string link_symbol is the function's link symbol, that is, the name of the function in the C - language source code. If the link symbol is omitted, it is assumed - to be the same as the name of the SQL function being defined. + language source code. If the link symbol is omitted, it is assumed to + be the same as the name of the SQL function being defined. 
The C names + of all functions must be different, so you must give overloaded C + functions different C names (for example, use the argument types as + part of the C names). @@ -560,45 +562,10 @@ CREATE [ OR REPLACE ] FUNCTION - - attribute - - - - The historical way to specify optional pieces of information - about the function. The following attributes can appear here: - - - - isStrict - - - Equivalent to STRICT or RETURNS NULL ON NULL INPUT. - - - - - - isCachable - - isCachable is an obsolete equivalent of - IMMUTABLE; it's still accepted for - backwards-compatibility reasons. - - - - - - - Attribute names are not case-sensitive. - - - - - Refer to for further information on writing + Refer to for further information on writing functions. @@ -611,15 +578,14 @@ CREATE [ OR REPLACE ] FUNCTION PostgreSQL allows function overloading; that is, the same name can be used for several different functions so long as they have distinct - input argument types. However, the C names of all functions must be - different, so you must give overloaded C functions different C - names (for example, use the argument types as part of the C - names). + input argument types. Whether or not you use it, this capability entails + security precautions when calling functions in databases where some users + mistrust other users; see . Two functions are considered the same if they have the same names and - input argument types, ignoring any OUT + input argument types, ignoring any OUT parameters. Thus for example these declarations conflict: CREATE FUNCTION foo(int) ... @@ -635,7 +601,7 @@ CREATE FUNCTION foo(int, out text) ... CREATE FUNCTION foo(int) ... CREATE FUNCTION foo(int, int default 42) ... - A call foo(10) will fail due to the ambiguity about which + A call foo(10) will fail due to the ambiguity about which function should be called. @@ -648,16 +614,16 @@ CREATE FUNCTION foo(int, int default 42) ... The full SQL type syntax is allowed for declaring a function's arguments and return value. However, parenthesized type modifiers (e.g., the precision field for - type numeric) are discarded by CREATE FUNCTION. + type numeric) are discarded by CREATE FUNCTION. Thus for example - CREATE FUNCTION foo (varchar(10)) ... + CREATE FUNCTION foo (varchar(10)) ... is exactly the same as - CREATE FUNCTION foo (varchar) .... + CREATE FUNCTION foo (varchar) .... When replacing an existing function with CREATE OR REPLACE - FUNCTION, there are restrictions on changing parameter names. + FUNCTION, there are restrictions on changing parameter names. You cannot change the name already assigned to any input parameter (although you can add names to parameters that had none before). If there is more than one output parameter, you cannot change the @@ -668,9 +634,9 @@ CREATE FUNCTION foo(int, int default 42) ... - If a function is declared STRICT with a VARIADIC + If a function is declared STRICT with a VARIADIC argument, the strictness check tests that the variadic array as - a whole is non-null. The function will still be called if the + a whole is non-null. The function will still be called if the array has null elements. @@ -681,7 +647,7 @@ CREATE FUNCTION foo(int, int default 42) ... Here are some trivial examples to help you get started. For more - information and examples, see . + information and examples, see . 
CREATE FUNCTION add(integer, integer) RETURNS integer AS 'select $1 + $2;' @@ -723,7 +689,7 @@ CREATE FUNCTION dup(int) RETURNS dup_result SELECT * FROM dup(42); - Another way to return multiple columns is to use a TABLE + Another way to return multiple columns is to use a TABLE function: CREATE FUNCTION dup(int) RETURNS TABLE(f1 int, f2 text) @@ -732,8 +698,8 @@ CREATE FUNCTION dup(int) RETURNS TABLE(f1 int, f2 text) SELECT * FROM dup(42); - However, a TABLE function is different from the - preceding examples, because it actually returns a set + However, a TABLE function is different from the + preceding examples, because it actually returns a set of records, not just one record. @@ -742,15 +708,15 @@ SELECT * FROM dup(42); Writing <literal>SECURITY DEFINER</literal> Functions Safely - search_path configuration parameter - use in securing functions + search_path configuration parameter + use in securing functions Because a SECURITY DEFINER function is executed with the privileges of the user that owns it, care is needed to ensure that the function cannot be misused. For security, - should be set to exclude any schemas + should be set to exclude any schemas writable by untrusted users. This prevents malicious users from creating objects (e.g., tables, functions, and operators) that mask objects intended to be used by the function. @@ -758,7 +724,7 @@ SELECT * FROM dup(42); temporary-table schema, which is searched first by default, and is normally writable by anyone. A secure arrangement can be obtained by forcing the temporary schema to be searched last. To do this, - write pg_temppg_tempsecuring functions as the last entry in search_path. + write pg_temppg_tempsecuring functions as the last entry in search_path. This function illustrates safe usage: @@ -778,27 +744,27 @@ $$ LANGUAGE plpgsql SET search_path = admin, pg_temp; - This function's intention is to access a table admin.pwds. - But without the SET clause, or with a SET clause - mentioning only admin, the function could be subverted by - creating a temporary table named pwds. + This function's intention is to access a table admin.pwds. + But without the SET clause, or with a SET clause + mentioning only admin, the function could be subverted by + creating a temporary table named pwds. Before PostgreSQL version 8.3, the - SET clause was not available, and so older functions may + SET clause was not available, and so older functions may contain rather complicated logic to save, set, and restore - search_path. The SET clause is far easier + search_path. The SET clause is far easier to use for this purpose. Another point to keep in mind is that by default, execute privilege - is granted to PUBLIC for newly created functions - (see for more + is granted to PUBLIC for newly created functions + (see for more information). Frequently you will wish to restrict use of a security definer function to only some users. To do that, you must revoke - the default PUBLIC privileges and then grant execute + the default PUBLIC privileges and then grant execute privilege selectively. To avoid having a window where the new function is accessible to all, create it and set the privileges within a single transaction. For example: @@ -818,7 +784,7 @@ COMMIT; Compatibility - A CREATE FUNCTION command is defined in SQL:1999 and later. + A CREATE FUNCTION command is defined in the SQL standard. The PostgreSQL version is similar but not fully compatible. The attributes are not portable, neither are the different available languages. 
@@ -843,11 +809,11 @@ COMMIT; See Also - - - - - + + + + + diff --git a/doc/src/sgml/ref/create_group.sgml b/doc/src/sgml/ref/create_group.sgml index 158617cb93..1b8e76e326 100644 --- a/doc/src/sgml/ref/create_group.sgml +++ b/doc/src/sgml/ref/create_group.sgml @@ -3,7 +3,7 @@ doc/src/sgml/ref/create_group.sgml PostgreSQL documentation --> - + CREATE GROUP @@ -21,23 +21,23 @@ PostgreSQL documentation -CREATE GROUP name [ [ WITH ] option [ ... ] ] +CREATE GROUP name [ [ WITH ] option [ ... ] ] -where option can be: +where option can be: SUPERUSER | NOSUPERUSER | CREATEDB | NOCREATEDB | CREATEROLE | NOCREATEROLE | INHERIT | NOINHERIT | LOGIN | NOLOGIN - | [ ENCRYPTED ] PASSWORD 'password' - | VALID UNTIL 'timestamp' - | IN ROLE role_name [, ...] - | IN GROUP role_name [, ...] - | ROLE role_name [, ...] - | ADMIN role_name [, ...] - | USER role_name [, ...] - | SYSID uid + | [ ENCRYPTED ] PASSWORD 'password' + | VALID UNTIL 'timestamp' + | IN ROLE role_name [, ...] + | IN GROUP role_name [, ...] + | ROLE role_name [, ...] + | ADMIN role_name [, ...] + | USER role_name [, ...] + | SYSID uid @@ -46,7 +46,7 @@ CREATE GROUP name [ [ WITH ] CREATE GROUP is now an alias for - . + . @@ -63,7 +63,7 @@ CREATE GROUP name [ [ WITH ] See Also - + diff --git a/doc/src/sgml/ref/create_index.sgml b/doc/src/sgml/ref/create_index.sgml index 83ee7d3f25..ad619cdcfe 100644 --- a/doc/src/sgml/ref/create_index.sgml +++ b/doc/src/sgml/ref/create_index.sgml @@ -3,7 +3,7 @@ doc/src/sgml/ref/create_index.sgml PostgreSQL documentation --> - + CREATE INDEX @@ -21,9 +21,10 @@ PostgreSQL documentation -CREATE [ UNIQUE ] INDEX [ CONCURRENTLY ] [ [ IF NOT EXISTS ] name ] ON table_name [ USING method ] +CREATE [ UNIQUE ] INDEX [ CONCURRENTLY ] [ [ IF NOT EXISTS ] name ] ON [ ONLY ] table_name [ USING method ] ( { column_name | ( expression ) } [ COLLATE collation ] [ opclass ] [ ASC | DESC ] [ NULLS { FIRST | LAST } ] [, ...] ) - [ WITH ( storage_parameter = value [, ... ] ) ] + [ INCLUDE ( column_name [, ...] ) ] + [ WITH ( storage_parameter = value [, ... ] ) ] [ TABLESPACE tablespace_name ] [ WHERE predicate ] @@ -51,8 +52,8 @@ CREATE [ UNIQUE ] INDEX [ CONCURRENTLY ] [ [ IF NOT EXISTS ] upper(col) would allow the clause - WHERE upper(col) = 'JIM' to use an index. + upper(col) would allow the clause + WHERE upper(col) = 'JIM' to use an index. @@ -72,7 +73,7 @@ CREATE [ UNIQUE ] INDEX [ CONCURRENTLY ] [ [ IF NOT EXISTS ] WHERE with UNIQUE to enforce uniqueness over a subset of a - table. See for more discussion. + table. See for more discussion. @@ -85,7 +86,7 @@ CREATE [ UNIQUE ] INDEX [ CONCURRENTLY ] [ [ IF NOT EXISTS ] All functions and operators used in an index definition must be - immutable, that is, their results must depend only on + immutable, that is, their results must depend only on their arguments and never on any outside influence (such as the contents of another table or the current time). This restriction ensures that the behavior of the index is well-defined. To use a @@ -108,6 +109,11 @@ CREATE [ UNIQUE ] INDEX [ CONCURRENTLY ] [ [ IF NOT EXISTS ] + + + Additional restrictions apply when unique indexes are applied to + partitioned tables; see . 
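A brief sketch of the partial-unique-index case mentioned above, using a hypothetical subscriptions table; uniqueness is enforced only over the rows that satisfy the predicate:

-- At most one active subscription per customer; archived rows are not
-- constrained. (Table and column names are illustrative.)
CREATE UNIQUE INDEX subscriptions_one_active_idx
    ON subscriptions (customer_id)
    WHERE active;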
+ @@ -115,13 +121,13 @@ CREATE [ UNIQUE ] INDEX [ CONCURRENTLY ] [ [ IF NOT EXISTS ] CONCURRENTLY - When this option is used, PostgreSQL will build the + When this option is used, PostgreSQL will build the index without taking any locks that prevent concurrent inserts, updates, or deletes on the table; whereas a standard index build locks out writes (but not reads) on the table until it's done. There are several caveats to be aware of when using this option - — see . + — see . @@ -138,19 +144,75 @@ CREATE [ UNIQUE ] INDEX [ CONCURRENTLY ] [ [ IF NOT EXISTS ] + + INCLUDE + + + The optional INCLUDE clause specifies a + list of columns which will be included in the index + as non-key columns. A non-key column cannot + be used in an index scan search qualification, and it is disregarded + for purposes of any uniqueness or exclusion constraint enforced by + the index. However, an index-only scan can return the contents of + non-key columns without having to visit the index's table, since + they are available directly from the index entry. Thus, addition of + non-key columns allows index-only scans to be used for queries that + otherwise could not use them. + + + + It's wise to be conservative about adding non-key columns to an + index, especially wide columns. If an index tuple exceeds the + maximum size allowed for the index type, data insertion will fail. + In any case, non-key columns duplicate data from the index's table + and bloat the size of the index, thus potentially slowing searches. + + + + Columns listed in the INCLUDE clause don't need + appropriate operator classes; the clause can include + columns whose data types don't have operator classes defined for + a given access method. + + + + Expressions are not supported as included columns since they cannot be + used in index-only scans. + + + + Currently, only the B-tree index access method supports this feature. + In B-tree indexes, the values of columns listed in the + INCLUDE clause are included in leaf tuples which + correspond to heap tuples, but are not included in upper-level + index entries used for tree navigation. + + + + name The name of the index to be created. No schema name can be included here; the index is always created in the same schema as its parent - table. If the name is omitted, PostgreSQL chooses a + table. If the name is omitted, PostgreSQL chooses a suitable name based on the parent table's name and the indexed column name(s). + + ONLY + + + Indicates not to recurse creating indexes on partitions, if the + table is partitioned. The default is to recurse. + + + + table_name @@ -166,8 +228,8 @@ CREATE [ UNIQUE ] INDEX [ CONCURRENTLY ] [ [ IF NOT EXISTS ] The name of the index method to be used. Choices are btree, hash, - gist, spgist, gin, and - brin. + gist, spgist, gin, and + brin. The default method is btree. @@ -217,7 +279,7 @@ CREATE [ UNIQUE ] INDEX [ CONCURRENTLY ] [ [ IF NOT EXISTS ] - ASC + ASC Specifies ascending sort order (which is the default). @@ -226,7 +288,7 @@ CREATE [ UNIQUE ] INDEX [ CONCURRENTLY ] [ [ IF NOT EXISTS ] - DESC + DESC Specifies descending sort order. @@ -235,21 +297,21 @@ CREATE [ UNIQUE ] INDEX [ CONCURRENTLY ] [ [ IF NOT EXISTS ] - NULLS FIRST + NULLS FIRST Specifies that nulls sort before non-nulls. This is the default - when DESC is specified. + when DESC is specified. - NULLS LAST + NULLS LAST Specifies that nulls sort after non-nulls. This is the default - when DESC is not specified. + when DESC is not specified. 
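A minimal sketch of the ordering options just described, assuming a hypothetical events table; the index matches ORDER BY logdate DESC NULLS LAST, id ASC without a separate sort step:

-- events is a hypothetical table. DESC NULLS LAST overrides the
-- NULLS FIRST default that DESC would otherwise imply.
CREATE INDEX events_logdate_id_idx
    ON events (logdate DESC NULLS LAST, id ASC);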
@@ -259,7 +321,7 @@ CREATE [ UNIQUE ] INDEX [ CONCURRENTLY ] [ [ IF NOT EXISTS ] The name of an index-method-specific storage parameter. See - + for details. @@ -270,8 +332,8 @@ CREATE [ UNIQUE ] INDEX [ CONCURRENTLY ] [ [ IF NOT EXISTS ] The tablespace in which to create the index. If not specified, - is consulted, or - for indexes on temporary + is consulted, or + for indexes on temporary tables. @@ -288,19 +350,19 @@ CREATE [ UNIQUE ] INDEX [ CONCURRENTLY ] [ [ IF NOT EXISTS ] - - Index Storage Parameters + + Index Storage Parameters - The optional WITH clause specifies storage - parameters for the index. Each index method has its own set of allowed + The optional WITH clause specifies storage + parameters for the index. Each index method has its own set of allowed storage parameters. The B-tree, hash, GiST and SP-GiST index methods all accept this parameter: - fillfactor + fillfactor The fillfactor for an index is a percentage that determines how full @@ -321,20 +383,35 @@ CREATE [ UNIQUE ] INDEX [ CONCURRENTLY ] [ [ IF NOT EXISTS ] + + B-tree indexes additionally accept this parameter: + + + + + vacuum_cleanup_index_scale_factor + + + Per-index value for . + + + + + GiST indexes additionally accept this parameter: - buffering + buffering Determines whether the buffering build technique described in - is used to build the index. With - OFF it is disabled, with ON it is enabled, and - with AUTO it is initially disabled, but turned on - on-the-fly once the index size reaches . The default is AUTO. + is used to build the index. With + OFF it is disabled, with ON it is enabled, and + with AUTO it is initially disabled, but turned on + on-the-fly once the index size reaches . The default is AUTO. @@ -346,23 +423,23 @@ CREATE [ UNIQUE ] INDEX [ CONCURRENTLY ] [ [ IF NOT EXISTS ] - fastupdate + fastupdate This setting controls usage of the fast update technique described in - . It is a Boolean parameter: - ON enables fast update, OFF disables it. - (Alternative spellings of ON and OFF are - allowed as described in .) The - default is ON. + . It is a Boolean parameter: + ON enables fast update, OFF disables it. + (Alternative spellings of ON and OFF are + allowed as described in .) The + default is ON. - Turning fastupdate off via ALTER INDEX prevents + Turning fastupdate off via ALTER INDEX prevents future insertions from going into the list of pending index entries, but does not in itself flush previous entries. You might want to - VACUUM the table or call gin_clean_pending_list + VACUUM the table or call gin_clean_pending_list function afterward to ensure the pending list is emptied. @@ -371,10 +448,10 @@ CREATE [ UNIQUE ] INDEX [ CONCURRENTLY ] [ [ IF NOT EXISTS ] - gin_pending_list_limit + gin_pending_list_limit - Custom parameter. + Custom parameter. This value is specified in kilobytes. @@ -382,23 +459,23 @@ CREATE [ UNIQUE ] INDEX [ CONCURRENTLY ] [ [ IF NOT EXISTS ] - BRIN indexes accept different parameters: + BRIN indexes accept different parameters: - pages_per_range + pages_per_range Defines the number of table blocks that make up one block range for - each entry of a BRIN index (see - for more details). The default is 128. + each entry of a BRIN index (see + for more details). The default is 128. 
- autosummarize + autosummarize Defines whether a summarization run is invoked for the previous page @@ -409,17 +486,17 @@ CREATE [ UNIQUE ] INDEX [ CONCURRENTLY ] [ [ IF NOT EXISTS ] - - Building Indexes Concurrently + + Building Indexes Concurrently - + index building concurrently Creating an index can interfere with regular operation of a database. - Normally PostgreSQL locks the table to be indexed against + Normally PostgreSQL locks the table to be indexed against writes and performs the entire index build with a single scan of the table. Other transactions can still read the table, but if they try to insert, update, or delete rows in the table they will block until the @@ -430,11 +507,11 @@ CREATE [ UNIQUE ] INDEX [ CONCURRENTLY ] [ [ IF NOT EXISTS ] - PostgreSQL supports building indexes without locking + PostgreSQL supports building indexes without locking out writes. This method is invoked by specifying the - CONCURRENTLY option of CREATE INDEX. + CONCURRENTLY option of CREATE INDEX. When this option is used, - PostgreSQL must perform two scans of the table, and in + PostgreSQL must perform two scans of the table, and in addition it must wait for all existing transactions that could potentially modify or use the index to terminate. Thus this method requires more total work than a standard index build and takes @@ -450,9 +527,9 @@ CREATE [ UNIQUE ] INDEX [ CONCURRENTLY ] [ [ IF NOT EXISTS ] ) predating the second + that have a snapshot (see ) predating the second scan to terminate. Then finally the index can be marked ready for use, - and the CREATE INDEX command terminates. + and the CREATE INDEX command terminates. Even then, however, the index may not be immediately usable for queries: in the worst case, it cannot be used as long as transactions exist that predate the start of the index build. @@ -460,11 +537,11 @@ CREATE [ UNIQUE ] INDEX [ CONCURRENTLY ] [ [ IF NOT EXISTS ] If a problem arises while scanning the table, such as a deadlock or a - uniqueness violation in a unique index, the CREATE INDEX - command will fail but leave behind an invalid index. This index + uniqueness violation in a unique index, the CREATE INDEX + command will fail but leave behind an invalid index. This index will be ignored for querying purposes because it might be incomplete; - however it will still consume update overhead. The psql - \d command will report such an index as INVALID: + however it will still consume update overhead. The psql + \d command will report such an index as INVALID: postgres=# \d tab @@ -478,8 +555,8 @@ Indexes: The recommended recovery method in such cases is to drop the index and try again to perform - CREATE INDEX CONCURRENTLY. (Another possibility is to rebuild - the index with REINDEX. However, since REINDEX + CREATE INDEX CONCURRENTLY. (Another possibility is to rebuild + the index with REINDEX. However, since REINDEX does not support concurrent builds, this option is unlikely to seem attractive.) @@ -490,7 +567,7 @@ Indexes: when the second table scan begins. This means that constraint violations could be reported in other queries prior to the index becoming available for use, or even in cases where the index build eventually fails. Also, - if a failure does occur in the second scan, the invalid index + if a failure does occur in the second scan, the invalid index continues to enforce its uniqueness constraint afterwards. 
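The recovery procedure described above amounts to dropping the invalid index and rerunning the concurrent build once the underlying problem (for example, duplicate rows for a unique index) has been resolved; a sketch with illustrative names:

-- Index and table names are illustrative.
DROP INDEX films_title_idx;
CREATE UNIQUE INDEX CONCURRENTLY films_title_idx ON films (title);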
@@ -502,11 +579,12 @@ Indexes: Regular index builds permit other regular index builds on the - same table to occur in parallel, but only one concurrent index build - can occur on a table at a time. In both cases, no other types of schema - modification on the table are allowed meanwhile. Another difference - is that a regular CREATE INDEX command can be performed within - a transaction block, but CREATE INDEX CONCURRENTLY cannot. + same table to occur simultaneously, but only one concurrent index build + can occur on a table at a time. In either case, schema modification of the + table is not allowed while the index is being built. Another difference is + that a regular CREATE INDEX command can be performed + within a transaction block, but CREATE INDEX CONCURRENTLY + cannot. @@ -515,7 +593,7 @@ Indexes: Notes - See for information about when indexes can + See for information about when indexes can be used, when they are not used, and in which particular situations they can be useful. @@ -540,36 +618,116 @@ Indexes: ordering. For example, we might want to sort a complex-number data type either by absolute value or by real part. We could do this by defining two operator classes for the data type and then selecting - the proper class when making an index. More information about - operator classes is in and in . + the proper class when creating an index. More information about + operator classes is in and in . + + + + When CREATE INDEX is invoked on a partitioned + table, the default behavior is to recurse to all partitions to ensure + they all have matching indexes. + Each partition is first checked to determine whether an equivalent + index already exists, and if so, that index will become attached as a + partition index to the index being created, which will become its + parent index. + If no matching index exists, a new index will be created and + automatically attached; the name of the new index in each partition + will be determined as if no index name had been specified in the + command. + If the ONLY option is specified, no recursion + is done, and the index is marked invalid. + (ALTER INDEX ... ATTACH PARTITION marks the index + valid, once all partitions acquire matching indexes.) Note, however, + that any partition that is created in the future using + CREATE TABLE ... PARTITION OF will automatically + have a matching index, regardless of whether ONLY is + specified. For index methods that support ordered scans (currently, only B-tree), - the optional clauses ASC, DESC, NULLS - FIRST, and/or NULLS LAST can be specified to modify + the optional clauses ASC, DESC, NULLS + FIRST, and/or NULLS LAST can be specified to modify the sort ordering of the index. Since an ordered index can be scanned either forward or backward, it is not normally useful to create a - single-column DESC index — that sort ordering is already + single-column DESC index — that sort ordering is already available with a regular index. The value of these options is that multicolumn indexes can be created that match the sort ordering requested by a mixed-ordering query, such as SELECT ... ORDER BY x ASC, y - DESC. The NULLS options are useful if you need to support - nulls sort low behavior, rather than the default nulls - sort high, in queries that depend on indexes to avoid sorting steps. + DESC. The NULLS options are useful if you need to support + nulls sort low behavior, rather than the default nulls + sort high, in queries that depend on indexes to avoid sorting steps. 
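A sketch of the partitioned-table workflow described above, with hypothetical table names: ONLY creates the parent index as invalid, each partition's index can then be built (even concurrently) and attached, and the parent index becomes valid once every partition has a matching attached index.

-- measurement and measurement_y2018 are hypothetical tables.
CREATE INDEX measurement_logdate_idx ON ONLY measurement (logdate);
CREATE INDEX CONCURRENTLY measurement_y2018_logdate_idx
    ON measurement_y2018 (logdate);
ALTER INDEX measurement_logdate_idx
    ATTACH PARTITION measurement_y2018_logdate_idx;
-- Repeat for each remaining partition; the parent index stays INVALID
-- until all partitions have attached matching indexes.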
For most index methods, the speed of creating an index is - dependent on the setting of . + dependent on the setting of . Larger values will reduce the time needed for index creation, so long as you don't make it larger than the amount of memory really available, which would drive the machine into swapping. - Use + PostgreSQL can build indexes while + leveraging multiple CPUs in order to process the table rows faster. + This feature is known as parallel index + build. For index methods that support building indexes + in parallel (currently, only B-tree), + maintenance_work_mem specifies the maximum + amount of memory that can be used by each index build operation as + a whole, regardless of how many worker processes were started. + Generally, a cost model automatically determines how many worker + processes should be requested, if any. + + + + Parallel index builds may benefit from increasing + maintenance_work_mem where an equivalent serial + index build will see little or no benefit. Note that + maintenance_work_mem may influence the number of + worker processes requested, since parallel workers must have at + least a 32MB share of the total + maintenance_work_mem budget. There must also be + a remaining 32MB share for the leader process. + Increasing + may allow more workers to be used, which will reduce the time + needed for index creation, so long as the index build is not + already I/O bound. Of course, there should also be sufficient + CPU capacity that would otherwise lie idle. + + + + Setting a value for parallel_workers via directly controls how many parallel + worker processes will be requested by a CREATE + INDEX against the table. This bypasses the cost model + completely, and prevents maintenance_work_mem + from affecting how many parallel workers are requested. Setting + parallel_workers to 0 via ALTER + TABLE will disable parallel index builds on the table in + all cases. + + + + + You might want to reset parallel_workers after + setting it as part of tuning an index build. This avoids + inadvertent changes to query plans, since + parallel_workers affects + all parallel table scans. + + + + + While CREATE INDEX with the + CONCURRENTLY option supports parallel builds + without special restrictions, only the first table scan is actually + performed in parallel. + + + + Use to remove an index. @@ -577,8 +735,8 @@ Indexes: Prior releases of PostgreSQL also had an R-tree index method. This method has been removed because it had no significant advantages over the GiST method. - If USING rtree is specified, CREATE INDEX - will interpret it as USING gist, to simplify conversion + If USING rtree is specified, CREATE INDEX + will interpret it as USING gist, to simplify conversion of old databases to GiST. 
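A sketch of tuning a parallel index build along the lines described above, using a hypothetical sales_table; the parallel_workers override is reset afterwards so that ordinary parallel scans of the table are not affected:

-- sales_table and sales_amount_idx are illustrative names.
SET maintenance_work_mem = '1GB';
ALTER TABLE sales_table SET (parallel_workers = 4);  -- bypasses the cost model
CREATE INDEX sales_amount_idx ON sales_table (amount);
ALTER TABLE sales_table RESET (parallel_workers);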
@@ -587,7 +745,7 @@ Indexes: Examples - To create a B-tree index on the column title in + To create a unique B-tree index on the column title in the table films: CREATE UNIQUE INDEX title_idx ON films (title); @@ -595,13 +753,22 @@ CREATE UNIQUE INDEX title_idx ON films (title); - To create an index on the expression lower(title), + To create a unique B-tree index on the column title + with included columns director + and rating in the table films: + +CREATE UNIQUE INDEX title_idx ON films (title) INCLUDE (director, rating); + + + + + To create an index on the expression lower(title), allowing efficient case-insensitive searches: CREATE INDEX ON films ((lower(title))); (In this example we have chosen to omit the index name, so the system - will choose a name, typically films_lower_idx.) + will choose a name, typically films_lower_idx.) @@ -626,16 +793,16 @@ CREATE UNIQUE INDEX title_idx ON films (title) WITH (fillfactor = 70); - To create a GIN index with fast updates disabled: + To create a GIN index with fast updates disabled: CREATE INDEX gin_idx ON documents_table USING GIN (locations) WITH (fastupdate = off); - To create an index on the column code in the table - films and have the index reside in the tablespace - indexspace: + To create an index on the column code in the table + films and have the index reside in the tablespace + indexspace: CREATE INDEX code_idx ON films (code) TABLESPACE indexspace; @@ -675,8 +842,8 @@ CREATE INDEX CONCURRENTLY sales_quantity_index ON sales_table (quantity); See Also - - + + diff --git a/doc/src/sgml/ref/create_language.sgml b/doc/src/sgml/ref/create_language.sgml index 75165b677f..13b28b1ccc 100644 --- a/doc/src/sgml/ref/create_language.sgml +++ b/doc/src/sgml/ref/create_language.sgml @@ -3,7 +3,7 @@ doc/src/sgml/ref/create_language.sgml PostgreSQL documentation --> - + CREATE LANGUAGE @@ -33,21 +33,21 @@ CREATE [ OR REPLACE ] [ TRUSTED ] [ PROCEDURAL ] LANGUAGE + languages have been made into extensions, and should + therefore be installed with not CREATE LANGUAGE. Direct use of CREATE LANGUAGE should now be confined to - extension installation scripts. If you have a bare + extension installation scripts. If you have a bare language in your database, perhaps as a result of an upgrade, you can convert it to an extension using - CREATE EXTENSION langname FROM + CREATE EXTENSION langname FROM unpackaged. @@ -55,7 +55,7 @@ CREATE [ OR REPLACE ] [ TRUSTED ] [ PROCEDURAL ] LANGUAGE + functions written in the language. Refer to for more information about language handlers. @@ -67,11 +67,11 @@ CREATE [ OR REPLACE ] [ TRUSTED ] [ PROCEDURAL ] LANGUAGE pg_pltemplate catalog and is marked - as allowed to be created by database owners (tmpldbacreate + as allowed to be created by database owners (tmpldbacreate is true). The default is that trusted languages can be created by database owners, but this can be adjusted by superusers by modifying the contents of pg_pltemplate. @@ -101,9 +101,9 @@ CREATE [ OR REPLACE ] [ TRUSTED ] [ PROCEDURAL ] LANGUAGE inline_handler is the name of a previously registered function that will be called to execute an anonymous code block - ( command) + ( command) in this language. If no inline_handler function is specified, the language does not support anonymous code blocks. The handler function must take one argument of - type internal, which will be the DO command's + type internal, which will be the DO command's internal representation, and it will typically return - void. The return value of the handler is ignored. + void. 
The return value of the handler is ignored. @@ -204,7 +204,7 @@ CREATE [ OR REPLACE ] [ TRUSTED ] [ PROCEDURAL ] LANGUAGE - The TRUSTED option and the support function name(s) are + The TRUSTED option and the support function name(s) are ignored if the server has an entry for the specified language - name in pg_pltemplate. + name in pg_pltemplate. @@ -230,12 +230,12 @@ CREATE [ OR REPLACE ] [ TRUSTED ] [ PROCEDURAL ] LANGUAGE to drop procedural languages. + Use to drop procedural languages. The system catalog pg_language (see ) records information about the + linkend="catalog-pg-language"/>) records information about the currently installed languages. Also, the psql command \dL lists the installed languages. @@ -243,7 +243,7 @@ CREATE [ OR REPLACE ] [ TRUSTED ] [ PROCEDURAL ] LANGUAGE pg_pltemplate. But when there is an entry, + in pg_pltemplate. But when there is an entry, the functions need not already exist; they will be automatically defined if not present in the database. - (This might result in CREATE LANGUAGE failing, if the + (This might result in CREATE LANGUAGE failing, if the shared library that implements the language is not available in the installation.) @@ -269,11 +269,11 @@ CREATE [ OR REPLACE ] [ TRUSTED ] [ PROCEDURAL ] LANGUAGE - - - - - + + + + + diff --git a/doc/src/sgml/ref/create_materialized_view.sgml b/doc/src/sgml/ref/create_materialized_view.sgml index a8fb84e7a7..eed4273c4b 100644 --- a/doc/src/sgml/ref/create_materialized_view.sgml +++ b/doc/src/sgml/ref/create_materialized_view.sgml @@ -3,7 +3,7 @@ doc/src/sgml/ref/create_materialized_view.sgml PostgreSQL documentation --> - + CREATE MATERIALIZED VIEW @@ -23,8 +23,8 @@ PostgreSQL documentation CREATE MATERIALIZED VIEW [ IF NOT EXISTS ] table_name [ (column_name [, ...] ) ] - [ WITH ( storage_parameter [= value] [, ... ] ) ] - [ TABLESPACE tablespace_name ] + [ WITH ( storage_parameter [= value] [, ... ] ) ] + [ TABLESPACE tablespace_name ] AS query [ WITH [ NO ] DATA ] @@ -36,13 +36,13 @@ CREATE MATERIALIZED VIEW [ IF NOT EXISTS ] table_name CREATE MATERIALIZED VIEW defines a materialized view of a query. The query is executed and used to populate the view at the time - the command is issued (unless WITH NO DATA is used) and may be + the command is issued (unless WITH NO DATA is used) and may be refreshed later using REFRESH MATERIALIZED VIEW. CREATE MATERIALIZED VIEW is similar to - CREATE TABLE AS, except that it also remembers the query used + CREATE TABLE AS, except that it also remembers the query used to initialize the view, so that it can be refreshed later upon demand. A materialized view has many of the same properties as a table, but there is no support for temporary materialized views or automatic generation of @@ -55,7 +55,7 @@ CREATE MATERIALIZED VIEW [ IF NOT EXISTS ] table_name - IF NOT EXISTS + IF NOT EXISTS Do not throw an error if a materialized view with the same name already @@ -87,27 +87,27 @@ CREATE MATERIALIZED VIEW [ IF NOT EXISTS ] table_name - WITH ( storage_parameter [= value] [, ... ] ) + WITH ( storage_parameter [= value] [, ... ] ) This clause specifies optional storage parameters for the new materialized view; see for more + endterm="sql-createtable-storage-parameters-title"/> for more information. All parameters supported for CREATE TABLE are also supported for CREATE MATERIALIZED VIEW with the exception of OIDS. - See for more information. + See for more information. 
- TABLESPACE tablespace_name + TABLESPACE tablespace_name - The tablespace_name is the name + The tablespace_name is the name of the tablespace in which the new materialized view is to be created. - If not specified, is consulted. + If not specified, is consulted. @@ -116,8 +116,8 @@ CREATE MATERIALIZED VIEW [ IF NOT EXISTS ] table_name query - A , TABLE, - or command. This query will run within a + A , TABLE, + or command. This query will run within a security-restricted operation; in particular, calls to functions that themselves create temporary tables will fail. @@ -125,13 +125,13 @@ CREATE MATERIALIZED VIEW [ IF NOT EXISTS ] table_name - WITH [ NO ] DATA + WITH [ NO ] DATA This clause specifies whether or not the materialized view should be populated at creation time. If not, the materialized view will be flagged as unscannable and cannot be queried until REFRESH - MATERIALIZED VIEW is used. + MATERIALIZED VIEW is used. @@ -152,11 +152,11 @@ CREATE MATERIALIZED VIEW [ IF NOT EXISTS ] table_name See Also - - - - - + + + + + diff --git a/doc/src/sgml/ref/create_opclass.sgml b/doc/src/sgml/ref/create_opclass.sgml index 829d8f2fff..dd5252fd97 100644 --- a/doc/src/sgml/ref/create_opclass.sgml +++ b/doc/src/sgml/ref/create_opclass.sgml @@ -3,7 +3,7 @@ doc/src/sgml/ref/create_opclass.sgml PostgreSQL documentation --> - + CREATE OPERATOR CLASS @@ -37,8 +37,8 @@ CREATE OPERATOR CLASS name [ DEFAUL CREATE OPERATOR CLASS creates a new operator class. An operator class defines how a particular data type can be used with an index. The operator class specifies that certain operators will fill - particular roles or strategies for this data type and this - index method. The operator class also specifies the support procedures to + particular roles or strategies for this data type and this + index method. The operator class also specifies the support functions to be used by the index method when the operator class is selected for an index column. All the operators and functions used by an operator @@ -69,15 +69,15 @@ CREATE OPERATOR CLASS name [ DEFAUL Related operator classes can be grouped into operator - families. To add a new operator class to an existing family, - specify the FAMILY option in CREATE OPERATOR + families. To add a new operator class to an existing family, + specify the FAMILY option in CREATE OPERATOR CLASS. Without this option, the new class is placed into a family named the same as the new class (creating that family if it doesn't already exist). - Refer to for further information. + Refer to for further information. @@ -96,7 +96,7 @@ CREATE OPERATOR CLASS name [ DEFAUL - DEFAULT + DEFAULT If present, the operator class will become the default @@ -159,15 +159,15 @@ CREATE OPERATOR CLASS name [ DEFAUL op_type - In an OPERATOR clause, - the operand data type(s) of the operator, or NONE to + In an OPERATOR clause, + the operand data type(s) of the operator, or NONE to signify a left-unary or right-unary operator. The operand data types can be omitted in the normal case where they are the same as the operator class's data type. - In a FUNCTION clause, the operand data type(s) the + In a FUNCTION clause, the operand data type(s) the function is intended to support, if different from the input data type(s) of the function (for B-tree comparison functions and hash functions) @@ -175,7 +175,7 @@ CREATE OPERATOR CLASS name [ DEFAUL functions in GiST, SP-GiST, GIN and BRIN operator classes). 
These defaults are correct, and so op_type need not be specified in - FUNCTION clauses, except for the case of a B-tree sort + FUNCTION clauses, except for the case of a B-tree sort support function that is meant to support cross-data-type comparisons. @@ -191,8 +191,8 @@ CREATE OPERATOR CLASS name [ DEFAUL - If neither FOR SEARCH nor FOR ORDER BY is - specified, FOR SEARCH is the default. + If neither FOR SEARCH nor FOR ORDER BY is + specified, FOR SEARCH is the default. @@ -201,7 +201,7 @@ CREATE OPERATOR CLASS name [ DEFAUL support_number - The index method's support procedure number for a + The index method's support function number for a function associated with the operator class. @@ -212,7 +212,7 @@ CREATE OPERATOR CLASS name [ DEFAUL The name (optionally schema-qualified) of a function that is an - index method support procedure for the operator class. + index method support function for the operator class. @@ -233,11 +233,11 @@ CREATE OPERATOR CLASS name [ DEFAUL The data type actually stored in the index. Normally this is the same as the column data type, but some index methods (currently GiST, GIN and BRIN) allow it to be different. The - STORAGE clause must be omitted unless the index + STORAGE clause must be omitted unless the index method allows a different type to be used. - If the column data_type is specified - as anyarray, the storage_type - can be declared as anyelement to indicate that the index + If the column data_type is specified + as anyarray, the storage_type + can be declared as anyelement to indicate that the index entries are members of the element type belonging to the actual array type that each particular index is created for. @@ -246,7 +246,7 @@ CREATE OPERATOR CLASS name [ DEFAUL - The OPERATOR, FUNCTION, and STORAGE + The OPERATOR, FUNCTION, and STORAGE clauses can appear in any order. @@ -269,9 +269,9 @@ CREATE OPERATOR CLASS name [ DEFAUL - Before PostgreSQL 8.4, the OPERATOR - clause could include a RECHECK option. This is no longer - supported because whether an index operator is lossy is now + Before PostgreSQL 8.4, the OPERATOR + clause could include a RECHECK option. This is no longer + supported because whether an index operator is lossy is now determined on-the-fly at run time. This allows efficient handling of cases where an operator might or might not be lossy. @@ -282,8 +282,8 @@ CREATE OPERATOR CLASS name [ DEFAUL The following example command defines a GiST index operator class - for the data type _int4 (array of int4). See the - module for the complete example. + for the data type _int4 (array of int4). See the + module for the complete example. @@ -319,10 +319,10 @@ CREATE OPERATOR CLASS gist__int_ops See Also - - - - + + + + diff --git a/doc/src/sgml/ref/create_operator.sgml b/doc/src/sgml/ref/create_operator.sgml index 818e3a2315..d5c385c087 100644 --- a/doc/src/sgml/ref/create_operator.sgml +++ b/doc/src/sgml/ref/create_operator.sgml @@ -3,7 +3,7 @@ doc/src/sgml/ref/create_operator.sgml PostgreSQL documentation --> - + CREATE OPERATOR @@ -22,7 +22,7 @@ PostgreSQL documentation CREATE OPERATOR name ( - PROCEDURE = function_name + {FUNCTION|PROCEDURE} = function_name [, LEFTARG = left_type ] [, RIGHTARG = right_type ] [, COMMUTATOR = com_op ] [, NEGATOR = neg_op ] [, RESTRICT = res_proc ] [, JOIN = join_proc ] @@ -43,7 +43,7 @@ CREATE OPERATOR name ( - The operator name is a sequence of up to NAMEDATALEN-1 + The operator name is a sequence of up to NAMEDATALEN-1 (63 by default) characters from the following list: + - * / < > = ~ ! 
@ # % ^ & | ` ? @@ -72,7 +72,7 @@ CREATE OPERATOR name ( - The use of => as an operator name is deprecated. It may + The use of => as an operator name is deprecated. It may be disallowed altogether in a future release. @@ -86,22 +86,30 @@ CREATE OPERATOR name ( - At least one of LEFTARG and RIGHTARG must be defined. For + At least one of LEFTARG and RIGHTARG must be defined. For binary operators, both must be defined. For right unary - operators, only LEFTARG should be defined, while for left - unary operators only RIGHTARG should be defined. + operators, only LEFTARG should be defined, while for left + unary operators only RIGHTARG should be defined. The function_name - procedure must have been previously defined using CREATE + function must have been previously defined using CREATE FUNCTION and must be defined to accept the correct number of arguments (either one or two) of the indicated types. + + In the syntax of CREATE OPERATOR, the keywords + FUNCTION and PROCEDURE are + equivalent, but the referenced function must in any case be a function, not + a procedure. The use of the keyword PROCEDURE here is + historical and deprecated. + + The other clauses specify optional operator optimization clauses. - Their meaning is detailed in . + Their meaning is detailed in . @@ -122,11 +130,11 @@ CREATE OPERATOR name ( The name of the operator to be defined. See above for allowable characters. The name can be schema-qualified, for example - CREATE OPERATOR myschema.+ (...). If not, then + CREATE OPERATOR myschema.+ (...). If not, then the operator is created in the current schema. Two operators in the same schema can have the same name if they operate on different data types. This is called - overloading. + overloading. @@ -218,7 +226,7 @@ CREATE OPERATOR name ( To give a schema-qualified operator name in com_op or the other optional - arguments, use the OPERATOR() syntax, for example: + arguments, use the OPERATOR() syntax, for example: COMMUTATOR = OPERATOR(myschema.===) , @@ -228,28 +236,28 @@ COMMUTATOR = OPERATOR(myschema.===) , Notes - Refer to for further information. + Refer to for further information. It is not possible to specify an operator's lexical precedence in - CREATE OPERATOR, because the parser's precedence behavior - is hard-wired. See for precedence details. + CREATE OPERATOR, because the parser's precedence behavior + is hard-wired. See for precedence details. - The obsolete options SORT1, SORT2, - LTCMP, and GTCMP were formerly used to + The obsolete options SORT1, SORT2, + LTCMP, and GTCMP were formerly used to specify the names of sort operators associated with a merge-joinable operator. This is no longer necessary, since information about associated operators is found by looking at B-tree operator families instead. If one of these options is given, it is ignored except - for implicitly setting MERGES true. + for implicitly setting MERGES true. - Use to delete user-defined operators - from a database. Use to modify operators in a + Use to delete user-defined operators + from a database. Use to modify operators in a database. 
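As a minimal sketch of the requirement that the underlying function exist before the operator that references it (a fuller example appears under Examples below), here is a hypothetical case-insensitive text comparison operator:

-- text_ci_eq and ==* are illustrative names.
CREATE FUNCTION text_ci_eq(text, text) RETURNS boolean
    AS 'SELECT lower($1) = lower($2)'
    LANGUAGE SQL IMMUTABLE STRICT;

CREATE OPERATOR ==* (
    FUNCTION = text_ci_eq,   -- PROCEDURE = is also accepted here, but deprecated
    LEFTARG = text,
    RIGHTARG = text,
    COMMUTATOR = ==*
);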
@@ -264,11 +272,11 @@ COMMUTATOR = OPERATOR(myschema.===) , CREATE OPERATOR === ( LEFTARG = box, RIGHTARG = box, - PROCEDURE = area_equal_procedure, + FUNCTION = area_equal_function, COMMUTATOR = ===, NEGATOR = !==, - RESTRICT = area_restriction_procedure, - JOIN = area_join_procedure, + RESTRICT = area_restriction_function, + JOIN = area_join_function, HASHES, MERGES ); @@ -288,9 +296,9 @@ CREATE OPERATOR === ( See Also - - - + + + diff --git a/doc/src/sgml/ref/create_opfamily.sgml b/doc/src/sgml/ref/create_opfamily.sgml index c4bcf0863e..ba612c2f2b 100644 --- a/doc/src/sgml/ref/create_opfamily.sgml +++ b/doc/src/sgml/ref/create_opfamily.sgml @@ -3,7 +3,7 @@ doc/src/sgml/ref/create_opfamily.sgml PostgreSQL documentation --> - + CREATE OPERATOR FAMILY @@ -35,7 +35,7 @@ CREATE OPERATOR FAMILY name USING < compatible with these operator classes but not essential for the functioning of any individual index. (Operators and functions that are essential to indexes should be grouped within the relevant operator - class, rather than being loose in the operator family. + class, rather than being loose in the operator family. Typically, single-data-type operators are bound to operator classes, while cross-data-type operators can be loose in an operator family containing operator classes for both data types.) @@ -45,7 +45,7 @@ CREATE OPERATOR FAMILY name USING < The new operator family is initially empty. It should be populated by issuing subsequent CREATE OPERATOR CLASS commands to add contained operator classes, and optionally - ALTER OPERATOR FAMILY commands to add loose + ALTER OPERATOR FAMILY commands to add loose operators and their corresponding support functions. @@ -64,7 +64,7 @@ CREATE OPERATOR FAMILY name USING < - Refer to for further information. + Refer to for further information. @@ -108,11 +108,11 @@ CREATE OPERATOR FAMILY name USING < See Also - - - - - + + + + + diff --git a/doc/src/sgml/ref/create_policy.sgml b/doc/src/sgml/ref/create_policy.sgml index c0dfe1ea4b..2e1229c4f9 100644 --- a/doc/src/sgml/ref/create_policy.sgml +++ b/doc/src/sgml/ref/create_policy.sgml @@ -3,7 +3,7 @@ doc/src/sgml/ref/create_policy.sgml PostgreSQL documentation --> - + CREATE POLICY @@ -73,25 +73,25 @@ CREATE POLICY name ON Policies can be applied for specific commands or for specific roles. The default for newly created policies is that they apply for all commands and - roles, unless otherwise specified. If multiple policies apply to a given - statement, they will be combined using OR (although ON CONFLICT DO - UPDATE and INSERT policies are not combined in this way, but - rather enforced as noted at each stage of ON CONFLICT execution). + roles, unless otherwise specified. Multiple policies may apply to a single + command; see below for more details. + summarizes how the different types + of policy apply to specific commands. - For commands that can have both USING - and WITH CHECK policies (ALL + For policies that can have both USING + and WITH CHECK expressions (ALL and UPDATE), if no WITH CHECK - policy is defined, then the USING policy will be used - both for which rows are visible (normal USING case) - and for which rows will be allowed to be added (WITH - CHECK case). + expression is defined, then the USING expression will be + used both to determine which rows are visible (normal + USING case) and which new rows will be allowed to be + added (WITH CHECK case). 
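A minimal sketch of a policy that supplies only a USING expression, assuming a hypothetical documents table; the same expression then also serves as the WITH CHECK condition for newly added rows:

-- documents and its owner column are hypothetical.
ALTER TABLE documents ENABLE ROW LEVEL SECURITY;

CREATE POLICY documents_owner ON documents
    FOR ALL
    USING (owner = current_user);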
If row-level security is enabled for a table, but no applicable policies - exist, a default deny policy is assumed, so that no rows will + exist, a default deny policy is assumed, so that no rows will be visible or updatable. @@ -144,6 +144,16 @@ CREATE POLICY name ON + + + Note that there needs to be at least one permissive policy to grant + access to records before restrictive policies can be usefully used to + reduce that access. If only restrictive policies exist, then no records + will be accessible. When a mix of permissive and restrictive policies + are present, a record is only accessible if at least one of the + permissive policies passes, in addition to all the restrictive + policies. + @@ -181,9 +191,9 @@ CREATE POLICY name ON SELECT), and will not be - available for modification (in an UPDATE - or DELETE). Such rows are silently suppressed; no error + visible to the user (in a SELECT), and will not be + available for modification (in an UPDATE + or DELETE). Such rows are silently suppressed; no error is reported. @@ -210,21 +220,20 @@ CREATE POLICY name ON - + Per-Command Policies - - ALL + + ALL Using ALL for a policy means that it will apply to all commands, regardless of the type of command. If an ALL policy exists and more specific policies exist, then both the ALL policy and the more - specific policy (or policies) will be combined using - OR, as usual for overlapping policies. + specific policy (or policies) will be applied. Additionally, ALL policies will be applied to both the selection side of a query and the modification side, using the USING expression for both cases if only @@ -247,8 +256,8 @@ CREATE POLICY name ON - - SELECT + + SELECT Using SELECT for a policy means that it will apply @@ -267,8 +276,8 @@ CREATE POLICY name ON - - INSERT + + INSERT Using INSERT for a policy means that it will apply @@ -288,16 +297,17 @@ CREATE POLICY name ON - - UPDATE + + UPDATE Using UPDATE for a policy means that it will apply - to UPDATE commands (or auxiliary ON - CONFLICT DO UPDATE clauses of INSERT - commands). Since UPDATE involves pulling an - existing record and then making changes to some portion (but - possibly not all) of the record, UPDATE + to UPDATE, SELECT FOR UPDATE + and SELECT FOR SHARE commands, as well as + auxiliary ON CONFLICT DO UPDATE clauses of + INSERT commands. Since UPDATE + involves pulling an existing record and replacing it with a new + modified record, UPDATE policies accept both a USING expression and a WITH CHECK expression. The USING expression determines which records @@ -306,22 +316,6 @@ CREATE POLICY name ON - - When an UPDATE command is used with a - WHERE clause or a RETURNING - clause, SELECT rights are also required on the - relation being updated and the appropriate SELECT - and ALL policies will be combined (using OR for any - overlapping SELECT related policies found) with the - USING clause of the UPDATE policy - using AND. Therefore, in order for a user to be able to - UPDATE specific rows, the user must have access - to the row(s) through a SELECT - or ALL policy and the row(s) must pass - the UPDATE policy's USING - expression. - - Any rows whose updated values do not pass the WITH CHECK expression will cause an error, and the @@ -331,27 +325,39 @@ CREATE POLICY name ON - Note, however, that INSERT with ON CONFLICT - DO UPDATE requires that an UPDATE policy - USING expression always be enforced as a - WITH CHECK expression. This - UPDATE policy must always pass when the - UPDATE path is taken. 
Any existing row that - necessitates that the UPDATE path be taken must - pass the (UPDATE or ALL) - USING qualifications (combined using OR), which - are always enforced as WITH CHECK - options in this context. (The UPDATE path will - never be silently avoided; an error will be thrown - instead.) Finally, the final row appended to the relation must pass - any WITH CHECK options that a conventional - UPDATE is required to pass. + Typically an UPDATE command also needs to read + data from columns in the relation being updated (e.g., in a + WHERE clause or a RETURNING + clause, or in an expression on the right hand side of the + SET clause). In this case, + SELECT rights are also required on the relation + being updated, and the appropriate SELECT or + ALL policies will be applied in addition to + the UPDATE policies. Thus the user must have + access to the row(s) being updated through a SELECT + or ALL policy in addition to being granted + permission to update the row(s) via an UPDATE + or ALL policy. + + + + When an INSERT command has an auxiliary + ON CONFLICT DO UPDATE clause, if the + UPDATE path is taken, the row to be updated is + first checked against the USING expressions of + any UPDATE policies, and then the new updated row + is checked against the WITH CHECK expressions. + Note, however, that unlike a standalone UPDATE + command, if the existing row does not pass the + USING expressions, an error will be thrown (the + UPDATE path will never be silently + avoided). - - DELETE + + DELETE Using DELETE for a policy means that it will apply @@ -364,19 +370,18 @@ CREATE POLICY name ON - When a DELETE command is used with a - WHERE clause or a RETURNING - clause, SELECT rights are also required on the - relation being updated and the appropriate SELECT - and ALL policies will be combined (using OR for any - overlapping SELECT related policies found) with the - USING clause of the DELETE policy - using AND. Therefore, in order for a user to be able to - DELETE specific rows, the user must have access - to the row(s) through a SELECT - or ALL policy and the row(s) must pass - the DELETE policy's USING - expression. + In most cases a DELETE command also needs to read + data from columns in the relation that it is deleting from (e.g., + in a WHERE clause or a + RETURNING clause). In this case, + SELECT rights are also required on the relation, + and the appropriate SELECT or + ALL policies will be applied in addition to + the DELETE policies. Thus the user must have + access to the row(s) being deleted through a SELECT + or ALL policy in addition to being granted + permission to delete the row(s) via a DELETE or + ALL policy. @@ -389,6 +394,171 @@ CREATE POLICY name ON + +
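A sketch of the interaction just described, with a hypothetical accounts table: an UPDATE (or DELETE) that reads existing column values must pass the SELECT policy's USING expression as well as the UPDATE policy's USING expression, and any updated row must additionally satisfy the UPDATE policy's WITH CHECK expression.

-- accounts and its owner column are hypothetical.
CREATE POLICY accounts_select ON accounts
    FOR SELECT
    USING (owner = current_user);

CREATE POLICY accounts_update ON accounts
    FOR UPDATE
    USING (owner = current_user)
    WITH CHECK (owner = current_user);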
+ Policies Applied by Command Type + + + + + + + Command + SELECT/ALL policy + INSERT/ALL policy + UPDATE/ALL policy + DELETE/ALL policy + + + USING expression + WITH CHECK expression + USING expression + WITH CHECK expression + USING expression + + + + + SELECT + Existing row + + + + + + + SELECT FOR UPDATE/SHARE + Existing row + + Existing row + + + + + INSERT + + New row + + + + + + INSERT ... RETURNING + + New row + + If read access is required to the existing or new row (for example, + a WHERE or RETURNING clause + that refers to columns from the relation). + + + + New row + + + + + + UPDATE + + Existing & new rows + + + Existing row + New row + + + + DELETE + + Existing row + + + + + Existing row + + + ON CONFLICT DO UPDATE + Existing & new rows + + Existing row + New row + + + + +
+ + + + + Application of Multiple Policies + + + When multiple policies of different command types apply to the same command + (for example, SELECT and UPDATE + policies applied to an UPDATE command), then the user + must have both types of permissions (for example, permission to select rows + from the relation as well as permission to update them). Thus the + expressions for one type of policy are combined with the expressions for + the other type of policy using the AND operator. + + + + When multiple policies of the same command type apply to the same command, + then there must be at least one PERMISSIVE policy + granting access to the relation, and all of the + RESTRICTIVE policies must pass. Thus all the + PERMISSIVE policy expressions are combined using + OR, all the RESTRICTIVE policy + expressions are combined using AND, and the results are + combined using AND. If there are no + PERMISSIVE policies, then access is denied. + + + + Note that, for the purposes of combining multiple policies, + ALL policies are treated as having the same type as + whichever other type of policy is being applied. + + + + For example, in an UPDATE command requiring both + SELECT and UPDATE permissions, if + there are multiple applicable policies of each type, they will be combined + as follows: + + +expression from RESTRICTIVE SELECT/ALL policy 1 +AND +expression from RESTRICTIVE SELECT/ALL policy 2 +AND +... +AND +( + expression from PERMISSIVE SELECT/ALL policy 1 + OR + expression from PERMISSIVE SELECT/ALL policy 2 + OR + ... +) +AND +expression from RESTRICTIVE UPDATE/ALL policy 1 +AND +expression from RESTRICTIVE UPDATE/ALL policy 2 +AND +... +AND +( + expression from PERMISSIVE UPDATE/ALL policy 1 + OR + expression from PERMISSIVE UPDATE/ALL policy 2 + OR + ... +) + + @@ -418,16 +588,6 @@ CREATE POLICY name ON - - Note that there needs to be at least one permissive policy to grant - access to records before restrictive policies can be usefully used to - reduce that access. If only restrictive policies exist, then no records - will be accessible. When a mix of permissive and restrictive policies - are present, a record is only accessible if at least one of the - permissive policies passes, in addition to all the restrictive - policies. - - Generally, the system will enforce filter conditions imposed using security policies prior to qualifications that appear in user queries, @@ -453,7 +613,7 @@ CREATE POLICY name ON Additional discussion and practical examples can be found - in . + in . @@ -471,9 +631,9 @@ CREATE POLICY name ON See Also - - - + + + diff --git a/doc/src/sgml/ref/create_procedure.sgml b/doc/src/sgml/ref/create_procedure.sgml new file mode 100644 index 0000000000..6c1de34b01 --- /dev/null +++ b/doc/src/sgml/ref/create_procedure.sgml @@ -0,0 +1,357 @@ + + + + + CREATE PROCEDURE + + + + CREATE PROCEDURE + 7 + SQL - Language Statements + + + + CREATE PROCEDURE + define a new procedure + + + + +CREATE [ OR REPLACE ] PROCEDURE + name ( [ [ argmode ] [ argname ] argtype [ { DEFAULT | = } default_expr ] [, ...] ] ) + { LANGUAGE lang_name + | TRANSFORM { FOR TYPE type_name } [, ... ] + | [ EXTERNAL ] SECURITY INVOKER | [ EXTERNAL ] SECURITY DEFINER + | SET configuration_parameter { TO value | = value | FROM CURRENT } + | AS 'definition' + | AS 'obj_file', 'link_symbol' + } ... + + + + + Description + + + CREATE PROCEDURE defines a new procedure. + CREATE OR REPLACE PROCEDURE will either create a + new procedure, or replace an existing definition. 
+ To be able to define a procedure, the user must have the + USAGE privilege on the language. + + + + If a schema name is included, then the procedure is created in the + specified schema. Otherwise it is created in the current schema. + The name of the new procedure must not match any existing procedure or function + with the same input argument types in the same schema. However, + procedures and functions of different argument types can share a name (this is + called overloading). + + + + To replace the current definition of an existing procedure, use + CREATE OR REPLACE PROCEDURE. It is not possible + to change the name or argument types of a procedure this way (if you + tried, you would actually be creating a new, distinct procedure). + + + + When CREATE OR REPLACE PROCEDURE is used to replace an + existing procedure, the ownership and permissions of the procedure + do not change. All other procedure properties are assigned the + values specified or implied in the command. You must own the procedure + to replace it (this includes being a member of the owning role). + + + + The user that creates the procedure becomes the owner of the procedure. + + + + To be able to create a procedure, you must have USAGE + privilege on the argument types. + + + + + Parameters + + + + name + + + + The name (optionally schema-qualified) of the procedure to create. + + + + + + argmode + + + + The mode of an argument: IN, + INOUT, or VARIADIC. If omitted, + the default is IN. (OUT + arguments are currently not supported for procedures. Use + INOUT instead.) + + + + + + argname + + + + The name of an argument. + + + + + + argtype + + + + The data type(s) of the procedure's arguments (optionally + schema-qualified), if any. The argument types can be base, composite, + or domain types, or can reference the type of a table column. + + + Depending on the implementation language it might also be allowed + to specify pseudo-types such as cstring. + Pseudo-types indicate that the actual argument type is either + incompletely specified, or outside the set of ordinary SQL data types. + + + The type of a column is referenced by writing + table_name.column_name%TYPE. + Using this feature can sometimes help make a procedure independent of + changes to the definition of a table. + + + + + + default_expr + + + + An expression to be used as default value if the parameter is + not specified. The expression has to be coercible to the + argument type of the parameter. + All input parameters following a + parameter with a default value must have default values as well. + + + + + + lang_name + + + + The name of the language that the procedure is implemented in. + It can be sql, c, + internal, or the name of a user-defined + procedural language, e.g. plpgsql. Enclosing the + name in single quotes is deprecated and requires matching case. + + + + + + TRANSFORM { FOR TYPE type_name } [, ... ] } + + + + Lists which transforms a call to the procedure should apply. Transforms + convert between SQL types and language-specific data types; + see . Procedural language + implementations usually have hardcoded knowledge of the built-in types, + so those don't need to be listed here. If a procedural language + implementation does not know how to handle a type and no transform is + supplied, it will fall back to a default behavior for converting data + types, but this depends on the implementation. 
+ + + + + + EXTERNAL SECURITY INVOKER + EXTERNAL SECURITY DEFINER + + + SECURITY INVOKER indicates that the procedure + is to be executed with the privileges of the user that calls it. + That is the default. SECURITY DEFINER + specifies that the procedure is to be executed with the + privileges of the user that owns it. + + + + The key word EXTERNAL is allowed for SQL + conformance, but it is optional since, unlike in SQL, this feature + applies to all procedures not only external ones. + + + + A SECURITY DEFINER procedure cannot execute + transaction control statements (for example, COMMIT + and ROLLBACK, depending on the language). + + + + + + configuration_parameter + value + + + The SET clause causes the specified configuration + parameter to be set to the specified value when the procedure is + entered, and then restored to its prior value when the procedure exits. + SET FROM CURRENT saves the value of the parameter that + is current when CREATE PROCEDURE is executed as the value + to be applied when the procedure is entered. + + + + If a SET clause is attached to a procedure, then + the effects of a SET LOCAL command executed inside the + procedure for the same variable are restricted to the procedure: the + configuration parameter's prior value is still restored at procedure exit. + However, an ordinary + SET command (without LOCAL) overrides the + SET clause, much as it would do for a previous SET + LOCAL command: the effects of such a command will persist after + procedure exit, unless the current transaction is rolled back. + + + + If a SET clause is attached to a procedure, then + that procedure cannot execute transaction control statements (for + example, COMMIT and ROLLBACK, + depending on the language). + + + + See and + + for more information about allowed parameter names and values. + + + + + + definition + + + + A string constant defining the procedure; the meaning depends on the + language. It can be an internal procedure name, the path to an + object file, an SQL command, or text in a procedural language. + + + + It is often helpful to use dollar quoting (see ) to write the procedure definition + string, rather than the normal single quote syntax. Without dollar + quoting, any single quotes or backslashes in the procedure definition must + be escaped by doubling them. + + + + + + + obj_file, link_symbol + + + + This form of the AS clause is used for + dynamically loadable C language procedures when the procedure name + in the C language source code is not the same as the name of + the SQL procedure. The string obj_file is the name of the shared + library file containing the compiled C procedure, and is interpreted + as for the command. The string + link_symbol is the + procedure's link symbol, that is, the name of the procedure in the C + language source code. If the link symbol is omitted, it is assumed + to be the same as the name of the SQL procedure being defined. + + + + When repeated CREATE PROCEDURE calls refer to + the same object file, the file is only loaded once per session. + To unload and + reload the file (perhaps during development), start a new session. + + + + + + + + + Notes + + + See for more details on function + creation that also apply to procedures. + + + + Use to execute a procedure. 
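A minimal sketch of the SET clause behavior described above, assuming PL/pgSQL is available (the procedure name is hypothetical); because a SET clause is attached, COMMIT or ROLLBACK inside the procedure would be rejected:

CREATE PROCEDURE show_search_path()
LANGUAGE plpgsql
SET search_path = pg_catalog
AS $$
BEGIN
    -- The SET clause applies for the duration of the call and the
    -- prior value is restored when the procedure exits.
    RAISE NOTICE 'search_path is %', current_setting('search_path');
    -- COMMIT;  -- would fail here, because of the attached SET clause
END;
$$;

CALL show_search_path();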
+ + + + + Examples + + +CREATE PROCEDURE insert_data(a integer, b integer) +LANGUAGE SQL +AS $$ +INSERT INTO tbl VALUES (a); +INSERT INTO tbl VALUES (b); +$$; + +CALL insert_data(1, 2); + + + + + Compatibility + + + A CREATE PROCEDURE command is defined in the SQL + standard. The PostgreSQL version is similar but + not fully compatible. For details see + also . + + + + + + See Also + + + + + + + + + + diff --git a/doc/src/sgml/ref/create_publication.sgml b/doc/src/sgml/ref/create_publication.sgml index 62a5fd432e..99f87ca393 100644 --- a/doc/src/sgml/ref/create_publication.sgml +++ b/doc/src/sgml/ref/create_publication.sgml @@ -3,7 +3,7 @@ doc/src/sgml/ref/create_publication.sgml PostgreSQL documentation --> - + CREATE PUBLICATION @@ -41,7 +41,7 @@ CREATE PUBLICATION name A publication is essentially a group of tables whose data changes are intended to be replicated through logical replication. See - for details about how + for details about how publications fit into the logical replication setup. @@ -64,10 +64,10 @@ CREATE PUBLICATION name Specifies a list of tables to add to the publication. If - ONLY is specified before the table name, only - that table is added to the publication. If ONLY is not + ONLY is specified before the table name, only + that table is added to the publication. If ONLY is not specified, the table and all its descendant tables (if any) are added. - Optionally, * can be specified after the table name to + Optionally, * can be specified after the table name to explicitly indicate that descendant tables are included. @@ -106,10 +106,11 @@ CREATE PUBLICATION name This parameter determines which DML operations will be published by the new publication to the subscribers. The value is comma-separated list of operations. The allowed operations are - insert, update, and - delete. The default is to publish all actions, + insert, update, + delete, and truncate. + The default is to publish all actions, and so the default value for this option is - 'insert, update, delete'. + 'insert, update, delete, truncate'.
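For example (the publication and table names are hypothetical), a publication that replicates only inserts and updates, leaving deletes and truncates unpublished, could be created with:

CREATE PUBLICATION users_ins_upd FOR TABLE users
    WITH (publish = 'insert, update');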
@@ -138,7 +139,7 @@ CREATE PUBLICATION name To create a publication, the invoking user must have the - CREATE privilege for the current database. + CREATE privilege for the current database. (Of course, superusers bypass this check.) @@ -151,12 +152,12 @@ CREATE PUBLICATION name The tables added to a publication that publishes UPDATE and/or DELETE operations must have - REPLICA IDENTITY defined. Otherwise those operations will be + REPLICA IDENTITY defined. Otherwise those operations will be disallowed on those tables. - For an INSERT ... ON CONFLICT command, the publication will + For an INSERT ... ON CONFLICT command, the publication will publish the operation that actually results from the command. So depending of the outcome, it may be published as either INSERT or UPDATE, or it may not be published at all. @@ -168,8 +169,7 @@ CREATE PUBLICATION name - TRUNCATE and DDL operations - are not published. + DDL operations are not published. @@ -203,7 +203,7 @@ CREATE PUBLICATION insert_only FOR TABLE mydata Compatibility - CREATE PUBLICATION is a PostgreSQL + CREATE PUBLICATION is a PostgreSQL extension. @@ -212,8 +212,8 @@ CREATE PUBLICATION insert_only FOR TABLE mydata See Also - - + + diff --git a/doc/src/sgml/ref/create_role.sgml b/doc/src/sgml/ref/create_role.sgml index 36772b678a..9c3b6978af 100644 --- a/doc/src/sgml/ref/create_role.sgml +++ b/doc/src/sgml/ref/create_role.sgml @@ -3,7 +3,7 @@ doc/src/sgml/ref/create_role.sgml PostgreSQL documentation --> - + CREATE ROLE @@ -21,9 +21,9 @@ PostgreSQL documentation -CREATE ROLE name [ [ WITH ] option [ ... ] ] +CREATE ROLE name [ [ WITH ] option [ ... ] ] -where option can be: +where option can be: SUPERUSER | NOSUPERUSER | CREATEDB | NOCREATEDB @@ -32,15 +32,15 @@ CREATE ROLE name [ [ WITH ] connlimit
- | [ ENCRYPTED ] PASSWORD 'password' - | VALID UNTIL 'timestamp' - | IN ROLE role_name [, ...] - | IN GROUP role_name [, ...] - | ROLE role_name [, ...] - | ADMIN role_name [, ...] - | USER role_name [, ...] - | SYSID uid + | CONNECTION LIMIT connlimit + | [ ENCRYPTED ] PASSWORD 'password' + | VALID UNTIL 'timestamp' + | IN ROLE role_name [, ...] + | IN GROUP role_name [, ...] + | ROLE role_name [, ...] + | ADMIN role_name [, ...] + | USER role_name [, ...] + | SYSID uid @@ -51,11 +51,11 @@ CREATE ROLE name [ [ WITH ] CREATE ROLE adds a new role to a PostgreSQL database cluster. A role is an entity that can own database objects and have database privileges; - a role can be considered a user, a group, or both + a role can be considered a user, a group, or both depending on how it is used. Refer to - and for information about managing - users and authentication. You must have CREATEROLE + and for information about managing + users and authentication. You must have CREATEROLE privilege or be a database superuser to use this command. @@ -83,7 +83,7 @@ CREATE ROLE name [ [ WITH ] NOSUPERUSER - These clauses determine whether the new role is a superuser, + These clauses determine whether the new role is a superuser, who can override all access restrictions within the database. Superuser status is dangerous and should be used only when really needed. You must yourself be a superuser to create a new superuser. @@ -94,8 +94,8 @@ CREATE ROLE name [ [ WITH ] - CREATEDB - NOCREATEDB + CREATEDB + NOCREATEDB These clauses define a role's ability to create databases. If @@ -128,13 +128,13 @@ CREATE ROLE name [ [ WITH ] NOINHERIT - These clauses determine whether a role inherits the + These clauses determine whether a role inherits the privileges of roles it is a member of. A role with the INHERIT attribute can automatically use whatever database privileges have been granted to all roles it is directly or indirectly a member of. Without INHERIT, membership in another role - only grants the ability to SET ROLE to that other role; + only grants the ability to SET ROLE to that other role; the privileges of the other role are only available after having done so. If not specified, @@ -156,8 +156,8 @@ CREATE ROLE name [ [ WITH ] NOLOGIN is the default, except when - CREATE ROLE is invoked through its alternative spelling - . + CREATE ROLE is invoked through its alternative spelling + . @@ -172,7 +172,7 @@ CREATE ROLE name [ [ WITH ] REPLICATION attribute is a very + A role having the REPLICATION attribute is a very highly privileged role, and should only be used on roles actually used for replication. If not specified, NOREPLICATION is the default. @@ -210,7 +210,7 @@ CREATE ROLE name [ [ WITH ] - [ ENCRYPTED ] PASSWORD password + [ ENCRYPTED ] PASSWORD password Sets the role's password. (A password is only of use for @@ -225,7 +225,7 @@ CREATE ROLE name [ [ WITH ] Specifying an empty string will also set the password to null, - but that was not the case before PostgreSQL + but that was not the case before PostgreSQL version 10. In earlier versions, an empty string could be used, or not, depending on the authentication method and the exact version, and libpq would refuse to use it in any case. @@ -235,12 +235,12 @@ CREATE ROLE name [ [ WITH ] The password is always stored encrypted in the system catalogs. The - ENCRYPTED keyword has no effect, but is accepted for + ENCRYPTED keyword has no effect, but is accepted for backwards compatibility. 
The method of encryption is determined - by the configuration parameter . + by the configuration parameter . If the presented password string is already in MD5-encrypted or SCRAM-encrypted format, then it is stored as-is regardless of - password_encryption (since the system cannot decrypt + password_encryption (since the system cannot decrypt the specified encrypted password string, to encrypt it in a different format). This allows reloading of encrypted passwords during dump/restore. @@ -260,61 +260,61 @@ CREATE ROLE name [ [ WITH ] - IN ROLE role_name + IN ROLE role_name The IN ROLE clause lists one or more existing roles to which the new role will be immediately added as a new member. (Note that there is no option to add the new role as an - administrator; use a separate GRANT command to do that.) + administrator; use a separate GRANT command to do that.) - IN GROUP role_name + IN GROUP role_name IN GROUP is an obsolete spelling of - IN ROLE. + IN ROLE. - ROLE role_name + ROLE role_name The ROLE clause lists one or more existing roles which are automatically added as members of the new role. - (This in effect makes the new role a group.) + (This in effect makes the new role a group.) - ADMIN role_name + ADMIN role_name The ADMIN clause is like ROLE, but the named roles are added to the new role WITH ADMIN - OPTION, giving them the right to grant membership in this role + OPTION, giving them the right to grant membership in this role to others. - USER role_name + USER role_name The USER clause is an obsolete spelling of - the ROLE clause. + the ROLE clause. - SYSID uid + SYSID uid The SYSID clause is ignored, but is accepted @@ -329,71 +329,71 @@ CREATE ROLE name [ [ WITH ] Notes - Use to - change the attributes of a role, and + Use to + change the attributes of a role, and to remove a role. All the attributes - specified by CREATE ROLE can be modified by later - ALTER ROLE commands. + specified by CREATE ROLE can be modified by later + ALTER ROLE commands. The preferred way to add and remove members of roles that are being used as groups is to use - and - . + and + . - The VALID UNTIL clause defines an expiration time for a - password only, not for the role per se. In + The VALID UNTIL clause defines an expiration time for a + password only, not for the role per se. In particular, the expiration time is not enforced when logging in using a non-password-based authentication method. - The INHERIT attribute governs inheritance of grantable + The INHERIT attribute governs inheritance of grantable privileges (that is, access privileges for database objects and role memberships). It does not apply to the special role attributes set by - CREATE ROLE and ALTER ROLE. For example, being - a member of a role with CREATEDB privilege does not immediately - grant the ability to create databases, even if INHERIT is set; + CREATE ROLE and ALTER ROLE. For example, being + a member of a role with CREATEDB privilege does not immediately + grant the ability to create databases, even if INHERIT is set; it would be necessary to become that role via - before + before creating a database. - The INHERIT attribute is the default for reasons of backwards + The INHERIT attribute is the default for reasons of backwards compatibility: in prior releases of PostgreSQL, users always had access to all privileges of groups they were members of. - However, NOINHERIT provides a closer match to the semantics + However, NOINHERIT provides a closer match to the semantics specified in the SQL standard. 
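A short sketch of the INHERIT/NOINHERIT distinction discussed above (the role and table names are hypothetical):

CREATE ROLE readers NOLOGIN;
GRANT SELECT ON mytable TO readers;       -- mytable is hypothetical

-- alice is a member of readers but does not inherit its privileges.
CREATE ROLE alice LOGIN NOINHERIT IN ROLE readers;

-- Connected as alice, the SELECT privilege only becomes usable after:
SET ROLE readers;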
- Be careful with the CREATEROLE privilege. There is no concept of - inheritance for the privileges of a CREATEROLE-role. That + Be careful with the CREATEROLE privilege. There is no concept of + inheritance for the privileges of a CREATEROLE-role. That means that even if a role does not have a certain privilege but is allowed to create other roles, it can easily create another role with different privileges than its own (except for creating roles with superuser - privileges). For example, if the role user has the - CREATEROLE privilege but not the CREATEDB privilege, - nonetheless it can create a new role with the CREATEDB - privilege. Therefore, regard roles that have the CREATEROLE + privileges). For example, if the role user has the + CREATEROLE privilege but not the CREATEDB privilege, + nonetheless it can create a new role with the CREATEDB + privilege. Therefore, regard roles that have the CREATEROLE privilege as almost-superuser-roles. PostgreSQL includes a program that has + linkend="app-createuser"/> that has the same functionality as CREATE ROLE (in fact, it calls this command) but can be run from the command shell. - The CONNECTION LIMIT option is only enforced approximately; + The CONNECTION LIMIT option is only enforced approximately; if two new sessions start at about the same time when just one - connection slot remains for the role, it is possible that + connection slot remains for the role, it is possible that both will fail. Also, the limit is never enforced for superusers. @@ -402,8 +402,8 @@ CREATE ROLE name [ [ WITH ] , however, transmits - the password encrypted. Also, + linkend="app-createuser"/>, however, transmits + the password encrypted. Also, contains a command \password that can be used to safely change the password later. @@ -425,8 +425,8 @@ CREATE ROLE jonathan LOGIN; CREATE USER davide WITH PASSWORD 'jw8s0F4'; - (CREATE USER is the same as CREATE ROLE except - that it implies LOGIN.) + (CREATE USER is the same as CREATE ROLE except + that it implies LOGIN.) @@ -453,7 +453,7 @@ CREATE ROLE admin WITH CREATEDB CREATEROLE; The CREATE ROLE statement is in the SQL standard, but the standard only requires the syntax -CREATE ROLE name [ WITH ADMIN role_name ] +CREATE ROLE name [ WITH ADMIN role_name ] Multiple initial administrators, and all the other options of CREATE ROLE, are @@ -471,8 +471,8 @@ CREATE ROLE name [ WITH ADMIN The behavior specified by the SQL standard is most closely approximated - by giving users the NOINHERIT attribute, while roles are - given the INHERIT attribute. + by giving users the NOINHERIT attribute, while roles are + given the INHERIT attribute. @@ -480,12 +480,12 @@ CREATE ROLE name [ WITH ADMIN See Also - - - - - - + + + + + + diff --git a/doc/src/sgml/ref/create_rule.sgml b/doc/src/sgml/ref/create_rule.sgml index 53fdf56621..dbf4c93784 100644 --- a/doc/src/sgml/ref/create_rule.sgml +++ b/doc/src/sgml/ref/create_rule.sgml @@ -3,7 +3,7 @@ doc/src/sgml/ref/create_rule.sgml PostgreSQL documentation --> - + CREATE RULE @@ -55,7 +55,7 @@ CREATE [ OR REPLACE ] RULE name AS transformation happens before the execution of the command starts. If you actually want an operation that fires independently for each physical row, you probably want to use a trigger, not a rule. - More information about the rules system is in . + More information about the rules system is in . 
@@ -76,13 +76,13 @@ CREATE [ OR REPLACE ] RULE name AS ON DELETE rules (or any subset of those that's sufficient for your purposes) to replace update actions on the view with appropriate updates on other tables. If you want to support - INSERT RETURNING and so on, then be sure to put a suitable - RETURNING clause into each of these rules. + INSERT RETURNING and so on, then be sure to put a suitable + RETURNING clause into each of these rules. There is a catch if you try to use conditional rules for complex view - updates: there must be an unconditional + updates: there must be an unconditional INSTEAD rule for each action you wish to allow on the view. If the rule is conditional, or is not INSTEAD, then the system will still reject @@ -95,21 +95,21 @@ CREATE [ OR REPLACE ] RULE name AS Then make the conditional rules non-INSTEAD; in the cases where they are applied, they add to the default INSTEAD NOTHING action. (This method does not - currently work to support RETURNING queries, however.) + currently work to support RETURNING queries, however.) A view that is simple enough to be automatically updatable (see ) does not require a user-created rule in + linkend="sql-createview"/>) does not require a user-created rule in order to be updatable. While you can create an explicit rule anyway, the automatic update transformation will generally outperform an explicit rule. - Another alternative worth considering is to use INSTEAD OF - triggers (see ) in place of rules. + Another alternative worth considering is to use INSTEAD OF + triggers (see ) in place of rules. @@ -161,7 +161,7 @@ CREATE [ OR REPLACE ] RULE name AS Any SQL conditional expression (returning boolean). The condition expression cannot refer - to any tables except NEW and OLD, and + to any tables except NEW and OLD, and cannot contain aggregate functions. @@ -171,7 +171,7 @@ CREATE [ OR REPLACE ] RULE name AS INSTEAD indicates that the commands should be - executed instead of the original command. + executed instead of the original command. @@ -227,19 +227,19 @@ CREATE [ OR REPLACE ] RULE name AS In a rule for INSERT, UPDATE, or - DELETE on a view, you can add a RETURNING + DELETE on a view, you can add a RETURNING clause that emits the view's columns. This clause will be used to compute - the outputs if the rule is triggered by an INSERT RETURNING, - UPDATE RETURNING, or DELETE RETURNING command + the outputs if the rule is triggered by an INSERT RETURNING, + UPDATE RETURNING, or DELETE RETURNING command respectively. When the rule is triggered by a command without - RETURNING, the rule's RETURNING clause will be + RETURNING, the rule's RETURNING clause will be ignored. The current implementation allows only unconditional - INSTEAD rules to contain RETURNING; furthermore - there can be at most one RETURNING clause among all the rules + INSTEAD rules to contain RETURNING; furthermore + there can be at most one RETURNING clause among all the rules for the same event. (This ensures that there is only one candidate - RETURNING clause to be used to compute the results.) - RETURNING queries on the view will be rejected if - there is no RETURNING clause in any available rule. + RETURNING clause to be used to compute the results.) + RETURNING queries on the view will be rejected if + there is no RETURNING clause in any available rule. 
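A minimal sketch of an unconditional INSTEAD rule carrying a RETURNING clause (the table, view, and rule names are hypothetical; a view this simple would also be automatically updatable, so the explicit rule is purely illustrative):

CREATE TABLE t (id int, name text);
CREATE VIEW v AS SELECT id, name FROM t;

CREATE RULE v_insert AS ON INSERT TO v
    DO INSTEAD
    INSERT INTO t VALUES (NEW.id, NEW.name)
    RETURNING t.id, t.name;

-- Because the rule has a RETURNING clause matching the view's columns,
-- this is accepted and returns the inserted row:
INSERT INTO v VALUES (1, 'one') RETURNING *;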
@@ -297,8 +297,8 @@ UPDATE mytable SET name = 'foo' WHERE id = 42; See Also - - + + diff --git a/doc/src/sgml/ref/create_schema.sgml b/doc/src/sgml/ref/create_schema.sgml index 554a4483c5..ffbe1ba3bc 100644 --- a/doc/src/sgml/ref/create_schema.sgml +++ b/doc/src/sgml/ref/create_schema.sgml @@ -3,7 +3,7 @@ doc/src/sgml/ref/create_schema.sgml PostgreSQL documentation --> - + CREATE SCHEMA @@ -21,14 +21,14 @@ PostgreSQL documentation -CREATE SCHEMA schema_name [ AUTHORIZATION role_specification ] [ schema_element [ ... ] ] -CREATE SCHEMA AUTHORIZATION role_specification [ schema_element [ ... ] ] -CREATE SCHEMA IF NOT EXISTS schema_name [ AUTHORIZATION role_specification ] -CREATE SCHEMA IF NOT EXISTS AUTHORIZATION role_specification +CREATE SCHEMA schema_name [ AUTHORIZATION role_specification ] [ schema_element [ ... ] ] +CREATE SCHEMA AUTHORIZATION role_specification [ schema_element [ ... ] ] +CREATE SCHEMA IF NOT EXISTS schema_name [ AUTHORIZATION role_specification ] +CREATE SCHEMA IF NOT EXISTS AUTHORIZATION role_specification -where role_specification can be: +where role_specification can be: - [ GROUP ] user_name + user_name | CURRENT_USER | SESSION_USER @@ -48,9 +48,9 @@ CREATE SCHEMA IF NOT EXISTS AUTHORIZATION role_sp A schema is essentially a namespace: it contains named objects (tables, data types, functions, and operators) whose names can duplicate those of other objects existing in other - schemas. Named objects are accessed either by qualifying + schemas. Named objects are accessed either by qualifying their names with the schema name as a prefix, or by setting a search - path that includes the desired schema(s). A CREATE command + path that includes the desired schema(s). A CREATE command specifying an unqualified object name creates the object in the current schema (the one at the front of the search path, which can be determined with the function current_schema). @@ -60,7 +60,7 @@ CREATE SCHEMA IF NOT EXISTS AUTHORIZATION role_sp Optionally, CREATE SCHEMA can include subcommands to create objects within the new schema. The subcommands are treated essentially the same as separate commands issued after creating the - schema, except that if the AUTHORIZATION clause is used, + schema, except that if the AUTHORIZATION clause is used, all the created objects will be owned by that user. @@ -100,10 +100,10 @@ CREATE SCHEMA IF NOT EXISTS AUTHORIZATION role_sp An SQL statement defining an object to be created within the schema. Currently, only CREATE - TABLE, CREATE VIEW, CREATE - INDEX, CREATE SEQUENCE, CREATE - TRIGGER and GRANT are accepted as clauses - within CREATE SCHEMA. Other kinds of objects may + TABLE, CREATE VIEW, CREATE + INDEX, CREATE SEQUENCE, CREATE + TRIGGER and GRANT are accepted as clauses + within CREATE SCHEMA. Other kinds of objects may be created in separate commands after the schema is created. @@ -114,7 +114,7 @@ CREATE SCHEMA IF NOT EXISTS AUTHORIZATION role_sp Do nothing (except issuing a notice) if a schema with the same name - already exists. schema_element + already exists. schema_element subcommands cannot be included when this option is used. @@ -127,7 +127,7 @@ CREATE SCHEMA IF NOT EXISTS AUTHORIZATION role_sp To create a schema, the invoking user must have the - CREATE privilege for the current database. + CREATE privilege for the current database. (Of course, superusers bypass this check.) 
@@ -143,17 +143,17 @@ CREATE SCHEMA myschema; - Create a schema for user joe; the schema will also be - named joe: + Create a schema for user joe; the schema will also be + named joe: CREATE SCHEMA AUTHORIZATION joe; - Create a schema named test that will be owned by user - joe, unless there already is a schema named test. - (It does not matter whether joe owns the pre-existing schema.) + Create a schema named test that will be owned by user + joe, unless there already is a schema named test. + (It does not matter whether joe owns the pre-existing schema.) CREATE SCHEMA IF NOT EXISTS test AUTHORIZATION joe; @@ -185,7 +185,7 @@ CREATE VIEW hollywood.winners AS Compatibility - The SQL standard allows a DEFAULT CHARACTER SET clause + The SQL standard allows a DEFAULT CHARACTER SET clause in CREATE SCHEMA, as well as more subcommand types than are presently accepted by PostgreSQL. @@ -205,7 +205,7 @@ CREATE VIEW hollywood.winners AS all objects within it. PostgreSQL allows schemas to contain objects owned by users other than the schema owner. This can happen only if the schema owner grants the - CREATE privilege on their schema to someone else, or a + CREATE privilege on their schema to someone else, or a superuser chooses to create objects in it. @@ -219,8 +219,8 @@ CREATE VIEW hollywood.winners AS See Also - - + + diff --git a/doc/src/sgml/ref/create_sequence.sgml b/doc/src/sgml/ref/create_sequence.sgml index f1448e7ab3..3e0d339c85 100644 --- a/doc/src/sgml/ref/create_sequence.sgml +++ b/doc/src/sgml/ref/create_sequence.sgml @@ -3,7 +3,7 @@ doc/src/sgml/ref/create_sequence.sgml PostgreSQL documentation --> - + CREATE SEQUENCE @@ -56,7 +56,7 @@ CREATE [ TEMPORARY | TEMP ] SEQUENCE [ IF NOT EXISTS ] . + . @@ -67,10 +67,10 @@ SELECT * FROM name; to examine the parameters and current state of a sequence. In particular, - the last_value field of the sequence shows the last value + the last_value field of the sequence shows the last value allocated by any session. (Of course, this value might be obsolete by the time it's printed, if other sessions are actively doing - nextval calls.) + nextval calls.) @@ -119,7 +119,7 @@ SELECT * FROM name; The optional clause AS data_type specifies the data type of the sequence. Valid types are - are smallint, integer, + smallint, integer, and bigint. bigint is the default. The data type determines the default minimum and maximum values of the sequence. @@ -250,14 +250,14 @@ SELECT * FROM name; - Sequences are based on bigint arithmetic, so the range + Sequences are based on bigint arithmetic, so the range cannot exceed the range of an eight-byte integer (-9223372036854775808 to 9223372036854775807). - Because nextval and setval calls are never - rolled back, sequence objects cannot be used if gapless + Because nextval and setval calls are never + rolled back, sequence objects cannot be used if gapless assignment of sequence numbers is needed. It is possible to build gapless assignment by using exclusive locking of a table containing a counter; but this solution is much more expensive than sequence @@ -271,9 +271,9 @@ SELECT * FROM name; used for a sequence object that will be used concurrently by multiple sessions. Each session will allocate and cache successive sequence values during one access to the sequence object and - increase the sequence object's last_value accordingly. + increase the sequence object's last_value accordingly. 
Then, the next cache-1 - uses of nextval within that session simply return the + uses of nextval within that session simply return the preallocated values without touching the sequence object. So, any numbers allocated but not used within a session will be lost when that session ends, resulting in holes in the @@ -290,18 +290,18 @@ SELECT * FROM name; 11..20 and return nextval=11 before session A has generated nextval=2. Thus, with a cache setting of one - it is safe to assume that nextval values are generated + it is safe to assume that nextval values are generated sequentially; with a cache setting greater than one you - should only assume that the nextval values are all + should only assume that the nextval values are all distinct, not that they are generated purely sequentially. Also, - last_value will reflect the latest value reserved by + last_value will reflect the latest value reserved by any session, whether or not it has yet been returned by - nextval. + nextval. - Another consideration is that a setval executed on + Another consideration is that a setval executed on such a sequence will not be noticed by other sessions until they have used up any preallocated values they have cached. @@ -365,14 +365,14 @@ END; - Obtaining the next value is done using the nextval() + Obtaining the next value is done using the nextval() function instead of the standard's NEXT VALUE FOR expression. - The OWNED BY clause is a PostgreSQL + The OWNED BY clause is a PostgreSQL extension. @@ -383,8 +383,8 @@ END; See Also - - + + diff --git a/doc/src/sgml/ref/create_server.sgml b/doc/src/sgml/ref/create_server.sgml index 7318481487..af0a7a06fd 100644 --- a/doc/src/sgml/ref/create_server.sgml +++ b/doc/src/sgml/ref/create_server.sgml @@ -3,7 +3,7 @@ doc/src/sgml/ref/create_server.sgml PostgreSQL documentation --> - + CREATE SERVER @@ -21,9 +21,9 @@ PostgreSQL documentation -CREATE SERVER [IF NOT EXISTS] server_name [ TYPE 'server_type' ] [ VERSION 'server_version' ] +CREATE SERVER [ IF NOT EXISTS ] server_name [ TYPE 'server_type' ] [ VERSION 'server_version' ] FOREIGN DATA WRAPPER fdw_name - [ OPTIONS ( option 'value' [, ... ] ) ] + [ OPTIONS ( option 'value' [, ... ] ) ] @@ -47,7 +47,7 @@ CREATE SERVER [IF NOT EXISTS] server_name - Creating a server requires USAGE privilege on the + Creating a server requires USAGE privilege on the foreign-data wrapper being used. @@ -57,7 +57,7 @@ CREATE SERVER [IF NOT EXISTS] server_name - IF NOT EXISTS + IF NOT EXISTS Do not throw an error if a server with the same name already exists. @@ -105,7 +105,7 @@ CREATE SERVER [IF NOT EXISTS] server_name - OPTIONS ( option 'value' [, ... ] ) + OPTIONS ( option 'value' [, ... ] ) This clause specifies the options for the server. The options @@ -122,9 +122,9 @@ CREATE SERVER [IF NOT EXISTS] server_nameNotes - When using the module, + When using the module, a foreign server's name can be used - as an argument of the + as an argument of the function to indicate the connection parameters. It is necessary to have the USAGE privilege on the foreign server to be able to use it in this way. @@ -135,12 +135,12 @@ CREATE SERVER [IF NOT EXISTS] server_nameExamples - Create a server myserver that uses the - foreign-data wrapper postgres_fdw: + Create a server myserver that uses the + foreign-data wrapper postgres_fdw: CREATE SERVER myserver FOREIGN DATA WRAPPER postgres_fdw OPTIONS (host 'foo', dbname 'foodb', port '5432'); - See for more details. + See for more details. 
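As a further sketch of the dblink usage mentioned in the Notes, assuming the dblink extension is installed, the invoking user has USAGE on the server, and a suitable user mapping exists (the connection name is hypothetical), the server created above can be referenced by name:

SELECT dblink_connect('myconn', 'myserver');
SELECT * FROM dblink('myconn', 'SELECT version()') AS t(v text);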
@@ -156,11 +156,11 @@ CREATE SERVER myserver FOREIGN DATA WRAPPER postgres_fdw OPTIONS (host 'foo', db See Also - - - - - + + + + + diff --git a/doc/src/sgml/ref/create_statistics.sgml b/doc/src/sgml/ref/create_statistics.sgml index deda21fec7..539f5bded5 100644 --- a/doc/src/sgml/ref/create_statistics.sgml +++ b/doc/src/sgml/ref/create_statistics.sgml @@ -3,7 +3,7 @@ doc/src/sgml/ref/create_statistics.sgml PostgreSQL documentation --> - + CREATE STATISTICS @@ -21,15 +21,15 @@ PostgreSQL documentation -CREATE STATISTICS [ IF NOT EXISTS ] statistics_name - [ ( statistic_type [, ... ] ) ] - ON column_name, column_name [, ...] - FROM table_name +CREATE STATISTICS [ IF NOT EXISTS ] statistics_name + [ ( statistics_kind [, ... ] ) ] + ON column_name, column_name [, ...] + FROM table_name - + Description @@ -41,7 +41,7 @@ CREATE STATISTICS [ IF NOT EXISTS ] statistics_na If a schema name is given (for example, CREATE STATISTICS - myschema.mystat ...) then the statistics object is created in the + myschema.mystat ...) then the statistics object is created in the specified schema. Otherwise it is created in the current schema. The name of the statistics object must be distinct from the name of any other statistics object in the same schema. @@ -54,7 +54,7 @@ CREATE STATISTICS [ IF NOT EXISTS ] statistics_na - IF NOT EXISTS + IF NOT EXISTS Do not throw an error if a statistics object with the same name already @@ -66,7 +66,7 @@ CREATE STATISTICS [ IF NOT EXISTS ] statistics_na - statistics_name + statistics_name The name (optionally schema-qualified) of the statistics object to be @@ -76,24 +76,24 @@ CREATE STATISTICS [ IF NOT EXISTS ] statistics_na - statistic_type + statistics_kind - A statistic type to be computed in this statistics object. - Currently supported types are + A statistics kind to be computed in this statistics object. + Currently supported kinds are ndistinct, which enables n-distinct statistics, and dependencies, which enables functional dependency statistics. - If this clause is omitted, all supported statistic types are + If this clause is omitted, all supported statistics kinds are included in the statistics object. - For more information, see - and . + For more information, see + and . - column_name + column_name The name of a table column to be covered by the computed statistics. @@ -103,7 +103,7 @@ CREATE STATISTICS [ IF NOT EXISTS ] statistics_na - table_name + table_name The name (optionally schema-qualified) of the table containing the @@ -125,11 +125,11 @@ CREATE STATISTICS [ IF NOT EXISTS ] statistics_na - + Examples - Create table t1 with two functionally dependent columns, i.e. + Create table t1 with two functionally dependent columns, i.e. knowledge of a value in the first column is sufficient for determining the value in the other column. Then functional dependency statistics are built on those columns: @@ -157,11 +157,11 @@ EXPLAIN ANALYZE SELECT * FROM t1 WHERE (a = 1) AND (b = 0); Without functional-dependency statistics, the planner would assume - that the two WHERE conditions are independent, and would + that the two WHERE conditions are independent, and would multiply their selectivities together to arrive at a much-too-small row count estimate. - With such statistics, the planner recognizes that the WHERE - conditions are redundant and does not underestimate the rowcount. + With such statistics, the planner recognizes that the WHERE + conditions are redundant and does not underestimate the row count. 
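The example above covers functional-dependency statistics only; a similar sketch for the ndistinct kind (the table and statistics object names are hypothetical) helps the planner estimate the number of groups when grouping by both columns:

CREATE TABLE t2 (a int, b int);
INSERT INTO t2 SELECT i / 100, i / 500
  FROM generate_series(1, 100000) s(i);

-- Without extended statistics the planner multiplies the per-column
-- n-distinct estimates; with them it can use the combined estimate.
CREATE STATISTICS s2 (ndistinct) ON a, b FROM t2;
ANALYZE t2;

EXPLAIN SELECT a, b FROM t2 GROUP BY a, b;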
@@ -178,8 +178,8 @@ EXPLAIN ANALYZE SELECT * FROM t1 WHERE (a = 1) AND (b = 0); See Also - - + + diff --git a/doc/src/sgml/ref/create_subscription.sgml b/doc/src/sgml/ref/create_subscription.sgml index 9f45b6f574..1a90c244fb 100644 --- a/doc/src/sgml/ref/create_subscription.sgml +++ b/doc/src/sgml/ref/create_subscription.sgml @@ -3,7 +3,7 @@ doc/src/sgml/ref/create_subscription.sgml PostgreSQL documentation --> - + CREATE SUBSCRIPTION @@ -21,9 +21,9 @@ PostgreSQL documentation -CREATE SUBSCRIPTION subscription_name - CONNECTION 'conninfo' - PUBLICATION publication_name [, ...] +CREATE SUBSCRIPTION subscription_name + CONNECTION 'conninfo' + PUBLICATION publication_name [, ...] [ WITH ( subscription_parameter [= value] [, ... ] ) ] @@ -49,14 +49,9 @@ CREATE SUBSCRIPTION subscription_name - CREATE SUBSCRIPTION cannot be executed inside a - transaction block when the parameter create_slot is specified. - - - - Additional info about subscriptions and logical replication as a whole - can is available at and - . + Additional information about subscriptions and logical replication as a + whole is available at and + . @@ -79,7 +74,7 @@ CREATE SUBSCRIPTION subscription_name The connection string to the publisher. For details - see . + see . @@ -157,7 +152,7 @@ CREATE SUBSCRIPTION subscription_name The value of this parameter overrides the - setting. The default + setting. The default value is off. @@ -206,7 +201,7 @@ CREATE SUBSCRIPTION subscription_namefalse, the tables are not subscribed, and so after you enable the subscription nothing will be replicated. It is required to run - ALTER SUBSCRIPTION ... REFRESH PUBLICATION in order + ALTER SUBSCRIPTION ... REFRESH PUBLICATION in order for tables to be subscribed. @@ -222,11 +217,16 @@ CREATE SUBSCRIPTION subscription_nameNotes - See for details on + See for details on how to configure access control between the subscription and the publication instance. + + When creating a replication slot (the default behavior), CREATE + SUBSCRIPTION cannot be executed inside a transaction block. + + Creating a subscription that connects to the same database cluster (for example, to replicate between databases in the same cluster or to replicate @@ -272,7 +272,7 @@ CREATE SUBSCRIPTION mysub Compatibility - CREATE SUBSCRIPTION is a PostgreSQL + CREATE SUBSCRIPTION is a PostgreSQL extension. @@ -281,10 +281,10 @@ CREATE SUBSCRIPTION mysub See Also - - - - + + + + diff --git a/doc/src/sgml/ref/create_table.sgml b/doc/src/sgml/ref/create_table.sgml index e9c2c49533..4b9c8a7801 100644 --- a/doc/src/sgml/ref/create_table.sgml +++ b/doc/src/sgml/ref/create_table.sgml @@ -3,7 +3,7 @@ doc/src/sgml/ref/create_table.sgml PostgreSQL documentation --> - + CREATE TABLE @@ -21,88 +21,90 @@ PostgreSQL documentation -CREATE [ [ GLOBAL | LOCAL ] { TEMPORARY | TEMP } | UNLOGGED ] TABLE [ IF NOT EXISTS ] table_name ( [ - { column_name data_type [ COLLATE collation ] [ column_constraint [ ... ] ] +CREATE [ [ GLOBAL | LOCAL ] { TEMPORARY | TEMP } | UNLOGGED ] TABLE [ IF NOT EXISTS ] table_name ( [ + { column_name data_type [ COLLATE collation ] [ column_constraint [ ... ] ] | table_constraint | LIKE source_table [ like_option ... ] } [, ... ] ] ) [ INHERITS ( parent_table [, ... ] ) ] -[ PARTITION BY { RANGE | LIST } ( { column_name | ( expression ) } [ COLLATE collation ] [ opclass ] [, ... ] ) ] -[ WITH ( storage_parameter [= value] [, ... 
] ) | WITH OIDS | WITHOUT OIDS ] +[ PARTITION BY { RANGE | LIST | HASH } ( { column_name | ( expression ) } [ COLLATE collation ] [ opclass ] [, ... ] ) ] +[ WITH ( storage_parameter [= value] [, ... ] ) | WITH OIDS | WITHOUT OIDS ] [ ON COMMIT { PRESERVE ROWS | DELETE ROWS | DROP } ] -[ TABLESPACE tablespace_name ] +[ TABLESPACE tablespace_name ] -CREATE [ [ GLOBAL | LOCAL ] { TEMPORARY | TEMP } | UNLOGGED ] TABLE [ IF NOT EXISTS ] table_name - OF type_name [ ( - { column_name [ WITH OPTIONS ] [ column_constraint [ ... ] ] +CREATE [ [ GLOBAL | LOCAL ] { TEMPORARY | TEMP } | UNLOGGED ] TABLE [ IF NOT EXISTS ] table_name + OF type_name [ ( + { column_name [ WITH OPTIONS ] [ column_constraint [ ... ] ] | table_constraint } [, ... ] ) ] -[ PARTITION BY { RANGE | LIST } ( { column_name | ( expression ) } [ COLLATE collation ] [ opclass ] [, ... ] ) ] -[ WITH ( storage_parameter [= value] [, ... ] ) | WITH OIDS | WITHOUT OIDS ] +[ PARTITION BY { RANGE | LIST | HASH } ( { column_name | ( expression ) } [ COLLATE collation ] [ opclass ] [, ... ] ) ] +[ WITH ( storage_parameter [= value] [, ... ] ) | WITH OIDS | WITHOUT OIDS ] [ ON COMMIT { PRESERVE ROWS | DELETE ROWS | DROP } ] -[ TABLESPACE tablespace_name ] +[ TABLESPACE tablespace_name ] -CREATE [ [ GLOBAL | LOCAL ] { TEMPORARY | TEMP } | UNLOGGED ] TABLE [ IF NOT EXISTS ] table_name - PARTITION OF parent_table [ ( - { column_name [ WITH OPTIONS ] [ column_constraint [ ... ] ] +CREATE [ [ GLOBAL | LOCAL ] { TEMPORARY | TEMP } | UNLOGGED ] TABLE [ IF NOT EXISTS ] table_name + PARTITION OF parent_table [ ( + { column_name [ WITH OPTIONS ] [ column_constraint [ ... ] ] | table_constraint } [, ... ] -) ] FOR VALUES partition_bound_spec -[ PARTITION BY { RANGE | LIST } ( { column_name | ( expression ) } [ COLLATE collation ] [ opclass ] [, ... ] ) ] -[ WITH ( storage_parameter [= value] [, ... ] ) | WITH OIDS | WITHOUT OIDS ] +) ] { FOR VALUES partition_bound_spec | DEFAULT } +[ PARTITION BY { RANGE | LIST | HASH } ( { column_name | ( expression ) } [ COLLATE collation ] [ opclass ] [, ... ] ) ] +[ WITH ( storage_parameter [= value] [, ... ] ) | WITH OIDS | WITHOUT OIDS ] [ ON COMMIT { PRESERVE ROWS | DELETE ROWS | DROP } ] -[ TABLESPACE tablespace_name ] +[ TABLESPACE tablespace_name ] -where column_constraint is: +where column_constraint is: -[ CONSTRAINT constraint_name ] +[ CONSTRAINT constraint_name ] { NOT NULL | NULL | - CHECK ( expression ) [ NO INHERIT ] | + CHECK ( expression ) [ NO INHERIT ] | DEFAULT default_expr | GENERATED { ALWAYS | BY DEFAULT } AS IDENTITY [ ( sequence_options ) ] | - UNIQUE index_parameters | - PRIMARY KEY index_parameters | - REFERENCES reftable [ ( refcolumn ) ] [ MATCH FULL | MATCH PARTIAL | MATCH SIMPLE ] + UNIQUE index_parameters | + PRIMARY KEY index_parameters | + REFERENCES reftable [ ( refcolumn ) ] [ MATCH FULL | MATCH PARTIAL | MATCH SIMPLE ] [ ON DELETE action ] [ ON UPDATE action ] } [ DEFERRABLE | NOT DEFERRABLE ] [ INITIALLY DEFERRED | INITIALLY IMMEDIATE ] -and table_constraint is: +and table_constraint is: -[ CONSTRAINT constraint_name ] -{ CHECK ( expression ) [ NO INHERIT ] | - UNIQUE ( column_name [, ... ] ) index_parameters | - PRIMARY KEY ( column_name [, ... ] ) index_parameters | +[ CONSTRAINT constraint_name ] +{ CHECK ( expression ) [ NO INHERIT ] | + UNIQUE ( column_name [, ... ] ) index_parameters | + PRIMARY KEY ( column_name [, ... ] ) index_parameters | EXCLUDE [ USING index_method ] ( exclude_element WITH operator [, ... 
] ) index_parameters [ WHERE ( predicate ) ] | - FOREIGN KEY ( column_name [, ... ] ) REFERENCES reftable [ ( refcolumn [, ... ] ) ] + FOREIGN KEY ( column_name [, ... ] ) REFERENCES reftable [ ( refcolumn [, ... ] ) ] [ MATCH FULL | MATCH PARTIAL | MATCH SIMPLE ] [ ON DELETE action ] [ ON UPDATE action ] } [ DEFERRABLE | NOT DEFERRABLE ] [ INITIALLY DEFERRED | INITIALLY IMMEDIATE ] -and like_option is: +and like_option is: -{ INCLUDING | EXCLUDING } { DEFAULTS | CONSTRAINTS | IDENTITY | INDEXES | STORAGE | COMMENTS | ALL } +{ INCLUDING | EXCLUDING } { COMMENTS | CONSTRAINTS | DEFAULTS | IDENTITY | INDEXES | STATISTICS | STORAGE | ALL } -and partition_bound_spec is: +and partition_bound_spec is: -IN ( { numeric_literal | string_literal | NULL } [, ...] ) | -FROM ( { numeric_literal | string_literal | MINVALUE | MAXVALUE } [, ...] ) - TO ( { numeric_literal | string_literal | MINVALUE | MAXVALUE } [, ...] ) +IN ( { numeric_literal | string_literal | TRUE | FALSE | NULL } [, ...] ) | +FROM ( { numeric_literal | string_literal | TRUE | FALSE | MINVALUE | MAXVALUE } [, ...] ) + TO ( { numeric_literal | string_literal | TRUE | FALSE | MINVALUE | MAXVALUE } [, ...] ) | +WITH ( MODULUS numeric_literal, REMAINDER numeric_literal ) -index_parameters in UNIQUE, PRIMARY KEY, and EXCLUDE constraints are: +index_parameters in UNIQUE, PRIMARY KEY, and EXCLUDE constraints are: -[ WITH ( storage_parameter [= value] [, ... ] ) ] -[ USING INDEX TABLESPACE tablespace_name ] +[ INCLUDE ( column_name [, ... ] ) ] +[ WITH ( storage_parameter [= value] [, ... ] ) ] +[ USING INDEX TABLESPACE tablespace_name ] -exclude_element in an EXCLUDE constraint is: +exclude_element in an EXCLUDE constraint is: { column_name | ( expression ) } [ opclass ] [ ASC | DESC ] [ NULLS { FIRST | LAST } ] - + Description @@ -113,7 +115,7 @@ FROM ( { numeric_literal | If a schema name is given (for example, CREATE TABLE - myschema.mytable ...) then the table is created in the specified + myschema.mytable ...) then the table is created in the specified schema. Otherwise it is created in the current schema. Temporary tables exist in a special schema, so a schema name cannot be given when creating a temporary table. The name of the table must be @@ -157,8 +159,8 @@ FROM ( { numeric_literal | - - TEMPORARY or TEMP + + TEMPORARY or TEMP If specified, the table is created as a temporary table. @@ -177,27 +179,27 @@ FROM ( { numeric_literal | ANALYZE on the temporary table after it is populated. + ANALYZE on the temporary table after it is populated. Optionally, GLOBAL or LOCAL - can be written before TEMPORARY or TEMP. - This presently makes no difference in PostgreSQL + can be written before TEMPORARY or TEMP. + This presently makes no difference in PostgreSQL and is deprecated; see . + endterm="sql-createtable-compatibility-title"/>. - - UNLOGGED + + UNLOGGED If specified, the table is created as an unlogged table. Data written to unlogged tables is not written to the write-ahead log (see ), which makes them considerably faster than ordinary + linkend="wal"/>), which makes them considerably faster than ordinary tables. However, they are not crash-safe: an unlogged table is automatically truncated after a crash or unclean shutdown. The contents of an unlogged table are also not replicated to standby servers. @@ -208,7 +210,7 @@ FROM ( { numeric_literal | - IF NOT EXISTS + IF NOT EXISTS Do not throw an error if a relation with the same name already exists. 
@@ -220,7 +222,7 @@ FROM ( { numeric_literal | - table_name + table_name The name (optionally schema-qualified) of the table to be created. @@ -229,7 +231,7 @@ FROM ( { numeric_literal | - OF type_name + OF type_name Creates a typed table, which takes its @@ -249,134 +251,8 @@ FROM ( { numeric_literal | - - PARTITION OF parent_table FOR VALUES partition_bound_spec - - - Creates the table as a partition of the specified - parent table. - - - - The partition_bound_spec - must correspond to the partitioning method and partition key of the - parent table, and must not overlap with any existing partition of that - parent. The form with IN is used for list partitioning, - while the form with FROM and TO is used for - range partitioning. - - - - Each of the values specified in - the partition_bound_spec is - a literal, NULL, MINVALUE, or - MAXVALUE. Each literal value must be either a - numeric constant that is coercible to the corresponding partition key - column's type, or a string literal that is valid input for that type. - - - - When creating a list partition, NULL can be - specified to signify that the partition allows the partition key - column to be null. However, there cannot be more than one such - list partition for a given parent table. NULL - cannot be specified for range partitions. - - - - When creating a range partition, the lower bound specified with - FROM is an inclusive bound, whereas the upper - bound specified with TO is an exclusive bound. - That is, the values specified in the FROM list - are valid values of the corresponding partition key columns for this - partition, whereas those in the TO list are - not. Note that this statement must be understood according to the - rules of row-wise comparison (). - For example, given PARTITION BY RANGE (x,y), a partition - bound FROM (1, 2) TO (3, 4) - allows x=1 with any y>=2, - x=2 with any non-null y, - and x=3 with any y<4. - - - - The special values MINVALUE and MAXVALUE - may be used when creating a range partition to indicate that there - is no lower or upper bound on the column's value. For example, a - partition defined using FROM (MINVALUE) TO (10) allows - any values less than 10, and a partition defined using - FROM (10) TO (MAXVALUE) allows any values greater than - or equal to 10. - - - - When creating a range partition involving more than one column, it - can also make sense to use MAXVALUE as part of the lower - bound, and MINVALUE as part of the upper bound. For - example, a partition defined using - FROM (0, MAXVALUE) TO (10, MAXVALUE) allows any rows - where the first partition key column is greater than 0 and less than - or equal to 10. Similarly, a partition defined using - FROM ('a', MINVALUE) TO ('b', MINVALUE) allows any rows - where the first partition key column starts with "a". - - - - Note that any values after MINVALUE or - MAXVALUE in a partition bound are ignored; so the bound - (10, MINVALUE, 0) is equivalent to - (10, MINVALUE, 10) and (10, MINVALUE, MINVALUE) - and (10, MINVALUE, MAXVALUE). - - - - Also note that some element types, such as timestamp, - have a notion of "infinity", which is just another value that can - be stored. This is different from MINVALUE and - MAXVALUE, which are not real values that can be stored, - but rather they are ways of saying that the value is unbounded. - MAXVALUE can be thought of as being greater than any - other value, including "infinity" and MINVALUE as being - less than any other value, including "minus infinity". 
Thus the range - FROM ('infinity') TO (MAXVALUE) is not an empty range; it - allows precisely one value to be stored — "infinity". - - - - A partition must have the same column names and types as the partitioned - table to which it belongs. If the parent is specified WITH - OIDS then all partitions must have OIDs; the parent's OID - column will be inherited by all partitions just like any other column. - Modifications to the column names or types of a partitioned table, or - the addition or removal of an OID column, will automatically propagate - to all partitions. CHECK constraints will be inherited - automatically by every partition, but an individual partition may specify - additional CHECK constraints; additional constraints with - the same name and condition as in the parent will be merged with the - parent constraint. Defaults may be specified separately for each - partition. - - - - Rows inserted into a partitioned table will be automatically routed to - the correct partition. If no suitable partition exists, an error will - occur. Also, if updating a row in a given partition would require it - to move to another partition due to new partition key values, an error - will occur. - - - - Operations such as TRUNCATE which normally affect a table and all of its - inheritance children will cascade to all partitions, but may also be - performed on an individual partition. Note that dropping a partition - with DROP TABLE requires taking an ACCESS - EXCLUSIVE lock on the parent table. - - - - - column_name + column_name The name of a column to be created in the new table. @@ -385,13 +261,13 @@ FROM ( { numeric_literal | - data_type + data_type The data type of the column. This can include array specifiers. For more information on the data types supported by PostgreSQL, refer to . + linkend="datatype"/>. @@ -400,7 +276,7 @@ FROM ( { numeric_literal | COLLATE collation - The COLLATE clause assigns a collation to + The COLLATE clause assigns a collation to the column (which must be of a collatable data type). If not specified, the column data type's default collation is used. @@ -411,13 +287,13 @@ FROM ( { numeric_literal | INHERITS ( parent_table [, ... ] ) - The optional INHERITS clause specifies a list of + The optional INHERITS clause specifies a list of tables from which the new table automatically inherits all columns. Parent tables can be plain tables or foreign tables. - Use of INHERITS creates a persistent relationship + Use of INHERITS creates a persistent relationship between the new child table and its parent table(s). Schema modifications to the parent(s) normally propagate to children as well, and by default the data of the child table is included in @@ -441,19 +317,19 @@ FROM ( { numeric_literal | - CHECK constraints are merged in essentially the same way as + CHECK constraints are merged in essentially the same way as columns: if multiple parent tables and/or the new table definition - contain identically-named CHECK constraints, these + contain identically-named CHECK constraints, these constraints must all have the same check expression, or an error will be reported. Constraints having the same name and expression will - be merged into one copy. A constraint marked NO INHERIT in a - parent will not be considered. Notice that an unnamed CHECK + be merged into one copy. A constraint marked NO INHERIT in a + parent will not be considered. Notice that an unnamed CHECK constraint in the new table will never be merged, since a unique name will always be chosen for it. 
- Column STORAGE settings are also copied from parent tables. + Column STORAGE settings are also copied from parent tables. @@ -465,25 +341,33 @@ FROM ( { numeric_literal | - PARTITION BY { RANGE | LIST } ( { column_name | ( expression ) } [ opclass ] [, ...] ) + PARTITION BY { RANGE | LIST | HASH } ( { column_name | ( expression ) } [ opclass ] [, ...] ) The optional PARTITION BY clause specifies a strategy of partitioning the table. The table thus created is called a partitioned table. The parenthesized list of columns or expressions forms the partition key - for the table. When using range partitioning, the partition key can - include multiple columns or expressions (up to 32, but this limit can - altered when building PostgreSQL.), but for + for the table. When using range or hash partitioning, the partition key + can include multiple columns or expressions (up to 32, but this limit can + be altered when building PostgreSQL), but for list partitioning, the partition key must consist of a single column or - expression. If no B-tree operator class is specified when creating a - partitioned table, the default B-tree operator class for the datatype will - be used. If there is none, an error will be reported. + expression. + + + + Range and list partitioning require a btree operator class, while hash + partitioning requires a hash operator class. If no operator class is + specified explicitly, the default operator class of the appropriate + type will be used; if no default operator class exists, an error will + be raised. When hash partitioning is used, the operator class used + must implement support function 2 (see + for details). A partitioned table is divided into sub-tables (called partitions), - which are created using separate CREATE TABLE commands. + which are created using separate CREATE TABLE commands. The partitioned table is itself empty. A data row inserted into the table is routed to a partition based on the value of columns or expressions in the partition key. If no existing partition matches @@ -491,86 +375,324 @@ FROM ( { numeric_literal | - Partitioned tables do not support UNIQUE, - PRIMARY KEY, EXCLUDE, or - FOREIGN KEY constraints; however, you can define - these constraints on individual partitions. + Partitioned tables do not support EXCLUDE constraints; + however, you can define these constraints on individual partitions. + Also, while it's possible to define PRIMARY KEY + constraints on partitioned tables, creating foreign keys that + reference a partitioned table is not yet supported. + + + + See for more discussion on table + partitioning. - - LIKE source_table [ like_option ... ] + + PARTITION OF parent_table { FOR VALUES partition_bound_spec | DEFAULT } - The LIKE clause specifies a table from which - the new table automatically copies all column names, their data types, - and their not-null constraints. + Creates the table as a partition of the specified + parent table. The table can be created either as a partition for specific + values using FOR VALUES or as a default partition + using DEFAULT. This option is not available for + hash-partitioned tables. + - Unlike INHERITS, the new table and original table - are completely decoupled after creation is complete. Changes to the - original table will not be applied to the new table, and it is not - possible to include data of the new table in scans of the original - table. 
+ The partition_bound_spec + must correspond to the partitioning method and partition key of the + parent table, and must not overlap with any existing partition of that + parent. The form with IN is used for list partitioning, + the form with FROM and TO is used + for range partitioning, and the form with WITH is used + for hash partitioning. + + + + Each of the values specified in + the partition_bound_spec is + a literal, NULL, MINVALUE, or + MAXVALUE. Each literal value must be either a + numeric constant that is coercible to the corresponding partition key + column's type, or a string literal that is valid input for that type. + - Default expressions for the copied column definitions will be copied - only if INCLUDING DEFAULTS is specified. The - default behavior is to exclude default expressions, resulting in the - copied columns in the new table having null defaults. - Note that copying defaults that call database-modification functions, - such as nextval, may create a functional linkage between - the original and new tables. + When creating a list partition, NULL can be + specified to signify that the partition allows the partition key + column to be null. However, there cannot be more than one such + list partition for a given parent table. NULL + cannot be specified for range partitions. + - Any identity specifications of copied column definitions will only be - copied if INCLUDING IDENTITY is specified. A new - sequence is created for each identity column of the new table, separate - from the sequences associated with the old table. + When creating a range partition, the lower bound specified with + FROM is an inclusive bound, whereas the upper + bound specified with TO is an exclusive bound. + That is, the values specified in the FROM list + are valid values of the corresponding partition key columns for this + partition, whereas those in the TO list are + not. Note that this statement must be understood according to the + rules of row-wise comparison (). + For example, given PARTITION BY RANGE (x,y), a partition + bound FROM (1, 2) TO (3, 4) + allows x=1 with any y>=2, + x=2 with any non-null y, + and x=3 with any y<4. + - Not-null constraints are always copied to the new table. - CHECK constraints will be copied only if - INCLUDING CONSTRAINTS is specified. - No distinction is made between column constraints and table - constraints. + The special values MINVALUE and MAXVALUE + may be used when creating a range partition to indicate that there + is no lower or upper bound on the column's value. For example, a + partition defined using FROM (MINVALUE) TO (10) allows + any values less than 10, and a partition defined using + FROM (10) TO (MAXVALUE) allows any values greater than + or equal to 10. + - Indexes, PRIMARY KEY, UNIQUE, - and EXCLUDE constraints on the original table will be - created on the new table only if INCLUDING INDEXES - is specified. Names for the new indexes and constraints are - chosen according to the default rules, regardless of how the originals - were named. (This behavior avoids possible duplicate-name failures for - the new indexes.) + When creating a range partition involving more than one column, it + can also make sense to use MAXVALUE as part of the lower + bound, and MINVALUE as part of the upper bound. For + example, a partition defined using + FROM (0, MAXVALUE) TO (10, MAXVALUE) allows any rows + where the first partition key column is greater than 0 and less than + or equal to 10. 
Similarly, a partition defined using + FROM ('a', MINVALUE) TO ('b', MINVALUE) allows any rows + where the first partition key column starts with "a". + - STORAGE settings for the copied column definitions will be - copied only if INCLUDING STORAGE is specified. The - default behavior is to exclude STORAGE settings, resulting - in the copied columns in the new table having type-specific default - settings. For more on STORAGE settings, see - . + Note that if MINVALUE or MAXVALUE is used for + one column of a partitioning bound, the same value must be used for all + subsequent columns. For example, (10, MINVALUE, 0) is not + a valid bound; you should write (10, MINVALUE, MINVALUE). + - Comments for the copied columns, constraints, and indexes - will be copied only if INCLUDING COMMENTS - is specified. The default behavior is to exclude comments, resulting in - the copied columns and constraints in the new table having no comments. + Also note that some element types, such as timestamp, + have a notion of "infinity", which is just another value that can + be stored. This is different from MINVALUE and + MAXVALUE, which are not real values that can be stored, + but rather they are ways of saying that the value is unbounded. + MAXVALUE can be thought of as being greater than any + other value, including "infinity" and MINVALUE as being + less than any other value, including "minus infinity". Thus the range + FROM ('infinity') TO (MAXVALUE) is not an empty range; it + allows precisely one value to be stored — "infinity". + - INCLUDING ALL is an abbreviated form of - INCLUDING DEFAULTS INCLUDING IDENTITY INCLUDING CONSTRAINTS INCLUDING INDEXES INCLUDING STORAGE INCLUDING COMMENTS. + If DEFAULT is specified, the table will be + created as a default partition of the parent table. The parent can + either be a list or range partitioned table. A partition key value + not fitting into any other partition of the given parent will be + routed to the default partition. There can be only one default + partition for a given parent table. + - Note that unlike INHERITS, columns and - constraints copied by LIKE are not merged with similarly + When a table has an existing DEFAULT partition and + a new partition is added to it, the existing default partition must + be scanned to verify that it does not contain any rows which properly + belong in the new partition. If the default partition contains a + large number of rows, this may be slow. The scan will be skipped if + the default partition is a foreign table or if it has a constraint which + proves that it cannot contain rows which should be placed in the new + partition. + + + + When creating a hash partition, a modulus and remainder must be specified. + The modulus must be a positive integer, and the remainder must be a + non-negative integer less than the modulus. Typically, when initially + setting up a hash-partitioned table, you should choose a modulus equal to + the number of partitions and assign every table the same modulus and a + different remainder (see examples, below). However, it is not required + that every partition have the same modulus, only that every modulus which + occurs among the partitions of a hash-partitioned table is a factor of the + next larger modulus. This allows the number of partitions to be increased + incrementally without needing to move all the data at once. 
For example, + suppose you have a hash-partitioned table with 8 partitions, each of which + has modulus 8, but find it necessary to increase the number of partitions + to 16. You can detach one of the modulus-8 partitions, create two new + modulus-16 partitions covering the same portion of the key space (one with + a remainder equal to the remainder of the detached partition, and the + other with a remainder equal to that value plus 8), and repopulate them + with data. You can then repeat this -- perhaps at a later time -- for + each modulus-8 partition until none remain. While this may still involve + a large amount of data movement at each step, it is still better than + having to create a whole new table and move all the data at once. + + + + A partition must have the same column names and types as the partitioned + table to which it belongs. If the parent is specified WITH + OIDS then all partitions must have OIDs; the parent's OID + column will be inherited by all partitions just like any other column. + Modifications to the column names or types of a partitioned table, or + the addition or removal of an OID column, will automatically propagate + to all partitions. CHECK constraints will be inherited + automatically by every partition, but an individual partition may specify + additional CHECK constraints; additional constraints with + the same name and condition as in the parent will be merged with the + parent constraint. Defaults may be specified separately for each + partition. + + + + Rows inserted into a partitioned table will be automatically routed to + the correct partition. If no suitable partition exists, an error will + occur. + + + + Operations such as TRUNCATE which normally affect a table and all of its + inheritance children will cascade to all partitions, but may also be + performed on an individual partition. Note that dropping a partition + with DROP TABLE requires taking an ACCESS + EXCLUSIVE lock on the parent table. + + + + + + LIKE source_table [ like_option ... ] + + + The LIKE clause specifies a table from which + the new table automatically copies all column names, their data types, + and their not-null constraints. + + + Unlike INHERITS, the new table and original table + are completely decoupled after creation is complete. Changes to the + original table will not be applied to the new table, and it is not + possible to include data of the new table in scans of the original + table. + + + Also unlike INHERITS, columns and + constraints copied by LIKE are not merged with similarly named columns and constraints. If the same name is specified explicitly or in another LIKE clause, an error is signaled. + + The optional like_option clauses specify + which additional properties of the original table to copy. Specifying + INCLUDING copies the property, specifying + EXCLUDING omits the property. + EXCLUDING is the default. If multiple specifications + are made for the same kind of object, the last one is used. The + available options are: + + + + INCLUDING COMMENTS + + + Comments for the copied columns, constraints, and indexes will be + copied. The default behavior is to exclude comments, resulting in + the copied columns and constraints in the new table having no + comments. + + + + + + INCLUDING CONSTRAINTS + + + CHECK constraints will be copied. No distinction + is made between column constraints and table constraints. Not-null + constraints are always copied to the new table. 
+ + + + + + INCLUDING DEFAULTS + + + Default expressions for the copied column definitions will be + copied. Otherwise, default expressions are not copied, resulting in + the copied columns in the new table having null defaults. Note that + copying defaults that call database-modification functions, such as + nextval, may create a functional linkage + between the original and new tables. + + + + + + INCLUDING IDENTITY + + + Any identity specifications of copied column definitions will be + copied. A new sequence is created for each identity column of the + new table, separate from the sequences associated with the old + table. + + + + + + INCLUDING INDEXES + + + Indexes, PRIMARY KEY, UNIQUE, + and EXCLUDE constraints on the original table + will be created on the new table. Names for the new indexes and + constraints are chosen according to the default rules, regardless of + how the originals were named. (This behavior avoids possible + duplicate-name failures for the new indexes.) + + + + + + INCLUDING STATISTICS + + + Extended statistics are copied to the new table. + + + + + + INCLUDING STORAGE + + + STORAGE settings for the copied column + definitions will be copied. The default behavior is to exclude + STORAGE settings, resulting in the copied columns + in the new table having type-specific default settings. For more on + STORAGE settings, see . + + + + + + INCLUDING ALL + + + INCLUDING ALL is an abbreviated form selecting + all the available individual options. (It could be useful to write + individual EXCLUDING clauses after + INCLUDING ALL to select all but some specific + options.) + + + + + + The LIKE clause can also be used to copy column definitions from views, foreign tables, or composite types. @@ -581,12 +703,12 @@ FROM ( { numeric_literal | - CONSTRAINT constraint_name + CONSTRAINT constraint_name An optional name for a column or table constraint. If the constraint is violated, the constraint name is present in error messages, - so constraint names like col must be positive can be used + so constraint names like col must be positive can be used to communicate helpful constraint information to client applications. (Double-quotes are needed to specify constraint names that contain spaces.) If a constraint name is not specified, the system generates a name. @@ -595,7 +717,7 @@ FROM ( { numeric_literal | - NOT NULL + NOT NULL The column is not allowed to contain null values. @@ -604,7 +726,7 @@ FROM ( { numeric_literal | - NULL + NULL The column is allowed to contain null values. This is the default. @@ -619,10 +741,10 @@ FROM ( { numeric_literal | - CHECK ( expression ) [ NO INHERIT ] + CHECK ( expression ) [ NO INHERIT ] - The CHECK clause specifies an expression producing a + The CHECK clause specifies an expression producing a Boolean result which new or updated rows must satisfy for an insert or update operation to succeed. Expressions evaluating to TRUE or UNKNOWN succeed. Should any row of an insert or @@ -641,15 +763,15 @@ FROM ( { numeric_literal | - A constraint marked with NO INHERIT will not propagate to + A constraint marked with NO INHERIT will not propagate to child tables. When a table has multiple CHECK constraints, they will be tested for each row in alphabetical order by name, - after checking NOT NULL constraints. - (PostgreSQL versions before 9.5 did not honor any + after checking NOT NULL constraints. + (PostgreSQL versions before 9.5 did not honor any particular firing order for CHECK constraints.) 
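
      A brief hedged illustration (table and constraint names invented, not
      part of the patch): a table-level CHECK constraint marked
      NO INHERIT applies to the table itself and is not propagated to
      inheritance children, while the other constraint is inherited normally:

-- Hypothetical example: balance_nonnegative is inherited by children,
-- only_parent_check is enforced on accounts alone.
CREATE TABLE accounts (
    balance numeric,
    CONSTRAINT balance_nonnegative CHECK (balance >= 0),
    CONSTRAINT only_parent_check CHECK (balance < 1000000) NO INHERIT
);
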
@@ -660,7 +782,7 @@ FROM ( { numeric_literal | default_expr - The DEFAULT clause assigns a default data value for + The DEFAULT clause assigns a default data value for the column whose column definition it appears within. The value is any variable-free expression (subqueries and cross-references to other columns in the current table are not allowed). The @@ -694,7 +816,7 @@ FROM ( { numeric_literal | INSERT statement specifies OVERRIDING SYSTEM VALUE. If BY DEFAULT is specified, then the user-specified value takes - precedence. See for details. (In + precedence. See for details. (In the COPY command, user-specified values are always used regardless of this setting.) @@ -702,14 +824,15 @@ FROM ( { numeric_literal | The optional sequence_options clause can be used to override the options of the sequence. - See for details. + See for details. - UNIQUE (column constraint) - UNIQUE ( column_name [, ... ] ) (table constraint) + UNIQUE (column constraint) + UNIQUE ( column_name [, ... ] ) + INCLUDE ( column_name [, ...]) (table constraint) @@ -731,15 +854,34 @@ FROM ( { numeric_literal | + + + When establishing a unique constraint for a multi-level partition + hierarchy, all the columns in the partition key of the target + partitioned table, as well as those of all its descendant partitioned + tables, must be included in the constraint definition. + + + + Adding a unique constraint will automatically create a unique btree + index on the column or group of columns used in the constraint. + The optional clause INCLUDE adds to that index + one or more columns on which the uniqueness is not enforced. + Note that although the constraint is not enforced on the included columns, + it still depends on them. Consequently, some operations on these columns + (e.g. DROP COLUMN) can cause cascaded constraint and + index deletion. + - PRIMARY KEY (column constraint) - PRIMARY KEY ( column_name [, ... ] ) (table constraint) + PRIMARY KEY (column constraint) + PRIMARY KEY ( column_name [, ... ] ) + INCLUDE ( column_name [, ...]) (table constraint) - The PRIMARY KEY constraint specifies that a column or + The PRIMARY KEY constraint specifies that a column or columns of a table can contain only unique (non-duplicate), nonnull values. Only one primary key can be specified for a table, whether as a column constraint or a table constraint. @@ -754,58 +896,75 @@ FROM ( { numeric_literal | PRIMARY KEY enforces the same data constraints as - a combination of UNIQUE and NOT NULL, but + a combination of UNIQUE and NOT NULL, but identifying a set of columns as the primary key also provides metadata about the design of the schema, since a primary key implies that other tables can rely on this set of columns as a unique identifier for rows. + + + PRIMARY KEY constraints share the restrictions that + UNIQUE constraints have when placed on partitioned + tables. + + + + Adding a PRIMARY KEY constraint will automatically + create a unique btree index on the column or group of columns used in the + constraint. The optional INCLUDE clause allows a list + of columns to be specified which will be included in the non-key portion + of the index. Although uniqueness is not enforced on the included columns, + the constraint still depends on them. Consequently, some operations on the + included columns (e.g. DROP COLUMN) can cause cascaded + constraint and index deletion. + - + EXCLUDE [ USING index_method ] ( exclude_element WITH operator [, ... 
] ) index_parameters [ WHERE ( predicate ) ] - The EXCLUDE clause defines an exclusion + The EXCLUDE clause defines an exclusion constraint, which guarantees that if any two rows are compared on the specified column(s) or expression(s) using the specified operator(s), not all of these - comparisons will return TRUE. If all of the + comparisons will return TRUE. If all of the specified operators test for equality, this is equivalent to a - UNIQUE constraint, although an ordinary unique constraint + UNIQUE constraint, although an ordinary unique constraint will be faster. However, exclusion constraints can specify constraints that are more general than simple equality. For example, you can specify a constraint that no two rows in the table contain overlapping circles - (see ) by using the - && operator. + (see ) by using the + && operator. Exclusion constraints are implemented using an index, so each specified operator must be associated with an appropriate operator class - (see ) for the index access - method index_method. + (see ) for the index access + method index_method. The operators are required to be commutative. Each exclude_element can optionally specify an operator class and/or ordering options; these are described fully under - . + . - The access method must support amgettuple (see ); at present this means GIN + The access method must support amgettuple (see ); at present this means GIN cannot be used. Although it's allowed, there is little point in using B-tree or hash indexes with an exclusion constraint, because this does nothing that an ordinary unique constraint doesn't do better. - So in practice the access method will always be GiST or - SP-GiST. + So in practice the access method will always be GiST or + SP-GiST. - The predicate allows you to specify an + The predicate allows you to specify an exclusion constraint on a subset of the table; internally this creates a partial index. Note that parentheses are required around the predicate. @@ -832,26 +991,30 @@ FROM ( { numeric_literal | reftable is used. The referenced columns must be the columns of a non-deferrable unique or primary key constraint in the referenced table. The user - must have REFERENCES permission on the referenced table - (either the whole table, or the specific referenced columns). + must have REFERENCES permission on the referenced table + (either the whole table, or the specific referenced columns). The + addition of a foreign key constraint requires a + SHARE ROW EXCLUSIVE lock on the referenced table. Note that foreign key constraints cannot be defined between temporary - tables and permanent tables. + tables and permanent tables. Also note that while it is possible to + define a foreign key on a partitioned table, it is not possible to + declare a foreign key that references a partitioned table. A value inserted into the referencing column(s) is matched against the values of the referenced table and referenced columns using the given match type. There are three match types: MATCH - FULL, MATCH PARTIAL, and MATCH + FULL, MATCH PARTIAL, and MATCH SIMPLE (which is the default). MATCH - FULL will not allow one column of a multicolumn foreign key + FULL will not allow one column of a multicolumn foreign key to be null unless all foreign key columns are null; if they are all null, the row is not required to have a match in the referenced table. MATCH SIMPLE allows any of the foreign key columns to be null; if any of them are null, the row is not required to have a match in the referenced table. 
- MATCH PARTIAL is not yet implemented. - (Of course, NOT NULL constraints can be applied to the + MATCH PARTIAL is not yet implemented. + (Of course, NOT NULL constraints can be applied to the referencing column(s) to prevent these cases from arising.) @@ -946,15 +1109,15 @@ FROM ( { numeric_literal | command). + (using the command). NOT DEFERRABLE is the default. - Currently, only UNIQUE, PRIMARY KEY, - EXCLUDE, and - REFERENCES (foreign key) constraints accept this - clause. NOT NULL and CHECK constraints are not + Currently, only UNIQUE, PRIMARY KEY, + EXCLUDE, and + REFERENCES (foreign key) constraints accept this + clause. NOT NULL and CHECK constraints are not deferrable. Note that deferrable constraints cannot be used as conflict arbitrators in an INSERT statement that - includes an ON CONFLICT DO UPDATE clause. + includes an ON CONFLICT DO UPDATE clause. @@ -970,28 +1133,28 @@ FROM ( { numeric_literal | INITIALLY DEFERRED, it is checked only at the end of the transaction. The constraint check time can be - altered with the command. + altered with the command. - WITH ( storage_parameter [= value] [, ... ] ) + WITH ( storage_parameter [= value] [, ... ] ) This clause specifies optional storage parameters for a table or index; see for more - information. The WITH clause for a - table can also include OIDS=TRUE (or just OIDS) + endterm="sql-createtable-storage-parameters-title"/> for more + information. The WITH clause for a + table can also include OIDS=TRUE (or just OIDS) to specify that rows of the new table should have OIDs (object identifiers) assigned to them, or - OIDS=FALSE to specify that the rows should not have OIDs. - If OIDS is not specified, the default setting depends upon - the configuration parameter. + OIDS=FALSE to specify that the rows should not have OIDs. + If OIDS is not specified, the default setting depends upon + the configuration parameter. (If the new table inherits from any tables that have OIDs, then - OIDS=TRUE is forced even if the command says - OIDS=FALSE.) + OIDS=TRUE is forced even if the command says + OIDS=FALSE.) @@ -1008,20 +1171,20 @@ FROM ( { numeric_literal | To remove OIDs from a table after it has been created, use . + linkend="sql-altertable"/>. - WITH OIDS - WITHOUT OIDS + WITH OIDS + WITHOUT OIDS - These are obsolescent syntaxes equivalent to WITH (OIDS) - and WITH (OIDS=FALSE), respectively. If you wish to give - both an OIDS setting and storage parameters, you must use - the WITH ( ... ) syntax; see above. + These are obsolescent syntaxes equivalent to WITH (OIDS) + and WITH (OIDS=FALSE), respectively. If you wish to give + both an OIDS setting and storage parameters, you must use + the WITH ( ... ) syntax; see above. @@ -1051,8 +1214,9 @@ FROM ( { numeric_literal | All rows in the temporary table will be deleted at the end of each transaction block. Essentially, an automatic is done - at each commit. + linkend="sql-truncate"/> is done + at each commit. When used on a partitioned table, this + is not cascaded to its partitions. @@ -1062,7 +1226,9 @@ FROM ( { numeric_literal | The temporary table will be dropped at the end of the current - transaction block. + transaction block. When used on a partitioned table, this action + drops its partitions and when used on tables with inheritance + children, it drops the dependent children. 
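
      For illustration only (names invented, not part of the patch), a
      temporary table created with ON COMMIT DROP exists only for the
      duration of the enclosing transaction block:

-- Hypothetical example: session_scratch is removed automatically at COMMIT.
BEGIN;
CREATE TEMPORARY TABLE session_scratch (id integer) ON COMMIT DROP;
INSERT INTO session_scratch VALUES (1);
COMMIT;   -- session_scratch is dropped here
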
@@ -1071,53 +1237,53 @@ FROM ( { numeric_literal | - TABLESPACE tablespace_name + TABLESPACE tablespace_name - The tablespace_name is the name + The tablespace_name is the name of the tablespace in which the new table is to be created. If not specified, - is consulted, or - if the table is temporary. + is consulted, or + if the table is temporary. - USING INDEX TABLESPACE tablespace_name + USING INDEX TABLESPACE tablespace_name This clause allows selection of the tablespace in which the index associated with a UNIQUE, PRIMARY - KEY, or EXCLUDE constraint will be created. + KEY, or EXCLUDE constraint will be created. If not specified, - is consulted, or - if the table is temporary. + is consulted, or + if the table is temporary. - - Storage Parameters + + Storage Parameters storage parameters - The WITH clause can specify storage parameters + The WITH clause can specify storage parameters for tables, and for indexes associated with a UNIQUE, - PRIMARY KEY, or EXCLUDE constraint. + PRIMARY KEY, or EXCLUDE constraint. Storage parameters for - indexes are documented in . + indexes are documented in . The storage parameters currently available for tables are listed below. For many of these parameters, as shown, there is an additional parameter with the same name prefixed with toast., which controls the behavior of the - table's secondary TOAST table, if any - (see for more information about TOAST). + table's secondary TOAST table, if any + (see for more information about TOAST). If a table parameter value is set and the equivalent toast. parameter is not, the TOAST table will use the table's parameter value. @@ -1128,14 +1294,14 @@ FROM ( { numeric_literal | - fillfactor (integer) + fillfactor (integer) The fillfactor for a table is a percentage between 10 and 100. 100 (complete packing) is the default. When a smaller fillfactor - is specified, INSERT operations pack table pages only + is specified, INSERT operations pack table pages only to the indicated percentage; the remaining space on each page is - reserved for updating rows on that page. This gives UPDATE + reserved for updating rows on that page. This gives UPDATE a chance to place the updated copy of a row on the same page as the original, which is more efficient than placing it on a different page. For a table whose entries are never updated, complete packing is the @@ -1146,119 +1312,140 @@ FROM ( { numeric_literal | - parallel_workers (integer) + toast_tuple_target (integer) + + + The toast_tuple_target specifies the minimum tuple length required before + we try to move long column values into TOAST tables, and is also the + target length we try to reduce the length below once toasting begins. + This only affects columns marked as either External or Extended + and applies only to new tuples - there is no effect on existing rows. + By default this parameter is set to allow at least 4 tuples per block, + which with the default blocksize will be 2040 bytes. Valid values are + between 128 bytes and the (blocksize - header), by default 8160 bytes. + Changing this value may not be useful for very short or very long rows. + Note that the default setting is often close to optimal, and + it is possible that setting this parameter could have negative + effects in some cases. + This parameter cannot be set for TOAST tables. + + + + + + parallel_workers (integer) This sets the number of workers that should be used to assist a parallel scan of this table. If not set, the system will determine a value based on the relation size. 
The actual number of workers chosen by the planner - may be less, for example due to - the setting of . + or by utility statements that use parallel scans may be less, for example + due to the setting of . - autovacuum_enabled, toast.autovacuum_enabled (boolean) + autovacuum_enabled, toast.autovacuum_enabled (boolean) Enables or disables the autovacuum daemon for a particular table. - If true, the autovacuum daemon will perform automatic VACUUM - and/or ANALYZE operations on this table following the rules - discussed in . + If true, the autovacuum daemon will perform automatic VACUUM + and/or ANALYZE operations on this table following the rules + discussed in . If false, this table will not be autovacuumed, except to prevent - transaction ID wraparound. See for + transaction ID wraparound. See for more about wraparound prevention. Note that the autovacuum daemon does not run at all (except to prevent - transaction ID wraparound) if the + transaction ID wraparound) if the parameter is false; setting individual tables' storage parameters does not override that. Therefore there is seldom much point in explicitly - setting this storage parameter to true, only - to false. + setting this storage parameter to true, only + to false. - autovacuum_vacuum_threshold, toast.autovacuum_vacuum_threshold (integer) + autovacuum_vacuum_threshold, toast.autovacuum_vacuum_threshold (integer) - Per-table value for + Per-table value for parameter. - autovacuum_vacuum_scale_factor, toast.autovacuum_vacuum_scale_factor (float4) + autovacuum_vacuum_scale_factor, toast.autovacuum_vacuum_scale_factor (float4) - Per-table value for + Per-table value for parameter. - autovacuum_analyze_threshold (integer) + autovacuum_analyze_threshold (integer) - Per-table value for + Per-table value for parameter. - autovacuum_analyze_scale_factor (float4) + autovacuum_analyze_scale_factor (float4) - Per-table value for + Per-table value for parameter. - autovacuum_vacuum_cost_delay, toast.autovacuum_vacuum_cost_delay (integer) + autovacuum_vacuum_cost_delay, toast.autovacuum_vacuum_cost_delay (integer) - Per-table value for + Per-table value for parameter. - autovacuum_vacuum_cost_limit, toast.autovacuum_vacuum_cost_limit (integer) + autovacuum_vacuum_cost_limit, toast.autovacuum_vacuum_cost_limit (integer) - Per-table value for + Per-table value for parameter. - autovacuum_freeze_min_age, toast.autovacuum_freeze_min_age (integer) + autovacuum_freeze_min_age, toast.autovacuum_freeze_min_age (integer) - Per-table value for + Per-table value for parameter. Note that autovacuum will ignore - per-table autovacuum_freeze_min_age parameters that are + per-table autovacuum_freeze_min_age parameters that are larger than half the - system-wide setting. + system-wide setting. - autovacuum_freeze_max_age, toast.autovacuum_freeze_max_age (integer) + autovacuum_freeze_max_age, toast.autovacuum_freeze_max_age (integer) - Per-table value for + Per-table value for parameter. Note that autovacuum will ignore - per-table autovacuum_freeze_max_age parameters that are + per-table autovacuum_freeze_max_age parameters that are larger than the system-wide setting (it can only be set smaller). @@ -1268,7 +1455,7 @@ FROM ( { numeric_literal | autovacuum_freeze_table_age, toast.autovacuum_freeze_table_age (integer) - Per-table value for + Per-table value for parameter. 
@@ -1278,11 +1465,11 @@ FROM ( { numeric_literal | autovacuum_multixact_freeze_min_age, toast.autovacuum_multixact_freeze_min_age (integer) - Per-table value for + Per-table value for parameter. Note that autovacuum will ignore - per-table autovacuum_multixact_freeze_min_age parameters + per-table autovacuum_multixact_freeze_min_age parameters that are larger than half the - system-wide + system-wide setting. @@ -1293,9 +1480,9 @@ FROM ( { numeric_literal | Per-table value - for parameter. + for parameter. Note that autovacuum will ignore - per-table autovacuum_multixact_freeze_max_age parameters + per-table autovacuum_multixact_freeze_max_age parameters that are larger than the system-wide setting (it can only be set smaller). @@ -1307,7 +1494,7 @@ FROM ( { numeric_literal | Per-table value - for parameter. + for parameter. @@ -1316,7 +1503,7 @@ FROM ( { numeric_literal | log_autovacuum_min_duration, toast.log_autovacuum_min_duration (integer) - Per-table value for + Per-table value for parameter. @@ -1328,7 +1515,7 @@ FROM ( { numeric_literal | Declare the table as an additional catalog table for purposes of logical replication. See - for details. + for details. This parameter cannot be set for TOAST tables. @@ -1339,7 +1526,7 @@ FROM ( { numeric_literal | - + Notes @@ -1348,11 +1535,11 @@ FROM ( { numeric_literal | oid column of that table, to ensure that + on the oid column of that table, to ensure that OIDs in the table will indeed uniquely identify rows even after counter wraparound. Avoid assuming that OIDs are unique across tables; if you need a database-wide unique identifier, use the - combination of tableoid and row OID for the + combination of tableoid and row OID for the purpose. @@ -1369,7 +1556,7 @@ FROM ( { numeric_literal | for more information.) + linkend="sql-createindex"/> for more information.) @@ -1386,12 +1573,12 @@ FROM ( { numeric_literal | - + Examples - Create table films and table - distributors: + Create table films and table + distributors: CREATE TABLE films ( @@ -1455,7 +1642,7 @@ CREATE TABLE distributors ( CREATE TABLE distributors ( did integer, - name varchar(40) + name varchar(40), CONSTRAINT con1 CHECK (did > 100 AND name <> '') ); @@ -1463,7 +1650,7 @@ CREATE TABLE distributors ( Define a primary key table constraint for the table - films: + films: CREATE TABLE films ( @@ -1480,7 +1667,7 @@ CREATE TABLE films ( Define a primary key constraint for table - distributors. The following two examples are + distributors. 
The following two examples are equivalent, the first using the table constraint syntax, the second the column constraint syntax: @@ -1516,7 +1703,7 @@ CREATE TABLE distributors ( - Define two NOT NULL column constraints on the table + Define two NOT NULL column constraints on the table distributors, one of which is explicitly given a name: @@ -1564,7 +1751,7 @@ WITH (fillfactor=70); - Create table circles with an exclusion + Create table circles with an exclusion constraint that prevents any two circles from overlapping: @@ -1576,7 +1763,7 @@ CREATE TABLE circles ( - Create table cinemas in tablespace diskvol1: + Create table cinemas in tablespace diskvol1: CREATE TABLE cinemas ( @@ -1626,6 +1813,16 @@ CREATE TABLE cities ( name text not null, population bigint ) PARTITION BY LIST (left(lower(name), 1)); + + + + Create a hash partitioned table: + +CREATE TABLE orders ( + order_id bigint not null, + cust_id bigint not null, + status text +) PARTITION BY HASH (order_id); @@ -1643,7 +1840,7 @@ CREATE TABLE measurement_y2016m07 CREATE TABLE measurement_ym_older PARTITION OF measurement_year_month - FOR VALUES FROM (MINVALUE, 0) TO (2016, 11); + FOR VALUES FROM (MINVALUE, MINVALUE) TO (2016, 11); CREATE TABLE measurement_ym_y2016m11 PARTITION OF measurement_year_month @@ -1679,10 +1876,30 @@ CREATE TABLE cities_ab CREATE TABLE cities_ab_10000_to_100000 PARTITION OF cities_ab FOR VALUES FROM (10000) TO (100000); + + + Create partitions of a hash partitioned table: + +CREATE TABLE orders_p1 PARTITION OF orders + FOR VALUES WITH (MODULUS 4, REMAINDER 0); +CREATE TABLE orders_p2 PARTITION OF orders + FOR VALUES WITH (MODULUS 4, REMAINDER 1); +CREATE TABLE orders_p3 PARTITION OF orders + FOR VALUES WITH (MODULUS 4, REMAINDER 2); +CREATE TABLE orders_p4 PARTITION OF orders + FOR VALUES WITH (MODULUS 4, REMAINDER 3); + + + + Create a default partition: + +CREATE TABLE cities_partdef + PARTITION OF cities DEFAULT; + - - Compatibility + + Compatibility The CREATE TABLE command conforms to the @@ -1733,8 +1950,8 @@ CREATE TABLE cities_ab_10000_to_100000 The ON COMMIT clause for temporary tables also resembles the SQL standard, but has some differences. - If the ON COMMIT clause is omitted, SQL specifies that the - default behavior is ON COMMIT DELETE ROWS. However, the + If the ON COMMIT clause is omitted, SQL specifies that the + default behavior is ON COMMIT DELETE ROWS. However, the default behavior in PostgreSQL is ON COMMIT PRESERVE ROWS. The ON COMMIT DROP option does not exist in SQL. @@ -1745,15 +1962,15 @@ CREATE TABLE cities_ab_10000_to_100000 Non-deferred Uniqueness Constraints - When a UNIQUE or PRIMARY KEY constraint is + When a UNIQUE or PRIMARY KEY constraint is not deferrable, PostgreSQL checks for uniqueness immediately whenever a row is inserted or modified. The SQL standard says that uniqueness should be enforced only at the end of the statement; this makes a difference when, for example, a single command updates multiple key values. To obtain standard-compliant behavior, declare the constraint as - DEFERRABLE but not deferred (i.e., INITIALLY - IMMEDIATE). Be aware that this can be significantly slower than + DEFERRABLE but not deferred (i.e., INITIALLY + IMMEDIATE). Be aware that this can be significantly slower than immediate uniqueness checking. 
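
   A hedged sketch of the standard-compliant declaration described above
   (table and constraint names are invented, not part of the patch): the
   constraint is DEFERRABLE but left in IMMEDIATE mode, so uniqueness is
   checked once at the end of each statement rather than row by row.

-- Hypothetical example: the multi-row UPDATE below is checked only after
-- the whole statement; with a NOT DEFERRABLE constraint it could fail on
-- a transient duplicate part-way through.
CREATE TABLE seats (
    seat_no integer,
    CONSTRAINT seats_seat_no_key UNIQUE (seat_no) DEFERRABLE INITIALLY IMMEDIATE
);
INSERT INTO seats VALUES (1), (2), (3);
UPDATE seats SET seat_no = seat_no + 1;
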
@@ -1762,8 +1979,8 @@ CREATE TABLE cities_ab_10000_to_100000 Column Check Constraints - The SQL standard says that CHECK column constraints - can only refer to the column they apply to; only CHECK + The SQL standard says that CHECK column constraints + can only refer to the column they apply to; only CHECK table constraints can refer to multiple columns. PostgreSQL does not enforce this restriction; it treats column and table check constraints alike. @@ -1774,7 +1991,7 @@ CREATE TABLE cities_ab_10000_to_100000 <literal>EXCLUDE</literal> Constraint - The EXCLUDE constraint type is a + The EXCLUDE constraint type is a PostgreSQL extension. @@ -1783,7 +2000,7 @@ CREATE TABLE cities_ab_10000_to_100000 <literal>NULL</literal> <quote>Constraint</quote> - The NULL constraint (actually a + The NULL constraint (actually a non-constraint) is a PostgreSQL extension to the SQL standard that is included for compatibility with some other database systems (and for symmetry with the NOT @@ -1792,6 +2009,30 @@ CREATE TABLE cities_ab_10000_to_100000 + + Constraint Naming + + + The SQL standard says that table and domain constraints must have names + that are unique across the schema containing the table or domain. + PostgreSQL is laxer: it only requires + constraint names to be unique across the constraints attached to a + particular table or domain. However, this extra freedom does not exist + for index-based constraints (UNIQUE, + PRIMARY KEY, and EXCLUDE + constraints), because the associated index is named the same as the + constraint, and index names must be unique across all relations within + the same schema. + + + + Currently, PostgreSQL does not record names + for NOT NULL constraints at all, so they are not + subject to the uniqueness restriction. This might change in a future + release. + + + Inheritance @@ -1810,11 +2051,11 @@ CREATE TABLE cities_ab_10000_to_100000 PostgreSQL allows a table of no columns - to be created (for example, CREATE TABLE foo();). This + to be created (for example, CREATE TABLE foo();). This is an extension from the SQL standard, which does not allow zero-column tables. Zero-column tables are not in themselves very useful, but disallowing them creates odd special cases for ALTER TABLE - DROP COLUMN, so it seems cleaner to ignore this spec restriction. + DROP COLUMN, so it seems cleaner to ignore this spec restriction. @@ -1833,10 +2074,10 @@ CREATE TABLE cities_ab_10000_to_100000 - <literal>LIKE</> Clause + <literal>LIKE</literal> Clause - While a LIKE clause exists in the SQL standard, many of the + While a LIKE clause exists in the SQL standard, many of the options that PostgreSQL accepts for it are not in the standard, and some of the standard's options are not implemented by PostgreSQL. @@ -1844,10 +2085,10 @@ CREATE TABLE cities_ab_10000_to_100000 - <literal>WITH</> Clause + <literal>WITH</literal> Clause - The WITH clause is a PostgreSQL + The WITH clause is a PostgreSQL extension; neither storage parameters nor OIDs are in the standard. @@ -1876,19 +2117,19 @@ CREATE TABLE cities_ab_10000_to_100000 - <literal>PARTITION BY</> Clause + <literal>PARTITION BY</literal> Clause - The PARTITION BY clause is a + The PARTITION BY clause is a PostgreSQL extension. - <literal>PARTITION OF</> Clause + <literal>PARTITION OF</literal> Clause - The PARTITION OF clause is a + The PARTITION OF clause is a PostgreSQL extension. 
@@ -1900,11 +2141,11 @@ CREATE TABLE cities_ab_10000_to_100000 See Also - - - - - + + + + + diff --git a/doc/src/sgml/ref/create_table_as.sgml b/doc/src/sgml/ref/create_table_as.sgml index 8e4ada794d..527138e787 100644 --- a/doc/src/sgml/ref/create_table_as.sgml +++ b/doc/src/sgml/ref/create_table_as.sgml @@ -3,7 +3,7 @@ doc/src/sgml/ref/create_table_as.sgml PostgreSQL documentation --> - + CREATE TABLE AS @@ -23,9 +23,9 @@ PostgreSQL documentation CREATE [ [ GLOBAL | LOCAL ] { TEMPORARY | TEMP } | UNLOGGED ] TABLE [ IF NOT EXISTS ] table_name [ (column_name [, ...] ) ] - [ WITH ( storage_parameter [= value] [, ... ] ) | WITH OIDS | WITHOUT OIDS ] + [ WITH ( storage_parameter [= value] [, ... ] ) | WITH OIDS | WITHOUT OIDS ] [ ON COMMIT { PRESERVE ROWS | DELETE ROWS | DROP } ] - [ TABLESPACE tablespace_name ] + [ TABLESPACE tablespace_name ] AS query [ WITH [ NO ] DATA ] @@ -63,7 +63,7 @@ CREATE [ [ GLOBAL | LOCAL ] { TEMPORARY | TEMP } | UNLOGGED ] TABLE [ IF NOT EXI Ignored for compatibility. Use of these keywords is deprecated; - refer to for details. + refer to for details. @@ -71,31 +71,31 @@ CREATE [ [ GLOBAL | LOCAL ] { TEMPORARY | TEMP } | UNLOGGED ] TABLE [ IF NOT EXI - TEMPORARY or TEMP + TEMPORARY or TEMP If specified, the table is created as a temporary table. - Refer to for details. + Refer to for details. - UNLOGGED + UNLOGGED If specified, the table is created as an unlogged table. - Refer to for details. + Refer to for details. - IF NOT EXISTS + IF NOT EXISTS Do not throw an error if a relation with the same name already exists. - A notice is issued in this case. Refer to + A notice is issued in this case. Refer to for details. @@ -121,31 +121,31 @@ CREATE [ [ GLOBAL | LOCAL ] { TEMPORARY | TEMP } | UNLOGGED ] TABLE [ IF NOT EXI - WITH ( storage_parameter [= value] [, ... ] ) + WITH ( storage_parameter [= value] [, ... ] ) This clause specifies optional storage parameters for the new table; see for more - information. The WITH clause - can also include OIDS=TRUE (or just OIDS) + endterm="sql-createtable-storage-parameters-title"/> for more + information. The WITH clause + can also include OIDS=TRUE (or just OIDS) to specify that rows of the new table should have OIDs (object identifiers) assigned to them, or - OIDS=FALSE to specify that the rows should not have OIDs. - See for more information. + OIDS=FALSE to specify that the rows should not have OIDs. + See for more information. - WITH OIDS - WITHOUT OIDS + WITH OIDS + WITHOUT OIDS - These are obsolescent syntaxes equivalent to WITH (OIDS) - and WITH (OIDS=FALSE), respectively. If you wish to give - both an OIDS setting and storage parameters, you must use - the WITH ( ... ) syntax; see above. + These are obsolescent syntaxes equivalent to WITH (OIDS) + and WITH (OIDS=FALSE), respectively. If you wish to give + both an OIDS setting and storage parameters, you must use + the WITH ( ... ) syntax; see above. @@ -175,7 +175,7 @@ CREATE [ [ GLOBAL | LOCAL ] { TEMPORARY | TEMP } | UNLOGGED ] TABLE [ IF NOT EXI All rows in the temporary table will be deleted at the end of each transaction block. Essentially, an automatic is done + linkend="sql-truncate"/> is done at each commit. @@ -195,14 +195,14 @@ CREATE [ [ GLOBAL | LOCAL ] { TEMPORARY | TEMP } | UNLOGGED ] TABLE [ IF NOT EXI - TABLESPACE tablespace_name + TABLESPACE tablespace_name - The tablespace_name is the name + The tablespace_name is the name of the tablespace in which the new table is to be created. If not specified, - is consulted, or - if the table is temporary. 
+ is consulted, or + if the table is temporary. @@ -211,17 +211,17 @@ CREATE [ [ GLOBAL | LOCAL ] { TEMPORARY | TEMP } | UNLOGGED ] TABLE [ IF NOT EXI query - A , TABLE, or - command, or an command that runs a - prepared SELECT, TABLE, or - VALUES query. + A , TABLE, or + command, or an command that runs a + prepared SELECT, TABLE, or + VALUES query. - WITH [ NO ] DATA + WITH [ NO ] DATA This clause specifies whether or not the data produced by the query @@ -239,9 +239,9 @@ CREATE [ [ GLOBAL | LOCAL ] { TEMPORARY | TEMP } | UNLOGGED ] TABLE [ IF NOT EXI This command is functionally similar to , but it is + linkend="sql-selectinto"/>, but it is preferred since it is less likely to be confused with other uses of - the SELECT INTO syntax. Furthermore, CREATE + the SELECT INTO syntax. Furthermore, CREATE TABLE AS offers a superset of the functionality offered by SELECT INTO. @@ -250,7 +250,7 @@ CREATE [ [ GLOBAL | LOCAL ] { TEMPORARY | TEMP } | UNLOGGED ] TABLE [ IF NOT EXI The CREATE TABLE AS command allows the user to explicitly specify whether OIDs should be included. If the presence of OIDs is not explicitly specified, - the configuration variable is + the configuration variable is used. @@ -315,16 +315,16 @@ CREATE TEMP TABLE films_recent WITH (OIDS) ON COMMIT DROP AS - PostgreSQL handles temporary tables in a way + PostgreSQL handles temporary tables in a way rather different from the standard; see - + for details. - The WITH clause is a PostgreSQL + The WITH clause is a PostgreSQL extension; neither storage parameters nor OIDs are in the standard. @@ -343,12 +343,12 @@ CREATE TEMP TABLE films_recent WITH (OIDS) ON COMMIT DROP AS See Also - - - - - - + + + + + + diff --git a/doc/src/sgml/ref/create_tablespace.sgml b/doc/src/sgml/ref/create_tablespace.sgml index cf08408f96..c621ec2c6b 100644 --- a/doc/src/sgml/ref/create_tablespace.sgml +++ b/doc/src/sgml/ref/create_tablespace.sgml @@ -3,7 +3,7 @@ doc/src/sgml/ref/create_tablespace.sgml PostgreSQL documentation --> - + CREATE TABLESPACE @@ -24,7 +24,7 @@ PostgreSQL documentation CREATE TABLESPACE tablespace_name [ OWNER { new_owner | CURRENT_USER | SESSION_USER } ] LOCATION 'directory' - [ WITH ( tablespace_option = value [, ... ] ) ] + [ WITH ( tablespace_option = value [, ... ] ) ] @@ -45,16 +45,16 @@ CREATE TABLESPACE tablespace_name A user with appropriate privileges can pass - tablespace_name to - CREATE DATABASE, CREATE TABLE, - CREATE INDEX or ADD CONSTRAINT to have the data + tablespace_name to + CREATE DATABASE, CREATE TABLE, + CREATE INDEX or ADD CONSTRAINT to have the data files for these objects stored within the specified tablespace. A tablespace cannot be used independently of the cluster in which it - is defined; see . + is defined; see . @@ -92,8 +92,9 @@ CREATE TABLESPACE tablespace_name The directory that will be used for the tablespace. The directory - should be empty and must be owned by the - PostgreSQL system user. The directory must be + must exist (CREATE TABLESPACE will not create it), + should be empty, and must be owned by the + PostgreSQL system user. The directory must be specified by an absolute path name. @@ -104,14 +105,14 @@ CREATE TABLESPACE tablespace_name A tablespace parameter to be set or reset. Currently, the only - available parameters are seq_page_cost, - random_page_cost and effective_io_concurrency. + available parameters are seq_page_cost, + random_page_cost and effective_io_concurrency. 
Setting either value for a particular tablespace will override the planner's usual estimate of the cost of reading pages from tables in that tablespace, as established by the configuration parameters of the - same name (see , - , - ). This may be useful if + same name (see , + , + ). This may be useful if one tablespace is located on a disk which is faster or slower than the remainder of the I/O subsystem. @@ -128,7 +129,7 @@ CREATE TABLESPACE tablespace_name - CREATE TABLESPACE cannot be executed inside a transaction + CREATE TABLESPACE cannot be executed inside a transaction block. @@ -137,15 +138,23 @@ CREATE TABLESPACE tablespace_name Examples - Create a tablespace dbspace at /data/dbs: + To create a tablespace dbspace at file system location + /data/dbs, first create the directory using operating + system facilities and set the correct ownership: + +mkdir /data/dbs +chown postgres:postgres /data/dbs + + Then issue the tablespace creation command inside + PostgreSQL: CREATE TABLESPACE dbspace LOCATION '/data/dbs'; - Create a tablespace indexspace at /data/indexes - owned by user genevieve: + To create a tablespace owned by a different database user, use a command + like this: CREATE TABLESPACE indexspace OWNER genevieve LOCATION '/data/indexes'; @@ -155,7 +164,7 @@ CREATE TABLESPACE indexspace OWNER genevieve LOCATION '/data/indexes'; Compatibility - CREATE TABLESPACE is a PostgreSQL + CREATE TABLESPACE is a PostgreSQL extension. @@ -164,11 +173,11 @@ CREATE TABLESPACE indexspace OWNER genevieve LOCATION '/data/indexes'; See Also - - - - - + + + + + diff --git a/doc/src/sgml/ref/create_transform.sgml b/doc/src/sgml/ref/create_transform.sgml index 647c3b9f05..4bce36b41a 100644 --- a/doc/src/sgml/ref/create_transform.sgml +++ b/doc/src/sgml/ref/create_transform.sgml @@ -1,6 +1,6 @@ - + CREATE TRANSFORM @@ -144,7 +144,7 @@ CREATE [ OR REPLACE ] TRANSFORM FOR type_name LANGUAG Notes - Use to remove transforms. + Use to remove transforms. @@ -157,7 +157,7 @@ CREATE [ OR REPLACE ] TRANSFORM FOR type_name LANGUAG CREATE TYPE hstore ...; -CREATE LANGUAGE plpythonu ...; +CREATE EXTENSION plpythonu; Then create the necessary functions: @@ -176,7 +176,7 @@ CREATE TRANSFORM FOR hstore LANGUAGE plpythonu ( TO SQL WITH FUNCTION plpython_to_hstore(internal) ); - In practice, these commands would be wrapped up in extensions. + In practice, these commands would be wrapped up in an extension. @@ -201,10 +201,10 @@ CREATE TRANSFORM FOR hstore LANGUAGE plpythonu ( See Also - , - , - , - + , + , + , + diff --git a/doc/src/sgml/ref/create_trigger.sgml b/doc/src/sgml/ref/create_trigger.sgml index 18efe6a9ed..6514ffc6ae 100644 --- a/doc/src/sgml/ref/create_trigger.sgml +++ b/doc/src/sgml/ref/create_trigger.sgml @@ -3,7 +3,7 @@ doc/src/sgml/ref/create_trigger.sgml PostgreSQL documentation --> - + CREATE TRIGGER @@ -26,14 +26,14 @@ PostgreSQL documentation -CREATE [ CONSTRAINT ] TRIGGER name { BEFORE | AFTER | INSTEAD OF } { event [ OR ... ] } - ON table_name +CREATE [ CONSTRAINT ] TRIGGER name { BEFORE | AFTER | INSTEAD OF } { event [ OR ... ] } + ON table_name [ FROM referenced_table_name ] [ NOT DEFERRABLE | [ DEFERRABLE ] [ INITIALLY IMMEDIATE | INITIALLY DEFERRED ] ] - [ REFERENCING { { OLD | NEW } TABLE [ AS ] transition_relation_name } [ ... ] ] + [ REFERENCING { { OLD | NEW } TABLE [ AS ] transition_relation_name } [ ... 
] ] [ FOR [ EACH ] { ROW | STATEMENT } ] [ WHEN ( condition ) ] - EXECUTE PROCEDURE function_name ( arguments ) + EXECUTE { FUNCTION | PROCEDURE } function_name ( arguments ) where event can be one of: @@ -52,7 +52,7 @@ CREATE [ CONSTRAINT ] TRIGGER name trigger will be associated with the specified table, view, or foreign table and will execute the specified function function_name when - certain events occur. + certain operations are performed on that table. @@ -82,17 +82,14 @@ CREATE [ CONSTRAINT ] TRIGGER name executes once for any given operation, regardless of how many rows it modifies (in particular, an operation that modifies zero rows will still result in the execution of any applicable FOR - EACH STATEMENT triggers). Note that with an - INSERT with an ON CONFLICT DO UPDATE - clause, both INSERT and - UPDATE statement level trigger will be fired. + EACH STATEMENT triggers). - Triggers that are specified to fire INSTEAD OF the trigger - event must be marked FOR EACH ROW, and can only be defined - on views. BEFORE and AFTER triggers on a view - must be marked as FOR EACH STATEMENT. + Triggers that are specified to fire INSTEAD OF the trigger + event must be marked FOR EACH ROW, and can only be defined + on views. BEFORE and AFTER triggers on a view + must be marked as FOR EACH STATEMENT. @@ -118,35 +115,35 @@ CREATE [ CONSTRAINT ] TRIGGER name - BEFORE - INSERT/UPDATE/DELETE + BEFORE + INSERT/UPDATE/DELETE Tables and foreign tables Tables, views, and foreign tables - TRUNCATE + TRUNCATE Tables - AFTER - INSERT/UPDATE/DELETE + AFTER + INSERT/UPDATE/DELETE Tables and foreign tables Tables, views, and foreign tables - TRUNCATE + TRUNCATE Tables - INSTEAD OF - INSERT/UPDATE/DELETE + INSTEAD OF + INSERT/UPDATE/DELETE Views - TRUNCATE + TRUNCATE @@ -155,11 +152,11 @@ CREATE [ CONSTRAINT ] TRIGGER name - Also, a trigger definition can specify a Boolean WHEN + Also, a trigger definition can specify a Boolean WHEN condition, which will be tested to see whether the trigger should - be fired. In row-level triggers the WHEN condition can + be fired. In row-level triggers the WHEN condition can examine the old and/or new values of columns of the row. Statement-level - triggers can also have WHEN conditions, although the feature + triggers can also have WHEN conditions, although the feature is not so useful for them since the condition cannot refer to any values in the table. @@ -170,36 +167,48 @@ CREATE [ CONSTRAINT ] TRIGGER name - When the CONSTRAINT option is specified, this command creates a - constraint trigger. This is the same as a regular trigger + When the CONSTRAINT option is specified, this command creates a + constraint trigger. This is the same as a regular trigger except that the timing of the trigger firing can be adjusted using - . - Constraint triggers must be AFTER ROW triggers on tables. They + . + Constraint triggers must be AFTER ROW triggers on plain + tables (not foreign tables). They can be fired either at the end of the statement causing the triggering event, or at the end of the containing transaction; in the latter case they - are said to be deferred. A pending deferred-trigger firing + are said to be deferred. A pending deferred-trigger firing can also be forced to happen immediately by using SET - CONSTRAINTS. Constraint triggers are expected to raise an exception + CONSTRAINTS. Constraint triggers are expected to raise an exception when the constraints they implement are violated. 
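
   As a non-authoritative sketch (the table, trigger, and function names are
   invented, not part of the patch), a statement-level AFTER trigger can read
   the after-images of an UPDATE through a transition relation declared with
   REFERENCING:

-- Hypothetical example: new_rows exposes all rows changed by the
-- triggering UPDATE statement to the trigger function.
CREATE TABLE products (id integer PRIMARY KEY, price numeric);

CREATE FUNCTION log_price_changes() RETURNS trigger
    LANGUAGE plpgsql AS $$
BEGIN
    RAISE NOTICE 'rows updated: %', (SELECT count(*) FROM new_rows);
    RETURN NULL;   -- return value is ignored for AFTER statement triggers
END;
$$;

CREATE TRIGGER audit_price_changes
    AFTER UPDATE ON products
    REFERENCING NEW TABLE AS new_rows
    FOR EACH STATEMENT
    EXECUTE FUNCTION log_price_changes();
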
- The REFERENCING option is only allowed for an AFTER - trigger which is not a constraint trigger. OLD TABLE may only - be specified once, and only on a trigger which can fire on - UPDATE or DELETE. NEW TABLE may only - be specified once, and only on a trigger which can fire on - UPDATE or INSERT. + The REFERENCING option enables collection + of transition relations, which are row sets that include all + of the rows inserted, deleted, or modified by the current SQL statement. + This feature lets the trigger see a global view of what the statement did, + not just one row at a time. This option is only allowed for + an AFTER trigger that is not a constraint trigger; also, if + the trigger is an UPDATE trigger, it must not specify + a column_name list. + OLD TABLE may only be specified once, and only for a trigger + that can fire on UPDATE or DELETE; it creates a + transition relation containing the before-images of all rows + updated or deleted by the statement. + Similarly, NEW TABLE may only be specified once, and only for + a trigger that can fire on UPDATE or INSERT; + it creates a transition relation containing the after-images + of all rows updated or inserted by the statement. SELECT does not modify any rows so you cannot - create SELECT triggers. Rules and views are more - appropriate in such cases. + create SELECT triggers. Rules and views may provide + workable solutions to problems that seem to need SELECT + triggers. - Refer to for more information about triggers. + Refer to for more information about triggers. @@ -216,7 +225,7 @@ CREATE [ CONSTRAINT ] TRIGGER name The name cannot be schema-qualified — the trigger inherits the schema of its table. For a constraint trigger, this is also the name to use when modifying the trigger's behavior using - SET CONSTRAINTS. + SET CONSTRAINTS. @@ -229,7 +238,7 @@ CREATE [ CONSTRAINT ] TRIGGER name Determines whether the function is called before, after, or instead of the event. A constraint trigger can only be specified as - AFTER. + AFTER. @@ -241,7 +250,8 @@ CREATE [ CONSTRAINT ] TRIGGER name One of INSERT, UPDATE, DELETE, or TRUNCATE; this specifies the event that will fire the trigger. Multiple - events can be specified using OR. + events can be specified using OR, except when + transition relations are requested. @@ -251,10 +261,13 @@ CREATE [ CONSTRAINT ] TRIGGER name UPDATE OF column_name1 [, column_name2 ... ] The trigger will only fire if at least one of the listed columns - is mentioned as a target of the UPDATE command. + is mentioned as a target of the UPDATE command. - INSTEAD OF UPDATE events do not support lists of columns. + + INSTEAD OF UPDATE events do not allow a list of columns. + A column list cannot be specified when requesting transition relations, + either. @@ -270,7 +283,7 @@ UPDATE OF column_name1 [, column_name2 - referenced_table_name + referenced_table_name The (possibly schema-qualified) name of another table referenced by the @@ -289,7 +302,7 @@ UPDATE OF column_name1 [, column_name2 The default timing of the trigger. - See the documentation for details of + See the documentation for details of these constraint options. This can only be specified for constraint triggers. @@ -300,12 +313,9 @@ UPDATE OF column_name1 [, column_name2REFERENCING - This immediately precedes the declaration of one or two relations which - can be used to read the before and/or after images of all rows directly - affected by the triggering statement. 
An AFTER EACH ROW - trigger is allowed to use both these transition relation names and the - row names (OLD and NEW) which reference each - individual row for which the trigger fires. + This keyword immediately precedes the declaration of one or two + relation names that provide access to the transition relations of the + triggering statement. @@ -315,17 +325,19 @@ UPDATE OF column_name1 [, column_name2NEW TABLE - This specifies whether the named relation contains the before or after - images for rows affected by the statement which fired the trigger. + This clause indicates whether the following relation name is for the + before-image transition relation or the after-image transition + relation. - transition_relation_name + transition_relation_name - The (unqualified) name to be used within the trigger for this relation. + The (unqualified) name to be used within the trigger for this + transition relation. @@ -336,11 +348,11 @@ UPDATE OF column_name1 [, column_name2 - This specifies whether the trigger procedure should be fired + This specifies whether the trigger function should be fired once for every row affected by the trigger event, or just once per SQL statement. If neither is specified, FOR EACH STATEMENT is the default. Constraint triggers can only - be specified FOR EACH ROW. + be specified FOR EACH ROW. @@ -350,20 +362,20 @@ UPDATE OF column_name1 [, column_name2 A Boolean expression that determines whether the trigger function - will actually be executed. If WHEN is specified, the + will actually be executed. If WHEN is specified, the function will only be called if the condition returns true. - In FOR EACH ROW triggers, the WHEN + class="parameter">condition returns true. + In FOR EACH ROW triggers, the WHEN condition can refer to columns of the old and/or new row values by writing OLD.column_name or NEW.column_name respectively. - Of course, INSERT triggers cannot refer to OLD - and DELETE triggers cannot refer to NEW. + Of course, INSERT triggers cannot refer to OLD + and DELETE triggers cannot refer to NEW. - INSTEAD OF triggers do not support WHEN + INSTEAD OF triggers do not support WHEN conditions. @@ -373,7 +385,7 @@ UPDATE OF column_name1 [, column_name2 - Note that for constraint triggers, evaluation of the WHEN + Note that for constraint triggers, evaluation of the WHEN condition is not deferred, but occurs immediately after the row update operation is performed. If the condition does not evaluate to true then the trigger is not queued for deferred execution. @@ -386,9 +398,17 @@ UPDATE OF column_name1 [, column_name2 A user-supplied function that is declared as taking no arguments - and returning type trigger, which is executed when + and returning type trigger, which is executed when the trigger fires. + + + In the syntax of CREATE TRIGGER, the keywords + FUNCTION and PROCEDURE are + equivalent, but the referenced function must in any case be a function, + not a procedure. The use of the keyword PROCEDURE + here is historical and deprecated. + @@ -410,7 +430,7 @@ UPDATE OF column_name1 [, column_name2 - + Notes @@ -420,38 +440,38 @@ UPDATE OF column_name1 [, column_name2 - Use to remove a trigger. + Use to remove a trigger. A column-specific trigger (one defined using the UPDATE OF column_name syntax) will fire when any - of its columns are listed as targets in the UPDATE - command's SET list. It is possible for a column's value + of its columns are listed as targets in the UPDATE + command's SET list. 
It is possible for a column's value to change even when the trigger is not fired, because changes made to the - row's contents by BEFORE UPDATE triggers are not considered. - Conversely, a command such as UPDATE ... SET x = x ... - will fire a trigger on column x, even though the column's + row's contents by BEFORE UPDATE triggers are not considered. + Conversely, a command such as UPDATE ... SET x = x ... + will fire a trigger on column x, even though the column's value did not change. - In a BEFORE trigger, the WHEN condition is + In a BEFORE trigger, the WHEN condition is evaluated just before the function is or would be executed, so using - WHEN is not materially different from testing the same + WHEN is not materially different from testing the same condition at the beginning of the trigger function. Note in particular - that the NEW row seen by the condition is the current value, - as possibly modified by earlier triggers. Also, a BEFORE - trigger's WHEN condition is not allowed to examine the - system columns of the NEW row (such as oid), + that the NEW row seen by the condition is the current value, + as possibly modified by earlier triggers. Also, a BEFORE + trigger's WHEN condition is not allowed to examine the + system columns of the NEW row (such as oid), because those won't have been set yet. - In an AFTER trigger, the WHEN condition is + In an AFTER trigger, the WHEN condition is evaluated just after the row update occurs, and it determines whether an event is queued to fire the trigger at the end of statement. So when an - AFTER trigger's WHEN condition does not return + AFTER trigger's WHEN condition does not return true, it is not necessary to queue an event nor to re-fetch the row at end of statement. This can result in significant speedups in statements that modify many rows, if the trigger only needs to be fired for a few of the @@ -459,54 +479,104 @@ UPDATE OF column_name1 [, column_name2 - Modifying a partitioned table or a table with inheritance children fires - statement-level triggers directly attached to that table, but not - statement-level triggers for its partitions or child tables. In contrast, - row-level triggers are fired for all affected partitions or child tables. - If a statement-level trigger has been defined with transition relations - named by a REFERENCING clause, then before and after - images of rows are visible from all affected partitions or child tables. - In the case of inheritance children, the row images include only columns - that are present in the table that the trigger is attached to. Currently, - row-level triggers with transition relations cannot be defined on - partitions or inheritance child tables. + In some cases it is possible for a single SQL command to fire more than + one kind of trigger. For instance an INSERT with + an ON CONFLICT DO UPDATE clause may cause both insert and + update operations, so it will fire both kinds of triggers as needed. + The transition relations supplied to triggers are + specific to their event type; thus an INSERT trigger + will see only the inserted rows, while an UPDATE + trigger will see only the updated rows. + + + + Row updates or deletions caused by foreign-key enforcement actions, such + as ON UPDATE CASCADE or ON DELETE SET NULL, are + treated as part of the SQL command that caused them (note that such + actions are never deferred). 
Relevant triggers on the affected table will + be fired, so that this provides another way in which a SQL command might + fire triggers not directly matching its type. In simple cases, triggers + that request transition relations will see all changes caused in their + table by a single original SQL command as a single transition relation. + However, there are cases in which the presence of an AFTER ROW + trigger that requests transition relations will cause the foreign-key + enforcement actions triggered by a single SQL command to be split into + multiple steps, each with its own transition relation(s). In such cases, + any statement-level triggers that are present will be fired once per + creation of a transition relation set, ensuring that the triggers see + each affected row in a transition relation once and only once. + + + + Statement-level triggers on a view are fired only if the action on the + view is handled by a row-level INSTEAD OF trigger. + If the action is handled by an INSTEAD rule, then + whatever statements are emitted by the rule are executed in place of the + original statement naming the view, so that the triggers that will be + fired are those on tables named in the replacement statements. + Similarly, if the view is automatically updatable, then the action is + handled by automatically rewriting the statement into an action on the + view's base table, so that the base table's statement-level triggers are + the ones that are fired. + + + + Creating a row-level trigger on a partitioned table will cause identical + triggers to be created in all its existing partitions; and any partitions + created or attached later will contain an identical trigger, too. + Triggers on partitioned tables may only be AFTER. + + + + Modifying a partitioned table or a table with inheritance children fires + statement-level triggers attached to the explicitly named table, but not + statement-level triggers for its partitions or child tables. In contrast, + row-level triggers are fired on the rows in affected partitions or + child tables, even if they are not explicitly named in the query. + If a statement-level trigger has been defined with transition relations + named by a REFERENCING clause, then before and after + images of rows are visible from all affected partitions or child tables. + In the case of inheritance children, the row images include only columns + that are present in the table that the trigger is attached to. Currently, + row-level triggers with transition relations cannot be defined on + partitions or inheritance child tables. In PostgreSQL versions before 7.3, it was necessary to declare trigger functions as returning the placeholder - type opaque, rather than trigger. To support loading - of old dump files, CREATE TRIGGER will accept a function - declared as returning opaque, but it will issue a notice and - change the function's declared return type to trigger. + type opaque, rather than trigger. To support loading + of old dump files, CREATE TRIGGER will accept a function + declared as returning opaque, but it will issue a notice and + change the function's declared return type to trigger. 
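
 
    As a sketch of the partitioned-table behavior described above, assuming a
    hypothetical partitioned table measurement and a hypothetical
    log_measurement_change() trigger function, a single row-level trigger
    created on the parent is cloned onto every existing and future partition, and it
    must use AFTER timing:
 

-- created once on the partitioned parent; identical triggers appear on each partition
CREATE TRIGGER log_measurement_changes
    AFTER INSERT OR UPDATE OR DELETE ON measurement
    FOR EACH ROW
    EXECUTE FUNCTION log_measurement_change();
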
- + Examples - Execute the function check_account_update whenever - a row of the table accounts is about to be updated: + Execute the function check_account_update whenever + a row of the table accounts is about to be updated: CREATE TRIGGER check_update BEFORE UPDATE ON accounts FOR EACH ROW - EXECUTE PROCEDURE check_account_update(); + EXECUTE FUNCTION check_account_update(); - The same, but only execute the function if column balance - is specified as a target in the UPDATE command: + The same, but only execute the function if column balance + is specified as a target in the UPDATE command: CREATE TRIGGER check_update BEFORE UPDATE OF balance ON accounts FOR EACH ROW - EXECUTE PROCEDURE check_account_update(); + EXECUTE FUNCTION check_account_update(); - This form only executes the function if column balance + This form only executes the function if column balance has in fact changed value: @@ -514,10 +584,10 @@ CREATE TRIGGER check_update BEFORE UPDATE ON accounts FOR EACH ROW WHEN (OLD.balance IS DISTINCT FROM NEW.balance) - EXECUTE PROCEDURE check_account_update(); + EXECUTE FUNCTION check_account_update(); - Call a function to log updates of accounts, but only if + Call a function to log updates of accounts, but only if something changed: @@ -525,21 +595,21 @@ CREATE TRIGGER log_update AFTER UPDATE ON accounts FOR EACH ROW WHEN (OLD.* IS DISTINCT FROM NEW.*) - EXECUTE PROCEDURE log_account_update(); + EXECUTE FUNCTION log_account_update(); - Execute the function view_insert_row for each row to insert + Execute the function view_insert_row for each row to insert rows into the tables underlying a view: CREATE TRIGGER view_insert INSTEAD OF INSERT ON my_view FOR EACH ROW - EXECUTE PROCEDURE view_insert_row(); + EXECUTE FUNCTION view_insert_row(); - Execute the function check_transfer_balances_to_zero for each - statement to confirm that the transfer rows offset to a net of + Execute the function check_transfer_balances_to_zero for each + statement to confirm that the transfer rows offset to a net of zero: @@ -547,10 +617,10 @@ CREATE TRIGGER transfer_insert AFTER INSERT ON transfer REFERENCING NEW TABLE AS inserted FOR EACH STATEMENT - EXECUTE PROCEDURE check_transfer_balances_to_zero(); + EXECUTE FUNCTION check_transfer_balances_to_zero(); - Execute the function check_matching_pairs for each row to + Execute the function check_matching_pairs for each row to confirm that changes are made to matching pairs at the same time (by the same statement): @@ -559,17 +629,17 @@ CREATE TRIGGER paired_items_update AFTER UPDATE ON paired_items REFERENCING NEW TABLE AS newtab OLD TABLE AS oldtab FOR EACH ROW - EXECUTE PROCEDURE check_matching_pairs(); + EXECUTE FUNCTION check_matching_pairs(); - contains a complete example of a trigger + contains a complete example of a trigger function written in C. - + Compatibility - + CREATE TEXT SEARCH CONFIGURATION @@ -57,7 +57,7 @@ CREATE TEXT SEARCH CONFIGURATION name - Refer to for further information. + Refer to for further information. @@ -99,7 +99,7 @@ CREATE TEXT SEARCH CONFIGURATION nameNotes - The PARSER and COPY options are mutually + The PARSER and COPY options are mutually exclusive, because when an existing configuration is copied, its parser selection is copied too. 
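
 
    For instance, a new configuration is usually created by copying an existing one,
    which brings along both its parser and its token mappings; specifying
    PARSER instead starts from an empty set of mappings, and the two
    options cannot be combined. A minimal sketch (the configuration name is arbitrary):
 

-- copies the parser and all token-to-dictionary mappings of the built-in english configuration
CREATE TEXT SEARCH CONFIGURATION my_english ( COPY = pg_catalog.english );
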
@@ -119,8 +119,8 @@ CREATE TEXT SEARCH CONFIGURATION nameSee Also - - + + diff --git a/doc/src/sgml/ref/create_tsdictionary.sgml b/doc/src/sgml/ref/create_tsdictionary.sgml index 9c95c11608..0608104821 100644 --- a/doc/src/sgml/ref/create_tsdictionary.sgml +++ b/doc/src/sgml/ref/create_tsdictionary.sgml @@ -3,7 +3,7 @@ doc/src/sgml/ref/create_tsdictionary.sgml PostgreSQL documentation --> - + CREATE TEXT SEARCH DICTIONARY @@ -50,7 +50,7 @@ CREATE TEXT SEARCH DICTIONARY name - Refer to for further information. + Refer to for further information. @@ -134,8 +134,8 @@ CREATE TEXT SEARCH DICTIONARY my_russian ( See Also - - + + diff --git a/doc/src/sgml/ref/create_tsparser.sgml b/doc/src/sgml/ref/create_tsparser.sgml index 044581f6f2..088d92323a 100644 --- a/doc/src/sgml/ref/create_tsparser.sgml +++ b/doc/src/sgml/ref/create_tsparser.sgml @@ -3,7 +3,7 @@ doc/src/sgml/ref/create_tsparser.sgml PostgreSQL documentation --> - + CREATE TEXT SEARCH PARSER @@ -55,7 +55,7 @@ CREATE TEXT SEARCH PARSER name ( - Refer to for further information. + Refer to for further information. @@ -146,8 +146,8 @@ CREATE TEXT SEARCH PARSER name ( See Also - - + + diff --git a/doc/src/sgml/ref/create_tstemplate.sgml b/doc/src/sgml/ref/create_tstemplate.sgml index 360ad41f35..5b82d5564b 100644 --- a/doc/src/sgml/ref/create_tstemplate.sgml +++ b/doc/src/sgml/ref/create_tstemplate.sgml @@ -3,7 +3,7 @@ doc/src/sgml/ref/create_tstemplate.sgml PostgreSQL documentation --> - + CREATE TEXT SEARCH TEMPLATE @@ -49,14 +49,14 @@ CREATE TEXT SEARCH TEMPLATE name ( TEMPLATE. This restriction is made because an erroneous text search template definition could confuse or even crash the server. The reason for separating templates from dictionaries is that a template - encapsulates the unsafe aspects of defining a dictionary. + encapsulates the unsafe aspects of defining a dictionary. The parameters that can be set when defining a dictionary are safe for unprivileged users to set, and so creating a dictionary need not be a privileged operation. - Refer to for further information. + Refer to for further information. @@ -119,8 +119,8 @@ CREATE TEXT SEARCH TEMPLATE name ( See Also - - + + diff --git a/doc/src/sgml/ref/create_type.sgml b/doc/src/sgml/ref/create_type.sgml index 7146c4a27b..fa9b520b24 100644 --- a/doc/src/sgml/ref/create_type.sgml +++ b/doc/src/sgml/ref/create_type.sgml @@ -3,7 +3,7 @@ doc/src/sgml/ref/create_type.sgml PostgreSQL documentation --> - + CREATE TYPE @@ -22,7 +22,7 @@ PostgreSQL documentation CREATE TYPE name AS - ( [ attribute_name data_type [ COLLATE collation ] [, ... ] ] ) + ( [ attribute_name data_type [ COLLATE collation ] [, ... ] ] ) CREATE TYPE name AS ENUM ( [ 'label' [, ... ] ] ) @@ -81,8 +81,8 @@ CREATE TYPE name There are five forms of CREATE TYPE, as shown in the syntax synopsis above. They respectively create a composite - type, an enum type, a range type, a - base type, or a shell type. The first four + type, an enum type, a range type, a + base type, or a shell type. The first four of these are discussed in turn below. A shell type is simply a placeholder for a type to be defined later; it is created by issuing CREATE TYPE with no parameters except for the type name. Shell types @@ -111,24 +111,24 @@ CREATE TYPE name - + Enumerated Types The second form of CREATE TYPE creates an enumerated - (enum) type, as described in . + (enum) type, as described in . 
Enum types take a list of one or more quoted labels, each of which must be less than NAMEDATALEN bytes long (64 bytes in a standard PostgreSQL build). - + Range Types The third form of CREATE TYPE creates a new - range type, as described in . + range type, as described in . @@ -148,13 +148,13 @@ CREATE TYPE name function must take one argument of the range type being defined, and return a value of the same type. This is used to convert range values to a canonical form, when applicable. See for more information. Creating a + linkend="rangetypes-defining"/> for more information. Creating a canonical function is a bit tricky, since it must be defined before the range type can be declared. To do this, you must first create a shell type, which is a placeholder type that has no properties except a name and an owner. This is done by issuing the command CREATE TYPE - name, with no additional parameters. Then + name, with no additional parameters. Then the function can be declared using the shell type as argument and result, and finally the range type can be declared using the same name. This automatically replaces the shell type entry with a valid range type. @@ -167,7 +167,7 @@ CREATE TYPE name and return a double precision value representing the difference between the two given values. While this is optional, providing it allows much greater efficiency of GiST indexes on columns of - the range type. See for more + the range type. See for more information. @@ -211,7 +211,7 @@ CREATE TYPE name The first argument is the input text as a C string, the second argument is the type's own OID (except for array types, which instead receive their element type's OID), - and the third is the typmod of the destination column, if known + and the third is the typmod of the destination column, if known (-1 will be passed if not). The input function must return a value of the data type itself. Usually, an input function should be declared STRICT; if it is not, @@ -264,12 +264,12 @@ CREATE TYPE name You should at this point be wondering how the input and output functions can be declared to have results or arguments of the new type, when they have to be created before the new type can be created. The answer is that - the type should first be defined as a shell type, which is a + the type should first be defined as a shell type, which is a placeholder type that has no properties except a name and an owner. This is done by issuing the command CREATE TYPE - name, with no additional parameters. Then the + name, with no additional parameters. Then the C I/O functions can be defined referencing the shell type. Finally, - CREATE TYPE with a full definition replaces the shell entry + CREATE TYPE with a full definition replaces the shell entry with a complete, valid type definition, after which the new type can be used normally. @@ -279,23 +279,23 @@ CREATE TYPE name type_modifier_input_function and type_modifier_output_function are needed if the type supports modifiers, that is optional constraints - attached to a type declaration, such as char(5) or - numeric(30,2). PostgreSQL allows + attached to a type declaration, such as char(5) or + numeric(30,2). PostgreSQL allows user-defined types to take one or more simple constants or identifiers as modifiers. However, this information must be capable of being packed into a single non-negative integer value for storage in the system catalogs. 
The type_modifier_input_function - is passed the declared modifier(s) in the form of a cstring + is passed the declared modifier(s) in the form of a cstring array. It must check the values for validity (throwing an error if they are wrong), and if they are correct, return a single non-negative - integer value that will be stored as the column typmod. + integer value that will be stored as the column typmod. Type modifiers will be rejected if the type does not have a type_modifier_input_function. The type_modifier_output_function converts the internal integer typmod value back to the correct form for - user display. It must return a cstring value that is the exact - string to append to the type name; for example numeric's - function might return (30,2). + user display. It must return a cstring value that is the exact + string to append to the type name; for example numeric's + function might return (30,2). It is allowed to omit the type_modifier_output_function, in which case the default display format is just the stored typmod integer @@ -305,14 +305,14 @@ CREATE TYPE name The optional analyze_function performs type-specific statistics collection for columns of the data type. - By default, ANALYZE will attempt to gather statistics using - the type's equals and less-than operators, if there + By default, ANALYZE will attempt to gather statistics using + the type's equals and less-than operators, if there is a default b-tree operator class for the type. For non-scalar types this behavior is likely to be unsuitable, so it can be overridden by specifying a custom analysis function. The analysis function must be - declared to take a single argument of type internal, and return - a boolean result. The detailed API for analysis functions appears - in src/include/commands/vacuum.h. + declared to take a single argument of type internal, and return + a boolean result. The detailed API for analysis functions appears + in src/include/commands/vacuum.h. @@ -327,10 +327,10 @@ CREATE TYPE name positive integer, or variable-length, indicated by setting internallength to VARIABLE. (Internally, this is represented - by setting typlen to -1.) The internal representation of all + by setting typlen to -1.) The internal representation of all variable-length types must start with a 4-byte integer giving the total length of this value of the type. (Note that the length field is often - encoded, as described in ; it's unwise + encoded, as described in ; it's unwise to access it directly.) @@ -338,7 +338,7 @@ CREATE TYPE name The optional flag PASSEDBYVALUE indicates that values of this data type are passed by value, rather than by reference. Types passed by value must be fixed-length, and their internal - representation cannot be larger than the size of the Datum type + representation cannot be larger than the size of the Datum type (4 bytes on some machines, 8 bytes on others). @@ -347,7 +347,7 @@ CREATE TYPE name specifies the storage alignment required for the data type. The allowed values equate to alignment on 1, 2, 4, or 8 byte boundaries. Note that variable-length types must have an alignment of at least - 4, since they necessarily contain an int4 as their first component. + 4, since they necessarily contain an int4 as their first component. @@ -372,12 +372,12 @@ CREATE TYPE name All storage values other than plain imply that the functions of the data type - can handle values that have been toasted, as described - in and . + can handle values that have been toasted, as described + in and . 
The specific other value given merely determines the default TOAST storage strategy for columns of a toastable data type; users can pick other strategies for individual columns using ALTER TABLE - SET STORAGE. + SET STORAGE. @@ -389,9 +389,9 @@ CREATE TYPE name alignment, and storage are copied from the named type. (It is possible, though usually undesirable, to override - some of these values by specifying them along with the LIKE + some of these values by specifying them along with the LIKE clause.) Specifying representation this way is especially useful when - the low-level implementation of the new type piggybacks on an + the low-level implementation of the new type piggybacks on an existing type in some fashion. @@ -400,15 +400,15 @@ CREATE TYPE name preferred parameters can be used to help control which implicit cast will be applied in ambiguous situations. Each data type belongs to a category named by a single ASCII - character, and each type is either preferred or not within its + character, and each type is either preferred or not within its category. The parser will prefer casting to preferred types (but only from other types within the same category) when this rule is helpful in resolving overloaded functions or operators. For more details see . For types that have no implicit casts to or from any + linkend="typeconv"/>. For types that have no implicit casts to or from any other types, it is sufficient to leave these settings at the defaults. However, for a group of related types that have implicit casts, it is often helpful to mark them all as belonging to a category and select one or two - of the most general types as being preferred within the category. + of the most general types as being preferred within the category. The category parameter is especially useful when adding a user-defined type to an existing built-in category, such as the numeric or string types. However, it is also @@ -426,7 +426,7 @@ CREATE TYPE name To indicate that a type is an array, specify the type of the array - elements using the ELEMENT key word. For example, to + elements using the ELEMENT key word. For example, to define an array of 4-byte integers (int4), specify ELEMENT = int4. More details about array types appear below. @@ -465,26 +465,26 @@ CREATE TYPE name so generated collides with an existing type name, the process is repeated until a non-colliding name is found.) This implicitly-created array type is variable length and uses the - built-in input and output functions array_in and - array_out. The array type tracks any changes in its + built-in input and output functions array_in and + array_out. The array type tracks any changes in its element type's owner or schema, and is dropped if the element type is. - You might reasonably ask why there is an option, if the system makes the correct array type automatically. - The only case where it's useful to use is when you are making a fixed-length type that happens to be internally an array of a number of identical things, and you want to allow these things to be accessed directly by subscripting, in addition to whatever operations you plan - to provide for the type as a whole. For example, type point + to provide for the type as a whole. For example, type point is represented as just two floating-point numbers, which can be accessed - using point[0] and point[1]. + using point[0] and point[1]. Note that this facility only works for fixed-length types whose internal form is exactly a sequence of identical fixed-length fields. 
A subscriptable variable-length type must have the generalized internal representation - used by array_in and array_out. + used by array_in and array_out. For historical reasons (i.e., this is clearly wrong but it's far too late to change it), subscripting of fixed-length array types starts from zero, rather than from one as for variable-length arrays. @@ -697,7 +697,7 @@ CREATE TYPE name alignment, and storage are copied from that type, unless overridden by explicit - specification elsewhere in this CREATE TYPE command. + specification elsewhere in this CREATE TYPE command. @@ -707,9 +707,9 @@ CREATE TYPE name The category code (a single ASCII character) for this type. - The default is 'U' for user-defined type. + The default is 'U' for user-defined type. Other standard category codes can be found in - . You may also choose + . You may also choose other ASCII characters in order to create custom categories. @@ -769,7 +769,7 @@ CREATE TYPE name - + Notes @@ -779,7 +779,7 @@ CREATE TYPE name This is usually not an issue for the sorts of functions that are useful in a type definition. But you might want to think twice before designing a type - in a way that would require secret information to be used + in a way that would require secret information to be used while converting it to or from external form. @@ -792,7 +792,7 @@ CREATE TYPE name this in case of maximum-length names or collisions with user type names that begin with underscore. Writing code that depends on this convention is therefore deprecated. Instead, use - pg_type.typarray to locate the array type + pg_type.typarray to locate the array type associated with a given type. @@ -807,7 +807,7 @@ CREATE TYPE name Before PostgreSQL version 8.2, the shell-type creation syntax - CREATE TYPE name did not exist. + CREATE TYPE name did not exist. The way to create a new base type was to create its input function first. In this approach, PostgreSQL will first see the name of the new data type as the return type of the input function. @@ -824,10 +824,10 @@ CREATE TYPE name In PostgreSQL versions before 7.3, it was customary to avoid creating a shell type at all, by replacing the functions' forward references to the type name with the placeholder - pseudo-type opaque. The cstring arguments and - results also had to be declared as opaque before 7.3. To - support loading of old dump files, CREATE TYPE will - accept I/O functions declared using opaque, but it will issue + pseudo-type opaque. The cstring arguments and + results also had to be declared as opaque before 7.3. To + support loading of old dump files, CREATE TYPE will + accept I/O functions declared using opaque, but it will issue a notice and change the function declarations to use the correct types. @@ -894,7 +894,7 @@ CREATE TABLE myboxes ( If the internal structure of box were an array of four - float4 elements, we might instead use: + float4 elements, we might instead use: CREATE TYPE box ( INTERNALLENGTH = 16, @@ -924,20 +924,20 @@ CREATE TABLE big_objs ( More examples, including suitable input and output functions, are - in . + in . - + Compatibility The first form of the CREATE TYPE command, which - creates a composite type, conforms to the SQL standard. + creates a composite type, conforms to the SQL standard. The other forms are PostgreSQL extensions. The CREATE TYPE statement in - the SQL standard also defines other forms that are not - implemented in PostgreSQL. + the SQL standard also defines other forms that are not + implemented in PostgreSQL. 
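
 
    As an illustration of the simpler forms discussed above (the type names are
    arbitrary), the standard-conforming composite form and the enum form need no
    support functions at all:
 

CREATE TYPE complex AS (
    r double precision,
    i double precision
);

CREATE TYPE mood AS ENUM ('sad', 'ok', 'happy');
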
@@ -947,14 +947,14 @@ CREATE TABLE big_objs ( - + See Also - - - - + + + + diff --git a/doc/src/sgml/ref/create_user.sgml b/doc/src/sgml/ref/create_user.sgml index 8a596eec9f..a51dc50c97 100644 --- a/doc/src/sgml/ref/create_user.sgml +++ b/doc/src/sgml/ref/create_user.sgml @@ -3,7 +3,7 @@ doc/src/sgml/ref/create_user.sgml PostgreSQL documentation --> - + CREATE USER @@ -21,9 +21,9 @@ PostgreSQL documentation -CREATE USER name [ [ WITH ] option [ ... ] ] +CREATE USER name [ [ WITH ] option [ ... ] ] -where option can be: +where option can be: SUPERUSER | NOSUPERUSER | CREATEDB | NOCREATEDB @@ -32,15 +32,15 @@ CREATE USER name [ [ WITH ] connlimit - | [ ENCRYPTED ] PASSWORD 'password' - | VALID UNTIL 'timestamp' - | IN ROLE role_name [, ...] - | IN GROUP role_name [, ...] - | ROLE role_name [, ...] - | ADMIN role_name [, ...] - | USER role_name [, ...] - | SYSID uid + | CONNECTION LIMIT connlimit + | [ ENCRYPTED ] PASSWORD 'password' + | VALID UNTIL 'timestamp' + | IN ROLE role_name [, ...] + | IN GROUP role_name [, ...] + | ROLE role_name [, ...] + | ADMIN role_name [, ...] + | USER role_name [, ...] + | SYSID uid @@ -49,10 +49,10 @@ CREATE USER name [ [ WITH ] CREATE USER is now an alias for - . + . The only difference is that when the command is spelled - CREATE USER, LOGIN is assumed - by default, whereas NOLOGIN is assumed when + CREATE USER, LOGIN is assumed + by default, whereas NOLOGIN is assumed when the command is spelled CREATE ROLE. @@ -72,7 +72,7 @@ CREATE USER name [ [ WITH ] See Also - + diff --git a/doc/src/sgml/ref/create_user_mapping.sgml b/doc/src/sgml/ref/create_user_mapping.sgml index 1c44679a98..9719a4ff2c 100644 --- a/doc/src/sgml/ref/create_user_mapping.sgml +++ b/doc/src/sgml/ref/create_user_mapping.sgml @@ -3,7 +3,7 @@ doc/src/sgml/ref/create_user_mapping.sgml PostgreSQL documentation --> - + CREATE USER MAPPING @@ -21,9 +21,9 @@ PostgreSQL documentation -CREATE USER MAPPING [IF NOT EXISTS] FOR { user_name | USER | CURRENT_USER | PUBLIC } +CREATE USER MAPPING [ IF NOT EXISTS ] FOR { user_name | USER | CURRENT_USER | PUBLIC } SERVER server_name - [ OPTIONS ( option 'value' [ , ... ] ) ] + [ OPTIONS ( option 'value' [ , ... ] ) ] @@ -41,7 +41,7 @@ CREATE USER MAPPING [IF NOT EXISTS] FOR { user_na The owner of a foreign server can create user mappings for that server for any user. Also, a user can create a user mapping for - their own user name if USAGE privilege on the server has + their own user name if USAGE privilege on the server has been granted to the user. @@ -51,7 +51,7 @@ CREATE USER MAPPING [IF NOT EXISTS] FOR { user_na - IF NOT EXISTS + IF NOT EXISTS Do not throw an error if a mapping of the given user to the given foreign @@ -67,8 +67,8 @@ CREATE USER MAPPING [IF NOT EXISTS] FOR { user_na The name of an existing user that is mapped to foreign server. - CURRENT_USER and USER match the name of - the current user. When PUBLIC is specified, a + CURRENT_USER and USER match the name of + the current user. When PUBLIC is specified, a so-called public mapping is created that is used when no user-specific mapping is applicable. @@ -86,7 +86,7 @@ CREATE USER MAPPING [IF NOT EXISTS] FOR { user_na - OPTIONS ( option 'value' [, ... ] ) + OPTIONS ( option 'value' [, ... ] ) This clause specifies the options of the user mapping. 
The @@ -103,7 +103,7 @@ CREATE USER MAPPING [IF NOT EXISTS] FOR { user_na Examples - Create a user mapping for user bob, server foo: + Create a user mapping for user bob, server foo: CREATE USER MAPPING FOR bob SERVER foo OPTIONS (user 'bob', password 'secret'); @@ -122,10 +122,10 @@ CREATE USER MAPPING FOR bob SERVER foo OPTIONS (user 'bob', password 'secret'); See Also - - - - + + + + diff --git a/doc/src/sgml/ref/create_view.sgml b/doc/src/sgml/ref/create_view.sgml index a83d9564e5..e7a7e9fae2 100644 --- a/doc/src/sgml/ref/create_view.sgml +++ b/doc/src/sgml/ref/create_view.sgml @@ -3,7 +3,7 @@ doc/src/sgml/ref/create_view.sgml PostgreSQL documentation --> - + CREATE VIEW @@ -21,9 +21,9 @@ PostgreSQL documentation -CREATE [ OR REPLACE ] [ TEMP | TEMPORARY ] [ RECURSIVE ] VIEW name [ ( column_name [, ...] ) ] - [ WITH ( view_option_name [= view_option_value] [, ... ] ) ] - AS query +CREATE [ OR REPLACE ] [ TEMP | TEMPORARY ] [ RECURSIVE ] VIEW name [ ( column_name [, ...] ) ] + [ WITH ( view_option_name [= view_option_value] [, ... ] ) ] + AS query [ WITH [ CASCADED | LOCAL ] CHECK OPTION ] @@ -48,7 +48,7 @@ CREATE [ OR REPLACE ] [ TEMP | TEMPORARY ] [ RECURSIVE ] VIEW If a schema name is given (for example, CREATE VIEW - myschema.myview ...) then the view is created in the specified + myschema.myview ...) then the view is created in the specified schema. Otherwise it is created in the current schema. Temporary views exist in a special schema, so a schema name cannot be given when creating a temporary view. The name of the view must be @@ -62,7 +62,7 @@ CREATE [ OR REPLACE ] [ TEMP | TEMPORARY ] [ RECURSIVE ] VIEW - TEMPORARY or TEMP + TEMPORARY or TEMP If specified, the view is created as a temporary view. @@ -82,16 +82,21 @@ CREATE [ OR REPLACE ] [ TEMP | TEMPORARY ] [ RECURSIVE ] VIEW - RECURSIVE + RECURSIVE + + RECURSIVE + in views + + Creates a recursive view. The syntax -CREATE RECURSIVE VIEW [ schema . ] view_name (column_names) AS SELECT ...; +CREATE RECURSIVE VIEW [ schema . ] view_name (column_names) AS SELECT ...; is equivalent to -CREATE VIEW [ schema . ] view_name AS WITH RECURSIVE view_name (column_names) AS (SELECT ...) SELECT column_names FROM view_name; +CREATE VIEW [ schema . ] view_name AS WITH RECURSIVE view_name (column_names) AS (SELECT ...) SELECT column_names FROM view_name; A view column name list must be specified for a recursive view. @@ -118,7 +123,7 @@ CREATE VIEW [ schema . ] view_name AS WITH RECUR - WITH ( view_option_name [= view_option_value] [, ... ] ) + WITH ( view_option_name [= view_option_value] [, ... ] ) This clause specifies optional parameters for a view; the following @@ -129,11 +134,11 @@ CREATE VIEW [ schema . ] view_name AS WITH RECUR check_option (string) - This parameter may be either local or - cascaded, and is equivalent to specifying - WITH [ CASCADED | LOCAL ] CHECK OPTION (see below). + This parameter may be either local or + cascaded, and is equivalent to specifying + WITH [ CASCADED | LOCAL ] CHECK OPTION (see below). This option can be changed on existing views using . + linkend="sql-alterview"/>. @@ -143,7 +148,7 @@ CREATE VIEW [ schema . ] view_name AS WITH RECUR This should be used if the view is intended to provide row-level - security. See for full details. + security. See for full details. @@ -156,8 +161,8 @@ CREATE VIEW [ schema . ] view_name AS WITH RECUR query - A or - command + A or + command which will provide the columns and rows of the view. @@ -165,22 +170,22 @@ CREATE VIEW [ schema . 
] view_name AS WITH RECUR WITH [ CASCADED | LOCAL ] CHECK OPTION - + CHECK OPTION - + WITH CHECK OPTION This option controls the behavior of automatically updatable views. When - this option is specified, INSERT and UPDATE + this option is specified, INSERT and UPDATE commands on the view will be checked to ensure that new rows satisfy the view-defining condition (that is, the new rows are checked to ensure that they are visible through the view). If they are not, the update will be - rejected. If the CHECK OPTION is not specified, - INSERT and UPDATE commands on the view are + rejected. If the CHECK OPTION is not specified, + INSERT and UPDATE commands on the view are allowed to create rows that are not visible through the view. The following check options are supported: @@ -191,7 +196,7 @@ CREATE VIEW [ schema . ] view_name AS WITH RECUR New rows are only checked against the conditions defined directly in the view itself. Any conditions defined on underlying base views are - not checked (unless they also specify the CHECK OPTION). + not checked (unless they also specify the CHECK OPTION). @@ -201,9 +206,9 @@ CREATE VIEW [ schema . ] view_name AS WITH RECUR New rows are checked against the conditions of the view and all - underlying base views. If the CHECK OPTION is specified, - and neither LOCAL nor CASCADED is specified, - then CASCADED is assumed. + underlying base views. If the CHECK OPTION is specified, + and neither LOCAL nor CASCADED is specified, + then CASCADED is assumed. @@ -211,26 +216,26 @@ CREATE VIEW [ schema . ] view_name AS WITH RECUR - The CHECK OPTION may not be used with RECURSIVE + The CHECK OPTION may not be used with RECURSIVE views. - Note that the CHECK OPTION is only supported on views that - are automatically updatable, and do not have INSTEAD OF - triggers or INSTEAD rules. If an automatically updatable - view is defined on top of a base view that has INSTEAD OF - triggers, then the LOCAL CHECK OPTION may be used to check + Note that the CHECK OPTION is only supported on views that + are automatically updatable, and do not have INSTEAD OF + triggers or INSTEAD rules. If an automatically updatable + view is defined on top of a base view that has INSTEAD OF + triggers, then the LOCAL CHECK OPTION may be used to check the conditions on the automatically updatable view, but the conditions - on the base view with INSTEAD OF triggers will not be + on the base view with INSTEAD OF triggers will not be checked (a cascaded check option will not cascade down to a trigger-updatable view, and any check options defined directly on a trigger-updatable view will be ignored). If the view or any of its base - relations has an INSTEAD rule that causes the - INSERT or UPDATE command to be rewritten, then + relations has an INSTEAD rule that causes the + INSERT or UPDATE command to be rewritten, then all check options will be ignored in the rewritten query, including any checks from automatically updatable views defined on top of the relation - with the INSTEAD rule. + with the INSTEAD rule. @@ -241,7 +246,7 @@ CREATE VIEW [ schema . ] view_name AS WITH RECUR Notes - Use the + Use the statement to drop views. @@ -251,8 +256,8 @@ CREATE VIEW [ schema . ] view_name AS WITH RECUR CREATE VIEW vista AS SELECT 'Hello World'; - is bad form because the column name defaults to ?column?; - also, the column data type defaults to text, which might not + is bad form because the column name defaults to ?column?; + also, the column data type defaults to text, which might not be what you wanted. 
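
 
    A minimal sketch of a view satisfying these conditions, using a hypothetical
    orders table; DML issued against the view is converted into the
    corresponding statement on the base table:
 

CREATE VIEW shipped_orders AS
    SELECT order_id, customer_id, status
    FROM orders
    WHERE status = 'shipped';

-- rewritten automatically into an UPDATE on orders
UPDATE shipped_orders SET status = 'delivered' WHERE order_id = 42;
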
Better style for a string literal in a view's result is something like: @@ -264,22 +269,22 @@ CREATE VIEW vista AS SELECT text 'Hello World' AS hello; Access to tables referenced in the view is determined by permissions of the view owner. In some cases, this can be used to provide secure but restricted access to the underlying tables. However, not all views are - secure against tampering; see for + secure against tampering; see for details. Functions called in the view are treated the same as if they had been called directly from the query using the view. Therefore the user of a view must have permissions to call all functions used by the view. - When CREATE OR REPLACE VIEW is used on an + When CREATE OR REPLACE VIEW is used on an existing view, only the view's defining SELECT rule is changed. Other view properties, including ownership, permissions, and non-SELECT rules, remain unchanged. You must own the view to replace it (this includes being a member of the owning role). - - Updatable Views + + Updatable Views updatable views @@ -287,30 +292,30 @@ CREATE VIEW vista AS SELECT text 'Hello World' AS hello; Simple views are automatically updatable: the system will allow - INSERT, UPDATE and DELETE statements + INSERT, UPDATE and DELETE statements to be used on the view in the same way as on a regular table. A view is automatically updatable if it satisfies all of the following conditions: - The view must have exactly one entry in its FROM list, + The view must have exactly one entry in its FROM list, which must be a table or another updatable view. - The view definition must not contain WITH, - DISTINCT, GROUP BY, HAVING, - LIMIT, or OFFSET clauses at the top level. + The view definition must not contain WITH, + DISTINCT, GROUP BY, HAVING, + LIMIT, or OFFSET clauses at the top level. - The view definition must not contain set operations (UNION, - INTERSECT or EXCEPT) at the top level. + The view definition must not contain set operations (UNION, + INTERSECT or EXCEPT) at the top level. @@ -327,42 +332,42 @@ CREATE VIEW vista AS SELECT text 'Hello World' AS hello; An automatically updatable view may contain a mix of updatable and non-updatable columns. A column is updatable if it is a simple reference to an updatable column of the underlying base relation; otherwise the - column is read-only, and an error will be raised if an INSERT - or UPDATE statement attempts to assign a value to it. + column is read-only, and an error will be raised if an INSERT + or UPDATE statement attempts to assign a value to it. If the view is automatically updatable the system will convert any - INSERT, UPDATE or DELETE statement + INSERT, UPDATE or DELETE statement on the view into the corresponding statement on the underlying base - relation. INSERT statements that have an ON - CONFLICT UPDATE clause are fully supported. + relation. INSERT statements that have an ON + CONFLICT UPDATE clause are fully supported. - If an automatically updatable view contains a WHERE + If an automatically updatable view contains a WHERE condition, the condition restricts which rows of the base relation are - available to be modified by UPDATE and DELETE - statements on the view. However, an UPDATE is allowed to - change a row so that it no longer satisfies the WHERE + available to be modified by UPDATE and DELETE + statements on the view. However, an UPDATE is allowed to + change a row so that it no longer satisfies the WHERE condition, and thus is no longer visible through the view. 
Similarly, - an INSERT command can potentially insert base-relation rows - that do not satisfy the WHERE condition and thus are not - visible through the view (ON CONFLICT UPDATE may + an INSERT command can potentially insert base-relation rows + that do not satisfy the WHERE condition and thus are not + visible through the view (ON CONFLICT UPDATE may similarly affect an existing row not visible through the view). - The CHECK OPTION may be used to prevent - INSERT and UPDATE commands from creating + The CHECK OPTION may be used to prevent + INSERT and UPDATE commands from creating such rows that are not visible through the view. If an automatically updatable view is marked with the - security_barrier property then all the view's WHERE + security_barrier property then all the view's WHERE conditions (and any conditions using operators which are marked as LEAKPROOF) will always be evaluated before any conditions that a user of the view has - added. See for full details. Note that, + added. See for full details. Note that, due to this, rows which are not ultimately returned (because they do not - pass the user's WHERE conditions) may still end up being locked. + pass the user's WHERE conditions) may still end up being locked. EXPLAIN can be used to see which conditions are applied at the relation level (and therefore do not lock rows) and which are not. @@ -372,11 +377,11 @@ CREATE VIEW vista AS SELECT text 'Hello World' AS hello; A more complex view that does not satisfy all these conditions is read-only by default: the system will not allow an insert, update, or delete on the view. You can get the effect of an updatable view by - creating INSTEAD OF triggers on the view, which must + creating INSTEAD OF triggers on the view, which must convert attempted inserts, etc. on the view into appropriate actions on other tables. For more information see . Another possibility is to create rules - (see ), but in practice triggers are + linkend="sql-createtrigger"/>. Another possibility is to create rules + (see ), but in practice triggers are easier to understand and use correctly. @@ -386,7 +391,7 @@ CREATE VIEW vista AS SELECT text 'Hello World' AS hello; view. In addition the view's owner must have the relevant privileges on the underlying base relations, but the user performing the update does not need any permissions on the underlying base relations (see - ). + ). @@ -404,13 +409,13 @@ CREATE VIEW comedies AS WHERE kind = 'Comedy'; This will create a view containing the columns that are in the - film table at the time of view creation. Though - * was used to create the view, columns added later to + film table at the time of view creation. Though + * was used to create the view, columns added later to the table will not be part of the view. - Create a view with LOCAL CHECK OPTION: + Create a view with LOCAL CHECK OPTION: CREATE VIEW universal_comedies AS @@ -419,16 +424,16 @@ CREATE VIEW universal_comedies AS WHERE classification = 'U' WITH LOCAL CHECK OPTION; - This will create a view based on the comedies view, showing - only films with kind = 'Comedy' and - classification = 'U'. Any attempt to INSERT or - UPDATE a row in the view will be rejected if the new row - doesn't have classification = 'U', but the film - kind will not be checked. + This will create a view based on the comedies view, showing + only films with kind = 'Comedy' and + classification = 'U'. 
Any attempt to INSERT or + UPDATE a row in the view will be rejected if the new row + doesn't have classification = 'U', but the film + kind will not be checked. - Create a view with CASCADED CHECK OPTION: + Create a view with CASCADED CHECK OPTION: CREATE VIEW pg_comedies AS @@ -437,8 +442,8 @@ CREATE VIEW pg_comedies AS WHERE classification = 'PG' WITH CASCADED CHECK OPTION; - This will create a view that checks both the kind and - classification of new rows. + This will create a view that checks both the kind and + classification of new rows. @@ -454,10 +459,10 @@ CREATE VIEW comedies AS FROM films f WHERE f.kind = 'Comedy'; - This view will support INSERT, UPDATE and - DELETE. All the columns from the films table will - be updatable, whereas the computed columns country and - avg_rating will be read-only. + This view will support INSERT, UPDATE and + DELETE. All the columns from the films table will + be updatable, whereas the computed columns country and + avg_rating will be read-only. @@ -466,10 +471,10 @@ CREATE VIEW comedies AS CREATE RECURSIVE VIEW public.nums_1_100 (n) AS VALUES (1) UNION ALL - SELECT n+1 FROM nums_1_100 WHERE n < 100; + SELECT n+1 FROM nums_1_100 WHERE n < 100; Notice that although the recursive view's name is schema-qualified in this - CREATE, its internal self-reference is not schema-qualified. + CREATE, its internal self-reference is not schema-qualified. This is because the implicitly-created CTE's name cannot be schema-qualified. @@ -482,7 +487,7 @@ UNION ALL CREATE OR REPLACE VIEW is a PostgreSQL language extension. So is the concept of a temporary view. - The WITH ( ... ) clause is an extension as well. + The WITH ( ... ) clause is an extension as well. @@ -490,9 +495,9 @@ UNION ALL See Also - - - + + + diff --git a/doc/src/sgml/ref/createdb.sgml b/doc/src/sgml/ref/createdb.sgml index 9fc4c16a81..2658efeb1a 100644 --- a/doc/src/sgml/ref/createdb.sgml +++ b/doc/src/sgml/ref/createdb.sgml @@ -3,7 +3,7 @@ doc/src/sgml/ref/createdb.sgml PostgreSQL documentation --> - + createdb @@ -30,7 +30,7 @@ PostgreSQL documentation - + Description @@ -48,7 +48,7 @@ PostgreSQL documentation createdb is a wrapper around the - SQL command . + SQL command . There is no effective difference between creating databases via this utility and via other methods for accessing the server. @@ -86,8 +86,8 @@ PostgreSQL documentation - - + + Specifies the default tablespace for the database. (This name @@ -97,8 +97,8 @@ PostgreSQL documentation - - + + Echo the commands that createdb generates @@ -108,21 +108,21 @@ PostgreSQL documentation - - + + Specifies the character encoding scheme to be used in this database. The character sets supported by the PostgreSQL server are described in - . + . - - + + Specifies the locale to be used in this database. This is equivalent @@ -132,7 +132,7 @@ PostgreSQL documentation - + Specifies the LC_COLLATE setting to be used in this database. @@ -141,7 +141,7 @@ PostgreSQL documentation - + Specifies the LC_CTYPE setting to be used in this database. @@ -150,8 +150,8 @@ PostgreSQL documentation - - + + Specifies the database user who will own the new database. @@ -161,8 +161,8 @@ PostgreSQL documentation - - + + Specifies the template database from which to build this @@ -172,8 +172,8 @@ PostgreSQL documentation - - + + Print the createdb version and exit. 
@@ -182,8 +182,8 @@ PostgreSQL documentation - - + + Show help about createdb command line @@ -199,7 +199,7 @@ PostgreSQL documentation The options , , , , and correspond to options of the underlying - SQL command ; see there for more information + SQL command ; see there for more information about them. @@ -209,8 +209,8 @@ PostgreSQL documentation - - + + Specifies the host name of the machine on which the @@ -221,8 +221,8 @@ PostgreSQL documentation - - + + Specifies the TCP port or the local Unix domain socket file @@ -232,8 +232,8 @@ PostgreSQL documentation - - + + User name to connect as. @@ -242,8 +242,8 @@ PostgreSQL documentation - - + + Never issue a password prompt. If the server requires @@ -257,8 +257,8 @@ PostgreSQL documentation - - + + Force createdb to prompt for a @@ -271,14 +271,14 @@ PostgreSQL documentation for a password if the server demands password authentication. However, createdb will waste a connection attempt finding out that the server wants a password. - In some cases it is worth typing to avoid the extra connection attempt. - + Specifies the name of the database to connect to when creating the @@ -325,9 +325,9 @@ PostgreSQL documentation - This utility, like most other PostgreSQL utilities, - also uses the environment variables supported by libpq - (see ). + This utility, like most other PostgreSQL utilities, + also uses the environment variables supported by libpq + (see ). @@ -337,8 +337,8 @@ PostgreSQL documentation Diagnostics - In case of difficulty, see - and for + In case of difficulty, see + and for discussions of potential problems and error messages. The database server must be running at the targeted host. Also, any default connection settings and environment @@ -362,7 +362,7 @@ PostgreSQL documentation To create the database demo using the - server on host eden, port 5000, using the + server on host eden, port 5000, using the template0 template database, here is the command-line command and the underlying SQL command: @@ -376,8 +376,8 @@ PostgreSQL documentation See Also - - + + diff --git a/doc/src/sgml/ref/createuser.sgml b/doc/src/sgml/ref/createuser.sgml index fda77976ff..22ee99f2cc 100644 --- a/doc/src/sgml/ref/createuser.sgml +++ b/doc/src/sgml/ref/createuser.sgml @@ -3,7 +3,7 @@ doc/src/sgml/ref/createuser.sgml PostgreSQL documentation --> - + createuser @@ -34,22 +34,22 @@ PostgreSQL documentation createuser creates a new PostgreSQL user (or more precisely, a role). - Only superusers and users with CREATEROLE privilege can create + Only superusers and users with CREATEROLE privilege can create new users, so createuser must be invoked by someone who can connect as a superuser or a user with - CREATEROLE privilege. + CREATEROLE privilege. If you wish to create a new superuser, you must connect as a - superuser, not merely with CREATEROLE privilege. + superuser, not merely with CREATEROLE privilege. Being a superuser implies the ability to bypass all access permission checks within the database, so superuserdom should not be granted lightly. createuser is a wrapper around the - SQL command . + SQL command . There is no effective difference between creating users via this utility and via other methods for accessing the server. @@ -61,7 +61,7 @@ PostgreSQL documentation Options - createuser accepts the following command-line arguments: + createuser accepts the following command-line arguments: @@ -77,8 +77,8 @@ PostgreSQL documentation - - + + Set a maximum number of connections for the new user. 
@@ -88,8 +88,8 @@ PostgreSQL documentation - - + + The new user will be allowed to create databases. @@ -98,8 +98,8 @@ PostgreSQL documentation - - + + The new user will not be allowed to create databases. This is the @@ -109,8 +109,8 @@ PostgreSQL documentation - - + + Echo the commands that createuser generates @@ -120,8 +120,8 @@ PostgreSQL documentation - - + + This option is obsolete but still accepted for backward @@ -131,21 +131,21 @@ PostgreSQL documentation - - + + Indicates role to which this role will be added immediately as a new member. Multiple roles to which this role will be added as a member can be specified by writing multiple - switches. - - + + The new role will automatically inherit privileges of roles @@ -156,8 +156,8 @@ PostgreSQL documentation - - + + The new role will not automatically inherit privileges of roles @@ -167,7 +167,7 @@ PostgreSQL documentation - + Prompt for the user name if none is specified on the command line, and @@ -181,8 +181,8 @@ PostgreSQL documentation - - + + The new user will be allowed to log in (that is, the user name @@ -193,8 +193,8 @@ PostgreSQL documentation - - + + The new user will not be allowed to log in. @@ -205,8 +205,8 @@ PostgreSQL documentation - - + + If given, createuser will issue a prompt for @@ -217,19 +217,19 @@ PostgreSQL documentation - - + + The new user will be allowed to create new roles (that is, - this user will have CREATEROLE privilege). + this user will have CREATEROLE privilege). - - + + The new user will not be allowed to create new roles. This is the @@ -239,8 +239,8 @@ PostgreSQL documentation - - + + The new user will be a superuser. @@ -249,8 +249,8 @@ PostgreSQL documentation - - + + The new user will not be a superuser. This is the default. @@ -259,8 +259,8 @@ PostgreSQL documentation - - + + Print the createuser version and exit. @@ -269,30 +269,30 @@ PostgreSQL documentation - + The new user will have the REPLICATION privilege, which is described more fully in the documentation for . + linkend="sql-createrole"/>. - + The new user will not have the REPLICATION privilege, which is described more fully in the documentation for . + linkend="sql-createrole"/>. - - + + Show help about createuser command line @@ -310,8 +310,8 @@ PostgreSQL documentation - - + + Specifies the host name of the machine on which the @@ -323,8 +323,8 @@ PostgreSQL documentation - - + + Specifies the TCP port or local Unix domain socket file @@ -335,8 +335,8 @@ PostgreSQL documentation - - + + User name to connect as (not the user name to create). @@ -345,8 +345,8 @@ PostgreSQL documentation - - + + Never issue a password prompt. If the server requires @@ -360,8 +360,8 @@ PostgreSQL documentation - - + + Force createuser to prompt for a @@ -375,7 +375,7 @@ PostgreSQL documentation for a password if the server demands password authentication. However, createuser will waste a connection attempt finding out that the server wants a password. - In some cases it is worth typing to avoid the extra connection attempt. @@ -403,9 +403,9 @@ PostgreSQL documentation - This utility, like most other PostgreSQL utilities, - also uses the environment variables supported by libpq - (see ). + This utility, like most other PostgreSQL utilities, + also uses the environment variables supported by libpq + (see ). @@ -415,8 +415,8 @@ PostgreSQL documentation Diagnostics - In case of difficulty, see - and for + In case of difficulty, see + and for discussions of potential problems and error messages. 
The database server must be running at the targeted host. Also, any default connection settings and environment @@ -451,7 +451,7 @@ PostgreSQL documentation To create the same user joe using the - server on host eden, port 5000, with attributes explicitly specified, + server on host eden, port 5000, with attributes explicitly specified, taking a look at the underlying command: $ createuser -h eden -p 5000 -S -D -R -e joe @@ -479,8 +479,8 @@ PostgreSQL documentation See Also - - + + diff --git a/doc/src/sgml/ref/deallocate.sgml b/doc/src/sgml/ref/deallocate.sgml index 394b125f52..3875e5069e 100644 --- a/doc/src/sgml/ref/deallocate.sgml +++ b/doc/src/sgml/ref/deallocate.sgml @@ -3,7 +3,7 @@ doc/src/sgml/ref/deallocate.sgml PostgreSQL documentation --> - + DEALLOCATE @@ -41,7 +41,7 @@ DEALLOCATE [ PREPARE ] { name | ALL For more information on prepared statements, see . + linkend="sql-prepare"/>. @@ -91,8 +91,8 @@ DEALLOCATE [ PREPARE ] { name | ALL See Also - - + + diff --git a/doc/src/sgml/ref/declare.sgml b/doc/src/sgml/ref/declare.sgml index 5cb85cc568..34ca9df243 100644 --- a/doc/src/sgml/ref/declare.sgml +++ b/doc/src/sgml/ref/declare.sgml @@ -3,7 +3,7 @@ doc/src/sgml/ref/declare.sgml PostgreSQL documentation --> - + DECLARE @@ -39,15 +39,15 @@ DECLARE name [ BINARY ] [ INSENSITI can be used to retrieve a small number of rows at a time out of a larger query. After the cursor is created, rows are fetched from it using - . + . This page describes usage of cursors at the SQL command level. - If you are trying to use cursors inside a PL/pgSQL + If you are trying to use cursors inside a PL/pgSQL function, the rules are different — - see . + see . @@ -100,7 +100,7 @@ DECLARE name [ BINARY ] [ INSENSITI used to retrieve rows in a nonsequential fashion. The default is to allow scrolling in some cases; this is not the same as specifying SCROLL. See for details. + endterm="sql-declare-notes-title"/> for details. @@ -124,8 +124,8 @@ DECLARE name [ BINARY ] [ INSENSITI query - A or - command + A or + command which will provide the rows to be returned by the cursor. @@ -144,13 +144,13 @@ DECLARE name [ BINARY ] [ INSENSITI Normal cursors return data in text format, the same as a - SELECT would produce. The BINARY option + SELECT would produce. The BINARY option specifies that the cursor should return data in binary format. This reduces conversion effort for both the server and client, at the cost of more programmer effort to deal with platform-dependent binary data formats. As an example, if a query returns a value of one from an integer column, - you would get a string of 1 with a default cursor, + you would get a string of 1 with a default cursor, whereas with a binary cursor you would get a 4-byte field containing the internal representation of the value (in big-endian byte order). @@ -165,8 +165,8 @@ DECLARE name [ BINARY ] [ INSENSITI - When the client application uses the extended query protocol - to issue a FETCH command, the Bind protocol message + When the client application uses the extended query protocol + to issue a FETCH command, the Bind protocol message specifies whether data is to be retrieved in text or binary format. This choice overrides the way that the cursor is defined. The concept of a binary cursor as such is thus obsolete when using extended query @@ -177,15 +177,15 @@ DECLARE name [ BINARY ] [ INSENSITI Unless WITH HOLD is specified, the cursor created by this command can only be used within the current - transaction. Thus, DECLARE without WITH + transaction. 
Thus, DECLARE without WITH HOLD is useless outside a transaction block: the cursor would survive only to the completion of the statement. Therefore PostgreSQL reports an error if such a command is used outside a transaction block. Use - and - - (or ) + and + + (or ) to define a transaction block. @@ -204,25 +204,25 @@ DECLARE name [ BINARY ] [ INSENSITI WITH HOLD may not be specified when the query - includes FOR UPDATE or FOR SHARE. + includes FOR UPDATE or FOR SHARE. - The SCROLL option should be specified when defining a + The SCROLL option should be specified when defining a cursor that will be used to fetch backwards. This is required by the SQL standard. However, for compatibility with earlier versions, PostgreSQL will allow - backward fetches without SCROLL, if the cursor's query + backward fetches without SCROLL, if the cursor's query plan is simple enough that no extra overhead is needed to support it. However, application developers are advised not to rely on using backward fetches from a cursor that has not been created - with SCROLL. If NO SCROLL is + with SCROLL. If NO SCROLL is specified, then backward fetches are disallowed in any case. Backward fetches are also disallowed when the query - includes FOR UPDATE or FOR SHARE; therefore + includes FOR UPDATE or FOR SHARE; therefore SCROLL may not be specified in this case. @@ -230,7 +230,7 @@ DECLARE name [ BINARY ] [ INSENSITI Scrollable and WITH HOLD cursors may give unexpected results if they invoke any volatile functions (see ). When a previously fetched row is + linkend="xfunc-volatility"/>). When a previously fetched row is re-fetched, the functions might be re-executed, perhaps leading to results different from the first time. One workaround for such cases is to declare the cursor WITH HOLD and commit the @@ -241,42 +241,43 @@ DECLARE name [ BINARY ] [ INSENSITI - If the cursor's query includes FOR UPDATE or FOR - SHARE, then returned rows are locked at the time they are first + If the cursor's query includes FOR UPDATE or FOR + SHARE, then returned rows are locked at the time they are first fetched, in the same way as for a regular - command with + command with these options. In addition, the returned rows will be the most up-to-date versions; therefore these options provide the equivalent of what the SQL standard - calls a sensitive cursor. (Specifying INSENSITIVE - together with FOR UPDATE or FOR SHARE is an error.) + calls a sensitive cursor. (Specifying INSENSITIVE + together with FOR UPDATE or FOR SHARE is an error.) - It is generally recommended to use FOR UPDATE if the cursor - is intended to be used with UPDATE ... WHERE CURRENT OF or - DELETE ... WHERE CURRENT OF. Using FOR UPDATE + It is generally recommended to use FOR UPDATE if the cursor + is intended to be used with UPDATE ... WHERE CURRENT OF or + DELETE ... WHERE CURRENT OF. Using FOR UPDATE prevents other sessions from changing the rows between the time they are - fetched and the time they are updated. Without FOR UPDATE, - a subsequent WHERE CURRENT OF command will have no effect if + fetched and the time they are updated. Without FOR UPDATE, + a subsequent WHERE CURRENT OF command will have no effect if the row was changed since the cursor was created. 
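    For instance, a cursor intended for use with WHERE CURRENT OF
    might be declared and used as follows (a minimal sketch reusing the
    films table from the examples on this page; the cursor name
    film_curs and the new kind value are chosen only for illustration):

BEGIN;
DECLARE film_curs CURSOR FOR
    SELECT * FROM films WHERE kind = 'Comedy'
    FOR UPDATE;
FETCH NEXT FROM film_curs;            -- assumes this returns a row
UPDATE films SET kind = 'Drama' WHERE CURRENT OF film_curs;
CLOSE film_curs;
COMMIT;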
- Another reason to use FOR UPDATE is that without it, a - subsequent WHERE CURRENT OF might fail if the cursor query + Another reason to use FOR UPDATE is that without it, a + subsequent WHERE CURRENT OF might fail if the cursor query does not meet the SQL standard's rules for being simply - updatable (in particular, the cursor must reference just one table - and not use grouping or ORDER BY). Cursors + updatable (in particular, the cursor must reference just one table + and not use grouping or ORDER BY). Cursors that are not simply updatable might work, or might not, depending on plan choice details; so in the worst case, an application might work in testing - and then fail in production. + and then fail in production. If FOR UPDATE is + specified, the cursor is guaranteed to be updatable. - The main reason not to use FOR UPDATE with WHERE - CURRENT OF is if you need the cursor to be scrollable, or to be + The main reason not to use FOR UPDATE with WHERE + CURRENT OF is if you need the cursor to be scrollable, or to be insensitive to the subsequent updates (that is, continue to show the old data). If this is a requirement, pay close heed to the caveats shown above. @@ -309,7 +310,7 @@ DECLARE name [ BINARY ] [ INSENSITI DECLARE liahona CURSOR FOR SELECT * FROM films; - See for more + See for more examples of cursor usage. @@ -321,13 +322,13 @@ DECLARE liahona CURSOR FOR SELECT * FROM films; The SQL standard says that it is implementation-dependent whether cursors are sensitive to concurrent updates of the underlying data by default. In PostgreSQL, cursors are insensitive by default, - and can be made sensitive by specifying FOR UPDATE. Other + and can be made sensitive by specifying FOR UPDATE. Other products may work differently. The SQL standard allows cursors only in embedded - SQL and in modules. PostgreSQL + SQL and in modules. PostgreSQL permits cursors to be used interactively. @@ -341,9 +342,9 @@ DECLARE liahona CURSOR FOR SELECT * FROM films; See Also - - - + + + diff --git a/doc/src/sgml/ref/delete.sgml b/doc/src/sgml/ref/delete.sgml index 20417a1391..df8cea48cf 100644 --- a/doc/src/sgml/ref/delete.sgml +++ b/doc/src/sgml/ref/delete.sgml @@ -3,7 +3,7 @@ doc/src/sgml/ref/delete.sgml PostgreSQL documentation --> - + DELETE @@ -22,9 +22,9 @@ PostgreSQL documentation [ WITH [ RECURSIVE ] with_query [, ...] ] -DELETE FROM [ ONLY ] table_name [ * ] [ [ AS ] alias ] - [ USING using_list ] - [ WHERE condition | WHERE CURRENT OF cursor_name ] +DELETE FROM [ ONLY ] table_name [ * ] [ [ AS ] alias ] + [ USING using_list ] + [ WHERE condition | WHERE CURRENT OF cursor_name ] [ RETURNING * | output_expression [ [ AS ] output_name ] [, ...] ] @@ -41,7 +41,7 @@ DELETE FROM [ ONLY ] table_name [ * - provides a + provides a faster mechanism to remove all rows from a table. @@ -55,12 +55,12 @@ DELETE FROM [ ONLY ] table_name [ * - The optional RETURNING clause causes DELETE + The optional RETURNING clause causes DELETE to compute and return value(s) based on each row actually deleted. Any expression using the table's columns, and/or columns of other tables mentioned in USING, can be computed. - The syntax of the RETURNING list is identical to that of the - output list of SELECT. + The syntax of the RETURNING list is identical to that of the + output list of SELECT. @@ -81,8 +81,8 @@ DELETE FROM [ ONLY ] table_name [ * The WITH clause allows you to specify one or more - subqueries that can be referenced by name in the DELETE - query. 
See and + subqueries that can be referenced by name in the DELETE + query. See and for details. @@ -93,11 +93,11 @@ DELETE FROM [ ONLY ] table_name [ * The name (optionally schema-qualified) of the table to delete rows - from. If ONLY is specified before the table name, + from. If ONLY is specified before the table name, matching rows are deleted from the named table only. If - ONLY is not specified, matching rows are also deleted + ONLY is not specified, matching rows are also deleted from any tables inheriting from the named table. Optionally, - * can be specified after the table name to explicitly + * can be specified after the table name to explicitly indicate that descendant tables are included. @@ -109,24 +109,24 @@ DELETE FROM [ ONLY ] table_name [ * A substitute name for the target table. When an alias is provided, it completely hides the actual name of the table. For - example, given DELETE FROM foo AS f, the remainder + example, given DELETE FROM foo AS f, the remainder of the DELETE statement must refer to this - table as f not foo. + table as f not foo. - using_list + using_list A list of table expressions, allowing columns from other tables - to appear in the WHERE condition. This is similar + to appear in the WHERE condition. This is similar to the list of tables that can be specified in the of a + linkend="sql-from" endterm="sql-from-title"/> of a SELECT statement; for example, an alias for the table name can be specified. Do not repeat the target table - in the using_list, + in the using_list, unless you wish to set up a self-join. @@ -137,44 +137,44 @@ DELETE FROM [ ONLY ] table_name [ * An expression that returns a value of type boolean. - Only rows for which this expression returns true + Only rows for which this expression returns true will be deleted. - cursor_name + cursor_name - The name of the cursor to use in a WHERE CURRENT OF + The name of the cursor to use in a WHERE CURRENT OF condition. The row to be deleted is the one most recently fetched from this cursor. The cursor must be a non-grouping - query on the DELETE's target table. - Note that WHERE CURRENT OF cannot be + query on the DELETE's target table. + Note that WHERE CURRENT OF cannot be specified together with a Boolean condition. See - + for more information about using cursors with - WHERE CURRENT OF. + WHERE CURRENT OF. - output_expression + output_expression - An expression to be computed and returned by the DELETE + An expression to be computed and returned by the DELETE command after each row is deleted. The expression can use any - column names of the table named by table_name - or table(s) listed in USING. - Write * to return all columns. + column names of the table named by table_name + or table(s) listed in USING. + Write * to return all columns. - output_name + output_name A name to use for a returned column. @@ -188,7 +188,7 @@ DELETE FROM [ ONLY ] table_name [ * Outputs - On successful completion, a DELETE command returns a command + On successful completion, a DELETE command returns a command tag of the form DELETE count @@ -197,16 +197,16 @@ DELETE count of rows deleted. Note that the number may be less than the number of rows that matched the condition when deletes were - suppressed by a BEFORE DELETE trigger. If BEFORE DELETE trigger. If count is 0, no rows were deleted by the query (this is not considered an error). 
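    For example, a command that deletes four rows would report:

DELETE 4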
- If the DELETE command contains a RETURNING - clause, the result will be similar to that of a SELECT + If the DELETE command contains a RETURNING + clause, the result will be similar to that of a SELECT statement containing the columns and values defined in the - RETURNING list, computed over the row(s) deleted by the + RETURNING list, computed over the row(s) deleted by the command. @@ -216,16 +216,16 @@ DELETE count PostgreSQL lets you reference columns of - other tables in the WHERE condition by specifying the + other tables in the WHERE condition by specifying the other tables in the USING clause. For example, to delete all films produced by a given producer, one can do: DELETE FROM films USING producers WHERE producer_id = producers.id AND producers.name = 'foo'; - What is essentially happening here is a join between films - and producers, with all successfully joined - films rows being marked for deletion. + What is essentially happening here is a join between films + and producers, with all successfully joined + films rows being marked for deletion. This syntax is not standard. A more standard way to do it is: DELETE FROM films @@ -261,8 +261,8 @@ DELETE FROM tasks WHERE status = 'DONE' RETURNING *; - Delete the row of tasks on which the cursor - c_tasks is currently positioned: + Delete the row of tasks on which the cursor + c_tasks is currently positioned: DELETE FROM tasks WHERE CURRENT OF c_tasks; @@ -273,9 +273,9 @@ DELETE FROM tasks WHERE CURRENT OF c_tasks; This command conforms to the SQL standard, except - that the USING and RETURNING clauses + that the USING and RETURNING clauses are PostgreSQL extensions, as is the ability - to use WITH with DELETE. + to use WITH with DELETE. @@ -283,7 +283,7 @@ DELETE FROM tasks WHERE CURRENT OF c_tasks; See Also - + diff --git a/doc/src/sgml/ref/discard.sgml b/doc/src/sgml/ref/discard.sgml index e859bf7bab..6b909b7232 100644 --- a/doc/src/sgml/ref/discard.sgml +++ b/doc/src/sgml/ref/discard.sgml @@ -3,7 +3,7 @@ doc/src/sgml/ref/discard.sgml PostgreSQL documentation --> - + DISCARD @@ -29,10 +29,10 @@ DISCARD { ALL | PLANS | SEQUENCES | TEMPORARY | TEMP } Description - DISCARD releases internal resources associated with a + DISCARD releases internal resources associated with a database session. This command is useful for partially or fully resetting the session's state. There are several subcommands to - release different types of resources; the DISCARD ALL + release different types of resources; the DISCARD ALL variant subsumes all the others, and also resets additional state. @@ -57,10 +57,10 @@ DISCARD { ALL | PLANS | SEQUENCES | TEMPORARY | TEMP } Discards all cached sequence-related state, - including currval()/lastval() + including currval()/lastval() information and any preallocated sequence values that have not - yet been returned by nextval(). - (See for a description of + yet been returned by nextval(). + (See for a description of preallocated sequence values.) @@ -104,7 +104,7 @@ DISCARD TEMP; Notes - DISCARD ALL cannot be executed inside a transaction block. + DISCARD ALL cannot be executed inside a transaction block. 
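    The following sequence sketches this restriction (the exact error
    text is not shown here and may vary):

BEGIN;
DISCARD ALL;    -- rejected inside the transaction block
ROLLBACK;
DISCARD ALL;    -- accepted outside a transaction block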
diff --git a/doc/src/sgml/ref/do.sgml b/doc/src/sgml/ref/do.sgml index ed5e588ee7..a3a4877e80 100644 --- a/doc/src/sgml/ref/do.sgml +++ b/doc/src/sgml/ref/do.sgml @@ -3,7 +3,7 @@ doc/src/sgml/ref/do.sgml PostgreSQL documentation --> - + DO @@ -25,7 +25,7 @@ PostgreSQL documentation -DO [ LANGUAGE lang_name ] code +DO [ LANGUAGE lang_name ] code @@ -39,12 +39,12 @@ DO [ LANGUAGE lang_name ] The code block is treated as though it were the body of a function - with no parameters, returning void. It is parsed and + with no parameters, returning void. It is parsed and executed a single time. - The optional LANGUAGE clause can be written either + The optional LANGUAGE clause can be written either before or after the code block. @@ -54,22 +54,22 @@ DO [ LANGUAGE lang_name ] - code + code The procedural language code to be executed. This must be specified - as a string literal, just as in CREATE FUNCTION. + as a string literal, just as in CREATE FUNCTION. Use of a dollar-quoted literal is recommended. - lang_name + lang_name The name of the procedural language the code is written in. - If omitted, the default is plpgsql. + If omitted, the default is plpgsql. @@ -81,23 +81,30 @@ DO [ LANGUAGE lang_name ] The procedural language to be used must already have been installed - into the current database by means of CREATE LANGUAGE. - plpgsql is installed by default, but other languages are not. + into the current database by means of CREATE EXTENSION. + plpgsql is installed by default, but other languages are not. - The user must have USAGE privilege for the procedural + The user must have USAGE privilege for the procedural language, or must be a superuser if the language is untrusted. This is the same privilege requirement as for creating a function in the language. + + + If DO is executed in a transaction block, then the + procedure code cannot execute transaction control statements. Transaction + control statements are only allowed if DO is executed in + its own transaction. + Examples - Grant all privileges on all views in schema public to - role webuser: + Grant all privileges on all views in schema public to + role webuser: DO $$DECLARE r record; BEGIN @@ -122,7 +129,7 @@ END$$; See Also - + diff --git a/doc/src/sgml/ref/drop_access_method.sgml b/doc/src/sgml/ref/drop_access_method.sgml index 8aa9197fe4..a908a64b74 100644 --- a/doc/src/sgml/ref/drop_access_method.sgml +++ b/doc/src/sgml/ref/drop_access_method.sgml @@ -64,7 +64,7 @@ DROP ACCESS METHOD [ IF EXISTS ] name). + (see ). @@ -85,7 +85,7 @@ DROP ACCESS METHOD [ IF EXISTS ] nameExamples - Drop the access method heptree: + Drop the access method heptree: DROP ACCESS METHOD heptree; @@ -96,7 +96,7 @@ DROP ACCESS METHOD heptree; DROP ACCESS METHOD is a - PostgreSQL extension. + PostgreSQL extension. @@ -104,7 +104,7 @@ DROP ACCESS METHOD heptree; See Also - + diff --git a/doc/src/sgml/ref/drop_aggregate.sgml b/doc/src/sgml/ref/drop_aggregate.sgml index dde1ea2444..ba74f4f5eb 100644 --- a/doc/src/sgml/ref/drop_aggregate.sgml +++ b/doc/src/sgml/ref/drop_aggregate.sgml @@ -3,7 +3,7 @@ doc/src/sgml/ref/drop_aggregate.sgml PostgreSQL documentation --> - + DROP AGGREGATE @@ -70,8 +70,8 @@ DROP AGGREGATE [ IF EXISTS ] name ( aggr - The mode of an argument: IN or VARIADIC. - If omitted, the default is IN. + The mode of an argument: IN or VARIADIC. + If omitted, the default is IN. @@ -94,10 +94,10 @@ DROP AGGREGATE [ IF EXISTS ] name ( aggr An input data type on which the aggregate function operates. 
- To reference a zero-argument aggregate function, write * + To reference a zero-argument aggregate function, write * in place of the list of argument specifications. To reference an ordered-set aggregate function, write - ORDER BY between the direct and aggregated argument + ORDER BY between the direct and aggregated argument specifications. @@ -110,7 +110,7 @@ DROP AGGREGATE [ IF EXISTS ] name ( aggr Automatically drop objects that depend on the aggregate function (such as views using it), and in turn all objects that depend on those objects - (see ). + (see ). @@ -132,7 +132,7 @@ DROP AGGREGATE [ IF EXISTS ] name ( aggr Alternative syntaxes for referencing ordered-set aggregates - are described under . + are described under . @@ -148,7 +148,7 @@ DROP AGGREGATE myavg(integer); - To remove the hypothetical-set aggregate function myrank, + To remove the hypothetical-set aggregate function myrank, which takes an arbitrary list of ordering columns and a matching list of direct arguments: @@ -176,8 +176,8 @@ DROP AGGREGATE myavg(integer), myavg(bigint); See Also - - + + diff --git a/doc/src/sgml/ref/drop_cast.sgml b/doc/src/sgml/ref/drop_cast.sgml index dae3a39fce..9f42867e12 100644 --- a/doc/src/sgml/ref/drop_cast.sgml +++ b/doc/src/sgml/ref/drop_cast.sgml @@ -1,6 +1,6 @@ - + DROP CAST @@ -107,7 +107,7 @@ DROP CAST (text AS int); See Also - + diff --git a/doc/src/sgml/ref/drop_collation.sgml b/doc/src/sgml/ref/drop_collation.sgml index 2177d8e5d6..89b975aac3 100644 --- a/doc/src/sgml/ref/drop_collation.sgml +++ b/doc/src/sgml/ref/drop_collation.sgml @@ -1,6 +1,6 @@ - + DROP COLLATION @@ -62,7 +62,7 @@ DROP COLLATION [ IF EXISTS ] name [ CASCADE | RESTRIC Automatically drop objects that depend on the collation, and in turn all objects that depend on those objects - (see ). + (see ). @@ -83,7 +83,7 @@ DROP COLLATION [ IF EXISTS ] name [ CASCADE | RESTRIC Examples - To drop the collation named german: + To drop the collation named german: DROP COLLATION german; @@ -95,7 +95,7 @@ DROP COLLATION german; The DROP COLLATION command conforms to the SQL standard, apart from the IF - EXISTS option, which is a PostgreSQL extension. + EXISTS option, which is a PostgreSQL extension. @@ -103,8 +103,8 @@ DROP COLLATION german; See Also - - + + diff --git a/doc/src/sgml/ref/drop_conversion.sgml b/doc/src/sgml/ref/drop_conversion.sgml index 1a33b3dcc5..131e3cbc0b 100644 --- a/doc/src/sgml/ref/drop_conversion.sgml +++ b/doc/src/sgml/ref/drop_conversion.sgml @@ -1,6 +1,6 @@ - + DROP CONVERSION @@ -74,7 +74,7 @@ DROP CONVERSION [ IF EXISTS ] name [ CASCADE | RESTRI Examples - To drop the conversion named myname: + To drop the conversion named myname: DROP CONVERSION myname; @@ -96,8 +96,8 @@ DROP CONVERSION myname; See Also - - + + diff --git a/doc/src/sgml/ref/drop_database.sgml b/doc/src/sgml/ref/drop_database.sgml index 740aa31995..3ac06c984a 100644 --- a/doc/src/sgml/ref/drop_database.sgml +++ b/doc/src/sgml/ref/drop_database.sgml @@ -3,7 +3,7 @@ doc/src/sgml/ref/drop_database.sgml PostgreSQL documentation --> - + DROP DATABASE @@ -21,7 +21,7 @@ PostgreSQL documentation -DROP DATABASE [ IF EXISTS ] name +DROP DATABASE [ IF EXISTS ] name @@ -57,7 +57,7 @@ DROP DATABASE [ IF EXISTS ] name - name + name The name of the database to remove. @@ -71,14 +71,14 @@ DROP DATABASE [ IF EXISTS ] name Notes - DROP DATABASE cannot be executed inside a transaction + DROP DATABASE cannot be executed inside a transaction block. This command cannot be executed while connected to the target database. 
Thus, it might be more convenient to use the program - instead, + instead, which is a wrapper around this command. @@ -95,7 +95,7 @@ DROP DATABASE [ IF EXISTS ] name See Also - + diff --git a/doc/src/sgml/ref/drop_domain.sgml b/doc/src/sgml/ref/drop_domain.sgml index e14795e6a3..b18faf3917 100644 --- a/doc/src/sgml/ref/drop_domain.sgml +++ b/doc/src/sgml/ref/drop_domain.sgml @@ -3,7 +3,7 @@ doc/src/sgml/ref/drop_domain.sgml PostgreSQL documentation --> - + DROP DOMAIN @@ -21,7 +21,7 @@ PostgreSQL documentation -DROP DOMAIN [ IF EXISTS ] name [, ...] [ CASCADE | RESTRICT ] +DROP DOMAIN [ IF EXISTS ] name [, ...] [ CASCADE | RESTRICT ] @@ -49,7 +49,7 @@ DROP DOMAIN [ IF EXISTS ] name [, . - name + name The name (optionally schema-qualified) of an existing domain. @@ -58,19 +58,19 @@ DROP DOMAIN [ IF EXISTS ] name [, . - CASCADE + CASCADE Automatically drop objects that depend on the domain (such as table columns), and in turn all objects that depend on those objects - (see ). + (see ). - RESTRICT + RESTRICT Refuse to drop the domain if any objects depend on it. This is @@ -81,7 +81,7 @@ DROP DOMAIN [ IF EXISTS ] name [, . - + Examples @@ -92,22 +92,22 @@ DROP DOMAIN box; - + Compatibility This command conforms to the SQL standard, except for the - IF EXISTS option, which is a PostgreSQL + IF EXISTS option, which is a PostgreSQL extension. - + See Also - - + + diff --git a/doc/src/sgml/ref/drop_event_trigger.sgml b/doc/src/sgml/ref/drop_event_trigger.sgml index 6e3ee22d7b..137884cc83 100644 --- a/doc/src/sgml/ref/drop_event_trigger.sgml +++ b/doc/src/sgml/ref/drop_event_trigger.sgml @@ -3,7 +3,7 @@ doc/src/sgml/ref/drop_event_trigger.sgml PostgreSQL documentation --> - + DROP EVENT TRIGGER @@ -21,7 +21,7 @@ PostgreSQL documentation -DROP EVENT TRIGGER [ IF EXISTS ] name [ CASCADE | RESTRICT ] +DROP EVENT TRIGGER [ IF EXISTS ] name [ CASCADE | RESTRICT ] @@ -51,7 +51,7 @@ DROP EVENT TRIGGER [ IF EXISTS ] name - name + name The name of the event trigger to remove. @@ -65,7 +65,7 @@ DROP EVENT TRIGGER [ IF EXISTS ] name Automatically drop objects that depend on the trigger, and in turn all objects that depend on those objects - (see ). + (see ). @@ -107,8 +107,8 @@ DROP EVENT TRIGGER snitch; See Also - - + + diff --git a/doc/src/sgml/ref/drop_extension.sgml b/doc/src/sgml/ref/drop_extension.sgml index 7438a08bb3..5e507dec92 100644 --- a/doc/src/sgml/ref/drop_extension.sgml +++ b/doc/src/sgml/ref/drop_extension.sgml @@ -3,7 +3,7 @@ doc/src/sgml/ref/drop_extension.sgml PostgreSQL documentation --> - + DROP EXTENSION @@ -21,7 +21,7 @@ PostgreSQL documentation -DROP EXTENSION [ IF EXISTS ] name [, ...] [ CASCADE | RESTRICT ] +DROP EXTENSION [ IF EXISTS ] name [, ...] [ CASCADE | RESTRICT ] @@ -54,7 +54,7 @@ DROP EXTENSION [ IF EXISTS ] name [ - name + name The name of an installed extension. @@ -68,7 +68,7 @@ DROP EXTENSION [ IF EXISTS ] name [ Automatically drop objects that depend on the extension, and in turn all objects that depend on those objects - (see ). + (see ). @@ -79,7 +79,7 @@ DROP EXTENSION [ IF EXISTS ] name [ Refuse to drop the extension if any objects depend on it (other than its own member objects and other extensions listed in the same - DROP command). This is the default. + DROP command). This is the default. @@ -97,7 +97,7 @@ DROP EXTENSION hstore; This command will fail if any of hstore's objects are in use in the database, for example if any tables have columns - of the hstore type. Add the CASCADE option to + of the hstore type. 
Add the CASCADE option to forcibly remove those dependent objects as well. @@ -106,7 +106,7 @@ DROP EXTENSION hstore; Compatibility - DROP EXTENSION is a PostgreSQL + DROP EXTENSION is a PostgreSQL extension. @@ -115,8 +115,8 @@ DROP EXTENSION hstore; See Also - - + + diff --git a/doc/src/sgml/ref/drop_foreign_data_wrapper.sgml b/doc/src/sgml/ref/drop_foreign_data_wrapper.sgml index 702cc021db..e53178b8c7 100644 --- a/doc/src/sgml/ref/drop_foreign_data_wrapper.sgml +++ b/doc/src/sgml/ref/drop_foreign_data_wrapper.sgml @@ -3,7 +3,7 @@ doc/src/sgml/ref/drop_foreign_data_wrapper.sgml PostgreSQL documentation --> - + DROP FOREIGN DATA WRAPPER @@ -65,7 +65,7 @@ DROP FOREIGN DATA WRAPPER [ IF EXISTS ] name). + (see ). @@ -86,7 +86,7 @@ DROP FOREIGN DATA WRAPPER [ IF EXISTS ] nameExamples - Drop the foreign-data wrapper dbi: + Drop the foreign-data wrapper dbi: DROP FOREIGN DATA WRAPPER dbi; @@ -97,8 +97,8 @@ DROP FOREIGN DATA WRAPPER dbi; DROP FOREIGN DATA WRAPPER conforms to ISO/IEC - 9075-9 (SQL/MED). The IF EXISTS clause is - a PostgreSQL extension. + 9075-9 (SQL/MED). The IF EXISTS clause is + a PostgreSQL extension. @@ -106,8 +106,8 @@ DROP FOREIGN DATA WRAPPER dbi; See Also - - + + diff --git a/doc/src/sgml/ref/drop_foreign_table.sgml b/doc/src/sgml/ref/drop_foreign_table.sgml index 5a2b235d4e..b29169e0d3 100644 --- a/doc/src/sgml/ref/drop_foreign_table.sgml +++ b/doc/src/sgml/ref/drop_foreign_table.sgml @@ -1,6 +1,6 @@ - + DROP FOREIGN TABLE @@ -18,7 +18,7 @@ -DROP FOREIGN TABLE [ IF EXISTS ] name [, ...] [ CASCADE | RESTRICT ] +DROP FOREIGN TABLE [ IF EXISTS ] name [, ...] [ CASCADE | RESTRICT ] @@ -46,7 +46,7 @@ DROP FOREIGN TABLE [ IF EXISTS ] name - name + name The name (optionally schema-qualified) of the foreign table to drop. @@ -60,7 +60,7 @@ DROP FOREIGN TABLE [ IF EXISTS ] name Automatically drop objects that depend on the foreign table (such as views), and in turn all objects that depend on those objects - (see ). + (see ). @@ -95,7 +95,7 @@ DROP FOREIGN TABLE films, distributors; This command conforms to the ISO/IEC 9075-9 (SQL/MED), except that the standard only allows one foreign table to be dropped per command, and apart - from the IF EXISTS option, which is a PostgreSQL + from the IF EXISTS option, which is a PostgreSQL extension. @@ -104,8 +104,8 @@ DROP FOREIGN TABLE films, distributors; See Also - - + + diff --git a/doc/src/sgml/ref/drop_function.sgml b/doc/src/sgml/ref/drop_function.sgml index 0aa984528d..127fdfe419 100644 --- a/doc/src/sgml/ref/drop_function.sgml +++ b/doc/src/sgml/ref/drop_function.sgml @@ -3,7 +3,7 @@ doc/src/sgml/ref/drop_function.sgml PostgreSQL documentation --> - + DROP FUNCTION @@ -67,14 +67,14 @@ DROP FUNCTION [ IF EXISTS ] name [ - The mode of an argument: IN, OUT, - INOUT, or VARIADIC. - If omitted, the default is IN. + The mode of an argument: IN, OUT, + INOUT, or VARIADIC. + If omitted, the default is IN. Note that DROP FUNCTION does not actually pay - any attention to OUT arguments, since only the input + any attention to OUT arguments, since only the input arguments are needed to determine the function's identity. - So it is sufficient to list the IN, INOUT, - and VARIADIC arguments. + So it is sufficient to list the IN, INOUT, + and VARIADIC arguments. @@ -110,7 +110,7 @@ DROP FUNCTION [ IF EXISTS ] name [ Automatically drop objects that depend on the function (such as operators or triggers), and in turn all objects that depend on those objects - (see ). + (see ). 
@@ -127,7 +127,7 @@ DROP FUNCTION [ IF EXISTS ] name [ - + Examples @@ -159,7 +159,7 @@ DROP FUNCTION update_employee_salaries(); - + Compatibility @@ -183,8 +183,10 @@ DROP FUNCTION update_employee_salaries(); See Also - - + + + + diff --git a/doc/src/sgml/ref/drop_group.sgml b/doc/src/sgml/ref/drop_group.sgml index e601ff4172..47d4a72121 100644 --- a/doc/src/sgml/ref/drop_group.sgml +++ b/doc/src/sgml/ref/drop_group.sgml @@ -3,7 +3,7 @@ doc/src/sgml/ref/drop_group.sgml PostgreSQL documentation --> - + DROP GROUP @@ -21,7 +21,7 @@ PostgreSQL documentation -DROP GROUP [ IF EXISTS ] name [, ...] +DROP GROUP [ IF EXISTS ] name [, ...] @@ -30,7 +30,7 @@ DROP GROUP [ IF EXISTS ] name [, .. DROP GROUP is now an alias for - . + . @@ -46,7 +46,7 @@ DROP GROUP [ IF EXISTS ] name [, .. See Also - + diff --git a/doc/src/sgml/ref/drop_index.sgml b/doc/src/sgml/ref/drop_index.sgml index 6fe108ded2..2a8ca5bf68 100644 --- a/doc/src/sgml/ref/drop_index.sgml +++ b/doc/src/sgml/ref/drop_index.sgml @@ -3,7 +3,7 @@ doc/src/sgml/ref/drop_index.sgml PostgreSQL documentation --> - + DROP INDEX @@ -21,7 +21,7 @@ PostgreSQL documentation -DROP INDEX [ CONCURRENTLY ] [ IF EXISTS ] name [, ...] [ CASCADE | RESTRICT ] +DROP INDEX [ CONCURRENTLY ] [ IF EXISTS ] name [, ...] [ CASCADE | RESTRICT ] @@ -44,19 +44,19 @@ DROP INDEX [ CONCURRENTLY ] [ IF EXISTS ] name Drop the index without locking out concurrent selects, inserts, updates, - and deletes on the index's table. A normal DROP INDEX + and deletes on the index's table. A normal DROP INDEX acquires exclusive lock on the table, blocking other accesses until the index drop can be completed. With this option, the command instead waits until conflicting transactions have completed. There are several caveats to be aware of when using this option. - Only one index name can be specified, and the CASCADE option - is not supported. (Thus, an index that supports a UNIQUE or - PRIMARY KEY constraint cannot be dropped this way.) - Also, regular DROP INDEX commands can be + Only one index name can be specified, and the CASCADE option + is not supported. (Thus, an index that supports a UNIQUE or + PRIMARY KEY constraint cannot be dropped this way.) + Also, regular DROP INDEX commands can be performed within a transaction block, but - DROP INDEX CONCURRENTLY cannot. + DROP INDEX CONCURRENTLY cannot. @@ -72,7 +72,7 @@ DROP INDEX [ CONCURRENTLY ] [ IF EXISTS ] name - name + name The name (optionally schema-qualified) of an index to remove. @@ -86,7 +86,7 @@ DROP INDEX [ CONCURRENTLY ] [ IF EXISTS ] name Automatically drop objects that depend on the index, and in turn all objects that depend on those objects - (see ). + (see ). @@ -128,7 +128,7 @@ DROP INDEX title_idx; See Also - + diff --git a/doc/src/sgml/ref/drop_language.sgml b/doc/src/sgml/ref/drop_language.sgml index f014a74d45..976f3a0ce1 100644 --- a/doc/src/sgml/ref/drop_language.sgml +++ b/doc/src/sgml/ref/drop_language.sgml @@ -3,7 +3,7 @@ doc/src/sgml/ref/drop_language.sgml PostgreSQL documentation --> - + DROP LANGUAGE @@ -21,7 +21,7 @@ PostgreSQL documentation -DROP [ PROCEDURAL ] LANGUAGE [ IF EXISTS ] name [ CASCADE | RESTRICT ] +DROP [ PROCEDURAL ] LANGUAGE [ IF EXISTS ] name [ CASCADE | RESTRICT ] @@ -31,14 +31,14 @@ DROP [ PROCEDURAL ] LANGUAGE [ IF EXISTS ] name DROP LANGUAGE removes the definition of a previously registered procedural language. You must be a superuser - or the owner of the language to use DROP LANGUAGE. + or the owner of the language to use DROP LANGUAGE. 
As of PostgreSQL 9.1, most procedural - languages have been made into extensions, and should - therefore be removed with + languages have been made into extensions, and should + therefore be removed with not DROP LANGUAGE. @@ -60,7 +60,7 @@ DROP [ PROCEDURAL ] LANGUAGE [ IF EXISTS ] name - name + name The name of an existing procedural language. For backward @@ -76,7 +76,7 @@ DROP [ PROCEDURAL ] LANGUAGE [ IF EXISTS ] name). + (see ). @@ -118,8 +118,8 @@ DROP LANGUAGE plsample; See Also - - + + diff --git a/doc/src/sgml/ref/drop_materialized_view.sgml b/doc/src/sgml/ref/drop_materialized_view.sgml index 36ec33ceb0..c8f3bc5b0d 100644 --- a/doc/src/sgml/ref/drop_materialized_view.sgml +++ b/doc/src/sgml/ref/drop_materialized_view.sgml @@ -3,7 +3,7 @@ doc/src/sgml/ref/drop_materialized_view.sgml PostgreSQL documentation --> - + DROP MATERIALIZED VIEW @@ -21,7 +21,7 @@ PostgreSQL documentation -DROP MATERIALIZED VIEW [ IF EXISTS ] name [, ...] [ CASCADE | RESTRICT ] +DROP MATERIALIZED VIEW [ IF EXISTS ] name [, ...] [ CASCADE | RESTRICT ] @@ -50,7 +50,7 @@ DROP MATERIALIZED VIEW [ IF EXISTS ] name - name + name The name (optionally schema-qualified) of the materialized view to @@ -66,7 +66,7 @@ DROP MATERIALIZED VIEW [ IF EXISTS ] name). + (see ). @@ -107,9 +107,9 @@ DROP MATERIALIZED VIEW order_summary; See Also - - - + + + diff --git a/doc/src/sgml/ref/drop_opclass.sgml b/doc/src/sgml/ref/drop_opclass.sgml index 187a9a4d1f..4cb4cc35f7 100644 --- a/doc/src/sgml/ref/drop_opclass.sgml +++ b/doc/src/sgml/ref/drop_opclass.sgml @@ -3,7 +3,7 @@ doc/src/sgml/ref/drop_opclass.sgml PostgreSQL documentation --> - + DROP OPERATOR CLASS @@ -21,7 +21,7 @@ PostgreSQL documentation -DROP OPERATOR CLASS [ IF EXISTS ] name USING index_method [ CASCADE | RESTRICT ] +DROP OPERATOR CLASS [ IF EXISTS ] name USING index_method [ CASCADE | RESTRICT ] @@ -37,7 +37,7 @@ DROP OPERATOR CLASS [ IF EXISTS ] nameDROP OPERATOR CLASS does not drop any of the operators or functions referenced by the class. If there are any indexes depending on the operator class, you will need to specify - CASCADE for the drop to complete. + CASCADE for the drop to complete. @@ -80,7 +80,7 @@ DROP OPERATOR CLASS [ IF EXISTS ] name Automatically drop objects that depend on the operator class (such as indexes), and in turn all objects that depend on those objects - (see ). + (see ). @@ -101,13 +101,13 @@ DROP OPERATOR CLASS [ IF EXISTS ] nameNotes - DROP OPERATOR CLASS will not drop the operator family + DROP OPERATOR CLASS will not drop the operator family containing the class, even if there is nothing else left in the family (in particular, in the case where the family was implicitly - created by CREATE OPERATOR CLASS). An empty operator + created by CREATE OPERATOR CLASS). An empty operator family is harmless, but for the sake of tidiness you might wish to - remove the family with DROP OPERATOR FAMILY; or perhaps - better, use DROP OPERATOR FAMILY in the first place. + remove the family with DROP OPERATOR FAMILY; or perhaps + better, use DROP OPERATOR FAMILY in the first place. @@ -122,7 +122,7 @@ DROP OPERATOR CLASS widget_ops USING btree; This command will not succeed if there are any existing indexes - that use the operator class. Add CASCADE to drop + that use the operator class. Add CASCADE to drop such indexes along with the operator class. 
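    That variant would look like this (reusing the widget_ops name from
    the example above; whether any dependent indexes actually exist is
    assumed):

DROP OPERATOR CLASS widget_ops USING btree CASCADE;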
@@ -140,9 +140,9 @@ DROP OPERATOR CLASS widget_ops USING btree; See Also - - - + + + diff --git a/doc/src/sgml/ref/drop_operator.sgml b/doc/src/sgml/ref/drop_operator.sgml index fc82c3e0e3..2dff050ecf 100644 --- a/doc/src/sgml/ref/drop_operator.sgml +++ b/doc/src/sgml/ref/drop_operator.sgml @@ -3,7 +3,7 @@ doc/src/sgml/ref/drop_operator.sgml PostgreSQL documentation --> - + DROP OPERATOR @@ -21,7 +21,7 @@ PostgreSQL documentation -DROP OPERATOR [ IF EXISTS ] name ( { left_type | NONE } , { right_type | NONE } ) [, ...] [ CASCADE | RESTRICT ] +DROP OPERATOR [ IF EXISTS ] name ( { left_type | NONE } , { right_type | NONE } ) [, ...] [ CASCADE | RESTRICT ] @@ -85,7 +85,7 @@ DROP OPERATOR [ IF EXISTS ] name ( Automatically drop objects that depend on the operator (such as views using it), and in turn all objects that depend on those objects - (see ). + (see ). @@ -146,8 +146,8 @@ DROP OPERATOR ~ (none, bit), ! (bigint, none); See Also - - + + diff --git a/doc/src/sgml/ref/drop_opfamily.sgml b/doc/src/sgml/ref/drop_opfamily.sgml index 53bce22883..c1bcd0d1df 100644 --- a/doc/src/sgml/ref/drop_opfamily.sgml +++ b/doc/src/sgml/ref/drop_opfamily.sgml @@ -3,7 +3,7 @@ doc/src/sgml/ref/drop_opfamily.sgml PostgreSQL documentation --> - + DROP OPERATOR FAMILY @@ -21,7 +21,7 @@ PostgreSQL documentation -DROP OPERATOR FAMILY [ IF EXISTS ] name USING index_method [ CASCADE | RESTRICT ] +DROP OPERATOR FAMILY [ IF EXISTS ] name USING index_method [ CASCADE | RESTRICT ] @@ -38,7 +38,7 @@ DROP OPERATOR FAMILY [ IF EXISTS ] nameCASCADE for the drop to complete. + CASCADE for the drop to complete. @@ -81,7 +81,7 @@ DROP OPERATOR FAMILY [ IF EXISTS ] name Automatically drop objects that depend on the operator family, and in turn all objects that depend on those objects - (see ). + (see ). @@ -109,7 +109,7 @@ DROP OPERATOR FAMILY float_ops USING btree; This command will not succeed if there are any existing indexes - that use operator classes within the family. Add CASCADE to + that use operator classes within the family. Add CASCADE to drop such indexes along with the operator family. @@ -127,11 +127,11 @@ DROP OPERATOR FAMILY float_ops USING btree; See Also - - - - - + + + + + diff --git a/doc/src/sgml/ref/drop_owned.sgml b/doc/src/sgml/ref/drop_owned.sgml index 81694b88e5..4c66da2b34 100644 --- a/doc/src/sgml/ref/drop_owned.sgml +++ b/doc/src/sgml/ref/drop_owned.sgml @@ -3,7 +3,7 @@ doc/src/sgml/ref/drop_owned.sgml PostgreSQL documentation --> - + DROP OWNED @@ -21,7 +21,7 @@ PostgreSQL documentation -DROP OWNED BY { name | CURRENT_USER | SESSION_USER } [, ...] [ CASCADE | RESTRICT ] +DROP OWNED BY { name | CURRENT_USER | SESSION_USER } [, ...] [ CASCADE | RESTRICT ] @@ -42,7 +42,7 @@ DROP OWNED BY { name | CURRENT_USER - name + name The name of a role whose objects will be dropped, and whose @@ -57,7 +57,7 @@ DROP OWNED BY { name | CURRENT_USER Automatically drop objects that depend on the affected objects, and in turn all objects that depend on those objects - (see ). + (see ). @@ -90,9 +90,9 @@ DROP OWNED BY { name | CURRENT_USER - The command is an alternative that + The command is an alternative that reassigns the ownership of all the database objects owned by one or - more roles. However, REASSIGN OWNED does not deal with + more roles. However, REASSIGN OWNED does not deal with privileges for other objects. @@ -101,7 +101,7 @@ DROP OWNED BY { name | CURRENT_USER - See for more discussion. + See for more discussion. 
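    A typical sequence for removing a role therefore looks roughly like
    this (the role names are hypothetical):

REASSIGN OWNED BY doomed_role TO successor_role;
DROP OWNED BY doomed_role;
-- repeat the two commands above in each database of the cluster
-- that contains objects owned by the role
DROP ROLE doomed_role;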
@@ -118,8 +118,8 @@ DROP OWNED BY { name | CURRENT_USER See Also - - + + diff --git a/doc/src/sgml/ref/drop_policy.sgml b/doc/src/sgml/ref/drop_policy.sgml index f474692105..9297ade113 100644 --- a/doc/src/sgml/ref/drop_policy.sgml +++ b/doc/src/sgml/ref/drop_policy.sgml @@ -3,7 +3,7 @@ doc/src/sgml/ref/drop_policy.sgml PostgreSQL documentation --> - + DROP POLICY @@ -111,8 +111,8 @@ DROP POLICY p1 ON my_table; See Also - - + + diff --git a/doc/src/sgml/ref/drop_procedure.sgml b/doc/src/sgml/ref/drop_procedure.sgml new file mode 100644 index 0000000000..fef61b66ac --- /dev/null +++ b/doc/src/sgml/ref/drop_procedure.sgml @@ -0,0 +1,162 @@ + + + + + DROP PROCEDURE + + + + DROP PROCEDURE + 7 + SQL - Language Statements + + + + DROP PROCEDURE + remove a procedure + + + + +DROP PROCEDURE [ IF EXISTS ] name [ ( [ [ argmode ] [ argname ] argtype [, ...] ] ) ] [, ...] + [ CASCADE | RESTRICT ] + + + + + Description + + + DROP PROCEDURE removes the definition of an existing + procedure. To execute this command the user must be the + owner of the procedure. The argument types to the + procedure must be specified, since several different procedures + can exist with the same name and different argument lists. + + + + + Parameters + + + + IF EXISTS + + + Do not throw an error if the procedure does not exist. A notice is issued + in this case. + + + + + + name + + + The name (optionally schema-qualified) of an existing procedure. If no + argument list is specified, the name must be unique in its schema. + + + + + + argmode + + + + The mode of an argument: IN or VARIADIC. + If omitted, the default is IN. + + + + + + argname + + + + The name of an argument. + Note that DROP PROCEDURE does not actually pay + any attention to argument names, since only the argument data + types are needed to determine the procedure's identity. + + + + + + argtype + + + + The data type(s) of the procedure's arguments (optionally + schema-qualified), if any. + + + + + + CASCADE + + + Automatically drop objects that depend on the procedure, + and in turn all objects that depend on those objects + (see ). + + + + + + RESTRICT + + + Refuse to drop the procedure if any objects depend on it. This + is the default. + + + + + + + + Examples + + +DROP PROCEDURE do_db_maintenance(); + + + + + Compatibility + + + This command conforms to the SQL standard, with + these PostgreSQL extensions: + + + The standard only allows one procedure to be dropped per command. + + + The IF EXISTS option + + + The ability to specify argument modes and names + + + + + + + See Also + + + + + + + + + + diff --git a/doc/src/sgml/ref/drop_publication.sgml b/doc/src/sgml/ref/drop_publication.sgml index 8e45a43982..5dcbb837d1 100644 --- a/doc/src/sgml/ref/drop_publication.sgml +++ b/doc/src/sgml/ref/drop_publication.sgml @@ -3,7 +3,7 @@ doc/src/sgml/ref/drop_publication.sgml PostgreSQL documentation --> - + DROP PUBLICATION @@ -21,7 +21,7 @@ PostgreSQL documentation -DROP PUBLICATION [ IF EXISTS ] name [, ...] [ CASCADE | RESTRICT ] +DROP PUBLICATION [ IF EXISTS ] name [, ...] [ CASCADE | RESTRICT ] @@ -89,7 +89,7 @@ DROP PUBLICATION mypublication; Compatibility - DROP PUBLICATION is a PostgreSQL + DROP PUBLICATION is a PostgreSQL extension. 
@@ -98,8 +98,8 @@ DROP PUBLICATION mypublication; See Also - - + + diff --git a/doc/src/sgml/ref/drop_role.sgml b/doc/src/sgml/ref/drop_role.sgml index 75b48f94f9..13079f3e1f 100644 --- a/doc/src/sgml/ref/drop_role.sgml +++ b/doc/src/sgml/ref/drop_role.sgml @@ -3,7 +3,7 @@ doc/src/sgml/ref/drop_role.sgml PostgreSQL documentation --> - + DROP ROLE @@ -21,7 +21,7 @@ PostgreSQL documentation -DROP ROLE [ IF EXISTS ] name [, ...] +DROP ROLE [ IF EXISTS ] name [, ...] @@ -31,7 +31,7 @@ DROP ROLE [ IF EXISTS ] name [, ... DROP ROLE removes the specified role(s). To drop a superuser role, you must be a superuser yourself; - to drop non-superuser roles, you must have CREATEROLE + to drop non-superuser roles, you must have CREATEROLE privilege. @@ -40,14 +40,14 @@ DROP ROLE [ IF EXISTS ] name [, ... of the cluster; an error will be raised if so. Before dropping the role, you must drop all the objects it owns (or reassign their ownership) and revoke any privileges the role has been granted on other objects. - The and - commands can be useful for this purpose; see + The and + commands can be useful for this purpose; see for more discussion. However, it is not necessary to remove role memberships involving - the role; DROP ROLE automatically revokes any memberships + the role; DROP ROLE automatically revokes any memberships of the target role in other roles, and of other roles in the target role. The other roles are not dropped nor otherwise affected. @@ -68,7 +68,7 @@ DROP ROLE [ IF EXISTS ] name [, ... - name + name The name of the role to remove. @@ -83,7 +83,7 @@ DROP ROLE [ IF EXISTS ] name [, ... PostgreSQL includes a program that has the + linkend="app-dropuser"/> that has the same functionality as this command (in fact, it calls this command) but can be run from the command shell. @@ -113,9 +113,9 @@ DROP ROLE jonathan; See Also - - - + + + diff --git a/doc/src/sgml/ref/drop_routine.sgml b/doc/src/sgml/ref/drop_routine.sgml new file mode 100644 index 0000000000..5cd1a0f11e --- /dev/null +++ b/doc/src/sgml/ref/drop_routine.sgml @@ -0,0 +1,94 @@ + + + + + DROP ROUTINE + + + + DROP ROUTINE + 7 + SQL - Language Statements + + + + DROP ROUTINE + remove a routine + + + + +DROP ROUTINE [ IF EXISTS ] name [ ( [ [ argmode ] [ argname ] argtype [, ...] ] ) ] [, ...] + [ CASCADE | RESTRICT ] + + + + + Description + + + DROP ROUTINE removes the definition of an existing + routine, which can be an aggregate function, a normal function, or a + procedure. See + under , , + and for the description of the + parameters, more examples, and further details. + + + + + Examples + + + To drop the routine foo for type + integer: + +DROP ROUTINE foo(integer); + + This command will work independent of whether foo is an + aggregate, function, or procedure. + + + + + Compatibility + + + This command conforms to the SQL standard, with + these PostgreSQL extensions: + + + The standard only allows one routine to be dropped per command. + + + The IF EXISTS option + + + The ability to specify argument modes and names + + + Aggregate functions are an extension. + + + + + + + See Also + + + + + + + + + + Note that there is no CREATE ROUTINE command. 
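    For instance (a sketch that assumes each of these routines exists),
    the same syntax removes any kind of routine:

DROP ROUTINE myavg(integer);          -- an aggregate function
DROP ROUTINE do_db_maintenance();     -- a procedure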
+ + + + diff --git a/doc/src/sgml/ref/drop_rule.sgml b/doc/src/sgml/ref/drop_rule.sgml index d4905a69c9..cc5d00e4dc 100644 --- a/doc/src/sgml/ref/drop_rule.sgml +++ b/doc/src/sgml/ref/drop_rule.sgml @@ -3,7 +3,7 @@ doc/src/sgml/ref/drop_rule.sgml PostgreSQL documentation --> - + DROP RULE @@ -21,7 +21,7 @@ PostgreSQL documentation -DROP RULE [ IF EXISTS ] name ON table_name [ CASCADE | RESTRICT ] +DROP RULE [ IF EXISTS ] name ON table_name [ CASCADE | RESTRICT ] @@ -73,7 +73,7 @@ DROP RULE [ IF EXISTS ] name ON Automatically drop objects that depend on the rule, and in turn all objects that depend on those objects - (see ). + (see ). @@ -115,8 +115,8 @@ DROP RULE newrule ON mytable; See Also - - + + diff --git a/doc/src/sgml/ref/drop_schema.sgml b/doc/src/sgml/ref/drop_schema.sgml index 5b1697fff2..4d5d9f55df 100644 --- a/doc/src/sgml/ref/drop_schema.sgml +++ b/doc/src/sgml/ref/drop_schema.sgml @@ -3,7 +3,7 @@ doc/src/sgml/ref/drop_schema.sgml PostgreSQL documentation --> - + DROP SCHEMA @@ -21,7 +21,7 @@ PostgreSQL documentation -DROP SCHEMA [ IF EXISTS ] name [, ...] [ CASCADE | RESTRICT ] +DROP SCHEMA [ IF EXISTS ] name [, ...] [ CASCADE | RESTRICT ] @@ -54,7 +54,7 @@ DROP SCHEMA [ IF EXISTS ] name [, . - name + name The name of a schema. @@ -69,7 +69,7 @@ DROP SCHEMA [ IF EXISTS ] name [, . Automatically drop objects (tables, functions, etc.) that are contained in the schema, and in turn all objects that depend on those objects - (see ). + (see ). @@ -114,7 +114,7 @@ DROP SCHEMA mystuff CASCADE; DROP SCHEMA is fully conforming with the SQL standard, except that the standard only allows one schema to be dropped per command, and apart from the - IF EXISTS option, which is a PostgreSQL + IF EXISTS option, which is a PostgreSQL extension. @@ -123,8 +123,8 @@ DROP SCHEMA mystuff CASCADE; See Also - - + + diff --git a/doc/src/sgml/ref/drop_sequence.sgml b/doc/src/sgml/ref/drop_sequence.sgml index f0e1edc81c..387c98edbc 100644 --- a/doc/src/sgml/ref/drop_sequence.sgml +++ b/doc/src/sgml/ref/drop_sequence.sgml @@ -3,7 +3,7 @@ doc/src/sgml/ref/drop_sequence.sgml PostgreSQL documentation --> - + DROP SEQUENCE @@ -21,7 +21,7 @@ PostgreSQL documentation -DROP SEQUENCE [ IF EXISTS ] name [, ...] [ CASCADE | RESTRICT ] +DROP SEQUENCE [ IF EXISTS ] name [, ...] [ CASCADE | RESTRICT ] @@ -49,7 +49,7 @@ DROP SEQUENCE [ IF EXISTS ] name [, - name + name The name (optionally schema-qualified) of a sequence. @@ -63,7 +63,7 @@ DROP SEQUENCE [ IF EXISTS ] name [, Automatically drop objects that depend on the sequence, and in turn all objects that depend on those objects - (see ). + (see ). @@ -98,7 +98,7 @@ DROP SEQUENCE serial; DROP SEQUENCE conforms to the SQL standard, except that the standard only allows one sequence to be dropped per command, and apart from the - IF EXISTS option, which is a PostgreSQL + IF EXISTS option, which is a PostgreSQL extension. @@ -107,8 +107,8 @@ DROP SEQUENCE serial; See Also - - + + diff --git a/doc/src/sgml/ref/drop_server.sgml b/doc/src/sgml/ref/drop_server.sgml index 42acdd41dc..f83a661b3e 100644 --- a/doc/src/sgml/ref/drop_server.sgml +++ b/doc/src/sgml/ref/drop_server.sgml @@ -3,7 +3,7 @@ doc/src/sgml/ref/drop_server.sgml PostgreSQL documentation --> - + DROP SERVER @@ -65,7 +65,7 @@ DROP SERVER [ IF EXISTS ] name [, . Automatically drop objects that depend on the server (such as user mappings), and in turn all objects that depend on those objects - (see ). + (see ). @@ -86,7 +86,7 @@ DROP SERVER [ IF EXISTS ] name [, . 
Examples - Drop a server foo if it exists: + Drop a server foo if it exists: DROP SERVER IF EXISTS foo; @@ -97,8 +97,8 @@ DROP SERVER IF EXISTS foo; DROP SERVER conforms to ISO/IEC 9075-9 - (SQL/MED). The IF EXISTS clause is - a PostgreSQL extension. + (SQL/MED). The IF EXISTS clause is + a PostgreSQL extension. @@ -106,8 +106,8 @@ DROP SERVER IF EXISTS foo; See Also - - + + diff --git a/doc/src/sgml/ref/drop_statistics.sgml b/doc/src/sgml/ref/drop_statistics.sgml index 37fc402589..f58c3d6d22 100644 --- a/doc/src/sgml/ref/drop_statistics.sgml +++ b/doc/src/sgml/ref/drop_statistics.sgml @@ -3,7 +3,7 @@ doc/src/sgml/ref/drop_statistics.sgml PostgreSQL documentation --> - + DROP STATISTICS @@ -21,7 +21,7 @@ PostgreSQL documentation -DROP STATISTICS [ IF EXISTS ] name [, ...] +DROP STATISTICS [ IF EXISTS ] name [, ...] @@ -51,7 +51,7 @@ DROP STATISTICS [ IF EXISTS ] name - name + name The name (optionally schema-qualified) of the statistics object to drop. @@ -88,8 +88,8 @@ DROP STATISTICS IF EXISTS See Also - - + + diff --git a/doc/src/sgml/ref/drop_subscription.sgml b/doc/src/sgml/ref/drop_subscription.sgml index f535c000c4..adbdeafb4e 100644 --- a/doc/src/sgml/ref/drop_subscription.sgml +++ b/doc/src/sgml/ref/drop_subscription.sgml @@ -3,7 +3,7 @@ doc/src/sgml/ref/drop_subscription.sgml PostgreSQL documentation --> - + DROP SUBSCRIPTION @@ -91,7 +91,12 @@ DROP SUBSCRIPTION [ IF EXISTS ] name. + also . + + + + If a subscription is associated with a replication slot, then DROP + SUBSCRIPTION cannot be executed inside a transaction block. @@ -109,7 +114,7 @@ DROP SUBSCRIPTION mysub; Compatibility - DROP SUBSCRIPTION is a PostgreSQL + DROP SUBSCRIPTION is a PostgreSQL extension. @@ -118,8 +123,8 @@ DROP SUBSCRIPTION mysub; See Also - - + + diff --git a/doc/src/sgml/ref/drop_table.sgml b/doc/src/sgml/ref/drop_table.sgml index 94d28b06fb..bf8996d198 100644 --- a/doc/src/sgml/ref/drop_table.sgml +++ b/doc/src/sgml/ref/drop_table.sgml @@ -3,7 +3,7 @@ doc/src/sgml/ref/drop_table.sgml PostgreSQL documentation --> - + DROP TABLE @@ -21,7 +21,7 @@ PostgreSQL documentation -DROP TABLE [ IF EXISTS ] name [, ...] [ CASCADE | RESTRICT ] +DROP TABLE [ IF EXISTS ] name [, ...] [ CASCADE | RESTRICT ] @@ -32,16 +32,16 @@ DROP TABLE [ IF EXISTS ] name [, .. DROP TABLE removes tables from the database. Only the table owner, the schema owner, and superuser can drop a table. To empty a table of rows - without destroying the table, use - or . + without destroying the table, use + or . DROP TABLE always removes any indexes, rules, triggers, and constraints that exist for the target table. However, to drop a table that is referenced by a view or a foreign-key - constraint of another table, CASCADE must be - specified. (CASCADE will remove a dependent view entirely, + constraint of another table, CASCADE must be + specified. (CASCADE will remove a dependent view entirely, but in the foreign-key case it will only remove the foreign-key constraint, not the other table entirely.) @@ -62,7 +62,7 @@ DROP TABLE [ IF EXISTS ] name [, .. - name + name The name (optionally schema-qualified) of the table to drop. @@ -77,7 +77,7 @@ DROP TABLE [ IF EXISTS ] name [, .. Automatically drop objects that depend on the table (such as views), and in turn all objects that depend on those objects - (see ). + (see ). 
@@ -112,7 +112,7 @@ DROP TABLE films, distributors; This command conforms to the SQL standard, except that the standard only allows one table to be dropped per command, and apart from the - IF EXISTS option, which is a PostgreSQL + IF EXISTS option, which is a PostgreSQL extension. @@ -121,8 +121,8 @@ DROP TABLE films, distributors; See Also - - + + diff --git a/doc/src/sgml/ref/drop_tablespace.sgml b/doc/src/sgml/ref/drop_tablespace.sgml index d0a05af2e1..047e4e0481 100644 --- a/doc/src/sgml/ref/drop_tablespace.sgml +++ b/doc/src/sgml/ref/drop_tablespace.sgml @@ -3,7 +3,7 @@ doc/src/sgml/ref/drop_tablespace.sgml PostgreSQL documentation --> - + DROP TABLESPACE @@ -21,7 +21,7 @@ PostgreSQL documentation -DROP TABLESPACE [ IF EXISTS ] name +DROP TABLESPACE [ IF EXISTS ] name @@ -38,8 +38,8 @@ DROP TABLESPACE [ IF EXISTS ] name dropped. It is possible that objects in other databases might still reside in the tablespace even if no objects in the current database are using the tablespace. Also, if the tablespace is listed in the setting of any active session, the - DROP might fail due to temporary files residing in the + linkend="guc-temp-tablespaces"/> setting of any active session, the + DROP might fail due to temporary files residing in the tablespace. @@ -60,7 +60,7 @@ DROP TABLESPACE [ IF EXISTS ] name - name + name The name of a tablespace. @@ -74,7 +74,7 @@ DROP TABLESPACE [ IF EXISTS ] name Notes - DROP TABLESPACE cannot be executed inside a transaction block. + DROP TABLESPACE cannot be executed inside a transaction block. @@ -93,7 +93,7 @@ DROP TABLESPACE mystuff; Compatibility - DROP TABLESPACE is a PostgreSQL + DROP TABLESPACE is a PostgreSQL extension. @@ -102,8 +102,8 @@ DROP TABLESPACE mystuff; See Also - - + + diff --git a/doc/src/sgml/ref/drop_transform.sgml b/doc/src/sgml/ref/drop_transform.sgml index 698920a226..582e782219 100644 --- a/doc/src/sgml/ref/drop_transform.sgml +++ b/doc/src/sgml/ref/drop_transform.sgml @@ -1,6 +1,6 @@ - + DROP TRANSFORM @@ -76,7 +76,7 @@ DROP TRANSFORM [ IF EXISTS ] FOR type_name LANGUAGE < Automatically drop objects that depend on the transform, and in turn all objects that depend on those objects - (see ). + (see ). @@ -110,7 +110,7 @@ DROP TRANSFORM FOR hstore LANGUAGE plpythonu; This form of DROP TRANSFORM is a PostgreSQL extension. See for details. + linkend="sql-createtransform"/> for details. @@ -118,7 +118,7 @@ DROP TRANSFORM FOR hstore LANGUAGE plpythonu; See Also - + diff --git a/doc/src/sgml/ref/drop_trigger.sgml b/doc/src/sgml/ref/drop_trigger.sgml index d400b8383f..728541e557 100644 --- a/doc/src/sgml/ref/drop_trigger.sgml +++ b/doc/src/sgml/ref/drop_trigger.sgml @@ -3,7 +3,7 @@ doc/src/sgml/ref/drop_trigger.sgml PostgreSQL documentation --> - + DROP TRIGGER @@ -21,7 +21,7 @@ PostgreSQL documentation -DROP TRIGGER [ IF EXISTS ] name ON table_name [ CASCADE | RESTRICT ] +DROP TRIGGER [ IF EXISTS ] name ON table_name [ CASCADE | RESTRICT ] @@ -51,7 +51,7 @@ DROP TRIGGER [ IF EXISTS ] name ON - name + name The name of the trigger to remove. @@ -60,7 +60,7 @@ DROP TRIGGER [ IF EXISTS ] name ON - table_name + table_name The name (optionally schema-qualified) of the table for which @@ -75,7 +75,7 @@ DROP TRIGGER [ IF EXISTS ] name ON Automatically drop objects that depend on the trigger, and in turn all objects that depend on those objects - (see ). + (see ). 
@@ -92,7 +92,7 @@ DROP TRIGGER [ IF EXISTS ] name ON - + Examples @@ -104,7 +104,7 @@ DROP TRIGGER if_dist_exists ON films; - + Compatibility @@ -120,7 +120,7 @@ DROP TRIGGER if_dist_exists ON films; See Also - + diff --git a/doc/src/sgml/ref/drop_tsconfig.sgml b/doc/src/sgml/ref/drop_tsconfig.sgml index 0096e0092d..9eec4bab53 100644 --- a/doc/src/sgml/ref/drop_tsconfig.sgml +++ b/doc/src/sgml/ref/drop_tsconfig.sgml @@ -3,7 +3,7 @@ doc/src/sgml/ref/drop_tsconfig.sgml PostgreSQL documentation --> - + DROP TEXT SEARCH CONFIGURATION @@ -21,7 +21,7 @@ PostgreSQL documentation -DROP TEXT SEARCH CONFIGURATION [ IF EXISTS ] name [ CASCADE | RESTRICT ] +DROP TEXT SEARCH CONFIGURATION [ IF EXISTS ] name [ CASCADE | RESTRICT ] @@ -66,7 +66,7 @@ DROP TEXT SEARCH CONFIGURATION [ IF EXISTS ] name Automatically drop objects that depend on the text search configuration, and in turn all objects that depend on those objects - (see ). + (see ). @@ -94,8 +94,8 @@ DROP TEXT SEARCH CONFIGURATION my_english; This command will not succeed if there are any existing indexes - that reference the configuration in to_tsvector calls. - Add CASCADE to + that reference the configuration in to_tsvector calls. + Add CASCADE to drop such indexes along with the text search configuration. @@ -113,8 +113,8 @@ DROP TEXT SEARCH CONFIGURATION my_english; See Also - - + + diff --git a/doc/src/sgml/ref/drop_tsdictionary.sgml b/doc/src/sgml/ref/drop_tsdictionary.sgml index 803abf8cba..8d22cfdc75 100644 --- a/doc/src/sgml/ref/drop_tsdictionary.sgml +++ b/doc/src/sgml/ref/drop_tsdictionary.sgml @@ -3,7 +3,7 @@ doc/src/sgml/ref/drop_tsdictionary.sgml PostgreSQL documentation --> - + DROP TEXT SEARCH DICTIONARY @@ -21,7 +21,7 @@ PostgreSQL documentation -DROP TEXT SEARCH DICTIONARY [ IF EXISTS ] name [ CASCADE | RESTRICT ] +DROP TEXT SEARCH DICTIONARY [ IF EXISTS ] name [ CASCADE | RESTRICT ] @@ -66,7 +66,7 @@ DROP TEXT SEARCH DICTIONARY [ IF EXISTS ] name Automatically drop objects that depend on the text search dictionary, and in turn all objects that depend on those objects - (see ). + (see ). @@ -94,7 +94,7 @@ DROP TEXT SEARCH DICTIONARY english; This command will not succeed if there are any existing text search - configurations that use the dictionary. Add CASCADE to + configurations that use the dictionary. Add CASCADE to drop such configurations along with the dictionary. @@ -112,8 +112,8 @@ DROP TEXT SEARCH DICTIONARY english; See Also - - + + diff --git a/doc/src/sgml/ref/drop_tsparser.sgml b/doc/src/sgml/ref/drop_tsparser.sgml index fa99720161..2b647ccaa6 100644 --- a/doc/src/sgml/ref/drop_tsparser.sgml +++ b/doc/src/sgml/ref/drop_tsparser.sgml @@ -3,7 +3,7 @@ doc/src/sgml/ref/drop_tsparser.sgml PostgreSQL documentation --> - + DROP TEXT SEARCH PARSER @@ -21,7 +21,7 @@ PostgreSQL documentation -DROP TEXT SEARCH PARSER [ IF EXISTS ] name [ CASCADE | RESTRICT ] +DROP TEXT SEARCH PARSER [ IF EXISTS ] name [ CASCADE | RESTRICT ] @@ -64,7 +64,7 @@ DROP TEXT SEARCH PARSER [ IF EXISTS ] name Automatically drop objects that depend on the text search parser, and in turn all objects that depend on those objects - (see ). + (see ). @@ -92,7 +92,7 @@ DROP TEXT SEARCH PARSER my_parser; This command will not succeed if there are any existing text search - configurations that use the parser. Add CASCADE to + configurations that use the parser. Add CASCADE to drop such configurations along with the parser. 
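For instance, a sketch of the CASCADE form (reusing the my_parser name from the example above; the dependent configurations are hypothetical):

DROP TEXT SEARCH PARSER my_parser CASCADE;  -- also drops any text search configurations built on my_parser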
@@ -110,8 +110,8 @@ DROP TEXT SEARCH PARSER my_parser; See Also - - + + diff --git a/doc/src/sgml/ref/drop_tstemplate.sgml b/doc/src/sgml/ref/drop_tstemplate.sgml index 9d051eb619..972b90a85c 100644 --- a/doc/src/sgml/ref/drop_tstemplate.sgml +++ b/doc/src/sgml/ref/drop_tstemplate.sgml @@ -3,7 +3,7 @@ doc/src/sgml/ref/drop_tstemplate.sgml PostgreSQL documentation --> - + DROP TEXT SEARCH TEMPLATE @@ -21,7 +21,7 @@ PostgreSQL documentation -DROP TEXT SEARCH TEMPLATE [ IF EXISTS ] name [ CASCADE | RESTRICT ] +DROP TEXT SEARCH TEMPLATE [ IF EXISTS ] name [ CASCADE | RESTRICT ] @@ -65,7 +65,7 @@ DROP TEXT SEARCH TEMPLATE [ IF EXISTS ] name Automatically drop objects that depend on the text search template, and in turn all objects that depend on those objects - (see ). + (see ). @@ -93,7 +93,7 @@ DROP TEXT SEARCH TEMPLATE thesaurus; This command will not succeed if there are any existing text search - dictionaries that use the template. Add CASCADE to + dictionaries that use the template. Add CASCADE to drop such dictionaries along with the template. @@ -111,8 +111,8 @@ DROP TEXT SEARCH TEMPLATE thesaurus; See Also - - + + diff --git a/doc/src/sgml/ref/drop_type.sgml b/doc/src/sgml/ref/drop_type.sgml index 2c7b8fe9f6..9e555c0624 100644 --- a/doc/src/sgml/ref/drop_type.sgml +++ b/doc/src/sgml/ref/drop_type.sgml @@ -3,7 +3,7 @@ doc/src/sgml/ref/drop_type.sgml PostgreSQL documentation --> - + DROP TYPE @@ -21,7 +21,7 @@ PostgreSQL documentation -DROP TYPE [ IF EXISTS ] name [, ...] [ CASCADE | RESTRICT ] +DROP TYPE [ IF EXISTS ] name [, ...] [ CASCADE | RESTRICT ] @@ -49,7 +49,7 @@ DROP TYPE [ IF EXISTS ] name [, ... - name + name The name (optionally schema-qualified) of the data type to remove. @@ -64,7 +64,7 @@ DROP TYPE [ IF EXISTS ] name [, ... Automatically drop objects that depend on the type (such as table columns, functions, and operators), and in turn all objects that depend on those objects - (see ). + (see ). @@ -81,7 +81,7 @@ DROP TYPE [ IF EXISTS ] name [, ... - + Examples @@ -91,25 +91,25 @@ DROP TYPE box; - + Compatibility This command is similar to the corresponding command in the SQL - standard, apart from the IF EXISTS - option, which is a PostgreSQL extension. + standard, apart from the IF EXISTS + option, which is a PostgreSQL extension. But note that much of the CREATE TYPE command and the data type extension mechanisms in PostgreSQL differ from the SQL standard. - + See Also - - + + diff --git a/doc/src/sgml/ref/drop_user.sgml b/doc/src/sgml/ref/drop_user.sgml index 38e5418d07..37ab856125 100644 --- a/doc/src/sgml/ref/drop_user.sgml +++ b/doc/src/sgml/ref/drop_user.sgml @@ -3,7 +3,7 @@ doc/src/sgml/ref/drop_user.sgml PostgreSQL documentation --> - + DROP USER @@ -21,7 +21,7 @@ PostgreSQL documentation -DROP USER [ IF EXISTS ] name [, ...] +DROP USER [ IF EXISTS ] name [, ...] @@ -30,7 +30,7 @@ DROP USER [ IF EXISTS ] name [, ... DROP USER is simply an alternate spelling of - . + . @@ -48,7 +48,7 @@ DROP USER [ IF EXISTS ] name [, ... See Also - + diff --git a/doc/src/sgml/ref/drop_user_mapping.sgml b/doc/src/sgml/ref/drop_user_mapping.sgml index eb4c320293..7cb09f1166 100644 --- a/doc/src/sgml/ref/drop_user_mapping.sgml +++ b/doc/src/sgml/ref/drop_user_mapping.sgml @@ -3,7 +3,7 @@ doc/src/sgml/ref/drop_user_mapping.sgml PostgreSQL documentation --> - + DROP USER MAPPING @@ -36,7 +36,7 @@ DROP USER MAPPING [ IF EXISTS ] FOR { user_name The owner of a foreign server can drop user mappings for that server for any user. 
Also, a user can drop a user mapping for their own - user name if USAGE privilege on the server has been + user name if USAGE privilege on the server has been granted to the user. @@ -59,9 +59,9 @@ DROP USER MAPPING [ IF EXISTS ] FOR { user_nameuser_name - User name of the mapping. CURRENT_USER - and USER match the name of the current - user. PUBLIC is used to match all present and + User name of the mapping. CURRENT_USER + and USER match the name of the current + user. PUBLIC is used to match all present and future user names in the system. @@ -82,7 +82,7 @@ DROP USER MAPPING [ IF EXISTS ] FOR { user_nameExamples - Drop a user mapping bob, server foo if it exists: + Drop a user mapping bob, server foo if it exists: DROP USER MAPPING IF EXISTS FOR bob SERVER foo; @@ -93,8 +93,8 @@ DROP USER MAPPING IF EXISTS FOR bob SERVER foo; DROP USER MAPPING conforms to ISO/IEC 9075-9 - (SQL/MED). The IF EXISTS clause is - a PostgreSQL extension. + (SQL/MED). The IF EXISTS clause is + a PostgreSQL extension. @@ -102,8 +102,8 @@ DROP USER MAPPING IF EXISTS FOR bob SERVER foo; See Also - - + + diff --git a/doc/src/sgml/ref/drop_view.sgml b/doc/src/sgml/ref/drop_view.sgml index 40f2356188..a1c550ec3e 100644 --- a/doc/src/sgml/ref/drop_view.sgml +++ b/doc/src/sgml/ref/drop_view.sgml @@ -3,7 +3,7 @@ doc/src/sgml/ref/drop_view.sgml PostgreSQL documentation --> - + DROP VIEW @@ -21,7 +21,7 @@ PostgreSQL documentation -DROP VIEW [ IF EXISTS ] name [, ...] [ CASCADE | RESTRICT ] +DROP VIEW [ IF EXISTS ] name [, ...] [ CASCADE | RESTRICT ] @@ -49,7 +49,7 @@ DROP VIEW [ IF EXISTS ] name [, ... - name + name The name (optionally schema-qualified) of the view to remove. @@ -64,7 +64,7 @@ DROP VIEW [ IF EXISTS ] name [, ... Automatically drop objects that depend on the view (such as other views), and in turn all objects that depend on those objects - (see ). + (see ). @@ -97,7 +97,7 @@ DROP VIEW kinds; This command conforms to the SQL standard, except that the standard only allows one view to be dropped per command, and apart from the - IF EXISTS option, which is a PostgreSQL + IF EXISTS option, which is a PostgreSQL extension. @@ -106,8 +106,8 @@ DROP VIEW kinds; See Also - - + + diff --git a/doc/src/sgml/ref/dropdb.sgml b/doc/src/sgml/ref/dropdb.sgml index 16c49e7928..38f38f01ce 100644 --- a/doc/src/sgml/ref/dropdb.sgml +++ b/doc/src/sgml/ref/dropdb.sgml @@ -3,7 +3,7 @@ doc/src/sgml/ref/dropdb.sgml PostgreSQL documentation --> - + dropdb @@ -41,7 +41,7 @@ PostgreSQL documentation dropdb is a wrapper around the - SQL command . + SQL command . There is no effective difference between dropping databases via this utility and via other methods for accessing the server. @@ -53,7 +53,7 @@ PostgreSQL documentation Options - dropdb accepts the following command-line arguments: + dropdb accepts the following command-line arguments: @@ -66,8 +66,8 @@ PostgreSQL documentation - - + + Echo the commands that dropdb generates @@ -77,8 +77,8 @@ PostgreSQL documentation - - + + Issues a verification prompt before doing anything destructive. @@ -87,8 +87,8 @@ PostgreSQL documentation - - + + Print the dropdb version and exit. @@ -97,7 +97,7 @@ PostgreSQL documentation - + Do not throw an error if the database does not exist. 
A notice is issued @@ -107,8 +107,8 @@ PostgreSQL documentation - - + + Show help about dropdb command line @@ -127,8 +127,8 @@ PostgreSQL documentation - - + + Specifies the host name of the machine on which the @@ -140,8 +140,8 @@ PostgreSQL documentation - - + + Specifies the TCP port or local Unix domain socket file @@ -152,8 +152,8 @@ PostgreSQL documentation - - + + User name to connect as. @@ -162,8 +162,8 @@ PostgreSQL documentation - - + + Never issue a password prompt. If the server requires @@ -177,8 +177,8 @@ PostgreSQL documentation - - + + Force dropdb to prompt for a @@ -191,14 +191,14 @@ PostgreSQL documentation for a password if the server demands password authentication. However, dropdb will waste a connection attempt finding out that the server wants a password. - In some cases it is worth typing to avoid the extra connection attempt. - + Specifies the name of the database to connect to in order to drop the @@ -231,9 +231,9 @@ PostgreSQL documentation - This utility, like most other PostgreSQL utilities, - also uses the environment variables supported by libpq - (see ). + This utility, like most other PostgreSQL utilities, + also uses the environment variables supported by libpq + (see ). @@ -243,8 +243,8 @@ PostgreSQL documentation Diagnostics - In case of difficulty, see - and for + In case of difficulty, see + and for discussions of potential problems and error messages. The database server must be running at the targeted host. Also, any default connection settings and environment @@ -283,8 +283,8 @@ Are you sure? (y/n) y See Also - - + + diff --git a/doc/src/sgml/ref/dropuser.sgml b/doc/src/sgml/ref/dropuser.sgml index d7ad61b3d6..3d4e4b37b3 100644 --- a/doc/src/sgml/ref/dropuser.sgml +++ b/doc/src/sgml/ref/dropuser.sgml @@ -3,7 +3,7 @@ doc/src/sgml/ref/dropuser.sgml PostgreSQL documentation --> - + dropuser @@ -35,14 +35,14 @@ PostgreSQL documentation dropuser removes an existing PostgreSQL user. - Only superusers and users with the CREATEROLE privilege can + Only superusers and users with the CREATEROLE privilege can remove PostgreSQL users. (To remove a superuser, you must yourself be a superuser.) dropuser is a wrapper around the - SQL command . + SQL command . There is no effective difference between dropping users via this utility and via other methods for accessing the server. @@ -70,8 +70,8 @@ PostgreSQL documentation - - + + Echo the commands that dropuser generates @@ -81,8 +81,8 @@ PostgreSQL documentation - - + + Prompt for confirmation before actually removing the user, and prompt @@ -92,8 +92,8 @@ PostgreSQL documentation - - + + Print the dropuser version and exit. @@ -102,7 +102,7 @@ PostgreSQL documentation - + Do not throw an error if the user does not exist. A notice is @@ -112,8 +112,8 @@ PostgreSQL documentation - - + + Show help about dropuser command line @@ -131,8 +131,8 @@ PostgreSQL documentation - - + + Specifies the host name of the machine on which the @@ -144,8 +144,8 @@ PostgreSQL documentation - - + + Specifies the TCP port or local Unix domain socket file @@ -156,8 +156,8 @@ PostgreSQL documentation - - + + User name to connect as (not the user name to drop). @@ -166,8 +166,8 @@ PostgreSQL documentation - - + + Never issue a password prompt. If the server requires @@ -181,8 +181,8 @@ PostgreSQL documentation - - + + Force dropuser to prompt for a @@ -195,7 +195,7 @@ PostgreSQL documentation for a password if the server demands password authentication. 
However, dropuser will waste a connection attempt finding out that the server wants a password. - In some cases it is worth typing to avoid the extra connection attempt. @@ -223,9 +223,9 @@ PostgreSQL documentation - This utility, like most other PostgreSQL utilities, - also uses the environment variables supported by libpq - (see ). + This utility, like most other PostgreSQL utilities, + also uses the environment variables supported by libpq + (see ). @@ -235,8 +235,8 @@ PostgreSQL documentation Diagnostics - In case of difficulty, see - and for + In case of difficulty, see + and for discussions of potential problems and error messages. The database server must be running at the targeted host. Also, any default connection settings and environment @@ -275,8 +275,8 @@ Are you sure? (y/n) y See Also - - + + diff --git a/doc/src/sgml/ref/ecpg-ref.sgml b/doc/src/sgml/ref/ecpg-ref.sgml index 8bfb47c4d7..df3c1054f0 100644 --- a/doc/src/sgml/ref/ecpg-ref.sgml +++ b/doc/src/sgml/ref/ecpg-ref.sgml @@ -3,7 +3,7 @@ doc/src/sgml/ref/ecpg-ref.sgml PostgreSQL documentation --> - + ecpg @@ -28,7 +28,7 @@ PostgreSQL documentation - + Description @@ -51,7 +51,7 @@ PostgreSQL documentation This reference page does not describe the embedded SQL language. - See for more information on that topic. + See for more information on that topic. @@ -79,8 +79,8 @@ PostgreSQL documentation Set a compatibility mode. mode can - be INFORMIX or - INFORMIX_SE. + be INFORMIX, + INFORMIX_SE, or ORACLE. @@ -220,9 +220,9 @@ PostgreSQL documentation When compiling the preprocessed C code files, the compiler needs to - be able to find the ECPG header files in the - PostgreSQL include directory. Therefore, you might - have to use the option when invoking the compiler (e.g., -I/usr/local/pgsql/include). @@ -235,7 +235,7 @@ PostgreSQL documentation The value of either of these directories that is appropriate for the installation can be found out using . + linkend="app-pgconfig"/>. diff --git a/doc/src/sgml/ref/end.sgml b/doc/src/sgml/ref/end.sgml index 10e414515b..7523315f34 100644 --- a/doc/src/sgml/ref/end.sgml +++ b/doc/src/sgml/ref/end.sgml @@ -3,7 +3,7 @@ doc/src/sgml/ref/end.sgml PostgreSQL documentation --> - + END @@ -33,7 +33,7 @@ END [ WORK | TRANSACTION ] made by the transaction become visible to others and are guaranteed to be durable if a crash occurs. This command is a PostgreSQL extension - that is equivalent to . + that is equivalent to . @@ -57,12 +57,12 @@ END [ WORK | TRANSACTION ] Notes - Use to + Use to abort a transaction. - Issuing END when not inside a transaction does + Issuing END when not inside a transaction does no harm, but it will provoke a warning message. @@ -83,7 +83,7 @@ END; END is a PostgreSQL extension that provides functionality equivalent to , which is + linkend="sql-commit"/>, which is specified in the SQL standard. @@ -92,9 +92,9 @@ END; See Also - - - + + + diff --git a/doc/src/sgml/ref/execute.sgml b/doc/src/sgml/ref/execute.sgml index 76069c019e..aab1f4b7e0 100644 --- a/doc/src/sgml/ref/execute.sgml +++ b/doc/src/sgml/ref/execute.sgml @@ -3,7 +3,7 @@ doc/src/sgml/ref/execute.sgml PostgreSQL documentation --> - + EXECUTE @@ -26,7 +26,7 @@ PostgreSQL documentation -EXECUTE name [ ( parameter [, ...] ) ] +EXECUTE name [ ( parameter [, ...] ) ] @@ -52,7 +52,7 @@ EXECUTE name [ ( For more information on the creation and usage of prepared statements, - see . + see . @@ -61,7 +61,7 @@ EXECUTE name [ ( - name + name The name of the prepared statement to execute. 
@@ -70,7 +70,7 @@ EXECUTE name [ ( - parameter + parameter The actual value of a parameter to the prepared statement. This @@ -87,16 +87,16 @@ EXECUTE name [ ( Outputs The command tag returned by EXECUTE - is that of the prepared statement, and not EXECUTE. + is that of the prepared statement, and not EXECUTE. - Examples</> + <title>Examples Examples are given in the section of the documentation. + endterm="sql-prepare-examples-title"/> section of the documentation. @@ -115,8 +115,8 @@ EXECUTE name [ ( See Also - - + + diff --git a/doc/src/sgml/ref/explain.sgml b/doc/src/sgml/ref/explain.sgml index 4219180943..8dc0d7038a 100644 --- a/doc/src/sgml/ref/explain.sgml +++ b/doc/src/sgml/ref/explain.sgml @@ -3,7 +3,7 @@ doc/src/sgml/ref/explain.sgml PostgreSQL documentation --> - + EXPLAIN @@ -160,15 +160,15 @@ ROLLBACK; shared blocks hit, read, dirtied, and written, the number of local blocks hit, read, dirtied, and written, and the number of temp blocks read and written. - A hit means that a read was avoided because the block was + A hit means that a read was avoided because the block was found already in cache when needed. Shared blocks contain data from regular tables and indexes; local blocks contain data from temporary tables and indexes; while temp blocks contain short-term working data used in sorts, hashes, Materialize plan nodes, and similar cases. - The number of blocks dirtied indicates the number of + The number of blocks dirtied indicates the number of previously unmodified blocks that were changed by this query; while the - number of blocks written indicates the number of + number of blocks written indicates the number of previously-dirtied blocks evicted from cache by this backend during query processing. The number of blocks shown for an @@ -229,9 +229,9 @@ ROLLBACK; Specifies whether the selected option should be turned on or off. - You can write TRUE, ON, or + You can write TRUE, ON, or 1 to enable the option, and FALSE, - OFF, or 0 to disable it. The + OFF, or 0 to disable it. The boolean value can also be omitted, in which case TRUE is assumed. @@ -242,10 +242,10 @@ ROLLBACK; statement - Any SELECT, INSERT, UPDATE, - DELETE, VALUES, EXECUTE, - DECLARE, CREATE TABLE AS, or - CREATE MATERIALIZED VIEW AS statement, whose execution + Any SELECT, INSERT, UPDATE, + DELETE, VALUES, EXECUTE, + DECLARE, CREATE TABLE AS, or + CREATE MATERIALIZED VIEW AS statement, whose execution plan you wish to see. @@ -260,7 +260,7 @@ ROLLBACK; The command's result is a textual description of the plan selected for the statement, optionally annotated with execution statistics. - describes the information provided. + describes the information provided. @@ -276,7 +276,7 @@ ROLLBACK; the autovacuum daemon will take care of that automatically. But if a table has recently had substantial changes in its contents, you might need to do a manual - rather than wait for autovacuum to catch up + rather than wait for autovacuum to catch up with the changes. 
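A brief, hedged illustration of the option-list syntax discussed above (the table and filter are hypothetical); ANALYZE executes the statement and BUFFERS adds the block-level counters described earlier:

EXPLAIN (ANALYZE, BUFFERS) SELECT * FROM accounts WHERE aid = 42;
-- ANALYZE ON and BUFFERS TRUE would be equivalent spellings of the same options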
@@ -450,7 +450,7 @@ EXPLAIN ANALYZE EXECUTE query(100, 200); See Also - + diff --git a/doc/src/sgml/ref/fetch.sgml b/doc/src/sgml/ref/fetch.sgml index 24c8c49156..e802be61c8 100644 --- a/doc/src/sgml/ref/fetch.sgml +++ b/doc/src/sgml/ref/fetch.sgml @@ -3,7 +3,7 @@ doc/src/sgml/ref/fetch.sgml PostgreSQL documentation --> - + FETCH @@ -27,23 +27,23 @@ PostgreSQL documentation -FETCH [ direction [ FROM | IN ] ] cursor_name +FETCH [ direction [ FROM | IN ] ] cursor_name -where direction can be empty or one of: +where direction can be empty or one of: NEXT PRIOR FIRST LAST - ABSOLUTE count - RELATIVE count - count + ABSOLUTE count + RELATIVE count + count ALL FORWARD - FORWARD count + FORWARD count FORWARD ALL BACKWARD - BACKWARD count + BACKWARD count BACKWARD ALL @@ -57,20 +57,20 @@ FETCH [ direction [ FROM | IN ] ] < A cursor has an associated position, which is used by - FETCH. The cursor position can be before the first row of the + FETCH. The cursor position can be before the first row of the query result, on any particular row of the result, or after the last row of the result. When created, a cursor is positioned before the first row. After fetching some rows, the cursor is positioned on the row most recently - retrieved. If FETCH runs off the end of the available rows + retrieved. If FETCH runs off the end of the available rows then the cursor is left positioned after the last row, or before the first - row if fetching backward. FETCH ALL or FETCH BACKWARD - ALL will always leave the cursor positioned after the last row or before + row if fetching backward. FETCH ALL or FETCH BACKWARD + ALL will always leave the cursor positioned after the last row or before the first row. - The forms NEXT, PRIOR, FIRST, - LAST, ABSOLUTE, RELATIVE fetch + The forms NEXT, PRIOR, FIRST, + LAST, ABSOLUTE, RELATIVE fetch a single row after moving the cursor appropriately. If there is no such row, an empty result is returned, and the cursor is left positioned before the first row or after the last row as @@ -78,17 +78,17 @@ FETCH [ direction [ FROM | IN ] ] < - The forms using FORWARD and BACKWARD + The forms using FORWARD and BACKWARD retrieve the indicated number of rows moving in the forward or backward direction, leaving the cursor positioned on the last-returned row (or after/before all rows, if the count exceeds the number of rows + class="parameter">count exceeds the number of rows available). - RELATIVE 0, FORWARD 0, and - BACKWARD 0 all request fetching the current row without + RELATIVE 0, FORWARD 0, and + BACKWARD 0 all request fetching the current row without moving the cursor, that is, re-fetching the most recently fetched row. This will succeed unless the cursor is positioned before the first row or after the last row; in which case, no row is returned. @@ -97,9 +97,9 @@ FETCH [ direction [ FROM | IN ] ] < This page describes usage of cursors at the SQL command level. - If you are trying to use cursors inside a PL/pgSQL + If you are trying to use cursors inside a PL/pgSQL function, the rules are different — - see . + see . @@ -109,9 +109,9 @@ FETCH [ direction [ FROM | IN ] ] < - direction + direction - direction defines + direction defines the fetch direction and number of rows to fetch. It can be one of the following: @@ -121,7 +121,7 @@ FETCH [ direction [ FROM | IN ] ] < Fetch the next row. This is the default if direction is omitted. + class="parameter">direction is omitted. 
@@ -154,17 +154,17 @@ FETCH [ direction [ FROM | IN ] ] < - ABSOLUTE count + ABSOLUTE count Fetch the count'th row of the query, + class="parameter">count'th row of the query, or the abs(count)'th row from + class="parameter">count)'th row from the end if count is negative. Position + class="parameter">count is negative. Position before first row or after last row if count is out of range; in + class="parameter">count is out of range; in particular, ABSOLUTE 0 positions before the first row. @@ -172,14 +172,14 @@ FETCH [ direction [ FROM | IN ] ] < - RELATIVE count + RELATIVE count Fetch the count'th succeeding row, or + class="parameter">count'th succeeding row, or the abs(count)'th prior - row if count is + class="parameter">count)'th prior + row if count is negative. RELATIVE 0 re-fetches the current row, if any. @@ -187,13 +187,13 @@ FETCH [ direction [ FROM | IN ] ] < - count + count Fetch the next count rows (same as + class="parameter">count rows (same as FORWARD count). + class="parameter">count
). @@ -217,11 +217,11 @@ FETCH [ direction [ FROM | IN ] ] < - FORWARD count + FORWARD count Fetch the next count rows. + class="parameter">count
rows. FORWARD 0 re-fetches the current row. @@ -246,11 +246,11 @@ FETCH [ direction [ FROM | IN ] ] < - BACKWARD count + BACKWARD count Fetch the prior count rows (scanning + class="parameter">count
rows (scanning backwards). BACKWARD 0 re-fetches the current row. @@ -270,20 +270,20 @@ FETCH [ direction [ FROM | IN ] ] < - count + count - count is a + count is a possibly-signed integer constant, determining the location or - number of rows to fetch. For FORWARD and - BACKWARD cases, specifying a negative count is equivalent to changing - the sense of FORWARD and BACKWARD. + number of rows to fetch. For FORWARD and + BACKWARD cases, specifying a negative count is equivalent to changing + the sense of FORWARD and BACKWARD. - cursor_name + cursor_name An open cursor's name. @@ -297,7 +297,7 @@ FETCH [ direction [ FROM | IN ] ] < Outputs - On successful completion, a FETCH command returns a command + On successful completion, a FETCH command returns a command tag of the form FETCH count @@ -315,8 +315,8 @@ FETCH count The cursor should be declared with the SCROLL - option if one intends to use any variants of FETCH - other than FETCH NEXT or FETCH FORWARD with + option if one intends to use any variants of FETCH + other than FETCH NEXT or FETCH FORWARD with a positive count. For simple queries PostgreSQL will allow backwards fetch from cursors not declared with SCROLL, but this @@ -335,9 +335,9 @@ FETCH count - + is used to define a cursor. Use - + to change cursor position without retrieving data. @@ -394,14 +394,14 @@ COMMIT WORK; The FETCH forms involving FORWARD and BACKWARD, as well as the forms FETCH count and FETCH + class="parameter">count
and FETCH ALL, in which FORWARD is implicit, are PostgreSQL extensions. - The SQL standard allows only FROM preceding the cursor - name; the option to use IN, or to leave them out altogether, is + The SQL standard allows only FROM preceding the cursor + name; the option to use IN, or to leave them out altogether, is an extension. @@ -410,9 +410,9 @@ COMMIT WORK; See Also - - - + + + diff --git a/doc/src/sgml/ref/grant.sgml b/doc/src/sgml/ref/grant.sgml index c63252ca24..ff64c7a3ba 100644 --- a/doc/src/sgml/ref/grant.sgml +++ b/doc/src/sgml/ref/grant.sgml @@ -3,7 +3,7 @@ doc/src/sgml/ref/grant.sgml PostgreSQL documentation --> - + GRANT @@ -23,70 +23,70 @@ PostgreSQL documentation GRANT { { SELECT | INSERT | UPDATE | DELETE | TRUNCATE | REFERENCES | TRIGGER } [, ...] | ALL [ PRIVILEGES ] } - ON { [ TABLE ] table_name [, ...] - | ALL TABLES IN SCHEMA schema_name [, ...] } - TO role_specification [, ...] [ WITH GRANT OPTION ] + ON { [ TABLE ] table_name [, ...] + | ALL TABLES IN SCHEMA schema_name [, ...] } + TO role_specification [, ...] [ WITH GRANT OPTION ] -GRANT { { SELECT | INSERT | UPDATE | REFERENCES } ( column_name [, ...] ) - [, ...] | ALL [ PRIVILEGES ] ( column_name [, ...] ) } - ON [ TABLE ] table_name [, ...] - TO role_specification [, ...] [ WITH GRANT OPTION ] +GRANT { { SELECT | INSERT | UPDATE | REFERENCES } ( column_name [, ...] ) + [, ...] | ALL [ PRIVILEGES ] ( column_name [, ...] ) } + ON [ TABLE ] table_name [, ...] + TO role_specification [, ...] [ WITH GRANT OPTION ] GRANT { { USAGE | SELECT | UPDATE } [, ...] | ALL [ PRIVILEGES ] } - ON { SEQUENCE sequence_name [, ...] - | ALL SEQUENCES IN SCHEMA schema_name [, ...] } - TO role_specification [, ...] [ WITH GRANT OPTION ] + ON { SEQUENCE sequence_name [, ...] + | ALL SEQUENCES IN SCHEMA schema_name [, ...] } + TO role_specification [, ...] [ WITH GRANT OPTION ] GRANT { { CREATE | CONNECT | TEMPORARY | TEMP } [, ...] | ALL [ PRIVILEGES ] } ON DATABASE database_name [, ...] - TO role_specification [, ...] [ WITH GRANT OPTION ] + TO role_specification [, ...] [ WITH GRANT OPTION ] GRANT { USAGE | ALL [ PRIVILEGES ] } ON DOMAIN domain_name [, ...] - TO role_specification [, ...] [ WITH GRANT OPTION ] + TO role_specification [, ...] [ WITH GRANT OPTION ] GRANT { USAGE | ALL [ PRIVILEGES ] } ON FOREIGN DATA WRAPPER fdw_name [, ...] - TO role_specification [, ...] [ WITH GRANT OPTION ] + TO role_specification [, ...] [ WITH GRANT OPTION ] GRANT { USAGE | ALL [ PRIVILEGES ] } ON FOREIGN SERVER server_name [, ...] - TO role_specification [, ...] [ WITH GRANT OPTION ] + TO role_specification [, ...] [ WITH GRANT OPTION ] GRANT { EXECUTE | ALL [ PRIVILEGES ] } - ON { FUNCTION function_name [ ( [ [ argmode ] [ arg_name ] arg_type [, ...] ] ) ] [, ...] - | ALL FUNCTIONS IN SCHEMA schema_name [, ...] } - TO role_specification [, ...] [ WITH GRANT OPTION ] + ON { { FUNCTION | PROCEDURE | ROUTINE } routine_name [ ( [ [ argmode ] [ arg_name ] arg_type [, ...] ] ) ] [, ...] + | ALL { FUNCTIONS | PROCEDURES | ROUTINES } IN SCHEMA schema_name [, ...] } + TO role_specification [, ...] [ WITH GRANT OPTION ] GRANT { USAGE | ALL [ PRIVILEGES ] } ON LANGUAGE lang_name [, ...] - TO role_specification [, ...] [ WITH GRANT OPTION ] + TO role_specification [, ...] [ WITH GRANT OPTION ] GRANT { { SELECT | UPDATE } [, ...] | ALL [ PRIVILEGES ] } - ON LARGE OBJECT loid [, ...] - TO role_specification [, ...] [ WITH GRANT OPTION ] + ON LARGE OBJECT loid [, ...] + TO role_specification [, ...] [ WITH GRANT OPTION ] GRANT { { CREATE | USAGE } [, ...] 
| ALL [ PRIVILEGES ] } ON SCHEMA schema_name [, ...] - TO role_specification [, ...] [ WITH GRANT OPTION ] + TO role_specification [, ...] [ WITH GRANT OPTION ] GRANT { CREATE | ALL [ PRIVILEGES ] } ON TABLESPACE tablespace_name [, ...] - TO role_specification [, ...] [ WITH GRANT OPTION ] + TO role_specification [, ...] [ WITH GRANT OPTION ] GRANT { USAGE | ALL [ PRIVILEGES ] } ON TYPE type_name [, ...] - TO role_specification [, ...] [ WITH GRANT OPTION ] + TO role_specification [, ...] [ WITH GRANT OPTION ] -where role_specification can be: +where role_specification can be: - [ GROUP ] role_name + [ GROUP ] role_name | PUBLIC | CURRENT_USER | SESSION_USER -GRANT role_name [, ...] TO role_name [, ...] [ WITH ADMIN OPTION ] +GRANT role_name [, ...] TO role_name [, ...] [ WITH ADMIN OPTION ] @@ -96,7 +96,7 @@ GRANT role_name [, ...] TO The GRANT command has two basic variants: one that grants privileges on a database object (table, column, view, foreign - table, sequence, database, foreign-data wrapper, foreign server, function, + table, sequence, database, foreign-data wrapper, foreign server, function, procedure, procedural language, schema, or tablespace), and one that grants membership in a role. These variants are similar in many ways, but they are different enough to be described separately. @@ -115,8 +115,11 @@ GRANT role_name [, ...] TO There is also an option to grant privileges on all objects of the same type within one or more schemas. This functionality is currently supported - only for tables, sequences, and functions (but note that ALL - TABLES is considered to include views and foreign tables). + only for tables, sequences, functions, and procedures. ALL + TABLES also affects views and foreign tables, just like the + specific-object GRANT command. ALL + FUNCTIONS also affects aggregate functions, but not procedures, + again just like the specific-object GRANT command. @@ -156,19 +159,29 @@ GRANT role_name [, ...] TO PostgreSQL grants default privileges on some types of objects to PUBLIC. No privileges are granted to - PUBLIC by default on tables, - columns, schemas or tablespaces. For other types, the default privileges + PUBLIC by default on + tables, + table columns, + sequences, + foreign data wrappers, + foreign servers, + large objects, + schemas, + or tablespaces. + For other types of objects, the default privileges granted to PUBLIC are as follows: - CONNECT and CREATE TEMP TABLE for - databases; EXECUTE privilege for functions; and - USAGE privilege for languages. + CONNECT and TEMPORARY (create + temporary tables) privileges for databases; + EXECUTE privilege for functions and procedures; and + USAGE privilege for languages and data types + (including domains). The object owner can, of course, REVOKE both default and expressly granted privileges. (For maximum - security, issue the REVOKE in the same transaction that + security, issue the REVOKE in the same transaction that creates the object; then there is no window in which another user can use the object.) Also, these initial default privilege settings can be changed using the - + command. @@ -180,14 +193,14 @@ GRANT role_name [, ...] TO SELECT - Allows from + Allows from any column, or the specific columns listed, of the specified table, view, or sequence. Also allows the use of - TO. + TO. This privilege is also needed to reference existing column values in - or - . + or + . For sequences, this privilege also allows the use of the currval function. For large objects, this privilege allows the object to be read. 
@@ -199,11 +212,11 @@ GRANT role_name [, ...] TO INSERT - Allows of a new + Allows of a new row into the specified table. If specific columns are listed, - only those columns may be assigned to in the INSERT + only those columns may be assigned to in the INSERT command (other columns will therefore receive default values). - Also allows FROM. + Also allows FROM. @@ -212,10 +225,10 @@ GRANT role_name [, ...] TO UPDATE - Allows of any + Allows of any column, or the specific columns listed, of the specified table. - (In practice, any nontrivial UPDATE command will require - SELECT privilege as well, since it must reference table + (In practice, any nontrivial UPDATE command will require + SELECT privilege as well, since it must reference table columns to determine which rows to update, and/or to compute new values for columns.) SELECT ... FOR UPDATE @@ -234,10 +247,10 @@ GRANT role_name [, ...] TO DELETE - Allows of a row + Allows of a row from the specified table. - (In practice, any nontrivial DELETE command will require - SELECT privilege as well, since it must reference table + (In practice, any nontrivial DELETE command will require + SELECT privilege as well, since it must reference table columns to determine which rows to delete.) @@ -247,7 +260,7 @@ GRANT role_name [, ...] TO TRUNCATE - Allows on + Allows on the specified table. @@ -259,7 +272,7 @@ GRANT role_name [, ...] TO Allows creation of a foreign key constraint referencing the specified table, or specified column(s) of the table. (See the - statement.) + statement.) @@ -269,7 +282,7 @@ GRANT role_name [, ...] TO Allows the creation of a trigger on the specified table. (See the - statement.) + statement.) @@ -282,7 +295,7 @@ GRANT role_name [, ...] TO For schemas, allows new objects to be created within the schema. - To rename an existing object, you must own the object and + To rename an existing object, you must own the object and have this privilege for the containing schema. @@ -300,7 +313,7 @@ GRANT role_name [, ...] TO Allows the user to connect to the specified database. This privilege is checked at connection startup (in addition to checking - any restrictions imposed by pg_hba.conf). + any restrictions imposed by pg_hba.conf). @@ -319,10 +332,12 @@ GRANT role_name [, ...] TO EXECUTE - Allows the use of the specified function and the use of any - operators that are implemented on top of the function. This is - the only type of privilege that is applicable to functions. - (This syntax works for aggregate functions, as well.) + Allows the use of the specified function or procedure and the use of + any operators that are implemented on top of the function. This is the + only type of privilege that is applicable to functions and procedures. + The FUNCTION syntax also works for aggregate + functions. Alternatively, use ROUTINE to refer to a function, + aggregate function, or procedure regardless of what it is. @@ -338,7 +353,7 @@ GRANT role_name [, ...] TO For schemas, allows access to objects contained in the specified schema (assuming that the objects' own privilege requirements are - also met). Essentially this allows the grantee to look up + also met). Essentially this allows the grantee to look up objects within the schema. Without this permission, it is still possible to see the object names, e.g. by querying the system tables. Also, after revoking this permission, existing backends might have @@ -406,37 +421,37 @@ GRANT role_name [, ...] 
TO on itself, but it may grant or revoke membership in itself from a database session where the session user matches the role. Database superusers can grant or revoke membership in any role - to anyone. Roles having CREATEROLE privilege can grant + to anyone. Roles having CREATEROLE privilege can grant or revoke membership in any role that is not a superuser. Unlike the case with privileges, membership in a role cannot be granted - to PUBLIC. Note also that this form of the command does not - allow the noise word GROUP. + to PUBLIC. Note also that this form of the command does not + allow the noise word GROUP. - + Notes - The command is used + The command is used to revoke access privileges. Since PostgreSQL 8.1, the concepts of users and groups have been unified into a single kind of entity called a role. - It is therefore no longer necessary to use the keyword GROUP - to identify whether a grantee is a user or a group. GROUP + It is therefore no longer necessary to use the keyword GROUP + to identify whether a grantee is a user or a group. GROUP is still allowed in the command, but it is a noise word. - A user may perform SELECT, INSERT, etc. on a + A user may perform SELECT, INSERT, etc. on a column if they hold that privilege for either the specific column or its whole table. Granting the privilege at the table level and then revoking it for one column will not do what one might wish: the @@ -444,12 +459,12 @@ GRANT role_name [, ...] TO - When a non-owner of an object attempts to GRANT privileges + When a non-owner of an object attempts to GRANT privileges on the object, the command will fail outright if the user has no privileges whatsoever on the object. As long as some privilege is available, the command will proceed, but it will grant only those privileges for which the user has grant options. The GRANT ALL - PRIVILEGES forms will issue a warning message if no grant options are + PRIVILEGES forms will issue a warning message if no grant options are held, while the other forms will issue a warning if grant options for any of the privileges specifically named in the command are not held. (In principle these statements apply to the object owner as well, but @@ -460,13 +475,13 @@ GRANT role_name [, ...] TO It should be noted that database superusers can access all objects regardless of object privilege settings. This - is comparable to the rights of root in a Unix system. - As with root, it's unwise to operate as a superuser + is comparable to the rights of root in a Unix system. + As with root, it's unwise to operate as a superuser except when absolutely necessary. - If a superuser chooses to issue a GRANT or REVOKE + If a superuser chooses to issue a GRANT or REVOKE command, the command is performed as though it were issued by the owner of the affected object. In particular, privileges granted via such a command will appear to have been granted by the object owner. @@ -475,37 +490,37 @@ GRANT role_name [, ...] TO - GRANT and REVOKE can also be done by a role + GRANT and REVOKE can also be done by a role that is not the owner of the affected object, but is a member of the role that owns the object, or is a member of a role that holds privileges WITH GRANT OPTION on the object. In this case the privileges will be recorded as having been granted by the role that actually owns the object or holds the privileges WITH GRANT OPTION. 
For example, if table - t1 is owned by role g1, of which role - u1 is a member, then u1 can grant privileges - on t1 to u2, but those privileges will appear - to have been granted directly by g1. Any other member - of role g1 could revoke them later. + t1 is owned by role g1, of which role + u1 is a member, then u1 can grant privileges + on t1 to u2, but those privileges will appear + to have been granted directly by g1. Any other member + of role g1 could revoke them later. - If the role executing GRANT holds the required privileges + If the role executing GRANT holds the required privileges indirectly via more than one role membership path, it is unspecified which containing role will be recorded as having done the grant. In such - cases it is best practice to use SET ROLE to become the - specific role you want to do the GRANT as. + cases it is best practice to use SET ROLE to become the + specific role you want to do the GRANT as. Granting permission on a table does not automatically extend permissions to any sequences used by the table, including - sequences tied to SERIAL columns. Permissions on + sequences tied to SERIAL columns. Permissions on sequences must be set separately. - Use 's \dp command + Use 's \dp command to obtain information about existing privileges for tables and columns. For example: @@ -541,8 +556,8 @@ rolename=xxxx -- privileges granted to a role /yyyy -- role that granted this privilege - The above example display would be seen by user miriam after - creating table mytable and doing: + The above example display would be seen by user miriam after + creating table mytable and doing: GRANT SELECT ON mytable TO PUBLIC; @@ -552,31 +567,31 @@ GRANT SELECT (col1), UPDATE (col1) ON mytable TO miriam_rw; - For non-table objects there are other \d commands + For non-table objects there are other \d commands that can display their privileges. - If the Access privileges column is empty for a given object, + If the Access privileges column is empty for a given object, it means the object has default privileges (that is, its privileges column is null). Default privileges always include all privileges for the owner, - and can include some privileges for PUBLIC depending on the - object type, as explained above. The first GRANT or - REVOKE on an object + and can include some privileges for PUBLIC depending on the + object type, as explained above. The first GRANT or + REVOKE on an object will instantiate the default privileges (producing, for example, - {miriam=arwdDxt/miriam}) and then modify them per the + {miriam=arwdDxt/miriam}) and then modify them per the specified request. Similarly, entries are shown in Column access - privileges only for columns with nondefault privileges. - (Note: for this purpose, default privileges always means the + privileges only for columns with nondefault privileges. + (Note: for this purpose, default privileges always means the built-in default privileges for the object's type. An object whose - privileges have been affected by an ALTER DEFAULT PRIVILEGES + privileges have been affected by an ALTER DEFAULT PRIVILEGES command will always be shown with an explicit privilege entry that - includes the effects of the ALTER.) + includes the effects of the ALTER.) Notice that the owner's implicit grant options are not marked in the - access privileges display. A * will appear only when + access privileges display. A * will appear only when grant options have been explicitly granted to someone. 
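As a hedged follow-on to the mytable example above, explicitly granting a privilege WITH GRANT OPTION is what produces that * marker (the role anna is hypothetical):

GRANT SELECT ON mytable TO anna WITH GRANT OPTION;
-- \dp mytable would then show an entry like anna=r*/miriam, the * marking the grant option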
@@ -607,7 +622,7 @@ GRANT ALL PRIVILEGES ON kinds TO manuel; - Grant membership in role admins to user joe: + Grant membership in role admins to user joe: GRANT admins TO joe; @@ -627,14 +642,14 @@ GRANT admins TO joe; PostgreSQL allows an object owner to revoke their own ordinary privileges: for example, a table owner can make the table - read-only to themselves by revoking their own INSERT, - UPDATE, DELETE, and TRUNCATE + read-only to themselves by revoking their own INSERT, + UPDATE, DELETE, and TRUNCATE privileges. This is not possible according to the SQL standard. The reason is that PostgreSQL treats the owner's privileges as having been granted by the owner to themselves; therefore they can revoke them too. In the SQL standard, the owner's privileges are - granted by an assumed entity _SYSTEM. Not being - _SYSTEM, the owner cannot revoke these rights. + granted by an assumed entity _SYSTEM. Not being + _SYSTEM, the owner cannot revoke these rights. @@ -672,8 +687,8 @@ GRANT admins TO joe; See Also - - + + diff --git a/doc/src/sgml/ref/import_foreign_schema.sgml b/doc/src/sgml/ref/import_foreign_schema.sgml index b73dee9439..f07f757ac6 100644 --- a/doc/src/sgml/ref/import_foreign_schema.sgml +++ b/doc/src/sgml/ref/import_foreign_schema.sgml @@ -3,7 +3,7 @@ doc/src/sgml/ref/import_foreign_schema.sgml PostgreSQL documentation --> - + IMPORT FOREIGN SCHEMA @@ -21,15 +21,15 @@ PostgreSQL documentation -IMPORT FOREIGN SCHEMA remote_schema - [ { LIMIT TO | EXCEPT } ( table_name [, ...] ) ] - FROM SERVER server_name - INTO local_schema - [ OPTIONS ( option 'value' [, ... ] ) ] +IMPORT FOREIGN SCHEMA remote_schema + [ { LIMIT TO | EXCEPT } ( table_name [, ...] ) ] + FROM SERVER server_name + INTO local_schema + [ OPTIONS ( option 'value' [, ... ] ) ] - + Description @@ -59,7 +59,7 @@ IMPORT FOREIGN SCHEMA remote_schema - remote_schema + remote_schema The remote schema to import from. The specific meaning of a remote schema @@ -69,7 +69,7 @@ IMPORT FOREIGN SCHEMA remote_schema - LIMIT TO ( table_name [, ...] ) + LIMIT TO ( table_name [, ...] ) Import only foreign tables matching one of the given table names. @@ -79,7 +79,7 @@ IMPORT FOREIGN SCHEMA remote_schema - EXCEPT ( table_name [, ...] ) + EXCEPT ( table_name [, ...] ) Exclude specified foreign tables from the import. All tables @@ -90,7 +90,7 @@ IMPORT FOREIGN SCHEMA remote_schema - server_name + server_name The foreign server to import from. @@ -99,7 +99,7 @@ IMPORT FOREIGN SCHEMA remote_schema - local_schema + local_schema The schema in which the imported foreign tables will be created. @@ -108,7 +108,7 @@ IMPORT FOREIGN SCHEMA remote_schema - OPTIONS ( option 'value' [, ...] ) + OPTIONS ( option 'value' [, ...] ) Options to be used during the import. 
@@ -120,13 +120,13 @@ IMPORT FOREIGN SCHEMA remote_schema - + Examples - Import table definitions from a remote schema foreign_films - on server film_server, creating the foreign tables in - local schema films: + Import table definitions from a remote schema foreign_films + on server film_server, creating the foreign tables in + local schema films: IMPORT FOREIGN SCHEMA foreign_films @@ -135,8 +135,8 @@ IMPORT FOREIGN SCHEMA foreign_films - As above, but import only the two tables actors and - directors (if they exist): + As above, but import only the two tables actors and + directors (if they exist): IMPORT FOREIGN SCHEMA foreign_films LIMIT TO (actors, directors) @@ -144,13 +144,13 @@ IMPORT FOREIGN SCHEMA foreign_films LIMIT TO (actors, directors) - + Compatibility The IMPORT FOREIGN SCHEMA command conforms to the - SQL standard, except that the OPTIONS - clause is a PostgreSQL extension. + SQL standard, except that the OPTIONS + clause is a PostgreSQL extension. @@ -159,8 +159,8 @@ IMPORT FOREIGN SCHEMA foreign_films LIMIT TO (actors, directors) See Also - - + + diff --git a/doc/src/sgml/ref/initdb.sgml b/doc/src/sgml/ref/initdb.sgml index 6efb2e442d..4489b585c7 100644 --- a/doc/src/sgml/ref/initdb.sgml +++ b/doc/src/sgml/ref/initdb.sgml @@ -3,7 +3,7 @@ doc/src/sgml/ref/initdb.sgml PostgreSQL documentation --> - + initdb @@ -33,7 +33,7 @@ PostgreSQL documentation - + Description @@ -76,11 +76,19 @@ PostgreSQL documentation to do so.) + + For security reasons the new cluster created by initdb + will only be accessible by the cluster owner by default. The + option allows any user in the same + group as the cluster owner to read files in the cluster. This is useful + for performing backups as a non-privileged user. + + initdb initializes the database cluster's default locale and character set encoding. The character set encoding, - collation order (LC_COLLATE) and character set classes - (LC_CTYPE, e.g. upper, lower, digit) can be set separately + collation order (LC_COLLATE) and character set classes + (LC_CTYPE, e.g. upper, lower, digit) can be set separately for a database when it is created. initdb determines those settings for the template1 database, which will serve as the default for all other databases. @@ -89,7 +97,7 @@ PostgreSQL documentation To alter the default collation order or character set classes, use the and options. - Collation orders other than C or POSIX also have + Collation orders other than C or POSIX also have a performance penalty. For these reasons it is important to choose the right locale when running initdb. @@ -98,14 +106,14 @@ PostgreSQL documentation The remaining locale categories can be changed later when the server is started. You can also use to set the default for all locale categories, including collation order and - character set classes. All server locale values (lc_*) can - be displayed via SHOW ALL. - More details can be found in . + character set classes. All server locale values (lc_*) can + be displayed via SHOW ALL. + More details can be found in . To alter the default encoding, use the . - More details can be found in . + More details can be found in . @@ -121,7 +129,7 @@ PostgreSQL documentation This option specifies the default authentication method for local - users used in pg_hba.conf (host + users used in pg_hba.conf (host and local lines). 
initdb will prepopulate pg_hba.conf entries using the specified authentication method for non-replication as well as @@ -129,8 +137,8 @@ PostgreSQL documentation - Do not use trust unless you trust all local users on your - system. trust is the default for ease of installation. + Do not use trust unless you trust all local users on your + system. trust is the default for ease of installation. @@ -140,7 +148,7 @@ PostgreSQL documentation This option specifies the authentication method for local users via - TCP/IP connections used in pg_hba.conf + TCP/IP connections used in pg_hba.conf (host lines). @@ -151,7 +159,7 @@ PostgreSQL documentation This option specifies the authentication method for local users via - Unix-domain socket connections used in pg_hba.conf + Unix-domain socket connections used in pg_hba.conf (local lines). @@ -183,7 +191,18 @@ PostgreSQL documentation unless you override it there. The default is derived from the locale, or SQL_ASCII if that does not work. The character sets supported by the PostgreSQL server are described - in . + in . + + + + + + + + + + Allows users in the same group as the cluster owner to read all cluster + files created by initdb. @@ -209,7 +228,7 @@ PostgreSQL documentation Sets the default locale for the database cluster. If this option is not specified, the locale is inherited from the environment that initdb runs in. Locale - support is described in . + support is described in . @@ -255,7 +274,7 @@ PostgreSQL documentation - + Makes initdb read the database superuser's password @@ -270,18 +289,18 @@ PostgreSQL documentation Safely write all database files to disk and exit. This does not - perform any of the normal initdb operations. + perform any of the normal initdb operations. - - + + Sets the default text search configuration. - See for further information. + See for further information. @@ -315,7 +334,7 @@ PostgreSQL documentation - + @@ -326,6 +345,26 @@ PostgreSQL documentation + + + + + Set the WAL segment size, in megabytes. This + is the size of each individual file in the WAL log. The default size + is 16 megabytes. The value must be a power of 2 between 1 and 1024 + (megabytes). This option can only be set during initialization, and + cannot be changed later. + + + + It may be useful to adjust this size to control the granularity of + WAL log shipping or archiving. Also, in databases with a high volume + of WAL, the sheer number of WAL files per directory can become a + performance and management problem. Increasing the WAL file size + will reduce the number of WAL files. + + + @@ -380,8 +419,8 @@ PostgreSQL documentation - - + + Print the initdb version and exit. @@ -390,8 +429,8 @@ PostgreSQL documentation - - + + Show help about initdb command line @@ -427,16 +466,16 @@ PostgreSQL documentation Specifies the default time zone of the created database cluster. The value should be a full time zone name - (see ). + (see ). - This utility, like most other PostgreSQL utilities, - also uses the environment variables supported by libpq - (see ). + This utility, like most other PostgreSQL utilities, + also uses the environment variables supported by libpq + (see ). 
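A sketch of how the WAL segment size might be set at initialization time; the long option spelling --wal-segsize is assumed here, since the flag name itself is not shown above, and the data directory is hypothetical:

initdb -D /usr/local/pgsql/data --wal-segsize=64    # 64 MB WAL segments instead of the 16 MB default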
@@ -454,8 +493,8 @@ PostgreSQL documentation See Also - - + + diff --git a/doc/src/sgml/ref/insert.sgml b/doc/src/sgml/ref/insert.sgml index 94dad00870..62e142fd8e 100644 --- a/doc/src/sgml/ref/insert.sgml +++ b/doc/src/sgml/ref/insert.sgml @@ -3,7 +3,7 @@ doc/src/sgml/ref/insert.sgml PostgreSQL documentation --> - + INSERT @@ -22,25 +22,25 @@ PostgreSQL documentation [ WITH [ RECURSIVE ] with_query [, ...] ] -INSERT INTO table_name [ AS alias ] [ ( column_name [, ...] ) ] +INSERT INTO table_name [ AS alias ] [ ( column_name [, ...] ) ] [ OVERRIDING { SYSTEM | USER} VALUE ] - { DEFAULT VALUES | VALUES ( { expression | DEFAULT } [, ...] ) [, ...] | query } + { DEFAULT VALUES | VALUES ( { expression | DEFAULT } [, ...] ) [, ...] | query } [ ON CONFLICT [ conflict_target ] conflict_action ] [ RETURNING * | output_expression [ [ AS ] output_name ] [, ...] ] where conflict_target can be one of: - ( { index_column_name | ( index_expression ) } [ COLLATE collation ] [ opclass ] [, ...] ) [ WHERE index_predicate ] - ON CONSTRAINT constraint_name + ( { index_column_name | ( index_expression ) } [ COLLATE collation ] [ opclass ] [, ...] ) [ WHERE index_predicate ] + ON CONSTRAINT constraint_name and conflict_action is one of: DO NOTHING - DO UPDATE SET { column_name = { expression | DEFAULT } | - ( column_name [, ...] ) = [ ROW ] ( { expression | DEFAULT } [, ...] ) | - ( column_name [, ...] ) = ( sub-SELECT ) + DO UPDATE SET { column_name = { expression | DEFAULT } | + ( column_name [, ...] ) = [ ROW ] ( { expression | DEFAULT } [, ...] ) | + ( column_name [, ...] ) = ( sub-SELECT ) } [, ...] - [ WHERE condition ] + [ WHERE condition ] @@ -56,10 +56,10 @@ INSERT INTO table_name [ AS The target column names can be listed in any order. If no list of column names is given at all, the default is all the columns of the - table in their declared order; or the first N column - names, if there are only N columns supplied by the - VALUES clause or query. The values - supplied by the VALUES clause or query are + table in their declared order; or the first N column + names, if there are only N columns supplied by the + VALUES clause or query. The values + supplied by the VALUES clause or query are associated with the explicit or implicit column list left-to-right. @@ -75,31 +75,31 @@ INSERT INTO table_name [ AS - ON CONFLICT can be used to specify an alternative + ON CONFLICT can be used to specify an alternative action to raising a unique constraint or exclusion constraint violation error. (See below.) + endterm="sql-on-conflict-title"/> below.) - The optional RETURNING clause causes INSERT + The optional RETURNING clause causes INSERT to compute and return value(s) based on each row actually inserted - (or updated, if an ON CONFLICT DO UPDATE clause was + (or updated, if an ON CONFLICT DO UPDATE clause was used). This is primarily useful for obtaining values that were supplied by defaults, such as a serial sequence number. However, any expression using the table's columns is allowed. The syntax of - the RETURNING list is identical to that of the output - list of SELECT. Only rows that were successfully + the RETURNING list is identical to that of the output + list of SELECT. Only rows that were successfully inserted or updated will be returned. For example, if a row was locked but not updated because an ON CONFLICT DO UPDATE ... WHERE clause condition was not satisfied, the + class="parameter">condition was not satisfied, the row will not be returned. 
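A compact, hedged sketch tying the ON CONFLICT and RETURNING clauses together (the distributors table and its unique did column are hypothetical):

INSERT INTO distributors (did, dname)
    VALUES (5, 'Gizmo Transglobal')
    ON CONFLICT (did) DO UPDATE SET dname = EXCLUDED.dname
    RETURNING did, dname;
-- returns the row whether it was freshly inserted or updated by the conflict action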
You must have INSERT privilege on a table in - order to insert into it. If ON CONFLICT DO UPDATE is + order to insert into it. If ON CONFLICT DO UPDATE is present, UPDATE privilege on the table is also required. @@ -107,19 +107,19 @@ INSERT INTO table_name [ AS If a column list is specified, you only need INSERT privilege on the listed columns. - Similarly, when ON CONFLICT DO UPDATE is specified, you - only need UPDATE privilege on the column(s) that are - listed to be updated. However, ON CONFLICT DO UPDATE - also requires SELECT privilege on any column whose - values are read in the ON CONFLICT DO UPDATE - expressions or condition. + Similarly, when ON CONFLICT DO UPDATE is specified, you + only need UPDATE privilege on the column(s) that are + listed to be updated. However, ON CONFLICT DO UPDATE + also requires SELECT privilege on any column whose + values are read in the ON CONFLICT DO UPDATE + expressions or condition. - Use of the RETURNING clause requires SELECT - privilege on all columns mentioned in RETURNING. + Use of the RETURNING clause requires SELECT + privilege on all columns mentioned in RETURNING. If you use the query clause to insert rows from a + class="parameter">query clause to insert rows from a query, you of course need to have SELECT privilege on any table or column used in the query. @@ -128,7 +128,7 @@ INSERT INTO table_name [ AS Parameters - + Inserting @@ -144,8 +144,8 @@ INSERT INTO table_name [ AS The WITH clause allows you to specify one or more - subqueries that can be referenced by name in the INSERT - query. See and + subqueries that can be referenced by name in the INSERT + query. See and for details. @@ -160,7 +160,7 @@ INSERT INTO table_name [ AS - table_name + table_name The name (optionally schema-qualified) of an existing table. @@ -173,10 +173,10 @@ INSERT INTO table_name [ AS A substitute name for table_name. When an alias is + class="parameter">table_name. When an alias is provided, it completely hides the actual name of the table. - This is particularly useful when ON CONFLICT DO UPDATE - targets a table named excluded, since that will otherwise + This is particularly useful when ON CONFLICT DO UPDATE + targets a table named excluded, since that will otherwise be taken as the name of the special table representing rows proposed for insertion. @@ -185,19 +185,19 @@ INSERT INTO table_name [ AS - column_name + column_name The name of a column in the table named by table_name. The column name + class="parameter">table_name. The column name can be qualified with a subfield name or array subscript, if needed. (Inserting into only some fields of a composite column leaves the other fields null.) When referencing a - column with ON CONFLICT DO UPDATE, do not include + column with ON CONFLICT DO UPDATE, do not include the table's name in the specification of a target column. For example, INSERT INTO table_name ... ON CONFLICT DO UPDATE - SET table_name.col = 1 is invalid (this follows the general - behavior for UPDATE). + SET table_name.col = 1 is invalid (this follows the general + behavior for UPDATE). @@ -227,8 +227,9 @@ INSERT INTO table_name [ AS INSERT INTO tbl2 OVERRIDING USER VALUE SELECT * FROM tbl1 will copy from tbl1 all columns that - are not identity columns in tbl2 but will continue - the sequence counters for any identity columns. + are not identity columns in tbl2 while values for + the identity columns in tbl2 will be generated by + the sequences associated with tbl2. 
@@ -245,7 +246,7 @@ INSERT INTO table_name [ AS - expression + expression An expression or value to assign to the corresponding column. @@ -264,34 +265,34 @@ INSERT INTO table_name [ AS - query + query A query (SELECT statement) that supplies the rows to be inserted. Refer to the - + statement for a description of the syntax. - output_expression + output_expression An expression to be computed and returned by the - INSERT command after each row is inserted or + INSERT command after each row is inserted or updated. The expression can use any column names of the table named by table_name. Write - * to return all columns of the inserted or updated + class="parameter">table_name. Write + * to return all columns of the inserted or updated row(s). - output_name + output_name A name to use for a returned column. @@ -303,10 +304,10 @@ INSERT INTO table_name [ AS <literal>ON CONFLICT</literal> Clause - + UPSERT - + ON CONFLICT @@ -327,14 +328,14 @@ INSERT INTO table_name [ AS conflict_target can perform unique index inference. When performing inference, it consists of one or more index_column_name columns and/or - index_expression - expressions, and an optional index_predicate. All table_name unique indexes that, + class="parameter">index_column_name
columns and/or + index_expression + expressions, and an optional index_predicate. All table_name unique indexes that, without regard to order, contain exactly the conflict_target-specified columns/expressions are inferred (chosen) as arbiter indexes. If - an index_predicate is + an index_predicate is specified, it must, as a further requirement for inference, satisfy arbiter indexes. Note that this means a non-partial unique index (a unique index without a predicate) will be inferred @@ -385,7 +386,7 @@ INSERT INTO table_name [ AS have access to the existing row using the table's name (or an alias), and to rows proposed for insertion using the special excluded table. - SELECT privilege is required on any column in the + SELECT privilege is required on any column in the target table where corresponding excluded columns are read. @@ -399,42 +400,42 @@ INSERT INTO table_name [ AS - index_column_name + index_column_name The name of a table_name column. Used to + class="parameter">table_name
column. Used to infer arbiter indexes. Follows CREATE - INDEX format. SELECT privilege on - index_column_name + INDEX format. SELECT privilege on + index_column_name is required. - index_expression + index_expression Similar to index_column_name, but used to + class="parameter">index_column_name
, but used to infer expressions on table_name columns appearing + class="parameter">table_name
columns appearing within index definitions (not simple columns). Follows - CREATE INDEX format. SELECT + CREATE INDEX format. SELECT privilege on any column appearing within index_expression is required. + class="parameter">index_expression
is required. - collation + collation When specified, mandates that corresponding index_column_name or - index_expression + class="parameter">index_column_name
or + index_expression use a particular collation in order to be matched during inference. Typically this is omitted, as collations usually do not affect whether or not a constraint violation occurs. @@ -444,12 +445,12 @@ INSERT INTO table_name [ AS - opclass + opclass When specified, mandates that corresponding index_column_name or - index_expression + class="parameter">index_column_name
or + index_expression use particular operator class in order to be matched during inference. Typically this is omitted, as the equality semantics are often equivalent @@ -462,21 +463,21 @@ INSERT INTO table_name [ AS - index_predicate + index_predicate Used to allow inference of partial unique indexes. Any indexes that satisfy the predicate (which need not actually be partial indexes) can be inferred. Follows CREATE - INDEX format. SELECT privilege on any + INDEX format. SELECT privilege on any column appearing within index_predicate is required. + class="parameter">index_predicate
is required. - constraint_name + constraint_name Explicitly specifies an arbiter @@ -487,13 +488,13 @@ INSERT INTO table_name [ AS - condition + condition An expression that returns a value of type boolean. Only rows for which this expression returns true will be updated, although all - rows will be locked when the ON CONFLICT DO UPDATE + rows will be locked when the ON CONFLICT DO UPDATE action is taken. Note that condition is evaluated last, after a conflict has been identified as a candidate to update. @@ -509,7 +510,7 @@ INSERT INTO table_name [ AS - INSERT with an ON CONFLICT DO UPDATE + INSERT with an ON CONFLICT DO UPDATE clause is a deterministic statement. This means that the command will not be allowed to affect any single existing row more than once; a cardinality violation error will be raised @@ -517,11 +518,19 @@ INSERT INTO table_name [ AS + + + Note that it is currently not supported for the + ON CONFLICT DO UPDATE clause of an + INSERT applied to a partitioned table to update the + partition key of a conflicting row such that it requires the row be moved + to a new partition. + It is often preferable to use unique index inference rather than naming a constraint directly using ON CONFLICT ON - CONSTRAINT + CONSTRAINT constraint_name. Inference will continue to work correctly when the underlying index is replaced by another more or less equivalent index in an overlapping way, for example when @@ -537,7 +546,7 @@ INSERT INTO table_name [ AS Outputs - On successful completion, an INSERT command returns a command + On successful completion, an INSERT command returns a command tag of the form INSERT oid count @@ -553,10 +562,10 @@ INSERT oid count - If the INSERT command contains a RETURNING - clause, the result will be similar to that of a SELECT + If the INSERT command contains a RETURNING + clause, the result will be similar to that of a SELECT statement containing the columns and values defined in the - RETURNING list, computed over the row(s) inserted or + RETURNING list, computed over the row(s) inserted or updated by the command. @@ -615,7 +624,7 @@ INSERT INTO films DEFAULT VALUES; - To insert multiple rows using the multirow VALUES syntax: + To insert multiple rows using the multirow VALUES syntax: INSERT INTO films (code, title, did, date_prod, kind) VALUES @@ -674,7 +683,7 @@ INSERT INTO employees_log SELECT *, current_timestamp FROM upd; Insert or update new distributors as appropriate. Assumes a unique index has been defined that constrains values appearing in the did column. Note that the special - excluded table is used to reference values originally + excluded table is used to reference values originally proposed for insertion: INSERT INTO distributors (did, dname) @@ -696,7 +705,7 @@ INSERT INTO distributors (did, dname) VALUES (7, 'Redline GmbH') Insert or update new distributors as appropriate. Example assumes a unique index has been defined that constrains values appearing in - the did column. WHERE clause is + the did column. WHERE clause is used to limit the rows actually updated (any existing row not updated will still be locked, though): @@ -733,13 +742,13 @@ INSERT INTO distributors (did, dname) VALUES (10, 'Conrad International') INSERT conforms to the SQL standard, except that - the RETURNING clause is a + the RETURNING clause is a PostgreSQL extension, as is the ability - to use WITH with INSERT, and the ability to - specify an alternative action with ON CONFLICT. 
+ to use WITH with INSERT, and the ability to + specify an alternative action with ON CONFLICT. Also, the case in which a column name list is omitted, but not all the columns are - filled from the VALUES clause or query, + filled from the VALUES clause or query, is disallowed by the standard. @@ -752,8 +761,8 @@ INSERT INTO distributors (did, dname) VALUES (10, 'Conrad International') Possible limitations of the query clause are documented under - . + class="parameter">query
clause are documented under + . diff --git a/doc/src/sgml/ref/listen.sgml b/doc/src/sgml/ref/listen.sgml index 9cd53b02bb..ecc1fb82de 100644 --- a/doc/src/sgml/ref/listen.sgml +++ b/doc/src/sgml/ref/listen.sgml @@ -3,7 +3,7 @@ doc/src/sgml/ref/listen.sgml PostgreSQL documentation --> - + LISTEN @@ -21,7 +21,7 @@ PostgreSQL documentation -LISTEN channel +LISTEN channel @@ -31,14 +31,14 @@ LISTEN channel LISTEN registers the current session as a listener on the notification channel named channel. + class="parameter">channel. If the current session is already registered as a listener for this notification channel, nothing is done. Whenever the command NOTIFY channel is invoked, either + class="parameter">channel is invoked, either by this session or another one connected to the same database, all the sessions currently listening on that notification channel are notified, and each will in turn notify its connected client @@ -54,18 +54,18 @@ LISTEN channel The method a client application must use to detect notification events depends on which PostgreSQL application programming interface it - uses. With the libpq library, the application issues + uses. With the libpq library, the application issues LISTEN as an ordinary SQL command, and then must periodically call the function PQnotifies to find out whether any notification events have been received. Other interfaces such as - libpgtcl provide higher-level methods for handling notify events; indeed, - with libpgtcl the application programmer should not even issue + libpgtcl provide higher-level methods for handling notify events; indeed, + with libpgtcl the application programmer should not even issue LISTEN or UNLISTEN directly. See the documentation for the interface you are using for more details. - + contains a more extensive discussion of the use of LISTEN and NOTIFY. @@ -77,7 +77,7 @@ LISTEN channel - channel + channel Name of a notification channel (any identifier). @@ -129,8 +129,8 @@ Asynchronous notification "virtual" received from server process with PID 8448. See Also - - + + diff --git a/doc/src/sgml/ref/load.sgml b/doc/src/sgml/ref/load.sgml index 6e9182fa3b..506699ef6f 100644 --- a/doc/src/sgml/ref/load.sgml +++ b/doc/src/sgml/ref/load.sgml @@ -2,7 +2,7 @@ doc/src/sgml/ref/load.sgml --> - + LOAD @@ -20,7 +20,7 @@ doc/src/sgml/ref/load.sgml -LOAD 'filename' +LOAD 'filename' @@ -28,34 +28,34 @@ LOAD 'filename' Description - This command loads a shared library file into the PostgreSQL + This command loads a shared library file into the PostgreSQL server's address space. If the file has been loaded already, the command does nothing. Shared library files that contain C functions are automatically loaded whenever one of their functions is called. - Therefore, an explicit LOAD is usually only needed to - load a library that modifies the server's behavior through hooks + Therefore, an explicit LOAD is usually only needed to + load a library that modifies the server's behavior through hooks rather than providing a set of functions. The library file name is typically given as just a bare file name, which is sought in the server's library search path (set - by ). Alternatively it can be + by ). Alternatively it can be given as a full path name. In either case the platform's standard shared library file name extension may be omitted. - See for more information on this topic. + See for more information on this topic. 
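As a concrete illustration, a hook-based module such as auto_explain can be loaded into a single session and then configured with ordinary SET commands:

LOAD 'auto_explain';
SET auto_explain.log_min_duration = 250;  -- example value, in milliseconds

The parameter value here is only an example; for non-superusers the restriction described in the note below applies.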
- $libdir/plugins + $libdir/plugins - Non-superusers can only apply LOAD to library files - located in $libdir/plugins/ — the specified - filename must begin + Non-superusers can only apply LOAD to library files + located in $libdir/plugins/ — the specified + filename must begin with exactly that string. (It is the database administrator's - responsibility to ensure that only safe libraries + responsibility to ensure that only safe libraries are installed there.) @@ -74,7 +74,7 @@ LOAD 'filename' See Also - + diff --git a/doc/src/sgml/ref/lock.sgml b/doc/src/sgml/ref/lock.sgml index b946eab303..a225cea63b 100644 --- a/doc/src/sgml/ref/lock.sgml +++ b/doc/src/sgml/ref/lock.sgml @@ -3,7 +3,7 @@ doc/src/sgml/ref/lock.sgml PostgreSQL documentation --> - + LOCK @@ -21,9 +21,9 @@ PostgreSQL documentation -LOCK [ TABLE ] [ ONLY ] name [ * ] [, ...] [ IN lockmode MODE ] [ NOWAIT ] +LOCK [ TABLE ] [ ONLY ] name [ * ] [, ...] [ IN lockmode MODE ] [ NOWAIT ] -where lockmode is one of: +where lockmode is one of: ACCESS SHARE | ROW SHARE | ROW EXCLUSIVE | SHARE UPDATE EXCLUSIVE | SHARE | SHARE ROW EXCLUSIVE | EXCLUSIVE | ACCESS EXCLUSIVE @@ -45,21 +45,26 @@ LOCK [ TABLE ] [ ONLY ] name [ * ] end.) + + When a view is locked, all relations appearing in the view definition + query are also locked recursively with the same lock mode. + + When acquiring locks automatically for commands that reference tables, PostgreSQL always uses the least restrictive lock mode possible. LOCK TABLE provides for cases when you might need more restrictive locking. For example, suppose an application runs a transaction at the - READ COMMITTED isolation level and needs to ensure that + READ COMMITTED isolation level and needs to ensure that data in a table remains stable for the duration of the transaction. - To achieve this you could obtain SHARE lock mode over the + To achieve this you could obtain SHARE lock mode over the table before querying. This will prevent concurrent data changes and ensure subsequent reads of the table see a stable view of - committed data, because SHARE lock mode conflicts with - the ROW EXCLUSIVE lock acquired by writers, and your + committed data, because SHARE lock mode conflicts with + the ROW EXCLUSIVE lock acquired by writers, and your LOCK TABLE name IN SHARE MODE + class="parameter">name IN SHARE MODE statement will wait until any concurrent holders of ROW EXCLUSIVE mode locks commit or roll back. Thus, once you obtain the lock, there are no uncommitted writes outstanding; @@ -68,28 +73,28 @@ LOCK [ TABLE ] [ ONLY ] name [ * ] To achieve a similar effect when running a transaction at the - REPEATABLE READ or SERIALIZABLE - isolation level, you have to execute the LOCK TABLE statement - before executing any SELECT or data modification statement. - A REPEATABLE READ or SERIALIZABLE transaction's + REPEATABLE READ or SERIALIZABLE + isolation level, you have to execute the LOCK TABLE statement + before executing any SELECT or data modification statement. + A REPEATABLE READ or SERIALIZABLE transaction's view of data will be frozen when its first - SELECT or data modification statement begins. A LOCK - TABLE later in the transaction will still prevent concurrent writes + SELECT or data modification statement begins. A LOCK + TABLE later in the transaction will still prevent concurrent writes — but it won't ensure that what the transaction reads corresponds to the latest committed values. 
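To illustrate the view-locking behavior mentioned above (the view name is hypothetical), locking a view takes the requested mode on the view and, recursively, on every relation in its defining query:

BEGIN;
LOCK TABLE order_summary IN SHARE MODE;  -- order_summary is a hypothetical view
-- the tables referenced by order_summary are now locked in SHARE mode as well
COMMIT;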
If a transaction of this sort is going to change the data in the - table, then it should use SHARE ROW EXCLUSIVE lock mode - instead of SHARE mode. This ensures that only one + table, then it should use SHARE ROW EXCLUSIVE lock mode + instead of SHARE mode. This ensures that only one transaction of this type runs at a time. Without this, a deadlock - is possible: two transactions might both acquire SHARE - mode, and then be unable to also acquire ROW EXCLUSIVE + is possible: two transactions might both acquire SHARE + mode, and then be unable to also acquire ROW EXCLUSIVE mode to actually perform their updates. (Note that a transaction's own locks never conflict, so a transaction can acquire ROW - EXCLUSIVE mode when it holds SHARE mode — but not - if anyone else holds SHARE mode.) To avoid deadlocks, + EXCLUSIVE mode when it holds SHARE mode — but not + if anyone else holds SHARE mode.) To avoid deadlocks, make sure all transactions acquire locks on the same objects in the same order, and if multiple lock modes are involved for a single object, then transactions should always acquire the most @@ -98,7 +103,7 @@ LOCK [ TABLE ] [ ONLY ] name [ * ] More information about the lock modes and locking strategies can be - found in . + found in . @@ -107,20 +112,20 @@ LOCK [ TABLE ] [ ONLY ] name [ * ] - name + name The name (optionally schema-qualified) of an existing table to - lock. If ONLY is specified before the table name, only that - table is locked. If ONLY is not specified, the table and all - its descendant tables (if any) are locked. Optionally, * + lock. If ONLY is specified before the table name, only that + table is locked. If ONLY is not specified, the table and all + its descendant tables (if any) are locked. Optionally, * can be specified after the table name to explicitly indicate that descendant tables are included. - The command LOCK TABLE a, b; is equivalent to - LOCK TABLE a; LOCK TABLE b;. The tables are locked + The command LOCK TABLE a, b; is equivalent to + LOCK TABLE a; LOCK TABLE b;. The tables are locked one-by-one in the order specified in the LOCK TABLE command. @@ -132,7 +137,7 @@ LOCK [ TABLE ] [ ONLY ] name [ * ] The lock mode specifies which locks this lock conflicts with. - Lock modes are described in . + Lock modes are described in . @@ -160,38 +165,45 @@ LOCK [ TABLE ] [ ONLY ] name [ * ] Notes - LOCK TABLE ... IN ACCESS SHARE MODE requires SELECT + LOCK TABLE ... IN ACCESS SHARE MODE requires SELECT privileges on the target table. LOCK TABLE ... IN ROW EXCLUSIVE - MODE requires INSERT, UPDATE, DELETE, - or TRUNCATE privileges on the target table. All other forms of - LOCK require table-level UPDATE, DELETE, - or TRUNCATE privileges. + MODE requires INSERT, UPDATE, DELETE, + or TRUNCATE privileges on the target table. All other forms of + LOCK require table-level UPDATE, DELETE, + or TRUNCATE privileges. + + + + The user performing the lock on the view must have the corresponding privilege + on the view. In addition the view's owner must have the relevant privileges on + the underlying base relations, but the user performing the lock does + not need any permissions on the underlying base relations. - LOCK TABLE is useless outside a transaction block: the lock + LOCK TABLE is useless outside a transaction block: the lock would remain held only to the completion of the statement. Therefore - PostgreSQL reports an error if LOCK + PostgreSQL reports an error if LOCK is used outside a transaction block. 
Use - and - - (or ) + and + + (or ) to define a transaction block. - LOCK TABLE only deals with table-level locks, and so - the mode names involving ROW are all misnomers. These + LOCK TABLE only deals with table-level locks, and so + the mode names involving ROW are all misnomers. These mode names should generally be read as indicating the intention of the user to acquire row-level locks within the locked table. Also, - ROW EXCLUSIVE mode is a shareable table lock. Keep in + ROW EXCLUSIVE mode is a shareable table lock. Keep in mind that all the lock modes have identical semantics so far as - LOCK TABLE is concerned, differing only in the rules + LOCK TABLE is concerned, differing only in the rules about which modes conflict with which. For information on how to - acquire an actual row-level lock, see + acquire an actual row-level lock, see and the in the SELECT + endterm="sql-for-update-share-title"/> in the SELECT reference documentation. @@ -200,7 +212,7 @@ LOCK [ TABLE ] [ ONLY ] name [ * ] Examples - Obtain a SHARE lock on a primary key table when going to perform + Obtain a SHARE lock on a primary key table when going to perform inserts into a foreign key table: @@ -216,7 +228,7 @@ COMMIT WORK; - Take a SHARE ROW EXCLUSIVE lock on a primary key table when going to perform + Take a SHARE ROW EXCLUSIVE lock on a primary key table when going to perform a delete operation: @@ -236,12 +248,12 @@ COMMIT WORK; There is no LOCK TABLE in the SQL standard, which instead uses SET TRANSACTION to specify concurrency levels on transactions. PostgreSQL supports that too; - see for details. + see for details. - Except for ACCESS SHARE, ACCESS EXCLUSIVE, - and SHARE UPDATE EXCLUSIVE lock modes, the + Except for ACCESS SHARE, ACCESS EXCLUSIVE, + and SHARE UPDATE EXCLUSIVE lock modes, the PostgreSQL lock modes and the LOCK TABLE syntax are compatible with those present in Oracle. diff --git a/doc/src/sgml/ref/move.sgml b/doc/src/sgml/ref/move.sgml index ed64f23068..4c7d1dca39 100644 --- a/doc/src/sgml/ref/move.sgml +++ b/doc/src/sgml/ref/move.sgml @@ -3,7 +3,7 @@ doc/src/sgml/ref/move.sgml PostgreSQL documentation --> - + MOVE @@ -27,23 +27,23 @@ PostgreSQL documentation -MOVE [ direction [ FROM | IN ] ] cursor_name +MOVE [ direction [ FROM | IN ] ] cursor_name -where direction can be empty or one of: +where direction can be empty or one of: NEXT PRIOR FIRST LAST - ABSOLUTE count - RELATIVE count - count + ABSOLUTE count + RELATIVE count + count ALL FORWARD - FORWARD count + FORWARD count FORWARD ALL BACKWARD - BACKWARD count + BACKWARD count BACKWARD ALL @@ -60,7 +60,7 @@ MOVE [ direction [ FROM | IN ] ] The parameters for the MOVE command are identical to those of the FETCH command; refer to - + for details on syntax and usage. 
@@ -69,7 +69,7 @@ MOVE [ direction [ FROM | IN ] ] Outputs - On successful completion, a MOVE command returns a command + On successful completion, a MOVE command returns a command tag of the form MOVE count @@ -116,9 +116,9 @@ COMMIT WORK; See Also - - - + + + diff --git a/doc/src/sgml/ref/notify.sgml b/doc/src/sgml/ref/notify.sgml index 3389aa055c..e0e125a2a2 100644 --- a/doc/src/sgml/ref/notify.sgml +++ b/doc/src/sgml/ref/notify.sgml @@ -3,7 +3,7 @@ doc/src/sgml/ref/notify.sgml PostgreSQL documentation --> - + NOTIFY @@ -21,7 +21,7 @@ PostgreSQL documentation -NOTIFY channel [ , payload ] +NOTIFY channel [ , payload ] @@ -30,9 +30,9 @@ NOTIFY channel [ , The NOTIFY command sends a notification event together - with an optional payload string to each client application that + with an optional payload string to each client application that has previously executed - LISTEN channel + LISTEN channel for the specified channel name in the current database. Notifications are visible to all users. @@ -49,7 +49,7 @@ NOTIFY channel [ , The information passed to the client for a notification event includes the notification channel - name, the notifying session's server process PID, and the + name, the notifying session's server process PID, and the payload string, which is an empty string if it has not been specified. @@ -115,9 +115,9 @@ NOTIFY channel [ , PID (supplied in the + session's server process PID (supplied in the notification event message) is the same as one's own session's - PID (available from libpq). When they + PID (available from libpq). When they are the same, the notification event is one's own work bouncing back, and can be ignored. @@ -128,7 +128,7 @@ NOTIFY channel [ , - channel + channel Name of the notification channel to be signaled (any identifier). @@ -136,10 +136,10 @@ NOTIFY channel [ , - payload + payload - The payload string to be communicated along with the + The payload string to be communicated along with the notification. This must be specified as a simple string literal. In the default configuration it must be shorter than 8000 bytes. (If binary data or large amounts of information need to be communicated, @@ -168,7 +168,7 @@ NOTIFY channel [ , The function pg_notification_queue_usage returns the fraction of the queue that is currently occupied by pending notifications. - See for more information. + See for more information. A transaction that has executed NOTIFY cannot be @@ -226,8 +226,8 @@ Asynchronous notification "foo" with payload "payload" received from server proc See Also - - + + diff --git a/doc/src/sgml/ref/pg_basebackup.sgml b/doc/src/sgml/ref/pg_basebackup.sgml index 2454d35af3..c9f6ce4bb3 100644 --- a/doc/src/sgml/ref/pg_basebackup.sgml +++ b/doc/src/sgml/ref/pg_basebackup.sgml @@ -22,7 +22,7 @@ PostgreSQL documentation pg_basebackup - option + option @@ -34,9 +34,9 @@ PostgreSQL documentation pg_basebackup is used to take base backups of a running PostgreSQL database cluster. These are taken without affecting other clients to the database, and can be used - both for point-in-time recovery (see ) + both for point-in-time recovery (see ) and as the starting point for a log shipping or streaming replication standby - servers (see ). + servers (see ). @@ -45,17 +45,17 @@ PostgreSQL documentation out of backup mode automatically. Backups are always taken of the entire database cluster; it is not possible to back up individual databases or database objects. For individual database backups, a tool such as - must be used. + must be used. 
The backup is made over a regular PostgreSQL connection, and uses the replication protocol. The connection must be made with a superuser or a user having REPLICATION - permissions (see ), + permissions (see ), and pg_hba.conf must explicitly permit the replication connection. The server must also be configured - with set high enough to leave at least + with set high enough to leave at least one session available for the backup and one for WAL streaming (if used). @@ -69,9 +69,9 @@ PostgreSQL documentation pg_basebackup can make a base backup from not only the master but also the standby. To take a backup from the standby, set up the standby so that it can accept replication connections (that is, set - max_wal_senders and , + max_wal_senders and , and configure host-based authentication). - You will also need to enable on the master. + You will also need to enable on the master. @@ -85,7 +85,7 @@ PostgreSQL documentation - If you are using -X none, there is no guarantee that all + If you are using -X none, there is no guarantee that all WAL files required for the backup are archived at the end of backup. @@ -97,9 +97,9 @@ PostgreSQL documentation All WAL records required for the backup must contain sufficient full-page writes, - which requires you to enable full_page_writes on the master and - not to use a tool like pg_compresslog as - archive_command to remove full-page writes from WAL files. + which requires you to enable full_page_writes on the master and + not to use a tool like pg_compresslog as + archive_command to remove full-page writes from WAL files. @@ -193,8 +193,8 @@ PostgreSQL documentation The maximum transfer rate of data transferred from the server. Values are - in kilobytes per second. Use a suffix of M to indicate megabytes - per second. A suffix of k is also accepted, and has no effect. + in kilobytes per second. Use a suffix of M to indicate megabytes + per second. A suffix of k is also accepted, and has no effect. Valid values are between 32 kilobytes per second and 1024 megabytes per second. @@ -226,48 +226,6 @@ PostgreSQL documentation - - - - - - This option can only be used together with -X - stream. It causes the WAL streaming to use the specified - replication slot. If the base backup is intended to be used as a - streaming replication standby using replication slots, it should then - use the same replication slot name - in recovery.conf. That way, it is ensured that - the server does not remove any necessary WAL data in the time between - the end of the base backup and the start of streaming replication. - - - If this option is not specified and the server supports temporary - replication slots (version 10 and later), then a temporary replication - slot is automatically used for WAL streaming. - - - - - - - - - This option prevents the creation of a temporary replication slot - during the backup even if it's supported by the server. - - - Temporary replication slots are created by default if no slot name - is given with the option when using log streaming. - - - The main purpose of this option is to allow taking a base backup when - the server is out of free replication slots. Using replication slots - is almost always preferred, because it prevents needed WAL from being - removed by the server during the backup. - - - - @@ -341,7 +299,7 @@ PostgreSQL documentation The write-ahead log files are collected at the end of the backup. 
Therefore, it is necessary for the - parameter to be set high + parameter to be set high enough that the log is not removed before the end of the backup. If the log has been rotated when it's time to transfer it, the backup will fail and be unusable. @@ -362,7 +320,7 @@ PostgreSQL documentation open a second connection to the server and start streaming the write-ahead log in parallel while running the backup. Therefore, it will use up two connections configured by the - parameter. As long as the + parameter. As long as the client can keep up with write-ahead log received, using this mode requires no extra write-ahead logs to be saved on the master. @@ -419,7 +377,19 @@ PostgreSQL documentation - Sets checkpoint mode to fast (immediate) or spread (default) (see ). + Sets checkpoint mode to fast (immediate) or spread (default) (see ). + + + + + + + + + + This option causes creation of a replication slot named by the + --slot option before starting the backup. + An error is raised if the slot already exists. @@ -453,6 +423,21 @@ PostgreSQL documentation + + + + + + By default, pg_basebackup will wait for all files + to be written safely to disk. This option causes + pg_basebackup to return without waiting, which is + faster, but means that a subsequent operating system crash can leave + the base backup corrupt. Generally, this option is useful for testing + but should not be used when creating a production installation. + + + + @@ -476,16 +461,27 @@ PostgreSQL documentation - - + + - By default, pg_basebackup will wait for all files - to be written safely to disk. This option causes - pg_basebackup to return without waiting, which is - faster, but means that a subsequent operating system crash can leave - the base backup corrupt. Generally, this option is useful for testing - but should not be used when creating a production installation. + This option can only be used together with -X + stream. It causes the WAL streaming to use the specified + replication slot. If the base backup is intended to be used as a + streaming replication standby using replication slots, it should then + use the same replication slot name + in recovery.conf. That way, it is ensured that + the server does not remove any necessary WAL data in the time between + the end of the base backup and the start of streaming replication. + + + The specified replication slot has to exist unless the + option is also used. + + + If this option is not specified and the server supports temporary + replication slots (version 10 and later), then a temporary replication + slot is automatically used for WAL streaming. @@ -502,6 +498,41 @@ PostgreSQL documentation + + + + + This option prevents the creation of a temporary replication slot + during the backup even if it's supported by the server. + + + Temporary replication slots are created by default if no slot name + is given with the option when using log streaming. + + + The main purpose of this option is to allow taking a base backup when + the server is out of free replication slots. Using replication slots + is almost always preferred, because it prevents needed WAL from being + removed by the server during the backup. + + + + + + + + + Disables verification of checksums, if they are enabled on the server + the base backup is taken from. + + + By default, checksums are verified and checksum failures will result + in a non-zero exit status. However, the base backup will not be + removed in such a case, as if the option + had been used. 
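Tying the slot-related options together, a sketch of a backup that creates a named replication slot and streams WAL through it (directory and slot name are placeholders):

# directory and slot name are hypothetical
pg_basebackup -D /srv/standby/data -X stream -C -S basebackup_slot

Because -C/--create-slot is given, the slot must not already exist; without -C, the slot named by -S/--slot has to have been created beforehand, as described above.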
+ + + @@ -515,10 +546,10 @@ PostgreSQL documentation Specifies parameters used to connect to the server, as a connection - string. See for more information. + string. See for more information. - The option is called --dbname for consistency with other + The option is called --dbname for consistency with other client applications, but because pg_basebackup doesn't connect to any particular database in the cluster, database name in the connection string will be ignored. @@ -578,8 +609,8 @@ PostgreSQL documentation - - + + Never issue a password prompt. If the server requires @@ -607,7 +638,7 @@ PostgreSQL documentation for a password if the server demands password authentication. However, pg_basebackup will waste a connection attempt finding out that the server wants a password. - In some cases it is worth typing to avoid the extra connection attempt. @@ -620,8 +651,8 @@ PostgreSQL documentation - - + + Print the pg_basebackup version and exit. @@ -630,8 +661,8 @@ PostgreSQL documentation - - + + Show help about pg_basebackup command line @@ -649,9 +680,9 @@ PostgreSQL documentation Environment - This utility, like most other PostgreSQL utilities, - uses the environment variables supported by libpq - (see ). + This utility, like most other PostgreSQL utilities, + uses the environment variables supported by libpq + (see ). @@ -675,7 +706,7 @@ PostgreSQL documentation symbolic links used for tablespaces are preserved. Symbolic links pointing to certain directories known to PostgreSQL are copied as empty directories. Other symbolic links and special device files are skipped. - See for the precise details. + See for the precise details. @@ -693,8 +724,8 @@ PostgreSQL documentation tar file before starting the PostgreSQL server. If there are additional tablespaces, the tar files for them need to be unpacked in the correct locations. In this case the symbolic links for those tablespaces will be created by the server - according to the contents of the tablespace_map file that is - included in the base.tar file. + according to the contents of the tablespace_map file that is + included in the base.tar file. @@ -705,6 +736,12 @@ PostgreSQL documentation or later. + + pg_basebackup will preserve group permissions in + both the plain and tar formats if group + permissions are enabled on the source cluster. + + @@ -752,7 +789,7 @@ PostgreSQL documentation See Also - + diff --git a/doc/src/sgml/ref/pg_config-ref.sgml b/doc/src/sgml/ref/pg_config-ref.sgml index 0210f6389d..b819f3f345 100644 --- a/doc/src/sgml/ref/pg_config-ref.sgml +++ b/doc/src/sgml/ref/pg_config-ref.sgml @@ -13,7 +13,7 @@ pg_config - retrieve information about the installed version of PostgreSQL + retrieve information about the installed version of PostgreSQL @@ -24,12 +24,12 @@ - Description</> + <title>Description - The pg_config utility prints configuration parameters - of the currently installed version of PostgreSQL. It is + The pg_config utility prints configuration parameters + of the currently installed version of PostgreSQL. It is intended, for example, to be used by software packages that want to interface - to PostgreSQL to facilitate finding the required header files + to PostgreSQL to facilitate finding the required header files and libraries. @@ -39,22 +39,22 @@ Options - To use pg_config, supply one or more of the following + To use pg_config, supply one or more of the following options: - + Print the location of user executables. Use this, for example, to find - the psql program. 
This is normally also the location - where the pg_config program resides. + the psql program. This is normally also the location + where the pg_config program resides. - + Print the location of documentation files. @@ -63,7 +63,7 @@ - + Print the location of HTML documentation files. @@ -72,7 +72,7 @@ - + Print the location of C header files of the client interfaces. @@ -81,7 +81,7 @@ - + Print the location of other C header files. @@ -90,7 +90,7 @@ - + Print the location of C header files for server programming. @@ -99,7 +99,7 @@ - + Print the location of object code libraries. @@ -108,7 +108,7 @@ - + Print the location of dynamically loadable modules, or where @@ -120,18 +120,18 @@ - + Print the location of locale support files. (This will be an empty string if locale support was not configured when - PostgreSQL was built.) + PostgreSQL was built.) - + Print the location of manual pages. @@ -140,7 +140,7 @@ - + Print the location of architecture-independent support files. @@ -149,7 +149,7 @@ - + Print the location of system-wide configuration files. @@ -158,7 +158,7 @@ - + Print the location of extension makefiles. @@ -167,11 +167,11 @@ - + - Print the options that were given to the configure - script when PostgreSQL was configured for building. + Print the options that were given to the configure + script when PostgreSQL was configured for building. This can be used to reproduce the identical configuration, or to find out with what options a binary package was built. (Note however that binary packages often contain vendor-specific custom @@ -181,102 +181,102 @@ - + Print the value of the CC variable that was used for building - PostgreSQL. This shows the C compiler used. + PostgreSQL. This shows the C compiler used. - + Print the value of the CPPFLAGS variable that was used for building - PostgreSQL. This shows C compiler switches needed - at preprocessing time (typically, -I switches). + PostgreSQL. This shows C compiler switches needed + at preprocessing time (typically, -I switches). - + Print the value of the CFLAGS variable that was used for building - PostgreSQL. This shows C compiler switches. + PostgreSQL. This shows C compiler switches. - + Print the value of the CFLAGS_SL variable that was used for building - PostgreSQL. This shows extra C compiler switches + PostgreSQL. This shows extra C compiler switches used for building shared libraries. - + Print the value of the LDFLAGS variable that was used for building - PostgreSQL. This shows linker switches. + PostgreSQL. This shows linker switches. - + Print the value of the LDFLAGS_EX variable that was used for building - PostgreSQL. This shows linker switches + PostgreSQL. This shows linker switches used for building executables only. - + Print the value of the LDFLAGS_SL variable that was used for building - PostgreSQL. This shows linker switches + PostgreSQL. This shows linker switches used for building shared libraries only. - + Print the value of the LIBS variable that was used for building - PostgreSQL. This normally contains -l - switches for external libraries linked into PostgreSQL. + PostgreSQL. This normally contains -l + switches for external libraries linked into PostgreSQL. - + - Print the version of PostgreSQL. + Print the version of PostgreSQL. - - + + Show help about pg_config command line @@ -303,9 +303,9 @@ , , , , , , - and were added in PostgreSQL 8.1. - The option was added in PostgreSQL 8.4. - The option was added in PostgreSQL 9.0. + and were added in PostgreSQL 8.1. 
+ The option was added in PostgreSQL 8.4. + The option was added in PostgreSQL 9.0. diff --git a/doc/src/sgml/ref/pg_controldata.sgml b/doc/src/sgml/ref/pg_controldata.sgml index 4a360d61fd..32081e9b91 100644 --- a/doc/src/sgml/ref/pg_controldata.sgml +++ b/doc/src/sgml/ref/pg_controldata.sgml @@ -3,7 +3,7 @@ doc/src/sgml/ref/pg_controldata.sgml PostgreSQL documentation --> - + pg_controldata @@ -23,15 +23,21 @@ PostgreSQL documentation pg_controldata option - datadir + + + + + + datadir + - + Description pg_controldata prints information initialized during - initdb, such as the catalog version. + initdb, such as the catalog version. It also shows information about write-ahead logging and checkpoint processing. This information is cluster-wide, and not specific to any one database. @@ -41,10 +47,10 @@ PostgreSQL documentation This utility can only be run by the user who initialized the cluster because it requires read access to the data directory. You can specify the data directory on the command line, or use - the environment variable PGDATA. This utility supports the options - and , which print the pg_controldata version and exit. It also - supports options and , which output the supported arguments. diff --git a/doc/src/sgml/ref/pg_ctl-ref.sgml b/doc/src/sgml/ref/pg_ctl-ref.sgml index 12fa011c4e..e31275a04e 100644 --- a/doc/src/sgml/ref/pg_ctl-ref.sgml +++ b/doc/src/sgml/ref/pg_ctl-ref.sgml @@ -97,6 +97,13 @@ PostgreSQL documentation + + pg_ctl + + datadir + + + pg_ctl @@ -140,7 +147,7 @@ PostgreSQL documentation pg_ctl is a utility for initializing a PostgreSQL database cluster, starting, stopping, or restarting the PostgreSQL - database server (), or displaying the + database server (), or displaying the status of a running server. Although the server can be started manually, pg_ctl encapsulates tasks such as redirecting log output and properly detaching from the terminal @@ -153,19 +160,19 @@ PostgreSQL documentation PostgreSQL database cluster, that is, a collection of databases that will be managed by a single server instance. This mode invokes the initdb - command. See for details. + command. See for details. mode launches a new server. The server is started in the background, and its standard input is attached - to /dev/null (or nul on Windows). + to /dev/null (or nul on Windows). On Unix-like systems, by default, the server's standard output and standard error are sent to pg_ctl's standard output (not standard error). The standard output of pg_ctl should then be redirected to a file or piped to another process such as a log rotating program - like rotatelogs; otherwise postgres + like rotatelogs; otherwise postgres will write its output to the controlling terminal (from the background) and will not leave the shell's process group. On Windows, by default the server's standard output and standard error @@ -203,7 +210,7 @@ PostgreSQL documentation mode simply sends the - postgres server process a SIGHUP + postgres server process a SIGHUP signal, causing it to reread its configuration files (postgresql.conf, pg_hba.conf, etc.). This allows changing @@ -226,16 +233,22 @@ PostgreSQL documentation and begin read-write operations. + + mode rotates the server log file. + For details on how to use this mode with external log rotation tools, see + . + + mode sends a signal to a specified process. - This is primarily valuable on Microsoft Windows - which does not have a built-in kill command. Use - --help to see a list of supported signal names. 
+ This is primarily valuable on Microsoft Windows + which does not have a built-in kill command. Use + --help to see a list of supported signal names. - mode registers the PostgreSQL - server as a system service on Microsoft Windows. + mode registers the PostgreSQL + server as a system service on Microsoft Windows. The option allows selection of service start type, either auto (start service automatically on system startup) or demand (start service on demand). @@ -243,7 +256,7 @@ PostgreSQL documentation mode unregisters a system service - on Microsoft Windows. This undoes the effects of the + on Microsoft Windows. This undoes the effects of the command. @@ -286,7 +299,7 @@ PostgreSQL documentation Append the server log output to filename. If the file does not - exist, it is created. The umask is set to 077, + exist, it is created. The umask is set to 077, so access to the log file is disallowed to other users by default. @@ -313,11 +326,11 @@ PostgreSQL documentation Specifies options to be passed directly to the postgres command. - can be specified multiple times, with all the given options being passed through. - The options should usually be surrounded by single or + The options should usually be surrounded by single or double quotes to ensure that they are passed through as a group. @@ -330,11 +343,11 @@ PostgreSQL documentation Specifies options to be passed directly to the initdb command. - can be specified multiple times, with all the given options being passed through. - The options should usually be surrounded by single or + The initdb-options should usually be surrounded by single or double quotes to ensure that they are passed through as a group. @@ -377,15 +390,15 @@ PostgreSQL documentation Specifies the maximum number of seconds to wait when waiting for an operation to complete (see option ). Defaults to - the value of the PGCTLTIMEOUT environment variable or, if + the value of the PGCTLTIMEOUT environment variable or, if not set, to 60 seconds. - - + + Print the pg_ctl version and exit. @@ -405,10 +418,12 @@ PostgreSQL documentation - When waiting for startup, pg_ctl repeatedly - attempts to connect to the server. - When waiting for shutdown, pg_ctl waits for - the server to remove its PID file. + When waiting, pg_ctl repeatedly checks the + server's PID file, sleeping for a short amount + of time between checks. Startup is considered complete when + the PID file indicates that the server is ready to + accept connections. Shutdown is considered complete when the server + removes the PID file. pg_ctl returns an exit code based on the success of the startup or shutdown. @@ -446,8 +461,8 @@ PostgreSQL documentation - - + + Show help about pg_ctl command line @@ -475,7 +490,7 @@ PostgreSQL documentation default is PostgreSQL. Note that this only controls messages sent from pg_ctl itself; once started, the server will use the event source specified - by its parameter. Should the server + by its parameter. Should the server fail very early in startup, before that parameter has been set, it might also log using the default event source name PostgreSQL. @@ -507,7 +522,7 @@ PostgreSQL documentation - Start type of the system service. start-type can + Start type of the system service. start-type can be auto, or demand, or the first letter of one of these two. If this option is omitted, auto is the default. @@ -559,20 +574,20 @@ PostgreSQL documentation Most pg_ctl modes require knowing the data directory - location; therefore, the option is required unless PGDATA is set. 
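A few sketches of the modes described above (service name, data directory, and PID are placeholders):

# all names, paths, and the PID below are hypothetical
pg_ctl logrotate -D /usr/local/pgsql/data
pg_ctl kill HUP 13718
pg_ctl register -N postgresql -D "C:\pgdata" -S auto

The first asks a running server to switch to a new log file, the second sends SIGHUP to the given server process, and the third registers the server as a Windows service that starts automatically at system startup.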
- pg_ctl, like most other PostgreSQL + pg_ctl, like most other PostgreSQL utilities, - also uses the environment variables supported by libpq - (see ). + also uses the environment variables supported by libpq + (see ). For additional variables that affect the server, - see . + see . @@ -610,10 +625,10 @@ PostgreSQL documentation - + Examples - + Starting the Server @@ -632,7 +647,7 @@ PostgreSQL documentation - + Stopping the Server To stop the server, use: @@ -646,7 +661,7 @@ PostgreSQL documentation - + Restarting the Server @@ -661,14 +676,14 @@ PostgreSQL documentation - But if - + Showing the Server Status @@ -679,8 +694,7 @@ PostgreSQL documentation pg_ctl: server is running (PID: 13718) /usr/local/pgsql/bin/postgres "-D" "/usr/local/pgsql/data" "-p" "5433" "-B" "128" - - + The second line is the command that would be invoked in restart mode. @@ -691,8 +705,8 @@ pg_ctl: server is running (PID: 13718) See Also - - + + diff --git a/doc/src/sgml/ref/pg_dump.sgml b/doc/src/sgml/ref/pg_dump.sgml index 7ccbee4855..b5fa4fb85c 100644 --- a/doc/src/sgml/ref/pg_dump.sgml +++ b/doc/src/sgml/ref/pg_dump.sgml @@ -3,7 +3,7 @@ doc/src/sgml/ref/pg_dump.sgml PostgreSQL documentation --> - + pg_dump @@ -46,9 +46,10 @@ PostgreSQL documentation - pg_dump only dumps a single database. To backup - global objects that are common to all databases in a cluster, such as roles - and tablespaces, use . + pg_dump only dumps a single database. + To back up an entire cluster, or to back up global objects that are + common to all databases in a cluster (such as roles and tablespaces), + use . @@ -56,7 +57,7 @@ PostgreSQL documentation dumps are plain-text files containing the SQL commands required to reconstruct the database to the state it was in at the time it was saved. To restore from such a script, feed it to . Script files + linkend="app-psql"/>. Script files can be used to reconstruct the database even on other machines and other architectures; with some modifications, even on other SQL database products. @@ -64,7 +65,7 @@ PostgreSQL documentation The alternative archive file formats must be used with - to rebuild the database. They + to rebuild the database. They allow pg_restore to be selective about what is restored, or even to reorder the items prior to being restored. @@ -81,7 +82,7 @@ PostgreSQL documentation can be used to examine the archive and/or select which parts of the database are to be restored. The most flexible output file formats are the custom format () and the - directory format(). They allow + directory format (). They allow for selection and reordering of all archived items, support parallel restoration, and are compressed by default. The directory format is the only format that supports parallel dumps. @@ -116,8 +117,8 @@ PostgreSQL documentation - - + + Dump only the data, not the schema (data definitions). @@ -126,39 +127,40 @@ PostgreSQL documentation This option is similar to, but for historical reasons not identical - to, specifying . - - + + Include large objects in the dump. This is the default behavior - except when , , or + is specified. The switch is therefore only useful to add large objects to dumps where a specific schema or table has been requested. Note that blobs are considered data and therefore will be included when - --data-only is used, but not when --schema-only is. + is used, but not + when is. - - + + Exclude large objects in the dump. - When both and are given, the behavior is to output large objects, when data is being dumped, see the - documentation. 
@@ -170,7 +172,7 @@ PostgreSQL documentation Output commands to clean (drop) database objects prior to outputting the commands for creating them. - (Unless is also specified, restore might generate some harmless error messages, if any objects were not present in the destination database.) @@ -184,8 +186,8 @@ PostgreSQL documentation - - + + Begin the output with a command to create the @@ -196,6 +198,17 @@ PostgreSQL documentation recreates the target database before reconnecting to it. + + With , the output also includes the + database's comment if any, and any configuration variable settings + that are specific to this database, that is, + any ALTER DATABASE ... SET ... + and ALTER ROLE ... IN DATABASE ... SET ... + commands that mention this database. + Access privileges for the database itself are also dumped, + unless is specified. + + This option is only meaningful for the plain-text format. For the archive formats, you can specify the option when you @@ -242,8 +255,8 @@ PostgreSQL documentation - p - plain + p + plain Output a plain-text SQL script file (the default). @@ -252,8 +265,8 @@ PostgreSQL documentation - c - custom + c + custom Output a custom-format archive suitable for input into @@ -267,8 +280,8 @@ PostgreSQL documentation - d - directory + d + directory Output a directory-format archive suitable for input into @@ -286,8 +299,8 @@ PostgreSQL documentation - t - tar + t + tar Output a tar-format archive suitable for input @@ -305,8 +318,8 @@ PostgreSQL documentation - - + + Run the dump in parallel by dumping njobs @@ -315,13 +328,13 @@ PostgreSQL documentation directory output format because this is the only output format where multiple processes can write their data at the same time. - pg_dump will open njobs - + 1 connections to the database, so make sure your + pg_dump will open njobs + + 1 connections to the database, so make sure your setting is high enough to accommodate all connections. Requesting exclusive locks on database objects while running a parallel dump could - cause the dump to fail. The reason is that the pg_dump master process + cause the dump to fail. The reason is that the pg_dump master process requests shared locks on the objects that the worker processes are going to dump later in order to make sure that nobody deletes them and makes them go away while the dump is running. @@ -330,10 +343,10 @@ PostgreSQL documentation released. Consequently any other access to the table will not be granted either and will queue after the exclusive lock request. This includes the worker process trying to dump the table. Without any precautions this would be a classic deadlock situation. - To detect this conflict, the pg_dump worker process requests another - shared lock using the NOWAIT option. If the worker process is not granted + To detect this conflict, the pg_dump worker process requests another + shared lock using the NOWAIT option. If the worker process is not granted this shared lock, somebody else must have requested an exclusive lock in the meantime - and there is no way to continue with the dump, so pg_dump has no choice + and there is no way to continue with the dump, so pg_dump has no choice but to abort the dump. @@ -371,20 +384,20 @@ PostgreSQL documentation schema itself, and all its contained objects. When this option is not specified, all non-system schemas in the target database will be dumped. Multiple schemas can be - selected by writing multiple switches. 
Also, the schema parameter is interpreted as a pattern according to the same rules used by - psql's \d commands (see ), + psql's \d commands (see ), so multiple schemas can also be selected by writing wildcard characters in the pattern. When using wildcards, be careful to quote the pattern if needed to prevent the shell from expanding the wildcards; see - . + . - When is specified, pg_dump makes no attempt to dump any other database objects that the selected schema(s) might depend upon. Therefore, there is no guarantee that the results of a specific-schema dump can be successfully @@ -394,9 +407,9 @@ PostgreSQL documentation - Non-schema objects such as blobs are not dumped when is specified. You can add blobs back to the dump with the - switch. @@ -410,29 +423,29 @@ PostgreSQL documentation Do not dump any schemas matching the schema pattern. The pattern is - interpreted according to the same rules as for . + can be given more than once to exclude schemas matching any of several patterns. - When both and are given, the behavior + is to dump just the schemas that match at least one + switch but no switches. If appears + without , then schemas matching are excluded from what is otherwise a normal dump. - - + + Dump object identifiers (OIDs) as part of the data for every table. Use this option if your application references - the OID + the OID columns in some way (e.g., in a foreign key constraint). Otherwise, this option should not be used. @@ -440,21 +453,21 @@ PostgreSQL documentation - + Do not output commands to set ownership of objects to match the original database. By default, pg_dump issues - ALTER OWNER or + ALTER OWNER or SET SESSION AUTHORIZATION statements to set ownership of created database objects. These statements will fail when the script is run unless it is started by a superuser (or the same user that owns all of the objects in the script). To make a script that can be restored by any user, but will give - that user ownership of all the objects, specify . @@ -484,18 +497,18 @@ PostgreSQL documentation Dump only the object definitions (schema), not data. - This option is the inverse of . It is similar to, but for historical reasons not identical to, specifying - . - (Do not confuse this with the To exclude table data for only a subset of tables in the database, - see . @@ -506,7 +519,7 @@ PostgreSQL documentation Specify the superuser user name to use when disabling triggers. - This is relevant only if is used. (Usually, it's better to leave this out, and instead start the resulting script as superuser.) @@ -520,29 +533,29 @@ PostgreSQL documentation Dump only tables with names matching table. - For this purpose, table includes views, materialized views, + For this purpose, table includes views, materialized views, sequences, and foreign tables. Multiple tables - can be selected by writing multiple switches. Also, the table parameter is interpreted as a pattern according to the same rules used by - psql's \d commands (see ), + psql's \d commands (see ), so multiple tables can also be selected by writing wildcard characters in the pattern. When using wildcards, be careful to quote the pattern if needed to prevent the shell from expanding the wildcards; see - . + . - The and switches have no effect when + is used, because tables selected by will be dumped regardless of those switches, and non-table objects will not be dumped. - When is specified, pg_dump makes no attempt to dump any other database objects that the selected table(s) might depend upon. 
Therefore, there is no guarantee that the results of a specific-table dump can be successfully @@ -552,14 +565,14 @@ PostgreSQL documentation - The behavior of the switch is not entirely upward compatible with pre-8.2 PostgreSQL - versions. Formerly, writing -t tab would dump all - tables named tab, but now it just dumps whichever one + versions. Formerly, writing -t tab would dump all + tables named tab, but now it just dumps whichever one is visible in your default search path. To get the old behavior - you can write -t '*.tab'. Also, you must write something - like -t sch.tab to select a table in a particular schema, - rather than the old locution of -n sch -t tab. + you can write -t '*.tab'. Also, you must write something + like -t sch.tab to select a table in a particular schema, + rather than the old locution of -n sch -t tab. @@ -572,24 +585,24 @@ PostgreSQL documentation Do not dump any tables matching the table pattern. The pattern is - interpreted according to the same rules as for . + can be given more than once to exclude tables matching any of several patterns. - When both and are given, the behavior + is to dump just the tables that match at least one + switch but no switches. If appears + without , then tables matching are excluded from what is otherwise a normal dump. - - + + Specifies verbose mode. This will cause @@ -601,8 +614,8 @@ PostgreSQL documentation - - + + Print the pg_dump version and exit. @@ -611,9 +624,9 @@ PostgreSQL documentation - - - + + + Prevent dumping of access privileges (grant/revoke commands). @@ -632,7 +645,7 @@ PostgreSQL documentation at a moderate level. For plain text output, setting a nonzero compression level causes the entire output file to be compressed, as though it had been - fed through gzip; but the default is not to compress. + fed through gzip; but the default is not to compress. The tar archive format currently does not support compression at all. @@ -670,7 +683,7 @@ PostgreSQL documentation - + This option disables the use of dollar quoting for function bodies, @@ -680,7 +693,7 @@ PostgreSQL documentation - + This option is relevant only when creating a data-only dump. @@ -692,9 +705,9 @@ PostgreSQL documentation - Presently, the commands emitted for must be done as superuser. So, you should also specify - a superuser name with , or preferably be careful to start the resulting script as a superuser. @@ -707,16 +720,16 @@ PostgreSQL documentation - + This option is relevant only when dumping the contents of a table which has row security. By default, pg_dump will set - to off, to ensure + to off, to ensure that all data is dumped from the table. If the user does not have sufficient privileges to bypass row security, then an error is thrown. This parameter instructs pg_dump to set - to on instead, allowing the user + to on instead, allowing the user to dump the parts of the contents of the table that they have access to. @@ -734,14 +747,14 @@ PostgreSQL documentation Do not dump data for any tables matching the table pattern. The pattern is - interpreted according to the same rules as for . + can be given more than once to exclude tables matching any of several patterns. This option is useful when you need the definition of a particular table even though you do not need the data in it. - To exclude data for all tables in the database, see . @@ -752,7 +765,7 @@ PostgreSQL documentation Use conditional commands (i.e. add an IF EXISTS clause) when cleaning database objects. This option is not valid - unless is also specified. 
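As an illustrative sketch only (the database name mydb and the table pattern are hypothetical, and the spellings used are the long forms of the switches described above), a plain-text dump that drops objects with IF EXISTS clauses before recreating them, and that omits the data of logging tables, could be taken with:

$ pg_dump --clean --if-exists --exclude-table-data='*_log' mydb > db.sql

Because the plain-text format is used, a nonzero compression level could also be requested, producing output as though it had been fed through gzip:

$ pg_dump --clean --if-exists -Z 5 mydb > db.sql.gz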
@@ -776,21 +789,58 @@ PostgreSQL documentation + + + + + When dumping data for a table partition, make + the COPY or INSERT statements + target the root of the partitioning hierarchy that contains it, rather + than the partition itself. This causes the appropriate partition to + be re-determined for each row when the data is loaded. This may be + useful when reloading data on a server where rows do not always fall + into the same partitions as they did on the original server. That + could happen, for example, if the partitioning column is of type text + and the two systems have different definitions of the collation used + to sort the partitioning column. + + + + It is best not to use parallelism when restoring from an archive made + with this option, because pg_restore will + not know exactly which partition(s) a given archive data item will + load data into. This could result in inefficiency due to lock + conflicts between parallel jobs, or perhaps even reload failures due + to foreign key constraints being set up before all the relevant data + is loaded. + + + + Do not wait forever to acquire shared table locks at the beginning of the dump. Instead fail if unable to lock a table within the specified - timeout. The timeout may be + timeout. The timeout may be specified in any of the formats accepted by SET - statement_timeout. (Allowed formats vary depending on the server + statement_timeout. (Allowed formats vary depending on the server version you are dumping from, but an integer number of milliseconds is accepted by all versions.) + + + + + Do not dump comments. + + + + @@ -833,10 +883,10 @@ PostgreSQL documentation - + - This option allows running pg_dump -j against a pre-9.2 + This option allows running pg_dump -j against a pre-9.2 server, see the documentation of the parameter for more details. @@ -873,34 +923,31 @@ PostgreSQL documentation - + - Force quoting of all identifiers. This option is recommended when - dumping a database from a server whose PostgreSQL - major version is different from pg_dump's, or when - the output is intended to be loaded into a server of a different - major version. By default, pg_dump quotes only - identifiers that are reserved words in its own major version. - This sometimes results in compatibility issues when dealing with - servers of other versions that may have slightly different sets - of reserved words. Using or + is also specified. - + - When dumping a COPY or INSERT statement for a partitioned table, - target the root of the partitioning hierarchy which contains it rather - than the partition itself. This may be useful when reloading data on - a server where rows do not always fall into the same partitions as - they did on the original server. This could happen, for example, if - the partitioning column is of type text and the two system have - different definitions of the collation used to partition the data. + Force quoting of all identifiers. This option is recommended when + dumping a database from a server whose PostgreSQL + major version is different from pg_dump's, or when + the output is intended to be loaded into a server of a different + major version. By default, pg_dump quotes only + identifiers that are reserved words in its own major version. + This sometimes results in compatibility issues when dealing with + servers of other versions that may have slightly different sets + of reserved words. Using prevents + such issues, at the price of a harder-to-read dump script. 
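As a hedged example of combining the switches above (the database name mydb is hypothetical), a dump intended for a server of a different major version might quote all identifiers, skip comments, and give up rather than wait more than a minute for table locks:

$ pg_dump --quote-all-identifiers --no-comments --lock-wait-timeout=60000 mydb > db.sql

The timeout is given here as an integer number of milliseconds, which is accepted by all server versions.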
@@ -910,7 +957,7 @@ PostgreSQL documentation Only dump the named section. The section name can be - , , or . This option can be specified more than once to select multiple sections. The default is to dump all sections. @@ -933,7 +980,7 @@ PostgreSQL documentation states; but do this by waiting for a point in the transaction stream at which no anomalies can be present, so that there isn't a risk of the dump failing or causing other transactions to roll back with a - serialization_failure. See + serialization_failure. See for more information about transaction isolation and concurrency control. @@ -965,12 +1012,12 @@ PostgreSQL documentation Use the specified synchronized snapshot when making a dump of the database (see - for more + for more details). This option is useful when needing to synchronize the dump with - a logical replication slot (see ) + a logical replication slot (see ) or with a concurrent session. @@ -981,7 +1028,7 @@ PostgreSQL documentation - + Require that each schema @@ -1003,23 +1050,23 @@ PostgreSQL documentation - + - Output SQL-standard SET SESSION AUTHORIZATION commands - instead of ALTER OWNER commands to determine object + Output SQL-standard SET SESSION AUTHORIZATION commands + instead of ALTER OWNER commands to determine object ownership. This makes the dump more standards-compatible, but depending on the history of the objects in the dump, might not restore - properly. Also, a dump using SET SESSION AUTHORIZATION + properly. Also, a dump using SET SESSION AUTHORIZATION will certainly require superuser privileges to restore correctly, - whereas ALTER OWNER requires lesser privileges. + whereas ALTER OWNER requires lesser privileges. - - + + Show help about pg_dump command line @@ -1036,8 +1083,8 @@ PostgreSQL documentation - - + + Specifies the name of the database to connect to. This is @@ -1050,7 +1097,7 @@ PostgreSQL documentation with a valid URI prefix (postgresql:// or postgres://), it is treated as a - conninfo string. See for more information. + conninfo string. See for more information. @@ -1093,8 +1140,8 @@ PostgreSQL documentation - - + + Never issue a password prompt. If the server requires @@ -1122,7 +1169,7 @@ PostgreSQL documentation for a password if the server demands password authentication. However, pg_dump will waste a connection attempt finding out that the server wants a password. - In some cases it is worth typing to avoid the extra connection attempt. @@ -1133,11 +1180,11 @@ PostgreSQL documentation Specifies a role name to be used to create the dump. - This option causes pg_dump to issue a - SET ROLE rolename + This option causes pg_dump to issue a + SET ROLE rolename command after connecting to the database. It is useful when the - authenticated user (specified by @@ -1192,7 +1239,7 @@ PostgreSQL documentation The database activity of pg_dump is normally collected by the statistics collector. If this is - undesirable, you can set parameter track_counts + undesirable, you can set parameter track_counts to false via PGOPTIONS or the ALTER USER command. @@ -1204,11 +1251,11 @@ PostgreSQL documentation Notes - If your database cluster has any local additions to the template1 database, + If your database cluster has any local additions to the template1 database, be careful to restore the output of pg_dump into a truly empty database; otherwise you are likely to get errors due to duplicate definitions of the added objects. 
To make an empty database - without any local additions, copy from template0 not template1, + without any local additions, copy from template0 not template1, for example: CREATE DATABASE foo WITH TEMPLATE template0; @@ -1216,7 +1263,7 @@ CREATE DATABASE foo WITH TEMPLATE template0; - When a data-only dump is chosen and the option is used, pg_dump emits commands to disable triggers on user tables before inserting the data, and then commands to re-enable them after the data has been @@ -1229,33 +1276,29 @@ CREATE DATABASE foo WITH TEMPLATE template0; does not contain the statistics used by the optimizer to make query planning decisions. Therefore, it is wise to run ANALYZE after restoring from a dump file - to ensure optimal performance; see - and for more information. - The dump file also does not - contain any ALTER DATABASE ... SET commands; - these settings are dumped by , - along with database users and other installation-wide settings. + to ensure optimal performance; see + and for more information. Because pg_dump is used to transfer data - to newer versions of PostgreSQL, the output of + to newer versions of PostgreSQL, the output of pg_dump can be expected to load into - PostgreSQL server versions newer than - pg_dump's version. pg_dump can also - dump from PostgreSQL servers older than its own version. + PostgreSQL server versions newer than + pg_dump's version. pg_dump can also + dump from PostgreSQL servers older than its own version. (Currently, servers back to version 8.0 are supported.) - However, pg_dump cannot dump from - PostgreSQL servers newer than its own major version; + However, pg_dump cannot dump from + PostgreSQL servers newer than its own major version; it will refuse to even try, rather than risk making an invalid dump. - Also, it is not guaranteed that pg_dump's output can + Also, it is not guaranteed that pg_dump's output can be loaded into a server of an older major version — not even if the dump was taken from a server of that version. Loading a dump file into an older server may require manual editing of the dump file to remove syntax not understood by the older server. Use of the option is recommended in cross-version cases, as it can prevent problems arising from varying - reserved-word lists in different PostgreSQL versions. + reserved-word lists in different PostgreSQL versions. 
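For instance (the host names oldhost and newhost are hypothetical, and a database of the same name is assumed to already exist on the new server), a database can be transferred from an older server to a newer one by running the newer installation's pg_dump and piping its output straight into psql:

$ pg_dump -h oldhost --quote-all-identifiers mydb | psql -h newhost -d mydb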
@@ -1276,7 +1319,7 @@ CREATE DATABASE foo WITH TEMPLATE template0; Examples - To dump a database called mydb into a SQL-script file: + To dump a database called mydb into a SQL-script file: $ pg_dump mydb > db.sql @@ -1284,7 +1327,7 @@ CREATE DATABASE foo WITH TEMPLATE template0; To reload such a script into a (freshly created) database named - newdb: + newdb: $ psql -d newdb -f db.sql @@ -1318,7 +1361,7 @@ CREATE DATABASE foo WITH TEMPLATE template0; To reload an archive file into a (freshly created) database named - newdb: + newdb: $ pg_restore -d newdb db.dump @@ -1326,7 +1369,16 @@ CREATE DATABASE foo WITH TEMPLATE template0; - To dump a single table named mytab: + To reload an archive file into the same database it was dumped from, + discarding the current contents of that database: + + +$ pg_restore -d postgres --clean --create db.dump + + + + + To dump a single table named mytab: $ pg_dump -t mytab mydb > db.sql @@ -1334,8 +1386,8 @@ CREATE DATABASE foo WITH TEMPLATE template0; - To dump all tables whose names start with emp in the - detroit schema, except for the table named + To dump all tables whose names start with emp in the + detroit schema, except for the table named employee_log: @@ -1344,9 +1396,9 @@ CREATE DATABASE foo WITH TEMPLATE template0; - To dump all schemas whose names start with east or - west and end in gsm, excluding any schemas whose - names contain the word test: + To dump all schemas whose names start with east or + west and end in gsm, excluding any schemas whose + names contain the word test: $ pg_dump -n 'east*gsm' -n 'west*gsm' -N '*test*' mydb > db.sql @@ -1371,10 +1423,10 @@ CREATE DATABASE foo WITH TEMPLATE template0; - To specify an upper-case or mixed-case name in and related switches, you need to double-quote the name; else it will be folded to lower case (see ). But + linkend="app-psql-patterns" endterm="app-psql-patterns-title"/>). But double quotes are special to the shell, so in turn they must be quoted. Thus, to dump a single table with a mixed-case name, you need something like @@ -1389,9 +1441,9 @@ CREATE DATABASE foo WITH TEMPLATE template0; See Also - - - + + + diff --git a/doc/src/sgml/ref/pg_dumpall.sgml b/doc/src/sgml/ref/pg_dumpall.sgml index f8a2521743..c51a130f43 100644 --- a/doc/src/sgml/ref/pg_dumpall.sgml +++ b/doc/src/sgml/ref/pg_dumpall.sgml @@ -3,7 +3,7 @@ doc/src/sgml/ref/pg_dumpall.sgml PostgreSQL documentation --> - + pg_dumpall @@ -32,17 +32,14 @@ PostgreSQL documentation pg_dumpall is a utility for writing out - (dumping) all PostgreSQL databases + (dumping) all PostgreSQL databases of a cluster into one script file. The script file contains SQL commands that can be used as input to to restore the databases. It does this by - calling for each database in a cluster. + linkend="app-psql"/> to restore the databases. It does this by + calling for each database in the cluster. pg_dumpall also dumps global objects - that are common to all databases. + that are common to all databases, that is, database roles and tablespaces. (pg_dump does not save these objects.) - This currently includes information about database users and - groups, tablespaces, and properties such as access permissions - that apply to databases as a whole. @@ -50,7 +47,7 @@ PostgreSQL documentation databases you will most likely have to connect as a database superuser in order to produce a complete dump. Also you will need superuser privileges to execute the saved script in order to be - allowed to add users and groups, and to create databases. 
+ allowed to add roles and create databases. @@ -63,8 +60,8 @@ PostgreSQL documentation times to the PostgreSQL server (once per database). If you use password authentication it will ask for a password each time. It is convenient to have a - ~/.pgpass file in such cases. See for more information. + ~/.pgpass file in such cases. See for more information. @@ -78,8 +75,8 @@ PostgreSQL documentation - - + + Dump only the data, not the schema (data definitions). @@ -93,12 +90,25 @@ PostgreSQL documentation Include SQL commands to clean (drop) databases before - recreating them. DROP commands for roles and + recreating them. DROP commands for roles and tablespaces are added as well. + + + + + + Create the dump in the specified character set encoding. By default, + the dump is created in the database encoding. (Another way to get the + same result is to set the PGCLIENTENCODING environment + variable to the desired dump encoding.) + + + + @@ -121,13 +131,13 @@ PostgreSQL documentation - - + + Dump object identifiers (OIDs) as part of the data for every table. Use this option if your application references - the OID + the OID columns in some way (e.g., in a foreign key constraint). Otherwise, this option should not be used. @@ -135,21 +145,21 @@ PostgreSQL documentation - + Do not output commands to set ownership of objects to match the original database. By default, pg_dumpall issues - ALTER OWNER or + ALTER OWNER or SET SESSION AUTHORIZATION statements to set ownership of created schema elements. These statements will fail when the script is run unless it is started by a superuser (or the same user that owns all of the objects in the script). To make a script that can be restored by any user, but will give - that user ownership of all the objects, specify . @@ -180,7 +190,7 @@ PostgreSQL documentation Specify the superuser user name to use when disabling triggers. - This is relevant only if is used. (Usually, it's better to leave this out, and instead start the resulting script as superuser.) @@ -198,21 +208,21 @@ PostgreSQL documentation - - + + Specifies verbose mode. This will cause pg_dumpall to output start/stop times to the dump file, and progress messages to standard error. - It will also enable verbose output in pg_dump. + It will also enable verbose output in pg_dump. - - + + Print the pg_dumpall version and exit. @@ -221,9 +231,9 @@ PostgreSQL documentation - - - + + + Prevent dumping of access privileges (grant/revoke commands). @@ -260,7 +270,7 @@ PostgreSQL documentation - + This option disables the use of dollar quoting for function bodies, @@ -270,7 +280,7 @@ PostgreSQL documentation - + This option is relevant only when creating a data-only dump. @@ -282,9 +292,9 @@ PostgreSQL documentation - Presently, the commands emitted for must be done as superuser. So, you should also specify - a superuser name with , or preferably be careful to start the resulting script as a superuser. @@ -295,8 +305,8 @@ PostgreSQL documentation Use conditional commands (i.e. add an IF EXISTS - clause) to clean databases and other objects. This option is not valid - unless is also specified. @@ -316,15 +326,37 @@ PostgreSQL documentation + + + + + When dumping data for a table partition, make + the COPY or INSERT statements + target the root of the partitioning hierarchy that contains it, rather + than the partition itself. This causes the appropriate partition to + be re-determined for each row when the data is loaded. 
This may be + useful when reloading data on a server where rows do not always fall + into the same partitions as they did on the original server. That + could happen, for example, if the partitioning column is of type text + and the two systems have different definitions of the collation used + to sort the partitioning column. + + + + + + Do not wait forever to acquire shared table locks at the beginning of the dump. Instead, fail if unable to lock a table within the specified - timeout. The timeout may be + timeout. The timeout may be specified in any of the formats accepted by SET - statement_timeout. Allowed values vary depending on the server + statement_timeout. Allowed values vary depending on the server version you are dumping from, but an integer number of milliseconds is accepted by all versions since 7.3. This option is ignored when dumping from a pre-7.3 server. @@ -332,6 +364,15 @@ PostgreSQL documentation + + + + + Do not dump comments. + + + + @@ -413,44 +454,41 @@ PostgreSQL documentation - + - Force quoting of all identifiers. This option is recommended when - dumping a database from a server whose PostgreSQL - major version is different from pg_dumpall's, or when - the output is intended to be loaded into a server of a different - major version. By default, pg_dumpall quotes only - identifiers that are reserved words in its own major version. - This sometimes results in compatibility issues when dealing with - servers of other versions that may have slightly different sets - of reserved words. Using or + is also specified. - + - When dumping a COPY or INSERT statement for a partitioned table, - target the root of the partitioning hierarchy which contains it rather - than the partition itself. This may be useful when reloading data on - a server where rows do not always fall into the same partitions as - they did on the original server. This could happen, for example, if - the partitioning column is of type text and the two system have - different definitions of the collation used to partition the data. + Force quoting of all identifiers. This option is recommended when + dumping a database from a server whose PostgreSQL + major version is different from pg_dumpall's, or when + the output is intended to be loaded into a server of a different + major version. By default, pg_dumpall quotes only + identifiers that are reserved words in its own major version. + This sometimes results in compatibility issues when dealing with + servers of other versions that may have slightly different sets + of reserved words. Using prevents + such issues, at the price of a harder-to-read dump script. - + - Output SQL-standard SET SESSION AUTHORIZATION commands - instead of ALTER OWNER commands to determine object + Output SQL-standard SET SESSION AUTHORIZATION commands + instead of ALTER OWNER commands to determine object ownership. This makes the dump more standards compatible, but depending on the history of the objects in the dump, might not restore properly. @@ -459,8 +497,8 @@ PostgreSQL documentation - - + + Show help about pg_dumpall command line @@ -482,15 +520,16 @@ PostgreSQL documentation Specifies parameters used to connect to the server, as a connection - string. See for more information. + string. See for more information. - The option is called --dbname for consistency with other + The option is called --dbname for consistency with other client applications, but because pg_dumpall - needs to connect to many databases, database name in the connection - string will be ignored. 
Use -l option to specify - the name of the database used to dump global objects and to discover - what other databases should be dumped. + needs to connect to many databases, the database name in the + connection string will be ignored. Use the -l + option to specify the name of the database used for the initial + connection, which will dump global objects and discover what other + databases should be dumped. @@ -546,8 +585,8 @@ PostgreSQL documentation - - + + Never issue a password prompt. If the server requires @@ -575,14 +614,14 @@ PostgreSQL documentation for a password if the server demands password authentication. However, pg_dumpall will waste a connection attempt finding out that the server wants a password. - In some cases it is worth typing to avoid the extra connection attempt. Note that the password prompt will occur again for each database to be dumped. Usually, it's better to set up a - ~/.pgpass file than to rely on manual password entry. + ~/.pgpass file than to rely on manual password entry. @@ -592,11 +631,11 @@ PostgreSQL documentation Specifies a role name to be used to create the dump. - This option causes pg_dumpall to issue a - SET ROLE rolename + This option causes pg_dumpall to issue a + SET ROLE rolename command after connecting to the database. It is useful when the - authenticated user (specified by - Once restored, it is wise to run ANALYZE on each + The option can be useful even when your + intention is to restore the dump script into a fresh cluster. Use of + authorizes the script to drop and re-create the + built-in postgres and template1 + databases, ensuring that those databases will retain the same properties + (for instance, locale and encoding) that they had in the source cluster. + Without the option, those databases will retain their existing + database-level properties, as well as any pre-existing contents. + + + + Once restored, it is wise to run ANALYZE on each database so the optimizer has useful statistics. You - can also run vacuumdb -a -z to analyze all + can also run vacuumdb -a -z to analyze all databases. + + The dump script should not be expected to run completely without errors. + In particular, because the script will issue CREATE ROLE + for every role existing in the source cluster, it is certain to get a + role already exists error for the bootstrap superuser, + unless the destination cluster was initialized with a different bootstrap + superuser name. This error is harmless and should be ignored. Use of + the option is likely to produce additional + harmless error messages about non-existent objects, although you can + minimize those by adding . + + pg_dumpall requires all needed tablespace directories to exist before the restore; otherwise, @@ -675,10 +737,13 @@ PostgreSQL documentation $ psql -f db.out postgres - (It is not important to which database you connect here since the + It is not important to which database you connect here since the script file created by pg_dumpall will contain the appropriate commands to create and connect to the saved - databases.) + databases. An exception is that if you specified , + you must connect to the postgres database initially; + the script will attempt to drop other databases immediately, and that + will fail for the database you are connected to. @@ -686,7 +751,7 @@ PostgreSQL documentation See Also - Check for details on possible + Check for details on possible error conditions. 
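As a short sketch of the --clean workflow described in the notes above (the file name cluster.sql is arbitrary), the whole cluster can be dumped with drop commands included and then restored by connecting initially to the postgres database:

$ pg_dumpall --clean > cluster.sql
$ psql -f cluster.sql -d postgres

Running ANALYZE afterwards, for example via vacuumdb -a -z, is advisable as noted above.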
diff --git a/doc/src/sgml/ref/pg_isready.sgml b/doc/src/sgml/ref/pg_isready.sgml index 2ee79a0bbe..9567b57ebe 100644 --- a/doc/src/sgml/ref/pg_isready.sgml +++ b/doc/src/sgml/ref/pg_isready.sgml @@ -43,8 +43,8 @@ PostgreSQL documentation - - + + Specifies the name of the database to connect to. @@ -55,14 +55,14 @@ PostgreSQL documentation (postgresql:// or postgres://), it is treated as a conninfo string. See for more information. + linkend="libpq-connstring"/> for more information. - - + + Specifies the host name of the machine on which the @@ -74,8 +74,8 @@ PostgreSQL documentation - - + + Specifies the TCP port or the local Unix-domain @@ -98,8 +98,8 @@ PostgreSQL documentation - - + + The maximum number of seconds to wait when attempting connection before @@ -110,8 +110,8 @@ PostgreSQL documentation - - + + Connect to the database as the user - - + + Print the pg_isready version and exit. @@ -131,8 +131,8 @@ PostgreSQL documentation - - + + Show help about pg_isready command line @@ -159,10 +159,10 @@ PostgreSQL documentation Environment - pg_isready, like most other PostgreSQL + pg_isready, like most other PostgreSQL utilities, - also uses the environment variables supported by libpq - (see ). + also uses the environment variables supported by libpq + (see ). diff --git a/doc/src/sgml/ref/pg_receivewal.sgml b/doc/src/sgml/ref/pg_receivewal.sgml index 7c82e36c7c..a18ddd4bff 100644 --- a/doc/src/sgml/ref/pg_receivewal.sgml +++ b/doc/src/sgml/ref/pg_receivewal.sgml @@ -22,7 +22,7 @@ PostgreSQL documentation pg_receivewal - option + option @@ -36,22 +36,22 @@ PostgreSQL documentation log is streamed using the streaming replication protocol, and is written to a local directory of files. This directory can be used as the archive location for doing a restore using point-in-time recovery (see - ). + ). pg_receivewal streams the write-ahead log in real time as it's being generated on the server, and does not wait - for segments to complete like does. + for segments to complete like does. For this reason, it is not necessary to set - when using + when using pg_receivewal. - Unlike the WAL receiver of a PostgreSQL standby server, pg_receivewal + Unlike the WAL receiver of a PostgreSQL standby server, pg_receivewal by default flushes WAL data only when a WAL file is closed. - The option must be specified to flush WAL data in real time. @@ -60,9 +60,9 @@ PostgreSQL documentation PostgreSQL connection and uses the replication protocol. The connection must be made with a superuser or a user having REPLICATION permissions (see - ), and pg_hba.conf + ), and pg_hba.conf must permit the replication connection. The server must also be - configured with set high enough to + configured with set high enough to leave at least one session available for the stream. @@ -77,7 +77,7 @@ PostgreSQL documentation In the absence of fatal errors, pg_receivewal will run until terminated by the SIGINT signal - (ControlC). + (ControlC). @@ -98,6 +98,22 @@ PostgreSQL documentation + + + + + + Automatically stop replication and exit with normal exit status 0 when + receiving reaches the specified LSN. + + + + If there is a record with LSN exactly equal to lsn, + the record will be processed. + + + + @@ -119,6 +135,23 @@ PostgreSQL documentation + + + + + This option causes pg_receivewal to not force WAL + data to be flushed to disk. This is faster, but means that a + subsequent operating system crash can leave the WAL segments corrupt. 
+ Generally, this option is useful for testing but should not be used + when doing WAL archiving on a production deployment. + + + + This option is incompatible with --synchronous. + + + + @@ -139,8 +172,8 @@ PostgreSQL documentation Require pg_receivewal to use an existing - replication slot (see ). - When this option is used, pg_receivewal will report + replication slot (see ). + When this option is used, pg_receivewal will report a flush position to the server, indicating when each segment has been synchronized to disk so that the server can remove that segment if it is not otherwise needed. @@ -165,7 +198,7 @@ PostgreSQL documentation Flush the WAL data to disk immediately after it has been received. Also send a status packet back to the server immediately after flushing, - regardless of --status-interval. + regardless of --status-interval. @@ -211,10 +244,10 @@ PostgreSQL documentation Specifies parameters used to connect to the server, as a connection - string. See for more information. + string. See for more information. - The option is called --dbname for consistency with other + The option is called --dbname for consistency with other client applications, but because pg_receivewal doesn't connect to any particular database in the cluster, database name in the connection string will be ignored. @@ -260,8 +293,8 @@ PostgreSQL documentation - - + + Never issue a password prompt. If the server requires @@ -289,7 +322,7 @@ PostgreSQL documentation for a password if the server demands password authentication. However, pg_receivewal will waste a connection attempt finding out that the server wants a password. - In some cases it is worth typing to avoid the extra connection attempt. @@ -329,8 +362,8 @@ PostgreSQL documentation - - + + Print the pg_receivewal version and exit. @@ -339,8 +372,8 @@ PostgreSQL documentation - - + + Show help about pg_receivewal command line @@ -370,9 +403,9 @@ PostgreSQL documentation Environment - This utility, like most other PostgreSQL utilities, - uses the environment variables supported by libpq - (see ). + This utility, like most other PostgreSQL utilities, + uses the environment variables supported by libpq + (see ). @@ -382,16 +415,22 @@ PostgreSQL documentation When using pg_receivewal instead of - as the main WAL backup method, it is + as the main WAL backup method, it is strongly recommended to use replication slots. Otherwise, the server is free to recycle or remove write-ahead log files before they are backed up, because it does not have any information, either - from or the replication slots, about + from or the replication slots, about how far the WAL stream has been archived. Note, however, that a replication slot will fill up the server's disk space if the receiver does not keep up with fetching the WAL data. + + pg_receivewal will preserve group permissions on + the received WAL files if group permissions are enabled on the source + cluster. + + @@ -410,7 +449,7 @@ PostgreSQL documentation See Also - + diff --git a/doc/src/sgml/ref/pg_recvlogical.sgml b/doc/src/sgml/ref/pg_recvlogical.sgml index 9c7bb1907b..141c5cddce 100644 --- a/doc/src/sgml/ref/pg_recvlogical.sgml +++ b/doc/src/sgml/ref/pg_recvlogical.sgml @@ -35,16 +35,16 @@ PostgreSQL documentation It creates a replication-mode connection, so it is subject to the same - constraints as , plus those for logical - replication (see ). + constraints as , plus those for logical + replication (see ). 
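As a minimal usage sketch (the slot name test_slot and the database mydb are hypothetical; the default test_decoding output plugin is assumed to be available), a logical slot can be created and its changes streamed to standard output with:

$ pg_recvlogical -d mydb --slot=test_slot --create-slot
$ pg_recvlogical -d mydb --slot=test_slot --start -f -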
- pg_recvlogical has no equivalent to the logical decoding + pg_recvlogical has no equivalent to the logical decoding SQL interface's peek and get modes. It sends replay confirmations for data lazily as it receives it and on clean exit. To examine pending data on a slot without consuming it, use - pg_logical_slot_peek_changes. + pg_logical_slot_peek_changes. @@ -125,7 +125,7 @@ PostgreSQL documentation - If there's a record with LSN exactly equal to lsn, + If there's a record with LSN exactly equal to lsn, the record will be output. @@ -145,7 +145,7 @@ PostgreSQL documentation Write received and decoded transaction data into this - file. Use - for stdout. + file. Use - for stdout. @@ -182,8 +182,8 @@ PostgreSQL documentation In mode, start replication from the given LSN. For details on the effect of this, see the documentation - in - and . Ignored in other modes. + in + and . Ignored in other modes. @@ -226,7 +226,7 @@ PostgreSQL documentation When creating a slot, use the specified logical decoding output - plugin. See . This option has no + plugin. See . This option has no effect if the slot already exists. @@ -238,7 +238,7 @@ PostgreSQL documentation This option has the same effect as the option of the same name - in . See the description there. + in . See the description there. @@ -257,8 +257,8 @@ PostgreSQL documentation - - + + Enables verbose mode. @@ -279,7 +279,7 @@ PostgreSQL documentation The database to connect to. See the description of the actions for what this means in detail. This can be a libpq connection string; - see for more information. Defaults + see for more information. Defaults to user name. @@ -353,7 +353,7 @@ PostgreSQL documentation for a password if the server demands password authentication. However, pg_recvlogical will waste a connection attempt finding out that the server wants a password. - In some cases it is worth typing to avoid the extra connection attempt. @@ -366,8 +366,8 @@ PostgreSQL documentation - - + + Print the pg_recvlogical version and exit. @@ -376,8 +376,8 @@ PostgreSQL documentation - - + + Show help about pg_recvlogical command line @@ -393,17 +393,28 @@ PostgreSQL documentation Environment - This utility, like most other PostgreSQL utilities, - uses the environment variables supported by libpq - (see ). + This utility, like most other PostgreSQL utilities, + uses the environment variables supported by libpq + (see ). + + Notes + + + pg_recvlogical will preserve group permissions on + the received WAL files if group permissions are enabled on the source + cluster. + + + + Examples - See for an example. + See for an example. @@ -411,7 +422,7 @@ PostgreSQL documentation See Also - + diff --git a/doc/src/sgml/ref/pg_resetwal.sgml b/doc/src/sgml/ref/pg_resetwal.sgml index defaf170dc..3f885bdd62 100644 --- a/doc/src/sgml/ref/pg_resetwal.sgml +++ b/doc/src/sgml/ref/pg_resetwal.sgml @@ -3,7 +3,7 @@ doc/src/sgml/ref/pg_resetwal.sgml PostgreSQL documentation --> - + pg_resetwal @@ -22,19 +22,31 @@ PostgreSQL documentation pg_resetwal - - + + + + + + + + option - datadir + + + + + + datadir + - + Description pg_resetwal clears the write-ahead log (WAL) and optionally resets some other control information stored in the - pg_control file. This function is sometimes needed + pg_control file. This function is sometimes needed if these files have become corrupted. It should be used only as a last resort, when the server will not start due to such corruption. 
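As a cautious usage sketch (the data directory path is hypothetical), the dry-run option described below can be used first to inspect the values pg_resetwal would reconstruct, before letting it modify anything:

$ pg_resetwal -n /usr/local/pgsql/data
$ pg_resetwal /usr/local/pgsql/data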
@@ -43,7 +55,7 @@ PostgreSQL documentation After running this command, it should be possible to start the server, but bear in mind that the database might contain inconsistent data due to partially-committed transactions. You should immediately dump your data, - run initdb, and reload. After reload, check for + run initdb, and reload. After reload, check for inconsistencies and repair as needed. @@ -52,21 +64,21 @@ PostgreSQL documentation it requires read/write access to the data directory. For safety reasons, you must specify the data directory on the command line. pg_resetwal does not use the environment variable - PGDATA. + PGDATA. If pg_resetwal complains that it cannot determine - valid data for pg_control, you can force it to proceed anyway - by specifying the (force) option. In this case plausible values will be substituted for the missing data. Most of the fields can be expected to match, but manual assistance might be needed for the next OID, next transaction ID and epoch, next multitransaction ID and offset, and - WAL starting address fields. These fields can be set using the options + WAL starting location fields. These fields can be set using the options discussed below. If you are not able to determine correct values for all - these fields, can still be used, but the recovered database must be treated with even more suspicion than - usual: an immediate dump and reload is imperative. Do not + usual: an immediate dump and reload is imperative. Do not execute any data-modifying operations in the database before you dump, as any such action is likely to make the corruption worse. @@ -78,21 +90,23 @@ PostgreSQL documentation + Force pg_resetwal to proceed even if it cannot determine - valid data for pg_control, as explained above. + valid data for pg_control, as explained above. + - The / option instructs pg_resetwal to print the values reconstructed from - pg_control and values about to be changed, and then exit + pg_control and values about to be changed, and then exit without modifying anything. This is mainly a debugging tool, but can be useful as a sanity check before allowing pg_resetwal to proceed for real. @@ -116,14 +130,15 @@ PostgreSQL documentation The following options are only needed when pg_resetwal is unable to determine appropriate values - by reading pg_control. Safe values can be determined as + by reading pg_control. Safe values can be determined as described below. For values that take numeric arguments, hexadecimal values can be specified by using the prefix 0x. - xid,xid + + Manually set the oldest and newest transaction IDs for which the commit @@ -134,7 +149,7 @@ PostgreSQL documentation A safe value for the oldest transaction ID for which the commit time can be retrieved (first part) can be determined by looking for the numerically smallest file name in the directory - pg_commit_ts under the data directory. Conversely, a safe + pg_commit_ts under the data directory. Conversely, a safe value for the newest transaction ID for which the commit time can be retrieved (second part) can be determined by looking for the numerically greatest file name in the same directory. The file names are in @@ -144,7 +159,8 @@ PostgreSQL documentation - xid_epoch + + Manually set the next transaction ID's epoch. @@ -155,8 +171,8 @@ PostgreSQL documentation except in the field that is set by pg_resetwal, so any value will work so far as the database itself is concerned. 
You might need to adjust this value to ensure that replication - systems such as Slony-I and - Skytools work correctly — + systems such as Slony-I and + Skytools work correctly — if so, an appropriate value should be obtainable from the state of the downstream replicated database. @@ -164,38 +180,48 @@ PostgreSQL documentation - walfile + + - Manually set the WAL starting address. + Manually set the WAL starting location by specifying the name of the + next WAL segment file. - The WAL starting address should be + The name of next WAL segment file should be larger than any WAL segment file name currently existing in - the directory pg_wal under the data directory. + the directory pg_wal under the data directory. These names are also in hexadecimal and have three parts. The first - part is the timeline ID and should usually be kept the same. - For example, if 00000001000000320000004A is the - largest entry in pg_wal, use -l 00000001000000320000004B or higher. + part is the timeline ID and should usually be kept the same. + For example, if 00000001000000320000004A is the + largest entry in pg_wal, use -l 00000001000000320000004B or higher. + + + + Note that when using nondefault WAL segment sizes, the numbers in the WAL + file names are different from the LSNs that are reported by system + functions and system views. This option takes a WAL file name, not an + LSN. pg_resetwal itself looks at the files in - pg_wal and chooses a default setting beyond the last existing file name. Therefore, manual adjustment of - - mxid,mxid + + Manually set the next and oldest multitransaction ID. @@ -204,10 +230,10 @@ PostgreSQL documentation A safe value for the next multitransaction ID (first part) can be determined by looking for the numerically largest file name in the - directory pg_multixact/offsets under the data directory, + directory pg_multixact/offsets under the data directory, adding one, and then multiplying by 65536 (0x10000). Conversely, a safe value for the oldest multitransaction ID (second part of - ) can be determined by looking for the numerically smallest file name in the same directory and multiplying by 65536. The file names are in hexadecimal, so the easiest way to do this is to specify the option value in hexadecimal and append four zeroes. @@ -216,7 +242,8 @@ PostgreSQL documentation - oid + + Manually set the next OID. @@ -231,7 +258,8 @@ PostgreSQL documentation - mxoff + + Manually set the next multitransaction offset. @@ -239,7 +267,7 @@ PostgreSQL documentation A safe value can be determined by looking for the numerically largest - file name in the directory pg_multixact/members under the + file name in the directory pg_multixact/members under the data directory, adding one, and then multiplying by 52352 (0xCC80). The file names are in hexadecimal. There is no simple recipe such as the ones for other options of appending zeroes. @@ -248,7 +276,30 @@ PostgreSQL documentation - xid + + + + Set the new WAL segment size, in megabytes. The value must be set to a + power of 2 between 1 and 1024 (megabytes). See the same option of for more information. + + + + + While pg_resetwal will set the WAL starting address + beyond the latest existing WAL segment file, some segment size changes + can cause previous WAL file names to be reused. It is recommended to + use together with this option to manually set the + WAL starting address if WAL file name overlap will cause problems with + your archiving strategy. + + + + + + + + Manually set the next transaction ID. 
@@ -256,12 +307,12 @@ PostgreSQL documentation A safe value can be determined by looking for the numerically largest - file name in the directory pg_xact under the data directory, + file name in the directory pg_xact under the data directory, adding one, and then multiplying by 1048576 (0x100000). Note that the file names are in hexadecimal. It is usually easiest to specify the option value in - hexadecimal too. For example, if 0011 is the largest entry - in pg_xact, -x 0x1200000 will work (five + hexadecimal too. For example, if 0011 is the largest entry + in pg_xact, -x 0x1200000 will work (five trailing zeroes provide the proper multiplier). @@ -292,7 +343,7 @@ PostgreSQL documentation See Also - + diff --git a/doc/src/sgml/ref/pg_restore.sgml b/doc/src/sgml/ref/pg_restore.sgml index 5180103526..725acb192c 100644 --- a/doc/src/sgml/ref/pg_restore.sgml +++ b/doc/src/sgml/ref/pg_restore.sgml @@ -1,6 +1,6 @@ - + pg_restore @@ -36,7 +36,7 @@ pg_restore is a utility for restoring a PostgreSQL database from an archive - created by in one of the non-plain-text + created by in one of the non-plain-text formats. It will issue the commands necessary to reconstruct the database to the state it was in at the time it was saved. The archive files also allow pg_restore to @@ -98,7 +98,7 @@ This option is similar to, but for historical reasons not identical - to, specifying . @@ -109,7 +109,7 @@ Clean (drop) database objects before recreating them. - (Unless is used, this might generate some harmless error messages, if any objects were not present in the destination database.) @@ -126,10 +126,21 @@ recreate the target database before connecting to it. + + With , pg_restore + also restores the database's comment if any, and any configuration + variable settings that are specific to this database, that is, + any ALTER DATABASE ... SET ... + and ALTER ROLE ... IN DATABASE ... SET ... + commands that mention this database. + Access privileges for the database itself are also restored, + unless is specified. + + When this option is used, the database named with - is used only to issue the initial DROP DATABASE and - CREATE DATABASE commands. All data is restored into the + is used only to issue the initial DROP DATABASE and + CREATE DATABASE commands. All data is restored into the database name that appears in the archive. @@ -183,8 +194,8 @@ - c - custom + c + custom The archive is in the custom format of @@ -194,8 +205,8 @@ - d - directory + d + directory The archive is a directory archive. @@ -204,8 +215,8 @@ - t - tar + t + tar The archive is a tar archive. @@ -222,7 +233,7 @@ Restore definition of named index only. Multiple indexes - may be specified with multiple switches. @@ -233,7 +244,7 @@ Run the most time-consuming parts - of pg_restore — those which load data, + of pg_restore — those which load data, create indexes, or create constraints — using multiple concurrent jobs. This option can dramatically reduce the time to restore a large database to a server running on a @@ -275,8 +286,8 @@ List the table of contents of the archive. The output of this operation can be used as input to the option. Note that - if filtering switches such as or are + used with , they will restrict the items listed. @@ -287,13 +298,13 @@ Restore only those archive elements that are listed in list-file, and restore them in the + class="parameter">list-file, and restore them in the order they appear in the file. Note that - if filtering switches such as or are + used with , they will further restrict the items restored. 
- list-file is normally created by - editing the output of a previous @@ -383,14 +394,14 @@ to the extent that schema entries are present in the archive. - This option is the inverse of . It is similar to, but for historical reasons not identical to, specifying - . - (Do not confuse this with the @@ -401,7 +412,7 @@ Specify the superuser user name to use when disabling triggers. - This is relevant only if is used. @@ -412,16 +423,16 @@ Restore definition and/or data of only the named table. - For this purpose, table includes views, materialized views, + For this purpose, table includes views, materialized views, sequences, and foreign tables. Multiple tables - can be selected by writing multiple switches. This option can be combined with the option to specify table(s) in a particular schema. - When is specified, pg_restore + When is specified, pg_restore makes no attempt to restore any other database objects that the selected table(s) might depend upon. Therefore, there is no guarantee that a specific-table restore into a clean database will @@ -433,14 +444,19 @@ This flag does not behave identically to the flag of pg_dump. There is not currently - any provision for wild-card matching in pg_restore, - nor can you include a schema name within its . + And, while pg_dump's + flag will also dump subsidiary objects (such as indexes) of the + selected table(s), + pg_restore's + flag does not include such subsidiary objects. - In versions prior to PostgreSQL 9.6, this flag + In versions prior to PostgreSQL 9.6, this flag matched only tables, not any other type of relation. @@ -453,7 +469,7 @@ Restore named trigger only. Multiple triggers may be specified with - multiple switches. @@ -469,8 +485,8 @@ - - + + Print the pg_restore version and exit. @@ -495,16 +511,16 @@ Execute the restore as a single transaction (that is, wrap the - emitted commands in BEGIN/COMMIT). This + emitted commands in BEGIN/COMMIT). This ensures that either all the commands complete successfully, or no changes are applied. This option implies - . - + This option is relevant only when performing a data-only restore. @@ -517,25 +533,25 @@ Presently, the commands emitted for - must be done as superuser. So you + should also specify a superuser name with or, preferably, run pg_restore as a - PostgreSQL superuser. + PostgreSQL superuser. - + This option is relevant only when restoring the contents of a table which has row security. By default, pg_restore will set - to off, to ensure + to off, to ensure that all data is restored in to the table. If the user does not have sufficient privileges to bypass row security, then an error is thrown. This parameter instructs pg_restore to set - to on instead, allowing the user to attempt to restore + to on instead, allowing the user to attempt to restore the contents of the table with row security enabled. This might still fail if the user does not have the right to insert the rows from the dump into the table. @@ -553,8 +569,18 @@ Use conditional commands (i.e. add an IF EXISTS - clause) when cleaning database objects. This option is not valid - unless is also specified. + + + + + + + + + Do not output commands to restore comments, even if the archive + contains them. @@ -568,8 +594,8 @@ With this option, data for such a table is skipped. This behavior is useful if the target database already contains the desired table contents. 
For example, - auxiliary tables for PostgreSQL extensions - such as PostGIS might already be loaded in + auxiliary tables for PostgreSQL extensions + such as PostGIS might already be loaded in the target database; specifying this option prevents duplicate or obsolete data from being loaded into them. @@ -627,7 +653,7 @@ Only restore the named section. The section name can be - , , or . This option can be specified more than once to select multiple sections. The default is to restore all sections. @@ -642,7 +668,7 @@ - + Require that each schema @@ -657,8 +683,8 @@ - Output SQL-standard SET SESSION AUTHORIZATION commands - instead of ALTER OWNER commands to determine object + Output SQL-standard SET SESSION AUTHORIZATION commands + instead of ALTER OWNER commands to determine object ownership. This makes the dump more standards-compatible, but depending on the history of the objects in the dump, might not restore properly. @@ -667,8 +693,8 @@ - - + + Show help about pg_restore command line @@ -723,8 +749,8 @@ - - + + Never issue a password prompt. If the server requires @@ -752,7 +778,7 @@ for a password if the server demands password authentication. However, pg_restore will waste a connection attempt finding out that the server wants a password. - In some cases it is worth typing to avoid the extra connection attempt. @@ -763,11 +789,11 @@ Specifies a role name to be used to perform the restore. - This option causes pg_restore to issue a - SET ROLE rolename + This option causes pg_restore to issue a + SET ROLE rolename command after connecting to the database. It is useful when the - authenticated user (specified by - This utility, like most other PostgreSQL utilities, - also uses the environment variables supported by libpq - (see ). However, it does not read + This utility, like most other PostgreSQL utilities, + also uses the environment variables supported by libpq + (see ). However, it does not read PGDATABASE when a database name is not supplied. @@ -817,7 +843,7 @@ internally executes SQL statements. If you have problems running pg_restore, make sure you are able to select information from the database using, for - example, . Also, any default connection + example, . Also, any default connection settings and environment variables used by the libpq front-end library will apply. @@ -829,11 +855,11 @@ If your installation has any local additions to the - template1 database, be careful to load the output of + template1 database, be careful to load the output of pg_restore into a truly empty database; otherwise you are likely to get errors due to duplicate definitions of the added objects. To make an empty database without any local - additions, copy from template0 not template1, for example: + additions, copy from template0 not template1, for example: CREATE DATABASE foo WITH TEMPLATE template0; @@ -846,7 +872,7 @@ CREATE DATABASE foo WITH TEMPLATE template0; When restoring data to a pre-existing table and the option - is used, pg_restore emits commands to disable triggers on user tables before inserting the data, then emits commands to re-enable them after the data has been inserted. If the restore is stopped in the @@ -867,15 +893,15 @@ CREATE DATABASE foo WITH TEMPLATE template0; - See also the documentation for details on + See also the documentation for details on limitations of pg_dump. - Once restored, it is wise to run ANALYZE on each + Once restored, it is wise to run ANALYZE on each restored table so the optimizer has useful statistics; see - and - for more information. 
+ and + for more information. @@ -885,7 +911,7 @@ CREATE DATABASE foo WITH TEMPLATE template0; Examples - Assume we have dumped a database called mydb into a + Assume we have dumped a database called mydb into a custom-format dump file: @@ -901,24 +927,24 @@ CREATE DATABASE foo WITH TEMPLATE template0; $ pg_restore -C -d postgres db.dump - The database named in the - To reload the dump into a new database called newdb: + To reload the dump into a new database called newdb: $ createdb -T template0 newdb $ pg_restore -d newdb db.dump - Notice we don't use , and instead connect directly to the database to be restored into. Also note that we clone the new database - from template0 not template1, to ensure it is + from template0 not template1, to ensure it is initially empty. @@ -976,9 +1002,9 @@ CREATE DATABASE foo WITH TEMPLATE template0; See Also - - - + + + diff --git a/doc/src/sgml/ref/pg_rewind.sgml b/doc/src/sgml/ref/pg_rewind.sgml index d5430d4324..e2662bbf81 100644 --- a/doc/src/sgml/ref/pg_rewind.sgml +++ b/doc/src/sgml/ref/pg_rewind.sgml @@ -41,7 +41,7 @@ PostgreSQL documentation Description - pg_rewind is a tool for synchronizing a PostgreSQL cluster + pg_rewind is a tool for synchronizing a PostgreSQL cluster with another copy of the same cluster, after the clusters' timelines have diverged. A typical scenario is to bring an old master server back online after failover as a standby that follows the new master. @@ -51,50 +51,70 @@ PostgreSQL documentation The result is equivalent to replacing the target data directory with the source one. Only changed blocks from relation files are copied; all other files are copied in full, including configuration files. The - advantage of pg_rewind over taking a new base backup, or - tools like rsync, is that pg_rewind does + advantage of pg_rewind over taking a new base backup, or + tools like rsync, is that pg_rewind does not require reading through unchanged blocks in the cluster. This makes it a lot faster when the database is large and only a small fraction of blocks differ between the clusters. - pg_rewind examines the timeline histories of the source + pg_rewind examines the timeline histories of the source and target clusters to determine the point where they diverged, and - expects to find WAL in the target cluster's pg_wal directory + expects to find WAL in the target cluster's pg_wal directory reaching all the way back to the point of divergence. The point of divergence can be found either on the target timeline, the source timeline, or their common ancestor. In the typical failover scenario where the target cluster was shut down soon after the divergence, this is not a problem, but if the target cluster ran for a long time after the divergence, the old WAL files might no longer be present. In that case, they can be manually - copied from the WAL archive to the pg_wal directory, or - fetched on startup by configuring recovery.conf. The use of - pg_rewind is not limited to failover, e.g. a standby + copied from the WAL archive to the pg_wal directory, or + fetched on startup by configuring recovery.conf. The use of + pg_rewind is not limited to failover, e.g. a standby server can be promoted, run some write transactions, and then rewinded to become a standby again. When the target server is started for the first time after running - pg_rewind, it will go into recovery mode and replay all + pg_rewind, it will go into recovery mode and replay all WAL generated in the source server after the point of divergence. 
If some of the WAL was no longer available in the source server when - pg_rewind was run, and therefore could not be copied by the - pg_rewind session, it must be made available when the + pg_rewind was run, and therefore could not be copied by the + pg_rewind session, it must be made available when the target server is started. This can be done by creating a - recovery.conf file in the target data directory with a - suitable restore_command. + recovery.conf file in the target data directory with a + suitable restore_command. - pg_rewind requires that the target server either has - the option enabled - in postgresql.conf or data checksums enabled when - the cluster was initialized with initdb. Neither of these - are currently on by default. - must also be set to on, but is enabled by default. + pg_rewind requires that the target server either has + the option enabled + in postgresql.conf or data checksums enabled when + the cluster was initialized with initdb. Neither of these + are currently on by default. + must also be set to on, but is enabled by default. + + + + If pg_rewind fails while processing, then + the data folder of the target is likely not in a state that can be + recovered. In such a case, taking a new fresh backup is recommended. + + + + pg_rewind will fail immediately if it finds + files it cannot write directly to. This can happen for example when + the source and the target server use the same file mapping for read-only + SSL keys and certificates. If such files are present on the target server + it is recommended to remove them before running + pg_rewind. After doing the rewind, some of + those files may have been copied from the source, in which case it may + be necessary to remove the data copied and restore back the set of links + used before the rewind. + + @@ -133,7 +153,7 @@ PostgreSQL documentation Specifies a libpq connection string to connect to the source - PostgreSQL server to synchronize the target with. + PostgreSQL server to synchronize the target with. The connection must be a normal (non-replication) connection with superuser access. This option requires the source server to be running and not in recovery mode. @@ -151,6 +171,22 @@ PostgreSQL documentation + + + + + + By default, pg_rewind will wait for all files + to be written safely to disk. This option causes + pg_rewind to return without waiting, which is + faster, but means that a subsequent operating system crash can leave + the synchronized data folder corrupt. Generally, this option is + useful for testing but should not be used when creating a production + installation. + + + + @@ -167,7 +203,7 @@ PostgreSQL documentation Print verbose debugging output that is mostly useful for developers - debugging pg_rewind. + debugging pg_rewind. @@ -192,9 +228,9 @@ PostgreSQL documentation Environment - When option is used, pg_rewind also uses the environment variables - supported by libpq (see ). + supported by libpq (see ). @@ -224,22 +260,34 @@ PostgreSQL documentation Copy all those changed blocks from the source cluster to the target cluster, either using direct file system access - () or SQL (). Copy all other files such as pg_xact and configuration files from the source cluster to the target cluster - (everything except the relation files). + (everything except the relation files). 
Similarly to base backups, + the contents of the directories pg_dynshmem/, + pg_notify/, pg_replslot/, + pg_serial/, pg_snapshots/, + pg_stat_tmp/, and + pg_subtrans/ are omitted from the data copied + from the source cluster. Any file or directory beginning with + pgsql_tmp is omitted, as well as are + backup_label, + tablespace_map, + pg_internal.init, + postmaster.opts and + postmaster.pid. Apply the WAL from the source cluster, starting from the checkpoint - created at failover. (Strictly speaking, pg_rewind + created at failover. (Strictly speaking, pg_rewind doesn't apply the WAL, it just creates a backup label file that - makes PostgreSQL start by replaying all WAL from + makes PostgreSQL start by replaying all WAL from that checkpoint forward.) diff --git a/doc/src/sgml/ref/pg_verify_checksums.sgml b/doc/src/sgml/ref/pg_verify_checksums.sgml new file mode 100644 index 0000000000..905b8f1222 --- /dev/null +++ b/doc/src/sgml/ref/pg_verify_checksums.sgml @@ -0,0 +1,122 @@ + + + + + pg_verify_checksums + + + + pg_verify_checksums + 1 + Application + + + + pg_verify_checksums + verify data checksums in a PostgreSQL database cluster + + + + + pg_verify_checksums + option + + + + + + datadir + + + + + + Description + + pg_verify_checksums verifies data checksums in a + PostgreSQL cluster. The server must be shut + down cleanly before running pg_verify_checksums. + The exit status is zero if there are no checksum errors, otherwise nonzero. + + + + + Options + + + The following command-line options are available: + + + + + + + + Specifies the directory where the database cluster is stored. + + + + + + + + + + Enable verbose output. Lists all checked files. + + + + + + + + + Only validate checksums in the relation with specified relfilenode. + + + + + + + + + + Print the pg_verify_checksums version and exit. + + + + + + + + + + Show help about pg_verify_checksums command line + arguments, and exit. + + + + + + + + + Environment + + + + PGDATA + + + + Specifies the directory where the database cluster is + stored; can be overridden using the option. + + + + + + diff --git a/doc/src/sgml/ref/pg_waldump.sgml b/doc/src/sgml/ref/pg_waldump.sgml index cff88a4c1e..35f974f8c1 100644 --- a/doc/src/sgml/ref/pg_waldump.sgml +++ b/doc/src/sgml/ref/pg_waldump.sgml @@ -29,7 +29,7 @@ PostgreSQL documentation - + Description pg_waldump displays the write-ahead log (WAL) and is mainly @@ -133,7 +133,7 @@ PostgreSQL documentation Only display records generated by the specified resource manager. - If list is passed as name, print a list of valid resource manager + If list is passed as name, print a list of valid resource manager names, and exit. @@ -156,15 +156,15 @@ PostgreSQL documentation Timeline from which to read log records. The default is to use the - value in startseg, if that is specified; otherwise, the + value in startseg, if that is specified; otherwise, the default is 1. - - + + Print the pg_waldump version and exit. @@ -195,8 +195,8 @@ PostgreSQL documentation - - + + Show help about pg_waldump command line @@ -220,8 +220,8 @@ PostgreSQL documentation - pg_waldump cannot read WAL files with suffix - .partial. If those files need to be read, .partial + pg_waldump cannot read WAL files with suffix + .partial. If those files need to be read, .partial suffix needs to be removed from the file name. 
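As a hedged illustration of the note above (the segment name and paths are hypothetical), a .partial file can be copied to a name without the suffix and then inspected, leaving the original archived file untouched:

cp pg_wal/000000010000000100000022.partial /tmp/000000010000000100000022
pg_waldump /tmp/000000010000000100000022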
@@ -230,7 +230,7 @@ PostgreSQL documentation See Also - + diff --git a/doc/src/sgml/ref/pgarchivecleanup.sgml b/doc/src/sgml/ref/pgarchivecleanup.sgml index abe01bef4f..4117a4392c 100644 --- a/doc/src/sgml/ref/pgarchivecleanup.sgml +++ b/doc/src/sgml/ref/pgarchivecleanup.sgml @@ -29,44 +29,44 @@ Description - pg_archivecleanup is designed to be used as an + pg_archivecleanup is designed to be used as an archive_cleanup_command to clean up WAL file archives when - running as a standby server (see ). - pg_archivecleanup can also be used as a standalone program to + running as a standby server (see ). + pg_archivecleanup can also be used as a standalone program to clean WAL file archives. To configure a standby - server to use pg_archivecleanup, put this into its + server to use pg_archivecleanup, put this into its recovery.conf configuration file: -archive_cleanup_command = 'pg_archivecleanup archivelocation %r' +archive_cleanup_command = 'pg_archivecleanup archivelocation %r' - where archivelocation is the directory from which WAL segment + where archivelocation is the directory from which WAL segment files should be removed. - When used within , all WAL files - logically preceding the value of the %r argument will be removed - from archivelocation. This minimizes the number of files + When used within , all WAL files + logically preceding the value of the %r argument will be removed + from archivelocation. This minimizes the number of files that need to be retained, while preserving crash-restart capability. Use of - this parameter is appropriate if the archivelocation is a + this parameter is appropriate if the archivelocation is a transient staging area for this particular standby server, but - not when the archivelocation is intended as a + not when the archivelocation is intended as a long-term WAL archive area, or when multiple standby servers are recovering from the same archive location. When used as a standalone program all WAL files logically preceding the - oldestkeptwalfile will be removed from archivelocation. - In this mode, if you specify a .partial or .backup + oldestkeptwalfile will be removed from archivelocation. + In this mode, if you specify a .partial or .backup file name, then only the file prefix will be used as the - oldestkeptwalfile. This treatment of .backup + oldestkeptwalfile. This treatment of .backup file name allows you to remove all WAL files archived prior to a specific base backup without error. For example, the following example will remove all files older than - WAL file name 000000010000003700000010: + WAL file name 000000010000003700000010: pg_archivecleanup -d archive 000000010000003700000010.00000020.backup @@ -77,7 +77,7 @@ pg_archivecleanup: removing file "archive/00000001000000370000000E" pg_archivecleanup assumes that - archivelocation is a directory readable and writable by the + archivelocation is a directory readable and writable by the server-owning user. @@ -94,7 +94,7 @@ pg_archivecleanup: removing file "archive/00000001000000370000000E" - Print lots of debug logging output on stderr. + Print lots of debug logging output on stderr. @@ -103,14 +103,14 @@ pg_archivecleanup: removing file "archive/00000001000000370000000E" - Print the names of the files that would have been removed on stdout (performs a dry run). + Print the names of the files that would have been removed on stdout (performs a dry run). - - + + Print the pg_archivecleanup version and exit. 
@@ -119,7 +119,7 @@ pg_archivecleanup: removing file "archive/00000001000000370000000E" - extension + extension Provide an extension @@ -134,8 +134,8 @@ pg_archivecleanup: removing file "archive/00000001000000370000000E" - - + + Show help about pg_archivecleanup command line @@ -152,8 +152,8 @@ pg_archivecleanup: removing file "archive/00000001000000370000000E" pg_archivecleanup is designed to work with - PostgreSQL 8.0 and later when used as a standalone utility, - or with PostgreSQL 9.0 and later when used as an + PostgreSQL 8.0 and later when used as a standalone utility, + or with PostgreSQL 9.0 and later when used as an archive cleanup command. @@ -172,14 +172,14 @@ pg_archivecleanup: removing file "archive/00000001000000370000000E" archive_cleanup_command = 'pg_archivecleanup -d /mnt/standby/archive %r 2>>cleanup.log' where the archive directory is physically located on the standby server, - so that the archive_command is accessing it across NFS, + so that the archive_command is accessing it across NFS, but the files are local to the standby. This will: - produce debugging output in cleanup.log + produce debugging output in cleanup.log @@ -194,7 +194,7 @@ archive_cleanup_command = 'pg_archivecleanup -d /mnt/standby/archive %r 2>>clean See Also - + diff --git a/doc/src/sgml/ref/pgbench.sgml b/doc/src/sgml/ref/pgbench.sgml index 03e1212d50..b5e3a62a33 100644 --- a/doc/src/sgml/ref/pgbench.sgml +++ b/doc/src/sgml/ref/pgbench.sgml @@ -34,12 +34,12 @@ Description pgbench is a simple program for running benchmark - tests on PostgreSQL. It runs the same sequence of SQL + tests on PostgreSQL. It runs the same sequence of SQL commands over and over, possibly in multiple concurrent database sessions, and then calculates the average transaction rate (transactions per second). By default, pgbench tests a scenario that is - loosely based on TPC-B, involving five SELECT, - UPDATE, and INSERT commands per transaction. + loosely based on TPC-B, involving five SELECT, + UPDATE, and INSERT commands per transaction. However, it is easy to test other cases by writing your own transaction script files. @@ -63,7 +63,7 @@ tps = 85.296346 (excluding connections establishing) settings. The next line reports the number of transactions completed and intended (the latter being just the product of number of clients and number of transactions per client); these will be equal unless the run - failed before completion. (In mode, only the actual number of transactions is printed.) The last two lines report the number of transactions per second, figured with and without counting the time to start database sessions. @@ -71,27 +71,27 @@ tps = 85.296346 (excluding connections establishing) The default TPC-B-like transaction test requires specific tables to be - set up beforehand. pgbench should be invoked with - the (initialize) option to create and populate these tables. (When you are testing a custom script, you don't need this step, but will instead need to do whatever setup your test needs.) Initialization looks like: -pgbench -i other-options dbname +pgbench -i other-options dbname - where dbname is the name of the already-created - database to test in. (You may also need , + , and/or options to specify how to connect to the database server.) - pgbench -i creates four tables pgbench_accounts, - pgbench_branches, pgbench_history, and - pgbench_tellers, + pgbench -i creates four tables pgbench_accounts, + pgbench_branches, pgbench_history, and + pgbench_tellers, destroying any existing tables of these names. 
Be very careful to use another database if you have tables having these names! @@ -99,7 +99,7 @@ pgbench -i other-options dbn - At the default scale factor of 1, the tables initially + At the default scale factor of 1, the tables initially contain this many rows: table # of rows @@ -110,22 +110,22 @@ pgbench_accounts 100000 pgbench_history 0 You can (and, for most purposes, probably should) increase the number - of rows by using the (scale factor) option. The + (fillfactor) option might also be used at this point. Once you have done the necessary setup, you can run your benchmark - with a command that doesn't include , that is -pgbench options dbname +pgbench options dbname In nearly all cases, you'll need some options to make a useful test. - The most important options are (number of clients), + (number of transactions), (time limit), + and (specify a custom script file). See below for a full list. @@ -134,9 +134,9 @@ pgbench options dbname Options - The following is divided into three subsections: Different options are used - during database initialization and while running benchmarks, some options - are useful in both cases. + The following is divided into three subsections. Different options are + used during database initialization and while running benchmarks, but some + options are useful in both cases. @@ -159,13 +159,86 @@ pgbench options dbname - fillfactor - fillfactor + + - Create the pgbench_accounts, - pgbench_tellers and - pgbench_branches tables with the given fillfactor. + Perform just a selected set of the normal initialization steps. + init_steps specifies the + initialization steps to be performed, using one character per step. + Each step is invoked in the specified order. + The default is dtgvp. + The available steps are: + + + + d (Drop) + + + Drop any existing pgbench tables. + + + + + t (create Tables) + + + Create the tables used by the + standard pgbench scenario, namely + pgbench_accounts, + pgbench_branches, + pgbench_history, and + pgbench_tellers. + + + + + g (Generate data) + + + Generate data and load it into the standard tables, + replacing any data already present. + + + + + v (Vacuum) + + + Invoke VACUUM on the standard tables. + + + + + p (create Primary keys) + + + Create primary key indexes on the standard tables. + + + + + f (create Foreign keys) + + + Create foreign key constraints between the standard tables. + (Note that this step is not performed by default.) + + + + + + + + + + fillfactor + fillfactor + + + Create the pgbench_accounts, + pgbench_tellers and + pgbench_branches tables with the given fillfactor. Default is 100. @@ -176,7 +249,9 @@ pgbench options dbname - Perform no vacuuming after initialization. + Perform no vacuuming during initialization. + (This option suppresses the v initialization step, + even if it was specified in .) @@ -194,13 +269,13 @@ pgbench options dbname - scale_factor - scale_factor + scale_factor + scale_factor Multiply the number of rows generated by the scale factor. - For example, -s 100 will create 10,000,000 rows - in the pgbench_accounts table. Default is 1. + For example, -s 100 will create 10,000,000 rows + in the pgbench_accounts table. Default is 1. When the scale is 20,000 or larger, the columns used to hold account identifiers (aid columns) will switch to using larger integers (bigint), @@ -215,6 +290,8 @@ pgbench options dbname Create foreign key constraints between the standard tables. + (This option adds the f step to the initialization + step sequence, if it is not already present.) 
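As a sketch of how the initialization steps described above combine (the database name mydb is hypothetical, and the init-steps option is assumed to be spelled -I), the following runs the default dtgvp sequence plus foreign key creation at scale factor 100:

pgbench -i -I dtgvpf -s 100 mydb

This should be roughly equivalent to pgbench -i -s 100 --foreign-keys mydb, since --foreign-keys merely appends the f step to the default sequence.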
@@ -262,17 +339,17 @@ pgbench options dbname - - + scriptname[@weight] + =scriptname[@weight] Add the specified built-in script to the list of executed scripts. - An optional integer weight after @ allows to adjust the + An optional integer weight after @ allows to adjust the probability of drawing the script. If not specified, it is set to 1. - Available built-in scripts are: tpcb-like, - simple-update and select-only. + Available built-in scripts are: tpcb-like, + simple-update and select-only. Unambiguous prefixes of built-in names are accepted. - With special name list, show the list of built-in scripts + With special name list, show the list of built-in scripts and exit immediately. @@ -280,8 +357,8 @@ pgbench options dbname - clients - clients + clients + clients Number of clients simulated, that is, number of concurrent database @@ -313,24 +390,24 @@ pgbench options dbname - varname=value - varname=value + varname=value + varname=value Define a variable for use by a custom script (see below). - Multiple options are allowed. - - + filename[@weight] + filename[@weight] - Add a transaction script read from filename to + Add a transaction script read from filename to the list of executed scripts. - An optional integer weight after @ allows to adjust the + An optional integer weight after @ allows to adjust the probability of drawing the test. See below for details. @@ -338,8 +415,8 @@ pgbench options dbname - threads - threads + threads + threads Number of worker threads within pgbench. @@ -362,41 +439,41 @@ pgbench options dbname - limit - limit + limit + limit - Transaction which last more than limit milliseconds - are counted and reported separately, as late. + Transaction which last more than limit milliseconds + are counted and reported separately, as late. - When throttling is used ( - querymode - querymode + querymode + querymode Protocol to use for submitting queries to the server: - simple: use simple query protocol. + simple: use simple query protocol. - extended: use extended query protocol. + extended: use extended query protocol. - prepared: use extended query protocol with prepared statements. + prepared: use extended query protocol with prepared statements. - The default is simple query protocol. (See + The default is simple query protocol. (See for more information.) @@ -408,11 +485,11 @@ pgbench options dbname Perform no vacuuming before running the test. - This option is necessary + This option is necessary if you are running a custom test scenario that does not include - the standard tables pgbench_accounts, - pgbench_branches, pgbench_history, and - pgbench_tellers. + the standard tables pgbench_accounts, + pgbench_branches, pgbench_history, and + pgbench_tellers. @@ -423,20 +500,20 @@ pgbench options dbname Run built-in simple-update script. - Shorthand for . - sec - sec + sec + sec - Show progress report every sec seconds. The report - includes the time since the beginning of the run, the tps since the + Show progress report every sec seconds. The report + includes the time since the beginning of the run, the TPS since the last report, and the transaction latency average and standard - deviation since the last report. Under throttling (), the latency is computed with respect to the transaction scheduled start time, not the actual transaction beginning time, thus it also includes the average schedule lag time. 
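Combining several of the run-time options described above (mydb is a hypothetical database), a throttled one-minute run with ten clients, two worker threads, a target of 100 transactions per second, a 10 ms latency limit, and a progress report every five seconds could look like:

pgbench -c 10 -j 2 -T 60 -R 100 -L 10 -P 5 mydb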
@@ -457,8 +534,8 @@ pgbench options dbname - rate - rate + rate + rate Execute transactions targeting the specified rate instead of running @@ -487,7 +564,7 @@ pgbench options dbname - If is used together with , a transaction can lag behind so much that it is already over the latency limit when the previous transaction ends, because the latency is calculated from the scheduled start time. Such transactions are @@ -508,15 +585,15 @@ pgbench options dbname - scale_factor - scale_factor + scale_factor + scale_factor - Report the specified scale factor in pgbench's + Report the specified scale factor in pgbench's output. With the built-in tests, this is not necessary; the correct scale factor will be detected by counting the number of - rows in the pgbench_branches table. - However, when testing only custom benchmarks ( option), the scale factor will be reported as 1 unless this option is used. @@ -528,14 +605,14 @@ pgbench options dbname Run built-in select-only script. - Shorthand for . - transactions - transactions + transactions + transactions Number of transactions each client runs. Default is 10. @@ -544,8 +621,8 @@ pgbench options dbname - seconds - seconds + seconds + seconds Run the test for this many seconds, rather than a fixed number of @@ -561,15 +638,15 @@ pgbench options dbname Vacuum all four standard tables before running the test. - With neither - + Length of aggregation interval (in seconds). May be used only @@ -580,11 +657,11 @@ pgbench options dbname - + Set the filename prefix for the log files created by - @@ -593,7 +670,7 @@ pgbench options dbname - When showing progress (option ), use a timestamp (Unix epoch) instead of the number of seconds since the beginning of the run. The unit is in seconds, with millisecond precision after the dot. @@ -603,7 +680,44 @@ pgbench options dbname - + SEED + + + Set random generator seed. Seeds the system random number generator, + which then produces a sequence of initial generator states, one for + each thread. + Values for SEED may be: + time (the default, the seed is based on the current time), + rand (use a strong random source, failing if none + is available), or an unsigned decimal integer value. + The random generator is invoked explicitly from a pgbench script + (random... functions) or implicitly (for instance option + uses it to schedule transactions). + When explicitly set, the value used for seeding is shown on the terminal. + Any value allowed for SEED may also be + provided through the environment variable + PGBENCH_RANDOM_SEED. + To ensure that the provided seed impacts all possible uses, put this option + first or use the environment variable. + + + Setting the seed explicitly allows to reproduce a pgbench + run exactly, as far as random numbers are concerned. + As the random state is managed per thread, this means the exact same + pgbench run for an identical invocation if there is one + client per thread and there are no external or data dependencies. + From a statistical viewpoint reproducing runs exactly is a bad idea because + it can hide the performance variability or improve performance unduly, + e.g. by hitting the same pages as a previous run. + However, it may also be of great help for debugging, for instance + re-running a tricky case which leads to an error. + Use wisely. + + + + + + Sampling rate, used when writing data into the log, to reduce the @@ -613,9 +727,9 @@ pgbench options dbname Remember to take the sampling rate into account when processing the - log file. 
For example, when computing tps values, you need to multiply + log file. For example, when computing TPS values, you need to multiply the numbers accordingly (e.g. with 0.01 sample rate, you'll only get - 1/100 of the actual tps). + 1/100 of the actual TPS). @@ -635,8 +749,8 @@ pgbench options dbname - hostname - hostname + hostname + hostname The database server's host name @@ -645,8 +759,8 @@ pgbench options dbname - port - port + port + port The database server's port number @@ -655,8 +769,8 @@ pgbench options dbname - login - login + login + login The user name to connect as @@ -665,8 +779,8 @@ pgbench options dbname - - + + Print the pgbench version and exit. @@ -675,8 +789,8 @@ pgbench options dbname - - + + Show help about pgbench command line @@ -690,27 +804,39 @@ pgbench options dbname + + Exit Status + + + A successful run will exit with status 0. Exit status 1 indicates static + problems such as invalid command-line options. Errors during the run such + as database errors or problems in the script will result in exit status 2. + In the latter case, pgbench will print partial + results. + + + Notes - What is the <quote>Transaction</> Actually Performed in <application>pgbench</application>? + What is the <quote>Transaction</quote> Actually Performed in <application>pgbench</application>? - pgbench executes test scripts chosen randomly + pgbench executes test scripts chosen randomly from a specified list. - They include built-in scripts with and + user-provided custom scripts with . Each script may be given a relative weight specified after a - @ so as to change its drawing probability. - The default weight is 1. - Scripts with a weight of 0 are ignored. + @ so as to change its drawing probability. + The default weight is 1. + Scripts with a weight of 0 are ignored. - The default built-in transaction script (also invoked with @@ -726,15 +852,15 @@ pgbench options dbname - If you select the simple-update built-in (also ), steps 4 and 5 aren't included in the transaction. This will avoid update contention on these tables, but it makes the test case even less like TPC-B. - If you select the select-only built-in (also @@ -745,41 +871,43 @@ pgbench options dbname pgbench has support for running custom benchmark scenarios by replacing the default transaction script (described above) with a transaction script read from a file - ( option). In this case a transaction + ( option). In this case a transaction counts as one execution of a script file. A script file contains one or more SQL commands terminated by semicolons. Empty lines and lines beginning with - -- are ignored. Script files can also contain - meta commands, which are interpreted by pgbench + -- are ignored. Script files can also contain + meta commands, which are interpreted by pgbench itself, as described below. - Before PostgreSQL 9.6, SQL commands in script files + Before PostgreSQL 9.6, SQL commands in script files were terminated by newlines, and so they could not be continued across - lines. Now a semicolon is required to separate consecutive + lines. Now a semicolon is required to separate consecutive SQL commands (though a SQL command does not need one if it is followed by a meta command). If you need to create a script file that works with - both old and new versions of pgbench, be sure to write + both old and new versions of pgbench, be sure to write each SQL command on a single line ending with a semicolon. There is a simple variable-substitution facility for script files. 
- Variables can be set by the command-line option, explained above, or by the meta commands explained below. - In addition to any variables preset by command-line options, there are a few variables that are preset automatically, listed in - . A value specified for these - variables using @@ -795,20 +923,30 @@ pgbench options dbname - scale - current scale factor + client_id + unique number identifying the client session (starts from zero) - client_id - unique number identifying the client session (starts from zero) + default_seed + seed used in hash functions by default + + + + random_seed + random generator seed (unless overwritten with ) + + + + scale + current scale factor - Script file meta commands begin with a backslash (\) and + Script file meta commands begin with a backslash (\) and normally extend to the end of the line, although they can be continued to additional lines by writing backslash-return. Arguments to a meta command are separated by white space. @@ -816,23 +954,63 @@ pgbench options dbname + + \if expression + \elif expression + \else + \endif + + + This group of commands implements nestable conditional blocks, + similarly to psql's . + Conditional expressions are identical to those with \set, + with non-zero values interpreted as true. + + + + - \set varname expression + \set varname expression - Sets variable varname to a value calculated - from expression. - The expression may contain integer constants such as 5432, - double constants such as 3.14159, - references to variables :variablename, - unary operators (+, -) and binary operators - (+, -, *, /, - %) with their usual precedence and associativity, - function calls, and - parentheses. + Sets variable varname to a value calculated + from expression. + The expression may contain the NULL constant, + Boolean constants TRUE and FALSE, + integer constants such as 5432, + double constants such as 3.14159, + references to variables :variablename, + operators + with their usual SQL precedence and associativity, + function calls, + SQL CASE generic conditional + expressions and parentheses. + + + + Functions and most operators return NULL on + NULL input. + + + + For conditional purposes, non zero numerical values are + TRUE, zero numerical values and NULL + are FALSE. + + + + Too large or small integer and double constants, as well as + integer arithmetic operators (+, + -, * and /) + raise errors on overflows. + + + + When no final ELSE clause is provided to a + CASE, the default value is NULL. @@ -841,22 +1019,23 @@ pgbench options dbname \set ntellers 10 * :scale \set aid (1021 * random(1, 100000 * :scale)) % \ (100000 * :scale) + 1 +\set divx CASE WHEN :x <> 0 THEN :y/:x ELSE NULL END - \sleep number [ us | ms | s ] + \sleep number [ us | ms | s ] Causes script execution to sleep for the specified duration in - microseconds (us), milliseconds (ms) or seconds - (s). If the unit is omitted then seconds are the default. - number can be either an integer constant or a - :variablename reference to a variable + microseconds (us), milliseconds (ms) or seconds + (s). If the unit is omitted then seconds are the default. + number can be either an integer constant or a + :variablename reference to a variable having an integer value. @@ -870,22 +1049,22 @@ pgbench options dbname - \setshell varname command [ argument ... ] + \setshell varname command [ argument ... ] - Sets variable varname to the result of the shell command - command with the given argument(s). 
+ Sets variable varname to the result of the shell command + command with the given argument(s). The command must return an integer value through its standard output. - command and each argument can be either - a text constant or a :variablename reference - to a variable. If you want to use an argument starting + command and each argument can be either + a text constant or a :variablename reference + to a variable. If you want to use an argument starting with a colon, write an additional colon at the beginning of - argument. + argument. @@ -898,7 +1077,7 @@ pgbench options dbname - \shell command [ argument ... ] + \shell command [ argument ... ] @@ -917,12 +1096,183 @@ pgbench options dbname + + Built-In Operators + + + The arithmetic, bitwise, comparison and logical operators listed in + are built into pgbench + and may be used in expressions appearing in + \set. + + + + pgbench Operators by increasing precedence + + + + Operator + Description + Example + Result + + + + + OR + logical or + 5 or 0 + TRUE + + + AND + logical and + 3 and 0 + FALSE + + + NOT + logical not + not false + TRUE + + + IS [NOT] (NULL|TRUE|FALSE) + value tests + 1 is null + FALSE + + + ISNULL|NOTNULL + null tests + 1 notnull + TRUE + + + = + is equal + 5 = 4 + FALSE + + + <> + is not equal + 5 <> 4 + TRUE + + + != + is not equal + 5 != 5 + FALSE + + + < + lower than + 5 < 4 + FALSE + + + <= + lower or equal + 5 <= 4 + FALSE + + + > + greater than + 5 > 4 + TRUE + + + >= + greater or equal + 5 >= 4 + TRUE + + + | + integer bitwise OR + 1 | 2 + 3 + + + # + integer bitwise XOR + 1 # 3 + 2 + + + & + integer bitwise AND + 1 & 3 + 1 + + + ~ + integer bitwise NOT + ~ 1 + -2 + + + << + integer bitwise shift left + 1 << 2 + 4 + + + >> + integer bitwise shift right + 8 >> 2 + 2 + + + + + addition + 5 + 4 + 9 + + + - + subtraction + 3 - 2.0 + 1.0 + + + * + multiplication + 5 * 4 + 20 + + + / + division (integer truncates the results) + 5 / 3 + 1 + + + % + modulo + 3 % 2 + 1 + + + - + opposite + - 2.0 + -2.0 + + + +
+
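A brief, illustrative use of some of these operators inside \set (the variable names are arbitrary):

\set ok :client_id >= 0 and :scale <> 0
\set mask (1 << 8) | 3
\set rem 13 % 4

Here ok evaluates to TRUE or FALSE, mask to 259, and rem to 1, following the precedence rules in the table above.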
+ Built-In Functions - The functions listed in are built - into pgbench and may be used in expressions appearing in + The functions listed in are built + into pgbench and may be used in expressions appearing in \set. @@ -941,123 +1291,181 @@ pgbench options dbname - abs(a) - same as a - absolute value - abs(-17) - 17 + abs(a) + same as a + absolute value + abs(-17) + 17 - debug(a) - same as a - print a to stderr, - and return a - debug(5432.1) - 5432.1 + debug(a) + same as a + print a to stderr, + and return a + debug(5432.1) + 5432.1 - double(i) - double - cast to double - double(5432) - 5432.0 + double(i) + double + cast to double + double(5432) + 5432.0 - greatest(a [, ... ] ) - double if any a is double, else integer - largest value among arguments - greatest(5, 4, 3, 2) - 5 + exp(x) + double + exponential + exp(1.0) + 2.718281828459045 - int(x) - integer - cast to int - int(5.4 + 3.8) - 9 + greatest(a [, ... ] ) + double if any a is double, else integer + largest value among arguments + greatest(5, 4, 3, 2) + 5 - least(a [, ... ] ) - double if any a is double, else integer - smallest value among arguments - least(5, 4, 3, 2.1) - 2.1 + hash(a [, seed ] ) + integer + alias for hash_murmur2() + hash(10, 5432) + -5817877081768721676 - pi() - double - value of the constant PI - pi() - 3.14159265358979323846 + hash_fnv1a(a [, seed ] ) + integer + FNV-1a hash + hash_fnv1a(10, 5432) + -7793829335365542153 - random(lb, ub) - integer - uniformly-distributed random integer in [lb, ub] - random(1, 10) - an integer between 1 and 10 + hash_murmur2(a [, seed ] ) + integer + MurmurHash2 hash + hash_murmur2(10, 5432) + -5817877081768721676 - random_exponential(lb, ub, parameter) - integer - exponentially-distributed random integer in [lb, ub], - see below - random_exponential(1, 10, 3.0) - an integer between 1 and 10 + int(x) + integer + cast to int + int(5.4 + 3.8) + 9 - random_gaussian(lb, ub, parameter) - integer - Gaussian-distributed random integer in [lb, ub], - see below - random_gaussian(1, 10, 2.5) - an integer between 1 and 10 + least(a [, ... ] ) + double if any a is double, else integer + smallest value among arguments + least(5, 4, 3, 2.1) + 2.1 - sqrt(x) - double - square root - sqrt(2.0) - 1.414213562 + ln(x) + double + natural logarithm + ln(2.718281828459045) + 1.0 + + + mod(i, j) + integer + modulo + mod(54, 32) + 22 + + + pi() + double + value of the constant PI + pi() + 3.14159265358979323846 + + + pow(x, y), power(x, y) + double + exponentiation + pow(2.0, 10), power(2.0, 10) + 1024.0 + + + random(lb, ub) + integer + uniformly-distributed random integer in [lb, ub] + random(1, 10) + an integer between 1 and 10 + + + random_exponential(lb, ub, parameter) + integer + exponentially-distributed random integer in [lb, ub], + see below + random_exponential(1, 10, 3.0) + an integer between 1 and 10 + + + random_gaussian(lb, ub, parameter) + integer + Gaussian-distributed random integer in [lb, ub], + see below + random_gaussian(1, 10, 2.5) + an integer between 1 and 10 + + + random_zipfian(lb, ub, parameter) + integer + Zipfian-distributed random integer in [lb, ub], + see below + random_zipfian(1, 10, 1.5) + an integer between 1 and 10 + + + sqrt(x) + double + square root + sqrt(2.0) + 1.414213562 - The random function generates values using a uniform + The random function generates values using a uniform distribution, that is all the values are drawn within the specified - range with equal probability. 
The random_exponential and - random_gaussian functions require an additional double - parameter which determines the precise shape of the distribution. + range with equal probability. The random_exponential, + random_gaussian and random_zipfian + functions require an additional double parameter which determines the precise + shape of the distribution. - For an exponential distribution, parameter + For an exponential distribution, parameter controls the distribution by truncating a quickly-decreasing - exponential distribution at parameter, and then + exponential distribution at parameter, and then projecting onto integers between the bounds. To be precise, with f(x) = exp(-parameter * (x - min) / (max - min + 1)) / (1 - exp(-parameter)) - Then value i between min and - max inclusive is drawn with probability: - f(i) - f(i + 1). + Then value i between min and + max inclusive is drawn with probability: + f(i) - f(i + 1). - Intuitively, the larger the parameter, the more - frequently values close to min are accessed, and the - less frequently values close to max are accessed. - The closer to 0 parameter is, the flatter (more + Intuitively, the larger the parameter, the more + frequently values close to min are accessed, and the + less frequently values close to max are accessed. + The closer to 0 parameter is, the flatter (more uniform) the access distribution. A crude approximation of the distribution is that the most frequent 1% - values in the range, close to min, are drawn - parameter% of the time. - The parameter value must be strictly positive. + values in the range, close to min, are drawn + parameter% of the time. + The parameter value must be strictly positive. @@ -1065,37 +1473,83 @@ f(x) = exp(-parameter * (x - min) / (max - min + 1)) / (1 - exp(-parameter)) For a Gaussian distribution, the interval is mapped onto a standard normal distribution (the classical bell-shaped Gaussian curve) truncated - at -parameter on the left and +parameter + at -parameter on the left and +parameter on the right. Values in the middle of the interval are more likely to be drawn. - To be precise, if PHI(x) is the cumulative distribution - function of the standard normal distribution, with mean mu - defined as (max + min) / 2.0, with + To be precise, if PHI(x) is the cumulative distribution + function of the standard normal distribution, with mean mu + defined as (max + min) / 2.0, with f(x) = PHI(2.0 * parameter * (x - mu) / (max - min + 1)) / (2.0 * PHI(parameter) - 1) - then value i between min and - max inclusive is drawn with probability: - f(i + 0.5) - f(i - 0.5). - Intuitively, the larger the parameter, the more + then value i between min and + max inclusive is drawn with probability: + f(i + 0.5) - f(i - 0.5). + Intuitively, the larger the parameter, the more frequently values close to the middle of the interval are drawn, and the - less frequently values close to the min and - max bounds. About 67% of values are drawn from the - middle 1.0 / parameter, that is a relative - 0.5 / parameter around the mean, and 95% in the middle - 2.0 / parameter, that is a relative - 1.0 / parameter around the mean; for instance, if - parameter is 4.0, 67% of values are drawn from the + less frequently values close to the min and + max bounds. 
About 67% of values are drawn from the + middle 1.0 / parameter, that is a relative + 0.5 / parameter around the mean, and 95% in the middle + 2.0 / parameter, that is a relative + 1.0 / parameter around the mean; for instance, if + parameter is 4.0, 67% of values are drawn from the middle quarter (1.0 / 4.0) of the interval (i.e. from - 3.0 / 8.0 to 5.0 / 8.0) and 95% from - the middle half (2.0 / 4.0) of the interval (second and third - quartiles). The minimum parameter is 2.0 for performance + 3.0 / 8.0 to 5.0 / 8.0) and 95% from + the middle half (2.0 / 4.0) of the interval (second and third + quartiles). The minimum parameter is 2.0 for performance of the Box-Muller transform. + + + random_zipfian generates an approximated bounded Zipfian + distribution. For parameter in (0, 1), an + approximated algorithm is taken from + "Quickly Generating Billion-Record Synthetic Databases", + Jim Gray et al, SIGMOD 1994. For parameter + in (1, 1000), a rejection method is used, based on + "Non-Uniform Random Variate Generation", Luc Devroye, p. 550-551, + Springer 1986. The distribution is not defined when the parameter's + value is 1.0. The drawing performance is poor for parameter values + close and above 1.0 and on a small range. + + + parameter + defines how skewed the distribution is. The larger the parameter, the more + frequently values to the beginning of the interval are drawn. + The closer to 0 parameter is, + the flatter (more uniform) the access distribution. + + + + Hash functions hash, hash_murmur2 and + hash_fnv1a accept an input value and an optional seed parameter. + In case the seed isn't provided the value of :default_seed + is used, which is initialized randomly unless set by the command-line + -D option. Hash functions can be used to scatter the + distribution of random functions such as random_zipfian or + random_exponential. For instance, the following pgbench + script simulates possible real world workload typical for social media and + blogging platforms where few accounts generate excessive load: + + +\set r random_zipfian(0, 100000000, 1.07) +\set k abs(hash(:r)) % 1000000 + + + In some cases several distinct distributions are needed which don't correlate + with each other and this is when implicit seed parameter comes in handy: + + +\set k1 abs(hash(:r, :default_seed + 123)) % 1000000 +\set k2 abs(hash(:r, :default_seed + 321)) % 1000000 + + + As an example, the full definition of the built-in TPC-B-like transaction is: @@ -1126,21 +1580,21 @@ END; Per-Transaction Logging - With the option (but without the option), - pgbench writes information about each transaction + pgbench writes information about each transaction to a log file. The log file will be named - prefix.nnn, - where prefix defaults to pgbench_log, and - nnn is the PID of the + prefix.nnn, + where prefix defaults to pgbench_log, and + nnn is the PID of the pgbench process. - The prefix can be changed by using the option. + If the option is 2 or higher, so that there are multiple worker threads, each will have its own log file. The first worker will use the same name for its log file as in the standard single worker case. The additional log files for the other workers will be named - prefix.nnn.mmm, - where mmm is a sequential number for each worker starting + prefix.nnn.mmm, + where mmm is a sequential number for each worker starting with 1. 
@@ -1148,27 +1602,27 @@ END; The format of the log is: -client_id transaction_no time script_no time_epoch time_us schedule_lag +client_id transaction_no time script_no time_epoch time_us schedule_lag where - client_id indicates which client session ran the transaction, - transaction_no counts how many transactions have been + client_id indicates which client session ran the transaction, + transaction_no counts how many transactions have been run by that session, - time is the total elapsed transaction time in microseconds, - script_no identifies which script file was used (useful when - multiple scripts were specified with @@ -1180,9 +1634,9 @@ END; 0 202 2038 0 1175850569 2663 - Another example with --rate=100 - and --latency-limit=5 (note the additional - schedule_lag column): + Another example with --rate=100 + and --latency-limit=5 (note the additional + schedule_lag column): 0 81 4621 0 1412881037 912698 3005 0 82 6173 0 1412881037 914578 4304 @@ -1199,7 +1653,7 @@ END; When running a long test on hardware that can handle a lot of transactions, - the log files can become very large. The option can be used to log only a random sample of transactions. @@ -1212,30 +1666,30 @@ END; format is used for the log files: -interval_start num_transactions sum_latency sum_latency_2 min_latency max_latency sum_lag sum_lag_2 min_lag max_lag skipped +interval_start num_transactions sum_latency sum_latency_2 min_latency max_latency sum_lag sum_lag_2 min_lag max_lag skipped where - interval_start is the start of the interval (as a Unix + interval_start is the start of the interval (as a Unix epoch time stamp), - num_transactions is the number of transactions + num_transactions is the number of transactions within the interval, sum_latency is the sum of the transaction latencies within the interval, sum_latency_2 is the sum of squares of the transaction latencies within the interval, - min_latency is the minimum latency within the interval, + min_latency is the minimum latency within the interval, and - max_latency is the maximum latency within the interval. + max_latency is the maximum latency within the interval. The next fields, - sum_lag, sum_lag_2, min_lag, - and max_lag, are only present if the option is used. They provide statistics about the time each transaction had to wait for the previous one to finish, i.e. the difference between each transaction's scheduled start time and the time it actually started. - The very last field, skipped, - is only present if the option is used, too. It counts the number of transactions skipped because they would have started too late. Each transaction is counted in the interval when it was committed. @@ -1263,7 +1717,7 @@ END; Per-Statement Latencies - With the
diff --git a/doc/src/sgml/ref/pgtestfsync.sgml b/doc/src/sgml/ref/pgtestfsync.sgml index 467d6e647a..501157cb36 100644 --- a/doc/src/sgml/ref/pgtestfsync.sgml +++ b/doc/src/sgml/ref/pgtestfsync.sgml @@ -27,8 +27,8 @@ Description - pg_test_fsync is intended to give you a reasonable - idea of what the fastest is on your + pg_test_fsync is intended to give you a reasonable + idea of what the fastest is on your specific system, as well as supplying diagnostic information in the event of an identified I/O problem. However, differences shown by @@ -37,7 +37,7 @@ are not speed-limited by their write-ahead logs. pg_test_fsync reports average file sync operation time in microseconds for each wal_sync_method, which can also be used to - inform efforts to optimize the value of . + inform efforts to optimize the value of . @@ -57,9 +57,9 @@ Specifies the file name to write test data in. This file should be in the same file system that the - pg_wal directory is or will be placed in. - (pg_wal contains the WAL files.) - The default is pg_test_fsync.out in the current + pg_wal directory is or will be placed in. + (pg_wal contains the WAL files.) + The default is pg_test_fsync.out in the current directory. @@ -79,8 +79,8 @@ - - + + Print the pg_test_fsync version and exit. @@ -89,8 +89,8 @@ - - + + Show help about pg_test_fsync command line @@ -107,7 +107,7 @@ See Also - +
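A minimal pg_test_fsync invocation, assuming the filename option is spelled -f and a hypothetical data directory under /var/lib/postgresql/data, places the test file next to the WAL so the result reflects the file system that will actually hold pg_wal:

pg_test_fsync -f /var/lib/postgresql/data/pg_wal/pg_test_fsync.out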
diff --git a/doc/src/sgml/ref/pgtesttiming.sgml b/doc/src/sgml/ref/pgtesttiming.sgml index e3539cf764..545a934cf8 100644 --- a/doc/src/sgml/ref/pgtesttiming.sgml +++ b/doc/src/sgml/ref/pgtesttiming.sgml @@ -27,7 +27,7 @@ Description - pg_test_timing is a tool to measure the timing overhead + pg_test_timing is a tool to measure the timing overhead on your system and confirm that the system time never moves backwards. Systems that are slow to collect timing data can give less accurate EXPLAIN ANALYZE results. @@ -57,8 +57,8 @@ - - + + Print the pg_test_timing version and exit. @@ -67,8 +67,8 @@ - - + + Show help about pg_test_timing command line @@ -94,7 +94,7 @@ nanoseconds. This example from an Intel i7-860 system using a TSC clock source shows excellent performance: - + +]]> @@ -152,7 +152,7 @@ EXPLAIN ANALYZE SELECT COUNT(*) FROM t; possible from switching to the slower acpi_pm time source, on the same system used for the fast results above: - + /sys/devices/system/clocksource/clocksource0/current_clocksource @@ -165,7 +165,7 @@ Histogram of timing durations: 4 0.07810 3241 8 0.01357 563 16 0.00007 3 - +]]> @@ -201,7 +201,7 @@ kern.timecounter.hardware: ACPI-fast -> TSC implementation, which can have good resolution when it's backed by fast enough timing hardware, as in this example: - + +]]> @@ -294,7 +294,7 @@ Histogram of timing durations: See Also - +
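To put the discussion above into practice on Linux, one can check the active clock source and then run the tool; repeating the run after switching sources (as shown earlier in this section) makes the overhead difference visible:

cat /sys/devices/system/clocksource/clocksource0/current_clocksource
pg_test_timing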
diff --git a/doc/src/sgml/ref/pgupgrade.sgml b/doc/src/sgml/ref/pgupgrade.sgml index d44431803b..2d722b2e79 100644 --- a/doc/src/sgml/ref/pgupgrade.sgml +++ b/doc/src/sgml/ref/pgupgrade.sgml @@ -24,9 +24,9 @@ newbindir - olddatadir + oldconfigdir - newdatadir + newconfigdir option @@ -35,38 +35,38 @@ Description - pg_upgrade (formerly called pg_migrator) allows data - stored in PostgreSQL data files to be upgraded to a later PostgreSQL + pg_upgrade (formerly called pg_migrator) allows data + stored in PostgreSQL data files to be upgraded to a later PostgreSQL major version without the data dump/reload typically required for - major version upgrades, e.g. from 9.6.3 to the current major release - of PostgreSQL. It is not required for minor version upgrades, e.g. from - 9.6.2 to 9.6.3. + major version upgrades, e.g. from 9.5.8 to 9.6.4 or from 10.7 to 11.2. + It is not required for minor version upgrades, e.g. from 9.6.2 to 9.6.3 + or from 10.1 to 10.2. Major PostgreSQL releases regularly add new features that often change the layout of the system tables, but the internal data storage - format rarely changes. pg_upgrade uses this fact + format rarely changes. pg_upgrade uses this fact to perform rapid upgrades by creating new system tables and simply reusing the old user data files. If a future major release ever changes the data storage format in a way that makes the old data - format unreadable, pg_upgrade will not be usable + format unreadable, pg_upgrade will not be usable for such upgrades. (The community will attempt to avoid such situations.) - pg_upgrade does its best to + pg_upgrade does its best to make sure the old and new clusters are binary-compatible, e.g. by checking for compatible compile-time settings, including 32/64-bit binaries. It is important that any external modules are also binary compatible, though this cannot - be checked by pg_upgrade. + be checked by pg_upgrade. pg_upgrade supports upgrades from 8.4.X and later to the current - major release of PostgreSQL, including snapshot and beta releases. + major release of PostgreSQL, including snapshot and beta releases. @@ -79,17 +79,17 @@ - bindir - bindir + bindir + bindir the old PostgreSQL executable directory; - environment variable PGBINOLD + environment variable PGBINOLD - bindir - bindir + bindir + bindir the new PostgreSQL executable directory; - environment variable PGBINNEW + environment variable PGBINNEW @@ -99,17 +99,17 @@ - datadir - datadir - the old cluster data directory; environment - variable PGDATAOLD + configdir + configdir + the old database cluster configuration directory; environment + variable PGDATAOLD - datadir - datadir - the new cluster data directory; environment - variable PGDATANEW + configdir + configdir + the new database cluster configuration directory; environment + variable PGDATANEW @@ -143,17 +143,17 @@ - port - port + port + port the old cluster port number; environment - variable PGPORTOLD + variable PGPORTOLD - port - port + port + port the new cluster port number; environment - variable PGPORTNEW + variable PGPORTNEW @@ -164,10 +164,10 @@ - username - username + username + username cluster's install user name; environment - variable PGUSER + variable PGUSER @@ -182,6 +182,28 @@ display version information, then exit + + + + + Use efficient file cloning (also known as reflinks on + some systems) instead of copying files to the new cluster. 
This can + result in near-instantaneous copying of the data files, giving the + speed advantages of / while + leaving the old cluster untouched. + + + + File cloning is only supported on some operating systems and file + systems. If it is selected but not supported, the + pg_upgrade run will error. At present, it + is supported on Linux (kernel 4.5 or later) with Btrfs and XFS (on + file systems created with reflink support, which is not the default + for XFS at this writing), and on macOS with APFS. + + + + @@ -207,17 +229,17 @@ If you are using a version-specific installation directory, e.g. - /opt/PostgreSQL/&majorversion;, you do not need to move the old cluster. The + /opt/PostgreSQL/&majorversion;, you do not need to move the old cluster. The graphical installers all use version-specific installation directories. If your installation directory is not version-specific, e.g. - /usr/local/pgsql, it is necessary to move the current PostgreSQL install - directory so it does not interfere with the new PostgreSQL installation. - Once the current PostgreSQL server is shut down, it is safe to rename the + /usr/local/pgsql, it is necessary to move the current PostgreSQL install + directory so it does not interfere with the new PostgreSQL installation. + Once the current PostgreSQL server is shut down, it is safe to rename the PostgreSQL installation directory; assuming the old directory is - /usr/local/pgsql, you can do: + /usr/local/pgsql, you can do: mv /usr/local/pgsql /usr/local/pgsql.old @@ -230,8 +252,8 @@ mv /usr/local/pgsql /usr/local/pgsql.old For source installs, build the new version - Build the new PostgreSQL source with configure flags that are compatible - with the old cluster. pg_upgrade will check pg_controldata to make + Build the new PostgreSQL source with configure flags that are compatible + with the old cluster. pg_upgrade will check pg_controldata to make sure all settings are compatible before starting the upgrade. @@ -241,7 +263,7 @@ mv /usr/local/pgsql /usr/local/pgsql.old Install the new server's binaries and support - files. pg_upgrade is included in a default installation. + files. pg_upgrade is included in a default installation. @@ -273,7 +295,7 @@ make prefix=/usr/local/pgsql.new install into the new cluster, e.g. pgcrypto.so, whether they are from contrib or some other source. Do not install the schema definitions, e.g. - CREATE EXTENSION pgcrypto, because these will be upgraded + CREATE EXTENSION pgcrypto, because these will be upgraded from the old cluster. Also, any custom full text search files (dictionary, synonym, thesaurus, stop words) must also be copied to the new cluster. @@ -284,10 +306,10 @@ make prefix=/usr/local/pgsql.new install Adjust authentication - pg_upgrade will connect to the old and new servers several - times, so you might want to set authentication to peer - in pg_hba.conf or use a ~/.pgpass file - (see ). + pg_upgrade will connect to the old and new servers several + times, so you might want to set authentication to peer + in pg_hba.conf or use a ~/.pgpass file + (see ). @@ -320,30 +342,27 @@ NET STOP postgresql-&majorversion; Prepare for standby server upgrades - If you are upgrading standby servers (as outlined in section ), verify that the old standby - servers are caught up by running pg_controldata + If you are upgrading standby servers using methods outlined in section , verify that the old standby + servers are caught up by running pg_controldata against the old primary and standby clusters. 
Verify that the - Latest checkpoint location values match in all clusters. + Latest checkpoint location values match in all clusters. (There will be a mismatch if old standby servers were shut down - before the old primary.) - - - - Also, if upgrading standby servers, change wal_level - to replica in the postgresql.conf file on - the new master cluster. + before the old primary or if the old standby servers are still running.) + Also, change wal_level to + replica in the postgresql.conf file on the + new primary cluster. - Run <application>pg_upgrade</> + Run <application>pg_upgrade</application> - Always run the pg_upgrade binary of the new server, not the old one. - pg_upgrade requires the specification of the old and new cluster's - data and executable (bin) directories. You can also specify - user and port values, and whether you want the data files linked + Always run the pg_upgrade binary of the new server, not the old one. + pg_upgrade requires the specification of the old and new cluster's + data and executable (bin) directories. You can also specify + user and port values, and whether you want the data files linked or cloned instead of the default copy behavior. @@ -353,13 +372,17 @@ NET STOP postgresql-&majorversion; your old cluster once you start the new cluster after the upgrade. Link mode also requires that the old and new cluster data directories be in the - same file system. (Tablespaces and pg_wal can be on - different file systems.) See pg_upgrade --help for a full - list of options. + same file system. (Tablespaces and pg_wal can be on + different file systems.) + The clone mode provides the same speed and disk space advantages but will + not leave the old cluster unusable after the upgrade. The clone mode + also requires that the old and new data directories be in the same file + system. The clone mode is only available on certain operating systems + and file systems. - The option allows multiple CPU cores to be used for copying/linking of files and to dump and reload database schemas in parallel; a good place to start is the maximum of the number of CPU cores and tablespaces. This option can dramatically reduce the @@ -369,14 +392,14 @@ NET STOP postgresql-&majorversion; For Windows users, you must be logged into an administrative account, and - then start a shell as the postgres user and set the proper path: + then start a shell as the postgres user and set the proper path: RUNAS /USER:postgres "CMD.EXE" SET PATH=%PATH%;C:\Program Files\PostgreSQL\&majorversion;\bin; - and then run pg_upgrade with quoted directories, e.g.: + and then run pg_upgrade with quoted directories, e.g.: pg_upgrade.exe @@ -386,19 +409,20 @@ pg_upgrade.exe --new-bindir "C:/Program Files/PostgreSQL/&majorversion;/bin" - Once started, pg_upgrade will verify the two clusters are compatible - and then do the upgrade. You can use pg_upgrade --check + Once started, pg_upgrade will verify the two clusters are compatible + and then do the upgrade. You can use pg_upgrade --check to perform only the checks, even if the old server is still - running. pg_upgrade --check will also outline any + running. pg_upgrade --check will also outline any manual adjustments you will need to make after the upgrade. If you - are going to be using link mode, you should use the to enable link-mode-specific checks. - pg_upgrade requires write permission in the current directory. + are going to be using link or clone mode, you should use the option + or with + to enable mode-specific checks. 
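As a sketch of the check-only invocation described above (all directories are hypothetical, and the long option spellings are assumptions not shown in this excerpt):

pg_upgrade --check --link \
  --old-bindir /usr/lib/postgresql/10/bin \
  --new-bindir /usr/lib/postgresql/11/bin \
  --old-datadir /var/lib/postgresql/10/main \
  --new-datadir /var/lib/postgresql/11/main

Because --check makes no changes, it can be run while the old server is still up, and it reports any manual adjustments that will be needed after the real upgrade.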
+ pg_upgrade requires write permission in the current directory. Obviously, no one should be accessing the clusters during the - upgrade. pg_upgrade defaults to running servers + upgrade. pg_upgrade defaults to running servers on port 50432 to avoid unintended client connections. You can use the same port number for both clusters when doing an upgrade because the old and new clusters will not be running at the @@ -407,8 +431,8 @@ pg_upgrade.exe - If an error occurs while restoring the database schema, pg_upgrade will - exit and you will have to revert to the old cluster as outlined in + If an error occurs while restoring the database schema, pg_upgrade will + exit and you will have to revert to the old cluster as outlined in below. To try pg_upgrade again, you will need to modify the old cluster so the pg_upgrade schema restore succeeds. If the problem is a contrib module, you might need to uninstall the contrib module from @@ -421,12 +445,20 @@ pg_upgrade.exe Upgrade Streaming Replication and Log-Shipping standby servers - If you have Streaming Replication (see ) or Log-Shipping (see ) standby servers, follow these steps to - upgrade them. You will not be running pg_upgrade - on the standby servers, but rather rsync. Do not - start any servers yet. + If you used link mode and have Streaming Replication (see ) or Log-Shipping (see ) standby servers, you can follow these steps to + quickly upgrade them. You will not be running pg_upgrade on + the standby servers, but rather rsync on the primary. + Do not start any servers yet. + + + + If you did not use link mode, do not have or do not + want to use rsync, or want an easier solution, skip + the instructions in this section and simply recreate the standby + servers once pg_upgrade completes and the new primary + is running. @@ -441,12 +473,12 @@ pg_upgrade.exe - Make sure the new standby data directories do <emphasis>not</> exist + Make sure the new standby data directories do <emphasis>not</emphasis> exist - Make sure the new standby data directories do not - exist or are empty. If initdb was run, delete - the standby server data directories. + Make sure the new standby data directories do not + exist or are empty. If initdb was run, delete + the standby servers' new data directories. @@ -455,7 +487,7 @@ pg_upgrade.exe Install the same custom shared object files on the new standbys - that you installed in the new master cluster. + that you installed in the new primary cluster. @@ -472,42 +504,73 @@ pg_upgrade.exe Save configuration files - Save any configuration files from the standbys you need to keep, - e.g. postgresql.conf, recovery.conf, - as these will be overwritten or removed in the next step. + Save any configuration files from the old standbys' configuration + directories you need to keep, e.g. postgresql.conf, + recovery.conf, because these will be overwritten or + removed in the next step. - Run <application>rsync</> + Run <application>rsync</application> - From a directory that is above the old and new database cluster - directories, run this for each standby: + When using link mode, standby servers can be quickly upgraded using + rsync. 
To accomplish this, from a directory on + the primary server that is above the old and new database cluster + directories, run this on the primary for each standby + server: -rsync --archive --delete --hard-links --size-only old_pgdata new_pgdata remote_dir +rsync --archive --delete --hard-links --size-only --no-inc-recursive old_cluster new_cluster remote_dir - where + + + What this does is to record the links created by + pg_upgrade's link mode that connect files in the + old and new clusters on the primary server. It then finds matching + files in the standby's old cluster and creates links for them in the + standby's new cluster. Files that were not linked on the primary + are copied from the primary to the standby. (They are usually + small.) This provides rapid standby upgrades. Unfortunately, + rsync needlessly copies files associated with + temporary and unlogged tables because these files don't normally + exist on standby servers. If you have tablespaces, you will need to run a similar - rsync command for each tablespace directory. If you - have relocated pg_wal outside the data directories, - rsync must be run on those directories too. + rsync command for each tablespace directory, e.g.: + + +rsync --archive --delete --hard-links --size-only --no-inc-recursive /vol1/pg_tblsp/PG_9.5_201510051 \ + /vol1/pg_tblsp/PG_9.6_201608131 standby.example.com:/vol1/pg_tblsp + + + If you have relocated pg_wal outside the data + directories, rsync must be run on those directories + too. @@ -516,9 +579,9 @@ rsync --archive --delete --hard-links --size-only old_pgdata new_pgdata remote_d Configure the servers for log shipping. (You do not need to run - pg_start_backup() and pg_stop_backup() + pg_start_backup() and pg_stop_backup() or take a file system backup as the standbys are still synchronized - with the master.) + with the primary.) @@ -527,12 +590,12 @@ rsync --archive --delete --hard-links --size-only old_pgdata new_pgdata remote_d - Restore <filename>pg_hba.conf</> + Restore <filename>pg_hba.conf</filename> - If you modified pg_hba.conf, restore its original settings. + If you modified pg_hba.conf, restore its original settings. It might also be necessary to adjust other configuration files in the new - cluster to match the old cluster, e.g. postgresql.conf. + cluster to match the old cluster, e.g. postgresql.conf. @@ -541,7 +604,7 @@ rsync --archive --delete --hard-links --size-only old_pgdata new_pgdata remote_d The new server can now be safely started, and then any - rsync'ed standby servers. + rsync'ed standby servers. @@ -577,7 +640,7 @@ psql --username=postgres --file=script.sql postgres Statistics - Because optimizer statistics are not transferred by pg_upgrade, you will + Because optimizer statistics are not transferred by pg_upgrade, you will be instructed to run a command to regenerate that information at the end of the upgrade. You might need to set connection parameters to match your new cluster. @@ -593,7 +656,7 @@ psql --username=postgres --file=script.sql postgres pg_upgrade completes. (Automatic deletion is not possible if you have user-defined tablespaces inside the old data directory.) You can also delete the old installation directories - (e.g. bin, share). + (e.g. bin, share). @@ -608,7 +671,7 @@ psql --username=postgres --file=script.sql postgres If you ran pg_upgrade - with , no modifications were made to the old cluster and you can re-use it anytime. 
@@ -616,7 +679,7 @@ psql --username=postgres --file=script.sql postgres If you ran pg_upgrade - with , the data files are shared between the old and new cluster. If you started the new cluster, the new server has written to those shared files and it is unsafe to use the old cluster. @@ -625,13 +688,13 @@ psql --username=postgres --file=script.sql postgres - If you ran pg_upgrade without - or did not start the new server, the old cluster was not modified except that, if linking - started, a .old suffix was appended to - $PGDATA/global/pg_control. To reuse the old - cluster, possibly remove the .old suffix from - $PGDATA/global/pg_control; you can then restart the + started, a .old suffix was appended to + $PGDATA/global/pg_control. To reuse the old + cluster, possibly remove the .old suffix from + $PGDATA/global/pg_control; you can then restart the old cluster. @@ -646,16 +709,16 @@ psql --username=postgres --file=script.sql postgres Notes - pg_upgrade does not support upgrading of databases - containing these reg* OID-referencing system data types: - regproc, regprocedure, regoper, - regoperator, regconfig, and - regdictionary. (regtype can be upgraded.) + pg_upgrade does not support upgrading of databases + containing table columns using these reg* OID-referencing system data types: + regproc, regprocedure, regoper, + regoperator, regconfig, and + regdictionary. (regtype can be upgraded.) All failure, rebuild, and reindex cases will be reported by - pg_upgrade if they affect your installation; + pg_upgrade if they affect your installation; post-upgrade scripts to rebuild tables and indexes will be generated automatically. If you are trying to automate the upgrade of many clusters, you should find that clusters with identical database @@ -670,32 +733,33 @@ psql --username=postgres --file=script.sql postgres - If you are upgrading a pre-PostgreSQL 9.2 cluster + If you are upgrading a pre-PostgreSQL 9.2 cluster that uses a configuration-file-only directory, you must pass the - real data directory location to pg_upgrade, and + real data directory location to pg_upgrade, and pass the configuration directory location to the server, e.g. - -d /real-data-directory -o '-D /configuration-directory'. + -d /real-data-directory -o '-D /configuration-directory'. If using a pre-9.1 old server that is using a non-default Unix-domain socket directory or a default that differs from the default of the - new cluster, set PGHOST to point to the old server's socket + new cluster, set PGHOST to point to the old server's socket location. (This is not relevant on Windows.) If you want to use link mode and you do not want your old cluster - to be modified when the new cluster is started, make a copy of the + to be modified when the new cluster is started, consider using the clone mode. + If that is not available, make a copy of the old cluster and upgrade that in link mode. To make a valid copy - of the old cluster, use rsync to create a dirty + of the old cluster, use rsync to create a dirty copy of the old cluster while the server is running, then shut down - the old server and run rsync --checksum again to update the - copy with any changes to make it consistent. (
diff --git a/doc/src/sgml/ref/postgres-ref.sgml b/doc/src/sgml/ref/postgres-ref.sgml index fc11aa14a6..53dc05a78f 100644 --- a/doc/src/sgml/ref/postgres-ref.sgml +++ b/doc/src/sgml/ref/postgres-ref.sgml @@ -22,7 +22,7 @@ PostgreSQL documentation postgres - option + option @@ -51,8 +51,8 @@ PostgreSQL documentation option or the PGDATA environment variable; there is no default. Typically, or PGDATA points directly to the data area directory - created by . Other possible file layouts are - discussed in . + created by . Other possible file layouts are + discussed in . @@ -65,7 +65,7 @@ PostgreSQL documentation The postgres command can also be called in single-user mode. The primary use for this mode is during - bootstrapping by . Sometimes it is used + bootstrapping by . Sometimes it is used for debugging or disaster recovery; note that running a single-user server is not truly suitable for debugging the server, since no realistic interprocess communication and locking will happen. @@ -87,12 +87,12 @@ PostgreSQL documentation postgres accepts the following command-line arguments. For a detailed discussion of the options consult . You can save typing most of these + linkend="runtime-config"/>. You can save typing most of these options by setting up a configuration file. Some (safe) options can also be set from the connecting client in an application-dependent way to apply only for that session. For example, if the environment variable PGOPTIONS is - set, then libpq-based clients will pass that + set, then libpq-based clients will pass that string to the server, which will interpret it as postgres command-line options. @@ -109,7 +109,7 @@ PostgreSQL documentation processes. The default value of this parameter is chosen automatically by initdb. Specifying this option is equivalent to setting the - configuration parameter. + configuration parameter. @@ -120,9 +120,9 @@ PostgreSQL documentation Sets a named run-time parameter. The configuration parameters supported by PostgreSQL are - described in . Most of the + described in . Most of the other command line options are in fact short forms of such a - parameter assignment. can appear multiple times to set multiple parameters. @@ -133,18 +133,18 @@ PostgreSQL documentation Prints the value of the named run-time parameter, and exits. - (See the option above for details.) This can be used on a running server, and returns values from - postgresql.conf, modified by any parameters + postgresql.conf, modified by any parameters supplied in this invocation. It does not reflect parameters supplied when the cluster was started. This option is meant for other programs that interact with a server - instance, such as , to query configuration + instance, such as , to query configuration parameter values. User-facing applications should instead use or the pg_settings view. + linkend="sql-show"/> or the pg_settings view. @@ -157,7 +157,7 @@ PostgreSQL documentation debugging output is written to the server log. Values are from 1 to 5. It is also possible to pass -d 0 for a specific session, which will prevent the - server log level of the parent postgres process from being + server log level of the parent postgres process from being propagated to this session. @@ -169,7 +169,7 @@ PostgreSQL documentation Specifies the file system location of the database configuration files. See - for details. + for details. @@ -179,9 +179,9 @@ PostgreSQL documentation Sets the default date style to European, that is - DMY ordering of input date fields. 
This also causes + DMY ordering of input date fields. This also causes the day to be printed before the month in certain date output formats. - See for more information. + See for more information. @@ -193,7 +193,7 @@ PostgreSQL documentation Disables fsync calls for improved performance, at the risk of data corruption in the event of a system crash. Specifying this option is equivalent to - disabling the configuration + disabling the configuration parameter. Read the detailed documentation before using this! @@ -206,14 +206,14 @@ PostgreSQL documentation Specifies the IP host name or address on which postgres is to listen for TCP/IP connections from client applications. The value can also be a - comma-separated list of addresses, or * to specify + comma-separated list of addresses, or * to specify listening on all available interfaces. An empty value specifies not listening on any IP addresses, in which case only Unix-domain sockets can be used to connect to the server. Defaults to listening only on localhost. Specifying this option is equivalent to setting the configuration parameter. + linkend="guc-listen-addresses"/> configuration parameter. @@ -225,13 +225,13 @@ PostgreSQL documentation Allows remote clients to connect via TCP/IP (Internet domain) connections. Without this option, only local connections are accepted. This option is equivalent to setting - listen_addresses to * in - postgresql.conf or via . This option is deprecated since it does not allow access to the - full functionality of . - It's usually better to set listen_addresses directly. + full functionality of . + It's usually better to set listen_addresses directly. @@ -249,7 +249,7 @@ PostgreSQL documentation The default value is normally /tmp, but that can be changed at build time. Specifying this option is equivalent to setting the configuration parameter. + linkend="guc-unix-socket-directories"/> configuration parameter. @@ -262,7 +262,7 @@ PostgreSQL documentation PostgreSQL must have been compiled with support for SSL for this option to be available. For more information on using SSL, - refer to . + refer to . @@ -275,7 +275,7 @@ PostgreSQL documentation server will accept. The default value of this parameter is chosen automatically by initdb. Specifying this option is equivalent to setting the - configuration parameter. + configuration parameter. @@ -291,11 +291,11 @@ PostgreSQL documentation - Spaces within extra-options are + Spaces within extra-options are considered to separate arguments, unless escaped with a backslash - (\); write \\ to represent a literal + (\); write \\ to represent a literal backslash. Multiple arguments can also be specified via multiple - uses of . @@ -340,15 +340,15 @@ PostgreSQL documentation Specifies the amount of memory to be used by internal sorts and hashes before resorting to temporary disk files. See the description of the - work_mem configuration parameter in . + work_mem configuration parameter in . - - + + Print the postgres version and exit. @@ -361,7 +361,7 @@ PostgreSQL documentation Sets a named run-time parameter; a shorter form of - . @@ -371,15 +371,15 @@ PostgreSQL documentation This option dumps out the server's internal configuration variables, - descriptions, and defaults in tab-delimited COPY format. + descriptions, and defaults in tab-delimited COPY format. It is designed primarily for use by administration tools. 
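For example (a sketch; the option only dumps built-in information, so it is run without connecting to a server):

postgres --describe-config | head -n 5
# one tab-delimited line per configuration variable, with its default and description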
- - + + Show help about postgres command line @@ -531,7 +531,7 @@ PostgreSQL documentation The following options only apply to the single-user mode (see ). + endterm="app-postgres-single-user-title"/>). @@ -620,7 +620,7 @@ PostgreSQL documentation - Default value of the run-time + Default value of the run-time parameter. (The use of this environment variable is deprecated.) @@ -643,14 +643,14 @@ PostgreSQL documentation Diagnostics - A failure message mentioning semget or - shmget probably indicates you need to configure your + A failure message mentioning semget or + shmget probably indicates you need to configure your kernel to provide adequate shared memory and semaphores. For more - discussion see . You might be able + discussion see . You might be able to postpone reconfiguring your kernel by decreasing to reduce the shared memory - consumption of PostgreSQL, and/or by reducing - to reduce the semaphore + linkend="guc-shared-buffers"/> to reduce the shared memory + consumption of PostgreSQL, and/or by reducing + to reduce the semaphore consumption. @@ -689,7 +689,7 @@ PostgreSQL documentation Notes - The utility command can be used to + The utility command can be used to start and shut down the postgres server safely and comfortably. @@ -725,8 +725,8 @@ PostgreSQL documentation To cancel a running query, send the SIGINT signal to the process running that command. To terminate a backend process cleanly, send SIGTERM to that process. See - also pg_cancel_backend and pg_terminate_backend - in for the SQL-callable equivalents + also pg_cancel_backend and pg_terminate_backend + in for the SQL-callable equivalents of these two actions. @@ -745,9 +745,9 @@ PostgreSQL documentation Bugs - The @@ -759,17 +759,17 @@ PostgreSQL documentation To start a single-user mode server, use a command like -postgres --single -D /usr/local/pgsql/data other-options my_database +postgres --single -D /usr/local/pgsql/data other-options my_database - Provide the correct path to the database directory with Normally, the single-user mode server treats newline as the command entry terminator; there is no intelligence about semicolons, - as there is in psql. To continue a command + as there is in psql. To continue a command across multiple lines, you must type backslash just before each newline except the last one. The backslash and adjacent newline are both dropped from the input command. Note that this will happen even @@ -777,7 +777,7 @@ PostgreSQL documentation - But if you use the command line switch, a single newline does not terminate command entry; instead, the sequence semicolon-newline-newline does. That is, type a semicolon immediately followed by a completely empty line. Backslash-newline is not @@ -794,10 +794,10 @@ PostgreSQL documentation To quit the session, type EOF - (ControlD, usually). + (ControlD, usually). If you've entered any text since the last command entry terminator, then EOF will be taken as a command entry terminator, - and another EOF will be needed to exit. + and another EOF will be needed to exit. @@ -826,7 +826,7 @@ PostgreSQL documentation $ postgres -p 1234 - To connect to this server using psql, specify this port with the -p option: + To connect to this server using psql, specify this port with the -p option: $ psql -p 1234 @@ -844,11 +844,11 @@ PostgreSQL documentation $ postgres --work-mem=1234 Either form overrides whatever setting might exist for - work_mem in postgresql.conf. Notice that + work_mem in postgresql.conf. 
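The same switches can also be handed to pg_ctl, which the Notes above recommend for routine starts and stops; a sketch, assuming a conventional data directory location:

pg_ctl -D /usr/local/pgsql/data -o '--work-mem=1234' start
# -o forwards the quoted options to the underlying postgres command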
Notice that underscores in parameter names can be written as either underscore or dash on the command line. Except for short-term experiments, it's probably better practice to edit the setting in - postgresql.conf than to rely on a command-line switch + postgresql.conf than to rely on a command-line switch to set a parameter. @@ -857,8 +857,8 @@ PostgreSQL documentation See Also - , - + , + diff --git a/doc/src/sgml/ref/postmaster.sgml b/doc/src/sgml/ref/postmaster.sgml index 0a58a63331..311510a44d 100644 --- a/doc/src/sgml/ref/postmaster.sgml +++ b/doc/src/sgml/ref/postmaster.sgml @@ -22,7 +22,7 @@ PostgreSQL documentation postmaster - option + option @@ -38,7 +38,7 @@ PostgreSQL documentation See Also - + diff --git a/doc/src/sgml/ref/prepare.sgml b/doc/src/sgml/ref/prepare.sgml index fea2196efe..3d799b5b57 100644 --- a/doc/src/sgml/ref/prepare.sgml +++ b/doc/src/sgml/ref/prepare.sgml @@ -3,7 +3,7 @@ doc/src/sgml/ref/prepare.sgml PostgreSQL documentation --> - + PREPARE @@ -26,7 +26,7 @@ PostgreSQL documentation -PREPARE name [ ( data_type [, ...] ) ] AS statement +PREPARE name [ ( data_type [, ...] ) ] AS statement @@ -48,14 +48,14 @@ PREPARE name [ ( $1, $2, etc. A corresponding list of + $1, $2, etc. A corresponding list of parameter data types can optionally be specified. When a parameter's data type is not specified or is declared as unknown, the type is inferred from the context - in which the parameter is used (if possible). When executing the + in which the parameter is first referenced (if possible). When executing the statement, specify the actual values for these parameters in the EXECUTE statement. Refer to for more + linkend="sql-execute"/> for more information about that. @@ -66,7 +66,7 @@ PREPARE name [ ( command. + manually cleaned up using the command. @@ -86,7 +86,7 @@ PREPARE name [ ( - name + name An arbitrary name given to this particular prepared @@ -98,13 +98,13 @@ PREPARE name [ ( - data_type + data_type The data type of a parameter to the prepared statement. If the data type of a particular parameter is unspecified or is specified as unknown, it will be inferred - from the context in which the parameter is used. To refer to the + from the context in which the parameter is first referenced. To refer to the parameters in the prepared statement itself, use $1, $2, etc. @@ -112,18 +112,18 @@ PREPARE name [ ( - statement + statement - Any SELECT, INSERT, UPDATE, - DELETE, or VALUES statement. + Any SELECT, INSERT, UPDATE, + DELETE, or VALUES statement. - + Notes @@ -154,10 +154,10 @@ PREPARE name [ ( To examine the query plan PostgreSQL is using - for a prepared statement, use , e.g. - EXPLAIN EXECUTE. + for a prepared statement, use , e.g. + EXPLAIN EXECUTE. If a generic plan is in use, it will contain parameter symbols - $n, while a custom plan will have the + $n, while a custom plan will have the supplied parameter values substituted into it. The row estimates in the generic plan reflect the selectivity computed for the parameters. @@ -166,19 +166,19 @@ PREPARE name [ ( For more information on query planning and the statistics collected by PostgreSQL for that purpose, see - the + the documentation. 
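The generic-versus-custom distinction is easy to observe from the shell; a minimal sketch that touches only the system catalogs and assumes a reachable server with default connection settings:

psql <<'SQL'
PREPARE classbyname (name) AS
    SELECT oid, relkind FROM pg_class WHERE relname = $1;
EXPLAIN EXECUTE classbyname('pg_class');  -- early executions normally show a custom plan with the value substituted
SQL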
Although the main point of a prepared statement is to avoid repeated parse - analysis and planning of the statement, PostgreSQL will + analysis and planning of the statement, PostgreSQL will force re-analysis and re-planning of the statement before using it whenever database objects used in the statement have undergone definitional (DDL) changes since the previous use of the prepared - statement. Also, if the value of changes + statement. Also, if the value of changes from one use to the next, the statement will be re-parsed using the new - search_path. (This latter behavior is new as of + search_path. (This latter behavior is new as of PostgreSQL 9.3.) These rules make use of a prepared statement semantically almost equivalent to re-submitting the same query text over and over, but with a performance benefit if no object @@ -186,7 +186,7 @@ PREPARE name [ ( search_path, no automatic re-parse will occur + earlier in the search_path, no automatic re-parse will occur since no object used in the statement changed. However, if some other change forces a re-parse, the new table will be referenced in subsequent uses. @@ -222,7 +222,7 @@ EXECUTE usrrptplan(1, current_date); Note that the data type of the second parameter is not specified, - so it is inferred from the context in which $2 is used. + so it is inferred from the context in which $2 is used. @@ -240,8 +240,8 @@ EXECUTE usrrptplan(1, current_date); See Also - - + + diff --git a/doc/src/sgml/ref/prepare_transaction.sgml b/doc/src/sgml/ref/prepare_transaction.sgml index 626753f576..d958f7a06f 100644 --- a/doc/src/sgml/ref/prepare_transaction.sgml +++ b/doc/src/sgml/ref/prepare_transaction.sgml @@ -3,7 +3,7 @@ doc/src/sgml/ref/prepare_transaction.sgml PostgreSQL documentation --> - + PREPARE TRANSACTION @@ -21,7 +21,7 @@ PostgreSQL documentation -PREPARE TRANSACTION transaction_id +PREPARE TRANSACTION transaction_id @@ -39,15 +39,15 @@ PREPARE TRANSACTION transaction_id Once prepared, a transaction can later be committed or rolled back - with - or , + with + or , respectively. Those commands can be issued from any session, not only the one that executed the original transaction. From the point of view of the issuing session, PREPARE - TRANSACTION is not unlike a ROLLBACK command: + TRANSACTION is not unlike a ROLLBACK command: after executing it, there is no active current transaction, and the effects of the prepared transaction are no longer visible. (The effects will become visible again if the transaction is committed.) @@ -55,7 +55,7 @@ PREPARE TRANSACTION transaction_id If the PREPARE TRANSACTION command fails for any - reason, it becomes a ROLLBACK: the current transaction + reason, it becomes a ROLLBACK: the current transaction is canceled. @@ -65,11 +65,11 @@ PREPARE TRANSACTION transaction_id - transaction_id + transaction_id An arbitrary identifier that later identifies this transaction for - COMMIT PREPARED or ROLLBACK PREPARED. + COMMIT PREPARED or ROLLBACK PREPARED. The identifier must be written as a string literal, and must be less than 200 bytes long. It must not be the same as the identifier used for any currently prepared transaction. @@ -83,36 +83,37 @@ PREPARE TRANSACTION transaction_id Notes - PREPARE TRANSACTION is not intended for use in applications + PREPARE TRANSACTION is not intended for use in applications or interactive sessions. Its purpose is to allow an external transaction manager to perform atomic global transactions across multiple databases or other transactional resources. 
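In rough terms, such a manager issues something like the following against every participating database; a sketch only, assuming max_prepared_transactions has been raised above its default of zero and using an invented global identifier:

psql <<'SQL'
BEGIN;
SELECT 1;                        -- the real work for this database would go here
PREPARE TRANSACTION 'gtx_42';    -- phase one: the transaction is persisted and detached from the session
SQL
psql -c "COMMIT PREPARED 'gtx_42'"    # phase two, issued once every participant has prepared successfully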
Unless you're writing a transaction manager, you probably shouldn't be using PREPARE - TRANSACTION. + TRANSACTION. This command must be used inside a transaction block. Use to start one. + linkend="sql-begin"/> to start one. - It is not currently allowed to PREPARE a transaction that + It is not currently allowed to PREPARE a transaction that has executed any operations involving temporary tables, - created any cursors WITH HOLD, or executed - LISTEN or UNLISTEN. + created any cursors WITH HOLD, or executed + LISTEN, UNLISTEN, or + NOTIFY. Those features are too tightly tied to the current session to be useful in a transaction to be prepared. - If the transaction modified any run-time parameters with SET - (without the LOCAL option), - those effects persist after PREPARE TRANSACTION, and will not + If the transaction modified any run-time parameters with SET + (without the LOCAL option), + those effects persist after PREPARE TRANSACTION, and will not be affected by any later COMMIT PREPARED or ROLLBACK PREPARED. Thus, in this one respect - PREPARE TRANSACTION acts more like COMMIT than - ROLLBACK. + PREPARE TRANSACTION acts more like COMMIT than + ROLLBACK. @@ -124,10 +125,10 @@ PREPARE TRANSACTION transaction_id It is unwise to leave transactions in the prepared state for a long time. - This will interfere with the ability of VACUUM to reclaim + This will interfere with the ability of VACUUM to reclaim storage, and in extreme cases could cause the database to shut down to prevent transaction ID wraparound (see ). Keep in mind also that the transaction + linkend="vacuum-for-wraparound"/>). Keep in mind also that the transaction continues to hold whatever locks it held. The intended usage of the feature is that a prepared transaction will normally be committed or rolled back as soon as an external transaction manager has verified that @@ -138,7 +139,7 @@ PREPARE TRANSACTION transaction_id If you have not set up an external transaction manager to track prepared transactions and ensure they get closed out promptly, it is best to keep the prepared-transaction feature disabled by setting - to zero. This will + to zero. This will prevent accidental creation of prepared transactions that might then be forgotten and eventually cause problems. @@ -149,7 +150,7 @@ PREPARE TRANSACTION transaction_id Examples Prepare the current transaction for two-phase commit, using - foobar as the transaction identifier: + foobar as the transaction identifier: PREPARE TRANSACTION 'foobar'; @@ -172,8 +173,8 @@ PREPARE TRANSACTION 'foobar'; See Also - - + + diff --git a/doc/src/sgml/ref/psql-ref.sgml b/doc/src/sgml/ref/psql-ref.sgml index c592edac60..a1ca94057b 100644 --- a/doc/src/sgml/ref/psql-ref.sgml +++ b/doc/src/sgml/ref/psql-ref.sgml @@ -3,7 +3,7 @@ doc/src/sgml/ref/psql-ref.sgml PostgreSQL documentation --> - + psql @@ -45,13 +45,13 @@ PostgreSQL documentation - + Options - - + + Print all nonempty input lines to standard output as they are read. @@ -63,8 +63,8 @@ PostgreSQL documentation - - + + Switches to unaligned output mode. (The default output mode is @@ -75,8 +75,8 @@ PostgreSQL documentation - - + + Print failed SQL commands to standard error output. This is @@ -87,8 +87,8 @@ PostgreSQL documentation - - + + Specifies that psql is to execute the given @@ -116,16 +116,18 @@ psql -c '\x' -c 'SELECT * FROM foo;' echo '\x \\ SELECT * FROM foo;' | psql - (\\ is the separator meta-command.) + (\\ is the separator meta-command.) Each SQL command string passed - to is sent to the server as a single query. 
+ to is sent to the server as a single request. Because of this, the server executes it as a single transaction even if the string contains multiple SQL commands, - unless there are explicit BEGIN/COMMIT + unless there are explicit BEGIN/COMMIT commands included in the string to divide it into multiple - transactions. Also, psql only prints the + transactions. (See + for more details about how the server handles multi-query strings.) + Also, psql only prints the result of the last SQL command in the string. This is different from the behavior when the same string is read from a file or fed to psql's standard input, @@ -133,7 +135,7 @@ echo '\x \\ SELECT * FROM foo;' | psql each SQL command separately. - Because of this behavior, putting more than one command in a + Because of this behavior, putting more than one SQL command in a single string often has unexpected results. It's better to use repeated commands or feed multiple commands to psql's standard input, @@ -150,8 +152,8 @@ EOF - - + + Specifies the name of the database to connect to. This is @@ -165,14 +167,14 @@ EOF (postgresql:// or postgres://), it is treated as a conninfo string. See for more information. + linkend="libpq-connstring"/> for more information. - - + + Copy all SQL commands sent to the server to standard output as well. @@ -184,21 +186,21 @@ EOF - - + + Echo the actual queries generated by \d and other backslash commands. You can use this to study psql's internal operations. This is equivalent to - setting the variable ECHO_HIDDEN to on. + setting the variable ECHO_HIDDEN to on. - - + + Read commands from the @@ -217,7 +219,7 @@ EOF If filename is - (hyphen), then standard input is read until an EOF indication - or \q meta-command. This can be used to intersperse + or \q meta-command. This can be used to intersperse interactive input with input from files. Note however that Readline is not used in this case (much as if had been specified). @@ -239,8 +241,8 @@ EOF - - + + Use separator as the @@ -251,8 +253,8 @@ EOF - - + + Specifies the host name of the machine on which the @@ -264,8 +266,8 @@ EOF - - + + Turn on HTML tabular output. This is @@ -276,20 +278,28 @@ EOF - - + + List all available databases, then exit. Other non-connection options are ignored. This is similar to the meta-command \list. + + + When this option is used, psql will connect + to the database postgres, unless a different database + is named on the command line (option or non-option + argument, possibly via a service entry, but not via an environment + variable). + - - + + Write all query output into file - - + + Do not use Readline for line editing and do @@ -312,8 +322,8 @@ EOF - - + + Put all query output into file - - + + Specifies the TCP port or the local Unix-domain @@ -338,8 +348,8 @@ EOF - - + + Specifies printing options, in the style of @@ -352,8 +362,8 @@ EOF - - + + Specifies that psql should do its work @@ -361,14 +371,14 @@ EOF informational output. If this option is used, none of this happens. This is useful with the option. This is equivalent to setting the variable QUIET - to on. + to on. - - + + Use separator as the @@ -379,8 +389,8 @@ EOF - - + + Run in single-step mode. 
That means the user is prompted before @@ -391,8 +401,8 @@ EOF - - + + Runs in single-line mode where a newline terminates an SQL command, as a @@ -411,8 +421,8 @@ EOF - - + + Turn off printing of column names and result row count footers, @@ -423,8 +433,8 @@ EOF - - + + Specifies options to be placed within the @@ -435,8 +445,8 @@ EOF - - + + Connect to the database as the user - - - + + + Perform a variable assignment, like the \set @@ -464,8 +474,8 @@ EOF - - + + Print the psql version and exit. @@ -474,8 +484,8 @@ EOF - - + + Never issue a password prompt. If the server requires password @@ -494,8 +504,8 @@ EOF - - + + Force psql to prompt for a @@ -507,7 +517,7 @@ EOF will automatically prompt for a password if the server demands password authentication. However, psql will waste a connection attempt finding out that the server wants a - password. In some cases it is worth typing to avoid the extra connection attempt. @@ -520,8 +530,8 @@ EOF - - + + Turn on the expanded table formatting mode. This is equivalent to @@ -531,8 +541,8 @@ EOF - - + + Do not read the start-up file (neither the system-wide @@ -548,7 +558,7 @@ EOF Set the field separator for unaligned output to a zero byte. This is - equvalent to \pset fieldsep_zero. + equivalent to \pset fieldsep_zero. @@ -572,8 +582,8 @@ EOF This option can only be used in combination with one or more and/or options. It causes - psql to issue a BEGIN command - before the first such option and a COMMIT command after + psql to issue a BEGIN command + before the first such option and a COMMIT command after the last one, thereby wrapping all the commands into a single transaction. This ensures that either all the commands complete successfully, or no changes are applied. @@ -581,8 +591,8 @@ EOF If the commands themselves - contain BEGIN, COMMIT, - or ROLLBACK, this option will not have the desired + contain BEGIN, COMMIT, + or ROLLBACK, this option will not have the desired effects. Also, if an individual command cannot be executed inside a transaction block, specifying this option will cause the whole transaction to fail. @@ -591,17 +601,17 @@ EOF - - + + Show help about psql and exit. The optional - topic parameter (defaulting + topic parameter (defaulting to options) selects which part of psql is - explained: commands describes psql's - backslash commands; options describes the command-line - options that can be passed to psql; - and variables shows help about psql configuration + explained: commands describes psql's + backslash commands; options describes the command-line + options that can be passed to psql; + and variables shows help about psql configuration variables. @@ -627,7 +637,7 @@ EOF Usage - + Connecting to a Database @@ -642,8 +652,8 @@ EOF not belong to any option it will be interpreted as the database name (or the user name, if the database name is already given). Not all of these options are required; there are useful defaults. If you omit the host - name, psql will connect via a Unix-domain socket - to a server on the local host, or via TCP/IP to localhost on + name, psql will connect via a Unix-domain socket + to a server on the local host, or via TCP/IP to localhost on machines that don't have Unix-domain sockets. The default port number is determined at compile time. Since the database server uses the same default, you will not have @@ -660,9 +670,9 @@ EOF PGDATABASE, PGHOST, PGPORT and/or PGUSER to appropriate values. (For additional environment variables, see .) 
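For example (the host, port, user, and database names here are placeholders):

export PGHOST=db.example.com PGPORT=5433 PGUSER=report PGDATABASE=sales
psql    # connects to sales on db.example.com:5433 as report, with no options needed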
It is also convenient to have a - ~/.pgpass file to avoid regularly having to type in - passwords. See for more information. + linkend="libpq-envars"/>.) It is also convenient to have a + ~/.pgpass file to avoid regularly having to type in + passwords. See for more information. @@ -676,8 +686,8 @@ $ psql "service=myservice sslmode=require" $ psql postgresql://dbmaster:5433/mydb?sslmode=require This way you can also use LDAP for connection - parameter lookup as described in . - See for more information on all the + parameter lookup as described in . + See for more information on all the available connection options. @@ -699,7 +709,7 @@ $ psql postgresql://dbmaster:5433/mydb?sslmode=require - + Entering SQL Commands @@ -725,11 +735,23 @@ testdb=> of the command are displayed on the screen. + + If untrusted users have access to a database that has not adopted a + secure schema usage pattern, + begin your session by removing publicly-writable schemas + from search_path. One can + add options=-csearch_path= to the connection string or + issue SELECT pg_catalog.set_config('search_path', '', + false) before other SQL commands. This consideration is not + specific to psql; it applies to every interface + for executing arbitrary SQL commands. + + Whenever a command is executed, psql also polls for asynchronous notification events generated by - and - . + and + . @@ -739,7 +761,7 @@ testdb=> - + Meta-Commands @@ -775,12 +797,16 @@ testdb=> If an unquoted colon (:) followed by a - psql variable name appears within an argument, it is + psql variable name appears within an argument, it is replaced by the variable's value, as described in . - The forms :'variable_name' and - :"variable_name" described there + linkend="app-psql-interpolation" endterm="app-psql-interpolation-title"/>. + The forms :'variable_name' and + :"variable_name" described there work as well. + The :{?variable_name} syntax allows + testing whether a variable is defined. It is substituted by + TRUE or FALSE. + Escaping the colon with a backslash protects it from substitution. @@ -789,15 +815,15 @@ testdb=> shell. The output of the command (with any trailing newline removed) replaces the backquoted text. Within the text enclosed in backquotes, no special quoting or other processing occurs, except that appearances - of :variable_name where - variable_name is a psql variable name + of :variable_name where + variable_name is a psql variable name are replaced by the variable's value. Also, appearances of - :'variable_name' are replaced by the + :'variable_name' are replaced by the variable's value suitably quoted to become a single shell command argument. (The latter form is almost always preferable, unless you are very sure of what is in the variable.) Because carriage return and line feed characters cannot be safely quoted on all platforms, the - :'variable_name' form prints an + :'variable_name' form prints an error message and does not substitute the variable value when such characters appear in the value. @@ -806,13 +832,13 @@ testdb=> Some commands take an SQL identifier (such as a table name) as argument. These arguments follow the syntax rules of SQL: Unquoted letters are forced to - lowercase, while double quotes (") protect letters + lowercase, while double quotes (") protect letters from case conversion and allow incorporation of whitespace into the identifier. Within double quotes, paired double quotes reduce to a single double quote in the resulting name. 
For example, - FOO"BAR"BAZ is interpreted as fooBARbaz, - and "A weird"" name" becomes A weird" - name. + FOO"BAR"BAZ is interpreted as fooBARbaz, + and "A weird"" name" becomes A weird" + name. @@ -828,7 +854,7 @@ testdb=> - Many of the meta-commands act on the current query buffer. + Many of the meta-commands act on the current query buffer. This is simply a buffer holding whatever SQL command text has been typed but not yet sent to the server for execution. This will include previous input lines as well as any text appearing before the meta-command on the @@ -855,18 +881,18 @@ testdb=> \c or \connect [ -reuse-previous=on|off ] [ dbname [ username ] [ host ] [ port ] | conninfo ] - Establishes a new connection to a PostgreSQL + Establishes a new connection to a PostgreSQL server. The connection parameters to use can be specified either - using a positional syntax, or using conninfo connection - strings as detailed in . + using a positional syntax, or using conninfo connection + strings as detailed in . Where the command omits database name, user, host, or port, the new connection can reuse values from the previous connection. By default, values from the previous connection are reused except when processing - a conninfo string. Passing a first argument - of -reuse-previous=on + a conninfo string. Passing a first argument + of -reuse-previous=on or -reuse-previous=off overrides that default. When the command neither specifies nor reuses a particular parameter, the libpq default is used. Specifying any @@ -943,7 +969,7 @@ testdb=> - + \copy { table [ ( column_list ) ] | ( query ) } { from | to } { 'filename' | program 'command' | stdin | stdout | pstdin | pstdout } @@ -952,7 +978,7 @@ testdb=> Performs a frontend (client) copy. This is an operation that - runs an SQL + runs an SQL command, but instead of the server reading or writing the specified file, psql reads or writes the file and @@ -963,7 +989,7 @@ testdb=> - When program is specified, + When program is specified, command is executed by psql and the data passed from or to command is @@ -974,27 +1000,27 @@ testdb=> - For \copy ... from stdin, data rows are read from the same + For \copy ... from stdin, data rows are read from the same source that issued the command, continuing until \. - is read or the stream reaches EOF. This option is useful + is read or the stream reaches EOF. This option is useful for populating tables in-line within a SQL script file. - For \copy ... to stdout, output is sent to the same place - as psql command output, and - the COPY count command status is + For \copy ... to stdout, output is sent to the same place + as psql command output, and + the COPY count command status is not printed (since it might be confused with a data row). To read/write psql's standard input or - output regardless of the current command source or \o - option, write from pstdin or to pstdout. + output regardless of the current command source or \o + option, write from pstdin or to pstdout. The syntax of this command is similar to that of the - SQL + SQL command. All options other than the data source/destination are - as specified for . - Because of this, special parsing rules apply to the \copy + as specified for . + Because of this, special parsing rules apply to the \copy meta-command. 
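A typical client-side export, shown here as a one-liner against the system catalogs (the output file name is a placeholder):

psql -c "\copy (SELECT datname, encoding FROM pg_database) TO 'databases.csv' WITH (FORMAT csv, HEADER)"
# the file is created by psql on the client machine, not by the server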
Unlike most other meta-commands, the entire remainder - of the line is always taken to be the arguments of \copy, + of the line is always taken to be the arguments of \copy, and neither variable interpolation nor backquote expansion are performed in the arguments. @@ -1022,7 +1048,7 @@ testdb=> - + \crosstabview [ colV [ colH @@ -1034,7 +1060,7 @@ testdb=> Executes the current query buffer (like \g) and shows the results in a crosstab grid. The query must return at least three columns. - The output column identified by colV + The output column identified by colV becomes a vertical header and the output column identified by colH becomes a horizontal header. @@ -1062,7 +1088,7 @@ testdb=> The vertical header, displayed as the leftmost column, contains the - values found in column colV, in the + values found in column colV, in the same order as in the query results, but with duplicates removed. @@ -1071,11 +1097,11 @@ testdb=> found in column colH, with duplicates removed. By default, these appear in the same order as in the query results. But if the - optional sortcolH argument is given, + optional sortcolH argument is given, it identifies a column whose values must be integer numbers, and the values from colH will appear in the horizontal header sorted according to the - corresponding sortcolH values. + corresponding sortcolH values. @@ -1088,7 +1114,7 @@ testdb=> the value of colH is x and the value of colV - is y. If there is no such row, the cell is empty. If + is y. If there is no such row, the cell is empty. If there are multiple such rows, an error is reported. @@ -1096,7 +1122,7 @@ testdb=> - \d[S+] [ pattern ] + \d[S+] [ pattern ] @@ -1109,13 +1135,13 @@ testdb=> Associated indexes, constraints, rules, and triggers are also shown. For foreign tables, the associated foreign server is shown as well. - (Matching the pattern is defined in - + (Matching the pattern is defined in + below.) - For some types of relation, \d shows additional information + For some types of relation, \d shows additional information for each column: column values for sequences, indexed expressions for indexes, and foreign data wrapper options for foreign tables. @@ -1125,7 +1151,7 @@ testdb=> more information is displayed: any comments associated with the columns of the table are shown, as is the presence of OIDs in the table, the view definition if the relation is a view, a non-default - replica + replica identity setting. @@ -1149,7 +1175,7 @@ testdb=> - \da[S] [ pattern ] + \da[S] [ pattern ] @@ -1165,7 +1191,7 @@ testdb=> - \dA[+] [ pattern ] + \dA[+] [ pattern ] @@ -1179,7 +1205,7 @@ testdb=> - \db[+] [ pattern ] + \db[+] [ pattern ] @@ -1195,7 +1221,7 @@ testdb=> - \dc[S+] [ pattern ] + \dc[S+] [ pattern ] Lists conversions between character-set encodings. @@ -1213,7 +1239,7 @@ testdb=> - \dC[+] [ pattern ] + \dC[+] [ pattern ] Lists type casts. @@ -1228,12 +1254,12 @@ testdb=> - \dd[S] [ pattern ] + \dd[S] [ pattern ] - Shows the descriptions of objects of type constraint, - operator class, operator family, - rule, and trigger. All + Shows the descriptions of objects of type constraint, + operator class, operator family, + rule, and trigger. All other comments may be viewed by the respective backslash commands for those object types. @@ -1249,7 +1275,7 @@ testdb=> Descriptions for objects can be created with the + linkend="sql-comment"/> SQL command. @@ -1257,7 +1283,7 @@ testdb=> - \dD[S+] [ pattern ] + \dD[S+] [ pattern ] Lists domains. 
If - \ddp [ pattern ] + \ddp [ pattern ] Lists default access privilege settings. An entry is shown for @@ -1286,22 +1312,22 @@ testdb=> - The command is used to set + The command is used to set default access privileges. The meaning of the privilege display is explained under - . + . - \dE[S+] [ pattern ] - \di[S+] [ pattern ] - \dm[S+] [ pattern ] - \ds[S+] [ pattern ] - \dt[S+] [ pattern ] - \dv[S+] [ pattern ] + \dE[S+] [ pattern ] + \di[S+] [ pattern ] + \dm[S+] [ pattern ] + \ds[S+] [ pattern ] + \dt[S+] [ pattern ] + \dv[S+] [ pattern ] @@ -1312,7 +1338,7 @@ testdb=> respectively. You can specify any or all of these letters, in any order, to obtain a listing of objects - of these types. For example, \dit lists indexes + of these types. For example, \dit lists indexes and tables. If + is appended to the command name, each object is listed with its physical size on disk and its associated description, if any. @@ -1327,7 +1353,7 @@ testdb=> - \des[+] [ pattern ] + \des[+] [ pattern ] Lists foreign servers (mnemonic: external @@ -1343,7 +1369,7 @@ testdb=> - \det[+] [ pattern ] + \det[+] [ pattern ] Lists foreign tables (mnemonic: external tables). @@ -1358,7 +1384,7 @@ testdb=> - \deu[+] [ pattern ] + \deu[+] [ pattern ] Lists user mappings (mnemonic: external @@ -1381,7 +1407,7 @@ testdb=> - \dew[+] [ pattern ] + \dew[+] [ pattern ] Lists foreign-data wrappers (mnemonic: external @@ -1397,16 +1423,16 @@ testdb=> - \df[antwS+] [ pattern ] + \df[anptwS+] [ pattern ] Lists functions, together with their result data types, argument data - types, and function types, which are classified as agg - (aggregate), normal, trigger, or window. + types, and function types, which are classified as agg + (aggregate), normal, procedure, trigger, or window. To display only functions - of specific type(s), add the corresponding letters a, - n, t, or w to the command. + of specific type(s), add the corresponding letters a, + n, p, t, or w to the command. If pattern is specified, only functions whose names match the pattern are shown. @@ -1423,7 +1449,7 @@ testdb=> To look up functions taking arguments or returning values of a specific data type, use your pager's search capability to scroll through the - \df output. + \df output. @@ -1431,7 +1457,7 @@ testdb=> - \dF[+] [ pattern ] + \dF[+] [ pattern ] Lists text search configurations. @@ -1445,7 +1471,7 @@ testdb=> - \dFd[+] [ pattern ] + \dFd[+] [ pattern ] Lists text search dictionaries. @@ -1459,7 +1485,7 @@ testdb=> - \dFp[+] [ pattern ] + \dFp[+] [ pattern ] Lists text search parsers. @@ -1473,7 +1499,7 @@ testdb=> - \dFt[+] [ pattern ] + \dFt[+] [ pattern ] Lists text search templates. @@ -1487,12 +1513,12 @@ testdb=> - \dg[S+] [ pattern ] + \dg[S+] [ pattern ] Lists database roles. - (Since the concepts of users and groups have been - unified into roles, this command is now equivalent to + (Since the concepts of users and groups have been + unified into roles, this command is now equivalent to \du.) By default, only user-created roles are shown; supply the S modifier to include system roles. @@ -1517,7 +1543,7 @@ testdb=> - \dL[S+] [ pattern ] + \dL[S+] [ pattern ] Lists procedural languages. If - \dn[S+] [ pattern ] + \dn[S+] [ pattern ] @@ -1551,7 +1577,7 @@ testdb=> - \do[S+] [ pattern ] + \do[S+] [ pattern ] Lists operators with their operand and result types. @@ -1569,7 +1595,7 @@ testdb=> - \dO[S+] [ pattern ] + \dO[S+] [ pattern ] Lists collations. 
@@ -1589,7 +1615,7 @@ testdb=> - \dp [ pattern ] + \dp [ pattern ] Lists tables, views and sequences with their @@ -1600,17 +1626,17 @@ testdb=> - The and - + The and + commands are used to set access privileges. The meaning of the privilege display is explained under - . + . - \drds [ role-pattern [ database-pattern ] ] + \drds [ role-pattern [ database-pattern ] ] Lists defined configuration settings. These settings can be @@ -1618,13 +1644,13 @@ testdb=> role-pattern and database-pattern are used to select specific roles and databases to list, respectively. If omitted, or if - * is specified, all settings are listed, including those + * is specified, all settings are listed, including those not role-specific or database-specific, respectively. - The and - + The and + commands are used to define per-role and per-database configuration settings. @@ -1632,7 +1658,7 @@ testdb=> - \dRp[+] [ pattern ] + \dRp[+] [ pattern ] Lists replication publications. @@ -1646,7 +1672,7 @@ testdb=> - \dRs[+] [ pattern ] + \dRs[+] [ pattern ] Lists replication subscriptions. @@ -1660,7 +1686,7 @@ testdb=> - \dT[S+] [ pattern ] + \dT[S+] [ pattern ] Lists data types. @@ -1668,7 +1694,7 @@ testdb=> specified, only types whose names match the pattern are listed. If + is appended to the command name, each type is listed with its internal name and size, its allowed values - if it is an enum type, and its associated permissions. + if it is an enum type, and its associated permissions. By default, only user-created objects are shown; supply a pattern or the S modifier to include system objects. @@ -1677,12 +1703,12 @@ testdb=> - \du[S+] [ pattern ] + \du[S+] [ pattern ] Lists database roles. - (Since the concepts of users and groups have been - unified into roles, this command is now equivalent to + (Since the concepts of users and groups have been + unified into roles, this command is now equivalent to \dg.) By default, only user-created roles are shown; supply the S modifier to include system roles. @@ -1696,7 +1722,7 @@ testdb=> - \dx[+] [ pattern ] + \dx[+] [ pattern ] Lists installed extensions. @@ -1710,7 +1736,7 @@ testdb=> - \dy[+] [ pattern ] + \dy[+] [ pattern ] Lists event triggers. @@ -1724,7 +1750,7 @@ testdb=> - \e or \edit filename line_number + \e or \edit filename line_number @@ -1744,8 +1770,8 @@ testdb=> whole buffer as a single line. Any complete queries are immediately executed; that is, if the query buffer contains or ends with a semicolon, everything up to that point is executed. Whatever remains - will wait in the query buffer; type semicolon or \g to - send it, or \r to cancel it by clearing the query buffer. + will wait in the query buffer; type semicolon or \g to + send it, or \r to cancel it by clearing the query buffer. Treating the buffer as a single line primarily affects meta-commands: whatever is in the buffer after a meta-command will be taken as argument(s) to the meta-command, even if it spans multiple lines. @@ -1764,7 +1790,7 @@ testdb=> See under for how to configure and + endterm="app-psql-environment-title"/> for how to configure and customize your editor. @@ -1797,27 +1823,28 @@ Tue Oct 26 21:40:57 CEST 1999 - \ef function_description line_number + \ef function_description line_number - This command fetches and edits the definition of the named function, - in the form of a CREATE OR REPLACE FUNCTION command. - Editing is done in the same way as for \edit. 
+ This command fetches and edits the definition of the named function or procedure, + in the form of a CREATE OR REPLACE FUNCTION or + CREATE OR REPLACE PROCEDURE command. + Editing is done in the same way as for \edit. After the editor exits, the updated command waits in the query buffer; - type semicolon or \g to send it, or \r + type semicolon or \g to send it, or \r to cancel. The target function can be specified by name alone, or by name - and arguments, for example foo(integer, text). + and arguments, for example foo(integer, text). The argument types must be given if there is more than one function of the same name. - If no function is specified, a blank CREATE FUNCTION + If no function is specified, a blank CREATE FUNCTION template is presented for editing. @@ -1830,7 +1857,7 @@ Tue Oct 26 21:40:57 CEST 1999 Unlike most other meta-commands, the entire remainder of the line is - always taken to be the argument(s) of \ef, and neither + always taken to be the argument(s) of \ef, and neither variable interpolation nor backquote expansion are performed in the arguments. @@ -1838,7 +1865,7 @@ Tue Oct 26 21:40:57 CEST 1999 See under for how to configure and + endterm="app-psql-environment-title"/> for how to configure and customize your editor. @@ -1865,28 +1892,28 @@ Tue Oct 26 21:40:57 CEST 1999 Repeats the most recent server error message at maximum verbosity, as though VERBOSITY were set - to verbose and SHOW_CONTEXT were - set to always. + to verbose and SHOW_CONTEXT were + set to always. - \ev view_name line_number + \ev view_name line_number This command fetches and edits the definition of the named view, - in the form of a CREATE OR REPLACE VIEW command. - Editing is done in the same way as for \edit. + in the form of a CREATE OR REPLACE VIEW command. + Editing is done in the same way as for \edit. After the editor exits, the updated command waits in the query buffer; - type semicolon or \g to send it, or \r + type semicolon or \g to send it, or \r to cancel. - If no view is specified, a blank CREATE VIEW + If no view is specified, a blank CREATE VIEW template is presented for editing. @@ -1897,7 +1924,7 @@ Tue Oct 26 21:40:57 CEST 1999 Unlike most other meta-commands, the entire remainder of the line is - always taken to be the argument(s) of \ev, and neither + always taken to be the argument(s) of \ev, and neither variable interpolation nor backquote expansion are performed in the arguments. @@ -1938,7 +1965,7 @@ Tue Oct 26 21:40:57 CEST 1999 alternative to the \o command. - If the argument begins with |, then the entire remainder + If the argument begins with |, then the entire remainder of the line is taken to be the command to execute, and neither variable interpolation nor backquote expansion are @@ -1949,6 +1976,25 @@ Tue Oct 26 21:40:57 CEST 1999 + + \gdesc + + + + Shows the description (that is, the column names and data types) + of the result of the current query buffer. The query is not + actually executed; however, if it contains some type of syntax + error, that error will be reported in the normal way. + + + + If the current query buffer is empty, the most recently sent query + is described instead. + + + + + \gexec @@ -1957,13 +2003,13 @@ Tue Oct 26 21:40:57 CEST 1999 Sends the current query buffer to the server, then treats each column of each row of the query's output (if any) as a SQL statement to be executed. 
For example, to create an index on each - column of my_table: + column of my_table: -=> SELECT format('create index on my_table(%I)', attname) --> FROM pg_attribute --> WHERE attrelid = 'my_table'::regclass AND attnum > 0 --> ORDER BY attnum --> \gexec +=> SELECT format('create index on my_table(%I)', attname) +-> FROM pg_attribute +-> WHERE attrelid = 'my_table'::regclass AND attnum > 0 +-> ORDER BY attnum +-> \gexec CREATE INDEX CREATE INDEX CREATE INDEX @@ -1976,14 +2022,14 @@ CREATE INDEX are returned, and left-to-right within each row if there is more than one column. NULL fields are ignored. The generated queries are sent literally to the server for processing, so they cannot be - psql meta-commands nor contain psql + psql meta-commands nor contain psql variable references. If any individual query fails, execution of the remaining queries continues unless ON_ERROR_STOP is set. Execution of each query is subject to ECHO processing. (Setting ECHO to all or queries is often advisable when - using \gexec.) Query logging, single-step mode, + using \gexec.) Query logging, single-step mode, timing, and other query execution features apply to each generated query as well. @@ -2001,8 +2047,8 @@ CREATE INDEX Sends the current query buffer to the server and stores the - query's output into psql variables (see ). + query's output into psql variables (see ). The query to be executed must return exactly one row. Each column of the row is stored into a separate variable, named the same as the column. For example: @@ -2067,7 +2113,7 @@ hello 10 Unlike most other meta-commands, the entire remainder of the line is - always taken to be the argument(s) of \help, and neither + always taken to be the argument(s) of \help, and neither variable interpolation nor backquote expansion are performed in the arguments. @@ -2108,7 +2154,7 @@ hello 10 If filename is - (hyphen), then standard input is read until an EOF indication - or \q meta-command. This can be used to intersperse + or \q meta-command. This can be used to intersperse interactive input with input from files. Note that Readline behavior will be used only if it is active at the outermost level. @@ -2123,7 +2169,7 @@ hello 10 - + \if expression \elif expression \else @@ -2183,7 +2229,7 @@ hello 10 the same source file. If EOF is reached on the main input file or an \include-ed file before all local \if-blocks have been closed, - then psql will raise an error. + then psql will raise an error. Here is an example: @@ -2216,7 +2262,7 @@ SELECT \ir or \include_relative filename - The \ir command is similar to \i, but resolves + The \ir command is similar to \i, but resolves relative file names differently. When executing in interactive mode, the two commands behave identically. However, when invoked from a script, \ir interprets file names relative to the @@ -2228,7 +2274,7 @@ SELECT - \l[+] or \list[+] [ pattern ] + \l[+] or \list[+] [ pattern ] List the databases in the server and show their names, owners, @@ -2341,7 +2387,7 @@ lo_import 152801 - If the argument begins with |, then the entire remainder + If the argument begins with |, then the entire remainder of the line is taken to be the command to execute, and neither variable interpolation nor backquote expansion are @@ -2384,7 +2430,7 @@ lo_import 152801 Changes the password of the specified user (by default, the current user). This command prompts for the new password, encrypts it, and - sends it to the server as an ALTER ROLE command. This + sends it to the server as an ALTER ROLE command. 
This makes sure that the new password does not appear in cleartext in the command history, the server log, or elsewhere. @@ -2396,16 +2442,16 @@ lo_import 152801 Prompts the user to supply text, which is assigned to the variable - name. + name. An optional prompt string, text, can be specified. (For multiword + class="parameter">text, can be specified. (For multiword prompts, surround the text with single quotes.) - By default, \prompt uses the terminal for input and - output. However, if the @@ -2459,16 +2505,16 @@ lo_import 152801 columns - Sets the target width for the wrapped format, and also + Sets the target width for the wrapped format, and also the width limit for determining whether output is wide enough to require the pager or switch to the vertical display in expanded auto mode. Zero (the default) causes the target width to be controlled by the - environment variable COLUMNS, or the detected screen width - if COLUMNS is not set. - In addition, if columns is zero then the - wrapped format only affects screen output. - If columns is nonzero then file and pipe output is + environment variable COLUMNS, or the detected screen width + if COLUMNS is not set. + In addition, if columns is zero then the + wrapped format only affects screen output. + If columns is nonzero then file and pipe output is wrapped to that width as well. @@ -2527,7 +2573,7 @@ lo_import 152801 If value is specified it must be either on or off which will enable or disable display of the table footer - (the (n rows) count). + (the (n rows) count). If value is omitted the command toggles footer display on or off. @@ -2538,17 +2584,16 @@ lo_import 152801 format - Sets the output format to one of unaligned, - aligned, wrapped, - html, asciidoc, + Sets the output format to one of aligned, + asciidoc, html, latex (uses tabular), - latex-longtable, or - troff-ms. + latex-longtable, troff-ms, + unaligned, or wrapped. Unique abbreviations are allowed. (That would mean one letter is enough.) - unaligned format writes all columns of a row on one + unaligned format writes all columns of a row on one line, separated by the currently active field separator. This is useful for creating output that might be intended to be read in by other programs (for example, tab-separated or comma-separated @@ -2559,26 +2604,26 @@ lo_import 152801 nicely formatted text output; this is the default. - wrapped format is like aligned but wraps + wrapped format is like aligned but wraps wide data values across lines to make the output fit in the target column width. The target width is determined as described under - the columns option. Note that psql will + the columns option. Note that psql will not attempt to wrap column header titles; therefore, - wrapped format behaves the same as aligned + wrapped format behaves the same as aligned if the total width needed for column headers exceeds the target. - The html, asciidoc, latex, - latex-longtable, and troff-ms - formats put out tables that are intended to - be included in documents using the respective mark-up + The asciidoc, html, + latex, latex-longtable, and + troff-ms formats put out tables that are intended + to be included in documents using the respective mark-up language. They are not complete documents! This might not be necessary in HTML, but in LaTeX you must have a complete document wrapper. latex-longtable also requires the LaTeX - longtable and booktabs packages. + longtable and booktabs packages. @@ -2592,9 +2637,9 @@ lo_import 152801 or unicode. Unique abbreviations are allowed. 
(That would mean one letter is enough.) - The default setting is ascii. - This option only affects the aligned and - wrapped output formats. + The default setting is ascii. + This option only affects the aligned and + wrapped output formats. ascii style uses plain ASCII @@ -2602,17 +2647,17 @@ lo_import 152801 a + symbol in the right-hand margin. When the wrapped format wraps data from one line to the next without a newline character, a dot - (.) is shown in the right-hand margin of the first line, + (.) is shown in the right-hand margin of the first line, and again in the left-hand margin of the following line. - old-ascii style uses plain ASCII + old-ascii style uses plain ASCII characters, using the formatting style used in PostgreSQL 8.4 and earlier. Newlines in data are shown using a : symbol in place of the left-hand column separator. When the data is wrapped from one line - to the next without a newline character, a ; + to the next without a newline character, a ; symbol is used in place of the left-hand column separator. @@ -2625,7 +2670,7 @@ lo_import 152801 - When the border setting is greater than zero, + When the border setting is greater than zero, the linestyle option also determines the characters with which the border lines are drawn. Plain ASCII characters work everywhere, but @@ -2664,21 +2709,21 @@ lo_import 152801 pager - Controls use of a pager program for query and psql - help output. If the environment variable PAGER - is set, the output is piped to the specified program. - Otherwise a platform-dependent default (such as - more) is used. + Controls use of a pager program for query and psql + help output. If the environment variable PSQL_PAGER + or PAGER is set, the output is piped to the + specified program. Otherwise a platform-dependent default program + (such as more) is used. - When the pager option is off, the pager - program is not used. When the pager option is - on, the pager is used when appropriate, i.e., when the + When the pager option is off, the pager + program is not used. When the pager option is + on, the pager is used when appropriate, i.e., when the output is to a terminal and will not fit on the screen. - The pager option can also be set to always, + The pager option can also be set to always, which causes the pager to be used for all terminal output regardless - of whether it fits on the screen. \pset pager + of whether it fits on the screen. \pset pager without a value toggles pager use on and off. @@ -2689,7 +2734,7 @@ lo_import 152801 pager_min_lines - If pager_min_lines is set to a number greater than the + If pager_min_lines is set to a number greater than the page height, the pager program will not be called unless there are at least this many lines of output to show. The default setting is 0. @@ -2735,7 +2780,7 @@ lo_import 152801 In latex-longtable format, this controls the proportional width of each column containing a left-aligned data type. It is specified as a whitespace-separated list of values, - e.g. '0.2 0.2 0.6'. Unspecified output columns + e.g. '0.2 0.2 0.6'. Unspecified output columns use the last specified value. @@ -2806,8 +2851,8 @@ lo_import 152801 Illustrations of how these different formats look can be seen in - the section. + the section. @@ -2877,7 +2922,7 @@ lo_import 152801 - Sets the psql variable psql variable name to value, or if more than one value is given, to the concatenation of all of them. If only one @@ -2885,15 +2930,15 @@ lo_import 152801 unset a variable, use the \unset command. 
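For example, since multiple values are concatenated without separators and unquoted variable references in the arguments are interpolated, one variable can be built from another (a minimal sketch; the variable names are illustrative):

testdb=> \set schema_name staging
testdb=> \set orders_table :schema_name .orders
testdb=> \echo :orders_table
staging.orders
testdb=> \unset orders_table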
- \set without any arguments displays the names and values - of all currently-set psql variables. + \set without any arguments displays the names and values + of all currently-set psql variables. Valid variable names can contain letters, digits, and underscores. See the section below for details. + linkend="app-psql-variables" + endterm="app-psql-variables-title"/> below for details. Variable names are case-sensitive. @@ -2901,14 +2946,14 @@ lo_import 152801 Certain variables are special, in that they control psql's behavior or are automatically set to reflect connection state. These variables are - documented in , below. + documented in , below. This command is unrelated to the SQL - command . + command . @@ -2933,19 +2978,20 @@ testdb=> \setenv LESS -imx4F - \sf[+] function_description + \sf[+] function_description - This command fetches and shows the definition of the named function, - in the form of a CREATE OR REPLACE FUNCTION command. + This command fetches and shows the definition of the named function or procedure, + in the form of a CREATE OR REPLACE FUNCTION or + CREATE OR REPLACE PROCEDURE command. The definition is printed to the current query output channel, as set by \o. The target function can be specified by name alone, or by name - and arguments, for example foo(integer, text). + and arguments, for example foo(integer, text). The argument types must be given if there is more than one function of the same name. @@ -2958,7 +3004,7 @@ testdb=> \setenv LESS -imx4F Unlike most other meta-commands, the entire remainder of the line is - always taken to be the argument(s) of \sf, and neither + always taken to be the argument(s) of \sf, and neither variable interpolation nor backquote expansion are performed in the arguments. @@ -2967,12 +3013,12 @@ testdb=> \setenv LESS -imx4F - \sv[+] view_name + \sv[+] view_name This command fetches and shows the definition of the named view, - in the form of a CREATE OR REPLACE VIEW command. + in the form of a CREATE OR REPLACE VIEW command. The definition is printed to the current query output channel, as set by \o. @@ -2984,7 +3030,7 @@ testdb=> \setenv LESS -imx4F Unlike most other meta-commands, the entire remainder of the line is - always taken to be the argument(s) of \sv, and neither + always taken to be the argument(s) of \sv, and neither variable interpolation nor backquote expansion are performed in the arguments. @@ -3037,16 +3083,16 @@ testdb=> \setenv LESS -imx4F - Unsets (deletes) the psql variable psql variable name. Most variables that control psql's behavior - cannot be unset; instead, an \unset command is interpreted + cannot be unset; instead, an \unset command is interpreted as setting them to their default values. - See , below. + See , below. @@ -3054,7 +3100,7 @@ testdb=> \setenv LESS -imx4F \w or \write filename - \w or \write |command + \w or \write |command Writes the current query buffer to the file \setenv LESS -imx4F - If the argument begins with |, then the entire remainder + If the argument begins with |, then the entire remainder of the line is taken to be the command to execute, and neither variable interpolation nor backquote expansion are @@ -3080,10 +3126,10 @@ testdb=> \setenv LESS -imx4F \watch [ seconds ] - Repeatedly execute the current query buffer (as \g does) + Repeatedly execute the current query buffer (as \g does) until interrupted or the query fails. Wait the specified number of seconds (default 2) between executions. 
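For example, a monitoring query can be typed without its terminating semicolon, left in the query buffer, and then re-run every five seconds (a minimal sketch):

testdb=> SELECT count(*) AS active_backends FROM pg_stat_activity
testdb-> \watch 5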
Each query result is - displayed with a header that includes the \pset title + displayed with a header that includes the \pset title string (if any), the time as of query start, and the delay interval. @@ -3106,7 +3152,7 @@ testdb=> \setenv LESS -imx4F - \z [ pattern ] + \z [ pattern ] Lists tables, views and sequences with their @@ -3128,14 +3174,14 @@ testdb=> \setenv LESS -imx4F \! [ command ] - With no argument, escapes to a sub-shell; psql + With no argument, escapes to a sub-shell; psql resumes when the sub-shell exits. With an argument, executes the shell command command. Unlike most other meta-commands, the entire remainder of the line is - always taken to be the argument(s) of \!, and neither + always taken to be the argument(s) of \!, and neither variable interpolation nor backquote expansion are performed in the arguments. The rest of the line is simply passed literally to the shell. @@ -3145,26 +3191,67 @@ testdb=> \setenv LESS -imx4F - \? [ topic ] + \? [ topic ] Shows help information. The optional - topic parameter - (defaulting to commands) selects which part of psql is - explained: commands describes psql's - backslash commands; options describes the command-line - options that can be passed to psql; - and variables shows help about psql configuration + topic parameter + (defaulting to commands) selects which part of psql is + explained: commands describes psql's + backslash commands; options describes the command-line + options that can be passed to psql; + and variables shows help about psql configuration variables. + + + \; + + + Backslash-semicolon is not a meta-command in the same way as the + preceding commands; rather, it simply causes a semicolon to be + added to the query buffer without any further processing. + + + + Normally, psql will dispatch a SQL command to the + server as soon as it reaches the command-ending semicolon, even if + more input remains on the current line. Thus for example entering + +select 1; select 2; select 3; + + will result in the three SQL commands being individually sent to + the server, with each one's results being displayed before + continuing to the next command. However, a semicolon entered + as \; will not trigger command processing, so that the + command before it and the one after are effectively combined and + sent to the server in one request. So for example + +select 1\; select 2\; select 3; + + results in sending the three SQL commands to the server in a single + request, when the non-backslashed semicolon is reached. + The server executes such a request as a single transaction, + unless there are explicit BEGIN/COMMIT + commands included in the string to divide it into multiple + transactions. (See + for more details about how the server handles multi-query strings.) + psql prints only the last query result + it receives for each request; in this example, although all + three SELECTs are indeed executed, psql + only prints the 3. + + + + - - Patterns + + Patterns patterns @@ -3172,54 +3259,54 @@ testdb=> \setenv LESS -imx4F - The various \d commands accept a \d commands accept a pattern parameter to specify the object name(s) to be displayed. In the simplest case, a pattern is just the exact name of the object. The characters within a pattern are normally folded to lower case, just as in SQL names; - for example, \dt FOO will display the table named - foo. As in SQL names, placing double quotes around + for example, \dt FOO will display the table named + foo. 
As in SQL names, placing double quotes around a pattern stops folding to lower case. Should you need to include an actual double quote character in a pattern, write it as a pair of double quotes within a double-quote sequence; again this is in accord with the rules for SQL quoted identifiers. For example, - \dt "FOO""BAR" will display the table named - FOO"BAR (not foo"bar). Unlike the normal + \dt "FOO""BAR" will display the table named + FOO"BAR (not foo"bar). Unlike the normal rules for SQL names, you can put double quotes around just part - of a pattern, for instance \dt FOO"FOO"BAR will display - the table named fooFOObar. + of a pattern, for instance \dt FOO"FOO"BAR will display + the table named fooFOObar. Whenever the pattern parameter - is omitted completely, the \d commands display all objects + is omitted completely, the \d commands display all objects that are visible in the current schema search path — this is - equivalent to using * as the pattern. - (An object is said to be visible if its + equivalent to using * as the pattern. + (An object is said to be visible if its containing schema is in the search path and no object of the same kind and name appears earlier in the search path. This is equivalent to the statement that the object can be referenced by name without explicit schema qualification.) To see all objects in the database regardless of visibility, - use *.* as the pattern. + use *.* as the pattern. - Within a pattern, * matches any sequence of characters - (including no characters) and ? matches any single character. + Within a pattern, * matches any sequence of characters + (including no characters) and ? matches any single character. (This notation is comparable to Unix shell file name patterns.) - For example, \dt int* displays tables whose names - begin with int. But within double quotes, * - and ? lose these special meanings and are just matched + For example, \dt int* displays tables whose names + begin with int. But within double quotes, * + and ? lose these special meanings and are just matched literally. - A pattern that contains a dot (.) is interpreted as a schema + A pattern that contains a dot (.) is interpreted as a schema name pattern followed by an object name pattern. For example, - \dt foo*.*bar* displays all tables whose table name - includes bar that are in schemas whose schema name - starts with foo. When no dot appears, then the pattern + \dt foo*.*bar* displays all tables whose table name + includes bar that are in schemas whose schema name + starts with foo. When no dot appears, then the pattern matches only objects that are visible in the current schema search path. Again, a dot within double quotes loses its special meaning and is matched literally. @@ -3227,28 +3314,28 @@ testdb=> \setenv LESS -imx4F Advanced users can use regular-expression notations such as character - classes, for example [0-9] to match any digit. All regular + classes, for example [0-9] to match any digit. All regular expression special characters work as specified in - , except for . which - is taken as a separator as mentioned above, * which is - translated to the regular-expression notation .*, - ? which is translated to ., and - $ which is matched literally. You can emulate + , except for . which + is taken as a separator as mentioned above, * which is + translated to the regular-expression notation .*, + ? which is translated to ., and + $ which is matched literally. You can emulate these pattern characters at need by writing - ? for ., + ? 
for ., (R+|) for R*, or (R|) for R?. - $ is not needed as a regular-expression character since + $ is not needed as a regular-expression character since the pattern must match the whole name, unlike the usual - interpretation of regular expressions (in other words, $ - is automatically appended to your pattern). Write * at the + interpretation of regular expressions (in other words, $ + is automatically appended to your pattern). Write * at the beginning and/or end if you don't wish the pattern to be anchored. Note that within double quotes, all regular expression special characters lose their special meanings and are matched literally. Also, the regular expression special characters are matched literally in operator name - patterns (i.e., the argument of \do). + patterns (i.e., the argument of \do). @@ -3256,8 +3343,8 @@ testdb=> \setenv LESS -imx4F Advanced Features - - Variables + + Variables psql provides variable substitution @@ -3281,8 +3368,8 @@ testdb=> \echo :foo bar This works in both regular SQL commands and meta-commands; there is - more detail in , below. + more detail in , below. @@ -3321,14 +3408,14 @@ bar Variables that control psql's behavior - generally cannot be unset or set to invalid values. An \unset + generally cannot be unset or set to invalid values. An \unset command is allowed but is interpreted as setting the variable to its - default value. A \set command without a second argument is - interpreted as setting the variable to on, for control + default value. A \set command without a second argument is + interpreted as setting the variable to on, for control variables that accept that value, and is rejected for others. Also, - control variables that accept the values on - and off will also accept other common spellings of Boolean - values, such as true and false. + control variables that accept the values on + and off will also accept other common spellings of Boolean + values, such as true and false. @@ -3346,23 +3433,23 @@ bar - When on (the default), each SQL command is automatically + When on (the default), each SQL command is automatically committed upon successful completion. To postpone commit in this - mode, you must enter a BEGIN or START - TRANSACTION SQL command. When off or unset, SQL + mode, you must enter a BEGIN or START + TRANSACTION SQL command. When off or unset, SQL commands are not committed until you explicitly issue - COMMIT or END. The autocommit-off - mode works by issuing an implicit BEGIN for you, just + COMMIT or END. The autocommit-off + mode works by issuing an implicit BEGIN for you, just before any command that is not already in a transaction block and - is not itself a BEGIN or other transaction-control + is not itself a BEGIN or other transaction-control command, nor a command that cannot be executed inside a transaction - block (such as VACUUM). + block (such as VACUUM). In autocommit-off mode, you must explicitly abandon any failed - transaction by entering ABORT or ROLLBACK. + transaction by entering ABORT or ROLLBACK. Also keep in mind that if you exit the session without committing, your work will be lost. @@ -3370,7 +3457,7 @@ bar - The autocommit-on mode is PostgreSQL's traditional + The autocommit-on mode is PostgreSQL's traditional behavior, but autocommit-off is closer to the SQL spec. 
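For example, with autocommit off an unintended change can simply be abandoned, because nothing has been committed yet (a minimal sketch; the accounts table is hypothetical):

testdb=> \set AUTOCOMMIT off
testdb=> UPDATE accounts SET balance = 0;
testdb=> ROLLBACK;

The UPDATE runs inside the transaction block that psql opened implicitly, so the ROLLBACK discards it.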
If you prefer autocommit-off, you might wish to set it in the system-wide psqlrc file or your @@ -3430,7 +3517,7 @@ bar ECHO_HIDDEN - When this variable is set to on and a backslash command + When this variable is set to on and a backslash command queries the database, the query is first shown. This feature helps you to study PostgreSQL internals and provide @@ -3438,7 +3525,7 @@ bar on program start-up, use the switch .) If you set this variable to the value noexec, the queries are just shown but are not actually sent to the server and executed. - The default value is off. + The default value is off. @@ -3450,11 +3537,21 @@ bar The current client character set encoding. This is set every time you connect to a database (including program start-up), and when you change the encoding - with \encoding, but it can be changed or unset. + with \encoding, but it can be changed or unset. + + ERROR + + + true if the last SQL query failed, false if + it succeeded. See also SQLSTATE. + + + + FETCH_COUNT @@ -3474,7 +3571,7 @@ bar Although you can use any output format with this feature, - the default aligned format tends to look bad + the default aligned format tends to look bad because each group of FETCH_COUNT rows will be formatted separately, leading to varying column widths across the row groups. The other output formats work better. @@ -3561,13 +3658,13 @@ bar IGNOREEOF - If set to 1 or less, sending an EOF character (usually - ControlD) + If set to 1 or less, sending an EOF character (usually + ControlD) to an interactive session of psql will terminate the application. If set to a larger numeric value, - that many consecutive EOF characters must be typed to + that many consecutive EOF characters must be typed to make an interactive session terminate. If the variable is set to a - non-numeric value, it is interpreted as 10. + non-numeric value, it is interpreted as 10. The default is 0. @@ -3591,6 +3688,19 @@ bar + + LAST_ERROR_MESSAGE + LAST_ERROR_SQLSTATE + + + The primary error message and associated SQLSTATE code for the most + recent failed query in the current psql session, or + an empty string and 00000 if no error has occurred in + the current session. + + + + ON_ERROR_ROLLBACK @@ -3601,14 +3711,14 @@ bar - When set to on, if a statement in a transaction block + When set to on, if a statement in a transaction block generates an error, the error is ignored and the transaction - continues. When set to interactive, such errors are only + continues. When set to interactive, such errors are only ignored in interactive sessions, and not when reading script - files. When set to off (the default), a statement in a + files. When set to off (the default), a statement in a transaction block that generates an error aborts the entire transaction. The error rollback mode works by issuing an - implicit SAVEPOINT for you, just before each command + implicit SAVEPOINT for you, just before each command that is in a transaction block, and then rolling back to the savepoint if the command fails. @@ -3620,7 +3730,7 @@ bar By default, command processing continues after an error. When this - variable is set to on, processing will instead stop + variable is set to on, processing will instead stop immediately. In interactive mode, psql will return to the command prompt; otherwise, psql will exit, returning @@ -3653,8 +3763,8 @@ bar These specify what the prompts psql issues should look like. See below. + linkend="app-psql-prompting" + endterm="app-psql-prompting-title"/> below. 
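The ERROR, LAST_ERROR_MESSAGE and LAST_ERROR_SQLSTATE variables described above can be combined with \if so that a script reports a failure itself (a minimal sketch; the audit_log table is hypothetical):

INSERT INTO audit_log VALUES (now(), 'nightly run');
\if :ERROR
    \echo 'insert failed:' :LAST_ERROR_MESSAGE
    \echo 'SQLSTATE:' :LAST_ERROR_SQLSTATE
\endif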
@@ -3663,25 +3773,50 @@ bar QUIET - Setting this variable to on is equivalent to the command + Setting this variable to on is equivalent to the command line option . It is probably not too useful in interactive mode. + + ROW_COUNT + + + The number of rows returned or affected by the last SQL query, or 0 + if the query failed or did not report a row count. + + + + + + SERVER_VERSION_NAME + SERVER_VERSION_NUM + + + The server's version number as a string, for + example 9.6.2, 10.1 or 11beta1, + and in numeric form, for + example 90602 or 100001. + These are set every time you connect to a database + (including program start-up), but can be changed or unset. + + + + SHOW_CONTEXT This variable can be set to the - values never, errors, or always - to control whether CONTEXT fields are displayed in - messages from the server. The default is errors (meaning + values never, errors, or always + to control whether CONTEXT fields are displayed in + messages from the server. The default is errors (meaning that context will be shown in error messages, but not in notice or warning messages). This setting has no effect - when VERBOSITY is set to terse. - (See also \errverbose, for use when you want a verbose + when VERBOSITY is set to terse. + (See also \errverbose, for use when you want a verbose version of the error you just got.) @@ -3691,7 +3826,7 @@ bar SINGLELINE - Setting this variable to on is equivalent to the command + Setting this variable to on is equivalent to the command line option . @@ -3701,12 +3836,23 @@ bar SINGLESTEP - Setting this variable to on is equivalent to the command + Setting this variable to on is equivalent to the command line option . + + SQLSTATE + + + The error code (see ) associated + with the last SQL query's failure, or 00000 if it + succeeded. + + + + USER @@ -3722,10 +3868,10 @@ bar VERBOSITY - This variable can be set to the values default, - verbose, or terse to control the verbosity + This variable can be set to the values default, + verbose, or terse to control the verbosity of error reports. - (See also \errverbose, for use when you want a verbose + (See also \errverbose, for use when you want a verbose version of the error you just got.) @@ -3733,10 +3879,15 @@ bar VERSION + VERSION_NAME + VERSION_NUM - This variable is set at program start-up to - reflect psql's version. It can be changed or unset. + These variables are set at program start-up to reflect + psql's version, respectively as a verbose string, + a short string (e.g., 9.6.2, 10.1, + or 11beta1), and a number (e.g., 90602 + or 100001). They can be changed or unset. @@ -3745,8 +3896,8 @@ bar - - <acronym>SQL</acronym> Interpolation + + <acronym>SQL</acronym> Interpolation A key feature of psql @@ -3786,7 +3937,7 @@ testdb=> SELECT * FROM :"foo"; Variable interpolation will not be performed within quoted SQL literals and identifiers. Therefore, a - construction such as ':foo' doesn't work to produce a quoted + construction such as ':foo' doesn't work to produce a quoted literal from a variable's value (and it would be unsafe if it did work, since it wouldn't correctly handle quotes embedded in the value). @@ -3812,6 +3963,12 @@ testdb=> INSERT INTO my_table VALUES (:'content'); can escape a colon with a backslash to protect it from substitution. + + The :{?name} special syntax returns TRUE + or FALSE depending on whether the variable exists or not, and is thus + always substituted, unless the colon is backslash-escaped. 
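For example, :{?name} together with \if lets a script supply a default only when a variable has not already been set, say with psql -v on the command line (a minimal sketch; node_id is an illustrative variable name):

\if :{?node_id}
    \echo 'node_id is already set to' :node_id
\else
    \set node_id 1
\endif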
+ + The colon syntax for variables is standard SQL for embedded query languages, such as ECPG. @@ -3824,8 +3981,8 @@ testdb=> INSERT INTO my_table VALUES (:'content'); - - Prompting + + Prompting The prompts psql issues can be customized @@ -3950,8 +4107,8 @@ testdb=> INSERT INTO my_table VALUES (:'content'); Transaction status: an empty string when not in a transaction - block, or * when in a transaction block, or - ! when in a failed transaction block, or ? + block, or * when in a transaction block, or + ! when in a failed transaction block, or ? when the transaction state is indeterminate (for example, because there is no connection). @@ -3962,7 +4119,7 @@ testdb=> INSERT INTO my_table VALUES (:'content'); %l - The line number inside the current statement, starting from 1. + The line number inside the current statement, starting from 1. @@ -3982,8 +4139,8 @@ testdb=> INSERT INTO my_table VALUES (:'content'); The value of the psql variable name. See the - section for details. + section for details. @@ -4050,7 +4207,7 @@ testdb=> \set PROMPT1 '%[%033[1;33;40m%]%n@%/%R%[%033[0m%]%# ' supported, although the completion logic makes no claim to be an SQL parser. The queries generated by tab-completion can also interfere with other SQL commands, e.g. SET - TRANSACTION ISOLATION LEVEL. + TRANSACTION ISOLATION LEVEL. If for some reason you do not like the tab completion, you can turn it off by putting this in a file named .inputrc in your home directory: @@ -4078,29 +4235,14 @@ $endif - If \pset columns is zero, controls the - width for the wrapped format and width for determining + If \pset columns is zero, controls the + width for the wrapped format and width for determining if wide output requires the pager or should be switched to the vertical format in expanded auto mode. - - PAGER - - - - If the query results do not fit on the screen, they are piped - through this command. Typical values are - more or less. The default - is platform-dependent. Use of the pager can be disabled by setting - PAGER to empty, or by using pager-related options of - the \pset command. - - - - PGDATABASE PGHOST @@ -4109,7 +4251,7 @@ $endif - Default connection parameters (see ). + Default connection parameters (see ). @@ -4125,11 +4267,8 @@ $endif and \ev commands. These variables are examined in the order listed; the first that is set is used. - - - - The built-in default editors are vi on Unix - systems and notepad.exe on Windows systems. + If none of them is set, the default is to use vi + on Unix systems or notepad.exe on Windows systems. @@ -4143,8 +4282,8 @@ $endif \ev is used with a line number argument, this variable specifies the command-line argument used to pass the starting line number to - the user's editor. For editors such as Emacs or - vi, this is a plus sign. Include a trailing + the user's editor. For editors such as Emacs or + vi, this is a plus sign. Include a trailing space in the value of the variable if there needs to be space between the option name and the line number. Examples: @@ -4172,6 +4311,27 @@ PSQL_EDITOR_LINENUMBER_ARG='--line ' + + PSQL_PAGER + PAGER + + + + If a query's results do not fit on the screen, they are piped + through this command. Typical values are more + or less. + Use of the pager can be disabled by setting PSQL_PAGER + or PAGER to an empty string, or by adjusting the + pager-related options of the \pset command. + These variables are examined in the order listed; + the first that is set is used. 
+ If none of them is set, the default is to use more on most + platforms, but less on Cygwin. + + + + + PSQLRC @@ -4205,9 +4365,9 @@ PSQL_EDITOR_LINENUMBER_ARG='--line ' - This utility, like most other PostgreSQL utilities, - also uses the environment variables supported by libpq - (see ). + This utility, like most other PostgreSQL utilities, + also uses the environment variables supported by libpq + (see ). @@ -4232,9 +4392,9 @@ PSQL_EDITOR_LINENUMBER_ARG='--line ' The system-wide startup file is named psqlrc and is - sought in the installation's system configuration directory, + sought in the installation's system configuration directory, which is most reliably identified by running pg_config - --sysconfdir. By default this directory will be ../etc/ + --sysconfdir. By default this directory will be ../etc/ relative to the directory containing the PostgreSQL executables. The name of this directory can be set explicitly via the PGSYSCONFDIR @@ -4271,7 +4431,7 @@ PSQL_EDITOR_LINENUMBER_ARG='--line ' The location of the history file can be set explicitly via - the HISTFILE psql variable or + the HISTFILE psql variable or the PSQL_HISTORY environment variable. @@ -4287,10 +4447,10 @@ PSQL_EDITOR_LINENUMBER_ARG='--line ' psql works best with servers of the same or an older major version. Backslash commands are particularly likely - to fail if the server is of a newer version than psql - itself. However, backslash commands of the \d family should + to fail if the server is of a newer version than psql + itself. However, backslash commands of the \d family should work with servers of versions back to 7.4, though not necessarily with - servers newer than psql itself. The general + servers newer than psql itself. The general functionality of running SQL commands and displaying query results should also work with servers of a newer major version, but this cannot be guaranteed in all cases. @@ -4310,7 +4470,7 @@ PSQL_EDITOR_LINENUMBER_ARG='--line ' Before PostgreSQL 9.6, the option implied - (); this is no longer the case. @@ -4332,7 +4492,7 @@ PSQL_EDITOR_LINENUMBER_ARG='--line ' psql is built as a console - application. Since the Windows console windows use a different + application. Since the Windows console windows use a different encoding than the rest of the system, you must take special care when using 8-bit characters within psql. If psql detects a problematic @@ -4351,7 +4511,7 @@ PSQL_EDITOR_LINENUMBER_ARG='--line ' - Set the console font to Lucida Console, because the + Set the console font to Lucida Console, because the raster font does not work with the ANSI code page. @@ -4360,8 +4520,8 @@ PSQL_EDITOR_LINENUMBER_ARG='--line ' - - Examples + + Examples The first example shows how to spread a command over several lines of diff --git a/doc/src/sgml/ref/reassign_owned.sgml b/doc/src/sgml/ref/reassign_owned.sgml index ccd038629b..0fffd6088a 100644 --- a/doc/src/sgml/ref/reassign_owned.sgml +++ b/doc/src/sgml/ref/reassign_owned.sgml @@ -3,7 +3,7 @@ doc/src/sgml/ref/reassign_owned.sgml PostgreSQL documentation --> - + REASSIGN OWNED @@ -21,8 +21,8 @@ PostgreSQL documentation -REASSIGN OWNED BY { old_role | CURRENT_USER | SESSION_USER } [, ...] - TO { new_role | CURRENT_USER | SESSION_USER } +REASSIGN OWNED BY { old_role | CURRENT_USER | SESSION_USER } [, ...] + TO { new_role | CURRENT_USER | SESSION_USER } @@ -32,8 +32,8 @@ REASSIGN OWNED BY { old_role | CURR REASSIGN OWNED instructs the system to change the ownership of database objects owned by any of the - old_roles to - new_role. 
+ old_roles to + new_role. @@ -42,19 +42,19 @@ REASSIGN OWNED BY { old_role | CURR - old_role + old_role The name of a role. The ownership of all the objects within the current database, and of all shared objects (databases, tablespaces), owned by this role will be reassigned to - new_role. + new_role. - new_role + new_role The name of the role that will be made the new owner of the @@ -77,24 +77,24 @@ REASSIGN OWNED BY { old_role | CURR - REASSIGN OWNED requires privileges on both the + REASSIGN OWNED requires membership on both the source role(s) and the target role. - The command is an alternative that + The command is an alternative that simply drops all the database objects owned by one or more roles. The REASSIGN OWNED command does not affect any - privileges granted to the old_roles for + privileges granted to the old_roles for objects that are not owned by them. Use DROP OWNED to revoke such privileges. - See for more discussion. + See for more discussion. @@ -112,9 +112,9 @@ REASSIGN OWNED BY { old_role | CURR See Also - - - + + + diff --git a/doc/src/sgml/ref/refresh_materialized_view.sgml b/doc/src/sgml/ref/refresh_materialized_view.sgml index c165c69c9f..fd06f1fda1 100644 --- a/doc/src/sgml/ref/refresh_materialized_view.sgml +++ b/doc/src/sgml/ref/refresh_materialized_view.sgml @@ -3,7 +3,7 @@ doc/src/sgml/ref/refresh_materialized_view.sgml PostgreSQL documentation --> - + REFRESH MATERIALIZED VIEW @@ -21,7 +21,7 @@ PostgreSQL documentation -REFRESH MATERIALIZED VIEW [ CONCURRENTLY ] name +REFRESH MATERIALIZED VIEW [ CONCURRENTLY ] name [ WITH [ NO ] DATA ] @@ -31,7 +31,8 @@ REFRESH MATERIALIZED VIEW [ CONCURRENTLY ] name REFRESH MATERIALIZED VIEW completely replaces the - contents of a materialized view. The old contents are discarded. If + contents of a materialized view. To execute this command you must be the + owner of the materialized view. The old contents are discarded. If WITH DATA is specified (or defaults) the backing query is executed to provide the new data, and the materialized view is left in a scannable state. If WITH NO DATA is specified no new @@ -77,7 +78,7 @@ REFRESH MATERIALIZED VIEW [ CONCURRENTLY ] name - name + name The name (optionally schema-qualified) of the materialized view to @@ -93,10 +94,10 @@ REFRESH MATERIALIZED VIEW [ CONCURRENTLY ] name While the default index for future - - operations is retained, REFRESH MATERIALIZED VIEW does not + + operations is retained, REFRESH MATERIALIZED VIEW does not order the generated rows based on this property. If you want the data - to be ordered upon generation, you must use an ORDER BY + to be ordered upon generation, you must use an ORDER BY clause in the backing query. @@ -135,9 +136,9 @@ REFRESH MATERIALIZED VIEW annual_statistics_basis WITH NO DATA; See Also - - - + + + diff --git a/doc/src/sgml/ref/reindex.sgml b/doc/src/sgml/ref/reindex.sgml index 3908ade37b..47cef987d4 100644 --- a/doc/src/sgml/ref/reindex.sgml +++ b/doc/src/sgml/ref/reindex.sgml @@ -3,7 +3,7 @@ doc/src/sgml/ref/reindex.sgml PostgreSQL documentation --> - + REINDEX @@ -21,7 +21,7 @@ PostgreSQL documentation -REINDEX [ ( VERBOSE ) ] { INDEX | TABLE | SCHEMA | DATABASE | SYSTEM } name +REINDEX [ ( VERBOSE ) ] { INDEX | TABLE | SCHEMA | DATABASE | SYSTEM } name @@ -46,13 +46,13 @@ REINDEX [ ( VERBOSE ) ] { INDEX | TABLE | SCHEMA | DATABASE | SYSTEM } - An index has become bloated, that is it contains many + An index has become bloated, that is it contains many empty or nearly-empty pages. 
This can occur with B-tree indexes in PostgreSQL under certain uncommon access patterns. REINDEX provides a way to reduce the space consumption of the index by writing a new version of the index without the dead pages. See for more information. + linkend="routine-reindex"/> for more information. @@ -65,12 +65,12 @@ REINDEX [ ( VERBOSE ) ] { INDEX | TABLE | SCHEMA | DATABASE | SYSTEM } - An index build with the CONCURRENTLY option failed, leaving - an invalid index. Such indexes are useless but it can be - convenient to use REINDEX to rebuild them. Note that - REINDEX will not perform a concurrent build. To build the + An index build with the CONCURRENTLY option failed, leaving + an invalid index. Such indexes are useless but it can be + convenient to use REINDEX to rebuild them. Note that + REINDEX will not perform a concurrent build. To build the index without interfering with production you should drop the index and - reissue the CREATE INDEX CONCURRENTLY command. + reissue the CREATE INDEX CONCURRENTLY command. @@ -95,7 +95,7 @@ REINDEX [ ( VERBOSE ) ] { INDEX | TABLE | SCHEMA | DATABASE | SYSTEM } Recreate all indexes of the specified table. If the table has a - secondary TOAST table, that is reindexed as well. + secondary TOAST table, that is reindexed as well. @@ -105,7 +105,7 @@ REINDEX [ ( VERBOSE ) ] { INDEX | TABLE | SCHEMA | DATABASE | SYSTEM } Recreate all indexes of the specified schema. If a table of this - schema has a secondary TOAST table, that is reindexed as + schema has a secondary TOAST table, that is reindexed as well. Indexes on shared system catalogs are also processed. This form of REINDEX cannot be executed inside a transaction block. @@ -139,12 +139,12 @@ REINDEX [ ( VERBOSE ) ] { INDEX | TABLE | SCHEMA | DATABASE | SYSTEM } - name + name The name of the specific index, table, or database to be reindexed. Index and table names can be schema-qualified. - Presently, REINDEX DATABASE and REINDEX SYSTEM + Presently, REINDEX DATABASE and REINDEX SYSTEM can only reindex the current database, so their parameter must match the current database's name. @@ -186,13 +186,13 @@ REINDEX [ ( VERBOSE ) ] { INDEX | TABLE | SCHEMA | DATABASE | SYSTEM } PostgreSQL server with the option included on its command line. - Then, REINDEX DATABASE, REINDEX SYSTEM, - REINDEX TABLE, or REINDEX INDEX can be + Then, REINDEX DATABASE, REINDEX SYSTEM, + REINDEX TABLE, or REINDEX INDEX can be issued, depending on how much you want to reconstruct. If in - doubt, use REINDEX SYSTEM to select + doubt, use REINDEX SYSTEM to select reconstruction of all system indexes in the database. Then quit the single-user server session and restart the regular server. - See the reference page for more + See the reference page for more information about how to interact with the single-user server interface. @@ -201,8 +201,8 @@ REINDEX [ ( VERBOSE ) ] { INDEX | TABLE | SCHEMA | DATABASE | SYSTEM } -P included in its command line options. The method for doing this varies across clients, but in all - libpq-based clients, it is possible to set - the PGOPTIONS environment variable to -P + libpq-based clients, it is possible to set + the PGOPTIONS environment variable to -P before starting the client. 
Note that while this method does not require locking out other clients, it might still be wise to prevent other users from connecting to the damaged database until repairs @@ -212,12 +212,12 @@ REINDEX [ ( VERBOSE ) ] { INDEX | TABLE | SCHEMA | DATABASE | SYSTEM } REINDEX is similar to a drop and recreate of the index in that the index contents are rebuilt from scratch. However, the locking - considerations are rather different. REINDEX locks out writes + considerations are rather different. REINDEX locks out writes but not reads of the index's parent table. It also takes an exclusive lock on the specific index being processed, which will block reads that attempt - to use that index. In contrast, DROP INDEX momentarily takes + to use that index. In contrast, DROP INDEX momentarily takes an exclusive lock on the parent table, blocking both writes and reads. The - subsequent CREATE INDEX locks out writes but not reads; since + subsequent CREATE INDEX locks out writes but not reads; since the index is not there, no read will attempt to use it, meaning that there will be no blocking but reads might be forced into expensive sequential scans. @@ -225,10 +225,20 @@ REINDEX [ ( VERBOSE ) ] { INDEX | TABLE | SCHEMA | DATABASE | SYSTEM } Reindexing a single index or table requires being the owner of that - index or table. Reindexing a database requires being the owner of - the database (note that the owner can therefore rebuild indexes of - tables owned by other users). Of course, superusers can always - reindex anything. + index or table. Reindexing a schema or database requires being the + owner of that schema or database. Note that is therefore sometimes + possible for non-superusers to rebuild indexes of tables owned by + other users. However, as a special exception, when + REINDEX DATABASE, REINDEX SCHEMA + or REINDEX SYSTEM is issued by a non-superuser, + indexes on shared catalogs will be skipped unless the user owns the + catalog (which typically won't be the case). Of course, superusers + can always reindex anything. + + + + Reindexing partitioned tables or partitioned indexes is not supported. + Each individual partition can be reindexed separately instead. diff --git a/doc/src/sgml/ref/reindexdb.sgml b/doc/src/sgml/ref/reindexdb.sgml index e4721d8113..1273dad807 100644 --- a/doc/src/sgml/ref/reindexdb.sgml +++ b/doc/src/sgml/ref/reindexdb.sgml @@ -3,7 +3,7 @@ doc/src/sgml/ref/reindexdb.sgml PostgreSQL documentation --> - + reindexdb @@ -93,7 +93,7 @@ PostgreSQL documentation reindexdb is a wrapper around the SQL - command . + command . There is no effective difference between reindexing databases via this utility and via other methods for accessing the server. @@ -109,8 +109,8 @@ PostgreSQL documentation - - + + Reindex all databases. @@ -119,8 +119,8 @@ PostgreSQL documentation - - + + Specifies the name of the database to be reindexed. @@ -134,8 +134,8 @@ PostgreSQL documentation - - + + Echo the commands that reindexdb generates @@ -145,20 +145,20 @@ PostgreSQL documentation - - + + Recreate index only. Multiple indexes can be recreated by writing multiple - switches. - - + + Do not display progress messages. @@ -167,8 +167,8 @@ PostgreSQL documentation - - + + Reindex database's system catalogs. @@ -177,32 +177,32 @@ PostgreSQL documentation - - + + Reindex schema only. Multiple schemas can be reindexed by writing multiple - switches. - - + + Reindex table only. Multiple tables can be reindexed by writing multiple - switches. - - + + Print detailed information during processing. 
@@ -211,8 +211,8 @@ PostgreSQL documentation - - + + Print the reindexdb version and exit. @@ -221,8 +221,8 @@ PostgreSQL documentation - - + + Show help about reindexdb command line @@ -241,8 +241,8 @@ PostgreSQL documentation - - + + Specifies the host name of the machine on which the server is @@ -253,8 +253,8 @@ PostgreSQL documentation - - + + Specifies the TCP port or local Unix domain socket file @@ -265,8 +265,8 @@ PostgreSQL documentation - - + + User name to connect as. @@ -275,8 +275,8 @@ PostgreSQL documentation - - + + Never issue a password prompt. If the server requires @@ -290,8 +290,8 @@ PostgreSQL documentation - - + + Force reindexdb to prompt for a @@ -304,14 +304,14 @@ PostgreSQL documentation for a password if the server demands password authentication. However, reindexdb will waste a connection attempt finding out that the server wants a password. - In some cases it is worth typing to avoid the extra connection attempt. - + Specifies the name of the database to connect to discover what other @@ -345,9 +345,9 @@ PostgreSQL documentation - This utility, like most other PostgreSQL utilities, - also uses the environment variables supported by libpq - (see ). + This utility, like most other PostgreSQL utilities, + also uses the environment variables supported by libpq + (see ). @@ -357,8 +357,8 @@ PostgreSQL documentation Diagnostics - In case of difficulty, see - and for + In case of difficulty, see + and for discussions of potential problems and error messages. The database server must be running at the targeted host. Also, any default connection settings and environment @@ -376,8 +376,8 @@ PostgreSQL documentation reindexdb might need to connect several times to the PostgreSQL server, asking for a password each time. It is convenient to have a - ~/.pgpass file in such cases. See for more information. + ~/.pgpass file in such cases. See for more information. @@ -405,7 +405,7 @@ PostgreSQL documentation See Also - + diff --git a/doc/src/sgml/ref/release_savepoint.sgml b/doc/src/sgml/ref/release_savepoint.sgml index b331b7226b..39665d28ef 100644 --- a/doc/src/sgml/ref/release_savepoint.sgml +++ b/doc/src/sgml/ref/release_savepoint.sgml @@ -3,7 +3,7 @@ doc/src/sgml/ref/release_savepoint.sgml PostgreSQL documentation --> - + RELEASE SAVEPOINT @@ -42,7 +42,7 @@ RELEASE [ SAVEPOINT ] savepoint_name Destroying a savepoint makes it unavailable as a rollback point, but it has no other user visible behavior. It does not undo the effects of commands executed after the savepoint was established. - (To do that, see .) + (To do that, see .) Destroying a savepoint when it is no longer needed allows the system to reclaim some resources earlier than transaction end. @@ -109,7 +109,7 @@ COMMIT; Compatibility - This command conforms to the SQL standard. The standard + This command conforms to the SQL standard. The standard specifies that the key word SAVEPOINT is mandatory, but PostgreSQL allows it to be omitted. @@ -120,11 +120,11 @@ COMMIT; See Also - - - - - + + + + + diff --git a/doc/src/sgml/ref/reset.sgml b/doc/src/sgml/ref/reset.sgml index 7e76891bde..95599072e7 100644 --- a/doc/src/sgml/ref/reset.sgml +++ b/doc/src/sgml/ref/reset.sgml @@ -3,7 +3,7 @@ doc/src/sgml/ref/reset.sgml PostgreSQL documentation --> - + RESET @@ -21,7 +21,7 @@ PostgreSQL documentation -RESET configuration_parameter +RESET configuration_parameter RESET ALL @@ -36,25 +36,25 @@ RESET ALL SET configuration_parameter TO DEFAULT - Refer to for + Refer to for details. 
The default value is defined as the value that the parameter would - have had, if no SET had ever been issued for it in the + have had, if no SET had ever been issued for it in the current session. The actual source of this value might be a compiled-in default, the configuration file, command-line options, or per-database or per-user default settings. This is subtly different from defining it as the value that the parameter had at session - start, because if the value came from the configuration file, it + start, because if the value came from the configuration file, it will be reset to whatever is specified by the configuration file now. - See for details. + See for details. - The transactional behavior of RESET is the same as - SET: its effects will be undone by transaction rollback. + The transactional behavior of RESET is the same as + SET: its effects will be undone by transaction rollback. @@ -63,12 +63,12 @@ SET configuration_parameter TO DEFA - configuration_parameter + configuration_parameter Name of a settable run-time parameter. Available parameters are - documented in and on the - reference page. + documented in and on the + reference page. @@ -88,7 +88,7 @@ SET configuration_parameter TO DEFA Examples - Set the timezone configuration variable to its default value: + Set the timezone configuration variable to its default value: RESET timezone; @@ -106,8 +106,8 @@ RESET timezone; See Also - - + + diff --git a/doc/src/sgml/ref/revoke.sgml b/doc/src/sgml/ref/revoke.sgml index ce532543f0..5317f8ccba 100644 --- a/doc/src/sgml/ref/revoke.sgml +++ b/doc/src/sgml/ref/revoke.sgml @@ -3,7 +3,7 @@ doc/src/sgml/ref/revoke.sgml PostgreSQL documentation --> - + REVOKE @@ -24,94 +24,94 @@ PostgreSQL documentation REVOKE [ GRANT OPTION FOR ] { { SELECT | INSERT | UPDATE | DELETE | TRUNCATE | REFERENCES | TRIGGER } [, ...] | ALL [ PRIVILEGES ] } - ON { [ TABLE ] table_name [, ...] + ON { [ TABLE ] table_name [, ...] | ALL TABLES IN SCHEMA schema_name [, ...] } - FROM { [ GROUP ] role_name | PUBLIC } [, ...] + FROM { [ GROUP ] role_name | PUBLIC } [, ...] [ CASCADE | RESTRICT ] REVOKE [ GRANT OPTION FOR ] - { { SELECT | INSERT | UPDATE | REFERENCES } ( column_name [, ...] ) - [, ...] | ALL [ PRIVILEGES ] ( column_name [, ...] ) } - ON [ TABLE ] table_name [, ...] - FROM { [ GROUP ] role_name | PUBLIC } [, ...] + { { SELECT | INSERT | UPDATE | REFERENCES } ( column_name [, ...] ) + [, ...] | ALL [ PRIVILEGES ] ( column_name [, ...] ) } + ON [ TABLE ] table_name [, ...] + FROM { [ GROUP ] role_name | PUBLIC } [, ...] [ CASCADE | RESTRICT ] REVOKE [ GRANT OPTION FOR ] { { USAGE | SELECT | UPDATE } [, ...] | ALL [ PRIVILEGES ] } - ON { SEQUENCE sequence_name [, ...] + ON { SEQUENCE sequence_name [, ...] | ALL SEQUENCES IN SCHEMA schema_name [, ...] } - FROM { [ GROUP ] role_name | PUBLIC } [, ...] + FROM { [ GROUP ] role_name | PUBLIC } [, ...] [ CASCADE | RESTRICT ] REVOKE [ GRANT OPTION FOR ] { { CREATE | CONNECT | TEMPORARY | TEMP } [, ...] | ALL [ PRIVILEGES ] } ON DATABASE database_name [, ...] - FROM { [ GROUP ] role_name | PUBLIC } [, ...] + FROM { [ GROUP ] role_name | PUBLIC } [, ...] [ CASCADE | RESTRICT ] REVOKE [ GRANT OPTION FOR ] { USAGE | ALL [ PRIVILEGES ] } ON DOMAIN domain_name [, ...] - FROM { [ GROUP ] role_name | PUBLIC } [, ...] + FROM { [ GROUP ] role_name | PUBLIC } [, ...] [ CASCADE | RESTRICT ] REVOKE [ GRANT OPTION FOR ] { USAGE | ALL [ PRIVILEGES ] } ON FOREIGN DATA WRAPPER fdw_name [, ...] - FROM { [ GROUP ] role_name | PUBLIC } [, ...] 
+ FROM { [ GROUP ] role_name | PUBLIC } [, ...] [ CASCADE | RESTRICT ] REVOKE [ GRANT OPTION FOR ] { USAGE | ALL [ PRIVILEGES ] } ON FOREIGN SERVER server_name [, ...] - FROM { [ GROUP ] role_name | PUBLIC } [, ...] + FROM { [ GROUP ] role_name | PUBLIC } [, ...] [ CASCADE | RESTRICT ] REVOKE [ GRANT OPTION FOR ] { EXECUTE | ALL [ PRIVILEGES ] } - ON { FUNCTION function_name [ ( [ [ argmode ] [ arg_name ] arg_type [, ...] ] ) ] [, ...] - | ALL FUNCTIONS IN SCHEMA schema_name [, ...] } - FROM { [ GROUP ] role_name | PUBLIC } [, ...] + ON { { FUNCTION | PROCEDURE | ROUTINE } function_name [ ( [ [ argmode ] [ arg_name ] arg_type [, ...] ] ) ] [, ...] + | ALL { FUNCTIONS | PROCEDURES | ROUTINES } IN SCHEMA schema_name [, ...] } + FROM { [ GROUP ] role_name | PUBLIC } [, ...] [ CASCADE | RESTRICT ] REVOKE [ GRANT OPTION FOR ] { USAGE | ALL [ PRIVILEGES ] } ON LANGUAGE lang_name [, ...] - FROM { [ GROUP ] role_name | PUBLIC } [, ...] + FROM { [ GROUP ] role_name | PUBLIC } [, ...] [ CASCADE | RESTRICT ] REVOKE [ GRANT OPTION FOR ] { { SELECT | UPDATE } [, ...] | ALL [ PRIVILEGES ] } - ON LARGE OBJECT loid [, ...] - FROM { [ GROUP ] role_name | PUBLIC } [, ...] + ON LARGE OBJECT loid [, ...] + FROM { [ GROUP ] role_name | PUBLIC } [, ...] [ CASCADE | RESTRICT ] REVOKE [ GRANT OPTION FOR ] { { CREATE | USAGE } [, ...] | ALL [ PRIVILEGES ] } ON SCHEMA schema_name [, ...] - FROM { [ GROUP ] role_name | PUBLIC } [, ...] + FROM { [ GROUP ] role_name | PUBLIC } [, ...] [ CASCADE | RESTRICT ] REVOKE [ GRANT OPTION FOR ] { CREATE | ALL [ PRIVILEGES ] } ON TABLESPACE tablespace_name [, ...] - FROM { [ GROUP ] role_name | PUBLIC } [, ...] + FROM { [ GROUP ] role_name | PUBLIC } [, ...] [ CASCADE | RESTRICT ] REVOKE [ GRANT OPTION FOR ] { USAGE | ALL [ PRIVILEGES ] } ON TYPE type_name [, ...] - FROM { [ GROUP ] role_name | PUBLIC } [, ...] + FROM { [ GROUP ] role_name | PUBLIC } [, ...] [ CASCADE | RESTRICT ] REVOKE [ ADMIN OPTION FOR ] - role_name [, ...] FROM role_name [, ...] + role_name [, ...] FROM role_name [, ...] [ CASCADE | RESTRICT ] - + Description @@ -122,7 +122,7 @@ REVOKE [ ADMIN OPTION FOR ] - See the description of the command for + See the description of the command for the meaning of the privilege types. @@ -130,13 +130,13 @@ REVOKE [ ADMIN OPTION FOR ] Note that any particular role will have the sum of privileges granted directly to it, privileges granted to any role it is presently a member of, and privileges granted to - PUBLIC. Thus, for example, revoking SELECT privilege + PUBLIC. Thus, for example, revoking SELECT privilege from PUBLIC does not necessarily mean that all roles - have lost SELECT privilege on the object: those who have it granted + have lost SELECT privilege on the object: those who have it granted directly or via another role will still have it. Similarly, revoking - SELECT from a user might not prevent that user from using - SELECT if PUBLIC or another membership - role still has SELECT rights. + SELECT from a user might not prevent that user from using + SELECT if PUBLIC or another membership + role still has SELECT rights. @@ -167,28 +167,28 @@ REVOKE [ ADMIN OPTION FOR ] - When revoking membership in a role, GRANT OPTION is instead - called ADMIN OPTION, but the behavior is similar. + When revoking membership in a role, GRANT OPTION is instead + called ADMIN OPTION, but the behavior is similar. Note also that this form of the command does not - allow the noise word GROUP. + allow the noise word GROUP. 
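For example, either the grant option or the admin option can be revoked on its own, leaving the underlying privilege or membership in place (a sketch; the roles and table named here are hypothetical):

REVOKE GRANT OPTION FOR UPDATE ON inventory FROM clerk CASCADE;
REVOKE ADMIN OPTION FOR operators FROM alice;

Afterwards clerk can still update inventory but can no longer pass that right on, and alice remains a member of operators but can no longer grant the role to others.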
- + Notes - Use 's \dp command to + Use 's \dp command to display the privileges granted on existing tables and columns. See for information about the - format. For non-table objects there are other \d commands + linkend="sql-grant"/> for information about the + format. For non-table objects there are other \d commands that can display their privileges. A user can only revoke privileges that were granted directly by that user. If, for example, user A has granted a privilege with - grant option to user B, and user B has in turned granted it to user + grant option to user B, and user B has in turn granted it to user C, then user A cannot revoke the privilege directly from C. Instead, user A could revoke the grant option from user B and use the CASCADE option so that the privilege is @@ -198,12 +198,12 @@ REVOKE [ ADMIN OPTION FOR ] - When a non-owner of an object attempts to REVOKE privileges + When a non-owner of an object attempts to REVOKE privileges on the object, the command will fail outright if the user has no privileges whatsoever on the object. As long as some privilege is available, the command will proceed, but it will revoke only those privileges for which the user has grant options. The REVOKE ALL - PRIVILEGES forms will issue a warning message if no grant options are + PRIVILEGES forms will issue a warning message if no grant options are held, while the other forms will issue a warning if grant options for any of the privileges specifically named in the command are not held. (In principle these statements apply to the object owner as well, but @@ -212,7 +212,7 @@ REVOKE [ ADMIN OPTION FOR ] - If a superuser chooses to issue a GRANT or REVOKE + If a superuser chooses to issue a GRANT or REVOKE command, the command is performed as though it were issued by the owner of the affected object. Since all privileges ultimately come from the object owner (possibly indirectly via chains of grant options), @@ -221,32 +221,32 @@ REVOKE [ ADMIN OPTION FOR ] - REVOKE can also be done by a role + REVOKE can also be done by a role that is not the owner of the affected object, but is a member of the role that owns the object, or is a member of a role that holds privileges WITH GRANT OPTION on the object. In this case the command is performed as though it were issued by the containing role that actually owns the object or holds the privileges WITH GRANT OPTION. For example, if table - t1 is owned by role g1, of which role - u1 is a member, then u1 can revoke privileges - on t1 that are recorded as being granted by g1. - This would include grants made by u1 as well as by other - members of role g1. + t1 is owned by role g1, of which role + u1 is a member, then u1 can revoke privileges + on t1 that are recorded as being granted by g1. + This would include grants made by u1 as well as by other + members of role g1. - If the role executing REVOKE holds privileges + If the role executing REVOKE holds privileges indirectly via more than one role membership path, it is unspecified which containing role will be used to perform the command. In such cases - it is best practice to use SET ROLE to become the specific - role you want to do the REVOKE as. Failure to do so might + it is best practice to use SET ROLE to become the specific + role you want to do the REVOKE as. Failure to do so might lead to revoking privileges other than the ones you intended, or not revoking anything at all. 
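 The practice recommended in the preceding paragraph might look like the following sketch,
 using the roles g1 and table t1 from the example above (priv_user is hypothetical):

-- Become the role that owns t1 so there is no ambiguity about which
-- containing role performs the REVOKE, then switch back.
SET ROLE g1;
REVOKE SELECT ON t1 FROM priv_user;
RESET ROLE;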
- + Examples @@ -267,25 +267,25 @@ REVOKE ALL PRIVILEGES ON kinds FROM manuel; Note that this actually means revoke all privileges that I - granted. + granted. - Revoke membership in role admins from user joe: + Revoke membership in role admins from user joe: REVOKE admins FROM joe; - + Compatibility - The compatibility notes of the command + The compatibility notes of the command apply analogously to REVOKE. The keyword RESTRICT or CASCADE - is required according to the standard, but PostgreSQL + is required according to the standard, but PostgreSQL assumes RESTRICT by default. @@ -294,7 +294,7 @@ REVOKE admins FROM joe; See Also - + diff --git a/doc/src/sgml/ref/rollback.sgml b/doc/src/sgml/ref/rollback.sgml index b0b1e8d0e3..3cafb848a9 100644 --- a/doc/src/sgml/ref/rollback.sgml +++ b/doc/src/sgml/ref/rollback.sgml @@ -3,7 +3,7 @@ doc/src/sgml/ref/rollback.sgml PostgreSQL documentation --> - + ROLLBACK @@ -54,12 +54,12 @@ ROLLBACK [ WORK | TRANSACTION ] Notes - Use to + Use to successfully terminate a transaction. - Issuing ROLLBACK outside of a transaction + Issuing ROLLBACK outside of a transaction block emits a warning and otherwise has no effect. @@ -88,9 +88,9 @@ ROLLBACK; See Also - - - + + + diff --git a/doc/src/sgml/ref/rollback_prepared.sgml b/doc/src/sgml/ref/rollback_prepared.sgml index a5328e96a2..08821a6652 100644 --- a/doc/src/sgml/ref/rollback_prepared.sgml +++ b/doc/src/sgml/ref/rollback_prepared.sgml @@ -3,7 +3,7 @@ doc/src/sgml/ref/rollback_prepared.sgml PostgreSQL documentation --> - + ROLLBACK PREPARED @@ -21,7 +21,7 @@ PostgreSQL documentation -ROLLBACK PREPARED transaction_id +ROLLBACK PREPARED transaction_id @@ -39,7 +39,7 @@ ROLLBACK PREPARED transaction_id - transaction_id + transaction_id The transaction identifier of the transaction that is to be @@ -75,7 +75,7 @@ ROLLBACK PREPARED transaction_id Examples Roll back the transaction identified by the transaction - identifier foobar: + identifier foobar: ROLLBACK PREPARED 'foobar'; @@ -99,8 +99,8 @@ ROLLBACK PREPARED 'foobar'; See Also - - + + diff --git a/doc/src/sgml/ref/rollback_to.sgml b/doc/src/sgml/ref/rollback_to.sgml index 060f408a63..4d5647a302 100644 --- a/doc/src/sgml/ref/rollback_to.sgml +++ b/doc/src/sgml/ref/rollback_to.sgml @@ -3,7 +3,7 @@ doc/src/sgml/ref/rollback_to.sgml PostgreSQL documentation --> - + ROLLBACK TO SAVEPOINT @@ -40,7 +40,7 @@ ROLLBACK [ WORK | TRANSACTION ] TO [ SAVEPOINT ] savepoint_name - ROLLBACK TO SAVEPOINT implicitly destroys all savepoints that + ROLLBACK TO SAVEPOINT implicitly destroys all savepoints that were established after the named savepoint. @@ -50,7 +50,7 @@ ROLLBACK [ WORK | TRANSACTION ] TO [ SAVEPOINT ] savepoint_name - savepoint_name + savepoint_name The savepoint to roll back to. @@ -64,7 +64,7 @@ ROLLBACK [ WORK | TRANSACTION ] TO [ SAVEPOINT ] savepoint_nameNotes - Use to destroy a savepoint + Use to destroy a savepoint without discarding the effects of commands executed after it was established. @@ -77,17 +77,17 @@ ROLLBACK [ WORK | TRANSACTION ] TO [ SAVEPOINT ] savepoint_nameFETCH or MOVE command inside a + affected by a FETCH or MOVE command inside a savepoint that is later rolled back, the cursor remains at the - position that FETCH left it pointing to (that is, the cursor - motion caused by FETCH is not rolled back). + position that FETCH left it pointing to (that is, the cursor + motion caused by FETCH is not rolled back). Closing a cursor is not undone by rolling back, either. 
However, other side-effects caused by the cursor's query (such as - side-effects of volatile functions called by the query) are + side-effects of volatile functions called by the query) are rolled back if they occur during a savepoint that is later rolled back. A cursor whose execution causes a transaction to abort is put in a cannot-execute state, so while the transaction can be restored using - ROLLBACK TO SAVEPOINT, the cursor can no longer be used. + ROLLBACK TO SAVEPOINT, the cursor can no longer be used. @@ -133,13 +133,13 @@ COMMIT; Compatibility - The SQL standard specifies that the key word - SAVEPOINT is mandatory, but PostgreSQL - and Oracle allow it to be omitted. SQL allows - only WORK, not TRANSACTION, as a noise word - after ROLLBACK. Also, SQL has an optional clause - AND [ NO ] CHAIN which is not currently supported by - PostgreSQL. Otherwise, this command conforms to + The SQL standard specifies that the key word + SAVEPOINT is mandatory, but PostgreSQL + and Oracle allow it to be omitted. SQL allows + only WORK, not TRANSACTION, as a noise word + after ROLLBACK. Also, SQL has an optional clause + AND [ NO ] CHAIN which is not currently supported by + PostgreSQL. Otherwise, this command conforms to the SQL standard. @@ -148,11 +148,11 @@ COMMIT; See Also - - - - - + + + + + diff --git a/doc/src/sgml/ref/savepoint.sgml b/doc/src/sgml/ref/savepoint.sgml index 5b944a2561..87243b1d20 100644 --- a/doc/src/sgml/ref/savepoint.sgml +++ b/doc/src/sgml/ref/savepoint.sgml @@ -3,7 +3,7 @@ doc/src/sgml/ref/savepoint.sgml PostgreSQL documentation --> - + SAVEPOINT @@ -64,8 +64,8 @@ SAVEPOINT savepoint_name Notes - Use to - rollback to a savepoint. Use + Use to + rollback to a savepoint. Use to destroy a savepoint, keeping the effects of commands executed after it was established. @@ -114,11 +114,11 @@ COMMIT; SQL requires a savepoint to be destroyed automatically when another savepoint with the same name is established. In - PostgreSQL, the old savepoint is kept, though only the more + PostgreSQL, the old savepoint is kept, though only the more recent one will be used when rolling back or releasing. (Releasing the - newer savepoint with RELEASE SAVEPOINT will cause the older one - to again become accessible to ROLLBACK TO SAVEPOINT and - RELEASE SAVEPOINT.) Otherwise, SAVEPOINT is + newer savepoint with RELEASE SAVEPOINT will cause the older one + to again become accessible to ROLLBACK TO SAVEPOINT and + RELEASE SAVEPOINT.) Otherwise, SAVEPOINT is fully SQL conforming. @@ -127,11 +127,11 @@ COMMIT; See Also - - - - - + + + + + diff --git a/doc/src/sgml/ref/security_label.sgml b/doc/src/sgml/ref/security_label.sgml index aa8be473bd..e9cfdec9f9 100644 --- a/doc/src/sgml/ref/security_label.sgml +++ b/doc/src/sgml/ref/security_label.sgml @@ -3,7 +3,7 @@ doc/src/sgml/ref/security_label.sgml PostgreSQL documentation --> - + SECURITY LABEL @@ -21,28 +21,30 @@ PostgreSQL documentation -SECURITY LABEL [ FOR provider ] ON +SECURITY LABEL [ FOR provider ] ON { - TABLE object_name | - COLUMN table_name.column_name | - AGGREGATE aggregate_name ( aggregate_signature ) | - DATABASE object_name | - DOMAIN object_name | - EVENT TRIGGER object_name | - FOREIGN TABLE object_name - FUNCTION function_name [ ( [ [ argmode ] [ argname ] argtype [, ...] 
] ) ] | - LARGE OBJECT large_object_oid | - MATERIALIZED VIEW object_name | - [ PROCEDURAL ] LANGUAGE object_name | - PUBLICATION object_name | - ROLE object_name | - SCHEMA object_name | - SEQUENCE object_name | - SUBSCRIPTION object_name | - TABLESPACE object_name | - TYPE object_name | - VIEW object_name -} IS 'label' + TABLE object_name | + COLUMN table_name.column_name | + AGGREGATE aggregate_name ( aggregate_signature ) | + DATABASE object_name | + DOMAIN object_name | + EVENT TRIGGER object_name | + FOREIGN TABLE object_name + FUNCTION function_name [ ( [ [ argmode ] [ argname ] argtype [, ...] ] ) ] | + LARGE OBJECT large_object_oid | + MATERIALIZED VIEW object_name | + [ PROCEDURAL ] LANGUAGE object_name | + PROCEDURE procedure_name [ ( [ [ argmode ] [ argname ] argtype [, ...] ] ) ] | + PUBLICATION object_name | + ROLE object_name | + ROUTINE routine_name [ ( [ [ argmode ] [ argname ] argtype [, ...] ] ) ] | + SCHEMA object_name | + SEQUENCE object_name | + SUBSCRIPTION object_name | + TABLESPACE object_name | + TYPE object_name | + VIEW object_name +} IS 'label' where aggregate_signature is: @@ -60,12 +62,12 @@ SECURITY LABEL [ FOR provider ] ON object. An arbitrary number of security labels, one per label provider, can be associated with a given database object. Label providers are loadable modules which register themselves by using the function - register_label_provider. + register_label_provider. - register_label_provider is not an SQL function; it can + register_label_provider is not an SQL function; it can only be called from C code loaded into the backend. @@ -74,11 +76,11 @@ SECURITY LABEL [ FOR provider ] ON The label provider determines whether a given label is valid and whether it is permissible to assign that label to a given object. The meaning of a given label is likewise at the discretion of the label provider. - PostgreSQL places no restrictions on whether or how a + PostgreSQL places no restrictions on whether or how a label provider must interpret security labels; it merely provides a mechanism for storing them. In practice, this facility is intended to allow integration with label-based mandatory access control (MAC) systems such as - SE-Linux. Such systems make all access control decisions + SE-Linux. Such systems make all access control decisions based on object labels, rather than traditional discretionary access control (DAC) concepts such as users and groups. @@ -93,10 +95,12 @@ SECURITY LABEL [ FOR provider ] ON table_name.column_name aggregate_name function_name + procedure_name + routine_name The name of the object to be labeled. Names of tables, - aggregates, domains, foreign tables, functions, sequences, types, and + aggregates, domains, foreign tables, functions, procedures, routines, sequences, types, and views can be schema-qualified. @@ -119,15 +123,15 @@ SECURITY LABEL [ FOR provider ] ON - The mode of a function or aggregate - argument: IN, OUT, - INOUT, or VARIADIC. - If omitted, the default is IN. + The mode of a function, procedure, or aggregate + argument: IN, OUT, + INOUT, or VARIADIC. + If omitted, the default is IN. Note that SECURITY LABEL does not actually - pay any attention to OUT arguments, since only the input + pay any attention to OUT arguments, since only the input arguments are needed to determine the function's identity. - So it is sufficient to list the IN, INOUT, - and VARIADIC arguments. + So it is sufficient to list the IN, INOUT, + and VARIADIC arguments. 
@@ -137,7 +141,7 @@ SECURITY LABEL [ FOR provider ] ON - The name of a function or aggregate argument. + The name of a function, procedure, or aggregate argument. Note that SECURITY LABEL does not actually pay any attention to argument names, since only the argument data types are needed to determine the function's identity. @@ -150,7 +154,7 @@ SECURITY LABEL [ FOR provider ] ON - The data type of a function or aggregate argument. + The data type of a function, procedure, or aggregate argument. @@ -178,7 +182,7 @@ SECURITY LABEL [ FOR provider ] ON label - The new security label, written as a string literal; or NULL + The new security label, written as a string literal; or NULL to drop the security label. @@ -208,7 +212,7 @@ SECURITY LABEL FOR selinux ON TABLE mytable IS 'system_u:object_r:sepgsql_table_ See Also - + src/test/modules/dummy_seclabel diff --git a/doc/src/sgml/ref/select.sgml b/doc/src/sgml/ref/select.sgml index 57f11e66fb..4db8142afa 100644 --- a/doc/src/sgml/ref/select.sgml +++ b/doc/src/sgml/ref/select.sgml @@ -3,7 +3,7 @@ doc/src/sgml/ref/select.sgml PostgreSQL documentation --> - + SELECT @@ -94,7 +94,7 @@ TABLE [ ONLY ] table_name [ * ] in the FROM list. A WITH query that is referenced more than once in FROM is computed only once. - (See below.) + (See below.) @@ -104,7 +104,7 @@ TABLE [ ONLY ] table_name [ * ] (Each element in the FROM list is a real or virtual table.) If more than one element is specified in the FROM list, they are cross-joined together. - (See below.) + (See below.) @@ -113,7 +113,7 @@ TABLE [ ONLY ] table_name [ * ] If the WHERE clause is specified, all rows that do not satisfy the condition are eliminated from the output. (See below.) + endterm="sql-where-title"/> below.) @@ -125,8 +125,8 @@ TABLE [ ONLY ] table_name [ * ] values, and the results of aggregate functions are computed. If the HAVING clause is present, it eliminates groups that do not satisfy the given condition. (See - and - below.) + and + below.) @@ -135,7 +135,7 @@ TABLE [ ONLY ] table_name [ * ] The actual output rows are computed using the SELECT output expressions for each selected row or row group. (See - + below.) @@ -146,7 +146,7 @@ TABLE [ ONLY ] table_name [ * ] match on all the specified expressions. SELECT ALL (the default) will return all candidate rows, including duplicates. (See below.) + endterm="sql-distinct-title"/> below.) @@ -163,13 +163,13 @@ TABLE [ ONLY ] table_name [ * ] operator returns the rows that are in the first result set but not in the second. In all three cases, duplicate rows are eliminated unless ALL is specified. The noise - word DISTINCT can be added to explicitly specify - eliminating duplicate rows. Notice that DISTINCT is + word DISTINCT can be added to explicitly specify + eliminating duplicate rows. Notice that DISTINCT is the default behavior here, even though ALL is - the default for SELECT itself. (See - , , and - below.) + the default for SELECT itself. (See + , , and + below.) @@ -179,7 +179,7 @@ TABLE [ ONLY ] table_name [ * ] returned rows are sorted in the specified order. If ORDER BY is not given, the rows are returned in whatever order the system finds fastest to produce. (See - below.) + below.) @@ -188,18 +188,18 @@ TABLE [ ONLY ] table_name [ * ] If the LIMIT (or FETCH FIRST) or OFFSET clause is specified, the SELECT statement only returns a subset of the result rows. (See below.) + linkend="sql-limit" endterm="sql-limit-title"/> below.) 
- If FOR UPDATE, FOR NO KEY UPDATE, FOR SHARE + If FOR UPDATE, FOR NO KEY UPDATE, FOR SHARE or FOR KEY SHARE is specified, the SELECT statement locks the selected rows against concurrent updates. (See below.) + endterm="sql-for-update-share-title"/> below.) @@ -207,7 +207,7 @@ TABLE [ ONLY ] table_name [ * ] You must have SELECT privilege on each column used - in a SELECT command. The use of FOR NO KEY UPDATE, + in a SELECT command. The use of FOR NO KEY UPDATE, FOR UPDATE, FOR SHARE or FOR KEY SHARE requires UPDATE privilege as well (for at least one column @@ -218,7 +218,7 @@ TABLE [ ONLY ] table_name [ * ] Parameters - + <literal>WITH</literal> Clause @@ -226,15 +226,15 @@ TABLE [ ONLY ] table_name [ * ] subqueries that can be referenced by name in the primary query. The subqueries effectively act as temporary tables or views for the duration of the primary query. - Each subquery can be a SELECT, TABLE, VALUES, + Each subquery can be a SELECT, TABLE, VALUES, INSERT, UPDATE or DELETE statement. When writing a data-modifying statement (INSERT, UPDATE or DELETE) in - WITH, it is usual to include a RETURNING clause. - It is the output of RETURNING, not the underlying + WITH, it is usual to include a RETURNING clause. + It is the output of RETURNING, not the underlying table that the statement modifies, that forms the temporary table that is - read by the primary query. If RETURNING is omitted, the + read by the primary query. If RETURNING is omitted, the statement is still executed, but it produces no output so it cannot be referenced as a table by the primary query. @@ -254,11 +254,11 @@ TABLE [ ONLY ] table_name [ * ] non_recursive_term UNION [ ALL | DISTINCT ] recursive_term where the recursive self-reference must appear on the right-hand - side of the UNION. Only one recursive self-reference + side of the UNION. Only one recursive self-reference is permitted per query. Recursive data-modifying statements are not supported, but you can use the results of a recursive SELECT query in - a data-modifying statement. See for + a data-modifying statement. See for an example. @@ -285,17 +285,17 @@ TABLE [ ONLY ] table_name [ * ] The primary query and the WITH queries are all (notionally) executed at the same time. This implies that the effects of a data-modifying statement in WITH cannot be seen from - other parts of the query, other than by reading its RETURNING + other parts of the query, other than by reading its RETURNING output. If two such data-modifying statements attempt to modify the same row, the results are unspecified. - See for additional information. + See for additional information. - + <literal>FROM</literal> Clause @@ -303,7 +303,7 @@ TABLE [ ONLY ] table_name [ * ] tables for the SELECT. If multiple sources are specified, the result is the Cartesian product (cross join) of all the sources. But usually qualification conditions are added (via - WHERE) to restrict the returned rows to a small subset of the + WHERE) to restrict the returned rows to a small subset of the Cartesian product. @@ -317,10 +317,10 @@ TABLE [ ONLY ] table_name [ * ] The name (optionally schema-qualified) of an existing table or view. - If ONLY is specified before the table name, only that - table is scanned. If ONLY is not specified, the table + If ONLY is specified before the table name, only that + table is scanned. If ONLY is not specified, the table and all its descendant tables (if any) are scanned. 
Optionally, - * can be specified after the table name to explicitly + * can be specified after the table name to explicitly indicate that descendant tables are included. @@ -330,14 +330,14 @@ TABLE [ ONLY ] table_name [ * ] alias - A substitute name for the FROM item containing the + A substitute name for the FROM item containing the alias. An alias is used for brevity or to eliminate ambiguity for self-joins (where the same table is scanned multiple times). When an alias is provided, it completely hides the actual name of the table or function; for example given - FROM foo AS f, the remainder of the - SELECT must refer to this FROM - item as f not foo. If an alias is + FROM foo AS f, the remainder of the + SELECT must refer to this FROM + item as f not foo. If an alias is written, a column alias list can also be written to provide substitute names for one or more columns of the table. @@ -348,12 +348,12 @@ TABLE [ ONLY ] table_name [ * ] TABLESAMPLE sampling_method ( argument [, ...] ) [ REPEATABLE ( seed ) ] - A TABLESAMPLE clause after - a table_name indicates that the + A TABLESAMPLE clause after + a table_name indicates that the specified sampling_method should be used to retrieve a subset of the rows in that table. This sampling precedes the application of any other filters such - as WHERE clauses. + as WHERE clauses. The standard PostgreSQL distribution includes two sampling methods, BERNOULLI and SYSTEM, and other sampling methods can be @@ -361,11 +361,11 @@ TABLE [ ONLY ] table_name [ * ] - The BERNOULLI and SYSTEM sampling methods - each accept a single argument + The BERNOULLI and SYSTEM sampling methods + each accept a single argument which is the fraction of the table to sample, expressed as a percentage between 0 and 100. This argument can be - any real-valued expression. (Other sampling methods might + any real-valued expression. (Other sampling methods might accept more or different arguments.) These two methods each return a randomly-chosen sample of the table that will contain approximately the specified percentage of the table's rows. @@ -383,10 +383,10 @@ TABLE [ ONLY ] table_name [ * ] The optional REPEATABLE clause specifies - a seed number or expression to use + a seed number or expression to use for generating random numbers within the sampling method. The seed value can be any non-null floating-point value. Two queries that - specify the same seed and argument + specify the same seed and argument values will select the same sample of the table, if the table has not been changed meanwhile. But different seed values will usually produce different samples. @@ -410,7 +410,7 @@ TABLE [ ONLY ] table_name [ * ] sub-SELECT must be surrounded by parentheses, and an alias must be provided for it. A - command + command can also be used here. @@ -420,9 +420,9 @@ TABLE [ ONLY ] table_name [ * ] with_query_name - A WITH query is referenced by writing its name, + A WITH query is referenced by writing its name, just as though the query's name were a table name. (In fact, - the WITH query hides any real table of the same name + the WITH query hides any real table of the same name for the purposes of the primary query. If necessary, you can refer to a real table of the same name by schema-qualifying the table's name.) @@ -456,8 +456,8 @@ TABLE [ ONLY ] table_name [ * ] Multiple function calls can be combined into a - single FROM-clause item by surrounding them - with ROWS FROM( ... ). 
The output of such an item is the + single FROM-clause item by surrounding them + with ROWS FROM( ... ). The output of such an item is the concatenation of the first row from each function, then the second row from each function, etc. If some of the functions produce fewer rows than others, null values are substituted for the missing data, so @@ -467,28 +467,28 @@ TABLE [ ONLY ] table_name [ * ] If the function has been defined as returning the - record data type, then an alias or the key word - AS must be present, followed by a column + record data type, then an alias or the key word + AS must be present, followed by a column definition list in the form ( column_name data_type , ... - ). The column definition list must match the + ). The column definition list must match the actual number and types of columns returned by the function. - When using the ROWS FROM( ... ) syntax, if one of the + When using the ROWS FROM( ... ) syntax, if one of the functions requires a column definition list, it's preferred to put the column definition list after the function call inside - ROWS FROM( ... ). A column definition list can be placed - after the ROWS FROM( ... ) construct only if there's just - a single function and no WITH ORDINALITY clause. + ROWS FROM( ... ). A column definition list can be placed + after the ROWS FROM( ... ) construct only if there's just + a single function and no WITH ORDINALITY clause. To use ORDINALITY together with a column definition - list, you must use the ROWS FROM( ... ) syntax and put the - column definition list inside ROWS FROM( ... ). + list, you must use the ROWS FROM( ... ) syntax and put the + column definition list inside ROWS FROM( ... ). @@ -516,9 +516,9 @@ TABLE [ ONLY ] table_name [ * ] - For the INNER and OUTER join types, a + For the INNER and OUTER join types, a join condition must be specified, namely exactly one of - NATURAL, ON NATURAL, ON join_condition, or USING (join_column [, ...]). @@ -527,46 +527,46 @@ TABLE [ ONLY ] table_name [ * ] - A JOIN clause combines two FROM - items, which for convenience we will refer to as tables, - though in reality they can be any type of FROM item. + A JOIN clause combines two FROM + items, which for convenience we will refer to as tables, + though in reality they can be any type of FROM item. Use parentheses if necessary to determine the order of nesting. In the absence of parentheses, JOINs nest left-to-right. In any case JOIN binds more - tightly than the commas separating FROM-list items. + tightly than the commas separating FROM-list items. - CROSS JOIN and INNER JOIN + CROSS JOIN and INNER JOIN produce a simple Cartesian product, the same result as you get from - listing the two tables at the top level of FROM, + listing the two tables at the top level of FROM, but restricted by the join condition (if any). - CROSS JOIN is equivalent to INNER JOIN ON - (TRUE), that is, no rows are removed by qualification. + CROSS JOIN is equivalent to INNER JOIN ON + (TRUE), that is, no rows are removed by qualification. These join types are just a notational convenience, since they - do nothing you couldn't do with plain FROM and - WHERE. + do nothing you couldn't do with plain FROM and + WHERE. - LEFT OUTER JOIN returns all rows in the qualified + LEFT OUTER JOIN returns all rows in the qualified Cartesian product (i.e., all combined rows that pass its join condition), plus one copy of each row in the left-hand table for which there was no right-hand row that passed the join condition. 
This left-hand row is extended to the full width of the joined table by inserting null values for the - right-hand columns. Note that only the JOIN + right-hand columns. Note that only the JOIN clause's own condition is considered while deciding which rows have matches. Outer conditions are applied afterwards. - Conversely, RIGHT OUTER JOIN returns all the + Conversely, RIGHT OUTER JOIN returns all the joined rows, plus one row for each unmatched right-hand row (extended with nulls on the left). This is just a notational convenience, since you could convert it to a LEFT - OUTER JOIN by switching the left and right tables. + OUTER JOIN by switching the left and right tables. - FULL OUTER JOIN returns all the joined rows, plus + FULL OUTER JOIN returns all the joined rows, plus one row for each unmatched left-hand row (extended with nulls on the right), plus one row for each unmatched right-hand row (extended with nulls on the left). @@ -593,7 +593,7 @@ TABLE [ ONLY ] table_name [ * ] A clause of the form USING ( a, b, ... ) is shorthand for ON left_table.a = right_table.a AND left_table.b = right_table.b .... Also, - USING implies that only one of each pair of + USING implies that only one of each pair of equivalent columns will be included in the join output, not both. @@ -605,10 +605,10 @@ TABLE [ ONLY ] table_name [ * ] NATURAL is shorthand for a - USING list that mentions all columns in the two + USING list that mentions all columns in the two tables that have matching names. If there are no common column names, NATURAL is equivalent - to ON TRUE. + to ON TRUE. @@ -618,32 +618,32 @@ TABLE [ ONLY ] table_name [ * ] The LATERAL key word can precede a - sub-SELECT FROM item. This allows the - sub-SELECT to refer to columns of FROM - items that appear before it in the FROM list. (Without + sub-SELECT FROM item. This allows the + sub-SELECT to refer to columns of FROM + items that appear before it in the FROM list. (Without LATERAL, each sub-SELECT is evaluated independently and so cannot cross-reference any other - FROM item.) + FROM item.) LATERAL can also precede a function-call - FROM item, but in this case it is a noise word, because - the function expression can refer to earlier FROM items + FROM item, but in this case it is a noise word, because + the function expression can refer to earlier FROM items in any case. A LATERAL item can appear at top level in the - FROM list, or within a JOIN tree. In the + FROM list, or within a JOIN tree. In the latter case it can also refer to any items that are on the left-hand - side of a JOIN that it is on the right-hand side of. + side of a JOIN that it is on the right-hand side of. - When a FROM item contains LATERAL + When a FROM item contains LATERAL cross-references, evaluation proceeds as follows: for each row of the - FROM item providing the cross-referenced column(s), or - set of rows of multiple FROM items providing the + FROM item providing the cross-referenced column(s), or + set of rows of multiple FROM items providing the columns, the LATERAL item is evaluated using that row or row set's values of the columns. The resulting row(s) are joined as usual with the rows they were computed from. This is @@ -651,14 +651,14 @@ TABLE [ ONLY ] table_name [ * ] - The column source table(s) must be INNER or - LEFT joined to the LATERAL item, else + The column source table(s) must be INNER or + LEFT joined to the LATERAL item, else there would not be a well-defined set of rows from which to compute each set of rows for the LATERAL item. 
Thus, - although a construct such as X RIGHT JOIN - LATERAL Y is syntactically valid, it is - not actually allowed for Y to reference - X. + although a construct such as X RIGHT JOIN + LATERAL Y is syntactically valid, it is + not actually allowed for Y to reference + X. @@ -666,7 +666,7 @@ TABLE [ ONLY ] table_name [ * ] - + <literal>WHERE</literal> Clause @@ -683,7 +683,7 @@ WHERE condition - + <literal>GROUP BY</literal> Clause @@ -707,14 +707,14 @@ GROUP BY grouping_element [, ...] - If any of GROUPING SETS, ROLLUP or - CUBE are present as grouping elements, then the - GROUP BY clause as a whole defines some number of - independent grouping sets. The effect of this is - equivalent to constructing a UNION ALL between + If any of GROUPING SETS, ROLLUP or + CUBE are present as grouping elements, then the + GROUP BY clause as a whole defines some number of + independent grouping sets. The effect of this is + equivalent to constructing a UNION ALL between subqueries with the individual grouping sets as their - GROUP BY clauses. For further details on the handling - of grouping sets see . + GROUP BY clauses. For further details on the handling + of grouping sets see . @@ -725,7 +725,7 @@ GROUP BY grouping_element [, ...] the selected rows.) The set of rows fed to each aggregate function can be further filtered by attaching a FILTER clause to the aggregate function - call; see for more information. When + call; see for more information. When a FILTER clause is present, only those rows matching it are included in the input to that aggregate function. @@ -744,20 +744,20 @@ GROUP BY grouping_element [, ...] Keep in mind that all aggregate functions are evaluated before - evaluating any scalar expressions in the HAVING - clause or SELECT list. This means that, for example, - a CASE expression cannot be used to skip evaluation of - an aggregate function; see . + evaluating any scalar expressions in the HAVING + clause or SELECT list. This means that, for example, + a CASE expression cannot be used to skip evaluation of + an aggregate function; see . - Currently, FOR NO KEY UPDATE, FOR UPDATE, - FOR SHARE and FOR KEY SHARE cannot be + Currently, FOR NO KEY UPDATE, FOR UPDATE, + FOR SHARE and FOR KEY SHARE cannot be specified with GROUP BY. - + <literal>HAVING</literal> Clause @@ -784,9 +784,9 @@ HAVING condition The presence of HAVING turns a query into a grouped - query even if there is no GROUP BY clause. This is the + query even if there is no GROUP BY clause. This is the same as what happens when the query contains aggregate functions but - no GROUP BY clause. All the selected rows are considered to + no GROUP BY clause. All the selected rows are considered to form a single group, and the SELECT list and HAVING clause can only reference table columns from within aggregate functions. Such a query will emit a single row if the @@ -794,13 +794,13 @@ HAVING condition - Currently, FOR NO KEY UPDATE, FOR UPDATE, - FOR SHARE and FOR KEY SHARE cannot be + Currently, FOR NO KEY UPDATE, FOR UPDATE, + FOR SHARE and FOR KEY SHARE cannot be specified with HAVING. - + <literal>WINDOW</literal> Clause @@ -809,7 +809,7 @@ HAVING condition WINDOW window_name AS ( window_definition ) [, ...] 
where window_name is - a name that can be referenced from OVER clauses or + a name that can be referenced from OVER clauses or subsequent window definitions, and window_definition is @@ -822,128 +822,178 @@ WINDOW window_name AS ( If an existing_window_name - is specified it must refer to an earlier entry in the WINDOW + is specified it must refer to an earlier entry in the WINDOW list; the new window copies its partitioning clause from that entry, as well as its ordering clause if any. In this case the new window cannot - specify its own PARTITION BY clause, and it can specify - ORDER BY only if the copied window does not have one. + specify its own PARTITION BY clause, and it can specify + ORDER BY only if the copied window does not have one. The new window always uses its own frame clause; the copied window must not specify a frame clause. - The elements of the PARTITION BY list are interpreted in + The elements of the PARTITION BY list are interpreted in much the same fashion as elements of a - , except that + , except that they are always simple expressions and never the name or number of an output column. Another difference is that these expressions can contain aggregate - function calls, which are not allowed in a regular GROUP BY + function calls, which are not allowed in a regular GROUP BY clause. They are allowed here because windowing occurs after grouping and aggregation. - Similarly, the elements of the ORDER BY list are interpreted + Similarly, the elements of the ORDER BY list are interpreted in much the same fashion as elements of an - , except that + , except that the expressions are always taken as simple expressions and never the name or number of an output column. - The optional frame_clause defines - the window frame for window functions that depend on the + The optional frame_clause defines + the window frame for window functions that depend on the frame (not all do). The window frame is a set of related rows for - each row of the query (called the current row). - The frame_clause can be one of + each row of the query (called the current row). + The frame_clause can be one of -{ RANGE | ROWS } frame_start -{ RANGE | ROWS } BETWEEN frame_start AND frame_end +{ RANGE | ROWS | GROUPS } frame_start [ frame_exclusion ] +{ RANGE | ROWS | GROUPS } BETWEEN frame_start AND frame_end [ frame_exclusion ] - where frame_start and frame_end can be - one of + where frame_start + and frame_end can be one of UNBOUNDED PRECEDING -value PRECEDING +offset PRECEDING CURRENT ROW -value FOLLOWING +offset FOLLOWING UNBOUNDED FOLLOWING - If frame_end is omitted it defaults to CURRENT - ROW. Restrictions are that - frame_start cannot be UNBOUNDED FOLLOWING, - frame_end cannot be UNBOUNDED PRECEDING, - and the frame_end choice cannot appear earlier in the - above list than the frame_start choice — for example - RANGE BETWEEN CURRENT ROW AND value + and frame_exclusion can be one of + + +EXCLUDE CURRENT ROW +EXCLUDE GROUP +EXCLUDE TIES +EXCLUDE NO OTHERS + + + If frame_end is omitted it defaults to CURRENT + ROW. Restrictions are that + frame_start cannot be UNBOUNDED FOLLOWING, + frame_end cannot be UNBOUNDED PRECEDING, + and the frame_end choice cannot appear earlier in the + above list of frame_start + and frame_end options than + the frame_start choice does — for example + RANGE BETWEEN CURRENT ROW AND offset PRECEDING is not allowed. 
- The default framing option is RANGE UNBOUNDED PRECEDING, + The default framing option is RANGE UNBOUNDED PRECEDING, which is the same as RANGE BETWEEN UNBOUNDED PRECEDING AND - CURRENT ROW; it sets the frame to be all rows from the partition start - up through the current row's last peer (a row that ORDER - BY considers equivalent to the current row, or all rows if there - is no ORDER BY). - In general, UNBOUNDED PRECEDING means that the frame + CURRENT ROW; it sets the frame to be all rows from the partition start + up through the current row's last peer (a row + that the window's ORDER BY clause considers + equivalent to the current row; all rows are peers if there + is no ORDER BY). + In general, UNBOUNDED PRECEDING means that the frame starts with the first row of the partition, and similarly - UNBOUNDED FOLLOWING means that the frame ends with the last - row of the partition (regardless of RANGE or ROWS - mode). In ROWS mode, CURRENT ROW - means that the frame starts or ends with the current row; but in - RANGE mode it means that the frame starts or ends with - the current row's first or last peer in the ORDER BY ordering. - The value PRECEDING and - value FOLLOWING cases are currently only - allowed in ROWS mode. They indicate that the frame starts - or ends with the row that many rows before or after the current row. - value must be an integer expression not - containing any variables, aggregate functions, or window functions. - The value must not be null or negative; but it can be zero, which - selects the current row itself. - - - - Beware that the ROWS options can produce unpredictable - results if the ORDER BY ordering does not order the rows - uniquely. The RANGE options are designed to ensure that - rows that are peers in the ORDER BY ordering are treated - alike; all peer rows will be in the same frame. + UNBOUNDED FOLLOWING means that the frame ends with the last + row of the partition, regardless + of RANGE, ROWS + or GROUPS mode. + In ROWS mode, CURRENT ROW means + that the frame starts or ends with the current row; but + in RANGE or GROUPS mode it means + that the frame starts or ends with the current row's first or last peer + in the ORDER BY ordering. + The offset PRECEDING and + offset FOLLOWING options + vary in meaning depending on the frame mode. + In ROWS mode, the offset + is an integer indicating that the frame starts or ends that many rows + before or after the current row. + In GROUPS mode, the offset + is an integer indicating that the frame starts or ends that many peer + groups before or after the current row's peer group, where + a peer group is a group of rows that are + equivalent according to the window's ORDER BY clause. + In RANGE mode, use of + an offset option requires that there be + exactly one ORDER BY column in the window definition. + Then the frame contains those rows whose ordering column value is no + more than offset less than + (for PRECEDING) or more than + (for FOLLOWING) the current row's ordering column + value. In these cases the data type of + the offset expression depends on the data + type of the ordering column. For numeric ordering columns it is + typically of the same type as the ordering column, but for datetime + ordering columns it is an interval. + In all these cases, the value of the offset + must be non-null and non-negative. Also, while + the offset does not have to be a simple + constant, it cannot contain variables, aggregate functions, or window + functions. 
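 The offset-based frame modes just described might be sketched as follows, assuming a
 hypothetical table readings(taken_at timestamptz, value numeric); in RANGE mode the offset
 for a timestamp ordering column is an interval, while in GROUPS mode it counts peer groups:

SELECT taken_at,
       -- rows whose taken_at falls within the hour before the current row,
       -- up through the current row's peers
       avg(value) OVER (ORDER BY taken_at
                        RANGE BETWEEN INTERVAL '1 hour' PRECEDING AND CURRENT ROW)
         AS trailing_hour_avg,
       -- the current row's peer group plus the immediately preceding peer group
       count(*) OVER (ORDER BY taken_at
                      GROUPS BETWEEN 1 PRECEDING AND CURRENT ROW) AS grp_count
  FROM readings;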
+ + + + The frame_exclusion option allows rows around + the current row to be excluded from the frame, even if they would be + included according to the frame start and frame end options. + EXCLUDE CURRENT ROW excludes the current row from the + frame. + EXCLUDE GROUP excludes the current row and its + ordering peers from the frame. + EXCLUDE TIES excludes any peers of the current + row from the frame, but not the current row itself. + EXCLUDE NO OTHERS simply specifies explicitly the + default behavior of not excluding the current row or its peers. + + + + Beware that the ROWS mode can produce unpredictable + results if the ORDER BY ordering does not order the rows + uniquely. The RANGE and GROUPS + modes are designed to ensure that rows that are peers in + the ORDER BY ordering are treated alike: all rows of + a given peer group will be in the frame or excluded from it. The purpose of a WINDOW clause is to specify the - behavior of window functions appearing in the query's - or - . These functions + behavior of window functions appearing in the query's + or + . These functions can reference the WINDOW clause entries by name - in their OVER clauses. A WINDOW clause + in their OVER clauses. A WINDOW clause entry does not have to be referenced anywhere, however; if it is not used in the query it is simply ignored. It is possible to use window functions without any WINDOW clause at all, since a window function call can specify its window definition directly in - its OVER clause. However, the WINDOW + its OVER clause. However, the WINDOW clause saves typing when the same window definition is needed for more than one window function. - Currently, FOR NO KEY UPDATE, FOR UPDATE, - FOR SHARE and FOR KEY SHARE cannot be + Currently, FOR NO KEY UPDATE, FOR UPDATE, + FOR SHARE and FOR KEY SHARE cannot be specified with WINDOW. Window functions are described in detail in - , - , and - . + , + , and + . @@ -952,24 +1002,24 @@ UNBOUNDED FOLLOWING The SELECT list (between the key words - SELECT and FROM) specifies expressions + SELECT and FROM) specifies expressions that form the output rows of the SELECT statement. The expressions can (and usually do) refer to columns - computed in the FROM clause. + computed in the FROM clause. Just as in a table, every output column of a SELECT has a name. In a simple SELECT this name is just - used to label the column for display, but when the SELECT + used to label the column for display, but when the SELECT is a sub-query of a larger query, the name is seen by the larger query as the column name of the virtual table produced by the sub-query. To specify the name to use for an output column, write - AS output_name + AS output_name after the column's expression. (You can omit AS, but only if the desired output name does not match any PostgreSQL keyword (see ). For protection against possible + linkend="sql-keywords-appendix"/>). For protection against possible future keyword additions, it is recommended that you always either write AS or double-quote the output name.) If you do not specify a column name, a name is chosen automatically @@ -982,8 +1032,8 @@ UNBOUNDED FOLLOWING An output column's name can be used to refer to the column's value in - ORDER BY and GROUP BY clauses, but not in the - WHERE or HAVING clauses; there you must write + ORDER BY and GROUP BY clauses, but not in the + WHERE or HAVING clauses; there you must write out the expression instead. @@ -993,7 +1043,7 @@ UNBOUNDED FOLLOWING rows. 
Also, you can write table_name.* as a shorthand for the columns coming from just that table. In these - cases it is not possible to specify new names with AS; + cases it is not possible to specify new names with AS; the output column names will be the same as the table columns' names. @@ -1008,11 +1058,11 @@ UNBOUNDED FOLLOWING contains any volatile or expensive functions. With that behavior, the order of function evaluations is more intuitive and there will not be evaluations corresponding to rows that never appear in the output. - PostgreSQL will effectively evaluate output expressions + PostgreSQL will effectively evaluate output expressions after sorting and limiting, so long as those expressions are not referenced in DISTINCT, ORDER BY or GROUP BY. (As a counterexample, SELECT - f(x) FROM tab ORDER BY 1 clearly must evaluate f(x) + f(x) FROM tab ORDER BY 1 clearly must evaluate f(x) before sorting.) Output expressions that contain set-returning functions are effectively evaluated after sorting and before limiting, so that LIMIT will act to cut off the output from a @@ -1021,7 +1071,7 @@ UNBOUNDED FOLLOWING - PostgreSQL versions before 9.6 did not provide any + PostgreSQL versions before 9.6 did not provide any guarantees about the timing of evaluation of output expressions versus sorting and limiting; it depended on the form of the chosen query plan. @@ -1032,9 +1082,9 @@ UNBOUNDED FOLLOWING <literal>DISTINCT</literal> Clause - If SELECT DISTINCT is specified, all duplicate rows are + If SELECT DISTINCT is specified, all duplicate rows are removed from the result set (one row is kept from each group of - duplicates). SELECT ALL specifies the opposite: all rows are + duplicates). SELECT ALL specifies the opposite: all rows are kept; that is the default. @@ -1044,9 +1094,9 @@ UNBOUNDED FOLLOWING keeps only the first row of each set of rows where the given expressions evaluate to equal. The DISTINCT ON expressions are interpreted using the same rules as for - ORDER BY (see above). Note that the first + ORDER BY (see above). Note that the first row of each set is unpredictable unless ORDER - BY is used to ensure that the desired row appears first. For + BY is used to ensure that the desired row appears first. For example: SELECT DISTINCT ON (location) location, time, report @@ -1054,26 +1104,26 @@ SELECT DISTINCT ON (location) location, time, report ORDER BY location, time DESC; retrieves the most recent weather report for each location. But - if we had not used ORDER BY to force descending order + if we had not used ORDER BY to force descending order of time values for each location, we'd have gotten a report from an unpredictable time for each location. - The DISTINCT ON expression(s) must match the leftmost - ORDER BY expression(s). The ORDER BY clause + The DISTINCT ON expression(s) must match the leftmost + ORDER BY expression(s). The ORDER BY clause will normally contain additional expression(s) that determine the - desired precedence of rows within each DISTINCT ON group. + desired precedence of rows within each DISTINCT ON group. - Currently, FOR NO KEY UPDATE, FOR UPDATE, - FOR SHARE and FOR KEY SHARE cannot be + Currently, FOR NO KEY UPDATE, FOR UPDATE, + FOR SHARE and FOR KEY SHARE cannot be specified with DISTINCT. 
- + <literal>UNION</literal> Clause @@ -1082,9 +1132,9 @@ SELECT DISTINCT ON (location) location, time, report select_statement UNION [ ALL | DISTINCT ] select_statement select_statement is any SELECT statement without an ORDER - BY, LIMIT, FOR NO KEY UPDATE, FOR UPDATE, + BY, LIMIT, FOR NO KEY UPDATE, FOR UPDATE, FOR SHARE, or FOR KEY SHARE clause. - (ORDER BY and LIMIT can be attached to a + (ORDER BY and LIMIT can be attached to a subexpression if it is enclosed in parentheses. Without parentheses, these clauses will be taken to apply to the result of the UNION, not to its right-hand input @@ -1103,30 +1153,30 @@ SELECT DISTINCT ON (location) location, time, report - The result of UNION does not contain any duplicate - rows unless the ALL option is specified. - ALL prevents elimination of duplicates. (Therefore, - UNION ALL is usually significantly quicker than - UNION; use ALL when you can.) - DISTINCT can be written to explicitly specify the + The result of UNION does not contain any duplicate + rows unless the ALL option is specified. + ALL prevents elimination of duplicates. (Therefore, + UNION ALL is usually significantly quicker than + UNION; use ALL when you can.) + DISTINCT can be written to explicitly specify the default behavior of eliminating duplicate rows. - Multiple UNION operators in the same + Multiple UNION operators in the same SELECT statement are evaluated left to right, unless otherwise indicated by parentheses. - Currently, FOR NO KEY UPDATE, FOR UPDATE, FOR SHARE and - FOR KEY SHARE cannot be - specified either for a UNION result or for any input of a - UNION. + Currently, FOR NO KEY UPDATE, FOR UPDATE, FOR SHARE and + FOR KEY SHARE cannot be + specified either for a UNION result or for any input of a + UNION. - + <literal>INTERSECT</literal> Clause @@ -1135,8 +1185,8 @@ SELECT DISTINCT ON (location) location, time, report select_statement INTERSECT [ ALL | DISTINCT ] select_statement select_statement is any SELECT statement without an ORDER - BY, LIMIT, FOR NO KEY UPDATE, FOR UPDATE, - FOR SHARE, or FOR KEY SHARE clause. + BY, LIMIT, FOR NO KEY UPDATE, FOR UPDATE, + FOR SHARE, or FOR KEY SHARE clause. @@ -1148,11 +1198,11 @@ SELECT DISTINCT ON (location) location, time, report The result of INTERSECT does not contain any - duplicate rows unless the ALL option is specified. - With ALL, a row that has m duplicates in the - left table and n duplicates in the right table will appear - min(m,n) times in the result set. - DISTINCT can be written to explicitly specify the + duplicate rows unless the ALL option is specified. + With ALL, a row that has m duplicates in the + left table and n duplicates in the right table will appear + min(m,n) times in the result set. + DISTINCT can be written to explicitly specify the default behavior of eliminating duplicate rows. @@ -1167,14 +1217,14 @@ SELECT DISTINCT ON (location) location, time, report - Currently, FOR NO KEY UPDATE, FOR UPDATE, FOR SHARE and - FOR KEY SHARE cannot be - specified either for an INTERSECT result or for any input of - an INTERSECT. + Currently, FOR NO KEY UPDATE, FOR UPDATE, FOR SHARE and + FOR KEY SHARE cannot be + specified either for an INTERSECT result or for any input of + an INTERSECT. - + <literal>EXCEPT</literal> Clause @@ -1183,8 +1233,8 @@ SELECT DISTINCT ON (location) location, time, report select_statement EXCEPT [ ALL | DISTINCT ] select_statement select_statement is any SELECT statement without an ORDER - BY, LIMIT, FOR NO KEY UPDATE, FOR UPDATE, - FOR SHARE, or FOR KEY SHARE clause. 
+ BY, LIMIT, FOR NO KEY UPDATE, FOR UPDATE, + FOR SHARE, or FOR KEY SHARE clause. @@ -1195,30 +1245,30 @@ SELECT DISTINCT ON (location) location, time, report The result of EXCEPT does not contain any - duplicate rows unless the ALL option is specified. - With ALL, a row that has m duplicates in the - left table and n duplicates in the right table will appear - max(m-n,0) times in the result set. - DISTINCT can be written to explicitly specify the + duplicate rows unless the ALL option is specified. + With ALL, a row that has m duplicates in the + left table and n duplicates in the right table will appear + max(m-n,0) times in the result set. + DISTINCT can be written to explicitly specify the default behavior of eliminating duplicate rows. Multiple EXCEPT operators in the same SELECT statement are evaluated left to right, - unless parentheses dictate otherwise. EXCEPT binds at - the same level as UNION. + unless parentheses dictate otherwise. EXCEPT binds at + the same level as UNION. - Currently, FOR NO KEY UPDATE, FOR UPDATE, FOR SHARE and - FOR KEY SHARE cannot be - specified either for an EXCEPT result or for any input of - an EXCEPT. + Currently, FOR NO KEY UPDATE, FOR UPDATE, FOR SHARE and + FOR KEY SHARE cannot be + specified either for an EXCEPT result or for any input of + an EXCEPT. - + <literal>ORDER BY</literal> Clause @@ -1247,7 +1297,7 @@ ORDER BY expression [ ASC | DESC | ordering on the basis of a column that does not have a unique name. This is never absolutely necessary because it is always possible to assign a name to an output column using the - AS clause. + AS clause. @@ -1258,65 +1308,65 @@ ORDER BY expression [ ASC | DESC | SELECT name FROM distributors ORDER BY code; - A limitation of this feature is that an ORDER BY - clause applying to the result of a UNION, - INTERSECT, or EXCEPT clause can only + A limitation of this feature is that an ORDER BY + clause applying to the result of a UNION, + INTERSECT, or EXCEPT clause can only specify an output column name or number, not an expression. - If an ORDER BY expression is a simple name that + If an ORDER BY expression is a simple name that matches both an output column name and an input column name, - ORDER BY will interpret it as the output column name. - This is the opposite of the choice that GROUP BY will + ORDER BY will interpret it as the output column name. + This is the opposite of the choice that GROUP BY will make in the same situation. This inconsistency is made to be compatible with the SQL standard. - Optionally one can add the key word ASC (ascending) or - DESC (descending) after any expression in the - ORDER BY clause. If not specified, ASC is + Optionally one can add the key word ASC (ascending) or + DESC (descending) after any expression in the + ORDER BY clause. If not specified, ASC is assumed by default. Alternatively, a specific ordering operator - name can be specified in the USING clause. + name can be specified in the USING clause. An ordering operator must be a less-than or greater-than member of some B-tree operator family. - ASC is usually equivalent to USING < and - DESC is usually equivalent to USING >. + ASC is usually equivalent to USING < and + DESC is usually equivalent to USING >. (But the creator of a user-defined data type can define exactly what the default sort ordering is, and it might correspond to operators with other names.) 
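 A short sketch of the ORDER BY forms described above, assuming a hypothetical table
 orders(customer, amount); the output-column name total can be used as a sort key, and
 ORDER BY 2 DESC, customer would be the equivalent ordinal form:

SELECT customer, sum(amount) AS total
  FROM orders
 GROUP BY customer
 ORDER BY total DESC, customer ASC;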
- If NULLS LAST is specified, null values sort after all - non-null values; if NULLS FIRST is specified, null values + If NULLS LAST is specified, null values sort after all + non-null values; if NULLS FIRST is specified, null values sort before all non-null values. If neither is specified, the default - behavior is NULLS LAST when ASC is specified - or implied, and NULLS FIRST when DESC is specified + behavior is NULLS LAST when ASC is specified + or implied, and NULLS FIRST when DESC is specified (thus, the default is to act as though nulls are larger than non-nulls). - When USING is specified, the default nulls ordering depends + When USING is specified, the default nulls ordering depends on whether the operator is a less-than or greater-than operator. Note that ordering options apply only to the expression they follow; - for example ORDER BY x, y DESC does not mean - the same thing as ORDER BY x DESC, y DESC. + for example ORDER BY x, y DESC does not mean + the same thing as ORDER BY x DESC, y DESC. Character-string data is sorted according to the collation that applies to the column being sorted. That can be overridden at need by including - a COLLATE clause in the + a COLLATE clause in the expression, for example - ORDER BY mycolumn COLLATE "en_US". - For more information see and - . + ORDER BY mycolumn COLLATE "en_US". + For more information see and + . - + <literal>LIMIT</literal> Clause @@ -1337,72 +1387,74 @@ OFFSET start If the count expression - evaluates to NULL, it is treated as LIMIT ALL, i.e., no + evaluates to NULL, it is treated as LIMIT ALL, i.e., no limit. If start evaluates - to NULL, it is treated the same as OFFSET 0. + to NULL, it is treated the same as OFFSET 0. SQL:2008 introduced a different syntax to achieve the same result, - which PostgreSQL also supports. It is: + which PostgreSQL also supports. It is: OFFSET start { ROW | ROWS } FETCH { FIRST | NEXT } [ count ] { ROW | ROWS } ONLY - In this syntax, to write anything except a simple integer constant for - start or count, you must write parentheses - around it. - If count is - omitted in a FETCH clause, it defaults to 1. + In this syntax, the start + or count value is required by + the standard to be a literal constant, a parameter, or a variable name; + as a PostgreSQL extension, other expressions + are allowed, but will generally need to be enclosed in parentheses to avoid + ambiguity. + If count is + omitted in a FETCH clause, it defaults to 1. ROW and ROWS as well as FIRST and NEXT are noise words that don't influence the effects of these clauses. According to the standard, the OFFSET clause must come before the FETCH clause if both are present; but - PostgreSQL is laxer and allows either order. + PostgreSQL is laxer and allows either order. - When using LIMIT, it is a good idea to use an - ORDER BY clause that constrains the result rows into a + When using LIMIT, it is a good idea to use an + ORDER BY clause that constrains the result rows into a unique order. Otherwise you will get an unpredictable subset of the query's rows — you might be asking for the tenth through twentieth rows, but tenth through twentieth in what ordering? You - don't know what ordering unless you specify ORDER BY. + don't know what ordering unless you specify ORDER BY. - The query planner takes LIMIT into account when + The query planner takes LIMIT into account when generating a query plan, so you are very likely to get different plans (yielding different row orders) depending on what you use - for LIMIT and OFFSET. 
Thus, using - different LIMIT/OFFSET values to select + for LIMIT and OFFSET. Thus, using + different LIMIT/OFFSET values to select different subsets of a query result will give inconsistent results unless you enforce a predictable - result ordering with ORDER BY. This is not a bug; it + result ordering with ORDER BY. This is not a bug; it is an inherent consequence of the fact that SQL does not promise to deliver the results of a query in any particular order unless - ORDER BY is used to constrain the order. + ORDER BY is used to constrain the order. - It is even possible for repeated executions of the same LIMIT + It is even possible for repeated executions of the same LIMIT query to return different subsets of the rows of a table, if there - is not an ORDER BY to enforce selection of a deterministic + is not an ORDER BY to enforce selection of a deterministic subset. Again, this is not a bug; determinism of the results is simply not guaranteed in such a case. - + The Locking Clause - FOR UPDATE, FOR NO KEY UPDATE, FOR SHARE - and FOR KEY SHARE - are locking clauses; they affect how SELECT + FOR UPDATE, FOR NO KEY UPDATE, FOR SHARE + and FOR KEY SHARE + are locking clauses; they affect how SELECT locks rows as they are obtained from the table. @@ -1410,10 +1462,10 @@ FETCH { FIRST | NEXT } [ count ] { The locking clause has the general form -FOR lock_strength [ OF table_name [, ...] ] [ NOWAIT | SKIP LOCKED ] +FOR lock_strength [ OF table_name [, ...] ] [ NOWAIT | SKIP LOCKED ] - where lock_strength can be one of + where lock_strength can be one of UPDATE @@ -1425,25 +1477,25 @@ KEY SHARE For more information on each row-level lock mode, refer to - . + . To prevent the operation from waiting for other transactions to commit, - use either the NOWAIT or SKIP LOCKED - option. With NOWAIT, the statement reports an error, rather + use either the NOWAIT or SKIP LOCKED + option. With NOWAIT, the statement reports an error, rather than waiting, if a selected row cannot be locked immediately. With SKIP LOCKED, any selected rows that cannot be immediately locked are skipped. Skipping locked rows provides an inconsistent view of the data, so this is not suitable for general purpose work, but can be used to avoid lock contention with multiple consumers accessing a queue-like table. - Note that NOWAIT and SKIP LOCKED apply only + Note that NOWAIT and SKIP LOCKED apply only to the row-level lock(s) — the required ROW SHARE table-level lock is still taken in the ordinary way (see - ). You can use - - with the NOWAIT option first, + ). You can use + + with the NOWAIT option first, if you need to acquire the table-level lock without waiting. @@ -1457,9 +1509,9 @@ KEY SHARE applied to a view or sub-query, it affects all tables used in the view or sub-query. However, these clauses - do not apply to WITH queries referenced by the primary query. - If you want row locking to occur within a WITH query, specify - a locking clause within the WITH query. + do not apply to WITH queries referenced by the primary query. + If you want row locking to occur within a WITH query, specify + a locking clause within the WITH query. @@ -1469,7 +1521,7 @@ KEY SHARE implicitly affected) by more than one locking clause, then it is processed as if it was only specified by the strongest one. Similarly, a table is processed - as NOWAIT if that is specified in any of the clauses + as NOWAIT if that is specified in any of the clauses affecting it. 
Otherwise, it is processed as SKIP LOCKED if that is specified in any of the clauses affecting it. @@ -1483,16 +1535,16 @@ KEY SHARE When a locking clause - appears at the top level of a SELECT query, the rows that + appears at the top level of a SELECT query, the rows that are locked are exactly those that are returned by the query; in the case of a join query, the rows locked are those that contribute to returned join rows. In addition, rows that satisfied the query conditions as of the query snapshot will be locked, although they will not be returned if they were updated after the snapshot and no longer satisfy the query conditions. If a - LIMIT is used, locking stops + LIMIT is used, locking stops once enough rows have been returned to satisfy the limit (but note that - rows skipped over by OFFSET will get locked). Similarly, + rows skipped over by OFFSET will get locked). Similarly, if a locking clause is used in a cursor's query, only rows actually fetched or stepped past by the cursor will be locked. @@ -1500,7 +1552,7 @@ KEY SHARE When a locking clause - appears in a sub-SELECT, the rows locked are those + appears in a sub-SELECT, the rows locked are those returned to the outer query by the sub-query. This might involve fewer rows than inspection of the sub-query alone would suggest, since conditions from the outer query might be used to optimize @@ -1508,7 +1560,7 @@ KEY SHARE SELECT * FROM (SELECT * FROM mytable FOR UPDATE) ss WHERE col1 = 5; - will lock only rows having col1 = 5, even though that + will lock only rows having col1 = 5, even though that condition is not textually within the sub-query. @@ -1522,18 +1574,18 @@ SAVEPOINT s; UPDATE mytable SET ... WHERE key = 1; ROLLBACK TO s; - would fail to preserve the FOR UPDATE lock after the - ROLLBACK TO. This has been fixed in release 9.3. + would fail to preserve the FOR UPDATE lock after the + ROLLBACK TO. This has been fixed in release 9.3. - It is possible for a SELECT command running at the READ + It is possible for a SELECT command running at the READ COMMITTED transaction isolation level and using ORDER BY and a locking clause to return rows out of - order. This is because ORDER BY is applied first. + order. This is because ORDER BY is applied first. The command sorts the result, but might then block trying to obtain a lock - on one or more of the rows. Once the SELECT unblocks, some + on one or more of the rows. Once the SELECT unblocks, some of the ordering column values might have been modified, leading to those rows appearing to be out of order (though they are in order in terms of the original column values). This can be worked around at need by @@ -1542,11 +1594,11 @@ ROLLBACK TO s; SELECT * FROM (SELECT * FROM mytable FOR UPDATE) ss ORDER BY column1; - Note that this will result in locking all rows of mytable, - whereas FOR UPDATE at the top level would lock only the + Note that this will result in locking all rows of mytable, + whereas FOR UPDATE at the top level would lock only the actually returned rows. This can make for a significant performance - difference, particularly if the ORDER BY is combined with - LIMIT or other restrictions. So this technique is recommended + difference, particularly if the ORDER BY is combined with + LIMIT or other restrictions. So this technique is recommended only if concurrent updates of the ordering columns are expected and a strictly sorted result is required. 
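    As a sketch of the queue-consumer pattern that SKIP LOCKED is intended
    for (the job_queue table, its columns, and the 'pending' status value
    are all hypothetical), each worker claims one unprocessed row without
    waiting for rows already locked by other workers:

BEGIN;
SELECT * FROM job_queue
    WHERE status = 'pending'
    ORDER BY id
    LIMIT 1
    FOR UPDATE SKIP LOCKED;
-- ... process the claimed row and update its status ...
COMMIT;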
@@ -1560,7 +1612,7 @@ SELECT * FROM (SELECT * FROM mytable FOR UPDATE) ss ORDER BY column1; - + <literal>TABLE</literal> Command @@ -1573,11 +1625,11 @@ TABLE name SELECT * FROM name It can be used as a top-level command or as a space-saving syntax - variant in parts of complex queries. Only the WITH, - UNION, INTERSECT, EXCEPT, - ORDER BY, LIMIT, OFFSET, - FETCH and FOR locking clauses can be used - with TABLE; the WHERE clause and any form of + variant in parts of complex queries. Only the WITH, + UNION, INTERSECT, EXCEPT, + ORDER BY, LIMIT, OFFSET, + FETCH and FOR locking clauses can be used + with TABLE; the WHERE clause and any form of aggregation cannot be used. @@ -1702,7 +1754,7 @@ SELECT actors.name - This example shows how to use a function in the FROM + This example shows how to use a function in the FROM clause, both with and without a column definition list: @@ -1744,7 +1796,7 @@ SELECT * FROM unnest(ARRAY['a','b','c','d','e','f']) WITH ORDINALITY; - This example shows how to use a simple WITH clause: + This example shows how to use a simple WITH clause: WITH t AS ( @@ -1764,7 +1816,7 @@ SELECT * FROM t 0.0735620250925422 - Notice that the WITH query was evaluated only once, + Notice that the WITH query was evaluated only once, so that we got two sets of the same three random values. @@ -1791,14 +1843,14 @@ SELECT distance, employee_name FROM employee_recursive; an initial condition, followed by UNION, followed by the recursive part of the query. Be sure that the recursive part of the query will eventually return no tuples, or - else the query will loop indefinitely. (See + else the query will loop indefinitely. (See for more examples.) - This example uses LATERAL to apply a set-returning function - get_product_names() for each row of the - manufacturers table: + This example uses LATERAL to apply a set-returning function + get_product_names() for each row of the + manufacturers table: SELECT m.name AS mname, pname @@ -1866,7 +1918,7 @@ SELECT distributors.* WHERE distributors.name = 'Westward'; This is not valid syntax according to the SQL standard. PostgreSQL allows it to be consistent with allowing zero-column tables. - However, an empty list is not allowed when DISTINCT is used. + However, an empty list is not allowed when DISTINCT is used. @@ -1874,19 +1926,19 @@ SELECT distributors.* WHERE distributors.name = 'Westward'; Omitting the <literal>AS</literal> Key Word - In the SQL standard, the optional key word AS can be + In the SQL standard, the optional key word AS can be omitted before an output column name whenever the new column name is a valid column name (that is, not the same as any reserved keyword). PostgreSQL is slightly more - restrictive: AS is required if the new column name + restrictive: AS is required if the new column name matches any keyword at all, reserved or not. Recommended practice is - to use AS or double-quote output column names, to prevent + to use AS or double-quote output column names, to prevent any possible conflict against future keyword additions. In FROM items, both the standard and - PostgreSQL allow AS to + PostgreSQL allow AS to be omitted before an alias that is an unreserved keyword. But this is impractical for output column names, because of syntactic ambiguities. @@ -1899,12 +1951,12 @@ SELECT distributors.* WHERE distributors.name = 'Westward'; The SQL standard requires parentheses around the table name when writing ONLY, for example SELECT * FROM ONLY - (tab1), ONLY (tab2) WHERE .... PostgreSQL + (tab1), ONLY (tab2) WHERE .... 
PostgreSQL considers these parentheses to be optional. - PostgreSQL allows a trailing * to be written to + PostgreSQL allows a trailing * to be written to explicitly specify the non-ONLY behavior of including child tables. The standard does not allow this. @@ -1919,9 +1971,9 @@ SELECT distributors.* WHERE distributors.name = 'Westward'; <literal>TABLESAMPLE</literal> Clause Restrictions - The TABLESAMPLE clause is currently accepted only on + The TABLESAMPLE clause is currently accepted only on regular tables and materialized views. According to the SQL standard - it should be possible to apply it to any FROM item. + it should be possible to apply it to any FROM item. @@ -1930,16 +1982,16 @@ SELECT distributors.* WHERE distributors.name = 'Westward'; PostgreSQL allows a function call to be - written directly as a member of the FROM list. In the SQL + written directly as a member of the FROM list. In the SQL standard it would be necessary to wrap such a function call in a sub-SELECT; that is, the syntax - FROM func(...) alias + FROM func(...) alias is approximately equivalent to - FROM LATERAL (SELECT func(...)) alias. - Note that LATERAL is considered to be implicit; this is - because the standard requires LATERAL semantics for an - UNNEST() item in FROM. - PostgreSQL treats UNNEST() the + FROM LATERAL (SELECT func(...)) alias. + Note that LATERAL is considered to be implicit; this is + because the standard requires LATERAL semantics for an + UNNEST() item in FROM. + PostgreSQL treats UNNEST() the same as other set-returning functions. @@ -1974,24 +2026,13 @@ SELECT distributors.* WHERE distributors.name = 'Westward'; PostgreSQL recognizes functional dependency - (allowing columns to be omitted from GROUP BY) only when - a table's primary key is included in the GROUP BY list. + (allowing columns to be omitted from GROUP BY) only when + a table's primary key is included in the GROUP BY list. The SQL standard specifies additional conditions that should be recognized. - - <literal>WINDOW</literal> Clause Restrictions - - - The SQL standard provides additional options for the window - frame_clause. - PostgreSQL currently supports only the - options listed above. - - - <literal>LIMIT</literal> and <literal>OFFSET</literal> @@ -2001,7 +2042,7 @@ SELECT distributors.* WHERE distributors.name = 'Westward'; used by MySQL. The SQL:2008 standard has introduced the clauses OFFSET ... FETCH {FIRST|NEXT} ... for the same functionality, as shown above - in . This + in . This syntax is also used by IBM DB2. (Applications written for Oracle frequently use a workaround involving the automatically @@ -2011,26 +2052,26 @@ SELECT distributors.* WHERE distributors.name = 'Westward'; - <literal>FOR NO KEY UPDATE</>, <literal>FOR UPDATE</>, <literal>FOR SHARE</>, <literal>FOR KEY SHARE</> + <literal>FOR NO KEY UPDATE</literal>, <literal>FOR UPDATE</literal>, <literal>FOR SHARE</literal>, <literal>FOR KEY SHARE</literal> - Although FOR UPDATE appears in the SQL standard, the - standard allows it only as an option of DECLARE CURSOR. - PostgreSQL allows it in any SELECT - query as well as in sub-SELECTs, but this is an extension. - The FOR NO KEY UPDATE, FOR SHARE and - FOR KEY SHARE variants, as well as the NOWAIT + Although FOR UPDATE appears in the SQL standard, the + standard allows it only as an option of DECLARE CURSOR. + PostgreSQL allows it in any SELECT + query as well as in sub-SELECTs, but this is an extension. 
+ The FOR NO KEY UPDATE, FOR SHARE and + FOR KEY SHARE variants, as well as the NOWAIT and SKIP LOCKED options, do not appear in the standard. - Data-Modifying Statements in <literal>WITH</> + Data-Modifying Statements in <literal>WITH</literal> - PostgreSQL allows INSERT, - UPDATE, and DELETE to be used as WITH + PostgreSQL allows INSERT, + UPDATE, and DELETE to be used as WITH queries. This is not found in the SQL standard. @@ -2044,7 +2085,7 @@ SELECT distributors.* WHERE distributors.name = 'Westward'; - ROWS FROM( ... ) is an extension of the SQL standard. + ROWS FROM( ... ) is an extension of the SQL standard. diff --git a/doc/src/sgml/ref/select_into.sgml b/doc/src/sgml/ref/select_into.sgml index 84b0dd831f..6c1a25f5ed 100644 --- a/doc/src/sgml/ref/select_into.sgml +++ b/doc/src/sgml/ref/select_into.sgml @@ -3,7 +3,7 @@ doc/src/sgml/ref/select_into.sgml PostgreSQL documentation --> - + SELECT INTO @@ -60,7 +60,7 @@ SELECT [ ALL | DISTINCT [ ON ( expression If specified, the table is created as a temporary table. Refer - to for details. + to for details. @@ -70,13 +70,13 @@ SELECT [ ALL | DISTINCT [ ON ( expression If specified, the table is created as an unlogged table. Refer - to for details. + to for details. - new_table + new_table The name (optionally schema-qualified) of the table to be created. @@ -87,7 +87,7 @@ SELECT [ ALL | DISTINCT [ ON ( expression All other parameters are described in detail under . + linkend="sql-select"/>. @@ -95,7 +95,7 @@ SELECT [ ALL | DISTINCT [ ON ( expressionNotes - is functionally similar to + is functionally similar to SELECT INTO. CREATE TABLE AS is the recommended syntax, since this form of SELECT INTO is not available in ECPG @@ -107,7 +107,7 @@ SELECT [ ALL | DISTINCT [ ON ( expression To add OIDs to the table created by SELECT INTO, - enable the configuration + enable the configuration variable. Alternatively, CREATE TABLE AS can be used with the WITH OIDS clause. @@ -132,8 +132,8 @@ SELECT * INTO films_recent FROM films WHERE date_prod >= '2002-01-01'; The SQL standard uses SELECT INTO to represent selecting values into scalar variables of a host program, rather than creating a new table. This indeed is the usage found - in ECPG (see ) and - PL/pgSQL (see ). + in ECPG (see ) and + PL/pgSQL (see ). The PostgreSQL usage of SELECT INTO to represent table creation is historical. It is best to use CREATE TABLE AS for this purpose in @@ -145,7 +145,7 @@ SELECT * INTO films_recent FROM films WHERE date_prod >= '2002-01-01'; See Also - + diff --git a/doc/src/sgml/ref/set.sgml b/doc/src/sgml/ref/set.sgml index 4ebb6a627b..63f312e812 100644 --- a/doc/src/sgml/ref/set.sgml +++ b/doc/src/sgml/ref/set.sgml @@ -3,7 +3,7 @@ doc/src/sgml/ref/set.sgml PostgreSQL documentation --> - + SET @@ -21,8 +21,8 @@ PostgreSQL documentation -SET [ SESSION | LOCAL ] configuration_parameter { TO | = } { value | 'value' | DEFAULT } -SET [ SESSION | LOCAL ] TIME ZONE { timezone | LOCAL | DEFAULT } +SET [ SESSION | LOCAL ] configuration_parameter { TO | = } { value | 'value' | DEFAULT } +SET [ SESSION | LOCAL ] TIME ZONE { timezone | LOCAL | DEFAULT } @@ -32,7 +32,7 @@ SET [ SESSION | LOCAL ] TIME ZONE { timezone The SET command changes run-time configuration parameters. Many of the run-time parameters listed in - can be changed on-the-fly with + can be changed on-the-fly with SET. (But some require superuser privileges to change, and others cannot be changed after server or session start.) 
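   For example (a minimal sketch; work_mem is simply one parameter that can
   be changed on the fly), a value can be changed for the remainder of the
   session, or, with LOCAL, for the current transaction only:

SET work_mem = '64MB';

BEGIN;
SET LOCAL work_mem = '256MB';
-- statements here see 256MB; after COMMIT the session value of 64MB applies again
COMMIT;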
@@ -66,15 +66,15 @@ SET [ SESSION | LOCAL ] TIME ZONE { timezone If SET LOCAL is used within a function that has a - SET option for the same variable (see - ), + SET option for the same variable (see + ), the effects of the SET LOCAL command disappear at function exit; that is, the value in effect when the function was called is restored anyway. This allows SET LOCAL to be used for dynamic or repeated changes of a parameter within a function, while still - having the convenience of using the SET option to save and - restore the caller's value. However, a regular SET command - overrides any surrounding function's SET option; its effects + having the convenience of using the SET option to save and + restore the caller's value. However, a regular SET command + overrides any surrounding function's SET option; its effects will persist unless rolled back. @@ -94,22 +94,22 @@ SET [ SESSION | LOCAL ] TIME ZONE { timezone - SESSION + SESSION Specifies that the command takes effect for the current session. - (This is the default if neither SESSION nor - LOCAL appears.) + (This is the default if neither SESSION nor + LOCAL appears.) - LOCAL + LOCAL Specifies that the command takes effect for only the current - transaction. After COMMIT or ROLLBACK, + transaction. After COMMIT or ROLLBACK, the session-level setting takes effect again. Issuing this outside of a transaction block emits a warning and otherwise has no effect. @@ -118,17 +118,17 @@ SET [ SESSION | LOCAL ] TIME ZONE { timezone - configuration_parameter + configuration_parameter Name of a settable run-time parameter. Available parameters are - documented in and below. + documented in and below. - value + value New value of parameter. Values can be specified as string @@ -136,7 +136,7 @@ SET [ SESSION | LOCAL ] TIME ZONE { timezoneDEFAULT can be written to specify resetting the parameter to its default value (that is, whatever - value it would have had if no SET had been executed + value it would have had if no SET had been executed in the current session). @@ -145,7 +145,7 @@ SET [ SESSION | LOCAL ] TIME ZONE { timezone Besides the configuration parameters documented in , there are a few that can only be + linkend="runtime-config"/>, there are a few that can only be adjusted using the SET command or that have a special syntax: @@ -153,8 +153,8 @@ SET [ SESSION | LOCAL ] TIME ZONE { timezone SCHEMA - SET SCHEMA 'value' is an alias for - SET search_path TO value. Only one + SET SCHEMA 'value' is an alias for + SET search_path TO value. Only one schema can be specified using this syntax. @@ -163,8 +163,8 @@ SET [ SESSION | LOCAL ] TIME ZONE { timezone NAMES - SET NAMES value is an alias for - SET client_encoding TO value. + SET NAMES value is an alias for + SET client_encoding TO value. @@ -176,7 +176,7 @@ SET [ SESSION | LOCAL ] TIME ZONE { timezonerandom). Allowed values are floating-point numbers between -1 and 1, which are then - multiplied by 231-1. + multiplied by 231-1. @@ -191,8 +191,8 @@ SELECT setseed(value); TIME ZONE - SET TIME ZONE value is an alias - for SET timezone TO value. The + SET TIME ZONE value is an alias + for SET timezone TO value. The syntax SET TIME ZONE allows special syntax for the time zone specification. Here are examples of valid values: @@ -238,7 +238,7 @@ SELECT setseed(value); Set the time zone to your local time zone (that is, the - server's default value of timezone). + server's default value of timezone). 
@@ -248,12 +248,12 @@ SELECT setseed(value); Timezone settings given as numbers or intervals are internally translated to POSIX timezone syntax. For example, after - SET TIME ZONE -7, SHOW TIME ZONE would - report <-07>+07. + SET TIME ZONE -7, SHOW TIME ZONE would + report <-07>+07. - See for more information + See for more information about time zones. @@ -267,10 +267,10 @@ SELECT setseed(value); The function set_config provides equivalent - functionality; see . + functionality; see . Also, it is possible to UPDATE the pg_settings - system view to perform the equivalent of SET. + system view to perform the equivalent of SET. @@ -286,7 +286,7 @@ SET search_path TO my_schema, public; Set the style of date to traditional - POSTGRES with day before month + POSTGRES with day before month input convention: SET datestyle TO postgres, dmy; @@ -323,8 +323,8 @@ SET TIME ZONE 'Europe/Rome'; See Also - - + + diff --git a/doc/src/sgml/ref/set_constraints.sgml b/doc/src/sgml/ref/set_constraints.sgml index 7c31871b0b..671332afc7 100644 --- a/doc/src/sgml/ref/set_constraints.sgml +++ b/doc/src/sgml/ref/set_constraints.sgml @@ -1,5 +1,5 @@ - + SET CONSTRAINTS @@ -67,18 +67,18 @@ SET CONSTRAINTS { ALL | name [, ... - Currently, only UNIQUE, PRIMARY KEY, - REFERENCES (foreign key), and EXCLUDE + Currently, only UNIQUE, PRIMARY KEY, + REFERENCES (foreign key), and EXCLUDE constraints are affected by this setting. - NOT NULL and CHECK constraints are + NOT NULL and CHECK constraints are always checked immediately when a row is inserted or modified - (not at the end of the statement). + (not at the end of the statement). Uniqueness and exclusion constraints that have not been declared - DEFERRABLE are also checked immediately. + DEFERRABLE are also checked immediately. - The firing of triggers that are declared as constraint triggers + The firing of triggers that are declared as constraint triggers is also controlled by this setting — they fire at the same time that the associated constraint should be checked. @@ -111,7 +111,7 @@ SET CONSTRAINTS { ALL | name [, ... This command complies with the behavior defined in the SQL standard, except for the limitation that, in PostgreSQL, it does not apply to - NOT NULL and CHECK constraints. + NOT NULL and CHECK constraints. Also, PostgreSQL checks non-deferrable uniqueness constraints immediately, not at end of statement as the standard would suggest. diff --git a/doc/src/sgml/ref/set_role.sgml b/doc/src/sgml/ref/set_role.sgml index a97ceabcff..0ef6eb9a9c 100644 --- a/doc/src/sgml/ref/set_role.sgml +++ b/doc/src/sgml/ref/set_role.sgml @@ -3,7 +3,7 @@ doc/src/sgml/ref/set_role.sgml PostgreSQL documentation --> - + SET ROLE @@ -35,7 +35,7 @@ RESET ROLE identifier of the current SQL session to be role_name. The role name can be written as either an identifier or a string literal. - After SET ROLE, permissions checking for SQL commands + After SET ROLE, permissions checking for SQL commands is carried out as though the named role were the one that had logged in originally. @@ -47,13 +47,13 @@ RESET ROLE - The SESSION and LOCAL modifiers act the same - as for the regular + The SESSION and LOCAL modifiers act the same + as for the regular command. - The NONE and RESET forms reset the current + The NONE and RESET forms reset the current user identifier to be the current session user identifier. These forms can be executed by any user. @@ -64,41 +64,41 @@ RESET ROLE Using this command, it is possible to either add privileges or restrict - one's privileges. 
If the session user role has the INHERITS + one's privileges. If the session user role has the INHERITS attribute, then it automatically has all the privileges of every role that - it could SET ROLE to; in this case SET ROLE + it could SET ROLE to; in this case SET ROLE effectively drops all the privileges assigned directly to the session user and to the other roles it is a member of, leaving only the privileges available to the named role. On the other hand, if the session user role - has the NOINHERITS attribute, SET ROLE drops the + has the NOINHERITS attribute, SET ROLE drops the privileges assigned directly to the session user and instead acquires the privileges available to the named role. - In particular, when a superuser chooses to SET ROLE to a + In particular, when a superuser chooses to SET ROLE to a non-superuser role, they lose their superuser privileges. - SET ROLE has effects comparable to - , but the privilege + SET ROLE has effects comparable to + , but the privilege checks involved are quite different. Also, - SET SESSION AUTHORIZATION determines which roles are - allowable for later SET ROLE commands, whereas changing - roles with SET ROLE does not change the set of roles - allowed to a later SET ROLE. + SET SESSION AUTHORIZATION determines which roles are + allowable for later SET ROLE commands, whereas changing + roles with SET ROLE does not change the set of roles + allowed to a later SET ROLE. - SET ROLE does not process session variables as specified by - the role's settings; this only happens during + SET ROLE does not process session variables as specified by + the role's settings; this only happens during login. - SET ROLE cannot be used within a - SECURITY DEFINER function. + SET ROLE cannot be used within a + SECURITY DEFINER function. @@ -127,14 +127,14 @@ SELECT SESSION_USER, CURRENT_USER; PostgreSQL - allows identifier syntax ("rolename"), while + allows identifier syntax ("rolename"), while the SQL standard requires the role name to be written as a string literal. SQL does not allow this command during a transaction; PostgreSQL does not make this restriction because there is no reason to. - The SESSION and LOCAL modifiers are a + The SESSION and LOCAL modifiers are a PostgreSQL extension, as is the - RESET syntax. + RESET syntax. @@ -142,7 +142,7 @@ SELECT SESSION_USER, CURRENT_USER; See Also - + diff --git a/doc/src/sgml/ref/set_session_auth.sgml b/doc/src/sgml/ref/set_session_auth.sgml index 96d279aaf9..37b9ff8b18 100644 --- a/doc/src/sgml/ref/set_session_auth.sgml +++ b/doc/src/sgml/ref/set_session_auth.sgml @@ -1,5 +1,5 @@ - + SET SESSION AUTHORIZATION @@ -39,9 +39,9 @@ RESET SESSION AUTHORIZATION The session user identifier is initially set to be the (possibly authenticated) user name provided by the client. The current user identifier is normally equal to the session user identifier, but - might change temporarily in the context of SECURITY DEFINER + might change temporarily in the context of SECURITY DEFINER functions and similar mechanisms; it can also be changed by - . + . The current user identifier is relevant for permission checking. @@ -53,13 +53,13 @@ RESET SESSION AUTHORIZATION - The SESSION and LOCAL modifiers act the same - as for the regular + The SESSION and LOCAL modifiers act the same + as for the regular command. - The DEFAULT and RESET forms reset the session + The DEFAULT and RESET forms reset the session and current user identifiers to be the originally authenticated user name. These forms can be executed by any user. 
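   A minimal sketch of the typical flow (webuser is a hypothetical role;
   changing the session user this way is normally only possible when the
   initial session user is a superuser):

SET SESSION AUTHORIZATION 'webuser';
SELECT session_user, current_user;   -- both now report webuser
RESET SESSION AUTHORIZATION;         -- back to the originally authenticated user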
@@ -69,8 +69,8 @@ RESET SESSION AUTHORIZATION Notes - SET SESSION AUTHORIZATION cannot be used within a - SECURITY DEFINER function. + SET SESSION AUTHORIZATION cannot be used within a + SECURITY DEFINER function. @@ -101,13 +101,13 @@ SELECT SESSION_USER, CURRENT_USER; The SQL standard allows some other expressions to appear in place of the literal user_name, but these options are not important in practice. PostgreSQL - allows identifier syntax ("username"), which SQL + allows identifier syntax ("username"), which SQL does not. SQL does not allow this command during a transaction; PostgreSQL does not make this restriction because there is no reason to. - The SESSION and LOCAL modifiers are a + The SESSION and LOCAL modifiers are a PostgreSQL extension, as is the - RESET syntax. + RESET syntax. @@ -120,7 +120,7 @@ SELECT SESSION_USER, CURRENT_USER; See Also - + diff --git a/doc/src/sgml/ref/set_transaction.sgml b/doc/src/sgml/ref/set_transaction.sgml index 188d2ed92e..43b1c6c892 100644 --- a/doc/src/sgml/ref/set_transaction.sgml +++ b/doc/src/sgml/ref/set_transaction.sgml @@ -1,5 +1,5 @@ - + SET TRANSACTION @@ -119,7 +119,7 @@ SET SESSION CHARACTERISTICS AS TRANSACTION transa INSERT, DELETE, UPDATE, FETCH, or COPY) of a transaction has been executed. See - for more information about transaction + for more information about transaction isolation and concurrency control. @@ -153,14 +153,14 @@ SET SESSION CHARACTERISTICS AS TRANSACTION transa The SET TRANSACTION SNAPSHOT command allows a new - transaction to run with the same snapshot as an existing + transaction to run with the same snapshot as an existing transaction. The pre-existing transaction must have exported its snapshot with the pg_export_snapshot function (see ). That function returns a + linkend="functions-snapshot-synchronization"/>). That function returns a snapshot identifier, which must be given to SET TRANSACTION SNAPSHOT to specify which snapshot is to be imported. The identifier must be written as a string literal in this command, for example - '000003A1-1'. + '000003A1-1'. SET TRANSACTION SNAPSHOT can only be executed at the start of a transaction, before the first query or data-modification statement (SELECT, @@ -169,7 +169,7 @@ SET SESSION CHARACTERISTICS AS TRANSACTION transa COPY) of the transaction. Furthermore, the transaction must already be set to SERIALIZABLE or REPEATABLE READ isolation level (otherwise, the snapshot - would be discarded immediately, since READ COMMITTED mode takes + would be discarded immediately, since READ COMMITTED mode takes a new snapshot for each command). If the importing transaction uses SERIALIZABLE isolation level, then the transaction that exported the snapshot must also use that isolation level. Also, a @@ -199,13 +199,13 @@ SET SESSION CHARACTERISTICS AS TRANSACTION transa The session default transaction modes can also be set by setting the - configuration parameters , - , and - . + configuration parameters , + , and + . (In fact SET SESSION CHARACTERISTICS is just a - verbose equivalent for setting these variables with SET.) + verbose equivalent for setting these variables with SET.) This means the defaults can be set in the configuration file, via - ALTER DATABASE, etc. Consult + ALTER DATABASE, etc. Consult for more information. 
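   To illustrate that equivalence (assuming the default_transaction_isolation
   parameter, which is one of the settings referred to above), the following
   two commands should have the same effect on the session's default
   isolation level:

SET SESSION CHARACTERISTICS AS TRANSACTION ISOLATION LEVEL REPEATABLE READ;
SET default_transaction_isolation = 'repeatable read';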
@@ -237,13 +237,13 @@ SET TRANSACTION SNAPSHOT '00000003-0000001B-1'; - + Compatibility These commands are defined in the SQL standard, except for the DEFERRABLE transaction mode - and the SET TRANSACTION SNAPSHOT form, which are + and the SET TRANSACTION SNAPSHOT form, which are PostgreSQL extensions. diff --git a/doc/src/sgml/ref/show.sgml b/doc/src/sgml/ref/show.sgml index 46bb239baf..945b0491b1 100644 --- a/doc/src/sgml/ref/show.sgml +++ b/doc/src/sgml/ref/show.sgml @@ -3,7 +3,7 @@ doc/src/sgml/ref/show.sgml PostgreSQL documentation --> - + SHOW @@ -21,7 +21,7 @@ PostgreSQL documentation -SHOW name +SHOW name SHOW ALL @@ -35,10 +35,10 @@ SHOW ALL SET statement, by editing the postgresql.conf configuration file, through the PGOPTIONS environmental variable (when using - libpq or a libpq-based + libpq or a libpq-based application), or through command-line flags when starting the postgres server. See for details. + linkend="runtime-config"/> for details. @@ -47,12 +47,12 @@ SHOW ALL - name + name The name of a run-time parameter. Available parameters are - documented in and on the reference page. In + documented in and on the reference page. In addition, there are a few parameters that can be shown but not set: @@ -129,7 +129,7 @@ SHOW ALL The function current_setting produces - equivalent output; see . + equivalent output; see . Also, the pg_settings system view produces the same information. @@ -192,8 +192,8 @@ SHOW ALL; See Also - - + + diff --git a/doc/src/sgml/ref/start_transaction.sgml b/doc/src/sgml/ref/start_transaction.sgml index 60926f5dfe..d6cd1d4177 100644 --- a/doc/src/sgml/ref/start_transaction.sgml +++ b/doc/src/sgml/ref/start_transaction.sgml @@ -3,7 +3,7 @@ doc/src/sgml/ref/start_transaction.sgml PostgreSQL documentation --> - + START TRANSACTION @@ -37,8 +37,8 @@ START TRANSACTION [ transaction_mode This command begins a new transaction block. If the isolation level, read/write mode, or deferrable mode is specified, the new transaction has those - characteristics, as if was executed. This is the same - as the command. + characteristics, as if was executed. This is the same + as the command. @@ -46,7 +46,7 @@ START TRANSACTION [ transaction_modeParameters - Refer to for information on the meaning + Refer to for information on the meaning of the parameters to this statement. @@ -55,12 +55,12 @@ START TRANSACTION [ transaction_modeCompatibility - In the standard, it is not necessary to issue START TRANSACTION + In the standard, it is not necessary to issue START TRANSACTION to start a transaction block: any SQL command implicitly begins a block. PostgreSQL's behavior can be seen as implicitly issuing a COMMIT after each command that does not - follow START TRANSACTION (or BEGIN), - and it is therefore often called autocommit. + follow START TRANSACTION (or BEGIN), + and it is therefore often called autocommit. Other relational database systems might offer an autocommit feature as a convenience. @@ -79,7 +79,7 @@ START TRANSACTION [ transaction_mode - See also the compatibility section of . + See also the compatibility section of . @@ -87,11 +87,11 @@ START TRANSACTION [ transaction_modeSee Also - - - - - + + + + + diff --git a/doc/src/sgml/ref/truncate.sgml b/doc/src/sgml/ref/truncate.sgml index e9c8a03a63..c1e42376ab 100644 --- a/doc/src/sgml/ref/truncate.sgml +++ b/doc/src/sgml/ref/truncate.sgml @@ -3,7 +3,7 @@ doc/src/sgml/ref/truncate.sgml PostgreSQL documentation --> - + TRUNCATE @@ -21,7 +21,7 @@ PostgreSQL documentation -TRUNCATE [ TABLE ] [ ONLY ] name [ * ] [, ... 
] +TRUNCATE [ TABLE ] [ ONLY ] name [ * ] [, ... ] [ RESTART IDENTITY | CONTINUE IDENTITY ] [ CASCADE | RESTRICT ] @@ -44,13 +44,13 @@ TRUNCATE [ TABLE ] [ ONLY ] name [ - name + name The name (optionally schema-qualified) of a table to truncate. - If ONLY is specified before the table name, only that table - is truncated. If ONLY is not specified, the table and all - its descendant tables (if any) are truncated. Optionally, * + If ONLY is specified before the table name, only that table + is truncated. If ONLY is not specified, the table and all + its descendant tables (if any) are truncated. Optionally, * can be specified after the table name to explicitly indicate that descendant tables are included. @@ -108,29 +108,29 @@ TRUNCATE [ TABLE ] [ ONLY ] name [ - TRUNCATE acquires an ACCESS EXCLUSIVE lock on each + TRUNCATE acquires an ACCESS EXCLUSIVE lock on each table it operates on, which blocks all other concurrent operations - on the table. When RESTART IDENTITY is specified, any + on the table. When RESTART IDENTITY is specified, any sequences that are to be restarted are likewise locked exclusively. If concurrent access to a table is required, then - the DELETE command should be used instead. + the DELETE command should be used instead. - TRUNCATE cannot be used on a table that has foreign-key + TRUNCATE cannot be used on a table that has foreign-key references from other tables, unless all such tables are also truncated in the same command. Checking validity in such cases would require table - scans, and the whole point is not to do one. The CASCADE + scans, and the whole point is not to do one. The CASCADE option can be used to automatically include all dependent tables — but be very careful when using this option, or else you might lose data you did not intend to! - TRUNCATE will not fire any ON DELETE + TRUNCATE will not fire any ON DELETE triggers that might exist for the tables. But it will fire ON TRUNCATE triggers. - If ON TRUNCATE triggers are defined for any of + If ON TRUNCATE triggers are defined for any of the tables, then all BEFORE TRUNCATE triggers are fired before any truncation happens, and all AFTER TRUNCATE triggers are fired after the last truncation is @@ -141,36 +141,36 @@ TRUNCATE [ TABLE ] [ ONLY ] name [ - TRUNCATE is not MVCC-safe. After truncation, the table will + TRUNCATE is not MVCC-safe. After truncation, the table will appear empty to concurrent transactions, if they are using a snapshot taken before the truncation occurred. - See for more details. + See for more details. - TRUNCATE is transaction-safe with respect to the data + TRUNCATE is transaction-safe with respect to the data in the tables: the truncation will be safely rolled back if the surrounding transaction does not commit. - When RESTART IDENTITY is specified, the implied - ALTER SEQUENCE RESTART operations are also done + When RESTART IDENTITY is specified, the implied + ALTER SEQUENCE RESTART operations are also done transactionally; that is, they will be rolled back if the surrounding transaction does not commit. This is unlike the normal behavior of - ALTER SEQUENCE RESTART. Be aware that if any additional + ALTER SEQUENCE RESTART. 
Be aware that if any additional sequence operations are done on the restarted sequences before the transaction rolls back, the effects of these operations on the sequences - will be rolled back, but not their effects on currval(); - that is, after the transaction currval() will continue to + will be rolled back, but not their effects on currval(); + that is, after the transaction currval() will continue to reflect the last sequence value obtained inside the failed transaction, even though the sequence itself may no longer be consistent with that. - This is similar to the usual behavior of currval() after + This is similar to the usual behavior of currval() after a failed transaction. - TRUNCATE is not currently supported for foreign tables. + TRUNCATE is not currently supported for foreign tables. This implies that if a specified table has any descendant tables that are foreign, the command will fail. @@ -225,7 +225,7 @@ TRUNCATE othertable CASCADE; See Also - + diff --git a/doc/src/sgml/ref/unlisten.sgml b/doc/src/sgml/ref/unlisten.sgml index f7c3c47e2f..687bf485c9 100644 --- a/doc/src/sgml/ref/unlisten.sgml +++ b/doc/src/sgml/ref/unlisten.sgml @@ -3,7 +3,7 @@ doc/src/sgml/ref/unlisten.sgml PostgreSQL documentation --> - + UNLISTEN @@ -21,7 +21,7 @@ PostgreSQL documentation -UNLISTEN { channel | * } +UNLISTEN { channel | * } @@ -34,13 +34,13 @@ UNLISTEN { channel | * } UNLISTEN cancels any existing registration of the current PostgreSQL session as a listener on the notification channel named channel. The special wildcard + class="parameter">channel. The special wildcard * cancels all listener registrations for the current session. - + contains a more extensive discussion of the use of LISTEN and NOTIFY. @@ -52,7 +52,7 @@ UNLISTEN { channel | * } - channel + channel Name of a notification channel (any identifier). @@ -104,7 +104,7 @@ Asynchronous notification "virtual" received from server process with PID 8448. - Once UNLISTEN has been executed, further NOTIFY + Once UNLISTEN has been executed, further NOTIFY messages will be ignored: @@ -126,8 +126,8 @@ NOTIFY virtual; See Also - - + + diff --git a/doc/src/sgml/ref/update.sgml b/doc/src/sgml/ref/update.sgml index 8a1619fb68..77430a586c 100644 --- a/doc/src/sgml/ref/update.sgml +++ b/doc/src/sgml/ref/update.sgml @@ -3,7 +3,7 @@ doc/src/sgml/ref/update.sgml PostgreSQL documentation --> - + UPDATE @@ -22,13 +22,13 @@ PostgreSQL documentation [ WITH [ RECURSIVE ] with_query [, ...] ] -UPDATE [ ONLY ] table_name [ * ] [ [ AS ] alias ] - SET { column_name = { expression | DEFAULT } | - ( column_name [, ...] ) = [ ROW ] ( { expression | DEFAULT } [, ...] ) | - ( column_name [, ...] ) = ( sub-SELECT ) +UPDATE [ ONLY ] table_name [ * ] [ [ AS ] alias ] + SET { column_name = { expression | DEFAULT } | + ( column_name [, ...] ) = [ ROW ] ( { expression | DEFAULT } [, ...] ) | + ( column_name [, ...] ) = ( sub-SELECT ) } [, ...] - [ FROM from_list ] - [ WHERE condition | WHERE CURRENT OF cursor_name ] + [ FROM from_list ] + [ WHERE condition | WHERE CURRENT OF cursor_name ] [ RETURNING * | output_expression [ [ AS ] output_name ] [, ...] ] @@ -52,13 +52,13 @@ UPDATE [ ONLY ] table_name [ * ] [ - The optional RETURNING clause causes UPDATE + The optional RETURNING clause causes UPDATE to compute and return value(s) based on each row actually updated. Any expression using the table's columns, and/or columns of other tables mentioned in FROM, can be computed. The new (post-update) values of the table's columns are used. 
- The syntax of the RETURNING list is identical to that of the - output list of SELECT. + The syntax of the RETURNING list is identical to that of the + output list of SELECT. @@ -80,22 +80,22 @@ UPDATE [ ONLY ] table_name [ * ] [ The WITH clause allows you to specify one or more - subqueries that can be referenced by name in the UPDATE - query. See and + subqueries that can be referenced by name in the UPDATE + query. See and for details. - table_name + table_name The name (optionally schema-qualified) of the table to update. - If ONLY is specified before the table name, matching rows - are updated in the named table only. If ONLY is not + If ONLY is specified before the table name, matching rows + are updated in the named table only. If ONLY is not specified, matching rows are also updated in any tables inheriting from - the named table. Optionally, * can be specified after the + the named table. Optionally, * can be specified after the table name to explicitly indicate that descendant tables are included. @@ -107,29 +107,29 @@ UPDATE [ ONLY ] table_name [ * ] [ A substitute name for the target table. When an alias is provided, it completely hides the actual name of the table. For - example, given UPDATE foo AS f, the remainder of the + example, given UPDATE foo AS f, the remainder of the UPDATE statement must refer to this table as - f not foo. + f not foo. - column_name + column_name The name of a column in the table named by table_name. + class="parameter">table_name. The column name can be qualified with a subfield name or array subscript, if needed. Do not include the table's name in the specification of a target column — for example, - UPDATE table_name SET table_name.col = 1 is invalid. + UPDATE table_name SET table_name.col = 1 is invalid. - expression + expression An expression to assign to the column. The expression can use the @@ -149,10 +149,10 @@ UPDATE [ ONLY ] table_name [ * ] [ - sub-SELECT + sub-SELECT - A SELECT sub-query that produces as many output columns + A SELECT sub-query that produces as many output columns as are listed in the parenthesized column list preceding it. The sub-query must yield no more than one row when executed. If it yields one row, its column values are assigned to the target columns; @@ -164,64 +164,64 @@ UPDATE [ ONLY ] table_name [ * ] [ - from_list + from_list A list of table expressions, allowing columns from other tables - to appear in the WHERE condition and the update + to appear in the WHERE condition and the update expressions. This is similar to the list of tables that can be specified in the of a SELECT + endterm="sql-from-title"/> of a SELECT statement. Note that the target table must not appear in the - from_list, unless you intend a self-join (in which - case it must appear with an alias in the from_list). + from_list, unless you intend a self-join (in which + case it must appear with an alias in the from_list). - condition + condition An expression that returns a value of type boolean. - Only rows for which this expression returns true + Only rows for which this expression returns true will be updated. - cursor_name + cursor_name - The name of the cursor to use in a WHERE CURRENT OF + The name of the cursor to use in a WHERE CURRENT OF condition. The row to be updated is the one most recently fetched from this cursor. The cursor must be a non-grouping - query on the UPDATE's target table. - Note that WHERE CURRENT OF cannot be + query on the UPDATE's target table. 
+ Note that WHERE CURRENT OF cannot be specified together with a Boolean condition. See - + for more information about using cursors with - WHERE CURRENT OF. + WHERE CURRENT OF. - output_expression + output_expression - An expression to be computed and returned by the UPDATE + An expression to be computed and returned by the UPDATE command after each row is updated. The expression can use any - column names of the table named by table_name - or table(s) listed in FROM. - Write * to return all columns. + column names of the table named by table_name + or table(s) listed in FROM. + Write * to return all columns. - output_name + output_name A name to use for a returned column. @@ -235,7 +235,7 @@ UPDATE [ ONLY ] table_name [ * ] [ Outputs - On successful completion, an UPDATE command returns a command + On successful completion, an UPDATE command returns a command tag of the form UPDATE count @@ -244,16 +244,16 @@ UPDATE count of rows updated, including matched rows whose values did not change. Note that the number may be less than the number of rows that matched the condition when - updates were suppressed by a BEFORE UPDATE trigger. If + updates were suppressed by a BEFORE UPDATE trigger. If count is 0, no rows were updated by the query (this is not considered an error). - If the UPDATE command contains a RETURNING - clause, the result will be similar to that of a SELECT + If the UPDATE command contains a RETURNING + clause, the result will be similar to that of a SELECT statement containing the columns and values defined in the - RETURNING list, computed over the row(s) updated by the + RETURNING list, computed over the row(s) updated by the command. @@ -262,11 +262,11 @@ UPDATE count Notes - When a FROM clause is present, what essentially happens + When a FROM clause is present, what essentially happens is that the target table is joined to the tables mentioned in the from_list, and each output row of the join represents an update operation for the target table. When using - FROM you should ensure that the join + FROM you should ensure that the join produces at most one output row for each row to be modified. In other words, a target row shouldn't join to more than one row from the other table(s). If it does, then only one of the join rows @@ -282,10 +282,18 @@ UPDATE count In the case of a partitioned table, updating a row might cause it to no - longer satisfy the partition constraint. Since there is no provision to - move the row to the partition appropriate to the new value of its - partitioning key, an error will occur in this case. This can also happen - when updating a partition directly. + longer satisfy the partition constraint of the containing partition. In that + case, if there is some other partition in the partition tree for which this + row satisfies its partition constraint, then the row is moved to that + partition. If there is no such partition, an error will occur. Behind the + scenes, the row movement is actually a DELETE and + INSERT operation. However, there is a possibility that a + concurrent UPDATE or DELETE on the + same row may miss this row. For details see the section + . + Currently, rows cannot be moved from a partition that is a + foreign table to some other partition, but they can be moved into a foreign + table if the foreign data wrapper supports it. 
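   As a sketch of that row-movement behavior (all object names here are
   hypothetical), updating the partition key so that the row no longer fits
   its current partition relocates it to the matching sibling partition:

CREATE TABLE measurements (logdate date, peaktemp int)
    PARTITION BY RANGE (logdate);
CREATE TABLE measurements_y2018 PARTITION OF measurements
    FOR VALUES FROM ('2018-01-01') TO ('2019-01-01');
CREATE TABLE measurements_y2019 PARTITION OF measurements
    FOR VALUES FROM ('2019-01-01') TO ('2020-01-01');

INSERT INTO measurements VALUES ('2018-06-01', 35);

-- the new logdate no longer satisfies measurements_y2018's partition
-- constraint, so the row is moved (deleted and re-inserted) into
-- measurements_y2019
UPDATE measurements SET logdate = '2019-06-01' WHERE peaktemp = 35;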
@@ -293,8 +301,8 @@ UPDATE count Examples - Change the word Drama to Dramatic in the - column kind of the table films: + Change the word Drama to Dramatic in the + column kind of the table films: UPDATE films SET kind = 'Dramatic' WHERE kind = 'Drama'; @@ -364,10 +372,10 @@ UPDATE accounts SET contact_first_name = first_name, FROM salesmen WHERE salesmen.id = accounts.sales_id; However, the second query may give unexpected results - if salesmen.id is not a unique key, whereas + if salesmen.id is not a unique key, whereas the first query is guaranteed to raise an error if there are multiple - id matches. Also, if there is no match for a particular - accounts.sales_id entry, the first query + id matches. Also, if there is no match for a particular + accounts.sales_id entry, the first query will set the corresponding name fields to NULL, whereas the second query will not update that row at all. @@ -400,9 +408,9 @@ COMMIT; - Change the kind column of the table + Change the kind column of the table films in the row on which the cursor - c_films is currently positioned: + c_films is currently positioned: UPDATE films SET kind = 'Dramatic' WHERE CURRENT OF c_films; @@ -413,16 +421,16 @@ UPDATE films SET kind = 'Dramatic' WHERE CURRENT OF c_films; This command conforms to the SQL standard, except - that the FROM and RETURNING clauses + that the FROM and RETURNING clauses are PostgreSQL extensions, as is the ability - to use WITH with UPDATE. + to use WITH with UPDATE. - Some other database systems offer a FROM option in which - the target table is supposed to be listed again within FROM. + Some other database systems offer a FROM option in which + the target table is supposed to be listed again within FROM. That is not how PostgreSQL interprets - FROM. Be careful when porting applications that use this + FROM. Be careful when porting applications that use this extension. @@ -431,9 +439,9 @@ UPDATE films SET kind = 'Dramatic' WHERE CURRENT OF c_films; target column names can be any row-valued expression yielding the correct number of columns. PostgreSQL only allows the source value to be a row - constructor or a sub-SELECT. An individual column's - updated value can be specified as DEFAULT in the - row-constructor case, but not inside a sub-SELECT. + constructor or a sub-SELECT. An individual column's + updated value can be specified as DEFAULT in the + row-constructor case, but not inside a sub-SELECT. diff --git a/doc/src/sgml/ref/vacuum.sgml b/doc/src/sgml/ref/vacuum.sgml index 421c18d117..fd911f5776 100644 --- a/doc/src/sgml/ref/vacuum.sgml +++ b/doc/src/sgml/ref/vacuum.sgml @@ -3,7 +3,7 @@ doc/src/sgml/ref/vacuum.sgml PostgreSQL documentation --> - + VACUUM @@ -21,9 +21,21 @@ PostgreSQL documentation -VACUUM [ ( { FULL | FREEZE | VERBOSE | ANALYZE | DISABLE_PAGE_SKIPPING } [, ...] ) ] [ table_name [ (column_name [, ...] ) ] ] -VACUUM [ FULL ] [ FREEZE ] [ VERBOSE ] [ table_name ] -VACUUM [ FULL ] [ FREEZE ] [ VERBOSE ] ANALYZE [ table_name [ (column_name [, ...] ) ] ] +VACUUM [ ( option [, ...] ) ] [ table_and_columns [, ...] ] +VACUUM [ FULL ] [ FREEZE ] [ VERBOSE ] [ ANALYZE ] [ table_and_columns [, ...] ] + +where option can be one of: + + FULL + FREEZE + VERBOSE + ANALYZE + DISABLE_PAGE_SKIPPING + SKIP_LOCKED + +and table_and_columns is: + + table_name [ ( column_name [, ...] ) ] @@ -40,21 +52,22 @@ VACUUM [ FULL ] [ FREEZE ] [ VERBOSE ] ANALYZE [ - With no parameter, VACUUM processes every table in the - current database that the current user has permission to vacuum. 
- With a parameter, VACUUM processes only that table. + Without a table_and_columns + list, VACUUM processes every table and materialized view + in the current database that the current user has permission to vacuum. + With a list, VACUUM processes only those table(s). VACUUM ANALYZE performs a VACUUM and then an ANALYZE for each selected table. This is a handy combination form for routine maintenance scripts. See - + for more details about its processing. - Plain VACUUM (without FULL) simply reclaims + Plain VACUUM (without FULL) simply reclaims space and makes it available for re-use. This form of the command can operate in parallel with normal reading and writing of the table, as an exclusive lock @@ -101,10 +114,10 @@ VACUUM [ FULL ] [ FREEZE ] [ VERBOSE ] ANALYZE [ Selects aggressive freezing of tuples. Specifying FREEZE is equivalent to performing VACUUM with the - and - parameters + and + parameters set to zero. Aggressive freezing is always performed when the - table is rewritten, so this option is redundant when FULL + table is rewritten, so this option is redundant when FULL is specified. @@ -133,39 +146,59 @@ VACUUM [ FULL ] [ FREEZE ] [ VERBOSE ] ANALYZE [ DISABLE_PAGE_SKIPPING - Normally, VACUUM will skip pages based on the visibility map. Pages where + Normally, VACUUM will skip pages based on the visibility map. Pages where all tuples are known to be frozen can always be skipped, and those where all tuples are known to be visible to all transactions may be skipped except when performing an aggressive vacuum. Furthermore, except when performing an aggressive vacuum, some pages may be skipped in order to avoid waiting for other sessions to finish using them. This option disables all page-skipping behavior, and is intended to - be used only the contents of the visibility map are thought to - be suspect, which should happen only if there is a hardware or software + be used only when the contents of the visibility map are + suspect, which should happen only if there is a hardware or software issue causing database corruption. - table_name + SKIP_LOCKED + + + Specifies that VACUUM should not wait for any + conflicting locks to be released when beginning work on a relation: + if a relation cannot be locked immediately without waiting, the relation + is skipped. Note that even with this option, + VACUUM may still block when opening the relation's + indexes. Additionally, VACUUM ANALYZE may still + block when acquiring sample rows from partitions, table inheritance + children, and some types of foreign tables. Also, while + VACUUM ordinarily processes all partitions of + specified partitioned tables, this option will cause + VACUUM to skip all partitions if there is a + conflicting lock on the partitioned table. + + + + + + table_name - The name (optionally schema-qualified) of a specific table to - vacuum. If omitted, all regular tables and materialized views in the - current database are vacuumed. If the specified table is a partitioned + The name (optionally schema-qualified) of a specific table or + materialized view to vacuum. If the specified table is a partitioned table, all of its leaf partitions are vacuumed. - column_name + column_name The name of a specific column to analyze. Defaults to all columns. - If a column list is specified, ANALYZE is implied. + If a column list is specified, ANALYZE must also be + specified. 
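      For example (table and column names are hypothetical), the option list
      and a per-table column list can be combined; because a column list is
      given, ANALYZE must be named explicitly:

VACUUM (ANALYZE, VERBOSE, SKIP_LOCKED) orders (customer_id, order_date);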
@@ -176,7 +209,7 @@ VACUUM [ FULL ] [ FREEZE ] [ VERBOSE ] ANALYZE [ Outputs - When VERBOSE is specified, VACUUM emits + When VERBOSE is specified, VACUUM emits progress messages to indicate which table is currently being processed. Various statistics about the tables are printed as well. @@ -190,20 +223,20 @@ VACUUM [ FULL ] [ FREEZE ] [ VERBOSE ] ANALYZE [ superuser. However, database owners are allowed to vacuum all tables in their databases, except shared catalogs. (The restriction for shared catalogs means that a true database-wide - VACUUM can only be performed by a superuser.) - VACUUM will skip over any tables that the calling user + VACUUM can only be performed by a superuser.) + VACUUM will skip over any tables that the calling user does not have permission to vacuum. - VACUUM cannot be executed inside a transaction block. + VACUUM cannot be executed inside a transaction block. - For tables with GIN indexes, VACUUM (in + For tables with GIN indexes, VACUUM (in any form) also completes any pending index insertions, by moving pending - index entries to the appropriate places in the main GIN index - structure. See for details. + index entries to the appropriate places in the main GIN index + structure. See for details. @@ -231,14 +264,14 @@ VACUUM [ FULL ] [ FREEZE ] [ VERBOSE ] ANALYZE [ VACUUM causes a substantial increase in I/O traffic, which might cause poor performance for other active sessions. Therefore, it is sometimes advisable to use the cost-based vacuum delay feature. - See for details. + See for details. - PostgreSQL includes an autovacuum + PostgreSQL includes an autovacuum facility which can automate routine vacuum maintenance. For more information about automatic and manual vacuuming, see - . + . @@ -266,9 +299,9 @@ VACUUM (VERBOSE, ANALYZE) onek; See Also - - - + + + diff --git a/doc/src/sgml/ref/vacuumdb.sgml b/doc/src/sgml/ref/vacuumdb.sgml index 4f6fa0d708..955a17a849 100644 --- a/doc/src/sgml/ref/vacuumdb.sgml +++ b/doc/src/sgml/ref/vacuumdb.sgml @@ -3,7 +3,7 @@ doc/src/sgml/ref/vacuumdb.sgml PostgreSQL documentation --> - + vacuumdb @@ -62,7 +62,7 @@ PostgreSQL documentation vacuumdb is a wrapper around the SQL - command . + command . There is no effective difference between vacuuming and analyzing databases via this utility and via other methods for accessing the server. @@ -88,8 +88,8 @@ PostgreSQL documentation - - + + Specifies the name of the database to be cleaned or analyzed. @@ -103,8 +103,8 @@ PostgreSQL documentation - - + + Echo the commands that vacuumdb generates @@ -146,7 +146,7 @@ PostgreSQL documentation vacuumdb will open njobs connections to the - database, so make sure your + database, so make sure your setting is high enough to accommodate all connections. @@ -158,8 +158,8 @@ PostgreSQL documentation - - + + Do not display progress messages. @@ -176,7 +176,7 @@ PostgreSQL documentation Column names can be specified only in conjunction with the or options. Multiple tables can be vacuumed by writing multiple - switches. @@ -198,8 +198,8 @@ PostgreSQL documentation - - + + Print the vacuumdb version and exit. 
@@ -248,8 +248,8 @@ PostgreSQL documentation - - + + Show help about vacuumdb command line @@ -266,8 +266,8 @@ PostgreSQL documentation the following command-line arguments for connection parameters: - - + + Specifies the host name of the machine on which the server @@ -278,8 +278,8 @@ PostgreSQL documentation - - + + Specifies the TCP port or local Unix domain socket file @@ -290,8 +290,8 @@ PostgreSQL documentation - - + + User name to connect as. @@ -300,8 +300,8 @@ PostgreSQL documentation - - + + Never issue a password prompt. If the server requires @@ -315,8 +315,8 @@ PostgreSQL documentation - - + + Force vacuumdb to prompt for a @@ -329,14 +329,14 @@ PostgreSQL documentation for a password if the server demands password authentication. However, vacuumdb will waste a connection attempt finding out that the server wants a password. - In some cases it is worth typing to avoid the extra connection attempt. - + Specifies the name of the database to connect to discover what other @@ -370,9 +370,9 @@ PostgreSQL documentation - This utility, like most other PostgreSQL utilities, - also uses the environment variables supported by libpq - (see ). + This utility, like most other PostgreSQL utilities, + also uses the environment variables supported by libpq + (see ). @@ -382,8 +382,8 @@ PostgreSQL documentation Diagnostics - In case of difficulty, see - and for + In case of difficulty, see + and for discussions of potential problems and error messages. The database server must be running at the targeted host. Also, any default connection settings and environment @@ -401,8 +401,8 @@ PostgreSQL documentation vacuumdb might need to connect several times to the PostgreSQL server, asking for a password each time. It is convenient to have a - ~/.pgpass file in such cases. See for more information. + ~/.pgpass file in such cases. See for more information. @@ -439,7 +439,7 @@ PostgreSQL documentation See Also - + diff --git a/doc/src/sgml/ref/values.sgml b/doc/src/sgml/ref/values.sgml index 9b0d8fa4a1..849220b120 100644 --- a/doc/src/sgml/ref/values.sgml +++ b/doc/src/sgml/ref/values.sgml @@ -3,7 +3,7 @@ doc/src/sgml/ref/values.sgml PostgreSQL documentation --> - + VALUES @@ -21,7 +21,7 @@ PostgreSQL documentation -VALUES ( expression [, ...] ) [, ...] +VALUES ( expression [, ...] ) [, ...] [ ORDER BY sort_expression [ ASC | DESC | USING operator ] [, ...] ] [ LIMIT { count | ALL } ] [ OFFSET start [ ROW | ROWS ] ] @@ -35,7 +35,7 @@ VALUES ( expression [, ...] ) [, .. VALUES computes a row value or set of row values specified by value expressions. It is most commonly used to generate - a constant table within a larger command, but it can be + a constant table within a larger command, but it can be used on its own. @@ -43,18 +43,18 @@ VALUES ( expression [, ...] ) [, .. When more than one row is specified, all the rows must have the same number of elements. The data types of the resulting table's columns are determined by combining the explicit or inferred types of the expressions - appearing in that column, using the same rules as for UNION - (see ). + appearing in that column, using the same rules as for UNION + (see ). - Within larger commands, VALUES is syntactically allowed - anywhere that SELECT is. Because it is treated like a - SELECT by the grammar, it is possible to use - the ORDER BY, LIMIT (or + Within larger commands, VALUES is syntactically allowed + anywhere that SELECT is. 
Because it is treated like a + SELECT by the grammar, it is possible to use + the ORDER BY, LIMIT (or equivalently FETCH FIRST), - and OFFSET clauses with a - VALUES command. + and OFFSET clauses with a + VALUES command. @@ -63,16 +63,16 @@ VALUES ( expression [, ...] ) [, .. - expression + expression A constant or expression to compute and insert at the indicated place - in the resulting table (set of rows). In a VALUES list - appearing at the top level of an INSERT, an - expression can be replaced + in the resulting table (set of rows). In a VALUES list + appearing at the top level of an INSERT, an + expression can be replaced by DEFAULT to indicate that the destination column's default value should be inserted. DEFAULT cannot - be used when VALUES appears in other contexts. + be used when VALUES appears in other contexts. @@ -83,9 +83,9 @@ VALUES ( expression [, ...] ) [, .. An expression or integer constant indicating how to sort the result rows. This expression can refer to the columns of the - VALUES result as column1, column2, + VALUES result as column1, column2, etc. For more details see - . + . @@ -95,7 +95,7 @@ VALUES ( expression [, ...] ) [, .. A sorting operator. For details see - . + . @@ -105,7 +105,7 @@ VALUES ( expression [, ...] ) [, .. The maximum number of rows to return. For details see - . + . @@ -116,7 +116,7 @@ VALUES ( expression [, ...] ) [, .. The number of rows to skip before starting to return rows. For details see - . + . @@ -127,11 +127,11 @@ VALUES ( expression [, ...] ) [, .. Notes - VALUES lists with very large numbers of rows should be avoided, + VALUES lists with very large numbers of rows should be avoided, as you might encounter out-of-memory failures or poor performance. - VALUES appearing within INSERT is a special case - (because the desired column types are known from the INSERT's - target table, and need not be inferred by scanning the VALUES + VALUES appearing within INSERT is a special case + (because the desired column types are known from the INSERT's + target table, and need not be inferred by scanning the VALUES list), so it can handle larger lists than are practical in other contexts. @@ -140,7 +140,7 @@ VALUES ( expression [, ...] ) [, .. Examples - A bare VALUES command: + A bare VALUES command: VALUES (1, 'one'), (2, 'two'), (3, 'three'); @@ -160,8 +160,8 @@ SELECT 3, 'three'; - More usually, VALUES is used within a larger SQL command. - The most common use is in INSERT: + More usually, VALUES is used within a larger SQL command. + The most common use is in INSERT: INSERT INTO films (code, title, did, date_prod, kind) @@ -170,7 +170,7 @@ INSERT INTO films (code, title, did, date_prod, kind) - In the context of INSERT, entries of a VALUES list + In the context of INSERT, entries of a VALUES list can be DEFAULT to indicate that the column default should be used here instead of specifying a value: @@ -182,8 +182,8 @@ INSERT INTO films VALUES - VALUES can also be used where a sub-SELECT might - be written, for example in a FROM clause: + VALUES can also be used where a sub-SELECT might + be written, for example in a FROM clause: SELECT f.* @@ -195,17 +195,17 @@ UPDATE employees SET salary = salary * v.increase WHERE employees.depno = v.depno AND employees.sales >= v.target; - Note that an AS clause is required when VALUES - is used in a FROM clause, just as is true for - SELECT. It is not required that the AS clause + Note that an AS clause is required when VALUES + is used in a FROM clause, just as is true for + SELECT. 
It is not required that the AS clause specify names for all the columns, but it's good practice to do so. - (The default column names for VALUES are column1, - column2, etc in PostgreSQL, but + (The default column names for VALUES are column1, + column2, etc in PostgreSQL, but these names might be different in other database systems.) - When VALUES is used in INSERT, the values are all + When VALUES is used in INSERT, the values are all automatically coerced to the data type of the corresponding destination column. When it's used in other contexts, it might be necessary to specify the correct data type. If the entries are all quoted literal constants, @@ -218,9 +218,9 @@ WHERE ip_address IN (VALUES('192.168.0.1'::inet), ('192.168.0.10'), ('192.168.1. - For simple IN tests, it's better to rely on the + For simple IN tests, it's better to rely on the list-of-scalars - form of IN than to write a VALUES + form of IN than to write a VALUES query as shown above. The list of scalars method requires less writing and is often more efficient. @@ -233,7 +233,7 @@ WHERE ip_address IN (VALUES('192.168.0.1'::inet), ('192.168.0.10'), ('192.168.1. VALUES conforms to the SQL standard. LIMIT and OFFSET are PostgreSQL extensions; see also - under . + under . @@ -241,8 +241,8 @@ WHERE ip_address IN (VALUES('192.168.0.1'::inet), ('192.168.0.10'), ('192.168.1. See Also - - + + diff --git a/doc/src/sgml/reference.sgml b/doc/src/sgml/reference.sgml index 9000b3aaaa..db4f4167e3 100644 --- a/doc/src/sgml/reference.sgml +++ b/doc/src/sgml/reference.sgml @@ -54,8 +54,10 @@ &alterOperatorClass; &alterOperatorFamily; &alterPolicy; + &alterProcedure; &alterPublication; &alterRole; + &alterRoutine; &alterRule; &alterSchema; &alterSequence; @@ -76,6 +78,7 @@ &alterView; &analyze; &begin; + &call; &checkpoint; &close; &cluster; @@ -103,6 +106,7 @@ &createOperatorClass; &createOperatorFamily; &createPolicy; + &createProcedure; &createPublication; &createRole; &createRule; @@ -150,8 +154,10 @@ &dropOperatorFamily; &dropOwned; &dropPolicy; + &dropProcedure; &dropPublication; &dropRole; + &dropRoutine; &dropRule; &dropSchema; &dropSequence; @@ -264,7 +270,7 @@ PostgreSQL server applications and support utilities. These commands can only be run usefully on the host where the database server resides. Other utility programs - are listed in . + are listed in . @@ -277,6 +283,7 @@ &pgtestfsync; &pgtesttiming; &pgupgrade; + &pgVerifyChecksums; &pgwaldump; &postgres; &postmaster; diff --git a/doc/src/sgml/regress.sgml b/doc/src/sgml/regress.sgml index 7c2b1029c2..673a8c2164 100644 --- a/doc/src/sgml/regress.sgml +++ b/doc/src/sgml/regress.sgml @@ -52,8 +52,8 @@ make check or otherwise a note about which tests failed. See below before assuming that a - failure represents a serious problem. + linkend="regress-evaluation"/> below before assuming that a + failure represents a serious problem. @@ -66,12 +66,12 @@ make check If you have configured PostgreSQL to install into a location where an older PostgreSQL - installation already exists, and you perform make check + installation already exists, and you perform make check before installing the new version, you might find that the tests fail because the new programs try to use the already-installed shared libraries. (Typical symptoms are complaints about undefined symbols.) If you wish to run the tests before overwriting the old installation, - you'll need to build with configure --disable-rpath. + you'll need to build with configure --disable-rpath. 
It is not recommended that you use this option for the final installation, however. @@ -80,12 +80,12 @@ make check The parallel regression test starts quite a few processes under your user ID. Presently, the maximum concurrency is twenty parallel test scripts, which means forty processes: there's a server process and a - psql process for each test script. + psql process for each test script. So if your system enforces a per-user limit on the number of processes, make sure this limit is at least fifty or so, else you might get random-seeming failures in the parallel test. If you are not in a position to raise the limit, you can cut down the degree of parallelism - by setting the MAX_CONNECTIONS parameter. For example: + by setting the MAX_CONNECTIONS parameter. For example: make MAX_CONNECTIONS=10 check @@ -97,9 +97,9 @@ make MAX_CONNECTIONS=10 check Running the Tests Against an Existing Installation - To run the tests after installation (see ), + To run the tests after installation (see ), initialize a data area and start the - server as explained in , then type: + server as explained in , then type: make installcheck @@ -110,14 +110,14 @@ make installcheck-parallel The tests will expect to contact the server at the local host and the default port number, unless directed otherwise by PGHOST and PGPORT environment variables. The tests will be run in a - database named regression; any existing database by this name + database named regression; any existing database by this name will be dropped. The tests will also transiently create some cluster-wide objects, such as roles and tablespaces. These objects will have names beginning with - regress_. Beware of using installcheck + regress_. Beware of using installcheck mode in installations that have any actual users or tablespaces named that way. @@ -127,9 +127,9 @@ make installcheck-parallel Additional Test Suites - The make check and make installcheck commands - run only the core regression tests, which test built-in - functionality of the PostgreSQL server. The source + The make check and make installcheck commands + run only the core regression tests, which test built-in + functionality of the PostgreSQL server. The source distribution also contains additional test suites, most of them having to do with add-on functionality such as optional procedural languages. @@ -144,18 +144,18 @@ make installcheck-world These commands run the tests using temporary servers or an already-installed server, respectively, just as previously explained - for make check and make installcheck. Other + for make check and make installcheck. Other considerations are the same as previously explained for each method. - Note that make check-world builds a separate temporary + Note that make check-world builds a separate temporary installation tree for each tested module, so it requires a great deal - more time and disk space than make installcheck-world. + more time and disk space than make installcheck-world. Alternatively, you can run individual test suites by typing - make check or make installcheck in the appropriate + make check or make installcheck in the appropriate subdirectory of the build tree. Keep in mind that make - installcheck assumes you've installed the relevant module(s), not + installcheck assumes you've installed the relevant module(s), not only the core server. @@ -167,43 +167,43 @@ make installcheck-world Regression tests for optional procedural languages (other than - PL/pgSQL, which is tested by the core tests). - These are located under src/pl. 
+ PL/pgSQL, which is tested by the core tests). + These are located under src/pl. - Regression tests for contrib modules, - located under contrib. - Not all contrib modules have tests. + Regression tests for contrib modules, + located under contrib. + Not all contrib modules have tests. Regression tests for the ECPG interface library, - located in src/interfaces/ecpg/test. + located in src/interfaces/ecpg/test. Tests stressing behavior of concurrent sessions, - located in src/test/isolation. + located in src/test/isolation. Tests of client programs under src/bin. See - also . + also . - When using installcheck mode, these tests will destroy any - existing databases named pl_regression, - contrib_regression, isolation_regression, - ecpg1_regression, or ecpg2_regression, as well as - regression. + When using installcheck mode, these tests will destroy any + existing databases named pl_regression, + contrib_regression, isolation_regression, + ecpg1_regression, or ecpg2_regression, as well as + regression. @@ -211,6 +211,54 @@ make installcheck-world option . This is recommended for development, but can be omitted if there is no suitable Perl installation. + + + Some test suites are not run by default, either because they are not secure + to run on a multiuser system or because they require special software. You + can decide which test suites to run additionally by setting the + make or environment variable + PG_TEST_EXTRA to a whitespace-separated list, for + example: + +make check-world PG_TEST_EXTRA='kerberos ldap ssl' + + The following values are currently supported: + + + kerberos + + + Runs the test suite under src/test/kerberos. This + requires an MIT Kerberos installation and opens TCP/IP listen sockets. + + + + + + ldap + + + Runs the test suite under src/test/ldap. This + requires an OpenLDAP installation and opens + TCP/IP listen sockets. + + + + + + ssl + + + Runs the test suite under src/test/ssl. This opens TCP/IP listen sockets. + + + + + + Tests for features that are not supported by the current build + configuration are not run even if they are mentioned in + PG_TEST_EXTRA. + @@ -272,7 +320,7 @@ make check EXTRA_TESTS=numeric_big make check EXTRA_TESTS='collate.icu.utf8 collate.linux.utf8' LANG=en_US.utf8 - The collate.linux.utf8 test works only on Linux/glibc + The collate.linux.utf8 test works only on Linux/glibc platforms. The collate.icu.utf8 test only works when support for ICU was built. Both tests will only succeed when run in a database that uses UTF-8 encoding. @@ -294,7 +342,7 @@ make check EXTRA_TESTS='collate.icu.utf8 collate.linux.utf8' LANG=en_US.utf8 To run the Hot Standby tests, first create a database - called regression on the primary: + called regression on the primary: psql -h primary -c "CREATE DATABASE regression" @@ -311,7 +359,7 @@ psql -h primary -f src/test/regress/sql/hs_primary_setup.sql regression Now arrange for the default database connection to be to the standby server under test (for example, by setting the PGHOST and PGPORT environment variables). - Finally, run make standbycheck in the regression directory: + Finally, run make standbycheck in the regression directory: cd src/test/regress make standbycheck @@ -355,7 +403,7 @@ make standbycheck src/test/regress/regression.diffs. (When running a test suite other than the core tests, these files of course appear in the relevant subdirectory, - not src/test/regress.) + not src/test/regress.) 
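The additional suites above can also be exercised one at a time from their own directories, as mentioned earlier; a non-authoritative sketch (the particular directories chosen here, src/test/isolation and contrib/pg_trgm, are just examples of suites that carry tests):

make -C src/test/isolation check
make -C contrib/pg_trgm installcheck

The second form assumes the module has already been installed into the server under test, per the note about make installcheck above.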
@@ -367,11 +415,11 @@ make standbycheck - If for some reason a particular platform generates a failure + If for some reason a particular platform generates a failure for a given test, but inspection of the output convinces you that the result is valid, you can add a new comparison file to silence the failure report in future test runs. See - for details. + for details. @@ -457,8 +505,8 @@ make check NO_LOCALE=1 Some of the tests involve computing 64-bit floating-point numbers (double precision) from table columns. Differences in results involving mathematical functions of double - precision columns have been observed. The float8 and - geometry tests are particularly prone to small differences + precision columns have been observed. The float8 and + geometry tests are particularly prone to small differences across platforms, or even with different compiler optimization settings. Human eyeball comparison is needed to determine the real significance of these differences which are usually 10 places to @@ -466,8 +514,8 @@ make check NO_LOCALE=1 - Some systems display minus zero as -0, while others - just show 0. + Some systems display minus zero as -0, while others + just show 0. @@ -485,23 +533,23 @@ make check NO_LOCALE=1 You might see differences in which the same rows are output in a different order than what appears in the expected file. In most cases this is not, strictly speaking, a bug. Most of the regression test -scripts are not so pedantic as to use an ORDER BY for every single -SELECT, and so their result row orderings are not well-defined +scripts are not so pedantic as to use an ORDER BY for every single +SELECT, and so their result row orderings are not well-defined according to the SQL specification. In practice, since we are looking at the same queries being executed on the same data by the same software, we usually get the same result ordering on all platforms, -so the lack of ORDER BY is not a problem. Some queries do exhibit +so the lack of ORDER BY is not a problem. Some queries do exhibit cross-platform ordering differences, however. When testing against an already-installed server, ordering differences can also be caused by non-C locale settings or non-default parameter settings, such as custom values -of work_mem or the planner cost parameters. +of work_mem or the planner cost parameters. Therefore, if you see an ordering difference, it's not something to -worry about, unless the query does have an ORDER BY that your +worry about, unless the query does have an ORDER BY that your result is violating. However, please report it anyway, so that we can add an -ORDER BY to that particular query to eliminate the bogus +ORDER BY to that particular query to eliminate the bogus failure in future releases. @@ -519,18 +567,18 @@ exclusion of those that don't. If the errors test results in a server crash - at the select infinite_recurse() command, it means that + at the select infinite_recurse() command, it means that the platform's limit on process stack size is smaller than the - parameter indicates. This + parameter indicates. This can be fixed by running the server under a higher stack size limit (4MB is recommended with the default value of - max_stack_depth). If you are unable to do that, an - alternative is to reduce the value of max_stack_depth. + max_stack_depth). If you are unable to do that, an + alternative is to reduce the value of max_stack_depth. 
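Where the advice above calls for running the server under a higher stack size limit, one concrete but purely illustrative way to do that from a shell, assuming a data directory of /usr/local/pgsql/data, is:

ulimit -s 8192                          # raise the soft stack limit to 8 MB, above the recommended 4 MB
pg_ctl -D /usr/local/pgsql/data restart

Alternatively, max_stack_depth itself can be lowered, as the text notes.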
- On platforms supporting getrlimit(), the server should - automatically choose a safe value of max_stack_depth; + On platforms supporting getrlimit(), the server should + automatically choose a safe value of max_stack_depth; so unless you've manually overridden this setting, a failure of this kind is a reportable bug. @@ -559,7 +607,7 @@ diff results/random.out expected/random.out parameter settings could cause the tests to fail. For example, changing parameters such as enable_seqscan or enable_indexscan could cause plan changes that would - affect the results of tests that use EXPLAIN. + affect the results of tests that use EXPLAIN. @@ -570,7 +618,7 @@ diff results/random.out expected/random.out Since some of the tests inherently produce environment-dependent - results, we have provided ways to specify alternate expected + results, we have provided ways to specify alternate expected result files. Each regression test can have several comparison files showing possible results on different platforms. There are two independent mechanisms for determining which comparison file is used @@ -597,7 +645,7 @@ testname:output:platformpattern=comparisonfilename standard regression tests, this is always out. The value corresponds to the file extension of the output file. The platform pattern is a pattern in the style of the Unix - tool expr (that is, a regular expression with an implicit + tool expr (that is, a regular expression with an implicit ^ anchor at the start). It is matched against the platform name as printed by config.guess. The comparison file name is the base name of the substitute result @@ -607,7 +655,7 @@ testname:output:platformpattern=comparisonfilename For example: some systems interpret very small floating-point values as zero, rather than reporting an underflow error. This causes a - few differences in the float8 regression test. + few differences in the float8 regression test. Therefore, we provide a variant comparison file, float8-small-is-zero.out, which includes the results to be expected on these systems. To silence the bogus @@ -619,30 +667,30 @@ float8:out:i.86-.*-openbsd=float8-small-is-zero.out which will trigger on any machine where the output of config.guess matches i.86-.*-openbsd. Other lines - in resultmap select the variant comparison file for other + in resultmap select the variant comparison file for other platforms where it's appropriate. The second selection mechanism for variant comparison files is - much more automatic: it simply uses the best match among + much more automatic: it simply uses the best match among several supplied comparison files. The regression test driver script considers both the standard comparison file for a test, - testname.out, and variant files named - testname_digit.out - (where the digit is any single digit - 0-9). If any such file is an exact match, + testname.out, and variant files named + testname_digit.out + (where the digit is any single digit + 0-9). If any such file is an exact match, the test is considered to pass; otherwise, the one that generates the shortest diff is used to create the failure report. (If resultmap includes an entry for the particular - test, then the base testname is the substitute + test, then the base testname is the substitute name given in resultmap.) For example, for the char test, the comparison file char.out contains results that are expected - in the C and POSIX locales, while + in the C and POSIX locales, while the file char_1.out contains results sorted as they appear in many other locales. 
@@ -652,7 +700,7 @@ float8:out:i.86-.*-openbsd=float8-small-is-zero.out results, but it can be used in any situation where the test results cannot be predicted easily from the platform name alone. A limitation of this mechanism is that the test driver cannot tell which variant is - actually correct for the current environment; it will just pick + actually correct for the current environment; it will just pick the variant that seems to work best. Therefore it is safest to use this mechanism only for variant results that you are willing to consider equally valid in all contexts. @@ -668,13 +716,24 @@ float8:out:i.86-.*-openbsd=float8-small-is-zero.out under src/bin, use the Perl TAP tools and are run using the Perl testing program prove. You can pass command-line options to prove by setting - the make variable PROVE_FLAGS, for example: + the make variable PROVE_FLAGS, for example: make -C src/bin check PROVE_FLAGS='--timer' See the manual page of prove for more information. + + The make variable PROVE_TESTS + can be used to define a whitespace-separated list of paths relative + to the Makefile invoking prove + to run the specified subset of tests instead of the default + t/*.pl. For example: + +make check PROVE_TESTS='t/001_test1.pl t/003_test3.pl' + + + The TAP tests require the Perl module IPC::Run. This module is available from CPAN or an operating system package. @@ -706,6 +765,19 @@ make coverage-html The make commands also work in subdirectories. + + If you don't have lcov or prefer text output over an + HTML report, you can also run + +make coverage + + instead of make coverage-html, which will + produce .gcov output files for each source file + relevant to the test. (make coverage and make + coverage-html will overwrite each other's files, so mixing them + might be confusing.) + + To reset the execution counts between test runs, run: diff --git a/doc/src/sgml/release-10.sgml b/doc/src/sgml/release-10.sgml index 269f1aac86..0c02ecbc4e 100644 --- a/doc/src/sgml/release-10.sgml +++ b/doc/src/sgml/release-10.sgml @@ -1,26 +1,5837 @@ + + Release 10.6 + + + Release date: + 2018-11-08 + + + + This release contains a variety of fixes from 10.5. + For information about new features in major release 10, see + . + + + + Migration to Version 10.6 + + + A dump/restore is not required for those running 10.X. + + + + However, if you use the pg_stat_statements extension, + see the changelog entry below about that. + + + + Also, if you are upgrading from a version earlier than 10.4, + see . + + + + + Changes + + + + + + Ensure proper quoting of transition table names + when pg_dump emits CREATE TRIGGER + ... REFERENCING commands (Tom Lane) + + + + This oversight could be exploited by an unprivileged user to gain + superuser privileges during the next dump/reload + or pg_upgrade run. (CVE-2018-16850) + + + + + + + Fix corner-case failures + in has_foo_privilege() + family of functions (Tom Lane) + + + + Return NULL rather than throwing an error when an invalid object OID + is provided. Some of these functions got that right already, but not + all. has_column_privilege() was additionally + capable of crashing on some platforms. 
+ + + + + + + Fix pg_get_partition_constraintdef() to return + NULL rather than fail when passed an invalid relation OID (Tom Lane) + + + + + + + Avoid O(N^2) slowdown in regular expression match/split functions on + long strings (Andrew Gierth) + + + + + + + Fix parsing of standard multi-character operators that are immediately + followed by a comment or + or - + (Andrew Gierth) + + + + This oversight could lead to parse errors, or to incorrect assignment + of precedence. + + + + + + + Avoid O(N^3) slowdown in lexer for long strings + of + or - characters + (Andrew Gierth) + + + + + + + Fix mis-execution of SubPlans when the outer query is being scanned + backwards (Andrew Gierth) + + + + + + + Fix failure of UPDATE/DELETE ... WHERE CURRENT OF ... + after rewinding the referenced cursor (Tom Lane) + + + + A cursor that scans multiple relations (particularly an inheritance + tree) could produce wrong behavior if rewound to an earlier relation. + + + + + + + Fix EvalPlanQual to handle conditionally-executed + InitPlans properly (Andrew Gierth, Tom Lane) + + + + This resulted in hard-to-reproduce crashes or wrong answers in + concurrent updates, if they contained code such as an uncorrelated + sub-SELECT inside a CASE + construct. + + + + + + Prevent creation of a partition in a trigger attached to its parent + table (Amit Langote) + + + + Ideally we'd allow that, but for the moment it has to be blocked to + avoid crashes. + + + + + + Fix problems with applying ON COMMIT DELETE ROWS to + a partitioned temporary table (Amit Langote) + + + + + + Fix character-class checks to not fail on Windows for Unicode + characters above U+FFFF (Tom Lane, Kenji Uno) + + + + This bug affected full-text-search operations, as well + as contrib/ltree + and contrib/pg_trgm. + + + + + + + Disallow pushing sub-SELECTs containing window + functions, LIMIT, or OFFSET to + parallel workers (Amit Kapila) + + + + Such cases could result in inconsistent behavior due to different + workers getting different answers, as a result of indeterminacy + due to row-ordering variations. + + + + + + + Ensure that sequences owned by a foreign table are processed + by ALTER OWNER on the table (Peter Eisentraut) + + + + The ownership change should propagate to such sequences as well, but + this was missed for foreign tables. + + + + + + Ensure that the server will process + already-received NOTIFY + and SIGTERM interrupts before waiting for client + input (Jeff Janes, Tom Lane) + + + + + + + Fix over-allocation of space for array_out()'s + result string (Keiichi Hirobe) + + + + + + + Avoid query-lifetime memory leak in XMLTABLE + (Andrew Gierth) + + + + + + Fix memory leak in repeated SP-GiST index scans (Tom Lane) + + + + This is only known to amount to anything significant in cases where + an exclusion constraint using SP-GiST receives many new index entries + in a single command. + + + + + + + Ensure that ApplyLogicalMappingFile() closes the + mapping file when done with it (Tomas Vondra) + + + + Previously, the file descriptor was leaked, eventually resulting in + failures during logical decoding. + + + + + + + Fix logical decoding to handle cases where a mapped catalog table is + repeatedly rewritten, e.g. 
by VACUUM FULL + (Andres Freund) + + + + + + Prevent starting the server with wal_level set + to too low a value to support an existing replication slot (Andres + Freund) + + + + + + + Avoid crash if a utility command causes infinite recursion (Tom Lane) + + + + + + + When initializing a hot standby, cope with duplicate XIDs caused by + two-phase transactions on the master + (Michael Paquier, Konstantin Knizhnik) + + + + + + + Fix event triggers to handle nested ALTER TABLE + commands (Michael Paquier, Álvaro Herrera) + + + + + + + Propagate parent process's transaction and statement start timestamps + to parallel workers (Konstantin Knizhnik) + + + + This prevents misbehavior of functions such + as transaction_timestamp() when executed in a + worker. + + + + + + + Fix transfer of expanded datums to parallel workers so that alignment + is preserved, preventing crashes on alignment-picky platforms + (Tom Lane, Amit Kapila) + + + + + + + Fix WAL file recycling logic to work correctly on standby servers + (Michael Paquier) + + + + Depending on the setting of archive_mode, a standby + might fail to remove some WAL files that could be removed. + + + + + + + Fix handling of commit-timestamp tracking during recovery + (Masahiko Sawasa, Michael Paquier) + + + + If commit timestamp tracking has been turned on or off, recovery might + fail due to trying to fetch the commit timestamp for a transaction + that did not record it. + + + + + + + Randomize the random() seed in bootstrap and + standalone backends, and in initdb + (Noah Misch) + + + + The main practical effect of this change is that it avoids a scenario + where initdb might mistakenly conclude that + POSIX shared memory is not available, due to name collisions caused by + always using the same random seed. + + + + + + + Fix possible shared-memory corruption in DSA logic (Thomas Munro) + + + + + + + Allow DSM allocation to be interrupted (Chris Travers) + + + + + + + Avoid failure in a parallel worker when loading an extension that + tries to access system caches within its init function (Thomas Munro) + + + + We don't consider that to be good extension coding practice, but it + mostly worked before parallel query, so continue to support it for + now. + + + + + + + Properly handle turning full_page_writes on + dynamically (Kyotaro Horiguchi) + + + + + + + Fix possible crash due to double free() during + SP-GiST rescan (Andrew Gierth) + + + + + + + Prevent mis-linking of src/port and src/common functions on ELF-based + BSD platforms, as well as HP-UX and Solaris (Andrew Gierth, Tom Lane) + + + + Shared libraries loaded into a backend's address space could use the + backend's versions of these functions, rather than their own copies as + intended. Since the behavior of the two sets of functions isn't + quite the same, this led to failures. 
+ + + + + + + Avoid possible buffer overrun when replaying GIN page recompression + from WAL (Alexander Korotkov, Sivasubramanian Ramasubramanian) + + + + + + + Avoid overrun of a hash index's metapage + when BLCKSZ is smaller than default (Dilip Kumar) + + + + + + + Fix missed page checksum updates in hash indexes (Amit Kapila) + + + + + + + Fix missed fsync of a replication slot's directory (Konstantin + Knizhnik, Michael Paquier) + + + + + + + Fix unexpected timeouts when + using wal_sender_timeout on a slow server + (Noah Misch) + + + + + + + Ensure that hot standby processes use the correct WAL consistency + point (Alexander Kukushkin, Michael Paquier) + + + + This prevents possible misbehavior just after a standby server has + reached a consistent database state during WAL replay. + + + + + + + Ensure background workers are stopped properly when the postmaster + receives a fast-shutdown request before completing database startup + (Alexander Kukushkin) + + + + + + + Update the free space map during WAL replay of page all-visible/frozen + flag changes (Álvaro Herrera) + + + + Previously we were not careful about this, reasoning that the FSM is + not critical data anyway. However, if it's sufficiently out of date, + that can result in significant performance degradation after a standby + has been promoted to primary. The FSM will eventually be healed by + updates, but we'd like it to be good sooner, so work harder at + maintaining it during WAL replay. + + + + + + + Avoid premature release of parallel-query resources when query end or + tuple count limit is reached (Amit Kapila) + + + + It's only okay to shut down the executor at this point if the caller + cannot demand backwards scan afterwards. + + + + + + + Don't run atexit callbacks when servicing SIGQUIT + (Heikki Linnakangas) + + + + + + + Don't record foreign-server user mappings as members of extensions + (Tom Lane) + + + + If CREATE USER MAPPING is executed in an extension + script, an extension dependency was created for the user mapping, + which is unexpected. Roles can't be extension members, so user + mappings shouldn't be either. + + + + + + + Make syslogger more robust against failures in opening CSV log files + (Tom Lane) + + + + + + + When libpq is given multiple target host + names, do the DNS lookups one at a time, not all at once (Tom Lane) + + + + This prevents unnecessary failures or slow connections when a + connection is successfully made to one of the earlier servers in the + list. + + + + + + + Fix libpq's handling of connection timeouts + so that they are properly applied per host name or IP address (Tom Lane) + + + + Previously, some code paths failed to restart the timer when switching + to a new target host, possibly resulting in premature timeout. + + + + + + Fix psql, as well as documentation + examples, to call PQconsumeInput() before + each PQnotifies() call (Tom Lane) + + + + This fixes cases in which psql would not + report receipt of a NOTIFY message until after the + next command. 
+ + + + + + + Fix pg_dump's + option to also ignore publication + tables (Gilles Darold) + + + + + + + In pg_dump, exclude identity sequences when + their parent table is excluded from the dump (David Rowley) + + + + + + + Fix possible inconsistency in pg_dump's + sorting of dissimilar object names (Jacob Champion) + + + + + + + Ensure that pg_restore will schema-qualify + the table name when + emitting DISABLE/ENABLE TRIGGER + commands (Tom Lane) + + + + This avoids failures due to the new policy of running restores with + restrictive search path. + + + + + + + Fix pg_upgrade to handle event triggers in + extensions correctly (Haribabu Kommi) + + + + pg_upgrade failed to preserve an event + trigger's extension-membership status. + + + + + + + Fix pg_upgrade's cluster state check to + work correctly on a standby server (Bruce Momjian) + + + + + + + Enforce type cube's dimension limit in + all contrib/cube functions (Andrey Borodin) + + + + Previously, some cube-related functions could construct values that + would be rejected by cube_in(), leading to + dump/reload failures. + + + + + + In contrib/pg_stat_statements, disallow + the pg_read_all_stats role from + executing pg_stat_statements_reset() + (Haribabu Kommi) + + + + pg_read_all_stats is only meant to grant permission + to read statistics, not to change them, so this grant was incorrect. + + + + To cause this change to take effect, run ALTER EXTENSION + pg_stat_statements UPDATE in each database + where pg_stat_statements has been installed. + + + + + + + In contrib/postgres_fdw, don't try to ship a + variable-free ORDER BY clause to the remote server + (Andrew Gierth) + + + + + + + Fix contrib/unaccent's + unaccent() function to use + the unaccent text search dictionary that is in the + same schema as the function (Tom Lane) + + + + Previously it tried to look up the dictionary using the search path, + which could fail if the search path has a restrictive value. + + + + + + Fix build problems on macOS 10.14 (Mojave) (Tom Lane) + + + + Adjust configure to add + an switch to CPPFLAGS; + without this, PL/Perl and PL/Tcl fail to configure or build on macOS + 10.14. The specific sysroot used can be overridden at configure time + or build time by setting the PG_SYSROOT variable in + the arguments of configure + or make. + + + + It is now recommended that Perl-related extensions + write $(perl_includespec) rather + than -I$(perl_archlibexp)/CORE in their compiler + flags. The latter continues to work on most platforms, but not recent + macOS. + + + + Also, it should no longer be necessary to + specify manually to get PL/Tcl to + build on recent macOS releases. + + + + + + Fix MSVC build and regression-test scripts to work on recent Perl + versions (Andrew Dunstan) + + + + Perl no longer includes the current directory in its search path + by default; work around that. + + + + + + On Windows, allow the regression tests to be run by an Administrator + account (Andrew Dunstan) + + + + To do this safely, pg_regress now gives up + any such privileges at startup. + + + + + + + Allow btree comparison functions to return INT_MIN + (Tom Lane) + + + + Up to now, we've forbidden datatype-specific comparison functions from + returning INT_MIN, which allows callers to invert + the sort order just by negating the comparison result. However, this + was never safe for comparison functions that directly return the + result of memcmp(), strcmp(), + etc, as POSIX doesn't place any such restriction on those functions. 
+ At least some recent versions of memcmp() can + return INT_MIN, causing incorrect sort ordering. + Hence, we've removed this restriction. Callers must now use + the INVERT_COMPARE_RESULT() macro if they wish to + invert the sort order. + + + + + + + Fix recursion hazard in shared-invalidation message processing + (Tom Lane) + + + + This error could, for example, result in failure to access a system + catalog or index that had just been processed by VACUUM + FULL. + + + + This change adds a new result code + for LockAcquire, which might possibly affect + external callers of that function, though only very unusual usage + patterns would have an issue with it. The API + of LockAcquireExtended is also changed. + + + + + + + Save and restore SPI's global variables + during SPI_connect() + and SPI_finish() (Chapman Flack, Tom Lane) + + + + This prevents possible interference when one SPI-using function calls + another. + + + + + + + Avoid using potentially-under-aligned page buffers (Tom Lane) + + + + Invent new union types PGAlignedBlock + and PGAlignedXLogBlock, and use these in place of plain + char arrays, ensuring that the compiler can't place the buffer at a + misaligned start address. This fixes potential core dumps on + alignment-picky platforms, and may improve performance even on + platforms that allow misalignment. + + + + + + + Make src/port/snprintf.c follow the C99 + standard's definition of snprintf()'s result + value (Tom Lane) + + + + On platforms where this code is used (mostly Windows), its pre-C99 + behavior could lead to failure to detect buffer overrun, if the + calling code assumed C99 semantics. + + + + + + + When building on i386 with the clang + compiler, require to be used (Andres Freund) + + + + This avoids problems with missed floating point overflow checks. + + + + + + + Fix configure's detection of the result + type of strerror_r() (Tom Lane) + + + + The previous coding got the wrong answer when building + with icc on Linux (and perhaps in other + cases), leading to libpq not returning + useful error messages for system-reported errors. + + + + + + Update time zone data files to tzdata + release 2018g for DST law changes in Chile, Fiji, Morocco, and Russia + (Volgograd), plus historical corrections for China, Hawaii, Japan, + Macau, and North Korea. + + + + + + + + + + Release 10.5 + + + Release date: + 2018-08-09 + + + + This release contains a variety of fixes from 10.4. + For information about new features in major release 10, see + . + + + + Migration to Version 10.5 + + + A dump/restore is not required for those running 10.X. + + + + However, if you are upgrading from a version earlier than 10.4, + see . + + + + + Changes + + + + + + + Fix failure to reset libpq's state fully + between connection attempts (Tom Lane) + + + + An unprivileged user of dblink + or postgres_fdw could bypass the checks intended + to prevent use of server-side credentials, such as + a ~/.pgpass file owned by the operating-system + user running the server. Servers allowing peer authentication on + local connections are particularly vulnerable. Other attacks such + as SQL injection into a postgres_fdw session + are also possible. + Attacking postgres_fdw in this way requires the + ability to create a foreign server object with selected connection + parameters, but any user with access to dblink + could exploit the problem. 
+ In general, an attacker with the ability to select the connection + parameters for a libpq-using application + could cause mischief, though other plausible attack scenarios are + harder to think of. + Our thanks to Andrew Krasichkov for reporting this issue. + (CVE-2018-10915) + + + + + + + Fix INSERT ... ON CONFLICT UPDATE through a view + that isn't just SELECT * FROM ... + (Dean Rasheed, Amit Langote) + + + + Erroneous expansion of an updatable view could lead to crashes + or attribute ... has the wrong type errors, if the + view's SELECT list doesn't match one-to-one with + the underlying table's columns. + Furthermore, this bug could be leveraged to allow updates of columns + that an attacking user lacks UPDATE privilege for, + if that user has INSERT and UPDATE + privileges for some other column(s) of the table. + Any user could also use it for disclosure of server memory. + (CVE-2018-10925) + + + + + + + Ensure that updates to the relfrozenxid + and relminmxid values + for nailed system catalogs are processed in a timely + fashion (Andres Freund) + + + + Overoptimistic caching rules could prevent these updates from being + seen by other sessions, leading to spurious errors and/or data + corruption. The problem was significantly worse for shared catalogs, + such as pg_authid, because the stale cache + data could persist into new sessions as well as existing ones. + + + + + + + Fix case where a freshly-promoted standby crashes before having + completed its first post-recovery checkpoint (Michael Paquier, Kyotaro + Horiguchi, Pavan Deolasee, Álvaro Herrera) + + + + This led to a situation where the server did not think it had reached + a consistent database state during subsequent WAL replay, preventing + restart. + + + + + + + Avoid emitting a bogus WAL record when recycling an all-zero btree + page (Amit Kapila) + + + + This mistake has been seen to cause assertion failures, and + potentially it could result in unnecessary query cancellations on hot + standby servers. + + + + + + + During WAL replay, guard against corrupted record lengths exceeding + 1GB (Michael Paquier) + + + + Treat such a case as corrupt data. Previously, the code would try to + allocate space and get a hard error, making recovery impossible. + + + + + + + When ending recovery, delay writing the timeline history file as long + as possible (Heikki Linnakangas) + + + + This avoids some situations where a failure during recovery cleanup + (such as a problem with a two-phase state file) led to inconsistent + timeline state on-disk. + + + + + + + Improve performance of WAL replay for transactions that drop many + relations (Fujii Masao) + + + + This change reduces the number of times that shared buffers are + scanned, so that it is of most benefit when that setting is large. + + + + + + + Improve performance of lock releasing in standby server WAL replay + (Thomas Munro) + + + + + + + Make logical WAL senders report streaming state correctly (Simon + Riggs, Sawada Masahiko) + + + + The code previously mis-detected whether or not it had caught up with + the upstream server. + + + + + + + Ensure that a snapshot is provided when executing data type input + functions in logical replication subscribers (Minh-Quan Tran, + Álvaro Herrera) + + + + This omission led to failures in some cases, such as domains with + constraints using SQL-language functions. 
+ + + + + + + Fix bugs in snapshot handling during logical decoding, allowing wrong + decoding results in rare cases (Arseny Sher, Álvaro Herrera) + + + + + + + Add subtransaction handling in logical-replication table + synchronization workers (Amit Khandekar, Robert Haas) + + + + Previously, table synchronization could misbehave if any + subtransactions were aborted after modifying a table being + synchronized. + + + + + + + Ensure a table's cached index list is correctly rebuilt after an index + creation fails partway through (Peter Geoghegan) + + + + Previously, the failed index's OID could remain in the list, causing + problems later in the same session. + + + + + + + Fix mishandling of empty uncompressed posting list pages in GIN + indexes (Sivasubramanian Ramasubramanian, Alexander Korotkov) + + + + This could result in an assertion failure after pg_upgrade of a + pre-9.4 GIN index (9.4 and later will not create such pages). + + + + + + + Pad arrays of unnamed POSIX semaphores to reduce cache line sharing + (Thomas Munro) + + + + This reduces contention on many-CPU systems, fixing a performance + regression (compared to previous releases) on Linux and FreeBSD. + + + + + + + Ensure that a process doing a parallel index scan will respond to + signals (Amit Kapila) + + + + Previously, parallel workers could get stuck waiting for a lock on an + index page, and not notice requests to abort the query. + + + + + + + Ensure that VACUUM will respond to signals + within btree page deletion loops (Andres Freund) + + + + Corrupted btree indexes could result in an infinite loop here, and + that previously wasn't interruptible without forcing a crash. + + + + + + + Fix hash-join costing mistake introduced with inner_unique + optimization (David Rowley) + + + + This could lead to bad plan choices in situations where that + optimization was applicable. + + + + + + + Fix misoptimization of equivalence classes involving composite-type + columns (Tom Lane) + + + + This resulted in failure to recognize that an index on a composite + column could provide the sort order needed for a mergejoin on that + column. + + + + + + + Fix planner to avoid ORDER/GROUP BY expression not found in + targetlist errors in some queries with set-returning functions + (Tom Lane) + + + + + + + Fix handling of partition keys whose data type uses a polymorphic + btree operator class, such as arrays (Amit Langote, Álvaro + Herrera) + + + + + + + Fix SQL-standard FETCH FIRST syntax to allow + parameters ($n), as the + standard expects (Andrew Gierth) + + + + + + + Remove undocumented restriction against duplicate partition key + columns (Yugo Nagata) + + + + + + + Disallow temporary tables from being partitions of non-temporary + tables (Amit Langote, Michael Paquier) + + + + While previously allowed, this case didn't work reliably. + + + + + + + Fix EXPLAIN's accounting for resource usage, + particularly buffer accesses, in parallel workers + (Amit Kapila, Robert Haas) + + + + + + + Fix SHOW ALL to show all settings to roles that are + members of pg_read_all_settings, and also allow + such roles to see source filename and line number in + the pg_settings view (Laurenz Albe, + Álvaro Herrera) + + + + + + + Fix failure to schema-qualify some object names + in getObjectDescription + and getObjectIdentity output + (Kyotaro Horiguchi, Tom Lane) + + + + Names of collations, conversions, text search objects, publication + relations, and extended statistics objects were not schema-qualified + when they should be. 
+ + + + + + + Fix CREATE AGGREGATE type checking so that + parallelism support functions can be attached to variadic aggregates + (Alexey Bashtanov) + + + + + + + Widen COPY FROM's current-line-number counter + from 32 to 64 bits (David Rowley) + + + + This avoids two problems with input exceeding 4G lines: COPY + FROM WITH HEADER would drop a line every 4G lines, not only + the first line, and error reports could show a wrong line number. + + + + + + + Allow replication slots to be dropped in single-user mode + (Álvaro Herrera) + + + + This use-case was accidentally broken in release 10.0. + + + + + + + Fix incorrect results from variance(int4) and + related aggregates when run in parallel aggregation mode + (David Rowley) + + + + + + + Process TEXT and CDATA nodes + correctly in xmltable() column expressions + (Markus Winand) + + + + + + + Cope with possible failure of OpenSSL's + RAND_bytes() function + (Dean Rasheed, Michael Paquier) + + + + Under rare circumstances, this oversight could result in could + not generate random cancel key failures that could only be + resolved by restarting the postmaster. + + + + + + + Fix libpq's handling of some cases + where hostaddr is specified + (Hari Babu, Tom Lane, Robert Haas) + + + + PQhost() gave misleading or incorrect results + in some cases. Now, it uniformly returns the host name if specified, + or the host address if only that is specified, or the default host + name (typically /tmp + or localhost) if both parameters are omitted. + + + + Also, the wrong value might be compared to the server name when + verifying an SSL certificate. + + + + Also, the wrong value might be compared to the host name field in + ~/.pgpass. Now, that field is compared to the + host name if specified, or the host address if only that is specified, + or localhost if both parameters are omitted. + + + + Also, an incorrect error message was reported for an unparseable + hostaddr value. + + + + Also, when the host, hostaddr, + or port parameters contain comma-separated + lists, libpq is now more careful to treat + empty elements of a list as selecting the default behavior. + + + + + + + Add a string freeing function + to ecpg's pgtypes + library, so that cross-module memory management problems can be + avoided on Windows (Takayuki Tsunakawa) + + + + On Windows, crashes can ensue if the free call + for a given chunk of memory is not made from the same DLL + that malloc'ed the memory. + The pgtypes library sometimes returns strings + that it expects the caller to free, making it impossible to follow + this rule. Add a PGTYPESchar_free() function + that just wraps free, allowing applications + to follow this rule. + + + + + + + Fix ecpg's support for long + long variables on Windows, as well as other platforms that + declare strtoll/strtoull + nonstandardly or not at all (Dang Minh Huong, Tom Lane) + + + + + + + Fix misidentification of SQL statement type in PL/pgSQL, when a rule + change causes a change in the semantics of a statement intra-session + (Tom Lane) + + + + This error led to assertion failures, or in rare cases, failure to + enforce the INTO STRICT option as expected. 
+ + + + + + + Fix password prompting in client programs so that echo is properly + disabled on Windows when stdin is not the + terminal (Matthew Stickney) + + + + + + + Further fix mis-quoting of values for list-valued GUC variables in + dumps (Tom Lane) + + + + The previous fix for quoting of search_path and + other list-valued variables in pg_dump + output turned out to misbehave for empty-string list elements, and it + risked truncation of long file paths. + + + + + + + Fix pg_dump's failure to + dump REPLICA IDENTITY properties for constraint + indexes (Tom Lane) + + + + Manually created unique indexes were properly marked, but not those + created by declaring UNIQUE or PRIMARY + KEY constraints. + + + + + + + Make pg_upgrade check that the old server + was shut down cleanly (Bruce Momjian) + + + + The previous check could be fooled by an immediate-mode shutdown. + + + + + + + Fix contrib/hstore_plperl to look through Perl + scalar references, and to not crash if it doesn't find a hash + reference where it expects one (Tom Lane) + + + + + + + Fix crash in contrib/ltree's + lca() function when the input array is empty + (Pierre Ducroquet) + + + + + + + Fix various error-handling code paths in which an incorrect error code + might be reported (Michael Paquier, Tom Lane, Magnus Hagander) + + + + + + + Rearrange makefiles to ensure that programs link to freshly-built + libraries (such as libpq.so) rather than ones + that might exist in the system library directories (Tom Lane) + + + + This avoids problems when building on platforms that supply old copies + of PostgreSQL libraries. + + + + + + + Update time zone data files to tzdata + release 2018e for DST law changes in North Korea, plus historical + corrections for Czechoslovakia. + + + + This update includes a redefinition of daylight savings + in Ireland, as well as for some past years in Namibia and + Czechoslovakia. In those jurisdictions, legally standard time is + observed in summer, and daylight savings time in winter, so that the + daylight savings offset is one hour behind standard time not one hour + ahead. This does not affect either the actual UTC offset or the + timezone abbreviations in use; the only known effect is that + the is_dst column in + the pg_timezone_names view will now be true + in winter and false in summer in these cases. + + + + + + + + + + Release 10.4 + + + Release date: + 2018-05-10 + + + + This release contains a variety of fixes from 10.3. + For information about new features in major release 10, see + . + + + + Migration to Version 10.4 + + + A dump/restore is not required for those running 10.X. + + + + However, if you use the adminpack extension, + you should update it as per the first changelog entry below. + + + + Also, if the function marking mistakes mentioned in the second and + third changelog entries below affect you, you will want to take steps + to correct your database catalogs. + + + + Also, if you are upgrading from a version earlier than 10.3, + see . + + + + + Changes + + + + + + + Remove public execute privilege + from contrib/adminpack's + pg_logfile_rotate() function (Stephen Frost) + + + + pg_logfile_rotate() is a deprecated wrapper + for the core function pg_rotate_logfile(). + When that function was changed to rely on SQL privileges for access + control rather than a hard-coded superuser + check, pg_logfile_rotate() should have been + updated as well, but the need for this was missed. 
Hence, + if adminpack is installed, any user could + request a logfile rotation, creating a minor security issue. + + + + After installing this update, administrators should + update adminpack by performing + ALTER EXTENSION adminpack UPDATE in each + database in which adminpack is installed. + (CVE-2018-1115) + + + + + + + Fix incorrect volatility markings on a few built-in functions + (Thomas Munro, Tom Lane) + + + + The functions + query_to_xml, + cursor_to_xml, + cursor_to_xmlschema, + query_to_xmlschema, and + query_to_xml_and_xmlschema + should be marked volatile because they execute user-supplied queries + that might contain volatile operations. They were not, leading to a + risk of incorrect query optimization. This has been repaired for new + installations by correcting the initial catalog data, but existing + installations will continue to contain the incorrect markings. + Practical use of these functions seems to pose little hazard, but in + case of trouble, it can be fixed by manually updating these + functions' pg_proc entries, for example + ALTER FUNCTION pg_catalog.query_to_xml(text, boolean, + boolean, text) VOLATILE. (Note that that will need to be + done in each database of the installation.) Another option is + to pg_upgrade the database to a version + containing the corrected initial data. + + + + + + + Fix incorrect parallel-safety markings on a few built-in functions + (Thomas Munro, Tom Lane) + + + + The functions + brin_summarize_new_values, + brin_summarize_range, + brin_desummarize_range, + gin_clean_pending_list, + cursor_to_xml, + cursor_to_xmlschema, + ts_rewrite, + ts_stat, + binary_upgrade_create_empty_extension, and + pg_import_system_collations + should be marked parallel-unsafe; some because they perform database + modifications directly, and others because they execute user-supplied + queries that might do so. They were marked parallel-restricted + instead, leading to a risk of unexpected query errors. This has been + repaired for new installations by correcting the initial catalog + data, but existing installations will continue to contain the + incorrect markings. Practical use of these functions seems to pose + little hazard unless force_parallel_mode is turned + on. In case of trouble, it can be fixed by manually updating these + functions' pg_proc entries, for example + ALTER FUNCTION pg_catalog.brin_summarize_new_values(regclass) + PARALLEL UNSAFE. (Note that that will need to be done in + each database of the installation.) Another option is + to pg_upgrade the database to a version + containing the corrected initial data. + + + + + + + Avoid re-using TOAST value OIDs that match dead-but-not-yet-vacuumed + TOAST entries (Pavan Deolasee) + + + + Once the OID counter has wrapped around, it's possible to assign a + TOAST value whose OID matches a previously deleted entry in the same + TOAST table. If that entry were not yet vacuumed away, this resulted + in unexpected chunk number 0 (expected 1) for toast + value nnnnn errors, which would + persist until the dead entry was removed + by VACUUM. Fix by not selecting such OIDs when + creating a new TOAST entry. + + + + + + + Correctly enforce any CHECK constraints on + individual partitions during COPY to a partitioned + table (Etsuro Fujita) + + + + Previously, only constraints declared for the partitioned table as a + whole were checked. 
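A minimal sketch, with hypothetical table names, of the per-partition CHECK enforcement described in the last entry above.

    CREATE TABLE measurements (city text, reading int) PARTITION BY LIST (city);
    CREATE TABLE measurements_nyc PARTITION OF measurements FOR VALUES IN ('nyc');
    ALTER TABLE measurements_nyc ADD CHECK (reading >= 0);

    -- With the fix, COPY into the parent also rejects rows that violate the
    -- partition's own constraint; previously only constraints declared on
    -- "measurements" itself were checked.
    COPY measurements (city, reading) FROM STDIN;
    -- data rows would follow here, terminated by a line containing only \.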
+ + + + + + + Accept TRUE and FALSE as + partition bound values (Amit Langote) + + + + Previously, only string-literal values were accepted for a boolean + partitioning column. But then pg_dump + would print such values as TRUE + or FALSE, leading to dump/reload failures. + + + + + + + Fix memory management for partition key comparison functions + (Álvaro Herrera, Amit Langote) + + + + This error could lead to crashes when using user-defined operator + classes for partition keys. + + + + + + + Fix possible crash when a query inserts tuples in several partitions + of a partitioned table, and those partitions don't have identical row + types (Etsuro Fujita, Amit Langote) + + + + + + + Change ANALYZE's algorithm for updating + pg_class.reltuples + (David Gould) + + + + Previously, pages not actually scanned by ANALYZE + were assumed to retain their old tuple density. In a large table + where ANALYZE samples only a small fraction of the + pages, this meant that the overall tuple density estimate could not + change very much, so that reltuples would + change nearly proportionally to changes in the table's physical size + (relpages) regardless of what was actually + happening in the table. This has been observed to result + in reltuples becoming so much larger than + reality as to effectively shut off autovacuuming. To fix, assume + that ANALYZE's sample is a statistically unbiased + sample of the table (as it should be), and just extrapolate the + density observed within those pages to the whole table. + + + + + + + Include extended-statistics objects in the set of table properties + duplicated by CREATE TABLE ... LIKE ... INCLUDING + ALL (David Rowley) + + + + Also add an INCLUDING STATISTICS option, to allow + finer-grained control over whether this happens. + + + + + + + Fix CREATE TABLE ... LIKE with bigint + identity columns (Peter Eisentraut) + + + + On platforms where long is 32 bits (which includes + 64-bit Windows as well as most 32-bit machines), copied sequence + parameters would be truncated to 32 bits. + + + + + + + Avoid deadlocks in concurrent CREATE INDEX + CONCURRENTLY commands that are run + under SERIALIZABLE or REPEATABLE + READ transaction isolation (Tom Lane) + + + + + + + Fix possible slow execution of REFRESH MATERIALIZED VIEW + CONCURRENTLY (Thomas Munro) + + + + + + + Fix UPDATE/DELETE ... WHERE CURRENT OF to not fail + when the referenced cursor uses an index-only-scan plan (Yugo Nagata, + Tom Lane) + + + + + + + Fix incorrect planning of join clauses pushed into parameterized + paths (Andrew Gierth, Tom Lane) + + + + This error could result in misclassifying a condition as + a join filter for an outer join when it should be a + plain filter condition, leading to incorrect join + output. + + + + + + + Fix possibly incorrect generation of an index-only-scan plan when the + same table column appears in multiple index columns, and only some of + those index columns use operator classes that can return the column + value (Kyotaro Horiguchi) + + + + + + + Fix misoptimization of CHECK constraints having + provably-NULL subclauses of + top-level AND/OR conditions + (Tom Lane, Dean Rasheed) + + + + This could, for example, allow constraint exclusion to exclude a + child table that should not be excluded from a query. 
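A minimal sketch, using hypothetical table names, of the boolean partition-bound entry above: the keywords TRUE and FALSE are now accepted, matching what pg_dump emits.

    CREATE TABLE flags (active boolean, note text) PARTITION BY LIST (active);
    CREATE TABLE flags_true  PARTITION OF flags FOR VALUES IN (TRUE);
    CREATE TABLE flags_false PARTITION OF flags FOR VALUES IN (FALSE);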
+ + + + + + + Prevent planner crash when a query has multiple GROUPING + SETS, none of which can be implemented by sorting (Andrew + Gierth) + + + + + + + Fix executor crash due to double free in some GROUPING + SETS usages (Peter Geoghegan) + + + + + + + Fix misexecution of self-joins on transition tables (Thomas Munro) + + + + + + + Avoid crash if a table rewrite event trigger is added concurrently + with a command that could call such a trigger (Álvaro Herrera, + Andrew Gierth, Tom Lane) + + + + + + + Avoid failure if a query-cancel or session-termination interrupt + occurs while committing a prepared transaction (Stas Kelvich) + + + + + + + Fix query-lifespan memory leakage in repeatedly executed hash joins + (Tom Lane) + + + + + + + Fix possible leak or double free of visibility map buffer pins + (Amit Kapila) + + + + + + + Avoid spuriously marking pages as all-visible (Dan Wood, + Pavan Deolasee, Álvaro Herrera) + + + + This could happen if some tuples were locked (but not deleted). While + queries would still function correctly, vacuum would normally ignore + such pages, with the long-term effect that the tuples were never + frozen. In recent releases this would eventually result in errors + such as found multixact nnnnn from + before relminmxid nnnnn. + + + + + + + Fix overly strict sanity check + in heap_prepare_freeze_tuple + (Álvaro Herrera) + + + + This could result in incorrect cannot freeze committed + xmax failures in databases that have + been pg_upgrade'd from 9.2 or earlier. + + + + + + + Prevent dangling-pointer dereference when a C-coded before-update row + trigger returns the old tuple (Rushabh Lathia) + + + + + + + Reduce locking during autovacuum worker scheduling (Jeff Janes) + + + + The previous behavior caused drastic loss of potential worker + concurrency in databases with many tables. + + + + + + + Ensure client hostname is copied while copying + pg_stat_activity data to local memory + (Edmund Horner) + + + + Previously the supposedly-local snapshot contained a pointer into + shared memory, allowing the client hostname column to change + unexpectedly if any existing session disconnected. + + + + + + + Handle pg_stat_activity information for + auxiliary processes correctly (Edmund Horner) + + + + The application_name, + client_hostname, + and query fields might show incorrect + data for such processes. + + + + + + + Fix incorrect processing of multiple compound affixes + in ispell dictionaries (Arthur Zakirov) + + + + + + + Fix collation-aware searches (that is, indexscans using inequality + operators) in SP-GiST indexes on text columns (Tom Lane) + + + + Such searches would return the wrong set of rows in most non-C + locales. + + + + + + + Prevent query-lifespan memory leakage with SP-GiST operator classes + that use traversal values (Anton Dignös) + + + + + + + Count the number of index tuples correctly during initial build of an + SP-GiST index (Tomas Vondra) + + + + Previously, the tuple count was reported to be the same as that of + the underlying table, which is wrong if the index is partial. + + + + + + + Count the number of index tuples correctly during vacuuming of a + GiST index (Andrey Borodin) + + + + Previously it reported the estimated number of heap tuples, + which might be inaccurate, and is certainly wrong if the + index is partial. 
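A minimal sketch, with hypothetical names, of the SP-GiST collation-aware search fix above.

    CREATE TABLE docs (title text);
    CREATE INDEX docs_title_spgist ON docs USING spgist (title);
    -- Inequality scans such as this previously returned the wrong rows in most
    -- non-C locales; they now respect the column's collation.
    SELECT title FROM docs WHERE title > 'M' ORDER BY title;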
+ + + + + + + Fix a corner case where a streaming standby gets stuck at a WAL + continuation record (Kyotaro Horiguchi) + + + + + + + In logical decoding, avoid possible double processing of WAL data + when a walsender restarts (Craig Ringer) + + + + + + + Fix logical replication to not assume that type OIDs match between + the local and remote servers (Masahiko Sawada) + + + + + + + Allow scalarltsel + and scalargtsel to be used on non-core datatypes + (Tomas Vondra) + + + + + + + Reduce libpq's memory consumption when a + server error is reported after a large amount of query output has + been collected (Tom Lane) + + + + Discard the previous output before, not after, processing the error + message. On some platforms, notably Linux, this can make a + difference in the application's subsequent memory footprint. + + + + + + + Fix double-free crashes in ecpg + (Patrick Krecker, Jeevan Ladhe) + + + + + + + Fix ecpg to handle long long + int variables correctly in MSVC builds (Michael Meskes, + Andrew Gierth) + + + + + + + Fix mis-quoting of values for list-valued GUC variables in dumps + (Michael Paquier, Tom Lane) + + + + The local_preload_libraries, + session_preload_libraries, + shared_preload_libraries, + and temp_tablespaces variables were not correctly + quoted in pg_dump output. This would + cause problems if settings for these variables appeared in + CREATE FUNCTION ... SET or ALTER + DATABASE/ROLE ... SET clauses. + + + + + + + Fix pg_recvlogical to not fail against + pre-v10 PostgreSQL servers + (Michael Paquier) + + + + A previous fix caused pg_recvlogical to + issue a command regardless of server version, but it should only be + issued to v10 and later servers. + + + + + + + Ensure that pg_rewind deletes files on the + target server if they are deleted from the source server during the + run (Takayuki Tsunakawa) + + + + Failure to do this could result in data inconsistency on the target, + particularly if the file in question is a WAL segment. + + + + + + + Fix pg_rewind to handle tables in + non-default tablespaces correctly (Takayuki Tsunakawa) + + + + + + + Fix overflow handling in PL/pgSQL + integer FOR loops (Tom Lane) + + + + The previous coding failed to detect overflow of the loop variable + on some non-gcc compilers, leading to an infinite loop. + + + + + + + Adjust PL/Python regression tests to pass + under Python 3.7 (Peter Eisentraut) + + + + + + + Support testing PL/Python and related + modules when building with Python 3 and MSVC (Andrew Dunstan) + + + + + + + Fix errors in initial build of contrib/bloom + indexes (Tomas Vondra, Tom Lane) + + + + Fix possible omission of the table's last tuple from the index. + Count the number of index tuples correctly, in case it is a partial + index. + + + + + + + Rename internal b64_encode + and b64_decode functions to avoid conflict with + Solaris 11.4 built-in functions (Rainer Orth) + + + + + + + Sync our copy of the timezone library with IANA tzcode release 2018e + (Tom Lane) + + + + This fixes the zic timezone data compiler + to cope with negative daylight-savings offsets. While + the PostgreSQL project will not + immediately ship such timezone data, zic + might be used with timezone data obtained directly from IANA, so it + seems prudent to update zic now. + + + + + + + Update time zone data files to tzdata + release 2018d for DST law changes in Palestine and Antarctica (Casey + Station), plus historical corrections for Portugal and its colonies, + as well as Enderbury, Jamaica, Turks & Caicos Islands, and + Uruguay. 
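A sketch of the PL/pgSQL FOR-loop overflow entry above: with an upper bound at the top of the integer range, the loop now terminates instead of wrapping around on the affected compilers.

    DO $$
    BEGIN
      FOR i IN 2147483645 .. 2147483647 LOOP
        RAISE NOTICE 'i = %', i;   -- three iterations, then the loop ends
      END LOOP;
    END
    $$;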
+ + + + + + + + + + Release 10.3 + + + Release date: + 2018-03-01 + + + + This release contains a variety of fixes from 10.2. + For information about new features in major release 10, see + . + + + + Migration to Version 10.3 + + + A dump/restore is not required for those running 10.X. + + + + However, if you run an installation in which not all users are mutually + trusting, or if you maintain an application or extension that is + intended for use in arbitrary situations, it is strongly recommended + that you read the documentation changes described in the first changelog + entry below, and take suitable steps to ensure that your installation or + code is secure. + + + + Also, the changes described in the second changelog entry below may + cause functions used in index expressions or materialized views to fail + during auto-analyze, or when reloading from a dump. After upgrading, + monitor the server logs for such problems, and fix affected functions. + + + + Also, if you are upgrading from a version earlier than 10.2, + see . + + + + + Changes + + + + + + + Document how to configure installations and applications to guard + against search-path-dependent trojan-horse attacks from other users + (Noah Misch) + + + + Using a search_path setting that includes any + schemas writable by a hostile user enables that user to capture + control of queries and then run arbitrary SQL code with the + permissions of the attacked user. While it is possible to write + queries that are proof against such hijacking, it is notationally + tedious, and it's very easy to overlook holes. Therefore, we now + recommend configurations in which no untrusted schemas appear in + one's search path. Relevant documentation appears in + (for database administrators and users), + (for application authors), + (for extension authors), and + (for authors + of SECURITY DEFINER functions). + (CVE-2018-1058) + + + + + + + Avoid use of insecure search_path settings + in pg_dump and other client programs + (Noah Misch, Tom Lane) + + + + pg_dump, + pg_upgrade, + vacuumdb and + other PostgreSQL-provided applications were + themselves vulnerable to the type of hijacking described in the previous + changelog entry; since these applications are commonly run by + superusers, they present particularly attractive targets. To make them + secure whether or not the installation as a whole has been secured, + modify them to include only the pg_catalog + schema in their search_path settings. + Autovacuum worker processes now do the same, as well. + + + + In cases where user-provided functions are indirectly executed by + these programs — for example, user-provided functions in index + expressions — the tighter search_path may + result in errors, which will need to be corrected by adjusting those + user-provided functions to not assume anything about what search path + they are invoked under. That has always been good practice, but now + it will be necessary for correct behavior. + (CVE-2018-1058) + + + + + + + Prevent logical replication from trying to ship changes for + unpublishable relations (Peter Eisentraut) + + + + A publication marked FOR ALL TABLES would + incorrectly ship changes in materialized views + and information_schema tables, which are + supposed to be omitted from the change stream. 
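A minimal sketch of the kind of hardening recommended for CVE-2018-1058; the authoritative guidance is in the documentation sections cited above, and the function name here is hypothetical.

    -- Keep untrusted users from creating objects in a schema that appears in
    -- other users' search paths:
    REVOKE CREATE ON SCHEMA public FROM PUBLIC;

    -- Pin a safe search path for SECURITY DEFINER functions:
    ALTER FUNCTION my_secdef_func() SET search_path = pg_catalog, pg_temp;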
+ + + + + + + Fix misbehavior of concurrent-update rechecks with CTE references + appearing in subplans (Tom Lane) + + + + If a CTE (WITH clause reference) is used in an + InitPlan or SubPlan, and the query requires a recheck due to trying + to update or lock a concurrently-updated row, incorrect results could + be obtained. + + + + + + + Fix planner failures with overlapping mergejoin clauses in an outer + join (Tom Lane) + + + + These mistakes led to left and right pathkeys do not match in + mergejoin or outer pathkeys do not match + mergeclauses planner errors in corner cases. + + + + + + + Repair pg_upgrade's failure to + preserve relfrozenxid for materialized + views (Tom Lane, Andres Freund) + + + + This oversight could lead to data corruption in materialized views + after an upgrade, manifesting as could not access status of + transaction or found xmin from before + relfrozenxid errors. The problem would be more likely to + occur in seldom-refreshed materialized views, or ones that were + maintained only with REFRESH MATERIALIZED VIEW + CONCURRENTLY. + + + + If such corruption is observed, it can be repaired by refreshing the + materialized view (without CONCURRENTLY). + + + + + + + Fix incorrect pg_dump output for some + non-default sequence limit values (Alexey Bashtanov) + + + + + + + Fix pg_dump's mishandling + of STATISTICS objects (Tom Lane) + + + + An extended statistics object's schema was mislabeled in the dump's + table of contents, possibly leading to the wrong results in a + schema-selective restore. Its ownership was not correctly restored, + either. Also, change the logic so that statistics objects are + dumped/restored, or not, as independent objects rather than tying + them to the dump/restore decision for the table they are on. The + original definition could not scale to the planned future extension to + cross-table statistics. + + + + + + + Fix incorrect reporting of PL/Python function names in + error CONTEXT stacks (Tom Lane) + + + + An error occurring within a nested PL/Python function call (that is, + one reached via a SPI query from another PL/Python function) would + result in a stack trace showing the inner function's name twice, + rather than the expected results. Also, an error in a nested + PL/Python DO block could result in a null pointer + dereference crash on some platforms. + + + + + + + Allow contrib/auto_explain's + log_min_duration setting to range up + to INT_MAX, or about 24 days instead of 35 minutes + (Tom Lane) + + + + + + + Mark assorted GUC variables as PGDLLIMPORT, to + ease porting extension modules to Windows (Metin Doslu) + + + + + + + + + + Release 10.2 + + + Release date: + 2018-02-08 + + + + This release contains a variety of fixes from 10.1. + For information about new features in major release 10, see + . + + + + Migration to Version 10.2 + + + A dump/restore is not required for those running 10.X. + + + + However, + if you use contrib/cube's ~> + operator, see the entry below about that. + + + + Also, if you are upgrading from a version earlier than 10.1, + see . + + + + + Changes + + + + + + + Fix processing of partition keys containing multiple expressions + (Álvaro Herrera, David Rowley) + + + + This error led to crashes or, with carefully crafted input, disclosure + of arbitrary backend memory. 
+ (CVE-2018-1052) + + + + + + + Ensure that all temporary files made + by pg_upgrade are non-world-readable + (Tom Lane, Noah Misch) + + + + pg_upgrade normally restricts its + temporary files to be readable and writable only by the calling user. + But the temporary file containing pg_dumpall -g + output would be group- or world-readable, or even writable, if the + user's umask setting allows. In typical usage on + multi-user machines, the umask and/or the working + directory's permissions would be tight enough to prevent problems; + but there may be people using pg_upgrade + in scenarios where this oversight would permit disclosure of database + passwords to unfriendly eyes. + (CVE-2018-1053) + + + + + + + Fix vacuuming of tuples that were updated while key-share locked + (Andres Freund, Álvaro Herrera) + + + + In some cases VACUUM would fail to remove such + tuples even though they are now dead, leading to assorted data + corruption scenarios. + + + + + + + Fix failure to mark a hash index's metapage dirty after + adding a new overflow page, potentially leading to index corruption + (Lixian Zou, Amit Kapila) + + + + + + + Ensure that vacuum will always clean up the pending-insertions list of + a GIN index (Masahiko Sawada) + + + + This is necessary to ensure that dead index entries get removed. + The old code got it backwards, allowing vacuum to skip the cleanup if + some other process were running cleanup concurrently, thus risking + invalid entries being left behind in the index. + + + + + + + Fix inadequate buffer locking in some LSN fetches (Jacob Champion, + Asim Praveen, Ashwin Agrawal) + + + + These errors could result in misbehavior under concurrent load. + The potential consequences have not been characterized fully. + + + + + + + Fix incorrect query results from cases involving flattening of + subqueries whose outputs are used in GROUPING SETS + (Heikki Linnakangas) + + + + + + + Fix handling of list partitioning constraints for partition keys of + boolean or array types (Amit Langote) + + + + + + + Avoid unnecessary failure in a query on an inheritance tree that + occurs concurrently with some child table being removed from the tree + by ALTER TABLE NO INHERIT (Tom Lane) + + + + + + + Fix spurious deadlock failures when multiple sessions are + running CREATE INDEX CONCURRENTLY (Jeff Janes) + + + + + + + During VACUUM FULL, update the table's size fields + in pg_class sooner (Amit Kapila) + + + + This prevents poor behavior when rebuilding hash indexes on the + table, since those use the pg_class + statistics to govern the initial hash size. + + + + + + + Fix + UNION/INTERSECT/EXCEPT + over zero columns (Tom Lane) + + + + + + + Disallow identity columns on typed tables and partitions + (Michael Paquier) + + + + These cases will be treated as unsupported features for now. + + + + + + + Fix assorted failures to apply the correct default value when + inserting into an identity column (Michael Paquier, Peter Eisentraut) + + + + In several contexts, notably COPY + and ALTER TABLE ADD COLUMN, the expected default + value was not applied and instead a null value was inserted. + + + + + + + Fix failures when an inheritance tree contains foreign child tables + (Etsuro Fujita) + + + + A mix of regular and foreign tables in an inheritance tree resulted in + creation of incorrect plans for UPDATE + and DELETE queries. This led to visible failures in + some cases, notably when there are row-level triggers on a foreign + child table. 
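A sketch, with a hypothetical table, of the identity-column default fix above: COPY and ALTER TABLE ... ADD COLUMN now apply the identity default rather than storing NULL.

    CREATE TABLE items (id int GENERATED BY DEFAULT AS IDENTITY, name text);

    COPY items (name) FROM STDIN;   -- "id" is now filled from the identity sequence
    -- data rows would follow here, terminated by a line containing only \.

    ALTER TABLE items ADD COLUMN tracking_id int GENERATED BY DEFAULT AS IDENTITY;
    -- existing rows now receive generated values instead of NULL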
+ + + + + + + Repair failure with correlated sub-SELECT + inside VALUES inside a LATERAL + subquery (Tom Lane) + + + + + + + Fix could not devise a query plan for the given query + planner failure for some cases involving nested UNION + ALL inside a lateral subquery (Tom Lane) + + + + + + + Allow functional dependency statistics to be used for boolean columns + (Tom Lane) + + + + Previously, although extended statistics could be declared and + collected on boolean columns, the planner failed to apply them. + + + + + + + Avoid underestimating the number of groups emitted by subqueries + containing set-returning functions in their grouping columns (Tom Lane) + + + + Cases similar to SELECT DISTINCT unnest(foo) got a + lower output rowcount estimate in 10.0 than they did in earlier + releases, possibly resulting in unfavorable plan choices. Restore the + prior estimation behavior. + + + + + + + Fix use of triggers in logical replication workers (Petr Jelinek) + + + + + + + Fix logical decoding to correctly clean up disk files for crashed + transactions (Atsushi Torikoshi) + + + + Logical decoding may spill WAL records to disk for transactions + generating many WAL records. Normally these files are cleaned up + after the transaction's commit or abort record arrives; but if + no such record is ever seen, the removal code misbehaved. + + + + + + + Fix walsender timeout failure and failure to respond to interrupts + when processing a large transaction (Petr Jelinek) + + + + + + + Fix race condition during replication origin drop that could allow the + dropping process to wait indefinitely (Tom Lane) + + + + + + + Allow members of the pg_read_all_stats role to see + walsender statistics in the pg_stat_replication + view (Feike Steenbergen) + + + + + + + Show walsenders that are sending base backups as active in + the pg_stat_activity view (Magnus Hagander) + + + + + + + Fix reporting of scram-sha-256 authentication + method in the pg_hba_file_rules view + (Michael Paquier) + + + + Previously this was printed as scram-sha256, + possibly confusing users as to the correct spelling. + + + + + + + Fix has_sequence_privilege() to + support WITH GRANT OPTION tests, + as other privilege-testing functions do (Joe Conway) + + + + + + + In databases using UTF8 encoding, ignore any XML declaration that + asserts a different encoding (Pavel Stehule, Noah Misch) + + + + We always store XML strings in the database encoding, so allowing + libxml to act on a declaration of another encoding gave wrong results. + In encodings other than UTF8, we don't promise to support non-ASCII + XML data anyway, so retain the previous behavior for bug compatibility. + This change affects only xpath() and related + functions; other XML code paths already acted this way. + + + + + + + Provide for forward compatibility with future minor protocol versions + (Robert Haas, Badrul Chowdhury) + + + + Up to now, PostgreSQL servers simply + rejected requests to use protocol versions newer than 3.0, so that + there was no functional difference between the major and minor parts + of the protocol version number. Allow clients to request versions 3.x + without failing, sending back a message showing that the server only + understands 3.0. This makes no difference at the moment, but + back-patching this change should allow speedier introduction of future + minor protocol upgrades. 
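A sketch of the has_sequence_privilege() entry above, using a hypothetical sequence name.

    CREATE SEQUENCE order_id_seq;
    -- WITH GRANT OPTION tests are now accepted, as in the other
    -- privilege-inquiry functions:
    SELECT has_sequence_privilege(current_user, 'order_id_seq',
                                  'USAGE WITH GRANT OPTION');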
+ + + + + + + Allow a client that supports SCRAM channel binding (such as v11 or + later libpq) to connect to a v10 server + (Michael Paquier) + + + + v10 does not have this feature, and the connection-time negotiation + about whether to use it was done incorrectly. + + + + + + + Avoid live-lock in ConditionVariableBroadcast() + (Tom Lane, Thomas Munro) + + + + Given repeatedly-unlucky timing, a process attempting to awaken all + waiters for a condition variable could loop indefinitely. Due to the + limited usage of condition variables in v10, this affects only + parallel index scans and some operations on replication slots. + + + + + + + Clean up waits for condition variables correctly during subtransaction + abort (Robert Haas) + + + + + + + Ensure that child processes that are waiting for a condition variable + will exit promptly if the postmaster process dies (Tom Lane) + + + + + + + Fix crashes in parallel queries using more than one Gather node + (Thomas Munro) + + + + + + + Fix hang in parallel index scan when processing a deleted or half-dead + index page (Amit Kapila) + + + + + + + Avoid crash if parallel bitmap heap scan is unable to allocate a + shared memory segment (Robert Haas) + + + + + + + Cope with failure to start a parallel worker process + (Amit Kapila, Robert Haas) + + + + Parallel query previously tended to hang indefinitely if a worker + could not be started, as the result of fork() + failure or other low-probability problems. + + + + + + + Avoid unnecessary failure when no parallel workers can be obtained + during parallel query startup (Robert Haas) + + + + + + + Fix collection of EXPLAIN statistics from parallel + workers (Amit Kapila, Thomas Munro) + + + + + + + Ensure that query strings passed to parallel workers are correctly + null-terminated (Thomas Munro) + + + + This prevents emitting garbage in postmaster log output from such + workers. + + + + + + + Avoid unsafe alignment assumptions when working + with __int128 (Tom Lane) + + + + Typically, compilers assume that __int128 variables are + aligned on 16-byte boundaries, but our memory allocation + infrastructure isn't prepared to guarantee that, and increasing the + setting of MAXALIGN seems infeasible for multiple reasons. Adjust the + code to allow use of __int128 only when we can tell the + compiler to assume lesser alignment. The only known symptom of this + problem so far is crashes in some parallel aggregation queries. + + + + + + + Prevent stack-overflow crashes when planning extremely deeply + nested set operations + (UNION/INTERSECT/EXCEPT) + (Tom Lane) + + + + + + + Avoid crash during an EvalPlanQual recheck of an indexscan that is the + inner child of a merge join (Tom Lane) + + + + This could only happen during an update or SELECT FOR + UPDATE of a join, when there is a concurrent update of some + selected row. + + + + + + + Fix crash in autovacuum when extended statistics are defined + for a table but can't be computed (Álvaro Herrera) + + + + + + + Fix null-pointer crashes for some types of LDAP URLs appearing + in pg_hba.conf (Thomas Munro) + + + + + + + Prevent out-of-memory failures due to excessive growth of simple hash + tables (Tomas Vondra, Andres Freund) + + + + + + + Fix sample INSTR() functions in the PL/pgSQL + documentation (Yugo Nagata, Tom Lane) + + + + These functions are stated to + be Oracle compatible, but + they weren't exactly. 
In particular, there was a discrepancy in the + interpretation of a negative third parameter: Oracle thinks that a + negative value indicates the last place where the target substring can + begin, whereas our functions took it as the last place where the + target can end. Also, Oracle throws an error for a zero or negative + fourth parameter, whereas our functions returned zero. + + + + The sample code has been adjusted to match Oracle's behavior more + precisely. Users who have copied this code into their applications + may wish to update their copies. + + + + + + + Fix pg_dump to make ACL (permissions), + comment, and security label entries reliably identifiable in archive + output formats (Tom Lane) + + + + The tag portion of an ACL archive entry was usually + just the name of the associated object. Make it start with the object + type instead, bringing ACLs into line with the convention already used + for comment and security label archive entries. Also, fix the + comment and security label entries for the whole database, if present, + to make their tags start with DATABASE so that they + also follow this convention. This prevents false matches in code that + tries to identify large-object-related entries by seeing if the tag + starts with LARGE OBJECT. That could have resulted + in misclassifying entries as data rather than schema, with undesirable + results in a schema-only or data-only dump. + + + + Note that this change has user-visible results in the output + of pg_restore --list. + + + + + + + Rename pg_rewind's + copy_file_range function to avoid conflict + with new Linux system call of that name (Andres Freund) + + + + This change prevents build failures with newer glibc versions. + + + + + + + In ecpg, detect indicator arrays that do + not have the correct length and report an error (David Rader) + + + + + + + Change the behavior of contrib/cube's + cube ~> int + operator to make it compatible with KNN search (Alexander Korotkov) + + + + The meaning of the second argument (the dimension selector) has been + changed to make it predictable which value is selected even when + dealing with cubes of varying dimensionalities. + + + + This is an incompatible change, but since the point of the operator + was to be used in KNN searches, it seems rather useless as-is. + After installing this update, any expression indexes or materialized + views using this operator will need to be reindexed/refreshed. + + + + + + + Avoid triggering a libc assertion + in contrib/hstore, due to use + of memcpy() with equal source and destination + pointers (Tomas Vondra) + + + + + + + Fix incorrect display of tuples' null bitmaps + in contrib/pageinspect (Maksim Milyutin) + + + + + + + Fix incorrect output from contrib/pageinspect's + hash_page_items() function (Masahiko Sawada) + + + + + + + In contrib/postgres_fdw, avoid + outer pathkeys do not match mergeclauses + planner error when constructing a plan involving a remote join + (Robert Haas) + + + + + + + In contrib/postgres_fdw, avoid planner failure + when there are duplicate GROUP BY entries + (Jeevan Chalke) + + + + + + + Provide modern examples of how to auto-start Postgres on macOS + (Tom Lane) + + + + The scripts in contrib/start-scripts/osx use + infrastructure that's been deprecated for over a decade, and which no + longer works at all in macOS releases of the last couple of years. + Add a new subdirectory contrib/start-scripts/macos + containing scripts that use the newer launchd + infrastructure. 
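As the contrib/cube entry above notes, objects that depend on the ~> operator must be rebuilt after updating; the object names below are hypothetical.

    REINDEX INDEX my_cube_knn_idx;
    REFRESH MATERIALIZED VIEW my_cube_matview;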
+ + + + + + + Fix incorrect selection of configuration-specific libraries for + OpenSSL on Windows (Andrew Dunstan) + + + + + + + Support linking to MinGW-built versions of libperl (Noah Misch) + + + + This allows building PL/Perl with some common Perl distributions for + Windows. + + + + + + + Fix MSVC build to test whether 32-bit libperl + needs -D_USE_32BIT_TIME_T (Noah Misch) + + + + Available Perl distributions are inconsistent about what they expect, + and lack any reliable means of reporting it, so resort to a build-time + test on what the library being used actually does. + + + + + + + On Windows, install the crash dump handler earlier in postmaster + startup (Takayuki Tsunakawa) + + + + This may allow collection of a core dump for some early-startup + failures that did not produce a dump before. + + + + + + + On Windows, avoid encoding-conversion-related crashes when emitting + messages very early in postmaster startup (Takayuki Tsunakawa) + + + + + + + Use our existing Motorola 68K spinlock code on OpenBSD as + well as NetBSD (David Carlier) + + + + + + + Add support for spinlocks on Motorola 88K (David Carlier) + + + + + + + Update time zone data files to tzdata + release 2018c for DST law changes in Brazil, Sao Tome and Principe, + plus historical corrections for Bolivia, Japan, and South Sudan. + The US/Pacific-New zone has been removed (it was + only an alias for America/Los_Angeles anyway). + + + + + + + + + + Release 10.1 + + + Release date: + 2017-11-09 + + + + This release contains a variety of fixes from 10.0. + For information about new features in major release 10, see + . + + + + Migration to Version 10.1 + + + A dump/restore is not required for those running 10.X. + + + + However, if you use BRIN indexes, see the fourth changelog entry below. + + + + + Changes + + + + + + + Ensure that INSERT ... ON CONFLICT DO UPDATE checks + table permissions and RLS policies in all cases (Dean Rasheed) + + + + The update path of INSERT ... ON CONFLICT DO UPDATE + requires SELECT permission on the columns of the + arbiter index, but it failed to check for that in the case of an + arbiter specified by constraint name. + In addition, for a table with row level security enabled, it failed to + check updated rows against the table's SELECT + policies (regardless of how the arbiter index was specified). + (CVE-2017-15099) + + + + + + + Fix crash due to rowtype mismatch + in json{b}_populate_recordset() + (Michael Paquier, Tom Lane) + + + + These functions used the result rowtype specified in the FROM + ... AS clause without checking that it matched the actual + rowtype of the supplied tuple value. If it didn't, that would usually + result in a crash, though disclosure of server memory contents seems + possible as well. + (CVE-2017-15098) + + + + + + + Fix sample server-start scripts to become $PGUSER + before opening $PGLOG (Noah Misch) + + + + Previously, the postmaster log file was opened while still running as + root. The database owner could therefore mount an attack against + another system user by making $PGLOG be a symbolic + link to some other file, which would then become corrupted by appending + log messages. + + + + By default, these scripts are not installed anywhere. Users who have + made use of them will need to manually recopy them, or apply the same + changes to their modified versions. If the + existing $PGLOG file is root-owned, it will need to + be removed or renamed out of the way before restarting the server with + the corrected script. 
+ (CVE-2017-12172) + + + + + + + Fix BRIN index summarization to handle concurrent table extension + correctly (Álvaro Herrera) + + + + Previously, a race condition allowed some table rows to be omitted from + the index. It may be necessary to reindex existing BRIN indexes to + recover from past occurrences of this problem. + + + + + + + Fix possible failures during concurrent updates of a BRIN index + (Tom Lane) + + + + These race conditions could result in errors like invalid index + offnum or inconsistent range map. + + + + + + + Prevent logical replication from setting non-replicated columns to + nulls when replicating an UPDATE (Petr Jelinek) + + + + + + + Fix logical replication to fire BEFORE ROW DELETE + triggers when expected (Masahiko Sawada) + + + + Previously, that failed to happen unless the table also had + a BEFORE ROW UPDATE trigger. + + + + + + + Fix crash when logical decoding is invoked from a SPI-using function, + in particular any function written in a PL language + (Tom Lane) + + + + + + + Ignore CTEs when looking up the target table for + INSERT/UPDATE/DELETE, + and prevent matching schema-qualified target table names to trigger + transition table names (Thomas Munro) + + + + This restores the pre-v10 behavior for CTEs attached to DML commands. + + + + + + + Avoid evaluating an aggregate function's argument expression(s) at rows + where its FILTER test fails (Tom Lane) + + + + This restores the pre-v10 (and SQL-standard) behavior. + + + + + + + Fix incorrect query results when multiple GROUPING + SETS columns contain the same simple variable (Tom Lane) + + + + + + + Fix query-lifespan memory leakage while evaluating a set-returning + function in a SELECT's target list (Tom Lane) + + + + + + + Allow parallel execution of prepared statements with generic plans + (Amit Kapila, Kuntal Ghosh) + + + + + + + Fix incorrect parallelization decisions for nested queries + (Amit Kapila, Kuntal Ghosh) + + + + + + + Fix parallel query handling to not fail when a recently-used role is + dropped (Amit Kapila) + + + + + + + Fix crash in parallel execution of a bitmap scan having a BitmapAnd + plan node below a BitmapOr node (Dilip Kumar) + + + + + + + Fix json_build_array(), + json_build_object(), and their jsonb + equivalents to handle explicit VARIADIC arguments + correctly (Michael Paquier) + + + + + + + Fix autovacuum's work item logic to prevent possible + crashes and silent loss of work items (Álvaro Herrera) + + + + + + + Fix corner-case crashes when columns have been added to the end of a + view (Tom Lane) + + + + + + + Record proper dependencies when a view or rule + contains FieldSelect + or FieldStore expression nodes (Tom Lane) + + + + Lack of these dependencies could allow a column or data + type DROP to go through when it ought to fail, + thereby causing later uses of the view or rule to get errors. + This patch does not do anything to protect existing views/rules, + only ones created in the future. + + + + + + + Correctly detect hashability of range data types (Tom Lane) + + + + The planner mistakenly assumed that any range type could be hashed + for use in hash joins or hash aggregation, but actually it must check + whether the range's subtype has hash support. This does not affect any + of the built-in range types, since they're all hashable anyway. + + + + + + + Correctly ignore RelabelType expression nodes + when examining functional-dependency statistics (David Rowley) + + + + This allows, e.g., extended statistics on varchar columns + to be used properly. 
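A sketch of the json_build_object()/json_build_array() VARIADIC fix above.

    -- Explicit VARIADIC arguments are now handled correctly:
    SELECT json_build_object(VARIADIC ARRAY['a', '1', 'b', '2']);
    SELECT json_build_array(VARIADIC ARRAY[1, 2, 3]);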
+ + + + + + + Prevent sharing transition states between ordered-set aggregates + (David Rowley) + + + + This causes a crash with the built-in ordered-set aggregates, and + probably with user-written ones as well. v11 and later will include + provisions for dealing with such cases safely, but in released + branches, just disable the optimization. + + + + + + + Prevent idle_in_transaction_session_timeout from + being ignored when a statement_timeout occurred + earlier (Lukas Fittl) + + + + + + + Fix low-probability loss of NOTIFY messages due to + XID wraparound (Marko Tiikkaja, Tom Lane) + + + + If a session executed no queries, but merely listened for + notifications, for more than 2 billion transactions, it started to miss + some notifications from concurrently-committing transactions. + + + + + + + Reduce the frequency of data flush requests during bulk file copies to + avoid performance problems on macOS, particularly with its new APFS + file system (Tom Lane) + + + + + + + Allow COPY's FREEZE option to + work when the transaction isolation level is REPEATABLE + READ or higher (Noah Misch) + + + + This case was unintentionally broken by a previous bug fix. + + + + + + + Fix AggGetAggref() to return the + correct Aggref nodes to aggregate final + functions whose transition calculations have been merged (Tom Lane) + + + + + + + Fix insufficient schema-qualification in some new queries + in pg_dump + and psql + (Vitaly Burovoy, Tom Lane, Noah Misch) + + + + + + + Avoid use of @> operator + in psql's queries for \d + (Tom Lane) + + + + This prevents problems when the parray_gin + extension is installed, since that defines a conflicting operator. + + + + + + + Fix pg_basebackup's matching of tablespace + paths to canonicalize both paths before comparing (Michael Paquier) + + + + This is particularly helpful on Windows. + + + + + + + Fix libpq to not require user's home + directory to exist (Tom Lane) + + + + In v10, failure to find the home directory while trying to + read ~/.pgpass was treated as a hard error, + but it should just cause that file to not be found. Both v10 and + previous release branches made the same mistake when + reading ~/.pg_service.conf, though this was less + obvious since that file is not sought unless a service name is + specified. + + + + + + + In ecpglib, correctly handle backslashes in string literals depending + on whether standard_conforming_strings is set + (Tsunakawa Takayuki) + + + + + + + Make ecpglib's Informix-compatibility mode ignore fractional digits in + integer input strings, as expected (Gao Zengqi, Michael Meskes) + + + + + + + Fix missing temp-install prerequisites + for check-like Make targets (Noah Misch) + + + + Some non-default test procedures that are meant to work + like make check failed to ensure that the temporary + installation was up to date. + + + + + + + Update time zone data files to tzdata + release 2017c for DST law changes in Fiji, Namibia, Northern Cyprus, + Sudan, Tonga, and Turks & Caicos Islands, plus historical + corrections for Alaska, Apia, Burma, Calcutta, Detroit, Ireland, + Namibia, and Pago Pago. + + + + + + + In the documentation, restore HTML anchors to being upper-case strings + (Peter Eisentraut) + + + + Due to a toolchain change, the 10.0 user manual had lower-case strings + for intrapage anchors, thus breaking some external links into our + website documentation. Return to our previous convention of using + upper-case strings. + + + + + + + + Release 10 Release date: - 2017-??-?? 
(current as of 2017-08-05, commit eccead9ed) + 2017-10-05 Overview - Major enhancements in PostgreSQL 10 include: + Major enhancements in PostgreSQL 10 include: - (to be written) + Logical replication using publish/subscribe + Declarative table partitioning + Improved query parallelism + Significant general performance improvements + Stronger password authentication based on SCRAM-SHA-256 + Improved monitoring and control @@ -34,8 +5845,8 @@ Migration to Version 10 - A dump/restore using , or use of , is required for those wishing to migrate data + A dump/restore using , or use of , is required for those wishing to migrate data from any previous release. @@ -50,16 +5861,17 @@ - Hash indexes must be rebuilt after pg_upgrade-ing - from any previous major PostgreSQL version (Mithun - Cy, Robert Haas) + Hash indexes must be rebuilt after pg_upgrade-ing + from any previous major PostgreSQL version (Mithun + Cy, Robert Haas, Amit Kapila) Major hash index improvements necessitated this requirement. - pg_upgrade will create a script to assist with this. + pg_upgrade will create a script to assist with this. @@ -69,9 +5881,9 @@ 2017-03-17 [88e66d193] Rename "pg_clog" directory to "pg_xact". --> - Rename write-ahead log directory pg_xlog - to pg_wal, and rename transaction - status directory pg_clog to pg_xact + Rename write-ahead log directory pg_xlog + to pg_wal, and rename transaction + status directory pg_clog to pg_xact (Michael Paquier) @@ -92,17 +5904,17 @@ 2017-02-15 [0dfa89ba2] Replace reference to "xlog-method" with "wal-method" in --> - Rename SQL functions, tools, and options that reference - xlog to wal (Robert Haas) + Rename SQL functions, tools, and options that reference + xlog to wal (Robert Haas) - For example, pg_switch_xlog() becomes - pg_switch_wal(), pg_receivexlog - becomes pg_receivewal, and @@ -112,8 +5924,8 @@ 2017-05-11 [d10c626de] Rename WAL-related functions and views to use "lsn" not --> - Rename WAL-related functions and views to use lsn - instead of location (David Rowley) + Rename WAL-related functions and views to use lsn + instead of location (David Rowley) @@ -130,22 +5942,80 @@ --> Change the implementation of set-returning functions appearing in - a query's SELECT list (Andres Freund) + a query's SELECT list (Andres Freund) Set-returning functions are now evaluated before evaluation of scalar - expressions in the SELECT list, much as though they had - been placed in a LATERAL FROM-clause item. This allows + expressions in the SELECT list, much as though they had + been placed in a LATERAL FROM-clause item. This allows saner semantics for cases where multiple set-returning functions are present. If they return different numbers of rows, the shorter results are extended to match the longest result by adding nulls. Previously the results were cycled until they all terminated at the same time, producing a number of rows equal to the least common multiple of the functions' periods. In addition, set-returning functions are now - disallowed within CASE and COALESCE constructs. + disallowed within CASE and COALESCE constructs. For more information - see . + see . + + + + + + + Use standard row constructor syntax in UPDATE ... SET + (column_list) = row_constructor + (Tom Lane) + + + + The row_constructor can now begin with the + keyword ROW; previously that had to be omitted. 
+ If just one column name appears in + the column_list, then + the row_constructor now must use + the ROW keyword, since otherwise it is not a valid + row constructor but just a parenthesized expression. + Also, an occurrence + of table_name.* within + the row_constructor is now expanded into + multiple columns, as occurs in other uses + of row_constructors. + + + + + + + When ALTER TABLE ... ADD PRIMARY KEY marks + columns NOT NULL, that change now propagates to + inheritance child tables as well (Michael Paquier) + + + + + + + Prevent statement-level triggers from firing more than once per + statement (Tom Lane) + + + + Cases involving writable CTEs updating the same table updated by the + containing statement, or by another writable CTE, fired BEFORE + STATEMENT or AFTER STATEMENT triggers more than once. + Also, if there were statement-level triggers on a table affected by a + foreign key enforcement action (such as ON DELETE CASCADE), + they could fire more than once per outer SQL statement. This is + contrary to the SQL standard, so change it. @@ -155,23 +6025,24 @@ 2016-11-18 [67dc4ccbb] Add pg_sequences view 2017-05-15 [f8dc1985f] Fix ALTER SEQUENCE locking 2017-06-01 [3d79013b9] Make ALTER SEQUENCE, including RESTART, fully transactio +2017-09-29 [5cc5987ce] psql: Update \d sequence display --> Move sequences' metadata fields into a new pg_sequence + linkend="catalog-pg-sequence">pg_sequence system catalog (Peter Eisentraut) A sequence relation now stores only the fields that can be modified - by nextval(), that - is last_value, log_cnt, - and is_called. Other sequence properties, such as + by nextval(), that + is last_value, log_cnt, + and is_called. Other sequence properties, such as the starting value and increment, are kept in a corresponding row of - the pg_sequence catalog. - ALTER SEQUENCE updates are now fully transactional, + the pg_sequence catalog. + ALTER SEQUENCE updates are now fully transactional, implying that the sequence is locked until commit. - The nextval() and setval() functions + The nextval() and setval() functions remain nontransactional. @@ -179,11 +6050,23 @@ The main incompatibility introduced by this change is that selecting from a sequence relation now returns only the three fields named above. To obtain the sequence's other properties, applications must - look into pg_sequence. The new system - view pg_sequences + look into pg_sequence. The new system + view pg_sequences can also be used for this purpose; it provides column names that are more compatible with existing code. + + + Also, sequences created for SERIAL columns now generate + positive 32-bit wide values, whereas previous versions generated 64-bit + wide values. This has no visible effect if the values are only stored in + a column. + + + + The output of psql's \d command for a + sequence has been redesigned, too. + @@ -191,17 +6074,17 @@ 2017-01-04 [9a4d51077] Make wal streaming the default mode for pg_basebackup --> - Make stream the - WAL needed to restore the backup by default (Magnus + Make stream the + WAL needed to restore the backup by default (Magnus Hagander) - This changes pg_basebackup's - @@ -231,18 +6114,67 @@ 2017-01-14 [05cd12ed5] pg_ctl: Change default to wait for all actions --> - Make all actions wait + Make all actions wait for completion by default (Peter Eisentraut) - Previously some pg_ctl actions didn't wait for - completion, and required the use of to do so. 
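A sketch of the sequence-metadata change described above, using a hypothetical sequence.

    CREATE SEQUENCE demo_seq INCREMENT BY 5;

    SELECT * FROM demo_seq;            -- now returns only last_value, log_cnt, is_called

    SELECT seqstart, seqincrement      -- other properties moved to the new catalog
      FROM pg_sequence
     WHERE seqrelid = 'demo_seq'::regclass;

    SELECT start_value, increment_by   -- or use the friendlier view
      FROM pg_sequences
     WHERE sequencename = 'demo_seq';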
+ + Change the default value of the + server parameter from pg_log to log + (Andreas Karlsson) + + + + + + + Add configuration option to + specify file name for custom OpenSSL DH parameters (Heikki Linnakangas) + + + + This replaces the hardcoded, undocumented file + name dh1024.pem. Note that dh1024.pem is + no longer examined by default; you must set this option if you want + to use custom DH parameters. + + + + + + + Increase the size of the default DH parameters used for OpenSSL + ephemeral DH ciphers to 2048 bits (Heikki Linnakangas) + + + + The size of the compiled-in DH parameters has been increased from + 1024 to 2048 bits, making DH key exchange more resistant to + brute-force attacks. However, some old SSL implementations, notably + some revisions of Java Runtime Environment version 6, will not accept + DH parameters longer than 1024 bits, and hence will not be able to + connect over SSL. If it's necessary to support such old clients, you + can use custom 1024-bit DH parameters instead of the compiled-in + defaults. See . + + + + + @@ -251,113 +6183,102 @@ - The server parameter - no longer supports off or plain. - The UNENCRYPTED option is no longer supported in - CREATE/ALTER USER ... PASSSWORD. Similarly, the - - Allow multi-dimensional arrays to be passed into PL/Python functions, - and returned as nested Python lists (Alexey Grishchenko, Dave Cramer, - Heikki Linnakangas) + Add + and server + parameters to control parallel queries (Amit Kapila, Robert Haas) - This feature requires a backwards-incompatible change to the handling - of arrays of composite types in PL/Python. Previously, you could - return an array of composite values by writing, e.g., [[col1, - col2], [col1, col2]]; but now that is interpreted as a - two-dimensional array. Composite types in arrays must now be written - as Python tuples, not lists, to resolve the ambiguity; that is, - write [(col1, col2), (col1, col2)] instead. + These replace min_parallel_relation_size, which was + found to be too generic. - Remove PL/Tcl's module auto-loading facility (Tom Lane) + Don't downcase unquoted text + within and related + server parameters (QL Zhuo) - This functionality has been replaced by new server - parameters - and , which are easier to use - and more similar to features available in other PLs. + These settings are really lists of file names, but they were + previously treated as lists of SQL identifiers, which have different + parsing rules. - - Change the default value of the - server parameter from pg_log to log - (Andreas Karlsson) - - - - - - Remove sql_inheritance server parameter (Robert Haas) + Remove sql_inheritance server parameter (Robert Haas) Changing this setting from the default value caused queries referencing - parent tables to not include child tables. The SQL - standard requires such behavior and this has been the default since - PostgreSQL 7.1. + parent tables to not include child tables. The SQL + standard requires them to be included, however, and this has been the + default since PostgreSQL 7.1. - Add - and server - parameters to control parallel queries (Amit Kapila, Robert Haas) + Allow multi-dimensional arrays to be passed into PL/Python functions, + and returned as nested Python lists (Alexey Grishchenko, Dave Cramer, + Heikki Linnakangas) - These replace min_parallel_relation_size, which was - found to be too generic. + This feature requires a backwards-incompatible change to the handling + of arrays of composite types in PL/Python. 
Previously, you could + return an array of composite values by writing, e.g., [[col1, + col2], [col1, col2]]; but now that is interpreted as a + two-dimensional array. Composite types in arrays must now be written + as Python tuples, not lists, to resolve the ambiguity; that is, + write [(col1, col2), (col1, col2)] instead. - Don't downcase unquoted text - within and related - server parameters (QL Zhuo) + Remove PL/Tcl's module auto-loading facility (Tom Lane) - - - These settings are really lists of file names, but they were - previously treated as lists of SQL identifiers, which have different - parsing rules. + + + This functionality has been replaced by new server + parameters + and , which are easier to use + and more similar to features available in other PLs. @@ -366,13 +6287,13 @@ 2016-10-12 [64f3524e2] Remove pg_dump/pg_dumpall support for dumping from pre-8 --> - Remove pg_dump/pg_dumpall support + Remove pg_dump/pg_dumpall support for dumping from pre-8.0 servers (Tom Lane) Users needing to dump from pre-8.0 servers will need to use dump - programs from PostgreSQL 9.6 or earlier. The + programs from PostgreSQL 9.6 or earlier. The resulting output should still load successfully into newer servers. @@ -386,9 +6307,9 @@ - This removes configure's @@ -397,61 +6318,12 @@ 2016-10-11 [2f1eaf87e] Drop server support for FE/BE protocol version 1.0. --> - Remove support for client/server protocol version 1.0 (Tom Lane) + Remove server support for client/server protocol version 1.0 (Tom Lane) This protocol hasn't had client support - since PostgreSQL 6.3. - - - - - - - Add configuration option to - specify file name for custom OpenSSL DH parameters (Heikki Linnakangas) - - - - This replaces the hardcoded, undocumented file - name dh1024.pem. Note that dh1024.pem is - no longer examined by default; you must set this option to use custom - DH parameters. - - - - - - - Increase the size of the default DH parameters used for OpenSSL - ephemeral DH ciphers to 2048 bits (Heikki Linnakangas) - - - - The size of the compiled-in DH parameters has been increased from - 1024 to 2048 bits, making DH key exchange more resistant to - brute-force attacks. However, some old SSL implementations, notably - some revisions of Java Runtime Environment version 6, will not accept - DH parameters longer than 1024 bits, and hence will not be able to - connect over SSL. If it's necessary to support such old clients, you - can use custom 1024-bit DH parameters instead of the compiled-in - defaults. See . - - - - - - - When ALTER TABLE ... ADD PRIMARY KEY marks - columns NOT NULL, that change now propagates to - inheritance child tables as well (Michael Paquier) + since PostgreSQL 6.3. @@ -460,12 +6332,12 @@ 2017-02-13 [7ada2d31f] Remove contrib/tsearch2. --> - Remove contrib/tsearch2 module (Robert Haas) + Remove contrib/tsearch2 module (Robert Haas) This module provided compatibility with the version of full text - search that shipped in pre-8.3 PostgreSQL releases. + search that shipped in pre-8.3 PostgreSQL releases. @@ -474,14 +6346,14 @@ 2017-03-23 [50c956add] Remove createlang and droplang --> - Remove createlang and droplang + Remove createlang and droplang command-line applications (Peter Eisentraut) - These were deprecated since PostgreSQL 9.1. Instead, - use CREATE EXTENSION and DROP EXTENSION - directly. + These had been deprecated since PostgreSQL 9.1. + Instead, use CREATE EXTENSION and DROP + EXTENSION directly. 
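Per the createlang/droplang entry above, the removed utilities are replaced by ordinary SQL commands, for example:

    CREATE EXTENSION plperl;   -- formerly: createlang plperl dbname
    DROP EXTENSION plperl;     -- formerly: droplang plperl dbname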
@@ -597,18 +6469,29 @@ 2016-12-05 [2b959d495] Reduce the default for max_worker_processes back to 8. --> - Add server parameter + Add server parameter to limit the number of worker processes that can be used for query parallelism (Julien Rouhaud) This parameter can be set lower than to reserve worker processes + linkend="guc-max-worker-processes"/> to reserve worker processes for purposes other than parallel queries. + + + + Enable parallelism by default by changing the default setting + of to + 2. + + + @@ -620,25 +6503,41 @@ - Add SP-GiST index support for INET and - CIDR data types (Emre Hasegeli) + Add write-ahead logging support to hash indexes (Amit Kapila) - These data types already had GiST support. + This makes hash indexes crash-safe and replicatable. + The former warning message about their use is removed. - Reduce page locking during vacuuming of GIN indexes - (Andrey Borodin) + Improve hash index performance (Amit Kapila, Mithun Cy, Ashutosh + Sharma) + + + + + + + Add SP-GiST index support for INET and + CIDR data types (Emre Hasegeli) @@ -647,14 +6546,14 @@ 2017-04-01 [7526e1022] BRIN auto-summarization --> - Add option to allow BRIN index summarization to happen + Add option to allow BRIN index summarization to happen more aggressively (Álvaro Herrera) - Specifically, a new CREATE - INDEX option allows auto-summarization of the - previous BRIN page range when a new page + A new CREATE + INDEX option enables auto-summarization of the + previous BRIN page range when a new page range is created. @@ -664,18 +6563,18 @@ 2017-04-01 [c655899ba] BRIN de-summarization --> - Add functions to remove and re-add BRIN - summarization for BRIN index ranges (Álvaro + Add functions to remove and re-add BRIN + summarization for BRIN index ranges (Álvaro Herrera) - The new SQL function brin_summarize_range() - updates BRIN index summarization for a specified - range and brin_desummarize_range() removes it. + The new SQL function brin_summarize_range() + updates BRIN index summarization for a specified + range and brin_desummarize_range() removes it. This is helpful to update summarization of a range that is now - smaller due to UPDATEs and DELETEs. + smaller due to UPDATEs and DELETEs. @@ -684,7 +6583,7 @@ 2017-04-06 [7e534adcd] Fix BRIN cost estimation --> - Improve accuracy in determining if a BRIN index scan + Improve accuracy in determining if a BRIN index scan is beneficial (David Rowley, Emre Hasegeli) @@ -694,70 +6593,22 @@ 2016-09-09 [b1328d78f] Invent PageIndexTupleOverwrite, and teach BRIN and GiST --> - Allow faster GiST inserts and updates by reusing + Allow faster GiST inserts and updates by reusing index space more efficiently (Andrey Borodin) - - - - <link linkend="indexes-types">Hash Indexes</link> - - - - - - - Add write-ahead logging support to hash indexes (Amit Kapila) - - - - This makes hash indexes crash-safe and replicatable. - The former warning message about their use is removed. - - - - - - - Improve hash index bucket split performance by reducing locking - requirements (Amit Kapila, Mithun Cy) - - - - Also cache hash index meta-information for faster lookups. - - - - - - - Improve efficiency of hash index growth (Amit Kapila, Mithun Cy) - - - - + - - Allow page-at-a-time hash index pruning (Ashutosh Sharma) - - - - + + Reduce page locking during vacuuming of GIN indexes + (Andrey Borodin) + + - + @@ -769,23 +6620,6 @@ - - Reduce locking required for adding values to enum types (Andrew - Dunstan, Tom Lane) - - - - Previously it was impossible to run ALTER TYPE ... 
ADD - VALUE in a transaction block unless the enum type was created - in the same block. Now, only references to uncommitted enum - values from other transactions are prohibited. - - - - - @@ -796,7 +6630,7 @@ For example, changing a table's setting can now be done + linkend="guc-effective-io-concurrency"/> setting can now be done with a more lightweight lock. @@ -813,8 +6647,8 @@ Lock promotion can now be controlled through two new server parameters, and - . + linkend="guc-max-pred-locks-per-relation"/> and + . @@ -841,9 +6675,9 @@ New commands are CREATE STATISTICS, - ALTER STATISTICS, and - DROP STATISTICS. + linkend="sql-createstatistics">CREATE STATISTICS, + ALTER STATISTICS, and + DROP STATISTICS. This feature is helpful in estimating query memory usage and when combining the statistics from individual columns. @@ -880,9 +6714,9 @@ --> Speed up aggregate functions that calculate a running sum - using numeric-type arithmetic, including some variants - of SUM(), AVG(), - and STDDEV() (Heikki Linnakangas) + using numeric-type arithmetic, including some variants + of SUM(), AVG(), + and STDDEV() (Heikki Linnakangas) @@ -966,41 +6800,41 @@ --> Allow explicit control - over EXPLAIN's display + over EXPLAIN's display of planning and execution time (Ashutosh Bapat) By default planning and execution time are displayed by - EXPLAIN ANALYZE and are not displayed in other cases. - The new EXPLAIN option SUMMARY allows + EXPLAIN ANALYZE and are not displayed in other cases. + The new EXPLAIN option SUMMARY allows explicit control of this. - Properly update the statistics collector during REFRESH MATERIALIZED - VIEW (Jim Mlodgenski) + Add default monitoring roles (Dave Page) + + + + New roles pg_monitor, pg_read_all_settings, + pg_read_all_stats, and pg_stat_scan_tables + allow simplified permission configuration. - Add default monitoring roles (Dave Page) - - - - New roles pg_monitor, pg_read_all_settings, - pg_read_all_stats, and pg_stat_scan_tables - allow simplified permission configuration. + Properly update the statistics collector during REFRESH MATERIALIZED + VIEW (Jim Mlodgenski) @@ -1016,7 +6850,7 @@ 2016-10-17 [7d3235ba4] By default, set log_line_prefix = '%m [%p] '. --> - Change the default value of + Change the default value of to include current timestamp (with milliseconds) and the process ID in each line of postmaster log output (Christoph Berg) @@ -1031,14 +6865,14 @@ 2017-03-16 [befd73c50] Add pg_ls_logdir() and pg_ls_waldir() functions. --> - Add functions to return the log and WAL directory + Add functions to return the log and WAL directory contents (Dave Page) The new functions - are pg_ls_logdir() - and pg_ls_waldir() + are pg_ls_logdir() + and pg_ls_waldir() and can be executed by non-superusers with the proper permissions. @@ -1050,7 +6884,7 @@ --> Add function pg_current_logfile() + linkend="functions-info-session-table">pg_current_logfile() to read logging collector's current stderr and csvlog output file names (Gilles Darold) @@ -1082,7 +6916,7 @@ - These are now DEBUG1-level messages. + These are now DEBUG1-level messages. @@ -1093,12 +6927,12 @@ Reduce message verbosity of lower-numbered debug levels controlled by - (Robert Haas) + (Robert Haas) This also changes the verbosity of debug levels. + linkend="guc-client-min-messages"/> debug levels. 
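For illustration, the new directory and log-file inspection functions described above can be used as follows (superuser or explicitly granted privileges are required):
<programlisting>
-- List the contents of the server's log and WAL directories
SELECT * FROM pg_ls_logdir();
SELECT * FROM pg_ls_waldir();

-- Show the logging collector's current output file, if the collector is running
SELECT pg_current_logfile();
</programlisting>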
@@ -1107,32 +6941,24 @@ - <link linkend="pg-stat-activity-view"><structname>pg_stat_activity</></link> + <link linkend="pg-stat-activity-view"><structname>pg_stat_activity</structname></link> - Add pg_stat_activity reporting of latch wait states - (Michael Paquier, Robert Haas) + Add pg_stat_activity reporting of low-level wait + states (Michael Paquier, Robert Haas, Rushabh Lathia) - This includes the remaining wait events, like client reads, - client writes, and synchronous replication. - - - - - - - Add pg_stat_activity reporting of waits on reads, - writes, and fsyncs (Rushabh Lathia) + This change enables reporting of numerous low-level wait conditions, + including latch waits, file reads/writes/fsyncs, client reads/writes, + and synchronous replication. @@ -1143,13 +6969,13 @@ --> Show auxiliary processes, background workers, and walsender - processes in pg_stat_activity (Kuntal Ghosh, + processes in pg_stat_activity (Kuntal Ghosh, Michael Paquier) This simplifies monitoring. A new - column backend_type identifies the process type. + column backend_type identifies the process type. @@ -1158,7 +6984,7 @@ 2017-02-22 [4c728f382] Pass the source text for a parallel query to the workers --> - Allow pg_stat_activity to show the SQL query + Allow pg_stat_activity to show the SQL query being executed by parallel workers (Rafia Sabih) @@ -1169,9 +6995,9 @@ --> Rename - pg_stat_activity.wait_event_type - values LWLockTranche and - LWLockNamed to LWLock (Robert Haas) + pg_stat_activity.wait_event_type + values LWLockTranche and + LWLockNamed to LWLock (Robert Haas) @@ -1185,7 +7011,7 @@ - <acronym>Authentication</> + <acronym>Authentication</acronym> @@ -1197,13 +7023,13 @@ 2017-04-18 [c727f120f] Rename "scram" to "scram-sha-256" in pg_hba.conf and pas --> - Add SCRAM-SHA-256 + Add SCRAM-SHA-256 support for password negotiation and storage (Michael Paquier, Heikki Linnakangas) - This provides better security than the existing md5 + This provides better security than the existing md5 negotiation and storage method. @@ -1213,8 +7039,8 @@ 2016-09-28 [babe05bc2] Turn password_encryption GUC into an enum. --> - Change the server parameter - from boolean to enum (Michael Paquier) + Change the server parameter + from boolean to enum (Michael Paquier) @@ -1228,8 +7054,8 @@ --> Add view pg_hba_file_rules - to display the contents of pg_hba.conf (Haribabu + linkend="view-pg-hba-file-rules">pg_hba_file_rules + to display the contents of pg_hba.conf (Haribabu Kommi) @@ -1243,11 +7069,11 @@ 2017-03-22 [6b76f1bb5] Support multiple RADIUS servers --> - Support multiple RADIUS servers (Magnus Hagander) + Support multiple RADIUS servers (Magnus Hagander) - All the RADIUS related parameters are now plural and + All the RADIUS related parameters are now plural and support a comma-separated list of servers. @@ -1268,16 +7094,16 @@ 2017-01-04 [6667d9a6d] Re-allow SSL passphrase prompt at server start, but not --> - Allow SSL configuration to be updated during + Allow SSL configuration to be updated during configuration reload (Andreas Karlsson, Tom Lane) - This allows SSL to be reconfigured without a server - restart, by using pg_ctl reload, SELECT - pg_reload_conf(), or sending a SIGHUP signal. - However, reloading the SSL configuration does not work - if the server's SSL key requires a passphrase, as there + This allows SSL to be reconfigured without a server + restart, by using pg_ctl reload, SELECT + pg_reload_conf(), or sending a SIGHUP signal. 
+ However, reloading the SSL configuration does not work + if the server's SSL key requires a passphrase, as there is no way to re-prompt for the passphrase. The original configuration will apply for the life of the postmaster in that case. @@ -1290,7 +7116,7 @@ --> Make the maximum value of effectively unlimited + linkend="guc-bgwriter-lru-maxpages"/> effectively unlimited (Jim Nasby) @@ -1309,8 +7135,8 @@ 2017-03-27 [1b02be21f] Fsync directory after creating or unlinking file. --> - Perform an fsync on the directory after creating or unlinking files - (Michael Paquier) + After creating or unlinking files, perform an fsync on their parent + directory (Michael Paquier) @@ -1321,7 +7147,7 @@ - <link linkend="wal">Write-Ahead Log</> (<acronym>WAL</>) + <link linkend="wal">Write-Ahead Log</link> (<acronym>WAL</acronym>) @@ -1330,7 +7156,7 @@ 2016-12-22 [6ef2eba3f] Skip checkpoints, archiving on idle systems. --> - Prevent unnecessary checkpoints and WAL archiving on + Prevent unnecessary checkpoints and WAL archiving on otherwise-idle systems (Michael Paquier) @@ -1341,8 +7167,8 @@ 2017-03-14 [bb4a39637] hash: Support WAL consistency checking. --> - Add server parameter - to add details to WAL that can be sanity-checked on + Add server parameter + to add details to WAL that can be sanity-checked on the standby (Kuntal Ghosh, Robert Haas) @@ -1356,14 +7182,14 @@ 2017-04-05 [00b6b6feb] Allow -\-with-wal-segsize=n up to n=1024MB --> - Increase the maximum configurable WAL segment size + Increase the maximum configurable WAL segment size to one gigabyte (Beena Emerson) - Larger WAL segment sizes allows for fewer - invocations and fewer - WAL files to manage. + A larger WAL segment size allows for fewer + invocations and fewer + WAL files to manage. @@ -1388,13 +7214,13 @@ --> Add the ability to logically - replicate tables to standby servers (Petr Jelinek) + replicate tables to standby servers (Petr Jelinek) Logical replication allows more flexibility than physical replication does, including replication between different major - versions of PostgreSQL and selective-table + versions of PostgreSQL and selective replication. @@ -1404,15 +7230,15 @@ 2016-12-19 [3901fd70c] Support quorum-based synchronous replication. --> - Allow waiting for commit acknowledgement from standby + Allow waiting for commit acknowledgment from standby servers irrespective of the order they appear in (Masahiko Sawada) + linkend="guc-synchronous-standby-names"/> (Masahiko Sawada) Previously the server always waited for the active standbys that - appeared first in synchronous_standby_names. The new - synchronous_standby_names keyword ANY allows + appeared first in synchronous_standby_names. The new + synchronous_standby_names keyword ANY allows waiting for any number of standbys irrespective of their ordering. This is known as quorum commit. @@ -1430,9 +7256,9 @@ Specifically, the defaults were changed for , , - , and to make them suitable for these usages + linkend="guc-wal-level"/>, , + , and to make them suitable for these usages out-of-the-box. @@ -1443,14 +7269,14 @@ --> Enable replication from localhost connections by default in - pg_hba.conf + pg_hba.conf (Michael Paquier) - Previously pg_hba.conf's replication connection - lines were commented out. This is particularly useful for - . + Previously pg_hba.conf's replication connection + lines were commented out by default. This is particularly useful for + . 
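A minimal sketch of the quorum-commit syntax described above, using hypothetical standby names:
<programlisting>
-- Wait for commit acknowledgment from any two of three named standbys
ALTER SYSTEM SET synchronous_standby_names = 'ANY 2 (standby1, standby2, standby3)';
SELECT pg_reload_conf();
</programlisting>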
@@ -1460,13 +7286,13 @@ --> Add columns to pg_stat_replication + linkend="pg-stat-replication-view">pg_stat_replication to report replication delay times (Thomas Munro) - The new columns are write_lag, - flush_lag, and replay_lag. + The new columns are write_lag, + flush_lag, and replay_lag. @@ -1476,8 +7302,8 @@ --> Allow specification of the recovery stopping point by Log Sequence - Number (LSN) in - recovery.conf + Number (LSN) in + recovery.conf (Michael Paquier) @@ -1490,15 +7316,16 @@ Allow users to disable pg_stop_backup()'s - waiting for all WAL to be archived (David Steele) + linkend="functions-admin">pg_stop_backup()'s + waiting for all WAL to be archived (David Steele) - An optional second argument to pg_stop_backup() + An optional second argument to pg_stop_backup() controls that behavior. @@ -1509,7 +7336,7 @@ --> Allow creation of temporary replication slots + linkend="functions-replication-table">temporary replication slots (Petr Jelinek) @@ -1553,45 +7380,25 @@ --> Add XMLTABLE - function that converts XML-formatted data into a row set + linkend="functions-xml-processing-xmltable">XMLTABLE + function that converts XML-formatted data into a row set (Pavel Stehule, Álvaro Herrera) - - Allow standard row constructor syntax in UPDATE ... SET - (column_list) = row_constructor - (Tom Lane) - - - - The row_constructor can now begin with the - keyword ROW; previously that had to be omitted. Also, - an occurrence of table_name.* - within the row_constructor is now expanded into - multiple columns, as in other uses - of row_constructors. - - - - - Fix regular expressions' character class handling for large character - codes, particularly Unicode characters above U+7FF + codes, particularly Unicode characters above U+7FF (Tom Lane) Previously, such characters were never recognized as belonging to - locale-dependent character classes such as [[:alpha:]]. + locale-dependent character classes such as [[:alpha:]]. @@ -1609,8 +7416,8 @@ 2016-12-07 [f0e44751d] Implement table partitioning. --> - Add table partitioning - syntax that automatically creates partition constraints and + Add table partitioning + syntax that automatically creates partition constraints and handles routing of tuple insertions and updates (Amit Langote) @@ -1626,8 +7433,9 @@ 2017-03-31 [597027163] Add transition table support to plpgsql. --> - Add AFTER trigger - transition tables to record changed rows (Kevin Grittner) + Add AFTER trigger + transition tables to record changed rows (Kevin Grittner, Thomas + Munro) @@ -1641,13 +7449,13 @@ 2016-12-05 [093129c9d] Add support for restrictive RLS policies --> - Allow restrictive row-level - security policies (Stephen Frost) + Allow restrictive row-level + security policies (Stephen Frost) Previously all security policies were permissive, meaning that any - matching policy allowed access. Optional restrictive policies must + matching policy allowed access. A restrictive policy must match for access to be granted. These policy types can be combined. @@ -1658,16 +7466,16 @@ --> When creating a foreign-key constraint, check - for REFERENCES permission on only the referenced table + for REFERENCES permission on only the referenced table (Tom Lane) - Previously REFERENCES permission on the referencing + Previously REFERENCES permission on the referencing table was also required. This appears to have stemmed from a misreading of the SQL standard. 
Since creating a foreign key (or any other type of) constraint requires ownership privilege on the - constrained table, additionally requiring REFERENCES + constrained table, additionally requiring REFERENCES permission seems rather pointless. @@ -1677,12 +7485,12 @@ 2017-03-28 [ab89e465c] Altering default privileges on schemas --> - Allow default - permissions on schemas (Matheus Oliveira) + Allow default + permissions on schemas (Matheus Oliveira) - This is done using the ALTER DEFAULT PRIVILEGES command. + This is done using the ALTER DEFAULT PRIVILEGES command. @@ -1691,8 +7499,8 @@ 2017-02-10 [2ea5b06c7] Add CREATE SEQUENCE AS clause --> - Add CREATE SEQUENCE - AS command to create a sequence matching an integer data type + Add CREATE SEQUENCE + AS command to create a sequence matching an integer data type (Peter Eisentraut) @@ -1707,13 +7515,13 @@ 2016-11-10 [279c439c7] Support "COPY view FROM" for views with INSTEAD OF INSER --> - Allow COPY view - FROM source on views with INSTEAD - INSERT triggers (Haribabu Kommi) + Allow COPY view + FROM source on views with INSTEAD + INSERT triggers (Haribabu Kommi) - The triggers are fed the data rows read by COPY. + The triggers are fed the data rows read by COPY. @@ -1723,14 +7531,14 @@ --> Allow the specification of a function name without arguments in - DDL commands, if it is unique (Peter Eisentraut) + DDL commands, if it is unique (Peter Eisentraut) - For example, allow DROP - FUNCTION on a function name without arguments if there + For example, allow DROP + FUNCTION on a function name without arguments if there is only one function with that name. This behavior is required by the - SQL standard. + SQL standard. @@ -1740,7 +7548,7 @@ --> Allow multiple functions, operators, and aggregates to be dropped - with a single DROP command (Peter Eisentraut) + with a single DROP command (Peter Eisentraut) @@ -1750,10 +7558,10 @@ 2017-03-20 [b6fb534f1] Add IF NOT EXISTS for CREATE SERVER and CREATE USER MAPP --> - Support IF NOT EXISTS - in CREATE SERVER, - CREATE USER MAPPING, - and CREATE COLLATION + Support IF NOT EXISTS + in CREATE SERVER, + CREATE USER MAPPING, + and CREATE COLLATION (Anastasia Lubennikova, Peter Eisentraut) @@ -1764,14 +7572,14 @@ 2017-03-03 [9eb344faf] Allow vacuums to report oldestxmin --> - Make VACUUM VERBOSE report + Make VACUUM VERBOSE report the number of skipped frozen pages and oldest xmin (Masahiko Sawada, Simon Riggs) This information is also included in output. + linkend="guc-log-autovacuum-min-duration"/> output. @@ -1780,7 +7588,7 @@ 2017-01-23 [7e26e02ee] Prefetch blocks during lazy vacuum's truncation scan --> - Improve speed of VACUUM's removal of trailing empty + Improve speed of VACUUM's removal of trailing empty heap pages (Claudio Freire, Álvaro Herrera) @@ -1799,13 +7607,13 @@ 2017-03-31 [e306df7f9] Full Text Search support for JSON and JSONB --> - Add full text search support for JSON and JSONB + Add full text search support for JSON and JSONB (Dmitry Dolgov) - The functions ts_headline() and - to_tsvector() can now be used on these data types. + The functions ts_headline() and + to_tsvector() can now be used on these data types. @@ -1814,15 +7622,15 @@ 2017-03-15 [c7a9fa399] Add support for EUI-64 MAC addresses as macaddr8 --> - Add support for EUI-64 MAC addresses, as a - new data type macaddr8 + Add support for EUI-64 MAC addresses, as a + new data type macaddr8 (Haribabu Kommi) This complements the existing support - for EUI-48 MAC addresses - as macaddr. + for EUI-48 MAC addresses + (type macaddr). 
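A short example of the JSON and JSONB full text search support mentioned above, using a hypothetical document:
<programlisting>
-- to_tsvector() now accepts json and jsonb documents directly
SELECT to_tsvector('english', '{"title": "Full text search on JSON documents"}'::jsonb);

-- The resulting tsvector can be matched with a tsquery as usual
SELECT to_tsvector('english', '{"title": "Full text search on JSON documents"}'::jsonb)
       @@ to_tsquery('english', 'json & document');
</programlisting>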
@@ -1831,13 +7639,13 @@ 2017-04-06 [321732705] Identity columns --> - Add identity columns for + Add identity columns for assigning a numeric value to columns on insert (Peter Eisentraut) - These are similar to SERIAL columns, but are - SQL standard compliant. + These are similar to SERIAL columns, but are + SQL standard compliant. @@ -1846,13 +7654,13 @@ 2016-09-07 [0ab9c56d0] Support renaming an existing value of an enum type. --> - Allow ENUM values to be + Allow ENUM values to be renamed (Dagfinn Ilmari Mannsåker) - This uses the syntax ALTER - TYPE ... RENAME VALUE. + This uses the syntax ALTER + TYPE ... RENAME VALUE. @@ -1862,14 +7670,14 @@ --> Properly treat array pseudotypes - (anyarray) as arrays in to_json() - and to_jsonb() (Andrew Dunstan) + (anyarray) as arrays in to_json() + and to_jsonb() (Andrew Dunstan) - Previously columns declared as anyarray (particularly those - in the pg_stats view) were converted to JSON + Previously columns declared as anyarray (particularly those + in the pg_stats view) were converted to JSON strings rather than arrays. @@ -1880,16 +7688,16 @@ --> Add operators for multiplication and division - of money values - with int8 values (Peter Eisentraut) + of money values + with int8 values (Peter Eisentraut) - Previously such cases would result in converting the int8 - values to float8 and then using - the money-and-float8 operators. The new behavior + Previously such cases would result in converting the int8 + values to float8 and then using + the money-and-float8 operators. The new behavior avoids possible precision loss. But note that division - of money by int8 now truncates the quotient, like + of money by int8 now truncates the quotient, like other integer-division cases, while the previous behavior would have rounded. @@ -1900,7 +7708,7 @@ 2016-09-14 [656df624c] Add overflow checks to money type input function --> - Check for overflow in the money type's input function + Check for overflow in the money type's input function (Peter Eisentraut) @@ -1920,12 +7728,12 @@ --> Add simplified regexp_match() + linkend="functions-posix-regexp">regexp_match() function (Emre Hasegeli) - This is similar to regexp_matches(), but it only + This is similar to regexp_matches(), but it only returns results from the first match so it does not need to return a set, making it easier to use for simple cases. @@ -1936,8 +7744,8 @@ 2017-01-18 [d00ca333c] Implement array version of jsonb_delete and operator --> - Add a version of jsonb's delete operator that takes + Add a version of jsonb's delete operator that takes an array of keys to delete (Magnus Hagander) @@ -1947,7 +7755,7 @@ 2017-04-06 [cf35346e8] Make json_populate_record and friends operate recursivel --> - Make json_populate_record() + Make json_populate_record() and related functions process JSON arrays and objects recursively (Nikita Glukhov) @@ -1957,7 +7765,7 @@ properly converted from JSON arrays, and composite-type fields are properly converted from JSON objects. Previously, such cases would fail because the text representation of the JSON value would be fed - to array_in() or record_in(), and its + to array_in() or record_in(), and its syntax would not match what those input functions expect. 
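A minimal sketch of the recursive json_populate_record() behavior described above, using a hypothetical composite type:
<programlisting>
CREATE TYPE item AS (name text, tags text[]);

-- The array field is now converted from the JSON array itself rather than
-- from its text representation, so this succeeds:
SELECT * FROM json_populate_record(NULL::item,
    '{"name": "widget", "tags": ["blue", "small"]}');
</programlisting>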
@@ -1968,14 +7776,14 @@ --> Add function txid_current_ifassigned() - to return the current transaction ID or NULL if no + linkend="functions-txid-snapshot">txid_current_if_assigned() + to return the current transaction ID or NULL if no transaction ID has been assigned (Craig Ringer) This is different from txid_current(), + linkend="functions-txid-snapshot">txid_current(), which always returns a transaction ID, assigning one if necessary. Unlike that function, this function can be run on standby servers. @@ -1987,14 +7795,14 @@ --> Add function txid_status() + linkend="functions-txid-snapshot">txid_status() to check if a transaction was committed (Craig Ringer) This is useful for checking after an abrupt disconnection whether your previous transaction committed and you just didn't receive - the acknowledgement. + the acknowledgment. @@ -2004,8 +7812,8 @@ --> Allow make_date() - to interpret negative years as BC years (Álvaro + linkend="functions-datetime-table">make_date() + to interpret negative years as BC years (Álvaro Herrera) @@ -2015,14 +7823,16 @@ 2016-09-28 [d3cd36a13] Make to_timestamp() and to_date() range-check fields of --> - Make to_timestamp() and to_date() reject + Make to_timestamp() + and to_date() reject out-of-range input fields (Artur Zakirov) For example, - previously to_date('2009-06-40','YYYY-MM-DD') was - accepted and returned 2009-07-10. It will now generate + previously to_date('2009-06-40','YYYY-MM-DD') was + accepted and returned 2009-07-10. It will now generate an error. @@ -2041,7 +7851,7 @@ 2017-03-27 [70ec3f1f8] PL/Python: Add cursor and execute methods to plan object --> - Allow PL/Python's cursor() and execute() + Allow PL/Python's cursor() and execute() functions to be called as methods of their plan-object arguments (Peter Eisentraut) @@ -2056,7 +7866,7 @@ 2016-12-13 [55caaaeba] Improve handling of array elements as getdiag_targets an --> - Allow PL/pgSQL's GET DIAGNOSTICS statement to retrieve + Allow PL/pgSQL's GET DIAGNOSTICS statement to retrieve values into array elements (Tom Lane) @@ -2069,7 +7879,7 @@ - <link linkend="pltcl">PL/Tcl</> + <link linkend="pltcl">PL/Tcl</link> @@ -2079,7 +7889,7 @@ --> Allow PL/Tcl functions to return composite types and sets - (Jim Nasby) + (Karl Lehenbauer) @@ -2102,8 +7912,8 @@ 2017-03-07 [0d2b1f305] Invent start_proc parameters for PL/Tcl. --> - Add server parameters - and , to allow initialization + Add server parameters + and , to allow initialization functions to be called on PL/Tcl startup (Tom Lane) @@ -2126,7 +7936,7 @@ --> Allow specification of multiple - host names or addresses in libpq connection strings and URIs + host names or addresses in libpq connection strings and URIs (Robert Haas, Heikki Linnakangas) @@ -2141,7 +7951,7 @@ --> Allow libpq connection strings and URIs to request a read/write host, + linkend="libpq-connect-target-session-attrs">read/write host, that is a master server rather than a standby server (Victor Wagner, Mithun Cy) @@ -2149,7 +7959,7 @@ This is useful when multiple host names are specified. It is controlled by libpq connection parameter - . 
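Two small examples of the make_date() and to_date() changes described above:
<programlisting>
-- Negative years are now interpreted as BC
SELECT make_date(-44, 3, 15);                -- 0044-03-15 BC

-- Out-of-range field values are now rejected
SELECT to_date('2009-06-40', 'YYYY-MM-DD');  -- error (previously returned 2009-07-10)
</programlisting>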
@@ -2158,7 +7968,7 @@ 2017-01-24 [ba005f193] Allow password file name to be specified as a libpq conn --> - Allow the password file name + Allow the password file name to be specified as a libpq connection parameter (Julian Markwort) @@ -2173,17 +7983,17 @@ --> Add function PQencryptPasswordConn() + linkend="libpq-pqencryptpasswordconn">PQencryptPasswordConn() to allow creation of more types of encrypted passwords on the client side (Michael Paquier, Heikki Linnakangas) - Previously only MD5-encrypted passwords could be created + Previously only MD5-encrypted passwords could be created using PQencryptPassword(). + linkend="libpq-pqencryptpassword">PQencryptPassword(). This new function can also create SCRAM-SHA-256-encrypted + linkend="auth-pg-hba-conf">SCRAM-SHA-256-encrypted passwords. @@ -2193,13 +8003,13 @@ 2016-08-16 [a7b5573d6] Remove separate version numbering for ecpg preprocessor. --> - Change ecpg preprocessor version from 4.12 to 10 + Change ecpg preprocessor version from 4.12 to 10 (Tom Lane) - Henceforth the ecpg version will match - the PostgreSQL distribution version number. + Henceforth the ecpg version will match + the PostgreSQL distribution version number. @@ -2211,7 +8021,7 @@ Client Applications - <xref linkend="APP-PSQL"> + <xref linkend="app-psql"/> @@ -2222,14 +8032,14 @@ 2017-04-02 [68dba97a4] Document psql's behavior of recalling the previously exe --> - Add conditional branch support to psql (Corey + Add conditional branch support to psql (Corey Huinker) - This feature adds psql - meta-commands \if, \elif, \else, - and \endif. This is primarily helpful for scripting. + This feature adds psql + meta-commands \if, \elif, \else, + and \endif. This is primarily helpful for scripting. @@ -2238,39 +8048,23 @@ 2017-03-07 [b2678efd4] psql: Add \gx command --> - Add psql \gx meta-command to execute - (\g) a query in expanded mode (\x) + Add psql \gx meta-command to execute + (\g) a query in expanded mode (\x) (Christoph Berg) - - Improve psql's \d (display relation) - and \dD (display domain) commands to show collation, - nullable, and default properties in separate columns (Peter - Eisentraut) - - - - Previous they were shown in a single Modifiers column. - - - - - - Expand psql variable references in + Expand psql variable references in backtick-executed strings (Tom Lane) - This is particularly useful in the new psql + This is particularly useful in the new psql conditional branch commands. @@ -2282,23 +8076,64 @@ 2017-02-02 [fd6cd6980] Clean up psql's behavior for a few more control variable --> - Prevent psql's special variables from being set to + Prevent psql's special variables from being set to invalid values (Daniel Vérité, Tom Lane) - Previously, setting one of psql's special variables + Previously, setting one of psql's special variables to an invalid value silently resulted in the default behavior. - \set on a special variable now fails if the proposed - new value is invalid. As a special exception, \set + \set on a special variable now fails if the proposed + new value is invalid. As a special exception, \set with an empty or omitted new value, on a boolean-valued special variable, still has the effect of setting the variable - to on; but now it actually acquires that value rather - than an empty string. \unset on a special variable now + to on; but now it actually acquires that value rather + than an empty string. \unset on a special variable now explicitly sets the variable to its default value, which is also the value it acquires at startup. 
In sum, a control variable now always has a displayable value that reflects - what psql is actually doing. + what psql is actually doing. + + + + + + + Add variables showing server version and psql version + (Fabien Coelho) + + + + + + + Improve psql's \d (display relation) + and \dD (display domain) commands to show collation, + nullable, and default properties in separate columns (Peter + Eisentraut) + + + + Previously they were shown in a single Modifiers column. + + + + + + + Make the various \d commands handle no-matching-object + cases more consistently (Daniel Gustafsson) + + + + They now all print the message about that to stderr, not stdout, + and the message wording is more consistent. @@ -2316,7 +8151,7 @@ 2017-03-16 [d7d77f382] psql: Add completion for \help DROP|ALTER --> - Improve psql's tab completion (Jeff Janes, + Improve psql's tab completion (Jeff Janes, Ian Barwick, Andreas Karlsson, Sehrope Sarkuni, Thomas Munro, Kevin Grittner, Dagfinn Ilmari Mannsåker) @@ -2327,7 +8162,7 @@ - <xref linkend="pgbench"> + <xref linkend="pgbench"/> @@ -2336,7 +8171,7 @@ 2016-11-09 [41124a91e] pgbench: Allow the transaction log file prefix to be cha --> - Add pgbench option to control the log file prefix (Masahiko Sawada) @@ -2346,7 +8181,7 @@ 2017-01-20 [cdc2a7047] Allow backslash line continuations in pgbench's meta com --> - Allow pgbench's meta-commands to span multiple + Allow pgbench's meta-commands to span multiple lines (Fabien Coelho) @@ -2356,6 +8191,16 @@ + + + + Remove restriction on placement of option relative to + other command line options (Tom Lane) + + + @@ -2373,8 +8218,8 @@ --> Add pg_receivewal - option / to specify compression (Michael Paquier) @@ -2385,12 +8230,12 @@ --> Add pg_recvlogical option - to specify the ending position (Craig Ringer) - This complements the existing option. @@ -2399,9 +8244,9 @@ 2016-10-19 [5d58c07a4] initdb pg_basebackup: Rename -\-noxxx options to -\-no-x --> - Rename initdb - options and to be spelled + and (Vik Fearing, Peter Eisentraut) @@ -2413,9 +8258,9 @@ - <link linkend="APP-PGDUMP"><application>pg_dump</></>, - <link linkend="APP-PG-DUMPALL"><application>pg_dumpall</></>, - <link linkend="APP-PGRESTORE"><application>pg_restore</></> + <link linkend="app-pgdump"><application>pg_dump</application></link>, + <link linkend="app-pg-dumpall"><application>pg_dumpall</application></link>, + <link linkend="app-pgrestore"><application>pg_restore</application></link> @@ -2424,11 +8269,11 @@ 2016-09-20 [46b55e7f8] pg_restore: Add -N option to exclude schemas --> - Allow pg_restore to exclude schemas (Michael Banck) + Allow pg_restore to exclude schemas (Michael Banck) - This adds a new / option. @@ -2437,8 +8282,8 @@ 2016-11-29 [4fafa579b] Add -\-no-blobs option to pg_dump --> - Add @@ -2451,31 +8296,41 @@ 2017-03-07 [9a83d56b3] Allow pg_dumpall to dump roles w/o user passwords --> - Add pg_dumpall option - to omit role passwords (Robins Tharakan, Simon Riggs) - This allows use of pg_dumpall by non-superusers; + This allows use of pg_dumpall by non-superusers; without this option, it fails due to inability to read passwords. + + Support using synchronized snapshots when dumping from a standby + server (Petr Jelinek) + + + + + - Issue fsync() on the output files generated by - pg_dump and - pg_dumpall (Michael Paquier) + Issue fsync() on the output files generated by + pg_dump and + pg_dumpall (Michael Paquier) This provides more security that the output is safely stored on disk before the program exits. 
This can be disabled with - the new option. @@ -2485,7 +8340,7 @@ - <xref linkend="app-pgbasebackup"> + <xref linkend="app-pgbasebackup"/> @@ -2495,12 +8350,12 @@ 2016-12-21 [ecbdc4c55] Forbid invalid combination of options in pg_basebackup. --> - Allow pg_basebackup to stream write-ahead log in + Allow pg_basebackup to stream write-ahead log in tar mode (Magnus Hagander) - The WAL will be stored in a separate tar file from + The WAL will be stored in a separate tar file from the base backup. @@ -2510,13 +8365,13 @@ 2017-01-16 [e7b020f78] Make pg_basebackup use temporary replication slots --> - Make pg_basebackup use temporary replication slots + Make pg_basebackup use temporary replication slots (Magnus Hagander) Temporary replication slots will be used by default when - pg_basebackup uses WAL streaming with default + pg_basebackup uses WAL streaming with default options. @@ -2527,8 +8382,8 @@ --> Be more careful about fsync'ing in all required places - in pg_basebackup and - pg_receivewal (Michael Paquier) + in pg_basebackup and + pg_receivewal (Michael Paquier) @@ -2538,7 +8393,7 @@ 2016-10-19 [5d58c07a4] initdb pg_basebackup: Rename -\-noxxx options to -\-no-x --> - Add pg_basebackup option to disable fsync (Michael Paquier) @@ -2548,7 +8403,7 @@ 2016-09-28 [6ad8ac602] Exclude additional directories in pg_basebackup --> - Improve pg_basebackup's handling of which + Improve pg_basebackup's handling of which directories to skip (David Steele) @@ -2558,7 +8413,7 @@ - <application><xref linkend="app-pg-ctl"></> + <application><xref linkend="app-pg-ctl"/></application> @@ -2567,7 +8422,7 @@ 2016-09-21 [e7010ce47] pg_ctl: Add wait option to promote action --> - Add wait option for 's + Add wait option for 's promote operation (Peter Eisentraut) @@ -2577,8 +8432,8 @@ 2016-10-19 [0be22457d] pg_ctl: Add long options for -w and -W --> - Add long options for pg_ctl wait () + and no-wait () (Vik Fearing) @@ -2587,8 +8442,8 @@ 2016-10-19 [caf936b09] pg_ctl: Add long option for -o --> - Add long option for pg_ctl server options - () (Peter Eisentraut) @@ -2597,14 +8452,14 @@ 2017-06-28 [f13ea95f9] Change pg_ctl to detect server-ready by watching status --> - Make pg_ctl start --wait detect server-ready by - watching postmaster.pid, not by attempting connections + Make pg_ctl start --wait detect server-ready by + watching postmaster.pid, not by attempting connections (Tom Lane) The postmaster has been changed to report its ready-for-connections - status in postmaster.pid, and pg_ctl + status in postmaster.pid, and pg_ctl now examines that file to detect whether startup is complete. This is more efficient and reliable than the old method, and it eliminates postmaster log entries about rejected connection @@ -2617,12 +8472,12 @@ 2017-06-26 [c61559ec3] Reduce pg_ctl's reaction time when waiting for postmaste --> - Reduce pg_ctl's reaction time when waiting for + Reduce pg_ctl's reaction time when waiting for postmaster start/stop (Tom Lane) - pg_ctl now probes ten times per second when waiting + pg_ctl now probes ten times per second when waiting for a postmaster state change, rather than once per second. @@ -2632,14 +8487,14 @@ 2017-07-05 [1bac5f552] pg_ctl: Make failure to complete operation a nonzero exi --> - Ensure that pg_ctl exits with nonzero status if an + Ensure that pg_ctl exits with nonzero status if an operation being waited for does not complete within the timeout (Peter Eisentraut) - The start and promote operations now return - exit status 1, not 0, in such cases. 
The stop operation + The start and promote operations now return + exit status 1, not 0, in such cases. The stop operation has always done that. @@ -2664,19 +8519,20 @@ - Release numbers will now have two parts (e.g., 10.1) - rather than three (e.g., 9.6.3). + Release numbers will now have two parts (e.g., 10.1) + rather than three (e.g., 9.6.3). Major versions will now increase just the first number, and minor releases will increase just the second number. Release branches will be referred to by single numbers - (e.g., 10 rather than 9.6). + (e.g., 10 rather than 9.6). This change is intended to reduce user confusion about what is a - major or minor release of PostgreSQL. + major or minor release of PostgreSQL. - Improve behavior of pgindent + Improve behavior of pgindent (Piotr Stefaniak, Tom Lane) - We have switched to a new version of pg_bsd_indent + We have switched to a new version of pg_bsd_indent based on recent improvements made by the FreeBSD project. This fixes numerous small bugs that led to odd C code formatting decisions. Most notably, lines within parentheses (such as in a @@ -2704,14 +8560,14 @@ 2017-03-23 [eccfef81e] ICU support --> - Allow the ICU library to + Allow the ICU library to optionally be used for collation support (Peter Eisentraut) - The ICU library has versioning that allows detection + The ICU library has versioning that allows detection of collation changes between versions. It is enabled via configure - option . The default still uses the operating system's native collation library. @@ -2722,14 +8578,14 @@ --> Automatically mark all PG_FUNCTION_INFO_V1 functions - as DLLEXPORT-ed on - Windows (Laurenz Albe) + linkend="xfunc-c">PG_FUNCTION_INFO_V1 functions + as DLLEXPORT-ed on + Windows (Laurenz Albe) - If third-party code is using extern function - declarations, they should also add DLLEXPORT markers + If third-party code is using extern function + declarations, they should also add DLLEXPORT markers to those declarations. @@ -2739,10 +8595,10 @@ 2016-11-08 [1833f1a1c] Simplify code by getting rid of SPI_push, SPI_pop, SPI_r --> - Remove SPI functions SPI_push(), - SPI_pop(), SPI_push_conditional(), - SPI_pop_conditional(), - and SPI_restore_connection() as unnecessary (Tom Lane) + Remove SPI functions SPI_push(), + SPI_pop(), SPI_push_conditional(), + SPI_pop_conditional(), + and SPI_restore_connection() as unnecessary (Tom Lane) @@ -2752,9 +8608,9 @@ - A side effect of this change is that SPI_palloc() and + A side effect of this change is that SPI_palloc() and allied functions now require an active SPI connection; they do not - degenerate to simple palloc() if there is none. That + degenerate to simple palloc() if there is none. That previous behavior was not very useful and posed risks of unexpected memory leaks. @@ -2787,9 +8643,9 @@ 2016-10-09 [ecb0d20a9] Use unnamed POSIX semaphores, if available, on Linux and --> - Use POSIX semaphores rather than SysV semaphores - on Linux and FreeBSD (Tom Lane) + Use POSIX semaphores rather than SysV semaphores + on Linux and FreeBSD (Tom Lane) @@ -2811,7 +8667,7 @@ 2017-03-10 [f8f1430ae] Enable 64 bit atomics on ARM64. 
--> - Enable 64-bit atomic operations on ARM64 (Roman + Enable 64-bit atomic operations on ARM64 (Roman Shaposhnik) @@ -2821,13 +8677,13 @@ 2017-01-02 [1d63f7d2d] Use clock_gettime(), if available, in instr_time measure --> - Switch to using clock_gettime(), if available, for + Switch to using clock_gettime(), if available, for duration measurements (Tom Lane) - gettimeofday() is still used - if clock_gettime() is not available. + gettimeofday() is still used + if clock_gettime() is not available. @@ -2843,11 +8699,37 @@ - If no strong random number generator can be found, configure will - fail unless the configure - option option is used. However, with + this option, pgcrypto + functions requiring a strong random number generator will be disabled. + + + + + + + Allow WaitLatchOrSocket() to wait for socket + connection on Windows (Andres Freund) + + + + + + + tupconvert.c functions no longer convert tuples just to + embed a different composite-type OID in them (Ashutosh Bapat, Tom Lane) + + + + The majority of callers don't care about the composite-type OID; + but if the result tuple is to be used as a composite Datum, steps + should be taken to make sure the correct OID is inserted in it. @@ -2856,8 +8738,8 @@ 2016-10-11 [2b860f52e] Remove "sco" and "unixware" ports. --> - Remove SCO and Unixware ports (Tom Lane) + Remove SCO and Unixware ports (Tom Lane) @@ -2868,7 +8750,7 @@ --> Overhaul documentation build - process (Alexander Lakhin, Alexander Law) + process (Alexander Lakhin) @@ -2877,13 +8759,13 @@ 2017-04-06 [510074f9f] Remove use of Jade and DSSSL --> - Use XSLT to build the PostgreSQL + Use XSLT to build the PostgreSQL documentation (Peter Eisentraut) - Previously Jade, DSSSL, and - JadeTex were used. + Previously Jade, DSSSL, and + JadeTex were used. @@ -2892,7 +8774,7 @@ 2016-11-15 [e36ddab11] Build HTML documentation using XSLT stylesheets by defau --> - Build HTML documentation using XSLT + Build HTML documentation using XSLT stylesheets by default (Peter Eisentraut) @@ -2911,7 +8793,7 @@ 2016-09-29 [8e91e12bc] Allow contrib/file_fdw to read from a program, like COPY --> - Allow file_fdw to read + Allow file_fdw to read from program output as well as files (Corey Huinker, Adam Gomaa) @@ -2919,31 +8801,27 @@ - Push aggregates to foreign data wrapper servers, where possible + In postgres_fdw, + push aggregate functions to the remote server, when possible (Jeevan Chalke, Ashutosh Bapat) - This reduces the amount of data that must be passed - from the foreign data wrapper server, and offloads - aggregate computation from the requesting server. The postgres_fdw FDW is able to - perform this optimization. There are also improvements in - pushing down joins involving extensions. + This reduces the amount of data that must be passed from the remote + server, and offloads aggregate computation from the requesting server. - Allow push down of FULL JOIN queries containing - subqueries in the - FROM clause to foreign servers (Etsuro Fujita) + In postgres_fdw, push joins to the remote server in + more cases (David Rowley, Ashutosh Bapat, Etsuro Fujita) @@ -2952,12 +8830,12 @@ 2016-08-26 [ae025a159] Support OID system column in postgres_fdw. --> - Properly support OID columns in - postgres_fdw tables (Etsuro Fujita) + Properly support OID columns in + postgres_fdw tables (Etsuro Fujita) - Previously OID columns always returned zeros. + Previously OID columns always returned zeros. @@ -2966,8 +8844,8 @@ 2017-03-21 [f7946a92b] Add btree_gist support for enum types. 
--> - Allow btree_gist - and btree_gin to + Allow btree_gist + and btree_gin to index enum types (Andrew Dunstan) @@ -2981,8 +8859,8 @@ 2016-11-29 [11da83a0e] Add uuid to the set of types supported by contrib/btree_ --> - Add indexing support to btree_gist for the - UUID data type (Paul Jungwirth) + Add indexing support to btree_gist for the + UUID data type (Paul Jungwirth) @@ -2991,7 +8869,7 @@ 2017-03-09 [3717dc149] Add amcheck extension to contrib. --> - Add amcheck which can + Add amcheck which can check the validity of B-tree indexes (Peter Geoghegan) @@ -3001,10 +8879,10 @@ 2017-03-27 [a6f22e835] Show ignored constants as "$N" rather than "?" in pg_sta --> - Show ignored constants as $N rather than ? + Show ignored constants as $N rather than ? in pg_stat_statements + linkend="pgstatstatements">pg_stat_statements (Lukas Fittl) @@ -3014,13 +8892,13 @@ 2016-09-27 [f31a931fa] Improve contrib/cube's handling of zero-D cubes, infinit --> - Improve cube's handling + Improve cube's handling of zero-dimensional cubes (Tom Lane) - This also improves handling of infinite and - NaN values. + This also improves handling of infinite and + NaN values. @@ -3030,12 +8908,12 @@ --> Allow pg_buffercache to run + linkend="pgbuffercache">pg_buffercache to run with fewer locks (Ivan Kartyshov) - This allows it to be less disruptive when run on production systems. + This makes it less disruptive when run on production systems. @@ -3044,8 +8922,8 @@ 2017-02-03 [e759854a0] pgstattuple: Add pgstathashindex. --> - Add pgstattuple - function pgstathashindex() to view hash index + Add pgstattuple + function pgstathashindex() to view hash index statistics (Ashutosh Sharma) @@ -3055,8 +8933,8 @@ 2016-09-29 [fd321a1df] Remove superuser checks in pgstattuple --> - Use GRANT permissions to - control pgstattuple function usage (Stephen Frost) + Use GRANT permissions to + control pgstattuple function usage (Stephen Frost) @@ -3069,7 +8947,7 @@ 2016-10-28 [d4b5d4cad] pgstattuple: Don't take heavyweight locks when examining --> - Reduce locking when pgstattuple examines hash + Reduce locking when pgstattuple examines hash indexes (Amit Kapila) @@ -3079,8 +8957,8 @@ 2017-03-17 [fef2bcdcb] pageinspect: Add page_checksum function --> - Add pageinspect - function page_checksum() to show a page's checksum + Add pageinspect + function page_checksum() to show a page's checksum (Tomas Vondra) @@ -3090,8 +8968,8 @@ 2017-04-04 [193f5f9e9] pageinspect: Add bt_page_items function with bytea argum --> - Add pageinspect - function bt_page_items() to print page items from a + Add pageinspect + function bt_page_items() to print page items from a page image (Tomas Vondra) @@ -3101,7 +8979,7 @@ 2017-02-02 [08bf6e529] pageinspect: Support hash indexes. --> - Add hash index support to pageinspect (Jesper + Add hash index support to pageinspect (Jesper Pedersen, Ashutosh Sharma) @@ -3112,4 +8990,341 @@ + + Acknowledgments + + + The following individuals (in alphabetical order) have contributed to this + release as patch authors, committers, reviewers, testers, or reporters of + issues. 
+ + + + Adam Brightwell + Adam Brusselback + Adam Gomaa + Adam Sah + Adrian Klaver + Aidan Van Dyk + Aleksander Alekseev + Alexander Korotkov + Alexander Lakhin + Alexander Sosna + Alexey Bashtanov + Alexey Grishchenko + Alexey Isayko + Álvaro Hernández Tortosa + Álvaro Herrera + Amit Kapila + Amit Khandekar + Amit Langote + Amul Sul + Anastasia Lubennikova + Andreas Joseph Krogh + Andreas Karlsson + Andreas Scherbaum + Andreas Seltenreich + Andres Freund + Andrew Dunstan + Andrew Gierth + Andrew Wheelwright + Andrey Borodin + Andrey Lizenko + Andy Abelisto + Antonin Houska + Ants Aasma + Arjen Nienhuis + Arseny Sher + Artur Zakirov + Ashutosh Bapat + Ashutosh Sharma + Ashwin Agrawal + Atsushi Torikoshi + Ayumi Ishii + Basil Bourque + Beena Emerson + Ben de Graaff + Benedikt Grundmann + Bernd Helmle + Brad DeJong + Brandur Leach + Breen Hagan + Bruce Momjian + Bruno Wolff III + Catalin Iacob + Chapman Flack + Chen Huajun + Choi Doo-Won + Chris Bandy + Chris Richards + Chris Ruprecht + Christian Ullrich + Christoph Berg + Chuanting Wang + Claudio Freire + Clinton Adams + Const Zhang + Constantin Pan + Corey Huinker + Craig Ringer + Cynthia Shang + Dagfinn Ilmari Mannsåker + Daisuke Higuchi + Damian Quiroga + Dan Wood + Dang Minh Huong + Daniel Gustafsson + Daniel Vérité + Daniel Westermann + Daniele Varrazzo + Danylo Hlynskyi + Darko Prelec + Dave Cramer + Dave Page + David Christensen + David Fetter + David Johnston + David Rader + David Rowley + David Steele + Dean Rasheed + Denis Smirnov + Denish Patel + Dennis Björklund + Devrim Gündüz + Dilip Kumar + Dilyan Palauzov + Dima Pavlov + Dimitry Ivanov + Dmitriy Sarafannikov + Dmitry Dolgov + Dmitry Fedin + Don Morrison + Egor Rogov + Eiji Seki + Emil Iggland + Emre Hasegeli + Enrique Meneses + Erik Nordström + Erik Rijkers + Erwin Brandstetter + Etsuro Fujita + Eugen Konkov + Eugene Kazakov + Euler Taveira + Fabien Coelho + Fabrízio de Royes Mello + Feike Steenbergen + Felix Gerzaguet + Filip Jirsák + Fujii Masao + Gabriele Bartolini + Gabrielle Roth + Gao Zengqi + Gerdan Santos + Gianni Ciolli + Gilles Darold + Giuseppe Broccolo + Graham Dutton + Greg Atkins + Greg Burek + Grigory Smolkin + Guillaume Lelarge + Hans Buschmann + Haribabu Kommi + Heikki Linnakangas + Henry Boehlert + Huan Ruan + Ian Barwick + Igor Korot + Ildus Kurbangaliev + Ivan Kartyshov + Jaime Casanova + Jakob Egger + James Parks + Jarred Ward + Jason Li + Jason O'Donnell + Jason Petersen + Jeevan Chalke + Jeevan Ladhe + Jeff Dafoe + Jeff Davis + Jeff Janes + Jelte Fennema + Jeremy Finzel + Jeremy Schneider + Jeroen van der Ham + Jesper Pedersen + Jim Mlodgenski + Jim Nasby + Jinyu Zhang + Joe Conway + Joel Jacobson + John Harvey + Jon Nelson + Jordan Gigov + Josh Berkus + Josh Soref + Julian Markwort + Julien Rouhaud + Junseok Yang + Justin Muise + Justin Pryzby + Kacper Zuk + KaiGai Kohei + Karen Huddleston + Karl Lehenbauer + Karl O. 
Pinc + Keith Fiske + Kevin Grittner + Kim Rose Carlsen + Konstantin Evteev + Konstantin Knizhnik + Kuntal Ghosh + Kurt Kartaltepe + Kyle Conroy + Kyotaro Horiguchi + Laurenz Albe + Leonardo Cecchi + Ludovic Vaugeois-Pepin + Lukas Fittl + Magnus Hagander + Maksim Milyutin + Maksym Sobolyev + Marc Rassbach + Marc-Olaf Jaschke + Marcos Castedo + Marek Cvoren + Mark Dilger + Mark Kirkwood + Mark Pether + Marko Tiikkaja + Markus Winand + Marllius Ribeiro + Marti Raudsepp + Martín Marqués + Masahiko Sawada + Matheus Oliveira + Mathieu Fenniak + Merlin Moncure + Michael Banck + Michael Day + Michael Meskes + Michael Overmeyer + Michael Paquier + Mike Palmiotto + Milos Urbanek + Mithun Cy + Moshe Jacobson + Murtuza Zabuawala + Naoki Okano + Nathan Bossart + Nathan Wagner + Neha Khatri + Neha Sharma + Neil Anderson + Nicolas Baccelli + Nicolas Guini + Nicolas Thauvin + Nikhil Sontakke + Nikita Glukhov + Nikolaus Thiel + Nikolay Nikitin + Nikolay Shaplov + Noah Misch + Noriyoshi Shinoda + Olaf Gawenda + Oleg Bartunov + Oskari Saarenmaa + Otar Shavadze + Paresh More + Paul Jungwirth + Paul Ramsey + Pavan Deolasee + Pavel Golub + Pavel Hanák + Pavel Raiskup + Pavel Stehule + Peng Sun + Peter Eisentraut + Peter Geoghegan + Petr Jelínek + Philippe Beaudoin + Pierre-Emmanuel André + Piotr Stefaniak + Prabhat Sahu + QL Zhuo + Radek Slupik + Rafa de la Torre + Rafia Sabih + Ragnar Ouchterlony + Rahila Syed + Rajkumar Raghuwanshi + Regina Obe + Richard Pistole + Robert Haas + Robins Tharakan + Rod Taylor + Roman Shaposhnik + Rushabh Lathia + Ryan Murphy + Sandeep Thakkar + Scott Milliken + Sean Farrell + Sebastian Luque + Sehrope Sarkuni + Sergey Burladyan + Sergey Koposov + Shay Rojansky + Shinichi Matsuda + Sho Kato + Simon Riggs + Simone Gotti + Spencer Thomason + Stas Kelvich + Stepan Pesternikov + Stephen Frost + Steve Randall + Steve Singer + Steven Fackler + Steven Winfield + Suraj Kharage + Sveinn Sveinsson + Sven R. Kunze + Tahir Fakhroutdinov + Taiki Kondo + Takayuki Tsunakawa + Takeshi Ideriha + Tatsuo Ishii + Tatsuro Yamada + Teodor Sigaev + Thom Brown + Thomas Kellerer + Thomas Munro + Tim Goodaire + Tobias Bussmann + Tom Dunstan + Tom Lane + Tom van Tilburg + Tomas Vondra + Tomonari Katsumata + Tushar Ahuja + Vaishnavi Prabakaran + Venkata Balaji Nagothi + Vicky Vergara + Victor Wagner + Vik Fearing + Vinayak Pokale + Viren Negi + Vitaly Burovoy + Vladimir Kunshchikov + Vladimir Rusinov + Yi Wen Wong + Yugo Nagata + Zhen Ming Yang + Zhou Digoal + + + diff --git a/doc/src/sgml/release-11.sgml b/doc/src/sgml/release-11.sgml new file mode 100644 index 0000000000..f35b0d8cc9 --- /dev/null +++ b/doc/src/sgml/release-11.sgml @@ -0,0 +1,3946 @@ + + + + + Release 11.1 + + + Release date: + 2018-11-08 + + + + This release contains a variety of fixes from 11.0. + For information about new features in major release 11, see + . + + + + Migration to Version 11.1 + + + A dump/restore is not required for those running 11.X. + + + + However, if you use the pg_stat_statements extension, + see the changelog entry below about that. + + + + + Changes + + + + + + + Ensure proper quoting of transition table names + when pg_dump emits CREATE TRIGGER + ... REFERENCING commands (Tom Lane) + + + + This oversight could be exploited by an unprivileged user to gain + superuser privileges during the next dump/reload + or pg_upgrade run. 
(CVE-2018-16850) + + + + + + + Apply the tablespace specified for a partitioned index when creating a + child index (Álvaro Herrera) + + + + Previously, child indexes were always created in the default + tablespace. + + + + + + + Fix NULL handling in parallel hashed multi-batch left joins (Andrew + Gierth, Thomas Munro) + + + + Outer-relation rows with null values of the hash key were omitted from + the join result. + + + + + + + Fix incorrect processing of an array-type coercion expression + appearing within a CASE clause that has a constant + test expression (Tom Lane) + + + + + + + Fix incorrect expansion of tuples lacking recently-added columns + (Andrew Dunstan, Amit Langote) + + + + This is known to lead to crashes in triggers on tables with + recently-added columns, and could have other symptoms as well. + + + + + + + Fix bugs with named or defaulted arguments in CALL + argument lists (Tom Lane, Pavel Stehule) + + + + + + + Fix strictness check for strict aggregates with ORDER + BY columns (Andrew Gierth, Andres Freund) + + + + The strictness logic incorrectly ignored rows for which + the ORDER BY value(s) were null. + + + + + + + Disable recheck_on_update optimization (Tom Lane) + + + + This new-in-v11 feature turns out not to have been ready for prime + time. Disable it until something can be done about it. + + + + + + + Prevent creation of a partition in a trigger attached to its parent + table (Amit Langote) + + + + Ideally we'd allow that, but for the moment it has to be blocked to + avoid crashes. + + + + + + + Fix problems with applying ON COMMIT DELETE ROWS to + a partitioned temporary table (Amit Langote) + + + + + + + Fix character-class checks to not fail on Windows for Unicode + characters above U+FFFF (Tom Lane, Kenji Uno) + + + + This bug affected full-text-search operations, as well + as contrib/ltree + and contrib/pg_trgm. + + + + + + + Ensure that the server will process + already-received NOTIFY + and SIGTERM interrupts before waiting for client + input (Jeff Janes, Tom Lane) + + + + + + + Fix memory leak in repeated SP-GiST index scans (Tom Lane) + + + + This is only known to amount to anything significant in cases where + an exclusion constraint using SP-GiST receives many new index entries + in a single command. + + + + + + + Prevent starting the server with wal_level set + to too low a value to support an existing replication slot (Andres + Freund) + + + + + + + Fix psql, as well as documentation + examples, to call PQconsumeInput() before + each PQnotifies() call (Tom Lane) + + + + This fixes cases in which psql would not + report receipt of a NOTIFY message until after the + next command. + + + + + + + Fix pg_verify_checksums's determination of + which files to check the checksums of (Michael Paquier) + + + + In some cases it complained about files that are not expected to have + checksums. + + + + + + + In contrib/pg_stat_statements, disallow + the pg_read_all_stats role from + executing pg_stat_statements_reset() + (Haribabu Kommi) + + + + pg_read_all_stats is only meant to grant permission + to read statistics, not to change them, so this grant was incorrect. + + + + To cause this change to take effect, run ALTER EXTENSION + pg_stat_statements UPDATE in each database + where pg_stat_statements has been installed. + (A database freshly created in 11.0 should not need this, but a + database upgraded from a previous release probably still contains + the old version of pg_stat_statements. The + UPDATE command is harmless if the module was + already updated.) 
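Following the pg_stat_statements entry above, the update can be applied and verified in each affected database roughly as follows:
<programlisting>
ALTER EXTENSION pg_stat_statements UPDATE;

-- Confirm the installed extension version afterwards
SELECT extversion FROM pg_extension WHERE extname = 'pg_stat_statements';
</programlisting>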
+ + + + + + + Rename red-black tree support functions to use rbt + prefix not rb prefix (Tom Lane) + + + + This avoids name collisions with Ruby functions, which broke + PL/Ruby. It's hoped that there are no other affected extensions. + + + + + + + Fix build problems on macOS 10.14 (Mojave) (Tom Lane) + + + + Adjust configure to add + an switch to CPPFLAGS; + without this, PL/Perl and PL/Tcl fail to configure or build on macOS + 10.14. The specific sysroot used can be overridden at configure time + or build time by setting the PG_SYSROOT variable in + the arguments of configure + or make. + + + + It is now recommended that Perl-related extensions + write $(perl_includespec) rather + than -I$(perl_archlibexp)/CORE in their compiler + flags. The latter continues to work on most platforms, but not recent + macOS. + + + + Also, it should no longer be necessary to + specify manually to get PL/Tcl to + build on recent macOS releases. + + + + + + + Fix MSVC build and regression-test scripts to work on recent Perl + versions (Andrew Dunstan) + + + + Perl no longer includes the current directory in its search path + by default; work around that. + + + + + + + On Windows, allow the regression tests to be run by an Administrator + account (Andrew Dunstan) + + + + To do this safely, pg_regress now gives up + any such privileges at startup. + + + + + + + Update time zone data files to tzdata + release 2018g for DST law changes in Chile, Fiji, Morocco, and Russia + (Volgograd), plus historical corrections for China, Hawaii, Japan, + Macau, and North Korea. + + + + + + + + + + Release 11 + + + Release date: + 2018-10-18 + + + + Overview + + + Major enhancements in PostgreSQL 11 include: + + + + + + + + + Improvements to partitioning functionality, including: + + + + Add support for partitioning by a hash key + + + + + Add support for PRIMARY KEY, FOREIGN + KEY, indexes, and triggers on partitioned tables + + + + + Allow creation of a default partition for storing + data that does not match any of the remaining partitions + + + + + UPDATE statements that change a partition key + column now cause affected rows to be moved to the appropriate + partitions + + + + + Improve SELECT performance through enhanced + partition elimination strategies during query planning and execution + + + + + + + + + Improvements to parallelism, including: + + + + CREATE INDEX can now use parallel processing + while building a B-tree index + + + + + Parallelization is now possible in CREATE TABLE + ... AS, + CREATE MATERIALIZED VIEW, and certain + queries using UNION + + + + + Parallelized hash joins and parallelized sequential scans now + perform better + + + + + + + + + SQL stored procedures that support embedded transactions + + + + + + Optional Just-in-Time (JIT) compilation for some SQL code, speeding + evaluation of expressions + + + + + + Window functions now support all framing options shown in the SQL:2011 + standard, including RANGE distance + PRECEDING/FOLLOWING, GROUPS mode, and + frame exclusion options + + + + + + Covering indexes can now be created, using the + INCLUDE clause of CREATE INDEX + + + + + + Many other useful performance improvements, including the ability to + avoid a table rewrite for ALTER TABLE ... ADD COLUMN + with a non-null column default + + + + + + + The above items are explained in more detail in the sections below. + + + + + + + Migration to Version 11 + + + A dump/restore using , or use of , is required for those wishing to migrate data + from any previous release. 
+ + + + Version 11 contains a number of changes that may affect compatibility + with previous releases. Observe the following incompatibilities: + + + + + + + + + Make pg_dump + dump the properties of a database, not just its contents + (Haribabu Kommi) + + + + Previously, attributes of the database itself, such as database-level + GRANT/REVOKE permissions and + ALTER DATABASE SET variable settings, were only + dumped by pg_dumpall. + Now pg_dump --create and + pg_restore --create will restore these database + properties in addition to the objects within the + database. pg_dumpall -g now only dumps role- + and tablespace-related attributes. + pg_dumpall's complete output (without + ) is unchanged. + + + + pg_dump and + pg_restore, without + , no longer dump/restore database-level + comments and security labels; those are now treated as properties of + the database. + + + + pg_dumpall's output script will now always + create databases with their original locale and encoding, and hence + will fail if the locale or encoding name is unknown to the + destination system. Previously, CREATE DATABASE + would be emitted without these specifications if the database locale + and encoding matched the old cluster's defaults. + + + + pg_dumpall --clean now restores the original + locale and encoding settings of the postgres + and template1 databases, as well as those of + user-created databases. + + + + + + + + Consider syntactic form when disambiguating function versus column + references (Tom Lane) + + + + When x is a table name or composite + column, PostgreSQL has traditionally + considered the syntactic + forms f(x) + and x.f + to be equivalent, allowing tricks such as writing a function and + then using it as though it were a computed-on-demand column. + However, if both interpretations are feasible, the column + interpretation was always chosen, leading to surprising results if + the user intended the function interpretation. Now, if there is + ambiguity, the interpretation that matches the syntactic form is + chosen. + + + + + + + Fully enforce uniqueness of table and domain constraint names + (Tom Lane) + + + + PostgreSQL expects the names of a table's + constraints to be distinct, and likewise for the names of a domain's + constraints. However, there was not rigid enforcement of this, and + previously there were corner cases where duplicate names could be + created. + + + + + + + + Make power(numeric, numeric) + and power(float8, float8) + handle NaN inputs according to the POSIX standard + (Tom Lane, Dang Minh Huong) + + + + POSIX says that NaN ^ 0 = 1 and 1 ^ NaN + = 1, but all other cases with NaN + input(s) should return NaN. + power(numeric, numeric) just + returned NaN in all such cases; now it honors the + two exceptions. power(float8, float8) followed + the standard if the C library does; but on some old Unix platforms + the library doesn't, and there were also problems on some versions + of Windows. + + + + + + + + Prevent to_number() + from consuming characters when the template separator does not + match (Oliver Ford) + + + + Specifically, SELECT to_number('1234', '9,999') + used to return 134. It will now + return 1234. L and + TH now only consume characters that are not + digits, positive/negative signs, decimal points, or commas. 
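+
+     A brief illustration of the corrected behavior, using the call quoted
+     above:
+<programlisting>
+SELECT to_number('1234', '9,999');  -- previously returned 134, now returns 1234
+</programlisting>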
+ + + + + + + + Fix to_date(), + to_number(), and + to_timestamp() to skip a character for each + template character (Tom Lane) + + + + Previously, they skipped one byte for each byte + of template character, resulting in strange behavior if either string + contained multibyte characters. + + + + + + + + Adjust the handling of backslashes inside double-quotes in + template strings for to_char(), + to_number(), and + to_timestamp(). + + + + Such a backslash now escapes the character after it, particularly + a double-quote or another backslash. + + + + + + + + Correctly handle relative path expressions + in xmltable(), xpath(), + and other XML-handling functions (Markus Winand) + + + + Per the SQL standard, relative paths start from the document node of + the XML input document, not the root node as these functions + previously did. + + + + + + + + In the extended query + protocol, + make statement_timeout + apply to each Execute message separately, not to all commands before + Sync (Tatsuo Ishii, Andres Freund) + + + + + + + + Remove the relhaspkey column from system + catalog pg_class (Peter Eisentraut) + + + + Applications needing to check for a primary key should consult + pg_index. + + + + + + + + Replace system catalog pg_proc's + proisagg and + proiswindow columns with + prokind (Peter Eisentraut) + + + + This new column more clearly distinguishes functions, procedures, + aggregates, and window functions. + + + + + + + + Correct information schema column tables.table_type + to return FOREIGN instead of FOREIGN + TABLE (Peter Eisentraut) + + + + This new output matches the SQL standard. + + + + + + + + Change the ps process display + labels for background workers to match the pg_stat_activity.backend_type + labels (Peter Eisentraut) + + + + + + + + Cause large object permission checks + to happen during large object open, lo_open(), not + when a read or write is attempted (Tom Lane, Michael Paquier) + + + + If write access is requested and not available, an error will now be + thrown even if the large object is never written to. + + + + + + + Prevent non-superusers from reindexing shared catalogs + (Michael Paquier, Robert Haas) + + + + Previously, database owners were also allowed to do this, but + now it is considered outside the bounds of their privileges. + + + + + + + + Remove deprecated adminpack functions + pg_file_read(), + pg_file_length(), and + pg_logfile_rotate() (Stephen Frost) + + + + Equivalent functionality is now present in the core backend. + Existing adminpack installs will continue to have + access to these functions until they are updated via ALTER + EXTENSION ... UPDATE. + + + + + + + + Honor the capitalization of double-quoted command options + (Daniel Gustafsson) + + + + Previously, option names in certain SQL commands were forcibly + lower-cased even if entered with double quotes; thus for example + "FillFactor" would be accepted as an index storage + option, though properly its name is lower-case. Such cases will now + generate an error. + + + + + + + + Remove server parameter replacement_sort_tuples + (Peter Geoghegan) + + + + Replacement sorts were determined to be no longer useful. + + + + + + + + + Remove WITH clause in CREATE + FUNCTION (Michael Paquier) + + + + PostgreSQL has long supported a more + standard-compliant syntax for this capability. + + + + + + + + + Changes + + + Below you will find a detailed account of the changes between + PostgreSQL 11 and the previous major + release. 
+ + + + Server + + + Partitioning + + + + + + + + Allow the creation of partitions based on hashing a key column + (Amul Sul) + + + + + + + + Support indexes on partitioned tables (Álvaro Herrera, + Amit Langote) + + + + An index on a partitioned table is not a physical + index across the whole partitioned table, but rather a template for + automatically creating similar indexes on each partition of the + table. + + + + If the partition key is part of the index's column set, a + partitioned index may be declared UNIQUE. + It will represent a valid uniqueness constraint across the whole + partitioned table, even though each physical index only enforces + uniqueness within its own partition. + + + + The new command ALTER + INDEX ATTACH PARTITION causes an existing index on + a partition to be associated with a matching index template for its + partitioned table. This provides flexibility in setting up a new + partitioned index for an existing partitioned table. + + + + + + + + Allow foreign keys on partitioned tables (Álvaro Herrera) + + + + + + + + Allow FOR EACH ROW triggers on partitioned + tables (Álvaro Herrera) + + + + Creation of a trigger on a partitioned table automatically creates + triggers on all existing and future partitions. + This also allows deferred unique constraints on partitioned tables. + + + + + + + + Allow partitioned tables to have a default partition (Jeevan Ladhe, + Beena Emerson, Ashutosh Bapat, Rahila Syed, Robert Haas) + + + + The default partition will store rows that don't match any of the + other defined partitions, and is searched accordingly. + + + + + + + + UPDATE statements that change a partition key + column now cause affected rows to be moved to the appropriate + partitions (Amit Khandekar) + + + + + + + + Allow INSERT, UPDATE, and + COPY on partitioned tables to properly route + rows to foreign partitions (Etsuro Fujita, Amit Langote) + + + + This is supported by postgres_fdw + foreign tables. + + + + + + + + Allow faster partition elimination during query processing (Amit + Langote, David Rowley, Dilip Kumar) + + + + This speeds access to partitioned tables with many partitions. + + + + + + + + Allow partition elimination during query execution (David Rowley, + Beena Emerson) + + + + Previously, partition elimination only happened at planning + time, meaning many joins and prepared queries could not use + partition elimination. + + + + + + + + In an equality join between partitioned tables, allow matching + partitions to be joined directly (Ashutosh Bapat) + + + + This feature is disabled by default + but can be enabled by changing enable_partitionwise_join. + + + + + + + + Allow aggregate functions on partitioned tables to be evaluated + separately for each partition, subsequently merging the results + (Jeevan Chalke, Ashutosh Bapat, Robert Haas) + + + + This feature is disabled by default + but can be enabled by changing enable_partitionwise_aggregate. 
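+
+     A rough sketch of hash partitioning and the new partition-wise settings
+     described in this section (table and column names are hypothetical):
+<programlisting>
+CREATE TABLE measurement (sensor_id int, reading numeric)
+    PARTITION BY HASH (sensor_id);
+CREATE TABLE measurement_p0 PARTITION OF measurement
+    FOR VALUES WITH (MODULUS 4, REMAINDER 0);
+CREATE TABLE measurement_p1 PARTITION OF measurement
+    FOR VALUES WITH (MODULUS 4, REMAINDER 1);
+CREATE TABLE measurement_p2 PARTITION OF measurement
+    FOR VALUES WITH (MODULUS 4, REMAINDER 2);
+CREATE TABLE measurement_p3 PARTITION OF measurement
+    FOR VALUES WITH (MODULUS 4, REMAINDER 3);
+
+-- Both partition-wise features are disabled by default:
+SET enable_partitionwise_join = on;
+SET enable_partitionwise_aggregate = on;
+</programlisting>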
+ + + + + + + + Allow postgres_fdw + to push down aggregates to foreign tables that are partitions + (Jeevan Chalke) + + + + + + + + + + Parallel Queries + + + + + + + + Allow parallel building of a btree index (Peter Geoghegan, + Rushabh Lathia, Heikki Linnakangas) + + + + + + + + Allow hash joins to be performed in parallel using a shared hash + table (Thomas Munro) + + + + + + + + Allow UNION to run each + SELECT in parallel if the individual + SELECTs cannot be parallelized (Amit Khandekar, + Robert Haas, Amul Sul) + + + + + + + + Allow partition scans to more efficiently use parallel workers + (Amit Khandekar, Robert Haas, Amul Sul) + + + + + + + + Allow LIMIT to be passed to parallel workers + (Robert Haas, Tom Lane) + + + + This allows workers to reduce returned results and use targeted + index scans. + + + + + + + + Allow single-evaluation queries, e.g. WHERE + clause aggregate queries, and functions in the target list to be + parallelized (Amit Kapila, Robert Haas) + + + + + + + + Add server parameter parallel_leader_participation + to control whether the leader also executes subplans (Thomas Munro) + + + + The default is enabled, meaning the leader will execute subplans. + + + + + + + + Allow parallelization of commands CREATE TABLE + ... AS, SELECT INTO, and + CREATE MATERIALIZED VIEW (Haribabu Kommi) + + + + + + + + Improve performance of sequential scans with many parallel workers + (David Rowley) + + + + + + + + Add reporting of parallel workers' sort activity in + EXPLAIN (Robert Haas, Tom Lane) + + + + + + + + + + Indexes + + + + + + + + Allow B-tree indexes to include columns that are not part of the + search key or unique constraint, but are available to be read by + index-only scans (Anastasia Lubennikova, Alexander Korotkov, Teodor + Sigaev) + + + + This is enabled by the new INCLUDE clause of CREATE INDEX. + It facilitates building covering indexes that optimize + specific types of queries. Columns can be included even if their + data types don't have B-tree support. + + + + + + + + Improve performance of monotonically increasing index additions + (Pavan Deolasee, Peter Geoghegan) + + + + + + + + Improve performance of hash index scans (Ashutosh Sharma) + + + + + + + + Add predicate locking for hash, GiST and GIN indexes (Shubham + Barai) + + + + This reduces the likelihood of serialization conflicts in + serializable-mode transactions. + + + + + + + <link linkend="spgist">SP-Gist</link> + + + + + + + + Add prefix-match + operator text ^@ text, + which is supported by SP-GiST (Ildus Kurbangaliev) + + + + This is similar to using var LIKE + 'word%' with a btree index, but it is more efficient. + + + + + + + + Allow polygons to be indexed with SP-GiST (Nikita Glukhov, + Alexander Korotkov) + + + + + + + + Allow SP-GiST to use lossy representation of leaf keys (Teodor Sigaev, + Heikki Linnakangas, Alexander Korotkov, Nikita Glukhov) + + + + + + + + + + + + Optimizer + + + + + + + + Improve selection of the most common values for statistics + (Jeff Janes, Dean Rasheed) + + + + Previously, the most common values (MCVs) were + identified based on their frequency compared to all column + values. Now, MCVs are chosen based on their + frequency compared to the non-MCV values. + This improves the robustness of the algorithm for both uniform and + non-uniform distributions. 
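+
+     Not part of the original entry, but the chosen MCVs can be inspected
+     via pg_stats (hypothetical table and column names):
+<programlisting>
+ANALYZE orders;
+SELECT most_common_vals, most_common_freqs
+  FROM pg_stats
+ WHERE tablename = 'orders' AND attname = 'status';
+</programlisting>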
+ + + + + + + + Improve selectivity estimates for >= + and <= (Tom Lane) + + + + Previously, such cases used the same selectivity estimates + as > and <, respectively, + unless the comparison constants are MCVs. + This change is particularly helpful for queries + involving BETWEEN with small ranges. + + + + + + + + Reduce var = + var + to var IS NOT NULL + where equivalent (Tom Lane) + + + + This leads to better selectivity estimates. + + + + + + + + Improve optimizer's row count estimates for EXISTS + and NOT EXISTS queries (Tom Lane) + + + + + + + + Make the optimizer account for evaluation costs and selectivity + of HAVING clauses (Tom Lane) + + + + + + + + + + General Performance + + + + + + + + Add Just-in-Time + (JIT) compilation of some parts of query plans + to improve execution speed (Andres Freund) + + + + This feature requires LLVM to be + available. It is not currently enabled by default, even in + builds that support it. + + + + + + + + Allow bitmap scans to perform index-only scans when possible + (Alexander Kuzmenkov) + + + + + + + + Update the free space map during VACUUM + (Claudio Freire) + + + + This allows free space to be reused more quickly. + + + + + + + + Allow VACUUM to avoid unnecessary index scans + (Masahiko Sawada, Alexander Korotkov) + + + + + + + + Improve performance of committing multiple concurrent transactions + (Amit Kapila) + + + + + + + + Reduce memory usage for queries using set-returning functions in + their target lists (Andres Freund) + + + + + + + + Improve the speed of aggregate computations (Andres Freund) + + + + + + + + Allow postgres_fdw + to push UPDATEs and DELETEs + using joins to foreign servers (Etsuro Fujita) + + + + Previously, only non-join UPDATEs and + DELETEs were pushed. + + + + + + + + + Add support for large pages on Windows + (Takayuki Tsunakawa, Thomas Munro) + + + + This is controlled by the huge_pages configuration + parameter. + + + + + + + + + Monitoring + + + + + + + + Show memory usage in output from log_statement_stats, + log_parser_stats, + log_planner_stats, and + log_executor_stats (Justin Pryzby, Peter + Eisentraut) + + + + + + + + Add column pg_stat_activity.backend_type + to show the type of a background worker (Peter Eisentraut) + + + + The type is also visible in ps output. + + + + + + + + Make log_autovacuum_min_duration + log skipped tables that are concurrently being dropped (Nathan + Bossart) + + + + + + + + <link linkend="infoschema-tables">Information Schema</link> + + + + + + + + Add information_schema columns related to table + constraints and triggers (Peter Eisentraut) + + + + Specifically, + triggers.action_order, + triggers.action_reference_old_table, + and + triggers.action_reference_new_table + are now populated, where before they were always null. Also, + table_constraints.enforced + now exists but is not yet usefully populated. + + + + + + + + + + + <acronym>Authentication</acronym> + + + + + + + + Allow the server to specify more complex LDAP specifications + in search+bind mode (Thomas Munro) + + + + Specifically, ldapsearchfilter allows pattern matching using + combinations of LDAP attributes. + + + + + + + + Allow LDAP authentication to use + encrypted LDAP (Thomas Munro) + + + + We already supported LDAP over + TLS by using ldaptls=1. + This new TLS LDAP method for + encrypted LDAP is enabled + with ldapscheme=ldaps + or ldapurl=ldaps://. 
+ + + + + + + + Improve logging of LDAP errors (Thomas Munro) + + + + + + + + + + Permissions + + + + + + + + Add default roles that + enable file system access (Stephen Frost) + + + + Specifically, the new roles are: + pg_read_server_files, + pg_write_server_files, and + pg_execute_server_program. These roles now also + control who can use server-side COPY and the file_fdw extension. + Previously, only superusers could use these functions, and that + is still the default behavior. + + + + + + + + Allow access to file system functions to be controlled by + GRANT/REVOKE permissions, + rather than superuser checks (Stephen Frost) + + + + Specifically, these functions were modified: pg_ls_dir(), + pg_read_file(), + pg_read_binary_file(), + pg_stat_file(). + + + + + + + + Use GRANT/REVOKE + to control access to lo_import() + and lo_export() (Michael Paquier, Tom Lane) + + + + Previously, only superusers were granted access to these functions. + + + + The compile-time option ALLOW_DANGEROUS_LO_FUNCTIONS + has been removed. + + + + + + + + Use view owner not session owner when + preventing non-password access to postgres_fdw + tables (Robert Haas) + + + + PostgreSQL only allows superusers to + access postgres_fdw tables without + passwords, e.g. via peer. Previously, the + session owner had to be a superuser to allow such access; now + the view owner is checked instead. + + + + + + + + Fix invalid locking permission check in SELECT FOR + UPDATE on views (Tom Lane) + + + + + + + + + + Server Configuration + + + + + + + + Add server setting ssl_passphrase_command + to allow supplying of the passphrase for SSL + key files (Peter Eisentraut) + + + + Also add ssl_passphrase_command_supports_reload + to specify whether the SSL configuration + should be reloaded and ssl_passphrase_command + called during a server configuration reload. + + + + + + + + Add storage parameter toast_tuple_target + to control the minimum tuple length before TOAST + storage will be considered (Simon Riggs) + + + + The default TOAST threshold has not been + changed. + + + + + + + + Allow server options related to memory and file sizes to be + specified in units of bytes (Beena Emerson) + + + + The new unit suffix is B. This is in addition to the + existing units kB, MB, GB + and TB. + + + + + + + + + + <link linkend="wal">Write-Ahead Log</link> (<acronym>WAL</acronym>) + + + + + + + + Allow the WAL file size to be set + during initdb (Beena Emerson) + + + + Previously, the 16MB default could only be changed at compile time. + + + + + + + + Retain WAL data for only a single checkpoint + (Simon Riggs) + + + + Previously, WAL was retained for two checkpoints. + + + + + + + + Fill the unused portion of force-switched WAL + segment files with zeros for improved compressibility (Chapman + Flack) + + + + + + + + + + + + Base Backup and Streaming Replication + + + + + + + + Replicate TRUNCATE activity when using logical + replication (Simon Riggs, Marco Nenciarini, Peter Eisentraut) + + + + + + + + Pass prepared transaction information to logical replication + subscribers (Nikhil Sontakke, Stas Kelvich) + + + + + + + + Exclude unlogged tables, temporary tables, and + pg_internal.init files from streaming base + backups (David Steele) + + + + There is no need to copy such files. 
+ + + + + + + + Allow checksums of heap pages to be verified during streaming base + backup (Michael Banck) + + + + + + + + Allow replication slots to be advanced programmatically, rather + than be consumed by subscribers (Petr Jelinek) + + + + This allows efficient advancement of replication slots when the + contents do not need to be consumed. This is performed by + pg_replication_slot_advance(). + + + + + + + + Add timeline information to the backup_label + file (Michael Paquier) + + + + Also add a check that the WAL timeline matches + the backup_label file's timeline. + + + + + + + + Add host and port connection information to the + pg_stat_wal_receiver system view + (Haribabu Kommi) + + + + + + + + + Utility Commands + + + + + + + + Allow ALTER TABLE to add a column with + a non-null default without doing a table rewrite (Andrew Dunstan, + Serge Rielau) + + + + This is enabled when the default value is a constant. + + + + + + + + Allow views to be locked by locking the underlying tables + (Yugo Nagata) + + + + + + + + Allow ALTER INDEX to set statistics-gathering + targets for expression indexes (Alexander Korotkov, Adrien Nayrat) + + + + In psql, \d+ now shows + the statistics target for indexes. + + + + + + + + Allow multiple tables to be specified in one + VACUUM or ANALYZE command + (Nathan Bossart) + + + + Also, if any table mentioned in VACUUM uses + a column list, then the ANALYZE keyword must be + supplied; previously, ANALYZE was implied in + such cases. + + + + + + + + Add parenthesized options syntax to ANALYZE + (Nathan Bossart) + + + + This is similar to the syntax supported by + VACUUM. + + + + + + + + Add CREATE AGGREGATE option to specify the + behavior of the aggregate's finalization function (Tom Lane) + + + + This is helpful for allowing user-defined aggregate functions to be + optimized and to work as window functions. + + + + + + + + + + Data Types + + + + + + + + Allow the creation of arrays of domains (Tom Lane) + + + + This also allows array_agg() to be used + on domains. + + + + + + + + Support domains over composite types (Tom Lane) + + + + Also allow PL/Perl, PL/Python, and PL/Tcl to handle + composite-domain function arguments and results. Also improve + PL/Python domain handling. + + + + + + + + Add casts from JSONB scalars to numeric and boolean data + types (Anastasia Lubennikova) + + + + + + + + + + Functions + + + + + + + + Add all window function framing + options specified by SQL:2011 (Oliver Ford, Tom Lane) + + + + Specifically, allow RANGE mode to use + PRECEDING and FOLLOWING to + select rows having grouping values within plus or minus the + specified offset. Add GROUPS mode to include plus + or minus the number of peer groups. Frame exclusion syntax was also + added. + + + + + + + + + Add SHA-2 family of hash functions (Peter + Eisentraut) + + + + Specifically, sha224(), + sha256(), sha384(), + sha512() were added. + + + + + + + + Add support for 64-bit non-cryptographic hash functions (Robert + Haas, Amul Sul) + + + + + + + + Allow to_char() and + to_timestamp() to specify the time zone's + offset from UTC in hours and minutes + (Nikita Glukhov, Andrew Dunstan) + + + + This is done with format specifications TZH + and TZM. 
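+
+     A short example of the new format specifications (the literal input
+     value is arbitrary):
+<programlisting>
+SELECT to_char(now(), 'YYYY-MM-DD HH24:MI:SS TZH:TZM');
+SELECT to_timestamp('2018-10-18 09:00:00 +05:30',
+                    'YYYY-MM-DD HH24:MI:SS TZH:TZM');
+</programlisting>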
+ + + + + + + + Add text search function websearch_to_tsquery() + that supports a query syntax similar to that used by web search + engines (Victor Drobny, Dmitry Ivanov) + + + + + + + + Add functions json(b)_to_tsvector() + to create a text search query for matching + JSON/JSONB values (Dmitry Dolgov) + + + + + + + + + + Server-Side Languages + + + + + + + + Add SQL-level procedures, which can start and commit their own + transactions (Peter Eisentraut) + + + + They are created with the new CREATE + PROCEDURE command and invoked via CALL. + + + + The new ALTER/DROP ROUTINE + commands allow altering/dropping of all routine-like objects, + including procedures, functions, and aggregates. + + + + Also, writing FUNCTION is now preferred + over writing PROCEDURE in CREATE + OPERATOR and CREATE TRIGGER, because the + referenced object must be a function not a procedure. However, the + old syntax is still accepted for compatibility. + + + + + + + + Add transaction control to PL/pgSQL, PL/Perl, PL/Python, PL/Tcl, + and SPI server-side languages (Peter Eisentraut) + + + + Transaction control is only available within top-transaction-level + procedures and nested DO and + CALL blocks that only contain other + DO and CALL blocks. + + + + + + + + Add the ability to define PL/pgSQL composite-type variables as not + null, constant, or with initial values (Tom Lane) + + + + + + + + Allow PL/pgSQL to handle changes to composite types (e.g. record, + row) that happen between the first and later function executions + in the same session (Tom Lane) + + + + Previously, such circumstances generated errors. + + + + + + + + Add extension jsonb_plpython to + transform JSONB to/from PL/Python types (Anthony + Bykov) + + + + + + + + Add extension jsonb_plperl to transform + JSONB to/from PL/Perl types (Anthony Bykov) + + + + + + + + + + Client Interfaces + + + + + + + + Change libpq to disable compression by default (Peter Eisentraut) + + + + Compression is already disabled in modern OpenSSL versions, so that + the libpq setting had no effect with such libraries. + + + + + + + + Add DO CONTINUE option + to ecpg's WHENEVER + statement (Vinayak Pokale) + + + + This generates a C continue statement, causing a + return to the top of the contained loop when the specified condition + occurs. + + + + + + + + Add an ecpg mode to enable Oracle + Pro*C-style handling of char arrays. + + + + This mode is enabled with . + + + + + + + + + + Client Applications + + + <xref linkend="app-psql"/> + + + + + + + + Add psql command \gdesc + to display the names and types of the columns in a query result + (Pavel Stehule) + + + + + + + + Add psql variables to report query + activity and errors (Fabien Coelho) + + + + Specifically, the new variables are ERROR, + SQLSTATE, ROW_COUNT, + LAST_ERROR_MESSAGE, and + LAST_ERROR_SQLSTATE. + + + + + + + + Allow psql to test for the existence + of a variable (Fabien Coelho) + + + + Specifically, the syntax :{?variable_name} allows + a variable's existence to be tested in an \if + statement. + + + + + + + + Allow environment variable PSQL_PAGER to control + psql's pager (Pavel Stehule) + + + + This allows psql's default pager to + be specified as a separate environment variable from the pager + for other applications. PAGER is still honored + if PSQL_PAGER is not set. 
+ + + + + + + + Make psql's \d+ command always show the table's + partitioning information (Amit Langote, Ashutosh Bapat) + + + + Previously, partition information would not be displayed for a + partitioned table if it had no partitions. Also indicate which + partitions are themselves partitioned. + + + + + + + + Ensure that psql reports the proper user + name when prompting for a password (Tom Lane) + + + + Previously, combinations of and a user name + embedded in a URI caused incorrect reporting. + Also suppress the user name before the password prompt when + is specified. + + + + + + + + Allow quit and exit to + exit psql when given with no prior input + (Bruce Momjian) + + + + Also print hints about how to exit when quit and + exit are used alone on a line while the input + buffer is not empty. Add a similar hint for help. + + + + + + + + Make psql hint at using control-D + when \q is entered alone on a line but ignored + (Bruce Momjian) + + + + For example, \q does not exit when supplied + in character strings. + + + + + + + + Improve tab completion for ALTER INDEX + RESET/SET (Masahiko Sawada) + + + + + + + + Add infrastructure to allow psql + to adapt its tab completion queries based on the server version + (Tom Lane) + + + + Previously, tab completion queries could fail against older servers. + + + + + + + + + + <link linkend="pgbench"><application>pgbench</application></link> + + + + + + + + Add pgbench expression support for + NULLs, booleans, and some functions and operators (Fabien Coelho) + + + + + + + + Add \if conditional support to + pgbench (Fabien Coelho) + + + + + + + + Allow the use of non-ASCII characters in + pgbench variable names (Fabien Coelho) + + + + + + + + Add pgbench option + to control the initialization steps + performed (Masahiko Sawada) + + + + + + + + Add an approximately Zipfian-distributed random generator to + pgbench (Alik Khilazhev) + + + + + + + + Allow the random seed to be set in + pgbench (Fabien Coelho) + + + + + + + + Allow pgbench to do exponentiation + with pow() and power() + (Raúl Marín Rodríguez) + + + + + + + + Add hashing functions to pgbench + (Ildar Musin) + + + + + + + + Make pgbench statistics more + accurate when using and + (Fabien Coelho) + + + + + + + + + + + + Server Applications + + + + + + + + Add an option to pg_basebackup + that creates a named replication slot (Michael Banck) + + + + The option creates + the named replication slot () + when the WAL streaming method + () is used. + + + + + + + + Allow initdb + to set group read access to the data directory (David Steele) + + + + This is accomplished with the new initdb option + . Administrators + can also set group permissions on the empty data + directory before running initdb. Server variable data_directory_mode + allows reading of data directory group permissions. + + + + + + + + Add pg_verify_checksums + tool to verify database checksums while offline (Magnus Hagander) + + + + + + + + Allow pg_resetwal + to change the WAL segment size via + (Nathan Bossart) + + + + + + + + Add long options to pg_resetwal + and pg_controldata (Nathan Bossart, + Peter Eisentraut) + + + + + + + + Add pg_receivewal + option to prevent synchronous + WAL writes, for testing (Michael Paquier) + + + + + + + + Add pg_receivewal option + to specify when WAL + receiving should stop (Michael Paquier) + + + + + + + + Allow pg_ctl + to send the SIGKILL signal to processes + (Andres Freund) + + + + This was previously unsupported due to concerns over possible misuse. 
+ + + + + + + + Reduce the number of files copied by pg_rewind + (Michael Paquier) + + + + + + + + Prevent pg_rewind from running as + root (Michael Paquier) + + + + + + + + <link linkend="app-pgdump"><application>pg_dump</application></link>, + <link linkend="app-pg-dumpall"><application>pg_dumpall</application></link>, + <link linkend="app-pgrestore"><application>pg_restore</application></link> + + + + + + + + Add pg_dumpall option + to control output encoding + (Michael Paquier) + + + + pg_dump already had this option. + + + + + + + + Add pg_dump option + to force loading of + data into the partition's root table, rather than the original + partition (Rushabh Lathia) + + + + This is useful if the system to be loaded to has different collation + definitions or endianness, possibly requiring rows to be stored in + different partitions than previously. + + + + + + + + Add an option to suppress dumping and restoring database object + comments (Robins Tharakan) + + + + The new pg_dump, + pg_dumpall, and + pg_restore option is + . + + + + + + + + + + + + Source Code + + + + + + + Add PGXS support for installing include + files (Andrew Gierth) + + + + This supports creating extension modules that depend on other + modules. Formerly there was no easy way for the dependent module to + find the referenced one's include files. Several + existing contrib modules that define data types + have been adjusted to install relevant files. Also, PL/Perl and + PL/Python now install their include files, to support creation of + transform modules for those languages. + + + + + + + + Install errcodes.txt to allow extensions to access + the list of error codes known to PostgreSQL + (Thomas Munro) + + + + + + + + Convert documentation to DocBook XML (Peter + Eisentraut, Alexander Lakhin, Jürgen Purtz) + + + + The file names still use an sgml extension + for compatibility with back branches. + + + + + + + + Use stdbool.h to define type bool + on platforms where it's suitable, which is most (Peter Eisentraut) + + + + This eliminates a coding hazard for extension modules that need + to include stdbool.h. + + + + + + + + Overhaul the way that initial system catalog contents are defined + (John Naylor) + + + + The initial data is now represented in Perl data structures, making + it much easier to manipulate mechanically. + + + + + + + + Prevent extensions from creating custom server parameters that + take a quoted list of values (Tom Lane) + + + + This cannot be supported at present because knowledge of the + parameter's property would be required even before the extension is + loaded. + + + + + + + + Add ability to use channel binding when using SCRAM + authentication (Michael Paquier) + + + + Channel binding is intended to prevent man-in-the-middle attacks, but + SCRAM cannot prevent them unless it can be forced + to be active. Unfortunately, there is no way to do that in libpq. + Support for it is expected in future versions of libpq and in + interfaces not built using libpq, e.g. JDBC. + + + + + + + + Allow background workers to attach to databases that normally + disallow connections (Magnus Hagander) + + + + + + + + Add support for hardware CRC calculations + on ARMv8 (Yuqi Gu, Heikki Linnakangas, + Thomas Munro) + + + + + + + + Speed up lookups of built-in functions by OID (Andres Freund) + + + + The previous binary search has been replaced by a lookup array. 
+ + + + + + + + Speed up construction of query results (Andres Freund) + + + + + + + + Improve speed of access to system caches (Andres Freund) + + + + + + + + Add a generational memory allocator which is optimized for serial + allocation/deallocation (Tomas Vondra) + + + + This reduces memory usage for logical decoding. + + + + + + + + Make the computation of + pg_class.reltuples + by VACUUM consistent with its computation + by ANALYZE (Tomas Vondra) + + + + + + + + Update to use perltidy version + 20170521 (Tom Lane, Peter Eisentraut) + + + + + + + + + Additional Modules + + + + + + + + Allow extension pg_prewarm + to restore the previous shared buffer contents on startup (Mithun + Cy, Robert Haas) + + + + This is accomplished by having pg_prewarm store + the shared buffers' relation and block number data to disk + occasionally during server operation, and at shutdown. + + + + + + + + Add pg_trgm + function strict_word_similarity() to compute + the similarity of whole words (Alexander Korotkov) + + + + The function word_similarity() already + existed for this purpose, but it was designed to find similar + parts of words, while strict_word_similarity() + computes the similarity to whole words. + + + + + + + + Allow creation of indexes that can be used by LIKE + comparisons + on citext columns + (Alexey Chernyshov) + + + + To do this, the index must be created using the + citext_pattern_ops operator class. + + + + + + + + Allow btree_gin + to index bool, bpchar, name + and uuid data types (Matheus Oliveira) + + + + + + + + Allow cube + and seg + extensions to perform index-only scans using GiST indexes + (Andrey Borodin) + + + + + + + + Allow retrieval of negative cube coordinates using + the ~> operator (Alexander Korotkov) + + + + This is useful for KNN-GiST searches when looking for coordinates in + descending order. + + + + + + + + Add Vietnamese letter handling to the unaccent + extension (Dang Minh Huong, Michael Paquier) + + + + + + + + Enhance amcheck + to check that each heap tuple has an index entry (Peter Geoghegan) + + + + + + + + Have adminpack + use the new default file system access roles (Stephen Frost) + + + + Previously, only superusers could call adminpack + functions; now role permissions are checked. + + + + + + + + Widen pg_stat_statement's query ID + to 64 bits (Robert Haas) + + + + This greatly reduces the chance of query ID hash collisions. + The query ID can now potentially display as a negative value. + + + + + + + + Remove the contrib/start-scripts/osx scripts + since they are no longer recommended + (use contrib/start-scripts/macos instead) + (Tom Lane) + + + + + + + + Remove the chkpass extension (Peter Eisentraut) + + + + This extension is no longer considered to be a usable security tool + or example of how to write an extension. + + + + + + + + + + + + Acknowledgments + + + The following individuals (in alphabetical order) have contributed to this + release as patch authors, committers, reviewers, testers, or reporters of + issues. 
+ + + + Abhijit Menon-Sen + Adam Bielanski + Adam Brightwell + Adam Brusselback + Aditya Toshniwal + Adrián Escoms + Adrien Nayrat + Akos Vandra + Aleksander Alekseev + Aleksandr Parfenov + Alexander Korotkov + Alexander Kukushkin + Alexander Kuzmenkov + Alexander Lakhin + Alexandre Garcia + Alexey Bashtanov + Alexey Chernyshov + Alexey Kryuchkov + Alik Khilazhev + Álvaro Herrera + Amit Kapila + Amit Khandekar + Amit Langote + Amul Sul + Anastasia Lubennikova + Andreas Joseph Krogh + Andreas Karlsson + Andreas Seltenreich + André Hänsel + Andrei Gorita + Andres Freund + Andrew Dunstan + Andrew Fletcher + Andrew Gierth + Andrew Grossman + Andrew Krasichkov + Andrey Borodin + Andrey Lizenko + Andy Abelisto + Anthony Bykov + Antoine Scemama + Anton Dignös + Antonin Houska + Arseniy Sharoglazov + Arseny Sher + Arthur Zakirov + Ashutosh Bapat + Ashutosh Sharma + Ashwin Agrawal + Asim Praveen + Atsushi Torikoshi + Badrul Chowdhury + Balazs Szilfai + Basil Bourque + Beena Emerson + Ben Chobot + Benjamin Coutu + Bernd Helmle + Blaz Merela + Brad DeJong + Brent Dearth + Brian Cloutier + Bruce Momjian + Catalin Iacob + Chad Trabant + Chapman Flack + Christian Duta + Christian Ullrich + Christoph Berg + Christoph Dreis + Christophe Courtois + Christopher Jones + Claudio Freire + Clayton Salem + Craig Ringer + Dagfinn Ilmari Mannsåker + Dan Vianello + Dan Watson + Dang Minh Huong + Daniel Gustafsson + Daniel Vérité + Daniel Westermann + Daniel Wood + Darafei Praliaskouski + Dave Cramer + Dave Page + David Binderman + David Carlier + David Fetter + David G. Johnston + David Gould + David Hinkle + David Pereiro Lagares + David Rader + David Rowley + David Steele + Davy Machado + Dean Rasheed + Dian Fay + Dilip Kumar + Dmitriy Sarafannikov + Dmitry Dolgov + Dmitry Ivanov + Dmitry Shalashov + Don Seiler + Doug Doole + Doug Rady + Edmund Horner + Eiji Seki + Elvis Pranskevichus + Emre Hasegeli + Erik Rijkers + Erwin Brandstetter + Etsuro Fujita + Euler Taveira + Everaldo Canuto + Fabien Coelho + Fabrízio de Royes Mello + Feike Steenbergen + Frits Jalvingh + Fujii Masao + Gao Zengqi + Gianni Ciolli + Greg Stark + Gunnlaugur Thor Briem + Guo Xiang Tan + Hadi Moshayedi + Hailong Li + Haribabu Kommi + Heath Lord + Heikki Linnakangas + Hugo Mercier + Igor Korot + Igor Neyman + Ildar Musin + Ildus Kurbangaliev + Ioseph Kim + Jacob Champion + Jaime Casanova + Jakob Egger + Jean-Pierre Pelletier + Jeevan Chalke + Jeevan Ladhe + Jeff Davis + Jeff Janes + Jeremy Evans + Jeremy Finzel + Jeremy Schneider + Jesper Pedersen + Jim Nasby + Jimmy Yih + Jing Wang + Jobin Augustine + Joe Conway + John Gorman + John Naylor + Jon Nelson + Jon Wolski + Jonathan Allen + Jonathan S. 
Katz + Julien Rouhaud + Jürgen Purtz + Justin Pryzby + KaiGai Kohei + Kaiting Chen + Karl Lehenbauer + Keith Fiske + Kevin Bloch + Kha Nguyen + Kim Rose Carlsen + Konstantin Knizhnik + Kuntal Ghosh + Kyle Samson + Kyotaro Horiguchi + Lætitia Avrot + Lars Kanis + Laurenz Albe + Leonardo Cecchi + Liudmila Mantrova + Lixian Zou + Lloyd Albin + Luca Ferrari + Lucas Fairchild + Lukas Eder + Lukas Fittl + Magnus Hagander + Mai Peng + Maksim Milyutin + Maksym Boguk + Mansur Galiev + Marc Dilger + Marco Nenciarini + Marina Polyakova + Mario de Frutos Dieguez + Mark Cave-Ayland + Mark Dilger + Mark Wood + Marko Tiikkaja + Markus Winand + Martín Marqués + Masahiko Sawada + Matheus Oliveira + Matthew Stickney + Metin Doslu + Michael Banck + Michael Meskes + Michael Paquier + Michail Nikolaev + Mike Blackwell + Minh-Quan Tran + Mithun Cy + Morgan Owens + Nathan Bossart + Nathan Wagner + Neil Conway + Nick Barnes + Nicolas Thauvin + Nikhil Sontakke + Nikita Glukhov + Nikolay Shaplov + Noah Misch + Noriyoshi Shinoda + Oleg Bartunov + Oleg Samoilov + Oliver Ford + Pan Bian + Pascal Legrand + Patrick Hemmer + Patrick Krecker + Paul Bonaud + Paul Guo + Paul Ramsey + Pavan Deolasee + Pavan Maddamsetti + Pavel Golub + Pavel Stehule + Peter Eisentraut + Peter Geoghegan + Petr Jelínek + Petru-Florin Mihancea + Phil Florent + Philippe Beaudoin + Pierre Ducroquet + Piotr Stefaniak + Prabhat Sahu + Pu Qun + QL Zhuo + Rafia Sabih + Rahila Syed + Rainer Orth + Rajkumar Raghuwanshi + Raúl Marín Rodríguez + Regina Obe + Richard Yen + Robert Haas + Robins Tharakan + Rod Taylor + Rushabh Lathia + Ryan Murphy + Sahap Asci + Samuel Horwitz + Scott Ure + Sean Johnston + Shao Bret + Shay Rojansky + Shubham Barai + Simon Riggs + Simone Gotti + Sivasubramanian Ramasubramanian + Stas Kelvich + Stefan Kaltenbrunner + Stephen Froehlich + Stephen Frost + Steve Singer + Steven Winfield + Sven Kunze + Taiki Kondo + Takayuki Tsunakawa + Takeshi Ideriha + Tatsuo Ishii + Tatsuro Yamada + Teodor Sigaev + Thom Brown + Thomas Kellerer + Thomas Munro + Thomas Reiss + Tobias Bussmann + Todd A. Cook + Tom Kazimiers + Tom Lane + Tomas Vondra + Tomonari Katsumata + Torsten Grust + Tushar Ahuja + Vaishnavi Prabakaran + Vasundhar Boddapati + Victor Drobny + Victor Wagner + Victor Yegorov + Vik Fearing + Vinayak Pokale + Vincent Lachenal + Vitaliy Garnashevich + Vitaly Burovoy + Vladimir Baranoff + Xin Zhang + Yi Wen Wong + Yorick Peterse + Yugo Nagata + Yuqi Gu + Yura Sokolov + Yves Goergen + Zhou Digoal + + + + diff --git a/doc/src/sgml/release-12.sgml b/doc/src/sgml/release-12.sgml new file mode 100644 index 0000000000..b06fef287d --- /dev/null +++ b/doc/src/sgml/release-12.sgml @@ -0,0 +1,11 @@ + + + + + Release 12 + + JIT is enabled by default in this release. It was disabled by + default in PG 11, so we document is enablement here. + + + diff --git a/doc/src/sgml/release-7.4.sgml b/doc/src/sgml/release-7.4.sgml index bc4f4e18d0..a67945a42b 100644 --- a/doc/src/sgml/release-7.4.sgml +++ b/doc/src/sgml/release-7.4.sgml @@ -12,11 +12,11 @@ This release contains a variety of fixes from 7.4.29. For information about new features in the 7.4 major release, see - . + . - This is expected to be the last PostgreSQL release + This is expected to be the last PostgreSQL release in the 7.4.X series. Users are encouraged to update to a newer release branch soon. @@ -27,7 +27,7 @@ A dump/restore is not required for those running 7.4.X. However, if you are upgrading from a version earlier than 7.4.26, - see . + see . 
@@ -47,7 +47,7 @@ This change prevents security problems that can be caused by subverting Perl or Tcl code that will be executed later in the same session under another SQL user identity (for example, within a SECURITY - DEFINER function). Most scripting languages offer numerous ways that + DEFINER function). Most scripting languages offer numerous ways that that might be done, such as redefining standard functions or operators called by the target function. Without this change, any SQL user with Perl or Tcl language usage rights can do essentially anything with the @@ -76,7 +76,7 @@ - Prevent possible crashes in pg_get_expr() by disallowing + Prevent possible crashes in pg_get_expr() by disallowing it from being called with an argument that is not one of the system catalog columns it's intended to be used with (Heikki Linnakangas, Tom Lane) @@ -97,7 +97,7 @@ Take care to fsync the contents of lockfiles (both - postmaster.pid and the socket lockfile) while writing them + postmaster.pid and the socket lockfile) while writing them (Tom Lane) @@ -111,7 +111,7 @@ - Improve contrib/dblink's handling of tables containing + Improve contrib/dblink's handling of tables containing dropped columns (Tom Lane) @@ -119,7 +119,7 @@ Fix connection leak after duplicate connection name - errors in contrib/dblink (Itagaki Takahiro) + errors in contrib/dblink (Itagaki Takahiro) @@ -146,11 +146,11 @@ This release contains a variety of fixes from 7.4.28. For information about new features in the 7.4 major release, see - . + . - The PostgreSQL community will stop releasing updates + The PostgreSQL community will stop releasing updates for the 7.4.X release series in July 2010. Users are encouraged to update to a newer release branch soon. @@ -161,7 +161,7 @@ A dump/restore is not required for those running 7.4.X. However, if you are upgrading from a version earlier than 7.4.26, - see . + see . @@ -173,19 +173,19 @@ - Enforce restrictions in plperl using an opmask applied to - the whole interpreter, instead of using Safe.pm + Enforce restrictions in plperl using an opmask applied to + the whole interpreter, instead of using Safe.pm (Tim Bunce, Andrew Dunstan) - Recent developments have convinced us that Safe.pm is too - insecure to rely on for making plperl trustable. This - change removes use of Safe.pm altogether, in favor of using + Recent developments have convinced us that Safe.pm is too + insecure to rely on for making plperl trustable. This + change removes use of Safe.pm altogether, in favor of using a separate interpreter with an opcode mask that is always applied. Pleasant side effects of the change include that it is now possible to - use Perl's strict pragma in a natural way in - plperl, and that Perl's $a and $b + use Perl's strict pragma in a natural way in + plperl, and that Perl's $a and $b variables work as expected in sort routines, and that function compilation is significantly faster. (CVE-2010-1169) @@ -194,19 +194,19 @@ Prevent PL/Tcl from executing untrustworthy code from - pltcl_modules (Tom) + pltcl_modules (Tom) PL/Tcl's feature for autoloading Tcl code from a database table could be exploited for trojan-horse attacks, because there was no restriction on who could create or insert into that table. This change - disables the feature unless pltcl_modules is owned by a + disables the feature unless pltcl_modules is owned by a superuser. 
(However, the permissions on the table are not checked, so installations that really need a less-than-secure modules table can still grant suitable privileges to trusted non-superusers.) Also, - prevent loading code into the unrestricted normal Tcl - interpreter unless we are really going to execute a pltclu + prevent loading code into the unrestricted normal Tcl + interpreter unless we are really going to execute a pltclu function. (CVE-2010-1170) @@ -219,10 +219,10 @@ Previously, if an unprivileged user ran ALTER USER ... RESET - ALL for himself, or ALTER DATABASE ... RESET ALL for + ALL for himself, or ALTER DATABASE ... RESET ALL for a database he owns, this would remove all special parameter settings for the user or database, even ones that are only supposed to be - changeable by a superuser. Now, the ALTER will only + changeable by a superuser. Now, the ALTER will only remove the parameters that the user has permission to change. @@ -230,7 +230,7 @@ Avoid possible crash during backend shutdown if shutdown occurs - when a CONTEXT addition would be made to log entries (Tom) + when a CONTEXT addition would be made to log entries (Tom) @@ -242,7 +242,7 @@ - Update PL/Perl's ppport.h for modern Perl versions + Update PL/Perl's ppport.h for modern Perl versions (Andrew) @@ -255,7 +255,7 @@ - Ensure that contrib/pgstattuple functions respond to cancel + Ensure that contrib/pgstattuple functions respond to cancel interrupts promptly (Tatsuhito Kasahara) @@ -263,7 +263,7 @@ Make server startup deal properly with the case that - shmget() returns EINVAL for an existing + shmget() returns EINVAL for an existing shared memory segment (Tom) @@ -290,11 +290,11 @@ This release contains a variety of fixes from 7.4.27. For information about new features in the 7.4 major release, see - . + . - The PostgreSQL community will stop releasing updates + The PostgreSQL community will stop releasing updates for the 7.4.X release series in July 2010. Users are encouraged to update to a newer release branch soon. @@ -305,7 +305,7 @@ A dump/restore is not required for those running 7.4.X. However, if you are upgrading from a version earlier than 7.4.26, - see . + see . @@ -317,7 +317,7 @@ - Add new configuration parameter ssl_renegotiation_limit to + Add new configuration parameter ssl_renegotiation_limit to control how often we do session key renegotiation for an SSL connection (Magnus) @@ -332,8 +332,8 @@ - Make substring() for bit types treat any negative - length as meaning all the rest of the string (Tom) + Make substring() for bit types treat any negative + length as meaning all the rest of the string (Tom) @@ -351,17 +351,17 @@ - When reading pg_hba.conf and related files, do not treat - @something as a file inclusion request if the @ - appears inside quote marks; also, never treat @ by itself + When reading pg_hba.conf and related files, do not treat + @something as a file inclusion request if the @ + appears inside quote marks; also, never treat @ by itself as a file inclusion request (Tom) This prevents erratic behavior if a role or database name starts with - @. If you need to include a file whose path name + @. If you need to include a file whose path name contains spaces, you can still do so, but you must write - @"/path to/file" rather than putting the quotes around + @"/path to/file" rather than putting the quotes around the whole construct. 
@@ -369,7 +369,7 @@ Prevent infinite loop on some platforms if a directory is named as - an inclusion target in pg_hba.conf and related files + an inclusion target in pg_hba.conf and related files (Tom) @@ -381,14 +381,14 @@ The only known symptom of this oversight is that the Tcl - clock command misbehaves if using Tcl 8.5 or later. + clock command misbehaves if using Tcl 8.5 or later. - Prevent crash in contrib/dblink when too many key - columns are specified to a dblink_build_sql_* function + Prevent crash in contrib/dblink when too many key + columns are specified to a dblink_build_sql_* function (Rushabh Lathia, Joe Conway) @@ -409,7 +409,7 @@ This release contains a variety of fixes from 7.4.26. For information about new features in the 7.4 major release, see - . + . @@ -418,7 +418,7 @@ A dump/restore is not required for those running 7.4.X. However, if you are upgrading from a version earlier than 7.4.26, - see . + see . @@ -460,14 +460,14 @@ - Prevent signals from interrupting VACUUM at unsafe times + Prevent signals from interrupting VACUUM at unsafe times (Alvaro) - This fix prevents a PANIC if a VACUUM FULL is canceled + This fix prevents a PANIC if a VACUUM FULL is canceled after it's already committed its tuple movements, as well as transient - errors if a plain VACUUM is interrupted after having + errors if a plain VACUUM is interrupted after having truncated the table. @@ -486,7 +486,7 @@ - Fix very rare crash in inet/cidr comparisons (Chris + Fix very rare crash in inet/cidr comparisons (Chris Mikkelson) @@ -498,7 +498,7 @@ The previous code is known to fail with the combination of the Linux - pam_krb5 PAM module with Microsoft Active Directory as the + pam_krb5 PAM module with Microsoft Active Directory as the domain controller. It might have problems elsewhere too, since it was making unjustified assumptions about what arguments the PAM stack would pass to it. @@ -507,7 +507,7 @@ - Make the postmaster ignore any application_name parameter in + Make the postmaster ignore any application_name parameter in connection request packets, to improve compatibility with future libpq versions (Tom) @@ -529,7 +529,7 @@ This release contains a variety of fixes from 7.4.25. For information about new features in the 7.4 major release, see - . + . @@ -537,10 +537,10 @@ A dump/restore is not required for those running 7.4.X. - However, if you have any hash indexes on interval columns, - you must REINDEX them after updating to 7.4.26. + However, if you have any hash indexes on interval columns, + you must REINDEX them after updating to 7.4.26. Also, if you are upgrading from a version earlier than 7.4.11, - see . + see . @@ -552,14 +552,14 @@ - Disallow RESET ROLE and RESET SESSION - AUTHORIZATION inside security-definer functions (Tom, Heikki) + Disallow RESET ROLE and RESET SESSION + AUTHORIZATION inside security-definer functions (Tom, Heikki) This covers a case that was missed in the previous patch that - disallowed SET ROLE and SET SESSION - AUTHORIZATION inside security-definer functions. + disallowed SET ROLE and SET SESSION + AUTHORIZATION inside security-definer functions. (See CVE-2007-6600) @@ -573,21 +573,21 @@ - Fix hash calculation for data type interval (Tom) + Fix hash calculation for data type interval (Tom) This corrects wrong results for hash joins on interval values. It also changes the contents of hash indexes on interval columns. - If you have any such indexes, you must REINDEX them + If you have any such indexes, you must REINDEX them after updating. 
- Fix overflow for INTERVAL 'x ms' - when x is more than 2 million and integer + Fix overflow for INTERVAL 'x ms' + when x is more than 2 million and integer datetimes are in use (Alex Hunsaker) @@ -604,7 +604,7 @@ - Fix money data type to work in locales where currency + Fix money data type to work in locales where currency amounts have no fractional digits, e.g. Japan (Itagaki Takahiro) @@ -612,7 +612,7 @@ Properly round datetime input like - 00:12:57.9999999999999999999999999999 (Tom) + 00:12:57.9999999999999999999999999999 (Tom) @@ -631,8 +631,8 @@ - Improve robustness of libpq's code to recover - from errors during COPY FROM STDIN (Tom) + Improve robustness of libpq's code to recover + from errors during COPY FROM STDIN (Tom) @@ -659,7 +659,7 @@ This release contains a variety of fixes from 7.4.24. For information about new features in the 7.4 major release, see - . + . @@ -668,7 +668,7 @@ A dump/restore is not required for those running 7.4.X. However, if you are upgrading from a version earlier than 7.4.11, - see . + see . @@ -687,7 +687,7 @@ This change extends fixes made in the last two minor releases for related failure scenarios. The previous fixes were narrowly tailored for the original problem reports, but we have now recognized that - any error thrown by an encoding conversion function could + any error thrown by an encoding conversion function could potentially lead to infinite recursion while trying to report the error. The solution therefore is to disable translation and encoding conversion and report the plain-ASCII form of any error message, @@ -698,7 +698,7 @@ - Disallow CREATE CONVERSION with the wrong encodings + Disallow CREATE CONVERSION with the wrong encodings for the specified conversion function (Heikki) @@ -711,14 +711,14 @@ - Fix core dump when to_char() is given format codes that + Fix core dump when to_char() is given format codes that are inappropriate for the type of the data argument (Tom) - Add MUST (Mauritius Island Summer Time) to the default list + Add MUST (Mauritius Island Summer Time) to the default list of known timezone abbreviations (Xavier Bugaud) @@ -739,7 +739,7 @@ This release contains a variety of fixes from 7.4.23. For information about new features in the 7.4 major release, see - . + . @@ -748,7 +748,7 @@ A dump/restore is not required for those running 7.4.X. However, if you are upgrading from a version earlier than 7.4.11, - see . + see . @@ -760,13 +760,13 @@ - Improve handling of URLs in headline() function (Teodor) + Improve handling of URLs in headline() function (Teodor) - Improve handling of overlength headlines in headline() + Improve handling of overlength headlines in headline() function (Teodor) @@ -781,30 +781,30 @@ - Avoid unnecessary locking of small tables in VACUUM + Avoid unnecessary locking of small tables in VACUUM (Heikki) - Fix uninitialized variables in contrib/tsearch2's - get_covers() function (Teodor) + Fix uninitialized variables in contrib/tsearch2's + get_covers() function (Teodor) - Fix bug in to_char()'s handling of TH + Fix bug in to_char()'s handling of TH format codes (Andreas Scherbaum) - Make all documentation reference pgsql-bugs and/or - pgsql-hackers as appropriate, instead of the - now-decommissioned pgsql-ports and pgsql-patches + Make all documentation reference pgsql-bugs and/or + pgsql-hackers as appropriate, instead of the + now-decommissioned pgsql-ports and pgsql-patches mailing lists (Tom) @@ -825,7 +825,7 @@ This release contains a variety of fixes from 7.4.22. 
For information about new features in the 7.4 major release, see - . + . @@ -834,7 +834,7 @@ A dump/restore is not required for those running 7.4.X. However, if you are upgrading from a version earlier than 7.4.11, - see . + see . @@ -852,7 +852,7 @@ We have addressed similar issues before, but it would still fail if - the character has no equivalent message itself couldn't + the character has no equivalent message itself couldn't be converted. The fix is to disable localization and send the plain ASCII error message when we detect such a situation. @@ -868,14 +868,14 @@ Fix improper display of fractional seconds in interval values when - using a non-ISO datestyle in an build (Ron Mayer) - Ensure SPI_getvalue and SPI_getbinval + Ensure SPI_getvalue and SPI_getbinval behave correctly when the passed tuple and tuple descriptor have different numbers of columns (Tom) @@ -889,7 +889,7 @@ - Fix ecpg's parsing of CREATE USER (Michael) + Fix ecpg's parsing of CREATE USER (Michael) @@ -909,7 +909,7 @@ This release contains a variety of fixes from 7.4.21. For information about new features in the 7.4 major release, see - . + . @@ -918,7 +918,7 @@ A dump/restore is not required for those running 7.4.X. However, if you are upgrading from a version earlier than 7.4.11, - see . + see . @@ -944,27 +944,27 @@ Fix bug in backwards scanning of a cursor on a SELECT DISTINCT - ON query (Tom) + ON query (Tom) - Fix planner to estimate that GROUP BY expressions yielding + Fix planner to estimate that GROUP BY expressions yielding boolean results always result in two groups, regardless of the expressions' contents (Tom) This is very substantially more accurate than the regular GROUP - BY estimate for certain boolean tests like col - IS NULL. + BY estimate for certain boolean tests like col + IS NULL. - Improve pg_dump and pg_restore's + Improve pg_dump and pg_restore's error reporting after failure to send a SQL command (Tom) @@ -985,7 +985,7 @@ This release contains one serious bug fix over 7.4.20. For information about new features in the 7.4 major release, see - . + . @@ -994,7 +994,7 @@ A dump/restore is not required for those running 7.4.X. However, if you are upgrading from a version earlier than 7.4.11, - see . + see . @@ -1006,18 +1006,18 @@ - Make pg_get_ruledef() parenthesize negative constants (Tom) + Make pg_get_ruledef() parenthesize negative constants (Tom) Before this fix, a negative constant in a view or rule might be dumped - as, say, -42::integer, which is subtly incorrect: it should - be (-42)::integer due to operator precedence rules. + as, say, -42::integer, which is subtly incorrect: it should + be (-42)::integer due to operator precedence rules. Usually this would make little difference, but it could interact with another recent patch to cause - PostgreSQL to reject what had been a valid - SELECT DISTINCT view query. Since this could result in - pg_dump output failing to reload, it is being treated + PostgreSQL to reject what had been a valid + SELECT DISTINCT view query. Since this could result in + pg_dump output failing to reload, it is being treated as a high-priority fix. The only released versions in which dump output is actually incorrect are 8.3.1 and 8.2.7. @@ -1039,7 +1039,7 @@ This release contains a variety of fixes from 7.4.19. For information about new features in the 7.4 major release, see - . + . @@ -1048,7 +1048,7 @@ A dump/restore is not required for those running 7.4.X. However, if you are upgrading from a version earlier than 7.4.11, - see . + see . 
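The operator-precedence point behind the pg_get_ruledef() fix in the 7.4.21 changes just above can be seen in a short sketch. Both forms happen to yield the same value for a plain constant; the difference is only in how the expression parses and is later dumped.

-- the cast binds more tightly than the unary minus, so this is -(42::integer)
SELECT -42::integer;

-- the parenthesized form is what the corrected pg_get_ruledef() emits
SELECT (-42)::integer;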
@@ -1061,7 +1061,7 @@ Fix conversions between ISO-8859-5 and other encodings to handle - Cyrillic Yo characters (e and E with + Cyrillic Yo characters (e and E with two dots) (Sergey Burladyan) @@ -1076,7 +1076,7 @@ This could lead to failures in which two apparently identical literal values were not seen as equal, resulting in the parser complaining - about unmatched ORDER BY and DISTINCT + about unmatched ORDER BY and DISTINCT expressions. @@ -1084,36 +1084,36 @@ Fix a corner case in regular-expression substring matching - (substring(string from - pattern)) (Tom) + (substring(string from + pattern)) (Tom) The problem occurs when there is a match to the pattern overall but the user has specified a parenthesized subexpression and that subexpression hasn't got a match. An example is - substring('foo' from 'foo(bar)?'). - This should return NULL, since (bar) isn't matched, but + substring('foo' from 'foo(bar)?'). + This should return NULL, since (bar) isn't matched, but it was mistakenly returning the whole-pattern match instead (ie, - foo). + foo). - Fix incorrect result from ecpg's - PGTYPEStimestamp_sub() function (Michael) + Fix incorrect result from ecpg's + PGTYPEStimestamp_sub() function (Michael) - Fix DatumGetBool macro to not fail with gcc + Fix DatumGetBool macro to not fail with gcc 4.3 (Tom) - This problem affects old style (V0) C functions that + This problem affects old style (V0) C functions that return boolean. The fix is already in 8.3, but the need to back-patch it was not realized at the time. @@ -1121,21 +1121,21 @@ - Fix longstanding LISTEN/NOTIFY + Fix longstanding LISTEN/NOTIFY race condition (Tom) In rare cases a session that had just executed a - LISTEN might not get a notification, even though + LISTEN might not get a notification, even though one would be expected because the concurrent transaction executing - NOTIFY was observed to commit later. + NOTIFY was observed to commit later. A side effect of the fix is that a transaction that has executed - a not-yet-committed LISTEN command will not see any - row in pg_listener for the LISTEN, + a not-yet-committed LISTEN command will not see any + row in pg_listener for the LISTEN, should it choose to look; formerly it would have. This behavior was never documented one way or the other, but it is possible that some applications depend on the old behavior. @@ -1144,8 +1144,8 @@ - Fix display of constant expressions in ORDER BY - and GROUP BY (Tom) + Fix display of constant expressions in ORDER BY + and GROUP BY (Tom) @@ -1157,7 +1157,7 @@ - Fix libpq to handle NOTICE messages correctly + Fix libpq to handle NOTICE messages correctly during COPY OUT (Tom) @@ -1185,7 +1185,7 @@ This release contains a variety of fixes from 7.4.18, including fixes for significant security issues. For information about new features in the 7.4 major release, see - . + . @@ -1194,7 +1194,7 @@ A dump/restore is not required for those running 7.4.X. However, if you are upgrading from a version earlier than 7.4.11, - see . + see . @@ -1207,7 +1207,7 @@ Prevent functions in indexes from executing with the privileges of - the user running VACUUM, ANALYZE, etc (Tom) + the user running VACUUM, ANALYZE, etc (Tom) @@ -1218,18 +1218,18 @@ (Note that triggers, defaults, check constraints, etc. pose the same type of risk.) 
But functions in indexes pose extra danger because they will be executed by routine maintenance operations - such as VACUUM FULL, which are commonly performed + such as VACUUM FULL, which are commonly performed automatically under a superuser account. For example, a nefarious user can execute code with superuser privileges by setting up a trojan-horse index definition and waiting for the next routine vacuum. The fix arranges for standard maintenance operations - (including VACUUM, ANALYZE, REINDEX, - and CLUSTER) to execute as the table owner rather than + (including VACUUM, ANALYZE, REINDEX, + and CLUSTER) to execute as the table owner rather than the calling user, using the same privilege-switching mechanism already - used for SECURITY DEFINER functions. To prevent bypassing + used for SECURITY DEFINER functions. To prevent bypassing this security measure, execution of SET SESSION - AUTHORIZATION and SET ROLE is now forbidden within a - SECURITY DEFINER context. (CVE-2007-6600) + AUTHORIZATION and SET ROLE is now forbidden within a + SECURITY DEFINER context. (CVE-2007-6600) @@ -1249,13 +1249,13 @@ - Require non-superusers who use /contrib/dblink to use only + Require non-superusers who use /contrib/dblink to use only password authentication, as a security measure (Joe) The fix that appeared for this in 7.4.18 was incomplete, as it plugged - the hole for only some dblink functions. (CVE-2007-6601, + the hole for only some dblink functions. (CVE-2007-6601, CVE-2007-3278) @@ -1263,13 +1263,13 @@ Fix planner failure in some cases of WHERE false AND var IN - (SELECT ...) (Tom) + (SELECT ...) (Tom) - Fix potential crash in translate() when using a multibyte + Fix potential crash in translate() when using a multibyte database encoding (Tom) @@ -1282,42 +1282,42 @@ - ecpg parser fixes (Michael) + ecpg parser fixes (Michael) - Make contrib/tablefunc's crosstab() handle + Make contrib/tablefunc's crosstab() handle NULL rowid as a category in its own right, rather than crashing (Joe) - Fix tsvector and tsquery output routines to + Fix tsvector and tsquery output routines to escape backslashes correctly (Teodor, Bruce) - Fix crash of to_tsvector() on huge input strings (Teodor) + Fix crash of to_tsvector() on huge input strings (Teodor) - Require a specific version of Autoconf to be used - when re-generating the configure script (Peter) + Require a specific version of Autoconf to be used + when re-generating the configure script (Peter) This affects developers and packagers only. The change was made to prevent accidental use of untested combinations of - Autoconf and PostgreSQL versions. + Autoconf and PostgreSQL versions. You can remove the version check if you really want to use a - different Autoconf version, but it's + different Autoconf version, but it's your responsibility whether the result works or not. @@ -1338,7 +1338,7 @@ This release contains fixes from 7.4.17. For information about new features in the 7.4 major release, see - . + . @@ -1347,7 +1347,7 @@ A dump/restore is not required for those running 7.4.X. However, if you are upgrading from a version earlier than 7.4.11, - see . + see . @@ -1360,40 +1360,40 @@ Prevent index corruption when a transaction inserts rows and - then aborts close to the end of a concurrent VACUUM + then aborts close to the end of a concurrent VACUUM on the same table (Tom) - Make CREATE DOMAIN ... DEFAULT NULL work properly (Tom) + Make CREATE DOMAIN ... 
DEFAULT NULL work properly (Tom) - Fix excessive logging of SSL error messages (Tom) + Fix excessive logging of SSL error messages (Tom) - Fix crash when log_min_error_statement logging runs out + Fix crash when log_min_error_statement logging runs out of memory (Tom) - Prevent CLUSTER from failing + Prevent CLUSTER from failing due to attempting to process temporary tables of other sessions (Alvaro) - Require non-superusers who use /contrib/dblink to use only + Require non-superusers who use /contrib/dblink to use only password authentication, as a security measure (Joe) @@ -1415,7 +1415,7 @@ This release contains fixes from 7.4.16, including a security fix. For information about new features in the 7.4 major release, see - . + . @@ -1424,7 +1424,7 @@ A dump/restore is not required for those running 7.4.X. However, if you are upgrading from a version earlier than 7.4.11, - see . + see . @@ -1437,28 +1437,28 @@ Support explicit placement of the temporary-table schema within - search_path, and disable searching it for functions + search_path, and disable searching it for functions and operators (Tom) This is needed to allow a security-definer function to set a - truly secure value of search_path. Without it, + truly secure value of search_path. Without it, an unprivileged SQL user can use temporary objects to execute code with the privileges of the security-definer function (CVE-2007-2138). - See CREATE FUNCTION for more information. + See CREATE FUNCTION for more information. - /contrib/tsearch2 crash fixes (Teodor) + /contrib/tsearch2 crash fixes (Teodor) - Fix potential-data-corruption bug in how VACUUM FULL handles - UPDATE chains (Tom, Pavan Deolasee) + Fix potential-data-corruption bug in how VACUUM FULL handles + UPDATE chains (Tom, Pavan Deolasee) @@ -1486,7 +1486,7 @@ This release contains a variety of fixes from 7.4.15, including a security fix. For information about new features in the 7.4 major release, see - . + . @@ -1495,7 +1495,7 @@ A dump/restore is not required for those running 7.4.X. However, if you are upgrading from a version earlier than 7.4.11, - see . + see . @@ -1529,7 +1529,7 @@ - Fix for rare Assert() crash triggered by UNION (Tom) + Fix for rare Assert() crash triggered by UNION (Tom) @@ -1556,7 +1556,7 @@ This release contains a variety of fixes from 7.4.14. For information about new features in the 7.4 major release, see - . + . @@ -1565,7 +1565,7 @@ A dump/restore is not required for those running 7.4.X. However, if you are upgrading from a version earlier than 7.4.11, - see . + see . @@ -1577,7 +1577,7 @@ - Improve handling of getaddrinfo() on AIX (Tom) + Improve handling of getaddrinfo() on AIX (Tom) @@ -1588,8 +1588,8 @@ - Fix failed to re-find parent key errors in - VACUUM (Tom) + Fix failed to re-find parent key errors in + VACUUM (Tom) @@ -1601,20 +1601,20 @@ - Fix error when constructing an ARRAY[] made up of multiple + Fix error when constructing an ARRAY[] made up of multiple empty elements (Tom) - to_number() and to_char(numeric) - are now STABLE, not IMMUTABLE, for - new initdb installs (Tom) + to_number() and to_char(numeric) + are now STABLE, not IMMUTABLE, for + new initdb installs (Tom) - This is because lc_numeric can potentially + This is because lc_numeric can potentially change the output of these functions. @@ -1625,7 +1625,7 @@ - This improves psql \d performance also. + This improves psql \d performance also. @@ -1645,7 +1645,7 @@ This release contains a variety of fixes from 7.4.13. 
For information about new features in the 7.4 major release, see - . + . @@ -1654,7 +1654,7 @@ A dump/restore is not required for those running 7.4.X. However, if you are upgrading from a version earlier than 7.4.11, - see . + see . @@ -1665,12 +1665,12 @@ Fix core dump when an untyped literal is taken as ANYARRAY -Fix string_to_array() to handle overlapping +Fix string_to_array() to handle overlapping matches for the separator string -For example, string_to_array('123xx456xxx789', 'xx'). +For example, string_to_array('123xx456xxx789', 'xx'). Fix corner cases in pattern matching for - psql's \d commands + psql's \d commands Fix index-corrupting bugs in /contrib/ltree (Teodor) Fix backslash escaping in /contrib/dbmirror @@ -1693,7 +1693,7 @@ ANYARRAY This release contains a variety of fixes from 7.4.12, including patches for extremely serious security issues. For information about new features in the 7.4 major release, see - . + . @@ -1702,7 +1702,7 @@ ANYARRAY A dump/restore is not required for those running 7.4.X. However, if you are upgrading from a version earlier than 7.4.11, - see . + see . @@ -1712,9 +1712,9 @@ ANYARRAY into SQL commands, you should examine them as soon as possible to ensure that they are using recommended escaping techniques. In most cases, applications should be using subroutines provided by - libraries or drivers (such as libpq's - PQescapeStringConn()) to perform string escaping, - rather than relying on ad hoc code to do it. + libraries or drivers (such as libpq's + PQescapeStringConn()) to perform string escaping, + rather than relying on ad hoc code to do it. @@ -1724,48 +1724,48 @@ ANYARRAY Change the server to reject invalidly-encoded multibyte characters in all cases (Tatsuo, Tom) -While PostgreSQL has been moving in this direction for +While PostgreSQL has been moving in this direction for some time, the checks are now applied uniformly to all encodings and all textual input, and are now always errors not merely warnings. This change defends against SQL-injection attacks of the type described in CVE-2006-2313. -Reject unsafe uses of \' in string literals +Reject unsafe uses of \' in string literals As a server-side defense against SQL-injection attacks of the type -described in CVE-2006-2314, the server now only accepts '' and not -\' as a representation of ASCII single quote in SQL string -literals. By default, \' is rejected only when -client_encoding is set to a client-only encoding (SJIS, BIG5, GBK, +described in CVE-2006-2314, the server now only accepts '' and not +\' as a representation of ASCII single quote in SQL string +literals. By default, \' is rejected only when +client_encoding is set to a client-only encoding (SJIS, BIG5, GBK, GB18030, or UHC), which is the scenario in which SQL injection is possible. -A new configuration parameter backslash_quote is available to +A new configuration parameter backslash_quote is available to adjust this behavior when needed. Note that full security against CVE-2006-2314 might require client-side changes; the purpose of -backslash_quote is in part to make it obvious that insecure +backslash_quote is in part to make it obvious that insecure clients are insecure. 
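A minimal sketch of the quoting behavior described in the item above: doubling the quote is always accepted, while the backslashed form is subject to the backslash_quote setting. The value name safe_encoding is taken from the parameter's standard documentation rather than from this note, and it matches the default behavior described above (reject \' only under the affected client-only encodings).

-- always-safe spelling of an embedded single quote
SELECT 'O''Reilly';

-- backslash_quote controls whether \' is accepted at all
SET backslash_quote = safe_encoding;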
-Modify libpq's string-escaping routines to be +Modify libpq's string-escaping routines to be aware of encoding considerations and -standard_conforming_strings -This fixes libpq-using applications for the security +standard_conforming_strings +This fixes libpq-using applications for the security issues described in CVE-2006-2313 and CVE-2006-2314, and also future-proofs them against the planned changeover to SQL-standard string literal syntax. -Applications that use multiple PostgreSQL connections -concurrently should migrate to PQescapeStringConn() and -PQescapeByteaConn() to ensure that escaping is done correctly +Applications that use multiple PostgreSQL connections +concurrently should migrate to PQescapeStringConn() and +PQescapeByteaConn() to ensure that escaping is done correctly for the settings in use in each database connection. Applications that -do string escaping by hand should be modified to rely on library +do string escaping by hand should be modified to rely on library routines instead. Fix some incorrect encoding conversion functions -win1251_to_iso, alt_to_iso, -euc_tw_to_big5, euc_tw_to_mic, -mic_to_euc_tw were all broken to varying +win1251_to_iso, alt_to_iso, +euc_tw_to_big5, euc_tw_to_mic, +mic_to_euc_tw were all broken to varying extents. -Clean up stray remaining uses of \' in strings +Clean up stray remaining uses of \' in strings (Bruce, Jan) Fix bug that sometimes caused OR'd index scans to @@ -1774,8 +1774,8 @@ miss rows they should have returned Fix WAL replay for case where a btree index has been truncated -Fix SIMILAR TO for patterns involving -| (Tom) +Fix SIMILAR TO for patterns involving +| (Tom) Fix server to use custom DH SSL parameters correctly (Michael Fuhr) @@ -1799,7 +1799,7 @@ Fuhr) This release contains a variety of fixes from 7.4.11. For information about new features in the 7.4 major release, see - . + . @@ -1808,7 +1808,7 @@ Fuhr) A dump/restore is not required for those running 7.4.X. However, if you are upgrading from a version earlier than 7.4.11, - see . + see . @@ -1818,7 +1818,7 @@ Fuhr) Fix potential crash in SET -SESSION AUTHORIZATION (CVE-2006-0553) +SESSION AUTHORIZATION (CVE-2006-0553) An unprivileged user could crash the server process, resulting in momentary denial of service to other users, if the server has been compiled with Asserts enabled (which is not the default). @@ -1833,18 +1833,18 @@ created in 7.4.9 and 7.3.11 releases. Fix race condition that could lead to file already -exists errors during pg_clog file creation +exists errors during pg_clog file creation (Tom) -Properly check DOMAIN constraints for -UNKNOWN parameters in prepared statements +Properly check DOMAIN constraints for +UNKNOWN parameters in prepared statements (Neil) Fix to allow restoring dumps that have cross-schema references to custom operators (Tom) -Portability fix for testing presence of finite -and isinf during configure (Tom) +Portability fix for testing presence of finite +and isinf during configure (Tom) @@ -1862,7 +1862,7 @@ and isinf during configure (Tom) This release contains a variety of fixes from 7.4.10. For information about new features in the 7.4 major release, see - . + . @@ -1871,10 +1871,10 @@ and isinf during configure (Tom) A dump/restore is not required for those running 7.4.X. However, if you are upgrading from a version earlier than 7.4.8, - see . - Also, you might need to REINDEX indexes on textual + see . 
+ Also, you might need to REINDEX indexes on textual columns after updating, if you are affected by the locale or - plperl issues described below. + plperl issues described below. @@ -1888,28 +1888,28 @@ outside a transaction or in a failed transaction (Tom) Fix character string comparison for locales that consider different character combinations as equal, such as Hungarian (Tom) -This might require REINDEX to fix existing indexes on +This might require REINDEX to fix existing indexes on textual columns. Set locale environment variables during postmaster startup -to ensure that plperl won't change the locale later -This fixes a problem that occurred if the postmaster was +to ensure that plperl won't change the locale later +This fixes a problem that occurred if the postmaster was started with environment variables specifying a different locale than what -initdb had been told. Under these conditions, any use of -plperl was likely to lead to corrupt indexes. You might need -REINDEX to fix existing indexes on +initdb had been told. Under these conditions, any use of +plperl was likely to lead to corrupt indexes. You might need +REINDEX to fix existing indexes on textual columns if this has happened to you. Fix longstanding bug in strpos() and regular expression handling in certain rarely used Asian multi-byte character sets (Tatsuo) -Fix bug in /contrib/pgcrypto gen_salt, +Fix bug in /contrib/pgcrypto gen_salt, which caused it not to use all available salt space for MD5 and XDES algorithms (Marko Kreen, Solar Designer) Salts for Blowfish and standard DES are unaffected. -Fix /contrib/dblink to throw an error, +Fix /contrib/dblink to throw an error, rather than crashing, when the number of columns specified is different from what's actually returned by the query (Joe) @@ -1929,7 +1929,7 @@ what's actually returned by the query (Joe) This release contains a variety of fixes from 7.4.9. For information about new features in the 7.4 major release, see - . + . @@ -1938,7 +1938,7 @@ what's actually returned by the query (Joe) A dump/restore is not required for those running 7.4.X. However, if you are upgrading from a version earlier than 7.4.8, - see . + see . @@ -1956,15 +1956,15 @@ corruption. Prevent failure if client sends Bind protocol message when current transaction is already aborted -/contrib/ltree fixes (Teodor) +/contrib/ltree fixes (Teodor) AIX and HPUX compile fixes (Tom) Fix longstanding planning error for outer joins This bug sometimes caused a bogus error RIGHT JOIN is -only supported with merge-joinable join conditions. +only supported with merge-joinable join conditions. -Prevent core dump in pg_autovacuum when a +Prevent core dump in pg_autovacuum when a table has been dropped @@ -1982,7 +1982,7 @@ table has been dropped This release contains a variety of fixes from 7.4.8. For information about new features in the 7.4 major release, see - . + . @@ -1991,7 +1991,7 @@ table has been dropped A dump/restore is not required for those running 7.4.X. However, if you are upgrading from a version earlier than 7.4.8, - see . + see . @@ -1999,41 +1999,41 @@ table has been dropped Changes -Fix error that allowed VACUUM to remove -ctid chains too soon, and add more checking in code that follows -ctid links +Fix error that allowed VACUUM to remove +ctid chains too soon, and add more checking in code that follows +ctid links This fixes a long-standing problem that could cause crashes in very rare circumstances. 
-Fix CHAR() to properly pad spaces to the specified +Fix CHAR() to properly pad spaces to the specified length when using a multiple-byte character set (Yoshiyuki Asaba) -In prior releases, the padding of CHAR() was incorrect +In prior releases, the padding of CHAR() was incorrect because it only padded to the specified number of bytes without considering how many characters were stored. Fix the sense of the test for read-only transaction -in COPY -The code formerly prohibited COPY TO, where it should -prohibit COPY FROM. +in COPY +The code formerly prohibited COPY TO, where it should +prohibit COPY FROM. Fix planning problem with outer-join ON clauses that reference only the inner-side relation -Further fixes for x FULL JOIN y ON true corner +Further fixes for x FULL JOIN y ON true corner cases -Make array_in and array_recv more +Make array_in and array_recv more paranoid about validating their OID parameter Fix missing rows in queries like UPDATE a=... WHERE -a... with GiST index on column a +a... with GiST index on column a Improve robustness of datetime parsing Improve checking for partially-written WAL pages Improve robustness of signal handling when SSL is enabled -Don't try to open more than max_files_per_process +Don't try to open more than max_files_per_process files during postmaster startup Various memory leakage fixes Various portability improvements -Fix PL/pgSQL to handle var := var correctly when +Fix PL/pgSQL to handle var := var correctly when the variable is of pass-by-reference type -Update contrib/tsearch2 to use current Snowball +Update contrib/tsearch2 to use current Snowball code @@ -2052,7 +2052,7 @@ code This release contains a variety of fixes from 7.4.7, including several security-related issues. For information about new features in the 7.4 major release, see - . + . @@ -2077,10 +2077,10 @@ code - The lesser problem is that the contrib/tsearch2 module + The lesser problem is that the contrib/tsearch2 module creates several functions that are misdeclared to return - internal when they do not accept internal arguments. - This breaks type safety for all functions using internal + internal when they do not accept internal arguments. + This breaks type safety for all functions using internal arguments. @@ -2106,7 +2106,7 @@ WHERE pronamespace = 11 AND pronargs = 5 COMMIT; - Next, if you have installed contrib/tsearch2, do: + Next, if you have installed contrib/tsearch2, do: BEGIN; @@ -2124,22 +2124,22 @@ COMMIT; If this command fails with a message like function - "dex_init(text)" does not exist, then either tsearch2 + "dex_init(text)" does not exist, then either tsearch2 is not installed in this database, or you already did the update. - The above procedures must be carried out in each database - of an installation, including template1, and ideally - including template0 as well. If you do not fix the + The above procedures must be carried out in each database + of an installation, including template1, and ideally + including template0 as well. If you do not fix the template databases then any subsequently created databases will contain - the same errors. template1 can be fixed in the same way - as any other database, but fixing template0 requires + the same errors. template1 can be fixed in the same way + as any other database, but fixing template0 requires additional steps. 
First, from any database issue: UPDATE pg_database SET datallowconn = true WHERE datname = 'template0'; - Next connect to template0 and perform the above repair + Next connect to template0 and perform the above repair procedures. Finally, do: -- re-freeze template0: @@ -2156,8 +2156,8 @@ UPDATE pg_database SET datallowconn = false WHERE datname = 'template0'; Change encoding function signature to prevent misuse -Change contrib/tsearch2 to avoid unsafe use of -INTERNAL function results +Change contrib/tsearch2 to avoid unsafe use of +INTERNAL function results Repair ancient race condition that allowed a transaction to be seen as committed for some purposes (eg SELECT FOR UPDATE) slightly sooner than for other purposes @@ -2169,56 +2169,56 @@ VACUUM freshly-inserted data, although the scenario seems of very low probability. There are no known cases of it having caused more than an Assert failure. -Fix comparisons of TIME WITH TIME ZONE values +Fix comparisons of TIME WITH TIME ZONE values The comparison code was wrong in the case where the ---enable-integer-datetimes configuration switch had been used. -NOTE: if you have an index on a TIME WITH TIME ZONE column, -it will need to be REINDEXed after installing this update, because +--enable-integer-datetimes configuration switch had been used. +NOTE: if you have an index on a TIME WITH TIME ZONE column, +it will need to be REINDEXed after installing this update, because the fix corrects the sort order of column values. -Fix EXTRACT(EPOCH) for -TIME WITH TIME ZONE values +Fix EXTRACT(EPOCH) for +TIME WITH TIME ZONE values Fix mis-display of negative fractional seconds in -INTERVAL values +INTERVAL values This error only occurred when the ---enable-integer-datetimes configuration switch had been used. +--enable-integer-datetimes configuration switch had been used. Ensure operations done during backend shutdown are counted by statistics collector -This is expected to resolve reports of pg_autovacuum +This is expected to resolve reports of pg_autovacuum not vacuuming the system catalogs often enough — it was not being told about catalog deletions caused by temporary table removal during backend exit. Additional buffer overrun checks in plpgsql (Neil) -Fix pg_dump to dump trigger names containing % +Fix pg_dump to dump trigger names containing % correctly (Neil) -Fix contrib/pgcrypto for newer OpenSSL builds +Fix contrib/pgcrypto for newer OpenSSL builds (Marko Kreen) Still more 64-bit fixes for -contrib/intagg +contrib/intagg Prevent incorrect optimization of functions returning -RECORD -Prevent to_char(interval) from dumping core for +RECORD +Prevent to_char(interval) from dumping core for month-related formats -Prevent crash on COALESCE(NULL,NULL) -Fix array_map to call PL functions correctly -Fix permission checking in ALTER DATABASE RENAME -Fix ALTER LANGUAGE RENAME -Make RemoveFromWaitQueue clean up after itself +Prevent crash on COALESCE(NULL,NULL) +Fix array_map to call PL functions correctly +Fix permission checking in ALTER DATABASE RENAME +Fix ALTER LANGUAGE RENAME +Make RemoveFromWaitQueue clean up after itself This fixes a lock management error that would only be visible if a transaction was kicked out of a wait for a lock (typically by query cancel) and then the holder of the lock released it within a very narrow window. Fix problem with untyped parameter appearing in -INSERT ... SELECT -Fix CLUSTER failure after -ALTER TABLE SET WITHOUT OIDS +INSERT ... 
SELECT +Fix CLUSTER failure after +ALTER TABLE SET WITHOUT OIDS @@ -2236,7 +2236,7 @@ holder of the lock released it within a very narrow window. This release contains a variety of fixes from 7.4.6, including several security-related issues. For information about new features in the 7.4 major release, see - . + . @@ -2251,11 +2251,11 @@ holder of the lock released it within a very narrow window. Changes -Disallow LOAD to non-superusers +Disallow LOAD to non-superusers On platforms that will automatically execute initialization functions of a shared library (this includes at least Windows and ELF-based Unixen), -LOAD can be used to make the server execute arbitrary code. +LOAD can be used to make the server execute arbitrary code. Thanks to NGS Software for reporting this. Check that creator of an aggregate function has the right to execute the specified transition functions @@ -2295,7 +2295,7 @@ GMT This release contains a variety of fixes from 7.4.5. For information about new features in the 7.4 major release, see - . + . @@ -2314,7 +2314,7 @@ GMT Repair possible failure to update hint bits on disk Under rare circumstances this oversight could lead to -could not access transaction status failures, which qualifies +could not access transaction status failures, which qualifies it as a potential-data-loss bug. Ensure that hashed outer join does not miss tuples @@ -2322,11 +2322,11 @@ it as a potential-data-loss bug. Very large left joins using a hash join plan could fail to output unmatched left-side rows given just the right data distribution. -Disallow running pg_ctl as root +Disallow running pg_ctl as root This is to guard against any possible security issues. -Avoid using temp files in /tmp in make_oidjoins_check +Avoid using temp files in /tmp in make_oidjoins_check This has been reported as a security issue, though it's hardly worthy of concern since there is no reason for non-developers to use this script anyway. @@ -2343,13 +2343,13 @@ This could lead to misbehavior in some of the system-statistics views. Fix small memory leak in postmaster Fix expected both swapped tables to have TOAST -tables bug +tables bug This could arise in cases such as CLUSTER after ALTER TABLE DROP COLUMN. -Prevent pg_ctl restart from adding -D multiple times +Prevent pg_ctl restart from adding -D multiple times Fix problem with NULL values in GiST indexes -:: is no longer interpreted as a variable in an +:: is no longer interpreted as a variable in an ECPG prepare statement @@ -2367,7 +2367,7 @@ ECPG prepare statement This release contains one serious bug fix over 7.4.4. For information about new features in the 7.4 major release, see - . + . @@ -2405,7 +2405,7 @@ still worth a re-release. The bug does not exist in pre-7.4 releases. This release contains a variety of fixes from 7.4.3. For information about new features in the 7.4 major release, see - . + . @@ -2435,8 +2435,8 @@ aggregate plan Fix hashed crosstab for zero-rows case (Joe) Force cache update after renaming a column in a foreign key Pretty-print UNION queries correctly -Make psql handle \r\n newlines properly in COPY IN -pg_dump handled ACLs with grant options incorrectly +Make psql handle \r\n newlines properly in COPY IN +pg_dump handled ACLs with grant options incorrectly Fix thread support for macOS and Solaris Updated JDBC driver (build 215) with various fixes ECPG fixes @@ -2457,7 +2457,7 @@ aggregate plan This release contains a variety of fixes from 7.4.2. For information about new features in the 7.4 major release, see - . + . 
@@ -2492,7 +2492,7 @@ large tables, unsigned oids, stability, temp tables, and debug mode Select-list aliases within the sub-select will now take precedence over names from outer query levels. -Do not generate NATURAL CROSS JOIN when decompiling rules (Tom) +Do not generate NATURAL CROSS JOIN when decompiling rules (Tom) Add checks for invalid field length in binary COPY (Tom) This fixes a difficult-to-exploit security hole. @@ -2515,7 +2515,7 @@ names from outer query levels. This release contains a variety of fixes from 7.4.1. For information about new features in the 7.4 major release, see - . + . @@ -2531,29 +2531,29 @@ names from outer query levels. - The more severe of the two errors is that data type anyarray + The more severe of the two errors is that data type anyarray has the wrong alignment label; this is a problem because the - pg_statistic system catalog uses anyarray + pg_statistic system catalog uses anyarray columns. The mislabeling can cause planner misestimations and even - crashes when planning queries that involve WHERE clauses on - double-aligned columns (such as float8 and timestamp). + crashes when planning queries that involve WHERE clauses on + double-aligned columns (such as float8 and timestamp). It is strongly recommended that all installations repair this error, either by initdb or by following the manual repair procedure given below. - The lesser error is that the system view pg_settings + The lesser error is that the system view pg_settings ought to be marked as having public update access, to allow - UPDATE pg_settings to be used as a substitute for - SET. This can also be fixed either by initdb or manually, + UPDATE pg_settings to be used as a substitute for + SET. This can also be fixed either by initdb or manually, but it is not necessary to fix unless you want to use UPDATE - pg_settings. + pg_settings. If you wish not to do an initdb, the following procedure will work - for fixing pg_statistic. As the database superuser, + for fixing pg_statistic. As the database superuser, do: @@ -2573,28 +2573,28 @@ ANALYZE; This can be done in a live database, but beware that all backends running in the altered database must be restarted before it is safe to - repopulate pg_statistic. + repopulate pg_statistic. - To repair the pg_settings error, simply do: + To repair the pg_settings error, simply do: GRANT SELECT, UPDATE ON pg_settings TO PUBLIC; - The above procedures must be carried out in each database - of an installation, including template1, and ideally - including template0 as well. If you do not fix the + The above procedures must be carried out in each database + of an installation, including template1, and ideally + including template0 as well. If you do not fix the template databases then any subsequently created databases will contain - the same errors. template1 can be fixed in the same way - as any other database, but fixing template0 requires + the same errors. template1 can be fixed in the same way + as any other database, but fixing template0 requires additional steps. First, from any database issue: UPDATE pg_database SET datallowconn = true WHERE datname = 'template0'; - Next connect to template0 and perform the above repair + Next connect to template0 and perform the above repair procedures. 
Finally, do: -- re-freeze template0: @@ -2614,28 +2614,28 @@ UPDATE pg_database SET datallowconn = false WHERE datname = 'template0'; -Fix pg_statistic alignment bug that could crash optimizer +Fix pg_statistic alignment bug that could crash optimizer See above for details about this problem. -Allow non-super users to update pg_settings +Allow non-super users to update pg_settings Fix several optimizer bugs, most of which led to -variable not found in subplan target lists errors +variable not found in subplan target lists errors Avoid out-of-memory failure during startup of large multiple index scan Fix multibyte problem that could lead to out of -memory error during COPY IN -Fix problems with SELECT INTO / CREATE -TABLE AS from tables without OIDs -Fix problems with alter_table regression test +memory error during COPY IN +Fix problems with SELECT INTO / CREATE +TABLE AS from tables without OIDs +Fix problems with alter_table regression test during parallel testing Fix problems with hitting open file limit, especially on macOS (Tom) Partial fix for Turkish-locale issues initdb will succeed now in Turkish locale, but there are still some -inconveniences associated with the i/I problem. +inconveniences associated with the i/I problem. Make pg_dump set client encoding on restore Other minor pg_dump fixes Allow ecpg to again use C keywords as column names (Michael) -Added ecpg WHENEVER NOT_FOUND to -SELECT/INSERT/UPDATE/DELETE (Michael) +Added ecpg WHENEVER NOT_FOUND to +SELECT/INSERT/UPDATE/DELETE (Michael) Fix ecpg crash for queries calling set-returning functions (Michael) Various other ecpg fixes (Michael) Fixes for Borland compiler @@ -2658,7 +2658,7 @@ inconveniences associated with the i/I problem. This release contains a variety of fixes from 7.4. For information about new features in the 7.4 major release, see - . + . @@ -2810,7 +2810,7 @@ DROP SCHEMA information_schema CASCADE; without sorting, by accumulating results into a hash table with one entry per group. It will still use the sort technique, however, if the hash table is estimated to be too - large to fit in sort_mem. + large to fit in sort_mem. @@ -3125,16 +3125,16 @@ DROP SCHEMA information_schema CASCADE; Trailing spaces are now trimmed when converting from type - char(n) to - varchar(n) or text. + char(n) to + varchar(n) or text. This is what most people always expected to happen anyway. - The data type float(p) now - measures p in binary digits, not decimal + The data type float(p) now + measures p in binary digits, not decimal digits. The new behavior follows the SQL standard. @@ -3143,11 +3143,11 @@ DROP SCHEMA information_schema CASCADE; Ambiguous date values now must match the ordering specified by the datestyle setting. In prior releases, a - date specification of 10/20/03 was interpreted as a - date in October even if datestyle specified that + date specification of 10/20/03 was interpreted as a + date in October even if datestyle specified that the day should be first. 7.4 will throw an error if a date specification is invalid for the current setting of - datestyle. + datestyle. @@ -3167,28 +3167,28 @@ DROP SCHEMA information_schema CASCADE; no longer work as expected in column default expressions; they now cause the time of the table creation to be the default, not the time of the insertion. Functions such as - now(), current_timestamp, or + now(), current_timestamp, or current_date should be used instead. 
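A minimal contrast of the two default styles discussed in this item, with hypothetical table names; per the paragraph above, the string form is resolved too early, while the function form is evaluated at each insertion.

-- per this note, 'now' here is resolved when the table is created
CREATE TABLE audit_old_style (created timestamp DEFAULT 'now');

-- recommended: evaluate the default at insertion time
CREATE TABLE audit_new_style (created timestamp DEFAULT now());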
In previous releases, there was special code so that strings such as 'now' were interpreted at - INSERT time and not at table creation time, but + INSERT time and not at table creation time, but this work around didn't cover all cases. Release 7.4 now requires that defaults be defined properly using functions such - as now() or current_timestamp. These + as now() or current_timestamp. These will work in all situations. - The dollar sign ($) is no longer allowed in + The dollar sign ($) is no longer allowed in operator names. It can instead be a non-first character in identifiers. This was done to improve compatibility with other database systems, and to avoid syntax problems when parameter - placeholders ($n) are written + placeholders ($n) are written adjacent to operators. @@ -3333,14 +3333,14 @@ DROP SCHEMA information_schema CASCADE; - Allow IN/NOT IN to be handled via hash + Allow IN/NOT IN to be handled via hash tables (Tom) - Improve NOT IN (subquery) + Improve NOT IN (subquery) performance (Tom) @@ -3490,19 +3490,19 @@ DROP SCHEMA information_schema CASCADE; - Rename server parameter server_min_messages to log_min_messages (Bruce) + Rename server parameter server_min_messages to log_min_messages (Bruce) This was done so most parameters that control the server logs - begin with log_. + begin with log_. - Rename show_*_stats to log_*_stats (Bruce) - Rename show_source_port to log_source_port (Bruce) - Rename hostname_lookup to log_hostname (Bruce) + Rename show_*_stats to log_*_stats (Bruce) + Rename show_source_port to log_source_port (Bruce) + Rename hostname_lookup to log_hostname (Bruce) - Add checkpoint_warning to warn of excessive checkpointing (Bruce) + Add checkpoint_warning to warn of excessive checkpointing (Bruce) In prior releases, it was difficult to determine if checkpoint was happening too frequently. This feature adds a warning to the @@ -3514,8 +3514,8 @@ DROP SCHEMA information_schema CASCADE; - Change debug server log messages to output as DEBUG - rather than LOG (Bruce) + Change debug server log messages to output as DEBUG + rather than LOG (Bruce) @@ -3529,8 +3529,8 @@ DROP SCHEMA information_schema CASCADE; - log_min_messages/client_min_messages now - controls debug_* output (Bruce) + log_min_messages/client_min_messages now + controls debug_* output (Bruce) This centralizes client debug information so all debug output @@ -3589,15 +3589,15 @@ DROP SCHEMA information_schema CASCADE; Add new columns in pg_settings: - context, type, source, - min_val, max_val (Joe) + context, type, source, + min_val, max_val (Joe) - Make default shared_buffers 1000 and - max_connections 100, if possible (Tom) + Make default shared_buffers 1000 and + max_connections 100, if possible (Tom) Prior versions defaulted to 64 shared buffers so PostgreSQL @@ -3612,7 +3612,7 @@ DROP SCHEMA information_schema CASCADE; New pg_hba.conf record type - hostnossl to prevent SSL connections (Jon + hostnossl to prevent SSL connections (Jon Jensen) @@ -3675,7 +3675,7 @@ DROP SCHEMA information_schema CASCADE; Add option to prevent auto-addition of tables referenced in query (Nigel J. Andrews) By default, tables mentioned in the query are automatically - added to the FROM clause if they are not already + added to the FROM clause if they are not already there. This is compatible with historic POSTGRES behavior but is contrary to the SQL standard. 
This option allows selecting @@ -3692,9 +3692,9 @@ DROP SCHEMA information_schema CASCADE; - Allow expressions to be used in LIMIT/OFFSET (Tom) + Allow expressions to be used in LIMIT/OFFSET (Tom) - In prior releases, LIMIT/OFFSET could + In prior releases, LIMIT/OFFSET could only use constants, not expressions. @@ -3780,7 +3780,7 @@ DROP SCHEMA information_schema CASCADE; Improve automatic type casting for domains (Rod, Tom) Allow dollar signs in identifiers, except as first character (Tom) - Disallow dollar signs in operator names, so x=$1 works (Tom) + Disallow dollar signs in operator names, so x=$1 works (Tom) @@ -3863,9 +3863,9 @@ DROP SCHEMA information_schema CASCADE; - Implement SQL-compatible options FIRST, - LAST, ABSOLUTE n, - RELATIVE n for + Implement SQL-compatible options FIRST, + LAST, ABSOLUTE n, + RELATIVE n for FETCH and MOVE (Tom) @@ -3888,18 +3888,18 @@ DROP SCHEMA information_schema CASCADE; Prevent CLUSTER on partial indexes (Tom) - Allow DOS and Mac line-endings in COPY files (Bruce) + Allow DOS and Mac line-endings in COPY files (Bruce) Disallow literal carriage return as a data value, - backslash-carriage-return and \r are still allowed + backslash-carriage-return and \r are still allowed (Bruce) - COPY changes (binary, \.) (Tom) + COPY changes (binary, \.) (Tom) @@ -3965,7 +3965,7 @@ DROP SCHEMA information_schema CASCADE; - Improve reliability of LISTEN/NOTIFY (Tom) + Improve reliability of LISTEN/NOTIFY (Tom) @@ -3976,8 +3976,8 @@ DROP SCHEMA information_schema CASCADE; requirement of a standalone session, which was necessary in previous releases. The only tables that now require a standalone session for reindexing are the global system tables - pg_database, pg_shadow, and - pg_group. + pg_database, pg_shadow, and + pg_group. @@ -4003,14 +4003,14 @@ DROP SCHEMA information_schema CASCADE; - Remove rarely used functions oidrand, - oidsrand, and userfntest functions + Remove rarely used functions oidrand, + oidsrand, and userfntest functions (Neil) - Add md5() function to main server, already in contrib/pgcrypto (Joe) + Add md5() function to main server, already in contrib/pgcrypto (Joe) An MD5 function was frequently requested. 
For more complex encryption capabilities, use @@ -4067,8 +4067,8 @@ DROP SCHEMA information_schema CASCADE; Allow WHERE qualification - expr op ANY/SOME/ALL - (array_expr) (Joe) + expr op ANY/SOME/ALL + (array_expr) (Joe) This allows arrays to behave like a list of values, for purposes @@ -4079,10 +4079,10 @@ DROP SCHEMA information_schema CASCADE; - New array functions array_append, - array_cat, array_lower, - array_prepend, array_to_string, - array_upper, string_to_array (Joe) + New array functions array_append, + array_cat, array_lower, + array_prepend, array_to_string, + array_upper, string_to_array (Joe) @@ -4107,14 +4107,14 @@ DROP SCHEMA information_schema CASCADE; Trim trailing spaces when char is cast to - varchar or text (Tom) + varchar or text (Tom) - Make float(p) measure the precision - p in binary digits, not decimal digits + Make float(p) measure the precision + p in binary digits, not decimal digits (Tom) @@ -4164,9 +4164,9 @@ DROP SCHEMA information_schema CASCADE; - Add new datestyle values MDY, - DMY, and YMD to set input field order; - honor US and European for backward + Add new datestyle values MDY, + DMY, and YMD to set input field order; + honor US and European for backward compatibility (Tom) @@ -4182,10 +4182,10 @@ DROP SCHEMA information_schema CASCADE; - Treat NaN as larger than any other value in min()/max() (Tom) + Treat NaN as larger than any other value in min()/max() (Tom) NaN was already sorted after ordinary numeric values for most - purposes, but min() and max() didn't + purposes, but min() and max() didn't get this right. @@ -4203,7 +4203,7 @@ DROP SCHEMA information_schema CASCADE; - Allow time to be specified as 040506 or 0405 (Tom) + Allow time to be specified as 040506 or 0405 (Tom) @@ -4275,7 +4275,7 @@ DROP SCHEMA information_schema CASCADE; - Add new parameter $0 in PL/pgSQL representing the + Add new parameter $0 in PL/pgSQL representing the function's actual return type (Joe) @@ -4310,12 +4310,12 @@ DROP SCHEMA information_schema CASCADE; Improve tab completion (Rod, Ross Reedstrom, Ian Barwick) - Reorder \? help into groupings (Harald Armin Massa, Bruce) + Reorder \? help into groupings (Harald Armin Massa, Bruce) Add backslash commands for listing schemas, casts, and conversions (Christopher) - \encoding now changes based on the server parameter + \encoding now changes based on the server parameter client_encoding (Tom) @@ -4328,7 +4328,7 @@ DROP SCHEMA information_schema CASCADE; Save editor buffer into readline history (Ross) - When \e is used to edit a query, the result is saved + When \e is used to edit a query, the result is saved in the readline history for retrieval using the up arrow. 
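Returning to the ANY/SOME/ALL-over-arrays qualification form listed earlier in this section, a brief illustration of arrays behaving like a list of values; the literals are arbitrary.

-- true: 10 equals at least one element of the array
SELECT 10 = ANY (ARRAY[4, 10, 42]);

-- true: 10 is less than every element of the array
SELECT 10 < ALL (ARRAY[20, 30, 40]);

-- one of the new array functions listed above, yielding '1,2,3'
SELECT array_to_string(ARRAY[1, 2, 3], ',');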
@@ -4373,14 +4373,14 @@ DROP SCHEMA information_schema CASCADE; - Have pg_dumpall use GRANT/REVOKE to dump database-level privileges (Tom) + Have pg_dumpall use GRANT/REVOKE to dump database-level privileges (Tom) - Allow pg_dumpall to support the options , + , of pg_dump (Tom) @@ -4565,7 +4565,7 @@ DROP SCHEMA information_schema CASCADE; Allow libpq to compile with Borland C++ compiler (Lester Godwin, Karl Waclawek) Use our own version of getopt_long() if needed (Peter) Convert administration scripts to C (Peter) - Bison >= 1.85 is now required to build the PostgreSQL grammar, if building from CVS + Bison >= 1.85 is now required to build the PostgreSQL grammar, if building from CVS Merge documentation into one book (Peter) Add Windows compatibility functions (Bruce) Allow client interfaces to compile under MinGW (Bruce) @@ -4605,16 +4605,16 @@ DROP SCHEMA information_schema CASCADE; Update btree_gist (Oleg) New tsearch2 full-text search module (Oleg, Teodor) Add hash-based crosstab function to tablefuncs (Joe) - Add serial column to order connectby() siblings in tablefuncs (Nabil Sayegh,Joe) + Add serial column to order connectby() siblings in tablefuncs (Nabil Sayegh,Joe) Add named persistent connections to dblink (Shridhar Daithanka) New pg_autovacuum allows automatic VACUUM (Matthew T. O'Connor) - Make pgbench honor environment variables PGHOST, PGPORT, PGUSER (Tatsuo) + Make pgbench honor environment variables PGHOST, PGPORT, PGUSER (Tatsuo) Improve intarray (Teodor Sigaev) Improve pgstattuple (Rod) Fix bug in metaphone() in fuzzystrmatch Improve adddepend (Rod) Update spi/timetravel (Böjthe Zoltán) - Fix dbase + Fix dbase option and improve non-ASCII handling (Thomas Behr, Márcio Smiderle) Remove array module because features now included by default (Joe) diff --git a/doc/src/sgml/release-8.0.sgml b/doc/src/sgml/release-8.0.sgml index 0f43e24b1d..6171e0d1ee 100644 --- a/doc/src/sgml/release-8.0.sgml +++ b/doc/src/sgml/release-8.0.sgml @@ -12,11 +12,11 @@ This release contains a variety of fixes from 8.0.25. For information about new features in the 8.0 major release, see - . + . - This is expected to be the last PostgreSQL release + This is expected to be the last PostgreSQL release in the 8.0.X series. Users are encouraged to update to a newer release branch soon. @@ -27,7 +27,7 @@ A dump/restore is not required for those running 8.0.X. However, if you are upgrading from a version earlier than 8.0.22, - see . + see . @@ -47,7 +47,7 @@ This change prevents security problems that can be caused by subverting Perl or Tcl code that will be executed later in the same session under another SQL user identity (for example, within a SECURITY - DEFINER function). Most scripting languages offer numerous ways that + DEFINER function). Most scripting languages offer numerous ways that that might be done, such as redefining standard functions or operators called by the target function. 
Without this change, any SQL user with Perl or Tcl language usage rights can do essentially anything with the @@ -76,7 +76,7 @@ - Prevent possible crashes in pg_get_expr() by disallowing + Prevent possible crashes in pg_get_expr() by disallowing it from being called with an argument that is not one of the system catalog columns it's intended to be used with (Heikki Linnakangas, Tom Lane) @@ -104,7 +104,7 @@ Take care to fsync the contents of lockfiles (both - postmaster.pid and the socket lockfile) while writing them + postmaster.pid and the socket lockfile) while writing them (Tom Lane) @@ -130,7 +130,7 @@ - Fix log_line_prefix's %i escape, + Fix log_line_prefix's %i escape, which could produce junk early in backend startup (Tom Lane) @@ -138,28 +138,28 @@ Fix possible data corruption in ALTER TABLE ... SET - TABLESPACE when archiving is enabled (Jeff Davis) + TABLESPACE when archiving is enabled (Jeff Davis) - Allow CREATE DATABASE and ALTER DATABASE ... SET - TABLESPACE to be interrupted by query-cancel (Guillaume Lelarge) + Allow CREATE DATABASE and ALTER DATABASE ... SET + TABLESPACE to be interrupted by query-cancel (Guillaume Lelarge) In PL/Python, defend against null pointer results from - PyCObject_AsVoidPtr and PyCObject_FromVoidPtr + PyCObject_AsVoidPtr and PyCObject_FromVoidPtr (Peter Eisentraut) - Improve contrib/dblink's handling of tables containing + Improve contrib/dblink's handling of tables containing dropped columns (Tom Lane) @@ -167,13 +167,13 @@ Fix connection leak after duplicate connection name - errors in contrib/dblink (Itagaki Takahiro) + errors in contrib/dblink (Itagaki Takahiro) - Fix contrib/dblink to handle connection names longer than + Fix contrib/dblink to handle connection names longer than 62 bytes correctly (Itagaki Takahiro) @@ -187,7 +187,7 @@ - Update time zone data files to tzdata release 2010l + Update time zone data files to tzdata release 2010l for DST law changes in Egypt and Palestine; also historical corrections for Finland. @@ -216,11 +216,11 @@ This release contains a variety of fixes from 8.0.24. For information about new features in the 8.0 major release, see - . + . - The PostgreSQL community will stop releasing updates + The PostgreSQL community will stop releasing updates for the 8.0.X release series in July 2010. Users are encouraged to update to a newer release branch soon. @@ -231,7 +231,7 @@ A dump/restore is not required for those running 8.0.X. However, if you are upgrading from a version earlier than 8.0.22, - see . + see . @@ -243,19 +243,19 @@ - Enforce restrictions in plperl using an opmask applied to - the whole interpreter, instead of using Safe.pm + Enforce restrictions in plperl using an opmask applied to + the whole interpreter, instead of using Safe.pm (Tim Bunce, Andrew Dunstan) - Recent developments have convinced us that Safe.pm is too - insecure to rely on for making plperl trustable. This - change removes use of Safe.pm altogether, in favor of using + Recent developments have convinced us that Safe.pm is too + insecure to rely on for making plperl trustable. This + change removes use of Safe.pm altogether, in favor of using a separate interpreter with an opcode mask that is always applied. 
Pleasant side effects of the change include that it is now possible to - use Perl's strict pragma in a natural way in - plperl, and that Perl's $a and $b + use Perl's strict pragma in a natural way in + plperl, and that Perl's $a and $b variables work as expected in sort routines, and that function compilation is significantly faster. (CVE-2010-1169) @@ -264,19 +264,19 @@ Prevent PL/Tcl from executing untrustworthy code from - pltcl_modules (Tom) + pltcl_modules (Tom) PL/Tcl's feature for autoloading Tcl code from a database table could be exploited for trojan-horse attacks, because there was no restriction on who could create or insert into that table. This change - disables the feature unless pltcl_modules is owned by a + disables the feature unless pltcl_modules is owned by a superuser. (However, the permissions on the table are not checked, so installations that really need a less-than-secure modules table can still grant suitable privileges to trusted non-superusers.) Also, - prevent loading code into the unrestricted normal Tcl - interpreter unless we are really going to execute a pltclu + prevent loading code into the unrestricted normal Tcl + interpreter unless we are really going to execute a pltclu function. (CVE-2010-1170) @@ -289,10 +289,10 @@ Previously, if an unprivileged user ran ALTER USER ... RESET - ALL for himself, or ALTER DATABASE ... RESET ALL for + ALL for himself, or ALTER DATABASE ... RESET ALL for a database he owns, this would remove all special parameter settings for the user or database, even ones that are only supposed to be - changeable by a superuser. Now, the ALTER will only + changeable by a superuser. Now, the ALTER will only remove the parameters that the user has permission to change. @@ -300,7 +300,7 @@ Avoid possible crash during backend shutdown if shutdown occurs - when a CONTEXT addition would be made to log entries (Tom) + when a CONTEXT addition would be made to log entries (Tom) @@ -312,7 +312,7 @@ - Update PL/Perl's ppport.h for modern Perl versions + Update PL/Perl's ppport.h for modern Perl versions (Andrew) @@ -325,14 +325,14 @@ - Prevent infinite recursion in psql when expanding + Prevent infinite recursion in psql when expanding a variable that refers to itself (Tom) - Ensure that contrib/pgstattuple functions respond to cancel + Ensure that contrib/pgstattuple functions respond to cancel interrupts promptly (Tatsuhito Kasahara) @@ -340,7 +340,7 @@ Make server startup deal properly with the case that - shmget() returns EINVAL for an existing + shmget() returns EINVAL for an existing shared memory segment (Tom) @@ -353,7 +353,7 @@ - Update time zone data files to tzdata release 2010j + Update time zone data files to tzdata release 2010j for DST law changes in Argentina, Australian Antarctic, Bangladesh, Mexico, Morocco, Pakistan, Palestine, Russia, Syria, Tunisia; also historical corrections for Taiwan. @@ -376,11 +376,11 @@ This release contains a variety of fixes from 8.0.23. For information about new features in the 8.0 major release, see - . + . - The PostgreSQL community will stop releasing updates + The PostgreSQL community will stop releasing updates for the 8.0.X release series in July 2010. Users are encouraged to update to a newer release branch soon. @@ -391,7 +391,7 @@ A dump/restore is not required for those running 8.0.X. However, if you are upgrading from a version earlier than 8.0.22, - see . + see . 
@@ -403,7 +403,7 @@ - Add new configuration parameter ssl_renegotiation_limit to + Add new configuration parameter ssl_renegotiation_limit to control how often we do session key renegotiation for an SSL connection (Magnus) @@ -432,8 +432,8 @@ - Make substring() for bit types treat any negative - length as meaning all the rest of the string (Tom) + Make substring() for bit types treat any negative + length as meaning all the rest of the string (Tom) @@ -459,7 +459,7 @@ - Fix the STOP WAL LOCATION entry in backup history files to + Fix the STOP WAL LOCATION entry in backup history files to report the next WAL segment's name when the end location is exactly at a segment boundary (Itagaki Takahiro) @@ -467,17 +467,17 @@ - When reading pg_hba.conf and related files, do not treat - @something as a file inclusion request if the @ - appears inside quote marks; also, never treat @ by itself + When reading pg_hba.conf and related files, do not treat + @something as a file inclusion request if the @ + appears inside quote marks; also, never treat @ by itself as a file inclusion request (Tom) This prevents erratic behavior if a role or database name starts with - @. If you need to include a file whose path name + @. If you need to include a file whose path name contains spaces, you can still do so, but you must write - @"/path to/file" rather than putting the quotes around + @"/path to/file" rather than putting the quotes around the whole construct. @@ -485,7 +485,7 @@ Prevent infinite loop on some platforms if a directory is named as - an inclusion target in pg_hba.conf and related files + an inclusion target in pg_hba.conf and related files (Tom) @@ -499,7 +499,7 @@ - Add volatile markings in PL/Python to avoid possible + Add volatile markings in PL/Python to avoid possible compiler-specific misbehavior (Zdenek Kotala) @@ -511,28 +511,28 @@ The only known symptom of this oversight is that the Tcl - clock command misbehaves if using Tcl 8.5 or later. + clock command misbehaves if using Tcl 8.5 or later. - Prevent crash in contrib/dblink when too many key - columns are specified to a dblink_build_sql_* function + Prevent crash in contrib/dblink when too many key + columns are specified to a dblink_build_sql_* function (Rushabh Lathia, Joe Conway) - Fix assorted crashes in contrib/xml2 caused by sloppy + Fix assorted crashes in contrib/xml2 caused by sloppy memory management (Tom) - Update time zone data files to tzdata release 2010e + Update time zone data files to tzdata release 2010e for DST law changes in Bangladesh, Chile, Fiji, Mexico, Paraguay, Samoa. @@ -553,7 +553,7 @@ This release contains a variety of fixes from 8.0.22. For information about new features in the 8.0 major release, see - . + . @@ -562,7 +562,7 @@ A dump/restore is not required for those running 8.0.X. However, if you are upgrading from a version earlier than 8.0.22, - see . + see . @@ -604,14 +604,14 @@ - Prevent signals from interrupting VACUUM at unsafe times + Prevent signals from interrupting VACUUM at unsafe times (Alvaro) - This fix prevents a PANIC if a VACUUM FULL is canceled + This fix prevents a PANIC if a VACUUM FULL is canceled after it's already committed its tuple movements, as well as transient - errors if a plain VACUUM is interrupted after having + errors if a plain VACUUM is interrupted after having truncated the table. 
@@ -630,7 +630,7 @@ - Fix very rare crash in inet/cidr comparisons (Chris + Fix very rare crash in inet/cidr comparisons (Chris Mikkelson) @@ -649,7 +649,7 @@ The previous code is known to fail with the combination of the Linux - pam_krb5 PAM module with Microsoft Active Directory as the + pam_krb5 PAM module with Microsoft Active Directory as the domain controller. It might have problems elsewhere too, since it was making unjustified assumptions about what arguments the PAM stack would pass to it. @@ -664,20 +664,20 @@ - Ensure psql's flex module is compiled with the correct + Ensure psql's flex module is compiled with the correct system header definitions (Tom) This fixes build failures on platforms where - --enable-largefile causes incompatible changes in the + --enable-largefile causes incompatible changes in the generated code. - Make the postmaster ignore any application_name parameter in + Make the postmaster ignore any application_name parameter in connection request packets, to improve compatibility with future libpq versions (Tom) @@ -685,7 +685,7 @@ - Update time zone data files to tzdata release 2009s + Update time zone data files to tzdata release 2009s for DST law changes in Antarctica, Argentina, Bangladesh, Fiji, Novokuznetsk, Pakistan, Palestine, Samoa, Syria; also historical corrections for Hong Kong. @@ -708,7 +708,7 @@ This release contains a variety of fixes from 8.0.21. For information about new features in the 8.0 major release, see - . + . @@ -716,10 +716,10 @@ A dump/restore is not required for those running 8.0.X. - However, if you have any hash indexes on interval columns, - you must REINDEX them after updating to 8.0.22. + However, if you have any hash indexes on interval columns, + you must REINDEX them after updating to 8.0.22. Also, if you are upgrading from a version earlier than 8.0.6, - see . + see . @@ -731,14 +731,14 @@ - Disallow RESET ROLE and RESET SESSION - AUTHORIZATION inside security-definer functions (Tom, Heikki) + Disallow RESET ROLE and RESET SESSION + AUTHORIZATION inside security-definer functions (Tom, Heikki) This covers a case that was missed in the previous patch that - disallowed SET ROLE and SET SESSION - AUTHORIZATION inside security-definer functions. + disallowed SET ROLE and SET SESSION + AUTHORIZATION inside security-definer functions. (See CVE-2007-6600) @@ -752,32 +752,32 @@ - Fix hash calculation for data type interval (Tom) + Fix hash calculation for data type interval (Tom) This corrects wrong results for hash joins on interval values. It also changes the contents of hash indexes on interval columns. - If you have any such indexes, you must REINDEX them + If you have any such indexes, you must REINDEX them after updating. - Treat to_char(..., 'TH') as an uppercase ordinal - suffix with 'HH'/'HH12' (Heikki) + Treat to_char(..., 'TH') as an uppercase ordinal + suffix with 'HH'/'HH12' (Heikki) - It was previously handled as 'th' (lowercase). + It was previously handled as 'th' (lowercase). - Fix overflow for INTERVAL 'x ms' - when x is more than 2 million and integer + Fix overflow for INTERVAL 'x ms' + when x is more than 2 million and integer datetimes are in use (Alex Hunsaker) @@ -794,7 +794,7 @@ - Fix money data type to work in locales where currency + Fix money data type to work in locales where currency amounts have no fractional digits, e.g. 
Japan (Itagaki Takahiro) @@ -802,7 +802,7 @@ Properly round datetime input like - 00:12:57.9999999999999999999999999999 (Tom) + 00:12:57.9999999999999999999999999999 (Tom) @@ -821,22 +821,22 @@ - Fix pg_ctl to not go into an infinite loop if - postgresql.conf is empty (Jeff Davis) + Fix pg_ctl to not go into an infinite loop if + postgresql.conf is empty (Jeff Davis) - Fix contrib/xml2's xslt_process() to + Fix contrib/xml2's xslt_process() to properly handle the maximum number of parameters (twenty) (Tom) - Improve robustness of libpq's code to recover - from errors during COPY FROM STDIN (Tom) + Improve robustness of libpq's code to recover + from errors during COPY FROM STDIN (Tom) @@ -849,7 +849,7 @@ - Update time zone data files to tzdata release 2009l + Update time zone data files to tzdata release 2009l for DST law changes in Bangladesh, Egypt, Jordan, Pakistan, Argentina/San_Luis, Cuba, Jordan (historical correction only), Mauritius, Morocco, Palestine, Syria, Tunisia. @@ -872,7 +872,7 @@ This release contains a variety of fixes from 8.0.20. For information about new features in the 8.0 major release, see - . + . @@ -881,7 +881,7 @@ A dump/restore is not required for those running 8.0.X. However, if you are upgrading from a version earlier than 8.0.6, - see . + see . @@ -900,7 +900,7 @@ This change extends fixes made in the last two minor releases for related failure scenarios. The previous fixes were narrowly tailored for the original problem reports, but we have now recognized that - any error thrown by an encoding conversion function could + any error thrown by an encoding conversion function could potentially lead to infinite recursion while trying to report the error. The solution therefore is to disable translation and encoding conversion and report the plain-ASCII form of any error message, @@ -911,7 +911,7 @@ - Disallow CREATE CONVERSION with the wrong encodings + Disallow CREATE CONVERSION with the wrong encodings for the specified conversion function (Heikki) @@ -924,14 +924,14 @@ - Fix core dump when to_char() is given format codes that + Fix core dump when to_char() is given format codes that are inappropriate for the type of the data argument (Tom) - Add MUST (Mauritius Island Summer Time) to the default list + Add MUST (Mauritius Island Summer Time) to the default list of known timezone abbreviations (Xavier Bugaud) @@ -952,7 +952,7 @@ This release contains a variety of fixes from 8.0.19. For information about new features in the 8.0 major release, see - . + . @@ -961,7 +961,7 @@ A dump/restore is not required for those running 8.0.X. However, if you are upgrading from a version earlier than 8.0.6, - see . + see . 
@@ -973,13 +973,13 @@ - Improve handling of URLs in headline() function (Teodor) + Improve handling of URLs in headline() function (Teodor) - Improve handling of overlength headlines in headline() + Improve handling of overlength headlines in headline() function (Teodor) @@ -994,30 +994,30 @@ - Avoid unnecessary locking of small tables in VACUUM + Avoid unnecessary locking of small tables in VACUUM (Heikki) - Fix uninitialized variables in contrib/tsearch2's - get_covers() function (Teodor) + Fix uninitialized variables in contrib/tsearch2's + get_covers() function (Teodor) - Make all documentation reference pgsql-bugs and/or - pgsql-hackers as appropriate, instead of the - now-decommissioned pgsql-ports and pgsql-patches + Make all documentation reference pgsql-bugs and/or + pgsql-hackers as appropriate, instead of the + now-decommissioned pgsql-ports and pgsql-patches mailing lists (Tom) - Update time zone data files to tzdata release 2009a (for + Update time zone data files to tzdata release 2009a (for Kathmandu and historical DST corrections in Switzerland, Cuba) @@ -1038,7 +1038,7 @@ This release contains a variety of fixes from 8.0.18. For information about new features in the 8.0 major release, see - . + . @@ -1047,7 +1047,7 @@ A dump/restore is not required for those running 8.0.X. However, if you are upgrading from a version earlier than 8.0.6, - see . + see . @@ -1065,7 +1065,7 @@ We have addressed similar issues before, but it would still fail if - the character has no equivalent message itself couldn't + the character has no equivalent message itself couldn't be converted. The fix is to disable localization and send the plain ASCII error message when we detect such a situation. @@ -1095,14 +1095,14 @@ Fix improper display of fractional seconds in interval values when - using a non-ISO datestyle in an build (Ron Mayer) - Ensure SPI_getvalue and SPI_getbinval + Ensure SPI_getvalue and SPI_getbinval behave correctly when the passed tuple and tuple descriptor have different numbers of columns (Tom) @@ -1116,19 +1116,19 @@ - Fix ecpg's parsing of CREATE USER (Michael) + Fix ecpg's parsing of CREATE USER (Michael) - Fix recent breakage of pg_ctl restart (Tom) + Fix recent breakage of pg_ctl restart (Tom) - Update time zone data files to tzdata release 2008i (for + Update time zone data files to tzdata release 2008i (for DST law changes in Argentina, Brazil, Mauritius, Syria) @@ -1149,7 +1149,7 @@ This release contains a variety of fixes from 8.0.17. For information about new features in the 8.0 major release, see - . + . @@ -1158,7 +1158,7 @@ A dump/restore is not required for those running 8.0.X. However, if you are upgrading from a version earlier than 8.0.6, - see . + see . @@ -1176,19 +1176,19 @@ This responds to reports that the counters could overflow in sufficiently long transactions, leading to unexpected lock is - already held errors. + already held errors. Add checks in executor startup to ensure that the tuples produced by an - INSERT or UPDATE will match the target table's + INSERT or UPDATE will match the target table's current rowtype (Tom) - ALTER COLUMN TYPE, followed by re-use of a previously + ALTER COLUMN TYPE, followed by re-use of a previously cached plan, could produce this type of situation. The check protects against data corruption and/or crashes that could ensue. 
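A minimal sketch of that scenario, using a prepared statement as the cached plan (table and statement names are hypothetical; the exact error text depends on the release):

    CREATE TABLE t (a integer);
    PREPARE ins AS INSERT INTO t VALUES (42);
    ALTER TABLE t ALTER COLUMN a TYPE text;
    EXECUTE ins;  -- the stale plan still produces an integer column; the new
                  -- executor check now raises an error instead of storing a
                  -- tuple that does not match the table's current rowtype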
@@ -1210,21 +1210,21 @@ Fix bug in backwards scanning of a cursor on a SELECT DISTINCT - ON query (Tom) + ON query (Tom) - Fix planner to estimate that GROUP BY expressions yielding + Fix planner to estimate that GROUP BY expressions yielding boolean results always result in two groups, regardless of the expressions' contents (Tom) This is very substantially more accurate than the regular GROUP - BY estimate for certain boolean tests like col - IS NULL. + BY estimate for certain boolean tests like col + IS NULL. @@ -1247,21 +1247,21 @@ - Improve pg_dump and pg_restore's + Improve pg_dump and pg_restore's error reporting after failure to send a SQL command (Tom) - Fix pg_ctl to properly preserve postmaster - command-line arguments across a restart (Bruce) + Fix pg_ctl to properly preserve postmaster + command-line arguments across a restart (Bruce) - Update time zone data files to tzdata release 2008f (for + Update time zone data files to tzdata release 2008f (for DST law changes in Argentina, Bahamas, Brazil, Mauritius, Morocco, Pakistan, Palestine, and Paraguay) @@ -1283,7 +1283,7 @@ This release contains one serious bug fix over 8.0.16. For information about new features in the 8.0 major release, see - . + . @@ -1292,7 +1292,7 @@ A dump/restore is not required for those running 8.0.X. However, if you are upgrading from a version earlier than 8.0.6, - see . + see . @@ -1304,18 +1304,18 @@ - Make pg_get_ruledef() parenthesize negative constants (Tom) + Make pg_get_ruledef() parenthesize negative constants (Tom) Before this fix, a negative constant in a view or rule might be dumped - as, say, -42::integer, which is subtly incorrect: it should - be (-42)::integer due to operator precedence rules. + as, say, -42::integer, which is subtly incorrect: it should + be (-42)::integer due to operator precedence rules. Usually this would make little difference, but it could interact with another recent patch to cause - PostgreSQL to reject what had been a valid - SELECT DISTINCT view query. Since this could result in - pg_dump output failing to reload, it is being treated + PostgreSQL to reject what had been a valid + SELECT DISTINCT view query. Since this could result in + pg_dump output failing to reload, it is being treated as a high-priority fix. The only released versions in which dump output is actually incorrect are 8.3.1 and 8.2.7. @@ -1337,7 +1337,7 @@ This release contains a variety of fixes from 8.0.15. For information about new features in the 8.0 major release, see - . + . @@ -1346,7 +1346,7 @@ A dump/restore is not required for those running 8.0.X. However, if you are upgrading from a version earlier than 8.0.6, - see . + see . @@ -1358,7 +1358,7 @@ - Fix ALTER TABLE ADD COLUMN ... PRIMARY KEY so that the new + Fix ALTER TABLE ADD COLUMN ... 
PRIMARY KEY so that the new column is correctly checked to see if it's been initialized to all non-nulls (Brendan Jurd) @@ -1370,8 +1370,8 @@ - Fix possible CREATE TABLE failure when inheriting the - same constraint from multiple parent relations that + Fix possible CREATE TABLE failure when inheriting the + same constraint from multiple parent relations that inherited that constraint from a common ancestor (Tom) @@ -1379,7 +1379,7 @@ Fix conversions between ISO-8859-5 and other encodings to handle - Cyrillic Yo characters (e and E with + Cyrillic Yo characters (e and E with two dots) (Sergey Burladyan) @@ -1394,7 +1394,7 @@ This could lead to failures in which two apparently identical literal values were not seen as equal, resulting in the parser complaining - about unmatched ORDER BY and DISTINCT + about unmatched ORDER BY and DISTINCT expressions. @@ -1402,24 +1402,24 @@ Fix a corner case in regular-expression substring matching - (substring(string from - pattern)) (Tom) + (substring(string from + pattern)) (Tom) The problem occurs when there is a match to the pattern overall but the user has specified a parenthesized subexpression and that subexpression hasn't got a match. An example is - substring('foo' from 'foo(bar)?'). - This should return NULL, since (bar) isn't matched, but + substring('foo' from 'foo(bar)?'). + This should return NULL, since (bar) isn't matched, but it was mistakenly returning the whole-pattern match instead (ie, - foo). + foo). - Update time zone data files to tzdata release 2008c (for + Update time zone data files to tzdata release 2008c (for DST law changes in Morocco, Iraq, Choibalsan, Pakistan, Syria, Cuba, Argentina/San_Luis, and Chile) @@ -1427,34 +1427,34 @@ - Fix incorrect result from ecpg's - PGTYPEStimestamp_sub() function (Michael) + Fix incorrect result from ecpg's + PGTYPEStimestamp_sub() function (Michael) - Fix core dump in contrib/xml2's - xpath_table() function when the input query returns a + Fix core dump in contrib/xml2's + xpath_table() function when the input query returns a NULL value (Tom) - Fix contrib/xml2's makefile to not override - CFLAGS (Tom) + Fix contrib/xml2's makefile to not override + CFLAGS (Tom) - Fix DatumGetBool macro to not fail with gcc + Fix DatumGetBool macro to not fail with gcc 4.3 (Tom) - This problem affects old style (V0) C functions that + This problem affects old style (V0) C functions that return boolean. The fix is already in 8.3, but the need to back-patch it was not realized at the time. @@ -1462,21 +1462,21 @@ - Fix longstanding LISTEN/NOTIFY + Fix longstanding LISTEN/NOTIFY race condition (Tom) In rare cases a session that had just executed a - LISTEN might not get a notification, even though + LISTEN might not get a notification, even though one would be expected because the concurrent transaction executing - NOTIFY was observed to commit later. + NOTIFY was observed to commit later. A side effect of the fix is that a transaction that has executed - a not-yet-committed LISTEN command will not see any - row in pg_listener for the LISTEN, + a not-yet-committed LISTEN command will not see any + row in pg_listener for the LISTEN, should it choose to look; formerly it would have. This behavior was never documented one way or the other, but it is possible that some applications depend on the old behavior. 
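A quick way to observe the visibility change described above (the channel name is arbitrary):

    BEGIN;
    LISTEN my_event;
    SELECT relname FROM pg_listener WHERE relname = 'my_event';
    -- now returns no row until COMMIT; formerly the uncommitted LISTEN was already visible
    COMMIT;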
@@ -1502,19 +1502,19 @@ - Fix unrecognized node type error in some variants of - ALTER OWNER (Tom) + Fix unrecognized node type error in some variants of + ALTER OWNER (Tom) - Fix pg_ctl to correctly extract the postmaster's port + Fix pg_ctl to correctly extract the postmaster's port number from command-line options (Itagaki Takahiro, Tom) - Previously, pg_ctl start -w could try to contact the + Previously, pg_ctl start -w could try to contact the postmaster on the wrong port, leading to bogus reports of startup failure. @@ -1522,20 +1522,20 @@ - Use - This is known to be necessary when building PostgreSQL - with gcc 4.3 or later. + This is known to be necessary when building PostgreSQL + with gcc 4.3 or later. - Fix display of constant expressions in ORDER BY - and GROUP BY (Tom) + Fix display of constant expressions in ORDER BY + and GROUP BY (Tom) @@ -1547,7 +1547,7 @@ - Fix libpq to handle NOTICE messages correctly + Fix libpq to handle NOTICE messages correctly during COPY OUT (Tom) @@ -1575,12 +1575,12 @@ This release contains a variety of fixes from 8.0.14, including fixes for significant security issues. For information about new features in the 8.0 major release, see - . + . - This is the last 8.0.X release for which the PostgreSQL - community will produce binary packages for Windows. + This is the last 8.0.X release for which the PostgreSQL + community will produce binary packages for Windows. Windows users are encouraged to move to 8.2.X or later, since there are Windows-specific fixes in 8.2.X that are impractical to back-port. 8.0.X will continue to @@ -1593,7 +1593,7 @@ A dump/restore is not required for those running 8.0.X. However, if you are upgrading from a version earlier than 8.0.6, - see . + see . @@ -1606,7 +1606,7 @@ Prevent functions in indexes from executing with the privileges of - the user running VACUUM, ANALYZE, etc (Tom) + the user running VACUUM, ANALYZE, etc (Tom) @@ -1617,18 +1617,18 @@ (Note that triggers, defaults, check constraints, etc. pose the same type of risk.) But functions in indexes pose extra danger because they will be executed by routine maintenance operations - such as VACUUM FULL, which are commonly performed + such as VACUUM FULL, which are commonly performed automatically under a superuser account. For example, a nefarious user can execute code with superuser privileges by setting up a trojan-horse index definition and waiting for the next routine vacuum. The fix arranges for standard maintenance operations - (including VACUUM, ANALYZE, REINDEX, - and CLUSTER) to execute as the table owner rather than + (including VACUUM, ANALYZE, REINDEX, + and CLUSTER) to execute as the table owner rather than the calling user, using the same privilege-switching mechanism already - used for SECURITY DEFINER functions. To prevent bypassing + used for SECURITY DEFINER functions. To prevent bypassing this security measure, execution of SET SESSION - AUTHORIZATION and SET ROLE is now forbidden within a - SECURITY DEFINER context. (CVE-2007-6600) + AUTHORIZATION and SET ROLE is now forbidden within a + SECURITY DEFINER context. (CVE-2007-6600) @@ -1648,20 +1648,20 @@ - Require non-superusers who use /contrib/dblink to use only + Require non-superusers who use /contrib/dblink to use only password authentication, as a security measure (Joe) The fix that appeared for this in 8.0.14 was incomplete, as it plugged - the hole for only some dblink functions. (CVE-2007-6601, + the hole for only some dblink functions. 
(CVE-2007-6601, CVE-2007-3278) - Update time zone data files to tzdata release 2007k + Update time zone data files to tzdata release 2007k (in particular, recent Argentina changes) (Tom) @@ -1669,14 +1669,14 @@ Fix planner failure in some cases of WHERE false AND var IN - (SELECT ...) (Tom) + (SELECT ...) (Tom) Preserve the tablespace of indexes that are - rebuilt by ALTER TABLE ... ALTER COLUMN TYPE (Tom) + rebuilt by ALTER TABLE ... ALTER COLUMN TYPE (Tom) @@ -1695,27 +1695,27 @@ - Make VACUUM not use all of maintenance_work_mem + Make VACUUM not use all of maintenance_work_mem when the table is too small for it to be useful (Alvaro) - Fix potential crash in translate() when using a multibyte + Fix potential crash in translate() when using a multibyte database encoding (Tom) - Fix PL/Perl to cope when platform's Perl defines type bool - as int rather than char (Tom) + Fix PL/Perl to cope when platform's Perl defines type bool + as int rather than char (Tom) While this could theoretically happen anywhere, no standard build of - Perl did things this way ... until macOS 10.5. + Perl did things this way ... until macOS 10.5. @@ -1727,49 +1727,49 @@ - Fix pg_dump to correctly handle inheritance child tables + Fix pg_dump to correctly handle inheritance child tables that have default expressions different from their parent's (Tom) - ecpg parser fixes (Michael) + ecpg parser fixes (Michael) - Make contrib/tablefunc's crosstab() handle + Make contrib/tablefunc's crosstab() handle NULL rowid as a category in its own right, rather than crashing (Joe) - Fix tsvector and tsquery output routines to + Fix tsvector and tsquery output routines to escape backslashes correctly (Teodor, Bruce) - Fix crash of to_tsvector() on huge input strings (Teodor) + Fix crash of to_tsvector() on huge input strings (Teodor) - Require a specific version of Autoconf to be used - when re-generating the configure script (Peter) + Require a specific version of Autoconf to be used + when re-generating the configure script (Peter) This affects developers and packagers only. The change was made to prevent accidental use of untested combinations of - Autoconf and PostgreSQL versions. + Autoconf and PostgreSQL versions. You can remove the version check if you really want to use a - different Autoconf version, but it's + different Autoconf version, but it's your responsibility whether the result works or not. @@ -1790,7 +1790,7 @@ This release contains a variety of fixes from 8.0.13. For information about new features in the 8.0 major release, see - . + . @@ -1799,7 +1799,7 @@ A dump/restore is not required for those running 8.0.X. However, if you are upgrading from a version earlier than 8.0.6, - see . + see . @@ -1812,20 +1812,20 @@ Prevent index corruption when a transaction inserts rows and - then aborts close to the end of a concurrent VACUUM + then aborts close to the end of a concurrent VACUUM on the same table (Tom) - Make CREATE DOMAIN ... DEFAULT NULL work properly (Tom) + Make CREATE DOMAIN ... 
DEFAULT NULL work properly (Tom) - Fix excessive logging of SSL error messages (Tom) + Fix excessive logging of SSL error messages (Tom) @@ -1838,7 +1838,7 @@ - Fix crash when log_min_error_statement logging runs out + Fix crash when log_min_error_statement logging runs out of memory (Tom) @@ -1851,7 +1851,7 @@ - Prevent CLUSTER from failing + Prevent CLUSTER from failing due to attempting to process temporary tables of other sessions (Alvaro) @@ -1870,14 +1870,14 @@ - Suppress timezone name (%Z) in log timestamps on Windows + Suppress timezone name (%Z) in log timestamps on Windows because of possible encoding mismatches (Tom) - Require non-superusers who use /contrib/dblink to use only + Require non-superusers who use /contrib/dblink to use only password authentication, as a security measure (Joe) @@ -1899,7 +1899,7 @@ This release contains a variety of fixes from 8.0.12, including a security fix. For information about new features in the 8.0 major release, see - . + . @@ -1908,7 +1908,7 @@ A dump/restore is not required for those running 8.0.X. However, if you are upgrading from a version earlier than 8.0.6, - see . + see . @@ -1921,28 +1921,28 @@ Support explicit placement of the temporary-table schema within - search_path, and disable searching it for functions + search_path, and disable searching it for functions and operators (Tom) This is needed to allow a security-definer function to set a - truly secure value of search_path. Without it, + truly secure value of search_path. Without it, an unprivileged SQL user can use temporary objects to execute code with the privileges of the security-definer function (CVE-2007-2138). - See CREATE FUNCTION for more information. + See CREATE FUNCTION for more information. - /contrib/tsearch2 crash fixes (Teodor) + /contrib/tsearch2 crash fixes (Teodor) - Fix potential-data-corruption bug in how VACUUM FULL handles - UPDATE chains (Tom, Pavan Deolasee) + Fix potential-data-corruption bug in how VACUUM FULL handles + UPDATE chains (Tom, Pavan Deolasee) @@ -1975,7 +1975,7 @@ This release contains one fix from 8.0.11. For information about new features in the 8.0 major release, see - . + . @@ -1984,7 +1984,7 @@ A dump/restore is not required for those running 8.0.X. However, if you are upgrading from a version earlier than 8.0.6, - see . + see . @@ -2018,7 +2018,7 @@ This release contains a variety of fixes from 8.0.10, including a security fix. For information about new features in the 8.0 major release, see - . + . @@ -2027,7 +2027,7 @@ A dump/restore is not required for those running 8.0.X. However, if you are upgrading from a version earlier than 8.0.6, - see . + see . @@ -2061,7 +2061,7 @@ - Fix for rare Assert() crash triggered by UNION (Tom) + Fix for rare Assert() crash triggered by UNION (Tom) @@ -2088,7 +2088,7 @@ This release contains a variety of fixes from 8.0.9. For information about new features in the 8.0 major release, see - . + . @@ -2097,7 +2097,7 @@ A dump/restore is not required for those running 8.0.X. However, if you are upgrading from a version earlier than 8.0.6, - see . + see . 
@@ -2109,7 +2109,7 @@ - Improve handling of getaddrinfo() on AIX (Tom) + Improve handling of getaddrinfo() on AIX (Tom) @@ -2120,15 +2120,15 @@ - Fix failed to re-find parent key errors in - VACUUM (Tom) + Fix failed to re-find parent key errors in + VACUUM (Tom) Fix race condition for truncation of a large relation across a - gigabyte boundary by VACUUM (Tom) + gigabyte boundary by VACUUM (Tom) @@ -2146,7 +2146,7 @@ - Fix error when constructing an ARRAY[] made up of multiple + Fix error when constructing an ARRAY[] made up of multiple empty elements (Tom) @@ -2159,13 +2159,13 @@ - to_number() and to_char(numeric) - are now STABLE, not IMMUTABLE, for - new initdb installs (Tom) + to_number() and to_char(numeric) + are now STABLE, not IMMUTABLE, for + new initdb installs (Tom) - This is because lc_numeric can potentially + This is because lc_numeric can potentially change the output of these functions. @@ -2176,7 +2176,7 @@ - This improves psql \d performance also. + This improves psql \d performance also. @@ -2207,7 +2207,7 @@ This release contains a variety of fixes from 8.0.8. For information about new features in the 8.0 major release, see - . + . @@ -2216,7 +2216,7 @@ A dump/restore is not required for those running 8.0.X. However, if you are upgrading from a version earlier than 8.0.6, - see . + see . @@ -2225,28 +2225,28 @@ Changes -Fix crash when referencing NEW row +Fix crash when referencing NEW row values in rule WHERE expressions (Tom) Fix core dump when an untyped literal is taken as ANYARRAY Fix mishandling of AFTER triggers when query contains a SQL function returning multiple rows (Tom) -Fix ALTER TABLE ... TYPE to recheck -NOT NULL for USING clause (Tom) -Fix string_to_array() to handle overlapping +Fix ALTER TABLE ... TYPE to recheck +NOT NULL for USING clause (Tom) +Fix string_to_array() to handle overlapping matches for the separator string -For example, string_to_array('123xx456xxx789', 'xx'). +For example, string_to_array('123xx456xxx789', 'xx'). Fix corner cases in pattern matching for - psql's \d commands + psql's \d commands Fix index-corrupting bugs in /contrib/ltree (Teodor) -Numerous robustness fixes in ecpg (Joachim +Numerous robustness fixes in ecpg (Joachim Wieland) Fix backslash escaping in /contrib/dbmirror Fix instability of statistics collection on Win32 (Tom, Andrew) -Fixes for AIX and -Intel compilers (Tom) +Fixes for AIX and +Intel compilers (Tom) @@ -2264,7 +2264,7 @@ Wieland) This release contains a variety of fixes from 8.0.7, including patches for extremely serious security issues. For information about new features in the 8.0 major release, see - . + . @@ -2273,7 +2273,7 @@ Wieland) A dump/restore is not required for those running 8.0.X. However, if you are upgrading from a version earlier than 8.0.6, - see . + see . @@ -2283,9 +2283,9 @@ Wieland) into SQL commands, you should examine them as soon as possible to ensure that they are using recommended escaping techniques. In most cases, applications should be using subroutines provided by - libraries or drivers (such as libpq's - PQescapeStringConn()) to perform string escaping, - rather than relying on ad hoc code to do it. + libraries or drivers (such as libpq's + PQescapeStringConn()) to perform string escaping, + rather than relying on ad hoc code to do it. 
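For context on the escaping changes that follow, doubling the quote is the encoding-safe way to embed a single quote at the SQL level, while the backslash form is the one being restricted (a minimal illustration):

    SELECT 'O''Malley';   -- always safe, regardless of client_encoding
    SELECT 'O\'Malley';   -- rejected when client_encoding is one of the affected
                          -- client-only encodings (SJIS, BIG5, GBK, GB18030, UHC)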
@@ -2295,48 +2295,48 @@ Wieland) Change the server to reject invalidly-encoded multibyte characters in all cases (Tatsuo, Tom) -While PostgreSQL has been moving in this direction for +While PostgreSQL has been moving in this direction for some time, the checks are now applied uniformly to all encodings and all textual input, and are now always errors not merely warnings. This change defends against SQL-injection attacks of the type described in CVE-2006-2313. -Reject unsafe uses of \' in string literals +Reject unsafe uses of \' in string literals As a server-side defense against SQL-injection attacks of the type -described in CVE-2006-2314, the server now only accepts '' and not -\' as a representation of ASCII single quote in SQL string -literals. By default, \' is rejected only when -client_encoding is set to a client-only encoding (SJIS, BIG5, GBK, +described in CVE-2006-2314, the server now only accepts '' and not +\' as a representation of ASCII single quote in SQL string +literals. By default, \' is rejected only when +client_encoding is set to a client-only encoding (SJIS, BIG5, GBK, GB18030, or UHC), which is the scenario in which SQL injection is possible. -A new configuration parameter backslash_quote is available to +A new configuration parameter backslash_quote is available to adjust this behavior when needed. Note that full security against CVE-2006-2314 might require client-side changes; the purpose of -backslash_quote is in part to make it obvious that insecure +backslash_quote is in part to make it obvious that insecure clients are insecure. -Modify libpq's string-escaping routines to be +Modify libpq's string-escaping routines to be aware of encoding considerations and -standard_conforming_strings -This fixes libpq-using applications for the security +standard_conforming_strings +This fixes libpq-using applications for the security issues described in CVE-2006-2313 and CVE-2006-2314, and also future-proofs them against the planned changeover to SQL-standard string literal syntax. -Applications that use multiple PostgreSQL connections -concurrently should migrate to PQescapeStringConn() and -PQescapeByteaConn() to ensure that escaping is done correctly +Applications that use multiple PostgreSQL connections +concurrently should migrate to PQescapeStringConn() and +PQescapeByteaConn() to ensure that escaping is done correctly for the settings in use in each database connection. Applications that -do string escaping by hand should be modified to rely on library +do string escaping by hand should be modified to rely on library routines instead. Fix some incorrect encoding conversion functions -win1251_to_iso, alt_to_iso, -euc_tw_to_big5, euc_tw_to_mic, -mic_to_euc_tw were all broken to varying +win1251_to_iso, alt_to_iso, +euc_tw_to_big5, euc_tw_to_mic, +mic_to_euc_tw were all broken to varying extents. -Clean up stray remaining uses of \' in strings +Clean up stray remaining uses of \' in strings (Bruce, Jan) Fix bug that sometimes caused OR'd index scans to @@ -2345,10 +2345,10 @@ miss rows they should have returned Fix WAL replay for case where a btree index has been truncated -Fix SIMILAR TO for patterns involving -| (Tom) +Fix SIMILAR TO for patterns involving +| (Tom) -Fix SELECT INTO and CREATE TABLE AS to +Fix SELECT INTO and CREATE TABLE AS to create tables in the default tablespace, not the base directory (Kris Jurka) @@ -2377,7 +2377,7 @@ Fuhr) This release contains a variety of fixes from 8.0.6. For information about new features in the 8.0 major release, see - . 
+ . @@ -2386,7 +2386,7 @@ Fuhr) A dump/restore is not required for those running 8.0.X. However, if you are upgrading from a version earlier than 8.0.6, - see . + see . @@ -2396,7 +2396,7 @@ Fuhr) Fix potential crash in SET -SESSION AUTHORIZATION (CVE-2006-0553) +SESSION AUTHORIZATION (CVE-2006-0553) An unprivileged user could crash the server process, resulting in momentary denial of service to other users, if the server has been compiled with Asserts enabled (which is not the default). @@ -2411,44 +2411,44 @@ created in 8.0.4, 7.4.9, and 7.3.11 releases. Fix race condition that could lead to file already -exists errors during pg_clog and pg_subtrans file creation +exists errors during pg_clog and pg_subtrans file creation (Tom) Fix cases that could lead to crashes if a cache-invalidation message arrives at just the wrong time (Tom) -Properly check DOMAIN constraints for -UNKNOWN parameters in prepared statements +Properly check DOMAIN constraints for +UNKNOWN parameters in prepared statements (Neil) -Ensure ALTER COLUMN TYPE will process -FOREIGN KEY, UNIQUE, and PRIMARY KEY +Ensure ALTER COLUMN TYPE will process +FOREIGN KEY, UNIQUE, and PRIMARY KEY constraints in the proper order (Nakano Yoshihisa) Fixes to allow restoring dumps that have cross-schema references to custom operators or operator classes (Tom) -Allow pg_restore to continue properly after a -COPY failure; formerly it tried to treat the remaining -COPY data as SQL commands (Stephen Frost) +Allow pg_restore to continue properly after a +COPY failure; formerly it tried to treat the remaining +COPY data as SQL commands (Stephen Frost) -Fix pg_ctl unregister crash +Fix pg_ctl unregister crash when the data directory is not specified (Magnus) -Fix ecpg crash on AMD64 and PPC +Fix ecpg crash on AMD64 and PPC (Neil) Recover properly if error occurs during argument passing -in PL/Python (Neil) +in PL/Python (Neil) -Fix PL/Perl's handling of locales on +Fix PL/Perl's handling of locales on Win32 to match the backend (Andrew) -Fix crash when log_min_messages is set to -DEBUG3 or above in postgresql.conf on Win32 +Fix crash when log_min_messages is set to +DEBUG3 or above in postgresql.conf on Win32 (Bruce) -Fix pgxs -L library path +Fix pgxs -L library path specification for Win32, Cygwin, macOS, AIX (Bruce) Check that SID is enabled while checking for Win32 admin @@ -2457,8 +2457,8 @@ privileges (Magnus) Properly reject out-of-range date inputs (Kris Jurka) -Portability fix for testing presence of finite -and isinf during configure (Tom) +Portability fix for testing presence of finite +and isinf during configure (Tom) @@ -2476,7 +2476,7 @@ and isinf during configure (Tom) This release contains a variety of fixes from 8.0.5. For information about new features in the 8.0 major release, see - . + . @@ -2485,10 +2485,10 @@ and isinf during configure (Tom) A dump/restore is not required for those running 8.0.X. However, if you are upgrading from a version earlier than 8.0.3, - see . - Also, you might need to REINDEX indexes on textual + see . + Also, you might need to REINDEX indexes on textual columns after updating, if you are affected by the locale or - plperl issues described below. + plperl issues described below. @@ -2501,7 +2501,7 @@ and isinf during configure (Tom) than exit if there is no more room in ShmemBackendArray (Magnus) The previous behavior could lead to a denial-of-service situation if too many connection requests arrive close together. This applies -only to the Windows port. +only to the Windows port. 
Fix bug introduced in 8.0 that could allow ReadBuffer to return an already-used page as new, potentially causing loss of @@ -2512,16 +2512,16 @@ outside a transaction or in a failed transaction (Tom) Fix character string comparison for locales that consider different character combinations as equal, such as Hungarian (Tom) -This might require REINDEX to fix existing indexes on +This might require REINDEX to fix existing indexes on textual columns. Set locale environment variables during postmaster startup -to ensure that plperl won't change the locale later -This fixes a problem that occurred if the postmaster was +to ensure that plperl won't change the locale later +This fixes a problem that occurred if the postmaster was started with environment variables specifying a different locale than what -initdb had been told. Under these conditions, any use of -plperl was likely to lead to corrupt indexes. You might need -REINDEX to fix existing indexes on +initdb had been told. Under these conditions, any use of +plperl was likely to lead to corrupt indexes. You might need +REINDEX to fix existing indexes on textual columns if this has happened to you. Allow more flexible relocation of installation @@ -2533,15 +2533,15 @@ directory paths were the same except for the last component. handling in certain rarely used Asian multi-byte character sets (Tatsuo) -Various fixes for functions returning RECORDs +Various fixes for functions returning RECORDs (Tom) -Fix bug in /contrib/pgcrypto gen_salt, +Fix bug in /contrib/pgcrypto gen_salt, which caused it not to use all available salt space for MD5 and XDES algorithms (Marko Kreen, Solar Designer) Salts for Blowfish and standard DES are unaffected. -Fix /contrib/dblink to throw an error, +Fix /contrib/dblink to throw an error, rather than crashing, when the number of columns specified is different from what's actually returned by the query (Joe) @@ -2561,7 +2561,7 @@ what's actually returned by the query (Joe) This release contains a variety of fixes from 8.0.4. For information about new features in the 8.0 major release, see - . + . @@ -2570,7 +2570,7 @@ what's actually returned by the query (Joe) A dump/restore is not required for those running 8.0.X. However, if you are upgrading from a version earlier than 8.0.3, - see . + see . @@ -2597,35 +2597,35 @@ later VACUUM commands. Prevent failure if client sends Bind protocol message when current transaction is already aborted -/contrib/ltree fixes (Teodor) +/contrib/ltree fixes (Teodor) AIX and HPUX compile fixes (Tom) Retry file reads and writes after Windows NO_SYSTEM_RESOURCES error (Qingqing Zhou) -Fix intermittent failure when log_line_prefix -includes %i +Fix intermittent failure when log_line_prefix +includes %i -Fix psql performance issue with long scripts +Fix psql performance issue with long scripts on Windows (Merlin Moncure) -Fix missing updates of pg_group flat +Fix missing updates of pg_group flat file Fix longstanding planning error for outer joins This bug sometimes caused a bogus error RIGHT JOIN is -only supported with merge-joinable join conditions. +only supported with merge-joinable join conditions. Postpone timezone initialization until after -postmaster.pid is created +postmaster.pid is created This avoids confusing startup scripts that expect the pid file to appear quickly. 
-Prevent core dump in pg_autovacuum when a +Prevent core dump in pg_autovacuum when a table has been dropped -Fix problems with whole-row references (foo.*) +Fix problems with whole-row references (foo.*) to subquery results @@ -2643,7 +2643,7 @@ to subquery results This release contains a variety of fixes from 8.0.3. For information about new features in the 8.0 major release, see - . + . @@ -2652,7 +2652,7 @@ to subquery results A dump/restore is not required for those running 8.0.X. However, if you are upgrading from a version earlier than 8.0.3, - see . + see . @@ -2660,69 +2660,69 @@ to subquery results Changes -Fix error that allowed VACUUM to remove -ctid chains too soon, and add more checking in code that follows -ctid links +Fix error that allowed VACUUM to remove +ctid chains too soon, and add more checking in code that follows +ctid links This fixes a long-standing problem that could cause crashes in very rare circumstances. -Fix CHAR() to properly pad spaces to the specified +Fix CHAR() to properly pad spaces to the specified length when using a multiple-byte character set (Yoshiyuki Asaba) -In prior releases, the padding of CHAR() was incorrect +In prior releases, the padding of CHAR() was incorrect because it only padded to the specified number of bytes without considering how many characters were stored. Force a checkpoint before committing CREATE -DATABASE -This should fix recent reports of index is not a btree +DATABASE +This should fix recent reports of index is not a btree failures when a crash occurs shortly after CREATE -DATABASE. +DATABASE. Fix the sense of the test for read-only transaction -in COPY -The code formerly prohibited COPY TO, where it should -prohibit COPY FROM. +in COPY +The code formerly prohibited COPY TO, where it should +prohibit COPY FROM. -Handle consecutive embedded newlines in COPY +Handle consecutive embedded newlines in COPY CSV-mode input -Fix date_trunc(week) for dates near year +Fix date_trunc(week) for dates near year end Fix planning problem with outer-join ON clauses that reference only the inner-side relation -Further fixes for x FULL JOIN y ON true corner +Further fixes for x FULL JOIN y ON true corner cases Fix overenthusiastic optimization of x IN (SELECT -DISTINCT ...) and related cases -Fix mis-planning of queries with small LIMIT -values due to poorly thought out fuzzy cost +DISTINCT ...) and related cases +Fix mis-planning of queries with small LIMIT +values due to poorly thought out fuzzy cost comparison -Make array_in and array_recv more +Make array_in and array_recv more paranoid about validating their OID parameter Fix missing rows in queries like UPDATE a=... WHERE -a... with GiST index on column a +a... 
with GiST index on column a Improve robustness of datetime parsing Improve checking for partially-written WAL pages Improve robustness of signal handling when SSL is enabled Improve MIPS and M68K spinlock code -Don't try to open more than max_files_per_process +Don't try to open more than max_files_per_process files during postmaster startup Various memory leakage fixes Various portability improvements Update timezone data files Improve handling of DLL load failures on Windows Improve random-number generation on Windows -Make psql -f filename return a nonzero exit code +Make psql -f filename return a nonzero exit code when opening the file fails -Change pg_dump to handle inherited check +Change pg_dump to handle inherited check constraints more reliably -Fix password prompting in pg_restore on +Fix password prompting in pg_restore on Windows -Fix PL/pgSQL to handle var := var correctly when +Fix PL/pgSQL to handle var := var correctly when the variable is of pass-by-reference type -Fix PL/Perl %_SHARED so it's actually +Fix PL/Perl %_SHARED so it's actually shared -Fix contrib/pg_autovacuum to allow sleep +Fix contrib/pg_autovacuum to allow sleep intervals over 2000 sec -Update contrib/tsearch2 to use current Snowball +Update contrib/tsearch2 to use current Snowball code @@ -2741,7 +2741,7 @@ code This release contains a variety of fixes from 8.0.2, including several security-related issues. For information about new features in the 8.0 major release, see - . + . @@ -2766,10 +2766,10 @@ code - The lesser problem is that the contrib/tsearch2 module + The lesser problem is that the contrib/tsearch2 module creates several functions that are improperly declared to return - internal when they do not accept internal arguments. - This breaks type safety for all functions using internal + internal when they do not accept internal arguments. + This breaks type safety for all functions using internal arguments. @@ -2794,10 +2794,10 @@ code Change encoding function signature to prevent misuse -Change contrib/tsearch2 to avoid unsafe use of -INTERNAL function results +Change contrib/tsearch2 to avoid unsafe use of +INTERNAL function results Guard against incorrect second parameter to -record_out +record_out Repair ancient race condition that allowed a transaction to be seen as committed for some purposes (eg SELECT FOR UPDATE) slightly sooner than for other purposes @@ -2809,36 +2809,36 @@ VACUUM freshly-inserted data, although the scenario seems of very low probability. There are no known cases of it having caused more than an Assert failure. -Fix comparisons of TIME WITH TIME ZONE values +Fix comparisons of TIME WITH TIME ZONE values The comparison code was wrong in the case where the ---enable-integer-datetimes configuration switch had been used. -NOTE: if you have an index on a TIME WITH TIME ZONE column, -it will need to be REINDEXed after installing this update, because +--enable-integer-datetimes configuration switch had been used. +NOTE: if you have an index on a TIME WITH TIME ZONE column, +it will need to be REINDEXed after installing this update, because the fix corrects the sort order of column values. -Fix EXTRACT(EPOCH) for -TIME WITH TIME ZONE values +Fix EXTRACT(EPOCH) for +TIME WITH TIME ZONE values Fix mis-display of negative fractional seconds in -INTERVAL values +INTERVAL values This error only occurred when the ---enable-integer-datetimes configuration switch had been used. +--enable-integer-datetimes configuration switch had been used. 
-Fix pg_dump to dump trigger names containing % +Fix pg_dump to dump trigger names containing % correctly (Neil) Still more 64-bit fixes for -contrib/intagg +contrib/intagg Prevent incorrect optimization of functions returning -RECORD -Prevent crash on COALESCE(NULL,NULL) +RECORD +Prevent crash on COALESCE(NULL,NULL) Fix Borland makefile for libpq -Fix contrib/btree_gist for timetz type +Fix contrib/btree_gist for timetz type (Teodor) -Make pg_ctl check the PID found in -postmaster.pid to see if it is still a live +Make pg_ctl check the PID found in +postmaster.pid to see if it is still a live process -Fix pg_dump/pg_restore problems caused +Fix pg_dump/pg_restore problems caused by addition of dump timestamps Fix interaction between materializing holdable cursors and firing deferred triggers during transaction commit @@ -2860,7 +2860,7 @@ data types This release contains a variety of fixes from 8.0.1. For information about new features in the 8.0 major release, see - . + . @@ -2883,51 +2883,51 @@ data types libraries (Bruce) This should have been done in 8.0.0. It is required so 7.4.X versions -of PostgreSQL client applications, like psql, +of PostgreSQL client applications, like psql, can be used on the same machine as 8.0.X applications. This might require re-linking user applications that use these libraries. -Add Windows-only wal_sync_method setting of - +Add Windows-only wal_sync_method setting of + (Magnus, Bruce) This setting causes PostgreSQL to write through any disk-drive write cache when writing to WAL. -This behavior was formerly called , but was +renamed because it acts quite differently from on other platforms. -Enable the wal_sync_method setting of - -Formerly the array would remain NULL, but now it becomes a +Formerly the array would remain NULL, but now it becomes a single-element array. The main SQL engine was changed to handle -UPDATE of a null array value this way in 8.0, but the similar +UPDATE of a null array value this way in 8.0, but the similar case in plpgsql was overlooked. -Convert \r\n and \r to \n +Convert \r\n and \r to \n in plpython function bodies (Michael Fuhr) This prevents syntax errors when plpython code is written on a Windows or @@ -2935,72 +2935,72 @@ in plpython function bodies (Michael Fuhr) Allow SPI cursors to handle utility commands that return rows, -such as EXPLAIN (Tom) -Fix CLUSTER failure after ALTER TABLE -SET WITHOUT OIDS (Tom) -Reduce memory usage of ALTER TABLE ADD COLUMN +such as EXPLAIN (Tom) +Fix CLUSTER failure after ALTER TABLE +SET WITHOUT OIDS (Tom) +Reduce memory usage of ALTER TABLE ADD COLUMN (Neil) -Fix ALTER LANGUAGE RENAME (Tom) -Document the Windows-only register and -unregister options of pg_ctl (Magnus) +Fix ALTER LANGUAGE RENAME (Tom) +Document the Windows-only register and +unregister options of pg_ctl (Magnus) Ensure operations done during backend shutdown are counted by statistics collector -This is expected to resolve reports of pg_autovacuum +This is expected to resolve reports of pg_autovacuum not vacuuming the system catalogs often enough — it was not being told about catalog deletions caused by temporary table removal during backend exit. Change the Windows default for configuration parameter -log_destination to +log_destination to (Magnus) By default, a server running on Windows will now send log output to the Windows event logger rather than standard error. 
Make Kerberos authentication work on Windows (Magnus) -Allow ALTER DATABASE RENAME by superusers +Allow ALTER DATABASE RENAME by superusers who aren't flagged as having CREATEDB privilege (Tom) -Modify WAL log entries for CREATE and -DROP DATABASE to not specify absolute paths (Tom) +Modify WAL log entries for CREATE and +DROP DATABASE to not specify absolute paths (Tom) This allows point-in-time recovery on a different machine with possibly -different database location. Note that CREATE TABLESPACE still +different database location. Note that CREATE TABLESPACE still poses a hazard in such situations. Fix crash from a backend exiting with an open transaction that created a table and opened a cursor on it (Tom) -Fix array_map() so it can call PL functions +Fix array_map() so it can call PL functions (Tom) -Several contrib/tsearch2 and -contrib/btree_gist fixes (Teodor) +Several contrib/tsearch2 and +contrib/btree_gist fixes (Teodor) -Fix crash of some contrib/pgcrypto +Fix crash of some contrib/pgcrypto functions on some platforms (Marko Kreen) -Fix contrib/intagg for 64-bit platforms +Fix contrib/intagg for 64-bit platforms (Tom) -Fix ecpg bugs in parsing of CREATE statement +Fix ecpg bugs in parsing of CREATE statement (Michael) Work around gcc bug on powerpc and amd64 causing problems in ecpg (Christof Petig) -Do not use locale-aware versions of upper(), -lower(), and initcap() when the locale is -C (Bruce) +Do not use locale-aware versions of upper(), +lower(), and initcap() when the locale is +C (Bruce) This allows these functions to work on platforms that generate errors - for non-7-bit data when the locale is C. + for non-7-bit data when the locale is C. -Fix quote_ident() to quote names that match keywords (Tom) -Fix to_date() to behave reasonably when -CC and YY fields are both used (Karel) -Prevent to_char(interval) from failing +Fix quote_ident() to quote names that match keywords (Tom) +Fix to_date() to behave reasonably when +CC and YY fields are both used (Karel) +Prevent to_char(interval) from failing when given a zero-month interval (Tom) -Fix wrong week returned by date_trunc('week') +Fix wrong week returned by date_trunc('week') (Bruce) -date_trunc('week') +date_trunc('week') returned the wrong year for the first few days of January in some years. -Use the correct default mask length for class D -addresses in INET data types (Tom) +Use the correct default mask length for class D +addresses in INET data types (Tom) @@ -3018,7 +3018,7 @@ addresses in INET data types (Tom) This release contains a variety of fixes from 8.0.0, including several security-related issues. For information about new features in the 8.0 major release, see - . + . @@ -3033,11 +3033,11 @@ addresses in INET data types (Tom) Changes -Disallow LOAD to non-superusers +Disallow LOAD to non-superusers On platforms that will automatically execute initialization functions of a shared library (this includes at least Windows and ELF-based Unixen), -LOAD can be used to make the server execute arbitrary code. +LOAD can be used to make the server execute arbitrary code. Thanks to NGS Software for reporting this. 
Check that creator of an aggregate function has the right to execute the specified transition functions @@ -3050,7 +3050,7 @@ contrib/intagg Jurka) Avoid buffer overrun when plpgsql cursor declaration has too many parameters (Neil) -Make ALTER TABLE ADD COLUMN enforce domain +Make ALTER TABLE ADD COLUMN enforce domain constraints in all cases Fix planning error for FULL and RIGHT outer joins @@ -3059,7 +3059,7 @@ left input. This could not only deliver mis-sorted output to the user, but in case of nested merge joins could give outright wrong answers. Improve planning of grouped aggregate queries -ROLLBACK TO savepoint +ROLLBACK TO savepoint closes cursors created since the savepoint Fix inadequate backend stack size on Windows Avoid SHGetSpecialFolderPath() on Windows @@ -3099,17 +3099,17 @@ typedefs (Michael) This is the first PostgreSQL release - to run natively on Microsoft Windows as - a server. It can run as a Windows service. This + to run natively on Microsoft Windows as + a server. It can run as a Windows service. This release supports NT-based Windows releases like - Windows 2000 SP4, Windows XP, and - Windows 2003. Older releases like - Windows 95, Windows 98, and - Windows ME are not supported because these operating + Windows 2000 SP4, Windows XP, and + Windows 2003. Older releases like + Windows 95, Windows 98, and + Windows ME are not supported because these operating systems do not have the infrastructure to support PostgreSQL. A separate installer project has been created to ease installation on - Windows — see Windows — see . @@ -3123,7 +3123,7 @@ typedefs (Michael) Previous releases required the Unix emulation toolkit - Cygwin in order to run the server on Windows + Cygwin in order to run the server on Windows operating systems. PostgreSQL has supported native clients on Windows for many years. @@ -3174,7 +3174,7 @@ typedefs (Michael) Tablespaces allow administrators to select different file systems for storage of individual tables, indexes, and databases. This improves performance and control over disk space - usage. Prior releases used initlocation and + usage. Prior releases used initlocation and manual symlink management for such tasks. @@ -3216,7 +3216,7 @@ typedefs (Michael) - A new version of the plperl server-side language now + A new version of the plperl server-side language now supports a persistent shared storage area, triggers, returning records and arrays of records, and SPI calls to access the database. @@ -3257,7 +3257,7 @@ typedefs (Michael) - In serialization mode, volatile functions now see the results of concurrent transactions committed up to the beginning of each statement within the function, rather than up to the beginning of the interactive command that called the function. @@ -3266,18 +3266,18 @@ typedefs (Michael) - Functions declared or always use the snapshot of the calling query, and therefore do not see the effects of actions taken after the calling query starts, whether in their own transaction or other transactions. Such a function must be read-only, too, meaning that it cannot use any SQL commands other than - SELECT. + SELECT. - Nondeferred triggers are now fired immediately after completion of the triggering query, rather than upon finishing the current interactive command. This makes a difference when the triggering query occurred within a function: @@ -3288,19 +3288,19 @@ typedefs (Michael) - Server configuration parameters virtual_host and - tcpip_socket have been replaced with a more general - parameter listen_addresses. 
Also, the server now listens on - localhost by default, which eliminates the need for the - -i postmaster switch in many scenarios. + Server configuration parameters virtual_host and + tcpip_socket have been replaced with a more general + parameter listen_addresses. Also, the server now listens on + localhost by default, which eliminates the need for the + -i postmaster switch in many scenarios. - Server configuration parameters SortMem and - VacuumMem have been renamed to work_mem - and maintenance_work_mem to better reflect their + Server configuration parameters SortMem and + VacuumMem have been renamed to work_mem + and maintenance_work_mem to better reflect their use. The original names are still supported in SET and SHOW. @@ -3308,34 +3308,34 @@ typedefs (Michael) - Server configuration parameters log_pid, - log_timestamp, and log_source_port have been - replaced with a more general parameter log_line_prefix. + Server configuration parameters log_pid, + log_timestamp, and log_source_port have been + replaced with a more general parameter log_line_prefix. - Server configuration parameter syslog has been - replaced with a more logical log_destination variable to + Server configuration parameter syslog has been + replaced with a more logical log_destination variable to control the log output destination. - Server configuration parameter log_statement has been + Server configuration parameter log_statement has been changed so it can selectively log just database modification or data definition statements. Server configuration parameter - log_duration now prints only when log_statement + log_duration now prints only when log_statement prints the query. - Server configuration parameter max_expr_depth parameter has - been replaced with max_stack_depth which measures the + Server configuration parameter max_expr_depth parameter has + been replaced with max_stack_depth which measures the physical stack size rather than the expression nesting depth. This helps prevent session termination due to stack overflow caused by recursive functions. @@ -3344,14 +3344,14 @@ typedefs (Michael) - The length() function no longer counts trailing spaces in - CHAR(n) values. + The length() function no longer counts trailing spaces in + CHAR(n) values. - Casting an integer to BIT(N) selects the rightmost N bits of the + Casting an integer to BIT(N) selects the rightmost N bits of the integer, not the leftmost N bits as before. @@ -3369,7 +3369,7 @@ typedefs (Michael) Syntax checking of array input values has been tightened up considerably. Junk that was previously allowed in odd places with odd results now causes an error. Empty-string element values - must now be written as "", rather than writing nothing. + must now be written as "", rather than writing nothing. Also changed behavior with respect to whitespace surrounding array elements: trailing whitespace is now ignored, for symmetry with leading whitespace (which has always been ignored). @@ -3386,14 +3386,14 @@ typedefs (Michael) The arithmetic operators associated with the single-byte - "char" data type have been removed. + "char" data type have been removed. - The extract() function (also called - date_part) now returns the proper year for BC dates. + The extract() function (also called + date_part) now returns the proper year for BC dates. It previously returned one less than the correct year. The function now also returns the proper values for millennium and century. 
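A minimal check of the extract()/date_part() change just described; the dates are arbitrary examples:

    SELECT extract(year from date '0044-03-15 BC');      -- no longer off by one for BC dates
    SELECT extract(millennium from date '2001-01-01');   -- 3: millennium and century are reported correctly now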
@@ -3402,9 +3402,9 @@ typedefs (Michael) - CIDR values now must have their nonmasked bits be zero. + CIDR values now must have their nonmasked bits be zero. For example, we no longer allow - 204.248.199.1/31 as a CIDR value. Such + 204.248.199.1/31 as a CIDR value. Such values should never have been accepted by PostgreSQL and will now be rejected. @@ -3419,11 +3419,11 @@ typedefs (Michael) - psql's \copy command now reads or - writes to the query's stdin/stdout, rather than - psql's stdin/stdout. The previous + psql's \copy command now reads or + writes to the query's stdin/stdout, rather than + psql's stdin/stdout. The previous behavior can be accessed via new - / parameters. @@ -3449,14 +3449,14 @@ typedefs (Michael) one supplied by the operating system. This will provide consistent behavior across all platforms. In most cases, there should be little noticeable difference in time zone behavior, except that - the time zone names used by SET/SHOW - TimeZone might be different from what your platform provides. + the time zone names used by SET/SHOW + TimeZone might be different from what your platform provides. - Configure's threading option no longer requires + Configure's threading option no longer requires users to run tests or edit configuration files; threading options are now detected automatically. @@ -3465,7 +3465,7 @@ typedefs (Michael) Now that tablespaces have been implemented, - initlocation has been removed. + initlocation has been removed. @@ -3495,7 +3495,7 @@ typedefs (Michael) - The 8.1 release will remove the to_char() function + The 8.1 release will remove the to_char() function for intervals. @@ -3513,12 +3513,12 @@ typedefs (Michael) By default, tables in PostgreSQL 8.0 - and earlier are created with OIDs. In the next release, + and earlier are created with OIDs. In the next release, this will not be the case: to create a table - that contains OIDs, the clause must be specified or the default_with_oids configuration parameter must be set. Users are encouraged to - explicitly specify if their tables require OIDs for compatibility with future releases of PostgreSQL. @@ -3581,7 +3581,7 @@ typedefs (Michael) hurt performance. The new code uses a background writer to trickle disk writes at a steady pace so checkpoints have far fewer dirty pages to write to disk. Also, the new code does not issue a global - sync() call, but instead fsync()s just + sync() call, but instead fsync()s just the files written since the last checkpoint. This should improve performance and minimize degradation during checkpoints. @@ -3629,13 +3629,13 @@ typedefs (Michael) - Improved index usage with OR clauses (Tom) + Improved index usage with OR clauses (Tom) This allows the optimizer to use indexes in statements with many OR clauses that would not have been indexed in the past. It can also use multi-column indexes where the first column is specified and the second - column is part of an OR clause. + column is part of an OR clause. @@ -3645,7 +3645,7 @@ typedefs (Michael) The server is now smarter about using partial indexes in queries - involving complex clauses. @@ -3754,7 +3754,7 @@ typedefs (Michael) It is now possible to log server messages conveniently without - relying on either syslog or an external log + relying on either syslog or an external log rotation program. 
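To make the CIDR tightening mentioned earlier in this list concrete, using the same example value as the note:

    SELECT '204.248.199.0/31'::cidr;   -- accepted: no bits set to the right of the mask
    SELECT '204.248.199.1/31'::cidr;   -- now rejected with an error, as described above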
@@ -3762,56 +3762,56 @@ typedefs (Michael) Add new read-only server configuration parameters to show server - compile-time settings: block_size, - integer_datetimes, max_function_args, - max_identifier_length, max_index_keys (Joe) + compile-time settings: block_size, + integer_datetimes, max_function_args, + max_identifier_length, max_index_keys (Joe) - Make quoting of sameuser, samegroup, and - all remove special meaning of these terms in - pg_hba.conf (Andrew) + Make quoting of sameuser, samegroup, and + all remove special meaning of these terms in + pg_hba.conf (Andrew) - Use clearer IPv6 name ::1/128 for - localhost in default pg_hba.conf (Andrew) + Use clearer IPv6 name ::1/128 for + localhost in default pg_hba.conf (Andrew) - Use CIDR format in pg_hba.conf examples (Andrew) + Use CIDR format in pg_hba.conf examples (Andrew) - Rename server configuration parameters SortMem and - VacuumMem to work_mem and - maintenance_work_mem (Old names still supported) (Tom) + Rename server configuration parameters SortMem and + VacuumMem to work_mem and + maintenance_work_mem (Old names still supported) (Tom) This change was made to clarify that bulk operations such as index and - foreign key creation use maintenance_work_mem, while - work_mem is for workspaces used during query execution. + foreign key creation use maintenance_work_mem, while + work_mem is for workspaces used during query execution. Allow logging of session disconnections using server configuration - log_disconnections (Andrew) + log_disconnections (Andrew) - Add new server configuration parameter log_line_prefix to + Add new server configuration parameter log_line_prefix to allow control of information emitted in each log line (Andrew) @@ -3822,21 +3822,21 @@ typedefs (Michael) - Remove server configuration parameters log_pid, - log_timestamp, log_source_port; functionality - superseded by log_line_prefix (Andrew) + Remove server configuration parameters log_pid, + log_timestamp, log_source_port; functionality + superseded by log_line_prefix (Andrew) - Replace the virtual_host and tcpip_socket - parameters with a unified listen_addresses parameter + Replace the virtual_host and tcpip_socket + parameters with a unified listen_addresses parameter (Andrew, Tom) - virtual_host could only specify a single IP address to - listen on. listen_addresses allows multiple addresses + virtual_host could only specify a single IP address to + listen on. listen_addresses allows multiple addresses to be specified. @@ -3844,10 +3844,10 @@ typedefs (Michael) Listen on localhost by default, which eliminates the need for the - postmaster switch in many scenarios (Andrew) - Listening on localhost (127.0.0.1) opens no new + Listening on localhost (127.0.0.1) opens no new security holes but allows configurations like Windows and JDBC, which do not support local sockets, to work without special adjustments. 
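A small sketch of how the new default looks in practice; the remote address below is a placeholder:

    SHOW listen_addresses;   -- 'localhost' by default in this release
    -- To accept remote TCP connections, list addresses explicitly in postgresql.conf,
    -- e.g.  listen_addresses = 'localhost, 192.0.2.10'  and restart the server.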
@@ -3856,17 +3856,17 @@ typedefs (Michael) - Remove syslog server configuration parameter, and add more - logical log_destination variable to control log output + Remove syslog server configuration parameter, and add more + logical log_destination variable to control log output location (Magnus) - Change server configuration parameter log_statement to take - values all, mod, ddl, or - none to select which queries are logged (Bruce) + Change server configuration parameter log_statement to take + values all, mod, ddl, or + none to select which queries are logged (Bruce) This allows administrators to log only data definition changes or @@ -3877,12 +3877,12 @@ typedefs (Michael) Some logging-related configuration parameters could formerly be adjusted - by ordinary users, but only in the more verbose direction. + by ordinary users, but only in the more verbose direction. They are now treated more strictly: only superusers can set them. - However, a superuser can use ALTER USER to provide per-user + However, a superuser can use ALTER USER to provide per-user settings of these values for non-superusers. Also, it is now possible for superusers to set values of superuser-only configuration parameters - via PGOPTIONS. + via PGOPTIONS. @@ -3921,8 +3921,8 @@ typedefs (Michael) It is now useful to issue DECLARE CURSOR in a - Parse message with parameters. The parameter values - sent at Bind time will be substituted into the + Parse message with parameters. The parameter values + sent at Bind time will be substituted into the execution of the cursor's query. @@ -3942,7 +3942,7 @@ typedefs (Michael) - Make log_duration print only when log_statement + Make log_duration print only when log_statement prints the query (Ed L.) @@ -4007,10 +4007,10 @@ typedefs (Michael) - Make CASE val WHEN compval1 THEN ... evaluate val only once (Tom) + Make CASE val WHEN compval1 THEN ... evaluate val only once (Tom) - no longer evaluates the tested expression multiple times. This has benefits when the expression is complex or is volatile. @@ -4018,20 +4018,20 @@ typedefs (Michael) - Test before computing target list of an aggregate query (Tom) Fixes improper failure of cases such as SELECT SUM(win)/SUM(lose) - ... GROUP BY ... HAVING SUM(lose) > 0. This should work but formerly + ... GROUP BY ... HAVING SUM(lose) > 0. This should work but formerly could fail with divide-by-zero. - Replace max_expr_depth parameter with - max_stack_depth parameter, measured in kilobytes of stack + Replace max_expr_depth parameter with + max_stack_depth parameter, measured in kilobytes of stack size (Tom) @@ -4054,7 +4054,7 @@ typedefs (Michael) - Allow / to be used as the operator in row and subselect comparisons (Fabien Coelho) @@ -4065,8 +4065,8 @@ typedefs (Michael) identifiers and keywords (Tom) - This solves the Turkish problem with mangling of words - containing I and i. Folding of characters + This solves the Turkish problem with mangling of words + containing I and i. Folding of characters outside the 7-bit-ASCII set is still locale-aware. 
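As an aside on the grouped-aggregate fix noted a few items above, this is the shape of query (column names taken from that note; the table and grouping column are hypothetical) that formerly could fail with divide-by-zero and is now safe:

    CREATE TEMP TABLE results (team text, win int, lose int);
    SELECT SUM(win) / SUM(lose)
      FROM results
     GROUP BY team
    HAVING SUM(lose) > 0;   -- the HAVING test is now applied before the SELECT list is computed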
@@ -4094,7 +4094,7 @@ typedefs (Michael) - Avoid emitting in rule listings (Tom) Such a clause makes no logical sense, but in some cases the rule @@ -4112,36 +4112,36 @@ typedefs (Michael) - Add COMMENT ON for casts, conversions, languages, + Add COMMENT ON for casts, conversions, languages, operator classes, and large objects (Christopher) - Add new server configuration parameter default_with_oids to - control whether tables are created with OIDs by default (Neil) + Add new server configuration parameter default_with_oids to + control whether tables are created with OIDs by default (Neil) This allows administrators to control whether CREATE - TABLE commands create tables with or without OID + TABLE commands create tables with or without OID columns by default. (Note: the current factory default setting for - default_with_oids is TRUE, but the default - will become FALSE in future releases.) + default_with_oids is TRUE, but the default + will become FALSE in future releases.) - Add / clause to CREATE TABLE AS (Neil) - Allow ALTER TABLE DROP COLUMN to drop an OID - column (ALTER TABLE SET WITHOUT OIDS still works) + Allow ALTER TABLE DROP COLUMN to drop an OID + column (ALTER TABLE SET WITHOUT OIDS still works) (Tom) @@ -4154,11 +4154,11 @@ typedefs (Michael) - Allow ALTER ... ADD COLUMN with defaults and - constraints; works per SQL spec (Rod) - It is now possible for to create a column that is not initially filled with NULLs, but with a specified default value. @@ -4166,7 +4166,7 @@ typedefs (Michael) - Add ALTER COLUMN TYPE to change column's type (Rod) + Add ALTER COLUMN TYPE to change column's type (Rod) It is now possible to alter a column's data type without dropping @@ -4176,14 +4176,14 @@ typedefs (Michael) - Allow multiple ALTER actions in a single ALTER + Allow multiple ALTER actions in a single ALTER TABLE command (Rod) - This is particularly useful for ALTER commands that - rewrite the table (which include @@ -4213,13 +4213,13 @@ typedefs (Michael) Allow temporary object creation to be limited to functions (Sean Chittenden) - Add (Christopher) Prior to this release, there was no way to clear an auto-cluster @@ -4229,8 +4229,8 @@ typedefs (Michael) - Constraint/Index/SERIAL names are now - table_column_type + Constraint/Index/SERIAL names are now + table_column_type with numbers appended to guarantee uniqueness within the schema (Tom) @@ -4242,11 +4242,11 @@ typedefs (Michael) - Add pg_get_serial_sequence() to return a - SERIAL column's sequence name (Christopher) + Add pg_get_serial_sequence() to return a + SERIAL column's sequence name (Christopher) - This allows automated scripts to reliably find the SERIAL + This allows automated scripts to reliably find the SERIAL sequence name. @@ -4259,14 +4259,14 @@ typedefs (Michael) - New ALTER INDEX command to allow moving of indexes + New ALTER INDEX command to allow moving of indexes between tablespaces (Gavin) - Make ALTER TABLE OWNER change dependent sequence + Make ALTER TABLE OWNER change dependent sequence ownership too (Alvaro) @@ -4289,18 +4289,18 @@ typedefs (Michael) - Add keyword to CREATE RULE (Fabien Coelho) - This allows to be added to rule creation to contrast it with + rules. 
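To illustrate two of the ALTER TABLE additions above, a single command carrying several actions, one of them a column type change; table and column names are made up:

    CREATE TEMP TABLE orders (order_id int, qty int);
    ALTER TABLE orders
        ALTER COLUMN order_id TYPE bigint,
        ADD COLUMN note text NOT NULL DEFAULT '';   -- both rewriting actions share one pass over the table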
- Add option to LOCK (Tatsuo) This allows the LOCK command to fail if it @@ -4336,7 +4336,7 @@ typedefs (Michael) In 7.3 and 7.4, a long-running B-tree index build could block concurrent - CHECKPOINTs from completing, thereby causing WAL bloat because the + CHECKPOINTs from completing, thereby causing WAL bloat because the WAL log could not be recycled. @@ -4384,11 +4384,11 @@ typedefs (Michael) - New pg_ctl option for Windows (Andrew) - Windows does not have a kill command to send signals to - backends so this capability was added to pg_ctl. + Windows does not have a kill command to send signals to + backends so this capability was added to pg_ctl. @@ -4400,7 +4400,7 @@ typedefs (Michael) - Add option to initdb so the initial password can be set by GUI tools (Magnus) @@ -4415,7 +4415,7 @@ typedefs (Michael) - Add @@ -4443,7 +4443,7 @@ typedefs (Michael) Reject nonrectangular array values as erroneous (Joe) - Formerly, array_in would silently build a + Formerly, array_in would silently build a surprising result. @@ -4457,13 +4457,13 @@ typedefs (Michael) The arithmetic operators associated with the single-byte - "char" data type have been removed. + "char" data type have been removed. Formerly, the parser would select these operators in many situations - where an unable to select an operator error would be more - appropriate, such as null * null. If you actually want - to do arithmetic on a "char" column, you can cast it to + where an unable to select an operator error would be more + appropriate, such as null * null. If you actually want + to do arithmetic on a "char" column, you can cast it to integer explicitly. @@ -4474,7 +4474,7 @@ typedefs (Michael) Junk that was previously allowed in odd places with odd results - now causes an ERROR, for example, non-whitespace + now causes an ERROR, for example, non-whitespace after the closing right brace. @@ -4482,7 +4482,7 @@ typedefs (Michael) Empty-string array element values must now be written as - "", rather than writing nothing (Joe) + "", rather than writing nothing (Joe) Formerly, both ways of writing an empty-string element value were @@ -4512,13 +4512,13 @@ typedefs (Michael) - Accept YYYY-monthname-DD as a date string (Tom) + Accept YYYY-monthname-DD as a date string (Tom) - Make netmask and hostmask functions + Make netmask and hostmask functions return maximum-length mask length (Tom) @@ -4535,27 +4535,27 @@ typedefs (Michael) - to_char/to_date() date conversion + to_char/to_date() date conversion improvements (Kurt Roeckx, Fabien Coelho) - Make length() disregard trailing spaces in - CHAR(n) (Gavin) + Make length() disregard trailing spaces in + CHAR(n) (Gavin) This change was made to improve consistency: trailing spaces are - semantically insignificant in CHAR(n) data, so they - should not be counted by length(). + semantically insignificant in CHAR(n) data, so they + should not be counted by length(). Warn about empty string being passed to - OID/float4/float8 data types (Neil) + OID/float4/float8 data types (Neil) 8.1 will throw an error instead. 
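Two of the data-type changes above, shown directly (behavior as described in the notes):

    SELECT length('ab   '::char(5));   -- 2: trailing spaces in char(n) are no longer counted
    SELECT ''::float8;                 -- draws a warning in this release; 8.1 raises an error instead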
@@ -4565,7 +4565,7 @@ typedefs (Michael) Allow leading or trailing whitespace in - int2/int4/int8/float4/float8 + int2/int4/int8/float4/float8 input routines (Neil) @@ -4573,7 +4573,7 @@ typedefs (Michael) - Better support for IEEE Infinity and NaN + Better support for IEEE Infinity and NaN values in float4/float8 (Neil) @@ -4584,27 +4584,27 @@ typedefs (Michael) - Add - Fix to_char for 1 BC - (previously it returned 1 AD) (Bruce) + Fix to_char for 1 BC + (previously it returned 1 AD) (Bruce) - Fix date_part(year) for BC dates (previously it + Fix date_part(year) for BC dates (previously it returned one less than the correct year) (Bruce) - Fix date_part() to return the proper millennium and + Fix date_part() to return the proper millennium and century (Fabien Coelho) @@ -4616,44 +4616,44 @@ typedefs (Michael) - Add ceiling() as an alias for ceil(), - and power() as an alias for pow() for + Add ceiling() as an alias for ceil(), + and power() as an alias for pow() for standards compliance (Neil) - Change ln(), log(), - power(), and sqrt() to emit the correct - SQLSTATE error codes for certain error conditions, as + Change ln(), log(), + power(), and sqrt() to emit the correct + SQLSTATE error codes for certain error conditions, as specified by SQL:2003 (Neil) - Add width_bucket() function as defined by SQL:2003 (Neil) + Add width_bucket() function as defined by SQL:2003 (Neil) - Add generate_series() functions to simplify working + Add generate_series() functions to simplify working with numeric sets (Joe) - Fix upper/lower/initcap() functions to work with + Fix upper/lower/initcap() functions to work with multibyte encodings (Tom) - Add boolean and bitwise integer / aggregates (Fabien Coelho) @@ -4679,17 +4679,17 @@ typedefs (Michael) - Add interval plus datetime operators (Tom) + Add interval plus datetime operators (Tom) - The reverse ordering, datetime plus interval, + The reverse ordering, datetime plus interval, was already supported, but both are required by the SQL standard. - Casting an integer to BIT(N) selects the rightmost N bits + Casting an integer to BIT(N) selects the rightmost N bits of the integer (Tom) @@ -4702,7 +4702,7 @@ typedefs (Michael) - Require CIDR values to have all nonmasked bits be zero + Require CIDR values to have all nonmasked bits be zero (Kevin Brintnall) @@ -4717,7 +4717,7 @@ typedefs (Michael) - In READ COMMITTED serialization mode, volatile functions + In READ COMMITTED serialization mode, volatile functions now see the results of concurrent transactions committed up to the beginning of each statement within the function, rather than up to the beginning of the interactive command that called the function. @@ -4726,20 +4726,20 @@ typedefs (Michael) - Functions declared STABLE or IMMUTABLE always + Functions declared STABLE or IMMUTABLE always use the snapshot of the calling query, and therefore do not see the effects of actions taken after the calling query starts, whether in their own transaction or other transactions. Such a function must be read-only, too, meaning that it cannot use any SQL commands other than - SELECT. There is a considerable performance gain from - declaring a function STABLE or IMMUTABLE - rather than VOLATILE. + SELECT. There is a considerable performance gain from + declaring a function STABLE or IMMUTABLE + rather than VOLATILE. - Nondeferred triggers are now fired immediately after completion of the triggering query, rather than upon finishing the current interactive command. 
This makes a difference when the triggering query occurred within a function: the trigger @@ -4801,8 +4801,8 @@ typedefs (Michael) Improve parsing of PL/pgSQL FOR loops (Tom) - Parsing is now driven by presence of ".." rather than - data type of variable. This makes no difference for correct functions, but should result in more understandable error messages when a mistake is made. @@ -4818,18 +4818,18 @@ typedefs (Michael) In PL/Tcl, SPI commands are now run in subtransactions. If an error occurs, the subtransaction is cleaned up and the error is reported - as an ordinary Tcl error, which can be trapped with catch. + as an ordinary Tcl error, which can be trapped with catch. Formerly, it was not possible to catch such errors. - Accept ELSEIF in PL/pgSQL (Neil) + Accept ELSEIF in PL/pgSQL (Neil) - Previously PL/pgSQL only allowed ELSIF, but many people - are accustomed to spelling this keyword ELSEIF. + Previously PL/pgSQL only allowed ELSIF, but many people + are accustomed to spelling this keyword ELSEIF. @@ -4838,47 +4838,47 @@ typedefs (Michael) - <application>psql</> Changes + <application>psql</application> Changes - Improve psql information display about database + Improve psql information display about database objects (Christopher) - Allow psql to display group membership in - \du and \dg (Markus Bertheau) + Allow psql to display group membership in + \du and \dg (Markus Bertheau) - Prevent psql \dn from showing + Prevent psql \dn from showing temporary schemas (Bruce) - Allow psql to handle tilde user expansion for file + Allow psql to handle tilde user expansion for file names (Zach Irmen) - Allow psql to display fancy prompts, including - color, via readline (Reece Hart, Chet Ramey) + Allow psql to display fancy prompts, including + color, via readline (Reece Hart, Chet Ramey) - Make psql \copy match COPY command syntax + Make psql \copy match COPY command syntax fully (Tom) @@ -4891,55 +4891,55 @@ typedefs (Michael) - Add CLUSTER information to psql - \d display + Add CLUSTER information to psql + \d display (Bruce) - Change psql \copy stdin/stdout to read + Change psql \copy stdin/stdout to read from command input/output (Bruce) - Add - Add global psql configuration file, psqlrc.sample + Add global psql configuration file, psqlrc.sample (Bruce) - This allows a central file where global psql startup commands can + This allows a central file where global psql startup commands can be stored. 
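A tiny PL/pgSQL sketch of the ELSEIF spelling mentioned a little earlier; the function name and logic are invented for illustration:

    CREATE FUNCTION grade(score int) RETURNS text AS $$
    BEGIN
        IF score >= 90 THEN
            RETURN 'A';
        ELSEIF score >= 80 THEN   -- ELSEIF is now accepted alongside ELSIF
            RETURN 'B';
        ELSE
            RETURN 'C';
        END IF;
    END;
    $$ LANGUAGE plpgsql;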
- Have psql \d+ indicate if the table - has an OID column (Neil) + Have psql \d+ indicate if the table + has an OID column (Neil) - On Windows, use binary mode in psql when reading files so control-Z + On Windows, use binary mode in psql when reading files so control-Z is not seen as end-of-file - Have \dn+ show permissions and description for schemas (Dennis + Have \dn+ show permissions and description for schemas (Dennis Björklund) @@ -4961,13 +4961,13 @@ typedefs (Michael) - <application>pg_dump</> Changes + <application>pg_dump</application> Changes Use dependency information to improve the reliability of - pg_dump (Tom) + pg_dump (Tom) This should solve the longstanding problems with related objects @@ -4977,7 +4977,7 @@ typedefs (Michael) - Have pg_dump output objects in alphabetical order if possible (Tom) + Have pg_dump output objects in alphabetical order if possible (Tom) This should make it easier to identify changes between @@ -4987,12 +4987,12 @@ typedefs (Michael) - Allow pg_restore to ignore some SQL errors (Fabien Coelho) + Allow pg_restore to ignore some SQL errors (Fabien Coelho) - This makes pg_restore's behavior similar to the - results of feeding a pg_dump output script to - psql. In most cases, ignoring errors and plowing + This makes pg_restore's behavior similar to the + results of feeding a pg_dump output script to + psql. In most cases, ignoring errors and plowing ahead is the most useful thing to do. Also added was a pg_restore option to give the old behavior of exiting on an error. @@ -5000,36 +5000,36 @@ typedefs (Michael) - pg_restore display now includes objects' schema names - New begin/end markers in pg_dump text output (Bruce) + New begin/end markers in pg_dump text output (Bruce) Add start/stop times for - pg_dump/pg_dumpall in verbose mode + pg_dump/pg_dumpall in verbose mode (Bruce) - Allow most pg_dump options in - pg_dumpall (Christopher) + Allow most pg_dump options in + pg_dumpall (Christopher) - Have pg_dump use ALTER OWNER rather - than SET SESSION AUTHORIZATION by default + Have pg_dump use ALTER OWNER rather + than SET SESSION AUTHORIZATION by default (Christopher) @@ -5044,42 +5044,42 @@ typedefs (Michael) - Make libpq's handling thread-safe (Bruce) - Add PQmbdsplen() which returns the display length + Add PQmbdsplen() which returns the display length of a character (Tatsuo) - Add thread locking to SSL and - Kerberos connections (Manfred Spraul) + Add thread locking to SSL and + Kerberos connections (Manfred Spraul) - Allow PQoidValue(), PQcmdTuples(), and - PQoidStatus() to work on EXECUTE + Allow PQoidValue(), PQcmdTuples(), and + PQoidStatus() to work on EXECUTE commands (Neil) - Add PQserverVersion() to provide more convenient + Add PQserverVersion() to provide more convenient access to the server version number (Greg Sabino Mullane) - Add PQprepare/PQsendPrepared() functions to support + Add PQprepare/PQsendPrepared() functions to support preparing statements without necessarily specifying the data types of their parameters (Abhijit Menon-Sen) @@ -5087,7 +5087,7 @@ typedefs (Michael) - Many ECPG improvements, including SET DESCRIPTOR (Michael) + Many ECPG improvements, including SET DESCRIPTOR (Michael) @@ -5127,7 +5127,7 @@ typedefs (Michael) Directory paths for installed files (such as the - /share directory) are now computed relative to the + /share directory) are now computed relative to the actual location of the executables, so that an installation tree can be moved to another place without reconfiguring and rebuilding. 
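In other words, dump scripts now carry ownership statements of this form (names are placeholders):

    ALTER TABLE public.accounts OWNER TO alice;
    -- instead of wrapping each CREATE in:
    --   SET SESSION AUTHORIZATION 'alice';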
@@ -5136,31 +5136,31 @@ typedefs (Michael) - Use to choose installation location of documentation; also + allow (Peter) - Add to prevent installation of documentation (Peter) - Upgrade to DocBook V4.2 SGML (Peter) + Upgrade to DocBook V4.2 SGML (Peter) - New PostgreSQL CVS tag (Marc) + New PostgreSQL CVS tag (Marc) This was done to make it easier for organizations to manage their own copies of the PostgreSQL - CVS repository. File version stamps from the master + CVS repository. File version stamps from the master repository will not get munged by checking into or out of a copied repository. @@ -5186,7 +5186,7 @@ typedefs (Michael) - Add inlined test-and-set code on PA-RISC for gcc + Add inlined test-and-set code on PA-RISC for gcc (ViSolve, Tom) @@ -5200,7 +5200,7 @@ typedefs (Michael) Clean up spinlock assembly code to avoid warnings from newer - gcc releases (Tom) + gcc releases (Tom) @@ -5230,7 +5230,7 @@ typedefs (Michael) - New fsync() test program (Bruce) + New fsync() test program (Bruce) @@ -5268,7 +5268,7 @@ typedefs (Michael) - Use Olson's public domain timezone library (Magnus) + Use Olson's public domain timezone library (Magnus) @@ -5285,7 +5285,7 @@ typedefs (Michael) - psql now uses a flex-generated + psql now uses a flex-generated lexical analyzer to process command strings @@ -5322,7 +5322,7 @@ typedefs (Michael) - New pgevent for Windows logging + New pgevent for Windows logging @@ -5342,19 +5342,19 @@ typedefs (Michael) - Overhaul of contrib/dblink (Joe) + Overhaul of contrib/dblink (Joe) - contrib/dbmirror improvements (Steven Singer) + contrib/dbmirror improvements (Steven Singer) - New contrib/xml2 (John Gray, Torchbox) + New contrib/xml2 (John Gray, Torchbox) @@ -5366,51 +5366,51 @@ typedefs (Michael) - New version of contrib/btree_gist (Teodor) + New version of contrib/btree_gist (Teodor) - New contrib/trgm, trigram matching for + New contrib/trgm, trigram matching for PostgreSQL (Teodor) - Many contrib/tsearch2 improvements (Teodor) + Many contrib/tsearch2 improvements (Teodor) - Add double metaphone to contrib/fuzzystrmatch (Andrew) + Add double metaphone to contrib/fuzzystrmatch (Andrew) - Allow contrib/pg_autovacuum to run as a Windows service (Dave Page) + Allow contrib/pg_autovacuum to run as a Windows service (Dave Page) - Add functions to contrib/dbsize (Andreas Pflug) + Add functions to contrib/dbsize (Andreas Pflug) - Removed contrib/pg_logger: obsoleted by integrated logging + Removed contrib/pg_logger: obsoleted by integrated logging subprocess - Removed contrib/rserv: obsoleted by various separate projects + Removed contrib/rserv: obsoleted by various separate projects diff --git a/doc/src/sgml/release-8.1.sgml b/doc/src/sgml/release-8.1.sgml index d48bccd17d..44a30892fd 100644 --- a/doc/src/sgml/release-8.1.sgml +++ b/doc/src/sgml/release-8.1.sgml @@ -12,11 +12,11 @@ This release contains a variety of fixes from 8.1.22. For information about new features in the 8.1 major release, see - . + . - This is expected to be the last PostgreSQL release + This is expected to be the last PostgreSQL release in the 8.1.X series. Users are encouraged to update to a newer release branch soon. @@ -27,7 +27,7 @@ A dump/restore is not required for those running 8.1.X. However, if you are upgrading from a version earlier than 8.1.18, - see . + see . 
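If in doubt about what a given server is using, the setting can be inspected, and pinned if necessary:

    SHOW wal_sync_method;   -- expected to report 'fdatasync' on Linux after this update
    -- to pin it explicitly, set  wal_sync_method = fdatasync  in postgresql.conf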
@@ -40,17 +40,17 @@ Force the default - wal_sync_method - to be fdatasync on Linux (Tom Lane, Marti Raudsepp) + wal_sync_method + to be fdatasync on Linux (Tom Lane, Marti Raudsepp) - The default on Linux has actually been fdatasync for many - years, but recent kernel changes caused PostgreSQL to - choose open_datasync instead. This choice did not result + The default on Linux has actually been fdatasync for many + years, but recent kernel changes caused PostgreSQL to + choose open_datasync instead. This choice did not result in any performance improvement, and caused outright failures on - certain filesystems, notably ext4 with the - data=journal mount option. + certain filesystems, notably ext4 with the + data=journal mount option. @@ -63,19 +63,19 @@ - Add support for detecting register-stack overrun on IA64 + Add support for detecting register-stack overrun on IA64 (Tom Lane) - The IA64 architecture has two hardware stacks. Full + The IA64 architecture has two hardware stacks. Full prevention of stack-overrun failures requires checking both. - Add a check for stack overflow in copyObject() (Tom Lane) + Add a check for stack overflow in copyObject() (Tom Lane) @@ -91,7 +91,7 @@ - It is possible to have a concurrent page split in a + It is possible to have a concurrent page split in a temporary index, if for example there is an open cursor scanning the index when an insertion is done. GiST failed to detect this case and hence could deliver wrong results when execution of the cursor @@ -101,7 +101,7 @@ - Avoid memory leakage while ANALYZE'ing complex index + Avoid memory leakage while ANALYZE'ing complex index expressions (Tom Lane) @@ -113,14 +113,14 @@ - An index declared like create index i on t (foo(t.*)) + An index declared like create index i on t (foo(t.*)) would not automatically get dropped when its table was dropped. - Do not inline a SQL function with multiple OUT + Do not inline a SQL function with multiple OUT parameters (Tom Lane) @@ -132,7 +132,7 @@ - Fix constant-folding of COALESCE() expressions (Tom Lane) + Fix constant-folding of COALESCE() expressions (Tom Lane) @@ -143,11 +143,11 @@ - Add print functionality for InhRelation nodes (Tom Lane) + Add print functionality for InhRelation nodes (Tom Lane) - This avoids a failure when debug_print_parse is enabled + This avoids a failure when debug_print_parse is enabled and certain types of query are executed. @@ -166,29 +166,29 @@ - Fix PL/pgSQL's handling of simple + Fix PL/pgSQL's handling of simple expressions to not fail in recursion or error-recovery cases (Tom Lane) - Fix bug in contrib/cube's GiST picksplit algorithm + Fix bug in contrib/cube's GiST picksplit algorithm (Alexander Korotkov) This could result in considerable inefficiency, though not actually - incorrect answers, in a GiST index on a cube column. - If you have such an index, consider REINDEXing it after + incorrect answers, in a GiST index on a cube column. + If you have such an index, consider REINDEXing it after installing this update. 
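Separately, the expression-index dependency fix a few items above concerns declarations of this shape, spelled out end to end using the note's own names:

    CREATE TABLE t (a int, b text);
    CREATE FUNCTION foo(t) RETURNS text AS $$ SELECT $1.b $$ LANGUAGE sql IMMUTABLE;
    CREATE INDEX i ON t (foo(t.*));
    DROP TABLE t CASCADE;   -- CASCADE also drops foo(); with this fix, index i is cleaned up with its table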
- Don't emit identifier will be truncated notices in - contrib/dblink except when creating new connections + Don't emit identifier will be truncated notices in + contrib/dblink except when creating new connections (Itagaki Takahiro) @@ -196,20 +196,20 @@ Fix potential coredump on missing public key in - contrib/pgcrypto (Marti Raudsepp) + contrib/pgcrypto (Marti Raudsepp) - Fix memory leak in contrib/xml2's XPath query functions + Fix memory leak in contrib/xml2's XPath query functions (Tom Lane) - Update time zone data files to tzdata release 2010o + Update time zone data files to tzdata release 2010o for DST law changes in Fiji and Samoa; also historical corrections for Hong Kong. @@ -231,11 +231,11 @@ This release contains a variety of fixes from 8.1.21. For information about new features in the 8.1 major release, see - . + . - The PostgreSQL community will stop releasing updates + The PostgreSQL community will stop releasing updates for the 8.1.X release series in November 2010. Users are encouraged to update to a newer release branch soon. @@ -246,7 +246,7 @@ A dump/restore is not required for those running 8.1.X. However, if you are upgrading from a version earlier than 8.1.18, - see . + see . @@ -266,7 +266,7 @@ This change prevents security problems that can be caused by subverting Perl or Tcl code that will be executed later in the same session under another SQL user identity (for example, within a SECURITY - DEFINER function). Most scripting languages offer numerous ways that + DEFINER function). Most scripting languages offer numerous ways that that might be done, such as redefining standard functions or operators called by the target function. Without this change, any SQL user with Perl or Tcl language usage rights can do essentially anything with the @@ -295,7 +295,7 @@ - Prevent possible crashes in pg_get_expr() by disallowing + Prevent possible crashes in pg_get_expr() by disallowing it from being called with an argument that is not one of the system catalog columns it's intended to be used with (Heikki Linnakangas, Tom Lane) @@ -337,7 +337,7 @@ Take care to fsync the contents of lockfiles (both - postmaster.pid and the socket lockfile) while writing them + postmaster.pid and the socket lockfile) while writing them (Tom Lane) @@ -363,7 +363,7 @@ - Fix log_line_prefix's %i escape, + Fix log_line_prefix's %i escape, which could produce junk early in backend startup (Tom Lane) @@ -371,28 +371,28 @@ Fix possible data corruption in ALTER TABLE ... SET - TABLESPACE when archiving is enabled (Jeff Davis) + TABLESPACE when archiving is enabled (Jeff Davis) - Allow CREATE DATABASE and ALTER DATABASE ... SET - TABLESPACE to be interrupted by query-cancel (Guillaume Lelarge) + Allow CREATE DATABASE and ALTER DATABASE ... 
SET + TABLESPACE to be interrupted by query-cancel (Guillaume Lelarge) In PL/Python, defend against null pointer results from - PyCObject_AsVoidPtr and PyCObject_FromVoidPtr + PyCObject_AsVoidPtr and PyCObject_FromVoidPtr (Peter Eisentraut) - Improve contrib/dblink's handling of tables containing + Improve contrib/dblink's handling of tables containing dropped columns (Tom Lane) @@ -400,13 +400,13 @@ Fix connection leak after duplicate connection name - errors in contrib/dblink (Itagaki Takahiro) + errors in contrib/dblink (Itagaki Takahiro) - Fix contrib/dblink to handle connection names longer than + Fix contrib/dblink to handle connection names longer than 62 bytes correctly (Itagaki Takahiro) @@ -420,7 +420,7 @@ - Update time zone data files to tzdata release 2010l + Update time zone data files to tzdata release 2010l for DST law changes in Egypt and Palestine; also historical corrections for Finland. @@ -449,7 +449,7 @@ This release contains a variety of fixes from 8.1.20. For information about new features in the 8.1 major release, see - . + . @@ -458,7 +458,7 @@ A dump/restore is not required for those running 8.1.X. However, if you are upgrading from a version earlier than 8.1.18, - see . + see . @@ -470,19 +470,19 @@ - Enforce restrictions in plperl using an opmask applied to - the whole interpreter, instead of using Safe.pm + Enforce restrictions in plperl using an opmask applied to + the whole interpreter, instead of using Safe.pm (Tim Bunce, Andrew Dunstan) - Recent developments have convinced us that Safe.pm is too - insecure to rely on for making plperl trustable. This - change removes use of Safe.pm altogether, in favor of using + Recent developments have convinced us that Safe.pm is too + insecure to rely on for making plperl trustable. This + change removes use of Safe.pm altogether, in favor of using a separate interpreter with an opcode mask that is always applied. Pleasant side effects of the change include that it is now possible to - use Perl's strict pragma in a natural way in - plperl, and that Perl's $a and $b + use Perl's strict pragma in a natural way in + plperl, and that Perl's $a and $b variables work as expected in sort routines, and that function compilation is significantly faster. (CVE-2010-1169) @@ -491,19 +491,19 @@ Prevent PL/Tcl from executing untrustworthy code from - pltcl_modules (Tom) + pltcl_modules (Tom) PL/Tcl's feature for autoloading Tcl code from a database table could be exploited for trojan-horse attacks, because there was no restriction on who could create or insert into that table. This change - disables the feature unless pltcl_modules is owned by a + disables the feature unless pltcl_modules is owned by a superuser. (However, the permissions on the table are not checked, so installations that really need a less-than-secure modules table can still grant suitable privileges to trusted non-superusers.) Also, - prevent loading code into the unrestricted normal Tcl - interpreter unless we are really going to execute a pltclu + prevent loading code into the unrestricted normal Tcl + interpreter unless we are really going to execute a pltclu function. (CVE-2010-1170) @@ -516,10 +516,10 @@ Previously, if an unprivileged user ran ALTER USER ... RESET - ALL for himself, or ALTER DATABASE ... RESET ALL for + ALL for himself, or ALTER DATABASE ... RESET ALL for a database he owns, this would remove all special parameter settings for the user or database, even ones that are only supposed to be - changeable by a superuser. 
Now, the ALTER will only + changeable by a superuser. Now, the ALTER will only remove the parameters that the user has permission to change. @@ -527,7 +527,7 @@ Avoid possible crash during backend shutdown if shutdown occurs - when a CONTEXT addition would be made to log entries (Tom) + when a CONTEXT addition would be made to log entries (Tom) @@ -539,7 +539,7 @@ - Update PL/Perl's ppport.h for modern Perl versions + Update PL/Perl's ppport.h for modern Perl versions (Andrew) @@ -552,14 +552,14 @@ - Prevent infinite recursion in psql when expanding + Prevent infinite recursion in psql when expanding a variable that refers to itself (Tom) - Ensure that contrib/pgstattuple functions respond to cancel + Ensure that contrib/pgstattuple functions respond to cancel interrupts promptly (Tatsuhito Kasahara) @@ -567,7 +567,7 @@ Make server startup deal properly with the case that - shmget() returns EINVAL for an existing + shmget() returns EINVAL for an existing shared memory segment (Tom) @@ -580,7 +580,7 @@ - Update time zone data files to tzdata release 2010j + Update time zone data files to tzdata release 2010j for DST law changes in Argentina, Australian Antarctic, Bangladesh, Mexico, Morocco, Pakistan, Palestine, Russia, Syria, Tunisia; also historical corrections for Taiwan. @@ -603,7 +603,7 @@ This release contains a variety of fixes from 8.1.19. For information about new features in the 8.1 major release, see - . + . @@ -612,7 +612,7 @@ A dump/restore is not required for those running 8.1.X. However, if you are upgrading from a version earlier than 8.1.18, - see . + see . @@ -624,7 +624,7 @@ - Add new configuration parameter ssl_renegotiation_limit to + Add new configuration parameter ssl_renegotiation_limit to control how often we do session key renegotiation for an SSL connection (Magnus) @@ -653,8 +653,8 @@ - Make substring() for bit types treat any negative - length as meaning all the rest of the string (Tom) + Make substring() for bit types treat any negative + length as meaning all the rest of the string (Tom) @@ -680,7 +680,7 @@ - Fix the STOP WAL LOCATION entry in backup history files to + Fix the STOP WAL LOCATION entry in backup history files to report the next WAL segment's name when the end location is exactly at a segment boundary (Itagaki Takahiro) @@ -700,17 +700,17 @@ - When reading pg_hba.conf and related files, do not treat - @something as a file inclusion request if the @ - appears inside quote marks; also, never treat @ by itself + When reading pg_hba.conf and related files, do not treat + @something as a file inclusion request if the @ + appears inside quote marks; also, never treat @ by itself as a file inclusion request (Tom) This prevents erratic behavior if a role or database name starts with - @. If you need to include a file whose path name + @. If you need to include a file whose path name contains spaces, you can still do so, but you must write - @"/path to/file" rather than putting the quotes around + @"/path to/file" rather than putting the quotes around the whole construct. 
@@ -718,14 +718,14 @@ Prevent infinite loop on some platforms if a directory is named as - an inclusion target in pg_hba.conf and related files + an inclusion target in pg_hba.conf and related files (Tom) - Fix psql's numericlocale option to not + Fix psql's numericlocale option to not format strings it shouldn't in latex and troff output formats (Heikki) @@ -739,7 +739,7 @@ - Add volatile markings in PL/Python to avoid possible + Add volatile markings in PL/Python to avoid possible compiler-specific misbehavior (Zdenek Kotala) @@ -751,28 +751,28 @@ The only known symptom of this oversight is that the Tcl - clock command misbehaves if using Tcl 8.5 or later. + clock command misbehaves if using Tcl 8.5 or later. - Prevent crash in contrib/dblink when too many key - columns are specified to a dblink_build_sql_* function + Prevent crash in contrib/dblink when too many key + columns are specified to a dblink_build_sql_* function (Rushabh Lathia, Joe Conway) - Fix assorted crashes in contrib/xml2 caused by sloppy + Fix assorted crashes in contrib/xml2 caused by sloppy memory management (Tom) - Update time zone data files to tzdata release 2010e + Update time zone data files to tzdata release 2010e for DST law changes in Bangladesh, Chile, Fiji, Mexico, Paraguay, Samoa. @@ -793,7 +793,7 @@ This release contains a variety of fixes from 8.1.18. For information about new features in the 8.1 major release, see - . + . @@ -802,7 +802,7 @@ A dump/restore is not required for those running 8.1.X. However, if you are upgrading from a version earlier than 8.1.18, - see . + see . @@ -844,14 +844,14 @@ - Prevent signals from interrupting VACUUM at unsafe times + Prevent signals from interrupting VACUUM at unsafe times (Alvaro) - This fix prevents a PANIC if a VACUUM FULL is canceled + This fix prevents a PANIC if a VACUUM FULL is canceled after it's already committed its tuple movements, as well as transient - errors if a plain VACUUM is interrupted after having + errors if a plain VACUUM is interrupted after having truncated the table. @@ -870,7 +870,7 @@ - Fix very rare crash in inet/cidr comparisons (Chris + Fix very rare crash in inet/cidr comparisons (Chris Mikkelson) @@ -896,7 +896,7 @@ The previous code is known to fail with the combination of the Linux - pam_krb5 PAM module with Microsoft Active Directory as the + pam_krb5 PAM module with Microsoft Active Directory as the domain controller. It might have problems elsewhere too, since it was making unjustified assumptions about what arguments the PAM stack would pass to it. @@ -906,14 +906,14 @@ Fix processing of ownership dependencies during CREATE OR - REPLACE FUNCTION (Tom) + REPLACE FUNCTION (Tom) Ensure that Perl arrays are properly converted to - PostgreSQL arrays when returned by a set-returning + PostgreSQL arrays when returned by a set-returning PL/Perl function (Andrew Dunstan, Abhijit Menon-Sen) @@ -930,20 +930,20 @@ - Ensure psql's flex module is compiled with the correct + Ensure psql's flex module is compiled with the correct system header definitions (Tom) This fixes build failures on platforms where - --enable-largefile causes incompatible changes in the + --enable-largefile causes incompatible changes in the generated code. 
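A short check of the bit-string substring() change listed above; the second line shows the new meaning of a negative length described in that item:

    SELECT substring(B'110010' from 3 for 2);    -- 00
    SELECT substring(B'110010' from 3 for -1);   -- 0010: a negative length now means "to the end"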
- Make the postmaster ignore any application_name parameter in + Make the postmaster ignore any application_name parameter in connection request packets, to improve compatibility with future libpq versions (Tom) @@ -951,7 +951,7 @@ - Update time zone data files to tzdata release 2009s + Update time zone data files to tzdata release 2009s for DST law changes in Antarctica, Argentina, Bangladesh, Fiji, Novokuznetsk, Pakistan, Palestine, Samoa, Syria; also historical corrections for Hong Kong. @@ -974,7 +974,7 @@ This release contains a variety of fixes from 8.1.17. For information about new features in the 8.1 major release, see - . + . @@ -982,10 +982,10 @@ A dump/restore is not required for those running 8.1.X. - However, if you have any hash indexes on interval columns, - you must REINDEX them after updating to 8.1.18. + However, if you have any hash indexes on interval columns, + you must REINDEX them after updating to 8.1.18. Also, if you are upgrading from a version earlier than 8.1.15, - see . + see . @@ -997,14 +997,14 @@ - Disallow RESET ROLE and RESET SESSION - AUTHORIZATION inside security-definer functions (Tom, Heikki) + Disallow RESET ROLE and RESET SESSION + AUTHORIZATION inside security-definer functions (Tom, Heikki) This covers a case that was missed in the previous patch that - disallowed SET ROLE and SET SESSION - AUTHORIZATION inside security-definer functions. + disallowed SET ROLE and SET SESSION + AUTHORIZATION inside security-definer functions. (See CVE-2007-6600) @@ -1018,32 +1018,32 @@ - Fix hash calculation for data type interval (Tom) + Fix hash calculation for data type interval (Tom) This corrects wrong results for hash joins on interval values. It also changes the contents of hash indexes on interval columns. - If you have any such indexes, you must REINDEX them + If you have any such indexes, you must REINDEX them after updating. - Treat to_char(..., 'TH') as an uppercase ordinal - suffix with 'HH'/'HH12' (Heikki) + Treat to_char(..., 'TH') as an uppercase ordinal + suffix with 'HH'/'HH12' (Heikki) - It was previously handled as 'th' (lowercase). + It was previously handled as 'th' (lowercase). - Fix overflow for INTERVAL 'x ms' - when x is more than 2 million and integer + Fix overflow for INTERVAL 'x ms' + when x is more than 2 million and integer datetimes are in use (Alex Hunsaker) @@ -1060,7 +1060,7 @@ - Fix money data type to work in locales where currency + Fix money data type to work in locales where currency amounts have no fractional digits, e.g. Japan (Itagaki Takahiro) @@ -1068,7 +1068,7 @@ Properly round datetime input like - 00:12:57.9999999999999999999999999999 (Tom) + 00:12:57.9999999999999999999999999999 (Tom) @@ -1087,22 +1087,22 @@ - Fix pg_ctl to not go into an infinite loop if - postgresql.conf is empty (Jeff Davis) + Fix pg_ctl to not go into an infinite loop if + postgresql.conf is empty (Jeff Davis) - Fix contrib/xml2's xslt_process() to + Fix contrib/xml2's xslt_process() to properly handle the maximum number of parameters (twenty) (Tom) - Improve robustness of libpq's code to recover - from errors during COPY FROM STDIN (Tom) + Improve robustness of libpq's code to recover + from errors during COPY FROM STDIN (Tom) @@ -1115,7 +1115,7 @@ - Update time zone data files to tzdata release 2009l + Update time zone data files to tzdata release 2009l for DST law changes in Bangladesh, Egypt, Jordan, Pakistan, Argentina/San_Luis, Cuba, Jordan (historical correction only), Mauritius, Morocco, Palestine, Syria, Tunisia. 
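For the hash-on-interval change flagged in the upgrade note above, the repair is a plain REINDEX of any affected index; the names here are hypothetical:

    -- suppose such an index exists:
    --   CREATE INDEX events_duration_hash ON events USING hash (duration);
    -- after installing the update, rebuild it:
    REINDEX INDEX events_duration_hash;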
@@ -1138,7 +1138,7 @@ This release contains a variety of fixes from 8.1.16. For information about new features in the 8.1 major release, see - . + . @@ -1147,7 +1147,7 @@ A dump/restore is not required for those running 8.1.X. However, if you are upgrading from a version earlier than 8.1.15, - see . + see . @@ -1166,7 +1166,7 @@ This change extends fixes made in the last two minor releases for related failure scenarios. The previous fixes were narrowly tailored for the original problem reports, but we have now recognized that - any error thrown by an encoding conversion function could + any error thrown by an encoding conversion function could potentially lead to infinite recursion while trying to report the error. The solution therefore is to disable translation and encoding conversion and report the plain-ASCII form of any error message, @@ -1177,7 +1177,7 @@ - Disallow CREATE CONVERSION with the wrong encodings + Disallow CREATE CONVERSION with the wrong encodings for the specified conversion function (Heikki) @@ -1190,20 +1190,20 @@ - Fix core dump when to_char() is given format codes that + Fix core dump when to_char() is given format codes that are inappropriate for the type of the data argument (Tom) - Fix decompilation of CASE WHEN with an implicit coercion + Fix decompilation of CASE WHEN with an implicit coercion (Tom) This mistake could lead to Assert failures in an Assert-enabled build, - or an unexpected CASE WHEN clause error message in other + or an unexpected CASE WHEN clause error message in other cases, when trying to examine or dump a view. @@ -1214,15 +1214,15 @@ - If CLUSTER or a rewriting variant of ALTER TABLE + If CLUSTER or a rewriting variant of ALTER TABLE were executed by someone other than the table owner, the - pg_type entry for the table's TOAST table would end up + pg_type entry for the table's TOAST table would end up marked as owned by that someone. This caused no immediate problems, since the permissions on the TOAST rowtype aren't examined by any ordinary database operation. However, it could lead to unexpected failures if one later tried to drop the role that issued the command - (in 8.1 or 8.2), or owner of data type appears to be invalid - warnings from pg_dump after having done so (in 8.3). + (in 8.1 or 8.2), or owner of data type appears to be invalid + warnings from pg_dump after having done so (in 8.3). @@ -1240,7 +1240,7 @@ - Add MUST (Mauritius Island Summer Time) to the default list + Add MUST (Mauritius Island Summer Time) to the default list of known timezone abbreviations (Xavier Bugaud) @@ -1261,7 +1261,7 @@ This release contains a variety of fixes from 8.1.15. For information about new features in the 8.1 major release, see - . + . @@ -1270,7 +1270,7 @@ A dump/restore is not required for those running 8.1.X. However, if you are upgrading from a version earlier than 8.1.15, - see . + see . 
@@ -1294,13 +1294,13 @@ - Improve handling of URLs in headline() function (Teodor) + Improve handling of URLs in headline() function (Teodor) - Improve handling of overlength headlines in headline() + Improve handling of overlength headlines in headline() function (Teodor) @@ -1315,7 +1315,7 @@ - Avoid unnecessary locking of small tables in VACUUM + Avoid unnecessary locking of small tables in VACUUM (Heikki) @@ -1337,30 +1337,30 @@ - Fix uninitialized variables in contrib/tsearch2's - get_covers() function (Teodor) + Fix uninitialized variables in contrib/tsearch2's + get_covers() function (Teodor) - Fix configure script to properly report failure when + Fix configure script to properly report failure when unable to obtain linkage information for PL/Perl (Andrew) - Make all documentation reference pgsql-bugs and/or - pgsql-hackers as appropriate, instead of the - now-decommissioned pgsql-ports and pgsql-patches + Make all documentation reference pgsql-bugs and/or + pgsql-hackers as appropriate, instead of the + now-decommissioned pgsql-ports and pgsql-patches mailing lists (Tom) - Update time zone data files to tzdata release 2009a (for + Update time zone data files to tzdata release 2009a (for Kathmandu and historical DST corrections in Switzerland, Cuba) @@ -1381,7 +1381,7 @@ This release contains a variety of fixes from 8.1.14. For information about new features in the 8.1 major release, see - . + . @@ -1390,8 +1390,8 @@ A dump/restore is not required for those running 8.1.X. However, if you are upgrading from a version earlier than 8.1.2, - see . Also, if you were running a previous - 8.1.X release, it is recommended to REINDEX all GiST + see . Also, if you were running a previous + 8.1.X release, it is recommended to REINDEX all GiST indexes after the upgrade. @@ -1405,13 +1405,13 @@ Fix GiST index corruption due to marking the wrong index entry - dead after a deletion (Teodor) + dead after a deletion (Teodor) This would result in index searches failing to find rows they should have found. Corrupted indexes can be fixed with - REINDEX. + REINDEX. @@ -1423,7 +1423,7 @@ We have addressed similar issues before, but it would still fail if - the character has no equivalent message itself couldn't + the character has no equivalent message itself couldn't be converted. The fix is to disable localization and send the plain ASCII error message when we detect such a situation. @@ -1438,13 +1438,13 @@ - Fix mis-expansion of rule queries when a sub-SELECT appears - in a function call in FROM, a multi-row VALUES - list, or a RETURNING list (Tom) + Fix mis-expansion of rule queries when a sub-SELECT appears + in a function call in FROM, a multi-row VALUES + list, or a RETURNING list (Tom) - The usual symptom of this problem is an unrecognized node type + The usual symptom of this problem is an unrecognized node type error. 
@@ -1458,9 +1458,9 @@ - Prevent possible collision of relfilenode numbers + Prevent possible collision of relfilenode numbers when moving a table to another tablespace with ALTER SET - TABLESPACE (Heikki) + TABLESPACE (Heikki) @@ -1479,14 +1479,14 @@ Fix improper display of fractional seconds in interval values when - using a non-ISO datestyle in an build (Ron Mayer) - Ensure SPI_getvalue and SPI_getbinval + Ensure SPI_getvalue and SPI_getbinval behave correctly when the passed tuple and tuple descriptor have different numbers of columns (Tom) @@ -1500,19 +1500,19 @@ - Fix ecpg's parsing of CREATE ROLE (Michael) + Fix ecpg's parsing of CREATE ROLE (Michael) - Fix recent breakage of pg_ctl restart (Tom) + Fix recent breakage of pg_ctl restart (Tom) - Update time zone data files to tzdata release 2008i (for + Update time zone data files to tzdata release 2008i (for DST law changes in Argentina, Brazil, Mauritius, Syria) @@ -1533,7 +1533,7 @@ This release contains a variety of fixes from 8.1.13. For information about new features in the 8.1 major release, see - . + . @@ -1542,7 +1542,7 @@ A dump/restore is not required for those running 8.1.X. However, if you are upgrading from a version earlier than 8.1.2, - see . + see . @@ -1560,7 +1560,7 @@ This responds to reports that the counters could overflow in sufficiently long transactions, leading to unexpected lock is - already held errors. + already held errors. @@ -1573,12 +1573,12 @@ Add checks in executor startup to ensure that the tuples produced by an - INSERT or UPDATE will match the target table's + INSERT or UPDATE will match the target table's current rowtype (Tom) - ALTER COLUMN TYPE, followed by re-use of a previously + ALTER COLUMN TYPE, followed by re-use of a previously cached plan, could produce this type of situation. The check protects against data corruption and/or crashes that could ensue. @@ -1586,18 +1586,18 @@ - Fix AT TIME ZONE to first try to interpret its timezone + Fix AT TIME ZONE to first try to interpret its timezone argument as a timezone abbreviation, and only try it as a full timezone name if that fails, rather than the other way around as formerly (Tom) The timestamp input functions have always resolved ambiguous zone names - in this order. Making AT TIME ZONE do so as well improves + in this order. Making AT TIME ZONE do so as well improves consistency, and fixes a compatibility bug introduced in 8.1: in ambiguous cases we now behave the same as 8.0 and before did, - since in the older versions AT TIME ZONE accepted - only abbreviations. + since in the older versions AT TIME ZONE accepted + only abbreviations. @@ -1617,7 +1617,7 @@ Fix bug in backwards scanning of a cursor on a SELECT DISTINCT - ON query (Tom) + ON query (Tom) @@ -1635,21 +1635,21 @@ - Fix planner to estimate that GROUP BY expressions yielding + Fix planner to estimate that GROUP BY expressions yielding boolean results always result in two groups, regardless of the expressions' contents (Tom) This is very substantially more accurate than the regular GROUP - BY estimate for certain boolean tests like col - IS NULL. + BY estimate for certain boolean tests like col + IS NULL. 
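The kind of grouping the improved estimate applies to, sketched with hypothetical names:

    CREATE TEMP TABLE t (col int);
    SELECT (col IS NULL) AS is_missing, count(*)
      FROM t
     GROUP BY (col IS NULL);   -- the planner now assumes exactly two groups for the boolean expression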
- Fix PL/pgSQL to not fail when a FOR loop's target variable + Fix PL/pgSQL to not fail when a FOR loop's target variable is a record containing composite-type fields (Tom) @@ -1673,21 +1673,21 @@ - Improve pg_dump and pg_restore's + Improve pg_dump and pg_restore's error reporting after failure to send a SQL command (Tom) - Fix pg_ctl to properly preserve postmaster - command-line arguments across a restart (Bruce) + Fix pg_ctl to properly preserve postmaster + command-line arguments across a restart (Bruce) - Update time zone data files to tzdata release 2008f (for + Update time zone data files to tzdata release 2008f (for DST law changes in Argentina, Bahamas, Brazil, Mauritius, Morocco, Pakistan, Palestine, and Paraguay) @@ -1709,7 +1709,7 @@ This release contains one serious and one minor bug fix over 8.1.12. For information about new features in the 8.1 major release, see - . + . @@ -1718,7 +1718,7 @@ A dump/restore is not required for those running 8.1.X. However, if you are upgrading from a version earlier than 8.1.2, - see . + see . @@ -1730,18 +1730,18 @@ - Make pg_get_ruledef() parenthesize negative constants (Tom) + Make pg_get_ruledef() parenthesize negative constants (Tom) Before this fix, a negative constant in a view or rule might be dumped - as, say, -42::integer, which is subtly incorrect: it should - be (-42)::integer due to operator precedence rules. + as, say, -42::integer, which is subtly incorrect: it should + be (-42)::integer due to operator precedence rules. Usually this would make little difference, but it could interact with another recent patch to cause - PostgreSQL to reject what had been a valid - SELECT DISTINCT view query. Since this could result in - pg_dump output failing to reload, it is being treated + PostgreSQL to reject what had been a valid + SELECT DISTINCT view query. Since this could result in + pg_dump output failing to reload, it is being treated as a high-priority fix. The only released versions in which dump output is actually incorrect are 8.3.1 and 8.2.7. @@ -1749,13 +1749,13 @@ - Make ALTER AGGREGATE ... OWNER TO update - pg_shdepend (Tom) + Make ALTER AGGREGATE ... OWNER TO update + pg_shdepend (Tom) This oversight could lead to problems if the aggregate was later - involved in a DROP OWNED or REASSIGN OWNED + involved in a DROP OWNED or REASSIGN OWNED operation. @@ -1776,7 +1776,7 @@ This release contains a variety of fixes from 8.1.11. For information about new features in the 8.1 major release, see - . + . @@ -1785,7 +1785,7 @@ A dump/restore is not required for those running 8.1.X. However, if you are upgrading from a version earlier than 8.1.2, - see . + see . @@ -1797,7 +1797,7 @@ - Fix ALTER TABLE ADD COLUMN ... PRIMARY KEY so that the new + Fix ALTER TABLE ADD COLUMN ... 
PRIMARY KEY so that the new column is correctly checked to see if it's been initialized to all non-nulls (Brendan Jurd) @@ -1809,8 +1809,8 @@ - Fix possible CREATE TABLE failure when inheriting the - same constraint from multiple parent relations that + Fix possible CREATE TABLE failure when inheriting the + same constraint from multiple parent relations that inherited that constraint from a common ancestor (Tom) @@ -1818,7 +1818,7 @@ Fix conversions between ISO-8859-5 and other encodings to handle - Cyrillic Yo characters (e and E with + Cyrillic Yo characters (e and E with two dots) (Sergey Burladyan) @@ -1833,7 +1833,7 @@ This could lead to failures in which two apparently identical literal values were not seen as equal, resulting in the parser complaining - about unmatched ORDER BY and DISTINCT + about unmatched ORDER BY and DISTINCT expressions. @@ -1841,24 +1841,24 @@ Fix a corner case in regular-expression substring matching - (substring(string from - pattern)) (Tom) + (substring(string from + pattern)) (Tom) The problem occurs when there is a match to the pattern overall but the user has specified a parenthesized subexpression and that subexpression hasn't got a match. An example is - substring('foo' from 'foo(bar)?'). - This should return NULL, since (bar) isn't matched, but + substring('foo' from 'foo(bar)?'). + This should return NULL, since (bar) isn't matched, but it was mistakenly returning the whole-pattern match instead (ie, - foo). + foo). - Update time zone data files to tzdata release 2008c (for + Update time zone data files to tzdata release 2008c (for DST law changes in Morocco, Iraq, Choibalsan, Pakistan, Syria, Cuba, Argentina/San_Luis, and Chile) @@ -1866,34 +1866,34 @@ - Fix incorrect result from ecpg's - PGTYPEStimestamp_sub() function (Michael) + Fix incorrect result from ecpg's + PGTYPEStimestamp_sub() function (Michael) - Fix core dump in contrib/xml2's - xpath_table() function when the input query returns a + Fix core dump in contrib/xml2's + xpath_table() function when the input query returns a NULL value (Tom) - Fix contrib/xml2's makefile to not override - CFLAGS (Tom) + Fix contrib/xml2's makefile to not override + CFLAGS (Tom) - Fix DatumGetBool macro to not fail with gcc + Fix DatumGetBool macro to not fail with gcc 4.3 (Tom) - This problem affects old style (V0) C functions that + This problem affects old style (V0) C functions that return boolean. The fix is already in 8.3, but the need to back-patch it was not realized at the time. @@ -1901,21 +1901,21 @@ - Fix longstanding LISTEN/NOTIFY + Fix longstanding LISTEN/NOTIFY race condition (Tom) In rare cases a session that had just executed a - LISTEN might not get a notification, even though + LISTEN might not get a notification, even though one would be expected because the concurrent transaction executing - NOTIFY was observed to commit later. + NOTIFY was observed to commit later. A side effect of the fix is that a transaction that has executed - a not-yet-committed LISTEN command will not see any - row in pg_listener for the LISTEN, + a not-yet-committed LISTEN command will not see any + row in pg_listener for the LISTEN, should it choose to look; formerly it would have. This behavior was never documented one way or the other, but it is possible that some applications depend on the old behavior. 
@@ -1924,14 +1924,14 @@ - Disallow LISTEN and UNLISTEN within a + Disallow LISTEN and UNLISTEN within a prepared transaction (Tom) This was formerly allowed but trying to do it had various unpleasant consequences, notably that the originating backend could not exit - as long as an UNLISTEN remained uncommitted. + as long as an UNLISTEN remained uncommitted. @@ -1954,19 +1954,19 @@ - Fix unrecognized node type error in some variants of - ALTER OWNER (Tom) + Fix unrecognized node type error in some variants of + ALTER OWNER (Tom) - Fix pg_ctl to correctly extract the postmaster's port + Fix pg_ctl to correctly extract the postmaster's port number from command-line options (Itagaki Takahiro, Tom) - Previously, pg_ctl start -w could try to contact the + Previously, pg_ctl start -w could try to contact the postmaster on the wrong port, leading to bogus reports of startup failure. @@ -1974,20 +1974,20 @@ - Use - This is known to be necessary when building PostgreSQL - with gcc 4.3 or later. + This is known to be necessary when building PostgreSQL + with gcc 4.3 or later. - Fix display of constant expressions in ORDER BY - and GROUP BY (Tom) + Fix display of constant expressions in ORDER BY + and GROUP BY (Tom) @@ -1999,7 +1999,7 @@ - Fix libpq to handle NOTICE messages correctly + Fix libpq to handle NOTICE messages correctly during COPY OUT (Tom) @@ -2027,12 +2027,12 @@ This release contains a variety of fixes from 8.1.10, including fixes for significant security issues. For information about new features in the 8.1 major release, see - . + . - This is the last 8.1.X release for which the PostgreSQL - community will produce binary packages for Windows. + This is the last 8.1.X release for which the PostgreSQL + community will produce binary packages for Windows. Windows users are encouraged to move to 8.2.X or later, since there are Windows-specific fixes in 8.2.X that are impractical to back-port. 8.1.X will continue to @@ -2045,7 +2045,7 @@ A dump/restore is not required for those running 8.1.X. However, if you are upgrading from a version earlier than 8.1.2, - see . + see . @@ -2058,7 +2058,7 @@ Prevent functions in indexes from executing with the privileges of - the user running VACUUM, ANALYZE, etc (Tom) + the user running VACUUM, ANALYZE, etc (Tom) @@ -2069,18 +2069,18 @@ (Note that triggers, defaults, check constraints, etc. pose the same type of risk.) But functions in indexes pose extra danger because they will be executed by routine maintenance operations - such as VACUUM FULL, which are commonly performed + such as VACUUM FULL, which are commonly performed automatically under a superuser account. For example, a nefarious user can execute code with superuser privileges by setting up a trojan-horse index definition and waiting for the next routine vacuum. The fix arranges for standard maintenance operations - (including VACUUM, ANALYZE, REINDEX, - and CLUSTER) to execute as the table owner rather than + (including VACUUM, ANALYZE, REINDEX, + and CLUSTER) to execute as the table owner rather than the calling user, using the same privilege-switching mechanism already - used for SECURITY DEFINER functions. To prevent bypassing + used for SECURITY DEFINER functions. To prevent bypassing this security measure, execution of SET SESSION - AUTHORIZATION and SET ROLE is now forbidden within a - SECURITY DEFINER context. (CVE-2007-6600) + AUTHORIZATION and SET ROLE is now forbidden within a + SECURITY DEFINER context. 
(CVE-2007-6600) @@ -2100,20 +2100,20 @@ - Require non-superusers who use /contrib/dblink to use only + Require non-superusers who use /contrib/dblink to use only password authentication, as a security measure (Joe) The fix that appeared for this in 8.1.10 was incomplete, as it plugged - the hole for only some dblink functions. (CVE-2007-6601, + the hole for only some dblink functions. (CVE-2007-6601, CVE-2007-3278) - Update time zone data files to tzdata release 2007k + Update time zone data files to tzdata release 2007k (in particular, recent Argentina changes) (Tom) @@ -2128,14 +2128,14 @@ Fix planner failure in some cases of WHERE false AND var IN - (SELECT ...) (Tom) + (SELECT ...) (Tom) Preserve the tablespace of indexes that are - rebuilt by ALTER TABLE ... ALTER COLUMN TYPE (Tom) + rebuilt by ALTER TABLE ... ALTER COLUMN TYPE (Tom) @@ -2154,21 +2154,21 @@ - Make VACUUM not use all of maintenance_work_mem + Make VACUUM not use all of maintenance_work_mem when the table is too small for it to be useful (Alvaro) - Fix potential crash in translate() when using a multibyte + Fix potential crash in translate() when using a multibyte database encoding (Tom) - Fix overflow in extract(epoch from interval) for intervals + Fix overflow in extract(epoch from interval) for intervals exceeding 68 years (Tom) @@ -2182,13 +2182,13 @@ - Fix PL/Perl to cope when platform's Perl defines type bool - as int rather than char (Tom) + Fix PL/Perl to cope when platform's Perl defines type bool + as int rather than char (Tom) While this could theoretically happen anywhere, no standard build of - Perl did things this way ... until macOS 10.5. + Perl did things this way ... until macOS 10.5. @@ -2200,64 +2200,64 @@ - Fix pg_dump to correctly handle inheritance child tables + Fix pg_dump to correctly handle inheritance child tables that have default expressions different from their parent's (Tom) - Fix libpq crash when PGPASSFILE refers + Fix libpq crash when PGPASSFILE refers to a file that is not a plain file (Martin Pitt) - ecpg parser fixes (Michael) + ecpg parser fixes (Michael) - Make contrib/pgcrypto defend against - OpenSSL libraries that fail on keys longer than 128 + Make contrib/pgcrypto defend against + OpenSSL libraries that fail on keys longer than 128 bits; which is the case at least on some Solaris versions (Marko Kreen) - Make contrib/tablefunc's crosstab() handle + Make contrib/tablefunc's crosstab() handle NULL rowid as a category in its own right, rather than crashing (Joe) - Fix tsvector and tsquery output routines to + Fix tsvector and tsquery output routines to escape backslashes correctly (Teodor, Bruce) - Fix crash of to_tsvector() on huge input strings (Teodor) + Fix crash of to_tsvector() on huge input strings (Teodor) - Require a specific version of Autoconf to be used - when re-generating the configure script (Peter) + Require a specific version of Autoconf to be used + when re-generating the configure script (Peter) This affects developers and packagers only. The change was made to prevent accidental use of untested combinations of - Autoconf and PostgreSQL versions. + Autoconf and PostgreSQL versions. You can remove the version check if you really want to use a - different Autoconf version, but it's + different Autoconf version, but it's your responsibility whether the result works or not. @@ -2278,7 +2278,7 @@ This release contains a variety of fixes from 8.1.9. For information about new features in the 8.1 major release, see - . + . 
@@ -2287,7 +2287,7 @@ A dump/restore is not required for those running 8.1.X. However, if you are upgrading from a version earlier than 8.1.2, - see . + see . @@ -2300,20 +2300,20 @@ Prevent index corruption when a transaction inserts rows and - then aborts close to the end of a concurrent VACUUM + then aborts close to the end of a concurrent VACUUM on the same table (Tom) - Make CREATE DOMAIN ... DEFAULT NULL work properly (Tom) + Make CREATE DOMAIN ... DEFAULT NULL work properly (Tom) - Allow the interval data type to accept input consisting only of + Allow the interval data type to accept input consisting only of milliseconds or microseconds (Neil) @@ -2326,7 +2326,7 @@ - Fix excessive logging of SSL error messages (Tom) + Fix excessive logging of SSL error messages (Tom) @@ -2339,7 +2339,7 @@ - Fix crash when log_min_error_statement logging runs out + Fix crash when log_min_error_statement logging runs out of memory (Tom) @@ -2352,7 +2352,7 @@ - Prevent REINDEX and CLUSTER from failing + Prevent REINDEX and CLUSTER from failing due to attempting to process temporary tables of other sessions (Alvaro) @@ -2371,14 +2371,14 @@ - Suppress timezone name (%Z) in log timestamps on Windows + Suppress timezone name (%Z) in log timestamps on Windows because of possible encoding mismatches (Tom) - Require non-superusers who use /contrib/dblink to use only + Require non-superusers who use /contrib/dblink to use only password authentication, as a security measure (Joe) @@ -2400,7 +2400,7 @@ This release contains a variety of fixes from 8.1.8, including a security fix. For information about new features in the 8.1 major release, see - . + . @@ -2409,7 +2409,7 @@ A dump/restore is not required for those running 8.1.X. However, if you are upgrading from a version earlier than 8.1.2, - see . + see . @@ -2422,35 +2422,35 @@ Support explicit placement of the temporary-table schema within - search_path, and disable searching it for functions + search_path, and disable searching it for functions and operators (Tom) This is needed to allow a security-definer function to set a - truly secure value of search_path. Without it, + truly secure value of search_path. Without it, an unprivileged SQL user can use temporary objects to execute code with the privileges of the security-definer function (CVE-2007-2138). - See CREATE FUNCTION for more information. + See CREATE FUNCTION for more information. - /contrib/tsearch2 crash fixes (Teodor) + /contrib/tsearch2 crash fixes (Teodor) - Require COMMIT PREPARED to be executed in the same + Require COMMIT PREPARED to be executed in the same database as the transaction was prepared in (Heikki) - Fix potential-data-corruption bug in how VACUUM FULL handles - UPDATE chains (Tom, Pavan Deolasee) + Fix potential-data-corruption bug in how VACUUM FULL handles + UPDATE chains (Tom, Pavan Deolasee) @@ -2490,7 +2490,7 @@ This release contains one fix from 8.1.7. For information about new features in the 8.1 major release, see - . + . @@ -2499,7 +2499,7 @@ A dump/restore is not required for those running 8.1.X. However, if you are upgrading from a version earlier than 8.1.2, - see . + see . @@ -2533,7 +2533,7 @@ This release contains a variety of fixes from 8.1.6, including a security fix. For information about new features in the 8.1 major release, see - . + . @@ -2542,7 +2542,7 @@ A dump/restore is not required for those running 8.1.X. However, if you are upgrading from a version earlier than 8.1.2, - see . + see . 
@@ -2576,7 +2576,7 @@ - Improve VACUUM performance for databases with many tables (Tom) + Improve VACUUM performance for databases with many tables (Tom) @@ -2593,7 +2593,7 @@ - Fix for rare Assert() crash triggered by UNION (Tom) + Fix for rare Assert() crash triggered by UNION (Tom) @@ -2606,7 +2606,7 @@ - Fix bogus permission denied failures occurring on Windows + Fix bogus permission denied failures occurring on Windows due to attempts to fsync already-deleted files (Magnus, Tom) @@ -2634,7 +2634,7 @@ This release contains a variety of fixes from 8.1.5. For information about new features in the 8.1 major release, see - . + . @@ -2643,7 +2643,7 @@ A dump/restore is not required for those running 8.1.X. However, if you are upgrading from a version earlier than 8.1.2, - see . + see . @@ -2655,7 +2655,7 @@ - Improve handling of getaddrinfo() on AIX (Tom) + Improve handling of getaddrinfo() on AIX (Tom) @@ -2666,21 +2666,21 @@ - Fix pg_restore to handle a tar-format backup + Fix pg_restore to handle a tar-format backup that contains large objects (blobs) with comments (Tom) - Fix failed to re-find parent key errors in - VACUUM (Tom) + Fix failed to re-find parent key errors in + VACUUM (Tom) - Clean out pg_internal.init cache files during server + Clean out pg_internal.init cache files during server restart (Simon) @@ -2693,7 +2693,7 @@ Fix race condition for truncation of a large relation across a - gigabyte boundary by VACUUM (Tom) + gigabyte boundary by VACUUM (Tom) @@ -2717,7 +2717,7 @@ - Fix error when constructing an ARRAY[] made up of multiple + Fix error when constructing an ARRAY[] made up of multiple empty elements (Tom) @@ -2736,13 +2736,13 @@ - to_number() and to_char(numeric) - are now STABLE, not IMMUTABLE, for - new initdb installs (Tom) + to_number() and to_char(numeric) + are now STABLE, not IMMUTABLE, for + new initdb installs (Tom) - This is because lc_numeric can potentially + This is because lc_numeric can potentially change the output of these functions. @@ -2753,7 +2753,7 @@ - This improves psql \d performance also. + This improves psql \d performance also. @@ -2784,7 +2784,7 @@ This release contains a variety of fixes from 8.1.4. For information about new features in the 8.1 major release, see - . + . @@ -2793,7 +2793,7 @@ A dump/restore is not required for those running 8.1.X. However, if you are upgrading from a version earlier than 8.1.2, - see . + see . @@ -2802,7 +2802,7 @@ Changes -Disallow aggregate functions in UPDATE +Disallow aggregate functions in UPDATE commands, except within sub-SELECTs (Tom) The behavior of such an aggregate was unpredictable, and in 8.1.X could cause a crash, so it has been disabled. The SQL standard does not allow @@ -2810,25 +2810,25 @@ this either. Fix core dump when an untyped literal is taken as ANYARRAY Fix core dump in duration logging for extended query protocol -when a COMMIT or ROLLBACK is +when a COMMIT or ROLLBACK is executed Fix mishandling of AFTER triggers when query contains a SQL function returning multiple rows (Tom) -Fix ALTER TABLE ... TYPE to recheck -NOT NULL for USING clause (Tom) -Fix string_to_array() to handle overlapping +Fix ALTER TABLE ... TYPE to recheck +NOT NULL for USING clause (Tom) +Fix string_to_array() to handle overlapping matches for the separator string -For example, string_to_array('123xx456xxx789', 'xx'). +For example, string_to_array('123xx456xxx789', 'xx'). 
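To spell out the example above: with the fix, separator matches are consumed left to right without overlapping, so the call from the note is expected to split as shown in the comment (a hedged sketch rather than verbatim regression output).

-- Expected result: {123,456,x789}
-- ('xx' matches after '123' and after '456'; the leftover 'x' stays with
-- the final element).
SELECT string_to_array('123xx456xxx789', 'xx');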
-Fix to_timestamp() for -AM/PM formats (Bruce) +Fix to_timestamp() for +AM/PM formats (Bruce) Fix autovacuum's calculation that decides whether - ANALYZE is needed (Alvaro) + ANALYZE is needed (Alvaro) Fix corner cases in pattern matching for - psql's \d commands + psql's \d commands Fix index-corrupting bugs in /contrib/ltree (Teodor) -Numerous robustness fixes in ecpg (Joachim +Numerous robustness fixes in ecpg (Joachim Wieland) Fix backslash escaping in /contrib/dbmirror Minor fixes in /contrib/dblink and /contrib/tsearch2 @@ -2836,14 +2836,14 @@ Wieland) Efficiency improvements in hash tables and bitmap index scans (Tom) Fix instability of statistics collection on Windows (Tom, Andrew) -Fix statement_timeout to use the proper +Fix statement_timeout to use the proper units on Win32 (Bruce) In previous Win32 8.1.X versions, the delay was off by a factor of 100. -Fixes for MSVC and Borland C++ +Fixes for MSVC and Borland C++ compilers (Hiroshi Saito) -Fixes for AIX and -Intel compilers (Tom) +Fixes for AIX and +Intel compilers (Tom) Fix rare bug in continuous archiving (Tom) @@ -2862,7 +2862,7 @@ compilers (Hiroshi Saito) This release contains a variety of fixes from 8.1.3, including patches for extremely serious security issues. For information about new features in the 8.1 major release, see - . + . @@ -2871,7 +2871,7 @@ compilers (Hiroshi Saito) A dump/restore is not required for those running 8.1.X. However, if you are upgrading from a version earlier than 8.1.2, - see . + see . @@ -2881,9 +2881,9 @@ compilers (Hiroshi Saito) into SQL commands, you should examine them as soon as possible to ensure that they are using recommended escaping techniques. In most cases, applications should be using subroutines provided by - libraries or drivers (such as libpq's - PQescapeStringConn()) to perform string escaping, - rather than relying on ad hoc code to do it. + libraries or drivers (such as libpq's + PQescapeStringConn()) to perform string escaping, + rather than relying on ad hoc code to do it. @@ -2893,61 +2893,61 @@ compilers (Hiroshi Saito) Change the server to reject invalidly-encoded multibyte characters in all cases (Tatsuo, Tom) -While PostgreSQL has been moving in this direction for +While PostgreSQL has been moving in this direction for some time, the checks are now applied uniformly to all encodings and all textual input, and are now always errors not merely warnings. This change defends against SQL-injection attacks of the type described in CVE-2006-2313. -Reject unsafe uses of \' in string literals +Reject unsafe uses of \' in string literals As a server-side defense against SQL-injection attacks of the type -described in CVE-2006-2314, the server now only accepts '' and not -\' as a representation of ASCII single quote in SQL string -literals. By default, \' is rejected only when -client_encoding is set to a client-only encoding (SJIS, BIG5, GBK, +described in CVE-2006-2314, the server now only accepts '' and not +\' as a representation of ASCII single quote in SQL string +literals. By default, \' is rejected only when +client_encoding is set to a client-only encoding (SJIS, BIG5, GBK, GB18030, or UHC), which is the scenario in which SQL injection is possible. -A new configuration parameter backslash_quote is available to +A new configuration parameter backslash_quote is available to adjust this behavior when needed. 
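A brief hedged sketch of the two quoting styles this affects; the setting shown is the standard value for this parameter. Doubling the quote is always accepted, while the backslash form is governed by backslash_quote.

-- Always-safe representation of an embedded single quote:
SELECT 'O''Reilly';

-- The backslash form, SELECT 'O\'Reilly';, is rejected when
-- client_encoding is a client-only encoding under the default setting:
SET backslash_quote = 'safe_encoding';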
Note that full security against CVE-2006-2314 might require client-side changes; the purpose of -backslash_quote is in part to make it obvious that insecure +backslash_quote is in part to make it obvious that insecure clients are insecure. -Modify libpq's string-escaping routines to be +Modify libpq's string-escaping routines to be aware of encoding considerations and -standard_conforming_strings -This fixes libpq-using applications for the security +standard_conforming_strings +This fixes libpq-using applications for the security issues described in CVE-2006-2313 and CVE-2006-2314, and also future-proofs them against the planned changeover to SQL-standard string literal syntax. -Applications that use multiple PostgreSQL connections -concurrently should migrate to PQescapeStringConn() and -PQescapeByteaConn() to ensure that escaping is done correctly +Applications that use multiple PostgreSQL connections +concurrently should migrate to PQescapeStringConn() and +PQescapeByteaConn() to ensure that escaping is done correctly for the settings in use in each database connection. Applications that -do string escaping by hand should be modified to rely on library +do string escaping by hand should be modified to rely on library routines instead. Fix weak key selection in pgcrypto (Marko Kreen) Errors in fortuna PRNG reseeding logic could cause a predictable -session key to be selected by pgp_sym_encrypt() in some cases. +session key to be selected by pgp_sym_encrypt() in some cases. This only affects non-OpenSSL-using builds. Fix some incorrect encoding conversion functions -win1251_to_iso, win866_to_iso, -euc_tw_to_big5, euc_tw_to_mic, -mic_to_euc_tw were all broken to varying +win1251_to_iso, win866_to_iso, +euc_tw_to_big5, euc_tw_to_mic, +mic_to_euc_tw were all broken to varying extents. -Clean up stray remaining uses of \' in strings +Clean up stray remaining uses of \' in strings (Bruce, Jan) -Make autovacuum visible in pg_stat_activity +Make autovacuum visible in pg_stat_activity (Alvaro) -Disable full_page_writes (Tom) -In certain cases, having full_page_writes off would cause +Disable full_page_writes (Tom) +In certain cases, having full_page_writes off would cause crash recovery to fail. A proper fix will appear in 8.2; for now it's just disabled. @@ -2965,10 +2965,10 @@ same transaction Fix WAL replay for case where a B-Tree index has been truncated -Fix SIMILAR TO for patterns involving -| (Tom) +Fix SIMILAR TO for patterns involving +| (Tom) -Fix SELECT INTO and CREATE TABLE AS to +Fix SELECT INTO and CREATE TABLE AS to create tables in the default tablespace, not the base directory (Kris Jurka) @@ -2986,18 +2986,18 @@ Fuhr) Fix problem with password prompting on some Win32 systems (Robert Kinberg) -Improve pg_dump's handling of default values +Improve pg_dump's handling of default values for domains -Fix pg_dumpall to handle identically-named +Fix pg_dumpall to handle identically-named users and groups reasonably (only possible when dumping from a pre-8.1 server) (Tom) The user and group will be merged into a single role with -LOGIN permission. Formerly the merged role wouldn't have -LOGIN permission, making it unusable as a user. +LOGIN permission. Formerly the merged role wouldn't have +LOGIN permission, making it unusable as a user. -Fix pg_restore -n to work as +Fix pg_restore -n to work as documented (Tom) @@ -3016,7 +3016,7 @@ documented (Tom) This release contains a variety of fixes from 8.1.2, including one very serious security issue. 
For information about new features in the 8.1 major release, see - . + . @@ -3025,7 +3025,7 @@ documented (Tom) A dump/restore is not required for those running 8.1.X. However, if you are upgrading from a version earlier than 8.1.2, - see . + see . @@ -3035,14 +3035,14 @@ documented (Tom) Fix bug that allowed any logged-in user to SET -ROLE to any other database user id (CVE-2006-0553) +ROLE to any other database user id (CVE-2006-0553) Due to inadequate validity checking, a user could exploit the special -case that SET ROLE normally uses to restore the previous role +case that SET ROLE normally uses to restore the previous role setting after an error. This allowed ordinary users to acquire superuser status, for example. The escalation-of-privilege risk exists only in 8.1.0-8.1.2. However, in all releases back to 7.3 there is a related bug in SET -SESSION AUTHORIZATION that allows unprivileged users to crash the server, +SESSION AUTHORIZATION that allows unprivileged users to crash the server, if it has been compiled with Asserts enabled (which is not the default). Thanks to Akio Ishida for reporting this problem. @@ -3055,55 +3055,55 @@ created in 8.0.4, 7.4.9, and 7.3.11 releases. Fix race condition that could lead to file already -exists errors during pg_clog and pg_subtrans file creation +exists errors during pg_clog and pg_subtrans file creation (Tom) Fix cases that could lead to crashes if a cache-invalidation message arrives at just the wrong time (Tom) -Properly check DOMAIN constraints for -UNKNOWN parameters in prepared statements +Properly check DOMAIN constraints for +UNKNOWN parameters in prepared statements (Neil) -Ensure ALTER COLUMN TYPE will process -FOREIGN KEY, UNIQUE, and PRIMARY KEY +Ensure ALTER COLUMN TYPE will process +FOREIGN KEY, UNIQUE, and PRIMARY KEY constraints in the proper order (Nakano Yoshihisa) Fixes to allow restoring dumps that have cross-schema references to custom operators or operator classes (Tom) -Allow pg_restore to continue properly after a -COPY failure; formerly it tried to treat the remaining -COPY data as SQL commands (Stephen Frost) +Allow pg_restore to continue properly after a +COPY failure; formerly it tried to treat the remaining +COPY data as SQL commands (Stephen Frost) -Fix pg_ctl unregister crash +Fix pg_ctl unregister crash when the data directory is not specified (Magnus) -Fix libpq PQprint HTML tags +Fix libpq PQprint HTML tags (Christoph Zwerschke) -Fix ecpg crash on AMD64 and PPC +Fix ecpg crash on AMD64 and PPC (Neil) -Allow SETOF and %TYPE to be used +Allow SETOF and %TYPE to be used together in function result type declarations Recover properly if error occurs during argument passing -in PL/Python (Neil) +in PL/Python (Neil) -Fix memory leak in plperl_return_next +Fix memory leak in plperl_return_next (Neil) -Fix PL/Perl's handling of locales on +Fix PL/Perl's handling of locales on Win32 to match the backend (Andrew) Various optimizer fixes (Tom) -Fix crash when log_min_messages is set to -DEBUG3 or above in postgresql.conf on Win32 +Fix crash when log_min_messages is set to +DEBUG3 or above in postgresql.conf on Win32 (Bruce) -Fix pgxs -L library path +Fix pgxs -L library path specification for Win32, Cygwin, macOS, AIX (Bruce) Check that SID is enabled while checking for Win32 admin @@ -3112,13 +3112,13 @@ privileges (Magnus) Properly reject out-of-range date inputs (Kris Jurka) -Portability fix for testing presence of finite -and isinf during configure (Tom) +Portability fix for testing presence of finite +and isinf 
during configure (Tom) -Improve speed of COPY IN via libpq, by +Improve speed of COPY IN via libpq, by avoiding a kernel call per data line (Alon Goldshuv) -Improve speed of /contrib/tsearch2 index +Improve speed of /contrib/tsearch2 index creation (Tom) @@ -3137,7 +3137,7 @@ creation (Tom) This release contains a variety of fixes from 8.1.1. For information about new features in the 8.1 major release, see - . + . @@ -3145,9 +3145,9 @@ creation (Tom) A dump/restore is not required for those running 8.1.X. - However, you might need to REINDEX indexes on textual + However, you might need to REINDEX indexes on textual columns after updating, if you are affected by the locale or - plperl issues described below. + plperl issues described below. @@ -3160,7 +3160,7 @@ creation (Tom) than exit if there is no more room in ShmemBackendArray (Magnus) The previous behavior could lead to a denial-of-service situation if too many connection requests arrive close together. This applies -only to the Windows port. +only to the Windows port. Fix bug introduced in 8.0 that could allow ReadBuffer to return an already-used page as new, potentially causing loss of @@ -3171,16 +3171,16 @@ outside a transaction or in a failed transaction (Tom) Fix character string comparison for locales that consider different character combinations as equal, such as Hungarian (Tom) -This might require REINDEX to fix existing indexes on +This might require REINDEX to fix existing indexes on textual columns. Set locale environment variables during postmaster startup -to ensure that plperl won't change the locale later -This fixes a problem that occurred if the postmaster was +to ensure that plperl won't change the locale later +This fixes a problem that occurred if the postmaster was started with environment variables specifying a different locale than what -initdb had been told. Under these conditions, any use of -plperl was likely to lead to corrupt indexes. You might need -REINDEX to fix existing indexes on +initdb had been told. Under these conditions, any use of +plperl was likely to lead to corrupt indexes. You might need +REINDEX to fix existing indexes on textual columns if this has happened to you. Allow more flexible relocation of installation @@ -3189,7 +3189,7 @@ directories (Tom) directory paths were the same except for the last component. Prevent crashes caused by the use of -ISO-8859-5 and ISO-8859-9 encodings +ISO-8859-5 and ISO-8859-9 encodings (Tatsuo) Fix longstanding bug in strpos() and regular expression @@ -3197,22 +3197,22 @@ handling in certain rarely used Asian multi-byte character sets (Tatsuo) Fix bug where COPY CSV mode considered any -\. to terminate the copy data The new code -requires \. to appear alone on a line, as per +\. to terminate the copy data The new code +requires \. to appear alone on a line, as per documentation. Make COPY CSV mode quote a literal data value of -\. to ensure it cannot be interpreted as the +\. to ensure it cannot be interpreted as the end-of-data marker (Bruce) -Various fixes for functions returning RECORDs +Various fixes for functions returning RECORDs (Tom) -Fix processing of postgresql.conf so a +Fix processing of postgresql.conf so a final line with no newline is processed properly (Tom) -Fix bug in /contrib/pgcrypto gen_salt, +Fix bug in /contrib/pgcrypto gen_salt, which caused it not to use all available salt space for MD5 and XDES algorithms (Marko Kreen, Solar Designer) Salts for Blowfish and standard DES are unaffected. 
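For reference, a hedged sketch of how the affected contrib/pgcrypto entry points are typically used together; the password value is illustrative.

-- gen_salt() now draws on the full salt space for the md5 and xdes
-- algorithms; crypt() hashes the password with the generated salt.
SELECT crypt('secret', gen_salt('md5'));
SELECT crypt('secret', gen_salt('xdes'));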
@@ -3220,7 +3220,7 @@ XDES algorithms (Marko Kreen, Solar Designer) Fix autovacuum crash when processing expression indexes -Fix /contrib/dblink to throw an error, +Fix /contrib/dblink to throw an error, rather than crashing, when the number of columns specified is different from what's actually returned by the query (Joe) @@ -3240,7 +3240,7 @@ what's actually returned by the query (Joe) This release contains a variety of fixes from 8.1.0. For information about new features in the 8.1 major release, see - . + . @@ -3262,7 +3262,7 @@ what's actually returned by the query (Joe) involving sub-selects flattened by the optimizer (Tom) Fix update failures in scenarios involving CHECK constraints, -toasted columns, and indexes (Tom) +toasted columns, and indexes (Tom) Fix bgwriter problems after recovering from errors (Tom) @@ -3276,7 +3276,7 @@ later VACUUM commands. Prevent failure if client sends Bind protocol message when current transaction is already aborted -/contrib/tsearch2 and /contrib/ltree +/contrib/tsearch2 and /contrib/ltree fixes (Teodor) Fix problems with translated error messages in @@ -3285,17 +3285,17 @@ unexpected truncation of output strings and wrong display of the smallest possible bigint value (Andrew, Tom) These problems only appeared on platforms that were using our -port/snprintf.c code, which includes BSD variants if ---enable-nls was given, and perhaps others. In addition, +port/snprintf.c code, which includes BSD variants if +--enable-nls was given, and perhaps others. In addition, a different form of the translated-error-message problem could appear -on Windows depending on which version of libintl was used. +on Windows depending on which version of libintl was used. -Re-allow AM/PM, HH, -HH12, and D format specifiers for -to_char(time) and to_char(interval). -(to_char(interval) should probably use -HH24.) (Bruce) +Re-allow AM/PM, HH, +HH12, and D format specifiers for +to_char(time) and to_char(interval). +(to_char(interval) should probably use +HH24.) (Bruce) AIX, HPUX, and MSVC compile fixes (Tom, Hiroshi Saito) @@ -3305,7 +3305,7 @@ Saito) Retry file reads and writes after Windows NO_SYSTEM_RESOURCES error (Qingqing Zhou) -Prevent autovacuum from crashing during +Prevent autovacuum from crashing during ANALYZE of expression index (Alvaro) Fix problems with ON COMMIT DELETE ROWS temp @@ -3315,7 +3315,7 @@ tables DISTINCT query Add 8.1.0 release note item on how to migrate invalid -UTF-8 byte sequences (Paul Lindner) +UTF-8 byte sequences (Paul Lindner) @@ -3365,13 +3365,13 @@ DISTINCT query In previous releases, only a single index could be used to do lookups on a table. With this feature, if a query has - WHERE tab.col1 = 4 and tab.col2 = 9, and there is - no multicolumn index on col1 and col2, - but there is an index on col1 and another on - col2, it is possible to search both indexes and + WHERE tab.col1 = 4 and tab.col2 = 9, and there is + no multicolumn index on col1 and col2, + but there is an index on col1 and another on + col2, it is possible to search both indexes and combine the results in memory, then do heap fetches for only - the rows matching both the col1 and - col2 restrictions. This is very useful in + the rows matching both the col1 and + col2 restrictions. This is very useful in environments that have a lot of unstructured queries where it is impossible to create indexes that match all possible access conditions. 
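A hedged sketch of the two-index case described above, with illustrative table, column, and index names; given suitable data and statistics, the plan may show a Bitmap Heap Scan over a BitmapAnd of two Bitmap Index Scans.

CREATE INDEX tab_col1_idx ON tab (col1);
CREATE INDEX tab_col2_idx ON tab (col2);

-- Both single-column indexes can now be combined in memory:
EXPLAIN SELECT * FROM tab WHERE col1 = 4 AND col2 = 9;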
Bitmap scans are useful even with a single index, @@ -3394,9 +3394,9 @@ DISTINCT query their transactions (none failed), all transactions can be committed. Even if a machine crashes after a prepare, the prepared transaction can be committed after the machine is - restarted. New syntax includes PREPARE TRANSACTION and - COMMIT/ROLLBACK PREPARED. A new system view - pg_prepared_xacts has also been added. + restarted. New syntax includes PREPARE TRANSACTION and + COMMIT/ROLLBACK PREPARED. A new system view + pg_prepared_xacts has also been added. @@ -3445,12 +3445,12 @@ DISTINCT query Once a user logs into a role, she obtains capabilities of the login role plus any inherited roles, and can use - SET ROLE to switch to other roles she is a member of. + SET ROLE to switch to other roles she is a member of. This feature is a generalization of the SQL standard's concept of roles. - This change also replaces pg_shadow and - pg_group by new role-capable catalogs - pg_authid and pg_auth_members. The old + This change also replaces pg_shadow and + pg_group by new role-capable catalogs + pg_authid and pg_auth_members. The old tables are redefined as read-only views on the new role tables. @@ -3458,15 +3458,15 @@ DISTINCT query - Automatically use indexes for MIN() and - MAX() (Tom) + Automatically use indexes for MIN() and + MAX() (Tom) In previous releases, the only way to use an index for - MIN() or MAX() was to rewrite the - query as SELECT col FROM tab ORDER BY col LIMIT 1. + MIN() or MAX() was to rewrite the + query as SELECT col FROM tab ORDER BY col LIMIT 1. Index usage now happens automatically. @@ -3474,7 +3474,7 @@ DISTINCT query - Move /contrib/pg_autovacuum into the main server + Move /contrib/pg_autovacuum into the main server (Alvaro) @@ -3483,21 +3483,21 @@ DISTINCT query Integrating autovacuum into the server allows it to be automatically started and stopped in sync with the database server, and allows autovacuum to be configured from - postgresql.conf. + postgresql.conf. - Add shared row level locks using SELECT ... FOR SHARE + Add shared row level locks using SELECT ... FOR SHARE (Alvaro) While PostgreSQL's MVCC locking - allows SELECT to never be blocked by writers and + allows SELECT to never be blocked by writers and therefore does not need shared row locks for typical operations, shared locks are useful for applications that require shared row locking. In particular this reduces the locking requirements @@ -3516,7 +3516,7 @@ DISTINCT query This extension of the dependency mechanism prevents roles from being dropped while there are still database objects they own. - Formerly it was possible to accidentally orphan objects by + Formerly it was possible to accidentally orphan objects by deleting their owner. While this could be recovered from, it was messy and unpleasant. @@ -3537,7 +3537,7 @@ DISTINCT query This allows for a basic type of table partitioning. If child tables store separate key ranges and this is enforced using appropriate - CHECK constraints, the optimizer will skip child + CHECK constraints, the optimizer will skip child table accesses when the constraint guarantees no matching rows exist in the child table. @@ -3556,9 +3556,9 @@ DISTINCT query - The 8.0 release announced that the to_char() function + The 8.0 release announced that the to_char() function for intervals would be removed in 8.1. 
However, since no better API - has been suggested, to_char(interval) has been enhanced in + has been suggested, to_char(interval) has been enhanced in 8.1 and will remain in the server. @@ -3570,21 +3570,21 @@ DISTINCT query - add_missing_from is now false by default (Neil) + add_missing_from is now false by default (Neil) By default, we now generate an error if a table is used in a query - without a FROM reference. The old behavior is still + without a FROM reference. The old behavior is still available, but the parameter must be set to 'true' to obtain it. - It might be necessary to set add_missing_from to true + It might be necessary to set add_missing_from to true in order to load an existing dump file, if the dump contains any - views or rules created using the implicit-FROM syntax. + views or rules created using the implicit-FROM syntax. This should be a one-time annoyance, because PostgreSQL 8.1 will convert - such views and rules to standard explicit-FROM syntax. + such views and rules to standard explicit-FROM syntax. Subsequent dumps will therefore not have the problem. @@ -3604,29 +3604,29 @@ DISTINCT query - default_with_oids is now false by default (Neil) + default_with_oids is now false by default (Neil) With this option set to false, user-created tables no longer - have an OID column unless WITH OIDS is specified in - CREATE TABLE. Though OIDs have existed in all - releases of PostgreSQL, their use is limited + have an OID column unless WITH OIDS is specified in + CREATE TABLE. Though OIDs have existed in all + releases of PostgreSQL, their use is limited because they are only four bytes long and the counter is shared across all installed databases. The preferred way of uniquely - identifying rows is via sequences and the SERIAL type, - which have been supported since PostgreSQL 6.4. + identifying rows is via sequences and the SERIAL type, + which have been supported since PostgreSQL 6.4. - Add E'' syntax so eventually ordinary strings can + Add E'' syntax so eventually ordinary strings can treat backslashes literally (Bruce) Currently PostgreSQL processes a backslash in a string literal as introducing a special escape sequence, - e.g. \n or \010. + e.g. \n or \010. While this allows easy entry of special values, it is nonstandard and makes porting of applications from other databases more difficult. For this reason, the @@ -3634,8 +3634,8 @@ DISTINCT query remove the special meaning of backslashes in strings. For backward compatibility and for users who want special backslash processing, a new string syntax has been created. This new string - syntax is formed by writing an E immediately preceding the - single quote that starts the string, e.g. E'hi\n'. While + syntax is formed by writing an E immediately preceding the + single quote that starts the string, e.g. E'hi\n'. While this release does not change the handling of backslashes in strings, it does add new configuration parameters to help users migrate applications for future releases: @@ -3644,14 +3644,14 @@ DISTINCT query - standard_conforming_strings — does this release + standard_conforming_strings — does this release treat backslashes literally in ordinary strings? - escape_string_warning — warn about backslashes in + escape_string_warning — warn about backslashes in ordinary (non-E) strings @@ -3659,36 +3659,36 @@ DISTINCT query - The standard_conforming_strings value is read-only. + The standard_conforming_strings value is read-only. Applications can retrieve the value to know how backslashes are processed. 
(Presence of the parameter can also be taken as an - indication that E'' string syntax is supported.) - In a future release, standard_conforming_strings + indication that E'' string syntax is supported.) + In a future release, standard_conforming_strings will be true, meaning backslashes will be treated literally in - non-E strings. To prepare for this change, use E'' + non-E strings. To prepare for this change, use E'' strings in places that need special backslash processing, and - turn on escape_string_warning to find additional - strings that need to be converted to use E''. - Also, use two single-quotes ('') to embed a literal + turn on escape_string_warning to find additional + strings that need to be converted to use E''. + Also, use two single-quotes ('') to embed a literal single-quote in a string, rather than the PostgreSQL-supported syntax of - backslash single-quote (\'). The former is + backslash single-quote (\'). The former is standards-conforming and does not require the use of the - E'' string syntax. You can also use the - $$ string syntax, which does not treat backslashes + E'' string syntax. You can also use the + $$ string syntax, which does not treat backslashes specially. - Make REINDEX DATABASE reindex all indexes in the + Make REINDEX DATABASE reindex all indexes in the database (Tom) - Formerly, REINDEX DATABASE reindexed only + Formerly, REINDEX DATABASE reindexed only system tables. This new behavior seems more intuitive. A new - command REINDEX SYSTEM provides the old functionality + command REINDEX SYSTEM provides the old functionality of reindexing just the system tables. @@ -3698,13 +3698,13 @@ DISTINCT query Read-only large object descriptors now obey MVCC snapshot semantics - When a large object is opened with INV_READ (and not - INV_WRITE), the data read from the descriptor will now - reflect a snapshot of the large object's state at the + When a large object is opened with INV_READ (and not + INV_WRITE), the data read from the descriptor will now + reflect a snapshot of the large object's state at the time of the transaction snapshot in use by the query that called - lo_open(). To obtain the old behavior of always - returning the latest committed data, include INV_WRITE - in the mode flags for lo_open(). + lo_open(). To obtain the old behavior of always + returning the latest committed data, include INV_WRITE + in the mode flags for lo_open(). @@ -3713,28 +3713,28 @@ DISTINCT query Add proper dependencies for arguments of sequence functions (Tom) - In previous releases, sequence names passed to nextval(), - currval(), and setval() were stored as + In previous releases, sequence names passed to nextval(), + currval(), and setval() were stored as simple text strings, meaning that renaming or dropping a - sequence used in a DEFAULT clause made the clause + sequence used in a DEFAULT clause made the clause invalid. This release stores all newly-created sequence function arguments as internal OIDs, allowing them to track sequence renaming, and adding dependency information that prevents - improper sequence removal. It also makes such DEFAULT + improper sequence removal. It also makes such DEFAULT clauses immune to schema renaming and search path changes. Some applications might rely on the old behavior of run-time lookup for sequence names. This can still be done by - explicitly casting the argument to text, for example - nextval('myseq'::text). + explicitly casting the argument to text, for example + nextval('myseq'::text). 
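A hedged sketch contrasting the two behaviors just described, with illustrative sequence and table names: the plain literal is now stored as an OID that tracks renames, while an explicit text cast preserves the old run-time lookup by name.

CREATE SEQUENCE myseq;

-- Stored by OID in 8.1: survives renaming the sequence or moving it
-- to another schema.
CREATE TABLE t1 (id integer DEFAULT nextval('myseq'));

-- Stored as text: the name is looked up at run time, as in prior releases.
CREATE TABLE t2 (id integer DEFAULT nextval('myseq'::text));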
Pre-8.1 database dumps loaded into 8.1 will use the old text-based representation and therefore will not have the features of OID-stored arguments. However, it is possible to update a - database containing text-based DEFAULT clauses. - First, save this query into a file, such as fixseq.sql: + database containing text-based DEFAULT clauses. + First, save this query into a file, such as fixseq.sql: SELECT 'ALTER TABLE ' || pg_catalog.quote_ident(n.nspname) || '.' || @@ -3754,11 +3754,11 @@ WHERE n.oid = c.relnamespace AND d.adsrc ~ $$val\(\('[^']*'::text\)::regclass$$; Next, run the query against a database to find what - adjustments are required, like this for database db1: + adjustments are required, like this for database db1: psql -t -f fixseq.sql db1 - This will show the ALTER TABLE commands needed to + This will show the ALTER TABLE commands needed to convert the database to the newer OID-based representation. If the commands look reasonable, run this to update the database: @@ -3771,51 +3771,51 @@ psql -t -f fixseq.sql db1 | psql -e db1 In psql, treat unquoted - \{digit}+ sequences as octal (Bruce) + \{digit}+ sequences as octal (Bruce) - In previous releases, \{digit}+ sequences were - treated as decimal, and only \0{digit}+ were treated + In previous releases, \{digit}+ sequences were + treated as decimal, and only \0{digit}+ were treated as octal. This change was made for consistency. - Remove grammar productions for prefix and postfix % - and ^ operators + Remove grammar productions for prefix and postfix % + and ^ operators (Tom) These have never been documented and complicated the use of the - modulus operator (%) with negative numbers. + modulus operator (%) with negative numbers. - Make &< and &> for polygons + Make &< and &> for polygons consistent with the box "over" operators (Tom) - CREATE LANGUAGE can ignore the provided arguments - in favor of information from pg_pltemplate + CREATE LANGUAGE can ignore the provided arguments + in favor of information from pg_pltemplate (Tom) - A new system catalog pg_pltemplate has been defined + A new system catalog pg_pltemplate has been defined to carry information about the preferred definitions of procedural languages (such as whether they have validator functions). When an entry exists in this catalog for the language being created, - CREATE LANGUAGE will ignore all its parameters except the + CREATE LANGUAGE will ignore all its parameters except the language name and instead use the catalog information. This measure was taken because of increasing problems with obsolete language definitions being loaded by old dump files. As of 8.1, - pg_dump will dump procedural language definitions as - just CREATE LANGUAGE name, relying + pg_dump will dump procedural language definitions as + just CREATE LANGUAGE name, relying on a template entry to exist at load time. We expect this will be a more future-proof representation. @@ -3835,11 +3835,11 @@ psql -t -f fixseq.sql db1 | psql -e db1 sequences to be entered into the database, and this release properly accepts only valid UTF-8 sequences. One way to correct a dumpfile is to run the command iconv -c -f UTF-8 -t - UTF-8 -o cleanfile.sql dumpfile.sql. The -c option + UTF-8 -o cleanfile.sql dumpfile.sql. The -c option removes invalid character sequences. A diff of the two files will - show the sequences that are invalid. iconv reads the + show the sequences that are invalid. 
iconv reads the entire input file into memory so it might be necessary to use - split to break up the dump into multiple smaller + split to break up the dump into multiple smaller files for processing. @@ -3908,17 +3908,17 @@ psql -t -f fixseq.sql db1 | psql -e db1 For example, this allows an index on columns a,b,c to be used in - a query with WHERE a = 4 and c = 10. + a query with WHERE a = 4 and c = 10. - Skip WAL logging for CREATE TABLE AS / - SELECT INTO (Simon) + Skip WAL logging for CREATE TABLE AS / + SELECT INTO (Simon) - Since a crash during CREATE TABLE AS would cause the + Since a crash during CREATE TABLE AS would cause the table to be dropped during recovery, there is no reason to WAL log as the table is loaded. (Logging still happens if WAL archiving is enabled, however.) @@ -3933,7 +3933,7 @@ psql -t -f fixseq.sql db1 | psql -e db1 - Add configuration parameter full_page_writes to + Add configuration parameter full_page_writes to control writing full pages to WAL (Bruce) @@ -3948,22 +3948,22 @@ psql -t -f fixseq.sql db1 | psql -e db1 - Use O_DIRECT if available when using - O_SYNC for wal_sync_method + Use O_DIRECT if available when using + O_SYNC for wal_sync_method (Itagaki Takahiro) - O_DIRECT causes disk writes to bypass the kernel + O_DIRECT causes disk writes to bypass the kernel cache, and for WAL writes, this improves performance. - Improve COPY FROM performance (Alon Goldshuv) + Improve COPY FROM performance (Alon Goldshuv) - This was accomplished by reading COPY input in + This was accomplished by reading COPY input in larger chunks, rather than character by character. @@ -4005,14 +4005,14 @@ psql -t -f fixseq.sql db1 | psql -e db1 Add warning about the need to increase - max_fsm_relations and max_fsm_pages - during VACUUM (Ron Mayer) + max_fsm_relations and max_fsm_pages + during VACUUM (Ron Mayer) - Add temp_buffers configuration parameter to allow + Add temp_buffers configuration parameter to allow users to determine the size of the local buffer area for temporary table access (Tom) @@ -4021,13 +4021,13 @@ psql -t -f fixseq.sql db1 | psql -e db1 Add session start time and client IP address to - pg_stat_activity (Magnus) + pg_stat_activity (Magnus) - Adjust pg_stat views for bitmap scans (Tom) + Adjust pg_stat views for bitmap scans (Tom) The meanings of some of the fields have changed slightly. @@ -4036,27 +4036,27 @@ psql -t -f fixseq.sql db1 | psql -e db1 - Enhance pg_locks view (Tom) + Enhance pg_locks view (Tom) - Log queries for client-side PREPARE and - EXECUTE (Simon) + Log queries for client-side PREPARE and + EXECUTE (Simon) Allow Kerberos name and user name case sensitivity to be - specified in postgresql.conf (Magnus) + specified in postgresql.conf (Magnus) - Add configuration parameter krb_server_hostname so + Add configuration parameter krb_server_hostname so that the server host name can be specified as part of service principal (Todd Kover) @@ -4069,8 +4069,8 @@ psql -t -f fixseq.sql db1 | psql -e db1 - Add log_line_prefix options for millisecond - timestamps (%m) and remote host (%h) (Ed + Add log_line_prefix options for millisecond + timestamps (%m) and remote host (%h) (Ed L.) @@ -4086,12 +4086,12 @@ psql -t -f fixseq.sql db1 | psql -e db1 - Remove old *.backup files when we do - pg_stop_backup() (Bruce) + Remove old *.backup files when we do + pg_stop_backup() (Bruce) - This prevents a large number of *.backup files from - existing in pg_xlog/. + This prevents a large number of *.backup files from + existing in pg_xlog/. 
@@ -4112,7 +4112,7 @@ psql -t -f fixseq.sql db1 | psql -e db1 Add per-user and per-database connection limits (Petr Jelinek) - Using ALTER USER and ALTER DATABASE, + Using ALTER USER and ALTER DATABASE, limits can now be enforced on the maximum number of sessions that can concurrently connect as a specific user or to a specific database. Setting the limit to zero disables user or database connections. @@ -4128,7 +4128,7 @@ psql -t -f fixseq.sql db1 | psql -e db1 - New system catalog pg_pltemplate allows overriding + New system catalog pg_pltemplate allows overriding obsolete procedural-language definitions in dump files (Tom) @@ -4149,63 +4149,63 @@ psql -t -f fixseq.sql db1 | psql -e db1 - Fix HAVING without any aggregate functions or - GROUP BY so that the query returns a single group (Tom) + Fix HAVING without any aggregate functions or + GROUP BY so that the query returns a single group (Tom) - Previously, such a case would treat the HAVING - clause the same as a WHERE clause. This was not per spec. + Previously, such a case would treat the HAVING + clause the same as a WHERE clause. This was not per spec. - Add USING clause to allow additional tables to be - specified to DELETE (Euler Taveira de Oliveira, Neil) + Add USING clause to allow additional tables to be + specified to DELETE (Euler Taveira de Oliveira, Neil) In prior releases, there was no clear method for specifying - additional tables to be used for joins in a DELETE - statement. UPDATE already has a FROM + additional tables to be used for joins in a DELETE + statement. UPDATE already has a FROM clause for this purpose. - Add support for \x hex escapes in backend and ecpg + Add support for \x hex escapes in backend and ecpg strings (Bruce) - This is just like the standard C \x escape syntax. + This is just like the standard C \x escape syntax. Octal escapes were already supported. - Add BETWEEN SYMMETRIC query syntax (Pavel Stehule) + Add BETWEEN SYMMETRIC query syntax (Pavel Stehule) - This feature allows BETWEEN comparisons without + This feature allows BETWEEN comparisons without requiring the first value to be less than the second. For - example, 2 BETWEEN [ASYMMETRIC] 3 AND 1 returns - false, while 2 BETWEEN SYMMETRIC 3 AND 1 returns - true. BETWEEN ASYMMETRIC was already supported. + example, 2 BETWEEN [ASYMMETRIC] 3 AND 1 returns + false, while 2 BETWEEN SYMMETRIC 3 AND 1 returns + true. BETWEEN ASYMMETRIC was already supported. - Add NOWAIT option to SELECT ... FOR - UPDATE/SHARE (Hans-Juergen Schoenig) + Add NOWAIT option to SELECT ... FOR + UPDATE/SHARE (Hans-Juergen Schoenig) - While the statement_timeout configuration + While the statement_timeout configuration parameter allows a query taking more than a certain amount of - time to be canceled, the NOWAIT option allows a + time to be canceled, the NOWAIT option allows a query to be canceled as soon as a SELECT ... FOR - UPDATE/SHARE command cannot immediately acquire a row lock. + UPDATE/SHARE command cannot immediately acquire a row lock. 
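A short sketch of the new option in use; the table name is illustrative. With NOWAIT the command raises an error immediately if the row lock is unavailable, instead of blocking until the lock is granted or statement_timeout expires.

BEGIN;
-- Errors out at once if another session already holds a lock on the row:
SELECT * FROM accounts WHERE id = 1 FOR UPDATE NOWAIT;
COMMIT;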
@@ -4233,7 +4233,7 @@ psql -t -f fixseq.sql db1 | psql -e db1 - Allow limited ALTER OWNER commands to be performed + Allow limited ALTER OWNER commands to be performed by the object owner (Stephen Frost) @@ -4248,7 +4248,7 @@ psql -t -f fixseq.sql db1 | psql -e db1 - Add ALTER object SET SCHEMA capability + Add ALTER object SET SCHEMA capability for some object types (tables, functions, types) (Bernd Helmle) @@ -4273,54 +4273,54 @@ psql -t -f fixseq.sql db1 | psql -e db1 - Allow TRUNCATE to truncate multiple tables in a + Allow TRUNCATE to truncate multiple tables in a single command (Alvaro) Because of referential integrity checks, it is not allowed to truncate a table that is part of a referential integrity - constraint. Using this new functionality, TRUNCATE + constraint. Using this new functionality, TRUNCATE can be used to truncate such tables, if both tables involved in a referential integrity constraint are truncated in a single - TRUNCATE command. + TRUNCATE command. Properly process carriage returns and line feeds in - COPY CSV mode (Andrew) + COPY CSV mode (Andrew) In release 8.0, carriage returns and line feeds in CSV - COPY TO were processed in an inconsistent manner. (This was + COPY TO were processed in an inconsistent manner. (This was documented on the TODO list.) - Add COPY WITH CSV HEADER to allow a header line as - the first line in COPY (Andrew) + Add COPY WITH CSV HEADER to allow a header line as + the first line in COPY (Andrew) - This allows handling of the common CSV usage of + This allows handling of the common CSV usage of placing the column names on the first line of the data file. For - COPY TO, the first line contains the column names, - and for COPY FROM, the first line is ignored. + COPY TO, the first line contains the column names, + and for COPY FROM, the first line is ignored. On Windows, display better sub-second precision in - EXPLAIN ANALYZE (Magnus) + EXPLAIN ANALYZE (Magnus) - Add trigger duration display to EXPLAIN ANALYZE + Add trigger duration display to EXPLAIN ANALYZE (Tom) @@ -4332,7 +4332,7 @@ psql -t -f fixseq.sql db1 | psql -e db1 - Add support for \x hex escapes in COPY + Add support for \x hex escapes in COPY (Sergey Ten) @@ -4342,11 +4342,11 @@ psql -t -f fixseq.sql db1 | psql -e db1 - Make SHOW ALL include variable descriptions + Make SHOW ALL include variable descriptions (Matthias Schmidt) - SHOW varname still only displays the variable's + SHOW varname still only displays the variable's value and does not include the description. @@ -4354,27 +4354,27 @@ psql -t -f fixseq.sql db1 | psql -e db1 Make initdb create a new standard - database called postgres, and convert utilities to - use postgres rather than template1 for + database called postgres, and convert utilities to + use postgres rather than template1 for standard lookups (Dave) - In prior releases, template1 was used both as a + In prior releases, template1 was used both as a default connection for utilities like createuser, and as a template for - new databases. This caused CREATE DATABASE to + new databases. This caused CREATE DATABASE to sometimes fail, because a new database cannot be created if anyone else is in the template database. With this change, the - default connection database is now postgres, + default connection database is now postgres, meaning it is much less likely someone will be using - template1 during CREATE DATABASE. + template1 during CREATE DATABASE. 
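For example, one-off maintenance commands can now connect to the postgres database instead of template1 (the database name sales is a placeholder):

psql -d postgres -c 'CREATE DATABASE sales;'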
Create new reindexdb command-line - utility by moving /contrib/reindexdb into the + utility by moving /contrib/reindexdb into the server (Euler Taveira de Oliveira) @@ -4389,38 +4389,38 @@ psql -t -f fixseq.sql db1 | psql -e db1 - Add MAX() and MIN() aggregates for + Add MAX() and MIN() aggregates for array types (Koju Iijima) - Fix to_date() and to_timestamp() to - behave reasonably when CC and YY fields + Fix to_date() and to_timestamp() to + behave reasonably when CC and YY fields are both used (Karel Zak) - If the format specification contains CC and a year - specification is YYY or longer, ignore the - CC. If the year specification is YY or - shorter, interpret CC as the previous century. + If the format specification contains CC and a year + specification is YYY or longer, ignore the + CC. If the year specification is YY or + shorter, interpret CC as the previous century. - Add md5(bytea) (Abhijit Menon-Sen) + Add md5(bytea) (Abhijit Menon-Sen) - md5(text) already existed. + md5(text) already existed. - Add support for numeric ^ numeric based on - power(numeric, numeric) + Add support for numeric ^ numeric based on + power(numeric, numeric) The function already existed, but there was no operator assigned @@ -4430,7 +4430,7 @@ psql -t -f fixseq.sql db1 | psql -e db1 - Fix NUMERIC modulus by properly truncating the quotient + Fix NUMERIC modulus by properly truncating the quotient during computation (Bruce) @@ -4441,29 +4441,29 @@ psql -t -f fixseq.sql db1 | psql -e db1 - Add a function lastval() (Dennis Björklund) + Add a function lastval() (Dennis Björklund) - lastval() is a simplified version of - currval(). It automatically determines the proper - sequence name based on the most recent nextval() or - setval() call performed by the current session. + lastval() is a simplified version of + currval(). It automatically determines the proper + sequence name based on the most recent nextval() or + setval() call performed by the current session. - Add to_timestamp(DOUBLE PRECISION) (Michael Glaesemann) + Add to_timestamp(DOUBLE PRECISION) (Michael Glaesemann) Converts Unix seconds since 1970 to a TIMESTAMP WITH - TIMEZONE. + TIMEZONE. - Add pg_postmaster_start_time() function (Euler + Add pg_postmaster_start_time() function (Euler Taveira de Oliveira, Matthias Schmidt) @@ -4471,11 +4471,11 @@ psql -t -f fixseq.sql db1 | psql -e db1 Allow the full use of time zone names in AT TIME - ZONE, not just the short list previously available (Magnus) + ZONE, not just the short list previously available (Magnus) Previously, only a predefined list of time zone names were - supported by AT TIME ZONE. Now any supported time + supported by AT TIME ZONE. Now any supported time zone name can be used, e.g.: SELECT CURRENT_TIMESTAMP AT TIME ZONE 'Europe/London'; @@ -4488,7 +4488,7 @@ SELECT CURRENT_TIMESTAMP AT TIME ZONE 'Europe/London'; - Add GREATEST() and LEAST() variadic + Add GREATEST() and LEAST() variadic functions (Pavel Stehule) @@ -4499,7 +4499,7 @@ SELECT CURRENT_TIMESTAMP AT TIME ZONE 'Europe/London'; - Add pg_column_size() (Mark Kirkwood) + Add pg_column_size() (Mark Kirkwood) This returns storage size of a column, which might be compressed. @@ -4508,7 +4508,7 @@ SELECT CURRENT_TIMESTAMP AT TIME ZONE 'Europe/London'; - Add regexp_replace() (Atsushi Ogawa) + Add regexp_replace() (Atsushi Ogawa) This allows regular expression replacement, like sed. 
An optional @@ -4523,8 +4523,8 @@ SELECT CURRENT_TIMESTAMP AT TIME ZONE 'Europe/London'; Previous versions sometimes returned unjustified results, like - '4 months'::interval / 5 returning '1 mon - -6 days'. + '4 months'::interval / 5 returning '1 mon + -6 days'. @@ -4534,24 +4534,24 @@ SELECT CURRENT_TIMESTAMP AT TIME ZONE 'Europe/London'; This fixes some cases in which the seconds field would be shown as - 60 instead of incrementing the higher-order fields. + 60 instead of incrementing the higher-order fields. - Add a separate day field to type interval so a one day + Add a separate day field to type interval so a one day interval can be distinguished from a 24 hour interval (Michael Glaesemann) Days that contain a daylight saving time adjustment are not 24 hours long, but typically 23 or 25 hours. This change creates a - conceptual distinction between intervals of so many days - and intervals of so many hours. Adding - 1 day to a timestamp now gives the same local time on + conceptual distinction between intervals of so many days + and intervals of so many hours. Adding + 1 day to a timestamp now gives the same local time on the next day even if a daylight saving time adjustment occurs - between, whereas adding 24 hours will give a different + between, whereas adding 24 hours will give a different local time when this happens. For example, under US DST rules: '2005-04-03 00:00:00-05' + '1 day' = '2005-04-04 00:00:00-04' @@ -4562,7 +4562,7 @@ SELECT CURRENT_TIMESTAMP AT TIME ZONE 'Europe/London'; - Add justify_days() and justify_hours() + Add justify_days() and justify_hours() (Michael Glaesemann) @@ -4574,7 +4574,7 @@ SELECT CURRENT_TIMESTAMP AT TIME ZONE 'Europe/London'; - Move /contrib/dbsize into the backend, and rename + Move /contrib/dbsize into the backend, and rename some of the functions (Dave Page, Andreas Pflug) @@ -4582,38 +4582,38 @@ SELECT CURRENT_TIMESTAMP AT TIME ZONE 'Europe/London'; - pg_tablespace_size() + pg_tablespace_size() - pg_database_size() + pg_database_size() - pg_relation_size() + pg_relation_size() - pg_total_relation_size() + pg_total_relation_size() - pg_size_pretty() + pg_size_pretty() - pg_total_relation_size() includes indexes and TOAST + pg_total_relation_size() includes indexes and TOAST tables. 
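A short sketch of the renamed size functions listed above; the orders table name is hypothetical:

SELECT pg_size_pretty(pg_relation_size('orders'));        -- table only
SELECT pg_size_pretty(pg_total_relation_size('orders'));  -- includes indexes and TOAST
SELECT pg_size_pretty(pg_database_size(current_database()));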
@@ -4628,19 +4628,19 @@ SELECT CURRENT_TIMESTAMP AT TIME ZONE 'Europe/London'; - pg_stat_file() + pg_stat_file() - pg_read_file() + pg_read_file() - pg_ls_dir() + pg_ls_dir() @@ -4650,21 +4650,21 @@ SELECT CURRENT_TIMESTAMP AT TIME ZONE 'Europe/London'; - Add pg_reload_conf() to force reloading of the + Add pg_reload_conf() to force reloading of the configuration files (Dave Page, Andreas Pflug) - Add pg_rotate_logfile() to force rotation of the + Add pg_rotate_logfile() to force rotation of the server log file (Dave Page, Andreas Pflug) - Change pg_stat_* views to include TOAST tables (Tom) + Change pg_stat_* views to include TOAST tables (Tom) @@ -4686,25 +4686,25 @@ SELECT CURRENT_TIMESTAMP AT TIME ZONE 'Europe/London'; - UNICODE is now UTF8 + UNICODE is now UTF8 - ALT is now WIN866 + ALT is now WIN866 - WIN is now WIN1251 + WIN is now WIN1251 - TCVN is now WIN1258 + TCVN is now WIN1258 @@ -4718,17 +4718,17 @@ SELECT CURRENT_TIMESTAMP AT TIME ZONE 'Europe/London'; - Add support for WIN1252 encoding (Roland Volkmann) + Add support for WIN1252 encoding (Roland Volkmann) - Add support for four-byte UTF8 characters (John + Add support for four-byte UTF8 characters (John Hansen) - Previously only one, two, and three-byte UTF8 characters + Previously only one, two, and three-byte UTF8 characters were supported. This is particularly important for support for some Chinese character sets. @@ -4736,8 +4736,8 @@ SELECT CURRENT_TIMESTAMP AT TIME ZONE 'Europe/London'; - Allow direct conversion between EUC_JP and - SJIS to improve performance (Atsushi Ogawa) + Allow direct conversion between EUC_JP and + SJIS to improve performance (Atsushi Ogawa) @@ -4761,14 +4761,14 @@ SELECT CURRENT_TIMESTAMP AT TIME ZONE 'Europe/London'; - Fix ALTER LANGUAGE RENAME (Sergey Yatskevich) + Fix ALTER LANGUAGE RENAME (Sergey Yatskevich) Allow function characteristics, like strictness and volatility, - to be modified via ALTER FUNCTION (Neil) + to be modified via ALTER FUNCTION (Neil) @@ -4780,14 +4780,14 @@ SELECT CURRENT_TIMESTAMP AT TIME ZONE 'Europe/London'; - Allow SQL and PL/pgSQL functions to use OUT and - INOUT parameters (Tom) + Allow SQL and PL/pgSQL functions to use OUT and + INOUT parameters (Tom) - OUT is an alternate way for a function to return - values. Instead of using RETURN, values can be - returned by assigning to parameters declared as OUT or - INOUT. This is notationally simpler in some cases, + OUT is an alternate way for a function to return + values. Instead of using RETURN, values can be + returned by assigning to parameters declared as OUT or + INOUT. This is notationally simpler in some cases, particularly so when multiple values need to be returned. While returning multiple values from a function was possible in previous releases, this greatly simplifies the @@ -4798,7 +4798,7 @@ SELECT CURRENT_TIMESTAMP AT TIME ZONE 'Europe/London'; - Move language handler functions into the pg_catalog schema + Move language handler functions into the pg_catalog schema This makes it easier to drop the public schema if desired. 
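To illustrate the OUT/INOUT parameter support described above, a minimal SQL-function sketch (all names are illustrative only):

CREATE FUNCTION sum_and_product(x int, y int, OUT s int, OUT p int) AS $$
    SELECT $1 + $2, $1 * $2;
$$ LANGUAGE SQL;

SELECT * FROM sum_and_product(3, 4);   -- returns one row with columns s and p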
@@ -4831,7 +4831,7 @@ SELECT CURRENT_TIMESTAMP AT TIME ZONE 'Europe/London'; - Check function syntax at CREATE FUNCTION time, + Check function syntax at CREATE FUNCTION time, rather than at runtime (Neil) @@ -4842,19 +4842,19 @@ SELECT CURRENT_TIMESTAMP AT TIME ZONE 'Europe/London'; - Allow OPEN to open non-SELECT queries - like EXPLAIN and SHOW (Tom) + Allow OPEN to open non-SELECT queries + like EXPLAIN and SHOW (Tom) - No longer require functions to issue a RETURN + No longer require functions to issue a RETURN statement (Tom) - This is a byproduct of the newly added OUT and - INOUT functionality. RETURN can + This is a byproduct of the newly added OUT and + INOUT functionality. RETURN can be omitted when it is not needed to provide the function's return value. @@ -4862,21 +4862,21 @@ SELECT CURRENT_TIMESTAMP AT TIME ZONE 'Europe/London'; - Add support for an optional INTO clause to - PL/pgSQL's EXECUTE statement (Pavel Stehule, Neil) + Add support for an optional INTO clause to + PL/pgSQL's EXECUTE statement (Pavel Stehule, Neil) - Make CREATE TABLE AS set ROW_COUNT (Tom) + Make CREATE TABLE AS set ROW_COUNT (Tom) - Define SQLSTATE and SQLERRM to return - the SQLSTATE and error message of the current + Define SQLSTATE and SQLERRM to return + the SQLSTATE and error message of the current exception (Pavel Stehule, Neil) @@ -4886,14 +4886,14 @@ SELECT CURRENT_TIMESTAMP AT TIME ZONE 'Europe/London'; - Allow the parameters to the RAISE statement to be + Allow the parameters to the RAISE statement to be expressions (Pavel Stehule, Neil) - Add a loop CONTINUE statement (Pavel Stehule, Neil) + Add a loop CONTINUE statement (Pavel Stehule, Neil) @@ -4917,7 +4917,7 @@ SELECT CURRENT_TIMESTAMP AT TIME ZONE 'Europe/London'; Menon-Sen) - This allows functions to use return_next() to avoid + This allows functions to use return_next() to avoid building the entire result set in memory. @@ -4927,16 +4927,16 @@ SELECT CURRENT_TIMESTAMP AT TIME ZONE 'Europe/London'; Allow one-row-at-a-time retrieval of query results (Abhijit Menon-Sen) - This allows functions to use spi_query() and - spi_fetchrow() to avoid accumulating the entire + This allows functions to use spi_query() and + spi_fetchrow() to avoid accumulating the entire result set in memory. - Force PL/Perl to handle strings as UTF8 if the - server encoding is UTF8 (David Kamholz) + Force PL/Perl to handle strings as UTF8 if the + server encoding is UTF8 (David Kamholz) @@ -4963,14 +4963,14 @@ SELECT CURRENT_TIMESTAMP AT TIME ZONE 'Europe/London'; - Allow Perl nonfatal warnings to generate NOTICE + Allow Perl nonfatal warnings to generate NOTICE messages (Andrew) - Allow Perl's strict mode to be enabled (Andrew) + Allow Perl's strict mode to be enabled (Andrew) @@ -4979,12 +4979,12 @@ SELECT CURRENT_TIMESTAMP AT TIME ZONE 'Europe/London'; - <application>psql</> Changes + <application>psql</application> Changes - Add \set ON_ERROR_ROLLBACK to allow statements in + Add \set ON_ERROR_ROLLBACK to allow statements in a transaction to error without affecting the rest of the transaction (Greg Sabino Mullane) @@ -4996,8 +4996,8 @@ SELECT CURRENT_TIMESTAMP AT TIME ZONE 'Europe/London'; - Add support for \x hex strings in - psql variables (Bruce) + Add support for \x hex strings in + psql variables (Bruce) Octal escapes were already supported. 
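Several of the PL/pgSQL additions above (the optional INTO clause for EXECUTE, SQLSTATE/SQLERRM, and expression arguments to RAISE) can be seen together in a small sketch like this; the function and table names are hypothetical:

CREATE OR REPLACE FUNCTION count_rows(tabname text) RETURNS bigint AS $$
DECLARE
    n bigint;
BEGIN
    EXECUTE 'SELECT count(*) FROM ' || tabname INTO n;   -- optional INTO clause
    RETURN n;
EXCEPTION WHEN OTHERS THEN
    RAISE NOTICE 'counting % failed: % (%)', tabname, SQLERRM, SQLSTATE;
    RETURN 0;
END;
$$ LANGUAGE plpgsql;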
@@ -5006,7 +5006,7 @@ SELECT CURRENT_TIMESTAMP AT TIME ZONE 'Europe/London'; - Add support for troff -ms output format (Roger + Add support for troff -ms output format (Roger Leigh) @@ -5014,7 +5014,7 @@ SELECT CURRENT_TIMESTAMP AT TIME ZONE 'Europe/London'; Allow the history file location to be controlled by - HISTFILE (Andreas Seltenreich) + HISTFILE (Andreas Seltenreich) This allows configuration of per-database history storage. @@ -5023,14 +5023,14 @@ SELECT CURRENT_TIMESTAMP AT TIME ZONE 'Europe/London'; - Prevent \x (expanded mode) from affecting - the output of \d tablename (Neil) + Prevent \x (expanded mode) from affecting + the output of \d tablename (Neil) - Add option to psql to log sessions (Lorne Sunley) @@ -5041,44 +5041,44 @@ SELECT CURRENT_TIMESTAMP AT TIME ZONE 'Europe/London'; - Make \d show the tablespaces of indexes (Qingqing + Make \d show the tablespaces of indexes (Qingqing Zhou) - Allow psql help (\h) to + Allow psql help (\h) to make a best guess on the proper help information (Greg Sabino Mullane) - This allows the user to just add \h to the front of + This allows the user to just add \h to the front of the syntax error query and get help on the supported syntax. Previously any additional query text beyond the command name - had to be removed to use \h. + had to be removed to use \h. - Add \pset numericlocale to allow numbers to be + Add \pset numericlocale to allow numbers to be output in a locale-aware format (Eugen Nedelcu) - For example, using C locale 100000 would - be output as 100,000.0 while a European locale might - output this value as 100.000,0. + For example, using C locale 100000 would + be output as 100,000.0 while a European locale might + output this value as 100.000,0. Make startup banner show both server version number and - psql's version number, when they are different (Bruce) + psql's version number, when they are different (Bruce) - Also, a warning will be shown if the server and psql + Also, a warning will be shown if the server and psql are from different major releases. @@ -5088,13 +5088,13 @@ SELECT CURRENT_TIMESTAMP AT TIME ZONE 'Europe/London'; - <application>pg_dump</> Changes + <application>pg_dump</application> Changes - Add This allows just the objects in a specified schema to be restored. @@ -5103,18 +5103,18 @@ SELECT CURRENT_TIMESTAMP AT TIME ZONE 'Europe/London'; - Allow pg_dump to dump large objects even in + Allow pg_dump to dump large objects even in text mode (Tom) With this change, large objects are now always dumped; the former - switch is a no-op. - Allow pg_dump to dump a consistent snapshot of + Allow pg_dump to dump a consistent snapshot of large objects (Tom) @@ -5127,7 +5127,7 @@ SELECT CURRENT_TIMESTAMP AT TIME ZONE 'Europe/London'; - Add @@ -5139,14 +5139,14 @@ SELECT CURRENT_TIMESTAMP AT TIME ZONE 'Europe/London'; - Rely on pg_pltemplate for procedural languages (Tom) + Rely on pg_pltemplate for procedural languages (Tom) If the call handler for a procedural language is in the - pg_catalog schema, pg_dump does not + pg_catalog schema, pg_dump does not dump the handler. Instead, it dumps the language using just - CREATE LANGUAGE name, - relying on the pg_pltemplate catalog to provide + CREATE LANGUAGE name, + relying on the pg_pltemplate catalog to provide the language's creation parameters at load time. 
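In other words, a dump now recreates such a language with a bare statement like the following, and the creation details are supplied from pg_pltemplate at load time:

CREATE LANGUAGE plpgsql;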
@@ -5161,15 +5161,15 @@ SELECT CURRENT_TIMESTAMP AT TIME ZONE 'Europe/London'; - Add a PGPASSFILE environment variable to specify the + Add a PGPASSFILE environment variable to specify the password file's filename (Andrew) - Add lo_create(), that is similar to - lo_creat() but allows the OID of the large object + Add lo_create(), that is similar to + lo_creat() but allows the OID of the large object to be specified (Tom) @@ -5191,7 +5191,7 @@ SELECT CURRENT_TIMESTAMP AT TIME ZONE 'Europe/London'; - Fix pgxs to support building against a relocated + Fix pgxs to support building against a relocated installation @@ -5238,10 +5238,10 @@ SELECT CURRENT_TIMESTAMP AT TIME ZONE 'Europe/London'; - Allow pg_config to be compiled using MSVC (Andrew) + Allow pg_config to be compiled using MSVC (Andrew) - This is required to build DBD::Pg using MSVC. + This is required to build DBD::Pg using MSVC. @@ -5264,15 +5264,15 @@ SELECT CURRENT_TIMESTAMP AT TIME ZONE 'Europe/London'; - Modify postgresql.conf to use documentation defaults - on/off rather than - true/false (Bruce) + Modify postgresql.conf to use documentation defaults + on/off rather than + true/false (Bruce) - Enhance pg_config to be able to report more + Enhance pg_config to be able to report more build-time values (Tom) @@ -5304,11 +5304,11 @@ SELECT CURRENT_TIMESTAMP AT TIME ZONE 'Europe/London'; - In previous releases, gist.h contained both the + In previous releases, gist.h contained both the public GiST API (intended for use by authors of GiST index implementations) as well as some private declarations used by the implementation of GiST itself. The latter have been moved - to a separate file, gist_private.h. Most GiST + to a separate file, gist_private.h. Most GiST index implementations should be unaffected. @@ -5320,10 +5320,10 @@ SELECT CURRENT_TIMESTAMP AT TIME ZONE 'Europe/London'; GiST methods are now always invoked in a short-lived memory - context. Therefore, memory allocated via palloc() + context. Therefore, memory allocated via palloc() will be reclaimed automatically, so GiST index implementations do not need to manually release allocated memory via - pfree(). + pfree(). @@ -5336,7 +5336,7 @@ SELECT CURRENT_TIMESTAMP AT TIME ZONE 'Europe/London'; - Add /contrib/pg_buffercache contrib module (Mark + Add /contrib/pg_buffercache contrib module (Mark Kirkwood) @@ -5347,28 +5347,28 @@ SELECT CURRENT_TIMESTAMP AT TIME ZONE 'Europe/London'; - Remove /contrib/array because it is obsolete (Tom) + Remove /contrib/array because it is obsolete (Tom) - Clean up the /contrib/lo module (Tom) + Clean up the /contrib/lo module (Tom) - Move /contrib/findoidjoins to - /src/tools (Tom) + Move /contrib/findoidjoins to + /src/tools (Tom) - Remove the <<, >>, - &<, and &> operators from - /contrib/cube + Remove the <<, >>, + &<, and &> operators from + /contrib/cube These operators were not useful. 
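As a sketch of what the /contrib/pg_buffercache module mentioned above exposes, a query along these lines reports which relations of the current database occupy the most shared buffers (treat the exact join as illustrative of the module's columns):

SELECT c.relname, count(*) AS buffers
FROM pg_buffercache b
     JOIN pg_class c ON b.relfilenode = c.relfilenode
     JOIN pg_database d ON b.reldatabase = d.oid AND d.datname = current_database()
GROUP BY c.relname
ORDER BY buffers DESC
LIMIT 10;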
@@ -5377,13 +5377,13 @@ SELECT CURRENT_TIMESTAMP AT TIME ZONE 'Europe/London'; - Improve /contrib/btree_gist (Janko Richter) + Improve /contrib/btree_gist (Janko Richter) - Improve /contrib/pgbench (Tomoaki Sato, Tatsuo) + Improve /contrib/pgbench (Tomoaki Sato, Tatsuo) There is now a facility for testing with SQL command scripts given @@ -5393,7 +5393,7 @@ SELECT CURRENT_TIMESTAMP AT TIME ZONE 'Europe/London'; - Improve /contrib/pgcrypto (Marko Kreen) + Improve /contrib/pgcrypto (Marko Kreen) @@ -5421,16 +5421,16 @@ SELECT CURRENT_TIMESTAMP AT TIME ZONE 'Europe/London'; - Take build parameters (OpenSSL, zlib) from configure result + Take build parameters (OpenSSL, zlib) from configure result - There is no need to edit the Makefile anymore. + There is no need to edit the Makefile anymore. - Remove support for libmhash and libmcrypt + Remove support for libmhash and libmcrypt diff --git a/doc/src/sgml/release-8.2.sgml b/doc/src/sgml/release-8.2.sgml index c00cbd3467..d87c5bbd46 100644 --- a/doc/src/sgml/release-8.2.sgml +++ b/doc/src/sgml/release-8.2.sgml @@ -12,11 +12,11 @@ This release contains a variety of fixes from 8.2.22. For information about new features in the 8.2 major release, see - . + . - This is expected to be the last PostgreSQL release + This is expected to be the last PostgreSQL release in the 8.2.X series. Users are encouraged to update to a newer release branch soon. @@ -30,14 +30,14 @@ However, a longstanding error was discovered in the definition of the - information_schema.referential_constraints view. If you + information_schema.referential_constraints view. If you rely on correct results from that view, you should replace its definition as explained in the first changelog item below. Also, if you are upgrading from a version earlier than 8.2.14, - see . + see . @@ -49,7 +49,7 @@ - Fix bugs in information_schema.referential_constraints view + Fix bugs in information_schema.referential_constraints view (Tom Lane) @@ -62,13 +62,13 @@ - Since the view definition is installed by initdb, + Since the view definition is installed by initdb, merely upgrading will not fix the problem. If you need to fix this in an existing installation, you can (as a superuser) drop the - information_schema schema then re-create it by sourcing - SHAREDIR/information_schema.sql. - (Run pg_config --sharedir if you're uncertain where - SHAREDIR is.) This must be repeated in each database + information_schema schema then re-create it by sourcing + SHAREDIR/information_schema.sql. + (Run pg_config --sharedir if you're uncertain where + SHAREDIR is.) This must be repeated in each database to be fixed. @@ -76,12 +76,12 @@ Fix TOAST-related data corruption during CREATE TABLE dest AS - SELECT * FROM src or INSERT INTO dest SELECT * FROM src + SELECT * FROM src or INSERT INTO dest SELECT * FROM src (Tom Lane) - If a table has been modified by ALTER TABLE ADD COLUMN, + If a table has been modified by ALTER TABLE ADD COLUMN, attempts to copy its data verbatim to another table could produce corrupt results in certain corner cases. The problem can only manifest in this precise form in 8.4 and later, @@ -98,22 +98,22 @@ The typical symptom was transient errors like missing chunk - number 0 for toast value NNNNN in pg_toast_2619, where the cited + number 0 for toast value NNNNN in pg_toast_2619, where the cited toast table would always belong to a system catalog. 
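The information_schema repair procedure described above amounts to something like the following, run as a superuser in each affected database; the SHAREDIR path shown is only an example, use the one reported by pg_config --sharedir:

DROP SCHEMA information_schema CASCADE;
\i /usr/local/pgsql/share/information_schema.sql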
- Improve locale support in money type's input and output + Improve locale support in money type's input and output (Tom Lane) Aside from not supporting all standard - lc_monetary + lc_monetary formatting options, the input and output functions were inconsistent, - meaning there were locales in which dumped money values could + meaning there were locales in which dumped money values could not be re-read. @@ -121,15 +121,15 @@ Don't let transform_null_equals - affect CASE foo WHEN NULL ... constructs + linkend="guc-transform-null-equals">transform_null_equals + affect CASE foo WHEN NULL ... constructs (Heikki Linnakangas) - transform_null_equals is only supposed to affect - foo = NULL expressions written directly by the user, not - equality checks generated internally by this form of CASE. + transform_null_equals is only supposed to affect + foo = NULL expressions written directly by the user, not + equality checks generated internally by this form of CASE. @@ -141,14 +141,14 @@ For a cascading foreign key that references its own table, a row update - will fire both the ON UPDATE trigger and the - CHECK trigger as one event. The ON UPDATE - trigger must execute first, else the CHECK will check a + will fire both the ON UPDATE trigger and the + CHECK trigger as one event. The ON UPDATE + trigger must execute first, else the CHECK will check a non-final state of the row and possibly throw an inappropriate error. However, the firing order of these triggers is determined by their names, which generally sort in creation order since the triggers have auto-generated names following the convention - RI_ConstraintTrigger_NNNN. A proper fix would require + RI_ConstraintTrigger_NNNN. A proper fix would require modifying that convention, which we will do in 9.2, but it seems risky to change it in existing releases. So this patch just changes the creation order of the triggers. Users encountering this type of error @@ -159,7 +159,7 @@ - Preserve blank lines within commands in psql's command + Preserve blank lines within commands in psql's command history (Robert Haas) @@ -171,7 +171,7 @@ - Use the preferred version of xsubpp to build PL/Perl, + Use the preferred version of xsubpp to build PL/Perl, not necessarily the operating system's main copy (David Wheeler and Alex Hunsaker) @@ -179,7 +179,7 @@ - Honor query cancel interrupts promptly in pgstatindex() + Honor query cancel interrupts promptly in pgstatindex() (Robert Haas) @@ -210,15 +210,15 @@ - Map Central America Standard Time to CST6, not - CST6CDT, because DST is generally not observed anywhere in + Map Central America Standard Time to CST6, not + CST6CDT, because DST is generally not observed anywhere in Central America. - Update time zone data files to tzdata release 2011n + Update time zone data files to tzdata release 2011n for DST law changes in Brazil, Cuba, Fiji, Palestine, Russia, and Samoa; also historical corrections for Alaska and British East Africa. @@ -240,11 +240,11 @@ This release contains a variety of fixes from 8.2.21. For information about new features in the 8.2 major release, see - . + . - The PostgreSQL community will stop releasing updates + The PostgreSQL community will stop releasing updates for the 8.2.X release series in December 2011. Users are encouraged to update to a newer release branch soon. @@ -255,7 +255,7 @@ A dump/restore is not required for those running 8.2.X. However, if you are upgrading from a version earlier than 8.2.14, - see . + see . 
@@ -279,7 +279,7 @@ - Avoid possibly accessing off the end of memory in ANALYZE + Avoid possibly accessing off the end of memory in ANALYZE (Noah Misch) @@ -297,7 +297,7 @@ There was a window wherein a new backend process could read a stale init file but miss the inval messages that would tell it the data is stale. The result would be bizarre failures in catalog accesses, typically - could not read block 0 in file ... later during startup. + could not read block 0 in file ... later during startup. @@ -346,13 +346,13 @@ - Fix dump bug for VALUES in a view (Tom Lane) + Fix dump bug for VALUES in a view (Tom Lane) - Disallow SELECT FOR UPDATE/SHARE on sequences (Tom Lane) + Disallow SELECT FOR UPDATE/SHARE on sequences (Tom Lane) @@ -370,18 +370,18 @@ Fix portability bugs in use of credentials control messages for - peer authentication (Tom Lane) + peer authentication (Tom Lane) - Fix typo in pg_srand48 seed initialization (Andres Freund) + Fix typo in pg_srand48 seed initialization (Andres Freund) This led to failure to use all bits of the provided seed. This function - is not used on most platforms (only those without srandom), + is not used on most platforms (only those without srandom), and the potential security exposure from a less-random-than-expected seed seems minimal in any case. @@ -389,25 +389,25 @@ - Avoid integer overflow when the sum of LIMIT and - OFFSET values exceeds 2^63 (Heikki Linnakangas) + Avoid integer overflow when the sum of LIMIT and + OFFSET values exceeds 2^63 (Heikki Linnakangas) - Add overflow checks to int4 and int8 versions of - generate_series() (Robert Haas) + Add overflow checks to int4 and int8 versions of + generate_series() (Robert Haas) - Fix trailing-zero removal in to_char() (Marti Raudsepp) + Fix trailing-zero removal in to_char() (Marti Raudsepp) - In a format with FM and no digit positions + In a format with FM and no digit positions after the decimal point, zeroes to the left of the decimal point could be removed incorrectly. @@ -415,41 +415,41 @@ - Fix pg_size_pretty() to avoid overflow for inputs close to + Fix pg_size_pretty() to avoid overflow for inputs close to 2^63 (Tom Lane) - Fix psql's counting of script file line numbers during - COPY from a different file (Tom Lane) + Fix psql's counting of script file line numbers during + COPY from a different file (Tom Lane) - Fix pg_restore's direct-to-database mode for - standard_conforming_strings (Tom Lane) + Fix pg_restore's direct-to-database mode for + standard_conforming_strings (Tom Lane) - pg_restore could emit incorrect commands when restoring + pg_restore could emit incorrect commands when restoring directly to a database server from an archive file that had been made - with standard_conforming_strings set to on. + with standard_conforming_strings set to on. - Fix write-past-buffer-end and memory leak in libpq's + Fix write-past-buffer-end and memory leak in libpq's LDAP service lookup code (Albe Laurenz) - In libpq, avoid failures when using nonblocking I/O + In libpq, avoid failures when using nonblocking I/O and an SSL connection (Martin Pihlak, Tom Lane) @@ -461,14 +461,14 @@ - In particular, the response to a server report of fork() + In particular, the response to a server report of fork() failure during SSL connection startup is now saner. 
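For instance, the pg_size_pretty() overflow fix above means a call with an argument near 2^63 now returns a sensible result instead of overflowing:

SELECT pg_size_pretty(9223372036854775807);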
- Make ecpglib write double values with 15 digits + Make ecpglib write double values with 15 digits precision (Akira Kurosawa) @@ -480,7 +480,7 @@ - contrib/pg_crypto's blowfish encryption code could give + contrib/pg_crypto's blowfish encryption code could give wrong results on platforms where char is signed (which is most), leading to encrypted passwords being weaker than they should be. @@ -488,13 +488,13 @@ - Fix memory leak in contrib/seg (Heikki Linnakangas) + Fix memory leak in contrib/seg (Heikki Linnakangas) - Fix pgstatindex() to give consistent results for empty + Fix pgstatindex() to give consistent results for empty indexes (Tom Lane) @@ -526,7 +526,7 @@ - Update time zone data files to tzdata release 2011i + Update time zone data files to tzdata release 2011i for DST law changes in Canada, Egypt, Russia, Samoa, and South Sudan. @@ -547,7 +547,7 @@ This release contains a variety of fixes from 8.2.20. For information about new features in the 8.2 major release, see - . + . @@ -556,7 +556,7 @@ A dump/restore is not required for those running 8.2.X. However, if you are upgrading from a version earlier than 8.2.14, - see . + see . @@ -582,15 +582,15 @@ - Fix dangling-pointer problem in BEFORE ROW UPDATE trigger + Fix dangling-pointer problem in BEFORE ROW UPDATE trigger handling when there was a concurrent update to the target tuple (Tom Lane) This bug has been observed to result in intermittent cannot - extract system attribute from virtual tuple failures while trying to - do UPDATE RETURNING ctid. There is a very small probability + extract system attribute from virtual tuple failures while trying to + do UPDATE RETURNING ctid. There is a very small probability of more serious errors, such as generating incorrect index entries for the updated tuple. @@ -598,13 +598,13 @@ - Disallow DROP TABLE when there are pending deferred trigger + Disallow DROP TABLE when there are pending deferred trigger events for the table (Tom Lane) - Formerly the DROP would go through, leading to - could not open relation with OID nnn errors when the + Formerly the DROP would go through, leading to + could not open relation with OID nnn errors when the triggers were eventually fired. @@ -617,7 +617,7 @@ - Fix pg_restore to cope with long lines (over 1KB) in + Fix pg_restore to cope with long lines (over 1KB) in TOC files (Tom Lane) @@ -649,14 +649,14 @@ - Fix path separator used by pg_regress on Cygwin + Fix path separator used by pg_regress on Cygwin (Andrew Dunstan) - Update time zone data files to tzdata release 2011f + Update time zone data files to tzdata release 2011f for DST law changes in Chile, Cuba, Falkland Islands, Morocco, Samoa, and Turkey; also historical corrections for South Australia, Alaska, and Hawaii. @@ -679,7 +679,7 @@ This release contains a variety of fixes from 8.2.19. For information about new features in the 8.2 major release, see - . + . @@ -688,7 +688,7 @@ A dump/restore is not required for those running 8.2.X. However, if you are upgrading from a version earlier than 8.2.14, - see . + see . 
@@ -700,15 +700,15 @@ - Avoid failures when EXPLAIN tries to display a simple-form - CASE expression (Tom Lane) + Avoid failures when EXPLAIN tries to display a simple-form + CASE expression (Tom Lane) - If the CASE's test expression was a constant, the planner - could simplify the CASE into a form that confused the + If the CASE's test expression was a constant, the planner + could simplify the CASE into a form that confused the expression-display code, resulting in unexpected CASE WHEN - clause errors. + clause errors. @@ -733,44 +733,44 @@ - The date type supports a wider range of dates than can be - represented by the timestamp types, but the planner assumed it + The date type supports a wider range of dates than can be + represented by the timestamp types, but the planner assumed it could always convert a date to timestamp with impunity. - Fix pg_restore's text output for large objects (BLOBs) - when standard_conforming_strings is on (Tom Lane) + Fix pg_restore's text output for large objects (BLOBs) + when standard_conforming_strings is on (Tom Lane) Although restoring directly to a database worked correctly, string - escaping was incorrect if pg_restore was asked for - SQL text output and standard_conforming_strings had been + escaping was incorrect if pg_restore was asked for + SQL text output and standard_conforming_strings had been enabled in the source database. - Fix erroneous parsing of tsquery values containing + Fix erroneous parsing of tsquery values containing ... & !(subexpression) | ... (Tom Lane) Queries containing this combination of operators were not executed - correctly. The same error existed in contrib/intarray's - query_int type and contrib/ltree's - ltxtquery type. + correctly. The same error existed in contrib/intarray's + query_int type and contrib/ltree's + ltxtquery type. - Fix buffer overrun in contrib/intarray's input function - for the query_int type (Apple) + Fix buffer overrun in contrib/intarray's input function + for the query_int type (Apple) @@ -782,16 +782,16 @@ - Fix bug in contrib/seg's GiST picksplit algorithm + Fix bug in contrib/seg's GiST picksplit algorithm (Alexander Korotkov) This could result in considerable inefficiency, though not actually - incorrect answers, in a GiST index on a seg column. - If you have such an index, consider REINDEXing it after + incorrect answers, in a GiST index on a seg column. + If you have such an index, consider REINDEXing it after installing this update. (This is identical to the bug that was fixed in - contrib/cube in the previous update.) + contrib/cube in the previous update.) @@ -811,7 +811,7 @@ This release contains a variety of fixes from 8.2.18. For information about new features in the 8.2 major release, see - . + . @@ -820,7 +820,7 @@ A dump/restore is not required for those running 8.2.X. However, if you are upgrading from a version earlier than 8.2.14, - see . + see . @@ -833,17 +833,17 @@ Force the default - wal_sync_method - to be fdatasync on Linux (Tom Lane, Marti Raudsepp) + wal_sync_method + to be fdatasync on Linux (Tom Lane, Marti Raudsepp) - The default on Linux has actually been fdatasync for many - years, but recent kernel changes caused PostgreSQL to - choose open_datasync instead. This choice did not result + The default on Linux has actually been fdatasync for many + years, but recent kernel changes caused PostgreSQL to + choose open_datasync instead. 
This choice did not result in any performance improvement, and caused outright failures on - certain filesystems, notably ext4 with the - data=journal mount option. + certain filesystems, notably ext4 with the + data=journal mount option. @@ -853,7 +853,7 @@ - This could result in bad buffer id: 0 failures or + This could result in bad buffer id: 0 failures or corruption of index contents during replication. @@ -867,19 +867,19 @@ - Add support for detecting register-stack overrun on IA64 + Add support for detecting register-stack overrun on IA64 (Tom Lane) - The IA64 architecture has two hardware stacks. Full + The IA64 architecture has two hardware stacks. Full prevention of stack-overrun failures requires checking both. - Add a check for stack overflow in copyObject() (Tom Lane) + Add a check for stack overflow in copyObject() (Tom Lane) @@ -895,7 +895,7 @@ - It is possible to have a concurrent page split in a + It is possible to have a concurrent page split in a temporary index, if for example there is an open cursor scanning the index when an insertion is done. GiST failed to detect this case and hence could deliver wrong results when execution of the cursor @@ -905,7 +905,7 @@ - Avoid memory leakage while ANALYZE'ing complex index + Avoid memory leakage while ANALYZE'ing complex index expressions (Tom Lane) @@ -917,14 +917,14 @@ - An index declared like create index i on t (foo(t.*)) + An index declared like create index i on t (foo(t.*)) would not automatically get dropped when its table was dropped. - Do not inline a SQL function with multiple OUT + Do not inline a SQL function with multiple OUT parameters (Tom Lane) @@ -936,15 +936,15 @@ - Behave correctly if ORDER BY, LIMIT, - FOR UPDATE, or WITH is attached to the - VALUES part of INSERT ... VALUES (Tom Lane) + Behave correctly if ORDER BY, LIMIT, + FOR UPDATE, or WITH is attached to the + VALUES part of INSERT ... VALUES (Tom Lane) - Fix constant-folding of COALESCE() expressions (Tom Lane) + Fix constant-folding of COALESCE() expressions (Tom Lane) @@ -955,11 +955,11 @@ - Add print functionality for InhRelation nodes (Tom Lane) + Add print functionality for InhRelation nodes (Tom Lane) - This avoids a failure when debug_print_parse is enabled + This avoids a failure when debug_print_parse is enabled and certain types of query are executed. @@ -978,14 +978,14 @@ - Fix PL/pgSQL's handling of simple + Fix PL/pgSQL's handling of simple expressions to not fail in recursion or error-recovery cases (Tom Lane) - Fix PL/Python's handling of set-returning functions + Fix PL/Python's handling of set-returning functions (Jan Urbanski) @@ -997,22 +997,22 @@ - Fix bug in contrib/cube's GiST picksplit algorithm + Fix bug in contrib/cube's GiST picksplit algorithm (Alexander Korotkov) This could result in considerable inefficiency, though not actually - incorrect answers, in a GiST index on a cube column. - If you have such an index, consider REINDEXing it after + incorrect answers, in a GiST index on a cube column. + If you have such an index, consider REINDEXing it after installing this update. 
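The recommended follow-up for an affected index is simply a reindex; the index name here is hypothetical:

REINDEX INDEX my_cube_gist_idx;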
- Don't emit identifier will be truncated notices in - contrib/dblink except when creating new connections + Don't emit identifier will be truncated notices in + contrib/dblink except when creating new connections (Itagaki Takahiro) @@ -1020,20 +1020,20 @@ Fix potential coredump on missing public key in - contrib/pgcrypto (Marti Raudsepp) + contrib/pgcrypto (Marti Raudsepp) - Fix memory leak in contrib/xml2's XPath query functions + Fix memory leak in contrib/xml2's XPath query functions (Tom Lane) - Update time zone data files to tzdata release 2010o + Update time zone data files to tzdata release 2010o for DST law changes in Fiji and Samoa; also historical corrections for Hong Kong. @@ -1055,7 +1055,7 @@ This release contains a variety of fixes from 8.2.17. For information about new features in the 8.2 major release, see - . + . @@ -1064,7 +1064,7 @@ A dump/restore is not required for those running 8.2.X. However, if you are upgrading from a version earlier than 8.2.14, - see . + see . @@ -1084,7 +1084,7 @@ This change prevents security problems that can be caused by subverting Perl or Tcl code that will be executed later in the same session under another SQL user identity (for example, within a SECURITY - DEFINER function). Most scripting languages offer numerous ways that + DEFINER function). Most scripting languages offer numerous ways that that might be done, such as redefining standard functions or operators called by the target function. Without this change, any SQL user with Perl or Tcl language usage rights can do essentially anything with the @@ -1113,7 +1113,7 @@ - Prevent possible crashes in pg_get_expr() by disallowing + Prevent possible crashes in pg_get_expr() by disallowing it from being called with an argument that is not one of the system catalog columns it's intended to be used with (Heikki Linnakangas, Tom Lane) @@ -1135,7 +1135,7 @@ - Treat exit code 128 (ERROR_WAIT_NO_CHILDREN) as non-fatal on + Treat exit code 128 (ERROR_WAIT_NO_CHILDREN) as non-fatal on Windows (Magnus Hagander) @@ -1149,7 +1149,7 @@ - Fix possible duplicate scans of UNION ALL member relations + Fix possible duplicate scans of UNION ALL member relations (Tom Lane) @@ -1201,7 +1201,7 @@ Take care to fsync the contents of lockfiles (both - postmaster.pid and the socket lockfile) while writing them + postmaster.pid and the socket lockfile) while writing them (Tom Lane) @@ -1227,7 +1227,7 @@ - Fix log_line_prefix's %i escape, + Fix log_line_prefix's %i escape, which could produce junk early in backend startup (Tom Lane) @@ -1235,28 +1235,28 @@ Fix possible data corruption in ALTER TABLE ... SET - TABLESPACE when archiving is enabled (Jeff Davis) + TABLESPACE when archiving is enabled (Jeff Davis) - Allow CREATE DATABASE and ALTER DATABASE ... SET - TABLESPACE to be interrupted by query-cancel (Guillaume Lelarge) + Allow CREATE DATABASE and ALTER DATABASE ... 
SET + TABLESPACE to be interrupted by query-cancel (Guillaume Lelarge) In PL/Python, defend against null pointer results from - PyCObject_AsVoidPtr and PyCObject_FromVoidPtr + PyCObject_AsVoidPtr and PyCObject_FromVoidPtr (Peter Eisentraut) - Improve contrib/dblink's handling of tables containing + Improve contrib/dblink's handling of tables containing dropped columns (Tom Lane) @@ -1264,30 +1264,30 @@ Fix connection leak after duplicate connection name - errors in contrib/dblink (Itagaki Takahiro) + errors in contrib/dblink (Itagaki Takahiro) - Fix contrib/dblink to handle connection names longer than + Fix contrib/dblink to handle connection names longer than 62 bytes correctly (Itagaki Takahiro) - Add hstore(text, text) - function to contrib/hstore (Robert Haas) + Add hstore(text, text) + function to contrib/hstore (Robert Haas) This function is the recommended substitute for the now-deprecated - => operator. It was back-patched so that future-proofed + => operator. It was back-patched so that future-proofed code can be used with older server versions. Note that the patch will - be effective only after contrib/hstore is installed or + be effective only after contrib/hstore is installed or reinstalled in a particular database. Users might prefer to execute - the CREATE FUNCTION command by hand, instead. + the CREATE FUNCTION command by hand, instead. @@ -1300,7 +1300,7 @@ - Update time zone data files to tzdata release 2010l + Update time zone data files to tzdata release 2010l for DST law changes in Egypt and Palestine; also historical corrections for Finland. @@ -1315,7 +1315,7 @@ - Make Windows' N. Central Asia Standard Time timezone map to + Make Windows' N. Central Asia Standard Time timezone map to Asia/Novosibirsk, not Asia/Almaty (Magnus Hagander) @@ -1341,7 +1341,7 @@ This release contains a variety of fixes from 8.2.16. For information about new features in the 8.2 major release, see - . + . @@ -1350,7 +1350,7 @@ A dump/restore is not required for those running 8.2.X. However, if you are upgrading from a version earlier than 8.2.14, - see . + see . @@ -1362,19 +1362,19 @@ - Enforce restrictions in plperl using an opmask applied to - the whole interpreter, instead of using Safe.pm + Enforce restrictions in plperl using an opmask applied to + the whole interpreter, instead of using Safe.pm (Tim Bunce, Andrew Dunstan) - Recent developments have convinced us that Safe.pm is too - insecure to rely on for making plperl trustable. This - change removes use of Safe.pm altogether, in favor of using + Recent developments have convinced us that Safe.pm is too + insecure to rely on for making plperl trustable. This + change removes use of Safe.pm altogether, in favor of using a separate interpreter with an opcode mask that is always applied. Pleasant side effects of the change include that it is now possible to - use Perl's strict pragma in a natural way in - plperl, and that Perl's $a and $b + use Perl's strict pragma in a natural way in + plperl, and that Perl's $a and $b variables work as expected in sort routines, and that function compilation is significantly faster. (CVE-2010-1169) @@ -1383,19 +1383,19 @@ Prevent PL/Tcl from executing untrustworthy code from - pltcl_modules (Tom) + pltcl_modules (Tom) PL/Tcl's feature for autoloading Tcl code from a database table could be exploited for trojan-horse attacks, because there was no restriction on who could create or insert into that table. 
This change - disables the feature unless pltcl_modules is owned by a + disables the feature unless pltcl_modules is owned by a superuser. (However, the permissions on the table are not checked, so installations that really need a less-than-secure modules table can still grant suitable privileges to trusted non-superusers.) Also, - prevent loading code into the unrestricted normal Tcl - interpreter unless we are really going to execute a pltclu + prevent loading code into the unrestricted normal Tcl + interpreter unless we are really going to execute a pltclu function. (CVE-2010-1170) @@ -1419,10 +1419,10 @@ Previously, if an unprivileged user ran ALTER USER ... RESET - ALL for himself, or ALTER DATABASE ... RESET ALL for + ALL for himself, or ALTER DATABASE ... RESET ALL for a database he owns, this would remove all special parameter settings for the user or database, even ones that are only supposed to be - changeable by a superuser. Now, the ALTER will only + changeable by a superuser. Now, the ALTER will only remove the parameters that the user has permission to change. @@ -1430,7 +1430,7 @@ Avoid possible crash during backend shutdown if shutdown occurs - when a CONTEXT addition would be made to log entries (Tom) + when a CONTEXT addition would be made to log entries (Tom) @@ -1442,7 +1442,7 @@ - Update PL/Perl's ppport.h for modern Perl versions + Update PL/Perl's ppport.h for modern Perl versions (Andrew) @@ -1455,15 +1455,15 @@ - Prevent infinite recursion in psql when expanding + Prevent infinite recursion in psql when expanding a variable that refers to itself (Tom) - Fix psql's \copy to not add spaces around - a dot within \copy (select ...) (Tom) + Fix psql's \copy to not add spaces around + a dot within \copy (select ...) (Tom) @@ -1474,7 +1474,7 @@ - Ensure that contrib/pgstattuple functions respond to cancel + Ensure that contrib/pgstattuple functions respond to cancel interrupts promptly (Tatsuhito Kasahara) @@ -1482,7 +1482,7 @@ Make server startup deal properly with the case that - shmget() returns EINVAL for an existing + shmget() returns EINVAL for an existing shared memory segment (Tom) @@ -1514,14 +1514,14 @@ - Update time zone data files to tzdata release 2010j + Update time zone data files to tzdata release 2010j for DST law changes in Argentina, Australian Antarctic, Bangladesh, Mexico, Morocco, Pakistan, Palestine, Russia, Syria, Tunisia; also historical corrections for Taiwan. - Also, add PKST (Pakistan Summer Time) to the default set of + Also, add PKST (Pakistan Summer Time) to the default set of timezone abbreviations. @@ -1542,7 +1542,7 @@ This release contains a variety of fixes from 8.2.15. For information about new features in the 8.2 major release, see - . + . @@ -1551,7 +1551,7 @@ A dump/restore is not required for those running 8.2.X. However, if you are upgrading from a version earlier than 8.2.14, - see . + see . 
@@ -1563,7 +1563,7 @@ - Add new configuration parameter ssl_renegotiation_limit to + Add new configuration parameter ssl_renegotiation_limit to control how often we do session key renegotiation for an SSL connection (Magnus) @@ -1619,8 +1619,8 @@ - Make substring() for bit types treat any negative - length as meaning all the rest of the string (Tom) + Make substring() for bit types treat any negative + length as meaning all the rest of the string (Tom) @@ -1646,7 +1646,7 @@ - Fix the STOP WAL LOCATION entry in backup history files to + Fix the STOP WAL LOCATION entry in backup history files to report the next WAL segment's name when the end location is exactly at a segment boundary (Itagaki Takahiro) @@ -1668,23 +1668,23 @@ Improve constraint exclusion processing of boolean-variable cases, in particular make it possible to exclude a partition that has a - bool_column = false constraint (Tom) + bool_column = false constraint (Tom) - When reading pg_hba.conf and related files, do not treat - @something as a file inclusion request if the @ - appears inside quote marks; also, never treat @ by itself + When reading pg_hba.conf and related files, do not treat + @something as a file inclusion request if the @ + appears inside quote marks; also, never treat @ by itself as a file inclusion request (Tom) This prevents erratic behavior if a role or database name starts with - @. If you need to include a file whose path name + @. If you need to include a file whose path name contains spaces, you can still do so, but you must write - @"/path to/file" rather than putting the quotes around + @"/path to/file" rather than putting the quotes around the whole construct. @@ -1692,35 +1692,35 @@ Prevent infinite loop on some platforms if a directory is named as - an inclusion target in pg_hba.conf and related files + an inclusion target in pg_hba.conf and related files (Tom) - Fix possible infinite loop if SSL_read or - SSL_write fails without setting errno (Tom) + Fix possible infinite loop if SSL_read or + SSL_write fails without setting errno (Tom) This is reportedly possible with some Windows versions of - openssl. + OpenSSL. - Fix psql's numericlocale option to not + Fix psql's numericlocale option to not format strings it shouldn't in latex and troff output formats (Heikki) - Make psql return the correct exit status (3) when - ON_ERROR_STOP and --single-transaction are - both specified and an error occurs during the implied COMMIT + Make psql return the correct exit status (3) when + ON_ERROR_STOP and --single-transaction are + both specified and an error occurs during the implied COMMIT (Bruce) @@ -1741,7 +1741,7 @@ - Add volatile markings in PL/Python to avoid possible + Add volatile markings in PL/Python to avoid possible compiler-specific misbehavior (Zdenek Kotala) @@ -1753,28 +1753,28 @@ The only known symptom of this oversight is that the Tcl - clock command misbehaves if using Tcl 8.5 or later. + clock command misbehaves if using Tcl 8.5 or later. 
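To illustrate the psql exit-status fix above, a wrapper script can now rely on status 3 when ON_ERROR_STOP and --single-transaction are combined; the script and database names are placeholders:

psql -v ON_ERROR_STOP=1 --single-transaction -f migrate.sql mydb
echo "psql exited with status $?"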
- Prevent crash in contrib/dblink when too many key - columns are specified to a dblink_build_sql_* function + Prevent crash in contrib/dblink when too many key + columns are specified to a dblink_build_sql_* function (Rushabh Lathia, Joe Conway) - Fix assorted crashes in contrib/xml2 caused by sloppy + Fix assorted crashes in contrib/xml2 caused by sloppy memory management (Tom) - Make building of contrib/xml2 more robust on Windows + Make building of contrib/xml2 more robust on Windows (Andrew) @@ -1785,14 +1785,14 @@ - One known symptom of this bug is that rows in pg_listener + One known symptom of this bug is that rows in pg_listener could be dropped under heavy load. - Update time zone data files to tzdata release 2010e + Update time zone data files to tzdata release 2010e for DST law changes in Bangladesh, Chile, Fiji, Mexico, Paraguay, Samoa. @@ -1813,7 +1813,7 @@ This release contains a variety of fixes from 8.2.14. For information about new features in the 8.2 major release, see - . + . @@ -1822,7 +1822,7 @@ A dump/restore is not required for those running 8.2.X. However, if you are upgrading from a version earlier than 8.2.14, - see . + see . @@ -1864,14 +1864,14 @@ - Prevent signals from interrupting VACUUM at unsafe times + Prevent signals from interrupting VACUUM at unsafe times (Alvaro) - This fix prevents a PANIC if a VACUUM FULL is canceled + This fix prevents a PANIC if a VACUUM FULL is canceled after it's already committed its tuple movements, as well as transient - errors if a plain VACUUM is interrupted after having + errors if a plain VACUUM is interrupted after having truncated the table. @@ -1890,7 +1890,7 @@ - Fix very rare crash in inet/cidr comparisons (Chris + Fix very rare crash in inet/cidr comparisons (Chris Mikkelson) @@ -1948,7 +1948,7 @@ The previous code is known to fail with the combination of the Linux - pam_krb5 PAM module with Microsoft Active Directory as the + pam_krb5 PAM module with Microsoft Active Directory as the domain controller. It might have problems elsewhere too, since it was making unjustified assumptions about what arguments the PAM stack would pass to it. @@ -1958,13 +1958,13 @@ Fix processing of ownership dependencies during CREATE OR - REPLACE FUNCTION (Tom) + REPLACE FUNCTION (Tom) - Fix bug with calling plperl from plperlu or vice + Fix bug with calling plperl from plperlu or vice versa (Tom) @@ -1984,7 +1984,7 @@ Ensure that Perl arrays are properly converted to - PostgreSQL arrays when returned by a set-returning + PostgreSQL arrays when returned by a set-returning PL/Perl function (Andrew Dunstan, Abhijit Menon-Sen) @@ -2001,20 +2001,20 @@ - Ensure psql's flex module is compiled with the correct + Ensure psql's flex module is compiled with the correct system header definitions (Tom) This fixes build failures on platforms where - --enable-largefile causes incompatible changes in the + --enable-largefile causes incompatible changes in the generated code. - Make the postmaster ignore any application_name parameter in + Make the postmaster ignore any application_name parameter in connection request packets, to improve compatibility with future libpq versions (Tom) @@ -2027,14 +2027,14 @@ - This includes adding IDT and SGT to the default + This includes adding IDT and SGT to the default timezone abbreviation set. 
- Update time zone data files to tzdata release 2009s + Update time zone data files to tzdata release 2009s for DST law changes in Antarctica, Argentina, Bangladesh, Fiji, Novokuznetsk, Pakistan, Palestine, Samoa, Syria; also historical corrections for Hong Kong. @@ -2057,7 +2057,7 @@ This release contains a variety of fixes from 8.2.13. For information about new features in the 8.2 major release, see - . + . @@ -2065,10 +2065,10 @@ A dump/restore is not required for those running 8.2.X. - However, if you have any hash indexes on interval columns, - you must REINDEX them after updating to 8.2.14. + However, if you have any hash indexes on interval columns, + you must REINDEX them after updating to 8.2.14. Also, if you are upgrading from a version earlier than 8.2.11, - see . + see . @@ -2080,7 +2080,7 @@ - Force WAL segment switch during pg_start_backup() + Force WAL segment switch during pg_start_backup() (Heikki) @@ -2091,26 +2091,26 @@ - Disallow RESET ROLE and RESET SESSION - AUTHORIZATION inside security-definer functions (Tom, Heikki) + Disallow RESET ROLE and RESET SESSION + AUTHORIZATION inside security-definer functions (Tom, Heikki) This covers a case that was missed in the previous patch that - disallowed SET ROLE and SET SESSION - AUTHORIZATION inside security-definer functions. + disallowed SET ROLE and SET SESSION + AUTHORIZATION inside security-definer functions. (See CVE-2007-6600) - Make LOAD of an already-loaded loadable module + Make LOAD of an already-loaded loadable module into a no-op (Tom) - Formerly, LOAD would attempt to unload and re-load the + Formerly, LOAD would attempt to unload and re-load the module, but this is unsafe and not all that useful. @@ -2145,32 +2145,32 @@ - Fix hash calculation for data type interval (Tom) + Fix hash calculation for data type interval (Tom) This corrects wrong results for hash joins on interval values. It also changes the contents of hash indexes on interval columns. - If you have any such indexes, you must REINDEX them + If you have any such indexes, you must REINDEX them after updating. - Treat to_char(..., 'TH') as an uppercase ordinal - suffix with 'HH'/'HH12' (Heikki) + Treat to_char(..., 'TH') as an uppercase ordinal + suffix with 'HH'/'HH12' (Heikki) - It was previously handled as 'th' (lowercase). + It was previously handled as 'th' (lowercase). - Fix overflow for INTERVAL 'x ms' - when x is more than 2 million and integer + Fix overflow for INTERVAL 'x ms' + when x is more than 2 million and integer datetimes are in use (Alex Hunsaker) @@ -2187,7 +2187,7 @@ - Fix money data type to work in locales where currency + Fix money data type to work in locales where currency amounts have no fractional digits, e.g. 
Japan (Itagaki Takahiro) @@ -2195,7 +2195,7 @@ Properly round datetime input like - 00:12:57.9999999999999999999999999999 (Tom) + 00:12:57.9999999999999999999999999999 (Tom) @@ -2228,14 +2228,14 @@ - Fix pg_ctl to not go into an infinite loop if - postgresql.conf is empty (Jeff Davis) + Fix pg_ctl to not go into an infinite loop if + postgresql.conf is empty (Jeff Davis) - Make contrib/hstore throw an error when a key or + Make contrib/hstore throw an error when a key or value is too long to fit in its data structure, rather than silently truncating it (Andrew Gierth) @@ -2243,15 +2243,15 @@ - Fix contrib/xml2's xslt_process() to + Fix contrib/xml2's xslt_process() to properly handle the maximum number of parameters (twenty) (Tom) - Improve robustness of libpq's code to recover - from errors during COPY FROM STDIN (Tom) + Improve robustness of libpq's code to recover + from errors during COPY FROM STDIN (Tom) @@ -2264,7 +2264,7 @@ - Update time zone data files to tzdata release 2009l + Update time zone data files to tzdata release 2009l for DST law changes in Bangladesh, Egypt, Jordan, Pakistan, Argentina/San_Luis, Cuba, Jordan (historical correction only), Mauritius, Morocco, Palestine, Syria, Tunisia. @@ -2287,7 +2287,7 @@ This release contains a variety of fixes from 8.2.12. For information about new features in the 8.2 major release, see - . + . @@ -2296,7 +2296,7 @@ A dump/restore is not required for those running 8.2.X. However, if you are upgrading from a version earlier than 8.2.11, - see . + see . @@ -2315,7 +2315,7 @@ This change extends fixes made in the last two minor releases for related failure scenarios. The previous fixes were narrowly tailored for the original problem reports, but we have now recognized that - any error thrown by an encoding conversion function could + any error thrown by an encoding conversion function could potentially lead to infinite recursion while trying to report the error. The solution therefore is to disable translation and encoding conversion and report the plain-ASCII form of any error message, @@ -2326,7 +2326,7 @@ - Disallow CREATE CONVERSION with the wrong encodings + Disallow CREATE CONVERSION with the wrong encodings for the specified conversion function (Heikki) @@ -2339,40 +2339,40 @@ - Fix core dump when to_char() is given format codes that + Fix core dump when to_char() is given format codes that are inappropriate for the type of the data argument (Tom) - Fix possible failure in contrib/tsearch2 when C locale is + Fix possible failure in contrib/tsearch2 when C locale is used with a multi-byte encoding (Teodor) - Crashes were possible on platforms where wchar_t is narrower - than int; Windows in particular. + Crashes were possible on platforms where wchar_t is narrower + than int; Windows in particular. - Fix extreme inefficiency in contrib/tsearch2 parser's - handling of an email-like string containing multiple @ + Fix extreme inefficiency in contrib/tsearch2 parser's + handling of an email-like string containing multiple @ characters (Heikki) - Fix decompilation of CASE WHEN with an implicit coercion + Fix decompilation of CASE WHEN with an implicit coercion (Tom) This mistake could lead to Assert failures in an Assert-enabled build, - or an unexpected CASE WHEN clause error message in other + or an unexpected CASE WHEN clause error message in other cases, when trying to examine or dump a view. 
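The CASE WHEN decompilation fix above concerns views in which a coercion gets attached to the CASE test expression; the exact triggering coercion may differ, but one hypothetical shape of such a view, examined the same way pg_dump would examine it, is:

    CREATE TABLE t (code varchar(10));
    CREATE VIEW v AS
        SELECT CASE code WHEN 'a' THEN 'first' ELSE 'other' END AS label FROM t;
    SELECT pg_get_viewdef('v'::regclass, true);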
@@ -2383,24 +2383,24 @@ - If CLUSTER or a rewriting variant of ALTER TABLE + If CLUSTER or a rewriting variant of ALTER TABLE were executed by someone other than the table owner, the - pg_type entry for the table's TOAST table would end up + pg_type entry for the table's TOAST table would end up marked as owned by that someone. This caused no immediate problems, since the permissions on the TOAST rowtype aren't examined by any ordinary database operation. However, it could lead to unexpected failures if one later tried to drop the role that issued the command - (in 8.1 or 8.2), or owner of data type appears to be invalid - warnings from pg_dump after having done so (in 8.3). + (in 8.1 or 8.2), or owner of data type appears to be invalid + warnings from pg_dump after having done so (in 8.3). - Fix PL/pgSQL to not treat INTO after INSERT as + Fix PL/pgSQL to not treat INTO after INSERT as an INTO-variables clause anywhere in the string, not only at the start; - in particular, don't fail for INSERT INTO within - CREATE RULE (Tom) + in particular, don't fail for INSERT INTO within + CREATE RULE (Tom) @@ -2418,21 +2418,21 @@ - Retry failed calls to CallNamedPipe() on Windows + Retry failed calls to CallNamedPipe() on Windows (Steve Marshall, Magnus) It appears that this function can sometimes fail transiently; we previously treated any failure as a hard error, which could - confuse LISTEN/NOTIFY as well as other + confuse LISTEN/NOTIFY as well as other operations. - Add MUST (Mauritius Island Summer Time) to the default list + Add MUST (Mauritius Island Summer Time) to the default list of known timezone abbreviations (Xavier Bugaud) @@ -2453,7 +2453,7 @@ This release contains a variety of fixes from 8.2.11. For information about new features in the 8.2 major release, see - . + . @@ -2462,7 +2462,7 @@ A dump/restore is not required for those running 8.2.X. However, if you are upgrading from a version earlier than 8.2.11, - see . + see . @@ -2474,13 +2474,13 @@ - Improve handling of URLs in headline() function (Teodor) + Improve handling of URLs in headline() function (Teodor) - Improve handling of overlength headlines in headline() + Improve handling of overlength headlines in headline() function (Teodor) @@ -2497,7 +2497,7 @@ Fix possible Assert failure if a statement executed in PL/pgSQL is rewritten into another kind of statement, for example if an - INSERT is rewritten into an UPDATE (Heikki) + INSERT is rewritten into an UPDATE (Heikki) @@ -2507,7 +2507,7 @@ - This primarily affects domains that are declared with CHECK + This primarily affects domains that are declared with CHECK constraints involving user-defined stable or immutable functions. Such functions typically fail if no snapshot has been set. @@ -2522,14 +2522,14 @@ - Avoid unnecessary locking of small tables in VACUUM + Avoid unnecessary locking of small tables in VACUUM (Heikki) - Fix a problem that made UPDATE RETURNING tableoid + Fix a problem that made UPDATE RETURNING tableoid return zero instead of the correct OID (Tom) @@ -2542,13 +2542,13 @@ This could result in bad plans for queries like - ... from a left join b on a.a1 = b.b1 where a.a1 = 42 ... + ... from a left join b on a.a1 = b.b1 where a.a1 = 42 ... 
- Improve optimizer's handling of long IN lists (Tom) + Improve optimizer's handling of long IN lists (Tom) @@ -2581,37 +2581,37 @@ - Fix contrib/dblink's - dblink_get_result(text,bool) function (Joe) + Fix contrib/dblink's + dblink_get_result(text,bool) function (Joe) - Fix possible garbage output from contrib/sslinfo functions + Fix possible garbage output from contrib/sslinfo functions (Tom) - Fix configure script to properly report failure when + Fix configure script to properly report failure when unable to obtain linkage information for PL/Perl (Andrew) - Make all documentation reference pgsql-bugs and/or - pgsql-hackers as appropriate, instead of the - now-decommissioned pgsql-ports and pgsql-patches + Make all documentation reference pgsql-bugs and/or + pgsql-hackers as appropriate, instead of the + now-decommissioned pgsql-ports and pgsql-patches mailing lists (Tom) - Update time zone data files to tzdata release 2009a (for + Update time zone data files to tzdata release 2009a (for Kathmandu and historical DST corrections in Switzerland, Cuba) @@ -2632,7 +2632,7 @@ This release contains a variety of fixes from 8.2.10. For information about new features in the 8.2 major release, see - . + . @@ -2641,8 +2641,8 @@ A dump/restore is not required for those running 8.2.X. However, if you are upgrading from a version earlier than 8.2.7, - see . Also, if you were running a previous - 8.2.X release, it is recommended to REINDEX all GiST + see . Also, if you were running a previous + 8.2.X release, it is recommended to REINDEX all GiST indexes after the upgrade. @@ -2656,13 +2656,13 @@ Fix GiST index corruption due to marking the wrong index entry - dead after a deletion (Teodor) + dead after a deletion (Teodor) This would result in index searches failing to find rows they should have found. Corrupted indexes can be fixed with - REINDEX. + REINDEX. @@ -2674,7 +2674,7 @@ We have addressed similar issues before, but it would still fail if - the character has no equivalent message itself couldn't + the character has no equivalent message itself couldn't be converted. The fix is to disable localization and send the plain ASCII error message when we detect such a situation. @@ -2689,8 +2689,8 @@ - Improve optimization of expression IN - (expression-list) queries (Tom, per an idea from Robert + Improve optimization of expression IN + (expression-list) queries (Tom, per an idea from Robert Haas) @@ -2703,13 +2703,13 @@ - Fix mis-expansion of rule queries when a sub-SELECT appears - in a function call in FROM, a multi-row VALUES - list, or a RETURNING list (Tom) + Fix mis-expansion of rule queries when a sub-SELECT appears + in a function call in FROM, a multi-row VALUES + list, or a RETURNING list (Tom) - The usual symptom of this problem is an unrecognized node type + The usual symptom of this problem is an unrecognized node type error. 
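Per the migration note above that recommends reindexing GiST indexes after updating, a minimal sketch; the index and table names are hypothetical:

    REINDEX INDEX my_gist_idx;
    -- or rebuild every index on the owning table at once:
    REINDEX TABLE my_table;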
@@ -2729,9 +2729,9 @@ - Prevent possible collision of relfilenode numbers + Prevent possible collision of relfilenode numbers when moving a table to another tablespace with ALTER SET - TABLESPACE (Heikki) + TABLESPACE (Heikki) @@ -2750,14 +2750,14 @@ Fix improper display of fractional seconds in interval values when - using a non-ISO datestyle in an build (Ron Mayer) - Ensure SPI_getvalue and SPI_getbinval + Ensure SPI_getvalue and SPI_getbinval behave correctly when the passed tuple and tuple descriptor have different numbers of columns (Tom) @@ -2771,31 +2771,31 @@ - Fix ecpg's parsing of CREATE ROLE (Michael) + Fix ecpg's parsing of CREATE ROLE (Michael) - Fix recent breakage of pg_ctl restart (Tom) + Fix recent breakage of pg_ctl restart (Tom) - Ensure pg_control is opened in binary mode + Ensure pg_control is opened in binary mode (Itagaki Takahiro) - pg_controldata and pg_resetxlog + pg_controldata and pg_resetxlog did this incorrectly, and so could fail on Windows. - Update time zone data files to tzdata release 2008i (for + Update time zone data files to tzdata release 2008i (for DST law changes in Argentina, Brazil, Mauritius, Syria) @@ -2816,7 +2816,7 @@ This release contains a variety of fixes from 8.2.9. For information about new features in the 8.2 major release, see - . + . @@ -2825,7 +2825,7 @@ A dump/restore is not required for those running 8.2.X. However, if you are upgrading from a version earlier than 8.2.7, - see . + see . @@ -2847,12 +2847,12 @@ - Fix potential miscalculation of datfrozenxid (Alvaro) + Fix potential miscalculation of datfrozenxid (Alvaro) This error may explain some recent reports of failure to remove old - pg_clog data. + pg_clog data. @@ -2864,7 +2864,7 @@ This responds to reports that the counters could overflow in sufficiently long transactions, leading to unexpected lock is - already held errors. + already held errors. @@ -2877,7 +2877,7 @@ Fix missed permissions checks when a view contains a simple - UNION ALL construct (Heikki) + UNION ALL construct (Heikki) @@ -2889,12 +2889,12 @@ Add checks in executor startup to ensure that the tuples produced by an - INSERT or UPDATE will match the target table's + INSERT or UPDATE will match the target table's current rowtype (Tom) - ALTER COLUMN TYPE, followed by re-use of a previously + ALTER COLUMN TYPE, followed by re-use of a previously cached plan, could produce this type of situation. The check protects against data corruption and/or crashes that could ensue. @@ -2902,29 +2902,29 @@ - Fix possible repeated drops during DROP OWNED (Tom) + Fix possible repeated drops during DROP OWNED (Tom) This would typically result in strange errors such as cache - lookup failed for relation NNN. + lookup failed for relation NNN. - Fix AT TIME ZONE to first try to interpret its timezone + Fix AT TIME ZONE to first try to interpret its timezone argument as a timezone abbreviation, and only try it as a full timezone name if that fails, rather than the other way around as formerly (Tom) The timestamp input functions have always resolved ambiguous zone names - in this order. Making AT TIME ZONE do so as well improves + in this order. Making AT TIME ZONE do so as well improves consistency, and fixes a compatibility bug introduced in 8.1: in ambiguous cases we now behave the same as 8.0 and before did, - since in the older versions AT TIME ZONE accepted - only abbreviations. + since in the older versions AT TIME ZONE accepted + only abbreviations. 
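The AT TIME ZONE change above affects zone arguments that could be read either as an abbreviation or as a full zone name; usage is unchanged either way, and the timestamps here are arbitrary:

    SELECT TIMESTAMP '2009-07-15 12:00' AT TIME ZONE 'CET';            -- tried as an abbreviation first
    SELECT TIMESTAMP '2009-07-15 12:00' AT TIME ZONE 'Europe/Prague';  -- unambiguous full zone name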
@@ -2951,14 +2951,14 @@ Allow spaces in the suffix part of an LDAP URL in - pg_hba.conf (Tom) + pg_hba.conf (Tom) Fix bug in backwards scanning of a cursor on a SELECT DISTINCT - ON query (Tom) + ON query (Tom) @@ -2976,21 +2976,21 @@ - Fix planner to estimate that GROUP BY expressions yielding + Fix planner to estimate that GROUP BY expressions yielding boolean results always result in two groups, regardless of the expressions' contents (Tom) This is very substantially more accurate than the regular GROUP - BY estimate for certain boolean tests like col - IS NULL. + BY estimate for certain boolean tests like col + IS NULL. - Fix PL/pgSQL to not fail when a FOR loop's target variable + Fix PL/pgSQL to not fail when a FOR loop's target variable is a record containing composite-type fields (Tom) @@ -3005,28 +3005,28 @@ On Windows, work around a Microsoft bug by preventing - libpq from trying to send more than 64kB per system call + libpq from trying to send more than 64kB per system call (Magnus) - Improve pg_dump and pg_restore's + Improve pg_dump and pg_restore's error reporting after failure to send a SQL command (Tom) - Fix pg_ctl to properly preserve postmaster - command-line arguments across a restart (Bruce) + Fix pg_ctl to properly preserve postmaster + command-line arguments across a restart (Bruce) - Update time zone data files to tzdata release 2008f (for + Update time zone data files to tzdata release 2008f (for DST law changes in Argentina, Bahamas, Brazil, Mauritius, Morocco, Pakistan, Palestine, and Paraguay) @@ -3048,7 +3048,7 @@ This release contains one serious and one minor bug fix over 8.2.8. For information about new features in the 8.2 major release, see - . + . @@ -3057,7 +3057,7 @@ A dump/restore is not required for those running 8.2.X. However, if you are upgrading from a version earlier than 8.2.7, - see . + see . @@ -3069,18 +3069,18 @@ - Make pg_get_ruledef() parenthesize negative constants (Tom) + Make pg_get_ruledef() parenthesize negative constants (Tom) Before this fix, a negative constant in a view or rule might be dumped - as, say, -42::integer, which is subtly incorrect: it should - be (-42)::integer due to operator precedence rules. + as, say, -42::integer, which is subtly incorrect: it should + be (-42)::integer due to operator precedence rules. Usually this would make little difference, but it could interact with another recent patch to cause - PostgreSQL to reject what had been a valid - SELECT DISTINCT view query. Since this could result in - pg_dump output failing to reload, it is being treated + PostgreSQL to reject what had been a valid + SELECT DISTINCT view query. Since this could result in + pg_dump output failing to reload, it is being treated as a high-priority fix. The only released versions in which dump output is actually incorrect are 8.3.1 and 8.2.7. @@ -3088,13 +3088,13 @@ - Make ALTER AGGREGATE ... OWNER TO update - pg_shdepend (Tom) + Make ALTER AGGREGATE ... OWNER TO update + pg_shdepend (Tom) This oversight could lead to problems if the aggregate was later - involved in a DROP OWNED or REASSIGN OWNED + involved in a DROP OWNED or REASSIGN OWNED operation. @@ -3115,7 +3115,7 @@ This release contains a variety of fixes from 8.2.7. For information about new features in the 8.2 major release, see - . + . @@ -3124,7 +3124,7 @@ A dump/restore is not required for those running 8.2.X. However, if you are upgrading from a version earlier than 8.2.7, - see . + see . @@ -3144,7 +3144,7 @@ - Fix ALTER TABLE ADD COLUMN ... 
PRIMARY KEY so that the new + Fix ALTER TABLE ADD COLUMN ... PRIMARY KEY so that the new column is correctly checked to see if it's been initialized to all non-nulls (Brendan Jurd) @@ -3156,16 +3156,16 @@ - Fix possible CREATE TABLE failure when inheriting the - same constraint from multiple parent relations that + Fix possible CREATE TABLE failure when inheriting the + same constraint from multiple parent relations that inherited that constraint from a common ancestor (Tom) - Fix pg_get_ruledef() to show the alias, if any, attached - to the target table of an UPDATE or DELETE + Fix pg_get_ruledef() to show the alias, if any, attached + to the target table of an UPDATE or DELETE (Tom) @@ -3200,14 +3200,14 @@ Fix conversions between ISO-8859-5 and other encodings to handle - Cyrillic Yo characters (e and E with + Cyrillic Yo characters (e and E with two dots) (Sergey Burladyan) - Fix several datatype input functions, notably array_in(), + Fix several datatype input functions, notably array_in(), that were allowing unused bytes in their results to contain uninitialized, unpredictable values (Tom) @@ -3215,7 +3215,7 @@ This could lead to failures in which two apparently identical literal values were not seen as equal, resulting in the parser complaining - about unmatched ORDER BY and DISTINCT + about unmatched ORDER BY and DISTINCT expressions. @@ -3223,24 +3223,24 @@ Fix a corner case in regular-expression substring matching - (substring(string from - pattern)) (Tom) + (substring(string from + pattern)) (Tom) The problem occurs when there is a match to the pattern overall but the user has specified a parenthesized subexpression and that subexpression hasn't got a match. An example is - substring('foo' from 'foo(bar)?'). - This should return NULL, since (bar) isn't matched, but + substring('foo' from 'foo(bar)?'). + This should return NULL, since (bar) isn't matched, but it was mistakenly returning the whole-pattern match instead (ie, - foo). + foo). - Update time zone data files to tzdata release 2008c (for + Update time zone data files to tzdata release 2008c (for DST law changes in Morocco, Iraq, Choibalsan, Pakistan, Syria, Cuba, and Argentina/San_Luis) @@ -3248,47 +3248,47 @@ - Fix incorrect result from ecpg's - PGTYPEStimestamp_sub() function (Michael) + Fix incorrect result from ecpg's + PGTYPEStimestamp_sub() function (Michael) - Fix broken GiST comparison function for contrib/tsearch2's - tsquery type (Teodor) + Fix broken GiST comparison function for contrib/tsearch2's + tsquery type (Teodor) - Fix possible crashes in contrib/cube functions (Tom) + Fix possible crashes in contrib/cube functions (Tom) - Fix core dump in contrib/xml2's - xpath_table() function when the input query returns a + Fix core dump in contrib/xml2's + xpath_table() function when the input query returns a NULL value (Tom) - Fix contrib/xml2's makefile to not override - CFLAGS (Tom) + Fix contrib/xml2's makefile to not override + CFLAGS (Tom) - Fix DatumGetBool macro to not fail with gcc + Fix DatumGetBool macro to not fail with gcc 4.3 (Tom) - This problem affects old style (V0) C functions that + This problem affects old style (V0) C functions that return boolean. The fix is already in 8.3, but the need to back-patch it was not realized at the time. @@ -3310,7 +3310,7 @@ This release contains a variety of fixes from 8.2.6. For information about new features in the 8.2 major release, see - . + . @@ -3318,7 +3318,7 @@ A dump/restore is not required for those running 8.2.X. 
- However, you might need to REINDEX indexes on textual + However, you might need to REINDEX indexes on textual columns after updating, if you are affected by the Windows locale issue described below. @@ -3342,34 +3342,34 @@ over two years ago, but Windows with UTF-8 uses a separate code path that was not updated. If you are using a locale that considers some non-identical strings as equal, you may need to - REINDEX to fix existing indexes on textual columns. + REINDEX to fix existing indexes on textual columns. - Repair potential deadlock between concurrent VACUUM FULL + Repair potential deadlock between concurrent VACUUM FULL operations on different system catalogs (Tom) - Fix longstanding LISTEN/NOTIFY + Fix longstanding LISTEN/NOTIFY race condition (Tom) In rare cases a session that had just executed a - LISTEN might not get a notification, even though + LISTEN might not get a notification, even though one would be expected because the concurrent transaction executing - NOTIFY was observed to commit later. + NOTIFY was observed to commit later. A side effect of the fix is that a transaction that has executed - a not-yet-committed LISTEN command will not see any - row in pg_listener for the LISTEN, + a not-yet-committed LISTEN command will not see any + row in pg_listener for the LISTEN, should it choose to look; formerly it would have. This behavior was never documented one way or the other, but it is possible that some applications depend on the old behavior. @@ -3378,14 +3378,14 @@ - Disallow LISTEN and UNLISTEN within a + Disallow LISTEN and UNLISTEN within a prepared transaction (Tom) This was formerly allowed but trying to do it had various unpleasant consequences, notably that the originating backend could not exit - as long as an UNLISTEN remained uncommitted. + as long as an UNLISTEN remained uncommitted. @@ -3426,14 +3426,14 @@ - Fix unrecognized node type error in some variants of - ALTER OWNER (Tom) + Fix unrecognized node type error in some variants of + ALTER OWNER (Tom) - Ensure pg_stat_activity.waiting flag + Ensure pg_stat_activity.waiting flag is cleared when a lock wait is aborted (Tom) @@ -3451,20 +3451,20 @@ - Update time zone data files to tzdata release 2008a + Update time zone data files to tzdata release 2008a (in particular, recent Chile changes); adjust timezone abbreviation - VET (Venezuela) to mean UTC-4:30, not UTC-4:00 (Tom) + VET (Venezuela) to mean UTC-4:30, not UTC-4:00 (Tom) - Fix pg_ctl to correctly extract the postmaster's port + Fix pg_ctl to correctly extract the postmaster's port number from command-line options (Itagaki Takahiro, Tom) - Previously, pg_ctl start -w could try to contact the + Previously, pg_ctl start -w could try to contact the postmaster on the wrong port, leading to bogus reports of startup failure. @@ -3472,31 +3472,31 @@ - Use - This is known to be necessary when building PostgreSQL - with gcc 4.3 or later. + This is known to be necessary when building PostgreSQL + with gcc 4.3 or later. - Correctly enforce statement_timeout values longer - than INT_MAX microseconds (about 35 minutes) (Tom) + Correctly enforce statement_timeout values longer + than INT_MAX microseconds (about 35 minutes) (Tom) - This bug affects only builds with . 
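The statement_timeout fix above is only visible for timeouts beyond roughly 35 minutes; setting such a value (chosen arbitrarily here) can be written either way:

    SET statement_timeout = 2700000;    -- 45 minutes, in milliseconds
    SET statement_timeout = '45min';    -- equivalent, using the unit syntax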
- Fix unexpected PARAM_SUBLINK ID planner error when + Fix unexpected PARAM_SUBLINK ID planner error when constant-folding simplifies a sub-select (Tom) @@ -3504,7 +3504,7 @@ Fix logical errors in constraint-exclusion handling of IS - NULL and NOT expressions (Tom) + NULL and NOT expressions (Tom) @@ -3515,7 +3515,7 @@ - Fix another cause of failed to build any N-way joins + Fix another cause of failed to build any N-way joins planner errors (Tom) @@ -3539,8 +3539,8 @@ - Fix display of constant expressions in ORDER BY - and GROUP BY (Tom) + Fix display of constant expressions in ORDER BY + and GROUP BY (Tom) @@ -3552,7 +3552,7 @@ - Fix libpq to handle NOTICE messages correctly + Fix libpq to handle NOTICE messages correctly during COPY OUT (Tom) @@ -3580,7 +3580,7 @@ This release contains a variety of fixes from 8.2.5, including fixes for significant security issues. For information about new features in the 8.2 major release, see - . + . @@ -3600,7 +3600,7 @@ Prevent functions in indexes from executing with the privileges of - the user running VACUUM, ANALYZE, etc (Tom) + the user running VACUUM, ANALYZE, etc (Tom) @@ -3611,18 +3611,18 @@ (Note that triggers, defaults, check constraints, etc. pose the same type of risk.) But functions in indexes pose extra danger because they will be executed by routine maintenance operations - such as VACUUM FULL, which are commonly performed + such as VACUUM FULL, which are commonly performed automatically under a superuser account. For example, a nefarious user can execute code with superuser privileges by setting up a trojan-horse index definition and waiting for the next routine vacuum. The fix arranges for standard maintenance operations - (including VACUUM, ANALYZE, REINDEX, - and CLUSTER) to execute as the table owner rather than + (including VACUUM, ANALYZE, REINDEX, + and CLUSTER) to execute as the table owner rather than the calling user, using the same privilege-switching mechanism already - used for SECURITY DEFINER functions. To prevent bypassing + used for SECURITY DEFINER functions. To prevent bypassing this security measure, execution of SET SESSION - AUTHORIZATION and SET ROLE is now forbidden within a - SECURITY DEFINER context. (CVE-2007-6600) + AUTHORIZATION and SET ROLE is now forbidden within a + SECURITY DEFINER context. (CVE-2007-6600) @@ -3642,13 +3642,13 @@ - Require non-superusers who use /contrib/dblink to use only + Require non-superusers who use /contrib/dblink to use only password authentication, as a security measure (Joe) The fix that appeared for this in 8.2.5 was incomplete, as it plugged - the hole for only some dblink functions. (CVE-2007-6601, + the hole for only some dblink functions. (CVE-2007-6601, CVE-2007-3278) @@ -3662,13 +3662,13 @@ Fix GIN index build to work properly when - maintenance_work_mem is 4GB or more (Tom) + maintenance_work_mem is 4GB or more (Tom) - Update time zone data files to tzdata release 2007k + Update time zone data files to tzdata release 2007k (in particular, recent Argentina changes) (Tom) @@ -3690,22 +3690,22 @@ Fix planner failure in some cases of WHERE false AND var IN - (SELECT ...) (Tom) + (SELECT ...) (Tom) - Make CREATE TABLE ... SERIAL and - ALTER SEQUENCE ... OWNED BY not change the - currval() state of the sequence (Tom) + Make CREATE TABLE ... SERIAL and + ALTER SEQUENCE ... OWNED BY not change the + currval() state of the sequence (Tom) Preserve the tablespace and storage parameters of indexes that are - rebuilt by ALTER TABLE ... 
ALTER COLUMN TYPE (Tom) + rebuilt by ALTER TABLE ... ALTER COLUMN TYPE (Tom) @@ -3724,28 +3724,28 @@ - Make VACUUM not use all of maintenance_work_mem + Make VACUUM not use all of maintenance_work_mem when the table is too small for it to be useful (Alvaro) - Fix potential crash in translate() when using a multibyte + Fix potential crash in translate() when using a multibyte database encoding (Tom) - Make corr() return the correct result for negative + Make corr() return the correct result for negative correlation values (Neil) - Fix overflow in extract(epoch from interval) for intervals + Fix overflow in extract(epoch from interval) for intervals exceeding 68 years (Tom) @@ -3759,13 +3759,13 @@ - Fix PL/Perl to cope when platform's Perl defines type bool - as int rather than char (Tom) + Fix PL/Perl to cope when platform's Perl defines type bool + as int rather than char (Tom) While this could theoretically happen anywhere, no standard build of - Perl did things this way ... until macOS 10.5. + Perl did things this way ... until macOS 10.5. @@ -3784,73 +3784,73 @@ - Fix pg_dump to correctly handle inheritance child tables + Fix pg_dump to correctly handle inheritance child tables that have default expressions different from their parent's (Tom) - Fix libpq crash when PGPASSFILE refers + Fix libpq crash when PGPASSFILE refers to a file that is not a plain file (Martin Pitt) - ecpg parser fixes (Michael) + ecpg parser fixes (Michael) - Make contrib/pgcrypto defend against - OpenSSL libraries that fail on keys longer than 128 + Make contrib/pgcrypto defend against + OpenSSL libraries that fail on keys longer than 128 bits; which is the case at least on some Solaris versions (Marko Kreen) - Make contrib/tablefunc's crosstab() handle + Make contrib/tablefunc's crosstab() handle NULL rowid as a category in its own right, rather than crashing (Joe) - Fix tsvector and tsquery output routines to + Fix tsvector and tsquery output routines to escape backslashes correctly (Teodor, Bruce) - Fix crash of to_tsvector() on huge input strings (Teodor) + Fix crash of to_tsvector() on huge input strings (Teodor) - Require a specific version of Autoconf to be used - when re-generating the configure script (Peter) + Require a specific version of Autoconf to be used + when re-generating the configure script (Peter) This affects developers and packagers only. The change was made to prevent accidental use of untested combinations of - Autoconf and PostgreSQL versions. + Autoconf and PostgreSQL versions. You can remove the version check if you really want to use a - different Autoconf version, but it's + different Autoconf version, but it's your responsibility whether the result works or not. - Update gettimeofday configuration check so that - PostgreSQL can be built on newer versions of - MinGW (Magnus) + Update gettimeofday configuration check so that + PostgreSQL can be built on newer versions of + MinGW (Magnus) @@ -3870,7 +3870,7 @@ This release contains a variety of fixes from 8.2.4. For information about new features in the 8.2 major release, see - . + . @@ -3890,48 +3890,48 @@ Prevent index corruption when a transaction inserts rows and - then aborts close to the end of a concurrent VACUUM + then aborts close to the end of a concurrent VACUUM on the same table (Tom) - Fix ALTER DOMAIN ADD CONSTRAINT for cases involving + Fix ALTER DOMAIN ADD CONSTRAINT for cases involving domains over domains (Tom) - Make CREATE DOMAIN ... DEFAULT NULL work properly (Tom) + Make CREATE DOMAIN ... 
DEFAULT NULL work properly (Tom) Fix some planner problems with outer joins, notably poor - size estimation for t1 LEFT JOIN t2 WHERE t2.col IS NULL + size estimation for t1 LEFT JOIN t2 WHERE t2.col IS NULL (Tom) - Allow the interval data type to accept input consisting only of + Allow the interval data type to accept input consisting only of milliseconds or microseconds (Neil) - Allow timezone name to appear before the year in timestamp input (Tom) + Allow timezone name to appear before the year in timestamp input (Tom) - Fixes for GIN indexes used by /contrib/tsearch2 (Teodor) + Fixes for GIN indexes used by /contrib/tsearch2 (Teodor) @@ -3943,7 +3943,7 @@ - Fix excessive logging of SSL error messages (Tom) + Fix excessive logging of SSL error messages (Tom) @@ -3956,7 +3956,7 @@ - Fix crash when log_min_error_statement logging runs out + Fix crash when log_min_error_statement logging runs out of memory (Tom) @@ -3969,13 +3969,13 @@ - Fix stddev_pop(numeric) and var_pop(numeric) (Tom) + Fix stddev_pop(numeric) and var_pop(numeric) (Tom) - Prevent REINDEX and CLUSTER from failing + Prevent REINDEX and CLUSTER from failing due to attempting to process temporary tables of other sessions (Alvaro) @@ -3994,39 +3994,39 @@ - Make pg_ctl -w work properly in Windows service mode (Dave Page) + Make pg_ctl -w work properly in Windows service mode (Dave Page) - Fix memory allocation bug when using MIT Kerberos on Windows (Magnus) + Fix memory allocation bug when using MIT Kerberos on Windows (Magnus) - Suppress timezone name (%Z) in log timestamps on Windows + Suppress timezone name (%Z) in log timestamps on Windows because of possible encoding mismatches (Tom) - Require non-superusers who use /contrib/dblink to use only + Require non-superusers who use /contrib/dblink to use only password authentication, as a security measure (Joe) - Restrict /contrib/pgstattuple functions to superusers, for security reasons (Tom) + Restrict /contrib/pgstattuple functions to superusers, for security reasons (Tom) - Do not let /contrib/intarray try to make its GIN opclass + Do not let /contrib/intarray try to make its GIN opclass the default (this caused problems at dump/restore) (Tom) @@ -4048,7 +4048,7 @@ This release contains a variety of fixes from 8.2.3, including a security fix. For information about new features in the 8.2 major release, see - . + . @@ -4068,56 +4068,56 @@ Support explicit placement of the temporary-table schema within - search_path, and disable searching it for functions + search_path, and disable searching it for functions and operators (Tom) This is needed to allow a security-definer function to set a - truly secure value of search_path. Without it, + truly secure value of search_path. Without it, an unprivileged SQL user can use temporary objects to execute code with the privileges of the security-definer function (CVE-2007-2138). - See CREATE FUNCTION for more information. + See CREATE FUNCTION for more information. 
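One way a SECURITY DEFINER function can pin search_path as described above; the schema, table, column, and function names are hypothetical, and set_config() is only one of several ways to fix the path (see CREATE FUNCTION, as the note says):

    CREATE FUNCTION lookup_owner(acct_id int) RETURNS text AS $$
    BEGIN
        -- fix a trusted path, with the temporary schema explicitly last
        PERFORM set_config('search_path', 'admin, pg_temp', true);
        RETURN (SELECT owner_name FROM admin.accounts WHERE id = acct_id);
    END;
    $$ LANGUAGE plpgsql SECURITY DEFINER;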
- Fix shared_preload_libraries for Windows + Fix shared_preload_libraries for Windows by forcing reload in each backend (Korry Douglas) - Fix to_char() so it properly upper/lower cases localized day or month + Fix to_char() so it properly upper/lower cases localized day or month names (Pavel Stehule) - /contrib/tsearch2 crash fixes (Teodor) + /contrib/tsearch2 crash fixes (Teodor) - Require COMMIT PREPARED to be executed in the same + Require COMMIT PREPARED to be executed in the same database as the transaction was prepared in (Heikki) - Allow pg_dump to do binary backups larger than two gigabytes + Allow pg_dump to do binary backups larger than two gigabytes on Windows (Magnus) - New traditional (Taiwan) Chinese FAQ (Zhou Daojing) + New traditional (Taiwan) Chinese FAQ (Zhou Daojing) @@ -4129,8 +4129,8 @@ - Fix potential-data-corruption bug in how VACUUM FULL handles - UPDATE chains (Tom, Pavan Deolasee) + Fix potential-data-corruption bug in how VACUUM FULL handles + UPDATE chains (Tom, Pavan Deolasee) @@ -4142,8 +4142,8 @@ - Fix pg_dump so it can dump a serial column's sequence - using when not also dumping the owning table (Tom) @@ -4158,7 +4158,7 @@ Fix possible wrong answers or crash when a PL/pgSQL function tries - to RETURN from within an EXCEPTION block + to RETURN from within an EXCEPTION block (Tom) @@ -4191,7 +4191,7 @@ This release contains two fixes from 8.2.2. For information about new features in the 8.2 major release, see - . + . @@ -4238,7 +4238,7 @@ This release contains a variety of fixes from 8.2.1, including a security fix. For information about new features in the 8.2 major release, see - . + . @@ -4286,8 +4286,8 @@ - Properly handle to_char('CC') for years ending in - 00 (Tom) + Properly handle to_char('CC') for years ending in + 00 (Tom) @@ -4297,41 +4297,41 @@ - /contrib/tsearch2 localization improvements (Tatsuo, Teodor) + /contrib/tsearch2 localization improvements (Tatsuo, Teodor) Fix incorrect permission check in - information_schema.key_column_usage view (Tom) + information_schema.key_column_usage view (Tom) - The symptom is relation with OID nnnnn does not exist errors. - To get this fix without using initdb, use CREATE OR - REPLACE VIEW to install the corrected definition found in - share/information_schema.sql. Note you will need to do + The symptom is relation with OID nnnnn does not exist errors. + To get this fix without using initdb, use CREATE OR + REPLACE VIEW to install the corrected definition found in + share/information_schema.sql. Note you will need to do this in each database. - Improve VACUUM performance for databases with many tables (Tom) + Improve VACUUM performance for databases with many tables (Tom) - Fix for rare Assert() crash triggered by UNION (Tom) + Fix for rare Assert() crash triggered by UNION (Tom) Fix potentially incorrect results from index searches using - ROW inequality conditions (Tom) + ROW inequality conditions (Tom) @@ -4344,7 +4344,7 @@ - Fix bogus permission denied failures occurring on Windows + Fix bogus permission denied failures occurring on Windows due to attempts to fsync already-deleted files (Magnus, Tom) @@ -4395,7 +4395,7 @@ This release contains a variety of fixes from 8.2. For information about new features in the 8.2 major release, see - . + . @@ -4414,21 +4414,21 @@ - Fix crash with SELECT ... LIMIT ALL (also - LIMIT NULL) (Tom) + Fix crash with SELECT ... 
LIMIT ALL (also + LIMIT NULL) (Tom) - Several /contrib/tsearch2 fixes (Teodor) + Several /contrib/tsearch2 fixes (Teodor) On Windows, make log messages coming from the operating system use - ASCII encoding (Hiroshi Saito) + ASCII encoding (Hiroshi Saito) @@ -4439,8 +4439,8 @@ - Fix Windows linking of pg_dump using - win32.mak + Fix Windows linking of pg_dump using + win32.mak (Hiroshi Saito) @@ -4469,13 +4469,13 @@ - Improve build speed of PDF documentation (Peter) + Improve build speed of PDF documentation (Peter) - Re-add JST (Japan) timezone abbreviation (Tom) + Re-add JST (Japan) timezone abbreviation (Tom) @@ -4487,8 +4487,8 @@ - Have psql print multi-byte combining characters as - before, rather than output as \u (Tom) + Have psql print multi-byte combining characters as + before, rather than output as \u (Tom) @@ -4498,19 +4498,19 @@ - This improves psql \d performance also. + This improves psql \d performance also. - Make pg_dumpall assume that databases have public - CONNECT privilege, when dumping from a pre-8.2 server (Tom) + Make pg_dumpall assume that databases have public + CONNECT privilege, when dumping from a pre-8.2 server (Tom) This preserves the previous behavior that anyone can connect to a - database if allowed by pg_hba.conf. + database if allowed by pg_hba.conf. @@ -4541,14 +4541,14 @@ Query language enhancements including INSERT/UPDATE/DELETE RETURNING, multirow VALUES lists, and optional target-table alias in - UPDATE/DELETE + UPDATE/DELETE Index creation without blocking concurrent - INSERT/UPDATE/DELETE + INSERT/UPDATE/DELETE operations @@ -4659,13 +4659,13 @@ Set escape_string_warning - to on by default (Bruce) + linkend="guc-escape-string-warning">escape_string_warning + to on by default (Bruce) This issues a warning if backslash escapes are used in - non-escape (non-E'') + non-escape (non-E'') strings. @@ -4673,8 +4673,8 @@ Change the row - constructor syntax (ROW(...)) so that - list elements foo.* will be expanded to a list + constructor syntax (ROW(...)) so that + list elements foo.* will be expanded to a list of their member fields, rather than creating a nested row type field as formerly (Tom) @@ -4682,15 +4682,15 @@ The new behavior is substantially more useful since it allows, for example, triggers to check for data changes - with IF row(new.*) IS DISTINCT FROM row(old.*). - The old behavior is still available by omitting .*. + with IF row(new.*) IS DISTINCT FROM row(old.*). + The old behavior is still available by omitting .*. Make row comparisons - follow SQL standard semantics and allow them + follow SQL standard semantics and allow them to be used in index scans (Tom) @@ -4704,13 +4704,13 @@ - Make row IS NOT NULL - tests follow SQL standard semantics (Tom) + Make row IS NOT NULL + tests follow SQL standard semantics (Tom) The former behavior conformed to the standard for simple cases - with IS NULL, but IS NOT NULL would return + with IS NULL, but IS NOT NULL would return true if any row field was non-null, whereas the standard says it should return true only when all fields are non-null. @@ -4718,12 +4718,12 @@ - Make SET - CONSTRAINT affect only one constraint (Kris Jurka) + Make SET + CONSTRAINT affect only one constraint (Kris Jurka) - In previous releases, SET CONSTRAINT modified + In previous releases, SET CONSTRAINT modified all constraints with a matching name. In this release, the schema search path is used to modify only the first matching constraint. 
A schema specification is also @@ -4733,14 +4733,14 @@ - Remove RULE permission for tables, for security reasons + Remove RULE permission for tables, for security reasons (Tom) As of this release, only a table's owner can create or modify rules for the table. For backwards compatibility, - GRANT/REVOKE RULE is still accepted, + GRANT/REVOKE RULE is still accepted, but it does nothing. @@ -4769,14 +4769,14 @@ - Make command-line options of postmaster - and postgres + Make command-line options of postmaster + and postgres identical (Peter) This allows the postmaster to pass arguments to each backend - without using -o. Note that some options are now + without using -o. Note that some options are now only available as long-form options, because there were conflicting single-letter options. @@ -4784,13 +4784,13 @@ - Deprecate use of postmaster symbolic link (Peter) + Deprecate use of postmaster symbolic link (Peter) - postmaster and postgres + postmaster and postgres commands now act identically, with the behavior determined - by command-line options. The postmaster symbolic link is + by command-line options. The postmaster symbolic link is kept for compatibility, but is not really needed. @@ -4798,12 +4798,12 @@ Change log_duration + linkend="guc-log-duration">log_duration to output even if the query is not output (Tom) - In prior releases, log_duration only printed if + In prior releases, log_duration only printed if the query appeared earlier in the log. @@ -4811,15 +4811,15 @@ Make to_char(time) + linkend="functions-formatting">to_char(time) and to_char(interval) - treat HH and HH12 as 12-hour + linkend="functions-formatting">to_char(interval) + treat HH and HH12 as 12-hour intervals - Most applications should use HH24 unless they + Most applications should use HH24 unless they want a 12-hour display. @@ -4827,19 +4827,19 @@ Zero unmasked bits in conversion from INET to CIDR (Tom) + linkend="datatype-inet">INET to CIDR (Tom) This ensures that the converted value is actually valid for - CIDR. + CIDR. - Remove australian_timezones configuration variable + Remove australian_timezones configuration variable (Joachim Wieland) @@ -4857,35 +4857,35 @@ This might eliminate the need to set unrealistically small values of random_page_cost. - If you have been using a very small random_page_cost, + linkend="guc-random-page-cost">random_page_cost. + If you have been using a very small random_page_cost, please recheck your test cases. - Change behavior of pg_dump -n and - -t options. (Greg Sabino Mullane) + Change behavior of pg_dump -n and + -t options. (Greg Sabino Mullane) - See the pg_dump manual page for details. + See the pg_dump manual page for details. - Change libpq - PQdsplen() to return a useful value (Martijn + Change libpq + PQdsplen() to return a useful value (Martijn van Oosterhout) - Declare libpq - PQgetssl() as returning void *, - rather than SSL * (Martijn van Oosterhout) + Declare libpq + PQgetssl() as returning void *, + rather than SSL * (Martijn van Oosterhout) @@ -4897,7 +4897,7 @@ C-language loadable modules must now include a - PG_MODULE_MAGIC + PG_MODULE_MAGIC macro call for version compatibility checking (Martijn van Oosterhout) @@ -4923,12 +4923,12 @@ - In contrib/xml2/, rename xml_valid() to - xml_is_well_formed() (Tom) + In contrib/xml2/, rename xml_valid() to + xml_is_well_formed() (Tom) - xml_valid() will remain for backward compatibility, + xml_valid() will remain for backward compatibility, but its behavior will change to do schema checking in a future release. 
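Assuming contrib/xml2 is installed, the renamed function is a drop-in for the old name; the document literal is illustrative:

    SELECT xml_is_well_formed('<doc><item/></doc>');   -- true
    SELECT xml_valid('<doc><item/></doc>');            -- old name, still accepted for now per the note above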
@@ -4936,7 +4936,7 @@ - Remove contrib/ora2pg/, now at contrib/ora2pg/, now at @@ -4944,21 +4944,21 @@ Remove contrib modules that have been migrated to PgFoundry: - adddepend, dbase, dbmirror, - fulltextindex, mac, userlock + adddepend, dbase, dbmirror, + fulltextindex, mac, userlock Remove abandoned contrib modules: - mSQL-interface, tips + mSQL-interface, tips - Remove QNX and BEOS ports (Bruce) + Remove QNX and BEOS ports (Bruce) @@ -5002,7 +5002,7 @@ Improve efficiency of IN + linkend="functions-comparisons">IN (list-of-expressions) clauses (Tom) @@ -5022,9 +5022,9 @@ - Add FILLFACTOR to table and index creation (ITAGAKI + Add FILLFACTOR to table and index creation (ITAGAKI Takahiro) @@ -5038,8 +5038,8 @@ Increase default values for shared_buffers - and max_fsm_pages + linkend="guc-shared-buffers">shared_buffers + and max_fsm_pages (Andrew) @@ -5074,8 +5074,8 @@ Improve the optimizer's selectivity estimates for LIKE, ILIKE, and + linkend="functions-like">LIKE, ILIKE, and regular expression operations (Tom) @@ -5085,7 +5085,7 @@ Improve planning of joins to inherited tables and UNION - ALL views (Tom) + ALL views (Tom) @@ -5093,18 +5093,18 @@ Allow constraint exclusion to be applied to inherited UPDATE and - DELETE queries (Tom) + linkend="ddl-inherit">inherited UPDATE and + DELETE queries (Tom) - SELECT already honored constraint exclusion. + SELECT already honored constraint exclusion. - Improve planning of constant WHERE clauses, such as + Improve planning of constant WHERE clauses, such as a condition that depends only on variables inherited from an outer query level (Tom) @@ -5113,7 +5113,7 @@ Protocol-level unnamed prepared statements are re-planned - for each set of BIND values (Tom) + for each set of BIND values (Tom) @@ -5132,13 +5132,13 @@ Avoid extra scan of tables without indexes during VACUUM (Greg Stark) + linkend="sql-vacuum">VACUUM (Greg Stark) - Improve multicolumn GiST + Improve multicolumn GiST indexing (Oleg, Teodor) @@ -5167,7 +5167,7 @@ This is valuable for keeping warm standby slave servers in sync with the master. Transaction log file switching now also happens automatically during pg_stop_backup(). + linkend="functions-admin">pg_stop_backup(). This ensures that all transaction log files needed for recovery can be archived immediately. @@ -5175,26 +5175,26 @@ - Add WAL informational functions (Simon) + Add WAL informational functions (Simon) Add functions for interrogating the current transaction log insertion - point and determining WAL filenames from the - hex WAL locations displayed by pg_stop_backup() + point and determining WAL filenames from the + hex WAL locations displayed by pg_stop_backup() and related functions. - Improve recovery from a crash during WAL replay (Simon) + Improve recovery from a crash during WAL replay (Simon) - The server now does periodic checkpoints during WAL - recovery, so if there is a crash, future WAL + The server now does periodic checkpoints during WAL + recovery, so if there is a crash, future WAL recovery is shortened. This also eliminates the need for warm standby servers to replay the entire log since the base backup if they crash. 
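Given the pg_stop_backup() behavior described above, a base backup bracketed like this (label arbitrary, copying done by an external tool) no longer has to wait for a natural segment switch before the last needed WAL file can be archived:

    SELECT pg_start_backup('nightly');
    -- ... copy the data directory at the filesystem level ...
    SELECT pg_stop_backup();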
@@ -5203,7 +5203,7 @@ - Improve reliability of long-term WAL replay + Improve reliability of long-term WAL replay (Heikki, Simon, Tom) @@ -5218,7 +5218,7 @@ Add archive_timeout + linkend="guc-archive-timeout">archive_timeout to force transaction log file switches at a given interval (Simon) @@ -5229,46 +5229,46 @@ - Add native LDAP + Add native LDAP authentication (Magnus Hagander) This is particularly useful for platforms that do not - support PAM, such as Windows. + support PAM, such as Windows. Add GRANT - CONNECT ON DATABASE (Gevik Babakhani) + CONNECT ON DATABASE (Gevik Babakhani) This gives SQL-level control over database access. It works as an additional filter on top of the existing - pg_hba.conf + pg_hba.conf controls. - Add support for SSL - Certificate Revocation List (CRL) files + Add support for SSL + Certificate Revocation List (CRL) files (Libor Hohoš) - The server and libpq both recognize CRL + The server and libpq both recognize CRL files now. - GiST indexes are + GiST indexes are now clusterable (Teodor) @@ -5280,7 +5280,7 @@ pg_stat_activity + linkend="monitoring-stats-views-table">pg_stat_activity now shows autovacuum activity. @@ -5304,7 +5304,7 @@ These values now appear in the pg_stat_*_tables + linkend="monitoring-stats-views-table">pg_stat_*_tables system views. @@ -5312,44 +5312,44 @@ Improve performance of statistics monitoring, especially - stats_command_string + stats_command_string (Tom, Bruce) - This release enables stats_command_string by + This release enables stats_command_string by default, now that its overhead is minimal. This means pg_stat_activity + linkend="monitoring-stats-views-table">pg_stat_activity will now show all active queries by default. - Add a waiting column to pg_stat_activity + Add a waiting column to pg_stat_activity (Tom) - This allows pg_stat_activity to show all the - information included in the ps display. + This allows pg_stat_activity to show all the + information included in the ps display. Add configuration parameter update_process_title - to control whether the ps display is updated + linkend="guc-update-process-title">update_process_title + to control whether the ps display is updated for every command (Bruce) - On platforms where it is expensive to update the ps + On platforms where it is expensive to update the ps display, it might be worthwhile to turn this off and rely solely on - pg_stat_activity for status information. + pg_stat_activity for status information. @@ -5361,15 +5361,15 @@ For example, you can now set shared_buffers - to 32MB rather than mentally converting sizes. + linkend="guc-shared-buffers">shared_buffers + to 32MB rather than mentally converting sizes. Add support for include - directives in postgresql.conf (Joachim + directives in postgresql.conf (Joachim Wieland) @@ -5384,21 +5384,21 @@ Such logging now shows statement names, bind parameter values, and the text of the query being executed. Also, the query text is properly included in logged error messages - when enabled by log_min_error_statement. + when enabled by log_min_error_statement. Prevent max_stack_depth + linkend="guc-max-stack-depth">max_stack_depth from being set to unsafe values On platforms where we can determine the actual kernel stack depth limit (which is most), make sure that the initial default value of - max_stack_depth is safe, and reject attempts to set it + max_stack_depth is safe, and reject attempts to set it to unsafely large values. 
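With the max_stack_depth safeguard above, and the new unit syntax also described above, adjusting the setting looks like this; the value is illustrative and changing it requires appropriate privileges:

    SHOW max_stack_depth;
    SET max_stack_depth = '4MB';   -- values beyond what the kernel stack limit allows are now rejected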
@@ -5418,14 +5418,14 @@ - Fix failed to re-find parent key errors in - VACUUM (Tom) + Fix failed to re-find parent key errors in + VACUUM (Tom) - Clean out pg_internal.init cache files during server + Clean out pg_internal.init cache files during server restart (Simon) @@ -5438,7 +5438,7 @@ Fix race condition for truncation of a large relation across a - gigabyte boundary by VACUUM (Tom) + gigabyte boundary by VACUUM (Tom) @@ -5475,15 +5475,15 @@ - Add INSERT/UPDATE/DELETE - RETURNING (Jonah Harris, Tom) + Add INSERT/UPDATE/DELETE + RETURNING (Jonah Harris, Tom) This allows these commands to return values, such as the - computed serial key for a new row. In the UPDATE + computed serial key for a new row. In the UPDATE case, values from the updated version of the row are returned. @@ -5491,23 +5491,23 @@ Add support for multiple-row VALUES clauses, + linkend="queries-values">VALUES clauses, per SQL standard (Joe, Tom) - This allows INSERT to insert multiple rows of + This allows INSERT to insert multiple rows of constants, or queries to generate result sets using constants. For example, INSERT ... VALUES (...), (...), - ...., and SELECT * FROM (VALUES (...), (...), - ....) AS alias(f1, ...). + ...., and SELECT * FROM (VALUES (...), (...), + ....) AS alias(f1, ...). - Allow UPDATE - and DELETE + Allow UPDATE + and DELETE to use an alias for the target table (Atsushi Ogawa) @@ -5519,7 +5519,7 @@ - Allow UPDATE + Allow UPDATE to set multiple columns with a list of values (Susanne Ebrecht) @@ -5527,7 +5527,7 @@ This is basically a short-hand for assigning the columns and values in pairs. The syntax is UPDATE tab - SET (column, ...) = (val, ...). + SET (column, ...) = (val, ...). @@ -5546,12 +5546,12 @@ - Add CASCADE - option to TRUNCATE (Joachim Wieland) + Add CASCADE + option to TRUNCATE (Joachim Wieland) - This causes TRUNCATE to automatically include all tables + This causes TRUNCATE to automatically include all tables that reference the specified table(s) via foreign keys. While convenient, this is a dangerous tool — use with caution! @@ -5559,8 +5559,8 @@ - Support FOR UPDATE and FOR SHARE - in the same SELECT + Support FOR UPDATE and FOR SHARE + in the same SELECT command (Tom) @@ -5568,21 +5568,21 @@ Add IS NOT - DISTINCT FROM (Pavel Stehule) + DISTINCT FROM (Pavel Stehule) - This operator is similar to equality (=), but + This operator is similar to equality (=), but evaluates to true when both left and right operands are - NULL, and to false when just one is, rather than - yielding NULL in these cases. + NULL, and to false when just one is, rather than + yielding NULL in these cases. Improve the length output used by UNION/INTERSECT/EXCEPT + linkend="queries-union">UNION/INTERSECT/EXCEPT (Tom) @@ -5594,13 +5594,13 @@ - Allow ILIKE + Allow ILIKE to work for multi-byte encodings (Tom) - Internally, ILIKE now calls lower() - and then uses LIKE. Locale-specific regular + Internally, ILIKE now calls lower() + and then uses LIKE. Locale-specific regular expression patterns still do not work in these encodings. @@ -5608,39 +5608,39 @@ Enable standard_conforming_strings - to be turned on (Kevin Grittner) + linkend="guc-standard-conforming-strings">standard_conforming_strings + to be turned on (Kevin Grittner) This allows backslash escaping in strings to be disabled, - making PostgreSQL more - standards-compliant. The default is off for backwards - compatibility, but future releases will default this to on. + making PostgreSQL more + standards-compliant. 
The default is off for backwards + compatibility, but future releases will default this to on. - Do not flatten subqueries that contain volatile + Do not flatten subqueries that contain volatile functions in their target lists (Jaime Casanova) This prevents surprising behavior due to multiple evaluation - of a volatile function (such as random() - or nextval()). It might cause performance + of a volatile function (such as random() + or nextval()). It might cause performance degradation in the presence of functions that are unnecessarily - marked as volatile. + marked as volatile. Add system views pg_prepared_statements + linkend="view-pg-prepared-statements">pg_prepared_statements and pg_cursors + linkend="view-pg-cursors">pg_cursors to show prepared statements and open cursors (Joachim Wieland, Neil) @@ -5652,32 +5652,32 @@ Support portal parameters in EXPLAIN and EXECUTE (Tom) + linkend="sql-explain">EXPLAIN and EXECUTE (Tom) - This allows, for example, JDBC ? parameters to + This allows, for example, JDBC ? parameters to work in these commands. - If SQL-level PREPARE parameters + If SQL-level PREPARE parameters are unspecified, infer their types from the content of the query (Neil) - Protocol-level PREPARE already did this. + Protocol-level PREPARE already did this. - Allow LIMIT and OFFSET to exceed + Allow LIMIT and OFFSET to exceed two billion (Dhanaraj M) @@ -5692,8 +5692,8 @@ - Add TABLESPACE clause to CREATE TABLE AS + Add TABLESPACE clause to CREATE TABLE AS (Neil) @@ -5704,8 +5704,8 @@ - Add ON COMMIT clause to CREATE TABLE AS + Add ON COMMIT clause to CREATE TABLE AS (Neil) @@ -5718,13 +5718,13 @@ - Add INCLUDING CONSTRAINTS to CREATE TABLE LIKE + Add INCLUDING CONSTRAINTS to CREATE TABLE LIKE (Greg Stark) - This allows easy copying of CHECK constraints to a new + This allows easy copying of CHECK constraints to a new table. @@ -5732,7 +5732,7 @@ Allow the creation of placeholder (shell) types (Martijn van Oosterhout) + linkend="sql-createtype">types (Martijn van Oosterhout) @@ -5740,14 +5740,14 @@ any of the details of the type. Making a shell type is useful because it allows cleaner declaration of the type's input/output functions, which must exist before the type can be defined for - real. The syntax is CREATE TYPE typename. + real. The syntax is CREATE TYPE typename. - Aggregate functions + Aggregate functions now support multiple input parameters (Sergey Koposov, Tom) @@ -5755,13 +5755,13 @@ Add new aggregate creation syntax (Tom) + linkend="sql-createaggregate">syntax (Tom) The new syntax is CREATE AGGREGATE - aggname (input_type) - (parameter_list). This more + aggname (input_type) + (parameter_list). This more naturally supports the new multi-parameter aggregate functionality. The previous syntax is still supported. @@ -5770,77 +5770,77 @@ Add ALTER ROLE PASSWORD NULL + linkend="sql-alterrole">ALTER ROLE PASSWORD NULL to remove a previously set role password (Peter) - Add DROP object IF EXISTS for many + Add DROP object IF EXISTS for many object types (Andrew) - This allows DROP operations on non-existent + This allows DROP operations on non-existent objects without generating an error. - Add DROP OWNED + Add DROP OWNED to drop all objects owned by a role (Alvaro) - Add REASSIGN - OWNED to reassign ownership of all objects owned + Add REASSIGN + OWNED to reassign ownership of all objects owned by a role (Alvaro) - This, and DROP OWNED above, facilitate dropping + This, and DROP OWNED above, facilitate dropping roles. 
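The usual retirement sequence for a role using the two commands above; the role names are hypothetical:

    REASSIGN OWNED BY old_owner TO new_owner;   -- hand surviving objects to another role
    DROP OWNED BY old_owner;                    -- drop what is left and revoke its privileges
    DROP ROLE old_owner;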
- Add GRANT ON SEQUENCE + Add GRANT ON SEQUENCE syntax (Bruce) This was added for setting sequence-specific permissions. - GRANT ON TABLE for sequences is still supported + GRANT ON TABLE for sequences is still supported for backward compatibility. - Add USAGE - permission for sequences that allows only currval() - and nextval(), not setval() + Add USAGE + permission for sequences that allows only currval() + and nextval(), not setval() (Bruce) - USAGE permission allows more fine-grained - control over sequence access. Granting USAGE + USAGE permission allows more fine-grained + control over sequence access. Granting USAGE allows users to increment a sequence, but prevents them from setting the sequence to - an arbitrary value using setval(). + an arbitrary value using setval(). - Add ALTER TABLE - [ NO ] INHERIT (Greg Stark) + Add ALTER TABLE + [ NO ] INHERIT (Greg Stark) @@ -5852,7 +5852,7 @@ - Allow comments on global + Allow comments on global objects to be stored globally (Kris Jurka) @@ -5881,8 +5881,8 @@ - The new syntax is CREATE - INDEX CONCURRENTLY. The default behavior is + The new syntax is CREATE + INDEX CONCURRENTLY. The default behavior is still to block table modification while an index is being created. @@ -5902,20 +5902,20 @@ - Allow COPY to - dump a SELECT query (Zoltan Boszormenyi, Karel + Allow COPY to + dump a SELECT query (Zoltan Boszormenyi, Karel Zak) - This allows COPY to dump arbitrary SQL - queries. The syntax is COPY (SELECT ...) TO. + This allows COPY to dump arbitrary SQL + queries. The syntax is COPY (SELECT ...) TO. - Make the COPY + Make the COPY command return a command tag that includes the number of rows copied (Volkan YAZICI) @@ -5923,29 +5923,29 @@ - Allow VACUUM + Allow VACUUM to expire rows without being affected by other concurrent - VACUUM operations (Hannu Krossing, Alvaro, Tom) + VACUUM operations (Hannu Krossing, Alvaro, Tom) - Make initdb + Make initdb detect the operating system locale and set the default - DateStyle accordingly (Peter) + DateStyle accordingly (Peter) This makes it more likely that the installed - postgresql.conf DateStyle value will + postgresql.conf DateStyle value will be as desired. - Reduce number of progress messages displayed by initdb (Tom) + Reduce number of progress messages displayed by initdb (Tom) @@ -5960,13 +5960,13 @@ Allow full timezone names in timestamp input values + linkend="datatype-datetime">timestamp input values (Joachim Wieland) For example, '2006-05-24 21:11 - America/New_York'::timestamptz. + America/New_York'::timestamptz. @@ -5978,16 +5978,16 @@ A desired set of timezone abbreviations can be chosen via the configuration parameter timezone_abbreviations. + linkend="guc-timezone-abbreviations">timezone_abbreviations. Add pg_timezone_abbrevs + linkend="view-pg-timezone-abbrevs">pg_timezone_abbrevs and pg_timezone_names + linkend="view-pg-timezone-names">pg_timezone_names views to show supported timezones (Magnus Hagander) @@ -5995,27 +5995,27 @@ Add clock_timestamp(), + linkend="functions-datetime-table">clock_timestamp(), statement_timestamp(), + linkend="functions-datetime-table">statement_timestamp(), and transaction_timestamp() + linkend="functions-datetime-table">transaction_timestamp() (Bruce) - clock_timestamp() is the current wall-clock time, - statement_timestamp() is the time the current + clock_timestamp() is the current wall-clock time, + statement_timestamp() is the time the current statement arrived at the server, and - transaction_timestamp() is an alias for - now(). 
+ transaction_timestamp() is an alias for + now(). Allow to_char() + linkend="functions-formatting">to_char() to print localized month and day names (Euler Taveira de Oliveira) @@ -6024,23 +6024,23 @@ Allow to_char(time) + linkend="functions-formatting">to_char(time) and to_char(interval) - to output AM/PM specifications + linkend="functions-formatting">to_char(interval) + to output AM/PM specifications (Bruce) Intervals and times are treated as 24-hour periods, e.g. - 25 hours is considered AM. + 25 hours is considered AM. Add new function justify_interval() + linkend="functions-datetime-table">justify_interval() to adjust interval units (Mark Dilger) @@ -6071,7 +6071,7 @@ - Allow arrays to contain NULL elements (Tom) + Allow arrays to contain NULL elements (Tom) @@ -6090,13 +6090,13 @@ New built-in operators - for array-subset comparisons (@>, - <@, &&) (Teodor, Tom) + for array-subset comparisons (@>, + <@, &&) (Teodor, Tom) These operators can be indexed for many data types using - GiST or GIN indexes. + GiST or GIN indexes. @@ -6104,15 +6104,15 @@ Add convenient arithmetic operations on - INET/CIDR values (Stephen R. van den + INET/CIDR values (Stephen R. van den Berg) - The new operators are & (and), | - (or), ~ (not), inet + int8, - inet - int8, and - inet - inet. + The new operators are & (and), | + (or), ~ (not), inet + int8, + inet - int8, and + inet - inet. @@ -6124,12 +6124,12 @@ - The new functions are var_pop(), - var_samp(), stddev_pop(), and - stddev_samp(). var_samp() and - stddev_samp() are merely renamings of the - existing aggregates variance() and - stddev(). The latter names remain available + The new functions are var_pop(), + var_samp(), stddev_pop(), and + stddev_samp(). var_samp() and + stddev_samp() are merely renamings of the + existing aggregates variance() and + stddev(). The latter names remain available for backward compatibility. @@ -6142,19 +6142,19 @@ - New functions: regr_intercept(), - regr_slope(), regr_r2(), - corr(), covar_samp(), - covar_pop(), regr_avgx(), - regr_avgy(), regr_sxy(), - regr_sxx(), regr_syy(), - regr_count(). + New functions: regr_intercept(), + regr_slope(), regr_r2(), + corr(), covar_samp(), + covar_pop(), regr_avgx(), + regr_avgy(), regr_sxy(), + regr_sxx(), regr_syy(), + regr_count(). - Allow domains to be + Allow domains to be based on other domains (Tom) @@ -6162,7 +6162,7 @@ Properly enforce domain CHECK constraints + linkend="ddl-constraints">CHECK constraints everywhere (Neil, Tom) @@ -6177,24 +6177,24 @@ Fix problems with dumping renamed SERIAL columns + linkend="datatype-serial">SERIAL columns (Tom) - The fix is to dump a SERIAL column by explicitly - specifying its DEFAULT and sequence elements, - and reconstructing the SERIAL column on reload - using a new ALTER - SEQUENCE OWNED BY command. This also allows - dropping a SERIAL column specification. + The fix is to dump a SERIAL column by explicitly + specifying its DEFAULT and sequence elements, + and reconstructing the SERIAL column on reload + using a new ALTER + SEQUENCE OWNED BY command. This also allows + dropping a SERIAL column specification. 
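Roughly, the dump output for such a column now takes the following shape (object names here are hypothetical; the exact text is produced by pg_dump):

    CREATE SEQUENCE t_renamed_col_seq;
    CREATE TABLE t (
        renamed_col integer NOT NULL DEFAULT nextval('t_renamed_col_seq')
    );
    ALTER SEQUENCE t_renamed_col_seq OWNED BY t.renamed_col;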
Add a server-side sleep function pg_sleep() + linkend="functions-datetime-delay">pg_sleep() (Joachim Wieland) @@ -6202,7 +6202,7 @@ Add all comparison operators for the tid (tuple id) data + linkend="datatype-oid">tid (tuple id) data type (Mark Kirkwood, Greg Stark, Tom) @@ -6217,12 +6217,12 @@ - Add TG_table_name and TG_table_schema to + Add TG_table_name and TG_table_schema to trigger parameters (Andrew) - TG_relname is now deprecated. Comparable + TG_relname is now deprecated. Comparable changes have been made in the trigger parameters for the other PLs as well. @@ -6230,29 +6230,29 @@ - Allow FOR statements to return values to scalars + Allow FOR statements to return values to scalars as well as records and row types (Pavel Stehule) - Add a BY clause to the FOR loop, + Add a BY clause to the FOR loop, to control the iteration increment (Jaime Casanova) - Add STRICT to STRICT to SELECT - INTO (Matt Miller) + INTO (Matt Miller) - STRICT mode throws an exception if more or less - than one row is returned by the SELECT, for - Oracle PL/SQL compatibility. + STRICT mode throws an exception if more or less + than one row is returned by the SELECT, for + Oracle PL/SQL compatibility. @@ -6266,7 +6266,7 @@ - Add table_name and table_schema to + Add table_name and table_schema to trigger parameters (Adam Sjøgren) @@ -6279,7 +6279,7 @@ - Make $_TD trigger data a global variable (Andrew) + Make $_TD trigger data a global variable (Andrew) @@ -6312,13 +6312,13 @@ Named parameters are passed as ordinary variables, as well as in the - args[] array (Sven Suursoho) + args[] array (Sven Suursoho) - Add table_name and table_schema to + Add table_name and table_schema to trigger parameters (Andrew) @@ -6331,14 +6331,14 @@ - Return result-set as list, iterator, - or generator (Sven Suursoho) + Return result-set as list, iterator, + or generator (Sven Suursoho) - Allow functions to return void (Neil) + Allow functions to return void (Neil) @@ -6353,40 +6353,40 @@ - <link linkend="APP-PSQL"><application>psql</></link> Changes + <link linkend="app-psql"><application>psql</application></link> Changes - Add new command \password for changing role + Add new command \password for changing role password with client-side password encryption (Peter) - Allow \c to connect to a new host and port + Allow \c to connect to a new host and port number (David, Volkan YAZICI) - Add tablespace display to \l+ (Philip Yarra) + Add tablespace display to \l+ (Philip Yarra) - Improve \df slash command to include the argument - names and modes (OUT or INOUT) of + Improve \df slash command to include the argument + names and modes (OUT or INOUT) of the function (David Fetter) - Support binary COPY (Andreas Pflug) + Support binary COPY (Andreas Pflug) @@ -6397,21 +6397,21 @@ - Use option -1 or --single-transaction. + Use option -1 or --single-transaction. - Support for automatically retrieving SELECT + Support for automatically retrieving SELECT results in batches using a cursor (Chris Mair) This is enabled using \set FETCH_COUNT - n. This + n. This feature allows large result sets to be retrieved in - psql without attempting to buffer the entire + psql without attempting to buffer the entire result set in memory. 
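A minimal usage sketch of the cursor-based fetching described above (the table name is a placeholder):

    \set FETCH_COUNT 1000
    SELECT * FROM big_table;   -- psql now fetches and prints 1000 rows at a time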
@@ -6451,8 +6451,8 @@ Report both the returned data and the command status tag - for INSERT/UPDATE/DELETE - RETURNING (Tom) + for INSERT/UPDATE/DELETE + RETURNING (Tom) @@ -6461,31 +6461,31 @@ - <link linkend="APP-PGDUMP"><application>pg_dump</></link> Changes + <link linkend="app-pgdump"><application>pg_dump</application></link> Changes Allow complex selection of objects to be included or excluded - by pg_dump (Greg Sabino Mullane) + by pg_dump (Greg Sabino Mullane) - pg_dump now supports multiple -n - (schema) and -t (table) options, and adds - -N and -T options to exclude objects. + pg_dump now supports multiple -n + (schema) and -t (table) options, and adds + -N and -T options to exclude objects. Also, the arguments of these switches can now be wild-card expressions rather than single object names, for example - -t 'foo*', and a schema can be part of - a -t or -T switch, for example - -t schema1.table1. + -t 'foo*', and a schema can be part of + a -t or -T switch, for example + -t schema1.table1. - Add pg_restore - --no-data-for-failed-tables option to suppress + Add pg_restore + --no-data-for-failed-tables option to suppress loading data if table creation failed (i.e., the table already exists) (Martin Pitt) @@ -6493,13 +6493,13 @@ - Add pg_restore + Add pg_restore option to run the entire session in a single transaction (Simon) - Use option -1 or --single-transaction. + Use option -1 or --single-transaction. @@ -6508,27 +6508,27 @@ - <link linkend="libpq"><application>libpq</></link> Changes + <link linkend="libpq"><application>libpq</application></link> Changes Add PQencryptPassword() + linkend="libpq-misc">PQencryptPassword() to encrypt passwords (Tom) This allows passwords to be sent pre-encrypted for commands - like ALTER ROLE ... - PASSWORD. + like ALTER ROLE ... + PASSWORD. Add function PQisthreadsafe() + linkend="libpq-threading">PQisthreadsafe() (Bruce) @@ -6541,9 +6541,9 @@ Add PQdescribePrepared(), + linkend="libpq-exec-main">PQdescribePrepared(), PQdescribePortal(), + linkend="libpq-exec-main">PQdescribePortal(), and related functions to return information about previously prepared statements and open cursors (Volkan YAZICI) @@ -6551,9 +6551,9 @@ - Allow LDAP lookups + Allow LDAP lookups from pg_service.conf + linkend="libpq-pgservice">pg_service.conf (Laurenz Albe) @@ -6561,7 +6561,7 @@ Allow a hostname in ~/.pgpass + linkend="libpq-pgpass">~/.pgpass to match the default socket directory (Bruce) @@ -6577,19 +6577,19 @@ - <link linkend="ecpg"><application>ecpg</></link> Changes + <link linkend="ecpg"><application>ecpg</application></link> Changes - Allow SHOW to + Allow SHOW to put its result into a variable (Joachim Wieland) - Add COPY TO STDOUT + Add COPY TO STDOUT (Joachim Wieland) @@ -6611,28 +6611,28 @@ - <application>Windows</> Port + <application>Windows</application> Port - Allow MSVC to compile the PostgreSQL + Allow MSVC to compile the PostgreSQL server (Magnus, Hiroshi Saito) - Add MSVC support for utility commands and pg_dump (Hiroshi + Add MSVC support for utility commands and pg_dump (Hiroshi Saito) - Add support for Windows code pages 1253, - 1254, 1255, and 1257 + Add support for Windows code pages 1253, + 1254, 1255, and 1257 (Kris Jurka) @@ -6670,7 +6670,7 @@ - Add GIN (Generalized + Add GIN (Generalized Inverted iNdex) index access method (Teodor, Oleg) @@ -6682,7 +6682,7 @@ Rtree has been re-implemented using GiST. Among other + linkend="gist">GiST. 
Among other differences, this means that rtree indexes now have support for crash recovery via write-ahead logging (WAL). @@ -6698,12 +6698,12 @@ Add a configure flag to allow libedit to be preferred over - GNU readline (Bruce) + GNU readline (Bruce) Use configure --with-libedit-preferred. + linkend="configure">--with-libedit-preferred. @@ -6722,21 +6722,21 @@ - Add support for Solaris x86_64 using the - Solaris compiler (Pierre Girard, Theo + Add support for Solaris x86_64 using the + Solaris compiler (Pierre Girard, Theo Schlossnagle, Bruce) - Add DTrace support (Robert Lor) + Add DTrace support (Robert Lor) - Add PG_VERSION_NUM for use by third-party + Add PG_VERSION_NUM for use by third-party applications wanting to test the backend version in C using > and < comparisons (Bruce) @@ -6744,37 +6744,37 @@ - Add XLOG_BLCKSZ as independent from BLCKSZ + Add XLOG_BLCKSZ as independent from BLCKSZ (Mark Wong) - Add LWLOCK_STATS define to report locking + Add LWLOCK_STATS define to report locking activity (Tom) - Emit warnings for unknown configure options + Emit warnings for unknown configure options (Martijn van Oosterhout) - Add server support for plugin libraries + Add server support for plugin libraries that can be used for add-on tasks such as debugging and performance measurement (Korry Douglas) This consists of two features: a table of rendezvous - variables that allows separately-loaded shared libraries to + variables that allows separately-loaded shared libraries to communicate, and a new configuration parameter local_preload_libraries + linkend="guc-local-preload-libraries">local_preload_libraries that allows libraries to be loaded into specific sessions without explicit cooperation from the client application. This allows external add-ons to implement features such as a PL/pgSQL debugger. @@ -6784,27 +6784,27 @@ Rename existing configuration parameter - preload_libraries to shared_preload_libraries + preload_libraries to shared_preload_libraries (Tom) This was done for clarity in comparison to - local_preload_libraries. + local_preload_libraries. Add new configuration parameter server_version_num + linkend="guc-server-version-num">server_version_num (Greg Sabino Mullane) This is like server_version, but is an - integer, e.g. 80200. This allows applications to + integer, e.g. 80200. This allows applications to make version checks more easily. @@ -6812,7 +6812,7 @@ Add a configuration parameter seq_page_cost + linkend="guc-seq-page-cost">seq_page_cost (Tom) @@ -6839,11 +6839,11 @@ New functions - _PG_init() and _PG_fini() are + _PG_init() and _PG_fini() are called if the library defines such symbols. Hence we no longer need to specify an initialization function in - shared_preload_libraries; we can assume that - the library used the _PG_init() convention + shared_preload_libraries; we can assume that + the library used the _PG_init() convention instead. 
@@ -6851,7 +6851,7 @@ Add PG_MODULE_MAGIC + linkend="xfunc-c-dynload">PG_MODULE_MAGIC header block to all shared object files (Martijn van Oosterhout) @@ -6870,7 +6870,7 @@ - New XML + New XML documentation section (Bruce) @@ -6892,7 +6892,7 @@ - multibyte encoding support, including UTF8 + multibyte encoding support, including UTF8 @@ -6912,13 +6912,13 @@ - Ispell dictionaries now recognize MySpell - format, used by OpenOffice + Ispell dictionaries now recognize MySpell + format, used by OpenOffice - GIN support + GIN support @@ -6928,13 +6928,13 @@ - Add adminpack module containing Pgadmin administration + Add adminpack module containing Pgadmin administration functions (Dave) These functions provide additional file system access - routines not present in the default PostgreSQL + routines not present in the default PostgreSQL server. @@ -6945,7 +6945,7 @@ - Reports information about the current connection's SSL + Reports information about the current connection's SSL certificate. @@ -6972,9 +6972,9 @@ - This new implementation supports EAN13, UPC, - ISBN (books), ISMN (music), and - ISSN (serials). + This new implementation supports EAN13, UPC, + ISBN (books), ISMN (music), and + ISSN (serials). @@ -7034,9 +7034,9 @@ - New functions are cube(float[]), - cube(float[], float[]), and - cube_subset(cube, int4[]). + New functions are cube(float[]), + cube(float[], float[]), and + cube_subset(cube, int4[]). @@ -7049,8 +7049,8 @@ - New operators for array-subset comparisons (@>, - <@, &&) (Tom) + New operators for array-subset comparisons (@>, + <@, &&) (Tom) diff --git a/doc/src/sgml/release-8.3.sgml b/doc/src/sgml/release-8.3.sgml index a82410d057..021922966b 100644 --- a/doc/src/sgml/release-8.3.sgml +++ b/doc/src/sgml/release-8.3.sgml @@ -12,11 +12,11 @@ This release contains a variety of fixes from 8.3.22. For information about new features in the 8.3 major release, see - . + . - This is expected to be the last PostgreSQL release + This is expected to be the last PostgreSQL release in the 8.3.X series. Users are encouraged to update to a newer release branch soon. @@ -30,7 +30,7 @@ However, if you are upgrading from a version earlier than 8.3.17, - see . + see . @@ -42,7 +42,7 @@ - Prevent execution of enum_recv from SQL (Tom Lane) + Prevent execution of enum_recv from SQL (Tom Lane) @@ -63,19 +63,19 @@ Protect against race conditions when scanning - pg_tablespace (Stephen Frost, Tom Lane) + pg_tablespace (Stephen Frost, Tom Lane) - CREATE DATABASE and DROP DATABASE could + CREATE DATABASE and DROP DATABASE could misbehave if there were concurrent updates of - pg_tablespace entries. + pg_tablespace entries. - Prevent DROP OWNED from trying to drop whole databases or + Prevent DROP OWNED from trying to drop whole databases or tablespaces (Álvaro Herrera) @@ -86,13 +86,13 @@ - Prevent misbehavior when a RowExpr or XmlExpr + Prevent misbehavior when a RowExpr or XmlExpr is parse-analyzed twice (Andres Freund, Tom Lane) This mistake could be user-visible in contexts such as - CREATE TABLE LIKE INCLUDING INDEXES. + CREATE TABLE LIKE INCLUDING INDEXES. @@ -110,26 +110,26 @@ - This bug affected psql and some other client programs. + This bug affected psql and some other client programs. - Fix possible crash in psql's \? command + Fix possible crash in psql's \? 
command when not connected to a database (Meng Qingzhong) - Fix one-byte buffer overrun in libpq's - PQprintTuples (Xi Wang) + Fix one-byte buffer overrun in libpq's + PQprintTuples (Xi Wang) This ancient function is not used anywhere by - PostgreSQL itself, but it might still be used by some + PostgreSQL itself, but it might still be used by some client code. @@ -149,15 +149,15 @@ - Make pgxs build executables with the right - .exe suffix when cross-compiling for Windows + Make pgxs build executables with the right + .exe suffix when cross-compiling for Windows (Zoltan Boszormenyi) - Add new timezone abbreviation FET (Tom Lane) + Add new timezone abbreviation FET (Tom Lane) @@ -181,11 +181,11 @@ This release contains a variety of fixes from 8.3.21. For information about new features in the 8.3 major release, see - . + . - The PostgreSQL community will stop releasing updates + The PostgreSQL community will stop releasing updates for the 8.3.X release series in February 2013. Users are encouraged to update to a newer release branch soon. @@ -199,7 +199,7 @@ However, if you are upgrading from a version earlier than 8.3.17, - see . + see . @@ -212,13 +212,13 @@ Fix multiple bugs associated with CREATE INDEX - CONCURRENTLY (Andres Freund, Tom Lane) + CONCURRENTLY (Andres Freund, Tom Lane) - Fix CREATE INDEX CONCURRENTLY to use + Fix CREATE INDEX CONCURRENTLY to use in-place updates when changing the state of an index's - pg_index row. This prevents race conditions that could + pg_index row. This prevents race conditions that could cause concurrent sessions to miss updating the target index, thus resulting in corrupt concurrently-created indexes. @@ -226,8 +226,8 @@ Also, fix various other operations to ensure that they ignore invalid indexes resulting from a failed CREATE INDEX - CONCURRENTLY command. The most important of these is - VACUUM, because an auto-vacuum could easily be launched + CONCURRENTLY command. The most important of these is + VACUUM, because an auto-vacuum could easily be launched on the table before corrective action can be taken to fix or remove the invalid index. @@ -249,8 +249,8 @@ The planner could derive incorrect constraints from a clause equating a non-strict construct to something else, for example - WHERE COALESCE(foo, 0) = 0 - when foo is coming from the nullable side of an outer join. + WHERE COALESCE(foo, 0) = 0 + when foo is coming from the nullable side of an outer join. @@ -268,10 +268,10 @@ - This affects multicolumn NOT IN subplans, such as - WHERE (a, b) NOT IN (SELECT x, y FROM ...) - when for instance b and y are int4 - and int8 respectively. This mistake led to wrong answers + This affects multicolumn NOT IN subplans, such as + WHERE (a, b) NOT IN (SELECT x, y FROM ...) + when for instance b and y are int4 + and int8 respectively. This mistake led to wrong answers or crashes depending on the specific datatypes involved. 
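The affected query shape, spelled out with hypothetical table and column names:

    SELECT *
      FROM t1
     WHERE (a, b) NOT IN (SELECT x, y FROM t2);   -- e.g. b is int4 while y is int8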
@@ -279,7 +279,7 @@ Acquire buffer lock when re-fetching the old tuple for an - AFTER ROW UPDATE/DELETE trigger (Andres Freund) + AFTER ROW UPDATE/DELETE trigger (Andres Freund) @@ -292,14 +292,14 @@ - Fix REASSIGN OWNED to handle grants on tablespaces + Fix REASSIGN OWNED to handle grants on tablespaces (Álvaro Herrera) - Ignore incorrect pg_attribute entries for system + Ignore incorrect pg_attribute entries for system columns for views (Tom Lane) @@ -313,7 +313,7 @@ - Fix rule printing to dump INSERT INTO table + Fix rule printing to dump INSERT INTO table DEFAULT VALUES correctly (Tom Lane) @@ -321,7 +321,7 @@ Guard against stack overflow when there are too many - UNION/INTERSECT/EXCEPT clauses + UNION/INTERSECT/EXCEPT clauses in a query (Tom Lane) @@ -349,7 +349,7 @@ Formerly, this would result in something quite unhelpful, such as - Non-recoverable failure in name resolution. + Non-recoverable failure in name resolution. @@ -362,8 +362,8 @@ - Make pg_ctl more robust about reading the - postmaster.pid file (Heikki Linnakangas) + Make pg_ctl more robust about reading the + postmaster.pid file (Heikki Linnakangas) @@ -373,33 +373,33 @@ - Fix possible crash in psql if incorrectly-encoded data - is presented and the client_encoding setting is a + Fix possible crash in psql if incorrectly-encoded data + is presented and the client_encoding setting is a client-only encoding, such as SJIS (Jiang Guiqing) - Fix bugs in the restore.sql script emitted by - pg_dump in tar output format (Tom Lane) + Fix bugs in the restore.sql script emitted by + pg_dump in tar output format (Tom Lane) The script would fail outright on tables whose names include upper-case characters. Also, make the script capable of restoring - data in mode as well as the regular COPY mode. - Fix pg_restore to accept POSIX-conformant - tar files (Brian Weaver, Tom Lane) + Fix pg_restore to accept POSIX-conformant + tar files (Brian Weaver, Tom Lane) - The original coding of pg_dump's tar + The original coding of pg_dump's tar output mode produced files that are not fully conformant with the POSIX standard. This has been corrected for version 9.3. This patch updates previous branches so that they will accept both the @@ -410,41 +410,41 @@ - Fix pg_resetxlog to locate postmaster.pid + Fix pg_resetxlog to locate postmaster.pid correctly when given a relative path to the data directory (Tom Lane) - This mistake could lead to pg_resetxlog not noticing + This mistake could lead to pg_resetxlog not noticing that there is an active postmaster using the data directory. - Fix libpq's lo_import() and - lo_export() functions to report file I/O errors properly + Fix libpq's lo_import() and + lo_export() functions to report file I/O errors properly (Tom Lane) - Fix ecpg's processing of nested structure pointer + Fix ecpg's processing of nested structure pointer variables (Muhammad Usama) - Make contrib/pageinspect's btree page inspection + Make contrib/pageinspect's btree page inspection functions take buffer locks while examining pages (Tom Lane) - Fix pgxs support for building loadable modules on AIX + Fix pgxs support for building loadable modules on AIX (Tom Lane) @@ -455,7 +455,7 @@ - Update time zone data files to tzdata release 2012j + Update time zone data files to tzdata release 2012j for DST law changes in Cuba, Israel, Jordan, Libya, Palestine, Western Samoa, and portions of Brazil. @@ -477,11 +477,11 @@ This release contains a variety of fixes from 8.3.20. For information about new features in the 8.3 major release, see - . + . 
- The PostgreSQL community will stop releasing updates + The PostgreSQL community will stop releasing updates for the 8.3.X release series in February 2013. Users are encouraged to update to a newer release branch soon. @@ -495,7 +495,7 @@ However, if you are upgrading from a version earlier than 8.3.17, - see . + see . @@ -524,22 +524,22 @@ - If we revoke a grant option from some role X, but - X still holds that option via a grant from someone + If we revoke a grant option from some role X, but + X still holds that option via a grant from someone else, we should not recursively revoke the corresponding privilege - from role(s) Y that X had granted it + from role(s) Y that X had granted it to. - Fix handling of SIGFPE when PL/Perl is in use (Andres Freund) + Fix handling of SIGFPE when PL/Perl is in use (Andres Freund) - Perl resets the process's SIGFPE handler to - SIG_IGN, which could result in crashes later on. Restore + Perl resets the process's SIGFPE handler to + SIG_IGN, which could result in crashes later on. Restore the normal Postgres signal handler after initializing PL/Perl. @@ -558,7 +558,7 @@ Some Linux distributions contain an incorrect version of - pthread.h that results in incorrect compiled code in + pthread.h that results in incorrect compiled code in PL/Perl, leading to crashes if a PL/Perl function calls another one that throws an error. @@ -566,7 +566,7 @@ - Update time zone data files to tzdata release 2012f + Update time zone data files to tzdata release 2012f for DST law changes in Fiji @@ -587,11 +587,11 @@ This release contains a variety of fixes from 8.3.19. For information about new features in the 8.3 major release, see - . + . - The PostgreSQL community will stop releasing updates + The PostgreSQL community will stop releasing updates for the 8.3.X release series in February 2013. Users are encouraged to update to a newer release branch soon. @@ -605,7 +605,7 @@ However, if you are upgrading from a version earlier than 8.3.17, - see . + see . @@ -622,7 +622,7 @@ - xml_parse() would attempt to fetch external files or + xml_parse() would attempt to fetch external files or URLs as needed to resolve DTD and entity references in an XML value, thus allowing unprivileged database users to attempt to fetch data with the privileges of the database server. While the external data @@ -635,22 +635,22 @@ - Prevent access to external files/URLs via contrib/xml2's - xslt_process() (Peter Eisentraut) + Prevent access to external files/URLs via contrib/xml2's + xslt_process() (Peter Eisentraut) - libxslt offers the ability to read and write both + libxslt offers the ability to read and write both files and URLs through stylesheet commands, thus allowing unprivileged database users to both read and write data with the privileges of the database server. Disable that through proper use - of libxslt's security options. (CVE-2012-3488) + of libxslt's security options. (CVE-2012-3488) - Also, remove xslt_process()'s ability to fetch documents + Also, remove xslt_process()'s ability to fetch documents and stylesheets from external files/URLs. While this was a - documented feature, it was long regarded as a bad idea. + documented feature, it was long regarded as a bad idea. The fix for CVE-2012-3489 broke that capability, and rather than expend effort on trying to fix it, we're just going to summarily remove it. 
@@ -678,22 +678,22 @@ - If ALTER SEQUENCE was executed on a freshly created or - reset sequence, and then precisely one nextval() call + If ALTER SEQUENCE was executed on a freshly created or + reset sequence, and then precisely one nextval() call was made on it, and then the server crashed, WAL replay would restore the sequence to a state in which it appeared that no - nextval() had been done, thus allowing the first + nextval() had been done, thus allowing the first sequence value to be returned again by the next - nextval() call. In particular this could manifest for - serial columns, since creation of a serial column's sequence - includes an ALTER SEQUENCE OWNED BY step. + nextval() call. In particular this could manifest for + serial columns, since creation of a serial column's sequence + includes an ALTER SEQUENCE OWNED BY step. - Ensure the backup_label file is fsync'd after - pg_start_backup() (Dave Kerr) + Ensure the backup_label file is fsync'd after + pg_start_backup() (Dave Kerr) @@ -718,7 +718,7 @@ The original coding could allow inconsistent behavior in some cases; in particular, an autovacuum could get canceled after less than - deadlock_timeout grace period. + deadlock_timeout grace period. @@ -730,7 +730,7 @@ - Fix log collector so that log_truncate_on_rotation works + Fix log collector so that log_truncate_on_rotation works during the very first log rotation after server start (Tom Lane) @@ -738,24 +738,24 @@ Ensure that a whole-row reference to a subquery doesn't include any - extra GROUP BY or ORDER BY columns (Tom Lane) + extra GROUP BY or ORDER BY columns (Tom Lane) - Disallow copying whole-row references in CHECK - constraints and index definitions during CREATE TABLE + Disallow copying whole-row references in CHECK + constraints and index definitions during CREATE TABLE (Tom Lane) - This situation can arise in CREATE TABLE with - LIKE or INHERITS. The copied whole-row + This situation can arise in CREATE TABLE with + LIKE or INHERITS. The copied whole-row variable was incorrectly labeled with the row type of the original table not the new one. Rejecting the case seems reasonable for - LIKE, since the row types might well diverge later. For - INHERITS we should ideally allow it, with an implicit + LIKE, since the row types might well diverge later. For + INHERITS we should ideally allow it, with an implicit coercion to the parent table's row type; but that will require more work than seems safe to back-patch. @@ -763,7 +763,7 @@ - Fix memory leak in ARRAY(SELECT ...) subqueries (Heikki + Fix memory leak in ARRAY(SELECT ...) subqueries (Heikki Linnakangas, Tom Lane) @@ -775,21 +775,21 @@ The code could get confused by quantified parenthesized - subexpressions, such as ^(foo)?bar. This would lead to + subexpressions, such as ^(foo)?bar. This would lead to incorrect index optimization of searches for such patterns. - Report errors properly in contrib/xml2's - xslt_process() (Tom Lane) + Report errors properly in contrib/xml2's + xslt_process() (Tom Lane) - Update time zone data files to tzdata release 2012e + Update time zone data files to tzdata release 2012e for DST law changes in Morocco and Tokelau @@ -810,7 +810,7 @@ This release contains a variety of fixes from 8.3.18. For information about new features in the 8.3 major release, see - . + . @@ -822,7 +822,7 @@ However, if you are upgrading from a version earlier than 8.3.17, - see . + see . 
@@ -835,12 +835,12 @@ Fix incorrect password transformation in - contrib/pgcrypto's DES crypt() function + contrib/pgcrypto's DES crypt() function (Solar Designer) - If a password string contained the byte value 0x80, the + If a password string contained the byte value 0x80, the remainder of the password was ignored, causing the password to be much weaker than it appeared. With this fix, the rest of the string is properly included in the DES hash. Any stored password values that are @@ -851,7 +851,7 @@ - Ignore SECURITY DEFINER and SET attributes for + Ignore SECURITY DEFINER and SET attributes for a procedural language's call handler (Tom Lane) @@ -863,7 +863,7 @@ - Allow numeric timezone offsets in timestamp input to be up to + Allow numeric timezone offsets in timestamp input to be up to 16 hours away from UTC (Tom Lane) @@ -889,7 +889,7 @@ - Fix text to name and char to name + Fix text to name and char to name casts to perform string truncation correctly in multibyte encodings (Karl Schnaitter) @@ -897,19 +897,19 @@ - Fix memory copying bug in to_tsquery() (Heikki Linnakangas) + Fix memory copying bug in to_tsquery() (Heikki Linnakangas) - Fix slow session startup when pg_attribute is very large + Fix slow session startup when pg_attribute is very large (Tom Lane) - If pg_attribute exceeds one-fourth of - shared_buffers, cache rebuilding code that is sometimes + If pg_attribute exceeds one-fourth of + shared_buffers, cache rebuilding code that is sometimes needed during session start would trigger the synchronized-scan logic, causing it to take many times longer than normal. The problem was particularly acute if many new sessions were starting at once. @@ -930,8 +930,8 @@ - Ensure the Windows implementation of PGSemaphoreLock() - clears ImmediateInterruptOK before returning (Tom Lane) + Ensure the Windows implementation of PGSemaphoreLock() + clears ImmediateInterruptOK before returning (Tom Lane) @@ -964,7 +964,7 @@ Previously, infinite recursion in a function invoked by - auto-ANALYZE could crash worker processes. + auto-ANALYZE could crash worker processes. @@ -983,25 +983,25 @@ Fix logging collector to ensure it will restart file rotation - after receiving SIGHUP (Tom Lane) + after receiving SIGHUP (Tom Lane) - Fix PL/pgSQL's GET DIAGNOSTICS command when the target + Fix PL/pgSQL's GET DIAGNOSTICS command when the target is the function's first variable (Tom Lane) - Fix several performance problems in pg_dump when + Fix several performance problems in pg_dump when the database contains many objects (Jeff Janes, Tom Lane) - pg_dump could get very slow if the database contained + pg_dump could get very slow if the database contained many schemas, or if many objects are in dependency loops, or if there are many owned sequences. @@ -1009,14 +1009,14 @@ - Fix contrib/dblink's dblink_exec() to not leak + Fix contrib/dblink's dblink_exec() to not leak temporary database connections upon error (Tom Lane) - Update time zone data files to tzdata release 2012c + Update time zone data files to tzdata release 2012c for DST law changes in Antarctica, Armenia, Chile, Cuba, Falkland Islands, Gaza, Haiti, Hebron, Morocco, Syria, and Tokelau Islands; also historical corrections for Canada. @@ -1039,7 +1039,7 @@ This release contains a variety of fixes from 8.3.17. For information about new features in the 8.3 major release, see - . + . @@ -1051,7 +1051,7 @@ However, if you are upgrading from a version earlier than 8.3.17, - see . + see . 
@@ -1064,26 +1064,26 @@ Require execute permission on the trigger function for - CREATE TRIGGER (Robert Haas) + CREATE TRIGGER (Robert Haas) This missing check could allow another user to execute a trigger function with forged input data, by installing it on a table he owns. This is only of significance for trigger functions marked - SECURITY DEFINER, since otherwise trigger functions run + SECURITY DEFINER, since otherwise trigger functions run as the table owner anyway. (CVE-2012-0866) - Convert newlines to spaces in names written in pg_dump + Convert newlines to spaces in names written in pg_dump comments (Robert Haas) - pg_dump was incautious about sanitizing object names + pg_dump was incautious about sanitizing object names that are emitted within SQL comments in its output script. A name containing a newline would at least render the script syntactically incorrect. Maliciously crafted object names could present a SQL @@ -1099,10 +1099,10 @@ An index page split caused by an insertion could sometimes cause a - concurrently-running VACUUM to miss removing index entries + concurrently-running VACUUM to miss removing index entries that it should remove. After the corresponding table rows are removed, the dangling index entries would cause errors (such as could not - read block N in file ...) or worse, silently wrong query results + read block N in file ...) or worse, silently wrong query results after unrelated rows are re-inserted at the now-free table locations. This bug has been present since release 8.2, but occurs so infrequently that it was not diagnosed until now. If you have reason to suspect @@ -1114,16 +1114,16 @@ Allow non-existent values for some settings in ALTER - USER/DATABASE SET (Heikki Linnakangas) + USER/DATABASE SET (Heikki Linnakangas) - Allow default_text_search_config, - default_tablespace, and temp_tablespaces to be + Allow default_text_search_config, + default_tablespace, and temp_tablespaces to be set to names that are not known. This is because they might be known in another database where the setting is intended to be used, or for the tablespace cases because the tablespace might not be created yet. The - same issue was previously recognized for search_path, and + same issue was previously recognized for search_path, and these settings now act like that one. @@ -1145,7 +1145,7 @@ - Fix regular expression back-references with * attached + Fix regular expression back-references with * attached (Tom Lane) @@ -1159,18 +1159,18 @@ A similar problem still afflicts back-references that are embedded in a larger quantified expression, rather than being the immediate subject of the quantifier. This will be addressed in a future - PostgreSQL release. + PostgreSQL release. Fix recently-introduced memory leak in processing of - inet/cidr values (Heikki Linnakangas) + inet/cidr values (Heikki Linnakangas) - A patch in the December 2011 releases of PostgreSQL + A patch in the December 2011 releases of PostgreSQL caused memory leakage in these operations, which could be significant in scenarios such as building a btree index on such a column. @@ -1201,32 +1201,32 @@ - Improve pg_dump's handling of inherited table columns + Improve pg_dump's handling of inherited table columns (Tom Lane) - pg_dump mishandled situations where a child column has + pg_dump mishandled situations where a child column has a different default expression than its parent column. 
If the default is textually identical to the parent's default, but not actually the same (for instance, because of schema search path differences) it would not be recognized as different, so that after dump and restore the child would be allowed to inherit the parent's default. Child columns - that are NOT NULL where their parent is not could also be + that are NOT NULL where their parent is not could also be restored subtly incorrectly. - Fix pg_restore's direct-to-database mode for + Fix pg_restore's direct-to-database mode for INSERT-style table data (Tom Lane) Direct-to-database restores from archive files made with - - In particular, the response to a server report of fork() + In particular, the response to a server report of fork() failure during SSL connection startup is now saner. - Improve libpq's error reporting for SSL failures (Tom + Improve libpq's error reporting for SSL failures (Tom Lane) - Make ecpglib write double values with 15 digits + Make ecpglib write double values with 15 digits precision (Akira Kurosawa) - In ecpglib, be sure LC_NUMERIC setting is + In ecpglib, be sure LC_NUMERIC setting is restored after an error (Michael Meskes) @@ -1898,7 +1898,7 @@ - contrib/pg_crypto's blowfish encryption code could give + contrib/pg_crypto's blowfish encryption code could give wrong results on platforms where char is signed (which is most), leading to encrypted passwords being weaker than they should be. @@ -1906,13 +1906,13 @@ - Fix memory leak in contrib/seg (Heikki Linnakangas) + Fix memory leak in contrib/seg (Heikki Linnakangas) - Fix pgstatindex() to give consistent results for empty + Fix pgstatindex() to give consistent results for empty indexes (Tom Lane) @@ -1944,7 +1944,7 @@ - Update time zone data files to tzdata release 2011i + Update time zone data files to tzdata release 2011i for DST law changes in Canada, Egypt, Russia, Samoa, and South Sudan. @@ -1965,7 +1965,7 @@ This release contains a variety of fixes from 8.3.14. For information about new features in the 8.3 major release, see - . + . @@ -1974,7 +1974,7 @@ A dump/restore is not required for those running 8.3.X. However, if you are upgrading from a version earlier than 8.3.8, - see . + see . @@ -2013,15 +2013,15 @@ - Fix dangling-pointer problem in BEFORE ROW UPDATE trigger + Fix dangling-pointer problem in BEFORE ROW UPDATE trigger handling when there was a concurrent update to the target tuple (Tom Lane) This bug has been observed to result in intermittent cannot - extract system attribute from virtual tuple failures while trying to - do UPDATE RETURNING ctid. There is a very small probability + extract system attribute from virtual tuple failures while trying to + do UPDATE RETURNING ctid. There is a very small probability of more serious errors, such as generating incorrect index entries for the updated tuple. @@ -2029,13 +2029,13 @@ - Disallow DROP TABLE when there are pending deferred trigger + Disallow DROP TABLE when there are pending deferred trigger events for the table (Tom Lane) - Formerly the DROP would go through, leading to - could not open relation with OID nnn errors when the + Formerly the DROP would go through, leading to + could not open relation with OID nnn errors when the triggers were eventually fired. 
@@ -2048,7 +2048,7 @@ - Fix pg_restore to cope with long lines (over 1KB) in + Fix pg_restore to cope with long lines (over 1KB) in TOC files (Tom Lane) @@ -2080,14 +2080,14 @@ - Fix version-incompatibility problem with libintl on + Fix version-incompatibility problem with libintl on Windows (Hiroshi Inoue) - Fix usage of xcopy in Windows build scripts to + Fix usage of xcopy in Windows build scripts to work correctly under Windows 7 (Andrew Dunstan) @@ -2098,14 +2098,14 @@ - Fix path separator used by pg_regress on Cygwin + Fix path separator used by pg_regress on Cygwin (Andrew Dunstan) - Update time zone data files to tzdata release 2011f + Update time zone data files to tzdata release 2011f for DST law changes in Chile, Cuba, Falkland Islands, Morocco, Samoa, and Turkey; also historical corrections for South Australia, Alaska, and Hawaii. @@ -2128,7 +2128,7 @@ This release contains a variety of fixes from 8.3.13. For information about new features in the 8.3 major release, see - . + . @@ -2137,7 +2137,7 @@ A dump/restore is not required for those running 8.3.X. However, if you are upgrading from a version earlier than 8.3.8, - see . + see . @@ -2149,15 +2149,15 @@ - Avoid failures when EXPLAIN tries to display a simple-form - CASE expression (Tom Lane) + Avoid failures when EXPLAIN tries to display a simple-form + CASE expression (Tom Lane) - If the CASE's test expression was a constant, the planner - could simplify the CASE into a form that confused the + If the CASE's test expression was a constant, the planner + could simplify the CASE into a form that confused the expression-display code, resulting in unexpected CASE WHEN - clause errors. + clause errors. @@ -2182,44 +2182,44 @@ - The date type supports a wider range of dates than can be - represented by the timestamp types, but the planner assumed it + The date type supports a wider range of dates than can be + represented by the timestamp types, but the planner assumed it could always convert a date to timestamp with impunity. - Fix pg_restore's text output for large objects (BLOBs) - when standard_conforming_strings is on (Tom Lane) + Fix pg_restore's text output for large objects (BLOBs) + when standard_conforming_strings is on (Tom Lane) Although restoring directly to a database worked correctly, string - escaping was incorrect if pg_restore was asked for - SQL text output and standard_conforming_strings had been + escaping was incorrect if pg_restore was asked for + SQL text output and standard_conforming_strings had been enabled in the source database. - Fix erroneous parsing of tsquery values containing + Fix erroneous parsing of tsquery values containing ... & !(subexpression) | ... (Tom Lane) Queries containing this combination of operators were not executed - correctly. The same error existed in contrib/intarray's - query_int type and contrib/ltree's - ltxtquery type. + correctly. The same error existed in contrib/intarray's + query_int type and contrib/ltree's + ltxtquery type. - Fix buffer overrun in contrib/intarray's input function - for the query_int type (Apple) + Fix buffer overrun in contrib/intarray's input function + for the query_int type (Apple) @@ -2231,16 +2231,16 @@ - Fix bug in contrib/seg's GiST picksplit algorithm + Fix bug in contrib/seg's GiST picksplit algorithm (Alexander Korotkov) This could result in considerable inefficiency, though not actually - incorrect answers, in a GiST index on a seg column. 
- If you have such an index, consider REINDEXing it after + incorrect answers, in a GiST index on a seg column. + If you have such an index, consider REINDEXing it after installing this update. (This is identical to the bug that was fixed in - contrib/cube in the previous update.) + contrib/cube in the previous update.) @@ -2260,7 +2260,7 @@ This release contains a variety of fixes from 8.3.12. For information about new features in the 8.3 major release, see - . + . @@ -2269,7 +2269,7 @@ A dump/restore is not required for those running 8.3.X. However, if you are upgrading from a version earlier than 8.3.8, - see . + see . @@ -2282,17 +2282,17 @@ Force the default - wal_sync_method - to be fdatasync on Linux (Tom Lane, Marti Raudsepp) + wal_sync_method + to be fdatasync on Linux (Tom Lane, Marti Raudsepp) - The default on Linux has actually been fdatasync for many - years, but recent kernel changes caused PostgreSQL to - choose open_datasync instead. This choice did not result + The default on Linux has actually been fdatasync for many + years, but recent kernel changes caused PostgreSQL to + choose open_datasync instead. This choice did not result in any performance improvement, and caused outright failures on - certain filesystems, notably ext4 with the - data=journal mount option. + certain filesystems, notably ext4 with the + data=journal mount option. @@ -2302,7 +2302,7 @@ - This could result in bad buffer id: 0 failures or + This could result in bad buffer id: 0 failures or corruption of index contents during replication. @@ -2321,7 +2321,7 @@ - The effective vacuum_cost_limit for an autovacuum worker + The effective vacuum_cost_limit for an autovacuum worker could drop to nearly zero if it processed enough tables, causing it to run extremely slowly. @@ -2329,19 +2329,19 @@ - Add support for detecting register-stack overrun on IA64 + Add support for detecting register-stack overrun on IA64 (Tom Lane) - The IA64 architecture has two hardware stacks. Full + The IA64 architecture has two hardware stacks. Full prevention of stack-overrun failures requires checking both. - Add a check for stack overflow in copyObject() (Tom Lane) + Add a check for stack overflow in copyObject() (Tom Lane) @@ -2357,7 +2357,7 @@ - It is possible to have a concurrent page split in a + It is possible to have a concurrent page split in a temporary index, if for example there is an open cursor scanning the index when an insertion is done. GiST failed to detect this case and hence could deliver wrong results when execution of the cursor @@ -2367,7 +2367,7 @@ - Avoid memory leakage while ANALYZE'ing complex index + Avoid memory leakage while ANALYZE'ing complex index expressions (Tom Lane) @@ -2379,14 +2379,14 @@ - An index declared like create index i on t (foo(t.*)) + An index declared like create index i on t (foo(t.*)) would not automatically get dropped when its table was dropped. - Do not inline a SQL function with multiple OUT + Do not inline a SQL function with multiple OUT parameters (Tom Lane) @@ -2398,15 +2398,15 @@ - Behave correctly if ORDER BY, LIMIT, - FOR UPDATE, or WITH is attached to the - VALUES part of INSERT ... VALUES (Tom Lane) + Behave correctly if ORDER BY, LIMIT, + FOR UPDATE, or WITH is attached to the + VALUES part of INSERT ... 
VALUES (Tom Lane) - Fix constant-folding of COALESCE() expressions (Tom Lane) + Fix constant-folding of COALESCE() expressions (Tom Lane) @@ -2418,7 +2418,7 @@ Fix postmaster crash when connection acceptance - (accept() or one of the calls made immediately after it) + (accept() or one of the calls made immediately after it) fails, and the postmaster was compiled with GSSAPI support (Alexander Chernikov) @@ -2426,7 +2426,7 @@ - Fix missed unlink of temporary files when log_temp_files + Fix missed unlink of temporary files when log_temp_files is active (Tom Lane) @@ -2438,11 +2438,11 @@ - Add print functionality for InhRelation nodes (Tom Lane) + Add print functionality for InhRelation nodes (Tom Lane) - This avoids a failure when debug_print_parse is enabled + This avoids a failure when debug_print_parse is enabled and certain types of query are executed. @@ -2461,14 +2461,14 @@ - Fix PL/pgSQL's handling of simple + Fix PL/pgSQL's handling of simple expressions to not fail in recursion or error-recovery cases (Tom Lane) - Fix PL/Python's handling of set-returning functions + Fix PL/Python's handling of set-returning functions (Jan Urbanski) @@ -2480,22 +2480,22 @@ - Fix bug in contrib/cube's GiST picksplit algorithm + Fix bug in contrib/cube's GiST picksplit algorithm (Alexander Korotkov) This could result in considerable inefficiency, though not actually - incorrect answers, in a GiST index on a cube column. - If you have such an index, consider REINDEXing it after + incorrect answers, in a GiST index on a cube column. + If you have such an index, consider REINDEXing it after installing this update. - Don't emit identifier will be truncated notices in - contrib/dblink except when creating new connections + Don't emit identifier will be truncated notices in + contrib/dblink except when creating new connections (Itagaki Takahiro) @@ -2503,20 +2503,20 @@ Fix potential coredump on missing public key in - contrib/pgcrypto (Marti Raudsepp) + contrib/pgcrypto (Marti Raudsepp) - Fix memory leak in contrib/xml2's XPath query functions + Fix memory leak in contrib/xml2's XPath query functions (Tom Lane) - Update time zone data files to tzdata release 2010o + Update time zone data files to tzdata release 2010o for DST law changes in Fiji and Samoa; also historical corrections for Hong Kong. @@ -2538,7 +2538,7 @@ This release contains a variety of fixes from 8.3.11. For information about new features in the 8.3 major release, see - . + . @@ -2547,7 +2547,7 @@ A dump/restore is not required for those running 8.3.X. However, if you are upgrading from a version earlier than 8.3.8, - see . + see . @@ -2567,7 +2567,7 @@ This change prevents security problems that can be caused by subverting Perl or Tcl code that will be executed later in the same session under another SQL user identity (for example, within a SECURITY - DEFINER function). Most scripting languages offer numerous ways that + DEFINER function). Most scripting languages offer numerous ways that that might be done, such as redefining standard functions or operators called by the target function. 
Without this change, any SQL user with Perl or Tcl language usage rights can do essentially anything with the @@ -2596,7 +2596,7 @@ - Prevent possible crashes in pg_get_expr() by disallowing + Prevent possible crashes in pg_get_expr() by disallowing it from being called with an argument that is not one of the system catalog columns it's intended to be used with (Heikki Linnakangas, Tom Lane) @@ -2605,7 +2605,7 @@ - Treat exit code 128 (ERROR_WAIT_NO_CHILDREN) as non-fatal on + Treat exit code 128 (ERROR_WAIT_NO_CHILDREN) as non-fatal on Windows (Magnus Hagander) @@ -2627,13 +2627,13 @@ This is a back-patch of an 8.4 fix that was missed in the 8.3 branch. This corrects an error introduced in 8.3.8 that could cause incorrect results for outer joins when the inner relation is an inheritance tree - or UNION ALL subquery. + or UNION ALL subquery. - Fix possible duplicate scans of UNION ALL member relations + Fix possible duplicate scans of UNION ALL member relations (Tom Lane) @@ -2655,7 +2655,7 @@ - If a plan is prepared while CREATE INDEX CONCURRENTLY is + If a plan is prepared while CREATE INDEX CONCURRENTLY is in progress for one of the referenced tables, it is supposed to be re-planned once the index is ready for use. This was not happening reliably. @@ -2709,7 +2709,7 @@ Take care to fsync the contents of lockfiles (both - postmaster.pid and the socket lockfile) while writing them + postmaster.pid and the socket lockfile) while writing them (Tom Lane) @@ -2746,7 +2746,7 @@ - Fix log_line_prefix's %i escape, + Fix log_line_prefix's %i escape, which could produce junk early in backend startup (Tom Lane) @@ -2754,35 +2754,35 @@ Fix possible data corruption in ALTER TABLE ... SET - TABLESPACE when archiving is enabled (Jeff Davis) + TABLESPACE when archiving is enabled (Jeff Davis) - Allow CREATE DATABASE and ALTER DATABASE ... SET - TABLESPACE to be interrupted by query-cancel (Guillaume Lelarge) + Allow CREATE DATABASE and ALTER DATABASE ... 
SET + TABLESPACE to be interrupted by query-cancel (Guillaume Lelarge) - Fix REASSIGN OWNED to handle operator classes and families + Fix REASSIGN OWNED to handle operator classes and families (Asko Tiidumaa) - Fix possible core dump when comparing two empty tsquery values + Fix possible core dump when comparing two empty tsquery values (Tom Lane) - Fix LIKE's handling of patterns containing % - followed by _ (Tom Lane) + Fix LIKE's handling of patterns containing % + followed by _ (Tom Lane) @@ -2794,14 +2794,14 @@ In PL/Python, defend against null pointer results from - PyCObject_AsVoidPtr and PyCObject_FromVoidPtr + PyCObject_AsVoidPtr and PyCObject_FromVoidPtr (Peter Eisentraut) - Make psql recognize DISCARD ALL as a command that should + Make psql recognize DISCARD ALL as a command that should not be encased in a transaction block in autocommit-off mode (Itagaki Takahiro) @@ -2809,14 +2809,14 @@ - Fix ecpg to process data from RETURNING + Fix ecpg to process data from RETURNING clauses correctly (Michael Meskes) - Improve contrib/dblink's handling of tables containing + Improve contrib/dblink's handling of tables containing dropped columns (Tom Lane) @@ -2824,30 +2824,30 @@ Fix connection leak after duplicate connection name - errors in contrib/dblink (Itagaki Takahiro) + errors in contrib/dblink (Itagaki Takahiro) - Fix contrib/dblink to handle connection names longer than + Fix contrib/dblink to handle connection names longer than 62 bytes correctly (Itagaki Takahiro) - Add hstore(text, text) - function to contrib/hstore (Robert Haas) + Add hstore(text, text) + function to contrib/hstore (Robert Haas) This function is the recommended substitute for the now-deprecated - => operator. It was back-patched so that future-proofed + => operator. It was back-patched so that future-proofed code can be used with older server versions. Note that the patch will - be effective only after contrib/hstore is installed or + be effective only after contrib/hstore is installed or reinstalled in a particular database. Users might prefer to execute - the CREATE FUNCTION command by hand, instead. + the CREATE FUNCTION command by hand, instead. @@ -2860,7 +2860,7 @@ - Update time zone data files to tzdata release 2010l + Update time zone data files to tzdata release 2010l for DST law changes in Egypt and Palestine; also historical corrections for Finland. @@ -2875,7 +2875,7 @@ - Make Windows' N. Central Asia Standard Time timezone map to + Make Windows' N. Central Asia Standard Time timezone map to Asia/Novosibirsk, not Asia/Almaty (Magnus Hagander) @@ -2901,7 +2901,7 @@ This release contains a variety of fixes from 8.3.10. For information about new features in the 8.3 major release, see - . + . @@ -2910,7 +2910,7 @@ A dump/restore is not required for those running 8.3.X. However, if you are upgrading from a version earlier than 8.3.8, - see . + see . @@ -2922,19 +2922,19 @@ - Enforce restrictions in plperl using an opmask applied to - the whole interpreter, instead of using Safe.pm + Enforce restrictions in plperl using an opmask applied to + the whole interpreter, instead of using Safe.pm (Tim Bunce, Andrew Dunstan) - Recent developments have convinced us that Safe.pm is too - insecure to rely on for making plperl trustable. This - change removes use of Safe.pm altogether, in favor of using + Recent developments have convinced us that Safe.pm is too + insecure to rely on for making plperl trustable. 
This + change removes use of Safe.pm altogether, in favor of using a separate interpreter with an opcode mask that is always applied. Pleasant side effects of the change include that it is now possible to - use Perl's strict pragma in a natural way in - plperl, and that Perl's $a and $b + use Perl's strict pragma in a natural way in + plperl, and that Perl's $a and $b variables work as expected in sort routines, and that function compilation is significantly faster. (CVE-2010-1169) @@ -2943,19 +2943,19 @@ Prevent PL/Tcl from executing untrustworthy code from - pltcl_modules (Tom) + pltcl_modules (Tom) PL/Tcl's feature for autoloading Tcl code from a database table could be exploited for trojan-horse attacks, because there was no restriction on who could create or insert into that table. This change - disables the feature unless pltcl_modules is owned by a + disables the feature unless pltcl_modules is owned by a superuser. (However, the permissions on the table are not checked, so installations that really need a less-than-secure modules table can still grant suitable privileges to trusted non-superusers.) Also, - prevent loading code into the unrestricted normal Tcl - interpreter unless we are really going to execute a pltclu + prevent loading code into the unrestricted normal Tcl + interpreter unless we are really going to execute a pltclu function. (CVE-2010-1170) @@ -2980,7 +2980,7 @@ This avoids failures if the function's code is invalid without the setting; an example is that SQL functions may not parse if the - search_path is not correct. + search_path is not correct. @@ -2992,10 +2992,10 @@ Previously, if an unprivileged user ran ALTER USER ... RESET - ALL for himself, or ALTER DATABASE ... RESET ALL for + ALL for himself, or ALTER DATABASE ... RESET ALL for a database he owns, this would remove all special parameter settings for the user or database, even ones that are only supposed to be - changeable by a superuser. Now, the ALTER will only + changeable by a superuser. Now, the ALTER will only remove the parameters that the user has permission to change. @@ -3003,7 +3003,7 @@ Avoid possible crash during backend shutdown if shutdown occurs - when a CONTEXT addition would be made to log entries (Tom) + when a CONTEXT addition would be made to log entries (Tom) @@ -3016,13 +3016,13 @@ Ensure the archiver process responds to changes in - archive_command as soon as possible (Tom) + archive_command as soon as possible (Tom) - Update PL/Perl's ppport.h for modern Perl versions + Update PL/Perl's ppport.h for modern Perl versions (Andrew) @@ -3035,15 +3035,15 @@ - Prevent infinite recursion in psql when expanding + Prevent infinite recursion in psql when expanding a variable that refers to itself (Tom) - Fix psql's \copy to not add spaces around - a dot within \copy (select ...) (Tom) + Fix psql's \copy to not add spaces around + a dot within \copy (select ...) 
(Tom) @@ -3054,15 +3054,15 @@ - Fix unnecessary GIN indexes do not support whole-index scans - errors for unsatisfiable queries using contrib/intarray + Fix unnecessary GIN indexes do not support whole-index scans + errors for unsatisfiable queries using contrib/intarray operators (Tom) - Ensure that contrib/pgstattuple functions respond to cancel + Ensure that contrib/pgstattuple functions respond to cancel interrupts promptly (Tatsuhito Kasahara) @@ -3070,7 +3070,7 @@ Make server startup deal properly with the case that - shmget() returns EINVAL for an existing + shmget() returns EINVAL for an existing shared memory segment (Tom) @@ -3102,14 +3102,14 @@ - Update time zone data files to tzdata release 2010j + Update time zone data files to tzdata release 2010j for DST law changes in Argentina, Australian Antarctic, Bangladesh, Mexico, Morocco, Pakistan, Palestine, Russia, Syria, Tunisia; also historical corrections for Taiwan. - Also, add PKST (Pakistan Summer Time) to the default set of + Also, add PKST (Pakistan Summer Time) to the default set of timezone abbreviations. @@ -3130,7 +3130,7 @@ This release contains a variety of fixes from 8.3.9. For information about new features in the 8.3 major release, see - . + . @@ -3139,7 +3139,7 @@ A dump/restore is not required for those running 8.3.X. However, if you are upgrading from a version earlier than 8.3.8, - see . + see . @@ -3151,7 +3151,7 @@ - Add new configuration parameter ssl_renegotiation_limit to + Add new configuration parameter ssl_renegotiation_limit to control how often we do session key renegotiation for an SSL connection (Magnus) @@ -3214,8 +3214,8 @@ - Make substring() for bit types treat any negative - length as meaning all the rest of the string (Tom) + Make substring() for bit types treat any negative + length as meaning all the rest of the string (Tom) @@ -3241,7 +3241,7 @@ - Fix assorted crashes in xml processing caused by sloppy + Fix assorted crashes in xml processing caused by sloppy memory management (Tom) @@ -3261,7 +3261,7 @@ - Fix the STOP WAL LOCATION entry in backup history files to + Fix the STOP WAL LOCATION entry in backup history files to report the next WAL segment's name when the end location is exactly at a segment boundary (Itagaki Takahiro) @@ -3283,23 +3283,23 @@ Improve constraint exclusion processing of boolean-variable cases, in particular make it possible to exclude a partition that has a - bool_column = false constraint (Tom) + bool_column = false constraint (Tom) - When reading pg_hba.conf and related files, do not treat - @something as a file inclusion request if the @ - appears inside quote marks; also, never treat @ by itself + When reading pg_hba.conf and related files, do not treat + @something as a file inclusion request if the @ + appears inside quote marks; also, never treat @ by itself as a file inclusion request (Tom) This prevents erratic behavior if a role or database name starts with - @. If you need to include a file whose path name + @. If you need to include a file whose path name contains spaces, you can still do so, but you must write - @"/path to/file" rather than putting the quotes around + @"/path to/file" rather than putting the quotes around the whole construct. 
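As an aside on the constraint-exclusion item above, a minimal sketch of the boolean-partition case it describes (all table names here are hypothetical, not taken from the release notes):

    CREATE TABLE events (id int, archived boolean);
    CREATE TABLE events_current  (CHECK (archived = false)) INHERITS (events);
    CREATE TABLE events_archived (CHECK (archived = true))  INHERITS (events);
    SET constraint_exclusion = on;
    -- the planner can now prove that events_current (constrained to archived = false)
    -- cannot satisfy this query and skip scanning it
    EXPLAIN SELECT * FROM events WHERE archived = true;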
@@ -3307,49 +3307,49 @@ Prevent infinite loop on some platforms if a directory is named as - an inclusion target in pg_hba.conf and related files + an inclusion target in pg_hba.conf and related files (Tom) - Fix possible infinite loop if SSL_read or - SSL_write fails without setting errno (Tom) + Fix possible infinite loop if SSL_read or + SSL_write fails without setting errno (Tom) This is reportedly possible with some Windows versions of - openssl. + OpenSSL. - Disallow GSSAPI authentication on local connections, + Disallow GSSAPI authentication on local connections, since it requires a hostname to function correctly (Magnus) - Make ecpg report the proper SQLSTATE if the connection + Make ecpg report the proper SQLSTATE if the connection disappears (Michael) - Fix psql's numericlocale option to not + Fix psql's numericlocale option to not format strings it shouldn't in latex and troff output formats (Heikki) - Make psql return the correct exit status (3) when - ON_ERROR_STOP and --single-transaction are - both specified and an error occurs during the implied COMMIT + Make psql return the correct exit status (3) when + ON_ERROR_STOP and --single-transaction are + both specified and an error occurs during the implied COMMIT (Bruce) @@ -3370,7 +3370,7 @@ - Add volatile markings in PL/Python to avoid possible + Add volatile markings in PL/Python to avoid possible compiler-specific misbehavior (Zdenek Kotala) @@ -3382,43 +3382,43 @@ The only known symptom of this oversight is that the Tcl - clock command misbehaves if using Tcl 8.5 or later. + clock command misbehaves if using Tcl 8.5 or later. - Prevent crash in contrib/dblink when too many key - columns are specified to a dblink_build_sql_* function + Prevent crash in contrib/dblink when too many key + columns are specified to a dblink_build_sql_* function (Rushabh Lathia, Joe Conway) - Allow zero-dimensional arrays in contrib/ltree operations + Allow zero-dimensional arrays in contrib/ltree operations (Tom) This case was formerly rejected as an error, but it's more convenient to treat it the same as a zero-element array. In particular this avoids - unnecessary failures when an ltree operation is applied to the - result of ARRAY(SELECT ...) and the sub-select returns no + unnecessary failures when an ltree operation is applied to the + result of ARRAY(SELECT ...) and the sub-select returns no rows. - Fix assorted crashes in contrib/xml2 caused by sloppy + Fix assorted crashes in contrib/xml2 caused by sloppy memory management (Tom) - Make building of contrib/xml2 more robust on Windows + Make building of contrib/xml2 more robust on Windows (Andrew) @@ -3429,14 +3429,14 @@ - One known symptom of this bug is that rows in pg_listener + One known symptom of this bug is that rows in pg_listener could be dropped under heavy load. - Update time zone data files to tzdata release 2010e + Update time zone data files to tzdata release 2010e for DST law changes in Bangladesh, Chile, Fiji, Mexico, Paraguay, Samoa. @@ -3457,7 +3457,7 @@ This release contains a variety of fixes from 8.3.8. For information about new features in the 8.3 major release, see - . + . @@ -3466,7 +3466,7 @@ A dump/restore is not required for those running 8.3.X. However, if you are upgrading from a version earlier than 8.3.8, - see . + see . 
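Returning to the contrib/ltree item above, a minimal sketch of the case it covers; it assumes contrib/ltree is installed, and the table and column names are hypothetical:

    CREATE TABLE categories (path ltree);
    -- the sub-select returns no rows, so ARRAY(SELECT ...) yields an empty array;
    -- the ltree array operator now treats it as a zero-element array instead of erroring
    SELECT ARRAY(SELECT path FROM categories WHERE false) @> 'Top.Science'::ltree;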
@@ -3514,14 +3514,14 @@ - Prevent signals from interrupting VACUUM at unsafe times + Prevent signals from interrupting VACUUM at unsafe times (Alvaro) - This fix prevents a PANIC if a VACUUM FULL is canceled + This fix prevents a PANIC if a VACUUM FULL is canceled after it's already committed its tuple movements, as well as transient - errors if a plain VACUUM is interrupted after having + errors if a plain VACUUM is interrupted after having truncated the table. @@ -3540,7 +3540,7 @@ - Fix very rare crash in inet/cidr comparisons (Chris + Fix very rare crash in inet/cidr comparisons (Chris Mikkelson) @@ -3617,7 +3617,7 @@ The previous code is known to fail with the combination of the Linux - pam_krb5 PAM module with Microsoft Active Directory as the + pam_krb5 PAM module with Microsoft Active Directory as the domain controller. It might have problems elsewhere too, since it was making unjustified assumptions about what arguments the PAM stack would pass to it. @@ -3650,19 +3650,19 @@ Fix processing of ownership dependencies during CREATE OR - REPLACE FUNCTION (Tom) + REPLACE FUNCTION (Tom) - Fix incorrect handling of WHERE - x=x conditions (Tom) + Fix incorrect handling of WHERE + x=x conditions (Tom) In some cases these could get ignored as redundant, but they aren't - — they're equivalent to x IS NOT NULL. + — they're equivalent to x IS NOT NULL. @@ -3674,7 +3674,7 @@ - Fix encoding handling in xml binary input (Heikki) + Fix encoding handling in xml binary input (Heikki) @@ -3685,7 +3685,7 @@ - Fix bug with calling plperl from plperlu or vice + Fix bug with calling plperl from plperlu or vice versa (Tom) @@ -3705,7 +3705,7 @@ Ensure that Perl arrays are properly converted to - PostgreSQL arrays when returned by a set-returning + PostgreSQL arrays when returned by a set-returning PL/Perl function (Andrew Dunstan, Abhijit Menon-Sen) @@ -3722,7 +3722,7 @@ - In contrib/pg_standby, disable triggering failover with a + In contrib/pg_standby, disable triggering failover with a signal on Windows (Fujii Masao) @@ -3734,20 +3734,20 @@ - Ensure psql's flex module is compiled with the correct + Ensure psql's flex module is compiled with the correct system header definitions (Tom) This fixes build failures on platforms where - --enable-largefile causes incompatible changes in the + --enable-largefile causes incompatible changes in the generated code. - Make the postmaster ignore any application_name parameter in + Make the postmaster ignore any application_name parameter in connection request packets, to improve compatibility with future libpq versions (Tom) @@ -3760,14 +3760,14 @@ - This includes adding IDT and SGT to the default + This includes adding IDT and SGT to the default timezone abbreviation set. - Update time zone data files to tzdata release 2009s + Update time zone data files to tzdata release 2009s for DST law changes in Antarctica, Argentina, Bangladesh, Fiji, Novokuznetsk, Pakistan, Palestine, Samoa, Syria; also historical corrections for Hong Kong. @@ -3790,7 +3790,7 @@ This release contains a variety of fixes from 8.3.7. For information about new features in the 8.3 major release, see - . + . @@ -3798,10 +3798,10 @@ A dump/restore is not required for those running 8.3.X. - However, if you have any hash indexes on interval columns, - you must REINDEX them after updating to 8.3.8. + However, if you have any hash indexes on interval columns, + you must REINDEX them after updating to 8.3.8. Also, if you are upgrading from a version earlier than 8.3.5, - see . + see . 
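As a sketch of the reindexing step called out above (the table and index names are hypothetical):

    CREATE TABLE bookings (duration interval);
    CREATE INDEX bookings_duration_hash ON bookings USING hash (duration);
    -- after updating to 8.3.8, any such hash index must be rebuilt because the
    -- stored hash values for interval changed
    REINDEX INDEX bookings_duration_hash;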
@@ -3818,13 +3818,13 @@ This bug led to the often-reported could not reattach - to shared memory error message. + to shared memory error message. - Force WAL segment switch during pg_start_backup() + Force WAL segment switch during pg_start_backup() (Heikki) @@ -3835,26 +3835,26 @@ - Disallow RESET ROLE and RESET SESSION - AUTHORIZATION inside security-definer functions (Tom, Heikki) + Disallow RESET ROLE and RESET SESSION + AUTHORIZATION inside security-definer functions (Tom, Heikki) This covers a case that was missed in the previous patch that - disallowed SET ROLE and SET SESSION - AUTHORIZATION inside security-definer functions. + disallowed SET ROLE and SET SESSION + AUTHORIZATION inside security-definer functions. (See CVE-2007-6600) - Make LOAD of an already-loaded loadable module + Make LOAD of an already-loaded loadable module into a no-op (Tom) - Formerly, LOAD would attempt to unload and re-load the + Formerly, LOAD would attempt to unload and re-load the module, but this is unsafe and not all that useful. @@ -3881,8 +3881,8 @@ - Prevent synchronize_seqscans from changing the results of - scrollable and WITH HOLD cursors (Tom) + Prevent synchronize_seqscans from changing the results of + scrollable and WITH HOLD cursors (Tom) @@ -3896,32 +3896,32 @@ - Fix hash calculation for data type interval (Tom) + Fix hash calculation for data type interval (Tom) This corrects wrong results for hash joins on interval values. It also changes the contents of hash indexes on interval columns. - If you have any such indexes, you must REINDEX them + If you have any such indexes, you must REINDEX them after updating. - Treat to_char(..., 'TH') as an uppercase ordinal - suffix with 'HH'/'HH12' (Heikki) + Treat to_char(..., 'TH') as an uppercase ordinal + suffix with 'HH'/'HH12' (Heikki) - It was previously handled as 'th' (lowercase). + It was previously handled as 'th' (lowercase). - Fix overflow for INTERVAL 'x ms' - when x is more than 2 million and integer + Fix overflow for INTERVAL 'x ms' + when x is more than 2 million and integer datetimes are in use (Alex Hunsaker) @@ -3938,14 +3938,14 @@ - Fix money data type to work in locales where currency + Fix money data type to work in locales where currency amounts have no fractional digits, e.g. 
Japan (Itagaki Takahiro) - Fix LIKE for case where pattern contains %_ + Fix LIKE for case where pattern contains %_ (Tom) @@ -3953,7 +3953,7 @@ Properly round datetime input like - 00:12:57.9999999999999999999999999999 (Tom) + 00:12:57.9999999999999999999999999999 (Tom) @@ -3972,8 +3972,8 @@ - Ensure that a fast shutdown request will forcibly terminate - open sessions, even if a smart shutdown was already in progress + Ensure that a fast shutdown request will forcibly terminate + open sessions, even if a smart shutdown was already in progress (Fujii Masao) @@ -4000,35 +4000,35 @@ - Fix pg_ctl to not go into an infinite loop if - postgresql.conf is empty (Jeff Davis) + Fix pg_ctl to not go into an infinite loop if + postgresql.conf is empty (Jeff Davis) - Improve pg_dump's efficiency when there are + Improve pg_dump's efficiency when there are many large objects (Tamas Vincze) - Use SIGUSR1, not SIGQUIT, as the - failover signal for pg_standby (Heikki) + Use SIGUSR1, not SIGQUIT, as the + failover signal for pg_standby (Heikki) - Make pg_standby's maxretries option + Make pg_standby's maxretries option behave as documented (Fujii Masao) - Make contrib/hstore throw an error when a key or + Make contrib/hstore throw an error when a key or value is too long to fit in its data structure, rather than silently truncating it (Andrew Gierth) @@ -4036,15 +4036,15 @@ - Fix contrib/xml2's xslt_process() to + Fix contrib/xml2's xslt_process() to properly handle the maximum number of parameters (twenty) (Tom) - Improve robustness of libpq's code to recover - from errors during COPY FROM STDIN (Tom) + Improve robustness of libpq's code to recover + from errors during COPY FROM STDIN (Tom) @@ -4057,7 +4057,7 @@ - Update time zone data files to tzdata release 2009l + Update time zone data files to tzdata release 2009l for DST law changes in Bangladesh, Egypt, Jordan, Pakistan, Argentina/San_Luis, Cuba, Jordan (historical correction only), Mauritius, Morocco, Palestine, Syria, Tunisia. @@ -4080,7 +4080,7 @@ This release contains a variety of fixes from 8.3.6. For information about new features in the 8.3 major release, see - . + . @@ -4089,7 +4089,7 @@ A dump/restore is not required for those running 8.3.X. However, if you are upgrading from a version earlier than 8.3.5, - see . + see . @@ -4108,7 +4108,7 @@ This change extends fixes made in the last two minor releases for related failure scenarios. The previous fixes were narrowly tailored for the original problem reports, but we have now recognized that - any error thrown by an encoding conversion function could + any error thrown by an encoding conversion function could potentially lead to infinite recursion while trying to report the error. 
The solution therefore is to disable translation and encoding conversion and report the plain-ASCII form of any error message, @@ -4119,7 +4119,7 @@ - Disallow CREATE CONVERSION with the wrong encodings + Disallow CREATE CONVERSION with the wrong encodings for the specified conversion function (Heikki) @@ -4132,19 +4132,19 @@ - Fix xpath() to not modify the path expression unless + Fix xpath() to not modify the path expression unless necessary, and to make a saner attempt at it when necessary (Andrew) - The SQL standard suggests that xpath should work on data - that is a document fragment, but libxml doesn't support + The SQL standard suggests that xpath should work on data + that is a document fragment, but libxml doesn't support that, and indeed it's not clear that this is sensible according to the - XPath standard. xpath attempted to work around this + XPath standard. xpath attempted to work around this mismatch by modifying both the data and the path expression, but the modification was buggy and could cause valid searches to fail. Now, - xpath checks whether the data is in fact a well-formed - document, and if so invokes libxml with no change to the + xpath checks whether the data is in fact a well-formed + document, and if so invokes libxml with no change to the data or path expression. Otherwise, a different modification method that is somewhat less likely to fail is used. @@ -4155,15 +4155,15 @@ seems likely that no real solution is possible. This patch should therefore be viewed as a band-aid to keep from breaking existing applications unnecessarily. It is likely that - PostgreSQL 8.4 will simply reject use of - xpath on data that is not a well-formed document. + PostgreSQL 8.4 will simply reject use of + xpath on data that is not a well-formed document. - Fix core dump when to_char() is given format codes that + Fix core dump when to_char() is given format codes that are inappropriate for the type of the data argument (Tom) @@ -4175,40 +4175,40 @@ - Crashes were possible on platforms where wchar_t is narrower - than int; Windows in particular. + Crashes were possible on platforms where wchar_t is narrower + than int; Windows in particular. Fix extreme inefficiency in text search parser's handling of an - email-like string containing multiple @ characters (Heikki) + email-like string containing multiple @ characters (Heikki) - Fix planner problem with sub-SELECT in the output list + Fix planner problem with sub-SELECT in the output list of a larger subquery (Tom) The known symptom of this bug is a failed to locate grouping - columns error that is dependent on the datatype involved; + columns error that is dependent on the datatype involved; but there could be other issues as well. - Fix decompilation of CASE WHEN with an implicit coercion + Fix decompilation of CASE WHEN with an implicit coercion (Tom) This mistake could lead to Assert failures in an Assert-enabled build, - or an unexpected CASE WHEN clause error message in other + or an unexpected CASE WHEN clause error message in other cases, when trying to examine or dump a view. @@ -4219,38 +4219,38 @@ - If CLUSTER or a rewriting variant of ALTER TABLE + If CLUSTER or a rewriting variant of ALTER TABLE were executed by someone other than the table owner, the - pg_type entry for the table's TOAST table would end up + pg_type entry for the table's TOAST table would end up marked as owned by that someone. 
This caused no immediate problems, since the permissions on the TOAST rowtype aren't examined by any ordinary database operation. However, it could lead to unexpected failures if one later tried to drop the role that issued the command - (in 8.1 or 8.2), or owner of data type appears to be invalid - warnings from pg_dump after having done so (in 8.3). + (in 8.1 or 8.2), or owner of data type appears to be invalid + warnings from pg_dump after having done so (in 8.3). - Change UNLISTEN to exit quickly if the current session has - never executed any LISTEN command (Tom) + Change UNLISTEN to exit quickly if the current session has + never executed any LISTEN command (Tom) Most of the time this is not a particularly useful optimization, but - since DISCARD ALL invokes UNLISTEN, the previous + since DISCARD ALL invokes UNLISTEN, the previous coding caused a substantial performance problem for applications that - made heavy use of DISCARD ALL. + made heavy use of DISCARD ALL. - Fix PL/pgSQL to not treat INTO after INSERT as + Fix PL/pgSQL to not treat INTO after INSERT as an INTO-variables clause anywhere in the string, not only at the start; - in particular, don't fail for INSERT INTO within - CREATE RULE (Tom) + in particular, don't fail for INSERT INTO within + CREATE RULE (Tom) @@ -4268,21 +4268,21 @@ - Retry failed calls to CallNamedPipe() on Windows + Retry failed calls to CallNamedPipe() on Windows (Steve Marshall, Magnus) It appears that this function can sometimes fail transiently; we previously treated any failure as a hard error, which could - confuse LISTEN/NOTIFY as well as other + confuse LISTEN/NOTIFY as well as other operations. - Add MUST (Mauritius Island Summer Time) to the default list + Add MUST (Mauritius Island Summer Time) to the default list of known timezone abbreviations (Xavier Bugaud) @@ -4303,7 +4303,7 @@ This release contains a variety of fixes from 8.3.5. For information about new features in the 8.3 major release, see - . + . @@ -4312,7 +4312,7 @@ A dump/restore is not required for those running 8.3.X. However, if you are upgrading from a version earlier than 8.3.5, - see . + see . @@ -4324,7 +4324,7 @@ - Make DISCARD ALL release advisory locks, in addition + Make DISCARD ALL release advisory locks, in addition to everything it already did (Tom) @@ -4347,13 +4347,13 @@ - Fix crash of xmlconcat(NULL) (Peter) + Fix crash of xmlconcat(NULL) (Peter) - Fix possible crash in ispell dictionary if high-bit-set + Fix possible crash in ispell dictionary if high-bit-set characters are used as flags (Teodor) @@ -4365,7 +4365,7 @@ - Fix misordering of pg_dump output for composite types + Fix misordering of pg_dump output for composite types (Tom) @@ -4377,13 +4377,13 @@ - Improve handling of URLs in headline() function (Teodor) + Improve handling of URLs in headline() function (Teodor) - Improve handling of overlength headlines in headline() + Improve handling of overlength headlines in headline() function (Teodor) @@ -4400,7 +4400,7 @@ Fix possible Assert failure if a statement executed in PL/pgSQL is rewritten into another kind of statement, for example if an - INSERT is rewritten into an UPDATE (Heikki) + INSERT is rewritten into an UPDATE (Heikki) @@ -4410,7 +4410,7 @@ - This primarily affects domains that are declared with CHECK + This primarily affects domains that are declared with CHECK constraints involving user-defined stable or immutable functions. Such functions typically fail if no snapshot has been set. 
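A minimal sketch of the kind of domain the paragraph above refers to (the function and domain names are hypothetical):

    CREATE FUNCTION is_reasonable(int) RETURNS boolean
        AS 'SELECT $1 BETWEEN 0 AND 1000' LANGUAGE sql IMMUTABLE;
    CREATE DOMAIN bounded_int AS int CHECK (is_reasonable(VALUE));
    -- checking the constraint runs the SQL function, which needs a snapshot;
    -- such checks previously could be reached with no snapshot set
    SELECT 42::bounded_int;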
@@ -4425,7 +4425,7 @@ - Avoid unnecessary locking of small tables in VACUUM + Avoid unnecessary locking of small tables in VACUUM (Heikki) @@ -4433,21 +4433,21 @@ Fix a problem that sometimes kept ALTER TABLE ENABLE/DISABLE - RULE from being recognized by active sessions (Tom) + RULE from being recognized by active sessions (Tom) - Fix a problem that made UPDATE RETURNING tableoid + Fix a problem that made UPDATE RETURNING tableoid return zero instead of the correct OID (Tom) - Allow functions declared as taking ANYARRAY to work on - the pg_statistic columns of that type (Tom) + Allow functions declared as taking ANYARRAY to work on + the pg_statistic columns of that type (Tom) @@ -4463,13 +4463,13 @@ This could result in bad plans for queries like - ... from a left join b on a.a1 = b.b1 where a.a1 = 42 ... + ... from a left join b on a.a1 = b.b1 where a.a1 = 42 ... - Improve optimizer's handling of long IN lists (Tom) + Improve optimizer's handling of long IN lists (Tom) @@ -4521,21 +4521,21 @@ - Fix contrib/dblink's - dblink_get_result(text,bool) function (Joe) + Fix contrib/dblink's + dblink_get_result(text,bool) function (Joe) - Fix possible garbage output from contrib/sslinfo functions + Fix possible garbage output from contrib/sslinfo functions (Tom) - Fix incorrect behavior of contrib/tsearch2 compatibility + Fix incorrect behavior of contrib/tsearch2 compatibility trigger when it's fired more than once in a command (Teodor) @@ -4554,29 +4554,29 @@ - Fix ecpg's handling of varchar structs (Michael) + Fix ecpg's handling of varchar structs (Michael) - Fix configure script to properly report failure when + Fix configure script to properly report failure when unable to obtain linkage information for PL/Perl (Andrew) - Make all documentation reference pgsql-bugs and/or - pgsql-hackers as appropriate, instead of the - now-decommissioned pgsql-ports and pgsql-patches + Make all documentation reference pgsql-bugs and/or + pgsql-hackers as appropriate, instead of the + now-decommissioned pgsql-ports and pgsql-patches mailing lists (Tom) - Update time zone data files to tzdata release 2009a (for + Update time zone data files to tzdata release 2009a (for Kathmandu and historical DST corrections in Switzerland, Cuba) @@ -4597,7 +4597,7 @@ This release contains a variety of fixes from 8.3.4. For information about new features in the 8.3 major release, see - . + . @@ -4606,8 +4606,8 @@ A dump/restore is not required for those running 8.3.X. However, if you are upgrading from a version earlier than 8.3.1, - see . Also, if you were running a previous - 8.3.X release, it is recommended to REINDEX all GiST + see . Also, if you were running a previous + 8.3.X release, it is recommended to REINDEX all GiST indexes after the upgrade. @@ -4621,13 +4621,13 @@ Fix GiST index corruption due to marking the wrong index entry - dead after a deletion (Teodor) + dead after a deletion (Teodor) This would result in index searches failing to find rows they should have found. Corrupted indexes can be fixed with - REINDEX. + REINDEX. @@ -4639,7 +4639,7 @@ We have addressed similar issues before, but it would still fail if - the character has no equivalent message itself couldn't + the character has no equivalent message itself couldn't be converted. The fix is to disable localization and send the plain ASCII error message when we detect such a situation. 
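For the recommendation above to REINDEX all GiST indexes after upgrading, one possible way to locate them (a sketch; the final REINDEX target is a hypothetical name):

    SELECT c.oid::regclass AS gist_index
    FROM pg_index i
    JOIN pg_class c ON c.oid = i.indexrelid
    JOIN pg_am am   ON am.oid = c.relam
    WHERE am.amname = 'gist';
    REINDEX INDEX some_gist_index;  -- repeat for each index reported above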
@@ -4647,7 +4647,7 @@ - Fix possible crash in bytea-to-XML mapping (Michael McMaster) + Fix possible crash in bytea-to-XML mapping (Michael McMaster) @@ -4660,8 +4660,8 @@ - Improve optimization of expression IN - (expression-list) queries (Tom, per an idea from Robert + Improve optimization of expression IN + (expression-list) queries (Tom, per an idea from Robert Haas) @@ -4674,20 +4674,20 @@ - Fix mis-expansion of rule queries when a sub-SELECT appears - in a function call in FROM, a multi-row VALUES - list, or a RETURNING list (Tom) + Fix mis-expansion of rule queries when a sub-SELECT appears + in a function call in FROM, a multi-row VALUES + list, or a RETURNING list (Tom) - The usual symptom of this problem is an unrecognized node type + The usual symptom of this problem is an unrecognized node type error. - Fix Assert failure during rescan of an IS NULL + Fix Assert failure during rescan of an IS NULL search of a GiST index (Teodor) @@ -4707,7 +4707,7 @@ - Force a checkpoint before CREATE DATABASE starts to copy + Force a checkpoint before CREATE DATABASE starts to copy files (Heikki) @@ -4719,9 +4719,9 @@ - Prevent possible collision of relfilenode numbers + Prevent possible collision of relfilenode numbers when moving a table to another tablespace with ALTER SET - TABLESPACE (Heikki) + TABLESPACE (Heikki) @@ -4740,21 +4740,21 @@ Fix improper display of fractional seconds in interval values when - using a non-ISO datestyle in an build (Ron Mayer) - Make ILIKE compare characters case-insensitively + Make ILIKE compare characters case-insensitively even when they're escaped (Andrew) - Ensure DISCARD is handled properly by statement logging (Tom) + Ensure DISCARD is handled properly by statement logging (Tom) @@ -4767,7 +4767,7 @@ - Ensure SPI_getvalue and SPI_getbinval + Ensure SPI_getvalue and SPI_getbinval behave correctly when the passed tuple and tuple descriptor have different numbers of columns (Tom) @@ -4781,15 +4781,15 @@ - Mark SessionReplicationRole as PGDLLIMPORT - so it can be used by Slony on Windows (Magnus) + Mark SessionReplicationRole as PGDLLIMPORT + so it can be used by Slony on Windows (Magnus) - Fix small memory leak when using libpq's - gsslib parameter (Magnus) + Fix small memory leak when using libpq's + gsslib parameter (Magnus) @@ -4800,38 +4800,38 @@ - Ensure libgssapi is linked into libpq + Ensure libgssapi is linked into libpq if needed (Markus Schaaf) - Fix ecpg's parsing of CREATE ROLE (Michael) + Fix ecpg's parsing of CREATE ROLE (Michael) - Fix recent breakage of pg_ctl restart (Tom) + Fix recent breakage of pg_ctl restart (Tom) - Ensure pg_control is opened in binary mode + Ensure pg_control is opened in binary mode (Itagaki Takahiro) - pg_controldata and pg_resetxlog + pg_controldata and pg_resetxlog did this incorrectly, and so could fail on Windows. - Update time zone data files to tzdata release 2008i (for + Update time zone data files to tzdata release 2008i (for DST law changes in Argentina, Brazil, Mauritius, Syria) @@ -4852,7 +4852,7 @@ This release contains a variety of fixes from 8.3.3. For information about new features in the 8.3 major release, see - . + . @@ -4861,7 +4861,7 @@ A dump/restore is not required for those running 8.3.X. However, if you are upgrading from a version earlier than 8.3.1, - see . + see . @@ -4888,41 +4888,41 @@ This error created a risk of corruption in system - catalogs that are consulted by VACUUM: dead tuple versions + catalogs that are consulted by VACUUM: dead tuple versions might be removed too soon. 
The impact of this on actual database operations would be minimal, since the system doesn't follow MVCC rules while examining catalogs, but it might result in transiently - wrong output from pg_dump or other client programs. + wrong output from pg_dump or other client programs. - Fix potential miscalculation of datfrozenxid (Alvaro) + Fix potential miscalculation of datfrozenxid (Alvaro) This error may explain some recent reports of failure to remove old - pg_clog data. + pg_clog data. - Fix incorrect HOT updates after pg_class is reindexed + Fix incorrect HOT updates after pg_class is reindexed (Tom) - Corruption of pg_class could occur if REINDEX - TABLE pg_class was followed in the same session by an ALTER - TABLE RENAME or ALTER TABLE SET SCHEMA command. + Corruption of pg_class could occur if REINDEX + TABLE pg_class was followed in the same session by an ALTER + TABLE RENAME or ALTER TABLE SET SCHEMA command. - Fix missed combo cid case (Karl Schnaitter) + Fix missed combo cid case (Karl Schnaitter) @@ -4946,7 +4946,7 @@ This responds to reports that the counters could overflow in sufficiently long transactions, leading to unexpected lock is - already held errors. + already held errors. @@ -4972,7 +4972,7 @@ Fix missed permissions checks when a view contains a simple - UNION ALL construct (Heikki) + UNION ALL construct (Heikki) @@ -4984,7 +4984,7 @@ Add checks in executor startup to ensure that the tuples produced by an - INSERT or UPDATE will match the target table's + INSERT or UPDATE will match the target table's current rowtype (Tom) @@ -4996,12 +4996,12 @@ - Fix possible repeated drops during DROP OWNED (Tom) + Fix possible repeated drops during DROP OWNED (Tom) This would typically result in strange errors such as cache - lookup failed for relation NNN. + lookup failed for relation NNN. @@ -5013,7 +5013,7 @@ - Fix xmlserialize() to raise error properly for + Fix xmlserialize() to raise error properly for unacceptable target data type (Tom) @@ -5026,7 +5026,7 @@ Certain characters occurring in configuration files would always cause - invalid byte sequence for encoding failures. + invalid byte sequence for encoding failures. @@ -5039,18 +5039,18 @@ - Fix AT TIME ZONE to first try to interpret its timezone + Fix AT TIME ZONE to first try to interpret its timezone argument as a timezone abbreviation, and only try it as a full timezone name if that fails, rather than the other way around as formerly (Tom) The timestamp input functions have always resolved ambiguous zone names - in this order. Making AT TIME ZONE do so as well improves + in this order. Making AT TIME ZONE do so as well improves consistency, and fixes a compatibility bug introduced in 8.1: in ambiguous cases we now behave the same as 8.0 and before did, - since in the older versions AT TIME ZONE accepted - only abbreviations. + since in the older versions AT TIME ZONE accepted + only abbreviations. @@ -5077,26 +5077,26 @@ Allow spaces in the suffix part of an LDAP URL in - pg_hba.conf (Tom) + pg_hba.conf (Tom) Fix bug in backwards scanning of a cursor on a SELECT DISTINCT - ON query (Tom) + ON query (Tom) - Fix planner bug that could improperly push down IS NULL + Fix planner bug that could improperly push down IS NULL tests below an outer join (Tom) - This was triggered by occurrence of IS NULL tests for - the same relation in all arms of an upper OR clause. + This was triggered by occurrence of IS NULL tests for + the same relation in all arms of an upper OR clause. 
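A minimal sketch of the query shape described above as triggering the bug (tables and columns are hypothetical):

    CREATE TABLE a (id int, kind int);
    CREATE TABLE b (a_id int);
    -- IS NULL tests on the nullable side of the outer join appear in every arm
    -- of the top-level OR; these must not be pushed below the join
    SELECT *
    FROM a LEFT JOIN b ON a.id = b.a_id
    WHERE (b.a_id IS NULL AND a.kind = 1)
       OR (b.a_id IS NULL AND a.kind = 2);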
@@ -5114,21 +5114,21 @@ - Fix planner to estimate that GROUP BY expressions yielding + Fix planner to estimate that GROUP BY expressions yielding boolean results always result in two groups, regardless of the expressions' contents (Tom) This is very substantially more accurate than the regular GROUP - BY estimate for certain boolean tests like col - IS NULL. + BY estimate for certain boolean tests like col + IS NULL. - Fix PL/pgSQL to not fail when a FOR loop's target variable + Fix PL/pgSQL to not fail when a FOR loop's target variable is a record containing composite-type fields (Tom) @@ -5142,49 +5142,49 @@ - Improve performance of PQescapeBytea() (Rudolf Leitgeb) + Improve performance of PQescapeBytea() (Rudolf Leitgeb) On Windows, work around a Microsoft bug by preventing - libpq from trying to send more than 64kB per system call + libpq from trying to send more than 64kB per system call (Magnus) - Fix ecpg to handle variables properly in SET + Fix ecpg to handle variables properly in SET commands (Michael) - Improve pg_dump and pg_restore's + Improve pg_dump and pg_restore's error reporting after failure to send a SQL command (Tom) - Fix pg_ctl to properly preserve postmaster - command-line arguments across a restart (Bruce) + Fix pg_ctl to properly preserve postmaster + command-line arguments across a restart (Bruce) Fix erroneous WAL file cutoff point calculation in - pg_standby (Simon) + pg_standby (Simon) - Update time zone data files to tzdata release 2008f (for + Update time zone data files to tzdata release 2008f (for DST law changes in Argentina, Bahamas, Brazil, Mauritius, Morocco, Pakistan, Palestine, and Paraguay) @@ -5206,7 +5206,7 @@ This release contains one serious and one minor bug fix over 8.3.2. For information about new features in the 8.3 major release, see - . + . @@ -5215,7 +5215,7 @@ A dump/restore is not required for those running 8.3.X. However, if you are upgrading from a version earlier than 8.3.1, - see . + see . @@ -5227,18 +5227,18 @@ - Make pg_get_ruledef() parenthesize negative constants (Tom) + Make pg_get_ruledef() parenthesize negative constants (Tom) Before this fix, a negative constant in a view or rule might be dumped - as, say, -42::integer, which is subtly incorrect: it should - be (-42)::integer due to operator precedence rules. + as, say, -42::integer, which is subtly incorrect: it should + be (-42)::integer due to operator precedence rules. Usually this would make little difference, but it could interact with another recent patch to cause - PostgreSQL to reject what had been a valid - SELECT DISTINCT view query. Since this could result in - pg_dump output failing to reload, it is being treated + PostgreSQL to reject what had been a valid + SELECT DISTINCT view query. Since this could result in + pg_dump output failing to reload, it is being treated as a high-priority fix. The only released versions in which dump output is actually incorrect are 8.3.1 and 8.2.7. @@ -5246,13 +5246,13 @@ - Make ALTER AGGREGATE ... OWNER TO update - pg_shdepend (Tom) + Make ALTER AGGREGATE ... OWNER TO update + pg_shdepend (Tom) This oversight could lead to problems if the aggregate was later - involved in a DROP OWNED or REASSIGN OWNED + involved in a DROP OWNED or REASSIGN OWNED operation. @@ -5273,7 +5273,7 @@ This release contains a variety of fixes from 8.3.1. For information about new features in the 8.3 major release, see - . + . @@ -5282,7 +5282,7 @@ A dump/restore is not required for those running 8.3.X. 
However, if you are upgrading from a version earlier than 8.3.1, - see . + see . @@ -5303,19 +5303,19 @@ Fix incorrect archive truncation point calculation for the - %r macro in restore_command parameters + %r macro in restore_command parameters (Simon) This could lead to data loss if a warm-standby script relied on - %r to decide when to throw away WAL segment files. + %r to decide when to throw away WAL segment files. - Fix ALTER TABLE ADD COLUMN ... PRIMARY KEY so that the new + Fix ALTER TABLE ADD COLUMN ... PRIMARY KEY so that the new column is correctly checked to see if it's been initialized to all non-nulls (Brendan Jurd) @@ -5327,31 +5327,31 @@ - Fix REASSIGN OWNED so that it works on procedural + Fix REASSIGN OWNED so that it works on procedural languages too (Alvaro) - Fix problems with SELECT FOR UPDATE/SHARE occurring as a - subquery in a query with a non-SELECT top-level operation + Fix problems with SELECT FOR UPDATE/SHARE occurring as a + subquery in a query with a non-SELECT top-level operation (Tom) - Fix possible CREATE TABLE failure when inheriting the - same constraint from multiple parent relations that + Fix possible CREATE TABLE failure when inheriting the + same constraint from multiple parent relations that inherited that constraint from a common ancestor (Tom) - Fix pg_get_ruledef() to show the alias, if any, attached - to the target table of an UPDATE or DELETE + Fix pg_get_ruledef() to show the alias, if any, attached + to the target table of an UPDATE or DELETE (Tom) @@ -5377,13 +5377,13 @@ - Fix broken GiST comparison function for tsquery (Teodor) + Fix broken GiST comparison function for tsquery (Teodor) - Fix tsvector_update_trigger() and ts_stat() + Fix tsvector_update_trigger() and ts_stat() to accept domains over the types they expect to work with (Tom) @@ -5404,7 +5404,7 @@ Fix race conditions between delayed unlinks and DROP - DATABASE (Heikki) + DATABASE (Heikki) @@ -5431,11 +5431,11 @@ Fix possible crash due to incorrect plan generated for an - x IN (SELECT y - FROM ...) clause when x and y + x IN (SELECT y + FROM ...) 
clause when x and y have different data types; and make sure the behavior is semantically - correct when the conversion from y's type to - x's type is lossy (Tom) + correct when the conversion from y's type to + x's type is lossy (Tom) @@ -5456,15 +5456,15 @@ - Fix planner failure when an indexable MIN or - MAX aggregate is used with DISTINCT or - ORDER BY (Tom) + Fix planner failure when an indexable MIN or + MAX aggregate is used with DISTINCT or + ORDER BY (Tom) - Fix planner to ensure it never uses a physical tlist for a + Fix planner to ensure it never uses a physical tlist for a plan node that is feeding a Sort node (Tom) @@ -5488,7 +5488,7 @@ - Make TransactionIdIsCurrentTransactionId() use binary + Make TransactionIdIsCurrentTransactionId() use binary search instead of linear search when checking child-transaction XIDs (Heikki) @@ -5502,14 +5502,14 @@ Fix conversions between ISO-8859-5 and other encodings to handle - Cyrillic Yo characters (e and E with + Cyrillic Yo characters (e and E with two dots) (Sergey Burladyan) - Fix several datatype input functions, notably array_in(), + Fix several datatype input functions, notably array_in(), that were allowing unused bytes in their results to contain uninitialized, unpredictable values (Tom) @@ -5517,7 +5517,7 @@ This could lead to failures in which two apparently identical literal values were not seen as equal, resulting in the parser complaining - about unmatched ORDER BY and DISTINCT + about unmatched ORDER BY and DISTINCT expressions. @@ -5525,18 +5525,18 @@ Fix a corner case in regular-expression substring matching - (substring(string from - pattern)) (Tom) + (substring(string from + pattern)) (Tom) The problem occurs when there is a match to the pattern overall but the user has specified a parenthesized subexpression and that subexpression hasn't got a match. An example is - substring('foo' from 'foo(bar)?'). - This should return NULL, since (bar) isn't matched, but + substring('foo' from 'foo(bar)?'). + This should return NULL, since (bar) isn't matched, but it was mistakenly returning the whole-pattern match instead (ie, - foo). + foo). 
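The corner case above, restated as a runnable query:

    -- the parenthesized subexpression (bar) does not match, so the result is NULL;
    -- previously the whole-pattern match 'foo' was returned instead
    SELECT substring('foo' from 'foo(bar)?');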
@@ -5549,7 +5549,7 @@ - Improve ANALYZE's handling of in-doubt tuples (those + Improve ANALYZE's handling of in-doubt tuples (those inserted or deleted by a not-yet-committed transaction) so that the counts it reports to the stats collector are more likely to be correct (Pavan Deolasee) @@ -5558,14 +5558,14 @@ - Fix initdb to reject a relative path for its - --xlogdir (-X) option (Tom) + Fix initdb to reject a relative path for its + --xlogdir (-X) option (Tom) - Make psql print tab characters as an appropriate + Make psql print tab characters as an appropriate number of spaces, rather than \x09 as was done in 8.3.0 and 8.3.1 (Bruce) @@ -5573,7 +5573,7 @@ - Update time zone data files to tzdata release 2008c (for + Update time zone data files to tzdata release 2008c (for DST law changes in Morocco, Iraq, Choibalsan, Pakistan, Syria, Cuba, and Argentina/San_Luis) @@ -5581,44 +5581,44 @@ - Add ECPGget_PGconn() function to - ecpglib (Michael) + Add ECPGget_PGconn() function to + ecpglib (Michael) - Fix incorrect result from ecpg's - PGTYPEStimestamp_sub() function (Michael) + Fix incorrect result from ecpg's + PGTYPEStimestamp_sub() function (Michael) - Fix handling of continuation line markers in ecpg + Fix handling of continuation line markers in ecpg (Michael) - Fix possible crashes in contrib/cube functions (Tom) + Fix possible crashes in contrib/cube functions (Tom) - Fix core dump in contrib/xml2's - xpath_table() function when the input query returns a + Fix core dump in contrib/xml2's + xpath_table() function when the input query returns a NULL value (Tom) - Fix contrib/xml2's makefile to not override - CFLAGS, and make it auto-configure properly for - libxslt present or not (Tom) + Fix contrib/xml2's makefile to not override + CFLAGS, and make it auto-configure properly for + libxslt present or not (Tom) @@ -5638,7 +5638,7 @@ This release contains a variety of fixes from 8.3.0. For information about new features in the 8.3 major release, see - . + . @@ -5646,7 +5646,7 @@ A dump/restore is not required for those running 8.3.X. - However, you might need to REINDEX indexes on textual + However, you might need to REINDEX indexes on textual columns after updating, if you are affected by the Windows locale issue described below. @@ -5670,17 +5670,17 @@ over two years ago, but Windows with UTF-8 uses a separate code path that was not updated. If you are using a locale that considers some non-identical strings as equal, you may need to - REINDEX to fix existing indexes on textual columns. + REINDEX to fix existing indexes on textual columns. - Repair corner-case bugs in VACUUM FULL (Tom) + Repair corner-case bugs in VACUUM FULL (Tom) - A potential deadlock between concurrent VACUUM FULL + A potential deadlock between concurrent VACUUM FULL operations on different system catalogs was introduced in 8.2. This has now been corrected. 8.3 made this worse because the deadlock could occur within a critical code section, making it @@ -5688,13 +5688,13 @@ - Also, a VACUUM FULL that failed partway through + Also, a VACUUM FULL that failed partway through vacuuming a system catalog could result in cache corruption in concurrent database sessions. - Another VACUUM FULL bug introduced in 8.3 could + Another VACUUM FULL bug introduced in 8.3 could result in a crash or out-of-memory report when dealing with pages containing no live tuples. 
@@ -5702,13 +5702,13 @@ - Fix misbehavior of foreign key checks involving character - or bit columns (Tom) + Fix misbehavior of foreign key checks involving character + or bit columns (Tom) If the referencing column were of a different but compatible type - (for instance varchar), the constraint was enforced incorrectly. + (for instance varchar), the constraint was enforced incorrectly. @@ -5726,7 +5726,7 @@ This bug affected only protocol-level prepare operations, not - SQL PREPARE, and so tended to be seen only with + SQL PREPARE, and so tended to be seen only with JDBC, DBI, and other client-side drivers that use prepared statements heavily. @@ -5748,21 +5748,21 @@ - Fix longstanding LISTEN/NOTIFY + Fix longstanding LISTEN/NOTIFY race condition (Tom) In rare cases a session that had just executed a - LISTEN might not get a notification, even though + LISTEN might not get a notification, even though one would be expected because the concurrent transaction executing - NOTIFY was observed to commit later. + NOTIFY was observed to commit later. A side effect of the fix is that a transaction that has executed - a not-yet-committed LISTEN command will not see any - row in pg_listener for the LISTEN, + a not-yet-committed LISTEN command will not see any + row in pg_listener for the LISTEN, should it choose to look; formerly it would have. This behavior was never documented one way or the other, but it is possible that some applications depend on the old behavior. @@ -5771,14 +5771,14 @@ - Disallow LISTEN and UNLISTEN within a + Disallow LISTEN and UNLISTEN within a prepared transaction (Tom) This was formerly allowed but trying to do it had various unpleasant consequences, notably that the originating backend could not exit - as long as an UNLISTEN remained uncommitted. + as long as an UNLISTEN remained uncommitted. @@ -5803,20 +5803,20 @@ - Fix incorrect comparison of tsquery values (Teodor) + Fix incorrect comparison of tsquery values (Teodor) - Fix incorrect behavior of LIKE with non-ASCII characters + Fix incorrect behavior of LIKE with non-ASCII characters in single-byte encodings (Rolf Jentsch) - Disable xmlvalidate (Tom) + Disable xmlvalidate (Tom) @@ -5835,8 +5835,8 @@ - Make encode(bytea, 'escape') convert all - high-bit-set byte values into \nnn octal + Make encode(bytea, 'escape') convert all + high-bit-set byte values into \nnn octal escape sequences (Tom) @@ -5844,7 +5844,7 @@ This is necessary to avoid encoding problems when the database encoding is multi-byte. This change could pose compatibility issues for applications that are expecting specific results from - encode. + encode. 
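A small sketch of the encode() behavior change noted above:

    -- the single byte with octal value 201 now comes back as the four-character
    -- text \201; previously such high-bit-set bytes were passed through unescaped,
    -- which could produce invalid text in a multi-byte database encoding
    SELECT encode(E'\\201'::bytea, 'escape');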
@@ -5860,21 +5860,21 @@ - Fix unrecognized node type error in some variants of - ALTER OWNER (Tom) + Fix unrecognized node type error in some variants of + ALTER OWNER (Tom) Avoid tablespace permissions errors in CREATE TABLE LIKE - INCLUDING INDEXES (Tom) + INCLUDING INDEXES (Tom) - Ensure pg_stat_activity.waiting flag + Ensure pg_stat_activity.waiting flag is cleared when a lock wait is aborted (Tom) @@ -5892,26 +5892,26 @@ - Update time zone data files to tzdata release 2008a + Update time zone data files to tzdata release 2008a (in particular, recent Chile changes); adjust timezone abbreviation - VET (Venezuela) to mean UTC-4:30, not UTC-4:00 (Tom) + VET (Venezuela) to mean UTC-4:30, not UTC-4:00 (Tom) - Fix ecpg problems with arrays (Michael) + Fix ecpg problems with arrays (Michael) - Fix pg_ctl to correctly extract the postmaster's port + Fix pg_ctl to correctly extract the postmaster's port number from command-line options (Itagaki Takahiro, Tom) - Previously, pg_ctl start -w could try to contact the + Previously, pg_ctl start -w could try to contact the postmaster on the wrong port, leading to bogus reports of startup failure. @@ -5919,19 +5919,19 @@ - Use - This is known to be necessary when building PostgreSQL - with gcc 4.3 or later. + This is known to be necessary when building PostgreSQL + with gcc 4.3 or later. - Enable building contrib/uuid-ossp with MSVC (Hiroshi Saito) + Enable building contrib/uuid-ossp with MSVC (Hiroshi Saito) @@ -5954,7 +5954,7 @@ With significant new functionality and performance enhancements, this release represents a major leap forward for - PostgreSQL. This was made possible by a growing + PostgreSQL. This was made possible by a growing community that has dramatically accelerated the pace of development. This release adds the following major features: @@ -5988,13 +5988,13 @@ - Universally Unique Identifier (UUID) data type + Universally Unique Identifier (UUID) data type - Add control over whether NULLs sort first or last + Add control over whether NULLs sort first or last @@ -6032,7 +6032,7 @@ - Support Security Service Provider Interface (SSPI) for + Support Security Service Provider Interface (SSPI) for authentication on Windows @@ -6046,8 +6046,8 @@ - Allow the whole PostgreSQL distribution to be compiled - with Microsoft Visual C++ + Allow the whole PostgreSQL distribution to be compiled + with Microsoft Visual C++ @@ -6076,8 +6076,8 @@ - Heap-Only Tuples (HOT) accelerate space reuse for - most UPDATEs and DELETEs + Heap-Only Tuples (HOT) accelerate space reuse for + most UPDATEs and DELETEs @@ -6091,7 +6091,7 @@ Using non-persistent transaction IDs for read-only transactions - reduces overhead and VACUUM requirements + reduces overhead and VACUUM requirements @@ -6116,7 +6116,7 @@ - ORDER BY ... LIMIT can be done without sorting + ORDER BY ... LIMIT can be done without sorting @@ -6148,14 +6148,14 @@ Non-character data types are no longer automatically cast to - TEXT (Peter, Tom) + TEXT (Peter, Tom) Previously, if a non-character value was supplied to an operator or - function that requires text input, it was automatically - cast to text, for most (though not all) built-in data types. - This no longer happens: an explicit cast to text is now + function that requires text input, it was automatically + cast to text, for most (though not all) built-in data types. + This no longer happens: an explicit cast to text is now required for all non-character-string types. 
For example, these expressions formerly worked: @@ -6164,15 +6164,15 @@ substr(current_date, 1, 4) 23 LIKE '2%' - but will now draw function does not exist and operator - does not exist errors respectively. Use an explicit cast instead: + but will now draw function does not exist and operator + does not exist errors respectively. Use an explicit cast instead: substr(current_date::text, 1, 4) 23::text LIKE '2%' - (Of course, you can use the more verbose CAST() syntax too.) + (Of course, you can use the more verbose CAST() syntax too.) The reason for the change is that these automatic casts too often caused surprising behavior. An example is that in previous releases, this expression was accepted but did not do what was expected: @@ -6183,35 +6183,35 @@ current_date < 2017-11-17 This is actually comparing a date to an integer, which should be (and now is) rejected — but in the presence of automatic - casts both sides were cast to text and a textual comparison - was done, because the text < text operator was able - to match the expression when no other < operator could. + casts both sides were cast to text and a textual comparison + was done, because the text < text operator was able + to match the expression when no other < operator could. - Types char(n) and - varchar(n) still cast to text - automatically. Also, automatic casting to text still works for - inputs to the concatenation (||) operator, so long as least + Types char(n) and + varchar(n) still cast to text + automatically. Also, automatic casting to text still works for + inputs to the concatenation (||) operator, so long as least one input is a character-string type. - Full text search features from contrib/tsearch2 have + Full text search features from contrib/tsearch2 have been moved into the core server, with some minor syntax changes - contrib/tsearch2 now contains a compatibility + contrib/tsearch2 now contains a compatibility interface. - ARRAY(SELECT ...), where the SELECT + ARRAY(SELECT ...), where the SELECT returns no rows, now returns an empty array, rather than NULL (Tom) @@ -6233,8 +6233,8 @@ current_date < 2017-11-17 - ORDER BY ... USING operator must now - use a less-than or greater-than operator that is + ORDER BY ... USING operator must now + use a less-than or greater-than operator that is defined in a btree operator class @@ -6251,7 +6251,7 @@ current_date < 2017-11-17 Previously SET LOCAL's effects were lost - after subtransaction commit (RELEASE SAVEPOINT + after subtransaction commit (RELEASE SAVEPOINT or exit from a PL/pgSQL exception block). @@ -6263,15 +6263,15 @@ current_date < 2017-11-17 - For example, "BEGIN; DROP DATABASE; COMMIT" will now be + For example, "BEGIN; DROP DATABASE; COMMIT" will now be rejected even if submitted as a single query message. - ROLLBACK outside a transaction block now - issues NOTICE instead of WARNING (Bruce) + ROLLBACK outside a transaction block now + issues NOTICE instead of WARNING (Bruce) @@ -6282,15 +6282,15 @@ current_date < 2017-11-17 - Formerly, these commands accepted schema.relation but + Formerly, these commands accepted schema.relation but ignored the schema part, which was confusing. - ALTER SEQUENCE no longer affects the sequence's - currval() state (Tom) + ALTER SEQUENCE no longer affects the sequence's + currval() state (Tom) @@ -6314,16 +6314,16 @@ current_date < 2017-11-17 For example, pg_database_size() now requires - CONNECT permission, which is granted to everyone by + CONNECT permission, which is granted to everyone by default. 
pg_tablespace_size() requires - CREATE permission in the tablespace, or is allowed if + CREATE permission in the tablespace, or is allowed if the tablespace is the default tablespace for the database. - Remove the undocumented !!= (not in) operator (Tom) + Remove the undocumented !!= (not in) operator (Tom) @@ -6339,7 +6339,7 @@ current_date < 2017-11-17 If application code was computing and storing hash values using - internal PostgreSQL hashing functions, the hash + internal PostgreSQL hashing functions, the hash values must be regenerated. @@ -6351,8 +6351,8 @@ current_date < 2017-11-17 - The new SET_VARSIZE() macro must be used - to set the length of generated varlena values. Also, it + The new SET_VARSIZE() macro must be used + to set the length of generated varlena values. Also, it might be necessary to expand (de-TOAST) input values in more cases. @@ -6361,7 +6361,7 @@ current_date < 2017-11-17 Continuous archiving no longer reports each successful archive - operation to the server logs unless DEBUG level is used + operation to the server logs unless DEBUG level is used (Simon) @@ -6381,18 +6381,18 @@ current_date < 2017-11-17 - bgwriter_lru_percent, - bgwriter_all_percent, - bgwriter_all_maxpages, - stats_start_collector, and - stats_reset_on_server_start are removed. - redirect_stderr is renamed to - logging_collector. - stats_command_string is renamed to - track_activities. - stats_block_level and stats_row_level - are merged into track_counts. - A new boolean configuration parameter, archive_mode, + bgwriter_lru_percent, + bgwriter_all_percent, + bgwriter_all_maxpages, + stats_start_collector, and + stats_reset_on_server_start are removed. + redirect_stderr is renamed to + logging_collector. + stats_command_string is renamed to + track_activities. + stats_block_level and stats_row_level + are merged into track_counts. + A new boolean configuration parameter, archive_mode, controls archiving. Autovacuum's default settings have changed. @@ -6403,7 +6403,7 @@ current_date < 2017-11-17 - We now always start the collector process, unless UDP + We now always start the collector process, unless UDP socket creation fails. @@ -6421,7 +6421,7 @@ current_date < 2017-11-17 - Commenting out a parameter in postgresql.conf now + Commenting out a parameter in postgresql.conf now causes it to revert to its default value (Joachim Wieland) @@ -6461,12 +6461,12 @@ current_date < 2017-11-17 - On most platforms, C locale is the only locale that + On most platforms, C locale is the only locale that will work with any database encoding. Other locale settings imply a specific encoding and will misbehave if the database encoding is something different. (Typical symptoms include bogus textual - sort order and wrong results from upper() or - lower().) The server now rejects attempts to create + sort order and wrong results from upper() or + lower().) The server now rejects attempts to create databases that have an incompatible encoding. 
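As a sketch of the new check, assuming the cluster was initialized with a UTF-8 locale (database names are hypothetical):

    CREATE DATABASE latin_db ENCODING 'LATIN1' TEMPLATE template0;  -- now rejected as incompatible
    CREATE DATABASE utf8_db  ENCODING 'UTF8';                       -- accepted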
@@ -6503,7 +6503,7 @@ current_date < 2017-11-17 convert_from(bytea, name) returns - text — converts the first argument from the named + text — converts the first argument from the named encoding to the database encoding @@ -6511,7 +6511,7 @@ current_date < 2017-11-17 convert_to(text, name) returns - bytea — converts the first argument from the + bytea — converts the first argument from the database encoding to the named encoding @@ -6519,7 +6519,7 @@ current_date < 2017-11-17 length(bytea, name) returns - integer — gives the length of the first + integer — gives the length of the first argument in characters in the named encoding @@ -6582,10 +6582,10 @@ current_date < 2017-11-17 database consistency at risk; the worst case is that after a crash the last few reportedly-committed transactions might not be committed after all. - This feature is enabled by turning off synchronous_commit + This feature is enabled by turning off synchronous_commit (which can be done per-session or per-transaction, if some transactions are critical and others are not). - wal_writer_delay can be adjusted to control the maximum + wal_writer_delay can be adjusted to control the maximum delay before transactions actually reach disk. @@ -6609,19 +6609,19 @@ current_date < 2017-11-17 - Heap-Only Tuples (HOT) accelerate space reuse for most - UPDATEs and DELETEs (Pavan Deolasee, with + Heap-Only Tuples (HOT) accelerate space reuse for most + UPDATEs and DELETEs (Pavan Deolasee, with ideas from many others) - UPDATEs and DELETEs leave dead tuples - behind, as do failed INSERTs. Previously only - VACUUM could reclaim space taken by dead tuples. With - HOT dead tuple space can be automatically reclaimed at - the time of INSERT or UPDATE if no changes + UPDATEs and DELETEs leave dead tuples + behind, as do failed INSERTs. Previously only + VACUUM could reclaim space taken by dead tuples. With + HOT dead tuple space can be automatically reclaimed at + the time of INSERT or UPDATE if no changes are made to indexed columns. This allows for more consistent - performance. Also, HOT avoids adding duplicate index + performance. Also, HOT avoids adding duplicate index entries. @@ -6655,13 +6655,13 @@ current_date < 2017-11-17 Using non-persistent transaction IDs for read-only transactions - reduces overhead and VACUUM requirements (Florian Pflug) + reduces overhead and VACUUM requirements (Florian Pflug) Non-persistent transaction IDs do not increment the global transaction counter. Therefore, they reduce the load on - pg_clog and increase the time between forced + pg_clog and increase the time between forced vacuums to prevent transaction ID wraparound. Other performance improvements were also made that should improve concurrency. @@ -6674,7 +6674,7 @@ current_date < 2017-11-17 - There was formerly a hard limit of 232 + There was formerly a hard limit of 232 (4 billion) commands per transaction. Now only commands that actually changed the database count, so while this limit still exists, it should be significantly less annoying. @@ -6683,7 +6683,7 @@ current_date < 2017-11-17 - Create a dedicated WAL writer process to off-load + Create a dedicated WAL writer process to off-load work from backends (Simon) @@ -6696,7 +6696,7 @@ current_date < 2017-11-17 Unless WAL archiving is enabled, the system now avoids WAL writes - for CLUSTER and just fsync()s the + for CLUSTER and just fsync()s the table at the end of the command. It also does the same for COPY if the table was created in the same transaction. 
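The asynchronous-commit setting described earlier in these notes can be scoped to a single transaction; a minimal sketch, with a hypothetical table name:

    BEGIN;
    SET LOCAL synchronous_commit TO off;   -- only this transaction's commit may be lost in a crash
    INSERT INTO audit_log VALUES (now(), 'low-value event');
    COMMIT;                                -- may return before the WAL record reaches disk;
                                           -- wal_writer_delay bounds how long that can take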
@@ -6720,22 +6720,22 @@ current_date < 2017-11-17 middle of the table (where another sequential scan is already in-progress) and wrapping around to the beginning to finish. This can affect the order of returned rows in a query that does not - specify ORDER BY. The synchronize_seqscans + specify ORDER BY. The synchronize_seqscans configuration parameter can be used to disable this if necessary. - ORDER BY ... LIMIT can be done without sorting + ORDER BY ... LIMIT can be done without sorting (Greg Stark) This is done by sequentially scanning the table and tracking just - the top N candidate rows, rather than performing a + the top N candidate rows, rather than performing a full sort of the entire table. This is useful when there is no - matching index and the LIMIT is not large. + matching index and the LIMIT is not large. @@ -6805,7 +6805,7 @@ current_date < 2017-11-17 Previously PL/pgSQL functions that referenced temporary tables would fail if the temporary table was dropped and recreated - between function invocations, unless EXECUTE was + between function invocations, unless EXECUTE was used. This improvement fixes that problem and many related issues. @@ -6830,7 +6830,7 @@ current_date < 2017-11-17 Place temporary tables' TOAST tables in special schemas named - pg_toast_temp_nnn (Tom) + pg_toast_temp_nnn (Tom) @@ -6860,7 +6860,7 @@ current_date < 2017-11-17 - Fix CREATE CONSTRAINT TRIGGER + Fix CREATE CONSTRAINT TRIGGER to convert old-style foreign key trigger definitions into regular foreign key constraints (Tom) @@ -6868,17 +6868,17 @@ current_date < 2017-11-17 This will ease porting of foreign key constraints carried forward from pre-7.3 databases, if they were never converted using - contrib/adddepend. + contrib/adddepend. - Fix DEFAULT NULL to override inherited defaults (Tom) + Fix DEFAULT NULL to override inherited defaults (Tom) - DEFAULT NULL was formerly considered a noise phrase, but it + DEFAULT NULL was formerly considered a noise phrase, but it should (and now does) override non-null defaults that would otherwise be inherited from a parent table or domain. @@ -6998,9 +6998,9 @@ current_date < 2017-11-17 This avoids Windows-specific problems with localized time zone names that are in the wrong encoding. There is a new - log_timezone parameter that controls the timezone + log_timezone parameter that controls the timezone used in log messages, independently of the client-visible - timezone parameter. + timezone parameter. 
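A minimal sketch of the DEFAULT NULL change noted a little above, using hypothetical table names:

    CREATE TABLE base_tbl  (f1 int DEFAULT 42);
    CREATE TABLE child_tbl (f1 int DEFAULT NULL) INHERITS (base_tbl);
    INSERT INTO child_tbl DEFAULT VALUES;
    SELECT f1 FROM child_tbl;   -- now NULL; formerly the inherited default 42 was used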
@@ -7031,7 +7031,7 @@ current_date < 2017-11-17 - Add n_live_tuples and n_dead_tuples columns + Add n_live_tuples and n_dead_tuples columns to pg_stat_all_tables and related views (Glen Parker) @@ -7039,8 +7039,8 @@ current_date < 2017-11-17 - Merge stats_block_level and stats_row_level - parameters into a single parameter track_counts, which + Merge stats_block_level and stats_row_level + parameters into a single parameter track_counts, which controls all messages sent to the statistics collector process (Tom) @@ -7070,7 +7070,7 @@ current_date < 2017-11-17 - Support Security Service Provider Interface (SSPI) for + Support Security Service Provider Interface (SSPI) for authentication on Windows (Magnus) @@ -7094,14 +7094,14 @@ current_date < 2017-11-17 - Add ssl_ciphers parameter to control accepted SSL ciphers + Add ssl_ciphers parameter to control accepted SSL ciphers (Victor Wagner) - Add a Kerberos realm parameter, krb_realm (Magnus) + Add a Kerberos realm parameter, krb_realm (Magnus) @@ -7110,7 +7110,7 @@ current_date < 2017-11-17 - Write-Ahead Log (<acronym>WAL</>) and Continuous Archiving + Write-Ahead Log (<acronym>WAL</acronym>) and Continuous Archiving @@ -7133,7 +7133,7 @@ current_date < 2017-11-17 This change allows a warm standby server to pass the name of the earliest still-needed WAL file to the recovery script, allowing automatic removal - of no-longer-needed WAL files. This is done using %r in + of no-longer-needed WAL files. This is done using %r in the restore_command parameter of recovery.conf. @@ -7141,14 +7141,14 @@ current_date < 2017-11-17 - New boolean configuration parameter, archive_mode, + New boolean configuration parameter, archive_mode, controls archiving (Simon) - Previously setting archive_command to an empty string - turned off archiving. Now archive_mode turns archiving - on and off, independently of archive_command. This is + Previously setting archive_command to an empty string + turned off archiving. Now archive_mode turns archiving + on and off, independently of archive_command. This is useful for stopping archiving temporarily. @@ -7169,40 +7169,40 @@ current_date < 2017-11-17 Text search has been improved, moved into the core code, and is now - installed by default. contrib/tsearch2 now contains + installed by default. contrib/tsearch2 now contains a compatibility interface. - Add control over whether NULLs sort first or last (Teodor, Tom) + Add control over whether NULLs sort first or last (Teodor, Tom) - The syntax is ORDER BY ... NULLS FIRST/LAST. + The syntax is ORDER BY ... NULLS FIRST/LAST. - Allow per-column ascending/descending (ASC/DESC) + Allow per-column ascending/descending (ASC/DESC) ordering options for indexes (Teodor, Tom) - Previously a query using ORDER BY with mixed - ASC/DESC specifiers could not fully use + Previously a query using ORDER BY with mixed + ASC/DESC specifiers could not fully use an index. Now an index can be fully used in such cases if the index was created with matching - ASC/DESC specifications. - NULL sort order within an index can be controlled, too. + ASC/DESC specifications. + NULL sort order within an index can be controlled, too. - Allow col IS NULL to use an index (Teodor) + Allow col IS NULL to use an index (Teodor) @@ -7213,8 +7213,8 @@ current_date < 2017-11-17 This eliminates the need to reference a primary key to - UPDATE or DELETE rows returned by a cursor. - The syntax is UPDATE/DELETE WHERE CURRENT OF. + UPDATE or DELETE rows returned by a cursor. + The syntax is UPDATE/DELETE WHERE CURRENT OF. 
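A minimal sketch of the new cursor-update syntax just described; the table and cursor names are hypothetical:

    BEGIN;
    DECLARE cur CURSOR FOR SELECT * FROM accounts FOR UPDATE;
    FETCH NEXT FROM cur;
    UPDATE accounts SET balance = balance + 10
        WHERE CURRENT OF cur;   -- no primary-key lookup required
    COMMIT;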
@@ -7243,7 +7243,7 @@ current_date < 2017-11-17 - Allow UNION and related constructs to return a domain + Allow UNION and related constructs to return a domain type, when all inputs are of that domain type (Tom) @@ -7271,7 +7271,7 @@ current_date < 2017-11-17 Improve optimizer logic for detecting when variables are equal - in a WHERE clause (Tom) + in a WHERE clause (Tom) @@ -7318,8 +7318,8 @@ current_date < 2017-11-17 For example, functions can now set their own - search_path to prevent unexpected behavior if a - different search_path exists at run-time. Security + search_path to prevent unexpected behavior if a + different search_path exists at run-time. Security definer functions should set search_path to avoid security loopholes. @@ -7367,7 +7367,7 @@ current_date < 2017-11-17 - Make CREATE/DROP/RENAME DATABASE wait briefly for + Make CREATE/DROP/RENAME DATABASE wait briefly for conflicting backends to exit before failing (Tom) @@ -7385,7 +7385,7 @@ current_date < 2017-11-17 This allows replication systems to disable triggers and rewrite rules as a group without modifying the system catalogs directly. - The behavior is controlled by ALTER TABLE and a new + The behavior is controlled by ALTER TABLE and a new parameter session_replication_role. @@ -7397,7 +7397,7 @@ current_date < 2017-11-17 This allows a user-defined type to take a modifier, like - ssnum(7). Previously only built-in + ssnum(7). Previously only built-in data types could have modifiers. @@ -7419,7 +7419,7 @@ current_date < 2017-11-17 While this is reasonably safe, some administrators might wish to revoke the privilege. It is controlled by - pg_pltemplate.tmpldbacreate. + pg_pltemplate.tmpldbacreate. @@ -7465,7 +7465,7 @@ current_date < 2017-11-17 Add new CLUSTER syntax: CLUSTER - table USING index + table USING index (Holger Schurig) @@ -7483,7 +7483,7 @@ current_date < 2017-11-17 References to subplan outputs are now always shown correctly, - instead of using ?columnN? + instead of using ?columnN? for complicated cases. @@ -7527,19 +7527,19 @@ current_date < 2017-11-17 This feature provides convenient support for fields that have a small, fixed set of allowed values. An example of creating an - ENUM type is - CREATE TYPE mood AS ENUM ('sad', 'ok', 'happy'). + ENUM type is + CREATE TYPE mood AS ENUM ('sad', 'ok', 'happy'). - Universally Unique Identifier (UUID) data type (Gevik + Universally Unique Identifier (UUID) data type (Gevik Babakhani, Neil) - This closely matches RFC 4122. + This closely matches RFC 4122. @@ -7549,7 +7549,7 @@ current_date < 2017-11-17 - This greatly increases the range of supported MONEY + This greatly increases the range of supported MONEY values. @@ -7557,13 +7557,13 @@ current_date < 2017-11-17 Fix float4/float8 to handle - Infinity and NAN (Not A Number) + Infinity and NAN (Not A Number) consistently (Bruce) The code formerly was not consistent about distinguishing - Infinity from overflow conditions. + Infinity from overflow conditions. @@ -7576,7 +7576,7 @@ current_date < 2017-11-17 - Prevent COPY from using digits and lowercase letters as + Prevent COPY from using digits and lowercase letters as delimiters (Tom) @@ -7613,7 +7613,7 @@ current_date < 2017-11-17 - Implement width_bucket() for the float8 + Implement width_bucket() for the float8 data type (Neil) @@ -7636,34 +7636,34 @@ current_date < 2017-11-17 - Add isodow option to EXTRACT() and - date_part() (Bruce) + Add isodow option to EXTRACT() and + date_part() (Bruce) This returns the day of the week, with Sunday as seven. 
- (dow returns Sunday as zero.) + (dow returns Sunday as zero.) - Add ID (ISO day of week) and IDDD (ISO - day of year) format codes for to_char(), - to_date(), and to_timestamp() (Brendan + Add ID (ISO day of week) and IDDD (ISO + day of year) format codes for to_char(), + to_date(), and to_timestamp() (Brendan Jurd) - Make to_timestamp() and to_date() + Make to_timestamp() and to_date() assume TM (trim) option for potentially variable-width fields (Bruce) - This matches Oracle's behavior. + This matches Oracle's behavior. @@ -7671,7 +7671,7 @@ current_date < 2017-11-17 Fix off-by-one conversion error in to_date()/to_timestamp() - D (non-ISO day of week) fields (Bruce) + D (non-ISO day of week) fields (Bruce) @@ -7757,7 +7757,7 @@ current_date < 2017-11-17 This adds convenient syntax for PL/pgSQL set-returning functions - that want to return the result of a query. RETURN QUERY + that want to return the result of a query. RETURN QUERY is easier and more efficient than a loop around RETURN NEXT. @@ -7770,7 +7770,7 @@ current_date < 2017-11-17 - For example, myfunc.myvar. This is particularly + For example, myfunc.myvar. This is particularly useful for specifying variables in a query where the variable name might match a column name. @@ -7790,11 +7790,11 @@ current_date < 2017-11-17 Tighten requirements for FOR loop - STEP values (Tom) + STEP values (Tom) - Prevent non-positive STEP values, and handle + Prevent non-positive STEP values, and handle loop overflows. @@ -7831,7 +7831,7 @@ current_date < 2017-11-17 - Allow type-name arguments to PL/Tcl spi_prepare to + Allow type-name arguments to PL/Tcl spi_prepare to be data type aliases in addition to names found in pg_type (Andrew) @@ -7852,7 +7852,7 @@ current_date < 2017-11-17 - Fix PL/Tcl problems with thread-enabled libtcl spawning + Fix PL/Tcl problems with thread-enabled libtcl spawning multiple threads within the backend (Steve Marshall, Paul Bayer, Doug Knight) @@ -7867,7 +7867,7 @@ current_date < 2017-11-17 - <link linkend="APP-PSQL"><application>psql</></link> + <link linkend="app-psql"><application>psql</application></link> @@ -7907,20 +7907,20 @@ current_date < 2017-11-17 Allow \pset, \t, and - \x to specify on or off, + \x to specify on or off, rather than just toggling (Chad Wagner) - Add \sleep capability (Jan) + Add \sleep capability (Jan) - Enable \timing output for \copy (Andrew) + Enable \timing output for \copy (Andrew) @@ -7933,20 +7933,20 @@ current_date < 2017-11-17 - Flush \o output after each backslash command (Tom) + Flush \o output after each backslash command (Tom) - Correctly detect and report errors while reading a -f + Correctly detect and report errors while reading a -f input file (Peter) - Remove -u option (this option has long been deprecated) + Remove -u option (this option has long been deprecated) (Tom) @@ -7956,12 +7956,12 @@ current_date < 2017-11-17 - <link linkend="APP-PGDUMP"><application>pg_dump</></link> + <link linkend="app-pgdump"><application>pg_dump</application></link> - Add --tablespaces-only and --roles-only + Add --tablespaces-only and --roles-only options to pg_dumpall (Dave Page) @@ -7980,7 +7980,7 @@ current_date < 2017-11-17 - Allow pg_dumpall to accept an initial-connection + Allow pg_dumpall to accept an initial-connection database name rather than the default template1 (Dave Page) @@ -7988,7 +7988,7 @@ current_date < 2017-11-17 - In -n and -t switches, always match + In -n and -t switches, always match $ literally (Tom) @@ -8001,7 +8001,7 @@ current_date < 2017-11-17 - Remove -u 
option (this option has long been deprecated) + Remove -u option (this option has long been deprecated) (Tom) @@ -8016,7 +8016,7 @@ current_date < 2017-11-17 - In initdb, allow the location of the + In initdb, allow the location of the pg_xlog directory to be specified (Euler Taveira de Oliveira) @@ -8024,19 +8024,19 @@ current_date < 2017-11-17 - Enable server core dump generation in pg_regress + Enable server core dump generation in pg_regress on supported operating systems (Andrew) - Add a -t (timeout) parameter to pg_ctl + Add a -t (timeout) parameter to pg_ctl (Bruce) - This controls how long pg_ctl will wait when waiting + This controls how long pg_ctl will wait when waiting for server startup or shutdown. Formerly the timeout was hard-wired as 60 seconds. @@ -8044,28 +8044,28 @@ current_date < 2017-11-17 - Add a pg_ctl option to control generation + Add a pg_ctl option to control generation of server core dumps (Andrew) - Allow Control-C to cancel clusterdb, - reindexdb, and vacuumdb (Itagaki + Allow Control-C to cancel clusterdb, + reindexdb, and vacuumdb (Itagaki Takahiro, Magnus) - Suppress command tag output for createdb, - createuser, dropdb, and - dropuser (Peter) + Suppress command tag output for createdb, + createuser, dropdb, and + dropuser (Peter) - The --quiet option is ignored and will be removed in 8.4. + The --quiet option is ignored and will be removed in 8.4. Progress messages when acting on all databases now go to stdout instead of stderr because they are not actually errors. @@ -8076,33 +8076,33 @@ current_date < 2017-11-17 - <link linkend="libpq"><application>libpq</></link> + <link linkend="libpq"><application>libpq</application></link> - Interpret the dbName parameter of - PQsetdbLogin() as a conninfo string if + Interpret the dbName parameter of + PQsetdbLogin() as a conninfo string if it contains an equals sign (Andrew) - This allows use of conninfo strings in client - programs that still use PQsetdbLogin(). + This allows use of conninfo strings in client + programs that still use PQsetdbLogin(). - Support a global SSL configuration file (Victor + Support a global SSL configuration file (Victor Wagner) - Add environment variable PGSSLKEY to control - SSL hardware keys (Victor Wagner) + Add environment variable PGSSLKEY to control + SSL hardware keys (Victor Wagner) @@ -8147,7 +8147,7 @@ current_date < 2017-11-17 - <link linkend="ecpg"><application>ecpg</></link> + <link linkend="ecpg"><application>ecpg</application></link> @@ -8183,13 +8183,13 @@ current_date < 2017-11-17 - <application>Windows</> Port + <application>Windows</application> Port - Allow the whole PostgreSQL distribution to be compiled - with Microsoft Visual C++ (Magnus and others) + Allow the whole PostgreSQL distribution to be compiled + with Microsoft Visual C++ (Magnus and others) @@ -8226,7 +8226,7 @@ current_date < 2017-11-17 - Server Programming Interface (<acronym>SPI</>) + Server Programming Interface (<acronym>SPI</acronym>) @@ -8236,7 +8236,7 @@ current_date < 2017-11-17 Allow access to the cursor-related planning options, and add - FETCH/MOVE routines. + FETCH/MOVE routines. @@ -8247,15 +8247,15 @@ current_date < 2017-11-17 - The macro SPI_ERROR_CURSOR still exists but will + The macro SPI_ERROR_CURSOR still exists but will never be returned. 
- SPI plan pointers are now declared as SPIPlanPtr instead of - void * (Tom) + SPI plan pointers are now declared as SPIPlanPtr instead of + void * (Tom) @@ -8274,35 +8274,35 @@ current_date < 2017-11-17 - Add configure option --enable-profiling - to enable code profiling (works only with gcc) + Add configure option --enable-profiling + to enable code profiling (works only with gcc) (Korry Douglas and Nikhil Sontakke) - Add configure option --with-system-tzdata + Add configure option --with-system-tzdata to use the operating system's time zone database (Peter) - Fix PGXS so extensions can be built against PostgreSQL - installations whose pg_config program does not - appear first in the PATH (Tom) + Fix PGXS so extensions can be built against PostgreSQL + installations whose pg_config program does not + appear first in the PATH (Tom) Support gmake draft when building the - SGML documentation (Bruce) + SGML documentation (Bruce) - Unless draft is used, the documentation build will + Unless draft is used, the documentation build will now be repeated if necessary to ensure the index is up-to-date. @@ -8317,9 +8317,9 @@ current_date < 2017-11-17 - Rename macro DLLIMPORT to PGDLLIMPORT to + Rename macro DLLIMPORT to PGDLLIMPORT to avoid conflicting with third party includes (like Tcl) that - define DLLIMPORT (Magnus) + define DLLIMPORT (Magnus) @@ -8332,15 +8332,15 @@ current_date < 2017-11-17 - Update GIN extractQuery() API to allow signalling + Update GIN extractQuery() API to allow signalling that nothing can satisfy the query (Teodor) - Move NAMEDATALEN definition from - postgres_ext.h to pg_config_manual.h + Move NAMEDATALEN definition from + postgres_ext.h to pg_config_manual.h (Peter) @@ -8364,7 +8364,7 @@ current_date < 2017-11-17 - Create a function variable join_search_hook to let plugins + Create a function variable join_search_hook to let plugins override the join search order portion of the planner (Julius Stroffek) @@ -8372,7 +8372,7 @@ current_date < 2017-11-17 - Add tas() support for Renesas' M32R processor + Add tas() support for Renesas' M32R processor (Kazuhiro Inaoka) @@ -8388,14 +8388,14 @@ current_date < 2017-11-17 Change the on-disk representation of the NUMERIC - data type so that the sign_dscale word comes + data type so that the sign_dscale word comes before the weight (Tom) - Use SYSV semaphores rather than POSIX on Darwin + Use SYSV semaphores rather than POSIX on Darwin >= 6.0, i.e., macOS 10.2 and up (Chris Marcellino) @@ -8432,8 +8432,8 @@ current_date < 2017-11-17 - Move contrib README content into the - main PostgreSQL documentation (Albert Cervera i + Move contrib README content into the + main PostgreSQL documentation (Albert Cervera i Areny) @@ -8455,11 +8455,11 @@ current_date < 2017-11-17 Add contrib/uuid-ossp module for generating - UUID values using the OSSP UUID library (Peter) + UUID values using the OSSP UUID library (Peter) - Use configure + Use configure --with-ossp-uuid to activate. This takes advantage of the new UUID builtin type. 
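Assuming the server was built with --with-ossp-uuid and the contrib/uuid-ossp SQL script has been loaded into the database, a minimal sketch of the new UUID support:

    SELECT uuid_generate_v4();   -- random (version 4) UUID from contrib/uuid-ossp
    SELECT '6f3a2b1c-0d4e-4f5a-9b8c-7d6e5f4a3b2c'::uuid;   -- the built-in uuid type itself needs no contrib module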
@@ -8477,14 +8477,14 @@ current_date < 2017-11-17 - Allow contrib/pgbench to set the fillfactor (Pavan + Allow contrib/pgbench to set the fillfactor (Pavan Deolasee) - Add timestamps to contrib/pgbench -l + Add timestamps to contrib/pgbench -l (Greg Smith) @@ -8498,13 +8498,13 @@ current_date < 2017-11-17 - Add GIN support for contrib/hstore (Teodor) + Add GIN support for contrib/hstore (Teodor) - Add GIN support for contrib/pg_trgm (Guillaume Smet, Teodor) + Add GIN support for contrib/pg_trgm (Guillaume Smet, Teodor) diff --git a/doc/src/sgml/release-8.4.sgml b/doc/src/sgml/release-8.4.sgml index 16004edb74..934f720387 100644 --- a/doc/src/sgml/release-8.4.sgml +++ b/doc/src/sgml/release-8.4.sgml @@ -12,11 +12,11 @@ This release contains a variety of fixes from 8.4.21. For information about new features in the 8.4 major release, see - . + . - This is expected to be the last PostgreSQL release + This is expected to be the last PostgreSQL release in the 8.4.X series. Users are encouraged to update to a newer release branch soon. @@ -36,7 +36,7 @@ Also, if you are upgrading from a version earlier than 8.4.19, - see . + see . @@ -48,15 +48,15 @@ - Correctly initialize padding bytes in contrib/btree_gist - indexes on bit columns (Heikki Linnakangas) + Correctly initialize padding bytes in contrib/btree_gist + indexes on bit columns (Heikki Linnakangas) This error could result in incorrect query results due to values that should compare equal not being seen as equal. - Users with GiST indexes on bit or bit varying - columns should REINDEX those indexes after installing this + Users with GiST indexes on bit or bit varying + columns should REINDEX those indexes after installing this update. @@ -76,7 +76,7 @@ Fix possibly-incorrect cache invalidation during nested calls - to ReceiveSharedInvalidMessages (Andres Freund) + to ReceiveSharedInvalidMessages (Andres Freund) @@ -103,13 +103,13 @@ This corrects cases where TOAST pointers could be copied into other tables without being dereferenced. If the original data is later deleted, it would lead to errors like missing chunk number 0 - for toast value ... when the now-dangling pointer is used. + for toast value ... when the now-dangling pointer is used. - Fix record type has not been registered failures with + Fix record type has not been registered failures with whole-row references to the output of Append plan nodes (Tom Lane) @@ -124,7 +124,7 @@ Fix query-lifespan memory leak while evaluating the arguments for a - function in FROM (Tom Lane) + function in FROM (Tom Lane) @@ -137,7 +137,7 @@ - Fix data encoding error in hungarian.stop (Tom Lane) + Fix data encoding error in hungarian.stop (Tom Lane) @@ -150,19 +150,19 @@ This could cause problems (at least spurious warnings, and at worst an - infinite loop) if CREATE INDEX or CLUSTER were + infinite loop) if CREATE INDEX or CLUSTER were done later in the same transaction. - Clear pg_stat_activity.xact_start - during PREPARE TRANSACTION (Andres Freund) + Clear pg_stat_activity.xact_start + during PREPARE TRANSACTION (Andres Freund) - After the PREPARE, the originating session is no longer in + After the PREPARE, the originating session is no longer in a transaction, so it should not continue to display a transaction start time. 
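A minimal sketch of the xact_start behavior just described; it assumes max_prepared_transactions is above zero and uses hypothetical table and transaction names:

    BEGIN;
    UPDATE accounts SET balance = balance - 100 WHERE id = 1;
    PREPARE TRANSACTION 'xfer_1';
    -- this session is now idle outside any transaction; a second session looking at
    -- its pg_stat_activity row should see a null xact_start rather than the old start time
    COMMIT PREPARED 'xfer_1';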
@@ -170,7 +170,7 @@ - Fix REASSIGN OWNED to not fail for text search objects + Fix REASSIGN OWNED to not fail for text search objects (Álvaro Herrera) @@ -182,7 +182,7 @@ This ensures that the postmaster will properly clean up after itself - if, for example, it receives SIGINT while still + if, for example, it receives SIGINT while still starting up. @@ -190,7 +190,7 @@ Secure Unix-domain sockets of temporary postmasters started during - make check (Noah Misch) + make check (Noah Misch) @@ -199,16 +199,16 @@ the operating-system user running the test, as we previously noted in CVE-2014-0067. This change defends against that risk by placing the server's socket in a temporary, mode 0700 subdirectory - of /tmp. The hazard remains however on platforms where + of /tmp. The hazard remains however on platforms where Unix sockets are not supported, notably Windows, because then the temporary postmaster must accept local TCP connections. A useful side effect of this change is to simplify - make check testing in builds that - override DEFAULT_PGSOCKET_DIR. Popular non-default values - like /var/run/postgresql are often not writable by the + make check testing in builds that + override DEFAULT_PGSOCKET_DIR. Popular non-default values + like /var/run/postgresql are often not writable by the build user, requiring workarounds that will no longer be necessary. @@ -216,7 +216,7 @@ On Windows, allow new sessions to absorb values of PGC_BACKEND - parameters (such as ) from the + parameters (such as ) from the configuration file (Amit Kapila) @@ -232,15 +232,15 @@ - This oversight could cause initdb - and pg_upgrade to fail on Windows, if the installation - path contained both spaces and @ signs. + This oversight could cause initdb + and pg_upgrade to fail on Windows, if the installation + path contained both spaces and @ signs. - Fix linking of libpython on macOS (Tom Lane) + Fix linking of libpython on macOS (Tom Lane) @@ -251,17 +251,17 @@ - Avoid buffer bloat in libpq when the server + Avoid buffer bloat in libpq when the server consistently sends data faster than the client can absorb it (Shin-ichi Morita, Tom Lane) - libpq could be coerced into enlarging its input buffer + libpq could be coerced into enlarging its input buffer until it runs out of memory (which would be reported misleadingly - as lost synchronization with server). Under ordinary + as lost synchronization with server). Under ordinary circumstances it's quite far-fetched that data could be continuously - transmitted more quickly than the recv() loop can + transmitted more quickly than the recv() loop can absorb it, but this has been observed when the client is artificially slowed by scheduler constraints. @@ -269,27 +269,27 @@ - Ensure that LDAP lookup attempts in libpq time out as + Ensure that LDAP lookup attempts in libpq time out as intended (Laurenz Albe) - Fix pg_restore's processing of old-style large object + Fix pg_restore's processing of old-style large object comments (Tom Lane) A direct-to-database restore from an archive file generated by a - pre-9.0 version of pg_dump would usually fail if the + pre-9.0 version of pg_dump would usually fail if the archive contained more than a few comments for large objects. 
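A minimal sketch of the REASSIGN OWNED fix for text search objects noted above; the role and dictionary names are hypothetical:

    -- as a role that owns, among other objects, a text search dictionary:
    CREATE TEXT SEARCH DICTIONARY simple_copy (TEMPLATE = pg_catalog.simple);
    -- later, handing everything over to another role no longer fails on such objects:
    REASSIGN OWNED BY old_owner TO new_owner;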
- In contrib/pgcrypto functions, ensure sensitive + In contrib/pgcrypto functions, ensure sensitive information is cleared from stack variables before returning (Marko Kreen) @@ -297,20 +297,20 @@ - In contrib/uuid-ossp, cache the state of the OSSP UUID + In contrib/uuid-ossp, cache the state of the OSSP UUID library across calls (Tom Lane) This improves the efficiency of UUID generation and reduces the amount - of entropy drawn from /dev/urandom, on platforms that + of entropy drawn from /dev/urandom, on platforms that have that. - Update time zone data files to tzdata release 2014e + Update time zone data files to tzdata release 2014e for DST law changes in Crimea, Egypt, and Morocco. @@ -331,11 +331,11 @@ This release contains a variety of fixes from 8.4.20. For information about new features in the 8.4 major release, see - . + . - The PostgreSQL community will stop releasing updates + The PostgreSQL community will stop releasing updates for the 8.4.X release series in July 2014. Users are encouraged to update to a newer release branch soon. @@ -349,7 +349,7 @@ However, if you are upgrading from a version earlier than 8.4.19, - see . + see . @@ -387,7 +387,7 @@ - Remove incorrect code that tried to allow OVERLAPS with + Remove incorrect code that tried to allow OVERLAPS with single-element row arguments (Joshua Yanovski) @@ -400,35 +400,35 @@ - Avoid getting more than AccessShareLock when de-parsing a + Avoid getting more than AccessShareLock when de-parsing a rule or view (Dean Rasheed) - This oversight resulted in pg_dump unexpectedly - acquiring RowExclusiveLock locks on tables mentioned as - the targets of INSERT/UPDATE/DELETE + This oversight resulted in pg_dump unexpectedly + acquiring RowExclusiveLock locks on tables mentioned as + the targets of INSERT/UPDATE/DELETE commands in rules. While usually harmless, that could interfere with concurrent transactions that tried to acquire, for example, - ShareLock on those tables. + ShareLock on those tables. - Prevent interrupts while reporting non-ERROR messages + Prevent interrupts while reporting non-ERROR messages (Tom Lane) This guards against rare server-process freezeups due to recursive - entry to syslog(), and perhaps other related problems. + entry to syslog(), and perhaps other related problems. - Update time zone data files to tzdata release 2014a + Update time zone data files to tzdata release 2014a for DST law changes in Fiji and Turkey, plus historical changes in Israel and Ukraine. @@ -450,11 +450,11 @@ This release contains a variety of fixes from 8.4.19. For information about new features in the 8.4 major release, see - . + . - The PostgreSQL community will stop releasing updates + The PostgreSQL community will stop releasing updates for the 8.4.X release series in July 2014. Users are encouraged to update to a newer release branch soon. @@ -468,7 +468,7 @@ However, if you are upgrading from a version earlier than 8.4.19, - see . + see . @@ -480,19 +480,19 @@ - Shore up GRANT ... WITH ADMIN OPTION restrictions + Shore up GRANT ... WITH ADMIN OPTION restrictions (Noah Misch) - Granting a role without ADMIN OPTION is supposed to + Granting a role without ADMIN OPTION is supposed to prevent the grantee from adding or removing members from the granted role, but this restriction was easily bypassed by doing SET - ROLE first. The security impact is mostly that a role member can + ROLE first. The security impact is mostly that a role member can revoke the access of others, contrary to the wishes of his grantor. 
Unapproved role member additions are a lesser concern, since an uncooperative role member could provide most of his rights to others - anyway by creating views or SECURITY DEFINER functions. + anyway by creating views or SECURITY DEFINER functions. (CVE-2014-0060) @@ -505,7 +505,7 @@ The primary role of PL validator functions is to be called implicitly - during CREATE FUNCTION, but they are also normal SQL + during CREATE FUNCTION, but they are also normal SQL functions that a user can call explicitly. Calling a validator on a function actually written in some other language was not checked for and could be exploited for privilege-escalation purposes. @@ -525,7 +525,7 @@ If the name lookups come to different conclusions due to concurrent activity, we might perform some parts of the DDL on a different table - than other parts. At least in the case of CREATE INDEX, + than other parts. At least in the case of CREATE INDEX, this can be used to cause the permissions checks to be performed against a different table than the index creation, allowing for a privilege escalation attack. @@ -539,12 +539,12 @@ - The MAXDATELEN constant was too small for the longest - possible value of type interval, allowing a buffer overrun - in interval_out(). Although the datetime input + The MAXDATELEN constant was too small for the longest + possible value of type interval, allowing a buffer overrun + in interval_out(). Although the datetime input functions were more careful about avoiding buffer overrun, the limit was short enough to cause them to reject some valid inputs, such as - input containing a very long timezone name. The ecpg + input containing a very long timezone name. The ecpg library contained these vulnerabilities along with some of its own. (CVE-2014-0063) @@ -571,7 +571,7 @@ - Use strlcpy() and related functions to provide a clear + Use strlcpy() and related functions to provide a clear guarantee that fixed-size buffers are not overrun. Unlike the preceding items, it is unclear whether these cases really represent live issues, since in most cases there appear to be previous @@ -583,35 +583,35 @@ - Avoid crashing if crypt() returns NULL (Honza Horak, + Avoid crashing if crypt() returns NULL (Honza Horak, Bruce Momjian) - There are relatively few scenarios in which crypt() - could return NULL, but contrib/chkpass would crash + There are relatively few scenarios in which crypt() + could return NULL, but contrib/chkpass would crash if it did. One practical case in which this could be an issue is - if libc is configured to refuse to execute unapproved - hashing algorithms (e.g., FIPS mode). + if libc is configured to refuse to execute unapproved + hashing algorithms (e.g., FIPS mode). (CVE-2014-0066) - Document risks of make check in the regression testing + Document risks of make check in the regression testing instructions (Noah Misch, Tom Lane) - Since the temporary server started by make check - uses trust authentication, another user on the same machine + Since the temporary server started by make check + uses trust authentication, another user on the same machine could connect to it as database superuser, and then potentially exploit the privileges of the operating-system user who started the tests. A future release will probably incorporate changes in the testing procedure to prevent this risk, but some public discussion is needed first. 
So for the moment, just warn people against using - make check when there are untrusted users on the + make check when there are untrusted users on the same machine. (CVE-2014-0067) @@ -626,7 +626,7 @@ The WAL update could be applied to the wrong page, potentially many pages past where it should have been. Aside from corrupting data, - this error has been observed to result in significant bloat + this error has been observed to result in significant bloat of standby servers compared to their masters, due to updates being applied far beyond where the end-of-file should have been. This failure mode does not appear to be a significant risk during crash @@ -654,25 +654,25 @@ Ensure that signal handlers don't attempt to use the - process's MyProc pointer after it's no longer valid. + process's MyProc pointer after it's no longer valid. - Fix unsafe references to errno within error reporting + Fix unsafe references to errno within error reporting logic (Christian Kruse) This would typically lead to odd behaviors such as missing or - inappropriate HINT fields. + inappropriate HINT fields. - Fix possible crashes from using ereport() too early + Fix possible crashes from using ereport() too early during server startup (Tom Lane) @@ -696,7 +696,7 @@ - Fix length checking for Unicode identifiers (U&"..." + Fix length checking for Unicode identifiers (U&"..." syntax) containing escapes (Tom Lane) @@ -710,19 +710,19 @@ Fix possible crash due to invalid plan for nested sub-selects, such - as WHERE (... x IN (SELECT ...) ...) IN (SELECT ...) + as WHERE (... x IN (SELECT ...) ...) IN (SELECT ...) (Tom Lane) - Ensure that ANALYZE creates statistics for a table column - even when all the values in it are too wide (Tom Lane) + Ensure that ANALYZE creates statistics for a table column + even when all the values in it are too wide (Tom Lane) - ANALYZE intentionally omits very wide values from its + ANALYZE intentionally omits very wide values from its histogram and most-common-values calculations, but it neglected to do something sane in the case that all the sampled entries are too wide. @@ -730,21 +730,21 @@ - In ALTER TABLE ... SET TABLESPACE, allow the database's + In ALTER TABLE ... SET TABLESPACE, allow the database's default tablespace to be used without a permissions check (Stephen Frost) - CREATE TABLE has always allowed such usage, - but ALTER TABLE didn't get the memo. + CREATE TABLE has always allowed such usage, + but ALTER TABLE didn't get the memo. - Fix cannot accept a set error when some arms of - a CASE return a set and others don't (Tom Lane) + Fix cannot accept a set error when some arms of + a CASE return a set and others don't (Tom Lane) @@ -769,12 +769,12 @@ - Fix possible misbehavior in plainto_tsquery() + Fix possible misbehavior in plainto_tsquery() (Heikki Linnakangas) - Use memmove() not memcpy() for copying + Use memmove() not memcpy() for copying overlapping memory regions. There have been no field reports of this actually causing trouble, but it's certainly risky. @@ -782,51 +782,51 @@ - Accept SHIFT_JIS as an encoding name for locale checking + Accept SHIFT_JIS as an encoding name for locale checking purposes (Tatsuo Ishii) - Fix misbehavior of PQhost() on Windows (Fujii Masao) + Fix misbehavior of PQhost() on Windows (Fujii Masao) - It should return localhost if no host has been specified. + It should return localhost if no host has been specified. 
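A minimal sketch of the ALTER TABLE ... SET TABLESPACE fix a few items above; the table name is hypothetical, and pg_default stands in for whatever the database's default tablespace actually is:

    -- moving a table into the database's default tablespace no longer
    -- requires CREATE permission on that tablespace, matching CREATE TABLE
    ALTER TABLE my_table SET TABLESPACE pg_default;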
- Improve error handling in libpq and psql - for failures during COPY TO STDOUT/FROM STDIN (Tom Lane) + Improve error handling in libpq and psql + for failures during COPY TO STDOUT/FROM STDIN (Tom Lane) In particular this fixes an infinite loop that could occur in 9.2 and up if the server connection was lost during COPY FROM - STDIN. Variants of that scenario might be possible in older + STDIN. Variants of that scenario might be possible in older versions, or with other client applications. - Fix misaligned descriptors in ecpg (MauMau) + Fix misaligned descriptors in ecpg (MauMau) - In ecpg, handle lack of a hostname in the connection + In ecpg, handle lack of a hostname in the connection parameters properly (Michael Meskes) - Fix performance regression in contrib/dblink connection + Fix performance regression in contrib/dblink connection startup (Joe Conway) @@ -837,7 +837,7 @@ - In contrib/isn, fix incorrect calculation of the check + In contrib/isn, fix incorrect calculation of the check digit for ISMN values (Fabien Coelho) @@ -851,21 +851,21 @@ - In Mingw and Cygwin builds, install the libpq DLL - in the bin directory (Andrew Dunstan) + In Mingw and Cygwin builds, install the libpq DLL + in the bin directory (Andrew Dunstan) This duplicates what the MSVC build has long done. It should fix - problems with programs like psql failing to start + problems with programs like psql failing to start because they can't find the DLL. - Don't generate plain-text HISTORY - and src/test/regress/README files anymore (Tom Lane) + Don't generate plain-text HISTORY + and src/test/regress/README files anymore (Tom Lane) @@ -874,20 +874,20 @@ the likely audience for plain-text format. Distribution tarballs will still contain files by these names, but they'll just be stubs directing the reader to consult the main documentation. - The plain-text INSTALL file will still be maintained, as + The plain-text INSTALL file will still be maintained, as there is arguably a use-case for that. - Update time zone data files to tzdata release 2013i + Update time zone data files to tzdata release 2013i for DST law changes in Jordan and historical changes in Cuba. - In addition, the zones Asia/Riyadh87, - Asia/Riyadh88, and Asia/Riyadh89 have been + In addition, the zones Asia/Riyadh87, + Asia/Riyadh88, and Asia/Riyadh89 have been removed, as they are no longer maintained by IANA, and never represented actual civil timekeeping practice. @@ -909,7 +909,7 @@ This release contains a variety of fixes from 8.4.18. For information about new features in the 8.4 major release, see - . + . @@ -927,7 +927,7 @@ Also, if you are upgrading from a version earlier than 8.4.17, - see . + see . @@ -939,13 +939,13 @@ - Fix VACUUM's tests to see whether it can - update relfrozenxid (Andres Freund) + Fix VACUUM's tests to see whether it can + update relfrozenxid (Andres Freund) - In some cases VACUUM (either manual or autovacuum) could - incorrectly advance a table's relfrozenxid value, + In some cases VACUUM (either manual or autovacuum) could + incorrectly advance a table's relfrozenxid value, allowing tuples to escape freezing, causing those rows to become invisible once 2^31 transactions have elapsed. The probability of data loss is fairly low since multiple incorrect advancements would @@ -957,12 +957,12 @@ The issue can be ameliorated by, after upgrading, vacuuming all tables in all databases while having vacuum_freeze_table_age + linkend="guc-vacuum-freeze-table-age">vacuum_freeze_table_age set to zero. 
This will fix any latent corruption but will not be able to fix all pre-existing data errors. However, an installation can be presumed safe after performing this vacuuming if it has executed fewer than 2^31 update transactions in its lifetime (check this with - SELECT txid_current() < 2^31). + SELECT txid_current() < 2^31). @@ -979,8 +979,8 @@ - Avoid flattening a subquery whose SELECT list contains a - volatile function wrapped inside a sub-SELECT (Tom Lane) + Avoid flattening a subquery whose SELECT list contains a + volatile function wrapped inside a sub-SELECT (Tom Lane) @@ -997,7 +997,7 @@ This error could lead to incorrect plans for queries involving - multiple levels of subqueries within JOIN syntax. + multiple levels of subqueries within JOIN syntax. @@ -1015,13 +1015,13 @@ - Fix array slicing of int2vector and oidvector values + Fix array slicing of int2vector and oidvector values (Tom Lane) Expressions of this kind are now implicitly promoted to - regular int2 or oid arrays. + regular int2 or oid arrays. @@ -1035,7 +1035,7 @@ In some cases, the system would use the simple GMT offset value when it should have used the regular timezone setting that had prevailed before the simple offset was selected. This change also causes - the timeofday function to honor the simple GMT offset + the timeofday function to honor the simple GMT offset zone. @@ -1049,7 +1049,7 @@ - Properly quote generated command lines in pg_ctl + Properly quote generated command lines in pg_ctl (Naoya Anzai and Tom Lane) @@ -1060,10 +1060,10 @@ - Fix pg_dumpall to work when a source database + Fix pg_dumpall to work when a source database sets default_transaction_read_only - via ALTER DATABASE SET (Kevin Grittner) + linkend="guc-default-transaction-read-only">default_transaction_read_only + via ALTER DATABASE SET (Kevin Grittner) @@ -1073,21 +1073,21 @@ - Fix ecpg's processing of lists of variables - declared varchar (Zoltán Böszörményi) + Fix ecpg's processing of lists of variables + declared varchar (Zoltán Böszörményi) - Make contrib/lo defend against incorrect trigger definitions + Make contrib/lo defend against incorrect trigger definitions (Marc Cousin) - Update time zone data files to tzdata release 2013h + Update time zone data files to tzdata release 2013h for DST law changes in Argentina, Brazil, Jordan, Libya, Liechtenstein, Morocco, and Palestine. Also, new timezone abbreviations WIB, WIT, WITA for Indonesia. @@ -1110,7 +1110,7 @@ This release contains a variety of fixes from 8.4.17. For information about new features in the 8.4 major release, see - . + . @@ -1122,7 +1122,7 @@ However, if you are upgrading from a version earlier than 8.4.17, - see . + see . @@ -1139,7 +1139,7 @@ - PostgreSQL case-folds non-ASCII characters only + PostgreSQL case-folds non-ASCII characters only when using a single-byte server encoding. @@ -1153,7 +1153,7 @@ - Fix memory overcommit bug when work_mem is using more + Fix memory overcommit bug when work_mem is using more than 24GB of memory (Stephen Frost) @@ -1171,29 +1171,29 @@ - Previously tests like col IS NOT TRUE and col IS - NOT FALSE did not properly factor in NULL values when estimating + Previously tests like col IS NOT TRUE and col IS + NOT FALSE did not properly factor in NULL values when estimating plan costs. 
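A minimal sketch of the per-database setting that the pg_dumpall fix above is concerned with; the database name is hypothetical:

    ALTER DATABASE reporting_db SET default_transaction_read_only = on;
    -- with such a setting in place, pg_dumpall formerly did not work against the cluster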
- Prevent pushing down WHERE clauses into unsafe - UNION/INTERSECT subqueries (Tom Lane) + Prevent pushing down WHERE clauses into unsafe + UNION/INTERSECT subqueries (Tom Lane) - Subqueries of a UNION or INTERSECT that + Subqueries of a UNION or INTERSECT that contain set-returning functions or volatile functions in their - SELECT lists could be improperly optimized, leading to + SELECT lists could be improperly optimized, leading to run-time errors or incorrect query results. - Fix rare case of failed to locate grouping columns + Fix rare case of failed to locate grouping columns planner failure (Tom Lane) @@ -1208,13 +1208,13 @@ Fix possible deadlock during concurrent CREATE INDEX - CONCURRENTLY operations (Tom Lane) + CONCURRENTLY operations (Tom Lane) - Fix regexp_matches() handling of zero-length matches + Fix regexp_matches() handling of zero-length matches (Jeevan Chalke) @@ -1238,14 +1238,14 @@ - Prevent CREATE FUNCTION from checking SET + Prevent CREATE FUNCTION from checking SET variables unless function body checking is enabled (Tom Lane) - Fix pgp_pub_decrypt() so it works for secret keys with + Fix pgp_pub_decrypt() so it works for secret keys with passwords (Marko Kreen) @@ -1260,21 +1260,21 @@ Avoid possible failure when performing transaction control commands (e.g - ROLLBACK) in prepared queries (Tom Lane) + ROLLBACK) in prepared queries (Tom Lane) Ensure that floating-point data input accepts standard spellings - of infinity on all platforms (Tom Lane) + of infinity on all platforms (Tom Lane) - The C99 standard says that allowable spellings are inf, - +inf, -inf, infinity, - +infinity, and -infinity. Make sure we - recognize these even if the platform's strtod function + The C99 standard says that allowable spellings are inf, + +inf, -inf, infinity, + +infinity, and -infinity. Make sure we + recognize these even if the platform's strtod function doesn't. @@ -1288,7 +1288,7 @@ - Update time zone data files to tzdata release 2013d + Update time zone data files to tzdata release 2013d for DST law changes in Israel, Morocco, Palestine, and Paraguay. Also, historical zone data corrections for Macquarie Island. @@ -1310,7 +1310,7 @@ This release contains a variety of fixes from 8.4.16. For information about new features in the 8.4 major release, see - . + . @@ -1323,13 +1323,13 @@ However, this release corrects several errors in management of GiST indexes. After installing this update, it is advisable to - REINDEX any GiST indexes that meet one or more of the + REINDEX any GiST indexes that meet one or more of the conditions described below. Also, if you are upgrading from a version earlier than 8.4.10, - see . + see . @@ -1347,41 +1347,41 @@ This avoids a scenario wherein random numbers generated by - contrib/pgcrypto functions might be relatively easy for + contrib/pgcrypto functions might be relatively easy for another database user to guess. The risk is only significant when - the postmaster is configured with ssl = on + the postmaster is configured with ssl = on but most connections don't use SSL encryption. 
(CVE-2013-1900) - Fix GiST indexes to not use fuzzy geometric comparisons when + Fix GiST indexes to not use fuzzy geometric comparisons when it's not appropriate to do so (Alexander Korotkov) - The core geometric types perform comparisons using fuzzy - equality, but gist_box_same must do exact comparisons, + The core geometric types perform comparisons using fuzzy + equality, but gist_box_same must do exact comparisons, else GiST indexes using it might become inconsistent. After installing - this update, users should REINDEX any GiST indexes on - box, polygon, circle, or point - columns, since all of these use gist_box_same. + this update, users should REINDEX any GiST indexes on + box, polygon, circle, or point + columns, since all of these use gist_box_same. Fix erroneous range-union and penalty logic in GiST indexes that use - contrib/btree_gist for variable-width data types, that is - text, bytea, bit, and numeric + contrib/btree_gist for variable-width data types, that is + text, bytea, bit, and numeric columns (Tom Lane) These errors could result in inconsistent indexes in which some keys that are present would not be found by searches, and also in useless - index bloat. Users are advised to REINDEX such indexes + index bloat. Users are advised to REINDEX such indexes after installing this update. @@ -1396,7 +1396,7 @@ These errors could result in inconsistent indexes in which some keys that are present would not be found by searches, and also in indexes that are unnecessarily inefficient to search. Users are advised to - REINDEX multi-column GiST indexes after installing this + REINDEX multi-column GiST indexes after installing this update. @@ -1417,27 +1417,27 @@ - Fix to_char() to use ASCII-only case-folding rules where + Fix to_char() to use ASCII-only case-folding rules where appropriate (Tom Lane) This fixes misbehavior of some template patterns that should be - locale-independent, but mishandled I and - i in Turkish locales. + locale-independent, but mishandled I and + i in Turkish locales. - Fix unwanted rejection of timestamp 1999-12-31 24:00:00 + Fix unwanted rejection of timestamp 1999-12-31 24:00:00 (Tom Lane) - Remove useless picksplit doesn't support secondary split log + Remove useless picksplit doesn't support secondary split log messages (Josh Hansen, Tom Lane) @@ -1458,28 +1458,28 @@ - Eliminate memory leaks in PL/Perl's spi_prepare() function + Eliminate memory leaks in PL/Perl's spi_prepare() function (Alex Hunsaker, Tom Lane) - Fix pg_dumpall to handle database names containing - = correctly (Heikki Linnakangas) + Fix pg_dumpall to handle database names containing + = correctly (Heikki Linnakangas) - Avoid crash in pg_dump when an incorrect connection + Avoid crash in pg_dump when an incorrect connection string is given (Heikki Linnakangas) - Ignore invalid indexes in pg_dump (Michael Paquier) + Ignore invalid indexes in pg_dump (Michael Paquier) @@ -1488,24 +1488,24 @@ a uniqueness condition not satisfied by the table's data. Also, if the index creation is in fact still in progress, it seems reasonable to consider it to be an uncommitted DDL change, which - pg_dump wouldn't be expected to dump anyway. + pg_dump wouldn't be expected to dump anyway. - Fix contrib/pg_trgm's similarity() function + Fix contrib/pg_trgm's similarity() function to return zero for trigram-less strings (Tom Lane) - Previously it returned NaN due to internal division by zero. + Previously it returned NaN due to internal division by zero. 
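A minimal sketch of the similarity() fix just described, assuming contrib/pg_trgm is installed in the database:

    SELECT similarity('word', 'word');   -- 1 for identical strings
    SELECT similarity('word', '');       -- now 0; formerly NaN, since an empty string has no trigrams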
- Update time zone data files to tzdata release 2013b + Update time zone data files to tzdata release 2013b for DST law changes in Chile, Haiti, Morocco, Paraguay, and some Russian areas. Also, historical zone data corrections for numerous places. @@ -1513,12 +1513,12 @@ Also, update the time zone abbreviation files for recent changes in - Russia and elsewhere: CHOT, GET, - IRKT, KGT, KRAT, MAGT, - MAWT, MSK, NOVT, OMST, - TKT, VLAT, WST, YAKT, - YEKT now follow their current meanings, and - VOLT (Europe/Volgograd) and MIST + Russia and elsewhere: CHOT, GET, + IRKT, KGT, KRAT, MAGT, + MAWT, MSK, NOVT, OMST, + TKT, VLAT, WST, YAKT, + YEKT now follow their current meanings, and + VOLT (Europe/Volgograd) and MIST (Antarctica/Macquarie) are added to the default abbreviations list. @@ -1539,7 +1539,7 @@ This release contains a variety of fixes from 8.4.15. For information about new features in the 8.4 major release, see - . + . @@ -1551,7 +1551,7 @@ However, if you are upgrading from a version earlier than 8.4.10, - see . + see . @@ -1563,7 +1563,7 @@ - Prevent execution of enum_recv from SQL (Tom Lane) + Prevent execution of enum_recv from SQL (Tom Lane) @@ -1596,19 +1596,19 @@ Protect against race conditions when scanning - pg_tablespace (Stephen Frost, Tom Lane) + pg_tablespace (Stephen Frost, Tom Lane) - CREATE DATABASE and DROP DATABASE could + CREATE DATABASE and DROP DATABASE could misbehave if there were concurrent updates of - pg_tablespace entries. + pg_tablespace entries. - Prevent DROP OWNED from trying to drop whole databases or + Prevent DROP OWNED from trying to drop whole databases or tablespaces (Álvaro Herrera) @@ -1620,13 +1620,13 @@ Fix error in vacuum_freeze_table_age + linkend="guc-vacuum-freeze-table-age">vacuum_freeze_table_age implementation (Andres Freund) In installations that have existed for more than vacuum_freeze_min_age + linkend="guc-vacuum-freeze-min-age">vacuum_freeze_min_age transactions, this mistake prevented autovacuum from using partial-table scans, so that a full-table scan would always happen instead. @@ -1634,13 +1634,13 @@ - Prevent misbehavior when a RowExpr or XmlExpr + Prevent misbehavior when a RowExpr or XmlExpr is parse-analyzed twice (Andres Freund, Tom Lane) This mistake could be user-visible in contexts such as - CREATE TABLE LIKE INCLUDING INDEXES. + CREATE TABLE LIKE INCLUDING INDEXES. @@ -1653,7 +1653,7 @@ - Reject out-of-range dates in to_date() (Hitoshi Harada) + Reject out-of-range dates in to_date() (Hitoshi Harada) @@ -1664,41 +1664,41 @@ - This bug affected psql and some other client programs. + This bug affected psql and some other client programs. - Fix possible crash in psql's \? command + Fix possible crash in psql's \? command when not connected to a database (Meng Qingzhong) - Fix one-byte buffer overrun in libpq's - PQprintTuples (Xi Wang) + Fix one-byte buffer overrun in libpq's + PQprintTuples (Xi Wang) This ancient function is not used anywhere by - PostgreSQL itself, but it might still be used by some + PostgreSQL itself, but it might still be used by some client code. 
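A minimal sketch of the DROP OWNED behavior clarified earlier among these fixes; the role name is hypothetical:

    -- drops the objects role app_user owns in the current database and revokes its privileges;
    -- any databases or tablespaces it owns are left alone and must be handled separately
    DROP OWNED BY app_user;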
- Make ecpglib use translated messages properly + Make ecpglib use translated messages properly (Chen Huajun) - Properly install ecpg_compat and - pgtypes libraries on MSVC (Jiang Guiqing) + Properly install ecpg_compat and + pgtypes libraries on MSVC (Jiang Guiqing) @@ -1717,15 +1717,15 @@ - Make pgxs build executables with the right - .exe suffix when cross-compiling for Windows + Make pgxs build executables with the right + .exe suffix when cross-compiling for Windows (Zoltan Boszormenyi) - Add new timezone abbreviation FET (Tom Lane) + Add new timezone abbreviation FET (Tom Lane) @@ -1749,7 +1749,7 @@ This release contains a variety of fixes from 8.4.14. For information about new features in the 8.4 major release, see - . + . @@ -1761,7 +1761,7 @@ However, if you are upgrading from a version earlier than 8.4.10, - see . + see . @@ -1774,13 +1774,13 @@ Fix multiple bugs associated with CREATE INDEX - CONCURRENTLY (Andres Freund, Tom Lane) + CONCURRENTLY (Andres Freund, Tom Lane) - Fix CREATE INDEX CONCURRENTLY to use + Fix CREATE INDEX CONCURRENTLY to use in-place updates when changing the state of an index's - pg_index row. This prevents race conditions that could + pg_index row. This prevents race conditions that could cause concurrent sessions to miss updating the target index, thus resulting in corrupt concurrently-created indexes. @@ -1788,8 +1788,8 @@ Also, fix various other operations to ensure that they ignore invalid indexes resulting from a failed CREATE INDEX - CONCURRENTLY command. The most important of these is - VACUUM, because an auto-vacuum could easily be launched + CONCURRENTLY command. The most important of these is + VACUUM, because an auto-vacuum could easily be launched on the table before corrective action can be taken to fix or remove the invalid index. @@ -1811,8 +1811,8 @@ The planner could derive incorrect constraints from a clause equating a non-strict construct to something else, for example - WHERE COALESCE(foo, 0) = 0 - when foo is coming from the nullable side of an outer join. + WHERE COALESCE(foo, 0) = 0 + when foo is coming from the nullable side of an outer join. @@ -1830,10 +1830,10 @@ - This affects multicolumn NOT IN subplans, such as - WHERE (a, b) NOT IN (SELECT x, y FROM ...) - when for instance b and y are int4 - and int8 respectively. This mistake led to wrong answers + This affects multicolumn NOT IN subplans, such as + WHERE (a, b) NOT IN (SELECT x, y FROM ...) + when for instance b and y are int4 + and int8 respectively. This mistake led to wrong answers or crashes depending on the specific datatypes involved. 
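A hedged sketch of the outer-join hazard described a little above; the table and column names are hypothetical:

    -- b.flag comes from the nullable side of the join, so the planner must not
    -- treat COALESCE(b.flag, 0) = 0 as if it implied b.flag = 0
    SELECT *
      FROM a
      LEFT JOIN b ON a.id = b.id
     WHERE COALESCE(b.flag, 0) = 0;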
@@ -1841,7 +1841,7 @@ Acquire buffer lock when re-fetching the old tuple for an - AFTER ROW UPDATE/DELETE trigger (Andres Freund) + AFTER ROW UPDATE/DELETE trigger (Andres Freund) @@ -1854,7 +1854,7 @@ - Fix ALTER COLUMN TYPE to handle inherited check + Fix ALTER COLUMN TYPE to handle inherited check constraints properly (Pavan Deolasee) @@ -1866,14 +1866,14 @@ - Fix REASSIGN OWNED to handle grants on tablespaces + Fix REASSIGN OWNED to handle grants on tablespaces (Álvaro Herrera) - Ignore incorrect pg_attribute entries for system + Ignore incorrect pg_attribute entries for system columns for views (Tom Lane) @@ -1887,7 +1887,7 @@ - Fix rule printing to dump INSERT INTO table + Fix rule printing to dump INSERT INTO table DEFAULT VALUES correctly (Tom Lane) @@ -1895,7 +1895,7 @@ Guard against stack overflow when there are too many - UNION/INTERSECT/EXCEPT clauses + UNION/INTERSECT/EXCEPT clauses in a query (Tom Lane) @@ -1923,7 +1923,7 @@ Formerly, this would result in something quite unhelpful, such as - Non-recoverable failure in name resolution. + Non-recoverable failure in name resolution. @@ -1936,8 +1936,8 @@ - Make pg_ctl more robust about reading the - postmaster.pid file (Heikki Linnakangas) + Make pg_ctl more robust about reading the + postmaster.pid file (Heikki Linnakangas) @@ -1947,33 +1947,33 @@ - Fix possible crash in psql if incorrectly-encoded data - is presented and the client_encoding setting is a + Fix possible crash in psql if incorrectly-encoded data + is presented and the client_encoding setting is a client-only encoding, such as SJIS (Jiang Guiqing) - Fix bugs in the restore.sql script emitted by - pg_dump in tar output format (Tom Lane) + Fix bugs in the restore.sql script emitted by + pg_dump in tar output format (Tom Lane) The script would fail outright on tables whose names include upper-case characters. Also, make the script capable of restoring - data in mode as well as the regular COPY mode. - Fix pg_restore to accept POSIX-conformant - tar files (Brian Weaver, Tom Lane) + Fix pg_restore to accept POSIX-conformant + tar files (Brian Weaver, Tom Lane) - The original coding of pg_dump's tar + The original coding of pg_dump's tar output mode produced files that are not fully conformant with the POSIX standard. This has been corrected for version 9.3. This patch updates previous branches so that they will accept both the @@ -1984,41 +1984,41 @@ - Fix pg_resetxlog to locate postmaster.pid + Fix pg_resetxlog to locate postmaster.pid correctly when given a relative path to the data directory (Tom Lane) - This mistake could lead to pg_resetxlog not noticing + This mistake could lead to pg_resetxlog not noticing that there is an active postmaster using the data directory. 
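For the rule-printing fix above, a small hypothetical example of a rule whose action is an INSERT ... DEFAULT VALUES, the statement form that was previously dumped incorrectly; all object names are invented for illustration:

    CREATE TABLE audit_log (id serial, noted_at timestamptz DEFAULT now());
    CREATE TABLE watched (val int);
    CREATE RULE note_change AS ON UPDATE TO watched
        DO ALSO INSERT INTO audit_log DEFAULT VALUES;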
- Fix libpq's lo_import() and - lo_export() functions to report file I/O errors properly + Fix libpq's lo_import() and + lo_export() functions to report file I/O errors properly (Tom Lane) - Fix ecpg's processing of nested structure pointer + Fix ecpg's processing of nested structure pointer variables (Muhammad Usama) - Make contrib/pageinspect's btree page inspection + Make contrib/pageinspect's btree page inspection functions take buffer locks while examining pages (Tom Lane) - Fix pgxs support for building loadable modules on AIX + Fix pgxs support for building loadable modules on AIX (Tom Lane) @@ -2029,7 +2029,7 @@ - Update time zone data files to tzdata release 2012j + Update time zone data files to tzdata release 2012j for DST law changes in Cuba, Israel, Jordan, Libya, Palestine, Western Samoa, and portions of Brazil. @@ -2051,7 +2051,7 @@ This release contains a variety of fixes from 8.4.13. For information about new features in the 8.4 major release, see - . + . @@ -2063,7 +2063,7 @@ However, if you are upgrading from a version earlier than 8.4.10, - see . + see . @@ -2081,7 +2081,7 @@ These errors could result in wrong answers from queries that scan the - same WITH subquery multiple times. + same WITH subquery multiple times. @@ -2104,22 +2104,22 @@ - If we revoke a grant option from some role X, but - X still holds that option via a grant from someone + If we revoke a grant option from some role X, but + X still holds that option via a grant from someone else, we should not recursively revoke the corresponding privilege - from role(s) Y that X had granted it + from role(s) Y that X had granted it to. - Fix handling of SIGFPE when PL/Perl is in use (Andres Freund) + Fix handling of SIGFPE when PL/Perl is in use (Andres Freund) - Perl resets the process's SIGFPE handler to - SIG_IGN, which could result in crashes later on. Restore + Perl resets the process's SIGFPE handler to + SIG_IGN, which could result in crashes later on. Restore the normal Postgres signal handler after initializing PL/Perl. @@ -2138,7 +2138,7 @@ Some Linux distributions contain an incorrect version of - pthread.h that results in incorrect compiled code in + pthread.h that results in incorrect compiled code in PL/Perl, leading to crashes if a PL/Perl function calls another one that throws an error. @@ -2146,7 +2146,7 @@ - Update time zone data files to tzdata release 2012f + Update time zone data files to tzdata release 2012f for DST law changes in Fiji @@ -2167,7 +2167,7 @@ This release contains a variety of fixes from 8.4.12. For information about new features in the 8.4 major release, see - . + . @@ -2179,7 +2179,7 @@ However, if you are upgrading from a version earlier than 8.4.10, - see . + see . @@ -2196,7 +2196,7 @@ - xml_parse() would attempt to fetch external files or + xml_parse() would attempt to fetch external files or URLs as needed to resolve DTD and entity references in an XML value, thus allowing unprivileged database users to attempt to fetch data with the privileges of the database server. While the external data @@ -2209,22 +2209,22 @@ - Prevent access to external files/URLs via contrib/xml2's - xslt_process() (Peter Eisentraut) + Prevent access to external files/URLs via contrib/xml2's + xslt_process() (Peter Eisentraut) - libxslt offers the ability to read and write both + libxslt offers the ability to read and write both files and URLs through stylesheet commands, thus allowing unprivileged database users to both read and write data with the privileges of the database server. 
Disable that through proper use - of libxslt's security options. (CVE-2012-3488) + of libxslt's security options. (CVE-2012-3488) - Also, remove xslt_process()'s ability to fetch documents + Also, remove xslt_process()'s ability to fetch documents and stylesheets from external files/URLs. While this was a - documented feature, it was long regarded as a bad idea. + documented feature, it was long regarded as a bad idea. The fix for CVE-2012-3489 broke that capability, and rather than expend effort on trying to fix it, we're just going to summarily remove it. @@ -2252,22 +2252,22 @@ - If ALTER SEQUENCE was executed on a freshly created or - reset sequence, and then precisely one nextval() call + If ALTER SEQUENCE was executed on a freshly created or + reset sequence, and then precisely one nextval() call was made on it, and then the server crashed, WAL replay would restore the sequence to a state in which it appeared that no - nextval() had been done, thus allowing the first + nextval() had been done, thus allowing the first sequence value to be returned again by the next - nextval() call. In particular this could manifest for - serial columns, since creation of a serial column's sequence - includes an ALTER SEQUENCE OWNED BY step. + nextval() call. In particular this could manifest for + serial columns, since creation of a serial column's sequence + includes an ALTER SEQUENCE OWNED BY step. - Ensure the backup_label file is fsync'd after - pg_start_backup() (Dave Kerr) + Ensure the backup_label file is fsync'd after + pg_start_backup() (Dave Kerr) @@ -2292,7 +2292,7 @@ The original coding could allow inconsistent behavior in some cases; in particular, an autovacuum could get canceled after less than - deadlock_timeout grace period. + deadlock_timeout grace period. @@ -2304,15 +2304,15 @@ - Fix log collector so that log_truncate_on_rotation works + Fix log collector so that log_truncate_on_rotation works during the very first log rotation after server start (Tom Lane) - Fix WITH attached to a nested set operation - (UNION/INTERSECT/EXCEPT) + Fix WITH attached to a nested set operation + (UNION/INTERSECT/EXCEPT) (Tom Lane) @@ -2320,24 +2320,24 @@ Ensure that a whole-row reference to a subquery doesn't include any - extra GROUP BY or ORDER BY columns (Tom Lane) + extra GROUP BY or ORDER BY columns (Tom Lane) - Disallow copying whole-row references in CHECK - constraints and index definitions during CREATE TABLE + Disallow copying whole-row references in CHECK + constraints and index definitions during CREATE TABLE (Tom Lane) - This situation can arise in CREATE TABLE with - LIKE or INHERITS. The copied whole-row + This situation can arise in CREATE TABLE with + LIKE or INHERITS. The copied whole-row variable was incorrectly labeled with the row type of the original table not the new one. Rejecting the case seems reasonable for - LIKE, since the row types might well diverge later. For - INHERITS we should ideally allow it, with an implicit + LIKE, since the row types might well diverge later. For + INHERITS we should ideally allow it, with an implicit coercion to the parent table's row type; but that will require more work than seems safe to back-patch. @@ -2345,7 +2345,7 @@ - Fix memory leak in ARRAY(SELECT ...) subqueries (Heikki + Fix memory leak in ARRAY(SELECT ...) subqueries (Heikki Linnakangas, Tom Lane) @@ -2357,7 +2357,7 @@ The code could get confused by quantified parenthesized - subexpressions, such as ^(foo)?bar. This would lead to + subexpressions, such as ^(foo)?bar. 
This would lead to incorrect index optimization of searches for such patterns. @@ -2365,22 +2365,22 @@ Fix bugs with parsing signed - hh:mm and - hh:mm:ss - fields in interval constants (Amit Kapila, Tom Lane) + hh:mm and + hh:mm:ss + fields in interval constants (Amit Kapila, Tom Lane) - Report errors properly in contrib/xml2's - xslt_process() (Tom Lane) + Report errors properly in contrib/xml2's + xslt_process() (Tom Lane) - Update time zone data files to tzdata release 2012e + Update time zone data files to tzdata release 2012e for DST law changes in Morocco and Tokelau @@ -2401,7 +2401,7 @@ This release contains a variety of fixes from 8.4.11. For information about new features in the 8.4 major release, see - . + . @@ -2413,7 +2413,7 @@ However, if you are upgrading from a version earlier than 8.4.10, - see . + see . @@ -2426,12 +2426,12 @@ Fix incorrect password transformation in - contrib/pgcrypto's DES crypt() function + contrib/pgcrypto's DES crypt() function (Solar Designer) - If a password string contained the byte value 0x80, the + If a password string contained the byte value 0x80, the remainder of the password was ignored, causing the password to be much weaker than it appeared. With this fix, the rest of the string is properly included in the DES hash. Any stored password values that are @@ -2442,7 +2442,7 @@ - Ignore SECURITY DEFINER and SET attributes for + Ignore SECURITY DEFINER and SET attributes for a procedural language's call handler (Tom Lane) @@ -2454,7 +2454,7 @@ - Allow numeric timezone offsets in timestamp input to be up to + Allow numeric timezone offsets in timestamp input to be up to 16 hours away from UTC (Tom Lane) @@ -2480,7 +2480,7 @@ - Fix text to name and char to name + Fix text to name and char to name casts to perform string truncation correctly in multibyte encodings (Karl Schnaitter) @@ -2488,7 +2488,7 @@ - Fix memory copying bug in to_tsquery() (Heikki Linnakangas) + Fix memory copying bug in to_tsquery() (Heikki Linnakangas) @@ -2502,7 +2502,7 @@ This bug concerns sub-SELECTs that reference variables coming from the nullable side of an outer join of the surrounding query. In 9.1, queries affected by this bug would fail with ERROR: - Upper-level PlaceHolderVar found where not expected. But in 9.0 and + Upper-level PlaceHolderVar found where not expected. But in 9.0 and 8.4, you'd silently get possibly-wrong answers, since the value transmitted into the subquery wouldn't go to null when it should. @@ -2510,13 +2510,13 @@ - Fix slow session startup when pg_attribute is very large + Fix slow session startup when pg_attribute is very large (Tom Lane) - If pg_attribute exceeds one-fourth of - shared_buffers, cache rebuilding code that is sometimes + If pg_attribute exceeds one-fourth of + shared_buffers, cache rebuilding code that is sometimes needed during session start would trigger the synchronized-scan logic, causing it to take many times longer than normal. The problem was particularly acute if many new sessions were starting at once. 
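A small illustrative pair of interval constants with signed hh:mm and hh:mm:ss fields, the input forms the interval-parsing fix above is concerned with; the particular values are arbitrary:

    SELECT INTERVAL '-2:30';        -- signed hh:mm field
    SELECT INTERVAL '+1:15:30';     -- signed hh:mm:ss field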
@@ -2537,8 +2537,8 @@ - Ensure the Windows implementation of PGSemaphoreLock() - clears ImmediateInterruptOK before returning (Tom Lane) + Ensure the Windows implementation of PGSemaphoreLock() + clears ImmediateInterruptOK before returning (Tom Lane) @@ -2565,12 +2565,12 @@ - Fix COPY FROM to properly handle null marker strings that + Fix COPY FROM to properly handle null marker strings that correspond to invalid encoding (Tom Lane) - A null marker string such as E'\\0' should work, and did + A null marker string such as E'\\0' should work, and did work in the past, but the case got broken in 8.4. @@ -2583,7 +2583,7 @@ Previously, infinite recursion in a function invoked by - auto-ANALYZE could crash worker processes. + auto-ANALYZE could crash worker processes. @@ -2602,7 +2602,7 @@ Fix logging collector to ensure it will restart file rotation - after receiving SIGHUP (Tom Lane) + after receiving SIGHUP (Tom Lane) @@ -2615,33 +2615,33 @@ - Fix memory leak in PL/pgSQL's RETURN NEXT command (Joe + Fix memory leak in PL/pgSQL's RETURN NEXT command (Joe Conway) - Fix PL/pgSQL's GET DIAGNOSTICS command when the target + Fix PL/pgSQL's GET DIAGNOSTICS command when the target is the function's first variable (Tom Lane) - Fix potential access off the end of memory in psql's - expanded display (\x) mode (Peter Eisentraut) + Fix potential access off the end of memory in psql's + expanded display (\x) mode (Peter Eisentraut) - Fix several performance problems in pg_dump when + Fix several performance problems in pg_dump when the database contains many objects (Jeff Janes, Tom Lane) - pg_dump could get very slow if the database contained + pg_dump could get very slow if the database contained many schemas, or if many objects are in dependency loops, or if there are many owned sequences. @@ -2649,21 +2649,21 @@ - Fix contrib/dblink's dblink_exec() to not leak + Fix contrib/dblink's dblink_exec() to not leak temporary database connections upon error (Tom Lane) - Fix contrib/dblink to report the correct connection name in + Fix contrib/dblink to report the correct connection name in error messages (Kyotaro Horiguchi) - Update time zone data files to tzdata release 2012c + Update time zone data files to tzdata release 2012c for DST law changes in Antarctica, Armenia, Chile, Cuba, Falkland Islands, Gaza, Haiti, Hebron, Morocco, Syria, and Tokelau Islands; also historical corrections for Canada. @@ -2686,7 +2686,7 @@ This release contains a variety of fixes from 8.4.10. For information about new features in the 8.4 major release, see - . + . @@ -2698,7 +2698,7 @@ However, if you are upgrading from a version earlier than 8.4.10, - see . + see . @@ -2711,14 +2711,14 @@ Require execute permission on the trigger function for - CREATE TRIGGER (Robert Haas) + CREATE TRIGGER (Robert Haas) This missing check could allow another user to execute a trigger function with forged input data, by installing it on a table he owns. This is only of significance for trigger functions marked - SECURITY DEFINER, since otherwise trigger functions run + SECURITY DEFINER, since otherwise trigger functions run as the table owner anyway. (CVE-2012-0866) @@ -2730,7 +2730,7 @@ - Both libpq and the server truncated the common name + Both libpq and the server truncated the common name extracted from an SSL certificate at 32 bytes. 
Normally this would cause nothing worse than an unexpected verification failure, but there are some rather-implausible scenarios in which it might allow one @@ -2745,12 +2745,12 @@ - Convert newlines to spaces in names written in pg_dump + Convert newlines to spaces in names written in pg_dump comments (Robert Haas) - pg_dump was incautious about sanitizing object names + pg_dump was incautious about sanitizing object names that are emitted within SQL comments in its output script. A name containing a newline would at least render the script syntactically incorrect. Maliciously crafted object names could present a SQL @@ -2766,10 +2766,10 @@ An index page split caused by an insertion could sometimes cause a - concurrently-running VACUUM to miss removing index entries + concurrently-running VACUUM to miss removing index entries that it should remove. After the corresponding table rows are removed, the dangling index entries would cause errors (such as could not - read block N in file ...) or worse, silently wrong query results + read block N in file ...) or worse, silently wrong query results after unrelated rows are re-inserted at the now-free table locations. This bug has been present since release 8.2, but occurs so infrequently that it was not diagnosed until now. If you have reason to suspect @@ -2795,16 +2795,16 @@ Allow non-existent values for some settings in ALTER - USER/DATABASE SET (Heikki Linnakangas) + USER/DATABASE SET (Heikki Linnakangas) - Allow default_text_search_config, - default_tablespace, and temp_tablespaces to be + Allow default_text_search_config, + default_tablespace, and temp_tablespaces to be set to names that are not known. This is because they might be known in another database where the setting is intended to be used, or for the tablespace cases because the tablespace might not be created yet. The - same issue was previously recognized for search_path, and + same issue was previously recognized for search_path, and these settings now act like that one. @@ -2842,7 +2842,7 @@ - Fix regular expression back-references with * attached + Fix regular expression back-references with * attached (Tom Lane) @@ -2856,18 +2856,18 @@ A similar problem still afflicts back-references that are embedded in a larger quantified expression, rather than being the immediate subject of the quantifier. This will be addressed in a future - PostgreSQL release. + PostgreSQL release. Fix recently-introduced memory leak in processing of - inet/cidr values (Heikki Linnakangas) + inet/cidr values (Heikki Linnakangas) - A patch in the December 2011 releases of PostgreSQL + A patch in the December 2011 releases of PostgreSQL caused memory leakage in these operations, which could be significant in scenarios such as building a btree index on such a column. @@ -2875,8 +2875,8 @@ - Fix dangling pointer after CREATE TABLE AS/SELECT - INTO in a SQL-language function (Tom Lane) + Fix dangling pointer after CREATE TABLE AS/SELECT + INTO in a SQL-language function (Tom Lane) @@ -2910,32 +2910,32 @@ - Improve pg_dump's handling of inherited table columns + Improve pg_dump's handling of inherited table columns (Tom Lane) - pg_dump mishandled situations where a child column has + pg_dump mishandled situations where a child column has a different default expression than its parent column. 
If the default is textually identical to the parent's default, but not actually the same (for instance, because of schema search path differences) it would not be recognized as different, so that after dump and restore the child would be allowed to inherit the parent's default. Child columns - that are NOT NULL where their parent is not could also be + that are NOT NULL where their parent is not could also be restored subtly incorrectly. - Fix pg_restore's direct-to-database mode for + Fix pg_restore's direct-to-database mode for INSERT-style table data (Tom Lane) Direct-to-database restores from archive files made with - - Map Central America Standard Time to CST6, not - CST6CDT, because DST is generally not observed anywhere in + Map Central America Standard Time to CST6, not + CST6CDT, because DST is generally not observed anywhere in Central America. - Update time zone data files to tzdata release 2011n + Update time zone data files to tzdata release 2011n for DST law changes in Brazil, Cuba, Fiji, Palestine, Russia, and Samoa; also historical corrections for Alaska and British East Africa. @@ -3363,7 +3363,7 @@ This release contains a variety of fixes from 8.4.8. For information about new features in the 8.4 major release, see - . + . @@ -3375,7 +3375,7 @@ However, if you are upgrading from a version earlier than 8.4.8, - see . + see . @@ -3410,7 +3410,7 @@ - Fix possible buffer overrun in tsvector_concat() + Fix possible buffer overrun in tsvector_concat() (Tom Lane) @@ -3422,14 +3422,14 @@ - Fix crash in xml_recv when processing a - standalone parameter (Tom Lane) + Fix crash in xml_recv when processing a + standalone parameter (Tom Lane) - Make pg_options_to_table return NULL for an option with no + Make pg_options_to_table return NULL for an option with no value (Tom Lane) @@ -3440,7 +3440,7 @@ - Avoid possibly accessing off the end of memory in ANALYZE + Avoid possibly accessing off the end of memory in ANALYZE and in SJIS-2004 encoding conversion (Noah Misch) @@ -3469,7 +3469,7 @@ There was a window wherein a new backend process could read a stale init file but miss the inval messages that would tell it the data is stale. The result would be bizarre failures in catalog accesses, typically - could not read block 0 in file ... later during startup. + could not read block 0 in file ... later during startup. @@ -3490,7 +3490,7 @@ Fix incorrect memory accounting (leading to possible memory bloat) in tuplestores supporting holdable cursors and plpgsql's RETURN - NEXT command (Tom Lane) + NEXT command (Tom Lane) @@ -3526,7 +3526,7 @@ - Allow nested EXISTS queries to be optimized properly (Tom + Allow nested EXISTS queries to be optimized properly (Tom Lane) @@ -3546,12 +3546,12 @@ - Fix EXPLAIN to handle gating Result nodes within + Fix EXPLAIN to handle gating Result nodes within inner-indexscan subplans (Tom Lane) - The usual symptom of this oversight was bogus varno errors. + The usual symptom of this oversight was bogus varno errors. 
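For the nested-EXISTS planner item above, a hedged sketch of the query shape in question; every table and column name here is hypothetical:

    SELECT *
    FROM   orders o
    WHERE  EXISTS (SELECT 1
                   FROM   order_lines l
                   WHERE  l.order_id = o.id
                   AND    EXISTS (SELECT 1
                                  FROM   products p
                                  WHERE  p.id = l.product_id));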
@@ -3567,13 +3567,13 @@ - Fix dump bug for VALUES in a view (Tom Lane) + Fix dump bug for VALUES in a view (Tom Lane) - Disallow SELECT FOR UPDATE/SHARE on sequences (Tom Lane) + Disallow SELECT FOR UPDATE/SHARE on sequences (Tom Lane) @@ -3583,8 +3583,8 @@ - Fix VACUUM so that it always updates - pg_class.reltuples/relpages (Tom + Fix VACUUM so that it always updates + pg_class.reltuples/relpages (Tom Lane) @@ -3603,7 +3603,7 @@ - Fix cases where CLUSTER might attempt to access + Fix cases where CLUSTER might attempt to access already-removed TOAST data (Tom Lane) @@ -3611,7 +3611,7 @@ Fix portability bugs in use of credentials control messages for - peer authentication (Tom Lane) + peer authentication (Tom Lane) @@ -3623,13 +3623,13 @@ The typical symptom of this problem was The function requested is - not supported errors during SSPI login. + not supported errors during SSPI login. - Throw an error if pg_hba.conf contains hostssl + Throw an error if pg_hba.conf contains hostssl but SSL is disabled (Tom Lane) @@ -3641,12 +3641,12 @@ - Fix typo in pg_srand48 seed initialization (Andres Freund) + Fix typo in pg_srand48 seed initialization (Andres Freund) This led to failure to use all bits of the provided seed. This function - is not used on most platforms (only those without srandom), + is not used on most platforms (only those without srandom), and the potential security exposure from a less-random-than-expected seed seems minimal in any case. @@ -3654,25 +3654,25 @@ - Avoid integer overflow when the sum of LIMIT and - OFFSET values exceeds 2^63 (Heikki Linnakangas) + Avoid integer overflow when the sum of LIMIT and + OFFSET values exceeds 2^63 (Heikki Linnakangas) - Add overflow checks to int4 and int8 versions of - generate_series() (Robert Haas) + Add overflow checks to int4 and int8 versions of + generate_series() (Robert Haas) - Fix trailing-zero removal in to_char() (Marti Raudsepp) + Fix trailing-zero removal in to_char() (Marti Raudsepp) - In a format with FM and no digit positions + In a format with FM and no digit positions after the decimal point, zeroes to the left of the decimal point could be removed incorrectly. @@ -3680,7 +3680,7 @@ - Fix pg_size_pretty() to avoid overflow for inputs close to + Fix pg_size_pretty() to avoid overflow for inputs close to 2^63 (Tom Lane) @@ -3698,59 +3698,59 @@ - Correctly handle quotes in locale names during initdb + Correctly handle quotes in locale names during initdb (Heikki Linnakangas) The case can arise with some Windows locales, such as People's - Republic of China. + Republic of China. - Fix pg_upgrade to preserve toast tables' relfrozenxids + Fix pg_upgrade to preserve toast tables' relfrozenxids during an upgrade from 8.3 (Bruce Momjian) - Failure to do this could lead to pg_clog files being + Failure to do this could lead to pg_clog files being removed too soon after the upgrade. 
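To make the to_char() item above concrete, a hedged example of a format with FM and no digit positions after the decimal point; the input value is arbitrary:

    SELECT to_char(1200, 'FM9999');   -- an FM format with no digits after the
                                      -- decimal point; the bug could incorrectly
                                      -- strip zeroes left of the decimal point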
- In pg_ctl, support silent mode for service registrations + In pg_ctl, support silent mode for service registrations on Windows (MauMau) - Fix psql's counting of script file line numbers during - COPY from a different file (Tom Lane) + Fix psql's counting of script file line numbers during + COPY from a different file (Tom Lane) - Fix pg_restore's direct-to-database mode for - standard_conforming_strings (Tom Lane) + Fix pg_restore's direct-to-database mode for + standard_conforming_strings (Tom Lane) - pg_restore could emit incorrect commands when restoring + pg_restore could emit incorrect commands when restoring directly to a database server from an archive file that had been made - with standard_conforming_strings set to on. + with standard_conforming_strings set to on. Be more user-friendly about unsupported cases for parallel - pg_restore (Tom Lane) + pg_restore (Tom Lane) @@ -3761,14 +3761,14 @@ - Fix write-past-buffer-end and memory leak in libpq's + Fix write-past-buffer-end and memory leak in libpq's LDAP service lookup code (Albe Laurenz) - In libpq, avoid failures when using nonblocking I/O + In libpq, avoid failures when using nonblocking I/O and an SSL connection (Martin Pihlak, Tom Lane) @@ -3780,36 +3780,36 @@ - In particular, the response to a server report of fork() + In particular, the response to a server report of fork() failure during SSL connection startup is now saner. - Improve libpq's error reporting for SSL failures (Tom + Improve libpq's error reporting for SSL failures (Tom Lane) - Fix PQsetvalue() to avoid possible crash when adding a new - tuple to a PGresult originally obtained from a server + Fix PQsetvalue() to avoid possible crash when adding a new + tuple to a PGresult originally obtained from a server query (Andrew Chernow) - Make ecpglib write double values with 15 digits + Make ecpglib write double values with 15 digits precision (Akira Kurosawa) - In ecpglib, be sure LC_NUMERIC setting is + In ecpglib, be sure LC_NUMERIC setting is restored after an error (Michael Meskes) @@ -3821,7 +3821,7 @@ - contrib/pg_crypto's blowfish encryption code could give + contrib/pg_crypto's blowfish encryption code could give wrong results on platforms where char is signed (which is most), leading to encrypted passwords being weaker than they should be. @@ -3829,13 +3829,13 @@ - Fix memory leak in contrib/seg (Heikki Linnakangas) + Fix memory leak in contrib/seg (Heikki Linnakangas) - Fix pgstatindex() to give consistent results for empty + Fix pgstatindex() to give consistent results for empty indexes (Tom Lane) @@ -3867,7 +3867,7 @@ - Update time zone data files to tzdata release 2011i + Update time zone data files to tzdata release 2011i for DST law changes in Canada, Egypt, Russia, Samoa, and South Sudan. @@ -3888,7 +3888,7 @@ This release contains a variety of fixes from 8.4.7. For information about new features in the 8.4 major release, see - . + . @@ -3900,10 +3900,10 @@ However, if your installation was upgraded from a previous major - release by running pg_upgrade, you should take + release by running pg_upgrade, you should take action to prevent possible data loss due to a now-fixed bug in - pg_upgrade. The recommended solution is to run - VACUUM FREEZE on all TOAST tables. + pg_upgrade. The recommended solution is to run + VACUUM FREEZE on all TOAST tables. More information is available at http://wiki.postgresql.org/wiki/20110408pg_upgrade_fix. @@ -3911,7 +3911,7 @@ Also, if you are upgrading from a version earlier than 8.4.2, - see . + see . 
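For context on the contrib/pgcrypto item above, a hedged example of the Blowfish-based crypt() usage whose output the fix concerns; it assumes contrib/pgcrypto is installed, and the password literal is arbitrary:

    SELECT crypt('secret', gen_salt('bf'));   -- Blowfish-based password hashing
                                              -- via contrib/pgcrypto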
@@ -3923,36 +3923,36 @@ - Fix pg_upgrade's handling of TOAST tables + Fix pg_upgrade's handling of TOAST tables (Bruce Momjian) - The pg_class.relfrozenxid value for + The pg_class.relfrozenxid value for TOAST tables was not correctly copied into the new installation - during pg_upgrade. This could later result in - pg_clog files being discarded while they were still + during pg_upgrade. This could later result in + pg_clog files being discarded while they were still needed to validate tuples in the TOAST tables, leading to - could not access status of transaction failures. + could not access status of transaction failures. This error poses a significant risk of data loss for installations - that have been upgraded with pg_upgrade. This patch - corrects the problem for future uses of pg_upgrade, + that have been upgraded with pg_upgrade. This patch + corrects the problem for future uses of pg_upgrade, but does not in itself cure the issue in installations that have been - processed with a buggy version of pg_upgrade. + processed with a buggy version of pg_upgrade. - Suppress incorrect PD_ALL_VISIBLE flag was incorrectly set + Suppress incorrect PD_ALL_VISIBLE flag was incorrectly set warning (Heikki Linnakangas) - VACUUM would sometimes issue this warning in cases that + VACUUM would sometimes issue this warning in cases that are actually valid. @@ -3986,15 +3986,15 @@ - Fix dangling-pointer problem in BEFORE ROW UPDATE trigger + Fix dangling-pointer problem in BEFORE ROW UPDATE trigger handling when there was a concurrent update to the target tuple (Tom Lane) This bug has been observed to result in intermittent cannot - extract system attribute from virtual tuple failures while trying to - do UPDATE RETURNING ctid. There is a very small probability + extract system attribute from virtual tuple failures while trying to + do UPDATE RETURNING ctid. There is a very small probability of more serious errors, such as generating incorrect index entries for the updated tuple. @@ -4002,13 +4002,13 @@ - Disallow DROP TABLE when there are pending deferred trigger + Disallow DROP TABLE when there are pending deferred trigger events for the table (Tom Lane) - Formerly the DROP would go through, leading to - could not open relation with OID nnn errors when the + Formerly the DROP would go through, leading to + could not open relation with OID nnn errors when the triggers were eventually fired. @@ -4053,7 +4053,7 @@ - Fix pg_restore to cope with long lines (over 1KB) in + Fix pg_restore to cope with long lines (over 1KB) in TOC files (Tom Lane) @@ -4085,14 +4085,14 @@ - Fix version-incompatibility problem with libintl on + Fix version-incompatibility problem with libintl on Windows (Hiroshi Inoue) - Fix usage of xcopy in Windows build scripts to + Fix usage of xcopy in Windows build scripts to work correctly under Windows 7 (Andrew Dunstan) @@ -4103,14 +4103,14 @@ - Fix path separator used by pg_regress on Cygwin + Fix path separator used by pg_regress on Cygwin (Andrew Dunstan) - Update time zone data files to tzdata release 2011f + Update time zone data files to tzdata release 2011f for DST law changes in Chile, Cuba, Falkland Islands, Morocco, Samoa, and Turkey; also historical corrections for South Australia, Alaska, and Hawaii. @@ -4133,7 +4133,7 @@ This release contains a variety of fixes from 8.4.6. For information about new features in the 8.4 major release, see - . + . @@ -4142,7 +4142,7 @@ A dump/restore is not required for those running 8.4.X. 
However, if you are upgrading from a version earlier than 8.4.2, - see . + see . @@ -4154,15 +4154,15 @@ - Avoid failures when EXPLAIN tries to display a simple-form - CASE expression (Tom Lane) + Avoid failures when EXPLAIN tries to display a simple-form + CASE expression (Tom Lane) - If the CASE's test expression was a constant, the planner - could simplify the CASE into a form that confused the + If the CASE's test expression was a constant, the planner + could simplify the CASE into a form that confused the expression-display code, resulting in unexpected CASE WHEN - clause errors. + clause errors. @@ -4187,44 +4187,44 @@ - The date type supports a wider range of dates than can be - represented by the timestamp types, but the planner assumed it + The date type supports a wider range of dates than can be + represented by the timestamp types, but the planner assumed it could always convert a date to timestamp with impunity. - Fix pg_restore's text output for large objects (BLOBs) - when standard_conforming_strings is on (Tom Lane) + Fix pg_restore's text output for large objects (BLOBs) + when standard_conforming_strings is on (Tom Lane) Although restoring directly to a database worked correctly, string - escaping was incorrect if pg_restore was asked for - SQL text output and standard_conforming_strings had been + escaping was incorrect if pg_restore was asked for + SQL text output and standard_conforming_strings had been enabled in the source database. - Fix erroneous parsing of tsquery values containing + Fix erroneous parsing of tsquery values containing ... & !(subexpression) | ... (Tom Lane) Queries containing this combination of operators were not executed - correctly. The same error existed in contrib/intarray's - query_int type and contrib/ltree's - ltxtquery type. + correctly. The same error existed in contrib/intarray's + query_int type and contrib/ltree's + ltxtquery type. - Fix buffer overrun in contrib/intarray's input function - for the query_int type (Apple) + Fix buffer overrun in contrib/intarray's input function + for the query_int type (Apple) @@ -4236,16 +4236,16 @@ - Fix bug in contrib/seg's GiST picksplit algorithm + Fix bug in contrib/seg's GiST picksplit algorithm (Alexander Korotkov) This could result in considerable inefficiency, though not actually - incorrect answers, in a GiST index on a seg column. - If you have such an index, consider REINDEXing it after + incorrect answers, in a GiST index on a seg column. + If you have such an index, consider REINDEXing it after installing this update. (This is identical to the bug that was fixed in - contrib/cube in the previous update.) + contrib/cube in the previous update.) @@ -4265,7 +4265,7 @@ This release contains a variety of fixes from 8.4.5. For information about new features in the 8.4 major release, see - . + . @@ -4274,7 +4274,7 @@ A dump/restore is not required for those running 8.4.X. However, if you are upgrading from a version earlier than 8.4.2, - see . + see . @@ -4287,17 +4287,17 @@ Force the default - wal_sync_method - to be fdatasync on Linux (Tom Lane, Marti Raudsepp) + wal_sync_method + to be fdatasync on Linux (Tom Lane, Marti Raudsepp) - The default on Linux has actually been fdatasync for many - years, but recent kernel changes caused PostgreSQL to - choose open_datasync instead. This choice did not result + The default on Linux has actually been fdatasync for many + years, but recent kernel changes caused PostgreSQL to + choose open_datasync instead. 
This choice did not result in any performance improvement, and caused outright failures on - certain filesystems, notably ext4 with the - data=journal mount option. + certain filesystems, notably ext4 with the + data=journal mount option. @@ -4307,7 +4307,7 @@ - This could result in bad buffer id: 0 failures or + This could result in bad buffer id: 0 failures or corruption of index contents during replication. @@ -4326,7 +4326,7 @@ - The effective vacuum_cost_limit for an autovacuum worker + The effective vacuum_cost_limit for an autovacuum worker could drop to nearly zero if it processed enough tables, causing it to run extremely slowly. @@ -4334,19 +4334,19 @@ - Add support for detecting register-stack overrun on IA64 + Add support for detecting register-stack overrun on IA64 (Tom Lane) - The IA64 architecture has two hardware stacks. Full + The IA64 architecture has two hardware stacks. Full prevention of stack-overrun failures requires checking both. - Add a check for stack overflow in copyObject() (Tom Lane) + Add a check for stack overflow in copyObject() (Tom Lane) @@ -4362,7 +4362,7 @@ - It is possible to have a concurrent page split in a + It is possible to have a concurrent page split in a temporary index, if for example there is an open cursor scanning the index when an insertion is done. GiST failed to detect this case and hence could deliver wrong results when execution of the cursor @@ -4389,16 +4389,16 @@ Certain cases where a large number of tuples needed to be read in - advance, but work_mem was large enough to allow them all + advance, but work_mem was large enough to allow them all to be held in memory, were unexpectedly slow. - percent_rank(), cume_dist() and - ntile() in particular were subject to this problem. + percent_rank(), cume_dist() and + ntile() in particular were subject to this problem. - Avoid memory leakage while ANALYZE'ing complex index + Avoid memory leakage while ANALYZE'ing complex index expressions (Tom Lane) @@ -4410,14 +4410,14 @@ - An index declared like create index i on t (foo(t.*)) + An index declared like create index i on t (foo(t.*)) would not automatically get dropped when its table was dropped. - Do not inline a SQL function with multiple OUT + Do not inline a SQL function with multiple OUT parameters (Tom Lane) @@ -4429,15 +4429,15 @@ - Behave correctly if ORDER BY, LIMIT, - FOR UPDATE, or WITH is attached to the - VALUES part of INSERT ... VALUES (Tom Lane) + Behave correctly if ORDER BY, LIMIT, + FOR UPDATE, or WITH is attached to the + VALUES part of INSERT ... VALUES (Tom Lane) - Fix constant-folding of COALESCE() expressions (Tom Lane) + Fix constant-folding of COALESCE() expressions (Tom Lane) @@ -4449,7 +4449,7 @@ Fix postmaster crash when connection acceptance - (accept() or one of the calls made immediately after it) + (accept() or one of the calls made immediately after it) fails, and the postmaster was compiled with GSSAPI support (Alexander Chernikov) @@ -4457,7 +4457,7 @@ - Fix missed unlink of temporary files when log_temp_files + Fix missed unlink of temporary files when log_temp_files is active (Tom Lane) @@ -4469,11 +4469,11 @@ - Add print functionality for InhRelation nodes (Tom Lane) + Add print functionality for InhRelation nodes (Tom Lane) - This avoids a failure when debug_print_parse is enabled + This avoids a failure when debug_print_parse is enabled and certain types of query are executed. 
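A hedged sketch of the INSERT ... VALUES form with trailing clauses attached to the VALUES list, as referenced above; the table name is hypothetical:

    CREATE TABLE t (x int);
    INSERT INTO t
    VALUES (3), (1), (2)
    ORDER BY 1
    LIMIT 2;          -- ORDER BY and LIMIT attached to the VALUES part of the INSERT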
@@ -4493,20 +4493,20 @@ Fix incorrect calculation of transaction status in - ecpg (Itagaki Takahiro) + ecpg (Itagaki Takahiro) - Fix PL/pgSQL's handling of simple + Fix PL/pgSQL's handling of simple expressions to not fail in recursion or error-recovery cases (Tom Lane) - Fix PL/Python's handling of set-returning functions + Fix PL/Python's handling of set-returning functions (Jan Urbanski) @@ -4518,22 +4518,22 @@ - Fix bug in contrib/cube's GiST picksplit algorithm + Fix bug in contrib/cube's GiST picksplit algorithm (Alexander Korotkov) This could result in considerable inefficiency, though not actually - incorrect answers, in a GiST index on a cube column. - If you have such an index, consider REINDEXing it after + incorrect answers, in a GiST index on a cube column. + If you have such an index, consider REINDEXing it after installing this update. - Don't emit identifier will be truncated notices in - contrib/dblink except when creating new connections + Don't emit identifier will be truncated notices in + contrib/dblink except when creating new connections (Itagaki Takahiro) @@ -4541,20 +4541,20 @@ Fix potential coredump on missing public key in - contrib/pgcrypto (Marti Raudsepp) + contrib/pgcrypto (Marti Raudsepp) - Fix memory leak in contrib/xml2's XPath query functions + Fix memory leak in contrib/xml2's XPath query functions (Tom Lane) - Update time zone data files to tzdata release 2010o + Update time zone data files to tzdata release 2010o for DST law changes in Fiji and Samoa; also historical corrections for Hong Kong. @@ -4576,7 +4576,7 @@ This release contains a variety of fixes from 8.4.4. For information about new features in the 8.4 major release, see - . + . @@ -4585,7 +4585,7 @@ A dump/restore is not required for those running 8.4.X. However, if you are upgrading from a version earlier than 8.4.2, - see . + see . @@ -4605,7 +4605,7 @@ This change prevents security problems that can be caused by subverting Perl or Tcl code that will be executed later in the same session under another SQL user identity (for example, within a SECURITY - DEFINER function). Most scripting languages offer numerous ways that + DEFINER function). Most scripting languages offer numerous ways that that might be done, such as redefining standard functions or operators called by the target function. Without this change, any SQL user with Perl or Tcl language usage rights can do essentially anything with the @@ -4634,7 +4634,7 @@ - Prevent possible crashes in pg_get_expr() by disallowing + Prevent possible crashes in pg_get_expr() by disallowing it from being called with an argument that is not one of the system catalog columns it's intended to be used with (Heikki Linnakangas, Tom Lane) @@ -4643,7 +4643,7 @@ - Treat exit code 128 (ERROR_WAIT_NO_CHILDREN) as non-fatal on + Treat exit code 128 (ERROR_WAIT_NO_CHILDREN) as non-fatal on Windows (Magnus Hagander) @@ -4669,7 +4669,7 @@ - Fix possible duplicate scans of UNION ALL member relations + Fix possible duplicate scans of UNION ALL member relations (Tom Lane) @@ -4694,18 +4694,18 @@ - Fix mishandling of cross-type IN comparisons (Tom Lane) + Fix mishandling of cross-type IN comparisons (Tom Lane) This could result in failures if the planner tried to implement an - IN join with a sort-then-unique-then-plain-join plan. + IN join with a sort-then-unique-then-plain-join plan. 
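To illustrate the cross-type IN shape from the item above, a hypothetical pair of tables whose id columns differ in width:

    CREATE TABLE small_ids (id int4);
    CREATE TABLE big_ids   (id int8);
    SELECT *
    FROM   small_ids
    WHERE  id IN (SELECT id FROM big_ids);   -- int4 compared against int8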
- Fix computation of ANALYZE statistics for tsvector + Fix computation of ANALYZE statistics for tsvector columns (Jan Urbanski) @@ -4717,8 +4717,8 @@ - Improve planner's estimate of memory used by array_agg(), - string_agg(), and similar aggregate functions + Improve planner's estimate of memory used by array_agg(), + string_agg(), and similar aggregate functions (Hitoshi Harada) @@ -4734,7 +4734,7 @@ - If a plan is prepared while CREATE INDEX CONCURRENTLY is + If a plan is prepared while CREATE INDEX CONCURRENTLY is in progress for one of the referenced tables, it is supposed to be re-planned once the index is ready for use. This was not happening reliably. @@ -4812,7 +4812,7 @@ Take care to fsync the contents of lockfiles (both - postmaster.pid and the socket lockfile) while writing them + postmaster.pid and the socket lockfile) while writing them (Tom Lane) @@ -4849,7 +4849,7 @@ - Fix log_line_prefix's %i escape, + Fix log_line_prefix's %i escape, which could produce junk early in backend startup (Tom Lane) @@ -4861,7 +4861,7 @@ - In particular, fillfactor would be read as zero if any + In particular, fillfactor would be read as zero if any other reloption had been set for the table, leading to serious bloat. @@ -4869,49 +4869,49 @@ Fix inheritance count tracking in ALTER TABLE ... ADD - CONSTRAINT (Robert Haas) + CONSTRAINT (Robert Haas) Fix possible data corruption in ALTER TABLE ... SET - TABLESPACE when archiving is enabled (Jeff Davis) + TABLESPACE when archiving is enabled (Jeff Davis) - Allow CREATE DATABASE and ALTER DATABASE ... SET - TABLESPACE to be interrupted by query-cancel (Guillaume Lelarge) + Allow CREATE DATABASE and ALTER DATABASE ... SET + TABLESPACE to be interrupted by query-cancel (Guillaume Lelarge) - Improve CREATE INDEX's checking of whether proposed index + Improve CREATE INDEX's checking of whether proposed index expressions are immutable (Tom Lane) - Fix REASSIGN OWNED to handle operator classes and families + Fix REASSIGN OWNED to handle operator classes and families (Asko Tiidumaa) - Fix possible core dump when comparing two empty tsquery values + Fix possible core dump when comparing two empty tsquery values (Tom Lane) - Fix LIKE's handling of patterns containing % - followed by _ (Tom Lane) + Fix LIKE's handling of patterns containing % + followed by _ (Tom Lane) @@ -4926,7 +4926,7 @@ - Input such as 'J100000'::date worked before 8.4, + Input such as 'J100000'::date worked before 8.4, but was unintentionally broken by added error-checking. 
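A small hedged example of the LIKE pattern shape mentioned above, a % wildcard immediately followed by _; the strings are arbitrary:

    SELECT 'abcde' LIKE 'a%_e';   -- true: % matches "bc", _ matches "d"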
@@ -4934,7 +4934,7 @@ Fix PL/pgSQL to throw an error, not crash, if a cursor is closed within - a FOR loop that is iterating over that cursor + a FOR loop that is iterating over that cursor (Heikki Linnakangas) @@ -4942,22 +4942,22 @@ In PL/Python, defend against null pointer results from - PyCObject_AsVoidPtr and PyCObject_FromVoidPtr + PyCObject_AsVoidPtr and PyCObject_FromVoidPtr (Peter Eisentraut) - In libpq, fix full SSL certificate verification for the - case where both host and hostaddr are specified + In libpq, fix full SSL certificate verification for the + case where both host and hostaddr are specified (Tom Lane) - Make psql recognize DISCARD ALL as a command that should + Make psql recognize DISCARD ALL as a command that should not be encased in a transaction block in autocommit-off mode (Itagaki Takahiro) @@ -4965,19 +4965,19 @@ - Fix some issues in pg_dump's handling of SQL/MED objects + Fix some issues in pg_dump's handling of SQL/MED objects (Tom Lane) - Notably, pg_dump would always fail if run by a + Notably, pg_dump would always fail if run by a non-superuser, which was not intended. - Improve pg_dump and pg_restore's + Improve pg_dump and pg_restore's handling of non-seekable archive files (Tom Lane, Robert Haas) @@ -4989,31 +4989,31 @@ Improve parallel pg_restore's ability to cope with selective restore - (-L option) (Tom Lane) + (-L option) (Tom Lane) - The original code tended to fail if the -L file commanded + The original code tended to fail if the -L file commanded a non-default restore ordering. - Fix ecpg to process data from RETURNING + Fix ecpg to process data from RETURNING clauses correctly (Michael Meskes) - Fix some memory leaks in ecpg (Zoltan Boszormenyi) + Fix some memory leaks in ecpg (Zoltan Boszormenyi) - Improve contrib/dblink's handling of tables containing + Improve contrib/dblink's handling of tables containing dropped columns (Tom Lane) @@ -5021,30 +5021,30 @@ Fix connection leak after duplicate connection name - errors in contrib/dblink (Itagaki Takahiro) + errors in contrib/dblink (Itagaki Takahiro) - Fix contrib/dblink to handle connection names longer than + Fix contrib/dblink to handle connection names longer than 62 bytes correctly (Itagaki Takahiro) - Add hstore(text, text) - function to contrib/hstore (Robert Haas) + Add hstore(text, text) + function to contrib/hstore (Robert Haas) This function is the recommended substitute for the now-deprecated - => operator. It was back-patched so that future-proofed + => operator. It was back-patched so that future-proofed code can be used with older server versions. Note that the patch will - be effective only after contrib/hstore is installed or + be effective only after contrib/hstore is installed or reinstalled in a particular database. Users might prefer to execute - the CREATE FUNCTION command by hand, instead. + the CREATE FUNCTION command by hand, instead. @@ -5057,7 +5057,7 @@ - Update time zone data files to tzdata release 2010l + Update time zone data files to tzdata release 2010l for DST law changes in Egypt and Palestine; also historical corrections for Finland. @@ -5072,7 +5072,7 @@ - Make Windows' N. Central Asia Standard Time timezone map to + Make Windows' N. Central Asia Standard Time timezone map to Asia/Novosibirsk, not Asia/Almaty (Magnus Hagander) @@ -5098,7 +5098,7 @@ This release contains a variety of fixes from 8.4.3. For information about new features in the 8.4 major release, see - . + . @@ -5107,7 +5107,7 @@ A dump/restore is not required for those running 8.4.X. 
However, if you are upgrading from a version earlier than 8.4.2, - see . + see . @@ -5119,19 +5119,19 @@ - Enforce restrictions in plperl using an opmask applied to - the whole interpreter, instead of using Safe.pm + Enforce restrictions in plperl using an opmask applied to + the whole interpreter, instead of using Safe.pm (Tim Bunce, Andrew Dunstan) - Recent developments have convinced us that Safe.pm is too - insecure to rely on for making plperl trustable. This - change removes use of Safe.pm altogether, in favor of using + Recent developments have convinced us that Safe.pm is too + insecure to rely on for making plperl trustable. This + change removes use of Safe.pm altogether, in favor of using a separate interpreter with an opcode mask that is always applied. Pleasant side effects of the change include that it is now possible to - use Perl's strict pragma in a natural way in - plperl, and that Perl's $a and $b + use Perl's strict pragma in a natural way in + plperl, and that Perl's $a and $b variables work as expected in sort routines, and that function compilation is significantly faster. (CVE-2010-1169) @@ -5140,19 +5140,19 @@ Prevent PL/Tcl from executing untrustworthy code from - pltcl_modules (Tom) + pltcl_modules (Tom) PL/Tcl's feature for autoloading Tcl code from a database table could be exploited for trojan-horse attacks, because there was no restriction on who could create or insert into that table. This change - disables the feature unless pltcl_modules is owned by a + disables the feature unless pltcl_modules is owned by a superuser. (However, the permissions on the table are not checked, so installations that really need a less-than-secure modules table can still grant suitable privileges to trusted non-superusers.) Also, - prevent loading code into the unrestricted normal Tcl - interpreter unless we are really going to execute a pltclu + prevent loading code into the unrestricted normal Tcl + interpreter unless we are really going to execute a pltclu function. (CVE-2010-1170) @@ -5160,16 +5160,16 @@ Fix data corruption during WAL replay of - ALTER ... SET TABLESPACE (Tom) + ALTER ... SET TABLESPACE (Tom) - When archive_mode is on, ALTER ... SET TABLESPACE + When archive_mode is on, ALTER ... SET TABLESPACE generates a WAL record whose replay logic was incorrect. It could write the data to the wrong place, leading to possibly-unrecoverable data corruption. Data corruption would be observed on standby slaves, and could occur on the master as well if a database crash and recovery - occurred after committing the ALTER and before the next + occurred after committing the ALTER and before the next checkpoint. @@ -5194,20 +5194,20 @@ This avoids failures if the function's code is invalid without the setting; an example is that SQL functions may not parse if the - search_path is not correct. + search_path is not correct. - Do constraint exclusion for inherited UPDATE and - DELETE target tables when - constraint_exclusion = partition (Tom) + Do constraint exclusion for inherited UPDATE and + DELETE target tables when + constraint_exclusion = partition (Tom) Due to an oversight, this setting previously only caused constraint - exclusion to be checked in SELECT commands. + exclusion to be checked in SELECT commands. @@ -5219,10 +5219,10 @@ Previously, if an unprivileged user ran ALTER USER ... RESET - ALL for himself, or ALTER DATABASE ... RESET ALL for + ALL for himself, or ALTER DATABASE ... 
RESET ALL for a database he owns, this would remove all special parameter settings for the user or database, even ones that are only supposed to be - changeable by a superuser. Now, the ALTER will only + changeable by a superuser. Now, the ALTER will only remove the parameters that the user has permission to change. @@ -5230,7 +5230,7 @@ Avoid possible crash during backend shutdown if shutdown occurs - when a CONTEXT addition would be made to log entries (Tom) + when a CONTEXT addition would be made to log entries (Tom) @@ -5242,8 +5242,8 @@ - Fix erroneous handling of %r parameter in - recovery_end_command (Heikki) + Fix erroneous handling of %r parameter in + recovery_end_command (Heikki) @@ -5254,20 +5254,20 @@ Ensure the archiver process responds to changes in - archive_command as soon as possible (Tom) + archive_command as soon as possible (Tom) - Fix PL/pgSQL's CASE statement to not fail when the + Fix PL/pgSQL's CASE statement to not fail when the case expression is a query that returns no rows (Tom) - Update PL/Perl's ppport.h for modern Perl versions + Update PL/Perl's ppport.h for modern Perl versions (Andrew) @@ -5286,15 +5286,15 @@ - Prevent infinite recursion in psql when expanding + Prevent infinite recursion in psql when expanding a variable that refers to itself (Tom) - Fix psql's \copy to not add spaces around - a dot within \copy (select ...) (Tom) + Fix psql's \copy to not add spaces around + a dot within \copy (select ...) (Tom) @@ -5305,23 +5305,23 @@ - Avoid formatting failure in psql when running in a - locale context that doesn't match the client_encoding + Avoid formatting failure in psql when running in a + locale context that doesn't match the client_encoding (Tom) - Fix unnecessary GIN indexes do not support whole-index scans - errors for unsatisfiable queries using contrib/intarray + Fix unnecessary GIN indexes do not support whole-index scans + errors for unsatisfiable queries using contrib/intarray operators (Tom) - Ensure that contrib/pgstattuple functions respond to cancel + Ensure that contrib/pgstattuple functions respond to cancel interrupts promptly (Tatsuhito Kasahara) @@ -5329,7 +5329,7 @@ Make server startup deal properly with the case that - shmget() returns EINVAL for an existing + shmget() returns EINVAL for an existing shared memory segment (Tom) @@ -5361,14 +5361,14 @@ - Update time zone data files to tzdata release 2010j + Update time zone data files to tzdata release 2010j for DST law changes in Argentina, Australian Antarctic, Bangladesh, Mexico, Morocco, Pakistan, Palestine, Russia, Syria, Tunisia; also historical corrections for Taiwan. - Also, add PKST (Pakistan Summer Time) to the default set of + Also, add PKST (Pakistan Summer Time) to the default set of timezone abbreviations. @@ -5389,7 +5389,7 @@ This release contains a variety of fixes from 8.4.2. For information about new features in the 8.4 major release, see - . + . @@ -5398,7 +5398,7 @@ A dump/restore is not required for those running 8.4.X. However, if you are upgrading from a version earlier than 8.4.2, - see . + see . 
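As a hedged sketch of the constraint-exclusion item above: with the setting shown below, inheritance children whose CHECK constraints rule out the WHERE clause can now be skipped for UPDATE and DELETE as well as SELECT. All object names are hypothetical:

    SET constraint_exclusion = partition;
    CREATE TABLE measurements (logdate date, reading numeric);
    CREATE TABLE measurements_2009 (CHECK (logdate < DATE '2010-01-01'))
        INHERITS (measurements);
    DELETE FROM measurements WHERE logdate >= DATE '2010-01-01';
        -- measurements_2009 can be excluded from the inherited DELETE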
@@ -5410,7 +5410,7 @@ - Add new configuration parameter ssl_renegotiation_limit to + Add new configuration parameter ssl_renegotiation_limit to control how often we do session key renegotiation for an SSL connection (Magnus) @@ -5446,7 +5446,7 @@ Fix possible crash due to overenthusiastic invalidation of cached - plan for ROLLBACK (Tom) + plan for ROLLBACK (Tom) @@ -5492,8 +5492,8 @@ - Make substring() for bit types treat any negative - length as meaning all the rest of the string (Tom) + Make substring() for bit types treat any negative + length as meaning all the rest of the string (Tom) @@ -5533,12 +5533,12 @@ - Avoid failure when EXPLAIN has to print a FieldStore or + Avoid failure when EXPLAIN has to print a FieldStore or assignment ArrayRef expression (Tom) - These cases can arise now that EXPLAIN VERBOSE tries to + These cases can arise now that EXPLAIN VERBOSE tries to print plan node target lists. @@ -5547,7 +5547,7 @@ Avoid an unnecessary coercion failure in some cases where an undecorated literal string appears in a subquery within - UNION/INTERSECT/EXCEPT (Tom) + UNION/INTERSECT/EXCEPT (Tom) @@ -5564,7 +5564,7 @@ - Fix the STOP WAL LOCATION entry in backup history files to + Fix the STOP WAL LOCATION entry in backup history files to report the next WAL segment's name when the end location is exactly at a segment boundary (Itagaki Takahiro) @@ -5573,7 +5573,7 @@ Always pass the catalog ID to an option validator function specified in - CREATE FOREIGN DATA WRAPPER (Martin Pihlak) + CREATE FOREIGN DATA WRAPPER (Martin Pihlak) @@ -5591,7 +5591,7 @@ - Add support for doing FULL JOIN ON FALSE (Tom) + Add support for doing FULL JOIN ON FALSE (Tom) @@ -5604,13 +5604,13 @@ Improve constraint exclusion processing of boolean-variable cases, in particular make it possible to exclude a partition that has a - bool_column = false constraint (Tom) + bool_column = false constraint (Tom) - Prevent treating an INOUT cast as representing binary + Prevent treating an INOUT cast as representing binary compatibility (Heikki) @@ -5623,24 +5623,24 @@ This is more useful than before and helps to prevent confusion when - a REVOKE generates multiple messages, which formerly + a REVOKE generates multiple messages, which formerly appeared to be duplicates. - When reading pg_hba.conf and related files, do not treat - @something as a file inclusion request if the @ - appears inside quote marks; also, never treat @ by itself + When reading pg_hba.conf and related files, do not treat + @something as a file inclusion request if the @ + appears inside quote marks; also, never treat @ by itself as a file inclusion request (Tom) This prevents erratic behavior if a role or database name starts with - @. If you need to include a file whose path name + @. If you need to include a file whose path name contains spaces, you can still do so, but you must write - @"/path to/file" rather than putting the quotes around + @"/path to/file" rather than putting the quotes around the whole construct. @@ -5648,83 +5648,83 @@ Prevent infinite loop on some platforms if a directory is named as - an inclusion target in pg_hba.conf and related files + an inclusion target in pg_hba.conf and related files (Tom) - Fix possible infinite loop if SSL_read or - SSL_write fails without setting errno (Tom) + Fix possible infinite loop if SSL_read or + SSL_write fails without setting errno (Tom) This is reportedly possible with some Windows versions of - openssl. + OpenSSL. 
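For the bit-string substring() item above, a hedged one-liner; per the note, a negative length is now treated as meaning all the rest of the string:

    SELECT substring(B'101101' FROM 3 FOR -2);   -- negative length: take the
                                                 -- rest of the bit string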
- Disallow GSSAPI authentication on local connections, + Disallow GSSAPI authentication on local connections, since it requires a hostname to function correctly (Magnus) - Protect ecpg against applications freeing strings + Protect ecpg against applications freeing strings unexpectedly (Michael) - Make ecpg report the proper SQLSTATE if the connection + Make ecpg report the proper SQLSTATE if the connection disappears (Michael) - Fix translation of cell contents in psql \d + Fix translation of cell contents in psql \d output (Heikki) - Fix psql's numericlocale option to not + Fix psql's numericlocale option to not format strings it shouldn't in latex and troff output formats (Heikki) - Fix a small per-query memory leak in psql (Tom) + Fix a small per-query memory leak in psql (Tom) - Make psql return the correct exit status (3) when - ON_ERROR_STOP and --single-transaction are - both specified and an error occurs during the implied COMMIT + Make psql return the correct exit status (3) when + ON_ERROR_STOP and --single-transaction are + both specified and an error occurs during the implied COMMIT (Bruce) - Fix pg_dump's output of permissions for foreign servers + Fix pg_dump's output of permissions for foreign servers (Heikki) - Fix possible crash in parallel pg_restore due to + Fix possible crash in parallel pg_restore due to out-of-range dependency IDs (Tom) @@ -5745,7 +5745,7 @@ - Add volatile markings in PL/Python to avoid possible + Add volatile markings in PL/Python to avoid possible compiler-specific misbehavior (Zdenek Kotala) @@ -5757,55 +5757,55 @@ The only known symptom of this oversight is that the Tcl - clock command misbehaves if using Tcl 8.5 or later. + clock command misbehaves if using Tcl 8.5 or later. - Prevent ExecutorEnd from being run on portals created + Prevent ExecutorEnd from being run on portals created within a failed transaction or subtransaction (Tom) This is known to cause issues when using - contrib/auto_explain. + contrib/auto_explain. - Prevent crash in contrib/dblink when too many key - columns are specified to a dblink_build_sql_* function + Prevent crash in contrib/dblink when too many key + columns are specified to a dblink_build_sql_* function (Rushabh Lathia, Joe Conway) - Allow zero-dimensional arrays in contrib/ltree operations + Allow zero-dimensional arrays in contrib/ltree operations (Tom) This case was formerly rejected as an error, but it's more convenient to treat it the same as a zero-element array. In particular this avoids - unnecessary failures when an ltree operation is applied to the - result of ARRAY(SELECT ...) and the sub-select returns no + unnecessary failures when an ltree operation is applied to the + result of ARRAY(SELECT ...) and the sub-select returns no rows. - Fix assorted crashes in contrib/xml2 caused by sloppy + Fix assorted crashes in contrib/xml2 caused by sloppy memory management (Tom) - Make building of contrib/xml2 more robust on Windows + Make building of contrib/xml2 more robust on Windows (Andrew) @@ -5816,7 +5816,7 @@ - One known symptom of this bug is that rows in pg_listener + One known symptom of this bug is that rows in pg_listener could be dropped under heavy load. @@ -5835,7 +5835,7 @@ - Update time zone data files to tzdata release 2010e + Update time zone data files to tzdata release 2010e for DST law changes in Bangladesh, Chile, Fiji, Mexico, Paraguay, Samoa. @@ -5856,7 +5856,7 @@ This release contains a variety of fixes from 8.4.1. For information about new features in the 8.4 major release, see - . 
+ . @@ -5865,7 +5865,7 @@ A dump/restore is not required for those running 8.4.X. However, if you have any hash indexes, - you should REINDEX them after updating to 8.4.2, + you should REINDEX them after updating to 8.4.2, to repair possible damage. @@ -5911,7 +5911,7 @@ preserve the ordering. So application of either of those operations could lead to permanent corruption of an index, in the sense that searches might fail to find entries that are present. To deal with - this, it is recommended to REINDEX any hash indexes you may + this, it is recommended to REINDEX any hash indexes you may have after installing this update. @@ -5930,14 +5930,14 @@ - Prevent signals from interrupting VACUUM at unsafe times + Prevent signals from interrupting VACUUM at unsafe times (Alvaro) - This fix prevents a PANIC if a VACUUM FULL is canceled + This fix prevents a PANIC if a VACUUM FULL is canceled after it's already committed its tuple movements, as well as transient - errors if a plain VACUUM is interrupted after having + errors if a plain VACUUM is interrupted after having truncated the table. @@ -5956,14 +5956,14 @@ - Fix crash if a DROP is attempted on an internally-dependent + Fix crash if a DROP is attempted on an internally-dependent object (Tom) - Fix very rare crash in inet/cidr comparisons (Chris + Fix very rare crash in inet/cidr comparisons (Chris Mikkelson) @@ -5991,7 +5991,7 @@ - Fix memory leak in postmaster when re-parsing pg_hba.conf + Fix memory leak in postmaster when re-parsing pg_hba.conf (Tom) @@ -6010,8 +6010,8 @@ - Make FOR UPDATE/SHARE in the primary query not propagate - into WITH queries (Tom) + Make FOR UPDATE/SHARE in the primary query not propagate + into WITH queries (Tom) @@ -6019,18 +6019,18 @@ WITH w AS (SELECT * FROM foo) SELECT * FROM w, bar ... FOR UPDATE - the FOR UPDATE will now affect bar but not - foo. This is more useful and consistent than the original - 8.4 behavior, which tried to propagate FOR UPDATE into the - WITH query but always failed due to assorted implementation - restrictions. It also follows the design rule that WITH + the FOR UPDATE will now affect bar but not + foo. This is more useful and consistent than the original + 8.4 behavior, which tried to propagate FOR UPDATE into the + WITH query but always failed due to assorted implementation + restrictions. It also follows the design rule that WITH queries are executed as if independent of the main query. - Fix bug with a WITH RECURSIVE query immediately inside + Fix bug with a WITH RECURSIVE query immediately inside another one (Tom) @@ -6056,7 +6056,7 @@ WITH w AS (SELECT * FROM foo) SELECT * FROM w, bar ... FOR UPDATE Fix wrong search results for a multi-column GIN index with - fastupdate enabled (Teodor) + fastupdate enabled (Teodor) @@ -6066,7 +6066,7 @@ WITH w AS (SELECT * FROM foo) SELECT * FROM w, bar ... FOR UPDATE - These bugs were masked when full_page_writes was on, but + These bugs were masked when full_page_writes was on, but with it off a WAL replay failure was certain if a crash occurred before the next checkpoint. @@ -6104,7 +6104,7 @@ WITH w AS (SELECT * FROM foo) SELECT * FROM w, bar ... FOR UPDATE The previous code is known to fail with the combination of the Linux - pam_krb5 PAM module with Microsoft Active Directory as the + pam_krb5 PAM module with Microsoft Active Directory as the domain controller. It might have problems elsewhere too, since it was making unjustified assumptions about what arguments the PAM stack would pass to it. 
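Restating the WITH ... FOR UPDATE change summarized above with the release notes' own example query (foo and bar are placeholder tables; the join column is added here only for illustration): per that note, the row locks are taken on bar, while rows produced by the WITH query over foo are not locked.

    WITH w AS (SELECT * FROM foo)
    SELECT *
    FROM w
    JOIN bar ON bar.id = w.id   -- hypothetical join column
    FOR UPDATE;                 -- locks the matched rows of bar; foo is left unlocked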
@@ -6127,7 +6127,7 @@ WITH w AS (SELECT * FROM foo) SELECT * FROM w, bar ... FOR UPDATE Ensure that domain constraints are enforced in constructs like - ARRAY[...]::domain, where the domain is over an array type + ARRAY[...]::domain, where the domain is over an array type (Heikki) @@ -6153,7 +6153,7 @@ WITH w AS (SELECT * FROM foo) SELECT * FROM w, bar ... FOR UPDATE - Fix CREATE TABLE to properly merge default expressions + Fix CREATE TABLE to properly merge default expressions coming from different inheritance parent tables (Tom) @@ -6175,39 +6175,39 @@ WITH w AS (SELECT * FROM foo) SELECT * FROM w, bar ... FOR UPDATE Fix processing of ownership dependencies during CREATE OR - REPLACE FUNCTION (Tom) + REPLACE FUNCTION (Tom) - Fix incorrect handling of WHERE - x=x conditions (Tom) + Fix incorrect handling of WHERE + x=x conditions (Tom) In some cases these could get ignored as redundant, but they aren't - — they're equivalent to x IS NOT NULL. + — they're equivalent to x IS NOT NULL. Fix incorrect plan construction when using hash aggregation to implement - DISTINCT for textually identical volatile expressions (Tom) + DISTINCT for textually identical volatile expressions (Tom) - Fix Assert failure for a volatile SELECT DISTINCT ON + Fix Assert failure for a volatile SELECT DISTINCT ON expression (Tom) - Fix ts_stat() to not fail on an empty tsvector + Fix ts_stat() to not fail on an empty tsvector value (Tom) @@ -6220,7 +6220,7 @@ WITH w AS (SELECT * FROM foo) SELECT * FROM w, bar ... FOR UPDATE - Fix encoding handling in xml binary input (Heikki) + Fix encoding handling in xml binary input (Heikki) @@ -6231,7 +6231,7 @@ WITH w AS (SELECT * FROM foo) SELECT * FROM w, bar ... FOR UPDATE - Fix bug with calling plperl from plperlu or vice + Fix bug with calling plperl from plperlu or vice versa (Tom) @@ -6251,7 +6251,7 @@ WITH w AS (SELECT * FROM foo) SELECT * FROM w, bar ... FOR UPDATE Ensure that Perl arrays are properly converted to - PostgreSQL arrays when returned by a set-returning + PostgreSQL arrays when returned by a set-returning PL/Perl function (Andrew Dunstan, Abhijit Menon-Sen) @@ -6268,43 +6268,43 @@ WITH w AS (SELECT * FROM foo) SELECT * FROM w, bar ... FOR UPDATE - Fix ecpg problem with comments in DECLARE - CURSOR statements (Michael) + Fix ecpg problem with comments in DECLARE + CURSOR statements (Michael) - Fix ecpg to not treat recently-added keywords as + Fix ecpg to not treat recently-added keywords as reserved words (Tom) - This affected the keywords CALLED, CATALOG, - DEFINER, ENUM, FOLLOWING, - INVOKER, OPTIONS, PARTITION, - PRECEDING, RANGE, SECURITY, - SERVER, UNBOUNDED, and WRAPPER. + This affected the keywords CALLED, CATALOG, + DEFINER, ENUM, FOLLOWING, + INVOKER, OPTIONS, PARTITION, + PRECEDING, RANGE, SECURITY, + SERVER, UNBOUNDED, and WRAPPER. - Re-allow regular expression special characters in psql's - \df function name parameter (Tom) + Re-allow regular expression special characters in psql's + \df function name parameter (Tom) - In contrib/fuzzystrmatch, correct the calculation of - levenshtein distances with non-default costs (Marcin Mank) + In contrib/fuzzystrmatch, correct the calculation of + levenshtein distances with non-default costs (Marcin Mank) - In contrib/pg_standby, disable triggering failover with a + In contrib/pg_standby, disable triggering failover with a signal on Windows (Fujii Masao) @@ -6316,35 +6316,35 @@ WITH w AS (SELECT * FROM foo) SELECT * FROM w, bar ... 
FOR UPDATE - Put FREEZE and VERBOSE options in the right - order in the VACUUM command that - contrib/vacuumdb produces (Heikki) + Put FREEZE and VERBOSE options in the right + order in the VACUUM command that + contrib/vacuumdb produces (Heikki) - Fix possible leak of connections when contrib/dblink + Fix possible leak of connections when contrib/dblink encounters an error (Tatsuhito Kasahara) - Ensure psql's flex module is compiled with the correct + Ensure psql's flex module is compiled with the correct system header definitions (Tom) This fixes build failures on platforms where - --enable-largefile causes incompatible changes in the + --enable-largefile causes incompatible changes in the generated code. - Make the postmaster ignore any application_name parameter in + Make the postmaster ignore any application_name parameter in connection request packets, to improve compatibility with future libpq versions (Tom) @@ -6357,14 +6357,14 @@ WITH w AS (SELECT * FROM foo) SELECT * FROM w, bar ... FOR UPDATE - This includes adding IDT to the default + This includes adding IDT to the default timezone abbreviation set. - Update time zone data files to tzdata release 2009s + Update time zone data files to tzdata release 2009s for DST law changes in Antarctica, Argentina, Bangladesh, Fiji, Novokuznetsk, Pakistan, Palestine, Samoa, Syria; also historical corrections for Hong Kong. @@ -6387,7 +6387,7 @@ WITH w AS (SELECT * FROM foo) SELECT * FROM w, bar ... FOR UPDATE This release contains a variety of fixes from 8.4. For information about new features in the 8.4 major release, see - . + . @@ -6418,7 +6418,7 @@ WITH w AS (SELECT * FROM foo) SELECT * FROM w, bar ... FOR UPDATE - Fix cannot make new WAL entries during recovery error (Tom) + Fix cannot make new WAL entries during recovery error (Tom) @@ -6435,39 +6435,39 @@ WITH w AS (SELECT * FROM foo) SELECT * FROM w, bar ... FOR UPDATE - Disallow RESET ROLE and RESET SESSION - AUTHORIZATION inside security-definer functions (Tom, Heikki) + Disallow RESET ROLE and RESET SESSION + AUTHORIZATION inside security-definer functions (Tom, Heikki) This covers a case that was missed in the previous patch that - disallowed SET ROLE and SET SESSION - AUTHORIZATION inside security-definer functions. + disallowed SET ROLE and SET SESSION + AUTHORIZATION inside security-definer functions. (See CVE-2007-6600) - Make LOAD of an already-loaded loadable module + Make LOAD of an already-loaded loadable module into a no-op (Tom) - Formerly, LOAD would attempt to unload and re-load the + Formerly, LOAD would attempt to unload and re-load the module, but this is unsafe and not all that useful. - Make window function PARTITION BY and ORDER BY + Make window function PARTITION BY and ORDER BY items always be interpreted as simple expressions (Tom) In 8.4.0 these lists were parsed following the rules used for - top-level GROUP BY and ORDER BY lists. + top-level GROUP BY and ORDER BY lists. But this was not correct per the SQL standard, and it led to possible circularity. @@ -6479,8 +6479,8 @@ WITH w AS (SELECT * FROM foo) SELECT * FROM w, bar ... FOR UPDATE - These led to wrong query results in some cases where IN - or EXISTS was used together with another join. + These led to wrong query results in some cases where IN + or EXISTS was used together with another join. @@ -6492,8 +6492,8 @@ WITH w AS (SELECT * FROM foo) SELECT * FROM w, bar ... FOR UPDATE An example is - SELECT COUNT(ss.*) FROM ... LEFT JOIN (SELECT ...) ss ON .... 
- Here, ss.* would be treated as ROW(NULL,NULL,...) + SELECT COUNT(ss.*) FROM ... LEFT JOIN (SELECT ...) ss ON .... + Here, ss.* would be treated as ROW(NULL,NULL,...) for null-extended join rows, which is not the same as a simple NULL. Now it is treated as a simple NULL. @@ -6506,7 +6506,7 @@ WITH w AS (SELECT * FROM foo) SELECT * FROM w, bar ... FOR UPDATE This bug led to the often-reported could not reattach - to shared memory error message. + to shared memory error message. @@ -6530,36 +6530,36 @@ WITH w AS (SELECT * FROM foo) SELECT * FROM w, bar ... FOR UPDATE - Ensure that a fast shutdown request will forcibly terminate - open sessions, even if a smart shutdown was already in progress + Ensure that a fast shutdown request will forcibly terminate + open sessions, even if a smart shutdown was already in progress (Fujii Masao) - Avoid memory leak for array_agg() in GROUP BY + Avoid memory leak for array_agg() in GROUP BY queries (Tom) - Treat to_char(..., 'TH') as an uppercase ordinal - suffix with 'HH'/'HH12' (Heikki) + Treat to_char(..., 'TH') as an uppercase ordinal + suffix with 'HH'/'HH12' (Heikki) - It was previously handled as 'th' (lowercase). + It was previously handled as 'th' (lowercase). Include the fractional part in the result of - EXTRACT(second) and - EXTRACT(milliseconds) for - time and time with time zone inputs (Tom) + EXTRACT(second) and + EXTRACT(milliseconds) for + time and time with time zone inputs (Tom) @@ -6570,8 +6570,8 @@ WITH w AS (SELECT * FROM foo) SELECT * FROM w, bar ... FOR UPDATE - Fix overflow for INTERVAL 'x ms' - when x is more than 2 million and integer + Fix overflow for INTERVAL 'x ms' + when x is more than 2 million and integer datetimes are in use (Alex Hunsaker) @@ -6589,13 +6589,13 @@ WITH w AS (SELECT * FROM foo) SELECT * FROM w, bar ... FOR UPDATE - Fix a typo that disabled commit_delay (Jeff Janes) + Fix a typo that disabled commit_delay (Jeff Janes) - Output early-startup messages to postmaster.log if the + Output early-startup messages to postmaster.log if the server is started in silent mode (Tom) @@ -6619,33 +6619,33 @@ WITH w AS (SELECT * FROM foo) SELECT * FROM w, bar ... FOR UPDATE - Fix pg_ctl to not go into an infinite loop if - postgresql.conf is empty (Jeff Davis) + Fix pg_ctl to not go into an infinite loop if + postgresql.conf is empty (Jeff Davis) - Fix several errors in pg_dump's - --binary-upgrade mode (Bruce, Tom) + Fix several errors in pg_dump's + --binary-upgrade mode (Bruce, Tom) - pg_dump --binary-upgrade is used by pg_migrator. + pg_dump --binary-upgrade is used by pg_migrator. - Fix contrib/xml2's xslt_process() to + Fix contrib/xml2's xslt_process() to properly handle the maximum number of parameters (twenty) (Tom) - Improve robustness of libpq's code to recover - from errors during COPY FROM STDIN (Tom) + Improve robustness of libpq's code to recover + from errors during COPY FROM STDIN (Tom) @@ -6658,14 +6658,14 @@ WITH w AS (SELECT * FROM foo) SELECT * FROM w, bar ... FOR UPDATE - Work around gcc bug that causes floating-point exception - instead of division by zero on some platforms (Tom) + Work around gcc bug that causes floating-point exception + instead of division by zero on some platforms (Tom) - Update time zone data files to tzdata release 2009l + Update time zone data files to tzdata release 2009l for DST law changes in Bangladesh, Egypt, Mauritius. @@ -6687,7 +6687,7 @@ WITH w AS (SELECT * FROM foo) SELECT * FROM w, bar ... 
FOR UPDATE Overview - After many years of development, PostgreSQL has + After many years of development, PostgreSQL has become feature-complete in many areas. This release shows a targeted approach to adding features (e.g., authentication, monitoring, space reuse), and adds capabilities defined in the @@ -6742,7 +6742,7 @@ WITH w AS (SELECT * FROM foo) SELECT * FROM w, bar ... FOR UPDATE - Improved join performance for EXISTS and NOT EXISTS queries + Improved join performance for EXISTS and NOT EXISTS queries @@ -6825,15 +6825,15 @@ WITH w AS (SELECT * FROM foo) SELECT * FROM w, bar ... FOR UPDATE - Previously this was selected by configure's - option. To retain + the old behavior, build with . - Remove ipcclean utility command (Bruce) + Remove ipcclean utility command (Bruce) @@ -6853,50 +6853,50 @@ WITH w AS (SELECT * FROM foo) SELECT * FROM w, bar ... FOR UPDATE Change default setting for - log_min_messages to warning (previously - it was notice) to reduce log file volume (Tom) + log_min_messages to warning (previously + it was notice) to reduce log file volume (Tom) - Change default setting for max_prepared_transactions to + Change default setting for max_prepared_transactions to zero (previously it was 5) (Tom) - Make debug_print_parse, debug_print_rewritten, - and debug_print_plan - output appear at LOG message level, not - DEBUG1 as formerly (Tom) + Make debug_print_parse, debug_print_rewritten, + and debug_print_plan + output appear at LOG message level, not + DEBUG1 as formerly (Tom) - Make debug_pretty_print default to on (Tom) + Make debug_pretty_print default to on (Tom) - Remove explain_pretty_print parameter (no longer needed) (Tom) + Remove explain_pretty_print parameter (no longer needed) (Tom) - Make log_temp_files settable by superusers only, like other + Make log_temp_files settable by superusers only, like other logging options (Simon Riggs) - Remove automatic appending of the epoch timestamp when no % - escapes are present in log_filename (Robert Haas) + Remove automatic appending of the epoch timestamp when no % + escapes are present in log_filename (Robert Haas) @@ -6907,22 +6907,22 @@ WITH w AS (SELECT * FROM foo) SELECT * FROM w, bar ... FOR UPDATE - Remove log_restartpoints from recovery.conf; - instead use log_checkpoints (Simon) + Remove log_restartpoints from recovery.conf; + instead use log_checkpoints (Simon) - Remove krb_realm and krb_server_hostname; - these are now set in pg_hba.conf instead (Magnus) + Remove krb_realm and krb_server_hostname; + these are now set in pg_hba.conf instead (Magnus) There are also significant changes in pg_hba.conf, + linkend="release-8-4-pg-hba-conf">pg_hba.conf, as described below. @@ -6938,12 +6938,12 @@ WITH w AS (SELECT * FROM foo) SELECT * FROM w, bar ... FOR UPDATE - Change TRUNCATE and LOCK to + Change TRUNCATE and LOCK to apply to child tables of the specified table(s) (Peter) - These commands now accept an ONLY option that prevents + These commands now accept an ONLY option that prevents processing child tables; this option must be used if the old behavior is needed. @@ -6951,8 +6951,8 @@ WITH w AS (SELECT * FROM foo) SELECT * FROM w, bar ... FOR UPDATE - SELECT DISTINCT and - UNION/INTERSECT/EXCEPT + SELECT DISTINCT and + UNION/INTERSECT/EXCEPT no longer always produce sorted output (Tom) @@ -6961,17 +6961,17 @@ WITH w AS (SELECT * FROM foo) SELECT * FROM w, bar ... FOR UPDATE by means of Sort/Unique processing (i.e., sort then remove adjacent duplicates). 
Now they can be implemented by hashing, which will not produce sorted output. If an application relied on the output being - in sorted order, the recommended fix is to add an ORDER BY + in sorted order, the recommended fix is to add an ORDER BY clause. As a short-term workaround, the previous behavior can be - restored by disabling enable_hashagg, but that is a very - performance-expensive fix. SELECT DISTINCT ON never uses + restored by disabling enable_hashagg, but that is a very + performance-expensive fix. SELECT DISTINCT ON never uses hashing, however, so its behavior is unchanged. - Force child tables to inherit CHECK constraints from parents + Force child tables to inherit CHECK constraints from parents (Alex Hunsaker, Nikhil Sontakke, Tom) @@ -6985,14 +6985,14 @@ WITH w AS (SELECT * FROM foo) SELECT * FROM w, bar ... FOR UPDATE - Disallow negative LIMIT or OFFSET + Disallow negative LIMIT or OFFSET values, rather than treating them as zero (Simon) - Disallow LOCK TABLE outside a transaction block + Disallow LOCK TABLE outside a transaction block (Tom) @@ -7004,12 +7004,12 @@ WITH w AS (SELECT * FROM foo) SELECT * FROM w, bar ... FOR UPDATE - Sequences now contain an additional start_value column + Sequences now contain an additional start_value column (Zoltan Boszormenyi) - This supports ALTER SEQUENCE ... RESTART. + This supports ALTER SEQUENCE ... RESTART. @@ -7025,14 +7025,14 @@ WITH w AS (SELECT * FROM foo) SELECT * FROM w, bar ... FOR UPDATE - Make numeric zero raised to a fractional power return - 0, rather than throwing an error, and make - numeric zero raised to the zero power return 1, + Make numeric zero raised to a fractional power return + 0, rather than throwing an error, and make + numeric zero raised to the zero power return 1, rather than error (Bruce) - This matches the longstanding float8 behavior. + This matches the longstanding float8 behavior. @@ -7042,7 +7042,7 @@ WITH w AS (SELECT * FROM foo) SELECT * FROM w, bar ... FOR UPDATE - The changed behavior is more IEEE-standard + The changed behavior is more IEEE-standard compliant. @@ -7050,7 +7050,7 @@ WITH w AS (SELECT * FROM foo) SELECT * FROM w, bar ... FOR UPDATE Throw an error if an escape character is the last character in - a LIKE pattern (i.e., it has nothing to escape) (Tom) + a LIKE pattern (i.e., it has nothing to escape) (Tom) @@ -7061,8 +7061,8 @@ WITH w AS (SELECT * FROM foo) SELECT * FROM w, bar ... FOR UPDATE - Remove ~=~ and ~<>~ operators - formerly used for LIKE index comparisons (Tom) + Remove ~=~ and ~<>~ operators + formerly used for LIKE index comparisons (Tom) @@ -7072,7 +7072,7 @@ WITH w AS (SELECT * FROM foo) SELECT * FROM w, bar ... FOR UPDATE - xpath() now passes its arguments to libxml + xpath() now passes its arguments to libxml without any changes (Andrew) @@ -7085,7 +7085,7 @@ WITH w AS (SELECT * FROM foo) SELECT * FROM w, bar ... FOR UPDATE - Make xmlelement() format attribute values just like + Make xmlelement() format attribute values just like content values (Peter) @@ -7098,13 +7098,13 @@ WITH w AS (SELECT * FROM foo) SELECT * FROM w, bar ... FOR UPDATE - Rewrite memory management for libxml-using functions + Rewrite memory management for libxml-using functions (Tom) This change should avoid some compatibility problems with use of - libxml in PL/Perl and other add-on code. + libxml in PL/Perl and other add-on code. @@ -7129,8 +7129,8 @@ WITH w AS (SELECT * FROM foo) SELECT * FROM w, bar ... 
FOR UPDATE - DateStyle no longer controls interval output - formatting; instead there is a new variable IntervalStyle + DateStyle no longer controls interval output + formatting; instead there is a new variable IntervalStyle (Ron Mayer) @@ -7138,7 +7138,7 @@ WITH w AS (SELECT * FROM foo) SELECT * FROM w, bar ... FOR UPDATE Improve consistency of handling of fractional seconds in - timestamp and interval output (Ron Mayer) + timestamp and interval output (Ron Mayer) @@ -7149,15 +7149,15 @@ WITH w AS (SELECT * FROM foo) SELECT * FROM w, bar ... FOR UPDATE - Make to_char()'s localized month/day names depend - on LC_TIME, not LC_MESSAGES (Euler + Make to_char()'s localized month/day names depend + on LC_TIME, not LC_MESSAGES (Euler Taveira de Oliveira) - Cause to_date() and to_timestamp() + Cause to_date() and to_timestamp() to more consistently report errors for invalid input (Brendan Jurd) @@ -7171,15 +7171,15 @@ WITH w AS (SELECT * FROM foo) SELECT * FROM w, bar ... FOR UPDATE - Fix to_timestamp() to not require upper/lower case - matching for meridian (AM/PM) and era - (BC/AD) format designations (Brendan + Fix to_timestamp() to not require upper/lower case + matching for meridian (AM/PM) and era + (BC/AD) format designations (Brendan Jurd) - For example, input value ad now matches the format - string AD. + For example, input value ad now matches the format + string AD. @@ -7217,8 +7217,8 @@ WITH w AS (SELECT * FROM foo) SELECT * FROM w, bar ... FOR UPDATE - Allow SELECT DISTINCT and - UNION/INTERSECT/EXCEPT to + Allow SELECT DISTINCT and + UNION/INTERSECT/EXCEPT to use hashing (Tom) @@ -7235,12 +7235,12 @@ WITH w AS (SELECT * FROM foo) SELECT * FROM w, bar ... FOR UPDATE This work formalizes our previous ad-hoc treatment of IN - (SELECT ...) clauses, and extends it to EXISTS and - NOT EXISTS clauses. It should result in significantly - better planning of EXISTS and NOT EXISTS - queries. In general, logically equivalent IN and - EXISTS clauses should now have similar performance, - whereas previously IN often won. + (SELECT ...) clauses, and extends it to EXISTS and + NOT EXISTS clauses. It should result in significantly + better planning of EXISTS and NOT EXISTS + queries. In general, logically equivalent IN and + EXISTS clauses should now have similar performance, + whereas previously IN often won. @@ -7258,7 +7258,7 @@ WITH w AS (SELECT * FROM foo) SELECT * FROM w, bar ... FOR UPDATE - Improve the performance of text_position() and + Improve the performance of text_position() and related functions by using Boyer-Moore-Horspool searching (David Rowley) @@ -7283,26 +7283,26 @@ WITH w AS (SELECT * FROM foo) SELECT * FROM w, bar ... FOR UPDATE - Increase the default value of default_statistics_target - from 10 to 100 (Greg Sabino Mullane, + Increase the default value of default_statistics_target + from 10 to 100 (Greg Sabino Mullane, Tom) - The maximum value was also increased from 1000 to - 10000. + The maximum value was also increased from 1000 to + 10000. - Perform constraint_exclusion checking by default - in queries involving inheritance or UNION ALL (Tom) + Perform constraint_exclusion checking by default + in queries involving inheritance or UNION ALL (Tom) - A new constraint_exclusion setting, - partition, was added to specify this behavior. + A new constraint_exclusion setting, + partition, was added to specify this behavior. @@ -7313,15 +7313,15 @@ WITH w AS (SELECT * FROM foo) SELECT * FROM w, bar ... 
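A small sketch of the to_timestamp() behavior described in the notes above, reusing their own ad / AD example (the date value itself is arbitrary): the lowercase era designator in the input now satisfies the uppercase AD format pattern.

    SELECT to_timestamp('2009-07-01 ad', 'YYYY-MM-DD AD');   -- accepted; 'ad' matches the AD pattern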
FOR UPDATE The amount of read-ahead is controlled by - effective_io_concurrency. This feature is available only - if the kernel has posix_fadvise() support. + effective_io_concurrency. This feature is available only + if the kernel has posix_fadvise() support. - Inline simple set-returning SQL functions in - FROM clauses (Richard Rowell) + Inline simple set-returning SQL functions in + FROM clauses (Richard Rowell) @@ -7336,7 +7336,7 @@ WITH w AS (SELECT * FROM foo) SELECT * FROM w, bar ... FOR UPDATE Reduce volume of temporary data in multi-batch hash joins - by suppressing physical tlist optimization (Michael + by suppressing physical tlist optimization (Michael Henderson, Ramon Lawrence) @@ -7344,7 +7344,7 @@ WITH w AS (SELECT * FROM foo) SELECT * FROM w, bar ... FOR UPDATE Avoid waiting for idle-in-transaction sessions during - CREATE INDEX CONCURRENTLY (Simon) + CREATE INDEX CONCURRENTLY (Simon) @@ -7368,15 +7368,15 @@ WITH w AS (SELECT * FROM foo) SELECT * FROM w, bar ... FOR UPDATE - Convert many postgresql.conf settings to enumerated - values so that pg_settings can display the valid + Convert many postgresql.conf settings to enumerated + values so that pg_settings can display the valid values (Magnus) - Add cursor_tuple_fraction parameter to control the + Add cursor_tuple_fraction parameter to control the fraction of a cursor's rows that the planner assumes will be fetched (Robert Hell) @@ -7385,7 +7385,7 @@ WITH w AS (SELECT * FROM foo) SELECT * FROM w, bar ... FOR UPDATE Allow underscores in the names of custom variable - classes in postgresql.conf (Tom) + classes in postgresql.conf (Tom) @@ -7399,12 +7399,12 @@ WITH w AS (SELECT * FROM foo) SELECT * FROM w, bar ... FOR UPDATE - Remove support for the (insecure) crypt authentication method + Remove support for the (insecure) crypt authentication method (Magnus) - This effectively obsoletes pre-PostgreSQL 7.2 client + This effectively obsoletes pre-PostgreSQL 7.2 client libraries, as there is no longer any non-plaintext password method that they can use. @@ -7412,21 +7412,21 @@ WITH w AS (SELECT * FROM foo) SELECT * FROM w, bar ... FOR UPDATE - Support regular expressions in pg_ident.conf + Support regular expressions in pg_ident.conf (Magnus) - Allow Kerberos/GSSAPI parameters + Allow Kerberos/GSSAPI parameters to be changed without restarting the postmaster (Magnus) - Support SSL certificate chains in server certificate + Support SSL certificate chains in server certificate file (Andrew Gierth) @@ -7440,8 +7440,8 @@ WITH w AS (SELECT * FROM foo) SELECT * FROM w, bar ... FOR UPDATE - Report appropriate error message for combination of MD5 - authentication and db_user_namespace enabled (Bruce) + Report appropriate error message for combination of MD5 + authentication and db_user_namespace enabled (Bruce) @@ -7449,26 +7449,26 @@ WITH w AS (SELECT * FROM foo) SELECT * FROM w, bar ... FOR UPDATE - <filename>pg_hba.conf</> + <filename>pg_hba.conf</filename> - Change all authentication options to use name=value + Change all authentication options to use name=value syntax (Magnus) - This makes incompatible changes to the ldap, - pam and ident authentication methods. All - pg_hba.conf entries with these methods need to be + This makes incompatible changes to the ldap, + pam and ident authentication methods. All + pg_hba.conf entries with these methods need to be rewritten using the new format. 
- Remove the ident sameuser option, instead making that + Remove the ident sameuser option, instead making that behavior the default if no usermap is specified (Magnus) @@ -7480,14 +7480,14 @@ WITH w AS (SELECT * FROM foo) SELECT * FROM w, bar ... FOR UPDATE - Previously a usermap was only supported for ident + Previously a usermap was only supported for ident authentication. - Add clientcert option to control requesting of a + Add clientcert option to control requesting of a client certificate (Magnus) @@ -7499,13 +7499,13 @@ WITH w AS (SELECT * FROM foo) SELECT * FROM w, bar ... FOR UPDATE - Add cert authentication method to allow - user authentication via SSL certificates + Add cert authentication method to allow + user authentication via SSL certificates (Magnus) - Previously SSL certificates could only verify that + Previously SSL certificates could only verify that the client had access to a certificate, not authenticate a user. @@ -7513,20 +7513,20 @@ WITH w AS (SELECT * FROM foo) SELECT * FROM w, bar ... FOR UPDATE - Allow krb5, gssapi and sspi - realm and krb5 host settings to be specified in - pg_hba.conf (Magnus) + Allow krb5, gssapi and sspi + realm and krb5 host settings to be specified in + pg_hba.conf (Magnus) - These override the settings in postgresql.conf. + These override the settings in postgresql.conf. - Add include_realm parameter for krb5, - gssapi, and sspi methods (Magnus) + Add include_realm parameter for krb5, + gssapi, and sspi methods (Magnus) @@ -7537,7 +7537,7 @@ WITH w AS (SELECT * FROM foo) SELECT * FROM w, bar ... FOR UPDATE - Parse pg_hba.conf fully when it is loaded, + Parse pg_hba.conf fully when it is loaded, so that errors are reported immediately (Magnus) @@ -7552,15 +7552,15 @@ WITH w AS (SELECT * FROM foo) SELECT * FROM w, bar ... FOR UPDATE - Show all parsing errors in pg_hba.conf instead of + Show all parsing errors in pg_hba.conf instead of aborting after the first one (Selena Deckelmann) - Support ident authentication over Unix-domain sockets - on Solaris (Garick Hamlin) + Support ident authentication over Unix-domain sockets + on Solaris (Garick Hamlin) @@ -7574,7 +7574,7 @@ WITH w AS (SELECT * FROM foo) SELECT * FROM w, bar ... FOR UPDATE - Provide an option to pg_start_backup() to force its + Provide an option to pg_start_backup() to force its implied checkpoint to finish as quickly as possible (Tom) @@ -7586,13 +7586,13 @@ WITH w AS (SELECT * FROM foo) SELECT * FROM w, bar ... FOR UPDATE - Make pg_stop_backup() wait for modified WAL + Make pg_stop_backup() wait for modified WAL files to be archived (Simon) This guarantees that the backup is valid at the time - pg_stop_backup() completes. + pg_stop_backup() completes. @@ -7606,22 +7606,22 @@ WITH w AS (SELECT * FROM foo) SELECT * FROM w, bar ... FOR UPDATE - Delay smart shutdown while a continuous archiving base backup + Delay smart shutdown while a continuous archiving base backup is in progress (Laurenz Albe) - Cancel a continuous archiving base backup if fast shutdown + Cancel a continuous archiving base backup if fast shutdown is requested (Laurenz Albe) - Allow recovery.conf boolean variables to take the - same range of string values as postgresql.conf + Allow recovery.conf boolean variables to take the + same range of string values as postgresql.conf boolean variables (Bruce) @@ -7637,20 +7637,20 @@ WITH w AS (SELECT * FROM foo) SELECT * FROM w, bar ... 
FOR UPDATE - Add pg_conf_load_time() to report when - the PostgreSQL configuration files were last loaded + Add pg_conf_load_time() to report when + the PostgreSQL configuration files were last loaded (George Gensure) - Add pg_terminate_backend() to safely terminate a - backend (the SIGTERM signal works also) (Tom, Bruce) + Add pg_terminate_backend() to safely terminate a + backend (the SIGTERM signal works also) (Tom, Bruce) - While it's always been possible to SIGTERM a single + While it's always been possible to SIGTERM a single backend, this was previously considered unsupported; and testing of the case found some bugs that are now fixed. @@ -7664,30 +7664,30 @@ WITH w AS (SELECT * FROM foo) SELECT * FROM w, bar ... FOR UPDATE Function statistics appear in a new system view, - pg_stat_user_functions. Tracking is controlled - by the new parameter track_functions. + pg_stat_user_functions. Tracking is controlled + by the new parameter track_functions. Allow specification of the maximum query string size in - pg_stat_activity via new - track_activity_query_size parameter (Thomas Lee) + pg_stat_activity via new + track_activity_query_size parameter (Thomas Lee) - Increase the maximum line length sent to syslog, in + Increase the maximum line length sent to syslog, in hopes of improving performance (Tom) - Add read-only configuration variables segment_size, - wal_block_size, and wal_segment_size + Add read-only configuration variables segment_size, + wal_block_size, and wal_segment_size (Bernd Helmle) @@ -7701,7 +7701,7 @@ WITH w AS (SELECT * FROM foo) SELECT * FROM w, bar ... FOR UPDATE - Add pg_stat_get_activity(pid) function to return + Add pg_stat_get_activity(pid) function to return information about a specific process id (Magnus) @@ -7709,14 +7709,14 @@ WITH w AS (SELECT * FROM foo) SELECT * FROM w, bar ... FOR UPDATE Allow the location of the server's statistics file to be specified - via stats_temp_directory (Magnus) + via stats_temp_directory (Magnus) This allows the statistics file to be placed in a - RAM-resident directory to reduce I/O requirements. + RAM-resident directory to reduce I/O requirements. On startup/shutdown, the file is copied to its traditional location - ($PGDATA/global/) so it is preserved across restarts. + ($PGDATA/global/) so it is preserved across restarts. @@ -7732,45 +7732,45 @@ WITH w AS (SELECT * FROM foo) SELECT * FROM w, bar ... FOR UPDATE - Add support for WINDOW functions (Hitoshi Harada) + Add support for WINDOW functions (Hitoshi Harada) - Add support for WITH clauses (CTEs), including WITH - RECURSIVE (Yoshiyuki Asaba, Tatsuo Ishii, Tom) + Add support for WITH clauses (CTEs), including WITH + RECURSIVE (Yoshiyuki Asaba, Tatsuo Ishii, Tom) - Add TABLE command (Peter) + Add TABLE command (Peter) - TABLE tablename is a SQL standard short-hand for - SELECT * FROM tablename. + TABLE tablename is a SQL standard short-hand for + SELECT * FROM tablename. - Allow AS to be optional when specifying a - SELECT (or RETURNING) column output + Allow AS to be optional when specifying a + SELECT (or RETURNING) column output label (Hiroshi Saito) This works so long as the column label is not any - PostgreSQL keyword; otherwise AS is still + PostgreSQL keyword; otherwise AS is still needed. - Support set-returning functions in SELECT result lists + Support set-returning functions in SELECT result lists even for functions that return their result via a tuplestore (Tom) @@ -7789,22 +7789,22 @@ WITH w AS (SELECT * FROM foo) SELECT * FROM w, bar ... 
FOR UPDATE - Allow SELECT FOR UPDATE/SHARE to work + Allow SELECT FOR UPDATE/SHARE to work on inheritance trees (Tom) - Add infrastructure for SQL/MED (Martin Pihlak, + Add infrastructure for SQL/MED (Martin Pihlak, Peter) - There are no remote or external SQL/MED capabilities + There are no remote or external SQL/MED capabilities yet, but this change provides a standardized and future-proof system for managing connection information for modules like - dblink and plproxy. + dblink and plproxy. @@ -7827,7 +7827,7 @@ WITH w AS (SELECT * FROM foo) SELECT * FROM w, bar ... FOR UPDATE This allows constructs such as - row(1, 1.1) = any (array[row(7, 7.7), row(1, 1.0)]). + row(1, 1.1) = any (array[row(7, 7.7), row(1, 1.0)]). This is particularly useful in recursive queries. @@ -7835,14 +7835,14 @@ WITH w AS (SELECT * FROM foo) SELECT * FROM w, bar ... FOR UPDATE Add support for Unicode string literal and identifier specifications - using code points, e.g. U&'d\0061t\+000061' + using code points, e.g. U&'d\0061t\+000061' (Peter) - Reject \000 in string literals and COPY data + Reject \000 in string literals and COPY data (Tom) @@ -7866,37 +7866,37 @@ WITH w AS (SELECT * FROM foo) SELECT * FROM w, bar ... FOR UPDATE - <command>TRUNCATE</> + <command>TRUNCATE</command> - Support statement-level ON TRUNCATE triggers (Simon) + Support statement-level ON TRUNCATE triggers (Simon) - Add RESTART/CONTINUE IDENTITY options - for TRUNCATE TABLE + Add RESTART/CONTINUE IDENTITY options + for TRUNCATE TABLE (Zoltan Boszormenyi) The start value of a sequence can be changed by ALTER - SEQUENCE START WITH. + SEQUENCE START WITH. - Allow TRUNCATE tab1, tab1 to succeed (Bruce) + Allow TRUNCATE tab1, tab1 to succeed (Bruce) - Add a separate TRUNCATE permission (Robert Haas) + Add a separate TRUNCATE permission (Robert Haas) @@ -7905,38 +7905,38 @@ WITH w AS (SELECT * FROM foo) SELECT * FROM w, bar ... FOR UPDATE - <command>EXPLAIN</> + <command>EXPLAIN</command> - Make EXPLAIN VERBOSE show the output columns of each + Make EXPLAIN VERBOSE show the output columns of each plan node (Tom) - Previously EXPLAIN VERBOSE output an internal + Previously EXPLAIN VERBOSE output an internal representation of the query plan. (That behavior is now - available via debug_print_plan.) + available via debug_print_plan.) - Make EXPLAIN identify subplans and initplans with + Make EXPLAIN identify subplans and initplans with individual labels (Tom) - Make EXPLAIN honor debug_print_plan (Tom) + Make EXPLAIN honor debug_print_plan (Tom) - Allow EXPLAIN on CREATE TABLE AS (Peter) + Allow EXPLAIN on CREATE TABLE AS (Peter) @@ -7945,25 +7945,25 @@ WITH w AS (SELECT * FROM foo) SELECT * FROM w, bar ... FOR UPDATE - <literal>LIMIT</>/<literal>OFFSET</> + <literal>LIMIT</literal>/<literal>OFFSET</literal> - Allow sub-selects in LIMIT and OFFSET (Tom) + Allow sub-selects in LIMIT and OFFSET (Tom) - Add SQL-standard syntax for - LIMIT/OFFSET capabilities (Peter) + Add SQL-standard syntax for + LIMIT/OFFSET capabilities (Peter) To wit, OFFSET num {ROW|ROWS} FETCH {FIRST|NEXT} [num] {ROW|ROWS} - ONLY. + ONLY. @@ -7986,20 +7986,20 @@ WITH w AS (SELECT * FROM foo) SELECT * FROM w, bar ... FOR UPDATE - Refactor multi-object DROP operations to reduce the - need for CASCADE (Alex Hunsaker) + Refactor multi-object DROP operations to reduce the + need for CASCADE (Alex Hunsaker) - For example, if table B has a dependency on table - A, the command DROP TABLE A, B no longer - requires the CASCADE option. 
+ For example, if table B has a dependency on table + A, the command DROP TABLE A, B no longer + requires the CASCADE option. - Fix various problems with concurrent DROP commands + Fix various problems with concurrent DROP commands by ensuring that locks are taken before we begin to drop dependencies of an object (Tom) @@ -8007,15 +8007,15 @@ WITH w AS (SELECT * FROM foo) SELECT * FROM w, bar ... FOR UPDATE - Improve reporting of dependencies during DROP + Improve reporting of dependencies during DROP commands (Tom) - Add WITH [NO] DATA clause to CREATE TABLE - AS, per the SQL standard (Peter, Tom) + Add WITH [NO] DATA clause to CREATE TABLE + AS, per the SQL standard (Peter, Tom) @@ -8027,14 +8027,14 @@ WITH w AS (SELECT * FROM foo) SELECT * FROM w, bar ... FOR UPDATE - Allow CREATE AGGREGATE to use an internal + Allow CREATE AGGREGATE to use an internal transition datatype (Tom) - Add LIKE clause to CREATE TYPE (Tom) + Add LIKE clause to CREATE TYPE (Tom) @@ -8045,7 +8045,7 @@ WITH w AS (SELECT * FROM foo) SELECT * FROM w, bar ... FOR UPDATE - Allow specification of the type category and preferred + Allow specification of the type category and preferred status for user-defined base types (Tom) @@ -8057,7 +8057,7 @@ WITH w AS (SELECT * FROM foo) SELECT * FROM w, bar ... FOR UPDATE - Allow CREATE OR REPLACE VIEW to add columns to the + Allow CREATE OR REPLACE VIEW to add columns to the end of a view (Robert Haas) @@ -8065,25 +8065,25 @@ WITH w AS (SELECT * FROM foo) SELECT * FROM w, bar ... FOR UPDATE - <command>ALTER</> + <command>ALTER</command> - Add ALTER TYPE RENAME (Petr Jelinek) + Add ALTER TYPE RENAME (Petr Jelinek) - Add ALTER SEQUENCE ... RESTART (with no parameter) to + Add ALTER SEQUENCE ... RESTART (with no parameter) to reset a sequence to its initial value (Zoltan Boszormenyi) - Modify the ALTER TABLE syntax to allow all reasonable + Modify the ALTER TABLE syntax to allow all reasonable combinations for tables, indexes, sequences, and views (Tom) @@ -8093,28 +8093,28 @@ WITH w AS (SELECT * FROM foo) SELECT * FROM w, bar ... FOR UPDATE - ALTER SEQUENCE OWNER TO + ALTER SEQUENCE OWNER TO - ALTER VIEW ALTER COLUMN SET/DROP DEFAULT + ALTER VIEW ALTER COLUMN SET/DROP DEFAULT - ALTER VIEW OWNER TO + ALTER VIEW OWNER TO - ALTER VIEW SET SCHEMA + ALTER VIEW SET SCHEMA There is no actual new functionality here, but formerly - you had to say ALTER TABLE to do these things, + you had to say ALTER TABLE to do these things, which was confusing. @@ -8122,24 +8122,24 @@ WITH w AS (SELECT * FROM foo) SELECT * FROM w, bar ... FOR UPDATE Add support for the syntax ALTER TABLE ... ALTER COLUMN - ... SET DATA TYPE (Peter) + ... SET DATA TYPE (Peter) - This is SQL-standard syntax for functionality that + This is SQL-standard syntax for functionality that was already supported. - Make ALTER TABLE SET WITHOUT OIDS rewrite the table - to physically remove OID values (Tom) + Make ALTER TABLE SET WITHOUT OIDS rewrite the table + to physically remove OID values (Tom) - Also, add ALTER TABLE SET WITH OIDS to rewrite the - table to add OIDs. + Also, add ALTER TABLE SET WITH OIDS to rewrite the + table to add OIDs. @@ -8154,7 +8154,7 @@ WITH w AS (SELECT * FROM foo) SELECT * FROM w, bar ... FOR UPDATE Improve reporting of - CREATE/DROP/RENAME DATABASE + CREATE/DROP/RENAME DATABASE failure when uncommitted prepared transactions are the cause (Tom) @@ -8162,7 +8162,7 @@ WITH w AS (SELECT * FROM foo) SELECT * FROM w, bar ... 
FOR UPDATE - Make LC_COLLATE and LC_CTYPE into + Make LC_COLLATE and LC_CTYPE into per-database settings (Radek Strnad, Heikki) @@ -8175,20 +8175,20 @@ WITH w AS (SELECT * FROM foo) SELECT * FROM w, bar ... FOR UPDATE Improve checks that the database encoding, collation - (LC_COLLATE), and character classes - (LC_CTYPE) match (Heikki, Tom) + (LC_COLLATE), and character classes + (LC_CTYPE) match (Heikki, Tom) Note in particular that a new database's encoding and locale - settings can be changed only when copying from template0. + settings can be changed only when copying from template0. This prevents possibly copying data that doesn't match the settings. - Add ALTER DATABASE SET TABLESPACE to move a database + Add ALTER DATABASE SET TABLESPACE to move a database to a new tablespace (Guillaume Lelarge, Bernd Helmle) @@ -8206,8 +8206,8 @@ WITH w AS (SELECT * FROM foo) SELECT * FROM w, bar ... FOR UPDATE - Add a VERBOSE option to the CLUSTER command and - clusterdb (Jim Cox) + Add a VERBOSE option to the CLUSTER command and + clusterdb (Jim Cox) @@ -8261,8 +8261,8 @@ WITH w AS (SELECT * FROM foo) SELECT * FROM w, bar ... FOR UPDATE - xxx_pattern_ops indexes can now be used for simple - equality comparisons, not only for LIKE (Tom) + xxx_pattern_ops indexes can now be used for simple + equality comparisons, not only for LIKE (Tom) @@ -8276,19 +8276,19 @@ WITH w AS (SELECT * FROM foo) SELECT * FROM w, bar ... FOR UPDATE - Remove the requirement to use @@@ when doing - GIN weighted lookups on full text indexes (Tom, Teodor) + Remove the requirement to use @@@ when doing + GIN weighted lookups on full text indexes (Tom, Teodor) - The normal @@ text search operator can be used + The normal @@ text search operator can be used instead. - Add an optimizer selectivity function for @@ text + Add an optimizer selectivity function for @@ text search operations (Jan Urbanski) @@ -8302,7 +8302,7 @@ WITH w AS (SELECT * FROM foo) SELECT * FROM w, bar ... FOR UPDATE - Support multi-column GIN indexes (Teodor Sigaev) + Support multi-column GIN indexes (Teodor Sigaev) @@ -8317,18 +8317,18 @@ WITH w AS (SELECT * FROM foo) SELECT * FROM w, bar ... FOR UPDATE - <command>VACUUM</> + <command>VACUUM</command> - Track free space in separate per-relation fork files (Heikki) + Track free space in separate per-relation fork files (Heikki) - Free space discovered by VACUUM is now recorded in - *_fsm files, rather than in a fixed-sized shared memory - area. The max_fsm_pages and max_fsm_relations + Free space discovered by VACUUM is now recorded in + *_fsm files, rather than in a fixed-sized shared memory + area. The max_fsm_pages and max_fsm_relations settings have been removed, greatly simplifying administration of free space management. @@ -8341,16 +8341,16 @@ WITH w AS (SELECT * FROM foo) SELECT * FROM w, bar ... FOR UPDATE - This allows VACUUM to avoid scanning all of + This allows VACUUM to avoid scanning all of a table when only a portion of the table needs vacuuming. - The visibility map is stored in per-relation fork files. + The visibility map is stored in per-relation fork files. - Add vacuum_freeze_table_age parameter to control - when VACUUM should ignore the visibility map and + Add vacuum_freeze_table_age parameter to control + when VACUUM should ignore the visibility map and do a full table scan to freeze tuples (Heikki) @@ -8361,15 +8361,15 @@ WITH w AS (SELECT * FROM foo) SELECT * FROM w, bar ... 
FOR UPDATE - This improves VACUUM's ability to reclaim space + This improves VACUUM's ability to reclaim space in the presence of long-running transactions. - Add ability to specify per-relation autovacuum and TOAST - parameters in CREATE TABLE (Alvaro, Euler Taveira de + Add ability to specify per-relation autovacuum and TOAST + parameters in CREATE TABLE (Alvaro, Euler Taveira de Oliveira) @@ -8380,7 +8380,7 @@ WITH w AS (SELECT * FROM foo) SELECT * FROM w, bar ... FOR UPDATE - Add --freeze option to vacuumdb + Add --freeze option to vacuumdb (Bruce) @@ -8397,20 +8397,20 @@ WITH w AS (SELECT * FROM foo) SELECT * FROM w, bar ... FOR UPDATE - Add a CaseSensitive option for text search synonym + Add a CaseSensitive option for text search synonym dictionaries (Simon) - Improve the precision of NUMERIC division (Tom) + Improve the precision of NUMERIC division (Tom) - Add basic arithmetic operators for int2 with int8 + Add basic arithmetic operators for int2 with int8 (Tom) @@ -8421,22 +8421,22 @@ WITH w AS (SELECT * FROM foo) SELECT * FROM w, bar ... FOR UPDATE - Allow UUID input to accept an optional hyphen after + Allow UUID input to accept an optional hyphen after every fourth digit (Robert Haas) - Allow on/off as input for the boolean data type + Allow on/off as input for the boolean data type (Itagaki Takahiro) - Allow spaces around NaN in the input string for - type numeric (Sam Mason) + Allow spaces around NaN in the input string for + type numeric (Sam Mason) @@ -8448,53 +8448,53 @@ WITH w AS (SELECT * FROM foo) SELECT * FROM w, bar ... FOR UPDATE - Reject year 0 BC and years 000 and - 0000 (Tom) + Reject year 0 BC and years 000 and + 0000 (Tom) - Previously these were interpreted as 1 BC. - (Note: years 0 and 00 are still assumed to be + Previously these were interpreted as 1 BC. + (Note: years 0 and 00 are still assumed to be the year 2000.) - Include SGT (Singapore time) in the default list of + Include SGT (Singapore time) in the default list of known time zone abbreviations (Tom) - Support infinity and -infinity as - values of type date (Tom) + Support infinity and -infinity as + values of type date (Tom) - Make parsing of interval literals more standard-compliant + Make parsing of interval literals more standard-compliant (Tom, Ron Mayer) - For example, INTERVAL '1' YEAR now does what it's + For example, INTERVAL '1' YEAR now does what it's supposed to. - Allow interval fractional-seconds precision to be specified - after the second keyword, for SQL standard + Allow interval fractional-seconds precision to be specified + after the second keyword, for SQL standard compliance (Tom) Formerly the precision had to be specified after the keyword - interval. (For backwards compatibility, this syntax is still + interval. (For backwards compatibility, this syntax is still supported, though deprecated.) Data type definitions will now be output using the standard format. @@ -8502,26 +8502,26 @@ WITH w AS (SELECT * FROM foo) SELECT * FROM w, bar ... FOR UPDATE - Support the IS0 8601 interval syntax (Ron + Support the IS0 8601 interval syntax (Ron Mayer, Kevin Grittner) - For example, INTERVAL 'P1Y2M3DT4H5M6.7S' is now + For example, INTERVAL 'P1Y2M3DT4H5M6.7S' is now supported. - Add IntervalStyle parameter - which controls how interval values are output (Ron Mayer) + Add IntervalStyle parameter + which controls how interval values are output (Ron Mayer) - Valid values are: postgres, postgres_verbose, - sql_standard, iso_8601. 
This setting also - controls the handling of negative interval input when only + Valid values are: postgres, postgres_verbose, + sql_standard, iso_8601. This setting also + controls the handling of negative interval input when only some fields have positive/negative designations. @@ -8529,7 +8529,7 @@ WITH w AS (SELECT * FROM foo) SELECT * FROM w, bar ... FOR UPDATE Improve consistency of handling of fractional seconds in - timestamp and interval output (Ron Mayer) + timestamp and interval output (Ron Mayer) @@ -8543,38 +8543,38 @@ WITH w AS (SELECT * FROM foo) SELECT * FROM w, bar ... FOR UPDATE - Improve the handling of casts applied to ARRAY[] - constructs, such as ARRAY[...]::integer[] + Improve the handling of casts applied to ARRAY[] + constructs, such as ARRAY[...]::integer[] (Brendan Jurd) - Formerly PostgreSQL attempted to determine a data type - for the ARRAY[] construct without reference to the ensuing + Formerly PostgreSQL attempted to determine a data type + for the ARRAY[] construct without reference to the ensuing cast. This could fail unnecessarily in many cases, in particular when - the ARRAY[] construct was empty or contained only - ambiguous entries such as NULL. Now the cast is consulted + the ARRAY[] construct was empty or contained only + ambiguous entries such as NULL. Now the cast is consulted to determine the type that the array elements must be. - Make SQL-syntax ARRAY dimensions optional - to match the SQL standard (Peter) + Make SQL-syntax ARRAY dimensions optional + to match the SQL standard (Peter) - Add array_ndims() to return the number + Add array_ndims() to return the number of dimensions of an array (Robert Haas) - Add array_length() to return the length + Add array_length() to return the length of an array for a specified dimension (Jim Nasby, Robert Haas, Peter Eisentraut) @@ -8582,7 +8582,7 @@ WITH w AS (SELECT * FROM foo) SELECT * FROM w, bar ... FOR UPDATE - Add aggregate function array_agg(), which + Add aggregate function array_agg(), which returns all aggregated values as a single array (Robert Haas, Jeff Davis, Peter) @@ -8590,25 +8590,25 @@ WITH w AS (SELECT * FROM foo) SELECT * FROM w, bar ... FOR UPDATE - Add unnest(), which converts an array to + Add unnest(), which converts an array to individual row values (Tom) - This is the opposite of array_agg(). + This is the opposite of array_agg(). - Add array_fill() to create arrays initialized with + Add array_fill() to create arrays initialized with a value (Pavel Stehule) - Add generate_subscripts() to simplify generating + Add generate_subscripts() to simplify generating the range of an array's subscripts (Pavel Stehule) @@ -8618,19 +8618,19 @@ WITH w AS (SELECT * FROM foo) SELECT * FROM w, bar ... FOR UPDATE - Wide-Value Storage (<acronym>TOAST</>) + Wide-Value Storage (<acronym>TOAST</acronym>) - Consider TOAST compression on values as short as + Consider TOAST compression on values as short as 32 bytes (previously 256 bytes) (Greg Stark) - Require 25% minimum space savings before using TOAST + Require 25% minimum space savings before using TOAST compression (previously 20% for small values and any-savings-at-all for large values) (Greg) @@ -8638,7 +8638,7 @@ WITH w AS (SELECT * FROM foo) SELECT * FROM w, bar ... 
FOR UPDATE - Improve TOAST heuristics for rows that have a mix of large + Improve TOAST heuristics for rows that have a mix of large and small toastable fields, so that we prefer to push large values out of line and don't compress small values unnecessarily (Greg, Tom) @@ -8656,52 +8656,52 @@ WITH w AS (SELECT * FROM foo) SELECT * FROM w, bar ... FOR UPDATE - Document that setseed() allows values from - -1 to 1 (not just 0 to - 1), and enforce the valid range (Kris Jurka) + Document that setseed() allows values from + -1 to 1 (not just 0 to + 1), and enforce the valid range (Kris Jurka) - Add server-side function lo_import(filename, oid) + Add server-side function lo_import(filename, oid) (Tatsuo) - Add quote_nullable(), which behaves like - quote_literal() but returns the string NULL for + Add quote_nullable(), which behaves like + quote_literal() but returns the string NULL for a null argument (Brendan Jurd) - Improve full text search headline() function to + Improve full text search headline() function to allow extracting several fragments of text (Sushant Sinha) - Add suppress_redundant_updates_trigger() trigger + Add suppress_redundant_updates_trigger() trigger function to avoid overhead for non-data-changing updates (Andrew) - Add div(numeric, numeric) to perform numeric + Add div(numeric, numeric) to perform numeric division without rounding (Tom) - Add timestamp and timestamptz versions of - generate_series() (Hitoshi Harada) + Add timestamp and timestamptz versions of + generate_series() (Hitoshi Harada) @@ -8713,54 +8713,54 @@ WITH w AS (SELECT * FROM foo) SELECT * FROM w, bar ... FOR UPDATE - Implement current_query() for use by functions + Implement current_query() for use by functions that need to know the currently running query (Tomas Doran) - Add pg_get_keywords() to return a list of the + Add pg_get_keywords() to return a list of the parser keywords (Dave Page) - Add pg_get_functiondef() to see a function's + Add pg_get_functiondef() to see a function's definition (Abhijit Menon-Sen) - Allow the second argument of pg_get_expr() to be zero + Allow the second argument of pg_get_expr() to be zero when deparsing an expression that does not contain variables (Tom) - Modify pg_relation_size() to use regclass + Modify pg_relation_size() to use regclass (Heikki) - pg_relation_size(data_type_name) no longer works. + pg_relation_size(data_type_name) no longer works. - Add boot_val and reset_val columns to - pg_settings output (Greg Smith) + Add boot_val and reset_val columns to + pg_settings output (Greg Smith) Add source file name and line number columns to - pg_settings output for variables set in a configuration + pg_settings output for variables set in a configuration file (Magnus, Alvaro) @@ -8771,26 +8771,26 @@ WITH w AS (SELECT * FROM foo) SELECT * FROM w, bar ... FOR UPDATE - Add support for CURRENT_CATALOG, - CURRENT_SCHEMA, SET CATALOG, SET - SCHEMA (Peter) + Add support for CURRENT_CATALOG, + CURRENT_SCHEMA, SET CATALOG, SET + SCHEMA (Peter) - These provide SQL-standard syntax for existing features. + These provide SQL-standard syntax for existing features. - Add pg_typeof() which returns the data type + Add pg_typeof() which returns the data type of any value (Brendan Jurd) - Make version() return information about whether + Make version() return information about whether the server is a 32- or 64-bit binary (Bruce) @@ -8798,7 +8798,7 @@ WITH w AS (SELECT * FROM foo) SELECT * FROM w, bar ... 
FOR UPDATE Fix the behavior of information schema columns - is_insertable_into and is_updatable to + is_insertable_into and is_updatable to be consistent (Peter) @@ -8806,13 +8806,13 @@ WITH w AS (SELECT * FROM foo) SELECT * FROM w, bar ... FOR UPDATE Improve the behavior of information schema - datetime_precision columns (Peter) + datetime_precision columns (Peter) - These columns now show zero for date columns, and 6 - (the default precision) for time, timestamp, and - interval without a declared precision, rather than showing + These columns now show zero for date columns, and 6 + (the default precision) for time, timestamp, and + interval without a declared precision, rather than showing null as formerly. @@ -8820,28 +8820,28 @@ WITH w AS (SELECT * FROM foo) SELECT * FROM w, bar ... FOR UPDATE Convert remaining builtin set-returning functions to use - OUT parameters (Jaime Casanova) + OUT parameters (Jaime Casanova) This makes it possible to call these functions without specifying - a column list: pg_show_all_settings(), - pg_lock_status(), pg_prepared_xact(), - pg_prepared_statement(), pg_cursor() + a column list: pg_show_all_settings(), + pg_lock_status(), pg_prepared_xact(), + pg_prepared_statement(), pg_cursor() - Make pg_*_is_visible() and - has_*_privilege() functions return NULL + Make pg_*_is_visible() and + has_*_privilege() functions return NULL for invalid OIDs, rather than reporting an error (Tom) - Extend has_*_privilege() functions to allow inquiring + Extend has_*_privilege() functions to allow inquiring about the OR of multiple privileges in one call (Stephen Frost, Tom) @@ -8849,8 +8849,8 @@ WITH w AS (SELECT * FROM foo) SELECT * FROM w, bar ... FOR UPDATE - Add has_column_privilege() and - has_any_column_privilege() functions (Stephen + Add has_column_privilege() and + has_any_column_privilege() functions (Stephen Frost, Tom) @@ -8883,16 +8883,16 @@ WITH w AS (SELECT * FROM foo) SELECT * FROM w, bar ... FOR UPDATE - Add CREATE FUNCTION ... RETURNS TABLE clause (Pavel + Add CREATE FUNCTION ... RETURNS TABLE clause (Pavel Stehule) - Allow SQL-language functions to return the output - of an INSERT/UPDATE/DELETE - RETURNING clause (Tom) + Allow SQL-language functions to return the output + of an INSERT/UPDATE/DELETE + RETURNING clause (Tom) @@ -8906,38 +8906,38 @@ WITH w AS (SELECT * FROM foo) SELECT * FROM w, bar ... FOR UPDATE - Support EXECUTE USING for easier insertion of data + Support EXECUTE USING for easier insertion of data values into a dynamic query string (Pavel Stehule) - Allow looping over the results of a cursor using a FOR + Allow looping over the results of a cursor using a FOR loop (Pavel Stehule) - Support RETURN QUERY EXECUTE (Pavel + Support RETURN QUERY EXECUTE (Pavel Stehule) - Improve the RAISE command (Pavel Stehule) + Improve the RAISE command (Pavel Stehule) - Support DETAIL and HINT fields + Support DETAIL and HINT fields - Support specification of the SQLSTATE error code + Support specification of the SQLSTATE error code @@ -8947,7 +8947,7 @@ WITH w AS (SELECT * FROM foo) SELECT * FROM w, bar ... FOR UPDATE - Allow RAISE without parameters in an exception + Allow RAISE without parameters in an exception block to re-throw the current error @@ -8957,45 +8957,45 @@ WITH w AS (SELECT * FROM foo) SELECT * FROM w, bar ... FOR UPDATE - Allow specification of SQLSTATE codes - in EXCEPTION lists (Pavel Stehule) + Allow specification of SQLSTATE codes + in EXCEPTION lists (Pavel Stehule) - This is useful for handling custom SQLSTATE codes. 
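For readers of the array items listed above, a small, hedged SQL sketch of how those additions are used; the literals below are invented for illustration only:

    -- an empty ARRAY[] construct now takes its element type from the cast
    SELECT ARRAY[]::integer[];

    -- aggregate values into an array, then expand an array back into rows
    SELECT array_agg(x) FROM generate_series(1, 5) AS x;   -- {1,2,3,4,5}
    SELECT unnest(ARRAY[1, 2, 3]);

    -- inspect and build arrays
    SELECT array_length(ARRAY[10, 20, 30], 1);             -- 3
    SELECT array_ndims(ARRAY[[1, 2], [3, 4]]);             -- 2
    SELECT array_fill(0, ARRAY[3]);                        -- {0,0,0}
    SELECT generate_subscripts(ARRAY['a', 'b', 'c'], 1);   -- 1, 2, 3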
+ This is useful for handling custom SQLSTATE codes. - Support the CASE statement (Pavel Stehule) + Support the CASE statement (Pavel Stehule) - Make RETURN QUERY set the special FOUND and - GET DIAGNOSTICS ROW_COUNT variables + Make RETURN QUERY set the special FOUND and + GET DIAGNOSTICS ROW_COUNT variables (Pavel Stehule) - Make FETCH and MOVE set the - GET DIAGNOSTICS ROW_COUNT variable + Make FETCH and MOVE set the + GET DIAGNOSTICS ROW_COUNT variable (Andrew Gierth) - Make EXIT without a label always exit the innermost + Make EXIT without a label always exit the innermost loop (Tom) - Formerly, if there were a BEGIN block more closely nested + Formerly, if there were a BEGIN block more closely nested than any loop, it would exit that block instead. The new behavior matches Oracle(TM) and is also what was previously stated by our own documentation. @@ -9009,11 +9009,11 @@ WITH w AS (SELECT * FROM foo) SELECT * FROM w, bar ... FOR UPDATE - In particular, the format string in RAISE now works + In particular, the format string in RAISE now works the same as any other string literal, including being subject - to standard_conforming_strings. This change also + to standard_conforming_strings. This change also fixes other cases in which valid commands would fail when - standard_conforming_strings is on. + standard_conforming_strings is on. @@ -9037,28 +9037,28 @@ WITH w AS (SELECT * FROM foo) SELECT * FROM w, bar ... FOR UPDATE - Fix pg_ctl restart to preserve command-line arguments + Fix pg_ctl restart to preserve command-line arguments (Bruce) - Add -w/--no-password option that + Add -w/--no-password option that prevents password prompting in all utilities that have a - -W/--password option (Peter) + -W/--password option (Peter) - Remove - These options have had no effect since PostgreSQL + These options have had no effect since PostgreSQL 8.3. @@ -9066,41 +9066,41 @@ WITH w AS (SELECT * FROM foo) SELECT * FROM w, bar ... FOR UPDATE - <application>psql</> + <application>psql</application> - Remove verbose startup banner; now just suggest help + Remove verbose startup banner; now just suggest help (Joshua Drake) - Make help show common backslash commands (Greg + Make help show common backslash commands (Greg Sabino Mullane) - Add \pset format wrapped mode to wrap output to the - screen width, or file/pipe output too if \pset columns + Add \pset format wrapped mode to wrap output to the + screen width, or file/pipe output too if \pset columns is set (Bryce Nesbitt) - Allow all supported spellings of boolean values in \pset, - rather than just on and off (Bruce) + Allow all supported spellings of boolean values in \pset, + rather than just on and off (Bruce) - Formerly, any string other than off was silently taken - to mean true. psql will now complain - about unrecognized spellings (but still take them as true). + Formerly, any string other than off was silently taken + to mean true. psql will now complain + about unrecognized spellings (but still take them as true). @@ -9130,8 +9130,8 @@ WITH w AS (SELECT * FROM foo) SELECT * FROM w, bar ... FOR UPDATE - Add optional on/off argument for - \timing (David Fetter) + Add optional on/off argument for + \timing (David Fetter) @@ -9144,20 +9144,20 @@ WITH w AS (SELECT * FROM foo) SELECT * FROM w, bar ... 
FOR UPDATE - Make \l show database access privileges (Andrew Gilligan) + Make \l show database access privileges (Andrew Gilligan) - Make \l+ show database sizes, if permissions + Make \l+ show database sizes, if permissions allow (Andrew Gilligan) - Add the \ef command to edit function definitions + Add the \ef command to edit function definitions (Abhijit Menon-Sen) @@ -9167,28 +9167,28 @@ WITH w AS (SELECT * FROM foo) SELECT * FROM w, bar ... FOR UPDATE - <application>psql</> \d* commands + <application>psql</application> \d* commands - Make \d* commands that do not have a pattern argument - show system objects only if the S modifier is specified + Make \d* commands that do not have a pattern argument + show system objects only if the S modifier is specified (Greg Sabino Mullane, Bruce) The former behavior was inconsistent across different variants - of \d, and in most cases it provided no easy way to see + of \d, and in most cases it provided no easy way to see just user objects. - Improve \d* commands to work with older - PostgreSQL server versions (back to 7.4), + Improve \d* commands to work with older + PostgreSQL server versions (back to 7.4), not only the current server version (Guillaume Lelarge) @@ -9196,14 +9196,14 @@ WITH w AS (SELECT * FROM foo) SELECT * FROM w, bar ... FOR UPDATE - Make \d show foreign-key constraints that reference + Make \d show foreign-key constraints that reference the selected table (Kenneth D'Souza) - Make \d on a sequence show its column values + Make \d on a sequence show its column values (Euler Taveira de Oliveira) @@ -9211,43 +9211,43 @@ WITH w AS (SELECT * FROM foo) SELECT * FROM w, bar ... FOR UPDATE Add column storage type and other relation options to the - \d+ display (Gregory Stark, Euler Taveira de + \d+ display (Gregory Stark, Euler Taveira de Oliveira) - Show relation size in \dt+ output (Dickson S. + Show relation size in \dt+ output (Dickson S. Guedes) - Show the possible values of enum types in \dT+ + Show the possible values of enum types in \dT+ (David Fetter) - Allow \dC to accept a wildcard pattern, which matches + Allow \dC to accept a wildcard pattern, which matches either datatype involved in the cast (Tom) - Add a function type column to \df's output, and add + Add a function type column to \df's output, and add options to list only selected types of functions (David Fetter) - Make \df not hide functions that take or return - type cstring (Tom) + Make \df not hide functions that take or return + type cstring (Tom) @@ -9263,13 +9263,13 @@ WITH w AS (SELECT * FROM foo) SELECT * FROM w, bar ... FOR UPDATE - <application>pg_dump</> + <application>pg_dump</application> - Add a --no-tablespaces option to - pg_dump/pg_dumpall/pg_restore + Add a --no-tablespaces option to + pg_dump/pg_dumpall/pg_restore so that dumps can be restored to clusters that have non-matching tablespace layouts (Gavin Roy) @@ -9277,23 +9277,23 @@ WITH w AS (SELECT * FROM foo) SELECT * FROM w, bar ... FOR UPDATE - Remove These options were too frequently confused with the option to - select a database name in other PostgreSQL + select a database name in other PostgreSQL client applications. The functionality is still available, but you must now spell out the long option name - or . - Remove @@ -9305,15 +9305,15 @@ WITH w AS (SELECT * FROM foo) SELECT * FROM w, bar ... 
FOR UPDATE - Disable statement_timeout during dump and restore + Disable statement_timeout during dump and restore (Joshua Drake) - Add pg_dump/pg_dumpall option - (David Gould) @@ -9324,7 +9324,7 @@ WITH w AS (SELECT * FROM foo) SELECT * FROM w, bar ... FOR UPDATE - Reorder pg_dump --data-only output + Reorder pg_dump --data-only output to dump tables referenced by foreign keys before the referencing tables (Tom) @@ -9332,27 +9332,27 @@ WITH w AS (SELECT * FROM foo) SELECT * FROM w, bar ... FOR UPDATE This allows data loads when foreign keys are already present. If circular references make a safe ordering impossible, a - NOTICE is issued. + NOTICE is issued. - Allow pg_dump, pg_dumpall, and - pg_restore to use a specified role (Benedek + Allow pg_dump, pg_dumpall, and + pg_restore to use a specified role (Benedek László) - Allow pg_restore to use multiple concurrent + Allow pg_restore to use multiple concurrent connections to do the restore (Andrew) The number of concurrent connections is controlled by the option - --jobs. This is supported only for custom-format archives. + --jobs. This is supported only for custom-format archives. @@ -9366,24 +9366,24 @@ WITH w AS (SELECT * FROM foo) SELECT * FROM w, bar ... FOR UPDATE Programming Tools - <application>libpq</> + <application>libpq</application> - Allow the OID to be specified when importing a large - object, via new function lo_import_with_oid() (Tatsuo) + Allow the OID to be specified when importing a large + object, via new function lo_import_with_oid() (Tatsuo) - Add events support (Andrew Chernow, Merlin Moncure) + Add events support (Andrew Chernow, Merlin Moncure) This adds the ability to register callbacks to manage private - data associated with PGconn and PGresult + data associated with PGconn and PGresult objects. @@ -9397,18 +9397,18 @@ WITH w AS (SELECT * FROM foo) SELECT * FROM w, bar ... FOR UPDATE - Make PQexecParams() and related functions return - PGRES_EMPTY_QUERY for an empty query (Tom) + Make PQexecParams() and related functions return + PGRES_EMPTY_QUERY for an empty query (Tom) - They previously returned PGRES_COMMAND_OK. + They previously returned PGRES_COMMAND_OK. - Document how to avoid the overhead of WSACleanup() + Document how to avoid the overhead of WSACleanup() on Windows (Andrew Chernow) @@ -9434,22 +9434,22 @@ WITH w AS (SELECT * FROM foo) SELECT * FROM w, bar ... FOR UPDATE - <application>libpq</> <acronym>SSL</> (Secure Sockets Layer) + <title><application>libpq</application> <acronym>SSL</acronym> (Secure Sockets Layer) support - Fix certificate validation for SSL connections + Fix certificate validation for SSL connections (Magnus) - libpq now supports verifying both the certificate - and the name of the server when making SSL + libpq now supports verifying both the certificate + and the name of the server when making SSL connections. If a root certificate is not available to use for - verification, SSL connections will fail. The - sslmode parameter is used to enable certificate + verification, SSL connections will fail. The + sslmode parameter is used to enable certificate verification and set the level of checking. The default is still not to do any verification, allowing connections to SSL-enabled servers without requiring a root certificate on the @@ -9463,7 +9463,7 @@ WITH w AS (SELECT * FROM foo) SELECT * FROM w, bar ... 
FOR UPDATE - If a certificate CN starts with *, it will + If a certificate CN starts with *, it will be treated as a wildcard when matching the hostname, allowing the use of the same certificate for multiple servers. @@ -9478,21 +9478,21 @@ WITH w AS (SELECT * FROM foo) SELECT * FROM w, bar ... FOR UPDATE - Add a PQinitOpenSSL function to allow greater control + Add a PQinitOpenSSL function to allow greater control over OpenSSL/libcrypto initialization (Andrew Chernow) - Make libpq unregister its OpenSSL + Make libpq unregister its OpenSSL callbacks when no database connections remain open (Bruce, Magnus, Russell Smith) This is required for applications that unload the libpq library, - otherwise invalid OpenSSL callbacks will remain. + otherwise invalid OpenSSL callbacks will remain. @@ -9501,7 +9501,7 @@ WITH w AS (SELECT * FROM foo) SELECT * FROM w, bar ... FOR UPDATE - <application>ecpg</> + <application>ecpg</application> @@ -9527,7 +9527,7 @@ WITH w AS (SELECT * FROM foo) SELECT * FROM w, bar ... FOR UPDATE - Server Programming Interface (<acronym>SPI</>) + Server Programming Interface (<acronym>SPI</acronym>) @@ -9539,8 +9539,8 @@ WITH w AS (SELECT * FROM foo) SELECT * FROM w, bar ... FOR UPDATE - Add new SPI_OK_REWRITTEN return code for - SPI_execute() (Heikki) + Add new SPI_OK_REWRITTEN return code for + SPI_execute() (Heikki) @@ -9551,12 +9551,12 @@ WITH w AS (SELECT * FROM foo) SELECT * FROM w, bar ... FOR UPDATE - Remove unnecessary inclusions from executor/spi.h (Tom) + Remove unnecessary inclusions from executor/spi.h (Tom) - SPI-using modules might need to add some #include - lines if they were depending on spi.h to include + SPI-using modules might need to add some #include + lines if they were depending on spi.h to include things for them. @@ -9573,13 +9573,13 @@ WITH w AS (SELECT * FROM foo) SELECT * FROM w, bar ... FOR UPDATE - Update build system to use Autoconf 2.61 (Peter) + Update build system to use Autoconf 2.61 (Peter) - Require GNU bison for source code builds (Peter) + Require GNU bison for source code builds (Peter) @@ -9590,63 +9590,63 @@ WITH w AS (SELECT * FROM foo) SELECT * FROM w, bar ... FOR UPDATE - Add pg_config --htmldir option + Add pg_config --htmldir option (Peter) - Pass float4 by value inside the server (Zoltan + Pass float4 by value inside the server (Zoltan Boszormenyi) - Add configure option - --disable-float4-byval to use the old behavior. + Add configure option + --disable-float4-byval to use the old behavior. External C functions that use old-style (version 0) call convention - and pass or return float4 values will be broken by this - change, so you may need the configure option if you + and pass or return float4 values will be broken by this + change, so you may need the configure option if you have such functions and don't want to update them. - Pass float8, int8, and related datatypes + Pass float8, int8, and related datatypes by value inside the server on 64-bit platforms (Zoltan Boszormenyi) - Add configure option - --disable-float8-byval to use the old behavior. + Add configure option + --disable-float8-byval to use the old behavior. As above, this change might break old-style external C functions. 
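A minimal PL/pgSQL sketch of the features listed in the PL/pgSQL section earlier (EXECUTE ... USING, RAISE ... USING DETAIL/HINT/ERRCODE, and SQLSTATE codes in EXCEPTION lists); the function name, table name, and error code are invented for illustration:

    CREATE FUNCTION lookup_item(p_id integer) RETURNS text AS $$
    DECLARE
        v_name text;
    BEGIN
        -- EXECUTE ... USING passes the value as a parameter instead of
        -- pasting text into the query string
        EXECUTE 'SELECT name FROM items WHERE id = $1' INTO v_name USING p_id;
        IF v_name IS NULL THEN
            RAISE EXCEPTION 'item % not found', p_id
                  USING DETAIL  = 'no matching row in table items',
                        HINT    = 'check the id',
                        ERRCODE = 'P0404';          -- custom SQLSTATE
        END IF;
        RETURN v_name;
    EXCEPTION
        WHEN SQLSTATE 'P0404' THEN                  -- catch by SQLSTATE code
            RETURN NULL;
    END;
    $$ LANGUAGE plpgsql;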
- Add configure options --with-segsize, - --with-blocksize, --with-wal-blocksize, - --with-wal-segsize (Zdenek Kotala, Tom) + Add configure options --with-segsize, + --with-blocksize, --with-wal-blocksize, + --with-wal-segsize (Zdenek Kotala, Tom) This simplifies build-time control over several constants that previously could only be changed by editing - pg_config_manual.h. + pg_config_manual.h. - Allow threaded builds on Solaris 2.5 (Bruce) + Allow threaded builds on Solaris 2.5 (Bruce) - Use the system's getopt_long() on Solaris + Use the system's getopt_long() on Solaris (Zdenek Kotala, Tom) @@ -9658,16 +9658,16 @@ WITH w AS (SELECT * FROM foo) SELECT * FROM w, bar ... FOR UPDATE - Add support for the Sun Studio compiler on - Linux (Julius Stroffek) + Add support for the Sun Studio compiler on + Linux (Julius Stroffek) - Append the major version number to the backend gettext - domain, and the soname major version number to - libraries' gettext domain (Peter) + Append the major version number to the backend gettext + domain, and the soname major version number to + libraries' gettext domain (Peter) @@ -9677,21 +9677,21 @@ WITH w AS (SELECT * FROM foo) SELECT * FROM w, bar ... FOR UPDATE - Add support for code coverage testing with gcov + Add support for code coverage testing with gcov (Michelle Caisse) - Allow out-of-tree builds on Mingw and - Cygwin (Richard Evans) + Allow out-of-tree builds on Mingw and + Cygwin (Richard Evans) - Fix the use of Mingw as a cross-compiling source + Fix the use of Mingw as a cross-compiling source platform (Peter) @@ -9710,20 +9710,20 @@ WITH w AS (SELECT * FROM foo) SELECT * FROM w, bar ... FOR UPDATE - This adds support for daylight saving time (DST) + This adds support for daylight saving time (DST) calculations beyond the year 2038. - Deprecate use of platform's time_t data type (Tom) + Deprecate use of platform's time_t data type (Tom) - Some platforms have migrated to 64-bit time_t, some have + Some platforms have migrated to 64-bit time_t, some have not, and Windows can't make up its mind what it's doing. Define - pg_time_t to have the same meaning as time_t, + pg_time_t to have the same meaning as time_t, but always be 64 bits (unless the platform has no 64-bit integer type), and use that type in all module APIs and on-disk data formats. @@ -9745,7 +9745,7 @@ WITH w AS (SELECT * FROM foo) SELECT * FROM w, bar ... FOR UPDATE - Improve gettext support to allow better translation + Improve gettext support to allow better translation of plurals (Peter) @@ -9758,44 +9758,44 @@ WITH w AS (SELECT * FROM foo) SELECT * FROM w, bar ... 
FOR UPDATE - Add more DTrace probes (Robert Lor) + Add more DTrace probes (Robert Lor) - Enable DTrace support on macOS - Leopard and other non-Solaris platforms (Robert Lor) + Enable DTrace support on macOS + Leopard and other non-Solaris platforms (Robert Lor) Simplify and standardize conversions between C strings and - text datums, by providing common functions for the purpose + text datums, by providing common functions for the purpose (Brendan Jurd, Tom) - Clean up the include/catalog/ header files so that + Clean up the include/catalog/ header files so that frontend programs can include them without including - postgres.h + postgres.h (Zdenek Kotala) - Make name char-aligned, and suppress zero-padding of - name entries in indexes (Tom) + Make name char-aligned, and suppress zero-padding of + name entries in indexes (Tom) - Recover better if dynamically-loaded code executes exit() + Recover better if dynamically-loaded code executes exit() (Tom) @@ -9816,55 +9816,55 @@ WITH w AS (SELECT * FROM foo) SELECT * FROM w, bar ... FOR UPDATE - Add shmem_startup_hook() for custom shared memory + Add shmem_startup_hook() for custom shared memory requirements (Tom) - Replace the index access method amgetmulti entry point - with amgetbitmap, and extend the API for - amgettuple to support run-time determination of + Replace the index access method amgetmulti entry point + with amgetbitmap, and extend the API for + amgettuple to support run-time determination of operator lossiness (Heikki, Tom, Teodor) - The API for GIN and GiST opclass consistent functions + The API for GIN and GiST opclass consistent functions has been extended as well. - Add support for partial-match searches in GIN indexes + Add support for partial-match searches in GIN indexes (Teodor Sigaev, Oleg Bartunov) - Replace pg_class column reltriggers - with boolean relhastriggers (Simon) + Replace pg_class column reltriggers + with boolean relhastriggers (Simon) - Also remove unused pg_class columns - relukeys, relfkeys, and - relrefs. + Also remove unused pg_class columns + relukeys, relfkeys, and + relrefs. - Add a relistemp column to pg_class + Add a relistemp column to pg_class to ease identification of temporary tables (Tom) - Move platform FAQs into the main documentation + Move platform FAQs into the main documentation (Peter) @@ -9878,7 +9878,7 @@ WITH w AS (SELECT * FROM foo) SELECT * FROM w, bar ... FOR UPDATE - Add support for the KOI8U (Ukrainian) encoding + Add support for the KOI8U (Ukrainian) encoding (Peter) @@ -9895,8 +9895,8 @@ WITH w AS (SELECT * FROM foo) SELECT * FROM w, bar ... FOR UPDATE - Fix problem when setting LC_MESSAGES on - MSVC-built systems (Hiroshi Inoue, Hiroshi + Fix problem when setting LC_MESSAGES on + MSVC-built systems (Hiroshi Inoue, Hiroshi Saito, Magnus) @@ -9912,65 +9912,65 @@ WITH w AS (SELECT * FROM foo) SELECT * FROM w, bar ... 
FOR UPDATE - Add contrib/auto_explain to automatically run - EXPLAIN on queries exceeding a specified duration + Add contrib/auto_explain to automatically run + EXPLAIN on queries exceeding a specified duration (Itagaki Takahiro, Tom) - Add contrib/btree_gin to allow GIN indexes to + Add contrib/btree_gin to allow GIN indexes to handle more datatypes (Oleg, Teodor) - Add contrib/citext to provide a case-insensitive, + Add contrib/citext to provide a case-insensitive, multibyte-aware text data type (David Wheeler) - Add contrib/pg_stat_statements for server-wide + Add contrib/pg_stat_statements for server-wide tracking of statement execution statistics (Itagaki Takahiro) - Add duration and query mode options to contrib/pgbench + Add duration and query mode options to contrib/pgbench (Itagaki Takahiro) - Make contrib/pgbench use table names - pgbench_accounts, pgbench_branches, - pgbench_history, and pgbench_tellers, - rather than just accounts, branches, - history, and tellers (Tom) + Make contrib/pgbench use table names + pgbench_accounts, pgbench_branches, + pgbench_history, and pgbench_tellers, + rather than just accounts, branches, + history, and tellers (Tom) This is to reduce the risk of accidentally destroying real data - by running pgbench. + by running pgbench. - Fix contrib/pgstattuple to handle tables and + Fix contrib/pgstattuple to handle tables and indexes with over 2 billion pages (Tatsuhito Kasahara) - In contrib/fuzzystrmatch, add a version of the + In contrib/fuzzystrmatch, add a version of the Levenshtein string-distance function that allows the user to specify the costs of insertion, deletion, and substitution (Volkan Yazici) @@ -9979,28 +9979,28 @@ WITH w AS (SELECT * FROM foo) SELECT * FROM w, bar ... FOR UPDATE - Make contrib/ltree support multibyte encodings + Make contrib/ltree support multibyte encodings (laser) - Enable contrib/dblink to use connection information + Enable contrib/dblink to use connection information stored in the SQL/MED catalogs (Joe Conway) - Improve contrib/dblink's reporting of errors from + Improve contrib/dblink's reporting of errors from the remote server (Joe Conway) - Make contrib/dblink set client_encoding + Make contrib/dblink set client_encoding to match the local database's encoding (Joe Conway) @@ -10012,9 +10012,9 @@ WITH w AS (SELECT * FROM foo) SELECT * FROM w, bar ... FOR UPDATE - Make sure contrib/dblink uses a password supplied + Make sure contrib/dblink uses a password supplied by the user, and not accidentally taken from the server's - .pgpass file (Joe Conway) + .pgpass file (Joe Conway) @@ -10024,51 +10024,51 @@ WITH w AS (SELECT * FROM foo) SELECT * FROM w, bar ... FOR UPDATE - Add fsm_page_contents() - to contrib/pageinspect (Heikki) + Add fsm_page_contents() + to contrib/pageinspect (Heikki) - Modify get_raw_page() to support free space map - (*_fsm) files. Also update - contrib/pg_freespacemap. + Modify get_raw_page() to support free space map + (*_fsm) files. Also update + contrib/pg_freespacemap. 
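Hedged usage sketches for two of the contrib items above; they assume the fuzzystrmatch and dblink modules are installed, and the connection string is only a placeholder:

    -- levenshtein() with explicit insertion/deletion/substitution costs
    SELECT levenshtein('kitten', 'sitting'),
           levenshtein('kitten', 'sitting', 1, 2, 3);

    -- dblink() against a placeholder connection string; the caller must
    -- spell out the result's row type
    SELECT * FROM dblink('dbname=postgres', 'SELECT 1, ''x''')
             AS t(a integer, b text);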
- Add support for multibyte encodings to contrib/pg_trgm + Add support for multibyte encodings to contrib/pg_trgm (Teodor) - Rewrite contrib/intagg to use new - functions array_agg() and unnest() + Rewrite contrib/intagg to use new + functions array_agg() and unnest() (Tom) - Make contrib/pg_standby recover all available WAL before + Make contrib/pg_standby recover all available WAL before failover (Fujii Masao, Simon, Heikki) To make this work safely, you now need to set the new - recovery_end_command option in recovery.conf - to clean up the trigger file after failover. pg_standby + recovery_end_command option in recovery.conf + to clean up the trigger file after failover. pg_standby will no longer remove the trigger file itself. - contrib/pg_standby's option is now a no-op, because it is unsafe to use a symlink (Simon) diff --git a/doc/src/sgml/release-9.0.sgml b/doc/src/sgml/release-9.0.sgml index e7d2ffddaf..9e90f5a7f3 100644 --- a/doc/src/sgml/release-9.0.sgml +++ b/doc/src/sgml/release-9.0.sgml @@ -12,11 +12,11 @@ This release contains a variety of fixes from 9.0.22. For information about new features in the 9.0 major release, see - . + . - This is expected to be the last PostgreSQL release + This is expected to be the last PostgreSQL release in the 9.0.X series. Users are encouraged to update to a newer release branch soon. @@ -30,7 +30,7 @@ However, if you are upgrading from a version earlier than 9.0.18, - see . + see . @@ -42,8 +42,8 @@ - Fix contrib/pgcrypto to detect and report - too-short crypt() salts (Josh Kupershmidt) + Fix contrib/pgcrypto to detect and report + too-short crypt() salts (Josh Kupershmidt) @@ -69,13 +69,13 @@ - Fix insertion of relations into the relation cache init file + Fix insertion of relations into the relation cache init file (Tom Lane) An oversight in a patch in the most recent minor releases - caused pg_trigger_tgrelid_tgname_index to be omitted + caused pg_trigger_tgrelid_tgname_index to be omitted from the init file. Subsequent sessions detected this, then deemed the init file to be broken and silently ignored it, resulting in a significant degradation in session startup time. In addition to fixing @@ -93,7 +93,7 @@ - Improve LISTEN startup time when there are many unread + Improve LISTEN startup time when there are many unread notifications (Matt Newell) @@ -108,13 +108,13 @@ too many bugs in practice, both in the underlying OpenSSL library and in our usage of it. Renegotiation will be removed entirely in 9.5 and later. In the older branches, just change the default value - of ssl_renegotiation_limit to zero (disabled). + of ssl_renegotiation_limit to zero (disabled). 
- Lower the minimum values of the *_freeze_max_age parameters + Lower the minimum values of the *_freeze_max_age parameters (Andres Freund) @@ -126,14 +126,14 @@ - Limit the maximum value of wal_buffers to 2GB to avoid + Limit the maximum value of wal_buffers to 2GB to avoid server crashes (Josh Berkus) - Fix rare internal overflow in multiplication of numeric values + Fix rare internal overflow in multiplication of numeric values (Dean Rasheed) @@ -141,21 +141,21 @@ Guard against hard-to-reach stack overflows involving record types, - range types, json, jsonb, tsquery, - ltxtquery and query_int (Noah Misch) + range types, json, jsonb, tsquery, + ltxtquery and query_int (Noah Misch) - Fix handling of DOW and DOY in datetime input + Fix handling of DOW and DOY in datetime input (Greg Stark) These tokens aren't meant to be used in datetime values, but previously they resulted in opaque internal error messages rather - than invalid input syntax. + than invalid input syntax. @@ -168,7 +168,7 @@ Add recursion depth protections to regular expression, SIMILAR - TO, and LIKE matching (Tom Lane) + TO, and LIKE matching (Tom Lane) @@ -212,22 +212,22 @@ - Fix unexpected out-of-memory situation during sort errors - when using tuplestores with small work_mem settings (Tom + Fix unexpected out-of-memory situation during sort errors + when using tuplestores with small work_mem settings (Tom Lane) - Fix very-low-probability stack overrun in qsort (Tom Lane) + Fix very-low-probability stack overrun in qsort (Tom Lane) - Fix invalid memory alloc request size failure in hash joins - with large work_mem settings (Tomas Vondra, Tom Lane) + Fix invalid memory alloc request size failure in hash joins + with large work_mem settings (Tomas Vondra, Tom Lane) @@ -240,9 +240,9 @@ These mistakes could lead to incorrect query plans that would give wrong answers, or to assertion failures in assert-enabled builds, or to odd planner errors such as could not devise a query plan for the - given query, could not find pathkey item to - sort, plan should not reference subplan's variable, - or failed to assign all NestLoopParams to plan nodes. + given query, could not find pathkey item to + sort, plan should not reference subplan's variable, + or failed to assign all NestLoopParams to plan nodes. Thanks are due to Andreas Seltenreich and Piotr Stefaniak for fuzz testing that exposed these problems. @@ -263,12 +263,12 @@ During postmaster shutdown, ensure that per-socket lock files are removed and listen sockets are closed before we remove - the postmaster.pid file (Tom Lane) + the postmaster.pid file (Tom Lane) This avoids race-condition failures if an external script attempts to - start a new postmaster as soon as pg_ctl stop returns. + start a new postmaster as soon as pg_ctl stop returns. 
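A short illustration of the DOW/DOY item above: those tokens are field names for EXTRACT, not valid datetime input, and using them as input now draws the ordinary invalid-input error rather than an internal message:

    SELECT EXTRACT(dow FROM DATE '2015-10-05'),   -- 1 (Monday)
           EXTRACT(doy FROM DATE '2015-10-05');   -- 278
    -- by contrast, 'dow' given as a datetime value is simply rejected:
    -- SELECT 'dow'::date;   -- ERROR: invalid input syntax for type date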
@@ -288,7 +288,7 @@ - Do not print a WARNING when an autovacuum worker is already + Do not print a WARNING when an autovacuum worker is already gone when we attempt to signal it, and reduce log verbosity for such signals (Tom Lane) @@ -321,30 +321,30 @@ Fix off-by-one error that led to otherwise-harmless warnings - about apparent wraparound in subtrans/multixact truncation + about apparent wraparound in subtrans/multixact truncation (Thomas Munro) - Fix misreporting of CONTINUE and MOVE statement - types in PL/pgSQL's error context messages + Fix misreporting of CONTINUE and MOVE statement + types in PL/pgSQL's error context messages (Pavel Stehule, Tom Lane) - Fix some places in PL/Tcl that neglected to check for - failure of malloc() calls (Michael Paquier, Álvaro + Fix some places in PL/Tcl that neglected to check for + failure of malloc() calls (Michael Paquier, Álvaro Herrera) - Improve libpq's handling of out-of-memory conditions + Improve libpq's handling of out-of-memory conditions (Michael Paquier, Heikki Linnakangas) @@ -352,61 +352,61 @@ Fix memory leaks and missing out-of-memory checks - in ecpg (Michael Paquier) + in ecpg (Michael Paquier) - Fix psql's code for locale-aware formatting of numeric + Fix psql's code for locale-aware formatting of numeric output (Tom Lane) - The formatting code invoked by \pset numericlocale on + The formatting code invoked by \pset numericlocale on did the wrong thing for some uncommon cases such as numbers with an exponent but no decimal point. It could also mangle already-localized - output from the money data type. + output from the money data type. - Prevent crash in psql's \c command when + Prevent crash in psql's \c command when there is no current connection (Noah Misch) - Ensure that temporary files created during a pg_dump - run with tar-format output are not world-readable (Michael + Ensure that temporary files created during a pg_dump + run with tar-format output are not world-readable (Michael Paquier) - Fix pg_dump and pg_upgrade to support - cases where the postgres or template1 database + Fix pg_dump and pg_upgrade to support + cases where the postgres or template1 database is in a non-default tablespace (Marti Raudsepp, Bruce Momjian) - Fix pg_dump to handle object privileges sanely when + Fix pg_dump to handle object privileges sanely when dumping from a server too old to have a particular privilege type (Tom Lane) When dumping functions or procedural languages from pre-7.3 - servers, pg_dump would - produce GRANT/REVOKE commands that revoked the + servers, pg_dump would + produce GRANT/REVOKE commands that revoked the owner's grantable privileges and instead granted all privileges - to PUBLIC. Since the privileges involved are - just USAGE and EXECUTE, this isn't a security + to PUBLIC. Since the privileges involved are + just USAGE and EXECUTE, this isn't a security problem, but it's certainly a surprising representation of the older systems' behavior. Fix it to leave the default privilege state alone in these cases. @@ -415,23 +415,23 @@ - Fix pg_dump to dump shell types (Tom Lane) + Fix pg_dump to dump shell types (Tom Lane) Shell types (that is, not-yet-fully-defined types) aren't useful for - much, but nonetheless pg_dump should dump them. + much, but nonetheless pg_dump should dump them. 
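For reference on the shell-type item above, a bare CREATE TYPE creates such a not-yet-defined type; the name below is invented:

    -- declares a placeholder ("shell") type with no definition yet; it can
    -- be referenced while its input/output functions are being written,
    -- and pg_dump now includes such types in its output
    CREATE TYPE my_shell_type;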
Fix spinlock assembly code for PPC hardware to be compatible - with AIX's native assembler (Tom Lane) + with AIX's native assembler (Tom Lane) - Building with gcc didn't work if gcc + Building with gcc didn't work if gcc had been configured to use the native assembler, which is becoming more common. @@ -439,14 +439,14 @@ - On AIX, test the -qlonglong compiler option + On AIX, test the -qlonglong compiler option rather than just assuming it's safe to use (Noah Misch) - On AIX, use -Wl,-brtllib link option to allow + On AIX, use -Wl,-brtllib link option to allow symbols to be resolved at runtime (Noah Misch) @@ -458,38 +458,38 @@ Avoid use of inline functions when compiling with - 32-bit xlc, due to compiler bugs (Noah Misch) + 32-bit xlc, due to compiler bugs (Noah Misch) - Use librt for sched_yield() when necessary, + Use librt for sched_yield() when necessary, which it is on some Solaris versions (Oskari Saarenmaa) - Fix Windows install.bat script to handle target directory + Fix Windows install.bat script to handle target directory names that contain spaces (Heikki Linnakangas) - Make the numeric form of the PostgreSQL version number - (e.g., 90405) readily available to extension Makefiles, - as a variable named VERSION_NUM (Michael Paquier) + Make the numeric form of the PostgreSQL version number + (e.g., 90405) readily available to extension Makefiles, + as a variable named VERSION_NUM (Michael Paquier) - Update time zone data files to tzdata release 2015g for + Update time zone data files to tzdata release 2015g for DST law changes in Cayman Islands, Fiji, Moldova, Morocco, Norfolk Island, North Korea, Turkey, and Uruguay. There is a new zone name - America/Fort_Nelson for the Canadian Northern Rockies. + America/Fort_Nelson for the Canadian Northern Rockies. @@ -509,11 +509,11 @@ This release contains a small number of fixes from 9.0.21. For information about new features in the 9.0 major release, see - . + . - The PostgreSQL community will stop releasing updates + The PostgreSQL community will stop releasing updates for the 9.0.X release series in September 2015. Users are encouraged to update to a newer release branch soon. @@ -527,7 +527,7 @@ However, if you are upgrading from a version earlier than 9.0.18, - see . + see . @@ -544,7 +544,7 @@ With just the wrong timing of concurrent activity, a VACUUM - FULL on a system catalog might fail to update the init file + FULL on a system catalog might fail to update the init file that's used to avoid cache-loading work for new sessions. This would result in later sessions being unable to access that catalog at all. This is a very ancient bug, but it's so hard to trigger that no @@ -555,13 +555,13 @@ Avoid deadlock between incoming sessions and CREATE/DROP - DATABASE (Tom Lane) + DATABASE (Tom Lane) A new session starting in a database that is the target of - a DROP DATABASE command, or is the template for - a CREATE DATABASE command, could cause the command to wait + a DROP DATABASE command, or is the template for + a CREATE DATABASE command, could cause the command to wait for five seconds and then fail, even if the new session would have exited before that. @@ -583,11 +583,11 @@ This release contains a small number of fixes from 9.0.20. For information about new features in the 9.0 major release, see - . + . - The PostgreSQL community will stop releasing updates + The PostgreSQL community will stop releasing updates for the 9.0.X release series in September 2015. Users are encouraged to update to a newer release branch soon. 
@@ -601,7 +601,7 @@ However, if you are upgrading from a version earlier than 9.0.18, - see . + see . @@ -613,12 +613,12 @@ - Avoid failures while fsync'ing data directory during + Avoid failures while fsync'ing data directory during crash restart (Abhijit Menon-Sen, Tom Lane) - In the previous minor releases we added a patch to fsync + In the previous minor releases we added a patch to fsync everything in the data directory after a crash. Unfortunately its response to any error condition was to fail, thereby preventing the server from starting up, even when the problem was quite harmless. @@ -632,29 +632,29 @@ - Remove configure's check prohibiting linking to a - threaded libpython - on OpenBSD (Tom Lane) + Remove configure's check prohibiting linking to a + threaded libpython + on OpenBSD (Tom Lane) The failure this restriction was meant to prevent seems to not be a - problem anymore on current OpenBSD + problem anymore on current OpenBSD versions. - Allow libpq to use TLS protocol versions beyond v1 + Allow libpq to use TLS protocol versions beyond v1 (Noah Misch) - For a long time, libpq was coded so that the only SSL + For a long time, libpq was coded so that the only SSL protocol it would allow was TLS v1. Now that newer TLS versions are becoming popular, allow it to negotiate the highest commonly-supported - TLS version with the server. (PostgreSQL servers were + TLS version with the server. (PostgreSQL servers were already capable of such negotiation, so no change is needed on the server side.) This is a back-patch of a change already released in 9.4.0. @@ -677,11 +677,11 @@ This release contains a variety of fixes from 9.0.19. For information about new features in the 9.0 major release, see - . + . - The PostgreSQL community will stop releasing updates + The PostgreSQL community will stop releasing updates for the 9.0.X release series in September 2015. Users are encouraged to update to a newer release branch soon. @@ -695,7 +695,7 @@ However, if you are upgrading from a version earlier than 9.0.18, - see . + see . @@ -727,7 +727,7 @@ - Our replacement implementation of snprintf() failed to + Our replacement implementation of snprintf() failed to check for errors reported by the underlying system library calls; the main case that might be missed is out-of-memory situations. In the worst case this might lead to information exposure, due to our @@ -737,7 +737,7 @@ - It remains possible that some calls of the *printf() + It remains possible that some calls of the *printf() family of functions are vulnerable to information disclosure if an out-of-memory error occurs at just the wrong time. We judge the risk to not be large, but will continue analysis in this area. @@ -747,15 +747,15 @@ - In contrib/pgcrypto, uniformly report decryption failures - as Wrong key or corrupt data (Noah Misch) + In contrib/pgcrypto, uniformly report decryption failures + as Wrong key or corrupt data (Noah Misch) Previously, some cases of decryption with an incorrect key could report other error message texts. It has been shown that such variance in error reports can aid attackers in recovering keys from other systems. - While it's unknown whether pgcrypto's specific behaviors + While it's unknown whether pgcrypto's specific behaviors are likewise exploitable, it seems better to avoid the risk by using a one-size-fits-all message. 
(CVE-2015-3167) @@ -786,7 +786,7 @@ This oversight in the planner has been observed to cause could - not find RelOptInfo for given relids errors, but it seems possible + not find RelOptInfo for given relids errors, but it seems possible that sometimes an incorrect query plan might get past that consistency check and result in silently-wrong query output. @@ -814,7 +814,7 @@ This oversight has been seen to lead to failed to join all - relations together errors in queries involving LATERAL, + relations together errors in queries involving LATERAL, and that might happen in other cases as well. @@ -822,7 +822,7 @@ Fix possible deadlock at startup - when max_prepared_transactions is too small + when max_prepared_transactions is too small (Heikki Linnakangas) @@ -836,14 +836,14 @@ - Avoid cannot GetMultiXactIdMembers() during recovery error + Avoid cannot GetMultiXactIdMembers() during recovery error (Álvaro Herrera) - Recursively fsync() the data directory after a crash + Recursively fsync() the data directory after a crash (Abhijit Menon-Sen, Robert Haas) @@ -863,13 +863,13 @@ - Cope with unexpected signals in LockBufferForCleanup() + Cope with unexpected signals in LockBufferForCleanup() (Andres Freund) This oversight could result in spurious errors about multiple - backends attempting to wait for pincount 1. + backends attempting to wait for pincount 1. @@ -910,9 +910,9 @@ - ANALYZE executes index expressions many times; if there are + ANALYZE executes index expressions many times; if there are slow functions in such an expression, it's desirable to be able to - cancel the ANALYZE before that loop finishes. + cancel the ANALYZE before that loop finishes. @@ -925,20 +925,20 @@ - Recommend setting include_realm to 1 when using + Recommend setting include_realm to 1 when using Kerberos/GSSAPI/SSPI authentication (Stephen Frost) Without this, identically-named users from different realms cannot be distinguished. For the moment this is only a documentation change, but - it will become the default setting in PostgreSQL 9.5. + it will become the default setting in PostgreSQL 9.5. - Remove code for matching IPv4 pg_hba.conf entries to + Remove code for matching IPv4 pg_hba.conf entries to IPv4-in-IPv6 addresses (Tom Lane) @@ -951,7 +951,7 @@ crashes on some systems, so let's just remove it rather than fix it. (Had we chosen to fix it, that would make for a subtle and potentially security-sensitive change in the effective meaning of - IPv4 pg_hba.conf entries, which does not seem like a good + IPv4 pg_hba.conf entries, which does not seem like a good thing to do in minor releases.) @@ -960,14 +960,14 @@ While shutting down service on Windows, periodically send status updates to the Service Control Manager to prevent it from killing the - service too soon; and ensure that pg_ctl will wait for + service too soon; and ensure that pg_ctl will wait for shutdown (Krystian Bigaj) - Reduce risk of network deadlock when using libpq's + Reduce risk of network deadlock when using libpq's non-blocking mode (Heikki Linnakangas) @@ -976,25 +976,25 @@ buffer every so often, in case the server has sent enough response data to cause it to block on output. (A typical scenario is that the server is sending a stream of NOTICE messages during COPY FROM - STDIN.) This worked properly in the normal blocking mode, but not - so much in non-blocking mode. We've modified libpq + STDIN.) This worked properly in the normal blocking mode, but not + so much in non-blocking mode. 
We've modified libpq to opportunistically drain input when it can, but a full defense against this problem requires application cooperation: the application should watch for socket read-ready as well as write-ready conditions, - and be sure to call PQconsumeInput() upon read-ready. + and be sure to call PQconsumeInput() upon read-ready. - Fix array handling in ecpg (Michael Meskes) + Fix array handling in ecpg (Michael Meskes) - Fix psql to sanely handle URIs and conninfo strings as - the first parameter to \connect + Fix psql to sanely handle URIs and conninfo strings as + the first parameter to \connect (David Fetter, Andrew Dunstan, Álvaro Herrera) @@ -1007,37 +1007,37 @@ - Suppress incorrect complaints from psql on some - platforms that it failed to write ~/.psql_history at exit + Suppress incorrect complaints from psql on some + platforms that it failed to write ~/.psql_history at exit (Tom Lane) This misbehavior was caused by a workaround for a bug in very old - (pre-2006) versions of libedit. We fixed it by + (pre-2006) versions of libedit. We fixed it by removing the workaround, which will cause a similar failure to appear - for anyone still using such versions of libedit. - Recommendation: upgrade that library, or use libreadline. + for anyone still using such versions of libedit. + Recommendation: upgrade that library, or use libreadline. - Fix pg_dump's rule for deciding which casts are + Fix pg_dump's rule for deciding which casts are system-provided casts that should not be dumped (Tom Lane) - Fix dumping of views that are just VALUES(...) but have + Fix dumping of views that are just VALUES(...) but have column aliases (Tom Lane) - In pg_upgrade, force timeline 1 in the new cluster + In pg_upgrade, force timeline 1 in the new cluster (Bruce Momjian) @@ -1049,7 +1049,7 @@ - In pg_upgrade, check for improperly non-connectable + In pg_upgrade, check for improperly non-connectable databases before proceeding (Bruce Momjian) @@ -1057,28 +1057,28 @@ - In pg_upgrade, quote directory paths - properly in the generated delete_old_cluster script + In pg_upgrade, quote directory paths + properly in the generated delete_old_cluster script (Bruce Momjian) - In pg_upgrade, preserve database-level freezing info + In pg_upgrade, preserve database-level freezing info properly (Bruce Momjian) This oversight could cause missing-clog-file errors for tables within - the postgres and template1 databases. + the postgres and template1 databases. - Run pg_upgrade and pg_resetxlog with + Run pg_upgrade and pg_resetxlog with restricted privileges on Windows, so that they don't fail when run by an administrator (Muhammad Asif Naeem) @@ -1086,7 +1086,7 @@ - Fix slow sorting algorithm in contrib/intarray (Tom Lane) + Fix slow sorting algorithm in contrib/intarray (Tom Lane) @@ -1098,7 +1098,7 @@ - Update time zone data files to tzdata release 2015d + Update time zone data files to tzdata release 2015d for DST law changes in Egypt, Mongolia, and Palestine, plus historical changes in Canada and Chile. Also adopt revised zone abbreviations for the America/Adak zone (HST/HDT not HAST/HADT). @@ -1121,7 +1121,7 @@ This release contains a variety of fixes from 9.0.18. For information about new features in the 9.0 major release, see - . + . @@ -1133,7 +1133,7 @@ However, if you are upgrading from a version earlier than 9.0.18, - see . + see . 
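The pg_dump fix for VALUES-only views above concerns definitions of this shape; names are invented for illustration:

    -- a view whose body is just VALUES(...), with column aliases supplied
    -- in the view's column list
    CREATE VIEW colour_codes (code, label) AS
        VALUES (1, 'red'), (2, 'green'), (3, 'blue');
    SELECT * FROM colour_codes;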
@@ -1145,15 +1145,15 @@ - Fix buffer overruns in to_char() + Fix buffer overruns in to_char() (Bruce Momjian) - When to_char() processes a numeric formatting template - calling for a large number of digits, PostgreSQL + When to_char() processes a numeric formatting template + calling for a large number of digits, PostgreSQL would read past the end of a buffer. When processing a crafted - timestamp formatting template, PostgreSQL would write + timestamp formatting template, PostgreSQL would write past the end of a buffer. Either case could crash the server. We have not ruled out the possibility of attacks that lead to privilege escalation, though they seem unlikely. @@ -1163,27 +1163,27 @@ - Fix buffer overrun in replacement *printf() functions + Fix buffer overrun in replacement *printf() functions (Tom Lane) - PostgreSQL includes a replacement implementation - of printf and related functions. This code will overrun + PostgreSQL includes a replacement implementation + of printf and related functions. This code will overrun a stack buffer when formatting a floating point number (conversion - specifiers e, E, f, F, - g or G) with requested precision greater than + specifiers e, E, f, F, + g or G) with requested precision greater than about 500. This will crash the server, and we have not ruled out the possibility of attacks that lead to privilege escalation. A database user can trigger such a buffer overrun through - the to_char() SQL function. While that is the only - affected core PostgreSQL functionality, extension + the to_char() SQL function. While that is the only + affected core PostgreSQL functionality, extension modules that use printf-family functions may be at risk as well. - This issue primarily affects PostgreSQL on Windows. - PostgreSQL uses the system implementation of these + This issue primarily affects PostgreSQL on Windows. + PostgreSQL uses the system implementation of these functions where adequate, which it is on other modern platforms. (CVE-2015-0242) @@ -1191,12 +1191,12 @@ - Fix buffer overruns in contrib/pgcrypto + Fix buffer overruns in contrib/pgcrypto (Marko Tiikkaja, Noah Misch) - Errors in memory size tracking within the pgcrypto + Errors in memory size tracking within the pgcrypto module permitted stack buffer overruns and improper dependence on the contents of uninitialized memory. The buffer overrun cases can crash the server, and we have not ruled out the possibility of @@ -1237,7 +1237,7 @@ Some server error messages show the values of columns that violate a constraint, such as a unique constraint. If the user does not have - SELECT privilege on all columns of the table, this could + SELECT privilege on all columns of the table, this could mean exposing values that the user should not be able to see. Adjust the code so that values are displayed only when they came from the SQL command or could be selected by the user. 
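The error-message change above concerns output of this form, where the DETAIL line echoes the offending column values; table and constraint names are invented:

    CREATE TABLE accounts (id integer PRIMARY KEY);
    INSERT INTO accounts VALUES (1);
    INSERT INTO accounts VALUES (1);
    -- ERROR:  duplicate key value violates unique constraint "accounts_pkey"
    -- DETAIL: Key (id)=(1) already exists.
    -- the DETAIL values are now shown only when the user supplied them or
    -- could read them anyway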
@@ -1263,21 +1263,21 @@ Avoid possible data corruption if ALTER DATABASE SET - TABLESPACE is used to move a database to a new tablespace and then + TABLESPACE is used to move a database to a new tablespace and then shortly later move it back to its original tablespace (Tom Lane) - Avoid corrupting tables when ANALYZE inside a transaction + Avoid corrupting tables when ANALYZE inside a transaction is rolled back (Andres Freund, Tom Lane, Michael Paquier) If the failing transaction had earlier removed the last index, rule, or trigger from the table, the table would be left in a corrupted state - with the relevant pg_class flags not set though they + with the relevant pg_class flags not set though they should be. @@ -1289,22 +1289,22 @@ - In READ COMMITTED mode, queries that lock or update + In READ COMMITTED mode, queries that lock or update recently-updated rows could crash as a result of this bug. - Fix planning of SELECT FOR UPDATE when using a partial + Fix planning of SELECT FOR UPDATE when using a partial index on a child table (Kyotaro Horiguchi) - In READ COMMITTED mode, SELECT FOR UPDATE must - also recheck the partial index's WHERE condition when + In READ COMMITTED mode, SELECT FOR UPDATE must + also recheck the partial index's WHERE condition when rechecking a recently-updated row to see if it still satisfies the - query's WHERE condition. This requirement was missed if the + query's WHERE condition. This requirement was missed if the index belonged to an inheritance child table, so that it was possible to incorrectly return rows that no longer satisfy the query condition. @@ -1312,12 +1312,12 @@ - Fix corner case wherein SELECT FOR UPDATE could return a row + Fix corner case wherein SELECT FOR UPDATE could return a row twice, and possibly miss returning other rows (Tom Lane) - In READ COMMITTED mode, a SELECT FOR UPDATE + In READ COMMITTED mode, a SELECT FOR UPDATE that is scanning an inheritance tree could incorrectly return a row from a prior child table instead of the one it should return from a later child table. @@ -1327,7 +1327,7 @@ Reject duplicate column names in the referenced-columns list of - a FOREIGN KEY declaration (David Rowley) + a FOREIGN KEY declaration (David Rowley) @@ -1339,7 +1339,7 @@ - Fix bugs in raising a numeric value to a large integral power + Fix bugs in raising a numeric value to a large integral power (Tom Lane) @@ -1352,19 +1352,19 @@ - In numeric_recv(), truncate away any fractional digits - that would be hidden according to the value's dscale field + In numeric_recv(), truncate away any fractional digits + that would be hidden according to the value's dscale field (Tom Lane) - A numeric value's display scale (dscale) should + A numeric value's display scale (dscale) should never be less than the number of nonzero fractional digits; but apparently there's at least one broken client application that - transmits binary numeric values in which that's true. + transmits binary numeric values in which that's true. This leads to strange behavior since the extra digits are taken into account by arithmetic operations even though they aren't printed. - The least risky fix seems to be to truncate away such hidden + The least risky fix seems to be to truncate away such hidden digits on receipt, so that the value is indeed what it prints as. 
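The FOREIGN KEY fix above rejects declarations like the following, where the same referenced column is listed twice; names are invented:

    CREATE TABLE parent (a integer, b integer, PRIMARY KEY (a, b));
    -- now rejected, because column a is listed twice in the
    -- referenced-columns list
    CREATE TABLE child (
        x integer,
        y integer,
        FOREIGN KEY (x, y) REFERENCES parent (a, a)
    );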
@@ -1384,7 +1384,7 @@ - Fix bugs in tsquery @> tsquery + Fix bugs in tsquery @> tsquery operator (Heikki Linnakangas) @@ -1415,14 +1415,14 @@ - Fix namespace handling in xpath() (Ali Akbar) + Fix namespace handling in xpath() (Ali Akbar) - Previously, the xml value resulting from - an xpath() call would not have namespace declarations if + Previously, the xml value resulting from + an xpath() call would not have namespace declarations if the namespace declarations were attached to an ancestor element in the - input xml value, rather than to the specific element being + input xml value, rather than to the specific element being returned. Propagate the ancestral declaration so that the result is correct when considered in isolation. @@ -1431,7 +1431,7 @@ Fix planner problems with nested append relations, such as inherited - tables within UNION ALL subqueries (Tom Lane) + tables within UNION ALL subqueries (Tom Lane) @@ -1444,8 +1444,8 @@ - Exempt tables that have per-table cost_limit - and/or cost_delay settings from autovacuum's global cost + Exempt tables that have per-table cost_limit + and/or cost_delay settings from autovacuum's global cost balancing rules (Álvaro Herrera) @@ -1471,7 +1471,7 @@ the target database, if they met the usual thresholds for autovacuuming. This is at best pretty unexpected; at worst it delays response to the wraparound threat. Fix it so that if autovacuum is - turned off, workers only do anti-wraparound vacuums and + turned off, workers only do anti-wraparound vacuums and not any other work. @@ -1491,19 +1491,19 @@ Fix several cases where recovery logic improperly ignored WAL records - for COMMIT/ABORT PREPARED (Heikki Linnakangas) + for COMMIT/ABORT PREPARED (Heikki Linnakangas) The most notable oversight was - that recovery_target_xid could not be used to stop at + that recovery_target_xid could not be used to stop at a two-phase commit. - Avoid creating unnecessary .ready marker files for + Avoid creating unnecessary .ready marker files for timeline history files (Fujii Masao) @@ -1511,14 +1511,14 @@ Fix possible null pointer dereference when an empty prepared statement - is used and the log_statement setting is mod - or ddl (Fujii Masao) + is used and the log_statement setting is mod + or ddl (Fujii Masao) - Change pgstat wait timeout warning message to be LOG level, + Change pgstat wait timeout warning message to be LOG level, and rephrase it to be more understandable (Tom Lane) @@ -1527,7 +1527,7 @@ case, but it occurs often enough on our slower buildfarm members to be a nuisance. Reduce it to LOG level, and expend a bit more effort on the wording: it now reads using stale statistics instead of - current ones because stats collector is not responding. + current ones because stats collector is not responding. @@ -1541,32 +1541,32 @@ - Warn if macOS's setlocale() starts an unwanted extra + Warn if macOS's setlocale() starts an unwanted extra thread inside the postmaster (Noah Misch) - Fix processing of repeated dbname parameters - in PQconnectdbParams() (Alex Shulgin) + Fix processing of repeated dbname parameters + in PQconnectdbParams() (Alex Shulgin) Unexpected behavior ensued if the first occurrence - of dbname contained a connection string or URI to be + of dbname contained a connection string or URI to be expanded. 
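For context on the tsquery @> fix above, the operator tests whether the left query contains the right one; a minimal example:

    SELECT 'cat & rat'::tsquery @> 'cat'::tsquery;   -- t: left contains right
    SELECT 'cat'::tsquery <@ 'cat & rat'::tsquery;   -- t: commutator form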
- Ensure that libpq reports a suitable error message on + Ensure that libpq reports a suitable error message on unexpected socket EOF (Marko Tiikkaja, Tom Lane) - Depending on kernel behavior, libpq might return an + Depending on kernel behavior, libpq might return an empty error string rather than something useful when the server unexpectedly closed the socket. @@ -1574,14 +1574,14 @@ - Clear any old error message during PQreset() + Clear any old error message during PQreset() (Heikki Linnakangas) - If PQreset() is called repeatedly, and the connection + If PQreset() is called repeatedly, and the connection cannot be re-established, error messages from the failed connection - attempts kept accumulating in the PGconn's error + attempts kept accumulating in the PGconn's error string. @@ -1589,32 +1589,32 @@ Properly handle out-of-memory conditions while parsing connection - options in libpq (Alex Shulgin, Heikki Linnakangas) + options in libpq (Alex Shulgin, Heikki Linnakangas) - Fix array overrun in ecpg's version - of ParseDateTime() (Michael Paquier) + Fix array overrun in ecpg's version + of ParseDateTime() (Michael Paquier) - In initdb, give a clearer error message if a password + In initdb, give a clearer error message if a password file is specified but is empty (Mats Erik Andersson) - Fix psql's \s command to work nicely with + Fix psql's \s command to work nicely with libedit, and add pager support (Stepan Rutz, Tom Lane) - When using libedit rather than readline, \s printed the + When using libedit rather than readline, \s printed the command history in a fairly unreadable encoded format, and on recent libedit versions might fail altogether. Fix that by printing the history ourselves rather than having the library do it. A pleasant @@ -1624,7 +1624,7 @@ This patch also fixes a bug that caused newline encoding to be applied inconsistently when saving the command history with libedit. - Multiline history entries written by older psql + Multiline history entries written by older psql versions will be read cleanly with this patch, but perhaps not vice versa, depending on the exact libedit versions involved. @@ -1632,17 +1632,17 @@ - Improve consistency of parsing of psql's special + Improve consistency of parsing of psql's special variables (Tom Lane) - Allow variant spellings of on and off (such - as 1/0) for ECHO_HIDDEN - and ON_ERROR_ROLLBACK. Report a warning for unrecognized - values for COMP_KEYWORD_CASE, ECHO, - ECHO_HIDDEN, HISTCONTROL, - ON_ERROR_ROLLBACK, and VERBOSITY. Recognize + Allow variant spellings of on and off (such + as 1/0) for ECHO_HIDDEN + and ON_ERROR_ROLLBACK. Report a warning for unrecognized + values for COMP_KEYWORD_CASE, ECHO, + ECHO_HIDDEN, HISTCONTROL, + ON_ERROR_ROLLBACK, and VERBOSITY. Recognize all values for all these variables case-insensitively; previously there was a mishmash of case-sensitive and case-insensitive behaviors. 
@@ -1650,9 +1650,9 @@ - Fix psql's expanded-mode display to work - consistently when using border = 3 - and linestyle = ascii or unicode + Fix psql's expanded-mode display to work + consistently when using border = 3 + and linestyle = ascii or unicode (Stephen Frost) @@ -1666,7 +1666,7 @@ - Fix core dump in pg_dump --binary-upgrade on zero-column + Fix core dump in pg_dump --binary-upgrade on zero-column composite type (Rushabh Lathia) @@ -1674,7 +1674,7 @@ Fix block number checking - in contrib/pageinspect's get_raw_page() + in contrib/pageinspect's get_raw_page() (Tom Lane) @@ -1686,7 +1686,7 @@ - Fix contrib/pgcrypto's pgp_sym_decrypt() + Fix contrib/pgcrypto's pgp_sym_decrypt() to not fail on messages whose length is 6 less than a power of 2 (Marko Tiikkaja) @@ -1695,24 +1695,24 @@ Handle unexpected query results, especially NULLs, safely in - contrib/tablefunc's connectby() + contrib/tablefunc's connectby() (Michael Paquier) - connectby() previously crashed if it encountered a NULL + connectby() previously crashed if it encountered a NULL key value. It now prints that row but doesn't recurse further. - Avoid a possible crash in contrib/xml2's - xslt_process() (Mark Simonetti) + Avoid a possible crash in contrib/xml2's + xslt_process() (Mark Simonetti) - libxslt seems to have an undocumented dependency on + libxslt seems to have an undocumented dependency on the order in which resources are freed; reorder our calls to avoid a crash. @@ -1739,29 +1739,29 @@ With OpenLDAP versions 2.4.24 through 2.4.31, - inclusive, PostgreSQL backends can crash at exit. - Raise a warning during configure based on the + inclusive, PostgreSQL backends can crash at exit. + Raise a warning during configure based on the compile-time OpenLDAP version number, and test the crashing scenario - in the contrib/dblink regression test. + in the contrib/dblink regression test. - In non-MSVC Windows builds, ensure libpq.dll is installed + In non-MSVC Windows builds, ensure libpq.dll is installed with execute permissions (Noah Misch) - Make pg_regress remove any temporary installation it + Make pg_regress remove any temporary installation it created upon successful exit (Tom Lane) This results in a very substantial reduction in disk space usage - during make check-world, since that sequence involves + during make check-world, since that sequence involves creation of numerous temporary installations. @@ -1773,15 +1773,15 @@ - Previously, PostgreSQL assumed that the UTC offset - associated with a time zone abbreviation (such as EST) + Previously, PostgreSQL assumed that the UTC offset + associated with a time zone abbreviation (such as EST) never changes in the usage of any particular locale. However this assumption fails in the real world, so introduce the ability for a zone abbreviation to represent a UTC offset that sometimes changes. Update the zone abbreviation definition files to make use of this feature in timezone locales that have changed the UTC offset of their abbreviations since 1970 (according to the IANA timezone database). - In such timezones, PostgreSQL will now associate the + In such timezones, PostgreSQL will now associate the correct UTC offset with the abbreviation depending on the given date. @@ -1793,9 +1793,9 @@ Add CST (China Standard Time) to our lists. 
- Remove references to ADT as Arabia Daylight Time, an + Remove references to ADT as Arabia Daylight Time, an abbreviation that's been out of use since 2007; therefore, claiming - there is a conflict with Atlantic Daylight Time doesn't seem + there is a conflict with Atlantic Daylight Time doesn't seem especially helpful. Fix entirely incorrect GMT offsets for CKT (Cook Islands), FJT, and FJST (Fiji); we didn't even have them on the proper side of the date line. @@ -1804,21 +1804,21 @@ - Update time zone data files to tzdata release 2015a. + Update time zone data files to tzdata release 2015a. The IANA timezone database has adopted abbreviations of the form - AxST/AxDT + AxST/AxDT for all Australian time zones, reflecting what they believe to be current majority practice Down Under. These names do not conflict with usage elsewhere (other than ACST for Acre Summer Time, which has been in disuse since 1994). Accordingly, adopt these names into - our Default timezone abbreviation set. - The Australia abbreviation set now contains only CST, EAST, + our Default timezone abbreviation set. + The Australia abbreviation set now contains only CST, EAST, EST, SAST, SAT, and WST, all of which are thought to be mostly historical usage. Note that SAST has also been changed to be South - Africa Standard Time in the Default abbreviation set. + Africa Standard Time in the Default abbreviation set. @@ -1847,7 +1847,7 @@ This release contains a variety of fixes from 9.0.17. For information about new features in the 9.0 major release, see - . + . @@ -1865,7 +1865,7 @@ Also, if you are upgrading from a version earlier than 9.0.15, - see . + see . @@ -1877,15 +1877,15 @@ - Correctly initialize padding bytes in contrib/btree_gist - indexes on bit columns (Heikki Linnakangas) + Correctly initialize padding bytes in contrib/btree_gist + indexes on bit columns (Heikki Linnakangas) This error could result in incorrect query results due to values that should compare equal not being seen as equal. - Users with GiST indexes on bit or bit varying - columns should REINDEX those indexes after installing this + Users with GiST indexes on bit or bit varying + columns should REINDEX those indexes after installing this update. @@ -1917,7 +1917,7 @@ Fix possibly-incorrect cache invalidation during nested calls - to ReceiveSharedInvalidMessages (Andres Freund) + to ReceiveSharedInvalidMessages (Andres Freund) @@ -1944,13 +1944,13 @@ This corrects cases where TOAST pointers could be copied into other tables without being dereferenced. If the original data is later deleted, it would lead to errors like missing chunk number 0 - for toast value ... when the now-dangling pointer is used. + for toast value ... when the now-dangling pointer is used. - Fix record type has not been registered failures with + Fix record type has not been registered failures with whole-row references to the output of Append plan nodes (Tom Lane) @@ -1965,7 +1965,7 @@ Fix query-lifespan memory leak while evaluating the arguments for a - function in FROM (Tom Lane) + function in FROM (Tom Lane) @@ -1978,7 +1978,7 @@ - Fix data encoding error in hungarian.stop (Tom Lane) + Fix data encoding error in hungarian.stop (Tom Lane) @@ -1991,19 +1991,19 @@ This could cause problems (at least spurious warnings, and at worst an - infinite loop) if CREATE INDEX or CLUSTER were + infinite loop) if CREATE INDEX or CLUSTER were done later in the same transaction. 
- Clear pg_stat_activity.xact_start - during PREPARE TRANSACTION (Andres Freund) + Clear pg_stat_activity.xact_start + during PREPARE TRANSACTION (Andres Freund) - After the PREPARE, the originating session is no longer in + After the PREPARE, the originating session is no longer in a transaction, so it should not continue to display a transaction start time. @@ -2011,7 +2011,7 @@ - Fix REASSIGN OWNED to not fail for text search objects + Fix REASSIGN OWNED to not fail for text search objects (Álvaro Herrera) @@ -2023,7 +2023,7 @@ This ensures that the postmaster will properly clean up after itself - if, for example, it receives SIGINT while still + if, for example, it receives SIGINT while still starting up. @@ -2031,7 +2031,7 @@ Secure Unix-domain sockets of temporary postmasters started during - make check (Noah Misch) + make check (Noah Misch) @@ -2040,16 +2040,16 @@ the operating-system user running the test, as we previously noted in CVE-2014-0067. This change defends against that risk by placing the server's socket in a temporary, mode 0700 subdirectory - of /tmp. The hazard remains however on platforms where + of /tmp. The hazard remains however on platforms where Unix sockets are not supported, notably Windows, because then the temporary postmaster must accept local TCP connections. A useful side effect of this change is to simplify - make check testing in builds that - override DEFAULT_PGSOCKET_DIR. Popular non-default values - like /var/run/postgresql are often not writable by the + make check testing in builds that + override DEFAULT_PGSOCKET_DIR. Popular non-default values + like /var/run/postgresql are often not writable by the build user, requiring workarounds that will no longer be necessary. @@ -2069,7 +2069,7 @@ On Windows, allow new sessions to absorb values of PGC_BACKEND - parameters (such as ) from the + parameters (such as ) from the configuration file (Amit Kapila) @@ -2085,15 +2085,15 @@ - This oversight could cause initdb - and pg_upgrade to fail on Windows, if the installation - path contained both spaces and @ signs. + This oversight could cause initdb + and pg_upgrade to fail on Windows, if the installation + path contained both spaces and @ signs. - Fix linking of libpython on macOS (Tom Lane) + Fix linking of libpython on macOS (Tom Lane) @@ -2104,17 +2104,17 @@ - Avoid buffer bloat in libpq when the server + Avoid buffer bloat in libpq when the server consistently sends data faster than the client can absorb it (Shin-ichi Morita, Tom Lane) - libpq could be coerced into enlarging its input buffer + libpq could be coerced into enlarging its input buffer until it runs out of memory (which would be reported misleadingly - as lost synchronization with server). Under ordinary + as lost synchronization with server). Under ordinary circumstances it's quite far-fetched that data could be continuously - transmitted more quickly than the recv() loop can + transmitted more quickly than the recv() loop can absorb it, but this has been observed when the client is artificially slowed by scheduler constraints. 
@@ -2122,15 +2122,15 @@ - Ensure that LDAP lookup attempts in libpq time out as + Ensure that LDAP lookup attempts in libpq time out as intended (Laurenz Albe) - Fix ecpg to do the right thing when an array - of char * is the target for a FETCH statement returning more + Fix ecpg to do the right thing when an array + of char * is the target for a FETCH statement returning more than one row, as well as some other array-handling fixes (Ashutosh Bapat) @@ -2138,20 +2138,20 @@ - Fix pg_restore's processing of old-style large object + Fix pg_restore's processing of old-style large object comments (Tom Lane) A direct-to-database restore from an archive file generated by a - pre-9.0 version of pg_dump would usually fail if the + pre-9.0 version of pg_dump would usually fail if the archive contained more than a few comments for large objects. - In contrib/pgcrypto functions, ensure sensitive + In contrib/pgcrypto functions, ensure sensitive information is cleared from stack variables before returning (Marko Kreen) @@ -2159,20 +2159,20 @@ - In contrib/uuid-ossp, cache the state of the OSSP UUID + In contrib/uuid-ossp, cache the state of the OSSP UUID library across calls (Tom Lane) This improves the efficiency of UUID generation and reduces the amount - of entropy drawn from /dev/urandom, on platforms that + of entropy drawn from /dev/urandom, on platforms that have that. - Update time zone data files to tzdata release 2014e + Update time zone data files to tzdata release 2014e for DST law changes in Crimea, Egypt, and Morocco. @@ -2193,7 +2193,7 @@ This release contains a variety of fixes from 9.0.16. For information about new features in the 9.0 major release, see - . + . @@ -2205,7 +2205,7 @@ However, if you are upgrading from a version earlier than 9.0.15, - see . + see . @@ -2232,7 +2232,7 @@ Avoid race condition in checking transaction commit status during - receipt of a NOTIFY message (Marko Tiikkaja) + receipt of a NOTIFY message (Marko Tiikkaja) @@ -2256,7 +2256,7 @@ - Remove incorrect code that tried to allow OVERLAPS with + Remove incorrect code that tried to allow OVERLAPS with single-element row arguments (Joshua Yanovski) @@ -2269,17 +2269,17 @@ - Avoid getting more than AccessShareLock when de-parsing a + Avoid getting more than AccessShareLock when de-parsing a rule or view (Dean Rasheed) - This oversight resulted in pg_dump unexpectedly - acquiring RowExclusiveLock locks on tables mentioned as - the targets of INSERT/UPDATE/DELETE + This oversight resulted in pg_dump unexpectedly + acquiring RowExclusiveLock locks on tables mentioned as + the targets of INSERT/UPDATE/DELETE commands in rules. While usually harmless, that could interfere with concurrent transactions that tried to acquire, for example, - ShareLock on those tables. + ShareLock on those tables. @@ -2305,26 +2305,26 @@ - Prevent interrupts while reporting non-ERROR messages + Prevent interrupts while reporting non-ERROR messages (Tom Lane) This guards against rare server-process freezeups due to recursive - entry to syslog(), and perhaps other related problems. + entry to syslog(), and perhaps other related problems. - Prevent intermittent could not reserve shared memory region + Prevent intermittent could not reserve shared memory region failures on recent Windows versions (MauMau) - Update time zone data files to tzdata release 2014a + Update time zone data files to tzdata release 2014a for DST law changes in Fiji and Turkey, plus historical changes in Israel and Ukraine. 
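
A quick way (a sketch; the pg_timezone_names view is part of core PostgreSQL)
to see what the installed timezone data currently reports for zones touched by
an update such as the tzdata item above:

    -- abbreviation, UTC offset, and DST flag currently in effect for each zone
    SELECT name, abbrev, utc_offset, is_dst
    FROM pg_timezone_names
    WHERE name IN ('Pacific/Fiji', 'Europe/Istanbul');
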
@@ -2346,7 +2346,7 @@ This release contains a variety of fixes from 9.0.15. For information about new features in the 9.0 major release, see - . + . @@ -2358,7 +2358,7 @@ However, if you are upgrading from a version earlier than 9.0.15, - see . + see . @@ -2370,19 +2370,19 @@ - Shore up GRANT ... WITH ADMIN OPTION restrictions + Shore up GRANT ... WITH ADMIN OPTION restrictions (Noah Misch) - Granting a role without ADMIN OPTION is supposed to + Granting a role without ADMIN OPTION is supposed to prevent the grantee from adding or removing members from the granted role, but this restriction was easily bypassed by doing SET - ROLE first. The security impact is mostly that a role member can + ROLE first. The security impact is mostly that a role member can revoke the access of others, contrary to the wishes of his grantor. Unapproved role member additions are a lesser concern, since an uncooperative role member could provide most of his rights to others - anyway by creating views or SECURITY DEFINER functions. + anyway by creating views or SECURITY DEFINER functions. (CVE-2014-0060) @@ -2395,7 +2395,7 @@ The primary role of PL validator functions is to be called implicitly - during CREATE FUNCTION, but they are also normal SQL + during CREATE FUNCTION, but they are also normal SQL functions that a user can call explicitly. Calling a validator on a function actually written in some other language was not checked for and could be exploited for privilege-escalation purposes. @@ -2415,7 +2415,7 @@ If the name lookups come to different conclusions due to concurrent activity, we might perform some parts of the DDL on a different table - than other parts. At least in the case of CREATE INDEX, + than other parts. At least in the case of CREATE INDEX, this can be used to cause the permissions checks to be performed against a different table than the index creation, allowing for a privilege escalation attack. @@ -2429,12 +2429,12 @@ - The MAXDATELEN constant was too small for the longest - possible value of type interval, allowing a buffer overrun - in interval_out(). Although the datetime input + The MAXDATELEN constant was too small for the longest + possible value of type interval, allowing a buffer overrun + in interval_out(). Although the datetime input functions were more careful about avoiding buffer overrun, the limit was short enough to cause them to reject some valid inputs, such as - input containing a very long timezone name. The ecpg + input containing a very long timezone name. The ecpg library contained these vulnerabilities along with some of its own. (CVE-2014-0063) @@ -2461,7 +2461,7 @@ - Use strlcpy() and related functions to provide a clear + Use strlcpy() and related functions to provide a clear guarantee that fixed-size buffers are not overrun. Unlike the preceding items, it is unclear whether these cases really represent live issues, since in most cases there appear to be previous @@ -2473,35 +2473,35 @@ - Avoid crashing if crypt() returns NULL (Honza Horak, + Avoid crashing if crypt() returns NULL (Honza Horak, Bruce Momjian) - There are relatively few scenarios in which crypt() - could return NULL, but contrib/chkpass would crash + There are relatively few scenarios in which crypt() + could return NULL, but contrib/chkpass would crash if it did. One practical case in which this could be an issue is - if libc is configured to refuse to execute unapproved - hashing algorithms (e.g., FIPS mode). 
+ if libc is configured to refuse to execute unapproved + hashing algorithms (e.g., FIPS mode). (CVE-2014-0066) - Document risks of make check in the regression testing + Document risks of make check in the regression testing instructions (Noah Misch, Tom Lane) - Since the temporary server started by make check - uses trust authentication, another user on the same machine + Since the temporary server started by make check + uses trust authentication, another user on the same machine could connect to it as database superuser, and then potentially exploit the privileges of the operating-system user who started the tests. A future release will probably incorporate changes in the testing procedure to prevent this risk, but some public discussion is needed first. So for the moment, just warn people against using - make check when there are untrusted users on the + make check when there are untrusted users on the same machine. (CVE-2014-0067) @@ -2516,7 +2516,7 @@ The WAL update could be applied to the wrong page, potentially many pages past where it should have been. Aside from corrupting data, - this error has been observed to result in significant bloat + this error has been observed to result in significant bloat of standby servers compared to their masters, due to updates being applied far beyond where the end-of-file should have been. This failure mode does not appear to be a significant risk during crash @@ -2536,20 +2536,20 @@ was already consistent at the start of replay, thus possibly allowing hot-standby queries before the database was really consistent. Other symptoms such as PANIC: WAL contains references to invalid - pages were also possible. + pages were also possible. Fix improper locking of btree index pages while replaying - a VACUUM operation in hot-standby mode (Andres Freund, + a VACUUM operation in hot-standby mode (Andres Freund, Heikki Linnakangas, Tom Lane) This error could result in PANIC: WAL contains references to - invalid pages failures. + invalid pages failures. @@ -2572,25 +2572,25 @@ Ensure that signal handlers don't attempt to use the - process's MyProc pointer after it's no longer valid. + process's MyProc pointer after it's no longer valid. - Fix unsafe references to errno within error reporting + Fix unsafe references to errno within error reporting logic (Christian Kruse) This would typically lead to odd behaviors such as missing or - inappropriate HINT fields. + inappropriate HINT fields. - Fix possible crashes from using ereport() too early + Fix possible crashes from using ereport() too early during server startup (Tom Lane) @@ -2614,7 +2614,7 @@ - Fix length checking for Unicode identifiers (U&"..." + Fix length checking for Unicode identifiers (U&"..." syntax) containing escapes (Tom Lane) @@ -2634,26 +2634,26 @@ A previous patch allowed such keywords to be used without quoting in places such as role identifiers; but it missed cases where a - list of role identifiers was permitted, such as DROP ROLE. + list of role identifiers was permitted, such as DROP ROLE. Fix possible crash due to invalid plan for nested sub-selects, such - as WHERE (... x IN (SELECT ...) ...) IN (SELECT ...) + as WHERE (... x IN (SELECT ...) ...) IN (SELECT ...) 
(Tom Lane) - Ensure that ANALYZE creates statistics for a table column - even when all the values in it are too wide (Tom Lane) + Ensure that ANALYZE creates statistics for a table column + even when all the values in it are too wide (Tom Lane) - ANALYZE intentionally omits very wide values from its + ANALYZE intentionally omits very wide values from its histogram and most-common-values calculations, but it neglected to do something sane in the case that all the sampled entries are too wide. @@ -2661,21 +2661,21 @@ - In ALTER TABLE ... SET TABLESPACE, allow the database's + In ALTER TABLE ... SET TABLESPACE, allow the database's default tablespace to be used without a permissions check (Stephen Frost) - CREATE TABLE has always allowed such usage, - but ALTER TABLE didn't get the memo. + CREATE TABLE has always allowed such usage, + but ALTER TABLE didn't get the memo. - Fix cannot accept a set error when some arms of - a CASE return a set and others don't (Tom Lane) + Fix cannot accept a set error when some arms of + a CASE return a set and others don't (Tom Lane) @@ -2700,12 +2700,12 @@ - Fix possible misbehavior in plainto_tsquery() + Fix possible misbehavior in plainto_tsquery() (Heikki Linnakangas) - Use memmove() not memcpy() for copying + Use memmove() not memcpy() for copying overlapping memory regions. There have been no field reports of this actually causing trouble, but it's certainly risky. @@ -2713,51 +2713,51 @@ - Accept SHIFT_JIS as an encoding name for locale checking + Accept SHIFT_JIS as an encoding name for locale checking purposes (Tatsuo Ishii) - Fix misbehavior of PQhost() on Windows (Fujii Masao) + Fix misbehavior of PQhost() on Windows (Fujii Masao) - It should return localhost if no host has been specified. + It should return localhost if no host has been specified. - Improve error handling in libpq and psql - for failures during COPY TO STDOUT/FROM STDIN (Tom Lane) + Improve error handling in libpq and psql + for failures during COPY TO STDOUT/FROM STDIN (Tom Lane) In particular this fixes an infinite loop that could occur in 9.2 and up if the server connection was lost during COPY FROM - STDIN. Variants of that scenario might be possible in older + STDIN. Variants of that scenario might be possible in older versions, or with other client applications. - Fix misaligned descriptors in ecpg (MauMau) + Fix misaligned descriptors in ecpg (MauMau) - In ecpg, handle lack of a hostname in the connection + In ecpg, handle lack of a hostname in the connection parameters properly (Michael Meskes) - Fix performance regression in contrib/dblink connection + Fix performance regression in contrib/dblink connection startup (Joe Conway) @@ -2768,7 +2768,7 @@ - In contrib/isn, fix incorrect calculation of the check + In contrib/isn, fix incorrect calculation of the check digit for ISMN values (Fabien Coelho) @@ -2782,28 +2782,28 @@ - In Mingw and Cygwin builds, install the libpq DLL - in the bin directory (Andrew Dunstan) + In Mingw and Cygwin builds, install the libpq DLL + in the bin directory (Andrew Dunstan) This duplicates what the MSVC build has long done. It should fix - problems with programs like psql failing to start + problems with programs like psql failing to start because they can't find the DLL. 
- Avoid using the deprecated dllwrap tool in Cygwin builds + Avoid using the deprecated dllwrap tool in Cygwin builds (Marco Atzeri) - Don't generate plain-text HISTORY - and src/test/regress/README files anymore (Tom Lane) + Don't generate plain-text HISTORY + and src/test/regress/README files anymore (Tom Lane) @@ -2812,20 +2812,20 @@ the likely audience for plain-text format. Distribution tarballs will still contain files by these names, but they'll just be stubs directing the reader to consult the main documentation. - The plain-text INSTALL file will still be maintained, as + The plain-text INSTALL file will still be maintained, as there is arguably a use-case for that. - Update time zone data files to tzdata release 2013i + Update time zone data files to tzdata release 2013i for DST law changes in Jordan and historical changes in Cuba. - In addition, the zones Asia/Riyadh87, - Asia/Riyadh88, and Asia/Riyadh89 have been + In addition, the zones Asia/Riyadh87, + Asia/Riyadh88, and Asia/Riyadh89 have been removed, as they are no longer maintained by IANA, and never represented actual civil timekeeping practice. @@ -2847,7 +2847,7 @@ This release contains a variety of fixes from 9.0.14. For information about new features in the 9.0 major release, see - . + . @@ -2865,7 +2865,7 @@ Also, if you are upgrading from a version earlier than 9.0.13, - see . + see . @@ -2877,13 +2877,13 @@ - Fix VACUUM's tests to see whether it can - update relfrozenxid (Andres Freund) + Fix VACUUM's tests to see whether it can + update relfrozenxid (Andres Freund) - In some cases VACUUM (either manual or autovacuum) could - incorrectly advance a table's relfrozenxid value, + In some cases VACUUM (either manual or autovacuum) could + incorrectly advance a table's relfrozenxid value, allowing tuples to escape freezing, causing those rows to become invisible once 2^31 transactions have elapsed. The probability of data loss is fairly low since multiple incorrect advancements would @@ -2895,18 +2895,18 @@ The issue can be ameliorated by, after upgrading, vacuuming all tables in all databases while having vacuum_freeze_table_age + linkend="guc-vacuum-freeze-table-age">vacuum_freeze_table_age set to zero. This will fix any latent corruption but will not be able to fix all pre-existing data errors. However, an installation can be presumed safe after performing this vacuuming if it has executed fewer than 2^31 update transactions in its lifetime (check this with - SELECT txid_current() < 2^31). + SELECT txid_current() < 2^31). - Fix initialization of pg_clog and pg_subtrans + Fix initialization of pg_clog and pg_subtrans during hot standby startup (Andres Freund, Heikki Linnakangas) @@ -2932,7 +2932,7 @@ - Truncate pg_multixact contents during WAL replay + Truncate pg_multixact contents during WAL replay (Andres Freund) @@ -2954,8 +2954,8 @@ - Avoid flattening a subquery whose SELECT list contains a - volatile function wrapped inside a sub-SELECT (Tom Lane) + Avoid flattening a subquery whose SELECT list contains a + volatile function wrapped inside a sub-SELECT (Tom Lane) @@ -2972,7 +2972,7 @@ This error could lead to incorrect plans for queries involving - multiple levels of subqueries within JOIN syntax. + multiple levels of subqueries within JOIN syntax. @@ -2990,13 +2990,13 @@ - Fix array slicing of int2vector and oidvector values + Fix array slicing of int2vector and oidvector values (Tom Lane) Expressions of this kind are now implicitly promoted to - regular int2 or oid arrays. + regular int2 or oid arrays. 
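
A sketch of the kind of expression the array-slicing item above is about:
pg_index.indkey is an int2vector, and with the fix a slice of it behaves like
a slice of a regular int2 array (int2vector subscripts start at 0):

    -- take the first two key-column numbers of a few indexes
    SELECT indexrelid::regclass AS index_name,
           indkey[0:1]          AS first_two_key_columns
    FROM pg_index
    LIMIT 3;
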
@@ -3010,7 +3010,7 @@ In some cases, the system would use the simple GMT offset value when it should have used the regular timezone setting that had prevailed before the simple offset was selected. This change also causes - the timeofday function to honor the simple GMT offset + the timeofday function to honor the simple GMT offset zone. @@ -3024,7 +3024,7 @@ - Properly quote generated command lines in pg_ctl + Properly quote generated command lines in pg_ctl (Naoya Anzai and Tom Lane) @@ -3035,10 +3035,10 @@ - Fix pg_dumpall to work when a source database + Fix pg_dumpall to work when a source database sets default_transaction_read_only - via ALTER DATABASE SET (Kevin Grittner) + linkend="guc-default-transaction-read-only">default_transaction_read_only + via ALTER DATABASE SET (Kevin Grittner) @@ -3048,21 +3048,21 @@ - Fix ecpg's processing of lists of variables - declared varchar (Zoltán Böszörményi) + Fix ecpg's processing of lists of variables + declared varchar (Zoltán Böszörményi) - Make contrib/lo defend against incorrect trigger definitions + Make contrib/lo defend against incorrect trigger definitions (Marc Cousin) - Update time zone data files to tzdata release 2013h + Update time zone data files to tzdata release 2013h for DST law changes in Argentina, Brazil, Jordan, Libya, Liechtenstein, Morocco, and Palestine. Also, new timezone abbreviations WIB, WIT, WITA for Indonesia. @@ -3085,7 +3085,7 @@ This release contains a variety of fixes from 9.0.13. For information about new features in the 9.0 major release, see - . + . @@ -3097,7 +3097,7 @@ However, if you are upgrading from a version earlier than 9.0.13, - see . + see . @@ -3114,7 +3114,7 @@ - PostgreSQL case-folds non-ASCII characters only + PostgreSQL case-folds non-ASCII characters only when using a single-byte server encoding. @@ -3122,7 +3122,7 @@ Fix checkpoint memory leak in background writer when wal_level = - hot_standby (Naoya Anzai) + hot_standby (Naoya Anzai) @@ -3135,7 +3135,7 @@ - Fix memory overcommit bug when work_mem is using more + Fix memory overcommit bug when work_mem is using more than 24GB of memory (Stephen Frost) @@ -3160,29 +3160,29 @@ - Previously tests like col IS NOT TRUE and col IS - NOT FALSE did not properly factor in NULL values when estimating + Previously tests like col IS NOT TRUE and col IS + NOT FALSE did not properly factor in NULL values when estimating plan costs. - Prevent pushing down WHERE clauses into unsafe - UNION/INTERSECT subqueries (Tom Lane) + Prevent pushing down WHERE clauses into unsafe + UNION/INTERSECT subqueries (Tom Lane) - Subqueries of a UNION or INTERSECT that + Subqueries of a UNION or INTERSECT that contain set-returning functions or volatile functions in their - SELECT lists could be improperly optimized, leading to + SELECT lists could be improperly optimized, leading to run-time errors or incorrect query results. - Fix rare case of failed to locate grouping columns + Fix rare case of failed to locate grouping columns planner failure (Tom Lane) @@ -3196,37 +3196,37 @@ - Properly record index comments created using UNIQUE - and PRIMARY KEY syntax (Andres Freund) + Properly record index comments created using UNIQUE + and PRIMARY KEY syntax (Andres Freund) - This fixes a parallel pg_restore failure. + This fixes a parallel pg_restore failure. - Fix REINDEX TABLE and REINDEX DATABASE + Fix REINDEX TABLE and REINDEX DATABASE to properly revalidate constraints and mark invalidated indexes as valid (Noah Misch) - REINDEX INDEX has always worked properly. 
+ REINDEX INDEX has always worked properly. Fix possible deadlock during concurrent CREATE INDEX - CONCURRENTLY operations (Tom Lane) + CONCURRENTLY operations (Tom Lane) - Fix regexp_matches() handling of zero-length matches + Fix regexp_matches() handling of zero-length matches (Jeevan Chalke) @@ -3250,14 +3250,14 @@ - Prevent CREATE FUNCTION from checking SET + Prevent CREATE FUNCTION from checking SET variables unless function body checking is enabled (Tom Lane) - Allow ALTER DEFAULT PRIVILEGES to operate on schemas + Allow ALTER DEFAULT PRIVILEGES to operate on schemas without requiring CREATE permission (Tom Lane) @@ -3269,16 +3269,16 @@ Specifically, lessen keyword restrictions for role names, language - names, EXPLAIN and COPY options, and - SET values. This allows COPY ... (FORMAT - BINARY) to work as expected; previously BINARY needed + names, EXPLAIN and COPY options, and + SET values. This allows COPY ... (FORMAT + BINARY) to work as expected; previously BINARY needed to be quoted. - Fix pgp_pub_decrypt() so it works for secret keys with + Fix pgp_pub_decrypt() so it works for secret keys with passwords (Marko Kreen) @@ -3292,7 +3292,7 @@ - Ensure that VACUUM ANALYZE still runs the ANALYZE phase + Ensure that VACUUM ANALYZE still runs the ANALYZE phase if its attempt to truncate the file is cancelled due to lock conflicts (Kevin Grittner) @@ -3308,14 +3308,14 @@ Ensure that floating-point data input accepts standard spellings - of infinity on all platforms (Tom Lane) + of infinity on all platforms (Tom Lane) - The C99 standard says that allowable spellings are inf, - +inf, -inf, infinity, - +infinity, and -infinity. Make sure we - recognize these even if the platform's strtod function + The C99 standard says that allowable spellings are inf, + +inf, -inf, infinity, + +infinity, and -infinity. Make sure we + recognize these even if the platform's strtod function doesn't. @@ -3329,7 +3329,7 @@ - Update time zone data files to tzdata release 2013d + Update time zone data files to tzdata release 2013d for DST law changes in Israel, Morocco, Palestine, and Paraguay. Also, historical zone data corrections for Macquarie Island. @@ -3351,7 +3351,7 @@ This release contains a variety of fixes from 9.0.12. For information about new features in the 9.0 major release, see - . + . @@ -3364,13 +3364,13 @@ However, this release corrects several errors in management of GiST indexes. After installing this update, it is advisable to - REINDEX any GiST indexes that meet one or more of the + REINDEX any GiST indexes that meet one or more of the conditions described below. Also, if you are upgrading from a version earlier than 9.0.6, - see . + see . @@ -3388,7 +3388,7 @@ A connection request containing a database name that begins with - - could be crafted to damage or destroy + - could be crafted to damage or destroy files within the server's data directory, even if the request is eventually rejected. (CVE-2013-1899) @@ -3402,41 +3402,41 @@ This avoids a scenario wherein random numbers generated by - contrib/pgcrypto functions might be relatively easy for + contrib/pgcrypto functions might be relatively easy for another database user to guess. The risk is only significant when - the postmaster is configured with ssl = on + the postmaster is configured with ssl = on but most connections don't use SSL encryption. 
(CVE-2013-1900) - Fix GiST indexes to not use fuzzy geometric comparisons when + Fix GiST indexes to not use fuzzy geometric comparisons when it's not appropriate to do so (Alexander Korotkov) - The core geometric types perform comparisons using fuzzy - equality, but gist_box_same must do exact comparisons, + The core geometric types perform comparisons using fuzzy + equality, but gist_box_same must do exact comparisons, else GiST indexes using it might become inconsistent. After installing - this update, users should REINDEX any GiST indexes on - box, polygon, circle, or point - columns, since all of these use gist_box_same. + this update, users should REINDEX any GiST indexes on + box, polygon, circle, or point + columns, since all of these use gist_box_same. Fix erroneous range-union and penalty logic in GiST indexes that use - contrib/btree_gist for variable-width data types, that is - text, bytea, bit, and numeric + contrib/btree_gist for variable-width data types, that is + text, bytea, bit, and numeric columns (Tom Lane) These errors could result in inconsistent indexes in which some keys that are present would not be found by searches, and also in useless - index bloat. Users are advised to REINDEX such indexes + index bloat. Users are advised to REINDEX such indexes after installing this update. @@ -3451,21 +3451,21 @@ These errors could result in inconsistent indexes in which some keys that are present would not be found by searches, and also in indexes that are unnecessarily inefficient to search. Users are advised to - REINDEX multi-column GiST indexes after installing this + REINDEX multi-column GiST indexes after installing this update. - Fix gist_point_consistent + Fix gist_point_consistent to handle fuzziness consistently (Alexander Korotkov) - Index scans on GiST indexes on point columns would sometimes + Index scans on GiST indexes on point columns would sometimes yield results different from a sequential scan, because - gist_point_consistent disagreed with the underlying + gist_point_consistent disagreed with the underlying operator code about whether to do comparisons exactly or fuzzily. @@ -3476,21 +3476,21 @@ - This bug could result in incorrect local pin count errors + This bug could result in incorrect local pin count errors during replay, making recovery impossible. - Fix race condition in DELETE RETURNING (Tom Lane) + Fix race condition in DELETE RETURNING (Tom Lane) - Under the right circumstances, DELETE RETURNING could + Under the right circumstances, DELETE RETURNING could attempt to fetch data from a shared buffer that the current process no longer has any pin on. If some other process changed the buffer - meanwhile, this would lead to garbage RETURNING output, or + meanwhile, this would lead to garbage RETURNING output, or even a crash. @@ -3511,28 +3511,28 @@ - Fix to_char() to use ASCII-only case-folding rules where + Fix to_char() to use ASCII-only case-folding rules where appropriate (Tom Lane) This fixes misbehavior of some template patterns that should be - locale-independent, but mishandled I and - i in Turkish locales. + locale-independent, but mishandled I and + i in Turkish locales. 
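
A sketch of the template patterns involved in the to_char() item above; the
English day and month names these patterns produce are meant to be case-folded
with ASCII rules regardless of locale, which is what the fix restores in
Turkish locales:

    SELECT to_char(date '2013-04-01', 'DAY') AS upper_day,
           to_char(date '2013-04-01', 'day') AS lower_day,
           to_char(date '2013-04-01', 'MON') AS upper_month;
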
- Fix unwanted rejection of timestamp 1999-12-31 24:00:00 + Fix unwanted rejection of timestamp 1999-12-31 24:00:00 (Tom Lane) - Fix logic error when a single transaction does UNLISTEN - then LISTEN (Tom Lane) + Fix logic error when a single transaction does UNLISTEN + then LISTEN (Tom Lane) @@ -3543,7 +3543,7 @@ - Remove useless picksplit doesn't support secondary split log + Remove useless picksplit doesn't support secondary split log messages (Josh Hansen, Tom Lane) @@ -3564,29 +3564,29 @@ - Eliminate memory leaks in PL/Perl's spi_prepare() function + Eliminate memory leaks in PL/Perl's spi_prepare() function (Alex Hunsaker, Tom Lane) - Fix pg_dumpall to handle database names containing - = correctly (Heikki Linnakangas) + Fix pg_dumpall to handle database names containing + = correctly (Heikki Linnakangas) - Avoid crash in pg_dump when an incorrect connection + Avoid crash in pg_dump when an incorrect connection string is given (Heikki Linnakangas) - Ignore invalid indexes in pg_dump and - pg_upgrade (Michael Paquier, Bruce Momjian) + Ignore invalid indexes in pg_dump and + pg_upgrade (Michael Paquier, Bruce Momjian) @@ -3595,26 +3595,26 @@ a uniqueness condition not satisfied by the table's data. Also, if the index creation is in fact still in progress, it seems reasonable to consider it to be an uncommitted DDL change, which - pg_dump wouldn't be expected to dump anyway. - pg_upgrade now also skips invalid indexes rather than + pg_dump wouldn't be expected to dump anyway. + pg_upgrade now also skips invalid indexes rather than failing. - Fix contrib/pg_trgm's similarity() function + Fix contrib/pg_trgm's similarity() function to return zero for trigram-less strings (Tom Lane) - Previously it returned NaN due to internal division by zero. + Previously it returned NaN due to internal division by zero. - Update time zone data files to tzdata release 2013b + Update time zone data files to tzdata release 2013b for DST law changes in Chile, Haiti, Morocco, Paraguay, and some Russian areas. Also, historical zone data corrections for numerous places. @@ -3622,12 +3622,12 @@ Also, update the time zone abbreviation files for recent changes in - Russia and elsewhere: CHOT, GET, - IRKT, KGT, KRAT, MAGT, - MAWT, MSK, NOVT, OMST, - TKT, VLAT, WST, YAKT, - YEKT now follow their current meanings, and - VOLT (Europe/Volgograd) and MIST + Russia and elsewhere: CHOT, GET, + IRKT, KGT, KRAT, MAGT, + MAWT, MSK, NOVT, OMST, + TKT, VLAT, WST, YAKT, + YEKT now follow their current meanings, and + VOLT (Europe/Volgograd) and MIST (Antarctica/Macquarie) are added to the default abbreviations list. @@ -3648,7 +3648,7 @@ This release contains a variety of fixes from 9.0.11. For information about new features in the 9.0 major release, see - . + . @@ -3660,7 +3660,7 @@ However, if you are upgrading from a version earlier than 9.0.6, - see . + see . @@ -3672,7 +3672,7 @@ - Prevent execution of enum_recv from SQL (Tom Lane) + Prevent execution of enum_recv from SQL (Tom Lane) @@ -3742,19 +3742,19 @@ Protect against race conditions when scanning - pg_tablespace (Stephen Frost, Tom Lane) + pg_tablespace (Stephen Frost, Tom Lane) - CREATE DATABASE and DROP DATABASE could + CREATE DATABASE and DROP DATABASE could misbehave if there were concurrent updates of - pg_tablespace entries. + pg_tablespace entries. 
- Prevent DROP OWNED from trying to drop whole databases or + Prevent DROP OWNED from trying to drop whole databases or tablespaces (Álvaro Herrera) @@ -3766,13 +3766,13 @@ Fix error in vacuum_freeze_table_age + linkend="guc-vacuum-freeze-table-age">vacuum_freeze_table_age implementation (Andres Freund) In installations that have existed for more than vacuum_freeze_min_age + linkend="guc-vacuum-freeze-min-age">vacuum_freeze_min_age transactions, this mistake prevented autovacuum from using partial-table scans, so that a full-table scan would always happen instead. @@ -3780,13 +3780,13 @@ - Prevent misbehavior when a RowExpr or XmlExpr + Prevent misbehavior when a RowExpr or XmlExpr is parse-analyzed twice (Andres Freund, Tom Lane) This mistake could be user-visible in contexts such as - CREATE TABLE LIKE INCLUDING INDEXES. + CREATE TABLE LIKE INCLUDING INDEXES. @@ -3799,7 +3799,7 @@ - Reject out-of-range dates in to_date() (Hitoshi Harada) + Reject out-of-range dates in to_date() (Hitoshi Harada) @@ -3810,55 +3810,55 @@ - This bug affected psql and some other client programs. + This bug affected psql and some other client programs. - Fix possible crash in psql's \? command + Fix possible crash in psql's \? command when not connected to a database (Meng Qingzhong) - Fix pg_upgrade to deal with invalid indexes safely + Fix pg_upgrade to deal with invalid indexes safely (Bruce Momjian) - Fix one-byte buffer overrun in libpq's - PQprintTuples (Xi Wang) + Fix one-byte buffer overrun in libpq's + PQprintTuples (Xi Wang) This ancient function is not used anywhere by - PostgreSQL itself, but it might still be used by some + PostgreSQL itself, but it might still be used by some client code. - Make ecpglib use translated messages properly + Make ecpglib use translated messages properly (Chen Huajun) - Properly install ecpg_compat and - pgtypes libraries on MSVC (Jiang Guiqing) + Properly install ecpg_compat and + pgtypes libraries on MSVC (Jiang Guiqing) - Include our version of isinf() in - libecpg if it's not provided by the system + Include our version of isinf() in + libecpg if it's not provided by the system (Jiang Guiqing) @@ -3878,15 +3878,15 @@ - Make pgxs build executables with the right - .exe suffix when cross-compiling for Windows + Make pgxs build executables with the right + .exe suffix when cross-compiling for Windows (Zoltan Boszormenyi) - Add new timezone abbreviation FET (Tom Lane) + Add new timezone abbreviation FET (Tom Lane) @@ -3910,7 +3910,7 @@ This release contains a variety of fixes from 9.0.10. For information about new features in the 9.0 major release, see - . + . @@ -3922,7 +3922,7 @@ However, if you are upgrading from a version earlier than 9.0.6, - see . + see . @@ -3935,13 +3935,13 @@ Fix multiple bugs associated with CREATE INDEX - CONCURRENTLY (Andres Freund, Tom Lane) + CONCURRENTLY (Andres Freund, Tom Lane) - Fix CREATE INDEX CONCURRENTLY to use + Fix CREATE INDEX CONCURRENTLY to use in-place updates when changing the state of an index's - pg_index row. This prevents race conditions that could + pg_index row. This prevents race conditions that could cause concurrent sessions to miss updating the target index, thus resulting in corrupt concurrently-created indexes. @@ -3949,8 +3949,8 @@ Also, fix various other operations to ensure that they ignore invalid indexes resulting from a failed CREATE INDEX - CONCURRENTLY command. The most important of these is - VACUUM, because an auto-vacuum could easily be launched + CONCURRENTLY command. 
The most important of these is + VACUUM, because an auto-vacuum could easily be launched on the table before corrective action can be taken to fix or remove the invalid index. @@ -3987,13 +3987,13 @@ This oversight could prevent subsequent execution of certain - operations such as CREATE INDEX CONCURRENTLY. + operations such as CREATE INDEX CONCURRENTLY. - Avoid bogus out-of-sequence timeline ID errors in standby + Avoid bogus out-of-sequence timeline ID errors in standby mode (Heikki Linnakangas) @@ -4026,8 +4026,8 @@ The planner could derive incorrect constraints from a clause equating a non-strict construct to something else, for example - WHERE COALESCE(foo, 0) = 0 - when foo is coming from the nullable side of an outer join. + WHERE COALESCE(foo, 0) = 0 + when foo is coming from the nullable side of an outer join. @@ -4045,10 +4045,10 @@ - This affects multicolumn NOT IN subplans, such as - WHERE (a, b) NOT IN (SELECT x, y FROM ...) - when for instance b and y are int4 - and int8 respectively. This mistake led to wrong answers + This affects multicolumn NOT IN subplans, such as + WHERE (a, b) NOT IN (SELECT x, y FROM ...) + when for instance b and y are int4 + and int8 respectively. This mistake led to wrong answers or crashes depending on the specific datatypes involved. @@ -4056,7 +4056,7 @@ Acquire buffer lock when re-fetching the old tuple for an - AFTER ROW UPDATE/DELETE trigger (Andres Freund) + AFTER ROW UPDATE/DELETE trigger (Andres Freund) @@ -4069,7 +4069,7 @@ - Fix ALTER COLUMN TYPE to handle inherited check + Fix ALTER COLUMN TYPE to handle inherited check constraints properly (Pavan Deolasee) @@ -4081,14 +4081,14 @@ - Fix REASSIGN OWNED to handle grants on tablespaces + Fix REASSIGN OWNED to handle grants on tablespaces (Álvaro Herrera) - Ignore incorrect pg_attribute entries for system + Ignore incorrect pg_attribute entries for system columns for views (Tom Lane) @@ -4102,7 +4102,7 @@ - Fix rule printing to dump INSERT INTO table + Fix rule printing to dump INSERT INTO table DEFAULT VALUES correctly (Tom Lane) @@ -4110,7 +4110,7 @@ Guard against stack overflow when there are too many - UNION/INTERSECT/EXCEPT clauses + UNION/INTERSECT/EXCEPT clauses in a query (Tom Lane) @@ -4132,14 +4132,14 @@ Fix failure to advance XID epoch if XID wraparound happens during a - checkpoint and wal_level is hot_standby + checkpoint and wal_level is hot_standby (Tom Lane, Andres Freund) While this mistake had no particular impact on PostgreSQL itself, it was bad for - applications that rely on txid_current() and related + applications that rely on txid_current() and related functions: the TXID value would appear to go backwards. @@ -4153,7 +4153,7 @@ Formerly, this would result in something quite unhelpful, such as - Non-recoverable failure in name resolution. + Non-recoverable failure in name resolution. 
@@ -4166,8 +4166,8 @@ - Make pg_ctl more robust about reading the - postmaster.pid file (Heikki Linnakangas) + Make pg_ctl more robust about reading the + postmaster.pid file (Heikki Linnakangas) @@ -4177,33 +4177,33 @@ - Fix possible crash in psql if incorrectly-encoded data - is presented and the client_encoding setting is a + Fix possible crash in psql if incorrectly-encoded data + is presented and the client_encoding setting is a client-only encoding, such as SJIS (Jiang Guiqing) - Fix bugs in the restore.sql script emitted by - pg_dump in tar output format (Tom Lane) + Fix bugs in the restore.sql script emitted by + pg_dump in tar output format (Tom Lane) The script would fail outright on tables whose names include upper-case characters. Also, make the script capable of restoring - data in mode as well as the regular COPY mode. - Fix pg_restore to accept POSIX-conformant - tar files (Brian Weaver, Tom Lane) + Fix pg_restore to accept POSIX-conformant + tar files (Brian Weaver, Tom Lane) - The original coding of pg_dump's tar + The original coding of pg_dump's tar output mode produced files that are not fully conformant with the POSIX standard. This has been corrected for version 9.3. This patch updates previous branches so that they will accept both the @@ -4214,48 +4214,48 @@ - Fix pg_resetxlog to locate postmaster.pid + Fix pg_resetxlog to locate postmaster.pid correctly when given a relative path to the data directory (Tom Lane) - This mistake could lead to pg_resetxlog not noticing + This mistake could lead to pg_resetxlog not noticing that there is an active postmaster using the data directory. - Fix libpq's lo_import() and - lo_export() functions to report file I/O errors properly + Fix libpq's lo_import() and + lo_export() functions to report file I/O errors properly (Tom Lane) - Fix ecpg's processing of nested structure pointer + Fix ecpg's processing of nested structure pointer variables (Muhammad Usama) - Fix ecpg's ecpg_get_data function to + Fix ecpg's ecpg_get_data function to handle arrays properly (Michael Meskes) - Make contrib/pageinspect's btree page inspection + Make contrib/pageinspect's btree page inspection functions take buffer locks while examining pages (Tom Lane) - Fix pgxs support for building loadable modules on AIX + Fix pgxs support for building loadable modules on AIX (Tom Lane) @@ -4266,7 +4266,7 @@ - Update time zone data files to tzdata release 2012j + Update time zone data files to tzdata release 2012j for DST law changes in Cuba, Israel, Jordan, Libya, Palestine, Western Samoa, and portions of Brazil. @@ -4288,7 +4288,7 @@ This release contains a variety of fixes from 9.0.9. For information about new features in the 9.0 major release, see - . + . @@ -4300,7 +4300,7 @@ However, if you are upgrading from a version earlier than 9.0.6, - see . + see . @@ -4318,7 +4318,7 @@ These errors could result in wrong answers from queries that scan the - same WITH subquery multiple times. + same WITH subquery multiple times. @@ -4341,10 +4341,10 @@ - If we revoke a grant option from some role X, but - X still holds that option via a grant from someone + If we revoke a grant option from some role X, but + X still holds that option via a grant from someone else, we should not recursively revoke the corresponding privilege - from role(s) Y that X had granted it + from role(s) Y that X had granted it to. 
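
A sketch of the revocation scenario in the grant-option item above, using
hypothetical role and table names and run as a superuser; after the fix, y
keeps SELECT on t because x still holds the grant option through g2:

    CREATE ROLE g1;  CREATE ROLE g2;  CREATE ROLE x;  CREATE ROLE y;
    CREATE TABLE t (v int);
    GRANT SELECT ON t TO g1 WITH GRANT OPTION;
    GRANT SELECT ON t TO g2 WITH GRANT OPTION;
    SET ROLE g1;  GRANT SELECT ON t TO x WITH GRANT OPTION;  RESET ROLE;
    SET ROLE g2;  GRANT SELECT ON t TO x WITH GRANT OPTION;  RESET ROLE;
    SET ROLE x;   GRANT SELECT ON t TO y;                    RESET ROLE;
    -- revoke only g1's grant option from x; the grant x made to y survives,
    -- since x still holds the option via g2
    SET ROLE g1;  REVOKE GRANT OPTION FOR SELECT ON t FROM x CASCADE;
    RESET ROLE;
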
@@ -4358,12 +4358,12 @@ - Fix handling of SIGFPE when PL/Perl is in use (Andres Freund) + Fix handling of SIGFPE when PL/Perl is in use (Andres Freund) - Perl resets the process's SIGFPE handler to - SIG_IGN, which could result in crashes later on. Restore + Perl resets the process's SIGFPE handler to + SIG_IGN, which could result in crashes later on. Restore the normal Postgres signal handler after initializing PL/Perl. @@ -4382,7 +4382,7 @@ Some Linux distributions contain an incorrect version of - pthread.h that results in incorrect compiled code in + pthread.h that results in incorrect compiled code in PL/Perl, leading to crashes if a PL/Perl function calls another one that throws an error. @@ -4390,26 +4390,26 @@ - Fix pg_upgrade's handling of line endings on Windows + Fix pg_upgrade's handling of line endings on Windows (Andrew Dunstan) - Previously, pg_upgrade might add or remove carriage + Previously, pg_upgrade might add or remove carriage returns in places such as function bodies. - On Windows, make pg_upgrade use backslash path + On Windows, make pg_upgrade use backslash path separators in the scripts it emits (Andrew Dunstan) - Update time zone data files to tzdata release 2012f + Update time zone data files to tzdata release 2012f for DST law changes in Fiji @@ -4430,7 +4430,7 @@ This release contains a variety of fixes from 9.0.8. For information about new features in the 9.0 major release, see - . + . @@ -4442,7 +4442,7 @@ However, if you are upgrading from a version earlier than 9.0.6, - see . + see . @@ -4459,7 +4459,7 @@ - xml_parse() would attempt to fetch external files or + xml_parse() would attempt to fetch external files or URLs as needed to resolve DTD and entity references in an XML value, thus allowing unprivileged database users to attempt to fetch data with the privileges of the database server. While the external data @@ -4472,22 +4472,22 @@ - Prevent access to external files/URLs via contrib/xml2's - xslt_process() (Peter Eisentraut) + Prevent access to external files/URLs via contrib/xml2's + xslt_process() (Peter Eisentraut) - libxslt offers the ability to read and write both + libxslt offers the ability to read and write both files and URLs through stylesheet commands, thus allowing unprivileged database users to both read and write data with the privileges of the database server. Disable that through proper use - of libxslt's security options. (CVE-2012-3488) + of libxslt's security options. (CVE-2012-3488) - Also, remove xslt_process()'s ability to fetch documents + Also, remove xslt_process()'s ability to fetch documents and stylesheets from external files/URLs. While this was a - documented feature, it was long regarded as a bad idea. + documented feature, it was long regarded as a bad idea. The fix for CVE-2012-3489 broke that capability, and rather than expend effort on trying to fix it, we're just going to summarily remove it. @@ -4515,21 +4515,21 @@ - If ALTER SEQUENCE was executed on a freshly created or - reset sequence, and then precisely one nextval() call + If ALTER SEQUENCE was executed on a freshly created or + reset sequence, and then precisely one nextval() call was made on it, and then the server crashed, WAL replay would restore the sequence to a state in which it appeared that no - nextval() had been done, thus allowing the first + nextval() had been done, thus allowing the first sequence value to be returned again by the next - nextval() call. 
In particular this could manifest for - serial columns, since creation of a serial column's sequence - includes an ALTER SEQUENCE OWNED BY step. + nextval() call. In particular this could manifest for + serial columns, since creation of a serial column's sequence + includes an ALTER SEQUENCE OWNED BY step. - Fix txid_current() to report the correct epoch when not + Fix txid_current() to report the correct epoch when not in hot standby (Heikki Linnakangas) @@ -4546,14 +4546,14 @@ This mistake led to failures reported as out-of-order XID - insertion in KnownAssignedXids. + insertion in KnownAssignedXids. - Ensure the backup_label file is fsync'd after - pg_start_backup() (Dave Kerr) + Ensure the backup_label file is fsync'd after + pg_start_backup() (Dave Kerr) @@ -4564,7 +4564,7 @@ WAL sender background processes neglected to establish a - SIGALRM handler, meaning they would wait forever in + SIGALRM handler, meaning they would wait forever in some corner cases where a timeout ought to happen. @@ -4583,15 +4583,15 @@ - Fix LISTEN/NOTIFY to cope better with I/O + Fix LISTEN/NOTIFY to cope better with I/O problems, such as out of disk space (Tom Lane) After a write failure, all subsequent attempts to send more - NOTIFY messages would fail with messages like - Could not read from file "pg_notify/nnnn" at - offset nnnnn: Success. + NOTIFY messages would fail with messages like + Could not read from file "pg_notify/nnnn" at + offset nnnnn: Success. @@ -4604,7 +4604,7 @@ The original coding could allow inconsistent behavior in some cases; in particular, an autovacuum could get canceled after less than - deadlock_timeout grace period. + deadlock_timeout grace period. @@ -4616,15 +4616,15 @@ - Fix log collector so that log_truncate_on_rotation works + Fix log collector so that log_truncate_on_rotation works during the very first log rotation after server start (Tom Lane) - Fix WITH attached to a nested set operation - (UNION/INTERSECT/EXCEPT) + Fix WITH attached to a nested set operation + (UNION/INTERSECT/EXCEPT) (Tom Lane) @@ -4632,24 +4632,24 @@ Ensure that a whole-row reference to a subquery doesn't include any - extra GROUP BY or ORDER BY columns (Tom Lane) + extra GROUP BY or ORDER BY columns (Tom Lane) - Disallow copying whole-row references in CHECK - constraints and index definitions during CREATE TABLE + Disallow copying whole-row references in CHECK + constraints and index definitions during CREATE TABLE (Tom Lane) - This situation can arise in CREATE TABLE with - LIKE or INHERITS. The copied whole-row + This situation can arise in CREATE TABLE with + LIKE or INHERITS. The copied whole-row variable was incorrectly labeled with the row type of the original table not the new one. Rejecting the case seems reasonable for - LIKE, since the row types might well diverge later. For - INHERITS we should ideally allow it, with an implicit + LIKE, since the row types might well diverge later. For + INHERITS we should ideally allow it, with an implicit coercion to the parent table's row type; but that will require more work than seems safe to back-patch. @@ -4657,7 +4657,7 @@ - Fix memory leak in ARRAY(SELECT ...) subqueries (Heikki + Fix memory leak in ARRAY(SELECT ...) subqueries (Heikki Linnakangas, Tom Lane) @@ -4669,7 +4669,7 @@ The code could get confused by quantified parenthesized - subexpressions, such as ^(foo)?bar. This would lead to + subexpressions, such as ^(foo)?bar. This would lead to incorrect index optimization of searches for such patterns. 
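
A sketch of how one might check the size relationship mentioned in the
pg_attribute item above (both functions are standard in core PostgreSQL):

    -- compare the on-disk size of pg_attribute with the shared_buffers setting
    SELECT pg_size_pretty(pg_table_size('pg_catalog.pg_attribute')) AS pg_attribute_size,
           current_setting('shared_buffers')                        AS shared_buffers;
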
@@ -4677,9 +4677,9 @@ Fix bugs with parsing signed - hh:mm and - hh:mm:ss - fields in interval constants (Amit Kapila, Tom Lane) + hh:mm and + hh:mm:ss + fields in interval constants (Amit Kapila, Tom Lane) @@ -4708,14 +4708,14 @@ - Report errors properly in contrib/xml2's - xslt_process() (Tom Lane) + Report errors properly in contrib/xml2's + xslt_process() (Tom Lane) - Update time zone data files to tzdata release 2012e + Update time zone data files to tzdata release 2012e for DST law changes in Morocco and Tokelau @@ -4736,7 +4736,7 @@ This release contains a variety of fixes from 9.0.7. For information about new features in the 9.0 major release, see - . + . @@ -4748,7 +4748,7 @@ However, if you are upgrading from a version earlier than 9.0.6, - see . + see . @@ -4761,12 +4761,12 @@ Fix incorrect password transformation in - contrib/pgcrypto's DES crypt() function + contrib/pgcrypto's DES crypt() function (Solar Designer) - If a password string contained the byte value 0x80, the + If a password string contained the byte value 0x80, the remainder of the password was ignored, causing the password to be much weaker than it appeared. With this fix, the rest of the string is properly included in the DES hash. Any stored password values that are @@ -4777,7 +4777,7 @@ - Ignore SECURITY DEFINER and SET attributes for + Ignore SECURITY DEFINER and SET attributes for a procedural language's call handler (Tom Lane) @@ -4789,7 +4789,7 @@ - Allow numeric timezone offsets in timestamp input to be up to + Allow numeric timezone offsets in timestamp input to be up to 16 hours away from UTC (Tom Lane) @@ -4815,7 +4815,7 @@ - Fix text to name and char to name + Fix text to name and char to name casts to perform string truncation correctly in multibyte encodings (Karl Schnaitter) @@ -4823,13 +4823,13 @@ - Fix memory copying bug in to_tsquery() (Heikki Linnakangas) + Fix memory copying bug in to_tsquery() (Heikki Linnakangas) - Ensure txid_current() reports the correct epoch when + Ensure txid_current() reports the correct epoch when executed in hot standby (Simon Riggs) @@ -4844,7 +4844,7 @@ This bug concerns sub-SELECTs that reference variables coming from the nullable side of an outer join of the surrounding query. In 9.1, queries affected by this bug would fail with ERROR: - Upper-level PlaceHolderVar found where not expected. But in 9.0 and + Upper-level PlaceHolderVar found where not expected. But in 9.0 and 8.4, you'd silently get possibly-wrong answers, since the value transmitted into the subquery wouldn't go to null when it should. @@ -4852,13 +4852,13 @@ - Fix slow session startup when pg_attribute is very large + Fix slow session startup when pg_attribute is very large (Tom Lane) - If pg_attribute exceeds one-fourth of - shared_buffers, cache rebuilding code that is sometimes + If pg_attribute exceeds one-fourth of + shared_buffers, cache rebuilding code that is sometimes needed during session start would trigger the synchronized-scan logic, causing it to take many times longer than normal. The problem was particularly acute if many new sessions were starting at once. 
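Hedged examples of the input forms touched by the parsing fixes above; the literal values are illustrative only:

    SELECT interval '-1 -2:03:04';                    -- signed hh:mm:ss field
    SELECT interval '+1 +2:03';                       -- signed hh:mm field
    SELECT timestamptz '2012-07-01 00:00:00+15:30';   -- numeric offset within 16 hours of UTC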
@@ -4879,8 +4879,8 @@ - Ensure the Windows implementation of PGSemaphoreLock() - clears ImmediateInterruptOK before returning (Tom Lane) + Ensure the Windows implementation of PGSemaphoreLock() + clears ImmediateInterruptOK before returning (Tom Lane) @@ -4907,12 +4907,12 @@ - Fix COPY FROM to properly handle null marker strings that + Fix COPY FROM to properly handle null marker strings that correspond to invalid encoding (Tom Lane) - A null marker string such as E'\\0' should work, and did + A null marker string such as E'\\0' should work, and did work in the past, but the case got broken in 8.4. @@ -4925,7 +4925,7 @@ Previously, infinite recursion in a function invoked by - auto-ANALYZE could crash worker processes. + auto-ANALYZE could crash worker processes. @@ -4944,7 +4944,7 @@ Fix logging collector to ensure it will restart file rotation - after receiving SIGHUP (Tom Lane) + after receiving SIGHUP (Tom Lane) @@ -4957,33 +4957,33 @@ - Fix memory leak in PL/pgSQL's RETURN NEXT command (Joe + Fix memory leak in PL/pgSQL's RETURN NEXT command (Joe Conway) - Fix PL/pgSQL's GET DIAGNOSTICS command when the target + Fix PL/pgSQL's GET DIAGNOSTICS command when the target is the function's first variable (Tom Lane) - Fix potential access off the end of memory in psql's - expanded display (\x) mode (Peter Eisentraut) + Fix potential access off the end of memory in psql's + expanded display (\x) mode (Peter Eisentraut) - Fix several performance problems in pg_dump when + Fix several performance problems in pg_dump when the database contains many objects (Jeff Janes, Tom Lane) - pg_dump could get very slow if the database contained + pg_dump could get very slow if the database contained many schemas, or if many objects are in dependency loops, or if there are many owned sequences. @@ -4991,7 +4991,7 @@ - Fix pg_upgrade for the case that a database stored in a + Fix pg_upgrade for the case that a database stored in a non-default tablespace contains a table in the cluster's default tablespace (Bruce Momjian) @@ -4999,41 +4999,41 @@ - In ecpg, fix rare memory leaks and possible overwrite - of one byte after the sqlca_t structure (Peter Eisentraut) + In ecpg, fix rare memory leaks and possible overwrite + of one byte after the sqlca_t structure (Peter Eisentraut) - Fix contrib/dblink's dblink_exec() to not leak + Fix contrib/dblink's dblink_exec() to not leak temporary database connections upon error (Tom Lane) - Fix contrib/dblink to report the correct connection name in + Fix contrib/dblink to report the correct connection name in error messages (Kyotaro Horiguchi) - Fix contrib/vacuumlo to use multiple transactions when + Fix contrib/vacuumlo to use multiple transactions when dropping many large objects (Tim Lewis, Robert Haas, Tom Lane) - This change avoids exceeding max_locks_per_transaction when + This change avoids exceeding max_locks_per_transaction when many objects need to be dropped. The behavior can be adjusted with the - new -l (limit) option. + new -l (limit) option. - Update time zone data files to tzdata release 2012c + Update time zone data files to tzdata release 2012c for DST law changes in Antarctica, Armenia, Chile, Cuba, Falkland Islands, Gaza, Haiti, Hebron, Morocco, Syria, and Tokelau Islands; also historical corrections for Canada. @@ -5056,7 +5056,7 @@ This release contains a variety of fixes from 9.0.6. For information about new features in the 9.0 major release, see - . + . @@ -5068,7 +5068,7 @@ However, if you are upgrading from a version earlier than 9.0.6, - see . 
+ see . @@ -5081,14 +5081,14 @@ Require execute permission on the trigger function for - CREATE TRIGGER (Robert Haas) + CREATE TRIGGER (Robert Haas) This missing check could allow another user to execute a trigger function with forged input data, by installing it on a table he owns. This is only of significance for trigger functions marked - SECURITY DEFINER, since otherwise trigger functions run + SECURITY DEFINER, since otherwise trigger functions run as the table owner anyway. (CVE-2012-0866) @@ -5100,7 +5100,7 @@ - Both libpq and the server truncated the common name + Both libpq and the server truncated the common name extracted from an SSL certificate at 32 bytes. Normally this would cause nothing worse than an unexpected verification failure, but there are some rather-implausible scenarios in which it might allow one @@ -5115,12 +5115,12 @@ - Convert newlines to spaces in names written in pg_dump + Convert newlines to spaces in names written in pg_dump comments (Robert Haas) - pg_dump was incautious about sanitizing object names + pg_dump was incautious about sanitizing object names that are emitted within SQL comments in its output script. A name containing a newline would at least render the script syntactically incorrect. Maliciously crafted object names could present a SQL @@ -5136,10 +5136,10 @@ An index page split caused by an insertion could sometimes cause a - concurrently-running VACUUM to miss removing index entries + concurrently-running VACUUM to miss removing index entries that it should remove. After the corresponding table rows are removed, the dangling index entries would cause errors (such as could not - read block N in file ...) or worse, silently wrong query results + read block N in file ...) or worse, silently wrong query results after unrelated rows are re-inserted at the now-free table locations. This bug has been present since release 8.2, but occurs so infrequently that it was not diagnosed until now. If you have reason to suspect @@ -5158,7 +5158,7 @@ that the contents were transiently invalid. In hot standby mode this can result in a query that's executing in parallel seeing garbage data. Various symptoms could result from that, but the most common one seems - to be invalid memory alloc request size. + to be invalid memory alloc request size. @@ -5176,13 +5176,13 @@ - Fix CLUSTER/VACUUM FULL handling of toast + Fix CLUSTER/VACUUM FULL handling of toast values owned by recently-updated rows (Tom Lane) This oversight could lead to duplicate key value violates unique - constraint errors being reported against the toast table's index + constraint errors being reported against the toast table's index during one of these commands. @@ -5204,11 +5204,11 @@ Support foreign data wrappers and foreign servers in - REASSIGN OWNED (Alvaro Herrera) + REASSIGN OWNED (Alvaro Herrera) - This command failed with unexpected classid errors if + This command failed with unexpected classid errors if it needed to change the ownership of any such objects. @@ -5216,16 +5216,16 @@ Allow non-existent values for some settings in ALTER - USER/DATABASE SET (Heikki Linnakangas) + USER/DATABASE SET (Heikki Linnakangas) - Allow default_text_search_config, - default_tablespace, and temp_tablespaces to be + Allow default_text_search_config, + default_tablespace, and temp_tablespaces to be set to names that are not known. This is because they might be known in another database where the setting is intended to be used, or for the tablespace cases because the tablespace might not be created yet. 
The - same issue was previously recognized for search_path, and + same issue was previously recognized for search_path, and these settings now act like that one. @@ -5249,7 +5249,7 @@ Recover from errors occurring during WAL replay of DROP - TABLESPACE (Tom Lane) + TABLESPACE (Tom Lane) @@ -5271,7 +5271,7 @@ Sometimes a lock would be logged as being held by transaction - zero. This is at least known to produce assertion failures on + zero. This is at least known to produce assertion failures on slave servers, and might be the cause of more serious problems. @@ -5293,7 +5293,7 @@ - Prevent emitting misleading consistent recovery state reached + Prevent emitting misleading consistent recovery state reached log message at the beginning of crash recovery (Heikki Linnakangas) @@ -5301,7 +5301,7 @@ Fix initial value of - pg_stat_replication.replay_location + pg_stat_replication.replay_location (Fujii Masao) @@ -5313,7 +5313,7 @@ - Fix regular expression back-references with * attached + Fix regular expression back-references with * attached (Tom Lane) @@ -5327,18 +5327,18 @@ A similar problem still afflicts back-references that are embedded in a larger quantified expression, rather than being the immediate subject of the quantifier. This will be addressed in a future - PostgreSQL release. + PostgreSQL release. Fix recently-introduced memory leak in processing of - inet/cidr values (Heikki Linnakangas) + inet/cidr values (Heikki Linnakangas) - A patch in the December 2011 releases of PostgreSQL + A patch in the December 2011 releases of PostgreSQL caused memory leakage in these operations, which could be significant in scenarios such as building a btree index on such a column. @@ -5346,8 +5346,8 @@ - Fix dangling pointer after CREATE TABLE AS/SELECT - INTO in a SQL-language function (Tom Lane) + Fix dangling pointer after CREATE TABLE AS/SELECT + INTO in a SQL-language function (Tom Lane) @@ -5381,32 +5381,32 @@ - Improve pg_dump's handling of inherited table columns + Improve pg_dump's handling of inherited table columns (Tom Lane) - pg_dump mishandled situations where a child column has + pg_dump mishandled situations where a child column has a different default expression than its parent column. If the default is textually identical to the parent's default, but not actually the same (for instance, because of schema search path differences) it would not be recognized as different, so that after dump and restore the child would be allowed to inherit the parent's default. Child columns - that are NOT NULL where their parent is not could also be + that are NOT NULL where their parent is not could also be restored subtly incorrectly. - Fix pg_restore's direct-to-database mode for + Fix pg_restore's direct-to-database mode for INSERT-style table data (Tom Lane) Direct-to-database restores from archive files made with - - Fix trigger WHEN conditions when both BEFORE and - AFTER triggers exist (Tom Lane) + Fix trigger WHEN conditions when both BEFORE and + AFTER triggers exist (Tom Lane) - Evaluation of WHEN conditions for AFTER ROW - UPDATE triggers could crash if there had been a BEFORE - ROW trigger fired for the same update. + Evaluation of WHEN conditions for AFTER ROW + UPDATE triggers could crash if there had been a BEFORE + ROW trigger fired for the same update. 
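A minimal sketch of the trigger combination covered by the last fix above, assuming a hypothetical accounts table and trigger functions:

    CREATE TRIGGER accounts_before
        BEFORE UPDATE ON accounts
        FOR EACH ROW EXECUTE PROCEDURE accounts_before_fn();

    CREATE TRIGGER accounts_audit
        AFTER UPDATE ON accounts
        FOR EACH ROW
        WHEN (OLD.balance IS DISTINCT FROM NEW.balance)
        EXECUTE PROCEDURE accounts_audit_fn();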
@@ -6202,7 +6202,7 @@ - Allow nested EXISTS queries to be optimized properly (Tom + Allow nested EXISTS queries to be optimized properly (Tom Lane) @@ -6222,19 +6222,19 @@ - Fix EXPLAIN to handle gating Result nodes within + Fix EXPLAIN to handle gating Result nodes within inner-indexscan subplans (Tom Lane) - The usual symptom of this oversight was bogus varno errors. + The usual symptom of this oversight was bogus varno errors. - Fix btree preprocessing of indexedcol IS - NULL conditions (Dean Rasheed) + Fix btree preprocessing of indexedcol IS + NULL conditions (Dean Rasheed) @@ -6257,13 +6257,13 @@ - Fix dump bug for VALUES in a view (Tom Lane) + Fix dump bug for VALUES in a view (Tom Lane) - Disallow SELECT FOR UPDATE/SHARE on sequences (Tom Lane) + Disallow SELECT FOR UPDATE/SHARE on sequences (Tom Lane) @@ -6273,8 +6273,8 @@ - Fix VACUUM so that it always updates - pg_class.reltuples/relpages (Tom + Fix VACUUM so that it always updates + pg_class.reltuples/relpages (Tom Lane) @@ -6293,7 +6293,7 @@ - Fix cases where CLUSTER might attempt to access + Fix cases where CLUSTER might attempt to access already-removed TOAST data (Tom Lane) @@ -6308,7 +6308,7 @@ Fix portability bugs in use of credentials control messages for - peer authentication (Tom Lane) + peer authentication (Tom Lane) @@ -6320,20 +6320,20 @@ The typical symptom of this problem was The function requested is - not supported errors during SSPI login. + not supported errors during SSPI login. Fix failure when adding a new variable of a custom variable class to - postgresql.conf (Tom Lane) + postgresql.conf (Tom Lane) - Throw an error if pg_hba.conf contains hostssl + Throw an error if pg_hba.conf contains hostssl but SSL is disabled (Tom Lane) @@ -6345,19 +6345,19 @@ - Fix failure when DROP OWNED BY attempts to remove default + Fix failure when DROP OWNED BY attempts to remove default privileges on sequences (Shigeru Hanada) - Fix typo in pg_srand48 seed initialization (Andres Freund) + Fix typo in pg_srand48 seed initialization (Andres Freund) This led to failure to use all bits of the provided seed. This function - is not used on most platforms (only those without srandom), + is not used on most platforms (only those without srandom), and the potential security exposure from a less-random-than-expected seed seems minimal in any case. @@ -6365,25 +6365,25 @@ - Avoid integer overflow when the sum of LIMIT and - OFFSET values exceeds 2^63 (Heikki Linnakangas) + Avoid integer overflow when the sum of LIMIT and + OFFSET values exceeds 2^63 (Heikki Linnakangas) - Add overflow checks to int4 and int8 versions of - generate_series() (Robert Haas) + Add overflow checks to int4 and int8 versions of + generate_series() (Robert Haas) - Fix trailing-zero removal in to_char() (Marti Raudsepp) + Fix trailing-zero removal in to_char() (Marti Raudsepp) - In a format with FM and no digit positions + In a format with FM and no digit positions after the decimal point, zeroes to the left of the decimal point could be removed incorrectly. @@ -6391,7 +6391,7 @@ - Fix pg_size_pretty() to avoid overflow for inputs close to + Fix pg_size_pretty() to avoid overflow for inputs close to 2^63 (Tom Lane) @@ -6409,19 +6409,19 @@ - Correctly handle quotes in locale names during initdb + Correctly handle quotes in locale names during initdb (Heikki Linnakangas) The case can arise with some Windows locales, such as People's - Republic of China. + Republic of China. 
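Hedged queries exercising the overflow-related fixes above, using values near 2^63:

    SELECT pg_size_pretty(9223372036854775807::bigint);
    SELECT count(*) FROM generate_series(9223372036854775805::bigint,
                                         9223372036854775807::bigint);
    SELECT 1 LIMIT 9223372036854775806 OFFSET 9223372036854775806;  -- sum exceeds 2^63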
- In pg_upgrade, avoid dumping orphaned temporary tables + In pg_upgrade, avoid dumping orphaned temporary tables (Bruce Momjian) @@ -6433,54 +6433,54 @@ - Fix pg_upgrade to preserve toast tables' relfrozenxids + Fix pg_upgrade to preserve toast tables' relfrozenxids during an upgrade from 8.3 (Bruce Momjian) - Failure to do this could lead to pg_clog files being + Failure to do this could lead to pg_clog files being removed too soon after the upgrade. - In pg_upgrade, fix the -l (log) option to + In pg_upgrade, fix the -l (log) option to work on Windows (Bruce Momjian) - In pg_ctl, support silent mode for service registrations + In pg_ctl, support silent mode for service registrations on Windows (MauMau) - Fix psql's counting of script file line numbers during - COPY from a different file (Tom Lane) + Fix psql's counting of script file line numbers during + COPY from a different file (Tom Lane) - Fix pg_restore's direct-to-database mode for - standard_conforming_strings (Tom Lane) + Fix pg_restore's direct-to-database mode for + standard_conforming_strings (Tom Lane) - pg_restore could emit incorrect commands when restoring + pg_restore could emit incorrect commands when restoring directly to a database server from an archive file that had been made - with standard_conforming_strings set to on. + with standard_conforming_strings set to on. Be more user-friendly about unsupported cases for parallel - pg_restore (Tom Lane) + pg_restore (Tom Lane) @@ -6491,14 +6491,14 @@ - Fix write-past-buffer-end and memory leak in libpq's + Fix write-past-buffer-end and memory leak in libpq's LDAP service lookup code (Albe Laurenz) - In libpq, avoid failures when using nonblocking I/O + In libpq, avoid failures when using nonblocking I/O and an SSL connection (Martin Pihlak, Tom Lane) @@ -6510,36 +6510,36 @@ - In particular, the response to a server report of fork() + In particular, the response to a server report of fork() failure during SSL connection startup is now saner. - Improve libpq's error reporting for SSL failures (Tom + Improve libpq's error reporting for SSL failures (Tom Lane) - Fix PQsetvalue() to avoid possible crash when adding a new - tuple to a PGresult originally obtained from a server + Fix PQsetvalue() to avoid possible crash when adding a new + tuple to a PGresult originally obtained from a server query (Andrew Chernow) - Make ecpglib write double values with 15 digits + Make ecpglib write double values with 15 digits precision (Akira Kurosawa) - In ecpglib, be sure LC_NUMERIC setting is + In ecpglib, be sure LC_NUMERIC setting is restored after an error (Michael Meskes) @@ -6551,7 +6551,7 @@ - contrib/pg_crypto's blowfish encryption code could give + contrib/pg_crypto's blowfish encryption code could give wrong results on platforms where char is signed (which is most), leading to encrypted passwords being weaker than they should be. @@ -6559,13 +6559,13 @@ - Fix memory leak in contrib/seg (Heikki Linnakangas) + Fix memory leak in contrib/seg (Heikki Linnakangas) - Fix pgstatindex() to give consistent results for empty + Fix pgstatindex() to give consistent results for empty indexes (Tom Lane) @@ -6585,7 +6585,7 @@ - Update time zone data files to tzdata release 2011i + Update time zone data files to tzdata release 2011i for DST law changes in Canada, Egypt, Russia, Samoa, and South Sudan. @@ -6606,7 +6606,7 @@ This release contains a variety of fixes from 9.0.3. For information about new features in the 9.0 major release, see - . + . 
@@ -6618,10 +6618,10 @@ However, if your installation was upgraded from a previous major - release by running pg_upgrade, you should take + release by running pg_upgrade, you should take action to prevent possible data loss due to a now-fixed bug in - pg_upgrade. The recommended solution is to run - VACUUM FREEZE on all TOAST tables. + pg_upgrade. The recommended solution is to run + VACUUM FREEZE on all TOAST tables. More information is available at http://wiki.postgresql.org/wiki/20110408pg_upgrade_fix. @@ -6636,36 +6636,36 @@ - Fix pg_upgrade's handling of TOAST tables + Fix pg_upgrade's handling of TOAST tables (Bruce Momjian) - The pg_class.relfrozenxid value for + The pg_class.relfrozenxid value for TOAST tables was not correctly copied into the new installation - during pg_upgrade. This could later result in - pg_clog files being discarded while they were still + during pg_upgrade. This could later result in + pg_clog files being discarded while they were still needed to validate tuples in the TOAST tables, leading to - could not access status of transaction failures. + could not access status of transaction failures. This error poses a significant risk of data loss for installations - that have been upgraded with pg_upgrade. This patch - corrects the problem for future uses of pg_upgrade, + that have been upgraded with pg_upgrade. This patch + corrects the problem for future uses of pg_upgrade, but does not in itself cure the issue in installations that have been - processed with a buggy version of pg_upgrade. + processed with a buggy version of pg_upgrade. - Suppress incorrect PD_ALL_VISIBLE flag was incorrectly set + Suppress incorrect PD_ALL_VISIBLE flag was incorrectly set warning (Heikki Linnakangas) - VACUUM would sometimes issue this warning in cases that + VACUUM would sometimes issue this warning in cases that are actually valid. @@ -6680,8 +6680,8 @@ All retryable conflict errors now have an error code that indicates that a retry is possible. Also, session closure due to the database being dropped on the master is now reported as - ERRCODE_DATABASE_DROPPED, rather than - ERRCODE_ADMIN_SHUTDOWN, so that connection poolers can + ERRCODE_DATABASE_DROPPED, rather than + ERRCODE_ADMIN_SHUTDOWN, so that connection poolers can handle the situation correctly. @@ -6726,15 +6726,15 @@ - Fix dangling-pointer problem in BEFORE ROW UPDATE trigger + Fix dangling-pointer problem in BEFORE ROW UPDATE trigger handling when there was a concurrent update to the target tuple (Tom Lane) This bug has been observed to result in intermittent cannot - extract system attribute from virtual tuple failures while trying to - do UPDATE RETURNING ctid. There is a very small probability + extract system attribute from virtual tuple failures while trying to + do UPDATE RETURNING ctid. There is a very small probability of more serious errors, such as generating incorrect index entries for the updated tuple. @@ -6742,25 +6742,25 @@ - Disallow DROP TABLE when there are pending deferred trigger + Disallow DROP TABLE when there are pending deferred trigger events for the table (Tom Lane) - Formerly the DROP would go through, leading to - could not open relation with OID nnn errors when the + Formerly the DROP would go through, leading to + could not open relation with OID nnn errors when the triggers were eventually fired. 
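A minimal sketch of how the recommended VACUUM FREEZE of TOAST tables might be generated; this helper is an assumption for illustration, not the procedure from the wiki page cited above:

    -- Produces one VACUUM FREEZE command per TOAST table; run the output manually.
    SELECT 'VACUUM FREEZE pg_toast.' || quote_ident(relname) || ';'
    FROM pg_class
    WHERE relkind = 't';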
- Allow replication as a user name in - pg_hba.conf (Andrew Dunstan) + Allow replication as a user name in + pg_hba.conf (Andrew Dunstan) - replication is special in the database name column, but it + replication is special in the database name column, but it was mistakenly also treated as special in the user name column. @@ -6781,13 +6781,13 @@ - Fix handling of SELECT FOR UPDATE in a sub-SELECT + Fix handling of SELECT FOR UPDATE in a sub-SELECT (Tom Lane) This bug typically led to cannot extract system attribute from - virtual tuple errors. + virtual tuple errors. @@ -6813,7 +6813,7 @@ - Allow libpq's SSL initialization to succeed when + Allow libpq's SSL initialization to succeed when user's home directory is unavailable (Tom Lane) @@ -6826,34 +6826,34 @@ - Fix libpq to return a useful error message for errors - detected in conninfo_array_parse (Joseph Adams) + Fix libpq to return a useful error message for errors + detected in conninfo_array_parse (Joseph Adams) A typo caused the library to return NULL, rather than the - PGconn structure containing the error message, to the + PGconn structure containing the error message, to the application. - Fix ecpg preprocessor's handling of float constants + Fix ecpg preprocessor's handling of float constants (Heikki Linnakangas) - Fix parallel pg_restore to handle comments on + Fix parallel pg_restore to handle comments on POST_DATA items correctly (Arnd Hannemann) - Fix pg_restore to cope with long lines (over 1KB) in + Fix pg_restore to cope with long lines (over 1KB) in TOC files (Tom Lane) @@ -6899,14 +6899,14 @@ - Fix version-incompatibility problem with libintl on + Fix version-incompatibility problem with libintl on Windows (Hiroshi Inoue) - Fix usage of xcopy in Windows build scripts to + Fix usage of xcopy in Windows build scripts to work correctly under Windows 7 (Andrew Dunstan) @@ -6917,14 +6917,14 @@ - Fix path separator used by pg_regress on Cygwin + Fix path separator used by pg_regress on Cygwin (Andrew Dunstan) - Update time zone data files to tzdata release 2011f + Update time zone data files to tzdata release 2011f for DST law changes in Chile, Cuba, Falkland Islands, Morocco, Samoa, and Turkey; also historical corrections for South Australia, Alaska, and Hawaii. @@ -6947,7 +6947,7 @@ This release contains a variety of fixes from 9.0.2. For information about new features in the 9.0 major release, see - . + . @@ -6966,7 +6966,7 @@ - Before exiting walreceiver, ensure all the received WAL + Before exiting walreceiver, ensure all the received WAL is fsync'd to disk (Heikki Linnakangas) @@ -6978,27 +6978,27 @@ - Avoid excess fsync activity in walreceiver + Avoid excess fsync activity in walreceiver (Heikki Linnakangas) - Make ALTER TABLE revalidate uniqueness and exclusion + Make ALTER TABLE revalidate uniqueness and exclusion constraints when needed (Noah Misch) This was broken in 9.0 by a change that was intended to suppress - revalidation during VACUUM FULL and CLUSTER, - but unintentionally affected ALTER TABLE as well. + revalidation during VACUUM FULL and CLUSTER, + but unintentionally affected ALTER TABLE as well. 
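A hedged sketch of the query shape fixed above (FOR UPDATE inside a sub-SELECT), with a hypothetical queue table:

    SELECT *
    FROM (SELECT * FROM queue WHERE state = 'ready' FOR UPDATE) AS q
    LIMIT 1;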
- Fix EvalPlanQual for UPDATE of an inheritance tree in which + Fix EvalPlanQual for UPDATE of an inheritance tree in which the tables are not all alike (Tom Lane) @@ -7013,15 +7013,15 @@ - Avoid failures when EXPLAIN tries to display a simple-form - CASE expression (Tom Lane) + Avoid failures when EXPLAIN tries to display a simple-form + CASE expression (Tom Lane) - If the CASE's test expression was a constant, the planner - could simplify the CASE into a form that confused the + If the CASE's test expression was a constant, the planner + could simplify the CASE into a form that confused the expression-display code, resulting in unexpected CASE WHEN - clause errors. + clause errors. @@ -7046,8 +7046,8 @@ - The date type supports a wider range of dates than can be - represented by the timestamp types, but the planner assumed it + The date type supports a wider range of dates than can be + represented by the timestamp types, but the planner assumed it could always convert a date to timestamp with impunity. @@ -7060,29 +7060,29 @@ - Remove ecpg's fixed length limit for constants defining + Remove ecpg's fixed length limit for constants defining an array dimension (Michael Meskes) - Fix erroneous parsing of tsquery values containing + Fix erroneous parsing of tsquery values containing ... & !(subexpression) | ... (Tom Lane) Queries containing this combination of operators were not executed - correctly. The same error existed in contrib/intarray's - query_int type and contrib/ltree's - ltxtquery type. + correctly. The same error existed in contrib/intarray's + query_int type and contrib/ltree's + ltxtquery type. - Fix buffer overrun in contrib/intarray's input function - for the query_int type (Apple) + Fix buffer overrun in contrib/intarray's input function + for the query_int type (Apple) @@ -7094,16 +7094,16 @@ - Fix bug in contrib/seg's GiST picksplit algorithm + Fix bug in contrib/seg's GiST picksplit algorithm (Alexander Korotkov) This could result in considerable inefficiency, though not actually - incorrect answers, in a GiST index on a seg column. - If you have such an index, consider REINDEXing it after + incorrect answers, in a GiST index on a seg column. + If you have such an index, consider REINDEXing it after installing this update. (This is identical to the bug that was fixed in - contrib/cube in the previous update.) + contrib/cube in the previous update.) @@ -7123,7 +7123,7 @@ This release contains a variety of fixes from 9.0.1. For information about new features in the 9.0 major release, see - . + . @@ -7143,23 +7143,23 @@ Force the default - wal_sync_method - to be fdatasync on Linux (Tom Lane, Marti Raudsepp) + wal_sync_method + to be fdatasync on Linux (Tom Lane, Marti Raudsepp) - The default on Linux has actually been fdatasync for many - years, but recent kernel changes caused PostgreSQL to - choose open_datasync instead. This choice did not result + The default on Linux has actually been fdatasync for many + years, but recent kernel changes caused PostgreSQL to + choose open_datasync instead. This choice did not result in any performance improvement, and caused outright failures on - certain filesystems, notably ext4 with the - data=journal mount option. + certain filesystems, notably ext4 with the + data=journal mount option. 
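Hedged follow-ups to the items above; the index name is hypothetical:

    SHOW wal_sync_method;                              -- now fdatasync by default on Linux
    SELECT to_tsquery('english', 'a & !(b | c) | d');  -- operator mix that was misparsed
    REINDEX INDEX seg_data_idx;                        -- rebuild a seg-column GiST index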
- Fix too many KnownAssignedXids error during Hot Standby + Fix too many KnownAssignedXids error during Hot Standby replay (Heikki Linnakangas) @@ -7188,7 +7188,7 @@ - This could result in bad buffer id: 0 failures or + This could result in bad buffer id: 0 failures or corruption of index contents during replication. @@ -7214,7 +7214,7 @@ - The effective vacuum_cost_limit for an autovacuum worker + The effective vacuum_cost_limit for an autovacuum worker could drop to nearly zero if it processed enough tables, causing it to run extremely slowly. @@ -7240,19 +7240,19 @@ - Add support for detecting register-stack overrun on IA64 + Add support for detecting register-stack overrun on IA64 (Tom Lane) - The IA64 architecture has two hardware stacks. Full + The IA64 architecture has two hardware stacks. Full prevention of stack-overrun failures requires checking both. - Add a check for stack overflow in copyObject() (Tom Lane) + Add a check for stack overflow in copyObject() (Tom Lane) @@ -7268,7 +7268,7 @@ - It is possible to have a concurrent page split in a + It is possible to have a concurrent page split in a temporary index, if for example there is an open cursor scanning the index when an insertion is done. GiST failed to detect this case and hence could deliver wrong results when execution of the cursor @@ -7295,16 +7295,16 @@ Certain cases where a large number of tuples needed to be read in - advance, but work_mem was large enough to allow them all + advance, but work_mem was large enough to allow them all to be held in memory, were unexpectedly slow. - percent_rank(), cume_dist() and - ntile() in particular were subject to this problem. + percent_rank(), cume_dist() and + ntile() in particular were subject to this problem. - Avoid memory leakage while ANALYZE'ing complex index + Avoid memory leakage while ANALYZE'ing complex index expressions (Tom Lane) @@ -7316,21 +7316,21 @@ - An index declared like create index i on t (foo(t.*)) + An index declared like create index i on t (foo(t.*)) would not automatically get dropped when its table was dropped. - Add missing support in DROP OWNED BY for removing foreign + Add missing support in DROP OWNED BY for removing foreign data wrapper/server privileges belonging to a user (Heikki Linnakangas) - Do not inline a SQL function with multiple OUT + Do not inline a SQL function with multiple OUT parameters (Tom Lane) @@ -7349,28 +7349,28 @@ - Behave correctly if ORDER BY, LIMIT, - FOR UPDATE, or WITH is attached to the - VALUES part of INSERT ... VALUES (Tom Lane) + Behave correctly if ORDER BY, LIMIT, + FOR UPDATE, or WITH is attached to the + VALUES part of INSERT ... VALUES (Tom Lane) - Make the OFF keyword unreserved (Heikki Linnakangas) + Make the OFF keyword unreserved (Heikki Linnakangas) - This prevents problems with using off as a variable name in - PL/pgSQL. That worked before 9.0, but was now broken - because PL/pgSQL now treats all core reserved words + This prevents problems with using off as a variable name in + PL/pgSQL. That worked before 9.0, but was now broken + because PL/pgSQL now treats all core reserved words as reserved. 
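A minimal check that off works again as a PL/pgSQL variable name, using an anonymous DO block:

    DO $$
    DECLARE
        off integer := 0;
    BEGIN
        off := off + 1;
        RAISE NOTICE 'off = %', off;
    END
    $$;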
- Fix constant-folding of COALESCE() expressions (Tom Lane) + Fix constant-folding of COALESCE() expressions (Tom Lane) @@ -7381,7 +7381,7 @@ - Fix could not find pathkey item to sort planner failure + Fix could not find pathkey item to sort planner failure with comparison of whole-row Vars (Tom Lane) @@ -7389,7 +7389,7 @@ Fix postmaster crash when connection acceptance - (accept() or one of the calls made immediately after it) + (accept() or one of the calls made immediately after it) fails, and the postmaster was compiled with GSSAPI support (Alexander Chernikov) @@ -7408,7 +7408,7 @@ - Fix missed unlink of temporary files when log_temp_files + Fix missed unlink of temporary files when log_temp_files is active (Tom Lane) @@ -7420,11 +7420,11 @@ - Add print functionality for InhRelation nodes (Tom Lane) + Add print functionality for InhRelation nodes (Tom Lane) - This avoids a failure when debug_print_parse is enabled + This avoids a failure when debug_print_parse is enabled and certain types of query are executed. @@ -7444,46 +7444,46 @@ Fix incorrect calculation of transaction status in - ecpg (Itagaki Takahiro) + ecpg (Itagaki Takahiro) - Fix errors in psql's Unicode-escape support (Tom Lane) + Fix errors in psql's Unicode-escape support (Tom Lane) - Speed up parallel pg_restore when the archive + Speed up parallel pg_restore when the archive contains many large objects (blobs) (Tom Lane) - Fix PL/pgSQL's handling of simple + Fix PL/pgSQL's handling of simple expressions to not fail in recursion or error-recovery cases (Tom Lane) - Fix PL/pgSQL's error reporting for no-such-column + Fix PL/pgSQL's error reporting for no-such-column cases (Tom Lane) As of 9.0, it would sometimes report missing FROM-clause entry - for table foo when record foo has no field bar would be + for table foo when record foo has no field bar would be more appropriate. - Fix PL/Python to honor typmod (i.e., length or + Fix PL/Python to honor typmod (i.e., length or precision restrictions) when assigning to tuple fields (Tom Lane) @@ -7494,7 +7494,7 @@ - Fix PL/Python's handling of set-returning functions + Fix PL/Python's handling of set-returning functions (Jan Urbanski) @@ -7506,22 +7506,22 @@ - Fix bug in contrib/cube's GiST picksplit algorithm + Fix bug in contrib/cube's GiST picksplit algorithm (Alexander Korotkov) This could result in considerable inefficiency, though not actually - incorrect answers, in a GiST index on a cube column. - If you have such an index, consider REINDEXing it after + incorrect answers, in a GiST index on a cube column. + If you have such an index, consider REINDEXing it after installing this update. - Don't emit identifier will be truncated notices in - contrib/dblink except when creating new connections + Don't emit identifier will be truncated notices in + contrib/dblink except when creating new connections (Itagaki Takahiro) @@ -7529,26 +7529,26 @@ Fix potential coredump on missing public key in - contrib/pgcrypto (Marti Raudsepp) + contrib/pgcrypto (Marti Raudsepp) - Fix buffer overrun in contrib/pg_upgrade (Hernan Gonzalez) + Fix buffer overrun in contrib/pg_upgrade (Hernan Gonzalez) - Fix memory leak in contrib/xml2's XPath query functions + Fix memory leak in contrib/xml2's XPath query functions (Tom Lane) - Update time zone data files to tzdata release 2010o + Update time zone data files to tzdata release 2010o for DST law changes in Fiji and Samoa; also historical corrections for Hong Kong. @@ -7570,7 +7570,7 @@ This release contains a variety of fixes from 9.0.0. 
For information about new features in the 9.0 major release, see - . + . @@ -7597,7 +7597,7 @@ This change prevents security problems that can be caused by subverting Perl or Tcl code that will be executed later in the same session under another SQL user identity (for example, within a SECURITY - DEFINER function). Most scripting languages offer numerous ways that + DEFINER function). Most scripting languages offer numerous ways that that might be done, such as redefining standard functions or operators called by the target function. Without this change, any SQL user with Perl or Tcl language usage rights can do essentially anything with the @@ -7626,7 +7626,7 @@ - Improve pg_get_expr() security fix so that the function + Improve pg_get_expr() security fix so that the function can still be used on the output of a sub-select (Tom Lane) @@ -7651,7 +7651,7 @@ - Fix possible duplicate scans of UNION ALL member relations + Fix possible duplicate scans of UNION ALL member relations (Tom Lane) @@ -7676,14 +7676,14 @@ - Input such as 'J100000'::date worked before 8.4, + Input such as 'J100000'::date worked before 8.4, but was unintentionally broken by added error-checking. - Make psql recognize DISCARD ALL as a command that should + Make psql recognize DISCARD ALL as a command that should not be encased in a transaction block in autocommit-off mode (Itagaki Takahiro) @@ -7714,12 +7714,12 @@ This release of - PostgreSQL adds features that have been requested + PostgreSQL adds features that have been requested for years, such as easy-to-use replication, a mass permission-changing facility, and anonymous code blocks. While past major releases have been conservative in their scope, this release shows a bold new desire to provide facilities that new and existing - users of PostgreSQL will embrace. This has all + users of PostgreSQL will embrace. This has all been done with few incompatibilities. Major enhancements include: @@ -7732,7 +7732,7 @@ Built-in replication based on log shipping. This advance consists of two features: Streaming Replication, allowing continuous archive - (WAL) files to be streamed over a network connection to a + (WAL) files to be streamed over a network connection to a standby server, and Hot Standby, allowing continuous archive standby servers to execute read-only queries. The net effect is to support a single master with multiple read-only slave servers. @@ -7742,10 +7742,10 @@ Easier database object permissions management. GRANT/REVOKE IN - SCHEMA supports mass permissions changes on existing objects, - while ALTER DEFAULT - PRIVILEGES allows control of privileges for objects created in + linkend="sql-grant">GRANT/REVOKE IN + SCHEMA supports mass permissions changes on existing objects, + while ALTER DEFAULT + PRIVILEGES allows control of privileges for objects created in the future. Large objects (BLOBs) now support permissions management as well. @@ -7754,8 +7754,8 @@ Broadly enhanced stored procedure support. - The DO statement supports - ad-hoc or anonymous code blocks. + The DO statement supports + ad-hoc or anonymous code blocks. Functions can now be called using named parameters. PL/pgSQL is now installed by default, and PL/Perl and Full support for 64-bit - Windows. + Windows. More advanced reporting queries, including additional windowing options - (PRECEDING and FOLLOWING) and the ability to + (PRECEDING and FOLLOWING) and the ability to control the order in which values are fed to aggregate functions. 
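A hedged sketch of the permission-management and named-parameter features listed above; the role, schema, and function names are invented for illustration:

    GRANT SELECT ON ALL TABLES IN SCHEMA reporting TO analyst;

    ALTER DEFAULT PRIVILEGES IN SCHEMA reporting
        GRANT SELECT ON TABLES TO analyst;

    SELECT make_label(title := 'hello', width := 20);  -- named parameter notation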
@@ -7783,14 +7783,14 @@ New trigger features, including SQL-standard-compliant per-column triggers and + linkend="sql-createtrigger">per-column triggers and conditional trigger execution. - Deferrable + Deferrable unique constraints. Mass updates to unique keys are now possible without trickery. @@ -7808,7 +7808,7 @@ New and enhanced security features, including RADIUS authentication, LDAP authentication improvements, and a new contrib module - passwordcheck + passwordcheck for testing password strength. @@ -7816,10 +7816,10 @@ New high-performance implementation of the - LISTEN/NOTIFY feature. + LISTEN/NOTIFY feature. Pending events are now stored in a memory-based queue rather than - a table. Also, a payload string can be sent with each + a table. Also, a payload string can be sent with each event, rather than transmitting just an event name as before. @@ -7827,7 +7827,7 @@ New implementation of - VACUUM FULL. + VACUUM FULL. This command now rewrites the entire table and indexes, rather than moving individual rows to compact space. It is substantially faster in most cases, and no longer results in index bloat. @@ -7837,7 +7837,7 @@ New contrib module - pg_upgrade + pg_upgrade to support in-place upgrades from 8.3 or 8.4 to 9.0. @@ -7853,7 +7853,7 @@ - EXPLAIN enhancements. + EXPLAIN enhancements. The output is now available in JSON, XML, or YAML format, and includes buffer utilization and other data not previously available. @@ -7861,7 +7861,7 @@ - hstore improvements, + hstore improvements, including new functions and greater data capacity. @@ -7901,34 +7901,34 @@ - Remove server parameter add_missing_from, which was + Remove server parameter add_missing_from, which was defaulted to off for many years (Tom Lane) - Remove server parameter regex_flavor, which + Remove server parameter regex_flavor, which was defaulted to advanced + linkend="posix-syntax-details">advanced for many years (Tom Lane) - archive_mode + archive_mode now only affects archive_command; + linkend="guc-archive-command">archive_command; a new setting, wal_level, affects + linkend="guc-wal-level">wal_level, affects the contents of the write-ahead log (Heikki Linnakangas) - log_temp_files + log_temp_files now uses default file size units of kilobytes (Robert Haas) @@ -7967,13 +7967,13 @@ - bytea output now + bytea output now appears in hex format by default (Peter Eisentraut) The server parameter bytea_output can be + linkend="guc-bytea-output">bytea_output can be used to select the traditional output format if needed for compatibility. @@ -7995,18 +7995,18 @@ Improve standards compliance of SIMILAR TO - patterns and SQL-style substring() patterns (Tom Lane) + linkend="functions-similarto-regexp">SIMILAR TO + patterns and SQL-style substring() patterns (Tom Lane) - This includes treating ? and {...} as + This includes treating ? and {...} as pattern metacharacters, while they were simple literal characters before; that corresponds to new features added in SQL:2008. - Also, ^ and $ are now treated as simple + Also, ^ and $ are now treated as simple literal characters; formerly they were treated as metacharacters, as if the pattern were following POSIX rather than SQL rules. - Also, in SQL-standard substring(), use of parentheses + Also, in SQL-standard substring(), use of parentheses for nesting no longer interferes with capturing of a substring. Also, processing of bracket expressions (character classes) is now more standards-compliant. 
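A short session illustrating the bytea output change described above:

    SELECT E'\\000abc'::bytea;     -- \x00616263 under the new hex default
    SET bytea_output = 'escape';   -- per-session switch back to the old format
    SELECT E'\\000abc'::bytea;     -- \000abc again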
@@ -8016,14 +8016,14 @@ Reject negative length values in 3-parameter substring() + linkend="functions-string-sql">substring() for bit strings, per the SQL standard (Tom Lane) - Make date_trunc truncate rather than round when reducing + Make date_trunc truncate rather than round when reducing precision of fractional seconds (Tom Lane) @@ -8044,7 +8044,7 @@ - Tighten enforcement of column name consistency during RENAME + Tighten enforcement of column name consistency during RENAME when a child table inherits the same column from multiple unrelated parents (KaiGai Kohei) @@ -8100,8 +8100,8 @@ situations. Although it's recommended that functions encountering this type of error be modified to remove the conflict, the old behavior can be restored if necessary via the configuration parameter plpgsql.variable_conflict, - or via the per-function option #variable_conflict. + linkend="plpgsql-var-subst">plpgsql.variable_conflict, + or via the per-function option #variable_conflict. @@ -8126,8 +8126,8 @@ For example, if a column of the result type is declared as - NUMERIC(30,2), it is no longer acceptable to return a - NUMERIC of some other precision in that column. Previous + NUMERIC(30,2), it is no longer acceptable to return a + NUMERIC of some other precision in that column. Previous versions neglected to check the type modifier and would thus allow result rows that didn't actually conform to the declared restrictions. @@ -8141,33 +8141,33 @@ Formerly, a statement like - SELECT ... INTO rec.fld FROM ... + SELECT ... INTO rec.fld FROM ... was treated as a scalar assignment even if the record field - fld was of composite type. Now it is treated as a - record assignment, the same as when the INTO target is a + fld was of composite type. Now it is treated as a + record assignment, the same as when the INTO target is a regular variable of composite type. So the values to be assigned to the field's subfields should be written as separate columns of the - SELECT list, not as a ROW(...) construct as in + SELECT list, not as a ROW(...) construct as in previous versions. If you need to do this in a way that will work in both 9.0 and previous releases, you can write something like - rec.fld := ROW(...) FROM .... + rec.fld := ROW(...) FROM .... - Remove PL/pgSQL's RENAME declaration (Tom Lane) + Remove PL/pgSQL's RENAME declaration (Tom Lane) - Instead of RENAME, use ALIAS, + Instead of RENAME, use ALIAS, which can now create an alias for any variable, not only dollar sign - parameter names (such as $1) as before. + parameter names (such as $1) as before. @@ -8181,11 +8181,11 @@ - Deprecate use of => as an operator name (Robert Haas) + Deprecate use of => as an operator name (Robert Haas) - Future versions of PostgreSQL will probably reject + Future versions of PostgreSQL will probably reject this operator name entirely, in order to support the SQL-standard notation for named function parameters. For the moment, it is still allowed, but a warning is emitted when such an operator is @@ -8240,7 +8240,7 @@ This feature is called Hot Standby. There are new - postgresql.conf and recovery.conf + postgresql.conf and recovery.conf settings to control this feature, as well as extensive documentation. @@ -8248,18 +8248,18 @@ - Allow write-ahead log (WAL) data to be streamed to a + Allow write-ahead log (WAL) data to be streamed to a standby server (Fujii Masao, Heikki Linnakangas) This feature is called Streaming Replication. 
- Previously WAL data could be sent to standby servers only - in units of entire WAL files (normally 16 megabytes each). + Previously WAL data could be sent to standby servers only + in units of entire WAL files (normally 16 megabytes each). Streaming Replication eliminates this inefficiency and allows updates on the master to be propagated to standby servers with very little - delay. There are new postgresql.conf and - recovery.conf settings to control this feature, as well as + delay. There are new postgresql.conf and + recovery.conf settings to control this feature, as well as extensive documentation. @@ -8267,9 +8267,9 @@ Add pg_last_xlog_receive_location() - and pg_last_xlog_replay_location(), which - can be used to monitor standby server WAL + linkend="functions-recovery-info-table">pg_last_xlog_receive_location() + and pg_last_xlog_replay_location(), which + can be used to monitor standby server WAL activity (Simon Riggs, Fujii Masao, Heikki Linnakangas) @@ -8286,9 +8286,9 @@ Allow per-tablespace values to be set for sequential and random page - cost estimates (seq_page_cost/random_page_cost) - via ALTER TABLESPACE - ... SET/RESET (Robert Haas) + cost estimates (seq_page_cost/random_page_cost) + via ALTER TABLESPACE + ... SET/RESET (Robert Haas) @@ -8299,8 +8299,8 @@ - UPDATE, DELETE, and SELECT FOR - UPDATE/SHARE queries that involve joins will now behave much better + UPDATE, DELETE, and SELECT FOR + UPDATE/SHARE queries that involve joins will now behave much better when encountering freshly-updated rows. @@ -8308,7 +8308,7 @@ Improve performance of TRUNCATE when + linkend="sql-truncate">TRUNCATE when the table was created or truncated earlier in the same transaction (Tom Lane) @@ -8345,12 +8345,12 @@ - Allow IS NOT NULL restrictions to use indexes (Tom Lane) + Allow IS NOT NULL restrictions to use indexes (Tom Lane) This is particularly useful for finding - MAX()/MIN() values in indexes that + MAX()/MIN() values in indexes that contain many null values. @@ -8358,7 +8358,7 @@ Improve the optimizer's choices about when to use materialize nodes, - and when to use sorting versus hashing for DISTINCT + and when to use sorting versus hashing for DISTINCT (Tom Lane) @@ -8366,7 +8366,7 @@ Improve the optimizer's equivalence detection for expressions involving - boolean <> operators (Tom Lane) + boolean <> operators (Tom Lane) @@ -8387,7 +8387,7 @@ While the Genetic Query Optimizer (GEQO) still selects random plans, it now always selects the same random plans for identical queries, thus giving more consistent performance. You can modify geqo_seed to experiment with + linkend="guc-geqo-seed">geqo_seed to experiment with alternative plans. @@ -8398,7 +8398,7 @@ - This avoids the rare error failed to make a valid plan, + This avoids the rare error failed to make a valid plan, and should also improve planning speed. @@ -8414,7 +8414,7 @@ - Improve ANALYZE + Improve ANALYZE to support inheritance-tree statistics (Tom Lane) @@ -8451,14 +8451,14 @@ Allow setting of number-of-distinct-values statistics using ALTER TABLE + linkend="sql-altertable">ALTER TABLE (Robert Haas) This allows users to override the estimated number or percentage of distinct values for a column. This statistic is normally computed by - ANALYZE, but the estimate can be poor, especially on tables + ANALYZE, but the estimate can be poor, especially on tables with very large numbers of rows. 
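Hedged examples of the monitoring and tuning hooks above; the tablespace, table, and column names are hypothetical:

    SELECT pg_last_xlog_receive_location(), pg_last_xlog_replay_location();

    ALTER TABLESPACE fastdisk SET (seq_page_cost = 0.5, random_page_cost = 1.5);

    ALTER TABLE big_table ALTER COLUMN customer_id SET (n_distinct = -0.05);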
@@ -8475,7 +8475,7 @@ Add support for RADIUS (Remote + linkend="auth-radius">RADIUS (Remote Authentication Dial In User Service) authentication (Magnus Hagander) @@ -8483,28 +8483,28 @@ - Allow LDAP + Allow LDAP (Lightweight Directory Access Protocol) authentication - to operate in search/bind mode + to operate in search/bind mode (Robert Fleming, Magnus Hagander) This allows the user to be looked up first, then the system uses - the DN (Distinguished Name) returned for that user. + the DN (Distinguished Name) returned for that user. Add samehost - and samenet designations to - pg_hba.conf (Stef Walter) + linkend="auth-pg-hba-conf">samehost + and samenet designations to + pg_hba.conf (Stef Walter) - These match the server's IP address and subnet address + These match the server's IP address and subnet address respectively. @@ -8530,7 +8530,7 @@ Add the ability for clients to set an application name, which is displayed in - pg_stat_activity (Dave Page) + pg_stat_activity (Dave Page) @@ -8541,8 +8541,8 @@ - Add a SQLSTATE option (%e) to log_line_prefix + Add a SQLSTATE option (%e) to log_line_prefix (Guillaume Smet) @@ -8555,7 +8555,7 @@ - Write to the Windows event log in UTF16 encoding + Write to the Windows event log in UTF16 encoding (Itagaki Takahiro) @@ -8577,7 +8577,7 @@ Add pg_stat_reset_shared('bgwriter') + linkend="monitoring-stats-funcs-table">pg_stat_reset_shared('bgwriter') to reset the cluster-wide shared statistics for the background writer (Greg Smith) @@ -8586,8 +8586,8 @@ Add pg_stat_reset_single_table_counters() - and pg_stat_reset_single_function_counters() + linkend="monitoring-stats-funcs-table">pg_stat_reset_single_table_counters() + and pg_stat_reset_single_function_counters() to allow resetting the statistics counters for individual tables and functions (Magnus Hagander) @@ -8612,10 +8612,10 @@ Previously only per-database and per-role settings were possible, not combinations. All role and database settings are now stored - in the new pg_db_role_setting system catalog. A new - psql command \drds shows these settings. - The legacy system views pg_roles, - pg_shadow, and pg_user + in the new pg_db_role_setting system catalog. A new + psql command \drds shows these settings. + The legacy system views pg_roles, + pg_shadow, and pg_user do not show combination settings, and therefore no longer completely represent the configuration for a user or database. @@ -8624,9 +8624,9 @@ Add server parameter bonjour, which + linkend="guc-bonjour">bonjour, which controls whether a Bonjour-enabled server advertises - itself via Bonjour (Tom Lane) + itself via Bonjour (Tom Lane) @@ -8639,7 +8639,7 @@ Add server parameter enable_material, which + linkend="guc-enable-material">enable_material, which controls the use of materialize nodes in the optimizer (Robert Haas) @@ -8654,7 +8654,7 @@ Change server parameter log_temp_files to + linkend="guc-log-temp-files">log_temp_files to use default file size units of kilobytes (Robert Haas) @@ -8666,14 +8666,14 @@ - Log changes of parameter values when postgresql.conf is + Log changes of parameter values when postgresql.conf is reloaded (Peter Eisentraut) This lets administrators and security staff audit changes of database settings, and is also very convenient for checking the effects of - postgresql.conf edits. + postgresql.conf edits. @@ -8685,10 +8685,10 @@ Non-superusers can no longer issue ALTER - ROLE/DATABASE SET for parameters that are not currently + ROLE/DATABASE SET for parameters that are not currently known to the server. 
This allows the server to correctly check that superuser-only parameters are only set by superusers. Previously, - the SET would be allowed and then ignored at session start, + the SET would be allowed and then ignored at session start, making superuser-only custom parameters much less useful than they should be. @@ -8707,25 +8707,25 @@ - Perform SELECT - FOR UPDATE/SHARE processing after - applying LIMIT, so the number of rows returned + Perform SELECT + FOR UPDATE/SHARE processing after + applying LIMIT, so the number of rows returned is always predictable (Tom Lane) Previously, changes made by concurrent transactions could cause a - SELECT FOR UPDATE to unexpectedly return fewer rows than - specified by its LIMIT. FOR UPDATE in combination - with ORDER BY can still produce surprising results, but that - can be corrected by placing FOR UPDATE in a subquery. + SELECT FOR UPDATE to unexpectedly return fewer rows than + specified by its LIMIT. FOR UPDATE in combination + with ORDER BY can still produce surprising results, but that + can be corrected by placing FOR UPDATE in a subquery. Allow mixing of traditional and SQL-standard LIMIT/OFFSET + linkend="sql-limit">LIMIT/OFFSET syntax (Tom Lane) @@ -8733,20 +8733,20 @@ Extend the supported frame options in window functions (Hitoshi + linkend="sql-window">window functions (Hitoshi Harada) - Frames can now start with CURRENT ROW, and the ROWS - n PRECEDING/FOLLOWING options are now + Frames can now start with CURRENT ROW, and the ROWS + n PRECEDING/FOLLOWING options are now supported. - Make SELECT INTO and CREATE TABLE AS return + Make SELECT INTO and CREATE TABLE AS return row counts to the client in their command tags (Boszormenyi Zoltan) @@ -8769,7 +8769,7 @@ Support Unicode surrogate pairs (dual 16-bit representation) in U& + linkend="sql-syntax-strings-uescape">U& strings and identifiers (Peter Eisentraut) @@ -8777,7 +8777,7 @@ Support Unicode escapes in E'...' + linkend="sql-syntax-strings-escape">E'...' strings (Marko Kreen) @@ -8795,36 +8795,36 @@ - Speed up CREATE - DATABASE by deferring flushes to disk (Andres + Speed up CREATE + DATABASE by deferring flushes to disk (Andres Freund, Greg Stark) - Allow comments on + Allow comments on columns of tables, views, and composite types only, not other - relation types such as indexes and TOAST tables (Tom Lane) + relation types such as indexes and TOAST tables (Tom Lane) Allow the creation of enumerated types containing + linkend="sql-createtype-enum">enumerated types containing no values (Bruce Momjian) - Let values of columns having storage type MAIN remain on + Let values of columns having storage type MAIN remain on the main heap page unless the row cannot fit on a page (Kevin Grittner) - Previously MAIN values were forced out to TOAST + Previously MAIN values were forced out to TOAST tables until the row size was less than one-quarter of the page size. 
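A hedged example of the new frame options (ROWS n PRECEDING ... CURRENT ROW), with a hypothetical readings table:

    SELECT ts, val,
           avg(val) OVER (ORDER BY ts
                          ROWS BETWEEN 3 PRECEDING AND CURRENT ROW) AS moving_avg
    FROM readings;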
@@ -8832,26 +8832,26 @@ - <command>ALTER TABLE</> + <command>ALTER TABLE</command> - Implement IF EXISTS for ALTER TABLE DROP COLUMN - and ALTER TABLE DROP CONSTRAINT (Andres Freund) + Implement IF EXISTS for ALTER TABLE DROP COLUMN + and ALTER TABLE DROP CONSTRAINT (Andres Freund) - Allow ALTER TABLE commands that rewrite tables to skip - WAL logging (Itagaki Takahiro) + Allow ALTER TABLE commands that rewrite tables to skip + WAL logging (Itagaki Takahiro) Such operations either produce a new copy of the table or are rolled - back, so WAL archiving can be skipped, unless running in + back, so WAL archiving can be skipped, unless running in continuous archiving mode. This reduces I/O overhead and improves performance. @@ -8859,8 +8859,8 @@ - Fix failure of ALTER TABLE table ADD COLUMN - col serial when done by non-owner of table + Fix failure of ALTER TABLE table ADD COLUMN + col serial when done by non-owner of table (Tom Lane) @@ -8870,14 +8870,14 @@ - <link linkend="SQL-CREATETABLE"><command>CREATE TABLE</></link> + <link linkend="sql-createtable"><command>CREATE TABLE</command></link> - Add support for copying COMMENTS and STORAGE - settings in CREATE TABLE ... LIKE commands + Add support for copying COMMENTS and STORAGE + settings in CREATE TABLE ... LIKE commands (Itagaki Takahiro) @@ -8885,14 +8885,14 @@ Add a shortcut for copying all properties in CREATE - TABLE ... LIKE commands (Itagaki Takahiro) + TABLE ... LIKE commands (Itagaki Takahiro) Add the SQL-standard - CREATE TABLE ... OF type command + CREATE TABLE ... OF type command (Peter Eisentraut) @@ -8914,16 +8914,16 @@ - Add deferrable + Add deferrable unique constraints (Dean Rasheed) This allows mass updates, such as - UPDATE tab SET col = col + 1, + UPDATE tab SET col = col + 1, to work reliably on columns that have unique indexes or are marked as primary keys. - If the constraint is specified as DEFERRABLE it will be + If the constraint is specified as DEFERRABLE it will be checked at the end of the statement, rather than after each row is updated. The constraint check can also be deferred until the end of the current transaction, allowing such updates to be spread over multiple @@ -8941,8 +8941,8 @@ Exclusion constraints generalize uniqueness constraints by allowing arbitrary comparison operators, not just equality. They are created - with the CREATE - TABLE CONSTRAINT ... EXCLUDE clause. + with the CREATE + TABLE CONSTRAINT ... EXCLUDE clause. The most common use of exclusion constraints is to specify that column entries must not overlap, rather than simply not be equal. This is useful for time periods and other ranges, as well as arrays. @@ -8959,7 +8959,7 @@ For example, a uniqueness constraint violation might now report - Key (x)=(2) already exists. + Key (x)=(2) already exists. 
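Minimal sketches of the deferrable unique and exclusion constraints described above; table and column names are hypothetical, and the exclusion example assumes the built-in GiST operator class for the circle type:

    -- deferrable unique constraint: the check runs at end of statement,
    -- so the mass update shown above no longer hits transient duplicates
    CREATE TABLE tab (col int UNIQUE DEFERRABLE);
    INSERT INTO tab VALUES (1), (2), (3);
    UPDATE tab SET col = col + 1;

    -- exclusion constraint: enforce "must not overlap" rather than "must not be equal"
    CREATE TABLE circles (c circle, EXCLUDE USING gist (c WITH &&));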
@@ -8976,8 +8976,8 @@ Add the ability to make mass permission changes across a whole schema using the new GRANT/REVOKE - IN SCHEMA clause (Petr Jelinek) + linkend="sql-grant">GRANT/REVOKE + IN SCHEMA clause (Petr Jelinek) @@ -8989,8 +8989,8 @@ - Add ALTER - DEFAULT PRIVILEGES command to control privileges + Add ALTER + DEFAULT PRIVILEGES command to control privileges of objects created later (Petr Jelinek) @@ -9005,7 +9005,7 @@ Add the ability to control large object (BLOB) permissions with - GRANT/REVOKE (KaiGai Kohei) + GRANT/REVOKE (KaiGai Kohei) @@ -9028,8 +9028,8 @@ - Make LISTEN/NOTIFY store pending events + Make LISTEN/NOTIFY store pending events in a memory queue, rather than in a system table (Joachim Wieland) @@ -9042,21 +9042,21 @@ - Allow NOTIFY - to pass an optional payload string to listeners + Allow NOTIFY + to pass an optional payload string to listeners (Joachim Wieland) This greatly improves the usefulness of - LISTEN/NOTIFY as a + LISTEN/NOTIFY as a general-purpose event queue system. - Allow CLUSTER + Allow CLUSTER on all per-database system catalogs (Tom Lane) @@ -9068,30 +9068,30 @@ - <link linkend="SQL-COPY"><command>COPY</></link> + <link linkend="sql-copy"><command>COPY</command></link> - Accept COPY ... CSV FORCE QUOTE * + Accept COPY ... CSV FORCE QUOTE * (Itagaki Takahiro) - Now * can be used as shorthand for all columns - in the FORCE QUOTE clause. + Now * can be used as shorthand for all columns + in the FORCE QUOTE clause. - Add new COPY syntax that allows options to be + Add new COPY syntax that allows options to be specified inside parentheses (Robert Haas, Emmanuel Cecchet) - This allows greater flexibility for future COPY options. + This allows greater flexibility for future COPY options. The old syntax is still supported, but only for pre-existing options. @@ -9101,27 +9101,27 @@ - <link linkend="SQL-EXPLAIN"><command>EXPLAIN</></link> + <link linkend="sql-explain"><command>EXPLAIN</command></link> - Allow EXPLAIN to output in XML, - JSON, or YAML format (Robert Haas, Greg + Allow EXPLAIN to output in XML, + JSON, or YAML format (Robert Haas, Greg Sabino Mullane) The new output formats are easily machine-readable, supporting the - development of new tools for analysis of EXPLAIN output. + development of new tools for analysis of EXPLAIN output. - Add new BUFFERS option to report query - buffer usage during EXPLAIN ANALYZE (Itagaki Takahiro) + Add new BUFFERS option to report query + buffer usage during EXPLAIN ANALYZE (Itagaki Takahiro) @@ -9134,19 +9134,19 @@ - Add hash usage information to EXPLAIN output (Robert + Add hash usage information to EXPLAIN output (Robert Haas) - Add new EXPLAIN syntax that allows options to be + Add new EXPLAIN syntax that allows options to be specified inside parentheses (Robert Haas) - This allows greater flexibility for future EXPLAIN options. + This allows greater flexibility for future EXPLAIN options. The old syntax is still supported, but only for pre-existing options. @@ -9156,13 +9156,13 @@ - <link linkend="SQL-VACUUM"><command>VACUUM</></link> + <link linkend="sql-vacuum"><command>VACUUM</command></link> - Change VACUUM FULL to rewrite the entire table and + Change VACUUM FULL to rewrite the entire table and rebuild its indexes, rather than moving individual rows around to compact space (Itagaki Takahiro, Tom Lane) @@ -9170,7 +9170,7 @@ The previous method was usually slower and caused index bloat. 
Note that the new method will use more disk space transiently - during VACUUM FULL; potentially as much as twice + during VACUUM FULL; potentially as much as twice the space normally occupied by the table and its indexes. @@ -9178,12 +9178,12 @@ - Add new VACUUM syntax that allows options to be + Add new VACUUM syntax that allows options to be specified inside parentheses (Itagaki Takahiro) - This allows greater flexibility for future VACUUM options. + This allows greater flexibility for future VACUUM options. The old syntax is still supported, but only for pre-existing options. @@ -9200,7 +9200,7 @@ Allow an index to be named automatically by omitting the index name in - CREATE INDEX + CREATE INDEX (Tom Lane) @@ -9228,22 +9228,22 @@ - Add point_ops operator class for GiST + Add point_ops operator class for GiST (Teodor Sigaev) - This feature permits GiST indexing of point + This feature permits GiST indexing of point columns. The index can be used for several types of queries - such as point <@ polygon + such as point <@ polygon (point is in polygon). This should make many - PostGIS queries faster. + PostGIS queries faster. - Use red-black binary trees for GIN index creation + Use red-black binary trees for GIN index creation (Teodor Sigaev) @@ -9267,16 +9267,16 @@ - Allow bytea values + Allow bytea values to be written in hex notation (Peter Eisentraut) The server parameter bytea_output controls - whether hex or traditional format is used for bytea - output. Libpq's PQescapeByteaConn() function automatically - uses the hex format when connected to PostgreSQL 9.0 + linkend="guc-bytea-output">bytea_output controls + whether hex or traditional format is used for bytea + output. Libpq's PQescapeByteaConn() function automatically + uses the hex format when connected to PostgreSQL 9.0 or newer servers. However, pre-9.0 libpq versions will not correctly process hex format from newer servers. @@ -9293,20 +9293,20 @@ Allow server parameter extra_float_digits - to be increased to 3 (Tom Lane) + to be increased to 3 (Tom Lane) - The previous maximum extra_float_digits setting was - 2. There are cases where 3 digits are needed to dump and - restore float4 values exactly. pg_dump will + The previous maximum extra_float_digits setting was + 2. There are cases where 3 digits are needed to dump and + restore float4 values exactly. pg_dump will now use the setting of 3 when dumping from a server that allows it. - Tighten input checking for int2vector values (Caleb + Tighten input checking for int2vector values (Caleb Welton) @@ -9320,14 +9320,14 @@ - Add prefix support in synonym dictionaries + Add prefix support in synonym dictionaries (Teodor Sigaev) - Add filtering dictionaries (Teodor Sigaev) + Add filtering dictionaries (Teodor Sigaev) @@ -9344,7 +9344,7 @@ - Use more standards-compliant rules for parsing URL tokens + Use more standards-compliant rules for parsing URL tokens (Tom Lane) @@ -9367,9 +9367,9 @@ - For example, if a function is defined to take parameters a - and b, it can be called with func(a := 7, b - := 12) or func(b := 12, a := 7). + For example, if a function is defined to take parameters a + and b, it can be called with func(a := 7, b + := 12) or func(b := 12, a := 7). @@ -9377,24 +9377,24 @@ Support locale-specific regular expression - processing with UTF-8 server encoding (Tom Lane) + processing with UTF-8 server encoding (Tom Lane) Locale-specific regular expression functionality includes case-insensitive matching and locale-specific character classes. 
- Previously, these features worked correctly for non-ASCII + Previously, these features worked correctly for non-ASCII characters only if the database used a single-byte server encoding (such as LATIN1). They will still misbehave in multi-byte encodings other - than UTF-8. + than UTF-8. Add support for scientific notation in to_char() - (EEEE + linkend="functions-formatting">to_char() + (EEEE specification) (Pavel Stehule, Brendan Jurd) @@ -9402,21 +9402,21 @@ - Make to_char() honor FM - (fill mode) in Y, YY, and - YYY specifications (Bruce Momjian, Tom Lane) + Make to_char() honor FM + (fill mode) in Y, YY, and + YYY specifications (Bruce Momjian, Tom Lane) - It was already honored by YYYY. + It was already honored by YYYY. - Fix to_char() to output localized numeric and monetary - strings in the correct encoding on Windows + Fix to_char() to output localized numeric and monetary + strings in the correct encoding on Windows (Hiroshi Inoue, Itagaki Takahiro, Bruce Momjian) @@ -9429,12 +9429,12 @@ - The polygon && (overlaps) operator formerly just + The polygon && (overlaps) operator formerly just checked to see if the two polygons' bounding boxes overlapped. It now - does a more correct check. The polygon @> and - <@ (contains/contained by) operators formerly checked + does a more correct check. The polygon @> and + <@ (contains/contained by) operators formerly checked to see if one polygon's vertexes were all contained in the other; - this can wrongly report true for some non-convex polygons. + this can wrongly report true for some non-convex polygons. Now they check that all line segments of one polygon are contained in the other. @@ -9450,12 +9450,12 @@ Allow aggregate functions to use ORDER BY (Andrew Gierth) + linkend="syntax-aggregates">ORDER BY (Andrew Gierth) For example, this is now supported: array_agg(a ORDER BY - b). This is useful with aggregates for which the order of input + b). This is useful with aggregates for which the order of input values is significant, and eliminates the need to use a nonstandard subquery to determine the ordering. @@ -9463,7 +9463,7 @@ - Multi-argument aggregate functions can now use DISTINCT + Multi-argument aggregate functions can now use DISTINCT (Andrew Gierth) @@ -9471,7 +9471,7 @@ Add the string_agg() + linkend="functions-aggregate-table">string_agg() aggregate function to combine values into a single string (Pavel Stehule) @@ -9479,15 +9479,15 @@ - Aggregate functions that are called with DISTINCT are + Aggregate functions that are called with DISTINCT are now passed NULL values if the aggregate transition function is - not marked as STRICT (Andrew Gierth) + not marked as STRICT (Andrew Gierth) - For example, agg(DISTINCT x) might pass a NULL x - value to agg(). This is more consistent with the behavior - in non-DISTINCT cases. + For example, agg(DISTINCT x) might pass a NULL x + value to agg(). This is more consistent with the behavior + in non-DISTINCT cases. 
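A short SQL sketch of the aggregate ORDER BY and string_agg() features described above; the emp table and its columns are hypothetical:

    CREATE TABLE emp (name text, dept text, salary numeric);
    -- the order of aggregate input values can now be specified directly
    SELECT dept,
           array_agg(name ORDER BY salary DESC) AS names_by_salary,
           string_agg(name, ', ')               AS name_list
      FROM emp
     GROUP BY dept;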
@@ -9503,9 +9503,9 @@ Add get_bit() - and set_bit() functions for bit - strings, mirroring those for bytea (Leonardo + linkend="functions-binarystring-other">get_bit() + and set_bit() functions for bit + strings, mirroring those for bytea (Leonardo F) @@ -9513,8 +9513,8 @@ Implement OVERLAY() - (replace) for bit strings and bytea + linkend="functions-string-sql">OVERLAY() + (replace) for bit strings and bytea (Leonardo F) @@ -9531,9 +9531,9 @@ Add pg_table_size() - and pg_indexes_size() to provide a more - user-friendly interface to the pg_relation_size() + linkend="functions-admin-dbsize">pg_table_size() + and pg_indexes_size() to provide a more + user-friendly interface to the pg_relation_size() function (Bernd Helmle) @@ -9541,7 +9541,7 @@ Add has_sequence_privilege() + linkend="functions-info-access-table">has_sequence_privilege() for sequence permission checking (Abhijit Menon-Sen) @@ -9556,15 +9556,15 @@ - Make the information_schema views correctly display maximum - octet lengths for char and varchar columns (Peter + Make the information_schema views correctly display maximum + octet lengths for char and varchar columns (Peter Eisentraut) - Speed up information_schema privilege views + Speed up information_schema privilege views (Joachim Wieland) @@ -9581,7 +9581,7 @@ Support execution of anonymous code blocks using the DO statement + linkend="sql-do">DO statement (Petr Jelinek, Joshua Tolley, Hannu Valtonen) @@ -9595,28 +9595,28 @@ Implement SQL-standard-compliant per-column triggers + linkend="sql-createtrigger">per-column triggers (Itagaki Takahiro) Such triggers are fired only when the specified column(s) are affected - by the query, e.g. appear in an UPDATE's SET + by the query, e.g. appear in an UPDATE's SET list. - Add the WHEN clause to CREATE TRIGGER + Add the WHEN clause to CREATE TRIGGER to allow control over whether a trigger is fired (Itagaki Takahiro) While the same type of check can always be performed inside the - trigger, doing it in an external WHEN clause can have + trigger, doing it in an external WHEN clause can have performance benefits. @@ -9634,8 +9634,8 @@ - Add the OR REPLACE clause to CREATE LANGUAGE + Add the OR REPLACE clause to CREATE LANGUAGE (Tom Lane) @@ -9677,8 +9677,8 @@ The default behavior is now to throw an error when there is a conflict, so as to avoid surprising behaviors. This can be modified, via the configuration parameter plpgsql.variable_conflict - or the per-function option #variable_conflict, to allow + linkend="plpgsql-var-subst">plpgsql.variable_conflict + or the per-function option #variable_conflict, to allow either the variable or the query-supplied column to be used. In any case PL/pgSQL will no longer attempt to substitute variables in places where they would not be syntactically valid. @@ -9731,7 +9731,7 @@ if TG_OP = 'INSERT' and NEW.col1 = ... then Formerly, input parameters were treated as being declared - CONST, so the function's code could not change their + CONST, so the function's code could not change their values. This restriction has been removed to simplify porting of functions from other DBMSes that do not impose the equivalent restriction. An input parameter now acts like a local @@ -9747,26 +9747,26 @@ if TG_OP = 'INSERT' and NEW.col1 = ... 
then - Add count and ALL options to MOVE - FORWARD/BACKWARD in PL/pgSQL (Pavel Stehule) + Add count and ALL options to MOVE + FORWARD/BACKWARD in PL/pgSQL (Pavel Stehule) - Allow PL/pgSQL's WHERE CURRENT OF to use a cursor + Allow PL/pgSQL's WHERE CURRENT OF to use a cursor variable (Tom Lane) - Allow PL/pgSQL's OPEN cursor FOR EXECUTE to + Allow PL/pgSQL's OPEN cursor FOR EXECUTE to use parameters (Pavel Stehule, Itagaki Takahiro) - This is accomplished with a new USING clause. + This is accomplished with a new USING clause. @@ -9782,28 +9782,28 @@ if TG_OP = 'INSERT' and NEW.col1 = ... then Add new PL/Perl functions: quote_literal(), - quote_nullable(), quote_ident(), - encode_bytea(), decode_bytea(), - looks_like_number(), - encode_array_literal(), - encode_array_constructor() (Tim Bunce) + linkend="plperl-utility-functions">quote_literal(), + quote_nullable(), quote_ident(), + encode_bytea(), decode_bytea(), + looks_like_number(), + encode_array_literal(), + encode_array_constructor() (Tim Bunce) Add server parameter plperl.on_init to + linkend="guc-plperl-on-init">plperl.on_init to specify a PL/Perl initialization function (Tim Bunce) plperl.on_plperl_init + linkend="guc-plperl-on-plperl-init">plperl.on_plperl_init and plperl.on_plperlu_init + linkend="guc-plperl-on-plperl-init">plperl.on_plperlu_init are also available for initialization that is specific to the trusted or untrusted language respectively. @@ -9811,29 +9811,29 @@ if TG_OP = 'INSERT' and NEW.col1 = ... then - Support END blocks in PL/Perl (Tim Bunce) + Support END blocks in PL/Perl (Tim Bunce) - END blocks do not currently allow database access. + END blocks do not currently allow database access. - Allow use strict in PL/Perl (Tim Bunce) + Allow use strict in PL/Perl (Tim Bunce) - Perl strict checks can also be globally enabled with the + Perl strict checks can also be globally enabled with the new server parameter plperl.use_strict. + linkend="guc-plperl-use-strict">plperl.use_strict. - Allow require in PL/Perl (Tim Bunce) + Allow require in PL/Perl (Tim Bunce) @@ -9845,7 +9845,7 @@ if TG_OP = 'INSERT' and NEW.col1 = ... then - Allow use feature in PL/Perl if Perl version 5.10 or + Allow use feature in PL/Perl if Perl version 5.10 or later is used (Tim Bunce) @@ -9879,13 +9879,13 @@ if TG_OP = 'INSERT' and NEW.col1 = ... then - Improve bytea support in PL/Python (Caleb Welton) + Improve bytea support in PL/Python (Caleb Welton) - Bytea values passed into PL/Python are now represented as - binary, rather than the PostgreSQL bytea text format. - Bytea values containing null bytes are now also output + Bytea values passed into PL/Python are now represented as + binary, rather than the PostgreSQL bytea text format. + Bytea values containing null bytes are now also output properly from PL/Python. Passing of boolean, integer, and float values was also improved. @@ -9906,14 +9906,14 @@ if TG_OP = 'INSERT' and NEW.col1 = ... then - Add Python 3 support to PL/Python (Peter Eisentraut) + Add Python 3 support to PL/Python (Peter Eisentraut) The new server-side language is called plpython3u. This + linkend="plpython-python23">plpython3u. This cannot be used in the same session with the - Python 2 server-side language. + Python 2 server-side language. @@ -9936,8 +9936,8 @@ if TG_OP = 'INSERT' and NEW.col1 = ... then - Add an @@ -9945,21 +9945,21 @@ if TG_OP = 'INSERT' and NEW.col1 = ... 
then - <link linkend="APP-PSQL"><application>psql</></link> + <link linkend="app-psql"><application>psql</application></link> - Add support for quoting/escaping the values of psql - variables as SQL strings or + Add support for quoting/escaping the values of psql + variables as SQL strings or identifiers (Pavel Stehule, Robert Haas) - For example, :'var' will produce the value of - var quoted and properly escaped as a literal string, while - :"var" will produce its value quoted and escaped as an + For example, :'var' will produce the value of + var quoted and properly escaped as a literal string, while + :"var" will produce its value quoted and escaped as an identifier. @@ -9967,11 +9967,11 @@ if TG_OP = 'INSERT' and NEW.col1 = ... then Ignore a leading UTF-8-encoded Unicode byte-order marker in - script files read by psql (Itagaki Takahiro) + script files read by psql (Itagaki Takahiro) - This is enabled when the client encoding is UTF-8. + This is enabled when the client encoding is UTF-8. It improves compatibility with certain editors, mostly on Windows, that insist on inserting such markers. @@ -9979,57 +9979,57 @@ if TG_OP = 'INSERT' and NEW.col1 = ... then - Fix psql --file - to properly honor (Bruce Momjian) - Avoid overwriting of psql's command-line history when - two psql sessions are run concurrently (Tom Lane) + Avoid overwriting of psql's command-line history when + two psql sessions are run concurrently (Tom Lane) - Improve psql's tab completion support (Itagaki + Improve psql's tab completion support (Itagaki Takahiro) - Show \timing output when it is enabled, regardless of - quiet mode (Peter Eisentraut) + Show \timing output when it is enabled, regardless of + quiet mode (Peter Eisentraut) - <application>psql</> Display + <application>psql</application> Display - Improve display of wrapped columns in psql (Roger + Improve display of wrapped columns in psql (Roger Leigh) This behavior is now the default. The previous formatting is available by using \pset linestyle - old-ascii. + old-ascii. - Allow psql to use fancy Unicode line-drawing - characters via \pset linestyle unicode (Roger Leigh) + Allow psql to use fancy Unicode line-drawing + characters via \pset linestyle unicode (Roger Leigh) @@ -10038,27 +10038,27 @@ if TG_OP = 'INSERT' and NEW.col1 = ... then - <application>psql</> <link - linkend="APP-PSQL-meta-commands"><command>\d</></link> + <title><application>psql</application> <link + linkend="app-psql-meta-commands"><command>\d</command></link> Commands - Make \d show child tables that inherit from the specified + Make \d show child tables that inherit from the specified parent (Damien Clochard) - \d shows only the number of child tables, while - \d+ shows the names of all child tables. + \d shows only the number of child tables, while + \d+ shows the names of all child tables. - Show definitions of index columns in \d index_name + Show definitions of index columns in \d index_name (Khee Chin) @@ -10070,7 +10070,7 @@ if TG_OP = 'INSERT' and NEW.col1 = ... then Show a view's defining query only in - \d+, not in \d (Peter Eisentraut) + \d+, not in \d (Peter Eisentraut) @@ -10084,33 +10084,33 @@ if TG_OP = 'INSERT' and NEW.col1 = ... 
then - <link linkend="APP-PGDUMP"><application>pg_dump</></link> + <link linkend="app-pgdump"><application>pg_dump</application></link> - Make pg_dump/pg_restore - also remove large objects (Itagaki Takahiro) - Fix pg_dump to properly dump large objects when - standard_conforming_strings is enabled (Tom Lane) + Fix pg_dump to properly dump large objects when + standard_conforming_strings is enabled (Tom Lane) The previous coding could fail when dumping to an archive file - and then generating script output from pg_restore. + and then generating script output from pg_restore. - pg_restore now emits large-object data in hex format + pg_restore now emits large-object data in hex format when generating script output (Tom Lane) @@ -10123,16 +10123,16 @@ if TG_OP = 'INSERT' and NEW.col1 = ... then - Allow pg_dump to dump comments attached to columns + Allow pg_dump to dump comments attached to columns of composite types (Taro Minowa (Higepon)) - Make pg_dump @@ -10143,7 +10143,7 @@ if TG_OP = 'INSERT' and NEW.col1 = ... then - pg_restore now complains if any command-line arguments + pg_restore now complains if any command-line arguments remain after the switches and optional file name (Tom Lane) @@ -10158,28 +10158,28 @@ if TG_OP = 'INSERT' and NEW.col1 = ... then <link - linkend="app-pg-ctl"><application>pg_ctl</></link> + linkend="app-pg-ctl">pg_ctl - Allow pg_ctl to be used safely to start the - postmaster during a system reboot (Tom Lane) + Allow pg_ctl to be used safely to start the + postmaster during a system reboot (Tom Lane) - Previously, pg_ctl's parent process could have been - mistakenly identified as a running postmaster based on - a stale postmaster lock file, resulting in a transient + Previously, pg_ctl's parent process could have been + mistakenly identified as a running postmaster based on + a stale postmaster lock file, resulting in a transient failure to start the database. - Give pg_ctl the ability to initialize the database - (by invoking initdb) (Zdenek Kotala) + Give pg_ctl the ability to initialize the database + (by invoking initdb) (Zdenek Kotala) @@ -10190,25 +10190,25 @@ if TG_OP = 'INSERT' and NEW.col1 = ... then - <application>Development Tools</> + <application>Development Tools</application> - <link linkend="libpq"><application>libpq</></link> + <link linkend="libpq"><application>libpq</application></link> - Add new libpq functions + Add new libpq functions PQconnectdbParams() - and PQconnectStartParams() (Guillaume + linkend="libpq-connect">PQconnectdbParams() + and PQconnectStartParams() (Guillaume Lelarge) - These functions are similar to PQconnectdb() and - PQconnectStart() except that they accept a null-terminated + These functions are similar to PQconnectdb() and + PQconnectStart() except that they accept a null-terminated array of connection options, rather than requiring all options to be provided in a single string. @@ -10216,22 +10216,22 @@ if TG_OP = 'INSERT' and NEW.col1 = ... then - Add libpq functions PQescapeLiteral() - and PQescapeIdentifier() (Robert Haas) + Add libpq functions PQescapeLiteral() + and PQescapeIdentifier() (Robert Haas) These functions return appropriately quoted and escaped SQL string literals and identifiers. The caller is not required to pre-allocate - the string result, as is required by PQescapeStringConn(). + the string result, as is required by PQescapeStringConn(). 
Add support for a per-user service file (.pg_service.conf), + linkend="libpq-pgservice">.pg_service.conf), which is checked before the site-wide service file (Peter Eisentraut) @@ -10239,7 +10239,7 @@ if TG_OP = 'INSERT' and NEW.col1 = ... then - Properly report an error if the specified libpq service + Properly report an error if the specified libpq service cannot be found (Peter Eisentraut) @@ -10258,15 +10258,15 @@ if TG_OP = 'INSERT' and NEW.col1 = ... then - Avoid extra system calls to block and unblock SIGPIPE - in libpq, on platforms that offer alternative methods + Avoid extra system calls to block and unblock SIGPIPE + in libpq, on platforms that offer alternative methods (Jeremy Kerr) - When a .pgpass-supplied + When a .pgpass-supplied password fails, mention where the password came from in the error message (Bruce Momjian) @@ -10288,22 +10288,22 @@ if TG_OP = 'INSERT' and NEW.col1 = ... then - <link linkend="ecpg"><application>ecpg</></link> + <link linkend="ecpg"><application>ecpg</application></link> - Add SQLDA - (SQL Descriptor Area) support to ecpg + Add SQLDA + (SQL Descriptor Area) support to ecpg (Boszormenyi Zoltan) - Add the DESCRIBE - [ OUTPUT ] statement to ecpg + Add the DESCRIBE + [ OUTPUT ] statement to ecpg (Boszormenyi Zoltan) @@ -10317,28 +10317,28 @@ if TG_OP = 'INSERT' and NEW.col1 = ... then - Add the string data type in ecpg + Add the string data type in ecpg Informix-compatibility mode (Boszormenyi Zoltan) - Allow ecpg to use new and old + Allow ecpg to use new and old variable names without restriction (Michael Meskes) - Allow ecpg to use variable names in - free() (Michael Meskes) + Allow ecpg to use variable names in + free() (Michael Meskes) - Make ecpg_dynamic_type() return zero for non-SQL3 data + Make ecpg_dynamic_type() return zero for non-SQL3 data types (Michael Meskes) @@ -10350,41 +10350,41 @@ if TG_OP = 'INSERT' and NEW.col1 = ... then - Support long long types on platforms that already have 64-bit - long (Michael Meskes) + Support long long types on platforms that already have 64-bit + long (Michael Meskes) - <application>ecpg</> Cursors + <application>ecpg</application> Cursors - Add out-of-scope cursor support in ecpg's native mode + Add out-of-scope cursor support in ecpg's native mode (Boszormenyi Zoltan) - This allows DECLARE to use variables that are not in - scope when OPEN is called. This facility already existed - in ecpg's Informix-compatibility mode. + This allows DECLARE to use variables that are not in + scope when OPEN is called. This facility already existed + in ecpg's Informix-compatibility mode. - Allow dynamic cursor names in ecpg (Boszormenyi Zoltan) + Allow dynamic cursor names in ecpg (Boszormenyi Zoltan) - Allow ecpg to use noise words FROM and - IN in FETCH and MOVE (Boszormenyi + Allow ecpg to use noise words FROM and + IN in FETCH and MOVE (Boszormenyi Zoltan) @@ -10409,8 +10409,8 @@ if TG_OP = 'INSERT' and NEW.col1 = ... then The thread-safety option can be disabled with configure - . @@ -10421,12 +10421,12 @@ if TG_OP = 'INSERT' and NEW.col1 = ... then - Now that /proc/self/oom_adj allows disabling - of the Linux out-of-memory (OOM) + Now that /proc/self/oom_adj allows disabling + of the Linux out-of-memory (OOM) killer, it's recommendable to disable OOM kills for the postmaster. It may then be desirable to re-enable OOM kills for the postmaster's child processes. The new compile-time option LINUX_OOM_ADJ + linkend="linux-memory-overcommit">LINUX_OOM_ADJ allows the killer to be reactivated for child processes. 
@@ -10440,31 +10440,31 @@ if TG_OP = 'INSERT' and NEW.col1 = ... then - New Makefile targets world, - install-world, and installcheck-world + New Makefile targets world, + install-world, and installcheck-world (Andrew Dunstan) - These are similar to the existing all, install, - and installcheck targets, but they also build the - HTML documentation, build and test contrib, - and test server-side languages and ecpg. + These are similar to the existing all, install, + and installcheck targets, but they also build the + HTML documentation, build and test contrib, + and test server-side languages and ecpg. Add data and documentation installation location control to - PGXS Makefiles (Mark Cave-Ayland) + PGXS Makefiles (Mark Cave-Ayland) - Add Makefile rules to build the PostgreSQL documentation - as a single HTML file or as a single plain-text file + Add Makefile rules to build the PostgreSQL documentation + as a single HTML file or as a single plain-text file (Peter Eisentraut, Bruce Momjian) @@ -10482,12 +10482,12 @@ if TG_OP = 'INSERT' and NEW.col1 = ... then Support compiling on 64-bit - Windows and running in 64-bit + Windows and running in 64-bit mode (Tsutomu Yamada, Magnus Hagander) - This allows for large shared memory sizes on Windows. + This allows for large shared memory sizes on Windows. @@ -10495,7 +10495,7 @@ if TG_OP = 'INSERT' and NEW.col1 = ... then Support server builds using Visual Studio - 2008 (Magnus Hagander) + 2008 (Magnus Hagander) @@ -10518,8 +10518,8 @@ if TG_OP = 'INSERT' and NEW.col1 = ... then - For example, the prebuilt HTML documentation is now in - doc/src/sgml/html/; the manual pages are packaged + For example, the prebuilt HTML documentation is now in + doc/src/sgml/html/; the manual pages are packaged similarly. @@ -10543,13 +10543,13 @@ if TG_OP = 'INSERT' and NEW.col1 = ... then User-defined constraint triggers now have entries in - pg_constraint as well as pg_trigger + pg_constraint as well as pg_trigger (Tom Lane) Because of this change, - pg_constraint.pgconstrname is now + pg_constraint.pgconstrname is now redundant and has been removed. @@ -10557,8 +10557,8 @@ if TG_OP = 'INSERT' and NEW.col1 = ... then Add system catalog columns - pg_constraint.conindid and - pg_trigger.tgconstrindid + pg_constraint.conindid and + pg_trigger.tgconstrindid to better document the use of indexes for constraint enforcement (Tom Lane) @@ -10578,7 +10578,7 @@ if TG_OP = 'INSERT' and NEW.col1 = ... then - Improve source code test coverage, including contrib, PL/Python, + Improve source code test coverage, including contrib, PL/Python, and PL/Perl (Peter Eisentraut, Andrew Dunstan) @@ -10598,7 +10598,7 @@ if TG_OP = 'INSERT' and NEW.col1 = ... then Automatically generate the initial contents of - pg_attribute for bootstrapped catalogs + pg_attribute for bootstrapped catalogs (John Naylor) @@ -10610,8 +10610,8 @@ if TG_OP = 'INSERT' and NEW.col1 = ... then Split the processing of - INSERT/UPDATE/DELETE operations out - of execMain.c (Marko Tiikkaja) + INSERT/UPDATE/DELETE operations out + of execMain.c (Marko Tiikkaja) @@ -10622,7 +10622,7 @@ if TG_OP = 'INSERT' and NEW.col1 = ... then - Simplify translation of psql's SQL help text + Simplify translation of psql's SQL help text (Peter Eisentraut) @@ -10641,8 +10641,8 @@ if TG_OP = 'INSERT' and NEW.col1 = ... 
then Add a new ERRCODE_INVALID_PASSWORD - SQLSTATE error code (Bruce Momjian) + linkend="errcodes-table">ERRCODE_INVALID_PASSWORD + SQLSTATE error code (Bruce Momjian) @@ -10661,23 +10661,23 @@ if TG_OP = 'INSERT' and NEW.col1 = ... then Add new documentation section - about running PostgreSQL in non-durable mode + about running PostgreSQL in non-durable mode to improve performance (Bruce Momjian) - Restructure the HTML documentation - Makefile rules to make their dependency checks work + Restructure the HTML documentation + Makefile rules to make their dependency checks work correctly, avoiding unnecessary rebuilds (Peter Eisentraut) - Use DocBook XSL stylesheets for man page - building, rather than Docbook2X (Peter Eisentraut) + Use DocBook XSL stylesheets for man page + building, rather than Docbook2X (Peter Eisentraut) @@ -10711,22 +10711,22 @@ if TG_OP = 'INSERT' and NEW.col1 = ... then - Require Autoconf 2.63 to build - configure (Peter Eisentraut) + Require Autoconf 2.63 to build + configure (Peter Eisentraut) - Require Flex 2.5.31 or later to build - from a CVS checkout (Tom Lane) + Require Flex 2.5.31 or later to build + from a CVS checkout (Tom Lane) - Require Perl version 5.8 or later to build - from a CVS checkout (John Naylor, Andrew Dunstan) + Require Perl version 5.8 or later to build + from a CVS checkout (John Naylor, Andrew Dunstan) @@ -10741,25 +10741,25 @@ if TG_OP = 'INSERT' and NEW.col1 = ... then - Use a more modern API for Bonjour (Tom Lane) + Use a more modern API for Bonjour (Tom Lane) - Bonjour support now requires macOS 10.3 or later. + Bonjour support now requires macOS 10.3 or later. The older API has been deprecated by Apple. - Add spinlock support for the SuperH + Add spinlock support for the SuperH architecture (Nobuhiro Iwamatsu) - Allow non-GCC compilers to use inline functions if + Allow non-GCC compilers to use inline functions if they support them (Kurt Harriman) @@ -10773,14 +10773,14 @@ if TG_OP = 'INSERT' and NEW.col1 = ... then - Restructure use of LDFLAGS to be more consistent + Restructure use of LDFLAGS to be more consistent across platforms (Tom Lane) - LDFLAGS is now used for linking both executables and shared - libraries, and we add on LDFLAGS_EX when linking - executables, or LDFLAGS_SL when linking shared libraries. + LDFLAGS is now used for linking both executables and shared + libraries, and we add on LDFLAGS_EX when linking + executables, or LDFLAGS_SL when linking shared libraries. @@ -10795,15 +10795,15 @@ if TG_OP = 'INSERT' and NEW.col1 = ... then - Make backend header files safe to include in C++ + Make backend header files safe to include in C++ (Kurt Harriman, Peter Eisentraut) These changes remove keyword conflicts that previously made - C++ usage difficult in backend code. However, there - are still other complexities when using C++ for backend - functions. extern "C" { } is still necessary in + C++ usage difficult in backend code. However, there + are still other complexities when using C++ for backend + functions. extern "C" { } is still necessary in appropriate places, and memory management and error handling are still problematic. @@ -10812,15 +10812,15 @@ if TG_OP = 'INSERT' and NEW.col1 = ... 
then Add AggCheckCallContext() - for use in detecting if a C function is + linkend="xaggr">AggCheckCallContext() + for use in detecting if a C function is being called as an aggregate (Hitoshi Harada) - Change calling convention for SearchSysCache() and related + Change calling convention for SearchSysCache() and related functions to avoid hard-wiring the maximum number of cache keys (Robert Haas) @@ -10833,8 +10833,8 @@ if TG_OP = 'INSERT' and NEW.col1 = ... then - Require calls of fastgetattr() and - heap_getattr() backend macros to provide a non-NULL fourth + Require calls of fastgetattr() and + heap_getattr() backend macros to provide a non-NULL fourth argument (Robert Haas) @@ -10842,7 +10842,7 @@ if TG_OP = 'INSERT' and NEW.col1 = ... then Custom typanalyze functions should no longer rely on - VacAttrStats.attr to determine the type + VacAttrStats.attr to determine the type of data they will be passed (Tom Lane) @@ -10888,7 +10888,7 @@ if TG_OP = 'INSERT' and NEW.col1 = ... then - Add contrib/pg_upgrade + Add contrib/pg_upgrade to support in-place upgrades (Bruce Momjian) @@ -10903,15 +10903,15 @@ if TG_OP = 'INSERT' and NEW.col1 = ... then Add support for preserving relation relfilenode values + linkend="catalog-pg-class">relfilenode values during binary upgrades (Bruce Momjian) - Add support for preserving pg_type - and pg_enum OIDs during binary upgrades + Add support for preserving pg_type + and pg_enum OIDs during binary upgrades (Bruce Momjian) @@ -10919,7 +10919,7 @@ if TG_OP = 'INSERT' and NEW.col1 = ... then Move data files within tablespaces into - PostgreSQL-version-specific subdirectories + PostgreSQL-version-specific subdirectories (Bruce Momjian) @@ -10941,22 +10941,22 @@ if TG_OP = 'INSERT' and NEW.col1 = ... then - Add multithreading option ( - This allows multiple CPUs to be used by pgbench, + This allows multiple CPUs to be used by pgbench, reducing the risk of pgbench itself becoming the test bottleneck. - Add \shell and \setshell meta + Add \shell and \setshell meta commands to contrib/pgbench + linkend="pgbench">contrib/pgbench (Michael Paquier) @@ -10964,20 +10964,20 @@ if TG_OP = 'INSERT' and NEW.col1 = ... then New features for contrib/dict_xsyn + linkend="dict-xsyn">contrib/dict_xsyn (Sergey Karpov) - The new options are matchorig, matchsynonyms, - and keepsynonyms. + The new options are matchorig, matchsynonyms, + and keepsynonyms. Add full text dictionary contrib/unaccent + linkend="unaccent">contrib/unaccent (Teodor Sigaev) @@ -10990,24 +10990,24 @@ if TG_OP = 'INSERT' and NEW.col1 = ... then Add dblink_get_notify() - to contrib/dblink (Marcus Kempe) + linkend="contrib-dblink-get-notify">dblink_get_notify() + to contrib/dblink (Marcus Kempe) - This allows asynchronous notifications in dblink. + This allows asynchronous notifications in dblink. - Improve contrib/dblink's handling of dropped columns + Improve contrib/dblink's handling of dropped columns (Tom Lane) This affects dblink_build_sql_insert() + linkend="contrib-dblink-build-sql-insert">dblink_build_sql_insert() and related functions. These functions now number columns according to logical not physical column numbers. @@ -11016,23 +11016,23 @@ if TG_OP = 'INSERT' and NEW.col1 = ... 
then Greatly increase contrib/hstore's data + linkend="hstore">contrib/hstore's data length limit, and add B-tree and hash support so GROUP - BY and DISTINCT operations are possible on - hstore columns (Andrew Gierth) + BY and DISTINCT operations are possible on + hstore columns (Andrew Gierth) New functions and operators were also added. These improvements - make hstore a full-function key-value store embedded in - PostgreSQL. + make hstore a full-function key-value store embedded in + PostgreSQL. Add contrib/passwordcheck + linkend="passwordcheck">contrib/passwordcheck to support site-specific password strength policies (Laurenz Albe) @@ -11046,7 +11046,7 @@ if TG_OP = 'INSERT' and NEW.col1 = ... then Add contrib/pg_archivecleanup + linkend="pgarchivecleanup">contrib/pg_archivecleanup tool (Simon Riggs) @@ -11060,7 +11060,7 @@ if TG_OP = 'INSERT' and NEW.col1 = ... then Add query text to contrib/auto_explain + linkend="auto-explain">contrib/auto_explain output (Andrew Dunstan) @@ -11068,7 +11068,7 @@ if TG_OP = 'INSERT' and NEW.col1 = ... then Add buffer access counters to contrib/pg_stat_statements + linkend="pgstatstatements">contrib/pg_stat_statements (Itagaki Takahiro) @@ -11076,10 +11076,10 @@ if TG_OP = 'INSERT' and NEW.col1 = ... then Update contrib/start-scripts/linux - to use /proc/self/oom_adj to disable the - Linux - out-of-memory (OOM) killer (Alex + linkend="server-start">contrib/start-scripts/linux + to use /proc/self/oom_adj to disable the + Linux + out-of-memory (OOM) killer (Alex Hunsaker, Tom Lane) diff --git a/doc/src/sgml/release-9.1.sgml b/doc/src/sgml/release-9.1.sgml index 0454f849d4..e6ce80032f 100644 --- a/doc/src/sgml/release-9.1.sgml +++ b/doc/src/sgml/release-9.1.sgml @@ -12,11 +12,11 @@ This release contains a variety of fixes from 9.1.23. For information about new features in the 9.1 major release, see - . + . - This is expected to be the last PostgreSQL release + This is expected to be the last PostgreSQL release in the 9.1.X series. Users are encouraged to update to a newer release branch soon. @@ -30,7 +30,7 @@ However, if you are upgrading from a version earlier than 9.1.16, - see . + see . @@ -68,13 +68,13 @@ - Fix timeout length when VACUUM is waiting for exclusive + Fix timeout length when VACUUM is waiting for exclusive table lock so that it can truncate the table (Simon Riggs) The timeout was meant to be 50 milliseconds, but it was actually only - 50 microseconds, causing VACUUM to give up on truncation + 50 microseconds, causing VACUUM to give up on truncation much more easily than intended. Set it to the intended value. @@ -82,15 +82,15 @@ Remove artificial restrictions on the values accepted - by numeric_in() and numeric_recv() + by numeric_in() and numeric_recv() (Tom Lane) We allow numeric values up to the limit of the storage format (more - than 1e100000), so it seems fairly pointless - that numeric_in() rejected scientific-notation exponents - above 1000. Likewise, it was silly for numeric_recv() to + than 1e100000), so it seems fairly pointless + that numeric_in() rejected scientific-notation exponents + above 1000. Likewise, it was silly for numeric_recv() to reject more than 1000 digits in an input value. 
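A quick illustration of the relaxed numeric input limits described in the preceding item, assuming a server that includes the fix:

    -- scientific-notation exponents above 1000 are no longer rejected;
    -- only the storage-format limit applies
    SELECT '1e100000'::numeric;
    SELECT '9.99e2000'::numeric * 2;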
@@ -112,7 +112,7 @@ - Disallow starting a standalone backend with standby_mode + Disallow starting a standalone backend with standby_mode turned on (Michael Paquier) @@ -126,7 +126,7 @@ Don't try to share SSL contexts across multiple connections - in libpq (Heikki Linnakangas) + in libpq (Heikki Linnakangas) @@ -137,26 +137,26 @@ - Avoid corner-case memory leak in libpq (Tom Lane) + Avoid corner-case memory leak in libpq (Tom Lane) The reported problem involved leaking an error report - during PQreset(), but there might be related cases. + during PQreset(), but there might be related cases. - Make ecpg's and options work consistently with our other executables (Haribabu Kommi) - Fix contrib/intarray/bench/bench.pl to print the results - of the EXPLAIN it does when given the option (Daniel Gustafsson) @@ -170,17 +170,17 @@ If a dynamic time zone abbreviation does not match any entry in the referenced time zone, treat it as equivalent to the time zone name. This avoids unexpected failures when IANA removes abbreviations from - their time zone database, as they did in tzdata + their time zone database, as they did in tzdata release 2016f and seem likely to do again in the future. The consequences were not limited to not recognizing the individual abbreviation; any mismatch caused - the pg_timezone_abbrevs view to fail altogether. + the pg_timezone_abbrevs view to fail altogether. - Update time zone data files to tzdata release 2016h + Update time zone data files to tzdata release 2016h for DST law changes in Palestine and Turkey, plus historical corrections for Turkey and some regions of Russia. Switch to numeric abbreviations for some time zones in Antarctica, @@ -193,15 +193,15 @@ or no currency among the local population. They are in process of reversing that policy in favor of using numeric UTC offsets in zones where there is no evidence of real-world use of an English - abbreviation. At least for the time being, PostgreSQL + abbreviation. At least for the time being, PostgreSQL will continue to accept such removed abbreviations for timestamp input. - But they will not be shown in the pg_timezone_names + But they will not be shown in the pg_timezone_names view nor used for output. - In this update, AMT is no longer shown as being in use to - mean Armenia Time. Therefore, we have changed the Default + In this update, AMT is no longer shown as being in use to + mean Armenia Time. Therefore, we have changed the Default abbreviation set to interpret it as Amazon Time, thus UTC-4 not UTC+4. @@ -222,11 +222,11 @@ This release contains a variety of fixes from 9.1.22. For information about new features in the 9.1 major release, see - . + . - The PostgreSQL community will stop releasing updates + The PostgreSQL community will stop releasing updates for the 9.1.X release series in September 2016. Users are encouraged to update to a newer release branch soon. @@ -240,7 +240,7 @@ However, if you are upgrading from a version earlier than 9.1.16, - see . + see . @@ -253,17 +253,17 @@ Fix possible mis-evaluation of - nested CASE-WHEN expressions (Heikki + nested CASE-WHEN expressions (Heikki Linnakangas, Michael Paquier, Tom Lane) - A CASE expression appearing within the test value - subexpression of another CASE could become confused about + A CASE expression appearing within the test value + subexpression of another CASE could become confused about whether its own test value was null or not. 
Also, inlining of a SQL function implementing the equality operator used by - a CASE expression could result in passing the wrong test - value to functions called within a CASE expression in the + a CASE expression could result in passing the wrong test + value to functions called within a CASE expression in the SQL function's body. If the test values were of different data types, a crash might result; moreover such situations could be abused to allow disclosure of portions of server memory. (CVE-2016-5423) @@ -277,7 +277,7 @@ - Numerous places in vacuumdb and other client programs + Numerous places in vacuumdb and other client programs could become confused by database and role names containing double quotes or backslashes. Tighten up quoting rules to make that safe. Also, ensure that when a conninfo string is used as a database name @@ -286,22 +286,22 @@ Fix handling of paired double quotes - in psql's \connect - and \password commands to match the documentation. + in psql's \connect + and \password commands to match the documentation. - Introduce a new - pg_dumpall now refuses to deal with database and role + pg_dumpall now refuses to deal with database and role names containing carriage returns or newlines, as it seems impractical to quote those characters safely on Windows. In future we may reject such names on the server side, but that step has not been taken yet. @@ -311,40 +311,40 @@ These are considered security fixes because crafted object names containing special characters could have been used to execute commands with superuser privileges the next time a superuser - executes pg_dumpall or other routine maintenance + executes pg_dumpall or other routine maintenance operations. (CVE-2016-5424) - Fix corner-case misbehaviors for IS NULL/IS NOT - NULL applied to nested composite values (Andrew Gierth, Tom Lane) + Fix corner-case misbehaviors for IS NULL/IS NOT + NULL applied to nested composite values (Andrew Gierth, Tom Lane) - The SQL standard specifies that IS NULL should return + The SQL standard specifies that IS NULL should return TRUE for a row of all null values (thus ROW(NULL,NULL) IS - NULL yields TRUE), but this is not meant to apply recursively - (thus ROW(NULL, ROW(NULL,NULL)) IS NULL yields FALSE). + NULL yields TRUE), but this is not meant to apply recursively + (thus ROW(NULL, ROW(NULL,NULL)) IS NULL yields FALSE). The core executor got this right, but certain planner optimizations treated the test as recursive (thus producing TRUE in both cases), - and contrib/postgres_fdw could produce remote queries + and contrib/postgres_fdw could produce remote queries that misbehaved similarly. - Make the inet and cidr data types properly reject + Make the inet and cidr data types properly reject IPv6 addresses with too many colon-separated fields (Tom Lane) - Prevent crash in close_ps() - (the point ## lseg operator) + Prevent crash in close_ps() + (the point ## lseg operator) for NaN input coordinates (Tom Lane) @@ -355,12 +355,12 @@ - Fix several one-byte buffer over-reads in to_number() + Fix several one-byte buffer over-reads in to_number() (Peter Eisentraut) - In several cases the to_number() function would read one + In several cases the to_number() function would read one more character than it should from the input string. There is a small chance of a crash, if the input happens to be adjacent to the end of memory. 
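The corrected IS NULL semantics for nested composite values, restated as runnable SQL (taken directly from the behavior described above):

    SELECT ROW(NULL, NULL) IS NULL;             -- true: every field is null
    SELECT ROW(NULL, ROW(NULL, NULL)) IS NULL;  -- false: the rule is not applied recursively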
@@ -370,7 +370,7 @@ Avoid unsafe intermediate state during expensive paths - through heap_update() (Masahiko Sawada, Andres Freund) + through heap_update() (Masahiko Sawada, Andres Freund) @@ -383,12 +383,12 @@ - Avoid consuming a transaction ID during VACUUM + Avoid consuming a transaction ID during VACUUM (Alexander Korotkov) - Some cases in VACUUM unnecessarily caused an XID to be + Some cases in VACUUM unnecessarily caused an XID to be assigned to the current transaction. Normally this is negligible, but if one is up against the XID wraparound limit, consuming more XIDs during anti-wraparound vacuums is a very bad thing. @@ -397,12 +397,12 @@ - Avoid canceling hot-standby queries during VACUUM FREEZE + Avoid canceling hot-standby queries during VACUUM FREEZE (Simon Riggs, Álvaro Herrera) - VACUUM FREEZE on an otherwise-idle master server could + VACUUM FREEZE on an otherwise-idle master server could result in unnecessary cancellations of queries on its standby servers. @@ -410,8 +410,8 @@ - When a manual ANALYZE specifies a column list, don't - reset the table's changes_since_analyze counter + When a manual ANALYZE specifies a column list, don't + reset the table's changes_since_analyze counter (Tom Lane) @@ -423,7 +423,7 @@ - Fix ANALYZE's overestimation of n_distinct + Fix ANALYZE's overestimation of n_distinct for a unique or nearly-unique column with many null entries (Tom Lane) @@ -451,8 +451,8 @@ - Fix contrib/btree_gin to handle the smallest - possible bigint value correctly (Peter Eisentraut) + Fix contrib/btree_gin to handle the smallest + possible bigint value correctly (Peter Eisentraut) @@ -465,21 +465,21 @@ It's planned to switch to two-part instead of three-part server version numbers for releases after 9.6. Make sure - that PQserverVersion() returns the correct value for + that PQserverVersion() returns the correct value for such cases. - Fix ecpg's code for unsigned long long + Fix ecpg's code for unsigned long long array elements (Michael Meskes) - Make pg_basebackup accept -Z 0 as + Make pg_basebackup accept -Z 0 as specifying no compression (Fujii Masao) @@ -491,13 +491,13 @@ Branch: REL9_1_STABLE [d56c02f1a] 2016-06-19 13:45:03 -0400 Branch: REL9_1_STABLE [354b3a3ac] 2016-06-19 14:01:17 -0400 --> - Revert to the old heuristic timeout for pg_ctl start -w + Revert to the old heuristic timeout for pg_ctl start -w (Tom Lane) The new method adopted as of release 9.1.20 does not work - when silent_mode is enabled, so go back to the old way. + when silent_mode is enabled, so go back to the old way. @@ -530,7 +530,7 @@ Branch: REL9_1_STABLE [354b3a3ac] 2016-06-19 14:01:17 -0400 Update our copy of the timezone code to match - IANA's tzcode release 2016c (Tom Lane) + IANA's tzcode release 2016c (Tom Lane) @@ -542,7 +542,7 @@ Branch: REL9_1_STABLE [354b3a3ac] 2016-06-19 14:01:17 -0400 - Update time zone data files to tzdata release 2016f + Update time zone data files to tzdata release 2016f for DST law changes in Kemerovo and Novosibirsk, plus historical corrections for Azerbaijan, Belarus, and Morocco. @@ -564,11 +564,11 @@ Branch: REL9_1_STABLE [354b3a3ac] 2016-06-19 14:01:17 -0400 This release contains a variety of fixes from 9.1.21. For information about new features in the 9.1 major release, see - . + . - The PostgreSQL community will stop releasing updates + The PostgreSQL community will stop releasing updates for the 9.1.X release series in September 2016. Users are encouraged to update to a newer release branch soon. 
@@ -582,7 +582,7 @@ Branch: REL9_1_STABLE [354b3a3ac] 2016-06-19 14:01:17 -0400 However, if you are upgrading from a version earlier than 9.1.16, - see . + see . @@ -604,7 +604,7 @@ Branch: REL9_1_STABLE [354b3a3ac] 2016-06-19 14:01:17 -0400 using OpenSSL within a single process and not all the code involved follows the same rules for when to clear the error queue. Failures have been reported specifically when a client application - uses SSL connections in libpq concurrently with + uses SSL connections in libpq concurrently with SSL connections using the PHP, Python, or Ruby wrappers for OpenSSL. It's possible for similar problems to arise within the server as well, if an extension module establishes an outgoing SSL connection. @@ -613,7 +613,7 @@ Branch: REL9_1_STABLE [354b3a3ac] 2016-06-19 14:01:17 -0400 - Fix failed to build any N-way joins + Fix failed to build any N-way joins planner error with a full join enclosed in the right-hand side of a left join (Tom Lane) @@ -621,8 +621,8 @@ Branch: REL9_1_STABLE [354b3a3ac] 2016-06-19 14:01:17 -0400 - Fix possible misbehavior of TH, th, - and Y,YYY format codes in to_timestamp() + Fix possible misbehavior of TH, th, + and Y,YYY format codes in to_timestamp() (Tom Lane) @@ -634,28 +634,28 @@ Branch: REL9_1_STABLE [354b3a3ac] 2016-06-19 14:01:17 -0400 - Fix dumping of rules and views in which the array - argument of a value operator - ANY (array) construct is a sub-SELECT + Fix dumping of rules and views in which the array + argument of a value operator + ANY (array) construct is a sub-SELECT (Tom Lane) - Make pg_regress use a startup timeout from the - PGCTLTIMEOUT environment variable, if that's set (Tom Lane) + Make pg_regress use a startup timeout from the + PGCTLTIMEOUT environment variable, if that's set (Tom Lane) This is for consistency with a behavior recently added - to pg_ctl; it eases automated testing on slow machines. + to pg_ctl; it eases automated testing on slow machines. - Fix pg_upgrade to correctly restore extension + Fix pg_upgrade to correctly restore extension membership for operator families containing only one operator class (Tom Lane) @@ -663,23 +663,23 @@ Branch: REL9_1_STABLE [354b3a3ac] 2016-06-19 14:01:17 -0400 In such a case, the operator family was restored into the new database, but it was no longer marked as part of the extension. This had no - immediate ill effects, but would cause later pg_dump + immediate ill effects, but would cause later pg_dump runs to emit output that would cause (harmless) errors on restore. - Rename internal function strtoi() - to strtoint() to avoid conflict with a NetBSD library + Rename internal function strtoi() + to strtoint() to avoid conflict with a NetBSD library function (Thomas Munro) - Fix reporting of errors from bind() - and listen() system calls on Windows (Tom Lane) + Fix reporting of errors from bind() + and listen() system calls on Windows (Tom Lane) @@ -692,12 +692,12 @@ Branch: REL9_1_STABLE [354b3a3ac] 2016-06-19 14:01:17 -0400 - Avoid possibly-unsafe use of Windows' FormatMessage() + Avoid possibly-unsafe use of Windows' FormatMessage() function (Christian Ullrich) - Use the FORMAT_MESSAGE_IGNORE_INSERTS flag where + Use the FORMAT_MESSAGE_IGNORE_INSERTS flag where appropriate. No live bug is known to exist here, but it seems like a good idea to be careful. 
@@ -705,9 +705,9 @@ Branch: REL9_1_STABLE [354b3a3ac] 2016-06-19 14:01:17 -0400 - Update time zone data files to tzdata release 2016d + Update time zone data files to tzdata release 2016d for DST law changes in Russia and Venezuela. There are new zone - names Europe/Kirov and Asia/Tomsk to reflect + names Europe/Kirov and Asia/Tomsk to reflect the fact that these regions now have different time zone histories from adjacent regions. @@ -729,7 +729,7 @@ Branch: REL9_1_STABLE [354b3a3ac] 2016-06-19 14:01:17 -0400 This release contains a variety of fixes from 9.1.20. For information about new features in the 9.1 major release, see - . + . @@ -741,7 +741,7 @@ Branch: REL9_1_STABLE [354b3a3ac] 2016-06-19 14:01:17 -0400 However, if you are upgrading from a version earlier than 9.1.16, - see . + see . @@ -754,56 +754,56 @@ Branch: REL9_1_STABLE [354b3a3ac] 2016-06-19 14:01:17 -0400 Fix incorrect handling of NULL index entries in - indexed ROW() comparisons (Tom Lane) + indexed ROW() comparisons (Tom Lane) An index search using a row comparison such as ROW(a, b) > - ROW('x', 'y') would stop upon reaching a NULL entry in - the b column, ignoring the fact that there might be - non-NULL b values associated with later values - of a. + ROW('x', 'y') would stop upon reaching a NULL entry in + the b column, ignoring the fact that there might be + non-NULL b values associated with later values + of a. Avoid unlikely data-loss scenarios due to renaming files without - adequate fsync() calls before and after (Michael Paquier, + adequate fsync() calls before and after (Michael Paquier, Tomas Vondra, Andres Freund) - Correctly handle cases where pg_subtrans is close to XID + Correctly handle cases where pg_subtrans is close to XID wraparound during server startup (Jeff Janes) - Fix corner-case crash due to trying to free localeconv() + Fix corner-case crash due to trying to free localeconv() output strings more than once (Tom Lane) - Fix parsing of affix files for ispell dictionaries + Fix parsing of affix files for ispell dictionaries (Tom Lane) The code could go wrong if the affix file contained any characters whose byte length changes during case-folding, for - example I in Turkish UTF8 locales. + example I in Turkish UTF8 locales. - Avoid use of sscanf() to parse ispell + Avoid use of sscanf() to parse ispell dictionary files (Artur Zakirov) @@ -829,27 +829,27 @@ Branch: REL9_1_STABLE [354b3a3ac] 2016-06-19 14:01:17 -0400 - Fix psql's tab completion logic to handle multibyte + Fix psql's tab completion logic to handle multibyte characters properly (Kyotaro Horiguchi, Robert Haas) - Fix psql's tab completion for - SECURITY LABEL (Tom Lane) + Fix psql's tab completion for + SECURITY LABEL (Tom Lane) - Pressing TAB after SECURITY LABEL might cause a crash + Pressing TAB after SECURITY LABEL might cause a crash or offering of inappropriate keywords. - Make pg_ctl accept a wait timeout from the - PGCTLTIMEOUT environment variable, if none is specified on + Make pg_ctl accept a wait timeout from the + PGCTLTIMEOUT environment variable, if none is specified on the command line (Noah Misch) @@ -863,20 +863,20 @@ Branch: REL9_1_STABLE [354b3a3ac] 2016-06-19 14:01:17 -0400 Fix incorrect test for Windows service status - in pg_ctl (Manuel Mathar) + in pg_ctl (Manuel Mathar) The previous set of minor releases attempted to - fix pg_ctl to properly determine whether to send log + fix pg_ctl to properly determine whether to send log messages to Window's Event Log, but got the test backwards. 
-      Fix pgbench to correctly handle the combination
-      of -C and -M prepared options (Tom Lane)
+      Fix pgbench to correctly handle the combination
+      of -C and -M prepared options (Tom Lane)

@@ -897,21 +897,21 @@ Branch: REL9_1_STABLE [354b3a3ac] 2016-06-19 14:01:17 -0400
       Fix multiple mistakes in the statistics returned
-      by contrib/pgstattuple's pgstatindex()
+      by contrib/pgstattuple's pgstatindex()
       function (Tom Lane)

-      Remove dependency on psed in MSVC builds, since it's no
+      Remove dependency on psed in MSVC builds, since it's no
       longer provided by core Perl (Michael Paquier, Andrew Dunstan)

-      Update time zone data files to tzdata release 2016c
+      Update time zone data files to tzdata release 2016c
       for DST law changes in Azerbaijan, Chile, Haiti, Palestine, and Russia
       (Altai, Astrakhan, Kirov, Sakhalin, Ulyanovsk regions), plus
       historical corrections for Lithuania, Moldova, and Russia

@@ -935,7 +935,7 @@ Branch: REL9_1_STABLE [354b3a3ac] 2016-06-19 14:01:17 -0400
    This release contains a variety of fixes from 9.1.19.
    For information about new features in the 9.1 major release, see
-   .
+   .

@@ -947,7 +947,7 @@ Branch: REL9_1_STABLE [354b3a3ac] 2016-06-19 14:01:17 -0400
    However, if you are upgrading from a version earlier than 9.1.16,
-   see .
+   see .

@@ -972,25 +972,25 @@ Branch: REL9_1_STABLE [354b3a3ac] 2016-06-19 14:01:17 -0400
-      Perform an immediate shutdown if the postmaster.pid file
+      Perform an immediate shutdown if the postmaster.pid file
       is removed (Tom Lane)

       The postmaster now checks every minute or so
-      that postmaster.pid is still there and still contains its
+      that postmaster.pid is still there and still contains its
       own PID. If not, it performs an immediate shutdown, as though it had
-      received SIGQUIT. The main motivation for this change
+      received SIGQUIT. The main motivation for this change
       is to ensure that failed buildfarm runs will get cleaned up without
       manual intervention; but it also serves to limit the bad effects if a
-      DBA forcibly removes postmaster.pid and then starts a new
+      DBA forcibly removes postmaster.pid and then starts a new
       postmaster.

-      In SERIALIZABLE transaction isolation mode, serialization
+      In SERIALIZABLE transaction isolation mode, serialization
       anomalies could be missed due to race conditions during insertions
       (Kevin Grittner, Thomas Munro)

@@ -999,7 +999,7 @@ Branch: REL9_1_STABLE [354b3a3ac] 2016-06-19 14:01:17 -0400
       Fix failure to emit appropriate WAL records when doing ALTER
-      TABLE ...
SET TABLESPACE for unlogged relations (Michael Paquier, Andres Freund) @@ -1018,21 +1018,21 @@ Branch: REL9_1_STABLE [354b3a3ac] 2016-06-19 14:01:17 -0400 - Fix ALTER COLUMN TYPE to reconstruct inherited check + Fix ALTER COLUMN TYPE to reconstruct inherited check constraints properly (Tom Lane) - Fix REASSIGN OWNED to change ownership of composite types + Fix REASSIGN OWNED to change ownership of composite types properly (Álvaro Herrera) - Fix REASSIGN OWNED and ALTER OWNER to correctly + Fix REASSIGN OWNED and ALTER OWNER to correctly update granted-permissions lists when changing owners of data types, foreign data wrappers, or foreign servers (Bruce Momjian, Álvaro Herrera) @@ -1041,7 +1041,7 @@ Branch: REL9_1_STABLE [354b3a3ac] 2016-06-19 14:01:17 -0400 - Fix REASSIGN OWNED to ignore foreign user mappings, + Fix REASSIGN OWNED to ignore foreign user mappings, rather than fail (Álvaro Herrera) @@ -1063,14 +1063,14 @@ Branch: REL9_1_STABLE [354b3a3ac] 2016-06-19 14:01:17 -0400 - Fix dumping of whole-row Vars in ROW() - and VALUES() lists (Tom Lane) + Fix dumping of whole-row Vars in ROW() + and VALUES() lists (Tom Lane) - Fix possible internal overflow in numeric division + Fix possible internal overflow in numeric division (Dean Rasheed) @@ -1122,7 +1122,7 @@ Branch: REL9_1_STABLE [354b3a3ac] 2016-06-19 14:01:17 -0400 This causes the code to emit regular expression is too - complex errors in some cases that previously used unreasonable + complex errors in some cases that previously used unreasonable amounts of time and memory. @@ -1135,14 +1135,14 @@ Branch: REL9_1_STABLE [354b3a3ac] 2016-06-19 14:01:17 -0400 - Make %h and %r escapes - in log_line_prefix work for messages emitted due - to log_connections (Tom Lane) + Make %h and %r escapes + in log_line_prefix work for messages emitted due + to log_connections (Tom Lane) - Previously, %h/%r started to work just after a - new session had emitted the connection received log message; + Previously, %h/%r started to work just after a + new session had emitted the connection received log message; now they work for that message too. @@ -1155,7 +1155,7 @@ Branch: REL9_1_STABLE [354b3a3ac] 2016-06-19 14:01:17 -0400 This oversight resulted in failure to recover from crashes - whenever logging_collector is turned on. + whenever logging_collector is turned on. @@ -1181,13 +1181,13 @@ Branch: REL9_1_STABLE [354b3a3ac] 2016-06-19 14:01:17 -0400 - In psql, ensure that libreadline's idea + In psql, ensure that libreadline's idea of the screen size is updated when the terminal window size changes (Merlin Moncure) - Previously, libreadline did not notice if the window + Previously, libreadline did not notice if the window was resized during query output, leading to strange behavior during later input of multiline queries. 
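The REASSIGN OWNED and ALTER OWNER fixes above concern commands of the following shape; the role and wrapper names are hypothetical:

    REASSIGN OWNED BY old_owner TO new_owner;              -- now also moves composite types correctly
    ALTER FOREIGN DATA WRAPPER my_fdw OWNER TO new_owner;  -- granted-permissions lists stay consistent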
@@ -1195,15 +1195,15 @@ Branch: REL9_1_STABLE [354b3a3ac] 2016-06-19 14:01:17 -0400 - Fix psql's \det command to interpret its - pattern argument the same way as other \d commands with + Fix psql's \det command to interpret its + pattern argument the same way as other \d commands with potentially schema-qualified patterns do (Reece Hart) - Avoid possible crash in psql's \c command + Avoid possible crash in psql's \c command when previous connection was via Unix socket and command specifies a new hostname and same username (Tom Lane) @@ -1211,21 +1211,21 @@ Branch: REL9_1_STABLE [354b3a3ac] 2016-06-19 14:01:17 -0400 - In pg_ctl start -w, test child process status directly + In pg_ctl start -w, test child process status directly rather than relying on heuristics (Tom Lane, Michael Paquier) - Previously, pg_ctl relied on an assumption that the new - postmaster would always create postmaster.pid within five + Previously, pg_ctl relied on an assumption that the new + postmaster would always create postmaster.pid within five seconds. But that can fail on heavily-loaded systems, - causing pg_ctl to report incorrectly that the + causing pg_ctl to report incorrectly that the postmaster failed to start. Except on Windows, this change also means that a pg_ctl start - -w done immediately after another such command will now reliably + -w done immediately after another such command will now reliably fail, whereas previously it would report success if done within two seconds of the first command. @@ -1233,23 +1233,23 @@ Branch: REL9_1_STABLE [354b3a3ac] 2016-06-19 14:01:17 -0400 - In pg_ctl start -w, don't attempt to use a wildcard listen + In pg_ctl start -w, don't attempt to use a wildcard listen address to connect to the postmaster (Kondo Yuta) - On Windows, pg_ctl would fail to detect postmaster - startup if listen_addresses is set to 0.0.0.0 - or ::, because it would try to use that value verbatim as + On Windows, pg_ctl would fail to detect postmaster + startup if listen_addresses is set to 0.0.0.0 + or ::, because it would try to use that value verbatim as the address to connect to, which doesn't work. Instead assume - that 127.0.0.1 or ::1, respectively, is the + that 127.0.0.1 or ::1, respectively, is the right thing to use. - In pg_ctl on Windows, check service status to decide + In pg_ctl on Windows, check service status to decide where to send output, rather than checking if standard output is a terminal (Michael Paquier) @@ -1257,18 +1257,18 @@ Branch: REL9_1_STABLE [354b3a3ac] 2016-06-19 14:01:17 -0400 - In pg_dump and pg_basebackup, adopt + In pg_dump and pg_basebackup, adopt the GNU convention for handling tar-archive members exceeding 8GB (Tom Lane) - The POSIX standard for tar file format does not allow + The POSIX standard for tar file format does not allow archive member files to exceed 8GB, but most modern implementations - of tar support an extension that fixes that. Adopt - this extension so that pg_dump with no longer fails on tables with more than 8GB of data, and so - that pg_basebackup can handle files larger than 8GB. + that pg_basebackup can handle files larger than 8GB. In addition, fix some portability issues that could cause failures for members between 4GB and 8GB on some platforms. 
Potentially these problems could cause unrecoverable data loss due to unreadable backup @@ -1278,44 +1278,44 @@ Branch: REL9_1_STABLE [354b3a3ac] 2016-06-19 14:01:17 -0400 - Fix assorted corner-case bugs in pg_dump's processing + Fix assorted corner-case bugs in pg_dump's processing of extension member objects (Tom Lane) - Make pg_dump mark a view's triggers as needing to be + Make pg_dump mark a view's triggers as needing to be processed after its rule, to prevent possible failure during - parallel pg_restore (Tom Lane) + parallel pg_restore (Tom Lane) Ensure that relation option values are properly quoted - in pg_dump (Kouhei Sutou, Tom Lane) + in pg_dump (Kouhei Sutou, Tom Lane) A reloption value that isn't a simple identifier or number could lead to dump/reload failures due to syntax errors in CREATE statements - issued by pg_dump. This is not an issue with any - reloption currently supported by core PostgreSQL, but + issued by pg_dump. This is not an issue with any + reloption currently supported by core PostgreSQL, but extensions could allow reloptions that cause the problem. - Fix pg_upgrade's file-copying code to handle errors + Fix pg_upgrade's file-copying code to handle errors properly on Windows (Bruce Momjian) - Install guards in pgbench against corner-case overflow + Install guards in pgbench against corner-case overflow conditions during evaluation of script-specified division or modulo operators (Fabien Coelho, Michael Paquier) @@ -1323,22 +1323,22 @@ Branch: REL9_1_STABLE [354b3a3ac] 2016-06-19 14:01:17 -0400 - Prevent certain PL/Java parameters from being set by + Prevent certain PL/Java parameters from being set by non-superusers (Noah Misch) - This change mitigates a PL/Java security bug - (CVE-2016-0766), which was fixed in PL/Java by marking + This change mitigates a PL/Java security bug + (CVE-2016-0766), which was fixed in PL/Java by marking these parameters as superuser-only. To fix the security hazard for - sites that update PostgreSQL more frequently - than PL/Java, make the core code aware of them also. + sites that update PostgreSQL more frequently + than PL/Java, make the core code aware of them also. - Improve libpq's handling of out-of-memory situations + Improve libpq's handling of out-of-memory situations (Michael Paquier, Amit Kapila, Heikki Linnakangas) @@ -1346,42 +1346,42 @@ Branch: REL9_1_STABLE [354b3a3ac] 2016-06-19 14:01:17 -0400 Fix order of arguments - in ecpg-generated typedef statements + in ecpg-generated typedef statements (Michael Meskes) - Use %g not %f format - in ecpg's PGTYPESnumeric_from_double() + Use %g not %f format + in ecpg's PGTYPESnumeric_from_double() (Tom Lane) - Fix ecpg-supplied header files to not contain comments + Fix ecpg-supplied header files to not contain comments continued from a preprocessor directive line onto the next line (Michael Meskes) - Such a comment is rejected by ecpg. It's not yet clear - whether ecpg itself should be changed. + Such a comment is rejected by ecpg. It's not yet clear + whether ecpg itself should be changed. 
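For the relation-option quoting fix above, core reloptions are plain numbers or identifiers, as in this hypothetical example; only extension-defined reloptions with more complex values needed the extra quoting in pg_dump output:

    CREATE TABLE my_table (id int) WITH (fillfactor = 70);
    ALTER TABLE my_table SET (autovacuum_enabled = false);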
- Ensure that contrib/pgcrypto's crypt() + Ensure that contrib/pgcrypto's crypt() function can be interrupted by query cancel (Andreas Karlsson) - Accept flex versions later than 2.5.x + Accept flex versions later than 2.5.x (Tom Lane, Michael Paquier) @@ -1393,19 +1393,19 @@ Branch: REL9_1_STABLE [354b3a3ac] 2016-06-19 14:01:17 -0400 - Install our missing script where PGXS builds can find it + Install our missing script where PGXS builds can find it (Jim Nasby) This allows sane behavior in a PGXS build done on a machine where build - tools such as bison are missing. + tools such as bison are missing. - Ensure that dynloader.h is included in the installed + Ensure that dynloader.h is included in the installed header files in MSVC builds (Bruce Momjian, Michael Paquier) @@ -1413,11 +1413,11 @@ Branch: REL9_1_STABLE [354b3a3ac] 2016-06-19 14:01:17 -0400 Add variant regression test expected-output file to match behavior of - current libxml2 (Tom Lane) + current libxml2 (Tom Lane) - The fix for libxml2's CVE-2015-7499 causes it not to + The fix for libxml2's CVE-2015-7499 causes it not to output error context reports in some cases where it used to do so. This seems to be a bug, but we'll probably have to live with it for some time, so work around it. @@ -1426,7 +1426,7 @@ Branch: REL9_1_STABLE [354b3a3ac] 2016-06-19 14:01:17 -0400 - Update time zone data files to tzdata release 2016a for + Update time zone data files to tzdata release 2016a for DST law changes in Cayman Islands, Metlakatla, and Trans-Baikal Territory (Zabaykalsky Krai), plus historical corrections for Pakistan. @@ -1448,7 +1448,7 @@ Branch: REL9_1_STABLE [354b3a3ac] 2016-06-19 14:01:17 -0400 This release contains a variety of fixes from 9.1.18. For information about new features in the 9.1 major release, see - . + . @@ -1460,7 +1460,7 @@ Branch: REL9_1_STABLE [354b3a3ac] 2016-06-19 14:01:17 -0400 However, if you are upgrading from a version earlier than 9.1.16, - see . + see . @@ -1472,8 +1472,8 @@ Branch: REL9_1_STABLE [354b3a3ac] 2016-06-19 14:01:17 -0400 - Fix contrib/pgcrypto to detect and report - too-short crypt() salts (Josh Kupershmidt) + Fix contrib/pgcrypto to detect and report + too-short crypt() salts (Josh Kupershmidt) @@ -1499,13 +1499,13 @@ Branch: REL9_1_STABLE [354b3a3ac] 2016-06-19 14:01:17 -0400 - Fix insertion of relations into the relation cache init file + Fix insertion of relations into the relation cache init file (Tom Lane) An oversight in a patch in the most recent minor releases - caused pg_trigger_tgrelid_tgname_index to be omitted + caused pg_trigger_tgrelid_tgname_index to be omitted from the init file. Subsequent sessions detected this, then deemed the init file to be broken and silently ignored it, resulting in a significant degradation in session startup time. In addition to fixing @@ -1523,7 +1523,7 @@ Branch: REL9_1_STABLE [354b3a3ac] 2016-06-19 14:01:17 -0400 - Improve LISTEN startup time when there are many unread + Improve LISTEN startup time when there are many unread notifications (Matt Newell) @@ -1535,7 +1535,7 @@ Branch: REL9_1_STABLE [354b3a3ac] 2016-06-19 14:01:17 -0400 - This substantially improves performance when pg_dump + This substantially improves performance when pg_dump tries to dump a large number of tables. @@ -1550,13 +1550,13 @@ Branch: REL9_1_STABLE [354b3a3ac] 2016-06-19 14:01:17 -0400 too many bugs in practice, both in the underlying OpenSSL library and in our usage of it. Renegotiation will be removed entirely in 9.5 and later. 
In the older branches, just change the default value - of ssl_renegotiation_limit to zero (disabled). + of ssl_renegotiation_limit to zero (disabled). - Lower the minimum values of the *_freeze_max_age parameters + Lower the minimum values of the *_freeze_max_age parameters (Andres Freund) @@ -1568,14 +1568,14 @@ Branch: REL9_1_STABLE [354b3a3ac] 2016-06-19 14:01:17 -0400 - Limit the maximum value of wal_buffers to 2GB to avoid + Limit the maximum value of wal_buffers to 2GB to avoid server crashes (Josh Berkus) - Fix rare internal overflow in multiplication of numeric values + Fix rare internal overflow in multiplication of numeric values (Dean Rasheed) @@ -1583,21 +1583,21 @@ Branch: REL9_1_STABLE [354b3a3ac] 2016-06-19 14:01:17 -0400 Guard against hard-to-reach stack overflows involving record types, - range types, json, jsonb, tsquery, - ltxtquery and query_int (Noah Misch) + range types, json, jsonb, tsquery, + ltxtquery and query_int (Noah Misch) - Fix handling of DOW and DOY in datetime input + Fix handling of DOW and DOY in datetime input (Greg Stark) These tokens aren't meant to be used in datetime values, but previously they resulted in opaque internal error messages rather - than invalid input syntax. + than invalid input syntax. @@ -1610,7 +1610,7 @@ Branch: REL9_1_STABLE [354b3a3ac] 2016-06-19 14:01:17 -0400 Add recursion depth protections to regular expression, SIMILAR - TO, and LIKE matching (Tom Lane) + TO, and LIKE matching (Tom Lane) @@ -1654,22 +1654,22 @@ Branch: REL9_1_STABLE [354b3a3ac] 2016-06-19 14:01:17 -0400 - Fix unexpected out-of-memory situation during sort errors - when using tuplestores with small work_mem settings (Tom + Fix unexpected out-of-memory situation during sort errors + when using tuplestores with small work_mem settings (Tom Lane) - Fix very-low-probability stack overrun in qsort (Tom Lane) + Fix very-low-probability stack overrun in qsort (Tom Lane) - Fix invalid memory alloc request size failure in hash joins - with large work_mem settings (Tomas Vondra, Tom Lane) + Fix invalid memory alloc request size failure in hash joins + with large work_mem settings (Tomas Vondra, Tom Lane) @@ -1682,9 +1682,9 @@ Branch: REL9_1_STABLE [354b3a3ac] 2016-06-19 14:01:17 -0400 These mistakes could lead to incorrect query plans that would give wrong answers, or to assertion failures in assert-enabled builds, or to odd planner errors such as could not devise a query plan for the - given query, could not find pathkey item to - sort, plan should not reference subplan's variable, - or failed to assign all NestLoopParams to plan nodes. + given query, could not find pathkey item to + sort, plan should not reference subplan's variable, + or failed to assign all NestLoopParams to plan nodes. Thanks are due to Andreas Seltenreich and Piotr Stefaniak for fuzz testing that exposed these problems. @@ -1723,12 +1723,12 @@ Branch: REL9_0_STABLE [9d6af7367] 2015-08-15 11:02:34 -0400 During postmaster shutdown, ensure that per-socket lock files are removed and listen sockets are closed before we remove - the postmaster.pid file (Tom Lane) + the postmaster.pid file (Tom Lane) This avoids race-condition failures if an external script attempts to - start a new postmaster as soon as pg_ctl stop returns. + start a new postmaster as soon as pg_ctl stop returns. 
@@ -1748,7 +1748,7 @@ Branch: REL9_0_STABLE [9d6af7367] 2015-08-15 11:02:34 -0400 - Do not print a WARNING when an autovacuum worker is already + Do not print a WARNING when an autovacuum worker is already gone when we attempt to signal it, and reduce log verbosity for such signals (Tom Lane) @@ -1781,44 +1781,44 @@ Branch: REL9_0_STABLE [9d6af7367] 2015-08-15 11:02:34 -0400 Fix off-by-one error that led to otherwise-harmless warnings - about apparent wraparound in subtrans/multixact truncation + about apparent wraparound in subtrans/multixact truncation (Thomas Munro) - Fix misreporting of CONTINUE and MOVE statement - types in PL/pgSQL's error context messages + Fix misreporting of CONTINUE and MOVE statement + types in PL/pgSQL's error context messages (Pavel Stehule, Tom Lane) - Fix PL/Perl to handle non-ASCII error + Fix PL/Perl to handle non-ASCII error message texts correctly (Alex Hunsaker) - Fix PL/Python crash when returning the string - representation of a record result (Tom Lane) + Fix PL/Python crash when returning the string + representation of a record result (Tom Lane) - Fix some places in PL/Tcl that neglected to check for - failure of malloc() calls (Michael Paquier, Álvaro + Fix some places in PL/Tcl that neglected to check for + failure of malloc() calls (Michael Paquier, Álvaro Herrera) - In contrib/isn, fix output of ISBN-13 numbers that begin + In contrib/isn, fix output of ISBN-13 numbers that begin with 979 (Fabien Coelho) @@ -1830,7 +1830,7 @@ Branch: REL9_0_STABLE [9d6af7367] 2015-08-15 11:02:34 -0400 - Improve libpq's handling of out-of-memory conditions + Improve libpq's handling of out-of-memory conditions (Michael Paquier, Heikki Linnakangas) @@ -1838,68 +1838,68 @@ Branch: REL9_0_STABLE [9d6af7367] 2015-08-15 11:02:34 -0400 Fix memory leaks and missing out-of-memory checks - in ecpg (Michael Paquier) + in ecpg (Michael Paquier) - Fix psql's code for locale-aware formatting of numeric + Fix psql's code for locale-aware formatting of numeric output (Tom Lane) - The formatting code invoked by \pset numericlocale on + The formatting code invoked by \pset numericlocale on did the wrong thing for some uncommon cases such as numbers with an exponent but no decimal point. It could also mangle already-localized - output from the money data type. + output from the money data type. 
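The CONTINUE statement type mentioned in the PL/pgSQL fix above appears in code like this throwaway DO block; with the fix, errors raised near such statements are attributed to the correct statement type in the error context message:

    DO $$
    BEGIN
        FOR i IN 1..5 LOOP
            CONTINUE WHEN i = 3;       -- skip one iteration
            RAISE NOTICE 'i = %', i;
        END LOOP;
    END
    $$;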
- Prevent crash in psql's \c command when + Prevent crash in psql's \c command when there is no current connection (Noah Misch) - Fix selection of default zlib compression level - in pg_dump's directory output format (Andrew Dunstan) + Fix selection of default zlib compression level + in pg_dump's directory output format (Andrew Dunstan) - Ensure that temporary files created during a pg_dump - run with tar-format output are not world-readable (Michael + Ensure that temporary files created during a pg_dump + run with tar-format output are not world-readable (Michael Paquier) - Fix pg_dump and pg_upgrade to support - cases where the postgres or template1 database + Fix pg_dump and pg_upgrade to support + cases where the postgres or template1 database is in a non-default tablespace (Marti Raudsepp, Bruce Momjian) - Fix pg_dump to handle object privileges sanely when + Fix pg_dump to handle object privileges sanely when dumping from a server too old to have a particular privilege type (Tom Lane) When dumping functions or procedural languages from pre-7.3 - servers, pg_dump would - produce GRANT/REVOKE commands that revoked the + servers, pg_dump would + produce GRANT/REVOKE commands that revoked the owner's grantable privileges and instead granted all privileges - to PUBLIC. Since the privileges involved are - just USAGE and EXECUTE, this isn't a security + to PUBLIC. Since the privileges involved are + just USAGE and EXECUTE, this isn't a security problem, but it's certainly a surprising representation of the older systems' behavior. Fix it to leave the default privilege state alone in these cases. @@ -1908,18 +1908,18 @@ Branch: REL9_0_STABLE [9d6af7367] 2015-08-15 11:02:34 -0400 - Fix pg_dump to dump shell types (Tom Lane) + Fix pg_dump to dump shell types (Tom Lane) Shell types (that is, not-yet-fully-defined types) aren't useful for - much, but nonetheless pg_dump should dump them. + much, but nonetheless pg_dump should dump them. - Fix assorted minor memory leaks in pg_dump and other + Fix assorted minor memory leaks in pg_dump and other client-side programs (Michael Paquier) @@ -1927,11 +1927,11 @@ Branch: REL9_0_STABLE [9d6af7367] 2015-08-15 11:02:34 -0400 Fix spinlock assembly code for PPC hardware to be compatible - with AIX's native assembler (Tom Lane) + with AIX's native assembler (Tom Lane) - Building with gcc didn't work if gcc + Building with gcc didn't work if gcc had been configured to use the native assembler, which is becoming more common. 
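A shell type, as referenced in the pg_dump fix above, is created with a bare CREATE TYPE and completed later; the type name here is hypothetical:

    CREATE TYPE my_shell;   -- declares the name only; no input/output functions yet
    -- pg_dump now emits this declaration even if the type is never completed.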
@@ -1939,14 +1939,14 @@ Branch: REL9_0_STABLE [9d6af7367] 2015-08-15 11:02:34 -0400 - On AIX, test the -qlonglong compiler option + On AIX, test the -qlonglong compiler option rather than just assuming it's safe to use (Noah Misch) - On AIX, use -Wl,-brtllib link option to allow + On AIX, use -Wl,-brtllib link option to allow symbols to be resolved at runtime (Noah Misch) @@ -1958,38 +1958,38 @@ Branch: REL9_0_STABLE [9d6af7367] 2015-08-15 11:02:34 -0400 Avoid use of inline functions when compiling with - 32-bit xlc, due to compiler bugs (Noah Misch) + 32-bit xlc, due to compiler bugs (Noah Misch) - Use librt for sched_yield() when necessary, + Use librt for sched_yield() when necessary, which it is on some Solaris versions (Oskari Saarenmaa) - Fix Windows install.bat script to handle target directory + Fix Windows install.bat script to handle target directory names that contain spaces (Heikki Linnakangas) - Make the numeric form of the PostgreSQL version number - (e.g., 90405) readily available to extension Makefiles, - as a variable named VERSION_NUM (Michael Paquier) + Make the numeric form of the PostgreSQL version number + (e.g., 90405) readily available to extension Makefiles, + as a variable named VERSION_NUM (Michael Paquier) - Update time zone data files to tzdata release 2015g for + Update time zone data files to tzdata release 2015g for DST law changes in Cayman Islands, Fiji, Moldova, Morocco, Norfolk Island, North Korea, Turkey, and Uruguay. There is a new zone name - America/Fort_Nelson for the Canadian Northern Rockies. + America/Fort_Nelson for the Canadian Northern Rockies. @@ -2009,7 +2009,7 @@ Branch: REL9_0_STABLE [9d6af7367] 2015-08-15 11:02:34 -0400 This release contains a small number of fixes from 9.1.17. For information about new features in the 9.1 major release, see - . + . @@ -2021,7 +2021,7 @@ Branch: REL9_0_STABLE [9d6af7367] 2015-08-15 11:02:34 -0400 However, if you are upgrading from a version earlier than 9.1.16, - see . + see . @@ -2038,7 +2038,7 @@ Branch: REL9_0_STABLE [9d6af7367] 2015-08-15 11:02:34 -0400 With just the wrong timing of concurrent activity, a VACUUM - FULL on a system catalog might fail to update the init file + FULL on a system catalog might fail to update the init file that's used to avoid cache-loading work for new sessions. This would result in later sessions being unable to access that catalog at all. This is a very ancient bug, but it's so hard to trigger that no @@ -2049,13 +2049,13 @@ Branch: REL9_0_STABLE [9d6af7367] 2015-08-15 11:02:34 -0400 Avoid deadlock between incoming sessions and CREATE/DROP - DATABASE (Tom Lane) + DATABASE (Tom Lane) A new session starting in a database that is the target of - a DROP DATABASE command, or is the template for - a CREATE DATABASE command, could cause the command to wait + a DROP DATABASE command, or is the template for + a CREATE DATABASE command, could cause the command to wait for five seconds and then fail, even if the new session would have exited before that. @@ -2077,7 +2077,7 @@ Branch: REL9_0_STABLE [9d6af7367] 2015-08-15 11:02:34 -0400 This release contains a small number of fixes from 9.1.16. For information about new features in the 9.1 major release, see - . + . @@ -2089,7 +2089,7 @@ Branch: REL9_0_STABLE [9d6af7367] 2015-08-15 11:02:34 -0400 However, if you are upgrading from a version earlier than 9.1.16, - see . + see . 
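The init-file scenario above involves a command of this form (superuser only); with the fix, the relation cache init file is reliably rewritten afterwards:

    VACUUM FULL pg_catalog.pg_class;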
@@ -2101,12 +2101,12 @@ Branch: REL9_0_STABLE [9d6af7367] 2015-08-15 11:02:34 -0400 - Avoid failures while fsync'ing data directory during + Avoid failures while fsync'ing data directory during crash restart (Abhijit Menon-Sen, Tom Lane) - In the previous minor releases we added a patch to fsync + In the previous minor releases we added a patch to fsync everything in the data directory after a crash. Unfortunately its response to any error condition was to fail, thereby preventing the server from starting up, even when the problem was quite harmless. @@ -2120,29 +2120,29 @@ Branch: REL9_0_STABLE [9d6af7367] 2015-08-15 11:02:34 -0400 - Remove configure's check prohibiting linking to a - threaded libpython - on OpenBSD (Tom Lane) + Remove configure's check prohibiting linking to a + threaded libpython + on OpenBSD (Tom Lane) The failure this restriction was meant to prevent seems to not be a - problem anymore on current OpenBSD + problem anymore on current OpenBSD versions. - Allow libpq to use TLS protocol versions beyond v1 + Allow libpq to use TLS protocol versions beyond v1 (Noah Misch) - For a long time, libpq was coded so that the only SSL + For a long time, libpq was coded so that the only SSL protocol it would allow was TLS v1. Now that newer TLS versions are becoming popular, allow it to negotiate the highest commonly-supported - TLS version with the server. (PostgreSQL servers were + TLS version with the server. (PostgreSQL servers were already capable of such negotiation, so no change is needed on the server side.) This is a back-patch of a change already released in 9.4.0. @@ -2165,7 +2165,7 @@ Branch: REL9_0_STABLE [9d6af7367] 2015-08-15 11:02:34 -0400 This release contains a variety of fixes from 9.1.15. For information about new features in the 9.1 major release, see - . + . @@ -2176,14 +2176,14 @@ Branch: REL9_0_STABLE [9d6af7367] 2015-08-15 11:02:34 -0400 - However, if you use contrib/citext's - regexp_matches() functions, see the changelog entry below + However, if you use contrib/citext's + regexp_matches() functions, see the changelog entry below about that. Also, if you are upgrading from a version earlier than 9.1.14, - see . + see . @@ -2215,7 +2215,7 @@ Branch: REL9_0_STABLE [9d6af7367] 2015-08-15 11:02:34 -0400 - Our replacement implementation of snprintf() failed to + Our replacement implementation of snprintf() failed to check for errors reported by the underlying system library calls; the main case that might be missed is out-of-memory situations. In the worst case this might lead to information exposure, due to our @@ -2225,7 +2225,7 @@ Branch: REL9_0_STABLE [9d6af7367] 2015-08-15 11:02:34 -0400 - It remains possible that some calls of the *printf() + It remains possible that some calls of the *printf() family of functions are vulnerable to information disclosure if an out-of-memory error occurs at just the wrong time. We judge the risk to not be large, but will continue analysis in this area. @@ -2235,15 +2235,15 @@ Branch: REL9_0_STABLE [9d6af7367] 2015-08-15 11:02:34 -0400 - In contrib/pgcrypto, uniformly report decryption failures - as Wrong key or corrupt data (Noah Misch) + In contrib/pgcrypto, uniformly report decryption failures + as Wrong key or corrupt data (Noah Misch) Previously, some cases of decryption with an incorrect key could report other error message texts. It has been shown that such variance in error reports can aid attackers in recovering keys from other systems. 
- While it's unknown whether pgcrypto's specific behaviors + While it's unknown whether pgcrypto's specific behaviors are likewise exploitable, it seems better to avoid the risk by using a one-size-fits-all message. (CVE-2015-3167) @@ -2252,16 +2252,16 @@ Branch: REL9_0_STABLE [9d6af7367] 2015-08-15 11:02:34 -0400 - Fix incorrect declaration of contrib/citext's - regexp_matches() functions (Tom Lane) + Fix incorrect declaration of contrib/citext's + regexp_matches() functions (Tom Lane) - These functions should return setof text[], like the core + These functions should return setof text[], like the core functions they are wrappers for; but they were incorrectly declared as - returning just text[]. This mistake had two results: first, + returning just text[]. This mistake had two results: first, if there was no match you got a scalar null result, whereas what you - should get is an empty set (zero rows). Second, the g flag + should get is an empty set (zero rows). Second, the g flag was effectively ignored, since you would get only one result array even if there were multiple matches. @@ -2269,16 +2269,16 @@ Branch: REL9_0_STABLE [9d6af7367] 2015-08-15 11:02:34 -0400 While the latter behavior is clearly a bug, there might be applications depending on the former behavior; therefore the function declarations - will not be changed by default until PostgreSQL 9.5. + will not be changed by default until PostgreSQL 9.5. In pre-9.5 branches, the old behavior exists in version 1.0 of - the citext extension, while we have provided corrected - declarations in version 1.1 (which is not installed by + the citext extension, while we have provided corrected + declarations in version 1.1 (which is not installed by default). To adopt the fix in pre-9.5 branches, execute - ALTER EXTENSION citext UPDATE TO '1.1' in each database in - which citext is installed. (You can also update + ALTER EXTENSION citext UPDATE TO '1.1' in each database in + which citext is installed. (You can also update back to 1.0 if you need to undo that.) Be aware that either update direction will require dropping and recreating any views or rules that - use citext's regexp_matches() functions. + use citext's regexp_matches() functions. @@ -2306,7 +2306,7 @@ Branch: REL9_0_STABLE [9d6af7367] 2015-08-15 11:02:34 -0400 This oversight in the planner has been observed to cause could - not find RelOptInfo for given relids errors, but it seems possible + not find RelOptInfo for given relids errors, but it seems possible that sometimes an incorrect query plan might get past that consistency check and result in silently-wrong query output. @@ -2334,7 +2334,7 @@ Branch: REL9_0_STABLE [9d6af7367] 2015-08-15 11:02:34 -0400 This oversight has been seen to lead to failed to join all - relations together errors in queries involving LATERAL, + relations together errors in queries involving LATERAL, and that might happen in other cases as well. 
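The citext update described above boils down to the following commands, run in each database where the extension is installed (the sample string is arbitrary):

    ALTER EXTENSION citext UPDATE TO '1.1';
    -- With the 1.1 declarations, the g flag is honored and each match is a row:
    SELECT regexp_matches('barbecue'::citext, 'b'::citext, 'g');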
@@ -2342,7 +2342,7 @@ Branch: REL9_0_STABLE [9d6af7367] 2015-08-15 11:02:34 -0400 Fix possible deadlock at startup - when max_prepared_transactions is too small + when max_prepared_transactions is too small (Heikki Linnakangas) @@ -2356,14 +2356,14 @@ Branch: REL9_0_STABLE [9d6af7367] 2015-08-15 11:02:34 -0400 - Avoid cannot GetMultiXactIdMembers() during recovery error + Avoid cannot GetMultiXactIdMembers() during recovery error (Álvaro Herrera) - Recursively fsync() the data directory after a crash + Recursively fsync() the data directory after a crash (Abhijit Menon-Sen, Robert Haas) @@ -2383,13 +2383,13 @@ Branch: REL9_0_STABLE [9d6af7367] 2015-08-15 11:02:34 -0400 - Cope with unexpected signals in LockBufferForCleanup() + Cope with unexpected signals in LockBufferForCleanup() (Andres Freund) This oversight could result in spurious errors about multiple - backends attempting to wait for pincount 1. + backends attempting to wait for pincount 1. @@ -2430,18 +2430,18 @@ Branch: REL9_0_STABLE [9d6af7367] 2015-08-15 11:02:34 -0400 - ANALYZE executes index expressions many times; if there are + ANALYZE executes index expressions many times; if there are slow functions in such an expression, it's desirable to be able to - cancel the ANALYZE before that loop finishes. + cancel the ANALYZE before that loop finishes. - Ensure tableoid of a foreign table is reported - correctly when a READ COMMITTED recheck occurs after - locking rows in SELECT FOR UPDATE, UPDATE, - or DELETE (Etsuro Fujita) + Ensure tableoid of a foreign table is reported + correctly when a READ COMMITTED recheck occurs after + locking rows in SELECT FOR UPDATE, UPDATE, + or DELETE (Etsuro Fujita) @@ -2454,20 +2454,20 @@ Branch: REL9_0_STABLE [9d6af7367] 2015-08-15 11:02:34 -0400 - Recommend setting include_realm to 1 when using + Recommend setting include_realm to 1 when using Kerberos/GSSAPI/SSPI authentication (Stephen Frost) Without this, identically-named users from different realms cannot be distinguished. For the moment this is only a documentation change, but - it will become the default setting in PostgreSQL 9.5. + it will become the default setting in PostgreSQL 9.5. - Remove code for matching IPv4 pg_hba.conf entries to + Remove code for matching IPv4 pg_hba.conf entries to IPv4-in-IPv6 addresses (Tom Lane) @@ -2480,20 +2480,20 @@ Branch: REL9_0_STABLE [9d6af7367] 2015-08-15 11:02:34 -0400 crashes on some systems, so let's just remove it rather than fix it. (Had we chosen to fix it, that would make for a subtle and potentially security-sensitive change in the effective meaning of - IPv4 pg_hba.conf entries, which does not seem like a good + IPv4 pg_hba.conf entries, which does not seem like a good thing to do in minor releases.) - Report WAL flush, not insert, position in IDENTIFY_SYSTEM + Report WAL flush, not insert, position in IDENTIFY_SYSTEM replication command (Heikki Linnakangas) This avoids a possible startup failure - in pg_receivexlog. + in pg_receivexlog. 
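The ANALYZE cancellation fix above matters for expression indexes built on slow functions; the function, table, and column names below are hypothetical:

    CREATE FUNCTION slow_norm(text) RETURNS text
        LANGUAGE sql IMMUTABLE AS $$ SELECT lower($1) $$;
    CREATE TABLE docs (body text);
    CREATE INDEX docs_norm_idx ON docs (slow_norm(body));
    ANALYZE docs;   -- the expression-evaluation loop now responds to query cancel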
@@ -2501,14 +2501,14 @@ Branch: REL9_0_STABLE [9d6af7367] 2015-08-15 11:02:34 -0400 While shutting down service on Windows, periodically send status updates to the Service Control Manager to prevent it from killing the - service too soon; and ensure that pg_ctl will wait for + service too soon; and ensure that pg_ctl will wait for shutdown (Krystian Bigaj) - Reduce risk of network deadlock when using libpq's + Reduce risk of network deadlock when using libpq's non-blocking mode (Heikki Linnakangas) @@ -2517,25 +2517,25 @@ Branch: REL9_0_STABLE [9d6af7367] 2015-08-15 11:02:34 -0400 buffer every so often, in case the server has sent enough response data to cause it to block on output. (A typical scenario is that the server is sending a stream of NOTICE messages during COPY FROM - STDIN.) This worked properly in the normal blocking mode, but not - so much in non-blocking mode. We've modified libpq + STDIN.) This worked properly in the normal blocking mode, but not + so much in non-blocking mode. We've modified libpq to opportunistically drain input when it can, but a full defense against this problem requires application cooperation: the application should watch for socket read-ready as well as write-ready conditions, - and be sure to call PQconsumeInput() upon read-ready. + and be sure to call PQconsumeInput() upon read-ready. - Fix array handling in ecpg (Michael Meskes) + Fix array handling in ecpg (Michael Meskes) - Fix psql to sanely handle URIs and conninfo strings as - the first parameter to \connect + Fix psql to sanely handle URIs and conninfo strings as + the first parameter to \connect (David Fetter, Andrew Dunstan, Álvaro Herrera) @@ -2548,38 +2548,38 @@ Branch: REL9_0_STABLE [9d6af7367] 2015-08-15 11:02:34 -0400 - Suppress incorrect complaints from psql on some - platforms that it failed to write ~/.psql_history at exit + Suppress incorrect complaints from psql on some + platforms that it failed to write ~/.psql_history at exit (Tom Lane) This misbehavior was caused by a workaround for a bug in very old - (pre-2006) versions of libedit. We fixed it by + (pre-2006) versions of libedit. We fixed it by removing the workaround, which will cause a similar failure to appear - for anyone still using such versions of libedit. - Recommendation: upgrade that library, or use libreadline. + for anyone still using such versions of libedit. + Recommendation: upgrade that library, or use libreadline. - Fix pg_dump's rule for deciding which casts are + Fix pg_dump's rule for deciding which casts are system-provided casts that should not be dumped (Tom Lane) - In pg_dump, fix failure to honor -Z - compression level option together with -Fd + In pg_dump, fix failure to honor -Z + compression level option together with -Fd (Michael Paquier) - Make pg_dump consider foreign key relationships + Make pg_dump consider foreign key relationships between extension configuration tables while choosing dump order (Gilles Darold, Michael Paquier, Stephen Frost) @@ -2592,14 +2592,14 @@ Branch: REL9_0_STABLE [9d6af7367] 2015-08-15 11:02:34 -0400 - Fix dumping of views that are just VALUES(...) but have + Fix dumping of views that are just VALUES(...) 
but have column aliases (Tom Lane) - In pg_upgrade, force timeline 1 in the new cluster + In pg_upgrade, force timeline 1 in the new cluster (Bruce Momjian) @@ -2611,7 +2611,7 @@ Branch: REL9_0_STABLE [9d6af7367] 2015-08-15 11:02:34 -0400 - In pg_upgrade, check for improperly non-connectable + In pg_upgrade, check for improperly non-connectable databases before proceeding (Bruce Momjian) @@ -2619,28 +2619,28 @@ Branch: REL9_0_STABLE [9d6af7367] 2015-08-15 11:02:34 -0400 - In pg_upgrade, quote directory paths - properly in the generated delete_old_cluster script + In pg_upgrade, quote directory paths + properly in the generated delete_old_cluster script (Bruce Momjian) - In pg_upgrade, preserve database-level freezing info + In pg_upgrade, preserve database-level freezing info properly (Bruce Momjian) This oversight could cause missing-clog-file errors for tables within - the postgres and template1 databases. + the postgres and template1 databases. - Run pg_upgrade and pg_resetxlog with + Run pg_upgrade and pg_resetxlog with restricted privileges on Windows, so that they don't fail when run by an administrator (Muhammad Asif Naeem) @@ -2648,15 +2648,15 @@ Branch: REL9_0_STABLE [9d6af7367] 2015-08-15 11:02:34 -0400 - Improve handling of readdir() failures when scanning - directories in initdb and pg_basebackup + Improve handling of readdir() failures when scanning + directories in initdb and pg_basebackup (Marco Nenciarini) - Fix slow sorting algorithm in contrib/intarray (Tom Lane) + Fix slow sorting algorithm in contrib/intarray (Tom Lane) @@ -2668,7 +2668,7 @@ Branch: REL9_0_STABLE [9d6af7367] 2015-08-15 11:02:34 -0400 - Update time zone data files to tzdata release 2015d + Update time zone data files to tzdata release 2015d for DST law changes in Egypt, Mongolia, and Palestine, plus historical changes in Canada and Chile. Also adopt revised zone abbreviations for the America/Adak zone (HST/HDT not HAST/HADT). @@ -2691,7 +2691,7 @@ Branch: REL9_0_STABLE [9d6af7367] 2015-08-15 11:02:34 -0400 This release contains a variety of fixes from 9.1.14. For information about new features in the 9.1 major release, see - . + . @@ -2703,7 +2703,7 @@ Branch: REL9_0_STABLE [9d6af7367] 2015-08-15 11:02:34 -0400 However, if you are upgrading from a version earlier than 9.1.14, - see . + see . @@ -2715,15 +2715,15 @@ Branch: REL9_0_STABLE [9d6af7367] 2015-08-15 11:02:34 -0400 - Fix buffer overruns in to_char() + Fix buffer overruns in to_char() (Bruce Momjian) - When to_char() processes a numeric formatting template - calling for a large number of digits, PostgreSQL + When to_char() processes a numeric formatting template + calling for a large number of digits, PostgreSQL would read past the end of a buffer. When processing a crafted - timestamp formatting template, PostgreSQL would write + timestamp formatting template, PostgreSQL would write past the end of a buffer. Either case could crash the server. We have not ruled out the possibility of attacks that lead to privilege escalation, though they seem unlikely. @@ -2733,27 +2733,27 @@ Branch: REL9_0_STABLE [9d6af7367] 2015-08-15 11:02:34 -0400 - Fix buffer overrun in replacement *printf() functions + Fix buffer overrun in replacement *printf() functions (Tom Lane) - PostgreSQL includes a replacement implementation - of printf and related functions. This code will overrun + PostgreSQL includes a replacement implementation + of printf and related functions. 
This code will overrun a stack buffer when formatting a floating point number (conversion - specifiers e, E, f, F, - g or G) with requested precision greater than + specifiers e, E, f, F, + g or G) with requested precision greater than about 500. This will crash the server, and we have not ruled out the possibility of attacks that lead to privilege escalation. A database user can trigger such a buffer overrun through - the to_char() SQL function. While that is the only - affected core PostgreSQL functionality, extension + the to_char() SQL function. While that is the only + affected core PostgreSQL functionality, extension modules that use printf-family functions may be at risk as well. - This issue primarily affects PostgreSQL on Windows. - PostgreSQL uses the system implementation of these + This issue primarily affects PostgreSQL on Windows. + PostgreSQL uses the system implementation of these functions where adequate, which it is on other modern platforms. (CVE-2015-0242) @@ -2761,12 +2761,12 @@ Branch: REL9_0_STABLE [9d6af7367] 2015-08-15 11:02:34 -0400 - Fix buffer overruns in contrib/pgcrypto + Fix buffer overruns in contrib/pgcrypto (Marko Tiikkaja, Noah Misch) - Errors in memory size tracking within the pgcrypto + Errors in memory size tracking within the pgcrypto module permitted stack buffer overruns and improper dependence on the contents of uninitialized memory. The buffer overrun cases can crash the server, and we have not ruled out the possibility of @@ -2807,7 +2807,7 @@ Branch: REL9_0_STABLE [9d6af7367] 2015-08-15 11:02:34 -0400 Some server error messages show the values of columns that violate a constraint, such as a unique constraint. If the user does not have - SELECT privilege on all columns of the table, this could + SELECT privilege on all columns of the table, this could mean exposing values that the user should not be able to see. Adjust the code so that values are displayed only when they came from the SQL command or could be selected by the user. @@ -2833,21 +2833,21 @@ Branch: REL9_0_STABLE [9d6af7367] 2015-08-15 11:02:34 -0400 Avoid possible data corruption if ALTER DATABASE SET - TABLESPACE is used to move a database to a new tablespace and then + TABLESPACE is used to move a database to a new tablespace and then shortly later move it back to its original tablespace (Tom Lane) - Avoid corrupting tables when ANALYZE inside a transaction + Avoid corrupting tables when ANALYZE inside a transaction is rolled back (Andres Freund, Tom Lane, Michael Paquier) If the failing transaction had earlier removed the last index, rule, or trigger from the table, the table would be left in a corrupted state - with the relevant pg_class flags not set though they + with the relevant pg_class flags not set though they should be. @@ -2855,14 +2855,14 @@ Branch: REL9_0_STABLE [9d6af7367] 2015-08-15 11:02:34 -0400 Ensure that unlogged tables are copied correctly - during CREATE DATABASE or ALTER DATABASE SET - TABLESPACE (Pavan Deolasee, Andres Freund) + during CREATE DATABASE or ALTER DATABASE SET + TABLESPACE (Pavan Deolasee, Andres Freund) - Fix DROP's dependency searching to correctly handle the + Fix DROP's dependency searching to correctly handle the case where a table column is recursively visited before its table (Petr Jelinek, Tom Lane) @@ -2870,7 +2870,7 @@ Branch: REL9_0_STABLE [9d6af7367] 2015-08-15 11:02:34 -0400 This case is only known to arise when an extension creates both a datatype and a table using that datatype. 
The faulty code might - refuse a DROP EXTENSION unless CASCADE is + refuse a DROP EXTENSION unless CASCADE is specified, which should not be required. @@ -2882,22 +2882,22 @@ Branch: REL9_0_STABLE [9d6af7367] 2015-08-15 11:02:34 -0400 - In READ COMMITTED mode, queries that lock or update + In READ COMMITTED mode, queries that lock or update recently-updated rows could crash as a result of this bug. - Fix planning of SELECT FOR UPDATE when using a partial + Fix planning of SELECT FOR UPDATE when using a partial index on a child table (Kyotaro Horiguchi) - In READ COMMITTED mode, SELECT FOR UPDATE must - also recheck the partial index's WHERE condition when + In READ COMMITTED mode, SELECT FOR UPDATE must + also recheck the partial index's WHERE condition when rechecking a recently-updated row to see if it still satisfies the - query's WHERE condition. This requirement was missed if the + query's WHERE condition. This requirement was missed if the index belonged to an inheritance child table, so that it was possible to incorrectly return rows that no longer satisfy the query condition. @@ -2905,12 +2905,12 @@ Branch: REL9_0_STABLE [9d6af7367] 2015-08-15 11:02:34 -0400 - Fix corner case wherein SELECT FOR UPDATE could return a row + Fix corner case wherein SELECT FOR UPDATE could return a row twice, and possibly miss returning other rows (Tom Lane) - In READ COMMITTED mode, a SELECT FOR UPDATE + In READ COMMITTED mode, a SELECT FOR UPDATE that is scanning an inheritance tree could incorrectly return a row from a prior child table instead of the one it should return from a later child table. @@ -2920,7 +2920,7 @@ Branch: REL9_0_STABLE [9d6af7367] 2015-08-15 11:02:34 -0400 Reject duplicate column names in the referenced-columns list of - a FOREIGN KEY declaration (David Rowley) + a FOREIGN KEY declaration (David Rowley) @@ -2932,7 +2932,7 @@ Branch: REL9_0_STABLE [9d6af7367] 2015-08-15 11:02:34 -0400 - Fix bugs in raising a numeric value to a large integral power + Fix bugs in raising a numeric value to a large integral power (Tom Lane) @@ -2945,19 +2945,19 @@ Branch: REL9_0_STABLE [9d6af7367] 2015-08-15 11:02:34 -0400 - In numeric_recv(), truncate away any fractional digits - that would be hidden according to the value's dscale field + In numeric_recv(), truncate away any fractional digits + that would be hidden according to the value's dscale field (Tom Lane) - A numeric value's display scale (dscale) should + A numeric value's display scale (dscale) should never be less than the number of nonzero fractional digits; but apparently there's at least one broken client application that - transmits binary numeric values in which that's true. + transmits binary numeric values in which that's true. This leads to strange behavior since the extra digits are taken into account by arithmetic operations even though they aren't printed. - The least risky fix seems to be to truncate away such hidden + The least risky fix seems to be to truncate away such hidden digits on receipt, so that the value is indeed what it prints as. 
@@ -2977,7 +2977,7 @@ Branch: REL9_0_STABLE [9d6af7367] 2015-08-15 11:02:34 -0400 - Fix bugs in tsquery @> tsquery + Fix bugs in tsquery @> tsquery operator (Heikki Linnakangas) @@ -3008,14 +3008,14 @@ Branch: REL9_0_STABLE [9d6af7367] 2015-08-15 11:02:34 -0400 - Fix namespace handling in xpath() (Ali Akbar) + Fix namespace handling in xpath() (Ali Akbar) - Previously, the xml value resulting from - an xpath() call would not have namespace declarations if + Previously, the xml value resulting from + an xpath() call would not have namespace declarations if the namespace declarations were attached to an ancestor element in the - input xml value, rather than to the specific element being + input xml value, rather than to the specific element being returned. Propagate the ancestral declaration so that the result is correct when considered in isolation. @@ -3024,7 +3024,7 @@ Branch: REL9_0_STABLE [9d6af7367] 2015-08-15 11:02:34 -0400 Fix planner problems with nested append relations, such as inherited - tables within UNION ALL subqueries (Tom Lane) + tables within UNION ALL subqueries (Tom Lane) @@ -3037,8 +3037,8 @@ Branch: REL9_0_STABLE [9d6af7367] 2015-08-15 11:02:34 -0400 - Exempt tables that have per-table cost_limit - and/or cost_delay settings from autovacuum's global cost + Exempt tables that have per-table cost_limit + and/or cost_delay settings from autovacuum's global cost balancing rules (Álvaro Herrera) @@ -3064,7 +3064,7 @@ Branch: REL9_0_STABLE [9d6af7367] 2015-08-15 11:02:34 -0400 the target database, if they met the usual thresholds for autovacuuming. This is at best pretty unexpected; at worst it delays response to the wraparound threat. Fix it so that if autovacuum is - turned off, workers only do anti-wraparound vacuums and + turned off, workers only do anti-wraparound vacuums and not any other work. @@ -3097,19 +3097,19 @@ Branch: REL9_0_STABLE [9d6af7367] 2015-08-15 11:02:34 -0400 Fix several cases where recovery logic improperly ignored WAL records - for COMMIT/ABORT PREPARED (Heikki Linnakangas) + for COMMIT/ABORT PREPARED (Heikki Linnakangas) The most notable oversight was - that recovery_target_xid could not be used to stop at + that recovery_target_xid could not be used to stop at a two-phase commit. - Avoid creating unnecessary .ready marker files for + Avoid creating unnecessary .ready marker files for timeline history files (Fujii Masao) @@ -3117,14 +3117,14 @@ Branch: REL9_0_STABLE [9d6af7367] 2015-08-15 11:02:34 -0400 Fix possible null pointer dereference when an empty prepared statement - is used and the log_statement setting is mod - or ddl (Fujii Masao) + is used and the log_statement setting is mod + or ddl (Fujii Masao) - Change pgstat wait timeout warning message to be LOG level, + Change pgstat wait timeout warning message to be LOG level, and rephrase it to be more understandable (Tom Lane) @@ -3133,7 +3133,7 @@ Branch: REL9_0_STABLE [9d6af7367] 2015-08-15 11:02:34 -0400 case, but it occurs often enough on our slower buildfarm members to be a nuisance. Reduce it to LOG level, and expend a bit more effort on the wording: it now reads using stale statistics instead of - current ones because stats collector is not responding. + current ones because stats collector is not responding. 
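The xpath() namespace behavior described above can be seen with a document whose namespace is declared on an ancestor element; the prefix and URI are made up:

    SELECT xpath('/ns:root/ns:item',
                 '<root xmlns="http://example.com/x"><item>42</item></root>'::xml,
                 ARRAY[ARRAY['ns', 'http://example.com/x']]);
    -- The returned <item> node now carries the ancestor's namespace declaration.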
@@ -3147,32 +3147,32 @@ Branch: REL9_0_STABLE [9d6af7367] 2015-08-15 11:02:34 -0400 - Warn if macOS's setlocale() starts an unwanted extra + Warn if macOS's setlocale() starts an unwanted extra thread inside the postmaster (Noah Misch) - Fix processing of repeated dbname parameters - in PQconnectdbParams() (Alex Shulgin) + Fix processing of repeated dbname parameters + in PQconnectdbParams() (Alex Shulgin) Unexpected behavior ensued if the first occurrence - of dbname contained a connection string or URI to be + of dbname contained a connection string or URI to be expanded. - Ensure that libpq reports a suitable error message on + Ensure that libpq reports a suitable error message on unexpected socket EOF (Marko Tiikkaja, Tom Lane) - Depending on kernel behavior, libpq might return an + Depending on kernel behavior, libpq might return an empty error string rather than something useful when the server unexpectedly closed the socket. @@ -3180,14 +3180,14 @@ Branch: REL9_0_STABLE [9d6af7367] 2015-08-15 11:02:34 -0400 - Clear any old error message during PQreset() + Clear any old error message during PQreset() (Heikki Linnakangas) - If PQreset() is called repeatedly, and the connection + If PQreset() is called repeatedly, and the connection cannot be re-established, error messages from the failed connection - attempts kept accumulating in the PGconn's error + attempts kept accumulating in the PGconn's error string. @@ -3195,32 +3195,32 @@ Branch: REL9_0_STABLE [9d6af7367] 2015-08-15 11:02:34 -0400 Properly handle out-of-memory conditions while parsing connection - options in libpq (Alex Shulgin, Heikki Linnakangas) + options in libpq (Alex Shulgin, Heikki Linnakangas) - Fix array overrun in ecpg's version - of ParseDateTime() (Michael Paquier) + Fix array overrun in ecpg's version + of ParseDateTime() (Michael Paquier) - In initdb, give a clearer error message if a password + In initdb, give a clearer error message if a password file is specified but is empty (Mats Erik Andersson) - Fix psql's \s command to work nicely with + Fix psql's \s command to work nicely with libedit, and add pager support (Stepan Rutz, Tom Lane) - When using libedit rather than readline, \s printed the + When using libedit rather than readline, \s printed the command history in a fairly unreadable encoded format, and on recent libedit versions might fail altogether. Fix that by printing the history ourselves rather than having the library do it. A pleasant @@ -3230,7 +3230,7 @@ Branch: REL9_0_STABLE [9d6af7367] 2015-08-15 11:02:34 -0400 This patch also fixes a bug that caused newline encoding to be applied inconsistently when saving the command history with libedit. - Multiline history entries written by older psql + Multiline history entries written by older psql versions will be read cleanly with this patch, but perhaps not vice versa, depending on the exact libedit versions involved. @@ -3238,17 +3238,17 @@ Branch: REL9_0_STABLE [9d6af7367] 2015-08-15 11:02:34 -0400 - Improve consistency of parsing of psql's special + Improve consistency of parsing of psql's special variables (Tom Lane) - Allow variant spellings of on and off (such - as 1/0) for ECHO_HIDDEN - and ON_ERROR_ROLLBACK. Report a warning for unrecognized - values for COMP_KEYWORD_CASE, ECHO, - ECHO_HIDDEN, HISTCONTROL, - ON_ERROR_ROLLBACK, and VERBOSITY. Recognize + Allow variant spellings of on and off (such + as 1/0) for ECHO_HIDDEN + and ON_ERROR_ROLLBACK. 
Report a warning for unrecognized + values for COMP_KEYWORD_CASE, ECHO, + ECHO_HIDDEN, HISTCONTROL, + ON_ERROR_ROLLBACK, and VERBOSITY. Recognize all values for all these variables case-insensitively; previously there was a mishmash of case-sensitive and case-insensitive behaviors. @@ -3256,16 +3256,16 @@ Branch: REL9_0_STABLE [9d6af7367] 2015-08-15 11:02:34 -0400 - Fix psql's expanded-mode display to work - consistently when using border = 3 - and linestyle = ascii or unicode + Fix psql's expanded-mode display to work + consistently when using border = 3 + and linestyle = ascii or unicode (Stephen Frost) - Improve performance of pg_dump when the database + Improve performance of pg_dump when the database contains many instances of multiple dependency paths between the same two objects (Tom Lane) @@ -3280,21 +3280,21 @@ Branch: REL9_0_STABLE [9d6af7367] 2015-08-15 11:02:34 -0400 - Fix core dump in pg_dump --binary-upgrade on zero-column + Fix core dump in pg_dump --binary-upgrade on zero-column composite type (Rushabh Lathia) - Prevent WAL files created by pg_basebackup -x/-X from + Prevent WAL files created by pg_basebackup -x/-X from being archived again when the standby is promoted (Andres Freund) - Fix upgrade-from-unpackaged script for contrib/citext + Fix upgrade-from-unpackaged script for contrib/citext (Tom Lane) @@ -3302,7 +3302,7 @@ Branch: REL9_0_STABLE [9d6af7367] 2015-08-15 11:02:34 -0400 Fix block number checking - in contrib/pageinspect's get_raw_page() + in contrib/pageinspect's get_raw_page() (Tom Lane) @@ -3314,7 +3314,7 @@ Branch: REL9_0_STABLE [9d6af7367] 2015-08-15 11:02:34 -0400 - Fix contrib/pgcrypto's pgp_sym_decrypt() + Fix contrib/pgcrypto's pgp_sym_decrypt() to not fail on messages whose length is 6 less than a power of 2 (Marko Tiikkaja) @@ -3322,7 +3322,7 @@ Branch: REL9_0_STABLE [9d6af7367] 2015-08-15 11:02:34 -0400 - Fix file descriptor leak in contrib/pg_test_fsync + Fix file descriptor leak in contrib/pg_test_fsync (Jeff Janes) @@ -3334,24 +3334,24 @@ Branch: REL9_0_STABLE [9d6af7367] 2015-08-15 11:02:34 -0400 Handle unexpected query results, especially NULLs, safely in - contrib/tablefunc's connectby() + contrib/tablefunc's connectby() (Michael Paquier) - connectby() previously crashed if it encountered a NULL + connectby() previously crashed if it encountered a NULL key value. It now prints that row but doesn't recurse further. - Avoid a possible crash in contrib/xml2's - xslt_process() (Mark Simonetti) + Avoid a possible crash in contrib/xml2's + xslt_process() (Mark Simonetti) - libxslt seems to have an undocumented dependency on + libxslt seems to have an undocumented dependency on the order in which resources are freed; reorder our calls to avoid a crash. @@ -3359,7 +3359,7 @@ Branch: REL9_0_STABLE [9d6af7367] 2015-08-15 11:02:34 -0400 - Mark some contrib I/O functions with correct volatility + Mark some contrib I/O functions with correct volatility properties (Tom Lane) @@ -3393,29 +3393,29 @@ Branch: REL9_0_STABLE [9d6af7367] 2015-08-15 11:02:34 -0400 With OpenLDAP versions 2.4.24 through 2.4.31, - inclusive, PostgreSQL backends can crash at exit. - Raise a warning during configure based on the + inclusive, PostgreSQL backends can crash at exit. + Raise a warning during configure based on the compile-time OpenLDAP version number, and test the crashing scenario - in the contrib/dblink regression test. + in the contrib/dblink regression test. 
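The get_raw_page() block-number checking mentioned above applies to calls like this one; pageinspect must be installed, the caller must be a superuser, and the table name is hypothetical:

    CREATE EXTENSION IF NOT EXISTS pageinspect;
    SELECT * FROM page_header(get_raw_page('my_table', 0));   -- block 0 of the main fork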
- In non-MSVC Windows builds, ensure libpq.dll is installed + In non-MSVC Windows builds, ensure libpq.dll is installed with execute permissions (Noah Misch) - Make pg_regress remove any temporary installation it + Make pg_regress remove any temporary installation it created upon successful exit (Tom Lane) This results in a very substantial reduction in disk space usage - during make check-world, since that sequence involves + during make check-world, since that sequence involves creation of numerous temporary installations. @@ -3427,15 +3427,15 @@ Branch: REL9_0_STABLE [9d6af7367] 2015-08-15 11:02:34 -0400 - Previously, PostgreSQL assumed that the UTC offset - associated with a time zone abbreviation (such as EST) + Previously, PostgreSQL assumed that the UTC offset + associated with a time zone abbreviation (such as EST) never changes in the usage of any particular locale. However this assumption fails in the real world, so introduce the ability for a zone abbreviation to represent a UTC offset that sometimes changes. Update the zone abbreviation definition files to make use of this feature in timezone locales that have changed the UTC offset of their abbreviations since 1970 (according to the IANA timezone database). - In such timezones, PostgreSQL will now associate the + In such timezones, PostgreSQL will now associate the correct UTC offset with the abbreviation depending on the given date. @@ -3447,9 +3447,9 @@ Branch: REL9_0_STABLE [9d6af7367] 2015-08-15 11:02:34 -0400 Add CST (China Standard Time) to our lists. - Remove references to ADT as Arabia Daylight Time, an + Remove references to ADT as Arabia Daylight Time, an abbreviation that's been out of use since 2007; therefore, claiming - there is a conflict with Atlantic Daylight Time doesn't seem + there is a conflict with Atlantic Daylight Time doesn't seem especially helpful. Fix entirely incorrect GMT offsets for CKT (Cook Islands), FJT, and FJST (Fiji); we didn't even have them on the proper side of the date line. @@ -3458,21 +3458,21 @@ Branch: REL9_0_STABLE [9d6af7367] 2015-08-15 11:02:34 -0400 - Update time zone data files to tzdata release 2015a. + Update time zone data files to tzdata release 2015a. The IANA timezone database has adopted abbreviations of the form - AxST/AxDT + AxST/AxDT for all Australian time zones, reflecting what they believe to be current majority practice Down Under. These names do not conflict with usage elsewhere (other than ACST for Acre Summer Time, which has been in disuse since 1994). Accordingly, adopt these names into - our Default timezone abbreviation set. - The Australia abbreviation set now contains only CST, EAST, + our Default timezone abbreviation set. + The Australia abbreviation set now contains only CST, EAST, EST, SAST, SAT, and WST, all of which are thought to be mostly historical usage. Note that SAST has also been changed to be South - Africa Standard Time in the Default abbreviation set. + Africa Standard Time in the Default abbreviation set. @@ -3501,7 +3501,7 @@ Branch: REL9_0_STABLE [9d6af7367] 2015-08-15 11:02:34 -0400 This release contains a variety of fixes from 9.1.13. For information about new features in the 9.1 major release, see - . + . @@ -3519,7 +3519,7 @@ Branch: REL9_0_STABLE [9d6af7367] 2015-08-15 11:02:34 -0400 Also, if you are upgrading from a version earlier than 9.1.11, - see . + see . 
@@ -3531,15 +3531,15 @@ Branch: REL9_0_STABLE [9d6af7367] 2015-08-15 11:02:34 -0400 - Correctly initialize padding bytes in contrib/btree_gist - indexes on bit columns (Heikki Linnakangas) + Correctly initialize padding bytes in contrib/btree_gist + indexes on bit columns (Heikki Linnakangas) This error could result in incorrect query results due to values that should compare equal not being seen as equal. - Users with GiST indexes on bit or bit varying - columns should REINDEX those indexes after installing this + Users with GiST indexes on bit or bit varying + columns should REINDEX those indexes after installing this update. @@ -3570,7 +3570,7 @@ Branch: REL9_0_STABLE [9d6af7367] 2015-08-15 11:02:34 -0400 - Fix feedback status when is + Fix feedback status when is turned off on-the-fly (Simon Riggs) @@ -3578,14 +3578,14 @@ Branch: REL9_0_STABLE [9d6af7367] 2015-08-15 11:02:34 -0400 Fix possibly-incorrect cache invalidation during nested calls - to ReceiveSharedInvalidMessages (Andres Freund) + to ReceiveSharedInvalidMessages (Andres Freund) - Fix could not find pathkey item to sort planner failures - with UNION ALL over subqueries reading from tables with + Fix could not find pathkey item to sort planner failures + with UNION ALL over subqueries reading from tables with inheritance children (Tom Lane) @@ -3613,13 +3613,13 @@ Branch: REL9_0_STABLE [9d6af7367] 2015-08-15 11:02:34 -0400 This corrects cases where TOAST pointers could be copied into other tables without being dereferenced. If the original data is later deleted, it would lead to errors like missing chunk number 0 - for toast value ... when the now-dangling pointer is used. + for toast value ... when the now-dangling pointer is used. - Fix record type has not been registered failures with + Fix record type has not been registered failures with whole-row references to the output of Append plan nodes (Tom Lane) @@ -3634,7 +3634,7 @@ Branch: REL9_0_STABLE [9d6af7367] 2015-08-15 11:02:34 -0400 Fix query-lifespan memory leak while evaluating the arguments for a - function in FROM (Tom Lane) + function in FROM (Tom Lane) @@ -3647,14 +3647,14 @@ Branch: REL9_0_STABLE [9d6af7367] 2015-08-15 11:02:34 -0400 - Fix data encoding error in hungarian.stop (Tom Lane) + Fix data encoding error in hungarian.stop (Tom Lane) Prevent foreign tables from being created with OIDS - when is true + when is true (Etsuro Fujita) @@ -3668,19 +3668,19 @@ Branch: REL9_0_STABLE [9d6af7367] 2015-08-15 11:02:34 -0400 This could cause problems (at least spurious warnings, and at worst an - infinite loop) if CREATE INDEX or CLUSTER were + infinite loop) if CREATE INDEX or CLUSTER were done later in the same transaction. - Clear pg_stat_activity.xact_start - during PREPARE TRANSACTION (Andres Freund) + Clear pg_stat_activity.xact_start + during PREPARE TRANSACTION (Andres Freund) - After the PREPARE, the originating session is no longer in + After the PREPARE, the originating session is no longer in a transaction, so it should not continue to display a transaction start time. @@ -3688,7 +3688,7 @@ Branch: REL9_0_STABLE [9d6af7367] 2015-08-15 11:02:34 -0400 - Fix REASSIGN OWNED to not fail for text search objects + Fix REASSIGN OWNED to not fail for text search objects (Álvaro Herrera) @@ -3700,14 +3700,14 @@ Branch: REL9_0_STABLE [9d6af7367] 2015-08-15 11:02:34 -0400 This ensures that the postmaster will properly clean up after itself - if, for example, it receives SIGINT while still + if, for example, it receives SIGINT while still starting up. 
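For the contrib/btree_gist padding fix above, the recommended remediation is simply to rebuild any affected index after installing the update; a minimal sketch, with hypothetical table, column, and index names:

    -- a GiST index on a bit varying column, provided by contrib/btree_gist
    CREATE INDEX my_table_bits_gist_idx ON my_table USING gist (bits);
    -- rebuild it once the updated server is running
    REINDEX INDEX my_table_bits_gist_idx;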
- Fix client host name lookup when processing pg_hba.conf + Fix client host name lookup when processing pg_hba.conf entries that specify host names instead of IP addresses (Tom Lane) @@ -3722,7 +3722,7 @@ Branch: REL9_0_STABLE [9d6af7367] 2015-08-15 11:02:34 -0400 Secure Unix-domain sockets of temporary postmasters started during - make check (Noah Misch) + make check (Noah Misch) @@ -3731,16 +3731,16 @@ Branch: REL9_0_STABLE [9d6af7367] 2015-08-15 11:02:34 -0400 the operating-system user running the test, as we previously noted in CVE-2014-0067. This change defends against that risk by placing the server's socket in a temporary, mode 0700 subdirectory - of /tmp. The hazard remains however on platforms where + of /tmp. The hazard remains however on platforms where Unix sockets are not supported, notably Windows, because then the temporary postmaster must accept local TCP connections. A useful side effect of this change is to simplify - make check testing in builds that - override DEFAULT_PGSOCKET_DIR. Popular non-default values - like /var/run/postgresql are often not writable by the + make check testing in builds that + override DEFAULT_PGSOCKET_DIR. Popular non-default values + like /var/run/postgresql are often not writable by the build user, requiring workarounds that will no longer be necessary. @@ -3760,7 +3760,7 @@ Branch: REL9_0_STABLE [9d6af7367] 2015-08-15 11:02:34 -0400 On Windows, allow new sessions to absorb values of PGC_BACKEND - parameters (such as ) from the + parameters (such as ) from the configuration file (Amit Kapila) @@ -3776,15 +3776,15 @@ Branch: REL9_0_STABLE [9d6af7367] 2015-08-15 11:02:34 -0400 - This oversight could cause initdb - and pg_upgrade to fail on Windows, if the installation - path contained both spaces and @ signs. + This oversight could cause initdb + and pg_upgrade to fail on Windows, if the installation + path contained both spaces and @ signs. - Fix linking of libpython on macOS (Tom Lane) + Fix linking of libpython on macOS (Tom Lane) @@ -3795,17 +3795,17 @@ Branch: REL9_0_STABLE [9d6af7367] 2015-08-15 11:02:34 -0400 - Avoid buffer bloat in libpq when the server + Avoid buffer bloat in libpq when the server consistently sends data faster than the client can absorb it (Shin-ichi Morita, Tom Lane) - libpq could be coerced into enlarging its input buffer + libpq could be coerced into enlarging its input buffer until it runs out of memory (which would be reported misleadingly - as lost synchronization with server). Under ordinary + as lost synchronization with server). Under ordinary circumstances it's quite far-fetched that data could be continuously - transmitted more quickly than the recv() loop can + transmitted more quickly than the recv() loop can absorb it, but this has been observed when the client is artificially slowed by scheduler constraints. 
@@ -3813,15 +3813,15 @@ Branch: REL9_0_STABLE [9d6af7367] 2015-08-15 11:02:34 -0400 - Ensure that LDAP lookup attempts in libpq time out as + Ensure that LDAP lookup attempts in libpq time out as intended (Laurenz Albe) - Fix ecpg to do the right thing when an array - of char * is the target for a FETCH statement returning more + Fix ecpg to do the right thing when an array + of char * is the target for a FETCH statement returning more than one row, as well as some other array-handling fixes (Ashutosh Bapat) @@ -3829,20 +3829,20 @@ Branch: REL9_0_STABLE [9d6af7367] 2015-08-15 11:02:34 -0400 - Fix pg_restore's processing of old-style large object + Fix pg_restore's processing of old-style large object comments (Tom Lane) A direct-to-database restore from an archive file generated by a - pre-9.0 version of pg_dump would usually fail if the + pre-9.0 version of pg_dump would usually fail if the archive contained more than a few comments for large objects. - In contrib/pgcrypto functions, ensure sensitive + In contrib/pgcrypto functions, ensure sensitive information is cleared from stack variables before returning (Marko Kreen) @@ -3850,20 +3850,20 @@ Branch: REL9_0_STABLE [9d6af7367] 2015-08-15 11:02:34 -0400 - In contrib/uuid-ossp, cache the state of the OSSP UUID + In contrib/uuid-ossp, cache the state of the OSSP UUID library across calls (Tom Lane) This improves the efficiency of UUID generation and reduces the amount - of entropy drawn from /dev/urandom, on platforms that + of entropy drawn from /dev/urandom, on platforms that have that. - Update time zone data files to tzdata release 2014e + Update time zone data files to tzdata release 2014e for DST law changes in Crimea, Egypt, and Morocco. @@ -3884,7 +3884,7 @@ Branch: REL9_0_STABLE [9d6af7367] 2015-08-15 11:02:34 -0400 This release contains a variety of fixes from 9.1.12. For information about new features in the 9.1 major release, see - . + . @@ -3896,7 +3896,7 @@ Branch: REL9_0_STABLE [9d6af7367] 2015-08-15 11:02:34 -0400 However, if you are upgrading from a version earlier than 9.1.11, - see . + see . @@ -3923,7 +3923,7 @@ Branch: REL9_0_STABLE [9d6af7367] 2015-08-15 11:02:34 -0400 Avoid race condition in checking transaction commit status during - receipt of a NOTIFY message (Marko Tiikkaja) + receipt of a NOTIFY message (Marko Tiikkaja) @@ -3947,7 +3947,7 @@ Branch: REL9_0_STABLE [9d6af7367] 2015-08-15 11:02:34 -0400 - Remove incorrect code that tried to allow OVERLAPS with + Remove incorrect code that tried to allow OVERLAPS with single-element row arguments (Joshua Yanovski) @@ -3960,17 +3960,17 @@ Branch: REL9_0_STABLE [9d6af7367] 2015-08-15 11:02:34 -0400 - Avoid getting more than AccessShareLock when de-parsing a + Avoid getting more than AccessShareLock when de-parsing a rule or view (Dean Rasheed) - This oversight resulted in pg_dump unexpectedly - acquiring RowExclusiveLock locks on tables mentioned as - the targets of INSERT/UPDATE/DELETE + This oversight resulted in pg_dump unexpectedly + acquiring RowExclusiveLock locks on tables mentioned as + the targets of INSERT/UPDATE/DELETE commands in rules. While usually harmless, that could interfere with concurrent transactions that tried to acquire, for example, - ShareLock on those tables. + ShareLock on those tables. 
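The contrib/uuid-ossp change above affects only efficiency, not the SQL-level interface; for reference, a minimal sketch of the functions whose library state is now cached across calls:

    CREATE EXTENSION "uuid-ossp";
    SELECT uuid_generate_v1(), uuid_generate_v4();   -- OSSP UUID state is reused between calls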
@@ -3989,8 +3989,8 @@ Branch: REL9_0_STABLE [9d6af7367] 2015-08-15 11:02:34 -0400 - Fix walsender's failure to shut down cleanly when client - is pg_receivexlog (Fujii Masao) + Fix walsender's failure to shut down cleanly when client + is pg_receivexlog (Fujii Masao) @@ -4003,13 +4003,13 @@ Branch: REL9_0_STABLE [9d6af7367] 2015-08-15 11:02:34 -0400 - Prevent interrupts while reporting non-ERROR messages + Prevent interrupts while reporting non-ERROR messages (Tom Lane) This guards against rare server-process freezeups due to recursive - entry to syslog(), and perhaps other related problems. + entry to syslog(), and perhaps other related problems. @@ -4022,14 +4022,14 @@ Branch: REL9_0_STABLE [9d6af7367] 2015-08-15 11:02:34 -0400 - Prevent intermittent could not reserve shared memory region + Prevent intermittent could not reserve shared memory region failures on recent Windows versions (MauMau) - Update time zone data files to tzdata release 2014a + Update time zone data files to tzdata release 2014a for DST law changes in Fiji and Turkey, plus historical changes in Israel and Ukraine. @@ -4051,7 +4051,7 @@ Branch: REL9_0_STABLE [9d6af7367] 2015-08-15 11:02:34 -0400 This release contains a variety of fixes from 9.1.11. For information about new features in the 9.1 major release, see - . + . @@ -4063,7 +4063,7 @@ Branch: REL9_0_STABLE [9d6af7367] 2015-08-15 11:02:34 -0400 However, if you are upgrading from a version earlier than 9.1.11, - see . + see . @@ -4075,19 +4075,19 @@ Branch: REL9_0_STABLE [9d6af7367] 2015-08-15 11:02:34 -0400 - Shore up GRANT ... WITH ADMIN OPTION restrictions + Shore up GRANT ... WITH ADMIN OPTION restrictions (Noah Misch) - Granting a role without ADMIN OPTION is supposed to + Granting a role without ADMIN OPTION is supposed to prevent the grantee from adding or removing members from the granted role, but this restriction was easily bypassed by doing SET - ROLE first. The security impact is mostly that a role member can + ROLE first. The security impact is mostly that a role member can revoke the access of others, contrary to the wishes of his grantor. Unapproved role member additions are a lesser concern, since an uncooperative role member could provide most of his rights to others - anyway by creating views or SECURITY DEFINER functions. + anyway by creating views or SECURITY DEFINER functions. (CVE-2014-0060) @@ -4100,7 +4100,7 @@ Branch: REL9_0_STABLE [9d6af7367] 2015-08-15 11:02:34 -0400 The primary role of PL validator functions is to be called implicitly - during CREATE FUNCTION, but they are also normal SQL + during CREATE FUNCTION, but they are also normal SQL functions that a user can call explicitly. Calling a validator on a function actually written in some other language was not checked for and could be exploited for privilege-escalation purposes. @@ -4120,7 +4120,7 @@ Branch: REL9_0_STABLE [9d6af7367] 2015-08-15 11:02:34 -0400 If the name lookups come to different conclusions due to concurrent activity, we might perform some parts of the DDL on a different table - than other parts. At least in the case of CREATE INDEX, + than other parts. At least in the case of CREATE INDEX, this can be used to cause the permissions checks to be performed against a different table than the index creation, allowing for a privilege escalation attack. 
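To make the ADMIN OPTION fix above concrete, here is a hedged sketch using hypothetical roles (staff, alice, bob); only the second grant is supposed to let the grantee administer membership, and the fix closes the SET ROLE route around that restriction:

    GRANT staff TO alice;                    -- membership only; alice cannot add or remove members
    GRANT staff TO bob WITH ADMIN OPTION;    -- bob may grant and revoke membership in staff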
@@ -4134,12 +4134,12 @@ Branch: REL9_0_STABLE [9d6af7367] 2015-08-15 11:02:34 -0400 - The MAXDATELEN constant was too small for the longest - possible value of type interval, allowing a buffer overrun - in interval_out(). Although the datetime input + The MAXDATELEN constant was too small for the longest + possible value of type interval, allowing a buffer overrun + in interval_out(). Although the datetime input functions were more careful about avoiding buffer overrun, the limit was short enough to cause them to reject some valid inputs, such as - input containing a very long timezone name. The ecpg + input containing a very long timezone name. The ecpg library contained these vulnerabilities along with some of its own. (CVE-2014-0063) @@ -4166,7 +4166,7 @@ Branch: REL9_0_STABLE [9d6af7367] 2015-08-15 11:02:34 -0400 - Use strlcpy() and related functions to provide a clear + Use strlcpy() and related functions to provide a clear guarantee that fixed-size buffers are not overrun. Unlike the preceding items, it is unclear whether these cases really represent live issues, since in most cases there appear to be previous @@ -4178,35 +4178,35 @@ Branch: REL9_0_STABLE [9d6af7367] 2015-08-15 11:02:34 -0400 - Avoid crashing if crypt() returns NULL (Honza Horak, + Avoid crashing if crypt() returns NULL (Honza Horak, Bruce Momjian) - There are relatively few scenarios in which crypt() - could return NULL, but contrib/chkpass would crash + There are relatively few scenarios in which crypt() + could return NULL, but contrib/chkpass would crash if it did. One practical case in which this could be an issue is - if libc is configured to refuse to execute unapproved - hashing algorithms (e.g., FIPS mode). + if libc is configured to refuse to execute unapproved + hashing algorithms (e.g., FIPS mode). (CVE-2014-0066) - Document risks of make check in the regression testing + Document risks of make check in the regression testing instructions (Noah Misch, Tom Lane) - Since the temporary server started by make check - uses trust authentication, another user on the same machine + Since the temporary server started by make check + uses trust authentication, another user on the same machine could connect to it as database superuser, and then potentially exploit the privileges of the operating-system user who started the tests. A future release will probably incorporate changes in the testing procedure to prevent this risk, but some public discussion is needed first. So for the moment, just warn people against using - make check when there are untrusted users on the + make check when there are untrusted users on the same machine. (CVE-2014-0067) @@ -4221,7 +4221,7 @@ Branch: REL9_0_STABLE [9d6af7367] 2015-08-15 11:02:34 -0400 The WAL update could be applied to the wrong page, potentially many pages past where it should have been. Aside from corrupting data, - this error has been observed to result in significant bloat + this error has been observed to result in significant bloat of standby servers compared to their masters, due to updates being applied far beyond where the end-of-file should have been. This failure mode does not appear to be a significant risk during crash @@ -4241,20 +4241,20 @@ Branch: REL9_0_STABLE [9d6af7367] 2015-08-15 11:02:34 -0400 was already consistent at the start of replay, thus possibly allowing hot-standby queries before the database was really consistent. Other symptoms such as PANIC: WAL contains references to invalid - pages were also possible. + pages were also possible. 
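As an illustration of the datetime-input limit mentioned above, a value like the following (the zone name is chosen only for its length) is valid input that an overly small buffer limit could cause to be rejected:

    SELECT timestamptz '2014-02-20 10:00:00 America/Argentina/Buenos_Aires';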
Fix improper locking of btree index pages while replaying - a VACUUM operation in hot-standby mode (Andres Freund, + a VACUUM operation in hot-standby mode (Andres Freund, Heikki Linnakangas, Tom Lane) This error could result in PANIC: WAL contains references to - invalid pages failures. + invalid pages failures. @@ -4272,8 +4272,8 @@ Branch: REL9_0_STABLE [9d6af7367] 2015-08-15 11:02:34 -0400 - When pause_at_recovery_target - and recovery_target_inclusive are both set, ensure the + When pause_at_recovery_target + and recovery_target_inclusive are both set, ensure the target record is applied before pausing, not after (Heikki Linnakangas) @@ -4286,7 +4286,7 @@ Branch: REL9_0_STABLE [9d6af7367] 2015-08-15 11:02:34 -0400 Ensure that signal handlers don't attempt to use the - process's MyProc pointer after it's no longer valid. + process's MyProc pointer after it's no longer valid. @@ -4299,19 +4299,19 @@ Branch: REL9_0_STABLE [9d6af7367] 2015-08-15 11:02:34 -0400 - Fix unsafe references to errno within error reporting + Fix unsafe references to errno within error reporting logic (Christian Kruse) This would typically lead to odd behaviors such as missing or - inappropriate HINT fields. + inappropriate HINT fields. - Fix possible crashes from using ereport() too early + Fix possible crashes from using ereport() too early during server startup (Tom Lane) @@ -4335,7 +4335,7 @@ Branch: REL9_0_STABLE [9d6af7367] 2015-08-15 11:02:34 -0400 - Fix length checking for Unicode identifiers (U&"..." + Fix length checking for Unicode identifiers (U&"..." syntax) containing escapes (Tom Lane) @@ -4355,7 +4355,7 @@ Branch: REL9_0_STABLE [9d6af7367] 2015-08-15 11:02:34 -0400 A previous patch allowed such keywords to be used without quoting in places such as role identifiers; but it missed cases where a - list of role identifiers was permitted, such as DROP ROLE. + list of role identifiers was permitted, such as DROP ROLE. @@ -4369,19 +4369,19 @@ Branch: REL9_0_STABLE [9d6af7367] 2015-08-15 11:02:34 -0400 Fix possible crash due to invalid plan for nested sub-selects, such - as WHERE (... x IN (SELECT ...) ...) IN (SELECT ...) + as WHERE (... x IN (SELECT ...) ...) IN (SELECT ...) (Tom Lane) - Ensure that ANALYZE creates statistics for a table column - even when all the values in it are too wide (Tom Lane) + Ensure that ANALYZE creates statistics for a table column + even when all the values in it are too wide (Tom Lane) - ANALYZE intentionally omits very wide values from its + ANALYZE intentionally omits very wide values from its histogram and most-common-values calculations, but it neglected to do something sane in the case that all the sampled entries are too wide. @@ -4389,21 +4389,21 @@ Branch: REL9_0_STABLE [9d6af7367] 2015-08-15 11:02:34 -0400 - In ALTER TABLE ... SET TABLESPACE, allow the database's + In ALTER TABLE ... SET TABLESPACE, allow the database's default tablespace to be used without a permissions check (Stephen Frost) - CREATE TABLE has always allowed such usage, - but ALTER TABLE didn't get the memo. + CREATE TABLE has always allowed such usage, + but ALTER TABLE didn't get the memo. 
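A minimal sketch of the ALTER TABLE ... SET TABLESPACE case described above, assuming pg_default is the database's default tablespace and my_table is a hypothetical table; no CREATE privilege on the tablespace is needed, matching what CREATE TABLE already allowed:

    ALTER TABLE my_table SET TABLESPACE pg_default;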
- Fix cannot accept a set error when some arms of - a CASE return a set and others don't (Tom Lane) + Fix cannot accept a set error when some arms of + a CASE return a set and others don't (Tom Lane) @@ -4428,12 +4428,12 @@ Branch: REL9_0_STABLE [9d6af7367] 2015-08-15 11:02:34 -0400 - Fix possible misbehavior in plainto_tsquery() + Fix possible misbehavior in plainto_tsquery() (Heikki Linnakangas) - Use memmove() not memcpy() for copying + Use memmove() not memcpy() for copying overlapping memory regions. There have been no field reports of this actually causing trouble, but it's certainly risky. @@ -4441,8 +4441,8 @@ Branch: REL9_0_STABLE [9d6af7367] 2015-08-15 11:02:34 -0400 - Fix placement of permissions checks in pg_start_backup() - and pg_stop_backup() (Andres Freund, Magnus Hagander) + Fix placement of permissions checks in pg_start_backup() + and pg_stop_backup() (Andres Freund, Magnus Hagander) @@ -4453,31 +4453,31 @@ Branch: REL9_0_STABLE [9d6af7367] 2015-08-15 11:02:34 -0400 - Accept SHIFT_JIS as an encoding name for locale checking + Accept SHIFT_JIS as an encoding name for locale checking purposes (Tatsuo Ishii) - Fix misbehavior of PQhost() on Windows (Fujii Masao) + Fix misbehavior of PQhost() on Windows (Fujii Masao) - It should return localhost if no host has been specified. + It should return localhost if no host has been specified. - Improve error handling in libpq and psql - for failures during COPY TO STDOUT/FROM STDIN (Tom Lane) + Improve error handling in libpq and psql + for failures during COPY TO STDOUT/FROM STDIN (Tom Lane) In particular this fixes an infinite loop that could occur in 9.2 and up if the server connection was lost during COPY FROM - STDIN. Variants of that scenario might be possible in older + STDIN. Variants of that scenario might be possible in older versions, or with other client applications. @@ -4485,7 +4485,7 @@ Branch: REL9_0_STABLE [9d6af7367] 2015-08-15 11:02:34 -0400 Fix possible incorrect printing of filenames - in pg_basebackup's verbose mode (Magnus Hagander) + in pg_basebackup's verbose mode (Magnus Hagander) @@ -4498,20 +4498,20 @@ Branch: REL9_0_STABLE [9d6af7367] 2015-08-15 11:02:34 -0400 - Fix misaligned descriptors in ecpg (MauMau) + Fix misaligned descriptors in ecpg (MauMau) - In ecpg, handle lack of a hostname in the connection + In ecpg, handle lack of a hostname in the connection parameters properly (Michael Meskes) - Fix performance regression in contrib/dblink connection + Fix performance regression in contrib/dblink connection startup (Joe Conway) @@ -4522,7 +4522,7 @@ Branch: REL9_0_STABLE [9d6af7367] 2015-08-15 11:02:34 -0400 - In contrib/isn, fix incorrect calculation of the check + In contrib/isn, fix incorrect calculation of the check digit for ISMN values (Fabien Coelho) @@ -4536,28 +4536,28 @@ Branch: REL9_0_STABLE [9d6af7367] 2015-08-15 11:02:34 -0400 - In Mingw and Cygwin builds, install the libpq DLL - in the bin directory (Andrew Dunstan) + In Mingw and Cygwin builds, install the libpq DLL + in the bin directory (Andrew Dunstan) This duplicates what the MSVC build has long done. It should fix - problems with programs like psql failing to start + problems with programs like psql failing to start because they can't find the DLL. 
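The CASE fix noted above concerns expressions of roughly the following shape (illustrative only; this mixed form is accepted on the 9.x branches these notes cover, whereas PostgreSQL 10 and later reject set-returning functions inside CASE outright):

    SELECT CASE WHEN flag THEN generate_series(1, 3) ELSE 0 END
    FROM (VALUES (true), (false)) AS v(flag);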
- Avoid using the deprecated dllwrap tool in Cygwin builds + Avoid using the deprecated dllwrap tool in Cygwin builds (Marco Atzeri) - Don't generate plain-text HISTORY - and src/test/regress/README files anymore (Tom Lane) + Don't generate plain-text HISTORY + and src/test/regress/README files anymore (Tom Lane) @@ -4566,20 +4566,20 @@ Branch: REL9_0_STABLE [9d6af7367] 2015-08-15 11:02:34 -0400 the likely audience for plain-text format. Distribution tarballs will still contain files by these names, but they'll just be stubs directing the reader to consult the main documentation. - The plain-text INSTALL file will still be maintained, as + The plain-text INSTALL file will still be maintained, as there is arguably a use-case for that. - Update time zone data files to tzdata release 2013i + Update time zone data files to tzdata release 2013i for DST law changes in Jordan and historical changes in Cuba. - In addition, the zones Asia/Riyadh87, - Asia/Riyadh88, and Asia/Riyadh89 have been + In addition, the zones Asia/Riyadh87, + Asia/Riyadh88, and Asia/Riyadh89 have been removed, as they are no longer maintained by IANA, and never represented actual civil timekeeping practice. @@ -4601,7 +4601,7 @@ Branch: REL9_0_STABLE [9d6af7367] 2015-08-15 11:02:34 -0400 This release contains a variety of fixes from 9.1.10. For information about new features in the 9.1 major release, see - . + . @@ -4619,7 +4619,7 @@ Branch: REL9_0_STABLE [9d6af7367] 2015-08-15 11:02:34 -0400 Also, if you are upgrading from a version earlier than 9.1.9, - see . + see . @@ -4631,13 +4631,13 @@ Branch: REL9_0_STABLE [9d6af7367] 2015-08-15 11:02:34 -0400 - Fix VACUUM's tests to see whether it can - update relfrozenxid (Andres Freund) + Fix VACUUM's tests to see whether it can + update relfrozenxid (Andres Freund) - In some cases VACUUM (either manual or autovacuum) could - incorrectly advance a table's relfrozenxid value, + In some cases VACUUM (either manual or autovacuum) could + incorrectly advance a table's relfrozenxid value, allowing tuples to escape freezing, causing those rows to become invisible once 2^31 transactions have elapsed. The probability of data loss is fairly low since multiple incorrect advancements would @@ -4649,18 +4649,18 @@ Branch: REL9_0_STABLE [9d6af7367] 2015-08-15 11:02:34 -0400 The issue can be ameliorated by, after upgrading, vacuuming all tables in all databases while having vacuum_freeze_table_age + linkend="guc-vacuum-freeze-table-age">vacuum_freeze_table_age set to zero. This will fix any latent corruption but will not be able to fix all pre-existing data errors. However, an installation can be presumed safe after performing this vacuuming if it has executed fewer than 2^31 update transactions in its lifetime (check this with - SELECT txid_current() < 2^31). + SELECT txid_current() < 2^31). 
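The relfrozenxid remediation described above can be expressed as the following sketch, to be run in every database of the installation after upgrading; the final query is the safety check quoted in the item:

    SET vacuum_freeze_table_age = 0;
    VACUUM;                          -- vacuums all tables the user may vacuum; repeat per database
    RESET vacuum_freeze_table_age;
    SELECT txid_current() < 2^31;    -- true: fewer than 2^31 update transactions so far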
- Fix initialization of pg_clog and pg_subtrans + Fix initialization of pg_clog and pg_subtrans during hot standby startup (Andres Freund, Heikki Linnakangas) @@ -4686,7 +4686,7 @@ Branch: REL9_0_STABLE [9d6af7367] 2015-08-15 11:02:34 -0400 - Truncate pg_multixact contents during WAL replay + Truncate pg_multixact contents during WAL replay (Andres Freund) @@ -4708,8 +4708,8 @@ Branch: REL9_0_STABLE [9d6af7367] 2015-08-15 11:02:34 -0400 - Avoid flattening a subquery whose SELECT list contains a - volatile function wrapped inside a sub-SELECT (Tom Lane) + Avoid flattening a subquery whose SELECT list contains a + volatile function wrapped inside a sub-SELECT (Tom Lane) @@ -4726,7 +4726,7 @@ Branch: REL9_0_STABLE [9d6af7367] 2015-08-15 11:02:34 -0400 This error could lead to incorrect plans for queries involving - multiple levels of subqueries within JOIN syntax. + multiple levels of subqueries within JOIN syntax. @@ -4756,13 +4756,13 @@ Branch: REL9_0_STABLE [9d6af7367] 2015-08-15 11:02:34 -0400 - Fix array slicing of int2vector and oidvector values + Fix array slicing of int2vector and oidvector values (Tom Lane) Expressions of this kind are now implicitly promoted to - regular int2 or oid arrays. + regular int2 or oid arrays. @@ -4776,7 +4776,7 @@ Branch: REL9_0_STABLE [9d6af7367] 2015-08-15 11:02:34 -0400 In some cases, the system would use the simple GMT offset value when it should have used the regular timezone setting that had prevailed before the simple offset was selected. This change also causes - the timeofday function to honor the simple GMT offset + the timeofday function to honor the simple GMT offset zone. @@ -4790,7 +4790,7 @@ Branch: REL9_0_STABLE [9d6af7367] 2015-08-15 11:02:34 -0400 - Properly quote generated command lines in pg_ctl + Properly quote generated command lines in pg_ctl (Naoya Anzai and Tom Lane) @@ -4801,10 +4801,10 @@ Branch: REL9_0_STABLE [9d6af7367] 2015-08-15 11:02:34 -0400 - Fix pg_dumpall to work when a source database + Fix pg_dumpall to work when a source database sets default_transaction_read_only - via ALTER DATABASE SET (Kevin Grittner) + linkend="guc-default-transaction-read-only">default_transaction_read_only + via ALTER DATABASE SET (Kevin Grittner) @@ -4814,28 +4814,28 @@ Branch: REL9_0_STABLE [9d6af7367] 2015-08-15 11:02:34 -0400 - Make ecpg search for quoted cursor names + Make ecpg search for quoted cursor names case-sensitively (Zoltán Böszörményi) - Fix ecpg's processing of lists of variables - declared varchar (Zoltán Böszörményi) + Fix ecpg's processing of lists of variables + declared varchar (Zoltán Böszörményi) - Make contrib/lo defend against incorrect trigger definitions + Make contrib/lo defend against incorrect trigger definitions (Marc Cousin) - Update time zone data files to tzdata release 2013h + Update time zone data files to tzdata release 2013h for DST law changes in Argentina, Brazil, Jordan, Libya, Liechtenstein, Morocco, and Palestine. Also, new timezone abbreviations WIB, WIT, WITA for Indonesia. @@ -4858,7 +4858,7 @@ Branch: REL9_0_STABLE [9d6af7367] 2015-08-15 11:02:34 -0400 This release contains a variety of fixes from 9.1.9. For information about new features in the 9.1 major release, see - . + . @@ -4870,7 +4870,7 @@ Branch: REL9_0_STABLE [9d6af7367] 2015-08-15 11:02:34 -0400 However, if you are upgrading from a version earlier than 9.1.9, - see . + see . 
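The int2vector/oidvector slicing fix noted above can be exercised against a system catalog; for example (any row of pg_index will do), the sliced expression now behaves as a regular int2 array:

    SELECT indkey, indkey[0:0] AS first_key FROM pg_index LIMIT 1;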
@@ -4887,7 +4887,7 @@ Branch: REL9_0_STABLE [9d6af7367] 2015-08-15 11:02:34 -0400 - PostgreSQL case-folds non-ASCII characters only + PostgreSQL case-folds non-ASCII characters only when using a single-byte server encoding. @@ -4895,7 +4895,7 @@ Branch: REL9_0_STABLE [9d6af7367] 2015-08-15 11:02:34 -0400 Fix checkpoint memory leak in background writer when wal_level = - hot_standby (Naoya Anzai) + hot_standby (Naoya Anzai) @@ -4908,7 +4908,7 @@ Branch: REL9_0_STABLE [9d6af7367] 2015-08-15 11:02:34 -0400 - Fix memory overcommit bug when work_mem is using more + Fix memory overcommit bug when work_mem is using more than 24GB of memory (Stephen Frost) @@ -4939,46 +4939,46 @@ Branch: REL9_0_STABLE [9d6af7367] 2015-08-15 11:02:34 -0400 - Previously tests like col IS NOT TRUE and col IS - NOT FALSE did not properly factor in NULL values when estimating + Previously tests like col IS NOT TRUE and col IS + NOT FALSE did not properly factor in NULL values when estimating plan costs. - Prevent pushing down WHERE clauses into unsafe - UNION/INTERSECT subqueries (Tom Lane) + Prevent pushing down WHERE clauses into unsafe + UNION/INTERSECT subqueries (Tom Lane) - Subqueries of a UNION or INTERSECT that + Subqueries of a UNION or INTERSECT that contain set-returning functions or volatile functions in their - SELECT lists could be improperly optimized, leading to + SELECT lists could be improperly optimized, leading to run-time errors or incorrect query results. - Fix rare case of failed to locate grouping columns + Fix rare case of failed to locate grouping columns planner failure (Tom Lane) - Fix pg_dump of foreign tables with dropped columns (Andrew Dunstan) + Fix pg_dump of foreign tables with dropped columns (Andrew Dunstan) - Previously such cases could cause a pg_upgrade error. + Previously such cases could cause a pg_upgrade error. - Reorder pg_dump processing of extension-related + Reorder pg_dump processing of extension-related rules and event triggers (Joe Conway) @@ -4986,7 +4986,7 @@ Branch: REL9_0_STABLE [9d6af7367] 2015-08-15 11:02:34 -0400 Force dumping of extension tables if specified by pg_dump - -t or -n (Joe Conway) + -t or -n (Joe Conway) @@ -4999,19 +4999,19 @@ Branch: REL9_0_STABLE [9d6af7367] 2015-08-15 11:02:34 -0400 - Fix pg_restore -l with the directory archive to display + Fix pg_restore -l with the directory archive to display the correct format name (Fujii Masao) - Properly record index comments created using UNIQUE - and PRIMARY KEY syntax (Andres Freund) + Properly record index comments created using UNIQUE + and PRIMARY KEY syntax (Andres Freund) - This fixes a parallel pg_restore failure. + This fixes a parallel pg_restore failure. @@ -5041,26 +5041,26 @@ Branch: REL9_0_STABLE [9d6af7367] 2015-08-15 11:02:34 -0400 - Fix REINDEX TABLE and REINDEX DATABASE + Fix REINDEX TABLE and REINDEX DATABASE to properly revalidate constraints and mark invalidated indexes as valid (Noah Misch) - REINDEX INDEX has always worked properly. + REINDEX INDEX has always worked properly. 
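To illustrate the IS NOT TRUE / IS NOT FALSE estimation fix above: NULL satisfies these tests, so the planner must fold the column's NULL fraction into its selectivity estimate, as the small example below shows:

    SELECT b, b IS NOT TRUE AS is_not_true
    FROM (VALUES (true), (false), (NULL::boolean)) AS v(b);   -- rows 2 and 3 both return true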
Fix possible deadlock during concurrent CREATE INDEX - CONCURRENTLY operations (Tom Lane) + CONCURRENTLY operations (Tom Lane) - Fix regexp_matches() handling of zero-length matches + Fix regexp_matches() handling of zero-length matches (Jeevan Chalke) @@ -5084,14 +5084,14 @@ Branch: REL9_0_STABLE [9d6af7367] 2015-08-15 11:02:34 -0400 - Prevent CREATE FUNCTION from checking SET + Prevent CREATE FUNCTION from checking SET variables unless function body checking is enabled (Tom Lane) - Allow ALTER DEFAULT PRIVILEGES to operate on schemas + Allow ALTER DEFAULT PRIVILEGES to operate on schemas without requiring CREATE permission (Tom Lane) @@ -5103,24 +5103,24 @@ Branch: REL9_0_STABLE [9d6af7367] 2015-08-15 11:02:34 -0400 Specifically, lessen keyword restrictions for role names, language - names, EXPLAIN and COPY options, and - SET values. This allows COPY ... (FORMAT - BINARY) to work as expected; previously BINARY needed + names, EXPLAIN and COPY options, and + SET values. This allows COPY ... (FORMAT + BINARY) to work as expected; previously BINARY needed to be quoted. - Fix pgp_pub_decrypt() so it works for secret keys with + Fix pgp_pub_decrypt() so it works for secret keys with passwords (Marko Kreen) - Make pg_upgrade use pg_dump - --quote-all-identifiers to avoid problems with keyword changes + Make pg_upgrade use pg_dump + --quote-all-identifiers to avoid problems with keyword changes between releases (Tom Lane) @@ -5134,7 +5134,7 @@ Branch: REL9_0_STABLE [9d6af7367] 2015-08-15 11:02:34 -0400 - Ensure that VACUUM ANALYZE still runs the ANALYZE phase + Ensure that VACUUM ANALYZE still runs the ANALYZE phase if its attempt to truncate the file is cancelled due to lock conflicts (Kevin Grittner) @@ -5143,21 +5143,21 @@ Branch: REL9_0_STABLE [9d6af7367] 2015-08-15 11:02:34 -0400 Avoid possible failure when performing transaction control commands (e.g - ROLLBACK) in prepared queries (Tom Lane) + ROLLBACK) in prepared queries (Tom Lane) Ensure that floating-point data input accepts standard spellings - of infinity on all platforms (Tom Lane) + of infinity on all platforms (Tom Lane) - The C99 standard says that allowable spellings are inf, - +inf, -inf, infinity, - +infinity, and -infinity. Make sure we - recognize these even if the platform's strtod function + The C99 standard says that allowable spellings are inf, + +inf, -inf, infinity, + +infinity, and -infinity. Make sure we + recognize these even if the platform's strtod function doesn't. @@ -5171,7 +5171,7 @@ Branch: REL9_0_STABLE [9d6af7367] 2015-08-15 11:02:34 -0400 - Update time zone data files to tzdata release 2013d + Update time zone data files to tzdata release 2013d for DST law changes in Israel, Morocco, Palestine, and Paraguay. Also, historical zone data corrections for Macquarie Island. @@ -5193,7 +5193,7 @@ Branch: REL9_0_STABLE [9d6af7367] 2015-08-15 11:02:34 -0400 This release contains a variety of fixes from 9.1.8. For information about new features in the 9.1 major release, see - . + . @@ -5206,13 +5206,13 @@ Branch: REL9_0_STABLE [9d6af7367] 2015-08-15 11:02:34 -0400 However, this release corrects several errors in management of GiST indexes. After installing this update, it is advisable to - REINDEX any GiST indexes that meet one or more of the + REINDEX any GiST indexes that meet one or more of the conditions described below. Also, if you are upgrading from a version earlier than 9.1.6, - see . + see . 
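Regarding the keyword relaxation noted above, the COPY option list now works without quoting BINARY; a minimal sketch with a hypothetical table:

    COPY my_table TO STDOUT (FORMAT BINARY);   -- previously the option had to be written as FORMAT "BINARY"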
@@ -5230,7 +5230,7 @@ Branch: REL9_0_STABLE [9d6af7367] 2015-08-15 11:02:34 -0400 A connection request containing a database name that begins with - - could be crafted to damage or destroy + - could be crafted to damage or destroy files within the server's data directory, even if the request is eventually rejected. (CVE-2013-1899) @@ -5244,9 +5244,9 @@ Branch: REL9_0_STABLE [9d6af7367] 2015-08-15 11:02:34 -0400 This avoids a scenario wherein random numbers generated by - contrib/pgcrypto functions might be relatively easy for + contrib/pgcrypto functions might be relatively easy for another database user to guess. The risk is only significant when - the postmaster is configured with ssl = on + the postmaster is configured with ssl = on but most connections don't use SSL encryption. (CVE-2013-1900) @@ -5259,7 +5259,7 @@ Branch: REL9_0_STABLE [9d6af7367] 2015-08-15 11:02:34 -0400 An unprivileged database user could exploit this mistake to call - pg_start_backup() or pg_stop_backup(), + pg_start_backup() or pg_stop_backup(), thus possibly interfering with creation of routine backups. (CVE-2013-1901) @@ -5267,32 +5267,32 @@ Branch: REL9_0_STABLE [9d6af7367] 2015-08-15 11:02:34 -0400 - Fix GiST indexes to not use fuzzy geometric comparisons when + Fix GiST indexes to not use fuzzy geometric comparisons when it's not appropriate to do so (Alexander Korotkov) - The core geometric types perform comparisons using fuzzy - equality, but gist_box_same must do exact comparisons, + The core geometric types perform comparisons using fuzzy + equality, but gist_box_same must do exact comparisons, else GiST indexes using it might become inconsistent. After installing - this update, users should REINDEX any GiST indexes on - box, polygon, circle, or point - columns, since all of these use gist_box_same. + this update, users should REINDEX any GiST indexes on + box, polygon, circle, or point + columns, since all of these use gist_box_same. Fix erroneous range-union and penalty logic in GiST indexes that use - contrib/btree_gist for variable-width data types, that is - text, bytea, bit, and numeric + contrib/btree_gist for variable-width data types, that is + text, bytea, bit, and numeric columns (Tom Lane) These errors could result in inconsistent indexes in which some keys that are present would not be found by searches, and also in useless - index bloat. Users are advised to REINDEX such indexes + index bloat. Users are advised to REINDEX such indexes after installing this update. @@ -5307,21 +5307,21 @@ Branch: REL9_0_STABLE [9d6af7367] 2015-08-15 11:02:34 -0400 These errors could result in inconsistent indexes in which some keys that are present would not be found by searches, and also in indexes that are unnecessarily inefficient to search. Users are advised to - REINDEX multi-column GiST indexes after installing this + REINDEX multi-column GiST indexes after installing this update. - Fix gist_point_consistent + Fix gist_point_consistent to handle fuzziness consistently (Alexander Korotkov) - Index scans on GiST indexes on point columns would sometimes + Index scans on GiST indexes on point columns would sometimes yield results different from a sequential scan, because - gist_point_consistent disagreed with the underlying + gist_point_consistent disagreed with the underlying operator code about whether to do comparisons exactly or fuzzily. 
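The GiST items above hinge on the core geometric comparisons being fuzzy; a one-line illustration (the offset shown is well inside the geometric comparison tolerance, so the points compare as the same):

    SELECT point '(0,0)' ~= point '(0.0000000001,0)';   -- true under fuzzy equality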
@@ -5332,21 +5332,21 @@ Branch: REL9_0_STABLE [9d6af7367] 2015-08-15 11:02:34 -0400 - This bug could result in incorrect local pin count errors + This bug could result in incorrect local pin count errors during replay, making recovery impossible. - Fix race condition in DELETE RETURNING (Tom Lane) + Fix race condition in DELETE RETURNING (Tom Lane) - Under the right circumstances, DELETE RETURNING could + Under the right circumstances, DELETE RETURNING could attempt to fetch data from a shared buffer that the current process no longer has any pin on. If some other process changed the buffer - meanwhile, this would lead to garbage RETURNING output, or + meanwhile, this would lead to garbage RETURNING output, or even a crash. @@ -5367,28 +5367,28 @@ Branch: REL9_0_STABLE [9d6af7367] 2015-08-15 11:02:34 -0400 - Fix to_char() to use ASCII-only case-folding rules where + Fix to_char() to use ASCII-only case-folding rules where appropriate (Tom Lane) This fixes misbehavior of some template patterns that should be - locale-independent, but mishandled I and - i in Turkish locales. + locale-independent, but mishandled I and + i in Turkish locales. - Fix unwanted rejection of timestamp 1999-12-31 24:00:00 + Fix unwanted rejection of timestamp 1999-12-31 24:00:00 (Tom Lane) - Fix logic error when a single transaction does UNLISTEN - then LISTEN (Tom Lane) + Fix logic error when a single transaction does UNLISTEN + then LISTEN (Tom Lane) @@ -5406,7 +5406,7 @@ Branch: REL9_0_STABLE [9d6af7367] 2015-08-15 11:02:34 -0400 - Remove useless picksplit doesn't support secondary split log + Remove useless picksplit doesn't support secondary split log messages (Josh Hansen, Tom Lane) @@ -5427,29 +5427,29 @@ Branch: REL9_0_STABLE [9d6af7367] 2015-08-15 11:02:34 -0400 - Eliminate memory leaks in PL/Perl's spi_prepare() function + Eliminate memory leaks in PL/Perl's spi_prepare() function (Alex Hunsaker, Tom Lane) - Fix pg_dumpall to handle database names containing - = correctly (Heikki Linnakangas) + Fix pg_dumpall to handle database names containing + = correctly (Heikki Linnakangas) - Avoid crash in pg_dump when an incorrect connection + Avoid crash in pg_dump when an incorrect connection string is given (Heikki Linnakangas) - Ignore invalid indexes in pg_dump and - pg_upgrade (Michael Paquier, Bruce Momjian) + Ignore invalid indexes in pg_dump and + pg_upgrade (Michael Paquier, Bruce Momjian) @@ -5458,15 +5458,15 @@ Branch: REL9_0_STABLE [9d6af7367] 2015-08-15 11:02:34 -0400 a uniqueness condition not satisfied by the table's data. Also, if the index creation is in fact still in progress, it seems reasonable to consider it to be an uncommitted DDL change, which - pg_dump wouldn't be expected to dump anyway. - pg_upgrade now also skips invalid indexes rather than + pg_dump wouldn't be expected to dump anyway. + pg_upgrade now also skips invalid indexes rather than failing. 
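The timestamp fix above concerns the 24:00:00 notation, which is valid input denoting midnight at the end of the given day:

    SELECT timestamp '1999-12-31 24:00:00';   -- accepted again; equivalent to 2000-01-01 00:00:00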
- In pg_basebackup, include only the current server + In pg_basebackup, include only the current server version's subdirectory when backing up a tablespace (Heikki Linnakangas) @@ -5474,26 +5474,26 @@ Branch: REL9_0_STABLE [9d6af7367] 2015-08-15 11:02:34 -0400 - Add a server version check in pg_basebackup and - pg_receivexlog, so they fail cleanly with version + Add a server version check in pg_basebackup and + pg_receivexlog, so they fail cleanly with version combinations that won't work (Heikki Linnakangas) - Fix contrib/pg_trgm's similarity() function + Fix contrib/pg_trgm's similarity() function to return zero for trigram-less strings (Tom Lane) - Previously it returned NaN due to internal division by zero. + Previously it returned NaN due to internal division by zero. - Update time zone data files to tzdata release 2013b + Update time zone data files to tzdata release 2013b for DST law changes in Chile, Haiti, Morocco, Paraguay, and some Russian areas. Also, historical zone data corrections for numerous places. @@ -5501,12 +5501,12 @@ Branch: REL9_0_STABLE [9d6af7367] 2015-08-15 11:02:34 -0400 Also, update the time zone abbreviation files for recent changes in - Russia and elsewhere: CHOT, GET, - IRKT, KGT, KRAT, MAGT, - MAWT, MSK, NOVT, OMST, - TKT, VLAT, WST, YAKT, - YEKT now follow their current meanings, and - VOLT (Europe/Volgograd) and MIST + Russia and elsewhere: CHOT, GET, + IRKT, KGT, KRAT, MAGT, + MAWT, MSK, NOVT, OMST, + TKT, VLAT, WST, YAKT, + YEKT now follow their current meanings, and + VOLT (Europe/Volgograd) and MIST (Antarctica/Macquarie) are added to the default abbreviations list. @@ -5527,7 +5527,7 @@ Branch: REL9_0_STABLE [9d6af7367] 2015-08-15 11:02:34 -0400 This release contains a variety of fixes from 9.1.7. For information about new features in the 9.1 major release, see - . + . @@ -5539,7 +5539,7 @@ Branch: REL9_0_STABLE [9d6af7367] 2015-08-15 11:02:34 -0400 However, if you are upgrading from a version earlier than 9.1.6, - see . + see . @@ -5551,7 +5551,7 @@ Branch: REL9_0_STABLE [9d6af7367] 2015-08-15 11:02:34 -0400 - Prevent execution of enum_recv from SQL (Tom Lane) + Prevent execution of enum_recv from SQL (Tom Lane) @@ -5635,19 +5635,19 @@ Branch: REL9_0_STABLE [9d6af7367] 2015-08-15 11:02:34 -0400 Protect against race conditions when scanning - pg_tablespace (Stephen Frost, Tom Lane) + pg_tablespace (Stephen Frost, Tom Lane) - CREATE DATABASE and DROP DATABASE could + CREATE DATABASE and DROP DATABASE could misbehave if there were concurrent updates of - pg_tablespace entries. + pg_tablespace entries. - Prevent DROP OWNED from trying to drop whole databases or + Prevent DROP OWNED from trying to drop whole databases or tablespaces (Álvaro Herrera) @@ -5659,13 +5659,13 @@ Branch: REL9_0_STABLE [9d6af7367] 2015-08-15 11:02:34 -0400 Fix error in vacuum_freeze_table_age + linkend="guc-vacuum-freeze-table-age">vacuum_freeze_table_age implementation (Andres Freund) In installations that have existed for more than vacuum_freeze_min_age + linkend="guc-vacuum-freeze-min-age">vacuum_freeze_min_age transactions, this mistake prevented autovacuum from using partial-table scans, so that a full-table scan would always happen instead. @@ -5673,13 +5673,13 @@ Branch: REL9_0_STABLE [9d6af7367] 2015-08-15 11:02:34 -0400 - Prevent misbehavior when a RowExpr or XmlExpr + Prevent misbehavior when a RowExpr or XmlExpr is parse-analyzed twice (Andres Freund, Tom Lane) This mistake could be user-visible in contexts such as - CREATE TABLE LIKE INCLUDING INDEXES. 
+ CREATE TABLE LIKE INCLUDING INDEXES. @@ -5699,13 +5699,13 @@ Branch: REL9_0_STABLE [9d6af7367] 2015-08-15 11:02:34 -0400 - Reject out-of-range dates in to_date() (Hitoshi Harada) + Reject out-of-range dates in to_date() (Hitoshi Harada) - Fix pg_extension_config_dump() to handle + Fix pg_extension_config_dump() to handle extension-update cases properly (Tom Lane) @@ -5729,13 +5729,13 @@ Branch: REL9_0_STABLE [9d6af7367] 2015-08-15 11:02:34 -0400 - This bug affected psql and some other client programs. + This bug affected psql and some other client programs. - Fix possible crash in psql's \? command + Fix possible crash in psql's \? command when not connected to a database (Meng Qingzhong) @@ -5743,61 +5743,61 @@ Branch: REL9_0_STABLE [9d6af7367] 2015-08-15 11:02:34 -0400 Fix possible error if a relation file is removed while - pg_basebackup is running (Heikki Linnakangas) + pg_basebackup is running (Heikki Linnakangas) - Make pg_dump exclude data of unlogged tables when + Make pg_dump exclude data of unlogged tables when running on a hot-standby server (Magnus Hagander) This would fail anyway because the data is not available on the standby server, so it seems most convenient to assume - automatically. - Fix pg_upgrade to deal with invalid indexes safely + Fix pg_upgrade to deal with invalid indexes safely (Bruce Momjian) - Fix one-byte buffer overrun in libpq's - PQprintTuples (Xi Wang) + Fix one-byte buffer overrun in libpq's + PQprintTuples (Xi Wang) This ancient function is not used anywhere by - PostgreSQL itself, but it might still be used by some + PostgreSQL itself, but it might still be used by some client code. - Make ecpglib use translated messages properly + Make ecpglib use translated messages properly (Chen Huajun) - Properly install ecpg_compat and - pgtypes libraries on MSVC (Jiang Guiqing) + Properly install ecpg_compat and + pgtypes libraries on MSVC (Jiang Guiqing) - Include our version of isinf() in - libecpg if it's not provided by the system + Include our version of isinf() in + libecpg if it's not provided by the system (Jiang Guiqing) @@ -5817,15 +5817,15 @@ Branch: REL9_0_STABLE [9d6af7367] 2015-08-15 11:02:34 -0400 - Make pgxs build executables with the right - .exe suffix when cross-compiling for Windows + Make pgxs build executables with the right + .exe suffix when cross-compiling for Windows (Zoltan Boszormenyi) - Add new timezone abbreviation FET (Tom Lane) + Add new timezone abbreviation FET (Tom Lane) @@ -5849,7 +5849,7 @@ Branch: REL9_0_STABLE [9d6af7367] 2015-08-15 11:02:34 -0400 This release contains a variety of fixes from 9.1.6. For information about new features in the 9.1 major release, see - . + . @@ -5861,7 +5861,7 @@ Branch: REL9_0_STABLE [9d6af7367] 2015-08-15 11:02:34 -0400 However, if you are upgrading from a version earlier than 9.1.6, - see . + see . @@ -5874,13 +5874,13 @@ Branch: REL9_0_STABLE [9d6af7367] 2015-08-15 11:02:34 -0400 Fix multiple bugs associated with CREATE INDEX - CONCURRENTLY (Andres Freund, Tom Lane) + CONCURRENTLY (Andres Freund, Tom Lane) - Fix CREATE INDEX CONCURRENTLY to use + Fix CREATE INDEX CONCURRENTLY to use in-place updates when changing the state of an index's - pg_index row. This prevents race conditions that could + pg_index row. This prevents race conditions that could cause concurrent sessions to miss updating the target index, thus resulting in corrupt concurrently-created indexes. 
@@ -5888,8 +5888,8 @@ Branch: REL9_0_STABLE [9d6af7367] 2015-08-15 11:02:34 -0400 Also, fix various other operations to ensure that they ignore invalid indexes resulting from a failed CREATE INDEX - CONCURRENTLY command. The most important of these is - VACUUM, because an auto-vacuum could easily be launched + CONCURRENTLY command. The most important of these is + VACUUM, because an auto-vacuum could easily be launched on the table before corrective action can be taken to fix or remove the invalid index. @@ -5926,13 +5926,13 @@ Branch: REL9_0_STABLE [9d6af7367] 2015-08-15 11:02:34 -0400 This oversight could prevent subsequent execution of certain - operations such as CREATE INDEX CONCURRENTLY. + operations such as CREATE INDEX CONCURRENTLY. - Avoid bogus out-of-sequence timeline ID errors in standby + Avoid bogus out-of-sequence timeline ID errors in standby mode (Heikki Linnakangas) @@ -5990,20 +5990,20 @@ Branch: REL9_0_STABLE [9d6af7367] 2015-08-15 11:02:34 -0400 The planner could derive incorrect constraints from a clause equating a non-strict construct to something else, for example - WHERE COALESCE(foo, 0) = 0 - when foo is coming from the nullable side of an outer join. + WHERE COALESCE(foo, 0) = 0 + when foo is coming from the nullable side of an outer join. - Fix SELECT DISTINCT with index-optimized - MIN/MAX on an inheritance tree (Tom Lane) + Fix SELECT DISTINCT with index-optimized + MIN/MAX on an inheritance tree (Tom Lane) The planner would fail with failed to re-find MinMaxAggInfo - record given this combination of factors. + record given this combination of factors. @@ -6021,10 +6021,10 @@ Branch: REL9_0_STABLE [9d6af7367] 2015-08-15 11:02:34 -0400 - This affects multicolumn NOT IN subplans, such as - WHERE (a, b) NOT IN (SELECT x, y FROM ...) - when for instance b and y are int4 - and int8 respectively. This mistake led to wrong answers + This affects multicolumn NOT IN subplans, such as + WHERE (a, b) NOT IN (SELECT x, y FROM ...) + when for instance b and y are int4 + and int8 respectively. This mistake led to wrong answers or crashes depending on the specific datatypes involved. @@ -6032,12 +6032,12 @@ Branch: REL9_0_STABLE [9d6af7367] 2015-08-15 11:02:34 -0400 Acquire buffer lock when re-fetching the old tuple for an - AFTER ROW UPDATE/DELETE trigger (Andres Freund) + AFTER ROW UPDATE/DELETE trigger (Andres Freund) In very unusual circumstances, this oversight could result in passing - incorrect data to a trigger WHEN condition, or to the + incorrect data to a trigger WHEN condition, or to the precheck logic for a foreign-key enforcement trigger. That could result in a crash, or in an incorrect decision about whether to fire the trigger. 
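The multicolumn NOT IN fix above applies to queries of the following shape; the tables and columns are hypothetical, with b and y deliberately of different integer widths (int4 versus int8) as in the description:

    SELECT *
    FROM t1
    WHERE (a, b) NOT IN (SELECT x, y FROM t2);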
@@ -6046,7 +6046,7 @@ Branch: REL9_0_STABLE [9d6af7367] 2015-08-15 11:02:34 -0400 - Fix ALTER COLUMN TYPE to handle inherited check + Fix ALTER COLUMN TYPE to handle inherited check constraints properly (Pavan Deolasee) @@ -6058,7 +6058,7 @@ Branch: REL9_0_STABLE [9d6af7367] 2015-08-15 11:02:34 -0400 - Fix ALTER EXTENSION SET SCHEMA's failure to move some + Fix ALTER EXTENSION SET SCHEMA's failure to move some subsidiary objects into the new schema (Álvaro Herrera, Dimitri Fontaine) @@ -6066,14 +6066,14 @@ Branch: REL9_0_STABLE [9d6af7367] 2015-08-15 11:02:34 -0400 - Fix REASSIGN OWNED to handle grants on tablespaces + Fix REASSIGN OWNED to handle grants on tablespaces (Álvaro Herrera) - Ignore incorrect pg_attribute entries for system + Ignore incorrect pg_attribute entries for system columns for views (Tom Lane) @@ -6087,7 +6087,7 @@ Branch: REL9_0_STABLE [9d6af7367] 2015-08-15 11:02:34 -0400 - Fix rule printing to dump INSERT INTO table + Fix rule printing to dump INSERT INTO table DEFAULT VALUES correctly (Tom Lane) @@ -6095,7 +6095,7 @@ Branch: REL9_0_STABLE [9d6af7367] 2015-08-15 11:02:34 -0400 Guard against stack overflow when there are too many - UNION/INTERSECT/EXCEPT clauses + UNION/INTERSECT/EXCEPT clauses in a query (Tom Lane) @@ -6117,14 +6117,14 @@ Branch: REL9_0_STABLE [9d6af7367] 2015-08-15 11:02:34 -0400 Fix failure to advance XID epoch if XID wraparound happens during a - checkpoint and wal_level is hot_standby + checkpoint and wal_level is hot_standby (Tom Lane, Andres Freund) While this mistake had no particular impact on PostgreSQL itself, it was bad for - applications that rely on txid_current() and related + applications that rely on txid_current() and related functions: the TXID value would appear to go backwards. @@ -6132,7 +6132,7 @@ Branch: REL9_0_STABLE [9d6af7367] 2015-08-15 11:02:34 -0400 Fix display of - pg_stat_replication.sync_state at a + pg_stat_replication.sync_state at a page boundary (Kyotaro Horiguchi) @@ -6146,7 +6146,7 @@ Branch: REL9_0_STABLE [9d6af7367] 2015-08-15 11:02:34 -0400 Formerly, this would result in something quite unhelpful, such as - Non-recoverable failure in name resolution. + Non-recoverable failure in name resolution. @@ -6159,8 +6159,8 @@ Branch: REL9_0_STABLE [9d6af7367] 2015-08-15 11:02:34 -0400 - Make pg_ctl more robust about reading the - postmaster.pid file (Heikki Linnakangas) + Make pg_ctl more robust about reading the + postmaster.pid file (Heikki Linnakangas) @@ -6170,15 +6170,15 @@ Branch: REL9_0_STABLE [9d6af7367] 2015-08-15 11:02:34 -0400 - Fix possible crash in psql if incorrectly-encoded data - is presented and the client_encoding setting is a + Fix possible crash in psql if incorrectly-encoded data + is presented and the client_encoding setting is a client-only encoding, such as SJIS (Jiang Guiqing) - Make pg_dump dump SEQUENCE SET items in + Make pg_dump dump SEQUENCE SET items in the data not pre-data section of the archive (Tom Lane) @@ -6190,25 +6190,25 @@ Branch: REL9_0_STABLE [9d6af7367] 2015-08-15 11:02:34 -0400 - Fix bugs in the restore.sql script emitted by - pg_dump in tar output format (Tom Lane) + Fix bugs in the restore.sql script emitted by + pg_dump in tar output format (Tom Lane) The script would fail outright on tables whose names include upper-case characters. Also, make the script capable of restoring - data in mode as well as the regular COPY mode. 
- Fix pg_restore to accept POSIX-conformant - tar files (Brian Weaver, Tom Lane) + Fix pg_restore to accept POSIX-conformant + tar files (Brian Weaver, Tom Lane) - The original coding of pg_dump's tar + The original coding of pg_dump's tar output mode produced files that are not fully conformant with the POSIX standard. This has been corrected for version 9.3. This patch updates previous branches so that they will accept both the @@ -6219,67 +6219,67 @@ Branch: REL9_0_STABLE [9d6af7367] 2015-08-15 11:02:34 -0400 - Fix tar files emitted by pg_basebackup to + Fix tar files emitted by pg_basebackup to be POSIX conformant (Brian Weaver, Tom Lane) - Fix pg_resetxlog to locate postmaster.pid + Fix pg_resetxlog to locate postmaster.pid correctly when given a relative path to the data directory (Tom Lane) - This mistake could lead to pg_resetxlog not noticing + This mistake could lead to pg_resetxlog not noticing that there is an active postmaster using the data directory. - Fix libpq's lo_import() and - lo_export() functions to report file I/O errors properly + Fix libpq's lo_import() and + lo_export() functions to report file I/O errors properly (Tom Lane) - Fix ecpg's processing of nested structure pointer + Fix ecpg's processing of nested structure pointer variables (Muhammad Usama) - Fix ecpg's ecpg_get_data function to + Fix ecpg's ecpg_get_data function to handle arrays properly (Michael Meskes) - Make contrib/pageinspect's btree page inspection + Make contrib/pageinspect's btree page inspection functions take buffer locks while examining pages (Tom Lane) - Ensure that make install for an extension creates the - extension installation directory (Cédric Villemain) + Ensure that make install for an extension creates the + extension installation directory (Cédric Villemain) - Previously, this step was missed if MODULEDIR was set in + Previously, this step was missed if MODULEDIR was set in the extension's Makefile. - Fix pgxs support for building loadable modules on AIX + Fix pgxs support for building loadable modules on AIX (Tom Lane) @@ -6290,7 +6290,7 @@ Branch: REL9_0_STABLE [9d6af7367] 2015-08-15 11:02:34 -0400 - Update time zone data files to tzdata release 2012j + Update time zone data files to tzdata release 2012j for DST law changes in Cuba, Israel, Jordan, Libya, Palestine, Western Samoa, and portions of Brazil. @@ -6312,7 +6312,7 @@ Branch: REL9_0_STABLE [9d6af7367] 2015-08-15 11:02:34 -0400 This release contains a variety of fixes from 9.1.5. For information about new features in the 9.1 major release, see - . + . @@ -6323,14 +6323,14 @@ Branch: REL9_0_STABLE [9d6af7367] 2015-08-15 11:02:34 -0400 - However, you may need to perform REINDEX operations to + However, you may need to perform REINDEX operations to recover from the effects of the data corruption bug described in the first changelog item below. Also, if you are upgrading from a version earlier than 9.1.4, - see . + see . @@ -6354,7 +6354,7 @@ Branch: REL9_0_STABLE [9d6af7367] 2015-08-15 11:02:34 -0400 likely to occur on standby slave servers since those perform much more WAL replay. There is a low probability of corruption of btree and GIN indexes. There is a much higher probability of corruption of - table visibility maps. Fortunately, visibility maps are + table visibility maps. Fortunately, visibility maps are non-critical data in 9.1, so the worst consequence of such corruption in 9.1 installations is transient inefficiency of vacuuming. Table data proper cannot be corrupted by this bug. 
@@ -6363,18 +6363,18 @@ Branch: REL9_0_STABLE [9d6af7367] 2015-08-15 11:02:34 -0400 While no index corruption due to this bug is known to have occurred in the field, as a precautionary measure it is recommended that - production installations REINDEX all btree and GIN + production installations REINDEX all btree and GIN indexes at a convenient time after upgrading to 9.1.6. Also, if you intend to do an in-place upgrade to 9.2.X, before doing - so it is recommended to perform a VACUUM of all tables + so it is recommended to perform a VACUUM of all tables while having vacuum_freeze_table_age + linkend="guc-vacuum-freeze-table-age">vacuum_freeze_table_age set to zero. This will ensure that any lingering wrong data in the visibility maps is corrected before 9.2.X can depend on it. vacuum_cost_delay + linkend="guc-vacuum-cost-delay">vacuum_cost_delay can be adjusted to reduce the performance impact of vacuuming, while causing it to take longer to finish. @@ -6388,15 +6388,15 @@ Branch: REL9_0_STABLE [9d6af7367] 2015-08-15 11:02:34 -0400 These errors could result in wrong answers from queries that scan the - same WITH subquery multiple times. + same WITH subquery multiple times. Fix misbehavior when default_transaction_isolation - is set to serializable (Kevin Grittner, Tom Lane, Heikki + linkend="guc-default-transaction-isolation">default_transaction_isolation + is set to serializable (Kevin Grittner, Tom Lane, Heikki Linnakangas) @@ -6409,7 +6409,7 @@ Branch: REL9_0_STABLE [9d6af7367] 2015-08-15 11:02:34 -0400 Improve selectivity estimation for text search queries involving - prefixes, i.e. word:* patterns (Tom Lane) + prefixes, i.e. word:* patterns (Tom Lane) @@ -6432,10 +6432,10 @@ Branch: REL9_0_STABLE [9d6af7367] 2015-08-15 11:02:34 -0400 - If we revoke a grant option from some role X, but - X still holds that option via a grant from someone + If we revoke a grant option from some role X, but + X still holds that option via a grant from someone else, we should not recursively revoke the corresponding privilege - from role(s) Y that X had granted it + from role(s) Y that X had granted it to. @@ -6448,7 +6448,7 @@ Branch: REL9_0_STABLE [9d6af7367] 2015-08-15 11:02:34 -0400 This situation creates circular dependencies that confuse - pg_dump and probably other things. It's confusing + pg_dump and probably other things. It's confusing for humans too, so disallow it. @@ -6462,7 +6462,7 @@ Branch: REL9_0_STABLE [9d6af7367] 2015-08-15 11:02:34 -0400 - Make configure probe for mbstowcs_l (Tom + Make configure probe for mbstowcs_l (Tom Lane) @@ -6473,12 +6473,12 @@ Branch: REL9_0_STABLE [9d6af7367] 2015-08-15 11:02:34 -0400 - Fix handling of SIGFPE when PL/Perl is in use (Andres Freund) + Fix handling of SIGFPE when PL/Perl is in use (Andres Freund) - Perl resets the process's SIGFPE handler to - SIG_IGN, which could result in crashes later on. Restore + Perl resets the process's SIGFPE handler to + SIG_IGN, which could result in crashes later on. Restore the normal Postgres signal handler after initializing PL/Perl. @@ -6497,7 +6497,7 @@ Branch: REL9_0_STABLE [9d6af7367] 2015-08-15 11:02:34 -0400 Some Linux distributions contain an incorrect version of - pthread.h that results in incorrect compiled code in + pthread.h that results in incorrect compiled code in PL/Perl, leading to crashes if a PL/Perl function calls another one that throws an error. 
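The pre-upgrade guidance above could be carried out roughly as follows; this is only a sketch, the index name is a placeholder, and the commands should be repeated in each database of the cluster:

    SET vacuum_freeze_table_age = 0;
    SET vacuum_cost_delay = 20;              -- optional: throttles vacuum I/O (milliseconds)
    VACUUM;                                  -- scans every table in the current database
    REINDEX INDEX some_btree_or_gin_index;   -- repeat per index, or use REINDEX TABLE / DATABASE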
@@ -6505,45 +6505,45 @@ Branch: REL9_0_STABLE [9d6af7367] 2015-08-15 11:02:34 -0400 - Fix bugs in contrib/pg_trgm's LIKE pattern + Fix bugs in contrib/pg_trgm's LIKE pattern analysis code (Fujii Masao) - LIKE queries using a trigram index could produce wrong - results if the pattern contained LIKE escape characters. + LIKE queries using a trigram index could produce wrong + results if the pattern contained LIKE escape characters. - Fix pg_upgrade's handling of line endings on Windows + Fix pg_upgrade's handling of line endings on Windows (Andrew Dunstan) - Previously, pg_upgrade might add or remove carriage + Previously, pg_upgrade might add or remove carriage returns in places such as function bodies. - On Windows, make pg_upgrade use backslash path + On Windows, make pg_upgrade use backslash path separators in the scripts it emits (Andrew Dunstan) - Remove unnecessary dependency on pg_config from - pg_upgrade (Peter Eisentraut) + Remove unnecessary dependency on pg_config from + pg_upgrade (Peter Eisentraut) - Update time zone data files to tzdata release 2012f + Update time zone data files to tzdata release 2012f for DST law changes in Fiji @@ -6564,7 +6564,7 @@ Branch: REL9_0_STABLE [9d6af7367] 2015-08-15 11:02:34 -0400 This release contains a variety of fixes from 9.1.4. For information about new features in the 9.1 major release, see - . + . @@ -6576,7 +6576,7 @@ Branch: REL9_0_STABLE [9d6af7367] 2015-08-15 11:02:34 -0400 However, if you are upgrading from a version earlier than 9.1.4, - see . + see . @@ -6593,7 +6593,7 @@ Branch: REL9_0_STABLE [9d6af7367] 2015-08-15 11:02:34 -0400 - xml_parse() would attempt to fetch external files or + xml_parse() would attempt to fetch external files or URLs as needed to resolve DTD and entity references in an XML value, thus allowing unprivileged database users to attempt to fetch data with the privileges of the database server. While the external data @@ -6606,22 +6606,22 @@ Branch: REL9_0_STABLE [9d6af7367] 2015-08-15 11:02:34 -0400 - Prevent access to external files/URLs via contrib/xml2's - xslt_process() (Peter Eisentraut) + Prevent access to external files/URLs via contrib/xml2's + xslt_process() (Peter Eisentraut) - libxslt offers the ability to read and write both + libxslt offers the ability to read and write both files and URLs through stylesheet commands, thus allowing unprivileged database users to both read and write data with the privileges of the database server. Disable that through proper use - of libxslt's security options. (CVE-2012-3488) + of libxslt's security options. (CVE-2012-3488) - Also, remove xslt_process()'s ability to fetch documents + Also, remove xslt_process()'s ability to fetch documents and stylesheets from external files/URLs. While this was a - documented feature, it was long regarded as a bad idea. + documented feature, it was long regarded as a bad idea. The fix for CVE-2012-3489 broke that capability, and rather than expend effort on trying to fix it, we're just going to summarily remove it. 
@@ -6649,21 +6649,21 @@ Branch: REL9_0_STABLE [9d6af7367] 2015-08-15 11:02:34 -0400 - If ALTER SEQUENCE was executed on a freshly created or - reset sequence, and then precisely one nextval() call + If ALTER SEQUENCE was executed on a freshly created or + reset sequence, and then precisely one nextval() call was made on it, and then the server crashed, WAL replay would restore the sequence to a state in which it appeared that no - nextval() had been done, thus allowing the first + nextval() had been done, thus allowing the first sequence value to be returned again by the next - nextval() call. In particular this could manifest for - serial columns, since creation of a serial column's sequence - includes an ALTER SEQUENCE OWNED BY step. + nextval() call. In particular this could manifest for + serial columns, since creation of a serial column's sequence + includes an ALTER SEQUENCE OWNED BY step. - Fix race condition in enum-type value comparisons (Robert + Fix race condition in enum-type value comparisons (Robert Haas, Tom Lane) @@ -6675,7 +6675,7 @@ Branch: REL9_0_STABLE [9d6af7367] 2015-08-15 11:02:34 -0400 - Fix txid_current() to report the correct epoch when not + Fix txid_current() to report the correct epoch when not in hot standby (Heikki Linnakangas) @@ -6692,7 +6692,7 @@ Branch: REL9_0_STABLE [9d6af7367] 2015-08-15 11:02:34 -0400 The master might improperly choose pseudo-servers such as - pg_receivexlog or pg_basebackup + pg_receivexlog or pg_basebackup as the synchronous standby, and then wait indefinitely for them. @@ -6705,14 +6705,14 @@ Branch: REL9_0_STABLE [9d6af7367] 2015-08-15 11:02:34 -0400 This mistake led to failures reported as out-of-order XID - insertion in KnownAssignedXids. + insertion in KnownAssignedXids. - Ensure the backup_label file is fsync'd after - pg_start_backup() (Dave Kerr) + Ensure the backup_label file is fsync'd after + pg_start_backup() (Dave Kerr) @@ -6723,7 +6723,7 @@ Branch: REL9_0_STABLE [9d6af7367] 2015-08-15 11:02:34 -0400 WAL sender background processes neglected to establish a - SIGALRM handler, meaning they would wait forever in + SIGALRM handler, meaning they would wait forever in some corner cases where a timeout ought to happen. @@ -6742,15 +6742,15 @@ Branch: REL9_0_STABLE [9d6af7367] 2015-08-15 11:02:34 -0400 - Fix LISTEN/NOTIFY to cope better with I/O + Fix LISTEN/NOTIFY to cope better with I/O problems, such as out of disk space (Tom Lane) After a write failure, all subsequent attempts to send more - NOTIFY messages would fail with messages like - Could not read from file "pg_notify/nnnn" at - offset nnnnn: Success. + NOTIFY messages would fail with messages like + Could not read from file "pg_notify/nnnn" at + offset nnnnn: Success. @@ -6763,7 +6763,7 @@ Branch: REL9_0_STABLE [9d6af7367] 2015-08-15 11:02:34 -0400 The original coding could allow inconsistent behavior in some cases; in particular, an autovacuum could get canceled after less than - deadlock_timeout grace period. + deadlock_timeout grace period. 
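A minimal sketch of the crash-replay scenario described in the ALTER SEQUENCE item above, using a hypothetical table; creating the serial column performs the implicit ALTER SEQUENCE ... OWNED BY step mentioned there:

    CREATE TABLE events (id serial PRIMARY KEY, payload text);
    SELECT nextval('events_id_seq');   -- exactly one call, followed by a crash, could trigger the bug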
@@ -6775,15 +6775,15 @@ Branch: REL9_0_STABLE [9d6af7367] 2015-08-15 11:02:34 -0400 - Fix log collector so that log_truncate_on_rotation works + Fix log collector so that log_truncate_on_rotation works during the very first log rotation after server start (Tom Lane) - Fix WITH attached to a nested set operation - (UNION/INTERSECT/EXCEPT) + Fix WITH attached to a nested set operation + (UNION/INTERSECT/EXCEPT) (Tom Lane) @@ -6791,44 +6791,44 @@ Branch: REL9_0_STABLE [9d6af7367] 2015-08-15 11:02:34 -0400 Ensure that a whole-row reference to a subquery doesn't include any - extra GROUP BY or ORDER BY columns (Tom Lane) + extra GROUP BY or ORDER BY columns (Tom Lane) Fix dependencies generated during ALTER TABLE ... ADD - CONSTRAINT USING INDEX (Tom Lane) + CONSTRAINT USING INDEX (Tom Lane) - This command left behind a redundant pg_depend entry + This command left behind a redundant pg_depend entry for the index, which could confuse later operations, notably - ALTER TABLE ... ALTER COLUMN TYPE on one of the indexed + ALTER TABLE ... ALTER COLUMN TYPE on one of the indexed columns. - Fix REASSIGN OWNED to work on extensions (Alvaro Herrera) + Fix REASSIGN OWNED to work on extensions (Alvaro Herrera) - Disallow copying whole-row references in CHECK - constraints and index definitions during CREATE TABLE + Disallow copying whole-row references in CHECK + constraints and index definitions during CREATE TABLE (Tom Lane) - This situation can arise in CREATE TABLE with - LIKE or INHERITS. The copied whole-row + This situation can arise in CREATE TABLE with + LIKE or INHERITS. The copied whole-row variable was incorrectly labeled with the row type of the original table not the new one. Rejecting the case seems reasonable for - LIKE, since the row types might well diverge later. For - INHERITS we should ideally allow it, with an implicit + LIKE, since the row types might well diverge later. For + INHERITS we should ideally allow it, with an implicit coercion to the parent table's row type; but that will require more work than seems safe to back-patch. @@ -6836,7 +6836,7 @@ Branch: REL9_0_STABLE [9d6af7367] 2015-08-15 11:02:34 -0400 - Fix memory leak in ARRAY(SELECT ...) subqueries (Heikki + Fix memory leak in ARRAY(SELECT ...) subqueries (Heikki Linnakangas, Tom Lane) @@ -6860,7 +6860,7 @@ Branch: REL9_0_STABLE [9d6af7367] 2015-08-15 11:02:34 -0400 The code could get confused by quantified parenthesized - subexpressions, such as ^(foo)?bar. This would lead to + subexpressions, such as ^(foo)?bar. This would lead to incorrect index optimization of searches for such patterns. @@ -6868,26 +6868,26 @@ Branch: REL9_0_STABLE [9d6af7367] 2015-08-15 11:02:34 -0400 Fix bugs with parsing signed - hh:mm and - hh:mm:ss - fields in interval constants (Amit Kapila, Tom Lane) + hh:mm and + hh:mm:ss + fields in interval constants (Amit Kapila, Tom Lane) - Fix pg_dump to better handle views containing partial - GROUP BY lists (Tom Lane) + Fix pg_dump to better handle views containing partial + GROUP BY lists (Tom Lane) - A view that lists only a primary key column in GROUP BY, + A view that lists only a primary key column in GROUP BY, but uses other table columns as if they were grouped, gets marked as depending on the primary key. Improper handling of such primary key - dependencies in pg_dump resulted in poorly-ordered + dependencies in pg_dump resulted in poorly-ordered dumps, which at best would be inefficient to restore and at worst could result in outright failure of a parallel - pg_restore run. + pg_restore run. 
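For context on the ALTER TABLE ... ADD CONSTRAINT USING INDEX dependency fix above, the general shape of the command involved, with illustrative object names:

    CREATE TABLE accounts (id integer);
    CREATE UNIQUE INDEX accounts_id_idx ON accounts (id);
    ALTER TABLE accounts
        ADD CONSTRAINT accounts_id_key UNIQUE USING INDEX accounts_id_idx;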
@@ -6923,14 +6923,14 @@ Branch: REL9_0_STABLE [9d6af7367] 2015-08-15 11:02:34 -0400 - Report errors properly in contrib/xml2's - xslt_process() (Tom Lane) + Report errors properly in contrib/xml2's + xslt_process() (Tom Lane) - Update time zone data files to tzdata release 2012e + Update time zone data files to tzdata release 2012e for DST law changes in Morocco and Tokelau @@ -6951,7 +6951,7 @@ Branch: REL9_0_STABLE [9d6af7367] 2015-08-15 11:02:34 -0400 This release contains a variety of fixes from 9.1.3. For information about new features in the 9.1 major release, see - . + . @@ -6962,20 +6962,20 @@ Branch: REL9_0_STABLE [9d6af7367] 2015-08-15 11:02:34 -0400 - However, if you use the citext data type, and you upgraded - from a previous major release by running pg_upgrade, - you should run CREATE EXTENSION citext FROM unpackaged - to avoid collation-related failures in citext operations. + However, if you use the citext data type, and you upgraded + from a previous major release by running pg_upgrade, + you should run CREATE EXTENSION citext FROM unpackaged + to avoid collation-related failures in citext operations. The same is necessary if you restore a dump from a pre-9.1 database - that contains an instance of the citext data type. - If you've already run the CREATE EXTENSION command before + that contains an instance of the citext data type. + If you've already run the CREATE EXTENSION command before upgrading to 9.1.4, you will instead need to do manual catalog updates as explained in the third changelog item below. Also, if you are upgrading from a version earlier than 9.1.2, - see . + see . @@ -6988,12 +6988,12 @@ Branch: REL9_0_STABLE [9d6af7367] 2015-08-15 11:02:34 -0400 Fix incorrect password transformation in - contrib/pgcrypto's DES crypt() function + contrib/pgcrypto's DES crypt() function (Solar Designer) - If a password string contained the byte value 0x80, the + If a password string contained the byte value 0x80, the remainder of the password was ignored, causing the password to be much weaker than it appeared. With this fix, the rest of the string is properly included in the DES hash. Any stored password values that are @@ -7004,7 +7004,7 @@ Branch: REL9_0_STABLE [9d6af7367] 2015-08-15 11:02:34 -0400 - Ignore SECURITY DEFINER and SET attributes for + Ignore SECURITY DEFINER and SET attributes for a procedural language's call handler (Tom Lane) @@ -7016,16 +7016,16 @@ Branch: REL9_0_STABLE [9d6af7367] 2015-08-15 11:02:34 -0400 - Make contrib/citext's upgrade script fix collations of - citext arrays and domains over citext + Make contrib/citext's upgrade script fix collations of + citext arrays and domains over citext (Tom Lane) - Release 9.1.2 provided a fix for collations of citext columns + Release 9.1.2 provided a fix for collations of citext columns and indexes in databases upgraded or reloaded from pre-9.1 installations, but that fix was incomplete: it neglected to handle arrays - and domains over citext. This release extends the module's + and domains over citext. This release extends the module's upgrade script to handle these cases. As before, if you have already run the upgrade script, you'll need to run the collation update commands by hand instead. 
See the 9.1.2 release notes for more @@ -7035,7 +7035,7 @@ Branch: REL9_0_STABLE [9d6af7367] 2015-08-15 11:02:34 -0400 - Allow numeric timezone offsets in timestamp input to be up to + Allow numeric timezone offsets in timestamp input to be up to 16 hours away from UTC (Tom Lane) @@ -7061,7 +7061,7 @@ Branch: REL9_0_STABLE [9d6af7367] 2015-08-15 11:02:34 -0400 - Fix text to name and char to name + Fix text to name and char to name casts to perform string truncation correctly in multibyte encodings (Karl Schnaitter) @@ -7069,13 +7069,13 @@ Branch: REL9_0_STABLE [9d6af7367] 2015-08-15 11:02:34 -0400 - Fix memory copying bug in to_tsquery() (Heikki Linnakangas) + Fix memory copying bug in to_tsquery() (Heikki Linnakangas) - Ensure txid_current() reports the correct epoch when + Ensure txid_current() reports the correct epoch when executed in hot standby (Simon Riggs) @@ -7090,7 +7090,7 @@ Branch: REL9_0_STABLE [9d6af7367] 2015-08-15 11:02:34 -0400 This bug concerns sub-SELECTs that reference variables coming from the nullable side of an outer join of the surrounding query. In 9.1, queries affected by this bug would fail with ERROR: - Upper-level PlaceHolderVar found where not expected. But in 9.0 and + Upper-level PlaceHolderVar found where not expected. But in 9.0 and 8.4, you'd silently get possibly-wrong answers, since the value transmitted into the subquery wouldn't go to null when it should. @@ -7098,26 +7098,26 @@ Branch: REL9_0_STABLE [9d6af7367] 2015-08-15 11:02:34 -0400 - Fix planning of UNION ALL subqueries with output columns + Fix planning of UNION ALL subqueries with output columns that are not simple variables (Tom Lane) Planning of such cases got noticeably worse in 9.1 as a result of a misguided fix for MergeAppend child's targetlist doesn't match - MergeAppend errors. Revert that fix and do it another way. + MergeAppend errors. Revert that fix and do it another way. - Fix slow session startup when pg_attribute is very large + Fix slow session startup when pg_attribute is very large (Tom Lane) - If pg_attribute exceeds one-fourth of - shared_buffers, cache rebuilding code that is sometimes + If pg_attribute exceeds one-fourth of + shared_buffers, cache rebuilding code that is sometimes needed during session start would trigger the synchronized-scan logic, causing it to take many times longer than normal. The problem was particularly acute if many new sessions were starting at once. @@ -7138,8 +7138,8 @@ Branch: REL9_0_STABLE [9d6af7367] 2015-08-15 11:02:34 -0400 - Ensure the Windows implementation of PGSemaphoreLock() - clears ImmediateInterruptOK before returning (Tom Lane) + Ensure the Windows implementation of PGSemaphoreLock() + clears ImmediateInterruptOK before returning (Tom Lane) @@ -7166,31 +7166,31 @@ Branch: REL9_0_STABLE [9d6af7367] 2015-08-15 11:02:34 -0400 - Fix COPY FROM to properly handle null marker strings that + Fix COPY FROM to properly handle null marker strings that correspond to invalid encoding (Tom Lane) - A null marker string such as E'\\0' should work, and did + A null marker string such as E'\\0' should work, and did work in the past, but the case got broken in 8.4. 
- Fix EXPLAIN VERBOSE for writable CTEs containing - RETURNING clauses (Tom Lane) + Fix EXPLAIN VERBOSE for writable CTEs containing + RETURNING clauses (Tom Lane) - Fix PREPARE TRANSACTION to work correctly in the presence + Fix PREPARE TRANSACTION to work correctly in the presence of advisory locks (Tom Lane) - Historically, PREPARE TRANSACTION has simply ignored any + Historically, PREPARE TRANSACTION has simply ignored any session-level advisory locks the session holds, but this case was accidentally broken in 9.1. @@ -7205,14 +7205,14 @@ Branch: REL9_0_STABLE [9d6af7367] 2015-08-15 11:02:34 -0400 Ignore missing schemas during non-interactive assignments of - search_path (Tom Lane) + search_path (Tom Lane) This re-aligns 9.1's behavior with that of older branches. Previously 9.1 would throw an error for nonexistent schemas mentioned in - search_path settings obtained from places such as - ALTER DATABASE SET. + search_path settings obtained from places such as + ALTER DATABASE SET. @@ -7223,7 +7223,7 @@ Branch: REL9_0_STABLE [9d6af7367] 2015-08-15 11:02:34 -0400 - This includes cases such as a rewriting ALTER TABLE within + This includes cases such as a rewriting ALTER TABLE within an extension update script, since that uses a transient table behind the scenes. @@ -7237,7 +7237,7 @@ Branch: REL9_0_STABLE [9d6af7367] 2015-08-15 11:02:34 -0400 Previously, infinite recursion in a function invoked by - auto-ANALYZE could crash worker processes. + auto-ANALYZE could crash worker processes. @@ -7256,13 +7256,13 @@ Branch: REL9_0_STABLE [9d6af7367] 2015-08-15 11:02:34 -0400 Fix logging collector to ensure it will restart file rotation - after receiving SIGHUP (Tom Lane) + after receiving SIGHUP (Tom Lane) - Fix too many LWLocks taken failure in GiST indexes (Heikki + Fix too many LWLocks taken failure in GiST indexes (Heikki Linnakangas) @@ -7296,35 +7296,35 @@ Branch: REL9_0_STABLE [9d6af7367] 2015-08-15 11:02:34 -0400 - Fix error handling in pg_basebackup + Fix error handling in pg_basebackup (Thomas Ogrisegg, Fujii Masao) - Fix walsender to not go into a busy loop if connection + Fix walsender to not go into a busy loop if connection is terminated (Fujii Masao) - Fix memory leak in PL/pgSQL's RETURN NEXT command (Joe + Fix memory leak in PL/pgSQL's RETURN NEXT command (Joe Conway) - Fix PL/pgSQL's GET DIAGNOSTICS command when the target + Fix PL/pgSQL's GET DIAGNOSTICS command when the target is the function's first variable (Tom Lane) - Ensure that PL/Perl package-qualifies the _TD variable + Ensure that PL/Perl package-qualifies the _TD variable (Alex Hunsaker) @@ -7349,19 +7349,19 @@ Branch: REL9_0_STABLE [9d6af7367] 2015-08-15 11:02:34 -0400 - Fix potential access off the end of memory in psql's - expanded display (\x) mode (Peter Eisentraut) + Fix potential access off the end of memory in psql's + expanded display (\x) mode (Peter Eisentraut) - Fix several performance problems in pg_dump when + Fix several performance problems in pg_dump when the database contains many objects (Jeff Janes, Tom Lane) - pg_dump could get very slow if the database contained + pg_dump could get very slow if the database contained many schemas, or if many objects are in dependency loops, or if there are many owned sequences. 
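The search_path item above concerns non-interactive settings of the kind sketched here; the database and schema names are hypothetical, and a schema that does not exist is once again ignored rather than raising an error:

    ALTER DATABASE appdb SET search_path = app_schema, public;   -- app_schema need not exist yet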
@@ -7369,14 +7369,14 @@ Branch: REL9_0_STABLE [9d6af7367] 2015-08-15 11:02:34 -0400 - Fix memory and file descriptor leaks in pg_restore + Fix memory and file descriptor leaks in pg_restore when reading a directory-format archive (Peter Eisentraut) - Fix pg_upgrade for the case that a database stored in a + Fix pg_upgrade for the case that a database stored in a non-default tablespace contains a table in the cluster's default tablespace (Bruce Momjian) @@ -7384,41 +7384,41 @@ Branch: REL9_0_STABLE [9d6af7367] 2015-08-15 11:02:34 -0400 - In ecpg, fix rare memory leaks and possible overwrite - of one byte after the sqlca_t structure (Peter Eisentraut) + In ecpg, fix rare memory leaks and possible overwrite + of one byte after the sqlca_t structure (Peter Eisentraut) - Fix contrib/dblink's dblink_exec() to not leak + Fix contrib/dblink's dblink_exec() to not leak temporary database connections upon error (Tom Lane) - Fix contrib/dblink to report the correct connection name in + Fix contrib/dblink to report the correct connection name in error messages (Kyotaro Horiguchi) - Fix contrib/vacuumlo to use multiple transactions when + Fix contrib/vacuumlo to use multiple transactions when dropping many large objects (Tim Lewis, Robert Haas, Tom Lane) - This change avoids exceeding max_locks_per_transaction when + This change avoids exceeding max_locks_per_transaction when many objects need to be dropped. The behavior can be adjusted with the - new -l (limit) option. + new -l (limit) option. - Update time zone data files to tzdata release 2012c + Update time zone data files to tzdata release 2012c for DST law changes in Antarctica, Armenia, Chile, Cuba, Falkland Islands, Gaza, Haiti, Hebron, Morocco, Syria, and Tokelau Islands; also historical corrections for Canada. @@ -7441,7 +7441,7 @@ Branch: REL9_0_STABLE [9d6af7367] 2015-08-15 11:02:34 -0400 This release contains a variety of fixes from 9.1.2. For information about new features in the 9.1 major release, see - . + . @@ -7453,7 +7453,7 @@ Branch: REL9_0_STABLE [9d6af7367] 2015-08-15 11:02:34 -0400 However, if you are upgrading from a version earlier than 9.1.2, - see . + see . @@ -7466,14 +7466,14 @@ Branch: REL9_0_STABLE [9d6af7367] 2015-08-15 11:02:34 -0400 Require execute permission on the trigger function for - CREATE TRIGGER (Robert Haas) + CREATE TRIGGER (Robert Haas) This missing check could allow another user to execute a trigger function with forged input data, by installing it on a table he owns. This is only of significance for trigger functions marked - SECURITY DEFINER, since otherwise trigger functions run + SECURITY DEFINER, since otherwise trigger functions run as the table owner anyway. (CVE-2012-0866) @@ -7485,7 +7485,7 @@ Branch: REL9_0_STABLE [9d6af7367] 2015-08-15 11:02:34 -0400 - Both libpq and the server truncated the common name + Both libpq and the server truncated the common name extracted from an SSL certificate at 32 bytes. Normally this would cause nothing worse than an unexpected verification failure, but there are some rather-implausible scenarios in which it might allow one @@ -7500,12 +7500,12 @@ Branch: REL9_0_STABLE [9d6af7367] 2015-08-15 11:02:34 -0400 - Convert newlines to spaces in names written in pg_dump + Convert newlines to spaces in names written in pg_dump comments (Robert Haas) - pg_dump was incautious about sanitizing object names + pg_dump was incautious about sanitizing object names that are emitted within SQL comments in its output script. 
A name containing a newline would at least render the script syntactically incorrect. Maliciously crafted object names could present a SQL @@ -7521,10 +7521,10 @@ Branch: REL9_0_STABLE [9d6af7367] 2015-08-15 11:02:34 -0400 An index page split caused by an insertion could sometimes cause a - concurrently-running VACUUM to miss removing index entries + concurrently-running VACUUM to miss removing index entries that it should remove. After the corresponding table rows are removed, the dangling index entries would cause errors (such as could not - read block N in file ...) or worse, silently wrong query results + read block N in file ...) or worse, silently wrong query results after unrelated rows are re-inserted at the now-free table locations. This bug has been present since release 8.2, but occurs so infrequently that it was not diagnosed until now. If you have reason to suspect @@ -7543,22 +7543,22 @@ Branch: REL9_0_STABLE [9d6af7367] 2015-08-15 11:02:34 -0400 that the contents were transiently invalid. In hot standby mode this can result in a query that's executing in parallel seeing garbage data. Various symptoms could result from that, but the most common one seems - to be invalid memory alloc request size. + to be invalid memory alloc request size. - Fix handling of data-modifying WITH subplans in - READ COMMITTED rechecking (Tom Lane) + Fix handling of data-modifying WITH subplans in + READ COMMITTED rechecking (Tom Lane) - A WITH clause containing - INSERT/UPDATE/DELETE would crash - if the parent UPDATE or DELETE command needed + A WITH clause containing + INSERT/UPDATE/DELETE would crash + if the parent UPDATE or DELETE command needed to be re-evaluated at one or more rows due to concurrent updates - in READ COMMITTED mode. + in READ COMMITTED mode. @@ -7589,13 +7589,13 @@ Branch: REL9_0_STABLE [9d6af7367] 2015-08-15 11:02:34 -0400 - Fix CLUSTER/VACUUM FULL handling of toast + Fix CLUSTER/VACUUM FULL handling of toast values owned by recently-updated rows (Tom Lane) This oversight could lead to duplicate key value violates unique - constraint errors being reported against the toast table's index + constraint errors being reported against the toast table's index during one of these commands. @@ -7617,11 +7617,11 @@ Branch: REL9_0_STABLE [9d6af7367] 2015-08-15 11:02:34 -0400 Support foreign data wrappers and foreign servers in - REASSIGN OWNED (Alvaro Herrera) + REASSIGN OWNED (Alvaro Herrera) - This command failed with unexpected classid errors if + This command failed with unexpected classid errors if it needed to change the ownership of any such objects. @@ -7629,24 +7629,24 @@ Branch: REL9_0_STABLE [9d6af7367] 2015-08-15 11:02:34 -0400 Allow non-existent values for some settings in ALTER - USER/DATABASE SET (Heikki Linnakangas) + USER/DATABASE SET (Heikki Linnakangas) - Allow default_text_search_config, - default_tablespace, and temp_tablespaces to be + Allow default_text_search_config, + default_tablespace, and temp_tablespaces to be set to names that are not known. This is because they might be known in another database where the setting is intended to be used, or for the tablespace cases because the tablespace might not be created yet. The - same issue was previously recognized for search_path, and + same issue was previously recognized for search_path, and these settings now act like that one. 
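The ALTER USER/DATABASE SET item above covers commands of this general shape; the names are hypothetical, and the referenced tablespace or text search configuration need not exist yet in the current database:

    ALTER DATABASE appdb SET default_tablespace = 'fast_disks';
    ALTER ROLE reporting SET default_text_search_config = 'my_english';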
- Fix unsupported node type error caused by COLLATE - in an INSERT expression (Tom Lane) + Fix unsupported node type error caused by COLLATE + in an INSERT expression (Tom Lane) @@ -7669,7 +7669,7 @@ Branch: REL9_0_STABLE [9d6af7367] 2015-08-15 11:02:34 -0400 Recover from errors occurring during WAL replay of DROP - TABLESPACE (Tom Lane) + TABLESPACE (Tom Lane) @@ -7691,7 +7691,7 @@ Branch: REL9_0_STABLE [9d6af7367] 2015-08-15 11:02:34 -0400 Sometimes a lock would be logged as being held by transaction - zero. This is at least known to produce assertion failures on + zero. This is at least known to produce assertion failures on slave servers, and might be the cause of more serious problems. @@ -7713,7 +7713,7 @@ Branch: REL9_0_STABLE [9d6af7367] 2015-08-15 11:02:34 -0400 - Prevent emitting misleading consistent recovery state reached + Prevent emitting misleading consistent recovery state reached log message at the beginning of crash recovery (Heikki Linnakangas) @@ -7721,7 +7721,7 @@ Branch: REL9_0_STABLE [9d6af7367] 2015-08-15 11:02:34 -0400 Fix initial value of - pg_stat_replication.replay_location + pg_stat_replication.replay_location (Fujii Masao) @@ -7733,7 +7733,7 @@ Branch: REL9_0_STABLE [9d6af7367] 2015-08-15 11:02:34 -0400 - Fix regular expression back-references with * attached + Fix regular expression back-references with * attached (Tom Lane) @@ -7747,18 +7747,18 @@ Branch: REL9_0_STABLE [9d6af7367] 2015-08-15 11:02:34 -0400 A similar problem still afflicts back-references that are embedded in a larger quantified expression, rather than being the immediate subject of the quantifier. This will be addressed in a future - PostgreSQL release. + PostgreSQL release. Fix recently-introduced memory leak in processing of - inet/cidr values (Heikki Linnakangas) + inet/cidr values (Heikki Linnakangas) - A patch in the December 2011 releases of PostgreSQL + A patch in the December 2011 releases of PostgreSQL caused memory leakage in these operations, which could be significant in scenarios such as building a btree index on such a column. @@ -7767,7 +7767,7 @@ Branch: REL9_0_STABLE [9d6af7367] 2015-08-15 11:02:34 -0400 Fix planner's ability to push down index-expression restrictions - through UNION ALL (Tom Lane) + through UNION ALL (Tom Lane) @@ -7778,19 +7778,19 @@ Branch: REL9_0_STABLE [9d6af7367] 2015-08-15 11:02:34 -0400 - Fix planning of WITH clauses referenced in - UPDATE/DELETE on an inherited table + Fix planning of WITH clauses referenced in + UPDATE/DELETE on an inherited table (Tom Lane) - This bug led to could not find plan for CTE failures. + This bug led to could not find plan for CTE failures. - Fix GIN cost estimation to handle column IN (...) + Fix GIN cost estimation to handle column IN (...) index conditions (Marti Raudsepp) @@ -7813,8 +7813,8 @@ Branch: REL9_0_STABLE [9d6af7367] 2015-08-15 11:02:34 -0400 - Fix dangling pointer after CREATE TABLE AS/SELECT - INTO in a SQL-language function (Tom Lane) + Fix dangling pointer after CREATE TABLE AS/SELECT + INTO in a SQL-language function (Tom Lane) @@ -7853,14 +7853,14 @@ Branch: REL9_0_STABLE [9d6af7367] 2015-08-15 11:02:34 -0400 This function crashes when handed a typeglob or certain read-only - objects such as $^V. Make plperl avoid passing those to + objects such as $^V. Make plperl avoid passing those to it. 
- In pg_dump, don't dump contents of an extension's + In pg_dump, don't dump contents of an extension's configuration tables if the extension itself is not being dumped (Tom Lane) @@ -7868,32 +7868,32 @@ Branch: REL9_0_STABLE [9d6af7367] 2015-08-15 11:02:34 -0400 - Improve pg_dump's handling of inherited table columns + Improve pg_dump's handling of inherited table columns (Tom Lane) - pg_dump mishandled situations where a child column has + pg_dump mishandled situations where a child column has a different default expression than its parent column. If the default is textually identical to the parent's default, but not actually the same (for instance, because of schema search path differences) it would not be recognized as different, so that after dump and restore the child would be allowed to inherit the parent's default. Child columns - that are NOT NULL where their parent is not could also be + that are NOT NULL where their parent is not could also be restored subtly incorrectly. - Fix pg_restore's direct-to-database mode for + Fix pg_restore's direct-to-database mode for INSERT-style table data (Tom Lane) Direct-to-database restores from archive files made with - - Cope with invalid pre-existing search_path settings during - CREATE EXTENSION (Tom Lane) + Cope with invalid pre-existing search_path settings during + CREATE EXTENSION (Tom Lane) @@ -8453,14 +8453,14 @@ Branch: REL9_0_STABLE [9d6af7367] 2015-08-15 11:02:34 -0400 - Ensure walsender processes respond promptly to SIGTERM + Ensure walsender processes respond promptly to SIGTERM (Magnus Hagander) - Exclude postmaster.opts from base backups + Exclude postmaster.opts from base backups (Magnus Hagander) @@ -8473,20 +8473,20 @@ Branch: REL9_0_STABLE [9d6af7367] 2015-08-15 11:02:34 -0400 Formerly, these would not be displayed correctly in the - pg_settings view. + pg_settings view. - Fix incorrect field alignment in ecpg's SQLDA area + Fix incorrect field alignment in ecpg's SQLDA area (Zoltan Boszormenyi) - Preserve blank lines within commands in psql's command + Preserve blank lines within commands in psql's command history (Robert Haas) @@ -8498,41 +8498,41 @@ Branch: REL9_0_STABLE [9d6af7367] 2015-08-15 11:02:34 -0400 - Avoid platform-specific infinite loop in pg_dump + Avoid platform-specific infinite loop in pg_dump (Steve Singer) - Fix compression of plain-text output format in pg_dump + Fix compression of plain-text output format in pg_dump (Adrian Klaver and Tom Lane) - pg_dump has historically understood -Z with - no -F switch to mean that it should emit a gzip-compressed + pg_dump has historically understood -Z with + no -F switch to mean that it should emit a gzip-compressed version of its plain text output. Restore that behavior. - Fix pg_dump to dump user-defined casts between + Fix pg_dump to dump user-defined casts between auto-generated types, such as table rowtypes (Tom Lane) - Fix missed quoting of foreign server names in pg_dump + Fix missed quoting of foreign server names in pg_dump (Tom Lane) - Assorted fixes for pg_upgrade (Bruce Momjian) + Assorted fixes for pg_upgrade (Bruce Momjian) @@ -8556,15 +8556,15 @@ Branch: REL9_0_STABLE [9d6af7367] 2015-08-15 11:02:34 -0400 Restore the pre-9.1 behavior that PL/Perl functions returning - void ignore the result value of their last Perl statement; + void ignore the result value of their last Perl statement; 9.1.0 would throw an error if that statement returned a reference. 
Also, make sure it works to return a string value for a composite type, so long as the string meets the type's input format. In addition, throw errors for attempts to return Perl arrays or hashes when the function's declared result type is not an array or composite type, respectively. (Pre-9.1 versions rather uselessly returned - strings like ARRAY(0x221a9a0) or - HASH(0x221aa90) in such cases.) + strings like ARRAY(0x221a9a0) or + HASH(0x221aa90) in such cases.) @@ -8577,7 +8577,7 @@ Branch: REL9_0_STABLE [9d6af7367] 2015-08-15 11:02:34 -0400 - Use the preferred version of xsubpp to build PL/Perl, + Use the preferred version of xsubpp to build PL/Perl, not necessarily the operating system's main copy (David Wheeler and Alex Hunsaker) @@ -8599,14 +8599,14 @@ Branch: REL9_0_STABLE [9d6af7367] 2015-08-15 11:02:34 -0400 - Change all the contrib extension script files to report - a useful error message if they are fed to psql + Change all the contrib extension script files to report + a useful error message if they are fed to psql (Andrew Dunstan and Tom Lane) This should help teach people about the new method of using - CREATE EXTENSION to load these files. In most cases, + CREATE EXTENSION to load these files. In most cases, sourcing the scripts directly would fail anyway, but with harder-to-interpret messages. @@ -8614,19 +8614,19 @@ Branch: REL9_0_STABLE [9d6af7367] 2015-08-15 11:02:34 -0400 - Fix incorrect coding in contrib/dict_int and - contrib/dict_xsyn (Tom Lane) + Fix incorrect coding in contrib/dict_int and + contrib/dict_xsyn (Tom Lane) Some functions incorrectly assumed that memory returned by - palloc() is guaranteed zeroed. + palloc() is guaranteed zeroed. - Remove contrib/sepgsql tests from the regular regression + Remove contrib/sepgsql tests from the regular regression test mechanism (Tom Lane) @@ -8639,14 +8639,14 @@ Branch: REL9_0_STABLE [9d6af7367] 2015-08-15 11:02:34 -0400 - Fix assorted errors in contrib/unaccent's configuration + Fix assorted errors in contrib/unaccent's configuration file parsing (Tom Lane) - Honor query cancel interrupts promptly in pgstatindex() + Honor query cancel interrupts promptly in pgstatindex() (Robert Haas) @@ -8660,7 +8660,7 @@ Branch: REL9_0_STABLE [9d6af7367] 2015-08-15 11:02:34 -0400 - Revert unintentional enabling of WAL_DEBUG (Robert Haas) + Revert unintentional enabling of WAL_DEBUG (Robert Haas) @@ -8695,15 +8695,15 @@ Branch: REL9_0_STABLE [9d6af7367] 2015-08-15 11:02:34 -0400 - Map Central America Standard Time to CST6, not - CST6CDT, because DST is generally not observed anywhere in + Map Central America Standard Time to CST6, not + CST6CDT, because DST is generally not observed anywhere in Central America. - Update time zone data files to tzdata release 2011n + Update time zone data files to tzdata release 2011n for DST law changes in Brazil, Cuba, Fiji, Palestine, Russia, and Samoa; also historical corrections for Alaska and British East Africa. @@ -8725,7 +8725,7 @@ Branch: REL9_0_STABLE [9d6af7367] 2015-08-15 11:02:34 -0400 This release contains a small number of fixes from 9.1.0. For information about new features in the 9.1 major release, see - . + . 
@@ -8744,7 +8744,7 @@ Branch: REL9_0_STABLE [9d6af7367] 2015-08-15 11:02:34 -0400 - Make pg_options_to_table return NULL for an option with no + Make pg_options_to_table return NULL for an option with no value (Tom Lane) @@ -8768,8 +8768,8 @@ Branch: REL9_0_STABLE [9d6af7367] 2015-08-15 11:02:34 -0400 - Fix explicit reference to pg_temp schema in CREATE - TEMPORARY TABLE (Robert Haas) + Fix explicit reference to pg_temp schema in CREATE + TEMPORARY TABLE (Robert Haas) @@ -8794,9 +8794,9 @@ Branch: REL9_0_STABLE [9d6af7367] 2015-08-15 11:02:34 -0400 Overview - This release shows PostgreSQL moving beyond the + This release shows PostgreSQL moving beyond the traditional relational-database feature set with new, ground-breaking - functionality that is unique to PostgreSQL. + functionality that is unique to PostgreSQL. The streaming replication feature introduced in release 9.0 is significantly enhanced by adding a synchronous-replication option, streaming backups, and monitoring improvements. @@ -8816,7 +8816,7 @@ Branch: REL9_0_STABLE [9d6af7367] 2015-08-15 11:02:34 -0400 - Add support for foreign + Add support for foreign tables @@ -8831,7 +8831,7 @@ Branch: REL9_0_STABLE [9d6af7367] 2015-08-15 11:02:34 -0400 Add extensions which - simplify packaging of additions to PostgreSQL + simplify packaging of additions to PostgreSQL @@ -8844,32 +8844,32 @@ Branch: REL9_0_STABLE [9d6af7367] 2015-08-15 11:02:34 -0400 - Support unlogged tables using the UNLOGGED - option in CREATE - TABLE + Support unlogged tables using the UNLOGGED + option in CREATE + TABLE Allow data-modification commands - (INSERT/UPDATE/DELETE) in - WITH clauses + (INSERT/UPDATE/DELETE) in + WITH clauses Add nearest-neighbor (order-by-operator) searching to GiST indexes + linkend="gist">GiST indexes - Add a SECURITY - LABEL command and support for - SELinux permissions control + Add a SECURITY + LABEL command and support for + SELinux permissions control @@ -8912,7 +8912,7 @@ Branch: REL9_0_STABLE [9d6af7367] 2015-08-15 11:02:34 -0400 Change the default value of standard_conforming_strings + linkend="guc-standard-conforming-strings">standard_conforming_strings to on (Robert Haas) @@ -8920,8 +8920,8 @@ Branch: REL9_0_STABLE [9d6af7367] 2015-08-15 11:02:34 -0400 By default, backslashes are now ordinary characters in string literals, not escape characters. This change removes a long-standing incompatibility with the SQL standard. escape_string_warning - has produced warnings about this usage for years. E'' + linkend="guc-escape-string-warning">escape_string_warning + has produced warnings about this usage for years. E'' strings are the proper way to embed backslash escapes in strings and are unaffected by this change. @@ -8955,12 +8955,12 @@ Branch: REL9_0_STABLE [9d6af7367] 2015-08-15 11:02:34 -0400 For example, disallow - composite_value.text and - text(composite_value). + composite_value.text and + text(composite_value). Unintentional uses of this syntax have frequently resulted in bug reports; although it was not a bug, it seems better to go back to rejecting such expressions. - The CAST and :: syntaxes are still available + The CAST and :: syntaxes are still available for use when a cast of an entire composite value is actually intended. 
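A quick illustration of the standard_conforming_strings default change noted above; the literals are arbitrary:

    SHOW standard_conforming_strings;      -- now 'on' by default
    SELECT 'C:\new\dir';                   -- backslash is an ordinary character in a plain literal
    SELECT E'first line\nsecond line';     -- E'' strings still process backslash escapes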
@@ -8972,10 +8972,10 @@ Branch: REL9_0_STABLE [9d6af7367] 2015-08-15 11:02:34 -0400 When a domain is based on an array type, it is allowed to look - through the domain type to access the array elements, including + through the domain type to access the array elements, including subscripting the domain value to fetch or assign an element. Assignment to an element of such a domain value, for instance via - UPDATE ... SET domaincol[5] = ..., will now result in + UPDATE ... SET domaincol[5] = ..., will now result in rechecking the domain type's constraints, whereas before the checks were skipped. @@ -8993,7 +8993,7 @@ Branch: REL9_0_STABLE [9d6af7367] 2015-08-15 11:02:34 -0400 Change string_to_array() + linkend="array-functions-table">string_to_array() to return an empty array for a zero-length string (Pavel Stehule) @@ -9006,8 +9006,8 @@ Branch: REL9_0_STABLE [9d6af7367] 2015-08-15 11:02:34 -0400 Change string_to_array() - so a NULL separator splits the string into characters + linkend="array-functions-table">string_to_array() + so a NULL separator splits the string into characters (Pavel Stehule) @@ -9031,8 +9031,8 @@ Branch: REL9_0_STABLE [9d6af7367] 2015-08-15 11:02:34 -0400 - Triggers can now be fired in three cases: BEFORE, - AFTER, or INSTEAD OF some action. + Triggers can now be fired in three cases: BEFORE, + AFTER, or INSTEAD OF some action. Trigger function authors should verify that their logic behaves sanely in all three cases. @@ -9040,7 +9040,7 @@ Branch: REL9_0_STABLE [9d6af7367] 2015-08-15 11:02:34 -0400 - Require superuser or CREATEROLE permissions in order to + Require superuser or CREATEROLE permissions in order to set comments on roles (Tom Lane) @@ -9057,12 +9057,12 @@ Branch: REL9_0_STABLE [9d6af7367] 2015-08-15 11:02:34 -0400 Change pg_last_xlog_receive_location() + linkend="functions-recovery-info-table">pg_last_xlog_receive_location() so it never moves backwards (Fujii Masao) - Previously, the value of pg_last_xlog_receive_location() + Previously, the value of pg_last_xlog_receive_location() could move backward when streaming replication is restarted. @@ -9070,7 +9070,7 @@ Branch: REL9_0_STABLE [9d6af7367] 2015-08-15 11:02:34 -0400 Have logging of replication connections honor log_connections + linkend="guc-log-connections">log_connections (Magnus Hagander) @@ -9090,12 +9090,12 @@ Branch: REL9_0_STABLE [9d6af7367] 2015-08-15 11:02:34 -0400 - Change PL/pgSQL's RAISE command without parameters + Change PL/pgSQL's RAISE command without parameters to be catchable by the attached exception block (Piyush Newe) - Previously RAISE in a code block was always scoped to + Previously RAISE in a code block was always scoped to an attached exception block, so it was uncatchable at the same scope. @@ -9154,7 +9154,7 @@ Branch: REL9_0_STABLE [9d6af7367] 2015-08-15 11:02:34 -0400 All contrib modules are now installed with CREATE EXTENSION + linkend="sql-createextension">CREATE EXTENSION rather than by manually invoking their SQL scripts (Dimitri Fontaine, Tom Lane) @@ -9164,7 +9164,7 @@ Branch: REL9_0_STABLE [9d6af7367] 2015-08-15 11:02:34 -0400 module, use CREATE EXTENSION ... FROM unpackaged to wrap the existing contrib module's objects into an extension. When updating from a pre-9.0 version, drop the contrib module's objects - using its old uninstall script, then use CREATE EXTENSION. + using its old uninstall script, then use CREATE EXTENSION. 
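The two string_to_array() behavior changes above, shown as quick examples:

    SELECT string_to_array('', ',');       -- now returns {} rather than NULL
    SELECT string_to_array('abc', NULL);   -- now returns {a,b,c}: a NULL separator splits into characters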
@@ -9180,26 +9180,26 @@ Branch: REL9_0_STABLE [9d6af7367] 2015-08-15 11:02:34 -0400 Make pg_stat_reset() + linkend="monitoring-stats-funcs-table">pg_stat_reset() reset all database-level statistics (Tomas Vondra) - Some pg_stat_database counters were not being reset. + Some pg_stat_database counters were not being reset. Fix some information_schema.triggers + linkend="infoschema-triggers">information_schema.triggers column names to match the new SQL-standard names (Dean Rasheed) - Treat ECPG cursor names as case-insensitive + Treat ECPG cursor names as case-insensitive (Zoltan Boszormenyi) @@ -9228,9 +9228,9 @@ Branch: REL9_0_STABLE [9d6af7367] 2015-08-15 11:02:34 -0400 - Support unlogged tables using the UNLOGGED - option in CREATE - TABLE (Robert Haas) + Support unlogged tables using the UNLOGGED + option in CREATE + TABLE (Robert Haas) @@ -9244,8 +9244,8 @@ Branch: REL9_0_STABLE [9d6af7367] 2015-08-15 11:02:34 -0400 Allow FULL OUTER JOIN to be implemented as a - hash join, and allow either side of a LEFT OUTER JOIN - or RIGHT OUTER JOIN to be hashed (Tom Lane) + hash join, and allow either side of a LEFT OUTER JOIN + or RIGHT OUTER JOIN to be hashed (Tom Lane) @@ -9270,7 +9270,7 @@ Branch: REL9_0_STABLE [9d6af7367] 2015-08-15 11:02:34 -0400 Improve performance of commit_siblings + linkend="guc-commit-siblings">commit_siblings (Greg Smith) @@ -9289,7 +9289,7 @@ Branch: REL9_0_STABLE [9d6af7367] 2015-08-15 11:02:34 -0400 - Avoid leaving data files open after blind writes + Avoid leaving data files open after blind writes (Alvaro Herrera) @@ -9317,7 +9317,7 @@ Branch: REL9_0_STABLE [9d6af7367] 2015-08-15 11:02:34 -0400 This allows better optimization of queries that use ORDER - BY, LIMIT, or MIN/MAX with + BY, LIMIT, or MIN/MAX with inherited tables. @@ -9346,34 +9346,34 @@ Branch: REL9_0_STABLE [9d6af7367] 2015-08-15 11:02:34 -0400 Support host names and host suffixes - (e.g. .example.com) in pg_hba.conf + (e.g. .example.com) in pg_hba.conf (Peter Eisentraut) - Previously only host IP addresses and CIDR + Previously only host IP addresses and CIDR values were supported. - Support the key word all in the host column of pg_hba.conf + Support the key word all in the host column of pg_hba.conf (Peter Eisentraut) - Previously people used 0.0.0.0/0 or ::/0 + Previously people used 0.0.0.0/0 or ::/0 for this. - Reject local lines in pg_hba.conf + Reject local lines in pg_hba.conf on platforms that don't support Unix-socket connections (Magnus Hagander) @@ -9386,14 +9386,14 @@ Branch: REL9_0_STABLE [9d6af7367] 2015-08-15 11:02:34 -0400 - Allow GSSAPI + Allow GSSAPI to be used to authenticate to servers via SSPI (Christian Ullrich) + linkend="sspi-auth">SSPI (Christian Ullrich) - Specifically this allows Unix-based GSSAPI clients - to do SSPI authentication with Windows servers. + Specifically this allows Unix-based GSSAPI clients + to do SSPI authentication with Windows servers. @@ -9414,14 +9414,14 @@ Branch: REL9_0_STABLE [9d6af7367] 2015-08-15 11:02:34 -0400 - Rewrite peer + Rewrite peer authentication to avoid use of credential control messages (Tom Lane) This change makes the peer authentication code simpler and better-performing. However, it requires the platform to provide the - getpeereid function or an equivalent socket operation. + getpeereid function or an equivalent socket operation. So far as is known, the only platform for which peer authentication worked before and now will not is pre-5.0 NetBSD. 
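A minimal example of the UNLOGGED option mentioned above; the table name is hypothetical, and such a table is not WAL-logged and is truncated after a crash:

    CREATE UNLOGGED TABLE scratch_results (id integer PRIMARY KEY, payload text);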
@@ -9440,19 +9440,19 @@ Branch: REL9_0_STABLE [9d6af7367] 2015-08-15 11:02:34 -0400 Add details to the logging of restartpoints and checkpoints, which is controlled by log_checkpoints + linkend="guc-log-checkpoints">log_checkpoints (Fujii Masao, Greg Smith) - New details include WAL file and sync activity. + New details include WAL file and sync activity. Add log_file_mode + linkend="guc-log-file-mode">log_file_mode which controls the permissions on log files created by the logging collector (Martin Pihlak) @@ -9460,7 +9460,7 @@ Branch: REL9_0_STABLE [9d6af7367] 2015-08-15 11:02:34 -0400 - Reduce the default maximum line length for syslog + Reduce the default maximum line length for syslog logging to 900 bytes plus prefixes (Noah Misch) @@ -9482,7 +9482,7 @@ Branch: REL9_0_STABLE [9d6af7367] 2015-08-15 11:02:34 -0400 Add client_hostname column to pg_stat_activity + linkend="monitoring-stats-views-table">pg_stat_activity (Peter Eisentraut) @@ -9494,7 +9494,7 @@ Branch: REL9_0_STABLE [9d6af7367] 2015-08-15 11:02:34 -0400 Add pg_stat_xact_* + linkend="monitoring-stats-views-table">pg_stat_xact_* statistics functions and views (Joel Jacobson) @@ -9515,15 +9515,15 @@ Branch: REL9_0_STABLE [9d6af7367] 2015-08-15 11:02:34 -0400 Add columns showing the number of vacuum and analyze operations in pg_stat_*_tables + linkend="monitoring-stats-views-table">pg_stat_*_tables views (Magnus Hagander) - Add buffers_backend_fsync column to pg_stat_bgwriter + Add buffers_backend_fsync column to pg_stat_bgwriter (Greg Smith) @@ -9545,13 +9545,13 @@ Branch: REL9_0_STABLE [9d6af7367] 2015-08-15 11:02:34 -0400 Provide auto-tuning of wal_buffers (Greg + linkend="guc-wal-buffers">wal_buffers (Greg Smith) - By default, the value of wal_buffers is now chosen - automatically based on the value of shared_buffers. + By default, the value of wal_buffers is now chosen + automatically based on the value of shared_buffers. @@ -9598,7 +9598,7 @@ Branch: REL9_0_STABLE [9d6af7367] 2015-08-15 11:02:34 -0400 synchronous_standby_names setting. Synchronous replication can be enabled or disabled on a per-transaction basis using the - synchronous_commit + synchronous_commit setting. @@ -9619,13 +9619,13 @@ Branch: REL9_0_STABLE [9d6af7367] 2015-08-15 11:02:34 -0400 Add - replication_timeout + replication_timeout setting (Fujii Masao, Heikki Linnakangas) Replication connections that are idle for more than the - replication_timeout interval will be terminated + replication_timeout interval will be terminated automatically. Formerly, a failed connection was typically not detected until the TCP timeout elapsed, which is inconveniently long in many situations. 
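The per-transaction control over synchronous_commit described above might look roughly like this; the table is hypothetical, and a synchronous standby is assumed to be configured via synchronous_standby_names:

    BEGIN;
    SET LOCAL synchronous_commit TO off;   -- this transaction alone skips the wait for the standby
    INSERT INTO audit_log (note) VALUES ('low-value write');
    COMMIT;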
@@ -9635,7 +9635,7 @@ Branch: REL9_0_STABLE [9d6af7367] 2015-08-15 11:02:34 -0400 Add command-line tool pg_basebackup + linkend="app-pgbasebackup">pg_basebackup for creating a new standby server or database backup (Magnus Hagander) @@ -9643,7 +9643,7 @@ Branch: REL9_0_STABLE [9d6af7367] 2015-08-15 11:02:34 -0400 - Add a replication permission + Add a replication permission for roles (Magnus Hagander) @@ -9667,8 +9667,8 @@ Branch: REL9_0_STABLE [9d6af7367] 2015-08-15 11:02:34 -0400 Add system view pg_stat_replication - which displays activity of WAL sender processes (Itagaki + linkend="pg-stat-replication-view">pg_stat_replication + which displays activity of WAL sender processes (Itagaki Takahiro, Simon Riggs) @@ -9680,7 +9680,7 @@ Branch: REL9_0_STABLE [9d6af7367] 2015-08-15 11:02:34 -0400 Add monitoring function pg_last_xact_replay_timestamp() + linkend="functions-recovery-info-table">pg_last_xact_replay_timestamp() (Fujii Masao) @@ -9702,7 +9702,7 @@ Branch: REL9_0_STABLE [9d6af7367] 2015-08-15 11:02:34 -0400 Add configuration parameter hot_standby_feedback + linkend="guc-hot-standby-feedback">hot_standby_feedback to enable standbys to postpone cleanup of old row versions on the primary (Simon Riggs) @@ -9715,7 +9715,7 @@ Branch: REL9_0_STABLE [9d6af7367] 2015-08-15 11:02:34 -0400 Add the pg_stat_database_conflicts + linkend="monitoring-stats-views-table">pg_stat_database_conflicts system view to show queries that have been canceled and the reason (Magnus Hagander) @@ -9728,8 +9728,8 @@ Branch: REL9_0_STABLE [9d6af7367] 2015-08-15 11:02:34 -0400 - Add a conflicts count to pg_stat_database + Add a conflicts count to pg_stat_database (Magnus Hagander) @@ -9754,7 +9754,7 @@ Branch: REL9_0_STABLE [9d6af7367] 2015-08-15 11:02:34 -0400 Add ERRCODE_T_R_DATABASE_DROPPED + linkend="errcodes-table">ERRCODE_T_R_DATABASE_DROPPED error code to report recovery conflicts due to dropped databases (Tatsuo Ishii) @@ -9780,18 +9780,18 @@ Branch: REL9_0_STABLE [9d6af7367] 2015-08-15 11:02:34 -0400 The new functions are pg_xlog_replay_pause(), + linkend="functions-recovery-control-table">pg_xlog_replay_pause(), pg_xlog_replay_resume(), + linkend="functions-recovery-control-table">pg_xlog_replay_resume(), and the status function pg_is_xlog_replay_paused(). + linkend="functions-recovery-control-table">pg_is_xlog_replay_paused(). - Add recovery.conf setting - pause_at_recovery_target + Add recovery.conf setting + pause_at_recovery_target to pause recovery at target (Simon Riggs) @@ -9804,14 +9804,14 @@ Branch: REL9_0_STABLE [9d6af7367] 2015-08-15 11:02:34 -0400 Add the ability to create named restore points using pg_create_restore_point() + linkend="functions-admin-backup-table">pg_create_restore_point() (Jaime Casanova) These named restore points can be specified as recovery - targets using the new recovery.conf setting - recovery_target_name. + targets using the new recovery.conf setting + recovery_target_name. 
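A sketch of the named-restore-point workflow described above; the label is arbitrary, and the recovery.conf line is shown as a comment because it belongs to the recovery configuration rather than to SQL:

    SELECT pg_create_restore_point('before_bulk_load');
    -- later, on a server recovering to that point, recovery.conf would contain:
    --   recovery_target_name = 'before_bulk_load'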
@@ -9830,7 +9830,7 @@ Branch: REL9_0_STABLE [9d6af7367] 2015-08-15 11:02:34 -0400 Add restart_after_crash + linkend="guc-restart-after-crash">restart_after_crash setting which disables automatic server restart after a backend crash (Robert Haas) @@ -9844,8 +9844,8 @@ Branch: REL9_0_STABLE [9d6af7367] 2015-08-15 11:02:34 -0400 Allow recovery.conf - to use the same quoting behavior as postgresql.conf + linkend="recovery-config">recovery.conf + to use the same quoting behavior as postgresql.conf (Dimitri Fontaine) @@ -9877,7 +9877,7 @@ Branch: REL9_0_STABLE [9d6af7367] 2015-08-15 11:02:34 -0400 single MVCC snapshot would be used for the entire transaction, which allowed certain documented anomalies. The old snapshot isolation behavior is still available by requesting the REPEATABLE READ + linkend="xact-repeatable-read">REPEATABLE READ isolation level. @@ -9885,30 +9885,30 @@ Branch: REL9_0_STABLE [9d6af7367] 2015-08-15 11:02:34 -0400 Allow data-modification commands - (INSERT/UPDATE/DELETE) in - WITH clauses + (INSERT/UPDATE/DELETE) in + WITH clauses (Marko Tiikkaja, Hitoshi Harada) - These commands can use RETURNING to pass data up to the + These commands can use RETURNING to pass data up to the containing query. - Allow WITH - clauses to be attached to INSERT, UPDATE, - DELETE statements (Marko Tiikkaja, Hitoshi Harada) + Allow WITH + clauses to be attached to INSERT, UPDATE, + DELETE statements (Marko Tiikkaja, Hitoshi Harada) Allow non-GROUP - BY columns in the query target list when the primary - key is specified in the GROUP BY clause (Peter + BY columns in the query target list when the primary + key is specified in the GROUP BY clause (Peter Eisentraut) @@ -9920,13 +9920,13 @@ Branch: REL9_0_STABLE [9d6af7367] 2015-08-15 11:02:34 -0400 - Allow use of the key word DISTINCT in UNION/INTERSECT/EXCEPT + Allow use of the key word DISTINCT in UNION/INTERSECT/EXCEPT clauses (Tom Lane) - DISTINCT is the default behavior so use of this + DISTINCT is the default behavior so use of this key word is redundant, but the SQL standard allows it. @@ -9934,13 +9934,13 @@ Branch: REL9_0_STABLE [9d6af7367] 2015-08-15 11:02:34 -0400 Fix ordinary queries with rules to use the same snapshot behavior - as EXPLAIN ANALYZE (Marko Tiikkaja) + as EXPLAIN ANALYZE (Marko Tiikkaja) - Previously EXPLAIN ANALYZE used slightly different + Previously EXPLAIN ANALYZE used slightly different snapshot timing for queries involving rules. The - EXPLAIN ANALYZE behavior was judged to be more logical. + EXPLAIN ANALYZE behavior was judged to be more logical. @@ -9962,7 +9962,7 @@ Branch: REL9_0_STABLE [9d6af7367] 2015-08-15 11:02:34 -0400 Previously collation (the sort ordering of text strings) could only be chosen at database creation. Collation can now be set per column, domain, index, or - expression, via the SQL-standard COLLATE clause. + expression, via the SQL-standard COLLATE clause. @@ -9980,30 +9980,30 @@ Branch: REL9_0_STABLE [9d6af7367] 2015-08-15 11:02:34 -0400 Add extensions which - simplify packaging of additions to PostgreSQL + simplify packaging of additions to PostgreSQL (Dimitri Fontaine, Tom Lane) Extensions are controlled by the new CREATE/ALTER/DROP EXTENSION + linkend="sql-createextension">CREATE/ALTER/DROP EXTENSION commands. This replaces ad-hoc methods of grouping objects that - are added to a PostgreSQL installation. + are added to a PostgreSQL installation. 
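A small example of a data-modifying WITH clause feeding RETURNING rows into the containing statement, as described above (table and column names are hypothetical):

  WITH moved AS (
      DELETE FROM events
      WHERE created_at < now() - interval '1 year'
      RETURNING *
  )
  INSERT INTO events_archive SELECT * FROM moved;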
- Add support for foreign + Add support for foreign tables (Shigeru Hanada, Robert Haas, Jan Urbanski, Heikki Linnakangas) This allows data stored outside the database to be used like - native PostgreSQL-stored data. Foreign tables + native PostgreSQL-stored data. Foreign tables are currently read-only, however. @@ -10011,15 +10011,15 @@ Branch: REL9_0_STABLE [9d6af7367] 2015-08-15 11:02:34 -0400 Allow new values to be added to an existing enum type via - ALTER TYPE (Andrew + ALTER TYPE (Andrew Dunstan) - Add ALTER TYPE ... - ADD/DROP/ALTER/RENAME ATTRIBUTE (Peter Eisentraut) + Add ALTER TYPE ... + ADD/DROP/ALTER/RENAME ATTRIBUTE (Peter Eisentraut) @@ -10030,28 +10030,28 @@ Branch: REL9_0_STABLE [9d6af7367] 2015-08-15 11:02:34 -0400 - <command>ALTER</> Object + <command>ALTER</command> Object - Add RESTRICT/CASCADE to ALTER TYPE operations + Add RESTRICT/CASCADE to ALTER TYPE operations on typed tables (Peter Eisentraut) This controls - ADD/DROP/ALTER/RENAME - ATTRIBUTE cascading behavior. + ADD/DROP/ALTER/RENAME + ATTRIBUTE cascading behavior. - Support ALTER TABLE name {OF | NOT OF} - type + Support ALTER TABLE name {OF | NOT OF} + type (Noah Misch) @@ -10064,7 +10064,7 @@ Branch: REL9_0_STABLE [9d6af7367] 2015-08-15 11:02:34 -0400 Add support for more object types in ALTER ... SET - SCHEMA commands (Dimitri Fontaine) + SCHEMA commands (Dimitri Fontaine) @@ -10079,7 +10079,7 @@ Branch: REL9_0_STABLE [9d6af7367] 2015-08-15 11:02:34 -0400 - <link linkend="SQL-CREATETABLE"><command>CREATE/ALTER TABLE</></link> + <link linkend="sql-createtable"><command>CREATE/ALTER TABLE</command></link> @@ -10098,13 +10098,13 @@ Branch: REL9_0_STABLE [9d6af7367] 2015-08-15 11:02:34 -0400 - Allow ALTER TABLE + Allow ALTER TABLE to add foreign keys without validation (Simon Riggs) - The new option is called NOT VALID. The constraint's - state can later be modified to VALIDATED and validation + The new option is called NOT VALID. The constraint's + state can later be modified to VALIDATED and validation checks performed. Together these allow you to add a foreign key with minimal impact on read and write operations. @@ -10112,23 +10112,23 @@ Branch: REL9_0_STABLE [9d6af7367] 2015-08-15 11:02:34 -0400 - Allow ALTER TABLE + Allow ALTER TABLE ... SET DATA TYPE to avoid table rewrites in appropriate cases (Noah Misch, Robert Haas) - For example, converting a varchar column to - text no longer requires a rewrite of the table. + For example, converting a varchar column to + text no longer requires a rewrite of the table. However, increasing the length constraint on a - varchar column still requires a table rewrite. + varchar column still requires a table rewrite. - Add CREATE TABLE IF - NOT EXISTS syntax (Robert Haas) + Add CREATE TABLE IF + NOT EXISTS syntax (Robert Haas) @@ -10162,8 +10162,8 @@ Branch: REL9_0_STABLE [9d6af7367] 2015-08-15 11:02:34 -0400 - Add a SECURITY - LABEL command (KaiGai Kohei) + Add a SECURITY + LABEL command (KaiGai Kohei) @@ -10196,8 +10196,8 @@ Branch: REL9_0_STABLE [9d6af7367] 2015-08-15 11:02:34 -0400 - Make TRUNCATE ... RESTART - IDENTITY restart sequences transactionally (Steve + Make TRUNCATE ... 
RESTART + IDENTITY restart sequences transactionally (Steve Singer) @@ -10211,26 +10211,26 @@ Branch: REL9_0_STABLE [9d6af7367] 2015-08-15 11:02:34 -0400 - <link linkend="SQL-COPY"><command>COPY</></link> + <link linkend="sql-copy"><command>COPY</command></link> - Add ENCODING option to COPY TO/FROM (Hitoshi + Add ENCODING option to COPY TO/FROM (Hitoshi Harada, Itagaki Takahiro) - This allows the encoding of the COPY file to be + This allows the encoding of the COPY file to be specified separately from client encoding. - Add bidirectional COPY + Add bidirectional COPY protocol support (Fujii Masao) @@ -10244,13 +10244,13 @@ Branch: REL9_0_STABLE [9d6af7367] 2015-08-15 11:02:34 -0400 - <link linkend="SQL-EXPLAIN"><command>EXPLAIN</></link> + <link linkend="sql-explain"><command>EXPLAIN</command></link> - Make EXPLAIN VERBOSE show the function call expression + Make EXPLAIN VERBOSE show the function call expression in a FunctionScan node (Tom Lane) @@ -10260,21 +10260,21 @@ Branch: REL9_0_STABLE [9d6af7367] 2015-08-15 11:02:34 -0400 - <link linkend="SQL-VACUUM"><command>VACUUM</></link> + <link linkend="sql-vacuum"><command>VACUUM</command></link> Add additional details to the output of VACUUM FULL VERBOSE - and CLUSTER VERBOSE + linkend="sql-vacuum">VACUUM FULL VERBOSE + and CLUSTER VERBOSE (Itagaki Takahiro) New information includes the live and dead tuple count and - whether CLUSTER is using an index to rebuild. + whether CLUSTER is using an index to rebuild. @@ -10294,13 +10294,13 @@ Branch: REL9_0_STABLE [9d6af7367] 2015-08-15 11:02:34 -0400 - <link linkend="SQL-CLUSTER"><command>CLUSTER</></link> + <link linkend="sql-cluster"><command>CLUSTER</command></link> - Allow CLUSTER to sort the table rather than scanning + Allow CLUSTER to sort the table rather than scanning the index when it seems likely to be cheaper (Leonardo Francalanci) @@ -10317,12 +10317,12 @@ Branch: REL9_0_STABLE [9d6af7367] 2015-08-15 11:02:34 -0400 Add nearest-neighbor (order-by-operator) searching to GiST indexes (Teodor Sigaev, Tom Lane) + linkend="gist">GiST indexes (Teodor Sigaev, Tom Lane) - This allows GiST indexes to quickly return the - N closest values in a query with LIMIT. + This allows GiST indexes to quickly return the + N closest values in a query with LIMIT. For example point '(101,456)' LIMIT 10; @@ -10334,19 +10334,19 @@ SELECT * FROM places ORDER BY location <-> point '(101,456)' LIMIT 10; - Allow GIN indexes to index null + Allow GIN indexes to index null and empty values (Tom Lane) - This allows full GIN index scans, and fixes various + This allows full GIN index scans, and fixes various corner cases in which GIN scans would fail. - Allow GIN indexes to + Allow GIN indexes to better recognize duplicate search entries (Tom Lane) @@ -10358,12 +10358,12 @@ SELECT * FROM places ORDER BY location <-> point '(101,456)' LIMIT 10; - Fix GiST indexes to be fully + Fix GiST indexes to be fully crash-safe (Heikki Linnakangas) - Previously there were rare cases where a REINDEX + Previously there were rare cases where a REINDEX would be required (you would be informed). @@ -10381,19 +10381,19 @@ SELECT * FROM places ORDER BY location <-> point '(101,456)' LIMIT 10; - Allow numeric to use a more compact, two-byte header + Allow numeric to use a more compact, two-byte header in common cases (Robert Haas) - Previously all numeric values had four-byte headers; + Previously all numeric values had four-byte headers; this change saves on disk storage. 
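For the COPY ENCODING option noted above, a usage sketch with a hypothetical table and file path:

  COPY staging_orders FROM '/tmp/orders_latin1.csv'
      WITH (FORMAT csv, ENCODING 'LATIN1');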
- Add support for dividing money by money + Add support for dividing money by money (Andy Balholm) @@ -10431,9 +10431,9 @@ SELECT * FROM places ORDER BY location <-> point '(101,456)' LIMIT 10; - This avoids possible could not identify a comparison function + This avoids possible could not identify a comparison function failures at runtime, if it is possible to implement the query without - sorting. Also, ANALYZE won't try to use inappropriate + sorting. Also, ANALYZE won't try to use inappropriate statistics-gathering methods for columns of such composite types. @@ -10447,15 +10447,15 @@ SELECT * FROM places ORDER BY location <-> point '(101,456)' LIMIT 10; - Add support for casting between money and numeric + Add support for casting between money and numeric (Andy Balholm) - Add support for casting from int4 and int8 - to money (Joey Adams) + Add support for casting from int4 and int8 + to money (Joey Adams) @@ -10476,15 +10476,15 @@ SELECT * FROM places ORDER BY location <-> point '(101,456)' LIMIT 10; - <link linkend="functions-xml"><acronym>XML</></link> + <link linkend="functions-xml"><acronym>XML</acronym></link> - Add XML function XMLEXISTS and xpath_exists() + Add XML function XMLEXISTS and xpath_exists() functions (Mike Fowler) @@ -10495,17 +10495,17 @@ SELECT * FROM places ORDER BY location <-> point '(101,456)' LIMIT 10; - Add XML functions xml_is_well_formed(), + Add XML functions xml_is_well_formed(), xml_is_well_formed_document(), + linkend="xml-is-well-formed">xml_is_well_formed_document(), xml_is_well_formed_content() + linkend="xml-is-well-formed">xml_is_well_formed_content() (Mike Fowler) - These check whether the input is properly-formed XML. + These check whether the input is properly-formed XML. They provide functionality that was previously available only in the deprecated contrib/xml2 module. 
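A quick illustration of the new XML well-formedness checks mentioned above:

  SELECT xml_is_well_formed('<greeting>hello</greeting>');   -- true
  SELECT xml_is_well_formed('<greeting>hello</closing>');    -- false: mismatched tags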
@@ -10525,8 +10525,8 @@ SELECT * FROM places ORDER BY location <-> point '(101,456)' LIMIT 10; Add SQL function format(text, ...), which - behaves analogously to C's printf() (Pavel Stehule, + linkend="format">format(text, ...), which + behaves analogously to C's printf() (Pavel Stehule, Robert Haas) @@ -10539,13 +10539,13 @@ SELECT * FROM places ORDER BY location <-> point '(101,456)' LIMIT 10; Add string functions concat(), + linkend="functions-string-other">concat(), concat_ws(), - left(), - right(), + linkend="functions-string-other">concat_ws(), + left(), + right(), and reverse() + linkend="functions-string-other">reverse() (Pavel Stehule) @@ -10557,7 +10557,7 @@ SELECT * FROM places ORDER BY location <-> point '(101,456)' LIMIT 10; Add function pg_read_binary_file() + linkend="functions-admin-genfile">pg_read_binary_file() to read binary files (Dimitri Fontaine, Itagaki Takahiro) @@ -10565,7 +10565,7 @@ SELECT * FROM places ORDER BY location <-> point '(101,456)' LIMIT 10; Add a single-parameter version of function pg_read_file() + linkend="functions-admin-genfile">pg_read_file() to read an entire file (Dimitri Fontaine, Itagaki Takahiro) @@ -10573,9 +10573,9 @@ SELECT * FROM places ORDER BY location <-> point '(101,456)' LIMIT 10; Add three-parameter forms of array_to_string() + linkend="array-functions-table">array_to_string() and string_to_array() + linkend="array-functions-table">string_to_array() for null value processing control (Pavel Stehule) @@ -10590,7 +10590,7 @@ SELECT * FROM places ORDER BY location <-> point '(101,456)' LIMIT 10; Add the pg_describe_object() + linkend="functions-info-catalog-table">pg_describe_object() function (Alvaro Herrera) @@ -10619,10 +10619,10 @@ SELECT * FROM places ORDER BY location <-> point '(101,456)' LIMIT 10; Add variable quote_all_identifiers - to force the quoting of all identifiers in EXPLAIN + linkend="guc-quote-all-identifiers">quote_all_identifiers + to force the quoting of all identifiers in EXPLAIN and in system catalog functions like pg_get_viewdef() + linkend="functions-info-catalog-table">pg_get_viewdef() (Robert Haas) @@ -10635,7 +10635,7 @@ SELECT * FROM places ORDER BY location <-> point '(101,456)' LIMIT 10; Add columns to the information_schema.sequences + linkend="infoschema-sequences">information_schema.sequences system view (Peter Eisentraut) @@ -10647,8 +10647,8 @@ SELECT * FROM places ORDER BY location <-> point '(101,456)' LIMIT 10; - Allow public as a pseudo-role name in has_table_privilege() + Allow public as a pseudo-role name in has_table_privilege() and related functions (Alvaro Herrera) @@ -10668,8 +10668,8 @@ SELECT * FROM places ORDER BY location <-> point '(101,456)' LIMIT 10; - Support INSTEAD - OF triggers on views (Dean Rasheed) + Support INSTEAD + OF triggers on views (Dean Rasheed) @@ -10694,7 +10694,7 @@ SELECT * FROM places ORDER BY location <-> point '(101,456)' LIMIT 10; Add FOREACH IN - ARRAY to PL/pgSQL + ARRAY to PL/pgSQL (Pavel Stehule) @@ -10734,7 +10734,7 @@ SELECT * FROM places ORDER BY location <-> point '(101,456)' LIMIT 10; - PL/Perl functions can now be declared to accept type record. + PL/Perl functions can now be declared to accept type record. The behavior is the same as for any named composite type. @@ -10776,7 +10776,7 @@ SELECT * FROM places ORDER BY location <-> point '(101,456)' LIMIT 10; - PL/Python can now return multiple OUT parameters + PL/Python can now return multiple OUT parameters and record sets. 
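Small usage sketches for format() and the three-parameter form of array_to_string() mentioned above:

  SELECT format('Hello, %s! You have %s unread messages.', 'world', 3);
  SELECT array_to_string(ARRAY['a', NULL, 'c'], ',', '(null)');   -- 'a,(null),c'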
@@ -10816,10 +10816,10 @@ SELECT * FROM places ORDER BY location <-> point '(101,456)' LIMIT 10; These functions are plpy.quote_ident, - plpy.quote_literal, + linkend="plpython-util">plpy.quote_ident, + plpy.quote_literal, and plpy.quote_nullable. + linkend="plpython-util">plpy.quote_nullable. @@ -10831,7 +10831,7 @@ SELECT * FROM places ORDER BY location <-> point '(101,456)' LIMIT 10; - Report PL/Python errors from iterators with PLy_elog (Jan + Report PL/Python errors from iterators with PLy_elog (Jan Urbanski) @@ -10843,7 +10843,7 @@ SELECT * FROM places ORDER BY location <-> point '(101,456)' LIMIT 10; Exception classes were previously not available in - plpy under Python 3. + plpy under Python 3. @@ -10860,7 +10860,7 @@ SELECT * FROM places ORDER BY location <-> point '(101,456)' LIMIT 10; - Mark createlang and droplang + Mark createlang and droplang as deprecated now that they just invoke extension commands (Tom Lane) @@ -10869,64 +10869,64 @@ SELECT * FROM places ORDER BY location <-> point '(101,456)' LIMIT 10; - <link linkend="APP-PSQL"><application>psql</></link> + <link linkend="app-psql"><application>psql</application></link> - Add psql command \conninfo + Add psql command \conninfo to show current connection information (David Christensen) - Add psql command \sf to + Add psql command \sf to show a function's definition (Pavel Stehule) - Add psql command \dL to list + Add psql command \dL to list languages (Fernando Ike) - Add the - \dn without S now suppresses system + \dn without S now suppresses system schemas. - Allow psql's \e and \ef + Allow psql's \e and \ef commands to accept a line number to be used to position the cursor in the editor (Pavel Stehule) This is passed to the editor according to the - PSQL_EDITOR_LINENUMBER_ARG environment variable. + PSQL_EDITOR_LINENUMBER_ARG environment variable. - Have psql set the client encoding from the + Have psql set the client encoding from the operating system locale by default (Heikki Linnakangas) - This only happens if the PGCLIENTENCODING environment + This only happens if the PGCLIENTENCODING environment variable is not set. @@ -10940,8 +10940,8 @@ SELECT * FROM places ORDER BY location <-> point '(101,456)' LIMIT 10; - Make \dt+ report pg_table_size - instead of pg_relation_size when talking to 9.0 or + Make \dt+ report pg_table_size + instead of pg_relation_size when talking to 9.0 or later servers (Bernd Helmle) @@ -10963,29 +10963,29 @@ SELECT * FROM places ORDER BY location <-> point '(101,456)' LIMIT 10; - <link linkend="APP-PGDUMP"><application>pg_dump</></link> + <link linkend="app-pgdump"><application>pg_dump</application></link> - Add pg_dump + Add pg_dump and pg_dumpall - option to force quoting of all identifiers (Robert Haas) - Add directory format to pg_dump + Add directory format to pg_dump (Joachim Wieland, Heikki Linnakangas) - This is internally similar to the tar - pg_dump format. + This is internally similar to the tar + pg_dump format. @@ -10994,27 +10994,27 @@ SELECT * FROM places ORDER BY location <-> point '(101,456)' LIMIT 10; - <link linkend="APP-PG-CTL"><application>pg_ctl</></link> + <link linkend="app-pg-ctl"><application>pg_ctl</application></link> - Fix pg_ctl + Fix pg_ctl so it no longer incorrectly reports that the server is not running (Bruce Momjian) Previously this could happen if the server was running but - pg_ctl could not authenticate. + pg_ctl could not authenticate. 
- Improve pg_ctl start's wait - () option (Bruce Momjian, Tom Lane) @@ -11027,7 +11027,7 @@ SELECT * FROM places ORDER BY location <-> point '(101,456)' LIMIT 10; - Add promote option to pg_ctl to + Add promote option to pg_ctl to switch a standby server to primary (Fujii Masao) @@ -11039,23 +11039,23 @@ SELECT * FROM places ORDER BY location <-> point '(101,456)' LIMIT 10; - <application>Development Tools</> + <application>Development Tools</application> - <link linkend="libpq"><application>libpq</></link> + <link linkend="libpq"><application>libpq</application></link> Add a libpq connection option client_encoding - which behaves like the PGCLIENTENCODING environment + linkend="libpq-connect-client-encoding">client_encoding + which behaves like the PGCLIENTENCODING environment variable (Heikki Linnakangas) - The value auto sets the client encoding based on + The value auto sets the client encoding based on the operating system locale. @@ -11063,13 +11063,13 @@ SELECT * FROM places ORDER BY location <-> point '(101,456)' LIMIT 10; Add PQlibVersion() + linkend="libpq-pqlibversion">PQlibVersion() function which returns the libpq library version (Magnus Hagander) - libpq already had PQserverVersion() which returns + libpq already had PQserverVersion() which returns the server version. @@ -11079,22 +11079,22 @@ SELECT * FROM places ORDER BY location <-> point '(101,456)' LIMIT 10; Allow libpq-using clients to check the user name of the server process when connecting via Unix-domain sockets, with the new requirepeer + linkend="libpq-connect-requirepeer">requirepeer connection option (Peter Eisentraut) - PostgreSQL already allowed servers to check + PostgreSQL already allowed servers to check the client user name when connecting via Unix-domain sockets. - Add PQping() + Add PQping() and PQpingParams() + linkend="libpq-pqpingparams">PQpingParams() to libpq (Bruce Momjian, Tom Lane) @@ -11109,7 +11109,7 @@ SELECT * FROM places ORDER BY location <-> point '(101,456)' LIMIT 10; - <link linkend="ecpg"><application>ECPG</></link> + <link linkend="ecpg"><application>ECPG</application></link> @@ -11123,7 +11123,7 @@ SELECT * FROM places ORDER BY location <-> point '(101,456)' LIMIT 10; - Make ecpglib write double values with a + Make ecpglib write double values with a precision of 15 digits, not 14 as formerly (Akira Kurosawa) @@ -11140,7 +11140,7 @@ SELECT * FROM places ORDER BY location <-> point '(101,456)' LIMIT 10; - Use +Olibmerrno compile flag with HP-UX C compilers + Use +Olibmerrno compile flag with HP-UX C compilers that accept it (Ibrar Ahmed) @@ -11163,15 +11163,15 @@ SELECT * FROM places ORDER BY location <-> point '(101,456)' LIMIT 10; - This allows for faster compiles. Also, make -k + This allows for faster compiles. Also, make -k now works more consistently. - Require GNU make + Require GNU make 3.80 or newer (Peter Eisentraut) @@ -11182,7 +11182,7 @@ SELECT * FROM places ORDER BY location <-> point '(101,456)' LIMIT 10; - Add make maintainer-check target + Add make maintainer-check target (Peter Eisentraut) @@ -11195,15 +11195,15 @@ SELECT * FROM places ORDER BY location <-> point '(101,456)' LIMIT 10; - Support make check in contrib + Support make check in contrib (Peter Eisentraut) - Formerly only make installcheck worked, but now + Formerly only make installcheck worked, but now there is support for testing in a temporary installation. - The top-level make check-world target now includes - testing contrib this way. 
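A minimal file_fdw setup matching the description above; the server name, table definition, and file path are all hypothetical:

  CREATE EXTENSION file_fdw;
  CREATE SERVER flat_files FOREIGN DATA WRAPPER file_fdw;
  CREATE FOREIGN TABLE staff_import (
      full_name text,
      phone     text
  ) SERVER flat_files
    OPTIONS (filename '/tmp/staff.csv', format 'csv', header 'true');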
+ The top-level make check-world target now includes + testing contrib this way. @@ -11219,7 +11219,7 @@ SELECT * FROM places ORDER BY location <-> point '(101,456)' LIMIT 10; On Windows, allow pg_ctl to register + linkend="app-pg-ctl">pg_ctl to register the service as auto-start or start-on-demand (Quan Zongliang) @@ -11231,7 +11231,7 @@ SELECT * FROM places ORDER BY location <-> point '(101,456)' LIMIT 10; - minidumps can now be generated by non-debug + minidumps can now be generated by non-debug Windows binaries and analyzed by standard debugging tools. @@ -11287,7 +11287,7 @@ SELECT * FROM places ORDER BY location <-> point '(101,456)' LIMIT 10; - Add missing get_object_oid() functions, for consistency + Add missing get_object_oid() functions, for consistency (Robert Haas) @@ -11302,13 +11302,13 @@ SELECT * FROM places ORDER BY location <-> point '(101,456)' LIMIT 10; - Add support for DragonFly BSD (Rumko) + Add support for DragonFly BSD (Rumko) - Expose quote_literal_cstr() for backend use + Expose quote_literal_cstr() for backend use (Robert Haas) @@ -11321,22 +11321,22 @@ SELECT * FROM places ORDER BY location <-> point '(101,456)' LIMIT 10; Regression tests were previously always run with - SQL_ASCII encoding. + SQL_ASCII encoding. - Add src/tools/git_changelog to replace - cvs2cl and pgcvslog (Robert + Add src/tools/git_changelog to replace + cvs2cl and pgcvslog (Robert Haas, Tom Lane) - Add git-external-diff script to - src/tools (Bruce Momjian) + Add git-external-diff script to + src/tools (Bruce Momjian) @@ -11391,7 +11391,7 @@ SELECT * FROM places ORDER BY location <-> point '(101,456)' LIMIT 10; - Modify contrib modules and procedural + Modify contrib modules and procedural languages to install via the new extension mechanism (Tom Lane, Dimitri Fontaine) @@ -11400,21 +11400,21 @@ SELECT * FROM places ORDER BY location <-> point '(101,456)' LIMIT 10; - Add contrib/file_fdw + Add contrib/file_fdw foreign-data wrapper (Shigeru Hanada) Foreign tables using this foreign data wrapper can read flat files - in a manner very similar to COPY. + in a manner very similar to COPY. 
Add nearest-neighbor search support to contrib/pg_trgm and contrib/btree_gist + linkend="pgtrgm">contrib/pg_trgm and contrib/btree_gist (Teodor Sigaev) @@ -11422,7 +11422,7 @@ SELECT * FROM places ORDER BY location <-> point '(101,456)' LIMIT 10; Add contrib/btree_gist + linkend="btree-gist">contrib/btree_gist support for searching on not-equals (Jeff Davis) @@ -11430,25 +11430,25 @@ SELECT * FROM places ORDER BY location <-> point '(101,456)' LIMIT 10; Fix contrib/fuzzystrmatch's - levenshtein() function to handle multibyte characters + linkend="fuzzystrmatch">contrib/fuzzystrmatch's + levenshtein() function to handle multibyte characters (Alexander Korotkov) - Add ssl_cipher() and ssl_version() + Add ssl_cipher() and ssl_version() functions to contrib/sslinfo (Robert + linkend="sslinfo">contrib/sslinfo (Robert Haas) - Fix contrib/intarray - and contrib/hstore + Fix contrib/intarray + and contrib/hstore to give consistent results with indexed empty arrays (Tom Lane) @@ -11460,7 +11460,7 @@ SELECT * FROM places ORDER BY location <-> point '(101,456)' LIMIT 10; - Allow contrib/intarray + Allow contrib/intarray to work properly on multidimensional arrays (Tom Lane) @@ -11468,7 +11468,7 @@ SELECT * FROM places ORDER BY location <-> point '(101,456)' LIMIT 10; In - contrib/intarray, + contrib/intarray, avoid errors complaining about the presence of nulls in cases where no nulls are actually present (Tom Lane) @@ -11477,7 +11477,7 @@ SELECT * FROM places ORDER BY location <-> point '(101,456)' LIMIT 10; In - contrib/intarray, + contrib/intarray, fix behavior of containment operators with respect to empty arrays (Tom Lane) @@ -11490,10 +11490,10 @@ SELECT * FROM places ORDER BY location <-> point '(101,456)' LIMIT 10; - Remove contrib/xml2's + Remove contrib/xml2's arbitrary limit on the number of - parameter=value pairs that can be - handled by xslt_process() (Pavel Stehule) + parameter=value pairs that can be + handled by xslt_process() (Pavel Stehule) @@ -11503,7 +11503,7 @@ SELECT * FROM places ORDER BY location <-> point '(101,456)' LIMIT 10; - In contrib/pageinspect, + In contrib/pageinspect, fix heap_page_item to return infomasks as 32-bit values (Alvaro Herrera) @@ -11522,13 +11522,13 @@ SELECT * FROM places ORDER BY location <-> point '(101,456)' LIMIT 10; - Add contrib/sepgsql - to interface permission checks with SELinux (KaiGai Kohei) + Add contrib/sepgsql + to interface permission checks with SELinux (KaiGai Kohei) This uses the new SECURITY LABEL + linkend="sql-security-label">SECURITY LABEL facility. 
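A sketch of the nearest-neighbor text searching enabled by contrib/pg_trgm, using a hypothetical words table (assumes a 9.1-or-later server where CREATE EXTENSION is available):

  CREATE EXTENSION pg_trgm;
  CREATE INDEX words_trgm_idx ON words USING gist (word gist_trgm_ops);
  SELECT word FROM words ORDER BY word <-> 'postgress' LIMIT 5;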
@@ -11536,7 +11536,7 @@ SELECT * FROM places ORDER BY location <-> point '(101,456)' LIMIT 10; Add contrib module auth_delay (KaiGai + linkend="auth-delay">auth_delay (KaiGai Kohei) @@ -11549,7 +11549,7 @@ SELECT * FROM places ORDER BY location <-> point '(101,456)' LIMIT 10; - Add dummy_seclabel + Add dummy_seclabel contrib module (KaiGai Kohei) @@ -11569,17 +11569,17 @@ SELECT * FROM places ORDER BY location <-> point '(101,456)' LIMIT 10; - Add support for LIKE and ILIKE index + Add support for LIKE and ILIKE index searches to contrib/pg_trgm (Alexander + linkend="pgtrgm">contrib/pg_trgm (Alexander Korotkov) - Add levenshtein_less_equal() function to contrib/fuzzystrmatch, + Add levenshtein_less_equal() function to contrib/fuzzystrmatch, which is optimized for small distances (Alexander Korotkov) @@ -11587,7 +11587,7 @@ SELECT * FROM places ORDER BY location <-> point '(101,456)' LIMIT 10; Improve performance of index lookups on contrib/seg columns (Alexander + linkend="seg">contrib/seg columns (Alexander Korotkov) @@ -11595,7 +11595,7 @@ SELECT * FROM places ORDER BY location <-> point '(101,456)' LIMIT 10; Improve performance of pg_upgrade for + linkend="pgupgrade">pg_upgrade for databases with many relations (Bruce Momjian) @@ -11603,7 +11603,7 @@ SELECT * FROM places ORDER BY location <-> point '(101,456)' LIMIT 10; Add flag to contrib/pgbench to + linkend="pgbench">contrib/pgbench to report per-statement latencies (Florian Pflug) @@ -11619,29 +11619,29 @@ SELECT * FROM places ORDER BY location <-> point '(101,456)' LIMIT 10; - Move src/tools/test_fsync to contrib/pg_test_fsync + Move src/tools/test_fsync to contrib/pg_test_fsync (Bruce Momjian, Tom Lane) - Add O_DIRECT support to contrib/pg_test_fsync + Add O_DIRECT support to contrib/pg_test_fsync (Bruce Momjian) - This matches the use of O_DIRECT by wal_sync_method. + This matches the use of O_DIRECT by wal_sync_method. 
Add new tests to contrib/pg_test_fsync + linkend="pgtestfsync">contrib/pg_test_fsync (Bruce Momjian) @@ -11659,7 +11659,7 @@ SELECT * FROM places ORDER BY location <-> point '(101,456)' LIMIT 10; - Extensive ECPG + Extensive ECPG documentation improvements (Satoshi Nagayasu) @@ -11674,7 +11674,7 @@ SELECT * FROM places ORDER BY location <-> point '(101,456)' LIMIT 10; Add documentation for exit_on_error + linkend="guc-exit-on-error">exit_on_error (Robert Haas) @@ -11686,7 +11686,7 @@ SELECT * FROM places ORDER BY location <-> point '(101,456)' LIMIT 10; Add documentation for pg_options_to_table() + linkend="functions-info-catalog-table">pg_options_to_table() (Josh Berkus) @@ -11699,7 +11699,7 @@ SELECT * FROM places ORDER BY location <-> point '(101,456)' LIMIT 10; Document that it is possible to access all composite type fields using (compositeval).* + linkend="field-selection">(compositeval).* syntax (Peter Eisentraut) @@ -11707,16 +11707,16 @@ SELECT * FROM places ORDER BY location <-> point '(101,456)' LIMIT 10; Document that translate() - removes characters in from that don't have a - corresponding to character (Josh Kupershmidt) + linkend="functions-string-other">translate() + removes characters in from that don't have a + corresponding to character (Josh Kupershmidt) - Merge documentation for CREATE CONSTRAINT TRIGGER and CREATE TRIGGER + Merge documentation for CREATE CONSTRAINT TRIGGER and CREATE TRIGGER (Alvaro Herrera) @@ -11741,12 +11741,12 @@ SELECT * FROM places ORDER BY location <-> point '(101,456)' LIMIT 10; - Handle non-ASCII characters consistently in HISTORY file + Handle non-ASCII characters consistently in HISTORY file (Peter Eisentraut) - While the HISTORY file is in English, we do have to deal + While the HISTORY file is in English, we do have to deal with non-ASCII letters in contributor names. These are now transliterated so that they are reasonably legible without assumptions about character set. diff --git a/doc/src/sgml/release-9.2.sgml b/doc/src/sgml/release-9.2.sgml index 14fafc0e96..3494ddb5ce 100644 --- a/doc/src/sgml/release-9.2.sgml +++ b/doc/src/sgml/release-9.2.sgml @@ -1,6 +1,376 @@ + + Release 9.2.24 + + + Release date: + 2017-11-09 + + + + This release contains a variety of fixes from 9.2.23. + For information about new features in the 9.2 major release, see + . + + + + This is expected to be the last PostgreSQL + release in the 9.2.X series. Users are encouraged to update to a newer + release branch soon. + + + + Migration to Version 9.2.24 + + + A dump/restore is not required for those running 9.2.X. + + + + However, if you are upgrading from a version earlier than 9.2.22, + see . + + + + + + Changes + + + + + + Fix sample server-start scripts to become $PGUSER + before opening $PGLOG (Noah Misch) + + + + Previously, the postmaster log file was opened while still running as + root. The database owner could therefore mount an attack against + another system user by making $PGLOG be a symbolic + link to some other file, which would then become corrupted by appending + log messages. + + + + By default, these scripts are not installed anywhere. Users who have + made use of them will need to manually recopy them, or apply the same + changes to their modified versions. If the + existing $PGLOG file is root-owned, it will need to + be removed or renamed out of the way before restarting the server with + the corrected script. 
+ (CVE-2017-12172) + + + + + + Properly reject attempts to convert infinite float values to + type numeric (Tom Lane, KaiGai Kohei) + + + + Previously the behavior was platform-dependent. + + + + + + Fix corner-case crashes when columns have been added to the end of a + view (Tom Lane) + + + + + + Record proper dependencies when a view or rule + contains FieldSelect + or FieldStore expression nodes (Tom Lane) + + + + Lack of these dependencies could allow a column or data + type DROP to go through when it ought to fail, + thereby causing later uses of the view or rule to get errors. + This patch does not do anything to protect existing views/rules, + only ones created in the future. + + + + + + Correctly detect hashability of range data types (Tom Lane) + + + + The planner mistakenly assumed that any range type could be hashed + for use in hash joins or hash aggregation, but actually it must check + whether the range's subtype has hash support. This does not affect any + of the built-in range types, since they're all hashable anyway. + + + + + + Fix low-probability loss of NOTIFY messages due to + XID wraparound (Marko Tiikkaja, Tom Lane) + + + + If a session executed no queries, but merely listened for + notifications, for more than 2 billion transactions, it started to miss + some notifications from concurrently-committing transactions. + + + + + + Prevent low-probability crash in processing of nested trigger firings + (Tom Lane) + + + + + + Correctly restore the umask setting when file creation fails + in COPY or lo_export() + (Peter Eisentraut) + + + + + + Give a better error message for duplicate column names + in ANALYZE (Nathan Bossart) + + + + + + Fix libpq to not require user's home + directory to exist (Tom Lane) + + + + In v10, failure to find the home directory while trying to + read ~/.pgpass was treated as a hard error, + but it should just cause that file to not be found. Both v10 and + previous release branches made the same mistake when + reading ~/.pg_service.conf, though this was less + obvious since that file is not sought unless a service name is + specified. + + + + + + Fix libpq to guard against integer + overflow in the row count of a PGresult + (Michael Paquier) + + + + + + Sync our copy of the timezone library with IANA release tzcode2017c + (Tom Lane) + + + + This fixes various issues; the only one likely to be user-visible + is that the default DST rules for a POSIX-style zone name, if + no posixrules file exists in the timezone data + directory, now match current US law rather than what it was a dozen + years ago. + + + + + + Update time zone data files to tzdata + release 2017c for DST law changes in Fiji, Namibia, Northern Cyprus, + Sudan, Tonga, and Turks & Caicos Islands, plus historical + corrections for Alaska, Apia, Burma, Calcutta, Detroit, Ireland, + Namibia, and Pago Pago. + + + + + + + + + + Release 9.2.23 + + + Release date: + 2017-08-31 + + + + This release contains a small number of fixes from 9.2.22. + For information about new features in the 9.2 major release, see + . + + + + The PostgreSQL community will stop releasing updates + for the 9.2.X release series in September 2017. + Users are encouraged to update to a newer release branch soon. + + + + Migration to Version 9.2.23 + + + A dump/restore is not required for those running 9.2.X. + + + + However, if you are upgrading from a version earlier than 9.2.22, + see . 
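To illustrate the numeric-infinity change listed under 9.2.24 above (a behavior sketch only; the exact error text may vary by version):

  SELECT 'Infinity'::float8::numeric;
  -- with the fix this consistently raises an error such as
  -- "cannot convert infinity to numeric" instead of giving platform-dependent results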
+ + + + + + Changes + + + + + + Show foreign tables + in information_schema.table_privileges + view (Peter Eisentraut) + + + + All other relevant information_schema views include + foreign tables, but this one ignored them. + + + + Since this view definition is installed by initdb, + merely upgrading will not fix the problem. If you need to fix this + in an existing installation, you can, as a superuser, do this + in psql: + +SET search_path TO information_schema; +CREATE OR REPLACE VIEW table_privileges AS + SELECT CAST(u_grantor.rolname AS sql_identifier) AS grantor, + CAST(grantee.rolname AS sql_identifier) AS grantee, + CAST(current_database() AS sql_identifier) AS table_catalog, + CAST(nc.nspname AS sql_identifier) AS table_schema, + CAST(c.relname AS sql_identifier) AS table_name, + CAST(c.prtype AS character_data) AS privilege_type, + CAST( + CASE WHEN + -- object owner always has grant options + pg_has_role(grantee.oid, c.relowner, 'USAGE') + OR c.grantable + THEN 'YES' ELSE 'NO' END AS yes_or_no) AS is_grantable, + CAST(CASE WHEN c.prtype = 'SELECT' THEN 'YES' ELSE 'NO' END AS yes_or_no) AS with_hierarchy + + FROM ( + SELECT oid, relname, relnamespace, relkind, relowner, (aclexplode(coalesce(relacl, acldefault('r', relowner)))).* FROM pg_class + ) AS c (oid, relname, relnamespace, relkind, relowner, grantor, grantee, prtype, grantable), + pg_namespace nc, + pg_authid u_grantor, + ( + SELECT oid, rolname FROM pg_authid + UNION ALL + SELECT 0::oid, 'PUBLIC' + ) AS grantee (oid, rolname) + + WHERE c.relnamespace = nc.oid + AND c.relkind IN ('r', 'v', 'f') + AND c.grantee = grantee.oid + AND c.grantor = u_grantor.oid + AND c.prtype IN ('INSERT', 'SELECT', 'UPDATE', 'DELETE', 'TRUNCATE', 'REFERENCES', 'TRIGGER') + AND (pg_has_role(u_grantor.oid, 'USAGE') + OR pg_has_role(grantee.oid, 'USAGE') + OR grantee.rolname = 'PUBLIC'); + + This must be repeated in each database to be fixed, + including template0. + + + + + + Clean up handling of a fatal exit (e.g., due to receipt + of SIGTERM) that occurs while trying to execute + a ROLLBACK of a failed transaction (Tom Lane) + + + + This situation could result in an assertion failure. In production + builds, the exit would still occur, but it would log an unexpected + message about cannot drop active portal. + + + + + + Remove assertion that could trigger during a fatal exit (Tom Lane) + + + + + + Correctly identify columns that are of a range type or domain type over + a composite type or domain type being searched for (Tom Lane) + + + + Certain ALTER commands that change the definition of a + composite type or domain type are supposed to fail if there are any + stored values of that type in the database, because they lack the + infrastructure needed to update or check such values. Previously, + these checks could miss relevant values that are wrapped inside range + types or sub-domains, possibly allowing the database to become + inconsistent. + + + + + + Change ecpg's parser to allow RETURNING + clauses without attached C variables (Michael Meskes) + + + + This allows ecpg programs to contain SQL constructs + that use RETURNING internally (for example, inside a CTE) + rather than using it to define values to be returned to the client. + + + + + + Improve selection of compiler flags for PL/Perl on Windows (Tom Lane) + + + + This fix avoids possible crashes of PL/Perl due to inconsistent + assumptions about the width of time_t values. 
+ A side-effect that may be visible to extension developers is + that _USE_32BIT_TIME_T is no longer defined globally + in PostgreSQL Windows builds. This is not expected + to cause problems, because type time_t is not used + in any PostgreSQL API definitions. + + + + + + + + Release 9.2.22 @@ -12,11 +382,11 @@ This release contains a variety of fixes from 9.2.21. For information about new features in the 9.2 major release, see - . + . - The PostgreSQL community will stop releasing updates + The PostgreSQL community will stop releasing updates for the 9.2.X release series in September 2017. Users are encouraged to update to a newer release branch soon. @@ -35,7 +405,7 @@ Also, if you are upgrading from a version earlier than 9.2.20, - see . + see . @@ -48,7 +418,7 @@ Further restrict visibility - of pg_user_mappings.umoptions, to + of pg_user_mappings.umoptions, to protect passwords stored as user mapping options (Noah Misch) @@ -56,11 +426,11 @@ The fix for CVE-2017-7486 was incorrect: it allowed a user to see the options in her own user mapping, even if she did not - have USAGE permission on the associated foreign server. + have USAGE permission on the associated foreign server. Such options might include a password that had been provided by the server owner rather than the user herself. - Since information_schema.user_mapping_options does not - show the options in such cases, pg_user_mappings + Since information_schema.user_mapping_options does not + show the options in such cases, pg_user_mappings should not either. (CVE-2017-7547) @@ -75,15 +445,15 @@ Restart the postmaster after adding allow_system_table_mods - = true to postgresql.conf. (In versions - supporting ALTER SYSTEM, you can use that to make the + = true to postgresql.conf. (In versions + supporting ALTER SYSTEM, you can use that to make the configuration change, but you'll still need a restart.) - In each database of the cluster, + In each database of the cluster, run the following commands as superuser: SET search_path = pg_catalog; @@ -114,15 +484,15 @@ CREATE OR REPLACE VIEW pg_user_mappings AS - Do not forget to include the template0 - and template1 databases, or the vulnerability will still - exist in databases you create later. To fix template0, + Do not forget to include the template0 + and template1 databases, or the vulnerability will still + exist in databases you create later. To fix template0, you'll need to temporarily make it accept connections. - In PostgreSQL 9.5 and later, you can use + In PostgreSQL 9.5 and later, you can use ALTER DATABASE template0 WITH ALLOW_CONNECTIONS true; - and then after fixing template0, undo that with + and then after fixing template0, undo that with ALTER DATABASE template0 WITH ALLOW_CONNECTIONS false; @@ -136,7 +506,7 @@ UPDATE pg_database SET datallowconn = false WHERE datname = 'template0'; - Finally, remove the allow_system_table_mods configuration + Finally, remove the allow_system_table_mods configuration setting, and again restart the postmaster. @@ -150,16 +520,16 @@ UPDATE pg_database SET datallowconn = false WHERE datname = 'template0'; - libpq ignores empty password specifications, and does + libpq ignores empty password specifications, and does not transmit them to the server. So, if a user's password has been set to the empty string, it's impossible to log in with that password - via psql or other libpq-based + via psql or other libpq-based clients. 
An administrator might therefore believe that setting the password to empty is equivalent to disabling password login. - However, with a modified or non-libpq-based client, + However, with a modified or non-libpq-based client, logging in could be possible, depending on which authentication method is configured. In particular the most common - method, md5, accepted empty passwords. + method, md5, accepted empty passwords. Change the server to reject empty passwords in all cases. (CVE-2017-7546) @@ -205,7 +575,7 @@ UPDATE pg_database SET datallowconn = false WHERE datname = 'template0'; - Fix code for setting on + Fix code for setting on Solaris (Tom Lane) @@ -237,28 +607,28 @@ UPDATE pg_database SET datallowconn = false WHERE datname = 'template0'; Fix possible creation of an invalid WAL segment when a standby is - promoted just after it processes an XLOG_SWITCH WAL + promoted just after it processes an XLOG_SWITCH WAL record (Andres Freund) - Fix SIGHUP and SIGUSR1 handling in + Fix SIGHUP and SIGUSR1 handling in walsender processes (Petr Jelinek, Andres Freund) - Fix unnecessarily slow restarts of walreceiver + Fix unnecessarily slow restarts of walreceiver processes due to race condition in postmaster (Tom Lane) - Fix cases where an INSERT or UPDATE assigns + Fix cases where an INSERT or UPDATE assigns to more than one element of a column that is of domain-over-array type (Tom Lane) @@ -267,56 +637,56 @@ UPDATE pg_database SET datallowconn = false WHERE datname = 'template0'; Move autogenerated array types out of the way during - ALTER ... RENAME (Vik Fearing) + ALTER ... RENAME (Vik Fearing) Previously, we would rename a conflicting autogenerated array type - out of the way during CREATE; this fix extends that + out of the way during CREATE; this fix extends that behavior to renaming operations. - Ensure that ALTER USER ... SET accepts all the syntax - variants that ALTER ROLE ... SET does (Peter Eisentraut) + Ensure that ALTER USER ... SET accepts all the syntax + variants that ALTER ROLE ... SET does (Peter Eisentraut) Properly update dependency info when changing a datatype I/O - function's argument or return type from opaque to the + function's argument or return type from opaque to the correct type (Heikki Linnakangas) - CREATE TYPE updates I/O functions declared in this + CREATE TYPE updates I/O functions declared in this long-obsolete style, but it forgot to record a dependency on the - type, allowing a subsequent DROP TYPE to leave broken + type, allowing a subsequent DROP TYPE to leave broken function definitions behind. 
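An illustrative, purely hypothetical case of the domain-over-array assignment fix mentioned above (all names invented for the example):

  CREATE DOMAIN int_list AS integer[];
  CREATE TABLE samples (vals int_list);
  INSERT INTO samples VALUES (ARRAY[1, 2, 3]);
  UPDATE samples SET vals[1] = 10, vals[2] = 20;  -- assigns to several elements of a domain-over-array column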
- Reduce memory usage when ANALYZE processes - a tsvector column (Heikki Linnakangas) + Reduce memory usage when ANALYZE processes + a tsvector column (Heikki Linnakangas) Fix unnecessary precision loss and sloppy rounding when multiplying - or dividing money values by integers or floats (Tom Lane) + or dividing money values by integers or floats (Tom Lane) Tighten checks for whitespace in functions that parse identifiers, - such as regprocedurein() (Tom Lane) + such as regprocedurein() (Tom Lane) @@ -327,22 +697,22 @@ UPDATE pg_database SET datallowconn = false WHERE datname = 'template0'; - Use relevant #define symbols from Perl while - compiling PL/Perl (Ashutosh Sharma, Tom Lane) + Use relevant #define symbols from Perl while + compiling PL/Perl (Ashutosh Sharma, Tom Lane) This avoids portability problems, typically manifesting as - a handshake mismatch during library load, when working with + a handshake mismatch during library load, when working with recent Perl versions. - In psql, fix failure when COPY FROM STDIN + In psql, fix failure when COPY FROM STDIN is ended with a keyboard EOF signal and then another COPY - FROM STDIN is attempted (Thomas Munro) + FROM STDIN is attempted (Thomas Munro) @@ -353,14 +723,14 @@ UPDATE pg_database SET datallowconn = false WHERE datname = 'template0'; - Fix pg_dump to not emit invalid SQL for an empty + Fix pg_dump to not emit invalid SQL for an empty operator class (Daniel Gustafsson) - Fix pg_dump output to stdout on Windows (Kuntal Ghosh) + Fix pg_dump output to stdout on Windows (Kuntal Ghosh) @@ -371,21 +741,21 @@ UPDATE pg_database SET datallowconn = false WHERE datname = 'template0'; - Fix pg_get_ruledef() to print correct output for - the ON SELECT rule of a view whose columns have been + Fix pg_get_ruledef() to print correct output for + the ON SELECT rule of a view whose columns have been renamed (Tom Lane) - In some corner cases, pg_dump relies - on pg_get_ruledef() to dump views, so that this error + In some corner cases, pg_dump relies + on pg_get_ruledef() to dump views, so that this error could result in dump/reload failures. - Fix dumping of function expressions in the FROM clause in + Fix dumping of function expressions in the FROM clause in cases where the expression does not deparse into something that looks like a function call (Tom Lane) @@ -393,7 +763,7 @@ UPDATE pg_database SET datallowconn = false WHERE datname = 'template0'; - Fix pg_basebackup output to stdout on Windows + Fix pg_basebackup output to stdout on Windows (Haribabu Kommi) @@ -405,8 +775,8 @@ UPDATE pg_database SET datallowconn = false WHERE datname = 'template0'; - Fix pg_upgrade to ensure that the ending WAL record - does not have = minimum + Fix pg_upgrade to ensure that the ending WAL record + does not have = minimum (Bruce Momjian) @@ -418,7 +788,7 @@ UPDATE pg_database SET datallowconn = false WHERE datname = 'template0'; - Always use , not , when building shared libraries with gcc (Tom Lane) @@ -438,27 +808,27 @@ UPDATE pg_database SET datallowconn = false WHERE datname = 'template0'; - In MSVC builds, handle the case where the openssl - library is not within a VC subdirectory (Andrew Dunstan) + In MSVC builds, handle the case where the OpenSSL + library is not within a VC subdirectory (Andrew Dunstan) - In MSVC builds, add proper include path for libxml2 + In MSVC builds, add proper include path for libxml2 header files (Andrew Dunstan) This fixes a former need to move things around in standard Windows - installations of libxml2. 
+ installations of libxml2. In MSVC builds, recognize a Tcl library that is - named tcl86.lib (Noah Misch) + named tcl86.lib (Noah Misch) @@ -478,11 +848,11 @@ UPDATE pg_database SET datallowconn = false WHERE datname = 'template0'; This release contains a variety of fixes from 9.2.20. For information about new features in the 9.2 major release, see - . + . - The PostgreSQL community will stop releasing updates + The PostgreSQL community will stop releasing updates for the 9.2.X release series in September 2017. Users are encouraged to update to a newer release branch soon. @@ -501,7 +871,7 @@ UPDATE pg_database SET datallowconn = false WHERE datname = 'template0'; Also, if you are upgrading from a version earlier than 9.2.20, - see . + see . @@ -514,18 +884,18 @@ UPDATE pg_database SET datallowconn = false WHERE datname = 'template0'; Restrict visibility - of pg_user_mappings.umoptions, to + of pg_user_mappings.umoptions, to protect passwords stored as user mapping options (Michael Paquier, Feike Steenbergen) The previous coding allowed the owner of a foreign server object, - or anyone he has granted server USAGE permission to, + or anyone he has granted server USAGE permission to, to see the options for all user mappings associated with that server. This might well include passwords for other users. Adjust the view definition to match the behavior of - information_schema.user_mapping_options, namely that + information_schema.user_mapping_options, namely that these options are visible to the user being mapped, or if the mapping is for PUBLIC and the current user is the server owner, or if the current user is a superuser. @@ -536,7 +906,7 @@ UPDATE pg_database SET datallowconn = false WHERE datname = 'template0'; By itself, this patch will only fix the behavior in newly initdb'd databases. If you wish to apply this change in an existing database, follow the corrected procedure shown in the changelog entry for - CVE-2017-7547, in . + CVE-2017-7547, in . @@ -549,7 +919,7 @@ UPDATE pg_database SET datallowconn = false WHERE datname = 'template0'; Some selectivity estimation functions in the planner will apply user-defined operators to values obtained - from pg_statistic, such as most common values and + from pg_statistic, such as most common values and histogram entries. This occurs before table permissions are checked, so a nefarious user could exploit the behavior to obtain these values for table columns he does not have permission to read. To fix, @@ -563,7 +933,7 @@ UPDATE pg_database SET datallowconn = false WHERE datname = 'template0'; - Fix possible corruption of init forks of unlogged indexes + Fix possible corruption of init forks of unlogged indexes (Robert Haas, Michael Paquier) @@ -576,7 +946,7 @@ UPDATE pg_database SET datallowconn = false WHERE datname = 'template0'; - Fix incorrect reconstruction of pg_subtrans entries + Fix incorrect reconstruction of pg_subtrans entries when a standby server replays a prepared but uncommitted two-phase transaction (Tom Lane) @@ -584,7 +954,7 @@ UPDATE pg_database SET datallowconn = false WHERE datname = 'template0'; In most cases this turned out to have no visible ill effects, but in corner cases it could result in circular references - in pg_subtrans, potentially causing infinite loops + in pg_subtrans, potentially causing infinite loops in queries that examine rows modified by the two-phase transaction. 
@@ -599,19 +969,19 @@ UPDATE pg_database SET datallowconn = false WHERE datname = 'template0'; Due to lack of a cache flush step between commands in an extension script file, non-utility queries might not see the effects of an immediately preceding catalog change, such as ALTER TABLE - ... RENAME. + ... RENAME. Skip tablespace privilege checks when ALTER TABLE ... ALTER - COLUMN TYPE rebuilds an existing index (Noah Misch) + COLUMN TYPE rebuilds an existing index (Noah Misch) The command failed if the calling user did not currently have - CREATE privilege for the tablespace containing the index. + CREATE privilege for the tablespace containing the index. That behavior seems unhelpful, so skip the check, allowing the index to be rebuilt where it is. @@ -619,27 +989,27 @@ UPDATE pg_database SET datallowconn = false WHERE datname = 'template0'; - Fix ALTER TABLE ... VALIDATE CONSTRAINT to not recurse - to child tables when the constraint is marked NO INHERIT + Fix ALTER TABLE ... VALIDATE CONSTRAINT to not recurse + to child tables when the constraint is marked NO INHERIT (Amit Langote) - This fix prevents unwanted constraint does not exist failures + This fix prevents unwanted constraint does not exist failures when no matching constraint is present in the child tables. - Fix VACUUM to account properly for pages that could not + Fix VACUUM to account properly for pages that could not be scanned due to conflicting page pins (Andrew Gierth) This tended to lead to underestimation of the number of tuples in the table. In the worst case of a small heavily-contended - table, VACUUM could incorrectly report that the table + table, VACUUM could incorrectly report that the table contained no tuples, leading to very bad planning choices. @@ -653,33 +1023,33 @@ UPDATE pg_database SET datallowconn = false WHERE datname = 'template0'; - Fix cursor_to_xml() to produce valid output - with tableforest = false + Fix cursor_to_xml() to produce valid output + with tableforest = false (Thomas Munro, Peter Eisentraut) - Previously it failed to produce a wrapping <table> + Previously it failed to produce a wrapping <table> element. - Improve performance of pg_timezone_names view + Improve performance of pg_timezone_names view (Tom Lane, David Rowley) - Fix sloppy handling of corner-case errors from lseek() - and close() (Tom Lane) + Fix sloppy handling of corner-case errors from lseek() + and close() (Tom Lane) Neither of these system calls are likely to fail in typical situations, - but if they did, fd.c could get quite confused. + but if they did, fd.c could get quite confused. @@ -697,21 +1067,21 @@ UPDATE pg_database SET datallowconn = false WHERE datname = 'template0'; - Fix ecpg to support COMMIT PREPARED - and ROLLBACK PREPARED (Masahiko Sawada) + Fix ecpg to support COMMIT PREPARED + and ROLLBACK PREPARED (Masahiko Sawada) Fix a double-free error when processing dollar-quoted string literals - in ecpg (Michael Meskes) + in ecpg (Michael Meskes) - In pg_dump, fix incorrect schema and owner marking for + In pg_dump, fix incorrect schema and owner marking for comments and security labels of some types of database objects (Giuseppe Broccolo, Tom Lane) @@ -726,20 +1096,20 @@ UPDATE pg_database SET datallowconn = false WHERE datname = 'template0'; - Avoid emitting an invalid list file in pg_restore -l + Avoid emitting an invalid list file in pg_restore -l when SQL object names contain newlines (Tom Lane) Replace newlines by spaces, which is sufficient to make the output - valid for pg_restore -L's purposes. 
+ valid for pg_restore -L's purposes. - Fix pg_upgrade to transfer comments and security labels - attached to large objects (blobs) (Stephen Frost) + Fix pg_upgrade to transfer comments and security labels + attached to large objects (blobs) (Stephen Frost) @@ -751,19 +1121,19 @@ UPDATE pg_database SET datallowconn = false WHERE datname = 'template0'; Improve error handling - in contrib/adminpack's pg_file_write() + in contrib/adminpack's pg_file_write() function (Noah Misch) Notably, it failed to detect errors reported - by fclose(). + by fclose(). - In contrib/dblink, avoid leaking the previous unnamed + In contrib/dblink, avoid leaking the previous unnamed connection when establishing a new unnamed connection (Joe Conway) @@ -798,7 +1168,7 @@ UPDATE pg_database SET datallowconn = false WHERE datname = 'template0'; - Update time zone data files to tzdata release 2017b + Update time zone data files to tzdata release 2017b for DST law changes in Chile, Haiti, and Mongolia, plus historical corrections for Ecuador, Kazakhstan, Liberia, and Spain. Switch to numeric abbreviations for numerous time zones in South @@ -812,9 +1182,9 @@ UPDATE pg_database SET datallowconn = false WHERE datname = 'template0'; or no currency among the local population. They are in process of reversing that policy in favor of using numeric UTC offsets in zones where there is no evidence of real-world use of an English - abbreviation. At least for the time being, PostgreSQL + abbreviation. At least for the time being, PostgreSQL will continue to accept such removed abbreviations for timestamp input. - But they will not be shown in the pg_timezone_names + But they will not be shown in the pg_timezone_names view nor used for output. @@ -827,16 +1197,16 @@ UPDATE pg_database SET datallowconn = false WHERE datname = 'template0'; The Microsoft MSVC build scripts neglected to install - the posixrules file in the timezone directory tree. + the posixrules file in the timezone directory tree. This resulted in the timezone code falling back to its built-in rule about what DST behavior to assume for a POSIX-style time zone name. For historical reasons that still corresponds to the DST rules the USA was using before 2007 (i.e., change on first Sunday in April and last Sunday in October). With this fix, a POSIX-style zone name will use the current and historical DST transition dates of - the US/Eastern zone. If you don't want that, remove - the posixrules file, or replace it with a copy of some - other zone file (see ). Note that + the US/Eastern zone. If you don't want that, remove + the posixrules file, or replace it with a copy of some + other zone file (see ). Note that due to caching, you may need to restart the server to get such changes to take effect. @@ -858,7 +1228,7 @@ UPDATE pg_database SET datallowconn = false WHERE datname = 'template0'; This release contains a variety of fixes from 9.2.19. For information about new features in the 9.2 major release, see - . + . @@ -876,7 +1246,7 @@ UPDATE pg_database SET datallowconn = false WHERE datname = 'template0'; Also, if you are upgrading from a version earlier than 9.2.11, - see . + see . 
@@ -889,15 +1259,15 @@ UPDATE pg_database SET datallowconn = false WHERE datname = 'template0'; Fix a race condition that could cause indexes built - with CREATE INDEX CONCURRENTLY to be corrupt + with CREATE INDEX CONCURRENTLY to be corrupt (Pavan Deolasee, Tom Lane) - If CREATE INDEX CONCURRENTLY was used to build an index + If CREATE INDEX CONCURRENTLY was used to build an index that depends on a column not previously indexed, then rows updated by transactions that ran concurrently with - the CREATE INDEX command could have received incorrect + the CREATE INDEX command could have received incorrect index entries. If you suspect this may have happened, the most reliable solution is to rebuild affected indexes after installing this update. @@ -906,13 +1276,13 @@ UPDATE pg_database SET datallowconn = false WHERE datname = 'template0'; - Unconditionally WAL-log creation of the init fork for an + Unconditionally WAL-log creation of the init fork for an unlogged table (Michael Paquier) - Previously, this was skipped when - = minimal, but actually it's necessary even in that case + Previously, this was skipped when + = minimal, but actually it's necessary even in that case to ensure that the unlogged table is properly reset to empty after a crash. @@ -929,7 +1299,7 @@ Branch: REL9_2_STABLE [38bec1805] 2017-01-25 07:02:25 +0900 - In corner cases, a spurious out-of-sequence TLI error + In corner cases, a spurious out-of-sequence TLI error could be reported during recovery. @@ -975,13 +1345,13 @@ Branch: REL9_2_STABLE [38bec1805] 2017-01-25 07:02:25 +0900 - Make sure ALTER TABLE preserves index tablespace + Make sure ALTER TABLE preserves index tablespace assignments when rebuilding indexes (Tom Lane, Michael Paquier) Previously, non-default settings - of could result in broken + of could result in broken indexes. @@ -993,15 +1363,15 @@ Branch: REL9_2_STABLE [38bec1805] 2017-01-25 07:02:25 +0900 - This avoids could not find trigger NNN - or relation NNN has no triggers errors. + This avoids could not find trigger NNN + or relation NNN has no triggers errors. Fix processing of OID column when a table with OIDs is associated to - a parent with OIDs via ALTER TABLE ... INHERIT (Amit + a parent with OIDs via ALTER TABLE ... INHERIT (Amit Langote) @@ -1034,12 +1404,12 @@ Branch: REL9_2_STABLE [38bec1805] 2017-01-25 07:02:25 +0900 Ensure that column typmods are determined accurately for - multi-row VALUES constructs (Tom Lane) + multi-row VALUES constructs (Tom Lane) This fixes problems occurring when the first value in a column has a - determinable typmod (e.g., length for a varchar value) but + determinable typmod (e.g., length for a varchar value) but later values don't share the same limit. @@ -1054,15 +1424,15 @@ Branch: REL9_2_STABLE [38bec1805] 2017-01-25 07:02:25 +0900 Normally, a Unicode surrogate leading character must be followed by a Unicode surrogate trailing character, but the check for this was missed if the leading character was the last character in a Unicode - string literal (U&'...') or Unicode identifier - (U&"..."). + string literal (U&'...') or Unicode identifier + (U&"..."). 
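An illustrative sketch of the surrogate-pair entry just above, assuming a UTF8 database and standard_conforming_strings on; the exact error wording is approximate:

    SELECT U&'\D83D\DE04';   -- leading + trailing surrogate: a valid pair
    SELECT U&'\D83D';        -- lone leading surrogate ending the literal:
                             -- now rejected instead of slipping through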
Ensure that a purely negative text search query, such - as !foo, matches empty tsvectors (Tom Dunstan) + as !foo, matches empty tsvectors (Tom Dunstan) @@ -1073,33 +1443,33 @@ Branch: REL9_2_STABLE [38bec1805] 2017-01-25 07:02:25 +0900 - Prevent crash when ts_rewrite() replaces a non-top-level + Prevent crash when ts_rewrite() replaces a non-top-level subtree with an empty query (Artur Zakirov) - Fix performance problems in ts_rewrite() (Tom Lane) + Fix performance problems in ts_rewrite() (Tom Lane) - Fix ts_rewrite()'s handling of nested NOT operators + Fix ts_rewrite()'s handling of nested NOT operators (Tom Lane) - Fix array_fill() to handle empty arrays properly (Tom Lane) + Fix array_fill() to handle empty arrays properly (Tom Lane) - Fix one-byte buffer overrun in quote_literal_cstr() + Fix one-byte buffer overrun in quote_literal_cstr() (Heikki Linnakangas) @@ -1111,8 +1481,8 @@ Branch: REL9_2_STABLE [38bec1805] 2017-01-25 07:02:25 +0900 - Prevent multiple calls of pg_start_backup() - and pg_stop_backup() from running concurrently (Michael + Prevent multiple calls of pg_start_backup() + and pg_stop_backup() from running concurrently (Michael Paquier) @@ -1124,28 +1494,28 @@ Branch: REL9_2_STABLE [38bec1805] 2017-01-25 07:02:25 +0900 - Avoid discarding interval-to-interval casts + Avoid discarding interval-to-interval casts that aren't really no-ops (Tom Lane) In some cases, a cast that should result in zeroing out - low-order interval fields was mistakenly deemed to be a + low-order interval fields was mistakenly deemed to be a no-op and discarded. An example is that casting from INTERVAL - MONTH to INTERVAL YEAR failed to clear the months field. + MONTH to INTERVAL YEAR failed to clear the months field. - Fix pg_dump to dump user-defined casts and transforms + Fix pg_dump to dump user-defined casts and transforms that use built-in functions (Stephen Frost) - Fix possible pg_basebackup failure on standby + Fix possible pg_basebackup failure on standby server when including WAL files (Amit Kapila, Robert Haas) @@ -1164,21 +1534,21 @@ Branch: REL9_2_STABLE [38bec1805] 2017-01-25 07:02:25 +0900 - Fix PL/Tcl to support triggers on tables that have .tupno + Fix PL/Tcl to support triggers on tables that have .tupno as a column name (Tom Lane) This matches the (previously undocumented) behavior of - PL/Tcl's spi_exec and spi_execp commands, - namely that a magic .tupno column is inserted only if + PL/Tcl's spi_exec and spi_execp commands, + namely that a magic .tupno column is inserted only if there isn't a real column named that. 
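A small sketch of the purely-negative text search entry above (the !foo case); the results shown are those expected after the fix:

    SELECT to_tsvector('simple', '')    @@ to_tsquery('simple', '!foo');  -- true
    SELECT to_tsvector('simple', 'bar') @@ to_tsquery('simple', '!foo');  -- true
    SELECT to_tsvector('simple', 'foo') @@ to_tsquery('simple', '!foo');  -- false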
- Allow DOS-style line endings in ~/.pgpass files, + Allow DOS-style line endings in ~/.pgpass files, even on Unix (Vik Fearing) @@ -1190,23 +1560,23 @@ Branch: REL9_2_STABLE [38bec1805] 2017-01-25 07:02:25 +0900 - Fix one-byte buffer overrun if ecpg is given a file + Fix one-byte buffer overrun if ecpg is given a file name that ends with a dot (Takayuki Tsunakawa) - Fix psql's tab completion for ALTER DEFAULT - PRIVILEGES (Gilles Darold, Stephen Frost) + Fix psql's tab completion for ALTER DEFAULT + PRIVILEGES (Gilles Darold, Stephen Frost) - In psql, treat an empty or all-blank setting of - the PAGER environment variable as meaning no - pager (Tom Lane) + In psql, treat an empty or all-blank setting of + the PAGER environment variable as meaning no + pager (Tom Lane) @@ -1217,8 +1587,8 @@ Branch: REL9_2_STABLE [38bec1805] 2017-01-25 07:02:25 +0900 - Improve contrib/dblink's reporting of - low-level libpq errors, such as out-of-memory + Improve contrib/dblink's reporting of + low-level libpq errors, such as out-of-memory (Joe Conway) @@ -1245,7 +1615,7 @@ Branch: REL9_2_STABLE [38bec1805] 2017-01-25 07:02:25 +0900 - Update time zone data files to tzdata release 2016j + Update time zone data files to tzdata release 2016j for DST law changes in northern Cyprus (adding a new zone Asia/Famagusta), Russia (adding a new zone Europe/Saratov), Tonga, and Antarctica/Casey. @@ -1270,7 +1640,7 @@ Branch: REL9_2_STABLE [38bec1805] 2017-01-25 07:02:25 +0900 This release contains a variety of fixes from 9.2.18. For information about new features in the 9.2 major release, see - . + . @@ -1282,7 +1652,7 @@ Branch: REL9_2_STABLE [38bec1805] 2017-01-25 07:02:25 +0900 However, if you are upgrading from a version earlier than 9.2.11, - see . + see . @@ -1320,71 +1690,71 @@ Branch: REL9_2_STABLE [38bec1805] 2017-01-25 07:02:25 +0900 - Fix EXPLAIN to emit valid XML when - is on (Markus Winand) + Fix EXPLAIN to emit valid XML when + is on (Markus Winand) Previously the XML output-format option produced syntactically invalid - tags such as <I/O-Read-Time>. That is now - rendered as <I-O-Read-Time>. + tags such as <I/O-Read-Time>. That is now + rendered as <I-O-Read-Time>. Suppress printing of zeroes for unmeasured times - in EXPLAIN (Maksim Milyutin) + in EXPLAIN (Maksim Milyutin) Certain option combinations resulted in printing zero values for times that actually aren't ever measured in that combination. Our general - policy in EXPLAIN is not to print such fields at all, so + policy in EXPLAIN is not to print such fields at all, so do that consistently in all cases. - Fix timeout length when VACUUM is waiting for exclusive + Fix timeout length when VACUUM is waiting for exclusive table lock so that it can truncate the table (Simon Riggs) The timeout was meant to be 50 milliseconds, but it was actually only - 50 microseconds, causing VACUUM to give up on truncation + 50 microseconds, causing VACUUM to give up on truncation much more easily than intended. Set it to the intended value. - Fix bugs in merging inherited CHECK constraints while + Fix bugs in merging inherited CHECK constraints while creating or altering a table (Tom Lane, Amit Langote) - Allow identical CHECK constraints to be added to a parent + Allow identical CHECK constraints to be added to a parent and child table in either order. Prevent merging of a valid - constraint from the parent table with a NOT VALID + constraint from the parent table with a NOT VALID constraint on the child. 
Likewise, prevent merging of a NO - INHERIT child constraint with an inherited constraint. + INHERIT child constraint with an inherited constraint. Remove artificial restrictions on the values accepted - by numeric_in() and numeric_recv() + by numeric_in() and numeric_recv() (Tom Lane) We allow numeric values up to the limit of the storage format (more - than 1e100000), so it seems fairly pointless - that numeric_in() rejected scientific-notation exponents - above 1000. Likewise, it was silly for numeric_recv() to + than 1e100000), so it seems fairly pointless + that numeric_in() rejected scientific-notation exponents + above 1000. Likewise, it was silly for numeric_recv() to reject more than 1000 digits in an input value. @@ -1406,7 +1776,7 @@ Branch: REL9_2_STABLE [38bec1805] 2017-01-25 07:02:25 +0900 - Disallow starting a standalone backend with standby_mode + Disallow starting a standalone backend with standby_mode turned on (Michael Paquier) @@ -1420,7 +1790,7 @@ Branch: REL9_2_STABLE [38bec1805] 2017-01-25 07:02:25 +0900 Don't try to share SSL contexts across multiple connections - in libpq (Heikki Linnakangas) + in libpq (Heikki Linnakangas) @@ -1431,30 +1801,30 @@ Branch: REL9_2_STABLE [38bec1805] 2017-01-25 07:02:25 +0900 - Avoid corner-case memory leak in libpq (Tom Lane) + Avoid corner-case memory leak in libpq (Tom Lane) The reported problem involved leaking an error report - during PQreset(), but there might be related cases. + during PQreset(), but there might be related cases. - Make ecpg's and options work consistently with our other executables (Haribabu Kommi) - In pg_dump, never dump range constructor functions + In pg_dump, never dump range constructor functions (Tom Lane) - This oversight led to pg_upgrade failures with + This oversight led to pg_upgrade failures with extensions containing range types, due to duplicate creation of the constructor functions. @@ -1462,8 +1832,8 @@ Branch: REL9_2_STABLE [38bec1805] 2017-01-25 07:02:25 +0900 - Fix contrib/intarray/bench/bench.pl to print the results - of the EXPLAIN it does when given the option (Daniel Gustafsson) @@ -1484,17 +1854,17 @@ Branch: REL9_2_STABLE [38bec1805] 2017-01-25 07:02:25 +0900 If a dynamic time zone abbreviation does not match any entry in the referenced time zone, treat it as equivalent to the time zone name. This avoids unexpected failures when IANA removes abbreviations from - their time zone database, as they did in tzdata + their time zone database, as they did in tzdata release 2016f and seem likely to do again in the future. The consequences were not limited to not recognizing the individual abbreviation; any mismatch caused - the pg_timezone_abbrevs view to fail altogether. + the pg_timezone_abbrevs view to fail altogether. - Update time zone data files to tzdata release 2016h + Update time zone data files to tzdata release 2016h for DST law changes in Palestine and Turkey, plus historical corrections for Turkey and some regions of Russia. Switch to numeric abbreviations for some time zones in Antarctica, @@ -1507,15 +1877,15 @@ Branch: REL9_2_STABLE [38bec1805] 2017-01-25 07:02:25 +0900 or no currency among the local population. They are in process of reversing that policy in favor of using numeric UTC offsets in zones where there is no evidence of real-world use of an English - abbreviation. At least for the time being, PostgreSQL + abbreviation. At least for the time being, PostgreSQL will continue to accept such removed abbreviations for timestamp input. 
- But they will not be shown in the pg_timezone_names + But they will not be shown in the pg_timezone_names view nor used for output. - In this update, AMT is no longer shown as being in use to - mean Armenia Time. Therefore, we have changed the Default + In this update, AMT is no longer shown as being in use to + mean Armenia Time. Therefore, we have changed the Default abbreviation set to interpret it as Amazon Time, thus UTC-4 not UTC+4. @@ -1536,7 +1906,7 @@ Branch: REL9_2_STABLE [38bec1805] 2017-01-25 07:02:25 +0900 This release contains a variety of fixes from 9.2.17. For information about new features in the 9.2 major release, see - . + . @@ -1548,7 +1918,7 @@ Branch: REL9_2_STABLE [38bec1805] 2017-01-25 07:02:25 +0900 However, if you are upgrading from a version earlier than 9.2.11, - see . + see . @@ -1561,17 +1931,17 @@ Branch: REL9_2_STABLE [38bec1805] 2017-01-25 07:02:25 +0900 Fix possible mis-evaluation of - nested CASE-WHEN expressions (Heikki + nested CASE-WHEN expressions (Heikki Linnakangas, Michael Paquier, Tom Lane) - A CASE expression appearing within the test value - subexpression of another CASE could become confused about + A CASE expression appearing within the test value + subexpression of another CASE could become confused about whether its own test value was null or not. Also, inlining of a SQL function implementing the equality operator used by - a CASE expression could result in passing the wrong test - value to functions called within a CASE expression in the + a CASE expression could result in passing the wrong test + value to functions called within a CASE expression in the SQL function's body. If the test values were of different data types, a crash might result; moreover such situations could be abused to allow disclosure of portions of server memory. (CVE-2016-5423) @@ -1585,7 +1955,7 @@ Branch: REL9_2_STABLE [38bec1805] 2017-01-25 07:02:25 +0900 - Numerous places in vacuumdb and other client programs + Numerous places in vacuumdb and other client programs could become confused by database and role names containing double quotes or backslashes. Tighten up quoting rules to make that safe. Also, ensure that when a conninfo string is used as a database name @@ -1594,22 +1964,22 @@ Branch: REL9_2_STABLE [38bec1805] 2017-01-25 07:02:25 +0900 Fix handling of paired double quotes - in psql's \connect - and \password commands to match the documentation. + in psql's \connect + and \password commands to match the documentation. - Introduce a new - pg_dumpall now refuses to deal with database and role + pg_dumpall now refuses to deal with database and role names containing carriage returns or newlines, as it seems impractical to quote those characters safely on Windows. In future we may reject such names on the server side, but that step has not been taken yet. @@ -1619,40 +1989,40 @@ Branch: REL9_2_STABLE [38bec1805] 2017-01-25 07:02:25 +0900 These are considered security fixes because crafted object names containing special characters could have been used to execute commands with superuser privileges the next time a superuser - executes pg_dumpall or other routine maintenance + executes pg_dumpall or other routine maintenance operations. 
(CVE-2016-5424) - Fix corner-case misbehaviors for IS NULL/IS NOT - NULL applied to nested composite values (Andrew Gierth, Tom Lane) + Fix corner-case misbehaviors for IS NULL/IS NOT + NULL applied to nested composite values (Andrew Gierth, Tom Lane) - The SQL standard specifies that IS NULL should return + The SQL standard specifies that IS NULL should return TRUE for a row of all null values (thus ROW(NULL,NULL) IS - NULL yields TRUE), but this is not meant to apply recursively - (thus ROW(NULL, ROW(NULL,NULL)) IS NULL yields FALSE). + NULL yields TRUE), but this is not meant to apply recursively + (thus ROW(NULL, ROW(NULL,NULL)) IS NULL yields FALSE). The core executor got this right, but certain planner optimizations treated the test as recursive (thus producing TRUE in both cases), - and contrib/postgres_fdw could produce remote queries + and contrib/postgres_fdw could produce remote queries that misbehaved similarly. - Make the inet and cidr data types properly reject + Make the inet and cidr data types properly reject IPv6 addresses with too many colon-separated fields (Tom Lane) - Prevent crash in close_ps() - (the point ## lseg operator) + Prevent crash in close_ps() + (the point ## lseg operator) for NaN input coordinates (Tom Lane) @@ -1663,12 +2033,12 @@ Branch: REL9_2_STABLE [38bec1805] 2017-01-25 07:02:25 +0900 - Fix several one-byte buffer over-reads in to_number() + Fix several one-byte buffer over-reads in to_number() (Peter Eisentraut) - In several cases the to_number() function would read one + In several cases the to_number() function would read one more character than it should from the input string. There is a small chance of a crash, if the input happens to be adjacent to the end of memory. @@ -1678,7 +2048,7 @@ Branch: REL9_2_STABLE [38bec1805] 2017-01-25 07:02:25 +0900 Avoid unsafe intermediate state during expensive paths - through heap_update() (Masahiko Sawada, Andres Freund) + through heap_update() (Masahiko Sawada, Andres Freund) @@ -1691,19 +2061,19 @@ Branch: REL9_2_STABLE [38bec1805] 2017-01-25 07:02:25 +0900 - Avoid crash in postgres -C when the specified variable + Avoid crash in postgres -C when the specified variable has a null string value (Michael Paquier) - Avoid consuming a transaction ID during VACUUM + Avoid consuming a transaction ID during VACUUM (Alexander Korotkov) - Some cases in VACUUM unnecessarily caused an XID to be + Some cases in VACUUM unnecessarily caused an XID to be assigned to the current transaction. Normally this is negligible, but if one is up against the XID wraparound limit, consuming more XIDs during anti-wraparound vacuums is a very bad thing. @@ -1712,12 +2082,12 @@ Branch: REL9_2_STABLE [38bec1805] 2017-01-25 07:02:25 +0900 - Avoid canceling hot-standby queries during VACUUM FREEZE + Avoid canceling hot-standby queries during VACUUM FREEZE (Simon Riggs, Álvaro Herrera) - VACUUM FREEZE on an otherwise-idle master server could + VACUUM FREEZE on an otherwise-idle master server could result in unnecessary cancellations of queries on its standby servers. 
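The IS NULL / IS NOT NULL entry above can be illustrated directly with the expressions it cites; both results follow from the SQL-standard (non-recursive) rule:

    SELECT ROW(NULL, NULL) IS NULL;             -- true: every field is null
    SELECT ROW(NULL, ROW(NULL, NULL)) IS NULL;  -- false: the inner ROW is itself
                                                -- a non-null composite value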
@@ -1725,8 +2095,8 @@ Branch: REL9_2_STABLE [38bec1805] 2017-01-25 07:02:25 +0900 - When a manual ANALYZE specifies a column list, don't - reset the table's changes_since_analyze counter + When a manual ANALYZE specifies a column list, don't + reset the table's changes_since_analyze counter (Tom Lane) @@ -1738,7 +2108,7 @@ Branch: REL9_2_STABLE [38bec1805] 2017-01-25 07:02:25 +0900 - Fix ANALYZE's overestimation of n_distinct + Fix ANALYZE's overestimation of n_distinct for a unique or nearly-unique column with many null entries (Tom Lane) @@ -1773,8 +2143,8 @@ Branch: REL9_2_STABLE [38bec1805] 2017-01-25 07:02:25 +0900 - Fix contrib/btree_gin to handle the smallest - possible bigint value correctly (Peter Eisentraut) + Fix contrib/btree_gin to handle the smallest + possible bigint value correctly (Peter Eisentraut) @@ -1787,29 +2157,29 @@ Branch: REL9_2_STABLE [38bec1805] 2017-01-25 07:02:25 +0900 It's planned to switch to two-part instead of three-part server version numbers for releases after 9.6. Make sure - that PQserverVersion() returns the correct value for + that PQserverVersion() returns the correct value for such cases. - Fix ecpg's code for unsigned long long + Fix ecpg's code for unsigned long long array elements (Michael Meskes) - In pg_dump with both - Make pg_basebackup accept -Z 0 as + Make pg_basebackup accept -Z 0 as specifying no compression (Fujii Masao) @@ -1843,7 +2213,7 @@ Branch: REL9_2_STABLE [38bec1805] 2017-01-25 07:02:25 +0900 Update our copy of the timezone code to match - IANA's tzcode release 2016c (Tom Lane) + IANA's tzcode release 2016c (Tom Lane) @@ -1855,7 +2225,7 @@ Branch: REL9_2_STABLE [38bec1805] 2017-01-25 07:02:25 +0900 - Update time zone data files to tzdata release 2016f + Update time zone data files to tzdata release 2016f for DST law changes in Kemerovo and Novosibirsk, plus historical corrections for Azerbaijan, Belarus, and Morocco. @@ -1877,7 +2247,7 @@ Branch: REL9_2_STABLE [38bec1805] 2017-01-25 07:02:25 +0900 This release contains a variety of fixes from 9.2.16. For information about new features in the 9.2 major release, see - . + . @@ -1889,7 +2259,7 @@ Branch: REL9_2_STABLE [38bec1805] 2017-01-25 07:02:25 +0900 However, if you are upgrading from a version earlier than 9.2.11, - see . + see . @@ -1911,7 +2281,7 @@ Branch: REL9_2_STABLE [38bec1805] 2017-01-25 07:02:25 +0900 using OpenSSL within a single process and not all the code involved follows the same rules for when to clear the error queue. Failures have been reported specifically when a client application - uses SSL connections in libpq concurrently with + uses SSL connections in libpq concurrently with SSL connections using the PHP, Python, or Ruby wrappers for OpenSSL. It's possible for similar problems to arise within the server as well, if an extension module establishes an outgoing SSL connection. @@ -1920,7 +2290,7 @@ Branch: REL9_2_STABLE [38bec1805] 2017-01-25 07:02:25 +0900 - Fix failed to build any N-way joins + Fix failed to build any N-way joins planner error with a full join enclosed in the right-hand side of a left join (Tom Lane) @@ -1934,10 +2304,10 @@ Branch: REL9_2_STABLE [38bec1805] 2017-01-25 07:02:25 +0900 Given a three-or-more-way equivalence class of variables, such - as X.X = Y.Y = Z.Z, it was possible for the planner to omit + as X.X = Y.Y = Z.Z, it was possible for the planner to omit some of the tests needed to enforce that all the variables are actually equal, leading to join rows being output that didn't satisfy - the WHERE clauses. 
For various reasons, erroneous plans + the WHERE clauses. For various reasons, erroneous plans were seldom selected in practice, so that this bug has gone undetected for a long time. @@ -1945,8 +2315,8 @@ Branch: REL9_2_STABLE [38bec1805] 2017-01-25 07:02:25 +0900 - Fix possible misbehavior of TH, th, - and Y,YYY format codes in to_timestamp() + Fix possible misbehavior of TH, th, + and Y,YYY format codes in to_timestamp() (Tom Lane) @@ -1958,28 +2328,28 @@ Branch: REL9_2_STABLE [38bec1805] 2017-01-25 07:02:25 +0900 - Fix dumping of rules and views in which the array - argument of a value operator - ANY (array) construct is a sub-SELECT + Fix dumping of rules and views in which the array + argument of a value operator + ANY (array) construct is a sub-SELECT (Tom Lane) - Make pg_regress use a startup timeout from the - PGCTLTIMEOUT environment variable, if that's set (Tom Lane) + Make pg_regress use a startup timeout from the + PGCTLTIMEOUT environment variable, if that's set (Tom Lane) This is for consistency with a behavior recently added - to pg_ctl; it eases automated testing on slow machines. + to pg_ctl; it eases automated testing on slow machines. - Fix pg_upgrade to correctly restore extension + Fix pg_upgrade to correctly restore extension membership for operator families containing only one operator class (Tom Lane) @@ -1987,7 +2357,7 @@ Branch: REL9_2_STABLE [38bec1805] 2017-01-25 07:02:25 +0900 In such a case, the operator family was restored into the new database, but it was no longer marked as part of the extension. This had no - immediate ill effects, but would cause later pg_dump + immediate ill effects, but would cause later pg_dump runs to emit output that would cause (harmless) errors on restore. @@ -2008,22 +2378,22 @@ Branch: REL9_2_STABLE [38bec1805] 2017-01-25 07:02:25 +0900 Reduce the number of SysV semaphores used by a build configured with - (Tom Lane) - Rename internal function strtoi() - to strtoint() to avoid conflict with a NetBSD library + Rename internal function strtoi() + to strtoint() to avoid conflict with a NetBSD library function (Thomas Munro) - Fix reporting of errors from bind() - and listen() system calls on Windows (Tom Lane) + Fix reporting of errors from bind() + and listen() system calls on Windows (Tom Lane) @@ -2036,12 +2406,12 @@ Branch: REL9_2_STABLE [38bec1805] 2017-01-25 07:02:25 +0900 - Avoid possibly-unsafe use of Windows' FormatMessage() + Avoid possibly-unsafe use of Windows' FormatMessage() function (Christian Ullrich) - Use the FORMAT_MESSAGE_IGNORE_INSERTS flag where + Use the FORMAT_MESSAGE_IGNORE_INSERTS flag where appropriate. No live bug is known to exist here, but it seems like a good idea to be careful. @@ -2049,9 +2419,9 @@ Branch: REL9_2_STABLE [38bec1805] 2017-01-25 07:02:25 +0900 - Update time zone data files to tzdata release 2016d + Update time zone data files to tzdata release 2016d for DST law changes in Russia and Venezuela. There are new zone - names Europe/Kirov and Asia/Tomsk to reflect + names Europe/Kirov and Asia/Tomsk to reflect the fact that these regions now have different time zone histories from adjacent regions. @@ -2073,7 +2443,7 @@ Branch: REL9_2_STABLE [38bec1805] 2017-01-25 07:02:25 +0900 This release contains a variety of fixes from 9.2.15. For information about new features in the 9.2 major release, see - . + . @@ -2085,7 +2455,7 @@ Branch: REL9_2_STABLE [38bec1805] 2017-01-25 07:02:25 +0900 However, if you are upgrading from a version earlier than 9.2.11, - see . + see . 
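A sketch of the kind of view covered by the pg_dump entry above about value operator ANY (array) constructs whose array argument is a sub-SELECT; the table and view names are hypothetical:

    CREATE TABLE ta (x int);
    CREATE VIEW va AS
        SELECT x
        FROM ta
        WHERE x = ANY (ARRAY(SELECT x FROM ta WHERE x > 0));
    -- pg_dump now reproduces the ARRAY(SELECT ...) argument correctly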
@@ -2098,56 +2468,56 @@ Branch: REL9_2_STABLE [38bec1805] 2017-01-25 07:02:25 +0900 Fix incorrect handling of NULL index entries in - indexed ROW() comparisons (Tom Lane) + indexed ROW() comparisons (Tom Lane) An index search using a row comparison such as ROW(a, b) > - ROW('x', 'y') would stop upon reaching a NULL entry in - the b column, ignoring the fact that there might be - non-NULL b values associated with later values - of a. + ROW('x', 'y') would stop upon reaching a NULL entry in + the b column, ignoring the fact that there might be + non-NULL b values associated with later values + of a. Avoid unlikely data-loss scenarios due to renaming files without - adequate fsync() calls before and after (Michael Paquier, + adequate fsync() calls before and after (Michael Paquier, Tomas Vondra, Andres Freund) - Correctly handle cases where pg_subtrans is close to XID + Correctly handle cases where pg_subtrans is close to XID wraparound during server startup (Jeff Janes) - Fix corner-case crash due to trying to free localeconv() + Fix corner-case crash due to trying to free localeconv() output strings more than once (Tom Lane) - Fix parsing of affix files for ispell dictionaries + Fix parsing of affix files for ispell dictionaries (Tom Lane) The code could go wrong if the affix file contained any characters whose byte length changes during case-folding, for - example I in Turkish UTF8 locales. + example I in Turkish UTF8 locales. - Avoid use of sscanf() to parse ispell + Avoid use of sscanf() to parse ispell dictionary files (Artur Zakirov) @@ -2173,27 +2543,27 @@ Branch: REL9_2_STABLE [38bec1805] 2017-01-25 07:02:25 +0900 - Fix psql's tab completion logic to handle multibyte + Fix psql's tab completion logic to handle multibyte characters properly (Kyotaro Horiguchi, Robert Haas) - Fix psql's tab completion for - SECURITY LABEL (Tom Lane) + Fix psql's tab completion for + SECURITY LABEL (Tom Lane) - Pressing TAB after SECURITY LABEL might cause a crash + Pressing TAB after SECURITY LABEL might cause a crash or offering of inappropriate keywords. - Make pg_ctl accept a wait timeout from the - PGCTLTIMEOUT environment variable, if none is specified on + Make pg_ctl accept a wait timeout from the + PGCTLTIMEOUT environment variable, if none is specified on the command line (Noah Misch) @@ -2207,20 +2577,20 @@ Branch: REL9_2_STABLE [38bec1805] 2017-01-25 07:02:25 +0900 Fix incorrect test for Windows service status - in pg_ctl (Manuel Mathar) + in pg_ctl (Manuel Mathar) The previous set of minor releases attempted to - fix pg_ctl to properly determine whether to send log + fix pg_ctl to properly determine whether to send log messages to Window's Event Log, but got the test backwards. 
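An illustrative sketch of the indexed row-comparison entry above, using a hypothetical table; enable_seqscan is turned off only to force the index path on a tiny table:

    CREATE TABLE rc (a text, b text);
    CREATE INDEX rc_ab_idx ON rc (a, b);
    INSERT INTO rc VALUES ('x', NULL), ('y', 'y1');
    SET enable_seqscan = off;
    SELECT * FROM rc WHERE ROW(a, b) > ROW('x', 'y');
    -- must return ('y','y1'); the scan may no longer stop at the ('x', NULL) entry
    RESET enable_seqscan;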
- Fix pgbench to correctly handle the combination - of -C and -M prepared options (Tom Lane) + Fix pgbench to correctly handle the combination + of -C and -M prepared options (Tom Lane) @@ -2241,21 +2611,21 @@ Branch: REL9_2_STABLE [38bec1805] 2017-01-25 07:02:25 +0900 Fix multiple mistakes in the statistics returned - by contrib/pgstattuple's pgstatindex() + by contrib/pgstattuple's pgstatindex() function (Tom Lane) - Remove dependency on psed in MSVC builds, since it's no + Remove dependency on psed in MSVC builds, since it's no longer provided by core Perl (Michael Paquier, Andrew Dunstan) - Update time zone data files to tzdata release 2016c + Update time zone data files to tzdata release 2016c for DST law changes in Azerbaijan, Chile, Haiti, Palestine, and Russia (Altai, Astrakhan, Kirov, Sakhalin, Ulyanovsk regions), plus historical corrections for Lithuania, Moldova, and Russia @@ -2279,7 +2649,7 @@ Branch: REL9_2_STABLE [38bec1805] 2017-01-25 07:02:25 +0900 This release contains a variety of fixes from 9.2.14. For information about new features in the 9.2 major release, see - . + . @@ -2291,7 +2661,7 @@ Branch: REL9_2_STABLE [38bec1805] 2017-01-25 07:02:25 +0900 However, if you are upgrading from a version earlier than 9.2.11, - see . + see . @@ -2316,25 +2686,25 @@ Branch: REL9_2_STABLE [38bec1805] 2017-01-25 07:02:25 +0900 - Perform an immediate shutdown if the postmaster.pid file + Perform an immediate shutdown if the postmaster.pid file is removed (Tom Lane) The postmaster now checks every minute or so - that postmaster.pid is still there and still contains its + that postmaster.pid is still there and still contains its own PID. If not, it performs an immediate shutdown, as though it had - received SIGQUIT. The main motivation for this change + received SIGQUIT. The main motivation for this change is to ensure that failed buildfarm runs will get cleaned up without manual intervention; but it also serves to limit the bad effects if a - DBA forcibly removes postmaster.pid and then starts a new + DBA forcibly removes postmaster.pid and then starts a new postmaster. - In SERIALIZABLE transaction isolation mode, serialization + In SERIALIZABLE transaction isolation mode, serialization anomalies could be missed due to race conditions during insertions (Kevin Grittner, Thomas Munro) @@ -2343,7 +2713,7 @@ Branch: REL9_2_STABLE [38bec1805] 2017-01-25 07:02:25 +0900 Fix failure to emit appropriate WAL records when doing ALTER - TABLE ... SET TABLESPACE for unlogged relations (Michael Paquier, + TABLE ... 
SET TABLESPACE for unlogged relations (Michael Paquier, Andres Freund) @@ -2362,21 +2732,21 @@ Branch: REL9_2_STABLE [38bec1805] 2017-01-25 07:02:25 +0900 - Fix ALTER COLUMN TYPE to reconstruct inherited check + Fix ALTER COLUMN TYPE to reconstruct inherited check constraints properly (Tom Lane) - Fix REASSIGN OWNED to change ownership of composite types + Fix REASSIGN OWNED to change ownership of composite types properly (Álvaro Herrera) - Fix REASSIGN OWNED and ALTER OWNER to correctly + Fix REASSIGN OWNED and ALTER OWNER to correctly update granted-permissions lists when changing owners of data types, foreign data wrappers, or foreign servers (Bruce Momjian, Álvaro Herrera) @@ -2385,7 +2755,7 @@ Branch: REL9_2_STABLE [38bec1805] 2017-01-25 07:02:25 +0900 - Fix REASSIGN OWNED to ignore foreign user mappings, + Fix REASSIGN OWNED to ignore foreign user mappings, rather than fail (Álvaro Herrera) @@ -2407,14 +2777,14 @@ Branch: REL9_2_STABLE [38bec1805] 2017-01-25 07:02:25 +0900 - Fix dumping of whole-row Vars in ROW() - and VALUES() lists (Tom Lane) + Fix dumping of whole-row Vars in ROW() + and VALUES() lists (Tom Lane) - Fix possible internal overflow in numeric division + Fix possible internal overflow in numeric division (Dean Rasheed) @@ -2466,7 +2836,7 @@ Branch: REL9_2_STABLE [38bec1805] 2017-01-25 07:02:25 +0900 This causes the code to emit regular expression is too - complex errors in some cases that previously used unreasonable + complex errors in some cases that previously used unreasonable amounts of time and memory. @@ -2479,14 +2849,14 @@ Branch: REL9_2_STABLE [38bec1805] 2017-01-25 07:02:25 +0900 - Make %h and %r escapes - in log_line_prefix work for messages emitted due - to log_connections (Tom Lane) + Make %h and %r escapes + in log_line_prefix work for messages emitted due + to log_connections (Tom Lane) - Previously, %h/%r started to work just after a - new session had emitted the connection received log message; + Previously, %h/%r started to work just after a + new session had emitted the connection received log message; now they work for that message too. @@ -2499,7 +2869,7 @@ Branch: REL9_2_STABLE [38bec1805] 2017-01-25 07:02:25 +0900 This oversight resulted in failure to recover from crashes - whenever logging_collector is turned on. + whenever logging_collector is turned on. @@ -2525,13 +2895,13 @@ Branch: REL9_2_STABLE [38bec1805] 2017-01-25 07:02:25 +0900 - In psql, ensure that libreadline's idea + In psql, ensure that libreadline's idea of the screen size is updated when the terminal window size changes (Merlin Moncure) - Previously, libreadline did not notice if the window + Previously, libreadline did not notice if the window was resized during query output, leading to strange behavior during later input of multiline queries. 
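A sketch of the REASSIGN OWNED entries above, using hypothetical role and type names (requires appropriate privileges):

    CREATE ROLE old_owner;
    CREATE ROLE new_owner;
    CREATE TYPE complex_t AS (r float8, i float8);
    ALTER TYPE complex_t OWNER TO old_owner;
    REASSIGN OWNED BY old_owner TO new_owner;
    -- composite types such as complex_t now change owner along with
    -- the role's other objects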
@@ -2539,15 +2909,15 @@ Branch: REL9_2_STABLE [38bec1805] 2017-01-25 07:02:25 +0900 - Fix psql's \det command to interpret its - pattern argument the same way as other \d commands with + Fix psql's \det command to interpret its + pattern argument the same way as other \d commands with potentially schema-qualified patterns do (Reece Hart) - Avoid possible crash in psql's \c command + Avoid possible crash in psql's \c command when previous connection was via Unix socket and command specifies a new hostname and same username (Tom Lane) @@ -2555,21 +2925,21 @@ Branch: REL9_2_STABLE [38bec1805] 2017-01-25 07:02:25 +0900 - In pg_ctl start -w, test child process status directly + In pg_ctl start -w, test child process status directly rather than relying on heuristics (Tom Lane, Michael Paquier) - Previously, pg_ctl relied on an assumption that the new - postmaster would always create postmaster.pid within five + Previously, pg_ctl relied on an assumption that the new + postmaster would always create postmaster.pid within five seconds. But that can fail on heavily-loaded systems, - causing pg_ctl to report incorrectly that the + causing pg_ctl to report incorrectly that the postmaster failed to start. Except on Windows, this change also means that a pg_ctl start - -w done immediately after another such command will now reliably + -w done immediately after another such command will now reliably fail, whereas previously it would report success if done within two seconds of the first command. @@ -2577,23 +2947,23 @@ Branch: REL9_2_STABLE [38bec1805] 2017-01-25 07:02:25 +0900 - In pg_ctl start -w, don't attempt to use a wildcard listen + In pg_ctl start -w, don't attempt to use a wildcard listen address to connect to the postmaster (Kondo Yuta) - On Windows, pg_ctl would fail to detect postmaster - startup if listen_addresses is set to 0.0.0.0 - or ::, because it would try to use that value verbatim as + On Windows, pg_ctl would fail to detect postmaster + startup if listen_addresses is set to 0.0.0.0 + or ::, because it would try to use that value verbatim as the address to connect to, which doesn't work. Instead assume - that 127.0.0.1 or ::1, respectively, is the + that 127.0.0.1 or ::1, respectively, is the right thing to use. - In pg_ctl on Windows, check service status to decide + In pg_ctl on Windows, check service status to decide where to send output, rather than checking if standard output is a terminal (Michael Paquier) @@ -2601,18 +2971,18 @@ Branch: REL9_2_STABLE [38bec1805] 2017-01-25 07:02:25 +0900 - In pg_dump and pg_basebackup, adopt + In pg_dump and pg_basebackup, adopt the GNU convention for handling tar-archive members exceeding 8GB (Tom Lane) - The POSIX standard for tar file format does not allow + The POSIX standard for tar file format does not allow archive member files to exceed 8GB, but most modern implementations - of tar support an extension that fixes that. Adopt - this extension so that pg_dump with no longer fails on tables with more than 8GB of data, and so - that pg_basebackup can handle files larger than 8GB. + that pg_basebackup can handle files larger than 8GB. In addition, fix some portability issues that could cause failures for members between 4GB and 8GB on some platforms. 
Potentially these problems could cause unrecoverable data loss due to unreadable backup @@ -2622,44 +2992,44 @@ Branch: REL9_2_STABLE [38bec1805] 2017-01-25 07:02:25 +0900 - Fix assorted corner-case bugs in pg_dump's processing + Fix assorted corner-case bugs in pg_dump's processing of extension member objects (Tom Lane) - Make pg_dump mark a view's triggers as needing to be + Make pg_dump mark a view's triggers as needing to be processed after its rule, to prevent possible failure during - parallel pg_restore (Tom Lane) + parallel pg_restore (Tom Lane) Ensure that relation option values are properly quoted - in pg_dump (Kouhei Sutou, Tom Lane) + in pg_dump (Kouhei Sutou, Tom Lane) A reloption value that isn't a simple identifier or number could lead to dump/reload failures due to syntax errors in CREATE statements - issued by pg_dump. This is not an issue with any - reloption currently supported by core PostgreSQL, but + issued by pg_dump. This is not an issue with any + reloption currently supported by core PostgreSQL, but extensions could allow reloptions that cause the problem. - Fix pg_upgrade's file-copying code to handle errors + Fix pg_upgrade's file-copying code to handle errors properly on Windows (Bruce Momjian) - Install guards in pgbench against corner-case overflow + Install guards in pgbench against corner-case overflow conditions during evaluation of script-specified division or modulo operators (Fabien Coelho, Michael Paquier) @@ -2668,22 +3038,22 @@ Branch: REL9_2_STABLE [38bec1805] 2017-01-25 07:02:25 +0900 Fix failure to localize messages emitted - by pg_receivexlog and pg_recvlogical + by pg_receivexlog and pg_recvlogical (Ioseph Kim) - Avoid dump/reload problems when using both plpython2 - and plpython3 (Tom Lane) + Avoid dump/reload problems when using both plpython2 + and plpython3 (Tom Lane) - In principle, both versions of PL/Python can be used in + In principle, both versions of PL/Python can be used in the same database, though not in the same session (because the two - versions of libpython cannot safely be used concurrently). - However, pg_restore and pg_upgrade both + versions of libpython cannot safely be used concurrently). + However, pg_restore and pg_upgrade both do things that can fall foul of the same-session restriction. Work around that by changing the timing of the check. @@ -2691,29 +3061,29 @@ Branch: REL9_2_STABLE [38bec1805] 2017-01-25 07:02:25 +0900 - Fix PL/Python regression tests to pass with Python 3.5 + Fix PL/Python regression tests to pass with Python 3.5 (Peter Eisentraut) - Prevent certain PL/Java parameters from being set by + Prevent certain PL/Java parameters from being set by non-superusers (Noah Misch) - This change mitigates a PL/Java security bug - (CVE-2016-0766), which was fixed in PL/Java by marking + This change mitigates a PL/Java security bug + (CVE-2016-0766), which was fixed in PL/Java by marking these parameters as superuser-only. To fix the security hazard for - sites that update PostgreSQL more frequently - than PL/Java, make the core code aware of them also. + sites that update PostgreSQL more frequently + than PL/Java, make the core code aware of them also. 
- Improve libpq's handling of out-of-memory situations + Improve libpq's handling of out-of-memory situations (Michael Paquier, Amit Kapila, Heikki Linnakangas) @@ -2721,42 +3091,42 @@ Branch: REL9_2_STABLE [38bec1805] 2017-01-25 07:02:25 +0900 Fix order of arguments - in ecpg-generated typedef statements + in ecpg-generated typedef statements (Michael Meskes) - Use %g not %f format - in ecpg's PGTYPESnumeric_from_double() + Use %g not %f format + in ecpg's PGTYPESnumeric_from_double() (Tom Lane) - Fix ecpg-supplied header files to not contain comments + Fix ecpg-supplied header files to not contain comments continued from a preprocessor directive line onto the next line (Michael Meskes) - Such a comment is rejected by ecpg. It's not yet clear - whether ecpg itself should be changed. + Such a comment is rejected by ecpg. It's not yet clear + whether ecpg itself should be changed. - Ensure that contrib/pgcrypto's crypt() + Ensure that contrib/pgcrypto's crypt() function can be interrupted by query cancel (Andreas Karlsson) - Accept flex versions later than 2.5.x + Accept flex versions later than 2.5.x (Tom Lane, Michael Paquier) @@ -2768,19 +3138,19 @@ Branch: REL9_2_STABLE [38bec1805] 2017-01-25 07:02:25 +0900 - Install our missing script where PGXS builds can find it + Install our missing script where PGXS builds can find it (Jim Nasby) This allows sane behavior in a PGXS build done on a machine where build - tools such as bison are missing. + tools such as bison are missing. - Ensure that dynloader.h is included in the installed + Ensure that dynloader.h is included in the installed header files in MSVC builds (Bruce Momjian, Michael Paquier) @@ -2788,11 +3158,11 @@ Branch: REL9_2_STABLE [38bec1805] 2017-01-25 07:02:25 +0900 Add variant regression test expected-output file to match behavior of - current libxml2 (Tom Lane) + current libxml2 (Tom Lane) - The fix for libxml2's CVE-2015-7499 causes it not to + The fix for libxml2's CVE-2015-7499 causes it not to output error context reports in some cases where it used to do so. This seems to be a bug, but we'll probably have to live with it for some time, so work around it. @@ -2801,7 +3171,7 @@ Branch: REL9_2_STABLE [38bec1805] 2017-01-25 07:02:25 +0900 - Update time zone data files to tzdata release 2016a for + Update time zone data files to tzdata release 2016a for DST law changes in Cayman Islands, Metlakatla, and Trans-Baikal Territory (Zabaykalsky Krai), plus historical corrections for Pakistan. @@ -2823,7 +3193,7 @@ Branch: REL9_2_STABLE [38bec1805] 2017-01-25 07:02:25 +0900 This release contains a variety of fixes from 9.2.13. For information about new features in the 9.2 major release, see - . + . @@ -2835,7 +3205,7 @@ Branch: REL9_2_STABLE [38bec1805] 2017-01-25 07:02:25 +0900 However, if you are upgrading from a version earlier than 9.2.11, - see . + see . @@ -2847,8 +3217,8 @@ Branch: REL9_2_STABLE [38bec1805] 2017-01-25 07:02:25 +0900 - Fix contrib/pgcrypto to detect and report - too-short crypt() salts (Josh Kupershmidt) + Fix contrib/pgcrypto to detect and report + too-short crypt() salts (Josh Kupershmidt) @@ -2874,13 +3244,13 @@ Branch: REL9_2_STABLE [38bec1805] 2017-01-25 07:02:25 +0900 - Fix insertion of relations into the relation cache init file + Fix insertion of relations into the relation cache init file (Tom Lane) An oversight in a patch in the most recent minor releases - caused pg_trigger_tgrelid_tgname_index to be omitted + caused pg_trigger_tgrelid_tgname_index to be omitted from the init file. 
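A sketch of the contrib/pgcrypto entry just above, assuming the pgcrypto extension is available; the exact error wording is approximate:

    CREATE EXTENSION pgcrypto;
    SELECT crypt('secret', gen_salt('bf'));  -- normal usage: generate a proper salt
    SELECT crypt('secret', 'x');             -- salt shorter than the scheme requires:
                                             -- now reported as an error rather than
                                             -- yielding a bogus hash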
Subsequent sessions detected this, then deemed the init file to be broken and silently ignored it, resulting in a significant degradation in session startup time. In addition to fixing @@ -2898,7 +3268,7 @@ Branch: REL9_2_STABLE [38bec1805] 2017-01-25 07:02:25 +0900 - Improve LISTEN startup time when there are many unread + Improve LISTEN startup time when there are many unread notifications (Matt Newell) @@ -2916,7 +3286,7 @@ Branch: REL9_1_STABLE [9b1b9446f] 2015-08-27 12:22:10 -0400 - This substantially improves performance when pg_dump + This substantially improves performance when pg_dump tries to dump a large number of tables. @@ -2931,13 +3301,13 @@ Branch: REL9_1_STABLE [9b1b9446f] 2015-08-27 12:22:10 -0400 too many bugs in practice, both in the underlying OpenSSL library and in our usage of it. Renegotiation will be removed entirely in 9.5 and later. In the older branches, just change the default value - of ssl_renegotiation_limit to zero (disabled). + of ssl_renegotiation_limit to zero (disabled). - Lower the minimum values of the *_freeze_max_age parameters + Lower the minimum values of the *_freeze_max_age parameters (Andres Freund) @@ -2949,14 +3319,14 @@ Branch: REL9_1_STABLE [9b1b9446f] 2015-08-27 12:22:10 -0400 - Limit the maximum value of wal_buffers to 2GB to avoid + Limit the maximum value of wal_buffers to 2GB to avoid server crashes (Josh Berkus) - Fix rare internal overflow in multiplication of numeric values + Fix rare internal overflow in multiplication of numeric values (Dean Rasheed) @@ -2964,21 +3334,21 @@ Branch: REL9_1_STABLE [9b1b9446f] 2015-08-27 12:22:10 -0400 Guard against hard-to-reach stack overflows involving record types, - range types, json, jsonb, tsquery, - ltxtquery and query_int (Noah Misch) + range types, json, jsonb, tsquery, + ltxtquery and query_int (Noah Misch) - Fix handling of DOW and DOY in datetime input + Fix handling of DOW and DOY in datetime input (Greg Stark) These tokens aren't meant to be used in datetime values, but previously they resulted in opaque internal error messages rather - than invalid input syntax. + than invalid input syntax. @@ -2991,7 +3361,7 @@ Branch: REL9_1_STABLE [9b1b9446f] 2015-08-27 12:22:10 -0400 Add recursion depth protections to regular expression, SIMILAR - TO, and LIKE matching (Tom Lane) + TO, and LIKE matching (Tom Lane) @@ -3043,22 +3413,22 @@ Branch: REL9_1_STABLE [9b1b9446f] 2015-08-27 12:22:10 -0400 - Fix unexpected out-of-memory situation during sort errors - when using tuplestores with small work_mem settings (Tom + Fix unexpected out-of-memory situation during sort errors + when using tuplestores with small work_mem settings (Tom Lane) - Fix very-low-probability stack overrun in qsort (Tom Lane) + Fix very-low-probability stack overrun in qsort (Tom Lane) - Fix invalid memory alloc request size failure in hash joins - with large work_mem settings (Tomas Vondra, Tom Lane) + Fix invalid memory alloc request size failure in hash joins + with large work_mem settings (Tomas Vondra, Tom Lane) @@ -3071,9 +3441,9 @@ Branch: REL9_1_STABLE [9b1b9446f] 2015-08-27 12:22:10 -0400 These mistakes could lead to incorrect query plans that would give wrong answers, or to assertion failures in assert-enabled builds, or to odd planner errors such as could not devise a query plan for the - given query, could not find pathkey item to - sort, plan should not reference subplan's variable, - or failed to assign all NestLoopParams to plan nodes. 
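A small sketch of the DOW/DOY entry above; the error text is approximate, and the extract() call shows the supported use of these keywords:

    SELECT 'dow'::date;                          -- now a plain "invalid input syntax" error
    SELECT extract(dow FROM date '2015-10-08');  -- 4 (Thursday): the intended usage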
+ given query, could not find pathkey item to + sort, plan should not reference subplan's variable, + or failed to assign all NestLoopParams to plan nodes. Thanks are due to Andreas Seltenreich and Piotr Stefaniak for fuzz testing that exposed these problems. @@ -3081,7 +3451,7 @@ Branch: REL9_1_STABLE [9b1b9446f] 2015-08-27 12:22:10 -0400 - Improve planner's performance for UPDATE/DELETE + Improve planner's performance for UPDATE/DELETE on large inheritance sets (Tom Lane, Dean Rasheed) @@ -3102,12 +3472,12 @@ Branch: REL9_1_STABLE [9b1b9446f] 2015-08-27 12:22:10 -0400 During postmaster shutdown, ensure that per-socket lock files are removed and listen sockets are closed before we remove - the postmaster.pid file (Tom Lane) + the postmaster.pid file (Tom Lane) This avoids race-condition failures if an external script attempts to - start a new postmaster as soon as pg_ctl stop returns. + start a new postmaster as soon as pg_ctl stop returns. @@ -3127,7 +3497,7 @@ Branch: REL9_1_STABLE [9b1b9446f] 2015-08-27 12:22:10 -0400 - Do not print a WARNING when an autovacuum worker is already + Do not print a WARNING when an autovacuum worker is already gone when we attempt to signal it, and reduce log verbosity for such signals (Tom Lane) @@ -3164,7 +3534,7 @@ Branch: REL9_1_STABLE [9b1b9446f] 2015-08-27 12:22:10 -0400 - VACUUM attempted to recycle such pages, but did so in a + VACUUM attempted to recycle such pages, but did so in a way that wasn't crash-safe. @@ -3172,44 +3542,44 @@ Branch: REL9_1_STABLE [9b1b9446f] 2015-08-27 12:22:10 -0400 Fix off-by-one error that led to otherwise-harmless warnings - about apparent wraparound in subtrans/multixact truncation + about apparent wraparound in subtrans/multixact truncation (Thomas Munro) - Fix misreporting of CONTINUE and MOVE statement - types in PL/pgSQL's error context messages + Fix misreporting of CONTINUE and MOVE statement + types in PL/pgSQL's error context messages (Pavel Stehule, Tom Lane) - Fix PL/Perl to handle non-ASCII error + Fix PL/Perl to handle non-ASCII error message texts correctly (Alex Hunsaker) - Fix PL/Python crash when returning the string - representation of a record result (Tom Lane) + Fix PL/Python crash when returning the string + representation of a record result (Tom Lane) - Fix some places in PL/Tcl that neglected to check for - failure of malloc() calls (Michael Paquier, Álvaro + Fix some places in PL/Tcl that neglected to check for + failure of malloc() calls (Michael Paquier, Álvaro Herrera) - In contrib/isn, fix output of ISBN-13 numbers that begin + In contrib/isn, fix output of ISBN-13 numbers that begin with 979 (Fabien Coelho) @@ -3226,14 +3596,14 @@ Branch: REL9_2_STABLE [e90a629e1] 2015-09-22 14:58:38 -0700 - Fix contrib/sepgsql's handling of SELECT INTO + Fix contrib/sepgsql's handling of SELECT INTO statements (Kohei KaiGai) - Improve libpq's handling of out-of-memory conditions + Improve libpq's handling of out-of-memory conditions (Michael Paquier, Heikki Linnakangas) @@ -3241,64 +3611,64 @@ Branch: REL9_2_STABLE [e90a629e1] 2015-09-22 14:58:38 -0700 Fix memory leaks and missing out-of-memory checks - in ecpg (Michael Paquier) + in ecpg (Michael Paquier) - Fix psql's code for locale-aware formatting of numeric + Fix psql's code for locale-aware formatting of numeric output (Tom Lane) - The formatting code invoked by \pset numericlocale on + The formatting code invoked by \pset numericlocale on did the wrong thing for some uncommon cases such as numbers with an exponent but no decimal point. 
It could also mangle already-localized - output from the money data type. + output from the money data type. - Prevent crash in psql's \c command when + Prevent crash in psql's \c command when there is no current connection (Noah Misch) - Make pg_dump handle inherited NOT VALID + Make pg_dump handle inherited NOT VALID check constraints correctly (Tom Lane) - Fix selection of default zlib compression level - in pg_dump's directory output format (Andrew Dunstan) + Fix selection of default zlib compression level + in pg_dump's directory output format (Andrew Dunstan) - Ensure that temporary files created during a pg_dump - run with tar-format output are not world-readable (Michael + Ensure that temporary files created during a pg_dump + run with tar-format output are not world-readable (Michael Paquier) - Fix pg_dump and pg_upgrade to support - cases where the postgres or template1 database + Fix pg_dump and pg_upgrade to support + cases where the postgres or template1 database is in a non-default tablespace (Marti Raudsepp, Bruce Momjian) - Fix pg_dump to handle object privileges sanely when + Fix pg_dump to handle object privileges sanely when dumping from a server too old to have a particular privilege type (Tom Lane) @@ -3306,11 +3676,11 @@ Branch: REL9_2_STABLE [e90a629e1] 2015-09-22 14:58:38 -0700 When dumping data types from pre-9.2 servers, and when dumping functions or procedural languages from pre-7.3 - servers, pg_dump would - produce GRANT/REVOKE commands that revoked the + servers, pg_dump would + produce GRANT/REVOKE commands that revoked the owner's grantable privileges and instead granted all privileges - to PUBLIC. Since the privileges involved are - just USAGE and EXECUTE, this isn't a security + to PUBLIC. Since the privileges involved are + just USAGE and EXECUTE, this isn't a security problem, but it's certainly a surprising representation of the older systems' behavior. Fix it to leave the default privilege state alone in these cases. @@ -3319,18 +3689,18 @@ Branch: REL9_2_STABLE [e90a629e1] 2015-09-22 14:58:38 -0700 - Fix pg_dump to dump shell types (Tom Lane) + Fix pg_dump to dump shell types (Tom Lane) Shell types (that is, not-yet-fully-defined types) aren't useful for - much, but nonetheless pg_dump should dump them. + much, but nonetheless pg_dump should dump them. - Fix assorted minor memory leaks in pg_dump and other + Fix assorted minor memory leaks in pg_dump and other client-side programs (Michael Paquier) @@ -3338,11 +3708,11 @@ Branch: REL9_2_STABLE [e90a629e1] 2015-09-22 14:58:38 -0700 Fix spinlock assembly code for PPC hardware to be compatible - with AIX's native assembler (Tom Lane) + with AIX's native assembler (Tom Lane) - Building with gcc didn't work if gcc + Building with gcc didn't work if gcc had been configured to use the native assembler, which is becoming more common. 
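The shell-type entry above can be illustrated with the one statement that creates such a type (name hypothetical):

    CREATE TYPE my_shell;   -- a shell (not-yet-fully-defined) type;
                            -- pg_dump now emits this placeholder too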
@@ -3350,14 +3720,14 @@ Branch: REL9_2_STABLE [e90a629e1] 2015-09-22 14:58:38 -0700 - On AIX, test the -qlonglong compiler option + On AIX, test the -qlonglong compiler option rather than just assuming it's safe to use (Noah Misch) - On AIX, use -Wl,-brtllib link option to allow + On AIX, use -Wl,-brtllib link option to allow symbols to be resolved at runtime (Noah Misch) @@ -3369,38 +3739,38 @@ Branch: REL9_2_STABLE [e90a629e1] 2015-09-22 14:58:38 -0700 Avoid use of inline functions when compiling with - 32-bit xlc, due to compiler bugs (Noah Misch) + 32-bit xlc, due to compiler bugs (Noah Misch) - Use librt for sched_yield() when necessary, + Use librt for sched_yield() when necessary, which it is on some Solaris versions (Oskari Saarenmaa) - Fix Windows install.bat script to handle target directory + Fix Windows install.bat script to handle target directory names that contain spaces (Heikki Linnakangas) - Make the numeric form of the PostgreSQL version number - (e.g., 90405) readily available to extension Makefiles, - as a variable named VERSION_NUM (Michael Paquier) + Make the numeric form of the PostgreSQL version number + (e.g., 90405) readily available to extension Makefiles, + as a variable named VERSION_NUM (Michael Paquier) - Update time zone data files to tzdata release 2015g for + Update time zone data files to tzdata release 2015g for DST law changes in Cayman Islands, Fiji, Moldova, Morocco, Norfolk Island, North Korea, Turkey, and Uruguay. There is a new zone name - America/Fort_Nelson for the Canadian Northern Rockies. + America/Fort_Nelson for the Canadian Northern Rockies. @@ -3420,7 +3790,7 @@ Branch: REL9_2_STABLE [e90a629e1] 2015-09-22 14:58:38 -0700 This release contains a small number of fixes from 9.2.12. For information about new features in the 9.2 major release, see - . + . @@ -3432,7 +3802,7 @@ Branch: REL9_2_STABLE [e90a629e1] 2015-09-22 14:58:38 -0700 However, if you are upgrading from a version earlier than 9.2.11, - see . + see . @@ -3449,7 +3819,7 @@ Branch: REL9_2_STABLE [e90a629e1] 2015-09-22 14:58:38 -0700 With just the wrong timing of concurrent activity, a VACUUM - FULL on a system catalog might fail to update the init file + FULL on a system catalog might fail to update the init file that's used to avoid cache-loading work for new sessions. This would result in later sessions being unable to access that catalog at all. This is a very ancient bug, but it's so hard to trigger that no @@ -3460,13 +3830,13 @@ Branch: REL9_2_STABLE [e90a629e1] 2015-09-22 14:58:38 -0700 Avoid deadlock between incoming sessions and CREATE/DROP - DATABASE (Tom Lane) + DATABASE (Tom Lane) A new session starting in a database that is the target of - a DROP DATABASE command, or is the template for - a CREATE DATABASE command, could cause the command to wait + a DROP DATABASE command, or is the template for + a CREATE DATABASE command, could cause the command to wait for five seconds and then fail, even if the new session would have exited before that. @@ -3488,7 +3858,7 @@ Branch: REL9_2_STABLE [e90a629e1] 2015-09-22 14:58:38 -0700 This release contains a small number of fixes from 9.2.11. For information about new features in the 9.2 major release, see - . + . @@ -3500,7 +3870,7 @@ Branch: REL9_2_STABLE [e90a629e1] 2015-09-22 14:58:38 -0700 However, if you are upgrading from a version earlier than 9.2.11, - see . + see . 
@@ -3512,12 +3882,12 @@ Branch: REL9_2_STABLE [e90a629e1] 2015-09-22 14:58:38 -0700 - Avoid failures while fsync'ing data directory during + Avoid failures while fsync'ing data directory during crash restart (Abhijit Menon-Sen, Tom Lane) - In the previous minor releases we added a patch to fsync + In the previous minor releases we added a patch to fsync everything in the data directory after a crash. Unfortunately its response to any error condition was to fail, thereby preventing the server from starting up, even when the problem was quite harmless. @@ -3531,36 +3901,36 @@ Branch: REL9_2_STABLE [e90a629e1] 2015-09-22 14:58:38 -0700 - Fix pg_get_functiondef() to show - functions' LEAKPROOF property, if set (Jeevan Chalke) + Fix pg_get_functiondef() to show + functions' LEAKPROOF property, if set (Jeevan Chalke) - Remove configure's check prohibiting linking to a - threaded libpython - on OpenBSD (Tom Lane) + Remove configure's check prohibiting linking to a + threaded libpython + on OpenBSD (Tom Lane) The failure this restriction was meant to prevent seems to not be a - problem anymore on current OpenBSD + problem anymore on current OpenBSD versions. - Allow libpq to use TLS protocol versions beyond v1 + Allow libpq to use TLS protocol versions beyond v1 (Noah Misch) - For a long time, libpq was coded so that the only SSL + For a long time, libpq was coded so that the only SSL protocol it would allow was TLS v1. Now that newer TLS versions are becoming popular, allow it to negotiate the highest commonly-supported - TLS version with the server. (PostgreSQL servers were + TLS version with the server. (PostgreSQL servers were already capable of such negotiation, so no change is needed on the server side.) This is a back-patch of a change already released in 9.4.0. @@ -3583,7 +3953,7 @@ Branch: REL9_2_STABLE [e90a629e1] 2015-09-22 14:58:38 -0700 This release contains a variety of fixes from 9.2.10. For information about new features in the 9.2 major release, see - . + . @@ -3594,14 +3964,14 @@ Branch: REL9_2_STABLE [e90a629e1] 2015-09-22 14:58:38 -0700 - However, if you use contrib/citext's - regexp_matches() functions, see the changelog entry below + However, if you use contrib/citext's + regexp_matches() functions, see the changelog entry below about that. Also, if you are upgrading from a version earlier than 9.2.10, - see . + see . @@ -3633,7 +4003,7 @@ Branch: REL9_2_STABLE [e90a629e1] 2015-09-22 14:58:38 -0700 - Our replacement implementation of snprintf() failed to + Our replacement implementation of snprintf() failed to check for errors reported by the underlying system library calls; the main case that might be missed is out-of-memory situations. In the worst case this might lead to information exposure, due to our @@ -3643,7 +4013,7 @@ Branch: REL9_2_STABLE [e90a629e1] 2015-09-22 14:58:38 -0700 - It remains possible that some calls of the *printf() + It remains possible that some calls of the *printf() family of functions are vulnerable to information disclosure if an out-of-memory error occurs at just the wrong time. We judge the risk to not be large, but will continue analysis in this area. @@ -3653,15 +4023,15 @@ Branch: REL9_2_STABLE [e90a629e1] 2015-09-22 14:58:38 -0700 - In contrib/pgcrypto, uniformly report decryption failures - as Wrong key or corrupt data (Noah Misch) + In contrib/pgcrypto, uniformly report decryption failures + as Wrong key or corrupt data (Noah Misch) Previously, some cases of decryption with an incorrect key could report other error message texts. 
It has been shown that such variance in error reports can aid attackers in recovering keys from other systems. - While it's unknown whether pgcrypto's specific behaviors + While it's unknown whether pgcrypto's specific behaviors are likewise exploitable, it seems better to avoid the risk by using a one-size-fits-all message. (CVE-2015-3167) @@ -3670,16 +4040,16 @@ Branch: REL9_2_STABLE [e90a629e1] 2015-09-22 14:58:38 -0700 - Fix incorrect declaration of contrib/citext's - regexp_matches() functions (Tom Lane) + Fix incorrect declaration of contrib/citext's + regexp_matches() functions (Tom Lane) - These functions should return setof text[], like the core + These functions should return setof text[], like the core functions they are wrappers for; but they were incorrectly declared as - returning just text[]. This mistake had two results: first, + returning just text[]. This mistake had two results: first, if there was no match you got a scalar null result, whereas what you - should get is an empty set (zero rows). Second, the g flag + should get is an empty set (zero rows). Second, the g flag was effectively ignored, since you would get only one result array even if there were multiple matches. @@ -3687,16 +4057,16 @@ Branch: REL9_2_STABLE [e90a629e1] 2015-09-22 14:58:38 -0700 While the latter behavior is clearly a bug, there might be applications depending on the former behavior; therefore the function declarations - will not be changed by default until PostgreSQL 9.5. + will not be changed by default until PostgreSQL 9.5. In pre-9.5 branches, the old behavior exists in version 1.0 of - the citext extension, while we have provided corrected - declarations in version 1.1 (which is not installed by + the citext extension, while we have provided corrected + declarations in version 1.1 (which is not installed by default). To adopt the fix in pre-9.5 branches, execute - ALTER EXTENSION citext UPDATE TO '1.1' in each database in - which citext is installed. (You can also update + ALTER EXTENSION citext UPDATE TO '1.1' in each database in + which citext is installed. (You can also update back to 1.0 if you need to undo that.) Be aware that either update direction will require dropping and recreating any views or rules that - use citext's regexp_matches() functions. + use citext's regexp_matches() functions. @@ -3738,7 +4108,7 @@ Branch: REL9_2_STABLE [e90a629e1] 2015-09-22 14:58:38 -0700 This oversight in the planner has been observed to cause could - not find RelOptInfo for given relids errors, but it seems possible + not find RelOptInfo for given relids errors, but it seems possible that sometimes an incorrect query plan might get past that consistency check and result in silently-wrong query output. @@ -3766,7 +4136,7 @@ Branch: REL9_2_STABLE [e90a629e1] 2015-09-22 14:58:38 -0700 This oversight has been seen to lead to failed to join all - relations together errors in queries involving LATERAL, + relations together errors in queries involving LATERAL, and that might happen in other cases as well. 
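As a small, hedged illustration of the citext entry above (assuming citext is already installed and that any dependent views or rules have been dropped first), adopting the corrected declarations looks like this:

    ALTER EXTENSION citext UPDATE TO '1.1';
    -- With the 1.1 declarations, no match yields zero rows instead of a scalar
    -- NULL, and the 'g' flag yields one row per match:
    SELECT regexp_matches('abc abc'::citext, 'abc'::citext, 'g');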
@@ -3774,7 +4144,7 @@ Branch: REL9_2_STABLE [e90a629e1] 2015-09-22 14:58:38 -0700 Fix possible deadlock at startup - when max_prepared_transactions is too small + when max_prepared_transactions is too small (Heikki Linnakangas) @@ -3795,14 +4165,14 @@ Branch: REL9_0_STABLE [850e1a566] 2015-05-18 17:44:21 -0300 - Avoid cannot GetMultiXactIdMembers() during recovery error + Avoid cannot GetMultiXactIdMembers() during recovery error (Álvaro Herrera) - Recursively fsync() the data directory after a crash + Recursively fsync() the data directory after a crash (Abhijit Menon-Sen, Robert Haas) @@ -3822,19 +4192,19 @@ Branch: REL9_0_STABLE [850e1a566] 2015-05-18 17:44:21 -0300 - Cope with unexpected signals in LockBufferForCleanup() + Cope with unexpected signals in LockBufferForCleanup() (Andres Freund) This oversight could result in spurious errors about multiple - backends attempting to wait for pincount 1. + backends attempting to wait for pincount 1. - Fix crash when doing COPY IN to a table with check + Fix crash when doing COPY IN to a table with check constraints that contain whole-row references (Tom Lane) @@ -3881,18 +4251,18 @@ Branch: REL9_0_STABLE [850e1a566] 2015-05-18 17:44:21 -0300 - ANALYZE executes index expressions many times; if there are + ANALYZE executes index expressions many times; if there are slow functions in such an expression, it's desirable to be able to - cancel the ANALYZE before that loop finishes. + cancel the ANALYZE before that loop finishes. - Ensure tableoid of a foreign table is reported - correctly when a READ COMMITTED recheck occurs after - locking rows in SELECT FOR UPDATE, UPDATE, - or DELETE (Etsuro Fujita) + Ensure tableoid of a foreign table is reported + correctly when a READ COMMITTED recheck occurs after + locking rows in SELECT FOR UPDATE, UPDATE, + or DELETE (Etsuro Fujita) @@ -3905,20 +4275,20 @@ Branch: REL9_0_STABLE [850e1a566] 2015-05-18 17:44:21 -0300 - Recommend setting include_realm to 1 when using + Recommend setting include_realm to 1 when using Kerberos/GSSAPI/SSPI authentication (Stephen Frost) Without this, identically-named users from different realms cannot be distinguished. For the moment this is only a documentation change, but - it will become the default setting in PostgreSQL 9.5. + it will become the default setting in PostgreSQL 9.5. - Remove code for matching IPv4 pg_hba.conf entries to + Remove code for matching IPv4 pg_hba.conf entries to IPv4-in-IPv6 addresses (Tom Lane) @@ -3931,20 +4301,20 @@ Branch: REL9_0_STABLE [850e1a566] 2015-05-18 17:44:21 -0300 crashes on some systems, so let's just remove it rather than fix it. (Had we chosen to fix it, that would make for a subtle and potentially security-sensitive change in the effective meaning of - IPv4 pg_hba.conf entries, which does not seem like a good + IPv4 pg_hba.conf entries, which does not seem like a good thing to do in minor releases.) - Report WAL flush, not insert, position in IDENTIFY_SYSTEM + Report WAL flush, not insert, position in IDENTIFY_SYSTEM replication command (Heikki Linnakangas) This avoids a possible startup failure - in pg_receivexlog. + in pg_receivexlog. 
@@ -3952,14 +4322,14 @@ Branch: REL9_0_STABLE [850e1a566] 2015-05-18 17:44:21 -0300 While shutting down service on Windows, periodically send status updates to the Service Control Manager to prevent it from killing the - service too soon; and ensure that pg_ctl will wait for + service too soon; and ensure that pg_ctl will wait for shutdown (Krystian Bigaj) - Reduce risk of network deadlock when using libpq's + Reduce risk of network deadlock when using libpq's non-blocking mode (Heikki Linnakangas) @@ -3968,32 +4338,32 @@ Branch: REL9_0_STABLE [850e1a566] 2015-05-18 17:44:21 -0300 buffer every so often, in case the server has sent enough response data to cause it to block on output. (A typical scenario is that the server is sending a stream of NOTICE messages during COPY FROM - STDIN.) This worked properly in the normal blocking mode, but not - so much in non-blocking mode. We've modified libpq + STDIN.) This worked properly in the normal blocking mode, but not + so much in non-blocking mode. We've modified libpq to opportunistically drain input when it can, but a full defense against this problem requires application cooperation: the application should watch for socket read-ready as well as write-ready conditions, - and be sure to call PQconsumeInput() upon read-ready. + and be sure to call PQconsumeInput() upon read-ready. - In libpq, fix misparsing of empty values in URI + In libpq, fix misparsing of empty values in URI connection strings (Thomas Fanghaenel) - Fix array handling in ecpg (Michael Meskes) + Fix array handling in ecpg (Michael Meskes) - Fix psql to sanely handle URIs and conninfo strings as - the first parameter to \connect + Fix psql to sanely handle URIs and conninfo strings as + the first parameter to \connect (David Fetter, Andrew Dunstan, Álvaro Herrera) @@ -4006,38 +4376,38 @@ Branch: REL9_0_STABLE [850e1a566] 2015-05-18 17:44:21 -0300 - Suppress incorrect complaints from psql on some - platforms that it failed to write ~/.psql_history at exit + Suppress incorrect complaints from psql on some + platforms that it failed to write ~/.psql_history at exit (Tom Lane) This misbehavior was caused by a workaround for a bug in very old - (pre-2006) versions of libedit. We fixed it by + (pre-2006) versions of libedit. We fixed it by removing the workaround, which will cause a similar failure to appear - for anyone still using such versions of libedit. - Recommendation: upgrade that library, or use libreadline. + for anyone still using such versions of libedit. + Recommendation: upgrade that library, or use libreadline. - Fix pg_dump's rule for deciding which casts are + Fix pg_dump's rule for deciding which casts are system-provided casts that should not be dumped (Tom Lane) - In pg_dump, fix failure to honor -Z - compression level option together with -Fd + In pg_dump, fix failure to honor -Z + compression level option together with -Fd (Michael Paquier) - Make pg_dump consider foreign key relationships + Make pg_dump consider foreign key relationships between extension configuration tables while choosing dump order (Gilles Darold, Michael Paquier, Stephen Frost) @@ -4050,14 +4420,14 @@ Branch: REL9_0_STABLE [850e1a566] 2015-05-18 17:44:21 -0300 - Fix dumping of views that are just VALUES(...) but have + Fix dumping of views that are just VALUES(...) 
but have column aliases (Tom Lane) - In pg_upgrade, force timeline 1 in the new cluster + In pg_upgrade, force timeline 1 in the new cluster (Bruce Momjian) @@ -4069,7 +4439,7 @@ Branch: REL9_0_STABLE [850e1a566] 2015-05-18 17:44:21 -0300 - In pg_upgrade, check for improperly non-connectable + In pg_upgrade, check for improperly non-connectable databases before proceeding (Bruce Momjian) @@ -4077,28 +4447,28 @@ Branch: REL9_0_STABLE [850e1a566] 2015-05-18 17:44:21 -0300 - In pg_upgrade, quote directory paths - properly in the generated delete_old_cluster script + In pg_upgrade, quote directory paths + properly in the generated delete_old_cluster script (Bruce Momjian) - In pg_upgrade, preserve database-level freezing info + In pg_upgrade, preserve database-level freezing info properly (Bruce Momjian) This oversight could cause missing-clog-file errors for tables within - the postgres and template1 databases. + the postgres and template1 databases. - Run pg_upgrade and pg_resetxlog with + Run pg_upgrade and pg_resetxlog with restricted privileges on Windows, so that they don't fail when run by an administrator (Muhammad Asif Naeem) @@ -4106,8 +4476,8 @@ Branch: REL9_0_STABLE [850e1a566] 2015-05-18 17:44:21 -0300 - Improve handling of readdir() failures when scanning - directories in initdb and pg_basebackup + Improve handling of readdir() failures when scanning + directories in initdb and pg_basebackup (Marco Nenciarini) @@ -4119,18 +4489,18 @@ Branch: REL9_2_STABLE [6b700301c] 2015-02-17 16:03:00 +0100 - Fix failure in pg_receivexlog (Andres Freund) + Fix failure in pg_receivexlog (Andres Freund) A patch merge mistake in 9.2.10 led to could not create archive - status file errors. + status file errors. - Fix slow sorting algorithm in contrib/intarray (Tom Lane) + Fix slow sorting algorithm in contrib/intarray (Tom Lane) @@ -4142,7 +4512,7 @@ Branch: REL9_2_STABLE [6b700301c] 2015-02-17 16:03:00 +0100 - Update time zone data files to tzdata release 2015d + Update time zone data files to tzdata release 2015d for DST law changes in Egypt, Mongolia, and Palestine, plus historical changes in Canada and Chile. Also adopt revised zone abbreviations for the America/Adak zone (HST/HDT not HAST/HADT). @@ -4165,7 +4535,7 @@ Branch: REL9_2_STABLE [6b700301c] 2015-02-17 16:03:00 +0100 This release contains a variety of fixes from 9.2.9. For information about new features in the 9.2 major release, see - . + . @@ -4177,16 +4547,16 @@ Branch: REL9_2_STABLE [6b700301c] 2015-02-17 16:03:00 +0100 However, if you are a Windows user and are using the Norwegian - (Bokmål) locale, manual action is needed after the upgrade to - replace any Norwegian (Bokmål)_Norway locale names stored - in PostgreSQL system catalogs with the plain-ASCII - alias Norwegian_Norway. For details see - + (Bokmål) locale, manual action is needed after the upgrade to + replace any Norwegian (Bokmål)_Norway locale names stored + in PostgreSQL system catalogs with the plain-ASCII + alias Norwegian_Norway. For details see + Also, if you are upgrading from a version earlier than 9.2.9, - see . + see . @@ -4198,15 +4568,15 @@ Branch: REL9_2_STABLE [6b700301c] 2015-02-17 16:03:00 +0100 - Fix buffer overruns in to_char() + Fix buffer overruns in to_char() (Bruce Momjian) - When to_char() processes a numeric formatting template - calling for a large number of digits, PostgreSQL + When to_char() processes a numeric formatting template + calling for a large number of digits, PostgreSQL would read past the end of a buffer. 
When processing a crafted - timestamp formatting template, PostgreSQL would write + timestamp formatting template, PostgreSQL would write past the end of a buffer. Either case could crash the server. We have not ruled out the possibility of attacks that lead to privilege escalation, though they seem unlikely. @@ -4216,27 +4586,27 @@ Branch: REL9_2_STABLE [6b700301c] 2015-02-17 16:03:00 +0100 - Fix buffer overrun in replacement *printf() functions + Fix buffer overrun in replacement *printf() functions (Tom Lane) - PostgreSQL includes a replacement implementation - of printf and related functions. This code will overrun + PostgreSQL includes a replacement implementation + of printf and related functions. This code will overrun a stack buffer when formatting a floating point number (conversion - specifiers e, E, f, F, - g or G) with requested precision greater than + specifiers e, E, f, F, + g or G) with requested precision greater than about 500. This will crash the server, and we have not ruled out the possibility of attacks that lead to privilege escalation. A database user can trigger such a buffer overrun through - the to_char() SQL function. While that is the only - affected core PostgreSQL functionality, extension + the to_char() SQL function. While that is the only + affected core PostgreSQL functionality, extension modules that use printf-family functions may be at risk as well. - This issue primarily affects PostgreSQL on Windows. - PostgreSQL uses the system implementation of these + This issue primarily affects PostgreSQL on Windows. + PostgreSQL uses the system implementation of these functions where adequate, which it is on other modern platforms. (CVE-2015-0242) @@ -4244,12 +4614,12 @@ Branch: REL9_2_STABLE [6b700301c] 2015-02-17 16:03:00 +0100 - Fix buffer overruns in contrib/pgcrypto + Fix buffer overruns in contrib/pgcrypto (Marko Tiikkaja, Noah Misch) - Errors in memory size tracking within the pgcrypto + Errors in memory size tracking within the pgcrypto module permitted stack buffer overruns and improper dependence on the contents of uninitialized memory. The buffer overrun cases can crash the server, and we have not ruled out the possibility of @@ -4290,7 +4660,7 @@ Branch: REL9_2_STABLE [6b700301c] 2015-02-17 16:03:00 +0100 Some server error messages show the values of columns that violate a constraint, such as a unique constraint. If the user does not have - SELECT privilege on all columns of the table, this could + SELECT privilege on all columns of the table, this could mean exposing values that the user should not be able to see. Adjust the code so that values are displayed only when they came from the SQL command or could be selected by the user. @@ -4315,35 +4685,35 @@ Branch: REL9_2_STABLE [6b700301c] 2015-02-17 16:03:00 +0100 - Cope with the Windows locale named Norwegian (Bokmål) + Cope with the Windows locale named Norwegian (Bokmål) (Heikki Linnakangas) Non-ASCII locale names are problematic since it's not clear what encoding they should be represented in. Map the troublesome locale - name to a plain-ASCII alias, Norwegian_Norway. + name to a plain-ASCII alias, Norwegian_Norway. 
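Purely as an illustration of the to_char() entries above, these are the kinds of numeric and timestamp formatting templates involved (they do not attempt to reproduce the overruns):

    SELECT to_char(12345.678, '99999D999');                -- numeric template
    SELECT to_char(now(), 'YYYY-MM-DD HH24:MI:SS TZ');     -- timestamp template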
Avoid possible data corruption if ALTER DATABASE SET - TABLESPACE is used to move a database to a new tablespace and then + TABLESPACE is used to move a database to a new tablespace and then shortly later move it back to its original tablespace (Tom Lane) - Avoid corrupting tables when ANALYZE inside a transaction + Avoid corrupting tables when ANALYZE inside a transaction is rolled back (Andres Freund, Tom Lane, Michael Paquier) If the failing transaction had earlier removed the last index, rule, or trigger from the table, the table would be left in a corrupted state - with the relevant pg_class flags not set though they + with the relevant pg_class flags not set though they should be. @@ -4351,14 +4721,14 @@ Branch: REL9_2_STABLE [6b700301c] 2015-02-17 16:03:00 +0100 Ensure that unlogged tables are copied correctly - during CREATE DATABASE or ALTER DATABASE SET - TABLESPACE (Pavan Deolasee, Andres Freund) + during CREATE DATABASE or ALTER DATABASE SET + TABLESPACE (Pavan Deolasee, Andres Freund) - Fix DROP's dependency searching to correctly handle the + Fix DROP's dependency searching to correctly handle the case where a table column is recursively visited before its table (Petr Jelinek, Tom Lane) @@ -4366,7 +4736,7 @@ Branch: REL9_2_STABLE [6b700301c] 2015-02-17 16:03:00 +0100 This case is only known to arise when an extension creates both a datatype and a table using that datatype. The faulty code might - refuse a DROP EXTENSION unless CASCADE is + refuse a DROP EXTENSION unless CASCADE is specified, which should not be required. @@ -4378,22 +4748,22 @@ Branch: REL9_2_STABLE [6b700301c] 2015-02-17 16:03:00 +0100 - In READ COMMITTED mode, queries that lock or update + In READ COMMITTED mode, queries that lock or update recently-updated rows could crash as a result of this bug. - Fix planning of SELECT FOR UPDATE when using a partial + Fix planning of SELECT FOR UPDATE when using a partial index on a child table (Kyotaro Horiguchi) - In READ COMMITTED mode, SELECT FOR UPDATE must - also recheck the partial index's WHERE condition when + In READ COMMITTED mode, SELECT FOR UPDATE must + also recheck the partial index's WHERE condition when rechecking a recently-updated row to see if it still satisfies the - query's WHERE condition. This requirement was missed if the + query's WHERE condition. This requirement was missed if the index belonged to an inheritance child table, so that it was possible to incorrectly return rows that no longer satisfy the query condition. @@ -4401,12 +4771,12 @@ Branch: REL9_2_STABLE [6b700301c] 2015-02-17 16:03:00 +0100 - Fix corner case wherein SELECT FOR UPDATE could return a row + Fix corner case wherein SELECT FOR UPDATE could return a row twice, and possibly miss returning other rows (Tom Lane) - In READ COMMITTED mode, a SELECT FOR UPDATE + In READ COMMITTED mode, a SELECT FOR UPDATE that is scanning an inheritance tree could incorrectly return a row from a prior child table instead of the one it should return from a later child table. 
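A minimal sketch of the query shape covered by the two SELECT FOR UPDATE entries above, using a hypothetical inheritance tree; under READ COMMITTED, concurrent updates during such a scan could formerly cause a row to be missed, duplicated, or not rechecked against a partial index's WHERE clause:

    CREATE TABLE parent (id int, val text);
    CREATE TABLE child1 () INHERITS (parent);
    CREATE TABLE child2 () INHERITS (parent);
    BEGIN;
    SELECT * FROM parent WHERE id = 1 FOR UPDATE;   -- scans parent and both children
    COMMIT;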
@@ -4416,7 +4786,7 @@ Branch: REL9_2_STABLE [6b700301c] 2015-02-17 16:03:00 +0100 Reject duplicate column names in the referenced-columns list of - a FOREIGN KEY declaration (David Rowley) + a FOREIGN KEY declaration (David Rowley) @@ -4442,7 +4812,7 @@ Branch: REL9_2_STABLE [6b700301c] 2015-02-17 16:03:00 +0100 - Fix bugs in raising a numeric value to a large integral power + Fix bugs in raising a numeric value to a large integral power (Tom Lane) @@ -4455,19 +4825,19 @@ Branch: REL9_2_STABLE [6b700301c] 2015-02-17 16:03:00 +0100 - In numeric_recv(), truncate away any fractional digits - that would be hidden according to the value's dscale field + In numeric_recv(), truncate away any fractional digits + that would be hidden according to the value's dscale field (Tom Lane) - A numeric value's display scale (dscale) should + A numeric value's display scale (dscale) should never be less than the number of nonzero fractional digits; but apparently there's at least one broken client application that - transmits binary numeric values in which that's true. + transmits binary numeric values in which that's true. This leads to strange behavior since the extra digits are taken into account by arithmetic operations even though they aren't printed. - The least risky fix seems to be to truncate away such hidden + The least risky fix seems to be to truncate away such hidden digits on receipt, so that the value is indeed what it prints as. @@ -4480,7 +4850,7 @@ Branch: REL9_2_STABLE [6b700301c] 2015-02-17 16:03:00 +0100 Matching would often fail when the number of allowed iterations is - limited by a ? quantifier or a bound expression. + limited by a ? quantifier or a bound expression. @@ -4499,7 +4869,7 @@ Branch: REL9_2_STABLE [6b700301c] 2015-02-17 16:03:00 +0100 - Fix bugs in tsquery @> tsquery + Fix bugs in tsquery @> tsquery operator (Heikki Linnakangas) @@ -4530,14 +4900,14 @@ Branch: REL9_2_STABLE [6b700301c] 2015-02-17 16:03:00 +0100 - Fix namespace handling in xpath() (Ali Akbar) + Fix namespace handling in xpath() (Ali Akbar) - Previously, the xml value resulting from - an xpath() call would not have namespace declarations if + Previously, the xml value resulting from + an xpath() call would not have namespace declarations if the namespace declarations were attached to an ancestor element in the - input xml value, rather than to the specific element being + input xml value, rather than to the specific element being returned. Propagate the ancestral declaration so that the result is correct when considered in isolation. @@ -4551,7 +4921,7 @@ Branch: REL9_2_STABLE [6b700301c] 2015-02-17 16:03:00 +0100 - In some contexts, constructs like row_to_json(tab.*) may + In some contexts, constructs like row_to_json(tab.*) may not produce the expected column names. This is fixed properly as of 9.4; in older branches, just ensure that we produce some nonempty name. 
(In some cases this will be the underlying table's column name @@ -4563,19 +4933,19 @@ Branch: REL9_2_STABLE [6b700301c] 2015-02-17 16:03:00 +0100 Fix mishandling of system columns, - particularly tableoid, in FDW queries (Etsuro Fujita) + particularly tableoid, in FDW queries (Etsuro Fujita) - Avoid doing indexed_column = ANY - (array) as an index qualifier if that leads + Avoid doing indexed_column = ANY + (array) as an index qualifier if that leads to an inferior plan (Andrew Gierth) - In some cases, = ANY conditions applied to non-first index + In some cases, = ANY conditions applied to non-first index columns would be done as index conditions even though it would be better to use them as simple filter conditions. @@ -4584,7 +4954,7 @@ Branch: REL9_2_STABLE [6b700301c] 2015-02-17 16:03:00 +0100 Fix planner problems with nested append relations, such as inherited - tables within UNION ALL subqueries (Tom Lane) + tables within UNION ALL subqueries (Tom Lane) @@ -4597,8 +4967,8 @@ Branch: REL9_2_STABLE [6b700301c] 2015-02-17 16:03:00 +0100 - Exempt tables that have per-table cost_limit - and/or cost_delay settings from autovacuum's global cost + Exempt tables that have per-table cost_limit + and/or cost_delay settings from autovacuum's global cost balancing rules (Álvaro Herrera) @@ -4624,7 +4994,7 @@ Branch: REL9_2_STABLE [6b700301c] 2015-02-17 16:03:00 +0100 the target database, if they met the usual thresholds for autovacuuming. This is at best pretty unexpected; at worst it delays response to the wraparound threat. Fix it so that if autovacuum is - turned off, workers only do anti-wraparound vacuums and + turned off, workers only do anti-wraparound vacuums and not any other work. @@ -4657,12 +5027,12 @@ Branch: REL9_2_STABLE [6b700301c] 2015-02-17 16:03:00 +0100 Fix several cases where recovery logic improperly ignored WAL records - for COMMIT/ABORT PREPARED (Heikki Linnakangas) + for COMMIT/ABORT PREPARED (Heikki Linnakangas) The most notable oversight was - that recovery_target_xid could not be used to stop at + that recovery_target_xid could not be used to stop at a two-phase commit. @@ -4676,7 +5046,7 @@ Branch: REL9_2_STABLE [6b700301c] 2015-02-17 16:03:00 +0100 - Avoid creating unnecessary .ready marker files for + Avoid creating unnecessary .ready marker files for timeline history files (Fujii Masao) @@ -4684,14 +5054,14 @@ Branch: REL9_2_STABLE [6b700301c] 2015-02-17 16:03:00 +0100 Fix possible null pointer dereference when an empty prepared statement - is used and the log_statement setting is mod - or ddl (Fujii Masao) + is used and the log_statement setting is mod + or ddl (Fujii Masao) - Change pgstat wait timeout warning message to be LOG level, + Change pgstat wait timeout warning message to be LOG level, and rephrase it to be more understandable (Tom Lane) @@ -4700,7 +5070,7 @@ Branch: REL9_2_STABLE [6b700301c] 2015-02-17 16:03:00 +0100 case, but it occurs often enough on our slower buildfarm members to be a nuisance. Reduce it to LOG level, and expend a bit more effort on the wording: it now reads using stale statistics instead of - current ones because stats collector is not responding. + current ones because stats collector is not responding. 
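For reference, the per-table settings mentioned in the autovacuum cost-balancing entry above are declared as storage parameters; the table name here is hypothetical:

    ALTER TABLE big_table
        SET (autovacuum_vacuum_cost_limit = 1000,
             autovacuum_vacuum_cost_delay = 10);
    -- Tables with their own settings are now exempt from global cost balancing.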
@@ -4714,32 +5084,32 @@ Branch: REL9_2_STABLE [6b700301c] 2015-02-17 16:03:00 +0100 - Warn if macOS's setlocale() starts an unwanted extra + Warn if macOS's setlocale() starts an unwanted extra thread inside the postmaster (Noah Misch) - Fix processing of repeated dbname parameters - in PQconnectdbParams() (Alex Shulgin) + Fix processing of repeated dbname parameters + in PQconnectdbParams() (Alex Shulgin) Unexpected behavior ensued if the first occurrence - of dbname contained a connection string or URI to be + of dbname contained a connection string or URI to be expanded. - Ensure that libpq reports a suitable error message on + Ensure that libpq reports a suitable error message on unexpected socket EOF (Marko Tiikkaja, Tom Lane) - Depending on kernel behavior, libpq might return an + Depending on kernel behavior, libpq might return an empty error string rather than something useful when the server unexpectedly closed the socket. @@ -4747,14 +5117,14 @@ Branch: REL9_2_STABLE [6b700301c] 2015-02-17 16:03:00 +0100 - Clear any old error message during PQreset() + Clear any old error message during PQreset() (Heikki Linnakangas) - If PQreset() is called repeatedly, and the connection + If PQreset() is called repeatedly, and the connection cannot be re-established, error messages from the failed connection - attempts kept accumulating in the PGconn's error + attempts kept accumulating in the PGconn's error string. @@ -4762,32 +5132,32 @@ Branch: REL9_2_STABLE [6b700301c] 2015-02-17 16:03:00 +0100 Properly handle out-of-memory conditions while parsing connection - options in libpq (Alex Shulgin, Heikki Linnakangas) + options in libpq (Alex Shulgin, Heikki Linnakangas) - Fix array overrun in ecpg's version - of ParseDateTime() (Michael Paquier) + Fix array overrun in ecpg's version + of ParseDateTime() (Michael Paquier) - In initdb, give a clearer error message if a password + In initdb, give a clearer error message if a password file is specified but is empty (Mats Erik Andersson) - Fix psql's \s command to work nicely with + Fix psql's \s command to work nicely with libedit, and add pager support (Stepan Rutz, Tom Lane) - When using libedit rather than readline, \s printed the + When using libedit rather than readline, \s printed the command history in a fairly unreadable encoded format, and on recent libedit versions might fail altogether. Fix that by printing the history ourselves rather than having the library do it. A pleasant @@ -4797,7 +5167,7 @@ Branch: REL9_2_STABLE [6b700301c] 2015-02-17 16:03:00 +0100 This patch also fixes a bug that caused newline encoding to be applied inconsistently when saving the command history with libedit. - Multiline history entries written by older psql + Multiline history entries written by older psql versions will be read cleanly with this patch, but perhaps not vice versa, depending on the exact libedit versions involved. @@ -4805,17 +5175,17 @@ Branch: REL9_2_STABLE [6b700301c] 2015-02-17 16:03:00 +0100 - Improve consistency of parsing of psql's special + Improve consistency of parsing of psql's special variables (Tom Lane) - Allow variant spellings of on and off (such - as 1/0) for ECHO_HIDDEN - and ON_ERROR_ROLLBACK. Report a warning for unrecognized - values for COMP_KEYWORD_CASE, ECHO, - ECHO_HIDDEN, HISTCONTROL, - ON_ERROR_ROLLBACK, and VERBOSITY. Recognize + Allow variant spellings of on and off (such + as 1/0) for ECHO_HIDDEN + and ON_ERROR_ROLLBACK. 
Report a warning for unrecognized + values for COMP_KEYWORD_CASE, ECHO, + ECHO_HIDDEN, HISTCONTROL, + ON_ERROR_ROLLBACK, and VERBOSITY. Recognize all values for all these variables case-insensitively; previously there was a mishmash of case-sensitive and case-insensitive behaviors. @@ -4823,16 +5193,16 @@ Branch: REL9_2_STABLE [6b700301c] 2015-02-17 16:03:00 +0100 - Fix psql's expanded-mode display to work - consistently when using border = 3 - and linestyle = ascii or unicode + Fix psql's expanded-mode display to work + consistently when using border = 3 + and linestyle = ascii or unicode (Stephen Frost) - Improve performance of pg_dump when the database + Improve performance of pg_dump when the database contains many instances of multiple dependency paths between the same two objects (Tom Lane) @@ -4840,7 +5210,7 @@ Branch: REL9_2_STABLE [6b700301c] 2015-02-17 16:03:00 +0100 - Fix pg_dumpall to restore its ability to dump from + Fix pg_dumpall to restore its ability to dump from pre-8.1 servers (Gilles Darold) @@ -4854,28 +5224,28 @@ Branch: REL9_2_STABLE [6b700301c] 2015-02-17 16:03:00 +0100 - Fix core dump in pg_dump --binary-upgrade on zero-column + Fix core dump in pg_dump --binary-upgrade on zero-column composite type (Rushabh Lathia) - Prevent WAL files created by pg_basebackup -x/-X from + Prevent WAL files created by pg_basebackup -x/-X from being archived again when the standby is promoted (Andres Freund) - Fix failure of contrib/auto_explain to print per-node - timing information when doing EXPLAIN ANALYZE (Tom Lane) + Fix failure of contrib/auto_explain to print per-node + timing information when doing EXPLAIN ANALYZE (Tom Lane) - Fix upgrade-from-unpackaged script for contrib/citext + Fix upgrade-from-unpackaged script for contrib/citext (Tom Lane) @@ -4883,7 +5253,7 @@ Branch: REL9_2_STABLE [6b700301c] 2015-02-17 16:03:00 +0100 Fix block number checking - in contrib/pageinspect's get_raw_page() + in contrib/pageinspect's get_raw_page() (Tom Lane) @@ -4895,7 +5265,7 @@ Branch: REL9_2_STABLE [6b700301c] 2015-02-17 16:03:00 +0100 - Fix contrib/pgcrypto's pgp_sym_decrypt() + Fix contrib/pgcrypto's pgp_sym_decrypt() to not fail on messages whose length is 6 less than a power of 2 (Marko Tiikkaja) @@ -4903,7 +5273,7 @@ Branch: REL9_2_STABLE [6b700301c] 2015-02-17 16:03:00 +0100 - Fix file descriptor leak in contrib/pg_test_fsync + Fix file descriptor leak in contrib/pg_test_fsync (Jeff Janes) @@ -4915,24 +5285,24 @@ Branch: REL9_2_STABLE [6b700301c] 2015-02-17 16:03:00 +0100 Handle unexpected query results, especially NULLs, safely in - contrib/tablefunc's connectby() + contrib/tablefunc's connectby() (Michael Paquier) - connectby() previously crashed if it encountered a NULL + connectby() previously crashed if it encountered a NULL key value. It now prints that row but doesn't recurse further. - Avoid a possible crash in contrib/xml2's - xslt_process() (Mark Simonetti) + Avoid a possible crash in contrib/xml2's + xslt_process() (Mark Simonetti) - libxslt seems to have an undocumented dependency on + libxslt seems to have an undocumented dependency on the order in which resources are freed; reorder our calls to avoid a crash. 
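A self-contained sketch of the connectby() entry above (hypothetical table and data; requires the tablefunc extension):

    CREATE EXTENSION IF NOT EXISTS tablefunc;
    CREATE TABLE tree (keyid text, parent_keyid text);
    INSERT INTO tree VALUES ('root', NULL), ('a', 'root'), (NULL, 'root');
    -- The NULL-keyed row is now printed but not recursed into, instead of crashing:
    SELECT * FROM connectby('tree', 'keyid', 'parent_keyid', 'root', 0, '~')
        AS t(keyid text, parent_keyid text, level int, branch text);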
@@ -4940,7 +5310,7 @@ Branch: REL9_2_STABLE [6b700301c] 2015-02-17 16:03:00 +0100 - Mark some contrib I/O functions with correct volatility + Mark some contrib I/O functions with correct volatility properties (Tom Lane) @@ -4974,29 +5344,29 @@ Branch: REL9_2_STABLE [6b700301c] 2015-02-17 16:03:00 +0100 With OpenLDAP versions 2.4.24 through 2.4.31, - inclusive, PostgreSQL backends can crash at exit. - Raise a warning during configure based on the + inclusive, PostgreSQL backends can crash at exit. + Raise a warning during configure based on the compile-time OpenLDAP version number, and test the crashing scenario - in the contrib/dblink regression test. + in the contrib/dblink regression test. - In non-MSVC Windows builds, ensure libpq.dll is installed + In non-MSVC Windows builds, ensure libpq.dll is installed with execute permissions (Noah Misch) - Make pg_regress remove any temporary installation it + Make pg_regress remove any temporary installation it created upon successful exit (Tom Lane) This results in a very substantial reduction in disk space usage - during make check-world, since that sequence involves + during make check-world, since that sequence involves creation of numerous temporary installations. @@ -5008,15 +5378,15 @@ Branch: REL9_2_STABLE [6b700301c] 2015-02-17 16:03:00 +0100 - Previously, PostgreSQL assumed that the UTC offset - associated with a time zone abbreviation (such as EST) + Previously, PostgreSQL assumed that the UTC offset + associated with a time zone abbreviation (such as EST) never changes in the usage of any particular locale. However this assumption fails in the real world, so introduce the ability for a zone abbreviation to represent a UTC offset that sometimes changes. Update the zone abbreviation definition files to make use of this feature in timezone locales that have changed the UTC offset of their abbreviations since 1970 (according to the IANA timezone database). - In such timezones, PostgreSQL will now associate the + In such timezones, PostgreSQL will now associate the correct UTC offset with the abbreviation depending on the given date. @@ -5028,9 +5398,9 @@ Branch: REL9_2_STABLE [6b700301c] 2015-02-17 16:03:00 +0100 Add CST (China Standard Time) to our lists. - Remove references to ADT as Arabia Daylight Time, an + Remove references to ADT as Arabia Daylight Time, an abbreviation that's been out of use since 2007; therefore, claiming - there is a conflict with Atlantic Daylight Time doesn't seem + there is a conflict with Atlantic Daylight Time doesn't seem especially helpful. Fix entirely incorrect GMT offsets for CKT (Cook Islands), FJT, and FJST (Fiji); we didn't even have them on the proper side of the date line. @@ -5039,21 +5409,21 @@ Branch: REL9_2_STABLE [6b700301c] 2015-02-17 16:03:00 +0100 - Update time zone data files to tzdata release 2015a. + Update time zone data files to tzdata release 2015a. The IANA timezone database has adopted abbreviations of the form - AxST/AxDT + AxST/AxDT for all Australian time zones, reflecting what they believe to be current majority practice Down Under. These names do not conflict with usage elsewhere (other than ACST for Acre Summer Time, which has been in disuse since 1994). Accordingly, adopt these names into - our Default timezone abbreviation set. - The Australia abbreviation set now contains only CST, EAST, + our Default timezone abbreviation set. + The Australia abbreviation set now contains only CST, EAST, EST, SAST, SAT, and WST, all of which are thought to be mostly historical usage. 
Note that SAST has also been changed to be South - Africa Standard Time in the Default abbreviation set. + Africa Standard Time in the Default abbreviation set. @@ -5082,7 +5452,7 @@ Branch: REL9_2_STABLE [6b700301c] 2015-02-17 16:03:00 +0100 This release contains a variety of fixes from 9.2.8. For information about new features in the 9.2 major release, see - . + . @@ -5100,7 +5470,7 @@ Branch: REL9_2_STABLE [6b700301c] 2015-02-17 16:03:00 +0100 Also, if you are upgrading from a version earlier than 9.2.6, - see . + see . @@ -5112,15 +5482,15 @@ Branch: REL9_2_STABLE [6b700301c] 2015-02-17 16:03:00 +0100 - Correctly initialize padding bytes in contrib/btree_gist - indexes on bit columns (Heikki Linnakangas) + Correctly initialize padding bytes in contrib/btree_gist + indexes on bit columns (Heikki Linnakangas) This error could result in incorrect query results due to values that should compare equal not being seen as equal. - Users with GiST indexes on bit or bit varying - columns should REINDEX those indexes after installing this + Users with GiST indexes on bit or bit varying + columns should REINDEX those indexes after installing this update. @@ -5158,7 +5528,7 @@ Branch: REL9_2_STABLE [6b700301c] 2015-02-17 16:03:00 +0100 - Fix feedback status when is + Fix feedback status when is turned off on-the-fly (Simon Riggs) @@ -5166,7 +5536,7 @@ Branch: REL9_2_STABLE [6b700301c] 2015-02-17 16:03:00 +0100 Fix possibly-incorrect cache invalidation during nested calls - to ReceiveSharedInvalidMessages (Andres Freund) + to ReceiveSharedInvalidMessages (Andres Freund) @@ -5178,14 +5548,14 @@ Branch: REL9_2_STABLE [6b700301c] 2015-02-17 16:03:00 +0100 This oversight could result in variable not found in subplan - target lists errors, or in silently wrong query results. + target lists errors, or in silently wrong query results. - Fix could not find pathkey item to sort planner failures - with UNION ALL over subqueries reading from tables with + Fix could not find pathkey item to sort planner failures + with UNION ALL over subqueries reading from tables with inheritance children (Tom Lane) @@ -5206,7 +5576,7 @@ Branch: REL9_2_STABLE [6b700301c] 2015-02-17 16:03:00 +0100 Improve planner to drop constant-NULL inputs - of AND/OR when possible (Tom Lane) + of AND/OR when possible (Tom Lane) @@ -5218,13 +5588,13 @@ Branch: REL9_2_STABLE [6b700301c] 2015-02-17 16:03:00 +0100 - Fix identification of input type category in to_json() + Fix identification of input type category in to_json() and friends (Tom Lane) - This is known to have led to inadequate quoting of money - fields in the JSON result, and there may have been wrong + This is known to have led to inadequate quoting of money + fields in the JSON result, and there may have been wrong results for other data types as well. @@ -5239,13 +5609,13 @@ Branch: REL9_2_STABLE [6b700301c] 2015-02-17 16:03:00 +0100 This corrects cases where TOAST pointers could be copied into other tables without being dereferenced. If the original data is later deleted, it would lead to errors like missing chunk number 0 - for toast value ... when the now-dangling pointer is used. + for toast value ... when the now-dangling pointer is used. 
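A small example of the quoting issue just described, using row_to_json() (one of the related functions available in 9.2); money has no native JSON representation, so its text form must be quoted:

    SELECT row_to_json(t) FROM (SELECT '12.34'::money AS price) AS t;
    -- correct output quotes the value, e.g. {"price":"$12.34"}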
- Fix record type has not been registered failures with + Fix record type has not been registered failures with whole-row references to the output of Append plan nodes (Tom Lane) @@ -5260,7 +5630,7 @@ Branch: REL9_2_STABLE [6b700301c] 2015-02-17 16:03:00 +0100 Fix query-lifespan memory leak while evaluating the arguments for a - function in FROM (Tom Lane) + function in FROM (Tom Lane) @@ -5273,14 +5643,14 @@ Branch: REL9_2_STABLE [6b700301c] 2015-02-17 16:03:00 +0100 - Fix data encoding error in hungarian.stop (Tom Lane) + Fix data encoding error in hungarian.stop (Tom Lane) Prevent foreign tables from being created with OIDS - when is true + when is true (Etsuro Fujita) @@ -5294,19 +5664,19 @@ Branch: REL9_2_STABLE [6b700301c] 2015-02-17 16:03:00 +0100 This could cause problems (at least spurious warnings, and at worst an - infinite loop) if CREATE INDEX or CLUSTER were + infinite loop) if CREATE INDEX or CLUSTER were done later in the same transaction. - Clear pg_stat_activity.xact_start - during PREPARE TRANSACTION (Andres Freund) + Clear pg_stat_activity.xact_start + during PREPARE TRANSACTION (Andres Freund) - After the PREPARE, the originating session is no longer in + After the PREPARE, the originating session is no longer in a transaction, so it should not continue to display a transaction start time. @@ -5314,7 +5684,7 @@ Branch: REL9_2_STABLE [6b700301c] 2015-02-17 16:03:00 +0100 - Fix REASSIGN OWNED to not fail for text search objects + Fix REASSIGN OWNED to not fail for text search objects (Álvaro Herrera) @@ -5326,14 +5696,14 @@ Branch: REL9_2_STABLE [6b700301c] 2015-02-17 16:03:00 +0100 This ensures that the postmaster will properly clean up after itself - if, for example, it receives SIGINT while still + if, for example, it receives SIGINT while still starting up. - Fix client host name lookup when processing pg_hba.conf + Fix client host name lookup when processing pg_hba.conf entries that specify host names instead of IP addresses (Tom Lane) @@ -5347,21 +5717,21 @@ Branch: REL9_2_STABLE [6b700301c] 2015-02-17 16:03:00 +0100 - Allow the root user to use postgres -C variable and - postgres --describe-config (MauMau) + Allow the root user to use postgres -C variable and + postgres --describe-config (MauMau) The prohibition on starting the server as root does not need to extend to these operations, and relaxing it prevents failure - of pg_ctl in some scenarios. + of pg_ctl in some scenarios. Secure Unix-domain sockets of temporary postmasters started during - make check (Noah Misch) + make check (Noah Misch) @@ -5370,16 +5740,16 @@ Branch: REL9_2_STABLE [6b700301c] 2015-02-17 16:03:00 +0100 the operating-system user running the test, as we previously noted in CVE-2014-0067. This change defends against that risk by placing the server's socket in a temporary, mode 0700 subdirectory - of /tmp. The hazard remains however on platforms where + of /tmp. The hazard remains however on platforms where Unix sockets are not supported, notably Windows, because then the temporary postmaster must accept local TCP connections. A useful side effect of this change is to simplify - make check testing in builds that - override DEFAULT_PGSOCKET_DIR. Popular non-default values - like /var/run/postgresql are often not writable by the + make check testing in builds that + override DEFAULT_PGSOCKET_DIR. Popular non-default values + like /var/run/postgresql are often not writable by the build user, requiring workarounds that will no longer be necessary. 
@@ -5399,7 +5769,7 @@ Branch: REL9_2_STABLE [6b700301c] 2015-02-17 16:03:00 +0100 On Windows, allow new sessions to absorb values of PGC_BACKEND - parameters (such as ) from the + parameters (such as ) from the configuration file (Amit Kapila) @@ -5415,15 +5785,15 @@ Branch: REL9_2_STABLE [6b700301c] 2015-02-17 16:03:00 +0100 - This oversight could cause initdb - and pg_upgrade to fail on Windows, if the installation - path contained both spaces and @ signs. + This oversight could cause initdb + and pg_upgrade to fail on Windows, if the installation + path contained both spaces and @ signs. - Fix linking of libpython on macOS (Tom Lane) + Fix linking of libpython on macOS (Tom Lane) @@ -5434,17 +5804,17 @@ Branch: REL9_2_STABLE [6b700301c] 2015-02-17 16:03:00 +0100 - Avoid buffer bloat in libpq when the server + Avoid buffer bloat in libpq when the server consistently sends data faster than the client can absorb it (Shin-ichi Morita, Tom Lane) - libpq could be coerced into enlarging its input buffer + libpq could be coerced into enlarging its input buffer until it runs out of memory (which would be reported misleadingly - as lost synchronization with server). Under ordinary + as lost synchronization with server). Under ordinary circumstances it's quite far-fetched that data could be continuously - transmitted more quickly than the recv() loop can + transmitted more quickly than the recv() loop can absorb it, but this has been observed when the client is artificially slowed by scheduler constraints. @@ -5452,15 +5822,15 @@ Branch: REL9_2_STABLE [6b700301c] 2015-02-17 16:03:00 +0100 - Ensure that LDAP lookup attempts in libpq time out as + Ensure that LDAP lookup attempts in libpq time out as intended (Laurenz Albe) - Fix ecpg to do the right thing when an array - of char * is the target for a FETCH statement returning more + Fix ecpg to do the right thing when an array + of char * is the target for a FETCH statement returning more than one row, as well as some other array-handling fixes (Ashutosh Bapat) @@ -5468,52 +5838,52 @@ Branch: REL9_2_STABLE [6b700301c] 2015-02-17 16:03:00 +0100 - Fix pg_restore's processing of old-style large object + Fix pg_restore's processing of old-style large object comments (Tom Lane) A direct-to-database restore from an archive file generated by a - pre-9.0 version of pg_dump would usually fail if the + pre-9.0 version of pg_dump would usually fail if the archive contained more than a few comments for large objects. - Fix pg_upgrade for cases where the new server creates + Fix pg_upgrade for cases where the new server creates a TOAST table but the old version did not (Bruce Momjian) - This rare situation would manifest as relation OID mismatch + This rare situation would manifest as relation OID mismatch errors. - Prevent contrib/auto_explain from changing the output of - a user's EXPLAIN (Tom Lane) + Prevent contrib/auto_explain from changing the output of + a user's EXPLAIN (Tom Lane) - If auto_explain is active, it could cause - an EXPLAIN (ANALYZE, TIMING OFF) command to nonetheless + If auto_explain is active, it could cause + an EXPLAIN (ANALYZE, TIMING OFF) command to nonetheless print timing information. 
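To illustrate the auto_explain entry above, a minimal session sketch (run as a superuser so that LOAD is permitted):

    LOAD 'auto_explain';
    SET auto_explain.log_min_duration = 0;
    -- With the fix, TIMING OFF is honored even while auto_explain is active:
    EXPLAIN (ANALYZE, TIMING OFF) SELECT count(*) FROM pg_class;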
- Fix query-lifespan memory leak in contrib/dblink + Fix query-lifespan memory leak in contrib/dblink (MauMau, Joe Conway) - In contrib/pgcrypto functions, ensure sensitive + In contrib/pgcrypto functions, ensure sensitive information is cleared from stack variables before returning (Marko Kreen) @@ -5522,27 +5892,27 @@ Branch: REL9_2_STABLE [6b700301c] 2015-02-17 16:03:00 +0100 Prevent use of already-freed memory in - contrib/pgstattuple's pgstat_heap() + contrib/pgstattuple's pgstat_heap() (Noah Misch) - In contrib/uuid-ossp, cache the state of the OSSP UUID + In contrib/uuid-ossp, cache the state of the OSSP UUID library across calls (Tom Lane) This improves the efficiency of UUID generation and reduces the amount - of entropy drawn from /dev/urandom, on platforms that + of entropy drawn from /dev/urandom, on platforms that have that. - Update time zone data files to tzdata release 2014e + Update time zone data files to tzdata release 2014e for DST law changes in Crimea, Egypt, and Morocco. @@ -5563,7 +5933,7 @@ Branch: REL9_2_STABLE [6b700301c] 2015-02-17 16:03:00 +0100 This release contains a variety of fixes from 9.2.7. For information about new features in the 9.2 major release, see - . + . @@ -5575,7 +5945,7 @@ Branch: REL9_2_STABLE [6b700301c] 2015-02-17 16:03:00 +0100 However, if you are upgrading from a version earlier than 9.2.6, - see . + see . @@ -5602,7 +5972,7 @@ Branch: REL9_2_STABLE [6b700301c] 2015-02-17 16:03:00 +0100 Avoid race condition in checking transaction commit status during - receipt of a NOTIFY message (Marko Tiikkaja) + receipt of a NOTIFY message (Marko Tiikkaja) @@ -5626,7 +5996,7 @@ Branch: REL9_2_STABLE [6b700301c] 2015-02-17 16:03:00 +0100 - Remove incorrect code that tried to allow OVERLAPS with + Remove incorrect code that tried to allow OVERLAPS with single-element row arguments (Joshua Yanovski) @@ -5639,17 +6009,17 @@ Branch: REL9_2_STABLE [6b700301c] 2015-02-17 16:03:00 +0100 - Avoid getting more than AccessShareLock when de-parsing a + Avoid getting more than AccessShareLock when de-parsing a rule or view (Dean Rasheed) - This oversight resulted in pg_dump unexpectedly - acquiring RowExclusiveLock locks on tables mentioned as - the targets of INSERT/UPDATE/DELETE + This oversight resulted in pg_dump unexpectedly + acquiring RowExclusiveLock locks on tables mentioned as + the targets of INSERT/UPDATE/DELETE commands in rules. While usually harmless, that could interfere with concurrent transactions that tried to acquire, for example, - ShareLock on those tables. + ShareLock on those tables. @@ -5668,8 +6038,8 @@ Branch: REL9_2_STABLE [6b700301c] 2015-02-17 16:03:00 +0100 - Fix walsender's failure to shut down cleanly when client - is pg_receivexlog (Fujii Masao) + Fix walsender's failure to shut down cleanly when client + is pg_receivexlog (Fujii Masao) @@ -5689,13 +6059,13 @@ Branch: REL9_2_STABLE [6b700301c] 2015-02-17 16:03:00 +0100 - Prevent interrupts while reporting non-ERROR messages + Prevent interrupts while reporting non-ERROR messages (Tom Lane) This guards against rare server-process freezeups due to recursive - entry to syslog(), and perhaps other related problems. + entry to syslog(), and perhaps other related problems. @@ -5708,13 +6078,13 @@ Branch: REL9_2_STABLE [6b700301c] 2015-02-17 16:03:00 +0100 - Fix tracking of psql script line numbers - during \copy from out-of-line data + Fix tracking of psql script line numbers + during \copy from out-of-line data (Kumar Rajeev Rastogi, Amit Khandekar) - \copy ... 
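A hedged sketch of the de-parsing situation above, with hypothetical object names: a rule whose action writes to another table; dumping such a rule formerly took RowExclusiveLock on that target table rather than AccessShareLock:

    CREATE TABLE audit_log (msg text);
    CREATE VIEW v AS SELECT 1 AS x;
    CREATE RULE v_ins AS ON INSERT TO v DO INSTEAD
        INSERT INTO audit_log VALUES ('insert attempted');
    -- pg_dump now de-parses v_ins holding only AccessShareLock on audit_log.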
from incremented the script file line number + \copy ... from incremented the script file line number for each data line, even if the data was not coming from the script file. This mistake resulted in wrong line numbers being reported for any errors occurring later in the same script file. @@ -5723,14 +6093,14 @@ Branch: REL9_2_STABLE [6b700301c] 2015-02-17 16:03:00 +0100 - Prevent intermittent could not reserve shared memory region + Prevent intermittent could not reserve shared memory region failures on recent Windows versions (MauMau) - Update time zone data files to tzdata release 2014a + Update time zone data files to tzdata release 2014a for DST law changes in Fiji and Turkey, plus historical changes in Israel and Ukraine. @@ -5752,7 +6122,7 @@ Branch: REL9_2_STABLE [6b700301c] 2015-02-17 16:03:00 +0100 This release contains a variety of fixes from 9.2.6. For information about new features in the 9.2 major release, see - . + . @@ -5764,7 +6134,7 @@ Branch: REL9_2_STABLE [6b700301c] 2015-02-17 16:03:00 +0100 However, if you are upgrading from a version earlier than 9.2.6, - see . + see . @@ -5776,19 +6146,19 @@ Branch: REL9_2_STABLE [6b700301c] 2015-02-17 16:03:00 +0100 - Shore up GRANT ... WITH ADMIN OPTION restrictions + Shore up GRANT ... WITH ADMIN OPTION restrictions (Noah Misch) - Granting a role without ADMIN OPTION is supposed to + Granting a role without ADMIN OPTION is supposed to prevent the grantee from adding or removing members from the granted role, but this restriction was easily bypassed by doing SET - ROLE first. The security impact is mostly that a role member can + ROLE first. The security impact is mostly that a role member can revoke the access of others, contrary to the wishes of his grantor. Unapproved role member additions are a lesser concern, since an uncooperative role member could provide most of his rights to others - anyway by creating views or SECURITY DEFINER functions. + anyway by creating views or SECURITY DEFINER functions. (CVE-2014-0060) @@ -5801,7 +6171,7 @@ Branch: REL9_2_STABLE [6b700301c] 2015-02-17 16:03:00 +0100 The primary role of PL validator functions is to be called implicitly - during CREATE FUNCTION, but they are also normal SQL + during CREATE FUNCTION, but they are also normal SQL functions that a user can call explicitly. Calling a validator on a function actually written in some other language was not checked for and could be exploited for privilege-escalation purposes. @@ -5821,7 +6191,7 @@ Branch: REL9_2_STABLE [6b700301c] 2015-02-17 16:03:00 +0100 If the name lookups come to different conclusions due to concurrent activity, we might perform some parts of the DDL on a different table - than other parts. At least in the case of CREATE INDEX, + than other parts. At least in the case of CREATE INDEX, this can be used to cause the permissions checks to be performed against a different table than the index creation, allowing for a privilege escalation attack. @@ -5835,12 +6205,12 @@ Branch: REL9_2_STABLE [6b700301c] 2015-02-17 16:03:00 +0100 - The MAXDATELEN constant was too small for the longest - possible value of type interval, allowing a buffer overrun - in interval_out(). Although the datetime input + The MAXDATELEN constant was too small for the longest + possible value of type interval, allowing a buffer overrun + in interval_out(). 
Although the datetime input functions were more careful about avoiding buffer overrun, the limit was short enough to cause them to reject some valid inputs, such as - input containing a very long timezone name. The ecpg + input containing a very long timezone name. The ecpg library contained these vulnerabilities along with some of its own. (CVE-2014-0063) @@ -5867,7 +6237,7 @@ Branch: REL9_2_STABLE [6b700301c] 2015-02-17 16:03:00 +0100 - Use strlcpy() and related functions to provide a clear + Use strlcpy() and related functions to provide a clear guarantee that fixed-size buffers are not overrun. Unlike the preceding items, it is unclear whether these cases really represent live issues, since in most cases there appear to be previous @@ -5879,35 +6249,35 @@ Branch: REL9_2_STABLE [6b700301c] 2015-02-17 16:03:00 +0100 - Avoid crashing if crypt() returns NULL (Honza Horak, + Avoid crashing if crypt() returns NULL (Honza Horak, Bruce Momjian) - There are relatively few scenarios in which crypt() - could return NULL, but contrib/chkpass would crash + There are relatively few scenarios in which crypt() + could return NULL, but contrib/chkpass would crash if it did. One practical case in which this could be an issue is - if libc is configured to refuse to execute unapproved - hashing algorithms (e.g., FIPS mode). + if libc is configured to refuse to execute unapproved + hashing algorithms (e.g., FIPS mode). (CVE-2014-0066) - Document risks of make check in the regression testing + Document risks of make check in the regression testing instructions (Noah Misch, Tom Lane) - Since the temporary server started by make check - uses trust authentication, another user on the same machine + Since the temporary server started by make check + uses trust authentication, another user on the same machine could connect to it as database superuser, and then potentially exploit the privileges of the operating-system user who started the tests. A future release will probably incorporate changes in the testing procedure to prevent this risk, but some public discussion is needed first. So for the moment, just warn people against using - make check when there are untrusted users on the + make check when there are untrusted users on the same machine. (CVE-2014-0067) @@ -5922,7 +6292,7 @@ Branch: REL9_2_STABLE [6b700301c] 2015-02-17 16:03:00 +0100 The WAL update could be applied to the wrong page, potentially many pages past where it should have been. Aside from corrupting data, - this error has been observed to result in significant bloat + this error has been observed to result in significant bloat of standby servers compared to their masters, due to updates being applied far beyond where the end-of-file should have been. This failure mode does not appear to be a significant risk during crash @@ -5942,20 +6312,20 @@ Branch: REL9_2_STABLE [6b700301c] 2015-02-17 16:03:00 +0100 was already consistent at the start of replay, thus possibly allowing hot-standby queries before the database was really consistent. Other symptoms such as PANIC: WAL contains references to invalid - pages were also possible. + pages were also possible. Fix improper locking of btree index pages while replaying - a VACUUM operation in hot-standby mode (Andres Freund, + a VACUUM operation in hot-standby mode (Andres Freund, Heikki Linnakangas, Tom Lane) This error could result in PANIC: WAL contains references to - invalid pages failures. + invalid pages failures. 
@@ -5973,8 +6343,8 @@ Branch: REL9_2_STABLE [6b700301c] 2015-02-17 16:03:00 +0100 - When pause_at_recovery_target - and recovery_target_inclusive are both set, ensure the + When pause_at_recovery_target + and recovery_target_inclusive are both set, ensure the target record is applied before pausing, not after (Heikki Linnakangas) @@ -5987,7 +6357,7 @@ Branch: REL9_2_STABLE [6b700301c] 2015-02-17 16:03:00 +0100 Ensure that signal handlers don't attempt to use the - process's MyProc pointer after it's no longer valid. + process's MyProc pointer after it's no longer valid. @@ -6000,19 +6370,19 @@ Branch: REL9_2_STABLE [6b700301c] 2015-02-17 16:03:00 +0100 - Fix unsafe references to errno within error reporting + Fix unsafe references to errno within error reporting logic (Christian Kruse) This would typically lead to odd behaviors such as missing or - inappropriate HINT fields. + inappropriate HINT fields. - Fix possible crashes from using ereport() too early + Fix possible crashes from using ereport() too early during server startup (Tom Lane) @@ -6036,7 +6406,7 @@ Branch: REL9_2_STABLE [6b700301c] 2015-02-17 16:03:00 +0100 - Fix length checking for Unicode identifiers (U&"..." + Fix length checking for Unicode identifiers (U&"..." syntax) containing escapes (Tom Lane) @@ -6056,7 +6426,7 @@ Branch: REL9_2_STABLE [6b700301c] 2015-02-17 16:03:00 +0100 A previous patch allowed such keywords to be used without quoting in places such as role identifiers; but it missed cases where a - list of role identifiers was permitted, such as DROP ROLE. + list of role identifiers was permitted, such as DROP ROLE. @@ -6070,19 +6440,19 @@ Branch: REL9_2_STABLE [6b700301c] 2015-02-17 16:03:00 +0100 Fix possible crash due to invalid plan for nested sub-selects, such - as WHERE (... x IN (SELECT ...) ...) IN (SELECT ...) + as WHERE (... x IN (SELECT ...) ...) IN (SELECT ...) (Tom Lane) - Fix UPDATE/DELETE of an inherited target table - that has UNION ALL subqueries (Tom Lane) + Fix UPDATE/DELETE of an inherited target table + that has UNION ALL subqueries (Tom Lane) - Without this fix, UNION ALL subqueries aren't correctly + Without this fix, UNION ALL subqueries aren't correctly inserted into the update plans for inheritance child tables after the first one, typically resulting in no update happening for those child table(s). @@ -6091,12 +6461,12 @@ Branch: REL9_2_STABLE [6b700301c] 2015-02-17 16:03:00 +0100 - Ensure that ANALYZE creates statistics for a table column - even when all the values in it are too wide (Tom Lane) + Ensure that ANALYZE creates statistics for a table column + even when all the values in it are too wide (Tom Lane) - ANALYZE intentionally omits very wide values from its + ANALYZE intentionally omits very wide values from its histogram and most-common-values calculations, but it neglected to do something sane in the case that all the sampled entries are too wide. @@ -6104,21 +6474,21 @@ Branch: REL9_2_STABLE [6b700301c] 2015-02-17 16:03:00 +0100 - In ALTER TABLE ... SET TABLESPACE, allow the database's + In ALTER TABLE ... SET TABLESPACE, allow the database's default tablespace to be used without a permissions check (Stephen Frost) - CREATE TABLE has always allowed such usage, - but ALTER TABLE didn't get the memo. + CREATE TABLE has always allowed such usage, + but ALTER TABLE didn't get the memo. 
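As a minimal sketch of the ALTER TABLE ... SET TABLESPACE item just above (the table name is a placeholder, and pg_default is assumed to be the database's default tablespace):

    -- Move a table back to the database's default tablespace; with this fix
    -- the command no longer demands CREATE permission on that tablespace,
    -- matching what CREATE TABLE has always allowed.
    ALTER TABLE my_table SET TABLESPACE pg_default;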
- Fix cannot accept a set error when some arms of - a CASE return a set and others don't (Tom Lane) + Fix cannot accept a set error when some arms of + a CASE return a set and others don't (Tom Lane) @@ -6150,12 +6520,12 @@ Branch: REL9_2_STABLE [6b700301c] 2015-02-17 16:03:00 +0100 - Fix possible misbehavior in plainto_tsquery() + Fix possible misbehavior in plainto_tsquery() (Heikki Linnakangas) - Use memmove() not memcpy() for copying + Use memmove() not memcpy() for copying overlapping memory regions. There have been no field reports of this actually causing trouble, but it's certainly risky. @@ -6163,8 +6533,8 @@ Branch: REL9_2_STABLE [6b700301c] 2015-02-17 16:03:00 +0100 - Fix placement of permissions checks in pg_start_backup() - and pg_stop_backup() (Andres Freund, Magnus Hagander) + Fix placement of permissions checks in pg_start_backup() + and pg_stop_backup() (Andres Freund, Magnus Hagander) @@ -6175,44 +6545,44 @@ Branch: REL9_2_STABLE [6b700301c] 2015-02-17 16:03:00 +0100 - Accept SHIFT_JIS as an encoding name for locale checking + Accept SHIFT_JIS as an encoding name for locale checking purposes (Tatsuo Ishii) - Fix *-qualification of named parameters in SQL-language + Fix *-qualification of named parameters in SQL-language functions (Tom Lane) Given a composite-type parameter - named foo, $1.* worked fine, - but foo.* not so much. + named foo, $1.* worked fine, + but foo.* not so much. - Fix misbehavior of PQhost() on Windows (Fujii Masao) + Fix misbehavior of PQhost() on Windows (Fujii Masao) - It should return localhost if no host has been specified. + It should return localhost if no host has been specified. - Improve error handling in libpq and psql - for failures during COPY TO STDOUT/FROM STDIN (Tom Lane) + Improve error handling in libpq and psql + for failures during COPY TO STDOUT/FROM STDIN (Tom Lane) In particular this fixes an infinite loop that could occur in 9.2 and up if the server connection was lost during COPY FROM - STDIN. Variants of that scenario might be possible in older + STDIN. Variants of that scenario might be possible in older versions, or with other client applications. 
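For the COPY error-handling item just above, these are the statement shapes involved (table name is a placeholder); the fix is about how libpq and psql recover when the server connection is lost mid-stream:

    COPY my_table TO STDOUT WITH (FORMAT csv);
    COPY my_table FROM STDIN WITH (FORMAT csv);
    -- FROM STDIN then reads data lines from the client, terminated by \.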
@@ -6220,14 +6590,14 @@ Branch: REL9_2_STABLE [6b700301c] 2015-02-17 16:03:00 +0100 Fix incorrect translation handling in - some psql \d commands + some psql \d commands (Peter Eisentraut, Tom Lane) - Ensure pg_basebackup's background process is killed + Ensure pg_basebackup's background process is killed when exiting its foreground process (Magnus Hagander) @@ -6235,7 +6605,7 @@ Branch: REL9_2_STABLE [6b700301c] 2015-02-17 16:03:00 +0100 Fix possible incorrect printing of filenames - in pg_basebackup's verbose mode (Magnus Hagander) + in pg_basebackup's verbose mode (Magnus Hagander) @@ -6248,20 +6618,20 @@ Branch: REL9_2_STABLE [6b700301c] 2015-02-17 16:03:00 +0100 - Fix misaligned descriptors in ecpg (MauMau) + Fix misaligned descriptors in ecpg (MauMau) - In ecpg, handle lack of a hostname in the connection + In ecpg, handle lack of a hostname in the connection parameters properly (Michael Meskes) - Fix performance regression in contrib/dblink connection + Fix performance regression in contrib/dblink connection startup (Joe Conway) @@ -6272,15 +6642,15 @@ Branch: REL9_2_STABLE [6b700301c] 2015-02-17 16:03:00 +0100 - In contrib/isn, fix incorrect calculation of the check + In contrib/isn, fix incorrect calculation of the check digit for ISMN values (Fabien Coelho) - Fix contrib/pg_stat_statement's handling - of CURRENT_DATE and related constructs (Kyotaro + Fix contrib/pg_stat_statement's handling + of CURRENT_DATE and related constructs (Kyotaro Horiguchi) @@ -6294,28 +6664,28 @@ Branch: REL9_2_STABLE [6b700301c] 2015-02-17 16:03:00 +0100 - In Mingw and Cygwin builds, install the libpq DLL - in the bin directory (Andrew Dunstan) + In Mingw and Cygwin builds, install the libpq DLL + in the bin directory (Andrew Dunstan) This duplicates what the MSVC build has long done. It should fix - problems with programs like psql failing to start + problems with programs like psql failing to start because they can't find the DLL. - Avoid using the deprecated dllwrap tool in Cygwin builds + Avoid using the deprecated dllwrap tool in Cygwin builds (Marco Atzeri) - Don't generate plain-text HISTORY - and src/test/regress/README files anymore (Tom Lane) + Don't generate plain-text HISTORY + and src/test/regress/README files anymore (Tom Lane) @@ -6324,20 +6694,20 @@ Branch: REL9_2_STABLE [6b700301c] 2015-02-17 16:03:00 +0100 the likely audience for plain-text format. Distribution tarballs will still contain files by these names, but they'll just be stubs directing the reader to consult the main documentation. - The plain-text INSTALL file will still be maintained, as + The plain-text INSTALL file will still be maintained, as there is arguably a use-case for that. - Update time zone data files to tzdata release 2013i + Update time zone data files to tzdata release 2013i for DST law changes in Jordan and historical changes in Cuba. - In addition, the zones Asia/Riyadh87, - Asia/Riyadh88, and Asia/Riyadh89 have been + In addition, the zones Asia/Riyadh87, + Asia/Riyadh88, and Asia/Riyadh89 have been removed, as they are no longer maintained by IANA, and never represented actual civil timekeeping practice. @@ -6359,7 +6729,7 @@ Branch: REL9_2_STABLE [6b700301c] 2015-02-17 16:03:00 +0100 This release contains a variety of fixes from 9.2.5. For information about new features in the 9.2 major release, see - . + . @@ -6377,7 +6747,7 @@ Branch: REL9_2_STABLE [6b700301c] 2015-02-17 16:03:00 +0100 Also, if you are upgrading from a version earlier than 9.2.4, - see . + see . 
@@ -6389,19 +6759,19 @@ Branch: REL9_2_STABLE [6b700301c] 2015-02-17 16:03:00 +0100 - Fix VACUUM's tests to see whether it can - update relfrozenxid (Andres Freund) + Fix VACUUM's tests to see whether it can + update relfrozenxid (Andres Freund) - In some cases VACUUM (either manual or autovacuum) could - incorrectly advance a table's relfrozenxid value, + In some cases VACUUM (either manual or autovacuum) could + incorrectly advance a table's relfrozenxid value, allowing tuples to escape freezing, causing those rows to become invisible once 2^31 transactions have elapsed. The probability of data loss is fairly low since multiple incorrect advancements would need to happen before actual loss occurs, but it's not zero. In 9.2.0 and later, the probability of loss is higher, and it's also possible - to get could not access status of transaction errors as a + to get could not access status of transaction errors as a consequence of this bug. Users upgrading from releases 9.0.4 or 8.4.8 or earlier are not affected, but all later versions contain the bug. @@ -6409,18 +6779,18 @@ Branch: REL9_2_STABLE [6b700301c] 2015-02-17 16:03:00 +0100 The issue can be ameliorated by, after upgrading, vacuuming all tables in all databases while having vacuum_freeze_table_age + linkend="guc-vacuum-freeze-table-age">vacuum_freeze_table_age set to zero. This will fix any latent corruption but will not be able to fix all pre-existing data errors. However, an installation can be presumed safe after performing this vacuuming if it has executed fewer than 2^31 update transactions in its lifetime (check this with - SELECT txid_current() < 2^31). + SELECT txid_current() < 2^31). - Fix initialization of pg_clog and pg_subtrans + Fix initialization of pg_clog and pg_subtrans during hot standby startup (Andres Freund, Heikki Linnakangas) @@ -6451,13 +6821,13 @@ Branch: REL9_2_STABLE [6b700301c] 2015-02-17 16:03:00 +0100 This could lead to corruption of the lock data structures in shared - memory, causing lock already held and other odd errors. + memory, causing lock already held and other odd errors. - Truncate pg_multixact contents during WAL replay + Truncate pg_multixact contents during WAL replay (Andres Freund) @@ -6468,14 +6838,14 @@ Branch: REL9_2_STABLE [6b700301c] 2015-02-17 16:03:00 +0100 - Ensure an anti-wraparound VACUUM counts a page as scanned + Ensure an anti-wraparound VACUUM counts a page as scanned when it's only verified that no tuples need freezing (Sergey Burladyan, Jeff Janes) This bug could result in failing to - advance relfrozenxid, so that the table would still be + advance relfrozenxid, so that the table would still be thought to need another anti-wraparound vacuum. In the worst case the database might even shut down to prevent wraparound. @@ -6494,15 +6864,15 @@ Branch: REL9_2_STABLE [6b700301c] 2015-02-17 16:03:00 +0100 - Fix unexpected spgdoinsert() failure error during SP-GiST + Fix unexpected spgdoinsert() failure error during SP-GiST index creation (Teodor Sigaev) - Avoid flattening a subquery whose SELECT list contains a - volatile function wrapped inside a sub-SELECT (Tom Lane) + Avoid flattening a subquery whose SELECT list contains a + volatile function wrapped inside a sub-SELECT (Tom Lane) @@ -6519,14 +6889,14 @@ Branch: REL9_2_STABLE [6b700301c] 2015-02-17 16:03:00 +0100 This error could lead to incorrect plans for queries involving - multiple levels of subqueries within JOIN syntax. + multiple levels of subqueries within JOIN syntax. 
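A minimal sketch of the post-upgrade remediation described in the relfrozenxid/VACUUM item earlier in this list (run it in each database; it assumes these settings may be changed at the session level):

    -- Check whether the installation is still within the safe range:
    SELECT txid_current() < 2^31 AS within_safe_range;
    -- Force full-table scans so the vacuum can repair relfrozenxid:
    SET vacuum_freeze_table_age = 0;
    VACUUM;   -- vacuums every table the current user is allowed to vacuum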
Fix incorrect planning in cases where the same non-strict expression - appears in multiple WHERE and outer JOIN + appears in multiple WHERE and outer JOIN equality clauses (Tom Lane) @@ -6594,13 +6964,13 @@ Branch: REL9_2_STABLE [6b700301c] 2015-02-17 16:03:00 +0100 - Fix array slicing of int2vector and oidvector values + Fix array slicing of int2vector and oidvector values (Tom Lane) Expressions of this kind are now implicitly promoted to - regular int2 or oid arrays. + regular int2 or oid arrays. @@ -6614,7 +6984,7 @@ Branch: REL9_2_STABLE [6b700301c] 2015-02-17 16:03:00 +0100 In some cases, the system would use the simple GMT offset value when it should have used the regular timezone setting that had prevailed before the simple offset was selected. This change also causes - the timeofday function to honor the simple GMT offset + the timeofday function to honor the simple GMT offset zone. @@ -6628,7 +6998,7 @@ Branch: REL9_2_STABLE [6b700301c] 2015-02-17 16:03:00 +0100 - Properly quote generated command lines in pg_ctl + Properly quote generated command lines in pg_ctl (Naoya Anzai and Tom Lane) @@ -6639,10 +7009,10 @@ Branch: REL9_2_STABLE [6b700301c] 2015-02-17 16:03:00 +0100 - Fix pg_dumpall to work when a source database + Fix pg_dumpall to work when a source database sets default_transaction_read_only - via ALTER DATABASE SET (Kevin Grittner) + linkend="guc-default-transaction-read-only">default_transaction_read_only + via ALTER DATABASE SET (Kevin Grittner) @@ -6652,28 +7022,28 @@ Branch: REL9_2_STABLE [6b700301c] 2015-02-17 16:03:00 +0100 - Make ecpg search for quoted cursor names + Make ecpg search for quoted cursor names case-sensitively (Zoltán Böszörményi) - Fix ecpg's processing of lists of variables - declared varchar (Zoltán Böszörményi) + Fix ecpg's processing of lists of variables + declared varchar (Zoltán Böszörményi) - Make contrib/lo defend against incorrect trigger definitions + Make contrib/lo defend against incorrect trigger definitions (Marc Cousin) - Update time zone data files to tzdata release 2013h + Update time zone data files to tzdata release 2013h for DST law changes in Argentina, Brazil, Jordan, Libya, Liechtenstein, Morocco, and Palestine. Also, new timezone abbreviations WIB, WIT, WITA for Indonesia. @@ -6696,7 +7066,7 @@ Branch: REL9_2_STABLE [6b700301c] 2015-02-17 16:03:00 +0100 This release contains a variety of fixes from 9.2.4. For information about new features in the 9.2 major release, see - . + . @@ -6708,7 +7078,7 @@ Branch: REL9_2_STABLE [6b700301c] 2015-02-17 16:03:00 +0100 However, if you are upgrading from a version earlier than 9.2.4, - see . + see . @@ -6725,7 +7095,7 @@ Branch: REL9_2_STABLE [6b700301c] 2015-02-17 16:03:00 +0100 - PostgreSQL case-folds non-ASCII characters only + PostgreSQL case-folds non-ASCII characters only when using a single-byte server encoding. 
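For the pg_dumpall item above, this is the kind of configuration that used to trip it up, sketched with a placeholder database name:

    -- A database-level read-only default; pg_dumpall can now dump from a
    -- source database configured this way.
    ALTER DATABASE mydb SET default_transaction_read_only = on;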
@@ -6740,7 +7110,7 @@ Branch: REL9_2_STABLE [6b700301c] 2015-02-17 16:03:00 +0100 Fix checkpoint memory leak in background writer when wal_level = - hot_standby (Naoya Anzai) + hot_standby (Naoya Anzai) @@ -6753,7 +7123,7 @@ Branch: REL9_2_STABLE [6b700301c] 2015-02-17 16:03:00 +0100 - Fix memory overcommit bug when work_mem is using more + Fix memory overcommit bug when work_mem is using more than 24GB of memory (Stephen Frost) @@ -6795,58 +7165,58 @@ Branch: REL9_2_STABLE [6b700301c] 2015-02-17 16:03:00 +0100 - Previously tests like col IS NOT TRUE and col IS - NOT FALSE did not properly factor in NULL values when estimating + Previously tests like col IS NOT TRUE and col IS + NOT FALSE did not properly factor in NULL values when estimating plan costs. - Fix accounting for qualifier evaluation costs in UNION ALL + Fix accounting for qualifier evaluation costs in UNION ALL and inheritance queries (Tom Lane) This fixes cases where suboptimal query plans could be chosen if - some WHERE clauses are expensive to calculate. + some WHERE clauses are expensive to calculate. - Prevent pushing down WHERE clauses into unsafe - UNION/INTERSECT subqueries (Tom Lane) + Prevent pushing down WHERE clauses into unsafe + UNION/INTERSECT subqueries (Tom Lane) - Subqueries of a UNION or INTERSECT that + Subqueries of a UNION or INTERSECT that contain set-returning functions or volatile functions in their - SELECT lists could be improperly optimized, leading to + SELECT lists could be improperly optimized, leading to run-time errors or incorrect query results. - Fix rare case of failed to locate grouping columns + Fix rare case of failed to locate grouping columns planner failure (Tom Lane) - Fix pg_dump of foreign tables with dropped columns (Andrew Dunstan) + Fix pg_dump of foreign tables with dropped columns (Andrew Dunstan) - Previously such cases could cause a pg_upgrade error. + Previously such cases could cause a pg_upgrade error. - Reorder pg_dump processing of extension-related + Reorder pg_dump processing of extension-related rules and event triggers (Joe Conway) @@ -6854,7 +7224,7 @@ Branch: REL9_2_STABLE [6b700301c] 2015-02-17 16:03:00 +0100 Force dumping of extension tables if specified by pg_dump - -t or -n (Joe Conway) + -t or -n (Joe Conway) @@ -6867,25 +7237,25 @@ Branch: REL9_2_STABLE [6b700301c] 2015-02-17 16:03:00 +0100 - Fix pg_restore -l with the directory archive to display + Fix pg_restore -l with the directory archive to display the correct format name (Fujii Masao) - Properly record index comments created using UNIQUE - and PRIMARY KEY syntax (Andres Freund) + Properly record index comments created using UNIQUE + and PRIMARY KEY syntax (Andres Freund) - This fixes a parallel pg_restore failure. + This fixes a parallel pg_restore failure. - Cause pg_basebackup -x with an empty xlog directory + Cause pg_basebackup -x with an empty xlog directory to throw an error rather than crashing (Magnus Hagander, Haruka Takatsuka) @@ -6924,13 +7294,13 @@ Branch: REL9_2_STABLE [6b700301c] 2015-02-17 16:03:00 +0100 - Fix REINDEX TABLE and REINDEX DATABASE + Fix REINDEX TABLE and REINDEX DATABASE to properly revalidate constraints and mark invalidated indexes as valid (Noah Misch) - REINDEX INDEX has always worked properly. + REINDEX INDEX has always worked properly. 
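A short sketch of the REINDEX forms covered by the item just above (object names are placeholders):

    REINDEX INDEX my_idx;      -- always revalidated correctly
    REINDEX TABLE my_table;    -- now also revalidates constraints and clears
    REINDEX DATABASE mydb;     -- the "invalid" flag (must name the current database)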
@@ -6943,7 +7313,7 @@ Branch: REL9_2_STABLE [6b700301c] 2015-02-17 16:03:00 +0100 Fix possible deadlock during concurrent CREATE INDEX - CONCURRENTLY operations (Tom Lane) + CONCURRENTLY operations (Tom Lane) @@ -6955,7 +7325,7 @@ Branch: REL9_2_STABLE [6b700301c] 2015-02-17 16:03:00 +0100 - Fix regexp_matches() handling of zero-length matches + Fix regexp_matches() handling of zero-length matches (Jeevan Chalke) @@ -6979,14 +7349,14 @@ Branch: REL9_2_STABLE [6b700301c] 2015-02-17 16:03:00 +0100 - Prevent CREATE FUNCTION from checking SET + Prevent CREATE FUNCTION from checking SET variables unless function body checking is enabled (Tom Lane) - Allow ALTER DEFAULT PRIVILEGES to operate on schemas + Allow ALTER DEFAULT PRIVILEGES to operate on schemas without requiring CREATE permission (Tom Lane) @@ -6998,31 +7368,31 @@ Branch: REL9_2_STABLE [6b700301c] 2015-02-17 16:03:00 +0100 Specifically, lessen keyword restrictions for role names, language - names, EXPLAIN and COPY options, and - SET values. This allows COPY ... (FORMAT - BINARY) to work as expected; previously BINARY needed + names, EXPLAIN and COPY options, and + SET values. This allows COPY ... (FORMAT + BINARY) to work as expected; previously BINARY needed to be quoted. - Print proper line number during COPY failure (Heikki + Print proper line number during COPY failure (Heikki Linnakangas) - Fix pgp_pub_decrypt() so it works for secret keys with + Fix pgp_pub_decrypt() so it works for secret keys with passwords (Marko Kreen) - Make pg_upgrade use pg_dump - --quote-all-identifiers to avoid problems with keyword changes + Make pg_upgrade use pg_dump + --quote-all-identifiers to avoid problems with keyword changes between releases (Tom Lane) @@ -7036,7 +7406,7 @@ Branch: REL9_2_STABLE [6b700301c] 2015-02-17 16:03:00 +0100 - Ensure that VACUUM ANALYZE still runs the ANALYZE phase + Ensure that VACUUM ANALYZE still runs the ANALYZE phase if its attempt to truncate the file is cancelled due to lock conflicts (Kevin Grittner) @@ -7045,28 +7415,28 @@ Branch: REL9_2_STABLE [6b700301c] 2015-02-17 16:03:00 +0100 Avoid possible failure when performing transaction control commands (e.g - ROLLBACK) in prepared queries (Tom Lane) + ROLLBACK) in prepared queries (Tom Lane) Ensure that floating-point data input accepts standard spellings - of infinity on all platforms (Tom Lane) + of infinity on all platforms (Tom Lane) - The C99 standard says that allowable spellings are inf, - +inf, -inf, infinity, - +infinity, and -infinity. Make sure we - recognize these even if the platform's strtod function + The C99 standard says that allowable spellings are inf, + +inf, -inf, infinity, + +infinity, and -infinity. Make sure we + recognize these even if the platform's strtod function doesn't. - Avoid unnecessary reporting when track_activities is off + Avoid unnecessary reporting when track_activities is off (Tom Lane) @@ -7080,7 +7450,7 @@ Branch: REL9_2_STABLE [6b700301c] 2015-02-17 16:03:00 +0100 - Prevent crash when psql's PSQLRC variable + Prevent crash when psql's PSQLRC variable contains a tilde (Bruce Momjian) @@ -7093,7 +7463,7 @@ Branch: REL9_2_STABLE [6b700301c] 2015-02-17 16:03:00 +0100 - Update time zone data files to tzdata release 2013d + Update time zone data files to tzdata release 2013d for DST law changes in Israel, Morocco, Palestine, and Paraguay. Also, historical zone data corrections for Macquarie Island. 
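Illustrating the floating-point input item above, these are the C99 spellings that are now accepted even when the platform's strtod() does not recognize them:

    SELECT 'inf'::float8, '+inf'::float8, '-infinity'::float8, 'Infinity'::float8;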
@@ -7115,7 +7485,7 @@ Branch: REL9_2_STABLE [6b700301c] 2015-02-17 16:03:00 +0100 This release contains a variety of fixes from 9.2.3. For information about new features in the 9.2 major release, see - . + . @@ -7128,13 +7498,13 @@ Branch: REL9_2_STABLE [6b700301c] 2015-02-17 16:03:00 +0100 However, this release corrects several errors in management of GiST indexes. After installing this update, it is advisable to - REINDEX any GiST indexes that meet one or more of the + REINDEX any GiST indexes that meet one or more of the conditions described below. Also, if you are upgrading from a version earlier than 9.2.2, - see . + see . @@ -7152,7 +7522,7 @@ Branch: REL9_2_STABLE [6b700301c] 2015-02-17 16:03:00 +0100 A connection request containing a database name that begins with - - could be crafted to damage or destroy + - could be crafted to damage or destroy files within the server's data directory, even if the request is eventually rejected. (CVE-2013-1899) @@ -7166,9 +7536,9 @@ Branch: REL9_2_STABLE [6b700301c] 2015-02-17 16:03:00 +0100 This avoids a scenario wherein random numbers generated by - contrib/pgcrypto functions might be relatively easy for + contrib/pgcrypto functions might be relatively easy for another database user to guess. The risk is only significant when - the postmaster is configured with ssl = on + the postmaster is configured with ssl = on but most connections don't use SSL encryption. (CVE-2013-1900) @@ -7181,7 +7551,7 @@ Branch: REL9_2_STABLE [6b700301c] 2015-02-17 16:03:00 +0100 An unprivileged database user could exploit this mistake to call - pg_start_backup() or pg_stop_backup(), + pg_start_backup() or pg_stop_backup(), thus possibly interfering with creation of routine backups. (CVE-2013-1901) @@ -7189,32 +7559,32 @@ Branch: REL9_2_STABLE [6b700301c] 2015-02-17 16:03:00 +0100 - Fix GiST indexes to not use fuzzy geometric comparisons when + Fix GiST indexes to not use fuzzy geometric comparisons when it's not appropriate to do so (Alexander Korotkov) - The core geometric types perform comparisons using fuzzy - equality, but gist_box_same must do exact comparisons, + The core geometric types perform comparisons using fuzzy + equality, but gist_box_same must do exact comparisons, else GiST indexes using it might become inconsistent. After installing - this update, users should REINDEX any GiST indexes on - box, polygon, circle, or point - columns, since all of these use gist_box_same. + this update, users should REINDEX any GiST indexes on + box, polygon, circle, or point + columns, since all of these use gist_box_same. Fix erroneous range-union and penalty logic in GiST indexes that use - contrib/btree_gist for variable-width data types, that is - text, bytea, bit, and numeric + contrib/btree_gist for variable-width data types, that is + text, bytea, bit, and numeric columns (Tom Lane) These errors could result in inconsistent indexes in which some keys that are present would not be found by searches, and also in useless - index bloat. Users are advised to REINDEX such indexes + index bloat. Users are advised to REINDEX such indexes after installing this update. @@ -7229,21 +7599,21 @@ Branch: REL9_2_STABLE [6b700301c] 2015-02-17 16:03:00 +0100 These errors could result in inconsistent indexes in which some keys that are present would not be found by searches, and also in indexes that are unnecessarily inefficient to search. 
Users are advised to - REINDEX multi-column GiST indexes after installing this + REINDEX multi-column GiST indexes after installing this update. - Fix gist_point_consistent + Fix gist_point_consistent to handle fuzziness consistently (Alexander Korotkov) - Index scans on GiST indexes on point columns would sometimes + Index scans on GiST indexes on point columns would sometimes yield results different from a sequential scan, because - gist_point_consistent disagreed with the underlying + gist_point_consistent disagreed with the underlying operator code about whether to do comparisons exactly or fuzzily. @@ -7254,7 +7624,7 @@ Branch: REL9_2_STABLE [6b700301c] 2015-02-17 16:03:00 +0100 - This bug could result in incorrect local pin count errors + This bug could result in incorrect local pin count errors during replay, making recovery impossible. @@ -7262,7 +7632,7 @@ Branch: REL9_2_STABLE [6b700301c] 2015-02-17 16:03:00 +0100 Ensure we do crash recovery before entering archive recovery, if the - database was not stopped cleanly and a recovery.conf file + database was not stopped cleanly and a recovery.conf file is present (Heikki Linnakangas, Kyotaro Horiguchi, Mitsumasa Kondo) @@ -7282,14 +7652,14 @@ Branch: REL9_2_STABLE [6b700301c] 2015-02-17 16:03:00 +0100 - Fix race condition in DELETE RETURNING (Tom Lane) + Fix race condition in DELETE RETURNING (Tom Lane) - Under the right circumstances, DELETE RETURNING could + Under the right circumstances, DELETE RETURNING could attempt to fetch data from a shared buffer that the current process no longer has any pin on. If some other process changed the buffer - meanwhile, this would lead to garbage RETURNING output, or + meanwhile, this would lead to garbage RETURNING output, or even a crash. @@ -7310,20 +7680,20 @@ Branch: REL9_2_STABLE [6b700301c] 2015-02-17 16:03:00 +0100 - Fix to_char() to use ASCII-only case-folding rules where + Fix to_char() to use ASCII-only case-folding rules where appropriate (Tom Lane) This fixes misbehavior of some template patterns that should be - locale-independent, but mishandled I and - i in Turkish locales. + locale-independent, but mishandled I and + i in Turkish locales. 
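A small example of the kind of to_char() template formatting the item just above concerns; the point of the fix is that case-folding of template letters stays ASCII-only even under locales such as Turkish (the exact affected patterns are not spelled out here):

    SELECT to_char(now(), 'DD Mon YYYY, HH24:MI:SS');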
- Fix unwanted rejection of timestamp 1999-12-31 24:00:00 + Fix unwanted rejection of timestamp 1999-12-31 24:00:00 (Tom Lane) @@ -7337,8 +7707,8 @@ Branch: REL9_2_STABLE [6b700301c] 2015-02-17 16:03:00 +0100 - Fix logic error when a single transaction does UNLISTEN - then LISTEN (Tom Lane) + Fix logic error when a single transaction does UNLISTEN + then LISTEN (Tom Lane) @@ -7356,14 +7726,14 @@ Branch: REL9_2_STABLE [6b700301c] 2015-02-17 16:03:00 +0100 - Fix performance issue in EXPLAIN (ANALYZE, TIMING OFF) + Fix performance issue in EXPLAIN (ANALYZE, TIMING OFF) (Pavel Stehule) - Remove useless picksplit doesn't support secondary split log + Remove useless picksplit doesn't support secondary split log messages (Josh Hansen, Tom Lane) @@ -7378,7 +7748,7 @@ Branch: REL9_2_STABLE [6b700301c] 2015-02-17 16:03:00 +0100 Remove vestigial secondary-split support in - gist_box_picksplit() (Tom Lane) + gist_box_picksplit() (Tom Lane) @@ -7397,29 +7767,29 @@ Branch: REL9_2_STABLE [6b700301c] 2015-02-17 16:03:00 +0100 - Eliminate memory leaks in PL/Perl's spi_prepare() function + Eliminate memory leaks in PL/Perl's spi_prepare() function (Alex Hunsaker, Tom Lane) - Fix pg_dumpall to handle database names containing - = correctly (Heikki Linnakangas) + Fix pg_dumpall to handle database names containing + = correctly (Heikki Linnakangas) - Avoid crash in pg_dump when an incorrect connection + Avoid crash in pg_dump when an incorrect connection string is given (Heikki Linnakangas) - Ignore invalid indexes in pg_dump and - pg_upgrade (Michael Paquier, Bruce Momjian) + Ignore invalid indexes in pg_dump and + pg_upgrade (Michael Paquier, Bruce Momjian) @@ -7428,15 +7798,15 @@ Branch: REL9_2_STABLE [6b700301c] 2015-02-17 16:03:00 +0100 a uniqueness condition not satisfied by the table's data. Also, if the index creation is in fact still in progress, it seems reasonable to consider it to be an uncommitted DDL change, which - pg_dump wouldn't be expected to dump anyway. - pg_upgrade now also skips invalid indexes rather than + pg_dump wouldn't be expected to dump anyway. + pg_upgrade now also skips invalid indexes rather than failing. - In pg_basebackup, include only the current server + In pg_basebackup, include only the current server version's subdirectory when backing up a tablespace (Heikki Linnakangas) @@ -7444,16 +7814,16 @@ Branch: REL9_2_STABLE [6b700301c] 2015-02-17 16:03:00 +0100 - Add a server version check in pg_basebackup and - pg_receivexlog, so they fail cleanly with version + Add a server version check in pg_basebackup and + pg_receivexlog, so they fail cleanly with version combinations that won't work (Heikki Linnakangas) - Fix contrib/dblink to handle inconsistent settings of - DateStyle or IntervalStyle safely (Daniel + Fix contrib/dblink to handle inconsistent settings of + DateStyle or IntervalStyle safely (Daniel Farina, Tom Lane) @@ -7461,7 +7831,7 @@ Branch: REL9_2_STABLE [6b700301c] 2015-02-17 16:03:00 +0100 Previously, if the remote server had different settings of these parameters, ambiguous dates might be read incorrectly. This fix ensures that datetime and interval columns fetched by a - dblink query will be interpreted correctly. Note however + dblink query will be interpreted correctly. Note however that inconsistent settings are still risky, since literal values appearing in SQL commands sent to the remote server might be interpreted differently than they would be locally. 
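A minimal dblink fetch of datetime data, of the sort the item just above is about (the connection string and column definition are placeholders; requires the dblink extension):

    CREATE EXTENSION IF NOT EXISTS dblink;
    SELECT *
    FROM dblink('dbname=otherdb', 'SELECT now()') AS t(remote_time timestamptz);
    -- With the fix, differing DateStyle/IntervalStyle settings on the remote
    -- server no longer cause the fetched values to be misread.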
@@ -7470,25 +7840,25 @@ Branch: REL9_2_STABLE [6b700301c] 2015-02-17 16:03:00 +0100 - Fix contrib/pg_trgm's similarity() function + Fix contrib/pg_trgm's similarity() function to return zero for trigram-less strings (Tom Lane) - Previously it returned NaN due to internal division by zero. + Previously it returned NaN due to internal division by zero. - Enable building PostgreSQL with Microsoft Visual + Enable building PostgreSQL with Microsoft Visual Studio 2012 (Brar Piening, Noah Misch) - Update time zone data files to tzdata release 2013b + Update time zone data files to tzdata release 2013b for DST law changes in Chile, Haiti, Morocco, Paraguay, and some Russian areas. Also, historical zone data corrections for numerous places. @@ -7496,12 +7866,12 @@ Branch: REL9_2_STABLE [6b700301c] 2015-02-17 16:03:00 +0100 Also, update the time zone abbreviation files for recent changes in - Russia and elsewhere: CHOT, GET, - IRKT, KGT, KRAT, MAGT, - MAWT, MSK, NOVT, OMST, - TKT, VLAT, WST, YAKT, - YEKT now follow their current meanings, and - VOLT (Europe/Volgograd) and MIST + Russia and elsewhere: CHOT, GET, + IRKT, KGT, KRAT, MAGT, + MAWT, MSK, NOVT, OMST, + TKT, VLAT, WST, YAKT, + YEKT now follow their current meanings, and + VOLT (Europe/Volgograd) and MIST (Antarctica/Macquarie) are added to the default abbreviations list. @@ -7522,7 +7892,7 @@ Branch: REL9_2_STABLE [6b700301c] 2015-02-17 16:03:00 +0100 This release contains a variety of fixes from 9.2.2. For information about new features in the 9.2 major release, see - . + . @@ -7534,7 +7904,7 @@ Branch: REL9_2_STABLE [6b700301c] 2015-02-17 16:03:00 +0100 However, if you are upgrading from a version earlier than 9.2.2, - see . + see . @@ -7546,7 +7916,7 @@ Branch: REL9_2_STABLE [6b700301c] 2015-02-17 16:03:00 +0100 - Prevent execution of enum_recv from SQL (Tom Lane) + Prevent execution of enum_recv from SQL (Tom Lane) @@ -7573,7 +7943,7 @@ Branch: REL9_2_STABLE [6b700301c] 2015-02-17 16:03:00 +0100 This mistake could result in incorrect WAL ends before end of - online backup errors. + online backup errors. @@ -7655,8 +8025,8 @@ Branch: REL9_2_STABLE [6b700301c] 2015-02-17 16:03:00 +0100 - Improve performance of SPI_execute and related - functions, thereby improving PL/pgSQL's EXECUTE + Improve performance of SPI_execute and related + functions, thereby improving PL/pgSQL's EXECUTE (Heikki Linnakangas, Tom Lane) @@ -7691,20 +8061,20 @@ Branch: REL9_2_STABLE [6b700301c] 2015-02-17 16:03:00 +0100 - Fix intermittent crash in DROP INDEX CONCURRENTLY (Tom Lane) + Fix intermittent crash in DROP INDEX CONCURRENTLY (Tom Lane) Fix potential corruption of shared-memory lock table during - CREATE/DROP INDEX CONCURRENTLY (Tom Lane) + CREATE/DROP INDEX CONCURRENTLY (Tom Lane) - Fix COPY's multiple-tuple-insertion code for the case of + Fix COPY's multiple-tuple-insertion code for the case of a tuple larger than page size minus fillfactor (Heikki Linnakangas) @@ -7716,19 +8086,19 @@ Branch: REL9_2_STABLE [6b700301c] 2015-02-17 16:03:00 +0100 Protect against race conditions when scanning - pg_tablespace (Stephen Frost, Tom Lane) + pg_tablespace (Stephen Frost, Tom Lane) - CREATE DATABASE and DROP DATABASE could + CREATE DATABASE and DROP DATABASE could misbehave if there were concurrent updates of - pg_tablespace entries. + pg_tablespace entries. 
- Prevent DROP OWNED from trying to drop whole databases or + Prevent DROP OWNED from trying to drop whole databases or tablespaces (Álvaro Herrera) @@ -7740,13 +8110,13 @@ Branch: REL9_2_STABLE [6b700301c] 2015-02-17 16:03:00 +0100 Fix error in vacuum_freeze_table_age + linkend="guc-vacuum-freeze-table-age">vacuum_freeze_table_age implementation (Andres Freund) In installations that have existed for more than vacuum_freeze_min_age + linkend="guc-vacuum-freeze-min-age">vacuum_freeze_min_age transactions, this mistake prevented autovacuum from using partial-table scans, so that a full-table scan would always happen instead. @@ -7754,13 +8124,13 @@ Branch: REL9_2_STABLE [6b700301c] 2015-02-17 16:03:00 +0100 - Prevent misbehavior when a RowExpr or XmlExpr + Prevent misbehavior when a RowExpr or XmlExpr is parse-analyzed twice (Andres Freund, Tom Lane) This mistake could be user-visible in contexts such as - CREATE TABLE LIKE INCLUDING INDEXES. + CREATE TABLE LIKE INCLUDING INDEXES. @@ -7778,7 +8148,7 @@ Branch: REL9_2_STABLE [6b700301c] 2015-02-17 16:03:00 +0100 There were some issues with default privileges for types, and - pg_dump failed to dump such privileges at all. + pg_dump failed to dump such privileges at all. @@ -7798,13 +8168,13 @@ Branch: REL9_2_STABLE [6b700301c] 2015-02-17 16:03:00 +0100 - Reject out-of-range dates in to_date() (Hitoshi Harada) + Reject out-of-range dates in to_date() (Hitoshi Harada) - Fix pg_extension_config_dump() to handle + Fix pg_extension_config_dump() to handle extension-update cases properly (Tom Lane) @@ -7822,7 +8192,7 @@ Branch: REL9_2_STABLE [6b700301c] 2015-02-17 16:03:00 +0100 The previous coding resulted in sometimes omitting the first line in - the CONTEXT traceback for the error. + the CONTEXT traceback for the error. @@ -7840,13 +8210,13 @@ Branch: REL9_2_STABLE [6b700301c] 2015-02-17 16:03:00 +0100 - This bug affected psql and some other client programs. + This bug affected psql and some other client programs. - Fix possible crash in psql's \? command + Fix possible crash in psql's \? command when not connected to a database (Meng Qingzhong) @@ -7854,74 +8224,74 @@ Branch: REL9_2_STABLE [6b700301c] 2015-02-17 16:03:00 +0100 Fix possible error if a relation file is removed while - pg_basebackup is running (Heikki Linnakangas) + pg_basebackup is running (Heikki Linnakangas) - Tolerate timeline switches while pg_basebackup -X fetch + Tolerate timeline switches while pg_basebackup -X fetch is backing up a standby server (Heikki Linnakangas) - Make pg_dump exclude data of unlogged tables when + Make pg_dump exclude data of unlogged tables when running on a hot-standby server (Magnus Hagander) This would fail anyway because the data is not available on the standby server, so it seems most convenient to assume - automatically. - Fix pg_upgrade to deal with invalid indexes safely + Fix pg_upgrade to deal with invalid indexes safely (Bruce Momjian) - Fix pg_upgrade's -O/-o options (Marti Raudsepp) + Fix pg_upgrade's -O/-o options (Marti Raudsepp) - Fix one-byte buffer overrun in libpq's - PQprintTuples (Xi Wang) + Fix one-byte buffer overrun in libpq's + PQprintTuples (Xi Wang) This ancient function is not used anywhere by - PostgreSQL itself, but it might still be used by some + PostgreSQL itself, but it might still be used by some client code. 
- Make ecpglib use translated messages properly + Make ecpglib use translated messages properly (Chen Huajun) - Properly install ecpg_compat and - pgtypes libraries on MSVC (Jiang Guiqing) + Properly install ecpg_compat and + pgtypes libraries on MSVC (Jiang Guiqing) - Include our version of isinf() in - libecpg if it's not provided by the system + Include our version of isinf() in + libecpg if it's not provided by the system (Jiang Guiqing) @@ -7941,15 +8311,15 @@ Branch: REL9_2_STABLE [6b700301c] 2015-02-17 16:03:00 +0100 - Make pgxs build executables with the right - .exe suffix when cross-compiling for Windows + Make pgxs build executables with the right + .exe suffix when cross-compiling for Windows (Zoltan Boszormenyi) - Add new timezone abbreviation FET (Tom Lane) + Add new timezone abbreviation FET (Tom Lane) @@ -7973,7 +8343,7 @@ Branch: REL9_2_STABLE [6b700301c] 2015-02-17 16:03:00 +0100 This release contains a variety of fixes from 9.2.1. For information about new features in the 9.2 major release, see - . + . @@ -7984,14 +8354,14 @@ Branch: REL9_2_STABLE [6b700301c] 2015-02-17 16:03:00 +0100 - However, you may need to perform REINDEX operations to + However, you may need to perform REINDEX operations to correct problems in concurrently-built indexes, as described in the first changelog item below. Also, if you are upgrading from version 9.2.0, - see . + see . @@ -8004,22 +8374,22 @@ Branch: REL9_2_STABLE [6b700301c] 2015-02-17 16:03:00 +0100 Fix multiple bugs associated with CREATE/DROP INDEX - CONCURRENTLY (Andres Freund, Tom Lane, Simon Riggs, Pavan Deolasee) + CONCURRENTLY (Andres Freund, Tom Lane, Simon Riggs, Pavan Deolasee) - An error introduced while adding DROP INDEX CONCURRENTLY + An error introduced while adding DROP INDEX CONCURRENTLY allowed incorrect indexing decisions to be made during the initial - phase of CREATE INDEX CONCURRENTLY; so that indexes built + phase of CREATE INDEX CONCURRENTLY; so that indexes built by that command could be corrupt. It is recommended that indexes - built in 9.2.X with CREATE INDEX CONCURRENTLY be rebuilt + built in 9.2.X with CREATE INDEX CONCURRENTLY be rebuilt after applying this update. - In addition, fix CREATE/DROP INDEX CONCURRENTLY to use + In addition, fix CREATE/DROP INDEX CONCURRENTLY to use in-place updates when changing the state of an index's - pg_index row. This prevents race conditions that could + pg_index row. This prevents race conditions that could cause concurrent sessions to miss updating the target index, thus again resulting in corrupt concurrently-created indexes. @@ -8027,33 +8397,33 @@ Branch: REL9_2_STABLE [6b700301c] 2015-02-17 16:03:00 +0100 Also, fix various other operations to ensure that they ignore invalid indexes resulting from a failed CREATE INDEX - CONCURRENTLY command. The most important of these is - VACUUM, because an auto-vacuum could easily be launched + CONCURRENTLY command. The most important of these is + VACUUM, because an auto-vacuum could easily be launched on the table before corrective action can be taken to fix or remove the invalid index. - Also fix DROP INDEX CONCURRENTLY to not disable + Also fix DROP INDEX CONCURRENTLY to not disable insertions into the target index until all queries using it are done. - Also fix misbehavior if DROP INDEX CONCURRENTLY is + Also fix misbehavior if DROP INDEX CONCURRENTLY is canceled: the previous coding could leave an un-droppable index behind. 
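One possible way to rebuild an index that might have been affected by the CREATE/DROP INDEX CONCURRENTLY bugs described above, sketched with placeholder names:

    CREATE INDEX CONCURRENTLY my_idx_new ON my_table (col);
    DROP INDEX CONCURRENTLY my_idx;
    ALTER INDEX my_idx_new RENAME TO my_idx;
    -- REINDEX INDEX my_idx is the simpler alternative, at the cost of a
    -- stronger lock on the table.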
- Correct predicate locking for DROP INDEX CONCURRENTLY + Correct predicate locking for DROP INDEX CONCURRENTLY (Kevin Grittner) Previously, SSI predicate locks were processed at the wrong time, possibly leading to incorrect behavior of serializable transactions - executing in parallel with the DROP. + executing in parallel with the DROP. @@ -8111,13 +8481,13 @@ Branch: REL9_2_STABLE [6b700301c] 2015-02-17 16:03:00 +0100 This oversight could prevent subsequent execution of certain - operations such as CREATE INDEX CONCURRENTLY. + operations such as CREATE INDEX CONCURRENTLY. - Avoid bogus out-of-sequence timeline ID errors in standby + Avoid bogus out-of-sequence timeline ID errors in standby mode (Heikki Linnakangas) @@ -8137,20 +8507,20 @@ Branch: REL9_2_STABLE [6b700301c] 2015-02-17 16:03:00 +0100 Fix the syslogger process to not fail when - log_rotation_age exceeds 2^31 milliseconds (about 25 days) + log_rotation_age exceeds 2^31 milliseconds (about 25 days) (Tom Lane) - Fix WaitLatch() to return promptly when the requested + Fix WaitLatch() to return promptly when the requested timeout expires (Jeff Janes, Tom Lane) With the previous coding, a steady stream of non-wait-terminating - interrupts could delay return from WaitLatch() + interrupts could delay return from WaitLatch() indefinitely. This has been shown to be a problem for the autovacuum launcher process, and might cause trouble elsewhere as well. @@ -8203,8 +8573,8 @@ Branch: REL9_2_STABLE [6b700301c] 2015-02-17 16:03:00 +0100 The planner could derive incorrect constraints from a clause equating a non-strict construct to something else, for example - WHERE COALESCE(foo, 0) = 0 - when foo is coming from the nullable side of an outer join. + WHERE COALESCE(foo, 0) = 0 + when foo is coming from the nullable side of an outer join. 9.2 showed this type of error in more cases than previous releases, but the basic bug has been there for a long time. @@ -8212,13 +8582,13 @@ Branch: REL9_2_STABLE [6b700301c] 2015-02-17 16:03:00 +0100 - Fix SELECT DISTINCT with index-optimized - MIN/MAX on an inheritance tree (Tom Lane) + Fix SELECT DISTINCT with index-optimized + MIN/MAX on an inheritance tree (Tom Lane) The planner would fail with failed to re-find MinMaxAggInfo - record given this combination of factors. + record given this combination of factors. @@ -8238,7 +8608,7 @@ Branch: REL9_2_STABLE [6b700301c] 2015-02-17 16:03:00 +0100 A strict join clause can be sufficient to establish an - x IS NOT NULL predicate, for example. + x IS NOT NULL predicate, for example. This fixes a planner regression in 9.2, since previous versions could make comparable deductions. @@ -8265,10 +8635,10 @@ Branch: REL9_2_STABLE [6b700301c] 2015-02-17 16:03:00 +0100 - This affects multicolumn NOT IN subplans, such as - WHERE (a, b) NOT IN (SELECT x, y FROM ...) - when for instance b and y are int4 - and int8 respectively. This mistake led to wrong answers + This affects multicolumn NOT IN subplans, such as + WHERE (a, b) NOT IN (SELECT x, y FROM ...) + when for instance b and y are int4 + and int8 respectively. This mistake led to wrong answers or crashes depending on the specific datatypes involved. @@ -8281,8 +8651,8 @@ Branch: REL9_2_STABLE [6b700301c] 2015-02-17 16:03:00 +0100 This oversight could result in wrong answers from merge joins whose inner side is an index scan using an - indexed_column = - ANY(array) condition. + indexed_column = + ANY(array) condition. 
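The general shape of a query that could feed the merge-join issue in the item just above (table and column names are placeholders):

    SELECT *
    FROM orders o
    JOIN customers c ON c.id = o.customer_id
    WHERE c.id = ANY (ARRAY[1, 2, 3]);   -- indexed_column = ANY(array) on the inner side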
@@ -8306,12 +8676,12 @@ Branch: REL9_2_STABLE [6b700301c] 2015-02-17 16:03:00 +0100 Acquire buffer lock when re-fetching the old tuple for an - AFTER ROW UPDATE/DELETE trigger (Andres Freund) + AFTER ROW UPDATE/DELETE trigger (Andres Freund) In very unusual circumstances, this oversight could result in passing - incorrect data to a trigger WHEN condition, or to the + incorrect data to a trigger WHEN condition, or to the precheck logic for a foreign-key enforcement trigger. That could result in a crash, or in an incorrect decision about whether to fire the trigger. @@ -8320,7 +8690,7 @@ Branch: REL9_2_STABLE [6b700301c] 2015-02-17 16:03:00 +0100 - Fix ALTER COLUMN TYPE to handle inherited check + Fix ALTER COLUMN TYPE to handle inherited check constraints properly (Pavan Deolasee) @@ -8332,7 +8702,7 @@ Branch: REL9_2_STABLE [6b700301c] 2015-02-17 16:03:00 +0100 - Fix ALTER EXTENSION SET SCHEMA's failure to move some + Fix ALTER EXTENSION SET SCHEMA's failure to move some subsidiary objects into the new schema (Álvaro Herrera, Dimitri Fontaine) @@ -8340,7 +8710,7 @@ Branch: REL9_2_STABLE [6b700301c] 2015-02-17 16:03:00 +0100 - Handle CREATE TABLE AS EXECUTE correctly in extended query + Handle CREATE TABLE AS EXECUTE correctly in extended query protocol (Tom Lane) @@ -8348,7 +8718,7 @@ Branch: REL9_2_STABLE [6b700301c] 2015-02-17 16:03:00 +0100 Don't modify the input parse tree in DROP RULE IF NOT - EXISTS and DROP TRIGGER IF NOT EXISTS (Tom Lane) + EXISTS and DROP TRIGGER IF NOT EXISTS (Tom Lane) @@ -8359,14 +8729,14 @@ Branch: REL9_2_STABLE [6b700301c] 2015-02-17 16:03:00 +0100 - Fix REASSIGN OWNED to handle grants on tablespaces + Fix REASSIGN OWNED to handle grants on tablespaces (Álvaro Herrera) - Ignore incorrect pg_attribute entries for system + Ignore incorrect pg_attribute entries for system columns for views (Tom Lane) @@ -8380,7 +8750,7 @@ Branch: REL9_2_STABLE [6b700301c] 2015-02-17 16:03:00 +0100 - Fix rule printing to dump INSERT INTO table + Fix rule printing to dump INSERT INTO table DEFAULT VALUES correctly (Tom Lane) @@ -8388,7 +8758,7 @@ Branch: REL9_2_STABLE [6b700301c] 2015-02-17 16:03:00 +0100 Guard against stack overflow when there are too many - UNION/INTERSECT/EXCEPT clauses + UNION/INTERSECT/EXCEPT clauses in a query (Tom Lane) @@ -8410,22 +8780,22 @@ Branch: REL9_2_STABLE [6b700301c] 2015-02-17 16:03:00 +0100 Fix failure to advance XID epoch if XID wraparound happens during a - checkpoint and wal_level is hot_standby + checkpoint and wal_level is hot_standby (Tom Lane, Andres Freund) While this mistake had no particular impact on PostgreSQL itself, it was bad for - applications that rely on txid_current() and related + applications that rely on txid_current() and related functions: the TXID value would appear to go backwards. - Fix pg_terminate_backend() and - pg_cancel_backend() to not throw error for a non-existent + Fix pg_terminate_backend() and + pg_cancel_backend() to not throw error for a non-existent target process (Josh Kupershmidt) @@ -8438,7 +8808,7 @@ Branch: REL9_2_STABLE [6b700301c] 2015-02-17 16:03:00 +0100 Fix display of - pg_stat_replication.sync_state at a + pg_stat_replication.sync_state at a page boundary (Kyotaro Horiguchi) @@ -8452,7 +8822,7 @@ Branch: REL9_2_STABLE [6b700301c] 2015-02-17 16:03:00 +0100 Formerly, this would result in something quite unhelpful, such as - Non-recoverable failure in name resolution. + Non-recoverable failure in name resolution. 
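For the pg_terminate_backend()/pg_cancel_backend() item above, a sketch with a made-up PID; with the fix these calls no longer raise an error when the PID does not correspond to a live backend:

    SELECT pg_cancel_backend(12345);
    SELECT pg_terminate_backend(12345);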
@@ -8477,8 +8847,8 @@ Branch: REL9_2_STABLE [6b700301c] 2015-02-17 16:03:00 +0100 - Make pg_ctl more robust about reading the - postmaster.pid file (Heikki Linnakangas) + Make pg_ctl more robust about reading the + postmaster.pid file (Heikki Linnakangas) @@ -8488,45 +8858,45 @@ Branch: REL9_2_STABLE [6b700301c] 2015-02-17 16:03:00 +0100 - Fix possible crash in psql if incorrectly-encoded data - is presented and the client_encoding setting is a + Fix possible crash in psql if incorrectly-encoded data + is presented and the client_encoding setting is a client-only encoding, such as SJIS (Jiang Guiqing) - Make pg_dump dump SEQUENCE SET items in + Make pg_dump dump SEQUENCE SET items in the data not pre-data section of the archive (Tom Lane) This fixes an undesirable inconsistency between the meanings of - and , and also fixes dumping of sequences that are marked as extension configuration tables. - Fix pg_dump's handling of DROP DATABASE - commands in mode (Guillaume Lelarge) - Beginning in 9.2.0, pg_dump --clean would issue a - DROP DATABASE command, which was either useless or + Beginning in 9.2.0, pg_dump --clean would issue a + DROP DATABASE command, which was either useless or dangerous depending on the usage scenario. It no longer does that. - This change also fixes the combination of - Fix pg_dump for views with circular dependencies and + Fix pg_dump for views with circular dependencies and no relation options (Tom Lane) @@ -8534,31 +8904,31 @@ Branch: REL9_2_STABLE [6b700301c] 2015-02-17 16:03:00 +0100 The previous fix to dump relation options when a view is involved in a circular dependency didn't work right for the case that the view has no options; it emitted ALTER VIEW foo - SET () which is invalid syntax. + SET () which is invalid syntax. - Fix bugs in the restore.sql script emitted by - pg_dump in tar output format (Tom Lane) + Fix bugs in the restore.sql script emitted by + pg_dump in tar output format (Tom Lane) The script would fail outright on tables whose names include upper-case characters. Also, make the script capable of restoring - data in mode as well as the regular COPY mode. - Fix pg_restore to accept POSIX-conformant - tar files (Brian Weaver, Tom Lane) + Fix pg_restore to accept POSIX-conformant + tar files (Brian Weaver, Tom Lane) - The original coding of pg_dump's tar + The original coding of pg_dump's tar output mode produced files that are not fully conformant with the POSIX standard. This has been corrected for version 9.3. This patch updates previous branches so that they will accept both the @@ -8569,82 +8939,82 @@ Branch: REL9_2_STABLE [6b700301c] 2015-02-17 16:03:00 +0100 - Fix tar files emitted by pg_basebackup to + Fix tar files emitted by pg_basebackup to be POSIX conformant (Brian Weaver, Tom Lane) - Fix pg_resetxlog to locate postmaster.pid + Fix pg_resetxlog to locate postmaster.pid correctly when given a relative path to the data directory (Tom Lane) - This mistake could lead to pg_resetxlog not noticing + This mistake could lead to pg_resetxlog not noticing that there is an active postmaster using the data directory. 
- Fix libpq's lo_import() and - lo_export() functions to report file I/O errors properly + Fix libpq's lo_import() and + lo_export() functions to report file I/O errors properly (Tom Lane) - Fix ecpg's processing of nested structure pointer + Fix ecpg's processing of nested structure pointer variables (Muhammad Usama) - Fix ecpg's ecpg_get_data function to + Fix ecpg's ecpg_get_data function to handle arrays properly (Michael Meskes) - Prevent pg_upgrade from trying to process TOAST tables + Prevent pg_upgrade from trying to process TOAST tables for system catalogs (Bruce Momjian) - This fixes an error seen when the information_schema has + This fixes an error seen when the information_schema has been dropped and recreated. Other failures were also possible. - Improve pg_upgrade performance by setting - synchronous_commit to off in the new cluster + Improve pg_upgrade performance by setting + synchronous_commit to off in the new cluster (Bruce Momjian) - Make contrib/pageinspect's btree page inspection + Make contrib/pageinspect's btree page inspection functions take buffer locks while examining pages (Tom Lane) - Work around unportable behavior of malloc(0) and - realloc(NULL, 0) (Tom Lane) + Work around unportable behavior of malloc(0) and + realloc(NULL, 0) (Tom Lane) - On platforms where these calls return NULL, some code + On platforms where these calls return NULL, some code mistakenly thought that meant out-of-memory. - This is known to have broken pg_dump for databases + This is known to have broken pg_dump for databases containing no user-defined aggregates. There might be other cases as well. @@ -8652,19 +9022,19 @@ Branch: REL9_2_STABLE [6b700301c] 2015-02-17 16:03:00 +0100 - Ensure that make install for an extension creates the - extension installation directory (Cédric Villemain) + Ensure that make install for an extension creates the + extension installation directory (Cédric Villemain) - Previously, this step was missed if MODULEDIR was set in + Previously, this step was missed if MODULEDIR was set in the extension's Makefile. - Fix pgxs support for building loadable modules on AIX + Fix pgxs support for building loadable modules on AIX (Tom Lane) @@ -8675,7 +9045,7 @@ Branch: REL9_2_STABLE [6b700301c] 2015-02-17 16:03:00 +0100 - Update time zone data files to tzdata release 2012j + Update time zone data files to tzdata release 2012j for DST law changes in Cuba, Israel, Jordan, Libya, Palestine, Western Samoa, and portions of Brazil. @@ -8697,7 +9067,7 @@ Branch: REL9_2_STABLE [6b700301c] 2015-02-17 16:03:00 +0100 This release contains a variety of fixes from 9.2.0. For information about new features in the 9.2 major release, see - . + . @@ -8708,8 +9078,8 @@ Branch: REL9_2_STABLE [6b700301c] 2015-02-17 16:03:00 +0100 - However, you may need to perform REINDEX and/or - VACUUM operations to recover from the effects of the data + However, you may need to perform REINDEX and/or + VACUUM operations to recover from the effects of the data corruption bug described in the first changelog item below. @@ -8734,7 +9104,7 @@ Branch: REL9_2_STABLE [6b700301c] 2015-02-17 16:03:00 +0100 likely to occur on standby slave servers since those perform much more WAL replay. There is a low probability of corruption of btree and GIN indexes. There is a much higher probability of corruption - of table visibility maps, which might lead to wrong answers + of table visibility maps, which might lead to wrong answers from index-only scans. Table data proper cannot be corrupted by this bug. 
@@ -8742,16 +9112,16 @@ Branch: REL9_2_STABLE [6b700301c] 2015-02-17 16:03:00 +0100 While no index corruption due to this bug is known to have occurred in the field, as a precautionary measure it is recommended that - production installations REINDEX all btree and GIN + production installations REINDEX all btree and GIN indexes at a convenient time after upgrading to 9.2.1. - Also, it is recommended to perform a VACUUM of all tables + Also, it is recommended to perform a VACUUM of all tables while having vacuum_freeze_table_age + linkend="guc-vacuum-freeze-table-age">vacuum_freeze_table_age set to zero. This will fix any incorrect visibility map data. vacuum_cost_delay + linkend="guc-vacuum-cost-delay">vacuum_cost_delay can be adjusted to reduce the performance impact of vacuuming, while causing it to take longer to finish. @@ -8760,14 +9130,14 @@ Branch: REL9_2_STABLE [6b700301c] 2015-02-17 16:03:00 +0100 Fix possible incorrect sorting of output from queries involving - WHERE indexed_column IN - (list_of_values) (Tom Lane) + WHERE indexed_column IN + (list_of_values) (Tom Lane) - Fix planner failure for queries involving GROUP BY + Fix planner failure for queries involving GROUP BY expressions along with window functions and aggregates (Tom Lane) @@ -8779,7 +9149,7 @@ Branch: REL9_2_STABLE [6b700301c] 2015-02-17 16:03:00 +0100 This error could result in wrong answers from queries that scan the - same WITH subquery multiple times. + same WITH subquery multiple times. @@ -8792,7 +9162,7 @@ Branch: REL9_2_STABLE [6b700301c] 2015-02-17 16:03:00 +0100 Improve selectivity estimation for text search queries involving - prefixes, i.e. word:* patterns (Tom Lane) + prefixes, i.e. word:* patterns (Tom Lane) @@ -8803,14 +9173,14 @@ Branch: REL9_2_STABLE [6b700301c] 2015-02-17 16:03:00 +0100 A command that needed no locks other than ones its transaction already - had might fail to notice a concurrent GRANT or - REVOKE that committed since the start of its transaction. + had might fail to notice a concurrent GRANT or + REVOKE that committed since the start of its transaction. - Fix ANALYZE to not fail when a column is a domain over an + Fix ANALYZE to not fail when a column is a domain over an array type (Tom Lane) @@ -8829,7 +9199,7 @@ Branch: REL9_2_STABLE [6b700301c] 2015-02-17 16:03:00 +0100 Some Linux distributions contain an incorrect version of - pthread.h that results in incorrect compiled code in + pthread.h that results in incorrect compiled code in PL/Perl, leading to crashes if a PL/Perl function calls another one that throws an error. 
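A minimal reproduction of the situation fixed by the ANALYZE item above (the domain, table, and column names are hypothetical):

    CREATE DOMAIN int_list AS integer[];
    CREATE TABLE samples (vals int_list);
    ANALYZE samples;   -- previously could fail for a column whose type is a
                       -- domain over an array type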
@@ -8837,14 +9207,14 @@ Branch: REL9_2_STABLE [6b700301c] 2015-02-17 16:03:00 +0100 - Remove unnecessary dependency on pg_config from - pg_upgrade (Peter Eisentraut) + Remove unnecessary dependency on pg_config from + pg_upgrade (Peter Eisentraut) - Update time zone data files to tzdata release 2012f + Update time zone data files to tzdata release 2012f for DST law changes in Fiji @@ -8878,7 +9248,7 @@ Branch: REL9_2_STABLE [6b700301c] 2015-02-17 16:03:00 +0100 Allow queries to retrieve data only from indexes, avoiding heap - access (index-only scans) + access (index-only scans) @@ -8900,14 +9270,14 @@ Branch: REL9_2_STABLE [6b700301c] 2015-02-17 16:03:00 +0100 Allow streaming replication slaves to forward data to other slaves (cascading - replication) + replication) Allow pg_basebackup + linkend="app-pgbasebackup">pg_basebackup to make base backups from standby servers @@ -8915,14 +9285,14 @@ Branch: REL9_2_STABLE [6b700301c] 2015-02-17 16:03:00 +0100 Add a pg_receivexlog + linkend="app-pgreceivewal">pg_receivexlog tool to archive WAL file changes as they are written - Add the SP-GiST (Space-Partitioned + Add the SP-GiST (Space-Partitioned GiST) index access method @@ -8943,14 +9313,14 @@ Branch: REL9_2_STABLE [6b700301c] 2015-02-17 16:03:00 +0100 Add a security_barrier + linkend="sql-createview">security_barrier option for views - Allow libpq connection strings to have the format of a + Allow libpq connection strings to have the format of a URI @@ -8958,7 +9328,7 @@ Branch: REL9_2_STABLE [6b700301c] 2015-02-17 16:03:00 +0100 Add a single-row processing - mode to libpq for better handling of large + mode to libpq for better handling of large result sets @@ -8993,8 +9363,8 @@ Branch: REL9_2_STABLE [6b700301c] 2015-02-17 16:03:00 +0100 - Remove the spclocation field from pg_tablespace + Remove the spclocation field from pg_tablespace (Magnus Hagander) @@ -9004,23 +9374,23 @@ Branch: REL9_2_STABLE [6b700301c] 2015-02-17 16:03:00 +0100 a tablespace. This change allows tablespace directories to be moved while the server is down, by manually adjusting the symbolic links. To replace this field, we have added pg_tablespace_location() + linkend="functions-info-catalog-table">pg_tablespace_location() to allow querying of the symbolic links. - Move tsvector most-common-element statistics to new - pg_stats columns + Move tsvector most-common-element statistics to new + pg_stats columns (Alexander Korotkov) - Consult most_common_elems - and most_common_elem_freqs for the data formerly - available in most_common_vals - and most_common_freqs for a tsvector column. + Consult most_common_elems + and most_common_elem_freqs for the data formerly + available in most_common_vals + and most_common_freqs for a tsvector column. @@ -9035,14 +9405,14 @@ Branch: REL9_2_STABLE [6b700301c] 2015-02-17 16:03:00 +0100 - Remove hstore's => + Remove hstore's => operator (Robert Haas) - Users should now use hstore(text, text). Since + Users should now use hstore(text, text). Since PostgreSQL 9.0, a warning message has been - emitted when an operator named => is created because + emitted when an operator named => is created because the SQL standard reserves that token for another use. 
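As a brief illustration of the replacement for the removed operator (assuming the hstore extension is installed):

    SELECT hstore('a', '1');        -- replaces the removed  'a' => '1'  operator form
    SELECT 'a=>1, b=>2'::hstore;    -- => inside hstore literals is unaffected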
@@ -9051,7 +9421,7 @@ Branch: REL9_2_STABLE [6b700301c] 2015-02-17 16:03:00 +0100 Ensure that xpath() + linkend="functions-xml-processing">xpath() escapes special characters in string values (Florian Pflug) @@ -9064,13 +9434,13 @@ Branch: REL9_2_STABLE [6b700301c] 2015-02-17 16:03:00 +0100 Make pg_relation_size() + linkend="functions-admin-dbobject">pg_relation_size() and friends return NULL if the object does not exist (Phil Sorber) This prevents queries that call these functions from returning - errors immediately after a concurrent DROP. + errors immediately after a concurrent DROP. @@ -9078,7 +9448,7 @@ Branch: REL9_2_STABLE [6b700301c] 2015-02-17 16:03:00 +0100 Make EXTRACT(EPOCH FROM - timestamp without time zone) + timestamp without time zone) measure the epoch from local midnight, not UTC midnight (Tom Lane) @@ -9087,17 +9457,17 @@ Branch: REL9_2_STABLE [6b700301c] 2015-02-17 16:03:00 +0100 This change reverts an ill-considered change made in release 7.3. Measuring from UTC midnight was inconsistent because it made the result dependent on the timezone setting, which - computations for timestamp without time zone should not be. + linkend="guc-timezone">timezone setting, which + computations for timestamp without time zone should not be. The previous behavior remains available by casting the input value - to timestamp with time zone. + to timestamp with time zone. - Properly parse time strings with trailing yesterday, - today, and tomorrow (Dean Rasheed) + Properly parse time strings with trailing yesterday, + today, and tomorrow (Dean Rasheed) @@ -9109,8 +9479,8 @@ Branch: REL9_2_STABLE [6b700301c] 2015-02-17 16:03:00 +0100 Fix to_date() and - to_timestamp() to wrap incomplete dates toward 2020 + linkend="functions-formatting">to_date() and + to_timestamp() to wrap incomplete dates toward 2020 (Bruce Momjian) @@ -9131,7 +9501,7 @@ Branch: REL9_2_STABLE [6b700301c] 2015-02-17 16:03:00 +0100 - Prevent ALTER + Prevent ALTER DOMAIN from working on non-domain types (Peter Eisentraut) @@ -9145,15 +9515,15 @@ Branch: REL9_2_STABLE [6b700301c] 2015-02-17 16:03:00 +0100 No longer forcibly lowercase procedural language names in CREATE FUNCTION + linkend="sql-createfunction">CREATE FUNCTION (Robert Haas) While unquoted language identifiers are still lowercased, strings and quoted identifiers are no longer forcibly down-cased. - Thus for example CREATE FUNCTION ... LANGUAGE 'C' - will no longer work; it must be spelled 'c', or better + Thus for example CREATE FUNCTION ... LANGUAGE 'C' + will no longer work; it must be spelled 'c', or better omit the quotes. @@ -9183,15 +9553,15 @@ Branch: REL9_2_STABLE [6b700301c] 2015-02-17 16:03:00 +0100 Provide consistent backquote, variable expansion, and quoted substring behavior in psql meta-command + linkend="app-psql">psql meta-command arguments (Tom Lane) Previously, such references were treated oddly when not separated by - whitespace from adjacent text. For example 'FOO'BAR was - output as FOO BAR (unexpected insertion of a space) and - FOO'BAR'BAZ was output unchanged (not removing the quotes + whitespace from adjacent text. For example 'FOO'BAR was + output as FOO BAR (unexpected insertion of a space) and + FOO'BAR'BAZ was output unchanged (not removing the quotes as most would expect). 
@@ -9199,9 +9569,9 @@ Branch: REL9_2_STABLE [6b700301c] 2015-02-17 16:03:00 +0100 No longer treat clusterdb + linkend="app-clusterdb">clusterdb table names as double-quoted; no longer treat reindexdb table + linkend="app-reindexdb">reindexdb table and index names as double-quoted (Bruce Momjian) @@ -9213,20 +9583,20 @@ Branch: REL9_2_STABLE [6b700301c] 2015-02-17 16:03:00 +0100 - createuser + createuser no longer prompts for option settings by default (Peter Eisentraut) - Use to obtain the old behavior. Disable prompting for the user name in dropuser unless - is specified (Peter Eisentraut) @@ -9248,36 +9618,36 @@ Branch: REL9_2_STABLE [6b700301c] 2015-02-17 16:03:00 +0100 This allows changing the names and locations of the files that were - previously hard-coded as server.crt, - server.key, root.crt, and - root.crl in the data directory. - The server will no longer examine root.crt or - root.crl by default; to load these files, the + previously hard-coded as server.crt, + server.key, root.crt, and + root.crl in the data directory. + The server will no longer examine root.crt or + root.crl by default; to load these files, the associated parameters must be set to non-default values. - Remove the silent_mode parameter (Heikki Linnakangas) + Remove the silent_mode parameter (Heikki Linnakangas) Similar behavior can be obtained with pg_ctl start - -l postmaster.log. + -l postmaster.log. - Remove the wal_sender_delay parameter, + Remove the wal_sender_delay parameter, as it is no longer needed (Tom Lane) - Remove the custom_variable_classes parameter (Tom Lane) + Remove the custom_variable_classes parameter (Tom Lane) @@ -9297,19 +9667,19 @@ Branch: REL9_2_STABLE [6b700301c] 2015-02-17 16:03:00 +0100 Rename pg_stat_activity.procpid - to pid, to match other system tables (Magnus Hagander) + linkend="monitoring-stats-views-table">pg_stat_activity.procpid + to pid, to match other system tables (Magnus Hagander) - Create a separate pg_stat_activity column to + Create a separate pg_stat_activity column to report process state (Scott Mead, Magnus Hagander) - The previous query and query_start + The previous query and query_start values now remain available for an idle session, allowing enhanced analysis. @@ -9317,8 +9687,8 @@ Branch: REL9_2_STABLE [6b700301c] 2015-02-17 16:03:00 +0100 - Rename pg_stat_activity.current_query to - query because it is not cleared when the query + Rename pg_stat_activity.current_query to + query because it is not cleared when the query completes (Magnus Hagander) @@ -9326,24 +9696,24 @@ Branch: REL9_2_STABLE [6b700301c] 2015-02-17 16:03:00 +0100 Change all SQL-level statistics timing values - to be float8 columns measured in milliseconds (Tom Lane) + to be float8 columns measured in milliseconds (Tom Lane) This change eliminates the designed-in assumption that the values - are accurate to microseconds and no more (since the float8 + are accurate to microseconds and no more (since the float8 values can be fractional). The columns affected are - pg_stat_user_functions.total_time, - pg_stat_user_functions.self_time, - pg_stat_xact_user_functions.total_time, + pg_stat_user_functions.total_time, + pg_stat_user_functions.self_time, + pg_stat_xact_user_functions.total_time, and - pg_stat_xact_user_functions.self_time. + pg_stat_xact_user_functions.self_time. The statistics functions underlying these columns now also return - float8 milliseconds, rather than bigint + float8 milliseconds, rather than bigint microseconds. 
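For example, with timing now reported as fractional milliseconds (this assumes track_functions = 'all' so that the view is populated):

    SELECT funcname, calls, total_time, self_time   -- total_time/self_time are float8 milliseconds
    FROM pg_stat_user_functions
    ORDER BY total_time DESC;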
- contrib/pg_stat_statements' - total_time column is now also measured in + contrib/pg_stat_statements' + total_time column is now also measured in milliseconds. @@ -9377,7 +9747,7 @@ Branch: REL9_2_STABLE [6b700301c] 2015-02-17 16:03:00 +0100 - This feature is often called index-only scans. + This feature is often called index-only scans. Heap access can be skipped for heap pages containing only tuples that are visible to all sessions, as reported by the visibility map; so the benefit applies mainly to mostly-static data. The visibility map @@ -9387,7 +9757,7 @@ Branch: REL9_2_STABLE [6b700301c] 2015-02-17 16:03:00 +0100 - Add the SP-GiST (Space-Partitioned + Add the SP-GiST (Space-Partitioned GiST) index access method (Teodor Sigaev, Oleg Bartunov, Tom Lane) @@ -9449,7 +9819,7 @@ Branch: REL9_2_STABLE [6b700301c] 2015-02-17 16:03:00 +0100 - Move the frequently accessed members of the PGPROC + Move the frequently accessed members of the PGPROC shared memory array to a separate array (Pavan Deolasee, Heikki Linnakangas, Robert Haas) @@ -9494,7 +9864,7 @@ Branch: REL9_2_STABLE [6b700301c] 2015-02-17 16:03:00 +0100 Make the number of CLOG buffers scale based on shared_buffers + linkend="guc-shared-buffers">shared_buffers (Robert Haas, Simon Riggs, Tom Lane) @@ -9555,7 +9925,7 @@ Branch: REL9_2_STABLE [6b700301c] 2015-02-17 16:03:00 +0100 Previously, only wal_writer_delay + linkend="guc-wal-writer-delay">wal_writer_delay triggered WAL flushing to disk; now filling a WAL buffer also triggers WAL writes. @@ -9594,7 +9964,7 @@ Branch: REL9_2_STABLE [6b700301c] 2015-02-17 16:03:00 +0100 In the past, a prepared statement always had a single - generic plan that was used for all parameter values, which + generic plan that was used for all parameter values, which was frequently much inferior to the plans used for non-prepared statements containing explicit constant values. Now, the planner attempts to generate custom plans for specific parameter values. @@ -9612,7 +9982,7 @@ Branch: REL9_2_STABLE [6b700301c] 2015-02-17 16:03:00 +0100 - The new parameterized path mechanism allows inner + The new parameterized path mechanism allows inner index scans to use values from relations that are more than one join level up from the scan. This can greatly improve performance in situations where semantic restrictions (such as outer joins) limit @@ -9627,7 +9997,7 @@ Branch: REL9_2_STABLE [6b700301c] 2015-02-17 16:03:00 +0100 - Wrappers can now provide multiple access paths for their + Wrappers can now provide multiple access paths for their tables, allowing more flexibility in join planning. @@ -9640,14 +10010,14 @@ Branch: REL9_2_STABLE [6b700301c] 2015-02-17 16:03:00 +0100 This check is only performed when constraint_exclusion + linkend="guc-constraint-exclusion">constraint_exclusion is on. 
- Allow indexed_col op ANY(ARRAY[...]) conditions to be + Allow indexed_col op ANY(ARRAY[...]) conditions to be used in plain index scans and index-only scans (Tom Lane) @@ -9658,14 +10028,14 @@ Branch: REL9_2_STABLE [6b700301c] 2015-02-17 16:03:00 +0100 - Support MIN/MAX index optimizations on + Support MIN/MAX index optimizations on boolean columns (Marti Raudsepp) - Account for set-returning functions in SELECT target + Account for set-returning functions in SELECT target lists when setting row count estimates (Tom Lane) @@ -9713,7 +10083,7 @@ Branch: REL9_2_STABLE [6b700301c] 2015-02-17 16:03:00 +0100 Improve statistical estimates for subqueries using - DISTINCT (Tom Lane) + DISTINCT (Tom Lane) @@ -9728,13 +10098,13 @@ Branch: REL9_2_STABLE [6b700301c] 2015-02-17 16:03:00 +0100 - Do not treat role names and samerole specified in samerole specified in pg_hba.conf as automatically including superusers (Andrew Dunstan) - This makes it easier to use reject lines with group roles. + This makes it easier to use reject lines with group roles. @@ -9789,7 +10159,7 @@ Branch: REL9_2_STABLE [6b700301c] 2015-02-17 16:03:00 +0100 This logging is triggered by log_autovacuum_min_duration. + linkend="guc-log-autovacuum-min-duration">log_autovacuum_min_duration. @@ -9808,7 +10178,7 @@ Branch: REL9_2_STABLE [6b700301c] 2015-02-17 16:03:00 +0100 Add pg_xlog_location_diff() + linkend="functions-admin-backup">pg_xlog_location_diff() to simplify WAL location comparisons (Euler Taveira de Oliveira) @@ -9826,15 +10196,15 @@ Branch: REL9_2_STABLE [6b700301c] 2015-02-17 16:03:00 +0100 This allows different instances to use the event log with different identifiers, by setting the event_source + linkend="guc-event-source">event_source server parameter, which is similar to how syslog_ident works. + linkend="guc-syslog-ident">syslog_ident works. - Change unexpected EOF messages to DEBUG1 level, + Change unexpected EOF messages to DEBUG1 level, except when there is an open transaction (Magnus Hagander) @@ -9856,14 +10226,14 @@ Branch: REL9_2_STABLE [6b700301c] 2015-02-17 16:03:00 +0100 Track temporary file sizes and file counts in the pg_stat_database + linkend="pg-stat-database-view">pg_stat_database system view (Tomas Vondra) - Add a deadlock counter to the pg_stat_database + Add a deadlock counter to the pg_stat_database system view (Magnus Hagander) @@ -9871,7 +10241,7 @@ Branch: REL9_2_STABLE [6b700301c] 2015-02-17 16:03:00 +0100 Add a server parameter track_io_timing + linkend="guc-track-io-timing">track_io_timing to track I/O timings (Ants Aasma, Robert Haas) @@ -9879,7 +10249,7 @@ Branch: REL9_2_STABLE [6b700301c] 2015-02-17 16:03:00 +0100 Report checkpoint timing information in pg_stat_bgwriter + linkend="pg-stat-bgwriter-view">pg_stat_bgwriter (Greg Smith, Peter Geoghegan) @@ -9896,7 +10266,7 @@ Branch: REL9_2_STABLE [6b700301c] 2015-02-17 16:03:00 +0100 Silently ignore nonexistent schemas specified in search_path (Tom Lane) + linkend="guc-search-path">search_path (Tom Lane) @@ -9908,12 +10278,12 @@ Branch: REL9_2_STABLE [6b700301c] 2015-02-17 16:03:00 +0100 Allow superusers to set deadlock_timeout + linkend="guc-deadlock-timeout">deadlock_timeout per-session, not just per-cluster (Noah Misch) - This allows deadlock_timeout to be reduced for + This allows deadlock_timeout to be reduced for transactions that are likely to be involved in a deadlock, thus detecting the failure more quickly. 
Alternatively, increasing the value can be used to reduce the chances of a session being chosen for @@ -9924,7 +10294,7 @@ Branch: REL9_2_STABLE [6b700301c] 2015-02-17 16:03:00 +0100 Add a server parameter temp_file_limit + linkend="guc-temp-file-limit">temp_file_limit to constrain temporary file space usage per session (Mark Kirkwood) @@ -9945,13 +10315,13 @@ Branch: REL9_2_STABLE [6b700301c] 2015-02-17 16:03:00 +0100 - Add postmaster option to query configuration parameters (Bruce Momjian) - This allows pg_ctl to better handle cases where - PGDATA or points to a configuration-only directory. @@ -9959,14 +10329,14 @@ Branch: REL9_2_STABLE [6b700301c] 2015-02-17 16:03:00 +0100 Replace an empty locale name with the implied value in - CREATE DATABASE + CREATE DATABASE (Tom Lane) This prevents cases where - pg_database.datcollate or - datctype could be interpreted differently after a + pg_database.datcollate or + datctype could be interpreted differently after a server restart. @@ -10001,22 +10371,22 @@ Branch: REL9_2_STABLE [6b700301c] 2015-02-17 16:03:00 +0100 - Add an include_if_exists facility for configuration + Add an include_if_exists facility for configuration files (Greg Smith) - This works the same as include, except that an error + This works the same as include, except that an error is not thrown if the file is missing. - Identify the server time zone during initdb, and set + Identify the server time zone during initdb, and set postgresql.conf entries - timezone and - log_timezone + timezone and + log_timezone accordingly (Tom Lane) @@ -10028,7 +10398,7 @@ Branch: REL9_2_STABLE [6b700301c] 2015-02-17 16:03:00 +0100 Fix pg_settings to + linkend="view-pg-settings">pg_settings to report postgresql.conf line numbers on Windows (Tom Lane) @@ -10051,7 +10421,7 @@ Branch: REL9_2_STABLE [6b700301c] 2015-02-17 16:03:00 +0100 Allow streaming replication slaves to forward data to other slaves (cascading - replication) (Fujii Masao) + replication) (Fujii Masao) @@ -10063,8 +10433,8 @@ Branch: REL9_2_STABLE [6b700301c] 2015-02-17 16:03:00 +0100 Add new synchronous_commit - mode remote_write (Fujii Masao, Simon Riggs) + linkend="guc-synchronous-commit">synchronous_commit + mode remote_write (Fujii Masao, Simon Riggs) @@ -10077,7 +10447,7 @@ Branch: REL9_2_STABLE [6b700301c] 2015-02-17 16:03:00 +0100 Add a pg_receivexlog + linkend="app-pgreceivewal">pg_receivexlog tool to archive WAL file changes as they are written, rather than waiting for completed WAL files (Magnus Hagander) @@ -10086,7 +10456,7 @@ Branch: REL9_2_STABLE [6b700301c] 2015-02-17 16:03:00 +0100 Allow pg_basebackup + linkend="app-pgbasebackup">pg_basebackup to make base backups from standby servers (Jun Ishizuka, Fujii Masao) @@ -10098,7 +10468,7 @@ Branch: REL9_2_STABLE [6b700301c] 2015-02-17 16:03:00 +0100 - Allow streaming of WAL files while pg_basebackup + Allow streaming of WAL files while pg_basebackup is performing a backup (Magnus Hagander) @@ -10137,19 +10507,19 @@ Branch: REL9_2_STABLE [6b700301c] 2015-02-17 16:03:00 +0100 This change allows better results when a row value is converted to - hstore or json type: the fields of the resulting + hstore or json type: the fields of the resulting value will now have the expected names. - Improve column labels used for sub-SELECT results + Improve column labels used for sub-SELECT results (Marti Raudsepp) - Previously, the generic label ?column? was used. + Previously, the generic label ?column? was used. 
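A small example of the improved labeling, as understood from the item above (before 9.2 the output column would have been shown as ?column?):

    SELECT (SELECT 42 AS answer);   -- the result column is now labeled "answer"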
@@ -10179,7 +10549,7 @@ Branch: REL9_2_STABLE [6b700301c] 2015-02-17 16:03:00 +0100 - When a row fails a CHECK or NOT NULL + When a row fails a CHECK or NOT NULL constraint, show the row's contents as error detail (Jan Kundrát) @@ -10207,7 +10577,7 @@ Branch: REL9_2_STABLE [6b700301c] 2015-02-17 16:03:00 +0100 This change adds locking that should eliminate cache lookup - failed errors in many scenarios. Also, it is no longer possible + failed errors in many scenarios. Also, it is no longer possible to add relations to a schema that is being concurrently dropped, a scenario that formerly led to inconsistent system catalog contents. @@ -10215,8 +10585,8 @@ Branch: REL9_2_STABLE [6b700301c] 2015-02-17 16:03:00 +0100 - Add CONCURRENTLY option to DROP INDEX + Add CONCURRENTLY option to DROP INDEX (Simon Riggs) @@ -10246,31 +10616,31 @@ Branch: REL9_2_STABLE [6b700301c] 2015-02-17 16:03:00 +0100 - Allow CHECK - constraints to be declared NOT VALID (Álvaro + Allow CHECK + constraints to be declared NOT VALID (Álvaro Herrera) - Adding a NOT VALID constraint does not cause the table to + Adding a NOT VALID constraint does not cause the table to be scanned to verify that existing rows meet the constraint. Subsequently, newly added or updated rows are checked. Such constraints are ignored by the planner when considering - constraint_exclusion, since it is not certain that all + constraint_exclusion, since it is not certain that all rows meet the constraint. - The new ALTER TABLE VALIDATE command allows NOT - VALID constraints to be checked for existing rows, after which + The new ALTER TABLE VALIDATE command allows NOT + VALID constraints to be checked for existing rows, after which they are converted into ordinary constraints. - Allow CHECK constraints to be declared NO - INHERIT (Nikhil Sontakke, Alex Hunsaker, Álvaro Herrera) + Allow CHECK constraints to be declared NO + INHERIT (Nikhil Sontakke, Alex Hunsaker, Álvaro Herrera) @@ -10281,7 +10651,7 @@ Branch: REL9_2_STABLE [6b700301c] 2015-02-17 16:03:00 +0100 - Add the ability to rename + Add the ability to rename constraints (Peter Eisentraut) @@ -10290,32 +10660,32 @@ Branch: REL9_2_STABLE [6b700301c] 2015-02-17 16:03:00 +0100 - <command>ALTER</> + <command>ALTER</command> Reduce need to rebuild tables and indexes for certain ALTER TABLE - ... ALTER COLUMN TYPE operations (Noah Misch) + linkend="sql-altertable">ALTER TABLE + ... ALTER COLUMN TYPE operations (Noah Misch) - Increasing the length limit for a varchar or varbit + Increasing the length limit for a varchar or varbit column, or removing the limit altogether, no longer requires a table rewrite. Similarly, increasing the allowable precision of a - numeric column, or changing a column from constrained - numeric to unconstrained numeric, no longer + numeric column, or changing a column from constrained + numeric to unconstrained numeric, no longer requires a table rewrite. Table rewrites are also avoided in similar - cases involving the interval, timestamp, and - timestamptz types. + cases involving the interval, timestamp, and + timestamptz types. - Avoid having ALTER + Avoid having ALTER TABLE revalidate foreign key constraints in some cases where it is not necessary (Noah Misch) @@ -10323,7 +10693,7 @@ Branch: REL9_2_STABLE [6b700301c] 2015-02-17 16:03:00 +0100 - Add IF EXISTS options to some ALTER + Add IF EXISTS options to some ALTER commands (Pavel Stehule) @@ -10335,17 +10705,17 @@ Branch: REL9_2_STABLE [6b700301c] 2015-02-17 16:03:00 +0100 - Add ALTER - FOREIGN DATA WRAPPER ... 
RENAME - and ALTER - SERVER ... RENAME (Peter Eisentraut) + Add ALTER + FOREIGN DATA WRAPPER ... RENAME + and ALTER + SERVER ... RENAME (Peter Eisentraut) - Add ALTER - DOMAIN ... RENAME (Peter Eisentraut) + Add ALTER + DOMAIN ... RENAME (Peter Eisentraut) @@ -10357,11 +10727,11 @@ Branch: REL9_2_STABLE [6b700301c] 2015-02-17 16:03:00 +0100 Throw an error for ALTER DOMAIN ... DROP - CONSTRAINT on a nonexistent constraint (Peter Eisentraut) + CONSTRAINT on a nonexistent constraint (Peter Eisentraut) - An IF EXISTS option has been added to provide the + An IF EXISTS option has been added to provide the previous behavior. @@ -10371,7 +10741,7 @@ Branch: REL9_2_STABLE [6b700301c] 2015-02-17 16:03:00 +0100 - <link linkend="SQL-CREATETABLE"><command>CREATE TABLE</></link> + <link linkend="sql-createtable"><command>CREATE TABLE</command></link> @@ -10396,8 +10766,8 @@ Branch: REL9_2_STABLE [6b700301c] 2015-02-17 16:03:00 +0100 - Fix CREATE TABLE ... AS EXECUTE - to handle WITH NO DATA and column name specifications + Fix CREATE TABLE ... AS EXECUTE + to handle WITH NO DATA and column name specifications (Tom Lane) @@ -10414,14 +10784,14 @@ Branch: REL9_2_STABLE [6b700301c] 2015-02-17 16:03:00 +0100 Add a security_barrier + linkend="sql-createview">security_barrier option for views (KaiGai Kohei, Robert Haas) This option prevents optimizations that might allow view-protected data to be exposed to users, for example pushing a clause involving - an insecure function into the WHERE clause of the view. + an insecure function into the WHERE clause of the view. Such views can be expected to perform more poorly than ordinary views. @@ -10430,9 +10800,9 @@ Branch: REL9_2_STABLE [6b700301c] 2015-02-17 16:03:00 +0100 Add a new LEAKPROOF function + linkend="sql-createfunction">LEAKPROOF function attribute to mark functions that can safely be pushed down - into security_barrier views (KaiGai Kohei) + into security_barrier views (KaiGai Kohei) @@ -10442,8 +10812,8 @@ Branch: REL9_2_STABLE [6b700301c] 2015-02-17 16:03:00 +0100 - This adds support for the SQL-conforming - USAGE privilege on types and domains. The intent is + This adds support for the SQL-conforming + USAGE privilege on types and domains. The intent is to be able to restrict which users can create dependencies on types, since such dependencies limit the owner's ability to alter the type. @@ -10459,7 +10829,7 @@ Branch: REL9_2_STABLE [6b700301c] 2015-02-17 16:03:00 +0100 Because the object is being created by SELECT INTO or CREATE TABLE AS, the creator would ordinarily have insert permissions; but there are corner cases where this is not - true, such as when ALTER DEFAULT PRIVILEGES has removed + true, such as when ALTER DEFAULT PRIVILEGES has removed such permissions. @@ -10477,20 +10847,20 @@ Branch: REL9_2_STABLE [6b700301c] 2015-02-17 16:03:00 +0100 - Allow VACUUM to more + Allow VACUUM to more easily skip pages that cannot be locked (Simon Riggs, Robert Haas) - This change should greatly reduce the incidence of VACUUM - getting stuck waiting for other sessions. + This change should greatly reduce the incidence of VACUUM + getting stuck waiting for other sessions. - Make EXPLAIN - (BUFFERS) count blocks dirtied and written (Robert Haas) + Make EXPLAIN + (BUFFERS) count blocks dirtied and written (Robert Haas) @@ -10508,8 +10878,8 @@ Branch: REL9_2_STABLE [6b700301c] 2015-02-17 16:03:00 +0100 - This is accomplished by setting the new TIMING option to - FALSE. + This is accomplished by setting the new TIMING option to + FALSE. 
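For example (the table name is a placeholder for any real table):

    -- row counts and buffer usage are still collected; only per-node timing is skipped
    EXPLAIN (ANALYZE, TIMING FALSE, BUFFERS) SELECT count(*) FROM my_table;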
@@ -10550,41 +10920,41 @@ Branch: REL9_2_STABLE [6b700301c] 2015-02-17 16:03:00 +0100 Add array_to_json() - and row_to_json() (Andrew Dunstan) + linkend="functions-json">array_to_json() + and row_to_json() (Andrew Dunstan) - Add a SMALLSERIAL + Add a SMALLSERIAL data type (Mike Pultz) - This is like SERIAL, except it stores the sequence in - a two-byte integer column (int2). + This is like SERIAL, except it stores the sequence in + a two-byte integer column (int2). - Allow domains to be - declared NOT VALID (Álvaro Herrera) + Allow domains to be + declared NOT VALID (Álvaro Herrera) This option can be set at domain creation time, or via ALTER - DOMAIN ... ADD CONSTRAINT ... NOT - VALID. ALTER DOMAIN ... VALIDATE - CONSTRAINT fully validates the constraint. + DOMAIN ... ADD CONSTRAINT ... NOT + VALID. ALTER DOMAIN ... VALIDATE + CONSTRAINT fully validates the constraint. Support more locale-specific formatting options for the money data type (Tom Lane) + linkend="datatype-money">money data type (Tom Lane) @@ -10597,22 +10967,22 @@ Branch: REL9_2_STABLE [6b700301c] 2015-02-17 16:03:00 +0100 - Add bitwise and, or, and not - operators for the macaddr data type (Brendan Jurd) + Add bitwise and, or, and not + operators for the macaddr data type (Brendan Jurd) Allow xpath() to + linkend="functions-xml-processing">xpath() to return a single-element XML array when supplied a scalar value (Florian Pflug) Previously, it returned an empty array. This change will also - cause xpath_exists() to return true, not false, + cause xpath_exists() to return true, not false, for such expressions. @@ -10636,9 +11006,9 @@ Branch: REL9_2_STABLE [6b700301c] 2015-02-17 16:03:00 +0100 Allow non-superusers to use pg_cancel_backend() + linkend="functions-admin-signal">pg_cancel_backend() and pg_terminate_backend() + linkend="functions-admin-signal">pg_terminate_backend() on other sessions belonging to the same user (Magnus Hagander, Josh Kupershmidt, Dan Farina) @@ -10658,8 +11028,8 @@ Branch: REL9_2_STABLE [6b700301c] 2015-02-17 16:03:00 +0100 This allows multiple transactions to share identical views of the database state. Snapshots are exported via pg_export_snapshot() - and imported via SET + linkend="functions-snapshot-synchronization">pg_export_snapshot() + and imported via SET TRANSACTION SNAPSHOT. Only snapshots from currently-running transactions can be imported. @@ -10669,7 +11039,7 @@ Branch: REL9_2_STABLE [6b700301c] 2015-02-17 16:03:00 +0100 Support COLLATION - FOR on expressions (Peter Eisentraut) + FOR on expressions (Peter Eisentraut) @@ -10680,23 +11050,23 @@ Branch: REL9_2_STABLE [6b700301c] 2015-02-17 16:03:00 +0100 Add pg_opfamily_is_visible() + linkend="functions-info-schema-table">pg_opfamily_is_visible() (Josh Kupershmidt) - Add a numeric variant of pg_size_pretty() - for use with pg_xlog_location_diff() (Fujii Masao) + Add a numeric variant of pg_size_pretty() + for use with pg_xlog_location_diff() (Fujii Masao) Add a pg_trigger_depth() + linkend="functions-info-session-table">pg_trigger_depth() function (Kevin Grittner) @@ -10708,8 +11078,8 @@ Branch: REL9_2_STABLE [6b700301c] 2015-02-17 16:03:00 +0100 Allow string_agg() - to process bytea values (Pavel Stehule) + linkend="functions-aggregate-table">string_agg() + to process bytea values (Pavel Stehule) @@ -10720,7 +11090,7 @@ Branch: REL9_2_STABLE [6b700301c] 2015-02-17 16:03:00 +0100 - For example, ^(\w+)( \1)+$. Previous releases did not + For example, ^(\w+)( \1)+$. 
Previous releases did not check that the back-reference actually matched the first occurrence. @@ -10737,22 +11107,22 @@ Branch: REL9_2_STABLE [6b700301c] 2015-02-17 16:03:00 +0100 Add information schema views - role_udt_grants, udt_privileges, - and user_defined_types (Peter Eisentraut) + role_udt_grants, udt_privileges, + and user_defined_types (Peter Eisentraut) Add composite-type attributes to the - information schema element_types view + information schema element_types view (Peter Eisentraut) - Implement interval_type columns in the information + Implement interval_type columns in the information schema (Peter Eisentraut) @@ -10764,23 +11134,23 @@ Branch: REL9_2_STABLE [6b700301c] 2015-02-17 16:03:00 +0100 Implement collation-related columns in the information schema - attributes, columns, - domains, and element_types + attributes, columns, + domains, and element_types views (Peter Eisentraut) - Implement the with_hierarchy column in the - information schema table_privileges view (Peter + Implement the with_hierarchy column in the + information schema table_privileges view (Peter Eisentraut) - Add display of sequence USAGE privileges to information + Add display of sequence USAGE privileges to information schema (Peter Eisentraut) @@ -10811,7 +11181,7 @@ Branch: REL9_2_STABLE [6b700301c] 2015-02-17 16:03:00 +0100 - Allow the PL/pgSQL OPEN cursor command to supply + Allow the PL/pgSQL OPEN cursor command to supply parameters by name (Yeb Havinga) @@ -10833,7 +11203,7 @@ Branch: REL9_2_STABLE [6b700301c] 2015-02-17 16:03:00 +0100 Improve performance and memory consumption for long chains of - ELSIF clauses (Tom Lane) + ELSIF clauses (Tom Lane) @@ -10914,31 +11284,31 @@ Branch: REL9_2_STABLE [6b700301c] 2015-02-17 16:03:00 +0100 - Add initdb - options and (Peter Eisentraut) - This allows separate control of local and - host pg_hba.conf authentication - settings. still controls both. - Add - Add the @@ -10946,15 +11316,15 @@ Branch: REL9_2_STABLE [6b700301c] 2015-02-17 16:03:00 +0100 Give command-line tools the ability to specify the name of the - database to connect to, and fall back to template1 - if a postgres database connection fails (Robert Haas) + database to connect to, and fall back to template1 + if a postgres database connection fails (Robert Haas) - <link linkend="APP-PSQL"><application>psql</></link> + <link linkend="app-psql"><application>psql</application></link> @@ -10965,7 +11335,7 @@ Branch: REL9_2_STABLE [6b700301c] 2015-02-17 16:03:00 +0100 - This adds the auto option to the \x + This adds the auto option to the \x command, which switches to the expanded mode when the normal output would be wider than the screen. @@ -10978,32 +11348,32 @@ Branch: REL9_2_STABLE [6b700301c] 2015-02-17 16:03:00 +0100 - This is done with a new command \ir. + This is done with a new command \ir. Add support for non-ASCII characters in - psql variable names (Tom Lane) + psql variable names (Tom Lane) - Add support for major-version-specific .psqlrc files + Add support for major-version-specific .psqlrc files (Bruce Momjian) - psql already supported minor-version-specific - .psqlrc files. + psql already supported minor-version-specific + .psqlrc files. 
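Returning to the PL/pgSQL OPEN change noted above, a minimal sketch of the new named-parameter notation (the block and names are hypothetical):

    DO $$
    DECLARE
      c CURSOR (p_relname text) FOR
        SELECT oid FROM pg_class WHERE relname = p_relname;
      r record;
    BEGIN
      OPEN c (p_relname := 'pg_class');   -- named notation for cursor arguments, new in 9.2
      FETCH c INTO r;
      CLOSE c;
    END $$;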
- Provide environment variable overrides for psql + Provide environment variable overrides for psql history and startup file locations (Andrew Dunstan) @@ -11015,15 +11385,15 @@ Branch: REL9_2_STABLE [6b700301c] 2015-02-17 16:03:00 +0100 - Add a \setenv command to modify + Add a \setenv command to modify the environment variables passed to child processes (Andrew Dunstan) - Name psql's temporary editor files with a - .sql extension (Peter Eisentraut) + Name psql's temporary editor files with a + .sql extension (Peter Eisentraut) @@ -11033,19 +11403,19 @@ Branch: REL9_2_STABLE [6b700301c] 2015-02-17 16:03:00 +0100 - Allow psql to use zero-byte field and record + Allow psql to use zero-byte field and record separators (Peter Eisentraut) Various shell tools use zero-byte (NUL) separators, - e.g. find. + e.g. find. - Make the \timing option report times for + Make the \timing option report times for failed queries (Magnus Hagander) @@ -11056,13 +11426,13 @@ Branch: REL9_2_STABLE [6b700301c] 2015-02-17 16:03:00 +0100 - Unify and tighten psql's treatment of \copy - and SQL COPY (Noah Misch) + Unify and tighten psql's treatment of \copy + and SQL COPY (Noah Misch) This fix makes failure behavior more predictable and honors - \set ON_ERROR_ROLLBACK. + \set ON_ERROR_ROLLBACK. @@ -11076,21 +11446,21 @@ Branch: REL9_2_STABLE [6b700301c] 2015-02-17 16:03:00 +0100 - Make \d on a sequence show the + Make \d on a sequence show the table/column name owning it (Magnus Hagander) - Show statistics target for columns in \d+ (Magnus + Show statistics target for columns in \d+ (Magnus Hagander) - Show role password expiration dates in \du + Show role password expiration dates in \du (Fabrízio de Royes Mello) @@ -11102,8 +11472,8 @@ Branch: REL9_2_STABLE [6b700301c] 2015-02-17 16:03:00 +0100 - These are included in the output of \dC+, - \dc+, \dD+, and \dL respectively. + These are included in the output of \dC+, + \dc+, \dD+, and \dL respectively. @@ -11114,15 +11484,15 @@ Branch: REL9_2_STABLE [6b700301c] 2015-02-17 16:03:00 +0100 - These are included in the output of \des+, - \det+, and \dew+ for foreign servers, foreign + These are included in the output of \des+, + \det+, and \dew+ for foreign servers, foreign tables, and foreign data wrappers respectively. - Change \dd to display comments only for object types + Change \dd to display comments only for object types without their own backslash command (Josh Kupershmidt) @@ -11138,9 +11508,9 @@ Branch: REL9_2_STABLE [6b700301c] 2015-02-17 16:03:00 +0100 - In psql tab completion, complete SQL + In psql tab completion, complete SQL keywords in either upper or lower case according to the new COMP_KEYWORD_CASE + linkend="app-psql-variables">COMP_KEYWORD_CASE setting (Peter Eisentraut) @@ -11179,14 +11549,14 @@ Branch: REL9_2_STABLE [6b700301c] 2015-02-17 16:03:00 +0100 - <link linkend="APP-PGDUMP"><application>pg_dump</></link> + <link linkend="app-pgdump"><application>pg_dump</application></link> - Add an @@ -11197,13 +11567,13 @@ Branch: REL9_2_STABLE [6b700301c] 2015-02-17 16:03:00 +0100 - Add a - Valid values are pre-data, data, - and post-data. The option can be + Valid values are pre-data, data, + and post-data. The option can be given more than once to select two or more sections. 
@@ -11211,7 +11581,7 @@ Branch: REL9_2_STABLE [6b700301c] 2015-02-17 16:03:00 +0100 Make pg_dumpall dump all + linkend="app-pg-dumpall">pg_dumpall dump all roles first, then all configuration settings on roles (Phil Sorber) @@ -11223,8 +11593,8 @@ Branch: REL9_2_STABLE [6b700301c] 2015-02-17 16:03:00 +0100 - Allow pg_dumpall to avoid errors if the - postgres database is missing in the new cluster + Allow pg_dumpall to avoid errors if the + postgres database is missing in the new cluster (Robert Haas) @@ -11249,13 +11619,13 @@ Branch: REL9_2_STABLE [6b700301c] 2015-02-17 16:03:00 +0100 Tighten rules for when extension configuration tables are dumped - by pg_dump (Tom Lane) + by pg_dump (Tom Lane) - Make pg_dump emit more useful dependency + Make pg_dump emit more useful dependency information (Tom Lane) @@ -11269,7 +11639,7 @@ Branch: REL9_2_STABLE [6b700301c] 2015-02-17 16:03:00 +0100 - Improve pg_dump's performance when dumping many + Improve pg_dump's performance when dumping many database objects (Tom Lane) @@ -11281,19 +11651,19 @@ Branch: REL9_2_STABLE [6b700301c] 2015-02-17 16:03:00 +0100 - <link linkend="libpq"><application>libpq</></link> + <link linkend="libpq"><application>libpq</application></link> - Allow libpq connection strings to have the format of a + Allow libpq connection strings to have the format of a URI (Alexander Shulgin) - The syntax begins with postgres://. This can allow + The syntax begins with postgres://. This can allow applications to avoid implementing their own parser for URIs representing database connections. @@ -11320,30 +11690,30 @@ Branch: REL9_2_STABLE [6b700301c] 2015-02-17 16:03:00 +0100 - Previously, libpq always collected the entire query + Previously, libpq always collected the entire query result in memory before passing it back to the application. 
- Add const qualifiers to the declarations of the functions - PQconnectdbParams, PQconnectStartParams, - and PQpingParams (Lionel Elie Mamane) + Add const qualifiers to the declarations of the functions + PQconnectdbParams, PQconnectStartParams, + and PQpingParams (Lionel Elie Mamane) - Allow the .pgpass file to include escaped characters + Allow the .pgpass file to include escaped characters in the password field (Robert Haas) - Make library functions use abort() instead of - exit() when it is necessary to terminate the process + Make library functions use abort() instead of + exit() when it is necessary to terminate the process (Peter Eisentraut) @@ -11388,7 +11758,7 @@ Branch: REL9_2_STABLE [6b700301c] 2015-02-17 16:03:00 +0100 - Install plpgsql.h into include/server during installation + Install plpgsql.h into include/server during installation (Heikki Linnakangas) @@ -11414,14 +11784,14 @@ Branch: REL9_2_STABLE [6b700301c] 2015-02-17 16:03:00 +0100 Improve the concurrent transaction regression tests - (isolationtester) (Noah Misch) + (isolationtester) (Noah Misch) - Modify thread_test to create its test files in - the current directory, rather than /tmp (Bruce Momjian) + Modify thread_test to create its test files in + the current directory, rather than /tmp (Bruce Momjian) @@ -11470,7 +11840,7 @@ Branch: REL9_2_STABLE [6b700301c] 2015-02-17 16:03:00 +0100 - Add a pg_upgrade test suite (Peter Eisentraut) + Add a pg_upgrade test suite (Peter Eisentraut) @@ -11490,14 +11860,14 @@ Branch: REL9_2_STABLE [6b700301c] 2015-02-17 16:03:00 +0100 - Add options to git_changelog for use in major + Add options to git_changelog for use in major release note creation (Bruce Momjian) - Support Linux's /proc/self/oom_score_adj API (Tom Lane) + Support Linux's /proc/self/oom_score_adj API (Tom Lane) @@ -11519,13 +11889,13 @@ Branch: REL9_2_STABLE [6b700301c] 2015-02-17 16:03:00 +0100 This improvement does not apply to - dblink_send_query()/dblink_get_result(). + dblink_send_query()/dblink_get_result(). - Support force_not_null option in force_not_null option in file_fdw (Shigeru Hanada) @@ -11533,7 +11903,7 @@ Branch: REL9_2_STABLE [6b700301c] 2015-02-17 16:03:00 +0100 Implement dry-run mode for pg_archivecleanup + linkend="pgarchivecleanup">pg_archivecleanup (Gabriele Bartolini) @@ -11545,29 +11915,29 @@ Branch: REL9_2_STABLE [6b700301c] 2015-02-17 16:03:00 +0100 Add new pgbench switches - , , and + (Robert Haas) Change pg_test_fsync to test + linkend="pgtestfsync">pg_test_fsync to test for a fixed amount of time, rather than a fixed number of cycles (Bruce Momjian) - The /cycles option was removed, and + /seconds added. Add a pg_test_timing + linkend="pgtesttiming">pg_test_timing utility to measure clock monotonicity and timing overhead (Ants Aasma, Greg Smith) @@ -11584,19 +11954,19 @@ Branch: REL9_2_STABLE [6b700301c] 2015-02-17 16:03:00 +0100 - <link linkend="pgupgrade"><application>pg_upgrade</></link> + <link linkend="pgupgrade"><application>pg_upgrade</application></link> - Adjust pg_upgrade environment variables (Bruce + Adjust pg_upgrade environment variables (Bruce Momjian) Rename data, bin, and port environment - variables to begin with PG, and support + variables to begin with PG, and support PGPORTOLD/PGPORTNEW, to replace PGPORT. 
@@ -11604,22 +11974,22 @@ Branch: REL9_2_STABLE [6b700301c] 2015-02-17 16:03:00 +0100 - Overhaul pg_upgrade logging and failure reporting + Overhaul pg_upgrade logging and failure reporting (Bruce Momjian) Create four append-only log files, and delete them on success. - Add - Make pg_upgrade create a script to incrementally + Make pg_upgrade create a script to incrementally generate more accurate optimizer statistics (Bruce Momjian) @@ -11631,14 +12001,14 @@ Branch: REL9_2_STABLE [6b700301c] 2015-02-17 16:03:00 +0100 - Allow pg_upgrade to upgrade an old cluster that - does not have a postgres database (Bruce Momjian) + Allow pg_upgrade to upgrade an old cluster that + does not have a postgres database (Bruce Momjian) - Allow pg_upgrade to handle cases where some + Allow pg_upgrade to handle cases where some old or new databases are missing, as long as they are empty (Bruce Momjian) @@ -11646,14 +12016,14 @@ Branch: REL9_2_STABLE [6b700301c] 2015-02-17 16:03:00 +0100 - Allow pg_upgrade to handle configuration-only + Allow pg_upgrade to handle configuration-only directory installations (Bruce Momjian) - In pg_upgrade, add / options to pass parameters to the servers (Bruce Momjian) @@ -11664,7 +12034,7 @@ Branch: REL9_2_STABLE [6b700301c] 2015-02-17 16:03:00 +0100 - Change pg_upgrade to use port 50432 by default + Change pg_upgrade to use port 50432 by default (Bruce Momjian) @@ -11675,7 +12045,7 @@ Branch: REL9_2_STABLE [6b700301c] 2015-02-17 16:03:00 +0100 - Reduce cluster locking in pg_upgrade (Bruce + Reduce cluster locking in pg_upgrade (Bruce Momjian) @@ -11690,13 +12060,13 @@ Branch: REL9_2_STABLE [6b700301c] 2015-02-17 16:03:00 +0100 - <link linkend="pgstatstatements"><application>pg_stat_statements</></link> + <link linkend="pgstatstatements"><application>pg_stat_statements</application></link> - Allow pg_stat_statements to aggregate similar + Allow pg_stat_statements to aggregate similar queries via SQL text normalization (Peter Geoghegan, Tom Lane) @@ -11709,13 +12079,13 @@ Branch: REL9_2_STABLE [6b700301c] 2015-02-17 16:03:00 +0100 Add dirtied and written block counts and read/write times to - pg_stat_statements (Robert Haas, Ants Aasma) + pg_stat_statements (Robert Haas, Ants Aasma) - Prevent pg_stat_statements from double-counting + Prevent pg_stat_statements from double-counting PREPARE and EXECUTE commands (Tom Lane) @@ -11731,7 +12101,7 @@ Branch: REL9_2_STABLE [6b700301c] 2015-02-17 16:03:00 +0100 - Support SECURITY LABEL on global objects (KaiGai + Support SECURITY LABEL on global objects (KaiGai Kohei, Robert Haas) @@ -11756,7 +12126,7 @@ Branch: REL9_2_STABLE [6b700301c] 2015-02-17 16:03:00 +0100 - Add sepgsql_setcon() and related functions to control + Add sepgsql_setcon() and related functions to control the sepgsql security domain (KaiGai Kohei) @@ -11785,7 +12155,7 @@ Branch: REL9_2_STABLE [6b700301c] 2015-02-17 16:03:00 +0100 - Use gmake STYLE=website draft. + Use gmake STYLE=website draft. 
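As an illustration of the pg_stat_statements changes above (this assumes the module is listed in shared_preload_libraries and CREATE EXTENSION pg_stat_statements has been run in the database):

    -- similar queries are folded together by text normalization; total_time is in milliseconds
    SELECT query, calls, total_time
    FROM pg_stat_statements
    ORDER BY total_time DESC
    LIMIT 5;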
@@ -11798,7 +12168,7 @@ Branch: REL9_2_STABLE [6b700301c] 2015-02-17 16:03:00 +0100 Document that user/database names are preserved with double-quoting - by command-line tools like vacuumdb (Bruce + by command-line tools like vacuumdb (Bruce Momjian) @@ -11812,12 +12182,12 @@ Branch: REL9_2_STABLE [6b700301c] 2015-02-17 16:03:00 +0100 - Deprecate use of GLOBAL and LOCAL in - CREATE TEMP TABLE (Noah Misch) + Deprecate use of GLOBAL and LOCAL in + CREATE TEMP TABLE (Noah Misch) - PostgreSQL has long treated these keyword as no-ops, + PostgreSQL has long treated these keyword as no-ops, and continues to do so; but in future they might mean what the SQL standard says they mean, so applications should avoid using them. diff --git a/doc/src/sgml/release-9.3.sgml b/doc/src/sgml/release-9.3.sgml index e95efefd66..0c1498015b 100644 --- a/doc/src/sgml/release-9.3.sgml +++ b/doc/src/sgml/release-9.3.sgml @@ -1,6 +1,2071 @@ + + Release 9.3.25 + + + Release date: + 2018-11-08 + + + + This release contains a variety of fixes from 9.3.24. + For information about new features in the 9.3 major release, see + . + + + + This is expected to be the last PostgreSQL + release in the 9.3.X series. Users are encouraged to update to a newer + release branch soon. + + + + Migration to Version 9.3.25 + + + A dump/restore is not required for those running 9.3.X. + + + + However, if you are upgrading from a version earlier than 9.3.23, + see . + + + + + Changes + + + + + + Fix corner-case failures + in has_foo_privilege() + family of functions (Tom Lane) + + + + Return NULL rather than throwing an error when an invalid object OID + is provided. Some of these functions got that right already, but not + all. has_column_privilege() was additionally + capable of crashing on some platforms. + + + + + + Avoid O(N^2) slowdown in regular expression match/split functions on + long strings (Andrew Gierth) + + + + + + Avoid O(N^3) slowdown in lexer for long strings + of + or - characters + (Andrew Gierth) + + + + + + Fix mis-execution of SubPlans when the outer query is being scanned + backwards (Andrew Gierth) + + + + + + Fix failure of UPDATE/DELETE ... WHERE CURRENT OF ... + after rewinding the referenced cursor (Tom Lane) + + + + A cursor that scans multiple relations (particularly an inheritance + tree) could produce wrong behavior if rewound to an earlier relation. + + + + + + Fix EvalPlanQual to handle conditionally-executed + InitPlans properly (Andrew Gierth, Tom Lane) + + + + This resulted in hard-to-reproduce crashes or wrong answers in + concurrent updates, if they contained code such as an uncorrelated + sub-SELECT inside a CASE + construct. + + + + + + Fix character-class checks to not fail on Windows for Unicode + characters above U+FFFF (Tom Lane, Kenji Uno) + + + + This bug affected full-text-search operations, as well + as contrib/ltree + and contrib/pg_trgm. + + + + + + Ensure that sequences owned by a foreign table are processed + by ALTER OWNER on the table (Peter Eisentraut) + + + + The ownership change should propagate to such sequences as well, but + this was missed for foreign tables. + + + + + + Fix over-allocation of space for array_out()'s + result string (Keiichi Hirobe) + + + + + + Fix memory leak in repeated SP-GiST index scans (Tom Lane) + + + + This is only known to amount to anything significant in cases where + an exclusion constraint using SP-GiST receives many new index entries + in a single command. 
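To illustrate the has_foo_privilege() behavior described earlier in this 9.3.25 list (the OID below is simply assumed not to correspond to any existing object):

    -- with the fix this is expected to yield NULL rather than an error
    -- (or, for has_column_privilege() on some platforms, a crash)
    SELECT has_column_privilege(999999999, 'some_column', 'SELECT');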
+ + + + + + Avoid crash if a utility command causes infinite recursion (Tom Lane) + + + + + + When initializing a hot standby, cope with duplicate XIDs caused by + two-phase transactions on the master + (Michael Paquier, Konstantin Knizhnik) + + + + + + Randomize the random() seed in bootstrap and + standalone backends, and in initdb + (Noah Misch) + + + + The main practical effect of this change is that it avoids a scenario + where initdb might mistakenly conclude that + POSIX shared memory is not available, due to name collisions caused by + always using the same random seed. + + + + + + Ensure that hot standby processes use the correct WAL consistency + point (Alexander Kukushkin, Michael Paquier) + + + + This prevents possible misbehavior just after a standby server has + reached a consistent database state during WAL replay. + + + + + + Don't run atexit callbacks when servicing SIGQUIT + (Heikki Linnakangas) + + + + + + Don't record foreign-server user mappings as members of extensions + (Tom Lane) + + + + If CREATE USER MAPPING is executed in an extension + script, an extension dependency was created for the user mapping, + which is unexpected. Roles can't be extension members, so user + mappings shouldn't be either. + + + + + + Make syslogger more robust against failures in opening CSV log files + (Tom Lane) + + + + + + Fix possible inconsistency in pg_dump's + sorting of dissimilar object names (Jacob Champion) + + + + + + Ensure that pg_restore will schema-qualify + the table name when + emitting DISABLE/ENABLE TRIGGER + commands (Tom Lane) + + + + This avoids failures due to the new policy of running restores with + restrictive search path. + + + + + + Fix pg_upgrade to handle event triggers in + extensions correctly (Haribabu Kommi) + + + + pg_upgrade failed to preserve an event + trigger's extension-membership status. + + + + + + Fix pg_upgrade's cluster state check to + work correctly on a standby server (Bruce Momjian) + + + + + + Enforce type cube's dimension limit in + all contrib/cube functions (Andrey Borodin) + + + + Previously, some cube-related functions could construct values that + would be rejected by cube_in(), leading to + dump/reload failures. + + + + + + Fix contrib/unaccent's + unaccent() function to use + the unaccent text search dictionary that is in the + same schema as the function (Tom Lane) + + + + Previously it tried to look up the dictionary using the search path, + which could fail if the search path has a restrictive value. + + + + + + Fix build problems on macOS 10.14 (Mojave) (Tom Lane) + + + + Adjust configure to add + an switch to CPPFLAGS; + without this, PL/Perl and PL/Tcl fail to configure or build on macOS + 10.14. The specific sysroot used can be overridden at configure time + or build time by setting the PG_SYSROOT variable in + the arguments of configure + or make. + + + + It is now recommended that Perl-related extensions + write $(perl_includespec) rather + than -I$(perl_archlibexp)/CORE in their compiler + flags. The latter continues to work on most platforms, but not recent + macOS. + + + + Also, it should no longer be necessary to + specify manually to get PL/Tcl to + build on recent macOS releases. + + + + + + Fix MSVC build and regression-test scripts to work on recent Perl + versions (Andrew Dunstan) + + + + Perl no longer includes the current directory in its search path + by default; work around that. 
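Relating to the contrib/unaccent change above, a quick check (assumes CREATE EXTENSION unaccent has been run; output not shown):

    SELECT unaccent('Hôtel');                -- now resolves the dictionary in the function's own schema
    SELECT unaccent('unaccent', 'Hôtel');    -- explicitly naming the dictionary also works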
+ + + + + + Support building on Windows with Visual Studio 2015 or Visual Studio 2017 + (Michael Paquier, Haribabu Kommi) + + + + + + Allow btree comparison functions to return INT_MIN + (Tom Lane) + + + + Up to now, we've forbidden datatype-specific comparison functions from + returning INT_MIN, which allows callers to invert + the sort order just by negating the comparison result. However, this + was never safe for comparison functions that directly return the + result of memcmp(), strcmp(), + etc, as POSIX doesn't place any such restriction on those functions. + At least some recent versions of memcmp() can + return INT_MIN, causing incorrect sort ordering. + Hence, we've removed this restriction. Callers must now use + the INVERT_COMPARE_RESULT() macro if they wish to + invert the sort order. + + + + + + Fix recursion hazard in shared-invalidation message processing + (Tom Lane) + + + + This error could, for example, result in failure to access a system + catalog or index that had just been processed by VACUUM + FULL. + + + + This change adds a new result code + for LockAcquire, which might possibly affect + external callers of that function, though only very unusual usage + patterns would have an issue with it. The API + of LockAcquireExtended is also changed. + + + + + + Save and restore SPI's global variables + during SPI_connect() + and SPI_finish() (Chapman Flack, Tom Lane) + + + + This prevents possible interference when one SPI-using function calls + another. + + + + + + Provide ALLOCSET_DEFAULT_SIZES and sibling macros + in back branches (Tom Lane) + + + + These macros have existed since 9.6, but there were requests to add + them to older branches to allow extensions to rely on them without + branch-specific coding. + + + + + + Avoid using potentially-under-aligned page buffers (Tom Lane) + + + + Invent new union types PGAlignedBlock + and PGAlignedXLogBlock, and use these in place of plain + char arrays, ensuring that the compiler can't place the buffer at a + misaligned start address. This fixes potential core dumps on + alignment-picky platforms, and may improve performance even on + platforms that allow misalignment. + + + + + + Make src/port/snprintf.c follow the C99 + standard's definition of snprintf()'s result + value (Tom Lane) + + + + On platforms where this code is used (mostly Windows), its pre-C99 + behavior could lead to failure to detect buffer overrun, if the + calling code assumed C99 semantics. + + + + + + When building on i386 with the clang + compiler, require to be used (Andres Freund) + + + + This avoids problems with missed floating point overflow checks. + + + + + + Fix configure's detection of the result + type of strerror_r() (Tom Lane) + + + + The previous coding got the wrong answer when building + with icc on Linux (and perhaps in other + cases), leading to libpq not returning + useful error messages for system-reported errors. + + + + + + Update time zone data files to tzdata + release 2018g for DST law changes in Chile, Fiji, Morocco, and Russia + (Volgograd), plus historical corrections for China, Hawaii, Japan, + Macau, and North Korea. + + + + + + + + + + Release 9.3.24 + + + Release date: + 2018-08-09 + + + + This release contains a variety of fixes from 9.3.23. + For information about new features in the 9.3 major release, see + . + + + + The PostgreSQL community will stop releasing + updates for the 9.3.X release series shortly after September 2018. + Users are encouraged to update to a newer release branch soon. 
+ + + + Migration to Version 9.3.24 + + + A dump/restore is not required for those running 9.3.X. + + + + However, if you are upgrading from a version earlier than 9.3.23, + see . + + + + + Changes + + + + + + Fix failure to reset libpq's state fully + between connection attempts (Tom Lane) + + + + An unprivileged user of dblink + or postgres_fdw could bypass the checks intended + to prevent use of server-side credentials, such as + a ~/.pgpass file owned by the operating-system + user running the server. Servers allowing peer authentication on + local connections are particularly vulnerable. Other attacks such + as SQL injection into a postgres_fdw session + are also possible. + Attacking postgres_fdw in this way requires the + ability to create a foreign server object with selected connection + parameters, but any user with access to dblink + could exploit the problem. + In general, an attacker with the ability to select the connection + parameters for a libpq-using application + could cause mischief, though other plausible attack scenarios are + harder to think of. + Our thanks to Andrew Krasichkov for reporting this issue. + (CVE-2018-10915) + + + + + + Ensure that updates to the relfrozenxid + and relminmxid values + for nailed system catalogs are processed in a timely + fashion (Andres Freund) + + + + Overoptimistic caching rules could prevent these updates from being + seen by other sessions, leading to spurious errors and/or data + corruption. The problem was significantly worse for shared catalogs, + such as pg_authid, because the stale cache + data could persist into new sessions as well as existing ones. + + + + + + Fix case where a freshly-promoted standby crashes before having + completed its first post-recovery checkpoint (Michael Paquier, Kyotaro + Horiguchi, Pavan Deolasee, Álvaro Herrera) + + + + This led to a situation where the server did not think it had reached + a consistent database state during subsequent WAL replay, preventing + restart. + + + + + + Avoid emitting a bogus WAL record when recycling an all-zero btree + page (Amit Kapila) + + + + This mistake has been seen to cause assertion failures, and + potentially it could result in unnecessary query cancellations on hot + standby servers. + + + + + + Improve performance of WAL replay for transactions that drop many + relations (Fujii Masao) + + + + This change reduces the number of times that shared buffers are + scanned, so that it is of most benefit when that setting is large. + + + + + + Improve performance of lock releasing in standby server WAL replay + (Thomas Munro) + + + + + + Ensure a table's cached index list is correctly rebuilt after an index + creation fails partway through (Peter Geoghegan) + + + + Previously, the failed index's OID could remain in the list, causing + problems later in the same session. + + + + + + Fix misoptimization of equivalence classes involving composite-type + columns (Tom Lane) + + + + This resulted in failure to recognize that an index on a composite + column could provide the sort order needed for a mergejoin on that + column. + + + + + + Fix SQL-standard FETCH FIRST syntax to allow + parameters ($n), as the + standard expects (Andrew Gierth) + + + + + + Fix failure to schema-qualify some object names + in getObjectDescription output + (Kyotaro Horiguchi, Tom Lane) + + + + Names of collations, conversions, and text search objects + were not schema-qualified when they should be. 
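A sketch of the FETCH FIRST fix mentioned above (the prepared-statement name and query are hypothetical):

    PREPARE first_rows (int) AS
      SELECT relname FROM pg_class ORDER BY relname
      FETCH FIRST $1 ROWS ONLY;      -- a bare $1 parameter is now accepted in this position
    EXECUTE first_rows(10);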
+ + + + + + Widen COPY FROM's current-line-number counter + from 32 to 64 bits (David Rowley) + + + + This avoids two problems with input exceeding 4G lines: COPY + FROM WITH HEADER would drop a line every 4G lines, not only + the first line, and error reports could show a wrong line number. + + + + + + Add a string freeing function + to ecpg's pgtypes + library, so that cross-module memory management problems can be + avoided on Windows (Takayuki Tsunakawa) + + + + On Windows, crashes can ensue if the free call + for a given chunk of memory is not made from the same DLL + that malloc'ed the memory. + The pgtypes library sometimes returns strings + that it expects the caller to free, making it impossible to follow + this rule. Add a PGTYPESchar_free() function + that just wraps free, allowing applications + to follow this rule. + + + + + + Fix ecpg's support for long + long variables on Windows, as well as other platforms that + declare strtoll/strtoull + nonstandardly or not at all (Dang Minh Huong, Tom Lane) + + + + + + Fix misidentification of SQL statement type in PL/pgSQL, when a rule + change causes a change in the semantics of a statement intra-session + (Tom Lane) + + + + This error led to assertion failures, or in rare cases, failure to + enforce the INTO STRICT option as expected. + + + + + + Fix password prompting in client programs so that echo is properly + disabled on Windows when stdin is not the + terminal (Matthew Stickney) + + + + + + Further fix mis-quoting of values for list-valued GUC variables in + dumps (Tom Lane) + + + + The previous fix for quoting of search_path and + other list-valued variables in pg_dump + output turned out to misbehave for empty-string list elements, and it + risked truncation of long file paths. + + + + + + Make pg_upgrade check that the old server + was shut down cleanly (Bruce Momjian) + + + + The previous check could be fooled by an immediate-mode shutdown. + + + + + + Fix crash in contrib/ltree's + lca() function when the input array is empty + (Pierre Ducroquet) + + + + + + Fix various error-handling code paths in which an incorrect error code + might be reported (Michael Paquier, Tom Lane, Magnus Hagander) + + + + + + Rearrange makefiles to ensure that programs link to freshly-built + libraries (such as libpq.so) rather than ones + that might exist in the system library directories (Tom Lane) + + + + This avoids problems when building on platforms that supply old copies + of PostgreSQL libraries. + + + + + + Update time zone data files to tzdata + release 2018e for DST law changes in North Korea, plus historical + corrections for Czechoslovakia. + + + + This update includes a redefinition of daylight savings + in Ireland, as well as for some past years in Namibia and + Czechoslovakia. In those jurisdictions, legally standard time is + observed in summer, and daylight savings time in winter, so that the + daylight savings offset is one hour behind standard time not one hour + ahead. This does not affect either the actual UTC offset or the + timezone abbreviations in use; the only known effect is that + the is_dst column in + the pg_timezone_names view will now be true + in winter and false in summer in these cases. + + + + + + + + + + Release 9.3.23 + + + Release date: + 2018-05-10 + + + + This release contains a variety of fixes from 9.3.22. + For information about new features in the 9.3 major release, see + . + + + + Migration to Version 9.3.23 + + + A dump/restore is not required for those running 9.3.X. 
+ + + + However, if the function marking mistakes mentioned in the first + changelog entry below affect you, you will want to take steps to + correct your database catalogs. + + + + Also, if you are upgrading from a version earlier than 9.3.22, + see . + + + + + Changes + + + + + + Fix incorrect volatility markings on a few built-in functions + (Thomas Munro, Tom Lane) + + + + The functions + query_to_xml, + cursor_to_xml, + cursor_to_xmlschema, + query_to_xmlschema, and + query_to_xml_and_xmlschema + should be marked volatile because they execute user-supplied queries + that might contain volatile operations. They were not, leading to a + risk of incorrect query optimization. This has been repaired for new + installations by correcting the initial catalog data, but existing + installations will continue to contain the incorrect markings. + Practical use of these functions seems to pose little hazard, but in + case of trouble, it can be fixed by manually updating these + functions' pg_proc entries, for example + ALTER FUNCTION pg_catalog.query_to_xml(text, boolean, + boolean, text) VOLATILE. (Note that that will need to be + done in each database of the installation.) Another option is + to pg_upgrade the database to a version + containing the corrected initial data. + + + + + + Avoid re-using TOAST value OIDs that match dead-but-not-yet-vacuumed + TOAST entries (Pavan Deolasee) + + + + Once the OID counter has wrapped around, it's possible to assign a + TOAST value whose OID matches a previously deleted entry in the same + TOAST table. If that entry were not yet vacuumed away, this resulted + in unexpected chunk number 0 (expected 1) for toast + value nnnnn errors, which would + persist until the dead entry was removed + by VACUUM. Fix by not selecting such OIDs when + creating a new TOAST entry. + + + + + + Change ANALYZE's algorithm for updating + pg_class.reltuples + (David Gould) + + + + Previously, pages not actually scanned by ANALYZE + were assumed to retain their old tuple density. In a large table + where ANALYZE samples only a small fraction of the + pages, this meant that the overall tuple density estimate could not + change very much, so that reltuples would + change nearly proportionally to changes in the table's physical size + (relpages) regardless of what was actually + happening in the table. This has been observed to result + in reltuples becoming so much larger than + reality as to effectively shut off autovacuuming. To fix, assume + that ANALYZE's sample is a statistically unbiased + sample of the table (as it should be), and just extrapolate the + density observed within those pages to the whole table. + + + + + + Fix UPDATE/DELETE ... WHERE CURRENT OF to not fail + when the referenced cursor uses an index-only-scan plan (Yugo Nagata, + Tom Lane) + + + + + + Fix incorrect planning of join clauses pushed into parameterized + paths (Andrew Gierth, Tom Lane) + + + + This error could result in misclassifying a condition as + a join filter for an outer join when it should be a + plain filter condition, leading to incorrect join + output. + + + + + + Fix misoptimization of CHECK constraints having + provably-NULL subclauses of + top-level AND/OR conditions + (Tom Lane, Dean Rasheed) + + + + This could, for example, allow constraint exclusion to exclude a + child table that should not be excluded from a query. 
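      As a rough illustration of the planner decision involved (the table names
      are made up for this sketch), constraint exclusion relies on a child
      table's CHECK constraints to prove that the child cannot contain matching
      rows:

      CREATE TABLE measurements (logdate date, reading int);
      CREATE TABLE measurements_2017 (
          CHECK (logdate >= DATE '2017-01-01' AND logdate < DATE '2018-01-01')
      ) INHERITS (measurements);

      SET constraint_exclusion = partition;
      -- The bug could let a CHECK constraint with provably-NULL AND/OR
      -- subclauses wrongly "prove" a child empty, excluding it from plans
      -- such as this one.
      EXPLAIN SELECT * FROM measurements WHERE logdate >= DATE '2017-06-01';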
+ + + + + + Avoid failure if a query-cancel or session-termination interrupt + occurs while committing a prepared transaction (Stas Kelvich) + + + + + + Fix query-lifespan memory leakage in repeatedly executed hash joins + (Tom Lane) + + + + + + Fix overly strict sanity check + in heap_prepare_freeze_tuple + (Álvaro Herrera) + + + + This could result in incorrect cannot freeze committed + xmax failures in databases that have + been pg_upgrade'd from 9.2 or earlier. + + + + + + Prevent dangling-pointer dereference when a C-coded before-update row + trigger returns the old tuple (Rushabh Lathia) + + + + + + Reduce locking during autovacuum worker scheduling (Jeff Janes) + + + + The previous behavior caused drastic loss of potential worker + concurrency in databases with many tables. + + + + + + Ensure client hostname is copied while copying + pg_stat_activity data to local memory + (Edmund Horner) + + + + Previously the supposedly-local snapshot contained a pointer into + shared memory, allowing the client hostname column to change + unexpectedly if any existing session disconnected. + + + + + + Fix incorrect processing of multiple compound affixes + in ispell dictionaries (Arthur Zakirov) + + + + + + Fix collation-aware searches (that is, indexscans using inequality + operators) in SP-GiST indexes on text columns (Tom Lane) + + + + Such searches would return the wrong set of rows in most non-C + locales. + + + + + + Count the number of index tuples correctly during initial build of an + SP-GiST index (Tomas Vondra) + + + + Previously, the tuple count was reported to be the same as that of + the underlying table, which is wrong if the index is partial. + + + + + + Count the number of index tuples correctly during vacuuming of a + GiST index (Andrey Borodin) + + + + Previously it reported the estimated number of heap tuples, + which might be inaccurate, and is certainly wrong if the + index is partial. + + + + + + Allow scalarltsel + and scalargtsel to be used on non-core datatypes + (Tomas Vondra) + + + + + + Reduce libpq's memory consumption when a + server error is reported after a large amount of query output has + been collected (Tom Lane) + + + + Discard the previous output before, not after, processing the error + message. On some platforms, notably Linux, this can make a + difference in the application's subsequent memory footprint. + + + + + + Fix double-free crashes in ecpg + (Patrick Krecker, Jeevan Ladhe) + + + + + + Fix ecpg to handle long long + int variables correctly in MSVC builds (Michael Meskes, + Andrew Gierth) + + + + + + Fix mis-quoting of values for list-valued GUC variables in dumps + (Michael Paquier, Tom Lane) + + + + The local_preload_libraries, + session_preload_libraries, + shared_preload_libraries, + and temp_tablespaces variables were not correctly + quoted in pg_dump output. This would + cause problems if settings for these variables appeared in + CREATE FUNCTION ... SET or ALTER + DATABASE/ROLE ... SET clauses. + + + + + + Fix overflow handling in PL/pgSQL + integer FOR loops (Tom Lane) + + + + The previous coding failed to detect overflow of the loop variable + on some non-gcc compilers, leading to an infinite loop. 
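      A minimal sketch of the affected construct: an integer FOR loop whose
      upper bound sits at the top of the 4-byte integer range, so that
      incrementing the loop variable past the bound overflows:

      DO $$
      BEGIN
          -- With the fix this ends normally after 2147483647 (INT_MAX) even on
          -- compilers where the overflow check previously failed; before, it
          -- could spin forever.
          FOR i IN 2147483645 .. 2147483647 LOOP
              RAISE NOTICE 'i = %', i;
          END LOOP;
      END;
      $$;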
+ + + + + + Adjust PL/Python regression tests to pass + under Python 3.7 (Peter Eisentraut) + + + + + + Support testing PL/Python and related + modules when building with Python 3 and MSVC (Andrew Dunstan) + + + + + + Rename internal b64_encode + and b64_decode functions to avoid conflict with + Solaris 11.4 built-in functions (Rainer Orth) + + + + + + Sync our copy of the timezone library with IANA tzcode release 2018e + (Tom Lane) + + + + This fixes the zic timezone data compiler + to cope with negative daylight-savings offsets. While + the PostgreSQL project will not + immediately ship such timezone data, zic + might be used with timezone data obtained directly from IANA, so it + seems prudent to update zic now. + + + + + + Update time zone data files to tzdata + release 2018d for DST law changes in Palestine and Antarctica (Casey + Station), plus historical corrections for Portugal and its colonies, + as well as Enderbury, Jamaica, Turks & Caicos Islands, and + Uruguay. + + + + + + + + + + Release 9.3.22 + + + Release date: + 2018-03-01 + + + + This release contains a variety of fixes from 9.3.21. + For information about new features in the 9.3 major release, see + . + + + + Migration to Version 9.3.22 + + + A dump/restore is not required for those running 9.3.X. + + + + However, if you run an installation in which not all users are mutually + trusting, or if you maintain an application or extension that is + intended for use in arbitrary situations, it is strongly recommended + that you read the documentation changes described in the first changelog + entry below, and take suitable steps to ensure that your installation or + code is secure. + + + + Also, the changes described in the second changelog entry below may + cause functions used in index expressions or materialized views to fail + during auto-analyze, or when reloading from a dump. After upgrading, + monitor the server logs for such problems, and fix affected functions. + + + + Also, if you are upgrading from a version earlier than 9.3.18, + see . + + + + + Changes + + + + + + Document how to configure installations and applications to guard + against search-path-dependent trojan-horse attacks from other users + (Noah Misch) + + + + Using a search_path setting that includes any + schemas writable by a hostile user enables that user to capture + control of queries and then run arbitrary SQL code with the + permissions of the attacked user. While it is possible to write + queries that are proof against such hijacking, it is notationally + tedious, and it's very easy to overlook holes. Therefore, we now + recommend configurations in which no untrusted schemas appear in + one's search path. Relevant documentation appears in + (for database administrators and users), + (for application authors), + (for extension authors), and + (for authors + of SECURITY DEFINER functions). + (CVE-2018-1058) + + + + + + Avoid use of insecure search_path settings + in pg_dump and other client programs + (Noah Misch, Tom Lane) + + + + pg_dump, + pg_upgrade, + vacuumdb and + other PostgreSQL-provided applications were + themselves vulnerable to the type of hijacking described in the previous + changelog entry; since these applications are commonly run by + superusers, they present particularly attractive targets. To make them + secure whether or not the installation as a whole has been secured, + modify them to include only the pg_catalog + schema in their search_path settings. + Autovacuum worker processes now do the same, as well. 
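      For installations applying the same advice to their own roles, a minimal
      sketch of the commonly recommended hardening (the role name is
      hypothetical; adapt to local policy):

      -- Keep untrusted users from creating objects in the public schema, and
      -- keep schemas writable by other users out of the search path.
      REVOKE CREATE ON SCHEMA public FROM PUBLIC;
      ALTER ROLE app_user SET search_path = "$user", pg_catalog;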
+ + + + In cases where user-provided functions are indirectly executed by + these programs — for example, user-provided functions in index + expressions — the tighter search_path may + result in errors, which will need to be corrected by adjusting those + user-provided functions to not assume anything about what search path + they are invoked under. That has always been good practice, but now + it will be necessary for correct behavior. + (CVE-2018-1058) + + + + + + Fix misbehavior of concurrent-update rechecks with CTE references + appearing in subplans (Tom Lane) + + + + If a CTE (WITH clause reference) is used in an + InitPlan or SubPlan, and the query requires a recheck due to trying + to update or lock a concurrently-updated row, incorrect results could + be obtained. + + + + + + Fix planner failures with overlapping mergejoin clauses in an outer + join (Tom Lane) + + + + These mistakes led to left and right pathkeys do not match in + mergejoin or outer pathkeys do not match + mergeclauses planner errors in corner cases. + + + + + + Repair pg_upgrade's failure to + preserve relfrozenxid for materialized + views (Tom Lane, Andres Freund) + + + + This oversight could lead to data corruption in materialized views + after an upgrade, manifesting as could not access status of + transaction or found xmin from before + relfrozenxid errors. The problem would be more likely to + occur in seldom-refreshed materialized views, or ones that were + maintained only with REFRESH MATERIALIZED VIEW + CONCURRENTLY. + + + + If such corruption is observed, it can be repaired by refreshing the + materialized view (without CONCURRENTLY). + + + + + + Fix incorrect reporting of PL/Python function names in + error CONTEXT stacks (Tom Lane) + + + + An error occurring within a nested PL/Python function call (that is, + one reached via a SPI query from another PL/Python function) would + result in a stack trace showing the inner function's name twice, + rather than the expected results. Also, an error in a nested + PL/Python DO block could result in a null pointer + dereference crash on some platforms. + + + + + + Allow contrib/auto_explain's + log_min_duration setting to range up + to INT_MAX, or about 24 days instead of 35 minutes + (Tom Lane) + + + + + + + + + + Release 9.3.21 + + + Release date: + 2018-02-08 + + + + This release contains a variety of fixes from 9.3.20. + For information about new features in the 9.3 major release, see + . + + + + Migration to Version 9.3.21 + + + A dump/restore is not required for those running 9.3.X. + + + + However, if you are upgrading from a version earlier than 9.3.18, + see . + + + + + Changes + + + + + + Ensure that all temporary files made + by pg_upgrade are non-world-readable + (Tom Lane, Noah Misch) + + + + pg_upgrade normally restricts its + temporary files to be readable and writable only by the calling user. + But the temporary file containing pg_dumpall -g + output would be group- or world-readable, or even writable, if the + user's umask setting allows. In typical usage on + multi-user machines, the umask and/or the working + directory's permissions would be tight enough to prevent problems; + but there may be people using pg_upgrade + in scenarios where this oversight would permit disclosure of database + passwords to unfriendly eyes. 
+ (CVE-2018-1053) + + + + + + Fix vacuuming of tuples that were updated while key-share locked + (Andres Freund, Álvaro Herrera) + + + + In some cases VACUUM would fail to remove such + tuples even though they are now dead, leading to assorted data + corruption scenarios. + + + + + + Fix inadequate buffer locking in some LSN fetches (Jacob Champion, + Asim Praveen, Ashwin Agrawal) + + + + These errors could result in misbehavior under concurrent load. + The potential consequences have not been characterized fully. + + + + + + Avoid unnecessary failure in a query on an inheritance tree that + occurs concurrently with some child table being removed from the tree + by ALTER TABLE NO INHERIT (Tom Lane) + + + + + + Repair failure with correlated sub-SELECT + inside VALUES inside a LATERAL + subquery (Tom Lane) + + + + + + Fix could not devise a query plan for the given query + planner failure for some cases involving nested UNION + ALL inside a lateral subquery (Tom Lane) + + + + + + Fix has_sequence_privilege() to + support WITH GRANT OPTION tests, + as other privilege-testing functions do (Joe Conway) + + + + + + In databases using UTF8 encoding, ignore any XML declaration that + asserts a different encoding (Pavel Stehule, Noah Misch) + + + + We always store XML strings in the database encoding, so allowing + libxml to act on a declaration of another encoding gave wrong results. + In encodings other than UTF8, we don't promise to support non-ASCII + XML data anyway, so retain the previous behavior for bug compatibility. + This change affects only xpath() and related + functions; other XML code paths already acted this way. + + + + + + Provide for forward compatibility with future minor protocol versions + (Robert Haas, Badrul Chowdhury) + + + + Up to now, PostgreSQL servers simply + rejected requests to use protocol versions newer than 3.0, so that + there was no functional difference between the major and minor parts + of the protocol version number. Allow clients to request versions 3.x + without failing, sending back a message showing that the server only + understands 3.0. This makes no difference at the moment, but + back-patching this change should allow speedier introduction of future + minor protocol upgrades. + + + + + + Prevent stack-overflow crashes when planning extremely deeply + nested set operations + (UNION/INTERSECT/EXCEPT) + (Tom Lane) + + + + + + Fix null-pointer crashes for some types of LDAP URLs appearing + in pg_hba.conf (Thomas Munro) + + + + + + Fix sample INSTR() functions in the PL/pgSQL + documentation (Yugo Nagata, Tom Lane) + + + + These functions are stated to + be Oracle compatible, but + they weren't exactly. In particular, there was a discrepancy in the + interpretation of a negative third parameter: Oracle thinks that a + negative value indicates the last place where the target substring can + begin, whereas our functions took it as the last place where the + target can end. Also, Oracle throws an error for a zero or negative + fourth parameter, whereas our functions returned zero. + + + + The sample code has been adjusted to match Oracle's behavior more + precisely. Users who have copied this code into their applications + may wish to update their copies. + + + + + + Fix pg_dump to make ACL (permissions), + comment, and security label entries reliably identifiable in archive + output formats (Tom Lane) + + + + The tag portion of an ACL archive entry was usually + just the name of the associated object. 
Make it start with the object + type instead, bringing ACLs into line with the convention already used + for comment and security label archive entries. Also, fix the + comment and security label entries for the whole database, if present, + to make their tags start with DATABASE so that they + also follow this convention. This prevents false matches in code that + tries to identify large-object-related entries by seeing if the tag + starts with LARGE OBJECT. That could have resulted + in misclassifying entries as data rather than schema, with undesirable + results in a schema-only or data-only dump. + + + + Note that this change has user-visible results in the output + of pg_restore --list. + + + + + + In ecpg, detect indicator arrays that do + not have the correct length and report an error (David Rader) + + + + + + Avoid triggering a libc assertion + in contrib/hstore, due to use + of memcpy() with equal source and destination + pointers (Tomas Vondra) + + + + + + Provide modern examples of how to auto-start Postgres on macOS + (Tom Lane) + + + + The scripts in contrib/start-scripts/osx use + infrastructure that's been deprecated for over a decade, and which no + longer works at all in macOS releases of the last couple of years. + Add a new subdirectory contrib/start-scripts/macos + containing scripts that use the newer launchd + infrastructure. + + + + + + Fix incorrect selection of configuration-specific libraries for + OpenSSL on Windows (Andrew Dunstan) + + + + + + Support linking to MinGW-built versions of libperl (Noah Misch) + + + + This allows building PL/Perl with some common Perl distributions for + Windows. + + + + + + Fix MSVC build to test whether 32-bit libperl + needs -D_USE_32BIT_TIME_T (Noah Misch) + + + + Available Perl distributions are inconsistent about what they expect, + and lack any reliable means of reporting it, so resort to a build-time + test on what the library being used actually does. + + + + + + On Windows, install the crash dump handler earlier in postmaster + startup (Takayuki Tsunakawa) + + + + This may allow collection of a core dump for some early-startup + failures that did not produce a dump before. + + + + + + On Windows, avoid encoding-conversion-related crashes when emitting + messages very early in postmaster startup (Takayuki Tsunakawa) + + + + + + Use our existing Motorola 68K spinlock code on OpenBSD as + well as NetBSD (David Carlier) + + + + + + Add support for spinlocks on Motorola 88K (David Carlier) + + + + + + Update time zone data files to tzdata + release 2018c for DST law changes in Brazil, Sao Tome and Principe, + plus historical corrections for Bolivia, Japan, and South Sudan. + The US/Pacific-New zone has been removed (it was + only an alias for America/Los_Angeles anyway). + + + + + + + + + + Release 9.3.20 + + + Release date: + 2017-11-09 + + + + This release contains a variety of fixes from 9.3.19. + For information about new features in the 9.3 major release, see + . + + + + Migration to Version 9.3.20 + + + A dump/restore is not required for those running 9.3.X. + + + + However, if you are upgrading from a version earlier than 9.3.18, + see . + + + + + + Changes + + + + + + Fix crash due to rowtype mismatch + in json{b}_populate_recordset() + (Michael Paquier, Tom Lane) + + + + These functions used the result rowtype specified in the FROM + ... AS clause without checking that it matched the actual + rowtype of the supplied tuple value. 
If it didn't, that would usually + result in a crash, though disclosure of server memory contents seems + possible as well. + (CVE-2017-15098) + + + + + + Fix sample server-start scripts to become $PGUSER + before opening $PGLOG (Noah Misch) + + + + Previously, the postmaster log file was opened while still running as + root. The database owner could therefore mount an attack against + another system user by making $PGLOG be a symbolic + link to some other file, which would then become corrupted by appending + log messages. + + + + By default, these scripts are not installed anywhere. Users who have + made use of them will need to manually recopy them, or apply the same + changes to their modified versions. If the + existing $PGLOG file is root-owned, it will need to + be removed or renamed out of the way before restarting the server with + the corrected script. + (CVE-2017-12172) + + + + + + Properly reject attempts to convert infinite float values to + type numeric (Tom Lane, KaiGai Kohei) + + + + Previously the behavior was platform-dependent. + + + + + + Fix corner-case crashes when columns have been added to the end of a + view (Tom Lane) + + + + + + Record proper dependencies when a view or rule + contains FieldSelect + or FieldStore expression nodes (Tom Lane) + + + + Lack of these dependencies could allow a column or data + type DROP to go through when it ought to fail, + thereby causing later uses of the view or rule to get errors. + This patch does not do anything to protect existing views/rules, + only ones created in the future. + + + + + + Correctly detect hashability of range data types (Tom Lane) + + + + The planner mistakenly assumed that any range type could be hashed + for use in hash joins or hash aggregation, but actually it must check + whether the range's subtype has hash support. This does not affect any + of the built-in range types, since they're all hashable anyway. + + + + + + Fix low-probability loss of NOTIFY messages due to + XID wraparound (Marko Tiikkaja, Tom Lane) + + + + If a session executed no queries, but merely listened for + notifications, for more than 2 billion transactions, it started to miss + some notifications from concurrently-committing transactions. + + + + + + Prevent low-probability crash in processing of nested trigger firings + (Tom Lane) + + + + + + Correctly restore the umask setting when file creation fails + in COPY or lo_export() + (Peter Eisentraut) + + + + + + Give a better error message for duplicate column names + in ANALYZE (Nathan Bossart) + + + + + + Fix mis-parsing of the last line in a + non-newline-terminated pg_hba.conf file + (Tom Lane) + + + + + + Fix libpq to not require user's home + directory to exist (Tom Lane) + + + + In v10, failure to find the home directory while trying to + read ~/.pgpass was treated as a hard error, + but it should just cause that file to not be found. Both v10 and + previous release branches made the same mistake when + reading ~/.pg_service.conf, though this was less + obvious since that file is not sought unless a service name is + specified. 
+ + + + + + Fix libpq to guard against integer + overflow in the row count of a PGresult + (Michael Paquier) + + + + + + Fix ecpg's handling of out-of-scope cursor + declarations with pointer or array variables (Michael Meskes) + + + + + + Make ecpglib's Informix-compatibility mode ignore fractional digits in + integer input strings, as expected (Gao Zengqi, Michael Meskes) + + + + + + Sync our copy of the timezone library with IANA release tzcode2017c + (Tom Lane) + + + + This fixes various issues; the only one likely to be user-visible + is that the default DST rules for a POSIX-style zone name, if + no posixrules file exists in the timezone data + directory, now match current US law rather than what it was a dozen + years ago. + + + + + + Update time zone data files to tzdata + release 2017c for DST law changes in Fiji, Namibia, Northern Cyprus, + Sudan, Tonga, and Turks & Caicos Islands, plus historical + corrections for Alaska, Apia, Burma, Calcutta, Detroit, Ireland, + Namibia, and Pago Pago. + + + + + + + + + + Release 9.3.19 + + + Release date: + 2017-08-31 + + + + This release contains a small number of fixes from 9.3.18. + For information about new features in the 9.3 major release, see + . + + + + Migration to Version 9.3.19 + + + A dump/restore is not required for those running 9.3.X. + + + + However, if you are upgrading from a version earlier than 9.3.18, + see . + + + + + + Changes + + + + + + Show foreign tables + in information_schema.table_privileges + view (Peter Eisentraut) + + + + All other relevant information_schema views include + foreign tables, but this one ignored them. + + + + Since this view definition is installed by initdb, + merely upgrading will not fix the problem. If you need to fix this + in an existing installation, you can, as a superuser, do this + in psql: + +SET search_path TO information_schema; +CREATE OR REPLACE VIEW table_privileges AS + SELECT CAST(u_grantor.rolname AS sql_identifier) AS grantor, + CAST(grantee.rolname AS sql_identifier) AS grantee, + CAST(current_database() AS sql_identifier) AS table_catalog, + CAST(nc.nspname AS sql_identifier) AS table_schema, + CAST(c.relname AS sql_identifier) AS table_name, + CAST(c.prtype AS character_data) AS privilege_type, + CAST( + CASE WHEN + -- object owner always has grant options + pg_has_role(grantee.oid, c.relowner, 'USAGE') + OR c.grantable + THEN 'YES' ELSE 'NO' END AS yes_or_no) AS is_grantable, + CAST(CASE WHEN c.prtype = 'SELECT' THEN 'YES' ELSE 'NO' END AS yes_or_no) AS with_hierarchy + + FROM ( + SELECT oid, relname, relnamespace, relkind, relowner, (aclexplode(coalesce(relacl, acldefault('r', relowner)))).* FROM pg_class + ) AS c (oid, relname, relnamespace, relkind, relowner, grantor, grantee, prtype, grantable), + pg_namespace nc, + pg_authid u_grantor, + ( + SELECT oid, rolname FROM pg_authid + UNION ALL + SELECT 0::oid, 'PUBLIC' + ) AS grantee (oid, rolname) + + WHERE c.relnamespace = nc.oid + AND c.relkind IN ('r', 'v', 'f') + AND c.grantee = grantee.oid + AND c.grantor = u_grantor.oid + AND c.prtype IN ('INSERT', 'SELECT', 'UPDATE', 'DELETE', 'TRUNCATE', 'REFERENCES', 'TRIGGER') + AND (pg_has_role(u_grantor.oid, 'USAGE') + OR pg_has_role(grantee.oid, 'USAGE') + OR grantee.rolname = 'PUBLIC'); + + This must be repeated in each database to be fixed, + including template0. 
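      Once the replacement view is in place, a quick sanity check (the foreign
      table name below is hypothetical) is to confirm that foreign tables now
      appear in the view:

      -- Should now return rows for foreign tables as well as ordinary tables.
      SELECT table_schema, table_name, grantee, privilege_type
      FROM information_schema.table_privileges
      WHERE table_name = 'my_foreign_table';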
+ + + + + + Clean up handling of a fatal exit (e.g., due to receipt + of SIGTERM) that occurs while trying to execute + a ROLLBACK of a failed transaction (Tom Lane) + + + + This situation could result in an assertion failure. In production + builds, the exit would still occur, but it would log an unexpected + message about cannot drop active portal. + + + + + + Remove assertion that could trigger during a fatal exit (Tom Lane) + + + + + + Correctly identify columns that are of a range type or domain type over + a composite type or domain type being searched for (Tom Lane) + + + + Certain ALTER commands that change the definition of a + composite type or domain type are supposed to fail if there are any + stored values of that type in the database, because they lack the + infrastructure needed to update or check such values. Previously, + these checks could miss relevant values that are wrapped inside range + types or sub-domains, possibly allowing the database to become + inconsistent. + + + + + + Fix crash in pg_restore when using parallel mode and + using a list file to select a subset of items to restore + (Fabrízio de Royes Mello) + + + + + + Change ecpg's parser to allow RETURNING + clauses without attached C variables (Michael Meskes) + + + + This allows ecpg programs to contain SQL constructs + that use RETURNING internally (for example, inside a CTE) + rather than using it to define values to be returned to the client. + + + + + + Improve selection of compiler flags for PL/Perl on Windows (Tom Lane) + + + + This fix avoids possible crashes of PL/Perl due to inconsistent + assumptions about the width of time_t values. + A side-effect that may be visible to extension developers is + that _USE_32BIT_TIME_T is no longer defined globally + in PostgreSQL Windows builds. This is not expected + to cause problems, because type time_t is not used + in any PostgreSQL API definitions. + + + + + + + + Release 9.3.18 @@ -12,7 +2077,7 @@ This release contains a variety of fixes from 9.3.17. For information about new features in the 9.3 major release, see - . + . @@ -29,7 +2094,7 @@ Also, if you are upgrading from a version earlier than 9.3.16, - see . + see . @@ -42,7 +2107,7 @@ Further restrict visibility - of pg_user_mappings.umoptions, to + of pg_user_mappings.umoptions, to protect passwords stored as user mapping options (Noah Misch) @@ -50,11 +2115,11 @@ The fix for CVE-2017-7486 was incorrect: it allowed a user to see the options in her own user mapping, even if she did not - have USAGE permission on the associated foreign server. + have USAGE permission on the associated foreign server. Such options might include a password that had been provided by the server owner rather than the user herself. - Since information_schema.user_mapping_options does not - show the options in such cases, pg_user_mappings + Since information_schema.user_mapping_options does not + show the options in such cases, pg_user_mappings should not either. (CVE-2017-7547) @@ -69,15 +2134,15 @@ Restart the postmaster after adding allow_system_table_mods - = true to postgresql.conf. (In versions - supporting ALTER SYSTEM, you can use that to make the + = true to postgresql.conf. (In versions + supporting ALTER SYSTEM, you can use that to make the configuration change, but you'll still need a restart.) 
- In each database of the cluster, + In each database of the cluster, run the following commands as superuser: SET search_path = pg_catalog; @@ -108,15 +2173,15 @@ CREATE OR REPLACE VIEW pg_user_mappings AS - Do not forget to include the template0 - and template1 databases, or the vulnerability will still - exist in databases you create later. To fix template0, + Do not forget to include the template0 + and template1 databases, or the vulnerability will still + exist in databases you create later. To fix template0, you'll need to temporarily make it accept connections. - In PostgreSQL 9.5 and later, you can use + In PostgreSQL 9.5 and later, you can use ALTER DATABASE template0 WITH ALLOW_CONNECTIONS true; - and then after fixing template0, undo that with + and then after fixing template0, undo that with ALTER DATABASE template0 WITH ALLOW_CONNECTIONS false; @@ -130,7 +2195,7 @@ UPDATE pg_database SET datallowconn = false WHERE datname = 'template0'; - Finally, remove the allow_system_table_mods configuration + Finally, remove the allow_system_table_mods configuration setting, and again restart the postmaster. @@ -144,16 +2209,16 @@ UPDATE pg_database SET datallowconn = false WHERE datname = 'template0'; - libpq ignores empty password specifications, and does + libpq ignores empty password specifications, and does not transmit them to the server. So, if a user's password has been set to the empty string, it's impossible to log in with that password - via psql or other libpq-based + via psql or other libpq-based clients. An administrator might therefore believe that setting the password to empty is equivalent to disabling password login. - However, with a modified or non-libpq-based client, + However, with a modified or non-libpq-based client, logging in could be possible, depending on which authentication method is configured. In particular the most common - method, md5, accepted empty passwords. + method, md5, accepted empty passwords. Change the server to reject empty passwords in all cases. (CVE-2017-7546) @@ -221,7 +2286,7 @@ UPDATE pg_database SET datallowconn = false WHERE datname = 'template0'; - Fix code for setting on + Fix code for setting on Solaris (Tom Lane) @@ -253,28 +2318,28 @@ UPDATE pg_database SET datallowconn = false WHERE datname = 'template0'; Fix possible creation of an invalid WAL segment when a standby is - promoted just after it processes an XLOG_SWITCH WAL + promoted just after it processes an XLOG_SWITCH WAL record (Andres Freund) - Fix SIGHUP and SIGUSR1 handling in + Fix SIGHUP and SIGUSR1 handling in walsender processes (Petr Jelinek, Andres Freund) - Fix unnecessarily slow restarts of walreceiver + Fix unnecessarily slow restarts of walreceiver processes due to race condition in postmaster (Tom Lane) - Fix cases where an INSERT or UPDATE assigns + Fix cases where an INSERT or UPDATE assigns to more than one element of a column that is of domain-over-array type (Tom Lane) @@ -282,7 +2347,7 @@ UPDATE pg_database SET datallowconn = false WHERE datname = 'template0'; - Allow window functions to be used in sub-SELECTs that + Allow window functions to be used in sub-SELECTs that are within the arguments of an aggregate function (Tom Lane) @@ -290,56 +2355,56 @@ UPDATE pg_database SET datallowconn = false WHERE datname = 'template0'; Move autogenerated array types out of the way during - ALTER ... RENAME (Vik Fearing) + ALTER ... 
RENAME (Vik Fearing) Previously, we would rename a conflicting autogenerated array type - out of the way during CREATE; this fix extends that + out of the way during CREATE; this fix extends that behavior to renaming operations. - Ensure that ALTER USER ... SET accepts all the syntax - variants that ALTER ROLE ... SET does (Peter Eisentraut) + Ensure that ALTER USER ... SET accepts all the syntax + variants that ALTER ROLE ... SET does (Peter Eisentraut) Properly update dependency info when changing a datatype I/O - function's argument or return type from opaque to the + function's argument or return type from opaque to the correct type (Heikki Linnakangas) - CREATE TYPE updates I/O functions declared in this + CREATE TYPE updates I/O functions declared in this long-obsolete style, but it forgot to record a dependency on the - type, allowing a subsequent DROP TYPE to leave broken + type, allowing a subsequent DROP TYPE to leave broken function definitions behind. - Reduce memory usage when ANALYZE processes - a tsvector column (Heikki Linnakangas) + Reduce memory usage when ANALYZE processes + a tsvector column (Heikki Linnakangas) Fix unnecessary precision loss and sloppy rounding when multiplying - or dividing money values by integers or floats (Tom Lane) + or dividing money values by integers or floats (Tom Lane) Tighten checks for whitespace in functions that parse identifiers, - such as regprocedurein() (Tom Lane) + such as regprocedurein() (Tom Lane) @@ -350,20 +2415,20 @@ UPDATE pg_database SET datallowconn = false WHERE datname = 'template0'; - Use relevant #define symbols from Perl while - compiling PL/Perl (Ashutosh Sharma, Tom Lane) + Use relevant #define symbols from Perl while + compiling PL/Perl (Ashutosh Sharma, Tom Lane) This avoids portability problems, typically manifesting as - a handshake mismatch during library load, when working with + a handshake mismatch during library load, when working with recent Perl versions. 
- In libpq, reset GSS/SASL and SSPI authentication + In libpq, reset GSS/SASL and SSPI authentication state properly after a failed connection attempt (Michael Paquier) @@ -376,9 +2441,9 @@ UPDATE pg_database SET datallowconn = false WHERE datname = 'template0'; - In psql, fix failure when COPY FROM STDIN + In psql, fix failure when COPY FROM STDIN is ended with a keyboard EOF signal and then another COPY - FROM STDIN is attempted (Thomas Munro) + FROM STDIN is attempted (Thomas Munro) @@ -389,8 +2454,8 @@ UPDATE pg_database SET datallowconn = false WHERE datname = 'template0'; - Fix pg_dump and pg_restore to - emit REFRESH MATERIALIZED VIEW commands last (Tom Lane) + Fix pg_dump and pg_restore to + emit REFRESH MATERIALIZED VIEW commands last (Tom Lane) @@ -401,7 +2466,7 @@ UPDATE pg_database SET datallowconn = false WHERE datname = 'template0'; - Fix pg_dump with the option to drop event triggers as expected (Tom Lane) @@ -414,14 +2479,14 @@ UPDATE pg_database SET datallowconn = false WHERE datname = 'template0'; - Fix pg_dump to not emit invalid SQL for an empty + Fix pg_dump to not emit invalid SQL for an empty operator class (Daniel Gustafsson) - Fix pg_dump output to stdout on Windows (Kuntal Ghosh) + Fix pg_dump output to stdout on Windows (Kuntal Ghosh) @@ -432,14 +2497,14 @@ UPDATE pg_database SET datallowconn = false WHERE datname = 'template0'; - Fix pg_get_ruledef() to print correct output for - the ON SELECT rule of a view whose columns have been + Fix pg_get_ruledef() to print correct output for + the ON SELECT rule of a view whose columns have been renamed (Tom Lane) - In some corner cases, pg_dump relies - on pg_get_ruledef() to dump views, so that this error + In some corner cases, pg_dump relies + on pg_get_ruledef() to dump views, so that this error could result in dump/reload failures. 
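      For reference, the rule text that pg_dump relies on in those corner cases
      can be inspected directly; a small sketch, with a hypothetical view name:

      -- Shows the ON SELECT rule (always named _RETURN) that defines the view,
      -- as pg_dump would reconstruct it.
      SELECT pg_get_ruledef(r.oid, true)
      FROM pg_rewrite AS r
      WHERE r.ev_class = 'my_view'::regclass
        AND r.rulename = '_RETURN';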
@@ -447,13 +2512,13 @@ UPDATE pg_database SET datallowconn = false WHERE datname = 'template0'; Fix dumping of outer joins with empty constraints, such as the result - of a NATURAL LEFT JOIN with no common columns (Tom Lane) + of a NATURAL LEFT JOIN with no common columns (Tom Lane) - Fix dumping of function expressions in the FROM clause in + Fix dumping of function expressions in the FROM clause in cases where the expression does not deparse into something that looks like a function call (Tom Lane) @@ -461,7 +2526,7 @@ UPDATE pg_database SET datallowconn = false WHERE datname = 'template0'; - Fix pg_basebackup output to stdout on Windows + Fix pg_basebackup output to stdout on Windows (Haribabu Kommi) @@ -473,8 +2538,8 @@ UPDATE pg_database SET datallowconn = false WHERE datname = 'template0'; - Fix pg_upgrade to ensure that the ending WAL record - does not have = minimum + Fix pg_upgrade to ensure that the ending WAL record + does not have = minimum (Bruce Momjian) @@ -486,9 +2551,9 @@ UPDATE pg_database SET datallowconn = false WHERE datname = 'template0'; - In postgres_fdw, re-establish connections to remote - servers after ALTER SERVER or ALTER USER - MAPPING commands (Kyotaro Horiguchi) + In postgres_fdw, re-establish connections to remote + servers after ALTER SERVER or ALTER USER + MAPPING commands (Kyotaro Horiguchi) @@ -499,7 +2564,7 @@ UPDATE pg_database SET datallowconn = false WHERE datname = 'template0'; - In postgres_fdw, allow cancellation of remote + In postgres_fdw, allow cancellation of remote transaction control commands (Robert Haas, Rafia Sabih) @@ -511,7 +2576,7 @@ UPDATE pg_database SET datallowconn = false WHERE datname = 'template0'; - Always use , not , when building shared libraries with gcc (Tom Lane) @@ -531,27 +2596,27 @@ UPDATE pg_database SET datallowconn = false WHERE datname = 'template0'; - In MSVC builds, handle the case where the openssl - library is not within a VC subdirectory (Andrew Dunstan) + In MSVC builds, handle the case where the OpenSSL + library is not within a VC subdirectory (Andrew Dunstan) - In MSVC builds, add proper include path for libxml2 + In MSVC builds, add proper include path for libxml2 header files (Andrew Dunstan) This fixes a former need to move things around in standard Windows - installations of libxml2. + installations of libxml2. In MSVC builds, recognize a Tcl library that is - named tcl86.lib (Noah Misch) + named tcl86.lib (Noah Misch) @@ -571,7 +2636,7 @@ UPDATE pg_database SET datallowconn = false WHERE datname = 'template0'; This release contains a variety of fixes from 9.3.16. For information about new features in the 9.3 major release, see - . + . @@ -588,7 +2653,7 @@ UPDATE pg_database SET datallowconn = false WHERE datname = 'template0'; Also, if you are upgrading from a version earlier than 9.3.16, - see . + see . @@ -601,18 +2666,18 @@ UPDATE pg_database SET datallowconn = false WHERE datname = 'template0'; Restrict visibility - of pg_user_mappings.umoptions, to + of pg_user_mappings.umoptions, to protect passwords stored as user mapping options (Michael Paquier, Feike Steenbergen) The previous coding allowed the owner of a foreign server object, - or anyone he has granted server USAGE permission to, + or anyone he has granted server USAGE permission to, to see the options for all user mappings associated with that server. This might well include passwords for other users. 
Adjust the view definition to match the behavior of - information_schema.user_mapping_options, namely that + information_schema.user_mapping_options, namely that these options are visible to the user being mapped, or if the mapping is for PUBLIC and the current user is the server owner, or if the current user is a superuser. @@ -623,7 +2688,7 @@ UPDATE pg_database SET datallowconn = false WHERE datname = 'template0'; By itself, this patch will only fix the behavior in newly initdb'd databases. If you wish to apply this change in an existing database, follow the corrected procedure shown in the changelog entry for - CVE-2017-7547, in . + CVE-2017-7547, in . @@ -636,7 +2701,7 @@ UPDATE pg_database SET datallowconn = false WHERE datname = 'template0'; Some selectivity estimation functions in the planner will apply user-defined operators to values obtained - from pg_statistic, such as most common values and + from pg_statistic, such as most common values and histogram entries. This occurs before table permissions are checked, so a nefarious user could exploit the behavior to obtain these values for table columns he does not have permission to read. To fix, @@ -650,17 +2715,17 @@ UPDATE pg_database SET datallowconn = false WHERE datname = 'template0'; - Restore libpq's recognition of - the PGREQUIRESSL environment variable (Daniel Gustafsson) + Restore libpq's recognition of + the PGREQUIRESSL environment variable (Daniel Gustafsson) Processing of this environment variable was unintentionally dropped - in PostgreSQL 9.3, but its documentation remained. + in PostgreSQL 9.3, but its documentation remained. This creates a security hazard, since users might be relying on the environment variable to force SSL-encrypted connections, but that would no longer be guaranteed. Restore handling of the variable, - but give it lower priority than PGSSLMODE, to avoid + but give it lower priority than PGSSLMODE, to avoid breaking configurations that work correctly with post-9.3 code. (CVE-2017-7485) @@ -668,7 +2733,7 @@ UPDATE pg_database SET datallowconn = false WHERE datname = 'template0'; - Fix possible corruption of init forks of unlogged indexes + Fix possible corruption of init forks of unlogged indexes (Robert Haas, Michael Paquier) @@ -681,7 +2746,7 @@ UPDATE pg_database SET datallowconn = false WHERE datname = 'template0'; - Fix incorrect reconstruction of pg_subtrans entries + Fix incorrect reconstruction of pg_subtrans entries when a standby server replays a prepared but uncommitted two-phase transaction (Tom Lane) @@ -689,7 +2754,7 @@ UPDATE pg_database SET datallowconn = false WHERE datname = 'template0'; In most cases this turned out to have no visible ill effects, but in corner cases it could result in circular references - in pg_subtrans, potentially causing infinite loops + in pg_subtrans, potentially causing infinite loops in queries that examine rows modified by the two-phase transaction. @@ -704,19 +2769,19 @@ UPDATE pg_database SET datallowconn = false WHERE datname = 'template0'; Due to lack of a cache flush step between commands in an extension script file, non-utility queries might not see the effects of an immediately preceding catalog change, such as ALTER TABLE - ... RENAME. + ... RENAME. Skip tablespace privilege checks when ALTER TABLE ... ALTER - COLUMN TYPE rebuilds an existing index (Noah Misch) + COLUMN TYPE rebuilds an existing index (Noah Misch) The command failed if the calling user did not currently have - CREATE privilege for the tablespace containing the index. 
+ CREATE privilege for the tablespace containing the index. That behavior seems unhelpful, so skip the check, allowing the index to be rebuilt where it is. @@ -724,27 +2789,27 @@ UPDATE pg_database SET datallowconn = false WHERE datname = 'template0'; - Fix ALTER TABLE ... VALIDATE CONSTRAINT to not recurse - to child tables when the constraint is marked NO INHERIT + Fix ALTER TABLE ... VALIDATE CONSTRAINT to not recurse + to child tables when the constraint is marked NO INHERIT (Amit Langote) - This fix prevents unwanted constraint does not exist failures + This fix prevents unwanted constraint does not exist failures when no matching constraint is present in the child tables. - Fix VACUUM to account properly for pages that could not + Fix VACUUM to account properly for pages that could not be scanned due to conflicting page pins (Andrew Gierth) This tended to lead to underestimation of the number of tuples in the table. In the worst case of a small heavily-contended - table, VACUUM could incorrectly report that the table + table, VACUUM could incorrectly report that the table contained no tuples, leading to very bad planning choices. @@ -758,33 +2823,33 @@ UPDATE pg_database SET datallowconn = false WHERE datname = 'template0'; - Fix cursor_to_xml() to produce valid output - with tableforest = false + Fix cursor_to_xml() to produce valid output + with tableforest = false (Thomas Munro, Peter Eisentraut) - Previously it failed to produce a wrapping <table> + Previously it failed to produce a wrapping <table> element. - Improve performance of pg_timezone_names view + Improve performance of pg_timezone_names view (Tom Lane, David Rowley) - Fix sloppy handling of corner-case errors from lseek() - and close() (Tom Lane) + Fix sloppy handling of corner-case errors from lseek() + and close() (Tom Lane) Neither of these system calls are likely to fail in typical situations, - but if they did, fd.c could get quite confused. + but if they did, fd.c could get quite confused. @@ -802,21 +2867,21 @@ UPDATE pg_database SET datallowconn = false WHERE datname = 'template0'; - Fix ecpg to support COMMIT PREPARED - and ROLLBACK PREPARED (Masahiko Sawada) + Fix ecpg to support COMMIT PREPARED + and ROLLBACK PREPARED (Masahiko Sawada) Fix a double-free error when processing dollar-quoted string literals - in ecpg (Michael Meskes) + in ecpg (Michael Meskes) - In pg_dump, fix incorrect schema and owner marking for + In pg_dump, fix incorrect schema and owner marking for comments and security labels of some types of database objects (Giuseppe Broccolo, Tom Lane) @@ -831,20 +2896,20 @@ UPDATE pg_database SET datallowconn = false WHERE datname = 'template0'; - Avoid emitting an invalid list file in pg_restore -l + Avoid emitting an invalid list file in pg_restore -l when SQL object names contain newlines (Tom Lane) Replace newlines by spaces, which is sufficient to make the output - valid for pg_restore -L's purposes. + valid for pg_restore -L's purposes. - Fix pg_upgrade to transfer comments and security labels - attached to large objects (blobs) (Stephen Frost) + Fix pg_upgrade to transfer comments and security labels + attached to large objects (blobs) (Stephen Frost) @@ -856,26 +2921,26 @@ UPDATE pg_database SET datallowconn = false WHERE datname = 'template0'; Improve error handling - in contrib/adminpack's pg_file_write() + in contrib/adminpack's pg_file_write() function (Noah Misch) Notably, it failed to detect errors reported - by fclose(). + by fclose(). 
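      For context, pg_file_write() is the superuser-only file-writing function
      installed by the adminpack extension; a minimal usage sketch (the file
      name is arbitrary and is interpreted relative to the data directory):

      CREATE EXTENSION IF NOT EXISTS adminpack;
      -- Returns the number of bytes written; with the fix, write errors,
      -- including those reported only at fclose() time, are raised instead of
      -- being silently ignored.
      SELECT pg_catalog.pg_file_write('adminpack_test.txt', 'hello', false);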
- In contrib/dblink, avoid leaking the previous unnamed + In contrib/dblink, avoid leaking the previous unnamed connection when establishing a new unnamed connection (Joe Conway) - Fix contrib/pg_trgm's extraction of trigrams from regular + Fix contrib/pg_trgm's extraction of trigrams from regular expressions (Tom Lane) @@ -888,7 +2953,7 @@ UPDATE pg_database SET datallowconn = false WHERE datname = 'template0'; - In contrib/postgres_fdw, + In contrib/postgres_fdw, transmit query cancellation requests to the remote server (Michael Paquier, Etsuro Fujita) @@ -930,7 +2995,7 @@ UPDATE pg_database SET datallowconn = false WHERE datname = 'template0'; - Update time zone data files to tzdata release 2017b + Update time zone data files to tzdata release 2017b for DST law changes in Chile, Haiti, and Mongolia, plus historical corrections for Ecuador, Kazakhstan, Liberia, and Spain. Switch to numeric abbreviations for numerous time zones in South @@ -944,9 +3009,9 @@ UPDATE pg_database SET datallowconn = false WHERE datname = 'template0'; or no currency among the local population. They are in process of reversing that policy in favor of using numeric UTC offsets in zones where there is no evidence of real-world use of an English - abbreviation. At least for the time being, PostgreSQL + abbreviation. At least for the time being, PostgreSQL will continue to accept such removed abbreviations for timestamp input. - But they will not be shown in the pg_timezone_names + But they will not be shown in the pg_timezone_names view nor used for output. @@ -959,16 +3024,16 @@ UPDATE pg_database SET datallowconn = false WHERE datname = 'template0'; The Microsoft MSVC build scripts neglected to install - the posixrules file in the timezone directory tree. + the posixrules file in the timezone directory tree. This resulted in the timezone code falling back to its built-in rule about what DST behavior to assume for a POSIX-style time zone name. For historical reasons that still corresponds to the DST rules the USA was using before 2007 (i.e., change on first Sunday in April and last Sunday in October). With this fix, a POSIX-style zone name will use the current and historical DST transition dates of - the US/Eastern zone. If you don't want that, remove - the posixrules file, or replace it with a copy of some - other zone file (see ). Note that + the US/Eastern zone. If you don't want that, remove + the posixrules file, or replace it with a copy of some + other zone file (see ). Note that due to caching, you may need to restart the server to get such changes to take effect. @@ -990,7 +3055,7 @@ UPDATE pg_database SET datallowconn = false WHERE datname = 'template0'; This release contains a variety of fixes from 9.3.15. For information about new features in the 9.3 major release, see - . + . @@ -1008,7 +3073,7 @@ UPDATE pg_database SET datallowconn = false WHERE datname = 'template0'; Also, if you are upgrading from a version earlier than 9.3.15, - see . + see . 
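      The practical effect is visible in the view itself; a small sketch of
      checking what the server reports for a zone (the zone name here is chosen
      arbitrarily from the affected regions):

      -- Zones whose English abbreviations were dropped upstream now show a
      -- numeric UTC offset in the abbrev column instead.
      SELECT name, abbrev, utc_offset, is_dst
      FROM pg_timezone_names
      WHERE name = 'America/La_Paz';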
@@ -1021,15 +3086,15 @@ UPDATE pg_database SET datallowconn = false WHERE datname = 'template0'; Fix a race condition that could cause indexes built - with CREATE INDEX CONCURRENTLY to be corrupt + with CREATE INDEX CONCURRENTLY to be corrupt (Pavan Deolasee, Tom Lane) - If CREATE INDEX CONCURRENTLY was used to build an index + If CREATE INDEX CONCURRENTLY was used to build an index that depends on a column not previously indexed, then rows updated by transactions that ran concurrently with - the CREATE INDEX command could have received incorrect + the CREATE INDEX command could have received incorrect index entries. If you suspect this may have happened, the most reliable solution is to rebuild affected indexes after installing this update. @@ -1038,13 +3103,13 @@ UPDATE pg_database SET datallowconn = false WHERE datname = 'template0'; - Unconditionally WAL-log creation of the init fork for an + Unconditionally WAL-log creation of the init fork for an unlogged table (Michael Paquier) - Previously, this was skipped when - = minimal, but actually it's necessary even in that case + Previously, this was skipped when + = minimal, but actually it's necessary even in that case to ensure that the unlogged table is properly reset to empty after a crash. @@ -1098,13 +3163,13 @@ UPDATE pg_database SET datallowconn = false WHERE datname = 'template0'; - Make sure ALTER TABLE preserves index tablespace + Make sure ALTER TABLE preserves index tablespace assignments when rebuilding indexes (Tom Lane, Michael Paquier) Previously, non-default settings - of could result in broken + of could result in broken indexes. @@ -1116,15 +3181,15 @@ UPDATE pg_database SET datallowconn = false WHERE datname = 'template0'; - This avoids could not find trigger NNN - or relation NNN has no triggers errors. + This avoids could not find trigger NNN + or relation NNN has no triggers errors. Fix processing of OID column when a table with OIDs is associated to - a parent with OIDs via ALTER TABLE ... INHERIT (Amit + a parent with OIDs via ALTER TABLE ... INHERIT (Amit Langote) @@ -1138,7 +3203,7 @@ UPDATE pg_database SET datallowconn = false WHERE datname = 'template0'; Report correct object identity during ALTER TEXT SEARCH - CONFIGURATION (Artur Zakirov) + CONFIGURATION (Artur Zakirov) @@ -1168,13 +3233,13 @@ UPDATE pg_database SET datallowconn = false WHERE datname = 'template0'; - Prevent multicolumn expansion of foo.* in - an UPDATE source expression (Tom Lane) + Prevent multicolumn expansion of foo.* in + an UPDATE source expression (Tom Lane) This led to UPDATE target count mismatch --- internal - error. Now the syntax is understood as a whole-row variable, + error. Now the syntax is understood as a whole-row variable, as it would be in other contexts. @@ -1182,12 +3247,12 @@ UPDATE pg_database SET datallowconn = false WHERE datname = 'template0'; Ensure that column typmods are determined accurately for - multi-row VALUES constructs (Tom Lane) + multi-row VALUES constructs (Tom Lane) This fixes problems occurring when the first value in a column has a - determinable typmod (e.g., length for a varchar value) but + determinable typmod (e.g., length for a varchar value) but later values don't share the same limit. 
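      A minimal sketch of the kind of construct affected, where the first row
      suggests a length-limited type that later rows do not fit:

      -- Previously the whole column could be labeled with varchar(4) taken
      -- from the first row; the column's typmod is now determined from all
      -- the rows.
      SELECT *
      FROM (VALUES ('abcd'::varchar(4)), ('abcdefgh')) AS v(t);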
@@ -1202,15 +3267,15 @@ UPDATE pg_database SET datallowconn = false WHERE datname = 'template0'; Normally, a Unicode surrogate leading character must be followed by a Unicode surrogate trailing character, but the check for this was missed if the leading character was the last character in a Unicode - string literal (U&'...') or Unicode identifier - (U&"..."). + string literal (U&'...') or Unicode identifier + (U&"..."). Ensure that a purely negative text search query, such - as !foo, matches empty tsvectors (Tom Dunstan) + as !foo, matches empty tsvectors (Tom Dunstan) @@ -1221,33 +3286,33 @@ UPDATE pg_database SET datallowconn = false WHERE datname = 'template0'; - Prevent crash when ts_rewrite() replaces a non-top-level + Prevent crash when ts_rewrite() replaces a non-top-level subtree with an empty query (Artur Zakirov) - Fix performance problems in ts_rewrite() (Tom Lane) + Fix performance problems in ts_rewrite() (Tom Lane) - Fix ts_rewrite()'s handling of nested NOT operators + Fix ts_rewrite()'s handling of nested NOT operators (Tom Lane) - Fix array_fill() to handle empty arrays properly (Tom Lane) + Fix array_fill() to handle empty arrays properly (Tom Lane) - Fix one-byte buffer overrun in quote_literal_cstr() + Fix one-byte buffer overrun in quote_literal_cstr() (Heikki Linnakangas) @@ -1259,8 +3324,8 @@ UPDATE pg_database SET datallowconn = false WHERE datname = 'template0'; - Prevent multiple calls of pg_start_backup() - and pg_stop_backup() from running concurrently (Michael + Prevent multiple calls of pg_start_backup() + and pg_stop_backup() from running concurrently (Michael Paquier) @@ -1272,15 +3337,15 @@ UPDATE pg_database SET datallowconn = false WHERE datname = 'template0'; - Avoid discarding interval-to-interval casts + Avoid discarding interval-to-interval casts that aren't really no-ops (Tom Lane) In some cases, a cast that should result in zeroing out - low-order interval fields was mistakenly deemed to be a + low-order interval fields was mistakenly deemed to be a no-op and discarded. An example is that casting from INTERVAL - MONTH to INTERVAL YEAR failed to clear the months field. + MONTH to INTERVAL YEAR failed to clear the months field. @@ -1293,14 +3358,14 @@ UPDATE pg_database SET datallowconn = false WHERE datname = 'template0'; - Fix pg_dump to dump user-defined casts and transforms + Fix pg_dump to dump user-defined casts and transforms that use built-in functions (Stephen Frost) - Fix possible pg_basebackup failure on standby + Fix possible pg_basebackup failure on standby server when including WAL files (Amit Kapila, Robert Haas) @@ -1319,21 +3384,21 @@ UPDATE pg_database SET datallowconn = false WHERE datname = 'template0'; - Fix PL/Tcl to support triggers on tables that have .tupno + Fix PL/Tcl to support triggers on tables that have .tupno as a column name (Tom Lane) This matches the (previously undocumented) behavior of - PL/Tcl's spi_exec and spi_execp commands, - namely that a magic .tupno column is inserted only if + PL/Tcl's spi_exec and spi_execp commands, + namely that a magic .tupno column is inserted only if there isn't a real column named that. 
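To make the interval-to-interval cast entry earlier in this group concrete, here is a minimal sketch; the literal is arbitrary, and the point is only that the outer cast may not be discarded as a no-op:

    -- Casting an INTERVAL MONTH value to INTERVAL YEAR is expected to clear
    -- the months field; before the fix the outer cast could be dropped as a
    -- supposed no-op, leaving the months in place.
    SELECT CAST(CAST(interval '2 years 11 months' AS interval month) AS interval year);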
- Allow DOS-style line endings in ~/.pgpass files, + Allow DOS-style line endings in ~/.pgpass files, even on Unix (Vik Fearing) @@ -1345,23 +3410,23 @@ UPDATE pg_database SET datallowconn = false WHERE datname = 'template0'; - Fix one-byte buffer overrun if ecpg is given a file + Fix one-byte buffer overrun if ecpg is given a file name that ends with a dot (Takayuki Tsunakawa) - Fix psql's tab completion for ALTER DEFAULT - PRIVILEGES (Gilles Darold, Stephen Frost) + Fix psql's tab completion for ALTER DEFAULT + PRIVILEGES (Gilles Darold, Stephen Frost) - In psql, treat an empty or all-blank setting of - the PAGER environment variable as meaning no - pager (Tom Lane) + In psql, treat an empty or all-blank setting of + the PAGER environment variable as meaning no + pager (Tom Lane) @@ -1372,22 +3437,22 @@ UPDATE pg_database SET datallowconn = false WHERE datname = 'template0'; - Improve contrib/dblink's reporting of - low-level libpq errors, such as out-of-memory + Improve contrib/dblink's reporting of + low-level libpq errors, such as out-of-memory (Joe Conway) - Teach contrib/dblink to ignore irrelevant server options - when it uses a contrib/postgres_fdw foreign server as + Teach contrib/dblink to ignore irrelevant server options + when it uses a contrib/postgres_fdw foreign server as the source of connection options (Corey Huinker) Previously, if the foreign server object had options that were not - also libpq connection options, an error occurred. + also libpq connection options, an error occurred. @@ -1413,7 +3478,7 @@ UPDATE pg_database SET datallowconn = false WHERE datname = 'template0'; - Update time zone data files to tzdata release 2016j + Update time zone data files to tzdata release 2016j for DST law changes in northern Cyprus (adding a new zone Asia/Famagusta), Russia (adding a new zone Europe/Saratov), Tonga, and Antarctica/Casey. @@ -1438,7 +3503,7 @@ UPDATE pg_database SET datallowconn = false WHERE datname = 'template0'; This release contains a variety of fixes from 9.3.14. For information about new features in the 9.3 major release, see - . + . @@ -1456,7 +3521,7 @@ UPDATE pg_database SET datallowconn = false WHERE datname = 'template0'; Also, if you are upgrading from a version earlier than 9.3.9, - see . + see . @@ -1477,7 +3542,7 @@ UPDATE pg_database SET datallowconn = false WHERE datname = 'template0'; crash recovery, or to be written incorrectly on a standby server. Bogus entries in a free space map could lead to attempts to access pages that have been truncated away from the relation itself, typically - producing errors like could not read block XXX: + producing errors like could not read block XXX: read only 0 of 8192 bytes. Checksum failures in the visibility map are also possible, if checksumming is enabled. @@ -1485,19 +3550,19 @@ UPDATE pg_database SET datallowconn = false WHERE datname = 'template0'; Procedures for determining whether there is a problem and repairing it if so are discussed at - . + . - Fix SELECT FOR UPDATE/SHARE to correctly lock tuples that + Fix SELECT FOR UPDATE/SHARE to correctly lock tuples that have been updated by a subsequently-aborted transaction (Álvaro Herrera) - In 9.5 and later, the SELECT would sometimes fail to + In 9.5 and later, the SELECT would sometimes fail to return such tuples at all. A failure has not been proven to occur in earlier releases, but might be possible with concurrent updates. 
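The SELECT FOR UPDATE/SHARE entry above is easiest to picture with two sessions; the table name is hypothetical:

    -- Session A updates a row but then aborts:
    BEGIN;
    UPDATE accounts SET balance = balance - 100 WHERE id = 1;
    ROLLBACK;

    -- Session B, running concurrently, must still be able to lock and return
    -- the surviving (original) row version:
    BEGIN;
    SELECT * FROM accounts WHERE id = 1 FOR UPDATE;
    COMMIT;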
@@ -1531,71 +3596,71 @@ UPDATE pg_database SET datallowconn = false WHERE datname = 'template0'; - Fix EXPLAIN to emit valid XML when - is on (Markus Winand) + Fix EXPLAIN to emit valid XML when + is on (Markus Winand) Previously the XML output-format option produced syntactically invalid - tags such as <I/O-Read-Time>. That is now - rendered as <I-O-Read-Time>. + tags such as <I/O-Read-Time>. That is now + rendered as <I-O-Read-Time>. Suppress printing of zeroes for unmeasured times - in EXPLAIN (Maksim Milyutin) + in EXPLAIN (Maksim Milyutin) Certain option combinations resulted in printing zero values for times that actually aren't ever measured in that combination. Our general - policy in EXPLAIN is not to print such fields at all, so + policy in EXPLAIN is not to print such fields at all, so do that consistently in all cases. - Fix timeout length when VACUUM is waiting for exclusive + Fix timeout length when VACUUM is waiting for exclusive table lock so that it can truncate the table (Simon Riggs) The timeout was meant to be 50 milliseconds, but it was actually only - 50 microseconds, causing VACUUM to give up on truncation + 50 microseconds, causing VACUUM to give up on truncation much more easily than intended. Set it to the intended value. - Fix bugs in merging inherited CHECK constraints while + Fix bugs in merging inherited CHECK constraints while creating or altering a table (Tom Lane, Amit Langote) - Allow identical CHECK constraints to be added to a parent + Allow identical CHECK constraints to be added to a parent and child table in either order. Prevent merging of a valid - constraint from the parent table with a NOT VALID + constraint from the parent table with a NOT VALID constraint on the child. Likewise, prevent merging of a NO - INHERIT child constraint with an inherited constraint. + INHERIT child constraint with an inherited constraint. Remove artificial restrictions on the values accepted - by numeric_in() and numeric_recv() + by numeric_in() and numeric_recv() (Tom Lane) We allow numeric values up to the limit of the storage format (more - than 1e100000), so it seems fairly pointless - that numeric_in() rejected scientific-notation exponents - above 1000. Likewise, it was silly for numeric_recv() to + than 1e100000), so it seems fairly pointless + that numeric_in() rejected scientific-notation exponents + above 1000. Likewise, it was silly for numeric_recv() to reject more than 1000 digits in an input value. @@ -1617,7 +3682,7 @@ UPDATE pg_database SET datallowconn = false WHERE datname = 'template0'; - Disallow starting a standalone backend with standby_mode + Disallow starting a standalone backend with standby_mode turned on (Michael Paquier) @@ -1631,7 +3696,7 @@ UPDATE pg_database SET datallowconn = false WHERE datname = 'template0'; Don't try to share SSL contexts across multiple connections - in libpq (Heikki Linnakangas) + in libpq (Heikki Linnakangas) @@ -1642,30 +3707,30 @@ UPDATE pg_database SET datallowconn = false WHERE datname = 'template0'; - Avoid corner-case memory leak in libpq (Tom Lane) + Avoid corner-case memory leak in libpq (Tom Lane) The reported problem involved leaking an error report - during PQreset(), but there might be related cases. + during PQreset(), but there might be related cases. 
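A minimal sketch of the inherited CHECK constraint entry above, with hypothetical table names; identical constraints added to parent and child in either order are expected to merge into a single inherited constraint:

    CREATE TABLE parent (a int);
    CREATE TABLE child () INHERITS (parent);

    -- Add the same constraint to the child first and the parent second (the
    -- reverse order must work too); the two definitions should merge.  Merging
    -- a valid parent constraint with a NOT VALID or NO INHERIT child
    -- constraint is intentionally rejected.
    ALTER TABLE child  ADD CONSTRAINT a_positive CHECK (a > 0);
    ALTER TABLE parent ADD CONSTRAINT a_positive CHECK (a > 0);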
- Make ecpg's and options work consistently with our other executables (Haribabu Kommi) - In pg_dump, never dump range constructor functions + In pg_dump, never dump range constructor functions (Tom Lane) - This oversight led to pg_upgrade failures with + This oversight led to pg_upgrade failures with extensions containing range types, due to duplicate creation of the constructor functions. @@ -1673,8 +3738,8 @@ UPDATE pg_database SET datallowconn = false WHERE datname = 'template0'; - In pg_xlogdump, retry opening new WAL segments when - using option (Magnus Hagander) @@ -1685,7 +3750,7 @@ UPDATE pg_database SET datallowconn = false WHERE datname = 'template0'; - Fix pg_xlogdump to cope with a WAL file that begins + Fix pg_xlogdump to cope with a WAL file that begins with a continuation record spanning more than one page (Pavan Deolasee) @@ -1693,8 +3758,8 @@ UPDATE pg_database SET datallowconn = false WHERE datname = 'template0'; - Fix contrib/intarray/bench/bench.pl to print the results - of the EXPLAIN it does when given the option (Daniel Gustafsson) @@ -1715,17 +3780,17 @@ UPDATE pg_database SET datallowconn = false WHERE datname = 'template0'; If a dynamic time zone abbreviation does not match any entry in the referenced time zone, treat it as equivalent to the time zone name. This avoids unexpected failures when IANA removes abbreviations from - their time zone database, as they did in tzdata + their time zone database, as they did in tzdata release 2016f and seem likely to do again in the future. The consequences were not limited to not recognizing the individual abbreviation; any mismatch caused - the pg_timezone_abbrevs view to fail altogether. + the pg_timezone_abbrevs view to fail altogether. - Update time zone data files to tzdata release 2016h + Update time zone data files to tzdata release 2016h for DST law changes in Palestine and Turkey, plus historical corrections for Turkey and some regions of Russia. Switch to numeric abbreviations for some time zones in Antarctica, @@ -1738,15 +3803,15 @@ UPDATE pg_database SET datallowconn = false WHERE datname = 'template0'; or no currency among the local population. They are in process of reversing that policy in favor of using numeric UTC offsets in zones where there is no evidence of real-world use of an English - abbreviation. At least for the time being, PostgreSQL + abbreviation. At least for the time being, PostgreSQL will continue to accept such removed abbreviations for timestamp input. - But they will not be shown in the pg_timezone_names + But they will not be shown in the pg_timezone_names view nor used for output. - In this update, AMT is no longer shown as being in use to - mean Armenia Time. Therefore, we have changed the Default + In this update, AMT is no longer shown as being in use to + mean Armenia Time. Therefore, we have changed the Default abbreviation set to interpret it as Amazon Time, thus UTC-4 not UTC+4. @@ -1767,7 +3832,7 @@ UPDATE pg_database SET datallowconn = false WHERE datname = 'template0'; This release contains a variety of fixes from 9.3.13. For information about new features in the 9.3 major release, see - . + . @@ -1779,7 +3844,7 @@ UPDATE pg_database SET datallowconn = false WHERE datname = 'template0'; However, if you are upgrading from a version earlier than 9.3.9, - see . + see . 
@@ -1792,17 +3857,17 @@ UPDATE pg_database SET datallowconn = false WHERE datname = 'template0';

Fix possible mis-evaluation of nested CASE-WHEN expressions (Heikki Linnakangas, Michael Paquier, Tom Lane)

A CASE expression appearing within the test value subexpression of another CASE could become confused about whether its own test value was null or not. Also, inlining of a SQL function implementing the equality operator used by a CASE expression could result in passing the wrong test value to functions called within a CASE expression in the SQL function's body. If the test values were of different data types, a crash might result; moreover such situations could be abused to allow disclosure of portions of server memory. (CVE-2016-5423)

@@ -1816,7 +3881,7 @@ UPDATE pg_database SET datallowconn = false WHERE datname = 'template0';

Numerous places in vacuumdb and other client programs could become confused by database and role names containing double quotes or backslashes. Tighten up quoting rules to make that safe. Also, ensure that when a conninfo string is used as a database name

@@ -1825,22 +3890,22 @@ UPDATE pg_database SET datallowconn = false WHERE datname = 'template0';

Fix handling of paired double quotes in psql's \connect and \password commands to match the documentation.

Introduce a new

pg_dumpall now refuses to deal with database and role names containing carriage returns or newlines, as it seems impractical to quote those characters safely on Windows. In future we may reject such names on the server side, but that step has not been taken yet.

@@ -1850,40 +3915,40 @@ UPDATE pg_database SET datallowconn = false WHERE datname = 'template0';

These are considered security fixes because crafted object names containing special characters could have been used to execute commands with superuser privileges the next time a superuser executes pg_dumpall or other routine maintenance operations. (CVE-2016-5424)

Fix corner-case misbehaviors for IS NULL/IS NOT NULL applied to nested composite values (Andrew Gierth, Tom Lane)

The SQL standard specifies that IS NULL should return TRUE for a row of all null values (thus ROW(NULL,NULL) IS NULL yields TRUE), but this is not meant to apply recursively (thus ROW(NULL, ROW(NULL,NULL)) IS NULL yields FALSE). The core executor got this right, but certain planner optimizations treated the test as recursive (thus producing TRUE in both cases), and contrib/postgres_fdw could produce remote queries that misbehaved similarly.
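The IS NULL entry above comes down to the following two queries, which must give different answers:

    -- A row whose fields are all NULL tests as IS NULL per the SQL standard:
    SELECT ROW(NULL, NULL) IS NULL;              -- true
    -- The test is not recursive: the inner ROW is itself a non-null composite
    -- value, so the outer row is not all-null.
    SELECT ROW(NULL, ROW(NULL, NULL)) IS NULL;   -- false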
- Make the inet and cidr data types properly reject + Make the inet and cidr data types properly reject IPv6 addresses with too many colon-separated fields (Tom Lane) - Prevent crash in close_ps() - (the point ## lseg operator) + Prevent crash in close_ps() + (the point ## lseg operator) for NaN input coordinates (Tom Lane) @@ -1894,19 +3959,19 @@ UPDATE pg_database SET datallowconn = false WHERE datname = 'template0'; - Avoid possible crash in pg_get_expr() when inconsistent + Avoid possible crash in pg_get_expr() when inconsistent values are passed to it (Michael Paquier, Thomas Munro) - Fix several one-byte buffer over-reads in to_number() + Fix several one-byte buffer over-reads in to_number() (Peter Eisentraut) - In several cases the to_number() function would read one + In several cases the to_number() function would read one more character than it should from the input string. There is a small chance of a crash, if the input happens to be adjacent to the end of memory. @@ -1916,8 +3981,8 @@ UPDATE pg_database SET datallowconn = false WHERE datname = 'template0'; Do not run the planner on the query contained in CREATE - MATERIALIZED VIEW or CREATE TABLE AS - when WITH NO DATA is specified (Michael Paquier, + MATERIALIZED VIEW or CREATE TABLE AS + when WITH NO DATA is specified (Michael Paquier, Tom Lane) @@ -1931,7 +3996,7 @@ UPDATE pg_database SET datallowconn = false WHERE datname = 'template0'; Avoid unsafe intermediate state during expensive paths - through heap_update() (Masahiko Sawada, Andres Freund) + through heap_update() (Masahiko Sawada, Andres Freund) @@ -1957,15 +4022,15 @@ UPDATE pg_database SET datallowconn = false WHERE datname = 'template0'; - Avoid unnecessary could not serialize access errors when - acquiring FOR KEY SHARE row locks in serializable mode + Avoid unnecessary could not serialize access errors when + acquiring FOR KEY SHARE row locks in serializable mode (Álvaro Herrera) - Avoid crash in postgres -C when the specified variable + Avoid crash in postgres -C when the specified variable has a null string value (Michael Paquier) @@ -1994,12 +4059,12 @@ UPDATE pg_database SET datallowconn = false WHERE datname = 'template0'; - Avoid consuming a transaction ID during VACUUM + Avoid consuming a transaction ID during VACUUM (Alexander Korotkov) - Some cases in VACUUM unnecessarily caused an XID to be + Some cases in VACUUM unnecessarily caused an XID to be assigned to the current transaction. Normally this is negligible, but if one is up against the XID wraparound limit, consuming more XIDs during anti-wraparound vacuums is a very bad thing. @@ -2008,12 +4073,12 @@ UPDATE pg_database SET datallowconn = false WHERE datname = 'template0'; - Avoid canceling hot-standby queries during VACUUM FREEZE + Avoid canceling hot-standby queries during VACUUM FREEZE (Simon Riggs, Álvaro Herrera) - VACUUM FREEZE on an otherwise-idle master server could + VACUUM FREEZE on an otherwise-idle master server could result in unnecessary cancellations of queries on its standby servers. @@ -2028,15 +4093,15 @@ UPDATE pg_database SET datallowconn = false WHERE datname = 'template0'; The usual symptom of this bug is errors - like MultiXactId NNN has not been created + like MultiXactId NNN has not been created yet -- apparent wraparound. 
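For the WITH NO DATA entry in this group, a minimal sketch (relation and column names are hypothetical): the query is parsed and analyzed at creation time, but planning and execution are deferred until the view is populated:

    -- No plan is built here, so planner-stage work (and failures) cannot
    -- happen yet:
    CREATE MATERIALIZED VIEW daily_totals AS
        SELECT day, sum(amount) AS total
        FROM   sales
        GROUP  BY day
    WITH NO DATA;

    -- Planning and execution happen when the view is populated instead:
    REFRESH MATERIALIZED VIEW daily_totals;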
- When a manual ANALYZE specifies a column list, don't - reset the table's changes_since_analyze counter + When a manual ANALYZE specifies a column list, don't + reset the table's changes_since_analyze counter (Tom Lane) @@ -2048,7 +4113,7 @@ UPDATE pg_database SET datallowconn = false WHERE datname = 'template0'; - Fix ANALYZE's overestimation of n_distinct + Fix ANALYZE's overestimation of n_distinct for a unique or nearly-unique column with many null entries (Tom Lane) @@ -2083,8 +4148,8 @@ UPDATE pg_database SET datallowconn = false WHERE datname = 'template0'; - Fix contrib/btree_gin to handle the smallest - possible bigint value correctly (Peter Eisentraut) + Fix contrib/btree_gin to handle the smallest + possible bigint value correctly (Peter Eisentraut) @@ -2097,53 +4162,53 @@ UPDATE pg_database SET datallowconn = false WHERE datname = 'template0'; It's planned to switch to two-part instead of three-part server version numbers for releases after 9.6. Make sure - that PQserverVersion() returns the correct value for + that PQserverVersion() returns the correct value for such cases. - Fix ecpg's code for unsigned long long + Fix ecpg's code for unsigned long long array elements (Michael Meskes) - In pg_dump with both - Improve handling of SIGTERM/control-C in - parallel pg_dump and pg_restore (Tom + Improve handling of SIGTERM/control-C in + parallel pg_dump and pg_restore (Tom Lane) Make sure that the worker processes will exit promptly, and also arrange to send query-cancel requests to the connected backends, in case they - are doing something long-running such as a CREATE INDEX. + are doing something long-running such as a CREATE INDEX. - Fix error reporting in parallel pg_dump - and pg_restore (Tom Lane) + Fix error reporting in parallel pg_dump + and pg_restore (Tom Lane) - Previously, errors reported by pg_dump - or pg_restore worker processes might never make it to + Previously, errors reported by pg_dump + or pg_restore worker processes might never make it to the user's console, because the messages went through the master process, and there were various deadlock scenarios that would prevent the master process from passing on the messages. Instead, just print - everything to stderr. In some cases this will result in + everything to stderr. In some cases this will result in duplicate messages (for instance, if all the workers report a server shutdown), but that seems better than no message. 
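The contrib/btree_gin entry above concerns the boundary value of bigint; a small sketch with hypothetical names:

    CREATE EXTENSION IF NOT EXISTS btree_gin;
    CREATE TABLE gin_vals (v bigint);
    CREATE INDEX gin_vals_idx ON gin_vals USING gin (v);

    -- The smallest possible bigint must be indexed and looked up correctly
    -- (written as a quoted literal to avoid integer-literal overflow quirks):
    INSERT INTO gin_vals VALUES ('-9223372036854775808'::bigint);
    SELECT * FROM gin_vals WHERE v = '-9223372036854775808'::bigint;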
@@ -2151,8 +4216,8 @@ UPDATE pg_database SET datallowconn = false WHERE datname = 'template0'; - Ensure that parallel pg_dump - or pg_restore on Windows will shut down properly + Ensure that parallel pg_dump + or pg_restore on Windows will shut down properly after an error (Kyotaro Horiguchi) @@ -2164,7 +4229,7 @@ UPDATE pg_database SET datallowconn = false WHERE datname = 'template0'; - Make pg_dump behave better when built without zlib + Make pg_dump behave better when built without zlib support (Kyotaro Horiguchi) @@ -2176,7 +4241,7 @@ UPDATE pg_database SET datallowconn = false WHERE datname = 'template0'; - Make pg_basebackup accept -Z 0 as + Make pg_basebackup accept -Z 0 as specifying no compression (Fujii Masao) @@ -2197,13 +4262,13 @@ UPDATE pg_database SET datallowconn = false WHERE datname = 'template0'; - Be more predictable about reporting statement timeout - versus lock timeout (Tom Lane) + Be more predictable about reporting statement timeout + versus lock timeout (Tom Lane) On heavily loaded machines, the regression tests sometimes failed due - to reporting lock timeout even though the statement timeout + to reporting lock timeout even though the statement timeout should have occurred first. @@ -2223,7 +4288,7 @@ UPDATE pg_database SET datallowconn = false WHERE datname = 'template0'; Update our copy of the timezone code to match - IANA's tzcode release 2016c (Tom Lane) + IANA's tzcode release 2016c (Tom Lane) @@ -2235,7 +4300,7 @@ UPDATE pg_database SET datallowconn = false WHERE datname = 'template0'; - Update time zone data files to tzdata release 2016f + Update time zone data files to tzdata release 2016f for DST law changes in Kemerovo and Novosibirsk, plus historical corrections for Azerbaijan, Belarus, and Morocco. @@ -2257,7 +4322,7 @@ UPDATE pg_database SET datallowconn = false WHERE datname = 'template0'; This release contains a variety of fixes from 9.3.12. For information about new features in the 9.3 major release, see - . + . @@ -2269,7 +4334,7 @@ UPDATE pg_database SET datallowconn = false WHERE datname = 'template0'; However, if you are upgrading from a version earlier than 9.3.9, - see . + see . @@ -2291,7 +4356,7 @@ UPDATE pg_database SET datallowconn = false WHERE datname = 'template0'; using OpenSSL within a single process and not all the code involved follows the same rules for when to clear the error queue. Failures have been reported specifically when a client application - uses SSL connections in libpq concurrently with + uses SSL connections in libpq concurrently with SSL connections using the PHP, Python, or Ruby wrappers for OpenSSL. It's possible for similar problems to arise within the server as well, if an extension module establishes an outgoing SSL connection. @@ -2300,7 +4365,7 @@ UPDATE pg_database SET datallowconn = false WHERE datname = 'template0'; - Fix failed to build any N-way joins + Fix failed to build any N-way joins planner error with a full join enclosed in the right-hand side of a left join (Tom Lane) @@ -2314,10 +4379,10 @@ UPDATE pg_database SET datallowconn = false WHERE datname = 'template0'; Given a three-or-more-way equivalence class of variables, such - as X.X = Y.Y = Z.Z, it was possible for the planner to omit + as X.X = Y.Y = Z.Z, it was possible for the planner to omit some of the tests needed to enforce that all the variables are actually equal, leading to join rows being output that didn't satisfy - the WHERE clauses. For various reasons, erroneous plans + the WHERE clauses. 
For various reasons, erroneous plans were seldom selected in practice, so that this bug has gone undetected for a long time. @@ -2325,8 +4390,8 @@ UPDATE pg_database SET datallowconn = false WHERE datname = 'template0'; - Fix possible misbehavior of TH, th, - and Y,YYY format codes in to_timestamp() + Fix possible misbehavior of TH, th, + and Y,YYY format codes in to_timestamp() (Tom Lane) @@ -2338,28 +4403,28 @@ UPDATE pg_database SET datallowconn = false WHERE datname = 'template0'; - Fix dumping of rules and views in which the array - argument of a value operator - ANY (array) construct is a sub-SELECT + Fix dumping of rules and views in which the array + argument of a value operator + ANY (array) construct is a sub-SELECT (Tom Lane) - Make pg_regress use a startup timeout from the - PGCTLTIMEOUT environment variable, if that's set (Tom Lane) + Make pg_regress use a startup timeout from the + PGCTLTIMEOUT environment variable, if that's set (Tom Lane) This is for consistency with a behavior recently added - to pg_ctl; it eases automated testing on slow machines. + to pg_ctl; it eases automated testing on slow machines. - Fix pg_upgrade to correctly restore extension + Fix pg_upgrade to correctly restore extension membership for operator families containing only one operator class (Tom Lane) @@ -2367,20 +4432,20 @@ UPDATE pg_database SET datallowconn = false WHERE datname = 'template0'; In such a case, the operator family was restored into the new database, but it was no longer marked as part of the extension. This had no - immediate ill effects, but would cause later pg_dump + immediate ill effects, but would cause later pg_dump runs to emit output that would cause (harmless) errors on restore. - Fix pg_upgrade to not fail when new-cluster TOAST rules + Fix pg_upgrade to not fail when new-cluster TOAST rules differ from old (Tom Lane) - pg_upgrade had special-case code to handle the - situation where the new PostgreSQL version thinks that + pg_upgrade had special-case code to handle the + situation where the new PostgreSQL version thinks that a table should have a TOAST table while the old version did not. That code was broken, so remove it, and instead do nothing in such cases; there seems no reason to believe that we can't get along fine without @@ -2415,22 +4480,22 @@ Branch: REL9_2_STABLE [37f30b251] 2016-04-18 13:19:52 -0400 Reduce the number of SysV semaphores used by a build configured with - (Tom Lane) - Rename internal function strtoi() - to strtoint() to avoid conflict with a NetBSD library + Rename internal function strtoi() + to strtoint() to avoid conflict with a NetBSD library function (Thomas Munro) - Fix reporting of errors from bind() - and listen() system calls on Windows (Tom Lane) + Fix reporting of errors from bind() + and listen() system calls on Windows (Tom Lane) @@ -2443,19 +4508,19 @@ Branch: REL9_2_STABLE [37f30b251] 2016-04-18 13:19:52 -0400 - Fix putenv() to work properly with Visual Studio 2013 + Fix putenv() to work properly with Visual Studio 2013 (Michael Paquier) - Avoid possibly-unsafe use of Windows' FormatMessage() + Avoid possibly-unsafe use of Windows' FormatMessage() function (Christian Ullrich) - Use the FORMAT_MESSAGE_IGNORE_INSERTS flag where + Use the FORMAT_MESSAGE_IGNORE_INSERTS flag where appropriate. No live bug is known to exist here, but it seems like a good idea to be careful. 
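The rule/view dumping entry earlier in this group is about the value = ANY (array) construct whose array argument is a sub-SELECT; a hypothetical example of the kind of view whose dump was affected:

    -- Hypothetical tables; the doubled parentheses make the sub-SELECT a
    -- scalar subquery supplying the array argument of "= ANY", which is the
    -- shape pg_dump must deparse correctly.
    CREATE TABLE allowed (ids int[]);
    CREATE TABLE items   (id int, label text);

    CREATE VIEW visible_items AS
        SELECT *
        FROM   items
        WHERE  id = ANY ((SELECT ids FROM allowed LIMIT 1));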
@@ -2463,9 +4528,9 @@ Branch: REL9_2_STABLE [37f30b251] 2016-04-18 13:19:52 -0400

Update time zone data files to tzdata release 2016d for DST law changes in Russia and Venezuela. There are new zone names Europe/Kirov and Asia/Tomsk to reflect the fact that these regions now have different time zone histories from adjacent regions.

@@ -2487,7 +4552,7 @@ Branch: REL9_2_STABLE [37f30b251] 2016-04-18 13:19:52 -0400

This release contains a variety of fixes from 9.3.11. For information about new features in the 9.3 major release, see .

@@ -2499,7 +4564,7 @@ Branch: REL9_2_STABLE [37f30b251] 2016-04-18 13:19:52 -0400

However, if you are upgrading from a version earlier than 9.3.9, see .

@@ -2512,56 +4577,56 @@ Branch: REL9_2_STABLE [37f30b251] 2016-04-18 13:19:52 -0400

Fix incorrect handling of NULL index entries in indexed ROW() comparisons (Tom Lane)

An index search using a row comparison such as ROW(a, b) > ROW('x', 'y') would stop upon reaching a NULL entry in the b column, ignoring the fact that there might be non-NULL b values associated with later values of a.

Avoid unlikely data-loss scenarios due to renaming files without adequate fsync() calls before and after (Michael Paquier, Tomas Vondra, Andres Freund)

Correctly handle cases where pg_subtrans is close to XID wraparound during server startup (Jeff Janes)

Fix corner-case crash due to trying to free localeconv() output strings more than once (Tom Lane)

Fix parsing of affix files for ispell dictionaries (Tom Lane)

The code could go wrong if the affix file contained any characters whose byte length changes during case-folding, for example I in Turkish UTF8 locales.

Avoid use of sscanf() to parse ispell dictionary files (Artur Zakirov)

@@ -2587,27 +4652,27 @@ Branch: REL9_2_STABLE [37f30b251] 2016-04-18 13:19:52 -0400

Fix psql's tab completion logic to handle multibyte characters properly (Kyotaro Horiguchi, Robert Haas)

Fix psql's tab completion for SECURITY LABEL (Tom Lane)

Pressing TAB after SECURITY LABEL might cause a crash or offering of inappropriate keywords.

Make pg_ctl accept a wait timeout from the PGCTLTIMEOUT environment variable, if none is specified on the command line (Noah Misch)

@@ -2621,26 +4686,26 @@ Branch: REL9_2_STABLE [37f30b251] 2016-04-18 13:19:52 -0400

Fix incorrect test for Windows service status in pg_ctl (Manuel Mathar)

The previous set of minor releases attempted to fix pg_ctl to properly determine whether to send log messages to Windows' Event Log, but got the test backwards.
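The row-comparison entry above typically shows up in keyset-pagination queries; a minimal sketch with hypothetical names:

    CREATE TABLE pages (a text, b text);
    CREATE INDEX pages_a_b_idx ON pages (a, b);

    -- A NULL stored in column b must not make the index scan stop early;
    -- rows with later values of a still qualify.
    SELECT a, b
    FROM   pages
    WHERE  (a, b) > ('x', 'y')
    ORDER  BY a, b
    LIMIT  10;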
- Fix pgbench to correctly handle the combination - of -C and -M prepared options (Tom Lane) + Fix pgbench to correctly handle the combination + of -C and -M prepared options (Tom Lane) - In pg_upgrade, skip creating a deletion script when + In pg_upgrade, skip creating a deletion script when the new data directory is inside the old data directory (Bruce Momjian) @@ -2668,21 +4733,21 @@ Branch: REL9_2_STABLE [37f30b251] 2016-04-18 13:19:52 -0400 Fix multiple mistakes in the statistics returned - by contrib/pgstattuple's pgstatindex() + by contrib/pgstattuple's pgstatindex() function (Tom Lane) - Remove dependency on psed in MSVC builds, since it's no + Remove dependency on psed in MSVC builds, since it's no longer provided by core Perl (Michael Paquier, Andrew Dunstan) - Update time zone data files to tzdata release 2016c + Update time zone data files to tzdata release 2016c for DST law changes in Azerbaijan, Chile, Haiti, Palestine, and Russia (Altai, Astrakhan, Kirov, Sakhalin, Ulyanovsk regions), plus historical corrections for Lithuania, Moldova, and Russia @@ -2706,7 +4771,7 @@ Branch: REL9_2_STABLE [37f30b251] 2016-04-18 13:19:52 -0400 This release contains a variety of fixes from 9.3.10. For information about new features in the 9.3 major release, see - . + . @@ -2718,7 +4783,7 @@ Branch: REL9_2_STABLE [37f30b251] 2016-04-18 13:19:52 -0400 However, if you are upgrading from a version earlier than 9.3.9, - see . + see . @@ -2743,25 +4808,25 @@ Branch: REL9_2_STABLE [37f30b251] 2016-04-18 13:19:52 -0400 - Perform an immediate shutdown if the postmaster.pid file + Perform an immediate shutdown if the postmaster.pid file is removed (Tom Lane) The postmaster now checks every minute or so - that postmaster.pid is still there and still contains its + that postmaster.pid is still there and still contains its own PID. If not, it performs an immediate shutdown, as though it had - received SIGQUIT. The main motivation for this change + received SIGQUIT. The main motivation for this change is to ensure that failed buildfarm runs will get cleaned up without manual intervention; but it also serves to limit the bad effects if a - DBA forcibly removes postmaster.pid and then starts a new + DBA forcibly removes postmaster.pid and then starts a new postmaster. - In SERIALIZABLE transaction isolation mode, serialization + In SERIALIZABLE transaction isolation mode, serialization anomalies could be missed due to race conditions during insertions (Kevin Grittner, Thomas Munro) @@ -2770,7 +4835,7 @@ Branch: REL9_2_STABLE [37f30b251] 2016-04-18 13:19:52 -0400 Fix failure to emit appropriate WAL records when doing ALTER - TABLE ... SET TABLESPACE for unlogged relations (Michael Paquier, + TABLE ... 
SET TABLESPACE for unlogged relations (Michael Paquier, Andres Freund) @@ -2796,21 +4861,21 @@ Branch: REL9_2_STABLE [37f30b251] 2016-04-18 13:19:52 -0400 - Fix ALTER COLUMN TYPE to reconstruct inherited check + Fix ALTER COLUMN TYPE to reconstruct inherited check constraints properly (Tom Lane) - Fix REASSIGN OWNED to change ownership of composite types + Fix REASSIGN OWNED to change ownership of composite types properly (Álvaro Herrera) - Fix REASSIGN OWNED and ALTER OWNER to correctly + Fix REASSIGN OWNED and ALTER OWNER to correctly update granted-permissions lists when changing owners of data types, foreign data wrappers, or foreign servers (Bruce Momjian, Álvaro Herrera) @@ -2819,7 +4884,7 @@ Branch: REL9_2_STABLE [37f30b251] 2016-04-18 13:19:52 -0400 - Fix REASSIGN OWNED to ignore foreign user mappings, + Fix REASSIGN OWNED to ignore foreign user mappings, rather than fail (Álvaro Herrera) @@ -2833,13 +4898,13 @@ Branch: REL9_2_STABLE [37f30b251] 2016-04-18 13:19:52 -0400 - Fix planner's handling of LATERAL references (Tom + Fix planner's handling of LATERAL references (Tom Lane) This fixes some corner cases that led to failed to build any - N-way joins or could not devise a query plan planner + N-way joins or could not devise a query plan planner failures. @@ -2861,22 +4926,22 @@ Branch: REL9_2_STABLE [37f30b251] 2016-04-18 13:19:52 -0400 - Speed up generation of unique table aliases in EXPLAIN and + Speed up generation of unique table aliases in EXPLAIN and rule dumping, and ensure that generated aliases do not - exceed NAMEDATALEN (Tom Lane) + exceed NAMEDATALEN (Tom Lane) - Fix dumping of whole-row Vars in ROW() - and VALUES() lists (Tom Lane) + Fix dumping of whole-row Vars in ROW() + and VALUES() lists (Tom Lane) - Fix possible internal overflow in numeric division + Fix possible internal overflow in numeric division (Dean Rasheed) @@ -2928,7 +4993,7 @@ Branch: REL9_2_STABLE [37f30b251] 2016-04-18 13:19:52 -0400 This causes the code to emit regular expression is too - complex errors in some cases that previously used unreasonable + complex errors in some cases that previously used unreasonable amounts of time and memory. @@ -2941,14 +5006,14 @@ Branch: REL9_2_STABLE [37f30b251] 2016-04-18 13:19:52 -0400 - Make %h and %r escapes - in log_line_prefix work for messages emitted due - to log_connections (Tom Lane) + Make %h and %r escapes + in log_line_prefix work for messages emitted due + to log_connections (Tom Lane) - Previously, %h/%r started to work just after a - new session had emitted the connection received log message; + Previously, %h/%r started to work just after a + new session had emitted the connection received log message; now they work for that message too. @@ -2961,7 +5026,7 @@ Branch: REL9_2_STABLE [37f30b251] 2016-04-18 13:19:52 -0400 This oversight resulted in failure to recover from crashes - whenever logging_collector is turned on. + whenever logging_collector is turned on. @@ -2987,13 +5052,13 @@ Branch: REL9_2_STABLE [37f30b251] 2016-04-18 13:19:52 -0400 - In psql, ensure that libreadline's idea + In psql, ensure that libreadline's idea of the screen size is updated when the terminal window size changes (Merlin Moncure) - Previously, libreadline did not notice if the window + Previously, libreadline did not notice if the window was resized during query output, leading to strange behavior during later input of multiline queries. 
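The LATERAL planner entry above refers to queries of roughly this shape (table names hypothetical), which previously could fail with the quoted planner errors:

    CREATE TABLE orders      (id int PRIMARY KEY, placed_at timestamptz);
    CREATE TABLE order_items (order_id int, sku text, qty int);

    -- Each row of "orders" drives a separate, correlated evaluation of the
    -- LATERAL subquery to its right.
    SELECT o.id, i.sku, i.qty
    FROM   orders o
           LEFT JOIN LATERAL (
               SELECT sku, qty
               FROM   order_items oi
               WHERE  oi.order_id = o.id
               ORDER  BY qty DESC
               LIMIT  3
           ) i ON true;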
@@ -3001,15 +5066,15 @@ Branch: REL9_2_STABLE [37f30b251] 2016-04-18 13:19:52 -0400 - Fix psql's \det command to interpret its - pattern argument the same way as other \d commands with + Fix psql's \det command to interpret its + pattern argument the same way as other \d commands with potentially schema-qualified patterns do (Reece Hart) - Avoid possible crash in psql's \c command + Avoid possible crash in psql's \c command when previous connection was via Unix socket and command specifies a new hostname and same username (Tom Lane) @@ -3017,21 +5082,21 @@ Branch: REL9_2_STABLE [37f30b251] 2016-04-18 13:19:52 -0400 - In pg_ctl start -w, test child process status directly + In pg_ctl start -w, test child process status directly rather than relying on heuristics (Tom Lane, Michael Paquier) - Previously, pg_ctl relied on an assumption that the new - postmaster would always create postmaster.pid within five + Previously, pg_ctl relied on an assumption that the new + postmaster would always create postmaster.pid within five seconds. But that can fail on heavily-loaded systems, - causing pg_ctl to report incorrectly that the + causing pg_ctl to report incorrectly that the postmaster failed to start. Except on Windows, this change also means that a pg_ctl start - -w done immediately after another such command will now reliably + -w done immediately after another such command will now reliably fail, whereas previously it would report success if done within two seconds of the first command. @@ -3039,23 +5104,23 @@ Branch: REL9_2_STABLE [37f30b251] 2016-04-18 13:19:52 -0400 - In pg_ctl start -w, don't attempt to use a wildcard listen + In pg_ctl start -w, don't attempt to use a wildcard listen address to connect to the postmaster (Kondo Yuta) - On Windows, pg_ctl would fail to detect postmaster - startup if listen_addresses is set to 0.0.0.0 - or ::, because it would try to use that value verbatim as + On Windows, pg_ctl would fail to detect postmaster + startup if listen_addresses is set to 0.0.0.0 + or ::, because it would try to use that value verbatim as the address to connect to, which doesn't work. Instead assume - that 127.0.0.1 or ::1, respectively, is the + that 127.0.0.1 or ::1, respectively, is the right thing to use. - In pg_ctl on Windows, check service status to decide + In pg_ctl on Windows, check service status to decide where to send output, rather than checking if standard output is a terminal (Michael Paquier) @@ -3063,18 +5128,18 @@ Branch: REL9_2_STABLE [37f30b251] 2016-04-18 13:19:52 -0400 - In pg_dump and pg_basebackup, adopt + In pg_dump and pg_basebackup, adopt the GNU convention for handling tar-archive members exceeding 8GB (Tom Lane) - The POSIX standard for tar file format does not allow + The POSIX standard for tar file format does not allow archive member files to exceed 8GB, but most modern implementations - of tar support an extension that fixes that. Adopt - this extension so that pg_dump with no longer fails on tables with more than 8GB of data, and so - that pg_basebackup can handle files larger than 8GB. + that pg_basebackup can handle files larger than 8GB. In addition, fix some portability issues that could cause failures for members between 4GB and 8GB on some platforms. 
Potentially these problems could cause unrecoverable data loss due to unreadable backup @@ -3084,51 +5149,51 @@ Branch: REL9_2_STABLE [37f30b251] 2016-04-18 13:19:52 -0400 - Fix assorted corner-case bugs in pg_dump's processing + Fix assorted corner-case bugs in pg_dump's processing of extension member objects (Tom Lane) - Make pg_dump mark a view's triggers as needing to be + Make pg_dump mark a view's triggers as needing to be processed after its rule, to prevent possible failure during - parallel pg_restore (Tom Lane) + parallel pg_restore (Tom Lane) Ensure that relation option values are properly quoted - in pg_dump (Kouhei Sutou, Tom Lane) + in pg_dump (Kouhei Sutou, Tom Lane) A reloption value that isn't a simple identifier or number could lead to dump/reload failures due to syntax errors in CREATE statements - issued by pg_dump. This is not an issue with any - reloption currently supported by core PostgreSQL, but + issued by pg_dump. This is not an issue with any + reloption currently supported by core PostgreSQL, but extensions could allow reloptions that cause the problem. - Avoid repeated password prompts during parallel pg_dump + Avoid repeated password prompts during parallel pg_dump (Zeus Kronion) - Fix pg_upgrade's file-copying code to handle errors + Fix pg_upgrade's file-copying code to handle errors properly on Windows (Bruce Momjian) - Install guards in pgbench against corner-case overflow + Install guards in pgbench against corner-case overflow conditions during evaluation of script-specified division or modulo operators (Fabien Coelho, Michael Paquier) @@ -3137,22 +5202,22 @@ Branch: REL9_2_STABLE [37f30b251] 2016-04-18 13:19:52 -0400 Fix failure to localize messages emitted - by pg_receivexlog and pg_recvlogical + by pg_receivexlog and pg_recvlogical (Ioseph Kim) - Avoid dump/reload problems when using both plpython2 - and plpython3 (Tom Lane) + Avoid dump/reload problems when using both plpython2 + and plpython3 (Tom Lane) - In principle, both versions of PL/Python can be used in + In principle, both versions of PL/Python can be used in the same database, though not in the same session (because the two - versions of libpython cannot safely be used concurrently). - However, pg_restore and pg_upgrade both + versions of libpython cannot safely be used concurrently). + However, pg_restore and pg_upgrade both do things that can fall foul of the same-session restriction. Work around that by changing the timing of the check. @@ -3160,42 +5225,42 @@ Branch: REL9_2_STABLE [37f30b251] 2016-04-18 13:19:52 -0400 - Fix PL/Python regression tests to pass with Python 3.5 + Fix PL/Python regression tests to pass with Python 3.5 (Peter Eisentraut) - Fix premature clearing of libpq's input buffer when + Fix premature clearing of libpq's input buffer when socket EOF is seen (Tom Lane) - This mistake caused libpq to sometimes not report the + This mistake caused libpq to sometimes not report the backend's final error message before reporting server closed the - connection unexpectedly. + connection unexpectedly. - Prevent certain PL/Java parameters from being set by + Prevent certain PL/Java parameters from being set by non-superusers (Noah Misch) - This change mitigates a PL/Java security bug - (CVE-2016-0766), which was fixed in PL/Java by marking + This change mitigates a PL/Java security bug + (CVE-2016-0766), which was fixed in PL/Java by marking these parameters as superuser-only. 
To fix the security hazard for - sites that update PostgreSQL more frequently - than PL/Java, make the core code aware of them also. + sites that update PostgreSQL more frequently + than PL/Java, make the core code aware of them also. - Improve libpq's handling of out-of-memory situations + Improve libpq's handling of out-of-memory situations (Michael Paquier, Amit Kapila, Heikki Linnakangas) @@ -3203,36 +5268,36 @@ Branch: REL9_2_STABLE [37f30b251] 2016-04-18 13:19:52 -0400 Fix order of arguments - in ecpg-generated typedef statements + in ecpg-generated typedef statements (Michael Meskes) - Use %g not %f format - in ecpg's PGTYPESnumeric_from_double() + Use %g not %f format + in ecpg's PGTYPESnumeric_from_double() (Tom Lane) - Fix ecpg-supplied header files to not contain comments + Fix ecpg-supplied header files to not contain comments continued from a preprocessor directive line onto the next line (Michael Meskes) - Such a comment is rejected by ecpg. It's not yet clear - whether ecpg itself should be changed. + Such a comment is rejected by ecpg. It's not yet clear + whether ecpg itself should be changed. - Fix hstore_to_json_loose()'s test for whether - an hstore value can be converted to a JSON number (Tom Lane) + Fix hstore_to_json_loose()'s test for whether + an hstore value can be converted to a JSON number (Tom Lane) @@ -3243,14 +5308,14 @@ Branch: REL9_2_STABLE [37f30b251] 2016-04-18 13:19:52 -0400 - Ensure that contrib/pgcrypto's crypt() + Ensure that contrib/pgcrypto's crypt() function can be interrupted by query cancel (Andreas Karlsson) - Accept flex versions later than 2.5.x + Accept flex versions later than 2.5.x (Tom Lane, Michael Paquier) @@ -3274,19 +5339,19 @@ Branch: REL9_2_STABLE [37f30b251] 2016-04-18 13:19:52 -0400 - Install our missing script where PGXS builds can find it + Install our missing script where PGXS builds can find it (Jim Nasby) This allows sane behavior in a PGXS build done on a machine where build - tools such as bison are missing. + tools such as bison are missing. - Ensure that dynloader.h is included in the installed + Ensure that dynloader.h is included in the installed header files in MSVC builds (Bruce Momjian, Michael Paquier) @@ -3294,11 +5359,11 @@ Branch: REL9_2_STABLE [37f30b251] 2016-04-18 13:19:52 -0400 Add variant regression test expected-output file to match behavior of - current libxml2 (Tom Lane) + current libxml2 (Tom Lane) - The fix for libxml2's CVE-2015-7499 causes it not to + The fix for libxml2's CVE-2015-7499 causes it not to output error context reports in some cases where it used to do so. This seems to be a bug, but we'll probably have to live with it for some time, so work around it. @@ -3307,7 +5372,7 @@ Branch: REL9_2_STABLE [37f30b251] 2016-04-18 13:19:52 -0400 - Update time zone data files to tzdata release 2016a for + Update time zone data files to tzdata release 2016a for DST law changes in Cayman Islands, Metlakatla, and Trans-Baikal Territory (Zabaykalsky Krai), plus historical corrections for Pakistan. @@ -3329,7 +5394,7 @@ Branch: REL9_2_STABLE [37f30b251] 2016-04-18 13:19:52 -0400 This release contains a variety of fixes from 9.3.9. For information about new features in the 9.3 major release, see - . + . @@ -3341,7 +5406,7 @@ Branch: REL9_2_STABLE [37f30b251] 2016-04-18 13:19:52 -0400 However, if you are upgrading from a version earlier than 9.3.9, - see . + see . 
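The hstore_to_json_loose() entry in the preceding list is about deciding which hstore values look like JSON numbers; a quick illustration (requires the hstore extension; the sample pairs are arbitrary):

    CREATE EXTENSION IF NOT EXISTS hstore;

    -- Values that parse as numbers should become JSON numbers, everything
    -- else should remain a JSON string; the fix tightens that test.
    SELECT hstore_to_json_loose('a=>1, b=>2.5e3, c=>"5 apples", t=>true');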
@@ -3353,13 +5418,13 @@ Branch: REL9_2_STABLE [37f30b251] 2016-04-18 13:19:52 -0400 - Guard against stack overflows in json parsing + Guard against stack overflows in json parsing (Oskari Saarenmaa) - If an application constructs PostgreSQL json - or jsonb values from arbitrary user input, the application's + If an application constructs PostgreSQL json + or jsonb values from arbitrary user input, the application's users can reliably crash the PostgreSQL server, causing momentary denial of service. (CVE-2015-5289) @@ -3367,8 +5432,8 @@ Branch: REL9_2_STABLE [37f30b251] 2016-04-18 13:19:52 -0400 - Fix contrib/pgcrypto to detect and report - too-short crypt() salts (Josh Kupershmidt) + Fix contrib/pgcrypto to detect and report + too-short crypt() salts (Josh Kupershmidt) @@ -3401,13 +5466,13 @@ Branch: REL9_2_STABLE [37f30b251] 2016-04-18 13:19:52 -0400 - Fix insertion of relations into the relation cache init file + Fix insertion of relations into the relation cache init file (Tom Lane) An oversight in a patch in the most recent minor releases - caused pg_trigger_tgrelid_tgname_index to be omitted + caused pg_trigger_tgrelid_tgname_index to be omitted from the init file. Subsequent sessions detected this, then deemed the init file to be broken and silently ignored it, resulting in a significant degradation in session startup time. In addition to fixing @@ -3425,7 +5490,7 @@ Branch: REL9_2_STABLE [37f30b251] 2016-04-18 13:19:52 -0400 - Improve LISTEN startup time when there are many unread + Improve LISTEN startup time when there are many unread notifications (Matt Newell) @@ -3437,7 +5502,7 @@ Branch: REL9_2_STABLE [37f30b251] 2016-04-18 13:19:52 -0400 - This was seen primarily when restoring pg_dump output + This was seen primarily when restoring pg_dump output for databases with many thousands of tables. @@ -3452,13 +5517,13 @@ Branch: REL9_2_STABLE [37f30b251] 2016-04-18 13:19:52 -0400 too many bugs in practice, both in the underlying OpenSSL library and in our usage of it. Renegotiation will be removed entirely in 9.5 and later. In the older branches, just change the default value - of ssl_renegotiation_limit to zero (disabled). + of ssl_renegotiation_limit to zero (disabled). 
- Lower the minimum values of the *_freeze_max_age parameters + Lower the minimum values of the *_freeze_max_age parameters (Andres Freund) @@ -3470,7 +5535,7 @@ Branch: REL9_2_STABLE [37f30b251] 2016-04-18 13:19:52 -0400 - Limit the maximum value of wal_buffers to 2GB to avoid + Limit the maximum value of wal_buffers to 2GB to avoid server crashes (Josh Berkus) @@ -3478,15 +5543,15 @@ Branch: REL9_2_STABLE [37f30b251] 2016-04-18 13:19:52 -0400 Avoid logging complaints when a parameter that can only be set at - server start appears multiple times in postgresql.conf, - and fix counting of line numbers after an include_dir + server start appears multiple times in postgresql.conf, + and fix counting of line numbers after an include_dir directive (Tom Lane) - Fix rare internal overflow in multiplication of numeric values + Fix rare internal overflow in multiplication of numeric values (Dean Rasheed) @@ -3494,21 +5559,21 @@ Branch: REL9_2_STABLE [37f30b251] 2016-04-18 13:19:52 -0400 Guard against hard-to-reach stack overflows involving record types, - range types, json, jsonb, tsquery, - ltxtquery and query_int (Noah Misch) + range types, json, jsonb, tsquery, + ltxtquery and query_int (Noah Misch) - Fix handling of DOW and DOY in datetime input + Fix handling of DOW and DOY in datetime input (Greg Stark) These tokens aren't meant to be used in datetime values, but previously they resulted in opaque internal error messages rather - than invalid input syntax. + than invalid input syntax. @@ -3521,7 +5586,7 @@ Branch: REL9_2_STABLE [37f30b251] 2016-04-18 13:19:52 -0400 Add recursion depth protections to regular expression, SIMILAR - TO, and LIKE matching (Tom Lane) + TO, and LIKE matching (Tom Lane) @@ -3573,22 +5638,22 @@ Branch: REL9_2_STABLE [37f30b251] 2016-04-18 13:19:52 -0400 - Fix unexpected out-of-memory situation during sort errors - when using tuplestores with small work_mem settings (Tom + Fix unexpected out-of-memory situation during sort errors + when using tuplestores with small work_mem settings (Tom Lane) - Fix very-low-probability stack overrun in qsort (Tom Lane) + Fix very-low-probability stack overrun in qsort (Tom Lane) - Fix invalid memory alloc request size failure in hash joins - with large work_mem settings (Tomas Vondra, Tom Lane) + Fix invalid memory alloc request size failure in hash joins + with large work_mem settings (Tomas Vondra, Tom Lane) @@ -3601,9 +5666,9 @@ Branch: REL9_2_STABLE [37f30b251] 2016-04-18 13:19:52 -0400 These mistakes could lead to incorrect query plans that would give wrong answers, or to assertion failures in assert-enabled builds, or to odd planner errors such as could not devise a query plan for the - given query, could not find pathkey item to - sort, plan should not reference subplan's variable, - or failed to assign all NestLoopParams to plan nodes. + given query, could not find pathkey item to + sort, plan should not reference subplan's variable, + or failed to assign all NestLoopParams to plan nodes. Thanks are due to Andreas Seltenreich and Piotr Stefaniak for fuzz testing that exposed these problems. 
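The tuplestore entry above (spurious out-of-memory during sort errors with small work_mem) can be exercised with a CTE, which stores its result in a tuplestore; a minimal sketch:

    -- Deliberately tiny, so the CTE's tuplestore spills to disk; the fix is
    -- about error handling on that path, not about the query's result.
    SET work_mem = '64kB';

    WITH big AS (
        SELECT g AS n, repeat('x', 100) AS padding
        FROM   generate_series(1, 100000) AS g
    )
    SELECT count(*) FROM big;

    RESET work_mem;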
@@ -3611,7 +5676,7 @@ Branch: REL9_2_STABLE [37f30b251] 2016-04-18 13:19:52 -0400 - Improve planner's performance for UPDATE/DELETE + Improve planner's performance for UPDATE/DELETE on large inheritance sets (Tom Lane, Dean Rasheed) @@ -3632,12 +5697,12 @@ Branch: REL9_2_STABLE [37f30b251] 2016-04-18 13:19:52 -0400 During postmaster shutdown, ensure that per-socket lock files are removed and listen sockets are closed before we remove - the postmaster.pid file (Tom Lane) + the postmaster.pid file (Tom Lane) This avoids race-condition failures if an external script attempts to - start a new postmaster as soon as pg_ctl stop returns. + start a new postmaster as soon as pg_ctl stop returns. @@ -3664,7 +5729,7 @@ Branch: REL9_2_STABLE [37f30b251] 2016-04-18 13:19:52 -0400 - Do not print a WARNING when an autovacuum worker is already + Do not print a WARNING when an autovacuum worker is already gone when we attempt to signal it, and reduce log verbosity for such signals (Tom Lane) @@ -3701,7 +5766,7 @@ Branch: REL9_2_STABLE [37f30b251] 2016-04-18 13:19:52 -0400 - VACUUM attempted to recycle such pages, but did so in a + VACUUM attempted to recycle such pages, but did so in a way that wasn't crash-safe. @@ -3709,44 +5774,44 @@ Branch: REL9_2_STABLE [37f30b251] 2016-04-18 13:19:52 -0400 Fix off-by-one error that led to otherwise-harmless warnings - about apparent wraparound in subtrans/multixact truncation + about apparent wraparound in subtrans/multixact truncation (Thomas Munro) - Fix misreporting of CONTINUE and MOVE statement - types in PL/pgSQL's error context messages + Fix misreporting of CONTINUE and MOVE statement + types in PL/pgSQL's error context messages (Pavel Stehule, Tom Lane) - Fix PL/Perl to handle non-ASCII error + Fix PL/Perl to handle non-ASCII error message texts correctly (Alex Hunsaker) - Fix PL/Python crash when returning the string - representation of a record result (Tom Lane) + Fix PL/Python crash when returning the string + representation of a record result (Tom Lane) - Fix some places in PL/Tcl that neglected to check for - failure of malloc() calls (Michael Paquier, Álvaro + Fix some places in PL/Tcl that neglected to check for + failure of malloc() calls (Michael Paquier, Álvaro Herrera) - In contrib/isn, fix output of ISBN-13 numbers that begin + In contrib/isn, fix output of ISBN-13 numbers that begin with 979 (Fabien Coelho) @@ -3758,20 +5823,20 @@ Branch: REL9_2_STABLE [37f30b251] 2016-04-18 13:19:52 -0400 - Improve contrib/postgres_fdw's handling of + Improve contrib/postgres_fdw's handling of collation-related decisions (Tom Lane) The main user-visible effect is expected to be that comparisons - involving varchar columns will be sent to the remote server + involving varchar columns will be sent to the remote server for execution in more cases than before. - Improve libpq's handling of out-of-memory conditions + Improve libpq's handling of out-of-memory conditions (Michael Paquier, Heikki Linnakangas) @@ -3779,64 +5844,64 @@ Branch: REL9_2_STABLE [37f30b251] 2016-04-18 13:19:52 -0400 Fix memory leaks and missing out-of-memory checks - in ecpg (Michael Paquier) + in ecpg (Michael Paquier) - Fix psql's code for locale-aware formatting of numeric + Fix psql's code for locale-aware formatting of numeric output (Tom Lane) - The formatting code invoked by \pset numericlocale on + The formatting code invoked by \pset numericlocale on did the wrong thing for some uncommon cases such as numbers with an exponent but no decimal point. 
It could also mangle already-localized - output from the money data type. + output from the money data type. - Prevent crash in psql's \c command when + Prevent crash in psql's \c command when there is no current connection (Noah Misch) - Make pg_dump handle inherited NOT VALID + Make pg_dump handle inherited NOT VALID check constraints correctly (Tom Lane) - Fix selection of default zlib compression level - in pg_dump's directory output format (Andrew Dunstan) + Fix selection of default zlib compression level + in pg_dump's directory output format (Andrew Dunstan) - Ensure that temporary files created during a pg_dump - run with tar-format output are not world-readable (Michael + Ensure that temporary files created during a pg_dump + run with tar-format output are not world-readable (Michael Paquier) - Fix pg_dump and pg_upgrade to support - cases where the postgres or template1 database + Fix pg_dump and pg_upgrade to support + cases where the postgres or template1 database is in a non-default tablespace (Marti Raudsepp, Bruce Momjian) - Fix pg_dump to handle object privileges sanely when + Fix pg_dump to handle object privileges sanely when dumping from a server too old to have a particular privilege type (Tom Lane) @@ -3844,11 +5909,11 @@ Branch: REL9_2_STABLE [37f30b251] 2016-04-18 13:19:52 -0400 When dumping data types from pre-9.2 servers, and when dumping functions or procedural languages from pre-7.3 - servers, pg_dump would - produce GRANT/REVOKE commands that revoked the + servers, pg_dump would + produce GRANT/REVOKE commands that revoked the owner's grantable privileges and instead granted all privileges - to PUBLIC. Since the privileges involved are - just USAGE and EXECUTE, this isn't a security + to PUBLIC. Since the privileges involved are + just USAGE and EXECUTE, this isn't a security problem, but it's certainly a surprising representation of the older systems' behavior. Fix it to leave the default privilege state alone in these cases. @@ -3857,18 +5922,18 @@ Branch: REL9_2_STABLE [37f30b251] 2016-04-18 13:19:52 -0400 - Fix pg_dump to dump shell types (Tom Lane) + Fix pg_dump to dump shell types (Tom Lane) Shell types (that is, not-yet-fully-defined types) aren't useful for - much, but nonetheless pg_dump should dump them. + much, but nonetheless pg_dump should dump them. - Fix assorted minor memory leaks in pg_dump and other + Fix assorted minor memory leaks in pg_dump and other client-side programs (Michael Paquier) @@ -3876,11 +5941,11 @@ Branch: REL9_2_STABLE [37f30b251] 2016-04-18 13:19:52 -0400 Fix spinlock assembly code for PPC hardware to be compatible - with AIX's native assembler (Tom Lane) + with AIX's native assembler (Tom Lane) - Building with gcc didn't work if gcc + Building with gcc didn't work if gcc had been configured to use the native assembler, which is becoming more common. 
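A small psql sketch of the \pset numericlocale entry above (output is locale-dependent; the values are only assumed examples):

    \pset numericlocale on
    -- numbers whose text form has an exponent but no decimal point, and
    -- money values, are now formatted correctly instead of being mangled
    SELECT 1e20::float8 AS exp_no_point, '1234.56'::money AS price;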
@@ -3888,14 +5953,14 @@ Branch: REL9_2_STABLE [37f30b251] 2016-04-18 13:19:52 -0400 - On AIX, test the -qlonglong compiler option + On AIX, test the -qlonglong compiler option rather than just assuming it's safe to use (Noah Misch) - On AIX, use -Wl,-brtllib link option to allow + On AIX, use -Wl,-brtllib link option to allow symbols to be resolved at runtime (Noah Misch) @@ -3907,38 +5972,38 @@ Branch: REL9_2_STABLE [37f30b251] 2016-04-18 13:19:52 -0400 Avoid use of inline functions when compiling with - 32-bit xlc, due to compiler bugs (Noah Misch) + 32-bit xlc, due to compiler bugs (Noah Misch) - Use librt for sched_yield() when necessary, + Use librt for sched_yield() when necessary, which it is on some Solaris versions (Oskari Saarenmaa) - Fix Windows install.bat script to handle target directory + Fix Windows install.bat script to handle target directory names that contain spaces (Heikki Linnakangas) - Make the numeric form of the PostgreSQL version number - (e.g., 90405) readily available to extension Makefiles, - as a variable named VERSION_NUM (Michael Paquier) + Make the numeric form of the PostgreSQL version number + (e.g., 90405) readily available to extension Makefiles, + as a variable named VERSION_NUM (Michael Paquier) - Update time zone data files to tzdata release 2015g for + Update time zone data files to tzdata release 2015g for DST law changes in Cayman Islands, Fiji, Moldova, Morocco, Norfolk Island, North Korea, Turkey, and Uruguay. There is a new zone name - America/Fort_Nelson for the Canadian Northern Rockies. + America/Fort_Nelson for the Canadian Northern Rockies. @@ -3958,7 +6023,7 @@ Branch: REL9_2_STABLE [37f30b251] 2016-04-18 13:19:52 -0400 This release contains a small number of fixes from 9.3.8. For information about new features in the 9.3 major release, see - . + . @@ -3970,13 +6035,13 @@ Branch: REL9_2_STABLE [37f30b251] 2016-04-18 13:19:52 -0400 However, if you are upgrading an installation that was previously - upgraded using a pg_upgrade version between 9.3.0 and + upgraded using a pg_upgrade version between 9.3.0 and 9.3.4 inclusive, see the first changelog entry below. Also, if you are upgrading from a version earlier than 9.3.7, - see . + see . @@ -3993,52 +6058,52 @@ Branch: REL9_2_STABLE [37f30b251] 2016-04-18 13:19:52 -0400 - Recent PostgreSQL releases introduced mechanisms to + Recent PostgreSQL releases introduced mechanisms to protect against multixact wraparound, but some of that code did not account for the possibility that it would need to run during crash recovery, when the database may not be in a consistent state. This could result in failure to restart after a crash, or failure to start up a secondary server. The lingering effects of a previously-fixed - bug in pg_upgrade could also cause such a failure, in - installations that had used pg_upgrade versions + bug in pg_upgrade could also cause such a failure, in + installations that had used pg_upgrade versions between 9.3.0 and 9.3.4. - The pg_upgrade bug in question was that it would - set oldestMultiXid to 1 in pg_control even + The pg_upgrade bug in question was that it would + set oldestMultiXid to 1 in pg_control even if the true value should be higher. With the fixes introduced in this release, such a situation will result in immediate emergency - autovacuuming until a correct oldestMultiXid value can be + autovacuuming until a correct oldestMultiXid value can be determined. 
If that would pose a hardship, users can avoid it by - doing manual vacuuming before upgrading to this release. + doing manual vacuuming before upgrading to this release. In detail: - Check whether pg_controldata reports Latest - checkpoint's oldestMultiXid to be 1. If not, there's nothing + Check whether pg_controldata reports Latest + checkpoint's oldestMultiXid to be 1. If not, there's nothing to do. - Look in PGDATA/pg_multixact/offsets to see if there's a - file named 0000. If there is, there's nothing to do. + Look in PGDATA/pg_multixact/offsets to see if there's a + file named 0000. If there is, there's nothing to do. Otherwise, for each table that has - pg_class.relminmxid equal to 1, - VACUUM that table with - both - and set to + pg_class.relminmxid equal to 1, + VACUUM that table with + both + and set to zero. (You can use the vacuum cost delay parameters described - in to reduce + in to reduce the performance consequences for concurrent sessions.) You must - use PostgreSQL 9.3.5 or later to perform this step. + use PostgreSQL 9.3.5 or later to perform this step. @@ -4052,7 +6117,7 @@ Branch: REL9_2_STABLE [37f30b251] 2016-04-18 13:19:52 -0400 With just the wrong timing of concurrent activity, a VACUUM - FULL on a system catalog might fail to update the init file + FULL on a system catalog might fail to update the init file that's used to avoid cache-loading work for new sessions. This would result in later sessions being unable to access that catalog at all. This is a very ancient bug, but it's so hard to trigger that no @@ -4063,13 +6128,13 @@ Branch: REL9_2_STABLE [37f30b251] 2016-04-18 13:19:52 -0400 Avoid deadlock between incoming sessions and CREATE/DROP - DATABASE (Tom Lane) + DATABASE (Tom Lane) A new session starting in a database that is the target of - a DROP DATABASE command, or is the template for - a CREATE DATABASE command, could cause the command to wait + a DROP DATABASE command, or is the template for + a CREATE DATABASE command, could cause the command to wait for five seconds and then fail, even if the new session would have exited before that. @@ -4107,7 +6172,7 @@ Branch: REL9_2_STABLE [37f30b251] 2016-04-18 13:19:52 -0400 This release contains a small number of fixes from 9.3.7. For information about new features in the 9.3 major release, see - . + . @@ -4119,7 +6184,7 @@ Branch: REL9_2_STABLE [37f30b251] 2016-04-18 13:19:52 -0400 However, if you are upgrading from a version earlier than 9.3.7, - see . + see . @@ -4131,12 +6196,12 @@ Branch: REL9_2_STABLE [37f30b251] 2016-04-18 13:19:52 -0400 - Avoid failures while fsync'ing data directory during + Avoid failures while fsync'ing data directory during crash restart (Abhijit Menon-Sen, Tom Lane) - In the previous minor releases we added a patch to fsync + In the previous minor releases we added a patch to fsync everything in the data directory after a crash. Unfortunately its response to any error condition was to fail, thereby preventing the server from starting up, even when the problem was quite harmless. @@ -4148,28 +6213,28 @@ Branch: REL9_2_STABLE [37f30b251] 2016-04-18 13:19:52 -0400 - Also apply the same rules in initdb --sync-only. + Also apply the same rules in initdb --sync-only. This case is less critical but it should act similarly. 
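The manual-vacuum step described above can be sketched as follows, assuming the two settings referred to are vacuum_freeze_table_age and vacuum_multixact_freeze_table_age (the table name is hypothetical):

    -- list tables whose pg_class.relminmxid is 1
    SELECT oid::regclass AS tbl FROM pg_class WHERE relminmxid = 1;

    -- then, for each such table (requires PostgreSQL 9.3.5 or later):
    SET vacuum_freeze_table_age = 0;
    SET vacuum_multixact_freeze_table_age = 0;
    VACUUM some_table;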
- Fix pg_get_functiondef() to show - functions' LEAKPROOF property, if set (Jeevan Chalke) + Fix pg_get_functiondef() to show + functions' LEAKPROOF property, if set (Jeevan Chalke) - Remove configure's check prohibiting linking to a - threaded libpython - on OpenBSD (Tom Lane) + Remove configure's check prohibiting linking to a + threaded libpython + on OpenBSD (Tom Lane) The failure this restriction was meant to prevent seems to not be a - problem anymore on current OpenBSD + problem anymore on current OpenBSD versions. @@ -4184,15 +6249,15 @@ Branch: REL9_0_STABLE [4dddf8552] 2015-05-21 20:41:55 -0400 - Allow libpq to use TLS protocol versions beyond v1 + Allow libpq to use TLS protocol versions beyond v1 (Noah Misch) - For a long time, libpq was coded so that the only SSL + For a long time, libpq was coded so that the only SSL protocol it would allow was TLS v1. Now that newer TLS versions are becoming popular, allow it to negotiate the highest commonly-supported - TLS version with the server. (PostgreSQL servers were + TLS version with the server. (PostgreSQL servers were already capable of such negotiation, so no change is needed on the server side.) This is a back-patch of a change already released in 9.4.0. @@ -4215,7 +6280,7 @@ Branch: REL9_0_STABLE [4dddf8552] 2015-05-21 20:41:55 -0400 This release contains a variety of fixes from 9.3.6. For information about new features in the 9.3 major release, see - . + . @@ -4226,14 +6291,14 @@ Branch: REL9_0_STABLE [4dddf8552] 2015-05-21 20:41:55 -0400 - However, if you use contrib/citext's - regexp_matches() functions, see the changelog entry below + However, if you use contrib/citext's + regexp_matches() functions, see the changelog entry below about that. Also, if you are upgrading from a version earlier than 9.3.6, - see . + see . @@ -4265,7 +6330,7 @@ Branch: REL9_0_STABLE [4dddf8552] 2015-05-21 20:41:55 -0400 - Our replacement implementation of snprintf() failed to + Our replacement implementation of snprintf() failed to check for errors reported by the underlying system library calls; the main case that might be missed is out-of-memory situations. In the worst case this might lead to information exposure, due to our @@ -4275,7 +6340,7 @@ Branch: REL9_0_STABLE [4dddf8552] 2015-05-21 20:41:55 -0400 - It remains possible that some calls of the *printf() + It remains possible that some calls of the *printf() family of functions are vulnerable to information disclosure if an out-of-memory error occurs at just the wrong time. We judge the risk to not be large, but will continue analysis in this area. @@ -4285,15 +6350,15 @@ Branch: REL9_0_STABLE [4dddf8552] 2015-05-21 20:41:55 -0400 - In contrib/pgcrypto, uniformly report decryption failures - as Wrong key or corrupt data (Noah Misch) + In contrib/pgcrypto, uniformly report decryption failures + as Wrong key or corrupt data (Noah Misch) Previously, some cases of decryption with an incorrect key could report other error message texts. It has been shown that such variance in error reports can aid attackers in recovering keys from other systems. - While it's unknown whether pgcrypto's specific behaviors + While it's unknown whether pgcrypto's specific behaviors are likewise exploitable, it seems better to avoid the risk by using a one-size-fits-all message. 
(CVE-2015-3167) @@ -4308,7 +6373,7 @@ Branch: REL9_0_STABLE [4dddf8552] 2015-05-21 20:41:55 -0400 Under certain usage patterns, the existing defenses against this might - be insufficient, allowing pg_multixact/members files to be + be insufficient, allowing pg_multixact/members files to be removed too early, resulting in data loss. The fix for this includes modifying the server to fail transactions that would result in overwriting old multixact member ID data, and @@ -4320,16 +6385,16 @@ Branch: REL9_0_STABLE [4dddf8552] 2015-05-21 20:41:55 -0400 - Fix incorrect declaration of contrib/citext's - regexp_matches() functions (Tom Lane) + Fix incorrect declaration of contrib/citext's + regexp_matches() functions (Tom Lane) - These functions should return setof text[], like the core + These functions should return setof text[], like the core functions they are wrappers for; but they were incorrectly declared as - returning just text[]. This mistake had two results: first, + returning just text[]. This mistake had two results: first, if there was no match you got a scalar null result, whereas what you - should get is an empty set (zero rows). Second, the g flag + should get is an empty set (zero rows). Second, the g flag was effectively ignored, since you would get only one result array even if there were multiple matches. @@ -4337,16 +6402,16 @@ Branch: REL9_0_STABLE [4dddf8552] 2015-05-21 20:41:55 -0400 While the latter behavior is clearly a bug, there might be applications depending on the former behavior; therefore the function declarations - will not be changed by default until PostgreSQL 9.5. + will not be changed by default until PostgreSQL 9.5. In pre-9.5 branches, the old behavior exists in version 1.0 of - the citext extension, while we have provided corrected - declarations in version 1.1 (which is not installed by + the citext extension, while we have provided corrected + declarations in version 1.1 (which is not installed by default). To adopt the fix in pre-9.5 branches, execute - ALTER EXTENSION citext UPDATE TO '1.1' in each database in - which citext is installed. (You can also update + ALTER EXTENSION citext UPDATE TO '1.1' in each database in + which citext is installed. (You can also update back to 1.0 if you need to undo that.) Be aware that either update direction will require dropping and recreating any views or rules that - use citext's regexp_matches() functions. + use citext's regexp_matches() functions. @@ -4388,7 +6453,7 @@ Branch: REL9_0_STABLE [4dddf8552] 2015-05-21 20:41:55 -0400 This oversight in the planner has been observed to cause could - not find RelOptInfo for given relids errors, but it seems possible + not find RelOptInfo for given relids errors, but it seems possible that sometimes an incorrect query plan might get past that consistency check and result in silently-wrong query output. @@ -4416,7 +6481,7 @@ Branch: REL9_0_STABLE [4dddf8552] 2015-05-21 20:41:55 -0400 This oversight has been seen to lead to failed to join all - relations together errors in queries involving LATERAL, + relations together errors in queries involving LATERAL, and that might happen in other cases as well. 
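The citext update described above, spelled out (run in each database where citext is installed):

    ALTER EXTENSION citext UPDATE TO '1.1';

    -- with the corrected declarations: zero rows when there is no match,
    -- and one row per match when the 'g' flag is given
    SELECT regexp_matches('barbeque'::citext, 'B[AEIOU]'::citext, 'g');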
@@ -4424,7 +6489,7 @@ Branch: REL9_0_STABLE [4dddf8552] 2015-05-21 20:41:55 -0400 Fix possible deadlock at startup - when max_prepared_transactions is too small + when max_prepared_transactions is too small (Heikki Linnakangas) @@ -4438,7 +6503,7 @@ Branch: REL9_0_STABLE [4dddf8552] 2015-05-21 20:41:55 -0400 - Recursively fsync() the data directory after a crash + Recursively fsync() the data directory after a crash (Abhijit Menon-Sen, Robert Haas) @@ -4458,19 +6523,19 @@ Branch: REL9_0_STABLE [4dddf8552] 2015-05-21 20:41:55 -0400 - Cope with unexpected signals in LockBufferForCleanup() + Cope with unexpected signals in LockBufferForCleanup() (Andres Freund) This oversight could result in spurious errors about multiple - backends attempting to wait for pincount 1. + backends attempting to wait for pincount 1. - Fix crash when doing COPY IN to a table with check + Fix crash when doing COPY IN to a table with check constraints that contain whole-row references (Tom Lane) @@ -4517,18 +6582,18 @@ Branch: REL9_0_STABLE [4dddf8552] 2015-05-21 20:41:55 -0400 - ANALYZE executes index expressions many times; if there are + ANALYZE executes index expressions many times; if there are slow functions in such an expression, it's desirable to be able to - cancel the ANALYZE before that loop finishes. + cancel the ANALYZE before that loop finishes. - Ensure tableoid of a foreign table is reported - correctly when a READ COMMITTED recheck occurs after - locking rows in SELECT FOR UPDATE, UPDATE, - or DELETE (Etsuro Fujita) + Ensure tableoid of a foreign table is reported + correctly when a READ COMMITTED recheck occurs after + locking rows in SELECT FOR UPDATE, UPDATE, + or DELETE (Etsuro Fujita) @@ -4548,20 +6613,20 @@ Branch: REL9_0_STABLE [4dddf8552] 2015-05-21 20:41:55 -0400 - Recommend setting include_realm to 1 when using + Recommend setting include_realm to 1 when using Kerberos/GSSAPI/SSPI authentication (Stephen Frost) Without this, identically-named users from different realms cannot be distinguished. For the moment this is only a documentation change, but - it will become the default setting in PostgreSQL 9.5. + it will become the default setting in PostgreSQL 9.5. - Remove code for matching IPv4 pg_hba.conf entries to + Remove code for matching IPv4 pg_hba.conf entries to IPv4-in-IPv6 addresses (Tom Lane) @@ -4574,20 +6639,20 @@ Branch: REL9_0_STABLE [4dddf8552] 2015-05-21 20:41:55 -0400 crashes on some systems, so let's just remove it rather than fix it. (Had we chosen to fix it, that would make for a subtle and potentially security-sensitive change in the effective meaning of - IPv4 pg_hba.conf entries, which does not seem like a good + IPv4 pg_hba.conf entries, which does not seem like a good thing to do in minor releases.) - Report WAL flush, not insert, position in IDENTIFY_SYSTEM + Report WAL flush, not insert, position in IDENTIFY_SYSTEM replication command (Heikki Linnakangas) This avoids a possible startup failure - in pg_receivexlog. + in pg_receivexlog. 
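A sketch of the crashing case from the COPY entry above (names are hypothetical); the check constraint contains a whole-row reference via a function taking the table's row type:

    CREATE TABLE copy_con (f1 int);
    CREATE FUNCTION copy_con_ok(copy_con) RETURNS boolean
        LANGUAGE sql IMMUTABLE AS 'SELECT $1.f1 > 0';
    ALTER TABLE copy_con
        ADD CONSTRAINT f1_positive CHECK (copy_con_ok(copy_con.*));
    -- loading data now enforces the constraint instead of crashing
    COPY copy_con FROM stdin;
    1
    \.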
@@ -4595,14 +6660,14 @@ Branch: REL9_0_STABLE [4dddf8552] 2015-05-21 20:41:55 -0400 While shutting down service on Windows, periodically send status updates to the Service Control Manager to prevent it from killing the - service too soon; and ensure that pg_ctl will wait for + service too soon; and ensure that pg_ctl will wait for shutdown (Krystian Bigaj) - Reduce risk of network deadlock when using libpq's + Reduce risk of network deadlock when using libpq's non-blocking mode (Heikki Linnakangas) @@ -4611,32 +6676,32 @@ Branch: REL9_0_STABLE [4dddf8552] 2015-05-21 20:41:55 -0400 buffer every so often, in case the server has sent enough response data to cause it to block on output. (A typical scenario is that the server is sending a stream of NOTICE messages during COPY FROM - STDIN.) This worked properly in the normal blocking mode, but not - so much in non-blocking mode. We've modified libpq + STDIN.) This worked properly in the normal blocking mode, but not + so much in non-blocking mode. We've modified libpq to opportunistically drain input when it can, but a full defense against this problem requires application cooperation: the application should watch for socket read-ready as well as write-ready conditions, - and be sure to call PQconsumeInput() upon read-ready. + and be sure to call PQconsumeInput() upon read-ready. - In libpq, fix misparsing of empty values in URI + In libpq, fix misparsing of empty values in URI connection strings (Thomas Fanghaenel) - Fix array handling in ecpg (Michael Meskes) + Fix array handling in ecpg (Michael Meskes) - Fix psql to sanely handle URIs and conninfo strings as - the first parameter to \connect + Fix psql to sanely handle URIs and conninfo strings as + the first parameter to \connect (David Fetter, Andrew Dunstan, Álvaro Herrera) @@ -4649,38 +6714,38 @@ Branch: REL9_0_STABLE [4dddf8552] 2015-05-21 20:41:55 -0400 - Suppress incorrect complaints from psql on some - platforms that it failed to write ~/.psql_history at exit + Suppress incorrect complaints from psql on some + platforms that it failed to write ~/.psql_history at exit (Tom Lane) This misbehavior was caused by a workaround for a bug in very old - (pre-2006) versions of libedit. We fixed it by + (pre-2006) versions of libedit. We fixed it by removing the workaround, which will cause a similar failure to appear - for anyone still using such versions of libedit. - Recommendation: upgrade that library, or use libreadline. + for anyone still using such versions of libedit. + Recommendation: upgrade that library, or use libreadline. - Fix pg_dump's rule for deciding which casts are + Fix pg_dump's rule for deciding which casts are system-provided casts that should not be dumped (Tom Lane) - In pg_dump, fix failure to honor -Z - compression level option together with -Fd + In pg_dump, fix failure to honor -Z + compression level option together with -Fd (Michael Paquier) - Make pg_dump consider foreign key relationships + Make pg_dump consider foreign key relationships between extension configuration tables while choosing dump order (Gilles Darold, Michael Paquier, Stephen Frost) @@ -4693,21 +6758,21 @@ Branch: REL9_0_STABLE [4dddf8552] 2015-05-21 20:41:55 -0400 - Avoid possible pg_dump failure when concurrent sessions + Avoid possible pg_dump failure when concurrent sessions are creating and dropping temporary functions (Tom Lane) - Fix dumping of views that are just VALUES(...) but have + Fix dumping of views that are just VALUES(...) 
but have column aliases (Tom Lane) - In pg_upgrade, force timeline 1 in the new cluster + In pg_upgrade, force timeline 1 in the new cluster (Bruce Momjian) @@ -4719,7 +6784,7 @@ Branch: REL9_0_STABLE [4dddf8552] 2015-05-21 20:41:55 -0400 - In pg_upgrade, check for improperly non-connectable + In pg_upgrade, check for improperly non-connectable databases before proceeding (Bruce Momjian) @@ -4727,28 +6792,28 @@ Branch: REL9_0_STABLE [4dddf8552] 2015-05-21 20:41:55 -0400 - In pg_upgrade, quote directory paths - properly in the generated delete_old_cluster script + In pg_upgrade, quote directory paths + properly in the generated delete_old_cluster script (Bruce Momjian) - In pg_upgrade, preserve database-level freezing info + In pg_upgrade, preserve database-level freezing info properly (Bruce Momjian) This oversight could cause missing-clog-file errors for tables within - the postgres and template1 databases. + the postgres and template1 databases. - Run pg_upgrade and pg_resetxlog with + Run pg_upgrade and pg_resetxlog with restricted privileges on Windows, so that they don't fail when run by an administrator (Muhammad Asif Naeem) @@ -4756,15 +6821,15 @@ Branch: REL9_0_STABLE [4dddf8552] 2015-05-21 20:41:55 -0400 - Improve handling of readdir() failures when scanning - directories in initdb and pg_basebackup + Improve handling of readdir() failures when scanning + directories in initdb and pg_basebackup (Marco Nenciarini) - Fix slow sorting algorithm in contrib/intarray (Tom Lane) + Fix slow sorting algorithm in contrib/intarray (Tom Lane) @@ -4782,7 +6847,7 @@ Branch: REL9_0_STABLE [4dddf8552] 2015-05-21 20:41:55 -0400 - Update time zone data files to tzdata release 2015d + Update time zone data files to tzdata release 2015d for DST law changes in Egypt, Mongolia, and Palestine, plus historical changes in Canada and Chile. Also adopt revised zone abbreviations for the America/Adak zone (HST/HDT not HAST/HADT). @@ -4805,7 +6870,7 @@ Branch: REL9_0_STABLE [4dddf8552] 2015-05-21 20:41:55 -0400 This release contains a variety of fixes from 9.3.5. For information about new features in the 9.3 major release, see - . + . @@ -4817,16 +6882,16 @@ Branch: REL9_0_STABLE [4dddf8552] 2015-05-21 20:41:55 -0400 However, if you are a Windows user and are using the Norwegian - (Bokmål) locale, manual action is needed after the upgrade to - replace any Norwegian (Bokmål)_Norway locale names stored - in PostgreSQL system catalogs with the plain-ASCII - alias Norwegian_Norway. For details see - + (Bokmål) locale, manual action is needed after the upgrade to + replace any Norwegian (Bokmål)_Norway locale names stored + in PostgreSQL system catalogs with the plain-ASCII + alias Norwegian_Norway. For details see + Also, if you are upgrading from a version earlier than 9.3.5, - see . + see . @@ -4855,15 +6920,15 @@ Branch: REL9_0_STABLE [56b970f2e] 2015-02-02 10:00:52 -0500 - Fix buffer overruns in to_char() + Fix buffer overruns in to_char() (Bruce Momjian) - When to_char() processes a numeric formatting template - calling for a large number of digits, PostgreSQL + When to_char() processes a numeric formatting template + calling for a large number of digits, PostgreSQL would read past the end of a buffer. When processing a crafted - timestamp formatting template, PostgreSQL would write + timestamp formatting template, PostgreSQL would write past the end of a buffer. Either case could crash the server. We have not ruled out the possibility of attacks that lead to privilege escalation, though they seem unlikely. 
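The kind of view covered by the VALUES(...) dump fix above, as a minimal assumed example:

    -- a view that is just VALUES(...) but carries column aliases;
    -- pg_dump now reproduces the aliases correctly
    CREATE VIEW vals_view (num, word) AS VALUES (1, 'one'), (2, 'two');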
@@ -4883,27 +6948,27 @@ Branch: REL9_0_STABLE [9e05c5063] 2015-02-02 10:00:52 -0500 - Fix buffer overrun in replacement *printf() functions + Fix buffer overrun in replacement *printf() functions (Tom Lane) - PostgreSQL includes a replacement implementation - of printf and related functions. This code will overrun + PostgreSQL includes a replacement implementation + of printf and related functions. This code will overrun a stack buffer when formatting a floating point number (conversion - specifiers e, E, f, F, - g or G) with requested precision greater than + specifiers e, E, f, F, + g or G) with requested precision greater than about 500. This will crash the server, and we have not ruled out the possibility of attacks that lead to privilege escalation. A database user can trigger such a buffer overrun through - the to_char() SQL function. While that is the only - affected core PostgreSQL functionality, extension + the to_char() SQL function. While that is the only + affected core PostgreSQL functionality, extension modules that use printf-family functions may be at risk as well. - This issue primarily affects PostgreSQL on Windows. - PostgreSQL uses the system implementation of these + This issue primarily affects PostgreSQL on Windows. + PostgreSQL uses the system implementation of these functions where adequate, which it is on other modern platforms. (CVE-2015-0242) @@ -4928,12 +6993,12 @@ Branch: REL9_0_STABLE [0a3ee8a5f] 2015-02-02 10:00:52 -0500 - Fix buffer overruns in contrib/pgcrypto + Fix buffer overruns in contrib/pgcrypto (Marko Tiikkaja, Noah Misch) - Errors in memory size tracking within the pgcrypto + Errors in memory size tracking within the pgcrypto module permitted stack buffer overruns and improper dependence on the contents of uninitialized memory. The buffer overrun cases can crash the server, and we have not ruled out the possibility of @@ -4994,7 +7059,7 @@ Branch: REL9_0_STABLE [3a2063369] 2015-01-28 12:33:29 -0500 Some server error messages show the values of columns that violate a constraint, such as a unique constraint. If the user does not have - SELECT privilege on all columns of the table, this could + SELECT privilege on all columns of the table, this could mean exposing values that the user should not be able to see. Adjust the code so that values are displayed only when they came from the SQL command or could be selected by the user. @@ -5043,14 +7108,14 @@ Branch: REL9_2_STABLE [6bf343c6e] 2015-01-16 13:10:23 +0200 - Cope with the Windows locale named Norwegian (Bokmål) + Cope with the Windows locale named Norwegian (Bokmål) (Heikki Linnakangas) Non-ASCII locale names are problematic since it's not clear what encoding they should be represented in. Map the troublesome locale - name to a plain-ASCII alias, Norwegian_Norway. + name to a plain-ASCII alias, Norwegian_Norway. 
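To illustrate the constraint-violation reporting change above (assumed table and values):

    CREATE TABLE accounts (id int PRIMARY KEY, balance numeric);
    INSERT INTO accounts VALUES (1, 100.00);
    INSERT INTO accounts VALUES (1, 200.00);
    -- ERROR: duplicate key value violates unique constraint "accounts_pkey"
    -- The DETAIL line naming the conflicting key is shown here because the
    -- value came from the user's own command; it is omitted when it would
    -- expose column values the user has no right to SELECT.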
@@ -5065,7 +7130,7 @@ Branch: REL9_0_STABLE [45a607d5c] 2014-11-04 13:24:26 -0500 Avoid possible data corruption if ALTER DATABASE SET - TABLESPACE is used to move a database to a new tablespace and then + TABLESPACE is used to move a database to a new tablespace and then shortly later move it back to its original tablespace (Tom Lane) @@ -5085,14 +7150,14 @@ Branch: REL9_0_STABLE [73f950fc8] 2014-10-30 13:03:39 -0400 - Avoid corrupting tables when ANALYZE inside a transaction + Avoid corrupting tables when ANALYZE inside a transaction is rolled back (Andres Freund, Tom Lane, Michael Paquier) If the failing transaction had earlier removed the last index, rule, or trigger from the table, the table would be left in a corrupted state - with the relevant pg_class flags not set though they + with the relevant pg_class flags not set though they should be. @@ -5107,8 +7172,8 @@ Branch: REL9_1_STABLE [d5fef87e9] 2014-10-20 23:47:45 +0200 Ensure that unlogged tables are copied correctly - during CREATE DATABASE or ALTER DATABASE SET - TABLESPACE (Pavan Deolasee, Andres Freund) + during CREATE DATABASE or ALTER DATABASE SET + TABLESPACE (Pavan Deolasee, Andres Freund) @@ -5120,12 +7185,12 @@ Branch: REL9_3_STABLE [e35db342a] 2014-09-22 16:19:59 -0400 Fix incorrect processing - of CreateEventTrigStmt.eventname (Petr + of CreateEventTrigStmt.eventname (Petr Jelinek) - This could result in misbehavior if CREATE EVENT TRIGGER + This could result in misbehavior if CREATE EVENT TRIGGER were executed as a prepared query, or via extended query protocol. @@ -5139,7 +7204,7 @@ Branch: REL9_1_STABLE [94d5d57d5] 2014-11-11 17:00:28 -0500 - Fix DROP's dependency searching to correctly handle the + Fix DROP's dependency searching to correctly handle the case where a table column is recursively visited before its table (Petr Jelinek, Tom Lane) @@ -5147,7 +7212,7 @@ Branch: REL9_1_STABLE [94d5d57d5] 2014-11-11 17:00:28 -0500 This case is only known to arise when an extension creates both a datatype and a table using that datatype. The faulty code might - refuse a DROP EXTENSION unless CASCADE is + refuse a DROP EXTENSION unless CASCADE is specified, which should not be required. @@ -5169,7 +7234,7 @@ Branch: REL9_0_STABLE [5308e085b] 2015-01-15 18:52:38 -0500 - In READ COMMITTED mode, queries that lock or update + In READ COMMITTED mode, queries that lock or update recently-updated rows could crash as a result of this bug. @@ -5198,8 +7263,8 @@ Branch: REL9_3_STABLE [54a8abc2b] 2015-01-04 15:48:29 -0300 Fix failure to wait when a transaction tries to acquire a FOR - NO KEY EXCLUSIVE tuple lock, while multiple other transactions - currently hold FOR SHARE locks (Álvaro Herrera) + NO KEY EXCLUSIVE tuple lock, while multiple other transactions + currently hold FOR SHARE locks (Álvaro Herrera) @@ -5213,15 +7278,15 @@ Branch: REL9_0_STABLE [662eebdc6] 2014-12-11 21:02:41 -0500 - Fix planning of SELECT FOR UPDATE when using a partial + Fix planning of SELECT FOR UPDATE when using a partial index on a child table (Kyotaro Horiguchi) - In READ COMMITTED mode, SELECT FOR UPDATE must - also recheck the partial index's WHERE condition when + In READ COMMITTED mode, SELECT FOR UPDATE must + also recheck the partial index's WHERE condition when rechecking a recently-updated row to see if it still satisfies the - query's WHERE condition. This requirement was missed if the + query's WHERE condition. 
This requirement was missed if the index belonged to an inheritance child table, so that it was possible to incorrectly return rows that no longer satisfy the query condition. @@ -5237,12 +7302,12 @@ Branch: REL9_0_STABLE [f5e4e92fb] 2014-12-11 19:37:17 -0500 - Fix corner case wherein SELECT FOR UPDATE could return a row + Fix corner case wherein SELECT FOR UPDATE could return a row twice, and possibly miss returning other rows (Tom Lane) - In READ COMMITTED mode, a SELECT FOR UPDATE + In READ COMMITTED mode, a SELECT FOR UPDATE that is scanning an inheritance tree could incorrectly return a row from a prior child table instead of the one it should return from a later child table. @@ -5258,7 +7323,7 @@ Branch: REL9_3_STABLE [939f0fb67] 2015-01-15 13:18:19 -0500 - Improve performance of EXPLAIN with large range tables + Improve performance of EXPLAIN with large range tables (Tom Lane) @@ -5274,7 +7339,7 @@ Branch: REL9_0_STABLE [4ff49746e] 2014-08-09 13:46:52 -0400 Reject duplicate column names in the referenced-columns list of - a FOREIGN KEY declaration (David Rowley) + a FOREIGN KEY declaration (David Rowley) @@ -5291,7 +7356,7 @@ Branch: REL9_3_STABLE [6306d0712] 2014-07-22 13:30:14 -0400 - Re-enable error for SELECT ... OFFSET -1 (Tom Lane) + Re-enable error for SELECT ... OFFSET -1 (Tom Lane) @@ -5328,7 +7393,7 @@ Branch: REL9_3_STABLE [8571ecb24] 2014-12-02 15:02:43 -0500 - Fix json_agg() to not return extra trailing right + Fix json_agg() to not return extra trailing right brackets in its result (Tom Lane) @@ -5343,7 +7408,7 @@ Branch: REL9_0_STABLE [26f8a4691] 2014-09-11 23:31:06 -0400 - Fix bugs in raising a numeric value to a large integral power + Fix bugs in raising a numeric value to a large integral power (Tom Lane) @@ -5364,19 +7429,19 @@ Branch: REL9_0_STABLE [e6550626c] 2014-12-01 15:25:18 -0500 - In numeric_recv(), truncate away any fractional digits - that would be hidden according to the value's dscale field + In numeric_recv(), truncate away any fractional digits + that would be hidden according to the value's dscale field (Tom Lane) - A numeric value's display scale (dscale) should + A numeric value's display scale (dscale) should never be less than the number of nonzero fractional digits; but apparently there's at least one broken client application that - transmits binary numeric values in which that's true. + transmits binary numeric values in which that's true. This leads to strange behavior since the extra digits are taken into account by arithmetic operations even though they aren't printed. - The least risky fix seems to be to truncate away such hidden + The least risky fix seems to be to truncate away such hidden digits on receipt, so that the value is indeed what it prints as. @@ -5395,7 +7460,7 @@ Branch: REL9_2_STABLE [3359a818c] 2014-09-23 20:25:39 -0400 Matching would often fail when the number of allowed iterations is - limited by a ? quantifier or a bound expression. + limited by a ? quantifier or a bound expression. 
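The now-rejected declaration from the FOREIGN KEY entry above (assumed tables; the exact error text may differ):

    CREATE TABLE parent (a int, b int, UNIQUE (a, b));
    -- listing column "a" twice in the referenced-columns list is now an error
    CREATE TABLE child (
        x int,
        y int,
        FOREIGN KEY (x, y) REFERENCES parent (a, a)
    );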
@@ -5430,7 +7495,7 @@ Branch: REL9_0_STABLE [10059c2da] 2014-10-27 10:51:38 +0200 - Fix bugs in tsquery @> tsquery + Fix bugs in tsquery @> tsquery operator (Heikki Linnakangas) @@ -5487,14 +7552,14 @@ Branch: REL9_0_STABLE [cebb3f032] 2015-01-17 22:37:32 -0500 - Fix namespace handling in xpath() (Ali Akbar) + Fix namespace handling in xpath() (Ali Akbar) - Previously, the xml value resulting from - an xpath() call would not have namespace declarations if + Previously, the xml value resulting from + an xpath() call would not have namespace declarations if the namespace declarations were attached to an ancestor element in the - input xml value, rather than to the specific element being + input xml value, rather than to the specific element being returned. Propagate the ancestral declaration so that the result is correct when considered in isolation. @@ -5514,7 +7579,7 @@ Branch: REL9_2_STABLE [19ccaf9d4] 2014-11-10 15:21:26 -0500 - In some contexts, constructs like row_to_json(tab.*) may + In some contexts, constructs like row_to_json(tab.*) may not produce the expected column names. This is fixed properly as of 9.4; in older branches, just ensure that we produce some nonempty name. (In some cases this will be the underlying table's column name @@ -5532,7 +7597,7 @@ Branch: REL9_2_STABLE [906599f65] 2014-11-22 16:01:15 -0500 Fix mishandling of system columns, - particularly tableoid, in FDW queries (Etsuro Fujita) + particularly tableoid, in FDW queries (Etsuro Fujita) @@ -5550,7 +7615,7 @@ Branch: REL9_3_STABLE [527ff8baf] 2015-01-30 12:30:43 -0500 - This patch fixes corner-case unexpected operator NNNN planner + This patch fixes corner-case unexpected operator NNNN planner errors, and improves the selectivity estimates for some other cases. @@ -5563,13 +7628,13 @@ Branch: REL9_2_STABLE [4586572d7] 2014-10-26 16:12:32 -0400 - Avoid doing indexed_column = ANY - (array) as an index qualifier if that leads + Avoid doing indexed_column = ANY + (array) as an index qualifier if that leads to an inferior plan (Andrew Gierth) - In some cases, = ANY conditions applied to non-first index + In some cases, = ANY conditions applied to non-first index columns would be done as index conditions even though it would be better to use them as simple filter conditions. @@ -5582,9 +7647,9 @@ Branch: REL9_3_STABLE [4e54685d0] 2014-10-20 12:23:48 -0400 - Fix variable not found in subplan target list planner + Fix variable not found in subplan target list planner failure when an inline-able SQL function taking a composite argument - is used in a LATERAL subselect and the composite argument + is used in a LATERAL subselect and the composite argument is a lateral reference (Tom Lane) @@ -5600,7 +7665,7 @@ Branch: REL9_0_STABLE [288f15b7c] 2014-10-01 19:30:41 -0400 Fix planner problems with nested append relations, such as inherited - tables within UNION ALL subqueries (Tom Lane) + tables within UNION ALL subqueries (Tom Lane) @@ -5629,8 +7694,8 @@ Branch: REL9_0_STABLE [50a757698] 2014-10-03 13:01:27 -0300 - Exempt tables that have per-table cost_limit - and/or cost_delay settings from autovacuum's global cost + Exempt tables that have per-table cost_limit + and/or cost_delay settings from autovacuum's global cost balancing rules (Álvaro Herrera) @@ -5664,7 +7729,7 @@ Branch: REL9_0_STABLE [91b4a881c] 2014-07-30 14:42:12 -0400 the target database, if they met the usual thresholds for autovacuuming. This is at best pretty unexpected; at worst it delays response to the wraparound threat. 
Fix it so that if autovacuum is - turned off, workers only do anti-wraparound vacuums and + turned off, workers only do anti-wraparound vacuums and not any other work. @@ -5728,12 +7793,12 @@ Branch: REL9_0_STABLE [804983961] 2014-07-29 11:58:17 +0300 Fix several cases where recovery logic improperly ignored WAL records - for COMMIT/ABORT PREPARED (Heikki Linnakangas) + for COMMIT/ABORT PREPARED (Heikki Linnakangas) The most notable oversight was - that recovery_target_xid could not be used to stop at + that recovery_target_xid could not be used to stop at a two-phase commit. @@ -5761,7 +7826,7 @@ Branch: REL9_0_STABLE [83c7bfb9a] 2014-11-06 21:26:21 +0900 - Avoid creating unnecessary .ready marker files for + Avoid creating unnecessary .ready marker files for timeline history files (Fujii Masao) @@ -5777,8 +7842,8 @@ Branch: REL9_0_STABLE [857a5d6b5] 2014-09-05 02:19:57 +0900 Fix possible null pointer dereference when an empty prepared statement - is used and the log_statement setting is mod - or ddl (Fujii Masao) + is used and the log_statement setting is mod + or ddl (Fujii Masao) @@ -5794,7 +7859,7 @@ Branch: REL9_0_STABLE [a1a8d0249] 2015-01-19 23:01:46 -0500 - Change pgstat wait timeout warning message to be LOG level, + Change pgstat wait timeout warning message to be LOG level, and rephrase it to be more understandable (Tom Lane) @@ -5803,7 +7868,7 @@ Branch: REL9_0_STABLE [a1a8d0249] 2015-01-19 23:01:46 -0500 case, but it occurs often enough on our slower buildfarm members to be a nuisance. Reduce it to LOG level, and expend a bit more effort on the wording: it now reads using stale statistics instead of - current ones because stats collector is not responding. + current ones because stats collector is not responding. @@ -5847,7 +7912,7 @@ Branch: REL9_0_STABLE [2e4946169] 2015-01-07 22:46:20 -0500 - Warn if macOS's setlocale() starts an unwanted extra + Warn if macOS's setlocale() starts an unwanted extra thread inside the postmaster (Noah Misch) @@ -5862,13 +7927,13 @@ Branch: REL9_0_STABLE [9880fea4f] 2014-11-25 17:39:09 +0200 - Fix processing of repeated dbname parameters - in PQconnectdbParams() (Alex Shulgin) + Fix processing of repeated dbname parameters + in PQconnectdbParams() (Alex Shulgin) Unexpected behavior ensued if the first occurrence - of dbname contained a connection string or URI to be + of dbname contained a connection string or URI to be expanded. @@ -5883,12 +7948,12 @@ Branch: REL9_0_STABLE [ac6e87537] 2014-10-22 18:42:01 -0400 - Ensure that libpq reports a suitable error message on + Ensure that libpq reports a suitable error message on unexpected socket EOF (Marko Tiikkaja, Tom Lane) - Depending on kernel behavior, libpq might return an + Depending on kernel behavior, libpq might return an empty error string rather than something useful when the server unexpectedly closed the socket. @@ -5904,14 +7969,14 @@ Branch: REL9_0_STABLE [49ef4eba2] 2014-10-29 14:35:39 +0200 - Clear any old error message during PQreset() + Clear any old error message during PQreset() (Heikki Linnakangas) - If PQreset() is called repeatedly, and the connection + If PQreset() is called repeatedly, and the connection cannot be re-established, error messages from the failed connection - attempts kept accumulating in the PGconn's error + attempts kept accumulating in the PGconn's error string. 
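A sketch of the xpath() namespace fix noted earlier above (assumed document; with the fix, the returned element carries the namespace declaration that was attached to its ancestor):

    SELECT (xpath('/d:a/d:b',
                  '<a xmlns="http://example.com/ns"><b>1</b></a>'::xml,
                  ARRAY[ARRAY['d', 'http://example.com/ns']]))[1];
    -- expected result, roughly: <b xmlns="http://example.com/ns">1</b>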
@@ -5927,7 +7992,7 @@ Branch: REL9_0_STABLE [1f3517039] 2014-11-25 14:10:54 +0200 Properly handle out-of-memory conditions while parsing connection - options in libpq (Alex Shulgin, Heikki Linnakangas) + options in libpq (Alex Shulgin, Heikki Linnakangas) @@ -5941,8 +8006,8 @@ Branch: REL9_0_STABLE [d9a1e9de5] 2014-10-06 21:23:50 -0400 - Fix array overrun in ecpg's version - of ParseDateTime() (Michael Paquier) + Fix array overrun in ecpg's version + of ParseDateTime() (Michael Paquier) @@ -5956,7 +8021,7 @@ Branch: REL9_0_STABLE [d67be559e] 2014-12-05 14:30:55 +0200 - In initdb, give a clearer error message if a password + In initdb, give a clearer error message if a password file is specified but is empty (Mats Erik Andersson) @@ -5971,12 +8036,12 @@ Branch: REL9_0_STABLE [44c518328] 2014-09-08 16:10:05 -0400 - Fix psql's \s command to work nicely with + Fix psql's \s command to work nicely with libedit, and add pager support (Stepan Rutz, Tom Lane) - When using libedit rather than readline, \s printed the + When using libedit rather than readline, \s printed the command history in a fairly unreadable encoded format, and on recent libedit versions might fail altogether. Fix that by printing the history ourselves rather than having the library do it. A pleasant @@ -5986,7 +8051,7 @@ Branch: REL9_0_STABLE [44c518328] 2014-09-08 16:10:05 -0400 This patch also fixes a bug that caused newline encoding to be applied inconsistently when saving the command history with libedit. - Multiline history entries written by older psql + Multiline history entries written by older psql versions will be read cleanly with this patch, but perhaps not vice versa, depending on the exact libedit versions involved. @@ -6004,17 +8069,17 @@ Branch: REL9_0_STABLE [2600e4436] 2014-12-31 12:17:12 -0500 - Improve consistency of parsing of psql's special + Improve consistency of parsing of psql's special variables (Tom Lane) - Allow variant spellings of on and off (such - as 1/0) for ECHO_HIDDEN - and ON_ERROR_ROLLBACK. Report a warning for unrecognized - values for COMP_KEYWORD_CASE, ECHO, - ECHO_HIDDEN, HISTCONTROL, - ON_ERROR_ROLLBACK, and VERBOSITY. Recognize + Allow variant spellings of on and off (such + as 1/0) for ECHO_HIDDEN + and ON_ERROR_ROLLBACK. Report a warning for unrecognized + values for COMP_KEYWORD_CASE, ECHO, + ECHO_HIDDEN, HISTCONTROL, + ON_ERROR_ROLLBACK, and VERBOSITY. Recognize all values for all these variables case-insensitively; previously there was a mishmash of case-sensitive and case-insensitive behaviors. 
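A hypothetical psql session sketching the variable-parsing entry above (the warning text is paraphrased, not quoted):

    \set ECHO_HIDDEN 1            -- variant spellings of on/off now accepted
    \set ON_ERROR_ROLLBACK off
    \set COMP_KEYWORD_CASE Upper  -- values are matched case-insensitively
    \set ECHO bogus               -- now draws a warning about an unrecognized value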
@@ -6027,8 +8092,8 @@ Branch: REL9_3_STABLE [4b1953079] 2014-11-28 02:44:40 +0900 - Make psql's \watch command display - nulls as specified by \pset null (Fujii Masao) + Make psql's \watch command display + nulls as specified by \pset null (Fujii Masao) @@ -6042,9 +8107,9 @@ Branch: REL9_0_STABLE [1f89fc218] 2014-09-12 11:24:39 -0400 - Fix psql's expanded-mode display to work - consistently when using border = 3 - and linestyle = ascii or unicode + Fix psql's expanded-mode display to work + consistently when using border = 3 + and linestyle = ascii or unicode (Stephen Frost) @@ -6058,7 +8123,7 @@ Branch: REL9_3_STABLE [bb1e2426b] 2015-01-05 19:27:09 -0500 - Fix pg_dump to handle comments on event triggers + Fix pg_dump to handle comments on event triggers without failing (Tom Lane) @@ -6072,8 +8137,8 @@ Branch: REL9_3_STABLE [cc609c46f] 2015-01-30 09:01:36 -0600 - Allow parallel pg_dump to - use (Kevin Grittner) @@ -6086,7 +8151,7 @@ Branch: REL9_1_STABLE [40c333c39] 2014-07-25 19:48:54 -0400 - Improve performance of pg_dump when the database + Improve performance of pg_dump when the database contains many instances of multiple dependency paths between the same two objects (Tom Lane) @@ -6100,7 +8165,7 @@ Branch: REL9_2_STABLE [3c5ce5102] 2014-11-13 18:19:35 -0500 - Fix pg_dumpall to restore its ability to dump from + Fix pg_dumpall to restore its ability to dump from pre-8.1 servers (Gilles Darold) @@ -6130,7 +8195,7 @@ Branch: REL9_0_STABLE [31021e7ba] 2014-10-17 12:49:15 -0400 - Fix core dump in pg_dump --binary-upgrade on zero-column + Fix core dump in pg_dump --binary-upgrade on zero-column composite type (Rushabh Lathia) @@ -6143,7 +8208,7 @@ Branch: REL9_3_STABLE [26a4e0ed7] 2014-11-15 01:21:11 +0100 Fix failure to fsync tables in nondefault tablespaces - during pg_upgrade (Abhijit Menon-Sen, Andres Freund) + during pg_upgrade (Abhijit Menon-Sen, Andres Freund) @@ -6159,7 +8224,7 @@ Branch: REL9_3_STABLE [fca9f349b] 2014-08-07 14:56:13 -0400 - In pg_upgrade, cope with cases where the new cluster + In pg_upgrade, cope with cases where the new cluster creates a TOAST table for a table that didn't previously have one (Bruce Momjian) @@ -6176,8 +8241,8 @@ Branch: REL9_3_STABLE [24ae44914] 2014-08-04 11:45:45 -0400 - In pg_upgrade, don't try to - set autovacuum_multixact_freeze_max_age for the old cluster + In pg_upgrade, don't try to + set autovacuum_multixact_freeze_max_age for the old cluster (Bruce Momjian) @@ -6194,12 +8259,12 @@ Branch: REL9_3_STABLE [5724f491d] 2014-09-11 18:39:46 -0400 - In pg_upgrade, preserve the transaction ID epoch + In pg_upgrade, preserve the transaction ID epoch (Bruce Momjian) - This oversight did not bother PostgreSQL proper, + This oversight did not bother PostgreSQL proper, but could confuse some external replication tools. 
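The zero-column composite type mentioned in the pg_dump --binary-upgrade entry above is simply:

    -- a composite type with no attributes; dumping it in binary-upgrade
    -- mode no longer crashes
    CREATE TYPE zero_cols AS ();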
@@ -6215,7 +8280,7 @@ Branch: REL9_1_STABLE [2a0bfa4d6] 2015-01-03 20:54:13 +0100 - Prevent WAL files created by pg_basebackup -x/-X from + Prevent WAL files created by pg_basebackup -x/-X from being archived again when the standby is promoted (Andres Freund) @@ -6227,7 +8292,7 @@ Branch: REL9_3_STABLE [9747a9898] 2014-08-02 15:19:45 +0900 - Fix memory leak in pg_receivexlog (Fujii Masao) + Fix memory leak in pg_receivexlog (Fujii Masao) @@ -6238,7 +8303,7 @@ Branch: REL9_3_STABLE [39217ce41] 2014-08-02 14:59:10 +0900 - Fix unintended suppression of pg_receivexlog verbose + Fix unintended suppression of pg_receivexlog verbose messages (Fujii Masao) @@ -6251,8 +8316,8 @@ Branch: REL9_2_STABLE [5ff8c2d7d] 2014-09-19 13:19:05 -0400 - Fix failure of contrib/auto_explain to print per-node - timing information when doing EXPLAIN ANALYZE (Tom Lane) + Fix failure of contrib/auto_explain to print per-node + timing information when doing EXPLAIN ANALYZE (Tom Lane) @@ -6265,7 +8330,7 @@ Branch: REL9_1_STABLE [9807c8220] 2014-08-28 18:21:20 -0400 - Fix upgrade-from-unpackaged script for contrib/citext + Fix upgrade-from-unpackaged script for contrib/citext (Tom Lane) @@ -6278,7 +8343,7 @@ Branch: REL9_3_STABLE [f44290b7b] 2014-11-04 16:54:59 -0500 Avoid integer overflow and buffer overrun - in contrib/hstore's hstore_to_json() + in contrib/hstore's hstore_to_json() (Heikki Linnakangas) @@ -6290,7 +8355,7 @@ Branch: REL9_3_STABLE [55c880797] 2014-12-01 11:44:48 -0500 - Fix recognition of numbers in hstore_to_json_loose(), + Fix recognition of numbers in hstore_to_json_loose(), so that JSON numbers and strings are correctly distinguished (Andrew Dunstan) @@ -6307,7 +8372,7 @@ Branch: REL9_0_STABLE [9dc2a3fd0] 2014-07-22 11:46:04 -0400 Fix block number checking - in contrib/pageinspect's get_raw_page() + in contrib/pageinspect's get_raw_page() (Tom Lane) @@ -6327,7 +8392,7 @@ Branch: REL9_0_STABLE [ef5a3b957] 2014-11-11 17:22:58 -0500 - Fix contrib/pgcrypto's pgp_sym_decrypt() + Fix contrib/pgcrypto's pgp_sym_decrypt() to not fail on messages whose length is 6 less than a power of 2 (Marko Tiikkaja) @@ -6342,7 +8407,7 @@ Branch: REL9_1_STABLE [a855c90a7] 2014-11-19 12:26:06 -0500 - Fix file descriptor leak in contrib/pg_test_fsync + Fix file descriptor leak in contrib/pg_test_fsync (Jeff Janes) @@ -6364,12 +8429,12 @@ Branch: REL9_0_STABLE [dc9a506e6] 2015-01-29 20:18:46 -0500 Handle unexpected query results, especially NULLs, safely in - contrib/tablefunc's connectby() + contrib/tablefunc's connectby() (Michael Paquier) - connectby() previously crashed if it encountered a NULL + connectby() previously crashed if it encountered a NULL key value. It now prints that row but doesn't recurse further. @@ -6384,12 +8449,12 @@ Branch: REL9_0_STABLE [6a694bbab] 2014-11-27 11:13:03 -0500 - Avoid a possible crash in contrib/xml2's - xslt_process() (Mark Simonetti) + Avoid a possible crash in contrib/xml2's + xslt_process() (Mark Simonetti) - libxslt seems to have an undocumented dependency on + libxslt seems to have an undocumented dependency on the order in which resources are freed; reorder our calls to avoid a crash. @@ -6404,7 +8469,7 @@ Branch: REL9_1_STABLE [7225abf00] 2014-11-05 11:34:25 -0500 - Mark some contrib I/O functions with correct volatility + Mark some contrib I/O functions with correct volatility properties (Tom Lane) @@ -6525,10 +8590,10 @@ Branch: REL9_0_STABLE [4c6d0abde] 2014-07-22 11:02:25 -0400 With OpenLDAP versions 2.4.24 through 2.4.31, - inclusive, PostgreSQL backends can crash at exit. 
- Raise a warning during configure based on the + inclusive, PostgreSQL backends can crash at exit. + Raise a warning during configure based on the compile-time OpenLDAP version number, and test the crashing scenario - in the contrib/dblink regression test. + in the contrib/dblink regression test. @@ -6542,7 +8607,7 @@ Branch: REL9_0_STABLE [e6841c4d6] 2014-08-18 23:01:23 -0400 - In non-MSVC Windows builds, ensure libpq.dll is installed + In non-MSVC Windows builds, ensure libpq.dll is installed with execute permissions (Noah Misch) @@ -6559,13 +8624,13 @@ Branch: REL9_0_STABLE [338ff75fc] 2015-01-19 23:44:33 -0500 - Make pg_regress remove any temporary installation it + Make pg_regress remove any temporary installation it created upon successful exit (Tom Lane) This results in a very substantial reduction in disk space usage - during make check-world, since that sequence involves + during make check-world, since that sequence involves creation of numerous temporary installations. @@ -6585,15 +8650,15 @@ Branch: REL9_0_STABLE [870a980aa] 2014-10-16 15:22:26 -0400 - Previously, PostgreSQL assumed that the UTC offset - associated with a time zone abbreviation (such as EST) + Previously, PostgreSQL assumed that the UTC offset + associated with a time zone abbreviation (such as EST) never changes in the usage of any particular locale. However this assumption fails in the real world, so introduce the ability for a zone abbreviation to represent a UTC offset that sometimes changes. Update the zone abbreviation definition files to make use of this feature in timezone locales that have changed the UTC offset of their abbreviations since 1970 (according to the IANA timezone database). - In such timezones, PostgreSQL will now associate the + In such timezones, PostgreSQL will now associate the correct UTC offset with the abbreviation depending on the given date. @@ -6618,9 +8683,9 @@ Branch: REL9_0_STABLE [8b70023af] 2014-12-24 16:35:54 -0500 Add CST (China Standard Time) to our lists. - Remove references to ADT as Arabia Daylight Time, an + Remove references to ADT as Arabia Daylight Time, an abbreviation that's been out of use since 2007; therefore, claiming - there is a conflict with Atlantic Daylight Time doesn't seem + there is a conflict with Atlantic Daylight Time doesn't seem especially helpful. Fix entirely incorrect GMT offsets for CKT (Cook Islands), FJT, and FJST (Fiji); we didn't even have them on the proper side of the date line. @@ -6647,21 +8712,21 @@ Branch: REL9_0_STABLE [b6391f587] 2014-10-04 14:18:43 -0400 - Update time zone data files to tzdata release 2015a. + Update time zone data files to tzdata release 2015a. The IANA timezone database has adopted abbreviations of the form - AxST/AxDT + AxST/AxDT for all Australian time zones, reflecting what they believe to be current majority practice Down Under. These names do not conflict with usage elsewhere (other than ACST for Acre Summer Time, which has been in disuse since 1994). Accordingly, adopt these names into - our Default timezone abbreviation set. - The Australia abbreviation set now contains only CST, EAST, + our Default timezone abbreviation set. + The Australia abbreviation set now contains only CST, EAST, EST, SAST, SAT, and WST, all of which are thought to be mostly historical usage. Note that SAST has also been changed to be South - Africa Standard Time in the Default abbreviation set. + Africa Standard Time in the Default abbreviation set. 
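A sketch of the connectby() behavior described above (hypothetical table; the row whose key is NULL is now printed, and recursion simply stops there instead of crashing):

    CREATE EXTENSION IF NOT EXISTS tablefunc;
    CREATE TABLE tree (keyid text, parent_keyid text);
    INSERT INTO tree VALUES ('root', NULL), ('a', 'root'), (NULL, 'a');
    SELECT * FROM connectby('tree', 'keyid', 'parent_keyid', 'root', 0)
             AS t(keyid text, parent_keyid text, level int);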
@@ -6690,7 +8755,7 @@ Branch: REL9_0_STABLE [b6391f587] 2014-10-04 14:18:43 -0400 This release contains a variety of fixes from 9.3.4. For information about new features in the 9.3 major release, see - . + . @@ -6702,7 +8767,7 @@ Branch: REL9_0_STABLE [b6391f587] 2014-10-04 14:18:43 -0400 However, this release corrects a logic error - in pg_upgrade, as well as an index corruption problem in + in pg_upgrade, as well as an index corruption problem in some GiST indexes. See the first two changelog entries below to find out whether your installation has been affected and what steps you should take if so. @@ -6710,7 +8775,7 @@ Branch: REL9_0_STABLE [b6391f587] 2014-10-04 14:18:43 -0400 Also, if you are upgrading from a version earlier than 9.3.4, - see . + see . @@ -6729,15 +8794,15 @@ Branch: REL9_3_STABLE [cc5841809] 2014-06-24 16:11:06 -0400 - In pg_upgrade, remove pg_multixact files - left behind by initdb (Bruce Momjian) + In pg_upgrade, remove pg_multixact files + left behind by initdb (Bruce Momjian) - If you used a pre-9.3.5 version of pg_upgrade to + If you used a pre-9.3.5 version of pg_upgrade to upgrade a database cluster to 9.3, it might have left behind a file - $PGDATA/pg_multixact/offsets/0000 that should not be - there and will eventually cause problems in VACUUM. + $PGDATA/pg_multixact/offsets/0000 that should not be + there and will eventually cause problems in VACUUM. However, in common cases this file is actually valid and must not be removed. To determine whether your installation has this problem, run this @@ -6750,9 +8815,9 @@ SELECT EXISTS (SELECT * FROM list WHERE file = '0000') AND EXISTS (SELECT * FROM list WHERE file != '0000') AS file_0000_removal_required; - If this query returns t, manually remove the file - $PGDATA/pg_multixact/offsets/0000. - Do nothing if the query returns f. + If this query returns t, manually remove the file + $PGDATA/pg_multixact/offsets/0000. + Do nothing if the query returns f. @@ -6768,15 +8833,15 @@ Branch: REL8_4_STABLE [e31d77c96] 2014-05-13 15:27:43 +0300 - Correctly initialize padding bytes in contrib/btree_gist - indexes on bit columns (Heikki Linnakangas) + Correctly initialize padding bytes in contrib/btree_gist + indexes on bit columns (Heikki Linnakangas) This error could result in incorrect query results due to values that should compare equal not being seen as equal. - Users with GiST indexes on bit or bit varying - columns should REINDEX those indexes after installing this + Users with GiST indexes on bit or bit varying + columns should REINDEX those indexes after installing this update. @@ -6861,7 +8926,7 @@ Branch: REL9_3_STABLE [167a2535f] 2014-06-09 15:17:23 -0400 - Fix wraparound handling for pg_multixact/members + Fix wraparound handling for pg_multixact/members (Álvaro Herrera) @@ -6875,12 +8940,12 @@ Branch: REL9_3_STABLE [9a28c3752] 2014-06-27 14:43:52 -0400 - Truncate pg_multixact during checkpoints, not - during VACUUM (Álvaro Herrera) + Truncate pg_multixact during checkpoints, not + during VACUUM (Álvaro Herrera) - This change ensures that pg_multixact segments can't be + This change ensures that pg_multixact segments can't be removed if they'd still be needed during WAL replay after a crash. 
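Per the contrib/btree_gist entry above, an assumed example of the recommended rebuild for GiST indexes on bit or bit varying columns:

    CREATE EXTENSION IF NOT EXISTS btree_gist;
    CREATE TABLE flags (f bit(8));
    CREATE INDEX flags_f_gist ON flags USING gist (f);
    -- after installing the update, rebuild such indexes:
    REINDEX INDEX flags_f_gist;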
@@ -6911,7 +8976,7 @@ Branch: REL8_4_STABLE [3ada1fab8] 2014-05-05 14:43:55 -0400 Fix possibly-incorrect cache invalidation during nested calls - to ReceiveSharedInvalidMessages (Andres Freund) + to ReceiveSharedInvalidMessages (Andres Freund) @@ -6937,8 +9002,8 @@ Branch: REL9_1_STABLE [555d0b200] 2014-06-26 10:42:08 -0700 - Fix could not find pathkey item to sort planner failures - with UNION ALL over subqueries reading from tables with + Fix could not find pathkey item to sort planner failures + with UNION ALL over subqueries reading from tables with inheritance children (Tom Lane) @@ -6977,7 +9042,7 @@ Branch: REL9_2_STABLE [0901dbab3] 2014-04-29 13:12:33 -0400 Improve planner to drop constant-NULL inputs - of AND/OR when possible (Tom Lane) + of AND/OR when possible (Tom Lane) @@ -6995,8 +9060,8 @@ Branch: REL9_3_STABLE [d359f71ac] 2014-04-03 22:02:27 -0400 - Ensure that the planner sees equivalent VARIADIC and - non-VARIADIC function calls as equivalent (Tom Lane) + Ensure that the planner sees equivalent VARIADIC and + non-VARIADIC function calls as equivalent (Tom Lane) @@ -7017,13 +9082,13 @@ Branch: REL9_3_STABLE [a1fc36495] 2014-06-24 21:22:47 -0700 - Fix handling of nested JSON objects - in json_populate_recordset() and friends + Fix handling of nested JSON objects + in json_populate_recordset() and friends (Michael Paquier, Tom Lane) - A nested JSON object could result in previous fields of the + A nested JSON object could result in previous fields of the parent object not being shown in the output. @@ -7037,13 +9102,13 @@ Branch: REL9_2_STABLE [25c933c5c] 2014-05-09 12:55:06 -0400 - Fix identification of input type category in to_json() + Fix identification of input type category in to_json() and friends (Tom Lane) - This is known to have led to inadequate quoting of money - fields in the JSON result, and there may have been wrong + This is known to have led to inadequate quoting of money + fields in the JSON result, and there may have been wrong results for other data types as well. @@ -7068,7 +9133,7 @@ Branch: REL8_4_STABLE [70debcf09] 2014-05-01 15:19:23 -0400 This corrects cases where TOAST pointers could be copied into other tables without being dereferenced. If the original data is later deleted, it would lead to errors like missing chunk number 0 - for toast value ... when the now-dangling pointer is used. + for toast value ... when the now-dangling pointer is used. 
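The json_populate_recordset() entry above can be illustrated with a hedged sketch (the type name and JSON literal are hypothetical) in which a nested JSON object lands in a json-typed column without suppressing the sibling fields:

CREATE TYPE rec AS (a int, b json);
-- before the fix, the nested object in "b" could cause earlier fields such
-- as "a" to be omitted from the output; both columns are now populated
SELECT * FROM json_populate_recordset(NULL::rec,
    '[{"a": 1, "b": {"x": 10}}, {"a": 2, "b": {"y": 20}}]');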
@@ -7085,7 +9150,7 @@ Branch: REL8_4_STABLE [a81fbcfb3] 2014-07-11 19:12:56 -0400 - Fix record type has not been registered failures with + Fix record type has not been registered failures with whole-row references to the output of Append plan nodes (Tom Lane) @@ -7121,7 +9186,7 @@ Branch: REL8_4_STABLE [d297c91d4] 2014-06-19 22:14:00 -0400 Fix query-lifespan memory leak while evaluating the arguments for a - function in FROM (Tom Lane) + function in FROM (Tom Lane) @@ -7156,7 +9221,7 @@ Branch: REL8_4_STABLE [f3f40434b] 2014-06-10 22:49:08 -0400 - Fix data encoding error in hungarian.stop (Tom Lane) + Fix data encoding error in hungarian.stop (Tom Lane) @@ -7172,7 +9237,7 @@ Branch: REL9_1_STABLE [dd1a5b09b] 2014-06-24 13:30:41 +0300 Prevent foreign tables from being created with OIDS - when is true + when is true (Etsuro Fujita) @@ -7196,7 +9261,7 @@ Branch: REL8_4_STABLE [80d45ae4e] 2014-06-04 23:27:38 +0200 This could cause problems (at least spurious warnings, and at worst an - infinite loop) if CREATE INDEX or CLUSTER were + infinite loop) if CREATE INDEX or CLUSTER were done later in the same transaction. @@ -7213,12 +9278,12 @@ Branch: REL8_4_STABLE [82fbd88a7] 2014-04-24 13:30:14 -0400 - Clear pg_stat_activity.xact_start - during PREPARE TRANSACTION (Andres Freund) + Clear pg_stat_activity.xact_start + during PREPARE TRANSACTION (Andres Freund) - After the PREPARE, the originating session is no longer in + After the PREPARE, the originating session is no longer in a transaction, so it should not continue to display a transaction start time. @@ -7237,7 +9302,7 @@ Branch: REL8_4_STABLE [4b767789d] 2014-07-15 13:24:07 -0400 - Fix REASSIGN OWNED to not fail for text search objects + Fix REASSIGN OWNED to not fail for text search objects (Álvaro Herrera) @@ -7251,8 +9316,8 @@ Branch: REL9_3_STABLE [e86cfc4bb] 2014-06-27 14:43:45 -0400 - Prevent pg_class.relminmxid values from - going backwards during VACUUM FULL (Álvaro Herrera) + Prevent pg_class.relminmxid values from + going backwards during VACUUM FULL (Álvaro Herrera) @@ -7290,7 +9355,7 @@ Branch: REL9_3_STABLE [e31193d49] 2014-05-01 20:22:39 -0400 Fix dumping of rules/views when subsequent addition of a column has - resulted in multiple input columns matching a USING + resulted in multiple input columns matching a USING specification (Tom Lane) @@ -7305,7 +9370,7 @@ Branch: REL9_3_STABLE [b978ab5f6] 2014-07-19 14:29:05 -0400 Repair view printing for some cases involving functions - in FROM that return a composite type containing dropped + in FROM that return a composite type containing dropped columns (Tom Lane) @@ -7327,7 +9392,7 @@ Branch: REL8_4_STABLE [969735cf1] 2014-04-05 18:16:24 -0400 This ensures that the postmaster will properly clean up after itself - if, for example, it receives SIGINT while still + if, for example, it receives SIGINT while still starting up. 
@@ -7342,7 +9407,7 @@ Branch: REL9_1_STABLE [b7a424371] 2014-04-02 17:11:34 -0400 - Fix client host name lookup when processing pg_hba.conf + Fix client host name lookup when processing pg_hba.conf entries that specify host names instead of IP addresses (Tom Lane) @@ -7363,14 +9428,14 @@ Branch: REL9_2_STABLE [6d25eb314] 2014-04-04 22:03:42 -0400 - Allow the root user to use postgres -C variable and - postgres --describe-config (MauMau) + Allow the root user to use postgres -C variable and + postgres --describe-config (MauMau) The prohibition on starting the server as root does not need to extend to these operations, and relaxing it prevents failure - of pg_ctl in some scenarios. + of pg_ctl in some scenarios. @@ -7388,7 +9453,7 @@ Branch: REL8_4_STABLE [95cefd30e] 2014-06-14 09:41:18 -0400 Secure Unix-domain sockets of temporary postmasters started during - make check (Noah Misch) + make check (Noah Misch) @@ -7397,16 +9462,16 @@ Branch: REL8_4_STABLE [95cefd30e] 2014-06-14 09:41:18 -0400 the operating-system user running the test, as we previously noted in CVE-2014-0067. This change defends against that risk by placing the server's socket in a temporary, mode 0700 subdirectory - of /tmp. The hazard remains however on platforms where + of /tmp. The hazard remains however on platforms where Unix sockets are not supported, notably Windows, because then the temporary postmaster must accept local TCP connections. A useful side effect of this change is to simplify - make check testing in builds that - override DEFAULT_PGSOCKET_DIR. Popular non-default values - like /var/run/postgresql are often not writable by the + make check testing in builds that + override DEFAULT_PGSOCKET_DIR. Popular non-default values + like /var/run/postgresql are often not writable by the build user, requiring workarounds that will no longer be necessary. @@ -7454,7 +9519,7 @@ Branch: REL8_4_STABLE [30e434bdf] 2014-04-05 12:41:40 -0400 On Windows, allow new sessions to absorb values of PGC_BACKEND - parameters (such as ) from the + parameters (such as ) from the configuration file (Amit Kapila) @@ -7480,9 +9545,9 @@ Branch: REL8_4_STABLE [e3f273ff6] 2014-04-30 10:39:03 +0300 - This oversight could cause initdb - and pg_upgrade to fail on Windows, if the installation - path contained both spaces and @ signs. + This oversight could cause initdb + and pg_upgrade to fail on Windows, if the installation + path contained both spaces and @ signs. @@ -7498,7 +9563,7 @@ Branch: REL8_4_STABLE [ae41bb4be] 2014-05-30 18:18:32 -0400 - Fix linking of libpython on macOS (Tom Lane) + Fix linking of libpython on macOS (Tom Lane) @@ -7519,17 +9584,17 @@ Branch: REL8_4_STABLE [664ac3de7] 2014-05-07 21:38:50 -0400 - Avoid buffer bloat in libpq when the server + Avoid buffer bloat in libpq when the server consistently sends data faster than the client can absorb it (Shin-ichi Morita, Tom Lane) - libpq could be coerced into enlarging its input buffer + libpq could be coerced into enlarging its input buffer until it runs out of memory (which would be reported misleadingly - as lost synchronization with server). Under ordinary + as lost synchronization with server). Under ordinary circumstances it's quite far-fetched that data could be continuously - transmitted more quickly than the recv() loop can + transmitted more quickly than the recv() loop can absorb it, but this has been observed when the client is artificially slowed by scheduler constraints. 
@@ -7547,7 +9612,7 @@ Branch: REL8_4_STABLE [b4ae2e37d] 2014-04-16 18:59:48 +0200 - Ensure that LDAP lookup attempts in libpq time out as + Ensure that LDAP lookup attempts in libpq time out as intended (Laurenz Albe) @@ -7570,8 +9635,8 @@ Branch: REL9_0_STABLE [0c2eb989e] 2014-04-09 12:12:32 +0200 - Fix ecpg to do the right thing when an array - of char * is the target for a FETCH statement returning more + Fix ecpg to do the right thing when an array + of char * is the target for a FETCH statement returning more than one row, as well as some other array-handling fixes (Ashutosh Bapat) @@ -7585,13 +9650,13 @@ Branch: REL9_3_STABLE [3080bbaa9] 2014-03-29 17:34:03 -0400 - Fix pg_dump to cope with a materialized view that + Fix pg_dump to cope with a materialized view that depends on a table's primary key (Tom Lane) This occurs if the view's query relies on functional dependency to - abbreviate a GROUP BY list. pg_dump got + abbreviate a GROUP BY list. pg_dump got sufficiently confused that it dumped the materialized view as a regular view. @@ -7605,7 +9670,7 @@ Branch: REL9_3_STABLE [63817f86b] 2014-03-18 10:38:38 -0400 - Fix parsing of pg_dumpall's switch (Tom Lane) @@ -7623,13 +9688,13 @@ Branch: REL8_4_STABLE [6adddac8a] 2014-06-12 20:14:55 -0400 - Fix pg_restore's processing of old-style large object + Fix pg_restore's processing of old-style large object comments (Tom Lane) A direct-to-database restore from an archive file generated by a - pre-9.0 version of pg_dump would usually fail if the + pre-9.0 version of pg_dump would usually fail if the archive contained more than a few comments for large objects. @@ -7644,12 +9709,12 @@ Branch: REL9_2_STABLE [759c9fb63] 2014-07-07 13:24:08 -0400 - Fix pg_upgrade for cases where the new server creates + Fix pg_upgrade for cases where the new server creates a TOAST table but the old version did not (Bruce Momjian) - This rare situation would manifest as relation OID mismatch + This rare situation would manifest as relation OID mismatch errors. @@ -7668,9 +9733,9 @@ Branch: REL9_3_STABLE [e7984cca0] 2014-07-21 11:42:05 -0400 - In pg_upgrade, - preserve pg_database.datminmxid - and pg_class.relminmxid values from the + In pg_upgrade, + preserve pg_database.datminmxid + and pg_class.relminmxid values from the old cluster, or insert reasonable values when upgrading from pre-9.3; also defend against unreasonable values in the core server (Bruce Momjian, Álvaro Herrera, Tom Lane) @@ -7693,13 +9758,13 @@ Branch: REL9_2_STABLE [31f579f09] 2014-05-20 12:20:57 -0400 - Prevent contrib/auto_explain from changing the output of - a user's EXPLAIN (Tom Lane) + Prevent contrib/auto_explain from changing the output of + a user's EXPLAIN (Tom Lane) - If auto_explain is active, it could cause - an EXPLAIN (ANALYZE, TIMING OFF) command to nonetheless + If auto_explain is active, it could cause + an EXPLAIN (ANALYZE, TIMING OFF) command to nonetheless print timing information. 
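To make the pg_dump materialized-view entry above concrete, here is a hedged sketch (table and view names are hypothetical) of a view whose query relies on functional dependency on the primary key to abbreviate its GROUP BY list:

CREATE TABLE orders (id int PRIMARY KEY, customer text, total numeric);
-- "customer" need not appear in GROUP BY because it is functionally
-- dependent on the primary key; pg_dump used to dump such a materialized
-- view as a plain view
CREATE MATERIALIZED VIEW order_totals AS
    SELECT id, customer, sum(total) AS grand_total
    FROM orders
    GROUP BY id;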
@@ -7714,7 +9779,7 @@ Branch: REL9_2_STABLE [3e2cfa42f] 2014-06-20 12:27:04 -0700 - Fix query-lifespan memory leak in contrib/dblink + Fix query-lifespan memory leak in contrib/dblink (MauMau, Joe Conway) @@ -7731,7 +9796,7 @@ Branch: REL8_4_STABLE [df2e62603] 2014-04-17 12:37:53 -0400 - In contrib/pgcrypto functions, ensure sensitive + In contrib/pgcrypto functions, ensure sensitive information is cleared from stack variables before returning (Marko Kreen) @@ -7748,7 +9813,7 @@ Branch: REL9_2_STABLE [f6d6b7b1e] 2014-06-30 17:00:40 -0400 Prevent use of already-freed memory in - contrib/pgstattuple's pgstat_heap() + contrib/pgstattuple's pgstat_heap() (Noah Misch) @@ -7765,13 +9830,13 @@ Branch: REL8_4_STABLE [fd785441f] 2014-05-29 13:51:18 -0400 - In contrib/uuid-ossp, cache the state of the OSSP UUID + In contrib/uuid-ossp, cache the state of the OSSP UUID library across calls (Tom Lane) This improves the efficiency of UUID generation and reduces the amount - of entropy drawn from /dev/urandom, on platforms that + of entropy drawn from /dev/urandom, on platforms that have that. @@ -7789,7 +9854,7 @@ Branch: REL8_4_STABLE [c51da696b] 2014-07-19 15:01:45 -0400 - Update time zone data files to tzdata release 2014e + Update time zone data files to tzdata release 2014e for DST law changes in Crimea, Egypt, and Morocco. @@ -7810,7 +9875,7 @@ Branch: REL8_4_STABLE [c51da696b] 2014-07-19 15:01:45 -0400 This release contains a variety of fixes from 9.3.3. For information about new features in the 9.3 major release, see - . + . @@ -7829,7 +9894,7 @@ Branch: REL8_4_STABLE [c51da696b] 2014-07-19 15:01:45 -0400 Also, if you are upgrading from a version earlier than 9.3.3, - see . + see . @@ -7900,7 +9965,7 @@ Branch: REL9_0_STABLE [7aea1050e] 2014-03-13 12:03:07 -0400 Avoid race condition in checking transaction commit status during - receipt of a NOTIFY message (Marko Tiikkaja) + receipt of a NOTIFY message (Marko Tiikkaja) @@ -7918,8 +9983,8 @@ Branch: REL9_3_STABLE [3973034e6] 2014-03-06 11:37:04 -0500 - Allow materialized views to be referenced in UPDATE - and DELETE commands (Michael Paquier) + Allow materialized views to be referenced in UPDATE + and DELETE commands (Michael Paquier) @@ -7962,7 +10027,7 @@ Branch: REL8_4_STABLE [dd378dd1e] 2014-02-18 12:44:36 -0500 - Remove incorrect code that tried to allow OVERLAPS with + Remove incorrect code that tried to allow OVERLAPS with single-element row arguments (Joshua Yanovski) @@ -7985,17 +10050,17 @@ Branch: REL8_4_STABLE [f043bddfe] 2014-03-06 19:31:22 -0500 - Avoid getting more than AccessShareLock when de-parsing a + Avoid getting more than AccessShareLock when de-parsing a rule or view (Dean Rasheed) - This oversight resulted in pg_dump unexpectedly - acquiring RowExclusiveLock locks on tables mentioned as - the targets of INSERT/UPDATE/DELETE + This oversight resulted in pg_dump unexpectedly + acquiring RowExclusiveLock locks on tables mentioned as + the targets of INSERT/UPDATE/DELETE commands in rules. While usually harmless, that could interfere with concurrent transactions that tried to acquire, for example, - ShareLock on those tables. + ShareLock on those tables. 
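The entry above about referencing materialized views in UPDATE and DELETE commands can be illustrated with a short sketch; the tables and view here are hypothetical:

CREATE MATERIALIZED VIEW inactive_users AS
    SELECT id FROM users WHERE last_login < date '2013-01-01';
-- a materialized view may now appear as an additional relation in DELETE
DELETE FROM sessions USING inactive_users
    WHERE sessions.user_id = inactive_users.id;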
@@ -8030,9 +10095,9 @@ Branch: REL9_3_STABLE [e8655a77f] 2014-02-21 17:10:49 -0500 Use non-default selectivity estimates for - value IN (list) and - value operator ANY - (array) + value IN (list) and + value operator ANY + (array) expressions when the righthand side is a stable expression (Tom Lane) @@ -8046,16 +10111,16 @@ Branch: REL9_3_STABLE [13ea43ab8] 2014-03-05 13:03:29 -0300 Remove the correct per-database statistics file during DROP - DATABASE (Tomas Vondra) + DATABASE (Tomas Vondra) This fix prevents a permanent leak of statistics file space. - Users who have done many DROP DATABASE commands since - upgrading to PostgreSQL 9.3 may wish to check their + Users who have done many DROP DATABASE commands since + upgrading to PostgreSQL 9.3 may wish to check their statistics directory and delete statistics files that do not correspond to any existing database. Please note - that db_0.stat should not be removed. + that db_0.stat should not be removed. @@ -8067,15 +10132,15 @@ Branch: REL9_3_STABLE [dcd1131c8] 2014-03-06 21:40:50 +0200 - Fix walsender ping logic to avoid inappropriate + Fix walsender ping logic to avoid inappropriate disconnects under continuous load (Andres Freund, Heikki Linnakangas) - walsender failed to send ping messages to the client + walsender failed to send ping messages to the client if it was constantly busy sending WAL data; but it expected to see ping responses despite that, and would therefore disconnect - once elapsed. + once elapsed. @@ -8089,8 +10154,8 @@ Branch: REL9_1_STABLE [65e8dbb18] 2014-03-17 20:42:35 +0900 - Fix walsender's failure to shut down cleanly when client - is pg_receivexlog (Fujii Masao) + Fix walsender's failure to shut down cleanly when client + is pg_receivexlog (Fujii Masao) @@ -8131,7 +10196,7 @@ Branch: REL9_3_STABLE [5a7e75849] 2014-02-20 10:46:54 +0200 - Add read-only parameter to + Add read-only parameter to display whether page checksums are enabled (Heikki Linnakangas) @@ -8153,13 +10218,13 @@ Branch: REL8_4_STABLE [172c53e92] 2014-03-13 20:59:57 -0400 - Prevent interrupts while reporting non-ERROR messages + Prevent interrupts while reporting non-ERROR messages (Tom Lane) This guards against rare server-process freezeups due to recursive - entry to syslog(), and perhaps other related problems. + entry to syslog(), and perhaps other related problems. @@ -8187,13 +10252,13 @@ Branch: REL9_2_STABLE [b315b767f] 2014-03-10 15:47:13 -0400 - Fix tracking of psql script line numbers - during \copy from out-of-line data + Fix tracking of psql script line numbers + during \copy from out-of-line data (Kumar Rajeev Rastogi, Amit Khandekar) - \copy ... from incremented the script file line number + \copy ... from incremented the script file line number for each data line, even if the data was not coming from the script file. This mistake resulted in wrong line numbers being reported for any errors occurring later in the same script file. @@ -8208,12 +10273,12 @@ Branch: REL9_3_STABLE [73f0483fd] 2014-03-07 16:36:50 -0500 - Fix contrib/postgres_fdw to handle multiple join + Fix contrib/postgres_fdw to handle multiple join conditions properly (Tom Lane) - This oversight could result in sending WHERE clauses to + This oversight could result in sending WHERE clauses to the remote server for execution even though the clauses are not known to have the same semantics on the remote server (for example, clauses that use non-built-in operators). 
The query might succeed anyway, @@ -8233,7 +10298,7 @@ Branch: REL9_0_STABLE [665515539] 2014-03-16 11:47:37 +0100 - Prevent intermittent could not reserve shared memory region + Prevent intermittent could not reserve shared memory region failures on recent Windows versions (MauMau) @@ -8250,7 +10315,7 @@ Branch: REL8_4_STABLE [6e6c2c2e1] 2014-03-15 13:36:57 -0400 - Update time zone data files to tzdata release 2014a + Update time zone data files to tzdata release 2014a for DST law changes in Fiji and Turkey, plus historical changes in Israel and Ukraine. @@ -8272,7 +10337,7 @@ Branch: REL8_4_STABLE [6e6c2c2e1] 2014-03-15 13:36:57 -0400 This release contains a variety of fixes from 9.3.2. For information about new features in the 9.3 major release, see - . + . @@ -8301,7 +10366,7 @@ Branch: REL8_4_STABLE [6e6c2c2e1] 2014-03-15 13:36:57 -0400 Also, if you are upgrading from a version earlier than 9.3.2, - see . + see . @@ -8323,19 +10388,19 @@ Branch: REL8_4_STABLE [ff35425c8] 2014-02-17 09:33:38 -0500 - Shore up GRANT ... WITH ADMIN OPTION restrictions + Shore up GRANT ... WITH ADMIN OPTION restrictions (Noah Misch) - Granting a role without ADMIN OPTION is supposed to + Granting a role without ADMIN OPTION is supposed to prevent the grantee from adding or removing members from the granted role, but this restriction was easily bypassed by doing SET - ROLE first. The security impact is mostly that a role member can + ROLE first. The security impact is mostly that a role member can revoke the access of others, contrary to the wishes of his grantor. Unapproved role member additions are a lesser concern, since an uncooperative role member could provide most of his rights to others - anyway by creating views or SECURITY DEFINER functions. + anyway by creating views or SECURITY DEFINER functions. (CVE-2014-0060) @@ -8358,7 +10423,7 @@ Branch: REL8_4_STABLE [823b9dc25] 2014-02-17 09:33:38 -0500 The primary role of PL validator functions is to be called implicitly - during CREATE FUNCTION, but they are also normal SQL + during CREATE FUNCTION, but they are also normal SQL functions that a user can call explicitly. Calling a validator on a function actually written in some other language was not checked for and could be exploited for privilege-escalation purposes. @@ -8388,7 +10453,7 @@ Branch: REL8_4_STABLE [e46476133] 2014-02-17 09:33:38 -0500 If the name lookups come to different conclusions due to concurrent activity, we might perform some parts of the DDL on a different table - than other parts. At least in the case of CREATE INDEX, + than other parts. At least in the case of CREATE INDEX, this can be used to cause the permissions checks to be performed against a different table than the index creation, allowing for a privilege escalation attack. @@ -8412,12 +10477,12 @@ Branch: REL8_4_STABLE [d0ed1a6c0] 2014-02-17 09:33:39 -0500 - The MAXDATELEN constant was too small for the longest - possible value of type interval, allowing a buffer overrun - in interval_out(). Although the datetime input + The MAXDATELEN constant was too small for the longest + possible value of type interval, allowing a buffer overrun + in interval_out(). Although the datetime input functions were more careful about avoiding buffer overrun, the limit was short enough to cause them to reject some valid inputs, such as - input containing a very long timezone name. The ecpg + input containing a very long timezone name. The ecpg library contained these vulnerabilities along with some of its own. 
(CVE-2014-0063) @@ -8464,7 +10529,7 @@ Branch: REL8_4_STABLE [69d2bc14a] 2014-02-17 11:20:38 -0500 - Use strlcpy() and related functions to provide a clear + Use strlcpy() and related functions to provide a clear guarantee that fixed-size buffers are not overrun. Unlike the preceding items, it is unclear whether these cases really represent live issues, since in most cases there appear to be previous @@ -8486,16 +10551,16 @@ Branch: REL8_4_STABLE [69d2bc14a] 2014-02-17 11:20:38 -0500 - Avoid crashing if crypt() returns NULL (Honza Horak, + Avoid crashing if crypt() returns NULL (Honza Horak, Bruce Momjian) - There are relatively few scenarios in which crypt() - could return NULL, but contrib/chkpass would crash + There are relatively few scenarios in which crypt() + could return NULL, but contrib/chkpass would crash if it did. One practical case in which this could be an issue is - if libc is configured to refuse to execute unapproved - hashing algorithms (e.g., FIPS mode). + if libc is configured to refuse to execute unapproved + hashing algorithms (e.g., FIPS mode). (CVE-2014-0066) @@ -8512,19 +10577,19 @@ Branch: REL8_4_STABLE [f58663ab1] 2014-02-17 11:24:51 -0500 - Document risks of make check in the regression testing + Document risks of make check in the regression testing instructions (Noah Misch, Tom Lane) - Since the temporary server started by make check - uses trust authentication, another user on the same machine + Since the temporary server started by make check + uses trust authentication, another user on the same machine could connect to it as database superuser, and then potentially exploit the privileges of the operating-system user who started the tests. A future release will probably incorporate changes in the testing procedure to prevent this risk, but some public discussion is needed first. So for the moment, just warn people against using - make check when there are untrusted users on the + make check when there are untrusted users on the same machine. (CVE-2014-0067) @@ -8545,7 +10610,7 @@ Branch: REL9_3_STABLE [8e9a16ab8] 2013-12-16 11:29:51 -0300 The logic for tuple freezing was unable to handle some cases involving freezing of - multixact + multixact IDs, with the practical effect that shared row-level locks might be forgotten once old enough. @@ -8554,7 +10619,7 @@ Branch: REL9_3_STABLE [8e9a16ab8] 2013-12-16 11:29:51 -0300 Fixing this required changing the WAL record format for tuple freezing. While this is no issue for standalone servers, when using replication it means that standby servers must be upgraded - to 9.3.3 or later before their masters are. An older standby will + to 9.3.3 or later before their masters are. An older standby will be unable to interpret freeze records generated by a newer master, and will fail with a PANIC message. (In such a case, upgrading the standby should be sufficient to let it resume execution.) @@ -8580,9 +10645,9 @@ Branch: REL9_3_STABLE [fb47de2be] 2014-02-13 19:30:30 -0300 freezing parameters were used for multixact IDs too; but since the consumption rates of transaction IDs and multixact IDs can be quite different, this did not work very well. Introduce new settings - , - , and - + , + , and + to control when to freeze multixacts. @@ -8612,8 +10677,8 @@ Branch: REL9_3_STABLE [db1014bc4] 2013-12-18 13:31:27 -0300 This oversight could allow referential integrity checks to give false positives (for instance, allow deletes that should have been rejected). 
- Applications using the new commands SELECT FOR KEY SHARE - and SELECT FOR NO KEY UPDATE might also have suffered + Applications using the new commands SELECT FOR KEY SHARE + and SELECT FOR NO KEY UPDATE might also have suffered locking failures of this kind. @@ -8626,7 +10691,7 @@ Branch: REL9_3_STABLE [c6cd27e36] 2013-12-05 12:21:55 -0300 - Prevent forgetting valid row locks when one of several + Prevent forgetting valid row locks when one of several holders of a row lock aborts (Álvaro Herrera) @@ -8651,8 +10716,8 @@ Branch: REL9_3_STABLE [2dcc48c35] 2013-12-05 17:47:51 -0300 This mistake could result in spurious could not serialize access - due to concurrent update errors in REPEATABLE READ - and SERIALIZABLE transaction isolation modes. + due to concurrent update errors in REPEATABLE READ + and SERIALIZABLE transaction isolation modes. @@ -8665,7 +10730,7 @@ Branch: REL9_3_STABLE [03db79459] 2014-01-02 18:17:07 -0300 Handle wraparound correctly during extension or truncation - of pg_multixact/members + of pg_multixact/members (Andres Freund, Álvaro Herrera) @@ -8678,7 +10743,7 @@ Branch: REL9_3_STABLE [948a3dfbb] 2014-01-02 18:17:29 -0300 - Fix handling of 5-digit filenames in pg_multixact/members + Fix handling of 5-digit filenames in pg_multixact/members (Álvaro Herrera) @@ -8715,7 +10780,7 @@ Branch: REL9_3_STABLE [85d3b3c3a] 2013-12-19 16:39:59 -0300 This fixes a performance regression from pre-9.3 versions when doing - SELECT FOR UPDATE followed by UPDATE/DELETE. + SELECT FOR UPDATE followed by UPDATE/DELETE. @@ -8729,7 +10794,7 @@ Branch: REL9_3_STABLE [762bd379a] 2014-02-14 15:18:34 +0200 During archive recovery, prefer highest timeline number when WAL segments with the same ID are present in both the archive - and pg_xlog/ (Kyotaro Horiguchi) + and pg_xlog/ (Kyotaro Horiguchi) @@ -8758,7 +10823,7 @@ Branch: REL8_4_STABLE [9620fede9] 2014-02-12 14:52:32 -0500 The WAL update could be applied to the wrong page, potentially many pages past where it should have been. Aside from corrupting data, - this error has been observed to result in significant bloat + this error has been observed to result in significant bloat of standby servers compared to their masters, due to updates being applied far beyond where the end-of-file should have been. This failure mode does not appear to be a significant risk during crash @@ -8787,7 +10852,7 @@ Branch: REL9_0_STABLE [5301c8395] 2014-01-08 14:34:21 +0200 was already consistent at the start of replay, thus possibly allowing hot-standby queries before the database was really consistent. Other symptoms such as PANIC: WAL contains references to invalid - pages were also possible. + pages were also possible. @@ -8815,13 +10880,13 @@ Branch: REL9_0_STABLE [5d742b9ce] 2014-01-14 17:35:00 -0500 Fix improper locking of btree index pages while replaying - a VACUUM operation in hot-standby mode (Andres Freund, + a VACUUM operation in hot-standby mode (Andres Freund, Heikki Linnakangas, Tom Lane) This error could result in PANIC: WAL contains references to - invalid pages failures. + invalid pages failures. 
@@ -8857,8 +10922,8 @@ Branch: REL9_1_STABLE [0402f2441] 2014-01-08 23:31:01 +0200 - When pause_at_recovery_target - and recovery_target_inclusive are both set, ensure the + When pause_at_recovery_target + and recovery_target_inclusive are both set, ensure the target record is applied before pausing, not after (Heikki Linnakangas) @@ -8887,14 +10952,14 @@ Branch: REL9_3_STABLE [478af9b79] 2013-12-13 11:50:25 -0500 Prevent timeout interrupts from taking control away from mainline - code unless ImmediateInterruptOK is set + code unless ImmediateInterruptOK is set (Andres Freund, Tom Lane) This is a serious issue for any application making use of statement timeouts, as it could cause all manner of strange failures after a - timeout occurred. We have seen reports of stuck spinlocks, + timeout occurred. We have seen reports of stuck spinlocks, ERRORs being unexpectedly promoted to PANICs, unkillable backends, and other misbehaviors. @@ -8917,7 +10982,7 @@ Branch: REL8_4_STABLE [458b20f2d] 2014-01-31 21:41:09 -0500 Ensure that signal handlers don't attempt to use the - process's MyProc pointer after it's no longer valid. + process's MyProc pointer after it's no longer valid. @@ -8948,13 +11013,13 @@ Branch: REL8_4_STABLE [01b882fd8] 2014-01-29 20:04:14 -0500 - Fix unsafe references to errno within error reporting + Fix unsafe references to errno within error reporting logic (Christian Kruse) This would typically lead to odd behaviors such as missing or - inappropriate HINT fields. + inappropriate HINT fields. @@ -8970,7 +11035,7 @@ Branch: REL8_4_STABLE [d0070ac81] 2014-01-11 16:35:44 -0500 - Fix possible crashes from using ereport() too early + Fix possible crashes from using ereport() too early during server startup (Tom Lane) @@ -9014,7 +11079,7 @@ Branch: REL8_4_STABLE [a8a46d846] 2014-02-13 14:24:58 -0500 - Fix length checking for Unicode identifiers (U&"..." + Fix length checking for Unicode identifiers (U&"..." syntax) containing escapes (Tom Lane) @@ -9056,7 +11121,7 @@ Branch: REL9_0_STABLE [f2eede9b5] 2014-01-21 23:01:40 -0500 A previous patch allowed such keywords to be used without quoting in places such as role identifiers; but it missed cases where a - list of role identifiers was permitted, such as DROP ROLE. + list of role identifiers was permitted, such as DROP ROLE. @@ -9088,7 +11153,7 @@ Branch: REL8_4_STABLE [884c6384a] 2013-12-10 16:10:36 -0500 Fix possible crash due to invalid plan for nested sub-selects, such - as WHERE (... x IN (SELECT ...) ...) IN (SELECT ...) + as WHERE (... x IN (SELECT ...) ...) IN (SELECT ...) (Tom Lane) @@ -9101,13 +11166,13 @@ Branch: REL9_3_STABLE [a4aa854ca] 2014-01-30 14:51:19 -0500 - Fix mishandling of WHERE conditions pulled up from - a LATERAL subquery (Tom Lane) + Fix mishandling of WHERE conditions pulled up from + a LATERAL subquery (Tom Lane) The typical symptom of this bug was a JOIN qualification - cannot refer to other relations error, though subtle logic + cannot refer to other relations error, though subtle logic errors in created plans seem possible as well. 
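For the Unicode-identifier length-checking entry above, a hedged example of the U&"..." syntax in question (the escapes below spell the ordinary identifier data):

-- U&"d\0061t\0061" uses Unicode escapes for the letter "a"
SELECT 1 AS U&"d\0061t\0061";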
@@ -9120,8 +11185,8 @@ Branch: REL9_3_STABLE [27ff4cfe7] 2014-01-11 19:03:15 -0500 - Disallow LATERAL references to the target table of - an UPDATE/DELETE (Tom Lane) + Disallow LATERAL references to the target table of + an UPDATE/DELETE (Tom Lane) @@ -9139,12 +11204,12 @@ Branch: REL9_2_STABLE [5d545b7ed] 2013-12-14 17:34:00 -0500 - Fix UPDATE/DELETE of an inherited target table - that has UNION ALL subqueries (Tom Lane) + Fix UPDATE/DELETE of an inherited target table + that has UNION ALL subqueries (Tom Lane) - Without this fix, UNION ALL subqueries aren't correctly + Without this fix, UNION ALL subqueries aren't correctly inserted into the update plans for inheritance child tables after the first one, typically resulting in no update happening for those child table(s). @@ -9159,7 +11224,7 @@ Branch: REL9_3_STABLE [663f8419b] 2013-12-23 22:18:23 -0500 - Fix ANALYZE to not fail on a column that's a domain over + Fix ANALYZE to not fail on a column that's a domain over a range type (Tom Lane) @@ -9176,12 +11241,12 @@ Branch: REL8_4_STABLE [00b77771a] 2014-01-11 13:42:11 -0500 - Ensure that ANALYZE creates statistics for a table column - even when all the values in it are too wide (Tom Lane) + Ensure that ANALYZE creates statistics for a table column + even when all the values in it are too wide (Tom Lane) - ANALYZE intentionally omits very wide values from its + ANALYZE intentionally omits very wide values from its histogram and most-common-values calculations, but it neglected to do something sane in the case that all the sampled entries are too wide. @@ -9199,14 +11264,14 @@ Branch: REL8_4_STABLE [0fb4e3ceb] 2014-01-18 18:50:47 -0500 - In ALTER TABLE ... SET TABLESPACE, allow the database's + In ALTER TABLE ... SET TABLESPACE, allow the database's default tablespace to be used without a permissions check (Stephen Frost) - CREATE TABLE has always allowed such usage, - but ALTER TABLE didn't get the memo. + CREATE TABLE has always allowed such usage, + but ALTER TABLE didn't get the memo. @@ -9234,8 +11299,8 @@ Branch: REL8_4_STABLE [57ac7d8a7] 2014-01-08 20:18:24 -0500 - Fix cannot accept a set error when some arms of - a CASE return a set and others don't (Tom Lane) + Fix cannot accept a set error when some arms of + a CASE return a set and others don't (Tom Lane) @@ -9316,12 +11381,12 @@ Branch: REL8_4_STABLE [6141983fb] 2014-02-10 10:00:50 +0200 - Fix possible misbehavior in plainto_tsquery() + Fix possible misbehavior in plainto_tsquery() (Heikki Linnakangas) - Use memmove() not memcpy() for copying + Use memmove() not memcpy() for copying overlapping memory regions. There have been no field reports of this actually causing trouble, but it's certainly risky. 
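A minimal sketch of the ALTER TABLE ... SET TABLESPACE entry above, assuming a hypothetical table and that pg_default is the database's default tablespace:

-- moving a table to the database's default tablespace no longer requires
-- a separate permissions check on that tablespace
ALTER TABLE measurements SET TABLESPACE pg_default;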
@@ -9337,8 +11402,8 @@ Branch: REL9_1_STABLE [026a91f86] 2014-01-07 18:00:36 +0100 - Fix placement of permissions checks in pg_start_backup() - and pg_stop_backup() (Andres Freund, Magnus Hagander) + Fix placement of permissions checks in pg_start_backup() + and pg_stop_backup() (Andres Freund, Magnus Hagander) @@ -9359,7 +11424,7 @@ Branch: REL8_4_STABLE [69f77d756] 2013-12-15 11:11:11 +0900 - Accept SHIFT_JIS as an encoding name for locale checking + Accept SHIFT_JIS as an encoding name for locale checking purposes (Tatsuo Ishii) @@ -9373,14 +11438,14 @@ Branch: REL9_2_STABLE [888b56570] 2014-02-03 14:46:57 -0500 - Fix *-qualification of named parameters in SQL-language + Fix *-qualification of named parameters in SQL-language functions (Tom Lane) Given a composite-type parameter - named foo, $1.* worked fine, - but foo.* not so much. + named foo, $1.* worked fine, + but foo.* not so much. @@ -9396,11 +11461,11 @@ Branch: REL8_4_STABLE [5525529db] 2014-01-23 23:02:30 +0900 - Fix misbehavior of PQhost() on Windows (Fujii Masao) + Fix misbehavior of PQhost() on Windows (Fujii Masao) - It should return localhost if no host has been specified. + It should return localhost if no host has been specified. @@ -9416,14 +11481,14 @@ Branch: REL8_4_STABLE [7644a7bd8] 2014-02-13 18:45:32 -0500 - Improve error handling in libpq and psql - for failures during COPY TO STDOUT/FROM STDIN (Tom Lane) + Improve error handling in libpq and psql + for failures during COPY TO STDOUT/FROM STDIN (Tom Lane) In particular this fixes an infinite loop that could occur in 9.2 and up if the server connection was lost during COPY FROM - STDIN. Variants of that scenario might be possible in older + STDIN. Variants of that scenario might be possible in older versions, or with other client applications. 
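The *-qualification entry above can be illustrated with a hedged sketch; the table, function, and parameter names are hypothetical:

CREATE TABLE items (id int, label text);
CREATE FUNCTION item_cols(itm items) RETURNS items
    AS 'SELECT itm.*'       -- previously only $1.* expanded reliably here
    LANGUAGE sql;
SELECT * FROM item_cols(ROW(1, 'widget')::items);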
@@ -9438,7 +11503,7 @@ Branch: REL9_2_STABLE [fa28f9cba] 2014-01-04 16:05:23 -0500 Fix incorrect translation handling in - some psql \d commands + some psql \d commands (Peter Eisentraut, Tom Lane) @@ -9452,7 +11517,7 @@ Branch: REL9_2_STABLE [0ae288d2d] 2014-02-12 14:51:00 +0100 - Ensure pg_basebackup's background process is killed + Ensure pg_basebackup's background process is killed when exiting its foreground process (Magnus Hagander) @@ -9468,7 +11533,7 @@ Branch: REL9_1_STABLE [c6e5c4dd1] 2014-02-09 12:09:55 +0100 Fix possible incorrect printing of filenames - in pg_basebackup's verbose mode (Magnus Hagander) + in pg_basebackup's verbose mode (Magnus Hagander) @@ -9499,7 +11564,7 @@ Branch: REL8_4_STABLE [d68a65b01] 2014-01-09 15:58:37 +0100 - Fix misaligned descriptors in ecpg (MauMau) + Fix misaligned descriptors in ecpg (MauMau) @@ -9515,7 +11580,7 @@ Branch: REL8_4_STABLE [96de4939c] 2014-01-01 12:44:58 +0100 - In ecpg, handle lack of a hostname in the connection + In ecpg, handle lack of a hostname in the connection parameters properly (Michael Meskes) @@ -9532,7 +11597,7 @@ Branch: REL8_4_STABLE [6c8b16e30] 2013-12-07 16:56:34 -0800 - Fix performance regression in contrib/dblink connection + Fix performance regression in contrib/dblink connection startup (Joe Conway) @@ -9553,7 +11618,7 @@ Branch: REL8_4_STABLE [492b68541] 2014-01-13 15:44:14 +0200 - In contrib/isn, fix incorrect calculation of the check + In contrib/isn, fix incorrect calculation of the check digit for ISMN values (Fabien Coelho) @@ -9566,7 +11631,7 @@ Branch: REL9_3_STABLE [27902bc91] 2013-12-12 19:07:53 +0900 - Fix contrib/pgbench's progress logging to avoid overflow + Fix contrib/pgbench's progress logging to avoid overflow when the scale factor is large (Tatsuo Ishii) @@ -9580,8 +11645,8 @@ Branch: REL9_2_STABLE [27ab1eb7e] 2014-01-21 16:34:35 -0500 - Fix contrib/pg_stat_statement's handling - of CURRENT_DATE and related constructs (Kyotaro + Fix contrib/pg_stat_statement's handling + of CURRENT_DATE and related constructs (Kyotaro Horiguchi) @@ -9595,7 +11660,7 @@ Branch: REL9_3_STABLE [eb3d350db] 2014-02-03 21:30:28 -0500 Improve lost-connection error handling - in contrib/postgres_fdw (Tom Lane) + in contrib/postgres_fdw (Tom Lane) @@ -9628,13 +11693,13 @@ Branch: REL8_4_STABLE [ae3c98b9b] 2014-02-01 15:16:52 -0500 - In Mingw and Cygwin builds, install the libpq DLL - in the bin directory (Andrew Dunstan) + In Mingw and Cygwin builds, install the libpq DLL + in the bin directory (Andrew Dunstan) This duplicates what the MSVC build has long done. It should fix - problems with programs like psql failing to start + problems with programs like psql failing to start because they can't find the DLL. @@ -9650,7 +11715,7 @@ Branch: REL9_0_STABLE [1c0bf372f] 2014-02-01 16:14:15 -0500 - Avoid using the deprecated dllwrap tool in Cygwin builds + Avoid using the deprecated dllwrap tool in Cygwin builds (Marco Atzeri) @@ -9679,8 +11744,8 @@ Branch: REL8_4_STABLE [432735cbf] 2014-02-10 20:48:30 -0500 - Don't generate plain-text HISTORY - and src/test/regress/README files anymore (Tom Lane) + Don't generate plain-text HISTORY + and src/test/regress/README files anymore (Tom Lane) @@ -9689,7 +11754,7 @@ Branch: REL8_4_STABLE [432735cbf] 2014-02-10 20:48:30 -0500 the likely audience for plain-text format. Distribution tarballs will still contain files by these names, but they'll just be stubs directing the reader to consult the main documentation. 
- The plain-text INSTALL file will still be maintained, as + The plain-text INSTALL file will still be maintained, as there is arguably a use-case for that. @@ -9706,13 +11771,13 @@ Branch: REL8_4_STABLE [c0c2d62ac] 2014-02-14 21:59:56 -0500 - Update time zone data files to tzdata release 2013i + Update time zone data files to tzdata release 2013i for DST law changes in Jordan and historical changes in Cuba. - In addition, the zones Asia/Riyadh87, - Asia/Riyadh88, and Asia/Riyadh89 have been + In addition, the zones Asia/Riyadh87, + Asia/Riyadh88, and Asia/Riyadh89 have been removed, as they are no longer maintained by IANA, and never represented actual civil timekeeping practice. @@ -9734,7 +11799,7 @@ Branch: REL8_4_STABLE [c0c2d62ac] 2014-02-14 21:59:56 -0500 This release contains a variety of fixes from 9.3.1. For information about new features in the 9.3 major release, see - . + . @@ -9752,7 +11817,7 @@ Branch: REL8_4_STABLE [c0c2d62ac] 2014-02-14 21:59:56 -0500 Also, if you are upgrading from a version earlier than 9.3.1, - see . + see . @@ -9764,19 +11829,19 @@ Branch: REL8_4_STABLE [c0c2d62ac] 2014-02-14 21:59:56 -0500 - Fix VACUUM's tests to see whether it can - update relfrozenxid (Andres Freund) + Fix VACUUM's tests to see whether it can + update relfrozenxid (Andres Freund) - In some cases VACUUM (either manual or autovacuum) could - incorrectly advance a table's relfrozenxid value, + In some cases VACUUM (either manual or autovacuum) could + incorrectly advance a table's relfrozenxid value, allowing tuples to escape freezing, causing those rows to become invisible once 2^31 transactions have elapsed. The probability of data loss is fairly low since multiple incorrect advancements would need to happen before actual loss occurs, but it's not zero. In 9.2.0 and later, the probability of loss is higher, and it's also possible - to get could not access status of transaction errors as a + to get could not access status of transaction errors as a consequence of this bug. Users upgrading from releases 9.0.4 or 8.4.8 or earlier are not affected, but all later versions contain the bug. @@ -9784,12 +11849,12 @@ Branch: REL8_4_STABLE [c0c2d62ac] 2014-02-14 21:59:56 -0500 The issue can be ameliorated by, after upgrading, vacuuming all tables in all databases while having vacuum_freeze_table_age + linkend="guc-vacuum-freeze-table-age">vacuum_freeze_table_age set to zero. This will fix any latent corruption but will not be able to fix all pre-existing data errors. However, an installation can be presumed safe after performing this vacuuming if it has executed fewer than 2^31 update transactions in its lifetime (check this with - SELECT txid_current() < 2^31). + SELECT txid_current() < 2^31). @@ -9801,14 +11866,14 @@ Branch: REL8_4_STABLE [c0c2d62ac] 2014-02-14 21:59:56 -0500 These bugs could lead to could not access status of - transaction errors, or to duplicate or vanishing rows. + transaction errors, or to duplicate or vanishing rows. Users upgrading from releases prior to 9.3.0 are not affected. The issue can be ameliorated by, after upgrading, vacuuming all tables in all databases while having vacuum_freeze_table_age + linkend="guc-vacuum-freeze-table-age">vacuum_freeze_table_age set to zero. This will fix latent corruption but will not be able to fix all pre-existing data errors. 
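A sketch of the remediation procedure described above, to be run in each database of the cluster (session-level settings shown for brevity):

SET vacuum_freeze_table_age = 0;   -- force full-table scans for this session
VACUUM;                            -- vacuums every table in the current database
RESET vacuum_freeze_table_age;
-- per the note above, the installation can be presumed safe if this is true
SELECT txid_current() < 2^31 AS fewer_than_2_31_transactions;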
@@ -9824,7 +11889,7 @@ Branch: REL8_4_STABLE [c0c2d62ac] 2014-02-14 21:59:56 -0500 - Fix initialization of pg_clog and pg_subtrans + Fix initialization of pg_clog and pg_subtrans during hot standby startup (Andres Freund, Heikki Linnakangas) @@ -9857,7 +11922,7 @@ Branch: REL8_4_STABLE [c0c2d62ac] 2014-02-14 21:59:56 -0500 These bugs could result in incorrect behavior, such as locking or even updating the wrong row, in the presence of concurrent updates. - Spurious unable to fetch updated version of tuple errors + Spurious unable to fetch updated version of tuple errors were also possible. @@ -9869,7 +11934,7 @@ Branch: REL8_4_STABLE [c0c2d62ac] 2014-02-14 21:59:56 -0500 This could lead to corruption of the lock data structures in shared - memory, causing lock already held and other odd errors. + memory, causing lock already held and other odd errors. @@ -9886,7 +11951,7 @@ Branch: REL8_4_STABLE [c0c2d62ac] 2014-02-14 21:59:56 -0500 - Truncate pg_multixact contents during WAL replay + Truncate pg_multixact contents during WAL replay (Andres Freund) @@ -9897,14 +11962,14 @@ Branch: REL8_4_STABLE [c0c2d62ac] 2014-02-14 21:59:56 -0500 - Ensure an anti-wraparound VACUUM counts a page as scanned + Ensure an anti-wraparound VACUUM counts a page as scanned when it's only verified that no tuples need freezing (Sergey Burladyan, Jeff Janes) This bug could result in failing to - advance relfrozenxid, so that the table would still be + advance relfrozenxid, so that the table would still be thought to need another anti-wraparound vacuum. In the worst case the database might even shut down to prevent wraparound. @@ -9933,7 +11998,7 @@ Branch: REL8_4_STABLE [c0c2d62ac] 2014-02-14 21:59:56 -0500 - Fix unexpected spgdoinsert() failure error during SP-GiST + Fix unexpected spgdoinsert() failure error during SP-GiST index creation (Teodor Sigaev) @@ -9951,12 +12016,12 @@ Branch: REL8_4_STABLE [c0c2d62ac] 2014-02-14 21:59:56 -0500 - Historically PostgreSQL has accepted queries like + Historically PostgreSQL has accepted queries like SELECT ... FROM tab1 x CROSS JOIN (tab2 x CROSS JOIN tab3 y) z although a strict reading of the SQL standard would forbid the - duplicate usage of table alias x. A misguided change in + duplicate usage of table alias x. A misguided change in 9.3.0 caused it to reject some such cases that were formerly accepted. Restore the previous behavior. @@ -9964,8 +12029,8 @@ SELECT ... FROM tab1 x CROSS JOIN (tab2 x CROSS JOIN tab3 y) z - Avoid flattening a subquery whose SELECT list contains a - volatile function wrapped inside a sub-SELECT (Tom Lane) + Avoid flattening a subquery whose SELECT list contains a + volatile function wrapped inside a sub-SELECT (Tom Lane) @@ -9982,14 +12047,14 @@ SELECT ... FROM tab1 x CROSS JOIN (tab2 x CROSS JOIN tab3 y) z This error could lead to incorrect plans for queries involving - multiple levels of subqueries within JOIN syntax. + multiple levels of subqueries within JOIN syntax. Fix incorrect planning in cases where the same non-strict expression - appears in multiple WHERE and outer JOIN + appears in multiple WHERE and outer JOIN equality clauses (Tom Lane) @@ -10077,20 +12142,20 @@ SELECT ... FROM tab1 x CROSS JOIN (tab2 x CROSS JOIN tab3 y) z - Fix array slicing of int2vector and oidvector values + Fix array slicing of int2vector and oidvector values (Tom Lane) Expressions of this kind are now implicitly promoted to - regular int2 or oid arrays. + regular int2 or oid arrays. 
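The int2vector/oidvector slicing entry above refers to expressions like the following (shown against a system catalog purely as an illustration); after the fix the slice result is implicitly promoted to a regular int2[]:

-- slice the first entry of an index's column-number vector
SELECT indexrelid, indkey[0:0] FROM pg_index LIMIT 1;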
- Return a valid JSON value when converting an empty hstore value - to json + Return a valid JSON value when converting an empty hstore value + to json (Oskari Saarenmaa) @@ -10105,7 +12170,7 @@ SELECT ... FROM tab1 x CROSS JOIN (tab2 x CROSS JOIN tab3 y) z In some cases, the system would use the simple GMT offset value when it should have used the regular timezone setting that had prevailed before the simple offset was selected. This change also causes - the timeofday function to honor the simple GMT offset + the timeofday function to honor the simple GMT offset zone. @@ -10119,7 +12184,7 @@ SELECT ... FROM tab1 x CROSS JOIN (tab2 x CROSS JOIN tab3 y) z - Properly quote generated command lines in pg_ctl + Properly quote generated command lines in pg_ctl (Naoya Anzai and Tom Lane) @@ -10130,10 +12195,10 @@ SELECT ... FROM tab1 x CROSS JOIN (tab2 x CROSS JOIN tab3 y) z - Fix pg_dumpall to work when a source database + Fix pg_dumpall to work when a source database sets default_transaction_read_only - via ALTER DATABASE SET (Kevin Grittner) + linkend="guc-default-transaction-read-only">default_transaction_read_only + via ALTER DATABASE SET (Kevin Grittner) @@ -10143,19 +12208,19 @@ SELECT ... FROM tab1 x CROSS JOIN (tab2 x CROSS JOIN tab3 y) z - Fix pg_isready to handle its option properly (Fabrízio de Royes Mello and Fujii Masao) - Fix parsing of WAL file names in pg_receivexlog + Fix parsing of WAL file names in pg_receivexlog (Heikki Linnakangas) - This error made pg_receivexlog unable to restart + This error made pg_receivexlog unable to restart streaming after stopping, once at least 4 GB of WAL had been written. @@ -10163,34 +12228,34 @@ SELECT ... FROM tab1 x CROSS JOIN (tab2 x CROSS JOIN tab3 y) z Report out-of-disk-space failures properly - in pg_upgrade (Peter Eisentraut) + in pg_upgrade (Peter Eisentraut) - Make ecpg search for quoted cursor names + Make ecpg search for quoted cursor names case-sensitively (Zoltán Böszörményi) - Fix ecpg's processing of lists of variables - declared varchar (Zoltán Böszörményi) + Fix ecpg's processing of lists of variables + declared varchar (Zoltán Böszörményi) - Make contrib/lo defend against incorrect trigger definitions + Make contrib/lo defend against incorrect trigger definitions (Marc Cousin) - Update time zone data files to tzdata release 2013h + Update time zone data files to tzdata release 2013h for DST law changes in Argentina, Brazil, Jordan, Libya, Liechtenstein, Morocco, and Palestine. Also, new timezone abbreviations WIB, WIT, WITA for Indonesia. @@ -10213,7 +12278,7 @@ SELECT ... FROM tab1 x CROSS JOIN (tab2 x CROSS JOIN tab3 y) z This release contains a variety of fixes from 9.3.0. For information about new features in the 9.3 major release, see - . + . @@ -10224,7 +12289,7 @@ SELECT ... FROM tab1 x CROSS JOIN (tab2 x CROSS JOIN tab3 y) z - However, if you use the hstore extension, see the + However, if you use the hstore extension, see the first changelog entry. @@ -10237,18 +12302,18 @@ SELECT ... FROM tab1 x CROSS JOIN (tab2 x CROSS JOIN tab3 y) z - Ensure new-in-9.3 JSON functionality is added to the hstore + Ensure new-in-9.3 JSON functionality is added to the hstore extension during an update (Andrew Dunstan) - Users who upgraded a pre-9.3 database containing hstore + Users who upgraded a pre-9.3 database containing hstore should execute ALTER EXTENSION hstore UPDATE; after installing 9.3.1, to add two new JSON functions and a cast. 
- (If hstore is already up to date, this command does + (If hstore is already up to date, this command does nothing.) @@ -10281,14 +12346,14 @@ ALTER EXTENSION hstore UPDATE; - Fix timeline handling bugs in pg_receivexlog + Fix timeline handling bugs in pg_receivexlog (Heikki Linnakangas, Andrew Gierth) - Prevent CREATE FUNCTION from checking SET + Prevent CREATE FUNCTION from checking SET variables unless function body checking is enabled (Tom Lane) @@ -10317,7 +12382,7 @@ ALTER EXTENSION hstore UPDATE; Overview - Major enhancements in PostgreSQL 9.3 include: + Major enhancements in PostgreSQL 9.3 include: @@ -10326,7 +12391,7 @@ ALTER EXTENSION hstore UPDATE; - Add materialized + Add materialized views @@ -10334,29 +12399,29 @@ ALTER EXTENSION hstore UPDATE; Make simple views auto-updatable + linkend="sql-createview-updatable-views">auto-updatable - Add many features for the JSON data type, + Add many features for the JSON data type, including operators and functions - to extract elements from JSON values + to extract elements from JSON values - Implement SQL-standard LATERAL option for - FROM-clause subqueries and function calls + Implement SQL-standard LATERAL option for + FROM-clause subqueries and function calls - Allow foreign data + Allow foreign data wrappers to support writes (inserts/updates/deletes) on foreign tables @@ -10364,9 +12429,9 @@ ALTER EXTENSION hstore UPDATE; - Add a Postgres foreign + Add a Postgres foreign data wrapper to allow access to - other Postgres servers + other Postgres servers @@ -10411,8 +12476,8 @@ ALTER EXTENSION hstore UPDATE; A dump/restore using pg_dumpall, or use - of pg_upgrade, is + linkend="app-pg-dumpall">pg_dumpall, or use + of pg_upgrade, is required for those wishing to migrate data from any previous release. @@ -10428,21 +12493,21 @@ ALTER EXTENSION hstore UPDATE; - Rename replication_timeout to wal_sender_timeout + Rename replication_timeout to wal_sender_timeout (Amit Kapila) This setting controls the WAL sender timeout. + linkend="wal">WAL sender timeout. Require superuser privileges to set commit_delay + linkend="guc-commit-delay">commit_delay because it can now potentially delay other sessions (Simon Riggs) @@ -10454,7 +12519,7 @@ ALTER EXTENSION hstore UPDATE; Users who have set work_mem based on the + linkend="guc-work-mem">work_mem based on the previous behavior may need to revisit that setting. @@ -10471,7 +12536,7 @@ ALTER EXTENSION hstore UPDATE; Throw an error if a tuple to be updated or deleted has already been - updated or deleted by a BEFORE trigger (Kevin Grittner) + updated or deleted by a BEFORE trigger (Kevin Grittner) @@ -10481,7 +12546,7 @@ ALTER EXTENSION hstore UPDATE; Now an error is thrown to prevent the inconsistent results from being committed. If this change affects your application, the best solution is usually to move the data-propagation actions to - an AFTER trigger. + an AFTER trigger. @@ -10493,16 +12558,16 @@ ALTER EXTENSION hstore UPDATE; - Change multicolumn ON UPDATE - SET NULL/SET DEFAULT foreign key actions to affect + Change multicolumn ON UPDATE + SET NULL/SET DEFAULT foreign key actions to affect all columns of the constraint, not just those changed in the - UPDATE (Tom Lane) + UPDATE (Tom Lane) Previously, we would set only those referencing columns that correspond to referenced columns that were changed by - the UPDATE. This was what was required by SQL-92, + the UPDATE. This was what was required by SQL-92, but more recent editions of the SQL standard specify the new behavior. 
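The foreign-key behavior change described in the last entry above can be sketched as follows, using hypothetical tables:

CREATE TABLE parent (a int, b int, PRIMARY KEY (a, b));
CREATE TABLE child (
    a int, b int, note text,
    FOREIGN KEY (a, b) REFERENCES parent (a, b) ON UPDATE SET NULL
);
-- updating only parent.b now sets both child.a and child.b to NULL,
-- rather than only the column matching the changed referenced column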
@@ -10510,35 +12575,35 @@ ALTER EXTENSION hstore UPDATE; Force cached plans to be replanned if the search_path changes + linkend="guc-search-path">search_path changes (Tom Lane) Previously, cached plans already generated in the current session were not redone if the query was re-executed with a - new search_path setting, resulting in surprising behavior. + new search_path setting, resulting in surprising behavior. Fix to_number() + linkend="functions-formatting-table">to_number() to properly handle a period used as a thousands separator (Tom Lane) Previously, a period was considered to be a decimal point even when - the locale says it isn't and the D format code is used to + the locale says it isn't and the D format code is used to specify use of the locale-specific decimal point. This resulted in - wrong answers if FM format was also used. + wrong answers if FM format was also used. - Fix STRICT non-set-returning functions that have + Fix STRICT non-set-returning functions that have set-returning functions in their arguments to properly return null rows (Tom Lane) @@ -10551,14 +12616,14 @@ ALTER EXTENSION hstore UPDATE; - Store WAL in a continuous + Store WAL in a continuous stream, rather than skipping the last 16MB segment every 4GB (Heikki Linnakangas) - Previously, WAL files with names ending in FF - were not used because of this skipping. If you have WAL + Previously, WAL files with names ending in FF + were not used because of this skipping. If you have WAL backup or restore scripts that took this behavior into account, they will need to be adjusted. @@ -10567,15 +12632,15 @@ ALTER EXTENSION hstore UPDATE; In pg_constraint.confmatchtype, - store the default foreign key match type (non-FULL, - non-PARTIAL) as s for simple + linkend="catalog-pg-constraint">pg_constraint.confmatchtype, + store the default foreign key match type (non-FULL, + non-PARTIAL) as s for simple (Tom Lane) - Previously this case was represented by u - for unspecified. + Previously this case was represented by u + for unspecified. @@ -10612,10 +12677,10 @@ ALTER EXTENSION hstore UPDATE; This change improves concurrency and reduces the probability of deadlocks when updating tables involved in a foreign-key constraint. - UPDATEs that do not change any columns referenced in a - foreign key now take the new NO KEY UPDATE lock mode on - the row, while foreign key checks use the new KEY SHARE - lock mode, which does not conflict with NO KEY UPDATE. + UPDATEs that do not change any columns referenced in a + foreign key now take the new NO KEY UPDATE lock mode on + the row, while foreign key checks use the new KEY SHARE + lock mode, which does not conflict with NO KEY UPDATE. So there is no blocking unless a foreign-key column is changed. 
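A hedged sketch of the new row lock modes mentioned above, using a hypothetical accounts table:

-- session 1: changes a non-key column, so the row is locked FOR NO KEY UPDATE
UPDATE accounts SET balance = balance - 10 WHERE id = 1;

-- session 2: the kind of lock a foreign-key check takes; KEY SHARE does not
-- conflict with NO KEY UPDATE, so this no longer blocks on session 1
SELECT id FROM accounts WHERE id = 1 FOR KEY SHARE;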
@@ -10623,7 +12688,7 @@ ALTER EXTENSION hstore UPDATE; Add configuration variable lock_timeout to + linkend="guc-lock-timeout">lock_timeout to allow limiting how long a session will wait to acquire any one lock (Zoltán Böszörményi) @@ -10640,21 +12705,21 @@ ALTER EXTENSION hstore UPDATE; - Add SP-GiST + Add SP-GiST support for range data types (Alexander Korotkov) - Allow GiST indexes to be + Allow GiST indexes to be unlogged (Jeevan Chalke) - Improve performance of GiST index insertion by randomizing + Improve performance of GiST index insertion by randomizing the choice of which page to descend to when there are multiple equally good alternatives (Heikki Linnakangas) @@ -10692,7 +12757,7 @@ ALTER EXTENSION hstore UPDATE; Improve optimizer's hash table size estimate for - doing DISTINCT via hash aggregation (Tom Lane) + doing DISTINCT via hash aggregation (Tom Lane) @@ -10722,7 +12787,7 @@ ALTER EXTENSION hstore UPDATE; - Add COPY FREEZE + Add COPY FREEZE option to avoid the overhead of marking tuples as frozen later (Simon Riggs, Jeff Davis) @@ -10731,7 +12796,7 @@ ALTER EXTENSION hstore UPDATE; Improve performance of NUMERIC calculations + linkend="datatype-numeric">NUMERIC calculations (Kyotaro Horiguchi) @@ -10739,20 +12804,20 @@ ALTER EXTENSION hstore UPDATE; Improve synchronization of sessions waiting for commit_delay + linkend="guc-commit-delay">commit_delay (Peter Geoghegan) - This greatly improves the usefulness of commit_delay. + This greatly improves the usefulness of commit_delay. Improve performance of the CREATE TEMPORARY TABLE ... ON - COMMIT DELETE ROWS option by not truncating such temporary + linkend="sql-createtable">CREATE TEMPORARY TABLE ... ON + COMMIT DELETE ROWS option by not truncating such temporary tables in transactions that haven't touched any temporary tables (Heikki Linnakangas) @@ -10777,7 +12842,7 @@ ALTER EXTENSION hstore UPDATE; This speeds up lock bookkeeping at statement completion in multi-statement transactions that hold many locks; it is particularly - useful for pg_dump. + useful for pg_dump. @@ -10789,7 +12854,7 @@ ALTER EXTENSION hstore UPDATE; This speeds up sessions that create many tables in successive - small transactions, such as a pg_restore run. + small transactions, such as a pg_restore run. @@ -10818,7 +12883,7 @@ ALTER EXTENSION hstore UPDATE; The checksum option can be set during initdb. + linkend="app-initdb">initdb. @@ -10871,7 +12936,7 @@ ALTER EXTENSION hstore UPDATE; When an authentication failure occurs, log the relevant - pg_hba.conf + pg_hba.conf line, to ease debugging of unintended failures (Magnus Hagander) @@ -10879,23 +12944,23 @@ ALTER EXTENSION hstore UPDATE; - Improve LDAP error + Improve LDAP error reporting and documentation (Peter Eisentraut) - Add support for specifying LDAP authentication parameters - in URL format, per RFC 4516 (Peter Eisentraut) + Add support for specifying LDAP authentication parameters + in URL format, per RFC 4516 (Peter Eisentraut) Change the ssl_ciphers parameter - to start with DEFAULT, rather than ALL, + linkend="guc-ssl-ciphers">ssl_ciphers parameter + to start with DEFAULT, rather than ALL, then remove insecure ciphers (Magnus Hagander) @@ -10907,12 +12972,12 @@ ALTER EXTENSION hstore UPDATE; Parse and load pg_ident.conf + linkend="auth-username-maps">pg_ident.conf once, not during each connection (Amit Kapila) - This is similar to how pg_hba.conf is processed. + This is similar to how pg_hba.conf is processed. 
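As an illustration of the COPY FREEZE entry above (the file path and table are hypothetical; the table must be created or truncated in the same transaction for FREEZE to take effect, and reading a server-side file requires appropriate privileges):

BEGIN;
CREATE TABLE bulk_load (id int, payload text);
COPY bulk_load FROM '/tmp/bulk_load.csv' WITH (FORMAT csv, FREEZE);
COMMIT;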
@@ -10932,8 +12997,8 @@ ALTER EXTENSION hstore UPDATE; - On Unix-like systems, mmap() is now used for most - of PostgreSQL's shared memory. For most users, this + On Unix-like systems, mmap() is now used for most + of PostgreSQL's shared memory. For most users, this will eliminate any need to adjust kernel parameters for shared memory. @@ -10946,8 +13011,8 @@ ALTER EXTENSION hstore UPDATE; The configuration parameter - unix_socket_directory is replaced by unix_socket_directories, + unix_socket_directory is replaced by unix_socket_directories, which accepts a list of directories. @@ -10960,7 +13025,7 @@ ALTER EXTENSION hstore UPDATE; Such a directory is specified with include_dir in the server + linkend="config-includes">include_dir in the server configuration file. @@ -10968,14 +13033,14 @@ ALTER EXTENSION hstore UPDATE; Increase the maximum initdb-configured value for shared_buffers + linkend="app-initdb">initdb-configured value for shared_buffers to 128MB (Robert Haas) This is the maximum value that initdb will attempt to set in postgresql.conf; + linkend="config-setting-configuration-file">postgresql.conf; the previous maximum was 32MB. @@ -10983,7 +13048,7 @@ ALTER EXTENSION hstore UPDATE; Remove the external - PID file, if any, on postmaster exit + PID file, if any, on postmaster exit (Peter Eisentraut) @@ -11015,10 +13080,10 @@ ALTER EXTENSION hstore UPDATE; - Add SQL functions pg_is_in_backup() + Add SQL functions pg_is_in_backup() and pg_backup_start_time() + linkend="functions-admin-backup">pg_backup_start_time() (Gilles Darold) @@ -11030,7 +13095,7 @@ ALTER EXTENSION hstore UPDATE; Improve performance of streaming log shipping with synchronous_commit + linkend="guc-synchronous-commit">synchronous_commit disabled (Andres Freund) @@ -11045,12 +13110,12 @@ ALTER EXTENSION hstore UPDATE; Add the last checkpoint's redo location to pg_controldata's + linkend="app-pgcontroldata">pg_controldata's output (Fujii Masao) - This information is useful for determining which WAL + This information is useful for determining which WAL files are needed for restore. @@ -11058,7 +13123,7 @@ ALTER EXTENSION hstore UPDATE; Allow tools like pg_receivexlog + linkend="app-pgreceivewal">pg_receivexlog to run on computers with different architectures (Heikki Linnakangas) @@ -11074,9 +13139,9 @@ ALTER EXTENSION hstore UPDATE; Make pg_basebackup - @@ -11088,10 +13153,10 @@ ALTER EXTENSION hstore UPDATE; Allow pg_receivexlog + linkend="app-pgreceivewal">pg_receivexlog and pg_basebackup - to handle streaming timeline switches (Heikki Linnakangas) @@ -11099,8 +13164,8 @@ ALTER EXTENSION hstore UPDATE; Add wal_receiver_timeout - parameter to control the WAL receiver's timeout + linkend="guc-wal-receiver-timeout">wal_receiver_timeout + parameter to control the WAL receiver's timeout (Amit Kapila) @@ -11111,7 +13176,7 @@ ALTER EXTENSION hstore UPDATE; - Change the WAL record format to + Change the WAL record format to allow splitting the record header across pages (Heikki Linnakangas) @@ -11132,23 +13197,23 @@ ALTER EXTENSION hstore UPDATE; - Implement SQL-standard LATERAL option for - FROM-clause subqueries and function calls (Tom Lane) + Implement SQL-standard LATERAL option for + FROM-clause subqueries and function calls (Tom Lane) - This feature allows subqueries and functions in FROM to - reference columns from other tables in the FROM - clause. The LATERAL keyword is optional for functions. + This feature allows subqueries and functions in FROM to + reference columns from other tables in the FROM + clause. 
The LATERAL keyword is optional for functions. Add support for piping COPY and psql \copy + linkend="sql-copy">COPY and psql \copy data to/from an external program (Etsuro Fujita) @@ -11156,8 +13221,8 @@ ALTER EXTENSION hstore UPDATE; Allow a multirow VALUES clause in a rule - to reference OLD/NEW (Tom Lane) + linkend="sql-values">VALUES clause in a rule + to reference OLD/NEW (Tom Lane) @@ -11184,7 +13249,7 @@ ALTER EXTENSION hstore UPDATE; - Allow foreign data + Allow foreign data wrappers to support writes (inserts/updates/deletes) on foreign tables (KaiGai Kohei) @@ -11192,15 +13257,15 @@ ALTER EXTENSION hstore UPDATE; - Add CREATE SCHEMA ... IF - NOT EXISTS clause (Fabrízio de Royes Mello) + Add CREATE SCHEMA ... IF + NOT EXISTS clause (Fabrízio de Royes Mello) - Make REASSIGN - OWNED also change ownership of shared objects + Make REASSIGN + OWNED also change ownership of shared objects (Álvaro Herrera) @@ -11208,28 +13273,28 @@ ALTER EXTENSION hstore UPDATE; Make CREATE - AGGREGATE complain if the given initial value string is not + AGGREGATE complain if the given initial value string is not valid input for the transition datatype (Tom Lane) - Suppress CREATE - TABLE's messages about implicit index and sequence creation + Suppress CREATE + TABLE's messages about implicit index and sequence creation (Robert Haas) - These messages now appear at DEBUG1 verbosity, so that + These messages now appear at DEBUG1 verbosity, so that they will not be shown by default. - Allow DROP TABLE IF - EXISTS to succeed when a non-existent schema is specified + Allow DROP TABLE IF + EXISTS to succeed when a non-existent schema is specified in the table name (Bruce Momjian) @@ -11256,14 +13321,14 @@ ALTER EXTENSION hstore UPDATE; - <command>ALTER</> + <command>ALTER</command> - Support IF NOT EXISTS option in ALTER TYPE ... ADD VALUE + Support IF NOT EXISTS option in ALTER TYPE ... ADD VALUE (Andrew Dunstan) @@ -11274,22 +13339,22 @@ ALTER EXTENSION hstore UPDATE; - Add ALTER ROLE ALL - SET to establish settings for all users (Peter Eisentraut) + Add ALTER ROLE ALL + SET to establish settings for all users (Peter Eisentraut) This allows settings to apply to all users in all databases. ALTER DATABASE SET + linkend="sql-alterdatabase">ALTER DATABASE SET already allowed addition of settings for all users in a single - database. postgresql.conf has a similar effect. + database. postgresql.conf has a similar effect. - Add support for ALTER RULE - ... RENAME (Ali Dar) + Add support for ALTER RULE + ... RENAME (Ali Dar) @@ -11298,13 +13363,13 @@ ALTER EXTENSION hstore UPDATE; - <link linkend="rules-views"><command>VIEWs</></link> + <link linkend="rules-views"><command>VIEWs</command></link> - Add materialized + Add materialized views (Kevin Grittner) @@ -11320,7 +13385,7 @@ ALTER EXTENSION hstore UPDATE; Make simple views auto-updatable + linkend="sql-createview-updatable-views">auto-updatable (Dean Rasheed) @@ -11328,20 +13393,20 @@ ALTER EXTENSION hstore UPDATE; Simple views that reference some or all columns from a single base table are now updatable by default. More complex views can be made updatable using INSTEAD OF triggers - or INSTEAD rules. + linkend="sql-createtrigger">INSTEAD OF triggers + or INSTEAD rules. - Add CREATE RECURSIVE - VIEW syntax (Peter Eisentraut) + Add CREATE RECURSIVE + VIEW syntax (Peter Eisentraut) Internally this is translated into CREATE VIEW ... WITH - RECURSIVE .... + RECURSIVE .... 
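A brief sketch of the LATERAL feature noted above; the customers and orders tables are hypothetical and assumed to already exist:

    -- The subquery may refer to c.id only because it is marked LATERAL:
    -- it is re-evaluated for each row of customers.
    SELECT c.name, recent.order_id, recent.placed_at
    FROM customers AS c,
         LATERAL (SELECT o.order_id, o.placed_at
                  FROM orders AS o
                  WHERE o.customer_id = c.id
                  ORDER BY o.placed_at DESC
                  LIMIT 3) AS recent;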
@@ -11374,7 +13439,7 @@ ALTER EXTENSION hstore UPDATE; - Increase the maximum size of large + Increase the maximum size of large objects from 2GB to 4TB (Nozomi Anzai, Yugo Nagata) @@ -11387,8 +13452,8 @@ ALTER EXTENSION hstore UPDATE; Allow text timezone - designations, e.g. America/Chicago, in the - T field of ISO-format timestamptz + designations, e.g. America/Chicago, in the + T field of ISO-format timestamptz input (Bruce Momjian) @@ -11396,20 +13461,20 @@ ALTER EXTENSION hstore UPDATE; - <link linkend="datatype-json"><type>JSON</></link> + <link linkend="datatype-json"><type>JSON</type></link> Add operators and functions - to extract elements from JSON values (Andrew Dunstan) + to extract elements from JSON values (Andrew Dunstan) - Allow JSON values to be JSON values to be converted into records (Andrew Dunstan) @@ -11418,7 +13483,7 @@ ALTER EXTENSION hstore UPDATE; Add functions to convert - scalars, records, and hstore values to JSON (Andrew + scalars, records, and hstore values to JSON (Andrew Dunstan) @@ -11438,9 +13503,9 @@ ALTER EXTENSION hstore UPDATE; Add array_remove() + linkend="array-functions-table">array_remove() and array_replace() + linkend="array-functions-table">array_replace() functions (Marco Nenciarini, Gabriele Bartolini) @@ -11448,10 +13513,10 @@ ALTER EXTENSION hstore UPDATE; Allow concat() + linkend="functions-string-other">concat() and format() - to properly expand VARIADIC-labeled arguments + linkend="functions-string-format">format() + to properly expand VARIADIC-labeled arguments (Pavel Stehule) @@ -11459,7 +13524,7 @@ ALTER EXTENSION hstore UPDATE; Improve format() + linkend="functions-string-format">format() to provide field width and left/right alignment options (Pavel Stehule) @@ -11467,29 +13532,29 @@ ALTER EXTENSION hstore UPDATE; Make to_char(), + linkend="functions-formatting-table">to_char(), to_date(), + linkend="functions-formatting-table">to_date(), and to_timestamp() + linkend="functions-formatting-table">to_timestamp() handle negative (BC) century values properly (Bruce Momjian) Previously the behavior was either wrong or inconsistent - with positive/AD handling, e.g. with the format mask - IYYY-IW-DY. + with positive/AD handling, e.g. with the format mask + IYYY-IW-DY. Make to_date() + linkend="functions-formatting-table">to_date() and to_timestamp() - return proper results when mixing ISO and Gregorian + linkend="functions-formatting-table">to_timestamp() + return proper results when mixing ISO and Gregorian week/day designations (Bruce Momjian) @@ -11497,27 +13562,27 @@ ALTER EXTENSION hstore UPDATE; Cause pg_get_viewdef() - to start a new line by default after each SELECT target - list entry and FROM entry (Marko Tiikkaja) + linkend="functions-info-catalog-table">pg_get_viewdef() + to start a new line by default after each SELECT target + list entry and FROM entry (Marko Tiikkaja) This reduces line length in view printing, for instance in pg_dump output. + linkend="app-pgdump">pg_dump output. - Fix map_sql_value_to_xml_value() to print values of + Fix map_sql_value_to_xml_value() to print values of domain types the same way their base type would be printed (Pavel Stehule) There are special formatting rules for certain built-in types such as - boolean; these rules now also apply to domains over these + boolean; these rules now also apply to domains over these types. 
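A short, self-contained sketch of the JSON extraction operators and the new array helpers mentioned above; the literal values are purely illustrative:

    -- Extract fields from a json value with the extraction operators
    -- added in this release.
    SELECT ('{"user": {"name": "alice", "tags": ["a", "b"]}}'::json
                -> 'user' ->> 'name')                      AS user_name,
           ('{"tags": ["a", "b"]}'::json #>> '{tags,1}')   AS second_tag;

    -- Remove or replace elements of an array with the new functions.
    SELECT array_remove(ARRAY[1, 2, 2, 3], 2)   AS removed,
           array_replace(ARRAY[1, 2, 3], 2, 20) AS replaced;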
@@ -11536,13 +13601,13 @@ ALTER EXTENSION hstore UPDATE; - Allow PL/pgSQL to use RETURN with a composite-type + Allow PL/pgSQL to use RETURN with a composite-type expression (Asif Rehman) Previously, in a function returning a composite type, - RETURN could only reference a variable of that type. + RETURN could only reference a variable of that type. @@ -11557,14 +13622,14 @@ ALTER EXTENSION hstore UPDATE; Allow PL/pgSQL to access the number of rows processed by - COPY (Pavel Stehule) + COPY (Pavel Stehule) - A COPY executed in a PL/pgSQL function now updates the + A COPY executed in a PL/pgSQL function now updates the value retrieved by GET DIAGNOSTICS - x = ROW_COUNT. + x = ROW_COUNT. @@ -11608,9 +13673,9 @@ ALTER EXTENSION hstore UPDATE; - Handle SPI errors raised - explicitly (with PL/Python's RAISE) the same as - internal SPI errors (Oskari Saarenmaa and Jan Urbanski) + Handle SPI errors raised + explicitly (with PL/Python's RAISE) the same as + internal SPI errors (Oskari Saarenmaa and Jan Urbanski) @@ -11627,7 +13692,7 @@ ALTER EXTENSION hstore UPDATE; - Prevent leakage of SPI tuple tables during subtransaction + Prevent leakage of SPI tuple tables during subtransaction abort (Tom Lane) @@ -11638,7 +13703,7 @@ ALTER EXTENSION hstore UPDATE; of such tuple tables and release them manually in error-recovery code. Failure to do so caused a number of transaction-lifespan memory leakage issues in PL/pgSQL and perhaps other SPI clients. SPI_freetuptable() + linkend="spi-spi-freetupletable">SPI_freetuptable() now protects itself against multiple freeing requests, so any existing code that did take care to clean up shouldn't be broken by this change. @@ -11646,8 +13711,8 @@ ALTER EXTENSION hstore UPDATE; - Allow SPI functions to access the number of rows processed - by COPY (Pavel Stehule) + Allow SPI functions to access the number of rows processed + by COPY (Pavel Stehule) @@ -11663,35 +13728,35 @@ ALTER EXTENSION hstore UPDATE; Add command-line utility pg_isready to + linkend="app-pg-isready">pg_isready to check if the server is ready to accept connections (Phil Sorber) - Support multiple This is similar to the way pg_dump's - option works. - Add @@ -11699,7 +13764,7 @@ ALTER EXTENSION hstore UPDATE; Add libpq function PQconninfo() + linkend="libpq-pqconninfo">PQconninfo() to return connection information (Zoltán Böszörményi, Magnus Hagander) @@ -11708,27 +13773,27 @@ ALTER EXTENSION hstore UPDATE; - <link linkend="APP-PSQL"><application>psql</></link> + <link linkend="app-psql"><application>psql</application></link> - Adjust function cost settings so psql tab + Adjust function cost settings so psql tab completion and pattern searching are more efficient (Tom Lane) - Improve psql's tab completion coverage (Jeff Janes, + Improve psql's tab completion coverage (Jeff Janes, Dean Rasheed, Peter Eisentraut, Magnus Hagander) - Allow the psql mode to work when reading from standard input (Fabien Coelho, Robert Haas) @@ -11740,61 +13805,61 @@ ALTER EXTENSION hstore UPDATE; - Remove psql warning when connecting to an older + Remove psql warning when connecting to an older server (Peter Eisentraut) A warning is still issued when connecting to a server of a newer major - version than psql's. + version than psql's. 
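A sketch of a PL/pgSQL function picking up COPY's row count through GET DIAGNOSTICS, as described above; the measurements table and the file path are hypothetical:

    CREATE OR REPLACE FUNCTION load_measurements() RETURNS bigint AS $$
    DECLARE
        copied bigint;
    BEGIN
        -- A COPY executed inside PL/pgSQL now updates ROW_COUNT.
        COPY measurements FROM '/tmp/measurements.csv' WITH (FORMAT csv);
        GET DIAGNOSTICS copied = ROW_COUNT;
        RETURN copied;
    END;
    $$ LANGUAGE plpgsql;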
- <link linkend="APP-PSQL-meta-commands">Backslash Commands</link> + <link linkend="app-psql-meta-commands">Backslash Commands</link> - Add psql command \watch to repeatedly + Add psql command \watch to repeatedly execute a SQL command (Will Leinweber) - Add psql command \gset to store query - results in psql variables (Pavel Stehule) + Add psql command \gset to store query + results in psql variables (Pavel Stehule) - Add SSL information to psql's - \conninfo command (Alastair Turner) + Add SSL information to psql's + \conninfo command (Alastair Turner) - Add Security column to psql's - \df+ output (Jon Erdman) + Add Security column to psql's + \df+ output (Jon Erdman) - Allow psql command \l to accept a database + Allow psql command \l to accept a database name pattern (Peter Eisentraut) - In psql, do not allow \connect to + In psql, do not allow \connect to use defaults if there is no active connection (Bruce Momjian) @@ -11806,7 +13871,7 @@ ALTER EXTENSION hstore UPDATE; Properly reset state after failure of a SQL command executed with - psql's \g file + psql's \g file (Tom Lane) @@ -11827,8 +13892,8 @@ ALTER EXTENSION hstore UPDATE; - Add a latex-longtable output format to - psql (Bruce Momjian) + Add a latex-longtable output format to + psql (Bruce Momjian) @@ -11838,21 +13903,21 @@ ALTER EXTENSION hstore UPDATE; - Add a border=3 output mode to the psql - latex format (Bruce Momjian) + Add a border=3 output mode to the psql + latex format (Bruce Momjian) - In psql's tuples-only and expanded output modes, no - longer emit (No rows) for zero rows (Peter Eisentraut) + In psql's tuples-only and expanded output modes, no + longer emit (No rows) for zero rows (Peter Eisentraut) - In psql's unaligned, expanded output mode, no longer + In psql's unaligned, expanded output mode, no longer print an empty line for zero rows (Peter Eisentraut) @@ -11864,34 +13929,34 @@ ALTER EXTENSION hstore UPDATE; - <link linkend="APP-PGDUMP"><application>pg_dump</></link> + <link linkend="app-pgdump"><application>pg_dump</application></link> - Add pg_dump option to dump tables in parallel (Joachim Wieland) - Make pg_dump output functions in a more predictable + Make pg_dump output functions in a more predictable order (Joel Jacobson) - Fix tar files emitted by pg_dump - to be POSIX conformant (Brian Weaver, Tom Lane) + Fix tar files emitted by pg_dump + to be POSIX conformant (Brian Weaver, Tom Lane) - Add @@ -11905,7 +13970,7 @@ ALTER EXTENSION hstore UPDATE; - <link linkend="APP-INITDB"><application>initdb</></link> + <link linkend="app-initdb"><application>initdb</application></link> @@ -11916,19 +13981,19 @@ ALTER EXTENSION hstore UPDATE; This insures data integrity in event of a system crash shortly after - initdb. This can be disabled by using . - Add initdb option to sync the data directory to durable storage (Bruce Momjian) This is used by pg_upgrade. + linkend="pgupgrade">pg_upgrade. 
@@ -11960,14 +14025,14 @@ ALTER EXTENSION hstore UPDATE; - Create a centralized timeout API (Zoltán + Create a centralized timeout API (Zoltán Böszörményi) - Create libpgcommon and move pg_malloc() and other + Create libpgcommon and move pg_malloc() and other functions there (Álvaro Herrera, Andres Freund) @@ -11984,15 +14049,15 @@ ALTER EXTENSION hstore UPDATE; - Use SA_RESTART for all signals, - including SIGALRM (Tom Lane) + Use SA_RESTART for all signals, + including SIGALRM (Tom Lane) Ensure that the correct text domain is used when - translating errcontext() messages + translating errcontext() messages (Heikki Linnakangas) @@ -12005,7 +14070,7 @@ ALTER EXTENSION hstore UPDATE; - Provide support for static assertions that will fail at + Provide support for static assertions that will fail at compile time if some compile-time-constant condition is not met (Andres Freund, Tom Lane) @@ -12013,14 +14078,14 @@ ALTER EXTENSION hstore UPDATE; - Support Assert() in client-side code (Andrew Dunstan) + Support Assert() in client-side code (Andrew Dunstan) - Add decoration to inform the C compiler that some ereport() - and elog() calls do not return (Peter Eisentraut, + Add decoration to inform the C compiler that some ereport() + and elog() calls do not return (Peter Eisentraut, Andres Freund, Tom Lane, Heikki Linnakangas) @@ -12029,7 +14094,7 @@ ALTER EXTENSION hstore UPDATE; Allow options to be passed to the regression test output comparison utility via PG_REGRESS_DIFF_OPTS + linkend="regress-evaluation">PG_REGRESS_DIFF_OPTS (Peter Eisentraut) @@ -12037,43 +14102,43 @@ ALTER EXTENSION hstore UPDATE; Add isolation tests for CREATE INDEX - CONCURRENTLY (Abhijit Menon-Sen) + linkend="sql-createindex">CREATE INDEX + CONCURRENTLY (Abhijit Menon-Sen) - Remove typedefs for int2/int4 as they are better - represented as int16/int32 (Peter Eisentraut) + Remove typedefs for int2/int4 as they are better + represented as int16/int32 (Peter Eisentraut) Fix install-strip on Mac OS - X (Peter Eisentraut) + X (Peter Eisentraut) Remove configure flag - , as it is no longer supported (Bruce Momjian) - Rewrite pgindent in Perl (Andrew Dunstan) + Rewrite pgindent in Perl (Andrew Dunstan) Provide Emacs macro to set Perl formatting to - match PostgreSQL's perltidy settings (Peter Eisentraut) + match PostgreSQL's perltidy settings (Peter Eisentraut) @@ -12086,25 +14151,25 @@ ALTER EXTENSION hstore UPDATE; - Change the way UESCAPE is lexed, to significantly reduce + Change the way UESCAPE is lexed, to significantly reduce the size of the lexer tables (Heikki Linnakangas) - Centralize flex and bison - make rules (Peter Eisentraut) + Centralize flex and bison + make rules (Peter Eisentraut) - This is useful for pgxs authors. + This is useful for pgxs authors. 
- Change many internal backend functions to return object OIDs + Change many internal backend functions to return object OIDs rather than void (Dimitri Fontaine) @@ -12128,7 +14193,7 @@ ALTER EXTENSION hstore UPDATE; Add function pg_identify_object() + linkend="functions-info-catalog-table">pg_identify_object() to produce a machine-readable description of a database object (Álvaro Herrera) @@ -12136,7 +14201,7 @@ ALTER EXTENSION hstore UPDATE; - Add post-ALTER-object server hooks (KaiGai Kohei) + Add post-ALTER-object server hooks (KaiGai Kohei) @@ -12150,28 +14215,28 @@ ALTER EXTENSION hstore UPDATE; Provide a tool to help detect timezone abbreviation changes when - updating the src/timezone/data files + updating the src/timezone/data files (Tom Lane) - Add pkg-config support for libpq - and ecpg libraries (Peter Eisentraut) + Add pkg-config support for libpq + and ecpg libraries (Peter Eisentraut) - Remove src/tools/backend, now that the content is on - the PostgreSQL wiki (Bruce Momjian) + Remove src/tools/backend, now that the content is on + the PostgreSQL wiki (Bruce Momjian) - Split out WAL reading as + Split out WAL reading as an independent facility (Heikki Linnakangas, Andres Freund) @@ -12179,13 +14244,13 @@ ALTER EXTENSION hstore UPDATE; Use a 64-bit integer to represent WAL positions - (XLogRecPtr) instead of two 32-bit integers + linkend="wal">WAL positions + (XLogRecPtr) instead of two 32-bit integers (Heikki Linnakangas) - Generally, tools that need to read the WAL format + Generally, tools that need to read the WAL format will need to be adjusted. @@ -12200,7 +14265,7 @@ ALTER EXTENSION hstore UPDATE; Allow PL/Python on OS - X to build against custom versions of Python + X to build against custom versions of Python (Peter Eisentraut) @@ -12216,9 +14281,9 @@ ALTER EXTENSION hstore UPDATE; - Add a Postgres foreign + Add a Postgres foreign data wrapper contrib module to allow access to - other Postgres servers (Shigeru Hanada) + other Postgres servers (Shigeru Hanada) @@ -12228,7 +14293,7 @@ ALTER EXTENSION hstore UPDATE; - Add pg_xlogdump + Add pg_xlogdump contrib program (Andres Freund) @@ -12236,46 +14301,46 @@ ALTER EXTENSION hstore UPDATE; Add support for indexing of regular-expression searches in - pg_trgm + pg_trgm (Alexander Korotkov) - Improve pg_trgm's + Improve pg_trgm's handling of multibyte characters (Tom Lane) On a platform that does not have the wcstombs() or towlower() library functions, this could result in an incompatible change in the contents - of pg_trgm indexes for non-ASCII data. In such cases, - REINDEX those indexes to ensure correct search results. + of pg_trgm indexes for non-ASCII data. In such cases, + REINDEX those indexes to ensure correct search results. 
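A sketch of the new regular-expression index support in pg_trgm mentioned above; the docs table and index name are hypothetical:

    CREATE EXTENSION IF NOT EXISTS pg_trgm;

    -- A trigram GIN index can now be used to accelerate regex searches.
    CREATE INDEX docs_body_trgm_idx ON docs USING gin (body gin_trgm_ops);

    SELECT id, body
    FROM docs
    WHERE body ~ 'release (note|announcement)s?';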
Add a pgstattuple function to report - the size of the pending-insertions list of a GIN index + the size of the pending-insertions list of a GIN index (Fujii Masao) - Make oid2name, - pgbench, and - vacuumlo set - fallback_application_name (Amit Kapila) + Make oid2name, + pgbench, and + vacuumlo set + fallback_application_name (Amit Kapila) Improve output of pg_test_timing + linkend="pgtesttiming">pg_test_timing (Bruce Momjian) @@ -12283,7 +14348,7 @@ ALTER EXTENSION hstore UPDATE; Improve output of pg_test_fsync + linkend="pgtestfsync">pg_test_fsync (Peter Geoghegan) @@ -12295,9 +14360,9 @@ ALTER EXTENSION hstore UPDATE; - When using this FDW to define the target of a dblink + When using this FDW to define the target of a dblink connection, instead of using a hard-wired list of connection options, - the underlying libpq library is consulted to see what + the underlying libpq library is consulted to see what connection options it supports. @@ -12305,26 +14370,26 @@ ALTER EXTENSION hstore UPDATE; - <link linkend="pgupgrade"><application>pg_upgrade</></link> + <link linkend="pgupgrade"><application>pg_upgrade</application></link> - Allow pg_upgrade to do dumps and restores in + Allow pg_upgrade to do dumps and restores in parallel (Bruce Momjian, Andrew Dunstan) This allows parallel schema dump/restore of databases, as well as parallel copy/link of data files per tablespace. Use the - option to specify the level of parallelism. - Make pg_upgrade create Unix-domain sockets in + Make pg_upgrade create Unix-domain sockets in the current directory (Bruce Momjian, Tom Lane) @@ -12336,7 +14401,7 @@ ALTER EXTENSION hstore UPDATE; - Make pg_upgrade mode properly detect the location of non-default socket directories (Bruce Momjian, Tom Lane) @@ -12344,21 +14409,21 @@ ALTER EXTENSION hstore UPDATE; - Improve performance of pg_upgrade for databases + Improve performance of pg_upgrade for databases with many tables (Bruce Momjian) - Improve pg_upgrade's logs by showing + Improve pg_upgrade's logs by showing executed commands (Álvaro Herrera) - Improve pg_upgrade's status display during + Improve pg_upgrade's status display during copy/link (Bruce Momjian) @@ -12368,33 +14433,33 @@ ALTER EXTENSION hstore UPDATE; - <link linkend="pgbench"><application>pgbench</></link> + <link linkend="pgbench"><application>pgbench</application></link> - Add This adds foreign key constraints to the standard tables created by - pgbench, for use in foreign key performance testing. + pgbench, for use in foreign key performance testing. 
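The pending-list reporting described above is exposed through the pgstattuple extension; a minimal sketch, assuming the extension is installed and a GIN index named items_tags_gin_idx exists (the pgstatginindex() name is the function this entry refers to):

    CREATE EXTENSION IF NOT EXISTS pgstattuple;

    -- Report the size of the GIN index's pending-insertions list.
    SELECT * FROM pgstatginindex('items_tags_gin_idx');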
- Allow pgbench to aggregate performance statistics - and produce output every seconds (Tomas Vondra) - Add pgbench option to control the percentage of transactions logged (Tomas Vondra) @@ -12402,29 +14467,29 @@ ALTER EXTENSION hstore UPDATE; Reduce and improve the status message output of - pgbench's initialization mode (Robert Haas, + pgbench's initialization mode (Robert Haas, Peter Eisentraut) - Add pgbench mode to print one output line every five seconds (Tomas Vondra) - Output pgbench elapsed and estimated remaining + Output pgbench elapsed and estimated remaining time during initialization (Tomas Vondra) - Allow pgbench to use much larger scale factors, - by changing relevant columns from integer to bigint + Allow pgbench to use much larger scale factors, + by changing relevant columns from integer to bigint when the requested scale factor exceeds 20000 (Greg Smith) @@ -12443,21 +14508,21 @@ ALTER EXTENSION hstore UPDATE; - Allow EPUB-format documentation to be created + Allow EPUB-format documentation to be created (Peter Eisentraut) - Update FreeBSD kernel configuration documentation + Update FreeBSD kernel configuration documentation (Brad Davis) - Improve WINDOW + Improve WINDOW function documentation (Bruce Momjian, Florian Pflug) @@ -12465,7 +14530,7 @@ ALTER EXTENSION hstore UPDATE; Add instructions for setting - up the documentation tool chain on macOS + up the documentation tool chain on macOS (Peter Eisentraut) @@ -12473,7 +14538,7 @@ ALTER EXTENSION hstore UPDATE; Improve commit_delay + linkend="guc-commit-delay">commit_delay documentation (Peter Geoghegan) diff --git a/doc/src/sgml/release-9.4.sgml b/doc/src/sgml/release-9.4.sgml index c616c1a514..50442e98b4 100644 --- a/doc/src/sgml/release-9.4.sgml +++ b/doc/src/sgml/release-9.4.sgml @@ -1,6 +1,2317 @@ + + Release 9.4.20 + + + Release date: + 2018-11-08 + + + + This release contains a variety of fixes from 9.4.19. + For information about new features in the 9.4 major release, see + . + + + + Migration to Version 9.4.20 + + + A dump/restore is not required for those running 9.4.X. + + + + However, if you are upgrading from a version earlier than 9.4.18, + see . + + + + + Changes + + + + + + Fix corner-case failures + in has_foo_privilege() + family of functions (Tom Lane) + + + + Return NULL rather than throwing an error when an invalid object OID + is provided. Some of these functions got that right already, but not + all. has_column_privilege() was additionally + capable of crashing on some platforms. + + + + + + Avoid O(N^2) slowdown in regular expression match/split functions on + long strings (Andrew Gierth) + + + + + + Avoid O(N^3) slowdown in lexer for long strings + of + or - characters + (Andrew Gierth) + + + + + + Fix mis-execution of SubPlans when the outer query is being scanned + backwards (Andrew Gierth) + + + + + + Fix failure of UPDATE/DELETE ... WHERE CURRENT OF ... + after rewinding the referenced cursor (Tom Lane) + + + + A cursor that scans multiple relations (particularly an inheritance + tree) could produce wrong behavior if rewound to an earlier relation. + + + + + + Fix EvalPlanQual to handle conditionally-executed + InitPlans properly (Andrew Gierth, Tom Lane) + + + + This resulted in hard-to-reproduce crashes or wrong answers in + concurrent updates, if they contained code such as an uncorrelated + sub-SELECT inside a CASE + construct. 
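A hedged illustration of the has_foo_privilege() fix above; the postgres role is assumed to exist and the OID is an arbitrary value assumed not to correspond to any table:

    -- Before the fix this could raise an error (and, for
    -- has_column_privilege(), even crash on some platforms);
    -- it now simply returns NULL for an invalid object OID.
    SELECT has_table_privilege('postgres', 999999999::oid, 'SELECT');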
+ + + + + + Fix character-class checks to not fail on Windows for Unicode + characters above U+FFFF (Tom Lane, Kenji Uno) + + + + This bug affected full-text-search operations, as well + as contrib/ltree + and contrib/pg_trgm. + + + + + + Ensure that sequences owned by a foreign table are processed + by ALTER OWNER on the table (Peter Eisentraut) + + + + The ownership change should propagate to such sequences as well, but + this was missed for foreign tables. + + + + + + Fix over-allocation of space for array_out()'s + result string (Keiichi Hirobe) + + + + + + Fix memory leak in repeated SP-GiST index scans (Tom Lane) + + + + This is only known to amount to anything significant in cases where + an exclusion constraint using SP-GiST receives many new index entries + in a single command. + + + + + + Ensure that ApplyLogicalMappingFile() closes the + mapping file when done with it (Tomas Vondra) + + + + Previously, the file descriptor was leaked, eventually resulting in + failures during logical decoding. + + + + + + Fix logical decoding to handle cases where a mapped catalog table is + repeatedly rewritten, e.g. by VACUUM FULL + (Andres Freund) + + + + + + Prevent starting the server with wal_level set + to too low a value to support an existing replication slot (Andres + Freund) + + + + + + Avoid crash if a utility command causes infinite recursion (Tom Lane) + + + + + + When initializing a hot standby, cope with duplicate XIDs caused by + two-phase transactions on the master + (Michael Paquier, Konstantin Knizhnik) + + + + + + Randomize the random() seed in bootstrap and + standalone backends, and in initdb + (Noah Misch) + + + + The main practical effect of this change is that it avoids a scenario + where initdb might mistakenly conclude that + POSIX shared memory is not available, due to name collisions caused by + always using the same random seed. + + + + + + Allow DSM allocation to be interrupted (Chris Travers) + + + + + + Avoid possible buffer overrun when replaying GIN page recompression + from WAL (Alexander Korotkov, Sivasubramanian Ramasubramanian) + + + + + + Fix missed fsync of a replication slot's directory (Konstantin + Knizhnik, Michael Paquier) + + + + + + Fix unexpected timeouts when + using wal_sender_timeout on a slow server + (Noah Misch) + + + + + + Ensure that hot standby processes use the correct WAL consistency + point (Alexander Kukushkin, Michael Paquier) + + + + This prevents possible misbehavior just after a standby server has + reached a consistent database state during WAL replay. + + + + + + Don't run atexit callbacks when servicing SIGQUIT + (Heikki Linnakangas) + + + + + + Don't record foreign-server user mappings as members of extensions + (Tom Lane) + + + + If CREATE USER MAPPING is executed in an extension + script, an extension dependency was created for the user mapping, + which is unexpected. Roles can't be extension members, so user + mappings shouldn't be either. + + + + + + Make syslogger more robust against failures in opening CSV log files + (Tom Lane) + + + + + + Fix possible inconsistency in pg_dump's + sorting of dissimilar object names (Jacob Champion) + + + + + + Ensure that pg_restore will schema-qualify + the table name when + emitting DISABLE/ENABLE TRIGGER + commands (Tom Lane) + + + + This avoids failures due to the new policy of running restores with + restrictive search path. 
+ + + + + + Fix pg_upgrade to handle event triggers in + extensions correctly (Haribabu Kommi) + + + + pg_upgrade failed to preserve an event + trigger's extension-membership status. + + + + + + Fix pg_upgrade's cluster state check to + work correctly on a standby server (Bruce Momjian) + + + + + + Enforce type cube's dimension limit in + all contrib/cube functions (Andrey Borodin) + + + + Previously, some cube-related functions could construct values that + would be rejected by cube_in(), leading to + dump/reload failures. + + + + + + Fix contrib/unaccent's + unaccent() function to use + the unaccent text search dictionary that is in the + same schema as the function (Tom Lane) + + + + Previously it tried to look up the dictionary using the search path, + which could fail if the search path has a restrictive value. + + + + + + Fix build problems on macOS 10.14 (Mojave) (Tom Lane) + + + + Adjust configure to add + an switch to CPPFLAGS; + without this, PL/Perl and PL/Tcl fail to configure or build on macOS + 10.14. The specific sysroot used can be overridden at configure time + or build time by setting the PG_SYSROOT variable in + the arguments of configure + or make. + + + + It is now recommended that Perl-related extensions + write $(perl_includespec) rather + than -I$(perl_archlibexp)/CORE in their compiler + flags. The latter continues to work on most platforms, but not recent + macOS. + + + + Also, it should no longer be necessary to + specify manually to get PL/Tcl to + build on recent macOS releases. + + + + + + Fix MSVC build and regression-test scripts to work on recent Perl + versions (Andrew Dunstan) + + + + Perl no longer includes the current directory in its search path + by default; work around that. + + + + + + Support building on Windows with Visual Studio 2015 or Visual Studio 2017 + (Michael Paquier, Haribabu Kommi) + + + + + + Allow btree comparison functions to return INT_MIN + (Tom Lane) + + + + Up to now, we've forbidden datatype-specific comparison functions from + returning INT_MIN, which allows callers to invert + the sort order just by negating the comparison result. However, this + was never safe for comparison functions that directly return the + result of memcmp(), strcmp(), + etc, as POSIX doesn't place any such restriction on those functions. + At least some recent versions of memcmp() can + return INT_MIN, causing incorrect sort ordering. + Hence, we've removed this restriction. Callers must now use + the INVERT_COMPARE_RESULT() macro if they wish to + invert the sort order. + + + + + + Fix recursion hazard in shared-invalidation message processing + (Tom Lane) + + + + This error could, for example, result in failure to access a system + catalog or index that had just been processed by VACUUM + FULL. + + + + This change adds a new result code + for LockAcquire, which might possibly affect + external callers of that function, though only very unusual usage + patterns would have an issue with it. The API + of LockAcquireExtended is also changed. + + + + + + Save and restore SPI's global variables + during SPI_connect() + and SPI_finish() (Chapman Flack, Tom Lane) + + + + This prevents possible interference when one SPI-using function calls + another. + + + + + + Provide ALLOCSET_DEFAULT_SIZES and sibling macros + in back branches (Tom Lane) + + + + These macros have existed since 9.6, but there were requests to add + them to older branches to allow extensions to rely on them without + branch-specific coding. 
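A small sketch of the contrib/unaccent behavior fixed above, assuming the extension is installed in the public schema:

    CREATE EXTENSION IF NOT EXISTS unaccent;

    -- The function now finds its "unaccent" dictionary in its own schema,
    -- so the call keeps working even under a restrictive search_path such
    -- as the one pg_restore now uses.
    SELECT public.unaccent('Hôtel São Paulo');   -- 'Hotel Sao Paulo'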
+ + + + + + Avoid using potentially-under-aligned page buffers (Tom Lane) + + + + Invent new union types PGAlignedBlock + and PGAlignedXLogBlock, and use these in place of plain + char arrays, ensuring that the compiler can't place the buffer at a + misaligned start address. This fixes potential core dumps on + alignment-picky platforms, and may improve performance even on + platforms that allow misalignment. + + + + + + Make src/port/snprintf.c follow the C99 + standard's definition of snprintf()'s result + value (Tom Lane) + + + + On platforms where this code is used (mostly Windows), its pre-C99 + behavior could lead to failure to detect buffer overrun, if the + calling code assumed C99 semantics. + + + + + + When building on i386 with the clang + compiler, require to be used (Andres Freund) + + + + This avoids problems with missed floating point overflow checks. + + + + + + Fix configure's detection of the result + type of strerror_r() (Tom Lane) + + + + The previous coding got the wrong answer when building + with icc on Linux (and perhaps in other + cases), leading to libpq not returning + useful error messages for system-reported errors. + + + + + + Update time zone data files to tzdata + release 2018g for DST law changes in Chile, Fiji, Morocco, and Russia + (Volgograd), plus historical corrections for China, Hawaii, Japan, + Macau, and North Korea. + + + + + + + + + + Release 9.4.19 + + + Release date: + 2018-08-09 + + + + This release contains a variety of fixes from 9.4.18. + For information about new features in the 9.4 major release, see + . + + + + Migration to Version 9.4.19 + + + A dump/restore is not required for those running 9.4.X. + + + + However, if you are upgrading from a version earlier than 9.4.18, + see . + + + + + Changes + + + + + + Fix failure to reset libpq's state fully + between connection attempts (Tom Lane) + + + + An unprivileged user of dblink + or postgres_fdw could bypass the checks intended + to prevent use of server-side credentials, such as + a ~/.pgpass file owned by the operating-system + user running the server. Servers allowing peer authentication on + local connections are particularly vulnerable. Other attacks such + as SQL injection into a postgres_fdw session + are also possible. + Attacking postgres_fdw in this way requires the + ability to create a foreign server object with selected connection + parameters, but any user with access to dblink + could exploit the problem. + In general, an attacker with the ability to select the connection + parameters for a libpq-using application + could cause mischief, though other plausible attack scenarios are + harder to think of. + Our thanks to Andrew Krasichkov for reporting this issue. + (CVE-2018-10915) + + + + + + Ensure that updates to the relfrozenxid + and relminmxid values + for nailed system catalogs are processed in a timely + fashion (Andres Freund) + + + + Overoptimistic caching rules could prevent these updates from being + seen by other sessions, leading to spurious errors and/or data + corruption. The problem was significantly worse for shared catalogs, + such as pg_authid, because the stale cache + data could persist into new sessions as well as existing ones. 
+ + + + + + Fix case where a freshly-promoted standby crashes before having + completed its first post-recovery checkpoint (Michael Paquier, Kyotaro + Horiguchi, Pavan Deolasee, Álvaro Herrera) + + + + This led to a situation where the server did not think it had reached + a consistent database state during subsequent WAL replay, preventing + restart. + + + + + + Avoid emitting a bogus WAL record when recycling an all-zero btree + page (Amit Kapila) + + + + This mistake has been seen to cause assertion failures, and + potentially it could result in unnecessary query cancellations on hot + standby servers. + + + + + + Improve performance of WAL replay for transactions that drop many + relations (Fujii Masao) + + + + This change reduces the number of times that shared buffers are + scanned, so that it is of most benefit when that setting is large. + + + + + + Improve performance of lock releasing in standby server WAL replay + (Thomas Munro) + + + + + + Make logical WAL senders report streaming state correctly (Simon + Riggs, Sawada Masahiko) + + + + The code previously mis-detected whether or not it had caught up with + the upstream server. + + + + + + Fix bugs in snapshot handling during logical decoding, allowing wrong + decoding results in rare cases (Arseny Sher, Álvaro Herrera) + + + + + + Ensure a table's cached index list is correctly rebuilt after an index + creation fails partway through (Peter Geoghegan) + + + + Previously, the failed index's OID could remain in the list, causing + problems later in the same session. + + + + + + Fix mishandling of empty uncompressed posting list pages in GIN + indexes (Sivasubramanian Ramasubramanian, Alexander Korotkov) + + + + This could result in an assertion failure after pg_upgrade of a + pre-9.4 GIN index (9.4 and later will not create such pages). + + + + + + Ensure that VACUUM will respond to signals + within btree page deletion loops (Andres Freund) + + + + Corrupted btree indexes could result in an infinite loop here, and + that previously wasn't interruptible without forcing a crash. + + + + + + Fix misoptimization of equivalence classes involving composite-type + columns (Tom Lane) + + + + This resulted in failure to recognize that an index on a composite + column could provide the sort order needed for a mergejoin on that + column. + + + + + + Fix SQL-standard FETCH FIRST syntax to allow + parameters ($n), as the + standard expects (Andrew Gierth) + + + + + + Fix failure to schema-qualify some object names + in getObjectDescription output + (Kyotaro Horiguchi, Tom Lane) + + + + Names of collations, conversions, and text search objects + were not schema-qualified when they should be. + + + + + + Widen COPY FROM's current-line-number counter + from 32 to 64 bits (David Rowley) + + + + This avoids two problems with input exceeding 4G lines: COPY + FROM WITH HEADER would drop a line every 4G lines, not only + the first line, and error reports could show a wrong line number. + + + + + + Add a string freeing function + to ecpg's pgtypes + library, so that cross-module memory management problems can be + avoided on Windows (Takayuki Tsunakawa) + + + + On Windows, crashes can ensue if the free call + for a given chunk of memory is not made from the same DLL + that malloc'ed the memory. + The pgtypes library sometimes returns strings + that it expects the caller to free, making it impossible to follow + this rule. Add a PGTYPESchar_free() function + that just wraps free, allowing applications + to follow this rule. 
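A sketch of the FETCH FIRST fix noted above; the events table and the prepared-statement name are hypothetical:

    -- A parameter is now accepted where the SQL standard allows one.
    PREPARE recent_events(int) AS
        SELECT *
        FROM events
        ORDER BY created_at DESC
        FETCH FIRST $1 ROWS ONLY;

    EXECUTE recent_events(10);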
+ + + + + + Fix ecpg's support for long + long variables on Windows, as well as other platforms that + declare strtoll/strtoull + nonstandardly or not at all (Dang Minh Huong, Tom Lane) + + + + + + Fix misidentification of SQL statement type in PL/pgSQL, when a rule + change causes a change in the semantics of a statement intra-session + (Tom Lane) + + + + This error led to assertion failures, or in rare cases, failure to + enforce the INTO STRICT option as expected. + + + + + + Fix password prompting in client programs so that echo is properly + disabled on Windows when stdin is not the + terminal (Matthew Stickney) + + + + + + Further fix mis-quoting of values for list-valued GUC variables in + dumps (Tom Lane) + + + + The previous fix for quoting of search_path and + other list-valued variables in pg_dump + output turned out to misbehave for empty-string list elements, and it + risked truncation of long file paths. + + + + + + Fix pg_dump's failure to + dump REPLICA IDENTITY properties for constraint + indexes (Tom Lane) + + + + Manually created unique indexes were properly marked, but not those + created by declaring UNIQUE or PRIMARY + KEY constraints. + + + + + + Make pg_upgrade check that the old server + was shut down cleanly (Bruce Momjian) + + + + The previous check could be fooled by an immediate-mode shutdown. + + + + + + Fix crash in contrib/ltree's + lca() function when the input array is empty + (Pierre Ducroquet) + + + + + + Fix various error-handling code paths in which an incorrect error code + might be reported (Michael Paquier, Tom Lane, Magnus Hagander) + + + + + + Rearrange makefiles to ensure that programs link to freshly-built + libraries (such as libpq.so) rather than ones + that might exist in the system library directories (Tom Lane) + + + + This avoids problems when building on platforms that supply old copies + of PostgreSQL libraries. + + + + + + Update time zone data files to tzdata + release 2018e for DST law changes in North Korea, plus historical + corrections for Czechoslovakia. + + + + This update includes a redefinition of daylight savings + in Ireland, as well as for some past years in Namibia and + Czechoslovakia. In those jurisdictions, legally standard time is + observed in summer, and daylight savings time in winter, so that the + daylight savings offset is one hour behind standard time not one hour + ahead. This does not affect either the actual UTC offset or the + timezone abbreviations in use; the only known effect is that + the is_dst column in + the pg_timezone_names view will now be true + in winter and false in summer in these cases. + + + + + + + + + + Release 9.4.18 + + + Release date: + 2018-05-10 + + + + This release contains a variety of fixes from 9.4.17. + For information about new features in the 9.4 major release, see + . + + + + Migration to Version 9.4.18 + + + A dump/restore is not required for those running 9.4.X. + + + + However, if the function marking mistakes mentioned in the first + changelog entry below affect you, you will want to take steps to + correct your database catalogs. + + + + Also, if you are upgrading from a version earlier than 9.4.17, + see . + + + + + Changes + + + + + + Fix incorrect volatility markings on a few built-in functions + (Thomas Munro, Tom Lane) + + + + The functions + query_to_xml, + cursor_to_xml, + cursor_to_xmlschema, + query_to_xmlschema, and + query_to_xml_and_xmlschema + should be marked volatile because they execute user-supplied queries + that might contain volatile operations. 
They were not, leading to a + risk of incorrect query optimization. This has been repaired for new + installations by correcting the initial catalog data, but existing + installations will continue to contain the incorrect markings. + Practical use of these functions seems to pose little hazard, but in + case of trouble, it can be fixed by manually updating these + functions' pg_proc entries, for example + ALTER FUNCTION pg_catalog.query_to_xml(text, boolean, + boolean, text) VOLATILE. (Note that that will need to be + done in each database of the installation.) Another option is + to pg_upgrade the database to a version + containing the corrected initial data. + + + + + + Avoid re-using TOAST value OIDs that match dead-but-not-yet-vacuumed + TOAST entries (Pavan Deolasee) + + + + Once the OID counter has wrapped around, it's possible to assign a + TOAST value whose OID matches a previously deleted entry in the same + TOAST table. If that entry were not yet vacuumed away, this resulted + in unexpected chunk number 0 (expected 1) for toast + value nnnnn errors, which would + persist until the dead entry was removed + by VACUUM. Fix by not selecting such OIDs when + creating a new TOAST entry. + + + + + + Change ANALYZE's algorithm for updating + pg_class.reltuples + (David Gould) + + + + Previously, pages not actually scanned by ANALYZE + were assumed to retain their old tuple density. In a large table + where ANALYZE samples only a small fraction of the + pages, this meant that the overall tuple density estimate could not + change very much, so that reltuples would + change nearly proportionally to changes in the table's physical size + (relpages) regardless of what was actually + happening in the table. This has been observed to result + in reltuples becoming so much larger than + reality as to effectively shut off autovacuuming. To fix, assume + that ANALYZE's sample is a statistically unbiased + sample of the table (as it should be), and just extrapolate the + density observed within those pages to the whole table. + + + + + + Avoid deadlocks in concurrent CREATE INDEX + CONCURRENTLY commands that are run + under SERIALIZABLE or REPEATABLE + READ transaction isolation (Tom Lane) + + + + + + Fix possible slow execution of REFRESH MATERIALIZED VIEW + CONCURRENTLY (Thomas Munro) + + + + + + Fix UPDATE/DELETE ... WHERE CURRENT OF to not fail + when the referenced cursor uses an index-only-scan plan (Yugo Nagata, + Tom Lane) + + + + + + Fix incorrect planning of join clauses pushed into parameterized + paths (Andrew Gierth, Tom Lane) + + + + This error could result in misclassifying a condition as + a join filter for an outer join when it should be a + plain filter condition, leading to incorrect join + output. + + + + + + Fix misoptimization of CHECK constraints having + provably-NULL subclauses of + top-level AND/OR conditions + (Tom Lane, Dean Rasheed) + + + + This could, for example, allow constraint exclusion to exclude a + child table that should not be excluded from a query. + + + + + + Avoid failure if a query-cancel or session-termination interrupt + occurs while committing a prepared transaction (Stas Kelvich) + + + + + + Fix query-lifespan memory leakage in repeatedly executed hash joins + (Tom Lane) + + + + + + Fix overly strict sanity check + in heap_prepare_freeze_tuple + (Álvaro Herrera) + + + + This could result in incorrect cannot freeze committed + xmax failures in databases that have + been pg_upgrade'd from 9.2 or earlier. 
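For the volatility-marking entry above, a hedged sketch of the manual correction in an existing installation; only query_to_xml's signature is taken from the entry itself, and the command must be repeated in every database of the cluster:

    -- The four other functions listed in the entry above need the same
    -- treatment, each with its own argument signature.
    ALTER FUNCTION pg_catalog.query_to_xml(text, boolean, boolean, text)
        VOLATILE;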
+ + + + + + Prevent dangling-pointer dereference when a C-coded before-update row + trigger returns the old tuple (Rushabh Lathia) + + + + + + Reduce locking during autovacuum worker scheduling (Jeff Janes) + + + + The previous behavior caused drastic loss of potential worker + concurrency in databases with many tables. + + + + + + Ensure client hostname is copied while copying + pg_stat_activity data to local memory + (Edmund Horner) + + + + Previously the supposedly-local snapshot contained a pointer into + shared memory, allowing the client hostname column to change + unexpectedly if any existing session disconnected. + + + + + + Fix incorrect processing of multiple compound affixes + in ispell dictionaries (Arthur Zakirov) + + + + + + Fix collation-aware searches (that is, indexscans using inequality + operators) in SP-GiST indexes on text columns (Tom Lane) + + + + Such searches would return the wrong set of rows in most non-C + locales. + + + + + + Count the number of index tuples correctly during initial build of an + SP-GiST index (Tomas Vondra) + + + + Previously, the tuple count was reported to be the same as that of + the underlying table, which is wrong if the index is partial. + + + + + + Count the number of index tuples correctly during vacuuming of a + GiST index (Andrey Borodin) + + + + Previously it reported the estimated number of heap tuples, + which might be inaccurate, and is certainly wrong if the + index is partial. + + + + + + Fix a corner case where a streaming standby gets stuck at a WAL + continuation record (Kyotaro Horiguchi) + + + + + + In logical decoding, avoid possible double processing of WAL data + when a walsender restarts (Craig Ringer) + + + + + + Allow scalarltsel + and scalargtsel to be used on non-core datatypes + (Tomas Vondra) + + + + + + Reduce libpq's memory consumption when a + server error is reported after a large amount of query output has + been collected (Tom Lane) + + + + Discard the previous output before, not after, processing the error + message. On some platforms, notably Linux, this can make a + difference in the application's subsequent memory footprint. + + + + + + Fix double-free crashes in ecpg + (Patrick Krecker, Jeevan Ladhe) + + + + + + Fix ecpg to handle long long + int variables correctly in MSVC builds (Michael Meskes, + Andrew Gierth) + + + + + + Fix mis-quoting of values for list-valued GUC variables in dumps + (Michael Paquier, Tom Lane) + + + + The local_preload_libraries, + session_preload_libraries, + shared_preload_libraries, + and temp_tablespaces variables were not correctly + quoted in pg_dump output. This would + cause problems if settings for these variables appeared in + CREATE FUNCTION ... SET or ALTER + DATABASE/ROLE ... SET clauses. + + + + + + Fix pg_recvlogical to not fail against + pre-v10 PostgreSQL servers + (Michael Paquier) + + + + A previous fix caused pg_recvlogical to + issue a command regardless of server version, but it should only be + issued to v10 and later servers. + + + + + + Fix overflow handling in PL/pgSQL + integer FOR loops (Tom Lane) + + + + The previous coding failed to detect overflow of the loop variable + on some non-gcc compilers, leading to an infinite loop. 
+ + + + + + Adjust PL/Python regression tests to pass + under Python 3.7 (Peter Eisentraut) + + + + + + Support testing PL/Python and related + modules when building with Python 3 and MSVC (Andrew Dunstan) + + + + + + Rename internal b64_encode + and b64_decode functions to avoid conflict with + Solaris 11.4 built-in functions (Rainer Orth) + + + + + + Sync our copy of the timezone library with IANA tzcode release 2018e + (Tom Lane) + + + + This fixes the zic timezone data compiler + to cope with negative daylight-savings offsets. While + the PostgreSQL project will not + immediately ship such timezone data, zic + might be used with timezone data obtained directly from IANA, so it + seems prudent to update zic now. + + + + + + Update time zone data files to tzdata + release 2018d for DST law changes in Palestine and Antarctica (Casey + Station), plus historical corrections for Portugal and its colonies, + as well as Enderbury, Jamaica, Turks & Caicos Islands, and + Uruguay. + + + + + + + + + + Release 9.4.17 + + + Release date: + 2018-03-01 + + + + This release contains a variety of fixes from 9.4.16. + For information about new features in the 9.4 major release, see + . + + + + Migration to Version 9.4.17 + + + A dump/restore is not required for those running 9.4.X. + + + + However, if you run an installation in which not all users are mutually + trusting, or if you maintain an application or extension that is + intended for use in arbitrary situations, it is strongly recommended + that you read the documentation changes described in the first changelog + entry below, and take suitable steps to ensure that your installation or + code is secure. + + + + Also, the changes described in the second changelog entry below may + cause functions used in index expressions or materialized views to fail + during auto-analyze, or when reloading from a dump. After upgrading, + monitor the server logs for such problems, and fix affected functions. + + + + Also, if you are upgrading from a version earlier than 9.4.13, + see . + + + + + Changes + + + + + + Document how to configure installations and applications to guard + against search-path-dependent trojan-horse attacks from other users + (Noah Misch) + + + + Using a search_path setting that includes any + schemas writable by a hostile user enables that user to capture + control of queries and then run arbitrary SQL code with the + permissions of the attacked user. While it is possible to write + queries that are proof against such hijacking, it is notationally + tedious, and it's very easy to overlook holes. Therefore, we now + recommend configurations in which no untrusted schemas appear in + one's search path. Relevant documentation appears in + (for database administrators and users), + (for application authors), + (for extension authors), and + (for authors + of SECURITY DEFINER functions). + (CVE-2018-1058) + + + + + + Avoid use of insecure search_path settings + in pg_dump and other client programs + (Noah Misch, Tom Lane) + + + + pg_dump, + pg_upgrade, + vacuumdb and + other PostgreSQL-provided applications were + themselves vulnerable to the type of hijacking described in the previous + changelog entry; since these applications are commonly run by + superusers, they present particularly attractive targets. To make them + secure whether or not the installation as a whole has been secured, + modify them to include only the pg_catalog + schema in their search_path settings. + Autovacuum worker processes now do the same, as well. 
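As a hedged sketch of the hardening recommended by the search_path entries above, a SECURITY DEFINER function with a pinned search path; the schema, table, and function names are illustrative:

    CREATE OR REPLACE FUNCTION audit.log_event(msg text)
    RETURNS void AS $$
        INSERT INTO audit.events (message) VALUES (msg);
    $$ LANGUAGE sql
       SECURITY DEFINER
       -- Pin the search path so a hostile schema earlier in the caller's
       -- path cannot capture unqualified names used inside the function.
       SET search_path = pg_catalog, pg_temp;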
+ + + + In cases where user-provided functions are indirectly executed by + these programs — for example, user-provided functions in index + expressions — the tighter search_path may + result in errors, which will need to be corrected by adjusting those + user-provided functions to not assume anything about what search path + they are invoked under. That has always been good practice, but now + it will be necessary for correct behavior. + (CVE-2018-1058) + + + + + + Fix misbehavior of concurrent-update rechecks with CTE references + appearing in subplans (Tom Lane) + + + + If a CTE (WITH clause reference) is used in an + InitPlan or SubPlan, and the query requires a recheck due to trying + to update or lock a concurrently-updated row, incorrect results could + be obtained. + + + + + + Fix planner failures with overlapping mergejoin clauses in an outer + join (Tom Lane) + + + + These mistakes led to left and right pathkeys do not match in + mergejoin or outer pathkeys do not match + mergeclauses planner errors in corner cases. + + + + + + Repair pg_upgrade's failure to + preserve relfrozenxid for materialized + views (Tom Lane, Andres Freund) + + + + This oversight could lead to data corruption in materialized views + after an upgrade, manifesting as could not access status of + transaction or found xmin from before + relfrozenxid errors. The problem would be more likely to + occur in seldom-refreshed materialized views, or ones that were + maintained only with REFRESH MATERIALIZED VIEW + CONCURRENTLY. + + + + If such corruption is observed, it can be repaired by refreshing the + materialized view (without CONCURRENTLY). + + + + + + Fix incorrect reporting of PL/Python function names in + error CONTEXT stacks (Tom Lane) + + + + An error occurring within a nested PL/Python function call (that is, + one reached via a SPI query from another PL/Python function) would + result in a stack trace showing the inner function's name twice, + rather than the expected results. Also, an error in a nested + PL/Python DO block could result in a null pointer + dereference crash on some platforms. + + + + + + Allow contrib/auto_explain's + log_min_duration setting to range up + to INT_MAX, or about 24 days instead of 35 minutes + (Tom Lane) + + + + + + + + + + Release 9.4.16 + + + Release date: + 2018-02-08 + + + + This release contains a variety of fixes from 9.4.15. + For information about new features in the 9.4 major release, see + . + + + + Migration to Version 9.4.16 + + + A dump/restore is not required for those running 9.4.X. + + + + However, if you are upgrading from a version earlier than 9.4.13, + see . + + + + + Changes + + + + + + Ensure that all temporary files made + by pg_upgrade are non-world-readable + (Tom Lane, Noah Misch) + + + + pg_upgrade normally restricts its + temporary files to be readable and writable only by the calling user. + But the temporary file containing pg_dumpall -g + output would be group- or world-readable, or even writable, if the + user's umask setting allows. In typical usage on + multi-user machines, the umask and/or the working + directory's permissions would be tight enough to prevent problems; + but there may be people using pg_upgrade + in scenarios where this oversight would permit disclosure of database + passwords to unfriendly eyes. 
+ (CVE-2018-1053) + + + + + + Fix vacuuming of tuples that were updated while key-share locked + (Andres Freund, Álvaro Herrera) + + + + In some cases VACUUM would fail to remove such + tuples even though they are now dead, leading to assorted data + corruption scenarios. + + + + + + Fix inadequate buffer locking in some LSN fetches (Jacob Champion, + Asim Praveen, Ashwin Agrawal) + + + + These errors could result in misbehavior under concurrent load. + The potential consequences have not been characterized fully. + + + + + + Avoid unnecessary failure in a query on an inheritance tree that + occurs concurrently with some child table being removed from the tree + by ALTER TABLE NO INHERIT (Tom Lane) + + + + + + Fix spurious deadlock failures when multiple sessions are + running CREATE INDEX CONCURRENTLY (Jeff Janes) + + + + + + Repair failure with correlated sub-SELECT + inside VALUES inside a LATERAL + subquery (Tom Lane) + + + + + + Fix could not devise a query plan for the given query + planner failure for some cases involving nested UNION + ALL inside a lateral subquery (Tom Lane) + + + + + + Fix logical decoding to correctly clean up disk files for crashed + transactions (Atsushi Torikoshi) + + + + Logical decoding may spill WAL records to disk for transactions + generating many WAL records. Normally these files are cleaned up + after the transaction's commit or abort record arrives; but if + no such record is ever seen, the removal code misbehaved. + + + + + + Fix walsender timeout failure and failure to respond to interrupts + when processing a large transaction (Petr Jelinek) + + + + + + Fix has_sequence_privilege() to + support WITH GRANT OPTION tests, + as other privilege-testing functions do (Joe Conway) + + + + + + In databases using UTF8 encoding, ignore any XML declaration that + asserts a different encoding (Pavel Stehule, Noah Misch) + + + + We always store XML strings in the database encoding, so allowing + libxml to act on a declaration of another encoding gave wrong results. + In encodings other than UTF8, we don't promise to support non-ASCII + XML data anyway, so retain the previous behavior for bug compatibility. + This change affects only xpath() and related + functions; other XML code paths already acted this way. + + + + + + Provide for forward compatibility with future minor protocol versions + (Robert Haas, Badrul Chowdhury) + + + + Up to now, PostgreSQL servers simply + rejected requests to use protocol versions newer than 3.0, so that + there was no functional difference between the major and minor parts + of the protocol version number. Allow clients to request versions 3.x + without failing, sending back a message showing that the server only + understands 3.0. This makes no difference at the moment, but + back-patching this change should allow speedier introduction of future + minor protocol upgrades. + + + + + + Cope with failure to start a parallel worker process + (Amit Kapila, Robert Haas) + + + + Parallel query previously tended to hang indefinitely if a worker + could not be started, as the result of fork() + failure or other low-probability problems. 
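For example, the has_sequence_privilege() entry above means that privilege strings of the following form are now accepted for sequences, as they already were for tables (the sequence name here is hypothetical):

SELECT has_sequence_privilege('public.invoice_seq', 'USAGE WITH GRANT OPTION');
SELECT has_sequence_privilege('public.invoice_seq', 'SELECT WITH GRANT OPTION');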
+ + + + + + Prevent stack-overflow crashes when planning extremely deeply + nested set operations + (UNION/INTERSECT/EXCEPT) + (Tom Lane) + + + + + + Fix null-pointer crashes for some types of LDAP URLs appearing + in pg_hba.conf (Thomas Munro) + + + + + + Fix sample INSTR() functions in the PL/pgSQL + documentation (Yugo Nagata, Tom Lane) + + + + These functions are stated to + be Oracle compatible, but + they weren't exactly. In particular, there was a discrepancy in the + interpretation of a negative third parameter: Oracle thinks that a + negative value indicates the last place where the target substring can + begin, whereas our functions took it as the last place where the + target can end. Also, Oracle throws an error for a zero or negative + fourth parameter, whereas our functions returned zero. + + + + The sample code has been adjusted to match Oracle's behavior more + precisely. Users who have copied this code into their applications + may wish to update their copies. + + + + + + Fix pg_dump to make ACL (permissions), + comment, and security label entries reliably identifiable in archive + output formats (Tom Lane) + + + + The tag portion of an ACL archive entry was usually + just the name of the associated object. Make it start with the object + type instead, bringing ACLs into line with the convention already used + for comment and security label archive entries. Also, fix the + comment and security label entries for the whole database, if present, + to make their tags start with DATABASE so that they + also follow this convention. This prevents false matches in code that + tries to identify large-object-related entries by seeing if the tag + starts with LARGE OBJECT. That could have resulted + in misclassifying entries as data rather than schema, with undesirable + results in a schema-only or data-only dump. + + + + Note that this change has user-visible results in the output + of pg_restore --list. + + + + + + In ecpg, detect indicator arrays that do + not have the correct length and report an error (David Rader) + + + + + + Avoid triggering a libc assertion + in contrib/hstore, due to use + of memcpy() with equal source and destination + pointers (Tomas Vondra) + + + + + + Provide modern examples of how to auto-start Postgres on macOS + (Tom Lane) + + + + The scripts in contrib/start-scripts/osx use + infrastructure that's been deprecated for over a decade, and which no + longer works at all in macOS releases of the last couple of years. + Add a new subdirectory contrib/start-scripts/macos + containing scripts that use the newer launchd + infrastructure. + + + + + + Fix incorrect selection of configuration-specific libraries for + OpenSSL on Windows (Andrew Dunstan) + + + + + + Support linking to MinGW-built versions of libperl (Noah Misch) + + + + This allows building PL/Perl with some common Perl distributions for + Windows. + + + + + + Fix MSVC build to test whether 32-bit libperl + needs -D_USE_32BIT_TIME_T (Noah Misch) + + + + Available Perl distributions are inconsistent about what they expect, + and lack any reliable means of reporting it, so resort to a build-time + test on what the library being used actually does. + + + + + + On Windows, install the crash dump handler earlier in postmaster + startup (Takayuki Tsunakawa) + + + + This may allow collection of a core dump for some early-startup + failures that did not produce a dump before. 
+ + + + + + On Windows, avoid encoding-conversion-related crashes when emitting + messages very early in postmaster startup (Takayuki Tsunakawa) + + + + + + Use our existing Motorola 68K spinlock code on OpenBSD as + well as NetBSD (David Carlier) + + + + + + Add support for spinlocks on Motorola 88K (David Carlier) + + + + + + Update time zone data files to tzdata + release 2018c for DST law changes in Brazil, Sao Tome and Principe, + plus historical corrections for Bolivia, Japan, and South Sudan. + The US/Pacific-New zone has been removed (it was + only an alias for America/Los_Angeles anyway). + + + + + + + + + + Release 9.4.15 + + + Release date: + 2017-11-09 + + + + This release contains a variety of fixes from 9.4.14. + For information about new features in the 9.4 major release, see + . + + + + Migration to Version 9.4.15 + + + A dump/restore is not required for those running 9.4.X. + + + + However, if you are upgrading from a version earlier than 9.4.13, + see . + + + + + Changes + + + + + + Fix crash due to rowtype mismatch + in json{b}_populate_recordset() + (Michael Paquier, Tom Lane) + + + + These functions used the result rowtype specified in the FROM + ... AS clause without checking that it matched the actual + rowtype of the supplied tuple value. If it didn't, that would usually + result in a crash, though disclosure of server memory contents seems + possible as well. + (CVE-2017-15098) + + + + + + Fix sample server-start scripts to become $PGUSER + before opening $PGLOG (Noah Misch) + + + + Previously, the postmaster log file was opened while still running as + root. The database owner could therefore mount an attack against + another system user by making $PGLOG be a symbolic + link to some other file, which would then become corrupted by appending + log messages. + + + + By default, these scripts are not installed anywhere. Users who have + made use of them will need to manually recopy them, or apply the same + changes to their modified versions. If the + existing $PGLOG file is root-owned, it will need to + be removed or renamed out of the way before restarting the server with + the corrected script. + (CVE-2017-12172) + + + + + + Fix crash when logical decoding is invoked from a SPI-using function, + in particular any function written in a PL language + (Tom Lane) + + + + + + Fix json_build_array(), + json_build_object(), and their jsonb + equivalents to handle explicit VARIADIC arguments + correctly (Michael Paquier) + + + + + + Properly reject attempts to convert infinite float values to + type numeric (Tom Lane, KaiGai Kohei) + + + + Previously the behavior was platform-dependent. + + + + + + Fix corner-case crashes when columns have been added to the end of a + view (Tom Lane) + + + + + + Record proper dependencies when a view or rule + contains FieldSelect + or FieldStore expression nodes (Tom Lane) + + + + Lack of these dependencies could allow a column or data + type DROP to go through when it ought to fail, + thereby causing later uses of the view or rule to get errors. + This patch does not do anything to protect existing views/rules, + only ones created in the future. + + + + + + Correctly detect hashability of range data types (Tom Lane) + + + + The planner mistakenly assumed that any range type could be hashed + for use in hash joins or hash aggregation, but actually it must check + whether the range's subtype has hash support. This does not affect any + of the built-in range types, since they're all hashable anyway. 
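As a minimal sketch of the json_build_array()/json_build_object() fix above, explicit VARIADIC calls such as these are now handled correctly (the literal values are arbitrary):

SELECT json_build_array(VARIADIC ARRAY[1, 2, 3]);              -- elements are spread into individual arguments
SELECT json_build_object(VARIADIC ARRAY['a', '1', 'b', '2']);  -- alternating keys and values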
+ + + + + + Fix low-probability loss of NOTIFY messages due to + XID wraparound (Marko Tiikkaja, Tom Lane) + + + + If a session executed no queries, but merely listened for + notifications, for more than 2 billion transactions, it started to miss + some notifications from concurrently-committing transactions. + + + + + + Avoid SIGBUS crash on Linux when a DSM memory + request exceeds the space available in tmpfs + (Thomas Munro) + + + + + + Prevent low-probability crash in processing of nested trigger firings + (Tom Lane) + + + + + + Allow COPY's FREEZE option to + work when the transaction isolation level is REPEATABLE + READ or higher (Noah Misch) + + + + This case was unintentionally broken by a previous bug fix. + + + + + + Correctly restore the umask setting when file creation fails + in COPY or lo_export() + (Peter Eisentraut) + + + + + + Give a better error message for duplicate column names + in ANALYZE (Nathan Bossart) + + + + + + Fix mis-parsing of the last line in a + non-newline-terminated pg_hba.conf file + (Tom Lane) + + + + + + Fix libpq to not require user's home + directory to exist (Tom Lane) + + + + In v10, failure to find the home directory while trying to + read ~/.pgpass was treated as a hard error, + but it should just cause that file to not be found. Both v10 and + previous release branches made the same mistake when + reading ~/.pg_service.conf, though this was less + obvious since that file is not sought unless a service name is + specified. + + + + + + Fix libpq to guard against integer + overflow in the row count of a PGresult + (Michael Paquier) + + + + + + Fix ecpg's handling of out-of-scope cursor + declarations with pointer or array variables (Michael Meskes) + + + + + + In ecpglib, correctly handle backslashes in string literals depending + on whether standard_conforming_strings is set + (Tsunakawa Takayuki) + + + + + + Make ecpglib's Informix-compatibility mode ignore fractional digits in + integer input strings, as expected (Gao Zengqi, Michael Meskes) + + + + + + Sync our copy of the timezone library with IANA release tzcode2017c + (Tom Lane) + + + + This fixes various issues; the only one likely to be user-visible + is that the default DST rules for a POSIX-style zone name, if + no posixrules file exists in the timezone data + directory, now match current US law rather than what it was a dozen + years ago. + + + + + + Update time zone data files to tzdata + release 2017c for DST law changes in Fiji, Namibia, Northern Cyprus, + Sudan, Tonga, and Turks & Caicos Islands, plus historical + corrections for Alaska, Apia, Burma, Calcutta, Detroit, Ireland, + Namibia, and Pago Pago. + + + + + + + + + + Release 9.4.14 + + + Release date: + 2017-08-31 + + + + This release contains a small number of fixes from 9.4.13. + For information about new features in the 9.4 major release, see + . + + + + Migration to Version 9.4.14 + + + A dump/restore is not required for those running 9.4.X. + + + + However, if you are upgrading from a version earlier than 9.4.13, + see . + + + + + Changes + + + + + + + Fix failure of walsender processes to respond to shutdown signals + (Marco Nenciarini) + + + + A missed flag update resulted in walsenders continuing to run as long + as they had a standby server connected, preventing primary-server + shutdown unless immediate shutdown mode is used. 
+ + + + + + Show foreign tables + in information_schema.table_privileges + view (Peter Eisentraut) + + + + All other relevant information_schema views include + foreign tables, but this one ignored them. + + + + Since this view definition is installed by initdb, + merely upgrading will not fix the problem. If you need to fix this + in an existing installation, you can, as a superuser, do this + in psql: + +SET search_path TO information_schema; +CREATE OR REPLACE VIEW table_privileges AS + SELECT CAST(u_grantor.rolname AS sql_identifier) AS grantor, + CAST(grantee.rolname AS sql_identifier) AS grantee, + CAST(current_database() AS sql_identifier) AS table_catalog, + CAST(nc.nspname AS sql_identifier) AS table_schema, + CAST(c.relname AS sql_identifier) AS table_name, + CAST(c.prtype AS character_data) AS privilege_type, + CAST( + CASE WHEN + -- object owner always has grant options + pg_has_role(grantee.oid, c.relowner, 'USAGE') + OR c.grantable + THEN 'YES' ELSE 'NO' END AS yes_or_no) AS is_grantable, + CAST(CASE WHEN c.prtype = 'SELECT' THEN 'YES' ELSE 'NO' END AS yes_or_no) AS with_hierarchy + + FROM ( + SELECT oid, relname, relnamespace, relkind, relowner, (aclexplode(coalesce(relacl, acldefault('r', relowner)))).* FROM pg_class + ) AS c (oid, relname, relnamespace, relkind, relowner, grantor, grantee, prtype, grantable), + pg_namespace nc, + pg_authid u_grantor, + ( + SELECT oid, rolname FROM pg_authid + UNION ALL + SELECT 0::oid, 'PUBLIC' + ) AS grantee (oid, rolname) + + WHERE c.relnamespace = nc.oid + AND c.relkind IN ('r', 'v', 'f') + AND c.grantee = grantee.oid + AND c.grantor = u_grantor.oid + AND c.prtype IN ('INSERT', 'SELECT', 'UPDATE', 'DELETE', 'TRUNCATE', 'REFERENCES', 'TRIGGER') + AND (pg_has_role(u_grantor.oid, 'USAGE') + OR pg_has_role(grantee.oid, 'USAGE') + OR grantee.rolname = 'PUBLIC'); + + This must be repeated in each database to be fixed, + including template0. + + + + + + Clean up handling of a fatal exit (e.g., due to receipt + of SIGTERM) that occurs while trying to execute + a ROLLBACK of a failed transaction (Tom Lane) + + + + This situation could result in an assertion failure. In production + builds, the exit would still occur, but it would log an unexpected + message about cannot drop active portal. + + + + + + Remove assertion that could trigger during a fatal exit (Tom Lane) + + + + + + Correctly identify columns that are of a range type or domain type over + a composite type or domain type being searched for (Tom Lane) + + + + Certain ALTER commands that change the definition of a + composite type or domain type are supposed to fail if there are any + stored values of that type in the database, because they lack the + infrastructure needed to update or check such values. Previously, + these checks could miss relevant values that are wrapped inside range + types or sub-domains, possibly allowing the database to become + inconsistent. + + + + + + Fix crash in pg_restore when using parallel mode and + using a list file to select a subset of items to restore + (Fabrízio de Royes Mello) + + + + + + Change ecpg's parser to allow RETURNING + clauses without attached C variables (Michael Meskes) + + + + This allows ecpg programs to contain SQL constructs + that use RETURNING internally (for example, inside a CTE) + rather than using it to define values to be returned to the client. 
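The kind of construct the ecpg change above enables looks roughly like this, with RETURNING feeding another statement inside a CTE rather than returning rows to the client (table and column names are hypothetical):

WITH moved AS (
    DELETE FROM pending_orders
    WHERE created_at < now() - interval '30 days'
    RETURNING *
)
INSERT INTO archived_orders SELECT * FROM moved;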
+ + + + + + Improve selection of compiler flags for PL/Perl on Windows (Tom Lane) + + + + This fix avoids possible crashes of PL/Perl due to inconsistent + assumptions about the width of time_t values. + A side-effect that may be visible to extension developers is + that _USE_32BIT_TIME_T is no longer defined globally + in PostgreSQL Windows builds. This is not expected + to cause problems, because type time_t is not used + in any PostgreSQL API definitions. + + + + + + + + Release 9.4.13 @@ -12,7 +2323,7 @@ This release contains a variety of fixes from 9.4.12. For information about new features in the 9.4 major release, see - . + . @@ -29,7 +2340,7 @@ Also, if you are upgrading from a version earlier than 9.4.12, - see . + see . @@ -41,7 +2352,7 @@ Further restrict visibility - of pg_user_mappings.umoptions, to + of pg_user_mappings.umoptions, to protect passwords stored as user mapping options (Noah Misch) @@ -49,11 +2360,11 @@ The fix for CVE-2017-7486 was incorrect: it allowed a user to see the options in her own user mapping, even if she did not - have USAGE permission on the associated foreign server. + have USAGE permission on the associated foreign server. Such options might include a password that had been provided by the server owner rather than the user herself. - Since information_schema.user_mapping_options does not - show the options in such cases, pg_user_mappings + Since information_schema.user_mapping_options does not + show the options in such cases, pg_user_mappings should not either. (CVE-2017-7547) @@ -68,15 +2379,15 @@ Restart the postmaster after adding allow_system_table_mods - = true to postgresql.conf. (In versions - supporting ALTER SYSTEM, you can use that to make the + = true to postgresql.conf. (In versions + supporting ALTER SYSTEM, you can use that to make the configuration change, but you'll still need a restart.) - In each database of the cluster, + In each database of the cluster, run the following commands as superuser: SET search_path = pg_catalog; @@ -107,15 +2418,15 @@ CREATE OR REPLACE VIEW pg_user_mappings AS - Do not forget to include the template0 - and template1 databases, or the vulnerability will still - exist in databases you create later. To fix template0, + Do not forget to include the template0 + and template1 databases, or the vulnerability will still + exist in databases you create later. To fix template0, you'll need to temporarily make it accept connections. - In PostgreSQL 9.5 and later, you can use + In PostgreSQL 9.5 and later, you can use ALTER DATABASE template0 WITH ALLOW_CONNECTIONS true; - and then after fixing template0, undo that with + and then after fixing template0, undo that with ALTER DATABASE template0 WITH ALLOW_CONNECTIONS false; @@ -129,7 +2440,7 @@ UPDATE pg_database SET datallowconn = false WHERE datname = 'template0'; - Finally, remove the allow_system_table_mods configuration + Finally, remove the allow_system_table_mods configuration setting, and again restart the postmaster. @@ -143,16 +2454,16 @@ UPDATE pg_database SET datallowconn = false WHERE datname = 'template0'; - libpq ignores empty password specifications, and does + libpq ignores empty password specifications, and does not transmit them to the server. So, if a user's password has been set to the empty string, it's impossible to log in with that password - via psql or other libpq-based + via psql or other libpq-based clients. An administrator might therefore believe that setting the password to empty is equivalent to disabling password login. 
- However, with a modified or non-libpq-based client, + However, with a modified or non-libpq-based client, logging in could be possible, depending on which authentication method is configured. In particular the most common - method, md5, accepted empty passwords. + method, md5, accepted empty passwords. Change the server to reject empty passwords in all cases. (CVE-2017-7546) @@ -160,13 +2471,13 @@ UPDATE pg_database SET datallowconn = false WHERE datname = 'template0'; - Make lo_put() check for UPDATE privilege on + Make lo_put() check for UPDATE privilege on the target large object (Tom Lane, Michael Paquier) - lo_put() should surely require the same permissions - as lowrite(), but the check was missing, allowing any + lo_put() should surely require the same permissions + as lowrite(), but the check was missing, allowing any user to change the data in a large object. (CVE-2017-7548) @@ -241,7 +2552,7 @@ UPDATE pg_database SET datallowconn = false WHERE datname = 'template0'; - Fix code for setting on + Fix code for setting on Solaris (Tom Lane) @@ -273,21 +2584,21 @@ UPDATE pg_database SET datallowconn = false WHERE datname = 'template0'; Fix possible creation of an invalid WAL segment when a standby is - promoted just after it processes an XLOG_SWITCH WAL + promoted just after it processes an XLOG_SWITCH WAL record (Andres Freund) - Fix walsender to exit promptly when client requests + Fix walsender to exit promptly when client requests shutdown (Tom Lane) - Fix SIGHUP and SIGUSR1 handling in + Fix SIGHUP and SIGUSR1 handling in walsender processes (Petr Jelinek, Andres Freund) @@ -301,7 +2612,7 @@ UPDATE pg_database SET datallowconn = false WHERE datname = 'template0'; - Fix unnecessarily slow restarts of walreceiver + Fix unnecessarily slow restarts of walreceiver processes due to race condition in postmaster (Tom Lane) @@ -318,7 +2629,7 @@ Branch: REL9_4_STABLE [23a2b818f] 2017-08-05 14:56:40 -0700 Logical decoding crashed on tuples that are wider than 64KB (after compression, but with all data in-line). The case arises only - when REPLICA IDENTITY FULL is enabled for a table + when REPLICA IDENTITY FULL is enabled for a table containing such tuples. @@ -366,7 +2677,7 @@ Branch: REL9_4_STABLE [23a2b818f] 2017-08-05 14:56:40 -0700 - Fix cases where an INSERT or UPDATE assigns + Fix cases where an INSERT or UPDATE assigns to more than one element of a column that is of domain-over-array type (Tom Lane) @@ -374,7 +2685,7 @@ Branch: REL9_4_STABLE [23a2b818f] 2017-08-05 14:56:40 -0700 - Allow window functions to be used in sub-SELECTs that + Allow window functions to be used in sub-SELECTs that are within the arguments of an aggregate function (Tom Lane) @@ -382,56 +2693,56 @@ Branch: REL9_4_STABLE [23a2b818f] 2017-08-05 14:56:40 -0700 Move autogenerated array types out of the way during - ALTER ... RENAME (Vik Fearing) + ALTER ... RENAME (Vik Fearing) Previously, we would rename a conflicting autogenerated array type - out of the way during CREATE; this fix extends that + out of the way during CREATE; this fix extends that behavior to renaming operations. - Ensure that ALTER USER ... SET accepts all the syntax - variants that ALTER ROLE ... SET does (Peter Eisentraut) + Ensure that ALTER USER ... SET accepts all the syntax + variants that ALTER ROLE ... 
SET does (Peter Eisentraut) Properly update dependency info when changing a datatype I/O - function's argument or return type from opaque to the + function's argument or return type from opaque to the correct type (Heikki Linnakangas) - CREATE TYPE updates I/O functions declared in this + CREATE TYPE updates I/O functions declared in this long-obsolete style, but it forgot to record a dependency on the - type, allowing a subsequent DROP TYPE to leave broken + type, allowing a subsequent DROP TYPE to leave broken function definitions behind. - Reduce memory usage when ANALYZE processes - a tsvector column (Heikki Linnakangas) + Reduce memory usage when ANALYZE processes + a tsvector column (Heikki Linnakangas) Fix unnecessary precision loss and sloppy rounding when multiplying - or dividing money values by integers or floats (Tom Lane) + or dividing money values by integers or floats (Tom Lane) Tighten checks for whitespace in functions that parse identifiers, - such as regprocedurein() (Tom Lane) + such as regprocedurein() (Tom Lane) @@ -442,20 +2753,20 @@ Branch: REL9_4_STABLE [23a2b818f] 2017-08-05 14:56:40 -0700 - Use relevant #define symbols from Perl while - compiling PL/Perl (Ashutosh Sharma, Tom Lane) + Use relevant #define symbols from Perl while + compiling PL/Perl (Ashutosh Sharma, Tom Lane) This avoids portability problems, typically manifesting as - a handshake mismatch during library load, when working with + a handshake mismatch during library load, when working with recent Perl versions. - In libpq, reset GSS/SASL and SSPI authentication + In libpq, reset GSS/SASL and SSPI authentication state properly after a failed connection attempt (Michael Paquier) @@ -468,9 +2779,9 @@ Branch: REL9_4_STABLE [23a2b818f] 2017-08-05 14:56:40 -0700 - In psql, fix failure when COPY FROM STDIN + In psql, fix failure when COPY FROM STDIN is ended with a keyboard EOF signal and then another COPY - FROM STDIN is attempted (Thomas Munro) + FROM STDIN is attempted (Thomas Munro) @@ -481,8 +2792,8 @@ Branch: REL9_4_STABLE [23a2b818f] 2017-08-05 14:56:40 -0700 - Fix pg_dump and pg_restore to - emit REFRESH MATERIALIZED VIEW commands last (Tom Lane) + Fix pg_dump and pg_restore to + emit REFRESH MATERIALIZED VIEW commands last (Tom Lane) @@ -493,15 +2804,15 @@ Branch: REL9_4_STABLE [23a2b818f] 2017-08-05 14:56:40 -0700 - Improve pg_dump/pg_restore's - reporting of error conditions originating in zlib + Improve pg_dump/pg_restore's + reporting of error conditions originating in zlib (Vladimir Kunschikov, Álvaro Herrera) - Fix pg_dump with the option to drop event triggers as expected (Tom Lane) @@ -514,14 +2825,14 @@ Branch: REL9_4_STABLE [23a2b818f] 2017-08-05 14:56:40 -0700 - Fix pg_dump to not emit invalid SQL for an empty + Fix pg_dump to not emit invalid SQL for an empty operator class (Daniel Gustafsson) - Fix pg_dump output to stdout on Windows (Kuntal Ghosh) + Fix pg_dump output to stdout on Windows (Kuntal Ghosh) @@ -532,14 +2843,14 @@ Branch: REL9_4_STABLE [23a2b818f] 2017-08-05 14:56:40 -0700 - Fix pg_get_ruledef() to print correct output for - the ON SELECT rule of a view whose columns have been + Fix pg_get_ruledef() to print correct output for + the ON SELECT rule of a view whose columns have been renamed (Tom Lane) - In some corner cases, pg_dump relies - on pg_get_ruledef() to dump views, so that this error + In some corner cases, pg_dump relies + on pg_get_ruledef() to dump views, so that this error could result in dump/reload failures. 
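A minimal way to exercise the pg_get_ruledef() scenario described above, assuming hypothetical object names, is to rename a view column and then deparse the view's ON SELECT rule:

CREATE TABLE base_t (a int, b text);
CREATE VIEW v AS SELECT a, b FROM base_t;
ALTER TABLE v RENAME COLUMN b TO label;
SELECT pg_get_ruledef(oid) FROM pg_rewrite
 WHERE ev_class = 'v'::regclass AND rulename = '_RETURN';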
@@ -547,13 +2858,13 @@ Branch: REL9_4_STABLE [23a2b818f] 2017-08-05 14:56:40 -0700 Fix dumping of outer joins with empty constraints, such as the result - of a NATURAL LEFT JOIN with no common columns (Tom Lane) + of a NATURAL LEFT JOIN with no common columns (Tom Lane) - Fix dumping of function expressions in the FROM clause in + Fix dumping of function expressions in the FROM clause in cases where the expression does not deparse into something that looks like a function call (Tom Lane) @@ -561,7 +2872,7 @@ Branch: REL9_4_STABLE [23a2b818f] 2017-08-05 14:56:40 -0700 - Fix pg_basebackup output to stdout on Windows + Fix pg_basebackup output to stdout on Windows (Haribabu Kommi) @@ -573,8 +2884,8 @@ Branch: REL9_4_STABLE [23a2b818f] 2017-08-05 14:56:40 -0700 - Fix pg_upgrade to ensure that the ending WAL record - does not have = minimum + Fix pg_upgrade to ensure that the ending WAL record + does not have = minimum (Bruce Momjian) @@ -586,9 +2897,9 @@ Branch: REL9_4_STABLE [23a2b818f] 2017-08-05 14:56:40 -0700 - In postgres_fdw, re-establish connections to remote - servers after ALTER SERVER or ALTER USER - MAPPING commands (Kyotaro Horiguchi) + In postgres_fdw, re-establish connections to remote + servers after ALTER SERVER or ALTER USER + MAPPING commands (Kyotaro Horiguchi) @@ -599,7 +2910,7 @@ Branch: REL9_4_STABLE [23a2b818f] 2017-08-05 14:56:40 -0700 - In postgres_fdw, allow cancellation of remote + In postgres_fdw, allow cancellation of remote transaction control commands (Robert Haas, Rafia Sabih) @@ -611,14 +2922,14 @@ Branch: REL9_4_STABLE [23a2b818f] 2017-08-05 14:56:40 -0700 - Increase MAX_SYSCACHE_CALLBACKS to provide more room for + Increase MAX_SYSCACHE_CALLBACKS to provide more room for extensions (Tom Lane) - Always use , not , when building shared libraries with gcc (Tom Lane) @@ -638,34 +2949,34 @@ Branch: REL9_4_STABLE [23a2b818f] 2017-08-05 14:56:40 -0700 - In MSVC builds, handle the case where the openssl - library is not within a VC subdirectory (Andrew Dunstan) + In MSVC builds, handle the case where the OpenSSL + library is not within a VC subdirectory (Andrew Dunstan) - In MSVC builds, add proper include path for libxml2 + In MSVC builds, add proper include path for libxml2 header files (Andrew Dunstan) This fixes a former need to move things around in standard Windows - installations of libxml2. + installations of libxml2. In MSVC builds, recognize a Tcl library that is - named tcl86.lib (Noah Misch) + named tcl86.lib (Noah Misch) - In MSVC builds, honor PROVE_FLAGS settings - on vcregress.pl's command line (Andrew Dunstan) + In MSVC builds, honor PROVE_FLAGS settings + on vcregress.pl's command line (Andrew Dunstan) @@ -685,7 +2996,7 @@ Branch: REL9_4_STABLE [23a2b818f] 2017-08-05 14:56:40 -0700 This release contains a variety of fixes from 9.4.11. For information about new features in the 9.4 major release, see - . + . @@ -702,12 +3013,12 @@ Branch: REL9_4_STABLE [23a2b818f] 2017-08-05 14:56:40 -0700 Also, if you are using third-party replication tools that depend - on logical decoding, see the fourth changelog entry below. + on logical decoding, see the fourth changelog entry below. Also, if you are upgrading from a version earlier than 9.4.11, - see . + see . 
@@ -719,18 +3030,18 @@ Branch: REL9_4_STABLE [23a2b818f] 2017-08-05 14:56:40 -0700 Restrict visibility - of pg_user_mappings.umoptions, to + of pg_user_mappings.umoptions, to protect passwords stored as user mapping options (Michael Paquier, Feike Steenbergen) The previous coding allowed the owner of a foreign server object, - or anyone he has granted server USAGE permission to, + or anyone he has granted server USAGE permission to, to see the options for all user mappings associated with that server. This might well include passwords for other users. Adjust the view definition to match the behavior of - information_schema.user_mapping_options, namely that + information_schema.user_mapping_options, namely that these options are visible to the user being mapped, or if the mapping is for PUBLIC and the current user is the server owner, or if the current user is a superuser. @@ -741,7 +3052,7 @@ Branch: REL9_4_STABLE [23a2b818f] 2017-08-05 14:56:40 -0700 By itself, this patch will only fix the behavior in newly initdb'd databases. If you wish to apply this change in an existing database, follow the corrected procedure shown in the changelog entry for - CVE-2017-7547, in . + CVE-2017-7547, in . @@ -754,7 +3065,7 @@ Branch: REL9_4_STABLE [23a2b818f] 2017-08-05 14:56:40 -0700 Some selectivity estimation functions in the planner will apply user-defined operators to values obtained - from pg_statistic, such as most common values and + from pg_statistic, such as most common values and histogram entries. This occurs before table permissions are checked, so a nefarious user could exploit the behavior to obtain these values for table columns he does not have permission to read. To fix, @@ -768,17 +3079,17 @@ Branch: REL9_4_STABLE [23a2b818f] 2017-08-05 14:56:40 -0700 - Restore libpq's recognition of - the PGREQUIRESSL environment variable (Daniel Gustafsson) + Restore libpq's recognition of + the PGREQUIRESSL environment variable (Daniel Gustafsson) Processing of this environment variable was unintentionally dropped - in PostgreSQL 9.3, but its documentation remained. + in PostgreSQL 9.3, but its documentation remained. This creates a security hazard, since users might be relying on the environment variable to force SSL-encrypted connections, but that would no longer be guaranteed. Restore handling of the variable, - but give it lower priority than PGSSLMODE, to avoid + but give it lower priority than PGSSLMODE, to avoid breaking configurations that work correctly with post-9.3 code. (CVE-2017-7485) @@ -809,7 +3120,7 @@ Branch: REL9_4_STABLE [23a2b818f] 2017-08-05 14:56:40 -0700 - Fix possible corruption of init forks of unlogged indexes + Fix possible corruption of init forks of unlogged indexes (Robert Haas, Michael Paquier) @@ -822,7 +3133,7 @@ Branch: REL9_4_STABLE [23a2b818f] 2017-08-05 14:56:40 -0700 - Fix incorrect reconstruction of pg_subtrans entries + Fix incorrect reconstruction of pg_subtrans entries when a standby server replays a prepared but uncommitted two-phase transaction (Tom Lane) @@ -830,21 +3141,21 @@ Branch: REL9_4_STABLE [23a2b818f] 2017-08-05 14:56:40 -0700 In most cases this turned out to have no visible ill effects, but in corner cases it could result in circular references - in pg_subtrans, potentially causing infinite loops + in pg_subtrans, potentially causing infinite loops in queries that examine rows modified by the two-phase transaction. 
- Avoid possible crash in walsender due to failure + Avoid possible crash in walsender due to failure to initialize a string buffer (Stas Kelvich, Fujii Masao) - Fix postmaster's handling of fork() failure for a + Fix postmaster's handling of fork() failure for a background worker process (Tom Lane) @@ -865,19 +3176,19 @@ Branch: REL9_4_STABLE [23a2b818f] 2017-08-05 14:56:40 -0700 Due to lack of a cache flush step between commands in an extension script file, non-utility queries might not see the effects of an immediately preceding catalog change, such as ALTER TABLE - ... RENAME. + ... RENAME. Skip tablespace privilege checks when ALTER TABLE ... ALTER - COLUMN TYPE rebuilds an existing index (Noah Misch) + COLUMN TYPE rebuilds an existing index (Noah Misch) The command failed if the calling user did not currently have - CREATE privilege for the tablespace containing the index. + CREATE privilege for the tablespace containing the index. That behavior seems unhelpful, so skip the check, allowing the index to be rebuilt where it is. @@ -885,27 +3196,27 @@ Branch: REL9_4_STABLE [23a2b818f] 2017-08-05 14:56:40 -0700 - Fix ALTER TABLE ... VALIDATE CONSTRAINT to not recurse - to child tables when the constraint is marked NO INHERIT + Fix ALTER TABLE ... VALIDATE CONSTRAINT to not recurse + to child tables when the constraint is marked NO INHERIT (Amit Langote) - This fix prevents unwanted constraint does not exist failures + This fix prevents unwanted constraint does not exist failures when no matching constraint is present in the child tables. - Fix VACUUM to account properly for pages that could not + Fix VACUUM to account properly for pages that could not be scanned due to conflicting page pins (Andrew Gierth) This tended to lead to underestimation of the number of tuples in the table. In the worst case of a small heavily-contended - table, VACUUM could incorrectly report that the table + table, VACUUM could incorrectly report that the table contained no tuples, leading to very bad planning choices. @@ -919,12 +3230,12 @@ Branch: REL9_4_STABLE [23a2b818f] 2017-08-05 14:56:40 -0700 - Fix integer-overflow problems in interval comparison (Kyotaro + Fix integer-overflow problems in interval comparison (Kyotaro Horiguchi, Tom Lane) - The comparison operators for type interval could yield wrong + The comparison operators for type interval could yield wrong answers for intervals larger than about 296000 years. Indexes on columns containing such large values should be reindexed, since they may be corrupt. @@ -933,21 +3244,21 @@ Branch: REL9_4_STABLE [23a2b818f] 2017-08-05 14:56:40 -0700 - Fix cursor_to_xml() to produce valid output - with tableforest = false + Fix cursor_to_xml() to produce valid output + with tableforest = false (Thomas Munro, Peter Eisentraut) - Previously it failed to produce a wrapping <table> + Previously it failed to produce a wrapping <table> element. 
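The cursor_to_xml() entry above concerns calls with tableforest set to false, as in this minimal sketch (the cursor and query are arbitrary); the output now includes the wrapping <table> element:

BEGIN;
DECLARE c CURSOR FOR SELECT 1 AS x, 'two'::text AS y;
SELECT cursor_to_xml('c', 10, false, false, '');
COMMIT;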
- Fix roundoff problems in float8_timestamptz() - and make_interval() (Tom Lane) + Fix roundoff problems in float8_timestamptz() + and make_interval() (Tom Lane) @@ -959,7 +3270,7 @@ Branch: REL9_4_STABLE [23a2b818f] 2017-08-05 14:56:40 -0700 - Improve performance of pg_timezone_names view + Improve performance of pg_timezone_names view (Tom Lane, David Rowley) @@ -973,13 +3284,13 @@ Branch: REL9_4_STABLE [23a2b818f] 2017-08-05 14:56:40 -0700 - Fix sloppy handling of corner-case errors from lseek() - and close() (Tom Lane) + Fix sloppy handling of corner-case errors from lseek() + and close() (Tom Lane) Neither of these system calls are likely to fail in typical situations, - but if they did, fd.c could get quite confused. + but if they did, fd.c could get quite confused. @@ -997,21 +3308,21 @@ Branch: REL9_4_STABLE [23a2b818f] 2017-08-05 14:56:40 -0700 - Fix ecpg to support COMMIT PREPARED - and ROLLBACK PREPARED (Masahiko Sawada) + Fix ecpg to support COMMIT PREPARED + and ROLLBACK PREPARED (Masahiko Sawada) Fix a double-free error when processing dollar-quoted string literals - in ecpg (Michael Meskes) + in ecpg (Michael Meskes) - In pg_dump, fix incorrect schema and owner marking for + In pg_dump, fix incorrect schema and owner marking for comments and security labels of some types of database objects (Giuseppe Broccolo, Tom Lane) @@ -1026,20 +3337,20 @@ Branch: REL9_4_STABLE [23a2b818f] 2017-08-05 14:56:40 -0700 - Avoid emitting an invalid list file in pg_restore -l + Avoid emitting an invalid list file in pg_restore -l when SQL object names contain newlines (Tom Lane) Replace newlines by spaces, which is sufficient to make the output - valid for pg_restore -L's purposes. + valid for pg_restore -L's purposes. - Fix pg_upgrade to transfer comments and security labels - attached to large objects (blobs) (Stephen Frost) + Fix pg_upgrade to transfer comments and security labels + attached to large objects (blobs) (Stephen Frost) @@ -1051,26 +3362,26 @@ Branch: REL9_4_STABLE [23a2b818f] 2017-08-05 14:56:40 -0700 Improve error handling - in contrib/adminpack's pg_file_write() + in contrib/adminpack's pg_file_write() function (Noah Misch) Notably, it failed to detect errors reported - by fclose(). + by fclose(). - In contrib/dblink, avoid leaking the previous unnamed + In contrib/dblink, avoid leaking the previous unnamed connection when establishing a new unnamed connection (Joe Conway) - Fix contrib/pg_trgm's extraction of trigrams from regular + Fix contrib/pg_trgm's extraction of trigrams from regular expressions (Tom Lane) @@ -1083,7 +3394,7 @@ Branch: REL9_4_STABLE [23a2b818f] 2017-08-05 14:56:40 -0700 - In contrib/postgres_fdw, + In contrib/postgres_fdw, transmit query cancellation requests to the remote server (Michael Paquier, Etsuro Fujita) @@ -1133,7 +3444,7 @@ Branch: REL9_2_STABLE [fb50c38e9] 2017-04-17 13:52:42 -0400 - Update time zone data files to tzdata release 2017b + Update time zone data files to tzdata release 2017b for DST law changes in Chile, Haiti, and Mongolia, plus historical corrections for Ecuador, Kazakhstan, Liberia, and Spain. Switch to numeric abbreviations for numerous time zones in South @@ -1147,9 +3458,9 @@ Branch: REL9_2_STABLE [fb50c38e9] 2017-04-17 13:52:42 -0400 or no currency among the local population. They are in process of reversing that policy in favor of using numeric UTC offsets in zones where there is no evidence of real-world use of an English - abbreviation. At least for the time being, PostgreSQL + abbreviation. 
At least for the time being, PostgreSQL will continue to accept such removed abbreviations for timestamp input. - But they will not be shown in the pg_timezone_names + But they will not be shown in the pg_timezone_names view nor used for output. @@ -1162,16 +3473,16 @@ Branch: REL9_2_STABLE [fb50c38e9] 2017-04-17 13:52:42 -0400 The Microsoft MSVC build scripts neglected to install - the posixrules file in the timezone directory tree. + the posixrules file in the timezone directory tree. This resulted in the timezone code falling back to its built-in rule about what DST behavior to assume for a POSIX-style time zone name. For historical reasons that still corresponds to the DST rules the USA was using before 2007 (i.e., change on first Sunday in April and last Sunday in October). With this fix, a POSIX-style zone name will use the current and historical DST transition dates of - the US/Eastern zone. If you don't want that, remove - the posixrules file, or replace it with a copy of some - other zone file (see ). Note that + the US/Eastern zone. If you don't want that, remove + the posixrules file, or replace it with a copy of some + other zone file (see ). Note that due to caching, you may need to restart the server to get such changes to take effect. @@ -1193,7 +3504,7 @@ Branch: REL9_2_STABLE [fb50c38e9] 2017-04-17 13:52:42 -0400 This release contains a variety of fixes from 9.4.10. For information about new features in the 9.4 major release, see - . + . @@ -1211,7 +3522,7 @@ Branch: REL9_2_STABLE [fb50c38e9] 2017-04-17 13:52:42 -0400 Also, if you are upgrading from a version earlier than 9.4.10, - see . + see . @@ -1223,15 +3534,15 @@ Branch: REL9_2_STABLE [fb50c38e9] 2017-04-17 13:52:42 -0400 Fix a race condition that could cause indexes built - with CREATE INDEX CONCURRENTLY to be corrupt + with CREATE INDEX CONCURRENTLY to be corrupt (Pavan Deolasee, Tom Lane) - If CREATE INDEX CONCURRENTLY was used to build an index + If CREATE INDEX CONCURRENTLY was used to build an index that depends on a column not previously indexed, then rows updated by transactions that ran concurrently with - the CREATE INDEX command could have received incorrect + the CREATE INDEX command could have received incorrect index entries. If you suspect this may have happened, the most reliable solution is to rebuild affected indexes after installing this update. @@ -1248,19 +3559,19 @@ Branch: REL9_2_STABLE [fb50c38e9] 2017-04-17 13:52:42 -0400 Backends failed to account for this snapshot when advertising their oldest xmin, potentially allowing concurrent vacuuming operations to remove data that was still needed. This led to transient failures - along the lines of cache lookup failed for relation 1255. + along the lines of cache lookup failed for relation 1255. - Unconditionally WAL-log creation of the init fork for an + Unconditionally WAL-log creation of the init fork for an unlogged table (Michael Paquier) - Previously, this was skipped when - = minimal, but actually it's necessary even in that case + Previously, this was skipped when + = minimal, but actually it's necessary even in that case to ensure that the unlogged table is properly reset to empty after a crash. @@ -1326,13 +3637,13 @@ Branch: REL9_2_STABLE [fb50c38e9] 2017-04-17 13:52:42 -0400 - Make sure ALTER TABLE preserves index tablespace + Make sure ALTER TABLE preserves index tablespace assignments when rebuilding indexes (Tom Lane, Michael Paquier) Previously, non-default settings - of could result in broken + of could result in broken indexes. 
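For the CREATE INDEX CONCURRENTLY problem described earlier in this list, rebuilding a suspect index after installing the update might look like this (index, table, and column names are hypothetical; plain REINDEX takes a lock that blocks writes):

REINDEX INDEX my_suspect_index;
-- or build a replacement without blocking writes and swap it in:
CREATE INDEX CONCURRENTLY my_index_new ON my_table (my_column);
DROP INDEX CONCURRENTLY my_suspect_index;
ALTER INDEX my_index_new RENAME TO my_suspect_index;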
@@ -1341,7 +3652,7 @@ Branch: REL9_2_STABLE [fb50c38e9] 2017-04-17 13:52:42 -0400 Fix incorrect updating of trigger function properties when changing a foreign-key constraint's deferrability properties with ALTER - TABLE ... ALTER CONSTRAINT (Tom Lane) + TABLE ... ALTER CONSTRAINT (Tom Lane) @@ -1357,15 +3668,15 @@ Branch: REL9_2_STABLE [fb50c38e9] 2017-04-17 13:52:42 -0400 - This avoids could not find trigger NNN - or relation NNN has no triggers errors. + This avoids could not find trigger NNN + or relation NNN has no triggers errors. Fix processing of OID column when a table with OIDs is associated to - a parent with OIDs via ALTER TABLE ... INHERIT (Amit + a parent with OIDs via ALTER TABLE ... INHERIT (Amit Langote) @@ -1378,7 +3689,7 @@ Branch: REL9_2_STABLE [fb50c38e9] 2017-04-17 13:52:42 -0400 - Fix CREATE OR REPLACE VIEW to update the view query + Fix CREATE OR REPLACE VIEW to update the view query before attempting to apply the new view options (Dean Rasheed) @@ -1391,7 +3702,7 @@ Branch: REL9_2_STABLE [fb50c38e9] 2017-04-17 13:52:42 -0400 Report correct object identity during ALTER TEXT SEARCH - CONFIGURATION (Artur Zakirov) + CONFIGURATION (Artur Zakirov) @@ -1421,13 +3732,13 @@ Branch: REL9_2_STABLE [fb50c38e9] 2017-04-17 13:52:42 -0400 - Prevent multicolumn expansion of foo.* in - an UPDATE source expression (Tom Lane) + Prevent multicolumn expansion of foo.* in + an UPDATE source expression (Tom Lane) This led to UPDATE target count mismatch --- internal - error. Now the syntax is understood as a whole-row variable, + error. Now the syntax is understood as a whole-row variable, as it would be in other contexts. @@ -1435,12 +3746,12 @@ Branch: REL9_2_STABLE [fb50c38e9] 2017-04-17 13:52:42 -0400 Ensure that column typmods are determined accurately for - multi-row VALUES constructs (Tom Lane) + multi-row VALUES constructs (Tom Lane) This fixes problems occurring when the first value in a column has a - determinable typmod (e.g., length for a varchar value) but + determinable typmod (e.g., length for a varchar value) but later values don't share the same limit. @@ -1455,15 +3766,15 @@ Branch: REL9_2_STABLE [fb50c38e9] 2017-04-17 13:52:42 -0400 Normally, a Unicode surrogate leading character must be followed by a Unicode surrogate trailing character, but the check for this was missed if the leading character was the last character in a Unicode - string literal (U&'...') or Unicode identifier - (U&"..."). + string literal (U&'...') or Unicode identifier + (U&"..."). 
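The multi-row VALUES entry above is about constructs like the following, where the first row carries a varchar(3) typmod that must not be imposed on the longer value in the second row (a minimal sketch):

SELECT * FROM (VALUES ('abc'::varchar(3)), ('abcdefgh')) AS v(t);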
Ensure that a purely negative text search query, such - as !foo, matches empty tsvectors (Tom Dunstan) + as !foo, matches empty tsvectors (Tom Dunstan) @@ -1474,33 +3785,33 @@ Branch: REL9_2_STABLE [fb50c38e9] 2017-04-17 13:52:42 -0400 - Prevent crash when ts_rewrite() replaces a non-top-level + Prevent crash when ts_rewrite() replaces a non-top-level subtree with an empty query (Artur Zakirov) - Fix performance problems in ts_rewrite() (Tom Lane) + Fix performance problems in ts_rewrite() (Tom Lane) - Fix ts_rewrite()'s handling of nested NOT operators + Fix ts_rewrite()'s handling of nested NOT operators (Tom Lane) - Fix array_fill() to handle empty arrays properly (Tom Lane) + Fix array_fill() to handle empty arrays properly (Tom Lane) - Fix one-byte buffer overrun in quote_literal_cstr() + Fix one-byte buffer overrun in quote_literal_cstr() (Heikki Linnakangas) @@ -1512,8 +3823,8 @@ Branch: REL9_2_STABLE [fb50c38e9] 2017-04-17 13:52:42 -0400 - Prevent multiple calls of pg_start_backup() - and pg_stop_backup() from running concurrently (Michael + Prevent multiple calls of pg_start_backup() + and pg_stop_backup() from running concurrently (Michael Paquier) @@ -1525,15 +3836,15 @@ Branch: REL9_2_STABLE [fb50c38e9] 2017-04-17 13:52:42 -0400 - Avoid discarding interval-to-interval casts + Avoid discarding interval-to-interval casts that aren't really no-ops (Tom Lane) In some cases, a cast that should result in zeroing out - low-order interval fields was mistakenly deemed to be a + low-order interval fields was mistakenly deemed to be a no-op and discarded. An example is that casting from INTERVAL - MONTH to INTERVAL YEAR failed to clear the months field. + MONTH to INTERVAL YEAR failed to clear the months field. @@ -1546,28 +3857,28 @@ Branch: REL9_2_STABLE [fb50c38e9] 2017-04-17 13:52:42 -0400 - Fix pg_dump to dump user-defined casts and transforms + Fix pg_dump to dump user-defined casts and transforms that use built-in functions (Stephen Frost) - Fix pg_restore with to behave more sanely if an archive contains - unrecognized DROP commands (Tom Lane) + unrecognized DROP commands (Tom Lane) This doesn't fix any live bug, but it may improve the behavior in - future if pg_restore is used with an archive - generated by a later pg_dump version. + future if pg_restore is used with an archive + generated by a later pg_dump version. - Fix pg_basebackup's rate limiting in the presence of + Fix pg_basebackup's rate limiting in the presence of slow I/O (Antonin Houska) @@ -1580,15 +3891,15 @@ Branch: REL9_2_STABLE [fb50c38e9] 2017-04-17 13:52:42 -0400 - Fix pg_basebackup's handling of - symlinked pg_stat_tmp and pg_replslot + Fix pg_basebackup's handling of + symlinked pg_stat_tmp and pg_replslot subdirectories (Magnus Hagander, Michael Paquier) - Fix possible pg_basebackup failure on standby + Fix possible pg_basebackup failure on standby server when including WAL files (Amit Kapila, Robert Haas) @@ -1607,21 +3918,21 @@ Branch: REL9_2_STABLE [fb50c38e9] 2017-04-17 13:52:42 -0400 - Fix PL/Tcl to support triggers on tables that have .tupno + Fix PL/Tcl to support triggers on tables that have .tupno as a column name (Tom Lane) This matches the (previously undocumented) behavior of - PL/Tcl's spi_exec and spi_execp commands, - namely that a magic .tupno column is inserted only if + PL/Tcl's spi_exec and spi_execp commands, + namely that a magic .tupno column is inserted only if there isn't a real column named that. 
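The interval-cast entry above refers to casts of the following shape; with the fix, the cast is no longer discarded as a no-op and the months field is cleared as the target field list requires (a minimal sketch):

SELECT CAST(CAST('11 months' AS interval month) AS interval year);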
- Allow DOS-style line endings in ~/.pgpass files, + Allow DOS-style line endings in ~/.pgpass files, even on Unix (Vik Fearing) @@ -1633,23 +3944,23 @@ Branch: REL9_2_STABLE [fb50c38e9] 2017-04-17 13:52:42 -0400 - Fix one-byte buffer overrun if ecpg is given a file + Fix one-byte buffer overrun if ecpg is given a file name that ends with a dot (Takayuki Tsunakawa) - Fix psql's tab completion for ALTER DEFAULT - PRIVILEGES (Gilles Darold, Stephen Frost) + Fix psql's tab completion for ALTER DEFAULT + PRIVILEGES (Gilles Darold, Stephen Frost) - In psql, treat an empty or all-blank setting of - the PAGER environment variable as meaning no - pager (Tom Lane) + In psql, treat an empty or all-blank setting of + the PAGER environment variable as meaning no + pager (Tom Lane) @@ -1660,22 +3971,22 @@ Branch: REL9_2_STABLE [fb50c38e9] 2017-04-17 13:52:42 -0400 - Improve contrib/dblink's reporting of - low-level libpq errors, such as out-of-memory + Improve contrib/dblink's reporting of + low-level libpq errors, such as out-of-memory (Joe Conway) - Teach contrib/dblink to ignore irrelevant server options - when it uses a contrib/postgres_fdw foreign server as + Teach contrib/dblink to ignore irrelevant server options + when it uses a contrib/postgres_fdw foreign server as the source of connection options (Corey Huinker) Previously, if the foreign server object had options that were not - also libpq connection options, an error occurred. + also libpq connection options, an error occurred. @@ -1701,7 +4012,7 @@ Branch: REL9_2_STABLE [fb50c38e9] 2017-04-17 13:52:42 -0400 - Update time zone data files to tzdata release 2016j + Update time zone data files to tzdata release 2016j for DST law changes in northern Cyprus (adding a new zone Asia/Famagusta), Russia (adding a new zone Europe/Saratov), Tonga, and Antarctica/Casey. @@ -1726,7 +4037,7 @@ Branch: REL9_2_STABLE [fb50c38e9] 2017-04-17 13:52:42 -0400 This release contains a variety of fixes from 9.4.9. For information about new features in the 9.4 major release, see - . + . @@ -1744,7 +4055,7 @@ Branch: REL9_2_STABLE [fb50c38e9] 2017-04-17 13:52:42 -0400 Also, if you are upgrading from a version earlier than 9.4.6, - see . + see . @@ -1764,7 +4075,7 @@ Branch: REL9_2_STABLE [fb50c38e9] 2017-04-17 13:52:42 -0400 crash recovery, or to be written incorrectly on a standby server. Bogus entries in a free space map could lead to attempts to access pages that have been truncated away from the relation itself, typically - producing errors like could not read block XXX: + producing errors like could not read block XXX: read only 0 of 8192 bytes. Checksum failures in the visibility map are also possible, if checksumming is enabled. @@ -1772,7 +4083,7 @@ Branch: REL9_2_STABLE [fb50c38e9] 2017-04-17 13:52:42 -0400 Procedures for determining whether there is a problem and repairing it if so are discussed at - . + . @@ -1783,20 +4094,20 @@ Branch: REL9_2_STABLE [fb50c38e9] 2017-04-17 13:52:42 -0400 - The typical symptom was unexpected GIN leaf action errors + The typical symptom was unexpected GIN leaf action errors during WAL replay. - Fix SELECT FOR UPDATE/SHARE to correctly lock tuples that + Fix SELECT FOR UPDATE/SHARE to correctly lock tuples that have been updated by a subsequently-aborted transaction (Álvaro Herrera) - In 9.5 and later, the SELECT would sometimes fail to + In 9.5 and later, the SELECT would sometimes fail to return such tuples at all. A failure has not been proven to occur in earlier releases, but might be possible with concurrent updates. 
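The SELECT FOR UPDATE/SHARE entry above concerns interleavings like this two-session sketch (table and values are hypothetical); with the fix, session 2 correctly locks and returns the row after session 1 aborts:

-- session 1
BEGIN;
UPDATE accounts SET balance = balance - 10 WHERE id = 1;
-- session 2 blocks here, waiting for the row lock
SELECT * FROM accounts WHERE id = 1 FOR UPDATE;
-- session 1
ROLLBACK;
-- session 2 now proceeds and returns the (unchanged) locked row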
@@ -1830,79 +4141,79 @@ Branch: REL9_2_STABLE [fb50c38e9] 2017-04-17 13:52:42 -0400 - Fix query-lifespan memory leak in a bulk UPDATE on a table - with a PRIMARY KEY or REPLICA IDENTITY index + Fix query-lifespan memory leak in a bulk UPDATE on a table + with a PRIMARY KEY or REPLICA IDENTITY index (Tom Lane) - Fix EXPLAIN to emit valid XML when - is on (Markus Winand) + Fix EXPLAIN to emit valid XML when + is on (Markus Winand) Previously the XML output-format option produced syntactically invalid - tags such as <I/O-Read-Time>. That is now - rendered as <I-O-Read-Time>. + tags such as <I/O-Read-Time>. That is now + rendered as <I-O-Read-Time>. Suppress printing of zeroes for unmeasured times - in EXPLAIN (Maksim Milyutin) + in EXPLAIN (Maksim Milyutin) Certain option combinations resulted in printing zero values for times that actually aren't ever measured in that combination. Our general - policy in EXPLAIN is not to print such fields at all, so + policy in EXPLAIN is not to print such fields at all, so do that consistently in all cases. - Fix timeout length when VACUUM is waiting for exclusive + Fix timeout length when VACUUM is waiting for exclusive table lock so that it can truncate the table (Simon Riggs) The timeout was meant to be 50 milliseconds, but it was actually only - 50 microseconds, causing VACUUM to give up on truncation + 50 microseconds, causing VACUUM to give up on truncation much more easily than intended. Set it to the intended value. - Fix bugs in merging inherited CHECK constraints while + Fix bugs in merging inherited CHECK constraints while creating or altering a table (Tom Lane, Amit Langote) - Allow identical CHECK constraints to be added to a parent + Allow identical CHECK constraints to be added to a parent and child table in either order. Prevent merging of a valid - constraint from the parent table with a NOT VALID + constraint from the parent table with a NOT VALID constraint on the child. Likewise, prevent merging of a NO - INHERIT child constraint with an inherited constraint. + INHERIT child constraint with an inherited constraint. Remove artificial restrictions on the values accepted - by numeric_in() and numeric_recv() + by numeric_in() and numeric_recv() (Tom Lane) We allow numeric values up to the limit of the storage format (more - than 1e100000), so it seems fairly pointless - that numeric_in() rejected scientific-notation exponents - above 1000. Likewise, it was silly for numeric_recv() to + than 1e100000), so it seems fairly pointless + that numeric_in() rejected scientific-notation exponents + above 1000. Likewise, it was silly for numeric_recv() to reject more than 1000 digits in an input value. @@ -1947,7 +4258,7 @@ Branch: REL9_4_STABLE [10ad15f48] 2016-09-01 11:45:16 -0400 - Disallow starting a standalone backend with standby_mode + Disallow starting a standalone backend with standby_mode turned on (Michael Paquier) @@ -1966,7 +4277,7 @@ Branch: REL9_4_STABLE [10ad15f48] 2016-09-01 11:45:16 -0400 This failure to reset all of the fields of the slot could - prevent VACUUM from removing dead tuples. + prevent VACUUM from removing dead tuples. @@ -1977,7 +4288,7 @@ Branch: REL9_4_STABLE [10ad15f48] 2016-09-01 11:45:16 -0400 - This avoids possible failures during munmap() on systems + This avoids possible failures during munmap() on systems with atypical default huge page sizes. Except in crash-recovery cases, there were no ill effects other than a log message. 
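The inherited CHECK constraint entry above covers cases like the following, where an identical constraint added to the child and then the parent (or the other way around) is now merged cleanly (hypothetical names):

CREATE TABLE parent_t (a int);
CREATE TABLE child_t () INHERITS (parent_t);
ALTER TABLE child_t ADD CONSTRAINT a_positive CHECK (a > 0);
-- merges with the identical constraint already present on the child
ALTER TABLE parent_t ADD CONSTRAINT a_positive CHECK (a > 0);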
@@ -1991,7 +4302,7 @@ Branch: REL9_4_STABLE [10ad15f48] 2016-09-01 11:45:16 -0400 Previously, the same value would be chosen every time, because it was - derived from random() but srandom() had not + derived from random() but srandom() had not yet been called. While relatively harmless, this was not the intended behavior. @@ -2004,8 +4315,8 @@ Branch: REL9_4_STABLE [10ad15f48] 2016-09-01 11:45:16 -0400 - Windows sometimes returns ERROR_ACCESS_DENIED rather - than ERROR_ALREADY_EXISTS when there is an existing + Windows sometimes returns ERROR_ACCESS_DENIED rather + than ERROR_ALREADY_EXISTS when there is an existing segment. This led to postmaster startup failure due to believing that the former was an unrecoverable error. @@ -2014,7 +4325,7 @@ Branch: REL9_4_STABLE [10ad15f48] 2016-09-01 11:45:16 -0400 Don't try to share SSL contexts across multiple connections - in libpq (Heikki Linnakangas) + in libpq (Heikki Linnakangas) @@ -2025,30 +4336,30 @@ Branch: REL9_4_STABLE [10ad15f48] 2016-09-01 11:45:16 -0400 - Avoid corner-case memory leak in libpq (Tom Lane) + Avoid corner-case memory leak in libpq (Tom Lane) The reported problem involved leaking an error report - during PQreset(), but there might be related cases. + during PQreset(), but there might be related cases. - Make ecpg's and options work consistently with our other executables (Haribabu Kommi) - Fix pgbench's calculation of average latency + Fix pgbench's calculation of average latency (Fabien Coelho) - The calculation was incorrect when there were \sleep + The calculation was incorrect when there were \sleep commands in the script, or when the test duration was specified in number of transactions rather than total time. @@ -2056,12 +4367,12 @@ Branch: REL9_4_STABLE [10ad15f48] 2016-09-01 11:45:16 -0400 - In pg_dump, never dump range constructor functions + In pg_dump, never dump range constructor functions (Tom Lane) - This oversight led to pg_upgrade failures with + This oversight led to pg_upgrade failures with extensions containing range types, due to duplicate creation of the constructor functions. @@ -2069,8 +4380,8 @@ Branch: REL9_4_STABLE [10ad15f48] 2016-09-01 11:45:16 -0400 - In pg_xlogdump, retry opening new WAL segments when - using option (Magnus Hagander) @@ -2081,7 +4392,7 @@ Branch: REL9_4_STABLE [10ad15f48] 2016-09-01 11:45:16 -0400 - Fix pg_xlogdump to cope with a WAL file that begins + Fix pg_xlogdump to cope with a WAL file that begins with a continuation record spanning more than one page (Pavan Deolasee) @@ -2089,15 +4400,15 @@ Branch: REL9_4_STABLE [10ad15f48] 2016-09-01 11:45:16 -0400 - Fix contrib/pg_buffercache to work - when shared_buffers exceeds 256GB (KaiGai Kohei) + Fix contrib/pg_buffercache to work + when shared_buffers exceeds 256GB (KaiGai Kohei) - Fix contrib/intarray/bench/bench.pl to print the results - of the EXPLAIN it does when given the option (Daniel Gustafsson) @@ -2109,17 +4420,17 @@ Branch: REL9_4_STABLE [10ad15f48] 2016-09-01 11:45:16 -0400 - When PostgreSQL has been configured - with - In MSVC builds, include pg_recvlogical in a + In MSVC builds, include pg_recvlogical in a client-only installation (MauMau) @@ -2140,17 +4451,17 @@ Branch: REL9_4_STABLE [10ad15f48] 2016-09-01 11:45:16 -0400 If a dynamic time zone abbreviation does not match any entry in the referenced time zone, treat it as equivalent to the time zone name. 
This avoids unexpected failures when IANA removes abbreviations from - their time zone database, as they did in tzdata + their time zone database, as they did in tzdata release 2016f and seem likely to do again in the future. The consequences were not limited to not recognizing the individual abbreviation; any mismatch caused - the pg_timezone_abbrevs view to fail altogether. + the pg_timezone_abbrevs view to fail altogether. - Update time zone data files to tzdata release 2016h + Update time zone data files to tzdata release 2016h for DST law changes in Palestine and Turkey, plus historical corrections for Turkey and some regions of Russia. Switch to numeric abbreviations for some time zones in Antarctica, @@ -2163,15 +4474,15 @@ Branch: REL9_4_STABLE [10ad15f48] 2016-09-01 11:45:16 -0400 or no currency among the local population. They are in process of reversing that policy in favor of using numeric UTC offsets in zones where there is no evidence of real-world use of an English - abbreviation. At least for the time being, PostgreSQL + abbreviation. At least for the time being, PostgreSQL will continue to accept such removed abbreviations for timestamp input. - But they will not be shown in the pg_timezone_names + But they will not be shown in the pg_timezone_names view nor used for output. - In this update, AMT is no longer shown as being in use to - mean Armenia Time. Therefore, we have changed the Default + In this update, AMT is no longer shown as being in use to + mean Armenia Time. Therefore, we have changed the Default abbreviation set to interpret it as Amazon Time, thus UTC-4 not UTC+4. @@ -2192,7 +4503,7 @@ Branch: REL9_4_STABLE [10ad15f48] 2016-09-01 11:45:16 -0400 This release contains a variety of fixes from 9.4.8. For information about new features in the 9.4 major release, see - . + . @@ -2204,7 +4515,7 @@ Branch: REL9_4_STABLE [10ad15f48] 2016-09-01 11:45:16 -0400 However, if you are upgrading from a version earlier than 9.4.6, - see . + see . @@ -2216,17 +4527,17 @@ Branch: REL9_4_STABLE [10ad15f48] 2016-09-01 11:45:16 -0400 Fix possible mis-evaluation of - nested CASE-WHEN expressions (Heikki + nested CASE-WHEN expressions (Heikki Linnakangas, Michael Paquier, Tom Lane) - A CASE expression appearing within the test value - subexpression of another CASE could become confused about + A CASE expression appearing within the test value + subexpression of another CASE could become confused about whether its own test value was null or not. Also, inlining of a SQL function implementing the equality operator used by - a CASE expression could result in passing the wrong test - value to functions called within a CASE expression in the + a CASE expression could result in passing the wrong test + value to functions called within a CASE expression in the SQL function's body. If the test values were of different data types, a crash might result; moreover such situations could be abused to allow disclosure of portions of server memory. (CVE-2016-5423) @@ -2240,7 +4551,7 @@ Branch: REL9_4_STABLE [10ad15f48] 2016-09-01 11:45:16 -0400 - Numerous places in vacuumdb and other client programs + Numerous places in vacuumdb and other client programs could become confused by database and role names containing double quotes or backslashes. Tighten up quoting rules to make that safe. 
Also, ensure that when a conninfo string is used as a database name @@ -2249,22 +4560,22 @@ Branch: REL9_4_STABLE [10ad15f48] 2016-09-01 11:45:16 -0400 Fix handling of paired double quotes - in psql's \connect - and \password commands to match the documentation. + in psql's \connect + and \password commands to match the documentation. - Introduce a new - pg_dumpall now refuses to deal with database and role + pg_dumpall now refuses to deal with database and role names containing carriage returns or newlines, as it seems impractical to quote those characters safely on Windows. In future we may reject such names on the server side, but that step has not been taken yet. @@ -2274,40 +4585,40 @@ Branch: REL9_4_STABLE [10ad15f48] 2016-09-01 11:45:16 -0400 These are considered security fixes because crafted object names containing special characters could have been used to execute commands with superuser privileges the next time a superuser - executes pg_dumpall or other routine maintenance + executes pg_dumpall or other routine maintenance operations. (CVE-2016-5424) - Fix corner-case misbehaviors for IS NULL/IS NOT - NULL applied to nested composite values (Andrew Gierth, Tom Lane) + Fix corner-case misbehaviors for IS NULL/IS NOT + NULL applied to nested composite values (Andrew Gierth, Tom Lane) - The SQL standard specifies that IS NULL should return + The SQL standard specifies that IS NULL should return TRUE for a row of all null values (thus ROW(NULL,NULL) IS - NULL yields TRUE), but this is not meant to apply recursively - (thus ROW(NULL, ROW(NULL,NULL)) IS NULL yields FALSE). + NULL yields TRUE), but this is not meant to apply recursively + (thus ROW(NULL, ROW(NULL,NULL)) IS NULL yields FALSE). The core executor got this right, but certain planner optimizations treated the test as recursive (thus producing TRUE in both cases), - and contrib/postgres_fdw could produce remote queries + and contrib/postgres_fdw could produce remote queries that misbehaved similarly. - Make the inet and cidr data types properly reject + Make the inet and cidr data types properly reject IPv6 addresses with too many colon-separated fields (Tom Lane) - Prevent crash in close_ps() - (the point ## lseg operator) + Prevent crash in close_ps() + (the point ## lseg operator) for NaN input coordinates (Tom Lane) @@ -2318,19 +4629,19 @@ Branch: REL9_4_STABLE [10ad15f48] 2016-09-01 11:45:16 -0400 - Avoid possible crash in pg_get_expr() when inconsistent + Avoid possible crash in pg_get_expr() when inconsistent values are passed to it (Michael Paquier, Thomas Munro) - Fix several one-byte buffer over-reads in to_number() + Fix several one-byte buffer over-reads in to_number() (Peter Eisentraut) - In several cases the to_number() function would read one + In several cases the to_number() function would read one more character than it should from the input string. There is a small chance of a crash, if the input happens to be adjacent to the end of memory. 
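The IS NULL entry above can be checked directly from SQL; per the standard, the test is not recursive, so only the outer row's immediate fields are examined:

    -- flat row of NULLs: IS NULL yields true
    SELECT ROW(NULL, NULL) IS NULL AS flat_is_null;

    -- nested row: the inner ROW(...) is a non-null field of the outer row,
    -- so the outer IS NULL yields false
    SELECT ROW(NULL, ROW(NULL, NULL)) IS NULL AS nested_is_null;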
@@ -2340,8 +4651,8 @@ Branch: REL9_4_STABLE [10ad15f48] 2016-09-01 11:45:16 -0400 Do not run the planner on the query contained in CREATE - MATERIALIZED VIEW or CREATE TABLE AS - when WITH NO DATA is specified (Michael Paquier, + MATERIALIZED VIEW or CREATE TABLE AS + when WITH NO DATA is specified (Michael Paquier, Tom Lane) @@ -2355,7 +4666,7 @@ Branch: REL9_4_STABLE [10ad15f48] 2016-09-01 11:45:16 -0400 Avoid unsafe intermediate state during expensive paths - through heap_update() (Masahiko Sawada, Andres Freund) + through heap_update() (Masahiko Sawada, Andres Freund) @@ -2381,15 +4692,15 @@ Branch: REL9_4_STABLE [10ad15f48] 2016-09-01 11:45:16 -0400 - Avoid unnecessary could not serialize access errors when - acquiring FOR KEY SHARE row locks in serializable mode + Avoid unnecessary could not serialize access errors when + acquiring FOR KEY SHARE row locks in serializable mode (Álvaro Herrera) - Avoid crash in postgres -C when the specified variable + Avoid crash in postgres -C when the specified variable has a null string value (Michael Paquier) @@ -2432,12 +4743,12 @@ Branch: REL9_4_STABLE [10ad15f48] 2016-09-01 11:45:16 -0400 - Avoid consuming a transaction ID during VACUUM + Avoid consuming a transaction ID during VACUUM (Alexander Korotkov) - Some cases in VACUUM unnecessarily caused an XID to be + Some cases in VACUUM unnecessarily caused an XID to be assigned to the current transaction. Normally this is negligible, but if one is up against the XID wraparound limit, consuming more XIDs during anti-wraparound vacuums is a very bad thing. @@ -2453,12 +4764,12 @@ Branch: REL9_2_STABLE [294509ea9] 2016-05-25 19:39:49 -0400 Branch: REL9_1_STABLE [de887cc8a] 2016-05-25 19:39:49 -0400 --> - Avoid canceling hot-standby queries during VACUUM FREEZE + Avoid canceling hot-standby queries during VACUUM FREEZE (Simon Riggs, Álvaro Herrera) - VACUUM FREEZE on an otherwise-idle master server could + VACUUM FREEZE on an otherwise-idle master server could result in unnecessary cancellations of queries on its standby servers. @@ -2473,15 +4784,15 @@ Branch: REL9_1_STABLE [de887cc8a] 2016-05-25 19:39:49 -0400 The usual symptom of this bug is errors - like MultiXactId NNN has not been created + like MultiXactId NNN has not been created yet -- apparent wraparound. - When a manual ANALYZE specifies a column list, don't - reset the table's changes_since_analyze counter + When a manual ANALYZE specifies a column list, don't + reset the table's changes_since_analyze counter (Tom Lane) @@ -2493,7 +4804,7 @@ Branch: REL9_1_STABLE [de887cc8a] 2016-05-25 19:39:49 -0400 - Fix ANALYZE's overestimation of n_distinct + Fix ANALYZE's overestimation of n_distinct for a unique or nearly-unique column with many null entries (Tom Lane) @@ -2526,7 +4837,7 @@ Branch: REL9_1_STABLE [de887cc8a] 2016-05-25 19:39:49 -0400 - This mistake prevented VACUUM from completing in some + This mistake prevented VACUUM from completing in some cases involving corrupt b-tree indexes. @@ -2540,8 +4851,8 @@ Branch: REL9_1_STABLE [de887cc8a] 2016-05-25 19:39:49 -0400 - Fix contrib/btree_gin to handle the smallest - possible bigint value correctly (Peter Eisentraut) + Fix contrib/btree_gin to handle the smallest + possible bigint value correctly (Peter Eisentraut) @@ -2554,53 +4865,53 @@ Branch: REL9_1_STABLE [de887cc8a] 2016-05-25 19:39:49 -0400 It's planned to switch to two-part instead of three-part server version numbers for releases after 9.6. 
Make sure - that PQserverVersion() returns the correct value for + that PQserverVersion() returns the correct value for such cases. - Fix ecpg's code for unsigned long long + Fix ecpg's code for unsigned long long array elements (Michael Meskes) - In pg_dump with both - Improve handling of SIGTERM/control-C in - parallel pg_dump and pg_restore (Tom + Improve handling of SIGTERM/control-C in + parallel pg_dump and pg_restore (Tom Lane) Make sure that the worker processes will exit promptly, and also arrange to send query-cancel requests to the connected backends, in case they - are doing something long-running such as a CREATE INDEX. + are doing something long-running such as a CREATE INDEX. - Fix error reporting in parallel pg_dump - and pg_restore (Tom Lane) + Fix error reporting in parallel pg_dump + and pg_restore (Tom Lane) - Previously, errors reported by pg_dump - or pg_restore worker processes might never make it to + Previously, errors reported by pg_dump + or pg_restore worker processes might never make it to the user's console, because the messages went through the master process, and there were various deadlock scenarios that would prevent the master process from passing on the messages. Instead, just print - everything to stderr. In some cases this will result in + everything to stderr. In some cases this will result in duplicate messages (for instance, if all the workers report a server shutdown), but that seems better than no message. @@ -2608,8 +4919,8 @@ Branch: REL9_1_STABLE [de887cc8a] 2016-05-25 19:39:49 -0400 - Ensure that parallel pg_dump - or pg_restore on Windows will shut down properly + Ensure that parallel pg_dump + or pg_restore on Windows will shut down properly after an error (Kyotaro Horiguchi) @@ -2621,7 +4932,7 @@ Branch: REL9_1_STABLE [de887cc8a] 2016-05-25 19:39:49 -0400 - Make pg_dump behave better when built without zlib + Make pg_dump behave better when built without zlib support (Kyotaro Horiguchi) @@ -2633,7 +4944,7 @@ Branch: REL9_1_STABLE [de887cc8a] 2016-05-25 19:39:49 -0400 - Make pg_basebackup accept -Z 0 as + Make pg_basebackup accept -Z 0 as specifying no compression (Fujii Masao) @@ -2654,13 +4965,13 @@ Branch: REL9_1_STABLE [de887cc8a] 2016-05-25 19:39:49 -0400 - Be more predictable about reporting statement timeout - versus lock timeout (Tom Lane) + Be more predictable about reporting statement timeout + versus lock timeout (Tom Lane) On heavily loaded machines, the regression tests sometimes failed due - to reporting lock timeout even though the statement timeout + to reporting lock timeout even though the statement timeout should have occurred first. @@ -2680,7 +4991,7 @@ Branch: REL9_1_STABLE [de887cc8a] 2016-05-25 19:39:49 -0400 Update our copy of the timezone code to match - IANA's tzcode release 2016c (Tom Lane) + IANA's tzcode release 2016c (Tom Lane) @@ -2692,7 +5003,7 @@ Branch: REL9_1_STABLE [de887cc8a] 2016-05-25 19:39:49 -0400 - Update time zone data files to tzdata release 2016f + Update time zone data files to tzdata release 2016f for DST law changes in Kemerovo and Novosibirsk, plus historical corrections for Azerbaijan, Belarus, and Morocco. @@ -2714,7 +5025,7 @@ Branch: REL9_1_STABLE [de887cc8a] 2016-05-25 19:39:49 -0400 This release contains a variety of fixes from 9.4.7. For information about new features in the 9.4 major release, see - . + . @@ -2726,7 +5037,7 @@ Branch: REL9_1_STABLE [de887cc8a] 2016-05-25 19:39:49 -0400 However, if you are upgrading from a version earlier than 9.4.6, - see . + see . 
@@ -2747,7 +5058,7 @@ Branch: REL9_1_STABLE [de887cc8a] 2016-05-25 19:39:49 -0400 using OpenSSL within a single process and not all the code involved follows the same rules for when to clear the error queue. Failures have been reported specifically when a client application - uses SSL connections in libpq concurrently with + uses SSL connections in libpq concurrently with SSL connections using the PHP, Python, or Ruby wrappers for OpenSSL. It's possible for similar problems to arise within the server as well, if an extension module establishes an outgoing SSL connection. @@ -2756,7 +5067,7 @@ Branch: REL9_1_STABLE [de887cc8a] 2016-05-25 19:39:49 -0400 - Fix failed to build any N-way joins + Fix failed to build any N-way joins planner error with a full join enclosed in the right-hand side of a left join (Tom Lane) @@ -2770,10 +5081,10 @@ Branch: REL9_1_STABLE [de887cc8a] 2016-05-25 19:39:49 -0400 Given a three-or-more-way equivalence class of variables, such - as X.X = Y.Y = Z.Z, it was possible for the planner to omit + as X.X = Y.Y = Z.Z, it was possible for the planner to omit some of the tests needed to enforce that all the variables are actually equal, leading to join rows being output that didn't satisfy - the WHERE clauses. For various reasons, erroneous plans + the WHERE clauses. For various reasons, erroneous plans were seldom selected in practice, so that this bug has gone undetected for a long time. @@ -2794,14 +5105,14 @@ Branch: REL9_1_STABLE [de887cc8a] 2016-05-25 19:39:49 -0400 The memory leak would typically not amount to much in simple queries, but it could be very substantial during a large GIN index build with - high maintenance_work_mem. + high maintenance_work_mem. - Fix possible misbehavior of TH, th, - and Y,YYY format codes in to_timestamp() + Fix possible misbehavior of TH, th, + and Y,YYY format codes in to_timestamp() (Tom Lane) @@ -2813,29 +5124,29 @@ Branch: REL9_1_STABLE [de887cc8a] 2016-05-25 19:39:49 -0400 - Fix dumping of rules and views in which the array - argument of a value operator - ANY (array) construct is a sub-SELECT + Fix dumping of rules and views in which the array + argument of a value operator + ANY (array) construct is a sub-SELECT (Tom Lane) - Disallow newlines in ALTER SYSTEM parameter values + Disallow newlines in ALTER SYSTEM parameter values (Tom Lane) The configuration-file parser doesn't support embedded newlines in string literals, so we mustn't allow them in values to be inserted - by ALTER SYSTEM. + by ALTER SYSTEM. - Fix ALTER TABLE ... REPLICA IDENTITY USING INDEX to + Fix ALTER TABLE ... REPLICA IDENTITY USING INDEX to work properly if an index on OID is selected (David Rowley) @@ -2861,19 +5172,19 @@ Branch: REL9_1_STABLE [de887cc8a] 2016-05-25 19:39:49 -0400 - Make pg_regress use a startup timeout from the - PGCTLTIMEOUT environment variable, if that's set (Tom Lane) + Make pg_regress use a startup timeout from the + PGCTLTIMEOUT environment variable, if that's set (Tom Lane) This is for consistency with a behavior recently added - to pg_ctl; it eases automated testing on slow machines. + to pg_ctl; it eases automated testing on slow machines. - Fix pg_upgrade to correctly restore extension + Fix pg_upgrade to correctly restore extension membership for operator families containing only one operator class (Tom Lane) @@ -2881,20 +5192,20 @@ Branch: REL9_1_STABLE [de887cc8a] 2016-05-25 19:39:49 -0400 In such a case, the operator family was restored into the new database, but it was no longer marked as part of the extension. 
This had no - immediate ill effects, but would cause later pg_dump + immediate ill effects, but would cause later pg_dump runs to emit output that would cause (harmless) errors on restore. - Fix pg_upgrade to not fail when new-cluster TOAST rules + Fix pg_upgrade to not fail when new-cluster TOAST rules differ from old (Tom Lane) - pg_upgrade had special-case code to handle the - situation where the new PostgreSQL version thinks that + pg_upgrade had special-case code to handle the + situation where the new PostgreSQL version thinks that a table should have a TOAST table while the old version did not. That code was broken, so remove it, and instead do nothing in such cases; there seems no reason to believe that we can't get along fine without @@ -2905,22 +5216,22 @@ Branch: REL9_1_STABLE [de887cc8a] 2016-05-25 19:39:49 -0400 Reduce the number of SysV semaphores used by a build configured with - (Tom Lane) - Rename internal function strtoi() - to strtoint() to avoid conflict with a NetBSD library + Rename internal function strtoi() + to strtoint() to avoid conflict with a NetBSD library function (Thomas Munro) - Fix reporting of errors from bind() - and listen() system calls on Windows (Tom Lane) + Fix reporting of errors from bind() + and listen() system calls on Windows (Tom Lane) @@ -2933,19 +5244,19 @@ Branch: REL9_1_STABLE [de887cc8a] 2016-05-25 19:39:49 -0400 - Fix putenv() to work properly with Visual Studio 2013 + Fix putenv() to work properly with Visual Studio 2013 (Michael Paquier) - Avoid possibly-unsafe use of Windows' FormatMessage() + Avoid possibly-unsafe use of Windows' FormatMessage() function (Christian Ullrich) - Use the FORMAT_MESSAGE_IGNORE_INSERTS flag where + Use the FORMAT_MESSAGE_IGNORE_INSERTS flag where appropriate. No live bug is known to exist here, but it seems like a good idea to be careful. @@ -2953,9 +5264,9 @@ Branch: REL9_1_STABLE [de887cc8a] 2016-05-25 19:39:49 -0400 - Update time zone data files to tzdata release 2016d + Update time zone data files to tzdata release 2016d for DST law changes in Russia and Venezuela. There are new zone - names Europe/Kirov and Asia/Tomsk to reflect + names Europe/Kirov and Asia/Tomsk to reflect the fact that these regions now have different time zone histories from adjacent regions. @@ -2977,7 +5288,7 @@ Branch: REL9_1_STABLE [de887cc8a] 2016-05-25 19:39:49 -0400 This release contains a variety of fixes from 9.4.6. For information about new features in the 9.4 major release, see - . + . @@ -2989,7 +5300,7 @@ Branch: REL9_1_STABLE [de887cc8a] 2016-05-25 19:39:49 -0400 However, if you are upgrading from a version earlier than 9.4.6, - see . + see . @@ -3001,29 +5312,29 @@ Branch: REL9_1_STABLE [de887cc8a] 2016-05-25 19:39:49 -0400 Fix incorrect handling of NULL index entries in - indexed ROW() comparisons (Tom Lane) + indexed ROW() comparisons (Tom Lane) An index search using a row comparison such as ROW(a, b) > - ROW('x', 'y') would stop upon reaching a NULL entry in - the b column, ignoring the fact that there might be - non-NULL b values associated with later values - of a. + ROW('x', 'y') would stop upon reaching a NULL entry in + the b column, ignoring the fact that there might be + non-NULL b values associated with later values + of a. 
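The row-comparison entry above concerns index searches of the following shape; the table and index names here are hypothetical, for illustration only:

    -- assumes a hypothetical table t(a text, b text)
    CREATE INDEX t_a_b_idx ON t (a, b);

    -- before the fix, the index scan could stop early at a NULL entry in
    -- column b, missing later values of a with non-NULL b
    SELECT * FROM t WHERE ROW(a, b) > ROW('x', 'y');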
Avoid unlikely data-loss scenarios due to renaming files without - adequate fsync() calls before and after (Michael Paquier, + adequate fsync() calls before and after (Michael Paquier, Tomas Vondra, Andres Freund) - Fix bug in json_to_record() when a field of its input + Fix bug in json_to_record() when a field of its input object contains a sub-object with a field name matching one of the requested output column names (Tom Lane) @@ -3032,20 +5343,20 @@ Branch: REL9_1_STABLE [de887cc8a] 2016-05-25 19:39:49 -0400 Fix misformatting of negative time zone offsets - by to_char()'s OF format code + by to_char()'s OF format code (Thomas Munro, Tom Lane) - Ignore parameter until + Ignore parameter until recovery has reached a consistent state (Michael Paquier) Previously, standby servers would delay application of WAL records in - response to recovery_min_apply_delay even while replaying + response to recovery_min_apply_delay even while replaying the initial portion of WAL needed to make their database state valid. Since the standby is useless until it's reached a consistent database state, this was deemed unhelpful. @@ -3054,7 +5365,7 @@ Branch: REL9_1_STABLE [de887cc8a] 2016-05-25 19:39:49 -0400 - Correctly handle cases where pg_subtrans is close to XID + Correctly handle cases where pg_subtrans is close to XID wraparound during server startup (Jeff Janes) @@ -3066,44 +5377,44 @@ Branch: REL9_1_STABLE [de887cc8a] 2016-05-25 19:39:49 -0400 Trouble cases included tuples larger than one page when replica - identity is FULL, UPDATEs that change a + identity is FULL, UPDATEs that change a primary key within a transaction large enough to be spooled to disk, incorrect reports of subxact logged without previous toplevel - record, and incorrect reporting of a transaction's commit time. + record, and incorrect reporting of a transaction's commit time. Fix planner error with nested security barrier views when the outer - view has a WHERE clause containing a correlated subquery + view has a WHERE clause containing a correlated subquery (Dean Rasheed) - Fix corner-case crash due to trying to free localeconv() + Fix corner-case crash due to trying to free localeconv() output strings more than once (Tom Lane) - Fix parsing of affix files for ispell dictionaries + Fix parsing of affix files for ispell dictionaries (Tom Lane) The code could go wrong if the affix file contained any characters whose byte length changes during case-folding, for - example I in Turkish UTF8 locales. + example I in Turkish UTF8 locales. - Avoid use of sscanf() to parse ispell + Avoid use of sscanf() to parse ispell dictionary files (Artur Zakirov) @@ -3129,27 +5440,27 @@ Branch: REL9_1_STABLE [de887cc8a] 2016-05-25 19:39:49 -0400 - Fix psql's tab completion logic to handle multibyte + Fix psql's tab completion logic to handle multibyte characters properly (Kyotaro Horiguchi, Robert Haas) - Fix psql's tab completion for - SECURITY LABEL (Tom Lane) + Fix psql's tab completion for + SECURITY LABEL (Tom Lane) - Pressing TAB after SECURITY LABEL might cause a crash + Pressing TAB after SECURITY LABEL might cause a crash or offering of inappropriate keywords. 
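The json_to_record() entry above involves input like the following, where a sub-object carries a field name ("a") that matches a requested output column; a minimal, hypothetical sketch:

    -- the sub-object under "b" also contains a key "a", which previously
    -- could confuse extraction of the top-level "a" column
    SELECT *
    FROM json_to_record('{"a": 1, "b": {"a": 10, "c": 2}}')
         AS rec(a int, b json);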
- Make pg_ctl accept a wait timeout from the - PGCTLTIMEOUT environment variable, if none is specified on + Make pg_ctl accept a wait timeout from the + PGCTLTIMEOUT environment variable, if none is specified on the command line (Noah Misch) @@ -3163,26 +5474,26 @@ Branch: REL9_1_STABLE [de887cc8a] 2016-05-25 19:39:49 -0400 Fix incorrect test for Windows service status - in pg_ctl (Manuel Mathar) + in pg_ctl (Manuel Mathar) The previous set of minor releases attempted to - fix pg_ctl to properly determine whether to send log + fix pg_ctl to properly determine whether to send log messages to Window's Event Log, but got the test backwards. - Fix pgbench to correctly handle the combination - of -C and -M prepared options (Tom Lane) + Fix pgbench to correctly handle the combination + of -C and -M prepared options (Tom Lane) - In pg_upgrade, skip creating a deletion script when + In pg_upgrade, skip creating a deletion script when the new data directory is inside the old data directory (Bruce Momjian) @@ -3210,21 +5521,21 @@ Branch: REL9_1_STABLE [de887cc8a] 2016-05-25 19:39:49 -0400 Fix multiple mistakes in the statistics returned - by contrib/pgstattuple's pgstatindex() + by contrib/pgstattuple's pgstatindex() function (Tom Lane) - Remove dependency on psed in MSVC builds, since it's no + Remove dependency on psed in MSVC builds, since it's no longer provided by core Perl (Michael Paquier, Andrew Dunstan) - Update time zone data files to tzdata release 2016c + Update time zone data files to tzdata release 2016c for DST law changes in Azerbaijan, Chile, Haiti, Palestine, and Russia (Altai, Astrakhan, Kirov, Sakhalin, Ulyanovsk regions), plus historical corrections for Lithuania, Moldova, and Russia @@ -3248,7 +5559,7 @@ Branch: REL9_1_STABLE [de887cc8a] 2016-05-25 19:39:49 -0400 This release contains a variety of fixes from 9.4.5. For information about new features in the 9.4 major release, see - . + . @@ -3260,13 +5571,13 @@ Branch: REL9_1_STABLE [de887cc8a] 2016-05-25 19:39:49 -0400 However, if you are upgrading an installation that contains any GIN - indexes that use the (non-default) jsonb_path_ops operator + indexes that use the (non-default) jsonb_path_ops operator class, see the first changelog entry below. Also, if you are upgrading from a version earlier than 9.4.4, - see . + see . @@ -3284,19 +5595,19 @@ Branch: REL9_4_STABLE [788e35ac0] 2015-11-05 18:15:48 -0500 - Fix inconsistent hash calculations in jsonb_path_ops GIN + Fix inconsistent hash calculations in jsonb_path_ops GIN indexes (Tom Lane) - When processing jsonb values that contain both scalars and + When processing jsonb values that contain both scalars and sub-objects at the same nesting level, for example an array containing both scalars and sub-arrays, key hash values could be calculated differently than they would be for the same key in a different context. This could result in queries not finding entries that they should find. Fixing this means that existing indexes may now be inconsistent with the new hash calculation code. Users - should REINDEX jsonb_path_ops GIN indexes after + should REINDEX jsonb_path_ops GIN indexes after installing this update to make sure that all searches work as expected. 
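For the jsonb_path_ops entry above, the recommended rebuild is an ordinary REINDEX of each affected GIN index; the index name below is hypothetical:

    -- assuming an existing index created roughly as:
    --   CREATE INDEX docs_body_pathops_idx ON docs USING gin (body jsonb_path_ops);
    -- rebuild it so searches match the corrected hash calculation
    REINDEX INDEX docs_body_pathops_idx;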
@@ -3326,18 +5637,18 @@ Branch: REL9_1_STABLE [dea6da132] 2015-10-06 17:15:27 -0400 - Perform an immediate shutdown if the postmaster.pid file + Perform an immediate shutdown if the postmaster.pid file is removed (Tom Lane) The postmaster now checks every minute or so - that postmaster.pid is still there and still contains its + that postmaster.pid is still there and still contains its own PID. If not, it performs an immediate shutdown, as though it had - received SIGQUIT. The main motivation for this change + received SIGQUIT. The main motivation for this change is to ensure that failed buildfarm runs will get cleaned up without manual intervention; but it also serves to limit the bad effects if a - DBA forcibly removes postmaster.pid and then starts a new + DBA forcibly removes postmaster.pid and then starts a new postmaster. @@ -3354,7 +5665,7 @@ Branch: REL9_1_STABLE [08322daed] 2015-10-31 14:36:58 -0500 - In SERIALIZABLE transaction isolation mode, serialization + In SERIALIZABLE transaction isolation mode, serialization anomalies could be missed due to race conditions during insertions (Kevin Grittner, Thomas Munro) @@ -3373,7 +5684,7 @@ Branch: REL9_1_STABLE [5f9a86b35] 2015-12-12 14:19:29 +0100 Fix failure to emit appropriate WAL records when doing ALTER - TABLE ... SET TABLESPACE for unlogged relations (Michael Paquier, + TABLE ... SET TABLESPACE for unlogged relations (Michael Paquier, Andres Freund) @@ -3427,7 +5738,7 @@ Branch: REL9_1_STABLE [60ba32cb5] 2015-11-20 14:55:29 -0500 - Fix ALTER COLUMN TYPE to reconstruct inherited check + Fix ALTER COLUMN TYPE to reconstruct inherited check constraints properly (Tom Lane) @@ -3442,7 +5753,7 @@ Branch: REL9_1_STABLE [7e29e7f55] 2015-12-21 19:49:15 -0300 - Fix REASSIGN OWNED to change ownership of composite types + Fix REASSIGN OWNED to change ownership of composite types properly (Álvaro Herrera) @@ -3457,7 +5768,7 @@ Branch: REL9_1_STABLE [ab14c1383] 2015-12-21 19:16:15 -0300 - Fix REASSIGN OWNED and ALTER OWNER to correctly + Fix REASSIGN OWNED and ALTER OWNER to correctly update granted-permissions lists when changing owners of data types, foreign data wrappers, or foreign servers (Bruce Momjian, Álvaro Herrera) @@ -3476,7 +5787,7 @@ Branch: REL9_1_STABLE [f44c5203b] 2015-12-11 18:39:09 -0300 - Fix REASSIGN OWNED to ignore foreign user mappings, + Fix REASSIGN OWNED to ignore foreign user mappings, rather than fail (Álvaro Herrera) @@ -3510,13 +5821,13 @@ Branch: REL9_3_STABLE [0a34ff7e9] 2015-12-07 17:41:45 -0500 - Fix planner's handling of LATERAL references (Tom + Fix planner's handling of LATERAL references (Tom Lane) This fixes some corner cases that led to failed to build any - N-way joins or could not devise a query plan planner + N-way joins or could not devise a query plan planner failures. 
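The LATERAL entry above refers to queries of roughly this shape (table and column names are invented for illustration):

    -- a correlated LATERAL subquery; planner corner cases of this general
    -- form could previously fail with the errors quoted above
    SELECT o.id, s.total
    FROM orders AS o,
         LATERAL (SELECT sum(l.amount) AS total
                  FROM order_lines AS l
                  WHERE l.order_id = o.id) AS s;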
@@ -3566,9 +5877,9 @@ Branch: REL9_3_STABLE [faf18a905] 2015-11-16 13:45:17 -0500 - Speed up generation of unique table aliases in EXPLAIN and + Speed up generation of unique table aliases in EXPLAIN and rule dumping, and ensure that generated aliases do not - exceed NAMEDATALEN (Tom Lane) + exceed NAMEDATALEN (Tom Lane) @@ -3584,8 +5895,8 @@ Branch: REL9_1_STABLE [7b21d1bca] 2015-11-15 14:41:09 -0500 - Fix dumping of whole-row Vars in ROW() - and VALUES() lists (Tom Lane) + Fix dumping of whole-row Vars in ROW() + and VALUES() lists (Tom Lane) @@ -3598,8 +5909,8 @@ Branch: REL9_4_STABLE [4f33572ee] 2015-10-20 11:06:24 -0700 - Translation of minus-infinity dates and timestamps to json - or jsonb incorrectly rendered them as plus-infinity (Tom Lane) + Translation of minus-infinity dates and timestamps to json + or jsonb incorrectly rendered them as plus-infinity (Tom Lane) @@ -3615,7 +5926,7 @@ Branch: REL9_1_STABLE [728a2ac21] 2015-11-17 15:47:12 -0500 - Fix possible internal overflow in numeric division + Fix possible internal overflow in numeric division (Dean Rasheed) @@ -3707,7 +6018,7 @@ Branch: REL9_1_STABLE [b94c2b6a6] 2015-10-16 15:36:17 -0400 This causes the code to emit regular expression is too - complex errors in some cases that previously used unreasonable + complex errors in some cases that previously used unreasonable amounts of time and memory. @@ -3742,14 +6053,14 @@ Branch: REL9_1_STABLE [b00c79b5b] 2015-10-16 14:43:18 -0400 - Make %h and %r escapes - in log_line_prefix work for messages emitted due - to log_connections (Tom Lane) + Make %h and %r escapes + in log_line_prefix work for messages emitted due + to log_connections (Tom Lane) - Previously, %h/%r started to work just after a - new session had emitted the connection received log message; + Previously, %h/%r started to work just after a + new session had emitted the connection received log message; now they work for that message too. @@ -3772,7 +6083,7 @@ Branch: REL9_1_STABLE [b0d858359] 2015-10-13 11:21:33 -0400 This oversight resulted in failure to recover from crashes - whenever logging_collector is turned on. + whenever logging_collector is turned on. @@ -3822,13 +6133,13 @@ Branch: REL9_1_STABLE [db462a44e] 2015-12-17 16:55:51 -0500 - In psql, ensure that libreadline's idea + In psql, ensure that libreadline's idea of the screen size is updated when the terminal window size changes (Merlin Moncure) - Previously, libreadline did not notice if the window + Previously, libreadline did not notice if the window was resized during query output, leading to strange behavior during later input of multiline queries. 
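The json/jsonb entry above about minus-infinity dates and timestamps can be exercised with a one-liner; with the fix, the sign is preserved in the JSON rendering:

    -- previously the first value could be rendered as positive infinity;
    -- with the fix it keeps its sign
    SELECT to_json('-infinity'::timestamptz) AS neg,
           to_json('infinity'::timestamptz)  AS pos;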
@@ -3836,8 +6147,8 @@ Branch: REL9_1_STABLE [db462a44e] 2015-12-17 16:55:51 -0500 - Fix psql's \det command to interpret its - pattern argument the same way as other \d commands with + Fix psql's \det command to interpret its + pattern argument the same way as other \d commands with potentially schema-qualified patterns do (Reece Hart) @@ -3854,7 +6165,7 @@ Branch: REL9_1_STABLE [6430a11fa] 2015-11-25 17:31:54 -0500 - Avoid possible crash in psql's \c command + Avoid possible crash in psql's \c command when previous connection was via Unix socket and command specifies a new hostname and same username (Tom Lane) @@ -3872,21 +6183,21 @@ Branch: REL9_1_STABLE [c869a7d5b] 2015-10-12 18:30:37 -0400 - In pg_ctl start -w, test child process status directly + In pg_ctl start -w, test child process status directly rather than relying on heuristics (Tom Lane, Michael Paquier) - Previously, pg_ctl relied on an assumption that the new - postmaster would always create postmaster.pid within five + Previously, pg_ctl relied on an assumption that the new + postmaster would always create postmaster.pid within five seconds. But that can fail on heavily-loaded systems, - causing pg_ctl to report incorrectly that the + causing pg_ctl to report incorrectly that the postmaster failed to start. Except on Windows, this change also means that a pg_ctl start - -w done immediately after another such command will now reliably + -w done immediately after another such command will now reliably fail, whereas previously it would report success if done within two seconds of the first command. @@ -3904,23 +6215,23 @@ Branch: REL9_1_STABLE [87deb55a4] 2015-11-08 17:31:24 -0500 - In pg_ctl start -w, don't attempt to use a wildcard listen + In pg_ctl start -w, don't attempt to use a wildcard listen address to connect to the postmaster (Kondo Yuta) - On Windows, pg_ctl would fail to detect postmaster - startup if listen_addresses is set to 0.0.0.0 - or ::, because it would try to use that value verbatim as + On Windows, pg_ctl would fail to detect postmaster + startup if listen_addresses is set to 0.0.0.0 + or ::, because it would try to use that value verbatim as the address to connect to, which doesn't work. Instead assume - that 127.0.0.1 or ::1, respectively, is the + that 127.0.0.1 or ::1, respectively, is the right thing to use. - In pg_ctl on Windows, check service status to decide + In pg_ctl on Windows, check service status to decide where to send output, rather than checking if standard output is a terminal (Michael Paquier) @@ -3940,18 +6251,18 @@ Branch: REL9_1_STABLE [6df62ef43] 2015-11-23 00:32:01 -0500 - In pg_dump and pg_basebackup, adopt + In pg_dump and pg_basebackup, adopt the GNU convention for handling tar-archive members exceeding 8GB (Tom Lane) - The POSIX standard for tar file format does not allow + The POSIX standard for tar file format does not allow archive member files to exceed 8GB, but most modern implementations - of tar support an extension that fixes that. Adopt - this extension so that pg_dump with no longer fails on tables with more than 8GB of data, and so - that pg_basebackup can handle files larger than 8GB. + that pg_basebackup can handle files larger than 8GB. In addition, fix some portability issues that could cause failures for members between 4GB and 8GB on some platforms. 
Potentially these problems could cause unrecoverable data loss due to unreadable backup @@ -3961,16 +6272,16 @@ Branch: REL9_1_STABLE [6df62ef43] 2015-11-23 00:32:01 -0500 - Fix assorted corner-case bugs in pg_dump's processing + Fix assorted corner-case bugs in pg_dump's processing of extension member objects (Tom Lane) - Make pg_dump mark a view's triggers as needing to be + Make pg_dump mark a view's triggers as needing to be processed after its rule, to prevent possible failure during - parallel pg_restore (Tom Lane) + parallel pg_restore (Tom Lane) @@ -3993,14 +6304,14 @@ Branch: REL9_1_STABLE [e4959fb5c] 2016-01-02 19:04:45 -0500 Ensure that relation option values are properly quoted - in pg_dump (Kouhei Sutou, Tom Lane) + in pg_dump (Kouhei Sutou, Tom Lane) A reloption value that isn't a simple identifier or number could lead to dump/reload failures due to syntax errors in CREATE statements - issued by pg_dump. This is not an issue with any - reloption currently supported by core PostgreSQL, but + issued by pg_dump. This is not an issue with any + reloption currently supported by core PostgreSQL, but extensions could allow reloptions that cause the problem. @@ -4015,7 +6326,7 @@ Branch: REL9_3_STABLE [534a4159c] 2015-12-23 14:25:31 -0500 - Avoid repeated password prompts during parallel pg_dump + Avoid repeated password prompts during parallel pg_dump (Zeus Kronion) @@ -4038,14 +6349,14 @@ Branch: REL9_1_STABLE [c36064e43] 2015-11-24 17:18:27 -0500 - Fix pg_upgrade's file-copying code to handle errors + Fix pg_upgrade's file-copying code to handle errors properly on Windows (Bruce Momjian) - Install guards in pgbench against corner-case overflow + Install guards in pgbench against corner-case overflow conditions during evaluation of script-specified division or modulo operators (Fabien Coelho, Michael Paquier) @@ -4063,22 +6374,22 @@ Branch: REL9_2_STABLE [4fb9e6109] 2015-12-28 10:50:35 -0300 Fix failure to localize messages emitted - by pg_receivexlog and pg_recvlogical + by pg_receivexlog and pg_recvlogical (Ioseph Kim) - Avoid dump/reload problems when using both plpython2 - and plpython3 (Tom Lane) + Avoid dump/reload problems when using both plpython2 + and plpython3 (Tom Lane) - In principle, both versions of PL/Python can be used in + In principle, both versions of PL/Python can be used in the same database, though not in the same session (because the two - versions of libpython cannot safely be used concurrently). - However, pg_restore and pg_upgrade both + versions of libpython cannot safely be used concurrently). + However, pg_restore and pg_upgrade both do things that can fall foul of the same-session restriction. Work around that by changing the timing of the check. @@ -4086,7 +6397,7 @@ Branch: REL9_2_STABLE [4fb9e6109] 2015-12-28 10:50:35 -0300 - Fix PL/Python regression tests to pass with Python 3.5 + Fix PL/Python regression tests to pass with Python 3.5 (Peter Eisentraut) @@ -4101,29 +6412,29 @@ Branch: REL9_3_STABLE [db6e8e162] 2015-11-12 13:03:53 -0500 - Fix premature clearing of libpq's input buffer when + Fix premature clearing of libpq's input buffer when socket EOF is seen (Tom Lane) - This mistake caused libpq to sometimes not report the + This mistake caused libpq to sometimes not report the backend's final error message before reporting server closed the - connection unexpectedly. + connection unexpectedly. 
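The pgbench entry above guards the script expression evaluator against the classic INT64_MIN / -1 corner case. The same arithmetic corner case can be seen from plain SQL; this is only an analogy to illustrate the overflow, not the pgbench code path itself:

    -- INT64_MIN divided by -1 cannot be represented in bigint;
    -- the server reports an out-of-range error rather than silently overflowing
    SELECT '-9223372036854775808'::bigint / '-1'::bigint;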
- Prevent certain PL/Java parameters from being set by + Prevent certain PL/Java parameters from being set by non-superusers (Noah Misch) - This change mitigates a PL/Java security bug - (CVE-2016-0766), which was fixed in PL/Java by marking + This change mitigates a PL/Java security bug + (CVE-2016-0766), which was fixed in PL/Java by marking these parameters as superuser-only. To fix the security hazard for - sites that update PostgreSQL more frequently - than PL/Java, make the core code aware of them also. + sites that update PostgreSQL more frequently + than PL/Java, make the core code aware of them also. @@ -4139,7 +6450,7 @@ Branch: REL9_1_STABLE [4b58ded74] 2015-12-14 18:48:49 +0200 - Improve libpq's handling of out-of-memory situations + Improve libpq's handling of out-of-memory situations (Michael Paquier, Amit Kapila, Heikki Linnakangas) @@ -4156,7 +6467,7 @@ Branch: REL9_1_STABLE [a9bcd8370] 2015-10-18 10:17:12 +0200 Fix order of arguments - in ecpg-generated typedef statements + in ecpg-generated typedef statements (Michael Meskes) @@ -4173,29 +6484,29 @@ Branch: REL9_1_STABLE [84387496f] 2015-12-01 11:42:52 -0500 - Use %g not %f format - in ecpg's PGTYPESnumeric_from_double() + Use %g not %f format + in ecpg's PGTYPESnumeric_from_double() (Tom Lane) - Fix ecpg-supplied header files to not contain comments + Fix ecpg-supplied header files to not contain comments continued from a preprocessor directive line onto the next line (Michael Meskes) - Such a comment is rejected by ecpg. It's not yet clear - whether ecpg itself should be changed. + Such a comment is rejected by ecpg. It's not yet clear + whether ecpg itself should be changed. - Fix hstore_to_json_loose()'s test for whether - an hstore value can be converted to a JSON number (Tom Lane) + Fix hstore_to_json_loose()'s test for whether + an hstore value can be converted to a JSON number (Tom Lane) @@ -4216,15 +6527,15 @@ Branch: REL9_1_STABLE [1b6102eb7] 2015-12-27 13:03:19 -0300 - Ensure that contrib/pgcrypto's crypt() + Ensure that contrib/pgcrypto's crypt() function can be interrupted by query cancel (Andreas Karlsson) - In contrib/postgres_fdw, fix bugs triggered by use - of tableoid in data-modifying commands (Etsuro Fujita, + In contrib/postgres_fdw, fix bugs triggered by use + of tableoid in data-modifying commands (Etsuro Fujita, Robert Haas) @@ -4246,7 +6557,7 @@ Branch: REL9_2_STABLE [7f94a5c10] 2015-12-10 10:19:31 -0500 - Accept flex versions later than 2.5.x + Accept flex versions later than 2.5.x (Tom Lane, Michael Paquier) @@ -4280,19 +6591,19 @@ Branch: REL9_1_STABLE [2a37a103b] 2015-12-11 16:14:48 -0500 - Install our missing script where PGXS builds can find it + Install our missing script where PGXS builds can find it (Jim Nasby) This allows sane behavior in a PGXS build done on a machine where build - tools such as bison are missing. + tools such as bison are missing. - Ensure that dynloader.h is included in the installed + Ensure that dynloader.h is included in the installed header files in MSVC builds (Bruce Momjian, Michael Paquier) @@ -4310,11 +6621,11 @@ Branch: REL9_1_STABLE [386dcd539] 2015-12-11 19:08:40 -0500 Add variant regression test expected-output file to match behavior of - current libxml2 (Tom Lane) + current libxml2 (Tom Lane) - The fix for libxml2's CVE-2015-7499 causes it not to + The fix for libxml2's CVE-2015-7499 causes it not to output error context reports in some cases where it used to do so. 
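The hstore_to_json_loose() entry above concerns the heuristic that decides which hstore values look numeric; a small example, assuming the hstore extension is installed:

    -- requires: CREATE EXTENSION hstore;
    -- values that look like numbers are emitted as JSON numbers, the rest as
    -- JSON strings; the fix tightened that test
    SELECT hstore_to_json_loose('a=>1, b=>2.5, c=>high5'::hstore);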
This seems to be a bug, but we'll probably have to live with it for some time, so work around it. @@ -4323,7 +6634,7 @@ Branch: REL9_1_STABLE [386dcd539] 2015-12-11 19:08:40 -0500 - Update time zone data files to tzdata release 2016a for + Update time zone data files to tzdata release 2016a for DST law changes in Cayman Islands, Metlakatla, and Trans-Baikal Territory (Zabaykalsky Krai), plus historical corrections for Pakistan. @@ -4345,7 +6656,7 @@ Branch: REL9_1_STABLE [386dcd539] 2015-12-11 19:08:40 -0500 This release contains a variety of fixes from 9.4.4. For information about new features in the 9.4 major release, see - . + . @@ -4357,7 +6668,7 @@ Branch: REL9_1_STABLE [386dcd539] 2015-12-11 19:08:40 -0500 However, if you are upgrading from a version earlier than 9.4.4, - see . + see . @@ -4376,13 +6687,13 @@ Branch: REL9_3_STABLE [f8862172e] 2015-10-05 10:06:34 -0400 - Guard against stack overflows in json parsing + Guard against stack overflows in json parsing (Oskari Saarenmaa) - If an application constructs PostgreSQL json - or jsonb values from arbitrary user input, the application's + If an application constructs PostgreSQL json + or jsonb values from arbitrary user input, the application's users can reliably crash the PostgreSQL server, causing momentary denial of service. (CVE-2015-5289) @@ -4401,8 +6712,8 @@ Branch: REL9_0_STABLE [188e081ef] 2015-10-05 10:06:36 -0400 - Fix contrib/pgcrypto to detect and report - too-short crypt() salts (Josh Kupershmidt) + Fix contrib/pgcrypto to detect and report + too-short crypt() salts (Josh Kupershmidt) @@ -4447,7 +6758,7 @@ Branch: REL9_4_STABLE [bab959906] 2015-08-02 20:09:05 +0300 Fix possible deadlock during WAL insertion - when commit_delay is set (Heikki Linnakangas) + when commit_delay is set (Heikki Linnakangas) @@ -4478,13 +6789,13 @@ Branch: REL9_0_STABLE [45c69178b] 2015-06-25 14:39:06 -0400 - Fix insertion of relations into the relation cache init file + Fix insertion of relations into the relation cache init file (Tom Lane) An oversight in a patch in the most recent minor releases - caused pg_trigger_tgrelid_tgname_index to be omitted + caused pg_trigger_tgrelid_tgname_index to be omitted from the init file. Subsequent sessions detected this, then deemed the init file to be broken and silently ignored it, resulting in a significant degradation in session startup time. In addition to fixing @@ -4524,7 +6835,7 @@ Branch: REL9_0_STABLE [2d4336cf8] 2015-09-30 23:32:23 -0400 - Improve LISTEN startup time when there are many unread + Improve LISTEN startup time when there are many unread notifications (Matt Newell) @@ -4544,7 +6855,7 @@ Branch: REL9_3_STABLE [1bcc9e60a] 2015-09-25 13:16:31 -0400 - This was seen primarily when restoring pg_dump output + This was seen primarily when restoring pg_dump output for databases with many thousands of tables. @@ -4568,7 +6879,7 @@ Branch: REL9_0_STABLE [444b2ebee] 2015-07-28 22:06:32 +0200 too many bugs in practice, both in the underlying OpenSSL library and in our usage of it. Renegotiation will be removed entirely in 9.5 and later. In the older branches, just change the default value - of ssl_renegotiation_limit to zero (disabled). + of ssl_renegotiation_limit to zero (disabled). 
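The json-parsing entry above (CVE-2015-5289) is about input nested deeply enough to exhaust the stack; a sketch of the kind of value involved, with an arbitrary depth:

    -- deeply nested array built from user-controllable input; depending on
    -- max_stack_depth this either parses or fails cleanly with a stack-depth
    -- error -- the point of the fix is that it no longer crashes the backend
    SELECT (repeat('[', 20000) || repeat(']', 20000))::json;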
@@ -4592,7 +6903,7 @@ Branch: REL9_0_STABLE [eeb0b7830] 2015-10-05 11:57:25 +0200 - Lower the minimum values of the *_freeze_max_age parameters + Lower the minimum values of the *_freeze_max_age parameters (Andres Freund) @@ -4615,7 +6926,7 @@ Branch: REL9_0_STABLE [b09446ed7] 2015-08-04 13:12:03 -0400 - Limit the maximum value of wal_buffers to 2GB to avoid + Limit the maximum value of wal_buffers to 2GB to avoid server crashes (Josh Berkus) @@ -4629,8 +6940,8 @@ Branch: REL9_3_STABLE [5a56c2545] 2015-06-28 18:38:06 -0400 Avoid logging complaints when a parameter that can only be set at - server start appears multiple times in postgresql.conf, - and fix counting of line numbers after an include_dir + server start appears multiple times in postgresql.conf, + and fix counting of line numbers after an include_dir directive (Tom Lane) @@ -4648,7 +6959,7 @@ Branch: REL9_0_STABLE [a89781e34] 2015-09-21 12:12:16 -0400 - Fix rare internal overflow in multiplication of numeric values + Fix rare internal overflow in multiplication of numeric values (Dean Rasheed) @@ -4675,8 +6986,8 @@ Branch: REL9_2_STABLE [8dacb29ca] 2015-10-05 10:06:35 -0400 Guard against hard-to-reach stack overflows involving record types, - range types, json, jsonb, tsquery, - ltxtquery and query_int (Noah Misch) + range types, json, jsonb, tsquery, + ltxtquery and query_int (Noah Misch) @@ -4700,14 +7011,14 @@ Branch: REL9_0_STABLE [92d956f51] 2015-09-07 20:47:06 +0100 - Fix handling of DOW and DOY in datetime input + Fix handling of DOW and DOY in datetime input (Greg Stark) These tokens aren't meant to be used in datetime values, but previously they resulted in opaque internal error messages rather - than invalid input syntax. + than invalid input syntax. @@ -4749,7 +7060,7 @@ Branch: REL9_0_STABLE [b875ca09f] 2015-10-02 15:00:52 -0400 Add recursion depth protections to regular expression, SIMILAR - TO, and LIKE matching (Tom Lane) + TO, and LIKE matching (Tom Lane) @@ -4865,8 +7176,8 @@ Branch: REL9_0_STABLE [bd327627f] 2015-08-04 18:18:47 -0400 - Fix unexpected out-of-memory situation during sort errors - when using tuplestores with small work_mem settings (Tom + Fix unexpected out-of-memory situation during sort errors + when using tuplestores with small work_mem settings (Tom Lane) @@ -4884,7 +7195,7 @@ Branch: REL9_0_STABLE [36522d627] 2015-07-16 22:57:46 -0400 - Fix very-low-probability stack overrun in qsort (Tom Lane) + Fix very-low-probability stack overrun in qsort (Tom Lane) @@ -4906,8 +7217,8 @@ Branch: REL9_0_STABLE [d637a899c] 2015-10-04 15:55:07 -0400 - Fix invalid memory alloc request size failure in hash joins - with large work_mem settings (Tomas Vondra, Tom Lane) + Fix invalid memory alloc request size failure in hash joins + with large work_mem settings (Tomas Vondra, Tom Lane) @@ -4985,9 +7296,9 @@ Branch: REL9_0_STABLE [7b4b57fc4] 2015-08-12 21:19:10 -0400 These mistakes could lead to incorrect query plans that would give wrong answers, or to assertion failures in assert-enabled builds, or to odd planner errors such as could not devise a query plan for the - given query, could not find pathkey item to - sort, plan should not reference subplan's variable, - or failed to assign all NestLoopParams to plan nodes. + given query, could not find pathkey item to + sort, plan should not reference subplan's variable, + or failed to assign all NestLoopParams to plan nodes. Thanks are due to Andreas Seltenreich and Piotr Stefaniak for fuzz testing that exposed these problems. 
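The hash-join entry above arises only with large work_mem settings; a hypothetical setup that exercises that path (table names invented):

    -- allow a very large in-memory hash table; the fix prevents the
    -- "invalid memory alloc request size" failure in this situation
    SET work_mem = '2GB';
    EXPLAIN SELECT *
    FROM big_facts AS f
    JOIN big_dims  AS d USING (id);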
@@ -5003,7 +7314,7 @@ Branch: REL9_2_STABLE [e538e510e] 2015-06-22 18:53:27 -0400 - Improve planner's performance for UPDATE/DELETE + Improve planner's performance for UPDATE/DELETE on large inheritance sets (Tom Lane, Dean Rasheed) @@ -5045,12 +7356,12 @@ Branch: REL9_0_STABLE [8b53c087d] 2015-08-02 14:54:44 -0400 During postmaster shutdown, ensure that per-socket lock files are removed and listen sockets are closed before we remove - the postmaster.pid file (Tom Lane) + the postmaster.pid file (Tom Lane) This avoids race-condition failures if an external script attempts to - start a new postmaster as soon as pg_ctl stop returns. + start a new postmaster as soon as pg_ctl stop returns. @@ -5124,7 +7435,7 @@ Branch: REL9_0_STABLE [f527c0a2a] 2015-07-28 17:34:00 -0400 - Do not print a WARNING when an autovacuum worker is already + Do not print a WARNING when an autovacuum worker is already gone when we attempt to signal it, and reduce log verbosity for such signals (Tom Lane) @@ -5202,7 +7513,7 @@ Branch: REL9_2_STABLE [f4297f8c5] 2015-07-27 12:32:48 +0300 - VACUUM attempted to recycle such pages, but did so in a + VACUUM attempted to recycle such pages, but did so in a way that wasn't crash-safe. @@ -5221,7 +7532,7 @@ Branch: REL9_0_STABLE [40ad78220] 2015-07-23 01:30:19 +0300 Fix off-by-one error that led to otherwise-harmless warnings - about apparent wraparound in subtrans/multixact truncation + about apparent wraparound in subtrans/multixact truncation (Thomas Munro) @@ -5239,8 +7550,8 @@ Branch: REL9_0_STABLE [e41718fa1] 2015-08-18 19:22:38 -0400 - Fix misreporting of CONTINUE and MOVE statement - types in PL/pgSQL's error context messages + Fix misreporting of CONTINUE and MOVE statement + types in PL/pgSQL's error context messages (Pavel Stehule, Tom Lane) @@ -5257,7 +7568,7 @@ Branch: REL9_1_STABLE [ca6c2f863] 2015-09-29 10:52:22 -0400 - Fix PL/Perl to handle non-ASCII error + Fix PL/Perl to handle non-ASCII error message texts correctly (Alex Hunsaker) @@ -5280,8 +7591,8 @@ Branch: REL9_1_STABLE [1d190d095] 2015-08-21 12:21:37 -0400 - Fix PL/Python crash when returning the string - representation of a record result (Tom Lane) + Fix PL/Python crash when returning the string + representation of a record result (Tom Lane) @@ -5298,8 +7609,8 @@ Branch: REL9_0_STABLE [4c11967e7] 2015-07-20 14:18:08 +0200 - Fix some places in PL/Tcl that neglected to check for - failure of malloc() calls (Michael Paquier, Álvaro + Fix some places in PL/Tcl that neglected to check for + failure of malloc() calls (Michael Paquier, Álvaro Herrera) @@ -5316,7 +7627,7 @@ Branch: REL9_1_STABLE [2d19a0e97] 2015-08-02 22:12:51 +0300 - In contrib/isn, fix output of ISBN-13 numbers that begin + In contrib/isn, fix output of ISBN-13 numbers that begin with 979 (Fabien Coelho) @@ -5335,7 +7646,7 @@ Branch: REL9_4_STABLE [93840f96c] 2015-10-04 17:58:30 -0400 - Improve contrib/pg_stat_statements' handling of + Improve contrib/pg_stat_statements' handling of query-text garbage collection (Peter Geoghegan) @@ -5356,13 +7667,13 @@ Branch: REL9_3_STABLE [b7dcb2dd4] 2015-09-24 12:47:30 -0400 - Improve contrib/postgres_fdw's handling of + Improve contrib/postgres_fdw's handling of collation-related decisions (Tom Lane) The main user-visible effect is expected to be that comparisons - involving varchar columns will be sent to the remote server + involving varchar columns will be sent to the remote server for execution in more cases than before. 
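For the PL/pgSQL entry above, CONTINUE (and MOVE) are the statement types whose names could be misreported in error context lines; a trivial block showing CONTINUE:

    -- minimal PL/pgSQL loop using CONTINUE; if an error occurs inside such a
    -- loop, the error context message now names the statement type correctly
    DO $$
    BEGIN
        FOR i IN 1..5 LOOP
            CONTINUE WHEN i = 3;
            RAISE NOTICE 'i = %', i;
        END LOOP;
    END
    $$;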
@@ -5380,7 +7691,7 @@ Branch: REL9_0_STABLE [2b189c7ec] 2015-07-07 18:45:31 +0300 - Improve libpq's handling of out-of-memory conditions + Improve libpq's handling of out-of-memory conditions (Michael Paquier, Heikki Linnakangas) @@ -5416,7 +7727,7 @@ Branch: REL9_0_STABLE [d278ff3b2] 2015-06-15 14:27:39 +0200 Fix memory leaks and missing out-of-memory checks - in ecpg (Michael Paquier) + in ecpg (Michael Paquier) @@ -5447,15 +7758,15 @@ Branch: REL9_0_STABLE [98d8c75f9] 2015-09-25 12:20:46 -0400 - Fix psql's code for locale-aware formatting of numeric + Fix psql's code for locale-aware formatting of numeric output (Tom Lane) - The formatting code invoked by \pset numericlocale on + The formatting code invoked by \pset numericlocale on did the wrong thing for some uncommon cases such as numbers with an exponent but no decimal point. It could also mangle already-localized - output from the money data type. + output from the money data type. @@ -5472,7 +7783,7 @@ Branch: REL9_0_STABLE [6087bf1a1] 2015-07-08 20:44:27 -0400 - Prevent crash in psql's \c command when + Prevent crash in psql's \c command when there is no current connection (Noah Misch) @@ -5488,7 +7799,7 @@ Branch: REL9_2_STABLE [3756c65a0] 2015-10-01 16:19:49 -0400 - Make pg_dump handle inherited NOT VALID + Make pg_dump handle inherited NOT VALID check constraints correctly (Tom Lane) @@ -5505,8 +7816,8 @@ Branch: REL9_1_STABLE [af225551e] 2015-07-25 17:16:39 -0400 - Fix selection of default zlib compression level - in pg_dump's directory output format (Andrew Dunstan) + Fix selection of default zlib compression level + in pg_dump's directory output format (Andrew Dunstan) @@ -5523,8 +7834,8 @@ Branch: REL9_0_STABLE [24aed2124] 2015-09-20 20:44:34 -0400 - Ensure that temporary files created during a pg_dump - run with tar-format output are not world-readable (Michael + Ensure that temporary files created during a pg_dump + run with tar-format output are not world-readable (Michael Paquier) @@ -5542,8 +7853,8 @@ Branch: REL9_0_STABLE [52b07779d] 2015-09-11 15:51:10 -0400 - Fix pg_dump and pg_upgrade to support - cases where the postgres or template1 database + Fix pg_dump and pg_upgrade to support + cases where the postgres or template1 database is in a non-default tablespace (Marti Raudsepp, Bruce Momjian) @@ -5561,7 +7872,7 @@ Branch: REL9_0_STABLE [298d1f808] 2015-08-10 20:10:16 -0400 - Fix pg_dump to handle object privileges sanely when + Fix pg_dump to handle object privileges sanely when dumping from a server too old to have a particular privilege type (Tom Lane) @@ -5569,11 +7880,11 @@ Branch: REL9_0_STABLE [298d1f808] 2015-08-10 20:10:16 -0400 When dumping data types from pre-9.2 servers, and when dumping functions or procedural languages from pre-7.3 - servers, pg_dump would - produce GRANT/REVOKE commands that revoked the + servers, pg_dump would + produce GRANT/REVOKE commands that revoked the owner's grantable privileges and instead granted all privileges - to PUBLIC. Since the privileges involved are - just USAGE and EXECUTE, this isn't a security + to PUBLIC. Since the privileges involved are + just USAGE and EXECUTE, this isn't a security problem, but it's certainly a surprising representation of the older systems' behavior. Fix it to leave the default privilege state alone in these cases. 
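The pg_dump entry above about inherited NOT VALID check constraints corresponds to schemas like this (names are illustrative):

    CREATE TABLE parent (x integer);
    CREATE TABLE child () INHERITS (parent);

    -- a NOT VALID constraint on the parent is inherited by the child;
    -- pg_dump now represents this correctly
    ALTER TABLE parent ADD CONSTRAINT x_positive CHECK (x > 0) NOT VALID;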
@@ -5593,12 +7904,12 @@ Branch: REL9_0_STABLE [5d175be17] 2015-08-04 19:34:12 -0400 - Fix pg_dump to dump shell types (Tom Lane) + Fix pg_dump to dump shell types (Tom Lane) Shell types (that is, not-yet-fully-defined types) aren't useful for - much, but nonetheless pg_dump should dump them. + much, but nonetheless pg_dump should dump them. @@ -5614,7 +7925,7 @@ Branch: REL9_1_STABLE [e9a859b54] 2015-07-12 16:25:52 -0400 - Fix assorted minor memory leaks in pg_dump and other + Fix assorted minor memory leaks in pg_dump and other client-side programs (Michael Paquier) @@ -5628,8 +7939,8 @@ Branch: REL9_4_STABLE [9d6352aaa] 2015-07-03 11:15:27 +0300 - Fix pgbench's progress-report behavior when a query, - or pgbench itself, gets stuck (Fabien Coelho) + Fix pgbench's progress-report behavior when a query, + or pgbench itself, gets stuck (Fabien Coelho) @@ -5658,11 +7969,11 @@ Branch: REL9_0_STABLE [b5a22d8bb] 2015-08-29 16:09:25 -0400 Fix spinlock assembly code for PPC hardware to be compatible - with AIX's native assembler (Tom Lane) + with AIX's native assembler (Tom Lane) - Building with gcc didn't work if gcc + Building with gcc didn't work if gcc had been configured to use the native assembler, which is becoming more common. @@ -5681,7 +7992,7 @@ Branch: REL9_0_STABLE [cdf596b1c] 2015-07-17 03:02:46 -0400 - On AIX, test the -qlonglong compiler option + On AIX, test the -qlonglong compiler option rather than just assuming it's safe to use (Noah Misch) @@ -5699,7 +8010,7 @@ Branch: REL9_0_STABLE [7803d5720] 2015-07-15 21:00:31 -0400 - On AIX, use -Wl,-brtllib link option to allow + On AIX, use -Wl,-brtllib link option to allow symbols to be resolved at runtime (Noah Misch) @@ -5722,7 +8033,7 @@ Branch: REL9_0_STABLE [2d8c136e7] 2015-07-29 22:54:08 -0400 Avoid use of inline functions when compiling with - 32-bit xlc, due to compiler bugs (Noah Misch) + 32-bit xlc, due to compiler bugs (Noah Misch) @@ -5738,7 +8049,7 @@ Branch: REL9_0_STABLE [b185c42c1] 2015-06-30 14:20:37 -0300 - Use librt for sched_yield() when necessary, + Use librt for sched_yield() when necessary, which it is on some Solaris versions (Oskari Saarenmaa) @@ -5752,7 +8063,7 @@ Branch: REL9_4_STABLE [a0104e080] 2015-08-14 20:23:42 -0400 - Translate encoding UHC as Windows code page 949 + Translate encoding UHC as Windows code page 949 (Noah Misch) @@ -5785,12 +8096,12 @@ Branch: REL9_4_STABLE [b2ed1682d] 2015-06-20 12:10:56 -0400 Fix postmaster startup failure due to not - copying setlocale()'s return value (Noah Misch) + copying setlocale()'s return value (Noah Misch) This has been reported on Windows systems with the ANSI code page set - to CP936 (Chinese (Simplified, PRC)), and may occur with + to CP936 (Chinese (Simplified, PRC)), and may occur with other multibyte code pages. 
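A shell type, as mentioned in the pg_dump entry above, is created by a bare CREATE TYPE with no attributes and completed later; a minimal sketch, with placeholder I/O function names:

    -- creates a "shell" type: a placeholder with no properties yet
    CREATE TYPE complex;
    -- it would normally be completed later with something like
    --   CREATE TYPE complex (INPUT = complex_in, OUTPUT = complex_out, ...);
    -- pg_dump now dumps the shell even if it is never completed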
@@ -5808,7 +8119,7 @@ Branch: REL9_0_STABLE [341b877d3] 2015-07-07 16:39:25 +0300 - Fix Windows install.bat script to handle target directory + Fix Windows install.bat script to handle target directory names that contain spaces (Heikki Linnakangas) @@ -5826,9 +8137,9 @@ Branch: REL9_0_STABLE [29ff43adf] 2015-07-05 12:01:02 -0400 - Make the numeric form of the PostgreSQL version number - (e.g., 90405) readily available to extension Makefiles, - as a variable named VERSION_NUM (Michael Paquier) + Make the numeric form of the PostgreSQL version number + (e.g., 90405) readily available to extension Makefiles, + as a variable named VERSION_NUM (Michael Paquier) @@ -5845,10 +8156,10 @@ Branch: REL9_0_STABLE [47ac95f37] 2015-10-02 19:16:37 -0400 - Update time zone data files to tzdata release 2015g for + Update time zone data files to tzdata release 2015g for DST law changes in Cayman Islands, Fiji, Moldova, Morocco, Norfolk Island, North Korea, Turkey, and Uruguay. There is a new zone name - America/Fort_Nelson for the Canadian Northern Rockies. + America/Fort_Nelson for the Canadian Northern Rockies. @@ -5868,7 +8179,7 @@ Branch: REL9_0_STABLE [47ac95f37] 2015-10-02 19:16:37 -0400 This release contains a small number of fixes from 9.4.3. For information about new features in the 9.4 major release, see - . + . @@ -5880,13 +8191,13 @@ Branch: REL9_0_STABLE [47ac95f37] 2015-10-02 19:16:37 -0400 However, if you are upgrading an installation that was previously - upgraded using a pg_upgrade version between 9.3.0 and + upgraded using a pg_upgrade version between 9.3.0 and 9.3.4 inclusive, see the first changelog entry below. Also, if you are upgrading from a version earlier than 9.4.2, - see . + see . @@ -5909,50 +8220,50 @@ Branch: REL9_3_STABLE [2a9b01928] 2015-06-05 09:34:15 -0400 - Recent PostgreSQL releases introduced mechanisms to + Recent PostgreSQL releases introduced mechanisms to protect against multixact wraparound, but some of that code did not account for the possibility that it would need to run during crash recovery, when the database may not be in a consistent state. This could result in failure to restart after a crash, or failure to start up a secondary server. The lingering effects of a previously-fixed - bug in pg_upgrade could also cause such a failure, in - installations that had used pg_upgrade versions + bug in pg_upgrade could also cause such a failure, in + installations that had used pg_upgrade versions between 9.3.0 and 9.3.4. - The pg_upgrade bug in question was that it would - set oldestMultiXid to 1 in pg_control even + The pg_upgrade bug in question was that it would + set oldestMultiXid to 1 in pg_control even if the true value should be higher. With the fixes introduced in this release, such a situation will result in immediate emergency - autovacuuming until a correct oldestMultiXid value can + autovacuuming until a correct oldestMultiXid value can be determined. If that would pose a hardship, users can avoid it by - doing manual vacuuming before upgrading to this release. + doing manual vacuuming before upgrading to this release. In detail: - Check whether pg_controldata reports Latest - checkpoint's oldestMultiXid to be 1. If not, there's nothing + Check whether pg_controldata reports Latest + checkpoint's oldestMultiXid to be 1. If not, there's nothing to do. - Look in PGDATA/pg_multixact/offsets to see if there's a - file named 0000. If there is, there's nothing to do. + Look in PGDATA/pg_multixact/offsets to see if there's a + file named 0000. 
If there is, there's nothing to do. Otherwise, for each table that has - pg_class.relminmxid equal to 1, - VACUUM that table with - both - and set to + pg_class.relminmxid equal to 1, + VACUUM that table with + both + and set to zero. (You can use the vacuum cost delay parameters described - in to reduce + in to reduce the performance consequences for concurrent sessions.) @@ -5977,7 +8288,7 @@ Branch: REL9_0_STABLE [2fe1939b0] 2015-06-07 15:32:09 -0400 With just the wrong timing of concurrent activity, a VACUUM - FULL on a system catalog might fail to update the init file + FULL on a system catalog might fail to update the init file that's used to avoid cache-loading work for new sessions. This would result in later sessions being unable to access that catalog at all. This is a very ancient bug, but it's so hard to trigger that no @@ -5998,13 +8309,13 @@ Branch: REL9_0_STABLE [dbd99c7f0] 2015-06-05 13:22:27 -0400 Avoid deadlock between incoming sessions and CREATE/DROP - DATABASE (Tom Lane) + DATABASE (Tom Lane) A new session starting in a database that is the target of - a DROP DATABASE command, or is the template for - a CREATE DATABASE command, could cause the command to wait + a DROP DATABASE command, or is the template for + a CREATE DATABASE command, could cause the command to wait for five seconds and then fail, even if the new session would have exited before that. @@ -6049,7 +8360,7 @@ Branch: REL9_3_STABLE [d3fdec6ae] 2015-06-03 11:58:47 -0400 This release contains a small number of fixes from 9.4.2. For information about new features in the 9.4 major release, see - . + . @@ -6061,7 +8372,7 @@ Branch: REL9_3_STABLE [d3fdec6ae] 2015-06-03 11:58:47 -0400 However, if you are upgrading from a version earlier than 9.4.2, - see . + see . @@ -6097,12 +8408,12 @@ Branch: REL9_3_STABLE [c2b68b1f7] 2015-05-29 17:02:58 -0400 - Avoid failures while fsync'ing data directory during + Avoid failures while fsync'ing data directory during crash restart (Abhijit Menon-Sen, Tom Lane) - In the previous minor releases we added a patch to fsync + In the previous minor releases we added a patch to fsync everything in the data directory after a crash. Unfortunately its response to any error condition was to fail, thereby preventing the server from starting up, even when the problem was quite harmless. @@ -6114,7 +8425,7 @@ Branch: REL9_3_STABLE [c2b68b1f7] 2015-05-29 17:02:58 -0400 - Also apply the same rules in initdb --sync-only. + Also apply the same rules in initdb --sync-only. This case is less critical but it should act similarly. @@ -6129,8 +8440,8 @@ Branch: REL9_2_STABLE [f3c67aad4] 2015-05-28 11:24:37 -0400 - Fix pg_get_functiondef() to show - functions' LEAKPROOF property, if set (Jeevan Chalke) + Fix pg_get_functiondef() to show + functions' LEAKPROOF property, if set (Jeevan Chalke) @@ -6142,7 +8453,7 @@ Branch: REL9_4_STABLE [9b74f32cd] 2015-05-22 10:31:29 -0400 - Fix pushJsonbValue() to unpack jbvBinary + Fix pushJsonbValue() to unpack jbvBinary objects (Andrew Dunstan) @@ -6164,14 +8475,14 @@ Branch: REL9_0_STABLE [b06649b7f] 2015-05-26 22:15:00 -0400 - Remove configure's check prohibiting linking to a - threaded libpython - on OpenBSD (Tom Lane) + Remove configure's check prohibiting linking to a + threaded libpython + on OpenBSD (Tom Lane) The failure this restriction was meant to prevent seems to not be a - problem anymore on current OpenBSD + problem anymore on current OpenBSD versions. 
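A sketch of the multixact pre-check procedure described at the top of this section; the two freeze parameters named below are my reading of the elided cross-references, and the table name is a placeholder:

    -- Only needed if pg_controldata shows "Latest checkpoint's oldestMultiXid: 1"
    -- and PGDATA/pg_multixact/offsets contains no file named 0000.
    SELECT relname
    FROM pg_class
    WHERE relminmxid = 1;                     -- tables that need a freeze vacuum

    SET vacuum_freeze_min_age = 0;            -- assumed parameter names
    SET vacuum_multixact_freeze_min_age = 0;
    VACUUM some_listed_table;                 -- placeholder; repeat per table reported above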
@@ -6192,7 +8503,7 @@ Branch: REL9_0_STABLE [b06649b7f] 2015-05-26 22:15:00 -0400 This release contains a variety of fixes from 9.4.1. For information about new features in the 9.4 major release, see - . + . @@ -6203,14 +8514,14 @@ Branch: REL9_0_STABLE [b06649b7f] 2015-05-26 22:15:00 -0400 - However, if you use contrib/citext's - regexp_matches() functions, see the changelog entry below + However, if you use contrib/citext's + regexp_matches() functions, see the changelog entry below about that. Also, if you are upgrading from a version earlier than 9.4.1, - see . + see . @@ -6282,7 +8593,7 @@ Branch: REL9_0_STABLE [cf893530a] 2015-05-19 18:18:56 -0400 - Our replacement implementation of snprintf() failed to + Our replacement implementation of snprintf() failed to check for errors reported by the underlying system library calls; the main case that might be missed is out-of-memory situations. In the worst case this might lead to information exposure, due to our @@ -6292,7 +8603,7 @@ Branch: REL9_0_STABLE [cf893530a] 2015-05-19 18:18:56 -0400 - It remains possible that some calls of the *printf() + It remains possible that some calls of the *printf() family of functions are vulnerable to information disclosure if an out-of-memory error occurs at just the wrong time. We judge the risk to not be large, but will continue analysis in this area. @@ -6312,15 +8623,15 @@ Branch: REL9_0_STABLE [b84e5c017] 2015-05-18 10:02:39 -0400 - In contrib/pgcrypto, uniformly report decryption failures - as Wrong key or corrupt data (Noah Misch) + In contrib/pgcrypto, uniformly report decryption failures + as Wrong key or corrupt data (Noah Misch) Previously, some cases of decryption with an incorrect key could report other error message texts. It has been shown that such variance in error reports can aid attackers in recovering keys from other systems. - While it's unknown whether pgcrypto's specific behaviors + While it's unknown whether pgcrypto's specific behaviors are likewise exploitable, it seems better to avoid the risk by using a one-size-fits-all message. (CVE-2015-3167) @@ -6370,7 +8681,7 @@ Branch: REL9_3_STABLE [ddebd2119] 2015-05-11 12:16:51 -0400 Under certain usage patterns, the existing defenses against this might - be insufficient, allowing pg_multixact/members files to be + be insufficient, allowing pg_multixact/members files to be removed too early, resulting in data loss. The fix for this includes modifying the server to fail transactions that would result in overwriting old multixact member ID data, and @@ -6391,16 +8702,16 @@ Branch: REL9_1_STABLE [801e250a8] 2015-05-05 15:50:53 -0400 - Fix incorrect declaration of contrib/citext's - regexp_matches() functions (Tom Lane) + Fix incorrect declaration of contrib/citext's + regexp_matches() functions (Tom Lane) - These functions should return setof text[], like the core + These functions should return setof text[], like the core functions they are wrappers for; but they were incorrectly declared as - returning just text[]. This mistake had two results: first, + returning just text[]. This mistake had two results: first, if there was no match you got a scalar null result, whereas what you - should get is an empty set (zero rows). Second, the g flag + should get is an empty set (zero rows). Second, the g flag was effectively ignored, since you would get only one result array even if there were multiple matches. 
@@ -6408,16 +8719,16 @@ Branch: REL9_1_STABLE [801e250a8] 2015-05-05 15:50:53 -0400 While the latter behavior is clearly a bug, there might be applications depending on the former behavior; therefore the function declarations - will not be changed by default until PostgreSQL 9.5. + will not be changed by default until PostgreSQL 9.5. In pre-9.5 branches, the old behavior exists in version 1.0 of - the citext extension, while we have provided corrected - declarations in version 1.1 (which is not installed by + the citext extension, while we have provided corrected + declarations in version 1.1 (which is not installed by default). To adopt the fix in pre-9.5 branches, execute - ALTER EXTENSION citext UPDATE TO '1.1' in each database in - which citext is installed. (You can also update + ALTER EXTENSION citext UPDATE TO '1.1' in each database in + which citext is installed. (You can also update back to 1.0 if you need to undo that.) Be aware that either update direction will require dropping and recreating any views or rules that - use citext's regexp_matches() functions. + use citext's regexp_matches() functions. @@ -6429,8 +8740,8 @@ Branch: REL9_4_STABLE [79afe6e66] 2015-02-26 12:34:43 -0500 - Render infinite dates and timestamps as infinity when - converting to json, rather than throwing an error + Render infinite dates and timestamps as infinity when + converting to json, rather than throwing an error (Andrew Dunstan) @@ -6443,8 +8754,8 @@ Branch: REL9_4_STABLE [997066f44] 2015-05-04 12:43:16 -0400 - Fix json/jsonb's populate_record() - and to_record() functions to handle empty input properly + Fix json/jsonb's populate_record() + and to_record() functions to handle empty input properly (Andrew Dunstan) @@ -6484,7 +8795,7 @@ Branch: REL9_4_STABLE [79edb2981] 2015-05-03 11:30:24 -0400 Fix behavior when changing foreign key constraint deferrability status - with ALTER TABLE ... ALTER CONSTRAINT (Tom Lane) + with ALTER TABLE ... ALTER CONSTRAINT (Tom Lane) @@ -6533,7 +8844,7 @@ Branch: REL9_0_STABLE [985da346e] 2015-04-25 16:44:27 -0400 This oversight in the planner has been observed to cause could - not find RelOptInfo for given relids errors, but it seems possible + not find RelOptInfo for given relids errors, but it seems possible that sometimes an incorrect query plan might get past that consistency check and result in silently-wrong query output. @@ -6581,7 +8892,7 @@ Branch: REL9_0_STABLE [72bbca27e] 2015-02-10 20:37:31 -0500 This oversight has been seen to lead to failed to join all - relations together errors in queries involving LATERAL, + relations together errors in queries involving LATERAL, and that might happen in other cases as well. @@ -6595,7 +8906,7 @@ Branch: REL9_4_STABLE [f16270ade] 2015-02-25 21:36:40 -0500 Ensure that row locking occurs properly when the target of - an UPDATE or DELETE is a security-barrier view + an UPDATE or DELETE is a security-barrier view (Stephen Frost) @@ -6614,7 +8925,7 @@ Branch: REL9_4_STABLE [fd3dfc236] 2015-04-28 00:18:04 +0200 On some platforms, the previous coding could result in errors like - could not fsync file "pg_replslot/...": Bad file descriptor. + could not fsync file "pg_replslot/...": Bad file descriptor. 
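A sketch of adopting the corrected contrib/citext declarations discussed above, assuming citext is installed in the target database:

    ALTER EXTENSION citext UPDATE TO '1.1';
    -- with the 1.1 declarations the wrapper returns setof text[]: a non-match
    -- yields zero rows instead of a single NULL, and 'g' gives one row per match
    SELECT regexp_matches('barbeque'::citext, '(b[^b]+)'::citext, 'g');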
@@ -6631,7 +8942,7 @@ Branch: REL9_0_STABLE [223a94680] 2015-04-23 21:37:09 +0300 Fix possible deadlock at startup - when max_prepared_transactions is too small + when max_prepared_transactions is too small (Heikki Linnakangas) @@ -6672,7 +8983,7 @@ Branch: REL9_0_STABLE [262fbcb9d] 2015-05-05 09:30:07 -0400 - Recursively fsync() the data directory after a crash + Recursively fsync() the data directory after a crash (Abhijit Menon-Sen, Robert Haas) @@ -6714,7 +9025,7 @@ Branch: REL9_4_STABLE [ee0d06c0b] 2015-04-03 00:07:29 -0400 This oversight could result in failures in sessions that start - concurrently with a VACUUM FULL on a system catalog. + concurrently with a VACUUM FULL on a system catalog. @@ -6726,7 +9037,7 @@ Branch: REL9_4_STABLE [2897e069c] 2015-03-30 13:05:35 -0400 - Fix crash in BackendIdGetTransactionIds() when trying + Fix crash in BackendIdGetTransactionIds() when trying to get status for a backend process that just exited (Tom Lane) @@ -6743,13 +9054,13 @@ Branch: REL9_0_STABLE [87b7fcc87] 2015-02-23 16:14:16 +0100 - Cope with unexpected signals in LockBufferForCleanup() + Cope with unexpected signals in LockBufferForCleanup() (Andres Freund) This oversight could result in spurious errors about multiple - backends attempting to wait for pincount 1. + backends attempting to wait for pincount 1. @@ -6763,7 +9074,7 @@ Branch: REL9_2_STABLE [effcaa4c2] 2015-02-15 23:26:46 -0500 - Fix crash when doing COPY IN to a table with check + Fix crash when doing COPY IN to a table with check constraints that contain whole-row references (Tom Lane) @@ -6808,7 +9119,7 @@ Branch: REL9_4_STABLE [16be9737c] 2015-03-23 16:52:17 +0100 - Avoid busy-waiting with short recovery_min_apply_delay + Avoid busy-waiting with short recovery_min_apply_delay values (Andres Freund) @@ -6874,9 +9185,9 @@ Branch: REL9_0_STABLE [152c94632] 2015-03-29 15:04:38 -0400 - ANALYZE executes index expressions many times; if there are + ANALYZE executes index expressions many times; if there are slow functions in such an expression, it's desirable to be able to - cancel the ANALYZE before that loop finishes. + cancel the ANALYZE before that loop finishes. @@ -6891,10 +9202,10 @@ Branch: REL9_1_STABLE [4a4fd2b0c] 2015-03-12 13:38:49 -0400 - Ensure tableoid of a foreign table is reported - correctly when a READ COMMITTED recheck occurs after - locking rows in SELECT FOR UPDATE, UPDATE, - or DELETE (Etsuro Fujita) + Ensure tableoid of a foreign table is reported + correctly when a READ COMMITTED recheck occurs after + locking rows in SELECT FOR UPDATE, UPDATE, + or DELETE (Etsuro Fujita) @@ -6940,14 +9251,14 @@ Branch: REL9_0_STABLE [c981e5999] 2015-05-08 19:40:15 -0400 - Recommend setting include_realm to 1 when using + Recommend setting include_realm to 1 when using Kerberos/GSSAPI/SSPI authentication (Stephen Frost) Without this, identically-named users from different realms cannot be distinguished. For the moment this is only a documentation change, but - it will become the default setting in PostgreSQL 9.5. + it will become the default setting in PostgreSQL 9.5. @@ -6970,7 +9281,7 @@ Branch: REL9_0_STABLE [e48ce4f33] 2015-02-17 12:49:18 -0500 - Remove code for matching IPv4 pg_hba.conf entries to + Remove code for matching IPv4 pg_hba.conf entries to IPv4-in-IPv6 addresses (Tom Lane) @@ -6983,7 +9294,7 @@ Branch: REL9_0_STABLE [e48ce4f33] 2015-02-17 12:49:18 -0500 crashes on some systems, so let's just remove it rather than fix it. 
(Had we chosen to fix it, that would make for a subtle and potentially security-sensitive change in the effective meaning of - IPv4 pg_hba.conf entries, which does not seem like a good + IPv4 pg_hba.conf entries, which does not seem like a good thing to do in minor releases.) @@ -7010,7 +9321,7 @@ Branch: REL9_4_STABLE [a1f4ade01] 2015-04-02 14:39:18 -0400 After a database crash, don't restart background workers that are - marked BGW_NEVER_RESTART (Amit Khandekar) + marked BGW_NEVER_RESTART (Amit Khandekar) @@ -7025,13 +9336,13 @@ Branch: REL9_1_STABLE [0d36d9f2b] 2015-02-06 11:32:42 +0200 - Report WAL flush, not insert, position in IDENTIFY_SYSTEM + Report WAL flush, not insert, position in IDENTIFY_SYSTEM replication command (Heikki Linnakangas) This avoids a possible startup failure - in pg_receivexlog. + in pg_receivexlog. @@ -7049,7 +9360,7 @@ Branch: REL9_0_STABLE [78ce2dc8e] 2015-05-07 15:10:01 +0200 While shutting down service on Windows, periodically send status updates to the Service Control Manager to prevent it from killing the - service too soon; and ensure that pg_ctl will wait for + service too soon; and ensure that pg_ctl will wait for shutdown (Krystian Bigaj) @@ -7066,7 +9377,7 @@ Branch: REL9_0_STABLE [8878eaaa8] 2015-02-23 13:32:53 +0200 - Reduce risk of network deadlock when using libpq's + Reduce risk of network deadlock when using libpq's non-blocking mode (Heikki Linnakangas) @@ -7075,12 +9386,12 @@ Branch: REL9_0_STABLE [8878eaaa8] 2015-02-23 13:32:53 +0200 buffer every so often, in case the server has sent enough response data to cause it to block on output. (A typical scenario is that the server is sending a stream of NOTICE messages during COPY FROM - STDIN.) This worked properly in the normal blocking mode, but not - so much in non-blocking mode. We've modified libpq + STDIN.) This worked properly in the normal blocking mode, but not + so much in non-blocking mode. We've modified libpq to opportunistically drain input when it can, but a full defense against this problem requires application cooperation: the application should watch for socket read-ready as well as write-ready conditions, - and be sure to call PQconsumeInput() upon read-ready. + and be sure to call PQconsumeInput() upon read-ready. @@ -7094,7 +9405,7 @@ Branch: REL9_2_STABLE [83c3115dd] 2015-02-21 12:59:43 -0500 - In libpq, fix misparsing of empty values in URI + In libpq, fix misparsing of empty values in URI connection strings (Thomas Fanghaenel) @@ -7111,7 +9422,7 @@ Branch: REL9_0_STABLE [ce2fcc58e] 2015-02-11 11:30:11 +0100 - Fix array handling in ecpg (Michael Meskes) + Fix array handling in ecpg (Michael Meskes) @@ -7127,8 +9438,8 @@ Branch: REL9_0_STABLE [557fcfae3] 2015-04-01 20:00:07 -0300 - Fix psql to sanely handle URIs and conninfo strings as - the first parameter to \connect + Fix psql to sanely handle URIs and conninfo strings as + the first parameter to \connect (David Fetter, Andrew Dunstan, Álvaro Herrera) @@ -7151,17 +9462,17 @@ Branch: REL9_0_STABLE [396ef6fd8] 2015-03-14 13:43:26 -0400 - Suppress incorrect complaints from psql on some - platforms that it failed to write ~/.psql_history at exit + Suppress incorrect complaints from psql on some + platforms that it failed to write ~/.psql_history at exit (Tom Lane) This misbehavior was caused by a workaround for a bug in very old - (pre-2006) versions of libedit. We fixed it by + (pre-2006) versions of libedit. 
We fixed it by removing the workaround, which will cause a similar failure to appear - for anyone still using such versions of libedit. - Recommendation: upgrade that library, or use libreadline. + for anyone still using such versions of libedit. + Recommendation: upgrade that library, or use libreadline. @@ -7177,7 +9488,7 @@ Branch: REL9_0_STABLE [8e70f3c40] 2015-02-10 22:38:29 -0500 - Fix pg_dump's rule for deciding which casts are + Fix pg_dump's rule for deciding which casts are system-provided casts that should not be dumped (Tom Lane) @@ -7193,8 +9504,8 @@ Branch: REL9_1_STABLE [b0d53b2e3] 2015-02-18 11:43:00 -0500 - In pg_dump, fix failure to honor -Z - compression level option together with -Fd + In pg_dump, fix failure to honor -Z + compression level option together with -Fd (Michael Paquier) @@ -7210,7 +9521,7 @@ Branch: REL9_1_STABLE [dcb467b8e] 2015-03-02 14:12:43 -0500 - Make pg_dump consider foreign key relationships + Make pg_dump consider foreign key relationships between extension configuration tables while choosing dump order (Gilles Darold, Michael Paquier, Stephen Frost) @@ -7230,7 +9541,7 @@ Branch: REL9_3_STABLE [d645273cf] 2015-03-06 13:27:46 -0500 - Avoid possible pg_dump failure when concurrent sessions + Avoid possible pg_dump failure when concurrent sessions are creating and dropping temporary functions (Tom Lane) @@ -7247,7 +9558,7 @@ Branch: REL9_0_STABLE [7a501bcbf] 2015-02-25 12:01:12 -0500 - Fix dumping of views that are just VALUES(...) but have + Fix dumping of views that are just VALUES(...) but have column aliases (Tom Lane) @@ -7261,7 +9572,7 @@ Branch: REL9_4_STABLE [70fac4844] 2015-05-01 13:03:23 -0400 Ensure that a view's replication identity is correctly set - to nothing during dump/restore (Marko Tiikkaja) + to nothing during dump/restore (Marko Tiikkaja) @@ -7285,7 +9596,7 @@ Branch: REL9_3_STABLE [4e9935979] 2015-05-16 15:16:28 -0400 - In pg_upgrade, force timeline 1 in the new cluster + In pg_upgrade, force timeline 1 in the new cluster (Bruce Momjian) @@ -7307,7 +9618,7 @@ Branch: REL9_0_STABLE [2194aa92b] 2015-05-16 00:10:03 -0400 - In pg_upgrade, check for improperly non-connectable + In pg_upgrade, check for improperly non-connectable databases before proceeding (Bruce Momjian) @@ -7325,8 +9636,8 @@ Branch: REL9_0_STABLE [4ae178f60] 2015-02-11 22:06:04 -0500 - In pg_upgrade, quote directory paths - properly in the generated delete_old_cluster script + In pg_upgrade, quote directory paths + properly in the generated delete_old_cluster script (Bruce Momjian) @@ -7343,14 +9654,14 @@ Branch: REL9_0_STABLE [85dac37ee] 2015-02-11 21:02:06 -0500 - In pg_upgrade, preserve database-level freezing info + In pg_upgrade, preserve database-level freezing info properly (Bruce Momjian) This oversight could cause missing-clog-file errors for tables within - the postgres and template1 databases. + the postgres and template1 databases. 
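A psql sketch of the \connect forms covered by the URI/conninfo fix above; the host, user, and database names are placeholders:

    \connect postgresql://alice@db.example.com:5432/appdb
    \connect "dbname=appdb host=db.example.com user=alice"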
@@ -7366,7 +9677,7 @@ Branch: REL9_0_STABLE [bf22a8e58] 2015-03-30 17:18:10 -0400 - Run pg_upgrade and pg_resetxlog with + Run pg_upgrade and pg_resetxlog with restricted privileges on Windows, so that they don't fail when run by an administrator (Muhammad Asif Naeem) @@ -7384,8 +9695,8 @@ Branch: REL9_1_STABLE [d7d294f59] 2015-02-17 11:08:40 -0500 - Improve handling of readdir() failures when scanning - directories in initdb and pg_basebackup + Improve handling of readdir() failures when scanning + directories in initdb and pg_basebackup (Marco Nenciarini) @@ -7402,7 +9713,7 @@ Branch: REL9_0_STABLE [40b0c10b7] 2015-03-15 23:22:03 -0400 - Fix slow sorting algorithm in contrib/intarray (Tom Lane) + Fix slow sorting algorithm in contrib/intarray (Tom Lane) @@ -7450,7 +9761,7 @@ Branch: REL9_0_STABLE [3c3749a3b] 2015-05-15 19:36:20 -0400 - Update time zone data files to tzdata release 2015d + Update time zone data files to tzdata release 2015d for DST law changes in Egypt, Mongolia, and Palestine, plus historical changes in Canada and Chile. Also adopt revised zone abbreviations for the America/Adak zone (HST/HDT not HAST/HADT). @@ -7473,7 +9784,7 @@ Branch: REL9_0_STABLE [3c3749a3b] 2015-05-15 19:36:20 -0400 This release contains a variety of fixes from 9.4.0. For information about new features in the 9.4 major release, see - . + . @@ -7485,12 +9796,12 @@ Branch: REL9_0_STABLE [3c3749a3b] 2015-05-15 19:36:20 -0400 However, if you are a Windows user and are using the Norwegian - (Bokmål) locale, manual action is needed after the upgrade to - replace any Norwegian (Bokmål)_Norway - or norwegian-bokmal locale names stored - in PostgreSQL system catalogs with the plain-ASCII - alias Norwegian_Norway. For details see - + (Bokmål) locale, manual action is needed after the upgrade to + replace any Norwegian (Bokmål)_Norway + or norwegian-bokmal locale names stored + in PostgreSQL system catalogs with the plain-ASCII + alias Norwegian_Norway. For details see + @@ -7518,15 +9829,15 @@ Branch: REL9_0_STABLE [56b970f2e] 2015-02-02 10:00:52 -0500 - Fix buffer overruns in to_char() + Fix buffer overruns in to_char() (Bruce Momjian) - When to_char() processes a numeric formatting template - calling for a large number of digits, PostgreSQL + When to_char() processes a numeric formatting template + calling for a large number of digits, PostgreSQL would read past the end of a buffer. When processing a crafted - timestamp formatting template, PostgreSQL would write + timestamp formatting template, PostgreSQL would write past the end of a buffer. Either case could crash the server. We have not ruled out the possibility of attacks that lead to privilege escalation, though they seem unlikely. @@ -7546,27 +9857,27 @@ Branch: REL9_0_STABLE [9e05c5063] 2015-02-02 10:00:52 -0500 - Fix buffer overrun in replacement *printf() functions + Fix buffer overrun in replacement *printf() functions (Tom Lane) - PostgreSQL includes a replacement implementation - of printf and related functions. This code will overrun + PostgreSQL includes a replacement implementation + of printf and related functions. This code will overrun a stack buffer when formatting a floating point number (conversion - specifiers e, E, f, F, - g or G) with requested precision greater than + specifiers e, E, f, F, + g or G) with requested precision greater than about 500. This will crash the server, and we have not ruled out the possibility of attacks that lead to privilege escalation. 
A database user can trigger such a buffer overrun through - the to_char() SQL function. While that is the only - affected core PostgreSQL functionality, extension + the to_char() SQL function. While that is the only + affected core PostgreSQL functionality, extension modules that use printf-family functions may be at risk as well. - This issue primarily affects PostgreSQL on Windows. - PostgreSQL uses the system implementation of these + This issue primarily affects PostgreSQL on Windows. + PostgreSQL uses the system implementation of these functions where adequate, which it is on other modern platforms. (CVE-2015-0242) @@ -7591,12 +9902,12 @@ Branch: REL9_0_STABLE [0a3ee8a5f] 2015-02-02 10:00:52 -0500 - Fix buffer overruns in contrib/pgcrypto + Fix buffer overruns in contrib/pgcrypto (Marko Tiikkaja, Noah Misch) - Errors in memory size tracking within the pgcrypto + Errors in memory size tracking within the pgcrypto module permitted stack buffer overruns and improper dependence on the contents of uninitialized memory. The buffer overrun cases can crash the server, and we have not ruled out the possibility of @@ -7657,7 +9968,7 @@ Branch: REL9_0_STABLE [3a2063369] 2015-01-28 12:33:29 -0500 Some server error messages show the values of columns that violate a constraint, such as a unique constraint. If the user does not have - SELECT privilege on all columns of the table, this could + SELECT privilege on all columns of the table, this could mean exposing values that the user should not be able to see. Adjust the code so that values are displayed only when they came from the SQL command or could be selected by the user. @@ -7706,20 +10017,20 @@ Branch: REL9_2_STABLE [6bf343c6e] 2015-01-16 13:10:23 +0200 - Cope with the Windows locale named Norwegian (Bokmål) + Cope with the Windows locale named Norwegian (Bokmål) (Heikki Linnakangas) Non-ASCII locale names are problematic since it's not clear what encoding they should be represented in. Map the troublesome locale - name to a plain-ASCII alias, Norwegian_Norway. + name to a plain-ASCII alias, Norwegian_Norway. - 9.4.0 mapped the troublesome name to norwegian-bokmal, + 9.4.0 mapped the troublesome name to norwegian-bokmal, but that turns out not to work on all Windows configurations. - Norwegian_Norway is now recommended instead. + Norwegian_Norway is now recommended instead. @@ -7740,7 +10051,7 @@ Branch: REL9_0_STABLE [5308e085b] 2015-01-15 18:52:38 -0500 - In READ COMMITTED mode, queries that lock or update + In READ COMMITTED mode, queries that lock or update recently-updated rows could crash as a result of this bug. 
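For context on the to_char() overrun fixes above, these are the kinds of formatting-template calls involved; ordinary templates like the ones below are safe, while the fixes concern templates requesting very large numbers of digits or crafted timestamp templates:

    SELECT to_char(123456.789, '999G999D999');                 -- numeric template
    SELECT to_char(now(), 'FMDay, DD Month YYYY HH24:MI:SS');  -- timestamp template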
@@ -7769,8 +10080,8 @@ Branch: REL9_3_STABLE [54a8abc2b] 2015-01-04 15:48:29 -0300 Fix failure to wait when a transaction tries to acquire a FOR - NO KEY EXCLUSIVE tuple lock, while multiple other transactions - currently hold FOR SHARE locks (Álvaro Herrera) + NO KEY EXCLUSIVE tuple lock, while multiple other transactions + currently hold FOR SHARE locks (Álvaro Herrera) @@ -7783,7 +10094,7 @@ Branch: REL9_3_STABLE [939f0fb67] 2015-01-15 13:18:19 -0500 - Improve performance of EXPLAIN with large range tables + Improve performance of EXPLAIN with large range tables (Tom Lane) @@ -7796,41 +10107,41 @@ Branch: REL9_4_STABLE [4cbf390d5] 2015-01-30 14:44:49 -0500 - Fix jsonb Unicode escape processing, and in consequence - disallow \u0000 (Tom Lane) + Fix jsonb Unicode escape processing, and in consequence + disallow \u0000 (Tom Lane) - Previously, the JSON Unicode escape \u0000 was accepted + Previously, the JSON Unicode escape \u0000 was accepted and was stored as those six characters; but that is indistinguishable - from what is stored for the input \\u0000, resulting in + from what is stored for the input \\u0000, resulting in ambiguity. Moreover, in cases where de-escaped textual output is - expected, such as the ->> operator, the sequence was - printed as \u0000, which does not meet the expectation + expected, such as the ->> operator, the sequence was + printed as \u0000, which does not meet the expectation that JSON escaping would be removed. (Consistent behavior would - require emitting a zero byte, but PostgreSQL does not + require emitting a zero byte, but PostgreSQL does not support zero bytes embedded in text strings.) 9.4.0 included an ill-advised attempt to improve this situation by adjusting JSON output conversion rules; but of course that could not fix the fundamental ambiguity, and it turned out to break other usages of Unicode escape sequences. Revert that, and to avoid the core problem, - reject \u0000 in jsonb input. + reject \u0000 in jsonb input. - If a jsonb column contains a \u0000 value stored + If a jsonb column contains a \u0000 value stored with 9.4.0, it will henceforth read out as though it - were \\u0000, which is the other valid interpretation of + were \\u0000, which is the other valid interpretation of the data stored by 9.4.0 for this case. - The json type did not have the storage-ambiguity problem, but + The json type did not have the storage-ambiguity problem, but it did have the problem of inconsistent de-escaped textual output. - Therefore \u0000 will now also be rejected - in json values when conversion to de-escaped form is + Therefore \u0000 will now also be rejected + in json values when conversion to de-escaped form is required. This change does not break the ability to - store \u0000 in json columns so long as no + store \u0000 in json columns so long as no processing is done on the values. This is exactly parallel to the cases in which non-ASCII Unicode escapes are allowed when the database encoding is not UTF8. 
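A sketch of the \u0000 handling described above; the annotated behavior is my summary of the entry rather than output from a specific build:

    SELECT '"\u0041"'::jsonb;                 -- ordinary Unicode escapes still accepted
    SELECT '"\u0000"'::jsonb;                 -- now rejected at input
    SELECT '"\u0000"'::json;                  -- still storable: json keeps the raw text
    SELECT ('{"k": "\u0000"}'::json) ->> 'k'; -- rejected: de-escaped output is required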
@@ -7849,14 +10160,14 @@ Branch: REL9_0_STABLE [cebb3f032] 2015-01-17 22:37:32 -0500 - Fix namespace handling in xpath() (Ali Akbar) + Fix namespace handling in xpath() (Ali Akbar) - Previously, the xml value resulting from - an xpath() call would not have namespace declarations if + Previously, the xml value resulting from + an xpath() call would not have namespace declarations if the namespace declarations were attached to an ancestor element in the - input xml value, rather than to the specific element being + input xml value, rather than to the specific element being returned. Propagate the ancestral declaration so that the result is correct when considered in isolation. @@ -7876,7 +10187,7 @@ Branch: REL9_3_STABLE [527ff8baf] 2015-01-30 12:30:43 -0500 - This patch fixes corner-case unexpected operator NNNN planner + This patch fixes corner-case unexpected operator NNNN planner errors, and improves the selectivity estimates for some other cases. @@ -7894,7 +10205,7 @@ Branch: REL9_4_STABLE [4e241f7cd] 2014-12-30 14:53:03 +0200 - 9.4.0 could fail with index row size exceeds maximum errors + 9.4.0 could fail with index row size exceeds maximum errors for data that previous versions would accept. @@ -7924,7 +10235,7 @@ Branch: REL9_1_STABLE [37e0f13f2] 2015-01-29 19:37:22 +0200 Fix possible crash when using - nonzero gin_fuzzy_search_limit (Heikki Linnakangas) + nonzero gin_fuzzy_search_limit (Heikki Linnakangas) @@ -7952,7 +10263,7 @@ Branch: REL9_4_STABLE [b337d9657] 2015-01-15 20:52:18 +0200 Fix incorrect replay of WAL parameter change records that report - changes in the wal_log_hints setting (Petr Jelinek) + changes in the wal_log_hints setting (Petr Jelinek) @@ -7968,7 +10279,7 @@ Branch: REL9_0_STABLE [a1a8d0249] 2015-01-19 23:01:46 -0500 - Change pgstat wait timeout warning message to be LOG level, + Change pgstat wait timeout warning message to be LOG level, and rephrase it to be more understandable (Tom Lane) @@ -7977,7 +10288,7 @@ Branch: REL9_0_STABLE [a1a8d0249] 2015-01-19 23:01:46 -0500 case, but it occurs often enough on our slower buildfarm members to be a nuisance. Reduce it to LOG level, and expend a bit more effort on the wording: it now reads using stale statistics instead of - current ones because stats collector is not responding. + current ones because stats collector is not responding. @@ -7993,7 +10304,7 @@ Branch: REL9_0_STABLE [2e4946169] 2015-01-07 22:46:20 -0500 - Warn if macOS's setlocale() starts an unwanted extra + Warn if macOS's setlocale() starts an unwanted extra thread inside the postmaster (Noah Misch) @@ -8006,18 +10317,18 @@ Branch: REL9_4_STABLE [733728ff3] 2015-01-11 12:35:47 -0500 - Fix libpq's behavior when /etc/passwd + Fix libpq's behavior when /etc/passwd isn't readable (Tom Lane) - While doing PQsetdbLogin(), libpq + While doing PQsetdbLogin(), libpq attempts to ascertain the user's operating system name, which on most - Unix platforms involves reading /etc/passwd. As of 9.4, + Unix platforms involves reading /etc/passwd. As of 9.4, failure to do that was treated as a hard error. Restore the previous behavior, which was to fail only if the application does not provide a database role name to connect as. This supports operation in chroot - environments that lack an /etc/passwd file. + environments that lack an /etc/passwd file. 
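A sketch of the xpath() namespace fix above: the namespace declared on the ancestor <x> element is now propagated onto the extracted <y> fragment so the result is valid when considered on its own:

    SELECT (xpath('/x/y',
                  '<x xmlns:ex="http://example.com/"><y><ex:z/></y></x>'::xml))[1];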
@@ -8033,17 +10344,17 @@ Branch: REL9_0_STABLE [2600e4436] 2014-12-31 12:17:12 -0500 - Improve consistency of parsing of psql's special + Improve consistency of parsing of psql's special variables (Tom Lane) - Allow variant spellings of on and off (such - as 1/0) for ECHO_HIDDEN - and ON_ERROR_ROLLBACK. Report a warning for unrecognized - values for COMP_KEYWORD_CASE, ECHO, - ECHO_HIDDEN, HISTCONTROL, - ON_ERROR_ROLLBACK, and VERBOSITY. Recognize + Allow variant spellings of on and off (such + as 1/0) for ECHO_HIDDEN + and ON_ERROR_ROLLBACK. Report a warning for unrecognized + values for COMP_KEYWORD_CASE, ECHO, + ECHO_HIDDEN, HISTCONTROL, + ON_ERROR_ROLLBACK, and VERBOSITY. Recognize all values for all these variables case-insensitively; previously there was a mishmash of case-sensitive and case-insensitive behaviors. @@ -8058,7 +10369,7 @@ Branch: REL9_3_STABLE [bb1e2426b] 2015-01-05 19:27:09 -0500 - Fix pg_dump to handle comments on event triggers + Fix pg_dump to handle comments on event triggers without failing (Tom Lane) @@ -8072,8 +10383,8 @@ Branch: REL9_3_STABLE [cc609c46f] 2015-01-30 09:01:36 -0600 - Allow parallel pg_dump to - use (Kevin Grittner) @@ -8088,7 +10399,7 @@ Branch: REL9_1_STABLE [2a0bfa4d6] 2015-01-03 20:54:13 +0100 - Prevent WAL files created by pg_basebackup -x/-X from + Prevent WAL files created by pg_basebackup -x/-X from being archived again when the standby is promoted (Andres Freund) @@ -8106,12 +10417,12 @@ Branch: REL9_0_STABLE [dc9a506e6] 2015-01-29 20:18:46 -0500 Handle unexpected query results, especially NULLs, safely in - contrib/tablefunc's connectby() + contrib/tablefunc's connectby() (Michael Paquier) - connectby() previously crashed if it encountered a NULL + connectby() previously crashed if it encountered a NULL key value. It now prints that row but doesn't recurse further. @@ -8205,14 +10516,14 @@ Branch: REL9_4_STABLE [adb355106] 2015-01-14 11:08:17 -0500 - Allow CFLAGS from configure's environment - to override automatically-supplied CFLAGS (Tom Lane) + Allow CFLAGS from configure's environment + to override automatically-supplied CFLAGS (Tom Lane) - Previously, configure would add any switches that it + Previously, configure would add any switches that it chose of its own accord to the end of the - user-specified CFLAGS string. Since most compilers + user-specified CFLAGS string. Since most compilers process switches left-to-right, this meant that configure's choices would override the user-specified flags in case of conflicts. That should work the other way around, so adjust the logic to put the @@ -8232,13 +10543,13 @@ Branch: REL9_0_STABLE [338ff75fc] 2015-01-19 23:44:33 -0500 - Make pg_regress remove any temporary installation it + Make pg_regress remove any temporary installation it created upon successful exit (Tom Lane) This results in a very substantial reduction in disk space usage - during make check-world, since that sequence involves + during make check-world, since that sequence involves creation of numerous temporary installations. @@ -8264,7 +10575,7 @@ Branch: REL9_4_STABLE [c2b06ab17] 2015-01-30 22:45:58 -0500 - Update time zone data files to tzdata release 2015a + Update time zone data files to tzdata release 2015a for DST law changes in Chile and Mexico, plus historical changes in Iceland. 
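A psql sketch of the special-variable parsing changes above; all of these spellings are now accepted case-insensitively, and unrecognized values draw a warning:

    \set ECHO_HIDDEN on
    \set ON_ERROR_ROLLBACK 1
    \set COMP_KEYWORD_CASE preserve-upper
    \set VERBOSITY terse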
@@ -8287,7 +10598,7 @@ Branch: REL9_4_STABLE [c2b06ab17] 2015-01-30 22:45:58 -0500 Overview - Major enhancements in PostgreSQL 9.4 include: + Major enhancements in PostgreSQL 9.4 include: @@ -8296,35 +10607,35 @@ Branch: REL9_4_STABLE [c2b06ab17] 2015-01-30 22:45:58 -0500 - Add jsonb, a more - capable and efficient data type for storing JSON data + Add jsonb, a more + capable and efficient data type for storing JSON data - Add new SQL command - for changing postgresql.conf configuration file entries + Add new SQL command + for changing postgresql.conf configuration file entries - Reduce lock strength for some + Reduce lock strength for some commands - Allow materialized views + Allow materialized views to be refreshed without blocking concurrent reads - Add support for logical decoding + Add support for logical decoding of WAL data, to allow database changes to be streamed out in a customizable format @@ -8332,7 +10643,7 @@ Branch: REL9_4_STABLE [c2b06ab17] 2015-01-30 22:45:58 -0500 - Allow background worker processes + Allow background worker processes to be dynamically registered, started and terminated @@ -8350,8 +10661,8 @@ Branch: REL9_4_STABLE [c2b06ab17] 2015-01-30 22:45:58 -0500 Migration to Version 9.4 - A dump/restore using , or use - of , is required for those wishing to migrate + A dump/restore using , or use + of , is required for those wishing to migrate data from any previous release. @@ -8371,24 +10682,24 @@ Branch: REL9_4_STABLE [c2b06ab17] 2015-01-30 22:45:58 -0500 Previously, an input array string that started with a single-element sub-array could later contain multi-element sub-arrays, - e.g. '{{1}, {2,3}}'::int[] would be accepted. + e.g. '{{1}, {2,3}}'::int[] would be accepted. - When converting values of type date, timestamp - or timestamptz + When converting values of type date, timestamp + or timestamptz to JSON, render the values in a format compliant with ISO 8601 (Andrew Dunstan) Previously such values were rendered according to the current - setting; but many JSON processors + setting; but many JSON processors require timestamps to be in ISO 8601 format. If necessary, the previous behavior can be obtained by explicitly casting the datetime - value to text before passing it to the JSON conversion + value to text before passing it to the JSON conversion function. @@ -8396,15 +10707,15 @@ Branch: REL9_4_STABLE [c2b06ab17] 2015-01-30 22:45:58 -0500 The json - #> text[] path extraction operator now + #> text[] path extraction operator now returns its lefthand input, not NULL, if the array is empty (Tom Lane) This is consistent with the notion that this represents zero applications of the simple field/element extraction - operator ->. Similarly, json - #>> text[] with an empty array merely + operator ->. Similarly, json + #>> text[] with an empty array merely coerces its lefthand input to text. 
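Brief sketches of the three 9.4 behavior changes listed above (array-literal validation, ISO 8601 datetime output in JSON, and the empty-path #> operator):

    SELECT '{{1},{2,3}}'::int[];              -- now rejected: sub-arrays must match in size
    SELECT to_json(now());                    -- datetime values render in ISO 8601
    SELECT '{"a": 1}'::json #> '{}'::text[];  -- empty path returns the input, not NULL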
@@ -8429,26 +10740,26 @@ Branch: REL9_4_STABLE [c2b06ab17] 2015-01-30 22:45:58 -0500 Cause consecutive whitespace in to_timestamp() - and to_date() format strings to consume a corresponding + linkend="functions-formatting-table">to_timestamp() + and to_date() format strings to consume a corresponding number of characters in the input string (whitespace or not), then - conditionally consume adjacent whitespace, if not in FX + conditionally consume adjacent whitespace, if not in FX mode (Jeevan Chalke) - Previously, consecutive whitespace characters in a non-FX + Previously, consecutive whitespace characters in a non-FX format string behaved like a single whitespace character and consumed all adjacent whitespace in the input string. For example, previously a format string of three spaces would consume only the first space in - ' 12', but it will now consume all three characters. + ' 12', but it will now consume all three characters. Fix ts_rank_cd() + linkend="textsearch-functions-table">ts_rank_cd() to ignore stripped lexemes (Alex Hill) @@ -8462,15 +10773,15 @@ Branch: REL9_4_STABLE [c2b06ab17] 2015-01-30 22:45:58 -0500 For functions declared to take VARIADIC - "any", an actual parameter marked as VARIADIC + "any", an actual parameter marked as VARIADIC must be of a determinable array type (Pavel Stehule) Such parameters can no longer be written as an undecorated string - literal or NULL; a cast to an appropriate array data type + literal or NULL; a cast to an appropriate array data type will now be required. Note that this does not affect parameters not - marked VARIADIC. + marked VARIADIC. @@ -8482,8 +10793,8 @@ Branch: REL9_4_STABLE [c2b06ab17] 2015-01-30 22:45:58 -0500 - Constructs like row_to_json(tab.*) now always emit column - names that match the column aliases visible for table tab + Constructs like row_to_json(tab.*) now always emit column + names that match the column aliases visible for table tab at the point of the call. In previous releases the emitted column names would sometimes be the table's actual column names regardless of any aliases assigned in the query. @@ -8492,15 +10803,15 @@ Branch: REL9_4_STABLE [c2b06ab17] 2015-01-30 22:45:58 -0500 - now also discards sequence-related state + now also discards sequence-related state (Fabrízio de Royes Mello, Robert Haas) - Rename EXPLAIN - ANALYZE's total runtime output + Rename EXPLAIN + ANALYZE's total runtime output to execution time (Tom Lane) @@ -8512,15 +10823,15 @@ Branch: REL9_4_STABLE [c2b06ab17] 2015-01-30 22:45:58 -0500 - SHOW TIME ZONE now - outputs simple numeric UTC offsets in POSIX timezone + SHOW TIME ZONE now + outputs simple numeric UTC offsets in POSIX timezone format (Tom Lane) Previously, such timezone settings were displayed as interval values. - The new output is properly interpreted by SET TIME ZONE + linkend="datatype-interval-output">interval values. + The new output is properly interpreted by SET TIME ZONE when passed as a simple string, whereas the old output required special treatment to be re-parsed correctly. @@ -8529,25 +10840,25 @@ Branch: REL9_4_STABLE [c2b06ab17] 2015-01-30 22:45:58 -0500 Foreign data wrappers that support updating foreign tables must - consider the possible presence of AFTER ROW triggers + consider the possible presence of AFTER ROW triggers (Noah Misch) - When an AFTER ROW trigger is present, all columns of the + When an AFTER ROW trigger is present, all columns of the table must be returned by updating actions, since the trigger might inspect any or all of them. 
Previously, foreign tables never had triggers, so the FDW might optimize away fetching columns not mentioned - in the RETURNING clause (if any). + in the RETURNING clause (if any). Prevent CHECK + linkend="ddl-constraints-check-constraints">CHECK constraints from referencing system columns, except - tableoid (Amit Kapila) + tableoid (Amit Kapila) @@ -8565,7 +10876,7 @@ Branch: REL9_4_STABLE [c2b06ab17] 2015-01-30 22:45:58 -0500 Previously, there was an undocumented precedence order among - the recovery_target_xxx parameters. + the recovery_target_xxx parameters. @@ -8578,15 +10889,15 @@ Branch: REL9_4_STABLE [c2b06ab17] 2015-01-30 22:45:58 -0500 User commands that did their own quote preservation might need adjustment. This is likely to be an issue for commands used in - , , - and COPY TO/FROM PROGRAM. + , , + and COPY TO/FROM PROGRAM. Remove catalog column pg_class.reltoastidxid + linkend="catalog-pg-class">pg_class.reltoastidxid (Michael Paquier) @@ -8594,33 +10905,33 @@ Branch: REL9_4_STABLE [c2b06ab17] 2015-01-30 22:45:58 -0500 Remove catalog column pg_rewrite.ev_attr + linkend="catalog-pg-rewrite">pg_rewrite.ev_attr (Kevin Grittner) Per-column rules have not been supported since - PostgreSQL 7.3. + PostgreSQL 7.3. - Remove native support for Kerberos authentication - (, etc) (Magnus Hagander) - The supported way to use Kerberos authentication is - with GSSAPI. The native code has been deprecated since - PostgreSQL 8.3. + The supported way to use Kerberos authentication is + with GSSAPI. The native code has been deprecated since + PostgreSQL 8.3. - In PL/Python, handle domains over arrays like the + In PL/Python, handle domains over arrays like the underlying array type (Rodolfo Campero) @@ -8632,9 +10943,9 @@ Branch: REL9_4_STABLE [c2b06ab17] 2015-01-30 22:45:58 -0500 Make libpq's PQconnectdbParams() + linkend="libpq-pqconnectdbparams">PQconnectdbParams() and PQpingParams() + linkend="libpq-pqpingparams">PQpingParams() functions process zero-length strings as defaults (Adrian Vondendriesch) @@ -8647,27 +10958,27 @@ Branch: REL9_4_STABLE [c2b06ab17] 2015-01-30 22:45:58 -0500 - Change empty arrays returned by the module + Change empty arrays returned by the module to be zero-dimensional arrays (Bruce Momjian) Previously, empty arrays were returned as zero-length one-dimensional arrays, whose text representation looked the same as zero-dimensional - arrays ({}), but they acted differently in array - operations. intarray's behavior in this area now + arrays ({}), but they acted differently in array + operations. intarray's behavior in this area now matches the built-in array operators. - now uses - Previously this option was spelled or , but that was inconsistent with other tools. @@ -8697,7 +11008,7 @@ Branch: REL9_4_STABLE [c2b06ab17] 2015-01-30 22:45:58 -0500 - The new worker_spi module shows an example of use + The new worker_spi module shows an example of use of this feature. @@ -8717,15 +11028,15 @@ Branch: REL9_4_STABLE [c2b06ab17] 2015-01-30 22:45:58 -0500 During crash recovery or immediate shutdown, send uncatchable - termination signals (SIGKILL) to child processes + termination signals (SIGKILL) to child processes that do not shut down promptly (MauMau, Álvaro Herrera) This reduces the likelihood of leaving orphaned child processes - behind after shutdown, as well + behind after shutdown, as well as ensuring that crash recovery can proceed if some child processes - have become stuck. + have become stuck. 
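Two sketches of migration items above: the VARIADIC "any" change (format() is declared that way) and the new restriction on system columns in CHECK constraints; the table names are placeholders:

    -- VARIADIC "any": the variadic argument must now have a determinable array type
    SELECT format('%s and %s', VARIADIC ARRAY['a','b']);   -- OK
    SELECT format('%s and %s', VARIADIC '{a,b}'::text[]);  -- OK, explicit cast
    SELECT format('%s and %s', VARIADIC '{a,b}');          -- now rejected: type is unknown

    -- CHECK constraints may reference tableoid but no other system column
    CREATE TABLE ok_example  (x int, CHECK (tableoid IS NOT NULL));  -- still allowed
    CREATE TABLE bad_example (x int, CHECK (ctid IS NOT NULL));      -- now rejected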
@@ -8737,7 +11048,7 @@ Branch: REL9_4_STABLE [c2b06ab17] 2015-01-30 22:45:58 -0500 - Make properly report dead but + Make properly report dead but not-yet-removable rows to the statistics collector (Hari Babu) @@ -8755,14 +11066,14 @@ Branch: REL9_4_STABLE [c2b06ab17] 2015-01-30 22:45:58 -0500 - Reduce GIN index size + Reduce GIN index size (Alexander Korotkov, Heikki Linnakangas) - Indexes upgraded via will work fine - but will still be in the old, larger GIN format. - Use to recreate old GIN indexes in the + Indexes upgraded via will work fine + but will still be in the old, larger GIN format. + Use to recreate old GIN indexes in the new format. @@ -8770,16 +11081,16 @@ Branch: REL9_4_STABLE [c2b06ab17] 2015-01-30 22:45:58 -0500 Improve speed of multi-key GIN lookups (Alexander Korotkov, + linkend="gin">GIN lookups (Alexander Korotkov, Heikki Linnakangas) - Add GiST index support - for inet and - cidr data types + Add GiST index support + for inet and + cidr data types (Emre Hasegeli) @@ -8815,7 +11126,7 @@ Branch: REL9_4_STABLE [c2b06ab17] 2015-01-30 22:45:58 -0500 Allow multiple backends to insert - into WAL buffers + into WAL buffers concurrently (Heikki Linnakangas) @@ -8827,7 +11138,7 @@ Branch: REL9_4_STABLE [c2b06ab17] 2015-01-30 22:45:58 -0500 Conditionally write only the modified portion of updated rows to - WAL (Amit Kapila) + WAL (Amit Kapila) @@ -8842,7 +11153,7 @@ Branch: REL9_4_STABLE [c2b06ab17] 2015-01-30 22:45:58 -0500 Improve speed of aggregates that - use numeric state + use numeric state values (Hadi Moshayedi) @@ -8851,8 +11162,8 @@ Branch: REL9_4_STABLE [c2b06ab17] 2015-01-30 22:45:58 -0500 Attempt to freeze tuples when tables are rewritten with or VACUUM FULL (Robert Haas, + linkend="sql-cluster"/> or VACUUM FULL (Robert Haas, Andres Freund) @@ -8863,8 +11174,8 @@ Branch: REL9_4_STABLE [c2b06ab17] 2015-01-30 22:45:58 -0500 - Improve speed of with default nextval() + Improve speed of with default nextval() columns (Simon Riggs) @@ -8872,7 +11183,7 @@ Branch: REL9_4_STABLE [c2b06ab17] 2015-01-30 22:45:58 -0500 Improve speed of accessing many different sequences in the same session + linkend="sql-createsequence">sequences in the same session (David Rowley) @@ -8886,26 +11197,26 @@ Branch: REL9_4_STABLE [c2b06ab17] 2015-01-30 22:45:58 -0500 - Reduce memory allocated by PL/pgSQL - blocks (Tom Lane) + Reduce memory allocated by PL/pgSQL + blocks (Tom Lane) Make the planner more aggressive about extracting restriction clauses - from mixed AND/OR clauses (Tom Lane) + from mixed AND/OR clauses (Tom Lane) - Disallow pushing volatile WHERE clauses down - into DISTINCT subqueries (Tom Lane) + Disallow pushing volatile WHERE clauses down + into DISTINCT subqueries (Tom Lane) - Pushing down a WHERE clause can produce a more + Pushing down a WHERE clause can produce a more efficient plan overall, but at the cost of evaluating the clause more often than is implied by the text of the query; so don't do it if the clause contains any volatile functions. @@ -8934,32 +11245,32 @@ Branch: REL9_4_STABLE [c2b06ab17] 2015-01-30 22:45:58 -0500 - Add system view to - report WAL archiver activity + Add system view to + report WAL archiver activity (Gabriele Bartolini) - Add n_mod_since_analyze columns to - and related system views + Add n_mod_since_analyze columns to + and related system views (Mark Kirkwood) These columns expose the system's estimate of the number of changed - tuples since the table's last . This + tuples since the table's last . 
This estimate drives decisions about when to auto-analyze. - Add backend_xid and backend_xmin - columns to the system view , - and a backend_xmin column to - (Christian Kruse) + Add backend_xid and backend_xmin + columns to the system view , + and a backend_xmin column to + (Christian Kruse) @@ -8968,28 +11279,28 @@ Branch: REL9_4_STABLE [c2b06ab17] 2015-01-30 22:45:58 -0500 - <acronym>SSL</> + <acronym>SSL</acronym> - Add support for SSL ECDH key exchange + Add support for SSL ECDH key exchange (Marko Kreen) This allows use of Elliptic Curve keys for server authentication. - Such keys are faster and have better security than RSA + Such keys are faster and have better security than RSA keys. The new configuration parameter - - controls which curve is used for ECDH. + + controls which curve is used for ECDH. - Improve the default setting + Improve the default setting (Marko Kreen) @@ -8997,29 +11308,29 @@ Branch: REL9_4_STABLE [c2b06ab17] 2015-01-30 22:45:58 -0500 By default, the server not the client now controls the preference - order of SSL ciphers + order of SSL ciphers (Marko Kreen) - Previously, the order specified by + Previously, the order specified by was usually ignored in favor of client-side defaults, which are not - configurable in most PostgreSQL clients. If + configurable in most PostgreSQL clients. If desired, the old behavior can be restored via the new configuration - parameter . + parameter . - Make show SSL + Make show SSL encryption information (Andreas Kunert) - Improve SSL renegotiation handling (Álvaro + Improve SSL renegotiation handling (Álvaro Herrera) @@ -9035,20 +11346,20 @@ Branch: REL9_4_STABLE [c2b06ab17] 2015-01-30 22:45:58 -0500 - Add new SQL command - for changing postgresql.conf configuration file entries + Add new SQL command + for changing postgresql.conf configuration file entries (Amit Kapila) Previously such settings could only be changed by manually - editing postgresql.conf. + editing postgresql.conf. - Add configuration parameter + Add configuration parameter to control the amount of memory used by autovacuum workers (Peter Geoghegan) @@ -9056,7 +11367,7 @@ Branch: REL9_4_STABLE [c2b06ab17] 2015-01-30 22:45:58 -0500 - Add parameter to allow using huge + Add parameter to allow using huge memory pages on Linux (Christian Kruse, Richard Poole, Abhijit Menon-Sen) @@ -9068,7 +11379,7 @@ Branch: REL9_4_STABLE [c2b06ab17] 2015-01-30 22:45:58 -0500 - Add parameter + Add parameter to limit the number of background workers (Robert Haas) @@ -9080,34 +11391,34 @@ Branch: REL9_4_STABLE [c2b06ab17] 2015-01-30 22:45:58 -0500 - Add superuser-only + Add superuser-only parameter to load libraries at session start (Peter Eisentraut) - In contrast to , this + In contrast to , this parameter can load any shared library, not just those in - the $libdir/plugins directory. + the $libdir/plugins directory. - Add parameter to enable WAL + Add parameter to enable WAL logging of hint-bit changes (Sawada Masahiko) Hint bit changes are not normally logged, except when checksums are enabled. This is useful for external tools - like pg_rewind. + like pg_rewind. 
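The elided command name in the configuration-file entry above is presumably ALTER SYSTEM; a minimal sketch under that assumption:

    ALTER SYSTEM SET log_min_duration_statement = '250ms';  -- written to postgresql.auto.conf
    SELECT pg_reload_conf();  -- reload the configuration (some parameters instead need a restart)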
- Increase the default settings of - and by four times (Bruce + Increase the default settings of + and by four times (Bruce Momjian) @@ -9119,7 +11430,7 @@ Branch: REL9_4_STABLE [c2b06ab17] 2015-01-30 22:45:58 -0500 Increase the default setting of + linkend="guc-effective-cache-size"/> to 4GB (Bruce Momjian, Tom Lane) @@ -9127,21 +11438,21 @@ Branch: REL9_4_STABLE [c2b06ab17] 2015-01-30 22:45:58 -0500 Allow printf-style space padding to be - specified in (David Rowley) + specified in (David Rowley) - Allow terabyte units (TB) to be used when specifying + Allow terabyte units (TB) to be used when specifying configuration variable values (Simon Riggs) - Show PIDs of lock holders and waiters and improve - information about relations in + Show PIDs of lock holders and waiters and improve + information about relations in log messages (Christian Kruse) @@ -9153,22 +11464,22 @@ Branch: REL9_4_STABLE [c2b06ab17] 2015-01-30 22:45:58 -0500 - The previous level was LOG, which was too verbose + The previous level was LOG, which was too verbose for libraries loaded per-session. - On Windows, make SQL_ASCII-encoded databases and server - processes (e.g., ) emit messages in + On Windows, make SQL_ASCII-encoded databases and server + processes (e.g., ) emit messages in the character encoding of the server's Windows user locale (Alexander Law, Noah Misch) Previously these messages were output in the Windows - ANSI code page. + ANSI code page. @@ -9192,14 +11503,14 @@ Branch: REL9_4_STABLE [c2b06ab17] 2015-01-30 22:45:58 -0500 Replication slots allow preservation of resources like - WAL files on the primary until they are no longer + WAL files on the primary until they are no longer needed by standby servers. - Add recovery parameter + Add recovery parameter to delay replication (Robert Haas, Fabrízio de Royes Mello, Simon Riggs) @@ -9212,9 +11523,9 @@ Branch: REL9_4_STABLE [c2b06ab17] 2015-01-30 22:45:58 -0500 - Add - option @@ -9226,7 +11537,7 @@ Branch: REL9_4_STABLE [c2b06ab17] 2015-01-30 22:45:58 -0500 The timestamp reported - by pg_last_xact_replay_timestamp() + by pg_last_xact_replay_timestamp() now reflects already-committed records, not transactions about to be committed. Recovering to a restore point now replays the restore point, rather than stopping just before the restore point. @@ -9236,34 +11547,34 @@ Branch: REL9_4_STABLE [c2b06ab17] 2015-01-30 22:45:58 -0500 pg_switch_xlog() - now clears any unused trailing space in the old WAL file + linkend="functions-admin-backup-table">pg_switch_xlog() + now clears any unused trailing space in the old WAL file (Heikki Linnakangas) - This improves the compression ratio for WAL files. + This improves the compression ratio for WAL files. Report failure return codes from external recovery commands + linkend="archive-recovery-settings">external recovery commands (Peter Eisentraut) - Reduce spinlock contention during WAL replay (Heikki + Reduce spinlock contention during WAL replay (Heikki Linnakangas) - Write WAL records of running transactions more + Write WAL records of running transactions more frequently (Andres Freund) @@ -9276,12 +11587,12 @@ Branch: REL9_4_STABLE [c2b06ab17] 2015-01-30 22:45:58 -0500 - <link linkend="logicaldecoding">Logical Decoding</> + <link linkend="logicaldecoding">Logical Decoding</link> Logical decoding allows database changes to be streamed in a configurable format. The data is read from - the WAL and transformed into the + the WAL and transformed into the desired target format. 
To implement this feature, the following changes were made: @@ -9290,7 +11601,7 @@ Branch: REL9_4_STABLE [c2b06ab17] 2015-01-30 22:45:58 -0500 - Add support for logical decoding + Add support for logical decoding of WAL data, to allow database changes to be streamed out in a customizable format (Andres Freund) @@ -9299,8 +11610,8 @@ Branch: REL9_4_STABLE [c2b06ab17] 2015-01-30 22:45:58 -0500 - Add new setting @@ -9308,7 +11619,7 @@ Branch: REL9_4_STABLE [c2b06ab17] 2015-01-30 22:45:58 -0500 Add table-level parameter REPLICA IDENTITY + linkend="catalog-pg-class">REPLICA IDENTITY to control logical replication (Andres Freund) @@ -9316,7 +11627,7 @@ Branch: REL9_4_STABLE [c2b06ab17] 2015-01-30 22:45:58 -0500 Add relation option to identify user-created tables involved in logical change-set encoding (Andres Freund) @@ -9324,15 +11635,15 @@ Branch: REL9_4_STABLE [c2b06ab17] 2015-01-30 22:45:58 -0500 - Add application to receive + Add application to receive logical-decoding data (Andres Freund) - Add module to illustrate logical - decoding at the SQL level (Andres Freund) + Add module to illustrate logical + decoding at the SQL level (Andres Freund) @@ -9350,28 +11661,28 @@ Branch: REL9_4_STABLE [c2b06ab17] 2015-01-30 22:45:58 -0500 Add WITH - ORDINALITY syntax to number the rows returned from a - set-returning function in the FROM clause + ORDINALITY syntax to number the rows returned from a + set-returning function in the FROM clause (Andrew Gierth, David Fetter) This is particularly useful for functions like - unnest(). + unnest(). Add ROWS - FROM() syntax to allow horizontal concatenation of - set-returning functions in the FROM clause (Andrew Gierth) + FROM() syntax to allow horizontal concatenation of + set-returning functions in the FROM clause (Andrew Gierth) - Allow to have + Allow to have an empty target list (Tom Lane) @@ -9383,8 +11694,8 @@ Branch: REL9_4_STABLE [c2b06ab17] 2015-01-30 22:45:58 -0500 - Ensure that SELECT ... FOR UPDATE - NOWAIT does not wait in corner cases involving + Ensure that SELECT ... FOR UPDATE + NOWAIT does not wait in corner cases involving already-concurrently-updated tuples (Craig Ringer and Thomas Munro) @@ -9400,22 +11711,22 @@ Branch: REL9_4_STABLE [c2b06ab17] 2015-01-30 22:45:58 -0500 - Add DISCARD - SEQUENCES command to discard cached sequence-related state + Add DISCARD + SEQUENCES command to discard cached sequence-related state (Fabrízio de Royes Mello, Robert Haas) - DISCARD ALL will now also discard such information. + DISCARD ALL will now also discard such information. - Add FORCE NULL option - to COPY FROM, which + Add FORCE NULL option + to COPY FROM, which causes quoted strings matching the specified null string to be - converted to NULLs in CSV mode (Ian Barwick, Michael + converted to NULLs in CSV mode (Ian Barwick, Michael Paquier) @@ -9433,35 +11744,35 @@ Branch: REL9_4_STABLE [c2b06ab17] 2015-01-30 22:45:58 -0500 New warnings are issued for SET - LOCAL, SET CONSTRAINTS, SET TRANSACTION and - ABORT when used outside a transaction block. + LOCAL, SET CONSTRAINTS, SET TRANSACTION and + ABORT when used outside a transaction block. 
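A minimal sketch of the WITH ORDINALITY and ROWS FROM() syntax described above; the array contents are arbitrary examples:

    SELECT * FROM unnest(ARRAY['a','b','c']) WITH ORDINALITY AS t(elem, ord);

    SELECT * FROM ROWS FROM (unnest(ARRAY[1,2]), generate_series(10,12)) AS t(x, y);
    -- the shorter function's output is padded with NULLs in the concatenated result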
- <xref linkend="SQL-EXPLAIN"> + <xref linkend="sql-explain"/> - Make EXPLAIN ANALYZE show planning time (Andreas + Make EXPLAIN ANALYZE show planning time (Andreas Karlsson) - Make EXPLAIN show the grouping columns in Agg and + Make EXPLAIN show the grouping columns in Agg and Group nodes (Tom Lane) - Make EXPLAIN ANALYZE show exact and lossy + Make EXPLAIN ANALYZE show exact and lossy block counts in bitmap heap scans (Etsuro Fujita) @@ -9477,22 +11788,22 @@ Branch: REL9_4_STABLE [c2b06ab17] 2015-01-30 22:45:58 -0500 - Allow a materialized view + Allow a materialized view to be refreshed without blocking other sessions from reading the view meanwhile (Kevin Grittner) This is done with REFRESH MATERIALIZED - VIEW CONCURRENTLY. + linkend="sql-refreshmaterializedview">REFRESH MATERIALIZED + VIEW CONCURRENTLY. Allow views to be automatically + linkend="sql-createview-updatable-views">automatically updated even if they contain some non-updatable columns (Dean Rasheed) @@ -9500,28 +11811,28 @@ Branch: REL9_4_STABLE [c2b06ab17] 2015-01-30 22:45:58 -0500 Previously the presence of non-updatable output columns such as expressions, literals, and function calls prevented automatic - updates. Now INSERTs, UPDATEs and - DELETEs are supported, provided that they do not + updates. Now INSERTs, UPDATEs and + DELETEs are supported, provided that they do not attempt to assign new values to any of the non-updatable columns. - Allow control over whether INSERTs and - UPDATEs can add rows to an auto-updatable view that + Allow control over whether INSERTs and + UPDATEs can add rows to an auto-updatable view that would not appear in the view (Dean Rasheed) - This is controlled with the new - clause WITH CHECK OPTION. + This is controlled with the new + clause WITH CHECK OPTION. - Allow security barrier views + Allow security barrier views to be automatically updatable (Dean Rasheed) @@ -9539,60 +11850,60 @@ Branch: REL9_4_STABLE [c2b06ab17] 2015-01-30 22:45:58 -0500 - Support triggers on foreign - tables (Ronan Dunklau) + Support triggers on foreign + tables (Ronan Dunklau) Allow moving groups of objects from one tablespace to another - using the ALL IN TABLESPACE ... SET TABLESPACE form of - , , or - (Stephen Frost) + using the ALL IN TABLESPACE ... SET TABLESPACE form of + , , or + (Stephen Frost) Allow changing foreign key constraint deferrability - via ... ALTER - CONSTRAINT (Simon Riggs) + via ... ALTER + CONSTRAINT (Simon Riggs) - Reduce lock strength for some + Reduce lock strength for some commands (Simon Riggs, Noah Misch, Robert Haas) - Specifically, VALIDATE CONSTRAINT, CLUSTER - ON, SET WITHOUT CLUSTER, ALTER COLUMN - SET STATISTICS, ALTER COLUMN SET - Allow tablespace options to be set - in (Vik Fearing) + in (Vik Fearing) Formerly these options could only be set - via . + via . - Allow to define the estimated + Allow to define the estimated size of the aggregate's transition state data (Hadi Moshayedi) @@ -9604,7 +11915,7 @@ Branch: REL9_4_STABLE [c2b06ab17] 2015-01-30 22:45:58 -0500 - Fix DROP IF EXISTS to avoid errors for non-existent + Fix DROP IF EXISTS to avoid errors for non-existent objects in more cases (Pavel Stehule, Dean Rasheed) @@ -9616,7 +11927,7 @@ Branch: REL9_4_STABLE [c2b06ab17] 2015-01-30 22:45:58 -0500 - Previously, relations once moved into the pg_catalog + Previously, relations once moved into the pg_catalog schema could no longer be modified or dropped. 
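An illustrative sketch of two of the view-related items above; the orders table and its columns are assumed only for this example:

    -- Concurrent refresh requires a unique index on the materialized view
    CREATE MATERIALIZED VIEW order_totals AS
      SELECT customer_id, sum(amount) AS total FROM orders GROUP BY customer_id;
    CREATE UNIQUE INDEX ON order_totals (customer_id);
    REFRESH MATERIALIZED VIEW CONCURRENTLY order_totals;

    -- WITH CHECK OPTION rejects inserts/updates that would not be visible through the view
    CREATE VIEW open_orders AS
      SELECT * FROM orders WHERE status = 'open'
      WITH CHECK OPTION;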
@@ -9633,14 +11944,14 @@ Branch: REL9_4_STABLE [c2b06ab17] 2015-01-30 22:45:58 -0500 Fully implement the line data type (Peter + linkend="datatype-line">line data type (Peter Eisentraut) - The line segment data type (lseg) has always been - fully supported. The previous line data type (which was + The line segment data type (lseg) has always been + fully supported. The previous line data type (which was enabled only via a compile-time option) is not binary or dump-compatible with the new implementation. @@ -9648,17 +11959,17 @@ Branch: REL9_4_STABLE [c2b06ab17] 2015-01-30 22:45:58 -0500 - Add pg_lsn - data type to represent a WAL log sequence number - (LSN) (Robert Haas, Michael Paquier) + Add pg_lsn + data type to represent a WAL log sequence number + (LSN) (Robert Haas, Michael Paquier) Allow single-point polygons to be converted - to circles + linkend="datatype-polygon">polygons to be converted + to circles (Bruce Momjian) @@ -9670,31 +11981,31 @@ Branch: REL9_4_STABLE [c2b06ab17] 2015-01-30 22:45:58 -0500 - Previously, PostgreSQL assumed that the UTC offset - associated with a time zone abbreviation (such as EST) + Previously, PostgreSQL assumed that the UTC offset + associated with a time zone abbreviation (such as EST) never changes in the usage of any particular locale. However this assumption fails in the real world, so introduce the ability for a zone abbreviation to represent a UTC offset that sometimes changes. Update the zone abbreviation definition files to make use of this feature in timezone locales that have changed the UTC offset of their abbreviations since 1970 (according to the IANA timezone database). - In such timezones, PostgreSQL will now associate the + In such timezones, PostgreSQL will now associate the correct UTC offset with the abbreviation depending on the given date. - Allow 5+ digit years for non-ISO timestamp and - date strings, where appropriate (Bruce Momjian) + Allow 5+ digit years for non-ISO timestamp and + date strings, where appropriate (Bruce Momjian) Add checks for overflow/underflow of interval values + linkend="datatype-datetime">interval values (Bruce Momjian) @@ -9702,14 +12013,14 @@ Branch: REL9_4_STABLE [c2b06ab17] 2015-01-30 22:45:58 -0500 - <link linkend="datatype-json"><acronym>JSON</></link> + <link linkend="datatype-json"><acronym>JSON</acronym></link> - Add jsonb, a more - capable and efficient data type for storing JSON data + Add jsonb, a more + capable and efficient data type for storing JSON data (Oleg Bartunov, Teodor Sigaev, Alexander Korotkov, Peter Geoghegan, Andrew Dunstan) @@ -9717,9 +12028,9 @@ Branch: REL9_4_STABLE [c2b06ab17] 2015-01-30 22:45:58 -0500 This new type allows faster access to values within a JSON document, and faster and more useful indexing of JSON columns. - Scalar values in jsonb documents are stored as appropriate + Scalar values in jsonb documents are stored as appropriate scalar SQL types, and the JSON document structure is pre-parsed - rather than being stored as text as in the original json + rather than being stored as text as in the original json data type. @@ -9732,18 +12043,18 @@ Branch: REL9_4_STABLE [c2b06ab17] 2015-01-30 22:45:58 -0500 New functions include json_array_elements_text(), - json_build_array(), json_object(), - json_object_agg(), json_to_record(), - and json_to_recordset(). + linkend="functions-json-processing-table">json_array_elements_text(), + json_build_array(), json_object(), + json_object_agg(), json_to_record(), + and json_to_recordset(). 
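A brief, illustrative sketch of the jsonb type and a few of the JSON construction functions listed above; the literal values are arbitrary:

    SELECT '{"user": "alice", "tags": ["admin", "ops"]}'::jsonb -> 'tags' ->> 1;   -- 'ops'

    SELECT json_build_array(1, 'two', true);
    SELECT json_object_agg(k, v) FROM (VALUES ('a', 1), ('b', 2)) AS kv(k, v);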
Add json_typeof() - to return the data type of a json value (Andrew Tipton) + linkend="functions-json-processing-table">json_typeof() + to return the data type of a json value (Andrew Tipton) @@ -9761,13 +12072,13 @@ Branch: REL9_4_STABLE [c2b06ab17] 2015-01-30 22:45:58 -0500 Add pg_sleep_for(interval) - and pg_sleep_until(timestamp) to specify + linkend="functions-datetime-delay">pg_sleep_for(interval) + and pg_sleep_until(timestamp) to specify delays more flexibly (Vik Fearing, Julien Rouhaud) - The existing pg_sleep() function only supports delays + The existing pg_sleep() function only supports delays specified in seconds. @@ -9775,7 +12086,7 @@ Branch: REL9_4_STABLE [c2b06ab17] 2015-01-30 22:45:58 -0500 Add cardinality() + linkend="array-functions-table">cardinality() function for arrays (Marko Tiikkaja) @@ -9787,7 +12098,7 @@ Branch: REL9_4_STABLE [c2b06ab17] 2015-01-30 22:45:58 -0500 - Add SQL functions to allow large + Add SQL functions to allow large object reads/writes at arbitrary offsets (Pavel Stehule) @@ -9795,7 +12106,7 @@ Branch: REL9_4_STABLE [c2b06ab17] 2015-01-30 22:45:58 -0500 Allow unnest() + linkend="array-functions-table">unnest() to take multiple arguments, which are individually unnested then horizontally concatenated (Andrew Gierth) @@ -9803,36 +12114,36 @@ Branch: REL9_4_STABLE [c2b06ab17] 2015-01-30 22:45:58 -0500 - Add functions to construct times, dates, - timestamps, timestamptzs, and intervals + Add functions to construct times, dates, + timestamps, timestamptzs, and intervals from individual values, rather than strings (Pavel Stehule) - These functions' names are prefixed with make_, - e.g. make_date(). + These functions' names are prefixed with make_, + e.g. make_date(). Make to_char()'s - TZ format specifier return a useful value for simple + linkend="functions-formatting-table">to_char()'s + TZ format specifier return a useful value for simple numeric time zone offsets (Tom Lane) - Previously, to_char(CURRENT_TIMESTAMP, 'TZ') returned - an empty string if the timezone was set to a constant - like -4. + Previously, to_char(CURRENT_TIMESTAMP, 'TZ') returned + an empty string if the timezone was set to a constant + like -4. - Add timezone offset format specifier OF to to_char() + Add timezone offset format specifier OF to to_char() (Bruce Momjian) @@ -9840,7 +12151,7 @@ Branch: REL9_4_STABLE [c2b06ab17] 2015-01-30 22:45:58 -0500 Improve the random seed used for random() + linkend="functions-math-random-table">random() (Honza Horak) @@ -9848,7 +12159,7 @@ Branch: REL9_4_STABLE [c2b06ab17] 2015-01-30 22:45:58 -0500 Tighten validity checking for Unicode code points in chr(int) + linkend="functions-string-other">chr(int) (Tom Lane) @@ -9867,18 +12178,18 @@ Branch: REL9_4_STABLE [c2b06ab17] 2015-01-30 22:45:58 -0500 - Add functions for looking up objects in pg_class, - pg_proc, pg_type, and - pg_operator that do not generate errors for + Add functions for looking up objects in pg_class, + pg_proc, pg_type, and + pg_operator that do not generate errors for non-existent objects (Yugo Nagata, Nozomi Anzai, Robert Haas) For example, to_regclass() - does a lookup in pg_class similarly to - the regclass input function, but it returns NULL for a + linkend="functions-info-catalog-table">to_regclass() + does a lookup in pg_class similarly to + the regclass input function, but it returns NULL for a non-existent object instead of failing. 
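An illustrative sketch of the new constructor and lookup functions mentioned above; the argument values are arbitrary:

    SELECT make_date(2014, 12, 18), make_interval(days => 10);

    SELECT to_regclass('pg_catalog.pg_class');   -- resolves to the relation's OID
    SELECT to_regclass('no_such_table');         -- NULL instead of an error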
@@ -9886,7 +12197,7 @@ Branch: REL9_4_STABLE [c2b06ab17] 2015-01-30 22:45:58 -0500 Add function pg_filenode_relation() + linkend="functions-admin-dblocation">pg_filenode_relation() to allow for more efficient lookup of relation names from filenodes (Andres Freund) @@ -9894,8 +12205,8 @@ Branch: REL9_4_STABLE [c2b06ab17] 2015-01-30 22:45:58 -0500 - Add parameter_default column to information_schema.parameters + Add parameter_default column to information_schema.parameters view (Peter Eisentraut) @@ -9903,7 +12214,7 @@ Branch: REL9_4_STABLE [c2b06ab17] 2015-01-30 22:45:58 -0500 Make information_schema.schemata + linkend="infoschema-schemata">information_schema.schemata show all accessible schemas (Peter Eisentraut) @@ -9925,7 +12236,7 @@ Branch: REL9_4_STABLE [c2b06ab17] 2015-01-30 22:45:58 -0500 Add control over which rows are passed into aggregate functions via the FILTER clause + linkend="syntax-aggregates">FILTER clause (David Fetter) @@ -9933,7 +12244,7 @@ Branch: REL9_4_STABLE [c2b06ab17] 2015-01-30 22:45:58 -0500 Support ordered-set (WITHIN GROUP) + linkend="syntax-aggregates">WITHIN GROUP) aggregates (Atri Sharma, Andrew Gierth, Tom Lane) @@ -9941,11 +12252,11 @@ Branch: REL9_4_STABLE [c2b06ab17] 2015-01-30 22:45:58 -0500 Add standard ordered-set aggregates percentile_cont(), - percentile_disc(), mode(), rank(), - dense_rank(), percent_rank(), and - cume_dist() + linkend="functions-orderedset-table">percentile_cont(), + percentile_disc(), mode(), rank(), + dense_rank(), percent_rank(), and + cume_dist() (Atri Sharma, Andrew Gierth) @@ -9953,7 +12264,7 @@ Branch: REL9_4_STABLE [c2b06ab17] 2015-01-30 22:45:58 -0500 Support VARIADIC + linkend="xfunc-sql-variadic-functions">VARIADIC aggregate functions (Tom Lane) @@ -9965,7 +12276,7 @@ Branch: REL9_4_STABLE [c2b06ab17] 2015-01-30 22:45:58 -0500 This allows proper declaration in SQL of aggregates like the built-in - aggregate array_agg(). + aggregate array_agg(). @@ -9982,20 +12293,20 @@ Branch: REL9_4_STABLE [c2b06ab17] 2015-01-30 22:45:58 -0500 - Add event trigger support to PL/Perl - and PL/Tcl (Dimitri Fontaine) + Add event trigger support to PL/Perl + and PL/Tcl (Dimitri Fontaine) - Convert numeric - values to decimal in PL/Python + Convert numeric + values to decimal in PL/Python (Szymon Guz, Ronan Dunklau) - Previously such values were converted to Python float values, + Previously such values were converted to Python float values, risking loss of precision. 
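A short sketch of the FILTER and WITHIN GROUP aggregate syntax described above; the orders table and amount column are assumed only for this example:

    SELECT count(*) FILTER (WHERE amount > 100)                AS large_orders,
           percentile_cont(0.5) WITHIN GROUP (ORDER BY amount) AS median_amount
    FROM orders;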
@@ -10011,7 +12322,7 @@ Branch: REL9_4_STABLE [c2b06ab17] 2015-01-30 22:45:58 -0500 Add ability to retrieve the current PL/pgSQL call stack using GET - DIAGNOSTICS + DIAGNOSTICS (Pavel Stehule, Stephen Frost) @@ -10019,17 +12330,17 @@ Branch: REL9_4_STABLE [c2b06ab17] 2015-01-30 22:45:58 -0500 Add option to display the parameters passed to a query that violated a - STRICT constraint (Marko Tiikkaja) + STRICT constraint (Marko Tiikkaja) Add variables plpgsql.extra_warnings - and plpgsql.extra_errors to enable additional PL/pgSQL + linkend="plpgsql-extra-checks">plpgsql.extra_warnings + and plpgsql.extra_errors to enable additional PL/pgSQL warnings and errors (Marko Tiikkaja, Petr Jelinek) @@ -10045,13 +12356,13 @@ Branch: REL9_4_STABLE [c2b06ab17] 2015-01-30 22:45:58 -0500 - <link linkend="libpq"><application>libpq</></link> + <link linkend="libpq"><application>libpq</application></link> Make libpq's PQconndefaults() + linkend="libpq-pqconndefaults">PQconndefaults() function ignore invalid service files (Steve Singer, Bruce Momjian) @@ -10063,7 +12374,7 @@ Branch: REL9_4_STABLE [c2b06ab17] 2015-01-30 22:45:58 -0500 - Accept TLS protocol versions beyond TLSv1 + Accept TLS protocol versions beyond TLSv1 in libpq (Marko Kreen) @@ -10079,15 +12390,15 @@ Branch: REL9_4_STABLE [c2b06ab17] 2015-01-30 22:45:58 -0500 - Add option - Add - option @@ -10098,15 +12409,15 @@ Branch: REL9_4_STABLE [c2b06ab17] 2015-01-30 22:45:58 -0500 - Make pg_resetxlog - with option output current and potentially changed values (Rajeev Rastogi) - Make throw error for incorrect locale + Make throw error for incorrect locale settings, rather than silently falling back to a default choice (Tom Lane) @@ -10114,21 +12425,21 @@ Branch: REL9_4_STABLE [c2b06ab17] 2015-01-30 22:45:58 -0500 - Make return exit code 4 for + Make return exit code 4 for an inaccessible data directory (Amit Kapila, Bruce Momjian) This behavior more closely matches the Linux Standard Base - (LSB) Core Specification. + (LSB) Core Specification. 
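A minimal PL/pgSQL sketch of retrieving the call stack with GET DIAGNOSTICS, as described above; the function name is arbitrary:

    CREATE OR REPLACE FUNCTION show_context() RETURNS text AS $$
    DECLARE
      stack text;
    BEGIN
      GET DIAGNOSTICS stack = PG_CONTEXT;   -- current PL/pgSQL call stack as text
      RETURN stack;
    END;
    $$ LANGUAGE plpgsql;

    SELECT show_context();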
- On Windows, ensure that a non-absolute path specification is interpreted relative - to 's current directory + to 's current directory (Kumar Rajeev Rastogi) @@ -10140,7 +12451,7 @@ Branch: REL9_4_STABLE [c2b06ab17] 2015-01-30 22:45:58 -0500 - Allow sizeof() in ECPG + Allow sizeof() in ECPG C array definitions (Michael Meskes) @@ -10148,7 +12459,7 @@ Branch: REL9_4_STABLE [c2b06ab17] 2015-01-30 22:45:58 -0500 Make ECPG properly handle nesting - of C-style comments in both C and SQL text + of C-style comments in both C and SQL text (Michael Meskes) @@ -10156,21 +12467,21 @@ Branch: REL9_4_STABLE [c2b06ab17] 2015-01-30 22:45:58 -0500 - <xref linkend="APP-PSQL"> + <xref linkend="app-psql"/> - Suppress No rows output in psql mode when the footer is disabled (Bruce Momjian) - Allow Control-C to abort psql when it's hung at + Allow Control-C to abort psql when it's hung at connection startup (Peter Eisentraut) @@ -10178,28 +12489,28 @@ Branch: REL9_4_STABLE [c2b06ab17] 2015-01-30 22:45:58 -0500 - <link linkend="APP-PSQL-meta-commands">Backslash Commands</link> + <link linkend="app-psql-meta-commands">Backslash Commands</link> - Make psql's \db+ show tablespace options + Make psql's \db+ show tablespace options (Magnus Hagander) - Make \do+ display the functions + Make \do+ display the functions that implement the operators (Marko Tiikkaja) - Make \d+ output an - OID line only if an oid column + Make \d+ output an + OID line only if an oid column exists in the table (Bruce Momjian) @@ -10211,7 +12522,7 @@ Branch: REL9_4_STABLE [c2b06ab17] 2015-01-30 22:45:58 -0500 - Make \d show disabled system triggers (Bruce + Make \d show disabled system triggers (Bruce Momjian) @@ -10223,55 +12534,55 @@ Branch: REL9_4_STABLE [c2b06ab17] 2015-01-30 22:45:58 -0500 - Fix \copy to no longer require - a space between stdin and a semicolon (Etsuro Fujita) + Fix \copy to no longer require + a space between stdin and a semicolon (Etsuro Fujita) - Output the row count at the end of \copy, just - like COPY already did (Kumar Rajeev Rastogi) + Output the row count at the end of \copy, just + like COPY already did (Kumar Rajeev Rastogi) - Fix \conninfo to display the - server's IP address for connections using - hostaddr (Fujii Masao) + Fix \conninfo to display the + server's IP address for connections using + hostaddr (Fujii Masao) - Previously \conninfo could not display the server's - IP address in such cases. + Previously \conninfo could not display the server's + IP address in such cases. - Show the SSL protocol version in - \conninfo (Marko Kreen) + Show the SSL protocol version in + \conninfo (Marko Kreen) - Add tab completion for \pset + Add tab completion for \pset (Pavel Stehule) - Allow \pset with no arguments + Allow \pset with no arguments to show all settings (Gilles Darold) - Make \s display the name of the history file it wrote + Make \s display the name of the history file it wrote without converting it to an absolute path (Tom Lane) @@ -10288,14 +12599,14 @@ Branch: REL9_4_STABLE [c2b06ab17] 2015-01-30 22:45:58 -0500 - <xref linkend="APP-PGDUMP"> + <xref linkend="app-pgdump"/> - Allow options - @@ -10306,17 +12617,17 @@ Branch: REL9_4_STABLE [c2b06ab17] 2015-01-30 22:45:58 -0500 - Optionally add IF EXISTS clauses to the DROP + Optionally add IF EXISTS clauses to the DROP commands emitted when removing old objects during a restore (Pavel Stehule) This change prevents unnecessary errors when removing old objects. 
- The new @@ -10325,26 +12636,26 @@ Branch: REL9_4_STABLE [c2b06ab17] 2015-01-30 22:45:58 -0500 - <xref linkend="app-pgbasebackup"> + <xref linkend="app-pgbasebackup"/> - Add pg_basebackup option - Allow pg_basebackup to relocate tablespaces in + Allow pg_basebackup to relocate tablespaces in the backup copy (Steeve Lennmark) - This is particularly useful for using pg_basebackup + This is particularly useful for using pg_basebackup on the same machine as the primary. @@ -10355,8 +12666,8 @@ Branch: REL9_4_STABLE [c2b06ab17] 2015-01-30 22:45:58 -0500 - This can be controlled with the pg_basebackup - parameter. @@ -10387,13 +12698,13 @@ Branch: REL9_4_STABLE [c2b06ab17] 2015-01-30 22:45:58 -0500 No longer require function prototypes for functions marked with the - PG_FUNCTION_INFO_V1 + PG_FUNCTION_INFO_V1 macro (Peter Eisentraut) This change eliminates the need to write boilerplate prototypes. - Note that the PG_FUNCTION_INFO_V1 macro must appear + Note that the PG_FUNCTION_INFO_V1 macro must appear before the corresponding function definition to avoid compiler warnings. @@ -10401,41 +12712,41 @@ Branch: REL9_4_STABLE [c2b06ab17] 2015-01-30 22:45:58 -0500 - Remove SnapshotNow and - HeapTupleSatisfiesNow() (Robert Haas) + Remove SnapshotNow and + HeapTupleSatisfiesNow() (Robert Haas) All existing uses have been switched to more appropriate snapshot - types. Catalog scans now use MVCC snapshots. + types. Catalog scans now use MVCC snapshots. - Add an API to allow memory allocations over one gigabyte + Add an API to allow memory allocations over one gigabyte (Noah Misch) - Add psprintf() to simplify memory allocation during + Add psprintf() to simplify memory allocation during string composition (Peter Eisentraut, Tom Lane) - Support printf() size modifier z to - print size_t values (Andres Freund) + Support printf() size modifier z to + print size_t values (Andres Freund) - Change API of appendStringInfoVA() - to better use vsnprintf() (David Rowley, Tom Lane) + Change API of appendStringInfoVA() + to better use vsnprintf() (David Rowley, Tom Lane) @@ -10455,7 +12766,7 @@ Branch: REL9_4_STABLE [c2b06ab17] 2015-01-30 22:45:58 -0500 - Improve spinlock speed on x86_64 CPUs (Heikki + Improve spinlock speed on x86_64 CPUs (Heikki Linnakangas) @@ -10463,56 +12774,56 @@ Branch: REL9_4_STABLE [c2b06ab17] 2015-01-30 22:45:58 -0500 Remove spinlock support for unsupported platforms - SINIX, Sun3, and - NS32K (Robert Haas) + SINIX, Sun3, and + NS32K (Robert Haas) - Remove IRIX port (Robert Haas) + Remove IRIX port (Robert Haas) Reduce the number of semaphores required by - builds (Robert Haas) - Rewrite duplicate_oids Unix shell script in - Perl (Andrew Dunstan) + Rewrite duplicate_oids Unix shell script in + Perl (Andrew Dunstan) - Add Test Anything Protocol (TAP) tests for client + Add Test Anything Protocol (TAP) tests for client programs (Peter Eisentraut) - Currently, these tests are run by make check-world - only if the - Add make targets and + , which allow selection of individual tests to be run (Andrew Dunstan) - Remove makefile rule (Peter Eisentraut) @@ -10522,7 +12833,7 @@ Branch: REL9_4_STABLE [c2b06ab17] 2015-01-30 22:45:58 -0500 - Improve support for VPATH builds of PGXS + Improve support for VPATH builds of PGXS modules (Cédric Villemain, Andrew Dunstan, Peter Eisentraut) @@ -10535,8 +12846,8 @@ Branch: REL9_4_STABLE [c2b06ab17] 2015-01-30 22:45:58 -0500 - Add a configure flag that appends custom text to the - PG_VERSION string (Oskari Saarenmaa) + Add a configure flag that appends custom 
text to the + PG_VERSION string (Oskari Saarenmaa) @@ -10546,46 +12857,46 @@ Branch: REL9_4_STABLE [c2b06ab17] 2015-01-30 22:45:58 -0500 - Improve DocBook XML validity (Peter Eisentraut) + Improve DocBook XML validity (Peter Eisentraut) Fix various minor security and sanity issues reported by the - Coverity scanner (Stephen Frost) + Coverity scanner (Stephen Frost) Improve detection of invalid memory usage when testing - PostgreSQL with Valgrind + PostgreSQL with Valgrind (Noah Misch) - Improve sample Emacs configuration file - emacs.samples (Peter Eisentraut) + Improve sample Emacs configuration file + emacs.samples (Peter Eisentraut) - Also add .dir-locals.el to the top of the source tree. + Also add .dir-locals.el to the top of the source tree. - Allow pgindent to accept a command-line list + Allow pgindent to accept a command-line list of typedefs (Bruce Momjian) - Make pgindent smarter about blank lines + Make pgindent smarter about blank lines around preprocessor conditionals (Bruce Momjian) @@ -10593,14 +12904,14 @@ Branch: REL9_4_STABLE [c2b06ab17] 2015-01-30 22:45:58 -0500 Avoid most uses of dlltool - in Cygwin and - Mingw builds (Marco Atzeri, Hiroshi Inoue) + in Cygwin and + Mingw builds (Marco Atzeri, Hiroshi Inoue) - Support client-only installs in MSVC (Windows) builds + Support client-only installs in MSVC (Windows) builds (MauMau) @@ -10616,7 +12927,7 @@ Branch: REL9_4_STABLE [c2b06ab17] 2015-01-30 22:45:58 -0500 - Add extension to preload relation data + Add extension to preload relation data into the shared buffer cache at server start (Robert Haas) @@ -10627,26 +12938,26 @@ Branch: REL9_4_STABLE [c2b06ab17] 2015-01-30 22:45:58 -0500 - Add UUID random number generator - gen_random_uuid() to + Add UUID random number generator + gen_random_uuid() to (Oskari Saarenmaa) - This allows creation of version 4 UUIDs without - requiring installation of . + This allows creation of version 4 UUIDs without + requiring installation of . - Allow to work with - the BSD or e2fsprogs UUID libraries, - not only the OSSP UUID library (Matteo Beccati) + Allow to work with + the BSD or e2fsprogs UUID libraries, + not only the OSSP UUID library (Matteo Beccati) - This improves the uuid-ossp module's portability + This improves the uuid-ossp module's portability since it no longer has to have the increasingly-obsolete OSSP library. The module's name is now rather a misnomer, but we won't change it. 
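An illustrative sketch of the new pgcrypto UUID generator and the pg_prewarm relation-preloading module mentioned above; my_table is an assumed relation name:

    CREATE EXTENSION IF NOT EXISTS pgcrypto;
    SELECT gen_random_uuid();                  -- version 4 UUID without uuid-ossp

    CREATE EXTENSION IF NOT EXISTS pg_prewarm;
    SELECT pg_prewarm('my_table');             -- load the relation into shared buffers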
@@ -10655,21 +12966,21 @@ Branch: REL9_4_STABLE [c2b06ab17] 2015-01-30 22:45:58 -0500 - Add option to to include trigger + Add option to to include trigger execution time (Horiguchi Kyotaro) - Fix to not report rows from + Fix to not report rows from uncommitted transactions as dead (Robert Haas) - Make functions + Make functions use regclass-type arguments (Satoshi Nagayasu) @@ -10681,14 +12992,14 @@ Branch: REL9_4_STABLE [c2b06ab17] 2015-01-30 22:45:58 -0500 - Improve consistency of output to honor + Improve consistency of output to honor snapshot rules more consistently (Robert Haas) - Improve 's choice of trigrams for indexed + Improve 's choice of trigrams for indexed regular expression searches (Alexander Korotkov) @@ -10700,15 +13011,15 @@ Branch: REL9_4_STABLE [c2b06ab17] 2015-01-30 22:45:58 -0500 - Allow pg_xlogdump - to report a live log stream with (Heikki Linnakangas) - Store data more compactly (Stas Kelvich) + Store data more compactly (Stas Kelvich) @@ -10719,7 +13030,7 @@ Branch: REL9_4_STABLE [c2b06ab17] 2015-01-30 22:45:58 -0500 - Reduce client-side memory usage by using + Reduce client-side memory usage by using a cursor (Andrew Dunstan) @@ -10727,13 +13038,13 @@ Branch: REL9_4_STABLE [c2b06ab17] 2015-01-30 22:45:58 -0500 Dramatically reduce memory consumption - in (Bruce Momjian) + in (Bruce Momjian) - Pass 's user name ( @@ -10741,37 +13052,37 @@ Branch: REL9_4_STABLE [c2b06ab17] 2015-01-30 22:45:58 -0500 - <xref linkend="pgbench"> + <xref linkend="pgbench"/> - Remove line length limit for pgbench scripts (Sawada + Remove line length limit for pgbench scripts (Sawada Masahiko) - The previous line limit was BUFSIZ. + The previous line limit was BUFSIZ. - Add long option names to pgbench (Fabien Coelho) + Add long option names to pgbench (Fabien Coelho) - Add pgbench option to control the transaction rate (Fabien Coelho) - Add pgbench option to print periodic progress reports (Fabien Coelho) @@ -10782,13 +13093,13 @@ Branch: REL9_4_STABLE [c2b06ab17] 2015-01-30 22:45:58 -0500 - <xref linkend="pgstatstatements"> + <xref linkend="pgstatstatements"/> - Make pg_stat_statements use a file, rather than + Make pg_stat_statements use a file, rather than shared memory, for query text storage (Peter Geoghegan) @@ -10800,7 +13111,7 @@ Branch: REL9_4_STABLE [c2b06ab17] 2015-01-30 22:45:58 -0500 - Allow reporting of pg_stat_statements's internal + Allow reporting of pg_stat_statements's internal query hash identifier (Daniel Farina, Sameer Thakur, Peter Geoghegan) @@ -10808,7 +13119,7 @@ Branch: REL9_4_STABLE [c2b06ab17] 2015-01-30 22:45:58 -0500 - Add the ability to retrieve all pg_stat_statements + Add the ability to retrieve all pg_stat_statements information except the query text (Peter Geoghegan) @@ -10821,20 +13132,20 @@ Branch: REL9_4_STABLE [c2b06ab17] 2015-01-30 22:45:58 -0500 - Make pg_stat_statements ignore DEALLOCATE + Make pg_stat_statements ignore DEALLOCATE commands (Fabien Coelho) - It already ignored PREPARE, as well as planning time in + It already ignored PREPARE, as well as planning time in general, so this seems more consistent. 
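A short sketch of querying pg_stat_statements, including the newly exposed query hash identifier noted above; this assumes the extension is already loaded via shared_preload_libraries:

    CREATE EXTENSION IF NOT EXISTS pg_stat_statements;

    SELECT queryid, calls, total_time, rows
    FROM pg_stat_statements
    ORDER BY total_time DESC
    LIMIT 5;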
- Save the statistics file into $PGDATA/pg_stat at server - shutdown, rather than $PGDATA/global (Fujii Masao) + Save the statistics file into $PGDATA/pg_stat at server + shutdown, rather than $PGDATA/global (Fujii Masao) diff --git a/doc/src/sgml/release-9.5.sgml b/doc/src/sgml/release-9.5.sgml index ceece4b8a5..1324bc09f9 100644 --- a/doc/src/sgml/release-9.5.sgml +++ b/doc/src/sgml/release-9.5.sgml @@ -1,6 +1,2679 @@ + + Release 9.5.15 + + + Release date: + 2018-11-08 + + + + This release contains a variety of fixes from 9.5.14. + For information about new features in the 9.5 major release, see + . + + + + Migration to Version 9.5.15 + + + A dump/restore is not required for those running 9.5.X. + + + + However, if you are upgrading from a version earlier than 9.5.13, + see . + + + + + Changes + + + + + + Fix corner-case failures + in has_foo_privilege() + family of functions (Tom Lane) + + + + Return NULL rather than throwing an error when an invalid object OID + is provided. Some of these functions got that right already, but not + all. has_column_privilege() was additionally + capable of crashing on some platforms. + + + + + + Avoid O(N^2) slowdown in regular expression match/split functions on + long strings (Andrew Gierth) + + + + + + Fix parsing of standard multi-character operators that are immediately + followed by a comment or + or - + (Andrew Gierth) + + + + This oversight could lead to parse errors, or to incorrect assignment + of precedence. + + + + + + Avoid O(N^3) slowdown in lexer for long strings + of + or - characters + (Andrew Gierth) + + + + + + Fix mis-execution of SubPlans when the outer query is being scanned + backwards (Andrew Gierth) + + + + + + Fix failure of UPDATE/DELETE ... WHERE CURRENT OF ... + after rewinding the referenced cursor (Tom Lane) + + + + A cursor that scans multiple relations (particularly an inheritance + tree) could produce wrong behavior if rewound to an earlier relation. + + + + + + Fix EvalPlanQual to handle conditionally-executed + InitPlans properly (Andrew Gierth, Tom Lane) + + + + This resulted in hard-to-reproduce crashes or wrong answers in + concurrent updates, if they contained code such as an uncorrelated + sub-SELECT inside a CASE + construct. + + + + + + Fix character-class checks to not fail on Windows for Unicode + characters above U+FFFF (Tom Lane, Kenji Uno) + + + + This bug affected full-text-search operations, as well + as contrib/ltree + and contrib/pg_trgm. + + + + + + Ensure that sequences owned by a foreign table are processed + by ALTER OWNER on the table (Peter Eisentraut) + + + + The ownership change should propagate to such sequences as well, but + this was missed for foreign tables. + + + + + + Ensure that the server will process + already-received NOTIFY + and SIGTERM interrupts before waiting for client + input (Jeff Janes, Tom Lane) + + + + + + Fix over-allocation of space for array_out()'s + result string (Keiichi Hirobe) + + + + + + Fix memory leak in repeated SP-GiST index scans (Tom Lane) + + + + This is only known to amount to anything significant in cases where + an exclusion constraint using SP-GiST receives many new index entries + in a single command. + + + + + + Ensure that ApplyLogicalMappingFile() closes the + mapping file when done with it (Tomas Vondra) + + + + Previously, the file descriptor was leaked, eventually resulting in + failures during logical decoding. + + + + + + Fix logical decoding to handle cases where a mapped catalog table is + repeatedly rewritten, e.g. 
by VACUUM FULL + (Andres Freund) + + + + + + Prevent starting the server with wal_level set + to too low a value to support an existing replication slot (Andres + Freund) + + + + + + Avoid crash if a utility command causes infinite recursion (Tom Lane) + + + + + + When initializing a hot standby, cope with duplicate XIDs caused by + two-phase transactions on the master + (Michael Paquier, Konstantin Knizhnik) + + + + + + Fix event triggers to handle nested ALTER TABLE + commands (Michael Paquier, Álvaro Herrera) + + + + + + Propagate parent process's transaction and statement start timestamps + to parallel workers (Konstantin Knizhnik) + + + + This prevents misbehavior of functions such + as transaction_timestamp() when executed in a + worker. + + + + + + Fix WAL file recycling logic to work correctly on standby servers + (Michael Paquier) + + + + Depending on the setting of archive_mode, a standby + might fail to remove some WAL files that could be removed. + + + + + + Fix handling of commit-timestamp tracking during recovery + (Masahiko Sawasa, Michael Paquier) + + + + If commit timestamp tracking has been turned on or off, recovery might + fail due to trying to fetch the commit timestamp for a transaction + that did not record it. + + + + + + Randomize the random() seed in bootstrap and + standalone backends, and in initdb + (Noah Misch) + + + + The main practical effect of this change is that it avoids a scenario + where initdb might mistakenly conclude that + POSIX shared memory is not available, due to name collisions caused by + always using the same random seed. + + + + + + Allow DSM allocation to be interrupted (Chris Travers) + + + + + + Properly handle turning full_page_writes on + dynamically (Kyotaro Horiguchi) + + + + + + Avoid possible buffer overrun when replaying GIN page recompression + from WAL (Alexander Korotkov, Sivasubramanian Ramasubramanian) + + + + + + Fix missed fsync of a replication slot's directory (Konstantin + Knizhnik, Michael Paquier) + + + + + + Fix unexpected timeouts when + using wal_sender_timeout on a slow server + (Noah Misch) + + + + + + Ensure that hot standby processes use the correct WAL consistency + point (Alexander Kukushkin, Michael Paquier) + + + + This prevents possible misbehavior just after a standby server has + reached a consistent database state during WAL replay. + + + + + + Ensure background workers are stopped properly when the postmaster + receives a fast-shutdown request before completing database startup + (Alexander Kukushkin) + + + + + + Don't run atexit callbacks when servicing SIGQUIT + (Heikki Linnakangas) + + + + + + Don't record foreign-server user mappings as members of extensions + (Tom Lane) + + + + If CREATE USER MAPPING is executed in an extension + script, an extension dependency was created for the user mapping, + which is unexpected. Roles can't be extension members, so user + mappings shouldn't be either. + + + + + + Make syslogger more robust against failures in opening CSV log files + (Tom Lane) + + + + + + Fix psql, as well as documentation + examples, to call PQconsumeInput() before + each PQnotifies() call (Tom Lane) + + + + This fixes cases in which psql would not + report receipt of a NOTIFY message until after the + next command. 
+ + + + + + Fix possible inconsistency in pg_dump's + sorting of dissimilar object names (Jacob Champion) + + + + + + Ensure that pg_restore will schema-qualify + the table name when + emitting DISABLE/ENABLE TRIGGER + commands (Tom Lane) + + + + This avoids failures due to the new policy of running restores with + restrictive search path. + + + + + + Fix pg_upgrade to handle event triggers in + extensions correctly (Haribabu Kommi) + + + + pg_upgrade failed to preserve an event + trigger's extension-membership status. + + + + + + Fix pg_upgrade's cluster state check to + work correctly on a standby server (Bruce Momjian) + + + + + + Enforce type cube's dimension limit in + all contrib/cube functions (Andrey Borodin) + + + + Previously, some cube-related functions could construct values that + would be rejected by cube_in(), leading to + dump/reload failures. + + + + + + Fix contrib/unaccent's + unaccent() function to use + the unaccent text search dictionary that is in the + same schema as the function (Tom Lane) + + + + Previously it tried to look up the dictionary using the search path, + which could fail if the search path has a restrictive value. + + + + + + Fix build problems on macOS 10.14 (Mojave) (Tom Lane) + + + + Adjust configure to add + an switch to CPPFLAGS; + without this, PL/Perl and PL/Tcl fail to configure or build on macOS + 10.14. The specific sysroot used can be overridden at configure time + or build time by setting the PG_SYSROOT variable in + the arguments of configure + or make. + + + + It is now recommended that Perl-related extensions + write $(perl_includespec) rather + than -I$(perl_archlibexp)/CORE in their compiler + flags. The latter continues to work on most platforms, but not recent + macOS. + + + + Also, it should no longer be necessary to + specify manually to get PL/Tcl to + build on recent macOS releases. + + + + + + Fix MSVC build and regression-test scripts to work on recent Perl + versions (Andrew Dunstan) + + + + Perl no longer includes the current directory in its search path + by default; work around that. + + + + + + On Windows, allow the regression tests to be run by an Administrator + account (Andrew Dunstan) + + + + To do this safely, pg_regress now gives up + any such privileges at startup. + + + + + + + Support building on Windows with Visual Studio 2015 or Visual Studio 2017 + (Michael Paquier, Haribabu Kommi) + + + + + + Allow btree comparison functions to return INT_MIN + (Tom Lane) + + + + Up to now, we've forbidden datatype-specific comparison functions from + returning INT_MIN, which allows callers to invert + the sort order just by negating the comparison result. However, this + was never safe for comparison functions that directly return the + result of memcmp(), strcmp(), + etc, as POSIX doesn't place any such restriction on those functions. + At least some recent versions of memcmp() can + return INT_MIN, causing incorrect sort ordering. + Hence, we've removed this restriction. Callers must now use + the INVERT_COMPARE_RESULT() macro if they wish to + invert the sort order. + + + + + + Fix recursion hazard in shared-invalidation message processing + (Tom Lane) + + + + This error could, for example, result in failure to access a system + catalog or index that had just been processed by VACUUM + FULL. + + + + This change adds a new result code + for LockAcquire, which might possibly affect + external callers of that function, though only very unusual usage + patterns would have an issue with it. 
The API + of LockAcquireExtended is also changed. + + + + + + Save and restore SPI's global variables + during SPI_connect() + and SPI_finish() (Chapman Flack, Tom Lane) + + + + This prevents possible interference when one SPI-using function calls + another. + + + + + + + Provide ALLOCSET_DEFAULT_SIZES and sibling macros + in back branches (Tom Lane) + + + + These macros have existed since 9.6, but there were requests to add + them to older branches to allow extensions to rely on them without + branch-specific coding. + + + + + + Avoid using potentially-under-aligned page buffers (Tom Lane) + + + + Invent new union types PGAlignedBlock + and PGAlignedXLogBlock, and use these in place of plain + char arrays, ensuring that the compiler can't place the buffer at a + misaligned start address. This fixes potential core dumps on + alignment-picky platforms, and may improve performance even on + platforms that allow misalignment. + + + + + + Make src/port/snprintf.c follow the C99 + standard's definition of snprintf()'s result + value (Tom Lane) + + + + On platforms where this code is used (mostly Windows), its pre-C99 + behavior could lead to failure to detect buffer overrun, if the + calling code assumed C99 semantics. + + + + + + When building on i386 with the clang + compiler, require to be used (Andres Freund) + + + + This avoids problems with missed floating point overflow checks. + + + + + + Fix configure's detection of the result + type of strerror_r() (Tom Lane) + + + + The previous coding got the wrong answer when building + with icc on Linux (and perhaps in other + cases), leading to libpq not returning + useful error messages for system-reported errors. + + + + + + Update time zone data files to tzdata + release 2018g for DST law changes in Chile, Fiji, Morocco, and Russia + (Volgograd), plus historical corrections for China, Hawaii, Japan, + Macau, and North Korea. + + + + + + + + + + Release 9.5.14 + + + Release date: + 2018-08-09 + + + + This release contains a variety of fixes from 9.5.13. + For information about new features in the 9.5 major release, see + . + + + + Migration to Version 9.5.14 + + + A dump/restore is not required for those running 9.5.X. + + + + However, if you are upgrading from a version earlier than 9.5.13, + see . + + + + + Changes + + + + + + Fix failure to reset libpq's state fully + between connection attempts (Tom Lane) + + + + An unprivileged user of dblink + or postgres_fdw could bypass the checks intended + to prevent use of server-side credentials, such as + a ~/.pgpass file owned by the operating-system + user running the server. Servers allowing peer authentication on + local connections are particularly vulnerable. Other attacks such + as SQL injection into a postgres_fdw session + are also possible. + Attacking postgres_fdw in this way requires the + ability to create a foreign server object with selected connection + parameters, but any user with access to dblink + could exploit the problem. + In general, an attacker with the ability to select the connection + parameters for a libpq-using application + could cause mischief, though other plausible attack scenarios are + harder to think of. + Our thanks to Andrew Krasichkov for reporting this issue. + (CVE-2018-10915) + + + + + + Fix INSERT ... ON CONFLICT UPDATE through a view + that isn't just SELECT * FROM ... + (Dean Rasheed, Amit Langote) + + + + Erroneous expansion of an updatable view could lead to crashes + or attribute ... 
has the wrong type errors, if the + view's SELECT list doesn't match one-to-one with + the underlying table's columns. + Furthermore, this bug could be leveraged to allow updates of columns + that an attacking user lacks UPDATE privilege for, + if that user has INSERT and UPDATE + privileges for some other column(s) of the table. + Any user could also use it for disclosure of server memory. + (CVE-2018-10925) + + + + + + Ensure that updates to the relfrozenxid + and relminmxid values + for nailed system catalogs are processed in a timely + fashion (Andres Freund) + + + + Overoptimistic caching rules could prevent these updates from being + seen by other sessions, leading to spurious errors and/or data + corruption. The problem was significantly worse for shared catalogs, + such as pg_authid, because the stale cache + data could persist into new sessions as well as existing ones. + + + + + + Fix case where a freshly-promoted standby crashes before having + completed its first post-recovery checkpoint (Michael Paquier, Kyotaro + Horiguchi, Pavan Deolasee, Álvaro Herrera) + + + + This led to a situation where the server did not think it had reached + a consistent database state during subsequent WAL replay, preventing + restart. + + + + + + Avoid emitting a bogus WAL record when recycling an all-zero btree + page (Amit Kapila) + + + + This mistake has been seen to cause assertion failures, and + potentially it could result in unnecessary query cancellations on hot + standby servers. + + + + + + During WAL replay, guard against corrupted record lengths exceeding + 1GB (Michael Paquier) + + + + Treat such a case as corrupt data. Previously, the code would try to + allocate space and get a hard error, making recovery impossible. + + + + + + When ending recovery, delay writing the timeline history file as long + as possible (Heikki Linnakangas) + + + + This avoids some situations where a failure during recovery cleanup + (such as a problem with a two-phase state file) led to inconsistent + timeline state on-disk. + + + + + + Improve performance of WAL replay for transactions that drop many + relations (Fujii Masao) + + + + This change reduces the number of times that shared buffers are + scanned, so that it is of most benefit when that setting is large. + + + + + + Improve performance of lock releasing in standby server WAL replay + (Thomas Munro) + + + + + + Make logical WAL senders report streaming state correctly (Simon + Riggs, Sawada Masahiko) + + + + The code previously mis-detected whether or not it had caught up with + the upstream server. + + + + + + Fix bugs in snapshot handling during logical decoding, allowing wrong + decoding results in rare cases (Arseny Sher, Álvaro Herrera) + + + + + + Ensure a table's cached index list is correctly rebuilt after an index + creation fails partway through (Peter Geoghegan) + + + + Previously, the failed index's OID could remain in the list, causing + problems later in the same session. + + + + + + Fix mishandling of empty uncompressed posting list pages in GIN + indexes (Sivasubramanian Ramasubramanian, Alexander Korotkov) + + + + This could result in an assertion failure after pg_upgrade of a + pre-9.4 GIN index (9.4 and later will not create such pages). + + + + + + Ensure that VACUUM will respond to signals + within btree page deletion loops (Andres Freund) + + + + Corrupted btree indexes could result in an infinite loop here, and + that previously wasn't interruptible without forcing a crash. 
+ + + + + + Fix misoptimization of equivalence classes involving composite-type + columns (Tom Lane) + + + + This resulted in failure to recognize that an index on a composite + column could provide the sort order needed for a mergejoin on that + column. + + + + + + Fix SQL-standard FETCH FIRST syntax to allow + parameters ($n), as the + standard expects (Andrew Gierth) + + + + + + Fix failure to schema-qualify some object names + in getObjectDescription output + (Kyotaro Horiguchi, Tom Lane) + + + + Names of collations, conversions, and text search objects + were not schema-qualified when they should be. + + + + + + Widen COPY FROM's current-line-number counter + from 32 to 64 bits (David Rowley) + + + + This avoids two problems with input exceeding 4G lines: COPY + FROM WITH HEADER would drop a line every 4G lines, not only + the first line, and error reports could show a wrong line number. + + + + + + Add a string freeing function + to ecpg's pgtypes + library, so that cross-module memory management problems can be + avoided on Windows (Takayuki Tsunakawa) + + + + On Windows, crashes can ensue if the free call + for a given chunk of memory is not made from the same DLL + that malloc'ed the memory. + The pgtypes library sometimes returns strings + that it expects the caller to free, making it impossible to follow + this rule. Add a PGTYPESchar_free() function + that just wraps free, allowing applications + to follow this rule. + + + + + + Fix ecpg's support for long + long variables on Windows, as well as other platforms that + declare strtoll/strtoull + nonstandardly or not at all (Dang Minh Huong, Tom Lane) + + + + + + Fix misidentification of SQL statement type in PL/pgSQL, when a rule + change causes a change in the semantics of a statement intra-session + (Tom Lane) + + + + This error led to assertion failures, or in rare cases, failure to + enforce the INTO STRICT option as expected. + + + + + + Fix password prompting in client programs so that echo is properly + disabled on Windows when stdin is not the + terminal (Matthew Stickney) + + + + + + Further fix mis-quoting of values for list-valued GUC variables in + dumps (Tom Lane) + + + + The previous fix for quoting of search_path and + other list-valued variables in pg_dump + output turned out to misbehave for empty-string list elements, and it + risked truncation of long file paths. + + + + + + Fix pg_dump's failure to + dump REPLICA IDENTITY properties for constraint + indexes (Tom Lane) + + + + Manually created unique indexes were properly marked, but not those + created by declaring UNIQUE or PRIMARY + KEY constraints. + + + + + + Make pg_upgrade check that the old server + was shut down cleanly (Bruce Momjian) + + + + The previous check could be fooled by an immediate-mode shutdown. + + + + + + Fix contrib/hstore_plperl to look through Perl + scalar references, and to not crash if it doesn't find a hash + reference where it expects one (Tom Lane) + + + + + + Fix crash in contrib/ltree's + lca() function when the input array is empty + (Pierre Ducroquet) + + + + + + Fix various error-handling code paths in which an incorrect error code + might be reported (Michael Paquier, Tom Lane, Magnus Hagander) + + + + + + Rearrange makefiles to ensure that programs link to freshly-built + libraries (such as libpq.so) rather than ones + that might exist in the system library directories (Tom Lane) + + + + This avoids problems when building on platforms that supply old copies + of PostgreSQL libraries. 
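An illustrative sketch of the FETCH FIRST parameter fix noted above; the orders table and created_at column are assumed only for this example:

    PREPARE recent_orders(int) AS
      SELECT * FROM orders
      ORDER BY created_at DESC
      FETCH FIRST $1 ROWS ONLY;    -- a parameter here was previously rejected

    EXECUTE recent_orders(10);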
+ + + + + + Update time zone data files to tzdata + release 2018e for DST law changes in North Korea, plus historical + corrections for Czechoslovakia. + + + + This update includes a redefinition of daylight savings + in Ireland, as well as for some past years in Namibia and + Czechoslovakia. In those jurisdictions, legally standard time is + observed in summer, and daylight savings time in winter, so that the + daylight savings offset is one hour behind standard time not one hour + ahead. This does not affect either the actual UTC offset or the + timezone abbreviations in use; the only known effect is that + the is_dst column in + the pg_timezone_names view will now be true + in winter and false in summer in these cases. + + + + + + + + + + Release 9.5.13 + + + Release date: + 2018-05-10 + + + + This release contains a variety of fixes from 9.5.12. + For information about new features in the 9.5 major release, see + . + + + + Migration to Version 9.5.13 + + + A dump/restore is not required for those running 9.5.X. + + + + However, if the function marking mistakes mentioned in the first + changelog entry below affect you, you will want to take steps to + correct your database catalogs. + + + + Also, if you are upgrading from a version earlier than 9.5.12, + see . + + + + + Changes + + + + + + Fix incorrect volatility markings on a few built-in functions + (Thomas Munro, Tom Lane) + + + + The functions + query_to_xml, + cursor_to_xml, + cursor_to_xmlschema, + query_to_xmlschema, and + query_to_xml_and_xmlschema + should be marked volatile because they execute user-supplied queries + that might contain volatile operations. They were not, leading to a + risk of incorrect query optimization. This has been repaired for new + installations by correcting the initial catalog data, but existing + installations will continue to contain the incorrect markings. + Practical use of these functions seems to pose little hazard, but in + case of trouble, it can be fixed by manually updating these + functions' pg_proc entries, for example + ALTER FUNCTION pg_catalog.query_to_xml(text, boolean, + boolean, text) VOLATILE. (Note that that will need to be + done in each database of the installation.) Another option is + to pg_upgrade the database to a version + containing the corrected initial data. + + + + + + Avoid re-using TOAST value OIDs that match dead-but-not-yet-vacuumed + TOAST entries (Pavan Deolasee) + + + + Once the OID counter has wrapped around, it's possible to assign a + TOAST value whose OID matches a previously deleted entry in the same + TOAST table. If that entry were not yet vacuumed away, this resulted + in unexpected chunk number 0 (expected 1) for toast + value nnnnn errors, which would + persist until the dead entry was removed + by VACUUM. Fix by not selecting such OIDs when + creating a new TOAST entry. + + + + + + Change ANALYZE's algorithm for updating + pg_class.reltuples + (David Gould) + + + + Previously, pages not actually scanned by ANALYZE + were assumed to retain their old tuple density. In a large table + where ANALYZE samples only a small fraction of the + pages, this meant that the overall tuple density estimate could not + change very much, so that reltuples would + change nearly proportionally to changes in the table's physical size + (relpages) regardless of what was actually + happening in the table. This has been observed to result + in reltuples becoming so much larger than + reality as to effectively shut off autovacuuming. 
To fix, assume + that ANALYZE's sample is a statistically unbiased + sample of the table (as it should be), and just extrapolate the + density observed within those pages to the whole table. + + + + + + Avoid deadlocks in concurrent CREATE INDEX + CONCURRENTLY commands that are run + under SERIALIZABLE or REPEATABLE + READ transaction isolation (Tom Lane) + + + + + + Fix possible slow execution of REFRESH MATERIALIZED VIEW + CONCURRENTLY (Thomas Munro) + + + + + + Fix UPDATE/DELETE ... WHERE CURRENT OF to not fail + when the referenced cursor uses an index-only-scan plan (Yugo Nagata, + Tom Lane) + + + + + + Fix incorrect planning of join clauses pushed into parameterized + paths (Andrew Gierth, Tom Lane) + + + + This error could result in misclassifying a condition as + a join filter for an outer join when it should be a + plain filter condition, leading to incorrect join + output. + + + + + + Fix possibly incorrect generation of an index-only-scan plan when the + same table column appears in multiple index columns, and only some of + those index columns use operator classes that can return the column + value (Kyotaro Horiguchi) + + + + + + Fix misoptimization of CHECK constraints having + provably-NULL subclauses of + top-level AND/OR conditions + (Tom Lane, Dean Rasheed) + + + + This could, for example, allow constraint exclusion to exclude a + child table that should not be excluded from a query. + + + + + + Fix executor crash due to double free in some GROUPING + SET usages (Peter Geoghegan) + + + + + + Avoid crash if a table rewrite event trigger is added concurrently + with a command that could call such a trigger (Álvaro Herrera, + Andrew Gierth, Tom Lane) + + + + + + Avoid failure if a query-cancel or session-termination interrupt + occurs while committing a prepared transaction (Stas Kelvich) + + + + + + Fix query-lifespan memory leakage in repeatedly executed hash joins + (Tom Lane) + + + + + + Fix overly strict sanity check + in heap_prepare_freeze_tuple + (Álvaro Herrera) + + + + This could result in incorrect cannot freeze committed + xmax failures in databases that have + been pg_upgrade'd from 9.2 or earlier. + + + + + + Prevent dangling-pointer dereference when a C-coded before-update row + trigger returns the old tuple (Rushabh Lathia) + + + + + + Reduce locking during autovacuum worker scheduling (Jeff Janes) + + + + The previous behavior caused drastic loss of potential worker + concurrency in databases with many tables. + + + + + + Ensure client hostname is copied while copying + pg_stat_activity data to local memory + (Edmund Horner) + + + + Previously the supposedly-local snapshot contained a pointer into + shared memory, allowing the client hostname column to change + unexpectedly if any existing session disconnected. + + + + + + Fix incorrect processing of multiple compound affixes + in ispell dictionaries (Arthur Zakirov) + + + + + + Fix collation-aware searches (that is, indexscans using inequality + operators) in SP-GiST indexes on text columns (Tom Lane) + + + + Such searches would return the wrong set of rows in most non-C + locales. + + + + + + Count the number of index tuples correctly during initial build of an + SP-GiST index (Tomas Vondra) + + + + Previously, the tuple count was reported to be the same as that of + the underlying table, which is wrong if the index is partial. 
+ + + + + + Count the number of index tuples correctly during vacuuming of a + GiST index (Andrey Borodin) + + + + Previously it reported the estimated number of heap tuples, + which might be inaccurate, and is certainly wrong if the + index is partial. + + + + + + Fix a corner case where a streaming standby gets stuck at a WAL + continuation record (Kyotaro Horiguchi) + + + + + + In logical decoding, avoid possible double processing of WAL data + when a walsender restarts (Craig Ringer) + + + + + + Allow scalarltsel + and scalargtsel to be used on non-core datatypes + (Tomas Vondra) + + + + + + Reduce libpq's memory consumption when a + server error is reported after a large amount of query output has + been collected (Tom Lane) + + + + Discard the previous output before, not after, processing the error + message. On some platforms, notably Linux, this can make a + difference in the application's subsequent memory footprint. + + + + + + Fix double-free crashes in ecpg + (Patrick Krecker, Jeevan Ladhe) + + + + + + Fix ecpg to handle long long + int variables correctly in MSVC builds (Michael Meskes, + Andrew Gierth) + + + + + + Fix mis-quoting of values for list-valued GUC variables in dumps + (Michael Paquier, Tom Lane) + + + + The local_preload_libraries, + session_preload_libraries, + shared_preload_libraries, + and temp_tablespaces variables were not correctly + quoted in pg_dump output. This would + cause problems if settings for these variables appeared in + CREATE FUNCTION ... SET or ALTER + DATABASE/ROLE ... SET clauses. + + + + + + Fix pg_recvlogical to not fail against + pre-v10 PostgreSQL servers + (Michael Paquier) + + + + A previous fix caused pg_recvlogical to + issue a command regardless of server version, but it should only be + issued to v10 and later servers. + + + + + + Ensure that pg_rewind deletes files on the + target server if they are deleted from the source server during the + run (Takayuki Tsunakawa) + + + + Failure to do this could result in data inconsistency on the target, + particularly if the file in question is a WAL segment. + + + + + + Fix pg_rewind to handle tables in + non-default tablespaces correctly (Takayuki Tsunakawa) + + + + + + Fix overflow handling in PL/pgSQL + integer FOR loops (Tom Lane) + + + + The previous coding failed to detect overflow of the loop variable + on some non-gcc compilers, leading to an infinite loop. + + + + + + Adjust PL/Python regression tests to pass + under Python 3.7 (Peter Eisentraut) + + + + + + Support testing PL/Python and related + modules when building with Python 3 and MSVC (Andrew Dunstan) + + + + + + + Support building with Microsoft Visual Studio 2015 (Michael Paquier) + + + + Various fixes needed for VS2015 compatibility were previously + back-patched into the 9.5 branch, but this one was missed. + + + + + + Rename internal b64_encode + and b64_decode functions to avoid conflict with + Solaris 11.4 built-in functions (Rainer Orth) + + + + + + Sync our copy of the timezone library with IANA tzcode release 2018e + (Tom Lane) + + + + This fixes the zic timezone data compiler + to cope with negative daylight-savings offsets. While + the PostgreSQL project will not + immediately ship such timezone data, zic + might be used with timezone data obtained directly from IANA, so it + seems prudent to update zic now. 
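If timezone data built from an IANA release that defines negative daylight-savings offsets is installed (PostgreSQL's own tzdata files do not yet do so), the effect described for the is_dst column earlier in these notes can be inspected directly; Europe/Dublin is the obvious zone to check, and is used here purely as an example:

SELECT name, abbrev, utc_offset, is_dst
FROM pg_timezone_names
WHERE name = 'Europe/Dublin';

With such data, is_dst reads true in winter and false in summer, while the reported utc_offset and abbreviation are unchanged.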
+ + + + + + Update time zone data files to tzdata + release 2018d for DST law changes in Palestine and Antarctica (Casey + Station), plus historical corrections for Portugal and its colonies, + as well as Enderbury, Jamaica, Turks & Caicos Islands, and + Uruguay. + + + + + + + + + + Release 9.5.12 + + + Release date: + 2018-03-01 + + + + This release contains a variety of fixes from 9.5.11. + For information about new features in the 9.5 major release, see + . + + + + Migration to Version 9.5.12 + + + A dump/restore is not required for those running 9.5.X. + + + + However, if you run an installation in which not all users are mutually + trusting, or if you maintain an application or extension that is + intended for use in arbitrary situations, it is strongly recommended + that you read the documentation changes described in the first changelog + entry below, and take suitable steps to ensure that your installation or + code is secure. + + + + Also, the changes described in the second changelog entry below may + cause functions used in index expressions or materialized views to fail + during auto-analyze, or when reloading from a dump. After upgrading, + monitor the server logs for such problems, and fix affected functions. + + + + Also, if you are upgrading from a version earlier than 9.5.10, + see . + + + + + Changes + + + + + + Document how to configure installations and applications to guard + against search-path-dependent trojan-horse attacks from other users + (Noah Misch) + + + + Using a search_path setting that includes any + schemas writable by a hostile user enables that user to capture + control of queries and then run arbitrary SQL code with the + permissions of the attacked user. While it is possible to write + queries that are proof against such hijacking, it is notationally + tedious, and it's very easy to overlook holes. Therefore, we now + recommend configurations in which no untrusted schemas appear in + one's search path. Relevant documentation appears in + (for database administrators and users), + (for application authors), + (for extension authors), and + (for authors + of SECURITY DEFINER functions). + (CVE-2018-1058) + + + + + + Avoid use of insecure search_path settings + in pg_dump and other client programs + (Noah Misch, Tom Lane) + + + + pg_dump, + pg_upgrade, + vacuumdb and + other PostgreSQL-provided applications were + themselves vulnerable to the type of hijacking described in the previous + changelog entry; since these applications are commonly run by + superusers, they present particularly attractive targets. To make them + secure whether or not the installation as a whole has been secured, + modify them to include only the pg_catalog + schema in their search_path settings. + Autovacuum worker processes now do the same, as well. + + + + In cases where user-provided functions are indirectly executed by + these programs — for example, user-provided functions in index + expressions — the tighter search_path may + result in errors, which will need to be corrected by adjusting those + user-provided functions to not assume anything about what search path + they are invoked under. That has always been good practice, but now + it will be necessary for correct behavior. 
+ (CVE-2018-1058) + + + + + + Fix misbehavior of concurrent-update rechecks with CTE references + appearing in subplans (Tom Lane) + + + + If a CTE (WITH clause reference) is used in an + InitPlan or SubPlan, and the query requires a recheck due to trying + to update or lock a concurrently-updated row, incorrect results could + be obtained. + + + + + + Fix planner failures with overlapping mergejoin clauses in an outer + join (Tom Lane) + + + + These mistakes led to left and right pathkeys do not match in + mergejoin or outer pathkeys do not match + mergeclauses planner errors in corner cases. + + + + + + Repair pg_upgrade's failure to + preserve relfrozenxid for materialized + views (Tom Lane, Andres Freund) + + + + This oversight could lead to data corruption in materialized views + after an upgrade, manifesting as could not access status of + transaction or found xmin from before + relfrozenxid errors. The problem would be more likely to + occur in seldom-refreshed materialized views, or ones that were + maintained only with REFRESH MATERIALIZED VIEW + CONCURRENTLY. + + + + If such corruption is observed, it can be repaired by refreshing the + materialized view (without CONCURRENTLY). + + + + + + Fix incorrect reporting of PL/Python function names in + error CONTEXT stacks (Tom Lane) + + + + An error occurring within a nested PL/Python function call (that is, + one reached via a SPI query from another PL/Python function) would + result in a stack trace showing the inner function's name twice, + rather than the expected results. Also, an error in a nested + PL/Python DO block could result in a null pointer + dereference crash on some platforms. + + + + + + Allow contrib/auto_explain's + log_min_duration setting to range up + to INT_MAX, or about 24 days instead of 35 minutes + (Tom Lane) + + + + + + + + + + Release 9.5.11 + + + Release date: + 2018-02-08 + + + + This release contains a variety of fixes from 9.5.10. + For information about new features in the 9.5 major release, see + . + + + + Migration to Version 9.5.11 + + + A dump/restore is not required for those running 9.5.X. + + + + However, if you are upgrading from a version earlier than 9.5.10, + see . + + + + + Changes + + + + + + Ensure that all temporary files made + by pg_upgrade are non-world-readable + (Tom Lane, Noah Misch) + + + + pg_upgrade normally restricts its + temporary files to be readable and writable only by the calling user. + But the temporary file containing pg_dumpall -g + output would be group- or world-readable, or even writable, if the + user's umask setting allows. In typical usage on + multi-user machines, the umask and/or the working + directory's permissions would be tight enough to prevent problems; + but there may be people using pg_upgrade + in scenarios where this oversight would permit disclosure of database + passwords to unfriendly eyes. + (CVE-2018-1053) + + + + + + Fix vacuuming of tuples that were updated while key-share locked + (Andres Freund, Álvaro Herrera) + + + + In some cases VACUUM would fail to remove such + tuples even though they are now dead, leading to assorted data + corruption scenarios. + + + + + + Fix inadequate buffer locking in some LSN fetches (Jacob Champion, + Asim Praveen, Ashwin Agrawal) + + + + These errors could result in misbehavior under concurrent load. + The potential consequences have not been characterized fully. 
+ + + + + + Fix incorrect query results from cases involving flattening of + subqueries whose outputs are used in GROUPING SETS + (Heikki Linnakangas) + + + + + + Avoid unnecessary failure in a query on an inheritance tree that + occurs concurrently with some child table being removed from the tree + by ALTER TABLE NO INHERIT (Tom Lane) + + + + + + Fix spurious deadlock failures when multiple sessions are + running CREATE INDEX CONCURRENTLY (Jeff Janes) + + + + + + Fix failures when an inheritance tree contains foreign child tables + (Etsuro Fujita) + + + + A mix of regular and foreign tables in an inheritance tree resulted in + creation of incorrect plans for UPDATE + and DELETE queries. This led to visible failures in + some cases, notably when there are row-level triggers on a foreign + child table. + + + + + + Repair failure with correlated sub-SELECT + inside VALUES inside a LATERAL + subquery (Tom Lane) + + + + + + Fix could not devise a query plan for the given query + planner failure for some cases involving nested UNION + ALL inside a lateral subquery (Tom Lane) + + + + + + Fix logical decoding to correctly clean up disk files for crashed + transactions (Atsushi Torikoshi) + + + + Logical decoding may spill WAL records to disk for transactions + generating many WAL records. Normally these files are cleaned up + after the transaction's commit or abort record arrives; but if + no such record is ever seen, the removal code misbehaved. + + + + + + Fix walsender timeout failure and failure to respond to interrupts + when processing a large transaction (Petr Jelinek) + + + + + + Fix has_sequence_privilege() to + support WITH GRANT OPTION tests, + as other privilege-testing functions do (Joe Conway) + + + + + + In databases using UTF8 encoding, ignore any XML declaration that + asserts a different encoding (Pavel Stehule, Noah Misch) + + + + We always store XML strings in the database encoding, so allowing + libxml to act on a declaration of another encoding gave wrong results. + In encodings other than UTF8, we don't promise to support non-ASCII + XML data anyway, so retain the previous behavior for bug compatibility. + This change affects only xpath() and related + functions; other XML code paths already acted this way. + + + + + + Provide for forward compatibility with future minor protocol versions + (Robert Haas, Badrul Chowdhury) + + + + Up to now, PostgreSQL servers simply + rejected requests to use protocol versions newer than 3.0, so that + there was no functional difference between the major and minor parts + of the protocol version number. Allow clients to request versions 3.x + without failing, sending back a message showing that the server only + understands 3.0. This makes no difference at the moment, but + back-patching this change should allow speedier introduction of future + minor protocol upgrades. + + + + + + Cope with failure to start a parallel worker process + (Amit Kapila, Robert Haas) + + + + Parallel query previously tended to hang indefinitely if a worker + could not be started, as the result of fork() + failure or other low-probability problems. + + + + + + Avoid unsafe alignment assumptions when working + with __int128 (Tom Lane) + + + + Typically, compilers assume that __int128 variables are + aligned on 16-byte boundaries, but our memory allocation + infrastructure isn't prepared to guarantee that, and increasing the + setting of MAXALIGN seems infeasible for multiple reasons. 
Adjust the + code to allow use of __int128 only when we can tell the + compiler to assume lesser alignment. The only known symptom of this + problem so far is crashes in some parallel aggregation queries. + + + + + + Prevent stack-overflow crashes when planning extremely deeply + nested set operations + (UNION/INTERSECT/EXCEPT) + (Tom Lane) + + + + + + Fix null-pointer crashes for some types of LDAP URLs appearing + in pg_hba.conf (Thomas Munro) + + + + + + Fix sample INSTR() functions in the PL/pgSQL + documentation (Yugo Nagata, Tom Lane) + + + + These functions are stated to + be Oracle compatible, but + they weren't exactly. In particular, there was a discrepancy in the + interpretation of a negative third parameter: Oracle thinks that a + negative value indicates the last place where the target substring can + begin, whereas our functions took it as the last place where the + target can end. Also, Oracle throws an error for a zero or negative + fourth parameter, whereas our functions returned zero. + + + + The sample code has been adjusted to match Oracle's behavior more + precisely. Users who have copied this code into their applications + may wish to update their copies. + + + + + + Fix pg_dump to make ACL (permissions), + comment, and security label entries reliably identifiable in archive + output formats (Tom Lane) + + + + The tag portion of an ACL archive entry was usually + just the name of the associated object. Make it start with the object + type instead, bringing ACLs into line with the convention already used + for comment and security label archive entries. Also, fix the + comment and security label entries for the whole database, if present, + to make their tags start with DATABASE so that they + also follow this convention. This prevents false matches in code that + tries to identify large-object-related entries by seeing if the tag + starts with LARGE OBJECT. That could have resulted + in misclassifying entries as data rather than schema, with undesirable + results in a schema-only or data-only dump. + + + + Note that this change has user-visible results in the output + of pg_restore --list. + + + + + + Rename pg_rewind's + copy_file_range function to avoid conflict + with new Linux system call of that name (Andres Freund) + + + + This change prevents build failures with newer glibc versions. + + + + + + In ecpg, detect indicator arrays that do + not have the correct length and report an error (David Rader) + + + + + + Avoid triggering a libc assertion + in contrib/hstore, due to use + of memcpy() with equal source and destination + pointers (Tomas Vondra) + + + + + + Provide modern examples of how to auto-start Postgres on macOS + (Tom Lane) + + + + The scripts in contrib/start-scripts/osx use + infrastructure that's been deprecated for over a decade, and which no + longer works at all in macOS releases of the last couple of years. + Add a new subdirectory contrib/start-scripts/macos + containing scripts that use the newer launchd + infrastructure. + + + + + + Fix incorrect selection of configuration-specific libraries for + OpenSSL on Windows (Andrew Dunstan) + + + + + + Support linking to MinGW-built versions of libperl (Noah Misch) + + + + This allows building PL/Perl with some common Perl distributions for + Windows. 
+ + + + + + Fix MSVC build to test whether 32-bit libperl + needs -D_USE_32BIT_TIME_T (Noah Misch) + + + + Available Perl distributions are inconsistent about what they expect, + and lack any reliable means of reporting it, so resort to a build-time + test on what the library being used actually does. + + + + + + On Windows, install the crash dump handler earlier in postmaster + startup (Takayuki Tsunakawa) + + + + This may allow collection of a core dump for some early-startup + failures that did not produce a dump before. + + + + + + On Windows, avoid encoding-conversion-related crashes when emitting + messages very early in postmaster startup (Takayuki Tsunakawa) + + + + + + Use our existing Motorola 68K spinlock code on OpenBSD as + well as NetBSD (David Carlier) + + + + + + Add support for spinlocks on Motorola 88K (David Carlier) + + + + + + Update time zone data files to tzdata + release 2018c for DST law changes in Brazil, Sao Tome and Principe, + plus historical corrections for Bolivia, Japan, and South Sudan. + The US/Pacific-New zone has been removed (it was + only an alias for America/Los_Angeles anyway). + + + + + + + + + + Release 9.5.10 + + + Release date: + 2017-11-09 + + + + This release contains a variety of fixes from 9.5.9. + For information about new features in the 9.5 major release, see + . + + + + Migration to Version 9.5.10 + + + A dump/restore is not required for those running 9.5.X. + + + + However, if you use BRIN indexes, see the fourth changelog entry below. + + + + Also, if you are upgrading from a version earlier than 9.5.8, + see . + + + + + Changes + + + + + + Ensure that INSERT ... ON CONFLICT DO UPDATE checks + table permissions and RLS policies in all cases (Dean Rasheed) + + + + The update path of INSERT ... ON CONFLICT DO UPDATE + requires SELECT permission on the columns of the + arbiter index, but it failed to check for that in the case of an + arbiter specified by constraint name. + In addition, for a table with row level security enabled, it failed to + check updated rows against the table's SELECT + policies (regardless of how the arbiter index was specified). + (CVE-2017-15099) + + + + + + Fix crash due to rowtype mismatch + in json{b}_populate_recordset() + (Michael Paquier, Tom Lane) + + + + These functions used the result rowtype specified in the FROM + ... AS clause without checking that it matched the actual + rowtype of the supplied tuple value. If it didn't, that would usually + result in a crash, though disclosure of server memory contents seems + possible as well. + (CVE-2017-15098) + + + + + + Fix sample server-start scripts to become $PGUSER + before opening $PGLOG (Noah Misch) + + + + Previously, the postmaster log file was opened while still running as + root. The database owner could therefore mount an attack against + another system user by making $PGLOG be a symbolic + link to some other file, which would then become corrupted by appending + log messages. + + + + By default, these scripts are not installed anywhere. Users who have + made use of them will need to manually recopy them, or apply the same + changes to their modified versions. If the + existing $PGLOG file is root-owned, it will need to + be removed or renamed out of the way before restarting the server with + the corrected script. + (CVE-2017-12172) + + + + + + Fix BRIN index summarization to handle concurrent table extension + correctly (Álvaro Herrera) + + + + Previously, a race condition allowed some table rows to be omitted from + the index. 
It may be necessary to reindex existing BRIN indexes to + recover from past occurrences of this problem. + + + + + + Fix possible failures during concurrent updates of a BRIN index + (Tom Lane) + + + + These race conditions could result in errors like invalid index + offnum or inconsistent range map. + + + + + + Fix crash when logical decoding is invoked from a SPI-using function, + in particular any function written in a PL language + (Tom Lane) + + + + + + Fix json_build_array(), + json_build_object(), and their jsonb + equivalents to handle explicit VARIADIC arguments + correctly (Michael Paquier) + + + + + + Properly reject attempts to convert infinite float values to + type numeric (Tom Lane, KaiGai Kohei) + + + + Previously the behavior was platform-dependent. + + + + + + Fix corner-case crashes when columns have been added to the end of a + view (Tom Lane) + + + + + + Record proper dependencies when a view or rule + contains FieldSelect + or FieldStore expression nodes (Tom Lane) + + + + Lack of these dependencies could allow a column or data + type DROP to go through when it ought to fail, + thereby causing later uses of the view or rule to get errors. + This patch does not do anything to protect existing views/rules, + only ones created in the future. + + + + + + Correctly detect hashability of range data types (Tom Lane) + + + + The planner mistakenly assumed that any range type could be hashed + for use in hash joins or hash aggregation, but actually it must check + whether the range's subtype has hash support. This does not affect any + of the built-in range types, since they're all hashable anyway. + + + + + + Correctly ignore RelabelType expression nodes + when determining relation distinctness (David Rowley) + + + + This allows the intended optimization to occur when a subquery has + a result column of type varchar. + + + + + + Fix low-probability loss of NOTIFY messages due to + XID wraparound (Marko Tiikkaja, Tom Lane) + + + + If a session executed no queries, but merely listened for + notifications, for more than 2 billion transactions, it started to miss + some notifications from concurrently-committing transactions. + + + + + + Avoid SIGBUS crash on Linux when a DSM memory + request exceeds the space available in tmpfs + (Thomas Munro) + + + + + + Prevent low-probability crash in processing of nested trigger firings + (Tom Lane) + + + + + + Allow COPY's FREEZE option to + work when the transaction isolation level is REPEATABLE + READ or higher (Noah Misch) + + + + This case was unintentionally broken by a previous bug fix. + + + + + + Correctly restore the umask setting when file creation fails + in COPY or lo_export() + (Peter Eisentraut) + + + + + + Give a better error message for duplicate column names + in ANALYZE (Nathan Bossart) + + + + + + Fix mis-parsing of the last line in a + non-newline-terminated pg_hba.conf file + (Tom Lane) + + + + + + Fix pg_basebackup's matching of tablespace + paths to canonicalize both paths before comparing (Michael Paquier) + + + + This is particularly helpful on Windows. + + + + + + Fix libpq to not require user's home + directory to exist (Tom Lane) + + + + In v10, failure to find the home directory while trying to + read ~/.pgpass was treated as a hard error, + but it should just cause that file to not be found. Both v10 and + previous release branches made the same mistake when + reading ~/.pg_service.conf, though this was less + obvious since that file is not sought unless a service name is + specified. 
+ + + + + + Fix libpq to guard against integer + overflow in the row count of a PGresult + (Michael Paquier) + + + + + + Fix ecpg's handling of out-of-scope cursor + declarations with pointer or array variables (Michael Meskes) + + + + + + In ecpglib, correctly handle backslashes in string literals depending + on whether standard_conforming_strings is set + (Tsunakawa Takayuki) + + + + + + Make ecpglib's Informix-compatibility mode ignore fractional digits in + integer input strings, as expected (Gao Zengqi, Michael Meskes) + + + + + + Fix missing temp-install prerequisites + for check-like Make targets (Noah Misch) + + + + Some non-default test procedures that are meant to work + like make check failed to ensure that the temporary + installation was up to date. + + + + + + Sync our copy of the timezone library with IANA release tzcode2017c + (Tom Lane) + + + + This fixes various issues; the only one likely to be user-visible + is that the default DST rules for a POSIX-style zone name, if + no posixrules file exists in the timezone data + directory, now match current US law rather than what it was a dozen + years ago. + + + + + + Update time zone data files to tzdata + release 2017c for DST law changes in Fiji, Namibia, Northern Cyprus, + Sudan, Tonga, and Turks & Caicos Islands, plus historical + corrections for Alaska, Apia, Burma, Calcutta, Detroit, Ireland, + Namibia, and Pago Pago. + + + + + + + + + + Release 9.5.9 + + + Release date: + 2017-08-31 + + + + This release contains a small number of fixes from 9.5.8. + For information about new features in the 9.5 major release, see + . + + + + Migration to Version 9.5.9 + + + A dump/restore is not required for those running 9.5.X. + + + + However, if you are upgrading from a version earlier than 9.5.8, + see . + + + + + Changes + + + + + + Show foreign tables + in information_schema.table_privileges + view (Peter Eisentraut) + + + + All other relevant information_schema views include + foreign tables, but this one ignored them. + + + + Since this view definition is installed by initdb, + merely upgrading will not fix the problem. 
If you need to fix this + in an existing installation, you can, as a superuser, do this + in psql: + +SET search_path TO information_schema; +CREATE OR REPLACE VIEW table_privileges AS + SELECT CAST(u_grantor.rolname AS sql_identifier) AS grantor, + CAST(grantee.rolname AS sql_identifier) AS grantee, + CAST(current_database() AS sql_identifier) AS table_catalog, + CAST(nc.nspname AS sql_identifier) AS table_schema, + CAST(c.relname AS sql_identifier) AS table_name, + CAST(c.prtype AS character_data) AS privilege_type, + CAST( + CASE WHEN + -- object owner always has grant options + pg_has_role(grantee.oid, c.relowner, 'USAGE') + OR c.grantable + THEN 'YES' ELSE 'NO' END AS yes_or_no) AS is_grantable, + CAST(CASE WHEN c.prtype = 'SELECT' THEN 'YES' ELSE 'NO' END AS yes_or_no) AS with_hierarchy + + FROM ( + SELECT oid, relname, relnamespace, relkind, relowner, (aclexplode(coalesce(relacl, acldefault('r', relowner)))).* FROM pg_class + ) AS c (oid, relname, relnamespace, relkind, relowner, grantor, grantee, prtype, grantable), + pg_namespace nc, + pg_authid u_grantor, + ( + SELECT oid, rolname FROM pg_authid + UNION ALL + SELECT 0::oid, 'PUBLIC' + ) AS grantee (oid, rolname) + + WHERE c.relnamespace = nc.oid + AND c.relkind IN ('r', 'v', 'f') + AND c.grantee = grantee.oid + AND c.grantor = u_grantor.oid + AND c.prtype IN ('INSERT', 'SELECT', 'UPDATE', 'DELETE', 'TRUNCATE', 'REFERENCES', 'TRIGGER') + AND (pg_has_role(u_grantor.oid, 'USAGE') + OR pg_has_role(grantee.oid, 'USAGE') + OR grantee.rolname = 'PUBLIC'); + + This must be repeated in each database to be fixed, + including template0. + + + + + + Clean up handling of a fatal exit (e.g., due to receipt + of SIGTERM) that occurs while trying to execute + a ROLLBACK of a failed transaction (Tom Lane) + + + + This situation could result in an assertion failure. In production + builds, the exit would still occur, but it would log an unexpected + message about cannot drop active portal. + + + + + + Remove assertion that could trigger during a fatal exit (Tom Lane) + + + + + + Correctly identify columns that are of a range type or domain type over + a composite type or domain type being searched for (Tom Lane) + + + + Certain ALTER commands that change the definition of a + composite type or domain type are supposed to fail if there are any + stored values of that type in the database, because they lack the + infrastructure needed to update or check such values. Previously, + these checks could miss relevant values that are wrapped inside range + types or sub-domains, possibly allowing the database to become + inconsistent. + + + + + + Fix crash in pg_restore when using parallel mode and + using a list file to select a subset of items to restore + (Fabrízio de Royes Mello) + + + + + + Change ecpg's parser to allow RETURNING + clauses without attached C variables (Michael Meskes) + + + + This allows ecpg programs to contain SQL constructs + that use RETURNING internally (for example, inside a CTE) + rather than using it to define values to be returned to the client. + + + + + + Improve selection of compiler flags for PL/Perl on Windows (Tom Lane) + + + + This fix avoids possible crashes of PL/Perl due to inconsistent + assumptions about the width of time_t values. + A side-effect that may be visible to extension developers is + that _USE_32BIT_TIME_T is no longer defined globally + in PostgreSQL Windows builds. This is not expected + to cause problems, because type time_t is not used + in any PostgreSQL API definitions. 
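As a plain-SQL sketch of the ecpg change a little further up (allowing RETURNING clauses without attached C variables), a statement of the kind now accepted uses RETURNING purely internally; the table names below are hypothetical:

WITH archived AS (
    DELETE FROM tasks
    WHERE done
    RETURNING *
)
INSERT INTO tasks_archive
SELECT * FROM archived;

Nothing is returned to the client here, which is the situation the embedded-SQL parser previously did not accept.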
+ + + + + + Fix make check to behave correctly when invoked via a + non-GNU make program (Thomas Munro) + + + + + + + + Release 9.5.8 @@ -12,7 +2685,7 @@ This release contains a variety of fixes from 9.5.7. For information about new features in the 9.5 major release, see - . + . @@ -29,7 +2702,7 @@ Also, if you are upgrading from a version earlier than 9.5.7, - see . + see . @@ -41,7 +2714,7 @@ Further restrict visibility - of pg_user_mappings.umoptions, to + of pg_user_mappings.umoptions, to protect passwords stored as user mapping options (Noah Misch) @@ -49,11 +2722,11 @@ The fix for CVE-2017-7486 was incorrect: it allowed a user to see the options in her own user mapping, even if she did not - have USAGE permission on the associated foreign server. + have USAGE permission on the associated foreign server. Such options might include a password that had been provided by the server owner rather than the user herself. - Since information_schema.user_mapping_options does not - show the options in such cases, pg_user_mappings + Since information_schema.user_mapping_options does not + show the options in such cases, pg_user_mappings should not either. (CVE-2017-7547) @@ -68,15 +2741,15 @@ Restart the postmaster after adding allow_system_table_mods - = true to postgresql.conf. (In versions - supporting ALTER SYSTEM, you can use that to make the + = true to postgresql.conf. (In versions + supporting ALTER SYSTEM, you can use that to make the configuration change, but you'll still need a restart.) - In each database of the cluster, + In each database of the cluster, run the following commands as superuser: SET search_path = pg_catalog; @@ -107,15 +2780,15 @@ CREATE OR REPLACE VIEW pg_user_mappings AS - Do not forget to include the template0 - and template1 databases, or the vulnerability will still - exist in databases you create later. To fix template0, + Do not forget to include the template0 + and template1 databases, or the vulnerability will still + exist in databases you create later. To fix template0, you'll need to temporarily make it accept connections. - In PostgreSQL 9.5 and later, you can use + In PostgreSQL 9.5 and later, you can use ALTER DATABASE template0 WITH ALLOW_CONNECTIONS true; - and then after fixing template0, undo that with + and then after fixing template0, undo that with ALTER DATABASE template0 WITH ALLOW_CONNECTIONS false; @@ -129,7 +2802,7 @@ UPDATE pg_database SET datallowconn = false WHERE datname = 'template0'; - Finally, remove the allow_system_table_mods configuration + Finally, remove the allow_system_table_mods configuration setting, and again restart the postmaster. @@ -143,16 +2816,16 @@ UPDATE pg_database SET datallowconn = false WHERE datname = 'template0'; - libpq ignores empty password specifications, and does + libpq ignores empty password specifications, and does not transmit them to the server. So, if a user's password has been set to the empty string, it's impossible to log in with that password - via psql or other libpq-based + via psql or other libpq-based clients. An administrator might therefore believe that setting the password to empty is equivalent to disabling password login. - However, with a modified or non-libpq-based client, + However, with a modified or non-libpq-based client, logging in could be possible, depending on which authentication method is configured. In particular the most common - method, md5, accepted empty passwords. + method, md5, accepted empty passwords. Change the server to reject empty passwords in all cases. 
(CVE-2017-7546) @@ -160,13 +2833,13 @@ UPDATE pg_database SET datallowconn = false WHERE datname = 'template0'; - Make lo_put() check for UPDATE privilege on + Make lo_put() check for UPDATE privilege on the target large object (Tom Lane, Michael Paquier) - lo_put() should surely require the same permissions - as lowrite(), but the check was missing, allowing any + lo_put() should surely require the same permissions + as lowrite(), but the check was missing, allowing any user to change the data in a large object. (CVE-2017-7548) @@ -175,12 +2848,12 @@ UPDATE pg_database SET datallowconn = false WHERE datname = 'template0'; Correct the documentation about the process for upgrading standby - servers with pg_upgrade (Bruce Momjian) + servers with pg_upgrade (Bruce Momjian) The previous documentation instructed users to start/stop the primary - server after running pg_upgrade but before syncing + server after running pg_upgrade but before syncing the standby servers. This sequence is unsafe. @@ -254,7 +2927,7 @@ UPDATE pg_database SET datallowconn = false WHERE datname = 'template0'; - Fix code for setting on + Fix code for setting on Solaris (Tom Lane) @@ -286,21 +2959,21 @@ UPDATE pg_database SET datallowconn = false WHERE datname = 'template0'; Fix possible creation of an invalid WAL segment when a standby is - promoted just after it processes an XLOG_SWITCH WAL + promoted just after it processes an XLOG_SWITCH WAL record (Andres Freund) - Fix walsender to exit promptly when client requests + Fix walsender to exit promptly when client requests shutdown (Tom Lane) - Fix SIGHUP and SIGUSR1 handling in + Fix SIGHUP and SIGUSR1 handling in walsender processes (Petr Jelinek, Andres Freund) @@ -314,7 +2987,7 @@ UPDATE pg_database SET datallowconn = false WHERE datname = 'template0'; - Fix unnecessarily slow restarts of walreceiver + Fix unnecessarily slow restarts of walreceiver processes due to race condition in postmaster (Tom Lane) @@ -362,7 +3035,7 @@ UPDATE pg_database SET datallowconn = false WHERE datname = 'template0'; - Fix cases where an INSERT or UPDATE assigns + Fix cases where an INSERT or UPDATE assigns to more than one element of a column that is of domain-over-array type (Tom Lane) @@ -370,7 +3043,7 @@ UPDATE pg_database SET datallowconn = false WHERE datname = 'template0'; - Allow window functions to be used in sub-SELECTs that + Allow window functions to be used in sub-SELECTs that are within the arguments of an aggregate function (Tom Lane) @@ -378,19 +3051,19 @@ UPDATE pg_database SET datallowconn = false WHERE datname = 'template0'; Move autogenerated array types out of the way during - ALTER ... RENAME (Vik Fearing) + ALTER ... RENAME (Vik Fearing) Previously, we would rename a conflicting autogenerated array type - out of the way during CREATE; this fix extends that + out of the way during CREATE; this fix extends that behavior to renaming operations. - Fix dangling pointer in ALTER TABLE when there is a + Fix dangling pointer in ALTER TABLE when there is a comment on a constraint belonging to the table (David Rowley) @@ -402,44 +3075,44 @@ UPDATE pg_database SET datallowconn = false WHERE datname = 'template0'; - Ensure that ALTER USER ... SET accepts all the syntax - variants that ALTER ROLE ... SET does (Peter Eisentraut) + Ensure that ALTER USER ... SET accepts all the syntax + variants that ALTER ROLE ... 
SET does (Peter Eisentraut) Properly update dependency info when changing a datatype I/O - function's argument or return type from opaque to the + function's argument or return type from opaque to the correct type (Heikki Linnakangas) - CREATE TYPE updates I/O functions declared in this + CREATE TYPE updates I/O functions declared in this long-obsolete style, but it forgot to record a dependency on the - type, allowing a subsequent DROP TYPE to leave broken + type, allowing a subsequent DROP TYPE to leave broken function definitions behind. - Reduce memory usage when ANALYZE processes - a tsvector column (Heikki Linnakangas) + Reduce memory usage when ANALYZE processes + a tsvector column (Heikki Linnakangas) Fix unnecessary precision loss and sloppy rounding when multiplying - or dividing money values by integers or floats (Tom Lane) + or dividing money values by integers or floats (Tom Lane) Tighten checks for whitespace in functions that parse identifiers, - such as regprocedurein() (Tom Lane) + such as regprocedurein() (Tom Lane) @@ -450,20 +3123,20 @@ UPDATE pg_database SET datallowconn = false WHERE datname = 'template0'; - Use relevant #define symbols from Perl while - compiling PL/Perl (Ashutosh Sharma, Tom Lane) + Use relevant #define symbols from Perl while + compiling PL/Perl (Ashutosh Sharma, Tom Lane) This avoids portability problems, typically manifesting as - a handshake mismatch during library load, when working with + a handshake mismatch during library load, when working with recent Perl versions. - In libpq, reset GSS/SASL and SSPI authentication + In libpq, reset GSS/SASL and SSPI authentication state properly after a failed connection attempt (Michael Paquier) @@ -476,9 +3149,9 @@ UPDATE pg_database SET datallowconn = false WHERE datname = 'template0'; - In psql, fix failure when COPY FROM STDIN + In psql, fix failure when COPY FROM STDIN is ended with a keyboard EOF signal and then another COPY - FROM STDIN is attempted (Thomas Munro) + FROM STDIN is attempted (Thomas Munro) @@ -489,8 +3162,8 @@ UPDATE pg_database SET datallowconn = false WHERE datname = 'template0'; - Fix pg_dump and pg_restore to - emit REFRESH MATERIALIZED VIEW commands last (Tom Lane) + Fix pg_dump and pg_restore to + emit REFRESH MATERIALIZED VIEW commands last (Tom Lane) @@ -501,15 +3174,15 @@ UPDATE pg_database SET datallowconn = false WHERE datname = 'template0'; - Improve pg_dump/pg_restore's - reporting of error conditions originating in zlib + Improve pg_dump/pg_restore's + reporting of error conditions originating in zlib (Vladimir Kunschikov, Álvaro Herrera) - Fix pg_dump with the option to drop event triggers as expected (Tom Lane) @@ -522,14 +3195,14 @@ UPDATE pg_database SET datallowconn = false WHERE datname = 'template0'; - Fix pg_dump to not emit invalid SQL for an empty + Fix pg_dump to not emit invalid SQL for an empty operator class (Daniel Gustafsson) - Fix pg_dump output to stdout on Windows (Kuntal Ghosh) + Fix pg_dump output to stdout on Windows (Kuntal Ghosh) @@ -540,14 +3213,14 @@ UPDATE pg_database SET datallowconn = false WHERE datname = 'template0'; - Fix pg_get_ruledef() to print correct output for - the ON SELECT rule of a view whose columns have been + Fix pg_get_ruledef() to print correct output for + the ON SELECT rule of a view whose columns have been renamed (Tom Lane) - In some corner cases, pg_dump relies - on pg_get_ruledef() to dump views, so that this error + In some corner cases, pg_dump relies + on pg_get_ruledef() to dump views, so that this error could 
result in dump/reload failures. @@ -555,13 +3228,13 @@ UPDATE pg_database SET datallowconn = false WHERE datname = 'template0'; Fix dumping of outer joins with empty constraints, such as the result - of a NATURAL LEFT JOIN with no common columns (Tom Lane) + of a NATURAL LEFT JOIN with no common columns (Tom Lane) - Fix dumping of function expressions in the FROM clause in + Fix dumping of function expressions in the FROM clause in cases where the expression does not deparse into something that looks like a function call (Tom Lane) @@ -569,7 +3242,7 @@ UPDATE pg_database SET datallowconn = false WHERE datname = 'template0'; - Fix pg_basebackup output to stdout on Windows + Fix pg_basebackup output to stdout on Windows (Haribabu Kommi) @@ -581,20 +3254,20 @@ UPDATE pg_database SET datallowconn = false WHERE datname = 'template0'; - Fix pg_rewind to correctly handle files exceeding 2GB + Fix pg_rewind to correctly handle files exceeding 2GB (Kuntal Ghosh, Michael Paquier) - Ordinarily such files won't appear in PostgreSQL data + Ordinarily such files won't appear in PostgreSQL data directories, but they could be present in some cases. - Fix pg_upgrade to ensure that the ending WAL record - does not have = minimum + Fix pg_upgrade to ensure that the ending WAL record + does not have = minimum (Bruce Momjian) @@ -606,16 +3279,16 @@ UPDATE pg_database SET datallowconn = false WHERE datname = 'template0'; - Fix pg_xlogdump's computation of WAL record length + Fix pg_xlogdump's computation of WAL record length (Andres Freund) - In postgres_fdw, re-establish connections to remote - servers after ALTER SERVER or ALTER USER - MAPPING commands (Kyotaro Horiguchi) + In postgres_fdw, re-establish connections to remote + servers after ALTER SERVER or ALTER USER + MAPPING commands (Kyotaro Horiguchi) @@ -626,7 +3299,7 @@ UPDATE pg_database SET datallowconn = false WHERE datname = 'template0'; - In postgres_fdw, allow cancellation of remote + In postgres_fdw, allow cancellation of remote transaction control commands (Robert Haas, Rafia Sabih) @@ -638,14 +3311,14 @@ UPDATE pg_database SET datallowconn = false WHERE datname = 'template0'; - Increase MAX_SYSCACHE_CALLBACKS to provide more room for + Increase MAX_SYSCACHE_CALLBACKS to provide more room for extensions (Tom Lane) - Always use , not , when building shared libraries with gcc (Tom Lane) @@ -672,34 +3345,34 @@ Branch: REL9_2_STABLE [1188b9b2c] 2017-08-02 15:07:21 -0400 - In MSVC builds, handle the case where the openssl - library is not within a VC subdirectory (Andrew Dunstan) + In MSVC builds, handle the case where the OpenSSL + library is not within a VC subdirectory (Andrew Dunstan) - In MSVC builds, add proper include path for libxml2 + In MSVC builds, add proper include path for libxml2 header files (Andrew Dunstan) This fixes a former need to move things around in standard Windows - installations of libxml2. + installations of libxml2. In MSVC builds, recognize a Tcl library that is - named tcl86.lib (Noah Misch) + named tcl86.lib (Noah Misch) - In MSVC builds, honor PROVE_FLAGS settings - on vcregress.pl's command line (Andrew Dunstan) + In MSVC builds, honor PROVE_FLAGS settings + on vcregress.pl's command line (Andrew Dunstan) @@ -719,7 +3392,7 @@ Branch: REL9_2_STABLE [1188b9b2c] 2017-08-02 15:07:21 -0400 This release contains a variety of fixes from 9.5.6. For information about new features in the 9.5 major release, see - . + . 
@@ -736,12 +3409,12 @@ Branch: REL9_2_STABLE [1188b9b2c] 2017-08-02 15:07:21 -0400 Also, if you are using third-party replication tools that depend - on logical decoding, see the fourth changelog entry below. + on logical decoding, see the fourth changelog entry below. Also, if you are upgrading from a version earlier than 9.5.6, - see . + see . @@ -753,18 +3426,18 @@ Branch: REL9_2_STABLE [1188b9b2c] 2017-08-02 15:07:21 -0400 Restrict visibility - of pg_user_mappings.umoptions, to + of pg_user_mappings.umoptions, to protect passwords stored as user mapping options (Michael Paquier, Feike Steenbergen) The previous coding allowed the owner of a foreign server object, - or anyone he has granted server USAGE permission to, + or anyone he has granted server USAGE permission to, to see the options for all user mappings associated with that server. This might well include passwords for other users. Adjust the view definition to match the behavior of - information_schema.user_mapping_options, namely that + information_schema.user_mapping_options, namely that these options are visible to the user being mapped, or if the mapping is for PUBLIC and the current user is the server owner, or if the current user is a superuser. @@ -775,7 +3448,7 @@ Branch: REL9_2_STABLE [1188b9b2c] 2017-08-02 15:07:21 -0400 By itself, this patch will only fix the behavior in newly initdb'd databases. If you wish to apply this change in an existing database, follow the corrected procedure shown in the changelog entry for - CVE-2017-7547, in . + CVE-2017-7547, in . @@ -788,7 +3461,7 @@ Branch: REL9_2_STABLE [1188b9b2c] 2017-08-02 15:07:21 -0400 Some selectivity estimation functions in the planner will apply user-defined operators to values obtained - from pg_statistic, such as most common values and + from pg_statistic, such as most common values and histogram entries. This occurs before table permissions are checked, so a nefarious user could exploit the behavior to obtain these values for table columns he does not have permission to read. To fix, @@ -802,17 +3475,17 @@ Branch: REL9_2_STABLE [1188b9b2c] 2017-08-02 15:07:21 -0400 - Restore libpq's recognition of - the PGREQUIRESSL environment variable (Daniel Gustafsson) + Restore libpq's recognition of + the PGREQUIRESSL environment variable (Daniel Gustafsson) Processing of this environment variable was unintentionally dropped - in PostgreSQL 9.3, but its documentation remained. + in PostgreSQL 9.3, but its documentation remained. This creates a security hazard, since users might be relying on the environment variable to force SSL-encrypted connections, but that would no longer be guaranteed. Restore handling of the variable, - but give it lower priority than PGSSLMODE, to avoid + but give it lower priority than PGSSLMODE, to avoid breaking configurations that work correctly with post-9.3 code. 
(CVE-2017-7485) @@ -843,7 +3516,7 @@ Branch: REL9_2_STABLE [1188b9b2c] 2017-08-02 15:07:21 -0400 - Fix possible corruption of init forks of unlogged indexes + Fix possible corruption of init forks of unlogged indexes (Robert Haas, Michael Paquier) @@ -856,7 +3529,7 @@ Branch: REL9_2_STABLE [1188b9b2c] 2017-08-02 15:07:21 -0400 - Fix incorrect reconstruction of pg_subtrans entries + Fix incorrect reconstruction of pg_subtrans entries when a standby server replays a prepared but uncommitted two-phase transaction (Tom Lane) @@ -864,14 +3537,14 @@ Branch: REL9_2_STABLE [1188b9b2c] 2017-08-02 15:07:21 -0400 In most cases this turned out to have no visible ill effects, but in corner cases it could result in circular references - in pg_subtrans, potentially causing infinite loops + in pg_subtrans, potentially causing infinite loops in queries that examine rows modified by the two-phase transaction. - Avoid possible crash in walsender due to failure + Avoid possible crash in walsender due to failure to initialize a string buffer (Stas Kelvich, Fujii Masao) @@ -885,7 +3558,7 @@ Branch: REL9_2_STABLE [1188b9b2c] 2017-08-02 15:07:21 -0400 - Fix postmaster's handling of fork() failure for a + Fix postmaster's handling of fork() failure for a background worker process (Tom Lane) @@ -902,14 +3575,14 @@ Author: Andrew Gierth Branch: REL9_5_STABLE [7be3678a8] 2017-04-24 07:53:05 +0100 --> - Fix crash or wrong answers when a GROUPING SETS column's + Fix crash or wrong answers when a GROUPING SETS column's data type is hashable but not sortable (Pavan Deolasee) - Avoid applying physical targetlist optimization to custom + Avoid applying physical targetlist optimization to custom scans (Dmitry Ivanov, Tom Lane) @@ -922,13 +3595,13 @@ Branch: REL9_5_STABLE [7be3678a8] 2017-04-24 07:53:05 +0100 - Use the correct sub-expression when applying a FOR ALL + Use the correct sub-expression when applying a FOR ALL row-level-security policy (Stephen Frost) - In some cases the WITH CHECK restriction would be applied - when the USING restriction is more appropriate. + In some cases the WITH CHECK restriction would be applied + when the USING restriction is more appropriate. @@ -942,19 +3615,19 @@ Branch: REL9_5_STABLE [7be3678a8] 2017-04-24 07:53:05 +0100 Due to lack of a cache flush step between commands in an extension script file, non-utility queries might not see the effects of an immediately preceding catalog change, such as ALTER TABLE - ... RENAME. + ... RENAME. Skip tablespace privilege checks when ALTER TABLE ... ALTER - COLUMN TYPE rebuilds an existing index (Noah Misch) + COLUMN TYPE rebuilds an existing index (Noah Misch) The command failed if the calling user did not currently have - CREATE privilege for the tablespace containing the index. + CREATE privilege for the tablespace containing the index. That behavior seems unhelpful, so skip the check, allowing the index to be rebuilt where it is. @@ -962,20 +3635,20 @@ Branch: REL9_5_STABLE [7be3678a8] 2017-04-24 07:53:05 +0100 - Fix ALTER TABLE ... VALIDATE CONSTRAINT to not recurse - to child tables when the constraint is marked NO INHERIT + Fix ALTER TABLE ... VALIDATE CONSTRAINT to not recurse + to child tables when the constraint is marked NO INHERIT (Amit Langote) - This fix prevents unwanted constraint does not exist failures + This fix prevents unwanted constraint does not exist failures when no matching constraint is present in the child tables. - Avoid dangling pointer in COPY ... TO when row-level + Avoid dangling pointer in COPY ... 
TO when row-level security is active for the source table (Tom Lane) @@ -987,8 +3660,8 @@ Branch: REL9_5_STABLE [7be3678a8] 2017-04-24 07:53:05 +0100 - Avoid accessing an already-closed relcache entry in CLUSTER - and VACUUM FULL (Tom Lane) + Avoid accessing an already-closed relcache entry in CLUSTER + and VACUUM FULL (Tom Lane) @@ -999,14 +3672,14 @@ Branch: REL9_5_STABLE [7be3678a8] 2017-04-24 07:53:05 +0100 - Fix VACUUM to account properly for pages that could not + Fix VACUUM to account properly for pages that could not be scanned due to conflicting page pins (Andrew Gierth) This tended to lead to underestimation of the number of tuples in the table. In the worst case of a small heavily-contended - table, VACUUM could incorrectly report that the table + table, VACUUM could incorrectly report that the table contained no tuples, leading to very bad planning choices. @@ -1020,12 +3693,12 @@ Branch: REL9_5_STABLE [7be3678a8] 2017-04-24 07:53:05 +0100 - Fix integer-overflow problems in interval comparison (Kyotaro + Fix integer-overflow problems in interval comparison (Kyotaro Horiguchi, Tom Lane) - The comparison operators for type interval could yield wrong + The comparison operators for type interval could yield wrong answers for intervals larger than about 296000 years. Indexes on columns containing such large values should be reindexed, since they may be corrupt. @@ -1034,21 +3707,21 @@ Branch: REL9_5_STABLE [7be3678a8] 2017-04-24 07:53:05 +0100 - Fix cursor_to_xml() to produce valid output - with tableforest = false + Fix cursor_to_xml() to produce valid output + with tableforest = false (Thomas Munro, Peter Eisentraut) - Previously it failed to produce a wrapping <table> + Previously it failed to produce a wrapping <table> element. - Fix roundoff problems in float8_timestamptz() - and make_interval() (Tom Lane) + Fix roundoff problems in float8_timestamptz() + and make_interval() (Tom Lane) @@ -1060,14 +3733,14 @@ Branch: REL9_5_STABLE [7be3678a8] 2017-04-24 07:53:05 +0100 - Fix pg_get_object_address() to handle members of operator + Fix pg_get_object_address() to handle members of operator families correctly (Álvaro Herrera) - Improve performance of pg_timezone_names view + Improve performance of pg_timezone_names view (Tom Lane, David Rowley) @@ -1081,13 +3754,13 @@ Branch: REL9_5_STABLE [7be3678a8] 2017-04-24 07:53:05 +0100 - Fix sloppy handling of corner-case errors from lseek() - and close() (Tom Lane) + Fix sloppy handling of corner-case errors from lseek() + and close() (Tom Lane) Neither of these system calls are likely to fail in typical situations, - but if they did, fd.c could get quite confused. + but if they did, fd.c could get quite confused. 
@@ -1105,21 +3778,21 @@ Branch: REL9_5_STABLE [7be3678a8] 2017-04-24 07:53:05 +0100 - Fix ecpg to support COMMIT PREPARED - and ROLLBACK PREPARED (Masahiko Sawada) + Fix ecpg to support COMMIT PREPARED + and ROLLBACK PREPARED (Masahiko Sawada) Fix a double-free error when processing dollar-quoted string literals - in ecpg (Michael Meskes) + in ecpg (Michael Meskes) - In pg_dump, fix incorrect schema and owner marking for + In pg_dump, fix incorrect schema and owner marking for comments and security labels of some types of database objects (Giuseppe Broccolo, Tom Lane) @@ -1134,20 +3807,20 @@ Branch: REL9_5_STABLE [7be3678a8] 2017-04-24 07:53:05 +0100 - Avoid emitting an invalid list file in pg_restore -l + Avoid emitting an invalid list file in pg_restore -l when SQL object names contain newlines (Tom Lane) Replace newlines by spaces, which is sufficient to make the output - valid for pg_restore -L's purposes. + valid for pg_restore -L's purposes. - Fix pg_upgrade to transfer comments and security labels - attached to large objects (blobs) (Stephen Frost) + Fix pg_upgrade to transfer comments and security labels + attached to large objects (blobs) (Stephen Frost) @@ -1159,26 +3832,26 @@ Branch: REL9_5_STABLE [7be3678a8] 2017-04-24 07:53:05 +0100 Improve error handling - in contrib/adminpack's pg_file_write() + in contrib/adminpack's pg_file_write() function (Noah Misch) Notably, it failed to detect errors reported - by fclose(). + by fclose(). - In contrib/dblink, avoid leaking the previous unnamed + In contrib/dblink, avoid leaking the previous unnamed connection when establishing a new unnamed connection (Joe Conway) - Fix contrib/pg_trgm's extraction of trigrams from regular + Fix contrib/pg_trgm's extraction of trigrams from regular expressions (Tom Lane) @@ -1197,7 +3870,7 @@ Branch: REL9_4_STABLE [f14bf0a8f] 2017-05-06 22:19:56 -0400 Branch: REL9_3_STABLE [3aa16b117] 2017-05-06 22:17:35 -0400 --> - In contrib/postgres_fdw, + In contrib/postgres_fdw, transmit query cancellation requests to the remote server (Michael Paquier, Etsuro Fujita) @@ -1228,7 +3901,7 @@ Branch: REL9_3_STABLE [3aa16b117] 2017-05-06 22:17:35 -0400 - Update time zone data files to tzdata release 2017b + Update time zone data files to tzdata release 2017b for DST law changes in Chile, Haiti, and Mongolia, plus historical corrections for Ecuador, Kazakhstan, Liberia, and Spain. Switch to numeric abbreviations for numerous time zones in South @@ -1242,9 +3915,9 @@ Branch: REL9_3_STABLE [3aa16b117] 2017-05-06 22:17:35 -0400 or no currency among the local population. They are in process of reversing that policy in favor of using numeric UTC offsets in zones where there is no evidence of real-world use of an English - abbreviation. At least for the time being, PostgreSQL + abbreviation. At least for the time being, PostgreSQL will continue to accept such removed abbreviations for timestamp input. - But they will not be shown in the pg_timezone_names + But they will not be shown in the pg_timezone_names view nor used for output. @@ -1257,16 +3930,16 @@ Branch: REL9_3_STABLE [3aa16b117] 2017-05-06 22:17:35 -0400 The Microsoft MSVC build scripts neglected to install - the posixrules file in the timezone directory tree. + the posixrules file in the timezone directory tree. This resulted in the timezone code falling back to its built-in rule about what DST behavior to assume for a POSIX-style time zone name. 
For historical reasons that still corresponds to the DST rules the USA was using before 2007 (i.e., change on first Sunday in April and last Sunday in October). With this fix, a POSIX-style zone name will use the current and historical DST transition dates of - the US/Eastern zone. If you don't want that, remove - the posixrules file, or replace it with a copy of some - other zone file (see ). Note that + the US/Eastern zone. If you don't want that, remove + the posixrules file, or replace it with a copy of some + other zone file (see ). Note that due to caching, you may need to restart the server to get such changes to take effect. @@ -1288,7 +3961,7 @@ Branch: REL9_3_STABLE [3aa16b117] 2017-05-06 22:17:35 -0400 This release contains a variety of fixes from 9.5.5. For information about new features in the 9.5 major release, see - . + . @@ -1306,7 +3979,7 @@ Branch: REL9_3_STABLE [3aa16b117] 2017-05-06 22:17:35 -0400 Also, if you are upgrading from a version earlier than 9.5.5, - see . + see . @@ -1318,15 +3991,15 @@ Branch: REL9_3_STABLE [3aa16b117] 2017-05-06 22:17:35 -0400 Fix a race condition that could cause indexes built - with CREATE INDEX CONCURRENTLY to be corrupt + with CREATE INDEX CONCURRENTLY to be corrupt (Pavan Deolasee, Tom Lane) - If CREATE INDEX CONCURRENTLY was used to build an index + If CREATE INDEX CONCURRENTLY was used to build an index that depends on a column not previously indexed, then rows updated by transactions that ran concurrently with - the CREATE INDEX command could have received incorrect + the CREATE INDEX command could have received incorrect index entries. If you suspect this may have happened, the most reliable solution is to rebuild affected indexes after installing this update. @@ -1343,7 +4016,7 @@ Branch: REL9_3_STABLE [3aa16b117] 2017-05-06 22:17:35 -0400 Backends failed to account for this snapshot when advertising their oldest xmin, potentially allowing concurrent vacuuming operations to remove data that was still needed. This led to transient failures - along the lines of cache lookup failed for relation 1255. + along the lines of cache lookup failed for relation 1255. @@ -1353,7 +4026,7 @@ Branch: REL9_3_STABLE [3aa16b117] 2017-05-06 22:17:35 -0400 - The WAL record emitted for a BRIN revmap page when moving an + The WAL record emitted for a BRIN revmap page when moving an index tuple to a different page was incorrect. Replay would make the related portion of the index useless, forcing it to be recomputed. @@ -1361,13 +4034,13 @@ Branch: REL9_3_STABLE [3aa16b117] 2017-05-06 22:17:35 -0400 - Unconditionally WAL-log creation of the init fork for an + Unconditionally WAL-log creation of the init fork for an unlogged table (Michael Paquier) - Previously, this was skipped when - = minimal, but actually it's necessary even in that case + Previously, this was skipped when + = minimal, but actually it's necessary even in that case to ensure that the unlogged table is properly reset to empty after a crash. @@ -1438,13 +4111,13 @@ Branch: REL9_4_STABLE [30e3cb307] 2016-11-17 13:31:30 -0300 - Make sure ALTER TABLE preserves index tablespace + Make sure ALTER TABLE preserves index tablespace assignments when rebuilding indexes (Tom Lane, Michael Paquier) Previously, non-default settings - of could result in broken + of could result in broken indexes. 
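A quick way to check where a rebuilt index ends up, assuming a pre-existing tablespace (called my_tblspc here; all object names are illustrative):

CREATE TABLE tblspc_demo (id int, val text);
CREATE INDEX tblspc_demo_val_idx ON tblspc_demo (val) TABLESPACE my_tblspc;
-- changing the column type forces the index to be rebuilt
ALTER TABLE tblspc_demo ALTER COLUMN val TYPE varchar(100);
SELECT c.relname, t.spcname
FROM pg_class c
LEFT JOIN pg_tablespace t ON t.oid = c.reltablespace
WHERE c.relname = 'tblspc_demo_val_idx';

With the fix, the rebuilt index still reports my_tblspc; the preservation matters particularly when a non-default tablespace-selection setting is in effect at the time of the rebuild.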
@@ -1453,7 +4126,7 @@ Branch: REL9_4_STABLE [30e3cb307] 2016-11-17 13:31:30 -0300 Fix incorrect updating of trigger function properties when changing a foreign-key constraint's deferrability properties with ALTER - TABLE ... ALTER CONSTRAINT (Tom Lane) + TABLE ... ALTER CONSTRAINT (Tom Lane) @@ -1469,29 +4142,29 @@ Branch: REL9_4_STABLE [30e3cb307] 2016-11-17 13:31:30 -0300 - This avoids could not find trigger NNN - or relation NNN has no triggers errors. + This avoids could not find trigger NNN + or relation NNN has no triggers errors. - Fix ALTER TABLE ... SET DATA TYPE ... USING when child + Fix ALTER TABLE ... SET DATA TYPE ... USING when child table has different column ordering than the parent (Álvaro Herrera) - Failure to adjust the column numbering in the USING + Failure to adjust the column numbering in the USING expression led to errors, - typically attribute N has wrong type. + typically attribute N has wrong type. Fix processing of OID column when a table with OIDs is associated to - a parent with OIDs via ALTER TABLE ... INHERIT (Amit + a parent with OIDs via ALTER TABLE ... INHERIT (Amit Langote) @@ -1504,7 +4177,7 @@ Branch: REL9_4_STABLE [30e3cb307] 2016-11-17 13:31:30 -0300 - Fix CREATE OR REPLACE VIEW to update the view query + Fix CREATE OR REPLACE VIEW to update the view query before attempting to apply the new view options (Dean Rasheed) @@ -1517,7 +4190,7 @@ Branch: REL9_4_STABLE [30e3cb307] 2016-11-17 13:31:30 -0300 Report correct object identity during ALTER TEXT SEARCH - CONFIGURATION (Artur Zakirov) + CONFIGURATION (Artur Zakirov) @@ -1529,8 +4202,8 @@ Branch: REL9_4_STABLE [30e3cb307] 2016-11-17 13:31:30 -0300 Fix commit timestamp mechanism to not fail when queried about - the special XIDs FrozenTransactionId - and BootstrapTransactionId (Craig Ringer) + the special XIDs FrozenTransactionId + and BootstrapTransactionId (Craig Ringer) @@ -1568,28 +4241,28 @@ Branch: REL9_2_STABLE [60314e28e] 2016-12-13 19:08:09 -0600 The symptom was spurious ON CONFLICT is not supported on table - ... used as a catalog table errors when the target - of INSERT ... ON CONFLICT is a view with cascade option. + ... used as a catalog table errors when the target + of INSERT ... ON CONFLICT is a view with cascade option. - Fix incorrect target lists can have at most N - entries complaint when using ON CONFLICT with + Fix incorrect target lists can have at most N + entries complaint when using ON CONFLICT with wide tables (Tom Lane) - Prevent multicolumn expansion of foo.* in - an UPDATE source expression (Tom Lane) + Prevent multicolumn expansion of foo.* in + an UPDATE source expression (Tom Lane) This led to UPDATE target count mismatch --- internal - error. Now the syntax is understood as a whole-row variable, + error. Now the syntax is understood as a whole-row variable, as it would be in other contexts. @@ -1597,12 +4270,12 @@ Branch: REL9_2_STABLE [60314e28e] 2016-12-13 19:08:09 -0600 Ensure that column typmods are determined accurately for - multi-row VALUES constructs (Tom Lane) + multi-row VALUES constructs (Tom Lane) This fixes problems occurring when the first value in a column has a - determinable typmod (e.g., length for a varchar value) but + determinable typmod (e.g., length for a varchar value) but later values don't share the same limit. 
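A minimal sketch of the multi-row VALUES typmod entry just above (table and column names are made up; the resolution rule is assumed to follow the UNION-style "keep the typmod only when every row agrees" behavior):

    CREATE TABLE vt AS
      SELECT * FROM (VALUES ('abcd'::varchar(4)), ('abcdefgh')) AS v(col);
    SELECT format_type(atttypid, atttypmod)
      FROM pg_attribute
     WHERE attrelid = 'vt'::regclass AND attname = 'col';
    -- The first row alone would suggest varchar(4); because the second row does
    -- not share that limit, the column should come out as plain varchar, leaving
    -- the longer literal intact.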
@@ -1617,15 +4290,15 @@ Branch: REL9_2_STABLE [60314e28e] 2016-12-13 19:08:09 -0600 Normally, a Unicode surrogate leading character must be followed by a Unicode surrogate trailing character, but the check for this was missed if the leading character was the last character in a Unicode - string literal (U&'...') or Unicode identifier - (U&"..."). + string literal (U&'...') or Unicode identifier + (U&"..."). Ensure that a purely negative text search query, such - as !foo, matches empty tsvectors (Tom Dunstan) + as !foo, matches empty tsvectors (Tom Dunstan) @@ -1636,20 +4309,20 @@ Branch: REL9_2_STABLE [60314e28e] 2016-12-13 19:08:09 -0600 - Prevent crash when ts_rewrite() replaces a non-top-level + Prevent crash when ts_rewrite() replaces a non-top-level subtree with an empty query (Artur Zakirov) - Fix performance problems in ts_rewrite() (Tom Lane) + Fix performance problems in ts_rewrite() (Tom Lane) - Fix ts_rewrite()'s handling of nested NOT operators + Fix ts_rewrite()'s handling of nested NOT operators (Tom Lane) @@ -1657,27 +4330,27 @@ Branch: REL9_2_STABLE [60314e28e] 2016-12-13 19:08:09 -0600 Improve speed of user-defined aggregates that - use array_append() as transition function (Tom Lane) + use array_append() as transition function (Tom Lane) - Fix array_fill() to handle empty arrays properly (Tom Lane) + Fix array_fill() to handle empty arrays properly (Tom Lane) - Fix possible crash in array_position() - or array_positions() when processing arrays of records + Fix possible crash in array_position() + or array_positions() when processing arrays of records (Junseok Yang) - Fix one-byte buffer overrun in quote_literal_cstr() + Fix one-byte buffer overrun in quote_literal_cstr() (Heikki Linnakangas) @@ -1689,8 +4362,8 @@ Branch: REL9_2_STABLE [60314e28e] 2016-12-13 19:08:09 -0600 - Prevent multiple calls of pg_start_backup() - and pg_stop_backup() from running concurrently (Michael + Prevent multiple calls of pg_start_backup() + and pg_stop_backup() from running concurrently (Michael Paquier) @@ -1703,7 +4376,7 @@ Branch: REL9_2_STABLE [60314e28e] 2016-12-13 19:08:09 -0600 Disable transform that attempted to remove no-op AT TIME - ZONE conversions (Tom Lane) + ZONE conversions (Tom Lane) @@ -1714,15 +4387,15 @@ Branch: REL9_2_STABLE [60314e28e] 2016-12-13 19:08:09 -0600 - Avoid discarding interval-to-interval casts + Avoid discarding interval-to-interval casts that aren't really no-ops (Tom Lane) In some cases, a cast that should result in zeroing out - low-order interval fields was mistakenly deemed to be a + low-order interval fields was mistakenly deemed to be a no-op and discarded. An example is that casting from INTERVAL - MONTH to INTERVAL YEAR failed to clear the months field. + MONTH to INTERVAL YEAR failed to clear the months field. @@ -1742,28 +4415,28 @@ Branch: REL9_2_STABLE [60314e28e] 2016-12-13 19:08:09 -0600 - Fix pg_dump to dump user-defined casts and transforms + Fix pg_dump to dump user-defined casts and transforms that use built-in functions (Stephen Frost) - Fix pg_restore with to behave more sanely if an archive contains - unrecognized DROP commands (Tom Lane) + unrecognized DROP commands (Tom Lane) This doesn't fix any live bug, but it may improve the behavior in - future if pg_restore is used with an archive - generated by a later pg_dump version. + future if pg_restore is used with an archive + generated by a later pg_dump version. 
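To illustrate the interval-to-interval cast entry earlier in this group (the exact output formatting is assumed):

    SELECT CAST(CAST('5 months' AS interval month) AS interval year);
    -- The disallowed months field is now cleared, yielding a zero interval,
    -- instead of the cast being discarded as a no-op and '5 mons' leaking through.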
- Fix pg_basebackup's rate limiting in the presence of + Fix pg_basebackup's rate limiting in the presence of slow I/O (Antonin Houska) @@ -1776,15 +4449,15 @@ Branch: REL9_2_STABLE [60314e28e] 2016-12-13 19:08:09 -0600 - Fix pg_basebackup's handling of - symlinked pg_stat_tmp and pg_replslot + Fix pg_basebackup's handling of + symlinked pg_stat_tmp and pg_replslot subdirectories (Magnus Hagander, Michael Paquier) - Fix possible pg_basebackup failure on standby + Fix possible pg_basebackup failure on standby server when including WAL files (Amit Kapila, Robert Haas) @@ -1792,7 +4465,7 @@ Branch: REL9_2_STABLE [60314e28e] 2016-12-13 19:08:09 -0600 Fix possible mishandling of expanded arrays in domain check - constraints and CASE execution (Tom Lane) + constraints and CASE execution (Tom Lane) @@ -1824,21 +4497,21 @@ Branch: REL9_2_STABLE [60314e28e] 2016-12-13 19:08:09 -0600 - Fix PL/Tcl to support triggers on tables that have .tupno + Fix PL/Tcl to support triggers on tables that have .tupno as a column name (Tom Lane) This matches the (previously undocumented) behavior of - PL/Tcl's spi_exec and spi_execp commands, - namely that a magic .tupno column is inserted only if + PL/Tcl's spi_exec and spi_execp commands, + namely that a magic .tupno column is inserted only if there isn't a real column named that. - Allow DOS-style line endings in ~/.pgpass files, + Allow DOS-style line endings in ~/.pgpass files, even on Unix (Vik Fearing) @@ -1850,23 +4523,23 @@ Branch: REL9_2_STABLE [60314e28e] 2016-12-13 19:08:09 -0600 - Fix one-byte buffer overrun if ecpg is given a file + Fix one-byte buffer overrun if ecpg is given a file name that ends with a dot (Takayuki Tsunakawa) - Fix psql's tab completion for ALTER DEFAULT - PRIVILEGES (Gilles Darold, Stephen Frost) + Fix psql's tab completion for ALTER DEFAULT + PRIVILEGES (Gilles Darold, Stephen Frost) - In psql, treat an empty or all-blank setting of - the PAGER environment variable as meaning no - pager (Tom Lane) + In psql, treat an empty or all-blank setting of + the PAGER environment variable as meaning no + pager (Tom Lane) @@ -1877,28 +4550,28 @@ Branch: REL9_2_STABLE [60314e28e] 2016-12-13 19:08:09 -0600 - Improve contrib/dblink's reporting of - low-level libpq errors, such as out-of-memory + Improve contrib/dblink's reporting of + low-level libpq errors, such as out-of-memory (Joe Conway) - Teach contrib/dblink to ignore irrelevant server options - when it uses a contrib/postgres_fdw foreign server as + Teach contrib/dblink to ignore irrelevant server options + when it uses a contrib/postgres_fdw foreign server as the source of connection options (Corey Huinker) Previously, if the foreign server object had options that were not - also libpq connection options, an error occurred. + also libpq connection options, an error occurred. - Fix portability problems in contrib/pageinspect's + Fix portability problems in contrib/pageinspect's functions for GIN indexes (Peter Eisentraut, Tom Lane) @@ -1925,7 +4598,7 @@ Branch: REL9_2_STABLE [60314e28e] 2016-12-13 19:08:09 -0600 - Update time zone data files to tzdata release 2016j + Update time zone data files to tzdata release 2016j for DST law changes in northern Cyprus (adding a new zone Asia/Famagusta), Russia (adding a new zone Europe/Saratov), Tonga, and Antarctica/Casey. @@ -1950,7 +4623,7 @@ Branch: REL9_2_STABLE [60314e28e] 2016-12-13 19:08:09 -0600 This release contains a variety of fixes from 9.5.4. For information about new features in the 9.5 major release, see - . + . 
@@ -1968,7 +4641,7 @@ Branch: REL9_2_STABLE [60314e28e] 2016-12-13 19:08:09 -0600 Also, if you are upgrading from a version earlier than 9.5.2, - see . + see . @@ -1988,7 +4661,7 @@ Branch: REL9_2_STABLE [60314e28e] 2016-12-13 19:08:09 -0600 crash recovery, or to be written incorrectly on a standby server. Bogus entries in a free space map could lead to attempts to access pages that have been truncated away from the relation itself, typically - producing errors like could not read block XXX: + producing errors like could not read block XXX: read only 0 of 8192 bytes. Checksum failures in the visibility map are also possible, if checksumming is enabled. @@ -1996,7 +4669,7 @@ Branch: REL9_2_STABLE [60314e28e] 2016-12-13 19:08:09 -0600 Procedures for determining whether there is a problem and repairing it if so are discussed at - . + . @@ -2014,7 +4687,7 @@ Branch: REL9_4_STABLE [a69443564] 2016-09-03 13:28:53 -0400 - The typical symptom was unexpected GIN leaf action errors + The typical symptom was unexpected GIN leaf action errors during WAL replay. @@ -2029,13 +4702,13 @@ Branch: REL9_4_STABLE [8778da2af] 2016-09-09 15:54:29 -0300 Branch: REL9_3_STABLE [dfe7121df] 2016-09-09 15:54:29 -0300 --> - Fix SELECT FOR UPDATE/SHARE to correctly lock tuples that + Fix SELECT FOR UPDATE/SHARE to correctly lock tuples that have been updated by a subsequently-aborted transaction (Álvaro Herrera) - In 9.5 and later, the SELECT would sometimes fail to + In 9.5 and later, the SELECT would sometimes fail to return such tuples at all. A failure has not been proven to occur in earlier releases, but might be possible with concurrent updates. @@ -2071,13 +4744,13 @@ Branch: REL9_5_STABLE [94bc30725] 2016-08-17 17:03:36 -0700 --> Fix deletion of speculatively inserted TOAST tuples when backing out - of INSERT ... ON CONFLICT (Oskari Saarenmaa) + of INSERT ... ON CONFLICT (Oskari Saarenmaa) In the race condition where two transactions try to insert conflicting tuples at about the same time, the loser would fail with - an attempted to delete invisible tuple error if its + an attempted to delete invisible tuple error if its insertion included any TOAST'ed fields. @@ -2085,7 +4758,7 @@ Branch: REL9_5_STABLE [94bc30725] 2016-08-17 17:03:36 -0700 Don't throw serialization errors for self-conflicting insertions - in INSERT ... ON CONFLICT (Thomas Munro, Peter Geoghegan) + in INSERT ... ON CONFLICT (Thomas Munro, Peter Geoghegan) @@ -2123,29 +4796,29 @@ Branch: REL9_5_STABLE [46bd14a10] 2016-08-24 22:20:01 -0400 Branch: REL9_4_STABLE [566afa15c] 2016-08-24 22:20:01 -0400 --> - Fix query-lifespan memory leak in a bulk UPDATE on a table - with a PRIMARY KEY or REPLICA IDENTITY index + Fix query-lifespan memory leak in a bulk UPDATE on a table + with a PRIMARY KEY or REPLICA IDENTITY index (Tom Lane) - Fix COPY with a column name list from a table that has + Fix COPY with a column name list from a table that has row-level security enabled (Adam Brightwell) - Fix EXPLAIN to emit valid XML when - is on (Markus Winand) + Fix EXPLAIN to emit valid XML when + is on (Markus Winand) Previously the XML output-format option produced syntactically invalid - tags such as <I/O-Read-Time>. That is now - rendered as <I-O-Read-Time>. + tags such as <I/O-Read-Time>. That is now + rendered as <I-O-Read-Time>. 
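A hedged sketch of the EXPLAIN XML entry just above (track_io_timing can only be set by a superuser, and I/O timing fields appear only when some reads are actually timed):

    SET track_io_timing = on;
    EXPLAIN (ANALYZE, BUFFERS, FORMAT XML) SELECT count(*) FROM pg_class;
    -- Any I/O timing fields are now emitted with well-formed tag names such as
    -- <I-O-Read-Time>, instead of the invalid <I/O-Read-Time>.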
@@ -2160,20 +4833,20 @@ Branch: REL9_2_STABLE [ceb005319] 2016-08-12 12:13:04 -0400 --> Suppress printing of zeroes for unmeasured times - in EXPLAIN (Maksim Milyutin) + in EXPLAIN (Maksim Milyutin) Certain option combinations resulted in printing zero values for times that actually aren't ever measured in that combination. Our general - policy in EXPLAIN is not to print such fields at all, so + policy in EXPLAIN is not to print such fields at all, so do that consistently in all cases. - Fix statistics update for TRUNCATE in a prepared + Fix statistics update for TRUNCATE in a prepared transaction (Stas Kelvich) @@ -2190,37 +4863,37 @@ Branch: REL9_2_STABLE [eaf6fe7fa] 2016-09-09 11:45:40 +0100 Branch: REL9_1_STABLE [3ed7f54bc] 2016-09-09 11:46:03 +0100 --> - Fix timeout length when VACUUM is waiting for exclusive + Fix timeout length when VACUUM is waiting for exclusive table lock so that it can truncate the table (Simon Riggs) The timeout was meant to be 50 milliseconds, but it was actually only - 50 microseconds, causing VACUUM to give up on truncation + 50 microseconds, causing VACUUM to give up on truncation much more easily than intended. Set it to the intended value. - Fix bugs in merging inherited CHECK constraints while + Fix bugs in merging inherited CHECK constraints while creating or altering a table (Tom Lane, Amit Langote) - Allow identical CHECK constraints to be added to a parent + Allow identical CHECK constraints to be added to a parent and child table in either order. Prevent merging of a valid - constraint from the parent table with a NOT VALID + constraint from the parent table with a NOT VALID constraint on the child. Likewise, prevent merging of a NO - INHERIT child constraint with an inherited constraint. + INHERIT child constraint with an inherited constraint. Show a sensible value - in pg_settings.unit - for min_wal_size and max_wal_size (Tom Lane) + in pg_settings.unit + for min_wal_size and max_wal_size (Tom Lane) @@ -2236,15 +4909,15 @@ Branch: REL9_1_STABLE [7e01c8ef3] 2016-08-14 15:06:02 -0400 --> Remove artificial restrictions on the values accepted - by numeric_in() and numeric_recv() + by numeric_in() and numeric_recv() (Tom Lane) We allow numeric values up to the limit of the storage format (more - than 1e100000), so it seems fairly pointless - that numeric_in() rejected scientific-notation exponents - above 1000. Likewise, it was silly for numeric_recv() to + than 1e100000), so it seems fairly pointless + that numeric_in() rejected scientific-notation exponents + above 1000. Likewise, it was silly for numeric_recv() to reject more than 1000 digits in an input value. @@ -2264,7 +4937,7 @@ Branch: REL9_1_STABLE [7e01c8ef3] 2016-08-14 15:06:02 -0400 - With turned on, old + With turned on, old commit timestamps became inaccessible after a clean server restart. @@ -2290,7 +4963,7 @@ Branch: REL9_5_STABLE [da9659f87] 2016-08-22 15:30:37 -0400 In the worst case, this could result in a corrupt btree index, which - would need to be rebuilt using REINDEX. However, the + would need to be rebuilt using REINDEX. However, the situation is believed to be rare. 
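The numeric_in() entry above can be exercised directly; for example:

    SELECT '1e2000'::numeric;
    -- Scientific-notation exponents above 1000 were previously rejected; values
    -- up to the storage-format limit (beyond 1e100000) are now accepted.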
@@ -2324,7 +4997,7 @@ Branch: REL9_2_STABLE [823df401d] 2016-08-31 08:52:13 -0400 Branch: REL9_1_STABLE [e3439a455] 2016-08-31 08:52:13 -0400 --> - Disallow starting a standalone backend with standby_mode + Disallow starting a standalone backend with standby_mode turned on (Michael Paquier) @@ -2350,7 +5023,7 @@ Branch: REL9_4_STABLE [690a2fb90] 2016-08-17 13:15:04 -0700 This failure to reset all of the fields of the slot could - prevent VACUUM from removing dead tuples. + prevent VACUUM from removing dead tuples. @@ -2361,7 +5034,7 @@ Branch: REL9_4_STABLE [690a2fb90] 2016-08-17 13:15:04 -0700 - This avoids possible failures during munmap() on systems + This avoids possible failures during munmap() on systems with atypical default huge page sizes. Except in crash-recovery cases, there were no ill effects other than a log message. @@ -2387,7 +5060,7 @@ Branch: REL9_4_STABLE [32cdf680f] 2016-09-23 09:54:11 -0400 Previously, the same value would be chosen every time, because it was - derived from random() but srandom() had not + derived from random() but srandom() had not yet been called. While relatively harmless, this was not the intended behavior. @@ -2407,8 +5080,8 @@ Branch: REL9_4_STABLE [c23b2523d] 2016-09-20 12:12:36 -0400 - Windows sometimes returns ERROR_ACCESS_DENIED rather - than ERROR_ALREADY_EXISTS when there is an existing + Windows sometimes returns ERROR_ACCESS_DENIED rather + than ERROR_ALREADY_EXISTS when there is an existing segment. This led to postmaster startup failure due to believing that the former was an unrecoverable error. @@ -2422,8 +5095,8 @@ Branch: REL9_6_STABLE Release: REL9_6_0 [c81c71d88] 2016-08-18 14:48:51 -0400 Branch: REL9_5_STABLE [a8fc19505] 2016-08-18 14:48:51 -0400 --> - Fix PL/pgSQL to not misbehave with parameters and - local variables of type int2vector or oidvector + Fix PL/pgSQL to not misbehave with parameters and + local variables of type int2vector or oidvector (Tom Lane) @@ -2431,7 +5104,7 @@ Branch: REL9_5_STABLE [a8fc19505] 2016-08-18 14:48:51 -0400 Don't try to share SSL contexts across multiple connections - in libpq (Heikki Linnakangas) + in libpq (Heikki Linnakangas) @@ -2442,12 +5115,12 @@ Branch: REL9_5_STABLE [a8fc19505] 2016-08-18 14:48:51 -0400 - Avoid corner-case memory leak in libpq (Tom Lane) + Avoid corner-case memory leak in libpq (Tom Lane) The reported problem involved leaking an error report - during PQreset(), but there might be related cases. + during PQreset(), but there might be related cases. @@ -2463,7 +5136,7 @@ Branch: REL9_2_STABLE [a4a3fac16] 2016-09-18 14:00:13 +0300 Branch: REL9_1_STABLE [ed29d2de2] 2016-09-18 14:07:30 +0300 --> - Make ecpg's and options work consistently with our other executables (Haribabu Kommi) @@ -2481,12 +5154,12 @@ Branch: REL9_5_STABLE [b93d37474] 2016-09-21 13:16:20 +0300 Branch: REL9_4_STABLE [f16d4a241] 2016-09-21 13:16:24 +0300 --> - Fix pgbench's calculation of average latency + Fix pgbench's calculation of average latency (Fabien Coelho) - The calculation was incorrect when there were \sleep + The calculation was incorrect when there were \sleep commands in the script, or when the test duration was specified in number of transactions rather than total time. 
@@ -2494,7 +5167,7 @@ Branch: REL9_4_STABLE [f16d4a241] 2016-09-21 13:16:24 +0300 - In pg_upgrade, check library loadability in name order + In pg_upgrade, check library loadability in name order (Tom Lane) @@ -2516,12 +5189,12 @@ Branch: REL9_3_STABLE [f39bb487d] 2016-09-23 13:49:27 -0400 Branch: REL9_2_STABLE [53b29d986] 2016-09-23 13:49:27 -0400 --> - In pg_dump, never dump range constructor functions + In pg_dump, never dump range constructor functions (Tom Lane) - This oversight led to pg_upgrade failures with + This oversight led to pg_upgrade failures with extensions containing range types, due to duplicate creation of the constructor functions. @@ -2535,9 +5208,9 @@ Branch: REL9_6_STABLE Release: REL9_6_0 [a88cee90f] 2016-09-08 10:48:03 -0400 Branch: REL9_5_STABLE [142a110b3] 2016-09-08 10:48:03 -0400 --> - In pg_dump with @@ -2550,27 +5223,27 @@ Branch: REL9_5_STABLE [9050e5c89] 2016-08-29 12:18:57 +0100 Branch: REL9_5_STABLE [3aa233f82] 2016-08-29 18:12:04 -0300 --> - Make pg_receivexlog work correctly - with without slots (Gabriele Bartolini) - Disallow specifying both - Make pg_rewind turn off synchronous_commit + Make pg_rewind turn off synchronous_commit in its session on the source server (Michael Banck, Michael Paquier) - This allows pg_rewind to work even when the source + This allows pg_rewind to work even when the source server is using synchronous replication that is not working for some reason. @@ -2578,8 +5251,8 @@ Branch: REL9_5_STABLE [3aa233f82] 2016-08-29 18:12:04 -0300 - In pg_xlogdump, retry opening new WAL segments when - using option (Magnus Hagander) @@ -2598,7 +5271,7 @@ Branch: REL9_4_STABLE [314a25fb3] 2016-08-29 14:38:17 +0900 Branch: REL9_3_STABLE [5833306dd] 2016-08-29 15:51:30 +0900 --> - Fix pg_xlogdump to cope with a WAL file that begins + Fix pg_xlogdump to cope with a WAL file that begins with a continuation record spanning more than one page (Pavan Deolasee) @@ -2613,8 +5286,8 @@ Branch: REL9_5_STABLE [60b6d99da] 2016-09-15 09:30:36 -0400 Branch: REL9_4_STABLE [1336bd986] 2016-09-15 09:22:52 -0400 --> - Fix contrib/pg_buffercache to work - when shared_buffers exceeds 256GB (KaiGai Kohei) + Fix contrib/pg_buffercache to work + when shared_buffers exceeds 256GB (KaiGai Kohei) @@ -2630,8 +5303,8 @@ Branch: REL9_2_STABLE [60bb1bb12] 2016-08-17 15:51:11 -0400 Branch: REL9_1_STABLE [9942376a5] 2016-08-17 15:51:11 -0400 --> - Fix contrib/intarray/bench/bench.pl to print the results - of the EXPLAIN it does when given the option (Daniel Gustafsson) @@ -2665,11 +5338,11 @@ Branch: REL9_4_STABLE [5d41f27a9] 2016-09-23 15:50:00 -0400 - When PostgreSQL has been configured - with @@ -2682,7 +5355,7 @@ Branch: REL9_5_STABLE [52acf020a] 2016-09-19 14:27:08 -0400 Branch: REL9_4_STABLE [ca93b816f] 2016-09-19 14:27:13 -0400 --> - In MSVC builds, include pg_recvlogical in a + In MSVC builds, include pg_recvlogical in a client-only installation (MauMau) @@ -2722,17 +5395,17 @@ Branch: REL9_1_STABLE [380dad29d] 2016-09-02 17:29:32 -0400 If a dynamic time zone abbreviation does not match any entry in the referenced time zone, treat it as equivalent to the time zone name. This avoids unexpected failures when IANA removes abbreviations from - their time zone database, as they did in tzdata + their time zone database, as they did in tzdata release 2016f and seem likely to do again in the future. The consequences were not limited to not recognizing the individual abbreviation; any mismatch caused - the pg_timezone_abbrevs view to fail altogether. 
+ the pg_timezone_abbrevs view to fail altogether. - Update time zone data files to tzdata release 2016h + Update time zone data files to tzdata release 2016h for DST law changes in Palestine and Turkey, plus historical corrections for Turkey and some regions of Russia. Switch to numeric abbreviations for some time zones in Antarctica, @@ -2745,15 +5418,15 @@ Branch: REL9_1_STABLE [380dad29d] 2016-09-02 17:29:32 -0400 or no currency among the local population. They are in process of reversing that policy in favor of using numeric UTC offsets in zones where there is no evidence of real-world use of an English - abbreviation. At least for the time being, PostgreSQL + abbreviation. At least for the time being, PostgreSQL will continue to accept such removed abbreviations for timestamp input. - But they will not be shown in the pg_timezone_names + But they will not be shown in the pg_timezone_names view nor used for output. - In this update, AMT is no longer shown as being in use to - mean Armenia Time. Therefore, we have changed the Default + In this update, AMT is no longer shown as being in use to + mean Armenia Time. Therefore, we have changed the Default abbreviation set to interpret it as Amazon Time, thus UTC-4 not UTC+4. @@ -2774,7 +5447,7 @@ Branch: REL9_1_STABLE [380dad29d] 2016-09-02 17:29:32 -0400 This release contains a variety of fixes from 9.5.3. For information about new features in the 9.5 major release, see - . + . @@ -2786,7 +5459,7 @@ Branch: REL9_1_STABLE [380dad29d] 2016-09-02 17:29:32 -0400 However, if you are upgrading from a version earlier than 9.5.2, - see . + see . @@ -2807,17 +5480,17 @@ Branch: REL9_1_STABLE [5327b764a] 2016-08-08 10:33:47 -0400 --> Fix possible mis-evaluation of - nested CASE-WHEN expressions (Heikki + nested CASE-WHEN expressions (Heikki Linnakangas, Michael Paquier, Tom Lane) - A CASE expression appearing within the test value - subexpression of another CASE could become confused about + A CASE expression appearing within the test value + subexpression of another CASE could become confused about whether its own test value was null or not. Also, inlining of a SQL function implementing the equality operator used by - a CASE expression could result in passing the wrong test - value to functions called within a CASE expression in the + a CASE expression could result in passing the wrong test + value to functions called within a CASE expression in the SQL function's body. If the test values were of different data types, a crash might result; moreover such situations could be abused to allow disclosure of portions of server memory. (CVE-2016-5423) @@ -2878,7 +5551,7 @@ Branch: REL9_1_STABLE [aed766ab5] 2016-08-08 10:07:53 -0400 - Numerous places in vacuumdb and other client programs + Numerous places in vacuumdb and other client programs could become confused by database and role names containing double quotes or backslashes. Tighten up quoting rules to make that safe. Also, ensure that when a conninfo string is used as a database name @@ -2887,22 +5560,22 @@ Branch: REL9_1_STABLE [aed766ab5] 2016-08-08 10:07:53 -0400 Fix handling of paired double quotes - in psql's \connect - and \password commands to match the documentation. + in psql's \connect + and \password commands to match the documentation. - Introduce a new - pg_dumpall now refuses to deal with database and role + pg_dumpall now refuses to deal with database and role names containing carriage returns or newlines, as it seems impractical to quote those characters safely on Windows. 
In future we may reject such names on the server side, but that step has not been taken yet. @@ -2912,7 +5585,7 @@ Branch: REL9_1_STABLE [aed766ab5] 2016-08-08 10:07:53 -0400 These are considered security fixes because crafted object names containing special characters could have been used to execute commands with superuser privileges the next time a superuser - executes pg_dumpall or other routine maintenance + executes pg_dumpall or other routine maintenance operations. (CVE-2016-5424) @@ -2934,18 +5607,18 @@ Branch: REL9_2_STABLE [7b8526e5d] 2016-07-28 16:09:15 -0400 Branch: REL9_1_STABLE [c0e5096fc] 2016-07-28 16:09:15 -0400 --> - Fix corner-case misbehaviors for IS NULL/IS NOT - NULL applied to nested composite values (Andrew Gierth, Tom Lane) + Fix corner-case misbehaviors for IS NULL/IS NOT + NULL applied to nested composite values (Andrew Gierth, Tom Lane) - The SQL standard specifies that IS NULL should return + The SQL standard specifies that IS NULL should return TRUE for a row of all null values (thus ROW(NULL,NULL) IS - NULL yields TRUE), but this is not meant to apply recursively - (thus ROW(NULL, ROW(NULL,NULL)) IS NULL yields FALSE). + NULL yields TRUE), but this is not meant to apply recursively + (thus ROW(NULL, ROW(NULL,NULL)) IS NULL yields FALSE). The core executor got this right, but certain planner optimizations treated the test as recursive (thus producing TRUE in both cases), - and contrib/postgres_fdw could produce remote queries + and contrib/postgres_fdw could produce remote queries that misbehaved similarly. @@ -2957,8 +5630,8 @@ Branch: master [eae1ad9b6] 2016-05-23 19:23:36 -0400 Branch: REL9_5_STABLE [e504d915b] 2016-05-23 19:23:36 -0400 --> - Fix unrecognized node type error for INSERT ... ON - CONFLICT within a recursive CTE (a WITH item) (Peter + Fix unrecognized node type error for INSERT ... ON + CONFLICT within a recursive CTE (a WITH item) (Peter Geoghegan) @@ -2970,7 +5643,7 @@ Branch: master [26e66184d] 2016-05-11 16:20:23 -0400 Branch: REL9_5_STABLE [58d802410] 2016-05-11 16:20:03 -0400 --> - Fix INSERT ... ON CONFLICT to successfully match index + Fix INSERT ... ON CONFLICT to successfully match index expressions or index predicates that are simplified during the planner's expression preprocessing phase (Tom Lane) @@ -2984,7 +5657,7 @@ Branch: REL9_5_STABLE [31ce32ade] 2016-07-04 16:09:11 -0400 --> Correctly handle violations of exclusion constraints that apply to - the target table of an INSERT ... ON CONFLICT command, + the target table of an INSERT ... ON CONFLICT command, but are not one of the selected arbiter indexes (Tom Lane) @@ -3001,7 +5674,7 @@ Branch: master [8a13d5e6d] 2016-05-11 17:06:53 -0400 Branch: REL9_5_STABLE [428484ce1] 2016-05-11 17:06:53 -0400 --> - Fix INSERT ... ON CONFLICT to not fail if the target + Fix INSERT ... 
ON CONFLICT to not fail if the target table has a unique index on OID (Tom Lane) @@ -3017,7 +5690,7 @@ Branch: REL9_2_STABLE [f66e0fec3] 2016-06-16 17:16:53 -0400 Branch: REL9_1_STABLE [7b97dafa2] 2016-06-16 17:16:58 -0400 --> - Make the inet and cidr data types properly reject + Make the inet and cidr data types properly reject IPv6 addresses with too many colon-separated fields (Tom Lane) @@ -3033,8 +5706,8 @@ Branch: REL9_2_STABLE [89b301104] 2016-07-16 14:42:37 -0400 Branch: REL9_1_STABLE [608cc0c41] 2016-07-16 14:42:37 -0400 --> - Prevent crash in close_ps() - (the point ## lseg operator) + Prevent crash in close_ps() + (the point ## lseg operator) for NaN input coordinates (Tom Lane) @@ -3052,7 +5725,7 @@ Branch: REL9_4_STABLE [b25d87f91] 2016-07-01 11:40:22 -0400 Branch: REL9_3_STABLE [b0f20c2ea] 2016-07-01 11:40:22 -0400 --> - Avoid possible crash in pg_get_expr() when inconsistent + Avoid possible crash in pg_get_expr() when inconsistent values are passed to it (Michael Paquier, Thomas Munro) @@ -3068,12 +5741,12 @@ Branch: REL9_2_STABLE [b0134fe84] 2016-08-08 11:13:45 -0400 Branch: REL9_1_STABLE [d555d2642] 2016-08-08 11:13:51 -0400 --> - Fix several one-byte buffer over-reads in to_number() + Fix several one-byte buffer over-reads in to_number() (Peter Eisentraut) - In several cases the to_number() function would read one + In several cases the to_number() function would read one more character than it should from the input string. There is a small chance of a crash, if the input happens to be adjacent to the end of memory. @@ -3090,8 +5763,8 @@ Branch: REL9_3_STABLE [17bfef80e] 2016-06-27 15:57:21 -0400 --> Do not run the planner on the query contained in CREATE - MATERIALIZED VIEW or CREATE TABLE AS - when WITH NO DATA is specified (Michael Paquier, + MATERIALIZED VIEW or CREATE TABLE AS + when WITH NO DATA is specified (Michael Paquier, Tom Lane) @@ -3114,7 +5787,7 @@ Branch: REL9_1_STABLE [37276017f] 2016-07-15 17:49:49 -0700 --> Avoid unsafe intermediate state during expensive paths - through heap_update() (Masahiko Sawada, Andres Freund) + through heap_update() (Masahiko Sawada, Andres Freund) @@ -3154,8 +5827,8 @@ Branch: REL9_4_STABLE [166873dd0] 2016-07-15 14:17:20 -0400 Branch: REL9_3_STABLE [6c243f90a] 2016-07-15 14:17:20 -0400 --> - Avoid unnecessary could not serialize access errors when - acquiring FOR KEY SHARE row locks in serializable mode + Avoid unnecessary could not serialize access errors when + acquiring FOR KEY SHARE row locks in serializable mode (Álvaro Herrera) @@ -3169,14 +5842,14 @@ Branch: master [9eaf5be50] 2016-06-03 18:07:14 -0400 Branch: REL9_5_STABLE [8355897ff] 2016-06-03 18:07:14 -0400 --> - Make sure expanded datums returned by a plan node are + Make sure expanded datums returned by a plan node are read-only (Tom Lane) This avoids failures in some cases where the result of a lower plan node is referenced in multiple places in upper nodes. So far as - core PostgreSQL is concerned, only array values + core PostgreSQL is concerned, only array values returned by PL/pgSQL functions are at risk; but extensions might use expanded datums for other things. 
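The nested-composite IS NULL entry above boils down to these two cases:

    SELECT ROW(NULL, NULL) IS NULL;             -- true, as the SQL standard requires
    SELECT ROW(NULL, ROW(NULL, NULL)) IS NULL;  -- false: the rule is not applied recursively
    -- The fix makes the planner's simplifications and postgres_fdw agree with
    -- the core executor on these results.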
@@ -3197,7 +5870,7 @@ Branch: REL9_3_STABLE [dafdcbb6c] 2016-06-22 11:55:32 -0400 Branch: REL9_2_STABLE [dd41661d2] 2016-06-22 11:55:35 -0400 --> - Avoid crash in postgres -C when the specified variable + Avoid crash in postgres -C when the specified variable has a null string value (Michael Paquier) @@ -3293,12 +5966,12 @@ Branch: REL9_2_STABLE [4cf0978ea] 2016-05-24 15:47:51 -0400 Branch: REL9_1_STABLE [5551dac59] 2016-05-24 15:47:51 -0400 --> - Avoid consuming a transaction ID during VACUUM + Avoid consuming a transaction ID during VACUUM (Alexander Korotkov) - Some cases in VACUUM unnecessarily caused an XID to be + Some cases in VACUUM unnecessarily caused an XID to be assigned to the current transaction. Normally this is negligible, but if one is up against the XID wraparound limit, consuming more XIDs during anti-wraparound vacuums is a very bad thing. @@ -3321,7 +5994,7 @@ Branch: REL9_3_STABLE [28f294afd] 2016-06-24 18:29:28 -0400 The usual symptom of this bug is errors - like MultiXactId NNN has not been created + like MultiXactId NNN has not been created yet -- apparent wraparound. @@ -3337,8 +6010,8 @@ Branch: REL9_2_STABLE [3201709de] 2016-06-06 17:44:18 -0400 Branch: REL9_1_STABLE [32ceb8dfb] 2016-06-06 17:44:18 -0400 --> - When a manual ANALYZE specifies a column list, don't - reset the table's changes_since_analyze counter + When a manual ANALYZE specifies a column list, don't + reset the table's changes_since_analyze counter (Tom Lane) @@ -3359,7 +6032,7 @@ Branch: REL9_2_STABLE [127d73009] 2016-08-07 18:52:02 -0400 Branch: REL9_1_STABLE [a449ad095] 2016-08-07 18:52:02 -0400 --> - Fix ANALYZE's overestimation of n_distinct + Fix ANALYZE's overestimation of n_distinct for a unique or nearly-unique column with many null entries (Tom Lane) @@ -3423,7 +6096,7 @@ Branch: REL9_4_STABLE [98d5f366b] 2016-08-06 14:28:38 -0400 - This mistake prevented VACUUM from completing in some + This mistake prevented VACUUM from completing in some cases involving corrupt b-tree indexes. 
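A small sketch of the ANALYZE column-list entry above (table and column names are hypothetical):

    ANALYZE mytable (col1);
    -- With the fix, a column-restricted ANALYZE no longer resets the table's
    -- changes_since_analyze counter (visible as n_mod_since_analyze in
    -- pg_stat_user_tables), so autovacuum's analyze scheduling stays accurate.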
@@ -3435,7 +6108,7 @@ Branch: master [8cf739de8] 2016-06-24 16:57:36 -0400 Branch: REL9_5_STABLE [07f69137b] 2016-06-24 16:57:36 -0400 --> - Fix building of large (bigger than shared_buffers) + Fix building of large (bigger than shared_buffers) hash indexes (Tom Lane) @@ -3469,9 +6142,9 @@ Branch: master [8a859691d] 2016-06-05 11:53:06 -0400 Branch: REL9_5_STABLE [a7aa61ffe] 2016-06-05 11:53:06 -0400 --> - Fix possible crash during a nearest-neighbor (ORDER BY - distance) indexscan on a contrib/btree_gist index on - an interval column (Peter Geoghegan) + Fix possible crash during a nearest-neighbor (ORDER BY + distance) indexscan on a contrib/btree_gist index on + an interval column (Peter Geoghegan) @@ -3482,7 +6155,7 @@ Branch: master [975ad4e60] 2016-05-30 14:47:22 -0400 Branch: REL9_5_STABLE [2973d7d02] 2016-05-30 14:47:22 -0400 --> - Fix PANIC: failed to add BRIN tuple error when attempting + Fix PANIC: failed to add BRIN tuple error when attempting to update a BRIN index entry (Álvaro Herrera) @@ -3505,8 +6178,8 @@ Branch: master [baebab3ac] 2016-07-12 18:07:03 -0400 Branch: REL9_5_STABLE [a0943dbbe] 2016-07-12 18:06:50 -0400 --> - Fix PL/pgSQL's handling of the INTO clause - within IMPORT FOREIGN SCHEMA commands (Tom Lane) + Fix PL/pgSQL's handling of the INTO clause + within IMPORT FOREIGN SCHEMA commands (Tom Lane) @@ -3521,8 +6194,8 @@ Branch: REL9_2_STABLE [6c0be49b2] 2016-07-17 09:39:51 -0400 Branch: REL9_1_STABLE [84d679204] 2016-07-17 09:41:08 -0400 --> - Fix contrib/btree_gin to handle the smallest - possible bigint value correctly (Peter Eisentraut) + Fix contrib/btree_gin to handle the smallest + possible bigint value correctly (Peter Eisentraut) @@ -3544,7 +6217,7 @@ Branch: REL9_1_STABLE [1f63b0e09] 2016-08-05 18:58:36 -0400 It's planned to switch to two-part instead of three-part server version numbers for releases after 9.6. Make sure - that PQserverVersion() returns the correct value for + that PQserverVersion() returns the correct value for such cases. @@ -3560,7 +6233,7 @@ Branch: REL9_2_STABLE [295edbecf] 2016-08-01 15:08:48 +0200 Branch: REL9_1_STABLE [c15f502b6] 2016-08-01 15:08:36 +0200 --> - Fix ecpg's code for unsigned long long + Fix ecpg's code for unsigned long long array elements (Michael Meskes) @@ -3575,8 +6248,8 @@ Branch: REL9_3_STABLE [6693c9d7b] 2016-08-02 12:49:09 -0400 Branch: REL9_2_STABLE [a5a7caaa1] 2016-08-02 12:49:15 -0400 --> - In pg_dump with both @@ -3594,15 +6267,15 @@ Branch: REL9_4_STABLE [53c2601a5] 2016-06-03 11:29:20 -0400 Branch: REL9_3_STABLE [4a21c6fd7] 2016-06-03 11:29:20 -0400 --> - Improve handling of SIGTERM/control-C in - parallel pg_dump and pg_restore (Tom + Improve handling of SIGTERM/control-C in + parallel pg_dump and pg_restore (Tom Lane) Make sure that the worker processes will exit promptly, and also arrange to send query-cancel requests to the connected backends, in case they - are doing something long-running such as a CREATE INDEX. + are doing something long-running such as a CREATE INDEX. 
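A hedged sketch of the contrib/btree_gist nearest-neighbor entry above (hypothetical table; the extension's distance operator <-> is assumed for interval columns):

    CREATE EXTENSION IF NOT EXISTS btree_gist;
    CREATE TABLE iv (d interval);
    CREATE INDEX iv_d_gist ON iv USING gist (d);
    SELECT * FROM iv ORDER BY d <-> interval '1 hour' LIMIT 5;
    -- A distance-ordered (ORDER BY ... <->) scan of this shape could previously
    -- crash when the GiST index on the interval column was chosen.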
@@ -3615,17 +6288,17 @@ Branch: REL9_4_STABLE [ea274b2f4] 2016-05-25 12:39:57 -0400 Branch: REL9_3_STABLE [1c8205159] 2016-05-25 12:39:57 -0400 --> - Fix error reporting in parallel pg_dump - and pg_restore (Tom Lane) + Fix error reporting in parallel pg_dump + and pg_restore (Tom Lane) - Previously, errors reported by pg_dump - or pg_restore worker processes might never make it to + Previously, errors reported by pg_dump + or pg_restore worker processes might never make it to the user's console, because the messages went through the master process, and there were various deadlock scenarios that would prevent the master process from passing on the messages. Instead, just print - everything to stderr. In some cases this will result in + everything to stderr. In some cases this will result in duplicate messages (for instance, if all the workers report a server shutdown), but that seems better than no message. @@ -3640,8 +6313,8 @@ Branch: REL9_4_STABLE [d32bc204c] 2016-05-26 10:50:42 -0400 Branch: REL9_3_STABLE [b9784e1f7] 2016-05-26 10:50:46 -0400 --> - Ensure that parallel pg_dump - or pg_restore on Windows will shut down properly + Ensure that parallel pg_dump + or pg_restore on Windows will shut down properly after an error (Kyotaro Horiguchi) @@ -3658,13 +6331,13 @@ Branch: master [d74048def] 2016-05-26 22:14:23 +0200 Branch: REL9_5_STABLE [47e596976] 2016-05-26 22:18:04 +0200 --> - Make parallel pg_dump fail cleanly when run against a + Make parallel pg_dump fail cleanly when run against a standby server (Magnus Hagander) This usage is not supported - unless is specified, but the error was not handled very well. @@ -3678,7 +6351,7 @@ Branch: REL9_4_STABLE [f2f18a37c] 2016-05-26 11:51:16 -0400 Branch: REL9_3_STABLE [99565a1ef] 2016-05-26 11:51:20 -0400 --> - Make pg_dump behave better when built without zlib + Make pg_dump behave better when built without zlib support (Kyotaro Horiguchi) @@ -3699,7 +6372,7 @@ Branch: REL9_2_STABLE [a21617759] 2016-08-01 17:38:00 +0900 Branch: REL9_1_STABLE [366f4a962] 2016-08-01 17:38:05 +0900 --> - Make pg_basebackup accept -Z 0 as + Make pg_basebackup accept -Z 0 as specifying no compression (Fujii Masao) @@ -3745,13 +6418,13 @@ Branch: REL9_4_STABLE [c2651cd24] 2016-05-27 10:40:20 -0400 Branch: REL9_3_STABLE [1f1e70a87] 2016-05-27 10:40:20 -0400 --> - Be more predictable about reporting statement timeout - versus lock timeout (Tom Lane) + Be more predictable about reporting statement timeout + versus lock timeout (Tom Lane) On heavily loaded machines, the regression tests sometimes failed due - to reporting lock timeout even though the statement timeout + to reporting lock timeout even though the statement timeout should have occurred first. @@ -3804,7 +6477,7 @@ Branch: REL9_1_STABLE [d70df7867] 2016-07-19 17:53:31 -0400 --> Update our copy of the timezone code to match - IANA's tzcode release 2016c (Tom Lane) + IANA's tzcode release 2016c (Tom Lane) @@ -3825,7 +6498,7 @@ Branch: REL9_2_STABLE [7822792f7] 2016-08-05 12:58:58 -0400 Branch: REL9_1_STABLE [a44388ffe] 2016-08-05 12:59:02 -0400 --> - Update time zone data files to tzdata release 2016f + Update time zone data files to tzdata release 2016f for DST law changes in Kemerovo and Novosibirsk, plus historical corrections for Azerbaijan, Belarus, and Morocco. @@ -3847,7 +6520,7 @@ Branch: REL9_1_STABLE [a44388ffe] 2016-08-05 12:59:02 -0400 This release contains a variety of fixes from 9.5.2. For information about new features in the 9.5 major release, see - . + . 
@@ -3859,7 +6532,7 @@ Branch: REL9_1_STABLE [a44388ffe] 2016-08-05 12:59:02 -0400 However, if you are upgrading from a version earlier than 9.5.2, - see . + see . @@ -3889,7 +6562,7 @@ Branch: REL9_1_STABLE [9b676fd49] 2016-05-07 00:09:37 -0400 using OpenSSL within a single process and not all the code involved follows the same rules for when to clear the error queue. Failures have been reported specifically when a client application - uses SSL connections in libpq concurrently with + uses SSL connections in libpq concurrently with SSL connections using the PHP, Python, or Ruby wrappers for OpenSSL. It's possible for similar problems to arise within the server as well, if an extension module establishes an outgoing SSL connection. @@ -3907,7 +6580,7 @@ Branch: REL9_2_STABLE [ad2d32b57] 2016-04-21 20:05:58 -0400 Branch: REL9_1_STABLE [6882dbd34] 2016-04-21 20:05:58 -0400 --> - Fix failed to build any N-way joins + Fix failed to build any N-way joins planner error with a full join enclosed in the right-hand side of a left join (Tom Lane) @@ -3929,10 +6602,10 @@ Branch: REL9_2_STABLE [f02cb8c9a] 2016-04-29 20:19:38 -0400 Given a three-or-more-way equivalence class of variables, such - as X.X = Y.Y = Z.Z, it was possible for the planner to omit + as X.X = Y.Y = Z.Z, it was possible for the planner to omit some of the tests needed to enforce that all the variables are actually equal, leading to join rows being output that didn't satisfy - the WHERE clauses. For various reasons, erroneous plans + the WHERE clauses. For various reasons, erroneous plans were seldom selected in practice, so that this bug has gone undetected for a long time. @@ -3946,12 +6619,12 @@ Branch: REL9_5_STABLE [81deadd31] 2016-04-21 23:17:36 -0400 --> Fix corner-case parser failures occurring - when is turned on + when is turned on (Tom Lane) - An example is that SELECT (ARRAY[])::text[] gave an error, + An example is that SELECT (ARRAY[])::text[] gave an error, though it worked without the parentheses. @@ -3983,7 +6656,7 @@ Branch: REL9_4_STABLE [ef35afa35] 2016-04-20 14:25:15 -0400 The memory leak would typically not amount to much in simple queries, but it could be very substantial during a large GIN index build with - high maintenance_work_mem. + high maintenance_work_mem. @@ -3998,8 +6671,8 @@ Branch: REL9_2_STABLE [11247dd99] 2016-05-06 12:09:20 -0400 Branch: REL9_1_STABLE [7bad282c3] 2016-05-06 12:09:20 -0400 --> - Fix possible misbehavior of TH, th, - and Y,YYY format codes in to_timestamp() + Fix possible misbehavior of TH, th, + and Y,YYY format codes in to_timestamp() (Tom Lane) @@ -4020,9 +6693,9 @@ Branch: REL9_2_STABLE [c7c145e4f] 2016-04-21 14:20:18 -0400 Branch: REL9_1_STABLE [663624e60] 2016-04-21 14:20:18 -0400 --> - Fix dumping of rules and views in which the array - argument of a value operator - ANY (array) construct is a sub-SELECT + Fix dumping of rules and views in which the array + argument of a value operator + ANY (array) construct is a sub-SELECT (Tom Lane) @@ -4035,14 +6708,14 @@ Branch: REL9_5_STABLE [f3d17491c] 2016-04-04 18:05:23 -0400 Branch: REL9_4_STABLE [28148e258] 2016-04-04 18:05:24 -0400 --> - Disallow newlines in ALTER SYSTEM parameter values + Disallow newlines in ALTER SYSTEM parameter values (Tom Lane) The configuration-file parser doesn't support embedded newlines in string literals, so we mustn't allow them in values to be inserted - by ALTER SYSTEM. + by ALTER SYSTEM. 
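The ALTER SYSTEM restriction just above can be seen with any string parameter, for example:

    ALTER SYSTEM SET application_name = E'line1\nline2';
    -- Now rejected outright, since postgresql.auto.conf cannot represent an
    -- embedded newline in a string literal.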
@@ -4054,7 +6727,7 @@ Branch: REL9_5_STABLE [8f8e65d34] 2016-04-15 12:11:27 -0400 Branch: REL9_4_STABLE [8eed31ffb] 2016-04-15 12:11:27 -0400 --> - Fix ALTER TABLE ... REPLICA IDENTITY USING INDEX to + Fix ALTER TABLE ... REPLICA IDENTITY USING INDEX to work properly if an index on OID is selected (David Rowley) @@ -4113,13 +6786,13 @@ Branch: REL9_2_STABLE [1b22368ff] 2016-04-20 23:48:13 -0400 Branch: REL9_1_STABLE [4c1c9f80b] 2016-04-20 23:48:13 -0400 --> - Make pg_regress use a startup timeout from the - PGCTLTIMEOUT environment variable, if that's set (Tom Lane) + Make pg_regress use a startup timeout from the + PGCTLTIMEOUT environment variable, if that's set (Tom Lane) This is for consistency with a behavior recently added - to pg_ctl; it eases automated testing on slow machines. + to pg_ctl; it eases automated testing on slow machines. @@ -4134,7 +6807,7 @@ Branch: REL9_2_STABLE [6bb42d520] 2016-04-13 18:57:52 -0400 Branch: REL9_1_STABLE [3ef1f3a3e] 2016-04-13 18:57:52 -0400 --> - Fix pg_upgrade to correctly restore extension + Fix pg_upgrade to correctly restore extension membership for operator families containing only one operator class (Tom Lane) @@ -4142,7 +6815,7 @@ Branch: REL9_1_STABLE [3ef1f3a3e] 2016-04-13 18:57:52 -0400 In such a case, the operator family was restored into the new database, but it was no longer marked as part of the extension. This had no - immediate ill effects, but would cause later pg_dump + immediate ill effects, but would cause later pg_dump runs to emit output that would cause (harmless) errors on restore. @@ -4156,13 +6829,13 @@ Branch: REL9_4_STABLE [e1aecebc0] 2016-05-06 22:05:51 -0400 Branch: REL9_3_STABLE [e1d88f983] 2016-05-06 22:05:51 -0400 --> - Fix pg_upgrade to not fail when new-cluster TOAST rules + Fix pg_upgrade to not fail when new-cluster TOAST rules differ from old (Tom Lane) - pg_upgrade had special-case code to handle the - situation where the new PostgreSQL version thinks that + pg_upgrade had special-case code to handle the + situation where the new PostgreSQL version thinks that a table should have a TOAST table while the old version did not. 
That code was broken, so remove it, and instead do nothing in such cases; there seems no reason to believe that we can't get along fine without @@ -4192,7 +6865,7 @@ Branch: REL9_2_STABLE [b24f7e280] 2016-04-18 13:33:07 -0400 --> Reduce the number of SysV semaphores used by a build configured with - (Tom Lane) @@ -4207,8 +6880,8 @@ Branch: REL9_2_STABLE [0f5491283] 2016-04-23 16:53:15 -0400 Branch: REL9_1_STABLE [cbff4b708] 2016-04-23 16:53:15 -0400 --> - Rename internal function strtoi() - to strtoint() to avoid conflict with a NetBSD library + Rename internal function strtoi() + to strtoint() to avoid conflict with a NetBSD library function (Thomas Munro) @@ -4230,8 +6903,8 @@ Branch: REL9_2_STABLE [b5ebc513d] 2016-04-21 16:59:13 -0400 Branch: REL9_1_STABLE [9028f404e] 2016-04-21 16:59:17 -0400 --> - Fix reporting of errors from bind() - and listen() system calls on Windows (Tom Lane) + Fix reporting of errors from bind() + and listen() system calls on Windows (Tom Lane) @@ -4286,7 +6959,7 @@ Branch: REL9_4_STABLE [c238a4101] 2016-04-22 05:20:07 -0400 Branch: REL9_3_STABLE [ab5c6d01f] 2016-04-22 05:20:18 -0400 --> - Fix putenv() to work properly with Visual Studio 2013 + Fix putenv() to work properly with Visual Studio 2013 (Michael Paquier) @@ -4302,12 +6975,12 @@ Branch: REL9_2_STABLE [b4b06931e] 2016-03-29 11:54:58 -0400 Branch: REL9_1_STABLE [6cd30292b] 2016-03-29 11:54:58 -0400 --> - Avoid possibly-unsafe use of Windows' FormatMessage() + Avoid possibly-unsafe use of Windows' FormatMessage() function (Christian Ullrich) - Use the FORMAT_MESSAGE_IGNORE_INSERTS flag where + Use the FORMAT_MESSAGE_IGNORE_INSERTS flag where appropriate. No live bug is known to exist here, but it seems like a good idea to be careful. @@ -4324,9 +6997,9 @@ Branch: REL9_2_STABLE [29d154e36] 2016-05-05 20:09:27 -0400 Branch: REL9_1_STABLE [bfc39da64] 2016-05-05 20:09:32 -0400 --> - Update time zone data files to tzdata release 2016d + Update time zone data files to tzdata release 2016d for DST law changes in Russia and Venezuela. There are new zone - names Europe/Kirov and Asia/Tomsk to reflect + names Europe/Kirov and Asia/Tomsk to reflect the fact that these regions now have different time zone histories from adjacent regions. @@ -4348,7 +7021,7 @@ Branch: REL9_1_STABLE [bfc39da64] 2016-05-05 20:09:32 -0400 This release contains a variety of fixes from 9.5.1. For information about new features in the 9.5 major release, see - . + . @@ -4359,7 +7032,7 @@ Branch: REL9_1_STABLE [bfc39da64] 2016-05-05 20:09:32 -0400 - However, you may need to REINDEX some indexes after applying + However, you may need to REINDEX some indexes after applying the update, as per the first changelog entry below. @@ -4377,39 +7050,39 @@ Branch: REL9_5_STABLE [8aa6e9780] 2016-03-23 16:04:35 -0400 - Disable abbreviated keys for string sorting in non-C + Disable abbreviated keys for string sorting in non-C locales (Robert Haas) - PostgreSQL 9.5 introduced logic for speeding up + PostgreSQL 9.5 introduced logic for speeding up comparisons of string data types by using the standard C library - function strxfrm() as a substitute - for strcoll(). It now emerges that most versions of + function strxfrm() as a substitute + for strcoll(). It now emerges that most versions of glibc (Linux's implementation of the C library) have buggy - implementations of strxfrm() that, in some locales, + implementations of strxfrm() that, in some locales, can produce string comparison results that do not - match strcoll(). 
Until this problem can be better - characterized, disable the optimization in all non-C - locales. (C locale is safe since it uses - neither strcoll() nor strxfrm().) + match strcoll(). Until this problem can be better + characterized, disable the optimization in all non-C + locales. (C locale is safe since it uses + neither strcoll() nor strxfrm().) Unfortunately, this problem affects not only sorting but also entry ordering in B-tree indexes, which means that B-tree indexes - on text, varchar, or char columns may now + on text, varchar, or char columns may now be corrupt if they sort according to an affected locale and were - built or modified under PostgreSQL 9.5.0 or 9.5.1. - Users should REINDEX indexes that might be affected. + built or modified under PostgreSQL 9.5.0 or 9.5.1. + Users should REINDEX indexes that might be affected. It is not possible at this time to give an exhaustive list of - known-affected locales. C locale is known safe, and + known-affected locales. C locale is known safe, and there is no evidence of trouble in English-based locales such - as en_US, but some other popular locales such - as de_DE are affected in most glibc versions. + as en_US, but some other popular locales such + as de_DE are affected in most glibc versions. @@ -4442,14 +7115,14 @@ Branch: REL9_5_STABLE [bf78a6f10] 2016-03-28 10:57:46 -0300 Add must-be-superuser checks to some - new contrib/pageinspect functions (Andreas Seltenreich) + new contrib/pageinspect functions (Andreas Seltenreich) - Most functions in the pageinspect extension that - inspect bytea values disallow calls by non-superusers, - but brin_page_type() and brin_metapage_info() - failed to do so. Passing contrived bytea values to them might + Most functions in the pageinspect extension that + inspect bytea values disallow calls by non-superusers, + but brin_page_type() and brin_metapage_info() + failed to do so. Passing contrived bytea values to them might crash the server or disclose a few bytes of server memory. Add the missing permissions checks to prevent misuse. (CVE-2016-3065) @@ -4464,15 +7137,15 @@ Branch: REL9_5_STABLE [bf7ced5e2] 2016-03-03 09:50:38 +0000 - Fix incorrect handling of indexed ROW() comparisons + Fix incorrect handling of indexed ROW() comparisons (Simon Riggs) Flaws in a minor optimization introduced in 9.5 caused incorrect - results if the ROW() comparison matches the index ordering + results if the ROW() comparison matches the index ordering partially but not exactly (for example, differing column order, or the - index contains both ASC and DESC columns). + index contains both ASC and DESC columns). Pending a better solution, the optimization has been removed. @@ -4490,15 +7163,15 @@ Branch: REL9_1_STABLE [d485d9581] 2016-03-09 14:51:02 -0500 Fix incorrect handling of NULL index entries in - indexed ROW() comparisons (Tom Lane) + indexed ROW() comparisons (Tom Lane) An index search using a row comparison such as ROW(a, b) > - ROW('x', 'y') would stop upon reaching a NULL entry in - the b column, ignoring the fact that there might be - non-NULL b values associated with later values - of a. + ROW('x', 'y') would stop upon reaching a NULL entry in + the b column, ignoring the fact that there might be + non-NULL b values associated with later values + of a. 
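A sketch of the row-comparison entry just above (table, index, and data are made up; enable_seqscan is turned off only to coax this tiny example onto the index path):

    CREATE TABLE rc (a text, b text);
    CREATE INDEX rc_a_b_idx ON rc (a, b);
    INSERT INTO rc VALUES ('x', NULL), ('y', 'q');
    SET enable_seqscan = off;
    SELECT * FROM rc WHERE ROW(a, b) > ROW('x', 'y');
    -- With the fix, the index scan no longer stops at the NULL entry in b and
    -- correctly returns the ('y','q') row.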
@@ -4521,7 +7194,7 @@ Branch: REL9_1_STABLE [d0e47bcd4] 2016-03-09 18:53:54 -0800 Avoid unlikely data-loss scenarios due to renaming files without - adequate fsync() calls before and after (Michael Paquier, + adequate fsync() calls before and after (Michael Paquier, Tomas Vondra, Andres Freund) @@ -4535,14 +7208,14 @@ Branch: REL9_5_STABLE [d8d5a00b1] 2016-03-22 17:56:06 -0400 Fix incorrect behavior when rechecking a just-modified row in a query - that does SELECT FOR UPDATE/SHARE and contains some + that does SELECT FOR UPDATE/SHARE and contains some relations that need not be locked (Tom Lane) Rows from non-locked relations were incorrectly treated as containing all NULLs during the recheck, which could result in incorrectly - deciding that the updated row no longer passes the WHERE + deciding that the updated row no longer passes the WHERE condition, or in incorrectly outputting NULLs. @@ -4556,7 +7229,7 @@ Branch: REL9_4_STABLE [597e41e45] 2016-03-02 23:31:39 -0500 - Fix bug in json_to_record() when a field of its input + Fix bug in json_to_record() when a field of its input object contains a sub-object with a field name matching one of the requested output column names (Tom Lane) @@ -4571,7 +7244,7 @@ Branch: REL9_5_STABLE [68d68ff83] 2016-02-21 10:40:39 -0500 Fix nonsense result from two-argument form - of jsonb_object() when called with empty arrays + of jsonb_object() when called with empty arrays (Michael Paquier, Andrew Dunstan) @@ -4584,7 +7257,7 @@ Branch: REL9_5_STABLE [5f95521b3] 2016-03-23 10:43:24 -0400 - Fix misbehavior in jsonb_set() when converting a path + Fix misbehavior in jsonb_set() when converting a path array element into an integer for use as an array subscript (Michael Paquier) @@ -4600,7 +7273,7 @@ Branch: REL9_4_STABLE [17a250b18] 2016-03-17 15:50:33 -0400 Fix misformatting of negative time zone offsets - by to_char()'s OF format code + by to_char()'s OF format code (Thomas Munro, Tom Lane) @@ -4614,7 +7287,7 @@ Branch: REL9_5_STABLE [3f14d8d59] 2016-03-15 18:04:48 -0400 Fix possible incorrect logging of waits done by - INSERT ... ON CONFLICT (Peter Geoghegan) + INSERT ... ON CONFLICT (Peter Geoghegan) @@ -4632,13 +7305,13 @@ Branch: REL9_4_STABLE [a9613ee69] 2016-03-06 02:43:26 +0900 - Ignore parameter until + Ignore parameter until recovery has reached a consistent state (Michael Paquier) Previously, standby servers would delay application of WAL records in - response to recovery_min_apply_delay even while replaying + response to recovery_min_apply_delay even while replaying the initial portion of WAL needed to make their database state valid. Since the standby is useless until it's reached a consistent database state, this was deemed unhelpful. @@ -4657,7 +7330,7 @@ Branch: REL9_1_STABLE [ca32f125b] 2016-02-19 08:35:02 +0000 - Correctly handle cases where pg_subtrans is close to XID + Correctly handle cases where pg_subtrans is close to XID wraparound during server startup (Jeff Janes) @@ -4693,10 +7366,10 @@ Branch: REL9_5_STABLE [f8a75881f] 2016-03-02 23:43:42 -0800 Trouble cases included tuples larger than one page when replica - identity is FULL, UPDATEs that change a + identity is FULL, UPDATEs that change a primary key within a transaction large enough to be spooled to disk, incorrect reports of subxact logged without previous toplevel - record, and incorrect reporting of a transaction's commit time. + record, and incorrect reporting of a transaction's commit time. 
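The jsonb_object() entry above is easy to check directly:

    SELECT jsonb_object('{}'::text[], '{}'::text[]);
    -- The two-argument form now returns an empty object, {}, when both the key
    -- and value arrays are empty, rather than a nonsense result.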
@@ -4710,7 +7383,7 @@ Branch: REL9_4_STABLE [9b69d5c1d] 2016-02-29 12:34:33 +0000 Fix planner error with nested security barrier views when the outer - view has a WHERE clause containing a correlated subquery + view has a WHERE clause containing a correlated subquery (Dean Rasheed) @@ -4739,7 +7412,7 @@ Branch: REL9_1_STABLE [7d6c58aa1] 2016-02-28 23:40:35 -0500 - Fix corner-case crash due to trying to free localeconv() + Fix corner-case crash due to trying to free localeconv() output strings more than once (Tom Lane) @@ -4756,14 +7429,14 @@ Branch: REL9_1_STABLE [fe747b741] 2016-03-06 19:21:03 -0500 - Fix parsing of affix files for ispell dictionaries + Fix parsing of affix files for ispell dictionaries (Tom Lane) The code could go wrong if the affix file contained any characters whose byte length changes during case-folding, for - example I in Turkish UTF8 locales. + example I in Turkish UTF8 locales. @@ -4779,7 +7452,7 @@ Branch: REL9_1_STABLE [e56acbe2a] 2016-02-10 19:30:12 -0500 - Avoid use of sscanf() to parse ispell + Avoid use of sscanf() to parse ispell dictionary files (Artur Zakirov) @@ -4843,7 +7516,7 @@ Branch: REL9_1_STABLE [b4895bf79] 2016-03-04 11:57:40 -0500 - Fix psql's tab completion logic to handle multibyte + Fix psql's tab completion logic to handle multibyte characters properly (Kyotaro Horiguchi, Robert Haas) @@ -4859,12 +7532,12 @@ Branch: REL9_1_STABLE [2d61d88d8] 2016-03-14 11:31:49 -0400 - Fix psql's tab completion for - SECURITY LABEL (Tom Lane) + Fix psql's tab completion for + SECURITY LABEL (Tom Lane) - Pressing TAB after SECURITY LABEL might cause a crash + Pressing TAB after SECURITY LABEL might cause a crash or offering of inappropriate keywords. @@ -4881,8 +7554,8 @@ Branch: REL9_1_STABLE [f97664cf5] 2016-02-10 20:34:48 -0500 - Make pg_ctl accept a wait timeout from the - PGCTLTIMEOUT environment variable, if none is specified on + Make pg_ctl accept a wait timeout from the + PGCTLTIMEOUT environment variable, if none is specified on the command line (Noah Misch) @@ -4906,12 +7579,12 @@ Branch: REL9_1_STABLE [5a39c7395] 2016-03-07 10:41:11 -0500 Fix incorrect test for Windows service status - in pg_ctl (Manuel Mathar) + in pg_ctl (Manuel Mathar) The previous set of minor releases attempted to - fix pg_ctl to properly determine whether to send log + fix pg_ctl to properly determine whether to send log messages to Window's Event Log, but got the test backwards. 
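The ispell fixes above (affix-file parsing and the sscanf() replacement) apply to dictionaries declared roughly as follows; the dictionary name and file names are placeholders for whatever dictionary files are installed.

    CREATE TEXT SEARCH DICTIONARY english_ispell (
        TEMPLATE  = ispell,
        DictFile  = english,
        AffFile   = english,
        StopWords = english
    );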
@@ -4928,8 +7601,8 @@ Branch: REL9_1_STABLE [1965a8ce1] 2016-03-16 23:18:08 -0400 - Fix pgbench to correctly handle the combination - of -C and -M prepared options (Tom Lane) + Fix pgbench to correctly handle the combination + of -C and -M prepared options (Tom Lane) @@ -4943,7 +7616,7 @@ Branch: REL9_3_STABLE [bf26c4f44] 2016-02-18 18:32:26 -0500 - In pg_upgrade, skip creating a deletion script when + In pg_upgrade, skip creating a deletion script when the new data directory is inside the old data directory (Bruce Momjian) @@ -5001,7 +7674,7 @@ Branch: REL9_1_STABLE [0f359c7de] 2016-02-18 15:40:36 -0500 Fix multiple mistakes in the statistics returned - by contrib/pgstattuple's pgstatindex() + by contrib/pgstattuple's pgstatindex() function (Tom Lane) @@ -5018,7 +7691,7 @@ Branch: REL9_1_STABLE [2aa9fd963] 2016-03-19 18:59:41 -0400 - Remove dependency on psed in MSVC builds, since it's no + Remove dependency on psed in MSVC builds, since it's no longer provided by core Perl (Michael Paquier, Andrew Dunstan) @@ -5035,7 +7708,7 @@ Branch: REL9_1_STABLE [e5fd35cc5] 2016-03-25 19:03:54 -0400 - Update time zone data files to tzdata release 2016c + Update time zone data files to tzdata release 2016c for DST law changes in Azerbaijan, Chile, Haiti, Palestine, and Russia (Altai, Astrakhan, Kirov, Sakhalin, Ulyanovsk regions), plus historical corrections for Lithuania, Moldova, and Russia @@ -5059,7 +7732,7 @@ Branch: REL9_1_STABLE [e5fd35cc5] 2016-03-25 19:03:54 -0400 This release contains a variety of fixes from 9.5.0. For information about new features in the 9.5 major release, see - . + . @@ -5119,7 +7792,7 @@ Branch: REL9_5_STABLE [87dbc72a7] 2016-02-08 11:03:37 +0100 - Avoid pushdown of HAVING clauses when grouping sets are + Avoid pushdown of HAVING clauses when grouping sets are used (Andrew Gierth) @@ -5132,7 +7805,7 @@ Branch: REL9_5_STABLE [82406d6ff] 2016-02-07 14:57:24 -0500 - Fix deparsing of ON CONFLICT arbiter WHERE + Fix deparsing of ON CONFLICT arbiter WHERE clauses (Peter Geoghegan) @@ -5149,14 +7822,14 @@ Branch: REL9_1_STABLE [b043df093] 2016-01-26 15:38:33 -0500 - Make %h and %r escapes - in log_line_prefix work for messages emitted due - to log_connections (Tom Lane) + Make %h and %r escapes + in log_line_prefix work for messages emitted due + to log_connections (Tom Lane) - Previously, %h/%r started to work just after a - new session had emitted the connection received log message; + Previously, %h/%r started to work just after a + new session had emitted the connection received log message; now they work for that message too. 
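The ON CONFLICT arbiter WHERE deparsing fix above applies when the conflict target names a partial unique index; a sketch with hypothetical names:

    -- Partial unique index, and an INSERT whose conflict target carries an
    -- arbiter WHERE clause matching that index's predicate.
    CREATE TABLE items (a int, b text);
    CREATE UNIQUE INDEX items_a_pos ON items (a) WHERE a > 0;

    INSERT INTO items (a, b) VALUES (1, 'x')
    ON CONFLICT (a) WHERE a > 0
    DO UPDATE SET b = EXCLUDED.b;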
@@ -5190,8 +7863,8 @@ Branch: REL9_1_STABLE [ed5f57218] 2016-01-29 10:28:03 +0100 - Fix psql's \det command to interpret its - pattern argument the same way as other \d commands with + Fix psql's \det command to interpret its + pattern argument the same way as other \d commands with potentially schema-qualified patterns do (Reece Hart) @@ -5208,7 +7881,7 @@ Branch: REL9_1_STABLE [b96f6f444] 2016-01-07 11:59:08 -0300 - In pg_ctl on Windows, check service status to decide + In pg_ctl on Windows, check service status to decide where to send output, rather than checking if standard output is a terminal (Michael Paquier) @@ -5226,7 +7899,7 @@ Branch: REL9_1_STABLE [5108013db] 2016-01-13 18:55:27 -0500 - Fix assorted corner-case bugs in pg_dump's processing + Fix assorted corner-case bugs in pg_dump's processing of extension member objects (Tom Lane) @@ -5240,7 +7913,7 @@ Branch: REL9_5_STABLE [1e910cf5b] 2016-01-22 20:04:35 -0300 Fix improper quoting of domain constraint names - in pg_dump (Elvis Pranskevichus) + in pg_dump (Elvis Pranskevichus) @@ -5256,9 +7929,9 @@ Branch: REL9_1_STABLE [9c704632c] 2016-02-04 00:26:10 -0500 - Make pg_dump mark a view's triggers as needing to be + Make pg_dump mark a view's triggers as needing to be processed after its rule, to prevent possible failure during - parallel pg_restore (Tom Lane) + parallel pg_restore (Tom Lane) @@ -5274,7 +7947,7 @@ Branch: REL9_1_STABLE [4c8b07d3c] 2016-02-03 09:25:34 -0500 - Install guards in pgbench against corner-case overflow + Install guards in pgbench against corner-case overflow conditions during evaluation of script-specified division or modulo operators (Fabien Coelho, Michael Paquier) @@ -5288,7 +7961,7 @@ Branch: REL9_5_STABLE [7ef311eb4] 2016-01-05 17:25:12 -0300 - Suppress useless warning message when pg_receivexlog + Suppress useless warning message when pg_receivexlog connects to a pre-9.4 server (Marco Nenciarini) @@ -5306,15 +7979,15 @@ Branch: REL9_5_STABLE [5ef26b8de] 2016-01-11 20:06:47 -0500 - Avoid dump/reload problems when using both plpython2 - and plpython3 (Tom Lane) + Avoid dump/reload problems when using both plpython2 + and plpython3 (Tom Lane) - In principle, both versions of PL/Python can be used in + In principle, both versions of PL/Python can be used in the same database, though not in the same session (because the two - versions of libpython cannot safely be used concurrently). - However, pg_restore and pg_upgrade both + versions of libpython cannot safely be used concurrently). + However, pg_restore and pg_upgrade both do things that can fall foul of the same-session restriction. Work around that by changing the timing of the check. @@ -5331,7 +8004,7 @@ Branch: REL9_5_STABLE [a66c1fcdd] 2016-01-08 11:39:28 -0500 - Fix PL/Python regression tests to pass with Python 3.5 + Fix PL/Python regression tests to pass with Python 3.5 (Peter Eisentraut) @@ -5348,16 +8021,16 @@ Branch: REL9_1_STABLE [b1f591c50] 2016-02-05 20:23:19 -0500 - Prevent certain PL/Java parameters from being set by + Prevent certain PL/Java parameters from being set by non-superusers (Noah Misch) - This change mitigates a PL/Java security bug - (CVE-2016-0766), which was fixed in PL/Java by marking + This change mitigates a PL/Java security bug + (CVE-2016-0766), which was fixed in PL/Java by marking these parameters as superuser-only. To fix the security hazard for - sites that update PostgreSQL more frequently - than PL/Java, make the core code aware of them also. 
+ sites that update PostgreSQL more frequently + than PL/Java, make the core code aware of them also. @@ -5374,14 +8047,14 @@ Branch: REL9_4_STABLE [33b26426e] 2016-02-08 11:10:14 +0100 - Fix ecpg-supplied header files to not contain comments + Fix ecpg-supplied header files to not contain comments continued from a preprocessor directive line onto the next line (Michael Meskes) - Such a comment is rejected by ecpg. It's not yet clear - whether ecpg itself should be changed. + Such a comment is rejected by ecpg. It's not yet clear + whether ecpg itself should be changed. @@ -5395,8 +8068,8 @@ Branch: REL9_3_STABLE [1f2b195eb] 2016-02-03 01:39:08 -0500 - Fix hstore_to_json_loose()'s test for whether - an hstore value can be converted to a JSON number (Tom Lane) + Fix hstore_to_json_loose()'s test for whether + an hstore value can be converted to a JSON number (Tom Lane) @@ -5417,8 +8090,8 @@ Branch: REL9_4_STABLE [2099b911d] 2016-02-04 22:27:47 -0500 - In contrib/postgres_fdw, fix bugs triggered by use - of tableoid in data-modifying commands (Etsuro Fujita, + In contrib/postgres_fdw, fix bugs triggered by use + of tableoid in data-modifying commands (Etsuro Fujita, Robert Haas) @@ -5431,7 +8104,7 @@ Branch: REL9_5_STABLE [47acf3add] 2016-01-22 11:53:06 -0500 - Fix ill-advised restriction of NAMEDATALEN to be less + Fix ill-advised restriction of NAMEDATALEN to be less than 256 (Robert Haas, Tom Lane) @@ -5468,7 +8141,7 @@ Branch: REL9_1_STABLE [b1bc38144] 2016-01-19 23:30:28 -0500 - Ensure that dynloader.h is included in the installed + Ensure that dynloader.h is included in the installed header files in MSVC builds (Bruce Momjian, Michael Paquier) @@ -5485,7 +8158,7 @@ Branch: REL9_1_STABLE [6887d72d0] 2016-02-05 10:59:39 -0500 - Update time zone data files to tzdata release 2016a for + Update time zone data files to tzdata release 2016a for DST law changes in Cayman Islands, Metlakatla, and Trans-Baikal Territory (Zabaykalsky Krai), plus historical corrections for Pakistan. @@ -5508,7 +8181,7 @@ Branch: REL9_1_STABLE [6887d72d0] 2016-02-05 10:59:39 -0500 Overview - Major enhancements in PostgreSQL 9.5 include: + Major enhancements in PostgreSQL 9.5 include: @@ -5517,31 +8190,31 @@ Branch: REL9_1_STABLE [6887d72d0] 2016-02-05 10:59:39 -0500 - Allow INSERTs + Allow INSERTs that would generate constraint conflicts to be turned into - UPDATEs or ignored + UPDATEs or ignored - Add GROUP BY analysis features GROUPING SETS, - CUBE and - ROLLUP + Add GROUP BY analysis features GROUPING SETS, + CUBE and + ROLLUP - Add row-level security control + Add row-level security control Create mechanisms for tracking - the progress of replication, + the progress of replication, including methods for identifying the origin of individual changes during logical replication @@ -5549,7 +8222,7 @@ Branch: REL9_1_STABLE [6887d72d0] 2016-02-05 10:59:39 -0500 - Add Block Range Indexes (BRIN) + Add Block Range Indexes (BRIN) @@ -5578,8 +8251,8 @@ Branch: REL9_1_STABLE [6887d72d0] 2016-02-05 10:59:39 -0500 Migration to Version 9.5 - A dump/restore using , or use - of , is required for those wishing to migrate + A dump/restore using , or use + of , is required for those wishing to migrate data from any previous release. @@ -5595,24 +8268,24 @@ Branch: REL9_1_STABLE [6887d72d0] 2016-02-05 10:59:39 -0500 2015-03-11 [c6b3c93] Tom Lane: Make operator precedence follow the SQL standar.. 
--> - Adjust operator precedence - to match the SQL standard (Tom Lane) + Adjust operator precedence + to match the SQL standard (Tom Lane) The precedence of <=, >= and <> has been reduced to match that of <, > - and =. The precedence of IS tests - (e.g., x IS NULL) has been reduced to be + and =. The precedence of IS tests + (e.g., x IS NULL) has been reduced to be just below these six comparison operators. - Also, multi-keyword operators beginning with NOT now have + Also, multi-keyword operators beginning with NOT now have the precedence of their base operator (for example, NOT - BETWEEN now has the same precedence as BETWEEN) whereas - before they had inconsistent precedence, behaving like NOT + BETWEEN now has the same precedence as BETWEEN) whereas + before they had inconsistent precedence, behaving like NOT with respect to their left operand but like their base operator with respect to their right operand. The new configuration - parameter can be + parameter can be enabled to warn about queries in which these precedence changes result in different parsing choices. @@ -5623,8 +8296,8 @@ Branch: REL9_1_STABLE [6887d72d0] 2016-02-05 10:59:39 -0500 2015-03-31 [0badb06] Bruce ..: pg_ctl: change default shutdown mode from 'sma.. --> - Change 's default shutdown mode from - smart to fast (Bruce Momjian) + Change 's default shutdown mode from + smart to fast (Bruce Momjian) @@ -5639,18 +8312,18 @@ Branch: REL9_1_STABLE [6887d72d0] 2016-02-05 10:59:39 -0500 --> Use assignment cast behavior for data type conversions - in PL/pgSQL assignments, rather than converting to and + in PL/pgSQL assignments, rather than converting to and from text (Tom Lane) This change causes conversions of Booleans to strings to - produce true or false, not t - or f. Other type conversions may succeed in more cases - than before; for example, assigning a numeric value 3.9 to + produce true or false, not t + or f. Other type conversions may succeed in more cases + than before; for example, assigning a numeric value 3.9 to an integer variable will now assign 4 rather than failing. If no assignment-grade cast is defined for the particular source and - destination types, PL/pgSQL will fall back to its old + destination types, PL/pgSQL will fall back to its old I/O conversion behavior. @@ -5661,13 +8334,13 @@ Branch: REL9_1_STABLE [6887d72d0] 2016-02-05 10:59:39 -0500 --> Allow characters in server - command-line options to be escaped with a backslash (Andres Freund) + command-line options to be escaped with a backslash (Andres Freund) Formerly, spaces in the options string always separated options, so there was no way to include a space in an option value. Including - a backslash in an option value now requires writing \\. + a backslash in an option value now requires writing \\. @@ -5677,9 +8350,9 @@ Branch: REL9_1_STABLE [6887d72d0] 2016-02-05 10:59:39 -0500 --> Change the default value of the GSSAPI include_realm parameter to 1, so - that by default the realm is not removed from a GSS - or SSPI principal name (Stephen Frost) + linkend="gssapi-auth">include_realm parameter to 1, so + that by default the realm is not removed from a GSS + or SSPI principal name (Stephen Frost) @@ -5690,9 +8363,9 @@ Branch: REL9_1_STABLE [6887d72d0] 2016-02-05 10:59:39 -0500 2015-06-29 [d661532] Heikki..: Also trigger restartpoints based on max_wal_siz.. 
--> - Replace configuration parameter checkpoint_segments - with - and (Heikki Linnakangas) + Replace configuration parameter checkpoint_segments + with + and (Heikki Linnakangas) @@ -5712,13 +8385,13 @@ max_wal_size = (3 * checkpoint_segments) * 16MB 2014-06-18 [df8b7bc] Tom Lane: Improve our mechanism for controlling the Linux.. --> - Control the Linux OOM killer via new environment + Control the Linux OOM killer via new environment variables PG_OOM_ADJUST_FILE + linkend="linux-memory-overcommit">PG_OOM_ADJUST_FILE and PG_OOM_ADJUST_VALUE, - instead of compile-time options LINUX_OOM_SCORE_ADJ and - LINUX_OOM_ADJ + linkend="linux-memory-overcommit">PG_OOM_ADJUST_VALUE, + instead of compile-time options LINUX_OOM_SCORE_ADJ and + LINUX_OOM_ADJ (Gurjeet Singh) @@ -5730,7 +8403,7 @@ max_wal_size = (3 * checkpoint_segments) * 16MB --> Decommission server configuration - parameter ssl_renegotiation_limit, which was deprecated + parameter ssl_renegotiation_limit, which was deprecated in earlier releases (Andres Freund) @@ -5738,8 +8411,8 @@ max_wal_size = (3 * checkpoint_segments) * 16MB While SSL renegotiation is a good idea in theory, it has caused enough bugs to be considered a net negative in practice, and it is due to be removed from future versions of the relevant standards. We have - therefore removed support for it from PostgreSQL. - The ssl_renegotiation_limit parameter still exists, but + therefore removed support for it from PostgreSQL. + The ssl_renegotiation_limit parameter still exists, but cannot be set to anything but zero (disabled). It's not documented anymore, either. @@ -5750,7 +8423,7 @@ max_wal_size = (3 * checkpoint_segments) * 16MB 2014-11-05 [525a489] Tom Lane: Remove the last vestige of server-side autocomm.. --> - Remove server configuration parameter autocommit, which + Remove server configuration parameter autocommit, which was already deprecated and non-operational (Tom Lane) @@ -5760,8 +8433,8 @@ max_wal_size = (3 * checkpoint_segments) * 16MB 2015-03-06 [bb8582a] Peter ..: Remove rolcatupdate --> - Remove the pg_authid - catalog's rolcatupdate field, as it had no usefulness + Remove the pg_authid + catalog's rolcatupdate field, as it had no usefulness (Adam Brightwell) @@ -5772,8 +8445,8 @@ max_wal_size = (3 * checkpoint_segments) * 16MB --> The pg_stat_replication - system view's sent field is now NULL, not zero, when + linkend="pg-stat-replication-view">pg_stat_replication + system view's sent field is now NULL, not zero, when it has no valid value (Magnus Hagander) @@ -5783,13 +8456,13 @@ max_wal_size = (3 * checkpoint_segments) * 16MB 2015-07-17 [89ddd29] Andrew..: Support JSON negative array subscripts everywh.. --> - Allow json and jsonb array extraction operators to + Allow json and jsonb array extraction operators to accept negative subscripts, which count from the end of JSON arrays (Peter Geoghegan, Andrew Dunstan) - Previously, these operators returned NULL for negative + Previously, these operators returned NULL for negative subscripts. @@ -5822,12 +8495,12 @@ max_wal_size = (3 * checkpoint_segments) * 16MB 2015-05-15 [b0b7be6] Alvaro..: Add BRIN infrastructure for "inclusion" opclasses --> - Add Block Range Indexes (BRIN) + Add Block Range Indexes (BRIN) (Álvaro Herrera) - BRIN indexes store only summary data (such as minimum + BRIN indexes store only summary data (such as minimum and maximum values) for ranges of heap blocks. 
They are therefore very compact and cheap to update; but if the data is naturally clustered, they can still provide substantial speedup of searches. @@ -5841,7 +8514,7 @@ max_wal_size = (3 * checkpoint_segments) * 16MB Allow queries to perform accurate distance filtering of bounding-box-indexed objects (polygons, circles) using GiST indexes (Alexander Korotkov, Heikki + linkend="gist">GiST indexes (Alexander Korotkov, Heikki Linnakangas) @@ -5861,7 +8534,7 @@ max_wal_size = (3 * checkpoint_segments) * 16MB 2015-03-30 [0633a60] Heikki..: Add index-only scan support to range type GiST .. --> - Allow GiST indexes to perform index-only + Allow GiST indexes to perform index-only scans (Anastasia Lubennikova, Heikki Linnakangas, Andreas Karlsson) @@ -5871,15 +8544,15 @@ max_wal_size = (3 * checkpoint_segments) * 16MB Add GUC and storage parameter to set the maximum size of GIN pending list. --> - Add configuration parameter - to control the size of GIN pending lists (Fujii Masao) + Add configuration parameter + to control the size of GIN pending lists (Fujii Masao) This value can also be set on a per-index basis as an index storage parameter. Previously the pending-list size was controlled - by , which was awkward because - appropriate values for work_mem are often much too large + by , which was awkward because + appropriate values for work_mem are often much too large for this purpose. @@ -5890,7 +8563,7 @@ Add GUC and storage parameter to set the maximum size of GIN pending list. --> Issue a warning during the creation of hash indexes because they are not + linkend="indexes-types">hash indexes because they are not crash-safe (Bruce Momjian) @@ -5911,8 +8584,8 @@ Add GUC and storage parameter to set the maximum size of GIN pending list. 2015-05-13 [78efd5c] Robert..: Extend abbreviated key infrastructure to datum .. --> - Improve the speed of sorting of varchar, text, - and numeric fields via abbreviated keys + Improve the speed of sorting of varchar, text, + and numeric fields via abbreviated keys (Peter Geoghegan, Andrew Gierth, Robert Haas) @@ -5924,8 +8597,8 @@ Add GUC and storage parameter to set the maximum size of GIN pending list. Extend the infrastructure that allows sorting to be performed by inlined, non-SQL-callable comparison functions to - cover CREATE INDEX, REINDEX, and - CLUSTER (Peter Geoghegan) + cover CREATE INDEX, REINDEX, and + CLUSTER (Peter Geoghegan) @@ -5986,7 +8659,7 @@ Add GUC and storage parameter to set the maximum size of GIN pending list. This particularly addresses scalability problems when running on - systems with multiple CPU sockets. + systems with multiple CPU sockets. @@ -6006,7 +8679,7 @@ Add GUC and storage parameter to set the maximum size of GIN pending list. --> Allow pushdown of query restrictions into subqueries with window functions, where appropriate + linkend="tutorial-window">window functions, where appropriate (David Rowley) @@ -6029,7 +8702,7 @@ Add GUC and storage parameter to set the maximum size of GIN pending list. Teach the planner to use statistics obtained from an expression index on a boolean-returning function, when a matching function call - appears in WHERE (Tom Lane) + appears in WHERE (Tom Lane) @@ -6038,7 +8711,7 @@ Add GUC and storage parameter to set the maximum size of GIN pending list. 2015-09-23 [cfb2024] Tom Lane: Make ANALYZE compute basic statistics even for.. 
--> - Make ANALYZE compute basic statistics (null fraction and + Make ANALYZE compute basic statistics (null fraction and average column width) even for columns whose data type lacks an equality function (Oleksandr Shulgin) @@ -6052,7 +8725,7 @@ Add GUC and storage parameter to set the maximum size of GIN pending list. --> - Speed up CRC (cyclic redundancy check) computations + Speed up CRC (cyclic redundancy check) computations and switch to CRC-32C (Abhijit Menon-Sen, Heikki Linnakangas) @@ -6072,7 +8745,7 @@ Add GUC and storage parameter to set the maximum size of GIN pending list. 2014-07-01 [9f03ca9] Robert..: Avoid copying index tuples when building an ind.. --> - Speed up CREATE INDEX by avoiding unnecessary memory + Speed up CREATE INDEX by avoiding unnecessary memory copies (Robert Haas) @@ -6106,7 +8779,7 @@ Add GUC and storage parameter to set the maximum size of GIN pending list. --> Add per-table autovacuum logging control via new - log_autovacuum_min_duration storage parameter + log_autovacuum_min_duration storage parameter (Michael Paquier) @@ -6116,13 +8789,13 @@ Add GUC and storage parameter to set the maximum size of GIN pending list. 2014-06-29 [51adcaa] Andres..: Add cluster_name GUC which is included in proce.. --> - Add new configuration parameter + Add new configuration parameter (Thomas Munro) This string, typically set in postgresql.conf, + linkend="config-setting-configuration-file">postgresql.conf, allows clients to identify the cluster. This name also appears in the process title of all server processes, allowing for easier identification of processes belonging to the same cluster. @@ -6135,7 +8808,7 @@ Add GUC and storage parameter to set the maximum size of GIN pending list. --> Prevent non-superusers from changing on connection startup (Fujii Masao) + linkend="guc-log-disconnections"/> on connection startup (Fujii Masao) @@ -6144,7 +8817,7 @@ Add GUC and storage parameter to set the maximum size of GIN pending list. - <acronym>SSL</> + <acronym>SSL</acronym> @@ -6154,13 +8827,13 @@ Add GUC and storage parameter to set the maximum size of GIN pending list. --> Check Subject Alternative - Names in SSL server certificates, if present + Names in SSL server certificates, if present (Alexey Klyukin) When they are present, this replaces checks against the certificate's - Common Name. + Common Name. @@ -6170,8 +8843,8 @@ Add GUC and storage parameter to set the maximum size of GIN pending list. --> Add system view pg_stat_ssl to report - SSL connection information (Magnus Hagander) + linkend="pg-stat-ssl-view">pg_stat_ssl to report + SSL connection information (Magnus Hagander) @@ -6180,22 +8853,22 @@ Add GUC and storage parameter to set the maximum size of GIN pending list. 2015-02-03 [91fa7b4] Heikki..: Add API functions to libpq to interrogate SSL .. --> - Add libpq functions to return SSL + Add libpq functions to return SSL information in an implementation-independent way (Heikki Linnakangas) - While PQgetssl() can - still be used to call OpenSSL functions, it is now + While PQgetssl() can + still be used to call OpenSSL functions, it is now considered deprecated because future versions - of libpq might support other SSL + of libpq might support other SSL implementations. When possible, use the new functions PQsslAttribute(), PQsslAttributeNames(), - and PQsslInUse() - to obtain SSL information in - an SSL-implementation-independent way. 
+ linkend="libpq-pqsslattribute">PQsslAttribute(), PQsslAttributeNames(), + and PQsslInUse() + to obtain SSL information in + an SSL-implementation-independent way. @@ -6204,7 +8877,7 @@ Add GUC and storage parameter to set the maximum size of GIN pending list. 2015-04-09 [8a0d34e4] Peter ..: libpq: Don't overwrite existing OpenSSL thread.. --> - Make libpq honor any OpenSSL + Make libpq honor any OpenSSL thread callbacks (Jan Urbanski) @@ -6229,20 +8902,20 @@ Add GUC and storage parameter to set the maximum size of GIN pending list. 2015-06-29 [d661532] Heikki..: Also trigger restartpoints based on max_wal_siz.. --> - Replace configuration parameter checkpoint_segments - with - and (Heikki Linnakangas) + Replace configuration parameter checkpoint_segments + with + and (Heikki Linnakangas) - This change allows the allocation of a large number of WAL + This change allows the allocation of a large number of WAL files without keeping them after they are no longer needed. - Therefore the default for max_wal_size has been set - to 1GB, much larger than the old default - for checkpoint_segments. + Therefore the default for max_wal_size has been set + to 1GB, much larger than the old default + for checkpoint_segments. Also note that standby servers perform restartpoints to try to limit - their WAL space consumption to max_wal_size; previously - they did not pay any attention to checkpoint_segments. + their WAL space consumption to max_wal_size; previously + they did not pay any attention to checkpoint_segments. @@ -6251,18 +8924,18 @@ Add GUC and storage parameter to set the maximum size of GIN pending list. 2014-06-18 [df8b7bc] Tom Lane: Improve our mechanism for controlling the Linux.. --> - Control the Linux OOM killer via new environment + Control the Linux OOM killer via new environment variables PG_OOM_ADJUST_FILE + linkend="linux-memory-overcommit">PG_OOM_ADJUST_FILE and PG_OOM_ADJUST_VALUE + linkend="linux-memory-overcommit">PG_OOM_ADJUST_VALUE (Gurjeet Singh) - The previous OOM control infrastructure involved - compile-time options LINUX_OOM_SCORE_ADJ and - LINUX_OOM_ADJ, which are no longer supported. + The previous OOM control infrastructure involved + compile-time options LINUX_OOM_SCORE_ADJ and + LINUX_OOM_ADJ, which are no longer supported. The new behavior is available in all builds. @@ -6274,14 +8947,14 @@ Add GUC and storage parameter to set the maximum size of GIN pending list. Allow recording of transaction commit time stamps when configuration parameter + linkend="guc-track-commit-timestamp"/> is enabled (Álvaro Herrera, Petr Jelínek) Time stamp information can be accessed using functions pg_xact_commit_timestamp() - and pg_last_committed_xact(). + linkend="functions-commit-timestamp">pg_xact_commit_timestamp() + and pg_last_committed_xact(). @@ -6290,8 +8963,8 @@ Add GUC and storage parameter to set the maximum size of GIN pending list. 2014-12-22 [584e35d] Peter ..: Change local_preload_libraries to PGC_USERSET --> - Allow to be set - by ALTER ROLE SET (Peter Eisentraut, Kyotaro Horiguchi) + Allow to be set + by ALTER ROLE SET (Peter Eisentraut, Kyotaro Horiguchi) @@ -6300,7 +8973,7 @@ Add GUC and storage parameter to set the maximum size of GIN pending list. 2015-04-03 [a75fb9b] Alvaro..: Have autovacuum workers listen to SIGHUP, too --> - Allow autovacuum workers + Allow autovacuum workers to respond to configuration parameter changes during a run (Michael Paquier) @@ -6311,7 +8984,7 @@ Add GUC and storage parameter to set the maximum size of GIN pending list. 
2014-06-20 [3bdcf6a] Andres..: Don't allow to disable backend assertions via t.. --> - Make configuration parameter + Make configuration parameter read-only (Andres Freund) @@ -6319,7 +8992,7 @@ Add GUC and storage parameter to set the maximum size of GIN pending list. This means that assertions can no longer be turned off if they were enabled at compile time, allowing for more efficient code optimization. This change also removes the postgres option. @@ -6328,7 +9001,7 @@ Add GUC and storage parameter to set the maximum size of GIN pending list. 2014-10-18 [7feaccc] Peter ..: Allow setting effective_io_concurrency even on.. --> - Allow setting on + Allow setting on systems where it has no effect (Peter Eisentraut) @@ -6340,7 +9013,7 @@ Add GUC and storage parameter to set the maximum size of GIN pending list. --> Add system view pg_file_settings + linkend="view-pg-file-settings">pg_file_settings to show the contents of the server's configuration files (Sawada Masahiko) @@ -6351,8 +9024,8 @@ Add GUC and storage parameter to set the maximum size of GIN pending list. 2015-05-14 [a486e35] Peter ..: Add pg_settings.pending_restart column --> - Add pending_restart to the system view pg_settings to + Add pending_restart to the system view pg_settings to indicate a change has been made but will not take effect until a database restart (Peter Eisentraut) @@ -6363,14 +9036,14 @@ Add GUC and storage parameter to set the maximum size of GIN pending list. 2014-09-02 [bd3b7a9] Fujii ..: Support ALTER SYSTEM RESET command. --> - Allow ALTER SYSTEM - values to be reset with ALTER SYSTEM RESET (Vik + Allow ALTER SYSTEM + values to be reset with ALTER SYSTEM RESET (Vik Fearing) This command removes the specified setting - from postgresql.auto.conf. + from postgresql.auto.conf. @@ -6391,7 +9064,7 @@ Add GUC and storage parameter to set the maximum size of GIN pending list. --> Create mechanisms for tracking - the progress of replication, + the progress of replication, including methods for identifying the origin of individual changes during logical replication (Andres Freund) @@ -6423,14 +9096,14 @@ Add GUC and storage parameter to set the maximum size of GIN pending list. 2015-03-15 [51c11a7] Andres..: Remove pause_at_recovery_target recovery.conf s.. --> - Add recovery.conf + Add recovery.conf parameter recovery_target_action + linkend="recovery-target-action">recovery_target_action to control post-recovery activity (Petr Jelínek) - This replaces the old parameter pause_at_recovery_target. + This replaces the old parameter pause_at_recovery_target. @@ -6439,9 +9112,9 @@ Add GUC and storage parameter to set the maximum size of GIN pending list. 2015-05-15 [ffd3774] Heikki..: Add archive_mode='always' option. --> - Add new value - always to allow standbys to always archive received - WAL files (Fujii Masao) + Add new value + always to allow standbys to always archive received + WAL files (Fujii Masao) @@ -6451,8 +9124,8 @@ Add GUC and storage parameter to set the maximum size of GIN pending list. --> Add configuration - parameter to - control WAL read retry after failure + parameter to + control WAL read retry after failure (Alexey Vasiliev, Michael Paquier) @@ -6466,14 +9139,14 @@ Add GUC and storage parameter to set the maximum size of GIN pending list. 2015-03-11 [57aa5b2] Fujii ..: Add GUC to enable compression of full page imag.. 
--> - Allow compression of full-page images stored in WAL + Allow compression of full-page images stored in WAL (Rahila Syed, Michael Paquier) This feature reduces WAL volume, at the cost of more CPU time spent on WAL logging and WAL replay. It is controlled by a new - configuration parameter , which + configuration parameter , which currently is off by default. @@ -6483,7 +9156,7 @@ Add GUC and storage parameter to set the maximum size of GIN pending list. 2015-05-08 [de76884] Heikki..: At promotion, archive last segment from old tim.. --> - Archive WAL files with suffix .partial + Archive WAL files with suffix .partial during standby promotion (Heikki Linnakangas) @@ -6494,15 +9167,15 @@ Add GUC and storage parameter to set the maximum size of GIN pending list. --> Add configuration parameter + linkend="guc-log-replication-commands"/> to log replication commands (Fujii Masao) By default, replication commands, e.g. IDENTIFY_SYSTEM, - are not logged, even when is set - to all. + linkend="protocol-replication">IDENTIFY_SYSTEM, + are not logged, even when is set + to all. @@ -6512,12 +9185,12 @@ Add GUC and storage parameter to set the maximum size of GIN pending list. --> Report the processes holding replication slots in pg_replication_slots + linkend="view-pg-replication-slots">pg_replication_slots (Craig Ringer) - The new output column is active_pid. + The new output column is active_pid. @@ -6526,9 +9199,9 @@ Add GUC and storage parameter to set the maximum size of GIN pending list. 2014-11-25 [b3fc672] Heikki..: Allow using connection URI in primary_conninfo. --> - Allow recovery.conf's primary_conninfo setting to - use connection URIs, e.g. postgres:// + Allow recovery.conf's primary_conninfo setting to + use connection URIs, e.g. postgres:// (Alexander Shulgin) @@ -6548,16 +9221,16 @@ Add GUC and storage parameter to set the maximum size of GIN pending list. 2015-05-08 [2c8f483] Andres..: Represent columns requiring insert and update p.. --> - Allow INSERTs + Allow INSERTs that would generate constraint conflicts to be turned into - UPDATEs or ignored (Peter Geoghegan, Heikki + UPDATEs or ignored (Peter Geoghegan, Heikki Linnakangas, Andres Freund) - The syntax is INSERT ... ON CONFLICT DO NOTHING/UPDATE. + The syntax is INSERT ... ON CONFLICT DO NOTHING/UPDATE. This is the Postgres implementation of the popular - UPSERT command. + UPSERT command. @@ -6566,10 +9239,10 @@ Add GUC and storage parameter to set the maximum size of GIN pending list. 2015-05-16 [f3d3118] Andres..: Support GROUPING SETS, CUBE and ROLLUP. --> - Add GROUP BY analysis features GROUPING SETS, - CUBE and - ROLLUP + Add GROUP BY analysis features GROUPING SETS, + CUBE and + ROLLUP (Andrew Gierth, Atri Sharma) @@ -6580,13 +9253,13 @@ Add GUC and storage parameter to set the maximum size of GIN pending list. --> Allow setting multiple target columns in - an UPDATE from the result of + an UPDATE from the result of a single sub-SELECT (Tom Lane) This is accomplished using the syntax UPDATE tab SET - (col1, col2, ...) = (SELECT ...). + (col1, col2, ...) = (SELECT ...). @@ -6595,13 +9268,13 @@ Add GUC and storage parameter to set the maximum size of GIN pending list. 2014-10-07 [df630b0] Alvaro..: Implement SKIP LOCKED for row-level locks --> - Add SELECT option - SKIP LOCKED to skip locked rows (Thomas Munro) + Add SELECT option + SKIP LOCKED to skip locked rows (Thomas Munro) This does not throw an error for locked rows like - NOWAIT does. + NOWAIT does. 
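SKIP LOCKED, described above, is typically used by queue-style consumers; a minimal sketch against a hypothetical jobs table:

    -- Each worker claims one pending job that no other session has locked,
    -- without blocking and without the error that NOWAIT would raise.
    SELECT id, payload
      FROM jobs
     WHERE state = 'pending'
     ORDER BY id
     LIMIT 1
     FOR UPDATE SKIP LOCKED;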
@@ -6610,8 +9283,8 @@ Add GUC and storage parameter to set the maximum size of GIN pending list. 2015-05-15 [f6d208d] Simon ..: TABLESAMPLE, SQL Standard and extensible --> - Add SELECT option - TABLESAMPLE to return a subset of a table (Petr + Add SELECT option + TABLESAMPLE to return a subset of a table (Petr Jelínek) @@ -6619,7 +9292,7 @@ Add GUC and storage parameter to set the maximum size of GIN pending list. This feature supports the SQL-standard table sampling methods. In addition, there are provisions for user-defined - table sampling methods. + table sampling methods. @@ -6648,13 +9321,13 @@ Add GUC and storage parameter to set the maximum size of GIN pending list. --> Add more details about sort ordering in EXPLAIN output (Marius Timmer, + linkend="sql-explain">EXPLAIN output (Marius Timmer, Lukas Kreft, Arne Scheffer) - Details include COLLATE, DESC, - USING, and NULLS FIRST/LAST. + Details include COLLATE, DESC, + USING, and NULLS FIRST/LAST. @@ -6663,7 +9336,7 @@ Add GUC and storage parameter to set the maximum size of GIN pending list. 2014-12-18 [35192f0] Alvaro..: Have VACUUM log number of skipped pages due to .. --> - Make VACUUM log the + Make VACUUM log the number of pages skipped due to pins (Jim Nasby) @@ -6673,15 +9346,15 @@ Add GUC and storage parameter to set the maximum size of GIN pending list. 2015-02-20 [d42358e] Alvaro..: Have TRUNCATE update pgstat tuple counters --> - Make TRUNCATE properly - update the pg_stat* tuple counters (Alexander Shulgin) + Make TRUNCATE properly + update the pg_stat* tuple counters (Alexander Shulgin) - <xref linkend="SQL-REINDEX"> + <xref linkend="sql-reindex"/> @@ -6690,8 +9363,8 @@ Add GUC and storage parameter to set the maximum size of GIN pending list. 2014-12-09 [fe263d1] Simon ..: REINDEX SCHEMA --> - Allow REINDEX to reindex an entire schema using the - SCHEMA option (Sawada Masahiko) + Allow REINDEX to reindex an entire schema using the + SCHEMA option (Sawada Masahiko) @@ -6700,7 +9373,7 @@ Add GUC and storage parameter to set the maximum size of GIN pending list. 2015-05-15 [ecd222e] Fujii ..: Support VERBOSE option in REINDEX command. --> - Add VERBOSE option to REINDEX (Sawada + Add VERBOSE option to REINDEX (Sawada Masahiko) @@ -6710,8 +9383,8 @@ Add GUC and storage parameter to set the maximum size of GIN pending list. 2014-12-09 [ae4e688] Simon ..: Silence REINDEX --> - Prevent REINDEX DATABASE and SCHEMA - from outputting object names, unless VERBOSE is used + Prevent REINDEX DATABASE and SCHEMA + from outputting object names, unless VERBOSE is used (Simon Riggs) @@ -6721,7 +9394,7 @@ Add GUC and storage parameter to set the maximum size of GIN pending list. 2015-04-09 [17d436d] Fujii ..: Remove obsolete FORCE option from REINDEX. --> - Remove obsolete FORCE option from REINDEX + Remove obsolete FORCE option from REINDEX (Fujii Masao) @@ -6741,7 +9414,7 @@ Add GUC and storage parameter to set the maximum size of GIN pending list. 2014-09-19 [491c029] Stephe..: Row-Level Security Policies (RLS) --> - Add row-level security control + Add row-level security control (Craig Ringer, KaiGai Kohei, Adam Brightwell, Dean Rasheed, Stephen Frost) @@ -6749,11 +9422,11 @@ Add GUC and storage parameter to set the maximum size of GIN pending list. This feature allows row-by-row control over which users can add, modify, or even see rows in a table. This is controlled by new - commands CREATE/ALTER/DROP POLICY and ALTER TABLE ... ENABLE/DISABLE - ROW SECURITY. + commands CREATE/ALTER/DROP POLICY and ALTER TABLE ... 
ENABLE/DISABLE + ROW SECURITY. @@ -6764,8 +9437,8 @@ Add GUC and storage parameter to set the maximum size of GIN pending list. Allow changing of the WAL logging status of a table after creation with ALTER TABLE ... SET LOGGED / - UNLOGGED (Fabrízio de Royes Mello) + linkend="sql-altertable">ALTER TABLE ... SET LOGGED / + UNLOGGED (Fabrízio de Royes Mello) @@ -6776,12 +9449,12 @@ Add GUC and storage parameter to set the maximum size of GIN pending list. 2014-12-13 [e39b6f9] Andrew..: Add CINE option for CREATE TABLE AS and CREATE .. --> - Add IF NOT EXISTS clause to CREATE TABLE AS, - CREATE INDEX, - CREATE SEQUENCE, - and CREATE - MATERIALIZED VIEW (Fabrízio de Royes Mello) + Add IF NOT EXISTS clause to CREATE TABLE AS, + CREATE INDEX, + CREATE SEQUENCE, + and CREATE + MATERIALIZED VIEW (Fabrízio de Royes Mello) @@ -6790,9 +9463,9 @@ Add GUC and storage parameter to set the maximum size of GIN pending list. 2015-03-24 [1d8198b] Bruce ..: Add support for ALTER TABLE IF EXISTS ... RENAM.. --> - Add support for IF EXISTS to ALTER TABLE ... RENAME - CONSTRAINT (Bruce Momjian) + Add support for IF EXISTS to ALTER TABLE ... RENAME + CONSTRAINT (Bruce Momjian) @@ -6801,17 +9474,17 @@ Add GUC and storage parameter to set the maximum size of GIN pending list. 2015-03-09 [31eae60] Alvaro..: Allow CURRENT/SESSION_USER to be used in certai.. --> - Allow some DDL commands to accept CURRENT_USER - or SESSION_USER, meaning the current user or session + Allow some DDL commands to accept CURRENT_USER + or SESSION_USER, meaning the current user or session user, in place of a specific user name (Kyotaro Horiguchi, Álvaro Herrera) This feature is now supported in - , , - , , - and ALTER object OWNER TO commands. + , , + , , + and ALTER object OWNER TO commands. @@ -6820,8 +9493,8 @@ Add GUC and storage parameter to set the maximum size of GIN pending list. 2014-12-23 [7eca575] Alvaro..: get_object_address: separate domain constraints.. --> - Support comments on domain - constraints (Álvaro Herrera) + Support comments on domain + constraints (Álvaro Herrera) @@ -6840,14 +9513,14 @@ Add GUC and storage parameter to set the maximum size of GIN pending list. 2015-05-11 [fa26424] Stephe..: Allow LOCK TABLE .. ROW EXCLUSIVE MODE with IN.. --> - Allow LOCK TABLE ... ROW EXCLUSIVE - MODE for those with INSERT privileges on the + Allow LOCK TABLE ... ROW EXCLUSIVE + MODE for those with INSERT privileges on the target table (Stephen Frost) - Previously this command required UPDATE, DELETE, - or TRUNCATE privileges. + Previously this command required UPDATE, DELETE, + or TRUNCATE privileges. @@ -6856,7 +9529,7 @@ Add GUC and storage parameter to set the maximum size of GIN pending list. 2015-03-23 [e5f455f] Tom Lane: Apply table and domain CHECK constraints in nam. --> - Apply table and domain CHECK constraints in order by name + Apply table and domain CHECK constraints in order by name (Tom Lane) @@ -6872,16 +9545,16 @@ Add GUC and storage parameter to set the maximum size of GIN pending list. --> Allow CREATE/ALTER DATABASE - to manipulate datistemplate and - datallowconn (Vik Fearing) + linkend="sql-createdatabase">CREATE/ALTER DATABASE + to manipulate datistemplate and + datallowconn (Vik Fearing) This allows these per-database settings to be changed without manually modifying the pg_database + linkend="catalog-pg-database">pg_database system catalog. @@ -6898,7 +9571,7 @@ Add GUC and storage parameter to set the maximum size of GIN pending list. 2014-07-10 [59efda3] Tom Lane: Implement IMPORT FOREIGN SCHEMA. 
--> - Add support for + Add support for (Ronan Dunklau, Michael Paquier, Tom Lane) @@ -6913,7 +9586,7 @@ Add GUC and storage parameter to set the maximum size of GIN pending list. 2014-12-17 [fc2ac1f] Tom Lane: Allow CHECK constraints to be placed on foreign.. --> - Allow CHECK constraints to be placed on foreign tables + Allow CHECK constraints to be placed on foreign tables (Shigeru Hanada, Etsuro Fujita) @@ -6922,7 +9595,7 @@ Add GUC and storage parameter to set the maximum size of GIN pending list. and are not enforced locally. However, they are assumed to hold for purposes of query optimization, such as constraint - exclusion. + exclusion. @@ -6938,7 +9611,7 @@ Add GUC and storage parameter to set the maximum size of GIN pending list. To let this work naturally, foreign tables are now allowed to have check constraints marked as not valid, and to set storage - and OID characteristics, even though these operations are + and OID characteristics, even though these operations are effectively no-ops for a foreign table. @@ -6968,14 +9641,14 @@ Add GUC and storage parameter to set the maximum size of GIN pending list. 2015-05-11 [b488c58] Alvaro..: Allow on-the-fly capture of DDL event details --> - Whenever a ddl_command_end event trigger is installed, - capture details of DDL activity for it to inspect + Whenever a ddl_command_end event trigger is installed, + capture details of DDL activity for it to inspect (Álvaro Herrera) This information is available through a set-returning function pg_event_trigger_ddl_commands(), + linkend="pg-event-trigger-ddl-command-end-functions">pg_event_trigger_ddl_commands(), or by inspection of C data structures if that function doesn't provide enough detail. @@ -6987,7 +9660,7 @@ Add GUC and storage parameter to set the maximum size of GIN pending list. --> Allow event triggers on table rewrites caused by ALTER TABLE (Dimitri + linkend="sql-altertable">ALTER TABLE (Dimitri Fontaine) @@ -6998,10 +9671,10 @@ Add GUC and storage parameter to set the maximum size of GIN pending list. --> Add event trigger support for database-level COMMENT, SECURITY LABEL, - and GRANT/REVOKE (Álvaro Herrera) + linkend="sql-comment">COMMENT, SECURITY LABEL, + and GRANT/REVOKE (Álvaro Herrera) @@ -7012,7 +9685,7 @@ Add GUC and storage parameter to set the maximum size of GIN pending list. --> Add columns to the output of pg_event_trigger_dropped_objects + linkend="pg-event-trigger-sql-drop-functions">pg_event_trigger_dropped_objects (Álvaro Herrera) @@ -7037,12 +9710,12 @@ Add GUC and storage parameter to set the maximum size of GIN pending list. 2014-09-09 [57b1085] Peter ..: Allow empty content in xml type --> - Allow the xml data type + Allow the xml data type to accept empty or all-whitespace content values (Peter Eisentraut) - This is required by the SQL/XML + This is required by the SQL/XML specification. @@ -7052,8 +9725,8 @@ Add GUC and storage parameter to set the maximum size of GIN pending list. 2014-10-21 [6f04368] Peter ..: Allow input format xxxx-xxxx-xxxx for macaddr .. --> - Allow macaddr input - using the format xxxx-xxxx-xxxx (Herwin Weststrate) + Allow macaddr input + using the format xxxx-xxxx-xxxx (Herwin Weststrate) @@ -7063,15 +9736,15 @@ Add GUC and storage parameter to set the maximum size of GIN pending list. 
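The ddl_command_end capture described above is consumed from inside an event trigger function via pg_event_trigger_ddl_commands(); a sketch with hypothetical names:

    -- Event trigger function that reports each captured DDL command.
    CREATE FUNCTION log_ddl() RETURNS event_trigger
    LANGUAGE plpgsql AS $$
    DECLARE
        r record;
    BEGIN
        FOR r IN SELECT * FROM pg_event_trigger_ddl_commands() LOOP
            RAISE NOTICE 'DDL: % on %', r.command_tag, r.object_identity;
        END LOOP;
    END;
    $$;

    CREATE EVENT TRIGGER log_ddl_commands ON ddl_command_end
        EXECUTE PROCEDURE log_ddl();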
--> Disallow non-SQL-standard syntax for interval with + linkend="datatype-interval-input">interval with both precision and field specifications (Bruce Momjian) Per the standard, such type specifications should be written as, - for example, INTERVAL MINUTE TO SECOND(2). - PostgreSQL formerly allowed this to be written as - INTERVAL(2) MINUTE TO SECOND, but it must now be + for example, INTERVAL MINUTE TO SECOND(2). + PostgreSQL formerly allowed this to be written as + INTERVAL(2) MINUTE TO SECOND, but it must now be written in the standard way. @@ -7082,8 +9755,8 @@ Add GUC and storage parameter to set the maximum size of GIN pending list. --> Add selectivity estimators for inet/cidr operators and improve + linkend="datatype-inet">inet/cidr operators and improve estimators for text search functions (Emre Hasegeli, Tom Lane) @@ -7095,9 +9768,9 @@ Add GUC and storage parameter to set the maximum size of GIN pending list. --> Add data - types regrole - and regnamespace - to simplify entering and pretty-printing the OID of a role + types regrole + and regnamespace + to simplify entering and pretty-printing the OID of a role or namespace (Kyotaro Horiguchi) @@ -7105,7 +9778,7 @@ Add GUC and storage parameter to set the maximum size of GIN pending list. - <link linkend="datatype-json"><acronym>JSON</></link> + <link linkend="datatype-json"><acronym>JSON</acronym></link> @@ -7115,10 +9788,10 @@ Add GUC and storage parameter to set the maximum size of GIN pending list. 2015-05-31 [37def42] Andrew..: Rename jsonb_replace to jsonb_set and allow it .. --> - Add jsonb functions jsonb_set() + Add jsonb functions jsonb_set() and jsonb_pretty() + linkend="functions-json-processing-table">jsonb_pretty() (Dmitry Dolgov, Andrew Dunstan, Petr Jelínek) @@ -7128,23 +9801,23 @@ Add GUC and storage parameter to set the maximum size of GIN pending list. 2014-12-12 [7e354ab] Andrew..: Add several generator functions for jsonb that .. --> - Add jsonb generator functions to_jsonb(), + Add jsonb generator functions to_jsonb(), jsonb_object(), + linkend="functions-json-creation-table">jsonb_object(), jsonb_build_object(), + linkend="functions-json-creation-table">jsonb_build_object(), jsonb_build_array(), + linkend="functions-json-creation-table">jsonb_build_array(), jsonb_agg(), + linkend="functions-aggregate-table">jsonb_agg(), and jsonb_object_agg() + linkend="functions-aggregate-table">jsonb_object_agg() (Andrew Dunstan) - Equivalent functions already existed for type json. + Equivalent functions already existed for type json. @@ -7154,8 +9827,8 @@ Add GUC and storage parameter to set the maximum size of GIN pending list. --> Reduce casting requirements to/from json and jsonb (Tom Lane) + linkend="datatype-json">json and jsonb (Tom Lane) @@ -7164,9 +9837,9 @@ Add GUC and storage parameter to set the maximum size of GIN pending list. 2015-06-11 [908e234] Andrew..: Rename jsonb - text[] operator to #- to avoid a.. --> - Allow text, text array, and integer - values to be subtracted - from jsonb documents (Dmitry Dolgov, Andrew Dunstan) + Allow text, text array, and integer + values to be subtracted + from jsonb documents (Dmitry Dolgov, Andrew Dunstan) @@ -7175,8 +9848,8 @@ Add GUC and storage parameter to set the maximum size of GIN pending list. 2015-05-12 [c694701] Andrew..: Additional functions and operators for jsonb --> - Add jsonb || operator + Add jsonb || operator (Dmitry Dolgov, Andrew Dunstan) @@ -7187,9 +9860,9 @@ Add GUC and storage parameter to set the maximum size of GIN pending list. 
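Taken together, the jsonb additions above allow documents to be edited directly in SQL; a few self-contained examples:

    SELECT jsonb_set('{"a": {"b": 1}}', '{a,b}', '2');   -- {"a": {"b": 2}}
    SELECT '{"a": 1}'::jsonb || '{"b": 2}'::jsonb;       -- {"a": 1, "b": 2}
    SELECT '{"a": 1, "b": 2}'::jsonb - 'a';              -- {"b": 2}
    SELECT jsonb_pretty('{"a": [1, 2]}');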
--> Add json_strip_nulls() + linkend="functions-json-processing-table">json_strip_nulls() and jsonb_strip_nulls() + linkend="functions-json-processing-table">jsonb_strip_nulls() functions to remove JSON null values from documents (Andrew Dunstan) @@ -7211,8 +9884,8 @@ Add GUC and storage parameter to set the maximum size of GIN pending list. 2014-11-11 [1871c89] Fujii ..: Add generate_series(numeric, numeric). --> - Add generate_series() - for numeric values (Plato Malugin) + Add generate_series() + for numeric values (Plato Malugin) @@ -7222,8 +9895,8 @@ Add GUC and storage parameter to set the maximum size of GIN pending list. --> Allow array_agg() and - ARRAY() to take arrays as inputs (Ali Akbar, Tom Lane) + linkend="functions-aggregate-table">array_agg() and + ARRAY() to take arrays as inputs (Ali Akbar, Tom Lane) @@ -7234,9 +9907,9 @@ Add GUC and storage parameter to set the maximum size of GIN pending list. --> Add functions array_position() + linkend="array-functions-table">array_position() and array_positions() + linkend="array-functions-table">array_positions() to return subscripts of array values (Pavel Stehule) @@ -7246,8 +9919,8 @@ Add GUC and storage parameter to set the maximum size of GIN pending list. 2014-12-15 [4520ba6] Heikki..: Add point <-> polygon distance operator. --> - Add a point-to-polygon distance operator - <-> + Add a point-to-polygon distance operator + <-> (Alexander Korotkov) @@ -7258,8 +9931,8 @@ Add GUC and storage parameter to set the maximum size of GIN pending list. --> Allow multibyte characters as escapes in SIMILAR TO - and SUBSTRING + linkend="functions-similarto-regexp">SIMILAR TO + and SUBSTRING (Jeff Davis) @@ -7274,7 +9947,7 @@ Add GUC and storage parameter to set the maximum size of GIN pending list. --> Add a width_bucket() + linkend="functions-math-func-table">width_bucket() variant that supports any sortable data type and non-uniform bucket widths (Petr Jelínek) @@ -7285,8 +9958,8 @@ Add GUC and storage parameter to set the maximum size of GIN pending list. 2015-06-28 [cb2acb1] Heikki..: Add missing_ok option to the SQL functions for.. --> - Add an optional missing_ok argument to pg_read_file() + Add an optional missing_ok argument to pg_read_file() and related functions (Michael Paquier, Heikki Linnakangas) @@ -7296,14 +9969,14 @@ Add GUC and storage parameter to set the maximum size of GIN pending list. 2015-03-10 [865f14a] Robert..: Allow named parameters to be specified using =>.. --> - Allow => + Allow => to specify named parameters in function calls (Pavel Stehule) - Previously only := could be used. This requires removing - the possibility for => to be a user-defined operator. - Creation of user-defined => operators has been issuing + Previously only := could be used. This requires removing + the possibility for => to be a user-defined operator. + Creation of user-defined => operators has been issuing warnings since PostgreSQL 9.0. @@ -7313,7 +9986,7 @@ Add GUC and storage parameter to set the maximum size of GIN pending list. 2015-03-25 [06bf0dd] Tom Lane: Upgrade src/port/rint.c to be POSIX-compliant. --> - Add POSIX-compliant rounding for platforms that use + Add POSIX-compliant rounding for platforms that use PostgreSQL-supplied rounding functions (Pedro Gimeno Fortea) @@ -7332,11 +10005,11 @@ Add GUC and storage parameter to set the maximum size of GIN pending list. 
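The => notation described above is used as follows; both calls are equivalent (a sketch with a hypothetical SQL function):

    CREATE FUNCTION scale_value(val numeric, factor numeric DEFAULT 1)
        RETURNS numeric LANGUAGE sql
        AS 'SELECT val * factor';

    SELECT scale_value(2.5, 4);                   -- positional notation
    SELECT scale_value(val => 2.5, factor => 4);  -- named notation; := still works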
--> Add function pg_get_object_address() - to return OIDs that uniquely + linkend="functions-info-object-table">pg_get_object_address() + to return OIDs that uniquely identify an object, and function pg_identify_object_as_address() - to return object information based on OIDs (Álvaro + linkend="functions-info-object-table">pg_identify_object_as_address() + to return object information based on OIDs (Álvaro Herrera) @@ -7347,11 +10020,11 @@ Add GUC and storage parameter to set the maximum size of GIN pending list. --> Loosen security checks for viewing queries in pg_stat_activity, + linkend="pg-stat-activity-view">pg_stat_activity, executing pg_cancel_backend(), + linkend="functions-admin-signal-table">pg_cancel_backend(), and executing pg_terminate_backend() + linkend="functions-admin-signal-table">pg_terminate_backend() (Stephen Frost) @@ -7367,7 +10040,7 @@ Add GUC and storage parameter to set the maximum size of GIN pending list. --> Add pg_stat_get_snapshot_timestamp() + linkend="monitoring-stats-funcs-table">pg_stat_get_snapshot_timestamp() to output the time stamp of the statistics snapshot (Matt Kelly) @@ -7383,7 +10056,7 @@ Add GUC and storage parameter to set the maximum size of GIN pending list. --> Add mxid_age() + linkend="vacuum-for-multixact-wraparound">mxid_age() to compute multi-xid age (Bruce Momjian) @@ -7401,9 +10074,9 @@ Add GUC and storage parameter to set the maximum size of GIN pending list. 2014-08-28 [6c40f83] Tom Lane: Add min and max aggregates for inet/cidr data t.. --> - Add min()/max() aggregates - for inet/cidr data types (Haribabu + Add min()/max() aggregates + for inet/cidr data types (Haribabu Kommi) @@ -7436,12 +10109,12 @@ Add GUC and storage parameter to set the maximum size of GIN pending list. --> Improve support for composite types in PL/Python (Ed Behn, Ronan + linkend="plpython">PL/Python (Ed Behn, Ronan Dunklau) - This allows PL/Python functions to return arrays + This allows PL/Python functions to return arrays of composite types. @@ -7452,7 +10125,7 @@ Add GUC and storage parameter to set the maximum size of GIN pending list. --> Reduce lossiness of PL/Python floating-point value + linkend="plpython">PL/Python floating-point value conversions (Marko Kreen) @@ -7462,19 +10135,19 @@ Add GUC and storage parameter to set the maximum size of GIN pending list. 2015-04-26 [cac7658] Peter ..: Add transforms feature --> - Allow specification of conversion routines between SQL + Allow specification of conversion routines between SQL data types and data types of procedural languages (Peter Eisentraut) This change adds new commands CREATE/DROP TRANSFORM. + linkend="sql-createtransform">CREATE/DROP TRANSFORM. This also adds optional transformations between the hstore and ltree types to/from PL/Perl and PL/Python. + linkend="hstore">hstore and ltree types to/from PL/Perl and PL/Python. @@ -7493,7 +10166,7 @@ Add GUC and storage parameter to set the maximum size of GIN pending list. 2015-02-16 [9e3ad1a] Tom Lane: Use fast path in plpgsql's RETURN/RETURN NEXT i.. --> - Improve PL/pgSQL array + Improve PL/pgSQL array performance (Tom Lane) @@ -7503,8 +10176,8 @@ Add GUC and storage parameter to set the maximum size of GIN pending list. 2015-03-25 [a4847fc] Tom Lane: Add an ASSERT statement in plpgsql. --> - Add an ASSERT - statement in PL/pgSQL (Pavel Stehule) + Add an ASSERT + statement in PL/pgSQL (Pavel Stehule) @@ -7513,7 +10186,7 @@ Add GUC and storage parameter to set the maximum size of GIN pending list. 
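The new ASSERT statement above raises an ASSERT_FAILURE error when its condition does not hold; a minimal sketch inside an anonymous DO block:

    DO $$
    DECLARE
        n bigint;
    BEGIN
        SELECT count(*) INTO n FROM pg_class;
        ASSERT n > 0, 'pg_class is unexpectedly empty';
    END;
    $$;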
2014-11-25 [bb1b8f6] Tom Lane: De-reserve most statement-introducing keywords .. --> - Allow more PL/pgSQL + Allow more PL/pgSQL keywords to be used as identifiers (Tom Lane) @@ -7538,11 +10211,11 @@ Add GUC and storage parameter to set the maximum size of GIN pending list. --> Move pg_archivecleanup, - pg_test_fsync, - pg_test_timing, - and pg_xlogdump - from contrib to src/bin (Peter Eisentraut) + linkend="pgarchivecleanup">pg_archivecleanup, + pg_test_fsync, + pg_test_timing, + and pg_xlogdump + from contrib to src/bin (Peter Eisentraut) @@ -7556,7 +10229,7 @@ Add GUC and storage parameter to set the maximum size of GIN pending list. 2015-03-23 [61081e7] Heikki..: Add pg_rewind, for re-synchronizing a master se.. --> - Add pg_rewind, + Add pg_rewind, which allows re-synchronizing a master server after failback (Heikki Linnakangas) @@ -7568,13 +10241,13 @@ Add GUC and storage parameter to set the maximum size of GIN pending list. --> Allow pg_receivexlog + linkend="app-pgreceivewal">pg_receivexlog to manage physical replication slots (Michael Paquier) - This is controlled via new and + options. @@ -7584,13 +10257,13 @@ Add GUC and storage parameter to set the maximum size of GIN pending list. --> Allow pg_receivexlog - to synchronously flush WAL to storage using new - option (Furuya Osamu, Fujii Masao) - Without this, WAL files are fsync'ed only on close. + Without this, WAL files are fsync'ed only on close. @@ -7599,8 +10272,8 @@ Add GUC and storage parameter to set the maximum size of GIN pending list. 2015-01-23 [a179232] Alvaro..: vacuumdb: enable parallel mode --> - Allow vacuumdb to - vacuum in parallel using new option (Dilip Kumar) @@ -7609,7 +10282,7 @@ Add GUC and storage parameter to set the maximum size of GIN pending list. 2015-11-12 [5094da9] Alvaro..: vacuumdb: don't prompt for passwords over and .. --> - In vacuumdb, do not + In vacuumdb, do not prompt for the same password repeatedly when multiple connections are necessary (Haribabu Kommi, Michael Paquier) @@ -7620,8 +10293,8 @@ Add GUC and storage parameter to set the maximum size of GIN pending list. 2015-05-15 [458a077] Fujii ..: Support ––verbose option in reindexdb. --> - Add @@ -7631,10 +10304,10 @@ Add GUC and storage parameter to set the maximum size of GIN pending list. 2015-05-12 [72d422a] Andrew..: Map basebackup tablespaces using a tablespace_.. --> - Make pg_basebackup - use a tablespace mapping file when using tar format, + Make pg_basebackup + use a tablespace mapping file when using tar format, to support symbolic links and file paths of 100+ characters in length - on MS Windows (Amit Kapila) + on MS Windows (Amit Kapila) @@ -7644,15 +10317,15 @@ Add GUC and storage parameter to set the maximum size of GIN pending list. 2014-09-19 [bdd5726] Andres..: Add the capability to display summary statistic.. --> - Add pg_xlogdump option - to display summary statistics (Abhijit Menon-Sen) - <xref linkend="APP-PSQL"> + <xref linkend="app-psql"/> @@ -7661,7 +10334,7 @@ Add GUC and storage parameter to set the maximum size of GIN pending list. 2015-03-31 [9d9991c] Bruce ..: psql: add asciidoc output format --> - Allow psql to produce AsciiDoc output (Szymon Guz) + Allow psql to produce AsciiDoc output (Szymon Guz) @@ -7670,14 +10343,14 @@ Add GUC and storage parameter to set the maximum size of GIN pending list. 2014-07-10 [5b214c5] Fujii ..: Add new ECHO mode 'errors' that displays only .. 
--> - Add an errors mode that displays only failed commands - to psql's ECHO variable + Add an errors mode that displays only failed commands + to psql's ECHO variable (Pavel Stehule) - This behavior can also be selected with psql's - option. @@ -7687,12 +10360,12 @@ Add GUC and storage parameter to set the maximum size of GIN pending list. --> Provide separate column, header, and border linestyle control - in psql's unicode linestyle (Pavel Stehule) + in psql's unicode linestyle (Pavel Stehule) Single or double lines are supported; the default is - single. + single. @@ -7701,8 +10374,8 @@ Add GUC and storage parameter to set the maximum size of GIN pending list. 2014-09-02 [51bb795] Andres..: Add psql PROMPT variable showing which line of .. --> - Add new option %l in psql's PROMPT variables + Add new option %l in psql's PROMPT variables to display the current multiline statement line number (Sawada Masahiko) @@ -7713,8 +10386,8 @@ Add GUC and storage parameter to set the maximum size of GIN pending list. 2015-03-28 [7655f4c] Andrew..: Add a pager_min_lines setting to psql --> - Add \pset option pager_min_lines + Add \pset option pager_min_lines to control pager invocation (Andrew Dunstan) @@ -7724,7 +10397,7 @@ Add GUC and storage parameter to set the maximum size of GIN pending list. 2014-11-21 [4077fb4] Andrew..: Fix an error in psql that overcounted output l.. --> - Improve psql line counting used when deciding + Improve psql line counting used when deciding to invoke the pager (Andrew Dunstan) @@ -7735,8 +10408,8 @@ Add GUC and storage parameter to set the maximum size of GIN pending list. 2015-12-08 [e90371d] Tom Lane: Make failure to open psql log-file fatal. --> - psql now fails if the file specified by - an or switch cannot be written (Tom Lane, Daniel Vérité) @@ -7750,8 +10423,8 @@ Add GUC and storage parameter to set the maximum size of GIN pending list. 2014-07-12 [bd40951] Andres..: Minimal psql tab completion support for SET se.. --> - Add psql tab completion when setting the - variable (Jeff Janes) + Add psql tab completion when setting the + variable (Jeff Janes) @@ -7764,7 +10437,7 @@ Add GUC and storage parameter to set the maximum size of GIN pending list. 2014-06-23 [631e7f6] Heikki..: Improve tab-completion of DROP and ALTER ENABLE.. --> - Improve psql's tab completion for triggers and rules + Improve psql's tab completion for triggers and rules (Andreas Karlsson) @@ -7772,7 +10445,7 @@ Add GUC and storage parameter to set the maximum size of GIN pending list. - <link linkend="APP-PSQL-meta-commands">Backslash Commands</link> + <link linkend="app-psql-meta-commands">Backslash Commands</link> @@ -7781,17 +10454,17 @@ Add GUC and storage parameter to set the maximum size of GIN pending list. 2014-09-10 [07c8651] Andres..: Add new psql help topics, accessible to both.. --> - Add psql \? help sections - variables and options (Pavel Stehule) + Add psql \? help sections + variables and options (Pavel Stehule) - \? variables shows psql's special - variables and \? options shows the command-line options. - \? commands shows the meta-commands, which is the + \? variables shows psql's special + variables and \? options shows the command-line options. + \? commands shows the meta-commands, which is the traditional output and remains the default. These help displays can also be obtained with the command-line - option --help=section. + option --help=section. @@ -7800,7 +10473,7 @@ Add GUC and storage parameter to set the maximum size of GIN pending list. 
2014-07-14 [ee80f04] Alvaro..: psql: Show tablespace size in \db+ --> - Show tablespace size in psql's \db+ + Show tablespace size in psql's \db+ (Fabrízio de Royes Mello) @@ -7810,7 +10483,7 @@ Add GUC and storage parameter to set the maximum size of GIN pending list. 2015-04-09 [a6f3c1f] Magnus..: Show owner of types in psql \dT+ --> - Show data type owners in psql's \dT+ + Show data type owners in psql's \dT+ (Magnus Hagander) @@ -7820,13 +10493,13 @@ Add GUC and storage parameter to set the maximum size of GIN pending list. 2014-09-04 [f6f654f] Fujii ..: Allow \watch to display query execution time if.. --> - Allow psql's \watch to output - \timing information (Fujii Masao) + Allow psql's \watch to output + \timing information (Fujii Masao) - Also prevent @@ -7835,8 +10508,8 @@ Add GUC and storage parameter to set the maximum size of GIN pending list. 2014-11-22 [eca2b9b] Andrew..: Rework echo_hidden for \sf and \ef from commit .. --> - Make psql's \sf and \ef - commands honor ECHO_HIDDEN (Andrew Dunstan) + Make psql's \sf and \ef + commands honor ECHO_HIDDEN (Andrew Dunstan) @@ -7845,8 +10518,8 @@ Add GUC and storage parameter to set the maximum size of GIN pending list. 2014-08-12 [e15c4ab] Fujii ..: Add tab-completion for \unset and valid setting.. --> - Improve psql tab completion for \set, - \unset, and :variable names (Pavel + Improve psql tab completion for \set, + \unset, and :variable names (Pavel Stehule) @@ -7857,7 +10530,7 @@ Add GUC and storage parameter to set the maximum size of GIN pending list. --> Allow tab completion of role names - in psql \c commands (Ian Barwick) + in psql \c commands (Ian Barwick) @@ -7868,7 +10541,7 @@ Add GUC and storage parameter to set the maximum size of GIN pending list. - <xref linkend="APP-PGDUMP"> + <xref linkend="app-pgdump"/> @@ -7877,15 +10550,15 @@ Add GUC and storage parameter to set the maximum size of GIN pending list. 2014-11-17 [be1cc8f] Simon ..: Add pg_dump ––snapshot option --> - Allow pg_dump to share a snapshot taken by another - session using (Simon Riggs, Michael Paquier) The remote snapshot must have been exported by - pg_export_snapshot() or logical replication slot + pg_export_snapshot() or logical replication slot creation. This can be used to share a consistent snapshot - across multiple pg_dump processes. + across multiple pg_dump processes. @@ -7910,13 +10583,13 @@ Add GUC and storage parameter to set the maximum size of GIN pending list. 2014-07-07 [7700597] Tom Lane: In pg_dump, show server and pg_dump versions w.. --> - Make pg_dump always print the server and - pg_dump versions (Jing Wang) + Make pg_dump always print the server and + pg_dump versions (Jing Wang) Previously, version information was only printed in - mode. @@ -7925,9 +10598,9 @@ Add GUC and storage parameter to set the maximum size of GIN pending list. 2015-06-04 [232cd63] Fujii ..: Remove -i/-ignore-version option from pg_dump.. --> - Remove the long-ignored @@ -7936,7 +10609,7 @@ Add GUC and storage parameter to set the maximum size of GIN pending list. - <xref linkend="app-pg-ctl"> + <xref linkend="app-pg-ctl"/> @@ -7945,7 +10618,7 @@ Add GUC and storage parameter to set the maximum size of GIN pending list. 2014-08-25 [ebe30ad] Bruce ..: pg_ctl, pg_upgrade: allow multiple -o/-O opti.. --> - Support multiple pg_ctl options, concatenating their values (Bruce Momjian) @@ -7955,13 +10628,13 @@ Add GUC and storage parameter to set the maximum size of GIN pending list. 2014-07-17 [c0e4520] Magnus..: Add option to pg_ctl to choose event source for.. 
--> - Allow control of pg_ctl's event source logging - on MS Windows (MauMau) + Allow control of pg_ctl's event source logging + on MS Windows (MauMau) - This only controls pg_ctl, not the server, which - has separate settings in postgresql.conf. + This only controls pg_ctl, not the server, which + has separate settings in postgresql.conf. @@ -7971,14 +10644,14 @@ Add GUC and storage parameter to set the maximum size of GIN pending list. --> If the server's listen address is set to a wildcard value - (0.0.0.0 in IPv4 or :: in IPv6), connect via + (0.0.0.0 in IPv4 or :: in IPv6), connect via the loopback address rather than trying to use the wildcard address literally (Kondo Yuta) This fix primarily affects Windows, since on other platforms - pg_ctl will prefer to use a Unix-domain socket. + pg_ctl will prefer to use a Unix-domain socket. @@ -7987,7 +10660,7 @@ Add GUC and storage parameter to set the maximum size of GIN pending list. - <xref linkend="pgupgrade"> + <xref linkend="pgupgrade"/> @@ -7996,13 +10669,13 @@ Add GUC and storage parameter to set the maximum size of GIN pending list. 2015-04-14 [9fa8b0e] Peter ..: Move pg_upgrade from contrib/ to src/bin/ --> - Move pg_upgrade from contrib to - src/bin (Peter Eisentraut) + Move pg_upgrade from contrib to + src/bin (Peter Eisentraut) In connection with this change, the functionality previously - provided by the pg_upgrade_support module has been + provided by the pg_upgrade_support module has been moved into the core server. @@ -8012,8 +10685,8 @@ Add GUC and storage parameter to set the maximum size of GIN pending list. 2014-08-25 [ebe30ad] Bruce ..: pg_ctl, pg_upgrade: allow multiple -o/-O optio.. --> - Support multiple pg_upgrade - / options, concatenating their values (Bruce Momjian) @@ -8024,7 +10697,7 @@ Add GUC and storage parameter to set the maximum size of GIN pending list. --> Improve database collation comparisons in - pg_upgrade (Heikki Linnakangas) + pg_upgrade (Heikki Linnakangas) @@ -8042,7 +10715,7 @@ Add GUC and storage parameter to set the maximum size of GIN pending list. - <xref linkend="pgbench"> + <xref linkend="pgbench"/> @@ -8051,7 +10724,7 @@ Add GUC and storage parameter to set the maximum size of GIN pending list. 2015-04-13 [81134af] Peter ..: Move pgbench from contrib/ to src/bin/ --> - Move pgbench from contrib to src/bin + Move pgbench from contrib to src/bin (Peter Eisentraut) @@ -8062,7 +10735,7 @@ Add GUC and storage parameter to set the maximum size of GIN pending list. --> Fix calculation of TPS number excluding connections - establishing (Tatsuo Ishii, Fabien Coelho) + establishing (Tatsuo Ishii, Fabien Coelho) @@ -8084,7 +10757,7 @@ Add GUC and storage parameter to set the maximum size of GIN pending list. - This is controlled by a new option. @@ -8094,7 +10767,7 @@ Add GUC and storage parameter to set the maximum size of GIN pending list. --> Allow pgbench to generate Gaussian/exponential distributions - using \setrandom (Kondo Mitsumasa, Fabien Coelho) + using \setrandom (Kondo Mitsumasa, Fabien Coelho) @@ -8103,9 +10776,9 @@ Add GUC and storage parameter to set the maximum size of GIN pending list. 
2015-03-02 [878fdcb] Robert..: pgbench: Add a real expression syntax to \set --> - Allow pgbench's \set command to handle + Allow pgbench's \set command to handle arithmetic expressions containing more than one operator, and add - % (modulo) to the set of operators it supports + % (modulo) to the set of operators it supports (Robert Haas, Fabien Coelho) @@ -8126,7 +10799,7 @@ Add GUC and storage parameter to set the maximum size of GIN pending list. 2014-11-20 [2c03216] Heikki..: Revamp the WAL record format. --> - Simplify WAL record format + Simplify WAL record format (Heikki Linnakangas) @@ -8151,7 +10824,7 @@ Add GUC and storage parameter to set the maximum size of GIN pending list. 2014-09-25 [b64d92f] Andres..: Add a basic atomic ops API abstracting away pla.. --> - Add atomic memory operations API (Andres Freund) + Add atomic memory operations API (Andres Freund) @@ -8189,13 +10862,13 @@ Add GUC and storage parameter to set the maximum size of GIN pending list. --> Foreign tables can now take part in INSERT ... ON CONFLICT - DO NOTHING queries (Peter Geoghegan, Heikki Linnakangas, + DO NOTHING queries (Peter Geoghegan, Heikki Linnakangas, Andres Freund) Foreign data wrappers must be modified to handle this. - INSERT ... ON CONFLICT DO UPDATE is not supported on + INSERT ... ON CONFLICT DO UPDATE is not supported on foreign tables. @@ -8205,7 +10878,7 @@ Add GUC and storage parameter to set the maximum size of GIN pending list. 2014-12-18 [4a14f13] Tom Lane: Improve hash_create's API for selecting simple-.. --> - Improve hash_create()'s API for selecting + Improve hash_create()'s API for selecting simple-binary-key hash functions (Teodor Sigaev, Tom Lane) @@ -8226,8 +10899,8 @@ Add GUC and storage parameter to set the maximum size of GIN pending list. 2014-06-28 [a6d488c] Andres..: Remove Alpha and Tru64 support. --> - Remove Alpha (CPU) and Tru64 (OS) ports (Andres Freund) + Remove Alpha (CPU) and Tru64 (OS) ports (Andres Freund) @@ -8237,11 +10910,11 @@ Add GUC and storage parameter to set the maximum size of GIN pending list. --> Remove swap-byte-based spinlock implementation for - ARMv5 and earlier CPUs (Robert Haas) + ARMv5 and earlier CPUs (Robert Haas) - ARMv5's weak memory ordering made this locking + ARMv5's weak memory ordering made this locking implementation unsafe. Spinlock support is still possible on newer gcc implementations with atomics support. @@ -8267,10 +10940,10 @@ Add GUC and storage parameter to set the maximum size of GIN pending list. --> Change index operator class for columns pg_seclabel.provider + linkend="catalog-pg-seclabel">pg_seclabel.provider and pg_shseclabel.provider - to be text_pattern_ops (Tom Lane) + linkend="catalog-pg-shseclabel">pg_shseclabel.provider + to be text_pattern_ops (Tom Lane) @@ -8303,8 +10976,8 @@ Add GUC and storage parameter to set the maximum size of GIN pending list. --> Allow higher-precision time stamp resolution on Windows 8, Windows - Server 2012, and later Windows systems (Craig Ringer) + class="osname">Windows 8, Windows + Server 2012, and later Windows systems (Craig Ringer) @@ -8313,8 +10986,8 @@ Add GUC and storage parameter to set the maximum size of GIN pending list. 2015-03-18 [f9dead5] Alvaro..: Install shared libraries to bin/ in Windows un.. --> - Install shared libraries to bin in MS Windows (Peter Eisentraut, Michael Paquier) + Install shared libraries to bin in MS Windows (Peter Eisentraut, Michael Paquier) @@ -8323,8 +10996,8 @@ Add GUC and storage parameter to set the maximum size of GIN pending list. 
2015-04-16 [22d0053] Alvaro..: MSVC: install src/test/modules together with c.. --> - Install src/test/modules together with - contrib on MSVC builds (Michael + Install src/test/modules together with + contrib on MSVC builds (Michael Paquier) @@ -8334,9 +11007,9 @@ Add GUC and storage parameter to set the maximum size of GIN pending list. 2014-07-12 [8d9a0e8] Magnus..: Support ––with-extra-version equivalent functi.. --> - Allow configure's - @@ -8345,7 +11018,7 @@ Add GUC and storage parameter to set the maximum size of GIN pending list. 2014-07-14 [91f03ba] Noah M..: MSVC: Recognize PGFILEDESC in contrib and conv.. --> - Pass PGFILEDESC into MSVC contrib builds + Pass PGFILEDESC into MSVC contrib builds (Michael Paquier) @@ -8355,8 +11028,8 @@ Add GUC and storage parameter to set the maximum size of GIN pending list. 2014-07-14 [c4a448e] Noah M..: MSVC: Apply icons to all binaries having them .. --> - Add icons to all MSVC-built binaries and version - information to all MS Windows + Add icons to all MSVC-built binaries and version + information to all MS Windows binaries (Noah Misch) @@ -8371,12 +11044,12 @@ Add GUC and storage parameter to set the maximum size of GIN pending list. --> Add optional-argument support to the internal - getopt_long() implementation (Michael Paquier, + getopt_long() implementation (Michael Paquier, Andres Freund) - This is used by the MSVC build. + This is used by the MSVC build. @@ -8398,7 +11071,7 @@ Add GUC and storage parameter to set the maximum size of GIN pending list. Add statistics for minimum, maximum, mean, and standard deviation times to pg_stat_statements + linkend="pgstatstatements-columns">pg_stat_statements (Mitsumasa Kondo, Andrew Dunstan) @@ -8408,8 +11081,8 @@ Add GUC and storage parameter to set the maximum size of GIN pending list. 2014-10-01 [32984d8] Heikki..: Add functions for dealing with PGP armor heade.. --> - Add pgcrypto function - pgp_armor_headers() to extract PGP + Add pgcrypto function + pgp_armor_headers() to extract PGP armor headers (Marko Tiikkaja, Heikki Linnakangas) @@ -8420,7 +11093,7 @@ Add GUC and storage parameter to set the maximum size of GIN pending list. --> Allow empty replacement strings in unaccent (Mohammad Alhashash) + linkend="unaccent">unaccent (Mohammad Alhashash) @@ -8435,7 +11108,7 @@ Add GUC and storage parameter to set the maximum size of GIN pending list. --> Allow multicharacter source strings in unaccent (Tom Lane) + linkend="unaccent">unaccent (Tom Lane) @@ -8451,9 +11124,9 @@ Add GUC and storage parameter to set the maximum size of GIN pending list. 2015-05-15 [149f6f1] Simon ..: TABLESAMPLE system_time(limit) --> - Add contrib modules tsm_system_rows and - tsm_system_time + Add contrib modules tsm_system_rows and + tsm_system_time to allow additional table sampling methods (Petr Jelínek) @@ -8463,9 +11136,9 @@ Add GUC and storage parameter to set the maximum size of GIN pending list. 2014-11-21 [3a82bc6] Heikki..: Add pageinspect functions for inspecting GIN in.. --> - Add GIN + Add GIN index inspection functions to pageinspect (Heikki + linkend="pageinspect">pageinspect (Heikki Linnakangas, Peter Geoghegan, Michael Paquier) @@ -8476,7 +11149,7 @@ Add GUC and storage parameter to set the maximum size of GIN pending list. --> Add information about buffer pins to pg_buffercache display + linkend="pgbuffercache">pg_buffercache display (Andres Freund) @@ -8486,9 +11159,9 @@ Add GUC and storage parameter to set the maximum size of GIN pending list. 
2015-05-13 [5850b20] Andres..: Add pgstattuple_approx() to the pgstattuple ext.. --> - Allow pgstattuple + Allow pgstattuple to report approximate answers with less overhead using - pgstattuple_approx() (Abhijit Menon-Sen) + pgstattuple_approx() (Abhijit Menon-Sen) @@ -8498,15 +11171,15 @@ Add GUC and storage parameter to set the maximum size of GIN pending list. 2014-12-01 [df761e3] Alvaro..: Move security_label test --> - Move dummy_seclabel, test_shm_mq, - test_parser, and worker_spi - from contrib to src/test/modules + Move dummy_seclabel, test_shm_mq, + test_parser, and worker_spi + from contrib to src/test/modules (Álvaro Herrera) These modules are only meant for server testing, so they do not need - to be built or installed when packaging PostgreSQL. + to be built or installed when packaging PostgreSQL. diff --git a/doc/src/sgml/release-9.6.sgml b/doc/src/sgml/release-9.6.sgml index 078ac87841..2ad4e8ea86 100644 --- a/doc/src/sgml/release-9.6.sgml +++ b/doc/src/sgml/release-9.6.sgml @@ -1,6 +1,3229 @@ + + Release 9.6.11 + + + Release date: + 2018-11-08 + + + + This release contains a variety of fixes from 9.6.10. + For information about new features in the 9.6 major release, see + . + + + + Migration to Version 9.6.11 + + + A dump/restore is not required for those running 9.6.X. + + + + However, if you are upgrading from a version earlier than 9.6.9, + see . + + + + + Changes + + + + + + Fix corner-case failures + in has_foo_privilege() + family of functions (Tom Lane) + + + + Return NULL rather than throwing an error when an invalid object OID + is provided. Some of these functions got that right already, but not + all. has_column_privilege() was additionally + capable of crashing on some platforms. + + + + + + Avoid O(N^2) slowdown in regular expression match/split functions on + long strings (Andrew Gierth) + + + + + + Fix parsing of standard multi-character operators that are immediately + followed by a comment or + or - + (Andrew Gierth) + + + + This oversight could lead to parse errors, or to incorrect assignment + of precedence. + + + + + + Avoid O(N^3) slowdown in lexer for long strings + of + or - characters + (Andrew Gierth) + + + + + + Fix mis-execution of SubPlans when the outer query is being scanned + backwards (Andrew Gierth) + + + + + + Fix failure of UPDATE/DELETE ... WHERE CURRENT OF ... + after rewinding the referenced cursor (Tom Lane) + + + + A cursor that scans multiple relations (particularly an inheritance + tree) could produce wrong behavior if rewound to an earlier relation. + + + + + + Fix EvalPlanQual to handle conditionally-executed + InitPlans properly (Andrew Gierth, Tom Lane) + + + + This resulted in hard-to-reproduce crashes or wrong answers in + concurrent updates, if they contained code such as an uncorrelated + sub-SELECT inside a CASE + construct. + + + + + + Fix character-class checks to not fail on Windows for Unicode + characters above U+FFFF (Tom Lane, Kenji Uno) + + + + This bug affected full-text-search operations, as well + as contrib/ltree + and contrib/pg_trgm. + + + + + + Disallow pushing sub-SELECTs containing window + functions, LIMIT, or OFFSET to + parallel workers (Amit Kapila) + + + + Such cases could result in inconsistent behavior due to different + workers getting different answers, as a result of indeterminacy + due to row-ordering variations. 
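As a rough, hypothetical illustration of the query shape covered by the preceding entry (the table and column names here are invented): a sub-SELECT with LIMIT but no ORDER BY has no deterministic answer, so letting each parallel worker evaluate it independently could settle on different rows.

<programlisting>
-- Hypothetical example: the row picked by LIMIT 1 is indeterminate without an
-- ORDER BY, so the sub-SELECT must not be evaluated separately in each worker.
SELECT count(*)
FROM orders o
WHERE o.customer_id = (SELECT customer_id FROM customers LIMIT 1);
</programlisting>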
+ + + + + + Ensure that sequences owned by a foreign table are processed + by ALTER OWNER on the table (Peter Eisentraut) + + + + The ownership change should propagate to such sequences as well, but + this was missed for foreign tables. + + + + + + Ensure that the server will process + already-received NOTIFY + and SIGTERM interrupts before waiting for client + input (Jeff Janes, Tom Lane) + + + + + + Fix over-allocation of space for array_out()'s + result string (Keiichi Hirobe) + + + + + + Fix memory leak in repeated SP-GiST index scans (Tom Lane) + + + + This is only known to amount to anything significant in cases where + an exclusion constraint using SP-GiST receives many new index entries + in a single command. + + + + + + Ensure that ApplyLogicalMappingFile() closes the + mapping file when done with it (Tomas Vondra) + + + + Previously, the file descriptor was leaked, eventually resulting in + failures during logical decoding. + + + + + + Fix logical decoding to handle cases where a mapped catalog table is + repeatedly rewritten, e.g. by VACUUM FULL + (Andres Freund) + + + + + + Prevent starting the server with wal_level set + to too low a value to support an existing replication slot (Andres + Freund) + + + + + + Avoid crash if a utility command causes infinite recursion (Tom Lane) + + + + + + When initializing a hot standby, cope with duplicate XIDs caused by + two-phase transactions on the master + (Michael Paquier, Konstantin Knizhnik) + + + + + + Fix event triggers to handle nested ALTER TABLE + commands (Michael Paquier, Álvaro Herrera) + + + + + + Propagate parent process's transaction and statement start timestamps + to parallel workers (Konstantin Knizhnik) + + + + This prevents misbehavior of functions such + as transaction_timestamp() when executed in a + worker. + + + + + + Fix transfer of expanded datums to parallel workers so that alignment + is preserved, preventing crashes on alignment-picky platforms + (Tom Lane, Amit Kapila) + + + + + + Fix WAL file recycling logic to work correctly on standby servers + (Michael Paquier) + + + + Depending on the setting of archive_mode, a standby + might fail to remove some WAL files that could be removed. + + + + + + Fix handling of commit-timestamp tracking during recovery + (Masahiko Sawasa, Michael Paquier) + + + + If commit timestamp tracking has been turned on or off, recovery might + fail due to trying to fetch the commit timestamp for a transaction + that did not record it. + + + + + + Randomize the random() seed in bootstrap and + standalone backends, and in initdb + (Noah Misch) + + + + The main practical effect of this change is that it avoids a scenario + where initdb might mistakenly conclude that + POSIX shared memory is not available, due to name collisions caused by + always using the same random seed. + + + + + + Allow DSM allocation to be interrupted (Chris Travers) + + + + + + Avoid failure in a parallel worker when loading an extension that + tries to access system caches within its init function (Thomas Munro) + + + + We don't consider that to be good extension coding practice, but it + mostly worked before parallel query, so continue to support it for + now. 
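For the entry above on propagating transaction and statement start timestamps to parallel workers, a hedged sketch of the visible symptom (big_table is an invented name, and whether a worker actually evaluates the expression depends on the chosen plan): every row of a query should observe the same transaction start time.

<programlisting>
-- Hypothetical check: before the fix, transaction_timestamp() evaluated in a
-- parallel worker could report the worker's own start time instead of the
-- leader's, so this query could return more than one row.
SET max_parallel_workers_per_gather = 2;
SELECT DISTINCT transaction_timestamp() FROM big_table;  -- expect exactly one row
</programlisting>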
+ + + + + + Properly handle turning full_page_writes on + dynamically (Kyotaro Horiguchi) + + + + + + Fix possible crash due to double free() during + SP-GiST rescan (Andrew Gierth) + + + + + + Avoid possible buffer overrun when replaying GIN page recompression + from WAL (Alexander Korotkov, Sivasubramanian Ramasubramanian) + + + + + + Fix missed fsync of a replication slot's directory (Konstantin + Knizhnik, Michael Paquier) + + + + + + Fix unexpected timeouts when + using wal_sender_timeout on a slow server + (Noah Misch) + + + + + + Ensure that hot standby processes use the correct WAL consistency + point (Alexander Kukushkin, Michael Paquier) + + + + This prevents possible misbehavior just after a standby server has + reached a consistent database state during WAL replay. + + + + + + Ensure background workers are stopped properly when the postmaster + receives a fast-shutdown request before completing database startup + (Alexander Kukushkin) + + + + + + Update the free space map during WAL replay of page all-visible/frozen + flag changes (Álvaro Herrera) + + + + Previously we were not careful about this, reasoning that the FSM is + not critical data anyway. However, if it's sufficiently out of date, + that can result in significant performance degradation after a standby + has been promoted to primary. The FSM will eventually be healed by + updates, but we'd like it to be good sooner, so work harder at + maintaining it during WAL replay. + + + + + + Avoid premature release of parallel-query resources when query end or + tuple count limit is reached (Amit Kapila) + + + + It's only okay to shut down the executor at this point if the caller + cannot demand backwards scan afterwards. + + + + + + Don't run atexit callbacks when servicing SIGQUIT + (Heikki Linnakangas) + + + + + + Don't record foreign-server user mappings as members of extensions + (Tom Lane) + + + + If CREATE USER MAPPING is executed in an extension + script, an extension dependency was created for the user mapping, + which is unexpected. Roles can't be extension members, so user + mappings shouldn't be either. + + + + + + Make syslogger more robust against failures in opening CSV log files + (Tom Lane) + + + + + + Fix psql, as well as documentation + examples, to call PQconsumeInput() before + each PQnotifies() call (Tom Lane) + + + + This fixes cases in which psql would not + report receipt of a NOTIFY message until after the + next command. + + + + + + Fix possible inconsistency in pg_dump's + sorting of dissimilar object names (Jacob Champion) + + + + + + Ensure that pg_restore will schema-qualify + the table name when + emitting DISABLE/ENABLE TRIGGER + commands (Tom Lane) + + + + This avoids failures due to the new policy of running restores with + restrictive search path. + + + + + + Fix pg_upgrade to handle event triggers in + extensions correctly (Haribabu Kommi) + + + + pg_upgrade failed to preserve an event + trigger's extension-membership status. + + + + + + Fix pg_upgrade's cluster state check to + work correctly on a standby server (Bruce Momjian) + + + + + + Enforce type cube's dimension limit in + all contrib/cube functions (Andrey Borodin) + + + + Previously, some cube-related functions could construct values that + would be rejected by cube_in(), leading to + dump/reload failures. 
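A hedged sketch of the kind of operation the cube entry above refers to (the exact error text may differ): functions that grow a cube, such as cube_enlarge(), now enforce the 100-dimension limit up front rather than building a value that cube_in() would reject when reloading a dump.

<programlisting>
CREATE EXTENSION IF NOT EXISTS cube;
-- Start from a 100-dimensional point and ask cube_enlarge() to pad it out to
-- 200 dimensions; with the fix this is rejected immediately.
SELECT cube_enlarge(cube(array_fill(0.0::float8, ARRAY[100])), 1, 200);
</programlisting>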
+ + + + + + In contrib/postgres_fdw, don't try to ship a + variable-free ORDER BY clause to the remote server + (Andrew Gierth) + + + + + + Fix contrib/unaccent's + unaccent() function to use + the unaccent text search dictionary that is in the + same schema as the function (Tom Lane) + + + + Previously it tried to look up the dictionary using the search path, + which could fail if the search path has a restrictive value. + + + + + + Fix build problems on macOS 10.14 (Mojave) (Tom Lane) + + + + Adjust configure to add + an switch to CPPFLAGS; + without this, PL/Perl and PL/Tcl fail to configure or build on macOS + 10.14. The specific sysroot used can be overridden at configure time + or build time by setting the PG_SYSROOT variable in + the arguments of configure + or make. + + + + It is now recommended that Perl-related extensions + write $(perl_includespec) rather + than -I$(perl_archlibexp)/CORE in their compiler + flags. The latter continues to work on most platforms, but not recent + macOS. + + + + Also, it should no longer be necessary to + specify manually to get PL/Tcl to + build on recent macOS releases. + + + + + + Fix MSVC build and regression-test scripts to work on recent Perl + versions (Andrew Dunstan) + + + + Perl no longer includes the current directory in its search path + by default; work around that. + + + + + + On Windows, allow the regression tests to be run by an Administrator + account (Andrew Dunstan) + + + + To do this safely, pg_regress now gives up + any such privileges at startup. + + + + + + Allow btree comparison functions to return INT_MIN + (Tom Lane) + + + + Up to now, we've forbidden datatype-specific comparison functions from + returning INT_MIN, which allows callers to invert + the sort order just by negating the comparison result. However, this + was never safe for comparison functions that directly return the + result of memcmp(), strcmp(), + etc, as POSIX doesn't place any such restriction on those functions. + At least some recent versions of memcmp() can + return INT_MIN, causing incorrect sort ordering. + Hence, we've removed this restriction. Callers must now use + the INVERT_COMPARE_RESULT() macro if they wish to + invert the sort order. + + + + + + Fix recursion hazard in shared-invalidation message processing + (Tom Lane) + + + + This error could, for example, result in failure to access a system + catalog or index that had just been processed by VACUUM + FULL. + + + + This change adds a new result code + for LockAcquire, which might possibly affect + external callers of that function, though only very unusual usage + patterns would have an issue with it. The API + of LockAcquireExtended is also changed. + + + + + + Save and restore SPI's global variables + during SPI_connect() + and SPI_finish() (Chapman Flack, Tom Lane) + + + + This prevents possible interference when one SPI-using function calls + another. + + + + + + Avoid using potentially-under-aligned page buffers (Tom Lane) + + + + Invent new union types PGAlignedBlock + and PGAlignedXLogBlock, and use these in place of plain + char arrays, ensuring that the compiler can't place the buffer at a + misaligned start address. This fixes potential core dumps on + alignment-picky platforms, and may improve performance even on + platforms that allow misalignment. 
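For the contrib/unaccent entry above, a small hedged example (the schema choice and input string are invented): with a restrictive search_path, the single-argument unaccent() previously could fail to find its dictionary by name, whereas it now looks in the schema the extension is installed in.

<programlisting>
CREATE EXTENSION IF NOT EXISTS unaccent SCHEMA public;
SET search_path = pg_catalog;            -- deliberately restrictive
SELECT public.unaccent('Hôtel Crème');   -- dictionary lookup now succeeds
</programlisting>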
+ + + + + + Make src/port/snprintf.c follow the C99 + standard's definition of snprintf()'s result + value (Tom Lane) + + + + On platforms where this code is used (mostly Windows), its pre-C99 + behavior could lead to failure to detect buffer overrun, if the + calling code assumed C99 semantics. + + + + + + When building on i386 with the clang + compiler, require to be used (Andres Freund) + + + + This avoids problems with missed floating point overflow checks. + + + + + + Fix configure's detection of the result + type of strerror_r() (Tom Lane) + + + + The previous coding got the wrong answer when building + with icc on Linux (and perhaps in other + cases), leading to libpq not returning + useful error messages for system-reported errors. + + + + + + Update time zone data files to tzdata + release 2018g for DST law changes in Chile, Fiji, Morocco, and Russia + (Volgograd), plus historical corrections for China, Hawaii, Japan, + Macau, and North Korea. + + + + + + + + + + Release 9.6.10 + + + Release date: + 2018-08-09 + + + + This release contains a variety of fixes from 9.6.9. + For information about new features in the 9.6 major release, see + . + + + + Migration to Version 9.6.10 + + + A dump/restore is not required for those running 9.6.X. + + + + However, if you are upgrading from a version earlier than 9.6.9, + see . + + + + + Changes + + + + + + Fix failure to reset libpq's state fully + between connection attempts (Tom Lane) + + + + An unprivileged user of dblink + or postgres_fdw could bypass the checks intended + to prevent use of server-side credentials, such as + a ~/.pgpass file owned by the operating-system + user running the server. Servers allowing peer authentication on + local connections are particularly vulnerable. Other attacks such + as SQL injection into a postgres_fdw session + are also possible. + Attacking postgres_fdw in this way requires the + ability to create a foreign server object with selected connection + parameters, but any user with access to dblink + could exploit the problem. + In general, an attacker with the ability to select the connection + parameters for a libpq-using application + could cause mischief, though other plausible attack scenarios are + harder to think of. + Our thanks to Andrew Krasichkov for reporting this issue. + (CVE-2018-10915) + + + + + + Fix INSERT ... ON CONFLICT UPDATE through a view + that isn't just SELECT * FROM ... + (Dean Rasheed, Amit Langote) + + + + Erroneous expansion of an updatable view could lead to crashes + or attribute ... has the wrong type errors, if the + view's SELECT list doesn't match one-to-one with + the underlying table's columns. + Furthermore, this bug could be leveraged to allow updates of columns + that an attacking user lacks UPDATE privilege for, + if that user has INSERT and UPDATE + privileges for some other column(s) of the table. + Any user could also use it for disclosure of server memory. + (CVE-2018-10925) + + + + + + Ensure that updates to the relfrozenxid + and relminmxid values + for nailed system catalogs are processed in a timely + fashion (Andres Freund) + + + + Overoptimistic caching rules could prevent these updates from being + seen by other sessions, leading to spurious errors and/or data + corruption. The problem was significantly worse for shared catalogs, + such as pg_authid, because the stale cache + data could persist into new sessions as well as existing ones. 
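A hedged sketch of the view shape affected by the INSERT ... ON CONFLICT UPDATE entry above (all names invented): the view's SELECT list does not match the underlying table's columns one-to-one, which is the case the erroneous expansion mishandled.

<programlisting>
CREATE TABLE accounts (id int PRIMARY KEY, balance numeric, note text);
CREATE VIEW accounts_v AS
    SELECT id, balance FROM accounts;    -- narrower than the base table

INSERT INTO accounts_v VALUES (1, 100)
    ON CONFLICT (id) DO UPDATE SET balance = EXCLUDED.balance;
</programlisting>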
+ + + + + + Fix case where a freshly-promoted standby crashes before having + completed its first post-recovery checkpoint (Michael Paquier, Kyotaro + Horiguchi, Pavan Deolasee, Álvaro Herrera) + + + + This led to a situation where the server did not think it had reached + a consistent database state during subsequent WAL replay, preventing + restart. + + + + + + Avoid emitting a bogus WAL record when recycling an all-zero btree + page (Amit Kapila) + + + + This mistake has been seen to cause assertion failures, and + potentially it could result in unnecessary query cancellations on hot + standby servers. + + + + + + During WAL replay, guard against corrupted record lengths exceeding + 1GB (Michael Paquier) + + + + Treat such a case as corrupt data. Previously, the code would try to + allocate space and get a hard error, making recovery impossible. + + + + + + When ending recovery, delay writing the timeline history file as long + as possible (Heikki Linnakangas) + + + + This avoids some situations where a failure during recovery cleanup + (such as a problem with a two-phase state file) led to inconsistent + timeline state on-disk. + + + + + + Improve performance of WAL replay for transactions that drop many + relations (Fujii Masao) + + + + This change reduces the number of times that shared buffers are + scanned, so that it is of most benefit when that setting is large. + + + + + + Improve performance of lock releasing in standby server WAL replay + (Thomas Munro) + + + + + + Make logical WAL senders report streaming state correctly (Simon + Riggs, Sawada Masahiko) + + + + The code previously mis-detected whether or not it had caught up with + the upstream server. + + + + + + Fix bugs in snapshot handling during logical decoding, allowing wrong + decoding results in rare cases (Arseny Sher, Álvaro Herrera) + + + + + + Ensure a table's cached index list is correctly rebuilt after an index + creation fails partway through (Peter Geoghegan) + + + + Previously, the failed index's OID could remain in the list, causing + problems later in the same session. + + + + + + Fix mishandling of empty uncompressed posting list pages in GIN + indexes (Sivasubramanian Ramasubramanian, Alexander Korotkov) + + + + This could result in an assertion failure after pg_upgrade of a + pre-9.4 GIN index (9.4 and later will not create such pages). + + + + + + Ensure that VACUUM will respond to signals + within btree page deletion loops (Andres Freund) + + + + Corrupted btree indexes could result in an infinite loop here, and + that previously wasn't interruptible without forcing a crash. + + + + + + Fix misoptimization of equivalence classes involving composite-type + columns (Tom Lane) + + + + This resulted in failure to recognize that an index on a composite + column could provide the sort order needed for a mergejoin on that + column. + + + + + + Fix planner to avoid ORDER/GROUP BY expression not found in + targetlist errors in some queries with set-returning functions + (Tom Lane) + + + + + + Fix SQL-standard FETCH FIRST syntax to allow + parameters ($n), as the + standard expects (Andrew Gierth) + + + + + + Fix EXPLAIN's accounting for resource usage, + particularly buffer accesses, in parallel workers + (Amit Kapila, Robert Haas) + + + + + + Fix failure to schema-qualify some object names + in getObjectDescription output + (Kyotaro Horiguchi, Tom Lane) + + + + Names of collations, conversions, and text search objects + were not schema-qualified when they should be. 
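For the FETCH FIRST entry above, a minimal hedged example (table and column names invented) showing the row count supplied as a parameter, as the SQL standard expects:

<programlisting>
PREPARE latest(int) AS
    SELECT * FROM events
    ORDER BY created_at DESC
    FETCH FIRST $1 ROWS ONLY;

EXECUTE latest(10);
</programlisting>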
+ + + + + + Fix CREATE AGGREGATE type checking so that + parallelism support functions can be attached to variadic aggregates + (Alexey Bashtanov) + + + + + + Widen COPY FROM's current-line-number counter + from 32 to 64 bits (David Rowley) + + + + This avoids two problems with input exceeding 4G lines: COPY + FROM WITH HEADER would drop a line every 4G lines, not only + the first line, and error reports could show a wrong line number. + + + + + + Add a string freeing function + to ecpg's pgtypes + library, so that cross-module memory management problems can be + avoided on Windows (Takayuki Tsunakawa) + + + + On Windows, crashes can ensue if the free call + for a given chunk of memory is not made from the same DLL + that malloc'ed the memory. + The pgtypes library sometimes returns strings + that it expects the caller to free, making it impossible to follow + this rule. Add a PGTYPESchar_free() function + that just wraps free, allowing applications + to follow this rule. + + + + + + Fix ecpg's support for long + long variables on Windows, as well as other platforms that + declare strtoll/strtoull + nonstandardly or not at all (Dang Minh Huong, Tom Lane) + + + + + + Fix misidentification of SQL statement type in PL/pgSQL, when a rule + change causes a change in the semantics of a statement intra-session + (Tom Lane) + + + + This error led to assertion failures, or in rare cases, failure to + enforce the INTO STRICT option as expected. + + + + + + Fix password prompting in client programs so that echo is properly + disabled on Windows when stdin is not the + terminal (Matthew Stickney) + + + + + + Further fix mis-quoting of values for list-valued GUC variables in + dumps (Tom Lane) + + + + The previous fix for quoting of search_path and + other list-valued variables in pg_dump + output turned out to misbehave for empty-string list elements, and it + risked truncation of long file paths. + + + + + + Fix pg_dump's failure to + dump REPLICA IDENTITY properties for constraint + indexes (Tom Lane) + + + + Manually created unique indexes were properly marked, but not those + created by declaring UNIQUE or PRIMARY + KEY constraints. + + + + + + Make pg_upgrade check that the old server + was shut down cleanly (Bruce Momjian) + + + + The previous check could be fooled by an immediate-mode shutdown. + + + + + + Fix contrib/hstore_plperl to look through Perl + scalar references, and to not crash if it doesn't find a hash + reference where it expects one (Tom Lane) + + + + + + Fix crash in contrib/ltree's + lca() function when the input array is empty + (Pierre Ducroquet) + + + + + + Fix various error-handling code paths in which an incorrect error code + might be reported (Michael Paquier, Tom Lane, Magnus Hagander) + + + + + + Rearrange makefiles to ensure that programs link to freshly-built + libraries (such as libpq.so) rather than ones + that might exist in the system library directories (Tom Lane) + + + + This avoids problems when building on platforms that supply old copies + of PostgreSQL libraries. + + + + + + Update time zone data files to tzdata + release 2018e for DST law changes in North Korea, plus historical + corrections for Czechoslovakia. + + + + This update includes a redefinition of daylight savings + in Ireland, as well as for some past years in Namibia and + Czechoslovakia. In those jurisdictions, legally standard time is + observed in summer, and daylight savings time in winter, so that the + daylight savings offset is one hour behind standard time not one hour + ahead. 
This does not affect either the actual UTC offset or the + timezone abbreviations in use; the only known effect is that + the is_dst column in + the pg_timezone_names view will now be true + in winter and false in summer in these cases. + + + + + + + + + + Release 9.6.9 + + + Release date: + 2018-05-10 + + + + This release contains a variety of fixes from 9.6.8. + For information about new features in the 9.6 major release, see + . + + + + Migration to Version 9.6.9 + + + A dump/restore is not required for those running 9.6.X. + + + + However, if you use the adminpack extension, + you should update it as per the first changelog entry below. + + + + Also, if the function marking mistakes mentioned in the second and + third changelog entries below affect you, you will want to take steps + to correct your database catalogs. + + + + Also, if you are upgrading from a version earlier than 9.6.8, + see . + + + + + Changes + + + + + + Remove public execute privilege + from contrib/adminpack's + pg_logfile_rotate() function (Stephen Frost) + + + + pg_logfile_rotate() is a deprecated wrapper + for the core function pg_rotate_logfile(). + When that function was changed to rely on SQL privileges for access + control rather than a hard-coded superuser + check, pg_logfile_rotate() should have been + updated as well, but the need for this was missed. Hence, + if adminpack is installed, any user could + request a logfile rotation, creating a minor security issue. + + + + After installing this update, administrators should + update adminpack by performing + ALTER EXTENSION adminpack UPDATE in each + database in which adminpack is installed. + (CVE-2018-1115) + + + + + + Fix incorrect volatility markings on a few built-in functions + (Thomas Munro, Tom Lane) + + + + The functions + query_to_xml, + cursor_to_xml, + cursor_to_xmlschema, + query_to_xmlschema, and + query_to_xml_and_xmlschema + should be marked volatile because they execute user-supplied queries + that might contain volatile operations. They were not, leading to a + risk of incorrect query optimization. This has been repaired for new + installations by correcting the initial catalog data, but existing + installations will continue to contain the incorrect markings. + Practical use of these functions seems to pose little hazard, but in + case of trouble, it can be fixed by manually updating these + functions' pg_proc entries, for example + ALTER FUNCTION pg_catalog.query_to_xml(text, boolean, + boolean, text) VOLATILE. (Note that that will need to be + done in each database of the installation.) Another option is + to pg_upgrade the database to a version + containing the corrected initial data. + + + + + + Fix incorrect parallel-safety markings on a few built-in functions + (Thomas Munro, Tom Lane) + + + + The functions + brin_summarize_new_values, + gin_clean_pending_list, + cursor_to_xml, + cursor_to_xmlschema, + ts_rewrite, + ts_stat, and + binary_upgrade_create_empty_extension + should be marked parallel-unsafe; some because they perform database + modifications directly, and others because they execute user-supplied + queries that might do so. They were marked parallel-restricted + instead, leading to a risk of unexpected query errors. This has been + repaired for new installations by correcting the initial catalog + data, but existing installations will continue to contain the + incorrect markings. Practical use of these functions seems to pose + little hazard unless force_parallel_mode is turned + on. 
In case of trouble, it can be fixed by manually updating these + functions' pg_proc entries, for example + ALTER FUNCTION pg_catalog.brin_summarize_new_values(regclass) + PARALLEL UNSAFE. (Note that that will need to be done in + each database of the installation.) Another option is + to pg_upgrade the database to a version + containing the corrected initial data. + + + + + + Avoid re-using TOAST value OIDs that match dead-but-not-yet-vacuumed + TOAST entries (Pavan Deolasee) + + + + Once the OID counter has wrapped around, it's possible to assign a + TOAST value whose OID matches a previously deleted entry in the same + TOAST table. If that entry were not yet vacuumed away, this resulted + in unexpected chunk number 0 (expected 1) for toast + value nnnnn errors, which would + persist until the dead entry was removed + by VACUUM. Fix by not selecting such OIDs when + creating a new TOAST entry. + + + + + + Change ANALYZE's algorithm for updating + pg_class.reltuples + (David Gould) + + + + Previously, pages not actually scanned by ANALYZE + were assumed to retain their old tuple density. In a large table + where ANALYZE samples only a small fraction of the + pages, this meant that the overall tuple density estimate could not + change very much, so that reltuples would + change nearly proportionally to changes in the table's physical size + (relpages) regardless of what was actually + happening in the table. This has been observed to result + in reltuples becoming so much larger than + reality as to effectively shut off autovacuuming. To fix, assume + that ANALYZE's sample is a statistically unbiased + sample of the table (as it should be), and just extrapolate the + density observed within those pages to the whole table. + + + + + + Avoid deadlocks in concurrent CREATE INDEX + CONCURRENTLY commands that are run + under SERIALIZABLE or REPEATABLE + READ transaction isolation (Tom Lane) + + + + + + Fix possible slow execution of REFRESH MATERIALIZED VIEW + CONCURRENTLY (Thomas Munro) + + + + + + Fix UPDATE/DELETE ... WHERE CURRENT OF to not fail + when the referenced cursor uses an index-only-scan plan (Yugo Nagata, + Tom Lane) + + + + + + Fix incorrect planning of join clauses pushed into parameterized + paths (Andrew Gierth, Tom Lane) + + + + This error could result in misclassifying a condition as + a join filter for an outer join when it should be a + plain filter condition, leading to incorrect join + output. + + + + + + Fix possibly incorrect generation of an index-only-scan plan when the + same table column appears in multiple index columns, and only some of + those index columns use operator classes that can return the column + value (Kyotaro Horiguchi) + + + + + + Fix misoptimization of CHECK constraints having + provably-NULL subclauses of + top-level AND/OR conditions + (Tom Lane, Dean Rasheed) + + + + This could, for example, allow constraint exclusion to exclude a + child table that should not be excluded from a query. 
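A hedged sketch of the constraint shape the last entry above refers to (all names invented): a child-table CHECK constraint containing a subclause that always evaluates to NULL must not let constraint exclusion skip the child for a query it could still satisfy.

<programlisting>
CREATE TABLE measurements (val int);
CREATE TABLE measurements_recent (
    CHECK (val > 0 OR (val IS NULL AND NULL))   -- provably-NULL subclause
) INHERITS (measurements);

-- The child may hold rows with val = 10, so it must still be scanned here.
SELECT * FROM measurements WHERE val = 10;
</programlisting>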
+ + + + + + Fix executor crash due to double free in some GROUPING + SET usages (Peter Geoghegan) + + + + + + Avoid crash if a table rewrite event trigger is added concurrently + with a command that could call such a trigger (Álvaro Herrera, + Andrew Gierth, Tom Lane) + + + + + + Avoid failure if a query-cancel or session-termination interrupt + occurs while committing a prepared transaction (Stas Kelvich) + + + + + + Fix query-lifespan memory leakage in repeatedly executed hash joins + (Tom Lane) + + + + + + Fix possible leak or double free of visibility map buffer pins + (Amit Kapila) + + + + + + Avoid spuriously marking pages as all-visible (Dan Wood, + Pavan Deolasee, Álvaro Herrera) + + + + This could happen if some tuples were locked (but not deleted). While + queries would still function correctly, vacuum would normally ignore + such pages, with the long-term effect that the tuples were never + frozen. In recent releases this would eventually result in errors + such as found multixact nnnnn from + before relminmxid nnnnn. + + + + + + Fix overly strict sanity check + in heap_prepare_freeze_tuple + (Álvaro Herrera) + + + + This could result in incorrect cannot freeze committed + xmax failures in databases that have + been pg_upgrade'd from 9.2 or earlier. + + + + + + Prevent dangling-pointer dereference when a C-coded before-update row + trigger returns the old tuple (Rushabh Lathia) + + + + + + Reduce locking during autovacuum worker scheduling (Jeff Janes) + + + + The previous behavior caused drastic loss of potential worker + concurrency in databases with many tables. + + + + + + Ensure client hostname is copied while copying + pg_stat_activity data to local memory + (Edmund Horner) + + + + Previously the supposedly-local snapshot contained a pointer into + shared memory, allowing the client hostname column to change + unexpectedly if any existing session disconnected. + + + + + + Fix incorrect processing of multiple compound affixes + in ispell dictionaries (Arthur Zakirov) + + + + + + Fix collation-aware searches (that is, indexscans using inequality + operators) in SP-GiST indexes on text columns (Tom Lane) + + + + Such searches would return the wrong set of rows in most non-C + locales. + + + + + + Prevent query-lifespan memory leakage with SP-GiST operator classes + that use traversal values (Anton Dignös) + + + + + + Count the number of index tuples correctly during initial build of an + SP-GiST index (Tomas Vondra) + + + + Previously, the tuple count was reported to be the same as that of + the underlying table, which is wrong if the index is partial. + + + + + + Count the number of index tuples correctly during vacuuming of a + GiST index (Andrey Borodin) + + + + Previously it reported the estimated number of heap tuples, + which might be inaccurate, and is certainly wrong if the + index is partial. + + + + + + Fix a corner case where a streaming standby gets stuck at a WAL + continuation record (Kyotaro Horiguchi) + + + + + + In logical decoding, avoid possible double processing of WAL data + when a walsender restarts (Craig Ringer) + + + + + + Allow scalarltsel + and scalargtsel to be used on non-core datatypes + (Tomas Vondra) + + + + + + Reduce libpq's memory consumption when a + server error is reported after a large amount of query output has + been collected (Tom Lane) + + + + Discard the previous output before, not after, processing the error + message. On some platforms, notably Linux, this can make a + difference in the application's subsequent memory footprint. 
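For the scalarltsel/scalargtsel entry above, a hedged sketch of how an extension-defined type might attach the core estimators (mytype and mytype_lt are placeholders, not objects shipped with any real extension):

<programlisting>
CREATE OPERATOR < (
    LEFTARG   = mytype,
    RIGHTARG  = mytype,
    PROCEDURE = mytype_lt,          -- placeholder comparison function
    RESTRICT  = scalarltsel,        -- core selectivity estimators, now
    JOIN      = scalarltjoinsel     -- usable with non-core datatypes
);
</programlisting>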
+ + + + + + Fix double-free crashes in ecpg + (Patrick Krecker, Jeevan Ladhe) + + + + + + Fix ecpg to handle long long + int variables correctly in MSVC builds (Michael Meskes, + Andrew Gierth) + + + + + + Fix mis-quoting of values for list-valued GUC variables in dumps + (Michael Paquier, Tom Lane) + + + + The local_preload_libraries, + session_preload_libraries, + shared_preload_libraries, + and temp_tablespaces variables were not correctly + quoted in pg_dump output. This would + cause problems if settings for these variables appeared in + CREATE FUNCTION ... SET or ALTER + DATABASE/ROLE ... SET clauses. + + + + + + Fix pg_recvlogical to not fail against + pre-v10 PostgreSQL servers + (Michael Paquier) + + + + A previous fix caused pg_recvlogical to + issue a command regardless of server version, but it should only be + issued to v10 and later servers. + + + + + + Ensure that pg_rewind deletes files on the + target server if they are deleted from the source server during the + run (Takayuki Tsunakawa) + + + + Failure to do this could result in data inconsistency on the target, + particularly if the file in question is a WAL segment. + + + + + + Fix pg_rewind to handle tables in + non-default tablespaces correctly (Takayuki Tsunakawa) + + + + + + Fix overflow handling in PL/pgSQL + integer FOR loops (Tom Lane) + + + + The previous coding failed to detect overflow of the loop variable + on some non-gcc compilers, leading to an infinite loop. + + + + + + Adjust PL/Python regression tests to pass + under Python 3.7 (Peter Eisentraut) + + + + + + Support testing PL/Python and related + modules when building with Python 3 and MSVC (Andrew Dunstan) + + + + + + Fix errors in initial build of contrib/bloom + indexes (Tomas Vondra, Tom Lane) + + + + Fix possible omission of the table's last tuple from the index. + Count the number of index tuples correctly, in case it is a partial + index. + + + + + + Rename internal b64_encode + and b64_decode functions to avoid conflict with + Solaris 11.4 built-in functions (Rainer Orth) + + + + + + Sync our copy of the timezone library with IANA tzcode release 2018e + (Tom Lane) + + + + This fixes the zic timezone data compiler + to cope with negative daylight-savings offsets. While + the PostgreSQL project will not + immediately ship such timezone data, zic + might be used with timezone data obtained directly from IANA, so it + seems prudent to update zic now. + + + + + + Update time zone data files to tzdata + release 2018d for DST law changes in Palestine and Antarctica (Casey + Station), plus historical corrections for Portugal and its colonies, + as well as Enderbury, Jamaica, Turks & Caicos Islands, and + Uruguay. + + + + + + + + + + Release 9.6.8 + + + Release date: + 2018-03-01 + + + + This release contains a variety of fixes from 9.6.7. + For information about new features in the 9.6 major release, see + . + + + + Migration to Version 9.6.8 + + + A dump/restore is not required for those running 9.6.X. + + + + However, if you run an installation in which not all users are mutually + trusting, or if you maintain an application or extension that is + intended for use in arbitrary situations, it is strongly recommended + that you read the documentation changes described in the first changelog + entry below, and take suitable steps to ensure that your installation or + code is secure. 
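As a hedged sketch of the kind of hardening the first changelog entry below recommends (the role and function names are placeholders): keep untrusted, writable schemas out of the search path, and pin the path explicitly for SECURITY DEFINER functions.

<programlisting>
-- Stop searching the world-writable public schema by default.
ALTER ROLE app_user SET search_path = "$user", pg_catalog;

-- Pin the search path inside a SECURITY DEFINER function, with pg_temp last.
ALTER FUNCTION transfer_funds(int, int, numeric)
    SET search_path = pg_catalog, pg_temp;
</programlisting>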
+ + + + Also, the changes described in the second changelog entry below may + cause functions used in index expressions or materialized views to fail + during auto-analyze, or when reloading from a dump. After upgrading, + monitor the server logs for such problems, and fix affected functions. + + + + Also, if you are upgrading from a version earlier than 9.6.7, + see . + + + + + Changes + + + + + + Document how to configure installations and applications to guard + against search-path-dependent trojan-horse attacks from other users + (Noah Misch) + + + + Using a search_path setting that includes any + schemas writable by a hostile user enables that user to capture + control of queries and then run arbitrary SQL code with the + permissions of the attacked user. While it is possible to write + queries that are proof against such hijacking, it is notationally + tedious, and it's very easy to overlook holes. Therefore, we now + recommend configurations in which no untrusted schemas appear in + one's search path. Relevant documentation appears in + (for database administrators and users), + (for application authors), + (for extension authors), and + (for authors + of SECURITY DEFINER functions). + (CVE-2018-1058) + + + + + + Avoid use of insecure search_path settings + in pg_dump and other client programs + (Noah Misch, Tom Lane) + + + + pg_dump, + pg_upgrade, + vacuumdb and + other PostgreSQL-provided applications were + themselves vulnerable to the type of hijacking described in the previous + changelog entry; since these applications are commonly run by + superusers, they present particularly attractive targets. To make them + secure whether or not the installation as a whole has been secured, + modify them to include only the pg_catalog + schema in their search_path settings. + Autovacuum worker processes now do the same, as well. + + + + In cases where user-provided functions are indirectly executed by + these programs — for example, user-provided functions in index + expressions — the tighter search_path may + result in errors, which will need to be corrected by adjusting those + user-provided functions to not assume anything about what search path + they are invoked under. That has always been good practice, but now + it will be necessary for correct behavior. + (CVE-2018-1058) + + + + + + Fix misbehavior of concurrent-update rechecks with CTE references + appearing in subplans (Tom Lane) + + + + If a CTE (WITH clause reference) is used in an + InitPlan or SubPlan, and the query requires a recheck due to trying + to update or lock a concurrently-updated row, incorrect results could + be obtained. + + + + + + Fix planner failures with overlapping mergejoin clauses in an outer + join (Tom Lane) + + + + These mistakes led to left and right pathkeys do not match in + mergejoin or outer pathkeys do not match + mergeclauses planner errors in corner cases. + + + + + + Repair pg_upgrade's failure to + preserve relfrozenxid for materialized + views (Tom Lane, Andres Freund) + + + + This oversight could lead to data corruption in materialized views + after an upgrade, manifesting as could not access status of + transaction or found xmin from before + relfrozenxid errors. The problem would be more likely to + occur in seldom-refreshed materialized views, or ones that were + maintained only with REFRESH MATERIALIZED VIEW + CONCURRENTLY. + + + + If such corruption is observed, it can be repaired by refreshing the + materialized view (without CONCURRENTLY). 
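As the entry above notes, a plain (non-concurrent) refresh rewrites the materialized view's storage and repairs the corruption; for a view named, say, my_matview:

<programlisting>
REFRESH MATERIALIZED VIEW my_matview;
</programlisting>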
+ + + + + + Fix incorrect reporting of PL/Python function names in + error CONTEXT stacks (Tom Lane) + + + + An error occurring within a nested PL/Python function call (that is, + one reached via a SPI query from another PL/Python function) would + result in a stack trace showing the inner function's name twice, + rather than the expected results. Also, an error in a nested + PL/Python DO block could result in a null pointer + dereference crash on some platforms. + + + + + + Allow contrib/auto_explain's + log_min_duration setting to range up + to INT_MAX, or about 24 days instead of 35 minutes + (Tom Lane) + + + + + + Mark assorted GUC variables as PGDLLIMPORT, to + ease porting extension modules to Windows (Metin Doslu) + + + + + + + + + + Release 9.6.7 + + + Release date: + 2018-02-08 + + + + This release contains a variety of fixes from 9.6.6. + For information about new features in the 9.6 major release, see + . + + + + Migration to Version 9.6.7 + + + A dump/restore is not required for those running 9.6.X. + + + + However, + if you use contrib/cube's ~> + operator, see the entry below about that. + + + + Also, if you are upgrading from a version earlier than 9.6.6, + see . + + + + + Changes + + + + + + Ensure that all temporary files made + by pg_upgrade are non-world-readable + (Tom Lane, Noah Misch) + + + + pg_upgrade normally restricts its + temporary files to be readable and writable only by the calling user. + But the temporary file containing pg_dumpall -g + output would be group- or world-readable, or even writable, if the + user's umask setting allows. In typical usage on + multi-user machines, the umask and/or the working + directory's permissions would be tight enough to prevent problems; + but there may be people using pg_upgrade + in scenarios where this oversight would permit disclosure of database + passwords to unfriendly eyes. + (CVE-2018-1053) + + + + + + Fix vacuuming of tuples that were updated while key-share locked + (Andres Freund, Álvaro Herrera) + + + + In some cases VACUUM would fail to remove such + tuples even though they are now dead, leading to assorted data + corruption scenarios. + + + + + + Ensure that vacuum will always clean up the pending-insertions list of + a GIN index (Masahiko Sawada) + + + + This is necessary to ensure that dead index entries get removed. + The old code got it backwards, allowing vacuum to skip the cleanup if + some other process were running cleanup concurrently, thus risking + invalid entries being left behind in the index. + + + + + + Fix inadequate buffer locking in some LSN fetches (Jacob Champion, + Asim Praveen, Ashwin Agrawal) + + + + These errors could result in misbehavior under concurrent load. + The potential consequences have not been characterized fully. + + + + + + Fix incorrect query results from cases involving flattening of + subqueries whose outputs are used in GROUPING SETS + (Heikki Linnakangas) + + + + + + Avoid unnecessary failure in a query on an inheritance tree that + occurs concurrently with some child table being removed from the tree + by ALTER TABLE NO INHERIT (Tom Lane) + + + + + + Fix spurious deadlock failures when multiple sessions are + running CREATE INDEX CONCURRENTLY (Jeff Janes) + + + + + + Fix failures when an inheritance tree contains foreign child tables + (Etsuro Fujita) + + + + A mix of regular and foreign tables in an inheritance tree resulted in + creation of incorrect plans for UPDATE + and DELETE queries. 
This led to visible failures in + some cases, notably when there are row-level triggers on a foreign + child table. + + + + + + Repair failure with correlated sub-SELECT + inside VALUES inside a LATERAL + subquery (Tom Lane) + + + + + + Fix could not devise a query plan for the given query + planner failure for some cases involving nested UNION + ALL inside a lateral subquery (Tom Lane) + + + + + + Fix logical decoding to correctly clean up disk files for crashed + transactions (Atsushi Torikoshi) + + + + Logical decoding may spill WAL records to disk for transactions + generating many WAL records. Normally these files are cleaned up + after the transaction's commit or abort record arrives; but if + no such record is ever seen, the removal code misbehaved. + + + + + + Fix walsender timeout failure and failure to respond to interrupts + when processing a large transaction (Petr Jelinek) + + + + + + Fix has_sequence_privilege() to + support WITH GRANT OPTION tests, + as other privilege-testing functions do (Joe Conway) + + + + + + In databases using UTF8 encoding, ignore any XML declaration that + asserts a different encoding (Pavel Stehule, Noah Misch) + + + + We always store XML strings in the database encoding, so allowing + libxml to act on a declaration of another encoding gave wrong results. + In encodings other than UTF8, we don't promise to support non-ASCII + XML data anyway, so retain the previous behavior for bug compatibility. + This change affects only xpath() and related + functions; other XML code paths already acted this way. + + + + + + Provide for forward compatibility with future minor protocol versions + (Robert Haas, Badrul Chowdhury) + + + + Up to now, PostgreSQL servers simply + rejected requests to use protocol versions newer than 3.0, so that + there was no functional difference between the major and minor parts + of the protocol version number. Allow clients to request versions 3.x + without failing, sending back a message showing that the server only + understands 3.0. This makes no difference at the moment, but + back-patching this change should allow speedier introduction of future + minor protocol upgrades. + + + + + + Cope with failure to start a parallel worker process + (Amit Kapila, Robert Haas) + + + + Parallel query previously tended to hang indefinitely if a worker + could not be started, as the result of fork() + failure or other low-probability problems. + + + + + + Fix collection of EXPLAIN statistics from parallel + workers (Amit Kapila, Thomas Munro) + + + + + + Avoid unsafe alignment assumptions when working + with __int128 (Tom Lane) + + + + Typically, compilers assume that __int128 variables are + aligned on 16-byte boundaries, but our memory allocation + infrastructure isn't prepared to guarantee that, and increasing the + setting of MAXALIGN seems infeasible for multiple reasons. Adjust the + code to allow use of __int128 only when we can tell the + compiler to assume lesser alignment. The only known symptom of this + problem so far is crashes in some parallel aggregation queries. + + + + + + Prevent stack-overflow crashes when planning extremely deeply + nested set operations + (UNION/INTERSECT/EXCEPT) + (Tom Lane) + + + + + + Fix null-pointer crashes for some types of LDAP URLs appearing + in pg_hba.conf (Thomas Munro) + + + + + + Fix sample INSTR() functions in the PL/pgSQL + documentation (Yugo Nagata, Tom Lane) + + + + These functions are stated to + be Oracle compatible, but + they weren't exactly. 
In particular, there was a discrepancy in the + interpretation of a negative third parameter: Oracle thinks that a + negative value indicates the last place where the target substring can + begin, whereas our functions took it as the last place where the + target can end. Also, Oracle throws an error for a zero or negative + fourth parameter, whereas our functions returned zero. + + + + The sample code has been adjusted to match Oracle's behavior more + precisely. Users who have copied this code into their applications + may wish to update their copies. + + + + + + Fix pg_dump to make ACL (permissions), + comment, and security label entries reliably identifiable in archive + output formats (Tom Lane) + + + + The tag portion of an ACL archive entry was usually + just the name of the associated object. Make it start with the object + type instead, bringing ACLs into line with the convention already used + for comment and security label archive entries. Also, fix the + comment and security label entries for the whole database, if present, + to make their tags start with DATABASE so that they + also follow this convention. This prevents false matches in code that + tries to identify large-object-related entries by seeing if the tag + starts with LARGE OBJECT. That could have resulted + in misclassifying entries as data rather than schema, with undesirable + results in a schema-only or data-only dump. + + + + Note that this change has user-visible results in the output + of pg_restore --list. + + + + + + Rename pg_rewind's + copy_file_range function to avoid conflict + with new Linux system call of that name (Andres Freund) + + + + This change prevents build failures with newer glibc versions. + + + + + + In ecpg, detect indicator arrays that do + not have the correct length and report an error (David Rader) + + + + + + Change the behavior of contrib/cube's + cube ~> int + operator to make it compatible with KNN search (Alexander Korotkov) + + + + The meaning of the second argument (the dimension selector) has been + changed to make it predictable which value is selected even when + dealing with cubes of varying dimensionalities. + + + + This is an incompatible change, but since the point of the operator + was to be used in KNN searches, it seems rather useless as-is. + After installing this update, any expression indexes or materialized + views using this operator will need to be reindexed/refreshed. + + + + + + Avoid triggering a libc assertion + in contrib/hstore, due to use + of memcpy() with equal source and destination + pointers (Tomas Vondra) + + + + + + Fix incorrect display of tuples' null bitmaps + in contrib/pageinspect (Maksim Milyutin) + + + + + + In contrib/postgres_fdw, avoid + outer pathkeys do not match mergeclauses + planner error when constructing a plan involving a remote join + (Robert Haas) + + + + + + Provide modern examples of how to auto-start Postgres on macOS + (Tom Lane) + + + + The scripts in contrib/start-scripts/osx use + infrastructure that's been deprecated for over a decade, and which no + longer works at all in macOS releases of the last couple of years. + Add a new subdirectory contrib/start-scripts/macos + containing scripts that use the newer launchd + infrastructure. + + + + + + Fix incorrect selection of configuration-specific libraries for + OpenSSL on Windows (Andrew Dunstan) + + + + + + Support linking to MinGW-built versions of libperl (Noah Misch) + + + + This allows building PL/Perl with some common Perl distributions for + Windows. 
+ + + + + + Fix MSVC build to test whether 32-bit libperl + needs -D_USE_32BIT_TIME_T (Noah Misch) + + + + Available Perl distributions are inconsistent about what they expect, + and lack any reliable means of reporting it, so resort to a build-time + test on what the library being used actually does. + + + + + + On Windows, install the crash dump handler earlier in postmaster + startup (Takayuki Tsunakawa) + + + + This may allow collection of a core dump for some early-startup + failures that did not produce a dump before. + + + + + + On Windows, avoid encoding-conversion-related crashes when emitting + messages very early in postmaster startup (Takayuki Tsunakawa) + + + + + + Use our existing Motorola 68K spinlock code on OpenBSD as + well as NetBSD (David Carlier) + + + + + + Add support for spinlocks on Motorola 88K (David Carlier) + + + + + + Update time zone data files to tzdata + release 2018c for DST law changes in Brazil, Sao Tome and Principe, + plus historical corrections for Bolivia, Japan, and South Sudan. + The US/Pacific-New zone has been removed (it was + only an alias for America/Los_Angeles anyway). + + + + + + + + + + Release 9.6.6 + + + Release date: + 2017-11-09 + + + + This release contains a variety of fixes from 9.6.5. + For information about new features in the 9.6 major release, see + . + + + + Migration to Version 9.6.6 + + + A dump/restore is not required for those running 9.6.X. + + + + However, if you use BRIN indexes, see the fourth changelog entry below. + + + + Also, if you are upgrading from a version earlier than 9.6.4, + see . + + + + + Changes + + + + + + Ensure that INSERT ... ON CONFLICT DO UPDATE checks + table permissions and RLS policies in all cases (Dean Rasheed) + + + + The update path of INSERT ... ON CONFLICT DO UPDATE + requires SELECT permission on the columns of the + arbiter index, but it failed to check for that in the case of an + arbiter specified by constraint name. + In addition, for a table with row level security enabled, it failed to + check updated rows against the table's SELECT + policies (regardless of how the arbiter index was specified). + (CVE-2017-15099) + + + + + + Fix crash due to rowtype mismatch + in json{b}_populate_recordset() + (Michael Paquier, Tom Lane) + + + + These functions used the result rowtype specified in the FROM + ... AS clause without checking that it matched the actual + rowtype of the supplied tuple value. If it didn't, that would usually + result in a crash, though disclosure of server memory contents seems + possible as well. + (CVE-2017-15098) + + + + + + Fix sample server-start scripts to become $PGUSER + before opening $PGLOG (Noah Misch) + + + + Previously, the postmaster log file was opened while still running as + root. The database owner could therefore mount an attack against + another system user by making $PGLOG be a symbolic + link to some other file, which would then become corrupted by appending + log messages. + + + + By default, these scripts are not installed anywhere. Users who have + made use of them will need to manually recopy them, or apply the same + changes to their modified versions. If the + existing $PGLOG file is root-owned, it will need to + be removed or renamed out of the way before restarting the server with + the corrected script. + (CVE-2017-12172) + + + + + + Fix BRIN index summarization to handle concurrent table extension + correctly (Álvaro Herrera) + + + + Previously, a race condition allowed some table rows to be omitted from + the index. 
It may be necessary to reindex existing BRIN indexes to + recover from past occurrences of this problem. + + + + + + Fix possible failures during concurrent updates of a BRIN index + (Tom Lane) + + + + These race conditions could result in errors like invalid index + offnum or inconsistent range map. + + + + + + Fix crash when logical decoding is invoked from a SPI-using function, + in particular any function written in a PL language + (Tom Lane) + + + + + + Fix incorrect query results when multiple GROUPING + SETS columns contain the same simple variable (Tom Lane) + + + + + + Fix incorrect parallelization decisions for nested queries + (Amit Kapila, Kuntal Ghosh) + + + + + + Fix parallel query handling to not fail when a recently-used role is + dropped (Amit Kapila) + + + + + + Fix json_build_array(), + json_build_object(), and their jsonb + equivalents to handle explicit VARIADIC arguments + correctly (Michael Paquier) + + + + + + + Properly reject attempts to convert infinite float values to + type numeric (Tom Lane, KaiGai Kohei) + + + + Previously the behavior was platform-dependent. + + + + + + Fix corner-case crashes when columns have been added to the end of a + view (Tom Lane) + + + + + + Record proper dependencies when a view or rule + contains FieldSelect + or FieldStore expression nodes (Tom Lane) + + + + Lack of these dependencies could allow a column or data + type DROP to go through when it ought to fail, + thereby causing later uses of the view or rule to get errors. + This patch does not do anything to protect existing views/rules, + only ones created in the future. + + + + + + Correctly detect hashability of range data types (Tom Lane) + + + + The planner mistakenly assumed that any range type could be hashed + for use in hash joins or hash aggregation, but actually it must check + whether the range's subtype has hash support. This does not affect any + of the built-in range types, since they're all hashable anyway. + + + + + + + Correctly ignore RelabelType expression nodes + when determining relation distinctness (David Rowley) + + + + This allows the intended optimization to occur when a subquery has + a result column of type varchar. + + + + + + Prevent sharing transition states between ordered-set aggregates + (David Rowley) + + + + This causes a crash with the built-in ordered-set aggregates, and + probably with user-written ones as well. v11 and later will include + provisions for dealing with such cases safely, but in released + branches, just disable the optimization. + + + + + + Prevent idle_in_transaction_session_timeout from + being ignored when a statement_timeout occurred + earlier (Lukas Fittl) + + + + + + Fix low-probability loss of NOTIFY messages due to + XID wraparound (Marko Tiikkaja, Tom Lane) + + + + If a session executed no queries, but merely listened for + notifications, for more than 2 billion transactions, it started to miss + some notifications from concurrently-committing transactions. 
+ + + + + + + Avoid SIGBUS crash on Linux when a DSM memory + request exceeds the space available in tmpfs + (Thomas Munro) + + + + + + Reduce the frequency of data flush requests during bulk file copies to + avoid performance problems on macOS, particularly with its new APFS + file system (Tom Lane) + + + + + + + Prevent low-probability crash in processing of nested trigger firings + (Tom Lane) + + + + + + Allow COPY's FREEZE option to + work when the transaction isolation level is REPEATABLE + READ or higher (Noah Misch) + + + + This case was unintentionally broken by a previous bug fix. + + + + + + + Correctly restore the umask setting when file creation fails + in COPY or lo_export() + (Peter Eisentraut) + + + + + + + Give a better error message for duplicate column names + in ANALYZE (Nathan Bossart) + + + + + + + Add missing cases in GetCommandLogLevel(), + preventing errors when certain SQL commands are used while + log_statement is set to ddl + (Michael Paquier) + + + + + + + Fix mis-parsing of the last line in a + non-newline-terminated pg_hba.conf file + (Tom Lane) + + + + + + Fix AggGetAggref() to return the + correct Aggref nodes to aggregate final + functions whose transition calculations have been merged (Tom Lane) + + + + + + + Fix pg_dump to ensure that it + emits GRANT commands in a valid order + (Stephen Frost) + + + + + + Fix pg_basebackup's matching of tablespace + paths to canonicalize both paths before comparing (Michael Paquier) + + + + This is particularly helpful on Windows. + + + + + + Fix libpq to not require user's home + directory to exist (Tom Lane) + + + + In v10, failure to find the home directory while trying to + read ~/.pgpass was treated as a hard error, + but it should just cause that file to not be found. Both v10 and + previous release branches made the same mistake when + reading ~/.pg_service.conf, though this was less + obvious since that file is not sought unless a service name is + specified. + + + + + + + Fix libpq to guard against integer + overflow in the row count of a PGresult + (Michael Paquier) + + + + + + + Fix ecpg's handling of out-of-scope cursor + declarations with pointer or array variables (Michael Meskes) + + + + + + In ecpglib, correctly handle backslashes in string literals depending + on whether standard_conforming_strings is set + (Tsunakawa Takayuki) + + + + + + Make ecpglib's Informix-compatibility mode ignore fractional digits in + integer input strings, as expected (Gao Zengqi, Michael Meskes) + + + + + + + Fix ecpg's regression tests to work reliably + on Windows (Christian Ullrich, Michael Meskes) + + + + + + Fix missing temp-install prerequisites + for check-like Make targets (Noah Misch) + + + + Some non-default test procedures that are meant to work + like make check failed to ensure that the temporary + installation was up to date. + + + + + + + Sync our copy of the timezone library with IANA release tzcode2017c + (Tom Lane) + + + + This fixes various issues; the only one likely to be user-visible + is that the default DST rules for a POSIX-style zone name, if + no posixrules file exists in the timezone data + directory, now match current US law rather than what it was a dozen + years ago. + + + + + + Update time zone data files to tzdata + release 2017c for DST law changes in Fiji, Namibia, Northern Cyprus, + Sudan, Tonga, and Turks & Caicos Islands, plus historical + corrections for Alaska, Apia, Burma, Calcutta, Detroit, Ireland, + Namibia, and Pago Pago. 
+ + + + + + + + + + Release 9.6.5 + + + Release date: + 2017-08-31 + + + + This release contains a small number of fixes from 9.6.4. + For information about new features in the 9.6 major release, see + . + + + + Migration to Version 9.6.5 + + + A dump/restore is not required for those running 9.6.X. + + + + However, if you are upgrading from a version earlier than 9.6.4, + see . + + + + + Changes + + + + + + + Show foreign tables + in information_schema.table_privileges + view (Peter Eisentraut) + + + + All other relevant information_schema views include + foreign tables, but this one ignored them. + + + + Since this view definition is installed by initdb, + merely upgrading will not fix the problem. If you need to fix this + in an existing installation, you can, as a superuser, do this + in psql: + +SET search_path TO information_schema; +CREATE OR REPLACE VIEW table_privileges AS + SELECT CAST(u_grantor.rolname AS sql_identifier) AS grantor, + CAST(grantee.rolname AS sql_identifier) AS grantee, + CAST(current_database() AS sql_identifier) AS table_catalog, + CAST(nc.nspname AS sql_identifier) AS table_schema, + CAST(c.relname AS sql_identifier) AS table_name, + CAST(c.prtype AS character_data) AS privilege_type, + CAST( + CASE WHEN + -- object owner always has grant options + pg_has_role(grantee.oid, c.relowner, 'USAGE') + OR c.grantable + THEN 'YES' ELSE 'NO' END AS yes_or_no) AS is_grantable, + CAST(CASE WHEN c.prtype = 'SELECT' THEN 'YES' ELSE 'NO' END AS yes_or_no) AS with_hierarchy + + FROM ( + SELECT oid, relname, relnamespace, relkind, relowner, (aclexplode(coalesce(relacl, acldefault('r', relowner)))).* FROM pg_class + ) AS c (oid, relname, relnamespace, relkind, relowner, grantor, grantee, prtype, grantable), + pg_namespace nc, + pg_authid u_grantor, + ( + SELECT oid, rolname FROM pg_authid + UNION ALL + SELECT 0::oid, 'PUBLIC' + ) AS grantee (oid, rolname) + + WHERE c.relnamespace = nc.oid + AND c.relkind IN ('r', 'v', 'f') + AND c.grantee = grantee.oid + AND c.grantor = u_grantor.oid + AND c.prtype IN ('INSERT', 'SELECT', 'UPDATE', 'DELETE', 'TRUNCATE', 'REFERENCES', 'TRIGGER') + AND (pg_has_role(u_grantor.oid, 'USAGE') + OR pg_has_role(grantee.oid, 'USAGE') + OR grantee.rolname = 'PUBLIC'); + + This must be repeated in each database to be fixed, + including template0. + + + + + + + Clean up handling of a fatal exit (e.g., due to receipt + of SIGTERM) that occurs while trying to execute + a ROLLBACK of a failed transaction (Tom Lane) + + + + This situation could result in an assertion failure. In production + builds, the exit would still occur, but it would log an unexpected + message about cannot drop active portal. + + + + + + + Remove assertion that could trigger during a fatal exit (Tom Lane) + + + + + + + Correctly identify columns that are of a range type or domain type over + a composite type or domain type being searched for (Tom Lane) + + + + Certain ALTER commands that change the definition of a + composite type or domain type are supposed to fail if there are any + stored values of that type in the database, because they lack the + infrastructure needed to update or check such values. Previously, + these checks could miss relevant values that are wrapped inside range + types or sub-domains, possibly allowing the database to become + inconsistent. 
+ + + + + + + Prevent crash when passing fixed-length pass-by-reference data types + to parallel worker processes (Tom Lane) + + + + + + + Fix crash in pg_restore when using parallel mode and + using a list file to select a subset of items to restore + (Fabrízio de Royes Mello) + + + + + + + Change ecpg's parser to allow RETURNING + clauses without attached C variables (Michael Meskes) + + + + This allows ecpg programs to contain SQL constructs + that use RETURNING internally (for example, inside a CTE) + rather than using it to define values to be returned to the client. + + + + + + + Change ecpg's parser to recognize backslash + continuation of C preprocessor command lines (Michael Meskes) + + + + + + + Improve selection of compiler flags for PL/Perl on Windows (Tom Lane) + + + + This fix avoids possible crashes of PL/Perl due to inconsistent + assumptions about the width of time_t values. + A side-effect that may be visible to extension developers is + that _USE_32BIT_TIME_T is no longer defined globally + in PostgreSQL Windows builds. This is not expected + to cause problems, because type time_t is not used + in any PostgreSQL API definitions. + + + + + + + Fix make check to behave correctly when invoked via a + non-GNU make program (Thomas Munro) + + + + + + + + Release 9.6.4 @@ -12,7 +3235,7 @@ This release contains a variety of fixes from 9.6.3. For information about new features in the 9.6 major release, see - . + . @@ -29,7 +3252,7 @@ Also, if you are upgrading from a version earlier than 9.6.3, - see . + see . @@ -50,7 +3273,7 @@ Branch: REL9_2_STABLE [e255e97a2] 2017-08-07 07:09:32 -0700 --> Further restrict visibility - of pg_user_mappings.umoptions, to + of pg_user_mappings.umoptions, to protect passwords stored as user mapping options (Noah Misch) @@ -58,11 +3281,11 @@ Branch: REL9_2_STABLE [e255e97a2] 2017-08-07 07:09:32 -0700 The fix for CVE-2017-7486 was incorrect: it allowed a user to see the options in her own user mapping, even if she did not - have USAGE permission on the associated foreign server. + have USAGE permission on the associated foreign server. Such options might include a password that had been provided by the server owner rather than the user herself. - Since information_schema.user_mapping_options does not - show the options in such cases, pg_user_mappings + Since information_schema.user_mapping_options does not + show the options in such cases, pg_user_mappings should not either. (CVE-2017-7547) @@ -77,15 +3300,15 @@ Branch: REL9_2_STABLE [e255e97a2] 2017-08-07 07:09:32 -0700 Restart the postmaster after adding allow_system_table_mods - = true to postgresql.conf. (In versions - supporting ALTER SYSTEM, you can use that to make the + = true to postgresql.conf. (In versions + supporting ALTER SYSTEM, you can use that to make the configuration change, but you'll still need a restart.) - In each database of the cluster, + In each database of the cluster, run the following commands as superuser: SET search_path = pg_catalog; @@ -116,15 +3339,15 @@ CREATE OR REPLACE VIEW pg_user_mappings AS - Do not forget to include the template0 - and template1 databases, or the vulnerability will still - exist in databases you create later. To fix template0, + Do not forget to include the template0 + and template1 databases, or the vulnerability will still + exist in databases you create later. To fix template0, you'll need to temporarily make it accept connections. 
- In PostgreSQL 9.5 and later, you can use + In PostgreSQL 9.5 and later, you can use ALTER DATABASE template0 WITH ALLOW_CONNECTIONS true; - and then after fixing template0, undo that with + and then after fixing template0, undo that with ALTER DATABASE template0 WITH ALLOW_CONNECTIONS false; @@ -138,7 +3361,7 @@ UPDATE pg_database SET datallowconn = false WHERE datname = 'template0'; - Finally, remove the allow_system_table_mods configuration + Finally, remove the allow_system_table_mods configuration setting, and again restart the postmaster. @@ -161,16 +3384,16 @@ Branch: REL9_2_STABLE [06651648a] 2017-08-07 17:04:17 +0300 - libpq ignores empty password specifications, and does + libpq ignores empty password specifications, and does not transmit them to the server. So, if a user's password has been set to the empty string, it's impossible to log in with that password - via psql or other libpq-based + via psql or other libpq-based clients. An administrator might therefore believe that setting the password to empty is equivalent to disabling password login. - However, with a modified or non-libpq-based client, + However, with a modified or non-libpq-based client, logging in could be possible, depending on which authentication method is configured. In particular the most common - method, md5, accepted empty passwords. + method, md5, accepted empty passwords. Change the server to reject empty passwords in all cases. (CVE-2017-7546) @@ -185,13 +3408,13 @@ Branch: REL9_5_STABLE [873741c68] 2017-08-07 10:19:21 -0400 Branch: REL9_4_STABLE [f1cda6d6c] 2017-08-07 10:19:22 -0400 --> - Make lo_put() check for UPDATE privilege on + Make lo_put() check for UPDATE privilege on the target large object (Tom Lane, Michael Paquier) - lo_put() should surely require the same permissions - as lowrite(), but the check was missing, allowing any + lo_put() should surely require the same permissions + as lowrite(), but the check was missing, allowing any user to change the data in a large object. (CVE-2017-7548) @@ -206,12 +3429,12 @@ Branch: REL9_5_STABLE [fd376afc9] 2017-06-15 12:30:02 -0400 --> Correct the documentation about the process for upgrading standby - servers with pg_upgrade (Bruce Momjian) + servers with pg_upgrade (Bruce Momjian) The previous documentation instructed users to start/stop the primary - server after running pg_upgrade but before syncing + server after running pg_upgrade but before syncing the standby servers. This sequence is unsafe. 
@@ -359,7 +3582,7 @@ Branch: REL9_3_STABLE [cc154d9a0] 2017-06-28 12:30:16 -0400 Branch: REL9_2_STABLE [5e7447132] 2017-06-28 12:30:16 -0400 --> - Fix code for setting on + Fix code for setting on Solaris (Tom Lane) @@ -418,7 +3641,7 @@ Branch: REL9_2_STABLE [81bf7b5b1] 2017-06-21 14:13:58 -0700 --> Fix possible creation of an invalid WAL segment when a standby is - promoted just after it processes an XLOG_SWITCH WAL + promoted just after it processes an XLOG_SWITCH WAL record (Andres Freund) @@ -432,7 +3655,7 @@ Branch: REL9_5_STABLE [446914f6b] 2017-06-30 12:00:03 -0400 Branch: REL9_4_STABLE [5aa8db014] 2017-06-30 12:00:03 -0400 --> - Fix walsender to exit promptly when client requests + Fix walsender to exit promptly when client requests shutdown (Tom Lane) @@ -452,7 +3675,7 @@ Branch: REL9_3_STABLE [45d067d50] 2017-06-05 19:18:16 -0700 Branch: REL9_2_STABLE [133b1920c] 2017-06-05 19:18:16 -0700 --> - Fix SIGHUP and SIGUSR1 handling in + Fix SIGHUP and SIGUSR1 handling in walsender processes (Petr Jelinek, Andres Freund) @@ -482,7 +3705,7 @@ Branch: REL9_3_STABLE [cb59949f6] 2017-06-26 17:31:56 -0400 Branch: REL9_2_STABLE [e96adaacd] 2017-06-26 17:31:56 -0400 --> - Fix unnecessarily slow restarts of walreceiver + Fix unnecessarily slow restarts of walreceiver processes due to race condition in postmaster (Tom Lane) @@ -601,7 +3824,7 @@ Branch: REL9_3_STABLE [aea1a3f0e] 2017-07-12 18:00:04 -0400 Branch: REL9_2_STABLE [75670ec37] 2017-07-12 18:00:04 -0400 --> - Fix cases where an INSERT or UPDATE assigns + Fix cases where an INSERT or UPDATE assigns to more than one element of a column that is of domain-over-array type (Tom Lane) @@ -617,7 +3840,7 @@ Branch: REL9_4_STABLE [dc777f9db] 2017-06-27 17:51:11 -0400 Branch: REL9_3_STABLE [66dee28b4] 2017-06-27 17:51:11 -0400 --> - Allow window functions to be used in sub-SELECTs that + Allow window functions to be used in sub-SELECTs that are within the arguments of an aggregate function (Tom Lane) @@ -629,7 +3852,7 @@ Branch: master [7086be6e3] 2017-07-24 15:57:24 -0400 Branch: REL9_6_STABLE [971faefc2] 2017-07-24 16:24:42 -0400 --> - Ensure that a view's CHECK OPTIONS clause is enforced + Ensure that a view's CHECK OPTIONS clause is enforced properly when the underlying table is a foreign table (Etsuro Fujita) @@ -651,12 +3874,12 @@ Branch: REL9_2_STABLE [da9165686] 2017-05-26 15:16:59 -0400 --> Move autogenerated array types out of the way during - ALTER ... RENAME (Vik Fearing) + ALTER ... RENAME (Vik Fearing) Previously, we would rename a conflicting autogenerated array type - out of the way during CREATE; this fix extends that + out of the way during CREATE; this fix extends that behavior to renaming operations. @@ -669,7 +3892,7 @@ Branch: REL9_6_STABLE [b35cce914] 2017-05-15 11:33:44 -0400 Branch: REL9_5_STABLE [53a1aa9f9] 2017-05-15 11:33:45 -0400 --> - Fix dangling pointer in ALTER TABLE when there is a + Fix dangling pointer in ALTER TABLE when there is a comment on a constraint belonging to the table (David Rowley) @@ -690,8 +3913,8 @@ Branch: REL9_3_STABLE [b7d1bc820] 2017-08-03 21:29:36 -0400 Branch: REL9_2_STABLE [22eb38caa] 2017-08-03 21:42:46 -0400 --> - Ensure that ALTER USER ... SET accepts all the syntax - variants that ALTER ROLE ... SET does (Peter Eisentraut) + Ensure that ALTER USER ... SET accepts all the syntax + variants that ALTER ROLE ... 
SET does (Peter Eisentraut) @@ -702,18 +3925,18 @@ Branch: master [86705aa8c] 2017-08-03 13:24:48 -0400 Branch: REL9_6_STABLE [1f220c390] 2017-08-03 13:25:32 -0400 --> - Allow a foreign table's CHECK constraints to be - initially NOT VALID (Amit Langote) + Allow a foreign table's CHECK constraints to be + initially NOT VALID (Amit Langote) - CREATE TABLE silently drops NOT VALID - specifiers for CHECK constraints, reasoning that the + CREATE TABLE silently drops NOT VALID + specifiers for CHECK constraints, reasoning that the table must be empty so the constraint can be validated immediately. - But this is wrong for CREATE FOREIGN TABLE, where there's + But this is wrong for CREATE FOREIGN TABLE, where there's no reason to suppose that the underlying table is empty, and even if it is it's no business of ours to decide that the constraint can be - treated as valid going forward. Skip this optimization for + treated as valid going forward. Skip this optimization for foreign tables. @@ -730,14 +3953,14 @@ Branch: REL9_2_STABLE [ac93a78b0] 2017-06-16 11:46:26 +0300 --> Properly update dependency info when changing a datatype I/O - function's argument or return type from opaque to the + function's argument or return type from opaque to the correct type (Heikki Linnakangas) - CREATE TYPE updates I/O functions declared in this + CREATE TYPE updates I/O functions declared in this long-obsolete style, but it forgot to record a dependency on the - type, allowing a subsequent DROP TYPE to leave broken + type, allowing a subsequent DROP TYPE to leave broken function definitions behind. @@ -749,7 +3972,7 @@ Branch: master [34aebcf42] 2017-06-02 19:11:15 -0700 Branch: REL9_6_STABLE [8a7cd781e] 2017-06-02 19:11:23 -0700 --> - Allow parallelism in the query plan when COPY copies from + Allow parallelism in the query plan when COPY copies from a query's result (Andres Freund) @@ -765,8 +3988,8 @@ Branch: REL9_3_STABLE [11854dee0] 2017-07-12 22:04:08 +0300 Branch: REL9_2_STABLE [40ba61b44] 2017-07-12 22:04:15 +0300 --> - Reduce memory usage when ANALYZE processes - a tsvector column (Heikki Linnakangas) + Reduce memory usage when ANALYZE processes + a tsvector column (Heikki Linnakangas) @@ -782,7 +4005,7 @@ Branch: REL9_2_STABLE [798d2321e] 2017-05-21 13:05:17 -0400 --> Fix unnecessary precision loss and sloppy rounding when multiplying - or dividing money values by integers or floats (Tom Lane) + or dividing money values by integers or floats (Tom Lane) @@ -798,7 +4021,7 @@ Branch: REL9_2_STABLE [a047270d5] 2017-05-24 15:28:35 -0400 --> Tighten checks for whitespace in functions that parse identifiers, - such as regprocedurein() (Tom Lane) + such as regprocedurein() (Tom Lane) @@ -824,13 +4047,13 @@ Branch: REL9_3_STABLE [0d8f015e7] 2017-07-31 12:38:35 -0400 Branch: REL9_2_STABLE [456c7dff2] 2017-07-31 12:38:35 -0400 --> - Use relevant #define symbols from Perl while - compiling PL/Perl (Ashutosh Sharma, Tom Lane) + Use relevant #define symbols from Perl while + compiling PL/Perl (Ashutosh Sharma, Tom Lane) This avoids portability problems, typically manifesting as - a handshake mismatch during library load, when working with + a handshake mismatch during library load, when working with recent Perl versions. 
@@ -845,7 +4068,7 @@ Branch: REL9_4_STABLE [1fe1fc449] 2017-06-07 14:04:49 +0300 Branch: REL9_3_STABLE [f2fa0c651] 2017-06-07 14:04:44 +0300 --> - In libpq, reset GSS/SASL and SSPI authentication + In libpq, reset GSS/SASL and SSPI authentication state properly after a failed connection attempt (Michael Paquier) @@ -867,9 +4090,9 @@ Branch: REL9_3_STABLE [6bc710f6d] 2017-05-17 12:24:19 -0400 Branch: REL9_2_STABLE [07477130e] 2017-05-17 12:24:19 -0400 --> - In psql, fix failure when COPY FROM STDIN + In psql, fix failure when COPY FROM STDIN is ended with a keyboard EOF signal and then another COPY - FROM STDIN is attempted (Thomas Munro) + FROM STDIN is attempted (Thomas Munro) @@ -888,8 +4111,8 @@ Branch: REL9_4_STABLE [b93217653] 2017-08-03 17:36:43 -0400 Branch: REL9_3_STABLE [035bb8222] 2017-08-03 17:36:23 -0400 --> - Fix pg_dump and pg_restore to - emit REFRESH MATERIALIZED VIEW commands last (Tom Lane) + Fix pg_dump and pg_restore to + emit REFRESH MATERIALIZED VIEW commands last (Tom Lane) @@ -911,8 +4134,8 @@ Branch: REL9_5_STABLE [12f1e523a] 2017-08-03 14:55:17 -0400 Branch: REL9_4_STABLE [69ad12b58] 2017-08-03 14:55:17 -0400 --> - Improve pg_dump/pg_restore's - reporting of error conditions originating in zlib + Improve pg_dump/pg_restore's + reporting of error conditions originating in zlib (Vladimir Kunschikov, Álvaro Herrera) @@ -927,7 +4150,7 @@ Branch: REL9_4_STABLE [502ead3d6] 2017-07-22 20:20:10 -0400 Branch: REL9_3_STABLE [68a22bc69] 2017-07-22 20:20:10 -0400 --> - Fix pg_dump with the option to drop event triggers as expected (Tom Lane) @@ -945,8 +4168,8 @@ Branch: master [4500edc7e] 2017-06-28 10:33:57 -0400 Branch: REL9_6_STABLE [a2de017b3] 2017-06-28 10:34:01 -0400 --> - Fix pg_dump with the @@ -961,7 +4184,7 @@ Branch: REL9_3_STABLE [a561254e4] 2017-05-26 12:51:05 -0400 Branch: REL9_2_STABLE [f62e1eff5] 2017-05-26 12:51:06 -0400 --> - Fix pg_dump to not emit invalid SQL for an empty + Fix pg_dump to not emit invalid SQL for an empty operator class (Daniel Gustafsson) @@ -977,7 +4200,7 @@ Branch: REL9_3_STABLE [2943c04f7] 2017-06-19 11:03:16 -0400 Branch: REL9_2_STABLE [c10cbf77a] 2017-06-19 11:03:21 -0400 --> - Fix pg_dump output to stdout on Windows (Kuntal Ghosh) + Fix pg_dump output to stdout on Windows (Kuntal Ghosh) @@ -997,14 +4220,14 @@ Branch: REL9_3_STABLE [b6d640047] 2017-07-24 15:16:31 -0400 Branch: REL9_2_STABLE [d9874fde8] 2017-07-24 15:16:31 -0400 --> - Fix pg_get_ruledef() to print correct output for - the ON SELECT rule of a view whose columns have been + Fix pg_get_ruledef() to print correct output for + the ON SELECT rule of a view whose columns have been renamed (Tom Lane) - In some corner cases, pg_dump relies - on pg_get_ruledef() to dump views, so that this error + In some corner cases, pg_dump relies + on pg_get_ruledef() to dump views, so that this error could result in dump/reload failures. 
@@ -1020,7 +4243,7 @@ Branch: REL9_3_STABLE [e947838ae] 2017-07-20 11:29:36 -0400 --> Fix dumping of outer joins with empty constraints, such as the result - of a NATURAL LEFT JOIN with no common columns (Tom Lane) + of a NATURAL LEFT JOIN with no common columns (Tom Lane) @@ -1035,7 +4258,7 @@ Branch: REL9_3_STABLE [0ecc407d9] 2017-07-13 19:24:44 -0400 Branch: REL9_2_STABLE [bccfb1776] 2017-07-13 19:24:44 -0400 --> - Fix dumping of function expressions in the FROM clause in + Fix dumping of function expressions in the FROM clause in cases where the expression does not deparse into something that looks like a function call (Tom Lane) @@ -1052,7 +4275,7 @@ Branch: REL9_3_STABLE [f3633689f] 2017-07-14 16:03:23 +0300 Branch: REL9_2_STABLE [4b994a96c] 2017-07-14 16:03:27 +0300 --> - Fix pg_basebackup output to stdout on Windows + Fix pg_basebackup output to stdout on Windows (Haribabu Kommi) @@ -1070,12 +4293,12 @@ Branch: REL9_6_STABLE [73fbf3d3d] 2017-07-21 22:04:55 -0400 Branch: REL9_5_STABLE [ed367be64] 2017-07-21 22:05:07 -0400 --> - Fix pg_rewind to correctly handle files exceeding 2GB + Fix pg_rewind to correctly handle files exceeding 2GB (Kuntal Ghosh, Michael Paquier) - Ordinarily such files won't appear in PostgreSQL data + Ordinarily such files won't appear in PostgreSQL data directories, but they could be present in some cases. @@ -1091,8 +4314,8 @@ Branch: REL9_3_STABLE [5c890645d] 2017-06-20 13:20:02 -0400 Branch: REL9_2_STABLE [65beccae5] 2017-06-20 13:20:02 -0400 --> - Fix pg_upgrade to ensure that the ending WAL record - does not have = minimum + Fix pg_upgrade to ensure that the ending WAL record + does not have = minimum (Bruce Momjian) @@ -1110,7 +4333,7 @@ Branch: REL9_6_STABLE [d3ca4b4b4] 2017-06-05 16:10:07 -0700 Branch: REL9_5_STABLE [25653c171] 2017-06-05 16:10:07 -0700 --> - Fix pg_xlogdump's computation of WAL record length + Fix pg_xlogdump's computation of WAL record length (Andres Freund) @@ -1130,9 +4353,9 @@ Branch: REL9_4_STABLE [a648fc70a] 2017-07-21 14:20:43 -0400 Branch: REL9_3_STABLE [6d9de660d] 2017-07-21 14:20:43 -0400 --> - In postgres_fdw, re-establish connections to remote - servers after ALTER SERVER or ALTER USER - MAPPING commands (Kyotaro Horiguchi) + In postgres_fdw, re-establish connections to remote + servers after ALTER SERVER or ALTER USER + MAPPING commands (Kyotaro Horiguchi) @@ -1151,7 +4374,7 @@ Branch: REL9_4_STABLE [c02c450cf] 2017-06-07 15:40:35 -0400 Branch: REL9_3_STABLE [fc267a0c3] 2017-06-07 15:41:05 -0400 --> - In postgres_fdw, allow cancellation of remote + In postgres_fdw, allow cancellation of remote transaction control commands (Robert Haas, Rafia Sabih) @@ -1170,7 +4393,7 @@ Branch: REL9_5_STABLE [6f2fe2468] 2017-05-11 14:51:38 -0400 Branch: REL9_4_STABLE [5c633f76b] 2017-05-11 14:51:46 -0400 --> - Increase MAX_SYSCACHE_CALLBACKS to provide more room for + Increase MAX_SYSCACHE_CALLBACKS to provide more room for extensions (Tom Lane) @@ -1186,7 +4409,7 @@ Branch: REL9_3_STABLE [cee7238de] 2017-06-01 13:32:56 -0400 Branch: REL9_2_STABLE [a378b9bc2] 2017-06-01 13:32:56 -0400 --> - Always use , not , when building shared libraries with gcc (Tom Lane) @@ -1213,8 +4436,8 @@ Branch: REL9_3_STABLE [da30fa603] 2017-06-05 20:40:47 -0400 Branch: REL9_2_STABLE [f964a7c5a] 2017-06-05 20:41:01 -0400 --> - In MSVC builds, handle the case where the openssl - library is not within a VC subdirectory (Andrew Dunstan) + In MSVC builds, handle the case where the OpenSSL + library is not within a VC subdirectory (Andrew Dunstan) @@ -1229,13 
+4452,13 @@ Branch: REL9_3_STABLE [2c7d2114b] 2017-05-12 10:24:16 -0400 Branch: REL9_2_STABLE [614f83c12] 2017-05-12 10:24:36 -0400 --> - In MSVC builds, add proper include path for libxml2 + In MSVC builds, add proper include path for libxml2 header files (Andrew Dunstan) This fixes a former need to move things around in standard Windows - installations of libxml2. + installations of libxml2. @@ -1251,7 +4474,7 @@ Branch: REL9_2_STABLE [4885e5c88] 2017-07-23 23:53:55 -0700 --> In MSVC builds, recognize a Tcl library that is - named tcl86.lib (Noah Misch) + named tcl86.lib (Noah Misch) @@ -1272,8 +4495,8 @@ Branch: REL9_5_STABLE [7eb4124da] 2017-07-16 11:27:07 -0400 Branch: REL9_4_STABLE [9c3f502b4] 2017-07-16 11:27:15 -0400 --> - In MSVC builds, honor PROVE_FLAGS settings - on vcregress.pl's command line (Andrew Dunstan) + In MSVC builds, honor PROVE_FLAGS settings + on vcregress.pl's command line (Andrew Dunstan) @@ -1293,7 +4516,7 @@ Branch: REL9_4_STABLE [9c3f502b4] 2017-07-16 11:27:15 -0400 This release contains a variety of fixes from 9.6.2. For information about new features in the 9.6 major release, see - . + . @@ -1310,12 +4533,12 @@ Branch: REL9_4_STABLE [9c3f502b4] 2017-07-16 11:27:15 -0400 Also, if you are using third-party replication tools that depend - on logical decoding, see the fourth changelog entry below. + on logical decoding, see the fourth changelog entry below. Also, if you are upgrading from a version earlier than 9.6.2, - see . + see . @@ -1336,18 +4559,18 @@ Branch: REL9_2_STABLE [99cbb0bd9] 2017-05-08 07:24:28 -0700 --> Restrict visibility - of pg_user_mappings.umoptions, to + of pg_user_mappings.umoptions, to protect passwords stored as user mapping options (Michael Paquier, Feike Steenbergen) The previous coding allowed the owner of a foreign server object, - or anyone he has granted server USAGE permission to, + or anyone he has granted server USAGE permission to, to see the options for all user mappings associated with that server. This might well include passwords for other users. Adjust the view definition to match the behavior of - information_schema.user_mapping_options, namely that + information_schema.user_mapping_options, namely that these options are visible to the user being mapped, or if the mapping is for PUBLIC and the current user is the server owner, or if the current user is a superuser. @@ -1358,7 +4581,7 @@ Branch: REL9_2_STABLE [99cbb0bd9] 2017-05-08 07:24:28 -0700 By itself, this patch will only fix the behavior in newly initdb'd databases. If you wish to apply this change in an existing database, follow the corrected procedure shown in the changelog entry for - CVE-2017-7547, in . + CVE-2017-7547, in . @@ -1386,7 +4609,7 @@ Branch: REL9_3_STABLE [703da1795] 2017-05-08 11:19:08 -0400 Some selectivity estimation functions in the planner will apply user-defined operators to values obtained - from pg_statistic, such as most common values and + from pg_statistic, such as most common values and histogram entries. This occurs before table permissions are checked, so a nefarious user could exploit the behavior to obtain these values for table columns he does not have permission to read. 
To fix, @@ -1408,17 +4631,17 @@ Branch: REL9_4_STABLE [ed36c1fe1] 2017-05-08 07:24:27 -0700 Branch: REL9_3_STABLE [3eab81127] 2017-05-08 07:24:28 -0700 --> - Restore libpq's recognition of - the PGREQUIRESSL environment variable (Daniel Gustafsson) + Restore libpq's recognition of + the PGREQUIRESSL environment variable (Daniel Gustafsson) Processing of this environment variable was unintentionally dropped - in PostgreSQL 9.3, but its documentation remained. + in PostgreSQL 9.3, but its documentation remained. This creates a security hazard, since users might be relying on the environment variable to force SSL-encrypted connections, but that would no longer be guaranteed. Restore handling of the variable, - but give it lower priority than PGSSLMODE, to avoid + but give it lower priority than PGSSLMODE, to avoid breaking configurations that work correctly with post-9.3 code. (CVE-2017-7485) @@ -1469,7 +4692,7 @@ Branch: REL9_3_STABLE [6bd7816e7] 2017-03-14 12:08:14 -0400 Branch: REL9_2_STABLE [b2ae1d6c4] 2017-03-14 12:10:36 -0400 --> - Fix possible corruption of init forks of unlogged indexes + Fix possible corruption of init forks of unlogged indexes (Robert Haas, Michael Paquier) @@ -1491,7 +4714,7 @@ Branch: REL9_3_STABLE [856580873] 2017-04-23 13:10:57 -0400 Branch: REL9_2_STABLE [952e33b05] 2017-04-23 13:10:58 -0400 --> - Fix incorrect reconstruction of pg_subtrans entries + Fix incorrect reconstruction of pg_subtrans entries when a standby server replays a prepared but uncommitted two-phase transaction (Tom Lane) @@ -1499,7 +4722,7 @@ Branch: REL9_2_STABLE [952e33b05] 2017-04-23 13:10:58 -0400 In most cases this turned out to have no visible ill effects, but in corner cases it could result in circular references - in pg_subtrans, potentially causing infinite loops + in pg_subtrans, potentially causing infinite loops in queries that examine rows modified by the two-phase transaction. @@ -1513,7 +4736,7 @@ Branch: REL9_5_STABLE [feb659cce] 2017-02-22 08:29:44 +0900 Branch: REL9_4_STABLE [a3eb715a3] 2017-02-22 08:29:57 +0900 --> - Avoid possible crash in walsender due to failure + Avoid possible crash in walsender due to failure to initialize a string buffer (Stas Kelvich, Fujii Masao) @@ -1561,7 +4784,7 @@ Branch: REL9_5_STABLE [dba1f310a] 2017-04-24 12:16:58 -0400 Branch: REL9_4_STABLE [436b560b8] 2017-04-24 12:16:58 -0400 --> - Fix postmaster's handling of fork() failure for a + Fix postmaster's handling of fork() failure for a background worker process (Tom Lane) @@ -1579,7 +4802,7 @@ Branch: master [89deca582] 2017-04-07 12:18:38 -0400 Branch: REL9_6_STABLE [c0a493e17] 2017-04-07 12:18:38 -0400 --> - Fix possible no relation entry for relid 0 error when + Fix possible no relation entry for relid 0 error when planning nested set operations (Tom Lane) @@ -1607,7 +4830,7 @@ Branch: REL9_6_STABLE [6c73b390b] 2017-04-17 15:29:00 -0400 Branch: REL9_5_STABLE [6f0f98bb0] 2017-04-17 15:29:00 -0400 --> - Avoid applying physical targetlist optimization to custom + Avoid applying physical targetlist optimization to custom scans (Dmitry Ivanov, Tom Lane) @@ -1626,13 +4849,13 @@ Branch: REL9_6_STABLE [92b15224b] 2017-05-06 21:46:41 -0400 Branch: REL9_5_STABLE [d617c7629] 2017-05-06 21:46:56 -0400 --> - Use the correct sub-expression when applying a FOR ALL + Use the correct sub-expression when applying a FOR ALL row-level-security policy (Stephen Frost) - In some cases the WITH CHECK restriction would be applied - when the USING restriction is more appropriate. 
+ In some cases the WITH CHECK restriction would be applied + when the USING restriction is more appropriate. @@ -1655,7 +4878,7 @@ Branch: REL9_2_STABLE [c9d6c564f] 2017-05-02 18:05:54 -0400 Due to lack of a cache flush step between commands in an extension script file, non-utility queries might not see the effects of an immediately preceding catalog change, such as ALTER TABLE - ... RENAME. + ... RENAME. @@ -1671,12 +4894,12 @@ Branch: REL9_2_STABLE [27a8c8033] 2017-02-12 16:05:23 -0500 --> Skip tablespace privilege checks when ALTER TABLE ... ALTER - COLUMN TYPE rebuilds an existing index (Noah Misch) + COLUMN TYPE rebuilds an existing index (Noah Misch) The command failed if the calling user did not currently have - CREATE privilege for the tablespace containing the index. + CREATE privilege for the tablespace containing the index. That behavior seems unhelpful, so skip the check, allowing the index to be rebuilt where it is. @@ -1693,13 +4916,13 @@ Branch: REL9_3_STABLE [954744f7a] 2017-04-28 14:53:56 -0400 Branch: REL9_2_STABLE [f60f0c8fe] 2017-04-28 14:55:42 -0400 --> - Fix ALTER TABLE ... VALIDATE CONSTRAINT to not recurse - to child tables when the constraint is marked NO INHERIT + Fix ALTER TABLE ... VALIDATE CONSTRAINT to not recurse + to child tables when the constraint is marked NO INHERIT (Amit Langote) - This fix prevents unwanted constraint does not exist failures + This fix prevents unwanted constraint does not exist failures when no matching constraint is present in the child tables. @@ -1712,7 +4935,7 @@ Branch: REL9_6_STABLE [943140d57] 2017-03-06 16:50:47 -0500 Branch: REL9_5_STABLE [420d9ec0a] 2017-03-06 16:50:47 -0500 --> - Avoid dangling pointer in COPY ... TO when row-level + Avoid dangling pointer in COPY ... TO when row-level security is active for the source table (Tom Lane) @@ -1730,8 +4953,8 @@ Branch: REL9_6_STABLE [68f7b91e5] 2017-03-04 16:09:33 -0500 Branch: REL9_5_STABLE [807df31d1] 2017-03-04 16:09:33 -0500 --> - Avoid accessing an already-closed relcache entry in CLUSTER - and VACUUM FULL (Tom Lane) + Avoid accessing an already-closed relcache entry in CLUSTER + and VACUUM FULL (Tom Lane) @@ -1753,14 +4976,14 @@ Branch: master [64ae420b2] 2017-03-17 14:35:54 +0000 Branch: REL9_6_STABLE [733488dc6] 2017-03-17 14:46:15 +0000 --> - Fix VACUUM to account properly for pages that could not + Fix VACUUM to account properly for pages that could not be scanned due to conflicting page pins (Andrew Gierth) This tended to lead to underestimation of the number of tuples in the table. In the worst case of a small heavily-contended - table, VACUUM could incorrectly report that the table + table, VACUUM could incorrectly report that the table contained no tuples, leading to very bad planning choices. @@ -1788,13 +5011,13 @@ Branch: master [d5286aa90] 2017-03-21 16:23:10 +0300 Branch: REL9_6_STABLE [a4d07d2e9] 2017-03-21 16:24:10 +0300 --> - Fix incorrect support for certain box operators in SP-GiST + Fix incorrect support for certain box operators in SP-GiST (Nikita Glukhov) - SP-GiST index scans using the operators &< - &> &<| and |&> + SP-GiST index scans using the operators &< + &> &<| and |&> would yield incorrect answers. 
@@ -1808,12 +5031,12 @@ Branch: REL9_5_STABLE [d68a2b20a] 2017-04-05 23:51:28 -0400 Branch: REL9_4_STABLE [8851bcf88] 2017-04-05 23:51:28 -0400 --> - Fix integer-overflow problems in interval comparison (Kyotaro + Fix integer-overflow problems in interval comparison (Kyotaro Horiguchi, Tom Lane) - The comparison operators for type interval could yield wrong + The comparison operators for type interval could yield wrong answers for intervals larger than about 296000 years. Indexes on columns containing such large values should be reindexed, since they may be corrupt. @@ -1831,13 +5054,13 @@ Branch: REL9_3_STABLE [6e86b448f] 2017-05-04 21:31:12 -0400 Branch: REL9_2_STABLE [a48d47908] 2017-05-04 22:39:23 -0400 --> - Fix cursor_to_xml() to produce valid output - with tableforest = false + Fix cursor_to_xml() to produce valid output + with tableforest = false (Thomas Munro, Peter Eisentraut) - Previously it failed to produce a wrapping <table> + Previously it failed to produce a wrapping <table> element. @@ -1855,8 +5078,8 @@ Branch: REL9_5_STABLE [cf73c6bfc] 2017-02-09 15:49:57 -0500 Branch: REL9_4_STABLE [86ef376bb] 2017-02-09 15:49:58 -0500 --> - Fix roundoff problems in float8_timestamptz() - and make_interval() (Tom Lane) + Fix roundoff problems in float8_timestamptz() + and make_interval() (Tom Lane) @@ -1876,7 +5099,7 @@ Branch: REL9_6_STABLE [1ec36a9eb] 2017-04-16 20:49:40 -0400 Branch: REL9_5_STABLE [b6e6ae1dc] 2017-04-16 20:50:31 -0400 --> - Fix pg_get_object_address() to handle members of operator + Fix pg_get_object_address() to handle members of operator families correctly (Álvaro Herrera) @@ -1888,12 +5111,12 @@ Branch: master [78874531b] 2017-03-24 13:53:40 +0300 Branch: REL9_6_STABLE [8de6278d3] 2017-03-24 13:55:02 +0300 --> - Fix cancelling of pg_stop_backup() when attempting to stop + Fix cancelling of pg_stop_backup() when attempting to stop a non-exclusive backup (Michael Paquier, David Steele) - If pg_stop_backup() was cancelled while waiting for a + If pg_stop_backup() was cancelled while waiting for a non-exclusive backup to end, related state was left inconsistent; a new exclusive backup could not be started, and there were other minor problems. @@ -1917,7 +5140,7 @@ Branch: REL9_3_STABLE [07987304d] 2017-05-07 11:35:05 -0400 Branch: REL9_2_STABLE [9061680f0] 2017-05-07 11:35:11 -0400 --> - Improve performance of pg_timezone_names view + Improve performance of pg_timezone_names view (Tom Lane, David Rowley) @@ -1947,13 +5170,13 @@ Branch: REL9_3_STABLE [3f613c6a4] 2017-02-21 17:51:28 -0500 Branch: REL9_2_STABLE [775227590] 2017-02-21 17:51:28 -0500 --> - Fix sloppy handling of corner-case errors from lseek() - and close() (Tom Lane) + Fix sloppy handling of corner-case errors from lseek() + and close() (Tom Lane) Neither of these system calls are likely to fail in typical situations, - but if they did, fd.c could get quite confused. + but if they did, fd.c could get quite confused. 
@@ -1994,8 +5217,8 @@ Branch: REL9_3_STABLE [04207ef76] 2017-03-13 20:52:05 +0100 Branch: REL9_2_STABLE [d8c207437] 2017-03-13 20:52:16 +0100 --> - Fix ecpg to support COMMIT PREPARED - and ROLLBACK PREPARED (Masahiko Sawada) + Fix ecpg to support COMMIT PREPARED + and ROLLBACK PREPARED (Masahiko Sawada) @@ -2011,7 +5234,7 @@ Branch: REL9_2_STABLE [731afc91f] 2017-03-10 10:52:01 +0100 --> Fix a double-free error when processing dollar-quoted string literals - in ecpg (Michael Meskes) + in ecpg (Michael Meskes) @@ -2021,8 +5244,8 @@ Author: Teodor Sigaev Branch: REL9_6_STABLE [2ed391f95] 2017-03-24 19:23:13 +0300 --> - Fix pgbench to handle the combination - of and options correctly (Fabien Coelho) @@ -2034,8 +5257,8 @@ Branch: master [ef2662394] 2017-03-07 11:36:42 -0500 Branch: REL9_6_STABLE [0e2c85d13] 2017-03-07 11:36:35 -0500 --> - Fix pgbench to honor the long-form option - spelling , as per its documentation (Tom Lane) @@ -2046,15 +5269,15 @@ Branch: master [330b84d8c] 2017-03-06 23:29:02 -0500 Branch: REL9_6_STABLE [e961341cc] 2017-03-06 23:29:08 -0500 --> - Fix pg_dump/pg_restore to correctly - handle privileges for the public schema when - using option (Stephen Frost) Other schemas start out with no privileges granted, - but public does not; this requires special-case treatment - when it is dropped and restored due to the option. @@ -2069,7 +5292,7 @@ Branch: REL9_3_STABLE [783acfd4d] 2017-03-06 19:33:59 -0500 Branch: REL9_2_STABLE [0ab75448e] 2017-03-06 19:33:59 -0500 --> - In pg_dump, fix incorrect schema and owner marking for + In pg_dump, fix incorrect schema and owner marking for comments and security labels of some types of database objects (Giuseppe Broccolo, Tom Lane) @@ -2089,12 +5312,12 @@ Branch: master [39370e6a0] 2017-02-17 15:06:28 -0500 Branch: REL9_6_STABLE [4e8b2fd33] 2017-02-17 15:06:34 -0500 --> - Fix typo in pg_dump's query for initial privileges + Fix typo in pg_dump's query for initial privileges of a procedural language (Peter Eisentraut) - This resulted in pg_dump always believing that the + This resulted in pg_dump always believing that the language had no initial privileges. Since that's true for most procedural languages, ill effects from this bug are probably rare. @@ -2111,13 +5334,13 @@ Branch: REL9_3_STABLE [0c0a95c2f] 2017-03-10 14:15:09 -0500 Branch: REL9_2_STABLE [e6d2ba419] 2017-03-10 14:15:09 -0500 --> - Avoid emitting an invalid list file in pg_restore -l + Avoid emitting an invalid list file in pg_restore -l when SQL object names contain newlines (Tom Lane) Replace newlines by spaces, which is sufficient to make the output - valid for pg_restore -L's purposes. + valid for pg_restore -L's purposes. @@ -2132,8 +5355,8 @@ Branch: REL9_3_STABLE [7f831f09b] 2017-03-06 17:04:29 -0500 Branch: REL9_2_STABLE [e864cd25b] 2017-03-06 17:04:55 -0500 --> - Fix pg_upgrade to transfer comments and security labels - attached to large objects (blobs) (Stephen Frost) + Fix pg_upgrade to transfer comments and security labels + attached to large objects (blobs) (Stephen Frost) @@ -2154,13 +5377,13 @@ Branch: REL9_2_STABLE [0276da5eb] 2017-03-12 19:36:28 -0400 --> Improve error handling - in contrib/adminpack's pg_file_write() + in contrib/adminpack's pg_file_write() function (Noah Misch) Notably, it failed to detect errors reported - by fclose(). + by fclose(). 
@@ -2175,7 +5398,7 @@ Branch: REL9_3_STABLE [f6cfc14e5] 2017-03-11 13:33:22 -0800 Branch: REL9_2_STABLE [c4613c3f4] 2017-03-11 13:33:30 -0800 --> - In contrib/dblink, avoid leaking the previous unnamed + In contrib/dblink, avoid leaking the previous unnamed connection when establishing a new unnamed connection (Joe Conway) @@ -2200,7 +5423,7 @@ Branch: REL9_4_STABLE [b179684c7] 2017-04-13 17:18:35 -0400 Branch: REL9_3_STABLE [5be58cc89] 2017-04-13 17:18:35 -0400 --> - Fix contrib/pg_trgm's extraction of trigrams from regular + Fix contrib/pg_trgm's extraction of trigrams from regular expressions (Tom Lane) @@ -2218,7 +5441,7 @@ Branch: master [332bec1e6] 2017-04-24 22:50:07 -0400 Branch: REL9_6_STABLE [86e640a69] 2017-04-26 09:14:21 -0400 --> - In contrib/postgres_fdw, allow join conditions that + In contrib/postgres_fdw, allow join conditions that contain shippable extension-provided functions to be pushed to the remote server (David Rowley, Ashutosh Bapat) @@ -2276,7 +5499,7 @@ Branch: REL9_3_STABLE [dc93cafca] 2017-05-01 11:54:02 -0400 Branch: REL9_2_STABLE [c96ccc40e] 2017-05-01 11:54:08 -0400 --> - Update time zone data files to tzdata release 2017b + Update time zone data files to tzdata release 2017b for DST law changes in Chile, Haiti, and Mongolia, plus historical corrections for Ecuador, Kazakhstan, Liberia, and Spain. Switch to numeric abbreviations for numerous time zones in South @@ -2290,9 +5513,9 @@ Branch: REL9_2_STABLE [c96ccc40e] 2017-05-01 11:54:08 -0400 or no currency among the local population. They are in process of reversing that policy in favor of using numeric UTC offsets in zones where there is no evidence of real-world use of an English - abbreviation. At least for the time being, PostgreSQL + abbreviation. At least for the time being, PostgreSQL will continue to accept such removed abbreviations for timestamp input. - But they will not be shown in the pg_timezone_names + But they will not be shown in the pg_timezone_names view nor used for output. @@ -2314,16 +5537,16 @@ Branch: REL9_2_STABLE [82e7d3dfd] 2017-05-07 11:57:41 -0400 The Microsoft MSVC build scripts neglected to install - the posixrules file in the timezone directory tree. + the posixrules file in the timezone directory tree. This resulted in the timezone code falling back to its built-in rule about what DST behavior to assume for a POSIX-style time zone name. For historical reasons that still corresponds to the DST rules the USA was using before 2007 (i.e., change on first Sunday in April and last Sunday in October). With this fix, a POSIX-style zone name will use the current and historical DST transition dates of - the US/Eastern zone. If you don't want that, remove - the posixrules file, or replace it with a copy of some - other zone file (see ). Note that + the US/Eastern zone. If you don't want that, remove + the posixrules file, or replace it with a copy of some + other zone file (see ). Note that due to caching, you may need to restart the server to get such changes to take effect. @@ -2345,7 +5568,7 @@ Branch: REL9_2_STABLE [82e7d3dfd] 2017-05-07 11:57:41 -0400 This release contains a variety of fixes from 9.6.1. For information about new features in the 9.6 major release, see - . + . @@ -2363,7 +5586,7 @@ Branch: REL9_2_STABLE [82e7d3dfd] 2017-05-07 11:57:41 -0400 Also, if you are upgrading from a version earlier than 9.6.1, - see . + see . 
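To illustrate the contrib/dblink item above: the fix concerns the unnamed-connection pattern sketched below (connection strings are hypothetical). Opening a second unnamed connection replaces the first, and previously the replaced connection was leaked:

    CREATE EXTENSION IF NOT EXISTS dblink;
    SELECT dblink_connect('dbname=postgres');   -- first unnamed connection
    SELECT dblink_connect('dbname=template1');  -- replaces it; no longer leaks the old one
    SELECT dblink_disconnect();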
@@ -2384,15 +5607,15 @@ Branch: REL9_2_STABLE [bcd7b47c2] 2017-02-06 13:20:25 -0500 --> Fix a race condition that could cause indexes built - with CREATE INDEX CONCURRENTLY to be corrupt + with CREATE INDEX CONCURRENTLY to be corrupt (Pavan Deolasee, Tom Lane) - If CREATE INDEX CONCURRENTLY was used to build an index + If CREATE INDEX CONCURRENTLY was used to build an index that depends on a column not previously indexed, then rows updated by transactions that ran concurrently with - the CREATE INDEX command could have received incorrect + the CREATE INDEX command could have received incorrect index entries. If you suspect this may have happened, the most reliable solution is to rebuild affected indexes after installing this update. @@ -2416,7 +5639,7 @@ Branch: REL9_4_STABLE [3e844a34b] 2016-11-15 15:55:36 -0500 Backends failed to account for this snapshot when advertising their oldest xmin, potentially allowing concurrent vacuuming operations to remove data that was still needed. This led to transient failures - along the lines of cache lookup failed for relation 1255. + along the lines of cache lookup failed for relation 1255. @@ -2432,7 +5655,7 @@ Branch: REL9_5_STABLE [ed8e8b814] 2017-01-09 18:19:29 -0300 - The WAL record emitted for a BRIN revmap page when moving an + The WAL record emitted for a BRIN revmap page when moving an index tuple to a different page was incorrect. Replay would make the related portion of the index useless, forcing it to be recomputed. @@ -2449,13 +5672,13 @@ Branch: REL9_3_STABLE [8e403f215] 2016-12-08 14:16:47 -0500 Branch: REL9_2_STABLE [a00ac6299] 2016-12-08 14:19:25 -0500 --> - Unconditionally WAL-log creation of the init fork for an + Unconditionally WAL-log creation of the init fork for an unlogged table (Michael Paquier) - Previously, this was skipped when - = minimal, but actually it's necessary even in that case + Previously, this was skipped when + = minimal, but actually it's necessary even in that case to ensure that the unlogged table is properly reset to empty after a crash. @@ -2537,8 +5760,8 @@ Branch: master [93eb619cd] 2016-12-17 02:22:15 +0900 Branch: REL9_6_STABLE [6c75fb6b3] 2016-12-17 02:25:47 +0900 --> - Disallow setting the num_sync field to zero in - (Fujii Masao) + Disallow setting the num_sync field to zero in + (Fujii Masao) @@ -2588,7 +5811,7 @@ Branch: REL9_6_STABLE [20064c0ec] 2017-01-29 23:05:09 -0500 --> Fix tracking of initial privileges for extension member objects so - that it works correctly with ALTER EXTENSION ... ADD/DROP + that it works correctly with ALTER EXTENSION ... ADD/DROP (Stephen Frost) @@ -2596,7 +5819,7 @@ Branch: REL9_6_STABLE [20064c0ec] 2017-01-29 23:05:09 -0500 An object's current privileges at the time it is added to the extension will now be considered its default privileges; only later changes in its privileges will be dumped by - subsequent pg_dump runs. + subsequent pg_dump runs. @@ -2611,13 +5834,13 @@ Branch: REL9_3_STABLE [8f67a6c22] 2016-11-23 13:45:56 -0500 Branch: REL9_2_STABLE [05975ab0a] 2016-11-23 13:45:56 -0500 --> - Make sure ALTER TABLE preserves index tablespace + Make sure ALTER TABLE preserves index tablespace assignments when rebuilding indexes (Tom Lane, Michael Paquier) Previously, non-default settings - of could result in broken + of could result in broken indexes. 
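For the CREATE INDEX CONCURRENTLY item above, one way to rebuild a possibly affected index without taking a long lock is sketched below; table, column, and index names are hypothetical, and a plain REINDEX (which does take a lock) is the simpler alternative:

    CREATE INDEX CONCURRENTLY idx_orders_note_new ON orders (note);
    DROP INDEX CONCURRENTLY idx_orders_note;            -- the possibly corrupt index
    ALTER INDEX idx_orders_note_new RENAME TO idx_orders_note;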
@@ -2633,7 +5856,7 @@ Branch: REL9_4_STABLE [3a9a8c408] 2016-10-26 17:05:06 -0400 Fix incorrect updating of trigger function properties when changing a foreign-key constraint's deferrability properties with ALTER - TABLE ... ALTER CONSTRAINT (Tom Lane) + TABLE ... ALTER CONSTRAINT (Tom Lane) @@ -2658,8 +5881,8 @@ Branch: REL9_2_STABLE [6a363a4c2] 2016-11-25 13:44:48 -0500 - This avoids could not find trigger NNN - or relation NNN has no triggers errors. + This avoids could not find trigger NNN + or relation NNN has no triggers errors. @@ -2671,15 +5894,15 @@ Branch: REL9_6_STABLE [4e563a1f6] 2017-01-09 19:26:58 -0300 Branch: REL9_5_STABLE [4d4ab6ccd] 2017-01-09 19:26:58 -0300 --> - Fix ALTER TABLE ... SET DATA TYPE ... USING when child + Fix ALTER TABLE ... SET DATA TYPE ... USING when child table has different column ordering than the parent (Álvaro Herrera) - Failure to adjust the column numbering in the USING + Failure to adjust the column numbering in the USING expression led to errors, - typically attribute N has wrong type. + typically attribute N has wrong type. @@ -2695,7 +5918,7 @@ Branch: REL9_2_STABLE [6c4cf2be8] 2017-01-04 18:00:12 -0500 --> Fix processing of OID column when a table with OIDs is associated to - a parent with OIDs via ALTER TABLE ... INHERIT (Amit + a parent with OIDs via ALTER TABLE ... INHERIT (Amit Langote) @@ -2713,8 +5936,8 @@ Branch: master [1ead0208b] 2016-12-22 16:23:38 -0500 Branch: REL9_6_STABLE [68330c8b4] 2016-12-22 16:23:34 -0500 --> - Ensure that CREATE TABLE ... LIKE ... WITH OIDS creates - a table with OIDs, whether or not the LIKE-referenced + Ensure that CREATE TABLE ... LIKE ... WITH OIDS creates + a table with OIDs, whether or not the LIKE-referenced table(s) have OIDs (Tom Lane) @@ -2728,7 +5951,7 @@ Branch: REL9_5_STABLE [78a98b767] 2016-12-21 17:02:47 +0000 Branch: REL9_4_STABLE [cad24980e] 2016-12-21 17:03:54 +0000 --> - Fix CREATE OR REPLACE VIEW to update the view query + Fix CREATE OR REPLACE VIEW to update the view query before attempting to apply the new view options (Dean Rasheed) @@ -2749,7 +5972,7 @@ Branch: REL9_3_STABLE [0e3aadb68] 2016-12-22 17:09:00 -0500 --> Report correct object identity during ALTER TEXT SEARCH - CONFIGURATION (Artur Zakirov) + CONFIGURATION (Artur Zakirov) @@ -2767,8 +5990,8 @@ Branch: REL9_5_STABLE [7816d1356] 2016-11-24 15:39:55 -0300 --> Fix commit timestamp mechanism to not fail when queried about - the special XIDs FrozenTransactionId - and BootstrapTransactionId (Craig Ringer) + the special XIDs FrozenTransactionId + and BootstrapTransactionId (Craig Ringer) @@ -2789,8 +6012,8 @@ Branch: REL9_5_STABLE [6e00ba1e1] 2016-11-10 15:00:58 -0500 The symptom was spurious ON CONFLICT is not supported on table - ... used as a catalog table errors when the target - of INSERT ... ON CONFLICT is a view with cascade option. + ... used as a catalog table errors when the target + of INSERT ... ON CONFLICT is a view with cascade option. 
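A sketch of the ALTER TABLE ... SET DATA TYPE ... USING case above, using hypothetical parent/child tables whose column ordering differs. Before the fix, the recursive rewrite of the child could fail with an "attribute N has wrong type" error because the USING expression was not renumbered for the child's ordering:

    CREATE TABLE parent (a int, b text);
    CREATE TABLE child  (b text, a int);
    ALTER TABLE child INHERIT parent;
    -- recurses to child; should now succeed despite the different column order
    ALTER TABLE parent ALTER COLUMN a SET DATA TYPE bigint USING a + 1;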
@@ -2802,8 +6025,8 @@ Branch: REL9_6_STABLE [da05d0ebc] 2016-12-04 15:02:46 -0500 Branch: REL9_5_STABLE [25c06a1ed] 2016-12-04 15:02:48 -0500 --> - Fix incorrect target lists can have at most N - entries complaint when using ON CONFLICT with + Fix incorrect target lists can have at most N + entries complaint when using ON CONFLICT with wide tables (Tom Lane) @@ -2815,8 +6038,8 @@ Branch: master [da8f3ebf3] 2016-11-02 14:32:13 -0400 Branch: REL9_6_STABLE [f4d865f22] 2016-11-02 14:32:13 -0400 --> - Fix spurious query provides a value for a dropped column - errors during INSERT or UPDATE on a table + Fix spurious query provides a value for a dropped column + errors during INSERT or UPDATE on a table with a dropped column (Tom Lane) @@ -2831,13 +6054,13 @@ Branch: REL9_4_STABLE [44c8b4fcd] 2016-11-20 14:26:19 -0500 Branch: REL9_3_STABLE [71db302ec] 2016-11-20 14:26:19 -0500 --> - Prevent multicolumn expansion of foo.* in - an UPDATE source expression (Tom Lane) + Prevent multicolumn expansion of foo.* in + an UPDATE source expression (Tom Lane) This led to UPDATE target count mismatch --- internal - error. Now the syntax is understood as a whole-row variable, + error. Now the syntax is understood as a whole-row variable, as it would be in other contexts. @@ -2854,12 +6077,12 @@ Branch: REL9_2_STABLE [082d1fb9e] 2016-12-09 12:01:14 -0500 --> Ensure that column typmods are determined accurately for - multi-row VALUES constructs (Tom Lane) + multi-row VALUES constructs (Tom Lane) This fixes problems occurring when the first value in a column has a - determinable typmod (e.g., length for a varchar value) but + determinable typmod (e.g., length for a varchar value) but later values don't share the same limit. @@ -2883,8 +6106,8 @@ Branch: REL9_2_STABLE [6e2c21ec5] 2016-12-21 17:39:33 -0500 Normally, a Unicode surrogate leading character must be followed by a Unicode surrogate trailing character, but the check for this was missed if the leading character was the last character in a Unicode - string literal (U&'...') or Unicode identifier - (U&"..."). + string literal (U&'...') or Unicode identifier + (U&"..."). 
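A small sketch of the multi-row VALUES typmod item above (column alias hypothetical): the first row has a determinable varchar(3) typmod while the second does not share that limit, which is exactly the corner case the fix addresses:

    -- previously the column's typmod was taken from the first row alone,
    -- misrepresenting the longer value in the second row
    SELECT * FROM (VALUES ('abc'::varchar(3)), ('0123456789')) AS v(col);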
@@ -2895,7 +6118,7 @@ Branch: master [db80acfc9] 2016-12-20 09:20:17 +0200 Branch: REL9_6_STABLE [ce92fc4e2] 2016-12-20 09:20:30 +0200 --> - Fix execution of DISTINCT and ordered aggregates when + Fix execution of DISTINCT and ordered aggregates when multiple such aggregates are able to share the same transition state (Heikki Linnakangas) @@ -2910,7 +6133,7 @@ Branch: master [260443847] 2016-12-19 13:49:50 -0500 Branch: REL9_6_STABLE [3f07eff10] 2016-12-19 13:49:45 -0500 --> - Fix implementation of phrase search operators in tsquery + Fix implementation of phrase search operators in tsquery (Tom Lane) @@ -2939,7 +6162,7 @@ Branch: REL9_2_STABLE [fe6120f9b] 2017-01-26 12:17:47 -0500 --> Ensure that a purely negative text search query, such - as !foo, matches empty tsvectors (Tom Dunstan) + as !foo, matches empty tsvectors (Tom Dunstan) @@ -2959,7 +6182,7 @@ Branch: REL9_3_STABLE [79e1a9efa] 2016-12-11 13:09:57 -0500 Branch: REL9_2_STABLE [f4ccee408] 2016-12-11 13:09:57 -0500 --> - Prevent crash when ts_rewrite() replaces a non-top-level + Prevent crash when ts_rewrite() replaces a non-top-level subtree with an empty query (Artur Zakirov) @@ -2975,7 +6198,7 @@ Branch: REL9_3_STABLE [407d513df] 2016-10-30 17:35:43 -0400 Branch: REL9_2_STABLE [606e16a7f] 2016-10-30 17:35:43 -0400 --> - Fix performance problems in ts_rewrite() (Tom Lane) + Fix performance problems in ts_rewrite() (Tom Lane) @@ -2990,7 +6213,7 @@ Branch: REL9_3_STABLE [77a22f898] 2016-10-30 15:24:40 -0400 Branch: REL9_2_STABLE [b0f8a273e] 2016-10-30 15:24:40 -0400 --> - Fix ts_rewrite()'s handling of nested NOT operators + Fix ts_rewrite()'s handling of nested NOT operators (Tom Lane) @@ -3004,7 +6227,7 @@ Branch: REL9_5_STABLE [7151e72d7] 2016-10-30 12:27:41 -0400 --> Improve speed of user-defined aggregates that - use array_append() as transition function (Tom Lane) + use array_append() as transition function (Tom Lane) @@ -3019,7 +6242,7 @@ Branch: REL9_3_STABLE [ee9cb284a] 2017-01-05 11:33:51 -0500 Branch: REL9_2_STABLE [e0d59c6ef] 2017-01-05 11:33:51 -0500 --> - Fix array_fill() to handle empty arrays properly (Tom Lane) + Fix array_fill() to handle empty arrays properly (Tom Lane) @@ -3031,8 +6254,8 @@ Branch: REL9_6_STABLE [79c89f1f4] 2016-12-09 12:42:17 -0300 Branch: REL9_5_STABLE [581b09c72] 2016-12-09 12:42:17 -0300 --> - Fix possible crash in array_position() - or array_positions() when processing arrays of records + Fix possible crash in array_position() + or array_positions() when processing arrays of records (Junseok Yang) @@ -3048,7 +6271,7 @@ Branch: REL9_3_STABLE [e71fe8470] 2016-12-16 12:53:22 +0200 Branch: REL9_2_STABLE [c8f8ed5c2] 2016-12-16 12:53:27 +0200 --> - Fix one-byte buffer overrun in quote_literal_cstr() + Fix one-byte buffer overrun in quote_literal_cstr() (Heikki Linnakangas) @@ -3069,8 +6292,8 @@ Branch: REL9_3_STABLE [f64b11fa0] 2017-01-17 17:32:20 +0900 Branch: REL9_2_STABLE [c73157ca0] 2017-01-17 17:32:45 +0900 --> - Prevent multiple calls of pg_start_backup() - and pg_stop_backup() from running concurrently (Michael + Prevent multiple calls of pg_start_backup() + and pg_stop_backup() from running concurrently (Michael Paquier) @@ -3089,7 +6312,7 @@ Branch: REL9_5_STABLE [74e67bbad] 2017-01-18 15:21:52 -0500 --> Disable transform that attempted to remove no-op AT TIME - ZONE conversions (Tom Lane) + ZONE conversions (Tom Lane) @@ -3109,15 +6332,15 @@ Branch: REL9_3_STABLE [583599839] 2016-12-27 15:43:54 -0500 Branch: REL9_2_STABLE [beae7d5f0] 2016-12-27 15:43:55 -0500 --> - Avoid discarding 
interval-to-interval casts + Avoid discarding interval-to-interval casts that aren't really no-ops (Tom Lane) In some cases, a cast that should result in zeroing out - low-order interval fields was mistakenly deemed to be a + low-order interval fields was mistakenly deemed to be a no-op and discarded. An example is that casting from INTERVAL - MONTH to INTERVAL YEAR failed to clear the months field. + MONTH to INTERVAL YEAR failed to clear the months field. @@ -3153,7 +6376,7 @@ Branch: master [4212cb732] 2016-12-06 11:11:54 -0500 Branch: REL9_6_STABLE [ebe5dc9e0] 2016-12-06 11:43:12 -0500 --> - Allow statements prepared with PREPARE to be given + Allow statements prepared with PREPARE to be given parallel plans (Amit Kapila, Tobias Bussmann) @@ -3222,7 +6445,7 @@ Branch: REL9_6_STABLE [7defc3b97] 2016-11-10 11:31:56 -0500 --> Fix the plan generated for sorted partial aggregation with a constant - GROUP BY clause (Tom Lane) + GROUP BY clause (Tom Lane) @@ -3233,8 +6456,8 @@ Branch: master [1f542a2ea] 2016-12-13 13:20:37 -0500 Branch: REL9_6_STABLE [997a2994e] 2016-12-13 13:20:16 -0500 --> - Fix could not find plan for CTE planner error when dealing - with a UNION ALL containing CTE references (Tom Lane) + Fix could not find plan for CTE planner error when dealing + with a UNION ALL containing CTE references (Tom Lane) @@ -3251,7 +6474,7 @@ Branch: REL9_6_STABLE [b971a98ce] 2017-02-02 19:11:27 -0500 The typical consequence of this mistake was a plan should not - reference subplan's variable error. + reference subplan's variable error. @@ -3282,7 +6505,7 @@ Branch: master [bec96c82f] 2017-01-19 12:06:21 -0500 Branch: REL9_6_STABLE [fd081cabf] 2017-01-19 12:06:27 -0500 --> - Fix pg_dump to emit the data of a sequence that is + Fix pg_dump to emit the data of a sequence that is marked as an extension configuration table (Michael Paquier) @@ -3294,14 +6517,14 @@ Branch: master [e2090d9d2] 2017-01-31 16:24:11 -0500 Branch: REL9_6_STABLE [eb5e9d90d] 2017-01-31 16:24:14 -0500 --> - Fix mishandling of ALTER DEFAULT PRIVILEGES ... REVOKE - in pg_dump (Stephen Frost) + Fix mishandling of ALTER DEFAULT PRIVILEGES ... REVOKE + in pg_dump (Stephen Frost) - pg_dump missed issuing the - required REVOKE commands in cases where ALTER - DEFAULT PRIVILEGES had been used to reduce privileges to less than + pg_dump missed issuing the + required REVOKE commands in cases where ALTER + DEFAULT PRIVILEGES had been used to reduce privileges to less than they would normally be. @@ -3323,7 +6546,7 @@ Branch: REL9_3_STABLE [fc03f7dd1] 2016-12-21 13:47:28 -0500 Branch: REL9_2_STABLE [59a389891] 2016-12-21 13:47:32 -0500 --> - Fix pg_dump to dump user-defined casts and transforms + Fix pg_dump to dump user-defined casts and transforms that use built-in functions (Stephen Frost) @@ -3337,15 +6560,15 @@ Branch: REL9_5_STABLE [a7864037d] 2016-11-17 14:59:23 -0500 Branch: REL9_4_STABLE [e69b532be] 2016-11-17 14:59:26 -0500 --> - Fix pg_restore with to behave more sanely if an archive contains - unrecognized DROP commands (Tom Lane) + unrecognized DROP commands (Tom Lane) This doesn't fix any live bug, but it may improve the behavior in - future if pg_restore is used with an archive - generated by a later pg_dump version. + future if pg_restore is used with an archive + generated by a later pg_dump version. 
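Illustrating the interval-to-interval cast item at the top of this hunk, using the INTERVAL MONTH to INTERVAL YEAR example cited there; the intermediate cast is written with CAST() only for readability:

    -- should now clear the months field (yielding 1 year) instead of being
    -- discarded as a no-op cast
    SELECT CAST(CAST('14 months' AS interval month) AS interval year);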
@@ -3358,7 +6581,7 @@ Branch: REL9_5_STABLE [bc53d7130] 2016-12-19 10:16:02 +0100 Branch: REL9_4_STABLE [f6508827a] 2016-12-19 10:16:12 +0100 --> - Fix pg_basebackup's rate limiting in the presence of + Fix pg_basebackup's rate limiting in the presence of slow I/O (Antonin Houska) @@ -3377,8 +6600,8 @@ Branch: REL9_5_STABLE [6d779e05a] 2016-11-07 15:03:56 +0100 Branch: REL9_4_STABLE [5556420d4] 2016-11-07 15:04:23 +0100 --> - Fix pg_basebackup's handling of - symlinked pg_stat_tmp and pg_replslot + Fix pg_basebackup's handling of + symlinked pg_stat_tmp and pg_replslot subdirectories (Magnus Hagander, Michael Paquier) @@ -3394,7 +6617,7 @@ Branch: REL9_3_STABLE [92929a3e3] 2016-10-27 12:00:05 -0400 Branch: REL9_2_STABLE [629575fa2] 2016-10-27 12:14:07 -0400 --> - Fix possible pg_basebackup failure on standby + Fix possible pg_basebackup failure on standby server when including WAL files (Amit Kapila, Robert Haas) @@ -3406,10 +6629,10 @@ Branch: master [dbdfd114f] 2016-11-25 18:36:10 -0500 Branch: REL9_6_STABLE [255bcd27f] 2016-11-25 18:36:10 -0500 --> - Improve initdb to insert the correct + Improve initdb to insert the correct platform-specific default values for - the xxx_flush_after parameters - into postgresql.conf (Fabien Coelho, Tom Lane) + the xxx_flush_after parameters + into postgresql.conf (Fabien Coelho, Tom Lane) @@ -3427,7 +6650,7 @@ Branch: REL9_5_STABLE [c472f2a33] 2016-12-22 15:01:39 -0500 --> Fix possible mishandling of expanded arrays in domain check - constraints and CASE execution (Tom Lane) + constraints and CASE execution (Tom Lane) @@ -3483,14 +6706,14 @@ Branch: REL9_3_STABLE [9c0b04f18] 2016-11-06 14:43:14 -0500 Branch: REL9_2_STABLE [92b7b1058] 2016-11-06 14:43:14 -0500 --> - Fix PL/Tcl to support triggers on tables that have .tupno + Fix PL/Tcl to support triggers on tables that have .tupno as a column name (Tom Lane) This matches the (previously undocumented) behavior of - PL/Tcl's spi_exec and spi_execp commands, - namely that a magic .tupno column is inserted only if + PL/Tcl's spi_exec and spi_execp commands, + namely that a magic .tupno column is inserted only if there isn't a real column named that. @@ -3506,7 +6729,7 @@ Branch: REL9_3_STABLE [46b6f3fff] 2016-11-15 16:17:19 -0500 Branch: REL9_2_STABLE [13aa9af37] 2016-11-15 16:17:19 -0500 --> - Allow DOS-style line endings in ~/.pgpass files, + Allow DOS-style line endings in ~/.pgpass files, even on Unix (Vik Fearing) @@ -3527,7 +6750,7 @@ Branch: REL9_3_STABLE [1df8b3fe8] 2016-12-22 08:32:25 +0100 Branch: REL9_2_STABLE [501c91074] 2016-12-22 08:34:07 +0100 --> - Fix one-byte buffer overrun if ecpg is given a file + Fix one-byte buffer overrun if ecpg is given a file name that ends with a dot (Takayuki Tsunakawa) @@ -3540,11 +6763,11 @@ Branch: REL9_6_STABLE [6a8c67f50] 2016-12-25 16:04:47 -0500 --> Fix incorrect error reporting for duplicate data - in psql's \crosstabview (Tom Lane) + in psql's \crosstabview (Tom Lane) - psql sometimes quoted the wrong row and/or column + psql sometimes quoted the wrong row and/or column values when complaining about multiple entries for the same crosstab cell. 
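A loose illustration of the "expanded arrays in domain check constraints" item above, with hypothetical names: assigning an array value to a PL/pgSQL variable of a domain-over-array type passes an expanded array datum through the domain's CHECK expression, which is the general shape of the code path that was fixed:

    CREATE DOMAIN nonempty_ints AS int[] CHECK (cardinality(VALUE) > 0);
    DO $$
    DECLARE
      v nonempty_ints;
    BEGIN
      -- array values held in PL/pgSQL variables use the expanded format
      v := ARRAY[1, 2, 3];
    END
    $$;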
@@ -3561,8 +6784,8 @@ Branch: REL9_3_STABLE [2022d594d] 2016-12-23 21:01:48 -0500 Branch: REL9_2_STABLE [26b55d669] 2016-12-23 21:01:51 -0500 --> - Fix psql's tab completion for ALTER DEFAULT - PRIVILEGES (Gilles Darold, Stephen Frost) + Fix psql's tab completion for ALTER DEFAULT + PRIVILEGES (Gilles Darold, Stephen Frost) @@ -3573,8 +6796,8 @@ Branch: master [404e66758] 2016-11-28 11:51:30 -0500 Branch: REL9_6_STABLE [28735cc72] 2016-11-28 11:51:35 -0500 --> - Fix psql's tab completion for ALTER TABLE t - ALTER c DROP ... (Kyotaro Horiguchi) + Fix psql's tab completion for ALTER TABLE t + ALTER c DROP ... (Kyotaro Horiguchi) @@ -3589,9 +6812,9 @@ Branch: REL9_3_STABLE [82eb5c514] 2016-12-07 12:19:56 -0500 Branch: REL9_2_STABLE [1ec5cc025] 2016-12-07 12:19:57 -0500 --> - In psql, treat an empty or all-blank setting of - the PAGER environment variable as meaning no - pager (Tom Lane) + In psql, treat an empty or all-blank setting of + the PAGER environment variable as meaning no + pager (Tom Lane) @@ -3611,8 +6834,8 @@ Branch: REL9_3_STABLE [9b8507bfa] 2016-12-22 09:47:25 -0800 Branch: REL9_2_STABLE [44de099f8] 2016-12-22 09:46:46 -0800 --> - Improve contrib/dblink's reporting of - low-level libpq errors, such as out-of-memory + Improve contrib/dblink's reporting of + low-level libpq errors, such as out-of-memory (Joe Conway) @@ -3627,14 +6850,14 @@ Branch: REL9_4_STABLE [cb687e0ac] 2016-12-22 09:19:08 -0800 Branch: REL9_3_STABLE [bd46cce21] 2016-12-22 09:18:50 -0800 --> - Teach contrib/dblink to ignore irrelevant server options - when it uses a contrib/postgres_fdw foreign server as + Teach contrib/dblink to ignore irrelevant server options + when it uses a contrib/postgres_fdw foreign server as the source of connection options (Corey Huinker) Previously, if the foreign server object had options that were not - also libpq connection options, an error occurred. + also libpq connection options, an error occurred. @@ -3648,7 +6871,7 @@ Branch: REL9_6_STABLE [2a8783e44] 2016-11-02 00:09:28 -0400 Branch: REL9_5_STABLE [af636d7b5] 2016-11-02 00:09:28 -0400 --> - Fix portability problems in contrib/pageinspect's + Fix portability problems in contrib/pageinspect's functions for GIN indexes (Peter Eisentraut, Tom Lane) @@ -3737,7 +6960,7 @@ Branch: REL9_3_STABLE [2b133be04] 2017-01-30 11:41:02 -0500 Branch: REL9_2_STABLE [ef878cc2c] 2017-01-30 11:41:09 -0500 --> - Update time zone data files to tzdata release 2016j + Update time zone data files to tzdata release 2016j for DST law changes in northern Cyprus (adding a new zone Asia/Famagusta), Russia (adding a new zone Europe/Saratov), Tonga, and Antarctica/Casey. @@ -3762,7 +6985,7 @@ Branch: REL9_2_STABLE [ef878cc2c] 2017-01-30 11:41:09 -0500 This release contains a variety of fixes from 9.6.0. For information about new features in the 9.6 major release, see - . + . @@ -3804,7 +7027,7 @@ Branch: REL9_3_STABLE [1c02ee314] 2016-10-19 15:00:34 +0300 crash recovery, or to be written incorrectly on a standby server. Bogus entries in a free space map could lead to attempts to access pages that have been truncated away from the relation itself, typically - producing errors like could not read block XXX: + producing errors like could not read block XXX: read only 0 of 8192 bytes. Checksum failures in the visibility map are also possible, if checksumming is enabled. @@ -3812,7 +7035,7 @@ Branch: REL9_3_STABLE [1c02ee314] 2016-10-19 15:00:34 +0300 Procedures for determining whether there is a problem and repairing it if so are discussed at - . + . 
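A sketch of the dblink-over-postgres_fdw item above, with hypothetical server, connection, and option values. fetch_size is a postgres_fdw-only option rather than a libpq connection option, so previously its presence made dblink_connect() fail; it is now ignored:

    CREATE EXTENSION IF NOT EXISTS postgres_fdw;
    CREATE EXTENSION IF NOT EXISTS dblink;
    CREATE SERVER remote_srv FOREIGN DATA WRAPPER postgres_fdw
      OPTIONS (host 'localhost', dbname 'postgres', fetch_size '100');
    CREATE USER MAPPING FOR CURRENT_USER SERVER remote_srv;
    -- uses the foreign server's options as the connection source
    SELECT dblink_connect('rconn', 'remote_srv');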
@@ -3823,7 +7046,7 @@ Branch: master [5afcd2aa7] 2016-09-30 20:40:55 -0400 Branch: REL9_6_STABLE [b6d906073] 2016-09-30 20:39:06 -0400 --> - Fix possible data corruption when pg_upgrade rewrites + Fix possible data corruption when pg_upgrade rewrites a relation visibility map into 9.6 format (Tom Lane) @@ -3833,20 +7056,20 @@ Branch: REL9_6_STABLE [b6d906073] 2016-09-30 20:39:06 -0400 Windows, the old map was read using text mode, leading to incorrect results if the map happened to contain consecutive bytes that matched a carriage return/line feed sequence. The latter error would almost - always lead to a pg_upgrade failure due to the map + always lead to a pg_upgrade failure due to the map file appearing to be the wrong length. If you are using a big-endian machine (many non-Intel architectures - are big-endian) and have used pg_upgrade to upgrade + are big-endian) and have used pg_upgrade to upgrade from a pre-9.6 release, you should assume that all visibility maps are incorrect and need to be regenerated. It is sufficient to truncate each relation's visibility map - with contrib/pg_visibility's - pg_truncate_visibility_map() function. + with contrib/pg_visibility's + pg_truncate_visibility_map() function. For more information see - . + . @@ -3859,7 +7082,7 @@ Branch: REL9_5_STABLE [65d85b8f9] 2016-10-23 18:36:13 -0400 --> Don't throw serialization errors for self-conflicting insertions - in INSERT ... ON CONFLICT (Thomas Munro, Peter Geoghegan) + in INSERT ... ON CONFLICT (Thomas Munro, Peter Geoghegan) @@ -3871,7 +7094,7 @@ Branch: REL9_6_STABLE [a5f0bd77a] 2016-10-17 12:13:35 +0300 --> Fix use-after-free hazard in execution of aggregate functions - using DISTINCT (Peter Geoghegan) + using DISTINCT (Peter Geoghegan) @@ -3906,7 +7129,7 @@ Branch: REL9_6_STABLE [190765a05] 2016-10-03 16:23:02 -0400 Branch: REL9_5_STABLE [647a86e37] 2016-10-03 16:23:12 -0400 --> - Fix COPY with a column name list from a table that has + Fix COPY with a column name list from a table that has row-level security enabled (Adam Brightwell) @@ -3922,14 +7145,14 @@ Branch: REL9_3_STABLE [edb514306] 2016-10-20 17:18:09 -0400 Branch: REL9_2_STABLE [f17c26dbd] 2016-10-20 17:18:14 -0400 --> - Fix EXPLAIN to emit valid XML when - is on (Markus Winand) + Fix EXPLAIN to emit valid XML when + is on (Markus Winand) Previously the XML output-format option produced syntactically invalid - tags such as <I/O-Read-Time>. That is now - rendered as <I-O-Read-Time>. + tags such as <I/O-Read-Time>. That is now + rendered as <I-O-Read-Time>. @@ -3941,7 +7164,7 @@ Branch: REL9_6_STABLE [03f2bf70a] 2016-10-13 19:46:06 -0400 Branch: REL9_5_STABLE [3cd504254] 2016-10-13 19:45:58 -0400 --> - Fix statistics update for TRUNCATE in a prepared + Fix statistics update for TRUNCATE in a prepared transaction (Stas Kelvich) @@ -3963,16 +7186,16 @@ Branch: REL9_3_STABLE [f0bf0f233] 2016-10-13 17:05:15 -0400 Branch: REL9_2_STABLE [6f2db29ec] 2016-10-13 17:05:15 -0400 --> - Fix bugs in merging inherited CHECK constraints while + Fix bugs in merging inherited CHECK constraints while creating or altering a table (Tom Lane, Amit Langote) - Allow identical CHECK constraints to be added to a parent + Allow identical CHECK constraints to be added to a parent and child table in either order. Prevent merging of a valid - constraint from the parent table with a NOT VALID + constraint from the parent table with a NOT VALID constraint on the child. Likewise, prevent merging of a NO - INHERIT child constraint with an inherited constraint. 
+ INHERIT child constraint with an inherited constraint. @@ -3985,8 +7208,8 @@ Branch: REL9_5_STABLE [f50fa46cc] 2016-10-03 16:40:27 -0400 --> Show a sensible value - in pg_settings.unit - for min_wal_size and max_wal_size (Tom Lane) + in pg_settings.unit + for min_wal_size and max_wal_size (Tom Lane) @@ -3997,7 +7220,7 @@ Branch: master [9c4cc9e2c] 2016-10-13 00:25:48 -0400 Branch: REL9_6_STABLE [0e9e64c07] 2016-10-13 00:25:28 -0400 --> - Fix replacement of array elements in jsonb_set() + Fix replacement of array elements in jsonb_set() (Tom Lane) @@ -4041,7 +7264,7 @@ Branch: REL9_5_STABLE [7a2fa5774] 2016-10-24 09:38:28 -0300 - With turned on, old + With turned on, old commit timestamps became inaccessible after a clean server restart. @@ -4085,7 +7308,7 @@ Branch: REL9_4_STABLE [6d3cbbf59] 2016-10-13 15:07:11 -0400 - This avoids possible failures during munmap() on systems + This avoids possible failures during munmap() on systems with atypical default huge page sizes. Except in crash-recovery cases, there were no ill effects other than a log message. @@ -4111,7 +7334,7 @@ Branch: REL9_1_STABLE [e84e4761f] 2016-10-07 12:53:51 +0300 --> Don't try to share SSL contexts across multiple connections - in libpq (Heikki Linnakangas) + in libpq (Heikki Linnakangas) @@ -4132,12 +7355,12 @@ Branch: REL9_2_STABLE [7397f62e7] 2016-10-10 10:35:58 -0400 Branch: REL9_1_STABLE [fb6825fe5] 2016-10-10 10:35:58 -0400 --> - Avoid corner-case memory leak in libpq (Tom Lane) + Avoid corner-case memory leak in libpq (Tom Lane) The reported problem involved leaking an error report - during PQreset(), but there might be related cases. + during PQreset(), but there might be related cases. @@ -4149,7 +7372,7 @@ Branch: REL9_6_STABLE [bac56dbe0] 2016-10-03 10:07:39 -0400 Branch: REL9_5_STABLE [0f259bd17] 2016-10-03 10:07:39 -0400 --> - In pg_upgrade, check library loadability in name order + In pg_upgrade, check library loadability in name order (Tom Lane) @@ -4167,13 +7390,13 @@ Branch: master [e8bdee277] 2016-10-02 14:31:28 -0400 Branch: REL9_6_STABLE [f40334b85] 2016-10-02 14:31:28 -0400 --> - Fix pg_upgrade to work correctly for extensions + Fix pg_upgrade to work correctly for extensions containing index access methods (Tom Lane) To allow this, the server has been extended to support ALTER - EXTENSION ADD/DROP ACCESS METHOD. That functionality should have + EXTENSION ADD/DROP ACCESS METHOD. That functionality should have been included in the original patch to support dynamic creation of access methods, but it was overlooked. 
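For the jsonb_set() item above, the affected operation is array-element replacement along these lines (values hypothetical):

    -- replace the element at index 1, giving ["a", "B", "c"]
    SELECT jsonb_set('["a", "b", "c"]'::jsonb, '{1}', '"B"');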
@@ -4186,7 +7409,7 @@ Branch: master [f002ed2b8] 2016-09-30 20:40:56 -0400 Branch: REL9_6_STABLE [53fbeed40] 2016-09-30 20:40:27 -0400 --> - Improve error reporting in pg_upgrade's file + Improve error reporting in pg_upgrade's file copying/linking/rewriting steps (Tom Lane, Álvaro Herrera) @@ -4198,7 +7421,7 @@ Branch: master [4806f26f9] 2016-10-07 09:51:18 -0400 Branch: REL9_6_STABLE [1749332ec] 2016-10-07 09:51:28 -0400 --> - Fix pg_dump to work against pre-7.4 servers + Fix pg_dump to work against pre-7.4 servers (Amit Langote, Tom Lane) @@ -4211,8 +7434,8 @@ Branch: REL9_6_STABLE [2933ed036] 2016-10-07 14:35:41 +0300 Branch: REL9_5_STABLE [010a1b561] 2016-10-07 14:35:45 +0300 --> - Disallow specifying both @@ -4225,12 +7448,12 @@ Branch: REL9_6_STABLE [aab809664] 2016-10-06 13:34:38 +0300 Branch: REL9_5_STABLE [69da71254] 2016-10-06 13:34:32 +0300 --> - Make pg_rewind turn off synchronous_commit + Make pg_rewind turn off synchronous_commit in its session on the source server (Michael Banck, Michael Paquier) - This allows pg_rewind to work even when the source + This allows pg_rewind to work even when the source server is using synchronous replication that is not working for some reason. @@ -4246,8 +7469,8 @@ Branch: REL9_4_STABLE [da3f71a08] 2016-09-30 11:22:49 +0200 Branch: REL9_3_STABLE [4bff35cca] 2016-09-30 11:23:25 +0200 --> - In pg_xlogdump, retry opening new WAL segments when - using option (Magnus Hagander) @@ -4263,7 +7486,7 @@ Branch: master [9a109452d] 2016-10-01 16:32:54 -0400 Branch: REL9_6_STABLE [f4e787c82] 2016-10-01 16:32:55 -0400 --> - Fix contrib/pg_visibility to report the correct TID for + Fix contrib/pg_visibility to report the correct TID for a corrupt tuple that has been the subject of a rolled-back update (Tom Lane) @@ -4277,7 +7500,7 @@ Branch: REL9_6_STABLE [68fb75e10] 2016-10-01 13:35:20 -0400 --> Fix makefile dependencies so that parallel make - of PL/Python by itself will succeed reliably + of PL/Python by itself will succeed reliably (Pavel Raiskup) @@ -4315,7 +7538,7 @@ Branch: REL9_2_STABLE [a03339aef] 2016-10-19 17:57:01 -0400 Branch: REL9_1_STABLE [22cf97635] 2016-10-19 17:57:06 -0400 --> - Update time zone data files to tzdata release 2016h + Update time zone data files to tzdata release 2016h for DST law changes in Palestine and Turkey, plus historical corrections for Turkey and some regions of Russia. Switch to numeric abbreviations for some time zones in Antarctica, @@ -4328,15 +7551,15 @@ Branch: REL9_1_STABLE [22cf97635] 2016-10-19 17:57:06 -0400 or no currency among the local population. They are in process of reversing that policy in favor of using numeric UTC offsets in zones where there is no evidence of real-world use of an English - abbreviation. At least for the time being, PostgreSQL + abbreviation. At least for the time being, PostgreSQL will continue to accept such removed abbreviations for timestamp input. - But they will not be shown in the pg_timezone_names + But they will not be shown in the pg_timezone_names view nor used for output. - In this update, AMT is no longer shown as being in use to - mean Armenia Time. Therefore, we have changed the Default + In this update, AMT is no longer shown as being in use to + mean Armenia Time. Therefore, we have changed the Default abbreviation set to interpret it as Amazon Time, thus UTC-4 not UTC+4. 
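To make the AMT abbreviation change above concrete (timestamp value hypothetical, and the displayed result depends on the session TimeZone): with the Default abbreviation set, AMT on input is now read as Amazon Time, UTC-4, rather than Armenia Time, UTC+4:

    SET timezone_abbreviations = 'Default';
    SELECT '2016-11-01 12:00 AMT'::timestamptz;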
@@ -4358,7 +7581,7 @@ Branch: REL9_1_STABLE [22cf97635] 2016-10-19 17:57:06 -0400 Overview - Major enhancements in PostgreSQL 9.6 include: + Major enhancements in PostgreSQL 9.6 include: @@ -4392,15 +7615,15 @@ Branch: REL9_1_STABLE [22cf97635] 2016-10-19 17:57:06 -0400 - postgres_fdw now supports remote joins, sorts, - UPDATEs, and DELETEs + postgres_fdw now supports remote joins, sorts, + UPDATEs, and DELETEs Substantial performance improvements, especially in the area of - scalability on multi-CPU-socket servers + scalability on multi-CPU-socket servers @@ -4417,8 +7640,8 @@ Branch: REL9_1_STABLE [22cf97635] 2016-10-19 17:57:06 -0400 Migration to Version 9.6 - A dump/restore using , or use of , is required for those wishing to migrate data + A dump/restore using , or use of , is required for those wishing to migrate data from any previous release. @@ -4435,7 +7658,7 @@ Branch: REL9_1_STABLE [22cf97635] 2016-10-19 17:57:06 -0400 --> Improve the pg_stat_activity + linkend="pg-stat-activity-view">pg_stat_activity view's information about what a process is waiting for (Amit Kapila, Ildus Kurbangaliev) @@ -4443,10 +7666,10 @@ Branch: REL9_1_STABLE [22cf97635] 2016-10-19 17:57:06 -0400 Historically a process has only been shown as waiting if it was waiting for a heavyweight lock. Now waits for lightweight locks - and buffer pins are also shown in pg_stat_activity. + and buffer pins are also shown in pg_stat_activity. Also, the type of lock being waited for is now visible. - These changes replace the waiting column with - wait_event_type and wait_event. + These changes replace the waiting column with + wait_event_type and wait_event. @@ -4456,14 +7679,14 @@ Branch: REL9_1_STABLE [22cf97635] 2016-10-19 17:57:06 -0400 --> In to_char(), + linkend="functions-formatting-table">to_char(), do not count a minus sign (when needed) as part of the field width for time-related fields (Bruce Momjian) - For example, to_char('-4 years'::interval, 'YY') - now returns -04, rather than -4. + For example, to_char('-4 years'::interval, 'YY') + now returns -04, rather than -4. @@ -4473,18 +7696,18 @@ Branch: REL9_1_STABLE [22cf97635] 2016-10-19 17:57:06 -0400 --> Make extract() behave + linkend="functions-datetime-table">extract() behave more reasonably with infinite inputs (Vitaly Burovoy) - Historically the extract() function just returned + Historically the extract() function just returned zero given an infinite timestamp, regardless of the given field name. Make it return infinity or -infinity as appropriate when the requested field is one that is monotonically increasing (e.g, - year, epoch), or NULL when - it is not (e.g., day, hour). Also, + year, epoch), or NULL when + it is not (e.g., day, hour). Also, throw the expected error for bad field names. @@ -4495,9 +7718,9 @@ Branch: REL9_1_STABLE [22cf97635] 2016-10-19 17:57:06 -0400 This commit is also listed under libpq and psql --> - Remove PL/pgSQL's feature that suppressed the - innermost line of CONTEXT for messages emitted by - RAISE commands (Pavel Stehule) + Remove PL/pgSQL's feature that suppressed the + innermost line of CONTEXT for messages emitted by + RAISE commands (Pavel Stehule) @@ -4512,13 +7735,13 @@ This commit is also listed under libpq and psql --> Fix the default text search parser to allow leading digits - in email and host tokens (Artur Zakirov) + in email and host tokens (Artur Zakirov) In most cases this will result in few changes in the parsing of text. 
But if you have data where such addresses occur frequently, - it may be worth rebuilding dependent tsvector columns + it may be worth rebuilding dependent tsvector columns and indexes so that addresses of this form will be found properly by text searches. @@ -4530,8 +7753,8 @@ This commit is also listed under libpq and psql 2016-03-16 [9a206d063] Improve script generating unaccent rules --> - Extend contrib/unaccent's - standard unaccent.rules file to handle all diacritics + Extend contrib/unaccent's + standard unaccent.rules file to handle all diacritics known to Unicode, and to expand ligatures correctly (Thomas Munro, Léonard Benedetti) @@ -4540,7 +7763,7 @@ This commit is also listed under libpq and psql The previous version neglected to convert some less-common letters with diacritic marks. Also, ligatures are now expanded into separate letters. Installations that use this rules file may wish - to rebuild tsvector columns and indexes that depend on the + to rebuild tsvector columns and indexes that depend on the result. @@ -4551,15 +7774,15 @@ This commit is also listed under libpq and psql --> Remove the long-deprecated - CREATEUSER/NOCREATEUSER options from - CREATE ROLE and allied commands (Tom Lane) + CREATEUSER/NOCREATEUSER options from + CREATE ROLE and allied commands (Tom Lane) - CREATEUSER actually meant SUPERUSER, + CREATEUSER actually meant SUPERUSER, for ancient backwards-compatibility reasons. This has been a constant source of confusion for people who (reasonably) expect - it to mean CREATEROLE. It has been deprecated for + it to mean CREATEROLE. It has been deprecated for ten years now, so fix the problem by removing it. @@ -4571,13 +7794,13 @@ This commit is also listed under libpq and psql 2016-05-08 [7df974ee0] Disallow superuser names starting with 'pg_' in initdb --> - Treat role names beginning with pg_ as reserved + Treat role names beginning with pg_ as reserved (Stephen Frost) User creation of such role names is now disallowed. This prevents - conflicts with built-in roles created by initdb. + conflicts with built-in roles created by initdb. @@ -4587,16 +7810,16 @@ This commit is also listed under libpq and psql --> Change a column name in the - information_schema.routines - view from result_cast_character_set_name - to result_cast_char_set_name (Clément + information_schema.routines + view from result_cast_character_set_name + to result_cast_char_set_name (Clément Prévost) The SQL:2011 standard specifies the longer name, but that appears to be a mistake, because adjacent column names use the shorter - style, as do other information_schema views. + style, as do other information_schema views. @@ -4605,7 +7828,7 @@ This commit is also listed under libpq and psql 2015-12-08 [d5563d7df] psql: Support multiple -c and -f options, and allow mixi --> - psql's option no longer implies + psql's option no longer implies (Pavel Stehule, Catalin Iacob) @@ -4614,7 +7837,7 @@ This commit is also listed under libpq and psql Write (or its abbreviation ) explicitly to obtain the old behavior. Scripts so modified will still work with old - versions of psql. + versions of psql. 
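Gathering the to_char() and extract() behavior changes noted a few items above into a quick sketch, using the examples the notes themselves cite:

    SELECT to_char('-4 years'::interval, 'YY');          -- now -04 rather than -4
    SELECT extract(epoch FROM 'infinity'::timestamp);    -- monotonic field: Infinity
    SELECT extract(day   FROM 'infinity'::timestamp);    -- non-monotonic field: NULL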
@@ -4623,7 +7846,7 @@ This commit is also listed under libpq and psql 2015-07-02 [5671aaca8] Improve pg_restore's -t switch to match all types of rel --> - Improve pg_restore's option to + Improve pg_restore's option to match all types of relations, not only plain tables (Craig Ringer) @@ -4633,17 +7856,17 @@ This commit is also listed under libpq and psql 2016-02-12 [59a884e98] Change delimiter used for display of NextXID --> - Change the display format used for NextXID in - pg_controldata and related places (Joe Conway, + Change the display format used for NextXID in + pg_controldata and related places (Joe Conway, Bruce Momjian) Display epoch-and-transaction-ID values in the format - number:number. + number:number. The previous format - number/number was - confusingly similar to that used for LSNs. + number/number was + confusingly similar to that used for LSNs. @@ -4661,8 +7884,8 @@ and many others in the same vein Many of the standard extensions have been updated to allow their functions to be executed within parallel query worker processes. These changes will not take effect in - databases pg_upgrade'd from prior versions unless - you apply ALTER EXTENSION UPDATE to each such extension + databases pg_upgrade'd from prior versions unless + you apply ALTER EXTENSION UPDATE to each such extension (in each database of a cluster). @@ -4723,7 +7946,7 @@ and many others in the same vein - With 9.6, PostgreSQL introduces initial support + With 9.6, PostgreSQL introduces initial support for parallel execution of large queries. Only strictly read-only queries where the driving table is accessed via a sequential scan can be parallelized. Hash joins and nested loops can be performed @@ -4735,12 +7958,12 @@ and many others in the same vein Parallel query execution is not (yet) enabled by default. To allow it, set the new configuration - parameter to a + parameter to a value larger than zero. Additional control over use of parallelism is available through other new configuration parameters - , - , , and + , + , , and min_parallel_relation_size. @@ -4769,8 +7992,8 @@ and many others in the same vein 2015-09-02 [30bb26b5e] Allow usage of huge maintenance_work_mem for GIN build. --> - Allow GIN index builds to - make effective use of + Allow GIN index builds to + make effective use of settings larger than 1 GB (Robert Abraham, Teodor Sigaev) @@ -4797,7 +8020,7 @@ and many others in the same vein --> Add gin_clean_pending_list() + linkend="functions-admin-index">gin_clean_pending_list() function to allow manual invocation of pending-list cleanup for a GIN index (Jeff Janes) @@ -4815,7 +8038,7 @@ and many others in the same vein --> Improve handling of dead index tuples in GiST indexes (Anastasia Lubennikova) + linkend="gist">GiST indexes (Anastasia Lubennikova) @@ -4831,8 +8054,8 @@ and many others in the same vein 2016-03-30 [acdf2a8b3] Introduce SP-GiST operator class over box. --> - Add an SP-GiST operator class for - type box (Alexander Lebedev) + Add an SP-GiST operator class for + type box (Alexander Lebedev) @@ -4858,10 +8081,10 @@ and many others in the same vein - The new approach makes better use of the CPU cache + The new approach makes better use of the CPU cache for typical cache sizes and data volumes. Where necessary, the behavior can be adjusted via the new configuration parameter - . + replacement_sort_tuples. @@ -4883,17 +8106,17 @@ and many others in the same vein 2016-02-17 [f1f5ec1ef] Reuse abbreviated keys in ordered [set] aggregates. 
--> - Speed up sorting of uuid, bytea, and - char(n) fields by using abbreviated keys + Speed up sorting of uuid, bytea, and + char(n) fields by using abbreviated keys (Peter Geoghegan) Support for abbreviated keys has also been added to the non-default operator classes text_pattern_ops, - varchar_pattern_ops, and - bpchar_pattern_ops. Processing of ordered-set + linkend="indexes-opclass">text_pattern_ops, + varchar_pattern_ops, and + bpchar_pattern_ops. Processing of ordered-set aggregates can also now exploit abbreviated keys. @@ -4903,8 +8126,8 @@ and many others in the same vein 2015-12-16 [b648b7034] Speed up CREATE INDEX CONCURRENTLY's TID sort. --> - Speed up CREATE INDEX CONCURRENTLY by treating - TIDs as 64-bit integers during sorting (Peter + Speed up CREATE INDEX CONCURRENTLY by treating + TIDs as 64-bit integers during sorting (Peter Geoghegan) @@ -4924,7 +8147,7 @@ and many others in the same vein 2015-09-03 [4aec49899] Assorted code review for recent ProcArrayLock patch. --> - Reduce contention for the ProcArrayLock (Amit Kapila, + Reduce contention for the ProcArrayLock (Amit Kapila, Robert Haas) @@ -4955,7 +8178,7 @@ and many others in the same vein --> Use atomic operations, rather than a spinlock, to protect an - LWLock's wait queue (Andres Freund) + LWLock's wait queue (Andres Freund) @@ -4965,7 +8188,7 @@ and many others in the same vein --> Partition the shared hash table freelist to reduce contention on - multi-CPU-socket servers (Aleksander Alekseev) + multi-CPU-socket servers (Aleksander Alekseev) @@ -5001,14 +8224,14 @@ and many others in the same vein 2016-04-04 [391159e03] Partially revert commit 3d3bf62f30200500637b24fdb7b992a9 --> - Improve ANALYZE's estimates for columns with many nulls + Improve ANALYZE's estimates for columns with many nulls (Tomas Vondra, Alex Shulgin) - Previously ANALYZE tended to underestimate the number - of non-NULL distinct values in a column with many - NULLs, and was also inaccurate in computing the + Previously ANALYZE tended to underestimate the number + of non-NULL distinct values in a column with many + NULLs, and was also inaccurate in computing the most-common values. @@ -5035,13 +8258,13 @@ and many others in the same vein - If a table t has a foreign key restriction, say - (a,b) REFERENCES r (x,y), then a WHERE - condition such as t.a = r.x AND t.b = r.y cannot - select more than one r row per t row. - The planner formerly considered these AND conditions + If a table t has a foreign key restriction, say + (a,b) REFERENCES r (x,y), then a WHERE + condition such as t.a = r.x AND t.b = r.y cannot + select more than one r row per t row. + The planner formerly considered these AND conditions to be independent and would often drastically misestimate - selectivity as a result. Now it compares the WHERE + selectivity as a result. Now it compares the WHERE conditions to applicable foreign key constraints and produces better estimates. @@ -5052,7 +8275,7 @@ and many others in the same vein - <command>VACUUM</> + <command>VACUUM</command> @@ -5082,7 +8305,7 @@ and many others in the same vein If necessary, vacuum can be forced to process all-frozen - pages using the new DISABLE_PAGE_SKIPPING option. + pages using the new DISABLE_PAGE_SKIPPING option. Normally this should never be needed, but it might help in recovering from visibility-map corruption. @@ -5093,7 +8316,7 @@ and many others in the same vein 2015-12-30 [e84290823] Avoid useless truncation attempts during VACUUM. 
--> - Avoid useless heap-truncation attempts during VACUUM + Avoid useless heap-truncation attempts during VACUUM (Jeff Janes, Tom Lane) @@ -5122,19 +8345,19 @@ and many others in the same vein 2016-08-07 [9ee1cf04a] Fix TOAST access failure in RETURNING queries. --> - Allow old MVCC snapshots to be invalidated after a + Allow old MVCC snapshots to be invalidated after a configurable timeout (Kevin Grittner) Normally, deleted tuples cannot be physically removed by - vacuuming until the last transaction that could see + vacuuming until the last transaction that could see them is gone. A transaction that stays open for a long time can thus cause considerable table bloat because space cannot be recycled. This feature allows setting a time-based limit, via the new configuration parameter - , on how long an - MVCC snapshot is guaranteed to be valid. After that, + , on how long an + MVCC snapshot is guaranteed to be valid. After that, dead tuples are candidates for removal. A transaction using an outdated snapshot will get an error if it attempts to read a page that potentially could have contained such data. @@ -5146,12 +8369,12 @@ and many others in the same vein 2016-02-11 [d4c3a156c] Remove GROUP BY columns that are functionally dependent --> - Ignore GROUP BY columns that are + Ignore GROUP BY columns that are functionally dependent on other columns (David Rowley) - If a GROUP BY clause includes all columns of a + If a GROUP BY clause includes all columns of a non-deferred primary key, as well as other columns of the same table, those other columns are redundant and can be dropped from the grouping. This saves computation in many common cases. @@ -5164,17 +8387,17 @@ and many others in the same vein --> Allow use of an index-only - scan on a partial index when the index's WHERE + scan on a partial index when the index's WHERE clause references columns that are not indexed (Tomas Vondra, Kyotaro Horiguchi) For example, an index defined by CREATE INDEX tidx_partial - ON t(b) WHERE a > 0 can now be used for an index-only scan by - a query that specifies WHERE a > 0 and does not - otherwise use a. Previously this was disallowed - because a is not listed as an index column. + ON t(b) WHERE a > 0 can now be used for an index-only scan by + a query that specifies WHERE a > 0 and does not + otherwise use a. Previously this was disallowed + because a is not listed as an index column. @@ -5214,7 +8437,7 @@ and many others in the same vein - PostgreSQL writes data to the kernel's disk cache, + PostgreSQL writes data to the kernel's disk cache, from where it will be flushed to physical storage in due time. Many operating systems are not smart about managing this and allow large amounts of dirty data to accumulate before deciding to flush @@ -5225,21 +8448,21 @@ and many others in the same vein - On Linux, sync_file_range() is used for this purpose, + On Linux, sync_file_range() is used for this purpose, and the feature is on by default on Linux because that function has few downsides. This flushing capability is also available on other - platforms if they have msync() - or posix_fadvise(), but those interfaces have some + platforms if they have msync() + or posix_fadvise(), but those interfaces have some undesirable side-effects so the feature is disabled by default on non-Linux platforms. The new configuration parameters , , , and control this behavior. + linkend="guc-backend-flush-after"/>, , , and control this behavior. 
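A sketch tying together two planner items in the hunk above, using the partial-index example quoted there plus a hypothetical table with a primary key: redundant GROUP BY columns that are functionally dependent on the key can now be ignored, and a partial index can support an index-only scan even though the column in its WHERE clause is not indexed:

    CREATE TABLE t (id int PRIMARY KEY, payload text, a int, b int);

    -- "payload" can now be dropped from the grouping, being dependent on the key
    SELECT id, payload, count(*) FROM t GROUP BY id, payload;

    -- index-only scan is now possible even though "a" is not an index column
    CREATE INDEX tidx_partial ON t(b) WHERE a > 0;
    SELECT b FROM t WHERE a > 0;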
@@ -5254,7 +8477,7 @@ and many others in the same vein - For example, SELECT AVG(x), VARIANCE(x) FROM tab can use + For example, SELECT AVG(x), VARIANCE(x) FROM tab can use a single per-row computation for both aggregates. @@ -5265,7 +8488,7 @@ and many others in the same vein --> Speed up visibility tests for recently-created tuples by checking - the current transaction's snapshot, not pg_clog, to + the current transaction's snapshot, not pg_clog, to decide if the source transaction should be considered committed (Jeff Janes, Tom Lane) @@ -5291,9 +8514,9 @@ and many others in the same vein - Two-phase commit information is now written only to WAL - during PREPARE TRANSACTION, and will be read back from - WAL during COMMIT PREPARED if that happens + Two-phase commit information is now written only to WAL + during PREPARE TRANSACTION, and will be read back from + WAL during COMMIT PREPARED if that happens soon thereafter. A separate state file is created only if the pending transaction does not get committed or aborted by the time of the next checkpoint. @@ -5324,8 +8547,8 @@ and many others in the same vein 2016-02-06 [aa2387e2f] Improve speed of timestamp/time/date output functions. --> - Improve speed of the output functions for timestamp, - time, and date data types (David Rowley, + Improve speed of the output functions for timestamp, + time, and date data types (David Rowley, Andres Freund) @@ -5336,7 +8559,7 @@ and many others in the same vein --> Avoid some unnecessary cancellations of hot-standby queries - during replay of actions that take AccessExclusive + during replay of actions that take AccessExclusive locks (Jeff Janes) @@ -5370,8 +8593,8 @@ and many others in the same vein 2015-07-05 [6c82d8d1f] Further reduce overhead for passing plpgsql variables to --> - Speed up expression evaluation in PL/pgSQL by - keeping ParamListInfo entries for simple variables + Speed up expression evaluation in PL/pgSQL by + keeping ParamListInfo entries for simple variables valid at all times (Tom Lane) @@ -5381,7 +8604,7 @@ and many others in the same vein 2015-07-06 [4f33621f3] Don't set SO_SNDBUF on recent Windows versions that have --> - Avoid reducing the SO_SNDBUF setting below its default + Avoid reducing the SO_SNDBUF setting below its default on recent Windows versions (Chen Huajun) @@ -5391,7 +8614,7 @@ and many others in the same vein 2016-08-17 [9b33c7e80] Disable update_process_title by default on Windows --> - Disable by default on + Disable by default on Windows (Takayuki Tsunakawa) @@ -5417,8 +8640,8 @@ and many others in the same vein --> Add pg_stat_progress_vacuum - system view to provide progress reporting for VACUUM + linkend="pg-stat-progress-vacuum-view">pg_stat_progress_vacuum + system view to provide progress reporting for VACUUM operations (Amit Langote, Robert Haas, Vinayak Pokale, Rahila Syed) @@ -5429,11 +8652,11 @@ and many others in the same vein --> Add pg_control_system(), - pg_control_checkpoint(), - pg_control_recovery(), and - pg_control_init() functions to expose fields of - pg_control to SQL (Joe Conway, Michael + linkend="functions-controldata">pg_control_system(), + pg_control_checkpoint(), + pg_control_recovery(), and + pg_control_init() functions to expose fields of + pg_control to SQL (Joe Conway, Michael Paquier) @@ -5443,15 +8666,15 @@ and many others in the same vein 2016-02-17 [a5c43b886] Add new system view, pg_config --> - Add pg_config + Add pg_config system view (Joe Conway) This view exposes the same information available from - the pg_config 
command-line utility, + the pg_config command-line utility, namely assorted compile-time configuration information for - PostgreSQL. + PostgreSQL. @@ -5460,8 +8683,8 @@ and many others in the same vein 2015-08-10 [3f811c2d6] Add confirmed_flush column to pg_replication_slots. --> - Add a confirmed_flush_lsn column to the pg_replication_slots + Add a confirmed_flush_lsn column to the pg_replication_slots system view (Marko Tiikkaja) @@ -5474,9 +8697,9 @@ and many others in the same vein --> Add pg_stat_wal_receiver + linkend="pg-stat-wal-receiver-view">pg_stat_wal_receiver system view to provide information about the state of a hot-standby - server's WAL receiver process (Michael Paquier) + server's WAL receiver process (Michael Paquier) @@ -5486,7 +8709,7 @@ and many others in the same vein --> Add pg_blocking_pids() + linkend="functions-info-session-table">pg_blocking_pids() function to reliably identify which sessions block which others (Tom Lane) @@ -5495,7 +8718,7 @@ and many others in the same vein This function returns an array of the process IDs of any sessions that are blocking the session with the given process ID. Historically users have obtained such information using a self-join - on the pg_locks view. However, it is unreasonably + on the pg_locks view. However, it is unreasonably tedious to do it that way with any modicum of correctness, and the addition of parallel queries has made the old approach entirely impractical, since locks might be held or awaited by child worker @@ -5509,7 +8732,7 @@ and many others in the same vein --> Add function pg_current_xlog_flush_location() + linkend="functions-admin-backup-table">pg_current_xlog_flush_location() to expose the current transaction log flush location (Tomas Vondra) @@ -5520,8 +8743,8 @@ and many others in the same vein --> Add function pg_notification_queue_usage() - to report how full the NOTIFY queue is (Brendan Jurd) + linkend="functions-info-session-table">pg_notification_queue_usage() + to report how full the NOTIFY queue is (Brendan Jurd) @@ -5537,7 +8760,7 @@ and many others in the same vein The memory usage dump that is output to the postmaster log during an out-of-memory failure now summarizes statistics when there are a large number of memory contexts, rather than possibly generating - a very large report. There is also a grand total + a very large report. There is also a grand total summary line now. @@ -5547,7 +8770,7 @@ and many others in the same vein - <acronym>Authentication</> + <acronym>Authentication</acronym> @@ -5556,15 +8779,15 @@ and many others in the same vein 2016-04-08 [34c33a1f0] Add BSD authentication method. --> - Add a BSD authentication + Add a BSD authentication method to allow use of - the BSD Authentication service for - PostgreSQL client authentication (Marisa Emerson) + the BSD Authentication service for + PostgreSQL client authentication (Marisa Emerson) BSD Authentication is currently only available on OpenBSD. + class="osname">OpenBSD. @@ -5573,9 +8796,9 @@ and many others in the same vein 2016-04-08 [2f1d2b7a7] Set PAM_RHOST item for PAM authentication --> - When using PAM + When using PAM authentication, provide the client IP address or host name - to PAM modules via the PAM_RHOST item + to PAM modules via the PAM_RHOST item (Grzegorz Sampolski) @@ -5591,7 +8814,7 @@ and many others in the same vein All ordinarily-reachable password authentication failure cases - should now provide specific DETAIL fields in the log. + should now provide specific DETAIL fields in the log. 
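For the pg_blocking_pids() item above, a typical replacement for the traditional self-join on pg_locks might look like this sketch:

    -- list blocked sessions together with the PIDs blocking them
    SELECT pid, pg_blocking_pids(pid) AS blocked_by
    FROM pg_stat_activity
    WHERE cardinality(pg_blocking_pids(pid)) > 0;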
@@ -5600,7 +8823,7 @@ and many others in the same vein 2015-09-06 [643beffe8] Support RADIUS passwords up to 128 characters --> - Support RADIUS passwords + Support RADIUS passwords up to 128 characters long (Marko Tiikkaja) @@ -5610,11 +8833,11 @@ and many others in the same vein 2016-04-08 [35e2e357c] Add authentication parameters compat_realm and upn_usena --> - Add new SSPI + Add new SSPI authentication parameters - compat_realm and upn_username to control - whether NetBIOS or Kerberos - realm names and user names are used during SSPI + compat_realm and upn_username to control + whether NetBIOS or Kerberos + realm names and user names are used during SSPI authentication (Christian Ullrich) @@ -5639,7 +8862,7 @@ and many others in the same vein This behavior is controlled by the new configuration parameter - . It can + . It can be useful to prevent forgotten transactions from holding locks or preventing vacuum cleanup for too long. @@ -5651,7 +8874,7 @@ and many others in the same vein --> Raise the maximum allowed value - of to 24 hours (Simon Riggs) + of to 24 hours (Simon Riggs) @@ -5660,7 +8883,7 @@ and many others in the same vein 2015-09-08 [1aba62ec6] Allow per-tablespace effective_io_concurrency --> - Allow effective_io_concurrency to be set per-tablespace + Allow effective_io_concurrency to be set per-tablespace to support cases where different tablespaces have different I/O characteristics (Julien Rouhaud) @@ -5672,7 +8895,7 @@ and many others in the same vein 2015-09-07 [b1e1862a1] Coordinate log_line_prefix options 'm' and 'n' to share --> - Add option %n to + Add option %n to print the current time in Unix epoch form, with milliseconds (Tomas Vondra, Jeff Davis) @@ -5684,10 +8907,10 @@ and many others in the same vein 2016-03-16 [fc201dfd9] Add syslog_split_messages parameter --> - Add and configuration parameters + Add and configuration parameters to provide more control over the message format when logging to - syslog (Peter Eisentraut) + syslog (Peter Eisentraut) @@ -5696,16 +8919,16 @@ and many others in the same vein 2016-03-18 [b555ed810] Merge wal_level "archive" and "hot_standby" into new nam --> - Merge the archive and hot_standby values - of the configuration parameter - into a single new value replica (Peter Eisentraut) + Merge the archive and hot_standby values + of the configuration parameter + into a single new value replica (Peter Eisentraut) Making a distinction between these settings is no longer useful, and merging them is a step towards a planned future simplification of replication setup. The old names are still accepted but are - converted to replica internally. + converted to replica internally. @@ -5714,15 +8937,15 @@ and many others in the same vein 2016-02-02 [7d17e683f] Add support for systemd service notifications --> - Add configure option - This allows the use of systemd service units of - type notify, which greatly simplifies the management - of PostgreSQL under systemd. + This allows the use of systemd service units of + type notify, which greatly simplifies the management + of PostgreSQL under systemd. 
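The per-tablespace effective_io_concurrency change above can be applied with an ordinary ALTER TABLESPACE; a sketch, where fast_ssd is a hypothetical tablespace backed by storage that benefits from a higher prefetch target:

    -- fast_ssd is a hypothetical tablespace name
    ALTER TABLESPACE fast_ssd SET (effective_io_concurrency = 200);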
@@ -5731,17 +8954,17 @@ and many others in the same vein 2016-03-19 [9a83564c5] Allow SSL server key file to have group read access if o --> - Allow the server's SSL key file to have group read - access if it is owned by root (Christoph Berg) + Allow the server's SSL key file to have group read + access if it is owned by root (Christoph Berg) Formerly, we insisted the key file be owned by the - user running the PostgreSQL server, but + user running the PostgreSQL server, but that is inconvenient on some systems (such as Debian) that are configured to manage + class="osname">Debian) that are configured to manage certificates centrally. Therefore, allow the case where the key - file is owned by root and has group read access. + file is owned by root and has group read access. It is up to the operating system administrator to ensure that the group does not include any untrusted users. @@ -5806,8 +9029,8 @@ XXX this is pending backpatch, may need to remove 2016-04-26 [c6ff84b06] Emit invalidations to standby for transactions without x --> - Ensure that invalidation messages are recorded in WAL - even when issued by a transaction that has no XID + Ensure that invalidation messages are recorded in WAL + even when issued by a transaction that has no XID assigned (Andres Freund) @@ -5823,7 +9046,7 @@ XXX this is pending backpatch, may need to remove 2016-04-28 [e2c79e14d] Prevent multiple cleanup process for pending list in GIN --> - Prevent multiple processes from trying to clean a GIN + Prevent multiple processes from trying to clean a GIN index's pending list concurrently (Teodor Sigaev, Jeff Janes) @@ -5859,7 +9082,7 @@ XXX this is pending backpatch, may need to remove The number of standby servers that must acknowledge a commit before it is considered complete is now configurable as part of - the parameter. + the parameter. @@ -5868,13 +9091,13 @@ XXX this is pending backpatch, may need to remove 2016-03-29 [314cbfc5d] Add new replication mode synchronous_commit = 'remote_ap --> - Add new setting remote_apply for configuration - parameter (Thomas Munro) + Add new setting remote_apply for configuration + parameter (Thomas Munro) In this mode, the master waits for the transaction to be - applied on the standby server, not just written + applied on the standby server, not just written to disk. That means that you can count on a transaction started on the standby to see all commits previously acknowledged by the master. @@ -5889,14 +9112,14 @@ XXX this is pending backpatch, may need to remove Add a feature to the replication protocol, and a corresponding option to pg_create_physical_replication_slot(), - to allow reserving WAL immediately when creating a + linkend="functions-replication-table">pg_create_physical_replication_slot(), + to allow reserving WAL immediately when creating a replication slot (Gurjeet Singh, Michael Paquier) This allows the creation of a replication slot to guarantee - that all the WAL needed for a base backup will be + that all the WAL needed for a base backup will be available. @@ -5907,13 +9130,13 @@ XXX this is pending backpatch, may need to remove --> Add a option to - pg_basebackup + pg_basebackup (Peter Eisentraut) - This lets pg_basebackup use a replication - slot defined for WAL streaming. After the base + This lets pg_basebackup use a replication + slot defined for WAL streaming. After the base backup completes, selecting the same slot for regular streaming replication allows seamless startup of the new standby server. 
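The multiple-synchronous-standby and remote_apply items above combine naturally; a sketch, with hypothetical standby names, requiring two of three standbys to apply each commit before it is acknowledged:

    ALTER SYSTEM SET synchronous_standby_names = '2 (standby_a, standby_b, standby_c)';
    ALTER SYSTEM SET synchronous_commit = 'remote_apply';  -- wait for apply, not just flush
    SELECT pg_reload_conf();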
@@ -5926,8 +9149,8 @@ XXX this is pending backpatch, may need to remove --> Extend pg_start_backup() - and pg_stop_backup() to support non-exclusive backups + linkend="functions-admin-backup-table">pg_start_backup() + and pg_stop_backup() to support non-exclusive backups (Magnus Hagander) @@ -5947,14 +9170,14 @@ XXX this is pending backpatch, may need to remove --> Allow functions that return sets of tuples to return simple - NULLs (Andrew Gierth, Tom Lane) + NULLs (Andrew Gierth, Tom Lane) - In the context of SELECT FROM function(...), a function + In the context of SELECT FROM function(...), a function that returned a set of composite values was previously not allowed - to return a plain NULL value as part of the set. - Now that is allowed and interpreted as a row of NULLs. + to return a plain NULL value as part of the set. + Now that is allowed and interpreted as a row of NULLs. This avoids corner-case errors with, for example, unnesting an array of composite values. @@ -5966,14 +9189,14 @@ XXX this is pending backpatch, may need to remove --> Fully support array subscripts and field selections in the - target column list of an INSERT with multiple - VALUES rows (Tom Lane) + target column list of an INSERT with multiple + VALUES rows (Tom Lane) Previously, such cases failed if the same target column was mentioned more than once, e.g., INSERT INTO tab (x[1], - x[2]) VALUES (...). + x[2]) VALUES (...). @@ -5983,16 +9206,16 @@ XXX this is pending backpatch, may need to remove 2016-03-25 [d543170f2] Don't split up SRFs when choosing to postpone SELECT out --> - When appropriate, postpone evaluation of SELECT - output expressions until after an ORDER BY sort + When appropriate, postpone evaluation of SELECT + output expressions until after an ORDER BY sort (Konstantin Knizhnik) This change ensures that volatile or expensive functions in the output list are executed in the order suggested by ORDER - BY, and that they are not evaluated more times than required - when there is a LIMIT clause. Previously, these + BY, and that they are not evaluated more times than required + when there is a LIMIT clause. Previously, these properties held if the ordering was performed by an index scan or pre-merge-join sort, but not if it was performed by a top-level sort. @@ -6010,9 +9233,9 @@ XXX this is pending backpatch, may need to remove - This change allows command tags, e.g. SELECT, to + This change allows command tags, e.g. SELECT, to correctly report tuple counts larger than 4 billion. This also - applies to PL/pgSQL's GET DIAGNOSTICS ... ROW_COUNT + applies to PL/pgSQL's GET DIAGNOSTICS ... ROW_COUNT command. @@ -6023,17 +9246,17 @@ XXX this is pending backpatch, may need to remove --> Avoid doing encoding conversions by converting through the - MULE_INTERNAL encoding (Tom Lane) + MULE_INTERNAL encoding (Tom Lane) Previously, many conversions for Cyrillic and Central European single-byte encodings were done by converting to a - related MULE_INTERNAL coding scheme and then to the + related MULE_INTERNAL coding scheme and then to the destination encoding. Aside from being inefficient, this meant that when the conversion encountered an untranslatable character, the error message would confusingly complain about failure to - convert to or from MULE_INTERNAL, rather than the + convert to or from MULE_INTERNAL, rather than the user-visible encoding. 
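A minimal sketch of the non-exclusive backup mode added at the top of this hunk, assuming the calling session stays connected while an external tool copies the data directory:

    SELECT pg_start_backup('nightly', false, false);  -- label, spread checkpoint, non-exclusive
    -- ... copy the data directory while this session remains open ...
    SELECT * FROM pg_stop_backup(false);              -- returns lsn, labelfile, spcmapfile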
@@ -6052,7 +9275,7 @@ XXX this is pending backpatch, may need to remove Previously, the foreign join pushdown infrastructure left the question of security entirely up to individual foreign data - wrappers, but that made it too easy for an FDW to + wrappers, but that made it too easy for an FDW to inadvertently create subtle security holes. So, make it the core code's job to determine which role ID will access each table, and do not attempt join pushdown unless the role is the same for @@ -6074,13 +9297,13 @@ XXX this is pending backpatch, may need to remove 2015-11-27 [92e38182d] COPY (INSERT/UPDATE/DELETE .. RETURNING ..) --> - Allow COPY to copy the output of an - INSERT/UPDATE/DELETE - ... RETURNING query (Marko Tiikkaja) + Allow COPY to copy the output of an + INSERT/UPDATE/DELETE + ... RETURNING query (Marko Tiikkaja) - Previously, an intermediate CTE had to be written to + Previously, an intermediate CTE had to be written to get this result. @@ -6090,16 +9313,16 @@ XXX this is pending backpatch, may need to remove 2016-04-05 [f2fcad27d] Support ALTER THING .. DEPENDS ON EXTENSION --> - Introduce ALTER object DEPENDS ON + Introduce ALTER object DEPENDS ON EXTENSION (Abhijit Menon-Sen) This command allows a database object to be marked as depending on an extension, so that it will be dropped automatically if - the extension is dropped (without needing CASCADE). + the extension is dropped (without needing CASCADE). However, the object is not part of the extension, and thus will - be dumped separately by pg_dump. + be dumped separately by pg_dump. @@ -6108,7 +9331,7 @@ XXX this is pending backpatch, may need to remove 2015-11-19 [bc4996e61] Make ALTER .. SET SCHEMA do nothing, instead of throwing --> - Make ALTER object SET SCHEMA do nothing + Make ALTER object SET SCHEMA do nothing when the object is already in the requested schema, rather than throwing an error as it historically has for most object types (Marti Raudsepp) @@ -6132,8 +9355,8 @@ XXX this is pending backpatch, may need to remove 2015-07-29 [2cd40adb8] Add IF NOT EXISTS processing to ALTER TABLE ADD COLUMN --> - Add an @@ -6143,7 +9366,7 @@ XXX this is pending backpatch, may need to remove 2016-03-10 [fcb4bfddb] Reduce lock level for altering fillfactor --> - Reduce the lock strength needed by ALTER TABLE + Reduce the lock strength needed by ALTER TABLE when setting fillfactor and autovacuum-related relation options (Fabrízio de Royes Mello, Simon Riggs) @@ -6155,7 +9378,7 @@ XXX this is pending backpatch, may need to remove --> Introduce CREATE - ACCESS METHOD to allow extensions to create index access + ACCESS METHOD to allow extensions to create index access methods (Alexander Korotkov, Petr Jelínek) @@ -6165,7 +9388,7 @@ XXX this is pending backpatch, may need to remove 2015-10-03 [b67aaf21e] Add CASCADE support for CREATE EXTENSION. --> - Add a CASCADE option to CREATE + Add a CASCADE option to CREATE EXTENSION to automatically create any extensions the requested one depends on (Petr Jelínek) @@ -6176,7 +9399,7 @@ XXX this is pending backpatch, may need to remove 2015-10-05 [b943f502b] Have CREATE TABLE LIKE add OID column if any LIKEd table --> - Make CREATE TABLE ... LIKE include an OID + Make CREATE TABLE ... 
LIKE include an OID column if any source table has one (Bruce Momjian) @@ -6186,14 +9409,14 @@ XXX this is pending backpatch, may need to remove 2015-12-16 [f27a6b15e] Mark CHECK constraints declared NOT VALID valid if creat --> - If a CHECK constraint is declared NOT VALID + If a CHECK constraint is declared NOT VALID in a table creation command, automatically mark it as valid (Amit Langote, Amul Sul) This is safe because the table has no existing rows. This matches - the longstanding behavior of FOREIGN KEY constraints. + the longstanding behavior of FOREIGN KEY constraints. @@ -6202,16 +9425,16 @@ XXX this is pending backpatch, may need to remove 2016-03-25 [c94959d41] Fix DROP OPERATOR to reset oprcom/oprnegate links to the --> - Fix DROP OPERATOR to clear - pg_operator.oprcom and - pg_operator.oprnegate links to + Fix DROP OPERATOR to clear + pg_operator.oprcom and + pg_operator.oprnegate links to the dropped operator (Roma Sokolov) Formerly such links were left as-is, which could pose a problem in the somewhat unlikely event that the dropped operator's - OID was reused for another operator. + OID was reused for another operator. @@ -6220,13 +9443,13 @@ XXX this is pending backpatch, may need to remove 2016-07-11 [4d042999f] Print a given subplan only once in EXPLAIN. --> - Do not show the same subplan twice in EXPLAIN output + Do not show the same subplan twice in EXPLAIN output (Tom Lane) In certain cases, typically involving SubPlan nodes in index - conditions, EXPLAIN would print data for the same + conditions, EXPLAIN would print data for the same subplan twice. @@ -6237,7 +9460,7 @@ XXX this is pending backpatch, may need to remove --> Disallow creation of indexes on system columns, except for - OID columns (David Rowley) + OID columns (David Rowley) @@ -6271,8 +9494,8 @@ XXX this is pending backpatch, may need to remove checks that would throw an error if they were called by a non-superuser. This forced the use of superuser roles for some relatively pedestrian tasks. The hard-wired error checks - are now gone in favor of making initdb revoke the - default public EXECUTE privilege on these functions. + are now gone in favor of making initdb revoke the + default public EXECUTE privilege on these functions. This allows installations to choose to grant usage of such functions to trusted roles that do not need all superuser privileges. @@ -6290,7 +9513,7 @@ XXX this is pending backpatch, may need to remove - Currently the only such role is pg_signal_backend, + Currently the only such role is pg_signal_backend, but more are expected to be added in future. @@ -6312,19 +9535,19 @@ XXX this is pending backpatch, may need to remove 2016-06-27 [6734a1cac] Change predecence of phrase operator. --> - Improve full-text search to support + Improve full-text search to support searching for phrases, that is, lexemes appearing adjacent to each other in a specific order, or with a specified distance between them (Teodor Sigaev, Oleg Bartunov, Dmitry Ivanov) - A phrase-search query can be specified in tsquery - input using the new operators <-> and - <N>. The former means + A phrase-search query can be specified in tsquery + input using the new operators <-> and + <N>. The former means that the lexemes before and after it must appear adjacent to each other in that order. The latter means they must be exactly - N lexemes apart. + N lexemes apart. @@ -6334,7 +9557,7 @@ XXX this is pending backpatch, may need to remove --> Allow omitting one or both boundaries in an array slice specifier, - e.g. 
array_col[3:] (Yury Zhuravlev) + e.g. array_col[3:] (Yury Zhuravlev) @@ -6355,19 +9578,19 @@ XXX this is pending backpatch, may need to remove This change prevents unexpected out-of-range errors for - timestamp with time zone values very close to the - implementation limits. Previously, the same value might - be accepted or not depending on the timezone setting, + timestamp with time zone values very close to the + implementation limits. Previously, the same value might + be accepted or not depending on the timezone setting, meaning that a dump and reload could fail on a value that had been accepted when presented. Now the limits are enforced according - to the equivalent UTC time, not local time, so as to - be independent of timezone. + to the equivalent UTC time, not local time, so as to + be independent of timezone. - Also, PostgreSQL is now more careful to detect + Also, PostgreSQL is now more careful to detect overflow in operations that compute new date or timestamp values, - such as date + integer. + such as date + integer. @@ -6376,14 +9599,14 @@ XXX this is pending backpatch, may need to remove 2016-03-30 [50861cd68] Improve portability of I/O behavior for the geometric ty --> - For geometric data types, make sure infinity and - NaN component values are treated consistently during + For geometric data types, make sure infinity and + NaN component values are treated consistently during input and output (Tom Lane) Such values will now always print the same as they would in - a simple float8 column, and be accepted the same way + a simple float8 column, and be accepted the same way on input. Previously the behavior was platform-dependent. @@ -6396,8 +9619,8 @@ XXX this is pending backpatch, may need to remove --> Upgrade - the ispell - dictionary type to handle modern Hunspell files and + the ispell + dictionary type to handle modern Hunspell files and support more languages (Artur Zakirov) @@ -6408,7 +9631,7 @@ XXX this is pending backpatch, may need to remove --> Implement look-behind constraints - in regular expressions + in regular expressions (Tom Lane) @@ -6427,12 +9650,12 @@ XXX this is pending backpatch, may need to remove --> In regular expressions, if an apparent three-digit octal escape - \nnn would exceed 377 (255 decimal), + \nnn would exceed 377 (255 decimal), assume it is a two-digit octal escape instead (Tom Lane) - This makes the behavior match current Tcl releases. + This makes the behavior match current Tcl releases. @@ -6441,8 +9664,8 @@ XXX this is pending backpatch, may need to remove 2015-11-07 [c5e86ea93] Add "xid <> xid" and "xid <> int4" operators. 
--> - Add transaction ID operators xid <> - xid and xid <> int4, + Add transaction ID operators xid <> + xid and xid <> int4, for consistency with the corresponding equality operators (Michael Paquier) @@ -6463,9 +9686,9 @@ XXX this is pending backpatch, may need to remove --> Add jsonb_insert() - function to insert a new element into a jsonb array, - or a not-previously-existing key into a jsonb object + linkend="functions-json-processing-table">jsonb_insert() + function to insert a new element into a jsonb array, + or a not-previously-existing key into a jsonb object (Dmitry Dolgov) @@ -6476,9 +9699,9 @@ XXX this is pending backpatch, may need to remove 2016-05-05 [18a02ad2a] Fix corner-case loss of precision in numeric pow() calcu --> - Improve the accuracy of the ln(), log(), - exp(), and pow() functions for type - numeric (Dean Rasheed) + Improve the accuracy of the ln(), log(), + exp(), and pow() functions for type + numeric (Dean Rasheed) @@ -6488,8 +9711,8 @@ XXX this is pending backpatch, may need to remove --> Add a scale(numeric) - function to extract the display scale of a numeric value + linkend="functions-math-func-table">scale(numeric) + function to extract the display scale of a numeric value (Marko Tiikkaja) @@ -6504,8 +9727,8 @@ XXX this is pending backpatch, may need to remove For example, sind() - measures its argument in degrees, whereas sin() + linkend="functions-math-trig-table">sind() + measures its argument in degrees, whereas sin() measures in radians. These functions go to some lengths to deliver exact results for values where an exact result can be expected, for instance sind(30) = 0.5. @@ -6517,15 +9740,15 @@ XXX this is pending backpatch, may need to remove 2016-01-22 [fd5200c3d] Improve cross-platform consistency of Inf/NaN handling i --> - Ensure that trigonometric functions handle infinity - and NaN inputs per the POSIX standard + Ensure that trigonometric functions handle infinity + and NaN inputs per the POSIX standard (Dean Rasheed) - The POSIX standard says that these functions should - return NaN for NaN input, and should throw - an error for out-of-range inputs including infinity. + The POSIX standard says that these functions should + return NaN for NaN input, and should throw + an error for out-of-range inputs including infinity. Previously our behavior varied across platforms. @@ -6536,9 +9759,9 @@ XXX this is pending backpatch, may need to remove --> Make to_timestamp(float8) - convert float infinity to - timestamp infinity (Vitaly Burovoy) + linkend="functions-datetime-table">to_timestamp(float8) + convert float infinity to + timestamp infinity (Vitaly Burovoy) @@ -6552,15 +9775,15 @@ XXX this is pending backpatch, may need to remove 2016-05-05 [0b9a23443] Rename tsvector delete() to ts_delete(), and filter() to --> - Add new functions for tsvector data (Stas Kelvich) + Add new functions for tsvector data (Stas Kelvich) The new functions are ts_delete(), - ts_filter(), unnest(), - tsvector_to_array(), array_to_tsvector(), - and a variant of setweight() that sets the weight + linkend="textsearch-functions-table">ts_delete(), + ts_filter(), unnest(), + tsvector_to_array(), array_to_tsvector(), + and a variant of setweight() that sets the weight only for specified lexeme(s). 
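For instance, the tsvector manipulation functions just listed and the jsonb_insert() function described a little earlier behave along these lines (a sketch; the comments show the expected results):

    -- Insert before the element at array index 1, or after it when the
    -- optional fourth argument is true:
    SELECT jsonb_insert('{"a": [0, 1, 2]}', '{a, 1}', '"new"');        -- {"a": [0, "new", 1, 2]}
    SELECT jsonb_insert('{"a": [0, 1, 2]}', '{a, 1}', '"new"', true);  -- {"a": [0, 1, "new", 2]}

    -- Remove a single lexeme from a tsvector:
    SELECT ts_delete('fat:2,4 cat:3 rat:5A'::tsvector, 'fat');         -- 'cat':3 'rat':5A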
@@ -6570,11 +9793,11 @@ XXX this is pending backpatch, may need to remove 2015-09-17 [9acb9007d] Fix oversight in tsearch type check --> - Allow ts_stat() - and tsvector_update_trigger() + Allow ts_stat() + and tsvector_update_trigger() to operate on values that are of types binary-compatible with the expected argument type, not just exactly that type; for example - allow citext where text is expected (Teodor + allow citext where text is expected (Teodor Sigaev) @@ -6585,14 +9808,14 @@ XXX this is pending backpatch, may need to remove --> Add variadic functions num_nulls() - and num_nonnulls() that count the number of their + linkend="functions-comparison-func-table">num_nulls() + and num_nonnulls() that count the number of their arguments that are null or non-null (Marko Tiikkaja) - An example usage is CHECK(num_nonnulls(a,b,c) = 1) - which asserts that exactly one of a,b,c is not NULL. + An example usage is CHECK(num_nonnulls(a,b,c) = 1) + which asserts that exactly one of a,b,c is not NULL. These functions can also be used to count the number of null or nonnull elements in an array. @@ -6604,8 +9827,8 @@ XXX this is pending backpatch, may need to remove --> Add function parse_ident() - to split a qualified, possibly quoted SQL identifier + linkend="functions-string-other">parse_ident() + to split a qualified, possibly quoted SQL identifier into its parts (Pavel Stehule) @@ -6616,15 +9839,15 @@ XXX this is pending backpatch, may need to remove --> In to_number(), - interpret a V format code as dividing by 10 to the - power of the number of digits following V (Bruce + linkend="functions-formatting-table">to_number(), + interpret a V format code as dividing by 10 to the + power of the number of digits following V (Bruce Momjian) This makes it operate in an inverse fashion to - to_char(). + to_char(). @@ -6634,8 +9857,8 @@ XXX this is pending backpatch, may need to remove --> Make the to_reg*() - functions accept type text not cstring + linkend="functions-info-catalog-table">to_reg*() + functions accept type text not cstring (Petr Korobeinikov) @@ -6651,16 +9874,16 @@ XXX this is pending backpatch, may need to remove --> Add pg_size_bytes() + linkend="functions-admin-dbsize">pg_size_bytes() function to convert human-readable size strings to numbers (Pavel Stehule, Vitaly Burovoy, Dean Rasheed) This function converts strings like those produced by - pg_size_pretty() into bytes. An example + pg_size_pretty() into bytes. An example usage is SELECT oid::regclass FROM pg_class WHERE - pg_total_relation_size(oid) > pg_size_bytes('10 GB'). + pg_total_relation_size(oid) > pg_size_bytes('10 GB'). @@ -6670,7 +9893,7 @@ XXX this is pending backpatch, may need to remove --> In pg_size_pretty(), + linkend="functions-admin-dbsize">pg_size_pretty(), format negative numbers similarly to positive ones (Adrian Vondendriesch) @@ -6686,14 +9909,14 @@ XXX this is pending backpatch, may need to remove 2015-07-02 [10fb48d66] Add an optional missing_ok argument to SQL function curr --> - Add an optional missing_ok argument to the current_setting() + Add an optional missing_ok argument to the current_setting() function (David Christensen) This allows avoiding an error for an unrecognized parameter - name, instead returning a NULL. + name, instead returning a NULL. 
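A few sketched calls to the new counting and inspection functions covered above; the comments show the expected behavior:

    SELECT num_nonnulls(1, NULL, 'x');           -- 2
    SELECT num_nulls(1, NULL, 'x');              -- 1
    SELECT parse_ident('"My Schema".my_table');  -- {"My Schema",my_table}
    -- With missing_ok = true, an unrecognized parameter yields NULL rather than an error:
    SELECT current_setting('no_such_setting', true);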
@@ -6705,16 +9928,16 @@ XXX this is pending backpatch, may need to remove --> Change various catalog-inspection functions to return - NULL for invalid input (Michael Paquier) + NULL for invalid input (Michael Paquier) pg_get_viewdef() - now returns NULL if given an invalid view OID, - and several similar functions likewise return NULL for + linkend="functions-info-catalog-table">pg_get_viewdef() + now returns NULL if given an invalid view OID, + and several similar functions likewise return NULL for bad input. Previously, such cases usually led to cache - lookup failed errors, which are not meant to occur in + lookup failed errors, which are not meant to occur in user-facing cases. @@ -6725,13 +9948,13 @@ XXX this is pending backpatch, may need to remove --> Fix pg_replication_origin_xact_reset() + linkend="pg-replication-origin-xact-reset">pg_replication_origin_xact_reset() to not have any arguments (Fujii Masao) The documentation said that it has no arguments, and the C code did - not expect any arguments, but the entry in pg_proc + not expect any arguments, but the entry in pg_proc mistakenly specified two arguments. @@ -6751,7 +9974,7 @@ XXX this is pending backpatch, may need to remove --> In PL/pgSQL, detect mismatched - CONTINUE and EXIT statements while + CONTINUE and EXIT statements while compiling a function, rather than at execution time (Jim Nasby) @@ -6764,7 +9987,7 @@ XXX this is pending backpatch, may need to remove 2016-07-02 [3a4a33ad4] PL/Python: Report argument parsing errors using exceptio --> - Extend PL/Python's error-reporting and + Extend PL/Python's error-reporting and message-reporting functions to allow specifying additional message fields besides the primary error message (Pavel Stehule) @@ -6776,7 +9999,7 @@ XXX this is pending backpatch, may need to remove --> Allow PL/Python functions to call themselves recursively - via SPI, and fix the behavior when multiple + via SPI, and fix the behavior when multiple set-returning PL/Python functions are called within one query (Alexey Grishchenko, Tom Lane) @@ -6798,14 +10021,14 @@ XXX this is pending backpatch, may need to remove 2016-03-02 [e2609323e] Make PL/Tcl require Tcl 8.4 or later. --> - Modernize PL/Tcl to use Tcl's object - APIs instead of simple strings (Jim Nasby, Karl + Modernize PL/Tcl to use Tcl's object + APIs instead of simple strings (Jim Nasby, Karl Lehenbauer) This can improve performance substantially in some cases. - Note that PL/Tcl now requires Tcl 8.4 or later. + Note that PL/Tcl now requires Tcl 8.4 or later. @@ -6815,8 +10038,8 @@ XXX this is pending backpatch, may need to remove 2016-03-25 [cd37bb785] Improve PL/Tcl errorCode facility by providing decoded n --> - In PL/Tcl, make database-reported errors return - additional information in Tcl's errorCode global + In PL/Tcl, make database-reported errors return + additional information in Tcl's errorCode global variable (Jim Nasby, Tom Lane) @@ -6831,15 +10054,15 @@ XXX this is pending backpatch, may need to remove 2016-03-02 [c8c7c93de] Fix PL/Tcl's encoding conversion logic. --> - Fix PL/Tcl to perform encoding conversion between - the database encoding and UTF-8, which is what Tcl + Fix PL/Tcl to perform encoding conversion between + the database encoding and UTF-8, which is what Tcl expects (Tom Lane) Previously, strings were passed through without conversion, - leading to misbehavior with non-ASCII characters when - the database encoding was not UTF-8. 
+ leading to misbehavior with non-ASCII characters when + the database encoding was not UTF-8. @@ -6858,7 +10081,7 @@ XXX this is pending backpatch, may need to remove --> Add a nonlocalized version of - the severity field in + the severity field in error and notice messages (Tom Lane) @@ -6875,17 +10098,17 @@ XXX this is pending backpatch, may need to remove This commit is also listed under psql and PL/pgSQL --> - Introduce a feature in libpq whereby the - CONTEXT field of messages can be suppressed, either + Introduce a feature in libpq whereby the + CONTEXT field of messages can be suppressed, either always or only for non-error messages (Pavel Stehule) The default behavior of PQerrorMessage() - is now to print CONTEXT + linkend="libpq-pqerrormessage">PQerrorMessage() + is now to print CONTEXT only for errors. The new function PQsetErrorContextVisibility() + linkend="libpq-pqseterrorcontextvisibility">PQsetErrorContextVisibility() can be used to adjust this. @@ -6895,14 +10118,14 @@ This commit is also listed under psql and PL/pgSQL 2016-04-03 [e3161b231] Add libpq support for recreating an error message with d --> - Add support in libpq for regenerating an error + Add support in libpq for regenerating an error message with a different verbosity level (Alex Shulgin) This is done with the new function PQresultVerboseErrorMessage(). - This supports psql's new \errverbose + linkend="libpq-pqresultverboseerrormessage">PQresultVerboseErrorMessage(). + This supports psql's new \errverbose feature, and may be useful for other clients as well. @@ -6912,13 +10135,13 @@ This commit is also listed under psql and PL/pgSQL 2015-11-27 [40cb21f70] Improve PQhost() to return useful data for default Unix- --> - Improve libpq's PQhost() function to return + Improve libpq's PQhost() function to return useful data for default Unix-socket connections (Tom Lane) - Previously it would return NULL if no explicit host + Previously it would return NULL if no explicit host specification had been given; now it returns the default socket directory path. @@ -6929,7 +10152,7 @@ This commit is also listed under psql and PL/pgSQL 2016-02-16 [fc1ae7d2e] Change ecpg lexer to accept comments with line breaks in --> - Fix ecpg's lexer to handle line breaks within + Fix ecpg's lexer to handle line breaks within comments starting on preprocessor directive lines (Michael Meskes) @@ -6948,9 +10171,9 @@ This commit is also listed under psql and PL/pgSQL 2015-09-14 [d02426029] Check existency of table/schema for -t/-n option (pg_dum --> - Add a @@ -6970,7 +10193,7 @@ This commit is also listed under psql and PL/pgSQL 2016-05-06 [e1b120a8c] Only issue LOCK TABLE commands when necessary --> - In pg_dump, dump locally-made changes of privilege + In pg_dump, dump locally-made changes of privilege assignments for system objects (Stephen Frost) @@ -6978,7 +10201,7 @@ This commit is also listed under psql and PL/pgSQL While it has always been possible for a superuser to change the privilege assignments for built-in or extension-created objects, such changes were formerly lost in a dump and reload. - Now, pg_dump recognizes and dumps such changes. + Now, pg_dump recognizes and dumps such changes. (This works only when dumping from a 9.6 or later server, however.) 
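As an example of the privilege-dumping behavior just described, a site-local grant on a built-in function such as the following (monitor_role is a hypothetical role) is now emitted by pg_dump when dumping from a 9.6 or later server, so it survives a dump and reload:

    -- Formerly lost on dump/reload; now reproduced by pg_dump as a GRANT statement.
    GRANT EXECUTE ON FUNCTION pg_stat_reset() TO monitor_role;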
@@ -6988,7 +10211,7 @@ This commit is also listed under psql and PL/pgSQL 2016-09-08 [31eb14504] Allow pg_dump to dump non-extension members of an extens --> - Allow pg_dump to dump non-extension-owned objects + Allow pg_dump to dump non-extension-owned objects that are within an extension-owned schema (Martín Marqués) @@ -7004,7 +10227,7 @@ This commit is also listed under psql and PL/pgSQL 2016-04-06 [3b3fcc4ee] pg_dump: Add table qualifications to some tags --> - In pg_dump output, include the table name in object + In pg_dump output, include the table name in object tags for object types that are only uniquely named per-table (for example, triggers) (Peter Eisentraut) @@ -7013,7 +10236,7 @@ This commit is also listed under psql and PL/pgSQL - <xref linkend="APP-PSQL"> + <xref linkend="app-psql"/> @@ -7029,7 +10252,7 @@ this commit is also listed in the compatibility section The specified operations are carried out in the order in which the - options are given, and then psql terminates. + options are given, and then psql terminates. @@ -7038,7 +10261,7 @@ this commit is also listed in the compatibility section 2016-04-08 [c09b18f21] Support \crosstabview in psql --> - Add a \crosstabview command that prints the results of + Add a \crosstabview command that prints the results of a query in a cross-tabulated display (Daniel Vérité) @@ -7054,13 +10277,13 @@ this commit is also listed in the compatibility section 2016-04-03 [3cc38ca7d] Add psql \errverbose command to see last server error at --> - Add an \errverbose command that shows the last server + Add an \errverbose command that shows the last server error at full verbosity (Alex Shulgin) This is useful after getting an unexpected error — you - no longer need to adjust the VERBOSITY variable and + no longer need to adjust the VERBOSITY variable and recreate the failure in order to see error fields that are not shown by default. @@ -7072,13 +10295,13 @@ this commit is also listed in the compatibility section 2016-05-06 [9b66aa006] Fix psql's \ev and \sv commands so that they handle view --> - Add \ev and \sv commands for editing and + Add \ev and \sv commands for editing and showing view definitions (Petr Korobeinikov) - These are parallel to the existing \ef and - \sf commands for functions. + These are parallel to the existing \ef and + \sf commands for functions. 
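The \crosstabview command described earlier in this hunk pivots an ordinary three-column result inside psql, using the first column for row headers, the second for column headers, and the third for cell values; sales is a hypothetical table in this sketch:

    SELECT region,
           extract(month FROM sold_on) AS month,
           sum(amount) AS total
    FROM sales
    GROUP BY 1, 2
    ORDER BY 1, 2
    \crosstabview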
@@ -7087,7 +10310,7 @@ this commit is also listed in the compatibility section 2016-04-04 [2bbe9112a] Add a \gexec command to psql for evaluation of computed --> - Add a \gexec command that executes a query and + Add a \gexec command that executes a query and re-submits the result(s) as new queries (Corey Huinker) @@ -7097,9 +10320,9 @@ this commit is also listed in the compatibility section 2015-10-05 [2145a7660] psql: allow \pset C in setting the title, matches \C --> - Allow \pset C string + Allow \pset C string to set the table title, for consistency with \C - string (Bruce Momjian) + string (Bruce Momjian) @@ -7108,7 +10331,7 @@ this commit is also listed in the compatibility section 2016-03-11 [69ab7b9d6] psql: Don't automatically use expanded format when there --> - In \pset expanded auto mode, do not use expanded + In \pset expanded auto mode, do not use expanded format for query results with only one column (Andreas Karlsson, Robert Haas) @@ -7120,16 +10343,16 @@ this commit is also listed in the compatibility section 2016-06-15 [9901d8ac2] Use strftime("%c") to format timestamps in psql's \watch --> - Improve the headers output by the \watch command + Improve the headers output by the \watch command (Michael Paquier, Tom Lane) - Include the \pset title string if one has + Include the \pset title string if one has been set, and shorten the prefabricated part of the - header to be timestamp (every - Ns). Also, the timestamp format now - obeys psql's locale environment. + header to be timestamp (every + Ns). Also, the timestamp format now + obeys psql's locale environment. @@ -7177,7 +10400,7 @@ this commit is also listed in the compatibility section 2015-07-07 [275f05c99] Add psql PROMPT variable showing the pid of the connecte --> - Add a PROMPT option %p to insert the + Add a PROMPT option %p to insert the process ID of the connected backend (Julien Rouhaud) @@ -7188,13 +10411,13 @@ this commit is also listed in the compatibility section This commit is also listed under libpq and PL/pgSQL --> - Introduce a feature whereby the CONTEXT field of + Introduce a feature whereby the CONTEXT field of messages can be suppressed, either always or only for non-error messages (Pavel Stehule) - Printing CONTEXT only for errors is now the default + Printing CONTEXT only for errors is now the default behavior. This can be changed by setting the special variable SHOW_CONTEXT. @@ -7205,7 +10428,7 @@ This commit is also listed under libpq and PL/pgSQL 2016-07-11 [a670c24c3] Improve output of psql's \df+ command. --> - Make \df+ show function access privileges and + Make \df+ show function access privileges and parallel-safety attributes (Michael Paquier) @@ -7215,7 +10438,7 @@ This commit is also listed under libpq and PL/pgSQL - <xref linkend="pgbench"> + <xref linkend="pgbench"/> @@ -7224,7 +10447,7 @@ This commit is also listed under libpq and PL/pgSQL 2016-03-20 [68ab8e8ba] SQL commands in pgbench scripts are now ended by semicol --> - SQL commands in pgbench scripts are now ended by + SQL commands in pgbench scripts are now ended by semicolons, not newlines (Kyotaro Horiguchi, Tom Lane) @@ -7233,7 +10456,7 @@ This commit is also listed under libpq and PL/pgSQL Existing custom scripts will need to be modified to add a semicolon at the end of each line that does not have one already. (Doing so does not break the script for use with older versions - of pgbench.) + of pgbench.) 
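To illustrate both pgbench changes above (semicolon-terminated SQL and the built-in random functions that replace \setrandom), a custom-script excerpt might now look like the following; the table and the :scale variable follow the standard pgbench_accounts setup:

    \set aid random(1, 100000 * :scale)
    \set delta random_gaussian(-5000, 5000, 2.5)
    UPDATE pgbench_accounts SET abalance = abalance + :delta WHERE aid = :aid;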
@@ -7246,7 +10469,7 @@ This commit is also listed under libpq and PL/pgSQL --> Support floating-point arithmetic, as well as some built-in functions, in + linkend="pgbench-builtin-functions">built-in functions, in expressions in backslash commands (Fabien Coelho) @@ -7256,18 +10479,18 @@ This commit is also listed under libpq and PL/pgSQL 2016-03-29 [ad9566470] pgbench: Remove \setrandom. --> - Replace \setrandom with built-in functions (Fabien + Replace \setrandom with built-in functions (Fabien Coelho) The new built-in functions include random(), - random_exponential(), and - random_gaussian(), which perform the same work as - \setrandom, but are easier to use since they can be + linkend="pgbench-functions">random(), + random_exponential(), and + random_gaussian(), which perform the same work as + \setrandom, but are easier to use since they can be embedded in larger expressions. Since these additions have made - \setrandom obsolete, remove it. + \setrandom obsolete, remove it. @@ -7282,8 +10505,8 @@ This commit is also listed under libpq and PL/pgSQL - This is done with the new switch, which works + similarly to for custom scripts. @@ -7298,7 +10521,7 @@ This commit is also listed under libpq and PL/pgSQL - When multiple scripts are specified, each pgbench + When multiple scripts are specified, each pgbench transaction randomly chooses one to execute. Formerly this was always done with uniform probability, but now different selection probabilities can be specified for different scripts. @@ -7325,7 +10548,7 @@ This commit is also listed under libpq and PL/pgSQL 2015-09-16 [1def9063c] pgbench progress with timestamp --> - Add a option to report progress with Unix epoch timestamps, instead of time since the run started (Fabien Coelho) @@ -7336,8 +10559,8 @@ This commit is also listed under libpq and PL/pgSQL 2015-07-03 [ba3deeefb] Lift the limitation that # of clients must be a multiple --> - Allow the number of client connections () to not + be an exact multiple of the number of threads () (Fabien Coelho) @@ -7347,13 +10570,13 @@ This commit is also listed under libpq and PL/pgSQL 2016-03-09 [accf7616f] pgbench: When -T is used, don't wait for transactions be --> - When the option is used, stop promptly at the end of the specified time (Fabien Coelho) Previously, specifying a low transaction rate could cause - pgbench to wait significantly longer than + pgbench to wait significantly longer than specified. @@ -7374,15 +10597,15 @@ This commit is also listed under libpq and PL/pgSQL 2015-12-17 [66d947b9d] Adjust behavior of single-user -j mode for better initdb --> - Improve error reporting during initdb's + Improve error reporting during initdb's post-bootstrap phase (Tom Lane) Previously, an error here led to reporting the entire input - file as the failing query; now just the current + file as the failing query; now just the current query is reported. To get the desired behavior, queries in - initdb's input files must be separated by blank + initdb's input files must be separated by blank lines. 
@@ -7393,7 +10616,7 @@ This commit is also listed under libpq and PL/pgSQL 2016-08-30 [d9720e437] Fix initdb misbehavior when user mis-enters superuser pa --> - Speed up initdb by using just one + Speed up initdb by using just one standalone-backend session for all the post-bootstrap steps (Tom Lane) @@ -7404,7 +10627,7 @@ This commit is also listed under libpq and PL/pgSQL 2015-12-01 [e50cda784] Use pg_rewind when target timeline was switched --> - Improve pg_rewind + Improve pg_rewind so that it can work when the target timeline changes (Alexander Korotkov) @@ -7430,7 +10653,7 @@ This commit is also listed under libpq and PL/pgSQL --> Remove obsolete - heap_formtuple/heap_modifytuple/heap_deformtuple + heap_formtuple/heap_modifytuple/heap_deformtuple functions (Peter Geoghegan) @@ -7440,16 +10663,16 @@ This commit is also listed under libpq and PL/pgSQL 2016-08-27 [b9fe6cbc8] Add macros to make AllocSetContextCreate() calls simpler --> - Add macros to make AllocSetContextCreate() calls simpler + Add macros to make AllocSetContextCreate() calls simpler and safer (Tom Lane) Writing out the individual sizing parameters for a memory context is now deprecated in favor of using one of the new - macros ALLOCSET_DEFAULT_SIZES, - ALLOCSET_SMALL_SIZES, - or ALLOCSET_START_SMALL_SIZES. + macros ALLOCSET_DEFAULT_SIZES, + ALLOCSET_SMALL_SIZES, + or ALLOCSET_START_SMALL_SIZES. Existing code continues to work, however. @@ -7459,7 +10682,7 @@ This commit is also listed under libpq and PL/pgSQL 2015-08-05 [de6fd1c89] Rely on inline functions even if that causes warnings in --> - Unconditionally use static inline functions in header + Unconditionally use static inline functions in header files (Andres Freund) @@ -7480,7 +10703,7 @@ This commit is also listed under libpq and PL/pgSQL 2016-05-06 [6bd356c33] Add TAP tests for pg_dump --> - Improve TAP testing infrastructure (Michael + Improve TAP testing infrastructure (Michael Paquier, Craig Ringer, Álvaro Herrera, Stephen Frost) @@ -7495,7 +10718,7 @@ This commit is also listed under libpq and PL/pgSQL 2015-09-11 [aa65de042] When trace_lwlocks is used, identify individual lwlocks --> - Make trace_lwlocks identify individual locks by name + Make trace_lwlocks identify individual locks by name (Robert Haas) @@ -7507,7 +10730,7 @@ This commit is also listed under libpq and PL/pgSQL 2016-01-05 [4f18010af] Convert psql's tab completion for backslash commands to --> - Improve psql's tab-completion code infrastructure + Improve psql's tab-completion code infrastructure (Thomas Munro, Michael Paquier) @@ -7522,7 +10745,7 @@ This commit is also listed under libpq and PL/pgSQL 2016-01-05 [efa318bcf] Make pg_shseclabel available in early backend startup --> - Nail the pg_shseclabel system catalog into cache, + Nail the pg_shseclabel system catalog into cache, so that it is available for access during connection authentication (Adam Brightwell) @@ -7541,21 +10764,21 @@ This commit is also listed under libpq and PL/pgSQL --> Restructure index access - method API to hide most of it at - the C level (Alexander Korotkov, Andrew Gierth) + method API to hide most of it at + the C level (Alexander Korotkov, Andrew Gierth) - This change modernizes the index AM API to look more + This change modernizes the index AM API to look more like the designs we have adopted for foreign data wrappers and - tablesample handlers. This simplifies the C code + tablesample handlers. 
This simplifies the C code and makes it much more practical to define index access methods in installable extensions. A consequence is that most of the columns - of the pg_am system catalog have disappeared. + of the pg_am system catalog have disappeared. New inspection functions have been added to allow SQL queries to determine index AM properties that used to be discoverable - from pg_am. + from pg_am. @@ -7565,14 +10788,14 @@ This commit is also listed under libpq and PL/pgSQL --> Add pg_init_privs + linkend="catalog-pg-init-privs">pg_init_privs system catalog to hold original privileges - of initdb-created and extension-created objects + of initdb-created and extension-created objects (Stephen Frost) - This infrastructure allows pg_dump to dump changes + This infrastructure allows pg_dump to dump changes that an installation may have made in privileges attached to system objects. Formerly, such changes would be lost in a dump and reload, but now they are preserved. @@ -7584,14 +10807,14 @@ This commit is also listed under libpq and PL/pgSQL 2016-02-04 [c1772ad92] Change the way that LWLocks for extensions are allocated --> - Change the way that extensions allocate custom LWLocks + Change the way that extensions allocate custom LWLocks (Amit Kapila, Robert Haas) - The RequestAddinLWLocks() function is removed, - and replaced by RequestNamedLWLockTranche(). - This allows better identification of custom LWLocks, + The RequestAddinLWLocks() function is removed, + and replaced by RequestNamedLWLockTranche(). + This allows better identification of custom LWLocks, and is less error-prone. @@ -7615,7 +10838,7 @@ This commit is also listed under libpq and PL/pgSQL - This change allows FDWs or custom scan providers + This change allows FDWs or custom scan providers to store data in a plan tree in a more convenient format than was previously possible. @@ -7632,7 +10855,7 @@ This commit is also listed under libpq and PL/pgSQL --> Make the planner deal with post-scan/join query steps by generating - and comparing Paths, replacing a lot of ad-hoc logic + and comparing Paths, replacing a lot of ad-hoc logic (Tom Lane) @@ -7682,7 +10905,7 @@ This commit is also listed under libpq and PL/pgSQL 2016-03-24 [c1156411a] Move psql's psqlscan.l into src/fe_utils. --> - Separate out psql's flex lexer to + Separate out psql's flex lexer to make it usable by other client programs (Tom Lane, Kyotaro Horiguchi) @@ -7691,12 +10914,12 @@ This commit is also listed under libpq and PL/pgSQL This eliminates code duplication for programs that need to be able to parse SQL commands well enough to identify command boundaries. Doing that in full generality is more painful than one could - wish, and up to now only psql has really gotten + wish, and up to now only psql has really gotten it right among our supported client programs. - A new source-code subdirectory src/fe_utils/ has + A new source-code subdirectory src/fe_utils/ has been created to hold this and other code that is shared across our client programs. Formerly such sharing was accomplished by symbolic linking or copying source files at build time, which @@ -7709,7 +10932,7 @@ This commit is also listed under libpq and PL/pgSQL 2016-03-21 [98a64d0bd] Introduce WaitEventSet API. 
--> - Introduce WaitEventSet API to allow + Introduce WaitEventSet API to allow efficient waiting for event sets that usually do not change from one wait to the next (Andres Freund, Amit Kapila) @@ -7720,16 +10943,16 @@ This commit is also listed under libpq and PL/pgSQL 2016-04-01 [65578341a] Add Generic WAL interface --> - Add a generic interface for writing WAL records + Add a generic interface for writing WAL records (Alexander Korotkov, Petr Jelínek, Markus Nullmeier) - This change allows extensions to write WAL records for + This change allows extensions to write WAL records for changes to pages using a standard layout. The problem of needing to - replay WAL without access to the extension is solved by + replay WAL without access to the extension is solved by having generic replay code. This allows extensions to implement, - for example, index access methods and have WAL + for example, index access methods and have WAL support for them. @@ -7739,13 +10962,13 @@ This commit is also listed under libpq and PL/pgSQL 2016-04-06 [3fe3511d0] Generic Messages for Logical Decoding --> - Support generic WAL messages for logical decoding + Support generic WAL messages for logical decoding (Petr Jelínek, Andres Freund) This feature allows extensions to insert data into the - WAL stream that can be read by logical-decoding + WAL stream that can be read by logical-decoding plugins, but is not connected to physical data restoration. @@ -7757,12 +10980,12 @@ This commit is also listed under libpq and PL/pgSQL --> Allow SP-GiST operator classes to store an arbitrary - traversal value while descending the index (Alexander + traversal value while descending the index (Alexander Lebedev, Teodor Sigaev) - This is somewhat like the reconstructed value, but it + This is somewhat like the reconstructed value, but it could be any arbitrary chunk of data, not necessarily of the same data type as the indexed column. @@ -7773,12 +10996,12 @@ This commit is also listed under libpq and PL/pgSQL 2016-04-04 [66229ac00] Introduce a LOG_SERVER_ONLY ereport level, which is neve --> - Introduce a LOG_SERVER_ONLY message level for - ereport() (David Steele) + Introduce a LOG_SERVER_ONLY message level for + ereport() (David Steele) - This level acts like LOG except that the message is + This level acts like LOG except that the message is never sent to the client. It is meant for use in auditing and similar applications. @@ -7789,14 +11012,14 @@ This commit is also listed under libpq and PL/pgSQL 2016-07-01 [548af97fc] Provide and use a makefile target to build all generated --> - Provide a Makefile target to build all generated + Provide a Makefile target to build all generated headers (Michael Paquier, Tom Lane) - submake-generated-headers can now be invoked to ensure + submake-generated-headers can now be invoked to ensure that generated backend header files are up-to-date. This is - useful in subdirectories that might be built standalone. + useful in subdirectories that might be built standalone. 
@@ -7825,8 +11048,8 @@ This commit is also listed under libpq and PL/pgSQL 2016-03-13 [7a8d87483] Rename auto_explain.sample_ratio to sample_rate --> - Add configuration parameter auto_explain.sample_rate to - allow contrib/auto_explain + Add configuration parameter auto_explain.sample_rate to + allow contrib/auto_explain to capture just a configurable fraction of all queries (Craig Ringer, Julien Rouhaud) @@ -7842,7 +11065,7 @@ This commit is also listed under libpq and PL/pgSQL 2016-04-01 [9ee014fc8] Bloom index contrib module --> - Add contrib/bloom module that + Add contrib/bloom module that implements an index access method based on Bloom filtering (Teodor Sigaev, Alexander Korotkov) @@ -7860,7 +11083,7 @@ This commit is also listed under libpq and PL/pgSQL 2015-12-28 [81ee726d8] Code and docs review for cube kNN support. --> - In contrib/cube, introduce + In contrib/cube, introduce distance operators for cubes, and support kNN-style searches in GiST indexes on cube columns (Stas Kelvich) @@ -7871,19 +11094,19 @@ This commit is also listed under libpq and PL/pgSQL 2016-02-03 [41d2c081c] Make hstore_to_jsonb_loose match hstore_to_json_loose on --> - Make contrib/hstore's hstore_to_jsonb_loose() - and hstore_to_json_loose() functions agree on what + Make contrib/hstore's hstore_to_jsonb_loose() + and hstore_to_json_loose() functions agree on what is a number (Tom Lane) - Previously, hstore_to_jsonb_loose() would convert - numeric-looking strings to JSON numbers, rather than - strings, even if they did not exactly match the JSON + Previously, hstore_to_jsonb_loose() would convert + numeric-looking strings to JSON numbers, rather than + strings, even if they did not exactly match the JSON syntax specification for numbers. This was inconsistent with - hstore_to_json_loose(), so tighten the test to match - the JSON syntax. + hstore_to_json_loose(), so tighten the test to match + the JSON syntax. @@ -7893,7 +11116,7 @@ This commit is also listed under libpq and PL/pgSQL --> Add selectivity estimation functions for - contrib/intarray operators + contrib/intarray operators to improve plans for queries using those operators (Yury Zhuravlev, Alexander Korotkov) @@ -7905,10 +11128,10 @@ This commit is also listed under libpq and PL/pgSQL --> Make contrib/pageinspect's - heap_page_items() function show the raw data in each - tuple, and add new functions tuple_data_split() and - heap_page_item_attrs() for inspection of individual + linkend="pageinspect">contrib/pageinspect's + heap_page_items() function show the raw data in each + tuple, and add new functions tuple_data_split() and + heap_page_item_attrs() for inspection of individual tuple fields (Nikolay Shaplov) @@ -7918,9 +11141,9 @@ This commit is also listed under libpq and PL/pgSQL 2016-03-09 [188f359d3] pgcrypto: support changing S2K iteration count --> - Add an optional S2K iteration count parameter to - contrib/pgcrypto's - pgp_sym_encrypt() function (Jeff Janes) + Add an optional S2K iteration count parameter to + contrib/pgcrypto's + pgp_sym_encrypt() function (Jeff Janes) @@ -7929,8 +11152,8 @@ This commit is also listed under libpq and PL/pgSQL 2016-03-16 [f576b17cd] Add word_similarity to pg_trgm contrib module. 
--> - Add support for word similarity to - contrib/pg_trgm + Add support for word similarity to + contrib/pg_trgm (Alexander Korotkov, Artur Zakirov) @@ -7947,14 +11170,14 @@ This commit is also listed under libpq and PL/pgSQL --> Add configuration parameter - pg_trgm.similarity_threshold for - contrib/pg_trgm's similarity threshold (Artur Zakirov) + pg_trgm.similarity_threshold for + contrib/pg_trgm's similarity threshold (Artur Zakirov) This threshold has always been configurable, but formerly it was - controlled by special-purpose functions set_limit() - and show_limit(). Those are now deprecated. + controlled by special-purpose functions set_limit() + and show_limit(). Those are now deprecated. @@ -7963,7 +11186,7 @@ This commit is also listed under libpq and PL/pgSQL 2015-07-20 [97f301464] This supports the triconsistent function for pg_trgm GIN --> - Improve contrib/pg_trgm's GIN operator class to + Improve contrib/pg_trgm's GIN operator class to speed up index searches in which both common and rare keys appear (Jeff Janes) @@ -7975,7 +11198,7 @@ This commit is also listed under libpq and PL/pgSQL --> Improve performance of similarity searches in - contrib/pg_trgm GIN indexes (Christophe Fornaroli) + contrib/pg_trgm GIN indexes (Christophe Fornaroli) @@ -7986,7 +11209,7 @@ This commit is also listed under libpq and PL/pgSQL --> Add contrib/pg_visibility module + linkend="pgvisibility">contrib/pg_visibility module to allow examining table visibility maps (Robert Haas) @@ -7996,9 +11219,9 @@ This commit is also listed under libpq and PL/pgSQL 2015-09-07 [49124613f] contrib/sslinfo: add ssl_extension_info SRF --> - Add ssl_extension_info() - function to contrib/sslinfo, to print information - about SSL extensions present in the X509 + Add ssl_extension_info() + function to contrib/sslinfo, to print information + about SSL extensions present in the X509 certificate used for the current connection (Dmitry Voronin) @@ -8006,7 +11229,7 @@ This commit is also listed under libpq and PL/pgSQL - <link linkend="postgres-fdw"><filename>postgres_fdw</></> + <link linkend="postgres-fdw"><filename>postgres_fdw</filename></link> @@ -8053,12 +11276,12 @@ This commit is also listed under libpq and PL/pgSQL 2016-03-18 [0bf3ae88a] Directly modify foreign tables. --> - When feasible, perform UPDATE or DELETE + When feasible, perform UPDATE or DELETE entirely on the remote server (Etsuro Fujita) - Formerly, remote updates involved sending a SELECT FOR UPDATE + Formerly, remote updates involved sending a SELECT FOR UPDATE command and then updating or deleting the selected rows one-by-one. While that is still necessary if the operation requires any local processing, it can now be done remotely if all elements of the @@ -8076,7 +11299,7 @@ This commit is also listed under libpq and PL/pgSQL - Formerly, postgres_fdw always fetched 100 rows at + Formerly, postgres_fdw always fetched 100 rows at a time from remote queries; now that behavior is configurable. diff --git a/doc/src/sgml/release-old.sgml b/doc/src/sgml/release-old.sgml index d4de6b1357..d55209d85b 100644 --- a/doc/src/sgml/release-old.sgml +++ b/doc/src/sgml/release-old.sgml @@ -15,7 +15,7 @@ - This is expected to be the last PostgreSQL release + This is expected to be the last PostgreSQL release in the 7.3.X series. Users are encouraged to update to a newer release branch soon. @@ -26,7 +26,7 @@ A dump/restore is not required for those running 7.3.X. However, if you are upgrading from a version earlier than 7.3.13, - see . + see . 
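Returning to the contrib changes earlier in this hunk (before the 7.3.X material), the pg_trgm similarity threshold is now an ordinary configuration parameter and postgres_fdw's fetch size is adjustable; a sketch, with my_fdw_server as a hypothetical foreign server:

    SET pg_trgm.similarity_threshold = 0.4;       -- replaces SELECT set_limit(0.4)
    SELECT word_similarity('word', 'two words');  -- new word-similarity support

    ALTER SERVER my_fdw_server OPTIONS (ADD fetch_size '500');  -- previously fixed at 100 rows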
@@ -39,7 +39,7 @@ Prevent functions in indexes from executing with the privileges of - the user running VACUUM, ANALYZE, etc (Tom) + the user running VACUUM, ANALYZE, etc (Tom) @@ -50,60 +50,60 @@ (Note that triggers, defaults, check constraints, etc. pose the same type of risk.) But functions in indexes pose extra danger because they will be executed by routine maintenance operations - such as VACUUM FULL, which are commonly performed + such as VACUUM FULL, which are commonly performed automatically under a superuser account. For example, a nefarious user can execute code with superuser privileges by setting up a trojan-horse index definition and waiting for the next routine vacuum. The fix arranges for standard maintenance operations - (including VACUUM, ANALYZE, REINDEX, - and CLUSTER) to execute as the table owner rather than + (including VACUUM, ANALYZE, REINDEX, + and CLUSTER) to execute as the table owner rather than the calling user, using the same privilege-switching mechanism already - used for SECURITY DEFINER functions. To prevent bypassing + used for SECURITY DEFINER functions. To prevent bypassing this security measure, execution of SET SESSION - AUTHORIZATION and SET ROLE is now forbidden within a - SECURITY DEFINER context. (CVE-2007-6600) + AUTHORIZATION and SET ROLE is now forbidden within a + SECURITY DEFINER context. (CVE-2007-6600) - Require non-superusers who use /contrib/dblink to use only + Require non-superusers who use /contrib/dblink to use only password authentication, as a security measure (Joe) The fix that appeared for this in 7.3.20 was incomplete, as it plugged - the hole for only some dblink functions. (CVE-2007-6601, + the hole for only some dblink functions. (CVE-2007-6601, CVE-2007-3278) - Fix potential crash in translate() when using a multibyte + Fix potential crash in translate() when using a multibyte database encoding (Tom) - Make contrib/tablefunc's crosstab() handle + Make contrib/tablefunc's crosstab() handle NULL rowid as a category in its own right, rather than crashing (Joe) - Require a specific version of Autoconf to be used - when re-generating the configure script (Peter) + Require a specific version of Autoconf to be used + when re-generating the configure script (Peter) This affects developers and packagers only. The change was made to prevent accidental use of untested combinations of - Autoconf and PostgreSQL versions. + Autoconf and PostgreSQL versions. You can remove the version check if you really want to use a - different Autoconf version, but it's + different Autoconf version, but it's your responsibility whether the result works or not. @@ -131,7 +131,7 @@ A dump/restore is not required for those running 7.3.X. However, if you are upgrading from a version earlier than 7.3.13, - see . + see . @@ -144,27 +144,27 @@ Prevent index corruption when a transaction inserts rows and - then aborts close to the end of a concurrent VACUUM + then aborts close to the end of a concurrent VACUUM on the same table (Tom) - Make CREATE DOMAIN ... DEFAULT NULL work properly (Tom) + Make CREATE DOMAIN ... DEFAULT NULL work properly (Tom) - Fix crash when log_min_error_statement logging runs out + Fix crash when log_min_error_statement logging runs out of memory (Tom) - Require non-superusers who use /contrib/dblink to use only + Require non-superusers who use /contrib/dblink to use only password authentication, as a security measure (Joe) @@ -193,7 +193,7 @@ A dump/restore is not required for those running 7.3.X. 
However, if you are upgrading from a version earlier than 7.3.13, - see . + see . @@ -206,22 +206,22 @@ Support explicit placement of the temporary-table schema within - search_path, and disable searching it for functions + search_path, and disable searching it for functions and operators (Tom) This is needed to allow a security-definer function to set a - truly secure value of search_path. Without it, + truly secure value of search_path. Without it, an unprivileged SQL user can use temporary objects to execute code with the privileges of the security-definer function (CVE-2007-2138). - See CREATE FUNCTION for more information. + See CREATE FUNCTION for more information. - Fix potential-data-corruption bug in how VACUUM FULL handles - UPDATE chains (Tom, Pavan Deolasee) + Fix potential-data-corruption bug in how VACUUM FULL handles + UPDATE chains (Tom, Pavan Deolasee) @@ -249,7 +249,7 @@ A dump/restore is not required for those running 7.3.X. However, if you are upgrading from a version earlier than 7.3.13, - see . + see . @@ -310,7 +310,7 @@ A dump/restore is not required for those running 7.3.X. However, if you are upgrading from a version earlier than 7.3.13, - see . + see . @@ -322,13 +322,13 @@ - to_number() and to_char(numeric) - are now STABLE, not IMMUTABLE, for - new initdb installs (Tom) + to_number() and to_char(numeric) + are now STABLE, not IMMUTABLE, for + new initdb installs (Tom) - This is because lc_numeric can potentially + This is because lc_numeric can potentially change the output of these functions. @@ -339,7 +339,7 @@ - This improves psql \d performance also. + This improves psql \d performance also. @@ -366,7 +366,7 @@ A dump/restore is not required for those running 7.3.X. However, if you are upgrading from a version earlier than 7.3.13, - see . + see . @@ -376,7 +376,7 @@ Fix corner cases in pattern matching for - psql's \d commands + psql's \d commands Fix index-corrupting bugs in /contrib/ltree (Teodor) Back-port 7.4 spinlock code to improve performance and support @@ -409,7 +409,7 @@ A dump/restore is not required for those running 7.3.X. However, if you are upgrading from a version earlier than 7.3.13, - see . + see . @@ -419,9 +419,9 @@ into SQL commands, you should examine them as soon as possible to ensure that they are using recommended escaping techniques. In most cases, applications should be using subroutines provided by - libraries or drivers (such as libpq's - PQescapeStringConn()) to perform string escaping, - rather than relying on ad hoc code to do it. + libraries or drivers (such as libpq's + PQescapeStringConn()) to perform string escaping, + rather than relying on ad hoc code to do it. @@ -431,46 +431,46 @@ Change the server to reject invalidly-encoded multibyte characters in all cases (Tatsuo, Tom) -While PostgreSQL has been moving in this direction for +While PostgreSQL has been moving in this direction for some time, the checks are now applied uniformly to all encodings and all textual input, and are now always errors not merely warnings. This change defends against SQL-injection attacks of the type described in CVE-2006-2313. -Reject unsafe uses of \' in string literals +Reject unsafe uses of \' in string literals As a server-side defense against SQL-injection attacks of the type -described in CVE-2006-2314, the server now only accepts '' and not -\' as a representation of ASCII single quote in SQL string -literals. 
By default, \' is rejected only when -client_encoding is set to a client-only encoding (SJIS, BIG5, GBK, +described in CVE-2006-2314, the server now only accepts '' and not +\' as a representation of ASCII single quote in SQL string +literals. By default, \' is rejected only when +client_encoding is set to a client-only encoding (SJIS, BIG5, GBK, GB18030, or UHC), which is the scenario in which SQL injection is possible. -A new configuration parameter backslash_quote is available to +A new configuration parameter backslash_quote is available to adjust this behavior when needed. Note that full security against CVE-2006-2314 might require client-side changes; the purpose of -backslash_quote is in part to make it obvious that insecure +backslash_quote is in part to make it obvious that insecure clients are insecure. -Modify libpq's string-escaping routines to be +Modify libpq's string-escaping routines to be aware of encoding considerations -This fixes libpq-using applications for the security +This fixes libpq-using applications for the security issues described in CVE-2006-2313 and CVE-2006-2314. -Applications that use multiple PostgreSQL connections -concurrently should migrate to PQescapeStringConn() and -PQescapeByteaConn() to ensure that escaping is done correctly +Applications that use multiple PostgreSQL connections +concurrently should migrate to PQescapeStringConn() and +PQescapeByteaConn() to ensure that escaping is done correctly for the settings in use in each database connection. Applications that -do string escaping by hand should be modified to rely on library +do string escaping by hand should be modified to rely on library routines instead. Fix some incorrect encoding conversion functions -win1251_to_iso, alt_to_iso, -euc_tw_to_big5, euc_tw_to_mic, -mic_to_euc_tw were all broken to varying +win1251_to_iso, alt_to_iso, +euc_tw_to_big5, euc_tw_to_mic, +mic_to_euc_tw were all broken to varying extents. -Clean up stray remaining uses of \' in strings +Clean up stray remaining uses of \' in strings (Bruce, Jan) Fix server to use custom DH SSL parameters correctly (Michael @@ -500,7 +500,7 @@ Fuhr) A dump/restore is not required for those running 7.3.X. However, if you are upgrading from a version earlier than 7.3.13, - see . + see . @@ -510,7 +510,7 @@ Fuhr) Fix potential crash in SET -SESSION AUTHORIZATION (CVE-2006-0553) +SESSION AUTHORIZATION (CVE-2006-0553) An unprivileged user could crash the server process, resulting in momentary denial of service to other users, if the server has been compiled with Asserts enabled (which is not the default). @@ -525,14 +525,14 @@ created in 7.3.11 release. Fix race condition that could lead to file already -exists errors during pg_clog file creation +exists errors during pg_clog file creation (Tom) Fix to allow restoring dumps that have cross-schema references to custom operators (Tom) -Portability fix for testing presence of finite -and isinf during configure (Tom) +Portability fix for testing presence of finite +and isinf during configure (Tom) @@ -557,10 +557,10 @@ and isinf during configure (Tom) A dump/restore is not required for those running 7.3.X. However, if you are upgrading from a version earlier than 7.3.10, - see . - Also, you might need to REINDEX indexes on textual + see . + Also, you might need to REINDEX indexes on textual columns after updating, if you are affected by the locale or - plperl issues described below. + plperl issues described below. 
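For example, an index on an affected textual column could be rebuilt along these lines (a minimal sketch; the index and table names are placeholders, not objects mentioned in this release):

REINDEX INDEX some_text_idx;
-- or rebuild every index on the table in one step:
REINDEX TABLE some_table;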
@@ -571,28 +571,28 @@ and isinf during configure (Tom) Fix character string comparison for locales that consider different character combinations as equal, such as Hungarian (Tom) -This might require REINDEX to fix existing indexes on +This might require REINDEX to fix existing indexes on textual columns. Set locale environment variables during postmaster startup -to ensure that plperl won't change the locale later -This fixes a problem that occurred if the postmaster was +to ensure that plperl won't change the locale later +This fixes a problem that occurred if the postmaster was started with environment variables specifying a different locale than what -initdb had been told. Under these conditions, any use of -plperl was likely to lead to corrupt indexes. You might need -REINDEX to fix existing indexes on +initdb had been told. Under these conditions, any use of +plperl was likely to lead to corrupt indexes. You might need +REINDEX to fix existing indexes on textual columns if this has happened to you. Fix longstanding bug in strpos() and regular expression handling in certain rarely used Asian multi-byte character sets (Tatsuo) -Fix bug in /contrib/pgcrypto gen_salt, +Fix bug in /contrib/pgcrypto gen_salt, which caused it not to use all available salt space for MD5 and XDES algorithms (Marko Kreen, Solar Designer) Salts for Blowfish and standard DES are unaffected. -Fix /contrib/dblink to throw an error, +Fix /contrib/dblink to throw an error, rather than crashing, when the number of columns specified is different from what's actually returned by the query (Joe) @@ -619,7 +619,7 @@ what's actually returned by the query (Joe) A dump/restore is not required for those running 7.3.X. However, if you are upgrading from a version earlier than 7.3.10, - see . + see . @@ -634,13 +634,13 @@ for the wrong page, leading to an Assert failure or data corruption. -/contrib/ltree fixes (Teodor) +/contrib/ltree fixes (Teodor) Fix longstanding planning error for outer joins This bug sometimes caused a bogus error RIGHT JOIN is -only supported with merge-joinable join conditions. +only supported with merge-joinable join conditions. -Prevent core dump in pg_autovacuum when a +Prevent core dump in pg_autovacuum when a table has been dropped @@ -666,7 +666,7 @@ table has been dropped A dump/restore is not required for those running 7.3.X. However, if you are upgrading from a version earlier than 7.3.10, - see . + see . @@ -674,25 +674,25 @@ table has been dropped Changes -Fix error that allowed VACUUM to remove -ctid chains too soon, and add more checking in code that follows -ctid links +Fix error that allowed VACUUM to remove +ctid chains too soon, and add more checking in code that follows +ctid links This fixes a long-standing problem that could cause crashes in very rare circumstances. -Fix CHAR() to properly pad spaces to the specified +Fix CHAR() to properly pad spaces to the specified length when using a multiple-byte character set (Yoshiyuki Asaba) -In prior releases, the padding of CHAR() was incorrect +In prior releases, the padding of CHAR() was incorrect because it only padded to the specified number of bytes without considering how many characters were stored. Fix missing rows in queries like UPDATE a=... WHERE -a... with GiST index on column a +a... 
with GiST index on column a Improve checking for partially-written WAL pages Improve robustness of signal handling when SSL is enabled Various memory leakage fixes Various portability improvements -Fix PL/pgSQL to handle var := var correctly when +Fix PL/pgSQL to handle var := var correctly when the variable is of pass-by-reference type @@ -754,17 +754,17 @@ COMMIT; - The above procedure must be carried out in each database - of an installation, including template1, and ideally - including template0 as well. If you do not fix the + The above procedure must be carried out in each database + of an installation, including template1, and ideally + including template0 as well. If you do not fix the template databases then any subsequently created databases will contain - the same error. template1 can be fixed in the same way - as any other database, but fixing template0 requires + the same error. template1 can be fixed in the same way + as any other database, but fixing template0 requires additional steps. First, from any database issue: UPDATE pg_database SET datallowconn = true WHERE datname = 'template0'; - Next connect to template0 and perform the above repair + Next connect to template0 and perform the above repair procedure. Finally, do: -- re-freeze template0: @@ -792,34 +792,34 @@ VACUUM freshly-inserted data, although the scenario seems of very low probability. There are no known cases of it having caused more than an Assert failure. -Fix comparisons of TIME WITH TIME ZONE values +Fix comparisons of TIME WITH TIME ZONE values The comparison code was wrong in the case where the ---enable-integer-datetimes configuration switch had been used. -NOTE: if you have an index on a TIME WITH TIME ZONE column, -it will need to be REINDEXed after installing this update, because +--enable-integer-datetimes configuration switch had been used. +NOTE: if you have an index on a TIME WITH TIME ZONE column, +it will need to be REINDEXed after installing this update, because the fix corrects the sort order of column values. -Fix EXTRACT(EPOCH) for -TIME WITH TIME ZONE values +Fix EXTRACT(EPOCH) for +TIME WITH TIME ZONE values Fix mis-display of negative fractional seconds in -INTERVAL values +INTERVAL values This error only occurred when the ---enable-integer-datetimes configuration switch had been used. +--enable-integer-datetimes configuration switch had been used. Additional buffer overrun checks in plpgsql (Neil) -Fix pg_dump to dump trigger names containing % +Fix pg_dump to dump trigger names containing % correctly (Neil) -Prevent to_char(interval) from dumping core for +Prevent to_char(interval) from dumping core for month-related formats -Fix contrib/pgcrypto for newer OpenSSL builds +Fix contrib/pgcrypto for newer OpenSSL builds (Marko Kreen) Still more 64-bit fixes for -contrib/intagg +contrib/intagg Prevent incorrect optimization of functions returning -RECORD +RECORD @@ -850,11 +850,11 @@ month-related formats Changes -Disallow LOAD to non-superusers +Disallow LOAD to non-superusers On platforms that will automatically execute initialization functions of a shared library (this includes at least Windows and ELF-based Unixen), -LOAD can be used to make the server execute arbitrary code. +LOAD can be used to make the server execute arbitrary code. Thanks to NGS Software for reporting this. 
Check that creator of an aggregate function has the right to execute the specified transition functions @@ -909,7 +909,7 @@ datestyles Repair possible failure to update hint bits on disk Under rare circumstances this oversight could lead to -could not access transaction status failures, which qualifies +could not access transaction status failures, which qualifies it as a potential-data-loss bug. Ensure that hashed outer join does not miss tuples @@ -1264,13 +1264,13 @@ operations on bytea columns (Joe) Restore creation of OID column in CREATE TABLE AS / SELECT INTO -Fix pg_dump core dump when dumping views having comments +Fix pg_dump core dump when dumping views having comments Dump DEFERRABLE/INITIALLY DEFERRED constraints properly Fix UPDATE when child table's column numbering differs from parent Increase default value of max_fsm_relations Fix problem when fetching backwards in a cursor for a single-row query Make backward fetch work properly with cursor on SELECT DISTINCT query -Fix problems with loading pg_dump files containing contrib/lo usage +Fix problems with loading pg_dump files containing contrib/lo usage Fix problem with all-numeric user names Fix possible memory leak and core dump during disconnect in libpgtcl Make plpython's spi_execute command handle nulls properly (Andrew Bosma) @@ -1328,7 +1328,7 @@ operations on bytea columns (Joe) Fix a core dump of COPY TO when client/server encodings don't match (Tom) -Allow pg_dump to work with pre-7.2 servers (Philip) +Allow pg_dump to work with pre-7.2 servers (Philip) contrib/adddepend fixes (Tom) Fix problem with deletion of per-user/per-database config settings (Tom) contrib/vacuumlo fix (Tom) @@ -1418,7 +1418,7 @@ operations on bytea columns (Joe) PostgreSQL now records object dependencies, which allows improvements in many areas. DROP statements now take either - CASCADE or RESTRICT to control whether + CASCADE or RESTRICT to control whether dependent objects are also dropped. @@ -1458,7 +1458,7 @@ operations on bytea columns (Joe) A large number of interfaces have been moved to http://gborg.postgresql.org + url="http://gborg.postgresql.org">http://gborg.postgresql.org where they can be developed and released independently. @@ -1469,9 +1469,9 @@ operations on bytea columns (Joe) By default, functions can now take up to 32 parameters, and - identifiers can be up to 63 bytes long. Also, OPAQUE - is now deprecated: there are specific pseudo-datatypes - to represent each of the former meanings of OPAQUE + identifiers can be up to 63 bytes long. Also, OPAQUE + is now deprecated: there are specific pseudo-datatypes + to represent each of the former meanings of OPAQUE in function argument and result types. @@ -1484,12 +1484,12 @@ operations on bytea columns (Joe) Migration to Version 7.3 - A dump/restore using pg_dump is required for those + A dump/restore using pg_dump is required for those wishing to migrate data from any previous release. If your application examines the system catalogs, additional changes will be required due to the introduction of schemas in 7.3; for more information, see: . + url="http://developer.postgresql.org/~momjian/upgrade_tips_7.3">. @@ -1538,7 +1538,7 @@ operations on bytea columns (Joe) serial columns are no longer automatically - UNIQUE; thus, an index will not automatically be + UNIQUE; thus, an index will not automatically be created. 
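A minimal sketch of the adjustment this implies (the table and column names are illustrative only): a definition that previously relied on the implicit unique index now has to request the constraint explicitly.

CREATE TABLE items (
    id      serial UNIQUE,  -- UNIQUE (or PRIMARY KEY) must now be spelled out
    payload text
);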
@@ -1724,7 +1724,7 @@ operations on bytea columns (Joe) Have COPY TO output embedded carriage returns and newlines as \r and \n (Tom) Allow DELIMITER in COPY FROM to be 8-bit clean (Tatsuo) -Make pg_dump use ALTER TABLE ADD PRIMARY KEY, for performance (Neil) +Make pg_dump use ALTER TABLE ADD PRIMARY KEY, for performance (Neil) Disable brackets in multistatement rules (Bruce) Disable VACUUM from being called inside a function (Bruce) Allow dropdb and other scripts to use identifiers with spaces (Bruce) @@ -1736,7 +1736,7 @@ operations on bytea columns (Joe) Add 'SET LOCAL var = value' to set configuration variables for a single transaction (Tom) Allow ANALYZE to run in a transaction (Bruce) Improve COPY syntax using new WITH clauses, keep backward compatibility (Bruce) -Fix pg_dump to consistently output tags in non-ASCII dumps (Bruce) +Fix pg_dump to consistently output tags in non-ASCII dumps (Bruce) Make foreign key constraints clearer in dump file (Rod) Add COMMENT ON CONSTRAINT (Rod) Allow COPY TO/FROM to specify column names (Brent Verner) @@ -1745,9 +1745,9 @@ operations on bytea columns (Joe) Generate failure on short COPY lines rather than pad NULLs (Neil) Fix CLUSTER to preserve all table attributes (Alvaro Herrera) New pg_settings table to view/modify GUC settings (Joe) -Add smart quoting, portability improvements to pg_dump output (Peter) +Add smart quoting, portability improvements to pg_dump output (Peter) Dump serial columns out as SERIAL (Tom) -Enable large file support, >2G for pg_dump (Peter, Philip Warner, Bruce) +Enable large file support, >2G for pg_dump (Peter, Philip Warner, Bruce) Disallow TRUNCATE on tables that are involved in referential constraints (Rod) Have TRUNCATE also auto-truncate the toast table of the relation (Tom) Add clusterdb utility that will auto-cluster an entire database based on previous CLUSTER operations (Alvaro Herrera) @@ -2020,15 +2020,15 @@ VACUUM freshly-inserted data, although the scenario seems of very low probability. There are no known cases of it having caused more than an Assert failure. -Fix EXTRACT(EPOCH) for -TIME WITH TIME ZONE values +Fix EXTRACT(EPOCH) for +TIME WITH TIME ZONE values Additional buffer overrun checks in plpgsql (Neil) Fix pg_dump to dump index names and trigger names containing -% correctly (Neil) -Prevent to_char(interval) from dumping core for +% correctly (Neil) +Prevent to_char(interval) from dumping core for month-related formats -Fix contrib/pgcrypto for newer OpenSSL builds +Fix contrib/pgcrypto for newer OpenSSL builds (Marko Kreen) @@ -2060,11 +2060,11 @@ month-related formats Changes -Disallow LOAD to non-superusers +Disallow LOAD to non-superusers On platforms that will automatically execute initialization functions of a shared library (this includes at least Windows and ELF-based Unixen), -LOAD can be used to make the server execute arbitrary code. +LOAD can be used to make the server execute arbitrary code. Thanks to NGS Software for reporting this. Add needed STRICT marking to some contrib functions (Kris Jurka) @@ -2111,7 +2111,7 @@ datestyles Repair possible failure to update hint bits on disk Under rare circumstances this oversight could lead to -could not access transaction status failures, which qualifies +could not access transaction status failures, which qualifies it as a potential-data-loss bug. Ensure that hashed outer join does not miss tuples @@ -2247,7 +2247,7 @@ since PostgreSQL 7.1. 
Handle pre-1970 date values in newer versions of glibc (Tom) Fix possible hang during server shutdown Prevent spinlock hangs on SMP PPC machines (Tomoyuki Niijima) -Fix pg_dump to properly dump FULL JOIN USING (Tom) +Fix pg_dump to properly dump FULL JOIN USING (Tom) @@ -2281,7 +2281,7 @@ since PostgreSQL 7.1. Allow EXECUTE of "CREATE TABLE AS ... SELECT" in PL/pgSQL (Tom) Fix for compressed transaction log id wraparound (Tom) Fix PQescapeBytea/PQunescapeBytea so that they handle bytes > 0x7f (Tatsuo) -Fix for psql and pg_dump crashing when invoked with non-existent long options (Tatsuo) +Fix for psql and pg_dump crashing when invoked with non-existent long options (Tatsuo) Fix crash when invoking geometric operators (Tom) Allow OPEN cursor(args) (Tom) Fix for rtree_gist index build (Teodor) @@ -2354,7 +2354,7 @@ since PostgreSQL 7.1. Overview - This release improves PostgreSQL for use in + This release improves PostgreSQL for use in high-volume applications. @@ -2368,7 +2368,7 @@ since PostgreSQL 7.1. Vacuuming no longer locks tables, thus allowing normal user - access during the vacuum. A new VACUUM FULL + access during the vacuum. A new VACUUM FULL command does old-style vacuum by locking the table and shrinking the on-disk copy of the table. @@ -2400,7 +2400,7 @@ since PostgreSQL 7.1. The system now computes histogram column statistics during - ANALYZE, allowing much better optimizer choices. + ANALYZE, allowing much better optimizer choices. @@ -2472,15 +2472,15 @@ since PostgreSQL 7.1. - The pg_hba.conf and pg_ident.conf + The pg_hba.conf and pg_ident.conf configuration is now only reloaded after receiving a - SIGHUP signal, not with each connection. + SIGHUP signal, not with each connection. - The function octet_length() now returns the uncompressed data length. + The function octet_length() now returns the uncompressed data length. @@ -2693,7 +2693,7 @@ since PostgreSQL 7.1. Internationalization -National language support in psql, pg_dump, libpq, and server (Peter E) +National language support in psql, pg_dump, libpq, and server (Peter E) Message translations in Chinese (simplified, traditional), Czech, French, German, Hungarian, Russian, Swedish (Peter E, Serguei A. Mokhov, Karel Zak, Weiping He, Zhenbang Wei, Kovacs Zoltan) Make trim, ltrim, rtrim, btrim, lpad, rpad, translate multibyte aware (Tatsuo) Add LATIN5,6,7,8,9,10 support (Tatsuo) @@ -2705,7 +2705,7 @@ since PostgreSQL 7.1. - <application>PL/pgSQL</> + <application>PL/pgSQL</application> Now uses portals for SELECT loops, allowing huge result sets (Jan) CURSOR and REFCURSOR support (Jan) @@ -2745,7 +2745,7 @@ since PostgreSQL 7.1. - <application>psql</> + <application>psql</application> \d displays indexes in unique, primary groupings (Christopher Kings-Lynne) Allow trailing semicolons in backslash commands (Greg Sabino Mullane) @@ -2756,7 +2756,7 @@ since PostgreSQL 7.1. - <application>libpq</> + <application>libpq</application> New function PQescapeString() to escape quotes in command strings (Florian Weimer) New function PQescapeBytea() escapes binary strings for use as SQL string literals @@ -2818,7 +2818,7 @@ since PostgreSQL 7.1. - <application>ECPG</> + <application>ECPG</application> EXECUTE ... INTO implemented (Christof Petig) Multiple row descriptor support (e.g. CARDINALITY) (Christof Petig) @@ -2839,7 +2839,7 @@ since PostgreSQL 7.1. 
Python fix fetchone() (Gerhard Haring) Use UTF, Unicode in Tcl where appropriate (Vsevolod Lobko, Reinhard Max) Add Tcl COPY TO/FROM (ljb) -Prevent output of default index op class in pg_dump (Tom) +Prevent output of default index op class in pg_dump (Tom) Fix libpgeasy memory leak (Bruce) @@ -3547,9 +3547,9 @@ ecpg changes (Michael) SQL92 join syntax is now supported, though only as - INNER JOIN for this release. JOIN, - NATURAL JOIN, JOIN/USING, - and JOIN/ON are available, as are + INNER JOIN for this release. JOIN, + NATURAL JOIN, JOIN/USING, + and JOIN/ON are available, as are column correlation names. @@ -3959,7 +3959,7 @@ New multibyte encodings This is basically a cleanup release for 6.5.2. We have added a new - PgAccess that was missing in 6.5.2, and installed an NT-specific fix. + PgAccess that was missing in 6.5.2, and installed an NT-specific fix. @@ -4209,7 +4209,7 @@ Add Win1250 (Czech) support (Pavel Behal) We continue to expand our port list, this time including - Windows NT/ix86 and NetBSD/arm32. + Windows NT/ix86 and NetBSD/arm32. @@ -4234,7 +4234,7 @@ Add Win1250 (Czech) support (Pavel Behal) New and updated material is present throughout the documentation. New FAQs have been - contributed for SGI and AIX platforms. + contributed for SGI and AIX platforms. The Tutorial has introductory information on SQL from Stefan Simkovics. For the User's Guide, there are @@ -4926,7 +4926,7 @@ Correctly handles function calls on the left side of BETWEEN and LIKE clauses. A dump/restore is NOT required for those running 6.3 or 6.3.1. A -make distclean, make, and make install is all that is required. +make distclean, make, and make install is all that is required. This last step should be performed while the postmaster is not running. You should re-link any custom applications that use PostgreSQL libraries. @@ -5003,7 +5003,7 @@ Improvements to the configuration autodetection for installation. A dump/restore is NOT required for those running 6.3. A -make distclean, make, and make install is all that is required. +make distclean, make, and make install is all that is required. This last step should be performed while the postmaster is not running. You should re-link any custom applications that use PostgreSQL libraries. @@ -5128,7 +5128,7 @@ Better identify tcl and tk libs and includes(Bruce) Third, char() fields will now allow faster access than varchar() or - text. Specifically, the text and varchar() have a penalty for access to + text. Specifically, the text and varchar() have a penalty for access to any columns after the first column of this type. char() used to also have this access penalty, but it no longer does. This might suggest that you redesign some of your tables, especially if you have short character @@ -5470,7 +5470,7 @@ to dump the 6.1 database. -Migration from version 1.<replaceable>x</> to version 6.2 +Migration from version 1.<replaceable>x</replaceable> to version 6.2 Those migrating from earlier 1.* releases should first upgrade to 1.09 @@ -5689,11 +5689,11 @@ optimizer which uses genetic - The random results in the random test should cause the + The random results in the random test should cause the random test to be failed, since the regression tests are evaluated using a simple diff. However, - random does not seem to produce random results on my test - machine (Linux/gcc/i686). + random does not seem to produce random results on my test + machine (Linux/gcc/i686). @@ -5990,16 +5990,16 @@ and a script to convert old ASCII files. 
The following notes are for the benefit of users who want to migrate -databases from Postgres95 1.01 and 1.02 to Postgres95 1.02.1. +databases from Postgres95 1.01 and 1.02 to Postgres95 1.02.1. -If you are starting afresh with Postgres95 1.02.1 and do not need +If you are starting afresh with Postgres95 1.02.1 and do not need to migrate old databases, you do not need to read any further. -In order to upgrade older Postgres95 version 1.01 or 1.02 databases to +In order to upgrade older Postgres95 version 1.01 or 1.02 databases to version 1.02.1, the following steps are required: @@ -6013,7 +6013,7 @@ Start up a new 1.02.1 postmaster Add the new built-in functions and operators of 1.02.1 to 1.01 or 1.02 databases. This is done by running the new 1.02.1 server against your own 1.01 or 1.02 database and applying the queries attached at - the end of the file. This can be done easily through psql. If your + the end of the file. This can be done easily through psql. If your 1.01 or 1.02 database is named testdb and you have cut the commands from the end of this file and saved them in addfunc.sql: @@ -6044,7 +6044,7 @@ sed 's/^\.$/\\./g' <in_file >out_file -If you are loading an older binary copy or non-stdout copy, there is no +If you are loading an older binary copy or non-stdout copy, there is no end-of-data character, and hence no conversion necessary. @@ -6135,15 +6135,15 @@ Contributors (apologies to any missed) The following notes are for the benefit of users who want to migrate -databases from Postgres95 1.0 to Postgres95 1.01. +databases from Postgres95 1.0 to Postgres95 1.01. -If you are starting afresh with Postgres95 1.01 and do not need +If you are starting afresh with Postgres95 1.01 and do not need to migrate old databases, you do not need to read any further. -In order to Postgres95 version 1.01 with databases created with -Postgres95 version 1.0, the following steps are required: +In order to Postgres95 version 1.01 with databases created with +Postgres95 version 1.0, the following steps are required: @@ -6555,103 +6555,3 @@ The following bugs have been fixed in postgres95-beta-0.02: Initial release. - - - Timing Results - - - These timing results are from running the regression test with the commands - - -% cd src/test/regress -% make all -% time make runtest - - - - Timing under Linux 2.0.27 seems to have a roughly 5% variation from run - to run, presumably due to the scheduling vagaries of multitasking systems. - - - - Version 6.5 - - - As has been the case for previous releases, timing between - releases is not directly comparable since new regression tests - have been added. In general, 6.5 is faster than previous - releases. - - - - Timing with fsync() disabled: - - -Time System -02:00 Dual Pentium Pro 180, 224MB, UW-SCSI, Linux 2.0.36, gcc 2.7.2.3 -O2 -m486 -04:38 Sparc Ultra 1 143MHz, 64MB, Solaris 2.6 - - - - - Timing with fsync() enabled: - - -Time System -04:21 Dual Pentium Pro 180, 224MB, UW-SCSI, Linux 2.0.36, gcc 2.7.2.3 -O2 -m486 - - - For the Linux system above, using UW-SCSI disks rather than (older) IDE - disks leads to a 50% improvement in speed on the regression test. - - - - -Version 6.4beta - - -The times for this release are not directly comparable to those for previous releases -since some additional regression tests have been included. -In general, however, 6.4 should be slightly faster than the previous release (thanks, Bruce!). 
- - - -Time System -02:26 Dual Pentium Pro 180, 96MB, UW-SCSI, Linux 2.0.30, gcc 2.7.2.1 -O2 -m486 - - - - - -Version 6.3 - - -The times for this release are not directly comparable to those for previous releases -since some additional regression tests have been included and some obsolete tests involving -time travel have been removed. -In general, however, 6.3 is substantially faster than previous releases (thanks, Bruce!). - - - - Time System - 02:30 Dual Pentium Pro 180, 96MB, UW-SCSI, Linux 2.0.30, gcc 2.7.2.1 -O2 -m486 - 04:12 Dual Pentium Pro 180, 96MB, EIDE, Linux 2.0.30, gcc 2.7.2.1 -O2 -m486 - - - - - -Version 6.1 - - - - Time System - 06:12 Pentium Pro 180, 32MB, EIDE, Linux 2.0.30, gcc 2.7.2 -O2 -m486 - 12:06 P-100, 48MB, Linux 2.0.29, gcc - 39:58 Sparc IPC 32MB, Solaris 2.5, gcc 2.7.2.1 -O -g - - - - -]]> diff --git a/doc/src/sgml/release.sgml b/doc/src/sgml/release.sgml index f1f4e91252..c4e763a043 100644 --- a/doc/src/sgml/release.sgml +++ b/doc/src/sgml/release.sgml @@ -24,12 +24,14 @@ non-ASCII characters find using grep -P '[\x80-\xFF]' one page: http://www.zipcon.net/~swhite/docs/computers/browsers/entities_page.html other lists: http://www.zipcon.net/~swhite/docs/computers/browsers/entities.html http://www.zipcon.net/~swhite/docs/computers/browsers/entities_page.html - http://en.wikipedia.org/wiki/List_of_XML_and_HTML_character_entity_references + https://en.wikipedia.org/wiki/List_of_XML_and_HTML_character_entity_references - we cannot use UTF8 because SGML Docbook does not support it + We cannot use UTF8 because back branches still use SGML Docbook, + which does not support it. Also, rendering engines have to + support the referenced characters. - do not use numeric _UTF_ numeric character escapes (&#nnn;), - we can only use Latin1 + Do not use numeric _UTF_ numeric character escapes (&#nnn;), + we can only use Latin1. Example: Alvaro Herrera is Álvaro Herrera @@ -44,7 +46,7 @@ For new features, add links to the documentation sections. The release notes contain the significant changes in each - PostgreSQL release, with major features and migration + PostgreSQL release, with major features and migration issues listed at the top. The release notes do not contain changes that affect only a few users or changes that are internal and therefore not user-visible. For example, the optimizer is improved in almost every @@ -56,9 +58,9 @@ For new features, add links to the documentation sections. A complete list of changes for each release can be obtained by viewing the Git logs for each release. The pgsql-committers + url="https://www.postgresql.org/list/pgsql-committers/">pgsql-committers email list records all source code changes as well. There is also - a web + a web interface that shows changes to specific files. @@ -74,6 +76,8 @@ For new features, add links to the documentation sections. The reason for splitting the release notes this way is so that appropriate subsets can easily be copied into back branches. --> +&release-12; +&release-11; &release-10; &release-9.6; &release-9.5; diff --git a/doc/src/sgml/replication-origins.sgml b/doc/src/sgml/replication-origins.sgml index 317ca9a1df..a03ce76e2e 100644 --- a/doc/src/sgml/replication-origins.sgml +++ b/doc/src/sgml/replication-origins.sgml @@ -83,7 +83,7 @@ replication and inefficiencies. Replication origins provide an optional mechanism to recognize and prevent that. 
When configured using the functions referenced in the previous paragraph, every change and transaction passed to - output plugin callbacks (see ) + output plugin callbacks (see ) generated by the session is tagged with the replication origin of the generating session. This allows treating them differently in the output plugin, e.g. ignoring all but locally-originating rows. Additionally diff --git a/doc/src/sgml/rowtypes.sgml b/doc/src/sgml/rowtypes.sgml index 9d6768e006..a6f4f6709c 100644 --- a/doc/src/sgml/rowtypes.sgml +++ b/doc/src/sgml/rowtypes.sgml @@ -12,7 +12,7 @@ - A composite type represents the structure of a row or record; + A composite type represents the structure of a row or record; it is essentially just a list of field names and their data types. PostgreSQL allows composite types to be used in many of the same ways that simple types can be used. For example, a @@ -36,11 +36,11 @@ CREATE TYPE inventory_item AS ( price numeric ); - The syntax is comparable to CREATE TABLE, except that only + The syntax is comparable to CREATE TABLE, except that only field names and types can be specified; no constraints (such as NOT - NULL) can presently be included. Note that the AS keyword + NULL) can presently be included. Note that the AS keyword is essential; without it, the system will think a different kind - of CREATE TYPE command is meant, and you will get odd syntax + of CREATE TYPE command is meant, and you will get odd syntax errors. @@ -78,14 +78,15 @@ CREATE TABLE inventory_item ( price numeric CHECK (price > 0) ); - then the same inventory_item composite type shown above would + then the same inventory_item composite type shown above would come into being as a byproduct, and could be used just as above. Note however an important restriction of the current implementation: since no constraints are associated with a composite type, the constraints shown in the table - definition do not apply to values of the composite type - outside the table. (A partial workaround is to use domain - types as members of composite types.) + definition do not apply to values of the composite type + outside the table. (To work around this, create a domain over the composite + type, and apply the desired constraints as CHECK + constraints of the domain.) @@ -111,7 +112,7 @@ CREATE TABLE inventory_item ( '("fuzzy dice",42,1.99)' - which would be a valid value of the inventory_item type + which would be a valid value of the inventory_item type defined above. To make a field be NULL, write no characters at all in its position in the list. For example, this constant specifies a NULL third field: @@ -128,7 +129,7 @@ CREATE TABLE inventory_item ( (These constants are actually only a special case of the generic type constants discussed in . The constant is initially + linkend="sql-syntax-constants-generic"/>. The constant is initially treated as a string and passed to the composite-type input conversion routine. An explicit type specification might be necessary to tell which type to convert the constant to.) @@ -150,8 +151,8 @@ ROW('', 42, NULL) ('fuzzy dice', 42, 1.99) ('', 42, NULL) - The ROW expression syntax is discussed in more detail in . + The ROW expression syntax is discussed in more detail in . @@ -163,15 +164,15 @@ ROW('', 42, NULL) name, much like selecting a field from a table name. In fact, it's so much like selecting from a table name that you often have to use parentheses to keep from confusing the parser. 
For example, you might try to select - some subfields from our on_hand example table with something + some subfields from our on_hand example table with something like: SELECT item.name FROM on_hand WHERE item.price > 9.99; - This will not work since the name item is taken to be a table - name, not a column name of on_hand, per SQL syntax rules. + This will not work since the name item is taken to be a table + name, not a column name of on_hand, per SQL syntax rules. You must write it like this: @@ -186,7 +187,7 @@ SELECT (on_hand.item).name FROM on_hand WHERE (on_hand.item).price > 9.99; Now the parenthesized object is correctly interpreted as a reference to - the item column, and then the subfield can be selected from it. + the item column, and then the subfield can be selected from it. @@ -202,8 +203,8 @@ SELECT (my_func(...)).field FROM ... - The special field name * means all fields, as - further explained in . + The special field name * means all fields, as + further explained in . @@ -221,7 +222,7 @@ INSERT INTO mytab (complex_col) VALUES((1.1,2.2)); UPDATE mytab SET complex_col = ROW(1.1,2.2) WHERE ...; - The first example omits ROW, the second uses it; we + The first example omits ROW, the second uses it; we could have done it either way. @@ -234,12 +235,12 @@ UPDATE mytab SET complex_col.r = (complex_col).r + 1 WHERE ...; Notice here that we don't need to (and indeed cannot) put parentheses around the column name appearing just after - SET, but we do need parentheses when referencing the same + SET, but we do need parentheses when referencing the same column in the expression to the right of the equal sign. - And we can specify subfields as targets for INSERT, too: + And we can specify subfields as targets for INSERT, too: INSERT INTO mytab (complex_col.r, complex_col.i) VALUES(1.1, 2.2); @@ -260,10 +261,10 @@ INSERT INTO mytab (complex_col.r, complex_col.i) VALUES(1.1, 2.2); - In PostgreSQL, a reference to a table name (or alias) + In PostgreSQL, a reference to a table name (or alias) in a query is effectively a reference to the composite value of the table's current row. For example, if we had a table - inventory_item as shown + inventory_item as shown above, we could write: SELECT c FROM inventory_item c; @@ -278,12 +279,12 @@ SELECT c FROM inventory_item c; Note however that simple names are matched to column names before table names, so this example works only because there is no column - named c in the query's tables. + named c in the query's tables. The ordinary qualified-column-name - syntax table_name.column_name + syntax table_name.column_name can be understood as applying field selection to the composite value of the table's current row. (For efficiency reasons, it's not actually implemented that way.) @@ -306,13 +307,13 @@ SELECT c.* FROM inventory_item c; SELECT c.name, c.supplier_id, c.price FROM inventory_item c; - PostgreSQL will apply this expansion behavior to + PostgreSQL will apply this expansion behavior to any composite-valued expression, although as shown above, you need to write parentheses - around the value that .* is applied to whenever it's not a - simple table name. For example, if myfunc() is a function - returning a composite type with columns a, - b, and c, then these two queries have the + around the value that .* is applied to whenever it's not a + simple table name. 
For example, if myfunc() is a function + returning a composite type with columns a, + b, and c, then these two queries have the same result: SELECT (myfunc(x)).* FROM some_table; @@ -322,33 +323,38 @@ SELECT (myfunc(x)).a, (myfunc(x)).b, (myfunc(x)).c FROM some_table; - PostgreSQL handles column expansion by + PostgreSQL handles column expansion by actually transforming the first form into the second. So, in this - example, myfunc() would get invoked three times per row + example, myfunc() would get invoked three times per row with either syntax. If it's an expensive function you may wish to avoid that, which you can do with a query like: -SELECT (m).* FROM (SELECT myfunc(x) AS m FROM some_table OFFSET 0) ss; +SELECT m.* FROM some_table, LATERAL myfunc(x) AS m; - The OFFSET 0 clause keeps the optimizer - from flattening the sub-select to arrive at the form with - multiple calls of myfunc(). + Placing the function in + a LATERAL FROM item keeps it from + being invoked more than once per row. m.* is still + expanded into m.a, m.b, m.c, but now those variables + are just references to the output of the FROM item. + (The LATERAL keyword is optional here, but we show it + to clarify that the function is getting x + from some_table.) - The composite_value.* syntax results in + The composite_value.* syntax results in column expansion of this kind when it appears at the top level of - a SELECT output - list, a RETURNING - list in INSERT/UPDATE/DELETE, - a VALUES clause, or + a SELECT output + list, a RETURNING + list in INSERT/UPDATE/DELETE, + a VALUES clause, or a row constructor. In all other contexts (including when nested inside one of those - constructs), attaching .* to a composite value does not - change the value, since it means all columns and so the + constructs), attaching .* to a composite value does not + change the value, since it means all columns and so the same composite value is produced again. For example, - if somefunc() accepts a composite-valued argument, + if somefunc() accepts a composite-valued argument, these queries are the same: @@ -356,16 +362,16 @@ SELECT somefunc(c.*) FROM inventory_item c; SELECT somefunc(c) FROM inventory_item c; - In both cases, the current row of inventory_item is + In both cases, the current row of inventory_item is passed to the function as a single composite-valued argument. - Even though .* does nothing in such cases, using it is good + Even though .* does nothing in such cases, using it is good style, since it makes clear that a composite value is intended. In - particular, the parser will consider c in c.* to + particular, the parser will consider c in c.* to refer to a table name or alias, not to a column name, so that there is - no ambiguity; whereas without .*, it is not clear - whether c means a table name or a column name, and in fact + no ambiguity; whereas without .*, it is not clear + whether c means a table name or a column name, and in fact the column-name interpretation will be preferred if there is a column - named c. + named c. @@ -376,27 +382,27 @@ SELECT * FROM inventory_item c ORDER BY c; SELECT * FROM inventory_item c ORDER BY c.*; SELECT * FROM inventory_item c ORDER BY ROW(c.*); - All of these ORDER BY clauses specify the row's composite + All of these ORDER BY clauses specify the row's composite value, resulting in sorting the rows according to the rules described - in . However, - if inventory_item contained a column - named c, the first case would be different from the + in . 
However, + if inventory_item contained a column + named c, the first case would be different from the others, as it would mean to sort by that column only. Given the column names previously shown, these queries are also equivalent to those above: SELECT * FROM inventory_item c ORDER BY ROW(c.name, c.supplier_id, c.price); SELECT * FROM inventory_item c ORDER BY (c.name, c.supplier_id, c.price); - (The last case uses a row constructor with the key word ROW + (The last case uses a row constructor with the key word ROW omitted.) Another special syntactical behavior associated with composite values is - that we can use functional notation for extracting a field + that we can use functional notation for extracting a field of a composite value. The simple way to explain this is that - the notations field(table) - and table.field + the notations field(table) + and table.field are interchangeable. For example, these queries are equivalent: @@ -418,7 +424,7 @@ SELECT c.somefunc FROM inventory_item c; This equivalence between functional notation and field notation makes it possible to use functions on composite types to implement - computed fields. + computed fields. computed field @@ -427,7 +433,7 @@ SELECT c.somefunc FROM inventory_item c; computed An application using the last query above wouldn't need to be directly - aware that somefunc isn't a real column of the table. + aware that somefunc isn't a real column of the table. @@ -435,10 +441,13 @@ SELECT c.somefunc FROM inventory_item c; Because of this behavior, it's unwise to give a function that takes a single composite-type argument the same name as any of the fields of that composite type. If there is ambiguity, the field-name - interpretation will be preferred, so that such a function could not be - called without tricks. One way to force the function interpretation is - to schema-qualify the function name, that is, write - schema.func(compositevalue). + interpretation will be chosen if field-name syntax is used, while the + function will be chosen if function-call syntax is used. However, + PostgreSQL versions before 11 always chose the + field-name interpretation, unless the syntax of the call required it to + be a function call. One way to force the function interpretation in + older versions is to schema-qualify the function name, that is, write + schema.func(compositevalue). @@ -450,8 +459,8 @@ SELECT c.somefunc FROM inventory_item c; The external text representation of a composite value consists of items that are interpreted according to the I/O conversion rules for the individual field types, plus decoration that indicates the composite structure. - The decoration consists of parentheses (( and )) - around the whole value, plus commas (,) between adjacent + The decoration consists of parentheses (( and )) + around the whole value, plus commas (,) between adjacent items. Whitespace outside the parentheses is ignored, but within the parentheses it is considered part of the field value, and might or might not be significant depending on the input conversion rules for the field data type. @@ -466,7 +475,7 @@ SELECT c.somefunc FROM inventory_item c; As shown previously, when writing a composite value you can write double quotes around any individual field value. - You must do so if the field value would otherwise + You must do so if the field value would otherwise confuse the composite-value parser. In particular, fields containing parentheses, commas, double quotes, or backslashes must be double-quoted. 
To put a double quote or backslash in a quoted composite field value, @@ -481,7 +490,7 @@ SELECT c.somefunc FROM inventory_item c; A completely empty field value (no characters at all between the commas or parentheses) represents a NULL. To write a value that is an empty - string rather than NULL, write "". + string rather than NULL, write "". @@ -497,31 +506,31 @@ SELECT c.somefunc FROM inventory_item c; Remember that what you write in an SQL command will first be interpreted as a string literal, and then as a composite. This doubles the number of backslashes you need (assuming escape string syntax is used). - For example, to insert a text field + For example, to insert a text field containing a double quote and a backslash in a composite value, you'd need to write: -INSERT ... VALUES (E'("\\"\\\\")'); +INSERT ... VALUES ('("\"\\")'); The string-literal processor removes one level of backslashes, so that what arrives at the composite-value parser looks like - ("\"\\"). In turn, the string - fed to the text data type's input routine - becomes "\. (If we were working + ("\"\\"). In turn, the string + fed to the text data type's input routine + becomes "\. (If we were working with a data type whose input routine also treated backslashes specially, - bytea for example, we might need as many as eight backslashes + bytea for example, we might need as many as eight backslashes in the command to get one backslash into the stored composite field.) - Dollar quoting (see ) can be + Dollar quoting (see ) can be used to avoid the need to double backslashes. - The ROW constructor syntax is usually easier to work with + The ROW constructor syntax is usually easier to work with than the composite-literal syntax when writing composite values in SQL commands. - In ROW, individual field values are written the same way + In ROW, individual field values are written the same way they would be written when not members of a composite. diff --git a/doc/src/sgml/rules.sgml b/doc/src/sgml/rules.sgml index bcbc170335..3372b1ac2b 100644 --- a/doc/src/sgml/rules.sgml +++ b/doc/src/sgml/rules.sgml @@ -29,8 +29,8 @@ execution. It is very powerful, and can be used for many things such as query language procedures, views, and versions. The theoretical foundations and the power of this rule system are - also discussed in and . + also discussed in and . @@ -99,7 +99,7 @@ the range table - range table + range table @@ -150,7 +150,7 @@ the target list - target list + target list @@ -167,18 +167,18 @@ DELETE commands don't need a normal target list - because they don't produce any result. Instead, the rule system - adds a special CTID entry to the empty target list, + because they don't produce any result. Instead, the planner + adds a special CTID entry to the empty target list, to allow the executor to find the row to be deleted. - (CTID is added when the result relation is an ordinary - table. If it is a view, a whole-row variable is added instead, - as described in .) + (CTID is added when the result relation is an ordinary + table. If it is a view, a whole-row variable is added instead, by + the rule system, as described in .) For INSERT commands, the target list describes the new rows that should go into the result relation. It consists of the - expressions in the VALUES clause or the ones from the + expressions in the VALUES clause or the ones from the SELECT clause in INSERT ... SELECT. 
The first step of the rewrite process adds target list entries for any columns that were not assigned to by @@ -193,8 +193,8 @@ rule system, it contains just the expressions from the SET column = expression part of the command. The planner will handle missing columns by inserting expressions that copy the values - from the old row into the new one. Just as for DELETE, - the rule system adds a CTID or whole-row variable so that + from the old row into the new one. Just as for DELETE, + a CTID or whole-row variable is added so that the executor can identify the old row to be updated. @@ -218,7 +218,7 @@ this expression is a Boolean that tells whether the operation (INSERT, UPDATE, DELETE, or SELECT) for the - final result row should be executed or not. It corresponds to the WHERE clause + final result row should be executed or not. It corresponds to the WHERE clause of an SQL statement. @@ -230,18 +230,18 @@ - The query's join tree shows the structure of the FROM clause. + The query's join tree shows the structure of the FROM clause. For a simple query like SELECT ... FROM a, b, c, the join tree is just - a list of the FROM items, because we are allowed to join them in - any order. But when JOIN expressions, particularly outer joins, + a list of the FROM items, because we are allowed to join them in + any order. But when JOIN expressions, particularly outer joins, are used, we have to join in the order shown by the joins. - In that case, the join tree shows the structure of the JOIN expressions. The - restrictions associated with particular JOIN clauses (from ON or - USING expressions) are stored as qualification expressions attached + In that case, the join tree shows the structure of the JOIN expressions. The + restrictions associated with particular JOIN clauses (from ON or + USING expressions) are stored as qualification expressions attached to those join-tree nodes. It turns out to be convenient to store - the top-level WHERE expression as a qualification attached to the + the top-level WHERE expression as a qualification attached to the top-level join-tree item, too. So really the join tree represents - both the FROM and WHERE clauses of a SELECT. + both the FROM and WHERE clauses of a SELECT. @@ -252,7 +252,7 @@ - The other parts of the query tree like the ORDER BY + The other parts of the query tree like the ORDER BY clause aren't of interest here. The rule system substitutes some entries there while applying rules, but that doesn't have much to do with the fundamentals of the rule @@ -274,8 +274,8 @@ - view - implementation through rules + view + implementation through rules @@ -313,7 +313,7 @@ CREATE RULE "_RETURN" AS ON SELECT TO myview DO INSTEAD - Rules ON SELECT are applied to all queries as the last step, even + Rules ON SELECT are applied to all queries as the last step, even if the command given is an INSERT, UPDATE or DELETE. And they have different semantics from rules on the other command types in that they modify the @@ -322,10 +322,10 @@ CREATE RULE "_RETURN" AS ON SELECT TO myview DO INSTEAD - Currently, there can be only one action in an ON SELECT rule, and it must - be an unconditional SELECT action that is INSTEAD. This restriction was + Currently, there can be only one action in an ON SELECT rule, and it must + be an unconditional SELECT action that is INSTEAD. This restriction was required to make rules safe enough to open them for ordinary users, and - it restricts ON SELECT rules to act like views. + it restricts ON SELECT rules to act like views. 
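One way to see this correspondence is to inspect the ON SELECT rule that CREATE VIEW generates; a minimal sketch, with illustrative table and view names:

CREATE TABLE mytab (a int, b text);
CREATE VIEW myview AS SELECT a, b FROM mytab;

-- the automatically created "_RETURN" rule is visible in pg_rules:
SELECT rulename, definition FROM pg_rules WHERE tablename = 'myview';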
@@ -423,12 +423,12 @@ CREATE VIEW shoe_ready AS The CREATE VIEW command for the shoelace view (which is the simplest one we - have) will create a relation shoelace and an entry in + have) will create a relation shoelace and an entry in pg_rewrite that tells that there is a - rewrite rule that must be applied whenever the relation shoelace + rewrite rule that must be applied whenever the relation shoelace is referenced in a query's range table. The rule has no rule - qualification (discussed later, with the non-SELECT rules, since - SELECT rules currently cannot have them) and it is INSTEAD. Note + qualification (discussed later, with the non-SELECT rules, since + SELECT rules currently cannot have them) and it is INSTEAD. Note that rule qualifications are not the same as query qualifications. The action of our rule has a query qualification. The action of the rule is one query tree that is a copy of the @@ -438,7 +438,7 @@ CREATE VIEW shoe_ready AS The two extra range - table entries for NEW and OLD that you can see in + table entries for NEW and OLD that you can see in the pg_rewrite entry aren't of interest for SELECT rules. @@ -533,7 +533,7 @@ SELECT shoelace.sl_name, shoelace.sl_avail, There is one difference however: the subquery's range table has two - extra entries shoelace old and shoelace new. These entries don't + extra entries shoelace old and shoelace new. These entries don't participate directly in the query, since they aren't referenced by the subquery's join tree or target list. The rewriter uses them to store the access privilege check information that was originally present @@ -548,8 +548,8 @@ SELECT shoelace.sl_name, shoelace.sl_avail, the remaining range-table entries in the top query (in this example there are no more), and it will recursively check the range-table entries in the added subquery to see if any of them reference views. (But it - won't expand old or new — otherwise we'd have infinite recursion!) - In this example, there are no rewrite rules for shoelace_data or unit, + won't expand old or new — otherwise we'd have infinite recursion!) + In this example, there are no rewrite rules for shoelace_data or unit, so rewriting is complete and the above is the final result given to the planner. @@ -671,8 +671,8 @@ SELECT shoe_ready.shoename, shoe_ready.sh_avail, command other than a SELECT, the result relation points to the range-table entry where the result should go. Everything else is absolutely the same. So having two tables - t1 and t2 with columns a and - b, the query trees for the two statements: + t1 and t2 with columns a and + b, the query trees for the two statements: SELECT t2.b FROM t1, t2 WHERE t1.a = t2.a; @@ -685,27 +685,27 @@ UPDATE t1 SET b = t2.b FROM t2 WHERE t1.a = t2.a; - The range tables contain entries for the tables t1 and t2. + The range tables contain entries for the tables t1 and t2. The target lists contain one variable that points to column - b of the range table entry for table t2. + b of the range table entry for table t2. - The qualification expressions compare the columns a of both + The qualification expressions compare the columns a of both range-table entries for equality. - The join trees show a simple join between t1 and t2. + The join trees show a simple join between t1 and t2. @@ -714,7 +714,7 @@ UPDATE t1 SET b = t2.b FROM t2 WHERE t1.a = t2.a; The consequence is, that both query trees result in similar execution plans: They are both joins over the two tables. 
For the - UPDATE the missing columns from t1 are added to + UPDATE the missing columns from t1 are added to the target list by the planner and the final query tree will read as: @@ -736,7 +736,7 @@ SELECT t1.a, t2.b FROM t1, t2 WHERE t1.a = t2.a; one is a SELECT command and the other is an UPDATE is handled higher up in the executor, where it knows that this is an UPDATE, and it knows that - this result should go into table t1. But which of the rows + this result should go into table t1. But which of the rows that are there has to be replaced by the new row? @@ -744,12 +744,12 @@ SELECT t1.a, t2.b FROM t1, t2 WHERE t1.a = t2.a; To resolve this problem, another entry is added to the target list in UPDATE (and also in DELETE) statements: the current tuple ID - (CTID).CTID + (CTID).CTID This is a system column containing the file block number and position in the block for the row. Knowing - the table, the CTID can be used to retrieve the - original row of t1 to be updated. After adding the - CTID to the target list, the query actually looks like: + the table, the CTID can be used to retrieve the + original row of t1 to be updated. After adding the + CTID to the target list, the query actually looks like: SELECT t1.a, t2.b, t1.ctid FROM t1, t2 WHERE t1.a = t2.a; @@ -759,9 +759,9 @@ SELECT t1.a, t2.b, t1.ctid FROM t1, t2 WHERE t1.a = t2.a; the stage. Old table rows aren't overwritten, and this is why ROLLBACK is fast. In an UPDATE, the new result row is inserted into the table (after stripping the - CTID) and in the row header of the old row, which the - CTID pointed to, the cmax and - xmax entries are set to the current command counter + CTID) and in the row header of the old row, which the + CTID pointed to, the cmax and + xmax entries are set to the current command counter and current transaction ID. Thus the old row is hidden, and after the transaction commits the vacuum cleaner can eventually remove the dead row. @@ -780,7 +780,7 @@ SELECT t1.a, t2.b, t1.ctid FROM t1, t2 WHERE t1.a = t2.a; The above demonstrates how the rule system incorporates view definitions into the original query tree. In the second example, a simple SELECT from one view created a final - query tree that is a join of 4 tables (unit was used twice with + query tree that is a join of 4 tables (unit was used twice with different names). @@ -811,7 +811,7 @@ SELECT t1.a, t2.b, t1.ctid FROM t1, t2 WHERE t1.a = t2.a; DELETE? Doing the substitutions described above would give a query tree in which the result relation points at a subquery range-table entry, which will not - work. There are several ways in which PostgreSQL + work. There are several ways in which PostgreSQL can support the appearance of updating a view, however. @@ -821,20 +821,20 @@ SELECT t1.a, t2.b, t1.ctid FROM t1, t2 WHERE t1.a = t2.a; underlying base relation so that the INSERT, UPDATE, or DELETE is applied to the base relation in the appropriate way. Views that are - simple enough for this are called automatically - updatable. For detailed information on the kinds of view that can - be automatically updated, see . + simple enough for this are called automatically + updatable. For detailed information on the kinds of view that can + be automatically updated, see . Alternatively, the operation may be handled by a user-provided - INSTEAD OF trigger on the view. + INSTEAD OF trigger on the view. Rewriting works slightly differently in this case. For INSERT, the rewriter does nothing at all with the view, leaving it as the result relation for the query. 
For UPDATE and DELETE, it's still necessary to expand the - view query to produce the old rows that the command will + view query to produce the old rows that the command will attempt to update or delete. So the view is expanded as normal, but another unexpanded range-table entry is added to the query to represent the view in its capacity as the result relation. @@ -843,33 +843,33 @@ SELECT t1.a, t2.b, t1.ctid FROM t1, t2 WHERE t1.a = t2.a; The problem that now arises is how to identify the rows to be updated in the view. Recall that when the result relation - is a table, a special CTID entry is added to the target + is a table, a special CTID entry is added to the target list to identify the physical locations of the rows to be updated. This does not work if the result relation is a view, because a view - does not have any CTID, since its rows do not have + does not have any CTID, since its rows do not have actual physical locations. Instead, for an UPDATE - or DELETE operation, a special wholerow + or DELETE operation, a special wholerow entry is added to the target list, which expands to include all columns from the view. The executor uses this value to supply the - old row to the INSTEAD OF trigger. It is + old row to the INSTEAD OF trigger. It is up to the trigger to work out what to update based on the old and new row values. - Another possibility is for the user to define INSTEAD + Another possibility is for the user to define INSTEAD rules that specify substitute actions for INSERT, UPDATE, and DELETE commands on a view. These rules will rewrite the command, typically into a command that updates one or more tables, rather than views. That is the topic - of . + of . Note that rules are evaluated first, rewriting the original query before it is planned and executed. Therefore, if a view has - INSTEAD OF triggers as well as rules on INSERT, - UPDATE, or DELETE, then the rules will be + INSTEAD OF triggers as well as rules on INSERT, + UPDATE, or DELETE, then the rules will be evaluated first, and depending on the result, the triggers may not be used at all. @@ -883,7 +883,7 @@ SELECT t1.a, t2.b, t1.ctid FROM t1, t2 WHERE t1.a = t2.a; - If there are no INSTEAD rules or INSTEAD OF + If there are no INSTEAD rules or INSTEAD OF triggers for the view, and the rewriter cannot automatically rewrite the query as an update on the underlying base relation, an error will be thrown because the executor cannot update a view as such. @@ -902,13 +902,13 @@ SELECT t1.a, t2.b, t1.ctid FROM t1, t2 WHERE t1.a = t2.a; - materialized view - implementation through rules + materialized view + implementation through rules - view - materialized + view + materialized @@ -970,7 +970,7 @@ CREATE MATERIALIZED VIEW sales_summary AS invoice_date, sum(invoice_amt)::numeric(13,2) as sales_amt FROM invoice - WHERE invoice_date < CURRENT_DATE + WHERE invoice_date < CURRENT_DATE GROUP BY seller_no, invoice_date @@ -1030,7 +1030,7 @@ SELECT count(*) FROM words WHERE word = 'caterpiler'; (1 row) - With EXPLAIN ANALYZE, we see: + With EXPLAIN ANALYZE, we see: Aggregate (cost=21763.99..21764.00 rows=1 width=0) (actual time=188.180..188.181 rows=1 loops=1) @@ -1058,7 +1058,7 @@ SELECT count(*) FROM words WHERE word = 'caterpiler'; have wanted. 
Again using file_fdw: -SELECT word FROM words ORDER BY word <-> 'caterpiler' LIMIT 10; +SELECT word FROM words ORDER BY word <-> 'caterpiler' LIMIT 10; word --------------- @@ -1104,7 +1104,7 @@ SELECT word FROM words ORDER BY word <-> 'caterpiler' LIMIT 10; -Rules on <command>INSERT</>, <command>UPDATE</>, and <command>DELETE</> +Rules on <command>INSERT</command>, <command>UPDATE</command>, and <command>DELETE</command> rule @@ -1122,8 +1122,8 @@ SELECT word FROM words ORDER BY word <-> 'caterpiler' LIMIT 10; - Rules that are defined on INSERT, UPDATE, - and DELETE are significantly different from the view rules + Rules that are defined on INSERT, UPDATE, + and DELETE are significantly different from the view rules described in the previous section. First, their CREATE RULE command allows more: @@ -1142,13 +1142,13 @@ SELECT word FROM words ORDER BY word <-> 'caterpiler' LIMIT 10; - They can be INSTEAD or ALSO (the default). + They can be INSTEAD or ALSO (the default). - The pseudorelations NEW and OLD become useful. + The pseudorelations NEW and OLD become useful. @@ -1167,7 +1167,7 @@ SELECT word FROM words ORDER BY word <-> 'caterpiler' LIMIT 10; In many cases, tasks that could be performed by rules - on INSERT/UPDATE/DELETE are better done + on INSERT/UPDATE/DELETE are better done with triggers. Triggers are notationally a bit more complicated, but their semantics are much simpler to understand. Rules tend to have surprising results when the original query contains volatile functions: volatile @@ -1177,9 +1177,9 @@ SELECT word FROM words ORDER BY word <-> 'caterpiler' LIMIT 10; Also, there are some cases that are not supported by these types of rules at - all, notably including WITH clauses in the original query and - multiple-assignment sub-SELECTs in the SET list - of UPDATE queries. This is because copying these constructs + all, notably including WITH clauses in the original query and + multiple-assignment sub-SELECTs in the SET list + of UPDATE queries. This is because copying these constructs into a rule query would result in multiple evaluations of the sub-query, contrary to the express intent of the query's author. @@ -1198,8 +1198,8 @@ CREATE [ OR REPLACE ] RULE name AS in mind. - In the following, update rules means rules that are defined - on INSERT, UPDATE, or DELETE. + In the following, update rules means rules that are defined + on INSERT, UPDATE, or DELETE. @@ -1208,16 +1208,16 @@ CREATE [ OR REPLACE ] RULE name AS object and event given in the CREATE RULE command. For update rules, the rule system creates a list of query trees. Initially the query-tree list is empty. - There can be zero (NOTHING key word), one, or multiple actions. + There can be zero (NOTHING key word), one, or multiple actions. To simplify, we will look at a rule with one action. This rule - can have a qualification or not and it can be INSTEAD or - ALSO (the default). + can have a qualification or not and it can be INSTEAD or + ALSO (the default). What is a rule qualification? It is a restriction that tells when the actions of the rule should be done and when not. This - qualification can only reference the pseudorelations NEW and/or OLD, + qualification can only reference the pseudorelations NEW and/or OLD, which basically represent the relation that was given as object (but with a special meaning). 
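As a small illustration of the terms just introduced, the following sketch (all table names are hypothetical) defines an INSTEAD rule with two actions that reference the OLD pseudorelation; a qualification such as WHERE OLD.active could additionally restrict when those actions fire:

CREATE TABLE accounts (id int PRIMARY KEY, active boolean DEFAULT true);
CREATE TABLE accounts_audit (id int, removed_at timestamptz);

-- an unconditional INSTEAD rule with two actions: a DELETE on accounts is
-- rewritten into a soft delete plus an audit record, both referring to OLD
CREATE RULE accounts_soft_delete AS ON DELETE TO accounts
    DO INSTEAD (
        UPDATE accounts SET active = false WHERE id = OLD.id;
        INSERT INTO accounts_audit VALUES (OLD.id, now())
    );

The log_shoelace rule developed step by step below shows the qualified ALSO variant of the same machinery.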
@@ -1228,8 +1228,8 @@ CREATE [ OR REPLACE ] RULE name AS - No qualification, with either ALSO or - INSTEAD + No qualification, with either ALSO or + INSTEAD the query tree from the rule action with the original query @@ -1239,7 +1239,7 @@ CREATE [ OR REPLACE ] RULE name AS - Qualification given and ALSO + Qualification given and ALSO the query tree from the rule action with the rule @@ -1250,7 +1250,7 @@ CREATE [ OR REPLACE ] RULE name AS - Qualification given and INSTEAD + Qualification given and INSTEAD the query tree from the rule action with the rule @@ -1262,17 +1262,17 @@ CREATE [ OR REPLACE ] RULE name AS - Finally, if the rule is ALSO, the unchanged original query tree is - added to the list. Since only qualified INSTEAD rules already add the + Finally, if the rule is ALSO, the unchanged original query tree is + added to the list. Since only qualified INSTEAD rules already add the original query tree, we end up with either one or two output query trees for a rule with one action. - For ON INSERT rules, the original query (if not suppressed by INSTEAD) + For ON INSERT rules, the original query (if not suppressed by INSTEAD) is done before any actions added by rules. This allows the actions to - see the inserted row(s). But for ON UPDATE and ON - DELETE rules, the original query is done after the actions added by rules. + see the inserted row(s). But for ON UPDATE and ON + DELETE rules, the original query is done after the actions added by rules. This ensures that the actions can see the to-be-updated or to-be-deleted rows; otherwise, the actions might do nothing because they find no rows matching their qualifications. @@ -1293,12 +1293,12 @@ CREATE [ OR REPLACE ] RULE name AS The query trees found in the actions of the pg_rewrite system catalog are only templates. Since they can reference the range-table entries for - NEW and OLD, some substitutions have to be made before they can be - used. For any reference to NEW, the target list of the original + NEW and OLD, some substitutions have to be made before they can be + used. For any reference to NEW, the target list of the original query is searched for a corresponding entry. If found, that - entry's expression replaces the reference. Otherwise, NEW means the - same as OLD (for an UPDATE) or is replaced by - a null value (for an INSERT). Any reference to OLD is + entry's expression replaces the reference. Otherwise, NEW means the + same as OLD (for an UPDATE) or is replaced by + a null value (for an INSERT). Any reference to OLD is replaced by a reference to the range-table entry that is the result relation. @@ -1313,7 +1313,7 @@ CREATE [ OR REPLACE ] RULE name AS A First Rule Step by Step - Say we want to trace changes to the sl_avail column in the + Say we want to trace changes to the sl_avail column in the shoelace_data relation. So we set up a log table and a rule that conditionally writes a log entry when an UPDATE is performed on @@ -1367,7 +1367,7 @@ UPDATE shoelace_data SET sl_avail = 6 WHERE shoelace_data.sl_name = 'sl7'; - There is a rule log_shoelace that is ON UPDATE with the rule + There is a rule log_shoelace that is ON UPDATE with the rule qualification expression: @@ -1384,15 +1384,15 @@ INSERT INTO shoelace_log VALUES ( (This looks a little strange since you cannot normally write - INSERT ... VALUES ... FROM. The FROM + INSERT ... VALUES ... FROM. The FROM clause here is just to indicate that there are range-table entries - in the query tree for new and old. + in the query tree for new and old. 
These are needed so that they can be referenced by variables in the INSERT command's query tree.) - The rule is a qualified ALSO rule, so the rule system + The rule is a qualified ALSO rule, so the rule system has to return two query trees: the modified rule action and the original query tree. In step 1, the range table of the original query is incorporated into the rule's action query tree. This results in: @@ -1406,7 +1406,7 @@ INSERT INTO shoelace_log VALUES ( In step 2, the rule qualification is added to it, so the result set - is restricted to rows where sl_avail changes: + is restricted to rows where sl_avail changes: INSERT INTO shoelace_log VALUES ( @@ -1417,10 +1417,10 @@ INSERT INTO shoelace_log VALUES ( WHERE new.sl_avail <> old.sl_avail; - (This looks even stranger, since INSERT ... VALUES doesn't have - a WHERE clause either, but the planner and executor will have no + (This looks even stranger, since INSERT ... VALUES doesn't have + a WHERE clause either, but the planner and executor will have no difficulty with it. They need to support this same functionality - anyway for INSERT ... SELECT.) + anyway for INSERT ... SELECT.) @@ -1440,7 +1440,7 @@ INSERT INTO shoelace_log VALUES ( - Step 4 replaces references to NEW by the target list entries from the + Step 4 replaces references to NEW by the target list entries from the original query tree or by the matching variable references from the result relation: @@ -1457,7 +1457,7 @@ INSERT INTO shoelace_log VALUES ( - Step 5 changes OLD references into result relation references: + Step 5 changes OLD references into result relation references: INSERT INTO shoelace_log VALUES ( @@ -1471,7 +1471,7 @@ INSERT INTO shoelace_log VALUES ( - That's it. Since the rule is ALSO, we also output the + That's it. Since the rule is ALSO, we also output the original query tree. In short, the output from the rule system is a list of two query trees that correspond to these statements: @@ -1502,8 +1502,8 @@ UPDATE shoelace_data SET sl_color = 'green' no log entry would get written. In that case, the original query tree does not contain a target list entry for - sl_avail, so NEW.sl_avail will get - replaced by shoelace_data.sl_avail. Thus, the extra + sl_avail, so NEW.sl_avail will get + replaced by shoelace_data.sl_avail. Thus, the extra command generated by the rule is: @@ -1527,8 +1527,8 @@ UPDATE shoelace_data SET sl_avail = 0 WHERE sl_color = 'black'; - four rows in fact get updated (sl1, sl2, sl3, and sl4). - But sl3 already has sl_avail = 0. In this case, the original + four rows in fact get updated (sl1, sl2, sl3, and sl4). + But sl3 already has sl_avail = 0. In this case, the original query trees qualification is different and that results in the extra query tree: @@ -1559,7 +1559,7 @@ SELECT shoelace_data.sl_name, 0, Cooperation with Views -viewupdating +viewupdating A simple way to protect view relations from the mentioned @@ -1579,7 +1579,7 @@ CREATE RULE shoe_del_protect AS ON DELETE TO shoe If someone now tries to do any of these operations on the view relation shoe, the rule system will apply these rules. Since the rules have - no actions and are INSTEAD, the resulting list of + no actions and are INSTEAD, the resulting list of query trees will be empty and the whole query will become nothing because there is nothing left to be optimized or executed after the rule system is done with it. 
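A brief sketch of how this looks from the client side. Only the DELETE variant appears in the hunk context above; presumably the INSERT and UPDATE protections follow the same pattern, and the resulting command status is discussed in more detail later in this chapter:

CREATE RULE shoe_ins_protect AS ON INSERT TO shoe DO INSTEAD NOTHING;
CREATE RULE shoe_upd_protect AS ON UPDATE TO shoe DO INSTEAD NOTHING;

-- with the protection rules in place the statement parses and is rewritten
-- into an empty query list, so nothing reaches the planner or executor;
-- psql would normally report a zero-row status such as DELETE 0
DELETE FROM shoe;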
@@ -1621,8 +1621,8 @@ CREATE RULE shoelace_del AS ON DELETE TO shoelace - If you want to support RETURNING queries on the view, - you need to make the rules include RETURNING clauses that + If you want to support RETURNING queries on the view, + you need to make the rules include RETURNING clauses that compute the view rows. This is usually pretty trivial for views on a single table, but it's a bit tedious for join views such as shoelace. An example for the insert case is: @@ -1643,9 +1643,9 @@ CREATE RULE shoelace_ins AS ON INSERT TO shoelace FROM unit u WHERE shoelace_data.sl_unit = u.un_name); - Note that this one rule supports both INSERT and - INSERT RETURNING queries on the view — the - RETURNING clause is simply ignored for INSERT. + Note that this one rule supports both INSERT and + INSERT RETURNING queries on the view — the + RETURNING clause is simply ignored for INSERT. @@ -1785,7 +1785,7 @@ UPDATE shoelace_data AND shoelace_data.sl_name = shoelace.sl_name; - Again it's an INSTEAD rule and the previous query tree is trashed. + Again it's an INSTEAD rule and the previous query tree is trashed. Note that this query still uses the view shoelace. But the rule system isn't finished with this step, so it continues and applies the _RETURN rule on it, and we get: @@ -2041,16 +2041,16 @@ GRANT SELECT ON phone_number TO assistant; Nobody except that user (and the database superusers) can access the - phone_data table. But because of the GRANT, + phone_data table. But because of the GRANT, the assistant can run a SELECT on the - phone_number view. The rule system will rewrite the - SELECT from phone_number into a - SELECT from phone_data. + phone_number view. The rule system will rewrite the + SELECT from phone_number into a + SELECT from phone_data. Since the user is the owner of - phone_number and therefore the owner of the rule, the - read access to phone_data is now checked against the user's + phone_number and therefore the owner of the rule, the + read access to phone_data is now checked against the user's privileges and the query is permitted. The check for accessing - phone_number is also performed, but this is done + phone_number is also performed, but this is done against the invoking user, so nobody but the user and the assistant can use it. @@ -2059,19 +2059,19 @@ GRANT SELECT ON phone_number TO assistant; The privileges are checked rule by rule. So the assistant is for now the only one who can see the public phone numbers. But the assistant can set up another view and grant access to that to the public. Then, anyone - can see the phone_number data through the assistant's view. + can see the phone_number data through the assistant's view. What the assistant cannot do is to create a view that directly - accesses phone_data. (Actually the assistant can, but it will not work since + accesses phone_data. (Actually the assistant can, but it will not work since every access will be denied during the permission checks.) And as soon as the user notices that the assistant opened - their phone_number view, the user can revoke the assistant's access. Immediately, any + their phone_number view, the user can revoke the assistant's access. Immediately, any access to the assistant's view would fail. One might think that this rule-by-rule checking is a security hole, but in fact it isn't. 
But if it did not work this way, the assistant - could set up a table with the same columns as phone_number and + could set up a table with the same columns as phone_number and copy the data to there once per day. Then it's the assistant's own data and the assistant can grant access to everyone they want. A GRANT command means, I trust you. @@ -2090,9 +2090,9 @@ CREATE VIEW phone_number AS SELECT person, phone FROM phone_data WHERE phone NOT LIKE '412%'; This view might seem secure, since the rule system will rewrite any - SELECT from phone_number into a - SELECT from phone_data and add the - qualification that only entries where phone does not begin + SELECT from phone_number into a + SELECT from phone_data and add the + qualification that only entries where phone does not begin with 412 are wanted. But if the user can create their own functions, it is not difficult to convince the planner to execute the user-defined function prior to the NOT LIKE expression. @@ -2107,7 +2107,7 @@ $$ LANGUAGE plpgsql COST 0.0000000000000000000001; SELECT * FROM phone_number WHERE tricky(person, phone); - Every person and phone number in the phone_data table will be + Every person and phone number in the phone_data table will be printed as a NOTICE, because the planner will choose to execute the inexpensive tricky function before the more expensive NOT LIKE. Even if the user is @@ -2119,17 +2119,17 @@ SELECT * FROM phone_number WHERE tricky(person, phone); Similar considerations apply to update rules. In the examples of the previous section, the owner of the tables in the example - database could grant the privileges SELECT, - INSERT, UPDATE, and DELETE on - the shoelace view to someone else, but only - SELECT on shoelace_log. The rule action to + database could grant the privileges SELECT, + INSERT, UPDATE, and DELETE on + the shoelace view to someone else, but only + SELECT on shoelace_log. The rule action to write log entries will still be executed successfully, and that other user could see the log entries. But they could not create fake entries, nor could they manipulate or remove existing ones. In this case, there is no possibility of subverting the rules by convincing the planner to alter the order of operations, because the only rule - which references shoelace_log is an unqualified - INSERT. This might not be true in more complex scenarios. + which references shoelace_log is an unqualified + INSERT. This might not be true in more complex scenarios. @@ -2189,7 +2189,7 @@ CREATE VIEW phone_number WITH (security_barrier) AS The PostgreSQL server returns a command - status string, such as INSERT 149592 1, for each + status string, such as INSERT 149592 1, for each command it receives. This is simple enough when there are no rules involved, but what happens when the query is rewritten by rules? @@ -2200,10 +2200,10 @@ CREATE VIEW phone_number WITH (security_barrier) AS - If there is no unconditional INSTEAD rule for the query, then + If there is no unconditional INSTEAD rule for the query, then the originally given query will be executed, and its command status will be returned as usual. (But note that if there were - any conditional INSTEAD rules, the negation of their qualifications + any conditional INSTEAD rules, the negation of their qualifications will have been added to the original query. This might reduce the number of rows it processes, and if so the reported status will be affected.) 
@@ -2212,10 +2212,10 @@ CREATE VIEW phone_number WITH (security_barrier) AS - If there is any unconditional INSTEAD rule for the query, then + If there is any unconditional INSTEAD rule for the query, then the original query will not be executed at all. In this case, the server will return the command status for the last query - that was inserted by an INSTEAD rule (conditional or + that was inserted by an INSTEAD rule (conditional or unconditional) and is of the same command type (INSERT, UPDATE, or DELETE) as the original query. If no query @@ -2228,7 +2228,7 @@ CREATE VIEW phone_number WITH (security_barrier) AS - The programmer can ensure that any desired INSTEAD rule is the one + The programmer can ensure that any desired INSTEAD rule is the one that sets the command status in the second case, by giving it the alphabetically last rule name among the active rules, so that it gets applied last. @@ -2253,7 +2253,7 @@ CREATE VIEW phone_number WITH (security_barrier) AS implemented using the PostgreSQL rule system. One of the things that cannot be implemented by rules are some kinds of constraints, especially foreign keys. It is possible - to place a qualified rule that rewrites a command to NOTHING + to place a qualified rule that rewrites a command to NOTHING if the value of a column does not appear in another table. But then the data is silently thrown away and that's not a good idea. If checks for valid values are required, @@ -2264,7 +2264,7 @@ CREATE VIEW phone_number WITH (security_barrier) AS In this chapter, we focused on using rules to update views. All of the update rule examples in this chapter can also be implemented - using INSTEAD OF triggers on the views. Writing such + using INSTEAD OF triggers on the views. Writing such triggers is often easier than writing rules, particularly if complex logic is required to perform the update. @@ -2298,8 +2298,8 @@ CREATE TABLE software ( Both tables have many thousands of rows and the indexes on - hostname are unique. The rule or trigger should - implement a constraint that deletes rows from software + hostname are unique. The rule or trigger should + implement a constraint that deletes rows from software that reference a deleted computer. The trigger would use this command: @@ -2307,8 +2307,8 @@ DELETE FROM software WHERE hostname = $1; Since the trigger is called for each individual row deleted from - computer, it can prepare and save the plan for this - command and pass the hostname value in the + computer, it can prepare and save the plan for this + command and pass the hostname value in the parameter. The rule would be written as: @@ -2324,7 +2324,7 @@ CREATE RULE computer_del AS ON DELETE TO computer DELETE FROM computer WHERE hostname = 'mypc.local.net'; - the table computer is scanned by index (fast), and the + the table computer is scanned by index (fast), and the command issued by the trigger would also use an index scan (also fast). The extra command from the rule would be: @@ -2348,8 +2348,8 @@ Nestloop With the next delete we want to get rid of all the 2000 computers - where the hostname starts with - old. There are two possible commands to do that. One + where the hostname starts with + old. There are two possible commands to do that. 
One is: @@ -2389,17 +2389,17 @@ Nestloop This shows, that the planner does not realize that the - qualification for hostname in - computer could also be used for an index scan on - software when there are multiple qualification - expressions combined with AND, which is what it does + qualification for hostname in + computer could also be used for an index scan on + software when there are multiple qualification + expressions combined with AND, which is what it does in the regular-expression version of the command. The trigger will get invoked once for each of the 2000 old computers that have to be deleted, and that will result in one index scan over - computer and 2000 index scans over - software. The rule implementation will do it with two + computer and 2000 index scans over + software. The rule implementation will do it with two commands that use indexes. And it depends on the overall size of - the table software whether the rule will still be faster in the + the table software whether the rule will still be faster in the sequential scan situation. 2000 command executions from the trigger over the SPI manager take some time, even if all the index blocks will soon be in the cache. @@ -2412,7 +2412,7 @@ DELETE FROM computer WHERE manufacturer = 'bim'; Again this could result in many rows to be deleted from - computer. So the trigger will again run many commands + computer. So the trigger will again run many commands through the executor. The command generated by the rule will be: @@ -2421,7 +2421,7 @@ DELETE FROM software WHERE computer.manufacturer = 'bim' The plan for that command will again be the nested loop over two - index scans, only using a different index on computer: + index scans, only using a different index on computer: Nestloop @@ -2434,8 +2434,8 @@ Nestloop in a command. - + The summary is, rules will only be significantly slower than diff --git a/doc/src/sgml/runtime.sgml b/doc/src/sgml/runtime.sgml index 6d57525515..8d9d40664b 100644 --- a/doc/src/sgml/runtime.sgml +++ b/doc/src/sgml/runtime.sgml @@ -60,7 +60,7 @@ during initialization is called template1. As the name suggests, this will be used as a template for subsequently created databases; it should not be - used for actual work. (See for + used for actual work. (See for information about creating new databases within a cluster.) @@ -73,12 +73,12 @@ /usr/local/pgsql/data or /var/lib/pgsql/data are popular. To initialize a database cluster, use the command ,initdb which is + linkend="app-initdb"/>,initdb which is installed with PostgreSQL. The desired file system location of your database cluster is indicated by the option, for example: -$ initdb -D /usr/local/pgsql/data +$ initdb -D /usr/local/pgsql/data Note that you must execute this command while logged into the PostgreSQL user account, which is @@ -95,14 +95,14 @@ Alternatively, you can run initdb via - the - programpg_ctl like so: + the + programpg_ctl like so: -$ pg_ctl -D /usr/local/pgsql/data initdb +$ pg_ctl -D /usr/local/pgsql/data initdb This may be more intuitive if you are using pg_ctl for starting and stopping the - server (see ), so + server (see ), so that pg_ctl would be the sole command you use for managing the database server instance. @@ -137,7 +137,22 @@ postgres$ initdb -D /usr/local/pgsql/data database, it is essential that it be secured from unauthorized access. initdb therefore revokes access permissions from everyone but the - PostgreSQL user. + PostgreSQL user, and optionally, group. + Group access, when enabled, is read-only. 
This allows an unprivileged + user in the same group as the cluster owner to take a backup of the + cluster data or perform other operations that only require read access. + + + + Note that enabling or disabling group access on an existing cluster requires + the cluster to be shut down and the appropriate mode to be set on all + directories and files before restarting + PostgreSQL. Otherwise, a mix of modes might + exist in the data directory. For clusters that allow access only by the + owner, the appropriate modes are 0700 for directories + and 0600 for files. For clusters that also allow + reads by the group, the appropriate modes are 0750 + for directories and 0640 for files. @@ -148,43 +163,43 @@ postgres$ initdb -D /usr/local/pgsql/data initdb's , or options to assign a password to the database superuser. - password - of the superuser + password + of the superuser - Also, specify initdb also initializes the default - localelocale for the database cluster. + localelocale for the database cluster. Normally, it will just take the locale settings in the environment and apply them to the initialized database. It is possible to specify a different locale for the database; more information about - that can be found in . The default sort order used + that can be found in . The default sort order used within the particular database cluster is set by initdb, and while you can create new databases using different sort order, the order used in the template databases that initdb creates cannot be changed without dropping and recreating them. There is also a performance impact for using locales - other than C or POSIX. Therefore, it is + other than C or POSIX. Therefore, it is important to make this choice correctly the first time. initdb also sets the default character set encoding for the database cluster. Normally this should be chosen to match the - locale setting. For details see . + locale setting. For details see . - Non-C and non-POSIX locales rely on the + Non-C and non-POSIX locales rely on the operating system's collation library for character set ordering. This controls the ordering of keys stored in indexes. For this reason, a cluster cannot switch to an incompatible collation library version, @@ -201,14 +216,14 @@ postgres$ initdb -D /usr/local/pgsql/data Many installations create their database clusters on file systems - (volumes) other than the machine's root volume. If you + (volumes) other than the machine's root volume. If you choose to do this, it is not advisable to try to use the secondary volume's topmost directory (mount point) as the data directory. Best practice is to create a directory within the mount-point directory that is owned by the PostgreSQL user, and then create the data directory within that. This avoids permissions problems, particularly for operations such - as pg_upgrade, and it also ensures clean failures if + as pg_upgrade, and it also ensures clean failures if the secondary volume is taken offline. @@ -220,30 +235,30 @@ postgres$ initdb -D /usr/local/pgsql/data Network File Systems - NFSNetwork File Systems - Network Attached Storage (NAS)Network File Systems + NFSNetwork File Systems + Network Attached Storage (NAS)Network File Systems Many installations create their database clusters on network file - systems. Sometimes this is done via NFS, or by using a - Network Attached Storage (NAS) device that uses - NFS internally. PostgreSQL does nothing - special for NFS file systems, meaning it assumes - NFS behaves exactly like locally-connected drives. 
- If the client or server NFS implementation does not + systems. Sometimes this is done via NFS, or by using a + Network Attached Storage (NAS) device that uses + NFS internally. PostgreSQL does nothing + special for NFS file systems, meaning it assumes + NFS behaves exactly like locally-connected drives. + If the client or server NFS implementation does not provide standard file system semantics, this can cause reliability problems (see ). - Specifically, delayed (asynchronous) writes to the NFS + url="https://www.time-travellers.org/shane/papers/NFS_considered_harmful.html">). + Specifically, delayed (asynchronous) writes to the NFS server can cause data corruption problems. If possible, mount the - NFS file system synchronously (without caching) to avoid - this hazard. Also, soft-mounting the NFS file system is + NFS file system synchronously (without caching) to avoid + this hazard. Also, soft-mounting the NFS file system is not recommended. - Storage Area Networks (SAN) typically use communication - protocols other than NFS, and may or may not be subject + Storage Area Networks (SAN) typically use communication + protocols other than NFS, and may or may not be subject to hazards of this sort. It's advisable to consult the vendor's documentation concerning data consistency guarantees. PostgreSQL cannot be more reliable than @@ -260,7 +275,7 @@ postgres$ initdb -D /usr/local/pgsql/data Before anyone can access the database, you must start the database server. The database server program is called - postgres.postgres + postgres.postgres The postgres program must know where to find the data it is supposed to use. This is done with the option. Thus, the simplest way to start the @@ -281,24 +296,24 @@ $ postgres -D /usr/local/pgsql/data $ postgres -D /usr/local/pgsql/data >logfile 2>&1 & - It is important to store the server's stdout and - stderr output somewhere, as shown above. It will help + It is important to store the server's stdout and + stderr output somewhere, as shown above. It will help for auditing purposes and to diagnose problems. (See for a more thorough discussion of log + linkend="logfile-maintenance"/> for a more thorough discussion of log file handling.) The postgres program also takes a number of other command-line options. For more information, see the - reference page - and below. + reference page + and below. This shell syntax can get tedious quickly. Therefore the wrapper program - pg_ctl + pg_ctl is provided to simplify some tasks. For example: pg_ctl start -l logfile @@ -312,13 +327,13 @@ pg_ctl start -l logfile Normally, you will want to start the database server when the computer boots. - booting - starting the server during + booting + starting the server during Autostart scripts are operating-system-specific. There are a few distributed with PostgreSQL in the - contrib/start-scripts directory. Installing one will require + contrib/start-scripts directory. Installing one will require root privileges. @@ -327,7 +342,7 @@ pg_ctl start -l logfile at boot time. Many systems have a file /etc/rc.local or /etc/rc.d/rc.local. Others use init.d or - rc.d directories. Whatever you do, the server must be + rc.d directories. Whatever you do, the server must be run by the PostgreSQL user account and not by root or any other user. Therefore you probably should form your commands using @@ -348,7 +363,7 @@ su postgres -c 'pg_ctl start -D /usr/local/pgsql/data -l serverlog' For FreeBSD, look at the file contrib/start-scripts/freebsd in the PostgreSQL source distribution. 
- FreeBSDstart script + FreeBSDstart script @@ -356,7 +371,7 @@ su postgres -c 'pg_ctl start -D /usr/local/pgsql/data -l serverlog' On OpenBSD, add the following lines to the file /etc/rc.local: - OpenBSDstart script + OpenBSDstart script if [ -x /usr/local/pgsql/bin/pg_ctl -a -x /usr/local/pgsql/bin/postgres ]; then su -l postgres -c '/usr/local/pgsql/bin/pg_ctl start -s -l /var/postgresql/log -D /usr/local/pgsql/data' @@ -369,7 +384,7 @@ fi On Linux systems either add - Linuxstart script + Linuxstart script /usr/local/pgsql/bin/pg_ctl start -l logfile -D /usr/local/pgsql/data @@ -421,7 +436,7 @@ WantedBy=multi-user.target FreeBSD or Linux start scripts, depending on preference. - NetBSDstart script + NetBSDstart script @@ -430,12 +445,12 @@ WantedBy=multi-user.target On Solaris, create a file called /etc/init.d/postgresql that contains the following line: - Solarisstart script + Solarisstart script su - postgres -c "/usr/local/pgsql/bin/pg_ctl start -l logfile -D /usr/local/pgsql/data" - Then, create a symbolic link to it in /etc/rc3.d as - S99postgresql. + Then, create a symbolic link to it in /etc/rc3.d as + S99postgresql. @@ -494,7 +509,7 @@ DETAIL: Failed system call was shmget(key=5440001, size=4011376640, 03600). mean that you do not have System-V-style shared memory support configured into your kernel at all. As a temporary workaround, you can try starting the server with a smaller-than-normal number of - buffers (). You will eventually want + buffers (). You will eventually want to reconfigure your kernel to increase the allowed shared memory size. You might also see this message when trying to start multiple servers on the same machine, if their total space requested @@ -509,24 +524,24 @@ DETAIL: Failed system call was semget(5440126, 17, 03600). does not mean you've run out of disk space. It means your kernel's limit on the number of System V semaphores is smaller than the number + class="osname">System V semaphores is smaller than the number PostgreSQL wants to create. As above, you might be able to work around the problem by starting the server with a reduced number of allowed connections - (), but you'll eventually want to + (), but you'll eventually want to increase the kernel limit. - If you get an illegal system call error, it is likely that + If you get an illegal system call error, it is likely that shared memory or semaphores are not supported in your kernel at all. In that case your only option is to reconfigure the kernel to enable these features. - Details about configuring System V - IPC facilities are given in . + Details about configuring System V + IPC facilities are given in . @@ -574,7 +589,7 @@ psql: could not connect to server: No such file or directory does not mean that the server got your connection request and rejected it. That case will produce a different message, as shown in .) Other error messages + linkend="client-authentication-problems"/>.) Other error messages such as Connection timed out might indicate more fundamental problems, like lack of network connectivity. @@ -586,10 +601,10 @@ psql: could not connect to server: No such file or directory Managing Kernel Resources - PostgreSQL can sometimes exhaust various operating system + PostgreSQL can sometimes exhaust various operating system resource limits, especially when multiple copies of the server are running on the same system, or in very large installations. 
This section explains - the kernel resources used by PostgreSQL and the steps you + the kernel resources used by PostgreSQL and the steps you can take to resolve problems related to kernel resource consumption. @@ -605,27 +620,27 @@ psql: could not connect to server: No such file or directory - PostgreSQL requires the operating system to provide - inter-process communication (IPC) features, specifically + PostgreSQL requires the operating system to provide + inter-process communication (IPC) features, specifically shared memory and semaphores. Unix-derived systems typically provide - System V IPC, - POSIX IPC, or both. - Windows has its own implementation of + System V IPC, + POSIX IPC, or both. + Windows has its own implementation of these features and is not discussed here. The complete lack of these facilities is usually manifested by an - Illegal system call error upon server + Illegal system call error upon server start. In that case there is no alternative but to reconfigure your - kernel. PostgreSQL won't work without them. + kernel. PostgreSQL won't work without them. This situation is rare, however, among modern operating systems. - Upon starting the server, PostgreSQL normally allocates + Upon starting the server, PostgreSQL normally allocates a very small amount of System V shared memory, as well as a much larger - amount of POSIX (mmap) shared memory. + amount of POSIX (mmap) shared memory. In addition a significant number of semaphores, which can be either System V or POSIX style, are created at server startup. Currently, POSIX semaphores are used on Linux and FreeBSD systems while other @@ -634,7 +649,7 @@ psql: could not connect to server: No such file or directory - Prior to PostgreSQL 9.3, only System V shared memory + Prior to PostgreSQL 9.3, only System V shared memory was used, so the amount of System V shared memory required to start the server was much larger. If you are running an older version of the server, please consult the documentation for your server version. @@ -642,91 +657,91 @@ psql: could not connect to server: No such file or directory - System V IPC features are typically constrained by + System V IPC features are typically constrained by system-wide allocation limits. - When PostgreSQL exceeds one of these limits, + When PostgreSQL exceeds one of these limits, the server will refuse to start and should leave an instructive error message describing the problem and what to do about it. (See also .) The relevant kernel + linkend="server-start-failures"/>.) The relevant kernel parameters are named consistently across different systems; gives an overview. The methods to set + linkend="sysvipc-parameters"/> gives an overview. The methods to set them, however, vary. Suggestions for some platforms are given below. 
- <systemitem class="osname">System V</> <acronym>IPC</> Parameters + <systemitem class="osname">System V</systemitem> <acronym>IPC</acronym> Parameters - Name - Description - Values needed to run one PostgreSQL instance + Name + Description + Values needed to run one PostgreSQL instance - SHMMAX - Maximum size of shared memory segment (bytes) + SHMMAX + Maximum size of shared memory segment (bytes) at least 1kB, but the default is usually much higher - SHMMIN - Minimum size of shared memory segment (bytes) - 1 + SHMMIN + Minimum size of shared memory segment (bytes) + 1 - SHMALL - Total amount of shared memory available (bytes or pages) + SHMALL + Total amount of shared memory available (bytes or pages) same as SHMMAX if bytes, or ceil(SHMMAX/PAGE_SIZE) if pages, - plus room for other applications + plus room for other applications - SHMSEG - Maximum number of shared memory segments per process - only 1 segment is needed, but the default is much higher + SHMSEG + Maximum number of shared memory segments per process + only 1 segment is needed, but the default is much higher - SHMMNI - Maximum number of shared memory segments system-wide - like SHMSEG plus room for other applications + SHMMNI + Maximum number of shared memory segments system-wide + like SHMSEG plus room for other applications - SEMMNI - Maximum number of semaphore identifiers (i.e., sets) - at least ceil((max_connections + autovacuum_max_workers + max_worker_processes + 5) / 16) plus room for other applications + SEMMNI + Maximum number of semaphore identifiers (i.e., sets) + at least ceil((max_connections + autovacuum_max_workers + max_worker_processes + 5) / 16) plus room for other applications - SEMMNS - Maximum number of semaphores system-wide - ceil((max_connections + autovacuum_max_workers + max_worker_processes + 5) / 16) * 17 plus room for other applications + SEMMNS + Maximum number of semaphores system-wide + ceil((max_connections + autovacuum_max_workers + max_worker_processes + 5) / 16) * 17 plus room for other applications - SEMMSL - Maximum number of semaphores per set - at least 17 + SEMMSL + Maximum number of semaphores per set + at least 17 - SEMMAP - Number of entries in semaphore map - see text + SEMMAP + Number of entries in semaphore map + see text - SEMVMX - Maximum value of semaphore - at least 1000 (The default is often 32767; do not change unless necessary) + SEMVMX + Maximum value of semaphore + at least 1000 (The default is often 32767; do not change unless necessary) @@ -734,54 +749,54 @@ psql: could not connect to server: No such file or directory
- PostgreSQL requires a few bytes of System V shared memory + PostgreSQL requires a few bytes of System V shared memory (typically 48 bytes, on 64-bit platforms) for each copy of the server. On most modern operating systems, this amount can easily be allocated. However, if you are running many copies of the server, or if other applications are also using System V shared memory, it may be necessary to - increase SHMALL, which is the total amount of System V shared - memory system-wide. Note that SHMALL is measured in pages + increase SHMALL, which is the total amount of System V shared + memory system-wide. Note that SHMALL is measured in pages rather than bytes on many systems. Less likely to cause problems is the minimum size for shared - memory segments (SHMMIN), which should be at most - approximately 32 bytes for PostgreSQL (it is + memory segments (SHMMIN), which should be at most + approximately 32 bytes for PostgreSQL (it is usually just 1). The maximum number of segments system-wide - (SHMMNI) or per-process (SHMSEG) are unlikely + (SHMMNI) or per-process (SHMSEG) are unlikely to cause a problem unless your system has them set to zero. When using System V semaphores, - PostgreSQL uses one semaphore per allowed connection - (), allowed autovacuum worker process - () and allowed background - process (), in sets of 16. + PostgreSQL uses one semaphore per allowed connection + (), allowed autovacuum worker process + () and allowed background + process (), in sets of 16. Each such set will also contain a 17th semaphore which contains a magic number, to detect collision with semaphore sets used by other applications. The maximum number of semaphores in the system - is set by SEMMNS, which consequently must be at least - as high as max_connections plus - autovacuum_max_workers plus max_worker_processes, + is set by SEMMNS, which consequently must be at least + as high as max_connections plus + autovacuum_max_workers plus max_worker_processes, plus one extra for each 16 allowed connections plus workers (see the formula in ). The parameter SEMMNI + linkend="sysvipc-parameters"/>). The parameter SEMMNI determines the limit on the number of semaphore sets that can exist on the system at one time. Hence this parameter must be at - least ceil((max_connections + autovacuum_max_workers + max_worker_processes + 5) / 16). + least ceil((max_connections + autovacuum_max_workers + max_worker_processes + 5) / 16). Lowering the number of allowed connections is a temporary workaround for failures, which are usually confusingly worded No space - left on device, from the function semget. + left on device, from the function semget. In some cases it might also be necessary to increase - SEMMAP to be at least on the order of - SEMMNS. This parameter defines the size of the semaphore + SEMMAP to be at least on the order of + SEMMNS. This parameter defines the size of the semaphore resource map, in which each contiguous block of available semaphores needs an entry. When a semaphore set is freed it is either added to an existing entry that is adjacent to the freed block or it is @@ -792,17 +807,17 @@ psql: could not connect to server: No such file or directory - Various other settings related to semaphore undo, such as - SEMMNU and SEMUME, do not affect - PostgreSQL. + Various other settings related to semaphore undo, such as + SEMMNU and SEMUME, do not affect + PostgreSQL. 
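Since the same arithmetic comes up whenever these limits are tuned, here is a minimal sketch that evaluates the formula above inside psql against the running instance's own settings (the three GUC names are the ones the formula references; with the usual defaults of max_connections = 100, autovacuum_max_workers = 3 and max_worker_processes = 8 it yields 8 semaphore sets and 136 semaphores, before adding headroom for other applications):

WITH s AS (
    SELECT current_setting('max_connections')::int
         + current_setting('autovacuum_max_workers')::int
         + current_setting('max_worker_processes')::int
         + 5 AS n
)
SELECT ceil(n / 16.0)      AS min_semmni,   -- semaphore sets (SEMMNI)
       ceil(n / 16.0) * 17 AS min_semmns    -- individual semaphores (SEMMNS)
FROM s;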
When using POSIX semaphores, the number of semaphores needed is the same as for System V, that is one semaphore per allowed connection - (), allowed autovacuum worker process - () and allowed background - process (). + (), allowed autovacuum worker process + () and allowed background + process (). On the platforms where this option is preferred, there is no specific kernel limit on the number of POSIX semaphores. @@ -810,8 +825,8 @@ psql: could not connect to server: No such file or directory - AIX - AIXIPC configuration + AIX + AIXIPC configuration @@ -833,8 +848,8 @@ psql: could not connect to server: No such file or directory - FreeBSD - FreeBSDIPC configuration + FreeBSD + FreeBSDIPC configuration @@ -861,8 +876,8 @@ kern.ipc.semmnu=256 After modifying these values a reboot is required for the new settings to take effect. - (Note: FreeBSD does not use SEMMAP. Older versions - would accept but ignore a setting for kern.ipc.semmap; + (Note: FreeBSD does not use SEMMAP. Older versions + would accept but ignore a setting for kern.ipc.semmap; newer versions reject it altogether.) @@ -874,8 +889,8 @@ kern.ipc.semmnu=256 - If running in FreeBSD jails by enabling sysctl's - security.jail.sysvipc_allowed, postmasters + If running in FreeBSD jails by enabling sysctl's + security.jail.sysvipc_allowed, postmasters running in different jails should be run by different operating system users. This improves security because it prevents non-root users from interfering with shared memory or semaphores in different jails, @@ -886,19 +901,19 @@ kern.ipc.semmnu=256 - FreeBSD versions before 4.0 work like - OpenBSD (see below). + FreeBSD versions before 4.0 work like + OpenBSD (see below). - NetBSD - NetBSDIPC configuration + NetBSD + NetBSDIPC configuration - In NetBSD 5.0 and later, + In NetBSD 5.0 and later, IPC parameters can be adjusted using sysctl, for example: @@ -916,24 +931,24 @@ kern.ipc.semmnu=256 - NetBSD versions before 5.0 work like - OpenBSD (see below), except that - parameters should be set with the keyword options not - option. + NetBSD versions before 5.0 work like + OpenBSD (see below), except that + parameters should be set with the keyword options not + option. - OpenBSD - OpenBSDIPC configuration + OpenBSD + OpenBSDIPC configuration - The options SYSVSHM and SYSVSEM need + The options SYSVSHM and SYSVSEM need to be enabled when the kernel is compiled. (They are by default.) The maximum size of shared memory is determined by - the option SHMMAXPGS (in pages). The following + the option SHMMAXPGS (in pages). The following shows an example of how to set the various parameters: option SYSVSHM @@ -944,44 +959,37 @@ option SYSVSEM option SEMMNI=256 option SEMMNS=512 option SEMMNU=256 -option SEMMAP=256 - - You might also want to configure your kernel to lock shared - memory into RAM and prevent it from being paged out to swap. - This can be accomplished using the sysctl - setting kern.ipc.shm_use_phys. - - HP-UX - HP-UXIPC configuration + HP-UX + HP-UXIPC configuration The default settings tend to suffice for normal installations. - On HP-UX 10, the factory default for - SEMMNS is 128, which might be too low for larger + On HP-UX 10, the factory default for + SEMMNS is 128, which might be too low for larger database sites. - IPC parameters can be set in the System - Administration Manager (SAM) under + IPC parameters can be set in the System + Administration Manager (SAM) under Kernel - ConfigurationConfigurable Parameters. Choose - Create A New Kernel when you're done. 
+ ConfigurationConfigurable Parameters. Choose + Create A New Kernel when you're done. - Linux - LinuxIPC configuration + Linux + LinuxIPC configuration @@ -1023,13 +1031,13 @@ option SEMMAP=256 - macOS - macOSIPC configuration + macOS + macOSIPC configuration The recommended method for configuring shared memory in macOS - is to create a file named /etc/sysctl.conf, + is to create a file named /etc/sysctl.conf, containing variable assignments such as: kern.sysv.shmmax=4194304 @@ -1039,32 +1047,32 @@ kern.sysv.shmseg=8 kern.sysv.shmall=1024 Note that in some macOS versions, - all five shared-memory parameters must be set in - /etc/sysctl.conf, else the values will be ignored. + all five shared-memory parameters must be set in + /etc/sysctl.conf, else the values will be ignored. Beware that recent releases of macOS ignore attempts to set - SHMMAX to a value that isn't an exact multiple of 4096. + SHMMAX to a value that isn't an exact multiple of 4096. - SHMALL is measured in 4 kB pages on this platform. + SHMALL is measured in 4 kB pages on this platform. In older macOS versions, you will need to reboot to have changes in the shared memory parameters take effect. As of 10.5 it is possible to - change all but SHMMNI on the fly, using - sysctl. But it's still best to set up your preferred - values via /etc/sysctl.conf, so that the values will be + change all but SHMMNI on the fly, using + sysctl. But it's still best to set up your preferred + values via /etc/sysctl.conf, so that the values will be kept across reboots. - The file /etc/sysctl.conf is only honored in macOS + The file /etc/sysctl.conf is only honored in macOS 10.3.9 and later. If you are running a previous 10.3.x release, - you must edit the file /etc/rc + you must edit the file /etc/rc and change the values in the following commands: sysctl -w kern.sysv.shmmax @@ -1074,27 +1082,27 @@ sysctl -w kern.sysv.shmseg sysctl -w kern.sysv.shmall Note that - /etc/rc is usually overwritten by macOS system updates, + /etc/rc is usually overwritten by macOS system updates, so you should expect to have to redo these edits after each update. In macOS 10.2 and earlier, instead edit these commands in the file - /System/Library/StartupItems/SystemTuning/SystemTuning. + /System/Library/StartupItems/SystemTuning/SystemTuning. - Solaris 2.6 to 2.9 (Solaris + Solaris 2.6 to 2.9 (Solaris 6 to Solaris 9) - SolarisIPC configuration + SolarisIPC configuration The relevant settings can be changed in - /etc/system, for example: + /etc/system, for example: set shmsys:shminfo_shmmax=0x2000000 set shmsys:shminfo_shmmin=1 @@ -1114,30 +1122,30 @@ set semsys:seminfo_semmsl=32 - Solaris 2.10 (Solaris + Solaris 2.10 (Solaris 10) and later - OpenSolaris + OpenSolaris In Solaris 10 and later, and OpenSolaris, the default shared memory and semaphore settings are good enough for most - PostgreSQL applications. Solaris now defaults - to a SHMMAX of one-quarter of system RAM. + PostgreSQL applications. Solaris now defaults + to a SHMMAX of one-quarter of system RAM. To further adjust this setting, use a project setting associated - with the postgres user. For example, run the - following as root: + with the postgres user. 
For example, run the + following as root: projadd -c "PostgreSQL DB User" -K "project.max-shm-memory=(privileged,8GB,deny)" -U postgres -G postgres user.postgres - This command adds the user.postgres project and - sets the shared memory maximum for the postgres + This command adds the user.postgres project and + sets the shared memory maximum for the postgres user to 8GB, and takes effect the next time that user logs - in, or when you restart PostgreSQL (not reload). - The above assumes that PostgreSQL is run by - the postgres user in the postgres + in, or when you restart PostgreSQL (not reload). + The above assumes that PostgreSQL is run by + the postgres user in the postgres group. No server reboot is required. @@ -1152,11 +1160,11 @@ project.max-msg-ids=(priv,4096,deny) - Additionally, if you are running PostgreSQL + Additionally, if you are running PostgreSQL inside a zone, you may need to raise the zone resource usage limits as well. See "Chapter2: Projects and Tasks" in the - System Administrator's Guide for more - information on projects and prctl. + System Administrator's Guide for more + information on projects and prctl. @@ -1259,7 +1267,7 @@ RemoveIPC=no limit can only be changed by the root user. The system call setrlimit is responsible for setting these parameters. The shell's built-in command ulimit - (Bourne shells) or limit (csh) is + (Bourne shells) or limit (csh) is used to control the resource limits from the command line. On BSD-derived systems the file /etc/login.conf controls the various resource limits set during login. See the @@ -1320,8 +1328,8 @@ default:\ processes to open large numbers of files; if more than a few processes do so then the system-wide limit can easily be exceeded. If you find this happening, and you do not want to alter the - system-wide limit, you can set PostgreSQL's configuration parameter to + system-wide limit, you can set PostgreSQL's configuration parameter to limit the consumption of open files.
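    As a brief illustration of the last point, you can check and raise the per-process limit in the shell
    that starts the server, or cap PostgreSQL's own consumption instead (the values shown are examples only):

$ ulimit -n            # current soft limit on open files (Bourne-style shells)
$ ulimit -n 4096       # raise it for processes started from this shell, up to the hard limit

    and, in postgresql.conf:

max_files_per_process = 500    # illustrative value; lowers PostgreSQL's per-backend file usage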
@@ -1380,36 +1388,36 @@ Out of Memory: Killed process 12345 (postgres). system running out of memory, you can avoid the problem by changing your configuration. In some cases, it may help to lower memory-related configuration parameters, particularly - shared_buffers - and work_mem. In + shared_buffers + and work_mem. In other cases, the problem may be caused by allowing too many connections to the database server itself. In many cases, it may be better to reduce - max_connections + max_connections and instead make use of external connection-pooling software. On Linux 2.6 and later, it is possible to modify the - kernel's behavior so that it will not overcommit memory. + kernel's behavior so that it will not overcommit memory. Although this setting will not prevent the OOM killer from being invoked + url="https://lwn.net/Articles/104179/">OOM killer from being invoked altogether, it will lower the chances significantly and will therefore lead to more robust system behavior. This is done by selecting strict overcommit mode via sysctl: sysctl -w vm.overcommit_memory=2 - or placing an equivalent entry in /etc/sysctl.conf. + or placing an equivalent entry in /etc/sysctl.conf. You might also wish to modify the related setting - vm.overcommit_ratio. For details see the kernel documentation + vm.overcommit_ratio. For details see the kernel documentation file . Another approach, which can be used with or without altering - vm.overcommit_memory, is to set the process-specific - OOM score adjustment value for the postmaster process to - -1000, thereby guaranteeing it will not be targeted by the OOM + vm.overcommit_memory, is to set the process-specific + OOM score adjustment value for the postmaster process to + -1000, thereby guaranteeing it will not be targeted by the OOM killer. The simplest way to do this is to execute echo -1000 > /proc/self/oom_score_adj @@ -1426,33 +1434,33 @@ export PG_OOM_ADJUST_VALUE=0 These settings will cause postmaster child processes to run with the normal OOM score adjustment of zero, so that the OOM killer can still target them at need. You could use some other value for - PG_OOM_ADJUST_VALUE if you want the child processes to run - with some other OOM score adjustment. (PG_OOM_ADJUST_VALUE + PG_OOM_ADJUST_VALUE if you want the child processes to run + with some other OOM score adjustment. (PG_OOM_ADJUST_VALUE can also be omitted, in which case it defaults to zero.) If you do not - set PG_OOM_ADJUST_FILE, the child processes will run with the + set PG_OOM_ADJUST_FILE, the child processes will run with the same OOM score adjustment as the postmaster, which is unwise since the whole point is to ensure that the postmaster has a preferential setting. - Older Linux kernels do not offer /proc/self/oom_score_adj, + Older Linux kernels do not offer /proc/self/oom_score_adj, but may have a previous version of the same functionality called - /proc/self/oom_adj. This works the same except the disable - value is -17 not -1000. + /proc/self/oom_adj. This works the same except the disable + value is -17 not -1000. Some vendors' Linux 2.4 kernels are reported to have early versions of the 2.6 overcommit sysctl parameter. However, setting - vm.overcommit_memory to 2 + vm.overcommit_memory to 2 on a 2.4 kernel that does not have the relevant code will make things worse, not better. 
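    On a kernel that does support strict overcommit, the related ratio can be tuned at the same time and
    both settings persisted across reboots; a minimal sketch (the ratio value is only an example):

$ sysctl -w vm.overcommit_memory=2
$ sysctl -w vm.overcommit_ratio=80
$ printf 'vm.overcommit_memory=2\nvm.overcommit_ratio=80\n' >> /etc/sysctl.conf    # keep the settings after reboot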
It is recommended that you inspect the actual kernel source code (see the function - vm_enough_memory in the file mm/mmap.c) + vm_enough_memory in the file mm/mmap.c) to verify what is supported in your kernel before you try this in a 2.4 - installation. The presence of the overcommit-accounting - documentation file should not be taken as evidence that the + installation. The presence of the overcommit-accounting + documentation file should not be taken as evidence that the feature is there. If in any doubt, consult a kernel expert or your kernel vendor. @@ -1465,21 +1473,21 @@ export PG_OOM_ADJUST_VALUE=0 Using huge pages reduces overhead when using large contiguous chunks of memory, as PostgreSQL does, particularly when - using large values of . To use this + using large values of . To use this feature in PostgreSQL you need a kernel with CONFIG_HUGETLBFS=y and CONFIG_HUGETLB_PAGE=y. You will also have to adjust the kernel setting vm.nr_hugepages. To estimate the number of huge pages needed, start PostgreSQL without huge pages enabled and check the - postmaster's VmPeak value, as well as the system's - huge page size, using the /proc file system. This might + postmaster's anonymous shared memory segment size, as well as the system's + huge page size, using the /proc file system. This might look like: $ head -1 $PGDATA/postmaster.pid 4170 -$ grep ^VmPeak /proc/4170/status -VmPeak: 6490428 kB +$ pmap 4170 | awk '/rw-s/ && /zero/ {print $2}' +6490428K $ grep ^Hugepagesize /proc/meminfo Hugepagesize: 2048 kB @@ -1509,17 +1517,17 @@ $ grep Huge /proc/meminfo It may also be necessary to give the database server's operating system user permission to use huge pages by setting - vm.hugetlb_shm_group via sysctl, and/or - give permission to lock memory with ulimit -l. + vm.hugetlb_shm_group via sysctl, and/or + give permission to lock memory with ulimit -l. The default behavior for huge pages in PostgreSQL is to use them when possible and to fall back to normal pages when failing. To enforce the use of huge - pages, you can set - to on in postgresql.conf. - Note that with this setting PostgreSQL will fail to + pages, you can set + to on in postgresql.conf. + Note that with this setting PostgreSQL will fail to start if not enough huge pages are available. @@ -1537,7 +1545,7 @@ $ grep Huge /proc/meminfo Shutting Down the Server - shutdown + shutdown @@ -1547,7 +1555,7 @@ $ grep Huge /proc/meminfo - SIGTERMSIGTERM + SIGTERMSIGTERM This is the Smart Shutdown mode. @@ -1566,7 +1574,7 @@ $ grep Huge /proc/meminfo - SIGINTSIGINT + SIGINTSIGINT This is the Fast Shutdown mode. @@ -1581,7 +1589,7 @@ $ grep Huge /proc/meminfo - SIGQUITSIGQUIT + SIGQUITSIGQUIT This is the Immediate Shutdown mode. @@ -1600,11 +1608,11 @@ $ grep Huge /proc/meminfo - The program provides a convenient + The program provides a convenient interface for sending these signals to shut down the server. - Alternatively, you can send the signal directly using kill + Alternatively, you can send the signal directly using kill on non-Windows systems. - The PID of the postgres process can be + The PID of the postgres process can be found using the ps program, or from the file postmaster.pid in the data directory. 
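    If you prefer pg_ctl over raw signals, the three shutdown modes map onto it as follows
    (a sketch; the data directory path is illustrative):

$ pg_ctl stop -D /usr/local/pgsql/data -m smart        # equivalent to SIGTERM
$ pg_ctl stop -D /usr/local/pgsql/data -m fast         # equivalent to SIGINT
$ pg_ctl stop -D /usr/local/pgsql/data -m immediate    # equivalent to SIGQUIT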
For example, to do a fast shutdown: @@ -1628,15 +1636,15 @@ $ kill -INT `head -1 /usr/local/pgsql/data/postmaster.pid` To terminate an individual session while allowing other sessions to - continue, use pg_terminate_backend() (see ) or send a - SIGTERM signal to the child process associated with + continue, use pg_terminate_backend() (see ) or send a + SIGTERM signal to the child process associated with the session. - Upgrading a <productname>PostgreSQL</> Cluster + Upgrading a <productname>PostgreSQL</productname> Cluster upgrading @@ -1649,7 +1657,7 @@ $ kill -INT `head -1 /usr/local/pgsql/data/postmaster.pid` This section discusses how to upgrade your database data from one - PostgreSQL release to a newer one. + PostgreSQL release to a newer one. @@ -1676,11 +1684,11 @@ $ kill -INT `head -1 /usr/local/pgsql/data/postmaster.pid` - For major releases of PostgreSQL, the + For major releases of PostgreSQL, the internal data storage format is subject to change, thus complicating upgrades. The traditional method for moving data to a new major version is to dump and reload the database, though this can be slow. A - faster method is . Replication methods are + faster method is . Replication methods are also available, as discussed below. @@ -1688,7 +1696,7 @@ $ kill -INT `head -1 /usr/local/pgsql/data/postmaster.pid`); pay particular attention to the section + linkend="release"/>); pay particular attention to the section labeled "Migration". If you are upgrading across several major versions, be sure to read the release notes for each intervening version. @@ -1698,7 +1706,7 @@ $ kill -INT `head -1 /usr/local/pgsql/data/postmaster.pid`PostgreSQL major upgrade, consider the + testing a PostgreSQL major upgrade, consider the following categories of possible changes: @@ -1728,7 +1736,7 @@ $ kill -INT `head -1 /usr/local/pgsql/data/postmaster.pid`Library API - Typically libraries like libpq only add new + Typically libraries like libpq only add new functionality, again unless mentioned in the release notes. @@ -1757,13 +1765,13 @@ $ kill -INT `head -1 /usr/local/pgsql/data/postmaster.pid` - Upgrading Data via <application>pg_dumpall</> + Upgrading Data via <application>pg_dumpall</application> One upgrade method is to dump data from one major version of - PostgreSQL and reload it in another — to do - this, you must use a logical backup tool like - pg_dumpall; file system + PostgreSQL and reload it in another — to do + this, you must use a logical backup tool like + pg_dumpall; file system level backup methods will not work. (There are checks in place that prevent you from using a data directory with an incompatible version of PostgreSQL, so no great harm can be done by @@ -1771,18 +1779,18 @@ $ kill -INT `head -1 /usr/local/pgsql/data/postmaster.pid` - It is recommended that you use the pg_dump and - pg_dumpall programs from the newer + It is recommended that you use the pg_dump and + pg_dumpall programs from the newer version of - PostgreSQL, to take advantage of enhancements + PostgreSQL, to take advantage of enhancements that might have been made in these programs. Current releases of the dump programs can read data from any server version back to 7.0. These instructions assume that your existing installation is under the - /usr/local/pgsql directory, and that the data area is in - /usr/local/pgsql/data. Substitute your paths + /usr/local/pgsql directory, and that the data area is in + /usr/local/pgsql/data. Substitute your paths appropriately. 
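    Before starting, it can help to confirm which binaries you will actually invoke, since the dump should
    be taken with the newer version's client tools. A sketch, assuming (hypothetically) that the new release
    is installed under a separate prefix such as /usr/local/pgsql.new:

$ /usr/local/pgsql/bin/postgres --version          # version of the old server
$ /usr/local/pgsql.new/bin/pg_dumpall --version    # client tools that will take the dump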
@@ -1792,9 +1800,9 @@ $ kill -INT `head -1 /usr/local/pgsql/data/postmaster.pid`/usr/local/pgsql/data/pg_hba.conf + permissions in the file /usr/local/pgsql/data/pg_hba.conf (or equivalent) to disallow access from everyone except you. - See for additional information on + See for additional information on access control. @@ -1806,14 +1814,14 @@ $ kill -INT `head -1 /usr/local/pgsql/data/postmaster.pid` -pg_dumpall > outputfile +pg_dumpall > outputfile To make the backup, you can use the pg_dumpall command from the version you are currently running; see for more details. For best + linkend="backup-dump-all"/> for more details. For best results, however, try to use the pg_dumpall command from PostgreSQL &version;, since this version contains bug fixes and improvements over older @@ -1830,16 +1838,16 @@ $ kill -INT `head -1 /usr/local/pgsql/data/postmaster.pid` Shut down the old server: -pg_ctl stop +pg_ctl stop - On systems that have PostgreSQL started at boot time, + On systems that have PostgreSQL started at boot time, there is probably a start-up file that will accomplish the same thing. For - example, on a Red Hat Linux system one + example, on a Red Hat Linux system one might find that this works: /etc/rc.d/init.d/postgresql stop - See for details about starting and + See for details about starting and stopping the server. @@ -1853,7 +1861,7 @@ $ kill -INT `head -1 /usr/local/pgsql/data/postmaster.pid` -mv /usr/local/pgsql /usr/local/pgsql.old +mv /usr/local/pgsql /usr/local/pgsql.old (Be sure to move the directory as a single unit so relative paths remain unchanged.) @@ -1863,8 +1871,7 @@ $ kill -INT `head -1 /usr/local/pgsql/data/postmaster.pid` Install the new version of PostgreSQL as - outlined in - .]]> + outlined in . @@ -1874,15 +1881,15 @@ $ kill -INT `head -1 /usr/local/pgsql/data/postmaster.pid` -/usr/local/pgsql/bin/initdb -D /usr/local/pgsql/data +/usr/local/pgsql/bin/initdb -D /usr/local/pgsql/data - Restore your previous pg_hba.conf and any - postgresql.conf modifications. + Restore your previous pg_hba.conf and any + postgresql.conf modifications. @@ -1891,7 +1898,7 @@ $ kill -INT `head -1 /usr/local/pgsql/data/postmaster.pid` -/usr/local/pgsql/bin/postgres -D /usr/local/pgsql/data +/usr/local/pgsql/bin/postgres -D /usr/local/pgsql/data @@ -1900,9 +1907,9 @@ $ kill -INT `head -1 /usr/local/pgsql/data/postmaster.pid` Finally, restore your data from backup with: -/usr/local/pgsql/bin/psql -d postgres -f outputfile +/usr/local/pgsql/bin/psql -d postgres -f outputfile - using the new psql. + using the new psql. @@ -1921,16 +1928,16 @@ pg_dumpall -p 5432 | psql -d postgres -p 5433 - Upgrading Data via <application>pg_upgrade</> + Upgrading Data via <application>pg_upgrade</application> - The module allows an installation to - be migrated in-place from one major PostgreSQL + The module allows an installation to + be migrated in-place from one major PostgreSQL version to another. Upgrades can be performed in minutes, - particularly with @@ -1939,18 +1946,25 @@ pg_dumpall -p 5432 | psql -d postgres -p 5433 Upgrading Data via Replication - It is also possible to use certain replication methods, such as - Slony, to create a standby server with the updated version of - PostgreSQL. This is possible because Slony supports + It is also possible to use logical replication methods to create a standby + server with the updated version of PostgreSQL. + This is possible because logical replication supports replication between different major versions of - PostgreSQL. 
The standby can be on the same computer or + PostgreSQL. The standby can be on the same computer or a different computer. Once it has synced up with the master server - (running the older version of PostgreSQL), you can + (running the older version of PostgreSQL), you can switch masters and make the standby the master and shut down the older database instance. Such a switch-over results in only several seconds of downtime for an upgrade. + + This method of upgrading can be performed using the built-in logical + replication facilities as well as using external logical replication + systems such as pglogical, + Slony, Londiste, and + Bucardo. + @@ -1967,28 +1981,28 @@ pg_dumpall -p 5432 | psql -d postgres -p 5433 server is down, it is possible for a local user to spoof the normal server by starting their own server. The spoof server could read passwords and queries sent by clients, but could not return any data - because the PGDATA directory would still be secure because + because the PGDATA directory would still be secure because of directory permissions. Spoofing is possible because any user can start a database server; a client cannot identify an invalid server unless it is specially configured. - One way to prevent spoofing of local + One way to prevent spoofing of local connections is to use a Unix domain socket directory () that has write permission only + linkend="guc-unix-socket-directories"/>) that has write permission only for a trusted local user. This prevents a malicious user from creating their own socket file in that directory. If you are concerned that - some applications might still reference /tmp for the + some applications might still reference /tmp for the socket file and hence be vulnerable to spoofing, during operating system - startup create a symbolic link /tmp/.s.PGSQL.5432 that points + startup create a symbolic link /tmp/.s.PGSQL.5432 that points to the relocated socket file. You also might need to modify your - /tmp cleanup script to prevent removal of the symbolic link. + /tmp cleanup script to prevent removal of the symbolic link. - Another option for local connections is for clients to use - requirepeer + Another option for local connections is for clients to use + requirepeer to specify the required owner of the server process connected to the socket. @@ -1997,12 +2011,12 @@ pg_dumpall -p 5432 | psql -d postgres -p 5433 To prevent spoofing on TCP connections, the best solution is to use SSL certificates and make sure that clients check the server's certificate. To do that, the server - must be configured to accept only hostssl connections () and have SSL key and certificate files - (). The TCP client must connect using - sslmode=verify-ca or - verify-full and have the appropriate root certificate - file installed (). + must be configured to accept only hostssl connections () and have SSL key and certificate files + (). The TCP client must connect using + sslmode=verify-ca or + verify-full and have the appropriate root certificate + file installed (). @@ -2024,16 +2038,18 @@ pg_dumpall -p 5432 | psql -d postgres -p 5433 - Password Storage Encryption + Password Encryption - By default, database user passwords are stored as MD5 hashes, so - the administrator cannot determine the actual password assigned - to the user. If MD5 encryption is used for client authentication, - the unencrypted password is never even temporarily present on the - server because the client MD5-encrypts it before being sent - across the network. 
+ Database user passwords are stored as hashes (determined by the setting + ), so the administrator cannot + determine the actual password assigned to the user. If SCRAM or MD5 + encryption is used for client authentication, the unencrypted password is + never even temporarily present on the server because the client encrypts + it before being sent across the network. SCRAM is preferred, because it + is an Internet standard and is more secure than the PostgreSQL-specific + MD5 authentication protocol. @@ -2043,7 +2059,7 @@ pg_dumpall -p 5432 | psql -d postgres -p 5433 - The module allows certain fields to be + The module allows certain fields to be stored encrypted. This is useful if only some of the data is sensitive. The client supplies the decryption key and the data is decrypted @@ -2087,24 +2103,6 @@ pg_dumpall -p 5432 | psql -d postgres -p 5433 - - Encrypting Passwords Across A Network - - - - The MD5 authentication method double-encrypts the - password on the client before sending it to the server. It first - MD5-encrypts it based on the user name, and then encrypts it - based on a random salt sent by the server when the database - connection was made. It is this double-encrypted value that is - sent over the network to the server. Double-encryption not only - prevents the password from being discovered, it also prevents - another connection from using the same encrypted password to - connect to the database server at a later time. - - - - Encrypting Data Across A Network @@ -2112,12 +2110,12 @@ pg_dumpall -p 5432 | psql -d postgres -p 5433 SSL connections encrypt all data sent across the network: the password, the queries, and the data returned. The - pg_hba.conf file allows administrators to specify - which hosts can use non-encrypted connections (host) + pg_hba.conf file allows administrators to specify + which hosts can use non-encrypted connections (host) and which require SSL-encrypted connections - (hostssl). Also, clients can specify that they - connect to servers only via SSL. Stunnel or - SSH can also be used to encrypt transmissions. + (hostssl). Also, clients can specify that they + connect to servers only via SSL. Stunnel or + SSH can also be used to encrypt transmissions. @@ -2132,7 +2130,7 @@ pg_dumpall -p 5432 | psql -d postgres -p 5433 on each side, but this provides stronger verification of identity than the mere use of passwords. It prevents a computer from pretending to be the server just long enough to read the password - sent by the client. It also helps prevent man in the middle + sent by the client. It also helps prevent man in the middle attacks where a computer between the client and server pretends to be the server and reads and passes all data between the client and server. @@ -2167,63 +2165,37 @@ pg_dumpall -p 5432 | psql -d postgres -p 5433 - PostgreSQL has native support for using - SSL connections to encrypt client/server communications + PostgreSQL has native support for using + SSL connections to encrypt client/server communications for increased security. This requires that OpenSSL is installed on both client and - server systems and that support in PostgreSQL is - enabled at build time (see ). + server systems and that support in PostgreSQL is + enabled at build time (see ). - - With SSL support compiled in, the - PostgreSQL server can be started with - SSL enabled by setting the parameter - to on in - postgresql.conf. 
The server will listen for both normal - and SSL connections on the same TCP port, and will negotiate - with any connecting client on whether to use SSL. By - default, this is at the client's option; see about how to set up the server to require - use of SSL for some or all connections. - - - - PostgreSQL reads the system-wide - OpenSSL configuration file. By default, this - file is named openssl.cnf and is located in the - directory reported by openssl version -d. - This default can be overridden by setting environment variable - OPENSSL_CONF to the name of the desired configuration file. - + + Basic Setup - OpenSSL supports a wide range of ciphers - and authentication algorithms, of varying strength. While a list of - ciphers can be specified in the OpenSSL - configuration file, you can specify ciphers specifically for use by - the database server by modifying in - postgresql.conf. + With SSL support compiled in, the + PostgreSQL server can be started with + SSL enabled by setting the parameter + to on in + postgresql.conf. The server will listen for both normal + and SSL connections on the same TCP port, and will negotiate + with any connecting client on whether to use SSL. By + default, this is at the client's option; see about how to set up the server to require + use of SSL for some or all connections. - - - It is possible to have authentication without encryption overhead by - using NULL-SHA or NULL-MD5 ciphers. However, - a man-in-the-middle could read and pass communications between client - and server. Also, encryption overhead is minimal compared to the - overhead of authentication. For these reasons NULL ciphers are not - recommended. - - - - To start in SSL mode, files containing the server certificate + To start in SSL mode, files containing the server certificate and private key must exist. By default, these files are expected to be - named server.crt and server.key, respectively, in + named server.crt and server.key, respectively, in the server's data directory, but other names and locations can be specified - using the configuration parameters - and . + using the configuration parameters + and . @@ -2237,6 +2209,15 @@ pg_dumpall -p 5432 | psql -d postgres -p 5433 member of the group that has access to those certificate and key files. + + If the data directory allows group read access then certificate files may + need to be located outside of the data directory in order to conform to the + security requirements outlined above. Generally, group access is enabled + to allow an unprivileged user to backup the database, and in that case the + backup software will not be able to read the certificate files and will + likely error. + + If the private key is protected with a passphrase, the server will prompt for the passphrase and will not start until it has @@ -2248,70 +2229,101 @@ pg_dumpall -p 5432 | psql -d postgres -p 5433 - In some cases, the server certificate might be signed by an - intermediate certificate authority, rather than one that is - directly trusted by clients. To use such a certificate, append the - certificate of the signing authority to the server.crt file, - then its parent authority's certificate, and so on up to a certificate - authority, root or intermediate, that is trusted by - clients, i.e. signed by a certificate in the clients' - root.crt files. + The first certificate in server.crt must be the + server's certificate because it must match the server's private key. 
+ The certificates of intermediate certificate authorities + can also be appended to the file. Doing this avoids the necessity of + storing intermediate certificates on clients, assuming the root and + intermediate certificates were created with v3_ca + extensions. This allows easier expiration of intermediate certificates. + + It is not necessary to add the root certificate to + server.crt. Instead, clients must have the root + certificate of the server's certificate chain. + + + + + OpenSSL Configuration + + + PostgreSQL reads the system-wide + OpenSSL configuration file. By default, this + file is named openssl.cnf and is located in the + directory reported by openssl version -d. + This default can be overridden by setting environment variable + OPENSSL_CONF to the name of the desired configuration file. + + + + OpenSSL supports a wide range of ciphers + and authentication algorithms, of varying strength. While a list of + ciphers can be specified in the OpenSSL + configuration file, you can specify ciphers specifically for use by + the database server by modifying in + postgresql.conf. + + + + + It is possible to have authentication without encryption overhead by + using NULL-SHA or NULL-MD5 ciphers. However, + a man-in-the-middle could read and pass communications between client + and server. Also, encryption overhead is minimal compared to the + overhead of authentication. For these reasons NULL ciphers are not + recommended. + + + + Using Client Certificates - To require the client to supply a trusted certificate, place - certificates of the certificate authorities (CAs) - you trust in the file root.crt in the data - directory, set the parameter in - postgresql.conf to root.crt, - and add the authentication option clientcert=1 to the - appropriate hostssl line(s) in pg_hba.conf. - A certificate will then be requested from the client during - SSL connection startup. (See for a - description of how to set up certificates on the client.) The server will + To require the client to supply a trusted certificate, + place certificates of the root certificate authorities + (CAs) you trust in a file in the data + directory, set the parameter in + postgresql.conf to the new file name, and add the + authentication option clientcert=1 to the appropriate + hostssl line(s) in pg_hba.conf. + A certificate will then be requested from the client during SSL + connection startup. (See for a description + of how to set up certificates on the client.) The server will verify that the client's certificate is signed by one of the trusted certificate authorities. - If intermediate CAs appear in - root.crt, the file must also contain certificate - chains to their root CAs. Certificate Revocation List - (CRL) entries - are also checked if the parameter is set. - + Intermediate certificates that chain up to existing root certificates + can also appear in the file if + you wish to avoid storing them on clients (assuming the root and + intermediate certificates were created with v3_ca + extensions). Certificate Revocation List (CRL) entries are also + checked if the parameter is set. (See + url="http://h41379.www4.hpe.com/doc/83final/ba554_90007/ch04s02.html"> for diagrams showing SSL certificate usage.) The clientcert authentication option is available for - all authentication methods, but only in pg_hba.conf lines - specified as hostssl. When clientcert is + all authentication methods, but only in pg_hba.conf lines + specified as hostssl. 
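    For illustration, such a hostssl line might look like this (the network range and
    authentication method are placeholders, not recommendations):

hostssl  all  all  192.168.12.0/24  scram-sha-256  clientcert=1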
When clientcert is not specified or is set to 0, the server will still verify any presented client certificates against its CA file, if one is configured — but it will not insist that a client certificate be presented. - - Note that the server's root.crt lists the top-level - CAs that are considered trusted for signing client certificates. - In principle it need - not list the CA that signed the server's certificate, though in most cases - that CA would also be trusted for client certificates. - - If you are setting up client certificates, you may wish to use - the cert authentication method, so that the certificates + the cert authentication method, so that the certificates control user authentication as well as providing connection security. - See for details. (It is not necessary to + See for details. (It is not necessary to specify clientcert=1 explicitly when using - the cert authentication method.) + the cert authentication method.) @@ -2319,9 +2331,9 @@ pg_dumpall -p 5432 | psql -d postgres -p 5433 SSL Server File Usage - summarizes the files that are + summarizes the files that are relevant to the SSL setup on the server. (The shown file names are default - or typical names. The locally configured names could be different.) + names. The locally configured names could be different.) @@ -2338,27 +2350,27 @@ pg_dumpall -p 5432 | psql -d postgres -p 5433 - ($PGDATA/server.crt) + ($PGDATA/server.crt) server certificate sent to client to indicate server's identity - ($PGDATA/server.key) + ($PGDATA/server.key) server private key proves server certificate was sent by the owner; does not indicate certificate owner is trustworthy - ($PGDATA/root.crt) + trusted certificate authorities checks that client certificate is signed by a trusted certificate authority - ($PGDATA/root.crl) + certificates revoked by certificate authorities client certificate must not be on this list @@ -2369,7 +2381,7 @@ pg_dumpall -p 5432 | psql -d postgres -p 5433 The server reads these files at server start and whenever the server - configuration is reloaded. On Windows + configuration is reloaded. On Windows systems, they are also re-read whenever a new backend process is spawned for a new client connection. @@ -2378,7 +2390,7 @@ pg_dumpall -p 5432 | psql -d postgres -p 5433 If an error in these files is detected at server start, the server will refuse to start. But if an error is detected during a configuration reload, the files are ignored and the old SSL configuration continues to - be used. On Windows systems, if an error in + be used. On Windows systems, if an error in these files is detected at backend start, that backend will be unable to establish an SSL connection. In all these cases, the error condition is reported in the server log. @@ -2386,15 +2398,16 @@ pg_dumpall -p 5432 | psql -d postgres -p 5433 - Creating a Self-signed Certificate + Creating Certificates - To create a quick self-signed certificate for the server, valid for 365 + To create a simple self-signed certificate for the server, valid for 365 days, use the following OpenSSL command, - replacing yourdomain.com with the server's host name: + replacing dbhost.yourdomain.com with the + server's host name: openssl req -new -x509 -days 365 -nodes -text -out server.crt \ - -keyout server.key -subj "/CN=yourdomain.com" + -keyout server.key -subj "/CN=dbhost.yourdomain.com" Then do: @@ -2403,18 +2416,90 @@ chmod og-rwx server.key because the server will reject the file if its permissions are more liberal than this. 
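    Optionally, a quick sanity check of the freshly generated files; both commands only read them:

$ openssl x509 -in server.crt -noout -subject -dates    # confirm the CN and the validity window
$ openssl rsa -in server.key -check -noout              # confirm the private key is intact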
For more details on how to create your server private key and - certificate, refer to the OpenSSL documentation. + certificate, refer to the OpenSSL documentation. - A self-signed certificate can be used for testing, but a certificate - signed by a certificate authority (CA) (either one of the - global CAs or a local one) should be used in production - so that clients can verify the server's identity. If all the clients - are local to the organization, using a local CA is - recommended. + While a self-signed certificate can be used for testing, a certificate + signed by a certificate authority (CA) (usually an + enterprise-wide root CA) should be used in production. + + + + To create a server certificate whose identity can be validated + by clients, first create a certificate signing request + (CSR) and a public/private key file: + +openssl req -new -nodes -text -out root.csr \ + -keyout root.key -subj "/CN=root.yourdomain.com" +chmod og-rwx root.key + + Then, sign the request with the key to create a root certificate + authority (using the default OpenSSL + configuration file location on Linux): + +openssl x509 -req -in root.csr -text -days 3650 \ + -extfile /etc/ssl/openssl.cnf -extensions v3_ca \ + -signkey root.key -out root.crt + + Finally, create a server certificate signed by the new root certificate + authority: + +openssl req -new -nodes -text -out server.csr \ + -keyout server.key -subj "/CN=dbhost.yourdomain.com" +chmod og-rwx server.key + +openssl x509 -req -in server.csr -text -days 365 \ + -CA root.crt -CAkey root.key -CAcreateserial \ + -out server.crt + + server.crt and server.key + should be stored on the server, and root.crt should + be stored on the client so the client can verify that the server's leaf + certificate was signed by its trusted root certificate. + root.key should be stored offline for use in + creating future certificates. + + It is also possible to create a chain of trust that includes + intermediate certificates: + +# root +openssl req -new -nodes -text -out root.csr \ + -keyout root.key -subj "/CN=root.yourdomain.com" +chmod og-rwx root.key +openssl x509 -req -in root.csr -text -days 3650 \ + -extfile /etc/ssl/openssl.cnf -extensions v3_ca \ + -signkey root.key -out root.crt + +# intermediate +openssl req -new -nodes -text -out intermediate.csr \ + -keyout intermediate.key -subj "/CN=intermediate.yourdomain.com" +chmod og-rwx intermediate.key +openssl x509 -req -in intermediate.csr -text -days 1825 \ + -extfile /etc/ssl/openssl.cnf -extensions v3_ca \ + -CA root.crt -CAkey root.key -CAcreateserial \ + -out intermediate.crt + +# leaf +openssl req -new -nodes -text -out server.csr \ + -keyout server.key -subj "/CN=dbhost.yourdomain.com" +chmod og-rwx server.key +openssl x509 -req -in server.csr -text -days 365 \ + -CA intermediate.crt -CAkey intermediate.key -CAcreateserial \ + -out server.crt + + server.crt and + intermediate.crt should be concatenated + into a certificate file bundle and stored on the server. + server.key should also be stored on the server. + root.crt should be stored on the client so + the client can verify that the server's leaf certificate was signed + by a chain of certificates linked to its trusted root certificate. + root.key and intermediate.key + should be stored offline for use in creating future certificates. 
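    Before deploying the bundle, the chain can be checked end to end with openssl verify,
    using the file names created above:

$ openssl verify -CAfile root.crt -untrusted intermediate.crt server.crt    # should report: server.crt: OK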
+ @@ -2512,8 +2597,8 @@ ssh -L 63333:db.foo.com:5432 joe@shell.foo.com - Registering <application>Event Log</> on <systemitem - class="osname">Windows</> + Registering <application>Event Log</application> on <systemitem + class="osname">Windows</systemitem> event log @@ -2521,11 +2606,11 @@ ssh -L 63333:db.foo.com:5432 joe@shell.foo.com - To register a Windows - event log library with the operating system, + To register a Windows + event log library with the operating system, issue this command: -regsvr32 pgsql_library_directory/pgevent.dll +regsvr32 pgsql_library_directory/pgevent.dll This creates registry entries used by the event viewer, under the default event source named PostgreSQL. @@ -2533,25 +2618,25 @@ ssh -L 63333:db.foo.com:5432 joe@shell.foo.com To specify a different event source name (see - ), use the /n + ), use the /n and /i options: -regsvr32 /n /i:event_source_name pgsql_library_directory/pgevent.dll +regsvr32 /n /i:event_source_name pgsql_library_directory/pgevent.dll - To unregister the event log library from + To unregister the event log library from the operating system, issue this command: -regsvr32 /u [/i:event_source_name] pgsql_library_directory/pgevent.dll +regsvr32 /u [/i:event_source_name] pgsql_library_directory/pgevent.dll To enable event logging in the database server, modify - to include + to include eventlog in postgresql.conf. diff --git a/doc/src/sgml/seg.sgml b/doc/src/sgml/seg.sgml index 5d1f546b53..d07329f5d1 100644 --- a/doc/src/sgml/seg.sgml +++ b/doc/src/sgml/seg.sgml @@ -8,9 +8,9 @@ - This module implements a data type seg for + This module implements a data type seg for representing line segments, or floating point intervals. - seg can represent uncertainty in the interval endpoints, + seg can represent uncertainty in the interval endpoints, making it especially useful for representing laboratory measurements. @@ -86,46 +86,46 @@ test=> select '6.25 .. 6.50'::seg as "pH"; Optional certainty indicators (<, > or ~) can be stored as well. (Certainty indicators are ignored by all the built-in operators, however.) - gives an overview of allowed - representations; shows some + gives an overview of allowed + representations; shows some examples. - In , x, y, and - delta denote - floating-point numbers. x and y, but - not delta, can be preceded by a certainty indicator. + In , x, y, and + delta denote + floating-point numbers. x and y, but + not delta, can be preceded by a certainty indicator.
- <type>seg</> External Representations + <type>seg</type> External Representations - x + x Single value (zero-length interval) - x .. y - Interval from x to y + x .. y + Interval from x to y - x (+-) delta - Interval from x - delta to - x + delta + x (+-) delta + Interval from x - delta to + x + delta - x .. - Open interval with lower bound x + x .. + Open interval with lower bound x - .. x - Open interval with upper bound x + .. x + Open interval with upper bound x @@ -133,7 +133,7 @@ test=> select '6.25 .. 6.50'::seg as "pH";
- Examples of Valid <type>seg</> Input + Examples of Valid <type>seg</type> Input @@ -146,8 +146,8 @@ test=> select '6.25 .. 6.50'::seg as "pH"; ~5.0 Creates a zero-length segment and records - ~ in the data. ~ is ignored - by seg operations, but + ~ in the data. ~ is ignored + by seg operations, but is preserved as a comment. @@ -169,7 +169,7 @@ test=> select '6.25 .. 6.50'::seg as "pH"; 5(+-)0.3 Creates an interval 4.7 .. 5.3. - Note that the (+-) notation isn't preserved. + Note that the (+-) notation isn't preserved. @@ -197,17 +197,17 @@ test=> select '6.25 .. 6.50'::seg as "pH";
- Because ... is widely used in data sources, it is allowed - as an alternative spelling of ... Unfortunately, this + Because ... is widely used in data sources, it is allowed + as an alternative spelling of ... Unfortunately, this creates a parsing ambiguity: it is not clear whether the upper bound - in 0...23 is meant to be 23 or 0.23. + in 0...23 is meant to be 23 or 0.23. This is resolved by requiring at least one digit before the decimal - point in all numbers in seg input. + point in all numbers in seg input. - As a sanity check, seg rejects intervals with the lower bound - greater than the upper, for example 5 .. 2. + As a sanity check, seg rejects intervals with the lower bound + greater than the upper, for example 5 .. 2. @@ -216,7 +216,7 @@ test=> select '6.25 .. 6.50'::seg as "pH"; Precision - seg values are stored internally as pairs of 32-bit floating point + seg values are stored internally as pairs of 32-bit floating point numbers. This means that numbers with more than 7 significant digits will be truncated. @@ -235,9 +235,9 @@ test=> select '6.25 .. 6.50'::seg as "pH"; Usage - The seg module includes a GiST index operator class for - seg values. - The operators supported by the GiST operator class are shown in . + The seg module includes a GiST index operator class for + seg values. + The operators supported by the GiST operator class are shown in . @@ -304,8 +304,8 @@ test=> select '6.25 .. 6.50'::seg as "pH";
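    As a brief, hypothetical illustration of such an index in use (the table and column names are invented):

CREATE EXTENSION seg;
CREATE TABLE measurements (id serial PRIMARY KEY, ph seg);
CREATE INDEX measurements_ph_idx ON measurements USING gist (ph);
SELECT * FROM measurements WHERE ph @> '6.5'::seg;    -- rows whose interval contains the value 6.5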
- (Before PostgreSQL 8.2, the containment operators @> and <@ were - respectively called @ and ~. These names are still available, but are + (Before PostgreSQL 8.2, the containment operators @> and <@ were + respectively called @ and ~. These names are still available, but are deprecated and will eventually be retired. Notice that the old names are reversed from the convention formerly followed by the core geometric data types!) @@ -349,11 +349,11 @@ test=> select '6.25 .. 6.50'::seg as "pH"; Notes - For examples of usage, see the regression test sql/seg.sql. + For examples of usage, see the regression test sql/seg.sql. - The mechanism that converts (+-) to regular ranges + The mechanism that converts (+-) to regular ranges isn't completely accurate in determining the number of significant digits for the boundaries. For example, it adds an extra digit to the lower boundary if the resulting interval includes a power of ten: @@ -369,7 +369,7 @@ postgres=> select '10(+-)1'::seg as seg; The performance of an R-tree index can largely depend on the initial order of input values. It may be very helpful to sort the input table - on the seg column; see the script sort-segments.pl + on the seg column; see the script sort-segments.pl for an example. diff --git a/doc/src/sgml/sepgsql.sgml b/doc/src/sgml/sepgsql.sgml index 0b611eeeca..f8c99e1b00 100644 --- a/doc/src/sgml/sepgsql.sgml +++ b/doc/src/sgml/sepgsql.sgml @@ -8,8 +8,8 @@ - sepgsql is a loadable module that supports label-based - mandatory access control (MAC) based on SELinux security + sepgsql is a loadable module that supports label-based + mandatory access control (MAC) based on SELinux security policy. @@ -17,7 +17,7 @@ The current implementation has significant limitations, and does not enforce mandatory access control for all actions. See - . + . @@ -25,10 +25,10 @@ Overview - This module integrates with SELinux to provide an + This module integrates with SELinux to provide an additional layer of security checking above and beyond what is normally provided by PostgreSQL. From the perspective of - SELinux, this module allows + SELinux, this module allows PostgreSQL to function as a user-space object manager. Each table or function access initiated by a DML query will be checked against the system security policy. This check is in addition to @@ -39,7 +39,7 @@ SELinux access control decisions are made using security labels, which are represented by strings such as - system_u:object_r:sepgsql_table_t:s0. Each access control + system_u:object_r:sepgsql_table_t:s0. Each access control decision involves two labels: the label of the subject attempting to perform the action, and the label of the object on which the operation is to be performed. Since these labels can be applied to any sort of object, @@ -51,7 +51,7 @@ - The statement allows assignment of + The statement allows assignment of a security label to a database object. @@ -60,17 +60,17 @@ Installation - sepgsql can only be used on Linux + sepgsql can only be used on Linux 2.6.28 or higher with SELinux enabled. It is not available on any other platform. You will also need - libselinux 2.1.10 or higher and - selinux-policy 3.9.13 or higher (although some + libselinux 2.1.10 or higher and + selinux-policy 3.9.13 or higher (although some distributions may backport the necessary rules into older policy versions). - The sestatus command allows you to check the status of + The sestatus command allows you to check the status of SELinux. 
A typical display is: $ sestatus @@ -81,20 +81,20 @@ Mode from config file: enforcing Policy version: 24 Policy from config file: targeted - If SELinux is disabled or not installed, you must set + If SELinux is disabled or not installed, you must set that product up first before installing this module. - To build this module, include the option --with-selinux in - your PostgreSQL configure command. Be sure that the - libselinux-devel RPM is installed at build time. + To build this module, include the option --with-selinux in + your PostgreSQL configure command. Be sure that the + libselinux-devel RPM is installed at build time. - To use this module, you must include sepgsql - in the parameter in - postgresql.conf. The module will not function correctly + To use this module, you must include sepgsql + in the parameter in + postgresql.conf. The module will not function correctly if loaded in any other manner. Once the module is loaded, you should execute sepgsql.sql in each database. This will install functions needed for security label management, and @@ -103,7 +103,7 @@ Policy from config file: targeted Here is an example showing how to initialize a fresh database cluster - with sepgsql functions and security labels installed. + with sepgsql functions and security labels installed. Adjust the paths shown as appropriate for your installation: @@ -124,7 +124,7 @@ $ for DBNAME in template0 template1 postgres; do Please note that you may see some or all of the following notifications depending on the particular versions you have of - libselinux and selinux-policy: + libselinux and selinux-policy: /etc/selinux/targeted/contexts/sepgsql_contexts: line 33 has invalid object type db_blobs /etc/selinux/targeted/contexts/sepgsql_contexts: line 36 has invalid object type db_language @@ -147,38 +147,38 @@ $ for DBNAME in template0 template1 postgres; do Due to the nature of SELinux, running the - regression tests for sepgsql requires several extra + regression tests for sepgsql requires several extra configuration steps, some of which must be done as root. The regression tests will not be run by an ordinary - make check or make installcheck command; you must + make check or make installcheck command; you must set up the configuration and then invoke the test script manually. - The tests must be run in the contrib/sepgsql directory + The tests must be run in the contrib/sepgsql directory of a configured PostgreSQL build tree. Although they require a build tree, the tests are designed to be executed against an installed server, - that is they are comparable to make installcheck not - make check. + that is they are comparable to make installcheck not + make check. First, set up sepgsql in a working database - according to the instructions in . + according to the instructions in . Note that the current operating system user must be able to connect to the database as superuser without password authentication. Second, build and install the policy package for the regression test. - The sepgsql-regtest policy is a special purpose policy package + The sepgsql-regtest policy is a special purpose policy package which provides a set of rules to be allowed during the regression tests. It should be built from the policy source file - sepgsql-regtest.te, which is done using + sepgsql-regtest.te, which is done using make with a Makefile supplied by SELinux. You will need to locate the appropriate Makefile on your system; the path shown below is only an example. 
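    For instance, on Red Hat-style systems the Makefile typically lives under
    /usr/share/selinux/devel (this path is an assumption; adjust it for your distribution):

$ make -f /usr/share/selinux/devel/Makefile sepgsql-regtest.pp    # run in contrib/sepgsql, where sepgsql-regtest.te lives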
Once built, install this policy package using the - semodule command, which loads supplied policy packages + semodule command, which loads supplied policy packages into the kernel. If the package is correctly installed, - semodule -l should list sepgsql-regtest as an + semodule -l should list sepgsql-regtest as an available policy package: @@ -191,12 +191,12 @@ sepgsql-regtest 1.07 - Third, turn on sepgsql_regression_test_mode. - For security reasons, the rules in sepgsql-regtest + Third, turn on sepgsql_regression_test_mode. + For security reasons, the rules in sepgsql-regtest are not enabled by default; the sepgsql_regression_test_mode parameter enables the rules needed to launch the regression tests. - It can be turned on using the setsebool command: + It can be turned on using the setsebool command: @@ -206,7 +206,7 @@ sepgsql_regression_test_mode --> on - Fourth, verify your shell is operating in the unconfined_t + Fourth, verify your shell is operating in the unconfined_t domain: @@ -215,7 +215,7 @@ unconfined_u:unconfined_r:unconfined_t:s0-s0:c0.c1023 - See for details on adjusting your + See for details on adjusting your working domain, if necessary. @@ -229,7 +229,7 @@ $ ./test_sepgsql This script will attempt to verify that you have done all the configuration steps correctly, and then it will run the regression tests for the - sepgsql module. + sepgsql module. @@ -242,7 +242,7 @@ $ sudo setsebool sepgsql_regression_test_mode off - You might prefer to remove the sepgsql-regtest policy + You might prefer to remove the sepgsql-regtest policy entirely: @@ -257,22 +257,22 @@ $ sudo semodule -r sepgsql-regtest - sepgsql.permissive (boolean) + sepgsql.permissive (boolean) - sepgsql.permissive configuration parameter + sepgsql.permissive configuration parameter - This parameter enables sepgsql to function + This parameter enables sepgsql to function in permissive mode, regardless of the system setting. The default is off. - This parameter can only be set in the postgresql.conf + This parameter can only be set in the postgresql.conf file or on the server command line. - When this parameter is on, sepgsql functions + When this parameter is on, sepgsql functions in permissive mode, even if SELinux in general is working in enforcing mode. This parameter is primarily useful for testing purposes. @@ -281,9 +281,9 @@ $ sudo semodule -r sepgsql-regtest - sepgsql.debug_audit (boolean) + sepgsql.debug_audit (boolean) - sepgsql.debug_audit configuration parameter + sepgsql.debug_audit configuration parameter @@ -295,7 +295,7 @@ $ sudo semodule -r sepgsql-regtest - The security policy of SELinux also has rules to + The security policy of SELinux also has rules to control whether or not particular accesses are logged. By default, access violations are logged, but allowed accesses are not. @@ -315,13 +315,13 @@ $ sudo semodule -r sepgsql-regtest Controlled Object Classes - The security model of SELinux describes all the access + The security model of SELinux describes all the access control rules as relationships between a subject entity (typically, a client of the database) and an object entity (such as a database object), each of which is identified by a security label. If access to an unlabeled object is attempted, the object is treated as if it were assigned the label - unlabeled_t. + unlabeled_t. 
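    To make the label model concrete, here is a hypothetical sketch of inspecting existing labels and
    assigning one (the table name is invented; the label string follows the format shown above):

SELECT objtype, objname, provider, label FROM pg_seclabels WHERE provider = 'selinux';
SECURITY LABEL FOR selinux ON TABLE customer IS 'system_u:object_r:sepgsql_table_t:s0';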
@@ -349,67 +349,67 @@ $ sudo semodule -r sepgsql-regtest DML Permissions - For tables, db_table:select, db_table:insert, - db_table:update or db_table:delete are + For tables, db_table:select, db_table:insert, + db_table:update or db_table:delete are checked for all the referenced target tables depending on the kind of - statement; in addition, db_table:select is also checked for + statement; in addition, db_table:select is also checked for all the tables that contain columns referenced in the - WHERE or RETURNING clause, as a data source - for UPDATE, and so on. + WHERE or RETURNING clause, as a data source + for UPDATE, and so on. Column-level permissions will also be checked for each referenced column. - db_column:select is checked on not only the columns being - read using SELECT, but those being referenced in other DML - statements; db_column:update or db_column:insert - will also be checked for columns being modified by UPDATE or - INSERT. + db_column:select is checked on not only the columns being + read using SELECT, but those being referenced in other DML + statements; db_column:update or db_column:insert + will also be checked for columns being modified by UPDATE or + INSERT. For example, consider: -UPDATE t1 SET x = 2, y = md5sum(y) WHERE z = 100; +UPDATE t1 SET x = 2, y = func1(y) WHERE z = 100; - Here, db_column:update will be checked for - t1.x, since it is being updated, - db_column:{select update} will be checked for - t1.y, since it is both updated and referenced, and - db_column:select will be checked for t1.z, since + Here, db_column:update will be checked for + t1.x, since it is being updated, + db_column:{select update} will be checked for + t1.y, since it is both updated and referenced, and + db_column:select will be checked for t1.z, since it is only referenced. - db_table:{select update} will also be checked + db_table:{select update} will also be checked at the table level. - For sequences, db_sequence:get_value is checked when we - reference a sequence object using SELECT; however, note that we + For sequences, db_sequence:get_value is checked when we + reference a sequence object using SELECT; however, note that we do not currently check permissions on execution of corresponding functions - such as lastval(). + such as lastval(). - For views, db_view:expand will be checked, then any other + For views, db_view:expand will be checked, then any other required permissions will be checked on the objects being expanded from the view, individually. - For functions, db_procedure:{execute} will be checked when + For functions, db_procedure:{execute} will be checked when user tries to execute a function as a part of query, or using fast-path invocation. If this function is a trusted procedure, it also checks - db_procedure:{entrypoint} permission to check whether it + db_procedure:{entrypoint} permission to check whether it can perform as entry point of trusted procedure. - In order to access any schema object, db_schema:search + In order to access any schema object, db_schema:search permission is required on the containing schema. When an object is referenced without schema qualification, schemas on which this permission is not present will not be searched (just as if the user did - not have USAGE privilege on the schema). If an explicit schema + not have USAGE privilege on the schema). If an explicit schema qualification is present, an error will occur if the user does not have the requisite permission on the named schema. 
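    When it is unclear which of these permissions a given statement will exercise, one hedged way to find
    out is to let sepgsql log them, using the sepgsql.debug_audit parameter described earlier (run as a
    superuser; the statement reuses the example table above, and the audit lines appear as LOG messages):

SET sepgsql.debug_audit = on;
SET client_min_messages = log;
UPDATE t1 SET x = 2, y = func1(y) WHERE z = 100;    -- each checked permission is reported in the log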
@@ -425,22 +425,22 @@ UPDATE t1 SET x = 2, y = md5sum(y) WHERE z = 100; The default database privilege system allows database superusers to modify system catalogs using DML commands, and reference or modify toast tables. These operations are prohibited when - sepgsql is enabled. + sepgsql is enabled. DDL Permissions - SELinux defines several permissions to control common + SELinux defines several permissions to control common operations for each object type; such as creation, alter, drop and relabel of security label. In addition, several object types have special permissions to control their characteristic operations; such as addition or deletion of name entries within a particular schema. - Creating a new database object requires create permission. - SELinux will grant or deny this permission based on the + Creating a new database object requires create permission. + SELinux will grant or deny this permission based on the client's security label and the proposed security label for the new object. In some cases, additional privileges are required: @@ -448,13 +448,13 @@ UPDATE t1 SET x = 2, y = md5sum(y) WHERE z = 100; - additionally requires - getattr permission for the source or template database. + additionally requires + getattr permission for the source or template database. - Creating a schema object additionally requires add_name + Creating a schema object additionally requires add_name permission on the parent schema. @@ -467,23 +467,23 @@ UPDATE t1 SET x = 2, y = md5sum(y) WHERE z = 100; - Creating a function marked as LEAKPROOF additionally - requires install permission. (This permission is also - checked when LEAKPROOF is set for an existing function.) + Creating a function marked as LEAKPROOF additionally + requires install permission. (This permission is also + checked when LEAKPROOF is set for an existing function.) - When DROP command is executed, drop will be + When DROP command is executed, drop will be checked on the object being removed. Permissions will be also checked for - objects dropped indirectly via CASCADE. Deletion of objects + objects dropped indirectly via CASCADE. Deletion of objects contained within a particular schema (tables, views, sequences and - procedures) additionally requires remove_name on the schema. + procedures) additionally requires remove_name on the schema. - When ALTER command is executed, setattr will be + When ALTER command is executed, setattr will be checked on the object being modified for each object types, except for subsidiary objects such as the indexes or triggers of a table, where permissions are instead checked on the parent object. In some cases, @@ -494,25 +494,25 @@ UPDATE t1 SET x = 2, y = md5sum(y) WHERE z = 100; Moving an object to a new schema additionally requires - remove_name permission on the old schema and - add_name permission on the new one. + remove_name permission on the old schema and + add_name permission on the new one. - Setting the LEAKPROOF attribute on a function requires - install permission. + Setting the LEAKPROOF attribute on a function requires + install permission. - Using on an object additionally - requires relabelfrom permission for the object in - conjunction with its old security label and relabelto + Using on an object additionally + requires relabelfrom permission for the object in + conjunction with its old security label and relabelto permission for the object in conjunction with its new security label. 
(In cases where multiple label providers are installed and the user tries to set a security label, but it is not managed by - SELinux, only setattr should be checked here. + SELinux, only setattr should be checked here. This is currently not done due to implementation restrictions.) @@ -524,7 +524,7 @@ UPDATE t1 SET x = 2, y = md5sum(y) WHERE z = 100; Trusted Procedures Trusted procedures are similar to security definer functions or setuid - commands. SELinux provides a feature to allow trusted + commands. SELinux provides a feature to allow trusted code to run using a security label different from that of the client, generally for the purpose of providing highly controlled access to sensitive data (e.g. rows might be omitted, or the precision of stored @@ -569,8 +569,8 @@ postgres=# SELECT cid, cname, show_credit(cid) FROM customer; - In this case, a regular user cannot reference customer.credit - directly, but a trusted procedure show_credit allows the user + In this case, a regular user cannot reference customer.credit + directly, but a trusted procedure show_credit allows the user to print the credit card numbers of customers with some of the digits masked out. @@ -582,8 +582,8 @@ postgres=# SELECT cid, cname, show_credit(cid) FROM customer; It is possible to use SELinux's dynamic domain transition feature to switch the security label of the client process, the client domain, to a new context, if that is allowed by the security policy. - The client domain needs the setcurrent permission and also - dyntransition from the old to the new domain. + The client domain needs the setcurrent permission and also + dyntransition from the old to the new domain. Dynamic domain transitions should be considered carefully, because they @@ -612,7 +612,7 @@ ERROR: SELinux: security policy violation In this example above we were allowed to switch from the larger MCS - range c1.c1023 to the smaller range c1.c4, but + range c1.c1023 to the smaller range c1.c4, but switching back was denied. @@ -641,7 +641,7 @@ ERROR: SELinux: security policy violation Miscellaneous - We reject the command across the board, because + We reject the command across the board, because any module loaded could easily circumvent security policy enforcement. @@ -651,7 +651,7 @@ ERROR: SELinux: security policy violation Sepgsql Functions - shows the available functions. + shows the available functions. @@ -726,7 +726,7 @@ ERROR: SELinux: security policy violation Row-level access control - PostgreSQL supports row-level access, but + PostgreSQL supports row-level access, but sepgsql does not. @@ -736,7 +736,7 @@ ERROR: SELinux: security policy violation Covert channels - sepgsql does not try to hide the existence of + sepgsql does not try to hide the existence of a certain object, even if the user is not allowed to reference it. For example, we can infer the existence of an invisible object as a result of primary key conflicts, foreign key violations, and so on, @@ -762,17 +762,17 @@ ERROR: SELinux: security policy violation - Fedora SELinux User Guide + SELinux User's and Administrator's Guide This document provides a wide spectrum of knowledge to administer - SELinux on your systems. - It focuses primarily on Fedora, but is not limited to Fedora. + SELinux on your systems. + It focuses primarily on Red Hat operating systems, but is not limited to them. 
- Fedora SELinux FAQ + Fedora SELinux FAQ This document answers frequently asked questions about diff --git a/doc/src/sgml/sourcerepo.sgml b/doc/src/sgml/sourcerepo.sgml index dd9da5a7b0..aaeacb14c5 100644 --- a/doc/src/sgml/sourcerepo.sgml +++ b/doc/src/sgml/sourcerepo.sgml @@ -18,18 +18,18 @@ Note that building PostgreSQL from the source - repository requires reasonably up-to-date versions of bison, - flex, and Perl. These tools are not needed + repository requires reasonably up-to-date versions of bison, + flex, and Perl. These tools are not needed to build from a distribution tarball, because the files that these tools are used to build are included in the tarball. Other tool requirements - are the same as shown in . + are the same as shown in . - Getting The Source via <productname>Git</> + Getting The Source via <productname>Git</productname> - With Git you will make a copy of the entire code repository + With Git you will make a copy of the entire code repository on your local machine, so you will have access to all history and branches offline. This is the fastest and most flexible way to develop or test patches. @@ -40,9 +40,9 @@ - You will need an installed version of Git, which you can - get from . Many systems already - have a recent version of Git installed by default, or + You will need an installed version of Git, which you can + get from . Many systems already + have a recent version of Git installed by default, or available in their package distribution system. @@ -52,32 +52,29 @@ To begin using the Git repository, make a clone of the official mirror: -git clone git://git.postgresql.org/git/postgresql.git +git clone https://git.postgresql.org/git/postgresql.git This will copy the full repository to your local machine, so it may take a while to complete, especially if you have a slow Internet connection. - The files will be placed in a new subdirectory postgresql of + The files will be placed in a new subdirectory postgresql of your current directory. - The Git mirror can also be reached via the HTTP protocol, if for example - a firewall is blocking access to the Git protocol. Just change the URL - prefix to https, as in: + The Git mirror can also be reached via the Git protocol. Just change the URL + prefix to git, as in: -git clone https://git.postgresql.org/git/postgresql.git +git clone git://git.postgresql.org/git/postgresql.git - The HTTP protocol is less efficient than the Git protocol, so it will be - slower to use. - Whenever you want to get the latest updates in the system, cd + Whenever you want to get the latest updates in the system, cd into the repository, and run: @@ -88,9 +85,9 @@ git fetch - Git can do a lot more things than just fetch the source. For - more information, consult the Git man pages, or see the - website at . + Git can do a lot more things than just fetch the source. For + more information, consult the Git man pages, or see the + website at . diff --git a/doc/src/sgml/sources.sgml b/doc/src/sgml/sources.sgml index 877fcedbb3..419f753c7b 100644 --- a/doc/src/sgml/sources.sgml +++ b/doc/src/sgml/sources.sgml @@ -14,8 +14,8 @@ Layout rules (brace positioning, etc) follow BSD conventions. In - particular, curly braces for the controlled blocks of if, - while, switch, etc go on their own lines. + particular, curly braces for the controlled blocks of if, + while, switch, etc go on their own lines. @@ -26,7 +26,7 @@ - Do not use C++ style comments (// comments). Strict ANSI C + Do not use C++ style comments (// comments). 
Strict ANSI C compilers do not accept them. For the same reason, do not use C++ extensions such as declaring new variables mid-block. @@ -40,7 +40,7 @@ */ Note that comment blocks that begin in column 1 will be preserved as-is - by pgindent, but it will re-flow indented comment blocks + by pgindent, but it will re-flow indented comment blocks as though they were plain text. If you want to preserve the line breaks in an indented block, add dashes like this: @@ -55,10 +55,10 @@ While submitted patches do not absolutely have to follow these formatting rules, it's a good idea to do so. Your code will get run through - pgindent before the next release, so there's no point in + pgindent before the next release, so there's no point in making it look nice under some other set of formatting conventions. A good rule of thumb for patches is make the new code look like - the existing code around it. + the existing code around it. @@ -92,37 +92,37 @@ less -x4 Error, warning, and log messages generated within the server code - should be created using ereport, or its older cousin - elog. The use of this function is complex enough to + should be created using ereport, or its older cousin + elog. The use of this function is complex enough to require some explanation. There are two required elements for every message: a severity level - (ranging from DEBUG to PANIC) and a primary + (ranging from DEBUG to PANIC) and a primary message text. In addition there are optional elements, the most common of which is an error identifier code that follows the SQL spec's SQLSTATE conventions. - ereport itself is just a shell function, that exists + ereport itself is just a shell function, that exists mainly for the syntactic convenience of making message generation look like a function call in the C source code. The only parameter - accepted directly by ereport is the severity level. + accepted directly by ereport is the severity level. The primary message text and any optional message elements are - generated by calling auxiliary functions, such as errmsg, - within the ereport call. + generated by calling auxiliary functions, such as errmsg, + within the ereport call. - A typical call to ereport might look like this: + A typical call to ereport might look like this: ereport(ERROR, (errcode(ERRCODE_DIVISION_BY_ZERO), errmsg("division by zero"))); - This specifies error severity level ERROR (a run-of-the-mill - error). The errcode call specifies the SQLSTATE error code - using a macro defined in src/include/utils/errcodes.h. The - errmsg call provides the primary message text. Notice the + This specifies error severity level ERROR (a run-of-the-mill + error). The errcode call specifies the SQLSTATE error code + using a macro defined in src/include/utils/errcodes.h. The + errmsg call provides the primary message text. Notice the extra set of parentheses surrounding the auxiliary function calls — these are annoying but syntactically necessary. @@ -139,90 +139,90 @@ ereport(ERROR, "You might need to add explicit typecasts."))); This illustrates the use of format codes to embed run-time values into - a message text. Also, an optional hint message is provided. + a message text. Also, an optional hint message is provided. - If the severity level is ERROR or higher, - ereport aborts the execution of the user-defined + If the severity level is ERROR or higher, + ereport aborts the execution of the user-defined function and does not return to the caller. If the severity level is - lower than ERROR, ereport returns normally. 
+ lower than ERROR, ereport returns normally. - The available auxiliary routines for ereport are: + The available auxiliary routines for ereport are: errcode(sqlerrcode) specifies the SQLSTATE error identifier code for the condition. If this routine is not called, the error identifier defaults to - ERRCODE_INTERNAL_ERROR when the error severity level is - ERROR or higher, ERRCODE_WARNING when the - error level is WARNING, otherwise (for NOTICE - and below) ERRCODE_SUCCESSFUL_COMPLETION. + ERRCODE_INTERNAL_ERROR when the error severity level is + ERROR or higher, ERRCODE_WARNING when the + error level is WARNING, otherwise (for NOTICE + and below) ERRCODE_SUCCESSFUL_COMPLETION. While these defaults are often convenient, always think whether they - are appropriate before omitting the errcode() call. + are appropriate before omitting the errcode() call. errmsg(const char *msg, ...) specifies the primary error message text, and possibly run-time values to insert into it. Insertions - are specified by sprintf-style format codes. In addition to - the standard format codes accepted by sprintf, the format - code %m can be used to insert the error message returned - by strerror for the current value of errno. + are specified by sprintf-style format codes. In addition to + the standard format codes accepted by sprintf, the format + code %m can be used to insert the error message returned + by strerror for the current value of errno. - That is, the value that was current when the ereport call - was reached; changes of errno within the auxiliary reporting + That is, the value that was current when the ereport call + was reached; changes of errno within the auxiliary reporting routines will not affect it. That would not be true if you were to - write strerror(errno) explicitly in errmsg's + write strerror(errno) explicitly in errmsg's parameter list; accordingly, do not do so. - %m does not require any - corresponding entry in the parameter list for errmsg. - Note that the message string will be run through gettext + %m does not require any + corresponding entry in the parameter list for errmsg. + Note that the message string will be run through gettext for possible localization before format codes are processed. errmsg_internal(const char *msg, ...) is the same as - errmsg, except that the message string will not be + errmsg, except that the message string will not be translated nor included in the internationalization message dictionary. - This should be used for cannot happen cases that are probably + This should be used for cannot happen cases that are probably not worth expending translation effort on. errmsg_plural(const char *fmt_singular, const char *fmt_plural, - unsigned long n, ...) is like errmsg, but with + unsigned long n, ...) is like errmsg, but with support for various plural forms of the message. - fmt_singular is the English singular format, - fmt_plural is the English plural format, - n is the integer value that determines which plural + fmt_singular is the English singular format, + fmt_plural is the English plural format, + n is the integer value that determines which plural form is needed, and the remaining arguments are formatted according to the selected format string. For more information see - . + . errdetail(const char *msg, ...) supplies an optional - detail message; this is to be used when there is additional + detail message; this is to be used when there is additional information that seems inappropriate to put in the primary message. 
The message string is processed in just the same way as for - errmsg. + errmsg. errdetail_internal(const char *msg, ...) is the same - as errdetail, except that the message string will not be + as errdetail, except that the message string will not be translated nor included in the internationalization message dictionary. This should be used for detail messages that are not worth expending translation effort on, for instance because they are too technical to be @@ -232,18 +232,18 @@ ereport(ERROR, errdetail_plural(const char *fmt_singular, const char *fmt_plural, - unsigned long n, ...) is like errdetail, but with + unsigned long n, ...) is like errdetail, but with support for various plural forms of the message. - For more information see . + For more information see . errdetail_log(const char *msg, ...) is the same as - errdetail except that this string goes only to the server - log, never to the client. If both errdetail (or one of + errdetail except that this string goes only to the server + log, never to the client. If both errdetail (or one of its equivalents above) and - errdetail_log are used then one string goes to the client + errdetail_log are used then one string goes to the client and the other to the log. This is useful for error details that are too security-sensitive or too bulky to include in the report sent to the client. @@ -253,31 +253,31 @@ ereport(ERROR, errdetail_log_plural(const char *fmt_singular, const char *fmt_plural, unsigned long n, ...) is like - errdetail_log, but with support for various plural forms of + errdetail_log, but with support for various plural forms of the message. - For more information see . + For more information see . errhint(const char *msg, ...) supplies an optional - hint message; this is to be used when offering suggestions + hint message; this is to be used when offering suggestions about how to fix the problem, as opposed to factual details about what went wrong. The message string is processed in just the same way as for - errmsg. + errmsg. errcontext(const char *msg, ...) is not normally called - directly from an ereport message site; rather it is used - in error_context_stack callback functions to provide + directly from an ereport message site; rather it is used + in error_context_stack callback functions to provide information about the context in which an error occurred, such as the current location in a PL function. The message string is processed in just the same way as for - errmsg. Unlike the other auxiliary functions, this can - be called more than once per ereport call; the successive + errmsg. Unlike the other auxiliary functions, this can + be called more than once per ereport call; the successive strings thus supplied are concatenated with separating newlines. @@ -309,9 +309,9 @@ ereport(ERROR, specifies a table constraint whose name, table name, and schema name should be included as auxiliary fields in the error report. Indexes should be considered to be constraints for this purpose, whether or - not they have an associated pg_constraint entry. Be + not they have an associated pg_constraint entry. Be careful to pass the underlying heap relation, not the index itself, as - rel. + rel. @@ -330,17 +330,17 @@ ereport(ERROR, - errcode_for_file_access() is a convenience function that + errcode_for_file_access() is a convenience function that selects an appropriate SQLSTATE error identifier for a failure in a file-access-related system call. It uses the saved - errno to determine which error code to generate. 
- Usually this should be used in combination with %m in the + errno to determine which error code to generate. + Usually this should be used in combination with %m in the primary error message text. - errcode_for_socket_access() is a convenience function that + errcode_for_socket_access() is a convenience function that selects an appropriate SQLSTATE error identifier for a failure in a socket-related system call. @@ -348,7 +348,7 @@ ereport(ERROR, errhidestmt(bool hide_stmt) can be called to specify - suppression of the STATEMENT: portion of a message in the + suppression of the STATEMENT: portion of a message in the postmaster log. Generally this is appropriate if the message text includes the current statement already. @@ -356,7 +356,7 @@ ereport(ERROR, errhidecontext(bool hide_ctx) can be called to - specify suppression of the CONTEXT: portion of a message in + specify suppression of the CONTEXT: portion of a message in the postmaster log. This should only be used for verbose debugging messages where the repeated inclusion of context would bloat the log volume too much. @@ -367,24 +367,24 @@ ereport(ERROR, - At most one of the functions errtable, - errtablecol, errtableconstraint, - errdatatype, or errdomainconstraint should - be used in an ereport call. These functions exist to + At most one of the functions errtable, + errtablecol, errtableconstraint, + errdatatype, or errdomainconstraint should + be used in an ereport call. These functions exist to allow applications to extract the name of a database object associated with the error condition without having to examine the potentially-localized error message text. These functions should be used in error reports for which it's likely that applications would wish to have automatic error handling. As of - PostgreSQL 9.3, complete coverage exists only for + PostgreSQL 9.3, complete coverage exists only for errors in SQLSTATE class 23 (integrity constraint violation), but this is likely to be expanded in future. - There is an older function elog that is still heavily used. - An elog call: + There is an older function elog that is still heavily used. + An elog call: elog(level, "format string", ...); @@ -394,17 +394,17 @@ ereport(level, (errmsg_internal("format string", ...))); Notice that the SQLSTATE error code is always defaulted, and the message string is not subject to translation. - Therefore, elog should be used only for internal errors and + Therefore, elog should be used only for internal errors and low-level debug logging. Any message that is likely to be of interest to - ordinary users should go through ereport. Nonetheless, - there are enough internal cannot happen error checks in the - system that elog is still widely used; it is preferred for + ordinary users should go through ereport. Nonetheless, + there are enough internal cannot happen error checks in the + system that elog is still widely used; it is preferred for those messages for its notational simplicity. Advice about writing good error messages can be found in - . + . @@ -414,7 +414,7 @@ ereport(level, (errmsg_internal("format string", ...))); This style guide is offered in the hope of maintaining a consistent, user-friendly style throughout all the messages generated by - PostgreSQL. + PostgreSQL. @@ -643,7 +643,7 @@ cannot open file "%s" - Rationale: Otherwise no one will know what foo.bar.baz + Rationale: Otherwise no one will know what foo.bar.baz refers to. @@ -709,8 +709,8 @@ BETTER: could not open file %s (I/O failure) not helpful information. 
If the error text doesn't make as much sense without the function name, reword it. -BAD: pg_atoi: error in "z": cannot parse "z" -BETTER: invalid input syntax for integer: "z" +BAD: pg_strtoint32: error in "z": cannot parse "z" +BETTER: invalid input syntax for type integer: "z" @@ -853,7 +853,7 @@ BETTER: unrecognized node type: 42 Keep in mind that error message texts need to be translated into other - languages. Follow the guidelines in + languages. Follow the guidelines in to avoid making life difficult for translators. @@ -866,27 +866,38 @@ BETTER: unrecognized node type: 42 C Standard - Code in PostgreSQL should only rely on language - features available in the C89 standard. That means a conforming - C89 compiler has to be able to compile postgres, at least aside - from a few platform dependent pieces. Features from later - revision of the C standard or compiler specific features can be - used, if a fallback is provided. + Code in PostgreSQL should only rely on language + features available in the C99 standard. That means a conforming + C99 compiler has to be able to compile postgres, at least aside + from a few platform dependent pieces. - For example static inline and - _StaticAssert() are currently used, even - though they are from newer revisions of the C standard. If not - available we respectively fall back to defining the functions - without inline, and to using a C89 compatible replacement that - performs the same checks, but emits rather cryptic messages. + A few features included in the C99 standard are, at this time, not be + permitted to be used in core PostgreSQL + code. This currently includes variable length arrays, intermingled + declarations and code, // comments, universal + character names. Reasons for that include portability and historical + practices. + + + Features from later revision of the C standard or compiler specific + features can be used, if a fallback is provided. + + + For example _StaticAssert() and + __builtin_constant_p are currently used, even though + they are from newer revisions of the C standard and a + GCC extension respectively. If not available + we respectively fall back to using a C99 compatible replacement that + performs the same checks, but emits rather cryptic messages and do not + use __builtin_constant_p. Function-Like Macros and Inline Functions - Both, macros with arguments and static inline + Both, macros with arguments and static inline functions, may be used. The latter are preferable if there are multiple-evaluation hazards when written as a macro, as e.g. the case with @@ -914,7 +925,7 @@ MemoryContextSwitchTo(MemoryContext context) } #endif /* FRONTEND */ - In this example CurrentMemoryContext, which is only + In this example CurrentMemoryContext, which is only available in the backend, is referenced and the function thus hidden with a #ifndef FRONTEND. This rule exists because some compilers emit references to symbols @@ -957,12 +968,30 @@ handle_sighup(SIGNAL_ARGS) errno = save_errno; } - errno is saved and restored because - SetLatch() might change it. If that were not done + errno is saved and restored because + SetLatch() might change it. If that were not done interrupted code that's currently inspecting errno might see the wrong value. + + Calling Function Pointers + + + For clarity, it is preferred to explicitly dereference a function pointer + when calling the pointed-to function if the pointer is a simple variable, + for example: + +(*emit_log_hook) (edata); + + (even though emit_log_hook(edata) would also work). 
+ When the function pointer is part of a structure, then the extra + punctuation can and usually should be omitted, for example: + +paramInfo->paramFetch(paramInfo, paramId); + + + diff --git a/doc/src/sgml/spgist.sgml b/doc/src/sgml/spgist.sgml index cd4a8d07c4..126d1f6c15 100644 --- a/doc/src/sgml/spgist.sgml +++ b/doc/src/sgml/spgist.sgml @@ -1,6 +1,6 @@ - + SP-GiST Indexes @@ -43,7 +43,7 @@ Some of the information here is derived from Purdue University's SP-GiST Indexing Project - web site. + web site. The SP-GiST implementation in PostgreSQL is primarily maintained by Teodor Sigaev and Oleg Bartunov, and there is more information on their @@ -57,109 +57,146 @@ Built-in Operator Classes - The core PostgreSQL distribution + The core PostgreSQL distribution includes the SP-GiST operator classes shown in - . + .
Built-in <acronym>SP-GiST</acronym> Operator Classes - + Name Indexed Data Type Indexable Operators + Ordering Operators - kd_point_ops - point + kd_point_ops + point - << - <@ - <^ - >> - >^ - ~= + << + <@ + <^ + >> + >^ + ~= + + + <-> - quad_point_ops - point + quad_point_ops + point - << - <@ - <^ - >> - >^ - ~= + << + <@ + <^ + >> + >^ + ~= + + + <-> - range_ops + range_ops any range type - && - &< - &> - -|- - << - <@ - = - >> - @> + && + &< + &> + -|- + << + <@ + = + >> + @> + + - box_ops - box + box_ops + box - << - &< - && - &> - >> - ~= - @> - <@ - &<| - <<| + << + &< + && + &> + >> + ~= + @> + <@ + &<| + <<| |>> - |&> + |&> + + - text_ops - text + poly_ops + polygon + + << + &< + && + &> + >> + ~= + @> + <@ + &<| + <<| + |>> + |&> + + + <-> + + + + text_ops + text + + < + <= + = + > + >= + ~<=~ + ~<~ + ~>=~ + ~>~ + ^@ + - < - <= - = - > - >= - ~<=~ - ~<~ - ~>=~ - ~>~ - inet_ops - inet, cidr + inet_ops + inet, cidr + + && + >> + >>= + > + >= + <> + << + <<= + < + <= + = + - && - >> - >>= - > - >= - <> - << - <<= - < - <= - = @@ -167,11 +204,17 @@
- Of the two operator classes for type point, - quad_point_ops is the default. kd_point_ops + Of the two operator classes for type point, + quad_point_ops is the default. kd_point_ops supports the same operators but uses a different index data structure which may offer better performance in some applications. + + The quad_point_ops, kd_point_ops and + poly_ops operator classes support the <-> + ordering operator, which enables the k-nearest neighbor (k-NN) + search over indexed point or polygon datasets. + @@ -199,15 +242,15 @@ Inner tuples are more complex, since they are branching points in the search tree. Each inner tuple contains a set of one or more - nodes, which represent groups of similar leaf values. + nodes, which represent groups of similar leaf values. A node contains a downlink that leads either to another, lower-level inner tuple, or to a short list of leaf tuples that all lie on the same index page. - Each node normally has a label that describes it; for example, + Each node normally has a label that describes it; for example, in a radix tree the node label could be the next character of the string value. (Alternatively, an operator class can omit the node labels, if it works with a fixed set of nodes for all inner tuples; - see .) - Optionally, an inner tuple can have a prefix value + see .) + Optionally, an inner tuple can have a prefix value that describes all its members. In a radix tree this could be the common prefix of the represented strings. The prefix value is not necessarily really a prefix, but can be any data needed by the operator class; @@ -223,7 +266,7 @@ operator classes to manage level counting while descending the tree. There is also support for incrementally reconstructing the represented value when that is needed, and for passing down additional data (called - traverse values) during a tree descent. + traverse values) during a tree descent. @@ -240,38 +283,40 @@ There are five user-defined methods that an index operator class for - SP-GiST must provide. All five follow the convention - of accepting two internal arguments, the first of which is a - pointer to a C struct containing input values for the support method, - while the second argument is a pointer to a C struct where output values - must be placed. Four of the methods just return void, since - all their results appear in the output struct; but - leaf_consistent additionally returns a boolean result. + SP-GiST must provide, and one is optional. All five + mandatory methods follow the convention of accepting two internal + arguments, the first of which is a pointer to a C struct containing input + values for the support method, while the second argument is a pointer to a + C struct where output values must be placed. Four of the mandatory methods just + return void, since all their results appear in the output struct; but + leaf_consistent additionally returns a boolean result. The methods must not modify any fields of their input structs. In all cases, the output struct is initialized to zeroes before calling the - user-defined method. + user-defined method. Optional sixth method compress + accepts datum to be indexed as the only argument and returns value suitable + for physical storage in leaf tuple. - The five user-defined methods are: + The five mandatory user-defined methods are: - config + config Returns static information about the index implementation, including the data type OIDs of the prefix and node label data types. 
- The SQL declaration of the function must look like this: + The SQL declaration of the function must look like this: CREATE FUNCTION my_config(internal, internal) RETURNS void ... - The first argument is a pointer to a spgConfigIn + The first argument is a pointer to a spgConfigIn C struct, containing input data for the function. - The second argument is a pointer to a spgConfigOut + The second argument is a pointer to a spgConfigOut C struct, which the function must fill with result data. typedef struct spgConfigIn @@ -283,46 +328,63 @@ typedef struct spgConfigOut { Oid prefixType; /* Data type of inner-tuple prefixes */ Oid labelType; /* Data type of inner-tuple node labels */ + Oid leafType; /* Data type of leaf-tuple values */ bool canReturnData; /* Opclass can reconstruct original data */ bool longValuesOK; /* Opclass can cope with values > 1 page */ } spgConfigOut; - attType is passed in order to support polymorphic + attType is passed in order to support polymorphic index operator classes; for ordinary fixed-data-type operator classes, it will always have the same value and so can be ignored. For operator classes that do not use prefixes, - prefixType can be set to VOIDOID. + prefixType can be set to VOIDOID. Likewise, for operator classes that do not use node labels, - labelType can be set to VOIDOID. - canReturnData should be set true if the operator class + labelType can be set to VOIDOID. + canReturnData should be set true if the operator class is capable of reconstructing the originally-supplied index value. - longValuesOK should be set true only when the - attType is of variable length and the operator + longValuesOK should be set true only when the + attType is of variable length and the operator class is capable of segmenting long values by repeated suffixing - (see ). + (see ). + + + + leafType is typically the same as + attType. For the reasons of backward + compatibility, method config can + leave leafType uninitialized; that would + give the same effect as setting leafType equal + to attType. When attType + and leafType are different, then optional + method compress must be provided. + Method compress is responsible + for transformation of datums to be indexed from attType + to leafType. + Note: both consistent functions will get scankeys + unchanged, without transformation using compress. - choose + choose Chooses a method for inserting a new value into an inner tuple. - The SQL declaration of the function must look like this: + The SQL declaration of the function must look like this: CREATE FUNCTION my_choose(internal, internal) RETURNS void ... - The first argument is a pointer to a spgChooseIn + The first argument is a pointer to a spgChooseIn C struct, containing input data for the function. - The second argument is a pointer to a spgChooseOut + The second argument is a pointer to a spgChooseOut C struct, which the function must fill with result data. typedef struct spgChooseIn @@ -380,25 +442,31 @@ typedef struct spgChooseOut } spgChooseOut; - datum is the original datum that was to be inserted - into the index. - leafDatum is initially the same as - datum, but can change at lower levels of the tree + datum is the original datum of + spgConfigIn.attType + type that was to be inserted into the index. + leafDatum is a value of + spgConfigOut.leafType + type which is initially an result of method + compress applied to datum + when method compress is provided, or same value as + datum otherwise. 
+ leafDatum can change at lower levels of the tree if the choose or picksplit methods change it. When the insertion search reaches a leaf page, - the current value of leafDatum is what will be stored + the current value of leafDatum is what will be stored in the newly created leaf tuple. - level is the current inner tuple's level, starting at + level is the current inner tuple's level, starting at zero for the root level. - allTheSame is true if the current inner tuple is + allTheSame is true if the current inner tuple is marked as containing multiple equivalent nodes - (see ). - hasPrefix is true if the current inner tuple contains + (see ). + hasPrefix is true if the current inner tuple contains a prefix; if so, - prefixDatum is its value. - nNodes is the number of child nodes contained in the + prefixDatum is its value. + nNodes is the number of child nodes contained in the inner tuple, and - nodeLabels is an array of their label values, or + nodeLabels is an array of their label values, or NULL if there are no labels. @@ -412,80 +480,80 @@ typedef struct spgChooseOut If the new value matches one of the existing child nodes, - set resultType to spgMatchNode. - Set nodeN to the index (from zero) of that node in + set resultType to spgMatchNode. + Set nodeN to the index (from zero) of that node in the node array. - Set levelAdd to the increment in - level caused by descending through that node, + Set levelAdd to the increment in + level caused by descending through that node, or leave it as zero if the operator class does not use levels. - Set restDatum to equal datum + Set restDatum to equal leafDatum if the operator class does not modify datums from one level to the next, or otherwise set it to the modified value to be used as - leafDatum at the next level. + leafDatum at the next level. If a new child node must be added, - set resultType to spgAddNode. - Set nodeLabel to the label to be used for the new - node, and set nodeN to the index (from zero) at which + set resultType to spgAddNode. + Set nodeLabel to the label to be used for the new + node, and set nodeN to the index (from zero) at which to insert the node in the node array. After the node has been added, the choose function will be called again with the modified inner tuple; - that call should result in an spgMatchNode result. + that call should result in an spgMatchNode result. If the new value is inconsistent with the tuple prefix, - set resultType to spgSplitTuple. + set resultType to spgSplitTuple. This action moves all the existing nodes into a new lower-level inner tuple, and replaces the existing inner tuple with a tuple having a single downlink pointing to the new lower-level inner tuple. - Set prefixHasPrefix to indicate whether the new + Set prefixHasPrefix to indicate whether the new upper tuple should have a prefix, and if so set - prefixPrefixDatum to the prefix value. This new + prefixPrefixDatum to the prefix value. This new prefix value must be sufficiently less restrictive than the original to accept the new value to be indexed. - Set prefixNNodes to the number of nodes needed in the - new tuple, and set prefixNodeLabels to a palloc'd array + Set prefixNNodes to the number of nodes needed in the + new tuple, and set prefixNodeLabels to a palloc'd array holding their labels, or to NULL if node labels are not required. Note that the total size of the new upper tuple must be no more than the total size of the tuple it is replacing; this constrains the lengths of the new prefix and new labels. 
- Set childNodeN to the index (from zero) of the node + Set childNodeN to the index (from zero) of the node that will downlink to the new lower-level inner tuple. - Set postfixHasPrefix to indicate whether the new + Set postfixHasPrefix to indicate whether the new lower-level inner tuple should have a prefix, and if so set - postfixPrefixDatum to the prefix value. The + postfixPrefixDatum to the prefix value. The combination of these two prefixes and the downlink node's label (if any) must have the same meaning as the original prefix, because there is no opportunity to alter the node labels that are moved to the new lower-level tuple, nor to change any child index entries. After the node has been split, the choose function will be called again with the replacement inner tuple. - That call may return an spgAddNode result, if no suitable - node was created by the spgSplitTuple action. Eventually - choose must return spgMatchNode to + That call may return an spgAddNode result, if no suitable + node was created by the spgSplitTuple action. Eventually + choose must return spgMatchNode to allow the insertion to descend to the next level. - picksplit + picksplit Decides how to create a new inner tuple over a set of leaf tuples. - The SQL declaration of the function must look like this: + The SQL declaration of the function must look like this: CREATE FUNCTION my_picksplit(internal, internal) RETURNS void ... - The first argument is a pointer to a spgPickSplitIn + The first argument is a pointer to a spgPickSplitIn C struct, containing input data for the function. - The second argument is a pointer to a spgPickSplitOut + The second argument is a pointer to a spgPickSplitOut C struct, which the function must fill with result data. typedef struct spgPickSplitIn @@ -508,83 +576,88 @@ typedef struct spgPickSplitOut } spgPickSplitOut; - nTuples is the number of leaf tuples provided. - datums is an array of their datum values. - level is the current level that all the leaf tuples + nTuples is the number of leaf tuples provided. + datums is an array of their datum values of + spgConfigOut.leafType + type. + level is the current level that all the leaf tuples share, which will become the level of the new inner tuple. - Set hasPrefix to indicate whether the new inner + Set hasPrefix to indicate whether the new inner tuple should have a prefix, and if so set - prefixDatum to the prefix value. - Set nNodes to indicate the number of nodes that + prefixDatum to the prefix value. + Set nNodes to indicate the number of nodes that the new inner tuple will contain, and - set nodeLabels to an array of their label values, + set nodeLabels to an array of their label values, or to NULL if node labels are not required. - Set mapTuplesToNodes to an array that gives the index + Set mapTuplesToNodes to an array that gives the index (from zero) of the node that each leaf tuple should be assigned to. - Set leafTupleDatums to an array of the values to + Set leafTupleDatums to an array of the values to be stored in the new leaf tuples (these will be the same as the - input datums if the operator class does not modify + input datums if the operator class does not modify datums from one level to the next). - Note that the picksplit function is + Note that the picksplit function is responsible for palloc'ing the - nodeLabels, mapTuplesToNodes and - leafTupleDatums arrays. + nodeLabels, mapTuplesToNodes and + leafTupleDatums arrays. 
If more than one leaf tuple is supplied, it is expected that the - picksplit function will classify them into more than + picksplit function will classify them into more than one node; otherwise it is not possible to split the leaf tuples across multiple pages, which is the ultimate purpose of this - operation. Therefore, if the picksplit function + operation. Therefore, if the picksplit function ends up placing all the leaf tuples in the same node, the core SP-GiST code will override that decision and generate an inner tuple in which the leaf tuples are assigned at random to several identically-labeled nodes. Such a tuple is marked - allTheSame to signify that this has happened. The - choose and inner_consistent functions + allTheSame to signify that this has happened. The + choose and inner_consistent functions must take suitable care with such inner tuples. - See for more information. + See for more information. - picksplit can be applied to a single leaf tuple only - in the case that the config function set - longValuesOK to true and a larger-than-a-page input + picksplit can be applied to a single leaf tuple only + in the case that the config function set + longValuesOK to true and a larger-than-a-page input value has been supplied. In this case the point of the operation is to strip off a prefix and produce a new, shorter leaf datum value. The call will be repeated until a leaf datum short enough to fit on - a page has been produced. See for + a page has been produced. See for more information. - inner_consistent + inner_consistent Returns set of nodes (branches) to follow during tree search. - The SQL declaration of the function must look like this: + The SQL declaration of the function must look like this: CREATE FUNCTION my_inner_consistent(internal, internal) RETURNS void ... - The first argument is a pointer to a spgInnerConsistentIn + The first argument is a pointer to a spgInnerConsistentIn C struct, containing input data for the function. - The second argument is a pointer to a spgInnerConsistentOut + The second argument is a pointer to a spgInnerConsistentOut C struct, which the function must fill with result data. typedef struct spgInnerConsistentIn { ScanKey scankeys; /* array of operators and comparison values */ - int nkeys; /* length of array */ + ScanKey orderbys; /* array of ordering operators and comparison + * values */ + int nkeys; /* length of scankeys array */ + int norderbys; /* length of orderbys array */ Datum reconstructedValue; /* value reconstructed at parent */ void *traversalValue; /* opclass-specific traverse value */ @@ -607,99 +680,112 @@ typedef struct spgInnerConsistentOut int *levelAdds; /* increment level by this much for each */ Datum *reconstructedValues; /* associated reconstructed values */ void **traversalValues; /* opclass-specific traverse values */ + double **distances; /* associated distances */ } spgInnerConsistentOut; - The array scankeys, of length nkeys, + The array scankeys, of length nkeys, describes the index search condition(s). These conditions are combined with AND — only index entries that satisfy all of - them are interesting. (Note that nkeys = 0 implies + them are interesting. (Note that nkeys = 0 implies that all index entries satisfy the query.) Usually the consistent - function only cares about the sk_strategy and - sk_argument fields of each array entry, which + function only cares about the sk_strategy and + sk_argument fields of each array entry, which respectively give the indexable operator and comparison value. 
- In particular it is not necessary to check sk_flags to + In particular it is not necessary to check sk_flags to see if the comparison value is NULL, because the SP-GiST core code will filter out such conditions. - reconstructedValue is the value reconstructed for the - parent tuple; it is (Datum) 0 at the root level or if the - inner_consistent function did not provide a value at the - parent level. - traversalValue is a pointer to any traverse data - passed down from the previous call of inner_consistent + The array orderbys, of length norderbys, + describes ordering operators (if any) in the same manner. + reconstructedValue is the value reconstructed for the + parent tuple; it is (Datum) 0 at the root level or if the + inner_consistent function did not provide a value at the + parent level. reconstructedValue is always of + spgConfigOut.leafType type. + traversalValue is a pointer to any traverse data + passed down from the previous call of inner_consistent on the parent index tuple, or NULL at the root level. - traversalMemoryContext is the memory context in which + traversalMemoryContext is the memory context in which to store output traverse values (see below). - level is the current inner tuple's level, starting at + level is the current inner tuple's level, starting at zero for the root level. - returnData is true if reconstructed data is + returnData is true if reconstructed data is required for this query; this will only be so if the - config function asserted canReturnData. - allTheSame is true if the current inner tuple is - marked all-the-same; in this case all the nodes have the + config function asserted canReturnData. + allTheSame is true if the current inner tuple is + marked all-the-same; in this case all the nodes have the same label (if any) and so either all or none of them match the query - (see ). - hasPrefix is true if the current inner tuple contains + (see ). + hasPrefix is true if the current inner tuple contains a prefix; if so, - prefixDatum is its value. - nNodes is the number of child nodes contained in the + prefixDatum is its value. + nNodes is the number of child nodes contained in the inner tuple, and - nodeLabels is an array of their label values, or + nodeLabels is an array of their label values, or NULL if the nodes do not have labels. - nNodes must be set to the number of child nodes that + nNodes must be set to the number of child nodes that need to be visited by the search, and - nodeNumbers must be set to an array of their indexes. + nodeNumbers must be set to an array of their indexes. If the operator class keeps track of levels, set - levelAdds to an array of the level increments + levelAdds to an array of the level increments required when descending to each node to be visited. (Often these increments will be the same for all the nodes, but that's not necessarily so, so an array is used.) If value reconstruction is needed, set - reconstructedValues to an array of the values + reconstructedValues to an array of the values + of spgConfigOut.leafType type reconstructed for each child node to be visited; otherwise, leave - reconstructedValues as NULL. + reconstructedValues as NULL. + If ordered search is performed, set distances + to an array of distance values according to orderbys + array (nodes with lowest distances will be processed first). Leave it + NULL otherwise. 
If it is desired to pass down additional out-of-band information - (traverse values) to lower levels of the tree search, - set traversalValues to an array of the appropriate + (traverse values) to lower levels of the tree search, + set traversalValues to an array of the appropriate traverse values, one for each child node to be visited; otherwise, - leave traversalValues as NULL. - Note that the inner_consistent function is + leave traversalValues as NULL. + Note that the inner_consistent function is responsible for palloc'ing the - nodeNumbers, levelAdds, - reconstructedValues, and - traversalValues arrays in the current memory context. + nodeNumbers, levelAdds, + distances, + reconstructedValues, and + traversalValues arrays in the current memory context. However, any output traverse values pointed to by - the traversalValues array should be allocated - in traversalMemoryContext. + the traversalValues array should be allocated + in traversalMemoryContext. Each traverse value must be a single palloc'd chunk. - leaf_consistent + leaf_consistent Returns true if a leaf tuple satisfies a query. - The SQL declaration of the function must look like this: + The SQL declaration of the function must look like this: CREATE FUNCTION my_leaf_consistent(internal, internal) RETURNS bool ... - The first argument is a pointer to a spgLeafConsistentIn + The first argument is a pointer to a spgLeafConsistentIn C struct, containing input data for the function. - The second argument is a pointer to a spgLeafConsistentOut + The second argument is a pointer to a spgLeafConsistentOut C struct, which the function must fill with result data. typedef struct spgLeafConsistentIn { ScanKey scankeys; /* array of operators and comparison values */ - int nkeys; /* length of array */ + ScanKey orderbys; /* array of ordering operators and comparison + * values */ + int nkeys; /* length of scankeys array */ + int norderbys; /* length of orderbys array */ Datum reconstructedValue; /* value reconstructed at parent */ void *traversalValue; /* opclass-specific traverse value */ @@ -711,66 +797,99 @@ typedef struct spgLeafConsistentIn typedef struct spgLeafConsistentOut { - Datum leafValue; /* reconstructed original data, if any */ - bool recheck; /* set true if operator must be rechecked */ + Datum leafValue; /* reconstructed original data, if any */ + bool recheck; /* set true if operator must be rechecked */ + bool recheckDistances; /* set true if distances must be rechecked */ + double *distances; /* associated distances */ } spgLeafConsistentOut; - The array scankeys, of length nkeys, + The array scankeys, of length nkeys, describes the index search condition(s). These conditions are combined with AND — only index entries that satisfy all of - them satisfy the query. (Note that nkeys = 0 implies + them satisfy the query. (Note that nkeys = 0 implies that all index entries satisfy the query.) Usually the consistent - function only cares about the sk_strategy and - sk_argument fields of each array entry, which + function only cares about the sk_strategy and + sk_argument fields of each array entry, which respectively give the indexable operator and comparison value. - In particular it is not necessary to check sk_flags to + In particular it is not necessary to check sk_flags to see if the comparison value is NULL, because the SP-GiST core code will filter out such conditions. 
- reconstructedValue is the value reconstructed for the - parent tuple; it is (Datum) 0 at the root level or if the - inner_consistent function did not provide a value at the - parent level. - traversalValue is a pointer to any traverse data - passed down from the previous call of inner_consistent + The array orderbys, of length norderbys, + describes the ordering operators in the same manner. + reconstructedValue is the value reconstructed for the + parent tuple; it is (Datum) 0 at the root level or if the + inner_consistent function did not provide a value at the + parent level. reconstructedValue is always of + spgConfigOut.leafType type. + traversalValue is a pointer to any traverse data + passed down from the previous call of inner_consistent on the parent index tuple, or NULL at the root level. - level is the current leaf tuple's level, starting at + level is the current leaf tuple's level, starting at zero for the root level. - returnData is true if reconstructed data is + returnData is true if reconstructed data is required for this query; this will only be so if the - config function asserted canReturnData. - leafDatum is the key value stored in the current - leaf tuple. + config function asserted canReturnData. + leafDatum is the key value of + spgConfigOut.leafType + stored in the current leaf tuple. - The function must return true if the leaf tuple matches the - query, or false if not. In the true case, - if returnData is true then - leafValue must be set to the value originally supplied - to be indexed for this leaf tuple. Also, - recheck may be set to true if the match + The function must return true if the leaf tuple matches the + query, or false if not. In the true case, + if returnData is true then + leafValue must be set to the value of + spgConfigIn.attType type + originally supplied to be indexed for this leaf tuple. Also, + recheck may be set to true if the match is uncertain and so the operator(s) must be re-applied to the actual heap tuple to verify the match. + If ordered search is performed, set distances + to an array of distance values according to orderbys + array. Leave it NULL otherwise. If at least one of returned distances + is not exact, set recheckDistances to true. + In this case, the executor will calculate the exact distances after + fetching the tuple from the heap, and will reorder the tuples if needed. + + The optional user-defined method is: + + + + + Datum compress(Datum in) + + + Converts the data item into a format suitable for physical storage in + a leaf tuple of index page. It accepts + spgConfigIn.attType + value and return + spgConfigOut.leafType + value. Output value should not be toasted. + + + + + All the SP-GiST support methods are normally called in a short-lived - memory context; that is, CurrentMemoryContext will be reset + memory context; that is, CurrentMemoryContext will be reset after processing of each tuple. It is therefore not very important to - worry about pfree'ing everything you palloc. (The config + worry about pfree'ing everything you palloc. (The config method is an exception: it should try to avoid leaking memory. But - usually the config method need do nothing but assign + usually the config method need do nothing but assign constants into the passed parameter struct.) If the indexed column is of a collatable data type, the index collation will be passed to all the support methods, using the standard - PG_GET_COLLATION() mechanism. + PG_GET_COLLATION() mechanism. 
@@ -794,7 +913,7 @@ typedef struct spgLeafConsistentOut trees, in which each level of the tree includes a prefix that is short enough to fit on a page, and the final leaf level includes a suffix also short enough to fit on a page. The operator class should set - longValuesOK to TRUE only if it is prepared to arrange for + longValuesOK to true only if it is prepared to arrange for this to happen. Otherwise, the SP-GiST core will reject any request to index a value that is too large to fit on an index page. @@ -814,10 +933,10 @@ typedef struct spgLeafConsistentOut links that chain such tuples together.) If the set of leaf tuples grows too large for a page, a split is performed and an intermediate inner tuple is inserted. For this to fix the problem, the new inner - tuple must divide the set of leaf values into more than one - node group. If the operator class's picksplit function + tuple must divide the set of leaf values into more than one + node group. If the operator class's picksplit function fails to do that, the SP-GiST core resorts to - extraordinary measures described in . + extraordinary measures described in .
@@ -830,58 +949,58 @@ typedef struct spgLeafConsistentOut corresponding to the four quadrants around the inner tuple's centroid point. In such a case the code typically works with the nodes by number, and there is no need for explicit node labels. To suppress - node labels (and thereby save some space), the picksplit - function can return NULL for the nodeLabels array, - and likewise the choose function can return NULL for - the prefixNodeLabels array during - a spgSplitTuple action. - This will in turn result in nodeLabels being NULL during - subsequent calls to choose and inner_consistent. + node labels (and thereby save some space), the picksplit + function can return NULL for the nodeLabels array, + and likewise the choose function can return NULL for + the prefixNodeLabels array during + a spgSplitTuple action. + This will in turn result in nodeLabels being NULL during + subsequent calls to choose and inner_consistent. In principle, node labels could be used for some inner tuples and omitted for others in the same index.
When working with an inner tuple having unlabeled nodes, it is an error - for choose to return spgAddNode, since the set + for choose to return spgAddNode, since the set of nodes is supposed to be fixed in such cases. - <quote>All-the-same</> Inner Tuples + <quote>All-the-same</quote> Inner Tuples The SP-GiST core can override the results of the - operator class's picksplit function when - picksplit fails to divide the supplied leaf values into + operator class's picksplit function when + picksplit fails to divide the supplied leaf values into at least two node categories. When this happens, the new inner tuple is created with multiple nodes that each have the same label (if any) - that picksplit gave to the one node it did use, and the + that picksplit gave to the one node it did use, and the leaf values are divided at random among these equivalent nodes. - The allTheSame flag is set on the inner tuple to warn the - choose and inner_consistent functions that the + The allTheSame flag is set on the inner tuple to warn the + choose and inner_consistent functions that the tuple does not have the node set that they might otherwise expect. - When dealing with an allTheSame tuple, a choose - result of spgMatchNode is interpreted to mean that the new + When dealing with an allTheSame tuple, a choose + result of spgMatchNode is interpreted to mean that the new value can be assigned to any of the equivalent nodes; the core code will - ignore the supplied nodeN value and descend into one + ignore the supplied nodeN value and descend into one of the nodes at random (so as to keep the tree balanced). It is an - error for choose to return spgAddNode, since + error for choose to return spgAddNode, since that would make the nodes not all equivalent; the - spgSplitTuple action must be used if the value to be inserted + spgSplitTuple action must be used if the value to be inserted doesn't match the existing nodes. - When dealing with an allTheSame tuple, the - inner_consistent function should return either all or none + When dealing with an allTheSame tuple, the + inner_consistent function should return either all or none of the nodes as targets for continuing the index search, since they are all equivalent. This may or may not require any special-case code, - depending on how much the inner_consistent function normally + depending on how much the inner_consistent function normally assumes about the meaning of the nodes. @@ -894,9 +1013,9 @@ typedef struct spgLeafConsistentOut The PostgreSQL source distribution includes several examples of index operator classes for SP-GiST, - as described in . Look - into src/backend/access/spgist/ - and src/backend/utils/adt/ to see the code. + as described in . Look + into src/backend/access/spgist/ + and src/backend/utils/adt/ to see the code. diff --git a/doc/src/sgml/spi.sgml b/doc/src/sgml/spi.sgml index 86be87c0fd..9db11d22fb 100644 --- a/doc/src/sgml/spi.sgml +++ b/doc/src/sgml/spi.sgml @@ -21,29 +21,21 @@ The available procedural languages provide various means to - execute SQL commands from procedures. Most of these facilities are + execute SQL commands from functions. Most of these facilities are based on SPI, so this documentation might be of use for users of those languages as well. - - To avoid misunderstanding we'll use the term function - when we speak of SPI interface functions and - procedure for a user-defined C-function that is - using SPI. - - Note that if a command invoked via SPI fails, then control will not be - returned to your procedure. 
Rather, the - transaction or subtransaction in which your procedure executes will be + returned to your C function. Rather, the + transaction or subtransaction in which your C function executes will be rolled back. (This might seem surprising given that the SPI functions mostly have documented error-return conventions. Those conventions only apply for errors detected within the SPI functions themselves, however.) It is possible to recover control after an error by establishing your own - subtransaction surrounding SPI calls that might fail. This is not currently - documented because the mechanisms required are still in flux. + subtransaction surrounding SPI calls that might fail. @@ -64,6 +56,7 @@ SPI_connect + SPI_connect_ext SPI_connect @@ -72,12 +65,17 @@ SPI_connect - connect a procedure to the SPI manager + SPI_connect_ext + connect a C function to the SPI manager int SPI_connect(void) + + + +int SPI_connect_ext(int options) @@ -86,9 +84,34 @@ int SPI_connect(void) SPI_connect opens a connection from a - procedure invocation to the SPI manager. You must call this + C function invocation to the SPI manager. You must call this function if you want to execute commands through SPI. Some utility - SPI functions can be called from unconnected procedures. + SPI functions can be called from unconnected C functions. + + + + SPI_connect_ext does the same but has an argument that + allows passing option flags. Currently, the following option values are + available: + + + SPI_OPT_NONATOMIC + + + Sets the SPI connection to be nonatomic, which + means that transaction control calls SPI_commit, + SPI_rollback, and + SPI_start_transaction are allowed. Otherwise, + calling these functions will result in an immediate error. + + + + + + + + SPI_connect() is equivalent to + SPI_connect_ext(0). @@ -129,7 +152,7 @@ int SPI_connect(void) SPI_finish - disconnect a procedure from the SPI manager + disconnect a C function from the SPI manager @@ -144,7 +167,7 @@ int SPI_finish(void) SPI_finish closes an existing connection to the SPI manager. You must call this function after completing the - SPI operations needed during your procedure's current invocation. + SPI operations needed during your C function's current invocation. You do not need to worry about making this happen, however, if you abort the transaction via elog(ERROR). In that case SPI will clean itself up automatically. @@ -168,7 +191,7 @@ int SPI_finish(void) SPI_ERROR_UNCONNECTED - if called from an unconnected procedure + if called from an unconnected C function @@ -203,12 +226,12 @@ int SPI_execute(const char * command, bool rea SPI_execute executes the specified SQL command for count rows. If read_only - is true, the command must be read-only, and execution overhead + is true, the command must be read-only, and execution overhead is somewhat reduced. - This function can only be called from a connected procedure. + This function can only be called from a connected C function. @@ -225,13 +248,13 @@ SPI_execute("SELECT * FROM foo", true, 5); SPI_execute("INSERT INTO foo SELECT * FROM bar", false, 5); - inserts all rows from bar, ignoring the + inserts all rows from bar, ignoring the count parameter. However, with SPI_execute("INSERT INTO foo SELECT * FROM bar RETURNING *", false, 5); at most 5 rows would be inserted, since execution would stop after the - fifth RETURNING result row is retrieved. + fifth RETURNING result row is retrieved. 
@@ -244,26 +267,26 @@ SPI_execute("INSERT INTO foo SELECT * FROM bar RETURNING *", false, 5); - When read_only is false, + When read_only is false, SPI_execute increments the command - counter and computes a new snapshot before executing each + counter and computes a new snapshot before executing each command in the string. The snapshot does not actually change if the - current transaction isolation level is SERIALIZABLE or REPEATABLE READ, but in - READ COMMITTED mode the snapshot update allows each command to + current transaction isolation level is SERIALIZABLE or REPEATABLE READ, but in + READ COMMITTED mode the snapshot update allows each command to see the results of newly committed transactions from other sessions. This is essential for consistent behavior when the commands are modifying the database. - When read_only is true, + When read_only is true, SPI_execute does not update either the snapshot - or the command counter, and it allows only plain SELECT + or the command counter, and it allows only plain SELECT commands to appear in the command string. The commands are executed using the snapshot previously established for the surrounding query. This execution mode is somewhat faster than the read/write mode due to eliminating per-command overhead. It also allows genuinely - stable functions to be built: since successive executions + stable functions to be built: since successive executions will all use the same snapshot, there will be no change in the results. @@ -284,11 +307,11 @@ SPI_execute("INSERT INTO foo SELECT * FROM bar RETURNING *", false, 5); then you can use the global pointer SPITupleTable *SPI_tuptable to access the result rows. Some utility commands (such as - EXPLAIN) also return row sets, and SPI_tuptable + EXPLAIN) also return row sets, and SPI_tuptable will contain the result in these cases too. Some utility commands - (COPY, CREATE TABLE AS) don't return a row set, so - SPI_tuptable is NULL, but they still return the number of - rows processed in SPI_processed. + (COPY, CREATE TABLE AS) don't return a row set, so + SPI_tuptable is NULL, but they still return the number of + rows processed in SPI_processed. @@ -304,18 +327,18 @@ typedef struct HeapTuple *vals; /* rows */ } SPITupleTable; - vals is an array of pointers to rows. (The number + vals is an array of pointers to rows. (The number of valid entries is given by SPI_processed.) - tupdesc is a row descriptor which you can pass to - SPI functions dealing with rows. tuptabcxt, - alloced, and free are internal + tupdesc is a row descriptor which you can pass to + SPI functions dealing with rows. tuptabcxt, + alloced, and free are internal fields not intended for use by SPI callers. SPI_finish frees all - SPITupleTables allocated during the current - procedure. You can free a particular result table earlier, if you + SPITupleTables allocated during the current + C function. You can free a particular result table earlier, if you are done with it, by calling SPI_freetuptable. 
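 To make the above concrete, here is a minimal sketch of the usual connect/execute/inspect/finish cycle. The helper name log_first_column and the query are arbitrary illustrations; error handling is reduced to the bare minimum.

#include "postgres.h"
#include "executor/spi.h"

/* hypothetical helper: log the first column of every row a query returns */
static void
log_first_column(void)
{
    int         ret;
    uint64      i;

    if (SPI_connect() != SPI_OK_CONNECT)
        elog(ERROR, "SPI_connect failed");

    ret = SPI_execute("SELECT relname FROM pg_class", true, 0);
    if (ret != SPI_OK_SELECT)
        elog(ERROR, "SPI_execute returned %d", ret);

    for (i = 0; i < SPI_processed; i++)
    {
        /* SPI_getvalue returns a palloc'd text form of the column, or NULL */
        char *val = SPI_getvalue(SPI_tuptable->vals[i],
                                 SPI_tuptable->tupdesc, 1);

        elog(INFO, "row %llu: %s",
             (unsigned long long) i, val ? val : "(null)");
    }

    SPI_finish();               /* also frees SPI_tuptable */
}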
@@ -336,7 +359,7 @@ typedef struct bool read_only - true for read-only execution + true for read-only execution @@ -345,7 +368,7 @@ typedef struct maximum number of rows to return, - or 0 for no limit + or 0 for no limit @@ -365,7 +388,7 @@ typedef struct if a SELECT (but not SELECT - INTO) was executed + INTO) was executed @@ -473,7 +496,7 @@ typedef struct SPI_ERROR_COPY - if COPY TO stdout or COPY FROM stdin + if COPY TO stdout or COPY FROM stdin was attempted @@ -484,13 +507,13 @@ typedef struct if a transaction manipulation command was attempted - (BEGIN, - COMMIT, - ROLLBACK, - SAVEPOINT, - PREPARE TRANSACTION, - COMMIT PREPARED, - ROLLBACK PREPARED, + (BEGIN, + COMMIT, + ROLLBACK, + SAVEPOINT, + PREPARE TRANSACTION, + COMMIT PREPARED, + ROLLBACK PREPARED, or any variant thereof) @@ -509,7 +532,7 @@ typedef struct SPI_ERROR_UNCONNECTED - if called from an unconnected procedure + if called from an unconnected C function @@ -525,7 +548,7 @@ typedef struct SPI_processed and SPI_tuptable (just the pointer, not the contents of the structure). Save these two global variables into local - procedure variables if you need to access the result table of + C function variables if you need to access the result table of SPI_execute or another query-execution function across later calls. @@ -560,7 +583,7 @@ int SPI_exec(const char * command, long count< SPI_exec is the same as SPI_execute, with the latter's read_only parameter always taken as - false. + false.
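 As noted above, SPI_processed and SPI_tuptable are overwritten by every query-execution function (including SPI_exec). A short sketch of the save-into-locals pattern, assumed to run inside an already connected SPI-using C function with two arbitrary queries:

    int             ret;
    uint64          nrows;
    SPITupleTable  *first_result;

    ret = SPI_execute("SELECT 1", true, 0);
    nrows = SPI_processed;          /* save before the next SPI call */
    first_result = SPI_tuptable;

    ret = SPI_execute("SELECT 2", true, 0);     /* overwrites the globals */

    /* first_result and nrows still describe the first query's result set */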
@@ -582,7 +605,7 @@ int SPI_exec(const char * command, long count< maximum number of rows to return, - or 0 for no limit + or 0 for no limit @@ -628,7 +651,7 @@ int SPI_execute_with_args(const char *command, SPI_execute_with_args executes a command that might include references to externally supplied parameters. The command text - refers to a parameter as $n, and + refers to a parameter as $n, and the call specifies data types and values for each such symbol. read_only and count have the same interpretation as in SPI_execute. @@ -642,7 +665,7 @@ int SPI_execute_with_args(const char *command, - Similar results can be achieved with SPI_prepare followed by + Similar results can be achieved with SPI_prepare followed by SPI_execute_plan; however, when using this function the query plan is always customized to the specific parameter values provided. @@ -670,7 +693,7 @@ int SPI_execute_with_args(const char *command, int nargs - number of input parameters ($1, $2, etc.) + number of input parameters ($1, $2, etc.) @@ -707,12 +730,12 @@ int SPI_execute_with_args(const char *command, If nulls is NULL then SPI_execute_with_args assumes that no parameters are null. Otherwise, each entry of the nulls - array should be ' ' if the corresponding parameter - value is non-null, or 'n' if the corresponding parameter + array should be ' ' if the corresponding parameter + value is non-null, or 'n' if the corresponding parameter value is null. (In the latter case, the actual value in the corresponding values entry doesn't matter.) Note that nulls is not a text string, just an array: - it does not need a '\0' terminator. + it does not need a '\0' terminator. @@ -720,7 +743,7 @@ int SPI_execute_with_args(const char *command, bool read_only - true for read-only execution + true for read-only execution @@ -729,7 +752,7 @@ int SPI_execute_with_args(const char *command, maximum number of rows to return, - or 0 for no limit + or 0 for no limit @@ -796,7 +819,7 @@ SPIPlanPtr SPI_prepare(const char * command, int A prepared command can be generalized by writing parameters - ($1, $2, etc.) in place of what would be + ($1, $2, etc.) in place of what would be constants in a normal command. The actual values of the parameters are then specified when SPI_execute_plan is called. This allows the prepared command to be used over a wider range of @@ -805,7 +828,7 @@ SPIPlanPtr SPI_prepare(const char * command, int The statement returned by SPI_prepare can be used - only in the current invocation of the procedure, since + only in the current invocation of the C function, since SPI_finish frees memory allocated for such a statement. But the statement can be saved for longer using the functions SPI_keepplan or SPI_saveplan. @@ -829,7 +852,7 @@ SPIPlanPtr SPI_prepare(const char * command, int int nargs - number of input parameters ($1, $2, etc.) + number of input parameters ($1, $2, etc.) @@ -851,14 +874,14 @@ SPIPlanPtr SPI_prepare(const char * command, int SPI_prepare returns a non-null pointer to an - SPIPlan, which is an opaque struct representing a prepared + SPIPlan, which is an opaque struct representing a prepared statement. On error, NULL will be returned, and SPI_result will be set to one of the same error codes used by SPI_execute, except that it is set to SPI_ERROR_ARGUMENT if command is NULL, or if - nargs is less than 0, or if nargs is - greater than 0 and argtypes is NULL. + nargs is less than 0, or if nargs is + greater than 0 and argtypes is NULL.
@@ -875,39 +898,39 @@ SPIPlanPtr SPI_prepare(const char * command, int CURSOR_OPT_GENERIC_PLAN or - CURSOR_OPT_CUSTOM_PLAN flag to + passing the CURSOR_OPT_GENERIC_PLAN or + CURSOR_OPT_CUSTOM_PLAN flag to SPI_prepare_cursor, to force use of generic or custom plans respectively.
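 A sketch of the prepare-once, execute-many pattern discussed here is shown below. The cached-plan variable, the helper name, and the query are hypothetical; the point is that SPI_keepplan makes the statement survive SPI_finish so later invocations can reuse it.

#include "postgres.h"
#include "executor/spi.h"
#include "catalog/pg_type.h"
#include "utils/builtins.h"

/* hypothetical cache for the prepared statement, reused across calls */
static SPIPlanPtr saved_plan = NULL;

static void
run_saved_plan(const char *relname)
{
    Datum   values[1];
    char    nulls[1] = {' '};   /* ' ' = non-null parameter */

    /* caller must already be connected via SPI_connect() */
    if (saved_plan == NULL)
    {
        Oid         argtypes[1] = {TEXTOID};
        SPIPlanPtr  plan;

        /*
         * SPI_prepare_cursor(..., CURSOR_OPT_CUSTOM_PLAN) could be used
         * instead to force a custom plan for every execution.
         */
        plan = SPI_prepare("SELECT oid FROM pg_class WHERE relname = $1",
                           1, argtypes);
        if (plan == NULL)
            elog(ERROR, "SPI_prepare failed: %s",
                 SPI_result_code_string(SPI_result));

        /* keep the statement beyond SPI_finish for reuse */
        if (SPI_keepplan(plan))
            elog(ERROR, "SPI_keepplan failed");
        saved_plan = plan;
    }

    values[0] = CStringGetTextDatum(relname);
    SPI_execute_plan(saved_plan, values, nulls, true, 0);
}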
Although the main point of a prepared statement is to avoid repeated parse - analysis and planning of the statement, PostgreSQL will + analysis and planning of the statement, PostgreSQL will force re-analysis and re-planning of the statement before using it whenever database objects used in the statement have undergone definitional (DDL) changes since the previous use of the prepared - statement. Also, if the value of changes + statement. Also, if the value of changes from one use to the next, the statement will be re-parsed using the new - search_path. (This latter behavior is new as of + search_path. (This latter behavior is new as of PostgreSQL 9.3.) See for more information about the behavior of prepared + linkend="sql-prepare"/> for more information about the behavior of prepared statements. - This function should only be called from a connected procedure. + This function should only be called from a connected C function. - SPIPlanPtr is declared as a pointer to an opaque struct type in - spi.h. It is unwise to try to access its contents + SPIPlanPtr is declared as a pointer to an opaque struct type in + spi.h. It is unwise to try to access its contents directly, as that makes your code much more likely to break in future revisions of PostgreSQL. - The name SPIPlanPtr is somewhat historical, since the data + The name SPIPlanPtr is somewhat historical, since the data structure no longer necessarily contains an execution plan. @@ -941,9 +964,9 @@ SPIPlanPtr SPI_prepare_cursor(const char * command, int < SPI_prepare_cursor is identical to SPI_prepare, except that it also allows specification - of the planner's cursor options parameter. This is a bit mask + of the planner's cursor options parameter. This is a bit mask having the values shown in nodes/parsenodes.h - for the options field of DeclareCursorStmt. + for the options field of DeclareCursorStmt. SPI_prepare always takes the cursor options as zero. @@ -965,7 +988,7 @@ SPIPlanPtr SPI_prepare_cursor(const char * command, int < int nargs - number of input parameters ($1, $2, etc.) + number of input parameters ($1, $2, etc.) @@ -1004,7 +1027,7 @@ SPIPlanPtr SPI_prepare_cursor(const char * command, int < Notes - Useful bits to set in cursorOptions include + Useful bits to set in cursorOptions include CURSOR_OPT_SCROLL, CURSOR_OPT_NO_SCROLL, CURSOR_OPT_FAST_PLAN, @@ -1262,9 +1285,9 @@ bool SPI_is_cursor_plan(SPIPlanPtr plan) as an argument to SPI_cursor_open, or false if that is not the case. The criteria are that the plan represents one single command and that this - command returns tuples to the caller; for example, SELECT - is allowed unless it contains an INTO clause, and - UPDATE is allowed only if it contains a RETURNING + command returns tuples to the caller; for example, SELECT + is allowed unless it contains an INTO clause, and + UPDATE is allowed only if it contains a RETURNING clause. @@ -1368,12 +1391,12 @@ int SPI_execute_plan(SPIPlanPtr plan, Datum * If nulls is NULL then SPI_execute_plan assumes that no parameters are null. Otherwise, each entry of the nulls - array should be ' ' if the corresponding parameter - value is non-null, or 'n' if the corresponding parameter + array should be ' ' if the corresponding parameter + value is non-null, or 'n' if the corresponding parameter value is null. (In the latter case, the actual value in the corresponding values entry doesn't matter.) Note that nulls is not a text string, just an array: - it does not need a '\0' terminator. + it does not need a '\0' terminator.
@@ -1381,7 +1404,7 @@ int SPI_execute_plan(SPIPlanPtr plan, Datum * bool read_only - true for read-only execution + true for read-only execution @@ -1390,7 +1413,7 @@ int SPI_execute_plan(SPIPlanPtr plan, Datum * maximum number of rows to return, - or 0 for no limit + or 0 for no limit @@ -1467,10 +1490,10 @@ int SPI_execute_plan_with_paramlist(SPIPlanPtr plan, prepared by SPI_prepare. This function is equivalent to SPI_execute_plan except that information about the parameter values to be passed to the - query is presented differently. The ParamListInfo + query is presented differently. The ParamListInfo representation can be convenient for passing down values that are already available in that format. It also supports use of dynamic - parameter sets via hook functions specified in ParamListInfo. + parameter sets via hook functions specified in ParamListInfo.
@@ -1499,7 +1522,7 @@ int SPI_execute_plan_with_paramlist(SPIPlanPtr plan, bool read_only - true for read-only execution + true for read-only execution @@ -1508,7 +1531,7 @@ int SPI_execute_plan_with_paramlist(SPIPlanPtr plan, maximum number of rows to return, - or 0 for no limit + or 0 for no limit @@ -1558,7 +1581,7 @@ int SPI_execp(SPIPlanPtr plan, Datum * values< SPI_execp is the same as SPI_execute_plan, with the latter's read_only parameter always taken as - false. + false.
@@ -1597,12 +1620,12 @@ int SPI_execp(SPIPlanPtr plan, Datum * values< If nulls is NULL then SPI_execp assumes that no parameters are null. Otherwise, each entry of the nulls - array should be ' ' if the corresponding parameter - value is non-null, or 'n' if the corresponding parameter + array should be ' ' if the corresponding parameter + value is non-null, or 'n' if the corresponding parameter value is null. (In the latter case, the actual value in the corresponding values entry doesn't matter.) Note that nulls is not a text string, just an array: - it does not need a '\0' terminator. + it does not need a '\0' terminator.
@@ -1612,7 +1635,7 @@ int SPI_execp(SPIPlanPtr plan, Datum * values< maximum number of rows to return, - or 0 for no limit + or 0 for no limit @@ -1672,9 +1695,9 @@ Portal SPI_cursor_open(const char * name, SPIPlanPtr @@ -1729,12 +1752,12 @@ Portal SPI_cursor_open(const char * name, SPIPlanPtr nulls is NULL then SPI_cursor_open assumes that no parameters are null. Otherwise, each entry of the nulls - array should be ' ' if the corresponding parameter - value is non-null, or 'n' if the corresponding parameter + array should be ' ' if the corresponding parameter + value is non-null, or 'n' if the corresponding parameter value is null. (In the latter case, the actual value in the corresponding values entry doesn't matter.) Note that nulls is not a text string, just an array: - it does not need a '\0' terminator. + it does not need a '\0' terminator. @@ -1742,7 +1765,7 @@ Portal SPI_cursor_open(const char * name, SPIPlanPtr bool read_only - true for read-only execution + true for read-only execution
@@ -1753,7 +1776,7 @@ Portal SPI_cursor_open(const char * name, SPIPlanPtr Pointer to portal containing the cursor. Note there is no error - return convention; any error will be reported via elog. + return convention; any error will be reported via elog. @@ -1836,7 +1859,7 @@ Portal SPI_cursor_open_with_args(const char *name, int nargs - number of input parameters ($1, $2, etc.) + number of input parameters ($1, $2, etc.) @@ -1873,12 +1896,12 @@ Portal SPI_cursor_open_with_args(const char *name, If nulls is NULL then SPI_cursor_open_with_args assumes that no parameters are null. Otherwise, each entry of the nulls - array should be ' ' if the corresponding parameter - value is non-null, or 'n' if the corresponding parameter + array should be ' ' if the corresponding parameter + value is non-null, or 'n' if the corresponding parameter value is null. (In the latter case, the actual value in the corresponding values entry doesn't matter.) Note that nulls is not a text string, just an array: - it does not need a '\0' terminator. + it does not need a '\0' terminator. @@ -1886,7 +1909,7 @@ Portal SPI_cursor_open_with_args(const char *name, bool read_only - true for read-only execution + true for read-only execution @@ -1906,7 +1929,7 @@ Portal SPI_cursor_open_with_args(const char *name, Pointer to portal containing the cursor. Note there is no error - return convention; any error will be reported via elog. + return convention; any error will be reported via elog. @@ -1944,10 +1967,10 @@ Portal SPI_cursor_open_with_paramlist(const char *name, SPI_prepare. This function is equivalent to SPI_cursor_open except that information about the parameter values to be passed to the - query is presented differently. The ParamListInfo + query is presented differently. The ParamListInfo representation can be convenient for passing down values that are already available in that format. It also supports use of dynamic - parameter sets via hook functions specified in ParamListInfo. + parameter sets via hook functions specified in ParamListInfo. @@ -1991,7 +2014,7 @@ Portal SPI_cursor_open_with_paramlist(const char *name, bool read_only - true for read-only execution + true for read-only execution @@ -2002,7 +2025,7 @@ Portal SPI_cursor_open_with_paramlist(const char *name, Pointer to portal containing the cursor. Note there is no error - return convention; any error will be reported via elog. + return convention; any error will be reported via elog. @@ -2090,7 +2113,7 @@ void SPI_cursor_fetch(Portal portal, bool forw SPI_cursor_fetch fetches some rows from a cursor. This is equivalent to a subset of the SQL command - FETCH (see SPI_scroll_cursor_fetch + FETCH (see SPI_scroll_cursor_fetch for more functionality). @@ -2175,7 +2198,7 @@ void SPI_cursor_move(Portal portal, bool forwa SPI_cursor_move skips over some number of rows in a cursor. This is equivalent to a subset of the SQL command - MOVE (see SPI_scroll_cursor_move + MOVE (see SPI_scroll_cursor_move for more functionality). @@ -2250,7 +2273,7 @@ void SPI_scroll_cursor_fetch(Portal portal, FetchDirectio SPI_scroll_cursor_fetch fetches some rows from a - cursor. This is equivalent to the SQL command FETCH. + cursor. This is equivalent to the SQL command FETCH. @@ -2308,7 +2331,7 @@ void SPI_scroll_cursor_fetch(Portal portal, FetchDirectio Notes - See the SQL command + See the SQL command for details of the interpretation of the direction and count parameters. 
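 Putting the cursor functions together, a batched scan could look roughly like the sketch below. The query and the batch size of 100 are arbitrary; passing NULL as the portal name lets SPI choose one, and each batch is freed early with SPI_freetuptable rather than waiting for SPI_finish.

#include "postgres.h"
#include "executor/spi.h"

/* hypothetical example: walk a query's result in batches of 100 rows */
static void
scan_in_batches(void)
{
    SPIPlanPtr  plan;
    Portal      portal;

    SPI_connect();

    plan = SPI_prepare("SELECT relname FROM pg_class", 0, NULL);

    /* NULL name: SPI picks a portal name; no parameters; read-only */
    portal = SPI_cursor_open(NULL, plan, NULL, NULL, true);

    for (;;)
    {
        SPI_cursor_fetch(portal, true, 100);

        if (SPI_processed == 0)
            break;

        /* ... work with SPI_tuptable->vals[0 .. SPI_processed - 1] ... */

        /* release this batch now instead of at SPI_finish */
        SPI_freetuptable(SPI_tuptable);
    }

    SPI_cursor_close(portal);
    SPI_finish();
}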
@@ -2350,7 +2373,7 @@ void SPI_scroll_cursor_move(Portal portal, FetchDirection SPI_scroll_cursor_move skips over some number of rows in a cursor. This is equivalent to the SQL command - MOVE. + MOVE. @@ -2400,7 +2423,7 @@ void SPI_scroll_cursor_move(Portal portal, FetchDirection SPI_processed is set as in SPI_execute if successful. - SPI_tuptable is set to NULL, since + SPI_tuptable is set to NULL, since no rows are returned by this function. @@ -2409,7 +2432,7 @@ void SPI_scroll_cursor_move(Portal portal, FetchDirection Notes - See the SQL command + See the SQL command for details of the interpretation of the direction and count parameters. @@ -2504,7 +2527,7 @@ int SPI_keepplan(SPIPlanPtr plan) SPI_prepare) so that it will not be freed by SPI_finish nor by the transaction manager. This gives you the ability to reuse prepared statements in the subsequent - invocations of your procedure in the current session. + invocations of your C function in the current session. @@ -2574,7 +2597,7 @@ SPIPlanPtr SPI_saveplan(SPIPlanPtr plan) by SPI_finish nor by the transaction manager, and returns a pointer to the copied statement. This gives you the ability to reuse prepared statements in the subsequent invocations of - your procedure in the current session. + your C function in the current session. @@ -2614,7 +2637,7 @@ SPIPlanPtr SPI_saveplan(SPIPlanPtr plan) SPI_ERROR_UNCONNECTED - if called from an unconnected procedure + if called from an unconnected C function @@ -2628,7 +2651,7 @@ SPIPlanPtr SPI_saveplan(SPIPlanPtr plan) The originally passed-in statement is not freed, so you might wish to do SPI_freeplan on it to avoid leaking memory - until SPI_finish. + until SPI_finish. @@ -2656,7 +2679,7 @@ SPIPlanPtr SPI_saveplan(SPIPlanPtr plan) SPI_register_relation - make a ephemeral named relation available by name in SPI queries + make an ephemeral named relation available by name in SPI queries @@ -2727,7 +2750,7 @@ int SPI_register_relation(EphemeralNamedRelation enr) SPI_ERROR_UNCONNECTED - if called from an unconnected procedure + if called from an unconnected C function @@ -2832,7 +2855,7 @@ int SPI_unregister_relation(const char * name) SPI_ERROR_UNCONNECTED - if called from an unconnected procedure + if called from an unconnected C function @@ -2947,7 +2970,7 @@ int SPI_register_trigger_data(TriggerData *tdata) SPI_ERROR_UNCONNECTED - if called from an unconnected procedure + if called from an unconnected C function @@ -2975,13 +2998,13 @@ int SPI_register_trigger_data(TriggerData *tdata) The functions described here provide an interface for extracting - information from result sets returned by SPI_execute and + information from result sets returned by SPI_execute and other SPI functions. All functions described in this section can be used by both - connected and unconnected procedures. + connected and unconnected C functions. @@ -3082,7 +3105,7 @@ int SPI_fnumber(TupleDesc rowdesc, const char * If colname refers to a system column (e.g., - oid) then the appropriate negative column number will + oid) then the appropriate negative column number will be returned. 
The caller should be careful to test the return value for exact equality to SPI_ERROR_NOATTRIBUTE to detect an error; testing the result for less than or equal to 0 is @@ -3546,6 +3569,59 @@ char * SPI_getnspname(Relation rel) + + SPI_result_code_string + + + SPI_result_code_string + 3 + + + + SPI_result_code_string + return error code as string + + + + +const char * SPI_result_code_string(int code); + + + + + Description + + + SPI_result_code_string returns a string representation + of the result code returned by various SPI functions or stored + in SPI_result. + + + + + Arguments + + + + int code + + + result code + + + + + + + + Return Value + + + A string representation of the result code. + + + + @@ -3564,7 +3640,7 @@ char * SPI_getnspname(Relation rel) to keep track of individual objects to avoid memory leaks; instead only a relatively small number of contexts have to be managed. palloc and related functions allocate memory - from the current context. + from the current context. @@ -3572,37 +3648,37 @@ char * SPI_getnspname(Relation rel) makes it current. SPI_finish restores the previous current memory context and destroys the context created by SPI_connect. These actions ensure that - transient memory allocations made inside your procedure are - reclaimed at procedure exit, avoiding memory leakage. + transient memory allocations made inside your C function are + reclaimed at C function exit, avoiding memory leakage. - However, if your procedure needs to return an object in allocated + However, if your C function needs to return an object in allocated memory (such as a value of a pass-by-reference data type), you cannot allocate that memory using palloc, at least not while you are connected to SPI. If you try, the object will be deallocated by SPI_finish, and your - procedure will not work reliably. To solve this problem, use + C function will not work reliably. To solve this problem, use SPI_palloc to allocate memory for your return object. SPI_palloc allocates memory in the upper executor context, that is, the memory context that was current when SPI_connect was called, which is precisely the right context for a value returned from your - procedure. Several of the other utility procedures described in + C function. Several of the other utility functions described in this section also return objects created in the upper executor context. When SPI_connect is called, the private - context of the procedure, which is created by + context of the C function, which is created by SPI_connect, is made the current context. All allocations made by palloc, repalloc, or SPI utility functions (except as described in this section) are made in this context. When a - procedure disconnects from the SPI manager (via + C function disconnects from the SPI manager (via SPI_finish) the current context is restored to the upper executor context, and all allocations made in the - procedure memory context are freed and cannot be used any more. + C function memory context are freed and cannot be used any more. @@ -3890,7 +3966,7 @@ HeapTupleHeader SPI_returntuple(HeapTuple row, TupleDesc Note that this should be used for functions that are declared to return composite types. It is not used for triggers; use - SPI_copytuple for returning a modified row in a trigger. + SPI_copytuple for returning a modified row in a trigger. @@ -4034,12 +4110,12 @@ HeapTuple SPI_modifytuple(Relation rel, HeapTuple nulls is NULL then SPI_modifytuple assumes that no new values are null. 
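 A minimal sketch of the SPI_palloc pattern described above, for a hypothetical version-1 C function (first_relname) that returns one value fetched via SPI as text. The query is arbitrary, the result is assumed to be a single non-null row for brevity, and PG_MODULE_MAGIC is assumed to appear elsewhere in the module.

#include "postgres.h"
#include "fmgr.h"
#include "executor/spi.h"

PG_FUNCTION_INFO_V1(first_relname);

Datum
first_relname(PG_FUNCTION_ARGS)
{
    char   *val;
    size_t  len;
    text   *result;

    SPI_connect();
    SPI_execute("SELECT relname FROM pg_class LIMIT 1", true, 1);

    val = SPI_getvalue(SPI_tuptable->vals[0], SPI_tuptable->tupdesc, 1);
    len = strlen(val);

    /*
     * Allocate the return value with SPI_palloc so it is placed in the
     * upper executor context and therefore survives SPI_finish.
     */
    result = (text *) SPI_palloc(len + VARHDRSZ);
    SET_VARSIZE(result, len + VARHDRSZ);
    memcpy(VARDATA(result), val, len);

    SPI_finish();

    PG_RETURN_TEXT_P(result);
}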
Otherwise, each entry of the nulls - array should be ' ' if the corresponding new value is - non-null, or 'n' if the corresponding new value is + array should be ' ' if the corresponding new value is + non-null, or 'n' if the corresponding new value is null. (In the latter case, the actual value in the corresponding values entry doesn't matter.) Note that nulls is not a text string, just an array: it - does not need a '\0' terminator. + does not need a '\0' terminator. @@ -4062,10 +4138,10 @@ HeapTuple SPI_modifytuple(Relation rel, HeapTuple SPI_ERROR_ARGUMENT - if rel is NULL, or if - row is NULL, or if ncols - is less than or equal to 0, or if colnum is - NULL, or if values is NULL. + if rel is NULL, or if + row is NULL, or if ncols + is less than or equal to 0, or if colnum is + NULL, or if values is NULL. @@ -4074,9 +4150,9 @@ HeapTuple SPI_modifytuple(Relation rel, HeapTuple SPI_ERROR_NOATTRIBUTE - if colnum contains an invalid column number (less + if colnum contains an invalid column number (less than or equal to 0 or greater than the number of columns in - row) + row) @@ -4158,7 +4234,7 @@ void SPI_freetuple(HeapTuple row) SPI_freetuptable - free a row set created by SPI_execute or a similar + free a row set created by SPI_execute or a similar function @@ -4174,23 +4250,23 @@ void SPI_freetuptable(SPITupleTable * tuptable) SPI_freetuptable frees a row set created by a prior SPI command execution function, such as - SPI_execute. Therefore, this function is often called + SPI_execute. Therefore, this function is often called with the global variable SPI_tuptable as argument. - This function is useful if a SPI procedure needs to execute + This function is useful if an SPI-using C function needs to execute multiple commands and does not want to keep the results of earlier commands around until it ends. Note that any unfreed row sets will - be freed anyway at SPI_finish. + be freed anyway at SPI_finish. Also, if a subtransaction is started and then aborted within execution - of a SPI procedure, SPI automatically frees any row sets created while + of an SPI-using C function, SPI automatically frees any row sets created while the subtransaction was running. - Beginning in PostgreSQL 9.3, + Beginning in PostgreSQL 9.3, SPI_freetuptable contains guard logic to protect against duplicate deletion requests for the same row set. In previous releases, duplicate deletions would lead to crashes. @@ -4272,6 +4348,152 @@ int SPI_freeplan(SPIPlanPtr plan) + + Transaction Management + + + It is not possible to run transaction control commands such + as COMMIT and ROLLBACK through SPI + functions such as SPI_execute. There are, however, + separate interface functions that allow transaction control through SPI. + + + + It is not generally safe and sensible to start and end transactions in + arbitrary user-defined SQL-callable functions without taking into account + the context in which they are called. For example, a transaction boundary + in the middle of a function that is part of a complex SQL expression that + is part of some SQL command will probably result in obscure internal errors + or crashes. The interface functions presented here are primarily intended + to be used by procedural language implementations to support transaction + management in SQL-level procedures that are invoked by the CALL + command, taking the context of the CALL invocation into + account. 
SPI-using procedures implemented in C can implement the same logic, but + the details of that are beyond the scope of this documentation. + + + + + + SPI_commit + + + SPI_commit + 3 + + + + SPI_commit + commit the current transaction + + + + +void SPI_commit(void) + + + + + Description + + + SPI_commit commits the current transaction. It is + approximately equivalent to running the SQL + command COMMIT. After a transaction is committed, a new + transaction has to be started + using SPI_start_transaction before further database + actions can be executed. + + + + This function can only be executed if the SPI connection has been set as + nonatomic in the call to SPI_connect_ext. + + + + + + + + SPI_rollback + + + SPI_rollback + 3 + + + + SPI_rollback + abort the current transaction + + + + +void SPI_rollback(void) + + + + + Description + + + SPI_rollback rolls back the current transaction. It + is approximately equivalent to running the SQL + command ROLLBACK. After a transaction is rolled back, a + new transaction has to be started + using SPI_start_transaction before further database + actions can be executed. + + + + This function can only be executed if the SPI connection has been set as + nonatomic in the call to SPI_connect_ext. + + + + + + + + SPI_start_transaction + + + SPI_start_transaction + 3 + + + + SPI_start_transaction + start a new transaction + + + + +void SPI_start_transaction(void) + + + + + Description + + + SPI_start_transaction starts a new transaction. It + can only be called after SPI_commit + or SPI_rollback, as there is no transaction active at + that point. Normally, when an SPI-using procedure is called, there is already a + transaction active, so attempting to start another one before closing out + the current one will result in an error. + + + + This function can only be executed if the SPI connection has been set as + nonatomic in the call to SPI_connect_ext. + + + + + + Visibility of Data Changes @@ -4317,8 +4539,8 @@ INSERT INTO a SELECT * FROM a; All standard procedural languages set the SPI read-write mode depending on the volatility attribute of the function. Commands of - STABLE and IMMUTABLE functions are done in - read-only mode, while commands of VOLATILE functions are + STABLE and IMMUTABLE functions are done in + read-only mode, while commands of VOLATILE functions are done in read-write mode. While authors of C functions are able to violate this convention, it's unlikely to be a good idea to do so. @@ -4337,13 +4559,13 @@ INSERT INTO a SELECT * FROM a; This section contains a very simple example of SPI usage. The - procedure execq takes an SQL command as its + C function execq takes an SQL command as its first argument and a row count as its second, executes the command using SPI_exec and returns the number of rows that were processed by the command. You can find more complex examples for SPI in the source tree in src/test/regress/regress.c and in the - module. + module. 
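 A rough sketch of what such C-level transaction control can look like is shown below, mirroring what a procedural language would do on behalf of a CALL-invoked procedure. The helper name and the target table test_tbl are hypothetical; the essential points are the nonatomic connection and the SPI_commit / SPI_start_transaction pairing described above.

#include "postgres.h"
#include "executor/spi.h"

/* hypothetical helper: insert rows, committing after each one */
static void
insert_with_commits(int nrows)
{
    int i;

    /* the nonatomic flag is what permits SPI_commit/SPI_rollback below */
    SPI_connect_ext(SPI_OPT_NONATOMIC);

    for (i = 0; i < nrows; i++)
    {
        SPI_execute("INSERT INTO test_tbl DEFAULT VALUES", false, 0);

        /* end the current transaction and open a new one */
        SPI_commit();
        SPI_start_transaction();
    }

    SPI_finish();
}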
@@ -4352,21 +4574,21 @@ INSERT INTO a SELECT * FROM a; #include "executor/spi.h" #include "utils/builtins.h" -#ifdef PG_MODULE_MAGIC PG_MODULE_MAGIC; -#endif -int64 execq(text *sql, int cnt); +PG_FUNCTION_INFO_V1(execq); -int64 -execq(text *sql, int cnt) +Datum +execq(PG_FUNCTION_ARGS) { char *command; + int cnt; int ret; uint64 proc; /* Convert given text object to a C string */ - command = text_to_cstring(sql); + command = text_to_cstring(PG_GETARG_TEXT_PP(1)); + cnt = PG_GETARG_INT32(2); SPI_connect(); @@ -4399,19 +4621,13 @@ execq(text *sql, int cnt) SPI_finish(); pfree(command); - return (proc); + PG_RETURN_INT64(proc); } - - (This function uses call convention version 0, to make the example - easier to understand. In real applications you should use the new - version 1 interface.) - - This is how you declare the function after having compiled it into - a shared library (details are in .): + a shared library (details are in .): CREATE FUNCTION execq(text, integer) RETURNS int8 diff --git a/doc/src/sgml/sslinfo.sgml b/doc/src/sgml/sslinfo.sgml index 7bda33efa3..cda09aaafd 100644 --- a/doc/src/sgml/sslinfo.sgml +++ b/doc/src/sgml/sslinfo.sgml @@ -8,15 +8,15 @@ - The sslinfo module provides information about the SSL + The sslinfo module provides information about the SSL certificate that the current client provided when connecting to - PostgreSQL. The module is useless (most functions + PostgreSQL. The module is useless (most functions will return NULL) if the current connection does not use SSL. This extension won't build at all unless the installation was - configured with --with-openssl. + configured with --with-openssl. @@ -32,7 +32,7 @@ - Returns TRUE if current connection to server uses SSL, and FALSE + Returns true if current connection to server uses SSL, and false otherwise. @@ -77,8 +77,8 @@ - Returns TRUE if current client has presented a valid SSL client - certificate to the server, and FALSE otherwise. (The server + Returns true if current client has presented a valid SSL client + certificate to the server, and false otherwise. (The server might or might not be configured to require a client certificate.) @@ -126,7 +126,7 @@ - The result looks like /CN=Somebody /C=Some country/O=Some organization. + The result looks like /CN=Somebody /C=Some country/O=Some organization. @@ -142,7 +142,7 @@ Returns the full issuer name of the current client certificate, converting character data into the current database encoding. Encoding conversions - are handled the same as for ssl_client_dn. + are handled the same as for ssl_client_dn. The combination of the return value of this function with the @@ -150,7 +150,7 @@ This function is really useful only if you have more than one trusted CA - certificate in your server's root.crt file, or if this CA + certificate in your server's certificate authority file, or if this CA has issued some intermediate certificate authority certificates. @@ -195,7 +195,7 @@ role emailAddress - All of these fields are optional, except commonName. + All of these fields are optional, except commonName. It depends entirely on your CA's policy which of them would be included and which wouldn't. The meaning of these fields, however, is strictly defined by @@ -214,7 +214,7 @@ emailAddress - Same as ssl_client_dn_field, but for the certificate issuer + Same as ssl_client_dn_field, but for the certificate issuer rather than the certificate subject. 
diff --git a/doc/src/sgml/standalone-install.sgml b/doc/src/sgml/standalone-install.sgml deleted file mode 100644 index 1942f9dc4c..0000000000 --- a/doc/src/sgml/standalone-install.sgml +++ /dev/null @@ -1,28 +0,0 @@ - - - - - -%version; - - - - - - - -]> diff --git a/doc/src/sgml/standalone-install.xml b/doc/src/sgml/standalone-install.xml new file mode 100644 index 0000000000..62582effed --- /dev/null +++ b/doc/src/sgml/standalone-install.xml @@ -0,0 +1,167 @@ + + + +
+ <productname>PostgreSQL</productname> Installation from Source Code + + + This document describes the installation of + PostgreSQL using this source code distribution. + + + + + + + + + Getting Started + + + The following is a quick summary of how to get PostgreSQL up and + running once installed. The main documentation contains more information. + + + + + + Create a user account for the PostgreSQL + server. This is the user the server will run as. For production + use you should create a separate, unprivileged account + (postgres is commonly used). If you do not have root + access or just want to play around, your own user account is + enough, but running the server as root is a security risk and + will not work. +adduser postgres + + + + + + Create a database installation with the initdb + command. To run initdb you must be logged in to your + PostgreSQL server account. It will not work as + root. +root# mkdir /usr/local/pgsql/data +root# chown postgres /usr/local/pgsql/data +root# su - postgres +postgres$ /usr/local/pgsql/bin/initdb -D /usr/local/pgsql/data + + + + The option specifies the location where the data + will be stored. You can use any path you want, it does not have + to be under the installation directory. Just make sure that the + server account can write to the directory (or create it, if it + doesn't already exist) before starting initdb, as + illustrated here. + + + + + + At this point, if you did not use the initdb -A + option, you might want to modify pg_hba.conf to control + local access to the server before you start it. The default is to + trust all local users. + + + + + + The previous initdb step should have told you how to + start up the database server. Do so now. The command should look + something like: +/usr/local/pgsql/bin/postgres -D /usr/local/pgsql/data + This will start the server in the foreground. To put the server + in the background use something like: +nohup /usr/local/pgsql/bin/postgres -D /usr/local/pgsql/data \ + </dev/null >>server.log 2>&1 </dev/null & + + + + To stop a server running in the background you can type: +kill `cat /usr/local/pgsql/data/postmaster.pid` + + + + + + Create a database: +createdb testdb + Then enter: +psql testdb + to connect to that database. At the prompt you can enter SQL + commands and start experimenting. + + + + + + + What Now? + + + + + + The PostgreSQL distribution contains a + comprehensive documentation set, which you should read sometime. + After installation, the documentation can be accessed by + pointing your browser to + /usr/local/pgsql/doc/html/index.html, unless you + changed the installation directories. + + + + The first few chapters of the main documentation are the Tutorial, + which should be your first reading if you are completely new to + SQL databases. If you are familiar with database + concepts then you want to proceed with part on server + administration, which contains information about how to set up + the database server, database users, and authentication. + + + + + + Usually, you will want to modify your computer so that it will + automatically start the database server whenever it boots. Some + suggestions for this are in the documentation. + + + + + + Run the regression tests against the installed server (using + make installcheck). If you didn't run the + tests before installation, you should definitely do it now. This + is also explained in the documentation. + + + + + + By default, PostgreSQL is configured to run on + minimal hardware. 
This allows it to start up with almost any + hardware configuration. The default configuration is, however, + not designed for optimum performance. To achieve optimum + performance, several server parameters must be adjusted, the two + most common being shared_buffers and + work_mem. + Other parameters mentioned in the documentation also affect + performance. + + + + + + + + +
diff --git a/doc/src/sgml/standalone-profile.xsl b/doc/src/sgml/standalone-profile.xsl new file mode 100644 index 0000000000..ff464c1654 --- /dev/null +++ b/doc/src/sgml/standalone-profile.xsl @@ -0,0 +1,81 @@ + + + + + + + + + + + + + + + + + + + + + + document + + + + the documentation about client authentication and libpq + + + + the main documentation's appendix on documentation + + + + the documentation + + + + the documentation + + + + pgcrypto + + + + the PL/Python documentation + + + + the file + src/test/regress/README + and the documentation + + + + the documentation + + + + uuid-ossp + + + + xml2 + + + diff --git a/doc/src/sgml/start.sgml b/doc/src/sgml/start.sgml index 1ce1a24e10..5b73557835 100644 --- a/doc/src/sgml/start.sgml +++ b/doc/src/sgml/start.sgml @@ -29,7 +29,7 @@ If you are installing PostgreSQL - yourself, then refer to + yourself, then refer to for instructions on installation, and return to this guide when the installation is complete. Be sure to follow closely the section about setting up the appropriate environment @@ -162,7 +162,7 @@ createdb: command not found - then PostgreSQL was not installed properly. Either it was not + then PostgreSQL was not installed properly. Either it was not installed at all or your shell's search path was not set to include it. Try calling the command with an absolute path instead: @@ -191,17 +191,17 @@ createdb: could not connect to database postgres: could not connect to server: N createdb: could not connect to database postgres: FATAL: role "joe" does not exist where your own login name is mentioned. This will happen if the - administrator has not created a PostgreSQL user account - for you. (PostgreSQL user accounts are distinct from + administrator has not created a PostgreSQL user account + for you. (PostgreSQL user accounts are distinct from operating system user accounts.) If you are the administrator, see - for help creating accounts. You will need to - become the operating system user under which PostgreSQL - was installed (usually postgres) to create the first user + for help creating accounts. You will need to + become the operating system user under which PostgreSQL + was installed (usually postgres) to create the first user account. It could also be that you were assigned a - PostgreSQL user name that is different from your - operating system user name; in that case you need to use the @@ -268,7 +268,7 @@ createdb: database creation failed: ERROR: permission denied to create database More about createdb and dropdb can - be found in and + be found in and respectively.
@@ -288,7 +288,7 @@ createdb: database creation failed: ERROR: permission denied to create database Running the PostgreSQL interactive - terminal program, called psql, which allows you + terminal program, called psql, which allows you to interactively enter, edit, and execute SQL commands. @@ -298,7 +298,7 @@ createdb: database creation failed: ERROR: permission denied to create database Using an existing graphical frontend tool like pgAdmin or an office suite with - ODBC or JDBC support to create and manipulate a + ODBC or JDBC support to create and manipulate a database. These possibilities are not covered in this tutorial. @@ -308,7 +308,7 @@ createdb: database creation failed: ERROR: permission denied to create database Writing a custom application, using one of the several available language bindings. These possibilities are discussed - further in . + further in . @@ -402,7 +402,7 @@ mydb=# command shell. (For more internal commands, type \? at the psql prompt.) The full capabilities of psql are documented in - . In this tutorial we will not use these + . In this tutorial we will not use these features explicitly, but you can use them yourself when it is helpful.
diff --git a/doc/src/sgml/storage.sgml b/doc/src/sgml/storage.sgml index aed2cf8bca..8ef2ac8010 100644 --- a/doc/src/sgml/storage.sgml +++ b/doc/src/sgml/storage.sgml @@ -21,23 +21,23 @@ directories. Traditionally, the configuration and data files used by a database cluster are stored together within the cluster's data -directory, commonly referred to as PGDATA (after the name of the +directory, commonly referred to as PGDATA (after the name of the environment variable that can be used to define it). A common location for -PGDATA is /var/lib/pgsql/data. Multiple clusters, +PGDATA is /var/lib/pgsql/data. Multiple clusters, managed by different server instances, can exist on the same machine. -The PGDATA directory contains several subdirectories and control -files, as shown in . In addition to +The PGDATA directory contains several subdirectories and control +files, as shown in . In addition to these required items, the cluster configuration files postgresql.conf, pg_hba.conf, and pg_ident.conf are traditionally stored in -PGDATA, although it is possible to place them elsewhere. +PGDATA, although it is possible to place them elsewhere. -Contents of <varname>PGDATA</> +Contents of <varname>PGDATA</varname> @@ -51,126 +51,126 @@ Item - PG_VERSION + PG_VERSION A file containing the major version number of PostgreSQL - base + base Subdirectory containing per-database subdirectories - current_logfiles + current_logfiles File recording the log file(s) currently written to by the logging collector - global + global Subdirectory containing cluster-wide tables, such as - pg_database + pg_database - pg_commit_ts + pg_commit_ts Subdirectory containing transaction commit timestamp data - pg_dynshmem + pg_dynshmem Subdirectory containing files used by the dynamic shared memory subsystem - pg_logical + pg_logical Subdirectory containing status data for logical decoding - pg_multixact + pg_multixact Subdirectory containing multitransaction status data (used for shared row locks) - pg_notify + pg_notify Subdirectory containing LISTEN/NOTIFY status data - pg_replslot + pg_replslot Subdirectory containing replication slot data - pg_serial + pg_serial Subdirectory containing information about committed serializable transactions - pg_snapshots + pg_snapshots Subdirectory containing exported snapshots - pg_stat + pg_stat Subdirectory containing permanent files for the statistics subsystem - pg_stat_tmp + pg_stat_tmp Subdirectory containing temporary files for the statistics subsystem - pg_subtrans + pg_subtrans Subdirectory containing subtransaction status data - pg_tblspc + pg_tblspc Subdirectory containing symbolic links to tablespaces - pg_twophase + pg_twophase Subdirectory containing state files for prepared transactions - pg_wal + pg_wal Subdirectory containing WAL (Write Ahead Log) files - pg_xact + pg_xact Subdirectory containing transaction commit status data - postgresql.auto.conf + postgresql.auto.conf A file used for storing configuration parameters that are set by ALTER SYSTEM - postmaster.opts + postmaster.opts A file recording the command-line options the server was last started with - postmaster.pid + postmaster.pid A lock file recording the current postmaster process ID (PID), cluster data directory path, postmaster start timestamp, port number, Unix-domain socket directory path (empty on Windows), - first valid listen_address (IP address or *, or empty if + first valid listen_address (IP address or *, or empty if not listening on TCP), and shared memory segment ID (this file is not present 
after server shutdown) @@ -182,113 +182,113 @@ last started with For each database in the cluster there is a subdirectory within -PGDATA/base, named after the database's OID in -pg_database. This subdirectory is the default location +PGDATA/base, named after the database's OID in +pg_database. This subdirectory is the default location for the database's files; in particular, its system catalogs are stored there. Each table and index is stored in a separate file. For ordinary relations, -these files are named after the table or index's filenode number, -which can be found in pg_class.relfilenode. But +these files are named after the table or index's filenode number, +which can be found in pg_class.relfilenode. But for temporary relations, the file name is of the form -tBBB_FFF, where BBB -is the backend ID of the backend which created the file, and FFF +tBBB_FFF, where BBB +is the backend ID of the backend which created the file, and FFF is the filenode number. In either case, in addition to the main file (a/k/a -main fork), each table and index has a free space map (see ), which stores information about free space available in +main fork), each table and index has a free space map (see ), which stores information about free space available in the relation. The free space map is stored in a file named with the filenode -number plus the suffix _fsm. Tables also have a -visibility map, stored in a fork with the suffix _vm, +number plus the suffix _fsm. Tables also have a +visibility map, stored in a fork with the suffix _vm, to track which pages are known to have no dead tuples. The visibility map is -described further in . Unlogged tables and indexes +described further in . Unlogged tables and indexes have a third fork, known as the initialization fork, which is stored in a fork -with the suffix _init (see ). +with the suffix _init (see ). Note that while a table's filenode often matches its OID, this is -not necessarily the case; some operations, like -TRUNCATE, REINDEX, CLUSTER and some forms -of ALTER TABLE, can change the filenode while preserving the OID. +not necessarily the case; some operations, like +TRUNCATE, REINDEX, CLUSTER and some forms +of ALTER TABLE, can change the filenode while preserving the OID. Avoid assuming that filenode and table OID are the same. -Also, for certain system catalogs including pg_class itself, -pg_class.relfilenode contains zero. The +Also, for certain system catalogs including pg_class itself, +pg_class.relfilenode contains zero. The actual filenode number of these catalogs is stored in a lower-level data -structure, and can be obtained using the pg_relation_filenode() +structure, and can be obtained using the pg_relation_filenode() function. When a table or index exceeds 1 GB, it is divided into gigabyte-sized -segments. The first segment's file name is the same as the +segments. The first segment's file name is the same as the filenode; subsequent segments are named filenode.1, filenode.2, etc. This arrangement avoids problems on platforms that have file size limitations. (Actually, 1 GB is just the default segment size. The segment size can be adjusted using the configuration option -when building PostgreSQL.) +when building PostgreSQL.) In principle, free space map and visibility map forks could require multiple segments as well, though this is unlikely to happen in practice. 
A table that has columns with potentially large entries will have an -associated TOAST table, which is used for out-of-line storage of +associated TOAST table, which is used for out-of-line storage of field values that are too large to keep in the table rows proper. -pg_class.reltoastrelid links from a table to -its TOAST table, if any. -See for more information. +pg_class.reltoastrelid links from a table to +its TOAST table, if any. +See for more information. The contents of tables and indexes are discussed further in -. +. Tablespaces make the scenario more complicated. Each user-defined tablespace -has a symbolic link inside the PGDATA/pg_tblspc +has a symbolic link inside the PGDATA/pg_tblspc directory, which points to the physical tablespace directory (i.e., the -location specified in the tablespace's CREATE TABLESPACE command). +location specified in the tablespace's CREATE TABLESPACE command). This symbolic link is named after the tablespace's OID. Inside the physical tablespace directory there is -a subdirectory with a name that depends on the PostgreSQL -server version, such as PG_9.0_201008051. (The reason for using +a subdirectory with a name that depends on the PostgreSQL +server version, such as PG_9.0_201008051. (The reason for using this subdirectory is so that successive versions of the database can use -the same CREATE TABLESPACE location value without conflicts.) +the same CREATE TABLESPACE location value without conflicts.) Within the version-specific subdirectory, there is a subdirectory for each database that has elements in the tablespace, named after the database's OID. Tables and indexes are stored within that directory, using the filenode naming scheme. -The pg_default tablespace is not accessed through -pg_tblspc, but corresponds to -PGDATA/base. Similarly, the pg_global -tablespace is not accessed through pg_tblspc, but corresponds to -PGDATA/global. +The pg_default tablespace is not accessed through +pg_tblspc, but corresponds to +PGDATA/base. Similarly, the pg_global +tablespace is not accessed through pg_tblspc, but corresponds to +PGDATA/global. -The pg_relation_filepath() function shows the entire path -(relative to PGDATA) of any relation. It is often useful +The pg_relation_filepath() function shows the entire path +(relative to PGDATA) of any relation. It is often useful as a substitute for remembering many of the above rules. But keep in mind that this function just gives the name of the first segment of the main fork of the relation — you may need to append a segment number -and/or _fsm, _vm, or _init to find all +and/or _fsm, _vm, or _init to find all the files associated with the relation. Temporary files (for operations such as sorting more data than can fit in -memory) are created within PGDATA/base/pgsql_tmp, -or within a pgsql_tmp subdirectory of a tablespace directory -if a tablespace other than pg_default is specified for them. +memory) are created within PGDATA/base/pgsql_tmp, +or within a pgsql_tmp subdirectory of a tablespace directory +if a tablespace other than pg_default is specified for them. The name of a temporary file has the form -pgsql_tmpPPP.NNN, -where PPP is the PID of the owning backend and -NNN distinguishes different temporary files of that backend. +pgsql_tmpPPP.NNN, +where PPP is the PID of the owning backend and +NNN distinguishes different temporary files of that backend. 
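As a minimal sketch of the path lookup described above (the path shown is only an example; the database OID and filenode differ between installations):

SELECT pg_relation_filepath('pg_class');
-- e.g. base/13414/1259, relative to PGDATA; append .1, .2, ... for later
-- segments, or _fsm, _vm, _init for the other forks, as described above.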
@@ -300,10 +300,10 @@ where PPP is the PID of the owning backend and TOAST - sliced breadTOAST + sliced breadTOAST -This section provides an overview of TOAST (The +This section provides an overview of TOAST (The Oversized-Attribute Storage Technique). @@ -314,36 +314,36 @@ not possible to store very large field values directly. To overcome this limitation, large field values are compressed and/or broken up into multiple physical rows. This happens transparently to the user, with only small impact on most of the backend code. The technique is affectionately -known as TOAST (or the best thing since sliced bread). -The TOAST infrastructure is also used to improve handling of +known as TOAST (or the best thing since sliced bread). +The TOAST infrastructure is also used to improve handling of large data values in-memory. -Only certain data types support TOAST — there is no need to +Only certain data types support TOAST — there is no need to impose the overhead on data types that cannot produce large field values. -To support TOAST, a data type must have a variable-length -(varlena) representation, in which, ordinarily, the first +To support TOAST, a data type must have a variable-length +(varlena) representation, in which, ordinarily, the first four-byte word of any stored value contains the total length of the value in -bytes (including itself). TOAST does not constrain the rest +bytes (including itself). TOAST does not constrain the rest of the data type's representation. The special representations collectively -called TOASTed values work by modifying or +called TOASTed values work by modifying or reinterpreting this initial length word. Therefore, the C-level functions -supporting a TOAST-able data type must be careful about how they -handle potentially TOASTed input values: an input might not +supporting a TOAST-able data type must be careful about how they +handle potentially TOASTed input values: an input might not actually consist of a four-byte length word and contents until after it's -been detoasted. (This is normally done by invoking -PG_DETOAST_DATUM before doing anything with an input value, +been detoasted. (This is normally done by invoking +PG_DETOAST_DATUM before doing anything with an input value, but in some cases more efficient approaches are possible. -See for more detail.) +See for more detail.) -TOAST usurps two bits of the varlena length word (the high-order +TOAST usurps two bits of the varlena length word (the high-order bits on big-endian machines, the low-order bits on little-endian machines), -thereby limiting the logical size of any value of a TOAST-able -data type to 1 GB (230 - 1 bytes). When both bits are zero, -the value is an ordinary un-TOASTed value of the data type, and +thereby limiting the logical size of any value of a TOAST-able +data type to 1 GB (230 - 1 bytes). When both bits are zero, +the value is an ordinary un-TOASTed value of the data type, and the remaining bits of the length word give the total datum size (including length word) in bytes. When the highest-order or lowest-order bit is set, the value has only a single-byte header instead of the normal four-byte @@ -357,7 +357,7 @@ additional space savings that is significant compared to short values. As a special case, if the remaining bits of a single-byte header are all zero (which would be impossible for a self-inclusive length), the value is a pointer to out-of-line data, with several possible alternatives as -described below. The type and size of such a TOAST pointer +described below. 
The type and size of such a TOAST pointer are determined by a code stored in the second byte of the datum. Lastly, when the highest-order or lowest-order bit is clear but the adjacent bit is set, the content of the datum has been compressed and must be @@ -365,80 +365,80 @@ decompressed before use. In this case the remaining bits of the four-byte length word give the total size of the compressed datum, not the original data. Note that compression is also possible for out-of-line data but the varlena header does not tell whether it has occurred — -the content of the TOAST pointer tells that, instead. +the content of the TOAST pointer tells that, instead. -As mentioned, there are multiple types of TOAST pointer datums. +As mentioned, there are multiple types of TOAST pointer datums. The oldest and most common type is a pointer to out-of-line data stored in -a TOAST table that is separate from, but -associated with, the table containing the TOAST pointer datum -itself. These on-disk pointer datums are created by the -TOAST management code (in access/heap/tuptoaster.c) +a TOAST table that is separate from, but +associated with, the table containing the TOAST pointer datum +itself. These on-disk pointer datums are created by the +TOAST management code (in access/heap/tuptoaster.c) when a tuple to be stored on disk is too large to be stored as-is. -Further details appear in . -Alternatively, a TOAST pointer datum can contain a pointer to +Further details appear in . +Alternatively, a TOAST pointer datum can contain a pointer to out-of-line data that appears elsewhere in memory. Such datums are necessarily short-lived, and will never appear on-disk, but they are very useful for avoiding copying and redundant processing of large data values. -Further details appear in . +Further details appear in . The compression technique used for either in-line or out-of-line compressed data is a fairly simple and very fast member of the LZ family of compression techniques. See -src/common/pg_lzcompress.c for the details. +src/common/pg_lzcompress.c for the details. Out-of-line, on-disk TOAST storage -If any of the columns of a table are TOAST-able, the table will -have an associated TOAST table, whose OID is stored in the table's -pg_class.reltoastrelid entry. On-disk -TOASTed values are kept in the TOAST table, as +If any of the columns of a table are TOAST-able, the table will +have an associated TOAST table, whose OID is stored in the table's +pg_class.reltoastrelid entry. On-disk +TOASTed values are kept in the TOAST table, as described in more detail below. Out-of-line values are divided (after compression if used) into chunks of at -most TOAST_MAX_CHUNK_SIZE bytes (by default this value is chosen +most TOAST_MAX_CHUNK_SIZE bytes (by default this value is chosen so that four chunk rows will fit on a page, making it about 2000 bytes). -Each chunk is stored as a separate row in the TOAST table +Each chunk is stored as a separate row in the TOAST table belonging to the owning table. Every -TOAST table has the columns chunk_id (an OID -identifying the particular TOASTed value), -chunk_seq (a sequence number for the chunk within its value), -and chunk_data (the actual data of the chunk). A unique index -on chunk_id and chunk_seq provides fast +TOAST table has the columns chunk_id (an OID +identifying the particular TOASTed value), +chunk_seq (a sequence number for the chunk within its value), +and chunk_data (the actual data of the chunk). 
A unique index +on chunk_id and chunk_seq provides fast retrieval of the values. A pointer datum representing an out-of-line on-disk -TOASTed value therefore needs to store the OID of the -TOAST table in which to look and the OID of the specific value -(its chunk_id). For convenience, pointer datums also store the +TOASTed value therefore needs to store the OID of the +TOAST table in which to look and the OID of the specific value +(its chunk_id). For convenience, pointer datums also store the logical datum size (original uncompressed data length) and physical stored size (different if compression was applied). Allowing for the varlena header bytes, -the total size of an on-disk TOAST pointer datum is therefore 18 +the total size of an on-disk TOAST pointer datum is therefore 18 bytes regardless of the actual size of the represented value. -The TOAST management code is triggered only +The TOAST management code is triggered only when a row value to be stored in a table is wider than -TOAST_TUPLE_THRESHOLD bytes (normally 2 kB). -The TOAST code will compress and/or move +TOAST_TUPLE_THRESHOLD bytes (normally 2 kB). +The TOAST code will compress and/or move field values out-of-line until the row value is shorter than -TOAST_TUPLE_TARGET bytes (also normally 2 kB) +TOAST_TUPLE_TARGET bytes (also normally 2 kB, adjustable) or no more gains can be had. During an UPDATE operation, values of unchanged fields are normally preserved as-is; so an -UPDATE of a row with out-of-line values incurs no TOAST costs if +UPDATE of a row with out-of-line values incurs no TOAST costs if none of the out-of-line values change. -The TOAST management code recognizes four different strategies -for storing TOAST-able columns on disk: +The TOAST management code recognizes four different strategies +for storing TOAST-able columns on disk: @@ -447,13 +447,13 @@ for storing TOAST-able columns on disk: out-of-line storage; furthermore it disables use of single-byte headers for varlena types. This is the only possible strategy for - columns of non-TOAST-able data types. + columns of non-TOAST-able data types. EXTENDED allows both compression and out-of-line - storage. This is the default for most TOAST-able data types. + storage. This is the default for most TOAST-able data types. Compression will be attempted first, then out-of-line storage if the row is still too big. @@ -478,9 +478,14 @@ for storing TOAST-able columns on disk: -Each TOAST-able data type specifies a default strategy for columns +Each TOAST-able data type specifies a default strategy for columns of that data type, but the strategy for a given table column can be altered -with ALTER TABLE ... SET STORAGE. +with ALTER TABLE ... SET STORAGE. + + + +TOAST_TUPLE_TARGET can be adjusted for each table using +ALTER TABLE ... SET (toast_tuple_target = N) @@ -488,15 +493,15 @@ This scheme has a number of advantages compared to a more straightforward approach such as allowing row values to span pages. Assuming that queries are usually qualified by comparisons against relatively small key values, most of the work of the executor will be done using the main row entry. The big values -of TOASTed attributes will only be pulled out (if selected at all) +of TOASTed attributes will only be pulled out (if selected at all) at the time the result set is sent to the client. Thus, the main table is much smaller and more of its rows fit in the shared buffer cache than would be the case without any out-of-line storage. 
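As a brief, hypothetical sketch of the column-level storage strategy and the per-table toast_tuple_target option described above (the table name blobs, the column name payload, and the value 4080 are made up for illustration):

ALTER TABLE blobs ALTER COLUMN payload SET STORAGE EXTERNAL;  -- out-of-line storage, no compression
ALTER TABLE blobs SET (toast_tuple_target = 4080);            -- aim to toast rows wider than about
                                                              -- 4080 bytes rather than the ~2 kB default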
Sort sets shrink also, and sorts will more often be done entirely in memory. A little test showed that a table containing typical HTML pages and their URLs was stored in about half of the -raw data size including the TOAST table, and that the main table +raw data size including the TOAST table, and that the main table contained only about 10% of the entire data (the URLs and some small HTML -pages). There was no run time difference compared to an un-TOASTed +pages). There was no run time difference compared to an un-TOASTed comparison table, in which all the HTML pages were cut down to 7 kB to fit. @@ -506,16 +511,16 @@ comparison table, in which all the HTML pages were cut down to 7 kB to fit. Out-of-line, in-memory TOAST storage -TOAST pointers can point to data that is not on disk, but is +TOAST pointers can point to data that is not on disk, but is elsewhere in the memory of the current server process. Such pointers obviously cannot be long-lived, but they are nonetheless useful. There are currently two sub-cases: -pointers to indirect data and -pointers to expanded data. +pointers to indirect data and +pointers to expanded data. -Indirect TOAST pointers simply point at a non-indirect varlena +Indirect TOAST pointers simply point at a non-indirect varlena value stored somewhere in memory. This case was originally created merely as a proof of concept, but it is currently used during logical decoding to avoid possibly having to create physical tuples exceeding 1 GB (as pulling @@ -526,34 +531,34 @@ and there is no infrastructure to help with this. -Expanded TOAST pointers are useful for complex data types +Expanded TOAST pointers are useful for complex data types whose on-disk representation is not especially suited for computational purposes. As an example, the standard varlena representation of a -PostgreSQL array includes dimensionality information, a +PostgreSQL array includes dimensionality information, a nulls bitmap if there are any null elements, then the values of all the elements in order. When the element type itself is variable-length, the -only way to find the N'th element is to scan through all the +only way to find the N'th element is to scan through all the preceding elements. This representation is appropriate for on-disk storage because of its compactness, but for computations with the array it's much -nicer to have an expanded or deconstructed +nicer to have an expanded or deconstructed representation in which all the element starting locations have been -identified. The TOAST pointer mechanism supports this need by +identified. The TOAST pointer mechanism supports this need by allowing a pass-by-reference Datum to point to either a standard varlena -value (the on-disk representation) or a TOAST pointer that +value (the on-disk representation) or a TOAST pointer that points to an expanded representation somewhere in memory. The details of this expanded representation are up to the data type, though it must have a standard header and meet the other API requirements given -in src/include/utils/expandeddatum.h. C-level functions +in src/include/utils/expandeddatum.h. C-level functions working with the data type can choose to handle either representation. 
Functions that do not know about the expanded representation, but simply -apply PG_DETOAST_DATUM to their inputs, will automatically +apply PG_DETOAST_DATUM to their inputs, will automatically receive the traditional varlena representation; so support for an expanded representation can be introduced incrementally, one function at a time. -TOAST pointers to expanded values are further broken down -into read-write and read-only pointers. +TOAST pointers to expanded values are further broken down +into read-write and read-only pointers. The pointed-to representation is the same either way, but a function that receives a read-write pointer is allowed to modify the referenced value in-place, whereas one that receives a read-only pointer must not; it must @@ -563,11 +568,11 @@ unnecessary copying of expanded values during query execution. -For all types of in-memory TOAST pointer, the TOAST +For all types of in-memory TOAST pointer, the TOAST management code ensures that no such pointer datum can accidentally get -stored on disk. In-memory TOAST pointers are automatically +stored on disk. In-memory TOAST pointers are automatically expanded to normal in-line varlena values before storage — and then -possibly converted to on-disk TOAST pointers, if the containing +possibly converted to on-disk TOAST pointers, if the containing tuple would otherwise be too big. @@ -582,36 +587,36 @@ tuple would otherwise be too big. Free Space Map -FSMFree Space Map +FSMFree Space Map Each heap and index relation, except for hash indexes, has a Free Space Map (FSM) to keep track of available space in the relation. It's stored alongside the main relation data in a separate relation fork, named after the -filenode number of the relation, plus a _fsm suffix. For example, +filenode number of the relation, plus a _fsm suffix. For example, if the filenode of a relation is 12345, the FSM is stored in a file called -12345_fsm, in the same directory as the main relation file. +12345_fsm, in the same directory as the main relation file. -The Free Space Map is organized as a tree of FSM pages. The -bottom level FSM pages store the free space available on each +The Free Space Map is organized as a tree of FSM pages. The +bottom level FSM pages store the free space available on each heap (or index) page, using one byte to represent each such page. The upper levels aggregate information from the lower levels. -Within each FSM page is a binary tree, stored in an array with +Within each FSM page is a binary tree, stored in an array with one byte per node. Each leaf node represents a heap page, or a lower level -FSM page. In each non-leaf node, the higher of its children's +FSM page. In each non-leaf node, the higher of its children's values is stored. The maximum value in the leaf nodes is therefore stored at the root. -See src/backend/storage/freespace/README for more details on -how the FSM is structured, and how it's updated and searched. -The module +See src/backend/storage/freespace/README for more details on +how the FSM is structured, and how it's updated and searched. +The module can be used to examine the information stored in free space maps. @@ -624,7 +629,7 @@ can be used to examine the information stored in free space maps. Visibility Map -VMVisibility Map +VMVisibility Map Each heap relation has a Visibility Map @@ -632,9 +637,9 @@ Each heap relation has a Visibility Map visible to all active transactions; it also keeps track of which pages contain only frozen tuples. 
It's stored alongside the main relation data in a separate relation fork, named after the -filenode number of the relation, plus a _vm suffix. For example, +filenode number of the relation, plus a _vm suffix. For example, if the filenode of a relation is 12345, the VM is stored in a file called -12345_vm, in the same directory as the main relation file. +12345_vm, in the same directory as the main relation file. Note that indexes do not have VMs. @@ -644,7 +649,7 @@ indicates that the page is all-visible, or in other words that the page does not contain any tuples that need to be vacuumed. This information can also be used by index-only -scans to answer queries using only the index tuple. +scans to answer queries using only the index tuple. The second bit, if set, means that all tuples on the page have been frozen. That means that even an anti-wraparound vacuum need not revisit the page. @@ -657,7 +662,7 @@ cleared by any data-modifying operations on a page. -The module can be used to examine the +The module can be used to examine the information stored in the visibility map. @@ -695,7 +700,7 @@ This section provides an overview of the page format used within the item layout rules. -Sequences and TOAST tables are formatted just like a regular table. +Sequences and TOAST tables are formatted just like a regular table. @@ -708,17 +713,17 @@ an item is a row; in an index, an item is an index entry. -Every table and index is stored as an array of pages of a +Every table and index is stored as an array of pages of a fixed size (usually 8 kB, although a different page size can be selected when compiling the server). In a table, all the pages are logically equivalent, so a particular item (row) can be stored in any page. In -indexes, the first page is generally reserved as a metapage +indexes, the first page is generally reserved as a metapage holding control information, and there can be different types of pages within the index, depending on the index access method. - shows the overall layout of a page. + shows the overall layout of a page. There are five parts to each page. @@ -773,10 +778,10 @@ data. Empty in ordinary tables. The first 24 bytes of each page consists of a page header - (PageHeaderData). Its format is detailed in . The first field tracks the most + (PageHeaderData). Its format is detailed in . The first field tracks the most recent WAL entry related to this page. The second field contains - the page checksum if are + the page checksum if are enabled. Next is a 2-byte field containing flag bits. This is followed by three 2-byte integer fields (pd_lower, pd_upper, and @@ -871,7 +876,6 @@ data. Empty in ordinary tables. - Following the page header are item identifiers (ItemIdData), each requiring four bytes. An item identifier contains a byte-offset to @@ -880,7 +884,7 @@ data. Empty in ordinary tables. New item identifiers are allocated as needed from the beginning of the unallocated space. The number of item identifiers present can be determined by looking at - pd_lower, which is increased to allocate a new identifier. + pd_lower, which is increased to allocate a new identifier. Because an item identifier is never moved until it is freed, its index can be used on a long-term basis to reference an item, even when the item itself is moved @@ -904,35 +908,39 @@ data. Empty in ordinary tables. The final section is the special section which can - contain anything the access method wishes to store. For example, + contain anything the access method wishes to store. 
For example, b-tree indexes store links to the page's left and right siblings, as well as some other data relevant to the index structure. Ordinary tables do not use a special section at all (indicated by setting - pd_special to equal the page size). + pd_special to equal the page size). + + + Table Row Layout + All table rows are structured in the same way. There is a fixed-size header (occupying 23 bytes on most machines), followed by an optional null bitmap, an optional object ID field, and the user data. The header is detailed - in . The actual user data + in . The actual user data (columns of the row) begins at the offset indicated by - t_hoff, which must always be a multiple of the MAXALIGN + t_hoff, which must always be a multiple of the MAXALIGN distance for the platform. The null bitmap is only present if the HEAP_HASNULL bit is set in t_infomask. If it is present it begins just after the fixed header and occupies enough bytes to have one bit per data column - (that is, t_natts bits altogether). In this list of bits, a + (that is, t_natts bits altogether). In this list of bits, a 1 bit indicates not-null, a 0 bit is a null. When the bitmap is not present, all columns are assumed not-null. The object ID is only present if the HEAP_HASOID bit is set in t_infomask. If present, it appears just - before the t_hoff boundary. Any padding needed to make - t_hoff a MAXALIGN multiple will appear between the null + before the t_hoff boundary. Any padding needed to make + t_hoff a MAXALIGN multiple will appear between the null bitmap and the object ID. (This in turn ensures that the object ID is suitably aligned.) @@ -1031,10 +1039,11 @@ data. Empty in ordinary tables. All variable-length data types share the common header structure struct varlena, which includes the total length of the stored value and some flag bits. Depending on the flags, the data can be either - inline or in a TOAST table; - it might be compressed, too (see ). + inline or in a TOAST table; + it might be compressed, too (see ). + diff --git a/doc/src/sgml/stylesheet-common.xsl b/doc/src/sgml/stylesheet-common.xsl index 658a5ac5e1..6d26e7e5c9 100644 --- a/doc/src/sgml/stylesheet-common.xsl +++ b/doc/src/sgml/stylesheet-common.xsl @@ -36,6 +36,7 @@ + 1 diff --git a/doc/src/sgml/stylesheet-html-common.xsl b/doc/src/sgml/stylesheet-html-common.xsl index 72fac1e806..17b7230d2c 100644 --- a/doc/src/sgml/stylesheet-html-common.xsl +++ b/doc/src/sgml/stylesheet-html-common.xsl @@ -263,4 +263,29 @@ set toc,title + + + + + + + + + + + + + + + + id- + + + + + + + + + diff --git a/doc/src/sgml/stylesheet-man.xsl b/doc/src/sgml/stylesheet-man.xsl index e9c407230c..fcb485c293 100644 --- a/doc/src/sgml/stylesheet-man.xsl +++ b/doc/src/sgml/stylesheet-man.xsl @@ -12,11 +12,13 @@ 0 0 +0 - + 32 40 + + + + + + diff --git a/doc/src/sgml/syntax.sgml b/doc/src/sgml/syntax.sgml index a2d136eaf8..86869bdf8c 100644 --- a/doc/src/sgml/syntax.sgml +++ b/doc/src/sgml/syntax.sgml @@ -75,7 +75,7 @@ INSERT INTO MY_TABLE VALUES (3, 'hi there'); a SET token to appear in a certain position, and this particular variation of INSERT also requires a VALUES in order to be complete. The - precise syntax rules for each command are described in . + precise syntax rules for each command are described in . @@ -109,7 +109,7 @@ INSERT INTO MY_TABLE VALUES (3, 'hi there'); same lexical structure, meaning that one cannot know whether a token is an identifier or a key word without knowing the language. A complete list of key words can be found in . 
+ linkend="sql-keywords-appendix"/>. @@ -119,7 +119,7 @@ INSERT INTO MY_TABLE VALUES (3, 'hi there'); (_). Subsequent characters in an identifier or key word can be letters, underscores, digits (0-9), or dollar signs - ($). Note that dollar signs are not allowed in identifiers + ($). Note that dollar signs are not allowed in identifiers according to the letter of the SQL standard, so their use might render applications less portable. The SQL standard will not define a key word that contains @@ -240,7 +240,7 @@ U&"d!0061t!+000061" UESCAPE '!' The Unicode escape syntax works only when the server encoding is - UTF8. When other server encodings are used, only code + UTF8. When other server encodings are used, only code points in the ASCII range (up to \007F) can be specified. Both the 4-digit and the 6-digit form can be used to specify UTF-16 surrogate pairs to compose characters with code @@ -258,7 +258,7 @@ U&"d!0061t!+000061" UESCAPE '!' PostgreSQL, but "Foo" and "FOO" are different from these three and each other. (The folding of - unquoted names to lower case in PostgreSQL is + unquoted names to lower case in PostgreSQL is incompatible with the SQL standard, which says that unquoted names should be folded to upper case. Thus, foo should be equivalent to "FOO" not @@ -305,8 +305,8 @@ U&"d!0061t!+000061" UESCAPE '!' a single-quote character within a string constant, write two adjacent single quotes, e.g., 'Dianne''s horse'. - Note that this is not the same as a double-quote - character ("). + Note that this is not the same as a double-quote + character ("). @@ -343,17 +343,17 @@ SELECT 'foo' 'bar'; - PostgreSQL also accepts escape + PostgreSQL also accepts escape string constants, which are an extension to the SQL standard. An escape string constant is specified by writing the letter E (upper or lower case) just before the opening single - quote, e.g., E'foo'. (When continuing an escape string - constant across lines, write E only before the first opening + quote, e.g., E'foo'. (When continuing an escape string + constant across lines, write E only before the first opening quote.) - Within an escape string, a backslash character (\) begins a - C-like backslash escape sequence, in which the combination + Within an escape string, a backslash character (\) begins a + C-like backslash escape sequence, in which the combination of backslash and following character(s) represent a special byte - value, as shown in . + value, as shown in .
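For instance (a minimal sketch; standard_conforming_strings is assumed to be at its default setting of on):

SELECT E'line one\nline two';   -- \n becomes a newline character
SELECT 'line one\nline two';    -- in a regular string constant the backslash is just a literal backslash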
@@ -361,7 +361,7 @@ SELECT 'foo' 'bar'; - Backslash Escape Sequence + Backslash Escape Sequence Interpretation @@ -419,9 +419,9 @@ SELECT 'foo' 'bar'; Any other character following a backslash is taken literally. Thus, to - include a backslash character, write two backslashes (\\). + include a backslash character, write two backslashes (\\). Also, a single quote can be included in an escape string by writing - \', in addition to the normal way of ''. + \', in addition to the normal way of ''. @@ -430,44 +430,44 @@ SELECT 'foo' 'bar'; valid characters in the server character set encoding. When the server encoding is UTF-8, then the Unicode escapes or the alternative Unicode escape syntax, explained - in , should be used + in , should be used instead. (The alternative would be doing the UTF-8 encoding by hand and writing out the bytes, which would be very cumbersome.) The Unicode escape syntax works fully only when the server - encoding is UTF8. When other server encodings are + encoding is UTF8. When other server encodings are used, only code points in the ASCII range (up - to \u007F) can be specified. Both the 4-digit and + to \u007F) can be specified. Both the 4-digit and the 8-digit form can be used to specify UTF-16 surrogate pairs to compose characters with code points larger than U+FFFF, although the availability of the 8-digit form technically makes this unnecessary. (When surrogate pairs are used when the server - encoding is UTF8, they are first combined into a + encoding is UTF8, they are first combined into a single code point that is then encoded in UTF-8.) If the configuration parameter - is off, + is off, then PostgreSQL recognizes backslash escapes in both regular and escape string constants. However, as of - PostgreSQL 9.1, the default is on, meaning + PostgreSQL 9.1, the default is on, meaning that backslash escapes are recognized only in escape string constants. This behavior is more standards-compliant, but might break applications which rely on the historical behavior, where backslash escapes were always recognized. As a workaround, you can set this parameter - to off, but it is better to migrate away from using backslash + to off, but it is better to migrate away from using backslash escapes. If you need to use a backslash escape to represent a special - character, write the string constant with an E. + character, write the string constant with an E. - In addition to standard_conforming_strings, the configuration - parameters and - govern treatment of backslashes + In addition to standard_conforming_strings, the configuration + parameters and + govern treatment of backslashes in string constants. @@ -525,13 +525,13 @@ U&'d!0061t!+000061' UESCAPE '!' The Unicode escape syntax works only when the server encoding is - UTF8. When other server encodings are used, only + UTF8. When other server encodings are used, only code points in the ASCII range (up to \007F) can be specified. Both the 4-digit and the 6-digit form can be used to specify UTF-16 surrogate pairs to compose characters with code points larger than U+FFFF, although the availability of the 6-digit form technically makes this unnecessary. (When surrogate - pairs are used when the server encoding is UTF8, they + pairs are used when the server encoding is UTF8, they are first combined into a single code point that is then encoded in UTF-8.) @@ -539,7 +539,7 @@ U&'d!0061t!+000061' UESCAPE '!' 
Also, the Unicode escape syntax for string constants only works when the configuration - parameter is + parameter is turned on. This is because otherwise this syntax could confuse clients that parse the SQL statements to the point that it could lead to SQL injections and similar security issues. If the @@ -573,7 +573,7 @@ U&'d!0061t!+000061' UESCAPE '!' sign, an arbitrary sequence of characters that makes up the string content, a dollar sign, the same tag that began this dollar quote, and a dollar sign. For example, here are two - different ways to specify the string Dianne's horse + different ways to specify the string Dianne's horse using dollar quoting: $$Dianne's horse$$ @@ -598,11 +598,11 @@ BEGIN END; $function$ - Here, the sequence $q$[\t\r\n\v\\]$q$ represents a - dollar-quoted literal string [\t\r\n\v\\], which will + Here, the sequence $q$[\t\r\n\v\\]$q$ represents a + dollar-quoted literal string [\t\r\n\v\\], which will be recognized when the function body is executed by - PostgreSQL. But since the sequence does not match - the outer dollar quoting delimiter $function$, it is + PostgreSQL. But since the sequence does not match + the outer dollar quoting delimiter $function$, it is just some more characters within the constant so far as the outer string is concerned. @@ -707,13 +707,13 @@ $function$ bigintnumeric A numeric constant that contains neither a decimal point nor an - exponent is initially presumed to be type integer if its - value fits in type integer (32 bits); otherwise it is - presumed to be type bigint if its - value fits in type bigint (64 bits); otherwise it is - taken to be type numeric. Constants that contain decimal + exponent is initially presumed to be type integer if its + value fits in type integer (32 bits); otherwise it is + presumed to be type bigint if its + value fits in type bigint (64 bits); otherwise it is + taken to be type numeric. Constants that contain decimal points and/or exponents are always initially presumed to be type - numeric. + numeric. @@ -724,7 +724,7 @@ $function$ force a numeric value to be interpreted as a specific data type by casting it.type cast For example, you can force a numeric value to be treated as type - real (float4) by writing: + real (float4) by writing: REAL '1.23' -- string style @@ -772,25 +772,25 @@ CAST ( 'string' AS type ) typename ( 'string' ) but not all type names can be used in this way; see for details. + linkend="sql-syntax-type-casts"/> for details. The ::, CAST(), and function-call syntaxes can also be used to specify run-time type conversions of arbitrary expressions, as discussed in . To avoid syntactic ambiguity, the - type 'string' + linkend="sql-syntax-type-casts"/>. To avoid syntactic ambiguity, the + type 'string' syntax can only be used to specify the type of a simple literal constant. Another restriction on the - type 'string' + type 'string' syntax is that it does not work for array types; use :: or CAST() to specify the type of an array constant. - The CAST() syntax conforms to SQL. The - type 'string' + The CAST() syntax conforms to SQL. The + type 'string' syntax is a generalization of the standard: SQL specifies this syntax only for a few data types, but PostgreSQL allows it for all types. The syntax with @@ -827,7 +827,7 @@ CAST ( 'string' AS type ) - A multiple-character operator name cannot end in + or -, + A multiple-character operator name cannot end in + or -, unless the name also contains at least one of these characters: ~ ! @ # % ^ & | ` ? 
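Picking up the typing rules for numeric constants given a little earlier, pg_typeof() offers a quick way to observe the initially presumed type (the particular constants here are arbitrary):

SELECT pg_typeof(42), pg_typeof(4200000000), pg_typeof(4.2);
--      integer        bigint                numeric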
@@ -885,7 +885,7 @@ CAST ( 'string' AS type ) Brackets ([]) are used to select the elements - of an array. See for more information + of an array. See for more information on arrays. @@ -909,7 +909,7 @@ CAST ( 'string' AS type ) The colon (:) is used to select slices from arrays. (See .) In certain SQL dialects (such as Embedded + linkend="arrays"/>.) In certain SQL dialects (such as Embedded SQL), the colon is used to prefix variable names. @@ -980,8 +980,8 @@ CAST ( 'string' AS type ) - shows the precedence and - associativity of the operators in PostgreSQL. + shows the precedence and + associativity of the operators in PostgreSQL. Most operators have the same precedence and are left-associative. The precedence and associativity of the operators is hard-wired into the parser. @@ -1085,8 +1085,8 @@ SELECT (5 !) - 6; IS ISNULL NOTNULL - IS TRUE, IS FALSE, IS - NULL, IS DISTINCT FROM, etc + IS TRUE, IS FALSE, IS + NULL, IS DISTINCT FROM, etc @@ -1121,34 +1121,34 @@ SELECT (5 !) - 6; When a schema-qualified operator name is used in the - OPERATOR syntax, as for example in: + OPERATOR syntax, as for example in: SELECT 3 OPERATOR(pg_catalog.+) 4; - the OPERATOR construct is taken to have the default precedence - shown in for - any other operator. This is true no matter - which specific operator appears inside OPERATOR(). + the OPERATOR construct is taken to have the default precedence + shown in for + any other operator. This is true no matter + which specific operator appears inside OPERATOR(). - PostgreSQL versions before 9.5 used slightly different + PostgreSQL versions before 9.5 used slightly different operator precedence rules. In particular, <= >= and <> used to be treated as - generic operators; IS tests used to have higher priority; - and NOT BETWEEN and related constructs acted inconsistently, - being taken in some cases as having the precedence of NOT - rather than BETWEEN. These rules were changed for better + generic operators; IS tests used to have higher priority; + and NOT BETWEEN and related constructs acted inconsistently, + being taken in some cases as having the precedence of NOT + rather than BETWEEN. These rules were changed for better compliance with the SQL standard and to reduce confusion from inconsistent treatment of logically equivalent constructs. In most cases, these changes will result in no behavioral change, or perhaps - in no such operator failures which can be resolved by adding + in no such operator failures which can be resolved by adding parentheses. However there are corner cases in which a query might change behavior without any parsing error being reported. If you are concerned about whether these changes have silently broken something, you can test your application with the configuration - parameter turned on + parameter turned on to see if any warnings are logged. @@ -1279,7 +1279,7 @@ SELECT 3 OPERATOR(pg_catalog.+) 4; Another value expression in parentheses (used to group subexpressions and override - precedenceparenthesis) + precedenceparenthesis) @@ -1290,13 +1290,13 @@ SELECT 3 OPERATOR(pg_catalog.+) 4; be classified as an expression but do not follow any general syntax rules. These generally have the semantics of a function or operator and are explained in the appropriate location in . An example is the IS NULL + linkend="functions"/>. An example is the IS NULL clause. We have already discussed constants in . The following sections discuss + linkend="sql-syntax-constants"/>. The following sections discuss the remaining options. 
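To make the earlier point about OPERATOR() concrete: because the construct always receives the default (any other operator) precedence, even a familiar arithmetic operator can group differently when written this way. A small sketch using the standard pg_catalog operators:

SELECT 1 + 2 * 3;                        -- 7: multiplication binds tighter than addition
SELECT 1 + 2 OPERATOR(pg_catalog.*) 3;   -- 9: OPERATOR(...) has lower precedence than +,
                                         --    so the addition is performed first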
@@ -1319,7 +1319,7 @@ SELECT 3 OPERATOR(pg_catalog.+) 4; table (possibly qualified with a schema name), or an alias for a table defined by means of a FROM clause. The correlation name and separating dot can be omitted if the column name - is unique across all the tables being used in the current query. (See also .) + is unique across all the tables being used in the current query. (See also .) @@ -1376,7 +1376,7 @@ CREATE FUNCTION dept(text) RETURNS dept expression[subscript] - or multiple adjacent elements (an array slice) can be extracted + or multiple adjacent elements (an array slice) can be extracted by writing expression[lower_subscript:upper_subscript] @@ -1402,7 +1402,7 @@ $1[10:42] The parentheses in the last example are required. - See for more about arrays. + See for more about arrays. @@ -1443,8 +1443,8 @@ $1.somecolumn The parentheses are required here to show that - compositecol is a column name not a table name, - or that mytable is a table name not a schema name + compositecol is a column name not a table name, + or that mytable is a table name not a schema name in the second case. @@ -1455,7 +1455,7 @@ $1.somecolumn (compositecol).* This notation behaves differently depending on context; - see for details. + see for details. @@ -1475,15 +1475,15 @@ $1.somecolumn expression operator (unary postfix operator) where the operator token follows the syntax - rules of , or is one of the + rules of , or is one of the key words AND, OR, and NOT, or is a qualified operator name in the form: -OPERATOR(schema.operatorname) +OPERATOR(schema.operatorname) Which particular operators exist and whether they are unary or binary depends on what operators have been - defined by the system or the user. + defined by the system or the user. describes the built-in operators. @@ -1514,13 +1514,19 @@ sqrt(2) - The list of built-in functions is in . + The list of built-in functions is in . Other functions can be added by the user. + + When issuing queries in a database where some users mistrust other users, + observe security precautions from when + writing function calls. + + The arguments can optionally have names attached. - See for details. + See for details. @@ -1528,11 +1534,11 @@ sqrt(2) A function that takes a single argument of composite type can optionally be called using field-selection syntax, and conversely field selection can be written in functional style. That is, the - notations col(table) and table.col are + notations col(table) and table.col are interchangeable. This behavior is not SQL-standard but is provided - in PostgreSQL because it allows use of functions to - emulate computed fields. For more information see - . + in PostgreSQL because it allows use of functions to + emulate computed fields. For more information see + . @@ -1592,7 +1598,7 @@ sqrt(2) The fourth form invokes the aggregate once for each input row; since no particular input value is specified, it is generally only useful for the count(*) aggregate function. - The last form is used with ordered-set aggregate + The last form is used with ordered-set aggregate functions, which are described below. @@ -1607,7 +1613,7 @@ sqrt(2) For example, count(*) yields the total number of input rows; count(f1) yields the number of input rows in which f1 is non-null, since - count ignores nulls; and + count ignores nulls; and count(distinct f1) yields the number of distinct non-null values of f1. @@ -1615,14 +1621,14 @@ sqrt(2) Ordinarily, the input rows are fed to the aggregate function in an unspecified order. 
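A short, hypothetical illustration of the count variants mentioned above (t and its nullable column f1 are made-up names):

SELECT count(*), count(f1), count(DISTINCT f1) FROM t;
-- all rows,   rows with non-null f1,   distinct non-null f1 values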
In many cases this does not matter; for example, - min produces the same result no matter what order it + min produces the same result no matter what order it receives the inputs in. However, some aggregate functions - (such as array_agg and string_agg) produce + (such as array_agg and string_agg) produce results that depend on the ordering of the input rows. When using - such an aggregate, the optional order_by_clause can be - used to specify the desired ordering. The order_by_clause - has the same syntax as for a query-level ORDER BY clause, as - described in , except that its expressions + such an aggregate, the optional order_by_clause can be + used to specify the desired ordering. The order_by_clause + has the same syntax as for a query-level ORDER BY clause, as + described in , except that its expressions are always just expressions and cannot be output-column names or numbers. For example: @@ -1632,7 +1638,7 @@ SELECT array_agg(a ORDER BY b DESC) FROM table; When dealing with multiple-argument aggregate functions, note that the - ORDER BY clause goes after all the aggregate arguments. + ORDER BY clause goes after all the aggregate arguments. For example, write this: SELECT string_agg(a, ',' ORDER BY a) FROM table; @@ -1642,58 +1648,58 @@ SELECT string_agg(a, ',' ORDER BY a) FROM table; SELECT string_agg(a ORDER BY a, ',') FROM table; -- incorrect The latter is syntactically valid, but it represents a call of a - single-argument aggregate function with two ORDER BY keys + single-argument aggregate function with two ORDER BY keys (the second one being rather useless since it's a constant). - If DISTINCT is specified in addition to an - order_by_clause, then all the ORDER BY + If DISTINCT is specified in addition to an + order_by_clause, then all the ORDER BY expressions must match regular arguments of the aggregate; that is, you cannot sort on an expression that is not included in the - DISTINCT list. + DISTINCT list. - The ability to specify both DISTINCT and ORDER BY - in an aggregate function is a PostgreSQL extension. + The ability to specify both DISTINCT and ORDER BY + in an aggregate function is a PostgreSQL extension. - Placing ORDER BY within the aggregate's regular argument + Placing ORDER BY within the aggregate's regular argument list, as described so far, is used when ordering the input rows for general-purpose and statistical aggregates, for which ordering is optional. There is a subclass of aggregate functions called ordered-set - aggregates for which an order_by_clause - is required, usually because the aggregate's computation is + aggregates for which an order_by_clause + is required, usually because the aggregate's computation is only sensible in terms of a specific ordering of its input rows. Typical examples of ordered-set aggregates include rank and percentile calculations. For an ordered-set aggregate, the order_by_clause is written - inside WITHIN GROUP (...), as shown in the final syntax + inside WITHIN GROUP (...), as shown in the final syntax alternative above. The expressions in the order_by_clause are evaluated once per input row just like regular aggregate arguments, sorted as per the order_by_clause's requirements, and fed to the aggregate function as input arguments. (This is unlike the case - for a non-WITHIN GROUP order_by_clause, + for a non-WITHIN GROUP order_by_clause, which is not treated as argument(s) to the aggregate function.) 
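As a small sketch of the WITHIN GROUP syntax just introduced (trees and species are hypothetical names; mode() is a built-in ordered-set aggregate that takes no direct arguments):

SELECT mode() WITHIN GROUP (ORDER BY species) FROM trees;
-- returns the most frequent species value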
The - argument expressions preceding WITHIN GROUP, if any, are - called direct arguments to distinguish them from - the aggregated arguments listed in + argument expressions preceding WITHIN GROUP, if any, are + called direct arguments to distinguish them from + the aggregated arguments listed in the order_by_clause. Unlike regular aggregate arguments, direct arguments are evaluated only once per aggregate call, not once per input row. This means that they can contain variables only - if those variables are grouped by GROUP BY; this restriction + if those variables are grouped by GROUP BY; this restriction is the same as if the direct arguments were not inside an aggregate expression at all. Direct arguments are typically used for things like percentile fractions, which only make sense as a single value per aggregation calculation. The direct argument list can be empty; in this - case, write just () not (*). - (PostgreSQL will actually accept either spelling, but + case, write just () not (*). + (PostgreSQL will actually accept either spelling, but only the first way conforms to the SQL standard.) @@ -1712,8 +1718,8 @@ SELECT percentile_cont(0.5) WITHIN GROUP (ORDER BY income) FROM households; which obtains the 50th percentile, or median, value of - the income column from table households. - Here, 0.5 is a direct argument; it would make no sense + the income column from table households. + Here, 0.5 is a direct argument; it would make no sense for the percentile fraction to be a value varying across rows. @@ -1725,7 +1731,7 @@ SELECT percentile_cont(0.5) WITHIN GROUP (ORDER BY income) FROM households; SELECT count(*) AS unfiltered, - count(*) FILTER (WHERE i < 5) AS filtered + count(*) FILTER (WHERE i < 5) AS filtered FROM generate_series(1,10) AS s(i); unfiltered | filtered ------------+---------- @@ -1736,22 +1742,22 @@ FROM generate_series(1,10) AS s(i); The predefined aggregate functions are described in . Other aggregate functions can be added + linkend="functions-aggregate"/>. Other aggregate functions can be added by the user. An aggregate expression can only appear in the result list or - HAVING clause of a SELECT command. - It is forbidden in other clauses, such as WHERE, + HAVING clause of a SELECT command. + It is forbidden in other clauses, such as WHERE, because those clauses are logically evaluated before the results of aggregates are formed. When an aggregate expression appears in a subquery (see - and - ), the aggregate is normally + and + ), the aggregate is normally evaluated over the rows of the subquery. But an exception occurs if the aggregate's arguments (and filter_clause if any) contain only outer-level variables: @@ -1760,7 +1766,7 @@ FROM generate_series(1,10) AS s(i); as a whole is then an outer reference for the subquery it appears in, and acts as a constant over any one evaluation of that subquery. The restriction about - appearing only in the result list or HAVING clause + appearing only in the result list or HAVING clause applies with respect to the query level that the aggregate belongs to. @@ -1784,7 +1790,7 @@ FROM generate_series(1,10) AS s(i); to grouping of the selected rows into a single output row — each row remains separate in the query output. However the window function has access to all the rows that would be part of the current row's - group according to the grouping specification (PARTITION BY + group according to the grouping specification (PARTITION BY list) of the window function call. 
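As a minimal illustration of this behavior (emp, depname, and salary are hypothetical names), each row keeps its identity while the window aggregate sees every row of the current row's partition:

SELECT depname, salary,
       avg(salary) OVER (PARTITION BY depname) AS dept_avg
FROM emp;
-- every input row is returned, annotated with its department's average salary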
The syntax of a window function call is one of the following: @@ -1802,20 +1808,27 @@ FROM generate_series(1,10) AS s(i); [ ORDER BY expression [ ASC | DESC | USING operator ] [ NULLS { FIRST | LAST } ] [, ...] ] [ frame_clause ] - and the optional frame_clause + The optional frame_clause can be one of -{ RANGE | ROWS } frame_start -{ RANGE | ROWS } BETWEEN frame_start AND frame_end +{ RANGE | ROWS | GROUPS } frame_start [ frame_exclusion ] +{ RANGE | ROWS | GROUPS } BETWEEN frame_start AND frame_end [ frame_exclusion ] - where frame_start and frame_end can be - one of + where frame_start + and frame_end can be one of UNBOUNDED PRECEDING -value PRECEDING +offset PRECEDING CURRENT ROW -value FOLLOWING +offset FOLLOWING UNBOUNDED FOLLOWING + + and frame_exclusion can be one of + +EXCLUDE CURRENT ROW +EXCLUDE GROUP +EXCLUDE TIES +EXCLUDE NO OTHERS @@ -1830,85 +1843,160 @@ UNBOUNDED FOLLOWING Alternatively, a full window_definition can be given within parentheses, using the same syntax as for defining a named window in the WINDOW clause; see the - reference page for details. It's worth - pointing out that OVER wname is not exactly equivalent to - OVER (wname ...); the latter implies copying and modifying the + reference page for details. It's worth + pointing out that OVER wname is not exactly equivalent to + OVER (wname ...); the latter implies copying and modifying the window definition, and will be rejected if the referenced window specification includes a frame clause. - The PARTITION BY clause groups the rows of the query into - partitions, which are processed separately by the window - function. PARTITION BY works similarly to a query-level - GROUP BY clause, except that its expressions are always just + The PARTITION BY clause groups the rows of the query into + partitions, which are processed separately by the window + function. PARTITION BY works similarly to a query-level + GROUP BY clause, except that its expressions are always just expressions and cannot be output-column names or numbers. - Without PARTITION BY, all rows produced by the query are + Without PARTITION BY, all rows produced by the query are treated as a single partition. - The ORDER BY clause determines the order in which the rows + The ORDER BY clause determines the order in which the rows of a partition are processed by the window function. It works similarly - to a query-level ORDER BY clause, but likewise cannot use - output-column names or numbers. Without ORDER BY, rows are + to a query-level ORDER BY clause, but likewise cannot use + output-column names or numbers. Without ORDER BY, rows are processed in an unspecified order. The frame_clause specifies - the set of rows constituting the window frame, which is a + the set of rows constituting the window frame, which is a subset of the current partition, for those window functions that act on - the frame instead of the whole partition. The frame can be specified in - either RANGE or ROWS mode; in either case, it - runs from the frame_start to the - frame_end. If frame_end is omitted, - it defaults to CURRENT ROW. + the frame instead of the whole partition. The set of rows in the frame + can vary depending on which row is the current row. The frame can be + specified in RANGE, ROWS + or GROUPS mode; in each case, it runs from + the frame_start to + the frame_end. + If frame_end is omitted, the end defaults + to CURRENT ROW. 
- A frame_start of UNBOUNDED PRECEDING means + A frame_start of UNBOUNDED PRECEDING means that the frame starts with the first row of the partition, and similarly - a frame_end of UNBOUNDED FOLLOWING means + a frame_end of UNBOUNDED FOLLOWING means that the frame ends with the last row of the partition. - In RANGE mode, a frame_start of - CURRENT ROW means the frame starts with the current row's - first peer row (a row that ORDER BY considers - equivalent to the current row), while a frame_end of - CURRENT ROW means the frame ends with the last equivalent - ORDER BY peer. In ROWS mode, CURRENT ROW simply means - the current row. + In RANGE or GROUPS mode, + a frame_start of + CURRENT ROW means the frame starts with the current + row's first peer row (a row that the + window's ORDER BY clause sorts as equivalent to the + current row), while a frame_end of + CURRENT ROW means the frame ends with the current + row's last peer row. + In ROWS mode, CURRENT ROW simply + means the current row. - The value PRECEDING and - value FOLLOWING cases are currently only - allowed in ROWS mode. They indicate that the frame starts - or ends the specified number of rows before or after the current row. - value must be an integer expression not + In the offset PRECEDING + and offset FOLLOWING frame + options, the offset must be an expression not containing any variables, aggregate functions, or window functions. - The value must not be null or negative; but it can be zero, which - just selects the current row. + The meaning of the offset depends on the + frame mode: + + + + In ROWS mode, + the offset must yield a non-null, + non-negative integer, and the option means that the frame starts or + ends the specified number of rows before or after the current row. + + + + + In GROUPS mode, + the offset again must yield a non-null, + non-negative integer, and the option means that the frame starts or + ends the specified number of peer groups + before or after the current row's peer group, where a peer group is a + set of rows that are equivalent in the ORDER BY + ordering. (There must be an ORDER BY clause + in the window definition to use GROUPS mode.) + + + + + In RANGE mode, these options require that + the ORDER BY clause specify exactly one column. + The offset specifies the maximum + difference between the value of that column in the current row and + its value in preceding or following rows of the frame. The data type + of the offset expression varies depending + on the data type of the ordering column. For numeric ordering + columns it is typically of the same type as the ordering column, + but for datetime ordering columns it is an interval. + For example, if the ordering column is of type date + or timestamp, one could write RANGE BETWEEN + '1 day' PRECEDING AND '10 days' FOLLOWING. + The offset is still required to be + non-null and non-negative, though the meaning + of non-negative depends on its data type. + + + + In any case, the distance to the end of the frame is limited by the + distance to the end of the partition, so that for rows near the partition + ends the frame might contain fewer rows than elsewhere. + + + + Notice that in both ROWS and GROUPS + mode, 0 PRECEDING and 0 FOLLOWING + are equivalent to CURRENT ROW. This normally holds + in RANGE mode as well, for an appropriate + data-type-specific meaning of zero. 
+ + + + The frame_exclusion option allows rows around + the current row to be excluded from the frame, even if they would be + included according to the frame start and frame end options. + EXCLUDE CURRENT ROW excludes the current row from the + frame. + EXCLUDE GROUP excludes the current row and its + ordering peers from the frame. + EXCLUDE TIES excludes any peers of the current + row from the frame, but not the current row itself. + EXCLUDE NO OTHERS simply specifies explicitly the + default behavior of not excluding the current row or its peers. - The default framing option is RANGE UNBOUNDED PRECEDING, + The default framing option is RANGE UNBOUNDED PRECEDING, which is the same as RANGE BETWEEN UNBOUNDED PRECEDING AND - CURRENT ROW. With ORDER BY, this sets the frame to be + CURRENT ROW. With ORDER BY, this sets the frame to be all rows from the partition start up through the current row's last - ORDER BY peer. Without ORDER BY, all rows of the partition are - included in the window frame, since all rows become peers of the current - row. + ORDER BY peer. Without ORDER BY, + this means all rows of the partition are included in the window frame, + since all rows become peers of the current row. Restrictions are that - frame_start cannot be UNBOUNDED FOLLOWING, - frame_end cannot be UNBOUNDED PRECEDING, - and the frame_end choice cannot appear earlier in the - above list than the frame_start choice — for example - RANGE BETWEEN CURRENT ROW AND value + frame_start cannot be UNBOUNDED FOLLOWING, + frame_end cannot be UNBOUNDED PRECEDING, + and the frame_end choice cannot appear earlier in the + above list of frame_start + and frame_end options than + the frame_start choice does — for example + RANGE BETWEEN CURRENT ROW AND offset PRECEDING is not allowed. + But, for example, ROWS BETWEEN 7 PRECEDING AND 8 + PRECEDING is allowed, even though it would never select any + rows. @@ -1921,32 +2009,32 @@ UNBOUNDED FOLLOWING The built-in window functions are described in . Other window functions can be added by + linkend="functions-window-table"/>. Other window functions can be added by the user. Also, any built-in or user-defined general-purpose or statistical aggregate can be used as a window function. (Ordered-set and hypothetical-set aggregates cannot presently be used as window functions.) - The syntaxes using * are used for calling parameter-less + The syntaxes using * are used for calling parameter-less aggregate functions as window functions, for example - count(*) OVER (PARTITION BY x ORDER BY y). - The asterisk (*) is customarily not used for + count(*) OVER (PARTITION BY x ORDER BY y). + The asterisk (*) is customarily not used for window-specific functions. Window-specific functions do not - allow DISTINCT or ORDER BY to be used within the + allow DISTINCT or ORDER BY to be used within the function argument list. Window function calls are permitted only in the SELECT - list and the ORDER BY clause of the query. + list and the ORDER BY clause of the query. More information about window functions can be found in - , - , and - . + , + , and + . @@ -1974,7 +2062,7 @@ UNBOUNDED FOLLOWING CAST ( expression AS type ) expression::type - The CAST syntax conforms to SQL; the syntax with + The CAST syntax conforms to SQL; the syntax with :: is historical PostgreSQL usage. @@ -1984,7 +2072,7 @@ CAST ( expression AS type represents a run-time type conversion. The cast will succeed only if a suitable type conversion operation has been defined. 
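 For instance, a run-time conversion of a non-constant expression might look like this trivial sketch:

SELECT CAST(i AS text) FROM generate_series(1, 3) AS s(i);
SELECT i::text         FROM generate_series(1, 3) AS s(i);  -- same conversion, historical syntax

 Both forms convert each integer produced by the subquery to text while the query runs.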
Notice that this is subtly different from the use of casts with constants, as shown in - . A cast applied to an + . A cast applied to an unadorned string literal represents the initial assignment of a type to a literal constant value, and so it will succeed for any type (if the contents of the string literal are acceptable input syntax for the @@ -1996,7 +2084,7 @@ CAST ( expression AS type to the type that a value expression must produce (for example, when it is assigned to a table column); the system will automatically apply a type cast in such cases. However, automatic casting is only done for - casts that are marked OK to apply implicitly + casts that are marked OK to apply implicitly in the system catalogs. Other casts must be invoked with explicit casting syntax. This restriction is intended to prevent surprising conversions from being applied silently. @@ -2011,8 +2099,8 @@ CAST ( expression AS type However, this only works for types whose names are also valid as function names. For example, double precision cannot be used this way, but the equivalent float8 - can. Also, the names interval, time, and - timestamp can only be used in this fashion if they are + can. Also, the names interval, time, and + timestamp can only be used in this fashion if they are double-quoted, because of syntactic conflicts. Therefore, the use of the function-like cast syntax leads to inconsistencies and should probably be avoided. @@ -2025,10 +2113,10 @@ CAST ( expression AS type conversion, it will internally invoke a registered function to perform the conversion. By convention, these conversion functions have the same name as their output type, and thus the function-like - syntax is nothing more than a direct invocation of the underlying + syntax is nothing more than a direct invocation of the underlying conversion function. Obviously, this is not something that a portable application should rely on. For further details see - . + . @@ -2061,7 +2149,7 @@ CAST ( expression AS type The two common uses of the COLLATE clause are - overriding the sort order in an ORDER BY clause, for + overriding the sort order in an ORDER BY clause, for example: SELECT a, b, c FROM tbl WHERE ... ORDER BY a COLLATE "C"; @@ -2071,15 +2159,15 @@ SELECT a, b, c FROM tbl WHERE ... ORDER BY a COLLATE "C"; SELECT * FROM tbl WHERE a > 'foo' COLLATE "C"; - Note that in the latter case the COLLATE clause is + Note that in the latter case the COLLATE clause is attached to an input argument of the operator we wish to affect. It doesn't matter which argument of the operator or function call the - COLLATE clause is attached to, because the collation that is + COLLATE clause is attached to, because the collation that is applied by the operator or function is derived by considering all - arguments, and an explicit COLLATE clause will override the + arguments, and an explicit COLLATE clause will override the collations of all other arguments. (Attaching non-matching - COLLATE clauses to more than one argument, however, is an - error. For more details see .) + COLLATE clauses to more than one argument, however, is an + error. For more details see .) Thus, this gives the same result as the previous example: SELECT * FROM tbl WHERE a COLLATE "C" > 'foo'; @@ -2089,8 +2177,8 @@ SELECT * FROM tbl WHERE a COLLATE "C" > 'foo'; SELECT * FROM tbl WHERE (a > 'foo') COLLATE "C"; because it attempts to apply a collation to the result of the - > operator, which is of the non-collatable data type - boolean. 
+ > operator, which is of the non-collatable data type + boolean. @@ -2104,7 +2192,7 @@ SELECT * FROM tbl WHERE (a > 'foo') COLLATE "C"; A scalar subquery is an ordinary SELECT query in parentheses that returns exactly one - row with one column. (See for information about writing queries.) + row with one column. (See for information about writing queries.) The SELECT query is executed and the single returned value is used in the surrounding value expression. It is an error to use a query that @@ -2113,7 +2201,7 @@ SELECT * FROM tbl WHERE (a > 'foo') COLLATE "C"; there is no error; the scalar result is taken to be null.) The subquery can refer to variables from the surrounding query, which will act as constants during any one evaluation of the subquery. - See also for other expressions involving subqueries. + See also for other expressions involving subqueries. @@ -2143,8 +2231,8 @@ SELECT name, (SELECT max(pop) FROM cities WHERE cities.state = states.name) array value using values for its member elements. A simple array constructor consists of the key word ARRAY, a left square bracket - [, a list of expressions (separated by commas) for the - array element values, and finally a right square bracket ]. + [, a list of expressions (separated by commas) for the + array element values, and finally a right square bracket ]. For example: SELECT ARRAY[1,2,3+4]; @@ -2155,8 +2243,8 @@ SELECT ARRAY[1,2,3+4]; By default, the array element type is the common type of the member expressions, - determined using the same rules as for UNION or - CASE constructs (see ). + determined using the same rules as for UNION or + CASE constructs (see ). You can override this by explicitly casting the array constructor to the desired type, for example: @@ -2168,7 +2256,7 @@ SELECT ARRAY[1,2,22.7]::integer[]; This has the same effect as casting each expression to the array element type individually. - For more on casting, see . + For more on casting, see . @@ -2193,13 +2281,13 @@ SELECT ARRAY[[1,2],[3,4]]; Since multidimensional arrays must be rectangular, inner constructors at the same level must produce sub-arrays of identical dimensions. - Any cast applied to the outer ARRAY constructor propagates + Any cast applied to the outer ARRAY constructor propagates automatically to all the inner constructors. Multidimensional array constructor elements can be anything yielding - an array of the proper kind, not only a sub-ARRAY construct. + an array of the proper kind, not only a sub-ARRAY construct. For example: CREATE TABLE arr(f1 int[], f2 int[]); @@ -2259,7 +2347,7 @@ SELECT ARRAY(SELECT ARRAY[i, i*2] FROM generate_series(1,5) AS a(i)); The subscripts of an array value built with ARRAY always begin with one. For more information about arrays, see - . + . @@ -2291,7 +2379,7 @@ SELECT ARRAY(SELECT ARRAY[i, i*2] FROM generate_series(1,5) AS a(i)); SELECT ROW(1,2.5,'this is a test'); - The key word ROW is optional when there is more than one + The key word ROW is optional when there is more than one expression in the list. @@ -2299,10 +2387,10 @@ SELECT ROW(1,2.5,'this is a test'); A row constructor can include the syntax rowvalue.*, which will be expanded to a list of the elements of the row value, - just as occurs when the .* syntax is used at the top level - of a SELECT list (see ). - For example, if table t has - columns f1 and f2, these are the same: + just as occurs when the .* syntax is used at the top level + of a SELECT list (see ). 
+ For example, if table t has + columns f1 and f2, these are the same: SELECT ROW(t.*, 42) FROM t; SELECT ROW(t.f1, t.f2, 42) FROM t; @@ -2313,19 +2401,19 @@ SELECT ROW(t.f1, t.f2, 42) FROM t; Before PostgreSQL 8.2, the .* syntax was not expanded in row constructors, so - that writing ROW(t.*, 42) created a two-field row whose first + that writing ROW(t.*, 42) created a two-field row whose first field was another row value. The new behavior is usually more useful. If you need the old behavior of nested row values, write the inner row value without .*, for instance - ROW(t, 42). + ROW(t, 42). - By default, the value created by a ROW expression is of + By default, the value created by a ROW expression is of an anonymous record type. If necessary, it can be cast to a named composite type — either the row type of a table, or a composite type - created with CREATE TYPE AS. An explicit cast might be needed + created with CREATE TYPE AS. An explicit cast might be needed to avoid ambiguity. For example: CREATE TABLE mytable(f1 int, f2 float, f3 text); @@ -2366,15 +2454,15 @@ SELECT getf1(CAST(ROW(11,'this is a test',2.5) AS myrowtype)); in a composite-type table column, or to be passed to a function that accepts a composite parameter. Also, it is possible to compare two row values or test a row with - IS NULL or IS NOT NULL, for example: + IS NULL or IS NOT NULL, for example: SELECT ROW(1,2.5,'this is a test') = ROW(1, 3, 'not the same'); SELECT ROW(table.*) IS NULL FROM table; -- detect all-null rows - For more detail see . + For more detail see . Row constructors can also be used in connection with subqueries, - as discussed in . + as discussed in . @@ -2413,18 +2501,18 @@ SELECT somefunc() OR true; As a consequence, it is unwise to use functions with side effects as part of complex expressions. It is particularly dangerous to - rely on side effects or evaluation order in WHERE and HAVING clauses, + rely on side effects or evaluation order in WHERE and HAVING clauses, since those clauses are extensively reprocessed as part of developing an execution plan. Boolean - expressions (AND/OR/NOT combinations) in those clauses can be reorganized + expressions (AND/OR/NOT combinations) in those clauses can be reorganized in any manner allowed by the laws of Boolean algebra. - When it is essential to force evaluation order, a CASE - construct (see ) can be + When it is essential to force evaluation order, a CASE + construct (see ) can be used. For example, this is an untrustworthy way of trying to - avoid division by zero in a WHERE clause: + avoid division by zero in a WHERE clause: SELECT ... WHERE x > 0 AND y/x > 1.5; @@ -2432,17 +2520,17 @@ SELECT ... WHERE x > 0 AND y/x > 1.5; SELECT ... WHERE CASE WHEN x > 0 THEN y/x > 1.5 ELSE false END; - A CASE construct used in this fashion will defeat optimization + A CASE construct used in this fashion will defeat optimization attempts, so it should only be done when necessary. (In this particular example, it would be better to sidestep the problem by writing - y > 1.5*x instead.) + y > 1.5*x instead.) - CASE is not a cure-all for such issues, however. + CASE is not a cure-all for such issues, however. One limitation of the technique illustrated above is that it does not prevent early evaluation of constant subexpressions. - As described in , functions and + As described in , functions and operators marked IMMUTABLE can be evaluated when the query is planned rather than when it is executed. 
Thus for example @@ -2450,8 +2538,8 @@ SELECT CASE WHEN x > 0 THEN x ELSE 1/0 END FROM tab; is likely to result in a division-by-zero failure due to the planner trying to simplify the constant subexpression, - even if every row in the table has x > 0 so that the - ELSE arm would never be entered at run time. + even if every row in the table has x > 0 so that the + ELSE arm would never be entered at run time. @@ -2459,17 +2547,17 @@ SELECT CASE WHEN x > 0 THEN x ELSE 1/0 END FROM tab; obviously involve constants can occur in queries executed within functions, since the values of function arguments and local variables can be inserted into queries as constants for planning purposes. - Within PL/pgSQL functions, for example, using an - IF-THEN-ELSE statement to protect + Within PL/pgSQL functions, for example, using an + IF-THEN-ELSE statement to protect a risky computation is much safer than just nesting it in a - CASE expression. + CASE expression. - Another limitation of the same kind is that a CASE cannot + Another limitation of the same kind is that a CASE cannot prevent evaluation of an aggregate expression contained within it, because aggregate expressions are computed before other - expressions in a SELECT list or HAVING clause + expressions in a SELECT list or HAVING clause are considered. For example, the following query can cause a division-by-zero error despite seemingly having protected against it: @@ -2478,12 +2566,12 @@ SELECT CASE WHEN min(employees) > 0 END FROM departments; - The min() and avg() aggregates are computed + The min() and avg() aggregates are computed concurrently over all the input rows, so if any row - has employees equal to zero, the division-by-zero error + has employees equal to zero, the division-by-zero error will occur before there is any opportunity to test the result of - min(). Instead, use a WHERE - or FILTER clause to prevent problematic input rows from + min(). Instead, use a WHERE + or FILTER clause to prevent problematic input rows from reaching an aggregate function in the first place. @@ -2508,6 +2596,8 @@ SELECT CASE WHEN min(employees) > 0 its argument values in the same order as they are defined in the function declaration. In named notation, the arguments are matched to the function parameters by name and can be written in any order. + For each notation, also consider the effect of function argument types, + documented in . @@ -2547,7 +2637,7 @@ LANGUAGE SQL IMMUTABLE STRICT; b inputs will be concatenated, and forced to either upper or lower case depending on the uppercase parameter. The remaining details of this function - definition are not important here (see for + definition are not important here (see for more information). @@ -2657,7 +2747,7 @@ SELECT concat_lower_or_upper('Hello', 'World', uppercase => true); In the above query, the arguments a and b are specified positionally, while - uppercase is specified by name. In this example, + uppercase is specified by name. In this example, that adds little except documentation. With a more complex function having numerous parameters that have default values, named or mixed notation can save a great deal of writing and reduce chances for error. diff --git a/doc/src/sgml/tablefunc.sgml b/doc/src/sgml/tablefunc.sgml index 90f6df9545..007e9c62f5 100644 --- a/doc/src/sgml/tablefunc.sgml +++ b/doc/src/sgml/tablefunc.sgml @@ -8,7 +8,7 @@ - The tablefunc module includes various functions that return + The tablefunc module includes various functions that return tables (that is, multiple rows). 
These functions are useful both in their own right and as examples of how to write C functions that return multiple rows. @@ -18,12 +18,12 @@ Functions Provided - shows the functions provided + shows the functions provided by the tablefunc module.
- <filename>tablefunc</> Functions + <filename>tablefunc</filename> Functions @@ -35,46 +35,46 @@ normal_rand(int numvals, float8 mean, float8 stddev) - setof float8 + setof float8 Produces a set of normally distributed random values crosstab(text sql) - setof record + setof record - Produces a pivot table containing - row names plus N value columns, where - N is determined by the row type specified in the calling + Produces a pivot table containing + row names plus N value columns, where + N is determined by the row type specified in the calling query - crosstabN(text sql) - setof table_crosstab_N + crosstabN(text sql) + setof table_crosstab_N - Produces a pivot table containing - row names plus N value columns. - crosstab2, crosstab3, and - crosstab4 are predefined, but you can create additional - crosstabN functions as described below + Produces a pivot table containing + row names plus N value columns. + crosstab2, crosstab3, and + crosstab4 are predefined, but you can create additional + crosstabN functions as described below crosstab(text source_sql, text category_sql) - setof record + setof record - Produces a pivot table + Produces a pivot table with the value columns specified by a second query crosstab(text sql, int N) - setof record + setof record - Obsolete version of crosstab(text). - The parameter N is now ignored, since the number of + Obsolete version of crosstab(text). + The parameter N is now ignored, since the number of value columns is always determined by the calling query @@ -88,7 +88,7 @@ connectby - setof record + setof record Produces a representation of a hierarchical tree structure @@ -109,7 +109,7 @@ normal_rand(int numvals, float8 mean, float8 stddev) returns setof float8 - normal_rand produces a set of normally distributed random + normal_rand produces a set of normally distributed random values (Gaussian distribution). @@ -157,7 +157,7 @@ crosstab(text sql, int N) - The crosstab function is used to produce pivot + The crosstab function is used to produce pivot displays, wherein data is listed across the page rather than down. For example, we might have data like @@ -176,7 +176,7 @@ row1 val11 val12 val13 ... row2 val21 val22 val23 ... ... - The crosstab function takes a text parameter that is a SQL + The crosstab function takes a text parameter that is a SQL query producing raw data formatted in the first way, and produces a table formatted in the second way. @@ -209,9 +209,9 @@ row2 val21 val22 val23 ... - The crosstab function is declared to return setof + The crosstab function is declared to return setof record, so the actual names and types of the output columns must be - defined in the FROM clause of the calling SELECT + defined in the FROM clause of the calling SELECT statement, for example: SELECT * FROM crosstab('...') AS ct(row_name text, category_1 text, category_2 text); @@ -227,30 +227,30 @@ SELECT * FROM crosstab('...') AS ct(row_name text, category_1 text, category_2 t - The FROM clause must define the output as one - row_name column (of the same data type as the first result - column of the SQL query) followed by N value columns + The FROM clause must define the output as one + row_name column (of the same data type as the first result + column of the SQL query) followed by N value columns (all of the same data type as the third result column of the SQL query). You can set up as many output value columns as you wish. The names of the output columns are up to you. 
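 As an end-to-end sketch (the source table raw(rowid text, attrib text, val text) is invented for illustration), a complete call could look like:

SELECT *
FROM crosstab('SELECT rowid, attrib, val FROM raw ORDER BY 1, 2')
AS ct(row_name text, category_1 text, category_2 text, category_3 text);

 The column definition list after AS is what satisfies the requirement just described: one row_name column of the same type as the first query column, followed by as many value columns as the caller wants.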
- The crosstab function produces one output row for each + The crosstab function produces one output row for each consecutive group of input rows with the same row_name value. It fills the output - value columns, left to right, with the + value columns, left to right, with the value fields from these rows. If there - are fewer rows in a group than there are output value + are fewer rows in a group than there are output value columns, the extra output columns are filled with nulls; if there are more rows, the extra input rows are skipped. - In practice the SQL query should always specify ORDER BY 1,2 + In practice the SQL query should always specify ORDER BY 1,2 to ensure that the input rows are properly ordered, that is, values with the same row_name are brought together and - correctly ordered within the row. Notice that crosstab + correctly ordered within the row. Notice that crosstab itself does not pay any attention to the second column of the query result; it's just there to be ordered by, to control the order in which the third-column values appear across the page. @@ -286,41 +286,41 @@ AS ct(row_name text, category_1 text, category_2 text, category_3 text); - You can avoid always having to write out a FROM clause to + You can avoid always having to write out a FROM clause to define the output columns, by setting up a custom crosstab function that has the desired output row type wired into its definition. This is described in the next section. Another possibility is to embed the - required FROM clause in a view definition. + required FROM clause in a view definition. - See also the \crosstabview - command in psql, which provides functionality similar - to crosstab(). + See also the \crosstabview + command in psql, which provides functionality similar + to crosstab(). - <function>crosstab<replaceable>N</>(text)</function> + <function>crosstab<replaceable>N</replaceable>(text)</function> crosstab -crosstabN(text sql) +crosstabN(text sql) - The crosstabN functions are examples of how - to set up custom wrappers for the general crosstab function, + The crosstabN functions are examples of how + to set up custom wrappers for the general crosstab function, so that you need not write out column names and types in the calling - SELECT query. The tablefunc module includes - crosstab2, crosstab3, and - crosstab4, whose output row types are defined as + SELECT query. The tablefunc module includes + crosstab2, crosstab3, and + crosstab4, whose output row types are defined as @@ -337,10 +337,10 @@ CREATE TYPE tablefunc_crosstab_N AS ( Thus, these functions can be used directly when the input query produces - row_name and value columns of type - text, and you want 2, 3, or 4 output values columns. + row_name and value columns of type + text, and you want 2, 3, or 4 output values columns. In all other ways they behave exactly as described above for the - general crosstab function. + general crosstab function. @@ -359,7 +359,7 @@ FROM crosstab3( These functions are provided mostly for illustration purposes. You can create your own return types and functions based on the - underlying crosstab() function. There are two ways + underlying crosstab() function. There are two ways to do it: @@ -367,13 +367,13 @@ FROM crosstab3( Create a composite type describing the desired output columns, similar to the examples in - contrib/tablefunc/tablefunc--1.0.sql. + contrib/tablefunc/tablefunc--1.0.sql. 
Then define a - unique function name accepting one text parameter and returning - setof your_type_name, but linking to the same underlying - crosstab C function. For example, if your source data - produces row names that are text, and values that are - float8, and you want 5 value columns: + unique function name accepting one text parameter and returning + setof your_type_name, but linking to the same underlying + crosstab C function. For example, if your source data + produces row names that are text, and values that are + float8, and you want 5 value columns: CREATE TYPE my_crosstab_float8_5_cols AS ( my_row_name text, @@ -393,7 +393,7 @@ CREATE OR REPLACE FUNCTION crosstab_float8_5_cols(text) - Use OUT parameters to define the return type implicitly. + Use OUT parameters to define the return type implicitly. The same example could also be done this way: CREATE OR REPLACE FUNCTION crosstab_float8_5_cols( @@ -426,12 +426,12 @@ crosstab(text source_sql, text category_sql) - The main limitation of the single-parameter form of crosstab + The main limitation of the single-parameter form of crosstab is that it treats all values in a group alike, inserting each value into the first available column. If you want the value columns to correspond to specific categories of data, and some groups might not have data for some of the categories, that doesn't work well. - The two-parameter form of crosstab handles this case by + The two-parameter form of crosstab handles this case by providing an explicit list of the categories corresponding to the output columns. @@ -447,7 +447,7 @@ crosstab(text source_sql, text category_sql) category and value columns must be the last two columns, in that order. Any columns between row_name and - category are treated as extra. + category are treated as extra. The extra columns are expected to be the same for all rows with the same row_name value. @@ -489,9 +489,9 @@ SELECT DISTINCT cat FROM foo ORDER BY 1; - The crosstab function is declared to return setof + The crosstab function is declared to return setof record, so the actual names and types of the output columns must be - defined in the FROM clause of the calling SELECT + defined in the FROM clause of the calling SELECT statement, for example: @@ -512,25 +512,25 @@ row_name extra cat1 cat2 cat3 cat4 - The FROM clause must define the proper number of output - columns of the proper data types. If there are N - columns in the source_sql query's result, the first - N-2 of them must match up with the first - N-2 output columns. The remaining output columns - must have the type of the last column of the source_sql + The FROM clause must define the proper number of output + columns of the proper data types. If there are N + columns in the source_sql query's result, the first + N-2 of them must match up with the first + N-2 output columns. The remaining output columns + must have the type of the last column of the source_sql query's result, and there must be exactly as many of them as there are rows in the category_sql query's result. - The crosstab function produces one output row for each + The crosstab function produces one output row for each consecutive group of input rows with the same row_name value. The output - row_name column, plus any extra + row_name column, plus any extra columns, are copied from the first row of the group. The output - value columns are filled with the + value columns are filled with the value fields from rows having matching - category values. If a row's category + category values. 
If a row's category does not match any output of the category_sql query, its value is ignored. Output columns whose matching category is not present in any input row @@ -539,7 +539,7 @@ row_name extra cat1 cat2 cat3 cat4 In practice the source_sql query should always - specify ORDER BY 1 to ensure that values with the same + specify ORDER BY 1 to ensure that values with the same row_name are brought together. However, ordering of the categories within a group is not important. Also, it is essential to be sure that the order of the @@ -619,7 +619,7 @@ AS You can create predefined functions to avoid having to write out the result column names and types in each query. See the examples in the previous section. The underlying C function for this form - of crosstab is named crosstab_hash. + of crosstab is named crosstab_hash. @@ -638,15 +638,15 @@ connectby(text relname, text keyid_fld, text parent_keyid_fld - The connectby function produces a display of hierarchical + The connectby function produces a display of hierarchical data that is stored in a table. The table must have a key field that uniquely identifies rows, and a parent-key field that references the - parent (if any) of each row. connectby can display the + parent (if any) of each row. connectby can display the sub-tree descending from any row. - explains the + explains the parameters. @@ -694,14 +694,14 @@ connectby(text relname, text keyid_fld, text parent_keyid_fld The key and parent-key fields can be any data type, but they must be - the same type. Note that the start_with value must be + the same type. Note that the start_with value must be entered as a text string, regardless of the type of the key field. - The connectby function is declared to return setof + The connectby function is declared to return setof record, so the actual names and types of the output columns must be - defined in the FROM clause of the calling SELECT + defined in the FROM clause of the calling SELECT statement, for example: @@ -714,15 +714,15 @@ SELECT * FROM connectby('connectby_tree', 'keyid', 'parent_keyid', 'pos', 'row2' The first two output columns are used for the current row's key and its parent row's key; they must match the type of the table's key field. The third output column is the depth in the tree and must be of type - integer. If a branch_delim parameter was + integer. If a branch_delim parameter was given, the next output column is the branch display and must be of type - text. Finally, if an orderby_fld + text. Finally, if an orderby_fld parameter was given, the last output column is a serial number, and must - be of type integer. + be of type integer. - The branch output column shows the path of keys taken to + The branch output column shows the path of keys taken to reach the current row. The keys are separated by the specified branch_delim string. If no branch display is wanted, omit both the branch_delim parameter @@ -740,7 +740,7 @@ SELECT * FROM connectby('connectby_tree', 'keyid', 'parent_keyid', 'pos', 'row2' The parameters representing table and field names are copied as-is - into the SQL queries that connectby generates internally. + into the SQL queries that connectby generates internally. Therefore, include double quotes if the names are mixed-case or contain special characters. You may also need to schema-qualify the table name. 
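 To illustrate the quoting advice just given, a call against a hypothetical schema-qualified, mixed-case table whose key columns are of type text might look like this (all names are invented):

SELECT *
FROM connectby('myschema."Employee_Tree"', 'id', 'parent_id', '1', 0, '~')
AS t(id text, parent_id text, level int, branch text);

 The double quotes inside the first argument are passed through verbatim to the generated queries, which is exactly what a mixed-case table name requires.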
@@ -752,10 +752,10 @@ SELECT * FROM connectby('connectby_tree', 'keyid', 'parent_keyid', 'pos', 'row2' It is important that the branch_delim string - not appear in any key values, else connectby may incorrectly + not appear in any key values, else connectby may incorrectly report an infinite-recursion error. Note that if branch_delim is not provided, a default value - of ~ is used for recursion detection purposes. + of ~ is used for recursion detection purposes. diff --git a/doc/src/sgml/tablesample-method.sgml b/doc/src/sgml/tablesample-method.sgml index 22f8bbe19a..b84b7ba885 100644 --- a/doc/src/sgml/tablesample-method.sgml +++ b/doc/src/sgml/tablesample-method.sgml @@ -12,11 +12,11 @@ - PostgreSQL's implementation of the TABLESAMPLE + PostgreSQL's implementation of the TABLESAMPLE clause supports custom table sampling methods, in addition to - the BERNOULLI and SYSTEM methods that are required + the BERNOULLI and SYSTEM methods that are required by the SQL standard. The sampling method determines which rows of the - table will be selected when the TABLESAMPLE clause is used. + table will be selected when the TABLESAMPLE clause is used. @@ -26,18 +26,18 @@ method_name(internal) RETURNS tsm_handler The name of the function is the same method name appearing in the - TABLESAMPLE clause. The internal argument is a dummy + TABLESAMPLE clause. The internal argument is a dummy (always having value zero) that simply serves to prevent this function from being called directly from a SQL command. The result of the function must be a palloc'd struct of - type TsmRoutine, which contains pointers to support functions for + type TsmRoutine, which contains pointers to support functions for the sampling method. These support functions are plain C functions and are not visible or callable at the SQL level. The support functions are - described in . + described in . - In addition to function pointers, the TsmRoutine struct must + In addition to function pointers, the TsmRoutine struct must provide these additional fields: @@ -47,9 +47,9 @@ method_name(internal) RETURNS tsm_handler This is an OID list containing the data type OIDs of the parameter(s) - that will be accepted by the TABLESAMPLE clause when this + that will be accepted by the TABLESAMPLE clause when this sampling method is used. For example, for the built-in methods, this - list contains a single item with value FLOAT4OID, which + list contains a single item with value FLOAT4OID, which represents the sampling percentage. Custom sampling methods can have more or different parameters. @@ -60,11 +60,11 @@ method_name(internal) RETURNS tsm_handler bool repeatable_across_queries - If true, the sampling method can deliver identical samples + If true, the sampling method can deliver identical samples across successive queries, if the same parameters - and REPEATABLE seed value are supplied each time and the - table contents have not changed. When this is false, - the REPEATABLE clause is not accepted for use with the + and REPEATABLE seed value are supplied each time and the + table contents have not changed. When this is false, + the REPEATABLE clause is not accepted for use with the sampling method. @@ -74,10 +74,10 @@ method_name(internal) RETURNS tsm_handler bool repeatable_across_scans - If true, the sampling method can deliver identical samples + If true, the sampling method can deliver identical samples across successive scans in the same query (assuming unchanging parameters, seed value, and snapshot). 
- When this is false, the planner will not select plans that + When this is false, the planner will not select plans that would require scanning the sampled table more than once, since that might result in inconsistent query output. @@ -86,16 +86,16 @@ method_name(internal) RETURNS tsm_handler - The TsmRoutine struct type is declared - in src/include/access/tsmapi.h, which see for additional + The TsmRoutine struct type is declared + in src/include/access/tsmapi.h, which see for additional details. The table sampling methods included in the standard distribution are good references when trying to write your own. Look into - the src/backend/access/tablesample subdirectory of the source - tree for the built-in sampling methods, and into the contrib + the src/backend/access/tablesample subdirectory of the source + tree for the built-in sampling methods, and into the contrib subdirectory for add-on methods. @@ -103,7 +103,7 @@ method_name(internal) RETURNS tsm_handler Sampling Method Support Functions - The TSM handler function returns a palloc'd TsmRoutine struct + The TSM handler function returns a palloc'd TsmRoutine struct containing pointers to the support functions described below. Most of the functions are required, but some are optional, and those pointers can be NULL. @@ -123,16 +123,16 @@ SampleScanGetSampleSize (PlannerInfo *root, relation pages that will be read during a sample scan, and the number of tuples that will be selected by the scan. (For example, these might be determined by estimating the sampling fraction, and then multiplying - the baserel->pages and baserel->tuples + the baserel->pages and baserel->tuples numbers by that, being sure to round the results to integral values.) - The paramexprs list holds the expression(s) that are - parameters to the TABLESAMPLE clause. It is recommended to - use estimate_expression_value() to try to reduce these + The paramexprs list holds the expression(s) that are + parameters to the TABLESAMPLE clause. It is recommended to + use estimate_expression_value() to try to reduce these expressions to constants, if their values are needed for estimation purposes; but the function must provide size estimates even if they cannot be reduced, and it should not fail even if the values appear invalid (remember that they're only estimates of what the run-time values will be). - The pages and tuples parameters are outputs. + The pages and tuples parameters are outputs. @@ -145,29 +145,29 @@ InitSampleScan (SampleScanState *node, Initialize for execution of a SampleScan plan node. This is called during executor startup. It should perform any initialization needed before processing can start. - The SampleScanState node has already been created, but - its tsm_state field is NULL. - The InitSampleScan function can palloc whatever internal + The SampleScanState node has already been created, but + its tsm_state field is NULL. + The InitSampleScan function can palloc whatever internal state data is needed by the sampling method, and store a pointer to - it in node->tsm_state. + it in node->tsm_state. Information about the table to scan is accessible through other fields - of the SampleScanState node (but note that the - node->ss.ss_currentScanDesc scan descriptor is not set + of the SampleScanState node (but note that the + node->ss.ss_currentScanDesc scan descriptor is not set up yet). - eflags contains flag bits describing the executor's + eflags contains flag bits describing the executor's operating mode for this plan node. 
- When (eflags & EXEC_FLAG_EXPLAIN_ONLY) is true, + When (eflags & EXEC_FLAG_EXPLAIN_ONLY) is true, the scan will not actually be performed, so this function should only do - the minimum required to make the node state valid for EXPLAIN - and EndSampleScan. + the minimum required to make the node state valid for EXPLAIN + and EndSampleScan. This function can be omitted (set the pointer to NULL), in which case - BeginSampleScan must perform all initialization needed + BeginSampleScan must perform all initialization needed by the sampling method. @@ -184,32 +184,32 @@ BeginSampleScan (SampleScanState *node, This is called just before the first attempt to fetch a tuple, and may be called again if the scan needs to be restarted. Information about the table to scan is accessible through fields - of the SampleScanState node (but note that the - node->ss.ss_currentScanDesc scan descriptor is not set + of the SampleScanState node (but note that the + node->ss.ss_currentScanDesc scan descriptor is not set up yet). - The params array, of length nparams, contains the - values of the parameters supplied in the TABLESAMPLE clause. + The params array, of length nparams, contains the + values of the parameters supplied in the TABLESAMPLE clause. These will have the number and types specified in the sampling method's parameterTypes list, and have been checked to not be null. - seed contains a seed to use for any random numbers generated + seed contains a seed to use for any random numbers generated within the sampling method; it is either a hash derived from the - REPEATABLE value if one was given, or the result - of random() if not. + REPEATABLE value if one was given, or the result + of random() if not. - This function may adjust the fields node->use_bulkread - and node->use_pagemode. - If node->use_bulkread is true, which it is by + This function may adjust the fields node->use_bulkread + and node->use_pagemode. + If node->use_bulkread is true, which it is by default, the scan will use a buffer access strategy that encourages recycling buffers after use. It might be reasonable to set this - to false if the scan will visit only a small fraction of the + to false if the scan will visit only a small fraction of the table's pages. - If node->use_pagemode is true, which it is by + If node->use_pagemode is true, which it is by default, the scan will perform visibility checking in a single pass for all tuples on each visited page. It might be reasonable to set this - to false if the scan will select only a small fraction of the + to false if the scan will select only a small fraction of the tuples on each visited page. That will result in fewer tuple visibility checks being performed, though each one will be more expensive because it will require more locking. @@ -219,8 +219,8 @@ BeginSampleScan (SampleScanState *node, If the sampling method is marked repeatable_across_scans, it must be able to select the same set of tuples during a rescan as it did originally, that is - a fresh call of BeginSampleScan must lead to selecting the - same tuples as before (if the TABLESAMPLE parameters + a fresh call of BeginSampleScan must lead to selecting the + same tuples as before (if the TABLESAMPLE parameters and seed don't change). @@ -231,7 +231,7 @@ NextSampleBlock (SampleScanState *node); Returns the block number of the next page to be scanned, or - InvalidBlockNumber if no pages remain to be scanned. + InvalidBlockNumber if no pages remain to be scanned. 
@@ -251,34 +251,34 @@ NextSampleTuple (SampleScanState *node, Returns the offset number of the next tuple to be sampled on the - specified page, or InvalidOffsetNumber if no tuples remain to - be sampled. maxoffset is the largest offset number in use + specified page, or InvalidOffsetNumber if no tuples remain to + be sampled. maxoffset is the largest offset number in use on the page. - NextSampleTuple is not explicitly told which of the offset - numbers in the range 1 .. maxoffset actually contain valid + NextSampleTuple is not explicitly told which of the offset + numbers in the range 1 .. maxoffset actually contain valid tuples. This is not normally a problem since the core code ignores requests to sample missing or invisible tuples; that should not result in any bias in the sample. However, if necessary, the function can - examine node->ss.ss_currentScanDesc->rs_vistuples[] + examine node->ss.ss_currentScanDesc->rs_vistuples[] to identify which tuples are valid and visible. (This - requires node->use_pagemode to be true.) + requires node->use_pagemode to be true.) - NextSampleTuple must not assume - that blockno is the same page number returned by the most - recent NextSampleBlock call. It was returned by some - previous NextSampleBlock call, but the core code is allowed - to call NextSampleBlock in advance of actually scanning + NextSampleTuple must not assume + that blockno is the same page number returned by the most + recent NextSampleBlock call. It was returned by some + previous NextSampleBlock call, but the core code is allowed + to call NextSampleBlock in advance of actually scanning pages, so as to support prefetching. It is OK to assume that once - sampling of a given page begins, successive NextSampleTuple - calls all refer to the same page until InvalidOffsetNumber is + sampling of a given page begins, successive NextSampleTuple + calls all refer to the same page until InvalidOffsetNumber is returned. diff --git a/doc/src/sgml/tcn.sgml b/doc/src/sgml/tcn.sgml index 623094183d..aa2fe4f00a 100644 --- a/doc/src/sgml/tcn.sgml +++ b/doc/src/sgml/tcn.sgml @@ -12,16 +12,16 @@ - The tcn module provides a trigger function that notifies + The tcn module provides a trigger function that notifies listeners of changes to any table on which it is attached. It must be - used as an AFTER trigger FOR EACH ROW. + used as an AFTER trigger FOR EACH ROW. Only one parameter may be supplied to the function in a - CREATE TRIGGER statement, and that is optional. If supplied + CREATE TRIGGER statement, and that is optional. If supplied it will be used for the channel name for the notifications. If omitted - tcn will be used for the channel name. + tcn will be used for the channel name. @@ -47,7 +47,7 @@ test(# ); CREATE TABLE test=# create trigger tcndata_tcn_trigger test-# after insert or update or delete on tcndata -test-# for each row execute procedure triggered_change_notification(); +test-# for each row execute function triggered_change_notification(); CREATE TRIGGER test=# listen tcn; LISTEN diff --git a/doc/src/sgml/test-decoding.sgml b/doc/src/sgml/test-decoding.sgml index 4f4fd41e32..8356a3d67b 100644 --- a/doc/src/sgml/test-decoding.sgml +++ b/doc/src/sgml/test-decoding.sgml @@ -8,13 +8,13 @@ - test_decoding is an example of a logical decoding + test_decoding is an example of a logical decoding output plugin. It doesn't do anything especially useful, but can serve as - a starting point for developing your own decoder. + a starting point for developing your own output plugin. 
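 A minimal way to exercise the plugin from SQL, using the standard logical decoding functions (the slot name demo_slot and table demo_table are made up), is roughly:

SELECT pg_create_logical_replication_slot('demo_slot', 'test_decoding');
INSERT INTO demo_table VALUES (1);   -- any data change on an existing table
SELECT * FROM pg_logical_slot_get_changes('demo_slot', NULL, NULL);
SELECT pg_drop_replication_slot('demo_slot');

 This assumes wal_level is set to logical and that a free replication slot is available.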
- test_decoding receives WAL through the logical decoding + test_decoding receives WAL through the logical decoding mechanism and decodes it into text representations of the operations performed. diff --git a/doc/src/sgml/textsearch.sgml b/doc/src/sgml/textsearch.sgml index fe630a66b3..ecebade767 100644 --- a/doc/src/sgml/textsearch.sgml +++ b/doc/src/sgml/textsearch.sgml @@ -16,7 +16,7 @@ Full Text Searching (or just text search) provides - the capability to identify natural-language documents that + the capability to identify natural-language documents that satisfy a query, and optionally to sort them by relevance to the query. The most common type of search is to find all documents containing given query terms @@ -73,13 +73,13 @@ - Parsing documents into tokens. It is + Parsing documents into tokens. It is useful to identify various classes of tokens, e.g., numbers, words, complex words, email addresses, so that they can be processed differently. In principle token classes depend on the specific application, but for most purposes it is adequate to use a predefined set of classes. - PostgreSQL uses a parser to + PostgreSQL uses a parser to perform this step. A standard parser is provided, and custom parsers can be created for specific needs. @@ -87,19 +87,19 @@ - Converting tokens into lexemes. + Converting tokens into lexemes. A lexeme is a string, just like a token, but it has been - normalized so that different forms of the same word + normalized so that different forms of the same word are made alike. For example, normalization almost always includes folding upper-case letters to lower-case, and often involves removal - of suffixes (such as s or es in English). + of suffixes (such as s or es in English). This allows searches to find variant forms of the same word, without tediously entering all the possible variants. - Also, this step typically eliminates stop words, which + Also, this step typically eliminates stop words, which are words that are so common that they are useless for searching. (In short, then, tokens are raw fragments of the document text, while lexemes are words that are believed useful for indexing and searching.) - PostgreSQL uses dictionaries to + PostgreSQL uses dictionaries to perform this step. Various standard dictionaries are provided, and custom ones can be created for specific needs. @@ -112,7 +112,7 @@ as a sorted array of normalized lexemes. Along with the lexemes it is often desirable to store positional information to use for proximity ranking, so that a document that - contains a more dense region of query words is + contains a more dense region of query words is assigned a higher rank than one with scattered query words. @@ -132,7 +132,7 @@ - Map synonyms to a single word using Ispell. + Map synonyms to a single word using Ispell. @@ -145,14 +145,14 @@ Map different variations of a word to a canonical form using - an Ispell dictionary. + an Ispell dictionary. Map different variations of a word to a canonical form using - Snowball stemmer rules. + Snowball stemmer rules. @@ -160,12 +160,12 @@ A data type tsvector is provided for storing preprocessed documents, along with a type tsquery for representing processed - queries (). There are many + queries (). There are many functions and operators available for these data types - (), the most important of which is + (), the most important of which is the match operator @@, which we introduce in - . Full text searches can be accelerated - using indexes (). + . 
Full text searches can be accelerated + using indexes (). @@ -178,7 +178,7 @@ - A document is the unit of searching in a full text search + A document is the unit of searching in a full text search system; for example, a magazine article or email message. The text search engine must be able to parse documents and store associations of lexemes (key words) with their parent document. Later, these associations are @@ -226,11 +226,11 @@ WHERE mid = did AND mid = 12; For text search purposes, each document must be reduced to the - preprocessed tsvector format. Searching and ranking - are performed entirely on the tsvector representation + preprocessed tsvector format. Searching and ranking + are performed entirely on the tsvector representation of a document — the original text need only be retrieved when the document has been selected for display to a user. - We therefore often speak of the tsvector as being the + We therefore often speak of the tsvector as being the document, but of course it is only a compact representation of the full document. @@ -264,12 +264,12 @@ SELECT 'fat & cow'::tsquery @@ 'a fat cat sat on a mat and ate a fat rat'::t text, any more than a tsvector is. A tsquery contains search terms, which must be already-normalized lexemes, and may combine multiple terms using AND, OR, NOT, and FOLLOWED BY operators. - (For syntax details see .) There are - functions to_tsquery, plainto_tsquery, - and phraseto_tsquery + (For syntax details see .) There are + functions to_tsquery, plainto_tsquery, + and phraseto_tsquery that are helpful in converting user-written text into a proper tsquery, primarily by normalizing words appearing in - the text. Similarly, to_tsvector is used to parse and + the text. Similarly, to_tsvector is used to parse and normalize a document string. So in practice a text search match would look more like this: @@ -289,15 +289,15 @@ SELECT 'fat cats ate fat rats'::tsvector @@ to_tsquery('fat & rat'); f - since here no normalization of the word rats will occur. - The elements of a tsvector are lexemes, which are assumed - already normalized, so rats does not match rat. + since here no normalization of the word rats will occur. + The elements of a tsvector are lexemes, which are assumed + already normalized, so rats does not match rat. The @@ operator also supports text input, allowing explicit conversion of a text - string to tsvector or tsquery to be skipped + string to tsvector or tsquery to be skipped in simple cases. The variants available are: @@ -317,19 +317,19 @@ text @@ text - Within a tsquery, the & (AND) operator + Within a tsquery, the & (AND) operator specifies that both its arguments must appear in the document to have a match. Similarly, the | (OR) operator specifies that - at least one of its arguments must appear, while the ! (NOT) - operator specifies that its argument must not appear in + at least one of its arguments must appear, while the ! (NOT) + operator specifies that its argument must not appear in order to have a match. - For example, the query fat & ! rat matches documents that - contain fat but not rat. + For example, the query fat & ! rat matches documents that + contain fat but not rat. Searching for phrases is possible with the help of - the <-> (FOLLOWED BY) tsquery operator, which + the <-> (FOLLOWED BY) tsquery operator, which matches only if its arguments have matches that are adjacent and in the given order. 
For example: @@ -346,13 +346,13 @@ SELECT to_tsvector('error is not fatal') @@ to_tsquery('fatal <-> error'); There is a more general version of the FOLLOWED BY operator having the - form <N>, - where N is an integer standing for the difference between + form <N>, + where N is an integer standing for the difference between the positions of the matching lexemes. <1> is - the same as <->, while <2> + the same as <->, while <2> allows exactly one other lexeme to appear between the matches, and so - on. The phraseto_tsquery function makes use of this - operator to construct a tsquery that can match a multi-word + on. The phraseto_tsquery function makes use of this + operator to construct a tsquery that can match a multi-word phrase when some of the words are stop words. For example: @@ -374,7 +374,7 @@ SELECT phraseto_tsquery('the cats ate the rats'); - Parentheses can be used to control nesting of the tsquery + Parentheses can be used to control nesting of the tsquery operators. Without parentheses, | binds least tightly, then &, then <->, and ! most tightly. @@ -384,20 +384,20 @@ SELECT phraseto_tsquery('the cats ate the rats'); It's worth noticing that the AND/OR/NOT operators mean something subtly different when they are within the arguments of a FOLLOWED BY operator than when they are not, because within FOLLOWED BY the exact position of - the match is significant. For example, normally !x matches - only documents that do not contain x anywhere. - But !x <-> y matches y if it is not - immediately after an x; an occurrence of x + the match is significant. For example, normally !x matches + only documents that do not contain x anywhere. + But !x <-> y matches y if it is not + immediately after an x; an occurrence of x elsewhere in the document does not prevent a match. Another example is - that x & y normally only requires that x - and y both appear somewhere in the document, but - (x & y) <-> z requires x - and y to match at the same place, immediately before - a z. Thus this query behaves differently from - x <-> z & y <-> z, which will match a - document containing two separate sequences x z and - y z. (This specific query is useless as written, - since x and y could not match at the same place; + that x & y normally only requires that x + and y both appear somewhere in the document, but + (x & y) <-> z requires x + and y to match at the same place, immediately before + a z. Thus this query behaves differently from + x <-> z & y <-> z, which will match a + document containing two separate sequences x z and + y z. (This specific query is useless as written, + since x and y could not match at the same place; but with more complex situations such as prefix-match patterns, a query of this form could be useful.) @@ -412,26 +412,26 @@ SELECT phraseto_tsquery('the cats ate the rats'); skip indexing certain words (stop words), process synonyms, and use sophisticated parsing, e.g., parse based on more than just white space. This functionality is controlled by text search - configurations. PostgreSQL comes with predefined + configurations. PostgreSQL comes with predefined configurations for many languages, and you can easily create your own - configurations. (psql's \dF command + configurations. (psql's \dF command shows all available configurations.) During installation an appropriate configuration is selected and - is set accordingly - in postgresql.conf. If you are using the same text search + is set accordingly + in postgresql.conf. 
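 The difference just described can be checked directly with literal values (a small, self-contained sketch in which the lexemes x, y and z are placeholders):

SELECT 'x:1 z:2 y:3 z:4'::tsvector @@ 'x <-> z & y <-> z'::tsquery;   -- true, both sequences are present
SELECT 'x:1 z:2 y:3 z:4'::tsvector @@ '(x & y) <-> z'::tsquery;       -- false, x and y never match at the same position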
If you are using the same text search configuration for the entire cluster you can use the value in - postgresql.conf. To use different configurations + postgresql.conf. To use different configurations throughout the cluster but the same configuration within any one database, - use ALTER DATABASE ... SET. Otherwise, you can set + use ALTER DATABASE ... SET. Otherwise, you can set default_text_search_config in each session. Each text search function that depends on a configuration has an optional - regconfig argument, so that the configuration to use can be + regconfig argument, so that the configuration to use can be specified explicitly. default_text_search_config is used only when this argument is omitted. @@ -439,28 +439,28 @@ SELECT phraseto_tsquery('the cats ate the rats'); To make it easier to build custom text search configurations, a configuration is built up from simpler database objects. - PostgreSQL's text search facility provides + PostgreSQL's text search facility provides four types of configuration-related database objects: - Text search parsers break documents into tokens + Text search parsers break documents into tokens and classify each token (for example, as words or numbers). - Text search dictionaries convert tokens to normalized + Text search dictionaries convert tokens to normalized form and reject stop words. - Text search templates provide the functions underlying + Text search templates provide the functions underlying dictionaries. (A dictionary simply specifies a template and a set of parameters for the template.) @@ -468,7 +468,7 @@ SELECT phraseto_tsquery('the cats ate the rats'); - Text search configurations select a parser and a set + Text search configurations select a parser and a set of dictionaries to use to normalize the tokens produced by the parser. @@ -478,8 +478,8 @@ SELECT phraseto_tsquery('the cats ate the rats'); Text search parsers and templates are built from low-level C functions; therefore it requires C programming ability to develop new ones, and superuser privileges to install one into a database. (There are examples - of add-on parsers and templates in the contrib/ area of the - PostgreSQL distribution.) Since dictionaries and + of add-on parsers and templates in the contrib/ area of the + PostgreSQL distribution.) Since dictionaries and configurations just parameterize and connect together some underlying parsers and templates, no special privilege is needed to create a new dictionary or configuration. Examples of creating custom dictionaries and @@ -504,8 +504,8 @@ SELECT phraseto_tsquery('the cats ate the rats'); It is possible to do a full text search without an index. A simple query - to print the title of each row that contains the word - friend in its body field is: + to print the title of each row that contains the word + friend in its body field is: SELECT title @@ -513,13 +513,13 @@ FROM pgweb WHERE to_tsvector('english', body) @@ to_tsquery('english', 'friend'); - This will also find related words such as friends - and friendly, since all these are reduced to the same + This will also find related words such as friends + and friendly, since all these are reduced to the same normalized lexeme. - The query above specifies that the english configuration + The query above specifies that the english configuration is to be used to parse and normalize the strings. 
Alternatively we could omit the configuration parameters: @@ -530,13 +530,13 @@ WHERE to_tsvector(body) @@ to_tsquery('friend'); This query will use the configuration set by . + linkend="guc-default-text-search-config"/>. A more complex example is to - select the ten most recent documents that contain create and - table in the title or body: + select the ten most recent documents that contain create and + table in the title or body: SELECT title @@ -565,7 +565,7 @@ LIMIT 10; We can create a GIN index () to speed up text searches: + linkend="textsearch-indexes"/>) to speed up text searches: CREATE INDEX pgweb_idx ON pgweb USING GIN (to_tsvector('english', body)); @@ -573,11 +573,11 @@ CREATE INDEX pgweb_idx ON pgweb USING GIN (to_tsvector('english', body)); Notice that the 2-argument version of to_tsvector is used. Only text search functions that specify a configuration name can - be used in expression indexes (). + be used in expression indexes (). This is because the index contents must be unaffected by . If they were affected, the + linkend="guc-default-text-search-config"/>. If they were affected, the index contents might be inconsistent because different entries could - contain tsvectors that were created with different text search + contain tsvectors that were created with different text search configurations, and there would be no way to guess which was which. It would be impossible to dump and restore such an index correctly. @@ -587,8 +587,8 @@ CREATE INDEX pgweb_idx ON pgweb USING GIN (to_tsvector('english', body)); used in the index above, only a query reference that uses the 2-argument version of to_tsvector with the same configuration name will use that index. That is, WHERE - to_tsvector('english', body) @@ 'a & b' can use the index, - but WHERE to_tsvector(body) @@ 'a & b' cannot. + to_tsvector('english', body) @@ 'a & b' can use the index, + but WHERE to_tsvector(body) @@ 'a & b' cannot. This ensures that an index will be used only with the same configuration used to create the index entries. @@ -601,13 +601,13 @@ CREATE INDEX pgweb_idx ON pgweb USING GIN (to_tsvector('english', body)); CREATE INDEX pgweb_idx ON pgweb USING GIN (to_tsvector(config_name, body)); - where config_name is a column in the pgweb + where config_name is a column in the pgweb table. This allows mixed configurations in the same index while recording which configuration was used for each index entry. This would be useful, for example, if the document collection contained documents in different languages. Again, queries that are meant to use the index must be phrased to match, e.g., - WHERE to_tsvector(config_name, body) @@ 'a & b'. + WHERE to_tsvector(config_name, body) @@ 'a & b'. @@ -619,11 +619,11 @@ CREATE INDEX pgweb_idx ON pgweb USING GIN (to_tsvector('english', title || ' ' | - Another approach is to create a separate tsvector column - to hold the output of to_tsvector. This example is a + Another approach is to create a separate tsvector column + to hold the output of to_tsvector. 
This example is a concatenation of title and body, - using coalesce to ensure that one field will still be - indexed when the other is NULL: + using coalesce to ensure that one field will still be + indexed when the other is NULL: ALTER TABLE pgweb ADD COLUMN textsearchable_index_col tsvector; @@ -649,11 +649,11 @@ LIMIT 10; - When using a separate column to store the tsvector + When using a separate column to store the tsvector representation, - it is necessary to create a trigger to keep the tsvector - column current anytime title or body changes. - explains how to do that. + it is necessary to create a trigger to keep the tsvector + column current anytime title or body changes. + explains how to do that. @@ -661,13 +661,13 @@ LIMIT 10; is that it is not necessary to explicitly specify the text search configuration in queries in order to make use of the index. As shown in the example above, the query can depend on - default_text_search_config. Another advantage is that + default_text_search_config. Another advantage is that searches will be faster, since it will not be necessary to redo the - to_tsvector calls to verify index matches. (This is more + to_tsvector calls to verify index matches. (This is more important when using a GiST index than a GIN index; see .) The expression-index approach is + linkend="textsearch-indexes"/>.) The expression-index approach is simpler to set up, however, and it requires less disk space since the - tsvector representation is not stored explicitly. + tsvector representation is not stored explicitly. @@ -701,7 +701,7 @@ LIMIT 10; -to_tsvector( config regconfig, document text) returns tsvector +to_tsvector( config regconfig, document text) returns tsvector @@ -732,14 +732,14 @@ SELECT to_tsvector('english', 'a fat cat sat on a mat - it ate a fat rats'); The to_tsvector function internally calls a parser which breaks the document text into tokens and assigns a type to each token. For each token, a list of - dictionaries () is consulted, + dictionaries () is consulted, where the list can vary depending on the token type. The first dictionary - that recognizes the token emits one or more normalized + that recognizes the token emits one or more normalized lexemes to represent the token. For example, rats became rat because one of the dictionaries recognized that the word rats is a plural form of rat. Some words are recognized as - stop words (), which + stop words (), which causes them to be ignored since they occur too frequently to be useful in searching. In our example these are a, on, and it. @@ -749,7 +749,7 @@ SELECT to_tsvector('english', 'a fat cat sat on a mat - it ate a fat rats'); (Space symbols), meaning space tokens will never be indexed. The choices of parser, dictionaries and which types of tokens to index are determined by the selected text search configuration (). It is possible to have + linkend="textsearch-configuration"/>). It is possible to have many different configurations in the same database, and predefined configurations are available for various languages. In our example we used the default configuration english for the @@ -758,9 +758,9 @@ SELECT to_tsvector('english', 'a fat cat sat on a mat - it ate a fat rats'); The function setweight can be used to label the - entries of a tsvector with a given weight, - where a weight is one of the letters A, B, - C, or D. + entries of a tsvector with a given weight, + where a weight is one of the letters A, B, + C, or D. 
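For instance, a quick sketch that labels every entry of a vector with weight A:

SELECT setweight(to_tsvector('english', 'fat cats ate rats'), 'A');
-- expected: 'ate':3A 'cat':2A 'fat':1A 'rat':4A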
This is typically used to mark entries coming from different parts of a document, such as title versus body. Later, this information can be used for ranking of search results. @@ -783,9 +783,9 @@ UPDATE tt SET ti = Here we have used setweight to label the source of each lexeme in the finished tsvector, and then merged - the labeled tsvector values using the tsvector - concatenation operator ||. ( gives details about these + the labeled tsvector values using the tsvector + concatenation operator ||. ( gives details about these operations.) @@ -797,13 +797,16 @@ UPDATE tt SET ti = PostgreSQL provides the functions to_tsquery, - plainto_tsquery, and - phraseto_tsquery + plainto_tsquery, + phraseto_tsquery and + websearch_to_tsquery for converting a query to the tsquery data type. to_tsquery offers access to more features than either plainto_tsquery or - phraseto_tsquery, but it is less forgiving - about its input. + phraseto_tsquery, but it is less forgiving about its + input. websearch_to_tsquery is a simplified version + of to_tsquery with an alternative syntax, similar + to the one used by web search engines. @@ -811,20 +814,20 @@ UPDATE tt SET ti = -to_tsquery( config regconfig, querytext text) returns tsquery +to_tsquery( config regconfig, querytext text) returns tsquery - to_tsquery creates a tsquery value from + to_tsquery creates a tsquery value from querytext, which must consist of single tokens - separated by the tsquery operators & (AND), + separated by the tsquery operators & (AND), | (OR), ! (NOT), and <-> (FOLLOWED BY), possibly grouped using parentheses. In other words, the input to to_tsquery must already follow the general rules for - tsquery input, as described in . The difference is that while basic - tsquery input takes the tokens at face value, + tsquery input, as described in . The difference is that while basic + tsquery input takes the tokens at face value, to_tsquery normalizes each token into a lexeme using the specified or default configuration, and discards any tokens that are stop words according to the configuration. For example: @@ -836,8 +839,8 @@ SELECT to_tsquery('english', 'The & Fat & Rats'); 'fat' & 'rat' - As in basic tsquery input, weight(s) can be attached to each - lexeme to restrict it to match only tsvector lexemes of those + As in basic tsquery input, weight(s) can be attached to each + lexeme to restrict it to match only tsvector lexemes of those weight(s). For example: @@ -847,7 +850,7 @@ SELECT to_tsquery('english', 'Fat | Rats:AB'); 'fat' | 'rat':AB - Also, * can be attached to a lexeme to specify prefix matching: + Also, * can be attached to a lexeme to specify prefix matching: SELECT to_tsquery('supern:*A & star:A*B'); @@ -856,7 +859,7 @@ SELECT to_tsquery('supern:*A & star:A*B'); 'supern':*A & 'star':*AB - Such a lexeme will match any word in a tsvector that begins + Such a lexeme will match any word in a tsvector that begins with the given string. @@ -884,13 +887,13 @@ SELECT to_tsquery('''supernovae stars'' & !crab'); -plainto_tsquery( config regconfig, querytext text) returns tsquery +plainto_tsquery( config regconfig, querytext text) returns tsquery - plainto_tsquery transforms the unformatted text + plainto_tsquery transforms the unformatted text querytext to a tsquery value. - The text is parsed and normalized much as for to_tsvector, + The text is parsed and normalized much as for to_tsvector, then the & (AND) tsquery operator is inserted between surviving words. 
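One point of the prefix matching described above that is worth illustrating: the prefix is compared against the stored (already normalized) lexemes, not the original words. A small sketch:

SELECT to_tsvector('english', 'supernovae') @@ to_tsquery('english', 'supern:*');
-- expected: t  ('supernovae' is stored as the lexeme 'supernova', which begins with 'supern')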
@@ -905,7 +908,7 @@ SELECT plainto_tsquery('english', 'The Fat Rats'); 'fat' & 'rat' - Note that plainto_tsquery will not + Note that plainto_tsquery will not recognize tsquery operators, weight labels, or prefix-match labels in its input: @@ -924,16 +927,16 @@ SELECT plainto_tsquery('english', 'The Fat & Rats:C'); -phraseto_tsquery( config regconfig, querytext text) returns tsquery +phraseto_tsquery( config regconfig, querytext text) returns tsquery - phraseto_tsquery behaves much like - plainto_tsquery, except that it inserts + phraseto_tsquery behaves much like + plainto_tsquery, except that it inserts the <-> (FOLLOWED BY) operator between surviving words instead of the & (AND) operator. Also, stop words are not simply discarded, but are accounted for by - inserting <N> operators rather + inserting <N> operators rather than <-> operators. This function is useful when searching for exact lexeme sequences, since the FOLLOWED BY operators check lexeme order not just the presence of all the lexemes. @@ -949,8 +952,8 @@ SELECT phraseto_tsquery('english', 'The Fat Rats'); 'fat' <-> 'rat' - Like plainto_tsquery, the - phraseto_tsquery function will not + Like plainto_tsquery, the + phraseto_tsquery function will not recognize tsquery operators, weight labels, or prefix-match labels in its input: @@ -962,6 +965,83 @@ SELECT phraseto_tsquery('english', 'The Fat & Rats:C'); + +websearch_to_tsquery( config regconfig, querytext text) returns tsquery + + + + websearch_to_tsquery creates a tsquery + value from querytext using an alternative + syntax in which simple unformatted text is a valid query. + Unlike plainto_tsquery + and phraseto_tsquery, it also recognizes certain + operators. Moreover, this function should never raise syntax errors, + which makes it possible to use raw user-supplied input for search. + The following syntax is supported: + + + + unquoted text: text not inside quote marks will be + converted to terms separated by & operators, as + if processed by + plainto_tsquery. + + + + + "quoted text": text inside quote marks will be + converted to terms separated by <-> + operators, as if processed by phraseto_tsquery. + + + + + OR: logical or will be converted to + the | operator. + + + + + -: the logical not operator, converted to the + the ! operator. 
+ + + + + + Examples: + +SELECT websearch_to_tsquery('english', 'The fat rats'); + websearch_to_tsquery +---------------------- + 'fat' & 'rat' +(1 row) + +SELECT websearch_to_tsquery('english', '"supernovae stars" -crab'); + websearch_to_tsquery +---------------------------------- + 'supernova' <-> 'star' & !'crab' +(1 row) + +SELECT websearch_to_tsquery('english', '"sad cat" or "fat rat"'); + websearch_to_tsquery +----------------------------------- + 'sad' <-> 'cat' | 'fat' <-> 'rat' +(1 row) + +SELECT websearch_to_tsquery('english', 'signal -"segmentation fault"'); + websearch_to_tsquery +--------------------------------------- + 'signal' & !( 'segment' <-> 'fault' ) +(1 row) + +SELECT websearch_to_tsquery('english', '""" )( dummy \\ query <->'); + websearch_to_tsquery +---------------------- + 'dummi' & 'queri' +(1 row) + + @@ -994,7 +1074,7 @@ SELECT phraseto_tsquery('english', 'The Fat & Rats:C'); ts_rank - ts_rank( weights float4[], vector tsvector, query tsquery , normalization integer ) returns float4 + ts_rank( weights float4[], vector tsvector, query tsquery , normalization integer ) returns float4 @@ -1011,7 +1091,7 @@ SELECT phraseto_tsquery('english', 'The Fat & Rats:C'); ts_rank_cd - ts_rank_cd( weights float4[], vector tsvector, query tsquery , normalization integer ) returns float4 + ts_rank_cd( weights float4[], vector tsvector, query tsquery , normalization integer ) returns float4 @@ -1020,19 +1100,19 @@ SELECT phraseto_tsquery('english', 'The Fat & Rats:C'); ranking for the given document vector and query, as described in Clarke, Cormack, and Tudhope's "Relevance Ranking for One to Three Term Queries" in the journal "Information Processing and Management", - 1999. Cover density is similar to ts_rank ranking + 1999. Cover density is similar to ts_rank ranking except that the proximity of matching lexemes to each other is taken into consideration. This function requires lexeme positional information to perform - its calculation. Therefore, it ignores any stripped - lexemes in the tsvector. If there are no unstripped + its calculation. Therefore, it ignores any stripped + lexemes in the tsvector. If there are no unstripped lexemes in the input, the result will be zero. (See for more information - about the strip function and positional information - in tsvectors.) + linkend="textsearch-manipulate-tsvector"/> for more information + about the strip function and positional information + in tsvectors.) @@ -1043,7 +1123,7 @@ SELECT phraseto_tsquery('english', 'The Fat & Rats:C'); For both these functions, - the optional weights + the optional weights argument offers the ability to weigh word instances more or less heavily depending on how they are labeled. The weight arrays specify how heavily to weigh each category of word, in the order: @@ -1052,7 +1132,7 @@ SELECT phraseto_tsquery('english', 'The Fat & Rats:C'); {D-weight, C-weight, B-weight, A-weight} - If no weights are provided, + If no weights are provided, then these defaults are used: @@ -1094,7 +1174,7 @@ SELECT phraseto_tsquery('english', 'The Fat & Rats:C'); 4 divides the rank by the mean harmonic distance between extents - (this is implemented only by ts_rank_cd) + (this is implemented only by ts_rank_cd) @@ -1189,7 +1269,7 @@ LIMIT 10; To present search results it is ideal to show a part of each document and how it is related to the query. Usually, search engines show fragments of - the document with marked search terms. PostgreSQL + the document with marked search terms. 
PostgreSQL provides a function ts_headline that implements this functionality. @@ -1199,7 +1279,7 @@ LIMIT 10; -ts_headline( config regconfig, document text, query tsquery , options text ) returns text +ts_headline( config regconfig, document text, query tsquery , options text ) returns text @@ -1215,13 +1295,13 @@ ts_headline( config If an options string is specified it must consist of a comma-separated list of one or more - option=value pairs. + option=value pairs. The available options are: - StartSel, StopSel: the strings with + StartSel, StopSel: the strings with which to delimit query words appearing in the document, to distinguish them from other excerpted words. You must double-quote these strings if they contain spaces or commas. @@ -1229,7 +1309,7 @@ ts_headline( config - MaxWords, MinWords: these numbers + MaxWords, MinWords: these numbers determine the longest and shortest headlines to output. @@ -1256,10 +1336,10 @@ ts_headline( config MaxWords and - words of length ShortWord or less are dropped at the start + each side. Each fragment will be of at most MaxWords and + words of length ShortWord or less are dropped at the start and end of each fragment. If not all query words are found in the - document, then a single fragment of the first MinWords + document, then a single fragment of the first MinWords in the document will be displayed. @@ -1271,6 +1351,7 @@ ts_headline( config + These option names are recognized case-insensitively. Any unspecified options receive these defaults: @@ -1312,7 +1393,7 @@ query.', - ts_headline uses the original document, not a + ts_headline uses the original document, not a tsvector summary, so it can be slow and should be used with care. @@ -1333,11 +1414,11 @@ query.', Manipulating Documents - showed how raw textual - documents can be converted into tsvector values. + showed how raw textual + documents can be converted into tsvector values. PostgreSQL also provides functions and operators that can be used to manipulate documents that are already - in tsvector form. + in tsvector form. @@ -1349,18 +1430,18 @@ query.', tsvector concatenation - tsvector || tsvector + tsvector || tsvector - The tsvector concatenation operator + The tsvector concatenation operator returns a vector which combines the lexemes and positional information of the two vectors given as arguments. Positions and weight labels are retained during the concatenation. Positions appearing in the right-hand vector are offset by the largest position mentioned in the left-hand vector, so that the result is - nearly equivalent to the result of performing to_tsvector + nearly equivalent to the result of performing to_tsvector on the concatenation of the two original document strings. (The equivalence is not exact, because any stop-words removed from the end of the left-hand argument will not affect the result, whereas @@ -1370,11 +1451,11 @@ query.', One advantage of using concatenation in the vector form, rather than - concatenating text before applying to_tsvector, is that + concatenating text before applying to_tsvector, is that you can use different configurations to parse different sections - of the document. Also, because the setweight function + of the document. Also, because the setweight function marks all lexemes of the given vector the same way, it is necessary - to parse the text and do setweight before concatenating + to parse the text and do setweight before concatenating if you want to label different parts of the document with different weights. 
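A minimal sketch of that pattern, with the two strings standing in for a document's title and body:

SELECT setweight(to_tsvector('english', 'Search basics'), 'A') ||
       setweight(to_tsvector('english', 'all about full text search'), 'B');
-- title lexemes carry weight A, body lexemes weight B,
-- and the body positions are offset past the title's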
@@ -1388,13 +1469,13 @@ query.', setweight - setweight(vector tsvector, weight "char") returns tsvector + setweight(vector tsvector, weight "char") returns tsvector - setweight returns a copy of the input vector in which every - position has been labeled with the given weight, either + setweight returns a copy of the input vector in which every + position has been labeled with the given weight, either A, B, C, or D. (D is the default for new vectors and as such is not displayed on output.) These labels are @@ -1403,9 +1484,9 @@ query.', - Note that weight labels apply to positions, not - lexemes. If the input vector has been stripped of - positions then setweight does nothing. + Note that weight labels apply to positions, not + lexemes. If the input vector has been stripped of + positions then setweight does nothing. @@ -1416,7 +1497,7 @@ query.', length(tsvector) - length(vector tsvector) returns integer + length(vector tsvector) returns integer @@ -1433,7 +1514,7 @@ query.', strip - strip(vector tsvector) returns tsvector + strip(vector tsvector) returns tsvector @@ -1443,7 +1524,7 @@ query.', smaller than an unstripped vector, but it is also less useful. Relevance ranking does not work as well on stripped vectors as unstripped ones. Also, - the <-> (FOLLOWED BY) tsquery operator + the <-> (FOLLOWED BY) tsquery operator will never match stripped input, since it cannot determine the distance between lexeme occurrences. @@ -1454,8 +1535,8 @@ query.', - A full list of tsvector-related functions is available - in . + A full list of tsvector-related functions is available + in . @@ -1464,11 +1545,11 @@ query.', Manipulating Queries - showed how raw textual - queries can be converted into tsquery values. + showed how raw textual + queries can be converted into tsquery values. PostgreSQL also provides functions and operators that can be used to manipulate queries that are already - in tsquery form. + in tsquery form. @@ -1476,7 +1557,7 @@ query.', - tsquery && tsquery + tsquery && tsquery @@ -1490,7 +1571,7 @@ query.', - tsquery || tsquery + tsquery || tsquery @@ -1504,7 +1585,7 @@ query.', - !! tsquery + !! tsquery @@ -1518,15 +1599,15 @@ query.', - tsquery <-> tsquery + tsquery <-> tsquery Returns a query that searches for a match to the first given query immediately followed by a match to the second given query, using - the <-> (FOLLOWED BY) - tsquery operator. For example: + the <-> (FOLLOWED BY) + tsquery operator. For example: SELECT to_tsquery('fat') <-> to_tsquery('cat | rat'); @@ -1546,7 +1627,7 @@ SELECT to_tsquery('fat') <-> to_tsquery('cat | rat'); tsquery_phrase - tsquery_phrase(query1 tsquery, query2 tsquery [, distance integer ]) returns tsquery + tsquery_phrase(query1 tsquery, query2 tsquery [, distance integer ]) returns tsquery @@ -1554,8 +1635,8 @@ SELECT to_tsquery('fat') <-> to_tsquery('cat | rat'); Returns a query that searches for a match to the first given query followed by a match to the second given query at a distance of at distance lexemes, using - the <N> - tsquery operator. For example: + the <N> + tsquery operator. For example: SELECT tsquery_phrase(to_tsquery('fat'), to_tsquery('cat'), 10); @@ -1575,13 +1656,13 @@ SELECT tsquery_phrase(to_tsquery('fat'), to_tsquery('cat'), 10); numnode - numnode(query tsquery) returns integer + numnode(query tsquery) returns integer Returns the number of nodes (lexemes plus operators) in a - tsquery. This function is useful + tsquery. 
This function is useful to determine if the query is meaningful (returns > 0), or contains only stop words (returns 0). Examples: @@ -1609,12 +1690,12 @@ SELECT numnode('foo & bar'::tsquery); querytree - querytree(query tsquery) returns text + querytree(query tsquery) returns text - Returns the portion of a tsquery that can be used for + Returns the portion of a tsquery that can be used for searching an index. This function is useful for detecting unindexable queries, for example those containing only stop words or only negated terms. For example: @@ -1640,18 +1721,18 @@ SELECT querytree(to_tsquery('!defined')); The ts_rewrite family of functions search a - given tsquery for occurrences of a target + given tsquery for occurrences of a target subquery, and replace each occurrence with a substitute subquery. In essence this operation is a - tsquery-specific version of substring replacement. + tsquery-specific version of substring replacement. A target and substitute combination can be - thought of as a query rewrite rule. A collection + thought of as a query rewrite rule. A collection of such rewrite rules can be a powerful search aid. For example, you can expand the search using synonyms - (e.g., new york, big apple, nyc, - gotham) or narrow the search to direct the user to some hot + (e.g., new york, big apple, nyc, + gotham) or narrow the search to direct the user to some hot topic. There is some overlap in functionality between this feature - and thesaurus dictionaries (). + and thesaurus dictionaries (). However, you can modify a set of rewrite rules on-the-fly without reindexing, whereas updating a thesaurus requires reindexing to be effective. @@ -1662,16 +1743,16 @@ SELECT querytree(to_tsquery('!defined')); - ts_rewrite (query tsquery, target tsquery, substitute tsquery) returns tsquery + ts_rewrite (query tsquery, target tsquery, substitute tsquery) returns tsquery - This form of ts_rewrite simply applies a single - rewrite rule: target - is replaced by substitute + This form of ts_rewrite simply applies a single + rewrite rule: target + is replaced by substitute wherever it appears in query. For example: + class="parameter">query. For example: SELECT ts_rewrite('a & b'::tsquery, 'a'::tsquery, 'c'::tsquery); @@ -1686,18 +1767,18 @@ SELECT ts_rewrite('a & b'::tsquery, 'a'::tsquery, 'c'::tsquery); - ts_rewrite (query tsquery, select text) returns tsquery + ts_rewrite (query tsquery, select text) returns tsquery - This form of ts_rewrite accepts a starting - query and a SQL select command, which - is given as a text string. The select must yield two - columns of tsquery type. For each row of the - select result, occurrences of the first column value + This form of ts_rewrite accepts a starting + query and a SQL select command, which + is given as a text string. The select must yield two + columns of tsquery type. For each row of the + select result, occurrences of the first column value (the target) are replaced by the second column value (the substitute) - within the current query value. For example: + within the current query value. For example: CREATE TABLE aliases (t tsquery PRIMARY KEY, s tsquery); @@ -1713,7 +1794,7 @@ SELECT ts_rewrite('a & b'::tsquery, 'SELECT t,s FROM aliases'); Note that when multiple rewrite rules are applied in this way, the order of application can be important; so in practice you will - want the source query to ORDER BY some ordering key. + want the source query to ORDER BY some ordering key. 
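A hedged sketch of that advice, using a hypothetical aliases2 table with an explicit ordering column ord:

CREATE TABLE aliases2 (ord int, t tsquery, s tsquery);
INSERT INTO aliases2 VALUES (1, 'a'::tsquery, 'c'::tsquery),
                            (2, 'b'::tsquery, 'd'::tsquery);
SELECT ts_rewrite('a & b'::tsquery, 'SELECT t, s FROM aliases2 ORDER BY ord');
-- expected: 'c' & 'd'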
@@ -1777,22 +1858,22 @@ SELECT ts_rewrite('a & b'::tsquery, - When using a separate column to store the tsvector representation + When using a separate column to store the tsvector representation of your documents, it is necessary to create a trigger to update the - tsvector column when the document content columns change. + tsvector column when the document content columns change. Two built-in trigger functions are available for this, or you can write your own. -tsvector_update_trigger(tsvector_column_name, config_name, text_column_name , ... ) -tsvector_update_trigger_column(tsvector_column_name, config_column_name, text_column_name , ... ) +tsvector_update_trigger(tsvector_column_name, config_name, text_column_name , ... ) +tsvector_update_trigger_column(tsvector_column_name, config_column_name, text_column_name , ... ) - These trigger functions automatically compute a tsvector + These trigger functions automatically compute a tsvector column from one or more textual columns, under the control of - parameters specified in the CREATE TRIGGER command. + parameters specified in the CREATE TRIGGER command. An example of their use is: @@ -1803,7 +1884,7 @@ CREATE TABLE messages ( ); CREATE TRIGGER tsvectorupdate BEFORE INSERT OR UPDATE -ON messages FOR EACH ROW EXECUTE PROCEDURE +ON messages FOR EACH ROW EXECUTE FUNCTION tsvector_update_trigger(tsv, 'pg_catalog.english', title, body); INSERT INTO messages VALUES('title here', 'the body text is here'); @@ -1819,24 +1900,24 @@ SELECT title, body FROM messages WHERE tsv @@ to_tsquery('title & body'); title here | the body text is here - Having created this trigger, any change in title or - body will automatically be reflected into - tsv, without the application having to worry about it. + Having created this trigger, any change in title or + body will automatically be reflected into + tsv, without the application having to worry about it. - The first trigger argument must be the name of the tsvector + The first trigger argument must be the name of the tsvector column to be updated. The second argument specifies the text search configuration to be used to perform the conversion. For - tsvector_update_trigger, the configuration name is simply + tsvector_update_trigger, the configuration name is simply given as the second trigger argument. It must be schema-qualified as shown above, so that the trigger behavior will not change with changes - in search_path. For - tsvector_update_trigger_column, the second trigger argument + in search_path. For + tsvector_update_trigger_column, the second trigger argument is the name of another table column, which must be of type - regconfig. This allows a per-row selection of configuration + regconfig. This allows a per-row selection of configuration to be made. The remaining argument(s) are the names of textual columns - (of type text, varchar, or char). These + (of type text, varchar, or char). These will be included in the document in the order given. NULL values will be skipped (but the other columns will still be indexed). 
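A hedged sketch of the per-row-configuration variant; messages2 and its config_name column are illustrative names:

CREATE TABLE messages2 (
    title       text,
    body        text,
    config_name regconfig,   -- per-row choice of text search configuration
    tsv         tsvector
);

CREATE TRIGGER tsvectorupdate BEFORE INSERT OR UPDATE
ON messages2 FOR EACH ROW EXECUTE FUNCTION
tsvector_update_trigger_column(tsv, config_name, title, body);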
@@ -1859,15 +1940,15 @@ end $$ LANGUAGE plpgsql; CREATE TRIGGER tsvectorupdate BEFORE INSERT OR UPDATE - ON messages FOR EACH ROW EXECUTE PROCEDURE messages_trigger(); + ON messages FOR EACH ROW EXECUTE FUNCTION messages_trigger(); Keep in mind that it is important to specify the configuration name - explicitly when creating tsvector values inside triggers, + explicitly when creating tsvector values inside triggers, so that the column's contents will not be affected by changes to - default_text_search_config. Failure to do this is likely to + default_text_search_config. Failure to do this is likely to lead to problems such as search results changing after a dump and reload. @@ -1881,38 +1962,38 @@ CREATE TRIGGER tsvectorupdate BEFORE INSERT OR UPDATE - The function ts_stat is useful for checking your + The function ts_stat is useful for checking your configuration and for finding stop-word candidates. -ts_stat(sqlquery text, weights text, - OUT word text, OUT ndoc integer, - OUT nentry integer) returns setof record +ts_stat(sqlquery text, weights text, + OUT word text, OUT ndoc integer, + OUT nentry integer) returns setof record sqlquery is a text value containing an SQL query which must return a single tsvector column. - ts_stat executes the query and returns statistics about + ts_stat executes the query and returns statistics about each distinct lexeme (word) contained in the tsvector data. The columns returned are - word text — the value of a lexeme + word text — the value of a lexeme - ndoc integer — number of documents - (tsvectors) the word occurred in + ndoc integer — number of documents + (tsvectors) the word occurred in - nentry integer — total number of + nentry integer — total number of occurrences of the word @@ -1931,8 +2012,8 @@ ORDER BY nentry DESC, ndoc DESC, word LIMIT 10; - The same, but counting only word occurrences with weight A - or B: + The same, but counting only word occurrences with weight A + or B: SELECT * FROM ts_stat('SELECT vector FROM apod', 'ab') @@ -1950,7 +2031,7 @@ LIMIT 10; Text search parsers are responsible for splitting raw document text - into tokens and identifying each token's type, where + into tokens and identifying each token's type, where the set of possible types is defined by the parser itself. Note that a parser does not modify the text at all — it simply identifies plausible word boundaries. Because of this limited scope, @@ -1961,8 +2042,8 @@ LIMIT 10; - The built-in parser is named pg_catalog.default. - It recognizes 23 token types, shown in . + The built-in parser is named pg_catalog.default. + It recognizes 23 token types, shown in .
@@ -1977,119 +2058,119 @@ LIMIT 10; - asciiword + asciiword Word, all ASCII letters elephant - word + word Word, all letters mañana - numword + numword Word, letters and digits beta1 - asciihword + asciihword Hyphenated word, all ASCII up-to-date - hword + hword Hyphenated word, all letters lógico-matemática - numhword + numhword Hyphenated word, letters and digits postgresql-beta1 - hword_asciipart + hword_asciipart Hyphenated word part, all ASCII postgresql in the context postgresql-beta1 - hword_part + hword_part Hyphenated word part, all letters lógico or matemática in the context lógico-matemática - hword_numpart + hword_numpart Hyphenated word part, letters and digits beta1 in the context postgresql-beta1 - email + email Email address foo@example.com - protocol + protocol Protocol head http:// - url + url URL example.com/stuff/index.html - host + host Host example.com - url_path + url_path URL path /stuff/index.html, in the context of a URL - file + file File or path name /usr/local/foo.txt, if not within a URL - sfloat + sfloat Scientific notation -1.234e56 - float + float Decimal notation -1.234 - int + int Signed integer -1234 - uint + uint Unsigned integer 1234 - version + version Version number 8.3.0 - tag + tag XML tag <a href="dictionaries.html"> - entity + entity XML entity &amp; - blank + blank Space symbols (any whitespace or punctuation not otherwise recognized) @@ -2099,16 +2180,16 @@ LIMIT 10; - The parser's notion of a letter is determined by the database's - locale setting, specifically lc_ctype. Words containing + The parser's notion of a letter is determined by the database's + locale setting, specifically lc_ctype. Words containing only the basic ASCII letters are reported as a separate token type, since it is sometimes useful to distinguish them. In most European - languages, token types word and asciiword + languages, token types word and asciiword should be treated alike. - email does not support all valid email characters as + email does not support all valid email characters as defined by RFC 5322. Specifically, the only non-alphanumeric characters supported for email user names are period, dash, and underscore. @@ -2154,9 +2235,9 @@ SELECT alias, description, token FROM ts_debug('http://example.com/stuff/index.h Dictionaries are used to eliminate words that should not be considered in a - search (stop words), and to normalize words so + search (stop words), and to normalize words so that different derived forms of the same word will match. A successfully - normalized word is called a lexeme. Aside from + normalized word is called a lexeme. Aside from improving search quality, normalization and removal of stop words reduce the size of the tsvector representation of a document, thereby improving performance. Normalization does not always have linguistic meaning @@ -2229,10 +2310,10 @@ SELECT alias, description, token FROM ts_debug('http://example.com/stuff/index.h - a single lexeme with the TSL_FILTER flag set, to replace + a single lexeme with the TSL_FILTER flag set, to replace the original token with a new token to be passed to subsequent dictionaries (a dictionary that does this is called a - filtering dictionary) + filtering dictionary) @@ -2254,7 +2335,7 @@ SELECT alias, description, token FROM ts_debug('http://example.com/stuff/index.h used to create new dictionaries with custom parameters. Each predefined dictionary template is described below. 
If no existing template is suitable, it is possible to create new ones; see the - contrib/ area of the PostgreSQL distribution + contrib/ area of the PostgreSQL distribution for examples. @@ -2267,7 +2348,7 @@ SELECT alias, description, token FROM ts_debug('http://example.com/stuff/index.h until some dictionary recognizes it as a known word. If it is identified as a stop word, or if no dictionary recognizes the token, it will be discarded and not indexed or searched for. - Normally, the first dictionary that returns a non-NULL + Normally, the first dictionary that returns a non-NULL output determines the result, and any remaining dictionaries are not consulted; but a filtering dictionary can replace the given word with a modified word, which is then passed to subsequent dictionaries. @@ -2277,11 +2358,11 @@ SELECT alias, description, token FROM ts_debug('http://example.com/stuff/index.h The general rule for configuring a list of dictionaries is to place first the most narrow, most specific dictionary, then the more general dictionaries, finishing with a very general dictionary, like - a Snowball stemmer or simple, which + a Snowball stemmer or simple, which recognizes everything. For example, for an astronomy-specific search (astro_en configuration) one could bind token type asciiword (ASCII word) to a synonym dictionary of astronomical - terms, a general English dictionary and a Snowball English + terms, a general English dictionary and a Snowball English stemmer: @@ -2295,7 +2376,7 @@ ALTER TEXT SEARCH CONFIGURATION astro_en end where it'd be useless. Filtering dictionaries are useful to partially normalize words to simplify the task of later dictionaries. For example, a filtering dictionary could be used to remove accents from accented - letters, as is done by the module. + letters, as is done by the module. @@ -2305,7 +2386,7 @@ ALTER TEXT SEARCH CONFIGURATION astro_en Stop words are words that are very common, appear in almost every document, and have no discrimination value. Therefore, they can be ignored in the context of full text searching. For example, every English text - contains words like a and the, so it is + contains words like a and the, so it is useless to store them in an index. However, stop words do affect the positions in tsvector, which in turn affect ranking: @@ -2347,7 +2428,7 @@ SELECT ts_rank_cd (to_tsvector('english','list stop words'), to_tsquery('list &a Simple Dictionary - The simple dictionary template operates by converting the + The simple dictionary template operates by converting the input token to lower case and checking it against a file of stop words. If it is found in the file then an empty array is returned, causing the token to be discarded. If not, the lower-cased form of the word @@ -2357,7 +2438,7 @@ SELECT ts_rank_cd (to_tsvector('english','list stop words'), to_tsquery('list &a - Here is an example of a dictionary definition using the simple + Here is an example of a dictionary definition using the simple template: @@ -2369,11 +2450,11 @@ CREATE TEXT SEARCH DICTIONARY public.simple_dict ( Here, english is the base name of a file of stop words. The file's full name will be - $SHAREDIR/tsearch_data/english.stop, - where $SHAREDIR means the + $SHAREDIR/tsearch_data/english.stop, + where $SHAREDIR means the PostgreSQL installation's shared-data directory, - often /usr/local/share/postgresql (use pg_config - --sharedir to determine it if you're not sure). 
+ often /usr/local/share/postgresql (use pg_config + --sharedir to determine it if you're not sure). The file format is simply a list of words, one per line. Blank lines and trailing spaces are ignored, and upper case is folded to lower case, but no other processing is done @@ -2397,10 +2478,10 @@ SELECT ts_lexize('public.simple_dict','The'); - We can also choose to return NULL, instead of the lower-cased + We can also choose to return NULL, instead of the lower-cased word, if it is not found in the stop words file. This behavior is - selected by setting the dictionary's Accept parameter to - false. Continuing the example: + selected by setting the dictionary's Accept parameter to + false. Continuing the example: ALTER TEXT SEARCH DICTIONARY public.simple_dict ( Accept = false ); @@ -2418,17 +2499,17 @@ SELECT ts_lexize('public.simple_dict','The'); - With the default setting of Accept = true, - it is only useful to place a simple dictionary at the end + With the default setting of Accept = true, + it is only useful to place a simple dictionary at the end of a list of dictionaries, since it will never pass on any token to - a following dictionary. Conversely, Accept = false + a following dictionary. Conversely, Accept = false is only useful when there is at least one following dictionary. Most types of dictionaries rely on configuration files, such as files of - stop words. These files must be stored in UTF-8 encoding. + stop words. These files must be stored in UTF-8 encoding. They will be translated to the actual database encoding, if that is different, when they are read into the server. @@ -2439,8 +2520,8 @@ SELECT ts_lexize('public.simple_dict','The'); Normally, a database session will read a dictionary configuration file only once, when it is first used within the session. If you modify a configuration file and want to force existing sessions to pick up the - new contents, issue an ALTER TEXT SEARCH DICTIONARY command - on the dictionary. This can be a dummy update that doesn't + new contents, issue an ALTER TEXT SEARCH DICTIONARY command + on the dictionary. This can be a dummy update that doesn't actually change any parameter values. @@ -2453,11 +2534,11 @@ SELECT ts_lexize('public.simple_dict','The'); This dictionary template is used to create dictionaries that replace a word with a synonym. Phrases are not supported (use the thesaurus - template () for that). A synonym + template () for that). A synonym dictionary can be used to overcome linguistic problems, for example, to prevent an English stemmer dictionary from reducing the word Paris to pari. It is enough to have a Paris paris line in the - synonym dictionary and put it before the english_stem + synonym dictionary and put it before the english_stem dictionary. For example: @@ -2483,24 +2564,24 @@ SELECT * FROM ts_debug('english', 'Paris'); - The only parameter required by the synonym template is - SYNONYMS, which is the base name of its configuration file - — my_synonyms in the above example. + The only parameter required by the synonym template is + SYNONYMS, which is the base name of its configuration file + — my_synonyms in the above example. The file's full name will be - $SHAREDIR/tsearch_data/my_synonyms.syn - (where $SHAREDIR means the - PostgreSQL installation's shared-data directory). + $SHAREDIR/tsearch_data/my_synonyms.syn + (where $SHAREDIR means the + PostgreSQL installation's shared-data directory). 
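For the Paris example above, a my_synonyms.syn file containing a single line is enough:

Paris paris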
The file format is just one line per word to be substituted, with the word followed by its synonym, separated by white space. Blank lines and trailing spaces are ignored. - The synonym template also has an optional parameter - CaseSensitive, which defaults to false. When - CaseSensitive is false, words in the synonym file + The synonym template also has an optional parameter + CaseSensitive, which defaults to false. When + CaseSensitive is false, words in the synonym file are folded to lower case, as are input tokens. When it is - true, words and tokens are not folded to lower case, + true, words and tokens are not folded to lower case, but are compared as-is. @@ -2511,9 +2592,9 @@ SELECT * FROM ts_debug('english', 'Paris'); to_tsvector(), but when it is used in to_tsquery(), the result will be a query item with the prefix match marker (see - ). + ). For example, suppose we have these entries in - $SHAREDIR/tsearch_data/synonym_sample.syn: + $SHAREDIR/tsearch_data/synonym_sample.syn: postgres pgsql postgresql pgsql @@ -2573,7 +2654,7 @@ mydb=# SELECT 'indexes are very useful'::tsvector @@ to_tsquery('tst','indices') Basically a thesaurus dictionary replaces all non-preferred terms by one preferred term and, optionally, preserves the original terms for indexing - as well. PostgreSQL's current implementation of the + as well. PostgreSQL's current implementation of the thesaurus dictionary is an extension of the synonym dictionary with added phrase support. A thesaurus dictionary requires a configuration file of the following format: @@ -2597,7 +2678,7 @@ more sample word(s) : more indexed word(s) recognize a word. In that case, you should remove the use of the word or teach the subdictionary about it. You can place an asterisk (*) at the beginning of an indexed word to skip applying - the subdictionary to it, but all sample words must be known + the subdictionary to it, but all sample words must be known to the subdictionary. @@ -2609,16 +2690,16 @@ more sample word(s) : more indexed word(s) Specific stop words recognized by the subdictionary cannot be - specified; instead use ? to mark the location where any - stop word can appear. For example, assuming that a and - the are stop words according to the subdictionary: + specified; instead use ? to mark the location where any + stop word can appear. For example, assuming that a and + the are stop words according to the subdictionary: ? one ? two : swsw - matches a one the two and the one a two; - both would be replaced by swsw. + matches a one the two and the one a two; + both would be replaced by swsw. @@ -2628,7 +2709,7 @@ more sample word(s) : more indexed word(s) accumulation. The thesaurus dictionary must be configured carefully. For example, if the thesaurus dictionary is assigned to handle only the asciiword token, then a thesaurus dictionary - definition like one 7 will not work since token type + definition like one 7 will not work since token type uint is not assigned to the thesaurus dictionary. @@ -2645,7 +2726,7 @@ more sample word(s) : more indexed word(s) Thesaurus Configuration - To define a new thesaurus dictionary, use the thesaurus + To define a new thesaurus dictionary, use the thesaurus template. For example: @@ -2667,8 +2748,8 @@ CREATE TEXT SEARCH DICTIONARY thesaurus_simple ( mythesaurus is the base name of the thesaurus configuration file. 
- (Its full name will be $SHAREDIR/tsearch_data/mythesaurus.ths, - where $SHAREDIR means the installation shared-data + (Its full name will be $SHAREDIR/tsearch_data/mythesaurus.ths, + where $SHAREDIR means the installation shared-data directory.) @@ -2752,7 +2833,7 @@ SELECT to_tsquery('''supernova star'''); Notice that supernova star matches supernovae stars in thesaurus_astro because we specified the english_stem stemmer in the thesaurus definition. - The stemmer removed the e and s. + The stemmer removed the e and s. @@ -2774,41 +2855,41 @@ SELECT plainto_tsquery('supernova star'); - <application>Ispell</> Dictionary + <application>Ispell</application> Dictionary - The Ispell dictionary template supports - morphological dictionaries, which can normalize many + The Ispell dictionary template supports + morphological dictionaries, which can normalize many different linguistic forms of a word into the same lexeme. For example, - an English Ispell dictionary can match all declensions and + an English Ispell dictionary can match all declensions and conjugations of the search term bank, e.g., - banking, banked, banks, - banks', and bank's. + banking, banked, banks, + banks', and bank's. The standard PostgreSQL distribution does - not include any Ispell configuration files. + not include any Ispell configuration files. Dictionaries for a large number of languages are available from Ispell. + url="https://www.cs.hmc.edu/~geoff/ispell.html">Ispell. Also, some more modern dictionary file formats are supported — MySpell (OO < 2.0.1) - and Hunspell + url="https://en.wikipedia.org/wiki/MySpell">MySpell (OO < 2.0.1) + and Hunspell (OO >= 2.0.2). A large list of dictionaries is available on the OpenOffice + url="https://wiki.openoffice.org/wiki/Dictionaries">OpenOffice Wiki. - To create an Ispell dictionary perform these steps: + To create an Ispell dictionary perform these steps: - download dictionary configuration files. OpenOffice - extension files have the .oxt extension. It is necessary - to extract .aff and .dic files, change - extensions to .affix and .dict. For some + download dictionary configuration files. OpenOffice + extension files have the .oxt extension. It is necessary + to extract .aff and .dic files, change + extensions to .affix and .dict. For some dictionary files it is also needed to convert characters to the UTF-8 encoding with commands (for example, for a Norwegian language dictionary): @@ -2819,7 +2900,7 @@ iconv -f ISO_8859-1 -t UTF-8 -o nn_no.dict nn_NO.dic - copy files to the $SHAREDIR/tsearch_data directory + copy files to the $SHAREDIR/tsearch_data directory @@ -2837,10 +2918,10 @@ CREATE TEXT SEARCH DICTIONARY english_hunspell ( - Here, DictFile, AffFile, and StopWords + Here, DictFile, AffFile, and StopWords specify the base names of the dictionary, affixes, and stop-words files. The stop-words file has the same format explained above for the - simple dictionary type. The format of the other files is + simple dictionary type. The format of the other files is not specified here but is available from the above-mentioned web sites. 
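Once english_hunspell exists, a quick hedged sanity check with ts_lexize (the exact lexemes depend on the dictionary files installed):

SELECT ts_lexize('english_hunspell', 'banking');   -- typically {bank}
SELECT ts_lexize('english_hunspell', 'banked');    -- typically {bank}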
@@ -2851,7 +2932,7 @@ CREATE TEXT SEARCH DICTIONARY english_hunspell ( - The .affix file of Ispell has the following + The .affix file of Ispell has the following structure: prefixes @@ -2866,7 +2947,7 @@ flag T: - And the .dict file has the following structure: + And the .dict file has the following structure: lapse/ADGRS lard/DGRS @@ -2876,14 +2957,14 @@ lark/MRS - Format of the .dict file is: + Format of the .dict file is: basic_form/affix_class_name - In the .affix file every affix flag is described in the + In the .affix file every affix flag is described in the following format: condition > [-stripping_letters,] adding_affix @@ -2892,12 +2973,12 @@ condition > [-stripping_letters,] adding_affix Here, condition has a format similar to the format of regular expressions. - It can use groupings [...] and [^...]. - For example, [AEIOU]Y means that the last letter of the word - is "y" and the penultimate letter is "a", - "e", "i", "o" or "u". - [^EY] means that the last letter is neither "e" - nor "y". + It can use groupings [...] and [^...]. + For example, [AEIOU]Y means that the last letter of the word + is "y" and the penultimate letter is "a", + "e", "i", "o" or "u". + [^EY] means that the last letter is neither "e" + nor "y". @@ -2922,8 +3003,8 @@ SELECT ts_lexize('norwegian_ispell', 'sjokoladefabrikk'); - MySpell format is a subset of Hunspell. - The .affix file of Hunspell has the following + MySpell format is a subset of Hunspell. + The .affix file of Hunspell has the following structure: PFX A Y 1 @@ -2970,8 +3051,8 @@ SFX T 0 est [^ey] - The .dict file looks like the .dict file of - Ispell: + The .dict file looks like the .dict file of + Ispell: larder/M lardy/RT @@ -2982,8 +3063,8 @@ largehearted - MySpell does not support compound words. - Hunspell has sophisticated support for compound words. At + MySpell does not support compound words. + Hunspell has sophisticated support for compound words. At present, PostgreSQL implements only the basic compound word operations of Hunspell. @@ -2992,18 +3073,18 @@ largehearted - <application>Snowball</> Dictionary + <application>Snowball</application> Dictionary - The Snowball dictionary template is based on a project + The Snowball dictionary template is based on a project by Martin Porter, inventor of the popular Porter's stemming algorithm for the English language. Snowball now provides stemming algorithms for many languages (see the Snowball site for more information). Each algorithm understands how to reduce common variant forms of words to a base, or stem, spelling within - its language. A Snowball dictionary requires a language + its language. A Snowball dictionary requires a language parameter to identify which stemmer to use, and optionally can specify a - stopword file name that gives a list of words to eliminate. + stopword file name that gives a list of words to eliminate. (PostgreSQL's standard stopword lists are also provided by the Snowball project.) For example, there is a built-in definition equivalent to @@ -3020,7 +3101,7 @@ CREATE TEXT SEARCH DICTIONARY english_stem ( - A Snowball dictionary recognizes everything, whether + A Snowball dictionary recognizes everything, whether or not it is able to simplify the word, so it should be placed at the end of the dictionary list. 
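A short sketch of that behavior using the built-in english_stem dictionary:

SELECT ts_lexize('english_stem', 'stars');   -- {star}
SELECT ts_lexize('english_stem', 'qwzxy');   -- a made-up token still yields a lexeme rather than NULL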
It is useless to have it before any other dictionary because a token will never pass through it to @@ -3042,12 +3123,12 @@ CREATE TEXT SEARCH DICTIONARY english_stem ( to_tsvector or to_tsquery needs a text search configuration to perform its processing. The configuration parameter - + specifies the name of the default configuration, which is the one used by text search functions if an explicit configuration parameter is omitted. It can be set in postgresql.conf, or set for an - individual session using the SET command. + individual session using the SET command. @@ -3055,13 +3136,13 @@ CREATE TEXT SEARCH DICTIONARY english_stem ( you can create custom configurations easily. To facilitate management of text search objects, a set of SQL commands is available, and there are several psql commands that display information - about text search objects (). + about text search objects (). As an example we will create a configuration pg, starting by duplicating the built-in - english configuration: + english configuration: CREATE TEXT SEARCH CONFIGURATION public.pg ( COPY = pg_catalog.english ); @@ -3088,7 +3169,7 @@ CREATE TEXT SEARCH DICTIONARY pg_dict ( ); - Next we register the Ispell dictionary + Next we register the Ispell dictionary english_ispell, which has its own configuration files: @@ -3101,7 +3182,7 @@ CREATE TEXT SEARCH DICTIONARY english_ispell ( Now we can set up the mappings for words in configuration - pg: + pg: ALTER TEXT SEARCH CONFIGURATION pg @@ -3133,7 +3214,7 @@ version of our software. The next step is to set the session to use the new configuration, which was - created in the public schema: + created in the public schema: => \dF @@ -3177,64 +3258,64 @@ SHOW default_text_search_config; -ts_debug( config regconfig, document text, - OUT alias text, - OUT description text, - OUT token text, - OUT dictionaries regdictionary[], - OUT dictionary regdictionary, - OUT lexemes text[]) +ts_debug( config regconfig, document text, + OUT alias text, + OUT description text, + OUT token text, + OUT dictionaries regdictionary[], + OUT dictionary regdictionary, + OUT lexemes text[]) returns setof record - ts_debug displays information about every token of - document as produced by the + ts_debug displays information about every token of + document as produced by the parser and processed by the configured dictionaries. It uses the configuration specified by config, + class="parameter">config, or default_text_search_config if that argument is omitted. - ts_debug returns one row for each token identified in the text + ts_debug returns one row for each token identified in the text by the parser. 
The columns returned are - alias text — short name of the token type + alias text — short name of the token type - description text — description of the + description text — description of the token type - token text — text of the token + token text — text of the token - dictionaries regdictionary[] — the + dictionaries regdictionary[] — the dictionaries selected by the configuration for this token type - dictionary regdictionary — the dictionary - that recognized the token, or NULL if none did + dictionary regdictionary — the dictionary + that recognized the token, or NULL if none did - lexemes text[] — the lexeme(s) produced - by the dictionary that recognized the token, or NULL if - none did; an empty array ({}) means it was recognized as a + lexemes text[] — the lexeme(s) produced + by the dictionary that recognized the token, or NULL if + none did; an empty array ({}) means it was recognized as a stop word @@ -3307,10 +3388,10 @@ SELECT * FROM ts_debug('public.english','The Brightest supernovaes'); - In this example, the word Brightest was recognized by the + In this example, the word Brightest was recognized by the parser as an ASCII word (alias asciiword). For this token type the dictionary list is - english_ispell and + english_ispell and english_stem. The word was recognized by english_ispell, which reduced it to the noun bright. The word supernovaes is @@ -3324,7 +3405,7 @@ SELECT * FROM ts_debug('public.english','The Brightest supernovaes'); The word The was recognized by the english_ispell dictionary as a stop word () and will not be indexed. + linkend="textsearch-stopwords"/>) and will not be indexed. The spaces are discarded too, since the configuration provides no dictionaries at all for them. @@ -3360,14 +3441,14 @@ FROM ts_debug('public.english','The Brightest supernovaes'); -ts_parse(parser_name text, document text, - OUT tokid integer, OUT token text) returns setof record -ts_parse(parser_oid oid, document text, - OUT tokid integer, OUT token text) returns setof record +ts_parse(parser_name text, document text, + OUT tokid integer, OUT token text) returns setof record +ts_parse(parser_oid oid, document text, + OUT tokid integer, OUT token text) returns setof record - ts_parse parses the given document + ts_parse parses the given document and returns a series of records, one for each token produced by parsing. Each record includes a tokid showing the assigned token type and a token which is the text of the @@ -3391,14 +3472,14 @@ SELECT * FROM ts_parse('default', '123 - a number'); -ts_token_type(parser_name text, OUT tokid integer, - OUT alias text, OUT description text) returns setof record -ts_token_type(parser_oid oid, OUT tokid integer, - OUT alias text, OUT description text) returns setof record +ts_token_type(parser_name text, OUT tokid integer, + OUT alias text, OUT description text) returns setof record +ts_token_type(parser_oid oid, OUT tokid integer, + OUT alias text, OUT description text) returns setof record - ts_token_type returns a table which describes each type of + ts_token_type returns a table which describes each type of token the specified parser can recognize. For each token type, the table gives the integer tokid that the parser uses to label a token of that type, the alias that names the token type @@ -3441,7 +3522,7 @@ SELECT * FROM ts_token_type('default'); Dictionary Testing - The ts_lexize function facilitates dictionary testing. + The ts_lexize function facilitates dictionary testing. 
@@ -3449,11 +3530,11 @@ SELECT * FROM ts_token_type('default'); -ts_lexize(dict regdictionary, token text) returns text[] +ts_lexize(dict regdictionary, token text) returns text[] - ts_lexize returns an array of lexemes if the input + ts_lexize returns an array of lexemes if the input token is known to the dictionary, or an empty array if the token is known to the dictionary but it is a stop word, or @@ -3490,9 +3571,9 @@ SELECT ts_lexize('thesaurus_astro','supernovae stars') is null; The thesaurus dictionary thesaurus_astro does know the - phrase supernovae stars, but ts_lexize + phrase supernovae stars, but ts_lexize fails since it does not parse the input text but treats it as a single - token. Use plainto_tsquery or to_tsvector to + token. Use plainto_tsquery or to_tsvector to test thesaurus dictionaries, for example: @@ -3540,7 +3621,7 @@ SELECT plainto_tsquery('supernovae stars'); Creates a GIN (Generalized Inverted Index)-based index. - The column must be of tsvector type. + The column must be of tsvector type. @@ -3560,8 +3641,8 @@ SELECT plainto_tsquery('supernovae stars'); Creates a GiST (Generalized Search Tree)-based index. - The column can be of tsvector or - tsquery type. + The column can be of tsvector or + tsquery type. @@ -3575,7 +3656,7 @@ SELECT plainto_tsquery('supernovae stars'); compressed list of matching locations. Multi-word searches can find the first match, then use the index to remove rows that are lacking additional words. GIN indexes store only the words (lexemes) of - tsvector values, and not their weight labels. Thus a table + tsvector values, and not their weight labels. Thus a table row recheck is needed when using a query that involves weights. @@ -3604,7 +3685,7 @@ SELECT plainto_tsquery('supernovae stars'); Note that GIN index build time can often be improved - by increasing , while + by increasing , while GiST index build time is not sensitive to that parameter. @@ -3614,15 +3695,16 @@ SELECT plainto_tsquery('supernovae stars'); allows the implementation of very fast searches with online update. Partitioning can be done at the database level using table inheritance, or by distributing documents over - servers and collecting search results using the - module. The latter is possible because ranking functions use + servers and collecting external search results, e.g. via Foreign Data access. + The latter is possible because ranking functions use only local information. - <application>psql</> Support + <application>psql</application> Support Information about text search configuration objects can be obtained @@ -3666,7 +3748,7 @@ SELECT plainto_tsquery('supernovae stars'); \dF+ PATTERN - List text search configurations (add + for more detail). + List text search configurations (add + for more detail). => \dF russian List of text search configurations @@ -3707,12 +3789,13 @@ Parser: "pg_catalog.default" \dFd+ PATTERN - List text search dictionaries (add + for more detail). + List text search dictionaries (add + for more detail). 
=> \dFd - List of text search dictionaries - Schema | Name | Description + List of text search dictionaries + Schema | Name | Description ------------+-----------------+----------------------------------------------------------- + pg_catalog | arabic_stem | snowball stemmer for arabic language pg_catalog | danish_stem | snowball stemmer for danish language pg_catalog | dutch_stem | snowball stemmer for dutch language pg_catalog | english_stem | snowball stemmer for english language @@ -3720,7 +3803,11 @@ Parser: "pg_catalog.default" pg_catalog | french_stem | snowball stemmer for french language pg_catalog | german_stem | snowball stemmer for german language pg_catalog | hungarian_stem | snowball stemmer for hungarian language + pg_catalog | indonesian_stem | snowball stemmer for indonesian language + pg_catalog | irish_stem | snowball stemmer for irish language pg_catalog | italian_stem | snowball stemmer for italian language + pg_catalog | lithuanian_stem | snowball stemmer for lithuanian language + pg_catalog | nepali_stem | snowball stemmer for nepali language pg_catalog | norwegian_stem | snowball stemmer for norwegian language pg_catalog | portuguese_stem | snowball stemmer for portuguese language pg_catalog | romanian_stem | snowball stemmer for romanian language @@ -3728,6 +3815,7 @@ Parser: "pg_catalog.default" pg_catalog | simple | simple dictionary: just lower case and check for stopword pg_catalog | spanish_stem | snowball stemmer for spanish language pg_catalog | swedish_stem | snowball stemmer for swedish language + pg_catalog | tamil_stem | snowball stemmer for tamil language pg_catalog | turkish_stem | snowball stemmer for turkish language @@ -3738,7 +3826,7 @@ Parser: "pg_catalog.default" \dFp+ PATTERN - List text search parsers (add + for more detail). + List text search parsers (add + for more detail). => \dFp List of text search parsers @@ -3791,7 +3879,7 @@ Parser: "pg_catalog.default" \dFt+ PATTERN - List text search templates (add + for more detail). + List text search templates (add + for more detail). => \dFt List of text search templates @@ -3830,12 +3918,12 @@ Parser: "pg_catalog.default" 264 - Position values in tsvector must be greater than 0 and + Position values in tsvector must be greater than 0 and no more than 16,383 - The match distance in a <N> - (FOLLOWED BY) tsquery operator cannot be more than + The match distance in a <N> + (FOLLOWED BY) tsquery operator cannot be more than 16,384 @@ -3851,7 +3939,7 @@ Parser: "pg_catalog.default" For comparison, the PostgreSQL 8.1 documentation contained 10,441 unique words, a total of 335,420 words, and the most - frequent word postgresql was mentioned 6,127 times in 655 + frequent word postgresql was mentioned 6,127 times in 655 documents. diff --git a/doc/src/sgml/trigger.sgml b/doc/src/sgml/trigger.sgml index 950245d19a..be9c228448 100644 --- a/doc/src/sgml/trigger.sgml +++ b/doc/src/sgml/trigger.sgml @@ -11,10 +11,10 @@ This chapter provides general information about writing trigger functions. Trigger functions can be written in most of the available procedural languages, including - PL/pgSQL (), - PL/Tcl (), - PL/Perl (), and - PL/Python (). + PL/pgSQL (), + PL/Tcl (), + PL/Perl (), and + PL/Python (). After reading this chapter, you should consult the chapter for your favorite procedural language to find out the language-specific details of writing a trigger in it. 
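As a minimal sketch of what such a trigger can look like (written here in PL/pgSQL; the table accounts and its updated_at column are hypothetical and not part of the patched documentation):

CREATE FUNCTION touch_updated_at() RETURNS trigger AS $$
BEGIN
    -- NEW is the row about to be inserted or updated
    NEW.updated_at := now();
    RETURN NEW;
END;
$$ LANGUAGE plpgsql;

CREATE TRIGGER accounts_touch
    BEFORE INSERT OR UPDATE ON accounts
    FOR EACH ROW EXECUTE FUNCTION touch_updated_at();

Returning NEW from a row-level BEFORE trigger lets the operation proceed with the possibly modified row, as described in the sections that follow.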
@@ -41,52 +41,54 @@ On tables and foreign tables, triggers can be defined to execute either before or after any INSERT, UPDATE, or DELETE operation, either once per modified row, - or once per SQL statement. If an - INSERT contains an ON CONFLICT DO UPDATE - clause, it is possible that the effects of a BEFORE insert trigger and - a BEFORE update trigger can both be applied together, if a reference to - an EXCLUDED column appears. UPDATE - triggers can moreover be set to fire only if certain columns are - mentioned in the SET clause of the - UPDATE statement. Triggers can also fire for - TRUNCATE statements. If a trigger event occurs, + or once per SQL statement. + UPDATE triggers can moreover be set to fire only if + certain columns are mentioned in the SET clause of + the UPDATE statement. Triggers can also fire + for TRUNCATE statements. If a trigger event occurs, the trigger's function is called at the appropriate time to handle the - event. Foreign tables do not support the TRUNCATE statement at all. + event. On views, triggers can be defined to execute instead of INSERT, UPDATE, or - DELETE operations. INSTEAD OF triggers + DELETE operations. + Such INSTEAD OF triggers are fired once for each row that needs to be modified in the view. It is the responsibility of the - trigger's function to perform the necessary modifications to the - underlying base tables and, where appropriate, return the modified + trigger's function to perform the necessary modifications to the view's + underlying base table(s) and, where appropriate, return the modified row as it will appear in the view. Triggers on views can also be defined to execute once per SQL statement, before or after INSERT, UPDATE, or DELETE operations. + However, such triggers are fired only if there is also + an INSTEAD OF trigger on the view. Otherwise, + any statement targeting the view must be rewritten into a statement + affecting its underlying base table(s), and then the triggers + that will be fired are the ones attached to the base table(s). The trigger function must be defined before the trigger itself can be created. The trigger function must be declared as a - function taking no arguments and returning type trigger. + function taking no arguments and returning type trigger. (The trigger function receives its input through a specially-passed - TriggerData structure, not in the form of ordinary function + TriggerData structure, not in the form of ordinary function arguments.) Once a suitable trigger function has been created, the trigger is established with - . + . The same trigger function can be used for multiple triggers. - PostgreSQL offers both per-row - triggers and per-statement triggers. With a per-row + PostgreSQL offers both per-row + triggers and per-statement triggers. With a per-row trigger, the trigger function is invoked once for each row that is affected by the statement that fired the trigger. In contrast, a per-statement trigger is @@ -94,32 +96,29 @@ regardless of the number of rows affected by that statement. In particular, a statement that affects zero rows will still result in the execution of any applicable per-statement triggers. These - two types of triggers are sometimes called row-level - triggers and statement-level triggers, + two types of triggers are sometimes called row-level + triggers and statement-level triggers, respectively. Triggers on TRUNCATE may only be - defined at statement level. 
On views, triggers that fire before or - after may only be defined at statement level, while triggers that fire - instead of an INSERT, UPDATE, - or DELETE may only be defined at row level. + defined at statement level, not per-row. Triggers are also classified according to whether they fire - before, after, or - instead of the operation. These are referred to - as BEFORE triggers, AFTER triggers, and - INSTEAD OF triggers respectively. - Statement-level BEFORE triggers naturally fire before the - statement starts to do anything, while statement-level AFTER + before, after, or + instead of the operation. These are referred to + as BEFORE triggers, AFTER triggers, and + INSTEAD OF triggers respectively. + Statement-level BEFORE triggers naturally fire before the + statement starts to do anything, while statement-level AFTER triggers fire at the very end of the statement. These types of triggers may be defined on tables, views, or foreign tables. Row-level - BEFORE triggers fire immediately before a particular row is - operated on, while row-level AFTER triggers fire at the end of - the statement (but before any statement-level AFTER triggers). + BEFORE triggers fire immediately before a particular row is + operated on, while row-level AFTER triggers fire at the end of + the statement (but before any statement-level AFTER triggers). These types of triggers may only be defined on non-partitioned tables and - foreign tables. Row-level INSTEAD OF triggers may only be - defined on views, and fire immediately as each row in the view is - identified as needing to be operated on. + foreign tables, not views. INSTEAD OF triggers may only be + defined on views, and only at row level; they fire immediately as each + row in the view is identified as needing to be operated on. @@ -132,33 +131,57 @@ If an INSERT contains an ON CONFLICT - DO UPDATE clause, it is possible that the effects of all - row-level BEFORE INSERT triggers - and all row-level BEFORE UPDATE triggers can + DO UPDATE clause, it is possible that the effects of + row-level BEFORE INSERT triggers and + row-level BEFORE UPDATE triggers can both be applied in a way that is apparent from the final state of - the updated row, if an EXCLUDED column is referenced. - There need not be an EXCLUDED column reference for - both sets of row-level BEFORE triggers to execute, though. The + the updated row, if an EXCLUDED column is referenced. + There need not be an EXCLUDED column reference for + both sets of row-level BEFORE triggers to execute, + though. The possibility of surprising outcomes should be considered when there - are both BEFORE INSERT and - BEFORE UPDATE row-level triggers - that both affect a row being inserted/updated (this can still be - problematic if the modifications are more or less equivalent if + are both BEFORE INSERT and + BEFORE UPDATE row-level triggers + that change a row being inserted/updated (this can be + problematic even if the modifications are more or less equivalent, if they're not also idempotent). Note that statement-level UPDATE triggers are executed when ON - CONFLICT DO UPDATE is specified, regardless of whether or not + CONFLICT DO UPDATE is specified, regardless of whether or not any rows were affected by the UPDATE (and regardless of whether the alternative UPDATE path was ever taken). 
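For reference, a minimal sketch of the kind of statement under discussion (the counters table is hypothetical); the EXCLUDED reference is what can make the effects of both BEFORE INSERT and BEFORE UPDATE row-level triggers visible in the final row:

CREATE TABLE counters (name text PRIMARY KEY, hits int);

INSERT INTO counters VALUES ('home', 1)
    ON CONFLICT (name)
    DO UPDATE SET hits = counters.hits + EXCLUDED.hits;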
An INSERT with an - ON CONFLICT DO UPDATE clause will execute - statement-level BEFORE INSERT - triggers first, then statement-level BEFORE + ON CONFLICT DO UPDATE clause will execute + statement-level BEFORE INSERT + triggers first, then statement-level BEFORE UPDATE triggers, followed by statement-level - AFTER UPDATE triggers and finally - statement-level AFTER INSERT + AFTER UPDATE triggers and finally + statement-level AFTER INSERT triggers. + + If an UPDATE on a partitioned table causes a row to move + to another partition, it will be performed as a DELETE + from the original partition followed by an INSERT into + the new partition. In this case, all row-level BEFORE + UPDATE triggers and all row-level + BEFORE DELETE triggers are fired on + the original partition. Then all row-level BEFORE + INSERT triggers are fired on the destination partition. + The possibility of surprising outcomes should be considered when all these + triggers affect the row being moved. As far as AFTER ROW + triggers are concerned, AFTER DELETE + and AFTER INSERT triggers are + applied; but AFTER UPDATE triggers + are not applied because the UPDATE has been converted to + a DELETE and an INSERT. As far as + statement-level triggers are concerned, none of the + DELETE or INSERT triggers are fired, + even if row movement occurs; only the UPDATE triggers + defined on the target table used in the UPDATE statement + will be fired. + + Trigger functions invoked by per-statement triggers should always return NULL. Trigger functions invoked by per-row @@ -170,7 +193,7 @@ - It can return NULL to skip the operation for the + It can return NULL to skip the operation for the current row. This instructs the executor to not perform the row-level operation that invoked the trigger (the insertion, modification, or deletion of a particular table row). @@ -188,7 +211,7 @@ - A row-level BEFORE trigger that does not intend to cause + A row-level BEFORE trigger that does not intend to cause either of these behaviors must be careful to return as its result the same row that was passed in (that is, the NEW row for INSERT and UPDATE @@ -197,8 +220,8 @@ - A row-level INSTEAD OF trigger should either return - NULL to indicate that it did not modify any data from + A row-level INSTEAD OF trigger should either return + NULL to indicate that it did not modify any data from the view's underlying base tables, or it should return the view row that was passed in (the NEW row for INSERT and UPDATE @@ -207,66 +230,66 @@ used to signal that the trigger performed the necessary data modifications in the view. This will cause the count of the number of rows affected by the command to be incremented. For - INSERT and UPDATE operations, the trigger - may modify the NEW row before returning it. This will + INSERT and UPDATE operations, the trigger + may modify the NEW row before returning it. This will change the data returned by - INSERT RETURNING or UPDATE RETURNING, + INSERT RETURNING or UPDATE RETURNING, and is useful when the view will not show exactly the same data that was provided. The return value is ignored for row-level triggers fired after an - operation, and so they can return NULL. + operation, and so they can return NULL. If more than one trigger is defined for the same event on the same relation, the triggers will be fired in alphabetical order by - trigger name. In the case of BEFORE and - INSTEAD OF triggers, the possibly-modified row returned by + trigger name. 
In the case of BEFORE and + INSTEAD OF triggers, the possibly-modified row returned by each trigger becomes the input to the next trigger. If any - BEFORE or INSTEAD OF trigger returns - NULL, the operation is abandoned for that row and subsequent + BEFORE or INSTEAD OF trigger returns + NULL, the operation is abandoned for that row and subsequent triggers are not fired (for that row). - A trigger definition can also specify a Boolean WHEN + A trigger definition can also specify a Boolean WHEN condition, which will be tested to see whether the trigger should - be fired. In row-level triggers the WHEN condition can + be fired. In row-level triggers the WHEN condition can examine the old and/or new values of columns of the row. (Statement-level - triggers can also have WHEN conditions, although the feature - is not so useful for them.) In a BEFORE trigger, the - WHEN + triggers can also have WHEN conditions, although the feature + is not so useful for them.) In a BEFORE trigger, the + WHEN condition is evaluated just before the function is or would be executed, - so using WHEN is not materially different from testing the + so using WHEN is not materially different from testing the same condition at the beginning of the trigger function. However, in - an AFTER trigger, the WHEN condition is evaluated + an AFTER trigger, the WHEN condition is evaluated just after the row update occurs, and it determines whether an event is queued to fire the trigger at the end of statement. So when an - AFTER trigger's - WHEN condition does not return true, it is not necessary + AFTER trigger's + WHEN condition does not return true, it is not necessary to queue an event nor to re-fetch the row at end of statement. This can result in significant speedups in statements that modify many rows, if the trigger only needs to be fired for a few of the rows. - INSTEAD OF triggers do not support - WHEN conditions. + INSTEAD OF triggers do not support + WHEN conditions. - Typically, row-level BEFORE triggers are used for checking or + Typically, row-level BEFORE triggers are used for checking or modifying the data that will be inserted or updated. For example, - a BEFORE trigger might be used to insert the current time into a + a BEFORE trigger might be used to insert the current time into a timestamp column, or to check that two elements of the row are - consistent. Row-level AFTER triggers are most sensibly + consistent. Row-level AFTER triggers are most sensibly used to propagate the updates to other tables, or make consistency checks against other tables. The reason for this division of labor is - that an AFTER trigger can be certain it is seeing the final - value of the row, while a BEFORE trigger cannot; there might - be other BEFORE triggers firing after it. If you have no - specific reason to make a trigger BEFORE or - AFTER, the BEFORE case is more efficient, since + that an AFTER trigger can be certain it is seeing the final + value of the row, while a BEFORE trigger cannot; there might + be other BEFORE triggers firing after it. If you have no + specific reason to make a trigger BEFORE or + AFTER, the BEFORE case is more efficient, since the information about the operation doesn't have to be saved until end of statement. @@ -285,8 +308,8 @@ - trigger - arguments for trigger functions + trigger + arguments for trigger functions When a trigger is being defined, arguments can be specified for it. 
The purpose of including arguments in the @@ -309,13 +332,25 @@ for making the trigger input data available to the trigger function. This input data includes the type of trigger event (e.g., INSERT or UPDATE) as well as any - arguments that were listed in CREATE TRIGGER. + arguments that were listed in CREATE TRIGGER. For a row-level trigger, the input data also includes the NEW row for INSERT and UPDATE triggers, and/or the OLD row for UPDATE and DELETE triggers. - Statement-level triggers do not currently have any way to examine the - individual row(s) modified by the statement. + + + + By default, statement-level triggers do not have any way to examine the + individual row(s) modified by the statement. But an AFTER + STATEMENT trigger can request that transition tables + be created to make the sets of affected rows available to the trigger. + AFTER ROW triggers can also request transition tables, so + that they can see the total changes in the table as well as the change in + the individual row they are currently being fired for. The method for + examining the transition tables again depends on the programming language + that is being used, but the typical approach is to make the transition + tables act like read-only temporary tables that can be accessed by SQL + commands issued within the trigger function. @@ -337,7 +372,7 @@ Statement-level triggers follow simple visibility rules: none of the changes made by a statement are visible to statement-level BEFORE triggers, whereas all - modifications are visible to statement-level AFTER + modifications are visible to statement-level AFTER triggers. @@ -346,14 +381,14 @@ The data change (insertion, update, or deletion) causing the trigger to fire is naturally not visible - to SQL commands executed in a row-level BEFORE trigger, + to SQL commands executed in a row-level BEFORE trigger, because it hasn't happened yet. - However, SQL commands executed in a row-level BEFORE + However, SQL commands executed in a row-level BEFORE trigger will see the effects of data changes for rows previously processed in the same outer command. This requires caution, since the ordering of these @@ -364,15 +399,15 @@ - Similarly, a row-level INSTEAD OF trigger will see the + Similarly, a row-level INSTEAD OF trigger will see the effects of data changes made by previous firings of INSTEAD - OF triggers in the same outer command. + OF triggers in the same outer command. - When a row-level AFTER trigger is fired, all data + When a row-level AFTER trigger is fired, all data changes made by the outer command are already complete, and are visible to the invoked trigger function. @@ -384,15 +419,15 @@ If your trigger function is written in any of the standard procedural languages, then the above statements apply only if the function is - declared VOLATILE. Functions that are declared - STABLE or IMMUTABLE will not see changes made by + declared VOLATILE. Functions that are declared + STABLE or IMMUTABLE will not see changes made by the calling command in any case. Further information about data visibility rules can be found in - . The example in contains a demonstration of these rules. + . The example in contains a demonstration of these rules. @@ -420,14 +455,14 @@ - Trigger functions must use the version 1 function manager + Trigger functions must use the version 1 function manager interface. When a function is called by the trigger manager, it is not passed - any normal arguments, but it is passed a context - pointer pointing to a TriggerData structure. 
C + any normal arguments, but it is passed a context + pointer pointing to a TriggerData structure. C functions can check whether they were called from the trigger manager or not by executing the macro: @@ -438,10 +473,10 @@ CALLED_AS_TRIGGER(fcinfo) ((fcinfo)->context != NULL && IsA((fcinfo)->context, TriggerData)) If this returns true, then it is safe to cast - fcinfo->context to type TriggerData + fcinfo->context to type TriggerData * and make use of the pointed-to - TriggerData structure. The function must - not alter the TriggerData + TriggerData structure. The function must + not alter the TriggerData structure or any of the data it points to. @@ -469,7 +504,7 @@ typedef struct TriggerData - type + type Always T_TriggerData. @@ -478,7 +513,7 @@ typedef struct TriggerData - tg_event + tg_event Describes the event for which the function is called. You can use the @@ -571,24 +606,24 @@ typedef struct TriggerData - tg_relation + tg_relation A pointer to a structure describing the relation that the trigger fired for. - Look at utils/rel.h for details about + Look at utils/rel.h for details about this structure. The most interesting things are - tg_relation->rd_att (descriptor of the relation - tuples) and tg_relation->rd_rel->relname - (relation name; the type is not char* but - NameData; use - SPI_getrelname(tg_relation) to get a char* if you + tg_relation->rd_att (descriptor of the relation + tuples) and tg_relation->rd_rel->relname + (relation name; the type is not char* but + NameData; use + SPI_getrelname(tg_relation) to get a char* if you need a copy of the name). - tg_trigtuple + tg_trigtuple A pointer to the row for which the trigger was fired. This is @@ -604,11 +639,11 @@ typedef struct TriggerData - tg_newtuple + tg_newtuple A pointer to the new version of the row, if the trigger was - fired for an UPDATE, and NULL if + fired for an UPDATE, and NULL if it is for an INSERT or a DELETE. This is what you have to return from the function if the event is an UPDATE @@ -620,11 +655,11 @@ typedef struct TriggerData - tg_trigger + tg_trigger - A pointer to a structure of type Trigger, - defined in utils/reltrigger.h: + A pointer to a structure of type Trigger, + defined in utils/reltrigger.h: typedef struct Trigger @@ -650,9 +685,9 @@ typedef struct Trigger } Trigger; - where tgname is the trigger's name, - tgnargs is the number of arguments in - tgargs, and tgargs is an array of + where tgname is the trigger's name, + tgnargs is the number of arguments in + tgargs, and tgargs is an array of pointers to the arguments specified in the CREATE TRIGGER statement. The other members are for internal use only. @@ -661,7 +696,7 @@ typedef struct Trigger - tg_trigtuplebuf + tg_trigtuplebuf The buffer containing tg_trigtuple, or InvalidBuffer if there @@ -671,7 +706,7 @@ typedef struct Trigger - tg_newtuplebuf + tg_newtuplebuf The buffer containing tg_newtuple, or InvalidBuffer if there @@ -681,24 +716,24 @@ typedef struct Trigger - tg_oldtable + tg_oldtable A pointer to a structure of type Tuplestorestate containing zero or more rows in the format specified by - tg_relation, or a NULL pointer + tg_relation, or a NULL pointer if there is no OLD TABLE transition relation. - tg_newtable + tg_newtable A pointer to a structure of type Tuplestorestate containing zero or more rows in the format specified by - tg_relation, or a NULL pointer + tg_relation, or a NULL pointer if there is no NEW TABLE transition relation. 
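At the SQL level, these tuplestores are populated when a trigger is declared with a REFERENCING clause. A minimal sketch using PL/pgSQL (the orders table is hypothetical):

CREATE FUNCTION orders_summary() RETURNS trigger AS $$
DECLARE
    n bigint;
BEGIN
    -- new_rows is the NEW TABLE transition relation named below
    SELECT count(*) INTO n FROM new_rows;
    RAISE NOTICE 'inserted % row(s)', n;
    RETURN NULL;   -- per-statement triggers should always return NULL
END;
$$ LANGUAGE plpgsql;

CREATE TRIGGER orders_summary_trig
    AFTER INSERT ON orders
    REFERENCING NEW TABLE AS new_rows
    FOR EACH STATEMENT EXECUTE FUNCTION orders_summary();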
@@ -709,15 +744,15 @@ typedef struct Trigger To allow queries issued through SPI to reference transition tables, see - . + . A trigger function must return either a - HeapTuple pointer or a NULL pointer - (not an SQL null value, that is, do not set isNull true). + HeapTuple pointer or a NULL pointer + (not an SQL null value, that is, do not set isNull true). Be careful to return either - tg_trigtuple or tg_newtuple, + tg_trigtuple or tg_newtuple, as appropriate, if you don't want to modify the row being operated on. @@ -732,10 +767,10 @@ typedef struct Trigger - The function trigf reports the number of rows in the - table ttest and skips the actual operation if the + The function trigf reports the number of rows in the + table ttest and skips the actual operation if the command attempts to insert a null value into the column - x. (So the trigger acts as a not-null constraint but + x. (So the trigger acts as a not-null constraint but doesn't abort the transaction.) @@ -829,17 +864,17 @@ trigf(PG_FUNCTION_ARGS) After you have compiled the source code (see ), declare the function and the triggers: + linkend="dfunc"/>), declare the function and the triggers: CREATE FUNCTION trigf() RETURNS trigger - AS 'filename' + AS 'filename' LANGUAGE C; CREATE TRIGGER tbefore BEFORE INSERT OR UPDATE OR DELETE ON ttest - FOR EACH ROW EXECUTE PROCEDURE trigf(); + FOR EACH ROW EXECUTE FUNCTION trigf(); CREATE TRIGGER tafter AFTER INSERT OR UPDATE OR DELETE ON ttest - FOR EACH ROW EXECUTE PROCEDURE trigf(); + FOR EACH ROW EXECUTE FUNCTION trigf(); @@ -915,7 +950,7 @@ DELETE 2 There are more complex examples in src/test/regress/regress.c and - in . + in . diff --git a/doc/src/sgml/tsm-system-rows.sgml b/doc/src/sgml/tsm-system-rows.sgml index 93aa536664..3dcd948ff8 100644 --- a/doc/src/sgml/tsm-system-rows.sgml +++ b/doc/src/sgml/tsm-system-rows.sgml @@ -8,9 +8,9 @@ - The tsm_system_rows module provides the table sampling method + The tsm_system_rows module provides the table sampling method SYSTEM_ROWS, which can be used in - the TABLESAMPLE clause of a + the TABLESAMPLE clause of a command. @@ -38,7 +38,7 @@ Here is an example of selecting a sample of a table with - SYSTEM_ROWS. First install the extension: + SYSTEM_ROWS. First install the extension: @@ -55,7 +55,7 @@ SELECT * FROM my_table TABLESAMPLE SYSTEM_ROWS(100); This command will return a sample of 100 rows from the - table my_table (unless the table does not have 100 + table my_table (unless the table does not have 100 visible rows, in which case all its rows are returned). diff --git a/doc/src/sgml/tsm-system-time.sgml b/doc/src/sgml/tsm-system-time.sgml index 3f8ff1a026..fd8e999544 100644 --- a/doc/src/sgml/tsm-system-time.sgml +++ b/doc/src/sgml/tsm-system-time.sgml @@ -8,9 +8,9 @@ - The tsm_system_time module provides the table sampling method + The tsm_system_time module provides the table sampling method SYSTEM_TIME, which can be used in - the TABLESAMPLE clause of a + the TABLESAMPLE clause of a command. @@ -40,7 +40,7 @@ Here is an example of selecting a sample of a table with - SYSTEM_TIME. First install the extension: + SYSTEM_TIME. First install the extension: @@ -56,7 +56,7 @@ SELECT * FROM my_table TABLESAMPLE SYSTEM_TIME(1000); - This command will return as large a sample of my_table as + This command will return as large a sample of my_table as it can read in 1 second (1000 milliseconds). Of course, if the whole table can be read in under 1 second, all its rows will be returned. 
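As a further sketch of a typical use (the big_table relation and its amount column are hypothetical), a time-bounded sample can drive an approximate aggregate:

CREATE EXTENSION IF NOT EXISTS tsm_system_time;

-- average over whatever rows can be read in roughly 200 milliseconds
SELECT avg(amount) FROM big_table TABLESAMPLE SYSTEM_TIME(200);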
diff --git a/doc/src/sgml/typeconv.sgml b/doc/src/sgml/typeconv.sgml index 63d41f03f3..81dba7dacf 100644 --- a/doc/src/sgml/typeconv.sgml +++ b/doc/src/sgml/typeconv.sgml @@ -26,7 +26,7 @@ can be tailored by using explicit type conversion. This chapter introduces the PostgreSQL type conversion mechanisms and conventions. -Refer to the relevant sections in and +Refer to the relevant sections in and for more information on specific data types and allowed functions and operators. @@ -40,7 +40,7 @@ has an associated data type which determines its behavior and allowed usage. PostgreSQL has an extensible type system that is more general and flexible than other SQL implementations. Hence, most type conversion behavior in PostgreSQL -is governed by general rules rather than by ad hoc +is governed by general rules rather than by ad hoc heuristics. This allows the use of mixed-type expressions even with user-defined types. @@ -124,11 +124,11 @@ with, and perhaps converted to, the types of the target columns. Since all query results from a unionized SELECT statement must appear in a single set of columns, the types of the results of each -SELECT clause must be matched up and converted to a uniform set. -Similarly, the result expressions of a CASE construct must be -converted to a common type so that the CASE expression as a whole -has a known output type. The same holds for ARRAY constructs, -and for the GREATEST and LEAST functions. +SELECT clause must be matched up and converted to a uniform set. +Similarly, the result expressions of a CASE construct must be +converted to a common type so that the CASE expression as a whole +has a known output type. The same holds for ARRAY constructs, +and for the GREATEST and LEAST functions. @@ -139,7 +139,7 @@ and for the GREATEST and LEAST functions. The system catalogs store information about which conversions, or casts, exist between which data types, and how to perform those conversions. Additional casts can be added by the user -with the +with the command. (This is usually done in conjunction with defining new data types. The set of casts between built-in types has been carefully crafted and is best not @@ -158,7 +158,7 @@ Data types are divided into several basic type categories, including boolean, numeric, string, bitstring, datetime, timespan, geometric, network, and -user-defined. (For a list see ; +user-defined. (For a list see ; but note it is also possible to create custom type categories.) Within each category there can be one or more preferred types, which are preferred when there is a choice of possible types. With careful selection @@ -213,7 +213,7 @@ should use this new function and no longer do implicit conversion to use the old Note that this procedure is indirectly affected by the precedence of the operators involved, since that will determine which sub-expressions are taken to be the inputs of which operators. - See for more information. + See for more information. @@ -225,7 +225,7 @@ Select the operators to be considered from the pg_operator system catalog. If a non-schema-qualified operator name was used (the usual case), the operators considered are those with the matching name and argument count that are -visible in the current search path (see ). +visible in the current search path (see ). If a qualified operator name was given, only operators in the specified schema are considered. @@ -246,7 +246,19 @@ search path position. Check for an operator accepting exactly the input argument types. 
If one exists (there can be only one exact match in the set of -operators considered), use it. +operators considered), use it. Lack of an exact match creates a security +hazard when calling, via qualified name + + + + The hazard does not arise with a non-schema-qualified name, because a + search path containing schemas that permit untrusted users to create + objects is not a secure schema usage + pattern. + + +(not typical), any operator found in a schema that permits untrusted users to +create objects. In such situations, cast arguments to force an exact match. @@ -345,7 +357,7 @@ Some examples follow. Factorial Operator Type Resolution -There is only one factorial operator (postfix !) +There is only one factorial operator (postfix !) defined in the standard catalog, and it takes an argument of type bigint. The scanner assigns an initial type of integer to the argument @@ -423,11 +435,11 @@ type to resolve the unknown-type literals as. The PostgreSQL operator catalog has several -entries for the prefix operator @, all of which implement +entries for the prefix operator @, all of which implement absolute-value operations for various numeric data types. One of these entries is for type float8, which is the preferred type in the numeric category. Therefore, PostgreSQL -will use that entry when faced with an unknown input: +will use that entry when faced with an unknown input: SELECT @ '-4.5' AS "abs"; abs @@ -446,9 +458,9 @@ ERROR: "-4.5e500" is out of range for type double precision -On the other hand, the prefix operator ~ (bitwise negation) +On the other hand, the prefix operator ~ (bitwise negation) is defined only for integer data types, not for float8. So, if we -try a similar case with ~, we get: +try a similar case with ~, we get: SELECT ~ '20' AS "negation"; @@ -457,7 +469,7 @@ HINT: Could not choose a best candidate operator. You might need to add explicit type casts. This happens because the system cannot decide which of the several -possible ~ operators should be preferred. We can help +possible ~ operators should be preferred. We can help it out with an explicit cast: SELECT ~ CAST('20' AS int8) AS "negation"; @@ -485,14 +497,14 @@ SELECT array[1,2] <@ '{1,2,3}' as "is subset"; (1 row) The PostgreSQL operator catalog has several -entries for the infix operator <@, but the only two that +entries for the infix operator <@, but the only two that could possibly accept an integer array on the left-hand side are -array inclusion (anyarray <@ anyarray) -and range inclusion (anyelement <@ anyrange). +array inclusion (anyarray <@ anyarray) +and range inclusion (anyelement <@ anyrange). Since none of these polymorphic pseudo-types (see ) are considered preferred, the parser cannot +linkend="datatype-pseudo"/>) are considered preferred, the parser cannot resolve the ambiguity on that basis. -However, tells +However, tells it to assume that the unknown-type literal is of the same type as the other input, that is, integer array. Now only one of the two operators can match, so array inclusion is selected. (Had range inclusion been selected, we would @@ -518,19 +530,19 @@ CREATE TABLE mytable (val mytext); SELECT * FROM mytable WHERE val = 'foo'; This query will not use the custom operator. The parser will first see if -there is a mytext = mytext operator -(), which there is not; -then it will consider the domain's base type text, and see if -there is a text = text operator -(), which there is; -so it resolves the unknown-type literal as text and -uses the text = text operator. 
+there is a mytext = mytext operator +(), which there is not; +then it will consider the domain's base type text, and see if +there is a text = text operator +(), which there is; +so it resolves the unknown-type literal as text and +uses the text = text operator. The only way to get the custom operator to be used is to explicitly cast the literal: SELECT * FROM mytable WHERE val = text 'foo'; -so that the mytext = text operator is found +so that the mytext = text operator is found immediately according to the exact-match rule. If the best-match rules are reached, they actively discriminate against operators on domain types. If they did not, such an operator would create too many ambiguous-operator @@ -564,7 +576,7 @@ Select the functions to be considered from the pg_proc system catalog. If a non-schema-qualified function name was used, the functions considered are those with the matching name and argument count that are -visible in the current search path (see ). +visible in the current search path (see ). If a qualified function name was given, only functions in the specified schema are considered. @@ -580,8 +592,8 @@ search path position. -If a function is declared with a VARIADIC array parameter, and -the call does not use the VARIADIC keyword, then the function +If a function is declared with a VARIADIC array parameter, and +the call does not use the VARIADIC keyword, then the function is treated as if the array parameter were replaced by one or more occurrences of its element type, as needed to match the call. After such expansion the function might have effective argument types identical to some non-variadic @@ -589,6 +601,26 @@ function. In that case the function appearing earlier in the search path is used, or if the two functions are in the same schema, the non-variadic one is preferred. + +This creates a security hazard when calling, via qualified name + + + + The hazard does not arise with a non-schema-qualified name, because a + search path containing schemas that permit untrusted users to create + objects is not a secure schema usage + pattern. + + , +a variadic function found in a schema that permits untrusted users to create +objects. A malicious user can take control and execute arbitrary SQL +functions as though you executed them. Substitute a call bearing +the VARIADIC keyword, which bypasses this hazard. Calls +populating VARIADIC "any" parameters often have no +equivalent formulation containing the VARIADIC keyword. To +issue those calls safely, the function's schema must permit only trusted users +to create objects. + @@ -599,9 +631,18 @@ search path is used. If there are two or more such functions in the same schema with identical parameter types in the non-defaulted positions (which is possible if they have different sets of defaultable parameters), the system will not be able to determine which to prefer, and so an ambiguous -function call error will result if no better match to the call can be +function call error will result if no better match to the call can be found. + +This creates an availability hazard when calling, via qualified +name, any function found in a +schema that permits untrusted users to create objects. A malicious user can +create a function with the name of an existing function, replicating that +function's parameters and appending novel parameters having default values. +This precludes new calls to the original function. To forestall this hazard, +place functions in schemas that permit only trusted users to create objects. 
+ @@ -610,9 +651,12 @@ found. Check for a function accepting exactly the input argument types. If one exists (there can be only one exact match in the set of -functions considered), use it. -(Cases involving unknown will never find a match at -this step.) +functions considered), use it. Lack of an exact match creates a security +hazard when calling, via qualified +name, a function found in a +schema that permits untrusted users to create objects. In such situations, +cast arguments to force an exact match. (Cases involving unknown +will never find a match at this step.) @@ -626,14 +670,14 @@ an unknown-type literal, or a type that is binary-coercible to the named data type, or a type that could be converted to the named data type by applying that type's I/O functions (that is, the conversion is either to or from one of the standard string types). When these conditions are met, -the function call is treated as a form of CAST specification. +the function call is treated as a form of CAST specification. The reason for this step is to support function-style cast specifications in cases where there is not an actual cast function. If there is a cast function, it is conventionally named after its output type, and so there is no need to have a special case. See - + for additional commentary. @@ -709,7 +753,7 @@ Otherwise, fail. -Note that the best match rules are identical for operator and +Note that the best match rules are identical for operator and function type resolution. Some examples follow. @@ -750,6 +794,57 @@ SELECT round(4.0, 4); + +Variadic Function Resolution + + + +CREATE FUNCTION public.variadic_example(VARIADIC numeric[]) RETURNS int + LANGUAGE sql AS 'SELECT 1'; +CREATE FUNCTION + + +This function accepts, but does not require, the VARIADIC keyword. It +tolerates both integer and numeric arguments: + + +SELECT public.variadic_example(0), + public.variadic_example(0.0), + public.variadic_example(VARIADIC array[0.0]); + variadic_example | variadic_example | variadic_example +------------------+------------------+------------------ + 1 | 1 | 1 +(1 row) + + +However, the first and second calls will prefer more-specific functions, if +available: + + +CREATE FUNCTION public.variadic_example(numeric) RETURNS int + LANGUAGE sql AS 'SELECT 2'; +CREATE FUNCTION + +CREATE FUNCTION public.variadic_example(int) RETURNS int + LANGUAGE sql AS 'SELECT 3'; +CREATE FUNCTION + +SELECT public.variadic_example(0), + public.variadic_example(0.0), + public.variadic_example(VARIADIC array[0.0]); + variadic_example | variadic_example | variadic_example +------------------+------------------+------------------ + 3 | 2 | 1 +(1 row) + + +Given the default configuration and only the first function existing, the +first and second calls are insecure. Any user could intercept them by +creating the second or third function. By matching the argument type exactly +and using the VARIADIC keyword, the third call is secure. + + + Substring Function Type Resolution @@ -790,7 +885,7 @@ SELECT substr(CAST (varchar '1234' AS text), 3); -The parser learns from the pg_cast catalog that +The parser learns from the pg_cast catalog that text and varchar are binary-compatible, meaning that one can be passed to a function that accepts the other without doing any physical conversion. Therefore, no @@ -809,8 +904,8 @@ HINT: No function matches the given name and argument types. You might need to add explicit type casts. -This does not work because integer does not have an implicit cast -to text. 
An explicit cast will work, however: +This does not work because integer does not have an implicit cast +to text. An explicit cast will work, however: SELECT substr(CAST (1234 AS text), 3); @@ -845,8 +940,8 @@ Check for an exact match with the target. Otherwise, try to convert the expression to the target type. This is possible -if an assignment cast between the two types is registered in the -pg_cast catalog (see ). +if an assignment cast between the two types is registered in the +pg_cast catalog (see ). Alternatively, if the expression is an unknown-type literal, the contents of the literal string will be fed to the input conversion routine for the target type. @@ -857,12 +952,12 @@ type. Check to see if there is a sizing cast for the target type. A sizing cast is a cast from that type to itself. If one is found in the -pg_cast catalog, apply it to the expression before storing +pg_cast catalog, apply it to the expression before storing into the destination column. The implementation function for such a cast always takes an extra parameter of type integer, which receives -the destination column's atttypmod value (typically its -declared length, although the interpretation of atttypmod -varies for different data types), and it may take a third boolean +the destination column's atttypmod value (typically its +declared length, although the interpretation of atttypmod +varies for different data types), and it may take a third boolean parameter that says whether the cast is explicit or implicit. The cast function is responsible for applying any length-dependent semantics such as size @@ -896,11 +991,11 @@ What has really happened here is that the two unknown literals are resolved to text by default, allowing the || operator to be resolved as text concatenation. Then the text result of the operator is converted to bpchar (blank-padded -char, the internal name of the character data type) to match the target +char, the internal name of the character data type) to match the target column type. (Since the conversion from text to bpchar is binary-coercible, this conversion does not insert any real function call.) Finally, the sizing function -bpchar(bpchar, integer, boolean) is found in the system catalog +bpchar(bpchar, integer, boolean) is found in the system catalog and applied to the operator's result and the stored column length. This type-specific function performs the required length check and addition of padding spaces. @@ -942,13 +1037,13 @@ padding spaces. -SQL UNION constructs must match up possibly dissimilar +SQL UNION constructs must match up possibly dissimilar types to become a single result set. The resolution algorithm is applied separately to each output column of a union query. The -INTERSECT and EXCEPT constructs resolve -dissimilar types in the same way as UNION. The -CASE, ARRAY, VALUES, -GREATEST and LEAST constructs use the identical +INTERSECT and EXCEPT constructs resolve +dissimilar types in the same way as UNION. The +CASE, ARRAY, VALUES, +GREATEST and LEAST constructs use the identical algorithm to match up their component expressions and select a result data type. @@ -972,7 +1067,7 @@ domain's base type for all subsequent steps. Somewhat like the treatment of domain inputs for operators and functions, this behavior allows a domain type to be preserved through - a UNION or similar construct, so long as the user is + a UNION or similar construct, so long as the user is careful to ensure that all inputs are implicitly or explicitly of that exact type. 
Otherwise the domain's base type will be preferred. @@ -1053,9 +1148,9 @@ SELECT 1.2 AS "numeric" UNION SELECT 1; 1.2 (2 rows) -The literal 1.2 is of type numeric, -and the integer value 1 can be cast implicitly to -numeric, so that type is used. +The literal 1.2 is of type numeric, +and the integer value 1 can be cast implicitly to +numeric, so that type is used. @@ -1072,9 +1167,39 @@ SELECT 1 AS "real" UNION SELECT CAST('2.2' AS REAL); 2.2 (2 rows) -Here, since type real cannot be implicitly cast to integer, -but integer can be implicitly cast to real, the union -result type is resolved as real. +Here, since type real cannot be implicitly cast to integer, +but integer can be implicitly cast to real, the union +result type is resolved as real. + + + + +Type Resolution in a Nested Union + + + +SELECT NULL UNION SELECT NULL UNION SELECT 1; + +ERROR: UNION types text and integer cannot be matched + +This failure occurs because PostgreSQL treats +multiple UNIONs as a nest of pairwise operations; +that is, this input is the same as + +(SELECT NULL UNION SELECT NULL) UNION SELECT 1; + +The inner UNION is resolved as emitting +type text, according to the rules given above. Then the +outer UNION has inputs of types text +and integer, leading to the observed error. The problem +can be fixed by ensuring that the leftmost UNION +has at least one input of the desired result type. + + + +INTERSECT and EXCEPT operations are +likewise resolved pairwise. However, the other constructs described in this +section consider all of their inputs in one resolution step. @@ -1089,38 +1214,38 @@ result type is resolved as real. The rules given in the preceding sections will result in assignment -of non-unknown data types to all expressions in a SQL query, +of non-unknown data types to all expressions in a SQL query, except for unspecified-type literals that appear as simple output -columns of a SELECT command. For example, in +columns of a SELECT command. For example, in SELECT 'Hello World'; there is nothing to identify what type the string literal should be -taken as. In this situation PostgreSQL will fall back -to resolving the literal's type as text. +taken as. In this situation PostgreSQL will fall back +to resolving the literal's type as text. -When the SELECT is one arm of a UNION -(or INTERSECT or EXCEPT) construct, or when it -appears within INSERT ... SELECT, this rule is not applied +When the SELECT is one arm of a UNION +(or INTERSECT or EXCEPT) construct, or when it +appears within INSERT ... SELECT, this rule is not applied since rules given in preceding sections take precedence. The type of an -unspecified-type literal can be taken from the other UNION arm +unspecified-type literal can be taken from the other UNION arm in the first case, or from the destination column in the second case. -RETURNING lists are treated the same as SELECT +RETURNING lists are treated the same as SELECT output lists for this purpose. - Prior to PostgreSQL 10, this rule did not exist, and - unspecified-type literals in a SELECT output list were - left as type unknown. That had assorted bad consequences, + Prior to PostgreSQL 10, this rule did not exist, and + unspecified-type literals in a SELECT output list were + left as type unknown. That had assorted bad consequences, so it's been changed. 
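A small sketch of the resulting behavior (the temporary table name is arbitrary): materializing such a SELECT now produces a text column rather than one of type unknown:

CREATE TEMP TABLE greeting AS SELECT 'Hello World' AS message;

SELECT pg_typeof(message) FROM greeting;
-- returns: text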
diff --git a/doc/src/sgml/unaccent.sgml b/doc/src/sgml/unaccent.sgml index 2b127e6736..547ac54a71 100644 --- a/doc/src/sgml/unaccent.sgml +++ b/doc/src/sgml/unaccent.sgml @@ -8,7 +8,7 @@ - unaccent is a text search dictionary that removes accents + unaccent is a text search dictionary that removes accents (diacritic signs) from lexemes. It's a filtering dictionary, which means its output is always passed to the next dictionary (if any), unlike the normal @@ -17,7 +17,7 @@ - The current implementation of unaccent cannot be used as a + The current implementation of unaccent cannot be used as a normalizing dictionary for the thesaurus dictionary. @@ -25,17 +25,17 @@ Configuration - An unaccent dictionary accepts the following options: + An unaccent dictionary accepts the following options: - RULES is the base name of the file containing the list of + RULES is the base name of the file containing the list of translation rules. This file must be stored in - $SHAREDIR/tsearch_data/ (where $SHAREDIR means - the PostgreSQL installation's shared-data directory). - Its name must end in .rules (which is not to be included in - the RULES parameter). + $SHAREDIR/tsearch_data/ (where $SHAREDIR means + the PostgreSQL installation's shared-data directory). + Its name must end in .rules (which is not to be included in + the RULES parameter). @@ -72,15 +72,15 @@ - Actually, each character can be any string not containing - whitespace, so unaccent dictionaries could be used for + Actually, each character can be any string not containing + whitespace, so unaccent dictionaries could be used for other sorts of substring substitutions besides diacritic removal. - As with other PostgreSQL text search configuration files, + As with other PostgreSQL text search configuration files, the rules file must be stored in UTF-8 encoding. The data is automatically translated into the current database's encoding when loaded. Any lines containing untranslatable characters are silently @@ -92,8 +92,8 @@ A more complete example, which is directly useful for most European - languages, can be found in unaccent.rules, which is installed - in $SHAREDIR/tsearch_data/ when the unaccent + languages, can be found in unaccent.rules, which is installed + in $SHAREDIR/tsearch_data/ when the unaccent module is installed. This rules file translates characters with accents to the same characters without accents, and it also expands ligatures into the equivalent series of simple characters (for example, Æ to @@ -105,11 +105,11 @@ Usage - Installing the unaccent extension creates a text - search template unaccent and a dictionary unaccent - based on it. The unaccent dictionary has the default - parameter setting RULES='unaccent', which makes it immediately - usable with the standard unaccent.rules file. + Installing the unaccent extension creates a text + search template unaccent and a dictionary unaccent + based on it. The unaccent dictionary has the default + parameter setting RULES='unaccent', which makes it immediately + usable with the standard unaccent.rules file. 
If you wish, you can alter the parameter, for example @@ -132,7 +132,7 @@ mydb=# select ts_lexize('unaccent','Hôtel'); Here is an example showing how to insert the - unaccent dictionary into a text search configuration: + unaccent dictionary into a text search configuration: mydb=# CREATE TEXT SEARCH CONFIGURATION fr ( COPY = french ); mydb=# ALTER TEXT SEARCH CONFIGURATION fr @@ -163,9 +163,9 @@ mydb=# select ts_headline('fr','Hôtel de la Mer',to_tsquery('fr','Hotels') Functions - The unaccent() function removes accents (diacritic signs) from + The unaccent() function removes accents (diacritic signs) from a given string. Basically, it's a wrapper around - unaccent-type dictionaries, but it can be used outside normal + unaccent-type dictionaries, but it can be used outside normal text search contexts. @@ -174,12 +174,14 @@ mydb=# select ts_headline('fr','Hôtel de la Mer',to_tsquery('fr','Hotels') -unaccent(dictionary, string) returns text +unaccent(dictionary regdictionary, string text) returns text - If the dictionary argument is - omitted, unaccent is assumed. + If the dictionary argument is + omitted, the text search dictionary named unaccent and + appearing in the same schema as the unaccent() + function itself is used. diff --git a/doc/src/sgml/user-manag.sgml b/doc/src/sgml/user-manag.sgml index 46989f0169..6106244d32 100644 --- a/doc/src/sgml/user-manag.sgml +++ b/doc/src/sgml/user-manag.sgml @@ -5,18 +5,18 @@ PostgreSQL manages database access permissions - using the concept of roles. A role can be thought of as + using the concept of roles. A role can be thought of as either a database user, or a group of database users, depending on how the role is set up. Roles can own database objects (for example, tables and functions) and can assign privileges on those objects to other roles to control who has access to which objects. Furthermore, it is possible - to grant membership in a role to another role, thus + to grant membership in a role to another role, thus allowing the member role to use privileges assigned to another role. - The concept of roles subsumes the concepts of users and - groups. In PostgreSQL versions + The concept of roles subsumes the concepts of users and + groups. In PostgreSQL versions before 8.1, users and groups were distinct kinds of entities, but now there are only roles. Any role can act as a user, a group, or both. @@ -24,7 +24,7 @@ This chapter describes how to create and manage roles. More information about the effects of role privileges on various - database objects can be found in . + database objects can be found in . @@ -52,16 +52,16 @@ maintain a correspondence, but this is not required. Database roles are global across a database cluster installation (and not per individual database). To create a role use the SQL command: + linkend="sql-createrole"/> SQL command: CREATE ROLE name; name follows the rules for SQL identifiers: either unadorned without special characters, or double-quoted. (In practice, you will usually want to add additional - options, such as LOGIN, to the command. More details appear + options, such as LOGIN, to the command. More details appear below.) 
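For example (a sketch only; app_user and its password are placeholders), a login-capable role is commonly created with its attributes spelled out:

CREATE ROLE app_user LOGIN PASSWORD 'change-me' CREATEDB;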
To remove an existing role, use the analogous - command: + command: DROP ROLE name; @@ -76,8 +76,8 @@ DROP ROLE name; - For convenience, the programs - and are provided as wrappers + For convenience, the programs + and are provided as wrappers around these SQL commands that can be called from the shell command line: @@ -87,19 +87,19 @@ dropuser name - To determine the set of existing roles, examine the pg_roles + To determine the set of existing roles, examine the pg_roles system catalog, for example SELECT rolname FROM pg_roles; - The program's \du meta-command + The program's \du meta-command is also useful for listing the existing roles. In order to bootstrap the database system, a freshly initialized system always contains one predefined role. This role is always - a superuser, and by default (unless altered when running + a superuser, and by default (unless altered when running initdb) it will have the same name as the operating system user that initialized the database cluster. Customarily, this role will be named @@ -118,7 +118,7 @@ SELECT rolname FROM pg_roles; command line option to indicate the role to connect as. Many applications assume the name of the current operating system user by default (including - createuser and psql). Therefore it + createuser and psql). Therefore it is often convenient to maintain a naming correspondence between roles and operating system users. @@ -126,7 +126,7 @@ SELECT rolname FROM pg_roles; The set of database roles a given client connection can connect as is determined by the client authentication setup, as explained in - . (Thus, a client is not + . (Thus, a client is not limited to connect as the role matching its operating system user, just as a person's login name need not match his or her real name.) Since the role @@ -145,27 +145,27 @@ SELECT rolname FROM pg_roles; - login privilegelogin privilege + login privilegelogin privilege - Only roles that have the LOGIN attribute can be used + Only roles that have the LOGIN attribute can be used as the initial role name for a database connection. A role with - the LOGIN attribute can be considered the same - as a database user. To create a role with login privilege, + the LOGIN attribute can be considered the same + as a database user. To create a role with login privilege, use either: CREATE ROLE name LOGIN; CREATE USER name; - (CREATE USER is equivalent to CREATE ROLE - except that CREATE USER assumes LOGIN by - default, while CREATE ROLE does not.) + (CREATE USER is equivalent to CREATE ROLE + except that CREATE USER includes LOGIN by + default, while CREATE ROLE does not.) - superuser statussuperuser + superuser statussuperuser A database superuser bypasses all permission checks, except the right @@ -179,7 +179,7 @@ CREATE USER name; - database creationdatabaseprivilege to create + database creationdatabaseprivilege to create A role must be explicitly given permission to create databases @@ -191,30 +191,30 @@ CREATE USER name; - role creationroleprivilege to create + role creationroleprivilege to create A role must be explicitly given permission to create more roles (except for superusers, since those bypass all permission checks). To create such a role, use CREATE ROLE name CREATEROLE. - A role with CREATEROLE privilege can alter and drop + A role with CREATEROLE privilege can alter and drop other roles, too, as well as grant or revoke membership in them. 
However, to create, alter, drop, or change membership of a superuser role, superuser status is required; - CREATEROLE is insufficient for that. + CREATEROLE is insufficient for that. - initiating replicationroleprivilege to initiate replication + initiating replicationroleprivilege to initiate replication A role must explicitly be given permission to initiate streaming replication (except for superusers, since those bypass all permission checks). A role used for streaming replication must - have LOGIN permission as well. To create such a role, use + have LOGIN permission as well. To create such a role, use CREATE ROLE name REPLICATION LOGIN. @@ -222,32 +222,32 @@ CREATE USER name; - passwordpassword + passwordpassword A password is only significant if the client authentication method requires the user to supply a password when connecting - to the database. The and + authentication methods make use of passwords. Database passwords are separate from operating system passwords. Specify a password upon role creation with CREATE ROLE - name PASSWORD 'string'. + name PASSWORD 'string'. A role's attributes can be modified after creation with - ALTER ROLE.ALTER ROLE - See the reference pages for the - and commands for details. + ALTER ROLE.ALTER ROLE + See the reference pages for the + and commands for details. - It is good practice to create a role that has the CREATEDB - and CREATEROLE privileges, but is not a superuser, and then + It is good practice to create a role that has the CREATEDB + and CREATEROLE privileges, but is not a superuser, and then use this role for all routine management of databases and roles. This approach avoids the dangers of operating as a superuser for tasks that do not really require it. @@ -257,7 +257,7 @@ CREATE USER name; A role can also have role-specific defaults for many of the run-time configuration settings described in . For example, if for some reason you + linkend="runtime-config"/>. For example, if for some reason you want to disable index scans (hint: not a good idea) anytime you connect, you can use: @@ -269,9 +269,9 @@ ALTER ROLE myname SET enable_indexscan TO off; just before the session started. You can still alter this setting during the session; it will only be the default. To remove a role-specific default setting, use - ALTER ROLE rolename RESET varname. + ALTER ROLE rolename RESET varname. Note that role-specific defaults attached to roles without - LOGIN privilege are fairly useless, since they will never + LOGIN privilege are fairly useless, since they will never be invoked. @@ -280,7 +280,7 @@ ALTER ROLE myname SET enable_indexscan TO off; Role Membership - rolemembership in + rolemembership in @@ -288,7 +288,7 @@ ALTER ROLE myname SET enable_indexscan TO off; management of privileges: that way, privileges can be granted to, or revoked from, a group as a whole. In PostgreSQL this is done by creating a role that represents the group, and then - granting membership in the group role to individual user + granting membership in the group role to individual user roles. @@ -297,14 +297,14 @@ ALTER ROLE myname SET enable_indexscan TO off; CREATE ROLE name; - Typically a role being used as a group would not have the LOGIN + Typically a role being used as a group would not have the LOGIN attribute, though you can set it if you wish. Once the group role exists, you can add and remove members using the - and - commands: + and + commands: GRANT group_role TO role1, ... ; REVOKE group_role FROM role1, ... 
; @@ -319,12 +319,12 @@ REVOKE group_role FROM role1 The members of a group role can use the privileges of the role in two ways. First, every member of a group can explicitly do - to - temporarily become the group role. In this state, the + to + temporarily become the group role. In this state, the database session has access to the privileges of the group role rather than the original login role, and any database objects created are considered owned by the group role not the login role. Second, member - roles that have the INHERIT attribute automatically have use + roles that have the INHERIT attribute automatically have use of the privileges of roles of which they are members, including any privileges inherited by those roles. As an example, suppose we have done: @@ -335,25 +335,25 @@ CREATE ROLE wheel NOINHERIT; GRANT admin TO joe; GRANT wheel TO admin; - Immediately after connecting as role joe, a database - session will have use of privileges granted directly to joe - plus any privileges granted to admin, because joe - inherits admin's privileges. However, privileges - granted to wheel are not available, because even though - joe is indirectly a member of wheel, the - membership is via admin which has the NOINHERIT + Immediately after connecting as role joe, a database + session will have use of privileges granted directly to joe + plus any privileges granted to admin, because joe + inherits admin's privileges. However, privileges + granted to wheel are not available, because even though + joe is indirectly a member of wheel, the + membership is via admin which has the NOINHERIT attribute. After: SET ROLE admin; the session would have use of only those privileges granted to - admin, and not those granted to joe. After: + admin, and not those granted to joe. After: SET ROLE wheel; the session would have use of only those privileges granted to - wheel, and not those granted to either joe - or admin. The original privilege state can be restored + wheel, and not those granted to either joe + or admin. The original privilege state can be restored with any of: SET ROLE joe; @@ -364,10 +364,10 @@ RESET ROLE; - The SET ROLE command always allows selecting any role + The SET ROLE command always allows selecting any role that the original login role is directly or indirectly a member of. Thus, in the above example, it is not necessary to become - admin before becoming wheel. + admin before becoming wheel. @@ -376,26 +376,26 @@ RESET ROLE; In the SQL standard, there is a clear distinction between users and roles, and users do not automatically inherit privileges while roles do. This behavior can be obtained in PostgreSQL by giving - roles being used as SQL roles the INHERIT attribute, while - giving roles being used as SQL users the NOINHERIT attribute. + roles being used as SQL roles the INHERIT attribute, while + giving roles being used as SQL users the NOINHERIT attribute. However, PostgreSQL defaults to giving all roles - the INHERIT attribute, for backward compatibility with pre-8.1 + the INHERIT attribute, for backward compatibility with pre-8.1 releases in which users always had use of permissions granted to groups they were members of. - The role attributes LOGIN, SUPERUSER, - CREATEDB, and CREATEROLE can be thought of as + The role attributes LOGIN, SUPERUSER, + CREATEDB, and CREATEROLE can be thought of as special privileges, but they are never inherited as ordinary privileges - on database objects are. You must actually SET ROLE to a + on database objects are. 
You must actually SET ROLE to a specific role having one of these attributes in order to make use of the attribute. Continuing the above example, we might choose to - grant CREATEDB and CREATEROLE to the - admin role. Then a session connecting as role joe + grant CREATEDB and CREATEROLE to the + admin role. Then a session connecting as role joe would not have these privileges immediately, only after doing - SET ROLE admin. + SET ROLE admin. @@ -403,7 +403,7 @@ RESET ROLE; To destroy a group role, use : + linkend="sql-droprole"/>: DROP ROLE name; @@ -418,23 +418,23 @@ DROP ROLE name; Because roles can own database objects and can hold privileges to access other objects, dropping a role is often not just a matter of a - quick . Any objects owned by the role must + quick . Any objects owned by the role must first be dropped or reassigned to other owners; and any permissions granted to the role must be revoked. Ownership of objects can be transferred one at a time - using ALTER commands, for example: + using ALTER commands, for example: ALTER TABLE bobs_table OWNER TO alice; - Alternatively, the command can be + Alternatively, the command can be used to reassign ownership of all objects owned by the role-to-be-dropped - to a single other role. Because REASSIGN OWNED cannot access + to a single other role. Because REASSIGN OWNED cannot access objects in other databases, it is necessary to run it in each database that contains objects owned by the role. (Note that the first - such REASSIGN OWNED will change the ownership of any + such REASSIGN OWNED will change the ownership of any shared-across-databases objects, that is databases or tablespaces, that are owned by the role-to-be-dropped.) @@ -442,20 +442,20 @@ ALTER TABLE bobs_table OWNER TO alice; Once any valuable objects have been transferred to new owners, any remaining objects owned by the role-to-be-dropped can be dropped with - the command. Again, this command cannot + the command. Again, this command cannot access objects in other databases, so it is necessary to run it in each database that contains objects owned by the role. Also, DROP - OWNED will not drop entire databases or tablespaces, so it is + OWNED will not drop entire databases or tablespaces, so it is necessary to do that manually if the role owns any databases or tablespaces that have not been transferred to new owners. - DROP OWNED also takes care of removing any privileges granted + DROP OWNED also takes care of removing any privileges granted to the target role for objects that do not belong to it. - Because REASSIGN OWNED does not touch such objects, it's - typically necessary to run both REASSIGN OWNED - and DROP OWNED (in that order!) to fully remove the + Because REASSIGN OWNED does not touch such objects, it's + typically necessary to run both REASSIGN OWNED + and DROP OWNED (in that order!) to fully remove the dependencies of a role to be dropped. @@ -477,7 +477,7 @@ DROP ROLE doomed_role; - If DROP ROLE is attempted while dependent objects still + If DROP ROLE is attempted while dependent objects still remain, it will issue messages identifying which objects need to be reassigned or dropped. @@ -487,7 +487,7 @@ DROP ROLE doomed_role; Default Roles - role + role @@ -499,7 +499,7 @@ DROP ROLE doomed_role; - The default roles are described in . + The default roles are described in . Note that the specific permissions for each of the default roles may change in the future as additional capabilities are added. 
Administrators should monitor the release notes for changes. @@ -534,6 +534,21 @@ DROP ROLE doomed_role; pg_signal_backend Send signals to other backends (eg: cancel query, terminate). + + pg_read_server_files + Allow reading files from any location the database can access on the server with COPY and + other file-access functions. + + + pg_write_server_files + Allow writing to files in any location the database can access on the server with COPY and + other file-access functions. + + + pg_execute_server_program + Allow executing programs on the database server as the user the database runs as with + COPY and other functions which allow executing a server-side program. + pg_monitor Read/execute various monitoring views and functions. @@ -545,6 +560,16 @@ DROP ROLE doomed_role;
+ + The pg_read_server_files, pg_write_server_files and + pg_execute_server_program roles are intended to allow administrators to have + trusted, but non-superuser, roles which are able to access files and run programs on the + database server as the user the database runs as. As these roles are able to access any file on + the server file system, they bypass all database-level permission checks when accessing files + directly and they could be used to gain superuser-level access, therefore care should be taken + when granting these roles to users. + + The pg_monitor, pg_read_all_settings, pg_read_all_stats and pg_stat_scan_tables @@ -556,7 +581,8 @@ DROP ROLE doomed_role; Care should be taken when granting these roles to ensure they are only used where - needed to perform the desired monitoring. + needed and with the understanding that these roles grant access to privileged + information. @@ -571,14 +597,17 @@ GRANT pg_signal_backend TO admin_user; - Function and Trigger Security + Function Security - Functions and triggers allow users to insert code into the backend - server that other users might execute unintentionally. Hence, both - mechanisms permit users to Trojan horse - others with relative ease. The only real protection is tight - control over who can define functions. + Functions, triggers and row-level security policies allow users to insert + code into the backend server that other users might execute + unintentionally. Hence, these mechanisms permit users to Trojan + horse others with relative ease. The strongest protection is tight + control over who can define objects. Where that is infeasible, write + queries referring only to objects having trusted owners. Remove + from search_path the public schema and any other schemas + that permit untrusted users to create objects. @@ -589,7 +618,7 @@ GRANT pg_signal_backend TO admin_user; possible to change the server's internal data structures. Hence, among many other things, such functions can circumvent any system access controls. Function languages that allow such access - are considered untrusted, and + are considered untrusted, and PostgreSQL allows only superusers to create functions written in those languages. diff --git a/doc/src/sgml/uuid-ossp.sgml b/doc/src/sgml/uuid-ossp.sgml index 227d4a839c..b3b816c372 100644 --- a/doc/src/sgml/uuid-ossp.sgml +++ b/doc/src/sgml/uuid-ossp.sgml @@ -8,7 +8,7 @@ - The uuid-ossp module provides functions to generate universally + The uuid-ossp module provides functions to generate universally unique identifiers (UUIDs) using one of several standard algorithms. There are also functions to produce certain special UUID constants. @@ -17,7 +17,7 @@ <literal>uuid-ossp</literal> Functions - shows the functions available to + shows the functions available to generate UUIDs. The relevant standards ITU-T Rec. X.667, ISO/IEC 9834-8:2005, and RFC 4122 specify four algorithms for generating UUIDs, identified by the @@ -63,8 +63,8 @@ This function generates a version 3 UUID in the given namespace using the specified input name. The namespace should be one of the special - constants produced by the uuid_ns_*() functions shown - in . (It could be any UUID in theory.) The name is an identifier + constants produced by the uuid_ns_*() functions shown + in . (It could be any UUID in theory.) The name is an identifier in the selected namespace. @@ -114,7 +114,7 @@ SELECT uuid_generate_v3(uuid_ns_url(), 'http://www.postgresql.org'); uuid_nil() - A nil UUID constant, which does not occur as a real UUID. 
+ A nil UUID constant, which does not occur as a real UUID. @@ -140,7 +140,7 @@ SELECT uuid_generate_v3(uuid_ns_url(), 'http://www.postgresql.org'); Constant designating the ISO object identifier (OID) namespace for UUIDs. (This pertains to ASN.1 OIDs, which are unrelated to the OIDs - used in PostgreSQL.) + used in PostgreSQL.) @@ -159,34 +159,34 @@ SELECT uuid_generate_v3(uuid_ns_url(), 'http://www.postgresql.org'); - Building <filename>uuid-ossp</> + Building <filename>uuid-ossp</filename> Historically this module depended on the OSSP UUID library, which accounts for the module's name. While the OSSP UUID library can still be found at , it is not well maintained, and is becoming increasingly difficult to port to newer - platforms. uuid-ossp can now be built without the OSSP + platforms. uuid-ossp can now be built without the OSSP library on some platforms. On FreeBSD, NetBSD, and some other BSD-derived platforms, suitable UUID creation functions are included in the - core libc library. On Linux, macOS, and some other - platforms, suitable functions are provided in the libuuid - library, which originally came from the e2fsprogs project + core libc library. On Linux, macOS, and some other + platforms, suitable functions are provided in the libuuid + library, which originally came from the e2fsprogs project (though on modern Linux it is considered part - of util-linux-ng). When invoking configure, + of util-linux-ng). When invoking configure, specify to use the BSD functions, or to - use e2fsprogs' libuuid, or + use e2fsprogs' libuuid, or to use the OSSP UUID library. More than one of these libraries might be available on a particular - machine, so configure does not automatically choose one. + machine, so configure does not automatically choose one. If you only need randomly-generated (version 4) UUIDs, - consider using the gen_random_uuid() function - from the module instead. + consider using the gen_random_uuid() function + from the module instead. diff --git a/doc/src/sgml/vacuumlo.sgml b/doc/src/sgml/vacuumlo.sgml index 9da61c93fe..0b57a77af4 100644 --- a/doc/src/sgml/vacuumlo.sgml +++ b/doc/src/sgml/vacuumlo.sgml @@ -28,17 +28,17 @@ Description - vacuumlo is a simple utility program that will remove any - orphaned large objects from a - PostgreSQL database. An orphaned large object (LO) is - considered to be any LO whose OID does not appear in any oid or - lo data column of the database. + vacuumlo is a simple utility program that will remove any + orphaned large objects from a + PostgreSQL database. An orphaned large object (LO) is + considered to be any LO whose OID does not appear in any oid or + lo data column of the database. - If you use this, you may also be interested in the lo_manage - trigger in the module. - lo_manage is useful to try + If you use this, you may also be interested in the lo_manage + trigger in the module. + lo_manage is useful to try to avoid creating orphaned LOs in the first place. @@ -55,13 +55,14 @@ - limit + + - Remove no more than limit large objects per + Remove no more than limit large objects per transaction (default 1000). Since the server acquires a lock per LO removed, removing too many LOs in one transaction risks exceeding - . Set the limit to + . Set the limit to zero if you want all removals done in a single transaction. @@ -69,6 +70,7 @@ + Don't remove anything, just show what would be done. @@ -76,14 +78,15 @@ + Write a lot of progress messages. - - + + Print the vacuumlo version and exit. 
@@ -92,8 +95,8 @@ - - + + Show help about vacuumlo command line @@ -110,29 +113,32 @@ - hostname + + Database server's host. - port + + Database server's port. - username + + User name to connect as. - - + + Never issue a password prompt. If the server requires password @@ -146,6 +152,7 @@ + Force vacuumlo to prompt for a @@ -158,7 +165,7 @@ for a password if the server demands password authentication. However, vacuumlo will waste a connection attempt finding out that the server wants a password. - In some cases it is worth typing to avoid the extra connection attempt. @@ -167,15 +174,39 @@ + + Environment + + + + PGHOST + PGPORT + PGUSER + + + + Default connection parameters. + + + + + + + This utility, like most other PostgreSQL utilities, + also uses the environment variables supported by libpq + (see ). + + + Notes vacuumlo works by the following method: - First, vacuumlo builds a temporary table which contains all + First, vacuumlo builds a temporary table which contains all of the OIDs of the large objects in the selected database. It then scans through all columns in the database that are of type - oid or lo, and removes matching entries from the temporary + oid or lo, and removes matching entries from the temporary table. (Note: Only types with these names are considered; in particular, domains over them are not considered.) The remaining entries in the temporary table identify orphaned LOs. These are removed. diff --git a/doc/src/sgml/wal.sgml b/doc/src/sgml/wal.sgml index 940c37b21a..4eb8feb903 100644 --- a/doc/src/sgml/wal.sgml +++ b/doc/src/sgml/wal.sgml @@ -13,7 +13,7 @@ Reliability is an important property of any serious database - system, and PostgreSQL does everything possible to + system, and PostgreSQL does everything possible to guarantee reliable operation. One aspect of reliable operation is that all data recorded by a committed transaction should be stored in a nonvolatile area that is safe from power loss, operating @@ -34,21 +34,21 @@ First, there is the operating system's buffer cache, which caches frequently requested disk blocks and combines disk writes. Fortunately, all operating systems give applications a way to force writes from - the buffer cache to disk, and PostgreSQL uses those - features. (See the parameter + the buffer cache to disk, and PostgreSQL uses those + features. (See the parameter to adjust how this is done.) Next, there might be a cache in the disk drive controller; this is - particularly common on RAID controller cards. Some of - these caches are write-through, meaning writes are sent + particularly common on RAID controller cards. Some of + these caches are write-through, meaning writes are sent to the drive as soon as they arrive. Others are - write-back, meaning data is sent to the drive at + write-back, meaning data is sent to the drive at some later time. Such caches can be a reliability hazard because the memory in the disk controller cache is volatile, and will lose its contents in a power failure. Better controller cards have - battery-backup units (BBUs), meaning + battery-backup units (BBUs), meaning the card has a battery that maintains power to the cache in case of system power loss. After power is restored the data will be written to the disk drives. @@ -71,22 +71,22 @@ - On Linux, IDE and SATA drives can be queried using + On Linux, IDE and SATA drives can be queried using hdparm -I; write caching is enabled if there is - a * next to Write cache. hdparm -W 0 + a * next to Write cache. 
hdparm -W 0 can be used to turn off write caching. SCSI drives can be queried - using sdparm. + using sdparm. Use sdparm --get=WCE to check - whether the write cache is enabled and sdparm --clear=WCE + whether the write cache is enabled and sdparm --clear=WCE to disable it. - On FreeBSD, IDE drives can be queried using + On FreeBSD, IDE drives can be queried using atacontrol and write caching turned off using - hw.ata.wc=0 in /boot/loader.conf; + hw.ata.wc=0 in /boot/loader.conf; SCSI drives can be queried using camcontrol identify, and the write cache both queried and changed using sdparm when available. @@ -95,20 +95,20 @@ - On Solaris, the disk write cache is controlled by - format -e. - (The Solaris ZFS file system is safe with disk write-cache + On Solaris, the disk write cache is controlled by + format -e. + (The Solaris ZFS file system is safe with disk write-cache enabled because it issues its own disk cache flush commands.) - On Windows, if wal_sync_method is - open_datasync (the default), write caching can be disabled - by unchecking My Computer\Open\disk drive\Properties\Hardware\Properties\Policies\Enable write caching on the disk. + On Windows, if wal_sync_method is + open_datasync (the default), write caching can be disabled + by unchecking My Computer\Open\disk drive\Properties\Hardware\Properties\Policies\Enable write caching on the disk. Alternatively, set wal_sync_method to - fsync or fsync_writethrough, which prevent + fsync or fsync_writethrough, which prevent write caching. @@ -116,24 +116,24 @@ On macOS, write caching can be prevented by - setting wal_sync_method to fsync_writethrough. + setting wal_sync_method to fsync_writethrough. - Recent SATA drives (those following ATAPI-6 or later) - offer a drive cache flush command (FLUSH CACHE EXT), + Recent SATA drives (those following ATAPI-6 or later) + offer a drive cache flush command (FLUSH CACHE EXT), while SCSI drives have long supported a similar command - SYNCHRONIZE CACHE. These commands are not directly - accessible to PostgreSQL, but some file systems - (e.g., ZFS, ext4) can use them to flush + SYNCHRONIZE CACHE. These commands are not directly + accessible to PostgreSQL, but some file systems + (e.g., ZFS, ext4) can use them to flush data to the platters on write-back-enabled drives. Unfortunately, such file systems behave suboptimally when combined with battery-backup unit - (BBU) disk controllers. In such setups, the synchronize + (BBU) disk controllers. In such setups, the synchronize command forces all data from the controller cache to the disks, eliminating much of the benefit of the BBU. You can run the - program to see + program to see if you are affected. If you are affected, the performance benefits of the BBU can be regained by turning off write barriers in the file system or reconfiguring the disk controller, if that is @@ -155,7 +155,7 @@ If you use SSDs, be aware that many of these do not honor cache flush commands by default. You can test for reliable I/O subsystem behavior using diskchecker.pl. + url="https://brad.livejournal.com/2116715.html">diskchecker.pl. @@ -164,22 +164,22 @@ commonly 512 bytes each. Every physical read or write operation processes a whole sector. 
When a write request arrives at the drive, it might be for some multiple - of 512 bytes (PostgreSQL typically writes 8192 bytes, or + of 512 bytes (PostgreSQL typically writes 8192 bytes, or 16 sectors, at a time), and the process of writing could fail due to power loss at any time, meaning some of the 512-byte sectors were written while others were not. To guard against such failures, - PostgreSQL periodically writes full page images to - permanent WAL storage before modifying the actual page on - disk. By doing this, during crash recovery PostgreSQL can + PostgreSQL periodically writes full page images to + permanent WAL storage before modifying the actual page on + disk. By doing this, during crash recovery PostgreSQL can restore partially-written pages from WAL. If you have file-system software that prevents partial page writes (e.g., ZFS), you can turn off this page imaging by turning off the parameter. Battery-Backed Unit + linkend="guc-full-page-writes"/> parameter. Battery-Backed Unit (BBU) disk controllers do not prevent partial page writes unless they guarantee that data is written to the BBU as full (8kB) pages. - PostgreSQL also protects against some kinds of data corruption + PostgreSQL also protects against some kinds of data corruption on storage devices that may occur because of hardware errors or media failure over time, such as reading/writing garbage data. @@ -195,7 +195,7 @@ Data pages are not currently checksummed by default, though full page images recorded in WAL records will be protected; see initdb + linkend="app-initdb-data-checksums">initdb for details about enabling data page checksums. @@ -224,7 +224,7 @@ - PostgreSQL does not protect against correctable memory errors + PostgreSQL does not protect against correctable memory errors and it is assumed you will operate using RAM that uses industry standard Error Correcting Codes (ECC) or better protection. @@ -267,7 +267,7 @@ causes file system data to be flushed to disk. Fortunately, data flushing during journaling can often be disabled with a file system mount option, e.g. - data=writeback on a Linux ext3 file system. + data=writeback on a Linux ext3 file system. Journaled file systems do improve boot speed after a crash. @@ -290,7 +290,7 @@ WAL also makes it possible to support on-line backup and point-in-time recovery, as described in . By archiving the WAL data we can support + linkend="continuous-archiving"/>. By archiving the WAL data we can support reverting to any time instant covered by the available WAL data: we simply install a prior physical backup of the database, and replay the WAL log just as far as the desired time. What's more, @@ -313,7 +313,7 @@ - Asynchronous commit is an option that allows transactions + Asynchronous commit is an option that allows transactions to complete more quickly, at the cost that the most recent transactions may be lost if the database should crash. In many applications this is an acceptable trade-off. @@ -321,7 +321,7 @@ As described in the previous section, transaction commit is normally - synchronous: the server waits for the transaction's + synchronous: the server waits for the transaction's WAL records to be flushed to permanent storage before returning a success indication to the client. The client is therefore guaranteed that a transaction reported to be committed will @@ -367,30 +367,30 @@ transactions running concurrently. This allows flexible trade-offs between performance and certainty of transaction durability. 
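As a sketch of the per-transaction choice described above (the table name application_log is hypothetical; synchronous_commit is the parameter discussed next):

BEGIN;
SET LOCAL synchronous_commit TO OFF;   -- only this transaction commits asynchronously
INSERT INTO application_log VALUES ('low-value event');
COMMIT;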
The commit mode is controlled by the user-settable parameter - , which can be changed in any of + , which can be changed in any of the ways that a configuration parameter can be set. The mode used for any one transaction depends on the value of synchronous_commit when transaction commit begins. - Certain utility commands, for instance DROP TABLE, are + Certain utility commands, for instance DROP TABLE, are forced to commit synchronously regardless of the setting of synchronous_commit. This is to ensure consistency between the server's file system and the logical state of the database. The commands supporting two-phase commit, such as PREPARE - TRANSACTION, are also always synchronous. + TRANSACTION, are also always synchronous. If the database crashes during the risk window between an asynchronous commit and the writing of the transaction's WAL records, - then changes made during that transaction will be lost. + then changes made during that transaction will be lost. The duration of the risk window is limited because a background process (the WAL - writer) flushes unwritten WAL records to disk - every milliseconds. + writer) flushes unwritten WAL records to disk + every milliseconds. The actual maximum duration of the risk window is three times wal_writer_delay because the WAL writer is designed to favor writing whole pages at a time during busy periods. @@ -405,13 +405,13 @@ Asynchronous commit provides behavior different from setting - = off. + = off. fsync is a server-wide setting that will alter the behavior of all transactions. It disables - all logic within PostgreSQL that attempts to synchronize + all logic within PostgreSQL that attempts to synchronize writes to different portions of the database, and therefore a system crash (that is, a hardware or operating system crash, not a failure of - PostgreSQL itself) could result in arbitrarily bad + PostgreSQL itself) could result in arbitrarily bad corruption of the database state. In many scenarios, asynchronous commit provides most of the performance improvement that could be obtained by turning off fsync, but without the risk @@ -419,7 +419,7 @@ - also sounds very similar to + also sounds very similar to asynchronous commit, but it is actually a synchronous commit method (in fact, commit_delay is ignored during an asynchronous commit). commit_delay causes a delay @@ -437,14 +437,14 @@ <acronym>WAL</acronym> Configuration - There are several WAL-related configuration parameters that + There are several WAL-related configuration parameters that affect database performance. This section explains their use. - Consult for general information about + Consult for general information about setting server configuration parameters. - Checkpointscheckpoint + Checkpointscheckpoint are points in the sequence of transactions at which it is guaranteed that the heap and index data files have been updated with all information written before that checkpoint. At checkpoint time, all @@ -472,15 +472,15 @@ The server's checkpointer process automatically performs a checkpoint every so often. A checkpoint is begun every seconds, or if - is about to be exceeded, + linkend="guc-checkpoint-timeout"/> seconds, or if + is about to be exceeded, whichever comes first. The default settings are 5 minutes and 1 GB, respectively. If no WAL has been written since the previous checkpoint, new checkpoints - will be skipped even if checkpoint_timeout has passed. + will be skipped even if checkpoint_timeout has passed. 
(If WAL archiving is being used and you want to put a lower limit on how often files are archived in order to bound potential data loss, you should - adjust the parameter rather than the + adjust the parameter rather than the checkpoint parameters.) It is also possible to force a checkpoint by using the SQL command CHECKPOINT. @@ -492,7 +492,7 @@ more often. This allows faster after-crash recovery, since less work will need to be redone. However, one must balance this against the increased cost of flushing dirty data pages more often. If - is set (as is the default), there is + is set (as is the default), there is another factor to consider. To ensure data page consistency, the first modification of a data page after each checkpoint results in logging the entire page content. In that case, @@ -507,15 +507,15 @@ extra subsequent WAL traffic as discussed above. It is therefore wise to set the checkpointing parameters high enough so that checkpoints don't happen too often. As a simple sanity check on your checkpointing - parameters, you can set the + parameters, you can set the parameter. If checkpoints happen closer together than - checkpoint_warning seconds, + checkpoint_warning seconds, a message will be output to the server log recommending increasing max_wal_size. Occasional appearance of such a message is not cause for alarm, but if it appears often then the checkpoint control parameters should be increased. Bulk operations such - as large COPY transfers might cause a number of such warnings - to appear if you have not set max_wal_size high + as large COPY transfers might cause a number of such warnings + to appear if you have not set max_wal_size high enough. @@ -523,14 +523,14 @@ To avoid flooding the I/O system with a burst of page writes, writing dirty buffers during a checkpoint is spread over a period of time. That period is controlled by - , which is + , which is given as a fraction of the checkpoint interval. The I/O rate is adjusted so that the checkpoint finishes when the given fraction of checkpoint_timeout seconds have elapsed, or before max_wal_size is exceeded, whichever is sooner. With the default value of 0.5, - PostgreSQL can be expected to complete each checkpoint + PostgreSQL can be expected to complete each checkpoint in about half the time before the next checkpoint starts. On a system that's very close to maximum I/O throughput during normal operation, you might want to increase checkpoint_completion_target @@ -546,23 +546,23 @@ - On Linux and POSIX platforms + On Linux and POSIX platforms allows forcing the OS to flush pages written by the checkpoint to disk after a configurable number of bytes. Otherwise, these pages may be kept in the OS's page cache, inducing a stall when - fsync is issued at the end of a checkpoint. This setting will - often help to reduce transaction latency, but it also can an adverse effect - on performance; particularly for workloads that are bigger than - , but smaller than the OS's page cache. + fsync is issued at the end of a checkpoint. This setting will + often help to reduce transaction latency, but it also can have an adverse + effect on performance; particularly for workloads that are bigger than + , but smaller than the OS's page cache. - The number of WAL segment files in pg_wal directory depends on - min_wal_size, max_wal_size and + The number of WAL segment files in pg_wal directory depends on + min_wal_size, max_wal_size and the amount of WAL generated in previous checkpoint cycles.
When old log segment files are no longer needed, they are removed or recycled (that is, renamed to become future segments in the numbered sequence). If, due to a - short-term peak of log output rate, max_wal_size is + short-term peak of log output rate, max_wal_size is exceeded, the unneeded segment files will be removed until the system gets back under this limit. Below that limit, the system recycles enough WAL files to cover the estimated need until the next checkpoint, and @@ -570,7 +570,7 @@ of WAL files used in previous checkpoint cycles. The moving average is increased immediately if the actual usage exceeds the estimate, so it accommodates peak usage rather than average usage to some extent. - min_wal_size puts a minimum on the amount of WAL files + min_wal_size puts a minimum on the amount of WAL files recycled for future usage; that much WAL is always recycled for future use, even if the system is idle and the WAL usage estimate suggests that little WAL is needed. @@ -578,33 +578,33 @@ Independently of max_wal_size, - + 1 most recent WAL files are + + 1 most recent WAL files are kept at all times. Also, if WAL archiving is used, old segments can not be removed or recycled until they are archived. If WAL archiving cannot keep up with the pace that WAL is generated, or if archive_command - fails repeatedly, old WAL files will accumulate in pg_wal + fails repeatedly, old WAL files will accumulate in pg_wal until the situation is resolved. A slow or failed standby server that uses a replication slot will have the same effect (see - ). + ). In archive recovery or standby mode, the server periodically performs - restartpoints,restartpoint + restartpoints,restartpoint which are similar to checkpoints in normal operation: the server forces - all its state to disk, updates the pg_control file to + all its state to disk, updates the pg_control file to indicate that the already-processed WAL data need not be scanned again, - and then recycles any old log segment files in the pg_wal + and then recycles any old log segment files in the pg_wal directory. Restartpoints can't be performed more frequently than checkpoints in the master because restartpoints can only be performed at checkpoint records. A restartpoint is triggered when a checkpoint record is reached if at - least checkpoint_timeout seconds have passed since the last + least checkpoint_timeout seconds have passed since the last restartpoint, or if WAL size is about to exceed - max_wal_size. However, because of limitations on when a - restartpoint can be performed, max_wal_size is often exceeded + max_wal_size. However, because of limitations on when a + restartpoint can be performed, max_wal_size is often exceeded during recovery, by up to one checkpoint cycle's worth of WAL. - (max_wal_size is never a hard limit anyway, so you should + (max_wal_size is never a hard limit anyway, so you should always leave plenty of headroom to avoid running out of disk space.) @@ -629,21 +629,21 @@ not occur often enough to prevent XLogInsertRecord from having to do writes. On such systems one should increase the number of WAL buffers by - modifying the parameter. When - is set and the system is very busy, - setting wal_buffers higher will help smooth response times + modifying the parameter. When + is set and the system is very busy, + setting wal_buffers higher will help smooth response times during the period immediately following each checkpoint. 
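For instance, the WAL buffer allocation could be raised as sketched below ('16MB' is an illustrative value rather than a recommendation, and wal_buffers only takes effect at server start):

ALTER SYSTEM SET wal_buffers = '16MB';
-- restart the server afterwards for the new setting to take effect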
- The parameter defines for how many + The parameter defines for how many microseconds a group commit leader process will sleep after acquiring a lock within XLogFlush, while group commit followers queue up behind the leader. This delay allows other server processes to add their commit records to the WAL buffers so that all of them will be flushed by the leader's eventual sync operation. No sleep - will occur if is not enabled, or if fewer - than other sessions are currently + will occur if is not enabled, or if fewer + than other sessions are currently in active transactions; this avoids sleeping when it's unlikely that any other session will commit soon. Note that on some platforms, the resolution of a sleep request is ten milliseconds, so that any nonzero @@ -661,7 +661,7 @@ be chosen intelligently. The higher that cost is, the more effective commit_delay is expected to be in increasing transaction throughput, up to a point. The program can be used to measure the average time + linkend="pgtestfsync"/> program can be used to measure the average time in microseconds that a single WAL flush operation takes. A value of half of the average time the program reports it takes to flush after a single 8kB write operation is often the most effective setting for @@ -686,7 +686,7 @@ will consist only of sessions that reach the point where they need to flush their commit records during the window in which the previous flush operation (if any) is occurring. At higher client counts a - gangway effect tends to occur, so that the effects of group + gangway effect tends to occur, so that the effects of group commit become significant even when commit_delay is zero, and thus explicitly setting commit_delay tends to help less. Setting commit_delay can only help @@ -698,21 +698,21 @@ - The parameter determines how + The parameter determines how PostgreSQL will ask the kernel to force WAL updates out to disk. All the options should be the same in terms of reliability, with - the exception of fsync_writethrough, which can sometimes + the exception of fsync_writethrough, which can sometimes force a flush of the disk cache even when other options do not do so. However, it's quite platform-specific which one will be the fastest. You can test the speeds of different options using the program. + linkend="pgtestfsync"/> program. Note that this parameter is irrelevant if fsync has been turned off. - Enabling the configuration parameter + Enabling the configuration parameter (provided that PostgreSQL has been compiled with support for it) will result in each XLogInsertRecord and XLogFlush @@ -733,7 +733,7 @@ required from the administrator except ensuring that the disk-space requirements for the WAL logs are met, and that any necessary tuning is done (see ). + linkend="wal-configuration"/>). @@ -742,7 +742,7 @@ a Log Sequence Number (LSN) that is a byte offset into the logs, increasing monotonically with each new record. LSN values are returned as the datatype - pg_lsn. Values can be + pg_lsn. Values can be compared to calculate the volume of WAL data that separates them, so they are used to measure the progress of replication and recovery. @@ -752,13 +752,12 @@ WAL logs are stored in the directory pg_wal under the data directory, as a set of segment files, normally each 16 MB in size (but the size can be changed - by altering the initdb option). Each segment is + divided into pages, normally 8 kB each (this size can be changed via the + configure option). 
The log record headers + are described in access/xlogrecord.h; the record + content is dependent on the type of event that is being logged. Segment + files are given ever-increasing numbers as names, starting at 000000010000000000000000. The numbers do not wrap, but it will take a very, very long time to exhaust the available stock of numbers. @@ -775,14 +774,14 @@ The aim of WAL is to ensure that the log is written before database records are altered, but this can be subverted by - disk drivesdisk drive that falsely report a + disk drivesdisk drive that falsely report a successful write to the kernel, when in fact they have only cached the data and not yet stored it on the disk. A power failure in such a situation might lead to irrecoverable data corruption. Administrators should try to ensure that disks holding PostgreSQL's WAL log files do not make such false reports. - (See .) + (See .) @@ -794,7 +793,7 @@ scanning forward from the log location indicated in the checkpoint record. Because the entire content of data pages is saved in the log on the first page modification after a checkpoint (assuming - is not disabled), all pages + is not disabled), all pages changed since the checkpoint will be restored to a consistent state. diff --git a/doc/src/sgml/xaggr.sgml b/doc/src/sgml/xaggr.sgml index 79a9f288b2..4155b01ece 100644 --- a/doc/src/sgml/xaggr.sgml +++ b/doc/src/sgml/xaggr.sgml @@ -41,10 +41,10 @@ If we define an aggregate that does not use a final function, we have an aggregate that computes a running function of - the column values from each row. sum is an - example of this kind of aggregate. sum starts at + the column values from each row. sum is an + example of this kind of aggregate. sum starts at zero and always adds the current row's value to - its running total. For example, if we want to make a sum + its running total. For example, if we want to make a sum aggregate to work on a data type for complex numbers, we only need the addition function for that data type. The aggregate definition would be: @@ -69,7 +69,7 @@ SELECT sum(a) FROM test_complex; (Notice that we are relying on function overloading: there is more than - one aggregate named sum, but + one aggregate named sum, but PostgreSQL can figure out which kind of sum applies to a column of type complex.) @@ -83,17 +83,17 @@ SELECT sum(a) FROM test_complex; value is null. Ordinarily this would mean that the sfunc would need to check for a null state-value input. But for sum and some other simple aggregates like - max and min, + max and min, it is sufficient to insert the first nonnull input value into the state variable and then start applying the transition function at the second nonnull input value. PostgreSQL will do that automatically if the initial state value is null and - the transition function is marked strict (i.e., not to be called + the transition function is marked strict (i.e., not to be called for null inputs). - Another bit of default behavior for a strict transition function + Another bit of default behavior for a strict transition function is that the previous state value is retained unchanged whenever a null input value is encountered. Thus, null values are ignored. If you need some other behavior for null inputs, do not declare your @@ -102,7 +102,7 @@ SELECT sum(a) FROM test_complex; - avg (average) is a more complex example of an aggregate. + avg (average) is a more complex example of an aggregate. It requires two pieces of running state: the sum of the inputs and the count of the number of inputs. 
The final result is obtained by dividing @@ -124,16 +124,16 @@ CREATE AGGREGATE avg (float8) - float8_accum requires a three-element array, not just + float8_accum requires a three-element array, not just two elements, because it accumulates the sum of squares as well as the sum and count of the inputs. This is so that it can be used for - some other aggregates as well as avg. + some other aggregates as well as avg. - Aggregate function calls in SQL allow DISTINCT - and ORDER BY options that control which rows are fed + Aggregate function calls in SQL allow DISTINCT + and ORDER BY options that control which rows are fed to the aggregate's transition function and in what order. These options are implemented behind the scenes and are not the concern of the aggregate's support functions. @@ -141,7 +141,7 @@ CREATE AGGREGATE avg (float8) For further details see the - + command. @@ -159,16 +159,16 @@ CREATE AGGREGATE avg (float8) Aggregate functions can optionally support moving-aggregate - mode, which allows substantially faster execution of aggregate + mode, which allows substantially faster execution of aggregate functions within windows with moving frame starting points. - (See - and for information about use of + (See + and for information about use of aggregate functions as window functions.) - The basic idea is that in addition to a normal forward + The basic idea is that in addition to a normal forward transition function, the aggregate provides an inverse - transition function, which allows rows to be removed from the + transition function, which allows rows to be removed from the aggregate's running state value when they exit the window frame. - For example a sum aggregate, which uses addition as the + For example a sum aggregate, which uses addition as the forward transition function, would use subtraction as the inverse transition function. Without an inverse transition function, the window function mechanism must recalculate the aggregate from scratch each time @@ -193,7 +193,7 @@ CREATE AGGREGATE avg (float8) - As an example, we could extend the sum aggregate given above + As an example, we could extend the sum aggregate given above to support moving-aggregate mode like this: @@ -209,10 +209,10 @@ CREATE AGGREGATE sum (complex) ); - The parameters whose names begin with m define the + The parameters whose names begin with m define the moving-aggregate implementation. Except for the inverse transition - function minvfunc, they correspond to the plain-aggregate - parameters without m. + function minvfunc, they correspond to the plain-aggregate + parameters without m. @@ -224,10 +224,10 @@ CREATE AGGREGATE sum (complex) current frame starting position. This convention allows moving-aggregate mode to be used in situations where there are some infrequent cases that are impractical to reverse out of the running state value. The inverse - transition function can punt on these cases, and yet still come + transition function can punt on these cases, and yet still come out ahead so long as it can work for most cases. As an example, an aggregate working with floating-point numbers might choose to punt when - a NaN (not a number) input has to be removed from the running + a NaN (not a number) input has to be removed from the running state value. @@ -238,8 +238,8 @@ CREATE AGGREGATE sum (complex) in results depending on whether the moving-aggregate mode is used. 
An example of an aggregate for which adding an inverse transition function seems easy at first, yet where this requirement cannot be met - is sum over float4 or float8 inputs. A - naive declaration of sum(float8) could be + is sum over float4 or float8 inputs. A + naive declaration of sum(float8) could be CREATE AGGREGATE unsafe_sum (float8) @@ -262,13 +262,13 @@ FROM (VALUES (1, 1.0e20::float8), (2, 1.0::float8)) AS v (n,x); - This query returns 0 as its second result, rather than the - expected answer of 1. The cause is the limited precision of - floating-point values: adding 1 to 1e20 results - in 1e20 again, and so subtracting 1e20 from that - yields 0, not 1. Note that this is a limitation + This query returns 0 as its second result, rather than the + expected answer of 1. The cause is the limited precision of + floating-point values: adding 1 to 1e20 results + in 1e20 again, and so subtracting 1e20 from that + yields 0, not 1. Note that this is a limitation of floating-point arithmetic in general, not a limitation - of PostgreSQL. + of PostgreSQL. @@ -290,7 +290,7 @@ FROM (VALUES (1, 1.0e20::float8), Aggregate functions can use polymorphic state transition functions or final functions, so that the same functions can be used to implement multiple aggregates. - See + See for an explanation of polymorphic functions. Going a step further, the aggregate function itself can be specified with polymorphic input type(s) and state type, allowing a single @@ -309,7 +309,7 @@ CREATE AGGREGATE array_accum (anyelement) Here, the actual state type for any given aggregate call is the array type having the actual input type as elements. The behavior of the aggregate is to concatenate all the inputs into an array of that type. - (Note: the built-in aggregate array_agg provides similar + (Note: the built-in aggregate array_agg provides similar functionality, with better performance than this definition would have.) @@ -344,19 +344,19 @@ SELECT attrelid::regclass, array_accum(atttypid::regtype) polymorphic state type, as in the above example. This is necessary because otherwise the final function cannot be declared sensibly: it would need to have a polymorphic result type but no polymorphic argument - type, which CREATE FUNCTION will reject on the grounds that + type, which CREATE FUNCTION will reject on the grounds that the result type cannot be deduced from a call. But sometimes it is inconvenient to use a polymorphic state type. The most common case is where the aggregate support functions are to be written in C and the - state type should be declared as internal because there is + state type should be declared as internal because there is no SQL-level equivalent for it. To address this case, it is possible to - declare the final function as taking extra dummy arguments + declare the final function as taking extra dummy arguments that match the input arguments of the aggregate. Such dummy arguments are always passed as null values since no specific value is available when the final function is called. Their only use is to allow a polymorphic final function's result type to be connected to the aggregate's input type(s). 
For example, the definition of the built-in - aggregate array_agg is equivalent to + aggregate array_agg is equivalent to CREATE FUNCTION array_agg_transfn(internal, anynonarray) @@ -373,30 +373,30 @@ CREATE AGGREGATE array_agg (anynonarray) ); - Here, the finalfunc_extra option specifies that the final + Here, the finalfunc_extra option specifies that the final function receives, in addition to the state value, extra dummy argument(s) corresponding to the aggregate's input argument(s). - The extra anynonarray argument allows the declaration - of array_agg_finalfn to be valid. + The extra anynonarray argument allows the declaration + of array_agg_finalfn to be valid. An aggregate function can be made to accept a varying number of arguments - by declaring its last argument as a VARIADIC array, in much + by declaring its last argument as a VARIADIC array, in much the same fashion as for regular functions; see - . The aggregate's transition + . The aggregate's transition function(s) must have the same array type as their last argument. The - transition function(s) typically would also be marked VARIADIC, + transition function(s) typically would also be marked VARIADIC, but this is not strictly required. Variadic aggregates are easily misused in connection with - the ORDER BY option (see ), + the ORDER BY option (see ), since the parser cannot tell whether the wrong number of actual arguments have been given in such a combination. Keep in mind that everything to - the right of ORDER BY is a sort key, not an argument to the + the right of ORDER BY is a sort key, not an argument to the aggregate. For example, in SELECT myaggregate(a ORDER BY a, b, c) FROM ... @@ -406,7 +406,7 @@ SELECT myaggregate(a ORDER BY a, b, c) FROM ... SELECT myaggregate(a, b, c ORDER BY a) FROM ... - If myaggregate is variadic, both these calls could be + If myaggregate is variadic, both these calls could be perfectly valid. @@ -427,19 +427,19 @@ SELECT myaggregate(a, b, c ORDER BY a) FROM ... - The aggregates we have been describing so far are normal - aggregates. PostgreSQL also - supports ordered-set aggregates, which differ from + The aggregates we have been describing so far are normal + aggregates. PostgreSQL also + supports ordered-set aggregates, which differ from normal aggregates in two key ways. First, in addition to ordinary aggregated arguments that are evaluated once per input row, an - ordered-set aggregate can have direct arguments that are + ordered-set aggregate can have direct arguments that are evaluated only once per aggregation operation. Second, the syntax for the ordinary aggregated arguments specifies a sort ordering for them explicitly. An ordered-set aggregate is usually used to implement a computation that depends on a specific row ordering, for instance rank or percentile, so that the sort ordering is a required aspect of any call. For example, the built-in - definition of percentile_disc is equivalent to: + definition of percentile_disc is equivalent to: CREATE FUNCTION ordered_set_transition(internal, anyelement) @@ -456,7 +456,7 @@ CREATE AGGREGATE percentile_disc (float8 ORDER BY anyelement) ); - This aggregate takes a float8 direct argument (the percentile + This aggregate takes a float8 direct argument (the percentile fraction) and an aggregated input that can be of any sortable data type. 
It could be used to obtain a median household income like this: @@ -467,26 +467,33 @@ SELECT percentile_disc(0.5) WITHIN GROUP (ORDER BY income) FROM households; 50489 - Here, 0.5 is a direct argument; it would make no sense + Here, 0.5 is a direct argument; it would make no sense for the percentile fraction to be a value varying across rows. Unlike the case for normal aggregates, the sorting of input rows for - an ordered-set aggregate is not done behind the scenes, + an ordered-set aggregate is not done behind the scenes, but is the responsibility of the aggregate's support functions. The typical implementation approach is to keep a reference to - a tuplesort object in the aggregate's state value, feed the + a tuplesort object in the aggregate's state value, feed the incoming rows into that object, and then complete the sorting and read out the data in the final function. This design allows the final function to perform special operations such as injecting - additional hypothetical rows into the data to be sorted. + additional hypothetical rows into the data to be sorted. While normal aggregates can often be implemented with support functions written in PL/pgSQL or another PL language, ordered-set aggregates generally have to be written in C, since their state values aren't definable as any SQL data type. (In the above example, notice that the state value is declared as - type internal — this is typical.) + type internal — this is typical.) + Also, because the final function performs the sort, it is not possible + to continue adding input rows by executing the transition function again + later. This means the final function is not READ_ONLY; + it must be declared in + as READ_WRITE, or as SHAREABLE if + it's possible for additional final-function calls to make use of the + already-sorted state. @@ -496,9 +503,9 @@ SELECT percentile_disc(0.5) WITHIN GROUP (ORDER BY income) FROM households; same definition as for normal aggregates, but note that the direct arguments (if any) are not provided. The final function receives the last state value, the values of the direct arguments if any, - and (if finalfunc_extra is specified) null values + and (if finalfunc_extra is specified) null values corresponding to the aggregated input(s). As with normal - aggregates, finalfunc_extra is only really useful if the + aggregates, finalfunc_extra is only really useful if the aggregate is polymorphic; then the extra dummy argument(s) are needed to connect the final function's result type to the aggregate's input type(s). @@ -521,7 +528,7 @@ SELECT percentile_disc(0.5) WITHIN GROUP (ORDER BY income) FROM households; Optionally, an aggregate function can support partial - aggregation. The idea of partial aggregation is to run the aggregate's + aggregation. 
The idea of partial aggregation is to run the aggregate's state transition function over different subsets of the input data independently, and then to combine the state values resulting from those subsets to produce the same state value that would have resulted from @@ -536,7 +543,7 @@ SELECT percentile_disc(0.5) WITHIN GROUP (ORDER BY income) FROM households; To support partial aggregation, the aggregate definition must provide - a combine function, which takes two values of the + a combine function, which takes two values of the aggregate's state type (representing the results of aggregating over two subsets of the input rows) and produces a new value of the state type, representing what the state would have been after aggregating over the @@ -547,10 +554,10 @@ SELECT percentile_disc(0.5) WITHIN GROUP (ORDER BY income) FROM households; - As simple examples, MAX and MIN aggregates can be + As simple examples, MAX and MIN aggregates can be made to support partial aggregation by specifying the combine function as the same greater-of-two or lesser-of-two comparison function that is used - as their transition function. SUM aggregates just need an + as their transition function. SUM aggregates just need an addition function as combine function. (Again, this is the same as their transition function, unless the state value is wider than the input data type.) @@ -561,26 +568,26 @@ SELECT percentile_disc(0.5) WITHIN GROUP (ORDER BY income) FROM households; happens to take a value of the state type, not of the underlying input type, as its second argument. In particular, the rules for dealing with null values and strict functions are similar. Also, if the aggregate - definition specifies a non-null initcond, keep in mind that + definition specifies a non-null initcond, keep in mind that that will be used not only as the initial state for each partial aggregation run, but also as the initial state for the combine function, which will be called to combine each partial result into that state. - If the aggregate's state type is declared as internal, it is + If the aggregate's state type is declared as internal, it is the combine function's responsibility that its result is allocated in the correct memory context for aggregate state values. This means in - particular that when the first input is NULL it's invalid + particular that when the first input is NULL it's invalid to simply return the second input, as that value will be in the wrong context and will not have sufficient lifespan. - When the aggregate's state type is declared as internal, it is + When the aggregate's state type is declared as internal, it is usually also appropriate for the aggregate definition to provide a - serialization function and a deserialization - function, which allow such a state value to be copied from one process + serialization function and a deserialization + function, which allow such a state value to be copied from one process to another. Without these functions, parallel aggregation cannot be performed, and future applications such as local/remote aggregation will probably not work either. @@ -588,11 +595,11 @@ SELECT percentile_disc(0.5) WITHIN GROUP (ORDER BY income) FROM households; A serialization function must take a single argument of - type internal and return a result of type bytea, which + type internal and return a result of type bytea, which represents the state value packaged up into a flat blob of bytes. Conversely, a deserialization function reverses that conversion. 
It must - take two arguments of types bytea and internal, and - return a result of type internal. (The second argument is unused + take two arguments of types bytea and internal, and + return a result of type internal. (The second argument is unused and is always zero, but it is required for type-safety reasons.) The result of the deserialization function should simply be allocated in the current memory context, as unlike the combine function's result, it is not @@ -601,7 +608,7 @@ SELECT percentile_disc(0.5) WITHIN GROUP (ORDER BY income) FROM households; Worth noting also is that for an aggregate to be executed in parallel, - the aggregate itself must be marked PARALLEL SAFE. The + the aggregate itself must be marked PARALLEL SAFE. The parallel-safety markings on its support functions are not consulted. @@ -618,31 +625,30 @@ SELECT percentile_disc(0.5) WITHIN GROUP (ORDER BY income) FROM households; A function written in C can detect that it is being called as an aggregate support function by calling - AggCheckCallContext, for example: + AggCheckCallContext, for example: if (AggCheckCallContext(fcinfo, NULL)) - One reason for checking this is that when it is true for a transition - function, the first input + One reason for checking this is that when it is true, the first input must be a temporary state value and can therefore safely be modified in-place rather than allocating a new copy. - See int8inc() for an example. - (This is the only - case where it is safe for a function to modify a pass-by-reference input. - In particular, final functions for normal aggregates must not - modify their inputs in any case, because in some cases they will be - re-executed on the same final state value.) + See int8inc() for an example. + (While aggregate transition functions are always allowed to modify + the transition value in-place, aggregate final functions are generally + discouraged from doing so; if they do so, the behavior must be declared + when creating the aggregate. See + for more detail.) - The second argument of AggCheckCallContext can be used to + The second argument of AggCheckCallContext can be used to retrieve the memory context in which aggregate state values are being kept. - This is useful for transition functions that wish to use expanded - objects (see ) as their state values. + This is useful for transition functions that wish to use expanded + objects (see ) as their state values. On first call, the transition function should return an expanded object whose memory context is a child of the aggregate state context, and then keep returning the same expanded object on subsequent calls. See - array_append() for an example. (array_append() + array_append() for an example. (array_append() is not the transition function of any built-in aggregate, but it is written to behave efficiently when used as transition function of a custom aggregate.) @@ -650,12 +656,12 @@ if (AggCheckCallContext(fcinfo, NULL)) Another support routine available to aggregate functions written in C - is AggGetAggref, which returns the Aggref + is AggGetAggref, which returns the Aggref parse node that defines the aggregate call. This is mainly useful for ordered-set aggregates, which can inspect the substructure of - the Aggref node to find out what sort ordering they are + the Aggref node to find out what sort ordering they are supposed to implement. Examples can be found - in orderedsetaggs.c in the PostgreSQL + in orderedsetaggs.c in the PostgreSQL source code. 
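To tie together the partial-aggregation requirements above, here is a minimal SQL sketch (the aggregate name my_max is hypothetical; int4larger is the built-in greater-of-two function that already serves as the transition function of max(integer)). Because the transition and combine operations are the same comparison, and the state type is an ordinary SQL type rather than internal, no serialization or deserialization functions are needed:

CREATE AGGREGATE my_max (integer)
(
    sfunc = int4larger,        -- transition: keep the larger of state and input
    stype = integer,
    combinefunc = int4larger,  -- combining two partial states uses the same comparison
    parallel = safe            -- the aggregate itself must be marked PARALLEL SAFE
);

With this declaration the planner is free to compute partial results over subsets of the input (for example in parallel workers) and merge them with the combine function.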
diff --git a/doc/src/sgml/xfunc.sgml b/doc/src/sgml/xfunc.sgml index cd6dd840ba..e18272c33a 100644 --- a/doc/src/sgml/xfunc.sgml +++ b/doc/src/sgml/xfunc.sgml @@ -16,24 +16,24 @@ query language functions (functions written in - SQL) () + SQL) () procedural language functions (functions written in, for - example, PL/pgSQL or PL/Tcl) - () + example, PL/pgSQL or PL/Tcl) + () - internal functions () + internal functions () - C-language functions () + C-language functions () @@ -63,15 +63,48 @@ Throughout this chapter, it can be useful to look at the reference - page of the command to + page of the command to understand the examples better. Some examples from this chapter can be found in funcs.sql and - funcs.c in the src/tutorial + funcs.c in the src/tutorial directory in the PostgreSQL source distribution. + + User-defined Procedures + + + procedure + user-defined + + + + A procedure is a database object similar to a function. The difference is + that a procedure does not return a value, so there is no return type + declaration. While a function is called as part of a query or DML + command, a procedure is called explicitly using + the statement. + + + + The explanations on how to define user-defined functions in the rest of + this chapter apply to procedures as well, except that + the command is used instead, there is + no return type, and some other features such as strictness don't apply. + + + + Collectively, functions and procedures are also known + as routinesroutine. + There are commands such as + and that can operate on functions and + procedures without having to know which kind it is. Note, however, that + there is no CREATE ROUTINE command. + + + Query Language (<acronym>SQL</acronym>) Functions @@ -87,7 +120,7 @@ In the simple (non-set) case, the first row of the last query's result will be returned. (Bear in mind that the first row of a multirow - result is not well-defined unless you use ORDER BY.) + result is not well-defined unless you use ORDER BY.) If the last query happens to return no rows at all, the null value will be returned. @@ -95,8 +128,8 @@ Alternatively, an SQL function can be declared to return a set (that is, multiple rows) by specifying the function's return type as SETOF - sometype, or equivalently by declaring it as - RETURNS TABLE(columns). In this case + sometype
, or equivalently by declaring it as + RETURNS TABLE(columns). In this case all rows of the last query's result are returned. Further details appear below. @@ -105,9 +138,9 @@ The body of an SQL function must be a list of SQL statements separated by semicolons. A semicolon after the last statement is optional. Unless the function is declared to return - void, the last statement must be a SELECT, - or an INSERT, UPDATE, or DELETE - that has a RETURNING clause. + void, the last statement must be a SELECT, + or an INSERT, UPDATE, or DELETE + that has a RETURNING clause. @@ -117,16 +150,16 @@ modification queries (INSERT, UPDATE, and DELETE), as well as other SQL commands. (You cannot use transaction control commands, e.g. - COMMIT, SAVEPOINT, and some utility - commands, e.g. VACUUM, in SQL functions.) + COMMIT, SAVEPOINT, and some utility + commands, e.g. VACUUM, in SQL functions.) However, the final command - must be a SELECT or have a RETURNING + must be a SELECT or have a RETURNING clause that returns whatever is specified as the function's return type. Alternatively, if you want to define a SQL function that performs actions but has no - useful value to return, you can define it as returning void. + useful value to return, you can define it as returning void. For example, this function removes rows with negative salaries from - the emp table: + the emp table: CREATE FUNCTION clean_emp() RETURNS void AS ' @@ -147,13 +180,13 @@ SELECT clean_emp(); The entire body of a SQL function is parsed before any of it is executed. While a SQL function can contain commands that alter - the system catalogs (e.g., CREATE TABLE), the effects + the system catalogs (e.g., CREATE TABLE), the effects of such commands will not be visible during parse analysis of later commands in the function. Thus, for example, CREATE TABLE foo (...); INSERT INTO foo VALUES(...); will not work as desired if packaged up into a single SQL function, - since foo won't exist yet when the INSERT - command is parsed. It's recommended to use PL/pgSQL + since foo won't exist yet when the INSERT + command is parsed. It's recommended to use PL/pgSQL instead of a SQL function in this type of situation.
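As a small illustration of that last point (the function and table names here are placeholders, not from the original text), wrapping the two commands in a PL/pgSQL function works because PL/pgSQL plans each statement only when it is first executed, by which time the new table already exists:

CREATE FUNCTION make_and_fill() RETURNS void AS $$
BEGIN
    CREATE TABLE new_tab (x int);
    INSERT INTO new_tab VALUES (42);   -- planned only when reached, after CREATE TABLE has run
END;
$$ LANGUAGE plpgsql;

SELECT make_and_fill();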
@@ -162,11 +195,11 @@ SELECT clean_emp(); The syntax of the CREATE FUNCTION command requires the function body to be written as a string constant. It is usually most convenient to use dollar quoting (see ) for the string constant. + linkend="sql-syntax-dollar-quoting"/>) for the string constant. If you choose to use regular single-quoted string constant syntax, - you must double single quote marks (') and backslashes - (\) (assuming escape string syntax) in the body of - the function (see ). + you must double single quote marks (') and backslashes + (\) (assuming escape string syntax) in the body of + the function (see ). @@ -189,7 +222,7 @@ SELECT clean_emp(); is the same as any column name in the current SQL command within the function, the column name will take precedence. To override this, qualify the argument name with the name of the function itself, that is - function_name.argument_name. + function_name.argument_name. (If this would conflict with a qualified column name, again the column name wins. You can avoid the ambiguity by choosing a different alias for the table within the SQL command.) @@ -197,15 +230,15 @@ SELECT clean_emp(); In the older numeric approach, arguments are referenced using the syntax - $n: $1 refers to the first input - argument, $2 to the second, and so on. This will work + $n: $1 refers to the first input + argument, $2 to the second, and so on. This will work whether or not the particular argument was declared with a name. If an argument is of a composite type, then the dot notation, - e.g., argname.fieldname or - $1.fieldname, can be used to access attributes of the + e.g., argname.fieldname or + $1.fieldname, can be used to access attributes of the argument. Again, you might need to qualify the argument's name with the function name to make the form with an argument name unambiguous. @@ -226,7 +259,7 @@ INSERT INTO $1 VALUES (42); The ability to use names to reference SQL function arguments was added in PostgreSQL 9.2. Functions to be used in - older servers must use the $n notation. + older servers must use the $n notation. @@ -258,9 +291,9 @@ SELECT one(); Notice that we defined a column alias within the function body for the result of the function - (with the name result), but this column alias is not visible - outside the function. Hence, the result is labeled one - instead of result. + (with the name result), but this column alias is not visible + outside the function. Hence, the result is labeled one + instead of result. @@ -319,11 +352,11 @@ SELECT tf1(17, 100.0); - In this example, we chose the name accountno for the first + In this example, we chose the name accountno for the first argument, but this is the same as the name of a column in the - bank table. Within the UPDATE command, - accountno refers to the column bank.accountno, - so tf1.accountno must be used to refer to the argument. + bank table. Within the UPDATE command, + accountno refers to the column bank.accountno, + so tf1.accountno must be used to refer to the argument. We could of course avoid this by using a different name for the argument. @@ -342,7 +375,7 @@ $$ LANGUAGE SQL; which adjusts the balance and returns the new balance. 
- The same thing could be done in one command using RETURNING: + The same thing could be done in one command using RETURNING: CREATE FUNCTION tf1 (accountno integer, debit numeric) RETURNS numeric AS $$ @@ -351,6 +384,31 @@ CREATE FUNCTION tf1 (accountno integer, debit numeric) RETURNS numeric AS $$ WHERE accountno = tf1.accountno RETURNING balance; $$ LANGUAGE SQL; + + + + + A SQL function must return exactly its declared + result type. This may require inserting an explicit cast. + For example, suppose we wanted the + previous add_em function to return + type float8 instead. This won't work: + + +CREATE FUNCTION add_em(integer, integer) RETURNS float8 AS $$ + SELECT $1 + $2; +$$ LANGUAGE SQL; + + + even though in other contexts PostgreSQL + would be willing to insert an implicit cast to + convert integer to float8. + We need to write it as + + +CREATE FUNCTION add_em(integer, integer) RETURNS float8 AS $$ + SELECT ($1 + $2)::float8; +$$ LANGUAGE SQL; @@ -394,8 +452,8 @@ SELECT name, double_salary(emp.*) AS dream Notice the use of the syntax $1.salary to select one field of the argument row value. Also notice - how the calling SELECT command - uses table_name.* to select + how the calling SELECT command + uses table_name.* to select the entire current row of a table as a composite value. The table row can alternatively be referenced using just the table name, like this: @@ -405,13 +463,13 @@ SELECT name, double_salary(emp) AS dream WHERE emp.cubicle ~= point '(2,1)'; but this usage is deprecated since it's easy to get confused. - (See for details about these + (See for details about these two notations for the composite value of a table row.) Sometimes it is handy to construct a composite argument value - on-the-fly. This can be done with the ROW construct. + on-the-fly. This can be done with the ROW construct. For example, we could adjust the data being passed to the function: SELECT name, double_salary(ROW(name, salary*1.1, age, cubicle)) AS dream @@ -452,13 +510,16 @@ $$ LANGUAGE SQL; - You must typecast the expressions to match the - definition of the composite type, or you will get errors like this: + We must ensure each expression's type matches the corresponding + column of the composite type, inserting a cast if necessary. + Otherwise we'll get errors like this: ERROR: function declared to return emp returns varchar instead of text at column 1 + As with the base-type case, the function will not insert any casts + automatically. @@ -473,11 +534,16 @@ CREATE FUNCTION new_emp() RETURNS emp AS $$ $$ LANGUAGE SQL; - Here we wrote a SELECT that returns just a single + Here we wrote a SELECT that returns just a single column of the correct composite type. This isn't really better in this situation, but it is a handy alternative in some cases — for example, if we need to compute the result by calling another function that returns the desired composite value. + Another example is that if we are trying to write a function that + returns a domain over composite, rather than a plain composite type, + it is always necessary to write it as returning a single column, + since there is no other way to produce a value that is exactly of + the domain type. @@ -503,7 +569,7 @@ SELECT * FROM new_emp(); The second way is described more fully in . + linkend="xfunc-sql-table-functions"/>. @@ -541,7 +607,7 @@ SELECT name(new_emp()); None - As explained in , the field notation and + As explained in , the field notation and functional notation are equivalent. 
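For instance, using the new_emp function defined above, these two queries are equivalent ways to extract one field of the composite result:

SELECT name(new_emp());       -- functional notation
SELECT (new_emp()).name;      -- field notation (the parentheses are required)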
@@ -564,7 +630,7 @@ SELECT getname(new_emp()); - <acronym>SQL</> Functions with Output Parameters + <acronym>SQL</acronym> Functions with Output Parameters function @@ -573,7 +639,7 @@ SELECT getname(new_emp()); An alternative way of describing a function's results is to define it - with output parameters, as in this example: + with output parameters, as in this example: CREATE FUNCTION add_em (IN x int, IN y int, OUT sum int) @@ -587,8 +653,8 @@ SELECT add_em(3,7); (1 row) - This is not essentially different from the version of add_em - shown in . The real value of + This is not essentially different from the version of add_em + shown in . The real value of output parameters is that they provide a convenient way of defining functions that return several columns. For example, @@ -639,18 +705,18 @@ DROP FUNCTION sum_n_product (int, int); - Parameters can be marked as IN (the default), - OUT, INOUT, or VARIADIC. - An INOUT + Parameters can be marked as IN (the default), + OUT, INOUT, or VARIADIC. + An INOUT parameter serves as both an input parameter (part of the calling argument list) and an output parameter (part of the result record type). - VARIADIC parameters are input parameters, but are treated + VARIADIC parameters are input parameters, but are treated specially as described next. - <acronym>SQL</> Functions with Variable Numbers of Arguments + <acronym>SQL</acronym> Functions with Variable Numbers of Arguments function @@ -663,10 +729,10 @@ DROP FUNCTION sum_n_product (int, int); SQL functions can be declared to accept - variable numbers of arguments, so long as all the optional + variable numbers of arguments, so long as all the optional arguments are of the same data type. The optional arguments will be passed to the function as an array. The function is declared by - marking the last parameter as VARIADIC; this parameter + marking the last parameter as VARIADIC; this parameter must be declared as being of an array type. For example: @@ -682,7 +748,7 @@ SELECT mleast(10, -1, 5, 4.4); Effectively, all the actual arguments at or beyond the - VARIADIC position are gathered up into a one-dimensional + VARIADIC position are gathered up into a one-dimensional array, as if you had written @@ -691,15 +757,18 @@ SELECT mleast(ARRAY[10, -1, 5, 4.4]); -- doesn't work You can't actually write that, though — or at least, it will not match this function definition. A parameter marked - VARIADIC matches one or more occurrences of its element + VARIADIC matches one or more occurrences of its element type, not of its own type. Sometimes it is useful to be able to pass an already-constructed array to a variadic function; this is particularly handy when one variadic - function wants to pass on its array parameter to another one. You can - do that by specifying VARIADIC in the call: + function wants to pass on its array parameter to another one. Also, + this is the only secure way to call a variadic function found in a schema + that permits untrusted users to create objects; see + . You can do this by + specifying VARIADIC in the call: SELECT mleast(VARIADIC ARRAY[10, -1, 5, 4.4]); @@ -707,21 +776,21 @@ SELECT mleast(VARIADIC ARRAY[10, -1, 5, 4.4]); This prevents expansion of the function's variadic parameter into its element type, thereby allowing the array argument value to match - normally. VARIADIC can only be attached to the last + normally. VARIADIC can only be attached to the last actual argument of a function call. 
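As a sketch of the pass-the-array-on case mentioned above (the wrapper name bounded_least is hypothetical; mleast is the variadic function defined earlier in this section), note that the inner call must itself use the VARIADIC keyword:

CREATE FUNCTION bounded_least(lo numeric, VARIADIC arr numeric[]) RETURNS numeric AS $$
    SELECT greatest(lo, mleast(VARIADIC arr));
$$ LANGUAGE SQL;

SELECT bounded_least(0, 10, -1, 5, 4.4);   -- mleast sees {10,-1,5,4.4}; the result is 0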
- Specifying VARIADIC in the call is also the only way to + Specifying VARIADIC in the call is also the only way to pass an empty array to a variadic function, for example: SELECT mleast(VARIADIC ARRAY[]::numeric[]); - Simply writing SELECT mleast() does not work because a + Simply writing SELECT mleast() does not work because a variadic parameter must match at least one actual argument. - (You could define a second function also named mleast, + (You could define a second function also named mleast, with no parameters, if you wanted to allow such calls.) @@ -729,8 +798,8 @@ SELECT mleast(VARIADIC ARRAY[]::numeric[]); The array element parameters generated from a variadic parameter are treated as not having any names of their own. This means it is not possible to call a variadic function using named arguments (), except when you specify - VARIADIC. For example, this will work: + linkend="sql-syntax-calling-funcs"/>), except when you specify + VARIADIC. For example, this will work: SELECT mleast(VARIADIC arr => ARRAY[10, -1, 5, 4.4]); @@ -746,7 +815,7 @@ SELECT mleast(arr => ARRAY[10, -1, 5, 4.4]); - <acronym>SQL</> Functions with Default Values for Arguments + <acronym>SQL</acronym> Functions with Default Values for Arguments function @@ -761,7 +830,10 @@ SELECT mleast(arr => ARRAY[10, -1, 5, 4.4]); parameters after a parameter with a default value have to have default values as well. (Although the use of named argument notation could allow this restriction to be relaxed, it's still enforced so that - positional argument notation works sensibly.) + positional argument notation works sensibly.) Whether or not you use it, + this capability creates a need for precautions when calling functions in + databases where some users mistrust other users; see + . @@ -804,7 +876,7 @@ ERROR: function foo() does not exist <acronym>SQL</acronym> Functions as Table Sources - All SQL functions can be used in the FROM clause of a query, + All SQL functions can be used in the FROM clause of a query, but it is particularly useful for functions returning composite types. If the function is defined to return a base type, the table function produces a one-column table. If the function is defined to return @@ -839,7 +911,7 @@ SELECT *, upper(fooname) FROM getfoo(1) AS t1; Note that we only got one row out of the function. This is because - we did not use SETOF. That is described in the next section. + we did not use SETOF. That is described in the next section. @@ -853,16 +925,16 @@ SELECT *, upper(fooname) FROM getfoo(1) AS t1; When an SQL function is declared as returning SETOF - sometype, the function's final + sometype, the function's final query is executed to completion, and each row it outputs is returned as an element of the result set. - This feature is normally used when calling the function in the FROM + This feature is normally used when calling the function in the FROM clause. In this case each row returned by the function becomes a row of the table seen by the query. For example, assume that - table foo has the same contents as above, and we say: + table foo has the same contents as above, and we say: CREATE FUNCTION getfoo(int) RETURNS SETOF foo AS $$ @@ -906,18 +978,18 @@ SELECT * FROM sum_n_product_with_tab(10); (4 rows) - The key point here is that you must write RETURNS SETOF record + The key point here is that you must write RETURNS SETOF record to indicate that the function returns multiple rows instead of just one. 
If there is only one output parameter, write that parameter's type - instead of record. + instead of record. It is frequently useful to construct a query's result by invoking a set-returning function multiple times, with the parameters for each invocation coming from successive rows of a table or subquery. The - preferred way to do this is to use the LATERAL key word, - which is described in . + preferred way to do this is to use the LATERAL key word, + which is described in . Here is an example using a set-returning function to enumerate elements of a tree structure: @@ -990,17 +1062,17 @@ SELECT name, listchildren(name) FROM nodes; In the last SELECT, - notice that no output row appears for Child2, Child3, etc. + notice that no output row appears for Child2, Child3, etc. This happens because listchildren returns an empty set for those arguments, so no result rows are generated. This is the same behavior as we got from an inner join to the function result when using - the LATERAL syntax. + the LATERAL syntax. - PostgreSQL's behavior for a set-returning function in a + PostgreSQL's behavior for a set-returning function in a query's select list is almost exactly the same as if the set-returning - function had been written in a LATERAL FROM-clause item + function had been written in a LATERAL FROM-clause item instead. For example, SELECT x, generate_series(1,5) AS g FROM tab; @@ -1010,20 +1082,20 @@ SELECT x, generate_series(1,5) AS g FROM tab; SELECT x, g FROM tab, LATERAL generate_series(1,5) AS g; It would be exactly the same, except that in this specific example, - the planner could choose to put g on the outside of the - nestloop join, since g has no actual lateral dependency - on tab. That would result in a different output row + the planner could choose to put g on the outside of the + nestloop join, since g has no actual lateral dependency + on tab. That would result in a different output row order. Set-returning functions in the select list are always evaluated as though they are on the inside of a nestloop join with the rest of - the FROM clause, so that the function(s) are run to - completion before the next row from the FROM clause is + the FROM clause, so that the function(s) are run to + completion before the next row from the FROM clause is considered. If there is more than one set-returning function in the query's select list, the behavior is similar to what you get from putting the functions - into a single LATERAL ROWS FROM( ... ) FROM-clause + into a single LATERAL ROWS FROM( ... ) FROM-clause item. For each row from the underlying query, there is an output row using the first result from each function, then an output row using the second result, and so on. If some of the set-returning functions @@ -1031,48 +1103,48 @@ SELECT x, g FROM tab, LATERAL generate_series(1,5) AS g; missing data, so that the total number of rows emitted for one underlying row is the same as for the set-returning function that produced the most outputs. Thus the set-returning functions - run in lockstep until they are all exhausted, and then + run in lockstep until they are all exhausted, and then execution continues with the next underlying row. Set-returning functions can be nested in a select list, although that is - not allowed in FROM-clause items. In such cases, each level + not allowed in FROM-clause items. In such cases, each level of nesting is treated separately, as though it were - a separate LATERAL ROWS FROM( ... ) item. For example, in + a separate LATERAL ROWS FROM( ... ) item. 
For example, in SELECT srf1(srf2(x), srf3(y)), srf4(srf5(z)) FROM tab; - the set-returning functions srf2, srf3, - and srf5 would be run in lockstep for each row - of tab, and then srf1 and srf4 + the set-returning functions srf2, srf3, + and srf5 would be run in lockstep for each row + of tab, and then srf1 and srf4 would be applied in lockstep to each row produced by the lower functions. Set-returning functions cannot be used within conditional-evaluation - constructs, such as CASE or COALESCE. For + constructs, such as CASE or COALESCE. For example, consider SELECT x, CASE WHEN x > 0 THEN generate_series(1, 5) ELSE 0 END FROM tab; It might seem that this should produce five repetitions of input rows - that have x > 0, and a single repetition of those that do - not; but actually, because generate_series(1, 5) would be - run in an implicit LATERAL FROM item before - the CASE expression is ever evaluated, it would produce five + that have x > 0, and a single repetition of those that do + not; but actually, because generate_series(1, 5) would be + run in an implicit LATERAL FROM item before + the CASE expression is ever evaluated, it would produce five repetitions of every input row. To reduce confusion, such cases produce a parse-time error instead. - If a function's last command is INSERT, UPDATE, - or DELETE with RETURNING, that command will + If a function's last command is INSERT, UPDATE, + or DELETE with RETURNING, that command will always be executed to completion, even if the function is not declared - with SETOF or the calling query does not fetch all the - result rows. Any extra rows produced by the RETURNING + with SETOF or the calling query does not fetch all the + result rows. Any extra rows produced by the RETURNING clause are silently dropped, but the commanded table modifications still happen (and are all completed before returning from the function). @@ -1080,7 +1152,7 @@ SELECT x, CASE WHEN x > 0 THEN generate_series(1, 5) ELSE 0 END FROM tab; - Before PostgreSQL 10, putting more than one + Before PostgreSQL 10, putting more than one set-returning function in the same select list did not behave very sensibly unless they always produced equal numbers of rows. Otherwise, what you got was a number of output rows equal to the least common @@ -1089,10 +1161,10 @@ SELECT x, CASE WHEN x > 0 THEN generate_series(1, 5) ELSE 0 END FROM tab; described above; instead, a set-returning function could have at most one set-returning argument, and each nest of set-returning functions was run independently. Also, conditional execution (set-returning - functions inside CASE etc) was previously allowed, + functions inside CASE etc) was previously allowed, complicating things even more. - Use of the LATERAL syntax is recommended when writing - queries that need to work in older PostgreSQL versions, + Use of the LATERAL syntax is recommended when writing + queries that need to work in older PostgreSQL versions, because that will give consistent results across different versions. If you have a query that is relying on conditional execution of a set-returning function, you may be able to fix it by moving the @@ -1115,13 +1187,13 @@ END$$ LANGUAGE plpgsql; SELECT x, case_generate_series(y > 0, 1, z, 5) FROM tab; This formulation will work the same in all versions - of PostgreSQL. + of PostgreSQL. 
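To make the lockstep-and-pad-with-nulls behavior described above concrete, here is a self-contained query (no tables required); in PostgreSQL 10 and later it returns three rows, with the shorter series padded by a null:

SELECT generate_series(1, 3) AS g1, generate_series(1, 2) AS g2;
--  g1 | g2
-- ----+----
--   1 |  1
--   2 |  2
--   3 |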
- <acronym>SQL</acronym> Functions Returning <literal>TABLE</> + <acronym>SQL</acronym> Functions Returning <literal>TABLE</literal> function @@ -1131,12 +1203,12 @@ SELECT x, case_generate_series(y > 0, 1, z, 5) FROM tab; There is another way to declare a function as returning a set, which is to use the syntax - RETURNS TABLE(columns). - This is equivalent to using one or more OUT parameters plus - marking the function as returning SETOF record (or - SETOF a single output parameter's type, as appropriate). + RETURNS TABLE(columns). + This is equivalent to using one or more OUT parameters plus + marking the function as returning SETOF record (or + SETOF a single output parameter's type, as appropriate). This notation is specified in recent versions of the SQL standard, and - thus may be more portable than using SETOF. + thus may be more portable than using SETOF. @@ -1150,9 +1222,9 @@ RETURNS TABLE(sum int, product int) AS $$ $$ LANGUAGE SQL; - It is not allowed to use explicit OUT or INOUT - parameters with the RETURNS TABLE notation — you must - put all the output columns in the TABLE list. + It is not allowed to use explicit OUT or INOUT + parameters with the RETURNS TABLE notation — you must + put all the output columns in the TABLE list. @@ -1164,7 +1236,7 @@ $$ LANGUAGE SQL; return the polymorphic types anyelement, anyarray, anynonarray, anyenum, and anyrange. See for a more detailed + linkend="extend-types-polymorphic"/> for a more detailed explanation of polymorphic functions. Here is a polymorphic function make_array that builds up an array from two arbitrary data type elements: @@ -1270,34 +1342,34 @@ SELECT concat_values('|', 1, 4, 2); <acronym>SQL</acronym> Functions with Collations - collation - in SQL functions + collation + in SQL functions When a SQL function has one or more parameters of collatable data types, a collation is identified for each function call depending on the collations assigned to the actual arguments, as described in . If a collation is successfully identified + linkend="collation"/>. If a collation is successfully identified (i.e., there are no conflicts of implicit collations among the arguments) then all the collatable parameters are treated as having that collation implicitly. This will affect the behavior of collation-sensitive operations within the function. For example, using the - anyleast function described above, the result of + anyleast function described above, the result of SELECT anyleast('abc'::text, 'ABC'); - will depend on the database's default collation. In C locale - the result will be ABC, but in many other locales it will - be abc. The collation to use can be forced by adding - a COLLATE clause to any of the arguments, for example + will depend on the database's default collation. In C locale + the result will be ABC, but in many other locales it will + be abc. The collation to use can be forced by adding + a COLLATE clause to any of the arguments, for example SELECT anyleast('abc'::text, 'ABC' COLLATE "C"); Alternatively, if you wish a function to operate with a particular collation regardless of what it is called with, insert - COLLATE clauses as needed in the function definition. - This version of anyleast would always use en_US + COLLATE clauses as needed in the function definition. 
+ This version of anyleast would always use en_US locale to compare strings: CREATE FUNCTION anyleast (VARIADIC anyarray) RETURNS anyelement AS $$ @@ -1333,11 +1405,14 @@ $$ LANGUAGE SQL; More than one function can be defined with the same SQL name, so long as the arguments they take are different. In other words, - function names can be overloaded. When a - query is executed, the server will determine which function to - call from the data types and the number of the provided arguments. - Overloading can also be used to simulate functions with a variable - number of arguments, up to a finite maximum number. + function names can be overloaded. Whether or not + you use it, this capability entails security precautions when calling + functions in databases where some users mistrust other users; see + . When a query is executed, the server + will determine which function to call from the data types and the number + of the provided arguments. Overloading can also be used to simulate + functions with a variable number of arguments, up to a finite maximum + number. @@ -1351,31 +1426,31 @@ CREATE FUNCTION test(smallint, double precision) RETURNS ... it is not immediately clear which function would be called with some trivial input like test(1, 1.5). The currently implemented resolution rules are described in - , but it is unwise to design a system that subtly + , but it is unwise to design a system that subtly relies on this behavior. A function that takes a single argument of a composite type should generally not have the same name as any attribute (field) of that type. - Recall that attribute(table) + Recall that attribute(table) is considered equivalent - to table.attribute. + to table.attribute. In the case that there is an ambiguity between a function on a composite type and an attribute of the composite type, the attribute will always be used. It is possible to override that choice by schema-qualifying the function name - (that is, schema.func(table) + (that is, schema.func(table) ) but it's better to avoid the problem by not choosing conflicting names. Another possible conflict is between variadic and non-variadic functions. - For instance, it is possible to create both foo(numeric) and - foo(VARIADIC numeric[]). In this case it is unclear which one + For instance, it is possible to create both foo(numeric) and + foo(VARIADIC numeric[]). In this case it is unclear which one should be matched to a call providing a single numeric argument, such as - foo(10.1). The rule is that the function appearing + foo(10.1). The rule is that the function appearing earlier in the search path is used, or if the two functions are in the same schema, the non-variadic one is preferred. @@ -1388,15 +1463,15 @@ CREATE FUNCTION test(smallint, double precision) RETURNS ... rule is violated, the behavior is not portable. You might get a run-time linker error, or one of the functions will get called (usually the internal one). The alternative form of the - AS clause for the SQL CREATE + AS clause for the SQL CREATE FUNCTION command decouples the SQL function name from the function name in the C source code. For instance: CREATE FUNCTION test(int) RETURNS int - AS 'filename', 'test_1arg' + AS 'filename', 'test_1arg' LANGUAGE C; CREATE FUNCTION test(int, int) RETURNS int - AS 'filename', 'test_2arg' + AS 'filename', 'test_2arg' LANGUAGE C; The names of the C functions here reflect one of many possible conventions. 
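A purely SQL-level sketch of overloading (the name describe and both bodies are illustrative only): the same name is declared twice with different argument lists, and each call is resolved from the number and types of the actual arguments.

CREATE FUNCTION describe(i integer) RETURNS text AS $$
    SELECT 'integer: ' || i;
$$ LANGUAGE SQL;

CREATE FUNCTION describe(t text, n integer) RETURNS text AS $$
    SELECT t || ' (' || n || ')';
$$ LANGUAGE SQL;

SELECT describe(42);         -- resolves to describe(integer)
SELECT describe('box', 3);   -- resolves to describe(text, integer)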
@@ -1421,17 +1496,17 @@ CREATE FUNCTION test(int, int) RETURNS int - Every function has a volatility classification, with - the possibilities being VOLATILE, STABLE, or - IMMUTABLE. VOLATILE is the default if the - + Every function has a volatility classification, with + the possibilities being VOLATILE, STABLE, or + IMMUTABLE. VOLATILE is the default if the + command does not specify a category. The volatility category is a promise to the optimizer about the behavior of the function: - A VOLATILE function can do anything, including modifying + A VOLATILE function can do anything, including modifying the database. It can return different results on successive calls with the same arguments. The optimizer makes no assumptions about the behavior of such functions. A query using a volatile function will @@ -1440,26 +1515,26 @@ CREATE FUNCTION test(int, int) RETURNS int - A STABLE function cannot modify the database and is + A STABLE function cannot modify the database and is guaranteed to return the same results given the same arguments for all rows within a single statement. This category allows the optimizer to optimize multiple calls of the function to a single call. In particular, it is safe to use an expression containing such a function in an index scan condition. (Since an index scan will evaluate the comparison value only once, not once at each - row, it is not valid to use a VOLATILE function in an + row, it is not valid to use a VOLATILE function in an index scan condition.) - An IMMUTABLE function cannot modify the database and is + An IMMUTABLE function cannot modify the database and is guaranteed to return the same results given the same arguments forever. This category allows the optimizer to pre-evaluate the function when a query calls it with constant arguments. For example, a query like - SELECT ... WHERE x = 2 + 2 can be simplified on sight to - SELECT ... WHERE x = 4, because the function underlying - the integer addition operator is marked IMMUTABLE. + SELECT ... WHERE x = 2 + 2 can be simplified on sight to + SELECT ... WHERE x = 4, because the function underlying + the integer addition operator is marked IMMUTABLE. @@ -1471,32 +1546,32 @@ CREATE FUNCTION test(int, int) RETURNS int - Any function with side-effects must be labeled - VOLATILE, so that calls to it cannot be optimized away. + Any function with side-effects must be labeled + VOLATILE, so that calls to it cannot be optimized away. Even a function with no side-effects needs to be labeled - VOLATILE if its value can change within a single query; - some examples are random(), currval(), - timeofday(). + VOLATILE if its value can change within a single query; + some examples are random(), currval(), + timeofday(). - Another important example is that the current_timestamp - family of functions qualify as STABLE, since their values do + Another important example is that the current_timestamp + family of functions qualify as STABLE, since their values do not change within a transaction. - There is relatively little difference between STABLE and - IMMUTABLE categories when considering simple interactive + There is relatively little difference between STABLE and + IMMUTABLE categories when considering simple interactive queries that are planned and immediately executed: it doesn't matter a lot whether a function is executed once during planning or once during query execution startup. But there is a big difference if the plan is - saved and reused later. Labeling a function IMMUTABLE when + saved and reused later. 
Labeling a function IMMUTABLE when it really isn't might allow it to be prematurely folded to a constant during planning, resulting in a stale value being re-used during subsequent uses of the plan. This is a hazard when using prepared statements or when using function languages that cache plans (such as - PL/pgSQL). + PL/pgSQL). @@ -1504,12 +1579,12 @@ CREATE FUNCTION test(int, int) RETURNS int languages, there is a second important property determined by the volatility category, namely the visibility of any data changes that have been made by the SQL command that is calling the function. A - VOLATILE function will see such changes, a STABLE - or IMMUTABLE function will not. This behavior is implemented - using the snapshotting behavior of MVCC (see ): - STABLE and IMMUTABLE functions use a snapshot + VOLATILE function will see such changes, a STABLE + or IMMUTABLE function will not. This behavior is implemented + using the snapshotting behavior of MVCC (see ): + STABLE and IMMUTABLE functions use a snapshot established as of the start of the calling query, whereas - VOLATILE functions obtain a fresh snapshot at the start of + VOLATILE functions obtain a fresh snapshot at the start of each query they execute. @@ -1522,41 +1597,41 @@ CREATE FUNCTION test(int, int) RETURNS int Because of this snapshotting behavior, - a function containing only SELECT commands can safely be - marked STABLE, even if it selects from tables that might be + a function containing only SELECT commands can safely be + marked STABLE, even if it selects from tables that might be undergoing modifications by concurrent queries. PostgreSQL will execute all commands of a - STABLE function using the snapshot established for the + STABLE function using the snapshot established for the calling query, and so it will see a fixed view of the database throughout that query. - The same snapshotting behavior is used for SELECT commands - within IMMUTABLE functions. It is generally unwise to select - from database tables within an IMMUTABLE function at all, + The same snapshotting behavior is used for SELECT commands + within IMMUTABLE functions. It is generally unwise to select + from database tables within an IMMUTABLE function at all, since the immutability will be broken if the table contents ever change. However, PostgreSQL does not enforce that you do not do that. - A common error is to label a function IMMUTABLE when its + A common error is to label a function IMMUTABLE when its results depend on a configuration parameter. For example, a function that manipulates timestamps might well have results that depend on the - setting. For safety, such functions should - be labeled STABLE instead. + setting. For safety, such functions should + be labeled STABLE instead. - PostgreSQL requires that STABLE - and IMMUTABLE functions contain no SQL commands other - than SELECT to prevent data modification. + PostgreSQL requires that STABLE + and IMMUTABLE functions contain no SQL commands other + than SELECT to prevent data modification. (This is not a completely bulletproof test, since such functions could - still call VOLATILE functions that modify the database. - If you do that, you will find that the STABLE or - IMMUTABLE function does not notice the database changes + still call VOLATILE functions that modify the database. + If you do that, you will find that the STABLE or + IMMUTABLE function does not notice the database changes applied by the called function, since they are hidden from its snapshot.) 
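As a hedged illustration of the three categories (the function names and the table some_tab are placeholders):

CREATE FUNCTION plus_one(i integer) RETURNS integer AS $$
    SELECT i + 1;
$$ LANGUAGE SQL IMMUTABLE;    -- same arguments always give the same result; may be pre-evaluated

CREATE FUNCTION rows_in_some_tab() RETURNS bigint AS $$
    SELECT count(*) FROM some_tab;
$$ LANGUAGE SQL STABLE;       -- only reads; sees one snapshot for the whole calling statement

CREATE FUNCTION roll_die() RETURNS integer AS $$
    SELECT 1 + floor(random() * 6)::integer;
$$ LANGUAGE SQL VOLATILE;     -- can change from row to row; never folded to a constant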
@@ -1569,11 +1644,11 @@ CREATE FUNCTION test(int, int) RETURNS int PostgreSQL allows user-defined functions to be written in other languages besides SQL and C. These other languages are generically called procedural - languages (PLs). + languages (PLs). Procedural languages aren't built into the PostgreSQL server; they are offered by loadable modules. - See and following chapters for more + See and following chapters for more information. @@ -1581,7 +1656,7 @@ CREATE FUNCTION test(int, int) RETURNS int Internal Functions - functioninternal + functioninternal Internal functions are functions written in C that have been statically @@ -1597,7 +1672,7 @@ CREATE FUNCTION test(int, int) RETURNS int Normally, all internal functions present in the server are declared during the initialization of the database cluster - (see ), + (see ), but a user could use CREATE FUNCTION to create additional alias names for an internal function. Internal functions are declared in CREATE FUNCTION @@ -1635,8 +1710,8 @@ CREATE FUNCTION square_root(double precision) RETURNS double precision be made compatible with C, such as C++). Such functions are compiled into dynamically loadable objects (also called shared libraries) and are loaded by the server on demand. The dynamic - loading feature is what distinguishes C language functions - from internal functions — the actual coding conventions + loading feature is what distinguishes C language functions + from internal functions — the actual coding conventions are essentially the same for both. (Hence, the standard internal function library is a rich source of coding examples for user-defined C functions.) @@ -1683,9 +1758,9 @@ CREATE FUNCTION square_root(double precision) RETURNS double precision If the name starts with the string $libdir, - that part is replaced by the PostgreSQL package + that part is replaced by the PostgreSQL package library directory - name, which is determined at build time.$libdir + name, which is determined at build time.$libdir @@ -1693,7 +1768,7 @@ CREATE FUNCTION square_root(double precision) RETURNS double precision If the name does not contain a directory part, the file is searched for in the path specified by the configuration variable - .dynamic_library_path + .dynamic_library_path @@ -1742,7 +1817,7 @@ CREATE FUNCTION square_root(double precision) RETURNS double precision PostgreSQL will not compile a C function automatically. The object file must be compiled before it is referenced in a CREATE - FUNCTION command. See for additional + FUNCTION command. See for additional information. @@ -1754,23 +1829,16 @@ CREATE FUNCTION square_root(double precision) RETURNS double precision To ensure that a dynamically loaded object file is not loaded into an incompatible server, PostgreSQL checks that the - file contains a magic block with the appropriate contents. + file contains a magic block with the appropriate contents. This allows the server to detect obvious incompatibilities, such as code compiled for a different major version of - PostgreSQL. A magic block is required as of - PostgreSQL 8.2. To include a magic block, + PostgreSQL. To include a magic block, write this in one (and only one) of the module source files, after having - included the header fmgr.h: + included the header fmgr.h: -#ifdef PG_MODULE_MAGIC PG_MODULE_MAGIC; -#endif - - The #ifdef test can be omitted if the code doesn't - need to compile against pre-8.2 PostgreSQL - releases. 
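On the SQL side, the pieces above come together in a declaration like this sketch (the library name funcs and the symbol add_one are placeholders): $libdir is expanded to the package library directory determined at build time, and the compiled module must contain the magic block for the load to be accepted.

CREATE FUNCTION add_one(integer) RETURNS integer
    AS '$libdir/funcs', 'add_one'
    LANGUAGE C STRICT;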
@@ -1797,12 +1865,12 @@ PG_MODULE_MAGIC; Optionally, a dynamically loaded file can contain initialization and finalization functions. If the file includes a function named - _PG_init, that function will be called immediately after + _PG_init, that function will be called immediately after loading the file. The function receives no parameters and should return void. If the file includes a function named - _PG_fini, that function will be called immediately before + _PG_fini, that function will be called immediately before unloading the file. Likewise, the function receives no parameters and - should return void. Note that _PG_fini will only be called + should return void. Note that _PG_fini will only be called during an unload of the file, not during process termination. (Presently, unloads are disabled and will never occur, but this may change in the future.) @@ -1873,7 +1941,7 @@ typedef int int4; means XX bits. Note therefore also that the C type int8 is 1 byte in size. The SQL type int8 is called int64 in C. See also - .) + .) @@ -1922,11 +1990,11 @@ typedef struct - Never modify the contents of a pass-by-reference input + Never modify the contents of a pass-by-reference input value. If you do so you are likely to corrupt on-disk data, since the pointer you are given might point directly into a disk buffer. The sole exception to this rule is explained in - . + . @@ -1941,7 +2009,7 @@ typedef struct { } text; - The [FLEXIBLE_ARRAY_MEMBER] notation means that the actual + The [FLEXIBLE_ARRAY_MEMBER] notation means that the actual length of the data part is not specified by this declaration. @@ -1949,7 +2017,7 @@ typedef struct { When manipulating variable-length types, we must be careful to allocate the correct amount of memory and set the length field correctly. - For example, if we wanted to store 40 bytes in a text + For example, if we wanted to store 40 bytes in a text structure, we might use a code fragment like this: data, buffer, 40); ]]> - VARHDRSZ is the same as sizeof(int32), but - it's considered good style to use the macro VARHDRSZ + VARHDRSZ is the same as sizeof(int32), but + it's considered good style to use the macro VARHDRSZ to refer to the size of the overhead for a variable-length type. - Also, the length field must be set using the - SET_VARSIZE macro, not by simple assignment. + Also, the length field must be set using the + SET_VARSIZE macro, not by simple assignment. - specifies which C type + specifies which C type corresponds to which SQL type when writing a C-language function - that uses a built-in type of PostgreSQL. + that uses a built-in type of PostgreSQL. The Defined In column gives the header file that needs to be included to get the type definition. (The actual definition might be in a different file that is included by the @@ -2001,16 +2069,6 @@ memcpy(destination->data, buffer, 40); - - abstime - AbsoluteTime - utils/nabstime.h - - - bigint (int8) - int64 - postgres.h - boolean bool @@ -2111,11 +2169,6 @@ memcpy(destination->data, buffer, 40); regproc postgres.h - - reltime - RelativeTime - utils/nabstime.h - text text* @@ -2141,11 +2194,6 @@ memcpy(destination->data, buffer, 40); Timestamp* datatype/timestamp.h - - tinterval - TimeInterval - utils/nabstime.h - varchar VarChar* @@ -2182,8 +2230,8 @@ PG_FUNCTION_INFO_V1(funcname); must appear in the same source file. (Conventionally, it's written just before the function itself.) 
This macro call is not - needed for internal-language functions, since - PostgreSQL assumes that all internal functions + needed for internal-language functions, since + PostgreSQL assumes that all internal functions use the version-1 convention. It is, however, required for dynamically-loaded functions. @@ -2214,9 +2262,7 @@ PG_FUNCTION_INFO_V1(funcname); #include "fmgr.h" #include "utils/geo_decls.h" -#ifdef PG_MODULE_MAGIC PG_MODULE_MAGIC; -#endif /* by value */ @@ -2341,8 +2387,8 @@ CREATE FUNCTION concat_text(text, text) RETURNS text directory of the shared library file (for instance the PostgreSQL tutorial directory, which contains the code for the examples used in this section). - (Better style would be to use just 'funcs' in the - AS clause, after having added + (Better style would be to use just 'funcs' in the + AS clause, after having added DIRECTORY to the search path. In any case, we can omit the system-specific extension for a shared library, commonly .so.) @@ -2359,16 +2405,16 @@ CREATE FUNCTION concat_text(text, text) RETURNS text At first glance, the version-1 coding conventions might appear to be just - pointless obscurantism, over using plain C calling - conventions. They do however allow to deal with NULLable + pointless obscurantism, over using plain C calling + conventions. They do however allow to deal with NULLable arguments/return values, and toasted (compressed or out-of-line) values. - The macro PG_ARGISNULL(n) + The macro PG_ARGISNULL(n) allows a function to test whether each input is null. (Of course, doing - this is only necessary in functions not declared strict.) + this is only necessary in functions not declared strict.) As with the PG_GETARG_xxx() macros, the input arguments are counted beginning at zero. Note that one @@ -2403,16 +2449,16 @@ CREATE FUNCTION concat_text(text, text) RETURNS text ALTER TABLE tablename ALTER COLUMN colname SET STORAGE storagetype. storagetype is one of - plain, external, extended, - or main.) + plain, external, extended, + or main.) Finally, the version-1 function call conventions make it possible - to return set results () and - implement trigger functions () and + to return set results () and + implement trigger functions () and procedural-language call handlers (). For more details + linkend="plhandler"/>). For more details see src/backend/utils/fmgr/README in the source distribution. @@ -2442,8 +2488,8 @@ CREATE FUNCTION concat_text(text, text) RETURNS text Use pg_config - --includedir-serverpg_configwith user-defined C functions - to find out where the PostgreSQL server header + --includedir-serverpg_configwith user-defined C functions + to find out where the PostgreSQL server header files are installed on your system (or the system that your users will be running on). @@ -2453,7 +2499,7 @@ CREATE FUNCTION concat_text(text, text) RETURNS text Compiling and linking your code so that it can be dynamically loaded into PostgreSQL always - requires special flags. See for a + requires special flags. See for a detailed explanation of how to do it for your particular operating system. @@ -2461,8 +2507,8 @@ CREATE FUNCTION concat_text(text, text) RETURNS text - Remember to define a magic block for your shared library, - as described in . + Remember to define a magic block for your shared library, + as described in . 
@@ -2470,7 +2516,7 @@ CREATE FUNCTION concat_text(text, text) RETURNS text When allocating memory, use the PostgreSQL functions - pallocpalloc and pfreepfree + pallocpalloc and pfreepfree instead of the corresponding C library functions malloc and free. The memory allocated by palloc will be @@ -2481,8 +2527,8 @@ CREATE FUNCTION concat_text(text, text) RETURNS text - Always zero the bytes of your structures using memset - (or allocate them with palloc0 in the first place). + Always zero the bytes of your structures using memset + (or allocate them with palloc0 in the first place). Even if you assign to each field of your structure, there might be alignment padding (holes in the structure) that contain garbage values. Without this, it's difficult to @@ -2502,7 +2548,7 @@ CREATE FUNCTION concat_text(text, text) RETURNS text (PG_FUNCTION_ARGS, etc.) are in fmgr.h, so you will need to include at least these two files. For portability reasons it's best to - include postgres.h first, + include postgres.h first, before any other system or user header files. Including postgres.h will also include elog.h and palloc.h @@ -2548,15 +2594,13 @@ SELECT name, c_overpaid(emp, 1500) AS overpaid Using the version-1 calling conventions, we can define - c_overpaid as: + c_overpaid as: - Notice we have used STRICT so that we did not have to + Notice we have used STRICT so that we did not have to check whether the input arguments were NULL. @@ -2630,87 +2674,87 @@ CREATE FUNCTION c_overpaid(emp, integer) RETURNS boolean There are two ways you can build a composite data value (henceforth - a tuple): you can build it from an array of Datum values, + a tuple): you can build it from an array of Datum values, or from an array of C strings that can be passed to the input conversion functions of the tuple's column data types. In either - case, you first need to obtain or construct a TupleDesc + case, you first need to obtain or construct a TupleDesc descriptor for the tuple structure. When working with Datums, you - pass the TupleDesc to BlessTupleDesc, - and then call heap_form_tuple for each row. When working - with C strings, you pass the TupleDesc to - TupleDescGetAttInMetadata, and then call - BuildTupleFromCStrings for each row. In the case of a + pass the TupleDesc to BlessTupleDesc, + and then call heap_form_tuple for each row. When working + with C strings, you pass the TupleDesc to + TupleDescGetAttInMetadata, and then call + BuildTupleFromCStrings for each row. In the case of a function returning a set of tuples, the setup steps can all be done once during the first call of the function. Several helper functions are available for setting up the needed - TupleDesc. The recommended way to do this in most + TupleDesc. The recommended way to do this in most functions returning composite values is to call: TypeFuncClass get_call_result_type(FunctionCallInfo fcinfo, Oid *resultTypeId, TupleDesc *resultTupleDesc) - passing the same fcinfo struct passed to the calling function + passing the same fcinfo struct passed to the calling function itself. (This of course requires that you use the version-1 - calling conventions.) resultTypeId can be specified - as NULL or as the address of a local variable to receive the - function's result type OID. resultTupleDesc should be the - address of a local TupleDesc variable. Check that the - result is TYPEFUNC_COMPOSITE; if so, - resultTupleDesc has been filled with the needed - TupleDesc. (If it is not, you can report an error along + calling conventions.) 
resultTypeId can be specified + as NULL or as the address of a local variable to receive the + function's result type OID. resultTupleDesc should be the + address of a local TupleDesc variable. Check that the + result is TYPEFUNC_COMPOSITE; if so, + resultTupleDesc has been filled with the needed + TupleDesc. (If it is not, you can report an error along the lines of function returning record called in context that cannot accept type record.) - get_call_result_type can resolve the actual type of a + get_call_result_type can resolve the actual type of a polymorphic function result; so it is useful in functions that return scalar polymorphic results, not only functions that return composites. - The resultTypeId output is primarily useful for functions + The resultTypeId output is primarily useful for functions returning polymorphic scalars. - get_call_result_type has a sibling - get_expr_result_type, which can be used to resolve the + get_call_result_type has a sibling + get_expr_result_type, which can be used to resolve the expected output type for a function call represented by an expression tree. This can be used when trying to determine the result type from outside the function itself. There is also - get_func_result_type, which can be used when only the + get_func_result_type, which can be used when only the function's OID is available. However these functions are not able - to deal with functions declared to return record, and - get_func_result_type cannot resolve polymorphic types, - so you should preferentially use get_call_result_type. + to deal with functions declared to return record, and + get_func_result_type cannot resolve polymorphic types, + so you should preferentially use get_call_result_type. Older, now-deprecated functions for obtaining - TupleDescs are: + TupleDescs are: TupleDesc RelationNameGetTupleDesc(const char *relname) - to get a TupleDesc for the row type of a named relation, + to get a TupleDesc for the row type of a named relation, and: TupleDesc TypeGetTupleDesc(Oid typeoid, List *colaliases) - to get a TupleDesc based on a type OID. This can - be used to get a TupleDesc for a base or + to get a TupleDesc based on a type OID. This can + be used to get a TupleDesc for a base or composite type. It will not work for a function that returns - record, however, and it cannot resolve polymorphic + record, however, and it cannot resolve polymorphic types. - Once you have a TupleDesc, call: + Once you have a TupleDesc, call: TupleDesc BlessTupleDesc(TupleDesc tupdesc) @@ -2720,8 +2764,8 @@ AttInMetadata *TupleDescGetAttInMetadata(TupleDesc tupdesc) if you plan to work with C strings. If you are writing a function returning set, you can save the results of these functions in the - FuncCallContext structure — use the - tuple_desc or attinmeta field + FuncCallContext structure — use the + tuple_desc or attinmeta field respectively. @@ -2730,7 +2774,7 @@ AttInMetadata *TupleDescGetAttInMetadata(TupleDesc tupdesc) HeapTuple heap_form_tuple(TupleDesc tupdesc, Datum *values, bool *isnull) - to build a HeapTuple given user data in Datum form. + to build a HeapTuple given user data in Datum form. @@ -2738,24 +2782,24 @@ HeapTuple heap_form_tuple(TupleDesc tupdesc, Datum *values, bool *isnull) HeapTuple BuildTupleFromCStrings(AttInMetadata *attinmeta, char **values) - to build a HeapTuple given user data + to build a HeapTuple given user data in C string form. values is an array of C strings, one for each attribute of the return row. 
Each C string should be in the form expected by the input function of the attribute data type. In order to return a null value for one of the attributes, - the corresponding pointer in the values array - should be set to NULL. This function will need to + the corresponding pointer in the values array + should be set to NULL. This function will need to be called again for each row you return. Once you have built a tuple to return from your function, it - must be converted into a Datum. Use: + must be converted into a Datum. Use: HeapTupleGetDatum(HeapTuple tuple) - to convert a HeapTuple into a valid Datum. This - Datum can be returned directly if you intend to return + to convert a HeapTuple into a valid Datum. This + Datum can be returned directly if you intend to return just a single row, or it can be used as the current return value in a set-returning function. @@ -2778,13 +2822,13 @@ HeapTupleGetDatum(HeapTuple tuple) - A set-returning function (SRF) is called - once for each item it returns. The SRF must + A set-returning function (SRF) is called + once for each item it returns. The SRF must therefore save enough state to remember what it was doing and return the next item on each call. - The structure FuncCallContext is provided to help - control this process. Within a function, fcinfo->flinfo->fn_extra - is used to hold a pointer to FuncCallContext + The structure FuncCallContext is provided to help + control this process. Within a function, fcinfo->flinfo->fn_extra + is used to hold a pointer to FuncCallContext across calls. typedef struct FuncCallContext @@ -2806,14 +2850,6 @@ typedef struct FuncCallContext */ uint64 max_calls; - /* - * OPTIONAL pointer to result slot - * - * This is obsolete and only present for backward compatibility, viz, - * user-defined SRFs that use the deprecated TupleDescGetSlot(). - */ - TupleTableSlot *slot; - /* * OPTIONAL pointer to miscellaneous user-provided context information * @@ -2858,9 +2894,9 @@ typedef struct FuncCallContext - An SRF uses several functions and macros that - automatically manipulate the FuncCallContext - structure (and expect to find it via fn_extra). Use: + An SRF uses several functions and macros that + automatically manipulate the FuncCallContext + structure (and expect to find it via fn_extra). Use: SRF_IS_FIRSTCALL() @@ -2869,12 +2905,12 @@ SRF_IS_FIRSTCALL() SRF_FIRSTCALL_INIT() - to initialize the FuncCallContext. On every function call, + to initialize the FuncCallContext. On every function call, including the first, use: SRF_PERCALL_SETUP() - to properly set up for using the FuncCallContext + to properly set up for using the FuncCallContext and clearing any previously returned data left over from the previous pass. @@ -2884,27 +2920,27 @@ SRF_PERCALL_SETUP() SRF_RETURN_NEXT(funcctx, result) - to return it to the caller. (result must be of type - Datum, either a single value or a tuple prepared as + to return it to the caller. (result must be of type + Datum, either a single value or a tuple prepared as described above.) Finally, when your function is finished returning data, use: SRF_RETURN_DONE(funcctx) - to clean up and end the SRF. + to clean up and end the SRF. - The memory context that is current when the SRF is called is + The memory context that is current when the SRF is called is a transient context that will be cleared between calls. This means - that you do not need to call pfree on everything - you allocated using palloc; it will go away anyway. 
However, if you want to allocate + that you do not need to call pfree on everything + you allocated using palloc; it will go away anyway. However, if you want to allocate any data structures to live across calls, you need to put them somewhere else. The memory context referenced by - multi_call_memory_ctx is a suitable location for any - data that needs to survive until the SRF is finished running. In most + multi_call_memory_ctx is a suitable location for any + data that needs to survive until the SRF is finished running. In most cases, this means that you should switch into - multi_call_memory_ctx while doing the first-call setup. + multi_call_memory_ctx while doing the first-call setup. @@ -2915,8 +2951,8 @@ SRF_RETURN_DONE(funcctx) PG_GETARG_xxx macro) in the transient context then the detoasted copies will be freed on each cycle. Accordingly, if you keep references to such values in - your user_fctx, you must either copy them into the - multi_call_memory_ctx after detoasting, or ensure + your user_fctx, you must either copy them into the + multi_call_memory_ctx after detoasting, or ensure that you detoast the values only in that context. @@ -2970,7 +3006,7 @@ my_set_returning_function(PG_FUNCTION_ARGS) - A complete example of a simple SRF returning a composite type + A complete example of a simple SRF returning a composite type looks like: filename', 'retcomposite' + AS 'filename', 'retcomposite' LANGUAGE C IMMUTABLE STRICT; A different way is to use OUT parameters: @@ -3078,15 +3114,15 @@ CREATE OR REPLACE FUNCTION retcomposite(integer, integer) CREATE OR REPLACE FUNCTION retcomposite(IN integer, IN integer, OUT f1 integer, OUT f2 integer, OUT f3 integer) RETURNS SETOF record - AS 'filename', 'retcomposite' + AS 'filename', 'retcomposite' LANGUAGE C IMMUTABLE STRICT; Notice that in this method the output type of the function is formally - an anonymous record type. + an anonymous record type. - The directory contrib/tablefunc + The directory contrib/tablefunc module in the source distribution contains more examples of set-returning functions. @@ -3100,24 +3136,24 @@ CREATE OR REPLACE FUNCTION retcomposite(IN integer, IN integer, return the polymorphic types anyelement, anyarray, anynonarray, anyenum, and anyrange. - See for a more detailed explanation + See for a more detailed explanation of polymorphic functions. When function arguments or return types are defined as polymorphic types, the function author cannot know in advance what data type it will be called with, or - need to return. There are two routines provided in fmgr.h + need to return. There are two routines provided in fmgr.h to allow a version-1 C function to discover the actual data types of its arguments and the type it is expected to return. The routines are - called get_fn_expr_rettype(FmgrInfo *flinfo) and - get_fn_expr_argtype(FmgrInfo *flinfo, int argnum). + called get_fn_expr_rettype(FmgrInfo *flinfo) and + get_fn_expr_argtype(FmgrInfo *flinfo, int argnum). They return the result or argument type OID, or InvalidOid if the information is not available. - The structure flinfo is normally accessed as - fcinfo->flinfo. The parameter argnum - is zero based. get_call_result_type can also be used - as an alternative to get_fn_expr_rettype. - There is also get_fn_expr_variadic, which can be used to + The structure flinfo is normally accessed as + fcinfo->flinfo. The parameter argnum + is zero based. get_call_result_type can also be used + as an alternative to get_fn_expr_rettype. 
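     As a small, hypothetical illustration of these routines, a C function
     declared with an anyelement parameter could report the type it was
     actually called with; the name report_argtype and the choice to return
     the bare type OID are purely for demonstration.
<programlisting>
#include "postgres.h"
#include "fmgr.h"

PG_FUNCTION_INFO_V1(report_argtype);

/* Returns the OID of the data type of the first actual argument. */
Datum
report_argtype(PG_FUNCTION_ARGS)
{
    Oid         argtype = get_fn_expr_argtype(fcinfo->flinfo, 0);

    if (!OidIsValid(argtype))
        elog(ERROR, "could not determine argument data type");

    PG_RETURN_OID(argtype);
}
</programlisting>
     Such a function could be declared as, for example, CREATE FUNCTION
     report_argtype(anyelement) RETURNS oid AS 'filename', 'report_argtype'
     LANGUAGE C STRICT.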
+ There is also get_fn_expr_variadic, which can be used to find out whether variadic arguments have been merged into an array. - This is primarily useful for VARIADIC "any" functions, + This is primarily useful for VARIADIC "any" functions, since such merging will always have occurred for variadic functions taking ordinary array types. @@ -3185,23 +3221,23 @@ CREATE FUNCTION make_array(anyelement) RETURNS anyarray There is a variant of polymorphism that is only available to C-language functions: they can be declared to take parameters of type - "any". (Note that this type name must be double-quoted, + "any". (Note that this type name must be double-quoted, since it's also a SQL reserved word.) This works like - anyelement except that it does not constrain different - "any" arguments to be the same type, nor do they help + anyelement except that it does not constrain different + "any" arguments to be the same type, nor do they help determine the function's result type. A C-language function can also - declare its final parameter to be VARIADIC "any". This will + declare its final parameter to be VARIADIC "any". This will match one or more actual arguments of any type (not necessarily the same - type). These arguments will not be gathered into an array + type). These arguments will not be gathered into an array as happens with normal variadic functions; they will just be passed to - the function separately. The PG_NARGS() macro and the + the function separately. The PG_NARGS() macro and the methods described above must be used to determine the number of actual arguments and their types when using this feature. Also, users of such - a function might wish to use the VARIADIC keyword in their + a function might wish to use the VARIADIC keyword in their function call, with the expectation that the function would treat the array elements as separate arguments. The function itself must implement - that behavior if wanted, after using get_fn_expr_variadic to - detect that the actual argument was marked with VARIADIC. + that behavior if wanted, after using get_fn_expr_variadic to + detect that the actual argument was marked with VARIADIC. @@ -3211,22 +3247,22 @@ CREATE FUNCTION make_array(anyelement) RETURNS anyarray Some function calls can be simplified during planning based on properties specific to the function. For example, - int4mul(n, 1) could be simplified to just n. + int4mul(n, 1) could be simplified to just n. To define such function-specific optimizations, write a - transform function and place its OID in the - protransform field of the primary function's - pg_proc entry. The transform function must have the SQL - signature protransform(internal) RETURNS internal. The - argument, actually FuncExpr *, is a dummy node representing a + transform function and place its OID in the + protransform field of the primary function's + pg_proc entry. The transform function must have the SQL + signature protransform(internal) RETURNS internal. The + argument, actually FuncExpr *, is a dummy node representing a call to the primary function. If the transform function's study of the expression tree proves that a simplified expression tree can substitute for all possible concrete calls represented thereby, build and return - that simplified expression. Otherwise, return a NULL - pointer (not a SQL null). + that simplified expression. Otherwise, return a NULL + pointer (not a SQL null). 
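     As a hedged sketch of what such a transform function can look like (the
     function name my_func_transform and the simplified call my_func(x, 1)
     are invented for this example, not existing catalog entries):
<programlisting>
#include "postgres.h"
#include "fmgr.h"
#include "nodes/primnodes.h"

PG_FUNCTION_INFO_V1(my_func_transform);

/*
 * Simplify my_func(x, 1) to just x.  Return a NULL pointer (not a SQL
 * null) when no simplification applies.
 */
Datum
my_func_transform(PG_FUNCTION_ARGS)
{
    FuncExpr   *expr = (FuncExpr *) PG_GETARG_POINTER(0);
    Node       *ret = NULL;
    Node       *arg2;

    Assert(IsA(expr, FuncExpr) && list_length(expr->args) == 2);

    arg2 = (Node *) lsecond(expr->args);
    if (IsA(arg2, Const) &&
        !((Const *) arg2)->constisnull &&
        DatumGetInt32(((Const *) arg2)->constvalue) == 1)
        ret = (Node *) linitial(expr->args);    /* my_func(x, 1) => x */

    PG_RETURN_POINTER(ret);
}
</programlisting>
     The transform function's OID must then be placed in the primary
     function's protransform field, as described above.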
- We make no guarantee that PostgreSQL will never call the + We make no guarantee that PostgreSQL will never call the primary function in cases that the transform function could simplify. Ensure rigorous equivalence between the simplified expression and an actual call to the primary function. @@ -3246,26 +3282,26 @@ CREATE FUNCTION make_array(anyelement) RETURNS anyarray Add-ins can reserve LWLocks and an allocation of shared memory on server startup. The add-in's shared library must be preloaded by specifying it in - shared_preload_libraries. + shared_preload_libraries. Shared memory is reserved by calling: void RequestAddinShmemSpace(int size) - from your _PG_init function. + from your _PG_init function. LWLocks are reserved by calling: void RequestNamedLWLockTranche(const char *tranche_name, int num_lwlocks) - from _PG_init. This will ensure that an array of - num_lwlocks LWLocks is available under the name - tranche_name. Use GetNamedLWLockTranche + from _PG_init. This will ensure that an array of + num_lwlocks LWLocks is available under the name + tranche_name. Use GetNamedLWLockTranche to get a pointer to this array. To avoid possible race-conditions, each backend should use the LWLock - AddinShmemInitLock when connecting to and initializing + AddinShmemInitLock when connecting to and initializing its allocation of shared memory, as shown here: static mystruct *ptr = NULL; @@ -3288,10 +3324,10 @@ if (!ptr) - + Using C++ for Extensibility - + C++ @@ -3305,7 +3341,7 @@ if (!ptr) All functions accessed by the backend must present a C interface to the backend; these C functions can then call C++ functions. - For example, extern C linkage is required for + For example, extern C linkage is required for backend-accessed functions. This is also necessary for any functions that are passed as pointers between the backend and C++ code. @@ -3314,30 +3350,30 @@ if (!ptr) Free memory using the appropriate deallocation method. For example, - most backend memory is allocated using palloc(), so use - pfree() to free it. Using C++ - delete in such cases will fail. + most backend memory is allocated using palloc(), so use + pfree() to free it. Using C++ + delete in such cases will fail. Prevent exceptions from propagating into the C code (use a catch-all - block at the top level of all extern C functions). This + block at the top level of all extern C functions). This is necessary even if the C++ code does not explicitly throw any exceptions, because events like out-of-memory can still throw exceptions. Any exceptions must be caught and appropriate errors passed back to the C interface. If possible, compile C++ with - to eliminate exceptions entirely; in such cases, you must check for failures in your C++ code, e.g. check for - NULL returned by new(). + NULL returned by new(). If calling backend functions from C++ code, be sure that the C++ call stack contains only plain old data structures - (POD). This is necessary because backend errors - generate a distant longjmp() that does not properly + (POD). This is necessary because backend errors + generate a distant longjmp() that does not properly unroll a C++ call stack with non-POD objects. @@ -3346,7 +3382,7 @@ if (!ptr) In summary, it is best to place C++ code behind a wall of - extern C functions that interface to the backend, + extern C functions that interface to the backend, and avoid exception, memory, and call stack leakage. 
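     Before moving on, a short hypothetical C++ source file may help
     illustrate these rules.  The names cpp_add_one and add_one_checked are
     made up, and the exact set of headers that can safely be wrapped in
     extern C can vary between PostgreSQL versions, so treat this only as a
     sketch.
<programlisting>
extern "C"
{
#include "postgres.h"
#include "fmgr.h"

PG_MODULE_MAGIC;

PG_FUNCTION_INFO_V1(cpp_add_one);
}

#include &lt;stdexcept&gt;

/* Ordinary C++ code; may throw. */
static int32
add_one_checked(int32 x)
{
    if (x == PG_INT32_MAX)
        throw std::overflow_error("integer out of range");
    return x + 1;
}

extern "C" Datum
cpp_add_one(PG_FUNCTION_ARGS)
{
    int32       arg = PG_GETARG_INT32(0);
    int32       result = 0;
    bool        failed = false;
    char        errbuf[256];

    /* Catch-all block: no C++ exception may escape into the backend. */
    try
    {
        result = add_one_checked(arg);
    }
    catch (const std::exception &e)
    {
        failed = true;
        snprintf(errbuf, sizeof(errbuf), "%s", e.what());
    }
    catch (...)
    {
        failed = true;
        snprintf(errbuf, sizeof(errbuf), "unrecognized C++ exception");
    }

    if (failed)
        ereport(ERROR,
                (errcode(ERRCODE_EXTERNAL_ROUTINE_EXCEPTION),
                 errmsg("error in C++ code: %s", errbuf)));

    PG_RETURN_INT32(result);
}
</programlisting>
     The error is reported only after the try/catch block has completed, so
     the longjmp performed by ereport does not have to unwind any live C++
     objects.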
diff --git a/doc/src/sgml/xindex.sgml b/doc/src/sgml/xindex.sgml index 333a36c456..9446f8b836 100644 --- a/doc/src/sgml/xindex.sgml +++ b/doc/src/sgml/xindex.sgml @@ -12,14 +12,14 @@ The procedures described thus far let you define new types, new functions, and new operators. However, we cannot yet define an index on a column of a new data type. To do this, we must define an - operator class for the new data type. Later in this + operator class for the new data type. Later in this section, we will illustrate this concept in an example: a new operator class for the B-tree index method that stores and sorts complex numbers in ascending absolute value order. - Operator classes can be grouped into operator families + Operator classes can be grouped into operator families to show the relationships between semantically compatible classes. When only a single data type is involved, an operator class is sufficient, so we'll focus on that case first and then return to operator families. @@ -35,24 +35,24 @@ PostgreSQL, but all index methods are described in pg_am. It is possible to add a new index access method by writing the necessary code and - then creating a row in pg_am — but that is - beyond the scope of this chapter (see ). + then creating an entry in pg_am — but that is + beyond the scope of this chapter (see ). The routines for an index method do not directly know anything about the data types that the index method will operate on. Instead, an operator - classoperator class + classoperator class identifies the set of operations that the index method needs to use to work with a particular data type. Operator classes are so called because one thing they specify is the set of - WHERE-clause operators that can be used with an index + WHERE-clause operators that can be used with an index (i.e., can be converted into an index-scan qualification). An operator class can also specify some support - procedures that are needed by the internal operations of the + function that are needed by the internal operations of the index method, but do not directly correspond to any - WHERE-clause operator that can be used with the index. + WHERE-clause operator that can be used with the index. @@ -83,17 +83,17 @@ The operators associated with an operator class are identified by - strategy numbers, which serve to identify the semantics of + strategy numbers, which serve to identify the semantics of each operator within the context of its operator class. For example, B-trees impose a strict ordering on keys, lesser to greater, - and so operators like less than and greater than or equal - to are interesting with respect to a B-tree. + and so operators like less than and greater than or equal + to are interesting with respect to a B-tree. Because PostgreSQL allows the user to define operators, PostgreSQL cannot look at the name of an operator - (e.g., < or >=) and tell what kind of + (e.g., < or >=) and tell what kind of comparison it is. Instead, the index method defines a set of - strategies, which can be thought of as generalized operators. + strategies, which can be thought of as generalized operators. Each operator class specifies which actual operator corresponds to each strategy for a particular data type and interpretation of the index semantics. @@ -101,7 +101,7 @@ The B-tree index method defines five strategies, shown in . + linkend="xindex-btree-strat-table"/>. @@ -140,7 +140,7 @@ Hash indexes support only equality comparisons, and so they use only one - strategy, shown in . + strategy, shown in .
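     The mapping from operators to strategy numbers for any existing operator
     class or family can be inspected in the system catalogs, which can be a
     helpful cross-check when defining your own.  For example, the following
     query (illustrative only, using the built-in B-tree family integer_ops)
     lists each operator in the family together with its strategy number:
<programlisting>
SELECT op.amopopr::regoperator AS operator,
       op.amopstrategy         AS strategy
FROM pg_amop op
     JOIN pg_opfamily f  ON f.oid  = op.amopfamily
     JOIN pg_am       am ON am.oid = f.opfmethod
WHERE am.amname = 'btree'
  AND f.opfname = 'integer_ops'
ORDER BY op.amoplefttype, op.amoprighttype, op.amopstrategy;
</programlisting>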
@@ -163,19 +163,19 @@ GiST indexes are more flexible: they do not have a fixed set of - strategies at all. Instead, the consistency support routine + strategies at all. Instead, the consistency support routine of each particular GiST operator class interprets the strategy numbers however it likes. As an example, several of the built-in GiST index operator classes index two-dimensional geometric objects, providing - the R-tree strategies shown in - . Four of these are true + the R-tree strategies shown in + . Four of these are true two-dimensional tests (overlaps, same, contains, contained by); four of them consider only the X direction; and the other four provide the same tests in the Y direction.
- GiST Two-Dimensional <quote>R-tree</> Strategies + GiST Two-Dimensional <quote>R-tree</quote> Strategies @@ -242,7 +242,7 @@ class interpret the strategy numbers according to the operator class's definition. As an example, the strategy numbers used by the built-in operator classes for points are shown in . + linkend="xindex-spgist-point-strat-table"/>.
@@ -289,7 +289,7 @@ each operator class interpret the strategy numbers according to the operator class's definition. As an example, the strategy numbers used by the built-in operator class for arrays are shown in - . + .
@@ -327,8 +327,8 @@ don't have a fixed set of strategies either. Instead the support routines of each operator class interpret the strategy numbers according to the operator class's definition. As an example, the strategy numbers used by - the built-in Minmax operator classes are shown in - . + the built-in Minmax operator classes are shown in + .
@@ -369,10 +369,10 @@ Notice that all the operators listed above return Boolean values. In practice, all operators defined as index method search operators must return type boolean, since they must appear at the top - level of a WHERE clause to be used with an index. - (Some index access methods also support ordering operators, + level of a WHERE clause to be used with an index. + (Some index access methods also support ordering operators, which typically don't return Boolean values; that feature is discussed - in .) + in .) @@ -396,14 +396,17 @@ functions should play each of these roles for a given data type and semantic interpretation. The index method defines the set of functions it needs, and the operator class identifies the correct - functions to use by assigning them to the support function numbers + functions to use by assigning them to the support function numbers specified by the index method. - B-trees require a single support function, and allow a second one to be + B-trees require a comparison support function, + and allow two additional support functions to be supplied at the operator class author's option, as shown in . + linkend="xindex-btree-support-table"/>. + The requirements for these support functions are explained further in + .
@@ -426,18 +429,26 @@ - Return the addresses of C-callable sort support function(s), - as documented in utils/sortsupport.h (optional) + Return the addresses of C-callable sort support function(s) + (optional) 2 + + + Compare a test value to a base value plus/minus an offset, and return + true or false according to the comparison result (optional) + + 3 +
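     As an illustration of support function 1, a comparison function that
     orders int4 values by absolute value could be written as below;
     abs_int4_cmp is an invented name, not a built-in function, and the sketch
     ignores the INT_MIN corner case for brevity.
<programlisting>
#include "postgres.h"
#include "fmgr.h"

PG_FUNCTION_INFO_V1(abs_int4_cmp);

/*
 * B-tree support function 1: return a negative, zero or positive value
 * according to whether the first argument sorts before, with or after
 * the second, here comparing absolute values.
 */
Datum
abs_int4_cmp(PG_FUNCTION_ARGS)
{
    int32       a = abs(PG_GETARG_INT32(0));
    int32       b = abs(PG_GETARG_INT32(1));

    if (a < b)
        PG_RETURN_INT32(-1);
    if (a > b)
        PG_RETURN_INT32(1);
    PG_RETURN_INT32(0);
}
</programlisting>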
- Hash indexes require one support function, shown in . + Hash indexes require one support function, and allow a second one to be + supplied at the operator class author's option, as shown in . @@ -451,17 +462,26 @@ - Compute the hash value for a key + Compute the 32-bit hash value for a key 1 + + + Compute the 64-bit hash value for a key given a 64-bit salt; if + the salt is 0, the low 32 bits of the result must match the value + that would have been computed by function 1 + (optional) + + 2 +
GiST indexes have nine support functions, two of which are optional, - as shown in . - (For more information see .) + as shown in . + (For more information see .) @@ -476,52 +496,52 @@ - consistent + consistent determine whether key satisfies the query qualifier 1 - union + union compute union of a set of keys 2 - compress + compress compute a compressed representation of a key or value to be indexed 3 - decompress + decompress compute a decompressed representation of a compressed key 4 - penalty + penalty compute penalty for inserting new key into subtree with given subtree's key 5 - picksplit + picksplit determine which entries of a page are to be moved to the new page and compute the union keys for resulting pages 6 - equal + equal compare two keys and return true if they are equal 7 - distance + distance determine distance from key to query value (optional) 8 - fetch + fetch compute original representation of a compressed key for index-only scans (optional) 9 @@ -532,8 +552,8 @@ SP-GiST indexes require five support functions, as - shown in . - (For more information see .) + shown in . + (For more information see .)
@@ -548,28 +568,28 @@ - config + config provide basic information about the operator class 1 - choose + choose determine how to insert a new value into an inner tuple 2 - picksplit + picksplit determine how to partition a set of values 3 - inner_consistent + inner_consistent determine which sub-partitions need to be searched for a query 4 - leaf_consistent + leaf_consistent determine whether key satisfies the query qualifier 5 @@ -580,8 +600,8 @@ GIN indexes have six support functions, three of which are optional, - as shown in . - (For more information see .) + as shown in . + (For more information see .)
@@ -596,7 +616,7 @@ - compare + compare compare two keys and return an integer less than zero, zero, or greater than zero, indicating whether the first key is less than, @@ -605,17 +625,17 @@ 1 - extractValue + extractValue extract keys from a value to be indexed 2 - extractQuery + extractQuery extract keys from a query condition 3 - consistent + consistent determine whether value matches query condition (Boolean variant) (optional if support function 6 is present) @@ -623,7 +643,7 @@ 4 - comparePartial + comparePartial compare partial key from query and key from index, and return an integer less than zero, zero, @@ -633,7 +653,7 @@ 5 - triConsistent + triConsistent determine whether value matches query condition (ternary variant) (optional if support function 4 is present) @@ -646,9 +666,9 @@ BRIN indexes have four basic support functions, as shown in - ; those basic functions + ; those basic functions may require additional support functions to be provided. - (For more information see .) + (For more information see .)
@@ -663,7 +683,7 @@ - opcInfo + opcInfo return internal information describing the indexed columns' summary data @@ -671,17 +691,17 @@ 1 - add_value + add_value add a new value to an existing summary index tuple 2 - consistent + consistent determine whether value matches query condition 3 - union + union compute union of two summary tuples @@ -717,15 +737,15 @@ operators that sort complex numbers in absolute value order, so we choose the name complex_abs_ops. First, we need a set of operators. The procedure for defining operators was - discussed in . For an operator class on + discussed in . For an operator class on B-trees, the operators we require are: - absolute-value less-than (strategy 1) - absolute-value less-than-or-equal (strategy 2) - absolute-value equal (strategy 3) - absolute-value greater-than-or-equal (strategy 4) - absolute-value greater-than (strategy 5) + absolute-value less-than (strategy 1) + absolute-value less-than-or-equal (strategy 2) + absolute-value equal (strategy 3) + absolute-value greater-than-or-equal (strategy 4) + absolute-value greater-than (strategy 5) @@ -792,8 +812,7 @@ CREATE OPERATOR < ( It is important to specify the correct commutator and negator operators, as well as suitable restriction and join selectivity functions, otherwise the optimizer will be unable to make effective - use of the index. Note that the less-than, equal, and - greater-than cases should use different selectivity functions. + use of the index. @@ -809,7 +828,7 @@ CREATE OPERATOR < ( type we'd probably want = to be the ordinary equality operation for complex numbers (and not the equality of the absolute values). In that case, we'd need to use some other - operator name for complex_abs_eq. + operator name for complex_abs_eq. @@ -886,7 +905,7 @@ CREATE OPERATOR CLASS complex_abs_ops The above example assumes that you want to make this new operator class the default B-tree operator class for the complex data type. - If you don't, just leave out the word DEFAULT. + If you don't, just leave out the word DEFAULT. @@ -909,11 +928,11 @@ CREATE OPERATOR CLASS complex_abs_ops To handle these needs, PostgreSQL uses the concept of an operator - familyoperator family. + familyoperator family. An operator family contains one or more operator classes, and can also contain indexable operators and corresponding support functions that belong to the family as a whole but not to any single class within the - family. We say that such operators and functions are loose + family. We say that such operators and functions are loose within the family, as opposed to being bound into a specific class. Typically each operator class contains single-data-type operators while cross-data-type operators are loose in the family. @@ -939,10 +958,10 @@ CREATE OPERATOR CLASS complex_abs_ops As an example, PostgreSQL has a built-in - B-tree operator family integer_ops, which includes operator - classes int8_ops, int4_ops, and - int2_ops for indexes on bigint (int8), - integer (int4), and smallint (int2) + B-tree operator family integer_ops, which includes operator + classes int8_ops, int4_ops, and + int2_ops for indexes on bigint (int8), + integer (int4), and smallint (int2) columns respectively. 
The family also contains cross-data-type comparison operators allowing any two of these types to be compared, so that an index on one of these types can be searched using a comparison value of another @@ -960,7 +979,8 @@ DEFAULT FOR TYPE int8 USING btree FAMILY integer_ops AS OPERATOR 4 >= , OPERATOR 5 > , FUNCTION 1 btint8cmp(int8, int8) , - FUNCTION 2 btint8sortsupport(internal) ; + FUNCTION 2 btint8sortsupport(internal) , + FUNCTION 3 in_range(int8, int8, int8, boolean, boolean) ; CREATE OPERATOR CLASS int4_ops DEFAULT FOR TYPE int4 USING btree FAMILY integer_ops AS @@ -971,7 +991,8 @@ DEFAULT FOR TYPE int4 USING btree FAMILY integer_ops AS OPERATOR 4 >= , OPERATOR 5 > , FUNCTION 1 btint4cmp(int4, int4) , - FUNCTION 2 btint4sortsupport(internal) ; + FUNCTION 2 btint4sortsupport(internal) , + FUNCTION 3 in_range(int4, int4, int4, boolean, boolean) ; CREATE OPERATOR CLASS int2_ops DEFAULT FOR TYPE int2 USING btree FAMILY integer_ops AS @@ -982,7 +1003,8 @@ DEFAULT FOR TYPE int2 USING btree FAMILY integer_ops AS OPERATOR 4 >= , OPERATOR 5 > , FUNCTION 1 btint2cmp(int2, int2) , - FUNCTION 2 btint2sortsupport(internal) ; + FUNCTION 2 btint2sortsupport(internal) , + FUNCTION 3 in_range(int2, int2, int2, boolean, boolean) ; ALTER OPERATOR FAMILY integer_ops USING btree ADD -- cross-type comparisons int8 vs int2 @@ -1031,11 +1053,17 @@ ALTER OPERATOR FAMILY integer_ops USING btree ADD OPERATOR 3 = (int2, int4) , OPERATOR 4 >= (int2, int4) , OPERATOR 5 > (int2, int4) , - FUNCTION 1 btint24cmp(int2, int4) ; + FUNCTION 1 btint24cmp(int2, int4) , + + -- cross-type in_range functions + FUNCTION 3 in_range(int4, int4, int8, boolean, boolean) , + FUNCTION 3 in_range(int4, int4, int2, boolean, boolean) , + FUNCTION 3 in_range(int2, int2, int8, boolean, boolean) , + FUNCTION 3 in_range(int2, int2, int4, boolean, boolean) ; ]]> - Notice that this definition overloads the operator strategy and + Notice that this definition overloads the operator strategy and support function numbers: each number occurs multiple times within the family. This is allowed so long as each instance of a particular number has distinct input data types. The instances that have @@ -1047,11 +1075,8 @@ ALTER OPERATOR FAMILY integer_ops USING btree ADD In a B-tree operator family, all the operators in the family must sort - compatibly, meaning that the transitive laws hold across all the data types - supported by the family: if A = B and B = C, then A = C, - and if A < B and B < C, then A < C. Moreover, implicit - or binary coercion casts between types represented in the operator family - must not change the associated sort ordering. For each + compatibly, as is specified in detail in . + For each operator in the family there must be a support function having the same two input data types as the operator. It is recommended that a family be complete, i.e., for each combination of data types, all operators are @@ -1086,7 +1111,7 @@ ALTER OPERATOR FAMILY integer_ops USING btree ADD In BRIN, the requirements depends on the framework that provides the - operator classes. For operator classes based on minmax, + operator classes. For operator classes based on minmax, the behavior required is the same as for B-tree operator families: all the operators in the family must sort compatibly, and casts must not change the associated sort ordering. @@ -1120,32 +1145,27 @@ ALTER OPERATOR FAMILY integer_ops USING btree ADD - In particular, there are SQL features such as ORDER BY and - DISTINCT that require comparison and sorting of values. 
+ In particular, there are SQL features such as ORDER BY and + DISTINCT that require comparison and sorting of values. To implement these features on a user-defined data type, PostgreSQL looks for the default B-tree operator - class for the data type. The equals member of this operator + class for the data type. The equals member of this operator class defines the system's notion of equality of values for - GROUP BY and DISTINCT, and the sort ordering - imposed by the operator class defines the default ORDER BY + GROUP BY and DISTINCT, and the sort ordering + imposed by the operator class defines the default ORDER BY ordering. - - Comparison of arrays of user-defined types also relies on the semantics - defined by the default B-tree operator class. - - If there is no default B-tree operator class for a data type, the system will look for a default hash operator class. But since that kind of - operator class only provides equality, in practice it is only enough - to support array equality. + operator class only provides equality, it is only able to support grouping + not sorting. When there is no default operator class for a data type, you will get - errors like could not identify an ordering operator if you + errors like could not identify an ordering operator if you try to use these SQL features with the data type. @@ -1153,14 +1173,65 @@ ALTER OPERATOR FAMILY integer_ops USING btree ADD In PostgreSQL versions before 7.4, sorting and grouping operations would implicitly use operators named - =, <, and >. The new + =, <, and >. The new behavior of relying on default operator classes avoids having to make any assumption about the behavior of operators with particular names. - Another important point is that an operator that + Sorting by a non-default B-tree operator class is possible by specifying + the class's less-than operator in a USING option, + for example + +SELECT * FROM mytable ORDER BY somecol USING ~<~; + + Alternatively, specifying the class's greater-than operator + in USING selects a descending-order sort. + + + + Comparison of arrays of a user-defined type also relies on the semantics + defined by the type's default B-tree operator class. If there is no + default B-tree operator class, but there is a default hash operator class, + then array equality is supported, but not ordering comparisons. + + + + Another SQL feature that requires even more data-type-specific knowledge + is the RANGE offset + PRECEDING/FOLLOWING framing option + for window functions (see ). + For a query such as + +SELECT sum(x) OVER (ORDER BY x RANGE BETWEEN 5 PRECEDING AND 10 FOLLOWING) + FROM mytable; + + it is not sufficient to know how to order by x; + the database must also understand how to subtract 5 or + add 10 to the current row's value of x + to identify the bounds of the current window frame. Comparing the + resulting bounds to other rows' values of x is + possible using the comparison operators provided by the B-tree operator + class that defines the ORDER BY ordering — but + addition and subtraction operators are not part of the operator class, so + which ones should be used? Hard-wiring that choice would be undesirable, + because different sort orders (different B-tree operator classes) might + need different behavior. Therefore, a B-tree operator class can specify + an in_range support function that encapsulates the + addition and subtraction behaviors that make sense for its sort order. 
+ It can even provide more than one in_range support function, in case + there is more than one data type that makes sense to use as the offset + in RANGE clauses. + If the B-tree operator class associated with the window's ORDER + BY clause does not have a matching in_range support function, + the RANGE offset + PRECEDING/FOLLOWING + option is not supported. + + + + Another important point is that an equality operator that appears in a hash operator family is a candidate for hash joins, hash aggregation, and related optimizations. The hash operator family is essential here since it identifies the hash function(s) to use. @@ -1171,23 +1242,23 @@ ALTER OPERATOR FAMILY integer_ops USING btree ADD Ordering Operators - Some index access methods (currently, only GiST) support the concept of - ordering operators. What we have been discussing so far - are search operators. A search operator is one for which + Some index access methods (currently, only GiST and SP-GiST) support the concept of + ordering operators. What we have been discussing so far + are search operators. A search operator is one for which the index can be searched to find all rows satisfying - WHERE - indexed_column - operator - constant. + WHERE + indexed_column + operator + constant. Note that nothing is promised about the order in which the matching rows will be returned. In contrast, an ordering operator does not restrict the set of rows that can be returned, but instead determines their order. An ordering operator is one for which the index can be scanned to return rows in the order represented by - ORDER BY - indexed_column - operator - constant. + ORDER BY + indexed_column + operator + constant. The reason for defining ordering operators that way is that it supports nearest-neighbor searches, if the operator is one that measures distance. For example, a query like @@ -1197,7 +1268,7 @@ SELECT * FROM places ORDER BY location <-> point '(101,456)' LIMIT 10; finds the ten places closest to a given target point. A GiST index on the location column can do this efficiently because - <-> is an ordering operator. + <-> is an ordering operator. @@ -1209,17 +1280,17 @@ SELECT * FROM places ORDER BY location <-> point '(101,456)' LIMIT 10; a B-tree operator family that specifies the sort ordering of the result data type. As was stated in the previous section, B-tree operator families define PostgreSQL's notion of ordering, so - this is a natural representation. Since the point <-> - operator returns float8, it could be specified in an operator + this is a natural representation. Since the point <-> + operator returns float8, it could be specified in an operator class creation command like this: (point, point) FOR ORDER BY float_ops ]]> - where float_ops is the built-in operator family that includes - operations on float8. This declaration states that the index + where float_ops is the built-in operator family that includes + operations on float8. This declaration states that the index is able to return rows in order of increasing values of the - <-> operator. + <-> operator. @@ -1235,21 +1306,21 @@ OPERATOR 15 <-> (point, point) FOR ORDER BY float_ops Normally, declaring an operator as a member of an operator class (or family) means that the index method can retrieve exactly the set of rows - that satisfy a WHERE condition using the operator. For example: + that satisfy a WHERE condition using the operator. For example: SELECT * FROM table WHERE integer_column < 4; can be satisfied exactly by a B-tree index on the integer column. 
But there are cases where an index is useful as an inexact guide to the matching rows. For example, if a GiST index stores only bounding boxes - for geometric objects, then it cannot exactly satisfy a WHERE + for geometric objects, then it cannot exactly satisfy a WHERE condition that tests overlap between nonrectangular objects such as polygons. Yet we could use the index to find objects whose bounding box overlaps the bounding box of the target object, and then do the exact overlap test only on the objects found by the index. If this - scenario applies, the index is said to be lossy for the + scenario applies, the index is said to be lossy for the operator. Lossy index searches are implemented by having the index - method return a recheck flag when a row might or might + method return a recheck flag when a row might or might not really satisfy the query condition. The core system will then test the original query condition on the retrieved row to see whether it should be returned as a valid match. This approach works if @@ -1266,8 +1337,8 @@ SELECT * FROM table WHERE integer_column < 4; the bounding box of a complex object such as a polygon. In this case there's not much value in storing the whole polygon in the index entry — we might as well store just a simpler object of type - box. This situation is expressed by the STORAGE - option in CREATE OPERATOR CLASS: we'd write something like: + box. This situation is expressed by the STORAGE + option in CREATE OPERATOR CLASS: we'd write something like: CREATE OPERATOR CLASS polygon_ops @@ -1277,16 +1348,16 @@ CREATE OPERATOR CLASS polygon_ops At present, only the GiST, GIN and BRIN index methods support a - STORAGE type that's different from the column data type. - The GiST compress and decompress support - routines must deal with data-type conversion when STORAGE - is used. In GIN, the STORAGE type identifies the type of - the key values, which normally is different from the type + STORAGE type that's different from the column data type. + The GiST compress and decompress support + routines must deal with data-type conversion when STORAGE + is used. In GIN, the STORAGE type identifies the type of + the key values, which normally is different from the type of the indexed column — for example, an operator class for integer-array columns might have keys that are just integers. The - GIN extractValue and extractQuery support + GIN extractValue and extractQuery support routines are responsible for extracting keys from indexed values. - BRIN is similar to GIN: the STORAGE type identifies the + BRIN is similar to GIN: the STORAGE type identifies the type of the stored summary values, and operator classes' support procedures are responsible for interpreting the summary values correctly. diff --git a/doc/src/sgml/xml2.sgml b/doc/src/sgml/xml2.sgml index 9bbc9e75d7..0a0f13d02d 100644 --- a/doc/src/sgml/xml2.sgml +++ b/doc/src/sgml/xml2.sgml @@ -8,7 +8,7 @@ - The xml2 module provides XPath querying and + The xml2 module provides XPath querying and XSLT functionality. @@ -16,7 +16,7 @@ Deprecation Notice - From PostgreSQL 8.3 on, there is XML-related + From PostgreSQL 8.3 on, there is XML-related functionality based on the SQL/XML standard in the core server. That functionality covers XML syntax checking and XPath queries, which is what this module does, and more, but the API is @@ -34,9 +34,9 @@ Description of Functions - shows the functions provided by this module. + shows the functions provided by this module. 
These functions provide straightforward XML parsing and XPath queries. - All arguments are of type text, so for brevity that is not shown. + All arguments are of type text, so for brevity that is not shown.
@@ -63,8 +63,8 @@ This parses the document text in its parameter and returns true if the document is well-formed XML. (Note: this is an alias for the standard - PostgreSQL function xml_is_well_formed(). The - name xml_valid() is technically incorrect since validity + PostgreSQL function xml_is_well_formed(). The + name xml_valid() is technically incorrect since validity and well-formedness have different meanings in XML.) @@ -124,7 +124,7 @@ <itemtag>Value 2....</itemtag> </toptag> - If either toptag or itemtag is an empty string, the relevant tag is omitted. + If either toptag or itemtag is an empty string, the relevant tag is omitted. @@ -139,7 +139,7 @@ - Like xpath_nodeset(document, query, toptag, itemtag) but result omits both tags. + Like xpath_nodeset(document, query, toptag, itemtag) but result omits both tags. @@ -154,7 +154,7 @@ - Like xpath_nodeset(document, query, toptag, itemtag) but result omits toptag. + Like xpath_nodeset(document, query, toptag, itemtag) but result omits toptag. @@ -170,8 +170,8 @@ This function returns multiple values separated by the specified - separator, for example Value 1,Value 2,Value 3 if - separator is ,. + separator, for example Value 1,Value 2,Value 3 if + separator is ,. @@ -185,7 +185,7 @@ text - This is a wrapper for the above function that uses , + This is a wrapper for the above function that uses , as the separator. @@ -206,12 +206,12 @@ xpath_table(text key, text document, text relation, text xpaths, text criteria) - xpath_table is a table function that evaluates a set of XPath + xpath_table is a table function that evaluates a set of XPath queries on each of a set of documents and returns the results as a table. The primary key field from the original document table is returned as the first column of the result so that the result set can readily be used in joins. The parameters are described in - . + .
@@ -228,7 +228,7 @@ xpath_table(text key, text document, text relation, text xpaths, text criteria) key - the name of the key field — this is just a field to be used as + the name of the key field — this is just a field to be used as the first column of the output table, i.e., it identifies the record from which each output row came (see note below about multiple values) @@ -285,7 +285,7 @@ xpath_table(text key, text document, text relation, text xpaths, text criteria) - so those parameters can be anything valid in those particular + so those parameters can be anything valid in those particular locations. The result from this SELECT needs to return exactly two columns (which it will unless you try to list multiple fields for key or document). Beware that this simplistic approach requires that you @@ -293,8 +293,8 @@ xpath_table(text key, text document, text relation, text xpaths, text criteria) - The function has to be used in a FROM expression, with an - AS clause to specify the output columns; for example + The function has to be used in a FROM expression, with an + AS clause to specify the output columns; for example SELECT * FROM xpath_table('article_id', @@ -304,8 +304,8 @@ xpath_table('article_id', 'date_entered > ''2003-01-01'' ') AS t(article_id integer, author text, page_count integer, title text); - The AS clause defines the names and types of the columns in the - output table. The first is the key field and the rest correspond + The AS clause defines the names and types of the columns in the + output table. The first is the key field and the rest correspond to the XPath queries. If there are more XPath queries than result columns, the extra queries will be ignored. If there are more result columns @@ -313,19 +313,19 @@ AS t(article_id integer, author text, page_count integer, title text); - Notice that this example defines the page_count result + Notice that this example defines the page_count result column as an integer. The function deals internally with string representations, so when you say you want an integer in the output, it will take the string representation of the XPath result and use PostgreSQL input - functions to transform it into an integer (or whatever type the AS + functions to transform it into an integer (or whatever type the AS clause requests). An error will result if it can't do this — for example if the result is empty — so you may wish to just stick to - text as the column type if you think your data has any problems. + text as the column type if you think your data has any problems. - The calling SELECT statement doesn't necessarily have to be - just SELECT * — it can reference the output + The calling SELECT statement doesn't necessarily have to be + just SELECT * — it can reference the output columns by name or join them to other tables. The function produces a virtual table with which you can perform any operation you wish (e.g. aggregation, joining, sorting etc). So we could also have: @@ -346,7 +346,7 @@ WHERE t.author_id = p.person_id; Multivalued Results - The xpath_table function assumes that the results of each XPath query + The xpath_table function assumes that the results of each XPath query might be multivalued, so the number of rows returned by the function may not be the same as the number of input documents. 
The first row returned contains the first result from each query, the second row the @@ -393,8 +393,8 @@ WHERE id = 1 ORDER BY doc_num, line_num - To get doc_num on every line, the solution is to use two invocations - of xpath_table and join the results: + To get doc_num on every line, the solution is to use two invocations + of xpath_table and join the results: SELECT t.*,i.doc_num FROM @@ -437,15 +437,15 @@ xslt_process(text document, text stylesheet, text paramlist) returns text This function applies the XSL stylesheet to the document and returns - the transformed result. The paramlist is a list of parameter + the transformed result. The paramlist is a list of parameter assignments to be used in the transformation, specified in the form - a=1,b=2. Note that the + a=1,b=2. Note that the parameter parsing is very simple-minded: parameter values cannot contain commas! - There is also a two-parameter version of xslt_process which + There is also a two-parameter version of xslt_process which does not pass any parameters to the transformation. diff --git a/doc/src/sgml/xoper.sgml b/doc/src/sgml/xoper.sgml index 8568e21216..2f5560ac50 100644 --- a/doc/src/sgml/xoper.sgml +++ b/doc/src/sgml/xoper.sgml @@ -32,7 +32,7 @@ Here is an example of creating an operator for adding two complex numbers. We assume we've already created the definition of type - complex (see ). First we need a + complex (see ). First we need a function that does the work, then we can define the operator: @@ -44,7 +44,7 @@ CREATE FUNCTION complex_add(complex, complex) CREATE OPERATOR + ( leftarg = complex, rightarg = complex, - procedure = complex_add, + function = complex_add, commutator = + ); @@ -65,12 +65,12 @@ SELECT (a + b) AS c FROM test_complex; We've shown how to create a binary operator here. To create unary - operators, just omit one of leftarg (for left unary) or - rightarg (for right unary). The procedure + operators, just omit one of leftarg (for left unary) or + rightarg (for right unary). The function clause and the argument clauses are the only required items in - CREATE OPERATOR. The commutator + CREATE OPERATOR. The commutator clause shown in the example is an optional hint to the query - optimizer. Further details about commutator and other + optimizer. Further details about commutator and other optimizer hints appear in the next section. @@ -98,16 +98,16 @@ SELECT (a + b) AS c FROM test_complex; - <literal>COMMUTATOR</> + <literal>COMMUTATOR</literal> - The COMMUTATOR clause, if provided, names an operator that is the + The COMMUTATOR clause, if provided, names an operator that is the commutator of the operator being defined. We say that operator A is the commutator of operator B if (x A y) equals (y B x) for all possible input values x, y. Notice that B is also the commutator of A. For example, - operators < and > for a particular data type are usually each others' - commutators, and operator + is usually commutative with itself. - But operator - is usually not commutative with anything. + operators < and > for a particular data type are usually each others' + commutators, and operator + is usually commutative with itself. + But operator - is usually not commutative with anything. @@ -115,23 +115,23 @@ SELECT (a + b) AS c FROM test_complex; right operand type of its commutator, and vice versa. So the name of the commutator operator is all that PostgreSQL needs to be given to look up the commutator, and that's all that needs to - be provided in the COMMUTATOR clause. 
+ be provided in the COMMUTATOR clause. It's critical to provide commutator information for operators that will be used in indexes and join clauses, because this allows the - query optimizer to flip around such a clause to the forms + query optimizer to flip around such a clause to the forms needed for different plan types. For example, consider a query with - a WHERE clause like tab1.x = tab2.y, where tab1.x - and tab2.y are of a user-defined type, and suppose that - tab2.y is indexed. The optimizer cannot generate an + a WHERE clause like tab1.x = tab2.y, where tab1.x + and tab2.y are of a user-defined type, and suppose that + tab2.y is indexed. The optimizer cannot generate an index scan unless it can determine how to flip the clause around to - tab2.y = tab1.x, because the index-scan machinery expects + tab2.y = tab1.x, because the index-scan machinery expects to see the indexed column on the left of the operator it is given. - PostgreSQL will not simply + PostgreSQL will not simply assume that this is a valid transformation — the creator of the - = operator must specify that it is valid, by marking the + = operator must specify that it is valid, by marking the operator with commutator information. @@ -145,20 +145,20 @@ SELECT (a + b) AS c FROM test_complex; - One way is to omit the COMMUTATOR clause in the first operator that + One way is to omit the COMMUTATOR clause in the first operator that you define, and then provide one in the second operator's definition. Since PostgreSQL knows that commutative operators come in pairs, when it sees the second definition it will - automatically go back and fill in the missing COMMUTATOR clause in + automatically go back and fill in the missing COMMUTATOR clause in the first definition. - The other, more straightforward way is just to include COMMUTATOR clauses + The other, more straightforward way is just to include COMMUTATOR clauses in both definitions. When PostgreSQL processes - the first definition and realizes that COMMUTATOR refers to a nonexistent + the first definition and realizes that COMMUTATOR refers to a nonexistent operator, the system will make a dummy entry for that operator in the system catalog. This dummy entry will have valid data only for the operator name, left and right operand types, and result type, @@ -175,15 +175,15 @@ SELECT (a + b) AS c FROM test_complex; - <literal>NEGATOR</> + <literal>NEGATOR</literal> - The NEGATOR clause, if provided, names an operator that is the + The NEGATOR clause, if provided, names an operator that is the negator of the operator being defined. We say that operator A is the negator of operator B if both return Boolean results and (x A y) equals NOT (x B y) for all possible inputs x, y. Notice that B is also the negator of A. - For example, < and >= are a negator pair for most data types. + For example, < and >= are a negator pair for most data types. An operator can never validly be its own negator. @@ -195,15 +195,15 @@ SELECT (a + b) AS c FROM test_complex; An operator's negator must have the same left and/or right operand types - as the operator to be defined, so just as with COMMUTATOR, only the operator - name need be given in the NEGATOR clause. + as the operator to be defined, so just as with COMMUTATOR, only the operator + name need be given in the NEGATOR clause. Providing a negator is very helpful to the query optimizer since - it allows expressions like NOT (x = y) to be simplified into - x <> y. 
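To make the commutator/negator back-links concrete, here is a minimal sketch using the function = spelling that this patch's documentation switches to (mytype, mytype_lt and mytype_gt are placeholders; only the clause names come from the text above):

    CREATE OPERATOR < (
        leftarg = mytype,
        rightarg = mytype,
        function = mytype_lt,
        commutator = > ,   -- (x < y) is the same as (y > x)
        negator = >=       -- NOT (x < y) is the same as (x >= y)
    );

    CREATE OPERATOR > (
        leftarg = mytype,
        rightarg = mytype,
        function = mytype_gt,
        commutator = < ,   -- completes the pair; the reference above left a
                           -- dummy catalog entry that this definition fills in
        negator = <=
    );

The >= and <= operators named as negators would be defined the same way in a complete type.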
This comes up more often than you might think, because - NOT operations can be inserted as a consequence of other rearrangements. + it allows expressions like NOT (x = y) to be simplified into + x <> y. This comes up more often than you might think, because + NOT operations can be inserted as a consequence of other rearrangements. @@ -214,13 +214,13 @@ SELECT (a + b) AS c FROM test_complex; - <literal>RESTRICT</> + <literal>RESTRICT</literal> - The RESTRICT clause, if provided, names a restriction selectivity + The RESTRICT clause, if provided, names a restriction selectivity estimation function for the operator. (Note that this is a function - name, not an operator name.) RESTRICT clauses only make sense for - binary operators that return boolean. The idea behind a restriction + name, not an operator name.) RESTRICT clauses only make sense for + binary operators that return boolean. The idea behind a restriction selectivity estimator is to guess what fraction of the rows in a table will satisfy a WHERE-clause condition of the form: @@ -228,10 +228,10 @@ column OP constant for the current operator and a particular constant value. This assists the optimizer by - giving it some idea of how many rows will be eliminated by WHERE + giving it some idea of how many rows will be eliminated by WHERE clauses that have this form. (What happens if the constant is on the left, you might be wondering? Well, that's one of the things that - COMMUTATOR is for...) + COMMUTATOR is for...) @@ -240,22 +240,13 @@ column OP constant one of the system's standard estimators for many of your own operators. These are the standard restriction estimators: - eqsel for = - neqsel for <> - scalarltsel for < or <= - scalargtsel for > or >= - - It might seem a little odd that these are the categories, but they - make sense if you think about it. = will typically accept only - a small fraction of the rows in a table; <> will typically reject - only a small fraction. < will accept a fraction that depends on - where the given constant falls in the range of values for that table - column (which, it just so happens, is information collected by - ANALYZE and made available to the selectivity estimator). - <= will accept a slightly larger fraction than < for the same - comparison constant, but they're close enough to not be worth - distinguishing, especially since we're not likely to do better than a - rough guess anyhow. Similar remarks apply to > and >=. + eqsel for = + neqsel for <> + scalarltsel for < + scalarlesel for <= + scalargtsel for > + scalargesel for >= + @@ -267,12 +258,14 @@ column OP constant - You can use scalarltsel and scalargtsel for comparisons on data types that - have some sensible means of being converted into numeric scalars for - range comparisons. If possible, add the data type to those understood - by the function convert_to_scalar() in src/backend/utils/adt/selfuncs.c. + You can use scalarltsel, scalarlesel, + scalargtsel and scalargesel for comparisons on + data types that have some sensible means of being converted into numeric + scalars for range comparisons. If possible, add the data type to those + understood by the function convert_to_scalar() in + src/backend/utils/adt/selfuncs.c. (Eventually, this function should be replaced by per-data-type functions - identified through a column of the pg_type system catalog; but that hasn't happened + identified through a column of the pg_type system catalog; but that hasn't happened yet.) 
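A sketch of how the expanded estimator list above gets used (mytype and mytype_le are placeholders): the <= operator of a user-defined type can now point at the dedicated scalarlesel estimator rather than reusing scalarltsel, which the old wording recommended for both < and <=.

    CREATE OPERATOR <= (
        leftarg = mytype,
        rightarg = mytype,
        function = mytype_le,
        commutator = >= ,
        negator = > ,
        restrict = scalarlesel   -- dedicated <= estimator; before this change
                                 -- scalarltsel was the documented choice
    );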
If you do not do this, things will still work, but the optimizer's estimates won't be as good as they could be. @@ -286,15 +279,15 @@ column OP constant - <literal>JOIN</> + <literal>JOIN</literal> - The JOIN clause, if provided, names a join selectivity + The JOIN clause, if provided, names a join selectivity estimation function for the operator. (Note that this is a function - name, not an operator name.) JOIN clauses only make sense for + name, not an operator name.) JOIN clauses only make sense for binary operators that return boolean. The idea behind a join selectivity estimator is to guess what fraction of the rows in a - pair of tables will satisfy a WHERE-clause condition of the form: + pair of tables will satisfy a WHERE-clause condition of the form: table1.column1 OP table2.column2 @@ -308,25 +301,27 @@ table1.column1 OP table2.column2 a join selectivity estimator function, but will just suggest that you use one of the standard estimators if one is applicable: - eqjoinsel for = - neqjoinsel for <> - scalarltjoinsel for < or <= - scalargtjoinsel for > or >= - areajoinsel for 2D area-based comparisons - positionjoinsel for 2D position-based comparisons - contjoinsel for 2D containment-based comparisons + eqjoinsel for = + neqjoinsel for <> + scalarltjoinsel for < + scalarlejoinsel for <= + scalargtjoinsel for > + scalargejoinsel for >= + areajoinsel for 2D area-based comparisons + positionjoinsel for 2D position-based comparisons + contjoinsel for 2D containment-based comparisons - <literal>HASHES</> + <literal>HASHES</literal> The HASHES clause, if present, tells the system that it is permissible to use the hash join method for a join based on this - operator. HASHES only makes sense for a binary operator that - returns boolean, and in practice the operator must represent + operator. HASHES only makes sense for a binary operator that + returns boolean, and in practice the operator must represent equality for some data type or pair of data types. @@ -341,7 +336,7 @@ table1.column1 OP table2.column2 hashing for operators that take the same data type on both sides. However, sometimes it is possible to design compatible hash functions for two or more data types; that is, functions that will generate the - same hash codes for equal values, even though the values + same hash codes for equal values, even though the values have different representations. For example, it's fairly simple to arrange this property when hashing integers of different widths. @@ -362,10 +357,10 @@ table1.column1 OP table2.column2 are machine-dependent ways in which it might fail to do the right thing. For example, if your data type is a structure in which there might be uninteresting pad bits, you cannot simply pass the whole structure to - hash_any. (Unless you write your other operators and + hash_any. (Unless you write your other operators and functions to ensure that the unused bits are always zero, which is the recommended strategy.) - Another example is that on machines that meet the IEEE + Another example is that on machines that meet the IEEE floating-point standard, negative zero and positive zero are different values (different bit patterns) but they are defined to compare equal. If a float value might contain negative zero then extra steps are needed @@ -397,8 +392,8 @@ table1.column1 OP table2.column2 strict, the function must also be complete: that is, it should return true or false, never null, for any two nonnull inputs. 
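Putting the join-estimator list and the HASHES discussion together, a hedged sketch (again with placeholder names) of an equality operator declared hash-joinable; the underlying mytype_eq would normally be strict, and hash joins are only actually chosen once a matching hash operator family exists.

    CREATE OPERATOR = (
        leftarg = mytype,
        rightarg = mytype,
        function = mytype_eq,
        commutator = = ,
        negator = <> ,
        restrict = eqsel,
        join = eqjoinsel,
        hashes
    );

The MERGES flag covered just below is added in the same way when a btree operator family is available.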
If this rule is - not followed, hash-optimization of IN operations might - generate wrong results. (Specifically, IN might return + not followed, hash-optimization of IN operations might + generate wrong results. (Specifically, IN might return false where the correct answer according to the standard would be null; or it might yield an error complaining that it wasn't prepared for a null result.) @@ -408,13 +403,13 @@ table1.column1 OP table2.column2 - <literal>MERGES</> + <literal>MERGES</literal> The MERGES clause, if present, tells the system that it is permissible to use the merge-join method for a join based on this - operator. MERGES only makes sense for a binary operator that - returns boolean, and in practice the operator must represent + operator. MERGES only makes sense for a binary operator that + returns boolean, and in practice the operator must represent equality for some data type or pair of data types. @@ -423,7 +418,7 @@ table1.column1 OP table2.column2 into order and then scanning them in parallel. So, both data types must be capable of being fully ordered, and the join operator must be one that can only succeed for pairs of values that fall at the - same place + same place in the sort order. In practice this means that the join operator must behave like equality. But it is possible to merge-join two distinct data types so long as they are logically compatible. For @@ -435,7 +430,7 @@ table1.column1 OP table2.column2 To be marked MERGES, the join operator must appear - as an equality member of a btree index operator family. + as an equality member of a btree index operator family. This is not enforced when you create the operator, since of course the referencing operator family couldn't exist yet. But the operator will not actually be used for merge joins @@ -450,7 +445,7 @@ table1.column1 OP table2.column2 if they are different) that appears in the same operator family. If this is not the case, planner errors might occur when the operator is used. Also, it is a good idea (but not strictly required) for - a btree operator family that supports multiple data types to provide + a btree operator family that supports multiple data types to provide equality operators for every combination of the data types; this allows better optimization. diff --git a/doc/src/sgml/xplang.sgml b/doc/src/sgml/xplang.sgml index 4460c8f361..db765b4644 100644 --- a/doc/src/sgml/xplang.sgml +++ b/doc/src/sgml/xplang.sgml @@ -11,7 +11,7 @@ PostgreSQL allows user-defined functions to be written in other languages besides SQL and C. These other languages are generically called procedural - languages (PLs). For a function + languages (PLs). For a function written in a procedural language, the database server has no built-in knowledge about how to interpret the function's source text. Instead, the task is passed to a special handler that knows @@ -27,15 +27,15 @@ There are currently four procedural languages available in the standard PostgreSQL distribution: - PL/pgSQL (), - PL/Tcl (), - PL/Perl (), and - PL/Python (). + PL/pgSQL (), + PL/Tcl (), + PL/Perl (), and + PL/Python (). There are additional procedural languages available that are not - included in the core distribution. + included in the core distribution. has information about finding them. In addition other languages can be defined by users; the basics of developing a new procedural - language are covered in . + language are covered in . @@ -44,9 +44,9 @@ A procedural language must be installed into each database where it is to be used. 
But procedural languages installed in - the database template1 are automatically available in all + the database template1 are automatically available in all subsequently created databases, since their entries in - template1 will be copied by CREATE DATABASE. + template1 will be copied by CREATE DATABASE. So the database administrator can decide which languages are available in which databases and can make some languages available by default if desired. @@ -54,8 +54,8 @@ For the languages supplied with the standard distribution, it is - only necessary to execute CREATE EXTENSION - language_name to install the language into the + only necessary to execute CREATE EXTENSION + language_name to install the language into the current database. The manual procedure described below is only recommended for installing languages that have not been packaged as extensions. @@ -70,7 +70,7 @@ A procedural language is installed in a database in five steps, which must be carried out by a database superuser. In most cases the required SQL commands should be packaged as the installation script - of an extension, so that CREATE EXTENSION can be + of an extension, so that CREATE EXTENSION can be used to execute them. @@ -79,7 +79,7 @@ The shared object for the language handler must be compiled and installed into an appropriate library directory. This works in the same way as building and installing modules with regular user-defined C - functions does; see . Often, the language + functions does; see . Often, the language handler will depend on an external library that provides the actual programming language engine; if so, that must be installed as well. @@ -103,9 +103,9 @@ CREATE FUNCTION handler_function_name() - Optionally, the language handler can provide an inline + Optionally, the language handler can provide an inline handler function that executes anonymous code blocks - ( commands) + ( commands) written in this language. If an inline handler function is provided by the language, declare it with a command like @@ -119,10 +119,10 @@ CREATE FUNCTION inline_function_name(internal) - Optionally, the language handler can provide a validator + Optionally, the language handler can provide a validator function that checks a function definition for correctness without actually executing it. The validator function is called by - CREATE FUNCTION if it exists. If a validator function + CREATE FUNCTION if it exists. If a validator function is provided by the language, declare it with a command like CREATE FUNCTION validator_function_name(oid) @@ -146,7 +146,7 @@ CREATE TRUSTED PROCEDURAL LANGUAGE TRUSTED flag should only be given for languages that do not allow access to database server @@ -165,7 +165,7 @@ CREATE TRUSTED PROCEDURAL LANGUAGE - shows how the manual + shows how the manual installation procedure would work with the language PL/Perl. @@ -206,7 +206,7 @@ CREATE TRUSTED PROCEDURAL LANGUAGE plperl VALIDATOR plperl_validator; then defines that the previously declared functions - should be invoked for functions and trigger procedures where the + should be invoked for functions and procedures where the language attribute is plperl. @@ -217,13 +217,13 @@ CREATE TRUSTED PROCEDURAL LANGUAGE plperl is built and installed into the library directory; furthermore, the PL/pgSQL language itself is installed in all databases. 
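For languages packaged as extensions, the whole manual sequence above collapses to a single command. The quick check below uses an illustrative perl_max function that is not part of this patch:

    CREATE EXTENSION plperl;

    -- minimal sanity check that the handler and validator are wired up
    CREATE FUNCTION perl_max(integer, integer) RETURNS integer AS $$
        my ($x, $y) = @_;
        return $x > $y ? $x : $y;
    $$ LANGUAGE plperl;

    SELECT perl_max(1, 2);   -- expected to return 2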
- If Tcl support is configured in, the handlers for - PL/Tcl and PL/TclU are built and installed + If Tcl support is configured in, the handlers for + PL/Tcl and PL/TclU are built and installed in the library directory, but the language itself is not installed in any database by default. - Likewise, the PL/Perl and PL/PerlU + Likewise, the PL/Perl and PL/PerlU handlers are built and installed if Perl support is configured, and the - PL/PythonU handler is installed if Python support is + PL/PythonU handler is installed if Python support is configured, but these languages are not installed by default. diff --git a/doc/src/sgml/xtypes.sgml b/doc/src/sgml/xtypes.sgml index ac0b8a2943..186e287529 100644 --- a/doc/src/sgml/xtypes.sgml +++ b/doc/src/sgml/xtypes.sgml @@ -9,10 +9,10 @@ - As described in , + As described in , PostgreSQL can be extended to support new data types. This section describes how to define new base types, - which are data types defined below the level of the SQL + which are data types defined below the level of the SQL language. Creating a new base type requires implementing functions to operate on the type in a low-level language, usually C. @@ -20,8 +20,8 @@ The examples in this section can be found in complex.sql and complex.c - in the src/tutorial directory of the source distribution. - See the README file in that directory for instructions + in the src/tutorial directory of the source distribution. + See the README file in that directory for instructions about running the examples. @@ -45,7 +45,7 @@ - Suppose we want to define a type complex that represents + Suppose we want to define a type complex that represents complex numbers. A natural way to represent a complex number in memory would be the following C structure: @@ -57,7 +57,7 @@ typedef struct Complex { We will need to make this a pass-by-reference type, since it's too - large to fit into a single Datum value. + large to fit into a single Datum value. @@ -86,8 +86,8 @@ complex_in(PG_FUNCTION_ARGS) if (sscanf(str, " ( %lf , %lf )", &x, &y) != 2) ereport(ERROR, (errcode(ERRCODE_INVALID_TEXT_REPRESENTATION), - errmsg("invalid input syntax for complex: \"%s\"", - str))); + errmsg("invalid input syntax for type %s: \"%s\"", + "complex", str))); result = (Complex *) palloc(sizeof(Complex)); result->x = x; @@ -130,7 +130,7 @@ complex_out(PG_FUNCTION_ARGS) external binary representation is. Most of the built-in data types try to provide a machine-independent binary representation. For complex, we will piggy-back on the binary I/O converters - for type float8: + for type float8: PostgreSQL automatically provides support for arrays of that type. The array type typically has the same name as the base type with the underscore character - (_) prepended. + (_) prepended. @@ -237,7 +237,7 @@ CREATE TYPE complex ( If the internal representation of the data type is variable-length, the internal representation must follow the standard layout for variable-length data: the first four bytes must be a char[4] field which is - never accessed directly (customarily named vl_len_). You + never accessed directly (customarily named vl_len_). You must use the SET_VARSIZE() macro to store the total size of the datum (including the length field itself) in this field and VARSIZE() to retrieve it. (These macros exist @@ -246,7 +246,7 @@ CREATE TYPE complex ( For further details see the description of the - command. + command. 
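At the SQL level the pieces above fit together roughly as follows; 'filename' stands in for the shared library path, and the tutorial files referenced earlier also define the binary receive/send functions that piggy-back on float8.

    -- declaring the input function first creates a shell type named complex
    CREATE FUNCTION complex_in(cstring) RETURNS complex
        AS 'filename', 'complex_in'
        LANGUAGE C IMMUTABLE STRICT;

    CREATE FUNCTION complex_out(complex) RETURNS cstring
        AS 'filename', 'complex_out'
        LANGUAGE C IMMUTABLE STRICT;

    CREATE TYPE complex (
        internallength = 16,      -- two float8 fields, pass-by-reference
        input = complex_in,
        output = complex_out,
        alignment = double
    );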
@@ -258,41 +258,41 @@ CREATE TYPE complex ( If the values of your data type vary in size (in internal form), it's - usually desirable to make the data type TOAST-able (see ). You should do this even if the values are always + usually desirable to make the data type TOAST-able (see ). You should do this even if the values are always too small to be compressed or stored externally, because - TOAST can save space on small data too, by reducing header + TOAST can save space on small data too, by reducing header overhead. - To support TOAST storage, the C functions operating on the data + To support TOAST storage, the C functions operating on the data type must always be careful to unpack any toasted values they are handed - by using PG_DETOAST_DATUM. (This detail is customarily hidden + by using PG_DETOAST_DATUM. (This detail is customarily hidden by defining type-specific GETARG_DATATYPE_P macros.) Then, when running the CREATE TYPE command, specify the - internal length as variable and select some appropriate storage - option other than plain. + internal length as variable and select some appropriate storage + option other than plain. If data alignment is unimportant (either just for a specific function or because the data type specifies byte alignment anyway) then it's possible - to avoid some of the overhead of PG_DETOAST_DATUM. You can use - PG_DETOAST_DATUM_PACKED instead (customarily hidden by - defining a GETARG_DATATYPE_PP macro) and using the macros - VARSIZE_ANY_EXHDR and VARDATA_ANY to access + to avoid some of the overhead of PG_DETOAST_DATUM. You can use + PG_DETOAST_DATUM_PACKED instead (customarily hidden by + defining a GETARG_DATATYPE_PP macro) and using the macros + VARSIZE_ANY_EXHDR and VARDATA_ANY to access a potentially-packed datum. Again, the data returned by these macros is not aligned even if the data type definition specifies an alignment. If the alignment is important you - must go through the regular PG_DETOAST_DATUM interface. + must go through the regular PG_DETOAST_DATUM interface. - Older code frequently declares vl_len_ as an - int32 field instead of char[4]. This is OK as long as - the struct definition has other fields that have at least int32 + Older code frequently declares vl_len_ as an + int32 field instead of char[4]. This is OK as long as + the struct definition has other fields that have at least int32 alignment. But it is dangerous to use such a struct definition when working with a potentially unaligned datum; the compiler may take it as license to assume the datum actually is aligned, leading to core dumps on @@ -301,28 +301,28 @@ CREATE TYPE complex ( - Another feature that's enabled by TOAST support is the - possibility of having an expanded in-memory data + Another feature that's enabled by TOAST support is the + possibility of having an expanded in-memory data representation that is more convenient to work with than the format that - is stored on disk. The regular or flat varlena storage format + is stored on disk. The regular or flat varlena storage format is ultimately just a blob of bytes; it cannot for example contain pointers, since it may get copied to other locations in memory. For complex data types, the flat format may be quite expensive to work - with, so PostgreSQL provides a way to expand + with, so PostgreSQL provides a way to expand the flat format into a representation that is more suited to computation, and then pass that format in-memory between functions of the data type. 
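The CREATE TYPE side of that advice, sketched with placeholder names (mytype and its I/O functions are not from the patch): a variable-length type becomes TOAST-able simply by declaring a non-plain storage strategy.

    CREATE TYPE mytype (
        internallength = variable,   -- varlena: starts with the vl_len_ header
        input = mytype_in,
        output = mytype_out,
        storage = extended           -- anything other than plain permits
                                     -- compression and out-of-line storage
    );

The C functions for such a type must still detoast their inputs, for example through a PG_GETARG_MYTYPE_P macro built on PG_DETOAST_DATUM as described above.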
To use expanded storage, a data type must define an expanded format that - follows the rules given in src/include/utils/expandeddatum.h, - and provide functions to expand a flat varlena value into - expanded format and flatten the expanded format back to the + follows the rules given in src/include/utils/expandeddatum.h, + and provide functions to expand a flat varlena value into + expanded format and flatten the expanded format back to the regular varlena representation. Then ensure that all C functions for the data type can accept either representation, possibly by converting one into the other immediately upon receipt. This does not require fixing all existing functions for the data type at once, because the standard - PG_DETOAST_DATUM macro is defined to convert expanded inputs + PG_DETOAST_DATUM macro is defined to convert expanded inputs into regular flat format. Therefore, existing functions that work with the flat varlena format will continue to work, though slightly inefficiently, with expanded inputs; they need not be converted until and @@ -344,14 +344,14 @@ CREATE TYPE complex ( will detoast external, short-header, and compressed varlena inputs, but not expanded inputs. Such a function can be defined as returning a pointer to a union of the flat varlena format and the expanded format. - Callers can use the VARATT_IS_EXPANDED_HEADER() macro to + Callers can use the VARATT_IS_EXPANDED_HEADER() macro to determine which format they received. - The TOAST infrastructure not only allows regular varlena + The TOAST infrastructure not only allows regular varlena values to be distinguished from expanded values, but also - distinguishes read-write and read-only pointers to + distinguishes read-write and read-only pointers to expanded values. C functions that only need to examine an expanded value, or will only change it in safe and non-semantically-visible ways, need not care which type of pointer they receive. C functions that @@ -368,7 +368,7 @@ CREATE TYPE complex ( For examples of working with expanded values, see the standard array infrastructure, particularly - src/backend/utils/adt/array_expanded.c. + src/backend/utils/adt/array_expanded.c. diff --git a/src/Makefile b/src/Makefile index 380da92c75..bcdbd9588a 100644 --- a/src/Makefile +++ b/src/Makefile @@ -28,8 +28,13 @@ SUBDIRS = \ pl \ makefiles \ test/regress \ + test/isolation \ test/perl +ifeq ($(with_llvm), yes) +SUBDIRS += backend/jit/llvm +endif + # There are too many interdependencies between the subdirectories, so # don't attempt parallel make here. 
.NOTPARALLEL: diff --git a/src/Makefile.global.in b/src/Makefile.global.in index e8b3a519cb..956fd274cd 100644 --- a/src/Makefile.global.in +++ b/src/Makefile.global.in @@ -171,6 +171,7 @@ endif # PGXS includedir_server = $(pkgincludedir)/server includedir_internal = $(pkgincludedir)/internal pgxsdir = $(pkglibdir)/pgxs +bitcodedir = $(pkglibdir)/bitcode ########################################################################## @@ -186,8 +187,12 @@ with_tcl = @with_tcl@ with_openssl = @with_openssl@ with_selinux = @with_selinux@ with_systemd = @with_systemd@ +with_gssapi = @with_gssapi@ +with_krb_srvnam = @with_krb_srvnam@ +with_ldap = @with_ldap@ with_libxml = @with_libxml@ with_libxslt = @with_libxslt@ +with_llvm = @with_llvm@ with_system_tzdata = @with_system_tzdata@ with_uuid = @with_uuid@ with_zlib = @with_zlib@ @@ -222,6 +227,11 @@ TCL_SHLIB_LD_LIBS = @TCL_SHLIB_LD_LIBS@ PTHREAD_CFLAGS = @PTHREAD_CFLAGS@ PTHREAD_LIBS = @PTHREAD_LIBS@ +LLVM_CONFIG = @LLVM_CONFIG@ +LLVM_BINPATH = @LLVM_BINPATH@ +CLANG = @CLANG@ +BITCODE_CFLAGS = @BITCODE_CFLAGS@ +BITCODE_CXXFLAGS = @BITCODE_CXXFLAGS@ ########################################################################## # @@ -231,6 +241,7 @@ PTHREAD_LIBS = @PTHREAD_LIBS@ CPP = @CPP@ CPPFLAGS = @CPPFLAGS@ +PG_SYSROOT = @PG_SYSROOT@ override CPPFLAGS := $(ICU_CFLAGS) $(CPPFLAGS) @@ -246,9 +257,16 @@ endif # not PGXS CC = @CC@ GCC = @GCC@ SUN_STUDIO_CC = @SUN_STUDIO_CC@ +CXX = @CXX@ CFLAGS = @CFLAGS@ CFLAGS_VECTOR = @CFLAGS_VECTOR@ CFLAGS_SSE42 = @CFLAGS_SSE42@ +CFLAGS_ARMV8_CRC32C = @CFLAGS_ARMV8_CRC32C@ +CXXFLAGS = @CXXFLAGS@ + +LLVM_CPPFLAGS = @LLVM_CPPFLAGS@ +LLVM_CFLAGS = @LLVM_CFLAGS@ +LLVM_CXXFLAGS = @LLVM_CXXFLAGS@ # Kind-of compilers @@ -270,20 +288,30 @@ LDAP_LIBS_FE = @LDAP_LIBS_FE@ LDAP_LIBS_BE = @LDAP_LIBS_BE@ UUID_LIBS = @UUID_LIBS@ UUID_EXTRA_OBJS = @UUID_EXTRA_OBJS@ +LLVM_LIBS=@LLVM_LIBS@ LD = @LD@ with_gnu_ld = @with_gnu_ld@ -# We want -L for libpgport.a and libpgcommon.a to be first in LDFLAGS. We -# also need LDFLAGS to be a "recursively expanded" variable, else adjustments -# to rpathdir don't work right. So we must NOT do LDFLAGS := something, -# meaning this has to be done first and elsewhere we must only do LDFLAGS += -# something. +# It's critical that within LDFLAGS, all -L switches pointing to build-tree +# directories come before any -L switches pointing to external directories. +# Otherwise it's possible for, e.g., a platform-provided copy of libpq.so +# to get linked in place of the one we've built. Therefore we adopt the +# convention that the first component of LDFLAGS is an extra variable +# LDFLAGS_INTERNAL, and -L and -l switches for PG's own libraries must be +# put into LDFLAGS_INTERNAL, so they will appear ahead of those for external +# libraries. +# +# We need LDFLAGS and LDFLAGS_INTERNAL to be "recursively expanded" variables, +# else adjustments to, e.g., rpathdir don't work right. So we must NOT do +# "LDFLAGS := something" anywhere, ditto for LDFLAGS_INTERNAL. +# These initial assignments must be "=" type, and elsewhere we must only do +# "LDFLAGS += something" or "LDFLAGS_INTERNAL += something". 
ifdef PGXS - LDFLAGS = -L$(libdir) + LDFLAGS_INTERNAL = -L$(libdir) else - LDFLAGS = -L$(top_builddir)/src/port -L$(top_builddir)/src/common + LDFLAGS_INTERNAL = -L$(top_builddir)/src/port -L$(top_builddir)/src/common endif -LDFLAGS += @LDFLAGS@ +LDFLAGS = $(LDFLAGS_INTERNAL) @LDFLAGS@ LDFLAGS_EX = @LDFLAGS_EX@ # LDFLAGS_SL might have already been assigned by calling makefile @@ -304,6 +332,7 @@ else endif perl_archlibexp = @perl_archlibexp@ perl_privlibexp = @perl_privlibexp@ +perl_includespec = @perl_includespec@ perl_embed_ccflags = @perl_embed_ccflags@ perl_embed_ldflags = @perl_embed_ldflags@ @@ -321,12 +350,40 @@ XGETTEXT = @XGETTEXT@ GZIP = gzip BZIP2 = bzip2 + +# Tree-wide build support + +# Just about every code subdirectory wants to have the generated headers +# available before building, but we don't want parallel makes all trying +# to build the same headers. These rules, together with the recursion rules +# below, ensure that we update the generated headers once, if needed, +# at the top level of any "make all/install/check/installcheck" request. +# If a particular subdirectory knows this isn't needed in itself or its +# children, it can set NO_GENERATED_HEADERS. + +all install check installcheck: submake-generated-headers + +.PHONY: submake-generated-headers + +submake-generated-headers: +ifndef NO_GENERATED_HEADERS +ifeq ($(MAKELEVEL),0) + $(MAKE) -C $(top_builddir)/src/backend generated-headers +endif +endif + + # Testing +# In much the same way as above, these rules ensure that we build a temp +# install tree just once in any recursive "make check". The additional test +# on abs_top_builddir prevents doing anything foolish to the root directory. + check: temp-install .PHONY: temp-install -temp-install: + +temp-install: | submake-generated-headers ifndef NO_TEMP_INSTALL ifneq ($(abs_top_builddir),) ifeq ($(MAKELEVEL),0) @@ -362,12 +419,14 @@ endef ifeq ($(enable_tap_tests),yes) define prove_installcheck -rm -rf $(CURDIR)/tmp_check/log +rm -rf '$(CURDIR)'/tmp_check +$(MKDIR_P) '$(CURDIR)'/tmp_check cd $(srcdir) && TESTDIR='$(CURDIR)' PATH="$(bindir):$$PATH" PGPORT='6$(DEF_PGPORT)' top_builddir='$(CURDIR)/$(top_builddir)' PG_REGRESS='$(CURDIR)/$(top_builddir)/src/test/regress/pg_regress' $(PROVE) $(PG_PROVE_FLAGS) $(PROVE_FLAGS) $(if $(PROVE_TESTS),$(PROVE_TESTS),t/*.pl) endef define prove_check -rm -rf $(CURDIR)/tmp_check/log +rm -rf '$(CURDIR)'/tmp_check +$(MKDIR_P) '$(CURDIR)'/tmp_check cd $(srcdir) && TESTDIR='$(CURDIR)' $(with_temp_install) PGPORT='6$(DEF_PGPORT)' PG_REGRESS='$(CURDIR)/$(top_builddir)/src/test/regress/pg_regress' $(PROVE) $(PG_PROVE_FLAGS) $(PROVE_FLAGS) $(if $(PROVE_TESTS),$(PROVE_TESTS),t/*.pl) endef @@ -404,8 +463,6 @@ STRIP_SHARED_LIB = @STRIP_SHARED_LIB@ DBTOEPUB = @DBTOEPUB@ FOP = @FOP@ -NSGMLS = @NSGMLS@ -OSX = @OSX@ XMLLINT = @XMLLINT@ XSLTPROC = @XSLTPROC@ @@ -440,9 +497,6 @@ host_tuple = @host@ host_os = @host_os@ host_cpu = @host_cpu@ -# Make HAVE_IPV6 available for initdb script creation -HAVE_IPV6= @HAVE_IPV6@ - # This is mainly for use on FreeBSD, where we have both a.out and elf # systems now. May be applicable to other systems to? ELF_SYSTEM= @ELF_SYS@ @@ -478,21 +532,26 @@ libpq_srcdir = $(top_srcdir)/src/interfaces/libpq libpq_builddir = $(top_builddir)/src/interfaces/libpq endif -# This macro is for use by libraries linking to libpq. (Because libpgport -# isn't created with the same link flags as libpq, it can't be used.) +# How to link to libpq. (This macro may be used as-is by backend extensions. 
+# Client-side code should go through libpq_pgport or libpq_pgport_shlib, +# instead.) libpq = -L$(libpq_builddir) -lpq -# This macro is for use by client executables (not libraries) that use libpq. +# libpq_pgport is for use by client executables (not libraries) that use libpq. # We force clients to pull symbols from the non-shared libraries libpgport # and libpgcommon rather than pulling some libpgport symbols from libpq just # because libpq uses those functions too. This makes applications less -# dependent on changes in libpq's usage of pgport. To do this we link to +# dependent on changes in libpq's usage of pgport (on platforms where we +# don't have symbol export control for libpq). To do this we link to # pgport before libpq. This does cause duplicate -lpgport's to appear -# on client link lines. +# on client link lines, since that also appears in $(LIBS). +# libpq_pgport_shlib is the same idea, but for use in client shared libraries. ifdef PGXS libpq_pgport = -L$(libdir) -lpgcommon -lpgport $(libpq) +libpq_pgport_shlib = -L$(libdir) -lpgcommon_shlib -lpgport_shlib $(libpq) else libpq_pgport = -L$(top_builddir)/src/common -lpgcommon -L$(top_builddir)/src/port -lpgport $(libpq) +libpq_pgport_shlib = -L$(top_builddir)/src/common -lpgcommon_shlib -L$(top_builddir)/src/port -lpgport_shlib $(libpq) endif # Cygwin seems to need ldap libraries to be mentioned here, too @@ -505,22 +564,19 @@ endif # # Commonly used submake targets -submake-libpq: +submake-libpq: | submake-generated-headers $(MAKE) -C $(libpq_builddir) all -submake-libpgport: +submake-libpgport: | submake-generated-headers $(MAKE) -C $(top_builddir)/src/port all $(MAKE) -C $(top_builddir)/src/common all -submake-libpgfeutils: +submake-libpgfeutils: | submake-generated-headers $(MAKE) -C $(top_builddir)/src/port all $(MAKE) -C $(top_builddir)/src/common all $(MAKE) -C $(top_builddir)/src/fe_utils all -submake-generated-headers: - $(MAKE) -C $(top_builddir)/src/backend generated-headers - -.PHONY: submake-libpq submake-libpgport submake-libpgfeutils submake-generated-headers +.PHONY: submake-libpq submake-libpgport submake-libpgfeutils ########################################################################## @@ -754,8 +810,10 @@ $(error GNU make 3.80 or newer is required. You are using version $(MAKE_VERSIO endif # This function is only for internal use below. It should be called -# using $(eval). It will set up a target so that it recurses into -# a given subdirectory. Note that to avoid a nasty bug in make 3.80, +# using $(eval). It will set up a target so that it recurses into a +# given subdirectory. For the tree-wide all/install/check/installcheck cases, +# ensure we do our one-time tasks before recursing (see targets above). +# Note that to avoid a nasty bug in make 3.80, # this function has to avoid using any complicated constructs (like # multiple targets on a line) and also not contain any lines that expand # to more than about 200 bytes. 
This is why we make it apply to just one @@ -766,7 +824,7 @@ endif define _create_recursive_target .PHONY: $(1)-$(2)-recurse $(1): $(1)-$(2)-recurse -$(1)-$(2)-recurse: $(if $(filter check, $(3)), temp-install) +$(1)-$(2)-recurse: $(if $(filter all install check installcheck, $(3)), submake-generated-headers) $(if $(filter check, $(3)), temp-install) $$(MAKE) -C $(2) $(3) endef # Note that the use of $$ on the last line above is important; we want @@ -810,6 +868,10 @@ ifndef COMPILE.c COMPILE.c = $(CC) $(CFLAGS) $(CPPFLAGS) -c endif +ifndef COMPILE.cc +COMPILE.cc = $(CXX) $(CXXFLAGS) $(CPPFLAGS) -c +endif + DEPDIR = .deps ifeq ($(GCC), yes) @@ -819,6 +881,10 @@ ifeq ($(GCC), yes) @if test ! -d $(DEPDIR); then mkdir -p $(DEPDIR); fi $(COMPILE.c) -o $@ $< -MMD -MP -MF $(DEPDIR)/$(*F).Po +%.o : %.cpp + @if test ! -d $(DEPDIR); then mkdir -p $(DEPDIR); fi + $(COMPILE.cc) -o $@ $< -MMD -MP -MF $(DEPDIR)/$(*F).Po + endif # GCC # Include all the dependency files generated for the current @@ -867,30 +933,50 @@ endif # enable_nls # gcov from foo.gcda (by "make coverage") # foo.c.gcov.out stdout captured when foo.c.gcov is created, mildly # interesting -# lcov.info lcov tracefile, built from gcda files in one directory, +# lcov_test.info +# lcov tracefile, built from gcda files in one directory, # later collected by "make coverage-html" +# lcov_base.info +# tracefile for zero counters for every file, so that +# even files that are not touched by tests are counted +# for the overall coverage rate ifeq ($(enable_coverage), yes) -# There is a strange interaction between lcov and existing .gcov -# output files. Hence the rm command and the ordering dependency. +# make coverage -- text output -gcda_files := $(wildcard *.gcda) +local_gcda_files = $(wildcard *.gcda) -lcov.info: $(gcda_files) - rm -f *.gcov .*.gcov - $(if $^,$(LCOV) -d . -c -o $@ $(LCOVFLAGS) --gcov-tool $(GCOV)) +coverage: $(local_gcda_files:.gcda=.c.gcov) -%.c.gcov: %.gcda | lcov.info +%.c.gcov: %.gcda $(GCOV) -b -f -p -o . $(GCOVFLAGS) $*.c >$*.c.gcov.out -coverage: $(gcda_files:.gcda=.c.gcov) lcov.info +# make coverage-html -- HTML output via lcov .PHONY: coverage-html -coverage-html: coverage +coverage-html: coverage-html-stamp + +GENHTML_FLAGS = -q --legend +GENHTML_TITLE = PostgreSQL $(VERSION) + +coverage-html-stamp: lcov_base.info lcov_test.info rm -rf coverage - mkdir coverage - $(GENHTML) --show-details --legend --output-directory=coverage --title=PostgreSQL --num-spaces=4 --prefix=$(abs_top_srcdir) `find . -name lcov.info -print` + $(GENHTML) $(GENHTML_FLAGS) -o coverage --title='$(GENHTML_TITLE)' --num-spaces=4 $(if $(filter no,$(vpath_build)),--prefix='$(abs_top_srcdir)') $^ + touch $@ + +LCOV += --gcov-tool $(GCOV) +LCOVFLAGS = -q --no-external + +all_gcno_files = $(shell find . -name '*.gcno' -print) + +lcov_base.info: $(all_gcno_files) + $(LCOV) $(LCOVFLAGS) -c -i -d . -d $(srcdir) -o $@ + +all_gcda_files = $(shell find . -name '*.gcda' -print) + +lcov_test.info: $(all_gcda_files) + $(LCOV) $(LCOVFLAGS) -c -d . -d $(srcdir) -o $@ # hook for clean-up @@ -898,8 +984,8 @@ clean distclean maintainer-clean: clean-coverage .PHONY: clean-coverage clean-coverage: - rm -rf coverage - rm -f *.gcda *.gcno lcov.info *.gcov .*.gcov *.gcov.out + rm -rf coverage coverage-html-stamp + rm -f *.gcda *.gcno lcov*.info *.gcov .*.gcov *.gcov.out # User-callable target to reset counts between test runs @@ -907,3 +993,56 @@ coverage-clean: rm -f `find . 
-name '*.gcda' -print` endif # enable_coverage + +########################################################################## +# +# LLVM support +# + +ifndef COMPILE.c.bc +# -Wno-ignored-attributes added so gnu_printf doesn't trigger +# warnings, when the main binary is compiled with C. +COMPILE.c.bc = $(CLANG) -Wno-ignored-attributes $(BITCODE_CFLAGS) $(CPPFLAGS) -flto=thin -emit-llvm -c +endif + +ifndef COMPILE.cxx.bc +COMPILE.cxx.bc = $(CLANG) -xc++ -Wno-ignored-attributes $(BITCODE_CXXFLAGS) $(CPPFLAGS) -flto=thin -emit-llvm -c +endif + +%.bc : %.c + $(COMPILE.c.bc) -o $@ $< + +%.bc : %.cpp + $(COMPILE.cxx.bc) -o $@ $< + +# Install LLVM bitcode module (for JITing). +# +# The arguments are: +# $(1) name of the module (e.g. an extension's name or postgres for core code) +# $(2) source objects, with .o suffix +# +# The many INSTALL_DATA invocations aren't particularly fast, it'd be +# good if we could coalesce them, but I didn't find a good way. +# +# Note: blank line at end of macro is necessary to let it be used in foreach +define install_llvm_module +$(MKDIR_P) '$(DESTDIR)${bitcodedir}/$(1)' +$(MKDIR_P) $(sort $(dir $(addprefix '$(DESTDIR)${bitcodedir}'/$(1)/, $(2)))) +$(foreach obj, ${2}, $(INSTALL_DATA) $(patsubst %.o,%.bc, $(obj)) '$(DESTDIR)${bitcodedir}'/$(1)/$(dir $(obj)) +) +cd '$(DESTDIR)${bitcodedir}' && $(LLVM_BINPATH)/llvm-lto -thinlto -thinlto-action=thinlink -o $(1).index.bc $(addprefix $(1)/,$(patsubst %.o,%.bc, $(2))) + +endef + +# Uninstall LLVM bitcode module. +# +# The arguments are: +# $(1) name of the module (e.g. an extension's name or postgres for core code) +# +# This intentionally doesn't use the explicit installed file list, +# seems too likely to change regularly. +define uninstall_llvm_module +rm -rf '$(DESTDIR)${bitcodedir}/$(1)/' +rm -f '$(DESTDIR)${bitcodedir}/$(1).index.bc' + +endef diff --git a/src/Makefile.shlib b/src/Makefile.shlib index 0ce6d2a145..f20ffac375 100644 --- a/src/Makefile.shlib +++ b/src/Makefile.shlib @@ -20,12 +20,16 @@ # # NAME Name of library to build (no suffix nor "lib" prefix) # OBJS List of object files to include in library -# SHLIB_LINK If shared library relies on other libraries, -# additional stuff to put in its link command +# SHLIB_LINK Stuff to append to library's link command +# (typically, -L and -l switches for external libraries) +# SHLIB_LINK_INTERNAL -L and -l switches for Postgres-supplied libraries # SHLIB_PREREQS Order-only prerequisites for library build target # SHLIB_EXPORTS (optional) Name of file containing list of symbols to # export, in the format "function_name number" # +# Don't use SHLIB_LINK for references to files in the build tree, or the +# wrong things will happen --- use SHLIB_LINK_INTERNAL for those! +# # When building a shared library, the following version information # must also be set. It should be omitted when building a dynamically # loadable module. @@ -60,16 +64,13 @@ # # Got that? Look at src/interfaces/libpq/Makefile for an example. # -# While the linker allows creation of most shared libraries, -# -Bsymbolic requires resolution of all symbols, making the -# compiler a better choice for shared library creation on ELF platforms. -# With the linker, -Bsymbolic requires the crt1.o startup object file. -# bjm 2001-02-10 COMPILER = $(CC) $(CFLAGS) LINK.static = $(AR) $(AROPT) +LDFLAGS_INTERNAL += $(SHLIB_LINK_INTERNAL) + ifdef SO_MAJOR_VERSION @@ -100,6 +101,7 @@ endif # Try to keep the sections in some kind of order, folks... 
override CFLAGS += $(CFLAGS_SL) +override CXXFLAGS += $(CFLAGS_SL) ifdef SO_MAJOR_VERSION # libraries ought to use this to refer to versioned gettext domain names override CPPFLAGS += -DSO_MAJOR_VERSION=$(SO_MAJOR_VERSION) @@ -142,6 +144,11 @@ ifeq ($(PORTNAME), openbsd) ifdef soname LINK.shared += -Wl,-x,-soname,$(soname) endif + BUILD.exports = ( echo '{ global:'; $(AWK) '/^[^\#]/ {printf "%s;\n",$$1}' $<; echo ' local: *; };' ) >$@ + exports_file = $(SHLIB_EXPORTS:%.txt=%.list) + ifneq (,$(exports_file)) + LINK.shared += -Wl,--version-script=$(exports_file) + endif SHLIB_LINK += -lc else LINK.shared = $(LD) -x -Bshareable -Bforcearchive @@ -157,6 +164,11 @@ ifeq ($(PORTNAME), freebsd) ifdef soname LINK.shared += -Wl,-x,-soname,$(soname) endif + BUILD.exports = ( echo '{ global:'; $(AWK) '/^[^\#]/ {printf "%s;\n",$$1}' $<; echo ' local: *; };' ) >$@ + exports_file = $(SHLIB_EXPORTS:%.txt=%.list) + ifneq (,$(exports_file)) + LINK.shared += -Wl,--version-script=$(exports_file) + endif else ifdef SO_MAJOR_VERSION shlib = lib$(NAME)$(DLSUFFIX).$(SO_MAJOR_VERSION).$(SO_MINOR_VERSION) @@ -171,6 +183,11 @@ ifeq ($(PORTNAME), netbsd) ifdef soname LINK.shared += -Wl,-x,-soname,$(soname) endif + BUILD.exports = ( echo '{ global:'; $(AWK) '/^[^\#]/ {printf "%s;\n",$$1}' $<; echo ' local: *; };' ) >$@ + exports_file = $(SHLIB_EXPORTS:%.txt=%.list) + ifneq (,$(exports_file)) + LINK.shared += -Wl,--version-script=$(exports_file) + endif else LINK.shared = $(LD) -x -Bshareable -Bforcearchive endif @@ -181,12 +198,12 @@ ifeq ($(PORTNAME), hpux) shlib = lib$(NAME)$(DLSUFFIX).$(SO_MAJOR_VERSION) endif ifeq ($(with_gnu_ld), yes) - LINK.shared = $(CC) -shared + LINK.shared = $(CC) -shared -Wl,-Bsymbolic ifdef soname LINK.shared += -Wl,-h -Wl,$(soname) endif else - LINK.shared = $(LD) -b + LINK.shared = $(LD) -b -Bsymbolic ifdef soname LINK.shared += +h $(soname) endif @@ -223,9 +240,9 @@ endif ifeq ($(PORTNAME), solaris) ifeq ($(GCC), yes) - LINK.shared = $(COMPILER) -shared + LINK.shared = $(COMPILER) -shared -Wl,-Bsymbolic else - LINK.shared = $(COMPILER) -G + LINK.shared = $(COMPILER) -G -Bsymbolic endif ifdef soname ifeq ($(with_gnu_ld), yes) @@ -314,13 +331,9 @@ else # PORTNAME == aix # AIX case -# There is no correct way to write a rule that generates two files. -# Rules with two targets don't have that meaning, they are merely -# shorthand for two otherwise separate rules. To be safe for parallel -# make, we must chain the dependencies like this. The semicolon is -# important, otherwise make will choose some built-in rule. - -$(stlib): $(shlib) ; +# See notes in src/backend/parser/Makefile about the following two rules +$(stlib): $(shlib) + touch $@ $(shlib): $(OBJS) | $(SHLIB_PREREQS) rm -f $(stlib) @@ -340,24 +353,19 @@ ifeq ($(PORTNAME), cygwin) # Cygwin case $(shlib): $(OBJS) | $(SHLIB_PREREQS) - $(CC) $(CFLAGS) -shared -o $@ $(OBJS) $(LDFLAGS) $(LDFLAGS_SL) $(SHLIB_LINK) $(LIBS) $(LDAP_LIBS_BE) + $(CC) $(CFLAGS) -shared -o $@ -Wl,--out-implib=$(stlib) $(OBJS) $(LDFLAGS) $(LDFLAGS_SL) $(SHLIB_LINK) $(LIBS) $(LDAP_LIBS_BE) -$(stlib): $(OBJS) | $(SHLIB_PREREQS) - rm -f $@ - $(LINK.static) $@ $^ - $(RANLIB) $@ +# see notes in src/backend/parser/Makefile about use of this type of rule +$(stlib): $(shlib) + touch $@ else # Win32 case -# There is no correct way to write a rule that generates two files. -# Rules with two targets don't have that meaning, they are merely -# shorthand for two otherwise separate rules. To be safe for parallel -# make, we must chain the dependencies like this. 
The semicolon is -# important, otherwise make will choose some built-in rule. - -$(stlib): $(shlib) ; +# See notes in src/backend/parser/Makefile about the following two rules +$(stlib): $(shlib) + touch $@ # XXX A backend that loads a module linked with libgcc_s_dw2-1.dll will exit # uncleanly, hence -static-libgcc. (Last verified with MinGW-w64 compilers diff --git a/src/backend/Makefile b/src/backend/Makefile index bce9d2c3eb..3a58bf6685 100644 --- a/src/backend/Makefile +++ b/src/backend/Makefile @@ -2,7 +2,7 @@ # # Makefile for the postgres backend # -# Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group +# Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group # Portions Copyright (c) 1994, Regents of the University of California # # src/backend/Makefile @@ -18,8 +18,10 @@ top_builddir = ../.. include $(top_builddir)/src/Makefile.global SUBDIRS = access bootstrap catalog parser commands executor foreign lib libpq \ - main nodes optimizer port postmaster regex replication rewrite \ - statistics storage tcop tsearch utils $(top_builddir)/src/timezone + main nodes optimizer partitioning port postmaster \ + regex replication rewrite \ + statistics storage tcop tsearch utils $(top_builddir)/src/timezone \ + jit include $(srcdir)/common.mk @@ -39,8 +41,8 @@ OBJS = $(SUBDIROBJS) $(LOCALOBJS) $(top_builddir)/src/port/libpgport_srv.a \ $(top_builddir)/src/common/libpgcommon_srv.a # We put libpgport and libpgcommon into OBJS, so remove it from LIBS; also add -# libldap -LIBS := $(filter-out -lpgport -lpgcommon, $(LIBS)) $(LDAP_LIBS_BE) +# libldap and ICU +LIBS := $(filter-out -lpgport -lpgcommon, $(LIBS)) $(LDAP_LIBS_BE) $(ICU_LIBS) # The backend doesn't need everything that's in LIBS, however LIBS := $(filter-out -lz -lreadline -ledit -ltermcap -lncurses -lcurses, $(LIBS)) @@ -51,14 +53,14 @@ endif ########################################################################## -all: submake-libpgport submake-schemapg postgres $(POSTGRES_IMP) +all: submake-libpgport submake-catalog-headers submake-utils-headers postgres $(POSTGRES_IMP) ifneq ($(PORTNAME), cygwin) ifneq ($(PORTNAME), win32) ifneq ($(PORTNAME), aix) postgres: $(OBJS) - $(CC) $(CFLAGS) $(LDFLAGS) $(LDFLAGS_EX) $(export_dynamic) $(call expand_subsys,$^) $(LIBS) $(ICU_LIBS) -o $@ + $(CC) $(CFLAGS) $(LDFLAGS) $(LDFLAGS_EX) $(export_dynamic) $(call expand_subsys,$^) $(LIBS) -o $@ endif endif @@ -69,13 +71,10 @@ ifeq ($(PORTNAME), cygwin) postgres: $(OBJS) $(CC) $(CFLAGS) $(LDFLAGS) $(LDFLAGS_EX) $(export_dynamic) -Wl,--stack,$(WIN32_STACK_RLIMIT) -Wl,--export-all-symbols -Wl,--out-implib=libpostgres.a $(call expand_subsys,$^) $(LIBS) -o $@ -# There is no correct way to write a rule that generates two files. -# Rules with two targets don't have that meaning, they are merely -# shorthand for two otherwise separate rules. To be safe for parallel -# make, we must chain the dependencies like this. The semicolon is -# important, otherwise make will choose some built-in rule. 
- -libpostgres.a: postgres ; +# libpostgres.a is actually built in the preceding rule, but we need this to +# ensure it's newer than postgres; see notes in src/backend/parser/Makefile +libpostgres.a: postgres + touch $@ endif # cygwin @@ -85,7 +84,10 @@ LIBS += -lsecur32 postgres: $(OBJS) $(WIN32RES) $(CC) $(CFLAGS) $(LDFLAGS) $(LDFLAGS_EX) -Wl,--stack=$(WIN32_STACK_RLIMIT) -Wl,--export-all-symbols -Wl,--out-implib=libpostgres.a $(call expand_subsys,$(OBJS)) $(WIN32RES) $(LIBS) -o $@$(X) -libpostgres.a: postgres ; +# libpostgres.a is actually built in the preceding rule, but we need this to +# ensure it's newer than postgres; see notes in src/backend/parser/Makefile +libpostgres.a: postgres + touch $@ endif # win32 @@ -109,14 +111,6 @@ endif endif # aix -# Update the commonly used headers before building the subdirectories -$(SUBDIRS:%=%-recursive): | generated-headers - -# src/port needs a convenient way to force just errcodes.h to get built -submake-errcodes: $(top_builddir)/src/include/utils/errcodes.h - -.PHONY: submake-errcodes - $(top_builddir)/src/port/libpgport_srv.a: | submake-libpgport @@ -129,32 +123,28 @@ postgres.o: $(OBJS) # The following targets are specified in make commands that appear in # the make files in our subdirectories. Note that it's important we # match the dependencies shown in the subdirectory makefiles! +# Also, in cases where a subdirectory makefile generates two files in +# what's really one step, such as bison producing both gram.h and gram.c, +# we must request making the one that is shown as the secondary (dependent) +# output, else the timestamp on it might be wrong. By project convention, +# the .h file is the dependent one for bison output, so we need only request +# that; but in other cases, request both for safety. parser/gram.h: parser/gram.y $(MAKE) -C parser gram.h storage/lmgr/lwlocknames.h: storage/lmgr/generate-lwlocknames.pl storage/lmgr/lwlocknames.txt - $(MAKE) -C storage/lmgr lwlocknames.h - -utils/errcodes.h: utils/generate-errcodes.pl utils/errcodes.txt - $(MAKE) -C utils errcodes.h - -# see explanation in parser/Makefile -utils/fmgrprotos.h: utils/fmgroids.h ; - -utils/fmgroids.h: utils/Gen_fmgrtab.pl catalog/Catalog.pm $(top_srcdir)/src/include/catalog/pg_proc.h - $(MAKE) -C utils $(notdir $@) - -utils/probes.h: utils/probes.d - $(MAKE) -C utils probes.h + $(MAKE) -C storage/lmgr lwlocknames.h lwlocknames.c # run this unconditionally to avoid needing to know its dependencies here: -catalog/schemapg.h: | submake-schemapg +submake-catalog-headers: + $(MAKE) -C catalog distprep generated-header-symlinks -submake-schemapg: - $(MAKE) -C catalog schemapg.h +# run this unconditionally to avoid needing to know its dependencies here: +submake-utils-headers: + $(MAKE) -C utils distprep generated-header-symlinks -.PHONY: submake-schemapg +.PHONY: submake-catalog-headers submake-utils-headers # Make symlinks for these headers in the include directory. That way # we can cut down on the -I options. 
Also, a symlink is automatically @@ -169,42 +159,18 @@ submake-schemapg: .PHONY: generated-headers -generated-headers: $(top_builddir)/src/include/parser/gram.h $(top_builddir)/src/include/catalog/schemapg.h $(top_builddir)/src/include/storage/lwlocknames.h $(top_builddir)/src/include/utils/errcodes.h $(top_builddir)/src/include/utils/fmgroids.h $(top_builddir)/src/include/utils/fmgrprotos.h $(top_builddir)/src/include/utils/probes.h +generated-headers: $(top_builddir)/src/include/parser/gram.h $(top_builddir)/src/include/storage/lwlocknames.h submake-catalog-headers submake-utils-headers $(top_builddir)/src/include/parser/gram.h: parser/gram.h prereqdir=`cd '$(dir $<)' >/dev/null && pwd` && \ cd '$(dir $@)' && rm -f $(notdir $@) && \ $(LN_S) "$$prereqdir/$(notdir $<)" . -$(top_builddir)/src/include/catalog/schemapg.h: catalog/schemapg.h - prereqdir=`cd '$(dir $<)' >/dev/null && pwd` && \ - cd '$(dir $@)' && rm -f $(notdir $@) && \ - $(LN_S) "$$prereqdir/$(notdir $<)" . - $(top_builddir)/src/include/storage/lwlocknames.h: storage/lmgr/lwlocknames.h prereqdir=`cd '$(dir $<)' >/dev/null && pwd` && \ cd '$(dir $@)' && rm -f $(notdir $@) && \ $(LN_S) "$$prereqdir/$(notdir $<)" . -$(top_builddir)/src/include/utils/errcodes.h: utils/errcodes.h - prereqdir=`cd '$(dir $<)' >/dev/null && pwd` && \ - cd '$(dir $@)' && rm -f $(notdir $@) && \ - $(LN_S) "$$prereqdir/$(notdir $<)" . - -$(top_builddir)/src/include/utils/fmgroids.h: utils/fmgroids.h - prereqdir=`cd '$(dir $<)' >/dev/null && pwd` && \ - cd '$(dir $@)' && rm -f $(notdir $@) && \ - $(LN_S) "$$prereqdir/$(notdir $<)" . - -$(top_builddir)/src/include/utils/fmgrprotos.h: utils/fmgrprotos.h - prereqdir=`cd '$(dir $<)' >/dev/null && pwd` && \ - cd '$(dir $@)' && rm -f $(notdir $@) && \ - $(LN_S) "$$prereqdir/$(notdir $<)" . - -$(top_builddir)/src/include/utils/probes.h: utils/probes.h - cd '$(dir $@)' && rm -f $(notdir $@) && \ - $(LN_S) "../../../$(subdir)/utils/probes.h" . 
- utils/probes.o: utils/probes.d $(SUBDIROBJS) $(DTRACE) $(DTRACEFLAGS) -C -G -s $(call expand_subsys,$^) -o $@ @@ -216,10 +182,10 @@ utils/probes.o: utils/probes.d $(SUBDIROBJS) distprep: $(MAKE) -C parser gram.c gram.h scan.c $(MAKE) -C bootstrap bootparse.c bootscanner.c - $(MAKE) -C catalog schemapg.h postgres.bki postgres.description postgres.shdescription + $(MAKE) -C catalog distprep $(MAKE) -C replication repl_gram.c repl_scanner.c syncrep_gram.c syncrep_scanner.c - $(MAKE) -C storage/lmgr lwlocknames.h - $(MAKE) -C utils fmgrtab.c fmgroids.h fmgrprotos.h errcodes.h + $(MAKE) -C storage/lmgr lwlocknames.h lwlocknames.c + $(MAKE) -C utils distprep $(MAKE) -C utils/misc guc-file.c $(MAKE) -C utils/sort qsort_tuple.c @@ -239,11 +205,19 @@ endif endif $(MAKE) -C catalog install-data $(MAKE) -C tsearch install-data + $(MAKE) -C utils install-data $(INSTALL_DATA) $(srcdir)/libpq/pg_hba.conf.sample '$(DESTDIR)$(datadir)/pg_hba.conf.sample' $(INSTALL_DATA) $(srcdir)/libpq/pg_ident.conf.sample '$(DESTDIR)$(datadir)/pg_ident.conf.sample' $(INSTALL_DATA) $(srcdir)/utils/misc/postgresql.conf.sample '$(DESTDIR)$(datadir)/postgresql.conf.sample' $(INSTALL_DATA) $(srcdir)/access/transam/recovery.conf.sample '$(DESTDIR)$(datadir)/recovery.conf.sample' +ifeq ($(with_llvm), yes) +install-bin: install-postgres-bitcode + +install-postgres-bitcode: $(OBJS) all + $(call install_llvm_module,postgres,$(call expand_subsys, $(filter-out $(top_builddir)/src/timezone/objfiles.txt, $(SUBDIROBJS)))) +endif + install-bin: postgres $(POSTGRES_IMP) installdirs $(INSTALL_PROGRAM) postgres$(X) '$(DESTDIR)$(bindir)/postgres$(X)' ifneq ($(PORTNAME), win32) @@ -297,22 +271,20 @@ endif endif $(MAKE) -C catalog uninstall-data $(MAKE) -C tsearch uninstall-data + $(MAKE) -C utils uninstall-data rm -f '$(DESTDIR)$(datadir)/pg_hba.conf.sample' \ '$(DESTDIR)$(datadir)/pg_ident.conf.sample' \ '$(DESTDIR)$(datadir)/postgresql.conf.sample' \ '$(DESTDIR)$(datadir)/recovery.conf.sample' +ifeq ($(with_llvm), yes) + $(call uninstall_llvm_module,postgres) +endif ########################################################################## clean: - rm -f $(LOCALOBJS) postgres$(X) $(POSTGRES_IMP) \ - $(top_builddir)/src/include/parser/gram.h \ - $(top_builddir)/src/include/catalog/schemapg.h \ - $(top_builddir)/src/include/storage/lwlocknames.h \ - $(top_builddir)/src/include/utils/fmgroids.h \ - $(top_builddir)/src/include/utils/fmgrprotos.h \ - $(top_builddir)/src/include/utils/probes.h + rm -f $(LOCALOBJS) postgres$(X) $(POSTGRES_IMP) ifeq ($(PORTNAME), cygwin) rm -f postgres.dll libpostgres.a endif @@ -321,28 +293,22 @@ ifeq ($(PORTNAME), win32) endif distclean: clean - rm -f port/tas.s port/dynloader.c port/pg_sema.c port/pg_shmem.c + rm -f port/tas.s port/pg_sema.c port/pg_shmem.c maintainer-clean: distclean + $(MAKE) -C catalog $@ + $(MAKE) -C utils $@ rm -f bootstrap/bootparse.c \ bootstrap/bootscanner.c \ parser/gram.c \ parser/gram.h \ parser/scan.c \ - catalog/schemapg.h \ - catalog/postgres.bki \ - catalog/postgres.description \ - catalog/postgres.shdescription \ replication/repl_gram.c \ replication/repl_scanner.c \ replication/syncrep_gram.c \ replication/syncrep_scanner.c \ storage/lmgr/lwlocknames.c \ storage/lmgr/lwlocknames.h \ - utils/fmgroids.h \ - utils/fmgrprotos.h \ - utils/fmgrtab.c \ - utils/errcodes.h \ utils/misc/guc-file.c \ utils/sort/qsort_tuple.c diff --git a/src/backend/access/brin/brin.c b/src/backend/access/brin/brin.c index efebeb035a..e95fbbcea7 100644 --- a/src/backend/access/brin/brin.c +++ 
b/src/backend/access/brin/brin.c @@ -4,7 +4,7 @@ * * See src/backend/access/brin/README for details. * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * IDENTIFICATION @@ -67,7 +67,7 @@ static BrinBuildState *initialize_brin_buildstate(Relation idxRel, BrinRevmap *revmap, BlockNumber pagesPerRange); static void terminate_brin_buildstate(BrinBuildState *state); static void brinsummarize(Relation index, Relation heapRel, BlockNumber pageRange, - double *numSummarized, double *numExisting); + bool include_partial, double *numSummarized, double *numExisting); static void form_and_insert_tuple(BrinBuildState *state); static void union_tuples(BrinDesc *bdesc, BrinMemTuple *a, BrinTuple *b); @@ -97,6 +97,7 @@ brinhandler(PG_FUNCTION_ARGS) amroutine->amclusterable = false; amroutine->ampredlocks = false; amroutine->amcanparallel = false; + amroutine->amcaninclude = false; amroutine->amkeytype = InvalidOid; amroutine->ambuild = brinbuild; @@ -187,9 +188,19 @@ brininsert(Relation idxRel, Datum *values, bool *nulls, brinGetTupleForHeapBlock(revmap, lastPageRange, &buf, &off, NULL, BUFFER_LOCK_SHARE, NULL); if (!lastPageTuple) - AutoVacuumRequestWork(AVW_BRINSummarizeRange, - RelationGetRelid(idxRel), - lastPageRange); + { + bool recorded; + + recorded = AutoVacuumRequestWork(AVW_BRINSummarizeRange, + RelationGetRelid(idxRel), + lastPageRange); + if (!recorded) + ereport(LOG, + (errcode(ERRCODE_PROGRAM_LIMIT_EXCEEDED), + errmsg("request for BRIN range summarization for index \"%s\" page %u was not recorded", + RelationGetRelationName(idxRel), + lastPageRange))); + } else LockBuffer(buf, BUFFER_LOCK_UNLOCK); } @@ -473,7 +484,8 @@ bringetbitmap(IndexScanDesc scan, TIDBitmap *tbm) */ Assert((key->sk_flags & SK_ISNULL) || (key->sk_collation == - bdesc->bd_tupdesc->attrs[keyattno - 1]->attcollation)); + TupleDescAttr(bdesc->bd_tupdesc, + keyattno - 1)->attcollation)); /* First time this column? look up consistent function */ if (consistentFn[keyattno - 1].fn_oid == InvalidOid) @@ -622,6 +634,7 @@ brinbuildCallback(Relation index, { FmgrInfo *addValue; BrinValues *col; + Form_pg_attribute attr = TupleDescAttr(state->bs_bdesc->bd_tupdesc, i); col = &state->bs_dtuple->bt_columns[i]; addValue = index_getprocinfo(index, i + 1, @@ -631,7 +644,7 @@ brinbuildCallback(Relation index, * Update dtuple state, if and as necessary. */ FunctionCall4Coll(addValue, - state->bs_bdesc->bd_tupdesc->attrs[i]->attcollation, + attr->attcollation, PointerGetDatum(state->bs_bdesc), PointerGetDatum(col), values[i], isnull[i]); @@ -683,7 +696,7 @@ brinbuild(Relation heap, Relation index, IndexInfo *indexInfo) XLogBeginInsert(); XLogRegisterData((char *) &xlrec, SizeOfBrinCreateIdx); - XLogRegisterBuffer(0, meta, REGBUF_WILL_INIT); + XLogRegisterBuffer(0, meta, REGBUF_WILL_INIT | REGBUF_STANDARD); recptr = XLogInsert(RM_BRIN_ID, XLOG_BRIN_CREATE_INDEX); @@ -704,7 +717,7 @@ brinbuild(Relation heap, Relation index, IndexInfo *indexInfo) * heap blocks in physical order. 
*/ reltuples = IndexBuildHeapScan(heap, index, indexInfo, false, - brinbuildCallback, (void *) state); + brinbuildCallback, (void *) state, NULL); /* process the final batch */ form_and_insert_tuple(state); @@ -740,7 +753,7 @@ brinbuildempty(Relation index) brin_metapage_init(BufferGetPage(metabuf), BrinGetPagesPerRange(index), BRIN_CURRENT_VERSION); MarkBufferDirty(metabuf); - log_newpage_buffer(metabuf, false); + log_newpage_buffer(metabuf, true); END_CRIT_SECTION(); UnlockReleaseBuffer(metabuf); @@ -789,7 +802,7 @@ brinvacuumcleanup(IndexVacuumInfo *info, IndexBulkDeleteResult *stats) brin_vacuum_scan(info->index, info->strategy); - brinsummarize(info->index, heapRel, BRIN_ALL_BLOCKRANGES, + brinsummarize(info->index, heapRel, BRIN_ALL_BLOCKRANGES, false, &stats->num_index_tuples, &stats->num_index_tuples); heap_close(heapRel, AccessShareLock); @@ -858,6 +871,12 @@ brin_summarize_range(PG_FUNCTION_ARGS) Relation heapRel; double numSummarized = 0; + if (RecoveryInProgress()) + ereport(ERROR, + (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE), + errmsg("recovery is in progress"), + errhint("BRIN control functions cannot be executed during recovery."))); + if (heapBlk64 > BRIN_ALL_BLOCKRANGES || heapBlk64 < 0) { char *blk = psprintf(INT64_FORMAT, heapBlk64); @@ -892,7 +911,7 @@ brin_summarize_range(PG_FUNCTION_ARGS) /* User must own the index (comparable to privileges needed for VACUUM) */ if (!pg_class_ownercheck(indexoid, GetUserId())) - aclcheck_error(ACLCHECK_NOT_OWNER, ACL_KIND_CLASS, + aclcheck_error(ACLCHECK_NOT_OWNER, OBJECT_INDEX, RelationGetRelationName(indexRel)); /* @@ -907,7 +926,7 @@ brin_summarize_range(PG_FUNCTION_ARGS) RelationGetRelationName(indexRel)))); /* OK, do it */ - brinsummarize(indexRel, heapRel, heapBlk, &numSummarized, NULL); + brinsummarize(indexRel, heapRel, heapBlk, true, &numSummarized, NULL); relation_close(indexRel, ShareUpdateExclusiveLock); relation_close(heapRel, ShareUpdateExclusiveLock); @@ -929,6 +948,12 @@ brin_desummarize_range(PG_FUNCTION_ARGS) Relation indexRel; bool done; + if (RecoveryInProgress()) + ereport(ERROR, + (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE), + errmsg("recovery is in progress"), + errhint("BRIN control functions cannot be executed during recovery."))); + if (heapBlk64 > MaxBlockNumber || heapBlk64 < 0) { char *blk = psprintf(INT64_FORMAT, heapBlk64); @@ -963,7 +988,7 @@ brin_desummarize_range(PG_FUNCTION_ARGS) /* User must own the index (comparable to privileges needed for VACUUM) */ if (!pg_class_ownercheck(indexoid, GetUserId())) - aclcheck_error(ACLCHECK_NOT_OWNER, ACL_KIND_CLASS, + aclcheck_error(ACLCHECK_NOT_OWNER, OBJECT_INDEX, RelationGetRelationName(indexRel)); /* @@ -1019,12 +1044,12 @@ brin_build_desc(Relation rel) for (keyno = 0; keyno < tupdesc->natts; keyno++) { FmgrInfo *opcInfoFn; + Form_pg_attribute attr = TupleDescAttr(tupdesc, keyno); opcInfoFn = index_getprocinfo(rel, keyno + 1, BRIN_PROCNUM_OPCINFO); opcinfo[keyno] = (BrinOpcInfo *) - DatumGetPointer(FunctionCall1(opcInfoFn, - tupdesc->attrs[keyno]->atttypid)); + DatumGetPointer(FunctionCall1(opcInfoFn, attr->atttypid)); totalstored += opcinfo[keyno]->oi_nstored; } @@ -1109,16 +1134,22 @@ initialize_brin_buildstate(Relation idxRel, BrinRevmap *revmap, static void terminate_brin_buildstate(BrinBuildState *state) { - /* release the last index buffer used */ + /* + * Release the last index buffer used. We might as well ensure that + * whatever free space remains in that page is available in FSM, too. 
+ */ if (!BufferIsInvalid(state->bs_currentInsertBuf)) { Page page; + Size freespace; + BlockNumber blk; page = BufferGetPage(state->bs_currentInsertBuf); - RecordPageWithFreeSpace(state->bs_irel, - BufferGetBlockNumber(state->bs_currentInsertBuf), - PageGetFreeSpace(page)); + freespace = PageGetFreeSpace(page); + blk = BufferGetBlockNumber(state->bs_currentInsertBuf); ReleaseBuffer(state->bs_currentInsertBuf); + RecordPageWithFreeSpace(state->bs_irel, blk, freespace); + FreeSpaceMapVacuumRange(state->bs_irel, blk, blk + 1); } brin_free_desc(state->bs_bdesc); @@ -1127,7 +1158,8 @@ terminate_brin_buildstate(BrinBuildState *state) } /* - * Summarize the given page range of the given index. + * On the given BRIN index, summarize the heap page range that corresponds + * to the heap block number given. * * This routine can run in parallel with insertions into the heap. To avoid * missing those values from the summary tuple, we first insert a placeholder @@ -1137,6 +1169,12 @@ terminate_brin_buildstate(BrinBuildState *state) * update of the index value happens in a loop, so that if somebody updates * the placeholder tuple after we read it, we detect the case and try again. * This ensures that the concurrently inserted tuples are not lost. + * + * A further corner case is this routine being asked to summarize the partial + * range at the end of the table. heapNumBlocks is the (possibly outdated) + * table size; if we notice that the requested range lies beyond that size, + * we re-compute the table size after inserting the placeholder tuple, to + * avoid missing pages that were appended recently. */ static void summarize_range(IndexInfo *indexInfo, BrinBuildState *state, Relation heapRel, @@ -1157,6 +1195,33 @@ summarize_range(IndexInfo *indexInfo, BrinBuildState *state, Relation heapRel, state->bs_rmAccess, &phbuf, heapBlk, phtup, phsz); + /* + * Compute range end. We hold ShareUpdateExclusive lock on table, so it + * cannot shrink concurrently (but it can grow). + */ + Assert(heapBlk % state->bs_pagesPerRange == 0); + if (heapBlk + state->bs_pagesPerRange > heapNumBlks) + { + /* + * If we're asked to scan what we believe to be the final range on the + * table (i.e. a range that might be partial) we need to recompute our + * idea of what the latest page is after inserting the placeholder + * tuple. Anyone that grows the table later will update the + * placeholder tuple, so it doesn't matter that we won't scan these + * pages ourselves. Careful: the table might have been extended + * beyond the current range, so clamp our result. + * + * Fortunately, this should occur infrequently. + */ + scanNumBlks = Min(RelationGetNumberOfBlocks(heapRel) - heapBlk, + state->bs_pagesPerRange); + } + else + { + /* Easy case: range is known to be complete */ + scanNumBlks = state->bs_pagesPerRange; + } + /* * Execute the partial heap scan covering the heap blocks in the specified * page range, summarizing the heap tuples in it. This scan stops just @@ -1167,11 +1232,9 @@ summarize_range(IndexInfo *indexInfo, BrinBuildState *state, Relation heapRel, * by transactions that are still in progress, among other corner cases. */ state->bs_currRangeStart = heapBlk; - scanNumBlks = heapBlk + state->bs_pagesPerRange <= heapNumBlks ? 
- state->bs_pagesPerRange : heapNumBlks - heapBlk; IndexBuildHeapRangeScan(heapRel, state->bs_irel, indexInfo, false, true, heapBlk, scanNumBlks, - brinbuildCallback, (void *) state); + brinbuildCallback, (void *) state, NULL); /* * Now we update the values obtained by the scan with the placeholder @@ -1232,6 +1295,8 @@ summarize_range(IndexInfo *indexInfo, BrinBuildState *state, Relation heapRel, * Summarize page ranges that are not already summarized. If pageRange is * BRIN_ALL_BLOCKRANGES then the whole table is scanned; otherwise, only the * page range containing the given heap page number is scanned. + * If include_partial is true, then the partial range at the end of the table + * is summarized, otherwise not. * * For each new index tuple inserted, *numSummarized (if not NULL) is * incremented; for each existing tuple, *numExisting (if not NULL) is @@ -1239,56 +1304,57 @@ summarize_range(IndexInfo *indexInfo, BrinBuildState *state, Relation heapRel, */ static void brinsummarize(Relation index, Relation heapRel, BlockNumber pageRange, - double *numSummarized, double *numExisting) + bool include_partial, double *numSummarized, double *numExisting) { BrinRevmap *revmap; BrinBuildState *state = NULL; IndexInfo *indexInfo = NULL; BlockNumber heapNumBlocks; - BlockNumber heapBlk; BlockNumber pagesPerRange; Buffer buf; BlockNumber startBlk; - BlockNumber endBlk; - - /* determine range of pages to process; nothing to do for an empty table */ - heapNumBlocks = RelationGetNumberOfBlocks(heapRel); - if (heapNumBlocks == 0) - return; revmap = brinRevmapInitialize(index, &pagesPerRange, NULL); + /* determine range of pages to process */ + heapNumBlocks = RelationGetNumberOfBlocks(heapRel); if (pageRange == BRIN_ALL_BLOCKRANGES) - { startBlk = 0; - endBlk = heapNumBlocks; - } else { startBlk = (pageRange / pagesPerRange) * pagesPerRange; + heapNumBlocks = Min(heapNumBlocks, startBlk + pagesPerRange); + } + if (startBlk > heapNumBlocks) + { /* Nothing to do if start point is beyond end of table */ - if (startBlk > heapNumBlocks) - { - brinRevmapTerminate(revmap); - return; - } - endBlk = startBlk + pagesPerRange; - if (endBlk > heapNumBlocks) - endBlk = heapNumBlocks; + brinRevmapTerminate(revmap); + return; } /* * Scan the revmap to find unsummarized items. */ buf = InvalidBuffer; - for (heapBlk = startBlk; heapBlk < endBlk; heapBlk += pagesPerRange) + for (; startBlk < heapNumBlocks; startBlk += pagesPerRange) { BrinTuple *tup; OffsetNumber off; + /* + * Unless requested to summarize even a partial range, go away now if + * we think the next range is partial. Caller would pass true when it + * is typically run once bulk data loading is done + * (brin_summarize_new_values), and false when it is typically the + * result of arbitrarily-scheduled maintenance command (vacuuming). 
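The arithmetic behind include_partial and the clamped scanNumBlks above is easy to see in isolation: walk the table in pages-per-range steps, clamp the final step to the actual number of blocks, and leave a trailing partial range alone unless the caller asked for it. A standalone sketch of that loop, with toy numbers rather than backend code:

    #include <stdio.h>

    /* Clamp a range that would run past the end of the table. */
    static unsigned
    range_length(unsigned start_blk, unsigned pages_per_range, unsigned nblocks)
    {
        unsigned remaining = nblocks - start_blk;

        return remaining < pages_per_range ? remaining : pages_per_range;
    }

    int
    main(void)
    {
        const unsigned pages_per_range = 4;
        const unsigned nblocks = 10;        /* table is 10 blocks long */
        int include_partial = 0;            /* vacuum-style caller */

        for (unsigned start = 0; start < nblocks; start += pages_per_range)
        {
            if (!include_partial && start + pages_per_range > nblocks)
                break;          /* leave the trailing partial range alone */
            printf("summarize blocks [%u, %u)\n",
                   start, start + range_length(start, pages_per_range, nblocks));
        }
        return 0;
    }

With include_partial set, the loop would also emit the final [8, 10) range, which is what an explicit summarization request wants and periodic maintenance does not.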
+ */ + if (!include_partial && + (startBlk + pagesPerRange > heapNumBlocks)) + break; + CHECK_FOR_INTERRUPTS(); - tup = brinGetTupleForHeapBlock(revmap, heapBlk, &buf, &off, NULL, + tup = brinGetTupleForHeapBlock(revmap, startBlk, &buf, &off, NULL, BUFFER_LOCK_SHARE, NULL); if (tup == NULL) { @@ -1301,7 +1367,7 @@ brinsummarize(Relation index, Relation heapRel, BlockNumber pageRange, pagesPerRange); indexInfo = BuildIndexInfo(index); } - summarize_range(indexInfo, state, heapRel, heapBlk, heapNumBlocks); + summarize_range(indexInfo, state, heapRel, startBlk, heapNumBlocks); /* and re-initialize state for the next range */ brin_memtuple_initialize(state->bs_dtuple, state->bs_bdesc); @@ -1398,14 +1464,15 @@ union_tuples(BrinDesc *bdesc, BrinMemTuple *a, BrinTuple *b) static void brin_vacuum_scan(Relation idxrel, BufferAccessStrategy strategy) { - bool vacuum_fsm = false; + BlockNumber nblocks; BlockNumber blkno; /* * Scan the index in physical order, and clean up any possible mess in * each page. */ - for (blkno = 0; blkno < RelationGetNumberOfBlocks(idxrel); blkno++) + nblocks = RelationGetNumberOfBlocks(idxrel); + for (blkno = 0; blkno < nblocks; blkno++) { Buffer buf; @@ -1414,15 +1481,15 @@ brin_vacuum_scan(Relation idxrel, BufferAccessStrategy strategy) buf = ReadBufferExtended(idxrel, MAIN_FORKNUM, blkno, RBM_NORMAL, strategy); - vacuum_fsm |= brin_page_cleanup(idxrel, buf); + brin_page_cleanup(idxrel, buf); ReleaseBuffer(buf); } /* - * If we made any change to the FSM, make sure the new info is visible all - * the way to the top. + * Update all upper pages in the index's FSM, as well. This ensures not + * only that we propagate leaf-page FSM updates made by brin_page_cleanup, + * but also that any pre-existing damage or out-of-dateness is repaired. */ - if (vacuum_fsm) - FreeSpaceMapVacuum(idxrel); + FreeSpaceMapVacuum(idxrel); } diff --git a/src/backend/access/brin/brin_inclusion.c b/src/backend/access/brin/brin_inclusion.c index 9c0a058ccb..6ce355c6a9 100644 --- a/src/backend/access/brin/brin_inclusion.c +++ b/src/backend/access/brin/brin_inclusion.c @@ -16,7 +16,7 @@ * writing is the INET type, where IPv6 values cannot be merged with IPv4 * values. * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * IDENTIFICATION @@ -59,10 +59,14 @@ /*- * The values stored in the bv_values arrays correspond to: * - * 0 - the union of the values in the block range - * 1 - whether an empty value is present in any tuple in the block range - * 2 - whether the values in the block range cannot be merged (e.g. an IPv6 - * address amidst IPv4 addresses). + * INCLUSION_UNION + * the union of the values in the block range + * INCLUSION_UNMERGEABLE + * whether the values in the block range cannot be merged + * (e.g. 
an IPv6 address amidst IPv4 addresses) + * INCLUSION_CONTAINS_EMPTY + * whether an empty value is present in any tuple + * in the block range */ #define INCLUSION_UNION 0 #define INCLUSION_UNMERGEABLE 1 @@ -157,7 +161,7 @@ brin_inclusion_add_value(PG_FUNCTION_ARGS) } attno = column->bv_attno; - attr = bdesc->bd_tupdesc->attrs[attno - 1]; + attr = TupleDescAttr(bdesc->bd_tupdesc, attno - 1); /* * If the recorded value is null, copy the new value (which we know to be @@ -516,7 +520,7 @@ brin_inclusion_union(PG_FUNCTION_ARGS) PG_RETURN_VOID(); attno = col_a->bv_attno; - attr = bdesc->bd_tupdesc->attrs[attno - 1]; + attr = TupleDescAttr(bdesc->bd_tupdesc, attno - 1); /* * Adjust "allnulls". If A doesn't have values, just copy the values from @@ -675,7 +679,7 @@ inclusion_get_strategy_procinfo(BrinDesc *bdesc, uint16 attno, Oid subtype, bool isNull; opfamily = bdesc->bd_index->rd_opfamily[attno - 1]; - attr = bdesc->bd_tupdesc->attrs[attno - 1]; + attr = TupleDescAttr(bdesc->bd_tupdesc, attno - 1); tuple = SearchSysCache4(AMOPSTRATEGY, ObjectIdGetDatum(opfamily), ObjectIdGetDatum(attr->atttypid), ObjectIdGetDatum(subtype), diff --git a/src/backend/access/brin/brin_minmax.c b/src/backend/access/brin/brin_minmax.c index 62fd90aabe..0f6aa33a45 100644 --- a/src/backend/access/brin/brin_minmax.c +++ b/src/backend/access/brin/brin_minmax.c @@ -2,7 +2,7 @@ * brin_minmax.c * Implementation of Min/Max opclass for BRIN * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * IDENTIFICATION @@ -90,7 +90,7 @@ brin_minmax_add_value(PG_FUNCTION_ARGS) } attno = column->bv_attno; - attr = bdesc->bd_tupdesc->attrs[attno - 1]; + attr = TupleDescAttr(bdesc->bd_tupdesc, attno - 1); /* * If the recorded value is null, store the new value (which we know to be @@ -260,7 +260,7 @@ brin_minmax_union(PG_FUNCTION_ARGS) PG_RETURN_VOID(); attno = col_a->bv_attno; - attr = bdesc->bd_tupdesc->attrs[attno - 1]; + attr = TupleDescAttr(bdesc->bd_tupdesc, attno - 1); /* * Adjust "allnulls". 
If A doesn't have values, just copy the values from @@ -347,7 +347,7 @@ minmax_get_strategy_procinfo(BrinDesc *bdesc, uint16 attno, Oid subtype, bool isNull; opfamily = bdesc->bd_index->rd_opfamily[attno - 1]; - attr = bdesc->bd_tupdesc->attrs[attno - 1]; + attr = TupleDescAttr(bdesc->bd_tupdesc, attno - 1); tuple = SearchSysCache4(AMOPSTRATEGY, ObjectIdGetDatum(opfamily), ObjectIdGetDatum(attr->atttypid), ObjectIdGetDatum(subtype), diff --git a/src/backend/access/brin/brin_pageops.c b/src/backend/access/brin/brin_pageops.c index 80f803e438..040cb62e55 100644 --- a/src/backend/access/brin/brin_pageops.c +++ b/src/backend/access/brin/brin_pageops.c @@ -2,7 +2,7 @@ * brin_pageops.c * Page-handling routines for BRIN indexes * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * IDENTIFICATION @@ -64,6 +64,7 @@ brin_doupdate(Relation idxrel, BlockNumber pagesPerRange, BrinTuple *oldtup; Size oldsz; Buffer newbuf; + BlockNumber newblk = InvalidBlockNumber; bool extended; Assert(newsz == MAXALIGN(newsz)); @@ -101,6 +102,8 @@ brin_doupdate(Relation idxrel, BlockNumber pagesPerRange, Assert(!extended); newbuf = InvalidBuffer; } + else + newblk = BufferGetBlockNumber(newbuf); } else { @@ -113,9 +116,15 @@ brin_doupdate(Relation idxrel, BlockNumber pagesPerRange, /* * Check that the old tuple wasn't updated concurrently: it might have - * moved someplace else entirely ... + * moved someplace else entirely, and for that matter the whole page + * might've become a revmap page. Note that in the first two cases + * checked here, the "oldlp" we just calculated is garbage; but + * PageGetItemId() is simple enough that it was safe to do that + * calculation anyway. 
*/ - if (!ItemIdIsNormal(oldlp)) + if (!BRIN_IS_REGULAR_PAGE(oldpage) || + oldoff > PageGetMaxOffsetNumber(oldpage) || + !ItemIdIsNormal(oldlp)) { LockBuffer(oldbuf, BUFFER_LOCK_UNLOCK); @@ -130,7 +139,7 @@ brin_doupdate(Relation idxrel, BlockNumber pagesPerRange, brin_initialize_empty_new_buffer(idxrel, newbuf); UnlockReleaseBuffer(newbuf); if (extended) - FreeSpaceMapVacuum(idxrel); + FreeSpaceMapVacuumRange(idxrel, newblk, newblk + 1); } return false; } @@ -146,11 +155,12 @@ brin_doupdate(Relation idxrel, BlockNumber pagesPerRange, LockBuffer(oldbuf, BUFFER_LOCK_UNLOCK); if (BufferIsValid(newbuf)) { + /* As above, initialize and record new page if we got one */ if (extended) brin_initialize_empty_new_buffer(idxrel, newbuf); UnlockReleaseBuffer(newbuf); if (extended) - FreeSpaceMapVacuum(idxrel); + FreeSpaceMapVacuumRange(idxrel, newblk, newblk + 1); } return false; } @@ -167,14 +177,6 @@ brin_doupdate(Relation idxrel, BlockNumber pagesPerRange, if (((BrinPageFlags(oldpage) & BRIN_EVACUATE_PAGE) == 0) && brin_can_do_samepage_update(oldbuf, origsz, newsz)) { - if (BufferIsValid(newbuf)) - { - /* as above */ - if (extended) - brin_initialize_empty_new_buffer(idxrel, newbuf); - UnlockReleaseBuffer(newbuf); - } - START_CRIT_SECTION(); if (!PageIndexTupleOverwrite(oldpage, oldoff, (Item) newtup, newsz)) elog(ERROR, "failed to replace BRIN tuple"); @@ -204,8 +206,15 @@ brin_doupdate(Relation idxrel, BlockNumber pagesPerRange, LockBuffer(oldbuf, BUFFER_LOCK_UNLOCK); - if (extended) - FreeSpaceMapVacuum(idxrel); + if (BufferIsValid(newbuf)) + { + /* As above, initialize and record new page if we got one */ + if (extended) + brin_initialize_empty_new_buffer(idxrel, newbuf); + UnlockReleaseBuffer(newbuf); + if (extended) + FreeSpaceMapVacuumRange(idxrel, newblk, newblk + 1); + } return true; } @@ -228,7 +237,6 @@ brin_doupdate(Relation idxrel, BlockNumber pagesPerRange, Buffer revmapbuf; ItemPointerData newtid; OffsetNumber newoff; - BlockNumber newblk = InvalidBlockNumber; Size freespace = 0; revmapbuf = brinLockRevmapPageForUpdate(revmap, heapBlk); @@ -241,7 +249,7 @@ brin_doupdate(Relation idxrel, BlockNumber pagesPerRange, * need to do that here. 
*/ if (extended) - brin_page_init(BufferGetPage(newbuf), BRIN_PAGETYPE_REGULAR); + brin_page_init(newpage, BRIN_PAGETYPE_REGULAR); PageIndexTupleDeleteNoCompact(oldpage, oldoff); newoff = PageAddItem(newpage, (Item) newtup, newsz, @@ -253,12 +261,9 @@ brin_doupdate(Relation idxrel, BlockNumber pagesPerRange, /* needed to update FSM below */ if (extended) - { - newblk = BufferGetBlockNumber(newbuf); freespace = br_page_get_freespace(newpage); - } - ItemPointerSet(&newtid, BufferGetBlockNumber(newbuf), newoff); + ItemPointerSet(&newtid, newblk, newoff); brinSetHeapBlockItemptr(revmapbuf, pagesPerRange, heapBlk, newtid); MarkBufferDirty(revmapbuf); @@ -305,9 +310,8 @@ brin_doupdate(Relation idxrel, BlockNumber pagesPerRange, if (extended) { - Assert(BlockNumberIsValid(newblk)); RecordPageWithFreeSpace(idxrel, newblk, freespace); - FreeSpaceMapVacuum(idxrel); + FreeSpaceMapVacuumRange(idxrel, newblk, newblk + 1); } return true; @@ -344,6 +348,7 @@ brin_doinsert(Relation idxrel, BlockNumber pagesPerRange, Page page; BlockNumber blk; OffsetNumber off; + Size freespace = 0; Buffer revmapbuf; ItemPointerData tid; bool extended; @@ -404,15 +409,16 @@ brin_doinsert(Relation idxrel, BlockNumber pagesPerRange, /* Execute the actual insertion */ START_CRIT_SECTION(); if (extended) - brin_page_init(BufferGetPage(*buffer), BRIN_PAGETYPE_REGULAR); + brin_page_init(page, BRIN_PAGETYPE_REGULAR); off = PageAddItem(page, (Item) tup, itemsz, InvalidOffsetNumber, false, false); if (off == InvalidOffsetNumber) - elog(ERROR, "could not insert new index tuple to page"); + elog(ERROR, "failed to add BRIN tuple to new page"); MarkBufferDirty(*buffer); - BRIN_elog((DEBUG2, "inserted tuple (%u,%u) for range starting at %u", - blk, off, heapBlk)); + /* needed to update FSM below */ + if (extended) + freespace = br_page_get_freespace(page); ItemPointerSet(&tid, blk, off); brinSetHeapBlockItemptr(revmapbuf, pagesPerRange, heapBlk, tid); @@ -450,8 +456,14 @@ brin_doinsert(Relation idxrel, BlockNumber pagesPerRange, LockBuffer(*buffer, BUFFER_LOCK_UNLOCK); LockBuffer(revmapbuf, BUFFER_LOCK_UNLOCK); + BRIN_elog((DEBUG2, "inserted tuple (%u,%u) for range starting at %u", + blk, off, heapBlk)); + if (extended) - FreeSpaceMapVacuum(idxrel); + { + RecordPageWithFreeSpace(idxrel, blk, freespace); + FreeSpaceMapVacuumRange(idxrel, blk, blk + 1); + } return off; } @@ -470,7 +482,7 @@ brin_page_init(Page page, uint16 type) } /* - * Initialize a new BRIN index' metapage. + * Initialize a new BRIN index's metapage. */ void brin_metapage_init(Page page, BlockNumber pagesPerRange, uint16 version) @@ -491,6 +503,14 @@ brin_metapage_init(Page page, BlockNumber pagesPerRange, uint16 version) * revmap page to be created when the index is. */ metadata->lastRevmapPage = 0; + + /* + * Set pd_lower just past the end of the metadata. This is essential, + * because without doing so, metadata will be lost if xlog.c compresses + * the page. + */ + ((PageHeader) page)->pd_lower = + ((char *) metadata + sizeof(BrinMetaPageData)) - (char *) page; } /* @@ -585,17 +605,22 @@ brin_evacuate_page(Relation idxRel, BlockNumber pagesPerRange, } /* - * Given a BRIN index page, initialize it if necessary, and record it into the - * FSM if necessary. Return value is true if the FSM itself needs "vacuuming". + * Given a BRIN index page, initialize it if necessary, and record its + * current free space in the FSM. 
+ * * The main use for this is when, during vacuuming, an uninitialized page is * found, which could be the result of relation extension followed by a crash * before the page can be used. + * + * Here, we don't bother to update upper FSM pages, instead expecting that our + * caller (brin_vacuum_scan) will fix them at the end of the scan. Elsewhere + * in this file, it's generally a good idea to propagate additions of free + * space into the upper FSM pages immediately. */ -bool +void brin_page_cleanup(Relation idxrel, Buffer buf) { Page page = BufferGetPage(buf); - Size freespace; /* * If a page was left uninitialized, initialize it now; also record it in @@ -617,7 +642,7 @@ brin_page_cleanup(Relation idxrel, Buffer buf) { brin_initialize_empty_new_buffer(idxrel, buf); LockBuffer(buf, BUFFER_LOCK_UNLOCK); - return true; + return; } LockBuffer(buf, BUFFER_LOCK_UNLOCK); } @@ -625,24 +650,18 @@ brin_page_cleanup(Relation idxrel, Buffer buf) /* Nothing to be done for non-regular index pages */ if (BRIN_IS_META_PAGE(BufferGetPage(buf)) || BRIN_IS_REVMAP_PAGE(BufferGetPage(buf))) - return false; + return; /* Measure free space and record it */ - freespace = br_page_get_freespace(page); - if (freespace > GetRecordedFreeSpace(idxrel, BufferGetBlockNumber(buf))) - { - RecordPageWithFreeSpace(idxrel, BufferGetBlockNumber(buf), freespace); - return true; - } - - return false; + RecordPageWithFreeSpace(idxrel, BufferGetBlockNumber(buf), + br_page_get_freespace(page)); } /* * Return a pinned and exclusively locked buffer which can be used to insert an * index item of size itemsz (caller must ensure not to request sizes * impossible to fulfill). If oldbuf is a valid buffer, it is also locked (in - * an order determined to avoid deadlocks.) + * an order determined to avoid deadlocks). * * If we find that the old page is no longer a regular index page (because * of a revmap extension), the old buffer is unlocked and we return @@ -651,12 +670,18 @@ brin_page_cleanup(Relation idxrel, Buffer buf) * If there's no existing page with enough free space to accommodate the new * item, the relation is extended. If this happens, *extended is set to true, * and it is the caller's responsibility to initialize the page (and WAL-log - * that fact) prior to use. + * that fact) prior to use. The caller should also update the FSM with the + * page's remaining free space after the insertion. + * + * Note that the caller is not expected to update FSM unless *extended is set + * true. This policy means that we'll update FSM when a page is created, and + * when it's found to have too little space for a desired tuple insertion, + * but not every single time we add a tuple to the page. * - * Note that in some corner cases it is possible for this routine to extend the - * relation and then not return the buffer. It is this routine's + * Note that in some corner cases it is possible for this routine to extend + * the relation and then not return the new page. It is this routine's * responsibility to WAL-log the page initialization and to record the page in - * FSM if that happens. Such a buffer may later be reused by this routine. + * FSM if that happens, since the caller certainly can't do it. 
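The policy spelled out here, record a page's free space only when the page is created or found too small, then propagate just that one block upward with FreeSpaceMapVacuumRange(blk, blk + 1), can be modelled with a toy two-level map. The layout below is purely illustrative and is not the on-disk FSM structure:

    #include <stdio.h>

    #define LEAVES_PER_PARENT 4
    #define NPARENTS          2
    #define NBLOCKS           (LEAVES_PER_PARENT * NPARENTS)

    /* Toy two-level FSM: per-block free space, per-group maxima, global max. */
    static unsigned leaf[NBLOCKS];
    static unsigned parent[NPARENTS];
    static unsigned root;

    /* Analogue of RecordPageWithFreeSpace: touch one leaf only. */
    static void
    record_free_space(unsigned blk, unsigned freespace)
    {
        leaf[blk] = freespace;
    }

    /*
     * Analogue of vacuuming only [start, end): rebuild just the upper-level
     * entries whose subtrees intersect that range, instead of rescanning the
     * whole map.
     */
    static void
    fsm_vacuum_range(unsigned start, unsigned end)
    {
        for (unsigned p = start / LEAVES_PER_PARENT;
             p <= (end - 1) / LEAVES_PER_PARENT; p++)
        {
            parent[p] = 0;
            for (unsigned i = 0; i < LEAVES_PER_PARENT; i++)
                if (leaf[p * LEAVES_PER_PARENT + i] > parent[p])
                    parent[p] = leaf[p * LEAVES_PER_PARENT + i];
        }
        root = parent[0] > parent[1] ? parent[0] : parent[1];
    }

    int
    main(void)
    {
        record_free_space(5, 128);      /* e.g. a newly extended index page */
        fsm_vacuum_range(5, 6);         /* propagate just that block upward */
        printf("root advertises %u bytes free\n", root);
        return 0;
    }

The point of the range form is to avoid rescanning the entire map every time a single page is extended, while still making the new space visible from the top of the tree.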
*/ static Buffer brin_getinsertbuffer(Relation irel, Buffer oldbuf, Size itemsz, @@ -670,22 +695,22 @@ brin_getinsertbuffer(Relation irel, Buffer oldbuf, Size itemsz, /* callers must have checked */ Assert(itemsz <= BrinMaxItemSize); - *extended = false; - if (BufferIsValid(oldbuf)) oldblk = BufferGetBlockNumber(oldbuf); else oldblk = InvalidBlockNumber; + /* Choose initial target page, re-using existing target if known */ + newblk = RelationGetTargetBlock(irel); + if (newblk == InvalidBlockNumber) + newblk = GetPageWithFreeSpace(irel, itemsz); + /* * Loop until we find a page with sufficient free space. By the time we * return to caller out of this loop, both buffers are valid and locked; - * if we have to restart here, neither buffer is locked and buf is not a - * pinned buffer. + * if we have to restart here, neither page is locked and newblk isn't + * pinned (if it's even valid). */ - newblk = RelationGetTargetBlock(irel); - if (newblk == InvalidBlockNumber) - newblk = GetPageWithFreeSpace(irel, itemsz); for (;;) { Buffer buf; @@ -693,6 +718,8 @@ brin_getinsertbuffer(Relation irel, Buffer oldbuf, Size itemsz, CHECK_FOR_INTERRUPTS(); + *extended = false; + if (newblk == InvalidBlockNumber) { /* @@ -727,9 +754,9 @@ brin_getinsertbuffer(Relation irel, Buffer oldbuf, Size itemsz, /* * We lock the old buffer first, if it's earlier than the new one; but - * before we do, we need to check that it hasn't been turned into a - * revmap page concurrently; if we detect that it happened, give up - * and tell caller to start over. + * then we need to check that it hasn't been turned into a revmap page + * concurrently. If we detect that that happened, give up and tell + * caller to start over. */ if (BufferIsValid(oldbuf) && oldblk < newblk) { @@ -747,16 +774,20 @@ brin_getinsertbuffer(Relation irel, Buffer oldbuf, Size itemsz, * it first. */ if (*extended) - { brin_initialize_empty_new_buffer(irel, buf); - /* shouldn't matter, but don't confuse caller */ - *extended = false; - } if (extensionLockHeld) UnlockRelationForExtension(irel, ExclusiveLock); ReleaseBuffer(buf); + + if (*extended) + { + FreeSpaceMapVacuumRange(irel, newblk, newblk + 1); + /* shouldn't matter, but don't confuse caller */ + *extended = false; + } + return InvalidBuffer; } } @@ -771,9 +802,6 @@ brin_getinsertbuffer(Relation irel, Buffer oldbuf, Size itemsz, /* * We have a new buffer to insert into. Check that the new page has * enough free space, and return it if it does; otherwise start over. - * Note that we allow for the FSM to be out of date here, and in that - * case we update it and move on. - * * (br_page_get_freespace also checks that the FSM didn't hand us a * page that has since been repurposed for the revmap.) */ @@ -781,16 +809,7 @@ brin_getinsertbuffer(Relation irel, Buffer oldbuf, Size itemsz, BrinMaxItemSize : br_page_get_freespace(page); if (freespace >= itemsz) { - RelationSetTargetBlock(irel, BufferGetBlockNumber(buf)); - - /* - * Since the target block specification can get lost on cache - * invalidations, make sure we update the more permanent FSM with - * data about it before going away. - */ - if (*extended) - RecordPageWithFreeSpace(irel, BufferGetBlockNumber(buf), - freespace); + RelationSetTargetBlock(irel, newblk); /* * Lock the old buffer if not locked already. 
Note that in this @@ -818,6 +837,7 @@ brin_getinsertbuffer(Relation irel, Buffer oldbuf, Size itemsz, if (*extended) { brin_initialize_empty_new_buffer(irel, buf); + /* since this should not happen, skip FreeSpaceMapVacuum */ ereport(ERROR, (errcode(ERRCODE_PROGRAM_LIMIT_EXCEEDED), @@ -831,6 +851,10 @@ brin_getinsertbuffer(Relation irel, Buffer oldbuf, Size itemsz, if (BufferIsValid(oldbuf) && oldblk <= newblk) LockBuffer(oldbuf, BUFFER_LOCK_UNLOCK); + /* + * Update the FSM with the new, presumably smaller, freespace value + * for this page, then search for a new target page. + */ newblk = RecordAndGetPageWithFreeSpace(irel, newblk, freespace, itemsz); } } @@ -845,6 +869,9 @@ brin_getinsertbuffer(Relation irel, Buffer oldbuf, Size itemsz, * there is no mechanism to get the space back and the index would bloat. * Also, because we would not WAL-log the action that would initialize the * page, the page would go uninitialized in a standby (or after recovery). + * + * While we record the page in FSM here, caller is responsible for doing FSM + * upper-page update if that seems appropriate. */ static void brin_initialize_empty_new_buffer(Relation idxrel, Buffer buffer) diff --git a/src/backend/access/brin/brin_revmap.c b/src/backend/access/brin/brin_revmap.c index 22f2076887..f0dd72ac67 100644 --- a/src/backend/access/brin/brin_revmap.c +++ b/src/backend/access/brin/brin_revmap.c @@ -12,7 +12,7 @@ * the metapage. When the revmap needs to be expanded, all tuples on the * regular BRIN page at that block (if any) are moved out of the way. * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * IDENTIFICATION @@ -315,7 +315,7 @@ brinGetTupleForHeapBlock(BrinRevmap *revmap, BlockNumber heapBlk, * * Index must be locked in ShareUpdateExclusiveLock mode. * - * Return FALSE if caller should retry. + * Return false if caller should retry. */ bool brinRevmapDesummarizeRange(Relation idxrel, BlockNumber heapBlk) @@ -615,7 +615,7 @@ revmap_physical_extend(BrinRevmap *revmap) /* * Ok, we have now locked the metapage and the target block. Re-initialize - * it as a revmap page. + * the target block as a revmap page, and update the metapage. */ START_CRIT_SECTION(); @@ -624,6 +624,17 @@ revmap_physical_extend(BrinRevmap *revmap) MarkBufferDirty(buf); metadata->lastRevmapPage = mapBlk; + + /* + * Set pd_lower just past the end of the metadata. This is essential, + * because without doing so, metadata will be lost if xlog.c compresses + * the page. (We must do this here because pre-v11 versions of PG did not + * set the metapage's pd_lower correctly, so a pg_upgraded index might + * contain the wrong value.) 
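Setting pd_lower just past the metadata matters because everything between pd_lower and pd_upper is treated as a hole that full-page-image compression (and the masking code that follows) may discard. A minimal standalone sketch of that layout, using mock header and metadata structs rather than the real PageHeaderData and BrinMetaPageData:

    #include <stdio.h>
    #include <stdint.h>

    #define PAGE_SIZE 8192

    /* Mock page header; the real PageHeaderData has more fields. */
    typedef struct
    {
        uint16_t pd_lower;      /* end of the "used" area at page start */
        uint16_t pd_upper;      /* start of the tuple data area */
    } MockPageHeader;

    /* Mock metapage payload stored right after the header. */
    typedef struct
    {
        uint32_t lastRevmapPage;
        uint32_t pagesPerRange;
    } MockMetaPageData;

    int
    main(void)
    {
        static char page[PAGE_SIZE];
        MockPageHeader *hdr = (MockPageHeader *) page;
        MockMetaPageData *meta = (MockMetaPageData *) (page + sizeof(MockPageHeader));

        meta->pagesPerRange = 128;
        meta->lastRevmapPage = 1;

        /* Claim the metadata bytes so they are not part of the "hole". */
        hdr->pd_lower = (uint16_t) (((char *) (meta + 1)) - page);
        hdr->pd_upper = PAGE_SIZE;

        /* Only the hole may be thrown away or masked; the metadata survives. */
        printf("metadata ends at %u, hole is %u bytes\n",
               hdr->pd_lower, hdr->pd_upper - hdr->pd_lower);
        return 0;
    }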
+ */ + ((PageHeader) metapage)->pd_lower = + ((char *) metadata + sizeof(BrinMetaPageData)) - (char *) metapage; + MarkBufferDirty(revmap->rm_metaBuf); if (RelationNeedsWAL(revmap->rm_irel)) @@ -635,7 +646,7 @@ revmap_physical_extend(BrinRevmap *revmap) XLogBeginInsert(); XLogRegisterData((char *) &xlrec, SizeOfBrinRevmapExtend); - XLogRegisterBuffer(0, revmap->rm_metaBuf, 0); + XLogRegisterBuffer(0, revmap->rm_metaBuf, REGBUF_STANDARD); XLogRegisterBuffer(1, buf, REGBUF_WILL_INIT); diff --git a/src/backend/access/brin/brin_tuple.c b/src/backend/access/brin/brin_tuple.c index ed5b4b108d..00316b899c 100644 --- a/src/backend/access/brin/brin_tuple.c +++ b/src/backend/access/brin/brin_tuple.c @@ -23,7 +23,7 @@ * Note the size of the null bitmask may not be the same as that of the * datum array. * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * IDENTIFICATION @@ -559,7 +559,7 @@ brin_deconstruct_tuple(BrinDesc *brdesc, datumno < brdesc->bd_info[attnum]->oi_nstored; datumno++) { - Form_pg_attribute thisatt = diskdsc->attrs[stored]; + Form_pg_attribute thisatt = TupleDescAttr(diskdsc, stored); if (thisatt->attlen == -1) { diff --git a/src/backend/access/brin/brin_validate.c b/src/backend/access/brin/brin_validate.c index b4acf2b6f3..35f6ccacce 100644 --- a/src/backend/access/brin/brin_validate.c +++ b/src/backend/access/brin/brin_validate.c @@ -3,7 +3,7 @@ * brin_validate.c * Opclass validator for BRIN. * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * IDENTIFICATION diff --git a/src/backend/access/brin/brin_xlog.c b/src/backend/access/brin/brin_xlog.c index dff7198a39..b2871e78aa 100644 --- a/src/backend/access/brin/brin_xlog.c +++ b/src/backend/access/brin/brin_xlog.c @@ -2,7 +2,7 @@ * brin_xlog.c * XLog replay routines for BRIN indexes * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * IDENTIFICATION @@ -234,6 +234,17 @@ brin_xlog_revmap_extend(XLogReaderState *record) metadata->lastRevmapPage = xlrec->targetBlk; PageSetLSN(metapg, lsn); + + /* + * Set pd_lower just past the end of the metadata. This is essential, + * because without doing so, metadata will be lost if xlog.c + * compresses the page. (We must do this here because pre-v11 + * versions of PG did not set the metapage's pd_lower correctly, so a + * pg_upgraded index might contain the wrong value.) + */ + ((PageHeader) metapg)->pd_lower = + ((char *) metadata + sizeof(BrinMetaPageData)) - (char *) metapg; + MarkBufferDirty(metabuf); } @@ -331,14 +342,20 @@ void brin_mask(char *pagedata, BlockNumber blkno) { Page page = (Page) pagedata; + PageHeader pagehdr = (PageHeader) page; - mask_page_lsn(page); + mask_page_lsn_and_checksum(page); mask_page_hint_bits(page); - if (BRIN_IS_REGULAR_PAGE(page)) + /* + * Regular brin pages contain unused space which needs to be masked. + * Similarly for meta pages, but mask it only if pd_lower appears to have + * been set correctly. 
+ */ + if (BRIN_IS_REGULAR_PAGE(page) || + (BRIN_IS_META_PAGE(page) && pagehdr->pd_lower > SizeOfPageHeaderData)) { - /* Regular brin pages contain unused space which needs to be masked. */ mask_unused_space(page); } } diff --git a/src/backend/access/common/Makefile b/src/backend/access/common/Makefile index fb27944b89..f130b6e350 100644 --- a/src/backend/access/common/Makefile +++ b/src/backend/access/common/Makefile @@ -13,6 +13,6 @@ top_builddir = ../../../.. include $(top_builddir)/src/Makefile.global OBJS = bufmask.o heaptuple.o indextuple.o printsimple.o printtup.o \ - reloptions.o scankey.o tupconvert.o tupdesc.o + reloptions.o scankey.o session.o tupconvert.o tupdesc.o include $(top_srcdir)/src/backend/common.mk diff --git a/src/backend/access/common/bufmask.c b/src/backend/access/common/bufmask.c index 10253d3354..806f28e421 100644 --- a/src/backend/access/common/bufmask.c +++ b/src/backend/access/common/bufmask.c @@ -5,12 +5,12 @@ * in a page which can be different when the WAL is generated * and when the WAL is applied. * - * Portions Copyright (c) 2016-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 2016-2018, PostgreSQL Global Development Group * * Contains common routines required for masking a page. * * IDENTIFICATION - * src/backend/storage/buffer/bufmask.c + * src/backend/access/common/bufmask.c * *------------------------------------------------------------------------- */ @@ -23,15 +23,17 @@ * mask_page_lsn * * In consistency checks, the LSN of the two pages compared will likely be - * different because of concurrent operations when the WAL is generated - * and the state of the page when WAL is applied. + * different because of concurrent operations when the WAL is generated and + * the state of the page when WAL is applied. Also, mask out checksum as + * masking anything else on page means checksum is not going to match as well. */ void -mask_page_lsn(Page page) +mask_page_lsn_and_checksum(Page page) { PageHeader phdr = (PageHeader) page; PageXLogRecPtrSet(phdr->pd_lsn, (uint64) MASK_MARKER); + phdr->pd_checksum = MASK_MARKER; } /* diff --git a/src/backend/access/common/heaptuple.c b/src/backend/access/common/heaptuple.c index 584a202ab5..28127b311f 100644 --- a/src/backend/access/common/heaptuple.c +++ b/src/backend/access/common/heaptuple.c @@ -45,7 +45,7 @@ * and we'd like to still refer to them via C struct offsets. * * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * @@ -58,6 +58,7 @@ #include "postgres.h" #include "access/sysattr.h" +#include "access/tupdesc_details.h" #include "access/tuptoaster.h" #include "executor/tuptable.h" #include "utils/expandeddatum.h" @@ -76,6 +77,39 @@ * ---------------------------------------------------------------- */ +/* + * Return the missing value of an attribute, or NULL if there isn't one. 
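The idea behind getmissingattr is that a column added with a constant default is not physically present in tuples written before the ALTER; the descriptor keeps a per-column (present, value) pair and readers substitute it, falling back to NULL otherwise. A cut-down standalone illustration with plain ints in place of Datums and invented names:

    #include <stdbool.h>
    #include <stdio.h>

    /* Cut-down analogue of AttrMissing: a default recorded at ALTER TABLE time. */
    typedef struct
    {
        bool am_present;
        int  am_value;
    } ToyMissing;

    /*
     * Return the substitute value for attribute attnum (1-based), or flag it
     * as NULL when no default was recorded.
     */
    static int
    toy_getmissingattr(const ToyMissing *missing, int attnum, bool *isnull)
    {
        if (missing[attnum - 1].am_present)
        {
            *isnull = false;
            return missing[attnum - 1].am_value;
        }
        *isnull = true;
        return 0;
    }

    int
    main(void)
    {
        /* column 3 was added with DEFAULT 42, column 4 with no default */
        ToyMissing missing[4] = {{false, 0}, {false, 0}, {true, 42}, {false, 0}};
        bool isnull;
        int val;

        val = toy_getmissingattr(missing, 3, &isnull);
        printf("attr 3 -> %d (null=%d)\n", val, isnull);
        val = toy_getmissingattr(missing, 4, &isnull);
        printf("attr 4 -> %d (null=%d)\n", val, isnull);
        return 0;
    }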
+ */ +Datum +getmissingattr(TupleDesc tupleDesc, + int attnum, bool *isnull) +{ + Form_pg_attribute att; + + Assert(attnum <= tupleDesc->natts); + Assert(attnum > 0); + + att = TupleDescAttr(tupleDesc, attnum - 1); + + if (att->atthasmissing) + { + AttrMissing *attrmiss; + + Assert(tupleDesc->constr); + Assert(tupleDesc->constr->missing); + + attrmiss = tupleDesc->constr->missing + (attnum - 1); + + if (attrmiss->am_present) + { + *isnull = false; + return attrmiss->am_value; + } + } + + *isnull = true; + return PointerGetDatum(NULL); +} /* * heap_compute_data_size @@ -89,7 +123,6 @@ heap_compute_data_size(TupleDesc tupleDesc, Size data_length = 0; int i; int numberOfAttributes = tupleDesc->natts; - Form_pg_attribute *att = tupleDesc->attrs; for (i = 0; i < numberOfAttributes; i++) { @@ -100,7 +133,7 @@ heap_compute_data_size(TupleDesc tupleDesc, continue; val = values[i]; - atti = att[i]; + atti = TupleDescAttr(tupleDesc, i); if (ATT_IS_PACKABLE(atti) && VARATT_CAN_MAKE_SHORT(DatumGetPointer(val))) @@ -133,6 +166,131 @@ heap_compute_data_size(TupleDesc tupleDesc, return data_length; } +/* + * Per-attribute helper for heap_fill_tuple and other routines building tuples. + * + * Fill in either a data value or a bit in the null bitmask + */ +static inline void +fill_val(Form_pg_attribute att, + bits8 **bit, + int *bitmask, + char **dataP, + uint16 *infomask, + Datum datum, + bool isnull) +{ + Size data_length; + char *data = *dataP; + + /* + * If we're building a null bitmap, set the appropriate bit for the + * current column value here. + */ + if (bit != NULL) + { + if (*bitmask != HIGHBIT) + *bitmask <<= 1; + else + { + *bit += 1; + **bit = 0x0; + *bitmask = 1; + } + + if (isnull) + { + *infomask |= HEAP_HASNULL; + return; + } + + **bit |= *bitmask; + } + + /* + * XXX we use the att_align macros on the pointer value itself, not on an + * offset. This is a bit of a hack. + */ + if (att->attbyval) + { + /* pass-by-value */ + data = (char *) att_align_nominal(data, att->attalign); + store_att_byval(data, datum, att->attlen); + data_length = att->attlen; + } + else if (att->attlen == -1) + { + /* varlena */ + Pointer val = DatumGetPointer(datum); + + *infomask |= HEAP_HASVARWIDTH; + if (VARATT_IS_EXTERNAL(val)) + { + if (VARATT_IS_EXTERNAL_EXPANDED(val)) + { + /* + * we want to flatten the expanded value so that the + * constructed tuple doesn't depend on it + */ + ExpandedObjectHeader *eoh = DatumGetEOHP(datum); + + data = (char *) att_align_nominal(data, + att->attalign); + data_length = EOH_get_flat_size(eoh); + EOH_flatten_into(eoh, data, data_length); + } + else + { + *infomask |= HEAP_HASEXTERNAL; + /* no alignment, since it's short by definition */ + data_length = VARSIZE_EXTERNAL(val); + memcpy(data, val, data_length); + } + } + else if (VARATT_IS_SHORT(val)) + { + /* no alignment for short varlenas */ + data_length = VARSIZE_SHORT(val); + memcpy(data, val, data_length); + } + else if (VARLENA_ATT_IS_PACKABLE(att) && + VARATT_CAN_MAKE_SHORT(val)) + { + /* convert to short varlena -- no alignment */ + data_length = VARATT_CONVERTED_SHORT_SIZE(val); + SET_VARSIZE_SHORT(data, data_length); + memcpy(data + 1, VARDATA(val), data_length - 1); + } + else + { + /* full 4-byte header varlena */ + data = (char *) att_align_nominal(data, + att->attalign); + data_length = VARSIZE(val); + memcpy(data, val, data_length); + } + } + else if (att->attlen == -2) + { + /* cstring ... 
never needs alignment */ + *infomask |= HEAP_HASVARWIDTH; + Assert(att->attalign == 'c'); + data_length = strlen(DatumGetCString(datum)) + 1; + memcpy(data, DatumGetPointer(datum), data_length); + } + else + { + /* fixed-length pass-by-reference */ + data = (char *) att_align_nominal(data, att->attalign); + Assert(att->attlen > 0); + data_length = att->attlen; + memcpy(data, DatumGetPointer(datum), data_length); + } + + data += data_length; + *dataP = data; +} + /* * heap_fill_tuple * Load data portion of a tuple from values/isnull arrays @@ -152,7 +310,6 @@ heap_fill_tuple(TupleDesc tupleDesc, int bitmask; int i; int numberOfAttributes = tupleDesc->natts; - Form_pg_attribute *att = tupleDesc->attrs; #ifdef USE_ASSERT_CHECKING char *start = data; @@ -174,110 +331,15 @@ heap_fill_tuple(TupleDesc tupleDesc, for (i = 0; i < numberOfAttributes; i++) { - Size data_length; - - if (bit != NULL) - { - if (bitmask != HIGHBIT) - bitmask <<= 1; - else - { - bitP += 1; - *bitP = 0x0; - bitmask = 1; - } - - if (isnull[i]) - { - *infomask |= HEAP_HASNULL; - continue; - } - - *bitP |= bitmask; - } - - /* - * XXX we use the att_align macros on the pointer value itself, not on - * an offset. This is a bit of a hack. - */ - - if (att[i]->attbyval) - { - /* pass-by-value */ - data = (char *) att_align_nominal(data, att[i]->attalign); - store_att_byval(data, values[i], att[i]->attlen); - data_length = att[i]->attlen; - } - else if (att[i]->attlen == -1) - { - /* varlena */ - Pointer val = DatumGetPointer(values[i]); - - *infomask |= HEAP_HASVARWIDTH; - if (VARATT_IS_EXTERNAL(val)) - { - if (VARATT_IS_EXTERNAL_EXPANDED(val)) - { - /* - * we want to flatten the expanded value so that the - * constructed tuple doesn't depend on it - */ - ExpandedObjectHeader *eoh = DatumGetEOHP(values[i]); - - data = (char *) att_align_nominal(data, - att[i]->attalign); - data_length = EOH_get_flat_size(eoh); - EOH_flatten_into(eoh, data, data_length); - } - else - { - *infomask |= HEAP_HASEXTERNAL; - /* no alignment, since it's short by definition */ - data_length = VARSIZE_EXTERNAL(val); - memcpy(data, val, data_length); - } - } - else if (VARATT_IS_SHORT(val)) - { - /* no alignment for short varlenas */ - data_length = VARSIZE_SHORT(val); - memcpy(data, val, data_length); - } - else if (VARLENA_ATT_IS_PACKABLE(att[i]) && - VARATT_CAN_MAKE_SHORT(val)) - { - /* convert to short varlena -- no alignment */ - data_length = VARATT_CONVERTED_SHORT_SIZE(val); - SET_VARSIZE_SHORT(data, data_length); - memcpy(data + 1, VARDATA(val), data_length - 1); - } - else - { - /* full 4-byte header varlena */ - data = (char *) att_align_nominal(data, - att[i]->attalign); - data_length = VARSIZE(val); - memcpy(data, val, data_length); - } - } - else if (att[i]->attlen == -2) - { - /* cstring ... never needs alignment */ - *infomask |= HEAP_HASVARWIDTH; - Assert(att[i]->attalign == 'c'); - data_length = strlen(DatumGetCString(values[i])) + 1; - memcpy(data, DatumGetPointer(values[i]), data_length); - } - else - { - /* fixed-length pass-by-reference */ - data = (char *) att_align_nominal(data, att[i]->attalign); - Assert(att[i]->attlen > 0); - data_length = att[i]->attlen; - memcpy(data, DatumGetPointer(values[i]), data_length); - } - - data += data_length; + Form_pg_attribute attr = TupleDescAttr(tupleDesc, i); + + fill_val(attr, + bitP ? &bitP : NULL, + &bitmask, + &data, + infomask, + values ? values[i] : PointerGetDatum(NULL), + isnull ? 
isnull[i] : true); } Assert((data - start) == data_size); @@ -290,14 +352,24 @@ heap_fill_tuple(TupleDesc tupleDesc, */ /* ---------------- - * heap_attisnull - returns TRUE iff tuple attribute is not present + * heap_attisnull - returns true iff tuple attribute is not present * ---------------- */ bool -heap_attisnull(HeapTuple tup, int attnum) +heap_attisnull(HeapTuple tup, int attnum, TupleDesc tupleDesc) { + /* + * We allow a NULL tupledesc for relations not expected to have missing + * values, such as catalog relations and indexes. + */ + Assert(!tupleDesc || attnum <= tupleDesc->natts); if (attnum > (int) HeapTupleHeaderGetNatts(tup->t_data)) - return true; + { + if (tupleDesc && TupleDescAttr(tupleDesc, attnum - 1)->atthasmissing) + return false; + else + return true; + } if (attnum > 0) { @@ -354,7 +426,6 @@ nocachegetattr(HeapTuple tuple, TupleDesc tupleDesc) { HeapTupleHeader tup = tuple->t_data; - Form_pg_attribute *att = tupleDesc->attrs; char *tp; /* ptr to data part of tuple */ bits8 *bp = tup->t_bits; /* ptr to null bitmap in tuple */ bool slow = false; /* do we have to walk attrs? */ @@ -404,15 +475,15 @@ nocachegetattr(HeapTuple tuple, if (!slow) { + Form_pg_attribute att; + /* * If we get here, there are no nulls up to and including the target * attribute. If we have a cached offset, we can use it. */ - if (att[attnum]->attcacheoff >= 0) - { - return fetchatt(att[attnum], - tp + att[attnum]->attcacheoff); - } + att = TupleDescAttr(tupleDesc, attnum); + if (att->attcacheoff >= 0) + return fetchatt(att, tp + att->attcacheoff); /* * Otherwise, check for non-fixed-length attrs up to and including @@ -425,7 +496,7 @@ nocachegetattr(HeapTuple tuple, for (j = 0; j <= attnum; j++) { - if (att[j]->attlen <= 0) + if (TupleDescAttr(tupleDesc, j)->attlen <= 0) { slow = true; break; @@ -448,29 +519,32 @@ nocachegetattr(HeapTuple tuple, * fixed-width columns, in hope of avoiding future visits to this * routine. */ - att[0]->attcacheoff = 0; + TupleDescAttr(tupleDesc, 0)->attcacheoff = 0; /* we might have set some offsets in the slow path previously */ - while (j < natts && att[j]->attcacheoff > 0) + while (j < natts && TupleDescAttr(tupleDesc, j)->attcacheoff > 0) j++; - off = att[j - 1]->attcacheoff + att[j - 1]->attlen; + off = TupleDescAttr(tupleDesc, j - 1)->attcacheoff + + TupleDescAttr(tupleDesc, j - 1)->attlen; for (; j < natts; j++) { - if (att[j]->attlen <= 0) + Form_pg_attribute att = TupleDescAttr(tupleDesc, j); + + if (att->attlen <= 0) break; - off = att_align_nominal(off, att[j]->attalign); + off = att_align_nominal(off, att->attalign); - att[j]->attcacheoff = off; + att->attcacheoff = off; - off += att[j]->attlen; + off += att->attlen; } Assert(j > attnum); - off = att[attnum]->attcacheoff; + off = TupleDescAttr(tupleDesc, attnum)->attcacheoff; } else { @@ -490,6 +564,8 @@ nocachegetattr(HeapTuple tuple, off = 0; for (i = 0;; i++) /* loop exit is at "break" */ { + Form_pg_attribute att = TupleDescAttr(tupleDesc, i); + if (HeapTupleHasNulls(tuple) && att_isnull(i, bp)) { usecache = false; @@ -497,9 +573,9 @@ nocachegetattr(HeapTuple tuple, } /* If we know the next offset, we can skip the rest */ - if (usecache && att[i]->attcacheoff >= 0) - off = att[i]->attcacheoff; - else if (att[i]->attlen == -1) + if (usecache && att->attcacheoff >= 0) + off = att->attcacheoff; + else if (att->attlen == -1) { /* * We can only cache the offset for a varlena attribute if the @@ -508,11 +584,11 @@ nocachegetattr(HeapTuple tuple, * either an aligned or unaligned value. 
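The attcacheoff handling in this function amounts to: an attribute's offset can be computed once and cached as long as every earlier attribute is fixed-width and non-null, with alignment applied before each column. A standalone sketch with made-up column descriptors standing in for attlen and attalign:

    #include <stdio.h>

    /* Simplified column descriptor: fixed length and required alignment. */
    typedef struct
    {
        int len;        /* attlen analogue; all fixed-width here */
        int align;      /* attalign analogue, as a byte count */
        int cacheoff;   /* attcacheoff analogue, -1 = unknown */
    } ToyColumn;

    #define ALIGN_UP(off, a) (((off) + (a) - 1) & ~((a) - 1))

    /* Fill in cached offsets; valid because every column is fixed-width. */
    static void
    cache_offsets(ToyColumn *cols, int ncols)
    {
        int off = 0;

        for (int i = 0; i < ncols; i++)
        {
            off = ALIGN_UP(off, cols[i].align);
            cols[i].cacheoff = off;
            off += cols[i].len;
        }
    }

    int
    main(void)
    {
        /* e.g. a 2-byte, an 8-byte and a 4-byte column */
        ToyColumn cols[] = {{2, 2, -1}, {8, 8, -1}, {4, 4, -1}};

        cache_offsets(cols, 3);
        for (int i = 0; i < 3; i++)
            printf("column %d starts at offset %d\n", i + 1, cols[i].cacheoff);
        return 0;
    }

Once a variable-width or nullable column appears, later offsets depend on the actual tuple contents, which is exactly the "slow" path the backend code falls back to.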
*/ if (usecache && - off == att_align_nominal(off, att[i]->attalign)) - att[i]->attcacheoff = off; + off == att_align_nominal(off, att->attalign)) + att->attcacheoff = off; else { - off = att_align_pointer(off, att[i]->attalign, -1, + off = att_align_pointer(off, att->attalign, -1, tp + off); usecache = false; } @@ -520,23 +596,23 @@ nocachegetattr(HeapTuple tuple, else { /* not varlena, so safe to use att_align_nominal */ - off = att_align_nominal(off, att[i]->attalign); + off = att_align_nominal(off, att->attalign); if (usecache) - att[i]->attcacheoff = off; + att->attcacheoff = off; } if (i == attnum) break; - off = att_addlength_pointer(off, att[i]->attlen, tp + off); + off = att_addlength_pointer(off, att->attlen, tp + off); - if (usecache && att[i]->attlen <= 0) + if (usecache && att->attlen <= 0) usecache = false; } } - return fetchatt(att[attnum], tp + off); + return fetchatt(TupleDescAttr(tupleDesc, attnum), tp + off); } /* ---------------- @@ -646,6 +722,265 @@ heap_copytuple_with_tuple(HeapTuple src, HeapTuple dest) memcpy((char *) dest->t_data, (char *) src->t_data, src->t_len); } +/* + * Expand a tuple which has less attributes than required. For each attribute + * not present in the sourceTuple, if there is a missing value that will be + * used. Otherwise the attribute will be set to NULL. + * + * The source tuple must have less attributes than the required number. + * + * Only one of targetHeapTuple and targetMinimalTuple may be supplied. The + * other argument must be NULL. + */ +static void +expand_tuple(HeapTuple *targetHeapTuple, + MinimalTuple *targetMinimalTuple, + HeapTuple sourceTuple, + TupleDesc tupleDesc) +{ + AttrMissing *attrmiss = NULL; + int attnum; + int firstmissingnum = 0; + bool hasNulls = HeapTupleHasNulls(sourceTuple); + HeapTupleHeader targetTHeader; + HeapTupleHeader sourceTHeader = sourceTuple->t_data; + int sourceNatts = HeapTupleHeaderGetNatts(sourceTHeader); + int natts = tupleDesc->natts; + int sourceNullLen; + int targetNullLen; + Size sourceDataLen = sourceTuple->t_len - sourceTHeader->t_hoff; + Size targetDataLen; + Size len; + int hoff; + bits8 *nullBits = NULL; + int bitMask = 0; + char *targetData; + uint16 *infoMask; + + Assert((targetHeapTuple && !targetMinimalTuple) + || (!targetHeapTuple && targetMinimalTuple)); + + Assert(sourceNatts < natts); + + sourceNullLen = (hasNulls ? BITMAPLEN(sourceNatts) : 0); + + targetDataLen = sourceDataLen; + + if (tupleDesc->constr && + tupleDesc->constr->missing) + { + /* + * If there are missing values we want to put them into the tuple. + * Before that we have to compute the extra length for the values + * array and the variable length data. + */ + attrmiss = tupleDesc->constr->missing; + + /* + * Find the first item in attrmiss for which we don't have a value in + * the source. We can ignore all the missing entries before that. + */ + for (firstmissingnum = sourceNatts; + firstmissingnum < natts; + firstmissingnum++) + { + if (attrmiss[firstmissingnum].am_present) + break; + else + hasNulls = true; + } + + /* + * Now walk the missing attributes. If there is a missing value + * make space for it. Otherwise, it's going to be NULL. 
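The space accounting described here reserves room only for absent attributes that have a recorded default; the others merely set bits in the null bitmap. A rough standalone sketch of that computation, assuming fixed-width toy attributes and invented field names:

    #include <stdbool.h>
    #include <stdio.h>

    #define ALIGN_UP(off, a) (((off) + (a) - 1) & ~((a) - 1))

    /* Simplified fixed-width attribute with an optional recorded default. */
    typedef struct
    {
        int  len;
        int  align;
        bool has_default;
    } ToyAttr;

    int
    main(void)
    {
        /* The tuple was written with 2 attributes; the descriptor now has 5. */
        ToyAttr attrs[5] = {
            {4, 4, false}, {8, 8, false},               /* physically present */
            {4, 4, true}, {2, 2, false}, {8, 8, true}   /* added later */
        };
        int source_natts = 2;
        int natts = 5;
        int source_len = 16;        /* bytes of existing attribute data */
        int target_len = source_len;
        bool has_nulls = false;

        /*
         * Reserve space only for attributes that have a recorded default;
         * the rest just become NULL bits in the bitmap.
         */
        for (int i = source_natts; i < natts; i++)
        {
            if (attrs[i].has_default)
            {
                target_len = ALIGN_UP(target_len, attrs[i].align);
                target_len += attrs[i].len;
            }
            else
                has_nulls = true;
        }

        printf("expanded data area: %d bytes, needs null bitmap: %d\n",
               target_len, has_nulls);
        return 0;
    }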
+ */ + for (attnum = firstmissingnum; + attnum < natts; + attnum++) + { + if (attrmiss[attnum].am_present) + { + Form_pg_attribute att = TupleDescAttr(tupleDesc, attnum); + + targetDataLen = att_align_datum(targetDataLen, + att->attalign, + att->attlen, + attrmiss[attnum].am_value); + + targetDataLen = att_addlength_pointer(targetDataLen, + att->attlen, + attrmiss[attnum].am_value); + } + else + { + /* no missing value, so it must be null */ + hasNulls = true; + } + } + } /* end if have missing values */ + else + { + /* + * If there are no missing values at all then NULLS must be allowed, + * since some of the attributes are known to be absent. + */ + hasNulls = true; + } + + len = 0; + + if (hasNulls) + { + targetNullLen = BITMAPLEN(natts); + len += targetNullLen; + } + else + targetNullLen = 0; + + if (tupleDesc->tdhasoid) + len += sizeof(Oid); + + /* + * Allocate and zero the space needed. Note that the tuple body and + * HeapTupleData management structure are allocated in one chunk. + */ + if (targetHeapTuple) + { + len += offsetof(HeapTupleHeaderData, t_bits); + hoff = len = MAXALIGN(len); /* align user data safely */ + len += targetDataLen; + + *targetHeapTuple = (HeapTuple) palloc0(HEAPTUPLESIZE + len); + (*targetHeapTuple)->t_data + = targetTHeader + = (HeapTupleHeader) ((char *) *targetHeapTuple + HEAPTUPLESIZE); + (*targetHeapTuple)->t_len = len; + (*targetHeapTuple)->t_tableOid = sourceTuple->t_tableOid; + (*targetHeapTuple)->t_self = sourceTuple->t_self; + + targetTHeader->t_infomask = sourceTHeader->t_infomask; + targetTHeader->t_hoff = hoff; + HeapTupleHeaderSetNatts(targetTHeader, natts); + HeapTupleHeaderSetDatumLength(targetTHeader, len); + HeapTupleHeaderSetTypeId(targetTHeader, tupleDesc->tdtypeid); + HeapTupleHeaderSetTypMod(targetTHeader, tupleDesc->tdtypmod); + /* We also make sure that t_ctid is invalid unless explicitly set */ + ItemPointerSetInvalid(&(targetTHeader->t_ctid)); + if (targetNullLen > 0) + nullBits = (bits8 *) ((char *) (*targetHeapTuple)->t_data + + offsetof(HeapTupleHeaderData, t_bits)); + targetData = (char *) (*targetHeapTuple)->t_data + hoff; + infoMask = &(targetTHeader->t_infomask); + } + else + { + len += SizeofMinimalTupleHeader; + hoff = len = MAXALIGN(len); /* align user data safely */ + len += targetDataLen; + + *targetMinimalTuple = (MinimalTuple) palloc0(len); + (*targetMinimalTuple)->t_len = len; + (*targetMinimalTuple)->t_hoff = hoff + MINIMAL_TUPLE_OFFSET; + (*targetMinimalTuple)->t_infomask = sourceTHeader->t_infomask; + /* Same macro works for MinimalTuples */ + HeapTupleHeaderSetNatts(*targetMinimalTuple, natts); + if (targetNullLen > 0) + nullBits = (bits8 *) ((char *) *targetMinimalTuple + + offsetof(MinimalTupleData, t_bits)); + targetData = (char *) *targetMinimalTuple + hoff; + infoMask = &((*targetMinimalTuple)->t_infomask); + } + + if (targetNullLen > 0) + { + if (sourceNullLen > 0) + { + /* if bitmap pre-existed copy in - all is set */ + memcpy(nullBits, + ((char *) sourceTHeader) + + offsetof(HeapTupleHeaderData, t_bits), + sourceNullLen); + nullBits += sourceNullLen - 1; + } + else + { + sourceNullLen = BITMAPLEN(sourceNatts); + /* Set NOT NULL for all existing attributes */ + memset(nullBits, 0xff, sourceNullLen); + + nullBits += sourceNullLen - 1; + + if (sourceNatts & 0x07) + { + /* build the mask (inverted!) 
*/ + bitMask = 0xff << (sourceNatts & 0x07); + /* Voila */ + *nullBits = ~bitMask; + } + } + + bitMask = (1 << ((sourceNatts - 1) & 0x07)); + } /* End if have null bitmap */ + + memcpy(targetData, + ((char *) sourceTuple->t_data) + sourceTHeader->t_hoff, + sourceDataLen); + + targetData += sourceDataLen; + + /* Now fill in the missing values */ + for (attnum = sourceNatts; attnum < natts; attnum++) + { + + Form_pg_attribute attr = TupleDescAttr(tupleDesc, attnum); + + if (attrmiss && attrmiss[attnum].am_present) + { + fill_val(attr, + nullBits ? &nullBits : NULL, + &bitMask, + &targetData, + infoMask, + attrmiss[attnum].am_value, + false); + } + else + { + fill_val(attr, + &nullBits, + &bitMask, + &targetData, + infoMask, + (Datum) 0, + true); + } + } /* end loop over missing attributes */ +} + +/* + * Fill in the missing values for a minimal HeapTuple + */ +MinimalTuple +minimal_expand_tuple(HeapTuple sourceTuple, TupleDesc tupleDesc) +{ + MinimalTuple minimalTuple; + + expand_tuple(NULL, &minimalTuple, sourceTuple, tupleDesc); + return minimalTuple; +} + +/* + * Fill in the missing values for an ordinary HeapTuple + */ +HeapTuple +heap_expand_tuple(HeapTuple sourceTuple, TupleDesc tupleDesc) +{ + HeapTuple heapTuple; + + expand_tuple(&heapTuple, NULL, sourceTuple, tupleDesc); + return heapTuple; +} + /* ---------------- * heap_copy_tuple_as_datum * @@ -935,12 +1270,11 @@ heap_deform_tuple(HeapTuple tuple, TupleDesc tupleDesc, { HeapTupleHeader tup = tuple->t_data; bool hasnulls = HeapTupleHasNulls(tuple); - Form_pg_attribute *att = tupleDesc->attrs; int tdesc_natts = tupleDesc->natts; int natts; /* number of atts to extract */ int attnum; char *tp; /* ptr to tuple data */ - long off; /* offset in tuple data */ + uint32 off; /* offset in tuple data */ bits8 *bp = tup->t_bits; /* ptr to null bitmap in tuple */ bool slow = false; /* can we use/set attcacheoff? */ @@ -959,7 +1293,7 @@ heap_deform_tuple(HeapTuple tuple, TupleDesc tupleDesc, for (attnum = 0; attnum < natts; attnum++) { - Form_pg_attribute thisatt = att[attnum]; + Form_pg_attribute thisatt = TupleDescAttr(tupleDesc, attnum); if (hasnulls && att_isnull(attnum, bp)) { @@ -1010,13 +1344,10 @@ heap_deform_tuple(HeapTuple tuple, TupleDesc tupleDesc, /* * If tuple doesn't have all the atts indicated by tupleDesc, read the - * rest as null + * rest as nulls or missing values as appropriate. */ for (; attnum < tdesc_natts; attnum++) - { - values[attnum] = (Datum) 0; - isnull[attnum] = true; - } + values[attnum] = getmissingattr(tupleDesc, attnum + 1, &isnull[attnum]); } /* @@ -1030,7 +1361,7 @@ heap_deform_tuple(HeapTuple tuple, TupleDesc tupleDesc, * re-computing information about previously extracted attributes. * slot->tts_nvalid is the number of attributes already extracted. */ -static void +void slot_deform_tuple(TupleTableSlot *slot, int natts) { HeapTuple tuple = slot->tts_tuple; @@ -1039,10 +1370,9 @@ slot_deform_tuple(TupleTableSlot *slot, int natts) bool *isnull = slot->tts_isnull; HeapTupleHeader tup = tuple->t_data; bool hasnulls = HeapTupleHasNulls(tuple); - Form_pg_attribute *att = tupleDesc->attrs; int attnum; char *tp; /* ptr to tuple data */ - long off; /* offset in tuple data */ + uint32 off; /* offset in tuple data */ bits8 *bp = tup->t_bits; /* ptr to null bitmap in tuple */ bool slow; /* can we use/set attcacheoff? 
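As a usage note for the expand_tuple() machinery added above: heap_expand_tuple() and minimal_expand_tuple() let a caller pad a tuple stored before columns were added, filling the new columns from the descriptor's missing values or with NULLs. A sketch, assuming a hypothetical caller outside this patch:

#include "postgres.h"
#include "access/htup_details.h"

/* Hypothetical caller: return a tuple with exactly tupdesc->natts attributes. */
static HeapTuple
pad_to_descriptor(HeapTuple tuple, TupleDesc tupdesc)
{
	if (HeapTupleHeaderGetNatts(tuple->t_data) < tupdesc->natts)
		return heap_expand_tuple(tuple, tupdesc);	/* missing values or NULLs */

	return heap_copytuple(tuple);	/* already has all attributes */
}
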
*/ @@ -1061,14 +1391,14 @@ slot_deform_tuple(TupleTableSlot *slot, int natts) { /* Restore state from previous execution */ off = slot->tts_off; - slow = slot->tts_slow; + slow = TTS_SLOW(slot); } tp = (char *) tup + tup->t_hoff; for (; attnum < natts; attnum++) { - Form_pg_attribute thisatt = att[attnum]; + Form_pg_attribute thisatt = TupleDescAttr(tupleDesc, attnum); if (hasnulls && att_isnull(attnum, bp)) { @@ -1122,30 +1452,25 @@ slot_deform_tuple(TupleTableSlot *slot, int natts) */ slot->tts_nvalid = attnum; slot->tts_off = off; - slot->tts_slow = slow; + if (slow) + slot->tts_flags |= TTS_FLAG_SLOW; + else + slot->tts_flags &= ~TTS_FLAG_SLOW; } /* - * slot_getattr - * This function fetches an attribute of the slot's current tuple. - * It is functionally equivalent to heap_getattr, but fetches of - * multiple attributes of the same tuple will be optimized better, - * because we avoid O(N^2) behavior from multiple calls of - * nocachegetattr(), even when attcacheoff isn't usable. - * - * A difference from raw heap_getattr is that attnums beyond the - * slot's tupdesc's last attribute will be considered NULL even - * when the physical tuple is longer than the tupdesc. + * slot_attisnull + * Detect whether an attribute of the slot is null, without + * actually fetching it. */ -Datum -slot_getattr(TupleTableSlot *slot, int attnum, bool *isnull) +bool +slot_attisnull(TupleTableSlot *slot, int attnum) { HeapTuple tuple = slot->tts_tuple; TupleDesc tupleDesc = slot->tts_tupleDescriptor; - HeapTupleHeader tup; /* - * system attributes are handled by heap_getsysattr + * system attributes are handled by heap_attisnull */ if (attnum <= 0) { @@ -1153,216 +1478,56 @@ slot_getattr(TupleTableSlot *slot, int attnum, bool *isnull) elog(ERROR, "cannot extract system attribute from virtual tuple"); if (tuple == &(slot->tts_minhdr)) /* internal error */ elog(ERROR, "cannot extract system attribute from minimal tuple"); - return heap_getsysattr(tuple, attnum, tupleDesc, isnull); + return heap_attisnull(tuple, attnum, tupleDesc); } /* * fast path if desired attribute already cached */ if (attnum <= slot->tts_nvalid) - { - *isnull = slot->tts_isnull[attnum - 1]; - return slot->tts_values[attnum - 1]; - } + return slot->tts_isnull[attnum - 1]; /* * return NULL if attnum is out of range according to the tupdesc */ if (attnum > tupleDesc->natts) - { - *isnull = true; - return (Datum) 0; - } - - /* - * otherwise we had better have a physical tuple (tts_nvalid should equal - * natts in all virtual-tuple cases) - */ - if (tuple == NULL) /* internal error */ - elog(ERROR, "cannot extract attribute from empty tuple slot"); - - /* - * return NULL if attnum is out of range according to the tuple - * - * (We have to check this separately because of various inheritance and - * table-alteration scenarios: the tuple could be either longer or shorter - * than the tupdesc.) - */ - tup = tuple->t_data; - if (attnum > HeapTupleHeaderGetNatts(tup)) - { - *isnull = true; - return (Datum) 0; - } - - /* - * check if target attribute is null: no point in groveling through tuple - */ - if (HeapTupleHasNulls(tuple) && att_isnull(attnum - 1, tup->t_bits)) - { - *isnull = true; - return (Datum) 0; - } - - /* - * If the attribute's column has been dropped, we force a NULL result. - * This case should not happen in normal use, but it could happen if we - * are executing a plan cached before the column was dropped. 
- */ - if (tupleDesc->attrs[attnum - 1]->attisdropped) - { - *isnull = true; - return (Datum) 0; - } - - /* - * Extract the attribute, along with any preceding attributes. - */ - slot_deform_tuple(slot, attnum); - - /* - * The result is acquired from tts_values array. - */ - *isnull = slot->tts_isnull[attnum - 1]; - return slot->tts_values[attnum - 1]; -} - -/* - * slot_getallattrs - * This function forces all the entries of the slot's Datum/isnull - * arrays to be valid. The caller may then extract data directly - * from those arrays instead of using slot_getattr. - */ -void -slot_getallattrs(TupleTableSlot *slot) -{ - int tdesc_natts = slot->tts_tupleDescriptor->natts; - int attnum; - HeapTuple tuple; - - /* Quick out if we have 'em all already */ - if (slot->tts_nvalid == tdesc_natts) - return; - - /* - * otherwise we had better have a physical tuple (tts_nvalid should equal - * natts in all virtual-tuple cases) - */ - tuple = slot->tts_tuple; - if (tuple == NULL) /* internal error */ - elog(ERROR, "cannot extract attribute from empty tuple slot"); - - /* - * load up any slots available from physical tuple - */ - attnum = HeapTupleHeaderGetNatts(tuple->t_data); - attnum = Min(attnum, tdesc_natts); - - slot_deform_tuple(slot, attnum); - - /* - * If tuple doesn't have all the atts indicated by tupleDesc, read the - * rest as null - */ - for (; attnum < tdesc_natts; attnum++) - { - slot->tts_values[attnum] = (Datum) 0; - slot->tts_isnull[attnum] = true; - } - slot->tts_nvalid = tdesc_natts; -} - -/* - * slot_getsomeattrs - * This function forces the entries of the slot's Datum/isnull - * arrays to be valid at least up through the attnum'th entry. - */ -void -slot_getsomeattrs(TupleTableSlot *slot, int attnum) -{ - HeapTuple tuple; - int attno; - - /* Quick out if we have 'em all already */ - if (slot->tts_nvalid >= attnum) - return; - - /* Check for caller error */ - if (attnum <= 0 || attnum > slot->tts_tupleDescriptor->natts) - elog(ERROR, "invalid attribute number %d", attnum); + return true; /* * otherwise we had better have a physical tuple (tts_nvalid should equal * natts in all virtual-tuple cases) */ - tuple = slot->tts_tuple; if (tuple == NULL) /* internal error */ elog(ERROR, "cannot extract attribute from empty tuple slot"); - /* - * load up any slots available from physical tuple - */ - attno = HeapTupleHeaderGetNatts(tuple->t_data); - attno = Min(attno, attnum); - - slot_deform_tuple(slot, attno); - - /* - * If tuple doesn't have all the atts indicated by tupleDesc, read the - * rest as null - */ - for (; attno < attnum; attno++) - { - slot->tts_values[attno] = (Datum) 0; - slot->tts_isnull[attno] = true; - } - slot->tts_nvalid = attnum; + /* and let the tuple tell it */ + return heap_attisnull(tuple, attnum, tupleDesc); } /* - * slot_attisnull - * Detect whether an attribute of the slot is null, without - * actually fetching it. + * slot_getsysattr + * This function fetches a system attribute of the slot's current tuple. + * Unlike slot_getattr, if the slot does not contain system attributes, + * this will return false (with a NULL attribute value) instead of + * throwing an error. 
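The slot_getsysattr() variant described here reports, via its return value, whether the slot could supply a system column at all. A sketch of the intended caller pattern; the wrapper function is hypothetical:

#include "postgres.h"
#include "access/sysattr.h"
#include "executor/tuptable.h"

/* Hypothetical: fetch xmin from a slot, or report that it is unavailable. */
static bool
slot_fetch_xmin(TupleTableSlot *slot, Datum *xmin)
{
	bool		isnull;

	if (!slot_getsysattr(slot, MinTransactionIdAttributeNumber, xmin, &isnull))
		return false;		/* virtual or minimal tuple: no system columns */

	return !isnull;
}
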
*/ bool -slot_attisnull(TupleTableSlot *slot, int attnum) +slot_getsysattr(TupleTableSlot *slot, int attnum, + Datum *value, bool *isnull) { HeapTuple tuple = slot->tts_tuple; - TupleDesc tupleDesc = slot->tts_tupleDescriptor; - /* - * system attributes are handled by heap_attisnull - */ - if (attnum <= 0) + Assert(attnum < 0); /* else caller error */ + if (tuple == NULL || + tuple == &(slot->tts_minhdr)) { - if (tuple == NULL) /* internal error */ - elog(ERROR, "cannot extract system attribute from virtual tuple"); - if (tuple == &(slot->tts_minhdr)) /* internal error */ - elog(ERROR, "cannot extract system attribute from minimal tuple"); - return heap_attisnull(tuple, attnum); + /* No physical tuple, or minimal tuple, so fail */ + *value = (Datum) 0; + *isnull = true; + return false; } - - /* - * fast path if desired attribute already cached - */ - if (attnum <= slot->tts_nvalid) - return slot->tts_isnull[attnum - 1]; - - /* - * return NULL if attnum is out of range according to the tupdesc - */ - if (attnum > tupleDesc->natts) - return true; - - /* - * otherwise we had better have a physical tuple (tts_nvalid should equal - * natts in all virtual-tuple cases) - */ - if (tuple == NULL) /* internal error */ - elog(ERROR, "cannot extract attribute from empty tuple slot"); - - /* and let the tuple tell it */ - return heap_attisnull(tuple, attnum); + *value = heap_getsysattr(tuple, attnum, slot->tts_tupleDescriptor, isnull); + return true; } /* @@ -1529,3 +1694,13 @@ minimal_tuple_from_heap_tuple(HeapTuple htup) result->t_len = len; return result; } + +/* + * This mainly exists so JIT can inline the definition, but it's also + * sometimes useful in debugging sessions. + */ +size_t +varsize_any(void *p) +{ + return VARSIZE_ANY(p); +} diff --git a/src/backend/access/common/indextuple.c b/src/backend/access/common/indextuple.c index 37a21057d0..aa52a96259 100644 --- a/src/backend/access/common/indextuple.c +++ b/src/backend/access/common/indextuple.c @@ -4,7 +4,7 @@ * This file contains index tuple accessor and mutator routines, * as well as various tuple utilities. * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * @@ -31,6 +31,9 @@ * * This shouldn't leak any memory; otherwise, callers such as * tuplesort_putindextuplevalues() will be very unhappy. + * + * This shouldn't perform external table access provided caller + * does not pass values that are stored EXTERNAL. * ---------------- */ IndexTuple @@ -63,7 +66,7 @@ index_form_tuple(TupleDesc tupleDescriptor, #ifdef TOAST_INDEX_HACK for (i = 0; i < numberOfAttributes; i++) { - Form_pg_attribute att = tupleDescriptor->attrs[i]; + Form_pg_attribute att = TupleDescAttr(tupleDescriptor, i); untoasted_values[i] = values[i]; untoasted_free[i] = false; @@ -209,7 +212,6 @@ nocache_index_getattr(IndexTuple tup, int attnum, TupleDesc tupleDesc) { - Form_pg_attribute *att = tupleDesc->attrs; char *tp; /* ptr to data part of tuple */ bits8 *bp = NULL; /* ptr to null bitmap in tuple */ bool slow = false; /* do we have to walk attrs? */ @@ -271,15 +273,15 @@ nocache_index_getattr(IndexTuple tup, if (!slow) { + Form_pg_attribute att; + /* * If we get here, there are no nulls up to and including the target * attribute. If we have a cached offset, we can use it. 
*/ - if (att[attnum]->attcacheoff >= 0) - { - return fetchatt(att[attnum], - tp + att[attnum]->attcacheoff); - } + att = TupleDescAttr(tupleDesc, attnum); + if (att->attcacheoff >= 0) + return fetchatt(att, tp + att->attcacheoff); /* * Otherwise, check for non-fixed-length attrs up to and including @@ -292,7 +294,7 @@ nocache_index_getattr(IndexTuple tup, for (j = 0; j <= attnum; j++) { - if (att[j]->attlen <= 0) + if (TupleDescAttr(tupleDesc, j)->attlen <= 0) { slow = true; break; @@ -315,29 +317,32 @@ nocache_index_getattr(IndexTuple tup, * fixed-width columns, in hope of avoiding future visits to this * routine. */ - att[0]->attcacheoff = 0; + TupleDescAttr(tupleDesc, 0)->attcacheoff = 0; /* we might have set some offsets in the slow path previously */ - while (j < natts && att[j]->attcacheoff > 0) + while (j < natts && TupleDescAttr(tupleDesc, j)->attcacheoff > 0) j++; - off = att[j - 1]->attcacheoff + att[j - 1]->attlen; + off = TupleDescAttr(tupleDesc, j - 1)->attcacheoff + + TupleDescAttr(tupleDesc, j - 1)->attlen; for (; j < natts; j++) { - if (att[j]->attlen <= 0) + Form_pg_attribute att = TupleDescAttr(tupleDesc, j); + + if (att->attlen <= 0) break; - off = att_align_nominal(off, att[j]->attalign); + off = att_align_nominal(off, att->attalign); - att[j]->attcacheoff = off; + att->attcacheoff = off; - off += att[j]->attlen; + off += att->attlen; } Assert(j > attnum); - off = att[attnum]->attcacheoff; + off = TupleDescAttr(tupleDesc, attnum)->attcacheoff; } else { @@ -357,6 +362,8 @@ nocache_index_getattr(IndexTuple tup, off = 0; for (i = 0;; i++) /* loop exit is at "break" */ { + Form_pg_attribute att = TupleDescAttr(tupleDesc, i); + if (IndexTupleHasNulls(tup) && att_isnull(i, bp)) { usecache = false; @@ -364,9 +371,9 @@ nocache_index_getattr(IndexTuple tup, } /* If we know the next offset, we can skip the rest */ - if (usecache && att[i]->attcacheoff >= 0) - off = att[i]->attcacheoff; - else if (att[i]->attlen == -1) + if (usecache && att->attcacheoff >= 0) + off = att->attcacheoff; + else if (att->attlen == -1) { /* * We can only cache the offset for a varlena attribute if the @@ -375,11 +382,11 @@ nocache_index_getattr(IndexTuple tup, * either an aligned or unaligned value. */ if (usecache && - off == att_align_nominal(off, att[i]->attalign)) - att[i]->attcacheoff = off; + off == att_align_nominal(off, att->attalign)) + att->attcacheoff = off; else { - off = att_align_pointer(off, att[i]->attalign, -1, + off = att_align_pointer(off, att->attalign, -1, tp + off); usecache = false; } @@ -387,23 +394,23 @@ nocache_index_getattr(IndexTuple tup, else { /* not varlena, so safe to use att_align_nominal */ - off = att_align_nominal(off, att[i]->attalign); + off = att_align_nominal(off, att->attalign); if (usecache) - att[i]->attcacheoff = off; + att->attcacheoff = off; } if (i == attnum) break; - off = att_addlength_pointer(off, att[i]->attlen, tp + off); + off = att_addlength_pointer(off, att->attlen, tp + off); - if (usecache && att[i]->attlen <= 0) + if (usecache && att->attlen <= 0) usecache = false; } } - return fetchatt(att[attnum], tp + off); + return fetchatt(TupleDescAttr(tupleDesc, attnum), tp + off); } /* @@ -441,3 +448,51 @@ CopyIndexTuple(IndexTuple source) memcpy(result, source, size); return result; } + +/* + * Create a palloc'd copy of an index tuple, leaving only the first + * leavenatts attributes remaining. + * + * Truncation is guaranteed to result in an index tuple that is no + * larger than the original. 
It is safe to use the IndexTuple with + * the original tuple descriptor, but caller must avoid actually + * accessing truncated attributes from returned tuple! In practice + * this means that index_getattr() must be called with special care, + * and that the truncated tuple should only ever be accessed by code + * under caller's direct control. + * + * It's safe to call this function with a buffer lock held, since it + * never performs external table access. If it ever became possible + * for index tuples to contain EXTERNAL TOAST values, then this would + * have to be revisited. + */ +IndexTuple +index_truncate_tuple(TupleDesc sourceDescriptor, IndexTuple source, + int leavenatts) +{ + TupleDesc truncdesc; + Datum values[INDEX_MAX_KEYS]; + bool isnull[INDEX_MAX_KEYS]; + IndexTuple truncated; + + Assert(leavenatts < sourceDescriptor->natts); + + /* Create temporary descriptor to scribble on */ + truncdesc = palloc(TupleDescSize(sourceDescriptor)); + TupleDescCopy(truncdesc, sourceDescriptor); + truncdesc->natts = leavenatts; + + /* Deform, form copy of tuple with fewer attributes */ + index_deform_tuple(source, truncdesc, values, isnull); + truncated = index_form_tuple(truncdesc, values, isnull); + truncated->t_tid = source->t_tid; + Assert(IndexTupleSize(truncated) <= IndexTupleSize(source)); + + /* + * Cannot leak memory here, TupleDescCopy() doesn't allocate any inner + * structure, so, plain pfree() should clean all allocated memory + */ + pfree(truncdesc); + + return truncated; +} diff --git a/src/backend/access/common/printsimple.c b/src/backend/access/common/printsimple.c index c863e859fe..3c4d227712 100644 --- a/src/backend/access/common/printsimple.c +++ b/src/backend/access/common/printsimple.c @@ -8,7 +8,7 @@ * doesn't handle standalone backends or protocol versions other than * 3.0, because we don't need such handling for current applications. 
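index_truncate_tuple() above is the building block an index AM would use to form truncated "pivot" tuples. A brief sketch, assuming an index with more than one key column; the helper name is made up:

#include "postgres.h"
#include "access/itup.h"
#include "utils/rel.h"

/* Hypothetical: keep only the first key attribute of an index tuple. */
static IndexTuple
make_single_key_pivot(Relation index, IndexTuple itup)
{
	TupleDesc	itupdesc = RelationGetDescr(index);

	Assert(itupdesc->natts > 1);	/* leavenatts must be < natts */

	return index_truncate_tuple(itupdesc, itup, 1);
}
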
* - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * IDENTIFICATION @@ -34,19 +34,19 @@ printsimple_startup(DestReceiver *self, int operation, TupleDesc tupdesc) int i; pq_beginmessage(&buf, 'T'); /* RowDescription */ - pq_sendint(&buf, tupdesc->natts, 2); + pq_sendint16(&buf, tupdesc->natts); for (i = 0; i < tupdesc->natts; ++i) { - Form_pg_attribute attr = tupdesc->attrs[i]; + Form_pg_attribute attr = TupleDescAttr(tupdesc, i); pq_sendstring(&buf, NameStr(attr->attname)); - pq_sendint(&buf, 0, 4); /* table oid */ - pq_sendint(&buf, 0, 2); /* attnum */ - pq_sendint(&buf, (int) attr->atttypid, 4); - pq_sendint(&buf, attr->attlen, 2); - pq_sendint(&buf, attr->atttypmod, 4); - pq_sendint(&buf, 0, 2); /* format code */ + pq_sendint32(&buf, 0); /* table oid */ + pq_sendint16(&buf, 0); /* attnum */ + pq_sendint32(&buf, (int) attr->atttypid); + pq_sendint16(&buf, attr->attlen); + pq_sendint32(&buf, attr->atttypmod); + pq_sendint16(&buf, 0); /* format code */ } pq_endmessage(&buf); @@ -67,16 +67,16 @@ printsimple(TupleTableSlot *slot, DestReceiver *self) /* Prepare and send message */ pq_beginmessage(&buf, 'D'); - pq_sendint(&buf, tupdesc->natts, 2); + pq_sendint16(&buf, tupdesc->natts); for (i = 0; i < tupdesc->natts; ++i) { - Form_pg_attribute attr = tupdesc->attrs[i]; + Form_pg_attribute attr = TupleDescAttr(tupdesc, i); Datum value; if (slot->tts_isnull[i]) { - pq_sendint(&buf, -1, 4); + pq_sendint32(&buf, -1); continue; } diff --git a/src/backend/access/common/printtup.c b/src/backend/access/common/printtup.c index a2ca2d74ae..a1d4415704 100644 --- a/src/backend/access/common/printtup.c +++ b/src/backend/access/common/printtup.c @@ -5,7 +5,7 @@ * clients and standalone backends are supported here). * * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * IDENTIFICATION @@ -32,6 +32,10 @@ static bool printtup_internal_20(TupleTableSlot *slot, DestReceiver *self); static void printtup_shutdown(DestReceiver *self); static void printtup_destroy(DestReceiver *self); +static void SendRowDescriptionCols_2(StringInfo buf, TupleDesc typeinfo, + List *targetlist, int16 *formats); +static void SendRowDescriptionCols_3(StringInfo buf, TupleDesc typeinfo, + List *targetlist, int16 *formats); /* ---------------------------------------------------------------- * printtup / debugtup support @@ -57,6 +61,7 @@ typedef struct typedef struct { DestReceiver pub; /* publicly-known function pointers */ + StringInfoData buf; /* output buffer */ Portal portal; /* the Portal we are printing from */ bool sendDescrip; /* send RowDescription at startup? */ TupleDesc attrinfo; /* The attr info we are set up for */ @@ -127,6 +132,9 @@ printtup_startup(DestReceiver *self, int operation, TupleDesc typeinfo) DR_printtup *myState = (DR_printtup *) self; Portal portal = myState->portal; + /* create buffer to be used for all messages */ + initStringInfo(&myState->buf); + /* * Create a temporary memory context that we can reset once per row to * recover palloc'd memory. This avoids any problems with leaks inside @@ -157,7 +165,8 @@ printtup_startup(DestReceiver *self, int operation, TupleDesc typeinfo) * descriptor of the tuples. 
*/ if (myState->sendDescrip) - SendRowDescriptionMessage(typeinfo, + SendRowDescriptionMessage(&myState->buf, + typeinfo, FetchPortalTargetList(portal), portal->formats); @@ -185,61 +194,126 @@ printtup_startup(DestReceiver *self, int operation, TupleDesc typeinfo) * send zeroes for the format codes in that case. */ void -SendRowDescriptionMessage(TupleDesc typeinfo, List *targetlist, int16 *formats) +SendRowDescriptionMessage(StringInfo buf, TupleDesc typeinfo, + List *targetlist, int16 *formats) { - Form_pg_attribute *attrs = typeinfo->attrs; int natts = typeinfo->natts; int proto = PG_PROTOCOL_MAJOR(FrontendProtocol); + + /* tuple descriptor message type */ + pq_beginmessage_reuse(buf, 'T'); + /* # of attrs in tuples */ + pq_sendint16(buf, natts); + + if (proto >= 3) + SendRowDescriptionCols_3(buf, typeinfo, targetlist, formats); + else + SendRowDescriptionCols_2(buf, typeinfo, targetlist, formats); + + pq_endmessage_reuse(buf); +} + +/* + * Send description for each column when using v3+ protocol + */ +static void +SendRowDescriptionCols_3(StringInfo buf, TupleDesc typeinfo, List *targetlist, int16 *formats) +{ + int natts = typeinfo->natts; int i; - StringInfoData buf; ListCell *tlist_item = list_head(targetlist); - pq_beginmessage(&buf, 'T'); /* tuple descriptor message type */ - pq_sendint(&buf, natts, 2); /* # of attrs in tuples */ + /* + * Preallocate memory for the entire message to be sent. That allows to + * use the significantly faster inline pqformat.h functions and to avoid + * reallocations. + * + * Have to overestimate the size of the column-names, to account for + * character set overhead. + */ + enlargeStringInfo(buf, (NAMEDATALEN * MAX_CONVERSION_GROWTH /* attname */ + + sizeof(Oid) /* resorigtbl */ + + sizeof(AttrNumber) /* resorigcol */ + + sizeof(Oid) /* atttypid */ + + sizeof(int16) /* attlen */ + + sizeof(int32) /* attypmod */ + + sizeof(int16) /* format */ + ) * natts); for (i = 0; i < natts; ++i) { - Oid atttypid = attrs[i]->atttypid; - int32 atttypmod = attrs[i]->atttypmod; + Form_pg_attribute att = TupleDescAttr(typeinfo, i); + Oid atttypid = att->atttypid; + int32 atttypmod = att->atttypmod; + Oid resorigtbl; + AttrNumber resorigcol; + int16 format; + + /* + * If column is a domain, send the base type and typmod instead. + * Lookup before sending any ints, for efficiency. + */ + atttypid = getBaseTypeAndTypmod(atttypid, &atttypmod); - pq_sendstring(&buf, NameStr(attrs[i]->attname)); - /* column ID info appears in protocol 3.0 and up */ - if (proto >= 3) + /* Do we have a non-resjunk tlist item? */ + while (tlist_item && + ((TargetEntry *) lfirst(tlist_item))->resjunk) + tlist_item = lnext(tlist_item); + if (tlist_item) { - /* Do we have a non-resjunk tlist item? 
*/ - while (tlist_item && - ((TargetEntry *) lfirst(tlist_item))->resjunk) - tlist_item = lnext(tlist_item); - if (tlist_item) - { - TargetEntry *tle = (TargetEntry *) lfirst(tlist_item); - - pq_sendint(&buf, tle->resorigtbl, 4); - pq_sendint(&buf, tle->resorigcol, 2); - tlist_item = lnext(tlist_item); - } - else - { - /* No info available, so send zeroes */ - pq_sendint(&buf, 0, 4); - pq_sendint(&buf, 0, 2); - } + TargetEntry *tle = (TargetEntry *) lfirst(tlist_item); + + resorigtbl = tle->resorigtbl; + resorigcol = tle->resorigcol; + tlist_item = lnext(tlist_item); } - /* If column is a domain, send the base type and typmod instead */ - atttypid = getBaseTypeAndTypmod(atttypid, &atttypmod); - pq_sendint(&buf, (int) atttypid, sizeof(atttypid)); - pq_sendint(&buf, attrs[i]->attlen, sizeof(attrs[i]->attlen)); - pq_sendint(&buf, atttypmod, sizeof(atttypmod)); - /* format info appears in protocol 3.0 and up */ - if (proto >= 3) + else { - if (formats) - pq_sendint(&buf, formats[i], 2); - else - pq_sendint(&buf, 0, 2); + /* No info available, so send zeroes */ + resorigtbl = 0; + resorigcol = 0; } + + if (formats) + format = formats[i]; + else + format = 0; + + pq_writestring(buf, NameStr(att->attname)); + pq_writeint32(buf, resorigtbl); + pq_writeint16(buf, resorigcol); + pq_writeint32(buf, atttypid); + pq_writeint16(buf, att->attlen); + pq_writeint32(buf, atttypmod); + pq_writeint16(buf, format); + } +} + +/* + * Send description for each column when using v2 protocol + */ +static void +SendRowDescriptionCols_2(StringInfo buf, TupleDesc typeinfo, List *targetlist, int16 *formats) +{ + int natts = typeinfo->natts; + int i; + + for (i = 0; i < natts; ++i) + { + Form_pg_attribute att = TupleDescAttr(typeinfo, i); + Oid atttypid = att->atttypid; + int32 atttypmod = att->atttypmod; + + /* If column is a domain, send the base type and typmod instead */ + atttypid = getBaseTypeAndTypmod(atttypid, &atttypmod); + + pq_sendstring(buf, NameStr(att->attname)); + /* column ID only info appears in protocol 3.0 and up */ + pq_sendint32(buf, atttypid); + pq_sendint16(buf, att->attlen); + pq_sendint32(buf, atttypmod); + /* format info only appears in protocol 3.0 and up */ } - pq_endmessage(&buf); } /* @@ -268,18 +342,19 @@ printtup_prepare_info(DR_printtup *myState, TupleDesc typeinfo, int numAttrs) { PrinttupAttrInfo *thisState = myState->myinfo + i; int16 format = (formats ? 
formats[i] : 0); + Form_pg_attribute attr = TupleDescAttr(typeinfo, i); thisState->format = format; if (format == 0) { - getTypeOutputInfo(typeinfo->attrs[i]->atttypid, + getTypeOutputInfo(attr->atttypid, &thisState->typoutput, &thisState->typisvarlena); fmgr_info(thisState->typoutput, &thisState->finfo); } else if (format == 1) { - getTypeBinaryOutputInfo(typeinfo->attrs[i]->atttypid, + getTypeBinaryOutputInfo(attr->atttypid, &thisState->typsend, &thisState->typisvarlena); fmgr_info(thisState->typsend, &thisState->finfo); @@ -301,7 +376,7 @@ printtup(TupleTableSlot *slot, DestReceiver *self) TupleDesc typeinfo = slot->tts_tupleDescriptor; DR_printtup *myState = (DR_printtup *) self; MemoryContext oldcontext; - StringInfoData buf; + StringInfo buf = &myState->buf; int natts = typeinfo->natts; int i; @@ -318,9 +393,9 @@ printtup(TupleTableSlot *slot, DestReceiver *self) /* * Prepare a DataRow message (note buffer is in per-row context) */ - pq_beginmessage(&buf, 'D'); + pq_beginmessage_reuse(buf, 'D'); - pq_sendint(&buf, natts, 2); + pq_sendint16(buf, natts); /* * send the attributes of this tuple @@ -332,7 +407,7 @@ printtup(TupleTableSlot *slot, DestReceiver *self) if (slot->tts_isnull[i]) { - pq_sendint(&buf, -1, 4); + pq_sendint32(buf, -1); continue; } @@ -353,7 +428,7 @@ printtup(TupleTableSlot *slot, DestReceiver *self) char *outputstr; outputstr = OutputFunctionCall(&thisState->finfo, attr); - pq_sendcountedtext(&buf, outputstr, strlen(outputstr), false); + pq_sendcountedtext(buf, outputstr, strlen(outputstr), false); } else { @@ -361,13 +436,13 @@ printtup(TupleTableSlot *slot, DestReceiver *self) bytea *outputbytes; outputbytes = SendFunctionCall(&thisState->finfo, attr); - pq_sendint(&buf, VARSIZE(outputbytes) - VARHDRSZ, 4); - pq_sendbytes(&buf, VARDATA(outputbytes), + pq_sendint32(buf, VARSIZE(outputbytes) - VARHDRSZ); + pq_sendbytes(buf, VARDATA(outputbytes), VARSIZE(outputbytes) - VARHDRSZ); } } - pq_endmessage(&buf); + pq_endmessage_reuse(buf); /* Return to caller's context, and flush row's temporary memory */ MemoryContextSwitchTo(oldcontext); @@ -386,7 +461,7 @@ printtup_20(TupleTableSlot *slot, DestReceiver *self) TupleDesc typeinfo = slot->tts_tupleDescriptor; DR_printtup *myState = (DR_printtup *) self; MemoryContext oldcontext; - StringInfoData buf; + StringInfo buf = &myState->buf; int natts = typeinfo->natts; int i, j, @@ -405,7 +480,7 @@ printtup_20(TupleTableSlot *slot, DestReceiver *self) /* * tell the frontend to expect new tuple data (in ASCII style) */ - pq_beginmessage(&buf, 'D'); + pq_beginmessage_reuse(buf, 'D'); /* * send a bitmap of which attributes are not null @@ -419,13 +494,13 @@ printtup_20(TupleTableSlot *slot, DestReceiver *self) k >>= 1; if (k == 0) /* end of byte? 
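The protocol-output hunks above replace pq_sendint(..., n) with the width-explicit pq_sendint16/pq_sendint32 calls and reuse a per-receiver StringInfo, preallocating it so the inline pq_write* helpers never need to reallocate. A minimal sketch of the same pattern for a made-up message type 'X':

#include "postgres.h"
#include "libpq/pqformat.h"
#include "mb/pg_wchar.h"

/* Hypothetical message: one int32 followed by a null-terminated string. */
static void
send_example_message(StringInfo buf, int32 value, const char *name)
{
	pq_beginmessage_reuse(buf, 'X');

	/* reserve worst-case space, including encoding-conversion growth */
	enlargeStringInfo(buf, sizeof(int32) +
					  strlen(name) * MAX_CONVERSION_GROWTH + 1);

	pq_writeint32(buf, value);
	pq_writestring(buf, name);

	pq_endmessage_reuse(buf);
}
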
*/ { - pq_sendint(&buf, j, 1); + pq_sendint8(buf, j); j = 0; k = 1 << 7; } } if (k != (1 << 7)) /* flush last partial byte */ - pq_sendint(&buf, j, 1); + pq_sendint8(buf, j); /* * send the attributes of this tuple @@ -442,10 +517,10 @@ printtup_20(TupleTableSlot *slot, DestReceiver *self) Assert(thisState->format == 0); outputstr = OutputFunctionCall(&thisState->finfo, attr); - pq_sendcountedtext(&buf, outputstr, strlen(outputstr), true); + pq_sendcountedtext(buf, outputstr, strlen(outputstr), true); } - pq_endmessage(&buf); + pq_endmessage_reuse(buf); /* Return to caller's context, and flush row's temporary memory */ MemoryContextSwitchTo(oldcontext); @@ -513,14 +588,13 @@ void debugStartup(DestReceiver *self, int operation, TupleDesc typeinfo) { int natts = typeinfo->natts; - Form_pg_attribute *attinfo = typeinfo->attrs; int i; /* * show the return type of the tuples */ for (i = 0; i < natts; ++i) - printatt((unsigned) i + 1, attinfo[i], NULL); + printatt((unsigned) i + 1, TupleDescAttr(typeinfo, i), NULL); printf("\t----\n"); } @@ -545,12 +619,12 @@ debugtup(TupleTableSlot *slot, DestReceiver *self) attr = slot_getattr(slot, i + 1, &isnull); if (isnull) continue; - getTypeOutputInfo(typeinfo->attrs[i]->atttypid, + getTypeOutputInfo(TupleDescAttr(typeinfo, i)->atttypid, &typoutput, &typisvarlena); value = OidOutputFunctionCall(typoutput, attr); - printatt((unsigned) i + 1, typeinfo->attrs[i], value); + printatt((unsigned) i + 1, TupleDescAttr(typeinfo, i), value); } printf("\t----\n"); @@ -572,7 +646,7 @@ printtup_internal_20(TupleTableSlot *slot, DestReceiver *self) TupleDesc typeinfo = slot->tts_tupleDescriptor; DR_printtup *myState = (DR_printtup *) self; MemoryContext oldcontext; - StringInfoData buf; + StringInfo buf = &myState->buf; int natts = typeinfo->natts; int i, j, @@ -591,7 +665,7 @@ printtup_internal_20(TupleTableSlot *slot, DestReceiver *self) /* * tell the frontend to expect new tuple data (in binary style) */ - pq_beginmessage(&buf, 'B'); + pq_beginmessage_reuse(buf, 'B'); /* * send a bitmap of which attributes are not null @@ -605,13 +679,13 @@ printtup_internal_20(TupleTableSlot *slot, DestReceiver *self) k >>= 1; if (k == 0) /* end of byte? 
*/ { - pq_sendint(&buf, j, 1); + pq_sendint8(buf, j); j = 0; k = 1 << 7; } } if (k != (1 << 7)) /* flush last partial byte */ - pq_sendint(&buf, j, 1); + pq_sendint8(buf, j); /* * send the attributes of this tuple @@ -628,12 +702,12 @@ printtup_internal_20(TupleTableSlot *slot, DestReceiver *self) Assert(thisState->format == 1); outputbytes = SendFunctionCall(&thisState->finfo, attr); - pq_sendint(&buf, VARSIZE(outputbytes) - VARHDRSZ, 4); - pq_sendbytes(&buf, VARDATA(outputbytes), + pq_sendint32(buf, VARSIZE(outputbytes) - VARHDRSZ); + pq_sendbytes(buf, VARDATA(outputbytes), VARSIZE(outputbytes) - VARHDRSZ); } - pq_endmessage(&buf); + pq_endmessage_reuse(buf); /* Return to caller's context, and flush row's temporary memory */ MemoryContextSwitchTo(oldcontext); diff --git a/src/backend/access/common/reloptions.c b/src/backend/access/common/reloptions.c index ec10762529..db84da0678 100644 --- a/src/backend/access/common/reloptions.c +++ b/src/backend/access/common/reloptions.c @@ -3,7 +3,7 @@ * reloptions.c * Core support for relation options (pg_class.reloptions) * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * @@ -23,6 +23,7 @@ #include "access/nbtree.h" #include "access/reloptions.h" #include "access/spgist.h" +#include "access/tuptoaster.h" #include "catalog/pg_type.h" #include "commands/defrem.h" #include "commands/tablespace.h" @@ -128,6 +129,15 @@ static relopt_bool boolRelOpts[] = }, true }, + { + { + "recheck_on_update", + "Recheck functional index expression for changed value after update", + RELOPT_KIND_INDEX, + ShareUpdateExclusiveLock /* since only applies to later UPDATEs */ + }, + true + }, { { "security_barrier", @@ -290,6 +300,15 @@ static relopt_int intRelOpts[] = }, -1, -1, INT_MAX }, + { + { + "toast_tuple_target", + "Sets the target tuple length at which external columns will be toasted", + RELOPT_KIND_HEAP, + ShareUpdateExclusiveLock + }, + TOAST_TUPLE_TARGET, 128, TOAST_TUPLE_TARGET_MAIN + }, { { "pages_per_range", @@ -390,6 +409,15 @@ static relopt_real realRelOpts[] = }, 0, -1.0, DBL_MAX }, + { + { + "vacuum_cleanup_index_scale_factor", + "Number of tuple inserts prior to index cleanup as a fraction of reltuples.", + RELOPT_KIND_BTREE, + ShareUpdateExclusiveLock + }, + -1, 0.0, 1e10 + }, /* list terminator */ {{NULL}} }; @@ -582,7 +610,7 @@ add_reloption(relopt_gen *newoption) * (for types other than string) */ static relopt_gen * -allocate_reloption(bits32 kinds, int type, char *name, char *desc) +allocate_reloption(bits32 kinds, int type, const char *name, const char *desc) { MemoryContext oldcxt; size_t size; @@ -630,7 +658,7 @@ allocate_reloption(bits32 kinds, int type, char *name, char *desc) * Add a new boolean reloption */ void -add_bool_reloption(bits32 kinds, char *name, char *desc, bool default_val) +add_bool_reloption(bits32 kinds, const char *name, const char *desc, bool default_val) { relopt_bool *newoption; @@ -646,7 +674,7 @@ add_bool_reloption(bits32 kinds, char *name, char *desc, bool default_val) * Add a new integer reloption */ void -add_int_reloption(bits32 kinds, char *name, char *desc, int default_val, +add_int_reloption(bits32 kinds, const char *name, const char *desc, int default_val, int min_val, int max_val) { relopt_int *newoption; @@ -665,7 +693,7 @@ add_int_reloption(bits32 kinds, char *name, char *desc, int default_val, * Add a new float reloption */ void 
-add_real_reloption(bits32 kinds, char *name, char *desc, double default_val, +add_real_reloption(bits32 kinds, const char *name, const char *desc, double default_val, double min_val, double max_val) { relopt_real *newoption; @@ -689,7 +717,7 @@ add_real_reloption(bits32 kinds, char *name, char *desc, double default_val, * the validation. */ void -add_string_reloption(bits32 kinds, char *name, char *desc, char *default_val, +add_string_reloption(bits32 kinds, const char *name, const char *desc, const char *default_val, validate_string_relopt validator) { relopt_string *newoption; @@ -742,7 +770,7 @@ add_string_reloption(bits32 kinds, char *name, char *desc, char *default_val, * but we declare them as Datums to avoid including array.h in reloptions.h. */ Datum -transformRelOptions(Datum oldOptions, List *defList, char *namspace, +transformRelOptions(Datum oldOptions, List *defList, const char *namspace, char *validnsps[], bool ignoreOids, bool isReset) { Datum result; @@ -786,12 +814,12 @@ transformRelOptions(Datum oldOptions, List *defList, char *namspace, } else if (def->defnamespace == NULL) continue; - else if (pg_strcasecmp(def->defnamespace, namspace) != 0) + else if (strcmp(def->defnamespace, namspace) != 0) continue; kw_len = strlen(def->defname); if (text_len > kw_len && text_str[kw_len] == '=' && - pg_strncasecmp(text_str, def->defname, kw_len) == 0) + strncmp(text_str, def->defname, kw_len) == 0) break; } if (!cell) @@ -839,8 +867,7 @@ transformRelOptions(Datum oldOptions, List *defList, char *namspace, { for (i = 0; validnsps[i]; i++) { - if (pg_strcasecmp(def->defnamespace, - validnsps[i]) == 0) + if (strcmp(def->defnamespace, validnsps[i]) == 0) { valid = true; break; @@ -855,7 +882,7 @@ transformRelOptions(Datum oldOptions, List *defList, char *namspace, def->defnamespace))); } - if (ignoreOids && pg_strcasecmp(def->defname, "oids") == 0) + if (ignoreOids && strcmp(def->defname, "oids") == 0) continue; /* ignore if not in the same namespace */ @@ -866,7 +893,7 @@ transformRelOptions(Datum oldOptions, List *defList, char *namspace, } else if (def->defnamespace == NULL) continue; - else if (pg_strcasecmp(def->defnamespace, namspace) != 0) + else if (strcmp(def->defnamespace, namspace) != 0) continue; /* @@ -983,6 +1010,7 @@ extractRelOptions(HeapTuple tuple, TupleDesc tupdesc, options = view_reloptions(datum, false); break; case RELKIND_INDEX: + case RELKIND_PARTITIONED_INDEX: options = index_reloptions(amoptions, datum, false); break; case RELKIND_FOREIGN_TABLE: @@ -1071,8 +1099,7 @@ parseRelOptions(Datum options, bool validate, relopt_kind kind, int kw_len = reloptions[j].gen->namelen; if (text_len > kw_len && text_str[kw_len] == '=' && - pg_strncasecmp(text_str, reloptions[j].gen->name, - kw_len) == 0) + strncmp(text_str, reloptions[j].gen->name, kw_len) == 0) { parse_one_reloption(&reloptions[j], text_str, text_len, validate); @@ -1251,7 +1278,7 @@ fillRelOptions(void *rdopts, Size basesize, for (j = 0; j < numelems; j++) { - if (pg_strcasecmp(options[i].gen->name, elems[j].optname) == 0) + if (strcmp(options[i].gen->name, elems[j].optname) == 0) { relopt_string *optstring; char *itempos = ((char *) rdopts) + elems[j].offset; @@ -1301,7 +1328,7 @@ fillRelOptions(void *rdopts, Size basesize, break; } } - if (validate && !found) + if (validate && !found && options[i].gen->kinds != RELOPT_KIND_INDEX) elog(ERROR, "reloption \"%s\" not found in parse table", options[i].gen->name); } @@ -1344,6 +1371,8 @@ default_reloptions(Datum reloptions, bool validate, relopt_kind kind) 
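The reloptions hunks above also const-qualify the add_*_reloption() entry points. A sketch of how an extension might register an option against the new signatures; the option name, description, and bounds are made up:

#include "postgres.h"
#include "fmgr.h"
#include "access/reloptions.h"

PG_MODULE_MAGIC;

/* Hypothetical extension entry point registering a custom heap reloption. */
void
_PG_init(void)
{
	add_int_reloption(RELOPT_KIND_HEAP,
					  "demo_batch_size",
					  "Batch size used by the demo extension",
					  64,		/* default */
					  1,		/* min */
					  1024);	/* max */
}
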
offsetof(StdRdOptions, autovacuum) + offsetof(AutoVacOpts, multixact_freeze_table_age)}, {"log_autovacuum_min_duration", RELOPT_TYPE_INT, offsetof(StdRdOptions, autovacuum) + offsetof(AutoVacOpts, log_min_duration)}, + {"toast_tuple_target", RELOPT_TYPE_INT, + offsetof(StdRdOptions, toast_tuple_target)}, {"autovacuum_vacuum_scale_factor", RELOPT_TYPE_REAL, offsetof(StdRdOptions, autovacuum) + offsetof(AutoVacOpts, vacuum_scale_factor)}, {"autovacuum_analyze_scale_factor", RELOPT_TYPE_REAL, @@ -1351,7 +1380,9 @@ default_reloptions(Datum reloptions, bool validate, relopt_kind kind) {"user_catalog_table", RELOPT_TYPE_BOOL, offsetof(StdRdOptions, user_catalog_table)}, {"parallel_workers", RELOPT_TYPE_INT, - offsetof(StdRdOptions, parallel_workers)} + offsetof(StdRdOptions, parallel_workers)}, + {"vacuum_cleanup_index_scale_factor", RELOPT_TYPE_REAL, + offsetof(StdRdOptions, vacuum_cleanup_index_scale_factor)} }; options = parseRelOptions(reloptions, validate, kind, &numoptions); @@ -1455,6 +1486,40 @@ index_reloptions(amoptions_function amoptions, Datum reloptions, bool validate) return amoptions(reloptions, validate); } +/* + * Parse generic options for all indexes. + * + * reloptions options as text[] datum + * validate error flag + */ +bytea * +index_generic_reloptions(Datum reloptions, bool validate) +{ + int numoptions; + GenericIndexOpts *idxopts; + relopt_value *options; + static const relopt_parse_elt tab[] = { + {"recheck_on_update", RELOPT_TYPE_BOOL, offsetof(GenericIndexOpts, recheck_on_update)} + }; + + options = parseRelOptions(reloptions, validate, + RELOPT_KIND_INDEX, + &numoptions); + + /* if none set, we're done */ + if (numoptions == 0) + return NULL; + + idxopts = allocateReloptStruct(sizeof(GenericIndexOpts), options, numoptions); + + fillRelOptions((void *) idxopts, sizeof(GenericIndexOpts), options, numoptions, + validate, tab, lengthof(tab)); + + pfree(options); + + return (bytea *) idxopts; +} + /* * Option parser for attribute reloptions */ @@ -1543,9 +1608,9 @@ AlterTableGetRelOptionsLockLevel(List *defList) for (i = 0; relOpts[i]; i++) { - if (pg_strncasecmp(relOpts[i]->name, - def->defname, - relOpts[i]->namelen + 1) == 0) + if (strncmp(relOpts[i]->name, + def->defname, + relOpts[i]->namelen + 1) == 0) { if (lockmode < relOpts[i]->lockmode) lockmode = relOpts[i]->lockmode; diff --git a/src/backend/access/common/scankey.c b/src/backend/access/common/scankey.c index 13edca1f94..781516c56a 100644 --- a/src/backend/access/common/scankey.c +++ b/src/backend/access/common/scankey.c @@ -3,7 +3,7 @@ * scankey.c * scan key support code * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * diff --git a/src/backend/access/common/session.c b/src/backend/access/common/session.c new file mode 100644 index 0000000000..ffa7432a3c --- /dev/null +++ b/src/backend/access/common/session.c @@ -0,0 +1,208 @@ +/*------------------------------------------------------------------------- + * + * session.c + * Encapsulation of user session. + * + * This is intended to contain data that needs to be shared between backends + * performing work for a client session. In particular such a session is + * shared between the leader and worker processes for parallel queries. At + * some later point it might also become useful infrastructure for separating + * backends from client connections, e.g. for the purpose of pooling. 
+ * + * Currently this infrastructure is used to share: + * - typemod registry for ephemeral row-types, i.e. BlessTupleDesc etc. + * + * Portions Copyright (c) 2017-2018, PostgreSQL Global Development Group + * + * src/backend/access/common/session.c + * + *------------------------------------------------------------------------- + */ +#include "postgres.h" + +#include "access/session.h" +#include "storage/lwlock.h" +#include "storage/shm_toc.h" +#include "utils/memutils.h" +#include "utils/typcache.h" + +/* Magic number for per-session DSM TOC. */ +#define SESSION_MAGIC 0xabb0fbc9 + +/* + * We want to create a DSA area to store shared state that has the same + * lifetime as a session. So far, it's only used to hold the shared record + * type registry. We don't want it to have to create any DSM segments just + * yet in common cases, so we'll give it enough space to hold a very small + * SharedRecordTypmodRegistry. + */ +#define SESSION_DSA_SIZE 0x30000 + +/* + * Magic numbers for state sharing in the per-session DSM area. + */ +#define SESSION_KEY_DSA UINT64CONST(0xFFFFFFFFFFFF0001) +#define SESSION_KEY_RECORD_TYPMOD_REGISTRY UINT64CONST(0xFFFFFFFFFFFF0002) + +/* This backend's current session. */ +Session *CurrentSession = NULL; + +/* + * Set up CurrentSession to point to an empty Session object. + */ +void +InitializeSession(void) +{ + CurrentSession = MemoryContextAllocZero(TopMemoryContext, sizeof(Session)); +} + +/* + * Initialize the per-session DSM segment if it isn't already initialized, and + * return its handle so that worker processes can attach to it. + * + * Unlike the per-context DSM segment, this segment and its contents are + * reused for future parallel queries. + * + * Return DSM_HANDLE_INVALID if a segment can't be allocated due to lack of + * resources. + */ +dsm_handle +GetSessionDsmHandle(void) +{ + shm_toc_estimator estimator; + shm_toc *toc; + dsm_segment *seg; + size_t typmod_registry_size; + size_t size; + void *dsa_space; + void *typmod_registry_space; + dsa_area *dsa; + MemoryContext old_context; + + /* + * If we have already created a session-scope DSM segment in this backend, + * return its handle. The same segment will be used for the rest of this + * backend's lifetime. + */ + if (CurrentSession->segment != NULL) + return dsm_segment_handle(CurrentSession->segment); + + /* Otherwise, prepare to set one up. */ + old_context = MemoryContextSwitchTo(TopMemoryContext); + shm_toc_initialize_estimator(&estimator); + + /* Estimate space for the per-session DSA area. */ + shm_toc_estimate_keys(&estimator, 1); + shm_toc_estimate_chunk(&estimator, SESSION_DSA_SIZE); + + /* Estimate space for the per-session record typmod registry. */ + typmod_registry_size = SharedRecordTypmodRegistryEstimate(); + shm_toc_estimate_keys(&estimator, 1); + shm_toc_estimate_chunk(&estimator, typmod_registry_size); + + /* Set up segment and TOC. */ + size = shm_toc_estimate(&estimator); + seg = dsm_create(size, DSM_CREATE_NULL_IF_MAXSEGMENTS); + if (seg == NULL) + { + MemoryContextSwitchTo(old_context); + + return DSM_HANDLE_INVALID; + } + toc = shm_toc_create(SESSION_MAGIC, + dsm_segment_address(seg), + size); + + /* Create per-session DSA area. */ + dsa_space = shm_toc_allocate(toc, SESSION_DSA_SIZE); + dsa = dsa_create_in_place(dsa_space, + SESSION_DSA_SIZE, + LWTRANCHE_SESSION_DSA, + seg); + shm_toc_insert(toc, SESSION_KEY_DSA, dsa_space); + + + /* Create session-scoped shared record typmod registry. 
*/ + typmod_registry_space = shm_toc_allocate(toc, typmod_registry_size); + SharedRecordTypmodRegistryInit((SharedRecordTypmodRegistry *) + typmod_registry_space, seg, dsa); + shm_toc_insert(toc, SESSION_KEY_RECORD_TYPMOD_REGISTRY, + typmod_registry_space); + + /* + * If we got this far, we can pin the shared memory so it stays mapped for + * the rest of this backend's life. If we don't make it this far, cleanup + * callbacks for anything we installed above (ie currently + * SharedRecordTypemodRegistry) will run when the DSM segment is detached + * by CurrentResourceOwner so we aren't left with a broken CurrentSession. + */ + dsm_pin_mapping(seg); + dsa_pin_mapping(dsa); + + /* Make segment and area available via CurrentSession. */ + CurrentSession->segment = seg; + CurrentSession->area = dsa; + + MemoryContextSwitchTo(old_context); + + return dsm_segment_handle(seg); +} + +/* + * Attach to a per-session DSM segment provided by a parallel leader. + */ +void +AttachSession(dsm_handle handle) +{ + dsm_segment *seg; + shm_toc *toc; + void *dsa_space; + void *typmod_registry_space; + dsa_area *dsa; + MemoryContext old_context; + + old_context = MemoryContextSwitchTo(TopMemoryContext); + + /* Attach to the DSM segment. */ + seg = dsm_attach(handle); + if (seg == NULL) + elog(ERROR, "could not attach to per-session DSM segment"); + toc = shm_toc_attach(SESSION_MAGIC, dsm_segment_address(seg)); + + /* Attach to the DSA area. */ + dsa_space = shm_toc_lookup(toc, SESSION_KEY_DSA, false); + dsa = dsa_attach_in_place(dsa_space, seg); + + /* Make them available via the current session. */ + CurrentSession->segment = seg; + CurrentSession->area = dsa; + + /* Attach to the shared record typmod registry. */ + typmod_registry_space = + shm_toc_lookup(toc, SESSION_KEY_RECORD_TYPMOD_REGISTRY, false); + SharedRecordTypmodRegistryAttach((SharedRecordTypmodRegistry *) + typmod_registry_space); + + /* Remain attached until end of backend or DetachSession(). */ + dsm_pin_mapping(seg); + dsa_pin_mapping(dsa); + + MemoryContextSwitchTo(old_context); +} + +/* + * Detach from the current session DSM segment. It's not strictly necessary + * to do this explicitly since we'll detach automatically at backend exit, but + * if we ever reuse parallel workers it will become important for workers to + * detach from one session before attaching to another. Note that this runs + * detach hooks. + */ +void +DetachSession(void) +{ + /* Runs detach hooks. */ + dsm_detach(CurrentSession->segment); + CurrentSession->segment = NULL; + dsa_detach(CurrentSession->area); + CurrentSession->area = NULL; +} diff --git a/src/backend/access/common/tupconvert.c b/src/backend/access/common/tupconvert.c index 57e44375ea..21fe8ae490 100644 --- a/src/backend/access/common/tupconvert.c +++ b/src/backend/access/common/tupconvert.c @@ -4,12 +4,10 @@ * Tuple conversion support. * * These functions provide conversion between rowtypes that are logically - * equivalent but might have columns in a different order or different sets - * of dropped columns. There is some overlap of functionality with the - * executor's "junkfilter" routines, but these functions work on bare - * HeapTuples rather than TupleTableSlots. + * equivalent but might have columns in a different order or different sets of + * dropped columns. 
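The session.c functions above are meant to be driven from the parallel machinery; the sketch below shows the intended leader/worker sequence in isolation. The wrapper functions are hypothetical and error handling is omitted:

#include "postgres.h"
#include "access/session.h"
#include "storage/dsm.h"

/* Leader: create (or reuse) the per-session segment and export its handle. */
static dsm_handle
leader_export_session(void)
{
	return GetSessionDsmHandle();	/* DSM_HANDLE_INVALID on failure */
}

/* Worker: attach for the duration of its work, then detach. */
static void
worker_run_with_session(dsm_handle handle)
{
	if (handle == DSM_HANDLE_INVALID)
		return;					/* leader could not set up a session segment */

	AttachSession(handle);

	/* ... parallel work that can now share blessed record typmods ... */

	DetachSession();
}
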
* - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * @@ -22,6 +20,7 @@ #include "access/htup_details.h" #include "access/tupconvert.h" +#include "executor/tuptable.h" #include "utils/builtins.h" @@ -31,7 +30,7 @@ * The setup routine checks whether the given source and destination tuple * descriptors are logically compatible. If not, it throws an error. * If so, it returns NULL if they are physically compatible (ie, no conversion - * is needed), else a TupleConversionMap that can be used by do_convert_tuple + * is needed), else a TupleConversionMap that can be used by execute_attr_map_tuple * to perform the conversion. * * The TupleConversionMap, if needed, is palloc'd in the caller's memory @@ -84,7 +83,7 @@ convert_tuples_by_position(TupleDesc indesc, same = true; for (i = 0; i < n; i++) { - Form_pg_attribute att = outdesc->attrs[i]; + Form_pg_attribute att = TupleDescAttr(outdesc, i); Oid atttypid; int32 atttypmod; @@ -95,7 +94,7 @@ convert_tuples_by_position(TupleDesc indesc, atttypmod = att->atttypmod; for (; j < indesc->natts; j++) { - att = indesc->attrs[j]; + att = TupleDescAttr(indesc, j); if (att->attisdropped) continue; nincols++; @@ -122,7 +121,7 @@ convert_tuples_by_position(TupleDesc indesc, /* Check for unused input columns */ for (; j < indesc->natts; j++) { - if (indesc->attrs[j]->attisdropped) + if (TupleDescAttr(indesc, j)->attisdropped) continue; nincols++; same = false; /* we'll complain below */ @@ -149,6 +148,9 @@ convert_tuples_by_position(TupleDesc indesc, { for (i = 0; i < n; i++) { + Form_pg_attribute inatt; + Form_pg_attribute outatt; + if (attrMap[i] == (i + 1)) continue; @@ -157,10 +159,12 @@ convert_tuples_by_position(TupleDesc indesc, * also dropped, we needn't convert. However, attlen and attalign * must agree. */ + inatt = TupleDescAttr(indesc, i); + outatt = TupleDescAttr(outdesc, i); if (attrMap[i] == 0 && - indesc->attrs[i]->attisdropped && - indesc->attrs[i]->attlen == outdesc->attrs[i]->attlen && - indesc->attrs[i]->attalign == outdesc->attrs[i]->attalign) + inatt->attisdropped && + inatt->attlen == outatt->attlen && + inatt->attalign == outatt->attalign) continue; same = false; @@ -209,50 +213,13 @@ convert_tuples_by_name(TupleDesc indesc, TupleConversionMap *map; AttrNumber *attrMap; int n = outdesc->natts; - int i; - bool same; /* Verify compatibility and prepare attribute-number map */ - attrMap = convert_tuples_by_name_map(indesc, outdesc, msg); + attrMap = convert_tuples_by_name_map_if_req(indesc, outdesc, msg); - /* - * Check to see if the map is one-to-one, in which case we need not do a - * tuple conversion. We must also insist that both tupdescs either - * specify or don't specify an OID column, else we need a conversion to - * add/remove space for that. (For some callers, presence or absence of - * an OID column perhaps would not really matter, but let's be safe.) - */ - if (indesc->natts == outdesc->natts && - indesc->tdhasoid == outdesc->tdhasoid) + if (attrMap == NULL) { - same = true; - for (i = 0; i < n; i++) - { - if (attrMap[i] == (i + 1)) - continue; - - /* - * If it's a dropped column and the corresponding input column is - * also dropped, we needn't convert. However, attlen and attalign - * must agree. 
- */ - if (attrMap[i] == 0 && - indesc->attrs[i]->attisdropped && - indesc->attrs[i]->attlen == outdesc->attrs[i]->attlen && - indesc->attrs[i]->attalign == outdesc->attrs[i]->attalign) - continue; - - same = false; - break; - } - } - else - same = false; - - if (same) - { - /* Runtime conversion is not needed */ - pfree(attrMap); + /* runtime conversion is not needed */ return NULL; } @@ -285,33 +252,55 @@ convert_tuples_by_name_map(TupleDesc indesc, const char *msg) { AttrNumber *attrMap; - int n; + int outnatts; + int innatts; int i; + int nextindesc = -1; - n = outdesc->natts; - attrMap = (AttrNumber *) palloc0(n * sizeof(AttrNumber)); - for (i = 0; i < n; i++) + outnatts = outdesc->natts; + innatts = indesc->natts; + + attrMap = (AttrNumber *) palloc0(outnatts * sizeof(AttrNumber)); + for (i = 0; i < outnatts; i++) { - Form_pg_attribute att = outdesc->attrs[i]; + Form_pg_attribute outatt = TupleDescAttr(outdesc, i); char *attname; Oid atttypid; int32 atttypmod; int j; - if (att->attisdropped) + if (outatt->attisdropped) continue; /* attrMap[i] is already 0 */ - attname = NameStr(att->attname); - atttypid = att->atttypid; - atttypmod = att->atttypmod; - for (j = 0; j < indesc->natts; j++) + attname = NameStr(outatt->attname); + atttypid = outatt->atttypid; + atttypmod = outatt->atttypmod; + + /* + * Now search for an attribute with the same name in the indesc. It + * seems likely that a partitioned table will have the attributes in + * the same order as the partition, so the search below is optimized + * for that case. It is possible that columns are dropped in one of + * the relations, but not the other, so we use the 'nextindesc' + * counter to track the starting point of the search. If the inner + * loop encounters dropped columns then it will have to skip over + * them, but it should leave 'nextindesc' at the correct position for + * the next outer loop. + */ + for (j = 0; j < innatts; j++) { - att = indesc->attrs[j]; - if (att->attisdropped) + Form_pg_attribute inatt; + + nextindesc++; + if (nextindesc >= innatts) + nextindesc = 0; + + inatt = TupleDescAttr(indesc, nextindesc); + if (inatt->attisdropped) continue; - if (strcmp(attname, NameStr(att->attname)) == 0) + if (strcmp(attname, NameStr(inatt->attname)) == 0) { /* Found it, check type */ - if (atttypid != att->atttypid || atttypmod != att->atttypmod) + if (atttypid != inatt->atttypid || atttypmod != inatt->atttypmod) ereport(ERROR, (errcode(ERRCODE_DATATYPE_MISMATCH), errmsg_internal("%s", _(msg)), @@ -319,7 +308,7 @@ convert_tuples_by_name_map(TupleDesc indesc, attname, format_type_be(outdesc->tdtypeid), format_type_be(indesc->tdtypeid)))); - attrMap[i] = (AttrNumber) (j + 1); + attrMap[i] = inatt->attnum; break; } } @@ -332,15 +321,81 @@ convert_tuples_by_name_map(TupleDesc indesc, format_type_be(outdesc->tdtypeid), format_type_be(indesc->tdtypeid)))); } - return attrMap; } +/* + * Returns mapping created by convert_tuples_by_name_map, or NULL if no + * conversion not required. This is a convenience routine for + * convert_tuples_by_name() and other functions. + */ +AttrNumber * +convert_tuples_by_name_map_if_req(TupleDesc indesc, + TupleDesc outdesc, + const char *msg) +{ + AttrNumber *attrMap; + int n = outdesc->natts; + int i; + bool same; + + /* Verify compatibility and prepare attribute-number map */ + attrMap = convert_tuples_by_name_map(indesc, outdesc, msg); + + /* + * Check to see if the map is one-to-one, in which case we need not do a + * tuple conversion. 
We must also insist that both tupdescs either + * specify or don't specify an OID column, else we need a conversion to + * add/remove space for that. (For some callers, presence or absence of + * an OID column perhaps would not really matter, but let's be safe.) + */ + if (indesc->natts == outdesc->natts && + indesc->tdhasoid == outdesc->tdhasoid) + { + same = true; + for (i = 0; i < n; i++) + { + Form_pg_attribute inatt; + Form_pg_attribute outatt; + + if (attrMap[i] == (i + 1)) + continue; + + /* + * If it's a dropped column and the corresponding input column is + * also dropped, we needn't convert. However, attlen and attalign + * must agree. + */ + inatt = TupleDescAttr(indesc, i); + outatt = TupleDescAttr(outdesc, i); + if (attrMap[i] == 0 && + inatt->attisdropped && + inatt->attlen == outatt->attlen && + inatt->attalign == outatt->attalign) + continue; + + same = false; + break; + } + } + else + same = false; + + if (same) + { + /* Runtime conversion is not needed */ + pfree(attrMap); + return NULL; + } + else + return attrMap; +} + /* * Perform conversion of a tuple according to the map. */ HeapTuple -do_convert_tuple(HeapTuple tuple, TupleConversionMap *map) +execute_attr_map_tuple(HeapTuple tuple, TupleConversionMap *map) { AttrNumber *attrMap = map->attrMap; Datum *invalues = map->invalues; @@ -374,6 +429,62 @@ do_convert_tuple(HeapTuple tuple, TupleConversionMap *map) return heap_form_tuple(map->outdesc, outvalues, outisnull); } +/* + * Perform conversion of a tuple slot according to the map. + */ +TupleTableSlot * +execute_attr_map_slot(AttrNumber *attrMap, + TupleTableSlot *in_slot, + TupleTableSlot *out_slot) +{ + Datum *invalues; + bool *inisnull; + Datum *outvalues; + bool *outisnull; + int outnatts; + int i; + + /* Sanity checks */ + Assert(in_slot->tts_tupleDescriptor != NULL && + out_slot->tts_tupleDescriptor != NULL); + Assert(in_slot->tts_values != NULL && out_slot->tts_values != NULL); + + outnatts = out_slot->tts_tupleDescriptor->natts; + + /* Extract all the values of the in slot. */ + slot_getallattrs(in_slot); + + /* Before doing the mapping, clear any old contents from the out slot */ + ExecClearTuple(out_slot); + + invalues = in_slot->tts_values; + inisnull = in_slot->tts_isnull; + outvalues = out_slot->tts_values; + outisnull = out_slot->tts_isnull; + + /* Transpose into proper fields of the out slot. */ + for (i = 0; i < outnatts; i++) + { + int j = attrMap[i] - 1; + + /* attrMap[i] == 0 means it's a NULL datum. */ + if (j == -1) + { + outvalues[i] = (Datum) 0; + outisnull[i] = true; + } + else + { + outvalues[i] = invalues[j]; + outisnull[i] = inisnull[j]; + } + } + + ExecStoreVirtualTuple(out_slot); + + return out_slot; +} + /* * Free a TupleConversionMap structure. 
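Since do_convert_tuple() is renamed to execute_attr_map_tuple() here, a short sketch of the calling convention; the wrapper function and message text are illustrative only:

#include "postgres.h"
#include "access/tupconvert.h"

/* Hypothetical wrapper: convert a tuple between two compatible rowtypes. */
static HeapTuple
convert_rowtype(HeapTuple tuple, TupleDesc indesc, TupleDesc outdesc)
{
	TupleConversionMap *map;

	map = convert_tuples_by_name(indesc, outdesc,
								 gettext_noop("could not convert row type"));
	if (map == NULL)
		return tuple;			/* physically compatible: no conversion needed */

	tuple = execute_attr_map_tuple(tuple, map);
	free_conversion_map(map);
	return tuple;
}
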
*/ diff --git a/src/backend/access/common/tupdesc.c b/src/backend/access/common/tupdesc.c index 9fd7b4e019..b0434b4672 100644 --- a/src/backend/access/common/tupdesc.c +++ b/src/backend/access/common/tupdesc.c @@ -3,7 +3,7 @@ * tupdesc.c * POSTGRES tuple descriptor support code * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * @@ -19,13 +19,17 @@ #include "postgres.h" +#include "access/hash.h" #include "access/htup_details.h" +#include "access/tupdesc_details.h" #include "catalog/pg_collation.h" #include "catalog/pg_type.h" #include "miscadmin.h" #include "parser/parse_type.h" #include "utils/acl.h" #include "utils/builtins.h" +#include "utils/datum.h" +#include "utils/hashutils.h" #include "utils/resowner_private.h" #include "utils/syscache.h" @@ -41,8 +45,6 @@ TupleDesc CreateTemplateTupleDesc(int natts, bool hasoid) { TupleDesc desc; - char *stg; - int attroffset; /* * sanity checks @@ -51,38 +53,18 @@ CreateTemplateTupleDesc(int natts, bool hasoid) /* * Allocate enough memory for the tuple descriptor, including the - * attribute rows, and set up the attribute row pointers. + * attribute rows. * - * Note: we assume that sizeof(struct tupleDesc) is a multiple of the - * struct pointer alignment requirement, and hence we don't need to insert - * alignment padding between the struct and the array of attribute row - * pointers. - * - * Note: Only the fixed part of pg_attribute rows is included in tuple - * descriptors, so we only need ATTRIBUTE_FIXED_PART_SIZE space per attr. - * That might need alignment padding, however. + * Note: the attribute array stride is sizeof(FormData_pg_attribute), + * since we declare the array elements as FormData_pg_attribute for + * notational convenience. However, we only guarantee that the first + * ATTRIBUTE_FIXED_PART_SIZE bytes of each entry are valid; most code that + * copies tupdesc entries around copies just that much. In principle that + * could be less due to trailing padding, although with the current + * definition of pg_attribute there probably isn't any padding. */ - attroffset = sizeof(struct tupleDesc) + natts * sizeof(Form_pg_attribute); - attroffset = MAXALIGN(attroffset); - stg = palloc(attroffset + natts * MAXALIGN(ATTRIBUTE_FIXED_PART_SIZE)); - desc = (TupleDesc) stg; - - if (natts > 0) - { - Form_pg_attribute *attrs; - int i; - - attrs = (Form_pg_attribute *) (stg + sizeof(struct tupleDesc)); - desc->attrs = attrs; - stg += attroffset; - for (i = 0; i < natts; i++) - { - attrs[i] = (Form_pg_attribute) stg; - stg += MAXALIGN(ATTRIBUTE_FIXED_PART_SIZE); - } - } - else - desc->attrs = NULL; + desc = (TupleDesc) palloc(offsetof(struct tupleDesc, attrs) + + natts * sizeof(FormData_pg_attribute)); /* * Initialize other fields of the tupdesc. @@ -99,12 +81,9 @@ CreateTemplateTupleDesc(int natts, bool hasoid) /* * CreateTupleDesc - * This function allocates a new TupleDesc pointing to a given + * This function allocates a new TupleDesc by copying a given * Form_pg_attribute array. * - * Note: if the TupleDesc is ever freed, the Form_pg_attribute array - * will not be freed thereby. - * * Tuple type ID information is initially set for an anonymous record type; * caller can overwrite this if needed. 
*/ @@ -112,20 +91,12 @@ TupleDesc CreateTupleDesc(int natts, bool hasoid, Form_pg_attribute *attrs) { TupleDesc desc; + int i; - /* - * sanity checks - */ - AssertArg(natts >= 0); + desc = CreateTemplateTupleDesc(natts, hasoid); - desc = (TupleDesc) palloc(sizeof(struct tupleDesc)); - desc->attrs = attrs; - desc->natts = natts; - desc->constr = NULL; - desc->tdtypeid = RECORDOID; - desc->tdtypmod = -1; - desc->tdhasoid = hasoid; - desc->tdrefcount = -1; /* assume not reference-counted */ + for (i = 0; i < natts; ++i) + memcpy(TupleDescAttr(desc, i), attrs[i], ATTRIBUTE_FIXED_PART_SIZE); return desc; } @@ -145,14 +116,26 @@ CreateTupleDescCopy(TupleDesc tupdesc) desc = CreateTemplateTupleDesc(tupdesc->natts, tupdesc->tdhasoid); + /* Flat-copy the attribute array */ + memcpy(TupleDescAttr(desc, 0), + TupleDescAttr(tupdesc, 0), + desc->natts * sizeof(FormData_pg_attribute)); + + /* + * Since we're not copying constraints and defaults, clear fields + * associated with them. + */ for (i = 0; i < desc->natts; i++) { - memcpy(desc->attrs[i], tupdesc->attrs[i], ATTRIBUTE_FIXED_PART_SIZE); - desc->attrs[i]->attnotnull = false; - desc->attrs[i]->atthasdef = false; - desc->attrs[i]->attidentity = '\0'; + Form_pg_attribute att = TupleDescAttr(desc, i); + + att->attnotnull = false; + att->atthasdef = false; + att->atthasmissing = false; + att->attidentity = '\0'; } + /* We can copy the tuple type identification, too */ desc->tdtypeid = tupdesc->tdtypeid; desc->tdtypmod = tupdesc->tdtypmod; @@ -173,11 +156,12 @@ CreateTupleDescCopyConstr(TupleDesc tupdesc) desc = CreateTemplateTupleDesc(tupdesc->natts, tupdesc->tdhasoid); - for (i = 0; i < desc->natts; i++) - { - memcpy(desc->attrs[i], tupdesc->attrs[i], ATTRIBUTE_FIXED_PART_SIZE); - } + /* Flat-copy the attribute array */ + memcpy(TupleDescAttr(desc, 0), + TupleDescAttr(tupdesc, 0), + desc->natts * sizeof(FormData_pg_attribute)); + /* Copy the TupleConstr data structure, if any */ if (constr) { TupleConstr *cpy = (TupleConstr *) palloc0(sizeof(TupleConstr)); @@ -195,6 +179,23 @@ CreateTupleDescCopyConstr(TupleDesc tupdesc) } } + if (constr->missing) + { + cpy->missing = (AttrMissing *) palloc(tupdesc->natts * sizeof(AttrMissing)); + memcpy(cpy->missing, constr->missing, tupdesc->natts * sizeof(AttrMissing)); + for (i = tupdesc->natts - 1; i >= 0; i--) + { + if (constr->missing[i].am_present) + { + Form_pg_attribute attr = TupleDescAttr(tupdesc, i); + + cpy->missing[i].am_value = datumCopy(constr->missing[i].am_value, + attr->attbyval, + attr->attlen); + } + } + } + if ((cpy->num_check = constr->num_check) > 0) { cpy->check = (ConstrCheck *) palloc(cpy->num_check * sizeof(ConstrCheck)); @@ -213,12 +214,51 @@ CreateTupleDescCopyConstr(TupleDesc tupdesc) desc->constr = cpy; } + /* We can copy the tuple type identification, too */ desc->tdtypeid = tupdesc->tdtypeid; desc->tdtypmod = tupdesc->tdtypmod; return desc; } +/* + * TupleDescCopy + * Copy a tuple descriptor into caller-supplied memory. + * The memory may be shared memory mapped at any address, and must + * be sufficient to hold TupleDescSize(src) bytes. + * + * !!! Constraints and defaults are not copied !!! + */ +void +TupleDescCopy(TupleDesc dst, TupleDesc src) +{ + int i; + + /* Flat-copy the header and attribute array */ + memcpy(dst, src, TupleDescSize(src)); + + /* + * Since we're not copying constraints and defaults, clear fields + * associated with them. 
+ */ + for (i = 0; i < dst->natts; i++) + { + Form_pg_attribute att = TupleDescAttr(dst, i); + + att->attnotnull = false; + att->atthasdef = false; + att->atthasmissing = false; + att->attidentity = '\0'; + } + dst->constr = NULL; + + /* + * Also, assume the destination is not to be ref-counted. (Copying the + * source's refcount would be wrong in any case.) + */ + dst->tdrefcount = -1; +} + /* * TupleDescCopyEntry * This function copies a single attribute structure from one tuple @@ -230,6 +270,9 @@ void TupleDescCopyEntry(TupleDesc dst, AttrNumber dstAttno, TupleDesc src, AttrNumber srcAttno) { + Form_pg_attribute dstAtt = TupleDescAttr(dst, dstAttno - 1); + Form_pg_attribute srcAtt = TupleDescAttr(src, srcAttno - 1); + /* * sanity checks */ @@ -240,8 +283,7 @@ TupleDescCopyEntry(TupleDesc dst, AttrNumber dstAttno, AssertArg(dstAttno >= 1); AssertArg(dstAttno <= dst->natts); - memcpy(dst->attrs[dstAttno - 1], src->attrs[srcAttno - 1], - ATTRIBUTE_FIXED_PART_SIZE); + memcpy(dstAtt, srcAtt, ATTRIBUTE_FIXED_PART_SIZE); /* * Aside from updating the attno, we'd better reset attcacheoff. @@ -252,13 +294,14 @@ TupleDescCopyEntry(TupleDesc dst, AttrNumber dstAttno, * by other uses of this function or TupleDescInitEntry. So we cheat a * bit to avoid a useless O(N^2) penalty. */ - dst->attrs[dstAttno - 1]->attnum = dstAttno; - dst->attrs[dstAttno - 1]->attcacheoff = -1; + dstAtt->attnum = dstAttno; + dstAtt->attcacheoff = -1; /* since we're not copying constraints or defaults, clear these */ - dst->attrs[dstAttno - 1]->attnotnull = false; - dst->attrs[dstAttno - 1]->atthasdef = false; - dst->attrs[dstAttno - 1]->attidentity = '\0'; + dstAtt->attnotnull = false; + dstAtt->atthasdef = false; + dstAtt->atthasmissing = false; + dstAtt->attidentity = '\0'; } /* @@ -288,6 +331,18 @@ FreeTupleDesc(TupleDesc tupdesc) } pfree(attrdef); } + if (tupdesc->constr->missing) + { + AttrMissing *attrmiss = tupdesc->constr->missing; + + for (i = tupdesc->natts - 1; i >= 0; i--) + { + if (attrmiss[i].am_present + && !TupleDescAttr(tupdesc, i)->attbyval) + pfree(DatumGetPointer(attrmiss[i].am_value)); + } + pfree(attrmiss); + } if (tupdesc->constr->num_check > 0) { ConstrCheck *check = tupdesc->constr->check; @@ -366,8 +421,8 @@ equalTupleDescs(TupleDesc tupdesc1, TupleDesc tupdesc2) for (i = 0; i < tupdesc1->natts; i++) { - Form_pg_attribute attr1 = tupdesc1->attrs[i]; - Form_pg_attribute attr2 = tupdesc2->attrs[i]; + Form_pg_attribute attr1 = TupleDescAttr(tupdesc1, i); + Form_pg_attribute attr2 = TupleDescAttr(tupdesc2, i); /* * We do not need to check every single field here: we can disregard @@ -448,6 +503,29 @@ equalTupleDescs(TupleDesc tupdesc1, TupleDesc tupdesc2) if (strcmp(defval1->adbin, defval2->adbin) != 0) return false; } + if (constr1->missing) + { + if (!constr2->missing) + return false; + for (i = 0; i < tupdesc1->natts; i++) + { + AttrMissing *missval1 = constr1->missing + i; + AttrMissing *missval2 = constr2->missing + i; + + if (missval1->am_present != missval2->am_present) + return false; + if (missval1->am_present) + { + Form_pg_attribute missatt1 = TupleDescAttr(tupdesc1, i); + + if (!datumIsEqual(missval1->am_value, missval2->am_value, + missatt1->attbyval, missatt1->attlen)) + return false; + } + } + } + else if (constr2->missing) + return false; n = constr1->num_check; if (n != (int) constr2->num_check) return false; @@ -478,6 +556,31 @@ equalTupleDescs(TupleDesc tupdesc1, TupleDesc tupdesc2) return true; } +/* + * hashTupleDesc + * Compute a hash value for a tuple descriptor. 
+ * + * If two tuple descriptors would be considered equal by equalTupleDescs() + * then their hash value will be equal according to this function. + * + * Note that currently contents of constraint are not hashed - it'd be a bit + * painful to do so, and conflicts just due to constraints are unlikely. + */ +uint32 +hashTupleDesc(TupleDesc desc) +{ + uint32 s; + int i; + + s = hash_combine(0, hash_uint32(desc->natts)); + s = hash_combine(s, hash_uint32(desc->tdtypeid)); + s = hash_combine(s, hash_uint32(desc->tdhasoid)); + for (i = 0; i < desc->natts; ++i) + s = hash_combine(s, hash_uint32(TupleDescAttr(desc, i)->atttypid)); + + return s; +} + /* * TupleDescInitEntry * This function initializes a single attribute structure in @@ -515,7 +618,7 @@ TupleDescInitEntry(TupleDesc desc, /* * initialize the attribute fields */ - att = desc->attrs[attributeNumber - 1]; + att = TupleDescAttr(desc, attributeNumber - 1); att->attrelid = 0; /* dummy value */ @@ -538,6 +641,7 @@ TupleDescInitEntry(TupleDesc desc, att->attnotnull = false; att->atthasdef = false; + att->atthasmissing = false; att->attidentity = '\0'; att->attisdropped = false; att->attislocal = true; @@ -580,7 +684,7 @@ TupleDescInitBuiltinEntry(TupleDesc desc, AssertArg(attributeNumber <= desc->natts); /* initialize the attribute fields */ - att = desc->attrs[attributeNumber - 1]; + att = TupleDescAttr(desc, attributeNumber - 1); att->attrelid = 0; /* dummy value */ /* unlike TupleDescInitEntry, we require an attribute name */ @@ -596,6 +700,7 @@ TupleDescInitBuiltinEntry(TupleDesc desc, att->attnotnull = false; att->atthasdef = false; + att->atthasmissing = false; att->attidentity = '\0'; att->attisdropped = false; att->attislocal = true; @@ -664,7 +769,7 @@ TupleDescInitEntryCollation(TupleDesc desc, AssertArg(attributeNumber >= 1); AssertArg(attributeNumber <= desc->natts); - desc->attrs[attributeNumber - 1]->attcollation = collationid; + TupleDescAttr(desc, attributeNumber - 1)->attcollation = collationid; } @@ -704,6 +809,7 @@ BuildDescForRelation(List *schema) { ColumnDef *entry = lfirst(l); AclResult aclresult; + Form_pg_attribute att; /* * for each entry in the list, get the name and type information from @@ -730,17 +836,18 @@ BuildDescForRelation(List *schema) TupleDescInitEntry(desc, attnum, attname, atttypid, atttypmod, attdim); + att = TupleDescAttr(desc, attnum - 1); /* Override TupleDescInitEntry's settings as requested */ TupleDescInitEntryCollation(desc, attnum, attcollation); if (entry->storage) - desc->attrs[attnum - 1]->attstorage = entry->storage; + att->attstorage = entry->storage; /* Fill in additional stuff not handled by TupleDescInitEntry */ - desc->attrs[attnum - 1]->attnotnull = entry->is_not_null; + att->attnotnull = entry->is_not_null; has_not_null |= entry->is_not_null; - desc->attrs[attnum - 1]->attislocal = entry->is_local; - desc->attrs[attnum - 1]->attinhcount = entry->inhcount; + att->attislocal = entry->is_local; + att->attinhcount = entry->inhcount; } if (has_not_null) @@ -749,6 +856,7 @@ BuildDescForRelation(List *schema) constr->has_not_null = true; constr->defval = NULL; + constr->missing = NULL; constr->num_defval = 0; constr->check = NULL; constr->num_check = 0; diff --git a/src/backend/access/gin/README b/src/backend/access/gin/README index 990b5ffa58..cc434b1feb 100644 --- a/src/backend/access/gin/README +++ b/src/backend/access/gin/README @@ -331,6 +331,40 @@ page-deletions safe; it stamps the deleted pages with an XID and keeps the deleted pages around with the right-link intact until all 
 concurrent scans have finished.)
 
+Predicate Locking
+-----------------
+
+GIN supports predicate locking, for serializable snapshot isolation.
+Predicate locks represent that a scan has scanned a range of values. They
+are not concerned with physical pages as such, but the logical key values.
+A predicate lock on a page covers the key range that would belong on that
+page, whether or not there are any matching tuples there currently. In other
+words, a predicate lock on an index page covers the "gaps" between the index
+tuples. To minimize false positives, predicate locks are acquired at the
+finest level possible.
+
+* Like in the B-tree index, it is enough to lock only leaf pages, because all
+  insertions happen at the leaf level.
+
+* In an equality search (i.e. not a partial match search), if a key entry has
+  a posting tree, we lock the posting tree root page, to represent a lock on
+  just that key entry. Otherwise, we lock the entry tree page. We also lock
+  the entry tree page if no match is found, to lock the "gap" where the entry
+  would've been, had there been one.
+
+* In a partial match search, we lock all the entry leaf pages that we scan,
+  in addition to locks on posting tree roots, to represent the "gaps" between
+  values.
+
+* In addition to the locks on entry leaf pages and posting tree roots, all
+  scans grab a lock on the metapage. This is to interlock with insertions to
+  the fast update pending list. An insertion to the pending list can really
+  belong anywhere in the tree, and the lock on the metapage represents that.
+
+The interlock for fastupdate pending lists means that with fastupdate=on,
+we effectively always grab a full-index lock, so you could get a lot of false
+positives.
+
 Compatibility
 -------------
 
diff --git a/src/backend/access/gin/ginarrayproc.c b/src/backend/access/gin/ginarrayproc.c
index a5238c3af5..d0fa4adf87 100644
--- a/src/backend/access/gin/ginarrayproc.c
+++ b/src/backend/access/gin/ginarrayproc.c
@@ -4,7 +4,7 @@
  * support functions for GIN's indexing of any array
  *
  *
- * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group
+ * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group
  * Portions Copyright (c) 1994, Regents of the University of California
  *
  * IDENTIFICATION
diff --git a/src/backend/access/gin/ginbtree.c b/src/backend/access/gin/ginbtree.c
index b02cb8ae58..d5a568106c 100644
--- a/src/backend/access/gin/ginbtree.c
+++ b/src/backend/access/gin/ginbtree.c
@@ -4,7 +4,7 @@
  * page utilities routines for the postgres inverted index access method.
* * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * IDENTIFICATION @@ -17,6 +17,7 @@ #include "access/gin_private.h" #include "access/ginxlog.h" #include "access/xloginsert.h" +#include "storage/predicate.h" #include "miscadmin.h" #include "utils/memutils.h" #include "utils/rel.h" @@ -41,7 +42,7 @@ ginTraverseLock(Buffer buffer, bool searchMode) page = BufferGetPage(buffer); if (GinPageIsLeaf(page)) { - if (searchMode == FALSE) + if (searchMode == false) { /* we should relock our page */ LockBuffer(buffer, GIN_UNLOCK); @@ -83,6 +84,9 @@ ginFindLeafPage(GinBtree btree, bool searchMode, Snapshot snapshot) stack->parent = NULL; stack->predictNumber = 1; + if (!searchMode) + CheckForSerializableConflictIn(btree->index, NULL, stack->buffer); + for (;;) { Page page; @@ -107,7 +111,7 @@ ginFindLeafPage(GinBtree btree, bool searchMode, Snapshot snapshot) * ok, page is correctly locked, we should check to move right .., * root never has a right link, so small optimization */ - while (btree->fullScan == FALSE && stack->blkno != btree->rootBlkno && + while (btree->fullScan == false && stack->blkno != btree->rootBlkno && btree->isMoveRight(btree, page)) { BlockNumber rightlink = GinPageGetOpaque(page)->rightlink; @@ -207,7 +211,7 @@ freeGinBtreeStack(GinBtreeStack *stack) /* * Try to find parent for current stack position. Returns correct parent and * child's offset in stack->parent. The root page is never released, to - * to prevent conflict with vacuum process. + * prevent conflict with vacuum process. */ static void ginFindParents(GinBtree btree, GinBtreeStack *stack) @@ -515,6 +519,19 @@ ginPlaceToPage(GinBtree btree, GinBtreeStack *stack, btree->fillRoot(btree, newrootpg, BufferGetBlockNumber(lbuffer), newlpage, BufferGetBlockNumber(rbuffer), newrpage); + + if (GinPageIsLeaf(BufferGetPage(stack->buffer))) + { + + PredicateLockPageSplit(btree->index, + BufferGetBlockNumber(stack->buffer), + BufferGetBlockNumber(lbuffer)); + + PredicateLockPageSplit(btree->index, + BufferGetBlockNumber(stack->buffer), + BufferGetBlockNumber(rbuffer)); + } + } else { @@ -524,6 +541,14 @@ ginPlaceToPage(GinBtree btree, GinBtreeStack *stack, GinPageGetOpaque(newrpage)->rightlink = savedRightLink; GinPageGetOpaque(newlpage)->flags |= GIN_INCOMPLETE_SPLIT; GinPageGetOpaque(newlpage)->rightlink = BufferGetBlockNumber(rbuffer); + + if (GinPageIsLeaf(BufferGetPage(stack->buffer))) + { + + PredicateLockPageSplit(btree->index, + BufferGetBlockNumber(stack->buffer), + BufferGetBlockNumber(rbuffer)); + } } /* diff --git a/src/backend/access/gin/ginbulk.c b/src/backend/access/gin/ginbulk.c index 4ff149e59a..f2fbe6fa3f 100644 --- a/src/backend/access/gin/ginbulk.c +++ b/src/backend/access/gin/ginbulk.c @@ -4,7 +4,7 @@ * routines for fast build of inverted index * * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * IDENTIFICATION @@ -27,7 +27,7 @@ /* Combiner function for rbtree.c */ static void -ginCombineData(RBNode *existing, const RBNode *newdata, void *arg) +ginCombineData(RBTNode *existing, const RBTNode *newdata, void *arg) { GinEntryAccumulator *eo = (GinEntryAccumulator *) existing; const GinEntryAccumulator *en = (const GinEntryAccumulator *) newdata; @@ -52,7 +52,7 @@ 
ginCombineData(RBNode *existing, const RBNode *newdata, void *arg) } /* If item pointers are not ordered, they will need to be sorted later */ - if (eo->shouldSort == FALSE) + if (eo->shouldSort == false) { int res; @@ -60,7 +60,7 @@ ginCombineData(RBNode *existing, const RBNode *newdata, void *arg) Assert(res != 0); if (res > 0) - eo->shouldSort = TRUE; + eo->shouldSort = true; } eo->list[eo->count] = en->list[0]; @@ -69,7 +69,7 @@ ginCombineData(RBNode *existing, const RBNode *newdata, void *arg) /* Comparator function for rbtree.c */ static int -cmpEntryAccumulator(const RBNode *a, const RBNode *b, void *arg) +cmpEntryAccumulator(const RBTNode *a, const RBTNode *b, void *arg) { const GinEntryAccumulator *ea = (const GinEntryAccumulator *) a; const GinEntryAccumulator *eb = (const GinEntryAccumulator *) b; @@ -81,7 +81,7 @@ cmpEntryAccumulator(const RBNode *a, const RBNode *b, void *arg) } /* Allocator function for rbtree.c */ -static RBNode * +static RBTNode * ginAllocEntryAccumulator(void *arg) { BuildAccumulator *accum = (BuildAccumulator *) arg; @@ -89,7 +89,7 @@ ginAllocEntryAccumulator(void *arg) /* * Allocate memory by rather big chunks to decrease overhead. We have no - * need to reclaim RBNodes individually, so this costs nothing. + * need to reclaim RBTNodes individually, so this costs nothing. */ if (accum->entryallocator == NULL || accum->eas_used >= DEF_NENTRY) { @@ -98,11 +98,11 @@ ginAllocEntryAccumulator(void *arg) accum->eas_used = 0; } - /* Allocate new RBNode from current chunk */ + /* Allocate new RBTNode from current chunk */ ea = accum->entryallocator + accum->eas_used; accum->eas_used++; - return (RBNode *) ea; + return (RBTNode *) ea; } void @@ -112,12 +112,12 @@ ginInitBA(BuildAccumulator *accum) accum->allocatedMemory = 0; accum->entryallocator = NULL; accum->eas_used = 0; - accum->tree = rb_create(sizeof(GinEntryAccumulator), - cmpEntryAccumulator, - ginCombineData, - ginAllocEntryAccumulator, - NULL, /* no freefunc needed */ - (void *) accum); + accum->tree = rbt_create(sizeof(GinEntryAccumulator), + cmpEntryAccumulator, + ginCombineData, + ginAllocEntryAccumulator, + NULL, /* no freefunc needed */ + (void *) accum); } /* @@ -127,9 +127,10 @@ ginInitBA(BuildAccumulator *accum) static Datum getDatumCopy(BuildAccumulator *accum, OffsetNumber attnum, Datum value) { - Form_pg_attribute att = accum->ginstate->origTupdesc->attrs[attnum - 1]; + Form_pg_attribute att; Datum res; + att = TupleDescAttr(accum->ginstate->origTupdesc, attnum - 1); if (att->attbyval) res = value; else @@ -162,8 +163,8 @@ ginInsertBAEntry(BuildAccumulator *accum, /* temporarily set up single-entry itempointer list */ eatmp.list = heapptr; - ea = (GinEntryAccumulator *) rb_insert(accum->tree, (RBNode *) &eatmp, - &isNew); + ea = (GinEntryAccumulator *) rbt_insert(accum->tree, (RBTNode *) &eatmp, + &isNew); if (isNew) { @@ -175,7 +176,7 @@ ginInsertBAEntry(BuildAccumulator *accum, ea->key = getDatumCopy(accum, attnum, key); ea->maxcount = DEF_NPTR; ea->count = 1; - ea->shouldSort = FALSE; + ea->shouldSort = false; ea->list = (ItemPointerData *) palloc(sizeof(ItemPointerData) * DEF_NPTR); ea->list[0] = *heapptr; @@ -255,7 +256,7 @@ qsortCompareItemPointers(const void *a, const void *b) void ginBeginBAScan(BuildAccumulator *accum) { - rb_begin_iterate(accum->tree, LeftRightWalk, &accum->tree_walk); + rbt_begin_iterate(accum->tree, LeftRightWalk, &accum->tree_walk); } /* @@ -271,7 +272,7 @@ ginGetBAEntry(BuildAccumulator *accum, GinEntryAccumulator *entry; ItemPointerData *list; - entry = 
(GinEntryAccumulator *) rb_iterate(&accum->tree_walk); + entry = (GinEntryAccumulator *) rbt_iterate(&accum->tree_walk); if (entry == NULL) return NULL; /* no more entries */ diff --git a/src/backend/access/gin/gindatapage.c b/src/backend/access/gin/gindatapage.c index 2e5ea47976..9f20513811 100644 --- a/src/backend/access/gin/gindatapage.c +++ b/src/backend/access/gin/gindatapage.c @@ -4,7 +4,7 @@ * routines for handling GIN posting tree pages. * * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * IDENTIFICATION @@ -19,6 +19,7 @@ #include "access/xloginsert.h" #include "lib/ilist.h" #include "miscadmin.h" +#include "storage/predicate.h" #include "utils/rel.h" /* @@ -235,9 +236,9 @@ dataIsMoveRight(GinBtree btree, Page page) ItemPointer iptr = GinDataPageGetRightBound(page); if (GinPageRightMost(page)) - return FALSE; + return false; - return (ginCompareItemPointers(&btree->itemptr, iptr) > 0) ? TRUE : FALSE; + return (ginCompareItemPointers(&btree->itemptr, iptr) > 0) ? true : false; } /* @@ -1393,7 +1394,8 @@ disassembleLeaf(Page page) { /* * A pre-9.4 format uncompressed page is represented by a single - * segment, with an array of items. + * segment, with an array of items. The corner case is uncompressed + * page containing no items, which is represented as no segments. */ ItemPointer uncompressed; int nuncompressed; @@ -1401,15 +1403,18 @@ disassembleLeaf(Page page) uncompressed = dataLeafPageGetUncompressed(page, &nuncompressed); - seginfo = palloc(sizeof(leafSegmentInfo)); + if (nuncompressed > 0) + { + seginfo = palloc(sizeof(leafSegmentInfo)); - seginfo->action = GIN_SEGMENT_REPLACE; - seginfo->seg = NULL; - seginfo->items = palloc(nuncompressed * sizeof(ItemPointerData)); - memcpy(seginfo->items, uncompressed, nuncompressed * sizeof(ItemPointerData)); - seginfo->nitems = nuncompressed; + seginfo->action = GIN_SEGMENT_REPLACE; + seginfo->seg = NULL; + seginfo->items = palloc(nuncompressed * sizeof(ItemPointerData)); + memcpy(seginfo->items, uncompressed, nuncompressed * sizeof(ItemPointerData)); + seginfo->nitems = nuncompressed; - dlist_push_tail(&leaf->segments, &seginfo->node); + dlist_push_tail(&leaf->segments, &seginfo->node); + } leaf->oldformat = true; } @@ -1759,7 +1764,7 @@ leafRepackItems(disassembledLeaf *leaf, ItemPointer remaining) */ BlockNumber createPostingTree(Relation index, ItemPointerData *items, uint32 nitems, - GinStatsData *buildStats) + GinStatsData *buildStats, Buffer entrybuffer) { BlockNumber blkno; Buffer buffer; @@ -1810,6 +1815,12 @@ createPostingTree(Relation index, ItemPointerData *items, uint32 nitems, page = BufferGetPage(buffer); blkno = BufferGetBlockNumber(buffer); + /* + * Copy any predicate locks from the entry tree leaf (containing posting + * list) to the posting tree. 
+ */ + PredicateLockPageSplit(index, BufferGetBlockNumber(entrybuffer), blkno); + START_CRIT_SECTION(); PageRestoreTempPage(tmppage, page); @@ -1857,7 +1868,7 @@ createPostingTree(Relation index, ItemPointerData *items, uint32 nitems, return blkno; } -void +static void ginPrepareDataScan(GinBtree btree, Relation index, BlockNumber rootBlkno) { memset(btree, 0, sizeof(GinBtreeData)); @@ -1875,9 +1886,9 @@ ginPrepareDataScan(GinBtree btree, Relation index, BlockNumber rootBlkno) btree->fillRoot = ginDataFillRoot; btree->prepareDownlink = dataPrepareDownlink; - btree->isData = TRUE; - btree->fullScan = FALSE; - btree->isBuild = FALSE; + btree->isData = true; + btree->fullScan = false; + btree->isBuild = false; } /* @@ -1919,9 +1930,9 @@ ginScanBeginPostingTree(GinBtree btree, Relation index, BlockNumber rootBlkno, ginPrepareDataScan(btree, index, rootBlkno); - btree->fullScan = TRUE; + btree->fullScan = true; - stack = ginFindLeafPage(btree, TRUE, snapshot); + stack = ginFindLeafPage(btree, true, snapshot); return stack; } diff --git a/src/backend/access/gin/ginentrypage.c b/src/backend/access/gin/ginentrypage.c index d5cc70258a..184cc0af3e 100644 --- a/src/backend/access/gin/ginentrypage.c +++ b/src/backend/access/gin/ginentrypage.c @@ -4,7 +4,7 @@ * routines for handling GIN entry tree pages. * * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * IDENTIFICATION @@ -30,7 +30,7 @@ static void entrySplitPage(GinBtree btree, Buffer origbuf, * Form a tuple for entry tree. * * If the tuple would be too big to be stored, function throws a suitable - * error if errorTooBig is TRUE, or returns NULL if errorTooBig is FALSE. + * error if errorTooBig is true, or returns NULL if errorTooBig is false. * * See src/backend/access/gin/README for a description of the index tuple * format that is being built here. We build on the assumption that we @@ -249,7 +249,7 @@ entryIsMoveRight(GinBtree btree, Page page) GinNullCategory category; if (GinPageRightMost(page)) - return FALSE; + return false; itup = getRightMostTuple(page); attnum = gintuple_get_attrnum(btree->ginstate, itup); @@ -258,9 +258,9 @@ entryIsMoveRight(GinBtree btree, Page page) if (ginCompareAttEntries(btree->ginstate, btree->entryAttnum, btree->entryKey, btree->entryCategory, attnum, key, category) > 0) - return TRUE; + return true; - return FALSE; + return false; } /* @@ -356,7 +356,7 @@ entryLocateLeafEntry(GinBtree btree, GinBtreeStack *stack) if (btree->fullScan) { stack->off = FirstOffsetNumber; - return TRUE; + return true; } low = FirstOffsetNumber; @@ -616,7 +616,7 @@ entrySplitPage(GinBtree btree, Buffer origbuf, Page lpage = PageGetTempPageCopy(BufferGetPage(origbuf)); Page rpage = PageGetTempPageCopy(BufferGetPage(origbuf)); Size pageSize = PageGetPageSize(lpage); - char tupstore[2 * BLCKSZ]; + PGAlignedBlock tupstore[2]; /* could need 2 pages' worth of tuples */ entryPreparePage(btree, lpage, off, insertData, updateblkno); @@ -625,7 +625,7 @@ entrySplitPage(GinBtree btree, Buffer origbuf, * one after another in a temporary workspace. 
*/ maxoff = PageGetMaxOffsetNumber(lpage); - ptr = tupstore; + ptr = tupstore[0].data; for (i = FirstOffsetNumber; i <= maxoff; i++) { if (i == off) @@ -658,7 +658,7 @@ entrySplitPage(GinBtree btree, Buffer origbuf, GinInitPage(rpage, GinPageGetOpaque(lpage)->flags, pageSize); GinInitPage(lpage, GinPageGetOpaque(rpage)->flags, pageSize); - ptr = tupstore; + ptr = tupstore[0].data; maxoff++; lsize = 0; @@ -762,9 +762,9 @@ ginPrepareEntryScan(GinBtree btree, OffsetNumber attnum, btree->fillRoot = ginEntryFillRoot; btree->prepareDownlink = entryPrepareDownlink; - btree->isData = FALSE; - btree->fullScan = FALSE; - btree->isBuild = FALSE; + btree->isData = false; + btree->fullScan = false; + btree->isBuild = false; btree->entryAttnum = attnum; btree->entryKey = key; diff --git a/src/backend/access/gin/ginfast.c b/src/backend/access/gin/ginfast.c index 59e435465a..ca2a32bd25 100644 --- a/src/backend/access/gin/ginfast.c +++ b/src/backend/access/gin/ginfast.c @@ -7,7 +7,7 @@ * transfer pending entries into the regular index structure. This * wins because bulk insertion is much more efficient than retail. * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * IDENTIFICATION @@ -31,6 +31,7 @@ #include "postmaster/autovacuum.h" #include "storage/indexfsm.h" #include "storage/lmgr.h" +#include "storage/predicate.h" #include "utils/builtins.h" /* GUC parameter */ @@ -63,18 +64,15 @@ writeListPage(Relation index, Buffer buffer, size = 0; OffsetNumber l, off; - char *workspace; + PGAlignedBlock workspace; char *ptr; - /* workspace could be a local array; we use palloc for alignment */ - workspace = palloc(BLCKSZ); - START_CRIT_SECTION(); GinInitBuffer(buffer, GIN_LIST); off = FirstOffsetNumber; - ptr = workspace; + ptr = workspace.data; for (i = 0; i < ntuples; i++) { @@ -126,7 +124,7 @@ writeListPage(Relation index, Buffer buffer, XLogRegisterData((char *) &data, sizeof(ginxlogInsertListPage)); XLogRegisterBuffer(0, buffer, REGBUF_WILL_INIT); - XLogRegisterBufData(0, workspace, size); + XLogRegisterBufData(0, workspace.data, size); recptr = XLogInsert(RM_GIN_ID, XLOG_GIN_INSERT_LISTPAGE); PageSetLSN(page, recptr); @@ -139,8 +137,6 @@ writeListPage(Relation index, Buffer buffer, END_CRIT_SECTION(); - pfree(workspace); - return freesize; } @@ -245,6 +241,13 @@ ginHeapTupleFastInsert(GinState *ginstate, GinTupleCollector *collector) metabuffer = ReadBuffer(index, GIN_METAPAGE_BLKNO); metapage = BufferGetPage(metabuffer); + /* + * An insertion to the pending list could logically belong anywhere in the + * tree, so it conflicts with all serializable scans. All scans acquire a + * predicate lock on the metabuffer to represent that. + */ + CheckForSerializableConflictIn(index, NULL, metabuffer); + if (collector->sumsize + collector->ntuples * sizeof(ItemIdData) > GinListPageSize) { /* @@ -396,6 +399,16 @@ ginHeapTupleFastInsert(GinState *ginstate, GinTupleCollector *collector) MarkBufferDirty(buffer); } + /* + * Set pd_lower just past the end of the metadata. This is essential, + * because without doing so, metadata will be lost if xlog.c compresses + * the page. (We must do this here because pre-v11 versions of PG did not + * set the metapage's pd_lower correctly, so a pg_upgraded index might + * contain the wrong value.) 
+ */ + ((PageHeader) metapage)->pd_lower = + ((char *) metadata + sizeof(GinMetaPageData)) - (char *) metapage; + /* * Write metabuffer, make xlog entry */ @@ -407,7 +420,7 @@ ginHeapTupleFastInsert(GinState *ginstate, GinTupleCollector *collector) memcpy(&data.metadata, metadata, sizeof(GinMetaPageData)); - XLogRegisterBuffer(0, metabuffer, REGBUF_WILL_INIT); + XLogRegisterBuffer(0, metabuffer, REGBUF_WILL_INIT | REGBUF_STANDARD); XLogRegisterData((char *) &data, sizeof(ginxlogUpdateMeta)); recptr = XLogInsert(RM_GIN_ID, XLOG_GIN_UPDATE_META_PAGE); @@ -440,8 +453,12 @@ ginHeapTupleFastInsert(GinState *ginstate, GinTupleCollector *collector) END_CRIT_SECTION(); + /* + * Since it could contend with concurrent cleanup process we cleanup + * pending list not forcibly. + */ if (needCleanup) - ginInsertCleanup(ginstate, false, true, NULL); + ginInsertCleanup(ginstate, false, true, false, NULL); } /* @@ -572,6 +589,16 @@ shiftList(Relation index, Buffer metabuffer, BlockNumber newHead, metadata->nPendingHeapTuples = 0; } + /* + * Set pd_lower just past the end of the metadata. This is essential, + * because without doing so, metadata will be lost if xlog.c + * compresses the page. (We must do this here because pre-v11 + * versions of PG did not set the metapage's pd_lower correctly, so a + * pg_upgraded index might contain the wrong value.) + */ + ((PageHeader) metapage)->pd_lower = + ((char *) metadata + sizeof(GinMetaPageData)) - (char *) metapage; + MarkBufferDirty(metabuffer); for (i = 0; i < data.ndeleted; i++) @@ -586,7 +613,8 @@ shiftList(Relation index, Buffer metabuffer, BlockNumber newHead, XLogRecPtr recptr; XLogBeginInsert(); - XLogRegisterBuffer(0, metabuffer, REGBUF_WILL_INIT); + XLogRegisterBuffer(0, metabuffer, + REGBUF_WILL_INIT | REGBUF_STANDARD); for (i = 0; i < data.ndeleted; i++) XLogRegisterBuffer(i + 1, buffers[i], REGBUF_WILL_INIT); @@ -727,7 +755,8 @@ processPendingPage(BuildAccumulator *accum, KeyArray *ka, */ void ginInsertCleanup(GinState *ginstate, bool full_clean, - bool fill_fsm, IndexBulkDeleteResult *stats) + bool fill_fsm, bool forceCleanup, + IndexBulkDeleteResult *stats) { Relation index = ginstate->index; Buffer metabuffer, @@ -744,7 +773,6 @@ ginInsertCleanup(GinState *ginstate, bool full_clean, bool cleanupFinish = false; bool fsm_vac = false; Size workMemory; - bool inVacuum = (stats == NULL); /* * We would like to prevent concurrent cleanup process. 
For that we will @@ -753,7 +781,7 @@ ginInsertCleanup(GinState *ginstate, bool full_clean, * insertion into pending list */ - if (inVacuum) + if (forceCleanup) { /* * We are called from [auto]vacuum/analyze or gin_clean_pending_list() @@ -968,7 +996,6 @@ ginInsertCleanup(GinState *ginstate, bool full_clean, if (fsm_vac && fill_fsm) IndexFreeSpaceMapVacuum(index); - /* Clean up temporary space */ MemoryContextSwitchTo(oldCtx); MemoryContextDelete(opCtx); @@ -1011,12 +1038,12 @@ gin_clean_pending_list(PG_FUNCTION_ARGS) /* User must own the index (comparable to privileges needed for VACUUM) */ if (!pg_class_ownercheck(indexoid, GetUserId())) - aclcheck_error(ACLCHECK_NOT_OWNER, ACL_KIND_CLASS, + aclcheck_error(ACLCHECK_NOT_OWNER, OBJECT_INDEX, RelationGetRelationName(indexRel)); memset(&stats, 0, sizeof(stats)); initGinState(&ginstate, indexRel); - ginInsertCleanup(&ginstate, true, true, &stats); + ginInsertCleanup(&ginstate, true, true, true, &stats); index_close(indexRel, AccessShareLock); diff --git a/src/backend/access/gin/ginget.c b/src/backend/access/gin/ginget.c index 56a5bf47b8..8466d947ea 100644 --- a/src/backend/access/gin/ginget.c +++ b/src/backend/access/gin/ginget.c @@ -4,7 +4,7 @@ * fetch tuples from a GIN scan. * * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * IDENTIFICATION @@ -17,8 +17,10 @@ #include "access/gin_private.h" #include "access/relscan.h" #include "miscadmin.h" +#include "storage/predicate.h" #include "utils/datum.h" #include "utils/memutils.h" +#include "utils/rel.h" /* GUC parameter */ int GinFuzzySearchLimit = 0; @@ -37,7 +39,7 @@ typedef struct pendingPosition * Goes to the next page if current offset is outside of bounds */ static bool -moveRightIfItNeeded(GinBtreeData *btree, GinBtreeStack *stack) +moveRightIfItNeeded(GinBtreeData *btree, GinBtreeStack *stack, Snapshot snapshot) { Page page = BufferGetPage(stack->buffer); @@ -52,6 +54,7 @@ moveRightIfItNeeded(GinBtreeData *btree, GinBtreeStack *stack) stack->buffer = ginStepRight(stack->buffer, btree->index, GIN_SHARE); stack->blkno = BufferGetBlockNumber(stack->buffer); stack->off = FirstOffsetNumber; + PredicateLockPage(btree->index, stack->blkno, snapshot); } return true; @@ -73,6 +76,7 @@ scanPostingTree(Relation index, GinScanEntry scanEntry, /* Descend to the leftmost leaf page */ stack = ginScanBeginPostingTree(&btree, index, rootPostingTree, snapshot); buffer = stack->buffer; + IncrBufferRefCount(buffer); /* prevent unpin in freeGinBtreeStack */ freeGinBtreeStack(stack); @@ -129,7 +133,13 @@ collectMatchBitmap(GinBtreeData *btree, GinBtreeStack *stack, /* Locate tupdesc entry for key column (for attbyval/attlen data) */ attnum = scanEntry->attnum; - attr = btree->ginstate->origTupdesc->attrs[attnum - 1]; + attr = TupleDescAttr(btree->ginstate->origTupdesc, attnum - 1); + + /* + * Predicate lock entry leaf page, following pages will be locked by + * moveRightIfItNeeded() + */ + PredicateLockPage(btree->index, stack->buffer, snapshot); for (;;) { @@ -141,7 +151,7 @@ collectMatchBitmap(GinBtreeData *btree, GinBtreeStack *stack, /* * stack->off points to the interested entry, buffer is already locked */ - if (moveRightIfItNeeded(btree, stack) == false) + if (moveRightIfItNeeded(btree, stack, snapshot) == false) return true; page = BufferGetPage(stack->buffer); @@ -224,6 +234,13 @@ collectMatchBitmap(GinBtreeData *btree, 
GinBtreeStack *stack, LockBuffer(stack->buffer, GIN_UNLOCK); + /* + * Acquire predicate lock on the posting tree. We already hold a + * lock on the entry page, but insertions to the posting tree + * don't check for conflicts on that level. + */ + PredicateLockPage(btree->index, rootPostingTree, snapshot); + /* Collect all the TIDs in this entry's posting tree */ scanPostingTree(btree->index, scanEntry, rootPostingTree, snapshot); @@ -250,7 +267,7 @@ collectMatchBitmap(GinBtreeData *btree, GinBtreeStack *stack, Datum newDatum; GinNullCategory newCategory; - if (moveRightIfItNeeded(btree, stack) == false) + if (moveRightIfItNeeded(btree, stack, snapshot) == false) elog(ERROR, "lost saved point in index"); /* must not happen !!! */ page = BufferGetPage(stack->buffer); @@ -311,7 +328,7 @@ startScanEntry(GinState *ginstate, GinScanEntry entry, Snapshot snapshot) entry->nlist = 0; entry->matchBitmap = NULL; entry->matchResult = NULL; - entry->reduceResult = FALSE; + entry->reduceResult = false; entry->predictNumberResult = 0; /* @@ -323,10 +340,11 @@ startScanEntry(GinState *ginstate, GinScanEntry entry, Snapshot snapshot) ginstate); stackEntry = ginFindLeafPage(&btreeEntry, true, snapshot); page = BufferGetPage(stackEntry->buffer); + /* ginFindLeafPage() will have already checked snapshot age. */ - needUnlock = TRUE; + needUnlock = true; - entry->isFinished = TRUE; + entry->isFinished = true; if (entry->isPartialMatch || entry->queryCategory == GIN_CAT_EMPTY_QUERY) @@ -363,7 +381,7 @@ startScanEntry(GinState *ginstate, GinScanEntry entry, Snapshot snapshot) if (entry->matchBitmap && !tbm_is_empty(entry->matchBitmap)) { entry->matchIterator = tbm_begin_iterate(entry->matchBitmap); - entry->isFinished = FALSE; + entry->isFinished = false; } } else if (btreeEntry.findItem(&btreeEntry, stackEntry)) @@ -377,6 +395,13 @@ startScanEntry(GinState *ginstate, GinScanEntry entry, Snapshot snapshot) Page page; ItemPointerData minItem; + /* + * This is an equality scan, so lock the root of the posting tree. + * It represents a lock on the exact key value, and covers all the + * items in the posting tree. + */ + PredicateLockPage(ginstate->index, rootPostingTree, snapshot); + /* * We should unlock entry page before touching posting tree to * prevent deadlocks with vacuum processes. Because entry is never @@ -385,7 +410,7 @@ startScanEntry(GinState *ginstate, GinScanEntry entry, Snapshot snapshot) * root of posting tree. */ LockBuffer(stackEntry->buffer, GIN_UNLOCK); - needUnlock = FALSE; + needUnlock = false; stack = ginScanBeginPostingTree(&entry->btree, ginstate->index, rootPostingTree, snapshot); @@ -410,17 +435,40 @@ startScanEntry(GinState *ginstate, GinScanEntry entry, Snapshot snapshot) LockBuffer(entry->buffer, GIN_UNLOCK); freeGinBtreeStack(stack); - entry->isFinished = FALSE; + entry->isFinished = false; } - else if (GinGetNPosting(itup) > 0) + else { - entry->list = ginReadTuple(ginstate, entry->attnum, itup, - &entry->nlist); - entry->predictNumberResult = entry->nlist; + /* + * Lock the entry leaf page. This is more coarse-grained than + * necessary, because it will conflict with any insertions that + * land on the same leaf page, not only the exacty key we searched + * for. But locking an individual tuple would require updating + * that lock whenever it moves because of insertions or vacuums, + * which seems too complicated. 
+ */ + PredicateLockPage(ginstate->index, + BufferGetBlockNumber(stackEntry->buffer), + snapshot); + if (GinGetNPosting(itup) > 0) + { + entry->list = ginReadTuple(ginstate, entry->attnum, itup, + &entry->nlist); + entry->predictNumberResult = entry->nlist; - entry->isFinished = FALSE; + entry->isFinished = false; + } } } + else + { + /* + * No entry found. Predicate lock the leaf page, to lock the place + * where the entry would've been, had there been one. + */ + PredicateLockPage(ginstate->index, + BufferGetBlockNumber(stackEntry->buffer), snapshot); + } if (needUnlock) LockBuffer(stackEntry->buffer, GIN_UNLOCK); @@ -565,7 +613,7 @@ startScan(IndexScanDesc scan) for (i = 0; i < so->totalentries; i++) { so->entries[i]->predictNumberResult /= so->totalentries; - so->entries[i]->reduceResult = TRUE; + so->entries[i]->reduceResult = true; } } } @@ -666,7 +714,7 @@ entryLoadMoreItems(GinState *ginstate, GinScanEntry entry, { UnlockReleaseBuffer(entry->buffer); entry->buffer = InvalidBuffer; - entry->isFinished = TRUE; + entry->isFinished = true; return; } @@ -728,7 +776,7 @@ entryLoadMoreItems(GinState *ginstate, GinScanEntry entry, /* * Sets entry->curItem to next heap item pointer > advancePast, for one entry - * of one scan key, or sets entry->isFinished to TRUE if there are no more. + * of one scan key, or sets entry->isFinished to true if there are no more. * * Item pointers are returned in ascending order. * @@ -775,7 +823,7 @@ entryGetItem(GinState *ginstate, GinScanEntry entry, ItemPointerSetInvalid(&entry->curItem); tbm_end_iterate(entry->matchIterator); entry->matchIterator = NULL; - entry->isFinished = TRUE; + entry->isFinished = true; break; } @@ -835,7 +883,7 @@ entryGetItem(GinState *ginstate, GinScanEntry entry, entry->matchResult->offsets[entry->offset]); entry->offset++; gotitem = true; - } while (!gotitem || (entry->reduceResult == TRUE && dropItem(entry))); + } while (!gotitem || (entry->reduceResult == true && dropItem(entry))); } else if (!BufferIsValid(entry->buffer)) { @@ -848,7 +896,7 @@ entryGetItem(GinState *ginstate, GinScanEntry entry, if (entry->offset >= entry->nlist) { ItemPointerSetInvalid(&entry->curItem); - entry->isFinished = TRUE; + entry->isFinished = true; break; } @@ -876,7 +924,7 @@ entryGetItem(GinState *ginstate, GinScanEntry entry, entry->curItem = entry->list[entry->offset++]; } while (ginCompareItemPointers(&entry->curItem, &advancePast) <= 0 || - (entry->reduceResult == TRUE && dropItem(entry))); + (entry->reduceResult == true && dropItem(entry))); } } @@ -891,7 +939,7 @@ entryGetItem(GinState *ginstate, GinScanEntry entry, * iff recheck is needed for this item pointer (including the case where the * item pointer is a lossy page pointer). * - * If all entry streams are exhausted, sets key->isFinished to TRUE. + * If all entry streams are exhausted, sets key->isFinished to true. * * Item pointers must be returned in ascending order. * @@ -963,7 +1011,7 @@ keyGetItem(GinState *ginstate, MemoryContext tempCtx, GinScanKey key, if (allFinished) { /* all entries are finished */ - key->isFinished = TRUE; + key->isFinished = true; return; } @@ -1051,7 +1099,7 @@ keyGetItem(GinState *ginstate, MemoryContext tempCtx, GinScanKey key, * them. We could pass them as MAYBE as well, but if we're using the * "shim" implementation of a tri-state consistent function (see * ginlogic.c), it's better to pass as few MAYBEs as possible. So pass - * them as TRUE. + * them as true. 
* * Note that only lossy-page entries pointing to the current item's page * should trigger this processing; we might have future lossy pages in the @@ -1064,7 +1112,7 @@ keyGetItem(GinState *ginstate, MemoryContext tempCtx, GinScanKey key, for (i = 0; i < key->nentries; i++) { entry = key->scanEntry[i]; - if (entry->isFinished == FALSE && + if (entry->isFinished == false && ginCompareItemPointers(&entry->curItem, &curPageLossy) == 0) { if (i < key->nuserentries) @@ -1314,7 +1362,7 @@ scanGetItem(IndexScanDesc scan, ItemPointerData advancePast, } } - return TRUE; + return true; } @@ -1508,7 +1556,7 @@ collectMatchesForHeapRow(IndexScanDesc scan, pendingPosition *pos) memset(key->entryRes, GIN_FALSE, key->nentries); } - memset(pos->hasMatchKey, FALSE, so->nkeys); + memset(pos->hasMatchKey, false, so->nkeys); /* * Outer loop iterates over multiple pending-list pages when a single heap @@ -1700,7 +1748,7 @@ collectMatchesForHeapRow(IndexScanDesc scan, pendingPosition *pos) } /* - * Collect all matched rows from pending list into bitmap + * Collect all matched rows from pending list into bitmap. */ static void scanPendingInsert(IndexScanDesc scan, TIDBitmap *tbm, int64 *ntids) @@ -1717,6 +1765,12 @@ scanPendingInsert(IndexScanDesc scan, TIDBitmap *tbm, int64 *ntids) *ntids = 0; + /* + * Acquire predicate lock on the metapage, to conflict with any fastupdate + * insertions. + */ + PredicateLockPage(scan->indexRelation, GIN_METAPAGE_BLKNO, scan->xs_snapshot); + LockBuffer(metabuffer, GIN_SHARE); page = BufferGetPage(metabuffer); TestForOldSnapshot(scan->xs_snapshot, scan->indexRelation, page); diff --git a/src/backend/access/gin/gininsert.c b/src/backend/access/gin/gininsert.c index 5378011f50..5281eb6823 100644 --- a/src/backend/access/gin/gininsert.c +++ b/src/backend/access/gin/gininsert.c @@ -4,7 +4,7 @@ * insert routines for the postgres inverted index access method. * * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * IDENTIFICATION @@ -22,6 +22,7 @@ #include "storage/bufmgr.h" #include "storage/smgr.h" #include "storage/indexfsm.h" +#include "storage/predicate.h" #include "utils/memutils.h" #include "utils/rel.h" @@ -48,7 +49,7 @@ static IndexTuple addItemPointersToLeafTuple(GinState *ginstate, IndexTuple old, ItemPointerData *items, uint32 nitem, - GinStatsData *buildStats) + GinStatsData *buildStats, Buffer buffer) { OffsetNumber attnum; Datum key; @@ -99,7 +100,8 @@ addItemPointersToLeafTuple(GinState *ginstate, postingRoot = createPostingTree(ginstate->index, oldItems, oldNPosting, - buildStats); + buildStats, + buffer); /* Now insert the TIDs-to-be-added into the posting tree */ ginInsertItemPointers(ginstate->index, postingRoot, @@ -127,7 +129,7 @@ static IndexTuple buildFreshLeafTuple(GinState *ginstate, OffsetNumber attnum, Datum key, GinNullCategory category, ItemPointerData *items, uint32 nitem, - GinStatsData *buildStats) + GinStatsData *buildStats, Buffer buffer) { IndexTuple res = NULL; GinPostingList *compressedList; @@ -157,7 +159,7 @@ buildFreshLeafTuple(GinState *ginstate, * Initialize a new posting tree with the TIDs. 
*/ postingRoot = createPostingTree(ginstate->index, items, nitem, - buildStats); + buildStats, buffer); /* And save the root link in the result tuple */ GinSetPostingTree(res, postingRoot); @@ -185,7 +187,7 @@ ginEntryInsert(GinState *ginstate, IndexTuple itup; Page page; - insertdata.isDelete = FALSE; + insertdata.isDelete = false; /* During index build, count the to-be-inserted entry */ if (buildStats) @@ -217,17 +219,19 @@ ginEntryInsert(GinState *ginstate, return; } + CheckForSerializableConflictIn(ginstate->index, NULL, stack->buffer); /* modify an existing leaf entry */ itup = addItemPointersToLeafTuple(ginstate, itup, - items, nitem, buildStats); + items, nitem, buildStats, stack->buffer); - insertdata.isDelete = TRUE; + insertdata.isDelete = true; } else { + CheckForSerializableConflictIn(ginstate->index, NULL, stack->buffer); /* no match, so construct a new leaf entry */ itup = buildFreshLeafTuple(ginstate, attnum, key, category, - items, nitem, buildStats); + items, nitem, buildStats, stack->buffer); } /* Insert the new or modified leaf tuple */ @@ -348,7 +352,7 @@ ginbuild(Relation heap, Relation index, IndexInfo *indexInfo) Page page; XLogBeginInsert(); - XLogRegisterBuffer(0, MetaBuffer, REGBUF_WILL_INIT); + XLogRegisterBuffer(0, MetaBuffer, REGBUF_WILL_INIT | REGBUF_STANDARD); XLogRegisterBuffer(1, RootBuffer, REGBUF_WILL_INIT); recptr = XLogInsert(RM_GIN_ID, XLOG_GIN_CREATE_INDEX); @@ -391,7 +395,7 @@ ginbuild(Relation heap, Relation index, IndexInfo *indexInfo) * prefers to receive tuples in TID order. */ reltuples = IndexBuildHeapScan(heap, index, indexInfo, false, - ginBuildCallback, (void *) &buildstate); + ginBuildCallback, (void *) &buildstate, NULL); /* dump remaining entries to the index */ oldCtx = MemoryContextSwitchTo(buildstate.tmpCtx); @@ -447,7 +451,7 @@ ginbuildempty(Relation index) START_CRIT_SECTION(); GinInitMetabuffer(MetaBuffer); MarkBufferDirty(MetaBuffer); - log_newpage_buffer(MetaBuffer, false); + log_newpage_buffer(MetaBuffer, true); GinInitBuffer(RootBuffer, GIN_LEAF); MarkBufferDirty(RootBuffer); log_newpage_buffer(RootBuffer, false); diff --git a/src/backend/access/gin/ginlogic.c b/src/backend/access/gin/ginlogic.c index 5b8ad9a25a..2c42d1aa91 100644 --- a/src/backend/access/gin/ginlogic.c +++ b/src/backend/access/gin/ginlogic.c @@ -24,7 +24,7 @@ * is used for.) * * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * IDENTIFICATION diff --git a/src/backend/access/gin/ginpostinglist.c b/src/backend/access/gin/ginpostinglist.c index 8d2d31ac72..54c9caffe3 100644 --- a/src/backend/access/gin/ginpostinglist.c +++ b/src/backend/access/gin/ginpostinglist.c @@ -4,7 +4,7 @@ * routines for dealing with posting lists. 
* * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * IDENTIFICATION diff --git a/src/backend/access/gin/ginscan.c b/src/backend/access/gin/ginscan.c index 7ceea7a741..8ade4311df 100644 --- a/src/backend/access/gin/ginscan.c +++ b/src/backend/access/gin/ginscan.c @@ -4,7 +4,7 @@ * routines to manage scans of inverted index relations * * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * IDENTIFICATION @@ -295,6 +295,7 @@ ginNewScanKey(IndexScanDesc scan) bool *partial_matches = NULL; Pointer *extra_data = NULL; bool *nullFlags = NULL; + GinNullCategory *categories; int32 searchMode = GIN_SEARCH_MODE_DEFAULT; /* @@ -346,15 +347,12 @@ ginNewScanKey(IndexScanDesc scan) } /* - * If the extractQueryFn didn't create a nullFlags array, create one, - * assuming that everything's non-null. Otherwise, run through the - * array and make sure each value is exactly 0 or 1; this ensures - * binary compatibility with the GinNullCategory representation. While - * at it, detect whether any null keys are present. + * Create GinNullCategory representation. If the extractQueryFn + * didn't create a nullFlags array, we assume everything is non-null. + * While at it, detect whether any null keys are present. */ - if (nullFlags == NULL) - nullFlags = (bool *) palloc0(nQueryValues * sizeof(bool)); - else + categories = (GinNullCategory *) palloc0(nQueryValues * sizeof(GinNullCategory)); + if (nullFlags) { int32 j; @@ -362,17 +360,16 @@ ginNewScanKey(IndexScanDesc scan) { if (nullFlags[j]) { - nullFlags[j] = true; /* not any other nonzero value */ + categories[j] = GIN_CAT_NULL_KEY; hasNullQuery = true; } } } - /* now we can use the nullFlags as category codes */ ginFillScanKey(so, skey->sk_attno, skey->sk_strategy, searchMode, skey->sk_argument, nQueryValues, - queryValues, (GinNullCategory *) nullFlags, + queryValues, categories, partial_matches, extra_data); } diff --git a/src/backend/access/gin/ginutil.c b/src/backend/access/gin/ginutil.c index 91e4a8cf70..0a32182dd7 100644 --- a/src/backend/access/gin/ginutil.c +++ b/src/backend/access/gin/ginutil.c @@ -4,7 +4,7 @@ * Utility routines for the Postgres inverted index access method. 
* * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * IDENTIFICATION @@ -23,6 +23,7 @@ #include "miscadmin.h" #include "storage/indexfsm.h" #include "storage/lmgr.h" +#include "storage/predicate.h" #include "utils/builtins.h" #include "utils/index_selfuncs.h" #include "utils/typcache.h" @@ -49,8 +50,9 @@ ginhandler(PG_FUNCTION_ARGS) amroutine->amsearchnulls = false; amroutine->amstorage = true; amroutine->amclusterable = false; - amroutine->ampredlocks = false; + amroutine->ampredlocks = true; amroutine->amcanparallel = false; + amroutine->amcaninclude = false; amroutine->amkeytype = InvalidOid; amroutine->ambuild = ginbuild; @@ -96,6 +98,8 @@ initGinState(GinState *state, Relation index) for (i = 0; i < origTupdesc->natts; i++) { + Form_pg_attribute attr = TupleDescAttr(origTupdesc, i); + if (state->oneCol) state->tupdesc[i] = state->origTupdesc; else @@ -105,11 +109,11 @@ initGinState(GinState *state, Relation index) TupleDescInitEntry(state->tupdesc[i], (AttrNumber) 1, NULL, INT2OID, -1, 0); TupleDescInitEntry(state->tupdesc[i], (AttrNumber) 2, NULL, - origTupdesc->attrs[i]->atttypid, - origTupdesc->attrs[i]->atttypmod, - origTupdesc->attrs[i]->attndims); + attr->atttypid, + attr->atttypmod, + attr->attndims); TupleDescInitEntryCollation(state->tupdesc[i], (AttrNumber) 2, - origTupdesc->attrs[i]->attcollation); + attr->attcollation); } /* @@ -126,13 +130,13 @@ initGinState(GinState *state, Relation index) { TypeCacheEntry *typentry; - typentry = lookup_type_cache(origTupdesc->attrs[i]->atttypid, + typentry = lookup_type_cache(attr->atttypid, TYPECACHE_CMP_PROC_FINFO); if (!OidIsValid(typentry->cmp_proc_finfo.fn_oid)) ereport(ERROR, (errcode(ERRCODE_UNDEFINED_FUNCTION), errmsg("could not identify a comparison function for type %s", - format_type_be(origTupdesc->attrs[i]->atttypid)))); + format_type_be(attr->atttypid)))); fmgr_info_copy(&(state->compareFn[i]), &(typentry->cmp_proc_finfo), CurrentMemoryContext); @@ -372,6 +376,14 @@ GinInitMetabuffer(Buffer b) metadata->nDataPages = 0; metadata->nEntries = 0; metadata->ginVersion = GIN_CURRENT_VERSION; + + /* + * Set pd_lower just past the end of the metadata. This is essential, + * because without doing so, metadata will be lost if xlog.c compresses + * the page. + */ + ((PageHeader) page)->pd_lower = + ((char *) metadata + sizeof(GinMetaPageData)) - (char *) page; } /* @@ -519,19 +531,10 @@ ginExtractEntries(GinState *ginstate, OffsetNumber attnum, /* * If the extractValueFn didn't create a nullFlags array, create one, - * assuming that everything's non-null. Otherwise, run through the array - * and make sure each value is exactly 0 or 1; this ensures binary - * compatibility with the GinNullCategory representation. + * assuming that everything's non-null. */ if (nullFlags == NULL) nullFlags = (bool *) palloc0(*nentries * sizeof(bool)); - else - { - for (i = 0; i < *nentries; i++) - nullFlags[i] = (nullFlags[i] ? true : false); - } - /* now we can use the nullFlags as category codes */ - *categories = (GinNullCategory *) nullFlags; /* * If there's more than one key, sort and unique-ify. @@ -590,6 +593,13 @@ ginExtractEntries(GinState *ginstate, OffsetNumber attnum, pfree(keydata); } + /* + * Create GinNullCategory representation from nullFlags. 
+ */ + *categories = (GinNullCategory *) palloc0(*nentries * sizeof(GinNullCategory)); + for (i = 0; i < *nentries; i++) + (*categories)[i] = (nullFlags[i] ? GIN_CAT_NULL_KEY : GIN_CAT_NORM_KEY); + return entries; } @@ -674,6 +684,16 @@ ginUpdateStats(Relation index, const GinStatsData *stats) metadata->nDataPages = stats->nDataPages; metadata->nEntries = stats->nEntries; + /* + * Set pd_lower just past the end of the metadata. This is essential, + * because without doing so, metadata will be lost if xlog.c compresses + * the page. (We must do this here because pre-v11 versions of PG did not + * set the metapage's pd_lower correctly, so a pg_upgraded index might + * contain the wrong value.) + */ + ((PageHeader) metapage)->pd_lower = + ((char *) metadata + sizeof(GinMetaPageData)) - (char *) metapage; + MarkBufferDirty(metabuffer); if (RelationNeedsWAL(index)) @@ -688,7 +708,7 @@ ginUpdateStats(Relation index, const GinStatsData *stats) XLogBeginInsert(); XLogRegisterData((char *) &data, sizeof(ginxlogUpdateMeta)); - XLogRegisterBuffer(0, metabuffer, REGBUF_WILL_INIT); + XLogRegisterBuffer(0, metabuffer, REGBUF_WILL_INIT | REGBUF_STANDARD); recptr = XLogInsert(RM_GIN_ID, XLOG_GIN_UPDATE_META_PAGE); PageSetLSN(metapage, recptr); diff --git a/src/backend/access/gin/ginvacuum.c b/src/backend/access/gin/ginvacuum.c index 31425e9963..3104bc12b6 100644 --- a/src/backend/access/gin/ginvacuum.c +++ b/src/backend/access/gin/ginvacuum.c @@ -4,7 +4,7 @@ * delete & vacuum routines for the postgres GIN * * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * IDENTIFICATION @@ -22,6 +22,7 @@ #include "postmaster/autovacuum.h" #include "storage/indexfsm.h" #include "storage/lmgr.h" +#include "storage/predicate.h" #include "utils/memutils.h" struct GinVacuumState @@ -153,12 +154,18 @@ ginDeletePage(GinVacuumState *gvs, BlockNumber deleteBlkno, BlockNumber leftBlkn LockBuffer(lBuffer, GIN_EXCLUSIVE); - START_CRIT_SECTION(); - - /* Unlink the page by changing left sibling's rightlink */ page = BufferGetPage(dBuffer); rightlink = GinPageGetOpaque(page)->rightlink; + /* + * Any insert which would have gone on the leaf block will now go to its + * right sibling. 
+ */ + PredicateLockPageCombine(gvs->index, deleteBlkno, rightlink); + + START_CRIT_SECTION(); + + /* Unlink the page by changing left sibling's rightlink */ page = BufferGetPage(lBuffer); GinPageGetOpaque(page)->rightlink = rightlink; @@ -235,7 +242,7 @@ ginScanToDelete(GinVacuumState *gvs, BlockNumber blkno, bool isRoot, DataPageDeleteStack *me; Buffer buffer; Page page; - bool meDelete = FALSE; + bool meDelete = false; bool isempty; if (isRoot) @@ -274,7 +281,7 @@ ginScanToDelete(GinVacuumState *gvs, BlockNumber blkno, bool isRoot, { PostingItem *pitem = GinDataPageGetPostingItem(page, i); - if (ginScanToDelete(gvs, PostingItemGetBlockNumber(pitem), FALSE, me, i)) + if (ginScanToDelete(gvs, PostingItemGetBlockNumber(pitem), false, me, i)) i--; } } @@ -291,7 +298,7 @@ ginScanToDelete(GinVacuumState *gvs, BlockNumber blkno, bool isRoot, { Assert(!isRoot); ginDeletePage(gvs, blkno, me->leftBlkno, me->parent->blkno, myoff, me->parent->isRoot); - meDelete = TRUE; + meDelete = true; } } @@ -319,7 +326,7 @@ ginVacuumPostingTreeLeaves(GinVacuumState *gvs, BlockNumber blkno, bool isRoot) { Buffer buffer; Page page; - bool hasVoidPage = FALSE; + bool hasVoidPage = false; MemoryContext oldCxt; buffer = ReadBufferExtended(gvs->index, MAIN_FORKNUM, blkno, @@ -339,7 +346,7 @@ ginVacuumPostingTreeLeaves(GinVacuumState *gvs, BlockNumber blkno, bool isRoot) /* if root is a leaf page, we don't desire further processing */ if (GinDataLeafPageIsEmpty(page)) - hasVoidPage = TRUE; + hasVoidPage = true; UnlockReleaseBuffer(buffer); @@ -348,8 +355,8 @@ ginVacuumPostingTreeLeaves(GinVacuumState *gvs, BlockNumber blkno, bool isRoot) else { OffsetNumber i; - bool hasEmptyChild = FALSE; - bool hasNonEmptyChild = FALSE; + bool hasEmptyChild = false; + bool hasNonEmptyChild = false; OffsetNumber maxoff = GinPageGetOpaque(page)->maxoff; BlockNumber *children = palloc(sizeof(BlockNumber) * (maxoff + 1)); @@ -369,10 +376,10 @@ ginVacuumPostingTreeLeaves(GinVacuumState *gvs, BlockNumber blkno, bool isRoot) for (i = FirstOffsetNumber; i <= maxoff; i++) { - if (ginVacuumPostingTreeLeaves(gvs, children[i], FALSE)) - hasEmptyChild = TRUE; + if (ginVacuumPostingTreeLeaves(gvs, children[i], false)) + hasEmptyChild = true; else - hasNonEmptyChild = TRUE; + hasNonEmptyChild = true; } pfree(children); @@ -380,12 +387,12 @@ ginVacuumPostingTreeLeaves(GinVacuumState *gvs, BlockNumber blkno, bool isRoot) vacuum_delay_point(); /* - * All subtree is empty - just return TRUE to indicate that parent - * must do a cleanup. Unless we are ROOT an there is way to go upper. + * All subtree is empty - just return true to indicate that parent + * must do a cleanup, unless we are ROOT and there is way to go upper. 
*/ if (hasEmptyChild && !hasNonEmptyChild && !isRoot) - return TRUE; + return true; if (hasEmptyChild) { @@ -399,9 +406,9 @@ ginVacuumPostingTreeLeaves(GinVacuumState *gvs, BlockNumber blkno, bool isRoot) memset(&root, 0, sizeof(DataPageDeleteStack)); root.leftBlkno = InvalidBlockNumber; - root.isRoot = TRUE; + root.isRoot = true; - ginScanToDelete(gvs, blkno, TRUE, &root, InvalidOffsetNumber); + ginScanToDelete(gvs, blkno, true, &root, InvalidOffsetNumber); ptr = root.child; @@ -416,14 +423,14 @@ ginVacuumPostingTreeLeaves(GinVacuumState *gvs, BlockNumber blkno, bool isRoot) } /* Here we have deleted all empty subtrees */ - return FALSE; + return false; } } static void ginVacuumPostingTree(GinVacuumState *gvs, BlockNumber rootBlkno) { - ginVacuumPostingTreeLeaves(gvs, rootBlkno, TRUE); + ginVacuumPostingTreeLeaves(gvs, rootBlkno, true); } /* @@ -570,7 +577,7 @@ ginbulkdelete(IndexVacuumInfo *info, IndexBulkDeleteResult *stats, * and cleanup any pending inserts */ ginInsertCleanup(&gvs.ginstate, !IsAutoVacuumWorkerProcess(), - false, stats); + false, true, stats); } /* we'll re-count the tuples each time */ @@ -683,7 +690,7 @@ ginvacuumcleanup(IndexVacuumInfo *info, IndexBulkDeleteResult *stats) if (IsAutoVacuumWorkerProcess()) { initGinState(&ginstate, index); - ginInsertCleanup(&ginstate, false, true, stats); + ginInsertCleanup(&ginstate, false, true, true, stats); } return stats; } @@ -697,7 +704,7 @@ ginvacuumcleanup(IndexVacuumInfo *info, IndexBulkDeleteResult *stats) stats = (IndexBulkDeleteResult *) palloc0(sizeof(IndexBulkDeleteResult)); initGinState(&ginstate, index); ginInsertCleanup(&ginstate, !IsAutoVacuumWorkerProcess(), - false, stats); + false, true, stats); } memset(&idxStat, 0, sizeof(idxStat)); diff --git a/src/backend/access/gin/ginvalidate.c b/src/backend/access/gin/ginvalidate.c index 4c8e563545..1922260b75 100644 --- a/src/backend/access/gin/ginvalidate.c +++ b/src/backend/access/gin/ginvalidate.c @@ -3,7 +3,7 @@ * ginvalidate.c * Opclass validator for GIN. * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * IDENTIFICATION @@ -90,7 +90,7 @@ ginvalidate(Oid opclassoid) { ereport(INFO, (errcode(ERRCODE_INVALID_OBJECT_DEFINITION), - errmsg("operator family \"%s\" of access method %s contains support procedure %s with different left and right input types", + errmsg("operator family \"%s\" of access method %s contains support function %s with different left and right input types", opfamilyname, "gin", format_procedure(procform->amproc)))); result = false; diff --git a/src/backend/access/gin/ginxlog.c b/src/backend/access/gin/ginxlog.c index 7ba04e324f..7701a2d6bf 100644 --- a/src/backend/access/gin/ginxlog.c +++ b/src/backend/access/gin/ginxlog.c @@ -4,7 +4,7 @@ * WAL replay logic for inverted index. * * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * IDENTIFICATION @@ -135,6 +135,14 @@ ginRedoInsertEntry(Buffer buffer, bool isLeaf, BlockNumber rightblkno, void *rda } } +/* + * Redo recompression of posting list. Doing all the changes in-place is not + * always possible, because it might require more space than we've on the page. 
+ * Instead, once modification is required we copy unprocessed tail of the page + * into separately allocated chunk of memory for further reading original + * versions of segments. Thanks to that we don't bother about moving page data + * in-place. + */ static void ginRedoRecompress(Page page, ginxlogRecompressDataLeaf *data) { @@ -144,6 +152,9 @@ ginRedoRecompress(Page page, ginxlogRecompressDataLeaf *data) Pointer segmentend; char *walbuf; int totalsize; + Pointer tailCopy = NULL; + Pointer writePtr; + Pointer segptr; /* * If the page is in pre-9.4 format, convert to new format first. @@ -153,21 +164,37 @@ ginRedoRecompress(Page page, ginxlogRecompressDataLeaf *data) ItemPointer uncompressed = (ItemPointer) GinDataPageGetData(page); int nuncompressed = GinPageGetOpaque(page)->maxoff; int npacked; - GinPostingList *plist; - plist = ginCompressPostingList(uncompressed, nuncompressed, - BLCKSZ, &npacked); - Assert(npacked == nuncompressed); + /* + * Empty leaf pages are deleted as part of vacuum, but leftmost and + * rightmost pages are never deleted. So, pg_upgrade'd from pre-9.4 + * instances might contain empty leaf pages, and we need to handle + * them correctly. + */ + if (nuncompressed > 0) + { + GinPostingList *plist; + + plist = ginCompressPostingList(uncompressed, nuncompressed, + BLCKSZ, &npacked); + totalsize = SizeOfGinPostingList(plist); - totalsize = SizeOfGinPostingList(plist); + Assert(npacked == nuncompressed); + + memcpy(GinDataLeafPageGetPostingList(page), plist, totalsize); + } + else + { + totalsize = 0; + } - memcpy(GinDataLeafPageGetPostingList(page), plist, totalsize); GinDataPageSetDataSize(page, totalsize); GinPageSetCompressed(page); GinPageGetOpaque(page)->maxoff = InvalidOffsetNumber; } oldseg = GinDataLeafPageGetPostingList(page); + writePtr = (Pointer) oldseg; segmentend = (Pointer) oldseg + GinDataLeafPageGetPostingListSize(page); segno = 0; @@ -185,8 +212,6 @@ ginRedoRecompress(Page page, ginxlogRecompressDataLeaf *data) ItemPointerData *newitems; int nnewitems; int segsize; - Pointer segptr; - int szleft; /* Extract all the information we need from the WAL record */ if (a_action == GIN_SEGMENT_INSERT || @@ -209,6 +234,17 @@ ginRedoRecompress(Page page, ginxlogRecompressDataLeaf *data) Assert(segno <= a_segno); while (segno < a_segno) { + /* + * Once modification is started and page tail is copied, we've + * to copy unmodified segments. + */ + segsize = SizeOfGinPostingList(oldseg); + if (tailCopy) + { + Assert(writePtr + segsize < PageGetSpecialPointer(page)); + memcpy(writePtr, (Pointer) oldseg, segsize); + } + writePtr += segsize; oldseg = GinNextPostingListSegment(oldseg); segno++; } @@ -249,36 +285,42 @@ ginRedoRecompress(Page page, ginxlogRecompressDataLeaf *data) Assert(a_action == GIN_SEGMENT_INSERT); segsize = 0; } - szleft = segmentend - segptr; + + /* + * We're about to start modification of the page. So, copy tail of the + * page if it's not done already. 
+ */ + if (!tailCopy && segptr != segmentend) + { + int tailSize = segmentend - segptr; + + tailCopy = (Pointer) palloc(tailSize); + memcpy(tailCopy, segptr, tailSize); + segptr = tailCopy; + oldseg = (GinPostingList *) segptr; + segmentend = segptr + tailSize; + } switch (a_action) { case GIN_SEGMENT_DELETE: - memmove(segptr, segptr + segsize, szleft - segsize); - segmentend -= segsize; - + segptr += segsize; segno++; break; case GIN_SEGMENT_INSERT: - /* make room for the new segment */ - memmove(segptr + newsegsize, segptr, szleft); /* copy the new segment in place */ - memcpy(segptr, newseg, newsegsize); - segmentend += newsegsize; - segptr += newsegsize; + Assert(writePtr + newsegsize <= PageGetSpecialPointer(page)); + memcpy(writePtr, newseg, newsegsize); + writePtr += newsegsize; break; case GIN_SEGMENT_REPLACE: - /* shift the segments that follow */ - memmove(segptr + newsegsize, - segptr + segsize, - szleft - segsize); - /* copy the replacement segment in place */ - memcpy(segptr, newseg, newsegsize); - segmentend -= segsize; - segmentend += newsegsize; - segptr += newsegsize; + /* copy the new version of segment in place */ + Assert(writePtr + newsegsize <= PageGetSpecialPointer(page)); + memcpy(writePtr, newseg, newsegsize); + writePtr += newsegsize; + segptr += segsize; segno++; break; @@ -288,7 +330,18 @@ ginRedoRecompress(Page page, ginxlogRecompressDataLeaf *data) oldseg = (GinPostingList *) segptr; } - totalsize = segmentend - (Pointer) GinDataLeafPageGetPostingList(page); + /* Copy the rest of unmodified segments if any. */ + segptr = (Pointer) oldseg; + if (segptr != segmentend && tailCopy) + { + int restSize = segmentend - segptr; + + Assert(writePtr + restSize <= PageGetSpecialPointer(page)); + memcpy(writePtr, segptr, restSize); + writePtr += restSize; + } + + totalsize = writePtr - (Pointer) GinDataLeafPageGetPostingList(page); GinDataPageSetDataSize(page, totalsize); } @@ -514,7 +567,7 @@ ginRedoUpdateMetapage(XLogReaderState *record) Assert(BufferGetBlockNumber(metabuffer) == GIN_METAPAGE_BLKNO); metapage = BufferGetPage(metabuffer); - GinInitPage(metapage, GIN_META, BufferGetPageSize(metabuffer)); + GinInitMetabuffer(metabuffer); memcpy(GinPageGetMeta(metapage), &data->metadata, sizeof(GinMetaPageData)); PageSetLSN(metapage, lsn); MarkBufferDirty(metabuffer); @@ -656,7 +709,7 @@ ginRedoDeleteListPages(XLogReaderState *record) Assert(BufferGetBlockNumber(metabuffer) == GIN_METAPAGE_BLKNO); metapage = BufferGetPage(metabuffer); - GinInitPage(metapage, GIN_META, BufferGetPageSize(metabuffer)); + GinInitMetabuffer(metabuffer); memcpy(GinPageGetMeta(metapage), &data->metadata, sizeof(GinMetaPageData)); PageSetLSN(metapage, lsn); @@ -768,26 +821,21 @@ void gin_mask(char *pagedata, BlockNumber blkno) { Page page = (Page) pagedata; + PageHeader pagehdr = (PageHeader) page; GinPageOpaque opaque; - mask_page_lsn(page); + mask_page_lsn_and_checksum(page); opaque = GinPageGetOpaque(page); mask_page_hint_bits(page); /* - * GIN metapage doesn't use pd_lower/pd_upper. Other page types do. Hence, - * we need to apply masking for those pages. + * For a GIN_DELETED page, the page is initialized to empty. Hence, mask + * the whole page content. For other pages, mask the hole if pd_lower + * appears to have been set correctly. */ - if (opaque->flags != GIN_META) - { - /* - * For GIN_DELETED page, the page is initialized to empty. Hence, mask - * the page content. 
- */ - if (opaque->flags & GIN_DELETED) - mask_page_content(page); - else - mask_unused_space(page); - } + if (opaque->flags & GIN_DELETED) + mask_page_content(page); + else if (pagehdr->pd_lower > SizeOfPageHeaderData) + mask_unused_space(page); } diff --git a/src/backend/access/gist/gist.c b/src/backend/access/gist/gist.c index 565525bbdf..8a42effdf7 100644 --- a/src/backend/access/gist/gist.c +++ b/src/backend/access/gist/gist.c @@ -4,7 +4,7 @@ * interface routines for the postgres GiST index access method. * * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * IDENTIFICATION @@ -18,6 +18,8 @@ #include "access/gistscan.h" #include "catalog/pg_collation.h" #include "miscadmin.h" +#include "storage/lmgr.h" +#include "storage/predicate.h" #include "nodes/execnodes.h" #include "utils/builtins.h" #include "utils/index_selfuncs.h" @@ -70,8 +72,9 @@ gisthandler(PG_FUNCTION_ARGS) amroutine->amsearchnulls = true; amroutine->amstorage = true; amroutine->amclusterable = true; - amroutine->ampredlocks = false; + amroutine->ampredlocks = true; amroutine->amcanparallel = false; + amroutine->amcaninclude = false; amroutine->amkeytype = InvalidOid; amroutine->ambuild = gistbuild; @@ -337,6 +340,9 @@ gistplacetopage(Relation rel, Size freespace, GISTSTATE *giststate, GISTInitBuffer(ptr->buffer, (is_leaf) ? F_LEAF : 0); ptr->page = BufferGetPage(ptr->buffer); ptr->block.blkno = BufferGetBlockNumber(ptr->buffer); + PredicateLockPageSplit(rel, + BufferGetBlockNumber(buffer), + BufferGetBlockNumber(ptr->buffer)); } /* @@ -640,7 +646,8 @@ gistdoinsert(Relation r, IndexTuple itup, Size freespace, GISTSTATE *giststate) } stack->page = (Page) BufferGetPage(stack->buffer); - stack->lsn = PageGetLSN(stack->page); + stack->lsn = xlocked ? 
+ PageGetLSN(stack->page) : BufferGetLSNAtomic(stack->buffer); Assert(!RelationNeedsWAL(state.r) || !XLogRecPtrIsInvalid(stack->lsn)); /* @@ -890,7 +897,7 @@ gistFindPath(Relation r, BlockNumber child, OffsetNumber *downlinkoffnum) break; } - top->lsn = PageGetLSN(page); + top->lsn = BufferGetLSNAtomic(buffer); /* * If F_FOLLOW_RIGHT is set, the page to the right doesn't have a @@ -1212,6 +1219,12 @@ gistinserttuples(GISTInsertState *state, GISTInsertStack *stack, List *splitinfo; bool is_split; + /* + * Check for any rw conflicts (in serializable isolation level) just + * before we intend to modify the page + */ + CheckForSerializableConflictIn(state->r, NULL, stack->buffer); + /* Insert the tuple(s) to the page, splitting the page if necessary */ is_split = gistplacetopage(state->r, state->freespace, giststate, stack->buffer, @@ -1364,8 +1377,8 @@ gistSplit(Relation r, IndexTupleSize(itup[0]), GiSTPageSize, RelationGetRelationName(r)))); - memset(v.spl_lisnull, TRUE, sizeof(bool) * giststate->tupdesc->natts); - memset(v.spl_risnull, TRUE, sizeof(bool) * giststate->tupdesc->natts); + memset(v.spl_lisnull, true, sizeof(bool) * giststate->tupdesc->natts); + memset(v.spl_risnull, true, sizeof(bool) * giststate->tupdesc->natts); gistSplitByKey(r, page, itup, len, giststate, &v, 0); /* form left and right vector */ @@ -1453,12 +1466,23 @@ initGISTstate(Relation index) fmgr_info_copy(&(giststate->unionFn[i]), index_getprocinfo(index, i + 1, GIST_UNION_PROC), scanCxt); - fmgr_info_copy(&(giststate->compressFn[i]), - index_getprocinfo(index, i + 1, GIST_COMPRESS_PROC), - scanCxt); - fmgr_info_copy(&(giststate->decompressFn[i]), - index_getprocinfo(index, i + 1, GIST_DECOMPRESS_PROC), - scanCxt); + + /* opclasses are not required to provide a Compress method */ + if (OidIsValid(index_getprocid(index, i + 1, GIST_COMPRESS_PROC))) + fmgr_info_copy(&(giststate->compressFn[i]), + index_getprocinfo(index, i + 1, GIST_COMPRESS_PROC), + scanCxt); + else + giststate->compressFn[i].fn_oid = InvalidOid; + + /* opclasses are not required to provide a Decompress method */ + if (OidIsValid(index_getprocid(index, i + 1, GIST_DECOMPRESS_PROC))) + fmgr_info_copy(&(giststate->decompressFn[i]), + index_getprocinfo(index, i + 1, GIST_DECOMPRESS_PROC), + scanCxt); + else + giststate->decompressFn[i].fn_oid = InvalidOid; + fmgr_info_copy(&(giststate->penaltyFn[i]), index_getprocinfo(index, i + 1, GIST_PENALTY_PROC), scanCxt); @@ -1468,6 +1492,7 @@ initGISTstate(Relation index) fmgr_info_copy(&(giststate->equalFn[i]), index_getprocinfo(index, i + 1, GIST_EQUAL_PROC), scanCxt); + /* opclasses are not required to provide a Distance method */ if (OidIsValid(index_getprocid(index, i + 1, GIST_DISTANCE_PROC))) fmgr_info_copy(&(giststate->distanceFn[i]), diff --git a/src/backend/access/gist/gistbuild.c b/src/backend/access/gist/gistbuild.c index 4756a70ae6..434f15f014 100644 --- a/src/backend/access/gist/gistbuild.c +++ b/src/backend/access/gist/gistbuild.c @@ -4,7 +4,7 @@ * build algorithm for GiST indexes implementation. * * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * IDENTIFICATION @@ -203,7 +203,7 @@ gistbuild(Relation heap, Relation index, IndexInfo *indexInfo) * Do the heap scan. 
*/ reltuples = IndexBuildHeapScan(heap, index, indexInfo, true, - gistBuildCallback, (void *) &buildstate); + gistBuildCallback, (void *) &buildstate, NULL); /* * If buffering was used, flush out all the tuples that are still in the @@ -238,7 +238,7 @@ gistbuild(Relation heap, Relation index, IndexInfo *indexInfo) * and "auto" values. */ void -gistValidateBufferingOption(char *value) +gistValidateBufferingOption(const char *value) { if (value == NULL || (strcmp(value, "on") != 0 && @@ -295,10 +295,10 @@ gistInitBuffering(GISTBuildState *buildstate) itupMinSize = (Size) MAXALIGN(sizeof(IndexTupleData)); for (i = 0; i < index->rd_att->natts; i++) { - if (index->rd_att->attrs[i]->attlen < 0) + if (TupleDescAttr(index->rd_att, i)->attlen < 0) itupMinSize += VARHDRSZ; else - itupMinSize += index->rd_att->attrs[i]->attlen; + itupMinSize += TupleDescAttr(index->rd_att, i)->attlen; } /* Calculate average and maximal number of index tuples which fit to page */ diff --git a/src/backend/access/gist/gistbuildbuffers.c b/src/backend/access/gist/gistbuildbuffers.c index 88cee2028d..97033983e3 100644 --- a/src/backend/access/gist/gistbuildbuffers.c +++ b/src/backend/access/gist/gistbuildbuffers.c @@ -4,7 +4,7 @@ * node buffer management functions for GiST buffering build algorithm. * * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * IDENTIFICATION diff --git a/src/backend/access/gist/gistget.c b/src/backend/access/gist/gistget.c index 760ea0c997..e4a3786be0 100644 --- a/src/backend/access/gist/gistget.c +++ b/src/backend/access/gist/gistget.c @@ -4,7 +4,7 @@ * fetch tuples from a GiST scan. * * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * IDENTIFICATION @@ -14,13 +14,15 @@ */ #include "postgres.h" +#include "access/genam.h" #include "access/gist_private.h" #include "access/relscan.h" -#include "catalog/pg_type.h" #include "miscadmin.h" +#include "storage/lmgr.h" +#include "storage/predicate.h" #include "pgstat.h" #include "lib/pairingheap.h" -#include "utils/builtins.h" +#include "utils/float.h" #include "utils/memutils.h" #include "utils/rel.h" @@ -61,7 +63,7 @@ gistkillitems(IndexScanDesc scan) * read. killedItems could be not valid so LP_DEAD hints applying is not * safe. */ - if (PageGetLSN(page) != so->curPageLSN) + if (BufferGetLSNAtomic(buffer) != so->curPageLSN) { UnlockReleaseBuffer(buffer); so->numKilled = 0; /* reset counter */ @@ -197,7 +199,7 @@ gistindex_keytest(IndexScanDesc scan, gistdentryinit(giststate, key->sk_attno - 1, &de, datum, r, page, offset, - FALSE, isNull); + false, isNull); /* * Call the Consistent function to evaluate the test. The @@ -258,7 +260,7 @@ gistindex_keytest(IndexScanDesc scan, gistdentryinit(giststate, key->sk_attno - 1, &de, datum, r, page, offset, - FALSE, isNull); + false, isNull); /* * Call the Distance function to evaluate the distance. 
The @@ -336,6 +338,7 @@ gistScanPage(IndexScanDesc scan, GISTSearchItem *pageItem, double *myDistances, buffer = ReadBuffer(scan->indexRelation, pageItem->blkno); LockBuffer(buffer, GIST_SHARE); + PredicateLockPage(r, BufferGetBlockNumber(buffer), scan->xs_snapshot); gistcheckpage(scan->indexRelation, buffer); page = BufferGetPage(buffer); TestForOldSnapshot(scan->xs_snapshot, r, page); @@ -384,7 +387,7 @@ gistScanPage(IndexScanDesc scan, GISTSearchItem *pageItem, double *myDistances, * safe to apply LP_DEAD hints to the page later. This allows us to drop * the pin for MVCC scans, which allows vacuum to avoid blocking. */ - so->curPageLSN = PageGetLSN(page); + so->curPageLSN = BufferGetLSNAtomic(buffer); /* * check all tuples on page @@ -540,7 +543,6 @@ getNextNearest(IndexScanDesc scan) { GISTScanOpaque so = (GISTScanOpaque) scan->opaque; bool res = false; - int i; if (scan->xs_hitup) { @@ -561,45 +563,10 @@ getNextNearest(IndexScanDesc scan) /* found a heap item at currently minimal distance */ scan->xs_ctup.t_self = item->data.heap.heapPtr; scan->xs_recheck = item->data.heap.recheck; - scan->xs_recheckorderby = item->data.heap.recheckDistances; - for (i = 0; i < scan->numberOfOrderBys; i++) - { - if (so->orderByTypes[i] == FLOAT8OID) - { -#ifndef USE_FLOAT8_BYVAL - /* must free any old value to avoid memory leakage */ - if (!scan->xs_orderbynulls[i]) - pfree(DatumGetPointer(scan->xs_orderbyvals[i])); -#endif - scan->xs_orderbyvals[i] = Float8GetDatum(item->distances[i]); - scan->xs_orderbynulls[i] = false; - } - else if (so->orderByTypes[i] == FLOAT4OID) - { - /* convert distance function's result to ORDER BY type */ -#ifndef USE_FLOAT4_BYVAL - /* must free any old value to avoid memory leakage */ - if (!scan->xs_orderbynulls[i]) - pfree(DatumGetPointer(scan->xs_orderbyvals[i])); -#endif - scan->xs_orderbyvals[i] = Float4GetDatum((float4) item->distances[i]); - scan->xs_orderbynulls[i] = false; - } - else - { - /* - * If the ordering operator's return value is anything - * else, we don't know how to convert the float8 bound - * calculated by the distance function to that. The - * executor won't actually need the order by values we - * return here, if there are no lossy results, so only - * insist on converting if the *recheck flag is set. - */ - if (scan->xs_recheckorderby) - elog(ERROR, "GiST operator family's FOR ORDER BY operator must return float8 or float4 if the distance function is lossy"); - scan->xs_orderbynulls[i] = true; - } - } + + index_store_float8_orderby_distances(scan, so->orderByTypes, + item->distances, + item->data.heap.recheckDistances); /* in an index-only scan, also return the reconstructed tuple. */ if (scan->xs_want_itup) @@ -801,11 +768,13 @@ gistgetbitmap(IndexScanDesc scan, TIDBitmap *tbm) * Can we do index-only scans on the given index column? * * Opclasses that implement a fetch function support index-only scans. + * Opclasses without compression functions also support index-only scans. */ bool gistcanreturn(Relation index, int attno) { - if (OidIsValid(index_getprocid(index, attno, GIST_FETCH_PROC))) + if (OidIsValid(index_getprocid(index, attno, GIST_FETCH_PROC)) || + !OidIsValid(index_getprocid(index, attno, GIST_COMPRESS_PROC))) return true; else return false; diff --git a/src/backend/access/gist/gistproc.c b/src/backend/access/gist/gistproc.c index 08990f5a1b..c3383bd6da 100644 --- a/src/backend/access/gist/gistproc.c +++ b/src/backend/access/gist/gistproc.c @@ -7,7 +7,7 @@ * This gives R-tree behavior, with Guttman's poly-time split algorithm. 
* * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * IDENTIFICATION @@ -17,12 +17,12 @@ */ #include "postgres.h" -#include #include #include "access/gist.h" #include "access/stratnum.h" #include "utils/builtins.h" +#include "utils/float.h" #include "utils/geo_decls.h" @@ -34,15 +34,6 @@ static bool rtree_internal_consistent(BOX *key, BOX *query, /* Minimum accepted ratio of split */ #define LIMIT_RATIO 0.3 -/* Convenience macros for NaN-aware comparisons */ -#define FLOAT8_EQ(a,b) (float8_cmp_internal(a, b) == 0) -#define FLOAT8_LT(a,b) (float8_cmp_internal(a, b) < 0) -#define FLOAT8_LE(a,b) (float8_cmp_internal(a, b) <= 0) -#define FLOAT8_GT(a,b) (float8_cmp_internal(a, b) > 0) -#define FLOAT8_GE(a,b) (float8_cmp_internal(a, b) >= 0) -#define FLOAT8_MAX(a,b) (FLOAT8_GT(a, b) ? (a) : (b)) -#define FLOAT8_MIN(a,b) (FLOAT8_LT(a, b) ? (a) : (b)) - /************************************************** * Box ops @@ -54,17 +45,17 @@ static bool rtree_internal_consistent(BOX *key, BOX *query, static void rt_box_union(BOX *n, const BOX *a, const BOX *b) { - n->high.x = FLOAT8_MAX(a->high.x, b->high.x); - n->high.y = FLOAT8_MAX(a->high.y, b->high.y); - n->low.x = FLOAT8_MIN(a->low.x, b->low.x); - n->low.y = FLOAT8_MIN(a->low.y, b->low.y); + n->high.x = float8_max(a->high.x, b->high.x); + n->high.y = float8_max(a->high.y, b->high.y); + n->low.x = float8_min(a->low.x, b->low.x); + n->low.y = float8_min(a->low.y, b->low.y); } /* * Size of a BOX for penalty-calculation purposes. * The result can be +Infinity, but not NaN. */ -static double +static float8 size_box(const BOX *box) { /* @@ -74,8 +65,8 @@ size_box(const BOX *box) * * The less-than cases should not happen, but if they do, say "zero". */ - if (FLOAT8_LE(box->high.x, box->low.x) || - FLOAT8_LE(box->high.y, box->low.y)) + if (float8_le(box->high.x, box->low.x) || + float8_le(box->high.y, box->low.y)) return 0.0; /* @@ -85,27 +76,28 @@ size_box(const BOX *box) */ if (isnan(box->high.x) || isnan(box->high.y)) return get_float8_infinity(); - return (box->high.x - box->low.x) * (box->high.y - box->low.y); + return float8_mul(float8_mi(box->high.x, box->low.x), + float8_mi(box->high.y, box->low.y)); } /* * Return amount by which the union of the two boxes is larger than * the original BOX's area. The result can be +Infinity, but not NaN. */ -static double +static float8 box_penalty(const BOX *original, const BOX *new) { BOX unionbox; rt_box_union(&unionbox, original, new); - return size_box(&unionbox) - size_box(original); + return float8_mi(size_box(&unionbox), size_box(original)); } /* * The GiST Consistent method for boxes * * Should return false if for all data items x below entry, - * the predicate x op query must be FALSE, where op is the oper + * the predicate x op query must be false, where op is the oper * corresponding to strategy in the pg_amop table. 
*/ Datum @@ -122,7 +114,7 @@ gist_box_consistent(PG_FUNCTION_ARGS) *recheck = false; if (DatumGetBoxP(entry->key) == NULL || query == NULL) - PG_RETURN_BOOL(FALSE); + PG_RETURN_BOOL(false); /* * if entry is not leaf, use rtree_internal_consistent, else use @@ -144,13 +136,13 @@ gist_box_consistent(PG_FUNCTION_ARGS) static void adjustBox(BOX *b, const BOX *addon) { - if (FLOAT8_LT(b->high.x, addon->high.x)) + if (float8_lt(b->high.x, addon->high.x)) b->high.x = addon->high.x; - if (FLOAT8_GT(b->low.x, addon->low.x)) + if (float8_gt(b->low.x, addon->low.x)) b->low.x = addon->low.x; - if (FLOAT8_LT(b->high.y, addon->high.y)) + if (float8_lt(b->high.y, addon->high.y)) b->high.y = addon->high.y; - if (FLOAT8_GT(b->low.y, addon->low.y)) + if (float8_gt(b->low.y, addon->low.y)) b->low.y = addon->low.y; } @@ -185,37 +177,9 @@ gist_box_union(PG_FUNCTION_ARGS) } /* - * GiST Compress methods for boxes - * - * do not do anything. + * We store boxes as boxes in GiST indexes, so we do not need + * compress, decompress, or fetch functions. */ -Datum -gist_box_compress(PG_FUNCTION_ARGS) -{ - PG_RETURN_POINTER(PG_GETARG_POINTER(0)); -} - -/* - * GiST DeCompress method for boxes (also used for points, polygons - * and circles) - * - * do not do anything --- we just use the stored box as is. - */ -Datum -gist_box_decompress(PG_FUNCTION_ARGS) -{ - PG_RETURN_POINTER(PG_GETARG_POINTER(0)); -} - -/* - * GiST Fetch method for boxes - * do not do anything --- we just return the stored box as is. - */ -Datum -gist_box_fetch(PG_FUNCTION_ARGS) -{ - PG_RETURN_POINTER(PG_GETARG_POINTER(0)); -} /* * The GiST Penalty method for boxes (also used for points) @@ -300,7 +264,7 @@ typedef struct /* Index of entry in the initial array */ int index; /* Delta between penalties of entry insertion into different groups */ - double delta; + float8 delta; } CommonEntry; /* @@ -316,13 +280,13 @@ typedef struct bool first; /* true if no split was selected yet */ - double leftUpper; /* upper bound of left interval */ - double rightLower; /* lower bound of right interval */ + float8 leftUpper; /* upper bound of left interval */ + float8 rightLower; /* lower bound of right interval */ float4 ratio; float4 overlap; int dim; /* axis of this split */ - double range; /* width of general MBR projection to the + float8 range; /* width of general MBR projection to the * selected axis */ } ConsiderSplitContext; @@ -331,7 +295,7 @@ typedef struct */ typedef struct { - double lower, + float8 lower, upper; } SplitInterval; @@ -341,7 +305,7 @@ typedef struct static int interval_cmp_lower(const void *i1, const void *i2) { - double lower1 = ((const SplitInterval *) i1)->lower, + float8 lower1 = ((const SplitInterval *) i1)->lower, lower2 = ((const SplitInterval *) i2)->lower; return float8_cmp_internal(lower1, lower2); @@ -353,7 +317,7 @@ interval_cmp_lower(const void *i1, const void *i2) static int interval_cmp_upper(const void *i1, const void *i2) { - double upper1 = ((const SplitInterval *) i1)->upper, + float8 upper1 = ((const SplitInterval *) i1)->upper, upper2 = ((const SplitInterval *) i2)->upper; return float8_cmp_internal(upper1, upper2); @@ -376,14 +340,14 @@ non_negative(float val) */ static inline void g_box_consider_split(ConsiderSplitContext *context, int dimNum, - double rightLower, int minLeftCount, - double leftUpper, int maxLeftCount) + float8 rightLower, int minLeftCount, + float8 leftUpper, int maxLeftCount) { int leftCount, rightCount; float4 ratio, overlap; - double range; + float8 range; /* * Calculate entries distribution ratio 
assuming most uniform distribution @@ -406,8 +370,7 @@ g_box_consider_split(ConsiderSplitContext *context, int dimNum, * Ratio of split - quotient between size of lesser group and total * entries count. */ - ratio = ((float4) Min(leftCount, rightCount)) / - ((float4) context->entriesCount); + ratio = float4_div(Min(leftCount, rightCount), context->entriesCount); if (ratio > LIMIT_RATIO) { @@ -421,11 +384,13 @@ g_box_consider_split(ConsiderSplitContext *context, int dimNum, * or less range with same overlap. */ if (dimNum == 0) - range = context->boundingBox.high.x - context->boundingBox.low.x; + range = float8_mi(context->boundingBox.high.x, + context->boundingBox.low.x); else - range = context->boundingBox.high.y - context->boundingBox.low.y; + range = float8_mi(context->boundingBox.high.y, + context->boundingBox.low.y); - overlap = (leftUpper - rightLower) / range; + overlap = float8_div(float8_mi(leftUpper, rightLower), range); /* If there is no previous selection, select this */ if (context->first) @@ -481,20 +446,14 @@ g_box_consider_split(ConsiderSplitContext *context, int dimNum, /* * Compare common entries by their deltas. - * (We assume the deltas can't be NaN.) */ static int common_entry_cmp(const void *i1, const void *i2) { - double delta1 = ((const CommonEntry *) i1)->delta, + float8 delta1 = ((const CommonEntry *) i1)->delta, delta2 = ((const CommonEntry *) i2)->delta; - if (delta1 < delta2) - return -1; - else if (delta1 > delta2) - return 1; - else - return 0; + return float8_cmp_internal(delta1, delta2); } /* @@ -568,7 +527,7 @@ gist_box_picksplit(PG_FUNCTION_ARGS) context.first = true; /* nothing selected yet */ for (dim = 0; dim < 2; dim++) { - double leftUpper, + float8 leftUpper, rightLower; int i1, i2; @@ -644,9 +603,9 @@ gist_box_picksplit(PG_FUNCTION_ARGS) * Find next lower bound of right group. */ while (i1 < nentries && - FLOAT8_EQ(rightLower, intervalsLower[i1].lower)) + float8_eq(rightLower, intervalsLower[i1].lower)) { - if (FLOAT8_LT(leftUpper, intervalsLower[i1].upper)) + if (float8_lt(leftUpper, intervalsLower[i1].upper)) leftUpper = intervalsLower[i1].upper; i1++; } @@ -659,7 +618,7 @@ gist_box_picksplit(PG_FUNCTION_ARGS) * left group. */ while (i2 < nentries && - FLOAT8_LE(intervalsUpper[i2].upper, leftUpper)) + float8_le(intervalsUpper[i2].upper, leftUpper)) i2++; /* @@ -681,9 +640,9 @@ gist_box_picksplit(PG_FUNCTION_ARGS) /* * Find next upper bound of left group. */ - while (i2 >= 0 && FLOAT8_EQ(leftUpper, intervalsUpper[i2].upper)) + while (i2 >= 0 && float8_eq(leftUpper, intervalsUpper[i2].upper)) { - if (FLOAT8_GT(rightLower, intervalsUpper[i2].lower)) + if (float8_gt(rightLower, intervalsUpper[i2].lower)) rightLower = intervalsUpper[i2].lower; i2--; } @@ -695,7 +654,7 @@ gist_box_picksplit(PG_FUNCTION_ARGS) * Find count of intervals which anyway should be placed to the * right group. 
*/ - while (i1 >= 0 && FLOAT8_GE(intervalsLower[i1].lower, rightLower)) + while (i1 >= 0 && float8_ge(intervalsLower[i1].lower, rightLower)) i1--; /* @@ -765,7 +724,7 @@ gist_box_picksplit(PG_FUNCTION_ARGS) */ for (i = FirstOffsetNumber; i <= maxoff; i = OffsetNumberNext(i)) { - double lower, + float8 lower, upper; /* @@ -783,10 +742,10 @@ gist_box_picksplit(PG_FUNCTION_ARGS) upper = box->high.y; } - if (FLOAT8_LE(upper, context.leftUpper)) + if (float8_le(upper, context.leftUpper)) { /* Fits to the left group */ - if (FLOAT8_GE(lower, context.rightLower)) + if (float8_ge(lower, context.rightLower)) { /* Fits also to the right group, so "common entry" */ commonEntries[commonEntriesCount++].index = i; @@ -804,7 +763,7 @@ gist_box_picksplit(PG_FUNCTION_ARGS) * entry didn't fit on the left group, it better fit in the right * group. */ - Assert(FLOAT8_GE(lower, context.rightLower)); + Assert(float8_ge(lower, context.rightLower)); /* Doesn't fit to the left group, so join to the right group */ PLACE_RIGHT(box, i); @@ -820,7 +779,7 @@ gist_box_picksplit(PG_FUNCTION_ARGS) * Calculate minimum number of entries that must be placed in both * groups, to reach LIMIT_RATIO. */ - int m = ceil(LIMIT_RATIO * (double) nentries); + int m = ceil(LIMIT_RATIO * nentries); /* * Calculate delta between penalties of join "common entries" to @@ -829,8 +788,8 @@ gist_box_picksplit(PG_FUNCTION_ARGS) for (i = 0; i < commonEntriesCount; i++) { box = DatumGetBoxP(entryvec->vector[commonEntries[i].index].key); - commonEntries[i].delta = Abs(box_penalty(leftBox, box) - - box_penalty(rightBox, box)); + commonEntries[i].delta = Abs(float8_mi(box_penalty(leftBox, box), + box_penalty(rightBox, box))); } /* @@ -888,10 +847,10 @@ gist_box_same(PG_FUNCTION_ARGS) bool *result = (bool *) PG_GETARG_POINTER(2); if (b1 && b2) - *result = (FLOAT8_EQ(b1->low.x, b2->low.x) && - FLOAT8_EQ(b1->low.y, b2->low.y) && - FLOAT8_EQ(b1->high.x, b2->high.x) && - FLOAT8_EQ(b1->high.y, b2->high.y)); + *result = (float8_eq(b1->low.x, b2->low.x) && + float8_eq(b1->low.y, b2->low.y) && + float8_eq(b1->high.x, b2->high.x) && + float8_eq(b1->high.y, b2->high.y)); else *result = (b1 == NULL && b2 == NULL); PG_RETURN_POINTER(result); @@ -1084,7 +1043,7 @@ gist_poly_compress(PG_FUNCTION_ARGS) retval = (GISTENTRY *) palloc(sizeof(GISTENTRY)); gistentryinit(*retval, PointerGetDatum(r), entry->rel, entry->page, - entry->offset, FALSE); + entry->offset, false); } else retval = entry; @@ -1109,7 +1068,7 @@ gist_poly_consistent(PG_FUNCTION_ARGS) *recheck = true; if (DatumGetBoxP(entry->key) == NULL || query == NULL) - PG_RETURN_BOOL(FALSE); + PG_RETURN_BOOL(false); /* * Since the operators require recheck anyway, we can just use @@ -1144,15 +1103,15 @@ gist_circle_compress(PG_FUNCTION_ARGS) BOX *r; r = (BOX *) palloc(sizeof(BOX)); - r->high.x = in->center.x + in->radius; - r->low.x = in->center.x - in->radius; - r->high.y = in->center.y + in->radius; - r->low.y = in->center.y - in->radius; + r->high.x = float8_pl(in->center.x, in->radius); + r->low.x = float8_mi(in->center.x, in->radius); + r->high.y = float8_pl(in->center.y, in->radius); + r->low.y = float8_mi(in->center.y, in->radius); retval = (GISTENTRY *) palloc(sizeof(GISTENTRY)); gistentryinit(*retval, PointerGetDatum(r), entry->rel, entry->page, - entry->offset, FALSE); + entry->offset, false); } else retval = entry; @@ -1178,17 +1137,17 @@ gist_circle_consistent(PG_FUNCTION_ARGS) *recheck = true; if (DatumGetBoxP(entry->key) == NULL || query == NULL) - PG_RETURN_BOOL(FALSE); + PG_RETURN_BOOL(false); /* 
* Since the operators require recheck anyway, we can just use * rtree_internal_consistent even at leaf nodes. (This works in part * because the index entries are bounding boxes not circles.) */ - bbox.high.x = query->center.x + query->radius; - bbox.low.x = query->center.x - query->radius; - bbox.high.y = query->center.y + query->radius; - bbox.low.y = query->center.y - query->radius; + bbox.high.x = float8_pl(query->center.x, query->radius); + bbox.low.x = float8_mi(query->center.x, query->radius); + bbox.high.y = float8_pl(query->center.y, query->radius); + bbox.low.y = float8_mi(query->center.y, query->radius); result = rtree_internal_consistent(DatumGetBoxP(entry->key), &bbox, strategy); @@ -1214,7 +1173,7 @@ gist_point_compress(PG_FUNCTION_ARGS) box->high = box->low = *point; gistentryinit(*retval, BoxPGetDatum(box), - entry->rel, entry->page, entry->offset, FALSE); + entry->rel, entry->page, entry->offset, false); PG_RETURN_POINTER(retval); } @@ -1243,7 +1202,7 @@ gist_point_fetch(PG_FUNCTION_ARGS) r->y = in->high.y; gistentryinit(*retval, PointerGetDatum(r), entry->rel, entry->page, - entry->offset, FALSE); + entry->offset, false); PG_RETURN_POINTER(retval); } @@ -1253,10 +1212,10 @@ gist_point_fetch(PG_FUNCTION_ARGS) DatumGetFloat8(DirectFunctionCall2(point_distance, \ PointPGetDatum(p1), PointPGetDatum(p2))) -static double +static float8 computeDistance(bool isLeaf, BOX *box, Point *point) { - double result = 0.0; + float8 result = 0.0; if (isLeaf) { @@ -1274,9 +1233,9 @@ computeDistance(bool isLeaf, BOX *box, Point *point) /* point is over or below box */ Assert(box->low.y <= box->high.y); if (point->y > box->high.y) - result = point->y - box->high.y; + result = float8_mi(point->y, box->high.y); else if (point->y < box->low.y) - result = box->low.y - point->y; + result = float8_mi(box->low.y, point->y); else elog(ERROR, "inconsistent point values"); } @@ -1285,9 +1244,9 @@ computeDistance(bool isLeaf, BOX *box, Point *point) /* point is to left or right of box */ Assert(box->low.x <= box->high.x); if (point->x > box->high.x) - result = point->x - box->high.x; + result = float8_mi(point->x, box->high.x); else if (point->x < box->low.x) - result = box->low.x - point->x; + result = float8_mi(box->low.x, point->x); else elog(ERROR, "inconsistent point values"); } @@ -1295,7 +1254,7 @@ computeDistance(bool isLeaf, BOX *box, Point *point) { /* closest point will be a vertex */ Point p; - double subresult; + float8 subresult; result = point_point_distance(point, &box->low); @@ -1486,7 +1445,7 @@ gist_point_distance(PG_FUNCTION_ARGS) { GISTENTRY *entry = (GISTENTRY *) PG_GETARG_POINTER(0); StrategyNumber strategy = (StrategyNumber) PG_GETARG_UINT16(2); - double distance; + float8 distance; StrategyNumber strategyGroup = strategy / GeoStrategyNumberOffset; switch (strategyGroup) @@ -1515,11 +1474,11 @@ gist_point_distance(PG_FUNCTION_ARGS) * This is a lower bound estimate of distance from point to indexed geometric * type. */ -static double +static float8 gist_bbox_distance(GISTENTRY *entry, Datum query, StrategyNumber strategy, bool *recheck) { - double distance; + float8 distance; StrategyNumber strategyGroup = strategy / GeoStrategyNumberOffset; /* Bounding box distance is always inexact. 
*/ @@ -1549,7 +1508,7 @@ gist_circle_distance(PG_FUNCTION_ARGS) /* Oid subtype = PG_GETARG_OID(3); */ bool *recheck = (bool *) PG_GETARG_POINTER(4); - double distance; + float8 distance; distance = gist_bbox_distance(entry, query, strategy, recheck); @@ -1565,7 +1524,7 @@ gist_poly_distance(PG_FUNCTION_ARGS) /* Oid subtype = PG_GETARG_OID(3); */ bool *recheck = (bool *) PG_GETARG_POINTER(4); - double distance; + float8 distance; distance = gist_bbox_distance(entry, query, strategy, recheck); diff --git a/src/backend/access/gist/gistscan.c b/src/backend/access/gist/gistscan.c index 058544e2ae..4d97ff1d5d 100644 --- a/src/backend/access/gist/gistscan.c +++ b/src/backend/access/gist/gistscan.c @@ -4,7 +4,7 @@ * routines to manage scans on GiST index relations * * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * IDENTIFICATION diff --git a/src/backend/access/gist/gistsplit.c b/src/backend/access/gist/gistsplit.c index 617f42c317..a7038cca67 100644 --- a/src/backend/access/gist/gistsplit.c +++ b/src/backend/access/gist/gistsplit.c @@ -15,7 +15,7 @@ * gistSplitByKey() is the entry point to this file. * * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * IDENTIFICATION @@ -125,7 +125,7 @@ findDontCares(Relation r, GISTSTATE *giststate, GISTENTRY *valvec, * check for nulls */ gistentryinit(entry, spl->splitVector.spl_rdatum, r, NULL, - (OffsetNumber) 0, FALSE); + (OffsetNumber) 0, false); for (i = 0; i < spl->splitVector.spl_nleft; i++) { int j = spl->splitVector.spl_left[i]; @@ -141,7 +141,7 @@ findDontCares(Relation r, GISTSTATE *giststate, GISTENTRY *valvec, /* And conversely for the right-side tuples */ gistentryinit(entry, spl->splitVector.spl_ldatum, r, NULL, - (OffsetNumber) 0, FALSE); + (OffsetNumber) 0, false); for (i = 0; i < spl->splitVector.spl_nright; i++) { int j = spl->splitVector.spl_right[i]; @@ -177,7 +177,7 @@ removeDontCares(OffsetNumber *a, int *len, const bool *dontcare) { OffsetNumber ai = a[i]; - if (dontcare[ai] == FALSE) + if (dontcare[ai] == false) { /* re-emit item into a[] */ *curwpos = ai; @@ -213,10 +213,10 @@ placeOne(Relation r, GISTSTATE *giststate, GistSplitVector *v, rpenalty; GISTENTRY entry; - gistentryinit(entry, v->spl_lattr[attno], r, NULL, 0, FALSE); + gistentryinit(entry, v->spl_lattr[attno], r, NULL, 0, false); lpenalty = gistpenalty(giststate, attno, &entry, v->spl_lisnull[attno], identry + attno, isnull[attno]); - gistentryinit(entry, v->spl_rattr[attno], r, NULL, 0, FALSE); + gistentryinit(entry, v->spl_rattr[attno], r, NULL, 0, false); rpenalty = gistpenalty(giststate, attno, &entry, v->spl_risnull[attno], identry + attno, isnull[attno]); @@ -265,10 +265,10 @@ supportSecondarySplit(Relation r, GISTSTATE *giststate, int attno, entrySL, entrySR; - gistentryinit(entryL, oldL, r, NULL, 0, FALSE); - gistentryinit(entryR, oldR, r, NULL, 0, FALSE); - gistentryinit(entrySL, sv->spl_ldatum, r, NULL, 0, FALSE); - gistentryinit(entrySR, sv->spl_rdatum, r, NULL, 0, FALSE); + gistentryinit(entryL, oldL, r, NULL, 0, false); + gistentryinit(entryR, oldR, r, NULL, 0, false); + gistentryinit(entrySL, sv->spl_ldatum, r, NULL, 0, false); + gistentryinit(entrySR, sv->spl_rdatum, r, NULL, 0, false); if (sv->spl_ldatum_exists && 
sv->spl_rdatum_exists) { @@ -320,8 +320,8 @@ supportSecondarySplit(Relation r, GISTSTATE *giststate, int attno, SWAPVAR(sv->spl_left, sv->spl_right, off); SWAPVAR(sv->spl_nleft, sv->spl_nright, noff); SWAPVAR(sv->spl_ldatum, sv->spl_rdatum, datum); - gistentryinit(entrySL, sv->spl_ldatum, r, NULL, 0, FALSE); - gistentryinit(entrySR, sv->spl_rdatum, r, NULL, 0, FALSE); + gistentryinit(entrySL, sv->spl_ldatum, r, NULL, 0, false); + gistentryinit(entrySR, sv->spl_rdatum, r, NULL, 0, false); } if (sv->spl_ldatum_exists) @@ -396,20 +396,20 @@ genericPickSplit(GISTSTATE *giststate, GistEntryVector *entryvec, GIST_SPLITVEC * Calls user picksplit method for attno column to split tuples into * two vectors. * - * Returns FALSE if split is complete (there are no more index columns, or + * Returns false if split is complete (there are no more index columns, or * there is no need to consider them because split is optimal already). * - * Returns TRUE and v->spl_dontcare = NULL if the picksplit result is + * Returns true and v->spl_dontcare = NULL if the picksplit result is * degenerate (all tuples seem to be don't-cares), so we should just * disregard this column and split on the next column(s) instead. * - * Returns TRUE and v->spl_dontcare != NULL if there are don't-care tuples + * Returns true and v->spl_dontcare != NULL if there are don't-care tuples * that could be relocated based on the next column(s). The don't-care * tuples have been removed from the split and must be reinserted by caller. * There is at least one non-don't-care tuple on each side of the split, * and union keys for all columns are updated to include just those tuples. * - * A TRUE result implies there is at least one more index column. + * A true result implies there is at least one more index column. */ static bool gistUserPicksplit(Relation r, GistEntryVector *entryvec, int attno, GistSplitVector *v, @@ -610,7 +610,7 @@ gistSplitHalf(GIST_SPLITVEC *v, int len) * attno: column we are working on (zero-based index) * * Outside caller must initialize v->spl_lisnull and v->spl_risnull arrays - * to all-TRUE. On return, spl_left/spl_nleft contain indexes of tuples + * to all-true. On return, spl_left/spl_nleft contain indexes of tuples * to go left, spl_right/spl_nright contain indexes of tuples to go right, * spl_lattr/spl_lisnull contain left-side union key values, and * spl_rattr/spl_risnull contain right-side union key values. Other fields @@ -643,7 +643,7 @@ gistSplitByKey(Relation r, Page page, IndexTuple *itup, int len, &IsNull); gistdentryinit(giststate, attno, &(entryvec->vector[i]), datum, r, page, i, - FALSE, IsNull); + false, IsNull); if (IsNull) offNullTuples[nOffNullTuples++] = i; } @@ -655,7 +655,7 @@ gistSplitByKey(Relation r, Page page, IndexTuple *itup, int len, * our attention to the next column. If there's no next column, just * split page in half. 
*/ - v->spl_risnull[attno] = v->spl_lisnull[attno] = TRUE; + v->spl_risnull[attno] = v->spl_lisnull[attno] = true; if (attno + 1 < giststate->tupdesc->natts) gistSplitByKey(r, page, itup, len, giststate, v, attno + 1); @@ -672,7 +672,7 @@ gistSplitByKey(Relation r, Page page, IndexTuple *itup, int len, */ v->splitVector.spl_right = offNullTuples; v->splitVector.spl_nright = nOffNullTuples; - v->spl_risnull[attno] = TRUE; + v->spl_risnull[attno] = true; v->splitVector.spl_left = (OffsetNumber *) palloc(len * sizeof(OffsetNumber)); v->splitVector.spl_nleft = 0; diff --git a/src/backend/access/gist/gistutil.c b/src/backend/access/gist/gistutil.c index b6ccc1a66a..70627e5df6 100644 --- a/src/backend/access/gist/gistutil.c +++ b/src/backend/access/gist/gistutil.c @@ -4,7 +4,7 @@ * utilities routines for the postgres GiST index access method. * * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * IDENTIFICATION @@ -13,7 +13,6 @@ */ #include "postgres.h" -#include #include #include "access/gist_private.h" @@ -22,8 +21,9 @@ #include "catalog/pg_opclass.h" #include "storage/indexfsm.h" #include "storage/lmgr.h" -#include "utils/builtins.h" +#include "utils/float.h" #include "utils/syscache.h" +#include "utils/lsyscache.h" /* @@ -179,7 +179,7 @@ gistMakeUnionItVec(GISTSTATE *giststate, IndexTuple *itvec, int len, evec->vector + evec->n, datum, NULL, NULL, (OffsetNumber) 0, - FALSE, IsNull); + false, IsNull); evec->n++; } @@ -187,7 +187,7 @@ gistMakeUnionItVec(GISTSTATE *giststate, IndexTuple *itvec, int len, if (evec->n == 0) { attr[i] = (Datum) 0; - isnull[i] = TRUE; + isnull[i] = true; } else { @@ -204,7 +204,7 @@ gistMakeUnionItVec(GISTSTATE *giststate, IndexTuple *itvec, int len, PointerGetDatum(evec), PointerGetDatum(&attrsize)); - isnull[i] = FALSE; + isnull[i] = false; } } } @@ -246,17 +246,17 @@ gistMakeUnionKey(GISTSTATE *giststate, int attno, if (isnull1 && isnull2) { - *dstisnull = TRUE; + *dstisnull = true; *dst = (Datum) 0; } else { - if (isnull1 == FALSE && isnull2 == FALSE) + if (isnull1 == false && isnull2 == false) { evec->vector[0] = *entry1; evec->vector[1] = *entry2; } - else if (isnull1 == FALSE) + else if (isnull1 == false) { evec->vector[0] = *entry1; evec->vector[1] = *entry1; @@ -267,7 +267,7 @@ gistMakeUnionKey(GISTSTATE *giststate, int attno, evec->vector[1] = *entry2; } - *dstisnull = FALSE; + *dstisnull = false; *dst = FunctionCall2Coll(&giststate->unionFn[attno], giststate->supportCollation[attno], PointerGetDatum(evec), @@ -303,7 +303,7 @@ gistDeCompressAtt(GISTSTATE *giststate, Relation r, IndexTuple tuple, Page p, datum = index_getattr(tuple, i + 1, giststate->tupdesc, &isnull[i]); gistdentryinit(giststate, i, &attdata[i], datum, r, p, o, - FALSE, isnull[i]); + false, isnull[i]); } } @@ -313,7 +313,7 @@ gistDeCompressAtt(GISTSTATE *giststate, Relation r, IndexTuple tuple, Page p, IndexTuple gistgetadjusted(Relation r, IndexTuple oldtup, IndexTuple addtup, GISTSTATE *giststate) { - bool neednew = FALSE; + bool neednew = false; GISTENTRY oldentries[INDEX_MAX_KEYS], addentries[INDEX_MAX_KEYS]; bool oldisnull[INDEX_MAX_KEYS], @@ -451,7 +451,7 @@ gistchoose(Relation r, Page p, IndexTuple it, /* it has compressed entry */ /* Compute penalty for this column. 
*/ datum = index_getattr(itup, j + 1, giststate->tupdesc, &IsNull); gistdentryinit(giststate, j, &entry, datum, r, p, i, - FALSE, IsNull); + false, IsNull); usize = gistpenalty(giststate, j, &entry, IsNull, &identry[j], isnull[j]); if (usize > 0) @@ -550,6 +550,11 @@ gistdentryinit(GISTSTATE *giststate, int nkey, GISTENTRY *e, GISTENTRY *dep; gistentryinit(*e, k, r, pg, o, l); + + /* there may not be a decompress function in opclass */ + if (!OidIsValid(giststate->decompressFn[nkey].fn_oid)) + return; + dep = (GISTENTRY *) DatumGetPointer(FunctionCall1Coll(&giststate->decompressFn[nkey], giststate->supportCollation[nkey], @@ -585,10 +590,14 @@ gistFormTuple(GISTSTATE *giststate, Relation r, gistentryinit(centry, attdata[i], r, NULL, (OffsetNumber) 0, isleaf); - cep = (GISTENTRY *) - DatumGetPointer(FunctionCall1Coll(&giststate->compressFn[i], - giststate->supportCollation[i], - PointerGetDatum(¢ry))); + /* there may not be a compress function in opclass */ + if (OidIsValid(giststate->compressFn[i].fn_oid)) + cep = (GISTENTRY *) + DatumGetPointer(FunctionCall1Coll(&giststate->compressFn[i], + giststate->supportCollation[i], + PointerGetDatum(¢ry))); + else + cep = ¢ry; compatt[i] = cep->key; } } @@ -648,6 +657,17 @@ gistFetchTuple(GISTSTATE *giststate, Relation r, IndexTuple tuple) else fetchatt[i] = (Datum) 0; } + else if (giststate->compressFn[i].fn_oid == InvalidOid) + { + /* + * If opclass does not provide compress method that could change + * original value, att is necessarily stored in original form. + */ + if (!isnull[i]) + fetchatt[i] = datum; + else + fetchatt[i] = (Datum) 0; + } else { /* @@ -671,8 +691,8 @@ gistpenalty(GISTSTATE *giststate, int attno, { float penalty = 0.0; - if (giststate->penaltyFn[attno].fn_strict == FALSE || - (isNullOrig == FALSE && isNullAdd == FALSE)) + if (giststate->penaltyFn[attno].fn_strict == false || + (isNullOrig == false && isNullAdd == false)) { FunctionCall3Coll(&giststate->penaltyFn[attno], giststate->supportCollation[attno], @@ -852,12 +872,6 @@ gistproperty(Oid index_oid, int attno, IndexAMProperty prop, const char *propname, bool *res, bool *isnull) { - HeapTuple tuple; - Form_pg_index rd_index PG_USED_FOR_ASSERTS_ONLY; - Form_pg_opclass rd_opclass; - Datum datum; - bool disnull; - oidvector *indclass; Oid opclass, opfamily, opcintype; @@ -891,41 +905,19 @@ gistproperty(Oid index_oid, int attno, } /* First we need to know the column's opclass. */ - - tuple = SearchSysCache1(INDEXRELID, ObjectIdGetDatum(index_oid)); - if (!HeapTupleIsValid(tuple)) + opclass = get_index_column_opclass(index_oid, attno); + if (!OidIsValid(opclass)) { *isnull = true; return true; } - rd_index = (Form_pg_index) GETSTRUCT(tuple); - - /* caller is supposed to guarantee this */ - Assert(attno > 0 && attno <= rd_index->indnatts); - - datum = SysCacheGetAttr(INDEXRELID, tuple, - Anum_pg_index_indclass, &disnull); - Assert(!disnull); - - indclass = ((oidvector *) DatumGetPointer(datum)); - opclass = indclass->values[attno - 1]; - - ReleaseSysCache(tuple); /* Now look up the opclass family and input datatype. */ - - tuple = SearchSysCache1(CLAOID, ObjectIdGetDatum(opclass)); - if (!HeapTupleIsValid(tuple)) + if (!get_opclass_opfamily_and_input_type(opclass, &opfamily, &opcintype)) { *isnull = true; return true; } - rd_opclass = (Form_pg_opclass) GETSTRUCT(tuple); - - opfamily = rd_opclass->opcfamily; - opcintype = rd_opclass->opcintype; - - ReleaseSysCache(tuple); /* And now we can check whether the function is provided. 
*/ @@ -934,6 +926,22 @@ gistproperty(Oid index_oid, int attno, ObjectIdGetDatum(opcintype), ObjectIdGetDatum(opcintype), Int16GetDatum(procno)); + + /* + * Special case: even without a fetch function, AMPROP_RETURNABLE is true + * if the opclass has no compress function. + */ + if (prop == AMPROP_RETURNABLE && !*res) + { + *res = !SearchSysCacheExists4(AMPROCNUM, + ObjectIdGetDatum(opfamily), + ObjectIdGetDatum(opcintype), + ObjectIdGetDatum(opcintype), + Int16GetDatum(GIST_COMPRESS_PROC)); + } + + *isnull = false; + return true; } diff --git a/src/backend/access/gist/gistvacuum.c b/src/backend/access/gist/gistvacuum.c index 77d9d12f0b..5948218c77 100644 --- a/src/backend/access/gist/gistvacuum.c +++ b/src/backend/access/gist/gistvacuum.c @@ -4,7 +4,7 @@ * vacuuming routines for the postgres GiST index access method. * * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * IDENTIFICATION @@ -32,6 +32,7 @@ gistvacuumcleanup(IndexVacuumInfo *info, IndexBulkDeleteResult *stats) BlockNumber npages, blkno; BlockNumber totFreePages; + double tuplesCount; bool needLock; /* No-op in ANALYZE ONLY mode */ @@ -40,17 +41,7 @@ gistvacuumcleanup(IndexVacuumInfo *info, IndexBulkDeleteResult *stats) /* Set up all-zero stats if gistbulkdelete wasn't called */ if (stats == NULL) - { stats = (IndexBulkDeleteResult *) palloc0(sizeof(IndexBulkDeleteResult)); - /* use heap's tuple count */ - stats->num_index_tuples = info->num_heap_tuples; - stats->estimated_count = info->estimated_count; - - /* - * XXX the above is wrong if index is partial. Would it be OK to just - * return NULL, or is there work we must do below? - */ - } /* * Need lock unless it's local to this backend. @@ -65,6 +56,7 @@ gistvacuumcleanup(IndexVacuumInfo *info, IndexBulkDeleteResult *stats) UnlockRelationForExtension(rel, ExclusiveLock); totFreePages = 0; + tuplesCount = 0; for (blkno = GIST_ROOT_BLKNO + 1; blkno < npages; blkno++) { Buffer buffer; @@ -82,6 +74,11 @@ gistvacuumcleanup(IndexVacuumInfo *info, IndexBulkDeleteResult *stats) totFreePages++; RecordFreeIndexPage(rel, blkno); } + else if (GistPageIsLeaf(page)) + { + /* count tuples in index (considering only leaf tuples) */ + tuplesCount += PageGetMaxOffsetNumber(page); + } UnlockReleaseBuffer(buffer); } @@ -95,6 +92,8 @@ gistvacuumcleanup(IndexVacuumInfo *info, IndexBulkDeleteResult *stats) stats->num_pages = RelationGetNumberOfBlocks(rel); if (needLock) UnlockRelationForExtension(rel, ExclusiveLock); + stats->num_index_tuples = tuplesCount; + stats->estimated_count = false; return stats; } @@ -249,7 +248,7 @@ gistbulkdelete(IndexVacuumInfo *info, IndexBulkDeleteResult *stats, ptr = (GistBDItem *) palloc(sizeof(GistBDItem)); ptr->blkno = ItemPointerGetBlockNumber(&(idxtuple->t_tid)); - ptr->parentlsn = PageGetLSN(page); + ptr->parentlsn = BufferGetLSNAtomic(buffer); ptr->next = stack->next; stack->next = ptr; diff --git a/src/backend/access/gist/gistvalidate.c b/src/backend/access/gist/gistvalidate.c index 42254c5f15..c300e52ca5 100644 --- a/src/backend/access/gist/gistvalidate.c +++ b/src/backend/access/gist/gistvalidate.c @@ -3,7 +3,7 @@ * gistvalidate.c * Opclass validator for GiST. 
* - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * IDENTIFICATION @@ -90,7 +90,7 @@ gistvalidate(Oid opclassoid) { ereport(INFO, (errcode(ERRCODE_INVALID_OBJECT_DEFINITION), - errmsg("operator family \"%s\" of access method %s contains support procedure %s with different left and right input types", + errmsg("operator family \"%s\" of access method %s contains support function %s with different left and right input types", opfamilyname, "gist", format_procedure(procform->amproc)))); result = false; @@ -258,7 +258,8 @@ gistvalidate(Oid opclassoid) if (opclassgroup && (opclassgroup->functionset & (((uint64) 1) << i)) != 0) continue; /* got it */ - if (i == GIST_DISTANCE_PROC || i == GIST_FETCH_PROC) + if (i == GIST_DISTANCE_PROC || i == GIST_FETCH_PROC || + i == GIST_COMPRESS_PROC || i == GIST_DECOMPRESS_PROC) continue; /* optional methods */ ereport(INFO, (errcode(ERRCODE_INVALID_OBJECT_DEFINITION), diff --git a/src/backend/access/gist/gistxlog.c b/src/backend/access/gist/gistxlog.c index 4f4fe8fab5..1e09126978 100644 --- a/src/backend/access/gist/gistxlog.c +++ b/src/backend/access/gist/gistxlog.c @@ -4,7 +4,7 @@ * WAL replay logic for GiST. * * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * IDENTIFICATION @@ -352,14 +352,14 @@ gist_mask(char *pagedata, BlockNumber blkno) { Page page = (Page) pagedata; - mask_page_lsn(page); + mask_page_lsn_and_checksum(page); mask_page_hint_bits(page); mask_unused_space(page); /* * NSN is nothing but a special purpose LSN. Hence, mask it for the same - * reason as mask_page_lsn. + * reason as mask_page_lsn_and_checksum. */ GistPageSetNSN(page, (uint64) MASK_MARKER); diff --git a/src/backend/access/hash/README b/src/backend/access/hash/README index c8a0ec78a9..2227ebfe9b 100644 --- a/src/backend/access/hash/README +++ b/src/backend/access/hash/README @@ -173,7 +173,7 @@ where a given tuple ought to be located. To do this, we need the bucket count, highmask, and lowmask from the metapage; however, it's undesirable for performance reasons to have to have to lock and pin the metapage for every such operation. Instead, we retain a cached copy of the metapage -in each each backend's relcache entry. This will produce the correct +in each backend's relcache entry. This will produce the correct bucket mapping as long as the target bucket hasn't been split since the last cache refresh. @@ -189,8 +189,8 @@ reality, InvalidBlockNumber. After computing the ostensibly-correct bucket number based on our cached copy of the metapage, we lock the corresponding primary bucket page and check whether the bucket count stored in hasho_prevblkno is greater than -our the number of buckets stored in our cached copy of the metapage. If -so, the bucket has certainly been split, because the must originally +the number of buckets stored in our cached copy of the metapage. If +so, the bucket has certainly been split, because the count must originally have been less than the number of buckets that existed at that time and can't have increased except due to a split. 
If not, the bucket can't have been split, because a split would have created a new bucket with a higher @@ -259,10 +259,11 @@ The reader algorithm is: -- then, per read request: reacquire content lock on current page step to next page if necessary (no chaining of content locks, but keep - the pin on the primary bucket throughout the scan; we also maintain - a pin on the page currently being scanned) - get tuple - release content lock + the pin on the primary bucket throughout the scan) + save all the matching tuples from current index page into an items array + release pin and content lock (but if it is primary bucket page retain + its pin till the end of the scan) + get tuple from an item array -- at scan shutdown: release all pins still held @@ -270,15 +271,13 @@ Holding the buffer pin on the primary bucket page for the whole scan prevents the reader's current-tuple pointer from being invalidated by splits or compactions. (Of course, other buckets can still be split or compacted.) -To keep concurrency reasonably good, we require readers to cope with -concurrent insertions, which means that they have to be able to re-find -their current scan position after re-acquiring the buffer content lock on -page. Since deletion is not possible while a reader holds the pin on bucket, -and we assume that heap tuple TIDs are unique, this can be implemented by -searching for the same heap tuple TID previously returned. Insertion does -not move index entries across pages, so the previously-returned index entry -should always be on the same page, at the same or higher offset number, -as it was before. +To minimize lock/unlock traffic, hash index scan always searches the entire +hash page to identify all the matching items at once, copying their heap tuple +IDs into backend-local storage. The heap tuple IDs are then processed while not +holding any page lock within the index thereby, allowing concurrent insertion +to happen on the same index page without any requirement of re-finding the +current scan position for the reader. We do continue to hold a pin on the +bucket page, to protect against concurrent deletions and bucket split. To allow for scans during a bucket split, if at the start of the scan, the bucket is marked as bucket-being-populated, it scan all the tuples in that @@ -415,23 +414,43 @@ The fourth operation is garbage collection (bulk deletion): Note that this is designed to allow concurrent splits and scans. If a split occurs, tuples relocated into the new bucket will be visited twice by the -scan, but that does no harm. As we release the lock on bucket page during -cleanup scan of a bucket, it will allow concurrent scan to start on a bucket -and ensures that scan will always be behind cleanup. It is must to keep scans -behind cleanup, else vacuum could decrease the TIDs that are required to -complete the scan. Now, as the scan that returns multiple tuples from the -same bucket page always expect next valid TID to be greater than or equal to -the current TID, it might miss the tuples. This holds true for backward scans -as well (backward scans first traverse each bucket starting from first bucket -to last overflow page in the chain). We must be careful about the statistics -reported by the VACUUM operation. What we can do is count the number of -tuples scanned, and believe this in preference to the stored tuple count if -the stored tuple count and number of buckets did *not* change at any time -during the scan. 
This provides a way of correcting the stored tuple count if -it gets out of sync for some reason. But if a split or insertion does occur -concurrently, the scan count is untrustworthy; instead, subtract the number of -tuples deleted from the stored tuple count and use that. - +scan, but that does no harm. See also "Interlocking Between Scans and +VACUUM", below. + +We must be careful about the statistics reported by the VACUUM operation. +What we can do is count the number of tuples scanned, and believe this in +preference to the stored tuple count if the stored tuple count and number of +buckets did *not* change at any time during the scan. This provides a way of +correcting the stored tuple count if it gets out of sync for some reason. But +if a split or insertion does occur concurrently, the scan count is +untrustworthy; instead, subtract the number of tuples deleted from the stored +tuple count and use that. + +Interlocking Between Scans and VACUUM +------------------------------------- + +Since we release the lock on bucket page during a cleanup scan of a bucket, a +concurrent scan could start in that bucket before we've finished vacuuming it. +If a scan gets ahead of cleanup, we could have the following problem: (1) the +scan sees heap TIDs that are about to be removed before they are processed by +VACUUM, (2) the scan decides that one or more of those TIDs are dead, (3) +VACUUM completes, (4) one or more of the TIDs the scan decided were dead are +reused for an unrelated tuple, and finally (5) the scan wakes up and +erroneously kills the new tuple. + +Note that this requires VACUUM and a scan to be active in the same bucket at +the same time. If VACUUM completes before the scan starts, the scan never has +a chance to see the dead tuples; if the scan completes before the VACUUM +starts, the heap TIDs can't have been reused meanwhile. Furthermore, VACUUM +can't start on a bucket that has an active scan, because the scan holds a pin +on the primary bucket page, and VACUUM must take a cleanup lock on that page +in order to begin cleanup. Therefore, the only way this problem can occur is +for a scan to start after VACUUM has released the cleanup lock on the bucket +but before it has processed the entire bucket and then overtake the cleanup +operation. + +Currently, we prevent this using lock chaining: cleanup locks the next page +in the chain before releasing the lock and pin on the page just processed. Free Space Management --------------------- diff --git a/src/backend/access/hash/hash.c b/src/backend/access/hash/hash.c index d89c192862..0002df30c0 100644 --- a/src/backend/access/hash/hash.c +++ b/src/backend/access/hash/hash.c @@ -3,7 +3,7 @@ * hash.c * Implementation of Margo Seltzer's Hashing package for postgres. 
* - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * @@ -68,8 +68,9 @@ hashhandler(PG_FUNCTION_ARGS) amroutine->amsearchnulls = false; amroutine->amstorage = false; amroutine->amclusterable = false; - amroutine->ampredlocks = false; + amroutine->ampredlocks = true; amroutine->amcanparallel = false; + amroutine->amcaninclude = false; amroutine->amkeytype = INT4OID; amroutine->ambuild = hashbuild; @@ -159,7 +160,7 @@ hashbuild(Relation heap, Relation index, IndexInfo *indexInfo) /* do the heap scan */ reltuples = IndexBuildHeapScan(heap, index, indexInfo, true, - hashbuildCallback, (void *) &buildstate); + hashbuildCallback, (void *) &buildstate, NULL); if (buildstate.spool) { @@ -268,65 +269,20 @@ bool hashgettuple(IndexScanDesc scan, ScanDirection dir) { HashScanOpaque so = (HashScanOpaque) scan->opaque; - Relation rel = scan->indexRelation; - Buffer buf; - Page page; - OffsetNumber offnum; - ItemPointer current; bool res; /* Hash indexes are always lossy since we store only the hash code */ scan->xs_recheck = true; - /* - * We hold pin but not lock on current buffer while outside the hash AM. - * Reacquire the read lock here. - */ - if (BufferIsValid(so->hashso_curbuf)) - LockBuffer(so->hashso_curbuf, BUFFER_LOCK_SHARE); - /* * If we've already initialized this scan, we can just advance it in the * appropriate direction. If we haven't done so yet, we call a routine to * get the first item in the scan. */ - current = &(so->hashso_curpos); - if (ItemPointerIsValid(current)) + if (!HashScanPosIsValid(so->currPos)) + res = _hash_first(scan, dir); + else { - /* - * An insertion into the current index page could have happened while - * we didn't have read lock on it. Re-find our position by looking - * for the TID we previously returned. (Because we hold a pin on the - * primary bucket page, no deletions or splits could have occurred; - * therefore we can expect that the TID still exists in the current - * index page, at an offset >= where we were.) - */ - OffsetNumber maxoffnum; - - buf = so->hashso_curbuf; - Assert(BufferIsValid(buf)); - page = BufferGetPage(buf); - - /* - * We don't need test for old snapshot here as the current buffer is - * pinned, so vacuum can't clean the page. - */ - maxoffnum = PageGetMaxOffsetNumber(page); - for (offnum = ItemPointerGetOffsetNumber(current); - offnum <= maxoffnum; - offnum = OffsetNumberNext(offnum)) - { - IndexTuple itup; - - itup = (IndexTuple) PageGetItem(page, PageGetItemId(page, offnum)); - if (ItemPointerEquals(&(so->hashso_heappos), &(itup->t_tid))) - break; - } - if (offnum > maxoffnum) - elog(ERROR, "failed to re-find scan position within index \"%s\"", - RelationGetRelationName(rel)); - ItemPointerSetOffsetNumber(current, offnum); - /* * Check to see if we should kill the previously-fetched tuple. */ @@ -341,16 +297,11 @@ hashgettuple(IndexScanDesc scan, ScanDirection dir) * entries. 
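The change just below replaces the killedItems array of HashScanPosItem copies with an array of int indexes into so->currPos.items[]; each saved index still leads back to both the page offset and the heap TID recorded by _hash_saveitem. The following is a simplified sketch (not literal patch code) of how such an index can be resolved later; the real work happens in _hash_kill_items, shown further down, and it assumes the page is already pinned and share-locked.

/*
 * Simplified sketch only: resolve a saved item index back to its line
 * pointer and heap TID.  "page", "i" and the share lock on the buffer
 * are assumed to be set up by the caller.
 */
for (i = 0; i < so->numKilled; i++)
{
	HashScanPosItem *item = &so->currPos.items[so->killedItems[i]];
	ItemId		iid = PageGetItemId(page, item->indexOffset);
	IndexTuple	ituple = (IndexTuple) PageGetItem(page, iid);

	/* match by heap TID before assuming this is the tuple to kill */
	if (ItemPointerEquals(&ituple->t_tid, &item->heapTid))
		ItemIdMarkDead(iid);	/* LP_DEAD is only a hint */
}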
*/ if (so->killedItems == NULL) - so->killedItems = palloc(MaxIndexTuplesPerPage * - sizeof(HashScanPosItem)); + so->killedItems = (int *) + palloc(MaxIndexTuplesPerPage * sizeof(int)); if (so->numKilled < MaxIndexTuplesPerPage) - { - so->killedItems[so->numKilled].heapTid = so->hashso_heappos; - so->killedItems[so->numKilled].indexOffset = - ItemPointerGetOffsetNumber(&(so->hashso_curpos)); - so->numKilled++; - } + so->killedItems[so->numKilled++] = so->currPos.itemIndex; } /* @@ -358,30 +309,6 @@ hashgettuple(IndexScanDesc scan, ScanDirection dir) */ res = _hash_next(scan, dir); } - else - res = _hash_first(scan, dir); - - /* - * Skip killed tuples if asked to. - */ - if (scan->ignore_killed_tuples) - { - while (res) - { - offnum = ItemPointerGetOffsetNumber(current); - page = BufferGetPage(so->hashso_curbuf); - if (!ItemIdIsDead(PageGetItemId(page, offnum))) - break; - res = _hash_next(scan, dir); - } - } - - /* Release read lock on current buffer, but keep it pinned */ - if (BufferIsValid(so->hashso_curbuf)) - LockBuffer(so->hashso_curbuf, BUFFER_LOCK_UNLOCK); - - /* Return current heap TID on success */ - scan->xs_ctup.t_self = so->hashso_heappos; return res; } @@ -396,35 +323,21 @@ hashgetbitmap(IndexScanDesc scan, TIDBitmap *tbm) HashScanOpaque so = (HashScanOpaque) scan->opaque; bool res; int64 ntids = 0; + HashScanPosItem *currItem; res = _hash_first(scan, ForwardScanDirection); while (res) { - bool add_tuple; + currItem = &so->currPos.items[so->currPos.itemIndex]; /* - * Skip killed tuples if asked to. + * _hash_first and _hash_next handle eliminate dead index entries + * whenever scan->ignored_killed_tuples is true. Therefore, there's + * nothing to do here except add the results to the TIDBitmap. */ - if (scan->ignore_killed_tuples) - { - Page page; - OffsetNumber offnum; - - offnum = ItemPointerGetOffsetNumber(&(so->hashso_curpos)); - page = BufferGetPage(so->hashso_curbuf); - add_tuple = !ItemIdIsDead(PageGetItemId(page, offnum)); - } - else - add_tuple = true; - - /* Save tuple ID, and continue scanning */ - if (add_tuple) - { - /* Note we mark the tuple ID as requiring recheck */ - tbm_add_tuples(tbm, &(so->hashso_heappos), 1, true); - ntids++; - } + tbm_add_tuples(tbm, &(currItem->heapTid), 1, true); + ntids++; res = _hash_next(scan, ForwardScanDirection); } @@ -448,12 +361,9 @@ hashbeginscan(Relation rel, int nkeys, int norderbys) scan = RelationGetIndexScan(rel, nkeys, norderbys); so = (HashScanOpaque) palloc(sizeof(HashScanOpaqueData)); - so->hashso_curbuf = InvalidBuffer; + HashScanPosInvalidate(so->currPos); so->hashso_bucket_buf = InvalidBuffer; so->hashso_split_bucket_buf = InvalidBuffer; - /* set position invalid (this will cause _hash_first call) */ - ItemPointerSetInvalid(&(so->hashso_curpos)); - ItemPointerSetInvalid(&(so->hashso_heappos)); so->hashso_buc_populated = false; so->hashso_buc_split = false; @@ -476,22 +386,17 @@ hashrescan(IndexScanDesc scan, ScanKey scankey, int nscankeys, HashScanOpaque so = (HashScanOpaque) scan->opaque; Relation rel = scan->indexRelation; - /* - * Before leaving current page, deal with any killed items. Also, ensure - * that we acquire lock on current page before calling _hash_kill_items. 
- */ - if (so->numKilled > 0) + if (HashScanPosIsValid(so->currPos)) { - LockBuffer(so->hashso_curbuf, BUFFER_LOCK_SHARE); - _hash_kill_items(scan); - LockBuffer(so->hashso_curbuf, BUFFER_LOCK_UNLOCK); + /* Before leaving current page, deal with any killed items */ + if (so->numKilled > 0) + _hash_kill_items(scan); } _hash_dropscanbuf(rel, so); /* set position invalid (this will cause _hash_first call) */ - ItemPointerSetInvalid(&(so->hashso_curpos)); - ItemPointerSetInvalid(&(so->hashso_heappos)); + HashScanPosInvalidate(so->currPos); /* Update scan key, if a new one is given */ if (scankey && scan->numberOfKeys > 0) @@ -514,15 +419,11 @@ hashendscan(IndexScanDesc scan) HashScanOpaque so = (HashScanOpaque) scan->opaque; Relation rel = scan->indexRelation; - /* - * Before leaving current page, deal with any killed items. Also, ensure - * that we acquire lock on current page before calling _hash_kill_items. - */ - if (so->numKilled > 0) + if (HashScanPosIsValid(so->currPos)) { - LockBuffer(so->hashso_curbuf, BUFFER_LOCK_SHARE); - _hash_kill_items(scan); - LockBuffer(so->hashso_curbuf, BUFFER_LOCK_UNLOCK); + /* Before leaving current page, deal with any killed items */ + if (so->numKilled > 0) + _hash_kill_items(scan); } _hash_dropscanbuf(rel, so); @@ -755,16 +656,15 @@ hashvacuumcleanup(IndexVacuumInfo *info, IndexBulkDeleteResult *stats) * primary bucket page. The lock won't necessarily be held continuously, * though, because we'll release it when visiting overflow pages. * - * It would be very bad if this function cleaned a page while some other - * backend was in the midst of scanning it, because hashgettuple assumes - * that the next valid TID will be greater than or equal to the current - * valid TID. There can't be any concurrent scans in progress when we first - * enter this function because of the cleanup lock we hold on the primary - * bucket page, but as soon as we release that lock, there might be. We - * handle that by conspiring to prevent those scans from passing our cleanup - * scan. To do that, we lock the next page in the bucket chain before - * releasing the lock on the previous page. (This type of lock chaining is - * not ideal, so we might want to look for a better solution at some point.) + * There can't be any concurrent scans in progress when we first enter this + * function because of the cleanup lock we hold on the primary bucket page, + * but as soon as we release that lock, there might be. If those scans got + * ahead of our cleanup scan, they might see a tuple before we kill it and + * wake up only after VACUUM has completed and the TID has been recycled for + * an unrelated tuple. To avoid that calamity, we prevent scans from passing + * our cleanup scan by locking the next page in the bucket chain before + * releasing the lock on the previous page. (This type of lock chaining is not + * ideal, so we might want to look for a better solution at some point.) * * We need to retain a pin on the primary bucket to ensure that no concurrent * split can start. diff --git a/src/backend/access/hash/hash_xlog.c b/src/backend/access/hash/hash_xlog.c index 67a856c142..ab5aaff156 100644 --- a/src/backend/access/hash/hash_xlog.c +++ b/src/backend/access/hash/hash_xlog.c @@ -4,7 +4,7 @@ * WAL replay logic for hash index. 
* * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * IDENTIFICATION @@ -558,7 +558,7 @@ hash_xlog_move_page_contents(XLogReaderState *record) Size itemsz; OffsetNumber l; - itemsz = IndexTupleDSize(*itup); + itemsz = IndexTupleSize(itup); itemsz = MAXALIGN(itemsz); data += itemsz; @@ -686,7 +686,7 @@ hash_xlog_squeeze_page(XLogReaderState *record) Size itemsz; OffsetNumber l; - itemsz = IndexTupleDSize(*itup); + itemsz = IndexTupleSize(itup); itemsz = MAXALIGN(itemsz); data += itemsz; @@ -1263,7 +1263,7 @@ hash_mask(char *pagedata, BlockNumber blkno) HashPageOpaque opaque; int pagetype; - mask_page_lsn(page); + mask_page_lsn_and_checksum(page); mask_page_hint_bits(page); mask_unused_space(page); diff --git a/src/backend/access/hash/hashfunc.c b/src/backend/access/hash/hashfunc.c index a127f3f8b1..a0060a633d 100644 --- a/src/backend/access/hash/hashfunc.c +++ b/src/backend/access/hash/hashfunc.c @@ -3,7 +3,7 @@ * hashfunc.c * Support functions for hash access method. * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * @@ -46,18 +46,36 @@ hashchar(PG_FUNCTION_ARGS) return hash_uint32((int32) PG_GETARG_CHAR(0)); } +Datum +hashcharextended(PG_FUNCTION_ARGS) +{ + return hash_uint32_extended((int32) PG_GETARG_CHAR(0), PG_GETARG_INT64(1)); +} + Datum hashint2(PG_FUNCTION_ARGS) { return hash_uint32((int32) PG_GETARG_INT16(0)); } +Datum +hashint2extended(PG_FUNCTION_ARGS) +{ + return hash_uint32_extended((int32) PG_GETARG_INT16(0), PG_GETARG_INT64(1)); +} + Datum hashint4(PG_FUNCTION_ARGS) { return hash_uint32(PG_GETARG_INT32(0)); } +Datum +hashint4extended(PG_FUNCTION_ARGS) +{ + return hash_uint32_extended(PG_GETARG_INT32(0), PG_GETARG_INT64(1)); +} + Datum hashint8(PG_FUNCTION_ARGS) { @@ -78,18 +96,43 @@ hashint8(PG_FUNCTION_ARGS) return hash_uint32(lohalf); } +Datum +hashint8extended(PG_FUNCTION_ARGS) +{ + /* Same approach as hashint8 */ + int64 val = PG_GETARG_INT64(0); + uint32 lohalf = (uint32) val; + uint32 hihalf = (uint32) (val >> 32); + + lohalf ^= (val >= 0) ? 
hihalf : ~hihalf; + + return hash_uint32_extended(lohalf, PG_GETARG_INT64(1)); +} + Datum hashoid(PG_FUNCTION_ARGS) { return hash_uint32((uint32) PG_GETARG_OID(0)); } +Datum +hashoidextended(PG_FUNCTION_ARGS) +{ + return hash_uint32_extended((uint32) PG_GETARG_OID(0), PG_GETARG_INT64(1)); +} + Datum hashenum(PG_FUNCTION_ARGS) { return hash_uint32((uint32) PG_GETARG_OID(0)); } +Datum +hashenumextended(PG_FUNCTION_ARGS) +{ + return hash_uint32_extended((uint32) PG_GETARG_OID(0), PG_GETARG_INT64(1)); +} + Datum hashfloat4(PG_FUNCTION_ARGS) { @@ -116,6 +159,21 @@ hashfloat4(PG_FUNCTION_ARGS) return hash_any((unsigned char *) &key8, sizeof(key8)); } +Datum +hashfloat4extended(PG_FUNCTION_ARGS) +{ + float4 key = PG_GETARG_FLOAT4(0); + uint64 seed = PG_GETARG_INT64(1); + float8 key8; + + /* Same approach as hashfloat4 */ + if (key == (float4) 0) + PG_RETURN_UINT64(seed); + key8 = key; + + return hash_any_extended((unsigned char *) &key8, sizeof(key8), seed); +} + Datum hashfloat8(PG_FUNCTION_ARGS) { @@ -132,6 +190,19 @@ hashfloat8(PG_FUNCTION_ARGS) return hash_any((unsigned char *) &key, sizeof(key)); } +Datum +hashfloat8extended(PG_FUNCTION_ARGS) +{ + float8 key = PG_GETARG_FLOAT8(0); + uint64 seed = PG_GETARG_INT64(1); + + /* Same approach as hashfloat8 */ + if (key == (float8) 0) + PG_RETURN_UINT64(seed); + + return hash_any_extended((unsigned char *) &key, sizeof(key), seed); +} + Datum hashoidvector(PG_FUNCTION_ARGS) { @@ -140,6 +211,16 @@ hashoidvector(PG_FUNCTION_ARGS) return hash_any((unsigned char *) key->values, key->dim1 * sizeof(Oid)); } +Datum +hashoidvectorextended(PG_FUNCTION_ARGS) +{ + oidvector *key = (oidvector *) PG_GETARG_POINTER(0); + + return hash_any_extended((unsigned char *) key->values, + key->dim1 * sizeof(Oid), + PG_GETARG_INT64(1)); +} + Datum hashname(PG_FUNCTION_ARGS) { @@ -148,6 +229,15 @@ hashname(PG_FUNCTION_ARGS) return hash_any((unsigned char *) key, strlen(key)); } +Datum +hashnameextended(PG_FUNCTION_ARGS) +{ + char *key = NameStr(*PG_GETARG_NAME(0)); + + return hash_any_extended((unsigned char *) key, strlen(key), + PG_GETARG_INT64(1)); +} + Datum hashtext(PG_FUNCTION_ARGS) { @@ -168,6 +258,22 @@ hashtext(PG_FUNCTION_ARGS) return result; } +Datum +hashtextextended(PG_FUNCTION_ARGS) +{ + text *key = PG_GETARG_TEXT_PP(0); + Datum result; + + /* Same approach as hashtext */ + result = hash_any_extended((unsigned char *) VARDATA_ANY(key), + VARSIZE_ANY_EXHDR(key), + PG_GETARG_INT64(1)); + + PG_FREE_IF_COPY(key, 0); + + return result; +} + /* * hashvarlena() can be used for any varlena datatype in which there are * no non-significant bits, ie, distinct bitpatterns never compare as equal. 
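The hunks above introduce seeded "extended" counterparts for each built-in hash support function (hashcharextended, hashint2extended, ..., hashtextextended, and hashvarlenaextended below), all of which take a 64-bit seed as their second argument and delegate to hash_uint32_extended() or hash_any_extended(); with a zero seed the extra mixing step is skipped, so the low 32 bits of the result should match the unseeded function. As a rough sketch of the same pattern for a user-defined fixed-length type, such a wrapper might look like the following; the type name and argument macro are placeholders, not symbols from this patch.

/*
 * Hypothetical sketch only: a seeded hash support function for an
 * imaginary fixed-length type "mytype", written in the style of the
 * *extended wrappers added by this patch.  Assumes fmgr.h and
 * access/hash.h are included; PG_GETARG_MYTYPE_P is a placeholder.
 */
Datum
hashmytypeextended(PG_FUNCTION_ARGS)
{
	mytype	   *key = PG_GETARG_MYTYPE_P(0);
	uint64		seed = PG_GETARG_INT64(1);

	/* hash the raw bytes, perturbing the internal state with the seed */
	return hash_any_extended((unsigned char *) key, sizeof(mytype), seed);
}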
@@ -187,6 +293,21 @@ hashvarlena(PG_FUNCTION_ARGS) return result; } +Datum +hashvarlenaextended(PG_FUNCTION_ARGS) +{ + struct varlena *key = PG_GETARG_VARLENA_PP(0); + Datum result; + + result = hash_any_extended((unsigned char *) VARDATA_ANY(key), + VARSIZE_ANY_EXHDR(key), + PG_GETARG_INT64(1)); + + PG_FREE_IF_COPY(key, 0); + + return result; +} + /* * This hash function was written by Bob Jenkins * (bob_jenkins@burtleburtle.net), and superficially adapted @@ -345,9 +466,9 @@ hash_any(register const unsigned char *k, register int keylen) /* fall through */ case 9: c += ((uint32) k[8] << 24); - /* the lowest byte of c is reserved for the length */ /* fall through */ case 8: + /* the lowest byte of c is reserved for the length */ b += ka[1]; a += ka[0]; break; @@ -384,9 +505,9 @@ hash_any(register const unsigned char *k, register int keylen) /* fall through */ case 9: c += ((uint32) k[8] << 8); - /* the lowest byte of c is reserved for the length */ /* fall through */ case 8: + /* the lowest byte of c is reserved for the length */ b += ka[1]; a += ka[0]; break; @@ -437,57 +558,77 @@ hash_any(register const unsigned char *k, register int keylen) /* handle the last 11 bytes */ #ifdef WORDS_BIGENDIAN - switch (len) /* all the case statements fall through */ + switch (len) { case 11: c += ((uint32) k[10] << 8); + /* fall through */ case 10: c += ((uint32) k[9] << 16); + /* fall through */ case 9: c += ((uint32) k[8] << 24); - /* the lowest byte of c is reserved for the length */ + /* fall through */ case 8: + /* the lowest byte of c is reserved for the length */ b += k[7]; + /* fall through */ case 7: b += ((uint32) k[6] << 8); + /* fall through */ case 6: b += ((uint32) k[5] << 16); + /* fall through */ case 5: b += ((uint32) k[4] << 24); + /* fall through */ case 4: a += k[3]; + /* fall through */ case 3: a += ((uint32) k[2] << 8); + /* fall through */ case 2: a += ((uint32) k[1] << 16); + /* fall through */ case 1: a += ((uint32) k[0] << 24); /* case 0: nothing left to add */ } #else /* !WORDS_BIGENDIAN */ - switch (len) /* all the case statements fall through */ + switch (len) { case 11: c += ((uint32) k[10] << 24); + /* fall through */ case 10: c += ((uint32) k[9] << 16); + /* fall through */ case 9: c += ((uint32) k[8] << 8); - /* the lowest byte of c is reserved for the length */ + /* fall through */ case 8: + /* the lowest byte of c is reserved for the length */ b += ((uint32) k[7] << 24); + /* fall through */ case 7: b += ((uint32) k[6] << 16); + /* fall through */ case 6: b += ((uint32) k[5] << 8); + /* fall through */ case 5: b += k[4]; + /* fall through */ case 4: a += ((uint32) k[3] << 24); + /* fall through */ case 3: a += ((uint32) k[2] << 16); + /* fall through */ case 2: a += ((uint32) k[1] << 8); + /* fall through */ case 1: a += k[0]; /* case 0: nothing left to add */ @@ -502,7 +643,247 @@ hash_any(register const unsigned char *k, register int keylen) } /* - * hash_uint32() -- hash a 32-bit value + * hash_any_extended() -- hash into a 64-bit value, using an optional seed + * k : the key (the unaligned variable-length array of bytes) + * len : the length of the key, counting by bytes + * seed : a 64-bit seed (0 means no seed) + * + * Returns a uint64 value. Otherwise similar to hash_any. 
+ */ +Datum +hash_any_extended(register const unsigned char *k, register int keylen, + uint64 seed) +{ + register uint32 a, + b, + c, + len; + + /* Set up the internal state */ + len = keylen; + a = b = c = 0x9e3779b9 + len + 3923095; + + /* If the seed is non-zero, use it to perturb the internal state. */ + if (seed != 0) + { + /* + * In essence, the seed is treated as part of the data being hashed, + * but for simplicity, we pretend that it's padded with four bytes of + * zeroes so that the seed constitutes a 12-byte chunk. + */ + a += (uint32) (seed >> 32); + b += (uint32) seed; + mix(a, b, c); + } + + /* If the source pointer is word-aligned, we use word-wide fetches */ + if (((uintptr_t) k & UINT32_ALIGN_MASK) == 0) + { + /* Code path for aligned source data */ + register const uint32 *ka = (const uint32 *) k; + + /* handle most of the key */ + while (len >= 12) + { + a += ka[0]; + b += ka[1]; + c += ka[2]; + mix(a, b, c); + ka += 3; + len -= 12; + } + + /* handle the last 11 bytes */ + k = (const unsigned char *) ka; +#ifdef WORDS_BIGENDIAN + switch (len) + { + case 11: + c += ((uint32) k[10] << 8); + /* fall through */ + case 10: + c += ((uint32) k[9] << 16); + /* fall through */ + case 9: + c += ((uint32) k[8] << 24); + /* fall through */ + case 8: + /* the lowest byte of c is reserved for the length */ + b += ka[1]; + a += ka[0]; + break; + case 7: + b += ((uint32) k[6] << 8); + /* fall through */ + case 6: + b += ((uint32) k[5] << 16); + /* fall through */ + case 5: + b += ((uint32) k[4] << 24); + /* fall through */ + case 4: + a += ka[0]; + break; + case 3: + a += ((uint32) k[2] << 8); + /* fall through */ + case 2: + a += ((uint32) k[1] << 16); + /* fall through */ + case 1: + a += ((uint32) k[0] << 24); + /* case 0: nothing left to add */ + } +#else /* !WORDS_BIGENDIAN */ + switch (len) + { + case 11: + c += ((uint32) k[10] << 24); + /* fall through */ + case 10: + c += ((uint32) k[9] << 16); + /* fall through */ + case 9: + c += ((uint32) k[8] << 8); + /* fall through */ + case 8: + /* the lowest byte of c is reserved for the length */ + b += ka[1]; + a += ka[0]; + break; + case 7: + b += ((uint32) k[6] << 16); + /* fall through */ + case 6: + b += ((uint32) k[5] << 8); + /* fall through */ + case 5: + b += k[4]; + /* fall through */ + case 4: + a += ka[0]; + break; + case 3: + a += ((uint32) k[2] << 16); + /* fall through */ + case 2: + a += ((uint32) k[1] << 8); + /* fall through */ + case 1: + a += k[0]; + /* case 0: nothing left to add */ + } +#endif /* WORDS_BIGENDIAN */ + } + else + { + /* Code path for non-aligned source data */ + + /* handle most of the key */ + while (len >= 12) + { +#ifdef WORDS_BIGENDIAN + a += (k[3] + ((uint32) k[2] << 8) + ((uint32) k[1] << 16) + ((uint32) k[0] << 24)); + b += (k[7] + ((uint32) k[6] << 8) + ((uint32) k[5] << 16) + ((uint32) k[4] << 24)); + c += (k[11] + ((uint32) k[10] << 8) + ((uint32) k[9] << 16) + ((uint32) k[8] << 24)); +#else /* !WORDS_BIGENDIAN */ + a += (k[0] + ((uint32) k[1] << 8) + ((uint32) k[2] << 16) + ((uint32) k[3] << 24)); + b += (k[4] + ((uint32) k[5] << 8) + ((uint32) k[6] << 16) + ((uint32) k[7] << 24)); + c += (k[8] + ((uint32) k[9] << 8) + ((uint32) k[10] << 16) + ((uint32) k[11] << 24)); +#endif /* WORDS_BIGENDIAN */ + mix(a, b, c); + k += 12; + len -= 12; + } + + /* handle the last 11 bytes */ +#ifdef WORDS_BIGENDIAN + switch (len) + { + case 11: + c += ((uint32) k[10] << 8); + /* fall through */ + case 10: + c += ((uint32) k[9] << 16); + /* fall through */ + case 9: + c += ((uint32) k[8] << 24); + /* fall 
through */ + case 8: + /* the lowest byte of c is reserved for the length */ + b += k[7]; + /* fall through */ + case 7: + b += ((uint32) k[6] << 8); + /* fall through */ + case 6: + b += ((uint32) k[5] << 16); + /* fall through */ + case 5: + b += ((uint32) k[4] << 24); + /* fall through */ + case 4: + a += k[3]; + /* fall through */ + case 3: + a += ((uint32) k[2] << 8); + /* fall through */ + case 2: + a += ((uint32) k[1] << 16); + /* fall through */ + case 1: + a += ((uint32) k[0] << 24); + /* case 0: nothing left to add */ + } +#else /* !WORDS_BIGENDIAN */ + switch (len) + { + case 11: + c += ((uint32) k[10] << 24); + /* fall through */ + case 10: + c += ((uint32) k[9] << 16); + /* fall through */ + case 9: + c += ((uint32) k[8] << 8); + /* fall through */ + case 8: + /* the lowest byte of c is reserved for the length */ + b += ((uint32) k[7] << 24); + /* fall through */ + case 7: + b += ((uint32) k[6] << 16); + /* fall through */ + case 6: + b += ((uint32) k[5] << 8); + /* fall through */ + case 5: + b += k[4]; + /* fall through */ + case 4: + a += ((uint32) k[3] << 24); + /* fall through */ + case 3: + a += ((uint32) k[2] << 16); + /* fall through */ + case 2: + a += ((uint32) k[1] << 8); + /* fall through */ + case 1: + a += k[0]; + /* case 0: nothing left to add */ + } +#endif /* WORDS_BIGENDIAN */ + } + + final(a, b, c); + + /* report the result */ + PG_RETURN_UINT64(((uint64) b << 32) | c); +} + +/* + * hash_uint32() -- hash a 32-bit value to a 32-bit value * * This has the same result as * hash_any(&k, sizeof(uint32)) @@ -523,3 +904,32 @@ hash_uint32(uint32 k) /* report the result */ return UInt32GetDatum(c); } + +/* + * hash_uint32_extended() -- hash a 32-bit value to a 64-bit value, with a seed + * + * Like hash_uint32, this is a convenience function. + */ +Datum +hash_uint32_extended(uint32 k, uint64 seed) +{ + register uint32 a, + b, + c; + + a = b = c = 0x9e3779b9 + (uint32) sizeof(uint32) + 3923095; + + if (seed != 0) + { + a += (uint32) (seed >> 32); + b += (uint32) seed; + mix(a, b, c); + } + + a += k; + + final(a, b, c); + + /* report the result */ + PG_RETURN_UINT64(((uint64) b << 32) | c); +} diff --git a/src/backend/access/hash/hashinsert.c b/src/backend/access/hash/hashinsert.c index dc08db97db..3eb722ce26 100644 --- a/src/backend/access/hash/hashinsert.c +++ b/src/backend/access/hash/hashinsert.c @@ -3,7 +3,7 @@ * hashinsert.c * Item insertion in hash tables for Postgres. * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * @@ -22,6 +22,7 @@ #include "utils/rel.h" #include "storage/lwlock.h" #include "storage/buf_internals.h" +#include "storage/predicate.h" static void _hash_vacuum_one_page(Relation rel, Buffer metabuf, Buffer buf, RelFileNode hnode); @@ -55,7 +56,7 @@ _hash_doinsert(Relation rel, IndexTuple itup, Relation heapRel) hashkey = _hash_get_indextuple_hashkey(itup); /* compute item size too */ - itemsz = IndexTupleDSize(*itup); + itemsz = IndexTupleSize(itup); itemsz = MAXALIGN(itemsz); /* be safe, PageAddItem will do this but we * need to be consistent */ @@ -88,6 +89,8 @@ _hash_doinsert(Relation rel, IndexTuple itup, Relation heapRel) &usedmetap); Assert(usedmetap != NULL); + CheckForSerializableConflictIn(rel, NULL, buf); + /* remember the primary bucket buffer to release the pin on it at end. 
*/ bucket_buf = buf; @@ -222,7 +225,7 @@ _hash_doinsert(Relation rel, IndexTuple itup, Relation heapRel) XLogRegisterBuffer(1, metabuf, REGBUF_STANDARD); XLogRegisterBuffer(0, buf, REGBUF_STANDARD); - XLogRegisterBufData(0, (char *) itup, IndexTupleDSize(*itup)); + XLogRegisterBufData(0, (char *) itup, IndexTupleSize(itup)); recptr = XLogInsert(RM_HASH_ID, XLOG_HASH_INSERT); @@ -309,7 +312,7 @@ _hash_pgaddmultitup(Relation rel, Buffer buf, IndexTuple *itups, { Size itemsize; - itemsize = IndexTupleDSize(*itups[i]); + itemsize = IndexTupleSize(itups[i]); itemsize = MAXALIGN(itemsize); /* Find where to insert the tuple (preserving page's hashkey ordering) */ diff --git a/src/backend/access/hash/hashovfl.c b/src/backend/access/hash/hashovfl.c index c206e704d4..b170b46d86 100644 --- a/src/backend/access/hash/hashovfl.c +++ b/src/backend/access/hash/hashovfl.c @@ -3,7 +3,7 @@ * hashovfl.c * Overflow page management code for the Postgres hash access method * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * @@ -341,9 +341,10 @@ _hash_addovflpage(Relation rel, Buffer metabuf, Buffer buf, bool retain_pin) metap->hashm_mapp[metap->hashm_nmaps] = BufferGetBlockNumber(newmapbuf); metap->hashm_nmaps++; metap->hashm_spares[splitnum]++; - MarkBufferDirty(metabuf); } + MarkBufferDirty(metabuf); + /* * for new overflow page, we don't need to explicitly set the bit in * bitmap page, as by default that will be set to "in use". @@ -890,7 +891,7 @@ _hash_squeezebucket(Relation rel, itup = (IndexTuple) PageGetItem(rpage, PageGetItemId(rpage, roffnum)); - itemsz = IndexTupleDSize(*itup); + itemsz = IndexTupleSize(itup); itemsz = MAXALIGN(itemsz); /* diff --git a/src/backend/access/hash/hashpage.c b/src/backend/access/hash/hashpage.c index 7b2906b0ca..6825c14309 100644 --- a/src/backend/access/hash/hashpage.c +++ b/src/backend/access/hash/hashpage.c @@ -3,7 +3,7 @@ * hashpage.c * Hash table page management code for the Postgres hash access method * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * @@ -33,6 +33,7 @@ #include "miscadmin.h" #include "storage/lmgr.h" #include "storage/smgr.h" +#include "storage/predicate.h" static bool _hash_alloc_buckets(Relation rel, BlockNumber firstblock, @@ -298,20 +299,20 @@ _hash_dropscanbuf(Relation rel, HashScanOpaque so) { /* release pin we hold on primary bucket page */ if (BufferIsValid(so->hashso_bucket_buf) && - so->hashso_bucket_buf != so->hashso_curbuf) + so->hashso_bucket_buf != so->currPos.buf) _hash_dropbuf(rel, so->hashso_bucket_buf); so->hashso_bucket_buf = InvalidBuffer; /* release pin we hold on primary bucket page of bucket being split */ if (BufferIsValid(so->hashso_split_bucket_buf) && - so->hashso_split_bucket_buf != so->hashso_curbuf) + so->hashso_split_bucket_buf != so->currPos.buf) _hash_dropbuf(rel, so->hashso_split_bucket_buf); so->hashso_split_bucket_buf = InvalidBuffer; /* release any pin we still hold */ - if (BufferIsValid(so->hashso_curbuf)) - _hash_dropbuf(rel, so->hashso_curbuf); - so->hashso_curbuf = InvalidBuffer; + if (BufferIsValid(so->currPos.buf)) + _hash_dropbuf(rel, so->currPos.buf); + so->currPos.buf = InvalidBuffer; /* reset split scan */ so->hashso_buc_populated = false; @@ -373,7 +374,7 @@ 
_hash_init(Relation rel, double num_tuples, ForkNumber forkNum) if (ffactor < 10) ffactor = 10; - procid = index_getprocid(rel, 1, HASHPROC); + procid = index_getprocid(rel, 1, HASHSTANDARD_PROC); /* * We initialize the metapage, the first N bucket pages, and the first @@ -403,7 +404,7 @@ _hash_init(Relation rel, double num_tuples, ForkNumber forkNum) XLogBeginInsert(); XLogRegisterData((char *) &xlrec, SizeOfHashInitMetaPage); - XLogRegisterBuffer(0, metabuf, REGBUF_WILL_INIT); + XLogRegisterBuffer(0, metabuf, REGBUF_WILL_INIT | REGBUF_STANDARD); recptr = XLogInsert(RM_HASH_ID, XLOG_HASH_INIT_META_PAGE); @@ -592,8 +593,9 @@ _hash_init_metabuffer(Buffer buf, double num_tuples, RegProcedure procid, metap->hashm_firstfree = 0; /* - * Set pd_lower just past the end of the metadata. This is to log full - * page image of metapage in xloginsert.c. + * Set pd_lower just past the end of the metadata. This is essential, + * because without doing so, metadata will be lost if xlog.c compresses + * the page. */ ((PageHeader) page)->pd_lower = ((char *) metap + sizeof(HashMetaPageData)) - (char *) page; @@ -991,14 +993,14 @@ _hash_expandtable(Relation rel, Buffer metabuf) * for the purpose. OTOH, adding a splitpoint is a very infrequent operation, * so it may not be worth worrying about. * - * Returns TRUE if successful, or FALSE if allocation failed due to + * Returns true if successful, or false if allocation failed due to * BlockNumber overflow. */ static bool _hash_alloc_buckets(Relation rel, BlockNumber firstblock, uint32 nblocks) { BlockNumber lastblock; - char zerobuf[BLCKSZ]; + PGAlignedBlock zerobuf; Page page; HashPageOpaque ovflopaque; @@ -1011,7 +1013,7 @@ _hash_alloc_buckets(Relation rel, BlockNumber firstblock, uint32 nblocks) if (lastblock < firstblock || lastblock == InvalidBlockNumber) return false; - page = (Page) zerobuf; + page = (Page) zerobuf.data; /* * Initialize the page. Just zeroing the page won't work; see @@ -1032,11 +1034,12 @@ _hash_alloc_buckets(Relation rel, BlockNumber firstblock, uint32 nblocks) log_newpage(&rel->rd_node, MAIN_FORKNUM, lastblock, - zerobuf, + zerobuf.data, true); RelationOpenSmgr(rel); - smgrextend(rel->rd_smgr, MAIN_FORKNUM, lastblock, zerobuf, false); + PageSetChecksumInplace(page, lastblock); + smgrextend(rel->rd_smgr, MAIN_FORKNUM, lastblock, zerobuf.data, false); return true; } @@ -1106,6 +1109,11 @@ _hash_splitbucket(Relation rel, npage = BufferGetPage(nbuf); nopaque = (HashPageOpaque) PageGetSpecialPointer(npage); + /* Copy the predicate locks from old bucket to new bucket. */ + PredicateLockPageSplit(rel, + BufferGetBlockNumber(bucket_obuf), + BufferGetBlockNumber(bucket_nbuf)); + /* * Partition the tuples in the old bucket between the old bucket and the * new bucket, advancing along the old bucket's overflow bucket chain and @@ -1172,7 +1180,7 @@ _hash_splitbucket(Relation rel, * the current page in the new bucket, we must allocate a new * overflow page and place the tuple on that page instead. 
*/ - itemsz = IndexTupleDSize(*new_itup); + itemsz = IndexTupleSize(new_itup); itemsz = MAXALIGN(itemsz); if (PageGetFreeSpaceForMultipleTuples(npage, nitups + 1) < (all_tups_size + itemsz)) diff --git a/src/backend/access/hash/hashsearch.c b/src/backend/access/hash/hashsearch.c index 3e461ad7a0..650041db0a 100644 --- a/src/backend/access/hash/hashsearch.c +++ b/src/backend/access/hash/hashsearch.c @@ -3,7 +3,7 @@ * hashsearch.c * search code for postgres hash tables * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * @@ -19,45 +19,107 @@ #include "miscadmin.h" #include "pgstat.h" #include "utils/rel.h" +#include "storage/predicate.h" +static bool _hash_readpage(IndexScanDesc scan, Buffer *bufP, + ScanDirection dir); +static int _hash_load_qualified_items(IndexScanDesc scan, Page page, + OffsetNumber offnum, ScanDirection dir); +static inline void _hash_saveitem(HashScanOpaque so, int itemIndex, + OffsetNumber offnum, IndexTuple itup); +static void _hash_readnext(IndexScanDesc scan, Buffer *bufp, + Page *pagep, HashPageOpaque *opaquep); /* * _hash_next() -- Get the next item in a scan. * - * On entry, we have a valid hashso_curpos in the scan, and a - * pin and read lock on the page that contains that item. - * We find the next item in the scan, if any. - * On success exit, we have the page containing the next item - * pinned and locked. + * On entry, so->currPos describes the current page, which may + * be pinned but not locked, and so->currPos.itemIndex identifies + * which item was previously returned. + * + * On successful exit, scan->xs_ctup.t_self is set to the TID + * of the next heap tuple. so->currPos is updated as needed. + * + * On failure exit (no more tuples), we return false with pin + * held on bucket page but no pins or locks held on overflow + * page. */ bool _hash_next(IndexScanDesc scan, ScanDirection dir) { Relation rel = scan->indexRelation; HashScanOpaque so = (HashScanOpaque) scan->opaque; + HashScanPosItem *currItem; + BlockNumber blkno; Buffer buf; - Page page; - OffsetNumber offnum; - ItemPointer current; - IndexTuple itup; - - /* we still have the buffer pinned and read-locked */ - buf = so->hashso_curbuf; - Assert(BufferIsValid(buf)); + bool end_of_scan = false; /* - * step to next valid tuple. + * Advance to the next tuple on the current page; or if done, try to read + * data from the next or previous page based on the scan direction. Before + * moving to the next or previous page make sure that we deal with all the + * killed items. 
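As the hash README changes above and the comment here describe, the scan now works a page at a time: _hash_readpage copies every qualifying heap TID into so->currPos.items[] while the page is locked, and the lock (and, for overflow pages, the pin) is dropped before any TID is handed back. The following is a conceptual sketch, not literal patch code, of how one such batch is then consumed without touching the buffer again.

/*
 * Conceptual sketch only: drain one batch loaded by _hash_readpage.
 * "so" and "scan" are the usual hash scan state; no buffer lock is
 * held while this loop runs.
 */
while (so->currPos.itemIndex <= so->currPos.lastItem)
{
	HashScanPosItem *item = &so->currPos.items[so->currPos.itemIndex++];

	scan->xs_ctup.t_self = item->heapTid;	/* hand this TID to the caller */
	/* ...caller visits the heap tuple, possibly marking it killed... */
}
/* batch exhausted: advance to so->currPos.nextPage and refill the array */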
*/ - if (!_hash_step(scan, &buf, dir)) + if (ScanDirectionIsForward(dir)) + { + if (++so->currPos.itemIndex > so->currPos.lastItem) + { + if (so->numKilled > 0) + _hash_kill_items(scan); + + blkno = so->currPos.nextPage; + if (BlockNumberIsValid(blkno)) + { + buf = _hash_getbuf(rel, blkno, HASH_READ, LH_OVERFLOW_PAGE); + TestForOldSnapshot(scan->xs_snapshot, rel, BufferGetPage(buf)); + if (!_hash_readpage(scan, &buf, dir)) + end_of_scan = true; + } + else + end_of_scan = true; + } + } + else + { + if (--so->currPos.itemIndex < so->currPos.firstItem) + { + if (so->numKilled > 0) + _hash_kill_items(scan); + + blkno = so->currPos.prevPage; + if (BlockNumberIsValid(blkno)) + { + buf = _hash_getbuf(rel, blkno, HASH_READ, + LH_BUCKET_PAGE | LH_OVERFLOW_PAGE); + TestForOldSnapshot(scan->xs_snapshot, rel, BufferGetPage(buf)); + + /* + * We always maintain the pin on bucket page for whole scan + * operation, so releasing the additional pin we have acquired + * here. + */ + if (buf == so->hashso_bucket_buf || + buf == so->hashso_split_bucket_buf) + _hash_dropbuf(rel, buf); + + if (!_hash_readpage(scan, &buf, dir)) + end_of_scan = true; + } + else + end_of_scan = true; + } + } + + if (end_of_scan) + { + _hash_dropscanbuf(rel, so); + HashScanPosInvalidate(so->currPos); return false; + } - /* if we're here, _hash_step found a valid tuple */ - current = &(so->hashso_curpos); - offnum = ItemPointerGetOffsetNumber(current); - _hash_checkpage(rel, buf, LH_BUCKET_PAGE | LH_OVERFLOW_PAGE); - page = BufferGetPage(buf); - itup = (IndexTuple) PageGetItem(page, PageGetItemId(page, offnum)); - so->hashso_heappos = itup->t_tid; + /* OK, itemIndex says what to return */ + currItem = &so->currPos.items[so->currPos.itemIndex]; + scan->xs_ctup.t_self = currItem->heapTid; return true; } @@ -110,6 +172,7 @@ _hash_readnext(IndexScanDesc scan, Assert(BufferIsValid(*bufp)); LockBuffer(*bufp, BUFFER_LOCK_SHARE); + PredicateLockPage(rel, BufferGetBlockNumber(*bufp), scan->xs_snapshot); /* * setting hashso_buc_split to true indicates that we are scanning @@ -212,11 +275,18 @@ _hash_readprev(IndexScanDesc scan, /* * _hash_first() -- Find the first item in a scan. * - * Find the first item in the index that - * satisfies the qualification associated with the scan descriptor. On - * success, the page containing the current index tuple is read locked - * and pinned, and the scan's opaque data entry is updated to - * include the buffer. + * We find the first item (or, if backward scan, the last item) in the + * index that satisfies the qualification associated with the scan + * descriptor. + * + * On successful exit, if the page containing current index tuple is an + * overflow page, both pin and lock are released whereas if it is a bucket + * page then it is pinned but not locked and data about the matching + * tuple(s) on the page has been loaded into so->currPos, + * scan->xs_ctup.t_self is set to the heap TID of the current tuple. + * + * On failure exit (no more tuples), we return false, with pin held on + * bucket page but no pins or locks held on overflow page. 
*/ bool _hash_first(IndexScanDesc scan, ScanDirection dir) @@ -229,15 +299,10 @@ _hash_first(IndexScanDesc scan, ScanDirection dir) Buffer buf; Page page; HashPageOpaque opaque; - IndexTuple itup; - ItemPointer current; - OffsetNumber offnum; + HashScanPosItem *currItem; pgstat_count_index_scan(rel); - current = &(so->hashso_curpos); - ItemPointerSetInvalid(current); - /* * We do not support hash scans with no index qualification, because we * would have to read the whole index rather than just one bucket. That @@ -284,6 +349,7 @@ _hash_first(IndexScanDesc scan, ScanDirection dir) so->hashso_sk_hash = hashkey; buf = _hash_getbucketbuf_from_hashkey(rel, hashkey, HASH_READ, NULL); + PredicateLockPage(rel, BufferGetBlockNumber(buf), scan->xs_snapshot); page = BufferGetPage(buf); TestForOldSnapshot(scan->xs_snapshot, rel, page); opaque = (HashPageOpaque) PageGetSpecialPointer(page); @@ -356,222 +422,300 @@ _hash_first(IndexScanDesc scan, ScanDirection dir) _hash_readnext(scan, &buf, &page, &opaque); } - /* Now find the first tuple satisfying the qualification */ - if (!_hash_step(scan, &buf, dir)) + /* remember which buffer we have pinned, if any */ + Assert(BufferIsInvalid(so->currPos.buf)); + so->currPos.buf = buf; + + /* Now find all the tuples satisfying the qualification from a page */ + if (!_hash_readpage(scan, &buf, dir)) return false; - /* if we're here, _hash_step found a valid tuple */ - offnum = ItemPointerGetOffsetNumber(current); - _hash_checkpage(rel, buf, LH_BUCKET_PAGE | LH_OVERFLOW_PAGE); - page = BufferGetPage(buf); - itup = (IndexTuple) PageGetItem(page, PageGetItemId(page, offnum)); - so->hashso_heappos = itup->t_tid; + /* OK, itemIndex says what to return */ + currItem = &so->currPos.items[so->currPos.itemIndex]; + scan->xs_ctup.t_self = currItem->heapTid; + /* if we're here, _hash_readpage found a valid tuples */ return true; } /* - * _hash_step() -- step to the next valid item in a scan in the bucket. - * - * If no valid record exists in the requested direction, return - * false. Else, return true and set the hashso_curpos for the - * scan to the right thing. + * _hash_readpage() -- Load data from current index page into so->currPos * - * Here we need to ensure that if the scan has started during split, then - * skip the tuples that are moved by split while scanning bucket being - * populated and then scan the bucket being split to cover all such - * tuples. This is done to ensure that we don't miss tuples in the scans - * that are started during split. + * We scan all the items in the current index page and save them into + * so->currPos if it satisfies the qualification. If no matching items + * are found in the current page, we move to the next or previous page + * in a bucket chain as indicated by the direction. * - * 'bufP' points to the current buffer, which is pinned and read-locked. - * On success exit, we have pin and read-lock on whichever page - * contains the right item; on failure, we have released all buffers. + * Return true if any matching items are found else return false. 
*/ -bool -_hash_step(IndexScanDesc scan, Buffer *bufP, ScanDirection dir) +static bool +_hash_readpage(IndexScanDesc scan, Buffer *bufP, ScanDirection dir) { Relation rel = scan->indexRelation; HashScanOpaque so = (HashScanOpaque) scan->opaque; - ItemPointer current; Buffer buf; Page page; HashPageOpaque opaque; - OffsetNumber maxoff; OffsetNumber offnum; - BlockNumber blkno; - IndexTuple itup; - - current = &(so->hashso_curpos); + uint16 itemIndex; buf = *bufP; + Assert(BufferIsValid(buf)); _hash_checkpage(rel, buf, LH_BUCKET_PAGE | LH_OVERFLOW_PAGE); page = BufferGetPage(buf); opaque = (HashPageOpaque) PageGetSpecialPointer(page); - /* - * If _hash_step is called from _hash_first, current will not be valid, so - * we can't dereference it. However, in that case, we presumably want to - * start at the beginning/end of the page... - */ - maxoff = PageGetMaxOffsetNumber(page); - if (ItemPointerIsValid(current)) - offnum = ItemPointerGetOffsetNumber(current); - else - offnum = InvalidOffsetNumber; + so->currPos.buf = buf; + so->currPos.currPage = BufferGetBlockNumber(buf); - /* - * 'offnum' now points to the last tuple we examined (if any). - * - * continue to step through tuples until: 1) we get to the end of the - * bucket chain or 2) we find a valid tuple. - */ - do + if (ScanDirectionIsForward(dir)) { - switch (dir) + BlockNumber prev_blkno = InvalidBlockNumber; + + for (;;) { - case ForwardScanDirection: - if (offnum != InvalidOffsetNumber) - offnum = OffsetNumberNext(offnum); /* move forward */ - else - { - /* new page, locate starting position by binary search */ - offnum = _hash_binsearch(page, so->hashso_sk_hash); - } - - for (;;) - { - /* - * check if we're still in the range of items with the - * target hash key - */ - if (offnum <= maxoff) - { - Assert(offnum >= FirstOffsetNumber); - itup = (IndexTuple) PageGetItem(page, PageGetItemId(page, offnum)); - - /* - * skip the tuples that are moved by split operation - * for the scan that has started when split was in - * progress - */ - if (so->hashso_buc_populated && !so->hashso_buc_split && - (itup->t_info & INDEX_MOVED_BY_SPLIT_MASK)) - { - offnum = OffsetNumberNext(offnum); /* move forward */ - continue; - } - - if (so->hashso_sk_hash == _hash_get_indextuple_hashkey(itup)) - break; /* yes, so exit for-loop */ - } - - /* Before leaving current page, deal with any killed items */ - if (so->numKilled > 0) - _hash_kill_items(scan); - - /* - * ran off the end of this page, try the next - */ - _hash_readnext(scan, &buf, &page, &opaque); - if (BufferIsValid(buf)) - { - maxoff = PageGetMaxOffsetNumber(page); - offnum = _hash_binsearch(page, so->hashso_sk_hash); - } - else - { - itup = NULL; - break; /* exit for-loop */ - } - } + /* new page, locate starting position by binary search */ + offnum = _hash_binsearch(page, so->hashso_sk_hash); + + itemIndex = _hash_load_qualified_items(scan, page, offnum, dir); + + if (itemIndex != 0) break; - case BackwardScanDirection: - if (offnum != InvalidOffsetNumber) - offnum = OffsetNumberPrev(offnum); /* move back */ - else - { - /* new page, locate starting position by binary search */ - offnum = _hash_binsearch_last(page, so->hashso_sk_hash); - } - - for (;;) - { - /* - * check if we're still in the range of items with the - * target hash key - */ - if (offnum >= FirstOffsetNumber) - { - Assert(offnum <= maxoff); - itup = (IndexTuple) PageGetItem(page, PageGetItemId(page, offnum)); - - /* - * skip the tuples that are moved by split operation - * for the scan that has started when split was in - * 
progress - */ - if (so->hashso_buc_populated && !so->hashso_buc_split && - (itup->t_info & INDEX_MOVED_BY_SPLIT_MASK)) - { - offnum = OffsetNumberPrev(offnum); /* move back */ - continue; - } - - if (so->hashso_sk_hash == _hash_get_indextuple_hashkey(itup)) - break; /* yes, so exit for-loop */ - } - - /* Before leaving current page, deal with any killed items */ - if (so->numKilled > 0) - _hash_kill_items(scan); - - /* - * ran off the end of this page, try the next - */ - _hash_readprev(scan, &buf, &page, &opaque); - if (BufferIsValid(buf)) - { - TestForOldSnapshot(scan->xs_snapshot, rel, page); - maxoff = PageGetMaxOffsetNumber(page); - offnum = _hash_binsearch_last(page, so->hashso_sk_hash); - } - else - { - itup = NULL; - break; /* exit for-loop */ - } - } + /* + * Could not find any matching tuples in the current page, move to + * the next page. Before leaving the current page, deal with any + * killed items. + */ + if (so->numKilled > 0) + _hash_kill_items(scan); + + /* + * If this is a primary bucket page, hasho_prevblkno is not a real + * block number. + */ + if (so->currPos.buf == so->hashso_bucket_buf || + so->currPos.buf == so->hashso_split_bucket_buf) + prev_blkno = InvalidBlockNumber; + else + prev_blkno = opaque->hasho_prevblkno; + + _hash_readnext(scan, &buf, &page, &opaque); + if (BufferIsValid(buf)) + { + so->currPos.buf = buf; + so->currPos.currPage = BufferGetBlockNumber(buf); + } + else + { + /* + * Remember next and previous block numbers for scrollable + * cursors to know the start position and return false + * indicating that no more matching tuples were found. Also, + * don't reset currPage or lsn, because we expect + * _hash_kill_items to be called for the old page after this + * function returns. + */ + so->currPos.prevPage = prev_blkno; + so->currPos.nextPage = InvalidBlockNumber; + so->currPos.buf = buf; + return false; + } + } + + so->currPos.firstItem = 0; + so->currPos.lastItem = itemIndex - 1; + so->currPos.itemIndex = 0; + } + else + { + BlockNumber next_blkno = InvalidBlockNumber; + + for (;;) + { + /* new page, locate starting position by binary search */ + offnum = _hash_binsearch_last(page, so->hashso_sk_hash); + + itemIndex = _hash_load_qualified_items(scan, page, offnum, dir); + + if (itemIndex != MaxIndexTuplesPerPage) break; - default: - /* NoMovementScanDirection */ - /* this should not be reached */ - itup = NULL; + /* + * Could not find any matching tuples in the current page, move to + * the previous page. Before leaving the current page, deal with + * any killed items. + */ + if (so->numKilled > 0) + _hash_kill_items(scan); + + if (so->currPos.buf == so->hashso_bucket_buf || + so->currPos.buf == so->hashso_split_bucket_buf) + next_blkno = opaque->hasho_nextblkno; + + _hash_readprev(scan, &buf, &page, &opaque); + if (BufferIsValid(buf)) + { + so->currPos.buf = buf; + so->currPos.currPage = BufferGetBlockNumber(buf); + } + else + { + /* + * Remember next and previous block numbers for scrollable + * cursors to know the start position and return false + * indicating that no more matching tuples were found. Also, + * don't reset currPage or lsn, because we expect + * _hash_kill_items to be called for the old page after this + * function returns. 
+ */ + so->currPos.prevPage = InvalidBlockNumber; + so->currPos.nextPage = next_blkno; + so->currPos.buf = buf; + return false; + } + } + + so->currPos.firstItem = itemIndex; + so->currPos.lastItem = MaxIndexTuplesPerPage - 1; + so->currPos.itemIndex = MaxIndexTuplesPerPage - 1; + } + + if (so->currPos.buf == so->hashso_bucket_buf || + so->currPos.buf == so->hashso_split_bucket_buf) + { + so->currPos.prevPage = InvalidBlockNumber; + so->currPos.nextPage = opaque->hasho_nextblkno; + LockBuffer(so->currPos.buf, BUFFER_LOCK_UNLOCK); + } + else + { + so->currPos.prevPage = opaque->hasho_prevblkno; + so->currPos.nextPage = opaque->hasho_nextblkno; + _hash_relbuf(rel, so->currPos.buf); + so->currPos.buf = InvalidBuffer; + } + + Assert(so->currPos.firstItem <= so->currPos.lastItem); + return true; +} + +/* + * Load all the qualified items from a current index page + * into so->currPos. Helper function for _hash_readpage. + */ +static int +_hash_load_qualified_items(IndexScanDesc scan, Page page, + OffsetNumber offnum, ScanDirection dir) +{ + HashScanOpaque so = (HashScanOpaque) scan->opaque; + IndexTuple itup; + int itemIndex; + OffsetNumber maxoff; + + maxoff = PageGetMaxOffsetNumber(page); + + if (ScanDirectionIsForward(dir)) + { + /* load items[] in ascending order */ + itemIndex = 0; + + while (offnum <= maxoff) + { + Assert(offnum >= FirstOffsetNumber); + itup = (IndexTuple) PageGetItem(page, PageGetItemId(page, offnum)); + + /* + * skip the tuples that are moved by split operation for the scan + * that has started when split was in progress. Also, skip the + * tuples that are marked as dead. + */ + if ((so->hashso_buc_populated && !so->hashso_buc_split && + (itup->t_info & INDEX_MOVED_BY_SPLIT_MASK)) || + (scan->ignore_killed_tuples && + (ItemIdIsDead(PageGetItemId(page, offnum))))) + { + offnum = OffsetNumberNext(offnum); /* move forward */ + continue; + } + + if (so->hashso_sk_hash == _hash_get_indextuple_hashkey(itup) && + _hash_checkqual(scan, itup)) + { + /* tuple is qualified, so remember it */ + _hash_saveitem(so, itemIndex, offnum, itup); + itemIndex++; + } + else + { + /* + * No more matching tuples exist in this page. so, exit while + * loop. + */ break; + } + + offnum = OffsetNumberNext(offnum); } - if (itup == NULL) + Assert(itemIndex <= MaxIndexTuplesPerPage); + return itemIndex; + } + else + { + /* load items[] in descending order */ + itemIndex = MaxIndexTuplesPerPage; + + while (offnum >= FirstOffsetNumber) { + Assert(offnum <= maxoff); + itup = (IndexTuple) PageGetItem(page, PageGetItemId(page, offnum)); + /* - * We ran off the end of the bucket without finding a match. - * Release the pin on bucket buffers. Normally, such pins are - * released at end of scan, however scrolling cursors can - * reacquire the bucket lock and pin in the same scan multiple - * times. + * skip the tuples that are moved by split operation for the scan + * that has started when split was in progress. Also, skip the + * tuples that are marked as dead. 
*/ - *bufP = so->hashso_curbuf = InvalidBuffer; - ItemPointerSetInvalid(current); - _hash_dropscanbuf(rel, so); - return false; + if ((so->hashso_buc_populated && !so->hashso_buc_split && + (itup->t_info & INDEX_MOVED_BY_SPLIT_MASK)) || + (scan->ignore_killed_tuples && + (ItemIdIsDead(PageGetItemId(page, offnum))))) + { + offnum = OffsetNumberPrev(offnum); /* move back */ + continue; + } + + if (so->hashso_sk_hash == _hash_get_indextuple_hashkey(itup) && + _hash_checkqual(scan, itup)) + { + itemIndex--; + /* tuple is qualified, so remember it */ + _hash_saveitem(so, itemIndex, offnum, itup); + } + else + { + /* + * No more matching tuples exist in this page. so, exit while + * loop. + */ + break; + } + + offnum = OffsetNumberPrev(offnum); } - /* check the tuple quals, loop around if not met */ - } while (!_hash_checkqual(scan, itup)); + Assert(itemIndex >= 0); + return itemIndex; + } +} + +/* Save an index item into so->currPos.items[itemIndex] */ +static inline void +_hash_saveitem(HashScanOpaque so, int itemIndex, + OffsetNumber offnum, IndexTuple itup) +{ + HashScanPosItem *currItem = &so->currPos.items[itemIndex]; - /* if we made it to here, we've found a valid tuple */ - blkno = BufferGetBlockNumber(buf); - *bufP = so->hashso_curbuf = buf; - ItemPointerSet(current, blkno, offnum); - return true; + currItem->heapTid = itup->t_tid; + currItem->indexOffset = offnum; } diff --git a/src/backend/access/hash/hashsort.c b/src/backend/access/hash/hashsort.c index 41d615df8b..b70964f429 100644 --- a/src/backend/access/hash/hashsort.c +++ b/src/backend/access/hash/hashsort.c @@ -14,7 +14,7 @@ * plenty of locality of access. * * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * IDENTIFICATION @@ -82,6 +82,7 @@ _h_spoolinit(Relation heap, Relation index, uint32 num_buckets) hspool->low_mask, hspool->max_buckets, maintenance_work_mem, + NULL, false); return hspool; diff --git a/src/backend/access/hash/hashutil.c b/src/backend/access/hash/hashutil.c index 9b803af7c2..7c9b2cfc9e 100644 --- a/src/backend/access/hash/hashutil.c +++ b/src/backend/access/hash/hashutil.c @@ -3,7 +3,7 @@ * hashutil.c * Utility code for Postgres hash implementation. * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * @@ -73,10 +73,10 @@ _hash_checkqual(IndexScanDesc scan, IndexTuple itup) } /* - * _hash_datum2hashkey -- given a Datum, call the index's hash procedure + * _hash_datum2hashkey -- given a Datum, call the index's hash function * * The Datum is assumed to be of the index's column type, so we can use the - * "primary" hash procedure that's tracked for us by the generic index code. + * "primary" hash function that's tracked for us by the generic index code. 
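[Editor's note] The _hash_load_qualified_items()/_hash_saveitem() helpers above fill so->currPos.items[] one page at a time, with firstItem/lastItem bounding the saved entries and itemIndex acting as the cursor (forward scans fill from 0 upward, backward scans fill from the end downward). The following is only a minimal sketch of how such a batch is typically consumed, assuming the HashScanPosData fields used in the hunk above; it is not code from this patch.

    #include "postgres.h"
    #include "access/hash.h"
    #include "access/sdir.h"

    /* advance within the current batch; false means the page is exhausted */
    static bool
    scanpos_next(HashScanOpaque so, ScanDirection dir, ItemPointer out_tid)
    {
        HashScanPosItem *item;

        if (ScanDirectionIsForward(dir))
        {
            if (so->currPos.itemIndex > so->currPos.lastItem)
                return false;       /* caller must read the next page */
            item = &so->currPos.items[so->currPos.itemIndex++];
        }
        else
        {
            if (so->currPos.itemIndex < so->currPos.firstItem)
                return false;       /* caller must read the previous page */
            item = &so->currPos.items[so->currPos.itemIndex--];
        }

        *out_tid = item->heapTid;   /* heap TID handed back to the executor */
        return true;
    }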
*/ uint32 _hash_datum2hashkey(Relation rel, Datum key) @@ -85,7 +85,7 @@ _hash_datum2hashkey(Relation rel, Datum key) Oid collation; /* XXX assumes index has only one attribute */ - procinfo = index_getprocinfo(rel, 1, HASHPROC); + procinfo = index_getprocinfo(rel, 1, HASHSTANDARD_PROC); collation = rel->rd_indcollation[0]; return DatumGetUInt32(FunctionCall1Coll(procinfo, collation, key)); @@ -108,10 +108,10 @@ _hash_datum2hashkey_type(Relation rel, Datum key, Oid keytype) hash_proc = get_opfamily_proc(rel->rd_opfamily[0], keytype, keytype, - HASHPROC); + HASHSTANDARD_PROC); if (!RegProcedureIsValid(hash_proc)) elog(ERROR, "missing support function %d(%u,%u) for index \"%s\"", - HASHPROC, keytype, keytype, + HASHSTANDARD_PROC, keytype, keytype, RelationGetRelationName(rel)); collation = rel->rd_indcollation[0]; @@ -522,13 +522,31 @@ _hash_get_newbucket_from_oldbucket(Relation rel, Bucket old_bucket, * current page and killed tuples thereon (generally, this should only be * called if so->numKilled > 0). * + * The caller does not have a lock on the page and may or may not have the + * page pinned in a buffer. Note that read-lock is sufficient for setting + * LP_DEAD status (which is only a hint). + * + * The caller must have pin on bucket buffer, but may or may not have pin + * on overflow buffer, as indicated by HashScanPosIsPinned(so->currPos). + * * We match items by heap TID before assuming they are the right ones to * delete. + * + * There are never any scans active in a bucket at the time VACUUM begins, + * because VACUUM takes a cleanup lock on the primary bucket page and scans + * hold a pin. A scan can begin after VACUUM leaves the primary bucket page + * but before it finishes the entire bucket, but it can never pass VACUUM, + * because VACUUM always locks the next page before releasing the lock on + * the previous one. Therefore, we don't have to worry about accidentally + * killing a TID that has been reused for an unrelated tuple. */ void _hash_kill_items(IndexScanDesc scan) { HashScanOpaque so = (HashScanOpaque) scan->opaque; + Relation rel = scan->indexRelation; + BlockNumber blkno; + Buffer buf; Page page; HashPageOpaque opaque; OffsetNumber offnum, @@ -536,9 +554,11 @@ _hash_kill_items(IndexScanDesc scan) int numKilled = so->numKilled; int i; bool killedsomething = false; + bool havePin = false; Assert(so->numKilled > 0); Assert(so->killedItems != NULL); + Assert(HashScanPosIsValid(so->currPos)); /* * Always reset the scan state, so we don't look for same items on other @@ -546,20 +566,40 @@ _hash_kill_items(IndexScanDesc scan) */ so->numKilled = 0; - page = BufferGetPage(so->hashso_curbuf); + blkno = so->currPos.currPage; + if (HashScanPosIsPinned(so->currPos)) + { + /* + * We already have pin on this buffer, so, all we need to do is + * acquire lock on it. 
+ */ + havePin = true; + buf = so->currPos.buf; + LockBuffer(buf, BUFFER_LOCK_SHARE); + } + else + buf = _hash_getbuf(rel, blkno, HASH_READ, LH_OVERFLOW_PAGE); + + page = BufferGetPage(buf); opaque = (HashPageOpaque) PageGetSpecialPointer(page); maxoff = PageGetMaxOffsetNumber(page); for (i = 0; i < numKilled; i++) { - offnum = so->killedItems[i].indexOffset; + int itemIndex = so->killedItems[i]; + HashScanPosItem *currItem = &so->currPos.items[itemIndex]; + + offnum = currItem->indexOffset; + + Assert(itemIndex >= so->currPos.firstItem && + itemIndex <= so->currPos.lastItem); while (offnum <= maxoff) { ItemId iid = PageGetItemId(page, offnum); IndexTuple ituple = (IndexTuple) PageGetItem(page, iid); - if (ItemPointerEquals(&ituple->t_tid, &so->killedItems[i].heapTid)) + if (ItemPointerEquals(&ituple->t_tid, &currItem->heapTid)) { /* found the item */ ItemIdMarkDead(iid); @@ -578,6 +618,12 @@ _hash_kill_items(IndexScanDesc scan) if (killedsomething) { opaque->hasho_flag |= LH_PAGE_HAS_DEAD_TUPLES; - MarkBufferDirtyHint(so->hashso_curbuf, true); + MarkBufferDirtyHint(buf, true); } + + if (so->hashso_bucket_buf == so->currPos.buf || + havePin) + LockBuffer(so->currPos.buf, BUFFER_LOCK_UNLOCK); + else + _hash_relbuf(rel, buf); } diff --git a/src/backend/access/hash/hashvalidate.c b/src/backend/access/hash/hashvalidate.c index 30b29cb100..390a6ea1e0 100644 --- a/src/backend/access/hash/hashvalidate.c +++ b/src/backend/access/hash/hashvalidate.c @@ -3,7 +3,7 @@ * hashvalidate.c * Opclass validator for hash. * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * IDENTIFICATION @@ -29,7 +29,7 @@ #include "utils/syscache.h" -static bool check_hash_func_signature(Oid funcid, Oid restype, Oid argtype); +static bool check_hash_func_signature(Oid funcid, int16 amprocnum, Oid argtype); /* @@ -96,7 +96,7 @@ hashvalidate(Oid opclassoid) { ereport(INFO, (errcode(ERRCODE_INVALID_OBJECT_DEFINITION), - errmsg("operator family \"%s\" of access method %s contains support procedure %s with different left and right input types", + errmsg("operator family \"%s\" of access method %s contains support function %s with different left and right input types", opfamilyname, "hash", format_procedure(procform->amproc)))); result = false; @@ -105,8 +105,9 @@ hashvalidate(Oid opclassoid) /* Check procedure numbers and function signatures */ switch (procform->amprocnum) { - case HASHPROC: - if (!check_hash_func_signature(procform->amproc, INT4OID, + case HASHSTANDARD_PROC: + case HASHEXTENDED_PROC: + if (!check_hash_func_signature(procform->amproc, procform->amprocnum, procform->amproclefttype)) { ereport(INFO, @@ -181,7 +182,7 @@ hashvalidate(Oid opclassoid) result = false; } - /* There should be relevant hash procedures for each datatype */ + /* There should be relevant hash functions for each datatype */ if (!list_member_oid(hashabletypes, oprform->amoplefttype) || !list_member_oid(hashabletypes, oprform->amoprighttype)) { @@ -264,19 +265,37 @@ hashvalidate(Oid opclassoid) * hacks in the core hash opclass definitions. 
*/ static bool -check_hash_func_signature(Oid funcid, Oid restype, Oid argtype) +check_hash_func_signature(Oid funcid, int16 amprocnum, Oid argtype) { bool result = true; + Oid restype; + int16 nargs; HeapTuple tp; Form_pg_proc procform; + switch (amprocnum) + { + case HASHSTANDARD_PROC: + restype = INT4OID; + nargs = 1; + break; + + case HASHEXTENDED_PROC: + restype = INT8OID; + nargs = 2; + break; + + default: + elog(ERROR, "invalid amprocnum"); + } + tp = SearchSysCache1(PROCOID, ObjectIdGetDatum(funcid)); if (!HeapTupleIsValid(tp)) elog(ERROR, "cache lookup failed for function %u", funcid); procform = (Form_pg_proc) GETSTRUCT(tp); if (procform->prorettype != restype || procform->proretset || - procform->pronargs != 1) + procform->pronargs != nargs) result = false; if (!IsBinaryCoercible(argtype, procform->proargtypes.values[0])) @@ -290,24 +309,28 @@ check_hash_func_signature(Oid funcid, Oid restype, Oid argtype) * identity, not just its input type, because hashvarlena() takes * INTERNAL and allowing any such function seems too scary. */ - if (funcid == F_HASHINT4 && + if ((funcid == F_HASHINT4 || funcid == F_HASHINT4EXTENDED) && (argtype == DATEOID || - argtype == ABSTIMEOID || argtype == RELTIMEOID || argtype == XIDOID || argtype == CIDOID)) /* okay, allowed use of hashint4() */ ; - else if (funcid == F_TIMESTAMP_HASH && + else if ((funcid == F_TIMESTAMP_HASH || + funcid == F_TIMESTAMP_HASH_EXTENDED) && argtype == TIMESTAMPTZOID) /* okay, allowed use of timestamp_hash() */ ; - else if (funcid == F_HASHCHAR && + else if ((funcid == F_HASHCHAR || funcid == F_HASHCHAREXTENDED) && argtype == BOOLOID) /* okay, allowed use of hashchar() */ ; - else if (funcid == F_HASHVARLENA && + else if ((funcid == F_HASHVARLENA || funcid == F_HASHVARLENAEXTENDED) && argtype == BYTEAOID) /* okay, allowed use of hashvarlena() */ ; else result = false; } + /* If function takes a second argument, it must be for a 64-bit salt. */ + if (nargs == 2 && procform->proargtypes.values[1] != INT8OID) + result = false; + ReleaseSysCache(tp); return result; } diff --git a/src/backend/access/heap/README.tuplock b/src/backend/access/heap/README.tuplock index 10b8d78ab7..b2f3a4ce90 100644 --- a/src/backend/access/heap/README.tuplock +++ b/src/backend/access/heap/README.tuplock @@ -45,10 +45,10 @@ and modifications which might alter the tuple's key. This is the lock that is implicitly taken by UPDATE operations which leave all key fields unchanged. SELECT FOR SHARE obtains a shared lock which prevents any kind of tuple modification. Finally, SELECT FOR KEY SHARE obtains a shared lock which only -prevents tuple removal and modifications of key fields. This last mode -implements a mode just strong enough to implement RI checks, i.e. it ensures -that tuples do not go away from under a check, without blocking when some -other transaction that want to update the tuple without changing its key. +prevents tuple removal and modifications of key fields. This lock level is +just strong enough to implement RI checks, i.e. it ensures that tuples do not +go away from under a check, without blocking transactions that want to update +the tuple without changing its key. 
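[Editor's note] The hashvalidate.c hunk above now accepts two support-function shapes: HASHSTANDARD_PROC (one argument, int4 result) and HASHEXTENDED_PROC (a second int8 salt argument, int8 result). As a hedged illustration only, an extended hash support function for a 4-byte type could look roughly like this; the function name mytype_hash_extended is hypothetical, and hash_uint32_extended() is assumed to be the seeded helper available in this tree.

    #include "postgres.h"
    #include "fmgr.h"
    #include "utils/hashutils.h"    /* hash_uint32_extended() */

    PG_MODULE_MAGIC;

    PG_FUNCTION_INFO_V1(mytype_hash_extended);

    /* sketch of a HASHEXTENDED_PROC support function for a 4-byte type */
    Datum
    mytype_hash_extended(PG_FUNCTION_ARGS)
    {
        int32   key = PG_GETARG_INT32(0);
        uint64  seed = PG_GETARG_INT64(1);      /* the 64-bit salt */

        return hash_uint32_extended((uint32) key, seed);
    }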
The conflict table is: diff --git a/src/backend/access/heap/heapam.c b/src/backend/access/heap/heapam.c index 8792f1453c..fb63471a0e 100644 --- a/src/backend/access/heap/heapam.c +++ b/src/backend/access/heap/heapam.c @@ -3,7 +3,7 @@ * heapam.c * heap access method code * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * @@ -56,6 +56,7 @@ #include "access/xlogutils.h" #include "catalog/catalog.h" #include "catalog/namespace.h" +#include "catalog/index.h" #include "miscadmin.h" #include "pgstat.h" #include "port/atomics.h" @@ -74,7 +75,9 @@ #include "utils/snapmgr.h" #include "utils/syscache.h" #include "utils/tqual.h" - +#include "utils/memutils.h" +#include "nodes/execnodes.h" +#include "executor/executor.h" /* GUC variable */ bool synchronize_seqscans = true; @@ -126,6 +129,7 @@ static bool ConditionalMultiXactIdWait(MultiXactId multi, MultiXactStatus status static XLogRecPtr log_heap_new_cid(Relation relation, HeapTuple tup); static HeapTuple ExtractReplicaIdentity(Relation rel, HeapTuple tup, bool key_modified, bool *copy); +static bool ProjIndexIsUnchanged(Relation relation, HeapTuple oldtup, HeapTuple newtup); /* @@ -1066,11 +1070,11 @@ fastgetattr(HeapTuple tup, int attnum, TupleDesc tupleDesc, (*(isnull) = false), HeapTupleNoNulls(tup) ? ( - (tupleDesc)->attrs[(attnum) - 1]->attcacheoff >= 0 ? + TupleDescAttr((tupleDesc), (attnum) - 1)->attcacheoff >= 0 ? ( - fetchatt((tupleDesc)->attrs[(attnum) - 1], + fetchatt(TupleDescAttr((tupleDesc), (attnum) - 1), (char *) (tup)->t_data + (tup)->t_data->t_hoff + - (tupleDesc)->attrs[(attnum) - 1]->attcacheoff) + TupleDescAttr((tupleDesc), (attnum) - 1)->attcacheoff) ) : nocachegetattr((tup), (attnum), (tupleDesc)) @@ -1133,6 +1137,14 @@ relation_open(Oid relationId, LOCKMODE lockmode) if (!RelationIsValid(r)) elog(ERROR, "could not open relation with OID %u", relationId); + /* + * If we didn't get the lock ourselves, assert that caller holds one, + * except in bootstrap mode where no locks are used. 
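[Editor's note] The relation_open()/try_relation_open() hunks above add an assertion (via CheckRelationLockedByMe(), skipped in bootstrap mode) that a caller passing NoLock already holds some lock on the relation. A minimal sketch of the calling pattern this enforces; touch_twice and relid are illustrative, not code from this patch.

    #include "postgres.h"
    #include "access/heapam.h"

    /* passing NoLock is only valid while we already hold a real lock */
    static void
    touch_twice(Oid relid)
    {
        Relation    rel = heap_open(relid, RowExclusiveLock);
        Relation    again = heap_open(relid, NoLock);   /* assertion satisfied */

        heap_close(again, NoLock);
        heap_close(rel, RowExclusiveLock);
    }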
+ */ + Assert(lockmode != NoLock || + IsBootstrapProcessingMode() || + CheckRelationLockedByMe(r, AccessShareLock, true)); + /* Make note that we've accessed a temporary relation */ if (RelationUsesLocalBuffers(r)) MyXactFlags |= XACT_FLAGS_ACCESSEDTEMPREL; @@ -1179,6 +1191,10 @@ try_relation_open(Oid relationId, LOCKMODE lockmode) if (!RelationIsValid(r)) elog(ERROR, "could not open relation with OID %u", relationId); + /* If we didn't get the lock ourselves, assert that caller holds one */ + Assert(lockmode != NoLock || + CheckRelationLockedByMe(r, AccessShareLock, true)); + /* Make note that we've accessed a temporary relation */ if (RelationUsesLocalBuffers(r)) MyXactFlags |= XACT_FLAGS_ACCESSEDTEMPREL; @@ -1293,7 +1309,8 @@ heap_open(Oid relationId, LOCKMODE lockmode) r = relation_open(relationId, lockmode); - if (r->rd_rel->relkind == RELKIND_INDEX) + if (r->rd_rel->relkind == RELKIND_INDEX || + r->rd_rel->relkind == RELKIND_PARTITIONED_INDEX) ereport(ERROR, (errcode(ERRCODE_WRONG_OBJECT_TYPE), errmsg("\"%s\" is an index", @@ -1321,7 +1338,8 @@ heap_openrv(const RangeVar *relation, LOCKMODE lockmode) r = relation_openrv(relation, lockmode); - if (r->rd_rel->relkind == RELKIND_INDEX) + if (r->rd_rel->relkind == RELKIND_INDEX || + r->rd_rel->relkind == RELKIND_PARTITIONED_INDEX) ereport(ERROR, (errcode(ERRCODE_WRONG_OBJECT_TYPE), errmsg("\"%s\" is an index", @@ -1353,7 +1371,8 @@ heap_openrv_extended(const RangeVar *relation, LOCKMODE lockmode, if (r) { - if (r->rd_rel->relkind == RELKIND_INDEX) + if (r->rd_rel->relkind == RELKIND_INDEX || + r->rd_rel->relkind == RELKIND_PARTITIONED_INDEX) ereport(ERROR, (errcode(ERRCODE_WRONG_OBJECT_TYPE), errmsg("\"%s\" is an index", @@ -1379,7 +1398,7 @@ heap_openrv_extended(const RangeVar *relation, LOCKMODE lockmode, * heap_beginscan_strat offers an extended API that lets the caller control * whether a nondefault buffer access strategy can be used, and whether * syncscan can be chosen (possibly resulting in the scan not starting from - * block zero). Both of these default to TRUE with plain heap_beginscan. + * block zero). Both of these default to true with plain heap_beginscan. * * heap_beginscan_bm is an alternative entry point for setting up a * HeapScanDesc for a bitmap heap scan. Although that scan technology is @@ -1531,21 +1550,6 @@ heap_rescan(HeapScanDesc scan, * reinitialize scan descriptor */ initscan(scan, key, true); - - /* - * reset parallel scan, if present - */ - if (scan->rs_parallel != NULL) - { - ParallelHeapScanDesc parallel_scan; - - /* - * Caller is responsible for making sure that all workers have - * finished the scan before calling this. - */ - parallel_scan = scan->rs_parallel; - pg_atomic_write_u64(¶llel_scan->phs_nallocated, 0); - } } /* ---------------- @@ -1639,7 +1643,29 @@ heap_parallelscan_initialize(ParallelHeapScanDesc target, Relation relation, SpinLockInit(&target->phs_mutex); target->phs_startblock = InvalidBlockNumber; pg_atomic_init_u64(&target->phs_nallocated, 0); - SerializeSnapshot(snapshot, target->phs_snapshot_data); + if (IsMVCCSnapshot(snapshot)) + { + SerializeSnapshot(snapshot, target->phs_snapshot_data); + target->phs_snapshot_any = false; + } + else + { + Assert(snapshot == SnapshotAny); + target->phs_snapshot_any = true; + } +} + +/* ---------------- + * heap_parallelscan_reinitialize - reset a parallel scan + * + * Call this in the leader process. Caller is responsible for + * making sure that all workers have finished the scan beforehand. 
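[Editor's note] The parallel-scan hunks above record SnapshotAny as a flag (phs_snapshot_any) instead of serializing it, and move the rescan reset out of heap_rescan() into the new heap_parallelscan_reinitialize(). A hedged sketch of the leader-side flow, assuming pscan points at shared ParallelHeapScanDesc space; scan_twice and the surrounding structure are illustrative only.

    #include "postgres.h"
    #include "access/heapam.h"
    #include "utils/tqual.h"        /* SnapshotAny */

    static void
    scan_twice(Relation rel, ParallelHeapScanDesc pscan)
    {
        HeapScanDesc scan;

        /* SnapshotAny is recorded as a flag rather than serialized */
        heap_parallelscan_initialize(pscan, rel, SnapshotAny);
        scan = heap_beginscan_parallel(rel, pscan);

        /* ... leader and workers consume tuples here ... */

        /* only legal once every worker has finished the first pass */
        heap_parallelscan_reinitialize(pscan);
        heap_rescan(scan, NULL);

        heap_endscan(scan);
    }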
+ * ---------------- + */ +void +heap_parallelscan_reinitialize(ParallelHeapScanDesc parallel_scan) +{ + pg_atomic_write_u64(¶llel_scan->phs_nallocated, 0); } /* ---------------- @@ -1654,11 +1680,22 @@ heap_beginscan_parallel(Relation relation, ParallelHeapScanDesc parallel_scan) Snapshot snapshot; Assert(RelationGetRelid(relation) == parallel_scan->phs_relid); - snapshot = RestoreSnapshot(parallel_scan->phs_snapshot_data); - RegisterSnapshot(snapshot); + + if (!parallel_scan->phs_snapshot_any) + { + /* Snapshot was serialized -- restore it */ + snapshot = RestoreSnapshot(parallel_scan->phs_snapshot_data); + RegisterSnapshot(snapshot); + } + else + { + /* SnapshotAny passed by caller (not serialized) */ + snapshot = SnapshotAny; + } return heap_beginscan_internal(relation, snapshot, 0, NULL, parallel_scan, - true, true, true, false, false, true); + true, true, true, false, false, + !parallel_scan->phs_snapshot_any); } /* ---------------- @@ -1844,16 +1881,16 @@ heap_getnext(HeapScanDesc scan, ScanDirection direction) * against the specified snapshot. * * If successful (tuple found and passes snapshot time qual), then *userbuf - * is set to the buffer holding the tuple and TRUE is returned. The caller + * is set to the buffer holding the tuple and true is returned. The caller * must unpin the buffer when done with the tuple. * * If the tuple is not found (ie, item number references a deleted slot), - * then tuple->t_data is set to NULL and FALSE is returned. + * then tuple->t_data is set to NULL and false is returned. * - * If the tuple is found but fails the time qual check, then FALSE is returned + * If the tuple is found but fails the time qual check, then false is returned * but tuple->t_data is left pointing to the tuple. * - * keep_buf determines what is done with the buffer in the FALSE-result cases. + * keep_buf determines what is done with the buffer in the false-result cases. * When the caller specifies keep_buf = true, we retain the pin on the buffer * and return it in *userbuf (so the caller must eventually unpin it); when * keep_buf = false, the pin is released and *userbuf is set to InvalidBuffer. @@ -1995,15 +2032,15 @@ heap_fetch(Relation relation, * of a HOT chain), and buffer is the buffer holding this tuple. We search * for the first chain member satisfying the given snapshot. If one is * found, we update *tid to reference that tuple's offset number, and - * return TRUE. If no match, return FALSE without modifying *tid. + * return true. If no match, return false without modifying *tid. * * heapTuple is a caller-supplied buffer. When a match is found, we return * the tuple here, in addition to updating *tid. If no match is found, the * contents of this buffer on return are undefined. * * If all_dead is not NULL, we check non-visible tuples to see if they are - * globally dead; *all_dead is set TRUE if all members of the HOT chain - * are vacuumable, FALSE if not. + * globally dead; *all_dead is set true if all members of the HOT chain + * are vacuumable, false if not. * * Unlike heap_fetch, the caller must already have pin and (at least) share * lock on the buffer; it is still pinned/locked at exit. Also unlike @@ -2120,6 +2157,9 @@ heap_hot_search_buffer(ItemPointer tid, Relation relation, Buffer buffer, * If we can't see it, maybe no one else can either. At caller * request, check whether all chain members are dead to all * transactions. 
+ * + * Note: if you change the criterion here for what is "dead", fix the + * planner's get_actual_variable_range() function to match. */ if (all_dead && *all_dead && !HeapTupleIsSurelyDead(heapTuple, RecentGlobalXmin)) @@ -2280,6 +2320,7 @@ heap_get_latest_tid(Relation relation, */ if ((tp.t_data->t_infomask & HEAP_XMAX_INVALID) || HeapTupleHeaderIsOnlyLocked(tp.t_data) || + HeapTupleHeaderIndicatesMovedPartitions(tp.t_data) || ItemPointerEquals(&tp.t_self, &tp.t_data->t_ctid)) { UnlockReleaseBuffer(buffer); @@ -2387,17 +2428,22 @@ ReleaseBulkInsertStatePin(BulkInsertState bistate) * This causes rows to be frozen, which is an MVCC violation and * requires explicit options chosen by user. * - * HEAP_INSERT_IS_SPECULATIVE is used on so-called "speculative insertions", + * HEAP_INSERT_SPECULATIVE is used on so-called "speculative insertions", * which can be backed out afterwards without aborting the whole transaction. * Other sessions can wait for the speculative insertion to be confirmed, * turning it into a regular tuple, or aborted, as if it never existed. * Speculatively inserted tuples behave as "value locks" of short duration, * used to implement INSERT .. ON CONFLICT. * + * HEAP_INSERT_NO_LOGICAL force-disables the emitting of logical decoding + * information for the tuple. This should solely be used during table rewrites + * where RelationIsLogicallyLogged(relation) is not yet accurate for the new + * relation. + * * Note that most of these options will be applied when inserting into the * heap's TOAST table, too, if the tuple requires any out-of-line data. Only - * HEAP_INSERT_IS_SPECULATIVE is explicitly ignored, as the toast data does - * not partake in speculative insertion. + * HEAP_INSERT_SPECULATIVE is explicitly ignored, as the toast data does not + * partake in speculative insertion. * * The BulkInsertState object (if any; bistate can be NULL for default * behavior) is also just passed through to RelationGetBufferForTuple. @@ -2522,7 +2568,8 @@ heap_insert(Relation relation, HeapTuple tup, CommandId cid, * page write, so make sure it's included even if we take a full-page * image. (XXX We could alternatively store a pointer into the FPW). */ - if (RelationIsLogicallyLogged(relation)) + if (RelationIsLogicallyLogged(relation) && + !(options & HEAP_INSERT_NO_LOGICAL)) { xlrec.flags |= XLH_INSERT_CONTAINS_NEW_TUPLE; bufflags |= REGBUF_KEEP_DATA; @@ -2597,15 +2644,17 @@ heap_prepare_insert(Relation relation, HeapTuple tup, TransactionId xid, CommandId cid, int options) { /* - * For now, parallel operations are required to be strictly read-only. - * Unlike heap_update() and heap_delete(), an insert should never create a - * combo CID, so it might be possible to relax this restriction, but not - * without more thought and testing. + * Parallel operations are required to be strictly read-only in a parallel + * worker. Parallel inserts are not safe even in the leader in the + * general case, because group locking means that heavyweight locks for + * relation extension or GIN page locks will not conflict between members + * of a lock group, but we don't prohibit that case here because there are + * useful special cases that we can safely allow, such as CREATE TABLE AS. 
*/ - if (IsInParallelMode()) + if (IsParallelWorker()) ereport(ERROR, (errcode(ERRCODE_INVALID_TRANSACTION_STATE), - errmsg("cannot insert tuples during a parallel operation"))); + errmsg("cannot insert tuples in a parallel worker"))); if (relation->rd_rel->relhasoids) { @@ -2678,13 +2727,16 @@ heap_multi_insert(Relation relation, HeapTuple *tuples, int ntuples, HeapTuple *heaptuples; int i; int ndone; - char *scratch = NULL; + PGAlignedBlock scratch; Page page; bool needwal; Size saveFreeSpace; bool need_tuple_data = RelationIsLogicallyLogged(relation); bool need_cids = RelationIsAccessibleInLogicalDecoding(relation); + /* currently not needed (thus unsupported) for heap_multi_insert() */ + AssertArg(!(options & HEAP_INSERT_NO_LOGICAL)); + needwal = !(options & HEAP_INSERT_SKIP_WAL) && RelationNeedsWAL(relation); saveFreeSpace = RelationGetTargetPageFreeSpace(relation, HEAP_DEFAULT_FILLFACTOR); @@ -2695,14 +2747,6 @@ heap_multi_insert(Relation relation, HeapTuple *tuples, int ntuples, heaptuples[i] = heap_prepare_insert(relation, tuples[i], xid, cid, options); - /* - * Allocate some memory to use for constructing the WAL record. Using - * palloc() within a critical section is not safe, so we allocate this - * beforehand. - */ - if (needwal) - scratch = palloc(BLCKSZ); - /* * We're about to do the actual inserts -- but check for conflict first, * to minimize the possibility of having to roll back work we've just @@ -2795,7 +2839,7 @@ heap_multi_insert(Relation relation, HeapTuple *tuples, int ntuples, uint8 info = XLOG_HEAP2_MULTI_INSERT; char *tupledata; int totaldatalen; - char *scratchptr = scratch; + char *scratchptr = scratch.data; bool init; int bufflags = 0; @@ -2854,7 +2898,7 @@ heap_multi_insert(Relation relation, HeapTuple *tuples, int ntuples, scratchptr += datalen; } totaldatalen = scratchptr - tupledata; - Assert((scratchptr - scratch) < BLCKSZ); + Assert((scratchptr - scratch.data) < BLCKSZ); if (need_tuple_data) xlrec->flags |= XLH_INSERT_CONTAINS_NEW_TUPLE; @@ -2881,7 +2925,7 @@ heap_multi_insert(Relation relation, HeapTuple *tuples, int ntuples, bufflags |= REGBUF_KEEP_DATA; XLogBeginInsert(); - XLogRegisterData((char *) xlrec, tupledata - scratch); + XLogRegisterData((char *) xlrec, tupledata - scratch.data); XLogRegisterBuffer(0, buffer, REGBUF_STANDARD | bufflags); XLogRegisterBufData(0, tupledata, totaldatalen); @@ -3011,6 +3055,8 @@ xmax_infomask_changed(uint16 new_infomask, uint16 old_infomask) * crosscheck - if not InvalidSnapshot, also check tuple against this * wait - true if should wait for any conflicting update to commit/abort * hufd - output parameter, filled in failure cases (see below) + * changingPart - true iff the tuple is being moved to another partition + * table due to an update of the partition key. Otherwise, false. * * Normal, successful return value is HeapTupleMayBeUpdated, which * actually means we did delete it. 
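[Editor's note] The heap_multi_insert() hunk above replaces a palloc()'d WAL scratch buffer with a stack-allocated PGAlignedBlock, since allocating memory inside a critical section is unsafe. A minimal sketch of the pattern; stage_payload, payload and payload_len are placeholders, not code from this patch.

    #include "postgres.h"
    #include "miscadmin.h"          /* START_CRIT_SECTION / END_CRIT_SECTION */

    static void
    stage_payload(const char *payload, Size payload_len)
    {
        PGAlignedBlock scratch;     /* BLCKSZ bytes on the stack, suitably aligned */
        char       *ptr = scratch.data;

        Assert(payload_len <= BLCKSZ);

        START_CRIT_SECTION();
        memcpy(ptr, payload, payload_len);  /* no allocation while critical */
        ptr += payload_len;
        END_CRIT_SECTION();

        (void) ptr;
    }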
Failure return codes are @@ -3026,7 +3072,7 @@ xmax_infomask_changed(uint16 new_infomask, uint16 old_infomask) HTSU_Result heap_delete(Relation relation, ItemPointer tid, CommandId cid, Snapshot crosscheck, bool wait, - HeapUpdateFailureData *hufd) + HeapUpdateFailureData *hufd, bool changingPart) { HTSU_Result result; TransactionId xid = GetCurrentTransactionId(); @@ -3294,6 +3340,10 @@ heap_delete(Relation relation, ItemPointer tid, /* Make sure there is no forward chain link in t_ctid */ tp.t_data->t_ctid = tp.t_self; + /* Signal that this is actually a move into another partition */ + if (changingPart) + HeapTupleHeaderSetMovedPartitions(tp.t_data); + MarkBufferDirty(buffer); /* @@ -3311,7 +3361,11 @@ heap_delete(Relation relation, ItemPointer tid, if (RelationIsAccessibleInLogicalDecoding(relation)) log_heap_new_cid(relation, &tp); - xlrec.flags = all_visible_cleared ? XLH_DELETE_ALL_VISIBLE_CLEARED : 0; + xlrec.flags = 0; + if (all_visible_cleared) + xlrec.flags |= XLH_DELETE_ALL_VISIBLE_CLEARED; + if (changingPart) + xlrec.flags |= XLH_DELETE_IS_PARTITION_MOVE; xlrec.infobits_set = compute_infobits(tp.t_data->t_infomask, tp.t_data->t_infomask2); xlrec.offnum = ItemPointerGetOffsetNumber(&tp.t_self); @@ -3419,7 +3473,7 @@ simple_heap_delete(Relation relation, ItemPointer tid) result = heap_delete(relation, tid, GetCurrentCommandId(true), InvalidSnapshot, true /* wait for commit */ , - &hufd); + &hufd, false /* changingPart */ ); switch (result) { case HeapTupleSelfUpdated: @@ -3482,6 +3536,7 @@ heap_update(Relation relation, ItemPointer otid, HeapTuple newtup, HTSU_Result result; TransactionId xid = GetCurrentTransactionId(); Bitmapset *hot_attrs; + Bitmapset *proj_idx_attrs; Bitmapset *key_attrs; Bitmapset *id_attrs; Bitmapset *interesting_attrs; @@ -3545,12 +3600,11 @@ heap_update(Relation relation, ItemPointer otid, HeapTuple newtup, * Note that we get copies of each bitmap, so we need not worry about * relcache flush happening midway through. */ - hot_attrs = RelationGetIndexAttrBitmap(relation, INDEX_ATTR_BITMAP_ALL); + hot_attrs = RelationGetIndexAttrBitmap(relation, INDEX_ATTR_BITMAP_HOT); + proj_idx_attrs = RelationGetIndexAttrBitmap(relation, INDEX_ATTR_BITMAP_PROJ); key_attrs = RelationGetIndexAttrBitmap(relation, INDEX_ATTR_BITMAP_KEY); id_attrs = RelationGetIndexAttrBitmap(relation, INDEX_ATTR_BITMAP_IDENTITY_KEY); - - block = ItemPointerGetBlockNumber(otid); buffer = ReadBuffer(relation, block); page = BufferGetPage(buffer); @@ -3570,6 +3624,7 @@ heap_update(Relation relation, ItemPointer otid, HeapTuple newtup, if (!PageIsFull(page)) { interesting_attrs = bms_add_members(interesting_attrs, hot_attrs); + interesting_attrs = bms_add_members(interesting_attrs, proj_idx_attrs); hot_attrs_checked = true; } interesting_attrs = bms_add_members(interesting_attrs, key_attrs); @@ -3868,6 +3923,7 @@ heap_update(Relation relation, ItemPointer otid, HeapTuple newtup, if (vmbuffer != InvalidBuffer) ReleaseBuffer(vmbuffer); bms_free(hot_attrs); + bms_free(proj_idx_attrs); bms_free(key_attrs); bms_free(id_attrs); bms_free(modified_attrs); @@ -4175,11 +4231,18 @@ heap_update(Relation relation, ItemPointer otid, HeapTuple newtup, /* * Since the new tuple is going into the same page, we might be able * to do a HOT update. Check if any of the index columns have been - * changed. If the page was already full, we may have skipped checking - * for index columns. If so, HOT update is possible. 
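[Editor's note] heap_delete() gains a changingPart flag (see the signature change above); a caller deleting the old copy of a row as part of a cross-partition UPDATE passes true so the tuple is stamped as moved rather than left with a self-referencing ctid. A hedged call-site sketch; delete_for_partition_move and its parameters are illustrative, only the heap_delete() call itself reflects this patch.

    #include "postgres.h"
    #include "access/heapam.h"
    #include "nodes/execnodes.h"
    #include "storage/itemptr.h"

    /* the DELETE half of a cross-partition UPDATE */
    static HTSU_Result
    delete_for_partition_move(Relation rel, ItemPointer tid, EState *estate,
                              HeapUpdateFailureData *hufd)
    {
        return heap_delete(rel, tid,
                           estate->es_output_cid,
                           estate->es_crosscheck_snapshot,
                           true,    /* wait for concurrent updaters */
                           hufd,
                           true);   /* changingPart: row moves to another partition */
    }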
+ * changed, or if we have projection functional indexes, check whether + * the old and the new values are the same. If the page was already + * full, we may have skipped checking for index columns. If so, HOT + * update is possible. */ - if (hot_attrs_checked && !bms_overlap(modified_attrs, hot_attrs)) + if (hot_attrs_checked + && !bms_overlap(modified_attrs, hot_attrs) + && (!bms_overlap(modified_attrs, proj_idx_attrs) + || ProjIndexIsUnchanged(relation, &oldtup, newtup))) + { use_hot_update = true; + } } else { @@ -4341,6 +4404,7 @@ heap_update(Relation relation, ItemPointer otid, HeapTuple newtup, heap_freetuple(old_key_tuple); bms_free(hot_attrs); + bms_free(proj_idx_attrs); bms_free(key_attrs); bms_free(id_attrs); bms_free(modified_attrs); @@ -4422,11 +4486,91 @@ heap_tuple_attr_equals(TupleDesc tupdesc, int attrnum, else { Assert(attrnum <= tupdesc->natts); - att = tupdesc->attrs[attrnum - 1]; + att = TupleDescAttr(tupdesc, attrnum - 1); return datumIsEqual(value1, value2, att->attbyval, att->attlen); } } +/* + * Check whether the value is unchanged after update of a projection + * functional index. Compare the new and old values of the indexed + * expression to see if we are able to use a HOT update or not. + */ +static bool +ProjIndexIsUnchanged(Relation relation, HeapTuple oldtup, HeapTuple newtup) +{ + ListCell *l; + List *indexoidlist = RelationGetIndexList(relation); + EState *estate = CreateExecutorState(); + ExprContext *econtext = GetPerTupleExprContext(estate); + TupleTableSlot *slot = MakeSingleTupleTableSlot(RelationGetDescr(relation)); + bool equals = true; + Datum old_values[INDEX_MAX_KEYS]; + bool old_isnull[INDEX_MAX_KEYS]; + Datum new_values[INDEX_MAX_KEYS]; + bool new_isnull[INDEX_MAX_KEYS]; + int indexno = 0; + + econtext->ecxt_scantuple = slot; + + foreach(l, indexoidlist) + { + if (bms_is_member(indexno, relation->rd_projidx)) + { + Oid indexOid = lfirst_oid(l); + Relation indexDesc = index_open(indexOid, AccessShareLock); + IndexInfo *indexInfo = BuildIndexInfo(indexDesc); + int i; + + ResetExprContext(econtext); + ExecStoreHeapTuple(oldtup, slot, false); + FormIndexDatum(indexInfo, + slot, + estate, + old_values, + old_isnull); + + ExecStoreHeapTuple(newtup, slot, false); + FormIndexDatum(indexInfo, + slot, + estate, + new_values, + new_isnull); + + for (i = 0; i < indexInfo->ii_NumIndexAttrs; i++) + { + if (old_isnull[i] != new_isnull[i]) + { + equals = false; + break; + } + else if (!old_isnull[i]) + { + Form_pg_attribute att = TupleDescAttr(RelationGetDescr(indexDesc), i); + + if (!datumIsEqual(old_values[i], new_values[i], att->attbyval, att->attlen)) + { + equals = false; + break; + } + } + } + index_close(indexDesc, AccessShareLock); + + if (!equals) + { + break; + } + } + indexno += 1; + } + ExecDropSingleTupleTableSlot(slot); + FreeExecutorState(estate); + + return equals; +} + + /* * Check which columns are being updated. * @@ -5651,6 +5795,7 @@ heap_lock_updated_tuple_rec(Relation rel, ItemPointer tid, TransactionId xid, new_xmax; TransactionId priorXmax = InvalidTransactionId; bool cleared_all_frozen = false; + bool pinned_desired_page; Buffer vmbuffer = InvalidBuffer; BlockNumber block; @@ -5672,7 +5817,8 @@ heap_lock_updated_tuple_rec(Relation rel, ItemPointer tid, TransactionId xid, * chain, and there's no further tuple to lock: return success to * caller. 
*/ - return HeapTupleMayBeUpdated; + result = HeapTupleMayBeUpdated; + goto out_unlocked; } l4: @@ -5685,9 +5831,12 @@ heap_lock_updated_tuple_rec(Relation rel, ItemPointer tid, TransactionId xid, * to recheck after we have the lock. */ if (PageIsAllVisible(BufferGetPage(buf))) + { visibilitymap_pin(rel, block, &vmbuffer); + pinned_desired_page = true; + } else - vmbuffer = InvalidBuffer; + pinned_desired_page = false; LockBuffer(buf, BUFFER_LOCK_EXCLUSIVE); @@ -5696,8 +5845,13 @@ heap_lock_updated_tuple_rec(Relation rel, ItemPointer tid, TransactionId xid, * all visible while we were busy locking the buffer, we'll have to * unlock and re-lock, to avoid holding the buffer lock across I/O. * That's a bit unfortunate, but hopefully shouldn't happen often. + * + * Note: in some paths through this function, we will reach here + * holding a pin on a vm page that may or may not be the one matching + * this page. If this page isn't all-visible, we won't use the vm + * page, but we hold onto such a pin till the end of the function. */ - if (vmbuffer == InvalidBuffer && PageIsAllVisible(BufferGetPage(buf))) + if (!pinned_desired_page && PageIsAllVisible(BufferGetPage(buf))) { LockBuffer(buf, BUFFER_LOCK_UNLOCK); visibilitymap_pin(rel, block, &vmbuffer); @@ -5723,8 +5877,8 @@ heap_lock_updated_tuple_rec(Relation rel, ItemPointer tid, TransactionId xid, */ if (TransactionIdDidAbort(HeapTupleHeaderGetXmin(mytup.t_data))) { - UnlockReleaseBuffer(buf); - return HeapTupleMayBeUpdated; + result = HeapTupleMayBeUpdated; + goto out_locked; } old_infomask = mytup.t_data->t_infomask; @@ -5920,6 +6074,7 @@ heap_lock_updated_tuple_rec(Relation rel, ItemPointer tid, TransactionId xid, next: /* if we find the end of update chain, we're done. */ if (mytup.t_data->t_infomask & HEAP_XMAX_INVALID || + HeapTupleHeaderIndicatesMovedPartitions(mytup.t_data) || ItemPointerEquals(&mytup.t_self, &mytup.t_data->t_ctid) || HeapTupleHeaderIsOnlyLocked(mytup.t_data)) { @@ -5931,8 +6086,6 @@ heap_lock_updated_tuple_rec(Relation rel, ItemPointer tid, TransactionId xid, priorXmax = HeapTupleHeaderGetUpdateXid(mytup.t_data); ItemPointerCopy(&(mytup.t_data->t_ctid), &tupid); UnlockReleaseBuffer(buf); - if (vmbuffer != InvalidBuffer) - ReleaseBuffer(vmbuffer); } result = HeapTupleMayBeUpdated; @@ -5940,11 +6093,11 @@ heap_lock_updated_tuple_rec(Relation rel, ItemPointer tid, TransactionId xid, out_locked: UnlockReleaseBuffer(buf); +out_unlocked: if (vmbuffer != InvalidBuffer) ReleaseBuffer(vmbuffer); return result; - } /* @@ -5973,7 +6126,12 @@ static HTSU_Result heap_lock_updated_tuple(Relation rel, HeapTuple tuple, ItemPointer ctid, TransactionId xid, LockTupleMode mode) { - if (!ItemPointerEquals(&tuple->t_self, ctid)) + /* + * If the tuple has not been updated, or has moved into another partition + * (effectively a delete) stop here. 
+ */ + if (!HeapTupleHeaderIndicatesMovedPartitions(tuple->t_data) && + !ItemPointerEquals(&tuple->t_self, ctid)) { /* * If this is the first possibly-multixact-able operation in the @@ -6354,6 +6512,7 @@ heap_inplace_update(Relation relation, HeapTuple tuple) */ static TransactionId FreezeMultiXactId(MultiXactId multi, uint16 t_infomask, + TransactionId relfrozenxid, TransactionId relminmxid, TransactionId cutoff_xid, MultiXactId cutoff_multi, uint16 *flags) { @@ -6380,16 +6539,26 @@ FreezeMultiXactId(MultiXactId multi, uint16 t_infomask, *flags |= FRM_INVALIDATE_XMAX; return InvalidTransactionId; } + else if (MultiXactIdPrecedes(multi, relminmxid)) + ereport(ERROR, + (errcode(ERRCODE_DATA_CORRUPTED), + errmsg_internal("found multixact %u from before relminmxid %u", + multi, relminmxid))); else if (MultiXactIdPrecedes(multi, cutoff_multi)) { /* - * This old multi cannot possibly have members still running. If it - * was a locker only, it can be removed without any further - * consideration; but if it contained an update, we might need to - * preserve it. + * This old multi cannot possibly have members still running, but + * verify just in case. If it was a locker only, it can be removed + * without any further consideration; but if it contained an update, + * we might need to preserve it. */ - Assert(!MultiXactIdIsRunning(multi, - HEAP_XMAX_IS_LOCKED_ONLY(t_infomask))); + if (MultiXactIdIsRunning(multi, + HEAP_XMAX_IS_LOCKED_ONLY(t_infomask))) + ereport(ERROR, + (errcode(ERRCODE_DATA_CORRUPTED), + errmsg_internal("multixact %u from before cutoff %u found to be still running", + multi, cutoff_multi))); + if (HEAP_XMAX_IS_LOCKED_ONLY(t_infomask)) { *flags |= FRM_INVALIDATE_XMAX; @@ -6403,13 +6572,22 @@ FreezeMultiXactId(MultiXactId multi, uint16 t_infomask, /* wasn't only a lock, xid needs to be valid */ Assert(TransactionIdIsValid(xid)); + if (TransactionIdPrecedes(xid, relfrozenxid)) + ereport(ERROR, + (errcode(ERRCODE_DATA_CORRUPTED), + errmsg_internal("found update xid %u from before relfrozenxid %u", + xid, relfrozenxid))); + /* * If the xid is older than the cutoff, it has to have aborted, * otherwise the tuple would have gotten pruned away. */ if (TransactionIdPrecedes(xid, cutoff_xid)) { - Assert(!TransactionIdDidCommit(xid)); + if (TransactionIdDidCommit(xid)) + ereport(ERROR, + (errcode(ERRCODE_DATA_CORRUPTED), + errmsg_internal("cannot freeze committed update xid %u", xid))); *flags |= FRM_INVALIDATE_XMAX; xid = InvalidTransactionId; /* not strictly necessary */ } @@ -6481,6 +6659,13 @@ FreezeMultiXactId(MultiXactId multi, uint16 t_infomask, { TransactionId xid = members[i].xid; + Assert(TransactionIdIsValid(xid)); + if (TransactionIdPrecedes(xid, relfrozenxid)) + ereport(ERROR, + (errcode(ERRCODE_DATA_CORRUPTED), + errmsg_internal("found update xid %u from before relfrozenxid %u", + xid, relfrozenxid))); + /* * It's an update; should we keep it? If the transaction is known * aborted or crashed then it's okay to ignore it, otherwise not. @@ -6509,18 +6694,26 @@ FreezeMultiXactId(MultiXactId multi, uint16 t_infomask, update_committed = true; update_xid = xid; } - - /* - * Not in progress, not committed -- must be aborted or crashed; - * we can ignore it. - */ + else + { + /* + * Not in progress, not committed -- must be aborted or + * crashed; we can ignore it. + */ + } /* * Since the tuple wasn't marked HEAPTUPLE_DEAD by vacuum, the - * update Xid cannot possibly be older than the xid cutoff. + * update Xid cannot possibly be older than the xid cutoff. 
The + * presence of such a tuple would cause corruption, so be paranoid + * and check. */ - Assert(!TransactionIdIsValid(update_xid) || - !TransactionIdPrecedes(update_xid, cutoff_xid)); + if (TransactionIdIsValid(update_xid) && + TransactionIdPrecedes(update_xid, cutoff_xid)) + ereport(ERROR, + (errcode(ERRCODE_DATA_CORRUPTED), + errmsg_internal("found update xid %u from before xid cutoff %u", + update_xid, cutoff_xid))); /* * If we determined that it's an Xid corresponding to an update @@ -6591,7 +6784,7 @@ FreezeMultiXactId(MultiXactId multi, uint16 t_infomask, * Check to see whether any of the XID fields of a tuple (xmin, xmax, xvac) * are older than the specified cutoff XID and cutoff MultiXactId. If so, * setup enough state (in the *frz output argument) to later execute and - * WAL-log what we would need to do, and return TRUE. Return FALSE if nothing + * WAL-log what we would need to do, and return true. Return false if nothing * is to be changed. In addition, set *totally_frozen_p to true if the tuple * will be totally frozen after these operations are performed and false if * more freezing will eventually be required. @@ -6617,14 +6810,16 @@ FreezeMultiXactId(MultiXactId multi, uint16 t_infomask, * recovery. We really need to remove old xids. */ bool -heap_prepare_freeze_tuple(HeapTupleHeader tuple, TransactionId cutoff_xid, - TransactionId cutoff_multi, +heap_prepare_freeze_tuple(HeapTupleHeader tuple, + TransactionId relfrozenxid, TransactionId relminmxid, + TransactionId cutoff_xid, TransactionId cutoff_multi, xl_heap_freeze_tuple *frz, bool *totally_frozen_p) { bool changed = false; - bool freeze_xmax = false; + bool xmax_already_frozen = false; + bool xmin_frozen; + bool freeze_xmax; TransactionId xid; - bool totally_frozen = true; frz->frzflags = 0; frz->t_infomask2 = tuple->t_infomask2; @@ -6633,15 +6828,28 @@ heap_prepare_freeze_tuple(HeapTupleHeader tuple, TransactionId cutoff_xid, /* Process xmin */ xid = HeapTupleHeaderGetXmin(tuple); + xmin_frozen = ((xid == FrozenTransactionId) || + HeapTupleHeaderXminFrozen(tuple)); if (TransactionIdIsNormal(xid)) { + if (TransactionIdPrecedes(xid, relfrozenxid)) + ereport(ERROR, + (errcode(ERRCODE_DATA_CORRUPTED), + errmsg_internal("found xmin %u from before relfrozenxid %u", + xid, relfrozenxid))); + if (TransactionIdPrecedes(xid, cutoff_xid)) { + if (!TransactionIdDidCommit(xid)) + ereport(ERROR, + (errcode(ERRCODE_DATA_CORRUPTED), + errmsg_internal("uncommitted xmin %u from before xid cutoff %u needs to be frozen", + xid, cutoff_xid))); + frz->t_infomask |= HEAP_XMIN_FROZEN; changed = true; + xmin_frozen = true; } - else - totally_frozen = false; } /* @@ -6661,11 +6869,12 @@ heap_prepare_freeze_tuple(HeapTupleHeader tuple, TransactionId cutoff_xid, uint16 flags; newxmax = FreezeMultiXactId(xid, tuple->t_infomask, + relfrozenxid, relminmxid, cutoff_xid, cutoff_multi, &flags); - if (flags & FRM_INVALIDATE_XMAX) - freeze_xmax = true; - else if (flags & FRM_RETURN_IS_XID) + freeze_xmax = (flags & FRM_INVALIDATE_XMAX); + + if (flags & FRM_RETURN_IS_XID) { /* * NB -- some of these transformations are only valid because we @@ -6679,7 +6888,6 @@ heap_prepare_freeze_tuple(HeapTupleHeader tuple, TransactionId cutoff_xid, if (flags & FRM_MARK_COMMITTED) frz->t_infomask |= HEAP_XMAX_COMMITTED; changed = true; - totally_frozen = false; } else if (flags & FRM_RETURN_IS_MULTI) { @@ -6701,23 +6909,51 @@ heap_prepare_freeze_tuple(HeapTupleHeader tuple, TransactionId cutoff_xid, frz->xmax = newxmax; changed = true; - totally_frozen = false; - } - 
else - { - Assert(flags & FRM_NOOP); } } else if (TransactionIdIsNormal(xid)) { + if (TransactionIdPrecedes(xid, relfrozenxid)) + ereport(ERROR, + (errcode(ERRCODE_DATA_CORRUPTED), + errmsg_internal("found xmax %u from before relfrozenxid %u", + xid, relfrozenxid))); + if (TransactionIdPrecedes(xid, cutoff_xid)) + { + /* + * If we freeze xmax, make absolutely sure that it's not an XID + * that is important. (Note, a lock-only xmax can be removed + * independent of committedness, since a committed lock holder has + * released the lock). + */ + if (!HEAP_XMAX_IS_LOCKED_ONLY(tuple->t_infomask) && + TransactionIdDidCommit(xid)) + ereport(ERROR, + (errcode(ERRCODE_DATA_CORRUPTED), + errmsg_internal("cannot freeze committed xmax %u", + xid))); freeze_xmax = true; + } else - totally_frozen = false; + freeze_xmax = false; + } + else if ((tuple->t_infomask & HEAP_XMAX_INVALID) || + !TransactionIdIsValid(HeapTupleHeaderGetRawXmax(tuple))) + { + freeze_xmax = false; + xmax_already_frozen = true; } + else + ereport(ERROR, + (errcode(ERRCODE_DATA_CORRUPTED), + errmsg_internal("found xmax %u (infomask 0x%04x) not frozen, not multi, not normal", + xid, tuple->t_infomask))); if (freeze_xmax) { + Assert(!xmax_already_frozen); + frz->xmax = InvalidTransactionId; /* @@ -6770,7 +7006,8 @@ heap_prepare_freeze_tuple(HeapTupleHeader tuple, TransactionId cutoff_xid, } } - *totally_frozen_p = totally_frozen; + *totally_frozen_p = (xmin_frozen && + (freeze_xmax || xmax_already_frozen)); return changed; } @@ -6816,14 +7053,17 @@ heap_execute_freeze_tuple(HeapTupleHeader tuple, xl_heap_freeze_tuple *frz) * Useful for callers like CLUSTER that perform their own WAL logging. */ bool -heap_freeze_tuple(HeapTupleHeader tuple, TransactionId cutoff_xid, - TransactionId cutoff_multi) +heap_freeze_tuple(HeapTupleHeader tuple, + TransactionId relfrozenxid, TransactionId relminmxid, + TransactionId cutoff_xid, TransactionId cutoff_multi) { xl_heap_freeze_tuple frz; bool do_freeze; bool tuple_totally_frozen; - do_freeze = heap_prepare_freeze_tuple(tuple, cutoff_xid, cutoff_multi, + do_freeze = heap_prepare_freeze_tuple(tuple, + relfrozenxid, relminmxid, + cutoff_xid, cutoff_multi, &frz, &tuple_totally_frozen); /* @@ -7239,7 +7479,7 @@ heap_tuple_needs_eventual_freeze(HeapTupleHeader tuple) * heap_tuple_needs_freeze * * Check to see whether any of the XID fields of a tuple (xmin, xmax, xvac) - * are older than the specified cutoff XID or MultiXactId. If so, return TRUE. + * are older than the specified cutoff XID or MultiXactId. If so, return true. * * It doesn't matter whether the tuple is alive or dead, we are checking * to see if a tuple needs to be removed or frozen to avoid wraparound. 
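[Editor's note] heap_prepare_freeze_tuple() and heap_freeze_tuple() now take the table's relfrozenxid and relminmxid so that pre-existing corruption raises an error even in production builds instead of tripping a debug-only Assert. A hedged sketch of an updated call site; prepare_freeze and the freeze_limit/multi_cutoff parameters stand in for a vacuum-style caller and are not code from this patch.

    #include "postgres.h"
    #include "access/heapam_xlog.h"     /* xl_heap_freeze_tuple, heap_prepare_freeze_tuple */
    #include "utils/rel.h"

    static bool
    prepare_freeze(Relation rel, HeapTuple tuple,
                   TransactionId freeze_limit, MultiXactId multi_cutoff,
                   xl_heap_freeze_tuple *frz)
    {
        bool        totally_frozen;

        /* the new second and third arguments enable the corruption checks */
        return heap_prepare_freeze_tuple(tuple->t_data,
                                         rel->rd_rel->relfrozenxid,
                                         rel->rd_rel->relminmxid,
                                         freeze_limit, multi_cutoff,
                                         frz, &totally_frozen);
    }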
@@ -7822,7 +8062,6 @@ ExtractReplicaIdentity(Relation relation, HeapTuple tp, bool key_changed, bool * TupleDesc desc = RelationGetDescr(relation); Oid replidindex; Relation idx_rel; - TupleDesc idx_desc; char replident = relation->rd_rel->relreplident; HeapTuple key_tuple = NULL; bool nulls[MaxHeapAttributeNumber]; @@ -7865,7 +8104,8 @@ ExtractReplicaIdentity(Relation relation, HeapTuple tp, bool key_changed, bool * } idx_rel = RelationIdGetRelation(replidindex); - idx_desc = RelationGetDescr(idx_rel); + + Assert(CheckRelationLockedByMe(idx_rel, AccessShareLock, true)); /* deform tuple, so we have fast access to columns */ heap_deform_tuple(tp, desc, values, nulls); @@ -7877,7 +8117,7 @@ ExtractReplicaIdentity(Relation relation, HeapTuple tp, bool key_changed, bool * * Now set all columns contained in the index to NOT NULL, they cannot * currently be NULL. */ - for (natt = 0; natt < idx_desc->natts; natt++) + for (natt = 0; natt < IndexRelationGetNumberOfKeyAttributes(idx_rel); natt++) { int attno = idx_rel->rd_index->indkey.values[natt]; @@ -7947,7 +8187,7 @@ heap_xlog_cleanup_info(XLogReaderState *record) } /* - * Handles HEAP2_CLEAN record type + * Handles XLOG_HEAP2_CLEAN record type */ static void heap_xlog_clean(XLogReaderState *record) @@ -7955,7 +8195,6 @@ heap_xlog_clean(XLogReaderState *record) XLogRecPtr lsn = record->EndRecPtr; xl_heap_clean *xlrec = (xl_heap_clean *) XLogRecGetData(record); Buffer buffer; - Size freespace = 0; RelFileNode rnode; BlockNumber blkno; XLogRedoAction action; @@ -8007,8 +8246,6 @@ heap_xlog_clean(XLogReaderState *record) nowdead, ndead, nowunused, nunused); - freespace = PageGetHeapFreeSpace(page); /* needed to update FSM below */ - /* * Note: we don't worry about updating the page's prunability hints. * At worst this will cause an extra prune cycle to occur soon. @@ -8017,18 +8254,24 @@ heap_xlog_clean(XLogReaderState *record) PageSetLSN(page, lsn); MarkBufferDirty(buffer); } + if (BufferIsValid(buffer)) + { + Size freespace = PageGetHeapFreeSpace(BufferGetPage(buffer)); + UnlockReleaseBuffer(buffer); - /* - * Update the FSM as well. - * - * XXX: Don't do this if the page was restored from full page image. We - * don't bother to update the FSM in that case, it doesn't need to be - * totally accurate anyway. - */ - if (action == BLK_NEEDS_REDO) + /* + * After cleaning records from a page, it's useful to update the FSM + * about it, as it may cause the page become target for insertions + * later even if vacuum decides not to visit it (which is possible if + * gets marked all-visible.) + * + * Do this regardless of a full-page image being applied, since the + * FSM data is not in the page anyway. + */ XLogRecordPageWithFreeSpace(rnode, blkno, freespace); + } } /* @@ -8101,9 +8344,34 @@ heap_xlog_visible(XLogReaderState *record) * wal_log_hints enabled.) */ } + if (BufferIsValid(buffer)) + { + Size space = PageGetFreeSpace(BufferGetPage(buffer)); + UnlockReleaseBuffer(buffer); + /* + * Since FSM is not WAL-logged and only updated heuristically, it + * easily becomes stale in standbys. If the standby is later promoted + * and runs VACUUM, it will skip updating individual free space + * figures for pages that became all-visible (or all-frozen, depending + * on the vacuum mode,) which is troublesome when FreeSpaceMapVacuum + * propagates too optimistic free space values to upper FSM layers; + * later inserters try to use such pages only to find out that they + * are unusable. This can cause long stalls when there are many such + * pages. 
+ * + * Forestall those problems by updating FSM's idea about a page that + * is becoming all-visible or all-frozen. + * + * Do this regardless of a full-page image being applied, since the + * FSM data is not in the page anyway. + */ + if (xlrec->flags & VISIBILITYMAP_VALID_BITS) + XLogRecordPageWithFreeSpace(rnode, blkno, space); + } + /* * Even if we skipped the heap page update due to the LSN interlock, it's * still safe to update the visibility map. Any WAL record that clears @@ -8294,8 +8562,11 @@ heap_xlog_delete(XLogReaderState *record) if (xlrec->flags & XLH_DELETE_ALL_VISIBLE_CLEARED) PageClearAllVisible(page); - /* Make sure there is no forward chain link in t_ctid */ - htup->t_ctid = target_tid; + /* Make sure t_ctid is set correctly */ + if (xlrec->flags & XLH_DELETE_IS_PARTITION_MOVE) + HeapTupleHeaderSetMovedPartitions(htup); + else + htup->t_ctid = target_tid; PageSetLSN(page, lsn); MarkBufferDirty(buffer); } @@ -9059,6 +9330,14 @@ heap_redo(XLogReaderState *record) case XLOG_HEAP_UPDATE: heap_xlog_update(record, false); break; + case XLOG_HEAP_TRUNCATE: + + /* + * TRUNCATE is a no-op because the actions are already logged as + * SMGR WAL records. TRUNCATE WAL record only exists for logical + * decoding. + */ + break; case XLOG_HEAP_HOT_UPDATE: heap_xlog_update(record, true); break; @@ -9165,7 +9444,7 @@ heap_mask(char *pagedata, BlockNumber blkno) Page page = (Page) pagedata; OffsetNumber off; - mask_page_lsn(page); + mask_page_lsn_and_checksum(page); mask_page_hint_bits(page); mask_unused_space(page); @@ -9216,6 +9495,13 @@ heap_mask(char *pagedata, BlockNumber blkno) */ if (HeapTupleHeaderIsSpeculative(page_htup)) ItemPointerSet(&page_htup->t_ctid, blkno, off); + + /* + * NB: Not ignoring ctid changes due to the tuple having moved + * (i.e. HeapTupleHeaderIndicatesMovedPartitions), because that's + * important information that needs to be in-sync between primary + * and standby, and thus is WAL logged. + */ } /* diff --git a/src/backend/access/heap/hio.c b/src/backend/access/heap/hio.c index 13e3bdca50..b8b5871559 100644 --- a/src/backend/access/heap/hio.c +++ b/src/backend/access/heap/hio.c @@ -3,7 +3,7 @@ * hio.c * POSTGRES heap access method input/output code. * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * @@ -67,9 +67,9 @@ RelationPutHeapTuple(Relation relation, if (!token) { ItemId itemId = PageGetItemId(pageHeader, offnum); - Item item = PageGetItem(pageHeader, itemId); + HeapTupleHeader item = (HeapTupleHeader) PageGetItem(pageHeader, itemId); - ((HeapTupleHeader) item)->t_ctid = tuple->t_self; + item->t_ctid = tuple->t_self; } } @@ -177,13 +177,10 @@ GetVisibilityMapPins(Relation relation, Buffer buffer1, Buffer buffer2, static void RelationAddExtraBlocks(Relation relation, BulkInsertState bistate) { - Page page; - BlockNumber blockNum = InvalidBlockNumber, + BlockNumber blockNum, firstBlock = InvalidBlockNumber; - int extraBlocks = 0; - int lockWaiters = 0; - Size freespace = 0; - Buffer buffer; + int extraBlocks; + int lockWaiters; /* Use the length of the lock wait queue to judge how much to extend. 
*/ lockWaiters = RelationExtensionLockWaiterCount(relation); @@ -198,18 +195,40 @@ RelationAddExtraBlocks(Relation relation, BulkInsertState bistate) */ extraBlocks = Min(512, lockWaiters * 20); - while (extraBlocks-- >= 0) + do { - /* Ouch - an unnecessary lseek() each time through the loop! */ + Buffer buffer; + Page page; + Size freespace; + + /* + * Extend by one page. This should generally match the main-line + * extension code in RelationGetBufferForTuple, except that we hold + * the relation extension lock throughout. + */ buffer = ReadBufferBI(relation, P_NEW, bistate); - /* Extend by one page. */ LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE); page = BufferGetPage(buffer); + + if (!PageIsNew(page)) + elog(ERROR, "page %u of relation \"%s\" should be empty but is not", + BufferGetBlockNumber(buffer), + RelationGetRelationName(relation)); + PageInit(page, BufferGetPageSize(buffer), 0); + + /* + * We mark all the new buffers dirty, but do nothing to write them + * out; they'll probably get used soon, and even if they are not, a + * crash will leave an okay all-zeroes page on disk. + */ MarkBufferDirty(buffer); + + /* we'll need this info below */ blockNum = BufferGetBlockNumber(buffer); freespace = PageGetHeapFreeSpace(page); + UnlockReleaseBuffer(buffer); /* Remember first block number thus added. */ @@ -223,18 +242,15 @@ RelationAddExtraBlocks(Relation relation, BulkInsertState bistate) */ RecordPageWithFreeSpace(relation, blockNum, freespace); } + while (--extraBlocks > 0); /* * Updating the upper levels of the free space map is too expensive to do * for every block, but it's worth doing once at the end to make sure that * subsequent insertion activity sees all of those nifty free pages we * just inserted. - * - * Note that we're using the freespace value that was reported for the - * last block we added as if it were the freespace value for every block - * we added. That's actually true, because they're all equally empty. */ - UpdateFreeSpaceMap(relation, firstBlock, blockNum, freespace); + FreeSpaceMapVacuumRange(relation, firstBlock, blockNum + 1); } /* diff --git a/src/backend/access/heap/pruneheap.c b/src/backend/access/heap/pruneheap.c index 52231ac417..c2f5343dac 100644 --- a/src/backend/access/heap/pruneheap.c +++ b/src/backend/access/heap/pruneheap.c @@ -3,7 +3,7 @@ * pruneheap.c * heap page pruning and HOT-chain management code * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * @@ -39,7 +39,7 @@ typedef struct OffsetNumber redirected[MaxHeapTuplesPerPage * 2]; OffsetNumber nowdead[MaxHeapTuplesPerPage]; OffsetNumber nowunused[MaxHeapTuplesPerPage]; - /* marked[i] is TRUE if item i is entered in one of the above arrays */ + /* marked[i] is true if item i is entered in one of the above arrays */ bool marked[MaxHeapTuplesPerPage + 1]; } PruneState; @@ -170,7 +170,7 @@ heap_page_prune_opt(Relation relation, Buffer buffer) * or RECENTLY_DEAD (see HeapTupleSatisfiesVacuum). * * If report_stats is true then we send the number of reclaimed heap-only - * tuples to pgstats. (This must be FALSE during vacuum, since vacuum will + * tuples to pgstats. (This must be false during vacuum, since vacuum will * send its own new total to pgstats, and we don't want this delta applied * on top of that.) 
* @@ -552,6 +552,9 @@ heap_prune_chain(Relation relation, Buffer buffer, OffsetNumber rootoffnum, if (!HeapTupleHeaderIsHotUpdated(htup)) break; + /* HOT implies it can't have moved to different partition */ + Assert(!HeapTupleHeaderIndicatesMovedPartitions(htup)); + /* * Advance to next chain member. */ @@ -823,6 +826,9 @@ heap_get_root_tuples(Page page, OffsetNumber *root_offsets) if (!HeapTupleHeaderIsHotUpdated(htup)) break; + /* HOT implies it can't have moved to different partition */ + Assert(!HeapTupleHeaderIndicatesMovedPartitions(htup)); + nextoffnum = ItemPointerGetOffsetNumber(&htup->t_ctid); priorXmax = HeapTupleHeaderGetUpdateXid(htup); } diff --git a/src/backend/access/heap/rewriteheap.c b/src/backend/access/heap/rewriteheap.c index bd560e47e1..c5db75afa1 100644 --- a/src/backend/access/heap/rewriteheap.c +++ b/src/backend/access/heap/rewriteheap.c @@ -92,7 +92,7 @@ * heap's TOAST table will go through the normal bufmgr. * * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994-5, Regents of the University of California * * IDENTIFICATION @@ -407,7 +407,10 @@ rewrite_heap_tuple(RewriteState state, * While we have our hands on the tuple, we may as well freeze any * eligible xmin or xmax, so that future VACUUM effort can be saved. */ - heap_freeze_tuple(new_tuple->t_data, state->rs_freeze_xid, + heap_freeze_tuple(new_tuple->t_data, + state->rs_old_rel->rd_rel->relfrozenxid, + state->rs_old_rel->rd_rel->relminmxid, + state->rs_freeze_xid, state->rs_cutoff_multi); /* @@ -421,6 +424,7 @@ rewrite_heap_tuple(RewriteState state, */ if (!((old_tuple->t_data->t_infomask & HEAP_XMAX_INVALID) || HeapTupleHeaderIsOnlyLocked(old_tuple->t_data)) && + !HeapTupleHeaderIndicatesMovedPartitions(old_tuple->t_data) && !(ItemPointerEquals(&(old_tuple->t_self), &(old_tuple->t_data->t_ctid)))) { @@ -648,10 +652,23 @@ raw_heap_insert(RewriteState state, HeapTuple tup) heaptup = tup; } else if (HeapTupleHasExternal(tup) || tup->t_len > TOAST_TUPLE_THRESHOLD) + { + int options = HEAP_INSERT_SKIP_FSM; + + if (!state->rs_use_wal) + options |= HEAP_INSERT_SKIP_WAL; + + /* + * The new relfilenode's relcache entrye doesn't have the necessary + * information to determine whether a relation should emit data for + * logical decoding. Force it to off if necessary. + */ + if (!RelationIsLogicallyLogged(state->rs_old_rel)) + options |= HEAP_INSERT_NO_LOGICAL; + heaptup = toast_insert_or_update(state->rs_new_rel, tup, NULL, - HEAP_INSERT_SKIP_FSM | - (state->rs_use_wal ? - 0 : HEAP_INSERT_SKIP_WAL)); + options); + } else heaptup = tup; @@ -918,7 +935,7 @@ logical_heap_rewrite_flush_mappings(RewriteState state) * Note that we deviate from the usual WAL coding practices here, * check the above "Logical rewrite support" comment for reasoning. 
*/ - written = FileWrite(src->vfd, waldata_start, len, + written = FileWrite(src->vfd, waldata_start, len, src->off, WAIT_EVENT_LOGICAL_REWRITE_WRITE); if (written != len) ereport(ERROR, @@ -1013,8 +1030,7 @@ logical_rewrite_log_mapping(RewriteState state, TransactionId xid, src->off = 0; memcpy(src->path, path, sizeof(path)); src->vfd = PathNameOpenFile(path, - O_CREAT | O_EXCL | O_WRONLY | PG_BINARY, - S_IRUSR | S_IWUSR); + O_CREAT | O_EXCL | O_WRONLY | PG_BINARY); if (src->vfd < 0) ereport(ERROR, (errcode_for_file_access(), @@ -1133,8 +1149,7 @@ heap_xlog_logical_rewrite(XLogReaderState *r) xlrec->mapped_xid, XLogRecGetXid(r)); fd = OpenTransientFile(path, - O_CREAT | O_WRONLY | PG_BINARY, - S_IRUSR | S_IWUSR); + O_CREAT | O_WRONLY | PG_BINARY); if (fd < 0) ereport(ERROR, (errcode_for_file_access(), @@ -1164,11 +1179,17 @@ heap_xlog_logical_rewrite(XLogReaderState *r) len = xlrec->num_mappings * sizeof(LogicalRewriteMappingData); /* write out tail end of mapping file (again) */ + errno = 0; pgstat_report_wait_start(WAIT_EVENT_LOGICAL_REWRITE_MAPPING_WRITE); if (write(fd, data, len) != len) + { + /* if write didn't set errno, assume problem is no disk space */ + if (errno == 0) + errno = ENOSPC; ereport(ERROR, (errcode_for_file_access(), errmsg("could not write to file \"%s\": %m", path))); + } pgstat_report_wait_end(); /* @@ -1258,7 +1279,7 @@ CheckPointLogicalRewriteHeap(void) } else { - int fd = OpenTransientFile(path, O_RDONLY | PG_BINARY, 0); + int fd = OpenTransientFile(path, O_RDONLY | PG_BINARY); /* * The file cannot vanish due to concurrency since this function diff --git a/src/backend/access/heap/syncscan.c b/src/backend/access/heap/syncscan.c index 20640cbbaf..054eb066e9 100644 --- a/src/backend/access/heap/syncscan.c +++ b/src/backend/access/heap/syncscan.c @@ -36,7 +36,7 @@ * ss_report_location - update current scan location * * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * IDENTIFICATION diff --git a/src/backend/access/heap/tuptoaster.c b/src/backend/access/heap/tuptoaster.c index 458180bc95..cd42c50b09 100644 --- a/src/backend/access/heap/tuptoaster.c +++ b/src/backend/access/heap/tuptoaster.c @@ -4,7 +4,7 @@ * Support routines for external and compressed storage of * variable size attributes. * - * Copyright (c) 2000-2017, PostgreSQL Global Development Group + * Copyright (c) 2000-2018, PostgreSQL Global Development Group * * * IDENTIFICATION @@ -464,7 +464,6 @@ void toast_delete(Relation rel, HeapTuple oldtup, bool is_speculative) { TupleDesc tupleDesc; - Form_pg_attribute *att; int numAttrs; int i; Datum toast_values[MaxHeapAttributeNumber]; @@ -489,7 +488,6 @@ toast_delete(Relation rel, HeapTuple oldtup, bool is_speculative) * least one varlena column, by the way.) 
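The write() error path added above clears errno before the call and, if a short write leaves errno unset, assumes the disk filled up and reports ENOSPC so the %m in the message says something useful. A self-contained sketch of that convention, using a hypothetical temporary file path:

#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

int
main(void)
{
	const char *path = "/tmp/enospc-demo.tmp";	/* hypothetical path */
	const char	buf[] = "some mapping data";
	int			fd;
	ssize_t		written;

	fd = open(path, O_CREAT | O_WRONLY | O_TRUNC, 0600);
	if (fd < 0)
	{
		perror("open");
		return EXIT_FAILURE;
	}

	/*
	 * Clear errno first: a short write need not set it, and in that case
	 * the most plausible explanation is that there was no disk space.
	 */
	errno = 0;
	written = write(fd, buf, sizeof(buf));
	if (written != (ssize_t) sizeof(buf))
	{
		if (errno == 0)
			errno = ENOSPC;
		fprintf(stderr, "could not write to file \"%s\": %s\n",
				path, strerror(errno));
		close(fd);
		return EXIT_FAILURE;
	}

	close(fd);
	return EXIT_SUCCESS;
}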
*/ tupleDesc = rel->rd_att; - att = tupleDesc->attrs; numAttrs = tupleDesc->natts; Assert(numAttrs <= MaxHeapAttributeNumber); @@ -501,7 +499,7 @@ toast_delete(Relation rel, HeapTuple oldtup, bool is_speculative) */ for (i = 0; i < numAttrs; i++) { - if (att[i]->attlen == -1) + if (TupleDescAttr(tupleDesc, i)->attlen == -1) { Datum value = toast_values[i]; @@ -538,7 +536,6 @@ toast_insert_or_update(Relation rel, HeapTuple newtup, HeapTuple oldtup, { HeapTuple result_tuple; TupleDesc tupleDesc; - Form_pg_attribute *att; int numAttrs; int i; @@ -579,7 +576,6 @@ toast_insert_or_update(Relation rel, HeapTuple newtup, HeapTuple oldtup, * Get the tuple descriptor and break down the tuple(s) into fields. */ tupleDesc = rel->rd_att; - att = tupleDesc->attrs; numAttrs = tupleDesc->natts; Assert(numAttrs <= MaxHeapAttributeNumber); @@ -606,6 +602,7 @@ toast_insert_or_update(Relation rel, HeapTuple newtup, HeapTuple oldtup, for (i = 0; i < numAttrs; i++) { + Form_pg_attribute att = TupleDescAttr(tupleDesc, i); struct varlena *old_value; struct varlena *new_value; @@ -621,7 +618,7 @@ toast_insert_or_update(Relation rel, HeapTuple newtup, HeapTuple oldtup, * If the old value is stored on disk, check if it has changed so * we have to delete it later. */ - if (att[i]->attlen == -1 && !toast_oldisnull[i] && + if (att->attlen == -1 && !toast_oldisnull[i] && VARATT_IS_EXTERNAL_ONDISK(old_value)) { if (toast_isnull[i] || !VARATT_IS_EXTERNAL_ONDISK(new_value) || @@ -668,12 +665,12 @@ toast_insert_or_update(Relation rel, HeapTuple newtup, HeapTuple oldtup, /* * Now look at varlena attributes */ - if (att[i]->attlen == -1) + if (att->attlen == -1) { /* * If the table's attribute says PLAIN always, force it so. */ - if (att[i]->attstorage == 'p') + if (att->attstorage == 'p') toast_action[i] = 'p'; /* @@ -687,7 +684,7 @@ toast_insert_or_update(Relation rel, HeapTuple newtup, HeapTuple oldtup, if (VARATT_IS_EXTERNAL(new_value)) { toast_oldexternal[i] = new_value; - if (att[i]->attstorage == 'p') + if (att->attstorage == 'p') new_value = heap_tuple_untoast_attr(new_value); else new_value = heap_tuple_fetch_attr(new_value); @@ -730,7 +727,7 @@ toast_insert_or_update(Relation rel, HeapTuple newtup, HeapTuple oldtup, hoff += sizeof(Oid); hoff = MAXALIGN(hoff); /* now convert to a limit on the tuple data size */ - maxDataLen = TOAST_TUPLE_TARGET - hoff; + maxDataLen = RelationGetToastTupleTarget(rel, TOAST_TUPLE_TARGET) - hoff; /* * Look for attributes with attstorage 'x' to compress. 
Also find large @@ -749,13 +746,15 @@ toast_insert_or_update(Relation rel, HeapTuple newtup, HeapTuple oldtup, */ for (i = 0; i < numAttrs; i++) { + Form_pg_attribute att = TupleDescAttr(tupleDesc, i); + if (toast_action[i] != ' ') continue; if (VARATT_IS_EXTERNAL(DatumGetPointer(toast_values[i]))) continue; /* can't happen, toast_action would be 'p' */ if (VARATT_IS_COMPRESSED(DatumGetPointer(toast_values[i]))) continue; - if (att[i]->attstorage != 'x' && att[i]->attstorage != 'e') + if (att->attstorage != 'x' && att->attstorage != 'e') continue; if (toast_sizes[i] > biggest_size) { @@ -771,7 +770,7 @@ toast_insert_or_update(Relation rel, HeapTuple newtup, HeapTuple oldtup, * Attempt to compress it inline, if it has attstorage 'x' */ i = biggest_attno; - if (att[i]->attstorage == 'x') + if (TupleDescAttr(tupleDesc, i)->attstorage == 'x') { old_value = toast_values[i]; new_value = toast_compress_datum(old_value); @@ -841,11 +840,13 @@ toast_insert_or_update(Relation rel, HeapTuple newtup, HeapTuple oldtup, */ for (i = 0; i < numAttrs; i++) { + Form_pg_attribute att = TupleDescAttr(tupleDesc, i); + if (toast_action[i] == 'p') continue; if (VARATT_IS_EXTERNAL(DatumGetPointer(toast_values[i]))) continue; /* can't happen, toast_action would be 'p' */ - if (att[i]->attstorage != 'x' && att[i]->attstorage != 'e') + if (att->attstorage != 'x' && att->attstorage != 'e') continue; if (toast_sizes[i] > biggest_size) { @@ -896,7 +897,7 @@ toast_insert_or_update(Relation rel, HeapTuple newtup, HeapTuple oldtup, continue; /* can't happen, toast_action would be 'p' */ if (VARATT_IS_COMPRESSED(DatumGetPointer(toast_values[i]))) continue; - if (att[i]->attstorage != 'm') + if (TupleDescAttr(tupleDesc, i)->attstorage != 'm') continue; if (toast_sizes[i] > biggest_size) { @@ -959,7 +960,7 @@ toast_insert_or_update(Relation rel, HeapTuple newtup, HeapTuple oldtup, continue; if (VARATT_IS_EXTERNAL(DatumGetPointer(toast_values[i]))) continue; /* can't happen, toast_action would be 'p' */ - if (att[i]->attstorage != 'm') + if (TupleDescAttr(tupleDesc, i)->attstorage != 'm') continue; if (toast_sizes[i] > biggest_size) { @@ -1084,7 +1085,6 @@ HeapTuple toast_flatten_tuple(HeapTuple tup, TupleDesc tupleDesc) { HeapTuple new_tuple; - Form_pg_attribute *att = tupleDesc->attrs; int numAttrs = tupleDesc->natts; int i; Datum toast_values[MaxTupleAttributeNumber]; @@ -1104,7 +1104,7 @@ toast_flatten_tuple(HeapTuple tup, TupleDesc tupleDesc) /* * Look at non-null varlena attributes */ - if (!toast_isnull[i] && att[i]->attlen == -1) + if (!toast_isnull[i] && TupleDescAttr(tupleDesc, i)->attlen == -1) { struct varlena *new_value; @@ -1193,7 +1193,6 @@ toast_flatten_tuple_to_datum(HeapTupleHeader tup, int32 new_data_len; int32 new_tuple_len; HeapTupleData tmptup; - Form_pg_attribute *att = tupleDesc->attrs; int numAttrs = tupleDesc->natts; int i; bool has_nulls = false; @@ -1222,7 +1221,7 @@ toast_flatten_tuple_to_datum(HeapTupleHeader tup, */ if (toast_isnull[i]) has_nulls = true; - else if (att[i]->attlen == -1) + else if (TupleDescAttr(tupleDesc, i)->attlen == -1) { struct varlena *new_value; @@ -1307,7 +1306,6 @@ toast_build_flattened_tuple(TupleDesc tupleDesc, bool *isnull) { HeapTuple new_tuple; - Form_pg_attribute *att = tupleDesc->attrs; int numAttrs = tupleDesc->natts; int num_to_free; int i; @@ -1327,7 +1325,7 @@ toast_build_flattened_tuple(TupleDesc tupleDesc, /* * Look at non-null varlena attributes */ - if (!isnull[i] && att[i]->attlen == -1) + if (!isnull[i] && TupleDescAttr(tupleDesc, i)->attlen == -1) { 
struct varlena *new_value; @@ -1796,7 +1794,9 @@ toast_delete_datum(Relation rel, Datum value, bool is_speculative) /* ---------- * toastrel_valueid_exists - * - * Test whether a toast value with the given ID exists in the toast relation + * Test whether a toast value with the given ID exists in the toast relation. + * For safety, we consider a value to exist if there are either live or dead + * toast rows with that ID; see notes for GetNewOid(). * ---------- */ static bool @@ -1808,7 +1808,6 @@ toastrel_valueid_exists(Relation toastrel, Oid valueid) int num_indexes; int validIndex; Relation *toastidxs; - SnapshotData SnapshotToast; /* Fetch a valid index relation */ validIndex = toast_open_indexes(toastrel, @@ -1827,10 +1826,9 @@ toastrel_valueid_exists(Relation toastrel, Oid valueid) /* * Is there any such chunk? */ - init_toast_snapshot(&SnapshotToast); toastscan = systable_beginscan(toastrel, RelationGetRelid(toastidxs[validIndex]), - true, &SnapshotToast, 1, &toastkey); + true, SnapshotAny, 1, &toastkey); if (systable_getnext(toastscan) != NULL) result = true; diff --git a/src/backend/access/heap/visibilitymap.c b/src/backend/access/heap/visibilitymap.c index 4c2a13aeba..695567b4b0 100644 --- a/src/backend/access/heap/visibilitymap.c +++ b/src/backend/access/heap/visibilitymap.c @@ -3,7 +3,7 @@ * visibilitymap.c * bitmap for tracking visibility of heap tuples * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * @@ -610,11 +610,30 @@ vm_readbuf(Relation rel, BlockNumber blkno, bool extend) * Use ZERO_ON_ERROR mode, and initialize the page if necessary. It's * always safe to clear bits, so it's better to clear corrupt pages than * error out. + * + * The initialize-the-page part is trickier than it looks, because of the + * possibility of multiple backends doing this concurrently, and our + * desire to not uselessly take the buffer lock in the normal path where + * the page is OK. We must take the lock to initialize the page, so + * recheck page newness after we have the lock, in case someone else + * already did it. Also, because we initially check PageIsNew with no + * lock, it's possible to fall through and return the buffer while someone + * else is still initializing the page (i.e., we might see pd_upper as set + * but other page header fields are still zeroes). This is harmless for + * callers that will take a buffer lock themselves, but some callers + * inspect the page without any lock at all. The latter is OK only so + * long as it doesn't depend on the page header having correct contents. + * Current usage is safe because PageGetContents() does not require that. 
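The comment above describes a check/lock/recheck shape: the page's newness is tested cheaply without the lock for the common case, then tested again under the buffer lock before initializing, because another backend may have initialized it in between. The same shape reduced to a generic double-checked initialization sketch (pthreads and made-up names such as ensure_page_initialized; not buffer-manager code):

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

static pthread_mutex_t page_lock = PTHREAD_MUTEX_INITIALIZER;
static atomic_bool page_initialized;

static void
init_page(void)
{
	printf("initializing page\n");
	atomic_store(&page_initialized, true);
}

static void
ensure_page_initialized(void)
{
	/* cheap unlocked check covers the common already-initialized case */
	if (!atomic_load(&page_initialized))
	{
		pthread_mutex_lock(&page_lock);
		/* recheck under the lock: someone else may have beaten us to it */
		if (!atomic_load(&page_initialized))
			init_page();
		pthread_mutex_unlock(&page_lock);
	}
}

int
main(void)
{
	ensure_page_initialized();
	ensure_page_initialized();	/* second call takes the fast path */
	return 0;
}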
*/ buf = ReadBufferExtended(rel, VISIBILITYMAP_FORKNUM, blkno, RBM_ZERO_ON_ERROR, NULL); if (PageIsNew(BufferGetPage(buf))) - PageInit(BufferGetPage(buf), BLCKSZ, 0); + { + LockBuffer(buf, BUFFER_LOCK_EXCLUSIVE); + if (PageIsNew(BufferGetPage(buf))) + PageInit(BufferGetPage(buf), BLCKSZ, 0); + LockBuffer(buf, BUFFER_LOCK_UNLOCK); + } return buf; } @@ -626,10 +645,9 @@ static void vm_extend(Relation rel, BlockNumber vm_nblocks) { BlockNumber vm_nblocks_now; - Page pg; + PGAlignedBlock pg; - pg = (Page) palloc(BLCKSZ); - PageInit(pg, BLCKSZ, 0); + PageInit((Page) pg.data, BLCKSZ, 0); /* * We use the relation extension lock to lock out other backends trying to @@ -660,10 +678,10 @@ vm_extend(Relation rel, BlockNumber vm_nblocks) /* Now extend the file */ while (vm_nblocks_now < vm_nblocks) { - PageSetChecksumInplace(pg, vm_nblocks_now); + PageSetChecksumInplace((Page) pg.data, vm_nblocks_now); smgrextend(rel->rd_smgr, VISIBILITYMAP_FORKNUM, vm_nblocks_now, - (char *) pg, false); + pg.data, false); vm_nblocks_now++; } @@ -680,6 +698,4 @@ vm_extend(Relation rel, BlockNumber vm_nblocks) rel->rd_smgr->smgr_vm_nblocks = vm_nblocks_now; UnlockRelationForExtension(rel, ExclusiveLock); - - pfree(pg); } diff --git a/src/backend/access/index/amapi.c b/src/backend/access/index/amapi.c index 7b597a072f..f395cb1ab4 100644 --- a/src/backend/access/index/amapi.c +++ b/src/backend/access/index/amapi.c @@ -3,7 +3,7 @@ * amapi.c * Support routines for API for Postgres index access methods. * - * Copyright (c) 2015-2017, PostgreSQL Global Development Group + * Copyright (c) 2015-2018, PostgreSQL Global Development Group * * * IDENTIFICATION diff --git a/src/backend/access/index/amvalidate.c b/src/backend/access/index/amvalidate.c index 80865e9ff9..24f9927f82 100644 --- a/src/backend/access/index/amvalidate.c +++ b/src/backend/access/index/amvalidate.c @@ -3,7 +3,7 @@ * amvalidate.c * Support routines for index access methods' amvalidate functions. * - * Copyright (c) 2016-2017, PostgreSQL Global Development Group + * Copyright (c) 2016-2018, PostgreSQL Global Development Group * * * IDENTIFICATION @@ -140,9 +140,9 @@ identify_opfamily_groups(CatCList *oprlist, CatCList *proclist) /* * Validate the signature (argument and result types) of an opclass support - * function. Return TRUE if OK, FALSE if not. + * function. Return true if OK, false if not. * - * The "..." represents maxargs argument-type OIDs. If "exact" is TRUE, they + * The "..." represents maxargs argument-type OIDs. If "exact" is true, they * must match the function arg types exactly, else only binary-coercibly. * In any case the function result type must match restype exactly. */ @@ -184,7 +184,7 @@ check_amproc_signature(Oid funcid, Oid restype, bool exact, /* * Validate the signature (argument and result types) of an opclass operator. - * Return TRUE if OK, FALSE if not. + * Return true if OK, false if not. * * Currently, we can hard-wire this as accepting only binary operators. 
Also, * we can insist on exact type matches, since the given lefttype/righttype diff --git a/src/backend/access/index/genam.c b/src/backend/access/index/genam.c index 05d7da001a..9d08775687 100644 --- a/src/backend/access/index/genam.c +++ b/src/backend/access/index/genam.c @@ -3,7 +3,7 @@ * genam.c * general index access method routines * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * @@ -158,7 +158,8 @@ IndexScanEnd(IndexScanDesc scan) * * Construct a string describing the contents of an index entry, in the * form "(key_name, ...)=(key_value, ...)". This is currently used - * for building unique-constraint and exclusion-constraint error messages. + * for building unique-constraint and exclusion-constraint error messages, + * so only key columns of the index are checked and printed. * * Note that if the user does not have permissions to view all of the * columns involved then a NULL is returned. Returning a partial key seems @@ -179,14 +180,15 @@ BuildIndexValueDescription(Relation indexRelation, { StringInfoData buf; Form_pg_index idxrec; - HeapTuple ht_idx; - int natts = indexRelation->rd_rel->relnatts; + int indnkeyatts; int i; int keyno; Oid indexrelid = RelationGetRelid(indexRelation); Oid indrelid; AclResult aclresult; + indnkeyatts = IndexRelationGetNumberOfKeyAttributes(indexRelation); + /* * Check permissions- if the user does not have access to view all of the * key columns then return NULL to avoid leaking data. @@ -197,24 +199,13 @@ BuildIndexValueDescription(Relation indexRelation, * Next we need to check table-level SELECT access and then, if there is * no access there, check column-level permissions. */ - - /* - * Fetch the pg_index tuple by the Oid of the index - */ - ht_idx = SearchSysCache1(INDEXRELID, ObjectIdGetDatum(indexrelid)); - if (!HeapTupleIsValid(ht_idx)) - elog(ERROR, "cache lookup failed for index %u", indexrelid); - idxrec = (Form_pg_index) GETSTRUCT(ht_idx); - + idxrec = indexRelation->rd_index; indrelid = idxrec->indrelid; Assert(indexrelid == idxrec->indexrelid); /* RLS check- if RLS is enabled then we don't return anything. */ if (check_enable_rls(indrelid, InvalidOid, true) == RLS_ENABLED) - { - ReleaseSysCache(ht_idx); return NULL; - } /* Table-level SELECT is enough, if the user has it */ aclresult = pg_class_aclcheck(indrelid, GetUserId(), ACL_SELECT); @@ -224,7 +215,7 @@ BuildIndexValueDescription(Relation indexRelation, * No table-level access, so step through the columns in the index and * make sure the user has SELECT rights on all of them. 
*/ - for (keyno = 0; keyno < idxrec->indnatts; keyno++) + for (keyno = 0; keyno < indnkeyatts; keyno++) { AttrNumber attnum = idxrec->indkey.values[keyno]; @@ -239,18 +230,16 @@ BuildIndexValueDescription(Relation indexRelation, ACL_SELECT) != ACLCHECK_OK) { /* No access, so clean up and return */ - ReleaseSysCache(ht_idx); return NULL; } } } - ReleaseSysCache(ht_idx); initStringInfo(&buf); appendStringInfo(&buf, "(%s)=(", pg_get_indexdef_columns(indexrelid, true)); - for (i = 0; i < natts; i++) + for (i = 0; i < indnkeyatts; i++) { char *val; @@ -368,7 +357,7 @@ systable_beginscan(Relation heapRelation, { int j; - for (j = 0; j < irel->rd_index->indnatts; j++) + for (j = 0; j < IndexRelationGetNumberOfAttributes(irel); j++) { if (key[i].sk_attno == irel->rd_index->indkey.values[j]) { @@ -376,7 +365,7 @@ systable_beginscan(Relation heapRelation, break; } } - if (j == irel->rd_index->indnatts) + if (j == IndexRelationGetNumberOfAttributes(irel)) elog(ERROR, "column is not in index"); } @@ -570,7 +559,7 @@ systable_beginscan_ordered(Relation heapRelation, { int j; - for (j = 0; j < indexRelation->rd_index->indnatts; j++) + for (j = 0; j < IndexRelationGetNumberOfAttributes(indexRelation); j++) { if (key[i].sk_attno == indexRelation->rd_index->indkey.values[j]) { @@ -578,7 +567,7 @@ systable_beginscan_ordered(Relation heapRelation, break; } } - if (j == indexRelation->rd_index->indnatts) + if (j == IndexRelationGetNumberOfAttributes(indexRelation)) elog(ERROR, "column is not in index"); } diff --git a/src/backend/access/index/indexam.c b/src/backend/access/index/indexam.c index bef4255369..eade540ef5 100644 --- a/src/backend/access/index/indexam.c +++ b/src/backend/access/index/indexam.c @@ -3,7 +3,7 @@ * indexam.c * general index access method routines * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * @@ -73,8 +73,8 @@ #include "access/relscan.h" #include "access/transam.h" #include "access/xlog.h" -#include "catalog/catalog.h" #include "catalog/index.h" +#include "catalog/pg_type.h" #include "pgstat.h" #include "storage/bufmgr.h" #include "storage/lmgr.h" @@ -154,7 +154,8 @@ index_open(Oid relationId, LOCKMODE lockmode) r = relation_open(relationId, lockmode); - if (r->rd_rel->relkind != RELKIND_INDEX) + if (r->rd_rel->relkind != RELKIND_INDEX && + r->rd_rel->relkind != RELKIND_PARTITIONED_INDEX) ereport(ERROR, (errcode(ERRCODE_WRONG_OBJECT_TYPE), errmsg("\"%s\" is not an index", @@ -784,7 +785,7 @@ index_can_return(Relation indexRelation, int attno) { RELATION_CHECKS; - /* amcanreturn is optional; assume FALSE if not provided by AM */ + /* amcanreturn is optional; assume false if not provided by AM */ if (indexRelation->rd_amroutine->amcanreturn == NULL) return false; @@ -897,3 +898,72 @@ index_getprocinfo(Relation irel, return locinfo; } + +/* ---------------- + * index_store_float8_orderby_distances + * + * Convert AM distance function's results (that can be inexact) + * to ORDER BY types and save them into xs_orderbyvals/xs_orderbynulls + * for a possible recheck. 
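The new helper whose comment appears above stores double-precision distances into float8 or float4 ORDER BY slots. As a purely illustrative aside, unrelated to PostgreSQL internals, a float4 slot cannot represent an arbitrary double exactly:

#include <stdio.h>

int
main(void)
{
	double		distance = 0.1000000001;
	float		as_float4 = (float) distance;

	printf("double : %.10f\n", distance);
	printf("float4 : %.10f\n", (double) as_float4);
	printf("exact  : %s\n", ((double) as_float4 == distance) ? "yes" : "no");
	return 0;
}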
+ * ---------------- + */ +void +index_store_float8_orderby_distances(IndexScanDesc scan, Oid *orderByTypes, + double *distances, bool recheckOrderBy) +{ + int i; + + scan->xs_recheckorderby = recheckOrderBy; + + if (!distances) + { + Assert(!scan->xs_recheckorderby); + + for (i = 0; i < scan->numberOfOrderBys; i++) + { + scan->xs_orderbyvals[i] = (Datum) 0; + scan->xs_orderbynulls[i] = true; + } + + return; + } + + for (i = 0; i < scan->numberOfOrderBys; i++) + { + if (orderByTypes[i] == FLOAT8OID) + { +#ifndef USE_FLOAT8_BYVAL + /* must free any old value to avoid memory leakage */ + if (!scan->xs_orderbynulls[i]) + pfree(DatumGetPointer(scan->xs_orderbyvals[i])); +#endif + scan->xs_orderbyvals[i] = Float8GetDatum(distances[i]); + scan->xs_orderbynulls[i] = false; + } + else if (orderByTypes[i] == FLOAT4OID) + { + /* convert distance function's result to ORDER BY type */ +#ifndef USE_FLOAT4_BYVAL + /* must free any old value to avoid memory leakage */ + if (!scan->xs_orderbynulls[i]) + pfree(DatumGetPointer(scan->xs_orderbyvals[i])); +#endif + scan->xs_orderbyvals[i] = Float4GetDatum((float4) distances[i]); + scan->xs_orderbynulls[i] = false; + } + else + { + /* + * If the ordering operator's return value is anything else, we + * don't know how to convert the float8 bound calculated by the + * distance function to that. The executor won't actually need + * the order by values we return here, if there are no lossy + * results, so only insist on converting if the *recheck flag is + * set. + */ + if (scan->xs_recheckorderby) + elog(ERROR, "ORDER BY operator must return float8 or float4 if the distance function is lossy"); + scan->xs_orderbynulls[i] = true; + } + } +} diff --git a/src/backend/access/nbtree/README b/src/backend/access/nbtree/README index a3f11da8d5..3680e69b89 100644 --- a/src/backend/access/nbtree/README +++ b/src/backend/access/nbtree/README @@ -375,6 +375,25 @@ positives, so long as it never gives a false negative. This makes it possible to implement the test with a small counter value stored on each index page. +Fastpath For Index Insertion +---------------------------- + +We optimize for a common case of insertion of increasing index key +values by caching the last page to which this backend inserted the last +value, if this page was the rightmost leaf page. For the next insert, we +can then quickly check if the cached page is still the rightmost leaf +page and also the correct place to hold the current value. We can avoid +the cost of walking down the tree in such common cases. + +The optimization works on the assumption that there can only be one +non-ignorable leaf rightmost page, and so even a RecentGlobalXmin style +interlock isn't required. We cannot fail to detect that our hint was +invalidated, because there can only be one such page in the B-Tree at +any time. It's possible that the page will be deleted and recycled +without a backend's cached page also being detected as invalidated, but +only when we happen to recycle a block that once again gets recycled as the +rightmost leaf page. + On-the-Fly Deletion Of Index Tuples ----------------------------------- @@ -590,6 +609,23 @@ original search scankey is consulted as each index entry is sequentially scanned to decide whether to return the entry and whether the scan can stop (see _bt_checkkeys()). +We use term "pivot" index tuples to distinguish tuples which don't point +to heap tuples, but rather used for tree navigation. Pivot tuples includes +all tuples on non-leaf pages and high keys on leaf pages. 
Note that pivot +index tuples are only used to represent which part of the key space belongs +on each page, and can have attribute values copied from non-pivot tuples +that were deleted and killed by VACUUM some time ago. In principle, we could +truncate away attributes that are not needed for a page high key during a leaf +page split, provided that the remaining attributes distinguish the last index +tuple on the post-split left page as belonging on the left page, and the first +index tuple on the post-split right page as belonging on the right page. This +optimization is sometimes called suffix truncation, and may appear in a future +release. Since the high key is subsequently reused as the downlink in the +parent page for the new right page, suffix truncation can increase index +fan-out considerably by keeping pivot tuples short. INCLUDE indexes similarly +truncate away non-key attributes at the time of a leaf page split, +increasing fan-out. + Notes About Data Representation ------------------------------- @@ -623,56 +659,3 @@ routines must treat it accordingly. The actual key stored in the item is irrelevant, and need not be stored at all. This arrangement corresponds to the fact that an L&Y non-leaf page has one more pointer than key. - -Notes to Operator Class Implementors ------------------------------------- - -With this implementation, we require each supported combination of -datatypes to supply us with a comparison procedure via pg_amproc. -This procedure must take two nonnull values A and B and return an int32 < 0, -0, or > 0 if A < B, A = B, or A > B, respectively. The procedure must -not return INT_MIN for "A < B", since the value may be negated before -being tested for sign. A null result is disallowed, too. See nbtcompare.c -for examples. - -There are some basic assumptions that a btree operator family must satisfy: - -An = operator must be an equivalence relation; that is, for all non-null -values A,B,C of the datatype: - - A = A is true reflexive law - if A = B, then B = A symmetric law - if A = B and B = C, then A = C transitive law - -A < operator must be a strong ordering relation; that is, for all non-null -values A,B,C: - - A < A is false irreflexive law - if A < B and B < C, then A < C transitive law - -Furthermore, the ordering is total; that is, for all non-null values A,B: - - exactly one of A < B, A = B, and B < A is true trichotomy law - -(The trichotomy law justifies the definition of the comparison support -procedure, of course.) - -The other three operators are defined in terms of these two in the obvious way, -and must act consistently with them. - -For an operator family supporting multiple datatypes, the above laws must hold -when A,B,C are taken from any datatypes in the family. The transitive laws -are the trickiest to ensure, as in cross-type situations they represent -statements that the behaviors of two or three different operators are -consistent. As an example, it would not work to put float8 and numeric into -an opfamily, at least not with the current semantics that numerics are -converted to float8 for comparison to a float8. Because of the limited -accuracy of float8, this means there are distinct numeric values that will -compare equal to the same float8 value, and thus the transitive law fails. - -It should be fairly clear why a btree index requires these laws to hold within -a single datatype: without them there is no ordering to arrange the keys with. 
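The README text above explains that pivot tuples only delimit the key space, so non-key (INCLUDE) attributes can be dropped when a leaf high key is formed. A toy sketch of that idea, with made-up struct and function names rather than the on-disk index tuple format:

#include <stdio.h>
#include <string.h>

/* made-up layout: one key column plus one INCLUDE-style payload column */
typedef struct ToyTuple
{
	int		key;
	char	payload[16];
} ToyTuple;

/*
 * Form a "pivot" copy of a tuple: keep only what is needed to separate the
 * key spaces of two pages (the key column) and drop the non-key payload.
 */
static ToyTuple
make_pivot(const ToyTuple *src)
{
	ToyTuple	pivot;

	memset(&pivot, 0, sizeof(pivot));
	pivot.key = src->key;
	return pivot;
}

int
main(void)
{
	ToyTuple	last_left = {42, "included data"};
	ToyTuple	high_key = make_pivot(&last_left);

	printf("high key separates on key=%d, payload dropped (\"%s\")\n",
		   high_key.key, high_key.payload);
	return 0;
}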
-Also, index searches using a key of a different datatype require comparisons -to behave sanely across two datatypes. The extensions to three or more -datatypes within a family are not strictly required by the btree index -mechanism itself, but the planner relies on them for optimization purposes. diff --git a/src/backend/access/nbtree/nbtcompare.c b/src/backend/access/nbtree/nbtcompare.c index 4b131efb87..6f2ad23b5d 100644 --- a/src/backend/access/nbtree/nbtcompare.c +++ b/src/backend/access/nbtree/nbtcompare.c @@ -3,7 +3,7 @@ * nbtcompare.c * Comparison functions for btree access method. * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * @@ -22,11 +22,10 @@ * * The result is always an int32 regardless of the input datatype. * - * Although any negative int32 (except INT_MIN) is acceptable for reporting - * "<", and any positive int32 is acceptable for reporting ">", routines + * Although any negative int32 is acceptable for reporting "<", + * and any positive int32 is acceptable for reporting ">", routines * that work on 32-bit or wider datatypes can't just return "a - b". - * That could overflow and give the wrong answer. Also, one must not - * return INT_MIN to report "<", since some callers will negate the result. + * That could overflow and give the wrong answer. * * NOTE: it is critical that the comparison function impose a total order * on all non-NULL values of the data type, and that the datatype's @@ -44,13 +43,31 @@ * during an index access won't be recovered till end of query. This * primarily affects comparison routines for toastable datatypes; * they have to be careful to free any detoasted copy of an input datum. + * + * NOTE: we used to forbid comparison functions from returning INT_MIN, + * but that proves to be too error-prone because some platforms' versions + * of memcmp() etc can return INT_MIN. As a means of stress-testing + * callers, this file can be compiled with STRESS_SORT_INT_MIN defined + * to cause many of these functions to return INT_MIN or INT_MAX instead of + * their customary -1/+1. For production, though, that's not a good idea + * since users or third-party code might expect the traditional results. 
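The revised header comment above notes that comparator results can legitimately be INT_MIN (memcmp() may return it on some platforms) and that "a - b" shortcuts can overflow, and it introduces STRESS_SORT_INT_MIN to shake out callers that mishandle extreme return values. A standalone sketch of the overflow-safe three-way form together with the stress macros (the macro names mirror the ones added above; the rest is illustrative):

#include <limits.h>
#include <stdio.h>

#ifdef STRESS_SORT_INT_MIN
#define A_LESS_THAN_B	INT_MIN
#define A_GREATER_THAN_B INT_MAX
#else
#define A_LESS_THAN_B	(-1)
#define A_GREATER_THAN_B 1
#endif

/* safe three-way comparison: never computes a - b */
static int
int32_cmp(int a, int b)
{
	if (a > b)
		return A_GREATER_THAN_B;
	else if (a == b)
		return 0;
	else
		return A_LESS_THAN_B;
}

int
main(void)
{
	/*
	 * "INT_MAX - (-1)" would overflow (undefined behavior; on typical
	 * two's-complement hardware it wraps negative and would wrongly
	 * report "less than").  The explicit comparisons are immune to that.
	 */
	printf("int32_cmp(INT_MAX, -1) = %d\n", int32_cmp(INT_MAX, -1));
	return 0;
}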
*------------------------------------------------------------------------- */ #include "postgres.h" +#include + #include "utils/builtins.h" #include "utils/sortsupport.h" +#ifdef STRESS_SORT_INT_MIN +#define A_LESS_THAN_B INT_MIN +#define A_GREATER_THAN_B INT_MAX +#else +#define A_LESS_THAN_B (-1) +#define A_GREATER_THAN_B 1 +#endif + Datum btboolcmp(PG_FUNCTION_ARGS) @@ -95,11 +112,11 @@ btint4cmp(PG_FUNCTION_ARGS) int32 b = PG_GETARG_INT32(1); if (a > b) - PG_RETURN_INT32(1); + PG_RETURN_INT32(A_GREATER_THAN_B); else if (a == b) PG_RETURN_INT32(0); else - PG_RETURN_INT32(-1); + PG_RETURN_INT32(A_LESS_THAN_B); } static int @@ -109,11 +126,11 @@ btint4fastcmp(Datum x, Datum y, SortSupport ssup) int32 b = DatumGetInt32(y); if (a > b) - return 1; + return A_GREATER_THAN_B; else if (a == b) return 0; else - return -1; + return A_LESS_THAN_B; } Datum @@ -132,11 +149,11 @@ btint8cmp(PG_FUNCTION_ARGS) int64 b = PG_GETARG_INT64(1); if (a > b) - PG_RETURN_INT32(1); + PG_RETURN_INT32(A_GREATER_THAN_B); else if (a == b) PG_RETURN_INT32(0); else - PG_RETURN_INT32(-1); + PG_RETURN_INT32(A_LESS_THAN_B); } static int @@ -146,11 +163,11 @@ btint8fastcmp(Datum x, Datum y, SortSupport ssup) int64 b = DatumGetInt64(y); if (a > b) - return 1; + return A_GREATER_THAN_B; else if (a == b) return 0; else - return -1; + return A_LESS_THAN_B; } Datum @@ -169,11 +186,11 @@ btint48cmp(PG_FUNCTION_ARGS) int64 b = PG_GETARG_INT64(1); if (a > b) - PG_RETURN_INT32(1); + PG_RETURN_INT32(A_GREATER_THAN_B); else if (a == b) PG_RETURN_INT32(0); else - PG_RETURN_INT32(-1); + PG_RETURN_INT32(A_LESS_THAN_B); } Datum @@ -183,11 +200,11 @@ btint84cmp(PG_FUNCTION_ARGS) int32 b = PG_GETARG_INT32(1); if (a > b) - PG_RETURN_INT32(1); + PG_RETURN_INT32(A_GREATER_THAN_B); else if (a == b) PG_RETURN_INT32(0); else - PG_RETURN_INT32(-1); + PG_RETURN_INT32(A_LESS_THAN_B); } Datum @@ -197,11 +214,11 @@ btint24cmp(PG_FUNCTION_ARGS) int32 b = PG_GETARG_INT32(1); if (a > b) - PG_RETURN_INT32(1); + PG_RETURN_INT32(A_GREATER_THAN_B); else if (a == b) PG_RETURN_INT32(0); else - PG_RETURN_INT32(-1); + PG_RETURN_INT32(A_LESS_THAN_B); } Datum @@ -211,11 +228,11 @@ btint42cmp(PG_FUNCTION_ARGS) int16 b = PG_GETARG_INT16(1); if (a > b) - PG_RETURN_INT32(1); + PG_RETURN_INT32(A_GREATER_THAN_B); else if (a == b) PG_RETURN_INT32(0); else - PG_RETURN_INT32(-1); + PG_RETURN_INT32(A_LESS_THAN_B); } Datum @@ -225,11 +242,11 @@ btint28cmp(PG_FUNCTION_ARGS) int64 b = PG_GETARG_INT64(1); if (a > b) - PG_RETURN_INT32(1); + PG_RETURN_INT32(A_GREATER_THAN_B); else if (a == b) PG_RETURN_INT32(0); else - PG_RETURN_INT32(-1); + PG_RETURN_INT32(A_LESS_THAN_B); } Datum @@ -239,11 +256,11 @@ btint82cmp(PG_FUNCTION_ARGS) int16 b = PG_GETARG_INT16(1); if (a > b) - PG_RETURN_INT32(1); + PG_RETURN_INT32(A_GREATER_THAN_B); else if (a == b) PG_RETURN_INT32(0); else - PG_RETURN_INT32(-1); + PG_RETURN_INT32(A_LESS_THAN_B); } Datum @@ -253,11 +270,11 @@ btoidcmp(PG_FUNCTION_ARGS) Oid b = PG_GETARG_OID(1); if (a > b) - PG_RETURN_INT32(1); + PG_RETURN_INT32(A_GREATER_THAN_B); else if (a == b) PG_RETURN_INT32(0); else - PG_RETURN_INT32(-1); + PG_RETURN_INT32(A_LESS_THAN_B); } static int @@ -267,11 +284,11 @@ btoidfastcmp(Datum x, Datum y, SortSupport ssup) Oid b = DatumGetObjectId(y); if (a > b) - return 1; + return A_GREATER_THAN_B; else if (a == b) return 0; else - return -1; + return A_LESS_THAN_B; } Datum @@ -299,9 +316,9 @@ btoidvectorcmp(PG_FUNCTION_ARGS) if (a->values[i] != b->values[i]) { if (a->values[i] > b->values[i]) - PG_RETURN_INT32(1); + 
PG_RETURN_INT32(A_GREATER_THAN_B); else - PG_RETURN_INT32(-1); + PG_RETURN_INT32(A_LESS_THAN_B); } } PG_RETURN_INT32(0); diff --git a/src/backend/access/nbtree/nbtinsert.c b/src/backend/access/nbtree/nbtinsert.c index bf963fcdef..582e5b0652 100644 --- a/src/backend/access/nbtree/nbtinsert.c +++ b/src/backend/access/nbtree/nbtinsert.c @@ -3,7 +3,7 @@ * nbtinsert.c * Item insertion in Lehman and Yao btrees for Postgres. * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * @@ -23,8 +23,11 @@ #include "miscadmin.h" #include "storage/lmgr.h" #include "storage/predicate.h" +#include "storage/smgr.h" #include "utils/tqual.h" +/* Minimum tree height for application of fastpath optimization */ +#define BTREE_FASTPATH_MIN_LEVEL 2 typedef struct { @@ -85,7 +88,6 @@ static bool _bt_isequal(TupleDesc itupdesc, Page page, OffsetNumber offnum, int keysz, ScanKey scankey); static void _bt_vacuum_one_page(Relation rel, Buffer buffer, Relation heapRel); - /* * _bt_doinsert() -- Handle insertion of a single index tuple in the tree. * @@ -99,8 +101,8 @@ static void _bt_vacuum_one_page(Relation rel, Buffer buffer, Relation heapRel); * don't actually insert. * * The result value is only significant for UNIQUE_CHECK_PARTIAL: - * it must be TRUE if the entry is known unique, else FALSE. - * (In the current implementation we'll also return TRUE after a + * it must be true if the entry is known unique, else false. + * (In the current implementation we'll also return true after a * successful UNIQUE_CHECK_YES or UNIQUE_CHECK_EXISTING call, but * that's just a coding artifact.) */ @@ -109,34 +111,118 @@ _bt_doinsert(Relation rel, IndexTuple itup, IndexUniqueCheck checkUnique, Relation heapRel) { bool is_unique = false; - int natts = rel->rd_rel->relnatts; + int indnkeyatts; ScanKey itup_scankey; - BTStack stack; + BTStack stack = NULL; Buffer buf; OffsetNumber offset; + bool fastpath; + + indnkeyatts = IndexRelationGetNumberOfKeyAttributes(rel); + Assert(indnkeyatts != 0); /* we need an insertion scan key to do our search, so build one */ itup_scankey = _bt_mkscankey(rel, itup); + /* + * It's very common to have an index on an auto-incremented or + * monotonically increasing value. In such cases, every insertion happens + * towards the end of the index. We try to optimize that case by caching + * the right-most leaf of the index. If our cached block is still the + * rightmost leaf, has enough free space to accommodate a new entry and + * the insertion key is strictly greater than the first key in this page, + * then we can safely conclude that the new key will be inserted in the + * cached block. So we simply search within the cached block and insert + * the key at the appropriate location. We call it a fastpath. + * + * Testing has revealed, though, that the fastpath can result in increased + * contention on the exclusive-lock on the rightmost leaf page. So we + * conditionally check if the lock is available. If it's not available + * then we simply abandon the fastpath and take the regular path. This + * makes sense because unavailability of the lock also signals that some + * other backend might be concurrently inserting into the page, thus + * reducing our chances to finding an insertion place in this page. 
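The fastpath described above hinges on a conditional lock acquisition: if the exclusive lock on the cached rightmost leaf is not immediately available, the backend abandons the shortcut rather than wait, since contention is itself a hint that the page is about to change. The same try-or-fall-back shape in plain pthreads (illustrative names; no relation to the buffer manager):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t rightmost_lock = PTHREAD_MUTEX_INITIALIZER;

static void
slow_path(void)
{
	printf("lock busy: fall back to a full descent of the tree\n");
}

static void
fast_path(void)
{
	printf("lock acquired: insert directly into the cached page\n");
	pthread_mutex_unlock(&rightmost_lock);
}

int
main(void)
{
	/* try the lock without blocking; never wait for it */
	if (pthread_mutex_trylock(&rightmost_lock) == 0)
		fast_path();
	else
		slow_path();
	return 0;
}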
+ */ top: - /* find the first page containing this key */ - stack = _bt_search(rel, natts, itup_scankey, false, &buf, BT_WRITE, NULL); - + fastpath = false; offset = InvalidOffsetNumber; + if (RelationGetTargetBlock(rel) != InvalidBlockNumber) + { + Size itemsz; + Page page; + BTPageOpaque lpageop; - /* trade in our read lock for a write lock */ - LockBuffer(buf, BUFFER_LOCK_UNLOCK); - LockBuffer(buf, BT_WRITE); + /* + * Conditionally acquire exclusive lock on the buffer before doing any + * checks. If we don't get the lock, we simply follow slowpath. If we + * do get the lock, this ensures that the index state cannot change, + * as far as the rightmost part of the index is concerned. + */ + buf = ReadBuffer(rel, RelationGetTargetBlock(rel)); - /* - * If the page was split between the time that we surrendered our read - * lock and acquired our write lock, then this page may no longer be the - * right place for the key we want to insert. In this case, we need to - * move right in the tree. See Lehman and Yao for an excruciatingly - * precise description. - */ - buf = _bt_moveright(rel, buf, natts, itup_scankey, false, - true, stack, BT_WRITE, NULL); + if (ConditionalLockBuffer(buf)) + { + _bt_checkpage(rel, buf); + + page = BufferGetPage(buf); + + lpageop = (BTPageOpaque) PageGetSpecialPointer(page); + itemsz = IndexTupleSize(itup); + itemsz = MAXALIGN(itemsz); /* be safe, PageAddItem will do this + * but we need to be consistent */ + + /* + * Check if the page is still the rightmost leaf page, has enough + * free space to accommodate the new tuple, and the insertion scan + * key is strictly greater than the first key on the page. + */ + if (P_ISLEAF(lpageop) && P_RIGHTMOST(lpageop) && + !P_IGNORE(lpageop) && + (PageGetFreeSpace(page) > itemsz) && + PageGetMaxOffsetNumber(page) >= P_FIRSTDATAKEY(lpageop) && + _bt_compare(rel, indnkeyatts, itup_scankey, page, + P_FIRSTDATAKEY(lpageop)) > 0) + { + /* + * The right-most block should never have an incomplete split. + * But be paranoid and check for it anyway. + */ + Assert(!P_INCOMPLETE_SPLIT(lpageop)); + fastpath = true; + } + else + { + _bt_relbuf(rel, buf); + + /* + * Something did not work out. Just forget about the cached + * block and follow the normal path. It might be set again if + * the conditions are favourable. + */ + RelationSetTargetBlock(rel, InvalidBlockNumber); + } + } + else + { + ReleaseBuffer(buf); + + /* + * If someone's holding a lock, it's likely to change anyway, so + * don't try again until we get an updated rightmost leaf. + */ + RelationSetTargetBlock(rel, InvalidBlockNumber); + } + } + + if (!fastpath) + { + /* + * Find the first page containing this key. Buffer returned by + * _bt_search() is locked in exclusive mode. + */ + stack = _bt_search(rel, indnkeyatts, itup_scankey, false, &buf, BT_WRITE, + NULL); + } /* * If we're not allowing duplicates, make sure the key isn't already in @@ -150,7 +236,7 @@ _bt_doinsert(Relation rel, IndexTuple itup, * inserter can be making the check at one time. Furthermore, once we are * past the check we hold write locks continuously until we have performed * our insertion, so no later inserter can fail to see our insertion. - * (This requires some care in _bt_insertonpg.) + * (This requires some care in _bt_findinsertloc.) * * If we must wait for another xact, we release the lock while waiting, * and then must start over completely. 
@@ -164,7 +250,7 @@ _bt_doinsert(Relation rel, IndexTuple itup, TransactionId xwait; uint32 speculativeToken; - offset = _bt_binsrch(rel, buf, natts, itup_scankey, false); + offset = _bt_binsrch(rel, buf, indnkeyatts, itup_scankey, false); xwait = _bt_check_unique(rel, itup, heapRel, buf, offset, itup_scankey, checkUnique, &is_unique, &speculativeToken); @@ -184,7 +270,8 @@ _bt_doinsert(Relation rel, IndexTuple itup, XactLockTableWait(xwait, rel, &itup->t_tid, XLTW_InsertIndex); /* start over... */ - _bt_freestack(stack); + if (stack) + _bt_freestack(stack); goto top; } } @@ -197,10 +284,12 @@ _bt_doinsert(Relation rel, IndexTuple itup, * actual location of the insert is hard to predict because of the * random search used to prevent O(N^2) performance when there are * many duplicate entries, we can just use the "first valid" page. + * This reasoning also applies to INCLUDE indexes, whose extra + * attributes are not considered part of the key space. */ CheckForSerializableConflictIn(rel, NULL, buf); /* do the insertion */ - _bt_findinsertloc(rel, &buf, &offset, natts, itup_scankey, itup, + _bt_findinsertloc(rel, &buf, &offset, indnkeyatts, itup_scankey, itup, stack, heapRel); _bt_insertonpg(rel, buf, InvalidBuffer, stack, itup, offset, false); } @@ -211,7 +300,8 @@ _bt_doinsert(Relation rel, IndexTuple itup, } /* be tidy */ - _bt_freestack(stack); + if (stack) + _bt_freestack(stack); _bt_freeskey(itup_scankey); return is_unique; @@ -243,7 +333,7 @@ _bt_check_unique(Relation rel, IndexTuple itup, Relation heapRel, uint32 *speculativeToken) { TupleDesc itupdesc = RelationGetDescr(rel); - int natts = rel->rd_rel->relnatts; + int indnkeyatts = IndexRelationGetNumberOfKeyAttributes(rel); SnapshotData SnapshotDirty; OffsetNumber maxoff; Page page; @@ -302,7 +392,7 @@ _bt_check_unique(Relation rel, IndexTuple itup, Relation heapRel, * in real comparison, but only for ordering/finding items on * pages. - vadim 03/24/97 */ - if (!_bt_isequal(itupdesc, page, offset, natts, itup_scankey)) + if (!_bt_isequal(itupdesc, page, offset, indnkeyatts, itup_scankey)) break; /* we're past all the equal tuples */ /* okay, we gotta fetch the heap tuple ... */ @@ -467,7 +557,7 @@ _bt_check_unique(Relation rel, IndexTuple itup, Relation heapRel, if (P_RIGHTMOST(opaque)) break; if (!_bt_isequal(itupdesc, page, P_HIKEY, - natts, itup_scankey)) + indnkeyatts, itup_scankey)) break; /* Advance to next non-dead page --- there must be one */ for (;;) @@ -558,7 +648,7 @@ _bt_findinsertloc(Relation rel, lpageop = (BTPageOpaque) PageGetSpecialPointer(page); - itemsz = IndexTupleDSize(*newtup); + itemsz = IndexTupleSize(newtup); itemsz = MAXALIGN(itemsz); /* be safe, PageAddItem will do this but we * need to be consistent */ @@ -716,18 +806,18 @@ _bt_findinsertloc(Relation rel, * insertion, and the buffer must be pinned and write-locked. On return, * we will have dropped both the pin and the lock on the buffer. * - * When inserting to a non-leaf page, 'cbuf' is the left-sibling of the - * page we're inserting the downlink for. This function will clear the + * This routine only performs retail tuple insertions. 'itup' should + * always be either a non-highkey leaf item, or a downlink (new high + * key items are created indirectly, when a page is split). When + * inserting to a non-leaf page, 'cbuf' is the left-sibling of the page + * we're inserting the downlink for. This function will clear the * INCOMPLETE_SPLIT flag on it, and release the buffer. * * The locking interactions in this code are critical. 
You should * grok Lehman and Yao's paper before making any changes. In addition, * you need to understand how we disambiguate duplicate keys in this * implementation, in order to be able to find our location using - * L&Y "move right" operations. Since we may insert duplicate user - * keys, and since these dups may propagate up the tree, we use the - * 'afteritem' parameter to position ourselves correctly for the - * insertion on internal pages. + * L&Y "move right" operations. *---------- */ static void @@ -749,13 +839,20 @@ _bt_insertonpg(Relation rel, /* child buffer must be given iff inserting on an internal page */ Assert(P_ISLEAF(lpageop) == !BufferIsValid(cbuf)); + /* tuple must have appropriate number of attributes */ + Assert(!P_ISLEAF(lpageop) || + BTreeTupleGetNAtts(itup, rel) == + IndexRelationGetNumberOfAttributes(rel)); + Assert(P_ISLEAF(lpageop) || + BTreeTupleGetNAtts(itup, rel) == + IndexRelationGetNumberOfKeyAttributes(rel)); /* The caller should've finished any incomplete splits already. */ if (P_INCOMPLETE_SPLIT(lpageop)) elog(ERROR, "cannot insert to incompletely split page %u", BufferGetBlockNumber(buf)); - itemsz = IndexTupleDSize(*itup); + itemsz = IndexTupleSize(itup); itemsz = MAXALIGN(itemsz); /* be safe, PageAddItem will do this but we * need to be consistent */ @@ -773,6 +870,24 @@ _bt_insertonpg(Relation rel, bool newitemonleft; Buffer rbuf; + /* + * If we're here then a pagesplit is needed. We should never reach + * here if we're using the fastpath since we should have checked for + * all the required conditions, including the fact that this page has + * enough freespace. Note that this routine can in theory deal with + * the situation where a NULL stack pointer is passed (that's what + * would happen if the fastpath is taken), like it does during crash + * recovery. But that path is much slower, defeating the very purpose + * of the optimization. The following assertion should protect us + * from any future code changes that invalidate those assumptions. + * + * Note that whenever we fail to take the fastpath, we clear the + * cached block. Checking for a valid cached block at this point is + * enough to decide whether we're in a fastpath or not. + */ + Assert(!(P_ISLEAF(lpageop) && + BlockNumberIsValid(RelationGetTargetBlock(rel)))); + /* Choose the split point */ firstright = _bt_findsplitloc(rel, page, newitemoff, itemsz, @@ -810,6 +925,7 @@ _bt_insertonpg(Relation rel, BTMetaPageData *metad = NULL; OffsetNumber itup_off; BlockNumber itup_blkno; + BlockNumber cachedBlock = InvalidBlockNumber; itup_off = newitemoff; itup_blkno = BufferGetBlockNumber(buf); @@ -837,6 +953,18 @@ _bt_insertonpg(Relation rel, } } + /* + * Every internal page should have exactly one negative infinity item + * at all times. Only _bt_split() and _bt_newroot() should add items + * that become negative infinity items through truncation, since + * they're the only routines that allocate new internal pages. Do not + * allow a retail insertion of a new item at the negative infinity + * offset. + */ + if (!P_ISLEAF(lpageop) && newitemoff == P_FIRSTDATAKEY(lpageop)) + elog(ERROR, "cannot insert second negative infinity item in block %u of index \"%s\"", + itup_blkno, RelationGetRelationName(rel)); + /* Do the update. 
No ereport(ERROR) until changes are logged */ START_CRIT_SECTION(); @@ -848,6 +976,9 @@ _bt_insertonpg(Relation rel, if (BufferIsValid(metabuf)) { + /* upgrade meta-page if needed */ + if (metad->btm_version < BTREE_VERSION) + _bt_upgrademetapage(metapg); metad->btm_fastroot = itup_blkno; metad->btm_fastlevel = lpageop->btpo.level; MarkBufferDirty(metabuf); @@ -864,6 +995,15 @@ _bt_insertonpg(Relation rel, MarkBufferDirty(cbuf); } + /* + * Cache the block information if we just inserted into the rightmost + * leaf page of the index and it's not the root page. For very small + * index where root is also the leaf, there is no point trying for any + * optimization. + */ + if (P_RIGHTMOST(lpageop) && P_ISLEAF(lpageop) && !P_ISROOT(lpageop)) + cachedBlock = BufferGetBlockNumber(buf); + /* XLOG stuff */ if (RelationNeedsWAL(rel)) { @@ -871,7 +1011,6 @@ _bt_insertonpg(Relation rel, xl_btree_metadata xlmeta; uint8 xlinfo; XLogRecPtr recptr; - IndexTupleData trunctuple; xlrec.offnum = itup_off; @@ -897,24 +1036,18 @@ _bt_insertonpg(Relation rel, xlmeta.level = metad->btm_level; xlmeta.fastroot = metad->btm_fastroot; xlmeta.fastlevel = metad->btm_fastlevel; + xlmeta.oldest_btpo_xact = metad->btm_oldest_btpo_xact; + xlmeta.last_cleanup_num_heap_tuples = + metad->btm_last_cleanup_num_heap_tuples; - XLogRegisterBuffer(2, metabuf, REGBUF_WILL_INIT); + XLogRegisterBuffer(2, metabuf, REGBUF_WILL_INIT | REGBUF_STANDARD); XLogRegisterBufData(2, (char *) &xlmeta, sizeof(xl_btree_metadata)); xlinfo = XLOG_BTREE_INSERT_META; } - /* Read comments in _bt_pgaddtup */ XLogRegisterBuffer(0, buf, REGBUF_STANDARD); - if (!P_ISLEAF(lpageop) && newitemoff == P_FIRSTDATAKEY(lpageop)) - { - trunctuple = *itup; - trunctuple.t_info = sizeof(IndexTupleData); - XLogRegisterBufData(0, (char *) &trunctuple, - sizeof(IndexTupleData)); - } - else - XLogRegisterBufData(0, (char *) itup, IndexTupleDSize(*itup)); + XLogRegisterBufData(0, (char *) itup, IndexTupleSize(itup)); recptr = XLogInsert(RM_BTREE_ID, xlinfo); @@ -938,6 +1071,23 @@ _bt_insertonpg(Relation rel, if (BufferIsValid(cbuf)) _bt_relbuf(rel, cbuf); _bt_relbuf(rel, buf); + + /* + * If we decided to cache the insertion target block, then set it now. + * But before that, check for the height of the tree and don't go for + * the optimization for small indexes. We defer that check to this + * point to ensure that we don't call _bt_getrootheight while holding + * lock on any other block. + * + * We do this after dropping locks on all buffers. So the information + * about whether the insertion block is still the rightmost block or + * not may have changed in between. But we will deal with that during + * next insert operation. No special care is required while setting + * it. 
+ */ + if (BlockNumberIsValid(cachedBlock) && + _bt_getrootheight(rel) >= BTREE_FASTPATH_MIN_LEVEL) + RelationSetTargetBlock(rel, cachedBlock); } } @@ -981,6 +1131,9 @@ _bt_split(Relation rel, Buffer buf, Buffer cbuf, OffsetNumber firstright, OffsetNumber maxoff; OffsetNumber i; bool isleaf; + IndexTuple lefthikey; + int indnatts = IndexRelationGetNumberOfAttributes(rel); + int indnkeyatts = IndexRelationGetNumberOfKeyAttributes(rel); /* Acquire a new page to split into */ rbuf = _bt_getbuf(rel, P_NEW, BT_WRITE); @@ -1050,6 +1203,7 @@ _bt_split(Relation rel, Buffer buf, Buffer cbuf, OffsetNumber firstright, itemid = PageGetItemId(origpage, P_HIKEY); itemsz = ItemIdGetLength(itemid); item = (IndexTuple) PageGetItem(origpage, itemid); + Assert(BTreeTupleGetNAtts(item, rel) == indnkeyatts); if (PageAddItem(rightpage, (Item) item, itemsz, rightoff, false, false) == InvalidOffsetNumber) { @@ -1080,7 +1234,28 @@ _bt_split(Relation rel, Buffer buf, Buffer cbuf, OffsetNumber firstright, itemsz = ItemIdGetLength(itemid); item = (IndexTuple) PageGetItem(origpage, itemid); } - if (PageAddItem(leftpage, (Item) item, itemsz, leftoff, + + /* + * Truncate non-key (INCLUDE) attributes of the high key item before + * inserting it on the left page. This only needs to happen at the leaf + * level, since in general all pivot tuple values originate from leaf + * level high keys. This isn't just about avoiding unnecessary work, + * though; truncating unneeded key attributes (more aggressive suffix + * truncation) can only be performed at the leaf level anyway. This is + * because a pivot tuple in a grandparent page must guide a search not + * only to the correct parent page, but also to the correct leaf page. + */ + if (indnatts != indnkeyatts && isleaf) + { + lefthikey = _bt_nonkey_truncate(rel, item); + itemsz = IndexTupleSize(lefthikey); + itemsz = MAXALIGN(itemsz); + } + else + lefthikey = item; + + Assert(BTreeTupleGetNAtts(lefthikey, rel) == indnkeyatts); + if (PageAddItem(leftpage, (Item) lefthikey, itemsz, leftoff, false, false) == InvalidOffsetNumber) { memset(rightpage, 0, BufferGetPageSize(rbuf)); @@ -1089,6 +1264,9 @@ _bt_split(Relation rel, Buffer buf, Buffer cbuf, OffsetNumber firstright, origpagenumber, RelationGetRelationName(rel)); } leftoff = OffsetNumberNext(leftoff); + /* be tidy */ + if (lefthikey != item) + pfree(lefthikey); /* * Now transfer all the data items to the appropriate page. @@ -1269,6 +1447,7 @@ _bt_split(Relation rel, Buffer buf, Buffer cbuf, OffsetNumber firstright, xl_btree_split xlrec; uint8 xlinfo; XLogRecPtr recptr; + bool loglhikey = false; xlrec.level = ropaque->btpo.level; xlrec.firstright = firstright; @@ -1298,18 +1477,20 @@ _bt_split(Relation rel, Buffer buf, Buffer cbuf, OffsetNumber firstright, XLogRegisterBufData(0, (char *) newitem, MAXALIGN(newitemsz)); /* Log left page */ - if (!isleaf) + if (!isleaf || indnatts != indnkeyatts) { /* - * We must also log the left page's high key, because the right - * page's leftmost key is suppressed on non-leaf levels. Show it - * as belonging to the left page buffer, so that it is not stored - * if XLogInsert decides it needs a full-page image of the left - * page. + * We must also log the left page's high key. There are two + * reasons for that: right page's leftmost key is suppressed on + * non-leaf levels and in covering indexes included columns are + * truncated from high keys. Show it as belonging to the left + * page buffer, so that it is not stored if XLogInsert decides it + * needs a full-page image of the left page. 
*/ itemid = PageGetItemId(origpage, P_HIKEY); item = (IndexTuple) PageGetItem(origpage, itemid); XLogRegisterBufData(0, (char *) item, MAXALIGN(IndexTupleSize(item))); + loglhikey = true; } /* @@ -1328,7 +1509,9 @@ _bt_split(Relation rel, Buffer buf, Buffer cbuf, OffsetNumber firstright, (char *) rightpage + ((PageHeader) rightpage)->pd_upper, ((PageHeader) rightpage)->pd_special - ((PageHeader) rightpage)->pd_upper); - xlinfo = newitemonleft ? XLOG_BTREE_SPLIT_L : XLOG_BTREE_SPLIT_R; + xlinfo = newitemonleft ? + (loglhikey ? XLOG_BTREE_SPLIT_L_HIGHKEY : XLOG_BTREE_SPLIT_L) : + (loglhikey ? XLOG_BTREE_SPLIT_R_HIGHKEY : XLOG_BTREE_SPLIT_R); recptr = XLogInsert(RM_BTREE_ID, xlinfo); PageSetLSN(origpage, recptr); @@ -1558,7 +1741,12 @@ _bt_checksplitloc(FindSplitData *state, /* * The first item on the right page becomes the high key of the left page; - * therefore it counts against left space as well as right space. + * therefore it counts against left space as well as right space. When + * index has included attributes, then those attributes of left page high + * key will be truncated leaving that page with slightly more free space. + * However, that shouldn't affect our ability to find valid split + * location, because anyway split location should exists even without high + * key truncation. */ leftfree -= firstrightitemsz; @@ -1681,18 +1869,18 @@ _bt_insert_parent(Relation rel, stack = &fakestack; stack->bts_blkno = BufferGetBlockNumber(pbuf); stack->bts_offset = InvalidOffsetNumber; - /* bts_btentry will be initialized below */ + stack->bts_btentry = InvalidBlockNumber; stack->bts_parent = NULL; _bt_relbuf(rel, pbuf); } - /* get high key from left page == lowest key on new right page */ + /* get high key from left page == lower bound for new right page */ ritem = (IndexTuple) PageGetItem(page, PageGetItemId(page, P_HIKEY)); /* form an index tuple that points at the new right page */ new_item = CopyIndexTuple(ritem); - ItemPointerSet(&(new_item->t_tid), rbknum, P_HIKEY); + BTreeInnerTupleSetDownLink(new_item, rbknum); /* * Find the parent buffer and get the parent page. @@ -1701,7 +1889,7 @@ _bt_insert_parent(Relation rel, * want to find parent pointing to where we are, right ? - vadim * 05/27/97 */ - ItemPointerSet(&(stack->bts_btentry.t_tid), bknum, P_HIKEY); + stack->bts_btentry = bknum; pbuf = _bt_getstackbuf(rel, stack, BT_WRITE); /* @@ -1856,7 +2044,8 @@ _bt_getstackbuf(Relation rel, BTStack stack, int access) { itemid = PageGetItemId(page, offnum); item = (IndexTuple) PageGetItem(page, itemid); - if (BTEntrySame(item, &stack->bts_btentry)) + + if (BTreeInnerTupleGetDownLink(item) == stack->bts_btentry) { /* Return accurate pointer to where link is now */ stack->bts_blkno = blkno; @@ -1871,7 +2060,8 @@ _bt_getstackbuf(Relation rel, BTStack stack, int access) { itemid = PageGetItemId(page, offnum); item = (IndexTuple) PageGetItem(page, itemid); - if (BTEntrySame(item, &stack->bts_btentry)) + + if (BTreeInnerTupleGetDownLink(item) == stack->bts_btentry) { /* Return accurate pointer to where link is now */ stack->bts_blkno = blkno; @@ -1957,7 +2147,8 @@ _bt_newroot(Relation rel, Buffer lbuf, Buffer rbuf) left_item_sz = sizeof(IndexTupleData); left_item = (IndexTuple) palloc(left_item_sz); left_item->t_info = left_item_sz; - ItemPointerSet(&(left_item->t_tid), lbkno, P_HIKEY); + BTreeInnerTupleSetDownLink(left_item, lbkno); + BTreeTupleSetNAtts(left_item, 0); /* * Create downlink item for right page. 
The key for it is obtained from @@ -1967,11 +2158,15 @@ _bt_newroot(Relation rel, Buffer lbuf, Buffer rbuf) right_item_sz = ItemIdGetLength(itemid); item = (IndexTuple) PageGetItem(lpage, itemid); right_item = CopyIndexTuple(item); - ItemPointerSet(&(right_item->t_tid), rbkno, P_HIKEY); + BTreeInnerTupleSetDownLink(right_item, rbkno); /* NO EREPORT(ERROR) from here till newroot op is logged */ START_CRIT_SECTION(); + /* upgrade metapage if needed */ + if (metad->btm_version < BTREE_VERSION) + _bt_upgrademetapage(metapg); + /* set btree special data */ rootopaque = (BTPageOpaque) PageGetSpecialPointer(rootpage); rootopaque->btpo_prev = rootopaque->btpo_next = P_NONE; @@ -1994,6 +2189,7 @@ _bt_newroot(Relation rel, Buffer lbuf, Buffer rbuf) * Note: we *must* insert the two items in item-number order, for the * benefit of _bt_restore_page(). */ + Assert(BTreeTupleGetNAtts(left_item, rel) == 0); if (PageAddItem(rootpage, (Item) left_item, left_item_sz, P_HIKEY, false, false) == InvalidOffsetNumber) elog(PANIC, "failed to add leftkey to new root page" @@ -2003,6 +2199,8 @@ _bt_newroot(Relation rel, Buffer lbuf, Buffer rbuf) /* * insert the right page pointer into the new root page. */ + Assert(BTreeTupleGetNAtts(right_item, rel) == + IndexRelationGetNumberOfKeyAttributes(rel)); if (PageAddItem(rootpage, (Item) right_item, right_item_sz, P_FIRSTKEY, false, false) == InvalidOffsetNumber) elog(PANIC, "failed to add rightkey to new root page" @@ -2032,12 +2230,14 @@ _bt_newroot(Relation rel, Buffer lbuf, Buffer rbuf) XLogRegisterBuffer(0, rootbuf, REGBUF_WILL_INIT); XLogRegisterBuffer(1, lbuf, REGBUF_STANDARD); - XLogRegisterBuffer(2, metabuf, REGBUF_WILL_INIT); + XLogRegisterBuffer(2, metabuf, REGBUF_WILL_INIT | REGBUF_STANDARD); md.root = rootblknum; md.level = metad->btm_level; md.fastroot = rootblknum; md.fastlevel = metad->btm_level; + md.oldest_btpo_xact = metad->btm_oldest_btpo_xact; + md.last_cleanup_num_heap_tuples = metad->btm_last_cleanup_num_heap_tuples; XLogRegisterBufData(2, (char *) &md, sizeof(xl_btree_metadata)); @@ -2096,6 +2296,7 @@ _bt_pgaddtup(Page page, { trunctuple = *itup; trunctuple.t_info = sizeof(IndexTupleData); + BTreeTupleSetNAtts(&trunctuple, 0); itup = &trunctuple; itemsize = sizeof(IndexTupleData); } @@ -2125,6 +2326,12 @@ _bt_isequal(TupleDesc itupdesc, Page page, OffsetNumber offnum, itup = (IndexTuple) PageGetItem(page, PageGetItemId(page, offnum)); + /* + * It's okay that we might perform a comparison against a truncated page + * high key when caller needs to determine if _bt_check_unique scan must + * continue on to the next page. Caller never asks us to compare non-key + * attributes within an INCLUDE index. + */ for (i = 1; i <= keysz; i++) { AttrNumber attno; diff --git a/src/backend/access/nbtree/nbtpage.c b/src/backend/access/nbtree/nbtpage.c index 5c817b6510..4082103fe2 100644 --- a/src/backend/access/nbtree/nbtpage.c +++ b/src/backend/access/nbtree/nbtpage.c @@ -4,7 +4,7 @@ * BTree-specific page management code for the Postgres btree access * method. 
* - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * @@ -33,6 +33,7 @@ #include "storage/predicate.h" #include "utils/snapmgr.h" +static void _bt_cachemetadata(Relation rel, BTMetaPageData *metad); static bool _bt_mark_page_halfdead(Relation rel, Buffer buf, BTStack stack); static bool _bt_unlink_halfdead_page(Relation rel, Buffer leafbuf, bool *rightsib_empty); @@ -60,18 +61,165 @@ _bt_initmetapage(Page page, BlockNumber rootbknum, uint32 level) metad->btm_level = level; metad->btm_fastroot = rootbknum; metad->btm_fastlevel = level; + metad->btm_oldest_btpo_xact = InvalidTransactionId; + metad->btm_last_cleanup_num_heap_tuples = -1.0; metaopaque = (BTPageOpaque) PageGetSpecialPointer(page); metaopaque->btpo_flags = BTP_META; /* - * Set pd_lower just past the end of the metadata. This is not essential - * but it makes the page look compressible to xlog.c. + * Set pd_lower just past the end of the metadata. This is essential, + * because without doing so, metadata will be lost if xlog.c compresses + * the page. */ ((PageHeader) page)->pd_lower = ((char *) metad + sizeof(BTMetaPageData)) - (char *) page; } +/* + * _bt_upgrademetapage() -- Upgrade a meta-page from an old format to the new. + * + * This routine does purely in-memory image upgrade. Caller is + * responsible for locking, WAL-logging etc. + */ +void +_bt_upgrademetapage(Page page) +{ + BTMetaPageData *metad; + BTPageOpaque metaopaque PG_USED_FOR_ASSERTS_ONLY; + + metad = BTPageGetMeta(page); + metaopaque = (BTPageOpaque) PageGetSpecialPointer(page); + + /* It must be really a meta page of upgradable version */ + Assert(metaopaque->btpo_flags & BTP_META); + Assert(metad->btm_version < BTREE_VERSION); + Assert(metad->btm_version >= BTREE_MIN_VERSION); + + /* Set version number and fill extra fields added into version 3 */ + metad->btm_version = BTREE_VERSION; + metad->btm_oldest_btpo_xact = InvalidTransactionId; + metad->btm_last_cleanup_num_heap_tuples = -1.0; + + /* Adjust pd_lower (see _bt_initmetapage() for details) */ + ((PageHeader) page)->pd_lower = + ((char *) metad + sizeof(BTMetaPageData)) - (char *) page; +} + +/* + * Cache metadata from meta page to rel->rd_amcache. + */ +static void +_bt_cachemetadata(Relation rel, BTMetaPageData *metad) +{ + /* We assume rel->rd_amcache was already freed by caller */ + Assert(rel->rd_amcache == NULL); + rel->rd_amcache = MemoryContextAlloc(rel->rd_indexcxt, + sizeof(BTMetaPageData)); + + /* + * Meta page should be of supported version (should be already checked by + * caller). + */ + Assert(metad->btm_version >= BTREE_MIN_VERSION && + metad->btm_version <= BTREE_VERSION); + + if (metad->btm_version == BTREE_VERSION) + { + /* Last version of meta-data, no need to upgrade */ + memcpy(rel->rd_amcache, metad, sizeof(BTMetaPageData)); + } + else + { + BTMetaPageData *cached_metad = (BTMetaPageData *) rel->rd_amcache; + + /* + * Upgrade meta-data: copy available information from meta-page and + * fill new fields with default values. + */ + memcpy(rel->rd_amcache, metad, offsetof(BTMetaPageData, btm_oldest_btpo_xact)); + cached_metad->btm_version = BTREE_VERSION; + cached_metad->btm_oldest_btpo_xact = InvalidTransactionId; + cached_metad->btm_last_cleanup_num_heap_tuples = -1.0; + } +} + +/* + * _bt_update_meta_cleanup_info() -- Update cleanup-related information in + * the metapage. 
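/*
 * A minimal sketch of the pattern this patch applies wherever the metapage
 * is modified: inside the critical section, an old-format metapage is first
 * upgraded with _bt_upgrademetapage() before the new fields are touched.
 * The helper name upgrade_and_set_cleanup_info is illustrative; the real
 * callers are _bt_getroot(), _bt_newroot(), _bt_unlink_halfdead_page() and
 * _bt_update_meta_cleanup_info().
 */
static void
upgrade_and_set_cleanup_info(Page metapg, TransactionId oldestBtpoXact,
							 float8 numHeapTuples)
{
	BTMetaPageData *metad = BTPageGetMeta(metapg);

	if (metad->btm_version < BTREE_VERSION)
		_bt_upgrademetapage(metapg);

	metad->btm_oldest_btpo_xact = oldestBtpoXact;
	metad->btm_last_cleanup_num_heap_tuples = numHeapTuples;
}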
+ * + * This routine checks if provided cleanup-related information is matching + * to those written in the metapage. On mismatch, metapage is overwritten. + */ +void +_bt_update_meta_cleanup_info(Relation rel, TransactionId oldestBtpoXact, + float8 numHeapTuples) +{ + Buffer metabuf; + Page metapg; + BTMetaPageData *metad; + bool needsRewrite = false; + XLogRecPtr recptr; + + /* read the metapage and check if it needs rewrite */ + metabuf = _bt_getbuf(rel, BTREE_METAPAGE, BT_READ); + metapg = BufferGetPage(metabuf); + metad = BTPageGetMeta(metapg); + + /* outdated version of metapage always needs rewrite */ + if (metad->btm_version < BTREE_VERSION) + needsRewrite = true; + else if (metad->btm_oldest_btpo_xact != oldestBtpoXact || + metad->btm_last_cleanup_num_heap_tuples != numHeapTuples) + needsRewrite = true; + + if (!needsRewrite) + { + _bt_relbuf(rel, metabuf); + return; + } + + /* trade in our read lock for a write lock */ + LockBuffer(metabuf, BUFFER_LOCK_UNLOCK); + LockBuffer(metabuf, BT_WRITE); + + START_CRIT_SECTION(); + + /* upgrade meta-page if needed */ + if (metad->btm_version < BTREE_VERSION) + _bt_upgrademetapage(metapg); + + /* update cleanup-related information */ + metad->btm_oldest_btpo_xact = oldestBtpoXact; + metad->btm_last_cleanup_num_heap_tuples = numHeapTuples; + MarkBufferDirty(metabuf); + + /* write wal record if needed */ + if (RelationNeedsWAL(rel)) + { + xl_btree_metadata md; + + XLogBeginInsert(); + XLogRegisterBuffer(0, metabuf, REGBUF_WILL_INIT | REGBUF_STANDARD); + + md.root = metad->btm_root; + md.level = metad->btm_level; + md.fastroot = metad->btm_fastroot; + md.fastlevel = metad->btm_fastlevel; + md.oldest_btpo_xact = oldestBtpoXact; + md.last_cleanup_num_heap_tuples = numHeapTuples; + + XLogRegisterBufData(0, (char *) &md, sizeof(xl_btree_metadata)); + + recptr = XLogInsert(RM_BTREE_ID, XLOG_BTREE_META_CLEANUP); + + PageSetLSN(metapg, recptr); + } + + END_CRIT_SECTION(); + _bt_relbuf(rel, metabuf); +} + /* * _bt_getroot() -- Get the root page of the btree. 
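/*
 * A minimal sketch of the "does the metapage need a rewrite?" test embedded
 * in _bt_update_meta_cleanup_info() above; the helper name is illustrative.
 * An outdated metapage is always rewritten, otherwise a rewrite happens only
 * when the stored cleanup information differs from the new values.
 */
static bool
cleanup_info_needs_rewrite(BTMetaPageData *metad,
						   TransactionId oldestBtpoXact,
						   float8 numHeapTuples)
{
	if (metad->btm_version < BTREE_VERSION)
		return true;

	return metad->btm_oldest_btpo_xact != oldestBtpoXact ||
		metad->btm_last_cleanup_num_heap_tuples != numHeapTuples;
}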
* @@ -123,7 +271,8 @@ _bt_getroot(Relation rel, int access) metad = (BTMetaPageData *) rel->rd_amcache; /* We shouldn't have cached it if any of these fail */ Assert(metad->btm_magic == BTREE_MAGIC); - Assert(metad->btm_version == BTREE_VERSION); + Assert(metad->btm_version >= BTREE_MIN_VERSION); + Assert(metad->btm_version <= BTREE_VERSION); Assert(metad->btm_root != P_NONE); rootblkno = metad->btm_fastroot; @@ -162,19 +311,21 @@ _bt_getroot(Relation rel, int access) metad = BTPageGetMeta(metapg); /* sanity-check the metapage */ - if (!(metaopaque->btpo_flags & BTP_META) || + if (!P_ISMETA(metaopaque) || metad->btm_magic != BTREE_MAGIC) ereport(ERROR, (errcode(ERRCODE_INDEX_CORRUPTED), errmsg("index \"%s\" is not a btree", RelationGetRelationName(rel)))); - if (metad->btm_version != BTREE_VERSION) + if (metad->btm_version < BTREE_MIN_VERSION || + metad->btm_version > BTREE_VERSION) ereport(ERROR, (errcode(ERRCODE_INDEX_CORRUPTED), - errmsg("version mismatch in index \"%s\": file version %d, code version %d", + errmsg("version mismatch in index \"%s\": file version %d, " + "current version %d, minimal supported version %d", RelationGetRelationName(rel), - metad->btm_version, BTREE_VERSION))); + metad->btm_version, BTREE_VERSION, BTREE_MIN_VERSION))); /* if no root page initialized yet, do it */ if (metad->btm_root == P_NONE) @@ -224,10 +375,16 @@ _bt_getroot(Relation rel, int access) /* NO ELOG(ERROR) till meta is updated */ START_CRIT_SECTION(); + /* upgrade metapage if needed */ + if (metad->btm_version < BTREE_VERSION) + _bt_upgrademetapage(metapg); + metad->btm_root = rootblkno; metad->btm_level = 0; metad->btm_fastroot = rootblkno; metad->btm_fastlevel = 0; + metad->btm_oldest_btpo_xact = InvalidTransactionId; + metad->btm_last_cleanup_num_heap_tuples = -1.0; MarkBufferDirty(rootbuf); MarkBufferDirty(metabuf); @@ -241,12 +398,14 @@ _bt_getroot(Relation rel, int access) XLogBeginInsert(); XLogRegisterBuffer(0, rootbuf, REGBUF_WILL_INIT); - XLogRegisterBuffer(2, metabuf, REGBUF_WILL_INIT); + XLogRegisterBuffer(2, metabuf, REGBUF_WILL_INIT | REGBUF_STANDARD); md.root = rootblkno; md.level = 0; md.fastroot = rootblkno; md.fastlevel = 0; + md.oldest_btpo_xact = InvalidTransactionId; + md.last_cleanup_num_heap_tuples = -1.0; XLogRegisterBufData(2, (char *) &md, sizeof(xl_btree_metadata)); @@ -283,9 +442,7 @@ _bt_getroot(Relation rel, int access) /* * Cache the metapage data for next time */ - rel->rd_amcache = MemoryContextAlloc(rel->rd_indexcxt, - sizeof(BTMetaPageData)); - memcpy(rel->rd_amcache, metad, sizeof(BTMetaPageData)); + _bt_cachemetadata(rel, metad); /* * We are done with the metapage; arrange to release it via first @@ -365,19 +522,21 @@ _bt_gettrueroot(Relation rel) metaopaque = (BTPageOpaque) PageGetSpecialPointer(metapg); metad = BTPageGetMeta(metapg); - if (!(metaopaque->btpo_flags & BTP_META) || + if (!P_ISMETA(metaopaque) || metad->btm_magic != BTREE_MAGIC) ereport(ERROR, (errcode(ERRCODE_INDEX_CORRUPTED), errmsg("index \"%s\" is not a btree", RelationGetRelationName(rel)))); - if (metad->btm_version != BTREE_VERSION) + if (metad->btm_version < BTREE_MIN_VERSION || + metad->btm_version > BTREE_VERSION) ereport(ERROR, (errcode(ERRCODE_INDEX_CORRUPTED), - errmsg("version mismatch in index \"%s\": file version %d, code version %d", + errmsg("version mismatch in index \"%s\": file version %d, " + "current version %d, minimal supported version %d", RelationGetRelationName(rel), - metad->btm_version, BTREE_VERSION))); + metad->btm_version, BTREE_VERSION, BTREE_MIN_VERSION))); 
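/*
 * A minimal sketch of the metapage version check that _bt_getroot(),
 * _bt_gettrueroot() and _bt_getrootheight() now share: any on-disk version
 * between BTREE_MIN_VERSION and BTREE_VERSION is accepted, anything else is
 * reported as index corruption. The helper name is illustrative only.
 */
static void
check_meta_version(Relation rel, BTMetaPageData *metad)
{
	if (metad->btm_version < BTREE_MIN_VERSION ||
		metad->btm_version > BTREE_VERSION)
		ereport(ERROR,
				(errcode(ERRCODE_INDEX_CORRUPTED),
				 errmsg("version mismatch in index \"%s\": file version %d, "
						"current version %d, minimal supported version %d",
						RelationGetRelationName(rel),
						metad->btm_version, BTREE_VERSION, BTREE_MIN_VERSION)));
}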
/* if no root page initialized yet, fail */ if (metad->btm_root == P_NONE) @@ -452,19 +611,21 @@ _bt_getrootheight(Relation rel) metad = BTPageGetMeta(metapg); /* sanity-check the metapage */ - if (!(metaopaque->btpo_flags & BTP_META) || + if (!P_ISMETA(metaopaque) || metad->btm_magic != BTREE_MAGIC) ereport(ERROR, (errcode(ERRCODE_INDEX_CORRUPTED), errmsg("index \"%s\" is not a btree", RelationGetRelationName(rel)))); - if (metad->btm_version != BTREE_VERSION) + if (metad->btm_version < BTREE_MIN_VERSION || + metad->btm_version > BTREE_VERSION) ereport(ERROR, (errcode(ERRCODE_INDEX_CORRUPTED), - errmsg("version mismatch in index \"%s\": file version %d, code version %d", + errmsg("version mismatch in index \"%s\": file version %d, " + "current version %d, minimal supported version %d", RelationGetRelationName(rel), - metad->btm_version, BTREE_VERSION))); + metad->btm_version, BTREE_VERSION, BTREE_MIN_VERSION))); /* * If there's no root page yet, _bt_getroot() doesn't expect a cache @@ -480,9 +641,7 @@ _bt_getrootheight(Relation rel) /* * Cache the metapage data for next time */ - rel->rd_amcache = MemoryContextAlloc(rel->rd_indexcxt, - sizeof(BTMetaPageData)); - memcpy(rel->rd_amcache, metad, sizeof(BTMetaPageData)); + _bt_cachemetadata(rel, metad); _bt_relbuf(rel, metabuf); } @@ -623,9 +782,14 @@ _bt_getbuf(Relation rel, BlockNumber blkno, int access) /* * If we are generating WAL for Hot Standby then create a * WAL record that will allow us to conflict with queries - * running on standby. + * running on standby, in case they have snapshots older + * than btpo.xact. This can only apply if the page does + * have a valid btpo.xact value, ie not if it's new. (We + * must check that because an all-zero page has no special + * space.) */ - if (XLogStandbyInfoActive() && RelationNeedsWAL(rel)) + if (XLogStandbyInfoActive() && RelationNeedsWAL(rel) && + !PageIsNew(page)) { BTPageOpaque opaque = (BTPageOpaque) PageGetSpecialPointer(page); @@ -738,7 +902,10 @@ _bt_pageinit(Page page, Size size) * _bt_page_recyclable() -- Is an existing page recyclable? * * This exists to make sure _bt_getbuf and btvacuumscan have the same - * policy about whether a page is safe to re-use. + * policy about whether a page is safe to re-use. But note that _bt_getbuf + * knows enough to distinguish the PageIsNew condition from the other one. + * At some point it might be appropriate to redesign this to have a three-way + * result value. 
*/ bool _bt_page_recyclable(Page page) @@ -984,7 +1151,7 @@ _bt_lock_branch_parent(Relation rel, BlockNumber child, BTStack stack, * Locate the downlink of "child" in the parent (updating the stack entry * if needed) */ - ItemPointerSet(&(stack->bts_btentry.t_tid), child, P_HIKEY); + stack->bts_btentry = child; pbuf = _bt_getstackbuf(rel, stack, BT_WRITE); if (pbuf == InvalidBuffer) elog(ERROR, "failed to re-find parent key in index \"%s\" for deletion target page %u", @@ -1255,8 +1422,9 @@ _bt_pagedel(Relation rel, Buffer buf) /* we need an insertion scan key for the search, so build one */ itup_scankey = _bt_mkscankey(rel, targetkey); /* find the leftmost leaf page containing this key */ - stack = _bt_search(rel, rel->rd_rel->relnatts, itup_scankey, - false, &lbuf, BT_READ, NULL); + stack = _bt_search(rel, + IndexRelationGetNumberOfKeyAttributes(rel), + itup_scankey, false, &lbuf, BT_READ, NULL); /* don't need a pin on the page */ _bt_relbuf(rel, lbuf); @@ -1283,6 +1451,7 @@ _bt_pagedel(Relation rel, Buffer buf) rightsib_empty = false; while (P_ISHALFDEAD(opaque)) { + /* will check for interrupts, once lock is released */ if (!_bt_unlink_halfdead_page(rel, buf, &rightsib_empty)) { /* _bt_unlink_halfdead_page already released buffer */ @@ -1295,6 +1464,12 @@ _bt_pagedel(Relation rel, Buffer buf) _bt_relbuf(rel, buf); + /* + * Check here, as calling loops will have locks held, preventing + * interrupts from being processed. + */ + CHECK_FOR_INTERRUPTS(); + /* * The page has now been deleted. If its right sibling is completely * empty, it's possible that the reason we haven't deleted it earlier @@ -1392,15 +1567,15 @@ _bt_mark_page_halfdead(Relation rel, Buffer leafbuf, BTStack stack) #ifdef USE_ASSERT_CHECKING itemid = PageGetItemId(page, topoff); itup = (IndexTuple) PageGetItem(page, itemid); - Assert(ItemPointerGetBlockNumber(&(itup->t_tid)) == target); + Assert(BTreeInnerTupleGetDownLink(itup) == target); #endif nextoffset = OffsetNumberNext(topoff); itemid = PageGetItemId(page, nextoffset); itup = (IndexTuple) PageGetItem(page, itemid); - if (ItemPointerGetBlockNumber(&(itup->t_tid)) != rightsib) + if (BTreeInnerTupleGetDownLink(itup) != rightsib) elog(ERROR, "right sibling %u of block %u is not next child %u of block %u in index \"%s\"", - rightsib, target, ItemPointerGetBlockNumber(&(itup->t_tid)), + rightsib, target, BTreeInnerTupleGetDownLink(itup), BufferGetBlockNumber(topparent), RelationGetRelationName(rel)); /* @@ -1423,7 +1598,7 @@ _bt_mark_page_halfdead(Relation rel, Buffer leafbuf, BTStack stack) itemid = PageGetItemId(page, topoff); itup = (IndexTuple) PageGetItem(page, itemid); - ItemPointerSet(&(itup->t_tid), rightsib, P_HIKEY); + BTreeInnerTupleSetDownLink(itup, rightsib); nextoffset = OffsetNumberNext(topoff); PageIndexTupleDelete(page, nextoffset); @@ -1442,9 +1617,10 @@ _bt_mark_page_halfdead(Relation rel, Buffer leafbuf, BTStack stack) MemSet(&trunctuple, 0, sizeof(IndexTupleData)); trunctuple.t_info = sizeof(IndexTupleData); if (target != leafblkno) - ItemPointerSet(&trunctuple.t_tid, target, P_HIKEY); + BTreeTupleSetTopParent(&trunctuple, target); else - ItemPointerSetInvalid(&trunctuple.t_tid); + BTreeTupleSetTopParent(&trunctuple, InvalidBlockNumber); + if (PageAddItem(page, (Item) &trunctuple, sizeof(IndexTupleData), P_HIKEY, false, false) == InvalidOffsetNumber) elog(ERROR, "could not add dummy high key to half-dead page"); @@ -1528,7 +1704,7 @@ _bt_unlink_halfdead_page(Relation rel, Buffer leafbuf, bool *rightsib_empty) BTPageOpaque opaque; bool 
rightsib_is_rightmost; int targetlevel; - ItemPointer leafhikey; + IndexTuple leafhikey; BlockNumber nextchild; page = BufferGetPage(leafbuf); @@ -1540,21 +1716,28 @@ _bt_unlink_halfdead_page(Relation rel, Buffer leafbuf, bool *rightsib_empty) * Remember some information about the leaf page. */ itemid = PageGetItemId(page, P_HIKEY); - leafhikey = &((IndexTuple) PageGetItem(page, itemid))->t_tid; + leafhikey = (IndexTuple) PageGetItem(page, itemid); leafleftsib = opaque->btpo_prev; leafrightsib = opaque->btpo_next; LockBuffer(leafbuf, BUFFER_LOCK_UNLOCK); + /* + * Check here, as calling loops will have locks held, preventing + * interrupts from being processed. + */ + CHECK_FOR_INTERRUPTS(); + /* * If the leaf page still has a parent pointing to it (or a chain of * parents), we don't unlink the leaf page yet, but the topmost remaining * parent in the branch. Set 'target' and 'buf' to reference the page * actually being unlinked. */ - if (ItemPointerIsValid(leafhikey)) + target = BTreeTupleGetTopParent(leafhikey); + + if (target != InvalidBlockNumber) { - target = ItemPointerGetBlockNumber(leafhikey); Assert(target != leafblkno); /* fetch the block number of the topmost parent's left sibling */ @@ -1604,6 +1787,14 @@ _bt_unlink_halfdead_page(Relation rel, Buffer leafbuf, bool *rightsib_empty) /* step right one page */ leftsib = opaque->btpo_next; _bt_relbuf(rel, lbuf); + + /* + * It'd be good to check for interrupts here, but it's not easy to + * do so because a lock is always held. This block isn't + * frequently reached, so hopefully the consequences of not + * checking interrupts aren't too bad. + */ + if (leftsib == P_NONE) { elog(LOG, "no left sibling (concurrent deletion?) of block %u in \"%s\"", @@ -1670,7 +1861,7 @@ _bt_unlink_halfdead_page(Relation rel, Buffer leafbuf, bool *rightsib_empty) /* remember the next non-leaf child down in the branch. */ itemid = PageGetItemId(page, P_FIRSTDATAKEY(opaque)); - nextchild = ItemPointerGetBlockNumber(&((IndexTuple) PageGetItem(page, itemid))->t_tid); + nextchild = BTreeInnerTupleGetDownLink((IndexTuple) PageGetItem(page, itemid)); if (nextchild == leafblkno) nextchild = InvalidBlockNumber; } @@ -1757,12 +1948,7 @@ _bt_unlink_halfdead_page(Relation rel, Buffer leafbuf, bool *rightsib_empty) * branch. */ if (target != leafblkno) - { - if (nextchild == InvalidBlockNumber) - ItemPointerSetInvalid(leafhikey); - else - ItemPointerSet(leafhikey, nextchild, P_HIKEY); - } + BTreeTupleSetTopParent(leafhikey, nextchild); /* * Mark the page itself deleted. 
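/*
 * A minimal sketch, based on the comments in the page-deletion code above,
 * of how the half-dead leaf's high key now carries the block number of the
 * topmost parent in the branch being deleted, via the
 * BTreeTupleGetTopParent/BTreeTupleSetTopParent accessors introduced by
 * this patch. InvalidBlockNumber means the leaf page itself is the unlink
 * target. The helper name is illustrative and glosses over locking.
 */
static BlockNumber
halfdead_unlink_target(Page leafpage, BlockNumber leafblkno)
{
	ItemId		itemid = PageGetItemId(leafpage, P_HIKEY);
	IndexTuple	leafhikey = (IndexTuple) PageGetItem(leafpage, itemid);
	BlockNumber target = BTreeTupleGetTopParent(leafhikey);

	return (target != InvalidBlockNumber) ? target : leafblkno;
}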
It can be recycled when all current @@ -1783,6 +1969,9 @@ _bt_unlink_halfdead_page(Relation rel, Buffer leafbuf, bool *rightsib_empty) /* And update the metapage, if needed */ if (BufferIsValid(metabuf)) { + /* upgrade metapage if needed */ + if (metad->btm_version < BTREE_VERSION) + _bt_upgrademetapage(metapg); metad->btm_fastroot = rightsib; metad->btm_fastlevel = targetlevel; MarkBufferDirty(metabuf); @@ -1827,12 +2016,14 @@ _bt_unlink_halfdead_page(Relation rel, Buffer leafbuf, bool *rightsib_empty) if (BufferIsValid(metabuf)) { - XLogRegisterBuffer(4, metabuf, REGBUF_WILL_INIT); + XLogRegisterBuffer(4, metabuf, REGBUF_WILL_INIT | REGBUF_STANDARD); xlmeta.root = metad->btm_root; xlmeta.level = metad->btm_level; xlmeta.fastroot = metad->btm_fastroot; xlmeta.fastlevel = metad->btm_fastlevel; + xlmeta.oldest_btpo_xact = metad->btm_oldest_btpo_xact; + xlmeta.last_cleanup_num_heap_tuples = metad->btm_last_cleanup_num_heap_tuples; XLogRegisterBufData(4, (char *) &xlmeta, sizeof(xl_btree_metadata)); xlinfo = XLOG_BTREE_UNLINK_PAGE_META; diff --git a/src/backend/access/nbtree/nbtree.c b/src/backend/access/nbtree/nbtree.c index 3dbafdd6fc..e8725fbbe1 100644 --- a/src/backend/access/nbtree/nbtree.c +++ b/src/backend/access/nbtree/nbtree.c @@ -8,7 +8,7 @@ * This file contains only the public interface routines. * * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * IDENTIFICATION @@ -19,38 +19,24 @@ #include "postgres.h" #include "access/nbtree.h" +#include "access/nbtxlog.h" #include "access/relscan.h" #include "access/xlog.h" -#include "catalog/index.h" #include "commands/vacuum.h" +#include "miscadmin.h" +#include "nodes/execnodes.h" #include "pgstat.h" +#include "postmaster/autovacuum.h" #include "storage/condition_variable.h" #include "storage/indexfsm.h" #include "storage/ipc.h" #include "storage/lmgr.h" #include "storage/smgr.h" -#include "tcop/tcopprot.h" /* pgrminclude ignore */ #include "utils/builtins.h" #include "utils/index_selfuncs.h" #include "utils/memutils.h" -/* Working state for btbuild and its callback */ -typedef struct -{ - bool isUnique; - bool haveDead; - Relation heapRel; - BTSpool *spool; - - /* - * spool2 is needed only when the index is a unique index. Dead tuples are - * put into spool2 instead of spool in order to avoid uniqueness check. 
- */ - BTSpool *spool2; - double indtuples; -} BTBuildState; - /* Working state needed by btvacuumpage */ typedef struct { @@ -62,6 +48,7 @@ typedef struct BlockNumber lastBlockVacuumed; /* highest blkno actually vacuumed */ BlockNumber lastBlockLocked; /* highest blkno we've cleanup-locked */ BlockNumber totFreePages; /* true total # of free pages */ + TransactionId oldestBtpoXact; MemoryContext pagedelcontext; } BTVacState; @@ -104,15 +91,9 @@ typedef struct BTParallelScanDescData typedef struct BTParallelScanDescData *BTParallelScanDesc; -static void btbuildCallback(Relation index, - HeapTuple htup, - Datum *values, - bool *isnull, - bool tupleIsAlive, - void *state); static void btvacuumscan(IndexVacuumInfo *info, IndexBulkDeleteResult *stats, IndexBulkDeleteCallback callback, void *callback_state, - BTCycleId cycleid); + BTCycleId cycleid, TransactionId *oldestBtpoXact); static void btvacuumpage(BTVacState *vstate, BlockNumber blkno, BlockNumber orig_blkno); @@ -140,6 +121,7 @@ bthandler(PG_FUNCTION_ARGS) amroutine->amclusterable = true; amroutine->ampredlocks = true; amroutine->amcanparallel = true; + amroutine->amcaninclude = true; amroutine->amkeytype = InvalidOid; amroutine->ambuild = btbuild; @@ -166,115 +148,6 @@ bthandler(PG_FUNCTION_ARGS) PG_RETURN_POINTER(amroutine); } -/* - * btbuild() -- build a new btree index. - */ -IndexBuildResult * -btbuild(Relation heap, Relation index, IndexInfo *indexInfo) -{ - IndexBuildResult *result; - double reltuples; - BTBuildState buildstate; - - buildstate.isUnique = indexInfo->ii_Unique; - buildstate.haveDead = false; - buildstate.heapRel = heap; - buildstate.spool = NULL; - buildstate.spool2 = NULL; - buildstate.indtuples = 0; - -#ifdef BTREE_BUILD_STATS - if (log_btree_build_stats) - ResetUsage(); -#endif /* BTREE_BUILD_STATS */ - - /* - * We expect to be called exactly once for any index relation. If that's - * not the case, big trouble's what we have. - */ - if (RelationGetNumberOfBlocks(index) != 0) - elog(ERROR, "index \"%s\" already contains data", - RelationGetRelationName(index)); - - buildstate.spool = _bt_spoolinit(heap, index, indexInfo->ii_Unique, false); - - /* - * If building a unique index, put dead tuples in a second spool to keep - * them out of the uniqueness check. - */ - if (indexInfo->ii_Unique) - buildstate.spool2 = _bt_spoolinit(heap, index, false, true); - - /* do the heap scan */ - reltuples = IndexBuildHeapScan(heap, index, indexInfo, true, - btbuildCallback, (void *) &buildstate); - - /* okay, all heap tuples are indexed */ - if (buildstate.spool2 && !buildstate.haveDead) - { - /* spool2 turns out to be unnecessary */ - _bt_spooldestroy(buildstate.spool2); - buildstate.spool2 = NULL; - } - - /* - * Finish the build by (1) completing the sort of the spool file, (2) - * inserting the sorted tuples into btree pages and (3) building the upper - * levels. 
- */ - _bt_leafbuild(buildstate.spool, buildstate.spool2); - _bt_spooldestroy(buildstate.spool); - if (buildstate.spool2) - _bt_spooldestroy(buildstate.spool2); - -#ifdef BTREE_BUILD_STATS - if (log_btree_build_stats) - { - ShowUsage("BTREE BUILD STATS"); - ResetUsage(); - } -#endif /* BTREE_BUILD_STATS */ - - /* - * Return statistics - */ - result = (IndexBuildResult *) palloc(sizeof(IndexBuildResult)); - - result->heap_tuples = reltuples; - result->index_tuples = buildstate.indtuples; - - return result; -} - -/* - * Per-tuple callback from IndexBuildHeapScan - */ -static void -btbuildCallback(Relation index, - HeapTuple htup, - Datum *values, - bool *isnull, - bool tupleIsAlive, - void *state) -{ - BTBuildState *buildstate = (BTBuildState *) state; - - /* - * insert the index tuple into the appropriate spool file for subsequent - * processing - */ - if (tupleIsAlive || buildstate->spool2 == NULL) - _bt_spool(buildstate->spool, &htup->t_self, values, isnull); - else - { - /* dead tuples are put into spool2 */ - buildstate->haveDead = true; - _bt_spool(buildstate->spool2, &htup->t_self, values, isnull); - } - - buildstate->indtuples += 1; -} - /* * btbuildempty() -- build an empty btree index in the initialization fork */ @@ -298,7 +171,7 @@ btbuildempty(Relation index) smgrwrite(index->rd_smgr, INIT_FORKNUM, BTREE_METAPAGE, (char *) metapage, true); log_newpage(&index->rd_smgr->smgr_rnode.node, INIT_FORKNUM, - BTREE_METAPAGE, metapage, false); + BTREE_METAPAGE, metapage, true); /* * An immediate sync is required even if we xlog'd the page, because the @@ -905,6 +778,71 @@ _bt_parallel_advance_array_keys(IndexScanDesc scan) SpinLockRelease(&btscan->btps_mutex); } +/* + * _bt_vacuum_needs_cleanup() -- Checks if index needs cleanup assuming that + * btbulkdelete() wasn't called. + */ +static bool +_bt_vacuum_needs_cleanup(IndexVacuumInfo *info) +{ + Buffer metabuf; + Page metapg; + BTMetaPageData *metad; + bool result = false; + + metabuf = _bt_getbuf(info->index, BTREE_METAPAGE, BT_READ); + metapg = BufferGetPage(metabuf); + metad = BTPageGetMeta(metapg); + + if (metad->btm_version < BTREE_VERSION) + { + /* + * Do cleanup if metapage needs upgrade, because we don't have + * cleanup-related meta-information yet. + */ + result = true; + } + else if (TransactionIdIsValid(metad->btm_oldest_btpo_xact) && + TransactionIdPrecedes(metad->btm_oldest_btpo_xact, + RecentGlobalXmin)) + { + /* + * If oldest btpo.xact in the deleted pages is older than + * RecentGlobalXmin, then at least one deleted page can be recycled. + */ + result = true; + } + else + { + StdRdOptions *relopts; + float8 cleanup_scale_factor; + float8 prev_num_heap_tuples; + + /* + * If table receives enough insertions and no cleanup was performed, + * then index would appear have stale statistics. If scale factor is + * set, we avoid that by performing cleanup if the number of inserted + * tuples exceeds vacuum_cleanup_index_scale_factor fraction of + * original tuples count. + */ + relopts = (StdRdOptions *) info->index->rd_options; + cleanup_scale_factor = (relopts && + relopts->vacuum_cleanup_index_scale_factor >= 0) + ? 
relopts->vacuum_cleanup_index_scale_factor + : vacuum_cleanup_index_scale_factor; + prev_num_heap_tuples = metad->btm_last_cleanup_num_heap_tuples; + + if (cleanup_scale_factor <= 0 || + prev_num_heap_tuples < 0 || + (info->num_heap_tuples - prev_num_heap_tuples) / + prev_num_heap_tuples >= cleanup_scale_factor) + result = true; + } + + _bt_relbuf(info->index, metabuf); + return result; +} + /* * Bulk deletion of all index entries pointing to a set of heap tuples. * The set of target tuples is specified via a callback routine that tells @@ -927,9 +865,20 @@ btbulkdelete(IndexVacuumInfo *info, IndexBulkDeleteResult *stats, /* The ENSURE stuff ensures we clean up shared memory on failure */ PG_ENSURE_ERROR_CLEANUP(_bt_end_vacuum_callback, PointerGetDatum(rel)); { + TransactionId oldestBtpoXact; + cycleid = _bt_start_vacuum(rel); - btvacuumscan(info, stats, callback, callback_state, cycleid); + btvacuumscan(info, stats, callback, callback_state, cycleid, + &oldestBtpoXact); + + /* + * Update cleanup-related information in metapage. This information is + * used only for cleanup but keeping them up to date can avoid + * unnecessary cleanup even after bulkdelete. + */ + _bt_update_meta_cleanup_info(info->index, oldestBtpoXact, + info->num_heap_tuples); } PG_END_ENSURE_ERROR_CLEANUP(_bt_end_vacuum_callback, PointerGetDatum(rel)); _bt_end_vacuum(rel); @@ -951,21 +900,29 @@ btvacuumcleanup(IndexVacuumInfo *info, IndexBulkDeleteResult *stats) /* * If btbulkdelete was called, we need not do anything, just return the - * stats from the latest btbulkdelete call. If it wasn't called, we must - * still do a pass over the index, to recycle any newly-recyclable pages - * and to obtain index statistics. + * stats from the latest btbulkdelete call. If it wasn't called, we might + * still need to do a pass over the index, to recycle any newly-recyclable + * pages or to obtain index statistics. _bt_vacuum_needs_cleanup + * determines if either are needed. * * Since we aren't going to actually delete any leaf items, there's no * need to go through all the vacuum-cycle-ID pushups. 
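/*
 * A minimal sketch of the statistics-staleness test in
 * _bt_vacuum_needs_cleanup() above, with its inputs pulled out into plain
 * parameters; the helper name is illustrative. A non-positive scale factor
 * or an unknown previous tuple count forces a cleanup pass; otherwise one
 * is triggered when insertions since the last cleanup exceed the configured
 * fraction of the previously recorded heap tuple count.
 */
static bool
cleanup_needed_for_stats(float8 cleanup_scale_factor,
						 float8 prev_num_heap_tuples,
						 float8 num_heap_tuples)
{
	if (cleanup_scale_factor <= 0 || prev_num_heap_tuples < 0)
		return true;

	return (num_heap_tuples - prev_num_heap_tuples) / prev_num_heap_tuples
		>= cleanup_scale_factor;
}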
*/ if (stats == NULL) { + TransactionId oldestBtpoXact; + + /* Check if we need a cleanup */ + if (!_bt_vacuum_needs_cleanup(info)) + return NULL; + stats = (IndexBulkDeleteResult *) palloc0(sizeof(IndexBulkDeleteResult)); - btvacuumscan(info, stats, NULL, NULL, 0); - } + btvacuumscan(info, stats, NULL, NULL, 0, &oldestBtpoXact); - /* Finally, vacuum the FSM */ - IndexFreeSpaceMapVacuum(info->index); + /* Update cleanup-related information in the metapage */ + _bt_update_meta_cleanup_info(info->index, oldestBtpoXact, + info->num_heap_tuples); + } /* * It's quite possible for us to be fooled by concurrent page splits into @@ -997,7 +954,7 @@ btvacuumcleanup(IndexVacuumInfo *info, IndexBulkDeleteResult *stats) static void btvacuumscan(IndexVacuumInfo *info, IndexBulkDeleteResult *stats, IndexBulkDeleteCallback callback, void *callback_state, - BTCycleId cycleid) + BTCycleId cycleid, TransactionId *oldestBtpoXact) { Relation rel = info->index; BTVacState vstate; @@ -1022,6 +979,7 @@ btvacuumscan(IndexVacuumInfo *info, IndexBulkDeleteResult *stats, vstate.lastBlockVacuumed = BTREE_METAPAGE; /* Initialise at first block */ vstate.lastBlockLocked = BTREE_METAPAGE; vstate.totFreePages = 0; + vstate.oldestBtpoXact = InvalidTransactionId; /* Create a temporary memory context to run _bt_pagedel in */ vstate.pagedelcontext = AllocSetContextCreate(CurrentMemoryContext, @@ -1108,9 +1066,27 @@ btvacuumscan(IndexVacuumInfo *info, IndexBulkDeleteResult *stats, MemoryContextDelete(vstate.pagedelcontext); + /* + * If we found any recyclable pages (and recorded them in the FSM), then + * forcibly update the upper-level FSM pages to ensure that searchers can + * find them. It's possible that the pages were also found during + * previous scans and so this is a waste of time, but it's cheap enough + * relative to scanning the index that it shouldn't matter much, and + * making sure that free pages are available sooner not later seems + * worthwhile. + * + * Note that if no recyclable pages exist, we don't bother vacuuming the + * FSM at all. + */ + if (vstate.totFreePages > 0) + IndexFreeSpaceMapVacuum(rel); + /* update statistics */ stats->num_pages = num_pages; stats->pages_free = vstate.totFreePages; + + if (oldestBtpoXact) + *oldestBtpoXact = vstate.oldestBtpoXact; } /* @@ -1190,6 +1166,11 @@ btvacuumpage(BTVacState *vstate, BlockNumber blkno, BlockNumber orig_blkno) { /* Already deleted, but can't recycle yet */ stats->pages_deleted++; + + /* Update the oldest btpo.xact */ + if (!TransactionIdIsValid(vstate->oldestBtpoXact) || + TransactionIdPrecedes(opaque->btpo.xact, vstate->oldestBtpoXact)) + vstate->oldestBtpoXact = opaque->btpo.xact; } else if (P_ISHALFDEAD(opaque)) { @@ -1358,7 +1339,12 @@ btvacuumpage(BTVacState *vstate, BlockNumber blkno, BlockNumber orig_blkno) /* count only this page, else may double-count parent */ if (ndel) + { stats->pages_deleted++; + if (!TransactionIdIsValid(vstate->oldestBtpoXact) || + TransactionIdPrecedes(opaque->btpo.xact, vstate->oldestBtpoXact)) + vstate->oldestBtpoXact = opaque->btpo.xact; + } MemoryContextSwitchTo(oldcontext); /* pagedel released buffer, so we shouldn't */ diff --git a/src/backend/access/nbtree/nbtsearch.c b/src/backend/access/nbtree/nbtsearch.c index 642c8943e7..16223d01ec 100644 --- a/src/backend/access/nbtree/nbtsearch.c +++ b/src/backend/access/nbtree/nbtsearch.c @@ -4,7 +4,7 @@ * Search code for postgres btrees. 
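/*
 * A minimal sketch of the bookkeeping btvacuumpage() now performs for
 * deleted-but-not-yet-recyclable pages: remember the oldest btpo.xact seen
 * so btvacuumscan() can report it and have it stored in the metapage.
 * The helper name is illustrative.
 */
static void
track_oldest_btpo_xact(TransactionId *oldestBtpoXact, TransactionId btpo_xact)
{
	if (!TransactionIdIsValid(*oldestBtpoXact) ||
		TransactionIdPrecedes(btpo_xact, *oldestBtpoXact))
		*oldestBtpoXact = btpo_xact;
}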
* * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * IDENTIFICATION @@ -87,17 +87,18 @@ _bt_drop_lock_and_maybe_pin(IndexScanDesc scan, BTScanPos sp) * place during the descent through the tree. This is not needed when * positioning for an insert or delete, so NULL is used for those cases. * - * NOTE that the returned buffer is read-locked regardless of the access - * parameter. However, access = BT_WRITE will allow an empty root page - * to be created and returned. When access = BT_READ, an empty index - * will result in *bufP being set to InvalidBuffer. Also, in BT_WRITE mode, - * any incomplete splits encountered during the search will be finished. + * The returned buffer is locked according to access parameter. Additionally, + * access = BT_WRITE will allow an empty root page to be created and returned. + * When access = BT_READ, an empty index will result in *bufP being set to + * InvalidBuffer. Also, in BT_WRITE mode, any incomplete splits encountered + * during the search will be finished. */ BTStack _bt_search(Relation rel, int keysz, ScanKey scankey, bool nextkey, Buffer *bufP, int access, Snapshot snapshot) { BTStack stack_in = NULL; + int page_access = BT_READ; /* Get the root page to start with */ *bufP = _bt_getroot(rel, access); @@ -132,7 +133,7 @@ _bt_search(Relation rel, int keysz, ScanKey scankey, bool nextkey, */ *bufP = _bt_moveright(rel, *bufP, keysz, scankey, nextkey, (access == BT_WRITE), stack_in, - BT_READ, snapshot); + page_access, snapshot); /* if this is a leaf page, we're done */ page = BufferGetPage(*bufP); @@ -147,14 +148,14 @@ _bt_search(Relation rel, int keysz, ScanKey scankey, bool nextkey, offnum = _bt_binsrch(rel, *bufP, keysz, scankey, nextkey); itemid = PageGetItemId(page, offnum); itup = (IndexTuple) PageGetItem(page, itemid); - blkno = ItemPointerGetBlockNumber(&(itup->t_tid)); + blkno = BTreeInnerTupleGetDownLink(itup); par_blkno = BufferGetBlockNumber(*bufP); /* * We need to save the location of the index entry we chose in the * parent page on a stack. In case we split the tree, we'll use the * stack to work back up to the parent page. We also save the actual - * downlink (TID) to uniquely identify the index entry, in case it + * downlink (block) to uniquely identify the index entry, in case it * moves right while we're working lower in the tree. See the paper * by Lehman and Yao for how this is detected and handled. (We use the * child link to disambiguate duplicate keys in the index -- Lehman @@ -163,16 +164,45 @@ _bt_search(Relation rel, int keysz, ScanKey scankey, bool nextkey, new_stack = (BTStack) palloc(sizeof(BTStackData)); new_stack->bts_blkno = par_blkno; new_stack->bts_offset = offnum; - memcpy(&new_stack->bts_btentry, itup, sizeof(IndexTupleData)); + new_stack->bts_btentry = blkno; new_stack->bts_parent = stack_in; + /* + * Page level 1 is lowest non-leaf page level prior to leaves. So, + * if we're on the level 1 and asked to lock leaf page in write mode, + * then lock next page in write mode, because it must be a leaf. 
+ */ + if (opaque->btpo.level == 1 && access == BT_WRITE) + page_access = BT_WRITE; + /* drop the read lock on the parent page, acquire one on the child */ - *bufP = _bt_relandgetbuf(rel, *bufP, blkno, BT_READ); + *bufP = _bt_relandgetbuf(rel, *bufP, blkno, page_access); /* okay, all set to move down a level */ stack_in = new_stack; } + /* + * If we're asked to lock leaf in write mode, but didn't manage to, then + * relock. That may happen when the root page appears to be leaf. + */ + if (access == BT_WRITE && page_access == BT_READ) + { + /* trade in our read lock for a write lock */ + LockBuffer(*bufP, BUFFER_LOCK_UNLOCK); + LockBuffer(*bufP, BT_WRITE); + + /* + * If the page was split between the time that we surrendered our read + * lock and acquired our write lock, then this page may no longer be + * the right place for the key we want to insert. In this case, we + * need to move right in the tree. See Lehman and Yao for an + * excruciatingly precise description. + */ + *bufP = _bt_moveright(rel, *bufP, keysz, scankey, nextkey, + true, stack_in, BT_WRITE, snapshot); + } + return stack_in; } @@ -436,6 +466,8 @@ _bt_compare(Relation rel, IndexTuple itup; int i; + Assert(_bt_check_natts(rel, page, offnum)); + /* * Force result ">" if target item is first data item on an internal page * --- see NOTE above. @@ -498,7 +530,7 @@ _bt_compare(Relation rel, scankey->sk_argument)); if (!(scankey->sk_flags & SK_BT_DESC)) - result = -result; + INVERT_COMPARE_RESULT(result); } /* if the keys are unequal, return the difference */ @@ -524,7 +556,7 @@ _bt_compare(Relation rel, * scan->xs_ctup.t_self is set to the heap TID of the current tuple, * and if requested, scan->xs_itup points to a copy of the index tuple. * - * If there are no matching items in the index, we return FALSE, with no + * If there are no matching items in the index, we return false, with no * pins or locks held. * * Note that scan->keyData[], and the so->keyData[] scankey built from it, @@ -1224,7 +1256,7 @@ _bt_readpage(IndexScanDesc scan, ScanDirection dir, OffsetNumber offnum) * safe to apply LP_DEAD hints to the page later. This allows us to drop * the pin for MVCC scans, which allows vacuum to avoid blocking. */ - so->currPos.lsn = PageGetLSN(page); + so->currPos.lsn = BufferGetLSNAtomic(so->currPos.buf); /* * we must save the page's right-link while scanning it; this tells us @@ -1336,7 +1368,7 @@ _bt_saveitem(BTScanOpaque so, int itemIndex, * * For success on a scan using a non-MVCC snapshot we hold a pin, but not a * read lock, on that page. If we do not hold the pin, we set so->currPos.buf - * to InvalidBuffer. We return TRUE to indicate success. + * to InvalidBuffer. We return true to indicate success. */ static bool _bt_steppage(IndexScanDesc scan, ScanDirection dir) @@ -1440,10 +1472,10 @@ _bt_steppage(IndexScanDesc scan, ScanDirection dir) * * On success exit, so->currPos is updated to contain data from the next * interesting page. Caller is responsible to release lock and pin on - * buffer on success. We return TRUE to indicate success. + * buffer on success. We return true to indicate success. * * If there are no more matching records in the given direction, we drop all - * locks and pins, set so->currPos.buf to InvalidBuffer, and return FALSE. + * locks and pins, set so->currPos.buf to InvalidBuffer, and return false. 
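/*
 * A minimal sketch of the lock-mode choice made during the descent above:
 * when the caller asked for BT_WRITE, the write lock is taken one level
 * early, as soon as the current page is at level 1, because the child must
 * then be a leaf. Otherwise the descent keeps using read locks and the
 * fallback relock-and-moveright path shown above handles a leaf root.
 * The helper name is illustrative.
 */
static inline int
child_lock_mode(uint32 parent_level, int access)
{
	return (parent_level == 1 && access == BT_WRITE) ? BT_WRITE : BT_READ;
}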
*/ static bool _bt_readnextpage(IndexScanDesc scan, BlockNumber blkno, ScanDirection dir) @@ -1486,21 +1518,28 @@ _bt_readnextpage(IndexScanDesc scan, BlockNumber blkno, ScanDirection dir) if (_bt_readpage(scan, dir, P_FIRSTDATAKEY(opaque))) break; } + else if (scan->parallel_scan != NULL) + { + /* allow next page be processed by parallel worker */ + _bt_parallel_release(scan, opaque->btpo_next); + } /* nope, keep going */ if (scan->parallel_scan != NULL) { + _bt_relbuf(rel, so->currPos.buf); status = _bt_parallel_seize(scan, &blkno); if (!status) { - _bt_relbuf(rel, so->currPos.buf); BTScanPosInvalidate(so->currPos); return false; } } else + { blkno = opaque->btpo_next; - _bt_relbuf(rel, so->currPos.buf); + _bt_relbuf(rel, so->currPos.buf); + } } } else @@ -1581,6 +1620,11 @@ _bt_readnextpage(IndexScanDesc scan, BlockNumber blkno, ScanDirection dir) if (_bt_readpage(scan, dir, PageGetMaxOffsetNumber(page))) break; } + else if (scan->parallel_scan != NULL) + { + /* allow next page be processed by parallel worker */ + _bt_parallel_release(scan, BufferGetBlockNumber(so->currPos.buf)); + } /* * For parallel scans, get the last page scanned as it is quite @@ -1608,7 +1652,7 @@ _bt_readnextpage(IndexScanDesc scan, BlockNumber blkno, ScanDirection dir) /* * _bt_parallel_readpage() -- Read current page containing valid data for scan * - * On success, release lock and maybe pin on buffer. We return TRUE to + * On success, release lock and maybe pin on buffer. We return true to * indicate success. */ static bool @@ -1823,7 +1867,7 @@ _bt_get_endpoint(Relation rel, uint32 level, bool rightmost, offnum = P_FIRSTDATAKEY(opaque); itup = (IndexTuple) PageGetItem(page, PageGetItemId(page, offnum)); - blkno = ItemPointerGetBlockNumber(&(itup->t_tid)); + blkno = BTreeInnerTupleGetDownLink(itup); buf = _bt_relandgetbuf(rel, buf, blkno, BT_READ); page = BufferGetPage(buf); diff --git a/src/backend/access/nbtree/nbtsort.c b/src/backend/access/nbtree/nbtsort.c index bf6c03c7b2..16f5755777 100644 --- a/src/backend/access/nbtree/nbtsort.c +++ b/src/backend/access/nbtree/nbtsort.c @@ -14,15 +14,6 @@ * its parent level. When we have only one page on a level, it must be * the root -- it can be attached to the btree metapage and we are done. * - * This code is moderately slow (~10% slower) compared to the regular - * btree (insertion) build code on sorted or well-clustered data. On - * random data, however, the insertion build code is unusable -- the - * difference on a 60MB heap is a factor of 15 because the random - * probes into the btree thrash the buffer pool. (NOTE: the above - * "10%" estimate is probably obsolete, since it refers to an old and - * not very good external sort implementation that used to exist in - * this module. tuplesort.c is almost certainly faster.) - * * It is not wise to pack the pages entirely full, since then *any* * insertion would cause a split (and not only of the leaf page; the need * for a split would cascade right up the tree). The steady-state load @@ -55,7 +46,7 @@ * This code isn't concerned about the FSM at all. The caller is responsible * for initializing that. 
* - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * IDENTIFICATION @@ -67,28 +58,169 @@ #include "postgres.h" #include "access/nbtree.h" +#include "access/parallel.h" +#include "access/relscan.h" +#include "access/xact.h" #include "access/xlog.h" #include "access/xloginsert.h" +#include "catalog/index.h" #include "miscadmin.h" +#include "pgstat.h" #include "storage/smgr.h" -#include "tcop/tcopprot.h" +#include "tcop/tcopprot.h" /* pgrminclude ignore */ #include "utils/rel.h" #include "utils/sortsupport.h" #include "utils/tuplesort.h" +/* Magic numbers for parallel state sharing */ +#define PARALLEL_KEY_BTREE_SHARED UINT64CONST(0xA000000000000001) +#define PARALLEL_KEY_TUPLESORT UINT64CONST(0xA000000000000002) +#define PARALLEL_KEY_TUPLESORT_SPOOL2 UINT64CONST(0xA000000000000003) +#define PARALLEL_KEY_QUERY_TEXT UINT64CONST(0xA000000000000004) + +/* + * DISABLE_LEADER_PARTICIPATION disables the leader's participation in + * parallel index builds. This may be useful as a debugging aid. +#undef DISABLE_LEADER_PARTICIPATION + */ + /* * Status record for spooling/sorting phase. (Note we may have two of * these due to the special requirements for uniqueness-checking with * dead tuples.) */ -struct BTSpool +typedef struct BTSpool { Tuplesortstate *sortstate; /* state data for tuplesort.c */ Relation heap; Relation index; bool isunique; -}; +} BTSpool; + +/* + * Status for index builds performed in parallel. This is allocated in a + * dynamic shared memory segment. Note that there is a separate tuplesort TOC + * entry, private to tuplesort.c but allocated by this module on its behalf. + */ +typedef struct BTShared +{ + /* + * These fields are not modified during the sort. They primarily exist + * for the benefit of worker processes that need to create BTSpool state + * corresponding to that used by the leader. + */ + Oid heaprelid; + Oid indexrelid; + bool isunique; + bool isconcurrent; + int scantuplesortstates; + + /* + * workersdonecv is used to monitor the progress of workers. All parallel + * participants must indicate that they are done before leader can use + * mutable state that workers maintain during scan (and before leader can + * proceed to tuplesort_performsort()). + */ + ConditionVariable workersdonecv; + + /* + * mutex protects all fields before heapdesc. + * + * These fields contain status information of interest to B-Tree index + * builds that must work just the same when an index is built in parallel. + */ + slock_t mutex; + + /* + * Mutable state that is maintained by workers, and reported back to + * leader at end of parallel scan. + * + * nparticipantsdone is number of worker processes finished. + * + * reltuples is the total number of input heap tuples. + * + * havedead indicates if RECENTLY_DEAD tuples were encountered during + * build. + * + * indtuples is the total number of tuples that made it into the index. + * + * brokenhotchain indicates if any worker detected a broken HOT chain + * during build. + */ + int nparticipantsdone; + double reltuples; + bool havedead; + double indtuples; + bool brokenhotchain; + + /* + * This variable-sized field must come last. + * + * See _bt_parallel_estimate_shared(). + */ + ParallelHeapScanDescData heapdesc; +} BTShared; + +/* + * Status for leader in parallel index build. 
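/*
 * A hypothetical sketch, based only on the field comments in BTShared
 * above, of how a finished worker might publish its results under the
 * mutex and wake the leader waiting on workersdonecv. The real logic
 * belongs to the parallel-build routines declared later in this file
 * (_bt_parallel_scan_and_sort and friends) and may differ in detail.
 */
static void
report_worker_done(BTShared *btshared, double reltuples, bool havedead,
				   double indtuples, bool brokenhotchain)
{
	SpinLockAcquire(&btshared->mutex);
	btshared->nparticipantsdone++;
	btshared->reltuples += reltuples;
	btshared->havedead |= havedead;
	btshared->indtuples += indtuples;
	btshared->brokenhotchain |= brokenhotchain;
	SpinLockRelease(&btshared->mutex);

	ConditionVariableSignal(&btshared->workersdonecv);
}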
+ */ +typedef struct BTLeader +{ + /* parallel context itself */ + ParallelContext *pcxt; + + /* + * nparticipanttuplesorts is the exact number of worker processes + * successfully launched, plus one leader process if it participates as a + * worker (only DISABLE_LEADER_PARTICIPATION builds avoid leader + * participating as a worker). + */ + int nparticipanttuplesorts; + + /* + * Leader process convenience pointers to shared state (leader avoids TOC + * lookups). + * + * btshared is the shared state for entire build. sharedsort is the + * shared, tuplesort-managed state passed to each process tuplesort. + * sharedsort2 is the corresponding btspool2 shared state, used only when + * building unique indexes. snapshot is the snapshot used by the scan iff + * an MVCC snapshot is required. + */ + BTShared *btshared; + Sharedsort *sharedsort; + Sharedsort *sharedsort2; + Snapshot snapshot; +} BTLeader; + +/* + * Working state for btbuild and its callback. + * + * When parallel CREATE INDEX is used, there is a BTBuildState for each + * participant. + */ +typedef struct BTBuildState +{ + bool isunique; + bool havedead; + Relation heap; + BTSpool *spool; + + /* + * spool2 is needed only when the index is a unique index. Dead tuples are + * put into spool2 instead of spool in order to avoid uniqueness check. + */ + BTSpool *spool2; + double indtuples; + + /* + * btleader is only present when a parallel index build is performed, and + * only in the leader process. (Actually, only the leader has a + * BTBuildState. Workers have their own spool and spool2, though.) + */ + BTLeader *btleader; +} BTBuildState; /* * Status record for a btree page being built. We have one of these @@ -128,6 +260,14 @@ typedef struct BTWriteState } BTWriteState; +static double _bt_spools_heapscan(Relation heap, Relation index, + BTBuildState *buildstate, IndexInfo *indexInfo); +static void _bt_spooldestroy(BTSpool *btspool); +static void _bt_spool(BTSpool *btspool, ItemPointer self, + Datum *values, bool *isnull); +static void _bt_leafbuild(BTSpool *btspool, BTSpool *btspool2); +static void _bt_build_callback(Relation index, HeapTuple htup, Datum *values, + bool *isnull, bool tupleIsAlive, void *state); static Page _bt_blnewpage(uint32 level); static BTPageState *_bt_pagestate(BTWriteState *wstate, uint32 level); static void _bt_slideleft(Page page); @@ -138,45 +278,219 @@ static void _bt_buildadd(BTWriteState *wstate, BTPageState *state, static void _bt_uppershutdown(BTWriteState *wstate, BTPageState *state); static void _bt_load(BTWriteState *wstate, BTSpool *btspool, BTSpool *btspool2); +static void _bt_begin_parallel(BTBuildState *buildstate, bool isconcurrent, + int request); +static void _bt_end_parallel(BTLeader *btleader); +static Size _bt_parallel_estimate_shared(Snapshot snapshot); +static double _bt_parallel_heapscan(BTBuildState *buildstate, + bool *brokenhotchain); +static void _bt_leader_participate_as_worker(BTBuildState *buildstate); +static void _bt_parallel_scan_and_sort(BTSpool *btspool, BTSpool *btspool2, + BTShared *btshared, Sharedsort *sharedsort, + Sharedsort *sharedsort2, int sortmem); /* - * Interface routines + * btbuild() -- build a new btree index. 
*/ +IndexBuildResult * +btbuild(Relation heap, Relation index, IndexInfo *indexInfo) +{ + IndexBuildResult *result; + BTBuildState buildstate; + double reltuples; +#ifdef BTREE_BUILD_STATS + if (log_btree_build_stats) + ResetUsage(); +#endif /* BTREE_BUILD_STATS */ + + buildstate.isunique = indexInfo->ii_Unique; + buildstate.havedead = false; + buildstate.heap = heap; + buildstate.spool = NULL; + buildstate.spool2 = NULL; + buildstate.indtuples = 0; + buildstate.btleader = NULL; + + /* + * We expect to be called exactly once for any index relation. If that's + * not the case, big trouble's what we have. + */ + if (RelationGetNumberOfBlocks(index) != 0) + elog(ERROR, "index \"%s\" already contains data", + RelationGetRelationName(index)); + + reltuples = _bt_spools_heapscan(heap, index, &buildstate, indexInfo); + + /* + * Finish the build by (1) completing the sort of the spool file, (2) + * inserting the sorted tuples into btree pages and (3) building the upper + * levels. Finally, it may also be necessary to end use of parallelism. + */ + _bt_leafbuild(buildstate.spool, buildstate.spool2); + _bt_spooldestroy(buildstate.spool); + if (buildstate.spool2) + _bt_spooldestroy(buildstate.spool2); + if (buildstate.btleader) + _bt_end_parallel(buildstate.btleader); + + result = (IndexBuildResult *) palloc(sizeof(IndexBuildResult)); + + result->heap_tuples = reltuples; + result->index_tuples = buildstate.indtuples; + +#ifdef BTREE_BUILD_STATS + if (log_btree_build_stats) + { + ShowUsage("BTREE BUILD STATS"); + ResetUsage(); + } +#endif /* BTREE_BUILD_STATS */ + + return result; +} /* - * create and initialize a spool structure + * Create and initialize one or two spool structures, and save them in caller's + * buildstate argument. May also fill-in fields within indexInfo used by index + * builds. + * + * Scans the heap, possibly in parallel, filling spools with IndexTuples. This + * routine encapsulates all aspects of managing parallelism. Caller need only + * call _bt_end_parallel() in parallel case after it is done with spool/spool2. + * + * Returns the total number of heap tuples scanned. */ -BTSpool * -_bt_spoolinit(Relation heap, Relation index, bool isunique, bool isdead) +static double +_bt_spools_heapscan(Relation heap, Relation index, BTBuildState *buildstate, + IndexInfo *indexInfo) { BTSpool *btspool = (BTSpool *) palloc0(sizeof(BTSpool)); - int btKbytes; + SortCoordinate coordinate = NULL; + double reltuples = 0; + /* + * We size the sort area as maintenance_work_mem rather than work_mem to + * speed index creation. This should be OK since a single backend can't + * run multiple index creations in parallel (see also: notes on + * parallelism and maintenance_work_mem below). + */ btspool->heap = heap; btspool->index = index; - btspool->isunique = isunique; + btspool->isunique = indexInfo->ii_Unique; + + /* Save as primary spool */ + buildstate->spool = btspool; + + /* Attempt to launch parallel worker scan when required */ + if (indexInfo->ii_ParallelWorkers > 0) + _bt_begin_parallel(buildstate, indexInfo->ii_Concurrent, + indexInfo->ii_ParallelWorkers); /* - * We size the sort area as maintenance_work_mem rather than work_mem to - * speed index creation. This should be OK since a single backend can't - * run multiple index creations in parallel. Note that creation of a - * unique index actually requires two BTSpool objects. We expect that the - * second one (for dead tuples) won't get very full, so we give it only - * work_mem. 
+ * If parallel build requested and at least one worker process was + * successfully launched, set up coordination state */ - btKbytes = isdead ? work_mem : maintenance_work_mem; - btspool->sortstate = tuplesort_begin_index_btree(heap, index, isunique, - btKbytes, false); + if (buildstate->btleader) + { + coordinate = (SortCoordinate) palloc0(sizeof(SortCoordinateData)); + coordinate->isWorker = false; + coordinate->nParticipants = + buildstate->btleader->nparticipanttuplesorts; + coordinate->sharedsort = buildstate->btleader->sharedsort; + } + + /* + * Begin serial/leader tuplesort. + * + * In cases where parallelism is involved, the leader receives the same + * share of maintenance_work_mem as a serial sort (it is generally treated + * in the same way as a serial sort once we return). Parallel worker + * Tuplesortstates will have received only a fraction of + * maintenance_work_mem, though. + * + * We rely on the lifetime of the Leader Tuplesortstate almost not + * overlapping with any worker Tuplesortstate's lifetime. There may be + * some small overlap, but that's okay because we rely on leader + * Tuplesortstate only allocating a small, fixed amount of memory here. + * When its tuplesort_performsort() is called (by our caller), and + * significant amounts of memory are likely to be used, all workers must + * have already freed almost all memory held by their Tuplesortstates + * (they are about to go away completely, too). The overall effect is + * that maintenance_work_mem always represents an absolute high watermark + * on the amount of memory used by a CREATE INDEX operation, regardless of + * the use of parallelism or any other factor. + */ + buildstate->spool->sortstate = + tuplesort_begin_index_btree(heap, index, buildstate->isunique, + maintenance_work_mem, coordinate, + false); + + /* + * If building a unique index, put dead tuples in a second spool to keep + * them out of the uniqueness check. We expect that the second spool (for + * dead tuples) won't get very full, so we give it only work_mem. + */ + if (indexInfo->ii_Unique) + { + BTSpool *btspool2 = (BTSpool *) palloc0(sizeof(BTSpool)); + SortCoordinate coordinate2 = NULL; + + /* Initialize secondary spool */ + btspool2->heap = heap; + btspool2->index = index; + btspool2->isunique = false; + /* Save as secondary spool */ + buildstate->spool2 = btspool2; + + if (buildstate->btleader) + { + /* + * Set up non-private state that is passed to + * tuplesort_begin_index_btree() about the basic high level + * coordination of a parallel sort. 
+ */ + coordinate2 = (SortCoordinate) palloc0(sizeof(SortCoordinateData)); + coordinate2->isWorker = false; + coordinate2->nParticipants = + buildstate->btleader->nparticipanttuplesorts; + coordinate2->sharedsort = buildstate->btleader->sharedsort2; + } + + /* + * We expect that the second one (for dead tuples) won't get very + * full, so we give it only work_mem + */ + buildstate->spool2->sortstate = + tuplesort_begin_index_btree(heap, index, false, work_mem, + coordinate2, false); + } + + /* Fill spool using either serial or parallel heap scan */ + if (!buildstate->btleader) + reltuples = IndexBuildHeapScan(heap, index, indexInfo, true, + _bt_build_callback, (void *) buildstate, + NULL); + else + reltuples = _bt_parallel_heapscan(buildstate, + &indexInfo->ii_BrokenHotChain); + + /* okay, all heap tuples are spooled */ + if (buildstate->spool2 && !buildstate->havedead) + { + /* spool2 turns out to be unnecessary */ + _bt_spooldestroy(buildstate->spool2); + buildstate->spool2 = NULL; + } - return btspool; + return reltuples; } /* * clean up a spool structure and its substructures. */ -void +static void _bt_spooldestroy(BTSpool *btspool) { tuplesort_end(btspool->sortstate); @@ -186,7 +500,7 @@ _bt_spooldestroy(BTSpool *btspool) /* * spool an index entry into the sort file. */ -void +static void _bt_spool(BTSpool *btspool, ItemPointer self, Datum *values, bool *isnull) { tuplesort_putindextuplevalues(btspool->sortstate, btspool->index, @@ -197,7 +511,7 @@ _bt_spool(BTSpool *btspool, ItemPointer self, Datum *values, bool *isnull) * given a spool loaded by successive calls to _bt_spool, * create an entire btree. */ -void +static void _bt_leafbuild(BTSpool *btspool, BTSpool *btspool2) { BTWriteState wstate; @@ -231,11 +545,34 @@ _bt_leafbuild(BTSpool *btspool, BTSpool *btspool2) _bt_load(&wstate, btspool, btspool2); } - /* - * Internal routines. + * Per-tuple callback from IndexBuildHeapScan */ +static void +_bt_build_callback(Relation index, + HeapTuple htup, + Datum *values, + bool *isnull, + bool tupleIsAlive, + void *state) +{ + BTBuildState *buildstate = (BTBuildState *) state; + /* + * insert the index tuple into the appropriate spool file for subsequent + * processing + */ + if (tupleIsAlive || buildstate->spool2 == NULL) + _bt_spool(buildstate->spool, &htup->t_self, values, isnull); + else + { + /* dead tuples are put into spool2 */ + buildstate->havedead = true; + _bt_spool(buildstate->spool2, &htup->t_self, values, isnull); + } + + buildstate->indtuples += 1; +} /* * allocate workspace for a new, clean btree page, not linked to any siblings. @@ -406,6 +743,7 @@ _bt_sortaddtup(Page page, { trunctuple = *itup; trunctuple.t_info = sizeof(IndexTupleData); + BTreeTupleSetNAtts(&trunctuple, 0); itup = &trunctuple; itemsize = sizeof(IndexTupleData); } @@ -443,7 +781,9 @@ _bt_sortaddtup(Page page, * placeholder for the pointer to the "high key" item; when we have * filled up the page, we will set linp0 to point to itemN and clear * linpN. On the other hand, if we find this is the last (rightmost) - * page, we leave the items alone and slide the linp array over. + * page, we leave the items alone and slide the linp array over. If + * the high key is to be truncated, offset 1 is deleted, and we insert + * the truncated high key at offset 1. * * 'last' pointer indicates the last offset added to the page. 
*---------- @@ -456,6 +796,8 @@ _bt_buildadd(BTWriteState *wstate, BTPageState *state, IndexTuple itup) OffsetNumber last_off; Size pgspc; Size itupsz; + int indnatts = IndexRelationGetNumberOfAttributes(wstate->index); + int indnkeyatts = IndexRelationGetNumberOfKeyAttributes(wstate->index); /* * This is a handy place to check for cancel interrupts during the btree @@ -468,7 +810,7 @@ _bt_buildadd(BTWriteState *wstate, BTPageState *state, IndexTuple itup) last_off = state->btps_lastoff; pgspc = PageGetFreeSpace(npage); - itupsz = IndexTupleDSize(*itup); + itupsz = IndexTupleSize(itup); itupsz = MAXALIGN(itupsz); /* @@ -510,6 +852,7 @@ _bt_buildadd(BTWriteState *wstate, BTPageState *state, IndexTuple itup) ItemId ii; ItemId hii; IndexTuple oitup; + BTPageOpaque opageop = (BTPageOpaque) PageGetSpecialPointer(opage); /* Create new page of same level */ npage = _bt_blnewpage(state->btps_level); @@ -537,6 +880,42 @@ _bt_buildadd(BTWriteState *wstate, BTPageState *state, IndexTuple itup) ItemIdSetUnused(ii); /* redundant */ ((PageHeader) opage)->pd_lower -= sizeof(ItemIdData); + if (indnkeyatts != indnatts && P_ISLEAF(opageop)) + { + IndexTuple truncated; + Size truncsz; + + /* + * Truncate any non-key attributes from high key on leaf level + * (i.e. truncate on leaf level if we're building an INCLUDE + * index). This is only done at the leaf level because downlinks + * in internal pages are either negative infinity items, or get + * their contents from copying from one level down. See also: + * _bt_split(). + * + * Since the truncated tuple is probably smaller than the + * original, it cannot just be copied in place (besides, we want + * to actually save space on the leaf page). We delete the + * original high key, and add our own truncated high key at the + * same offset. + * + * Note that the page layout won't be changed very much. oitup is + * already located at the physical beginning of tuple space, so we + * only shift the line pointer array back and forth, and overwrite + * the latter portion of the space occupied by the original tuple. + * This is fairly cheap. + */ + truncated = _bt_nonkey_truncate(wstate->index, oitup); + truncsz = IndexTupleSize(truncated); + PageIndexTupleDelete(opage, P_HIKEY); + _bt_sortaddtup(opage, truncsz, truncated, P_HIKEY); + pfree(truncated); + + /* oitup should continue to point to the page's high key */ + hii = PageGetItemId(opage, P_HIKEY); + oitup = (IndexTuple) PageGetItem(opage, hii); + } + /* * Link the old page into its parent, using its minimum key. If we * don't have a parent, we have to create one; this adds a new btree @@ -545,8 +924,12 @@ _bt_buildadd(BTWriteState *wstate, BTPageState *state, IndexTuple itup) if (state->btps_next == NULL) state->btps_next = _bt_pagestate(wstate, state->btps_level + 1); - Assert(state->btps_minkey != NULL); - ItemPointerSet(&(state->btps_minkey->t_tid), oblkno, P_HIKEY); + Assert(BTreeTupleGetNAtts(state->btps_minkey, wstate->index) == + IndexRelationGetNumberOfKeyAttributes(wstate->index) || + P_LEFTMOST(opageop)); + Assert(BTreeTupleGetNAtts(state->btps_minkey, wstate->index) == 0 || + !P_LEFTMOST(opageop)); + BTreeInnerTupleSetDownLink(state->btps_minkey, oblkno); _bt_buildadd(wstate, state->btps_next, state->btps_minkey); pfree(state->btps_minkey); @@ -585,12 +968,16 @@ _bt_buildadd(BTWriteState *wstate, BTPageState *state, IndexTuple itup) * If the new item is the first for its page, stash a copy for later. 
Note * this will only happen for the first item on a level; on later pages, * the first item for a page is copied from the prior page in the code - * above. + * above. Since the minimum key for an entire level is only used as a + * minus infinity downlink, and never as a high key, there is no need to + * truncate away non-key attributes at this point. */ if (last_off == P_HIKEY) { Assert(state->btps_minkey == NULL); state->btps_minkey = CopyIndexTuple(itup); + /* _bt_sortaddtup() will perform full truncation later */ + BTreeTupleSetNAtts(state->btps_minkey, 0); } /* @@ -642,8 +1029,12 @@ _bt_uppershutdown(BTWriteState *wstate, BTPageState *state) } else { - Assert(s->btps_minkey != NULL); - ItemPointerSet(&(s->btps_minkey->t_tid), blkno, P_HIKEY); + Assert(BTreeTupleGetNAtts(s->btps_minkey, wstate->index) == + IndexRelationGetNumberOfKeyAttributes(wstate->index) || + P_LEFTMOST(opaque)); + Assert(BTreeTupleGetNAtts(s->btps_minkey, wstate->index) == 0 || + !P_LEFTMOST(opaque)); + BTreeInnerTupleSetDownLink(s->btps_minkey, blkno); _bt_buildadd(wstate, s->btps_next, s->btps_minkey); pfree(s->btps_minkey); s->btps_minkey = NULL; @@ -683,7 +1074,7 @@ _bt_load(BTWriteState *wstate, BTSpool *btspool, BTSpool *btspool2) bool load1; TupleDesc tupdes = RelationGetDescr(wstate->index); int i, - keysz = RelationGetNumberOfAttributes(wstate->index); + keysz = IndexRelationGetNumberOfKeyAttributes(wstate->index); ScanKey indexScanKey = NULL; SortSupport sortKeys; @@ -819,3 +1210,507 @@ _bt_load(BTWriteState *wstate, BTSpool *btspool, BTSpool *btspool2) smgrimmedsync(wstate->index->rd_smgr, MAIN_FORKNUM); } } + +/* + * Create parallel context, and launch workers for leader. + * + * buildstate argument should be initialized (with the exception of the + * tuplesort state in spools, which may later be created based on shared + * state initially set up here). + * + * isconcurrent indicates if operation is CREATE INDEX CONCURRENTLY. + * + * request is the target number of parallel worker processes to launch. + * + * Sets buildstate's BTLeader, which caller must use to shut down parallel + * mode by passing it to _bt_end_parallel() at the very end of its index + * build. If not even a single worker process can be launched, this is + * never set, and caller should proceed with a serial index build. + */ +static void +_bt_begin_parallel(BTBuildState *buildstate, bool isconcurrent, int request) +{ + ParallelContext *pcxt; + int scantuplesortstates; + Snapshot snapshot; + Size estbtshared; + Size estsort; + BTShared *btshared; + Sharedsort *sharedsort; + Sharedsort *sharedsort2; + BTSpool *btspool = buildstate->spool; + BTLeader *btleader = (BTLeader *) palloc0(sizeof(BTLeader)); + bool leaderparticipates = true; + char *sharedquery; + int querylen; + +#ifdef DISABLE_LEADER_PARTICIPATION + leaderparticipates = false; +#endif + + /* + * Enter parallel mode, and create context for parallel build of btree + * index + */ + EnterParallelMode(); + Assert(request > 0); + pcxt = CreateParallelContext("postgres", "_bt_parallel_build_main", + request, true); + scantuplesortstates = leaderparticipates ? request + 1 : request; + + /* + * Prepare for scan of the base relation. In a normal index build, we use + * SnapshotAny because we must retrieve all tuples and do our own time + * qual checks (because we have to index RECENTLY_DEAD tuples). In a + * concurrent build, we take a regular MVCC snapshot and index whatever's + * live according to that. 
+ */ + if (!isconcurrent) + snapshot = SnapshotAny; + else + snapshot = RegisterSnapshot(GetTransactionSnapshot()); + + /* + * Estimate size for our own PARALLEL_KEY_BTREE_SHARED workspace, and + * PARALLEL_KEY_TUPLESORT tuplesort workspace + */ + estbtshared = _bt_parallel_estimate_shared(snapshot); + shm_toc_estimate_chunk(&pcxt->estimator, estbtshared); + estsort = tuplesort_estimate_shared(scantuplesortstates); + shm_toc_estimate_chunk(&pcxt->estimator, estsort); + + /* + * Unique case requires a second spool, and so we may have to account for + * another shared workspace for that -- PARALLEL_KEY_TUPLESORT_SPOOL2 + */ + if (!btspool->isunique) + shm_toc_estimate_keys(&pcxt->estimator, 2); + else + { + shm_toc_estimate_chunk(&pcxt->estimator, estsort); + shm_toc_estimate_keys(&pcxt->estimator, 3); + } + + /* Finally, estimate PARALLEL_KEY_QUERY_TEXT space */ + querylen = strlen(debug_query_string); + shm_toc_estimate_chunk(&pcxt->estimator, querylen + 1); + shm_toc_estimate_keys(&pcxt->estimator, 1); + + /* Everyone's had a chance to ask for space, so now create the DSM */ + InitializeParallelDSM(pcxt); + + /* Store shared build state, for which we reserved space */ + btshared = (BTShared *) shm_toc_allocate(pcxt->toc, estbtshared); + /* Initialize immutable state */ + btshared->heaprelid = RelationGetRelid(btspool->heap); + btshared->indexrelid = RelationGetRelid(btspool->index); + btshared->isunique = btspool->isunique; + btshared->isconcurrent = isconcurrent; + btshared->scantuplesortstates = scantuplesortstates; + ConditionVariableInit(&btshared->workersdonecv); + SpinLockInit(&btshared->mutex); + /* Initialize mutable state */ + btshared->nparticipantsdone = 0; + btshared->reltuples = 0.0; + btshared->havedead = false; + btshared->indtuples = 0.0; + btshared->brokenhotchain = false; + heap_parallelscan_initialize(&btshared->heapdesc, btspool->heap, snapshot); + + /* + * Store shared tuplesort-private state, for which we reserved space. + * Then, initialize opaque state using tuplesort routine. + */ + sharedsort = (Sharedsort *) shm_toc_allocate(pcxt->toc, estsort); + tuplesort_initialize_shared(sharedsort, scantuplesortstates, + pcxt->seg); + + shm_toc_insert(pcxt->toc, PARALLEL_KEY_BTREE_SHARED, btshared); + shm_toc_insert(pcxt->toc, PARALLEL_KEY_TUPLESORT, sharedsort); + + /* Unique case requires a second spool, and associated shared state */ + if (!btspool->isunique) + sharedsort2 = NULL; + else + { + /* + * Store additional shared tuplesort-private state, for which we + * reserved space. Then, initialize opaque state using tuplesort + * routine. 
+ */ + sharedsort2 = (Sharedsort *) shm_toc_allocate(pcxt->toc, estsort); + tuplesort_initialize_shared(sharedsort2, scantuplesortstates, + pcxt->seg); + + shm_toc_insert(pcxt->toc, PARALLEL_KEY_TUPLESORT_SPOOL2, sharedsort2); + } + + /* Store query string for workers */ + sharedquery = (char *) shm_toc_allocate(pcxt->toc, querylen + 1); + memcpy(sharedquery, debug_query_string, querylen + 1); + shm_toc_insert(pcxt->toc, PARALLEL_KEY_QUERY_TEXT, sharedquery); + + /* Launch workers, saving status for leader/caller */ + LaunchParallelWorkers(pcxt); + btleader->pcxt = pcxt; + btleader->nparticipanttuplesorts = pcxt->nworkers_launched; + if (leaderparticipates) + btleader->nparticipanttuplesorts++; + btleader->btshared = btshared; + btleader->sharedsort = sharedsort; + btleader->sharedsort2 = sharedsort2; + btleader->snapshot = snapshot; + + /* If no workers were successfully launched, back out (do serial build) */ + if (pcxt->nworkers_launched == 0) + { + _bt_end_parallel(btleader); + return; + } + + /* Save leader state now that it's clear build will be parallel */ + buildstate->btleader = btleader; + + /* Join heap scan ourselves */ + if (leaderparticipates) + _bt_leader_participate_as_worker(buildstate); + + /* + * Caller needs to wait for all launched workers when we return. Make + * sure that the failure-to-start case will not hang forever. + */ + WaitForParallelWorkersToAttach(pcxt); +} + +/* + * Shut down workers, destroy parallel context, and end parallel mode. + */ +static void +_bt_end_parallel(BTLeader *btleader) +{ + /* Shutdown worker processes */ + WaitForParallelWorkersToFinish(btleader->pcxt); + /* Free last reference to MVCC snapshot, if one was used */ + if (IsMVCCSnapshot(btleader->snapshot)) + UnregisterSnapshot(btleader->snapshot); + DestroyParallelContext(btleader->pcxt); + ExitParallelMode(); +} + +/* + * Returns size of shared memory required to store state for a parallel + * btree index build based on the snapshot its parallel scan will use. + */ +static Size +_bt_parallel_estimate_shared(Snapshot snapshot) +{ + if (!IsMVCCSnapshot(snapshot)) + { + Assert(snapshot == SnapshotAny); + return sizeof(BTShared); + } + + return add_size(offsetof(BTShared, heapdesc) + + offsetof(ParallelHeapScanDescData, phs_snapshot_data), + EstimateSnapshotSpace(snapshot)); +} + +/* + * Within leader, wait for end of heap scan. + * + * When called, parallel heap scan started by _bt_begin_parallel() will + * already be underway within worker processes (when leader participates + * as a worker, we should end up here just as workers are finishing). + * + * Fills in fields needed for ambuild statistics, and lets caller set + * field indicating that some worker encountered a broken HOT chain. + * + * Returns the total number of heap tuples scanned. 
+ */ +static double +_bt_parallel_heapscan(BTBuildState *buildstate, bool *brokenhotchain) +{ + BTShared *btshared = buildstate->btleader->btshared; + int nparticipanttuplesorts; + double reltuples; + + nparticipanttuplesorts = buildstate->btleader->nparticipanttuplesorts; + for (;;) + { + SpinLockAcquire(&btshared->mutex); + if (btshared->nparticipantsdone == nparticipanttuplesorts) + { + buildstate->havedead = btshared->havedead; + buildstate->indtuples = btshared->indtuples; + *brokenhotchain = btshared->brokenhotchain; + reltuples = btshared->reltuples; + SpinLockRelease(&btshared->mutex); + break; + } + SpinLockRelease(&btshared->mutex); + + ConditionVariableSleep(&btshared->workersdonecv, + WAIT_EVENT_PARALLEL_CREATE_INDEX_SCAN); + } + + ConditionVariableCancelSleep(); + + return reltuples; +} + +/* + * Within leader, participate as a parallel worker. + */ +static void +_bt_leader_participate_as_worker(BTBuildState *buildstate) +{ + BTLeader *btleader = buildstate->btleader; + BTSpool *leaderworker; + BTSpool *leaderworker2; + int sortmem; + + /* Allocate memory and initialize private spool */ + leaderworker = (BTSpool *) palloc0(sizeof(BTSpool)); + leaderworker->heap = buildstate->spool->heap; + leaderworker->index = buildstate->spool->index; + leaderworker->isunique = buildstate->spool->isunique; + + /* Initialize second spool, if required */ + if (!btleader->btshared->isunique) + leaderworker2 = NULL; + else + { + /* Allocate memory for worker's own private secondary spool */ + leaderworker2 = (BTSpool *) palloc0(sizeof(BTSpool)); + + /* Initialize worker's own secondary spool */ + leaderworker2->heap = leaderworker->heap; + leaderworker2->index = leaderworker->index; + leaderworker2->isunique = false; + } + + /* + * Might as well use reliable figure when doling out maintenance_work_mem + * (when requested number of workers were not launched, this will be + * somewhat higher than it is for other workers). + */ + sortmem = maintenance_work_mem / btleader->nparticipanttuplesorts; + + /* Perform work common to all participants */ + _bt_parallel_scan_and_sort(leaderworker, leaderworker2, btleader->btshared, + btleader->sharedsort, btleader->sharedsort2, + sortmem); + +#ifdef BTREE_BUILD_STATS + if (log_btree_build_stats) + { + ShowUsage("BTREE BUILD (Leader Partial Spool) STATISTICS"); + ResetUsage(); + } +#endif /* BTREE_BUILD_STATS */ +} + +/* + * Perform work within a launched parallel process. 
+ */ +void +_bt_parallel_build_main(dsm_segment *seg, shm_toc *toc) +{ + char *sharedquery; + BTSpool *btspool; + BTSpool *btspool2; + BTShared *btshared; + Sharedsort *sharedsort; + Sharedsort *sharedsort2; + Relation heapRel; + Relation indexRel; + LOCKMODE heapLockmode; + LOCKMODE indexLockmode; + int sortmem; + +#ifdef BTREE_BUILD_STATS + if (log_btree_build_stats) + ResetUsage(); +#endif /* BTREE_BUILD_STATS */ + + /* Set debug_query_string for individual workers first */ + sharedquery = shm_toc_lookup(toc, PARALLEL_KEY_QUERY_TEXT, false); + debug_query_string = sharedquery; + + /* Report the query string from leader */ + pgstat_report_activity(STATE_RUNNING, debug_query_string); + + /* Look up nbtree shared state */ + btshared = shm_toc_lookup(toc, PARALLEL_KEY_BTREE_SHARED, false); + + /* Open relations using lock modes known to be obtained by index.c */ + if (!btshared->isconcurrent) + { + heapLockmode = ShareLock; + indexLockmode = AccessExclusiveLock; + } + else + { + heapLockmode = ShareUpdateExclusiveLock; + indexLockmode = RowExclusiveLock; + } + + /* Open relations within worker */ + heapRel = heap_open(btshared->heaprelid, heapLockmode); + indexRel = index_open(btshared->indexrelid, indexLockmode); + + /* Initialize worker's own spool */ + btspool = (BTSpool *) palloc0(sizeof(BTSpool)); + btspool->heap = heapRel; + btspool->index = indexRel; + btspool->isunique = btshared->isunique; + + /* Look up shared state private to tuplesort.c */ + sharedsort = shm_toc_lookup(toc, PARALLEL_KEY_TUPLESORT, false); + tuplesort_attach_shared(sharedsort, seg); + if (!btshared->isunique) + { + btspool2 = NULL; + sharedsort2 = NULL; + } + else + { + /* Allocate memory for worker's own private secondary spool */ + btspool2 = (BTSpool *) palloc0(sizeof(BTSpool)); + + /* Initialize worker's own secondary spool */ + btspool2->heap = btspool->heap; + btspool2->index = btspool->index; + btspool2->isunique = false; + /* Look up shared state private to tuplesort.c */ + sharedsort2 = shm_toc_lookup(toc, PARALLEL_KEY_TUPLESORT_SPOOL2, false); + tuplesort_attach_shared(sharedsort2, seg); + } + + /* Perform sorting of spool, and possibly a spool2 */ + sortmem = maintenance_work_mem / btshared->scantuplesortstates; + _bt_parallel_scan_and_sort(btspool, btspool2, btshared, sharedsort, + sharedsort2, sortmem); + +#ifdef BTREE_BUILD_STATS + if (log_btree_build_stats) + { + ShowUsage("BTREE BUILD (Worker Partial Spool) STATISTICS"); + ResetUsage(); + } +#endif /* BTREE_BUILD_STATS */ + + index_close(indexRel, indexLockmode); + heap_close(heapRel, heapLockmode); +} + +/* + * Perform a worker's portion of a parallel sort. + * + * This generates a tuplesort for passed btspool, and a second tuplesort + * state if a second btspool is need (i.e. for unique index builds). All + * other spool fields should already be set when this is called. + * + * sortmem is the amount of working memory to use within each worker, + * expressed in KBs. + * + * When this returns, workers are done, and need only release resources. 
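A note on the memory accounting described in the comments above: each participant's sortmem is an equal slice of maintenance_work_mem (expressed in kilobytes), and a worker's spool2 sort is additionally capped at work_mem, as the Min(sortmem, work_mem) call further down shows. The standalone sketch below only works through that arithmetic; the GUC values and participant count are made-up figures for illustration, not anything taken from the patch.

/*
 * Standalone sketch of the memory budgeting described above (all figures
 * in kilobytes, values invented for illustration): each participant
 * tuplesort gets an equal share of maintenance_work_mem, and a worker's
 * spool2 sort is further capped at work_mem.
 */
#include <stdio.h>

#define Min(x, y)	((x) < (y) ? (x) : (y))

int
main(void)
{
	int			maintenance_work_mem = 65536;	/* 64MB */
	int			work_mem = 4096;				/* 4MB */
	int			nparticipants = 3;				/* leader plus two workers, say */
	int			sortmem = maintenance_work_mem / nparticipants;

	printf("per-participant sort memory: %d KB\n", sortmem);
	printf("worker spool2 sort memory:   %d KB\n", Min(sortmem, work_mem));
	return 0;
}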
+ */ +static void +_bt_parallel_scan_and_sort(BTSpool *btspool, BTSpool *btspool2, + BTShared *btshared, Sharedsort *sharedsort, + Sharedsort *sharedsort2, int sortmem) +{ + SortCoordinate coordinate; + BTBuildState buildstate; + HeapScanDesc scan; + double reltuples; + IndexInfo *indexInfo; + + /* Initialize local tuplesort coordination state */ + coordinate = palloc0(sizeof(SortCoordinateData)); + coordinate->isWorker = true; + coordinate->nParticipants = -1; + coordinate->sharedsort = sharedsort; + + /* Begin "partial" tuplesort */ + btspool->sortstate = tuplesort_begin_index_btree(btspool->heap, + btspool->index, + btspool->isunique, + sortmem, coordinate, + false); + + /* + * Just as with serial case, there may be a second spool. If so, a + * second, dedicated spool2 partial tuplesort is required. + */ + if (btspool2) + { + SortCoordinate coordinate2; + + /* + * We expect that the second one (for dead tuples) won't get very + * full, so we give it only work_mem (unless sortmem is less for + * worker). Worker processes are generally permitted to allocate + * work_mem independently. + */ + coordinate2 = palloc0(sizeof(SortCoordinateData)); + coordinate2->isWorker = true; + coordinate2->nParticipants = -1; + coordinate2->sharedsort = sharedsort2; + btspool2->sortstate = + tuplesort_begin_index_btree(btspool->heap, btspool->index, false, + Min(sortmem, work_mem), coordinate2, + false); + } + + /* Fill in buildstate for _bt_build_callback() */ + buildstate.isunique = btshared->isunique; + buildstate.havedead = false; + buildstate.heap = btspool->heap; + buildstate.spool = btspool; + buildstate.spool2 = btspool2; + buildstate.indtuples = 0; + buildstate.btleader = NULL; + + /* Join parallel scan */ + indexInfo = BuildIndexInfo(btspool->index); + indexInfo->ii_Concurrent = btshared->isconcurrent; + scan = heap_beginscan_parallel(btspool->heap, &btshared->heapdesc); + reltuples = IndexBuildHeapScan(btspool->heap, btspool->index, indexInfo, + true, _bt_build_callback, + (void *) &buildstate, scan); + + /* + * Execute this worker's part of the sort. + * + * Unlike leader and serial cases, we cannot avoid calling + * tuplesort_performsort() for spool2 if it ends up containing no dead + * tuples (this is disallowed for workers by tuplesort). + */ + tuplesort_performsort(btspool->sortstate); + if (btspool2) + tuplesort_performsort(btspool2->sortstate); + + /* + * Done. Record ambuild statistics, and whether we encountered a broken + * HOT chain. + */ + SpinLockAcquire(&btshared->mutex); + btshared->nparticipantsdone++; + btshared->reltuples += reltuples; + if (buildstate.havedead) + btshared->havedead = true; + btshared->indtuples += buildstate.indtuples; + if (indexInfo->ii_BrokenHotChain) + btshared->brokenhotchain = true; + SpinLockRelease(&btshared->mutex); + + /* Notify leader */ + ConditionVariableSignal(&btshared->workersdonecv); + + /* We can end tuplesorts immediately */ + tuplesort_end(btspool->sortstate); + if (btspool2) + tuplesort_end(btspool2->sortstate); +} diff --git a/src/backend/access/nbtree/nbtutils.c b/src/backend/access/nbtree/nbtutils.c index dbfb775dec..205457ef99 100644 --- a/src/backend/access/nbtree/nbtutils.c +++ b/src/backend/access/nbtree/nbtutils.c @@ -3,7 +3,7 @@ * nbtutils.c * Utility code for Postgres btree implementation. 
* - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * @@ -63,17 +63,28 @@ _bt_mkscankey(Relation rel, IndexTuple itup) { ScanKey skey; TupleDesc itupdesc; - int natts; + int indnatts PG_USED_FOR_ASSERTS_ONLY; + int indnkeyatts; int16 *indoption; int i; itupdesc = RelationGetDescr(rel); - natts = RelationGetNumberOfAttributes(rel); + indnatts = IndexRelationGetNumberOfAttributes(rel); + indnkeyatts = IndexRelationGetNumberOfKeyAttributes(rel); indoption = rel->rd_indoption; - skey = (ScanKey) palloc(natts * sizeof(ScanKeyData)); + Assert(indnkeyatts > 0); + Assert(indnkeyatts <= indnatts); + Assert(BTreeTupleGetNAtts(itup, rel) == indnatts || + BTreeTupleGetNAtts(itup, rel) == indnkeyatts); - for (i = 0; i < natts; i++) + /* + * We'll execute search using scan key constructed on key columns. Non-key + * (INCLUDE index) columns are always omitted from scan keys. + */ + skey = (ScanKey) palloc(indnkeyatts * sizeof(ScanKeyData)); + + for (i = 0; i < indnkeyatts; i++) { FmgrInfo *procinfo; Datum arg; @@ -115,16 +126,16 @@ ScanKey _bt_mkscankey_nodata(Relation rel) { ScanKey skey; - int natts; + int indnkeyatts; int16 *indoption; int i; - natts = RelationGetNumberOfAttributes(rel); + indnkeyatts = IndexRelationGetNumberOfKeyAttributes(rel); indoption = rel->rd_indoption; - skey = (ScanKey) palloc(natts * sizeof(ScanKeyData)); + skey = (ScanKey) palloc(indnkeyatts * sizeof(ScanKeyData)); - for (i = 0; i < natts; i++) + for (i = 0; i < indnkeyatts; i++) { FmgrInfo *procinfo; int flags; @@ -507,7 +518,7 @@ _bt_compare_array_elements(const void *a, const void *b, void *arg) cxt->collation, da, db)); if (cxt->reverse) - compare = -compare; + INVERT_COMPARE_RESULT(compare); return compare; } @@ -540,8 +551,8 @@ _bt_start_array_keys(IndexScanDesc scan, ScanDirection dir) /* * _bt_advance_array_keys() -- Advance to next set of array elements * - * Returns TRUE if there is another set of values to consider, FALSE if not. - * On TRUE result, the scankeys are initialized with the next set of values. + * Returns true if there is another set of values to consider, false if not. + * On true result, the scankeys are initialized with the next set of values. */ bool _bt_advance_array_keys(IndexScanDesc scan, ScanDirection dir) @@ -724,7 +735,7 @@ _bt_restore_array_keys(IndexScanDesc scan) * for a forward scan; or after the last match for a backward scan.) * * As a byproduct of this work, we can detect contradictory quals such - * as "x = 1 AND x > 2". If we see that, we return so->qual_ok = FALSE, + * as "x = 1 AND x > 2". If we see that, we return so->qual_ok = false, * indicating the scan need not be run at all since no tuples can match. * (In this case we do not bother completing the output key array!) * Again, missing cross-type operators might cause us to fail to prove the @@ -1020,7 +1031,7 @@ _bt_preprocess_keys(IndexScanDesc scan) * * If the opfamily doesn't supply a complete set of cross-type operators we * may not be able to make the comparison. If we can make the comparison - * we store the operator result in *result and return TRUE. We return FALSE + * we store the operator result in *result and return true. We return false * if the comparison could not be made. * * Note: op always points at the same ScanKey as either leftarg or rightarg. 
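An aside on the INVERT_COMPARE_RESULT() changes in the hunks directly above and below: a comparison support function may legitimately return INT_MIN, and negating INT_MIN is undefined behavior in C, so a plain "compare = -compare" is unsafe. The standalone sketch below illustrates the idea; the macro body is written out locally as an assumption for the example, with the authoritative definition living in src/include/c.h.

/*
 * Standalone sketch (not part of the patch): flip the sign of a comparison
 * result without negating INT_MIN.  The macro body here is a local
 * stand-in that mirrors the idea of the real INVERT_COMPARE_RESULT().
 */
#include <limits.h>
#include <stdio.h>

#define INVERT_COMPARE_RESULT(var) \
	((var) = ((var) < 0) ? 1 : -(var))

int
main(void)
{
	int			cmp = INT_MIN;	/* a comparator is allowed to return this */

	/* cmp = -cmp; would overflow, since -INT_MIN is not representable */
	INVERT_COMPARE_RESULT(cmp);
	printf("%d\n", cmp);		/* prints 1 */
	return 0;
}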
@@ -1185,8 +1196,8 @@ _bt_compare_scankey_args(IndexScanDesc scan, ScanKey op, * * Lastly, for ordinary scankeys (not IS NULL/NOT NULL), we check for a * NULL comparison value. Since all btree operators are assumed strict, - * a NULL means that the qual cannot be satisfied. We return TRUE if the - * comparison value isn't NULL, or FALSE if the scan should be abandoned. + * a NULL means that the qual cannot be satisfied. We return true if the + * comparison value isn't NULL, or false if the scan should be abandoned. * * This function is applied to the *input* scankey structure; therefore * on a rescan we will be looking at already-processed scankeys. Hence @@ -1416,6 +1427,7 @@ _bt_checkkeys(IndexScanDesc scan, bool isNull; Datum test; + Assert(key->sk_attno <= BTreeTupleGetNAtts(tuple, scan->indexRelation)); /* row-comparison keys need special processing */ if (key->sk_flags & SK_ROW_HEADER) { @@ -1640,7 +1652,7 @@ _bt_check_rowcompare(ScanKey skey, IndexTuple tuple, TupleDesc tupdesc, subkey->sk_argument)); if (subkey->sk_flags & SK_BT_DESC) - cmpresult = -cmpresult; + INVERT_COMPARE_RESULT(cmpresult); /* Done comparing if unequal, else advance to next column */ if (cmpresult != 0) @@ -1772,7 +1784,7 @@ _bt_killitems(IndexScanDesc scan) return; page = BufferGetPage(buf); - if (PageGetLSN(page) == so->currPos.lsn) + if (BufferGetLSNAtomic(buf) == so->currPos.lsn) so->currPos.buf = buf; else { @@ -2069,3 +2081,136 @@ btproperty(Oid index_oid, int attno, return false; /* punt to generic code */ } } + +/* + * _bt_nonkey_truncate() -- create tuple without non-key suffix attributes. + * + * Returns truncated index tuple allocated in caller's memory context, with key + * attributes copied from caller's itup argument. Currently, suffix truncation + * is only performed to create pivot tuples in INCLUDE indexes, but some day it + * could be generalized to remove suffix attributes after the first + * distinguishing key attribute. + * + * Truncated tuple is guaranteed to be no larger than the original, which is + * important for staying under the 1/3 of a page restriction on tuple size. + * + * Note that returned tuple's t_tid offset will hold the number of attributes + * present, so the original item pointer offset is not represented. Caller + * should only change truncated tuple's downlink. + */ +IndexTuple +_bt_nonkey_truncate(Relation rel, IndexTuple itup) +{ + int nkeyattrs = IndexRelationGetNumberOfKeyAttributes(rel); + IndexTuple truncated; + + /* + * We should only ever truncate leaf index tuples, which must have both + * key and non-key attributes. It's never okay to truncate a second time. + */ + Assert(BTreeTupleGetNAtts(itup, rel) == + IndexRelationGetNumberOfAttributes(rel)); + + truncated = index_truncate_tuple(RelationGetDescr(rel), itup, nkeyattrs); + BTreeTupleSetNAtts(truncated, nkeyattrs); + + return truncated; +} + +/* + * _bt_check_natts() -- Verify tuple has expected number of attributes. + * + * Returns value indicating if the expected number of attributes were found + * for a particular offset on page. This can be used as a general purpose + * sanity check. + * + * Testing a tuple directly with BTreeTupleGetNAtts() should generally be + * preferred to calling here. That's usually more convenient, and is always + * more explicit. Call here instead when offnum's tuple may be a negative + * infinity tuple that uses the pre-v11 on-disk representation, or when a low + * context check is appropriate. 
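The comments above note that a truncated pivot tuple stores its attribute count in the offset part of t_tid, flagged via INDEX_ALT_TID_MASK and extracted with BT_N_KEYS_OFFSET_MASK (the real accessor is the BTreeTupleGetNAtts() macro used throughout the patch). The toy sketch below shows that encoding in isolation; the struct layout and mask values are stand-ins invented for the example, not the backend's actual definitions.

/*
 * Toy illustration (not backend code): a pivot tuple records how many
 * attributes it carries in the item pointer's offset field; an
 * "alternative TID" flag bit in t_info says the field is reused this way.
 * Struct layout and mask values are stand-ins for the sketch only.
 */
#include <stdint.h>
#include <stdio.h>

#define SKETCH_ALT_TID_MASK		0x2000	/* stand-in for INDEX_ALT_TID_MASK */
#define SKETCH_NKEYS_MASK		0x0fff	/* stand-in for BT_N_KEYS_OFFSET_MASK */

typedef struct SketchIndexTuple
{
	uint16_t	t_info;			/* flag bits, including the alt-TID bit */
	uint16_t	t_tid_offset;	/* reused as the attribute count for pivots */
} SketchIndexTuple;

static int
sketch_tuple_natts(const SketchIndexTuple *tup, int relnatts)
{
	if ((tup->t_info & SKETCH_ALT_TID_MASK) == 0)
		return relnatts;		/* ordinary tuple: all attributes present */
	return tup->t_tid_offset & SKETCH_NKEYS_MASK;	/* pivot: stored count */
}

int
main(void)
{
	SketchIndexTuple pivot = {SKETCH_ALT_TID_MASK, 2};	/* truncated to 2 key attrs */
	SketchIndexTuple plain = {0, 0};

	printf("%d %d\n",
		   sketch_tuple_natts(&pivot, 3),
		   sketch_tuple_natts(&plain, 3));	/* prints "2 3" */
	return 0;
}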
+ */ +bool +_bt_check_natts(Relation rel, Page page, OffsetNumber offnum) +{ + int16 natts = IndexRelationGetNumberOfAttributes(rel); + int16 nkeyatts = IndexRelationGetNumberOfKeyAttributes(rel); + BTPageOpaque opaque = (BTPageOpaque) PageGetSpecialPointer(page); + IndexTuple itup; + + /* + * We cannot reliably test a deleted or half-deleted page, since they have + * dummy high keys + */ + if (P_IGNORE(opaque)) + return true; + + Assert(offnum >= FirstOffsetNumber && + offnum <= PageGetMaxOffsetNumber(page)); + + /* + * Mask allocated for number of keys in index tuple must be able to fit + * maximum possible number of index attributes + */ + StaticAssertStmt(BT_N_KEYS_OFFSET_MASK >= INDEX_MAX_KEYS, + "BT_N_KEYS_OFFSET_MASK can't fit INDEX_MAX_KEYS"); + + itup = (IndexTuple) PageGetItem(page, PageGetItemId(page, offnum)); + + if (P_ISLEAF(opaque)) + { + if (offnum >= P_FIRSTDATAKEY(opaque)) + { + /* + * Leaf tuples that are not the page high key (non-pivot tuples) + * should never be truncated + */ + return BTreeTupleGetNAtts(itup, rel) == natts; + } + else + { + /* + * Rightmost page doesn't contain a page high key, so tuple was + * checked above as ordinary leaf tuple + */ + Assert(!P_RIGHTMOST(opaque)); + + /* Page high key tuple contains only key attributes */ + return BTreeTupleGetNAtts(itup, rel) == nkeyatts; + } + } + else /* !P_ISLEAF(opaque) */ + { + if (offnum == P_FIRSTDATAKEY(opaque)) + { + /* + * The first tuple on any internal page (possibly the first after + * its high key) is its negative infinity tuple. Negative + * infinity tuples are always truncated to zero attributes. They + * are a particular kind of pivot tuple. + * + * The number of attributes won't be explicitly represented if the + * negative infinity tuple was generated during a page split that + * occurred with a version of Postgres before v11. There must be + * a problem when there is an explicit representation that is + * non-zero, or when there is no explicit representation and the + * tuple is evidently not a pre-pg_upgrade tuple. + * + * Prior to v11, downlinks always had P_HIKEY as their offset. Use + * that to decide if the tuple is a pre-v11 tuple. + */ + return BTreeTupleGetNAtts(itup, rel) == 0 || + ((itup->t_info & INDEX_ALT_TID_MASK) == 0 && + ItemPointerGetOffsetNumber(&(itup->t_tid)) == P_HIKEY); + } + else + { + /* + * Tuple contains only key attributes, regardless of whether or + * not it is the page high key + */ + return BTreeTupleGetNAtts(itup, rel) == nkeyatts; + } + + } +} diff --git a/src/backend/access/nbtree/nbtvalidate.c b/src/backend/access/nbtree/nbtvalidate.c index 5aae53ac68..f24091c0ad 100644 --- a/src/backend/access/nbtree/nbtvalidate.c +++ b/src/backend/access/nbtree/nbtvalidate.c @@ -3,7 +3,7 @@ * nbtvalidate.c * Opclass validator for btree.
* - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * IDENTIFICATION @@ -51,6 +51,7 @@ btvalidate(Oid opclassoid) List *grouplist; OpFamilyOpFuncGroup *opclassgroup; List *familytypes; + int usefulgroups; int i; ListCell *lc; @@ -95,6 +96,14 @@ btvalidate(Oid opclassoid) ok = check_amproc_signature(procform->amproc, VOIDOID, true, 1, 1, INTERNALOID); break; + case BTINRANGE_PROC: + ok = check_amproc_signature(procform->amproc, BOOLOID, true, + 5, 5, + procform->amproclefttype, + procform->amproclefttype, + procform->amprocrighttype, + BOOLOID, BOOLOID); + break; default: ereport(INFO, (errcode(ERRCODE_INVALID_OBJECT_DEFINITION), @@ -165,12 +174,28 @@ btvalidate(Oid opclassoid) /* Now check for inconsistent groups of operators/functions */ grouplist = identify_opfamily_groups(oprlist, proclist); + usefulgroups = 0; opclassgroup = NULL; familytypes = NIL; foreach(lc, grouplist) { OpFamilyOpFuncGroup *thisgroup = (OpFamilyOpFuncGroup *) lfirst(lc); + /* + * It is possible for an in_range support function to have a RHS type + * that is otherwise irrelevant to the opfamily --- for instance, SQL + * requires the datetime_ops opclass to have range support with an + * interval offset. So, if this group appears to contain only an + * in_range function, ignore it: it doesn't represent a pair of + * supported types. + */ + if (thisgroup->operatorset == 0 && + thisgroup->functionset == (1 << BTINRANGE_PROC)) + continue; + + /* Else count it as a relevant group */ + usefulgroups++; + /* Remember the group exactly matching the test opclass */ if (thisgroup->lefttype == opcintype && thisgroup->righttype == opcintype) @@ -186,8 +211,8 @@ btvalidate(Oid opclassoid) /* * Complain if there seems to be an incomplete set of either operators - * or support functions for this datatype pair. The only thing that - * is considered optional is the sortsupport function. + * or support functions for this datatype pair. The only things + * considered optional are the sortsupport and in_range functions. */ if (thisgroup->operatorset != ((1 << BTLessStrategyNumber) | @@ -234,8 +259,7 @@ btvalidate(Oid opclassoid) * additional qual clauses from equivalence classes, so it seems * reasonable to insist that all built-in btree opfamilies be complete. */ - if (list_length(grouplist) != - list_length(familytypes) * list_length(familytypes)) + if (usefulgroups != (list_length(familytypes) * list_length(familytypes))) { ereport(INFO, (errcode(ERRCODE_INVALID_OBJECT_DEFINITION), diff --git a/src/backend/access/nbtree/nbtxlog.c b/src/backend/access/nbtree/nbtxlog.c index 3610c7c7e0..67a94cb80a 100644 --- a/src/backend/access/nbtree/nbtxlog.c +++ b/src/backend/access/nbtree/nbtxlog.c @@ -4,7 +4,7 @@ * WAL replay logic for btrees. * * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * IDENTIFICATION @@ -51,9 +51,15 @@ _bt_restore_page(Page page, char *from, int len) i = 0; while (from < end) { - /* Need to copy tuple header due to alignment considerations */ + /* + * As we step through the items, 'from' won't always be properly + * aligned, so we need to use memcpy(). 
Further, we use Item (which + * is just a char*) here for our items array for the same reason; + * wouldn't want the compiler or anyone thinking that an item is + * aligned when it isn't. + */ memcpy(&itupdata, from, sizeof(IndexTupleData)); - itemsz = IndexTupleDSize(itupdata); + itemsz = IndexTupleSize(&itupdata); itemsz = MAXALIGN(itemsz); items[i] = (Item) from; @@ -102,13 +108,16 @@ _bt_restore_meta(XLogReaderState *record, uint8 block_id) md->btm_level = xlrec->level; md->btm_fastroot = xlrec->fastroot; md->btm_fastlevel = xlrec->fastlevel; + md->btm_oldest_btpo_xact = xlrec->oldest_btpo_xact; + md->btm_last_cleanup_num_heap_tuples = xlrec->last_cleanup_num_heap_tuples; pageop = (BTPageOpaque) PageGetSpecialPointer(metapg); pageop->btpo_flags = BTP_META; /* - * Set pd_lower just past the end of the metadata. This is not essential - * but it makes the page look compressible to xlog.c. + * Set pd_lower just past the end of the metadata. This is essential, + * because without doing so, metadata will be lost if xlog.c compresses + * the page. */ ((PageHeader) metapg)->pd_lower = ((char *) md + sizeof(BTMetaPageData)) - (char *) metapg; @@ -135,7 +144,7 @@ _bt_clear_incomplete_split(XLogReaderState *record, uint8 block_id) Page page = (Page) BufferGetPage(buf); BTPageOpaque pageop = (BTPageOpaque) PageGetSpecialPointer(page); - Assert((pageop->btpo_flags & BTP_INCOMPLETE_SPLIT) != 0); + Assert(P_INCOMPLETE_SPLIT(pageop)); pageop->btpo_flags &= ~BTP_INCOMPLETE_SPLIT; PageSetLSN(page, lsn); @@ -193,7 +202,7 @@ btree_xlog_insert(bool isleaf, bool ismeta, XLogReaderState *record) } static void -btree_xlog_split(bool onleft, XLogReaderState *record) +btree_xlog_split(bool onleft, bool lhighkey, XLogReaderState *record) { XLogRecPtr lsn = record->EndRecPtr; xl_btree_split *xlrec = (xl_btree_split *) XLogRecGetData(record); @@ -204,7 +213,7 @@ btree_xlog_split(bool onleft, XLogReaderState *record) BTPageOpaque ropaque; char *datapos; Size datalen; - Item left_hikey = NULL; + IndexTuple left_hikey = NULL; Size left_hikeysz = 0; BlockNumber leftsib; BlockNumber rightsib; @@ -240,14 +249,16 @@ btree_xlog_split(bool onleft, XLogReaderState *record) _bt_restore_page(rpage, datapos, datalen); /* - * On leaf level, the high key of the left page is equal to the first key - * on the right page. + * When the high key isn't present in the WAL record, we assume it to + * be equal to the first key on the right page. It must be from the leaf + * level.
*/ - if (isleaf) + if (!lhighkey) { ItemId hiItemId = PageGetItemId(rpage, P_FIRSTDATAKEY(ropaque)); - left_hikey = PageGetItem(rpage, hiItemId); + Assert(isleaf); + left_hikey = (IndexTuple) PageGetItem(rpage, hiItemId); left_hikeysz = ItemIdGetLength(hiItemId); } @@ -271,7 +282,7 @@ btree_xlog_split(bool onleft, XLogReaderState *record) Page lpage = (Page) BufferGetPage(lbuf); BTPageOpaque lopaque = (BTPageOpaque) PageGetSpecialPointer(lpage); OffsetNumber off; - Item newitem = NULL; + IndexTuple newitem = NULL; Size newitemsz = 0; Page newlpage; OffsetNumber leftoff; @@ -280,27 +291,28 @@ btree_xlog_split(bool onleft, XLogReaderState *record) if (onleft) { - newitem = (Item) datapos; + newitem = (IndexTuple) datapos; newitemsz = MAXALIGN(IndexTupleSize(newitem)); datapos += newitemsz; datalen -= newitemsz; } /* Extract left hikey and its size (assuming 16-bit alignment) */ - if (!isleaf) + if (lhighkey) { - left_hikey = (Item) datapos; + left_hikey = (IndexTuple) datapos; left_hikeysz = MAXALIGN(IndexTupleSize(left_hikey)); datapos += left_hikeysz; datalen -= left_hikeysz; } + Assert(datalen == 0); newlpage = PageGetTempPageCopySpecial(lpage); /* Set high key */ leftoff = P_HIKEY; - if (PageAddItem(newlpage, left_hikey, left_hikeysz, + if (PageAddItem(newlpage, (Item) left_hikey, left_hikeysz, P_HIKEY, false, false) == InvalidOffsetNumber) elog(PANIC, "failed to add high key to left page after split"); leftoff = OffsetNumberNext(leftoff); @@ -309,12 +321,12 @@ btree_xlog_split(bool onleft, XLogReaderState *record) { ItemId itemid; Size itemsz; - Item item; + IndexTuple item; /* add the new item if it was inserted on left page */ if (onleft && off == xlrec->newitemoff) { - if (PageAddItem(newlpage, newitem, newitemsz, leftoff, + if (PageAddItem(newlpage, (Item) newitem, newitemsz, leftoff, false, false) == InvalidOffsetNumber) elog(ERROR, "failed to add new item to left page after split"); leftoff = OffsetNumberNext(leftoff); @@ -322,8 +334,8 @@ btree_xlog_split(bool onleft, XLogReaderState *record) itemid = PageGetItemId(lpage, off); itemsz = ItemIdGetLength(itemid); - item = PageGetItem(lpage, itemid); - if (PageAddItem(newlpage, item, itemsz, leftoff, + item = (IndexTuple) PageGetItem(lpage, itemid); + if (PageAddItem(newlpage, (Item) item, itemsz, leftoff, false, false) == InvalidOffsetNumber) elog(ERROR, "failed to add old item to left page after split"); leftoff = OffsetNumberNext(leftoff); @@ -332,7 +344,7 @@ btree_xlog_split(bool onleft, XLogReaderState *record) /* cope with possibility that newitem goes at the end */ if (onleft && off == xlrec->newitemoff) { - if (PageAddItem(newlpage, newitem, newitemsz, leftoff, + if (PageAddItem(newlpage, (Item) newitem, newitemsz, leftoff, false, false) == InvalidOffsetNumber) elog(ERROR, "failed to add new item to left page after split"); leftoff = OffsetNumberNext(leftoff); @@ -598,7 +610,7 @@ btree_xlog_delete_get_latestRemovedXid(XLogReaderState *record) UnlockReleaseBuffer(ibuffer); return InvalidTransactionId; } - LockBuffer(hbuffer, BUFFER_LOCK_SHARE); + LockBuffer(hbuffer, BT_READ); hpage = (Page) BufferGetPage(hbuffer); /* @@ -755,11 +767,11 @@ btree_xlog_mark_page_halfdead(uint8 info, XLogReaderState *record) nextoffset = OffsetNumberNext(poffset); itemid = PageGetItemId(page, nextoffset); itup = (IndexTuple) PageGetItem(page, itemid); - rightsib = ItemPointerGetBlockNumber(&itup->t_tid); + rightsib = BTreeInnerTupleGetDownLink(itup); itemid = PageGetItemId(page, poffset); itup = (IndexTuple) PageGetItem(page, itemid); - 
ItemPointerSet(&(itup->t_tid), rightsib, P_HIKEY); + BTreeInnerTupleSetDownLink(itup, rightsib); nextoffset = OffsetNumberNext(poffset); PageIndexTupleDelete(page, nextoffset); @@ -788,10 +800,8 @@ btree_xlog_mark_page_halfdead(uint8 info, XLogReaderState *record) */ MemSet(&trunctuple, 0, sizeof(IndexTupleData)); trunctuple.t_info = sizeof(IndexTupleData); - if (xlrec->topparent != InvalidBlockNumber) - ItemPointerSet(&trunctuple.t_tid, xlrec->topparent, P_HIKEY); - else - ItemPointerSetInvalid(&trunctuple.t_tid); + BTreeTupleSetTopParent(&trunctuple, xlrec->topparent); + if (PageAddItem(page, (Item) &trunctuple, sizeof(IndexTupleData), P_HIKEY, false, false) == InvalidOffsetNumber) elog(ERROR, "could not add dummy high key to half-dead page"); @@ -898,10 +908,8 @@ btree_xlog_unlink_page(uint8 info, XLogReaderState *record) /* Add a dummy hikey item */ MemSet(&trunctuple, 0, sizeof(IndexTupleData)); trunctuple.t_info = sizeof(IndexTupleData); - if (xlrec->topparent != InvalidBlockNumber) - ItemPointerSet(&trunctuple.t_tid, xlrec->topparent, P_HIKEY); - else - ItemPointerSetInvalid(&trunctuple.t_tid); + BTreeTupleSetTopParent(&trunctuple, xlrec->topparent); + if (PageAddItem(page, (Item) &trunctuple, sizeof(IndexTupleData), P_HIKEY, false, false) == InvalidOffsetNumber) elog(ERROR, "could not add dummy high key to half-dead page"); @@ -978,7 +986,6 @@ btree_xlog_reuse_page(XLogReaderState *record) } } - void btree_redo(XLogReaderState *record) { @@ -996,10 +1003,16 @@ btree_redo(XLogReaderState *record) btree_xlog_insert(false, true, record); break; case XLOG_BTREE_SPLIT_L: - btree_xlog_split(true, record); + btree_xlog_split(true, false, record); + break; + case XLOG_BTREE_SPLIT_L_HIGHKEY: + btree_xlog_split(true, true, record); break; case XLOG_BTREE_SPLIT_R: - btree_xlog_split(false, record); + btree_xlog_split(false, false, record); + break; + case XLOG_BTREE_SPLIT_R_HIGHKEY: + btree_xlog_split(false, true, record); break; case XLOG_BTREE_VACUUM: btree_xlog_vacuum(record); @@ -1020,6 +1033,9 @@ btree_redo(XLogReaderState *record) case XLOG_BTREE_REUSE_PAGE: btree_xlog_reuse_page(record); break; + case XLOG_BTREE_META_CLEANUP: + _bt_restore_meta(record, 0); + break; default: elog(PANIC, "btree_redo: unknown op code %u", info); } @@ -1034,7 +1050,7 @@ btree_mask(char *pagedata, BlockNumber blkno) Page page = (Page) pagedata; BTPageOpaque maskopaq; - mask_page_lsn(page); + mask_page_lsn_and_checksum(page); mask_page_hint_bits(page); mask_unused_space(page); diff --git a/src/backend/access/rmgrdesc/brindesc.c b/src/backend/access/rmgrdesc/brindesc.c index 8eb5275a8b..d464234254 100644 --- a/src/backend/access/rmgrdesc/brindesc.c +++ b/src/backend/access/rmgrdesc/brindesc.c @@ -3,7 +3,7 @@ * brindesc.c * rmgr descriptor routines for BRIN indexes * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * diff --git a/src/backend/access/rmgrdesc/clogdesc.c b/src/backend/access/rmgrdesc/clogdesc.c index 9181154ffd..4c00d4d1f8 100644 --- a/src/backend/access/rmgrdesc/clogdesc.c +++ b/src/backend/access/rmgrdesc/clogdesc.c @@ -3,7 +3,7 @@ * clogdesc.c * rmgr descriptor routines for access/transam/clog.c * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * diff 
--git a/src/backend/access/rmgrdesc/committsdesc.c b/src/backend/access/rmgrdesc/committsdesc.c index 3e670bd543..b6e210398d 100644 --- a/src/backend/access/rmgrdesc/committsdesc.c +++ b/src/backend/access/rmgrdesc/committsdesc.c @@ -3,7 +3,7 @@ * committsdesc.c * rmgr descriptor routines for access/transam/commit_ts.c * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * diff --git a/src/backend/access/rmgrdesc/dbasedesc.c b/src/backend/access/rmgrdesc/dbasedesc.c index 768242cfd5..39e26d7ed4 100644 --- a/src/backend/access/rmgrdesc/dbasedesc.c +++ b/src/backend/access/rmgrdesc/dbasedesc.c @@ -3,7 +3,7 @@ * dbasedesc.c * rmgr descriptor routines for commands/dbcommands.c * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * diff --git a/src/backend/access/rmgrdesc/genericdesc.c b/src/backend/access/rmgrdesc/genericdesc.c index c4705428f1..4e9bba804d 100644 --- a/src/backend/access/rmgrdesc/genericdesc.c +++ b/src/backend/access/rmgrdesc/genericdesc.c @@ -4,7 +4,7 @@ * rmgr descriptor routines for access/transam/generic_xlog.c * * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/backend/access/rmgrdesc/genericdesc.c diff --git a/src/backend/access/rmgrdesc/gindesc.c b/src/backend/access/rmgrdesc/gindesc.c index 02c887496e..3456187e3d 100644 --- a/src/backend/access/rmgrdesc/gindesc.c +++ b/src/backend/access/rmgrdesc/gindesc.c @@ -3,7 +3,7 @@ * gindesc.c * rmgr descriptor routines for access/transam/gin/ginxlog.c * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * diff --git a/src/backend/access/rmgrdesc/gistdesc.c b/src/backend/access/rmgrdesc/gistdesc.c index dc0506913c..e5e925e0c5 100644 --- a/src/backend/access/rmgrdesc/gistdesc.c +++ b/src/backend/access/rmgrdesc/gistdesc.c @@ -3,7 +3,7 @@ * gistdesc.c * rmgr descriptor routines for access/gist/gistxlog.c * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * diff --git a/src/backend/access/rmgrdesc/hashdesc.c b/src/backend/access/rmgrdesc/hashdesc.c index 3e9236122b..3c53c84f1a 100644 --- a/src/backend/access/rmgrdesc/hashdesc.c +++ b/src/backend/access/rmgrdesc/hashdesc.c @@ -3,7 +3,7 @@ * hashdesc.c * rmgr descriptor routines for access/hash/hash.c * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * diff --git a/src/backend/access/rmgrdesc/heapdesc.c b/src/backend/access/rmgrdesc/heapdesc.c index 44d2d6333f..318a281d7f 100644 --- a/src/backend/access/rmgrdesc/heapdesc.c +++ b/src/backend/access/rmgrdesc/heapdesc.c @@ -3,7 +3,7 @@ * heapdesc.c * rmgr descriptor routines for access/heap/heapam.c * - * Portions 
Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * @@ -75,6 +75,19 @@ heap_desc(StringInfo buf, XLogReaderState *record) xlrec->new_offnum, xlrec->new_xmax); } + else if (info == XLOG_HEAP_TRUNCATE) + { + xl_heap_truncate *xlrec = (xl_heap_truncate *) rec; + int i; + + if (xlrec->flags & XLH_TRUNCATE_CASCADE) + appendStringInfo(buf, "cascade "); + if (xlrec->flags & XLH_TRUNCATE_RESTART_SEQS) + appendStringInfo(buf, "restart_seqs "); + appendStringInfo(buf, "nrelids %u relids", xlrec->nrelids); + for (i = 0; i < xlrec->nrelids; i++) + appendStringInfo(buf, " %u", xlrec->relids[i]); + } else if (info == XLOG_HEAP_CONFIRM) { xl_heap_confirm *xlrec = (xl_heap_confirm *) rec; @@ -186,6 +199,9 @@ heap_identify(uint8 info) case XLOG_HEAP_HOT_UPDATE | XLOG_HEAP_INIT_PAGE: id = "HOT_UPDATE+INIT"; break; + case XLOG_HEAP_TRUNCATE: + id = "TRUNCATE"; + break; case XLOG_HEAP_CONFIRM: id = "HEAP_CONFIRM"; break; diff --git a/src/backend/access/rmgrdesc/logicalmsgdesc.c b/src/backend/access/rmgrdesc/logicalmsgdesc.c index 0b971c2aee..5b26da1b86 100644 --- a/src/backend/access/rmgrdesc/logicalmsgdesc.c +++ b/src/backend/access/rmgrdesc/logicalmsgdesc.c @@ -3,7 +3,7 @@ * logicalmsgdesc.c * rmgr descriptor routines for replication/logical/message.c * - * Portions Copyright (c) 2015-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 2015-2018, PostgreSQL Global Development Group * * * IDENTIFICATION diff --git a/src/backend/access/rmgrdesc/mxactdesc.c b/src/backend/access/rmgrdesc/mxactdesc.c index 9c17447744..bd13837bd4 100644 --- a/src/backend/access/rmgrdesc/mxactdesc.c +++ b/src/backend/access/rmgrdesc/mxactdesc.c @@ -3,7 +3,7 @@ * mxactdesc.c * rmgr descriptor routines for access/transam/multixact.c * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * diff --git a/src/backend/access/rmgrdesc/nbtdesc.c b/src/backend/access/rmgrdesc/nbtdesc.c index a3e1331fe2..5c4457179d 100644 --- a/src/backend/access/rmgrdesc/nbtdesc.c +++ b/src/backend/access/rmgrdesc/nbtdesc.c @@ -3,7 +3,7 @@ * nbtdesc.c * rmgr descriptor routines for access/nbtree/nbtxlog.c * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * @@ -35,6 +35,8 @@ btree_desc(StringInfo buf, XLogReaderState *record) } case XLOG_BTREE_SPLIT_L: case XLOG_BTREE_SPLIT_R: + case XLOG_BTREE_SPLIT_L_HIGHKEY: + case XLOG_BTREE_SPLIT_R_HIGHKEY: { xl_btree_split *xlrec = (xl_btree_split *) rec; @@ -94,6 +96,15 @@ btree_desc(StringInfo buf, XLogReaderState *record) xlrec->node.relNode, xlrec->latestRemovedXid); break; } + case XLOG_BTREE_META_CLEANUP: + { + xl_btree_metadata *xlrec = (xl_btree_metadata *) rec; + + appendStringInfo(buf, "oldest_btpo_xact %u; last_cleanup_num_heap_tuples: %f", + xlrec->oldest_btpo_xact, + xlrec->last_cleanup_num_heap_tuples); + break; + } } } @@ -119,6 +130,12 @@ btree_identify(uint8 info) case XLOG_BTREE_SPLIT_R: id = "SPLIT_R"; break; + case XLOG_BTREE_SPLIT_L_HIGHKEY: + id = "SPLIT_L_HIGHKEY"; + break; + case XLOG_BTREE_SPLIT_R_HIGHKEY: + id = "SPLIT_R_HIGHKEY"; + break; case XLOG_BTREE_VACUUM: id = "VACUUM"; break; @@ 
-140,6 +157,9 @@ btree_identify(uint8 info) case XLOG_BTREE_REUSE_PAGE: id = "REUSE_PAGE"; break; + case XLOG_BTREE_META_CLEANUP: + id = "META_CLEANUP"; + break; } return id; diff --git a/src/backend/access/rmgrdesc/relmapdesc.c b/src/backend/access/rmgrdesc/relmapdesc.c index 4cbdf37c70..5dbec9d94c 100644 --- a/src/backend/access/rmgrdesc/relmapdesc.c +++ b/src/backend/access/rmgrdesc/relmapdesc.c @@ -3,7 +3,7 @@ * relmapdesc.c * rmgr descriptor routines for utils/cache/relmapper.c * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * diff --git a/src/backend/access/rmgrdesc/replorigindesc.c b/src/backend/access/rmgrdesc/replorigindesc.c index c43f850f8e..2719bf4a28 100644 --- a/src/backend/access/rmgrdesc/replorigindesc.c +++ b/src/backend/access/rmgrdesc/replorigindesc.c @@ -3,7 +3,7 @@ * replorigindesc.c * rmgr descriptor routines for replication/logical/origin.c * - * Portions Copyright (c) 2015-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 2015-2018, PostgreSQL Global Development Group * * * IDENTIFICATION diff --git a/src/backend/access/rmgrdesc/seqdesc.c b/src/backend/access/rmgrdesc/seqdesc.c index 2209f7284e..5c11eb00f0 100644 --- a/src/backend/access/rmgrdesc/seqdesc.c +++ b/src/backend/access/rmgrdesc/seqdesc.c @@ -3,7 +3,7 @@ * seqdesc.c * rmgr descriptor routines for commands/sequence.c * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * diff --git a/src/backend/access/rmgrdesc/smgrdesc.c b/src/backend/access/rmgrdesc/smgrdesc.c index b8174373dd..df1ad38b5a 100644 --- a/src/backend/access/rmgrdesc/smgrdesc.c +++ b/src/backend/access/rmgrdesc/smgrdesc.c @@ -3,7 +3,7 @@ * smgrdesc.c * rmgr descriptor routines for catalog/storage.c * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * @@ -14,7 +14,6 @@ */ #include "postgres.h" -#include "catalog/catalog.h" #include "catalog/storage_xlog.h" diff --git a/src/backend/access/rmgrdesc/spgdesc.c b/src/backend/access/rmgrdesc/spgdesc.c index 41ed84b168..92b1392974 100644 --- a/src/backend/access/rmgrdesc/spgdesc.c +++ b/src/backend/access/rmgrdesc/spgdesc.c @@ -3,7 +3,7 @@ * spgdesc.c * rmgr descriptor routines for access/spgist/spgxlog.c * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * diff --git a/src/backend/access/rmgrdesc/standbydesc.c b/src/backend/access/rmgrdesc/standbydesc.c index 278546a728..76825a8d9c 100644 --- a/src/backend/access/rmgrdesc/standbydesc.c +++ b/src/backend/access/rmgrdesc/standbydesc.c @@ -3,7 +3,7 @@ * standbydesc.c * rmgr descriptor routines for storage/ipc/standby.c * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * diff --git a/src/backend/access/rmgrdesc/tblspcdesc.c b/src/backend/access/rmgrdesc/tblspcdesc.c index 
47c42328f3..d97762687b 100644 --- a/src/backend/access/rmgrdesc/tblspcdesc.c +++ b/src/backend/access/rmgrdesc/tblspcdesc.c @@ -3,7 +3,7 @@ * tblspcdesc.c * rmgr descriptor routines for commands/tablespace.c * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * diff --git a/src/backend/access/rmgrdesc/xactdesc.c b/src/backend/access/rmgrdesc/xactdesc.c index 3aafa79e52..6d5ebd475b 100644 --- a/src/backend/access/rmgrdesc/xactdesc.c +++ b/src/backend/access/rmgrdesc/xactdesc.c @@ -3,7 +3,7 @@ * xactdesc.c * rmgr descriptor routines for access/transam/xact.c * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * @@ -16,7 +16,6 @@ #include "access/transam.h" #include "access/xact.h" -#include "catalog/catalog.h" #include "storage/sinval.h" #include "storage/standbydefs.h" #include "utils/timestamp.h" @@ -102,13 +101,21 @@ ParseCommitRecord(uint8 info, xl_xact_commit *xlrec, xl_xact_parsed_commit *pars parsed->twophase_xid = xl_twophase->xid; data += sizeof(xl_xact_twophase); + + if (parsed->xinfo & XACT_XINFO_HAS_GID) + { + strlcpy(parsed->twophase_gid, data, sizeof(parsed->twophase_gid)); + data += strlen(data) + 1; + } } + /* Note: no alignment is guaranteed after this point */ + if (parsed->xinfo & XACT_XINFO_HAS_ORIGIN) { xl_xact_origin xl_origin; - /* we're only guaranteed 4 byte alignment, so copy onto stack */ + /* no alignment is guaranteed, so copy onto stack */ memcpy(&xl_origin, data, sizeof(xl_origin)); parsed->origin_lsn = xl_origin.origin_lsn; @@ -139,6 +146,16 @@ ParseAbortRecord(uint8 info, xl_xact_abort *xlrec, xl_xact_parsed_abort *parsed) data += sizeof(xl_xact_xinfo); } + if (parsed->xinfo & XACT_XINFO_HAS_DBINFO) + { + xl_xact_dbinfo *xl_dbinfo = (xl_xact_dbinfo *) data; + + parsed->dbId = xl_dbinfo->dbId; + parsed->tsId = xl_dbinfo->tsId; + + data += sizeof(xl_xact_dbinfo); + } + if (parsed->xinfo & XACT_XINFO_HAS_SUBXACTS) { xl_xact_subxacts *xl_subxacts = (xl_xact_subxacts *) data; @@ -168,6 +185,27 @@ ParseAbortRecord(uint8 info, xl_xact_abort *xlrec, xl_xact_parsed_abort *parsed) parsed->twophase_xid = xl_twophase->xid; data += sizeof(xl_xact_twophase); + + if (parsed->xinfo & XACT_XINFO_HAS_GID) + { + strlcpy(parsed->twophase_gid, data, sizeof(parsed->twophase_gid)); + data += strlen(data) + 1; + } + } + + /* Note: no alignment is guaranteed after this point */ + + if (parsed->xinfo & XACT_XINFO_HAS_ORIGIN) + { + xl_xact_origin xl_origin; + + /* no alignment is guaranteed, so copy onto stack */ + memcpy(&xl_origin, data, sizeof(xl_origin)); + + parsed->origin_lsn = xl_origin.origin_lsn; + parsed->origin_timestamp = xl_origin.origin_timestamp; + + data += sizeof(xl_xact_origin); } } diff --git a/src/backend/access/rmgrdesc/xlogdesc.c b/src/backend/access/rmgrdesc/xlogdesc.c index f72f076017..00741c7b09 100644 --- a/src/backend/access/rmgrdesc/xlogdesc.c +++ b/src/backend/access/rmgrdesc/xlogdesc.c @@ -3,7 +3,7 @@ * xlogdesc.c * rmgr descriptor routines for access/transam/xlog.c * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * diff --git 
a/src/backend/access/spgist/Makefile b/src/backend/access/spgist/Makefile index 14948a531e..5be3df5992 100644 --- a/src/backend/access/spgist/Makefile +++ b/src/backend/access/spgist/Makefile @@ -14,6 +14,7 @@ include $(top_builddir)/src/Makefile.global OBJS = spgutils.o spginsert.o spgscan.o spgvacuum.o spgvalidate.o \ spgdoinsert.o spgxlog.o \ - spgtextproc.o spgquadtreeproc.o spgkdtreeproc.o + spgtextproc.o spgquadtreeproc.o spgkdtreeproc.o \ + spgproc.o include $(top_srcdir)/src/backend/common.mk diff --git a/src/backend/access/spgist/README b/src/backend/access/spgist/README index 09ab21af26..b55b073832 100644 --- a/src/backend/access/spgist/README +++ b/src/backend/access/spgist/README @@ -41,7 +41,11 @@ contain exactly one inner tuple. When the search traversal algorithm reaches an inner tuple, it chooses a set of nodes to continue tree traverse in depth. If it reaches a leaf page it -scans a list of leaf tuples to find the ones that match the query. +scans a list of leaf tuples to find the ones that match the query. SP-GiST +also supports ordered (nearest-neighbor) searches - that is during scan pending +nodes are put into priority queue, so traversal is performed by the +closest-first model. + The insertion algorithm descends the tree similarly, except it must choose just one node to descend to from each inner tuple. Insertion might also have diff --git a/src/backend/access/spgist/spgdoinsert.c b/src/backend/access/spgist/spgdoinsert.c index b0702a7f92..098e09c574 100644 --- a/src/backend/access/spgist/spgdoinsert.c +++ b/src/backend/access/spgist/spgdoinsert.c @@ -4,7 +4,7 @@ * implementation of insert algorithm * * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * IDENTIFICATION @@ -580,7 +580,7 @@ setRedirectionTuple(SPPageDesc *current, OffsetNumber position, * Test to see if the user-defined picksplit function failed to do its job, * ie, it put all the leaf tuples into the same node. * If so, randomly divide the tuples into several nodes (all with the same - * label) and return TRUE to select allTheSame mode for this inner tuple. + * label) and return true to select allTheSame mode for this inner tuple. * * (This code is also used to forcibly select allTheSame mode for nulls.) * @@ -1906,14 +1906,38 @@ spgdoinsert(Relation index, SpGistState *state, procinfo = index_getprocinfo(index, 1, SPGIST_CHOOSE_PROC); /* - * Since we don't use index_form_tuple in this AM, we have to make sure - * value to be inserted is not toasted; FormIndexDatum doesn't guarantee - * that. + * Prepare the leaf datum to insert. + * + * If an optional "compress" method is provided, then call it to form the + * leaf datum from the input datum. Otherwise store the input datum as + * is. Since we don't use index_form_tuple in this AM, we have to make + * sure value to be inserted is not toasted; FormIndexDatum doesn't + * guarantee that. But we assume the "compress" method to return an + * untoasted value. 
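/*
 * Illustrative sketch, not part of the patch: roughly what such a
 * "compress" support function can look like.  The function name and the
 * opclass it would serve are hypothetical.  It reduces a (possibly
 * toasted) polygon to its plain, fixed-size bounding box, so the leaf
 * datum handed back to spgdoinsert is of the declared leaf type and is
 * never toasted.
 */
#include "postgres.h"
#include "fmgr.h"
#include "utils/geo_decls.h"

PG_FUNCTION_INFO_V1(spg_demo_poly_compress);

Datum
spg_demo_poly_compress(PG_FUNCTION_ARGS)
{
    POLYGON    *polygon = PG_GETARG_POLYGON_P(0);   /* detoasts the input */
    BOX        *box = (BOX *) palloc(sizeof(BOX));

    /* the leaf datum is just the polygon's cached bounding box */
    *box = polygon->boundbox;

    PG_RETURN_BOX_P(box);
}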
*/ - if (!isnull && state->attType.attlen == -1) - datum = PointerGetDatum(PG_DETOAST_DATUM(datum)); + if (!isnull) + { + if (OidIsValid(index_getprocid(index, 1, SPGIST_COMPRESS_PROC))) + { + FmgrInfo *compressProcinfo = NULL; + + compressProcinfo = index_getprocinfo(index, 1, SPGIST_COMPRESS_PROC); + leafDatum = FunctionCall1Coll(compressProcinfo, + index->rd_indcollation[0], + datum); + } + else + { + Assert(state->attLeafType.type == state->attType.type); - leafDatum = datum; + if (state->attType.attlen == -1) + leafDatum = PointerGetDatum(PG_DETOAST_DATUM(datum)); + else + leafDatum = datum; + } + } + else + leafDatum = (Datum) 0; /* * Compute space needed for a leaf tuple containing the given datum. @@ -1923,7 +1947,7 @@ spgdoinsert(Relation index, SpGistState *state, */ if (!isnull) leafSize = SGLTHDRSZ + sizeof(ItemIdData) + - SpGistGetTypeSize(&state->attType, leafDatum); + SpGistGetTypeSize(&state->attLeafType, leafDatum); else leafSize = SGDTSIZE + sizeof(ItemIdData); @@ -2138,7 +2162,7 @@ spgdoinsert(Relation index, SpGistState *state, { leafDatum = out.result.matchNode.restDatum; leafSize = SGLTHDRSZ + sizeof(ItemIdData) + - SpGistGetTypeSize(&state->attType, leafDatum); + SpGistGetTypeSize(&state->attLeafType, leafDatum); } /* diff --git a/src/backend/access/spgist/spginsert.c b/src/backend/access/spgist/spginsert.c index e4b2c29b0e..7dd0d61fbb 100644 --- a/src/backend/access/spgist/spginsert.c +++ b/src/backend/access/spgist/spginsert.c @@ -5,7 +5,7 @@ * * All the actual insertion logic is in spgdoinsert.c. * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * IDENTIFICATION @@ -32,6 +32,7 @@ typedef struct { SpGistState spgstate; /* SPGiST's working state */ + int64 indtuples; /* total number of tuples indexed */ MemoryContext tmpCtx; /* per-tuple temporary context */ } SpGistBuildState; @@ -59,6 +60,9 @@ spgistBuildCallback(Relation index, HeapTuple htup, Datum *values, MemoryContextReset(buildstate->tmpCtx); } + /* Update total tuple count */ + buildstate->indtuples += 1; + MemoryContextSwitchTo(oldCtx); MemoryContextReset(buildstate->tmpCtx); } @@ -110,7 +114,7 @@ spgbuild(Relation heap, Relation index, IndexInfo *indexInfo) * Replay will re-initialize the pages, so don't take full pages * images. No other data to log. 
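/*
 * Stripped-down sketch (hypothetical names) of the counting pattern the
 * spginsert.c hunk above introduces: the per-tuple build callback counts
 * the index tuples it actually creates, so that the ambuild result can
 * report index_tuples separately from the heap_tuples figure returned by
 * IndexBuildHeapScan.
 */
#include "postgres.h"
#include "access/htup.h"
#include "utils/rel.h"

typedef struct DemoBuildState
{
    int64       indtuples;      /* index tuples actually inserted */
} DemoBuildState;

/* same shape as the IndexBuildCallback used by spgistBuildCallback above */
static void
demo_build_callback(Relation index, HeapTuple htup, Datum *values,
                    bool *isnull, bool tupleIsAlive, void *state)
{
    DemoBuildState *buildstate = (DemoBuildState *) state;

    /* ... form and insert the index tuple for this heap tuple here ... */

    buildstate->indtuples += 1;
}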
*/ - XLogRegisterBuffer(0, metabuffer, REGBUF_WILL_INIT); + XLogRegisterBuffer(0, metabuffer, REGBUF_WILL_INIT | REGBUF_STANDARD); XLogRegisterBuffer(1, rootbuffer, REGBUF_WILL_INIT | REGBUF_STANDARD); XLogRegisterBuffer(2, nullbuffer, REGBUF_WILL_INIT | REGBUF_STANDARD); @@ -132,20 +136,23 @@ spgbuild(Relation heap, Relation index, IndexInfo *indexInfo) */ initSpGistState(&buildstate.spgstate, index); buildstate.spgstate.isBuild = true; + buildstate.indtuples = 0; buildstate.tmpCtx = AllocSetContextCreate(CurrentMemoryContext, "SP-GiST build temporary context", ALLOCSET_DEFAULT_SIZES); reltuples = IndexBuildHeapScan(heap, index, indexInfo, true, - spgistBuildCallback, (void *) &buildstate); + spgistBuildCallback, (void *) &buildstate, + NULL); MemoryContextDelete(buildstate.tmpCtx); SpGistUpdateMetaPage(index); result = (IndexBuildResult *) palloc0(sizeof(IndexBuildResult)); - result->heap_tuples = result->index_tuples = reltuples; + result->heap_tuples = reltuples; + result->index_tuples = buildstate.indtuples; return result; } @@ -173,7 +180,7 @@ spgbuildempty(Relation index) smgrwrite(index->rd_smgr, INIT_FORKNUM, SPGIST_METAPAGE_BLKNO, (char *) page, true); log_newpage(&index->rd_smgr->smgr_rnode.node, INIT_FORKNUM, - SPGIST_METAPAGE_BLKNO, page, false); + SPGIST_METAPAGE_BLKNO, page, true); /* Likewise for the root page. */ SpGistInitPage(page, SPGIST_LEAF); diff --git a/src/backend/access/spgist/spgkdtreeproc.c b/src/backend/access/spgist/spgkdtreeproc.c index 9a2649bf2a..41d2b9cac9 100644 --- a/src/backend/access/spgist/spgkdtreeproc.c +++ b/src/backend/access/spgist/spgkdtreeproc.c @@ -4,7 +4,7 @@ * implementation of k-d tree over points for SP-GiST * * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * IDENTIFICATION @@ -16,9 +16,11 @@ #include "postgres.h" #include "access/spgist.h" +#include "access/spgist_private.h" #include "access/stratnum.h" #include "catalog/pg_type.h" #include "utils/builtins.h" +#include "utils/float.h" #include "utils/geo_decls.h" @@ -162,6 +164,7 @@ spg_kd_inner_consistent(PG_FUNCTION_ARGS) double coord; int which; int i; + BOX bboxes[2]; Assert(in->hasPrefix); coord = DatumGetFloat8(in->prefixDatum); @@ -248,12 +251,85 @@ spg_kd_inner_consistent(PG_FUNCTION_ARGS) } /* We must descend into the children identified by which */ - out->nodeNumbers = (int *) palloc(sizeof(int) * 2); out->nNodes = 0; + + /* Fast-path for no matching children */ + if (!which) + PG_RETURN_VOID(); + + out->nodeNumbers = (int *) palloc(sizeof(int) * 2); + + /* + * When ordering scan keys are specified, we've to calculate distance for + * them. In order to do that, we need calculate bounding boxes for both + * children nodes. Calculation of those bounding boxes on non-zero level + * require knowledge of bounding box of upper node. So, we save bounding + * boxes to traversalValues. 
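/*
 * Condensed sketch of the geometry described in the comment above (helper
 * name made up): the parent's bounding box is cut at the discriminator
 * coordinate, alternating between x and y with the level's parity, giving
 * the bounding boxes of the two k-d subtrees whose distances the ordered
 * scan needs.
 */
#include "postgres.h"
#include "utils/geo_decls.h"

static void
demo_kd_child_boxes(const BOX *parent, double coord, bool odd_level,
                    BOX *lower_child, BOX *upper_child)
{
    *lower_child = *parent;
    *upper_child = *parent;

    if (odd_level)
        lower_child->high.x = upper_child->low.x = coord;   /* split by x */
    else
        lower_child->high.y = upper_child->low.y = coord;   /* split by y */
}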
+ */ + if (in->norderbys > 0) + { + BOX infArea; + BOX *area; + + out->distances = (double **) palloc(sizeof(double *) * in->nNodes); + out->traversalValues = (void **) palloc(sizeof(void *) * in->nNodes); + + if (in->level == 0) + { + float8 inf = get_float8_infinity(); + + infArea.high.x = inf; + infArea.high.y = inf; + infArea.low.x = -inf; + infArea.low.y = -inf; + area = &infArea; + } + else + { + area = (BOX *) in->traversalValue; + Assert(area); + } + + bboxes[0].low = area->low; + bboxes[1].high = area->high; + + if (in->level % 2) + { + /* split box by x */ + bboxes[0].high.x = bboxes[1].low.x = coord; + bboxes[0].high.y = area->high.y; + bboxes[1].low.y = area->low.y; + } + else + { + /* split box by y */ + bboxes[0].high.y = bboxes[1].low.y = coord; + bboxes[0].high.x = area->high.x; + bboxes[1].low.x = area->low.x; + } + } + for (i = 1; i <= 2; i++) { if (which & (1 << i)) - out->nodeNumbers[out->nNodes++] = i - 1; + { + out->nodeNumbers[out->nNodes] = i - 1; + + if (in->norderbys > 0) + { + MemoryContext oldCtx = MemoryContextSwitchTo(in->traversalMemoryContext); + BOX *box = box_copy(&bboxes[i - 1]); + + MemoryContextSwitchTo(oldCtx); + + out->traversalValues[out->nNodes] = box; + + out->distances[out->nNodes] = spg_key_orderbys_distances(BoxPGetDatum(box), false, + in->orderbys, in->norderbys); + } + + out->nNodes++; + } } /* Set up level increments, too */ diff --git a/src/backend/access/spgist/spgproc.c b/src/backend/access/spgist/spgproc.c new file mode 100644 index 0000000000..0bf80015e1 --- /dev/null +++ b/src/backend/access/spgist/spgproc.c @@ -0,0 +1,88 @@ +/*------------------------------------------------------------------------- + * + * spgproc.c + * Common supporting procedures for SP-GiST opclasses. + * + * + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group + * Portions Copyright (c) 1994, Regents of the University of California + * + * IDENTIFICATION + * src/backend/access/spgist/spgproc.c + * + *------------------------------------------------------------------------- + */ + +#include "postgres.h" + +#include + +#include "access/spgist_private.h" +#include "utils/builtins.h" +#include "utils/float.h" +#include "utils/geo_decls.h" + +#define point_point_distance(p1,p2) \ + DatumGetFloat8(DirectFunctionCall2(point_distance, \ + PointPGetDatum(p1), PointPGetDatum(p2))) + +/* Point-box distance in the assumption that box is aligned by axis */ +static double +point_box_distance(Point *point, BOX *box) +{ + double dx, + dy; + + if (isnan(point->x) || isnan(box->low.x) || + isnan(point->y) || isnan(box->low.y)) + return get_float8_nan(); + + if (point->x < box->low.x) + dx = box->low.x - point->x; + else if (point->x > box->high.x) + dx = point->x - box->high.x; + else + dx = 0.0; + + if (point->y < box->low.y) + dy = box->low.y - point->y; + else if (point->y > box->high.y) + dy = point->y - box->high.y; + else + dy = 0.0; + + return HYPOT(dx, dy); +} + +/* + * Returns distances from given key to array of ordering scan keys. Leaf key + * is expected to be point, non-leaf key is expected to be box. Scan key + * arguments are expected to be points. + */ +double * +spg_key_orderbys_distances(Datum key, bool isLeaf, + ScanKey orderbys, int norderbys) +{ + int sk_num; + double *distances = (double *) palloc(norderbys * sizeof(double)), + *distance = distances; + + for (sk_num = 0; sk_num < norderbys; ++sk_num, ++orderbys, ++distance) + { + Point *point = DatumGetPointP(orderbys->sk_argument); + + *distance = isLeaf ? 
point_point_distance(point, DatumGetPointP(key)) + : point_box_distance(point, DatumGetBoxP(key)); + } + + return distances; +} + +BOX * +box_copy(BOX *orig) +{ + BOX *result = palloc(sizeof(BOX)); + + *result = *orig; + return result; +} diff --git a/src/backend/access/spgist/spgquadtreeproc.c b/src/backend/access/spgist/spgquadtreeproc.c index 773774555f..4e175d2664 100644 --- a/src/backend/access/spgist/spgquadtreeproc.c +++ b/src/backend/access/spgist/spgquadtreeproc.c @@ -4,7 +4,7 @@ * implementation of quad tree over points for SP-GiST * * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * IDENTIFICATION @@ -17,8 +17,10 @@ #include "access/spgist.h" #include "access/stratnum.h" +#include "access/spgist_private.h" #include "catalog/pg_type.h" #include "utils/builtins.h" +#include "utils/float.h" #include "utils/geo_decls.h" @@ -77,6 +79,38 @@ getQuadrant(Point *centroid, Point *tst) return 0; } +/* Returns bounding box of a given quadrant inside given bounding box */ +static BOX * +getQuadrantArea(BOX *bbox, Point *centroid, int quadrant) +{ + BOX *result = (BOX *) palloc(sizeof(BOX)); + + switch (quadrant) + { + case 1: + result->high = bbox->high; + result->low = *centroid; + break; + case 2: + result->high.x = bbox->high.x; + result->high.y = centroid->y; + result->low.x = centroid->x; + result->low.y = bbox->low.y; + break; + case 3: + result->high = *centroid; + result->low = bbox->low; + break; + case 4: + result->high.x = centroid->x; + result->high.y = bbox->high.y; + result->low.x = bbox->low.x; + result->low.y = centroid->y; + break; + } + + return result; +} Datum spg_quad_choose(PG_FUNCTION_ARGS) @@ -196,19 +230,66 @@ spg_quad_inner_consistent(PG_FUNCTION_ARGS) spgInnerConsistentIn *in = (spgInnerConsistentIn *) PG_GETARG_POINTER(0); spgInnerConsistentOut *out = (spgInnerConsistentOut *) PG_GETARG_POINTER(1); Point *centroid; + BOX infbbox; + BOX *bbox = NULL; int which; int i; Assert(in->hasPrefix); centroid = DatumGetPointP(in->prefixDatum); + /* + * When ordering scan keys are specified, we've to calculate distance for + * them. In order to do that, we need calculate bounding boxes for all + * children nodes. Calculation of those bounding boxes on non-zero level + * require knowledge of bounding box of upper node. So, we save bounding + * boxes to traversalValues. 
+ */ + if (in->norderbys > 0) + { + out->distances = (double **) palloc(sizeof(double *) * in->nNodes); + out->traversalValues = (void **) palloc(sizeof(void *) * in->nNodes); + + if (in->level == 0) + { + double inf = get_float8_infinity(); + + infbbox.high.x = inf; + infbbox.high.y = inf; + infbbox.low.x = -inf; + infbbox.low.y = -inf; + bbox = &infbbox; + } + else + { + bbox = in->traversalValue; + Assert(bbox); + } + } + if (in->allTheSame) { /* Report that all nodes should be visited */ out->nNodes = in->nNodes; out->nodeNumbers = (int *) palloc(sizeof(int) * in->nNodes); for (i = 0; i < in->nNodes; i++) + { out->nodeNumbers[i] = i; + + if (in->norderbys > 0) + { + MemoryContext oldCtx = MemoryContextSwitchTo(in->traversalMemoryContext); + + /* Use parent quadrant box as traversalValue */ + BOX *quadrant = box_copy(bbox); + + MemoryContextSwitchTo(oldCtx); + + out->traversalValues[i] = quadrant; + out->distances[i] = spg_key_orderbys_distances(BoxPGetDatum(quadrant), false, + in->orderbys, in->norderbys); + } + } PG_RETURN_VOID(); } @@ -286,13 +367,35 @@ spg_quad_inner_consistent(PG_FUNCTION_ARGS) break; /* no need to consider remaining conditions */ } + out->levelAdds = palloc(sizeof(int) * 4); + for (i = 0; i < 4; ++i) + out->levelAdds[i] = 1; + /* We must descend into the quadrant(s) identified by which */ out->nodeNumbers = (int *) palloc(sizeof(int) * 4); out->nNodes = 0; + for (i = 1; i <= 4; i++) { if (which & (1 << i)) - out->nodeNumbers[out->nNodes++] = i - 1; + { + out->nodeNumbers[out->nNodes] = i - 1; + + if (in->norderbys > 0) + { + MemoryContext oldCtx = MemoryContextSwitchTo(in->traversalMemoryContext); + BOX *quadrant = getQuadrantArea(bbox, centroid, i); + + MemoryContextSwitchTo(oldCtx); + + out->traversalValues[out->nNodes] = quadrant; + + out->distances[out->nNodes] = spg_key_orderbys_distances(BoxPGetDatum(quadrant), false, + in->orderbys, in->norderbys); + } + + out->nNodes++; + } } PG_RETURN_VOID(); @@ -356,5 +459,10 @@ spg_quad_leaf_consistent(PG_FUNCTION_ARGS) break; } + if (res && in->norderbys > 0) + /* ok, it passes -> let's compute the distances */ + out->distances = spg_key_orderbys_distances(in->leafDatum, true, + in->orderbys, in->norderbys); + PG_RETURN_BOOL(res); } diff --git a/src/backend/access/spgist/spgscan.c b/src/backend/access/spgist/spgscan.c index 7965b5846d..c883ae95e4 100644 --- a/src/backend/access/spgist/spgscan.c +++ b/src/backend/access/spgist/spgscan.c @@ -4,7 +4,7 @@ * routines for scanning SP-GiST indexes * * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * IDENTIFICATION @@ -15,79 +15,167 @@ #include "postgres.h" +#include "access/genam.h" #include "access/relscan.h" #include "access/spgist_private.h" #include "miscadmin.h" #include "storage/bufmgr.h" #include "utils/datum.h" +#include "utils/float.h" +#include "utils/lsyscache.h" #include "utils/memutils.h" #include "utils/rel.h" - typedef void (*storeRes_func) (SpGistScanOpaque so, ItemPointer heapPtr, - Datum leafValue, bool isnull, bool recheck); + Datum leafValue, bool isNull, bool recheck, + bool recheckDistances, double *distances); -typedef struct ScanStackEntry +/* + * Pairing heap comparison function for the SpGistSearchItem queue. + * KNN-searches currently only support NULLS LAST. So, preserve this logic + * here. 
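/*
 * Toy illustration (type and names invented) of the pairing-heap
 * comparator contract the function below relies on: the heap keeps the
 * node the comparator ranks highest at the root, so returning a positive
 * value for the smaller distance makes pairingheap_remove_first() hand
 * back the nearest item first.
 */
#include "postgres.h"
#include "lib/pairingheap.h"

typedef struct DemoItem
{
    pairingheap_node ph_node;   /* first member, so the casts below work */
    double      key;
} DemoItem;

static int
demo_item_cmp(const pairingheap_node *a, const pairingheap_node *b, void *arg)
{
    const DemoItem *da = (const DemoItem *) a;
    const DemoItem *db = (const DemoItem *) b;

    if (da->key != db->key)
        return (da->key < db->key) ? 1 : -1;    /* smaller key ranks higher */
    return 0;
}

/*
 * A queue built with pairingheap_allocate(demo_item_cmp, NULL) and filled
 * via pairingheap_add() then yields items smallest-key-first.
 */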
+ */ +static int +pairingheap_SpGistSearchItem_cmp(const pairingheap_node *a, + const pairingheap_node *b, void *arg) { - Datum reconstructedValue; /* value reconstructed from parent */ - void *traversalValue; /* opclass-specific traverse value */ - int level; /* level of items on this page */ - ItemPointerData ptr; /* block and offset to scan from */ -} ScanStackEntry; + const SpGistSearchItem *sa = (const SpGistSearchItem *) a; + const SpGistSearchItem *sb = (const SpGistSearchItem *) b; + SpGistScanOpaque so = (SpGistScanOpaque) arg; + int i; + + if (sa->isNull) + { + if (!sb->isNull) + return -1; + } + else if (sb->isNull) + { + return 1; + } + else + { + /* Order according to distance comparison */ + for (i = 0; i < so->numberOfOrderBys; i++) + { + if (isnan(sa->distances[i]) && isnan(sb->distances[i])) + continue; /* NaN == NaN */ + if (isnan(sa->distances[i])) + return -1; /* NaN > number */ + if (isnan(sb->distances[i])) + return 1; /* number < NaN */ + if (sa->distances[i] != sb->distances[i]) + return (sa->distances[i] < sb->distances[i]) ? 1 : -1; + } + } + + /* Leaf items go before inner pages, to ensure a depth-first search */ + if (sa->isLeaf && !sb->isLeaf) + return 1; + if (!sa->isLeaf && sb->isLeaf) + return -1; + return 0; +} -/* Free a ScanStackEntry */ static void -freeScanStackEntry(SpGistScanOpaque so, ScanStackEntry *stackEntry) +spgFreeSearchItem(SpGistScanOpaque so, SpGistSearchItem * item) { - if (!so->state.attType.attbyval && - DatumGetPointer(stackEntry->reconstructedValue) != NULL) - pfree(DatumGetPointer(stackEntry->reconstructedValue)); - if (stackEntry->traversalValue) - pfree(stackEntry->traversalValue); + if (!so->state.attLeafType.attbyval && + DatumGetPointer(item->value) != NULL) + pfree(DatumGetPointer(item->value)); + + if (item->traversalValue) + pfree(item->traversalValue); - pfree(stackEntry); + pfree(item); } -/* Free the entire stack */ +/* + * Add SpGistSearchItem to queue + * + * Called in queue context + */ static void -freeScanStack(SpGistScanOpaque so) +spgAddSearchItemToQueue(SpGistScanOpaque so, SpGistSearchItem * item) { - ListCell *lc; + pairingheap_add(so->scanQueue, &item->phNode); +} - foreach(lc, so->scanStack) - { - freeScanStackEntry(so, (ScanStackEntry *) lfirst(lc)); - } - list_free(so->scanStack); - so->scanStack = NIL; +static SpGistSearchItem * +spgAllocSearchItem(SpGistScanOpaque so, bool isnull, double *distances) +{ + /* allocate distance array only for non-NULL items */ + SpGistSearchItem *item = + palloc(SizeOfSpGistSearchItem(isnull ? 0 : so->numberOfOrderBys)); + + item->isNull = isnull; + + if (!isnull && so->numberOfOrderBys > 0) + memcpy(item->distances, distances, + so->numberOfOrderBys * sizeof(double)); + + return item; +} + +static void +spgAddStartItem(SpGistScanOpaque so, bool isnull) +{ + SpGistSearchItem *startEntry = + spgAllocSearchItem(so, isnull, so->zeroDistances); + + ItemPointerSet(&startEntry->heapPtr, + isnull ? 
SPGIST_NULL_BLKNO : SPGIST_ROOT_BLKNO, + FirstOffsetNumber); + startEntry->isLeaf = false; + startEntry->level = 0; + startEntry->value = (Datum) 0; + startEntry->traversalValue = NULL; + startEntry->recheck = false; + startEntry->recheckDistances = false; + + spgAddSearchItemToQueue(so, startEntry); } /* - * Initialize scanStack to search the root page, resetting + * Initialize queue to search the root page, resetting * any previously active scan */ static void resetSpGistScanOpaque(SpGistScanOpaque so) { - ScanStackEntry *startEntry; + MemoryContext oldCtx; + + /* + * clear traversal context before proceeding to the next scan; this must + * not happen before the freeScanStack above, else we get double-free + * crashes. + */ + MemoryContextReset(so->traversalCxt); - freeScanStack(so); + oldCtx = MemoryContextSwitchTo(so->traversalCxt); + + /* initialize queue only for distance-ordered scans */ + so->scanQueue = pairingheap_allocate(pairingheap_SpGistSearchItem_cmp, so); if (so->searchNulls) - { - /* Stack a work item to scan the null index entries */ - startEntry = (ScanStackEntry *) palloc0(sizeof(ScanStackEntry)); - ItemPointerSet(&startEntry->ptr, SPGIST_NULL_BLKNO, FirstOffsetNumber); - so->scanStack = lappend(so->scanStack, startEntry); - } + /* Add a work item to scan the null index entries */ + spgAddStartItem(so, true); if (so->searchNonNulls) + /* Add a work item to scan the non-null index entries */ + spgAddStartItem(so, false); + + MemoryContextSwitchTo(oldCtx); + + if (so->numberOfOrderBys > 0) { - /* Stack a work item to scan the non-null index entries */ - startEntry = (ScanStackEntry *) palloc0(sizeof(ScanStackEntry)); - ItemPointerSet(&startEntry->ptr, SPGIST_ROOT_BLKNO, FirstOffsetNumber); - so->scanStack = lappend(so->scanStack, startEntry); + /* Must pfree distances to avoid memory leak */ + int i; + + for (i = 0; i < so->nPtrs; i++) + if (so->distances[i]) + pfree(so->distances[i]); } if (so->want_itup) @@ -122,6 +210,9 @@ spgPrepareScanKeys(IndexScanDesc scan) int nkeys; int i; + so->numberOfOrderBys = scan->numberOfOrderBys; + so->orderByData = scan->orderByData; + if (scan->numberOfKeys <= 0) { /* If no quals, whole-index scan is required */ @@ -182,8 +273,9 @@ spgbeginscan(Relation rel, int keysz, int orderbysz) { IndexScanDesc scan; SpGistScanOpaque so; + int i; - scan = RelationGetIndexScan(rel, keysz, 0); + scan = RelationGetIndexScan(rel, keysz, orderbysz); so = (SpGistScanOpaque) palloc0(sizeof(SpGistScanOpaqueData)); if (keysz > 0) @@ -191,13 +283,54 @@ spgbeginscan(Relation rel, int keysz, int orderbysz) else so->keyData = NULL; initSpGistState(&so->state, scan->indexRelation); + so->tempCxt = AllocSetContextCreate(CurrentMemoryContext, "SP-GiST search temporary context", ALLOCSET_DEFAULT_SIZES); + so->traversalCxt = AllocSetContextCreate(CurrentMemoryContext, + "SP-GiST traversal-value context", + ALLOCSET_DEFAULT_SIZES); /* Set up indexTupDesc and xs_hitupdesc in case it's an index-only scan */ so->indexTupDesc = scan->xs_hitupdesc = RelationGetDescr(rel); + /* Allocate various arrays needed for order-by scans */ + if (scan->numberOfOrderBys > 0) + { + /* This will be filled in spgrescan, but allocate the space here */ + so->orderByTypes = (Oid *) + palloc(sizeof(Oid) * scan->numberOfOrderBys); + + /* These arrays have constant contents, so we can fill them now */ + so->zeroDistances = (double *) + palloc(sizeof(double) * scan->numberOfOrderBys); + so->infDistances = (double *) + palloc(sizeof(double) * scan->numberOfOrderBys); + + for (i = 0; i < 
scan->numberOfOrderBys; i++) + { + so->zeroDistances[i] = 0.0; + so->infDistances[i] = get_float8_infinity(); + } + + scan->xs_orderbyvals = (Datum *) + palloc0(sizeof(Datum) * scan->numberOfOrderBys); + scan->xs_orderbynulls = (bool *) + palloc(sizeof(bool) * scan->numberOfOrderBys); + memset(scan->xs_orderbynulls, true, + sizeof(bool) * scan->numberOfOrderBys); + } + + fmgr_info_copy(&so->innerConsistentFn, + index_getprocinfo(rel, 1, SPGIST_INNER_CONSISTENT_PROC), + CurrentMemoryContext); + + fmgr_info_copy(&so->leafConsistentFn, + index_getprocinfo(rel, 1, SPGIST_LEAF_CONSISTENT_PROC), + CurrentMemoryContext); + + so->indexCollation = rel->rd_indcollation[0]; + scan->opaque = so; return scan; @@ -211,15 +344,41 @@ spgrescan(IndexScanDesc scan, ScanKey scankey, int nscankeys, /* copy scankeys into local storage */ if (scankey && scan->numberOfKeys > 0) - { memmove(scan->keyData, scankey, scan->numberOfKeys * sizeof(ScanKeyData)); + + /* initialize order-by data if needed */ + if (orderbys && scan->numberOfOrderBys > 0) + { + int i; + + memmove(scan->orderByData, orderbys, + scan->numberOfOrderBys * sizeof(ScanKeyData)); + + for (i = 0; i < scan->numberOfOrderBys; i++) + { + ScanKey skey = &scan->orderByData[i]; + + /* + * Look up the datatype returned by the original ordering + * operator. SP-GiST always uses a float8 for the distance + * function, but the ordering operator could be anything else. + * + * XXX: The distance function is only allowed to be lossy if the + * ordering operator's result type is float4 or float8. Otherwise + * we don't know how to return the distance to the executor. But + * we cannot check that here, as we won't know if the distance + * function is lossy until it returns *recheck = true for the + * first time. + */ + so->orderByTypes[i] = get_func_rettype(skey->sk_func.fn_oid); + } } /* preprocess scankeys, set up the representation in *so */ spgPrepareScanKeys(scan); - /* set up starting stack entries */ + /* set up starting queue entries */ resetSpGistScanOpaque(so); } @@ -229,65 +388,344 @@ spgendscan(IndexScanDesc scan) SpGistScanOpaque so = (SpGistScanOpaque) scan->opaque; MemoryContextDelete(so->tempCxt); + MemoryContextDelete(so->traversalCxt); + + if (so->keyData) + pfree(so->keyData); + + if (so->state.deadTupleStorage) + pfree(so->state.deadTupleStorage); + + if (scan->numberOfOrderBys > 0) + { + pfree(so->orderByTypes); + pfree(so->zeroDistances); + pfree(so->infDistances); + pfree(scan->xs_orderbyvals); + pfree(scan->xs_orderbynulls); + } + + pfree(so); +} + +/* + * Leaf SpGistSearchItem constructor, called in queue context + */ +static SpGistSearchItem * +spgNewHeapItem(SpGistScanOpaque so, int level, ItemPointer heapPtr, + Datum leafValue, bool recheck, bool recheckDistances, + bool isnull, double *distances) +{ + SpGistSearchItem *item = spgAllocSearchItem(so, isnull, distances); + + item->level = level; + item->heapPtr = *heapPtr; + /* copy value to queue cxt out of tmp cxt */ + item->value = isnull ? 
(Datum) 0 : + datumCopy(leafValue, so->state.attLeafType.attbyval, + so->state.attLeafType.attlen); + item->traversalValue = NULL; + item->isLeaf = true; + item->recheck = recheck; + item->recheckDistances = recheckDistances; + + return item; } /* * Test whether a leaf tuple satisfies all the scan keys * - * *leafValue is set to the reconstructed datum, if provided - * *recheck is set true if any of the operators are lossy + * *reportedSome is set to true if: + * the scan is not ordered AND the item satisfies the scankeys */ static bool -spgLeafTest(Relation index, SpGistScanOpaque so, +spgLeafTest(SpGistScanOpaque so, SpGistSearchItem * item, SpGistLeafTuple leafTuple, bool isnull, - int level, Datum reconstructedValue, - void *traversalValue, - Datum *leafValue, bool *recheck) + bool *reportedSome, storeRes_func storeRes) { + Datum leafValue; + double *distances; bool result; - Datum leafDatum; - spgLeafConsistentIn in; - spgLeafConsistentOut out; - FmgrInfo *procinfo; - MemoryContext oldCtx; + bool recheck; + bool recheckDistances; if (isnull) { /* Should not have arrived on a nulls page unless nulls are wanted */ Assert(so->searchNulls); - *leafValue = (Datum) 0; - *recheck = false; - return true; + leafValue = (Datum) 0; + distances = NULL; + recheck = false; + recheckDistances = false; + result = true; + } + else + { + spgLeafConsistentIn in; + spgLeafConsistentOut out; + + /* use temp context for calling leaf_consistent */ + MemoryContext oldCxt = MemoryContextSwitchTo(so->tempCxt); + + in.scankeys = so->keyData; + in.nkeys = so->numberOfKeys; + in.orderbys = so->orderByData; + in.norderbys = so->numberOfOrderBys; + in.reconstructedValue = item->value; + in.traversalValue = item->traversalValue; + in.level = item->level; + in.returnData = so->want_itup; + in.leafDatum = SGLTDATUM(leafTuple, &so->state); + + out.leafValue = (Datum) 0; + out.recheck = false; + out.distances = NULL; + out.recheckDistances = false; + + result = DatumGetBool(FunctionCall2Coll(&so->leafConsistentFn, + so->indexCollation, + PointerGetDatum(&in), + PointerGetDatum(&out))); + recheck = out.recheck; + recheckDistances = out.recheckDistances; + leafValue = out.leafValue; + distances = out.distances; + + MemoryContextSwitchTo(oldCxt); } - leafDatum = SGLTDATUM(leafTuple, &so->state); + if (result) + { + /* item passes the scankeys */ + if (so->numberOfOrderBys > 0) + { + /* the scan is ordered -> add the item to the queue */ + MemoryContext oldCxt = MemoryContextSwitchTo(so->traversalCxt); + SpGistSearchItem *heapItem = spgNewHeapItem(so, item->level, + &leafTuple->heapPtr, + leafValue, + recheck, + recheckDistances, + isnull, + distances); + + spgAddSearchItemToQueue(so, heapItem); + + MemoryContextSwitchTo(oldCxt); + } + else + { + /* non-ordered scan, so report the item right away */ + Assert(!recheckDistances); + storeRes(so, &leafTuple->heapPtr, leafValue, isnull, + recheck, false, NULL); + *reportedSome = true; + } + } - /* use temp context for calling leaf_consistent */ - oldCtx = MemoryContextSwitchTo(so->tempCxt); + return result; +} - in.scankeys = so->keyData; - in.nkeys = so->numberOfKeys; - in.reconstructedValue = reconstructedValue; - in.traversalValue = traversalValue; - in.level = level; - in.returnData = so->want_itup; - in.leafDatum = leafDatum; +/* A bundle initializer for inner_consistent methods */ +static void +spgInitInnerConsistentIn(spgInnerConsistentIn *in, + SpGistScanOpaque so, + SpGistSearchItem * item, + SpGistInnerTuple innerTuple) +{ + in->scankeys = so->keyData; + in->orderbys = 
so->orderByData; + in->nkeys = so->numberOfKeys; + in->norderbys = so->numberOfOrderBys; + in->reconstructedValue = item->value; + in->traversalMemoryContext = so->traversalCxt; + in->traversalValue = item->traversalValue; + in->level = item->level; + in->returnData = so->want_itup; + in->allTheSame = innerTuple->allTheSame; + in->hasPrefix = (innerTuple->prefixSize > 0); + in->prefixDatum = SGITDATUM(innerTuple, &so->state); + in->nNodes = innerTuple->nNodes; + in->nodeLabels = spgExtractNodeLabels(&so->state, innerTuple); +} - out.leafValue = (Datum) 0; - out.recheck = false; +static SpGistSearchItem * +spgMakeInnerItem(SpGistScanOpaque so, + SpGistSearchItem * parentItem, + SpGistNodeTuple tuple, + spgInnerConsistentOut *out, int i, bool isnull, + double *distances) +{ + SpGistSearchItem *item = spgAllocSearchItem(so, isnull, distances); + + item->heapPtr = tuple->t_tid; + item->level = out->levelAdds ? parentItem->level + out->levelAdds[i] + : parentItem->level; + + /* Must copy value out of temp context */ + item->value = out->reconstructedValues + ? datumCopy(out->reconstructedValues[i], + so->state.attLeafType.attbyval, + so->state.attLeafType.attlen) + : (Datum) 0; + + /* + * Elements of out.traversalValues should be allocated in + * in.traversalMemoryContext, which is actually a long lived context of + * index scan. + */ + item->traversalValue = + out->traversalValues ? out->traversalValues[i] : NULL; + + item->isLeaf = false; + item->recheck = false; + item->recheckDistances = false; + + return item; +} - procinfo = index_getprocinfo(index, 1, SPGIST_LEAF_CONSISTENT_PROC); - result = DatumGetBool(FunctionCall2Coll(procinfo, - index->rd_indcollation[0], - PointerGetDatum(&in), - PointerGetDatum(&out))); +static void +spgInnerTest(SpGistScanOpaque so, SpGistSearchItem * item, + SpGistInnerTuple innerTuple, bool isnull) +{ + MemoryContext oldCxt = MemoryContextSwitchTo(so->tempCxt); + spgInnerConsistentOut out; + int nNodes = innerTuple->nNodes; + int i; - *leafValue = out.leafValue; - *recheck = out.recheck; + memset(&out, 0, sizeof(out)); - MemoryContextSwitchTo(oldCtx); + if (!isnull) + { + spgInnerConsistentIn in; - return result; + spgInitInnerConsistentIn(&in, so, item, innerTuple); + + /* use user-defined inner consistent method */ + FunctionCall2Coll(&so->innerConsistentFn, + so->indexCollation, + PointerGetDatum(&in), + PointerGetDatum(&out)); + } + else + { + /* force all children to be visited */ + out.nNodes = nNodes; + out.nodeNumbers = (int *) palloc(sizeof(int) * nNodes); + for (i = 0; i < nNodes; i++) + out.nodeNumbers[i] = i; + } + + /* If allTheSame, they should all or none of them match */ + if (innerTuple->allTheSame && out.nNodes != 0 && out.nNodes != nNodes) + elog(ERROR, "inconsistent inner_consistent results for allTheSame inner tuple"); + + if (out.nNodes) + { + /* collect node pointers */ + SpGistNodeTuple node; + SpGistNodeTuple *nodes = (SpGistNodeTuple *) palloc( + sizeof(SpGistNodeTuple) * nNodes); + + SGITITERATE(innerTuple, i, node) + { + nodes[i] = node; + } + + MemoryContextSwitchTo(so->traversalCxt); + + for (i = 0; i < out.nNodes; i++) + { + int nodeN = out.nodeNumbers[i]; + SpGistSearchItem *innerItem; + double *distances; + + Assert(nodeN >= 0 && nodeN < nNodes); + + node = nodes[nodeN]; + + if (!ItemPointerIsValid(&node->t_tid)) + continue; + + /* + * Use infinity distances if innerConsistent() failed to return + * them or if is a NULL item (their distances are really unused). + */ + distances = out.distances ? 
out.distances[i] : so->infDistances; + + innerItem = spgMakeInnerItem(so, item, node, &out, i, isnull, + distances); + + spgAddSearchItemToQueue(so, innerItem); + } + } + + MemoryContextSwitchTo(oldCxt); +} + +/* Returns a next item in an (ordered) scan or null if the index is exhausted */ +static SpGistSearchItem * +spgGetNextQueueItem(SpGistScanOpaque so) +{ + if (pairingheap_is_empty(so->scanQueue)) + return NULL; /* Done when both heaps are empty */ + + /* Return item; caller is responsible to pfree it */ + return (SpGistSearchItem *) pairingheap_remove_first(so->scanQueue); +} + +enum SpGistSpecialOffsetNumbers +{ + SpGistBreakOffsetNumber = InvalidOffsetNumber, + SpGistRedirectOffsetNumber = MaxOffsetNumber + 1, + SpGistErrorOffsetNumber = MaxOffsetNumber + 2 +}; + +static OffsetNumber +spgTestLeafTuple(SpGistScanOpaque so, + SpGistSearchItem * item, + Page page, OffsetNumber offset, + bool isnull, bool isroot, + bool *reportedSome, + storeRes_func storeRes) +{ + SpGistLeafTuple leafTuple = (SpGistLeafTuple) + PageGetItem(page, PageGetItemId(page, offset)); + + if (leafTuple->tupstate != SPGIST_LIVE) + { + if (!isroot) /* all tuples on root should be live */ + { + if (leafTuple->tupstate == SPGIST_REDIRECT) + { + /* redirection tuple should be first in chain */ + Assert(offset == ItemPointerGetOffsetNumber(&item->heapPtr)); + /* transfer attention to redirect point */ + item->heapPtr = ((SpGistDeadTuple) leafTuple)->pointer; + Assert(ItemPointerGetBlockNumber(&item->heapPtr) != SPGIST_METAPAGE_BLKNO); + return SpGistRedirectOffsetNumber; + } + + if (leafTuple->tupstate == SPGIST_DEAD) + { + /* dead tuple should be first in chain */ + Assert(offset == ItemPointerGetOffsetNumber(&item->heapPtr)); + /* No live entries on this page */ + Assert(leafTuple->nextOffset == InvalidOffsetNumber); + return SpGistBreakOffsetNumber; + } + } + + /* We should not arrive at a placeholder */ + elog(ERROR, "unexpected SPGiST tuple state: %d", leafTuple->tupstate); + return SpGistErrorOffsetNumber; + } + + Assert(ItemPointerIsValid(&leafTuple->heapPtr)); + + spgLeafTest(so, item, leafTuple, isnull, reportedSome, storeRes); + + return leafTuple->nextOffset; } /* @@ -306,247 +744,101 @@ spgWalk(Relation index, SpGistScanOpaque so, bool scanWholeIndex, while (scanWholeIndex || !reportedSome) { - ScanStackEntry *stackEntry; - BlockNumber blkno; - OffsetNumber offset; - Page page; - bool isnull; + SpGistSearchItem *item = spgGetNextQueueItem(so); - /* Pull next to-do item from the list */ - if (so->scanStack == NIL) - break; /* there are no more pages to scan */ - - stackEntry = (ScanStackEntry *) linitial(so->scanStack); - so->scanStack = list_delete_first(so->scanStack); + if (item == NULL) + break; /* No more items in queue -> done */ redirect: /* Check for interrupts, just in case of infinite loop */ CHECK_FOR_INTERRUPTS(); - blkno = ItemPointerGetBlockNumber(&stackEntry->ptr); - offset = ItemPointerGetOffsetNumber(&stackEntry->ptr); - - if (buffer == InvalidBuffer) + if (item->isLeaf) { - buffer = ReadBuffer(index, blkno); - LockBuffer(buffer, BUFFER_LOCK_SHARE); + /* We store heap items in the queue only in case of ordered search */ + Assert(so->numberOfOrderBys > 0); + storeRes(so, &item->heapPtr, item->value, item->isNull, + item->recheck, item->recheckDistances, item->distances); + reportedSome = true; } - else if (blkno != BufferGetBlockNumber(buffer)) + else { - UnlockReleaseBuffer(buffer); - buffer = ReadBuffer(index, blkno); - LockBuffer(buffer, BUFFER_LOCK_SHARE); - } - /* else new pointer 
points to the same page, no work needed */ + BlockNumber blkno = ItemPointerGetBlockNumber(&item->heapPtr); + OffsetNumber offset = ItemPointerGetOffsetNumber(&item->heapPtr); + Page page; + bool isnull; - page = BufferGetPage(buffer); - TestForOldSnapshot(snapshot, index, page); + if (buffer == InvalidBuffer) + { + buffer = ReadBuffer(index, blkno); + LockBuffer(buffer, BUFFER_LOCK_SHARE); + } + else if (blkno != BufferGetBlockNumber(buffer)) + { + UnlockReleaseBuffer(buffer); + buffer = ReadBuffer(index, blkno); + LockBuffer(buffer, BUFFER_LOCK_SHARE); + } - isnull = SpGistPageStoresNulls(page) ? true : false; + /* else new pointer points to the same page, no work needed */ - if (SpGistPageIsLeaf(page)) - { - SpGistLeafTuple leafTuple; - OffsetNumber max = PageGetMaxOffsetNumber(page); - Datum leafValue = (Datum) 0; - bool recheck = false; + page = BufferGetPage(buffer); + TestForOldSnapshot(snapshot, index, page); + + isnull = SpGistPageStoresNulls(page) ? true : false; - if (SpGistBlockIsRoot(blkno)) + if (SpGistPageIsLeaf(page)) { - /* When root is a leaf, examine all its tuples */ - for (offset = FirstOffsetNumber; offset <= max; offset++) - { - leafTuple = (SpGistLeafTuple) - PageGetItem(page, PageGetItemId(page, offset)); - if (leafTuple->tupstate != SPGIST_LIVE) - { - /* all tuples on root should be live */ - elog(ERROR, "unexpected SPGiST tuple state: %d", - leafTuple->tupstate); - } + /* Page is a leaf - that is, all it's tuples are heap items */ + OffsetNumber max = PageGetMaxOffsetNumber(page); - Assert(ItemPointerIsValid(&leafTuple->heapPtr)); - if (spgLeafTest(index, so, - leafTuple, isnull, - stackEntry->level, - stackEntry->reconstructedValue, - stackEntry->traversalValue, - &leafValue, - &recheck)) - { - storeRes(so, &leafTuple->heapPtr, - leafValue, isnull, recheck); - reportedSome = true; - } + if (SpGistBlockIsRoot(blkno)) + { + /* When root is a leaf, examine all its tuples */ + for (offset = FirstOffsetNumber; offset <= max; offset++) + (void) spgTestLeafTuple(so, item, page, offset, + isnull, true, + &reportedSome, storeRes); } - } - else - { - /* Normal case: just examine the chain we arrived at */ - while (offset != InvalidOffsetNumber) + else { - Assert(offset >= FirstOffsetNumber && offset <= max); - leafTuple = (SpGistLeafTuple) - PageGetItem(page, PageGetItemId(page, offset)); - if (leafTuple->tupstate != SPGIST_LIVE) + /* Normal case: just examine the chain we arrived at */ + while (offset != InvalidOffsetNumber) { - if (leafTuple->tupstate == SPGIST_REDIRECT) - { - /* redirection tuple should be first in chain */ - Assert(offset == ItemPointerGetOffsetNumber(&stackEntry->ptr)); - /* transfer attention to redirect point */ - stackEntry->ptr = ((SpGistDeadTuple) leafTuple)->pointer; - Assert(ItemPointerGetBlockNumber(&stackEntry->ptr) != SPGIST_METAPAGE_BLKNO); + Assert(offset >= FirstOffsetNumber && offset <= max); + offset = spgTestLeafTuple(so, item, page, offset, + isnull, false, + &reportedSome, storeRes); + if (offset == SpGistRedirectOffsetNumber) goto redirect; - } - if (leafTuple->tupstate == SPGIST_DEAD) - { - /* dead tuple should be first in chain */ - Assert(offset == ItemPointerGetOffsetNumber(&stackEntry->ptr)); - /* No live entries on this page */ - Assert(leafTuple->nextOffset == InvalidOffsetNumber); - break; - } - /* We should not arrive at a placeholder */ - elog(ERROR, "unexpected SPGiST tuple state: %d", - leafTuple->tupstate); } - - Assert(ItemPointerIsValid(&leafTuple->heapPtr)); - if (spgLeafTest(index, so, - leafTuple, isnull, - 
stackEntry->level, - stackEntry->reconstructedValue, - stackEntry->traversalValue, - &leafValue, - &recheck)) - { - storeRes(so, &leafTuple->heapPtr, - leafValue, isnull, recheck); - reportedSome = true; - } - - offset = leafTuple->nextOffset; - } - } - } - else /* page is inner */ - { - SpGistInnerTuple innerTuple; - spgInnerConsistentIn in; - spgInnerConsistentOut out; - FmgrInfo *procinfo; - SpGistNodeTuple *nodes; - SpGistNodeTuple node; - int i; - MemoryContext oldCtx; - - innerTuple = (SpGistInnerTuple) PageGetItem(page, - PageGetItemId(page, offset)); - - if (innerTuple->tupstate != SPGIST_LIVE) - { - if (innerTuple->tupstate == SPGIST_REDIRECT) - { - /* transfer attention to redirect point */ - stackEntry->ptr = ((SpGistDeadTuple) innerTuple)->pointer; - Assert(ItemPointerGetBlockNumber(&stackEntry->ptr) != SPGIST_METAPAGE_BLKNO); - goto redirect; } - elog(ERROR, "unexpected SPGiST tuple state: %d", - innerTuple->tupstate); } - - /* use temp context for calling inner_consistent */ - oldCtx = MemoryContextSwitchTo(so->tempCxt); - - in.scankeys = so->keyData; - in.nkeys = so->numberOfKeys; - in.reconstructedValue = stackEntry->reconstructedValue; - in.traversalMemoryContext = oldCtx; - in.traversalValue = stackEntry->traversalValue; - in.level = stackEntry->level; - in.returnData = so->want_itup; - in.allTheSame = innerTuple->allTheSame; - in.hasPrefix = (innerTuple->prefixSize > 0); - in.prefixDatum = SGITDATUM(innerTuple, &so->state); - in.nNodes = innerTuple->nNodes; - in.nodeLabels = spgExtractNodeLabels(&so->state, innerTuple); - - /* collect node pointers */ - nodes = (SpGistNodeTuple *) palloc(sizeof(SpGistNodeTuple) * in.nNodes); - SGITITERATE(innerTuple, i, node) + else /* page is inner */ { - nodes[i] = node; - } - - memset(&out, 0, sizeof(out)); + SpGistInnerTuple innerTuple = (SpGistInnerTuple) + PageGetItem(page, PageGetItemId(page, offset)); - if (!isnull) - { - /* use user-defined inner consistent method */ - procinfo = index_getprocinfo(index, 1, SPGIST_INNER_CONSISTENT_PROC); - FunctionCall2Coll(procinfo, - index->rd_indcollation[0], - PointerGetDatum(&in), - PointerGetDatum(&out)); - } - else - { - /* force all children to be visited */ - out.nNodes = in.nNodes; - out.nodeNumbers = (int *) palloc(sizeof(int) * in.nNodes); - for (i = 0; i < in.nNodes; i++) - out.nodeNumbers[i] = i; - } - - MemoryContextSwitchTo(oldCtx); - - /* If allTheSame, they should all or none of 'em match */ - if (innerTuple->allTheSame) - if (out.nNodes != 0 && out.nNodes != in.nNodes) - elog(ERROR, "inconsistent inner_consistent results for allTheSame inner tuple"); - - for (i = 0; i < out.nNodes; i++) - { - int nodeN = out.nodeNumbers[i]; - - Assert(nodeN >= 0 && nodeN < in.nNodes); - if (ItemPointerIsValid(&nodes[nodeN]->t_tid)) + if (innerTuple->tupstate != SPGIST_LIVE) { - ScanStackEntry *newEntry; - - /* Create new work item for this node */ - newEntry = palloc(sizeof(ScanStackEntry)); - newEntry->ptr = nodes[nodeN]->t_tid; - if (out.levelAdds) - newEntry->level = stackEntry->level + out.levelAdds[i]; - else - newEntry->level = stackEntry->level; - /* Must copy value out of temp context */ - if (out.reconstructedValues) - newEntry->reconstructedValue = - datumCopy(out.reconstructedValues[i], - so->state.attType.attbyval, - so->state.attType.attlen); - else - newEntry->reconstructedValue = (Datum) 0; - - /* - * Elements of out.traversalValues should be allocated in - * in.traversalMemoryContext, which is actually a long - * lived context of index scan. 
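/*
 * Distilled skeleton (hypothetical opclass, all matching logic elided) of
 * the inner_consistent output contract that both the removed code above
 * and the new spgInnerTest depend on: the method lists the child nodes to
 * visit, and any per-child state it wants carried down the tree via
 * traversalValues must be allocated in in->traversalMemoryContext, which
 * lives for the whole index scan rather than just this call.
 */
#include "postgres.h"
#include "fmgr.h"
#include "access/spgist.h"

PG_FUNCTION_INFO_V1(spg_demo_inner_consistent);

Datum
spg_demo_inner_consistent(PG_FUNCTION_ARGS)
{
    spgInnerConsistentIn *in = (spgInnerConsistentIn *) PG_GETARG_POINTER(0);
    spgInnerConsistentOut *out = (spgInnerConsistentOut *) PG_GETARG_POINTER(1);
    int         i;

    out->nNodes = 0;
    out->nodeNumbers = (int *) palloc(sizeof(int) * in->nNodes);
    out->traversalValues = (void **) palloc(sizeof(void *) * in->nNodes);

    for (i = 0; i < in->nNodes; i++)
    {
        /* a real opclass would test its scan keys against node i here */
        bool        node_may_match = true;

        if (node_may_match)
        {
            /* per-child state must outlive the temporary call context */
            MemoryContext oldCxt =
                MemoryContextSwitchTo(in->traversalMemoryContext);
            int        *childState = (int *) palloc(sizeof(int));

            MemoryContextSwitchTo(oldCxt);

            *childState = in->level + 1;    /* whatever the opclass needs */
            out->traversalValues[out->nNodes] = childState;
            out->nodeNumbers[out->nNodes] = i;
            out->nNodes++;
        }
    }

    PG_RETURN_VOID();
}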
- */ - newEntry->traversalValue = (out.traversalValues) ? - out.traversalValues[i] : NULL; - - so->scanStack = lcons(newEntry, so->scanStack); + if (innerTuple->tupstate == SPGIST_REDIRECT) + { + /* transfer attention to redirect point */ + item->heapPtr = ((SpGistDeadTuple) innerTuple)->pointer; + Assert(ItemPointerGetBlockNumber(&item->heapPtr) != + SPGIST_METAPAGE_BLKNO); + goto redirect; + } + elog(ERROR, "unexpected SPGiST tuple state: %d", + innerTuple->tupstate); } + + spgInnerTest(so, item, innerTuple, isnull); } } - /* done with this scan stack entry */ - freeScanStackEntry(so, stackEntry); + /* done with this scan item */ + spgFreeSearchItem(so, item); /* clear temp context before proceeding to the next one */ MemoryContextReset(so->tempCxt); } @@ -555,11 +847,14 @@ spgWalk(Relation index, SpGistScanOpaque so, bool scanWholeIndex, UnlockReleaseBuffer(buffer); } + /* storeRes subroutine for getbitmap case */ static void storeBitmap(SpGistScanOpaque so, ItemPointer heapPtr, - Datum leafValue, bool isnull, bool recheck) + Datum leafValue, bool isnull, bool recheck, bool recheckDistances, + double *distances) { + Assert(!recheckDistances && !distances); tbm_add_tuples(so->tbm, heapPtr, 1, recheck); so->ntids++; } @@ -583,11 +878,26 @@ spggetbitmap(IndexScanDesc scan, TIDBitmap *tbm) /* storeRes subroutine for gettuple case */ static void storeGettuple(SpGistScanOpaque so, ItemPointer heapPtr, - Datum leafValue, bool isnull, bool recheck) + Datum leafValue, bool isnull, bool recheck, bool recheckDistances, + double *distances) { Assert(so->nPtrs < MaxIndexTuplesPerPage); so->heapPtrs[so->nPtrs] = *heapPtr; so->recheck[so->nPtrs] = recheck; + so->recheckDistances[so->nPtrs] = recheckDistances; + + if (so->numberOfOrderBys > 0) + { + if (isnull) + so->distances[so->nPtrs] = NULL; + else + { + Size size = sizeof(double) * so->numberOfOrderBys; + + so->distances[so->nPtrs] = memcpy(palloc(size), distances, size); + } + } + if (so->want_itup) { /* @@ -616,14 +926,29 @@ spggettuple(IndexScanDesc scan, ScanDirection dir) { if (so->iPtr < so->nPtrs) { - /* continuing to return tuples from a leaf page */ + /* continuing to return reported tuples */ scan->xs_ctup.t_self = so->heapPtrs[so->iPtr]; scan->xs_recheck = so->recheck[so->iPtr]; scan->xs_hitup = so->reconTups[so->iPtr]; + + if (so->numberOfOrderBys > 0) + index_store_float8_orderby_distances(scan, so->orderByTypes, + so->distances[so->iPtr], + so->recheckDistances[so->iPtr]); so->iPtr++; return true; } + if (so->numberOfOrderBys > 0) + { + /* Must pfree distances to avoid memory leak */ + int i; + + for (i = 0; i < so->nPtrs; i++) + if (so->distances[i]) + pfree(so->distances[i]); + } + if (so->want_itup) { /* Must pfree reconstructed tuples to avoid memory leak */ diff --git a/src/backend/access/spgist/spgtextproc.c b/src/backend/access/spgist/spgtextproc.c index 53f298b6c2..153c57b540 100644 --- a/src/backend/access/spgist/spgtextproc.c +++ b/src/backend/access/spgist/spgtextproc.c @@ -29,7 +29,7 @@ * No new entries ever get pushed into a -2-labeled child, either. * * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * IDENTIFICATION @@ -67,6 +67,20 @@ */ #define SPGIST_MAX_PREFIX_LENGTH Max((int) (BLCKSZ - 258 * 16 - 100), 32) +/* + * Strategy for collation aware operator on text is equal to btree strategy + * plus value of 10. 
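/*
 * Small sketch of the numbering convention just stated (the full list of
 * shifted strategies follows below); the helper name and the local copy
 * of the offset are made up.  Under the C collation the shifted,
 * collation-aware strategy can simply be mapped back onto the plain btree
 * strategy and evaluated with memcmp()-style semantics.
 */
#include "postgres.h"
#include "access/stratnum.h"

#define DEMO_SPG_STRATEGY_ADDITION 10   /* mirrors SPG_STRATEGY_ADDITION */

static StrategyNumber
demo_spg_text_to_btree_strategy(StrategyNumber strategy)
{
    if (strategy > DEMO_SPG_STRATEGY_ADDITION &&
        strategy != RTPrefixStrategyNumber)
        return strategy - DEMO_SPG_STRATEGY_ADDITION;   /* e.g. 11 -> BTLessStrategyNumber */
    return strategy;            /* already a plain strategy number */
}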
+ * + * Current collation aware strategies and their corresponding btree strategies: + * 11 BTLessStrategyNumber + * 12 BTLessEqualStrategyNumber + * 14 BTGreaterEqualStrategyNumber + * 15 BTGreaterStrategyNumber + */ +#define SPG_STRATEGY_ADDITION (10) +#define SPG_IS_COLLATION_AWARE_STRATEGY(s) ((s) > SPG_STRATEGY_ADDITION \ + && (s) != RTPrefixStrategyNumber) + /* Struct for sorting values in picksplit */ typedef struct spgNodePtr { @@ -496,10 +510,10 @@ spg_text_inner_consistent(PG_FUNCTION_ARGS) * well end with a partial multibyte character, so that applying * any encoding-sensitive test to it would be risky anyhow.) */ - if (strategy > 10) + if (SPG_IS_COLLATION_AWARE_STRATEGY(strategy)) { if (collate_is_c) - strategy -= 10; + strategy -= SPG_STRATEGY_ADDITION; else continue; } @@ -526,6 +540,10 @@ spg_text_inner_consistent(PG_FUNCTION_ARGS) if (r < 0) res = false; break; + case RTPrefixStrategyNumber: + if (r != 0) + res = false; + break; default: elog(ERROR, "unrecognized strategy number: %d", in->scankeys[j].sk_strategy); @@ -605,30 +623,47 @@ spg_text_leaf_consistent(PG_FUNCTION_ARGS) int queryLen = VARSIZE_ANY_EXHDR(query); int r; - if (strategy > 10) + if (strategy == RTPrefixStrategyNumber) + { + /* + * if level >= length of query then reconstrValue must begin with + * query (prefix) string, so we don't need to check it again. + */ + res = (level >= queryLen) || + DatumGetBool(DirectFunctionCall2(text_starts_with, + out->leafValue, + PointerGetDatum(query))); + + if (!res) /* no need to consider remaining conditions */ + break; + + continue; + } + + if (SPG_IS_COLLATION_AWARE_STRATEGY(strategy)) { /* Collation-aware comparison */ - strategy -= 10; + strategy -= SPG_STRATEGY_ADDITION; /* If asserts enabled, verify encoding of reconstructed string */ Assert(pg_verifymbstr(fullValue, fullLen, false)); - r = varstr_cmp(fullValue, Min(queryLen, fullLen), - VARDATA_ANY(query), Min(queryLen, fullLen), + r = varstr_cmp(fullValue, fullLen, + VARDATA_ANY(query), queryLen, PG_GET_COLLATION()); } else { /* Non-collation-aware comparison */ r = memcmp(fullValue, VARDATA_ANY(query), Min(queryLen, fullLen)); - } - if (r == 0) - { - if (queryLen > fullLen) - r = -1; - else if (queryLen < fullLen) - r = 1; + if (r == 0) + { + if (queryLen > fullLen) + r = -1; + else if (queryLen < fullLen) + r = 1; + } } switch (strategy) diff --git a/src/backend/access/spgist/spgutils.c b/src/backend/access/spgist/spgutils.c index 8656af453c..9919e6f0d7 100644 --- a/src/backend/access/spgist/spgutils.c +++ b/src/backend/access/spgist/spgutils.c @@ -4,7 +4,7 @@ * various support functions for SP-GiST * * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * IDENTIFICATION @@ -15,17 +15,26 @@ #include "postgres.h" +#include "access/amvalidate.h" +#include "access/htup_details.h" #include "access/reloptions.h" #include "access/spgist_private.h" #include "access/transam.h" #include "access/xact.h" +#include "catalog/pg_amop.h" +#include "optimizer/paths.h" #include "storage/bufmgr.h" #include "storage/indexfsm.h" #include "storage/lmgr.h" #include "utils/builtins.h" +#include "utils/catcache.h" #include "utils/index_selfuncs.h" #include "utils/lsyscache.h" +#include "utils/syscache.h" +extern Expr *spgcanorderbyop(IndexOptInfo *index, + PathKey *pathkey, int pathkeyno, + Expr *orderby_clause, int *indexcol_p); /* * SP-GiST handler function: return 
IndexAmRoutine with access method parameters @@ -39,7 +48,7 @@ spghandler(PG_FUNCTION_ARGS) amroutine->amstrategies = 0; amroutine->amsupport = SPGISTNProc; amroutine->amcanorder = false; - amroutine->amcanorderbyop = false; + amroutine->amcanorderbyop = true; amroutine->amcanbackward = false; amroutine->amcanunique = false; amroutine->amcanmulticol = false; @@ -50,6 +59,7 @@ spghandler(PG_FUNCTION_ARGS) amroutine->amclusterable = false; amroutine->ampredlocks = false; amroutine->amcanparallel = false; + amroutine->amcaninclude = false; amroutine->amkeytype = InvalidOid; amroutine->ambuild = spgbuild; @@ -60,7 +70,7 @@ spghandler(PG_FUNCTION_ARGS) amroutine->amcanreturn = spgcanreturn; amroutine->amcostestimate = spgcostestimate; amroutine->amoptions = spgoptions; - amroutine->amproperty = NULL; + amroutine->amproperty = spgproperty; amroutine->amvalidate = spgvalidate; amroutine->ambeginscan = spgbeginscan; amroutine->amrescan = spgrescan; @@ -112,7 +122,7 @@ spgGetCache(Relation index) * tupdesc. We pass this to the opclass config function so that * polymorphic opclasses are possible. */ - atttype = index->rd_att->attrs[0]->atttypid; + atttype = TupleDescAttr(index->rd_att, 0)->atttypid; /* Call the config function to get config info for the opclass */ in.attType = atttype; @@ -125,6 +135,22 @@ spgGetCache(Relation index) /* Get the information we need about each relevant datatype */ fillTypeDesc(&cache->attType, atttype); + + if (OidIsValid(cache->config.leafType) && + cache->config.leafType != atttype) + { + if (!OidIsValid(index_getprocid(index, 1, SPGIST_COMPRESS_PROC))) + ereport(ERROR, + (errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("compress method must be defined when leaf type is different from input type"))); + + fillTypeDesc(&cache->attLeafType, cache->config.leafType); + } + else + { + cache->attLeafType = cache->attType; + } + fillTypeDesc(&cache->attPrefixType, cache->config.prefixType); fillTypeDesc(&cache->attLabelType, cache->config.labelType); @@ -164,6 +190,7 @@ initSpGistState(SpGistState *state, Relation index) state->config = cache->config; state->attType = cache->attType; + state->attLeafType = cache->attLeafType; state->attPrefixType = cache->attPrefixType; state->attLabelType = cache->attLabelType; @@ -256,15 +283,27 @@ SpGistUpdateMetaPage(Relation index) if (cache != NULL) { Buffer metabuffer; - SpGistMetaPageData *metadata; metabuffer = ReadBuffer(index, SPGIST_METAPAGE_BLKNO); if (ConditionalLockBuffer(metabuffer)) { - metadata = SpGistPageGetMeta(BufferGetPage(metabuffer)); + Page metapage = BufferGetPage(metabuffer); + SpGistMetaPageData *metadata = SpGistPageGetMeta(metapage); + metadata->lastUsedPages = cache->lastUsedPages; + /* + * Set pd_lower just past the end of the metadata. This is + * essential, because without doing so, metadata will be lost if + * xlog.c compresses the page. (We must do this here because + * pre-v11 versions of PG did not set the metapage's pd_lower + * correctly, so a pg_upgraded index might contain the wrong + * value.) + */ + ((PageHeader) metapage)->pd_lower = + ((char *) metadata + sizeof(SpGistMetaPageData)) - (char *) metapage; + MarkBufferDirty(metabuffer); UnlockReleaseBuffer(metabuffer); } @@ -534,6 +573,14 @@ SpGistInitMetapage(Page page) /* initialize last-used-page cache to empty */ for (i = 0; i < SPGIST_CACHED_PAGES; i++) metadata->lastUsedPages.cachedPage[i].blkno = InvalidBlockNumber; + + /* + * Set pd_lower just past the end of the metadata. 
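/*
 * Hypothetical config function tying together the two pieces above: once
 * spgConfigOut.leafType differs from the indexed column's type, the check
 * added to spgGetCache insists that the opclass also provide the compress
 * support function that turns an input value into a leaf value (as
 * sketched earlier for polygons and boxes).
 */
#include "postgres.h"
#include "fmgr.h"
#include "access/spgist.h"
#include "catalog/pg_type.h"

PG_FUNCTION_INFO_V1(spg_demo_config);

Datum
spg_demo_config(PG_FUNCTION_ARGS)
{
    spgConfigOut *cfg = (spgConfigOut *) PG_GETARG_POINTER(1);

    cfg->prefixType = POINTOID;     /* e.g. a quad-tree centroid */
    cfg->labelType = VOIDOID;       /* no node labels */
    cfg->leafType = BOXOID;         /* leaf datum differs from the input type */
    cfg->canReturnData = false;     /* original value cannot be reconstructed */
    cfg->longValuesOK = false;
    PG_RETURN_VOID();
}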
This is essential, + * because without doing so, metadata will be lost if xlog.c compresses + * the page. + */ + ((PageHeader) page)->pd_lower = + ((char *) metadata + sizeof(SpGistMetaPageData)) - (char *) page; } /* @@ -598,7 +645,7 @@ spgFormLeafTuple(SpGistState *state, ItemPointer heapPtr, /* compute space needed (note result is already maxaligned) */ size = SGLTHDRSZ; if (!isnull) - size += SpGistGetTypeSize(&state->attType, datum); + size += SpGistGetTypeSize(&state->attLeafType, datum); /* * Ensure that we can replace the tuple with a dead tuple later. This @@ -614,7 +661,7 @@ spgFormLeafTuple(SpGistState *state, ItemPointer heapPtr, tup->nextOffset = InvalidOffsetNumber; tup->heapPtr = *heapPtr; if (!isnull) - memcpyDatum(SGLTDATAPTR(tup), &state->attType, datum); + memcpyDatum(SGLTDATAPTR(tup), &state->attLeafType, datum); return tup; } @@ -911,3 +958,82 @@ SpGistPageAddNewItem(SpGistState *state, Page page, Item item, Size size, return offnum; } + +/* + * spgproperty() -- Check boolean properties of indexes. + * + * This is optional for most AMs, but is required for SP-GiST because the core + * property code doesn't support AMPROP_DISTANCE_ORDERABLE. + */ +bool +spgproperty(Oid index_oid, int attno, + IndexAMProperty prop, const char *propname, + bool *res, bool *isnull) +{ + Oid opclass, + opfamily, + opcintype; + CatCList *catlist; + int i; + + /* Only answer column-level inquiries */ + if (attno == 0) + return false; + + switch (prop) + { + case AMPROP_DISTANCE_ORDERABLE: + break; + default: + return false; + } + + /* + * Currently, SP-GiST distance-ordered scans require that there be a + * distance operator in the opclass with the default types. So we assume + * that if such an operator exists, then there's a reason for it. + */ + + /* First we need to know the column's opclass. */ + opclass = get_index_column_opclass(index_oid, attno); + if (!OidIsValid(opclass)) + { + *isnull = true; + return true; + } + + /* Now look up the opclass family and input datatype. */ + if (!get_opclass_opfamily_and_input_type(opclass, &opfamily, &opcintype)) + { + *isnull = true; + return true; + } + + /* And now we can check whether the operator is provided. 
*/ + catlist = SearchSysCacheList1(AMOPSTRATEGY, + ObjectIdGetDatum(opfamily)); + + *res = false; + + for (i = 0; i < catlist->n_members; i++) + { + HeapTuple amoptup = &catlist->members[i]->tuple; + Form_pg_amop amopform = (Form_pg_amop) GETSTRUCT(amoptup); + + if (amopform->amoppurpose == AMOP_ORDER && + (amopform->amoplefttype == opcintype || + amopform->amoprighttype == opcintype) && + opfamily_can_sort_type(amopform->amopsortfamily, + get_op_rettype(amopform->amopopr))) + { + *res = true; + break; + } + } + + ReleaseSysCacheList(catlist); + + *isnull = false; + + return true; +} diff --git a/src/backend/access/spgist/spgvacuum.c b/src/backend/access/spgist/spgvacuum.c index d7d5e90ef3..a83a4b581e 100644 --- a/src/backend/access/spgist/spgvacuum.c +++ b/src/backend/access/spgist/spgvacuum.c @@ -4,7 +4,7 @@ * vacuum for SP-GiST * * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * IDENTIFICATION @@ -845,6 +845,21 @@ spgvacuumscan(spgBulkDeleteState *bds) /* Propagate local lastUsedPage cache to metablock */ SpGistUpdateMetaPage(index); + /* + * If we found any empty pages (and recorded them in the FSM), then + * forcibly update the upper-level FSM pages to ensure that searchers can + * find them. It's possible that the pages were also found during + * previous scans and so this is a waste of time, but it's cheap enough + * relative to scanning the index that it shouldn't matter much, and + * making sure that free pages are available sooner not later seems + * worthwhile. + * + * Note that if no empty pages exist, we don't bother vacuuming the FSM at + * all. + */ + if (bds->stats->pages_deleted > 0) + IndexFreeSpaceMapVacuum(index); + /* * Truncate index if possible * @@ -916,7 +931,6 @@ dummy_callback(ItemPointer itemptr, void *state) IndexBulkDeleteResult * spgvacuumcleanup(IndexVacuumInfo *info, IndexBulkDeleteResult *stats) { - Relation index = info->index; spgBulkDeleteState bds; /* No-op in ANALYZE ONLY mode */ @@ -926,8 +940,8 @@ spgvacuumcleanup(IndexVacuumInfo *info, IndexBulkDeleteResult *stats) /* * We don't need to scan the index if there was a preceding bulkdelete * pass. Otherwise, make a pass that won't delete any live tuples, but - * might still accomplish useful stuff with redirect/placeholder cleanup, - * and in any case will provide stats. + * might still accomplish useful stuff with redirect/placeholder cleanup + * and/or FSM housekeeping, and in any case will provide stats. */ if (stats == NULL) { @@ -940,9 +954,6 @@ spgvacuumcleanup(IndexVacuumInfo *info, IndexBulkDeleteResult *stats) spgvacuumscan(&bds); } - /* Finally, vacuum the FSM */ - IndexFreeSpaceMapVacuum(index); - /* * It's quite possible for us to be fooled by concurrent tuple moves into * double-counting some index tuples, so disbelieve any total that exceeds diff --git a/src/backend/access/spgist/spgvalidate.c b/src/backend/access/spgist/spgvalidate.c index 157cf2a028..8ba6c26f0c 100644 --- a/src/backend/access/spgist/spgvalidate.c +++ b/src/backend/access/spgist/spgvalidate.c @@ -3,7 +3,7 @@ * spgvalidate.c * Opclass validator for SP-GiST. 
* - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * IDENTIFICATION @@ -22,6 +22,7 @@ #include "catalog/pg_opfamily.h" #include "catalog/pg_type.h" #include "utils/builtins.h" +#include "utils/lsyscache.h" #include "utils/regproc.h" #include "utils/syscache.h" @@ -52,6 +53,10 @@ spgvalidate(Oid opclassoid) OpFamilyOpFuncGroup *opclassgroup; int i; ListCell *lc; + spgConfigIn configIn; + spgConfigOut configOut; + Oid configOutLefttype = InvalidOid; + Oid configOutRighttype = InvalidOid; /* Fetch opclass information */ classtup = SearchSysCache1(CLAOID, ObjectIdGetDatum(opclassoid)); @@ -74,6 +79,7 @@ spgvalidate(Oid opclassoid) /* Fetch all operators and support functions of the opfamily */ oprlist = SearchSysCacheList1(AMOPSTRATEGY, ObjectIdGetDatum(opfamilyoid)); proclist = SearchSysCacheList1(AMPROCNUM, ObjectIdGetDatum(opfamilyoid)); + grouplist = identify_opfamily_groups(oprlist, proclist); /* Check individual support functions */ for (i = 0; i < proclist->n_members; i++) @@ -90,7 +96,7 @@ spgvalidate(Oid opclassoid) { ereport(INFO, (errcode(ERRCODE_INVALID_OBJECT_DEFINITION), - errmsg("operator family \"%s\" of access method %s contains support procedure %s with different left and right input types", + errmsg("operator family \"%s\" of access method %s contains support function %s with different left and right input types", opfamilyname, "spgist", format_procedure(procform->amproc)))); result = false; @@ -100,6 +106,40 @@ spgvalidate(Oid opclassoid) switch (procform->amprocnum) { case SPGIST_CONFIG_PROC: + ok = check_amproc_signature(procform->amproc, VOIDOID, true, + 2, 2, INTERNALOID, INTERNALOID); + configIn.attType = procform->amproclefttype; + memset(&configOut, 0, sizeof(configOut)); + + OidFunctionCall2(procform->amproc, + PointerGetDatum(&configIn), + PointerGetDatum(&configOut)); + + configOutLefttype = procform->amproclefttype; + configOutRighttype = procform->amprocrighttype; + + /* + * When leaf and attribute types are the same, compress + * function is not required and we set corresponding bit in + * functionset for later group consistency check. 
+ */ + if (!OidIsValid(configOut.leafType) || + configOut.leafType == configIn.attType) + { + foreach(lc, grouplist) + { + OpFamilyOpFuncGroup *group = lfirst(lc); + + if (group->lefttype == procform->amproclefttype && + group->righttype == procform->amprocrighttype) + { + group->functionset |= + ((uint64) 1) << SPGIST_COMPRESS_PROC; + break; + } + } + } + break; case SPGIST_CHOOSE_PROC: case SPGIST_PICKSPLIT_PROC: case SPGIST_INNER_CONSISTENT_PROC: @@ -110,6 +150,15 @@ spgvalidate(Oid opclassoid) ok = check_amproc_signature(procform->amproc, BOOLOID, true, 2, 2, INTERNALOID, INTERNALOID); break; + case SPGIST_COMPRESS_PROC: + if (configOutLefttype != procform->amproclefttype || + configOutRighttype != procform->amprocrighttype) + ok = false; + else + ok = check_amproc_signature(procform->amproc, + configOut.leafType, true, + 1, 1, procform->amproclefttype); + break; default: ereport(INFO, (errcode(ERRCODE_INVALID_OBJECT_DEFINITION), @@ -138,6 +187,7 @@ spgvalidate(Oid opclassoid) { HeapTuple oprtup = &oprlist->members[i]->tuple; Form_pg_amop oprform = (Form_pg_amop) GETSTRUCT(oprtup); + Oid op_rettype; /* TODO: Check that only allowed strategy numbers exist */ if (oprform->amopstrategy < 1 || oprform->amopstrategy > 63) @@ -151,20 +201,26 @@ spgvalidate(Oid opclassoid) result = false; } - /* spgist doesn't support ORDER BY operators */ - if (oprform->amoppurpose != AMOP_SEARCH || - OidIsValid(oprform->amopsortfamily)) + /* spgist supports ORDER BY operators */ + if (oprform->amoppurpose != AMOP_SEARCH) { - ereport(INFO, - (errcode(ERRCODE_INVALID_OBJECT_DEFINITION), - errmsg("operator family \"%s\" of access method %s contains invalid ORDER BY specification for operator %s", - opfamilyname, "spgist", - format_operator(oprform->amopopr)))); - result = false; + /* ... and operator result must match the claimed btree opfamily */ + op_rettype = get_op_rettype(oprform->amopopr); + if (!opfamily_can_sort_type(oprform->amopsortfamily, op_rettype)) + { + ereport(INFO, + (errcode(ERRCODE_INVALID_OBJECT_DEFINITION), + errmsg("operator family \"%s\" of access method %s contains invalid ORDER BY specification for operator %s", + opfamilyname, "spgist", + format_operator(oprform->amopopr)))); + result = false; + } } + else + op_rettype = BOOLOID; /* Check operator signature --- same for all spgist strategies */ - if (!check_amop_signature(oprform->amopopr, BOOLOID, + if (!check_amop_signature(oprform->amopopr, op_rettype, oprform->amoplefttype, oprform->amoprighttype)) { @@ -178,7 +234,6 @@ spgvalidate(Oid opclassoid) } /* Now check for inconsistent groups of operators/functions */ - grouplist = identify_opfamily_groups(oprlist, proclist); opclassgroup = NULL; foreach(lc, grouplist) { diff --git a/src/backend/access/spgist/spgxlog.c b/src/backend/access/spgist/spgxlog.c index c440d21715..9e2bd3f811 100644 --- a/src/backend/access/spgist/spgxlog.c +++ b/src/backend/access/spgist/spgxlog.c @@ -4,7 +4,7 @@ * WAL replay logic for SP-GiST * * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * IDENTIFICATION @@ -1033,15 +1033,16 @@ void spg_mask(char *pagedata, BlockNumber blkno) { Page page = (Page) pagedata; + PageHeader pagehdr = (PageHeader) page; - mask_page_lsn(page); + mask_page_lsn_and_checksum(page); mask_page_hint_bits(page); /* - * Any SpGist page other than meta contains unused space which needs to be - * masked. 
+ * Mask the unused space, but only if the page's pd_lower appears to have + * been set correctly. */ - if (!SpGistPageIsMeta(page)) + if (pagehdr->pd_lower > SizeOfPageHeaderData) mask_unused_space(page); } diff --git a/src/backend/access/tablesample/bernoulli.c b/src/backend/access/tablesample/bernoulli.c index 5f6d478159..fba62e7b16 100644 --- a/src/backend/access/tablesample/bernoulli.c +++ b/src/backend/access/tablesample/bernoulli.c @@ -13,7 +13,7 @@ * cutoff value computed from the selection probability by BeginSampleScan. * * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * IDENTIFICATION @@ -24,9 +24,6 @@ #include "postgres.h" -#ifdef _MSC_VER -#include /* for _isnan */ -#endif #include #include "access/hash.h" diff --git a/src/backend/access/tablesample/system.c b/src/backend/access/tablesample/system.c index e270cbc4a0..4d937b4258 100644 --- a/src/backend/access/tablesample/system.c +++ b/src/backend/access/tablesample/system.c @@ -13,7 +13,7 @@ * cutoff value computed from the selection probability by BeginSampleScan. * * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * IDENTIFICATION @@ -24,9 +24,6 @@ #include "postgres.h" -#ifdef _MSC_VER -#include /* for _isnan */ -#endif #include #include "access/hash.h" diff --git a/src/backend/access/tablesample/tablesample.c b/src/backend/access/tablesample/tablesample.c index 10d2bc91b3..6f62581e87 100644 --- a/src/backend/access/tablesample/tablesample.c +++ b/src/backend/access/tablesample/tablesample.c @@ -3,7 +3,7 @@ * tablesample.c * Support functions for TABLESAMPLE feature * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * diff --git a/src/backend/access/transam/README b/src/backend/access/transam/README index e7dd19fd7b..ad4083eb6b 100644 --- a/src/backend/access/transam/README +++ b/src/backend/access/transam/README @@ -177,13 +177,13 @@ subtransaction level with the same name. So it's a completely new subtransaction as far as the internals are concerned. Other subsystems are allowed to start "internal" subtransactions, which are -handled by BeginInternalSubtransaction. This is to allow implementing +handled by BeginInternalSubTransaction. This is to allow implementing exception handling, e.g. in PL/pgSQL. ReleaseCurrentSubTransaction and RollbackAndReleaseCurrentSubTransaction allows the subsystem to close said subtransactions. The main difference between this and the savepoint/release path is that we execute the complete state transition immediately in each subroutine, rather than deferring some work until CommitTransactionCommand. -Another difference is that BeginInternalSubtransaction is allowed when no +Another difference is that BeginInternalSubTransaction is allowed when no explicit transaction block has been established, while DefineSavepoint is not. 
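As a rough illustrative sketch (not taken from this patch) of the internal-subtransaction pattern the README text above describes, loosely modeled on what PL/pgSQL's exception blocks do: the helper name run_protected and its callback are hypothetical, and the CurrentResourceOwner save/restore that a real caller must also perform is omitted for brevity.

    #include "postgres.h"

    #include "access/xact.h"

    /* Hypothetical helper: run callback() inside an internal subtransaction. */
    static void
    run_protected(void (*callback) (void))
    {
        MemoryContext oldcontext = CurrentMemoryContext;

        /* Allowed even without an explicit transaction block, unlike DefineSavepoint. */
        BeginInternalSubTransaction(NULL);

        PG_TRY();
        {
            callback();             /* work that may elog(ERROR) */

            /* Success: release ("commit") the subtransaction, back to outer context. */
            ReleaseCurrentSubTransaction();
            MemoryContextSwitchTo(oldcontext);
        }
        PG_CATCH();
        {
            /* A real caller would normally CopyErrorData() here before flushing. */
            FlushErrorState();

            /* Failure: abort and clean up the subtransaction. */
            RollbackAndReleaseCurrentSubTransaction();
            MemoryContextSwitchTo(oldcontext);
        }
        PG_END_TRY();
    }

Because the complete subtransaction state transition happens inside these calls, no deferred work is left for CommitTransactionCommand, which is the difference from the savepoint/release path noted above.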
diff --git a/src/backend/access/transam/README.parallel b/src/backend/access/transam/README.parallel index 5c33c40ae9..85e5840feb 100644 --- a/src/backend/access/transam/README.parallel +++ b/src/backend/access/transam/README.parallel @@ -122,9 +122,16 @@ worker. This includes: values are restored, this incidentally sets SessionUserId and OuterUserId to the correct values. This final step restores CurrentUserId. -To prevent undetected or unprincipled deadlocks when running in parallel mode, -this could should eventually handle heavyweight locks in some way. This is -not implemented yet. + - State related to pending REINDEX operations, which prevents access to + an index that is currently being rebuilt. + + - Active relmapper.c mapping state. This is needed to allow consistent + answers when fetching the current relfilenode for relation oids of + mapped relations. + +To prevent unprincipled deadlocks when running in parallel mode, this code +also arranges for the leader and all workers to participate in group +locking. See src/backend/storage/lmgr/README for more details. Transaction Integration ======================= diff --git a/src/backend/access/transam/clog.c b/src/backend/access/transam/clog.c index 0a7e2b310f..8b7ff5b0c2 100644 --- a/src/backend/access/transam/clog.c +++ b/src/backend/access/transam/clog.c @@ -23,7 +23,7 @@ * for aborts (whether sync or async), since the post-crash assumption would * be that such transactions failed anyway. * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/backend/access/transam/clog.c @@ -39,7 +39,9 @@ #include "access/xloginsert.h" #include "access/xlogutils.h" #include "miscadmin.h" +#include "pgstat.h" #include "pg_trace.h" +#include "storage/proc.h" /* * Defines for CLOG page sizes. A page is the same BLCKSZ as is used @@ -71,6 +73,12 @@ #define GetLSNIndex(slotno, xid) ((slotno) * CLOG_LSNS_PER_PAGE + \ ((xid) % (TransactionId) CLOG_XACTS_PER_PAGE) / CLOG_XACTS_PER_LSN_GROUP) +/* + * The number of subtransactions below which we consider to apply clog group + * update optimization. Testing reveals that the number higher than this can + * hurt performance. 
+ */ +#define THRESHOLD_SUBTRANS_CLOG_OPT 5 /* * Link to shared-memory data structures for CLOG control @@ -87,11 +95,17 @@ static void WriteTruncateXlogRec(int pageno, TransactionId oldestXact, Oid oldestXidDb); static void TransactionIdSetPageStatus(TransactionId xid, int nsubxids, TransactionId *subxids, XidStatus status, - XLogRecPtr lsn, int pageno); + XLogRecPtr lsn, int pageno, + bool all_xact_same_page); static void TransactionIdSetStatusBit(TransactionId xid, XidStatus status, XLogRecPtr lsn, int slotno); static void set_status_by_pages(int nsubxids, TransactionId *subxids, XidStatus status, XLogRecPtr lsn); +static bool TransactionGroupUpdateXidStatus(TransactionId xid, + XidStatus status, XLogRecPtr lsn, int pageno); +static void TransactionIdSetPageStatusInternal(TransactionId xid, int nsubxids, + TransactionId *subxids, XidStatus status, + XLogRecPtr lsn, int pageno); /* @@ -174,7 +188,7 @@ TransactionIdSetTreeStatus(TransactionId xid, int nsubxids, * Set the parent and all subtransactions in a single call */ TransactionIdSetPageStatus(xid, nsubxids, subxids, status, lsn, - pageno); + pageno, true); } else { @@ -201,7 +215,7 @@ TransactionIdSetTreeStatus(TransactionId xid, int nsubxids, */ pageno = TransactionIdToPage(xid); TransactionIdSetPageStatus(xid, nsubxids_on_first_page, subxids, status, - lsn, pageno); + lsn, pageno, false); /* * Now work through the rest of the subxids one clog page at a time, @@ -227,34 +241,110 @@ set_status_by_pages(int nsubxids, TransactionId *subxids, int offset = 0; int i = 0; + Assert(nsubxids > 0); /* else the pageno fetch above is unsafe */ + while (i < nsubxids) { int num_on_page = 0; + int nextpageno; - while (TransactionIdToPage(subxids[i]) == pageno && i < nsubxids) + do { + nextpageno = TransactionIdToPage(subxids[i]); + if (nextpageno != pageno) + break; num_on_page++; i++; - } + } while (i < nsubxids); TransactionIdSetPageStatus(InvalidTransactionId, num_on_page, subxids + offset, - status, lsn, pageno); + status, lsn, pageno, false); offset = i; - pageno = TransactionIdToPage(subxids[offset]); + pageno = nextpageno; } } /* - * Record the final state of transaction entries in the commit log for - * all entries on a single page. Atomic only on this page. - * - * Otherwise API is same as TransactionIdSetTreeStatus() + * Record the final state of transaction entries in the commit log for all + * entries on a single page. Atomic only on this page. */ static void TransactionIdSetPageStatus(TransactionId xid, int nsubxids, TransactionId *subxids, XidStatus status, - XLogRecPtr lsn, int pageno) + XLogRecPtr lsn, int pageno, + bool all_xact_same_page) +{ + /* Can't use group update when PGPROC overflows. */ + StaticAssertStmt(THRESHOLD_SUBTRANS_CLOG_OPT <= PGPROC_MAX_CACHED_SUBXIDS, + "group clog threshold less than PGPROC cached subxids"); + + /* + * When there is contention on CLogControlLock, we try to group multiple + * updates; a single leader process will perform transaction status + * updates for multiple backends so that the number of times + * CLogControlLock needs to be acquired is reduced. + * + * For this optimization to be safe, the XID in MyPgXact and the subxids + * in MyProc must be the same as the ones for which we're setting the + * status. Check that this is the case. + * + * For this optimization to be efficient, we shouldn't have too many + * sub-XIDs and all of the XIDs for which we're adjusting clog should be + * on the same page. Check those conditions, too. 
+ */ + if (all_xact_same_page && xid == MyPgXact->xid && + nsubxids <= THRESHOLD_SUBTRANS_CLOG_OPT && + nsubxids == MyPgXact->nxids && + memcmp(subxids, MyProc->subxids.xids, + nsubxids * sizeof(TransactionId)) == 0) + { + /* + * We don't try to do group update optimization if a process has + * overflowed the subxids array in its PGPROC, since in that case we + * don't have a complete list of XIDs for it. + */ + Assert(THRESHOLD_SUBTRANS_CLOG_OPT <= PGPROC_MAX_CACHED_SUBXIDS); + + /* + * If we can immediately acquire CLogControlLock, we update the status + * of our own XID and release the lock. If not, try to use group XID + * update. If that doesn't work out, fall back to waiting for the + * lock to perform an update for this transaction only. + */ + if (LWLockConditionalAcquire(CLogControlLock, LW_EXCLUSIVE)) + { + /* Got the lock without waiting! Do the update. */ + TransactionIdSetPageStatusInternal(xid, nsubxids, subxids, status, + lsn, pageno); + LWLockRelease(CLogControlLock); + return; + } + else if (TransactionGroupUpdateXidStatus(xid, status, lsn, pageno)) + { + /* Group update mechanism has done the work. */ + return; + } + + /* Fall through only if update isn't done yet. */ + } + + /* Group update not applicable, or couldn't accept this page number. */ + LWLockAcquire(CLogControlLock, LW_EXCLUSIVE); + TransactionIdSetPageStatusInternal(xid, nsubxids, subxids, status, + lsn, pageno); + LWLockRelease(CLogControlLock); +} + +/* + * Record the final state of a transaction entry in the commit log + * + * We don't do any locking here; caller must handle that. + */ +static void +TransactionIdSetPageStatusInternal(TransactionId xid, int nsubxids, + TransactionId *subxids, XidStatus status, + XLogRecPtr lsn, int pageno) { int slotno; int i; @@ -262,8 +352,7 @@ TransactionIdSetPageStatus(TransactionId xid, int nsubxids, Assert(status == TRANSACTION_STATUS_COMMITTED || status == TRANSACTION_STATUS_ABORTED || (status == TRANSACTION_STATUS_SUB_COMMITTED && !TransactionIdIsValid(xid))); - - LWLockAcquire(CLogControlLock, LW_EXCLUSIVE); + Assert(LWLockHeldByMeInMode(CLogControlLock, LW_EXCLUSIVE)); /* * If we're doing an async commit (ie, lsn is valid), then we must wait @@ -311,8 +400,167 @@ TransactionIdSetPageStatus(TransactionId xid, int nsubxids, } ClogCtl->shared->page_dirty[slotno] = true; +} + +/* + * When we cannot immediately acquire CLogControlLock in exclusive mode at + * commit time, add ourselves to a list of processes that need their XID + * status updated. The first process to add itself to the list will acquire + * CLogControlLock in exclusive mode and set transaction status as required + * on behalf of all group members. This avoids a great deal of contention + * around CLogControlLock when many processes are trying to commit at once, + * since the lock need not be repeatedly handed off from one committing + * process to the next. + * + * Returns true when transaction status has been updated in clog; returns + * false if we decided against applying the optimization because the page + * number we need to update differs from those processes already waiting. + */ +static bool +TransactionGroupUpdateXidStatus(TransactionId xid, XidStatus status, + XLogRecPtr lsn, int pageno) +{ + volatile PROC_HDR *procglobal = ProcGlobal; + PGPROC *proc = MyProc; + uint32 nextidx; + uint32 wakeidx; + + /* We should definitely have an XID whose status needs to be updated. */ + Assert(TransactionIdIsValid(xid)); + + /* + * Add ourselves to the list of processes needing a group XID status + * update. 
+ */ + proc->clogGroupMember = true; + proc->clogGroupMemberXid = xid; + proc->clogGroupMemberXidStatus = status; + proc->clogGroupMemberPage = pageno; + proc->clogGroupMemberLsn = lsn; + nextidx = pg_atomic_read_u32(&procglobal->clogGroupFirst); + + while (true) + { + /* + * Add the proc to list, if the clog page where we need to update the + * current transaction status is same as group leader's clog page. + * + * There is a race condition here, which is that after doing the below + * check and before adding this proc's clog update to a group, the + * group leader might have already finished the group update for this + * page and becomes group leader of another group. This will lead to a + * situation where a single group can have different clog page + * updates. This isn't likely and will still work, just maybe a bit + * less efficiently. + */ + if (nextidx != INVALID_PGPROCNO && + ProcGlobal->allProcs[nextidx].clogGroupMemberPage != proc->clogGroupMemberPage) + { + proc->clogGroupMember = false; + return false; + } + + pg_atomic_write_u32(&proc->clogGroupNext, nextidx); + + if (pg_atomic_compare_exchange_u32(&procglobal->clogGroupFirst, + &nextidx, + (uint32) proc->pgprocno)) + break; + } + + /* + * If the list was not empty, the leader will update the status of our + * XID. It is impossible to have followers without a leader because the + * first process that has added itself to the list will always have + * nextidx as INVALID_PGPROCNO. + */ + if (nextidx != INVALID_PGPROCNO) + { + int extraWaits = 0; + + /* Sleep until the leader updates our XID status. */ + pgstat_report_wait_start(WAIT_EVENT_CLOG_GROUP_UPDATE); + for (;;) + { + /* acts as a read barrier */ + PGSemaphoreLock(proc->sem); + if (!proc->clogGroupMember) + break; + extraWaits++; + } + pgstat_report_wait_end(); + + Assert(pg_atomic_read_u32(&proc->clogGroupNext) == INVALID_PGPROCNO); + + /* Fix semaphore count for any absorbed wakeups */ + while (extraWaits-- > 0) + PGSemaphoreUnlock(proc->sem); + return true; + } + + /* We are the leader. Acquire the lock on behalf of everyone. */ + LWLockAcquire(CLogControlLock, LW_EXCLUSIVE); + + /* + * Now that we've got the lock, clear the list of processes waiting for + * group XID status update, saving a pointer to the head of the list. + * Trying to pop elements one at a time could lead to an ABA problem. + */ + nextidx = pg_atomic_exchange_u32(&procglobal->clogGroupFirst, + INVALID_PGPROCNO); + + /* Remember head of list so we can perform wakeups after dropping lock. */ + wakeidx = nextidx; + + /* Walk the list and update the status of all XIDs. */ + while (nextidx != INVALID_PGPROCNO) + { + PGPROC *proc = &ProcGlobal->allProcs[nextidx]; + PGXACT *pgxact = &ProcGlobal->allPgXact[nextidx]; + + /* + * Overflowed transactions should not use group XID status update + * mechanism. + */ + Assert(!pgxact->overflowed); + + TransactionIdSetPageStatusInternal(proc->clogGroupMemberXid, + pgxact->nxids, + proc->subxids.xids, + proc->clogGroupMemberXidStatus, + proc->clogGroupMemberLsn, + proc->clogGroupMemberPage); + + /* Move to next proc in list. */ + nextidx = pg_atomic_read_u32(&proc->clogGroupNext); + } + + /* We're done with the lock now. */ LWLockRelease(CLogControlLock); + + /* + * Now that we've released the lock, go back and wake everybody up. We + * don't do this under the lock so as to keep lock hold times to a + * minimum. 
+ */ + while (wakeidx != INVALID_PGPROCNO) + { + PGPROC *proc = &ProcGlobal->allProcs[wakeidx]; + + wakeidx = pg_atomic_read_u32(&proc->clogGroupNext); + pg_atomic_write_u32(&proc->clogGroupNext, INVALID_PGPROCNO); + + /* ensure all previous writes are visible before follower continues. */ + pg_write_barrier(); + + proc->clogGroupMember = false; + + if (proc != MyProc) + PGSemaphoreUnlock(proc->sem); + } + + return true; } /* @@ -479,7 +727,7 @@ BootStrapCLOG(void) /* * Initialize (or reinitialize) a page of CLOG to zeroes. - * If writeXlog is TRUE, also emit an XLOG record saying we did this. + * If writeXlog is true, also emit an XLOG record saying we did this. * * The page is not actually written, just set up in shared memory. * The slot number of the new page is returned. diff --git a/src/backend/access/transam/commit_ts.c b/src/backend/access/transam/commit_ts.c index 60fb9eeb06..599203c96c 100644 --- a/src/backend/access/transam/commit_ts.c +++ b/src/backend/access/transam/commit_ts.c @@ -15,7 +15,7 @@ * re-perform the status update on redo; so we need make no additional XLOG * entry here. * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/backend/access/transam/commit_ts.c @@ -531,7 +531,7 @@ BootStrapCommitTs(void) /* * Initialize (or reinitialize) a page of CommitTs to zeroes. - * If writeXlog is TRUE, also emit an XLOG record saying we did this. + * If writeXlog is true, also emit an XLOG record saying we did this. * * The page is not actually written, just set up in shared memory. * The slot number of the new page is returned. @@ -573,10 +573,9 @@ CompleteCommitTsInitialization(void) * any leftover data. * * Conversely, we activate the module if the feature is enabled. This is - * not necessary in a master system because we already did it earlier, but - * if we're in a standby server that got promoted which had the feature - * enabled and was following a master that had the feature disabled, this - * is where we turn it on locally. + * necessary for primary and standby as the activation depends on the + * control file contents at the beginning of recovery or when a + * XLOG_PARAMETER_CHANGE is replayed. */ if (!track_commit_timestamp) DeactivateCommitTs(); @@ -586,7 +585,7 @@ CompleteCommitTsInitialization(void) /* * Activate or deactivate CommitTs' upon reception of a XLOG_PARAMETER_CHANGE - * XLog record in a standby. + * XLog record during recovery. */ void CommitTsParameterChange(bool newvalue, bool oldvalue) @@ -884,7 +883,8 @@ AdvanceOldestCommitTsXid(TransactionId oldestXact) /* - * Decide which of two CLOG page numbers is "older" for truncation purposes. + * Decide which of two commitTS page numbers is "older" for truncation + * purposes. * * We need to use comparison of TransactionIds here in order to do the right * thing with wraparound XID arithmetic. However, if we are asked about diff --git a/src/backend/access/transam/generic_xlog.c b/src/backend/access/transam/generic_xlog.c index fbc6810c2f..aa7ad725f4 100644 --- a/src/backend/access/transam/generic_xlog.c +++ b/src/backend/access/transam/generic_xlog.c @@ -4,7 +4,7 @@ * Implementation of generic xlog records. 
* * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/backend/access/transam/generic_xlog.c @@ -61,14 +61,11 @@ typedef struct /* State of generic xlog record construction */ struct GenericXLogState { - /* - * page's images. Should be first in this struct to have MAXALIGN'ed - * images addresses, because some code working with pages directly aligns - * addresses, not offsets from beginning of page - */ - char images[MAX_GENERIC_XLOG_PAGES * BLCKSZ]; + /* Info about each page, see above */ PageData pages[MAX_GENERIC_XLOG_PAGES]; bool isLogged; + /* Page images (properly aligned) */ + PGAlignedBlock images[MAX_GENERIC_XLOG_PAGES]; }; static void writeFragment(PageData *pageData, OffsetNumber offset, @@ -251,12 +248,12 @@ computeDelta(PageData *pageData, Page curpage, Page targetpage) #ifdef WAL_DEBUG if (XLOG_DEBUG) { - char tmp[BLCKSZ]; + PGAlignedBlock tmp; - memcpy(tmp, curpage, BLCKSZ); - applyPageRedo(tmp, pageData->delta, pageData->deltaLen); - if (memcmp(tmp, targetpage, targetLower) != 0 || - memcmp(tmp + targetUpper, targetpage + targetUpper, + memcpy(tmp.data, curpage, BLCKSZ); + applyPageRedo(tmp.data, pageData->delta, pageData->deltaLen); + if (memcmp(tmp.data, targetpage, targetLower) != 0 || + memcmp(tmp.data + targetUpper, targetpage + targetUpper, BLCKSZ - targetUpper) != 0) elog(ERROR, "result of generic xlog apply does not match"); } @@ -277,7 +274,7 @@ GenericXLogStart(Relation relation) for (i = 0; i < MAX_GENERIC_XLOG_PAGES; i++) { - state->pages[i].image = state->images + BLCKSZ * i; + state->pages[i].image = state->images[i].data; state->pages[i].buffer = InvalidBuffer; } @@ -541,7 +538,7 @@ generic_redo(XLogReaderState *record) void generic_mask(char *page, BlockNumber blkno) { - mask_page_lsn(page); + mask_page_lsn_and_checksum(page); mask_unused_space(page); } diff --git a/src/backend/access/transam/multixact.c b/src/backend/access/transam/multixact.c index 7142ecede0..365daf153a 100644 --- a/src/backend/access/transam/multixact.c +++ b/src/backend/access/transam/multixact.c @@ -59,7 +59,7 @@ * counter does not fall within the wraparound horizon considering the global * minimum value. 
* - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/backend/access/transam/multixact.c @@ -1000,14 +1000,14 @@ GetNewMultiXactId(int nmembers, MultiXactOffset *offset) errmsg("database is not accepting commands that generate new MultiXactIds to avoid wraparound data loss in database \"%s\"", oldest_datname), errhint("Execute a database-wide VACUUM in that database.\n" - "You might also need to commit or roll back old prepared transactions."))); + "You might also need to commit or roll back old prepared transactions, or drop stale replication slots."))); else ereport(ERROR, (errcode(ERRCODE_PROGRAM_LIMIT_EXCEEDED), errmsg("database is not accepting commands that generate new MultiXactIds to avoid wraparound data loss in database with OID %u", oldest_datoid), errhint("Execute a database-wide VACUUM in that database.\n" - "You might also need to commit or roll back old prepared transactions."))); + "You might also need to commit or roll back old prepared transactions, or drop stale replication slots."))); } /* @@ -1031,7 +1031,7 @@ GetNewMultiXactId(int nmembers, MultiXactOffset *offset) oldest_datname, multiWrapLimit - result), errhint("Execute a database-wide VACUUM in that database.\n" - "You might also need to commit or roll back old prepared transactions."))); + "You might also need to commit or roll back old prepared transactions, or drop stale replication slots."))); else ereport(WARNING, (errmsg_plural("database with OID %u must be vacuumed before %u more MultiXactId is used", @@ -1040,7 +1040,7 @@ GetNewMultiXactId(int nmembers, MultiXactOffset *offset) oldest_datoid, multiWrapLimit - result), errhint("Execute a database-wide VACUUM in that database.\n" - "You might also need to commit or roll back old prepared transactions."))); + "You might also need to commit or roll back old prepared transactions, or drop stale replication slots."))); } /* Re-acquire lock and start over */ @@ -1892,7 +1892,7 @@ BootStrapMultiXact(void) /* * Initialize (or reinitialize) a page of MultiXactOffset to zeroes. - * If writeXlog is TRUE, also emit an XLOG record saying we did this. + * If writeXlog is true, also emit an XLOG record saying we did this. * * The page is not actually written, just set up in shared memory. * The slot number of the new page is returned. @@ -1932,7 +1932,7 @@ ZeroMultiXactMemberPage(int pageno, bool writeXlog) * MaybeExtendOffsetSlru * Extend the offsets SLRU area, if necessary * - * After a binary upgrade from <= 9.2, the pg_multixact/offset SLRU area might + * After a binary upgrade from <= 9.2, the pg_multixact/offsets SLRU area might * contain files that are shorter than necessary; this would occur if the old * installation had used multixacts beyond the first page (files cannot be * copied, because the on-disk representation is different). 
pg_upgrade would @@ -2321,7 +2321,7 @@ SetMultiXactIdLimit(MultiXactId oldest_datminmxid, Oid oldest_datoid, oldest_datname, multiWrapLimit - curMulti), errhint("To avoid a database shutdown, execute a database-wide VACUUM in that database.\n" - "You might also need to commit or roll back old prepared transactions."))); + "You might also need to commit or roll back old prepared transactions, or drop stale replication slots."))); else ereport(WARNING, (errmsg_plural("database with OID %u must be vacuumed before %u more MultiXactId is used", @@ -2330,7 +2330,7 @@ SetMultiXactIdLimit(MultiXactId oldest_datminmxid, Oid oldest_datoid, oldest_datoid, multiWrapLimit - curMulti), errhint("To avoid a database shutdown, execute a database-wide VACUUM in that database.\n" - "You might also need to commit or roll back old prepared transactions."))); + "You might also need to commit or roll back old prepared transactions, or drop stale replication slots."))); } } @@ -2555,7 +2555,7 @@ SetOffsetVacuumLimit(bool is_startup) /* * NB: Have to prevent concurrent truncation, we might otherwise try to - * lookup a oldestMulti that's concurrently getting truncated away. + * lookup an oldestMulti that's concurrently getting truncated away. */ LWLockAcquire(MultiXactTruncationLock, LW_SHARED); @@ -2732,7 +2732,7 @@ find_multixact_start(MultiXactId multi, MultiXactOffset *result) /* * Flush out dirty data, so PhysicalPageExists can work correctly. * SimpleLruFlush() is a pretty big hammer for that. Alternatively we - * could add a in-memory version of page exists, but find_multixact_start + * could add an in-memory version of page exists, but find_multixact_start * is called infrequently, and it doesn't seem bad to flush buffers to * disk before truncation. */ diff --git a/src/backend/access/transam/parallel.c b/src/backend/access/transam/parallel.c index 17b10383e4..84197192ec 100644 --- a/src/backend/access/transam/parallel.c +++ b/src/backend/access/transam/parallel.c @@ -3,7 +3,7 @@ * parallel.c * Infrastructure for launching parallel workers * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * IDENTIFICATION @@ -14,9 +14,13 @@ #include "postgres.h" +#include "access/nbtree.h" #include "access/parallel.h" +#include "access/session.h" #include "access/xact.h" #include "access/xlog.h" +#include "catalog/pg_enum.h" +#include "catalog/index.h" #include "catalog/namespace.h" #include "commands/async.h" #include "executor/execParallel.h" @@ -34,8 +38,9 @@ #include "utils/guc.h" #include "utils/inval.h" #include "utils/memutils.h" -#include "utils/resowner.h" +#include "utils/relmapper.h" #include "utils/snapmgr.h" +#include "utils/typcache.h" /* @@ -51,8 +56,9 @@ #define PARALLEL_MAGIC 0x50477c7c /* - * Magic numbers for parallel state sharing. Higher-level code should use - * smaller values, leaving these very large ones for use by this module. + * Magic numbers for per-context parallel state sharing. Higher-level code + * should use smaller values, leaving these very large ones for use by this + * module. 
*/ #define PARALLEL_KEY_FIXED UINT64CONST(0xFFFFFFFFFFFF0001) #define PARALLEL_KEY_ERROR_QUEUE UINT64CONST(0xFFFFFFFFFFFF0002) @@ -63,6 +69,10 @@ #define PARALLEL_KEY_ACTIVE_SNAPSHOT UINT64CONST(0xFFFFFFFFFFFF0007) #define PARALLEL_KEY_TRANSACTION_STATE UINT64CONST(0xFFFFFFFFFFFF0008) #define PARALLEL_KEY_ENTRYPOINT UINT64CONST(0xFFFFFFFFFFFF0009) +#define PARALLEL_KEY_SESSION_DSM UINT64CONST(0xFFFFFFFFFFFF000A) +#define PARALLEL_KEY_REINDEX_STATE UINT64CONST(0xFFFFFFFFFFFF000B) +#define PARALLEL_KEY_RELMAPPER_STATE UINT64CONST(0xFFFFFFFFFFFF000C) +#define PARALLEL_KEY_ENUMBLACKLIST UINT64CONST(0xFFFFFFFFFFFF000D) /* Fixed-size parallel state. */ typedef struct FixedParallelState @@ -71,12 +81,16 @@ typedef struct FixedParallelState Oid database_id; Oid authenticated_user_id; Oid current_user_id; + Oid outer_user_id; Oid temp_namespace_id; Oid temp_toast_namespace_id; int sec_context; + bool is_superuser; PGPROC *parallel_master_pgproc; pid_t parallel_master_pid; BackendId parallel_master_backend_id; + TimestampTz xact_ts; + TimestampTz stmt_ts; /* Mutex protects remaining fields. */ slock_t mutex; @@ -105,6 +119,9 @@ static FixedParallelState *MyFixedParallelState; /* List of active parallel contexts. */ static dlist_head pcxt_list = DLIST_STATIC_INIT(pcxt_list); +/* Backend-local copy of data from FixedParallelState. */ +static pid_t ParallelMasterPid; + /* * List of internal parallel worker entry points. We need this for * reasons explained in LookupParallelWorkerFunction(), below. @@ -118,6 +135,9 @@ static const struct { { "ParallelQueryMain", ParallelQueryMain + }, + { + "_bt_parallel_build_main", _bt_parallel_build_main } }; @@ -125,6 +145,7 @@ static const struct static void HandleParallelMessage(ParallelContext *pcxt, int i, StringInfo msg); static void WaitForParallelWorkersToExit(ParallelContext *pcxt); static parallel_worker_main_type LookupParallelWorkerFunction(const char *libraryname, const char *funcname); +static void ParallelWorkerShutdown(int code, Datum arg); /* @@ -134,7 +155,7 @@ static parallel_worker_main_type LookupParallelWorkerFunction(const char *librar */ ParallelContext * CreateParallelContext(const char *library_name, const char *function_name, - int nworkers) + int nworkers, bool serializable_okay) { MemoryContext oldcontext; ParallelContext *pcxt; @@ -145,19 +166,14 @@ CreateParallelContext(const char *library_name, const char *function_name, /* Number of workers should be non-negative. */ Assert(nworkers >= 0); - /* - * If dynamic shared memory is not available, we won't be able to use - * background workers. - */ - if (dynamic_shared_memory_type == DSM_IMPL_NONE) - nworkers = 0; - /* * If we are running under serializable isolation, we can't use parallel * workers, at least not until somebody enhances that mechanism to be - * parallel-aware. + * parallel-aware. Utility statement callers may ask us to ignore this + * restriction because they're always able to safely ignore the fact that + * SIREAD locks do not work with parallelism. */ - if (IsolationIsSerializable()) + if (IsolationIsSerializable() && !serializable_okay) nworkers = 0; /* We might be running in a short-lived memory context. 
*/ @@ -194,9 +210,13 @@ InitializeParallelDSM(ParallelContext *pcxt) Size tsnaplen = 0; Size asnaplen = 0; Size tstatelen = 0; + Size reindexlen = 0; + Size relmapperlen = 0; + Size enumblacklistlen = 0; Size segsize = 0; int i; FixedParallelState *fps; + dsm_handle session_dsm_handle = DSM_HANDLE_INVALID; Snapshot transaction_snapshot = GetTransactionSnapshot(); Snapshot active_snapshot = GetActiveSnapshot(); @@ -211,6 +231,21 @@ InitializeParallelDSM(ParallelContext *pcxt) * Normally, the user will have requested at least one worker process, but * if by chance they have not, we can skip a bunch of things here. */ + if (pcxt->nworkers > 0) + { + /* Get (or create) the per-session DSM segment's handle. */ + session_dsm_handle = GetSessionDsmHandle(); + + /* + * If we weren't able to create a per-session DSM segment, then we can + * continue but we can't safely launch any workers because their + * record typmods would be incompatible so they couldn't exchange + * tuples. + */ + if (session_dsm_handle == DSM_HANDLE_INVALID) + pcxt->nworkers = 0; + } + if (pcxt->nworkers > 0) { /* Estimate space for various kinds of state sharing. */ @@ -226,8 +261,15 @@ InitializeParallelDSM(ParallelContext *pcxt) shm_toc_estimate_chunk(&pcxt->estimator, asnaplen); tstatelen = EstimateTransactionStateSpace(); shm_toc_estimate_chunk(&pcxt->estimator, tstatelen); + shm_toc_estimate_chunk(&pcxt->estimator, sizeof(dsm_handle)); + reindexlen = EstimateReindexStateSpace(); + shm_toc_estimate_chunk(&pcxt->estimator, reindexlen); + relmapperlen = EstimateRelationMapSpace(); + shm_toc_estimate_chunk(&pcxt->estimator, relmapperlen); + enumblacklistlen = EstimateEnumBlacklistSpace(); + shm_toc_estimate_chunk(&pcxt->estimator, enumblacklistlen); /* If you add more chunks here, you probably need to add keys. */ - shm_toc_estimate_keys(&pcxt->estimator, 6); + shm_toc_estimate_keys(&pcxt->estimator, 10); /* Estimate space need for error queues. */ StaticAssertStmt(BUFFERALIGN(PARALLEL_ERROR_QUEUE_SIZE) == @@ -275,12 +317,16 @@ InitializeParallelDSM(ParallelContext *pcxt) shm_toc_allocate(pcxt->toc, sizeof(FixedParallelState)); fps->database_id = MyDatabaseId; fps->authenticated_user_id = GetAuthenticatedUserId(); + fps->outer_user_id = GetCurrentRoleId(); + fps->is_superuser = session_auth_is_superuser; GetUserIdAndSecContext(&fps->current_user_id, &fps->sec_context); GetTempNamespaceState(&fps->temp_namespace_id, &fps->temp_toast_namespace_id); fps->parallel_master_pgproc = MyProc; fps->parallel_master_pid = MyProcPid; fps->parallel_master_backend_id = MyBackendId; + fps->xact_ts = GetCurrentTransactionStartTimestamp(); + fps->stmt_ts = GetCurrentStatementStartTimestamp(); SpinLockInit(&fps->mutex); fps->last_xlog_end = 0; shm_toc_insert(pcxt->toc, PARALLEL_KEY_FIXED, fps); @@ -294,8 +340,12 @@ InitializeParallelDSM(ParallelContext *pcxt) char *tsnapspace; char *asnapspace; char *tstatespace; + char *reindexspace; + char *relmapperspace; char *error_queue_space; + char *session_dsm_handle_space; char *entrypointstate; + char *enumblacklistspace; Size lnamelen; /* Serialize shared libraries we have loaded. */ @@ -322,11 +372,35 @@ InitializeParallelDSM(ParallelContext *pcxt) SerializeSnapshot(active_snapshot, asnapspace); shm_toc_insert(pcxt->toc, PARALLEL_KEY_ACTIVE_SNAPSHOT, asnapspace); + /* Provide the handle for per-session segment. 
*/ + session_dsm_handle_space = shm_toc_allocate(pcxt->toc, + sizeof(dsm_handle)); + *(dsm_handle *) session_dsm_handle_space = session_dsm_handle; + shm_toc_insert(pcxt->toc, PARALLEL_KEY_SESSION_DSM, + session_dsm_handle_space); + /* Serialize transaction state. */ tstatespace = shm_toc_allocate(pcxt->toc, tstatelen); SerializeTransactionState(tstatelen, tstatespace); shm_toc_insert(pcxt->toc, PARALLEL_KEY_TRANSACTION_STATE, tstatespace); + /* Serialize reindex state. */ + reindexspace = shm_toc_allocate(pcxt->toc, reindexlen); + SerializeReindexState(reindexlen, reindexspace); + shm_toc_insert(pcxt->toc, PARALLEL_KEY_REINDEX_STATE, reindexspace); + + /* Serialize relmapper state. */ + relmapperspace = shm_toc_allocate(pcxt->toc, relmapperlen); + SerializeRelationMap(relmapperlen, relmapperspace); + shm_toc_insert(pcxt->toc, PARALLEL_KEY_RELMAPPER_STATE, + relmapperspace); + + /* Serialize enum blacklist state. */ + enumblacklistspace = shm_toc_allocate(pcxt->toc, enumblacklistlen); + SerializeEnumBlacklist(enumblacklistspace, enumblacklistlen); + shm_toc_insert(pcxt->toc, PARALLEL_KEY_ENUMBLACKLIST, + enumblacklistspace); + /* Allocate space for worker information. */ pcxt->worker = palloc0(sizeof(ParallelWorkerInfo) * pcxt->nworkers); @@ -380,8 +454,6 @@ void ReinitializeParallelDSM(ParallelContext *pcxt) { FixedParallelState *fps; - char *error_queue_space; - int i; /* Wait for any old workers to exit. */ if (pcxt->nworkers_launched > 0) @@ -389,24 +461,36 @@ ReinitializeParallelDSM(ParallelContext *pcxt) WaitForParallelWorkersToFinish(pcxt); WaitForParallelWorkersToExit(pcxt); pcxt->nworkers_launched = 0; + if (pcxt->known_attached_workers) + { + pfree(pcxt->known_attached_workers); + pcxt->known_attached_workers = NULL; + pcxt->nknown_attached_workers = 0; + } } /* Reset a few bits of fixed parallel state to a clean state. */ fps = shm_toc_lookup(pcxt->toc, PARALLEL_KEY_FIXED, false); fps->last_xlog_end = 0; - /* Recreate error queues. */ - error_queue_space = - shm_toc_lookup(pcxt->toc, PARALLEL_KEY_ERROR_QUEUE, false); - for (i = 0; i < pcxt->nworkers; ++i) + /* Recreate error queues (if they exist). 
*/ + if (pcxt->nworkers > 0) { - char *start; - shm_mq *mq; + char *error_queue_space; + int i; - start = error_queue_space + i * PARALLEL_ERROR_QUEUE_SIZE; - mq = shm_mq_create(start, PARALLEL_ERROR_QUEUE_SIZE); - shm_mq_set_receiver(mq, MyProc); - pcxt->worker[i].error_mqh = shm_mq_attach(mq, pcxt->seg, NULL); + error_queue_space = + shm_toc_lookup(pcxt->toc, PARALLEL_KEY_ERROR_QUEUE, false); + for (i = 0; i < pcxt->nworkers; ++i) + { + char *start; + shm_mq *mq; + + start = error_queue_space + i * PARALLEL_ERROR_QUEUE_SIZE; + mq = shm_mq_create(start, PARALLEL_ERROR_QUEUE_SIZE); + shm_mq_set_receiver(mq, MyProc); + pcxt->worker[i].error_mqh = shm_mq_attach(mq, pcxt->seg, NULL); + } } } @@ -438,6 +522,7 @@ LaunchParallelWorkers(ParallelContext *pcxt) memset(&worker, 0, sizeof(worker)); snprintf(worker.bgw_name, BGW_MAXLEN, "parallel worker for PID %d", MyProcPid); + snprintf(worker.bgw_type, BGW_MAXLEN, "parallel worker"); worker.bgw_flags = BGWORKER_SHMEM_ACCESS | BGWORKER_BACKEND_DATABASE_CONNECTION | BGWORKER_CLASS_PARALLEL; @@ -480,15 +565,154 @@ LaunchParallelWorkers(ParallelContext *pcxt) */ any_registrations_failed = true; pcxt->worker[i].bgwhandle = NULL; - pfree(pcxt->worker[i].error_mqh); + shm_mq_detach(pcxt->worker[i].error_mqh); pcxt->worker[i].error_mqh = NULL; } } + /* + * Now that nworkers_launched has taken its final value, we can initialize + * known_attached_workers. + */ + if (pcxt->nworkers_launched > 0) + { + pcxt->known_attached_workers = + palloc0(sizeof(bool) * pcxt->nworkers_launched); + pcxt->nknown_attached_workers = 0; + } + /* Restore previous memory context. */ MemoryContextSwitchTo(oldcontext); } +/* + * Wait for all workers to attach to their error queues, and throw an error if + * any worker fails to do this. + * + * Callers can assume that if this function returns successfully, then the + * number of workers given by pcxt->nworkers_launched have initialized and + * attached to their error queues. Whether or not these workers are guaranteed + * to still be running depends on what code the caller asked them to run; + * this function does not guarantee that they have not exited. However, it + * does guarantee that any workers which exited must have done so cleanly and + * after successfully performing the work with which they were tasked. + * + * If this function is not called, then some of the workers that were launched + * may not have been started due to a fork() failure, or may have exited during + * early startup prior to attaching to the error queue, so nworkers_launched + * cannot be viewed as completely reliable. It will never be less than the + * number of workers which actually started, but it might be more. Any workers + * that failed to start will still be discovered by + * WaitForParallelWorkersToFinish and an error will be thrown at that time, + * provided that function is eventually reached. + * + * In general, the leader process should do as much work as possible before + * calling this function. fork() failures and other early-startup failures + * are very uncommon, and having the leader sit idle when it could be doing + * useful work is undesirable. However, if the leader needs to wait for + * all of its workers or for a specific worker, it may want to call this + * function before doing so. If not, it must make some other provision for + * the failure-to-start case, lest it wait forever. 
On the other hand, a + * leader which never waits for a worker that might not be started yet, or + * at least never does so prior to WaitForParallelWorkersToFinish(), need not + * call this function at all. + */ +void +WaitForParallelWorkersToAttach(ParallelContext *pcxt) +{ + int i; + + /* Skip this if we have no launched workers. */ + if (pcxt->nworkers_launched == 0) + return; + + for (;;) + { + /* + * This will process any parallel messages that are pending and it may + * also throw an error propagated from a worker. + */ + CHECK_FOR_INTERRUPTS(); + + for (i = 0; i < pcxt->nworkers_launched; ++i) + { + BgwHandleStatus status; + shm_mq *mq; + int rc; + pid_t pid; + + if (pcxt->known_attached_workers[i]) + continue; + + /* + * If error_mqh is NULL, then the worker has already exited + * cleanly. + */ + if (pcxt->worker[i].error_mqh == NULL) + { + pcxt->known_attached_workers[i] = true; + ++pcxt->nknown_attached_workers; + continue; + } + + status = GetBackgroundWorkerPid(pcxt->worker[i].bgwhandle, &pid); + if (status == BGWH_STARTED) + { + /* Has the worker attached to the error queue? */ + mq = shm_mq_get_queue(pcxt->worker[i].error_mqh); + if (shm_mq_get_sender(mq) != NULL) + { + /* Yes, so it is known to be attached. */ + pcxt->known_attached_workers[i] = true; + ++pcxt->nknown_attached_workers; + } + } + else if (status == BGWH_STOPPED) + { + /* + * If the worker stopped without attaching to the error queue, + * throw an error. + */ + mq = shm_mq_get_queue(pcxt->worker[i].error_mqh); + if (shm_mq_get_sender(mq) == NULL) + ereport(ERROR, + (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE), + errmsg("parallel worker failed to initialize"), + errhint("More details may be available in the server log."))); + + pcxt->known_attached_workers[i] = true; + ++pcxt->nknown_attached_workers; + } + else + { + /* + * Worker not yet started, so we must wait. The postmaster + * will notify us if the worker's state changes. Our latch + * might also get set for some other reason, but if so we'll + * just end up waiting for the same worker again. + */ + rc = WaitLatch(MyLatch, + WL_LATCH_SET | WL_POSTMASTER_DEATH, + -1, WAIT_EVENT_BGWORKER_STARTUP); + + /* emergency bailout if postmaster has died */ + if (rc & WL_POSTMASTER_DEATH) + proc_exit(1); + + if (rc & WL_LATCH_SET) + ResetLatch(MyLatch); + } + } + + /* If all workers are known to have started, we're done. */ + if (pcxt->nknown_attached_workers >= pcxt->nworkers_launched) + { + Assert(pcxt->nknown_attached_workers == pcxt->nworkers_launched); + break; + } + } +} + /* * Wait for all workers to finish computing. * @@ -506,6 +730,7 @@ WaitForParallelWorkersToFinish(ParallelContext *pcxt) for (;;) { bool anyone_alive = false; + int nfinished = 0; int i; /* @@ -517,7 +742,15 @@ WaitForParallelWorkersToFinish(ParallelContext *pcxt) for (i = 0; i < pcxt->nworkers_launched; ++i) { - if (pcxt->worker[i].error_mqh != NULL) + /* + * If error_mqh is NULL, then the worker has already exited + * cleanly. If we have received a message through error_mqh from + * the worker, we know it started up cleanly, and therefore we're + * certain to be notified when it exits. + */ + if (pcxt->worker[i].error_mqh == NULL) + ++nfinished; + else if (pcxt->known_attached_workers[i]) { anyone_alive = true; break; @@ -525,7 +758,62 @@ WaitForParallelWorkersToFinish(ParallelContext *pcxt) } if (!anyone_alive) - break; + { + /* If all workers are known to have finished, we're done. 
*/ + if (nfinished >= pcxt->nworkers_launched) + { + Assert(nfinished == pcxt->nworkers_launched); + break; + } + + /* + * We didn't detect any living workers, but not all workers are + * known to have exited cleanly. Either not all workers have + * launched yet, or maybe some of them failed to start or + * terminated abnormally. + */ + for (i = 0; i < pcxt->nworkers_launched; ++i) + { + pid_t pid; + shm_mq *mq; + + /* + * If the worker is BGWH_NOT_YET_STARTED or BGWH_STARTED, we + * should just keep waiting. If it is BGWH_STOPPED, then + * further investigation is needed. + */ + if (pcxt->worker[i].error_mqh == NULL || + pcxt->worker[i].bgwhandle == NULL || + GetBackgroundWorkerPid(pcxt->worker[i].bgwhandle, + &pid) != BGWH_STOPPED) + continue; + + /* + * Check whether the worker ended up stopped without ever + * attaching to the error queue. If so, the postmaster was + * unable to fork the worker or it exited without initializing + * properly. We must throw an error, since the caller may + * have been expecting the worker to do some work before + * exiting. + */ + mq = shm_mq_get_queue(pcxt->worker[i].error_mqh); + if (shm_mq_get_sender(mq) == NULL) + ereport(ERROR, + (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE), + errmsg("parallel worker failed to initialize"), + errhint("More details may be available in the server log."))); + + /* + * The worker is stopped, but is attached to the error queue. + * Unless there's a bug somewhere, this will only happen when + * the worker writes messages and terminates after the + * CHECK_FOR_INTERRUPTS() near the top of this function and + * before the call to GetBackgroundWorkerPid(). In that case, + * our latch should have been set as well and the right things + * will happen on the next pass through the loop. + */ + } + } WaitLatch(MyLatch, WL_LATCH_SET, -1, WAIT_EVENT_PARALLEL_FINISH); @@ -612,7 +900,7 @@ DestroyParallelContext(ParallelContext *pcxt) { TerminateBackgroundWorker(pcxt->worker[i].bgwhandle); - pfree(pcxt->worker[i].error_mqh); + shm_mq_detach(pcxt->worker[i].error_mqh); pcxt->worker[i].error_mqh = NULL; } } @@ -782,6 +1070,13 @@ HandleParallelMessage(ParallelContext *pcxt, int i, StringInfo msg) { char msgtype; + if (pcxt->known_attached_workers != NULL && + !pcxt->known_attached_workers[i]) + { + pcxt->known_attached_workers[i] = true; + pcxt->nknown_attached_workers++; + } + msgtype = pq_getmsgbyte(msg); switch (msgtype) @@ -861,7 +1156,7 @@ HandleParallelMessage(ParallelContext *pcxt, int i, StringInfo msg) case 'X': /* Terminate, indicating clean exit */ { - pfree(pcxt->worker[i].error_mqh); + shm_mq_detach(pcxt->worker[i].error_mqh); pcxt->worker[i].error_mqh = NULL; break; } @@ -937,7 +1232,11 @@ ParallelWorkerMain(Datum main_arg) char *tsnapspace; char *asnapspace; char *tstatespace; + char *reindexspace; + char *relmapperspace; + char *enumblacklistspace; StringInfoData msgbuf; + char *session_dsm_handle_space; /* Set flag to indicate that we're initializing a parallel worker. */ InitializingParallelWorker = true; @@ -950,16 +1249,19 @@ ParallelWorkerMain(Datum main_arg) Assert(ParallelWorkerNumber == -1); memcpy(&ParallelWorkerNumber, MyBgworkerEntry->bgw_extra, sizeof(int)); - /* Set up a memory context and resource owner. */ - Assert(CurrentResourceOwner == NULL); - CurrentResourceOwner = ResourceOwnerCreate(NULL, "parallel toplevel"); + /* Set up a memory context to work in, just for cleanliness. 
*/ CurrentMemoryContext = AllocSetContextCreate(TopMemoryContext, "Parallel worker", ALLOCSET_DEFAULT_SIZES); /* - * Now that we have a resource owner, we can attach to the dynamic shared - * memory segment and read the table of contents. + * Attach to the dynamic shared memory segment for the parallel query, and + * find its table of contents. + * + * Note: at this point, we have not created any ResourceOwner in this + * process. This will result in our DSM mapping surviving until process + * exit, which is fine. If there were a ResourceOwner, it would acquire + * ownership of the mapping, but we have no need for that. */ seg = dsm_attach(DatumGetUInt32(main_arg)); if (seg == NULL) @@ -976,11 +1278,16 @@ ParallelWorkerMain(Datum main_arg) fps = shm_toc_lookup(toc, PARALLEL_KEY_FIXED, false); MyFixedParallelState = fps; + /* Arrange to signal the leader if we exit. */ + ParallelMasterPid = fps->parallel_master_pid; + ParallelMasterBackendId = fps->parallel_master_backend_id; + on_shmem_exit(ParallelWorkerShutdown, (Datum) 0); + /* - * Now that we have a worker number, we can find and attach to the error - * queue provided for us. That's good, because until we do that, any - * errors that happen here will not be reported back to the process that - * requested that this worker be launched. + * Now we can find and attach to the error queue provided for us. That's + * good, because until we do that, any errors that happen here will not be + * reported back to the process that requested that this worker be + * launched. */ error_queue_space = shm_toc_lookup(toc, PARALLEL_KEY_ERROR_QUEUE, false); mq = (shm_mq *) (error_queue_space + @@ -999,8 +1306,8 @@ ParallelWorkerMain(Datum main_arg) * in this case. */ pq_beginmessage(&msgbuf, 'K'); - pq_sendint(&msgbuf, (int32) MyProcPid, sizeof(int32)); - pq_sendint(&msgbuf, (int32) MyCancelKey, sizeof(int32)); + pq_sendint32(&msgbuf, (int32) MyProcPid); + pq_sendint32(&msgbuf, (int32) MyCancelKey); pq_endmessage(&msgbuf); /* @@ -1022,12 +1329,11 @@ ParallelWorkerMain(Datum main_arg) return; /* - * Load libraries that were loaded by original backend. We want to do - * this before restoring GUCs, because the libraries might define custom - * variables. + * Restore transaction and statement start-time timestamps. This must + * happen before anything that would start a transaction, else asserts in + * xact.c will fire. */ - libraryspace = shm_toc_lookup(toc, PARALLEL_KEY_LIBRARY, false); - RestoreLibraryState(libraryspace); + SetParallelStartTimestamps(fps->xact_ts, fps->stmt_ts); /* * Identify the entry point to be called. In theory this could result in @@ -1042,7 +1348,8 @@ ParallelWorkerMain(Datum main_arg) /* Restore database connection. */ BackgroundWorkerInitializeConnectionByOid(fps->database_id, - fps->authenticated_user_id); + fps->authenticated_user_id, + 0); /* * Set the client encoding to the database encoding, since that is what @@ -1050,9 +1357,17 @@ ParallelWorkerMain(Datum main_arg) */ SetClientEncoding(GetDatabaseEncoding()); + /* + * Load libraries that were loaded by original backend. We want to do + * this before restoring GUCs, because the libraries might define custom + * variables. + */ + libraryspace = shm_toc_lookup(toc, PARALLEL_KEY_LIBRARY, false); + StartTransactionCommand(); + RestoreLibraryState(libraryspace); + /* Restore GUC values from launching backend. 
*/ gucspace = shm_toc_lookup(toc, PARALLEL_KEY_GUC, false); - StartTransactionCommand(); RestoreGUCState(gucspace); CommitTransactionCommand(); @@ -1064,6 +1379,11 @@ ParallelWorkerMain(Datum main_arg) combocidspace = shm_toc_lookup(toc, PARALLEL_KEY_COMBO_CID, false); RestoreComboCIDState(combocidspace); + /* Attach to the per-session DSM segment and contained objects. */ + session_dsm_handle_space = + shm_toc_lookup(toc, PARALLEL_KEY_SESSION_DSM, false); + AttachSession(*(dsm_handle *) session_dsm_handle_space); + /* Restore transaction snapshot. */ tsnapspace = shm_toc_lookup(toc, PARALLEL_KEY_TRANSACTION_SNAPSHOT, false); RestoreTransactionSnapshot(RestoreSnapshot(tsnapspace), @@ -1079,6 +1399,13 @@ ParallelWorkerMain(Datum main_arg) */ InvalidateSystemCaches(); + /* + * Restore current role id. Skip verifying whether session user is + * allowed to become this role and blindly restore the leader's state for + * current role. + */ + SetCurrentRoleId(fps->outer_user_id, fps->is_superuser); + /* Restore user ID and security context. */ SetUserIdAndSecContext(fps->current_user_id, fps->sec_context); @@ -1086,8 +1413,18 @@ ParallelWorkerMain(Datum main_arg) SetTempNamespaceState(fps->temp_namespace_id, fps->temp_toast_namespace_id); - /* Set ParallelMasterBackendId so we know how to address temp relations. */ - ParallelMasterBackendId = fps->parallel_master_backend_id; + /* Restore reindex state. */ + reindexspace = shm_toc_lookup(toc, PARALLEL_KEY_REINDEX_STATE, false); + RestoreReindexState(reindexspace); + + /* Restore relmapper state. */ + relmapperspace = shm_toc_lookup(toc, PARALLEL_KEY_RELMAPPER_STATE, false); + RestoreRelationMap(relmapperspace); + + /* Restore enum blacklist. */ + enumblacklistspace = shm_toc_lookup(toc, PARALLEL_KEY_ENUMBLACKLIST, + false); + RestoreEnumBlacklist(enumblacklistspace); /* * We've initialized all of our state now; nothing should change @@ -1104,12 +1441,15 @@ ParallelWorkerMain(Datum main_arg) /* Must exit parallel mode to pop active snapshot. */ ExitParallelMode(); - /* Must pop active snapshot so resowner.c doesn't complain. */ + /* Must pop active snapshot so snapmgr.c doesn't complain. */ PopActiveSnapshot(); /* Shut down the parallel-worker transaction. */ EndParallelWorkerTransaction(); + /* Detach from the per-session DSM segment. */ + DetachSession(); + /* Report success. */ pq_putmessage('X', NULL, 0); } @@ -1130,6 +1470,20 @@ ParallelWorkerReportLastRecEnd(XLogRecPtr last_xlog_end) SpinLockRelease(&fps->mutex); } +/* + * Make sure the leader tries to read from our error queue one more time. + * This guards against the case where we exit uncleanly without sending an + * ErrorResponse to the leader, for example because some code calls proc_exit + * directly. + */ +static void +ParallelWorkerShutdown(int code, Datum arg) +{ + SendProcSignal(ParallelMasterPid, + PROCSIG_PARALLEL_MESSAGE, + ParallelMasterBackendId); +} + /* * Look up (and possibly load) a parallel worker entry point function. * diff --git a/src/backend/access/transam/slru.c b/src/backend/access/transam/slru.c index 77edc51e1c..1132eef038 100644 --- a/src/backend/access/transam/slru.c +++ b/src/backend/access/transam/slru.c @@ -38,7 +38,7 @@ * by re-setting the page's page_dirty flag. 
* * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/backend/access/transam/slru.c @@ -599,7 +599,7 @@ SimpleLruDoesPhysicalPageExist(SlruCtl ctl, int pageno) SlruFileName(ctl, path, segno); - fd = OpenTransientFile(path, O_RDWR | PG_BINARY, S_IRUSR | S_IWUSR); + fd = OpenTransientFile(path, O_RDWR | PG_BINARY); if (fd < 0) { /* expected: file doesn't exist */ @@ -614,7 +614,7 @@ SimpleLruDoesPhysicalPageExist(SlruCtl ctl, int pageno) if ((endpos = lseek(fd, 0, SEEK_END)) < 0) { - slru_errcause = SLRU_OPEN_FAILED; + slru_errcause = SLRU_SEEK_FAILED; slru_errno = errno; SlruReportIOError(ctl, pageno, 0); } @@ -629,7 +629,7 @@ SimpleLruDoesPhysicalPageExist(SlruCtl ctl, int pageno) * Physical read of a (previously existing) page into a buffer slot * * On failure, we cannot just ereport(ERROR) since caller has put state in - * shared memory that must be undone. So, we return FALSE and save enough + * shared memory that must be undone. So, we return false and save enough * info in static variables to let SlruReportIOError make the report. * * For now, assume it's not worth keeping a file pointer open across @@ -654,7 +654,7 @@ SlruPhysicalReadPage(SlruCtl ctl, int pageno, int slotno) * SlruPhysicalWritePage). Hence, if we are InRecovery, allow the case * where the file doesn't exist, and return zeroes instead. */ - fd = OpenTransientFile(path, O_RDWR | PG_BINARY, S_IRUSR | S_IWUSR); + fd = OpenTransientFile(path, O_RDWR | PG_BINARY); if (fd < 0) { if (errno != ENOENT || !InRecovery) @@ -705,7 +705,7 @@ SlruPhysicalReadPage(SlruCtl ctl, int pageno, int slotno) * Physical write of a page from a buffer slot * * On failure, we cannot just ereport(ERROR) since caller has put state in - * shared memory that must be undone. So, we return FALSE and save enough + * shared memory that must be undone. So, we return false and save enough * info in static variables to let SlruReportIOError make the report. * * For now, assume it's not worth keeping a file pointer open across @@ -804,8 +804,7 @@ SlruPhysicalWritePage(SlruCtl ctl, int pageno, int slotno, SlruFlush fdata) * don't use O_EXCL or O_TRUNC or anything like that. */ SlruFileName(ctl, path, segno); - fd = OpenTransientFile(path, O_RDWR | O_CREAT | PG_BINARY, - S_IRUSR | S_IWUSR); + fd = OpenTransientFile(path, O_RDWR | O_CREAT | PG_BINARY); if (fd < 0) { slru_errcause = SLRU_OPEN_FAILED; diff --git a/src/backend/access/transam/subtrans.c b/src/backend/access/transam/subtrans.c index f640661130..4faa21f5ae 100644 --- a/src/backend/access/transam/subtrans.c +++ b/src/backend/access/transam/subtrans.c @@ -19,7 +19,7 @@ * data across crashes. During database startup, we simply force the * currently-active page of SUBTRANS to zeroes. * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/backend/access/transam/subtrans.c diff --git a/src/backend/access/transam/timeline.c b/src/backend/access/transam/timeline.c index 63db8a981d..61d36050c3 100644 --- a/src/backend/access/transam/timeline.c +++ b/src/backend/access/transam/timeline.c @@ -21,7 +21,7 @@ * The fields are separated by tabs. Lines beginning with # are comments, and * are ignored. Empty lines are also ignored. 
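A side note on the OpenTransientFile() calls being touched throughout these hunks: the explicit permissions argument is gone, so callers now pass only a path and flags. A minimal sketch of the updated call pattern; the read_state_file() helper is hypothetical:

#include "postgres.h"
#include <fcntl.h>
#include "storage/fd.h"

/* Hypothetical helper showing the two-argument OpenTransientFile() form. */
static void
read_state_file(const char *path)
{
	int			fd;

	fd = OpenTransientFile(path, O_RDONLY | PG_BINARY);
	if (fd < 0)
		ereport(ERROR,
				(errcode_for_file_access(),
				 errmsg("could not open file \"%s\": %m", path)));

	/* ... read and validate the file contents here ... */

	if (CloseTransientFile(fd) != 0)
		ereport(ERROR,
				(errcode_for_file_access(),
				 errmsg("could not close file \"%s\": %m", path)));
}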
* - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/backend/access/transam/timeline.c @@ -307,8 +307,7 @@ writeTimeLineHistory(TimeLineID newTLI, TimeLineID parentTLI, unlink(tmppath); /* do not use get_sync_bit() here --- want to fsync only at end of fill */ - fd = OpenTransientFile(tmppath, O_RDWR | O_CREAT | O_EXCL, - S_IRUSR | S_IWUSR); + fd = OpenTransientFile(tmppath, O_RDWR | O_CREAT | O_EXCL); if (fd < 0) ereport(ERROR, (errcode_for_file_access(), @@ -325,7 +324,7 @@ writeTimeLineHistory(TimeLineID newTLI, TimeLineID parentTLI, else TLHistoryFilePath(path, parentTLI); - srcfd = OpenTransientFile(path, O_RDONLY, 0); + srcfd = OpenTransientFile(path, O_RDONLY); if (srcfd < 0) { if (errno != ENOENT) @@ -459,8 +458,7 @@ writeTimeLineHistoryFile(TimeLineID tli, char *content, int size) unlink(tmppath); /* do not use get_sync_bit() here --- want to fsync only at end of fill */ - fd = OpenTransientFile(tmppath, O_RDWR | O_CREAT | O_EXCL, - S_IRUSR | S_IWUSR); + fd = OpenTransientFile(tmppath, O_RDWR | O_CREAT | O_EXCL); if (fd < 0) ereport(ERROR, (errcode_for_file_access(), diff --git a/src/backend/access/transam/transam.c b/src/backend/access/transam/transam.c index 968b232364..52a624c90b 100644 --- a/src/backend/access/transam/transam.c +++ b/src/backend/access/transam/transam.c @@ -3,7 +3,7 @@ * transam.c * postgres transaction (commit) log interface routines * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * diff --git a/src/backend/access/transam/twophase.c b/src/backend/access/transam/twophase.c index ba03d9687e..3942734e5a 100644 --- a/src/backend/access/transam/twophase.c +++ b/src/backend/access/transam/twophase.c @@ -3,7 +3,7 @@ * twophase.c * Two-phase commit support functions. * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * IDENTIFICATION @@ -144,11 +144,7 @@ int max_prepared_xacts = 0; * * typedef struct GlobalTransactionData *GlobalTransaction appears in * twophase.h - * - * Note that the max value of GIDSIZE must fit in the uint16 gidlen, - * specified in TwoPhaseFileHeader. 
*/ -#define GIDSIZE 200 typedef struct GlobalTransactionData { @@ -170,9 +166,9 @@ typedef struct GlobalTransactionData Oid owner; /* ID of user that executed the xact */ BackendId locking_backend; /* backend currently working on the xact */ - bool valid; /* TRUE if PGPROC entry is in proc array */ - bool ondisk; /* TRUE if prepare state file is on disk */ - bool inredo; /* TRUE if entry was added via xlog_redo */ + bool valid; /* true if PGPROC entry is in proc array */ + bool ondisk; /* true if prepare state file is on disk */ + bool inredo; /* true if entry was added via xlog_redo */ char gid[GIDSIZE]; /* The GID assigned to the prepared xact */ } GlobalTransactionData; @@ -211,12 +207,14 @@ static void RecordTransactionCommitPrepared(TransactionId xid, RelFileNode *rels, int ninvalmsgs, SharedInvalidationMessage *invalmsgs, - bool initfileinval); + bool initfileinval, + const char *gid); static void RecordTransactionAbortPrepared(TransactionId xid, int nchildren, TransactionId *children, int nrels, - RelFileNode *rels); + RelFileNode *rels, + const char *gid); static void ProcessRecords(char *bufptr, TransactionId xid, const TwoPhaseCallback callbacks[]); static void RemoveGXact(GlobalTransaction gxact); @@ -473,6 +471,7 @@ MarkAsPreparingGuts(GlobalTransaction gxact, TransactionId xid, const char *gid, proc->backendId = InvalidBackendId; proc->databaseId = databaseid; proc->roleId = owner; + proc->tempNamespaceId = InvalidOid; proc->isBackgroundWorker = false; proc->lwWaiting = false; proc->lwWaitMode = 0; @@ -898,7 +897,7 @@ TwoPhaseGetDummyProc(TransactionId xid) /* * Header for a 2PC state file */ -#define TWOPHASE_MAGIC 0x57F94533 /* format identifier */ +#define TWOPHASE_MAGIC 0x57F94534 /* format identifier */ typedef struct TwoPhaseFileHeader { @@ -914,6 +913,8 @@ typedef struct TwoPhaseFileHeader int32 ninvalmsgs; /* number of cache invalidation messages */ bool initfileinval; /* does relcache init file need invalidation? */ uint16 gidlen; /* length of the GID - GID follows the header */ + XLogRecPtr origin_lsn; /* lsn of this record at origin node */ + TimestampTz origin_timestamp; /* time of prepare at origin node */ } TwoPhaseFileHeader; /* @@ -1065,6 +1066,7 @@ EndPrepare(GlobalTransaction gxact) { TwoPhaseFileHeader *hdr; StateFileChunk *record; + bool replorigin; /* Add the end sentinel to the list of 2PC records */ RegisterTwoPhaseRecord(TWOPHASE_RM_END_ID, 0, @@ -1075,6 +1077,21 @@ EndPrepare(GlobalTransaction gxact) Assert(hdr->magic == TWOPHASE_MAGIC); hdr->total_len = records.total_len + sizeof(pg_crc32c); + replorigin = (replorigin_session_origin != InvalidRepOriginId && + replorigin_session_origin != DoNotReplicateId); + + if (replorigin) + { + Assert(replorigin_session_origin_lsn != InvalidXLogRecPtr); + hdr->origin_lsn = replorigin_session_origin_lsn; + hdr->origin_timestamp = replorigin_session_origin_timestamp; + } + else + { + hdr->origin_lsn = InvalidXLogRecPtr; + hdr->origin_timestamp = 0; + } + /* * If the data size exceeds MaxAllocSize, we won't be able to read it in * ReadTwoPhaseFile. 
Check for that now, rather than fail in the case @@ -1107,7 +1124,18 @@ EndPrepare(GlobalTransaction gxact) XLogBeginInsert(); for (record = records.head; record != NULL; record = record->next) XLogRegisterData(record->data, record->len); + + XLogSetRecordFlags(XLOG_INCLUDE_ORIGIN); + gxact->prepare_end_lsn = XLogInsert(RM_XACT_ID, XLOG_XACT_PREPARE); + + if (replorigin) + { + /* Move LSNs forward for this replication origin */ + replorigin_session_advance(replorigin_session_origin_lsn, + gxact->prepare_end_lsn); + } + XLogFlush(gxact->prepare_end_lsn); /* If we crash now, we have prepared: WAL replay will fix things */ @@ -1179,10 +1207,12 @@ RegisterTwoPhaseRecord(TwoPhaseRmgrId rmid, uint16 info, * Read and validate the state file for xid. * * If it looks OK (has a valid magic number and CRC), return the palloc'd - * contents of the file. Otherwise return NULL. + * contents of the file, issuing an error when finding corrupted data. If + * missing_ok is true, which indicates that missing files can be safely + * ignored, then return NULL. This state can be reached when doing recovery. */ static char * -ReadTwoPhaseFile(TransactionId xid, bool give_warnings) +ReadTwoPhaseFile(TransactionId xid, bool missing_ok) { char path[MAXPGPATH]; char *buf; @@ -1192,18 +1222,19 @@ ReadTwoPhaseFile(TransactionId xid, bool give_warnings) uint32 crc_offset; pg_crc32c calc_crc, file_crc; + int r; TwoPhaseFilePath(path, xid); - fd = OpenTransientFile(path, O_RDONLY | PG_BINARY, 0); + fd = OpenTransientFile(path, O_RDONLY | PG_BINARY); if (fd < 0) { - if (give_warnings) - ereport(WARNING, - (errcode_for_file_access(), - errmsg("could not open two-phase state file \"%s\": %m", - path))); - return NULL; + if (missing_ok && errno == ENOENT) + return NULL; + + ereport(ERROR, + (errcode_for_file_access(), + errmsg("could not open file \"%s\": %m", path))); } /* @@ -1213,31 +1244,27 @@ ReadTwoPhaseFile(TransactionId xid, bool give_warnings) * even on a valid file. */ if (fstat(fd, &stat)) - { - CloseTransientFile(fd); - if (give_warnings) - ereport(WARNING, - (errcode_for_file_access(), - errmsg("could not stat two-phase state file \"%s\": %m", - path))); - return NULL; - } + ereport(ERROR, + (errcode_for_file_access(), + errmsg("could not stat file \"%s\": %m", path))); if (stat.st_size < (MAXALIGN(sizeof(TwoPhaseFileHeader)) + MAXALIGN(sizeof(TwoPhaseRecordOnDisk)) + sizeof(pg_crc32c)) || stat.st_size > MaxAllocSize) - { - CloseTransientFile(fd); - return NULL; - } + ereport(ERROR, + (errcode(ERRCODE_DATA_CORRUPTED), + errmsg_plural("incorrect size of file \"%s\": %zu byte", + "incorrect size of file \"%s\": %zu bytes", + (Size) stat.st_size, path, + (Size) stat.st_size))); crc_offset = stat.st_size - sizeof(pg_crc32c); if (crc_offset != MAXALIGN(crc_offset)) - { - CloseTransientFile(fd); - return NULL; - } + ereport(ERROR, + (errcode(ERRCODE_DATA_CORRUPTED), + errmsg("incorrect alignment of CRC offset for file \"%s\"", + path))); /* * OK, slurp in the file. 
@@ -1245,28 +1272,34 @@ ReadTwoPhaseFile(TransactionId xid, bool give_warnings) buf = (char *) palloc(stat.st_size); pgstat_report_wait_start(WAIT_EVENT_TWOPHASE_FILE_READ); - if (read(fd, buf, stat.st_size) != stat.st_size) + r = read(fd, buf, stat.st_size); + if (r != stat.st_size) { - pgstat_report_wait_end(); - CloseTransientFile(fd); - if (give_warnings) - ereport(WARNING, + if (r < 0) + ereport(ERROR, (errcode_for_file_access(), - errmsg("could not read two-phase state file \"%s\": %m", - path))); - pfree(buf); - return NULL; + errmsg("could not read file \"%s\": %m", path))); + else + ereport(ERROR, + (errmsg("could not read file \"%s\": read %d of %zu", + path, r, (Size) stat.st_size))); } pgstat_report_wait_end(); CloseTransientFile(fd); hdr = (TwoPhaseFileHeader *) buf; - if (hdr->magic != TWOPHASE_MAGIC || hdr->total_len != stat.st_size) - { - pfree(buf); - return NULL; - } + if (hdr->magic != TWOPHASE_MAGIC) + ereport(ERROR, + (errcode(ERRCODE_DATA_CORRUPTED), + errmsg("invalid magic number stored in file \"%s\"", + path))); + + if (hdr->total_len != stat.st_size) + ereport(ERROR, + (errcode(ERRCODE_DATA_CORRUPTED), + errmsg("invalid size stored in file \"%s\"", + path))); INIT_CRC32C(calc_crc); COMP_CRC32C(calc_crc, buf, crc_offset); @@ -1275,14 +1308,52 @@ ReadTwoPhaseFile(TransactionId xid, bool give_warnings) file_crc = *((pg_crc32c *) (buf + crc_offset)); if (!EQ_CRC32C(calc_crc, file_crc)) - { - pfree(buf); - return NULL; - } + ereport(ERROR, + (errcode(ERRCODE_DATA_CORRUPTED), + errmsg("calculated CRC checksum does not match value stored in file \"%s\"", + path))); return buf; } +/* + * ParsePrepareRecord + */ +void +ParsePrepareRecord(uint8 info, char *xlrec, xl_xact_parsed_prepare *parsed) +{ + TwoPhaseFileHeader *hdr; + char *bufptr; + + hdr = (TwoPhaseFileHeader *) xlrec; + bufptr = xlrec + MAXALIGN(sizeof(TwoPhaseFileHeader)); + + parsed->origin_lsn = hdr->origin_lsn; + parsed->origin_timestamp = hdr->origin_timestamp; + parsed->twophase_xid = hdr->xid; + parsed->dbId = hdr->database; + parsed->nsubxacts = hdr->nsubxacts; + parsed->nrels = hdr->ncommitrels; + parsed->nabortrels = hdr->nabortrels; + parsed->nmsgs = hdr->ninvalmsgs; + + strncpy(parsed->twophase_gid, bufptr, hdr->gidlen); + bufptr += MAXALIGN(hdr->gidlen); + + parsed->subxacts = (TransactionId *) bufptr; + bufptr += MAXALIGN(hdr->nsubxacts * sizeof(TransactionId)); + + parsed->xnodes = (RelFileNode *) bufptr; + bufptr += MAXALIGN(hdr->ncommitrels * sizeof(RelFileNode)); + + parsed->abortnodes = (RelFileNode *) bufptr; + bufptr += MAXALIGN(hdr->nabortrels * sizeof(RelFileNode)); + + parsed->msgs = (SharedInvalidationMessage *) bufptr; + bufptr += MAXALIGN(hdr->ninvalmsgs * sizeof(SharedInvalidationMessage)); +} + + /* * Reads 2PC data from xlog. 
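For readers following ParsePrepareRecord() above: the serialized 2PC state is a TwoPhaseFileHeader followed by the GID and then the subxact, commit-rel, abort-rel and invalidation-message arrays, each padded out to a MAXALIGN boundary. A small illustrative walker, not part of the patch; it assumes it sits in twophase.c, where TwoPhaseFileHeader is visible:

/* Illustrative only: log where each section of a 2PC state buffer starts. */
static void
show_two_phase_layout(char *buf)
{
	TwoPhaseFileHeader *hdr = (TwoPhaseFileHeader *) buf;
	char	   *ptr = buf + MAXALIGN(sizeof(TwoPhaseFileHeader));

	elog(DEBUG1, "GID (%d bytes) at offset %zu",
		 (int) hdr->gidlen, (size_t) (ptr - buf));
	ptr += MAXALIGN(hdr->gidlen);

	elog(DEBUG1, "%d subxacts at offset %zu",
		 hdr->nsubxacts, (size_t) (ptr - buf));
	ptr += MAXALIGN(hdr->nsubxacts * sizeof(TransactionId));

	elog(DEBUG1, "%d commit rels at offset %zu",
		 hdr->ncommitrels, (size_t) (ptr - buf));
	ptr += MAXALIGN(hdr->ncommitrels * sizeof(RelFileNode));

	elog(DEBUG1, "%d abort rels at offset %zu",
		 hdr->nabortrels, (size_t) (ptr - buf));
	ptr += MAXALIGN(hdr->nabortrels * sizeof(RelFileNode));

	elog(DEBUG1, "%d invalidation messages at offset %zu",
		 hdr->ninvalmsgs, (size_t) (ptr - buf));
}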
During checkpoint this data will be moved to @@ -1299,7 +1370,8 @@ XlogReadTwoPhaseData(XLogRecPtr lsn, char **buf, int *len) XLogReaderState *xlogreader; char *errormsg; - xlogreader = XLogReaderAllocate(&read_local_xlog_page, NULL); + xlogreader = XLogReaderAllocate(wal_segment_size, &read_local_xlog_page, + NULL); if (!xlogreader) ereport(ERROR, (errcode(ERRCODE_OUT_OF_MEMORY), @@ -1348,7 +1420,7 @@ StandbyTransactionIdIsPrepared(TransactionId xid) return false; /* nothing to do */ /* Read and validate file */ - buf = ReadTwoPhaseFile(xid, false); + buf = ReadTwoPhaseFile(xid, true); if (buf == NULL) return false; @@ -1380,7 +1452,6 @@ FinishPreparedTransaction(const char *gid, bool isCommit) RelFileNode *delrels; int ndelrels; SharedInvalidationMessage *invalmsgs; - int i; /* * Validate the GID, and lock the GXACT to ensure that two backends do not @@ -1397,7 +1468,7 @@ FinishPreparedTransaction(const char *gid, bool isCommit) * to disk if for some reason they have lived for a long time. */ if (gxact->ondisk) - buf = ReadTwoPhaseFile(xid, true); + buf = ReadTwoPhaseFile(xid, false); else XlogReadTwoPhaseData(gxact->prepare_start_lsn, &buf, NULL); @@ -1421,6 +1492,9 @@ FinishPreparedTransaction(const char *gid, bool isCommit) /* compute latestXid among all children */ latestXid = TransactionIdLatest(xid, hdr->nsubxacts, children); + /* Prevent cancel/die interrupt while cleaning up */ + HOLD_INTERRUPTS(); + /* * The order of operations here is critical: make the XLOG entry for * commit or abort, then mark the transaction committed or aborted in @@ -1434,11 +1508,12 @@ FinishPreparedTransaction(const char *gid, bool isCommit) hdr->nsubxacts, children, hdr->ncommitrels, commitrels, hdr->ninvalmsgs, invalmsgs, - hdr->initfileinval); + hdr->initfileinval, gid); else RecordTransactionAbortPrepared(xid, hdr->nsubxacts, children, - hdr->nabortrels, abortrels); + hdr->nabortrels, abortrels, + gid); ProcArrayRemove(proc, latestXid); @@ -1469,13 +1544,9 @@ FinishPreparedTransaction(const char *gid, bool isCommit) delrels = abortrels; ndelrels = hdr->nabortrels; } - for (i = 0; i < ndelrels; i++) - { - SMgrRelation srel = smgropen(delrels[i], InvalidBackendId); - smgrdounlink(srel, false); - smgrclose(srel); - } + /* Make sure files supposed to be dropped are dropped */ + DropRelationFiles(delrels, ndelrels, false); /* * Handle cache invalidation messages. 
@@ -1511,6 +1582,8 @@ FinishPreparedTransaction(const char *gid, bool isCommit) LWLockRelease(TwoPhaseStateLock); MyLockedGxact = NULL; + RESUME_INTERRUPTS(); + pfree(buf); } @@ -1555,8 +1628,7 @@ RemoveTwoPhaseFile(TransactionId xid, bool giveWarning) if (errno != ENOENT || giveWarning) ereport(WARNING, (errcode_for_file_access(), - errmsg("could not remove two-phase state file \"%s\": %m", - path))); + errmsg("could not remove file \"%s\": %m", path))); } /* @@ -1580,31 +1652,40 @@ RecreateTwoPhaseFile(TransactionId xid, void *content, int len) TwoPhaseFilePath(path, xid); fd = OpenTransientFile(path, - O_CREAT | O_TRUNC | O_WRONLY | PG_BINARY, - S_IRUSR | S_IWUSR); + O_CREAT | O_TRUNC | O_WRONLY | PG_BINARY); if (fd < 0) ereport(ERROR, (errcode_for_file_access(), - errmsg("could not recreate two-phase state file \"%s\": %m", - path))); + errmsg("could not recreate file \"%s\": %m", path))); /* Write content and CRC */ + errno = 0; pgstat_report_wait_start(WAIT_EVENT_TWOPHASE_FILE_WRITE); if (write(fd, content, len) != len) { + int save_errno = errno; + pgstat_report_wait_end(); CloseTransientFile(fd); + + /* if write didn't set errno, assume problem is no disk space */ + errno = save_errno ? save_errno : ENOSPC; ereport(ERROR, (errcode_for_file_access(), - errmsg("could not write two-phase state file: %m"))); + errmsg("could not write file \"%s\": %m", path))); } if (write(fd, &statefile_crc, sizeof(pg_crc32c)) != sizeof(pg_crc32c)) { + int save_errno = errno; + pgstat_report_wait_end(); CloseTransientFile(fd); + + /* if write didn't set errno, assume problem is no disk space */ + errno = save_errno ? save_errno : ENOSPC; ereport(ERROR, (errcode_for_file_access(), - errmsg("could not write two-phase state file: %m"))); + errmsg("could not write file \"%s\": %m", path))); } pgstat_report_wait_end(); @@ -1615,17 +1696,20 @@ RecreateTwoPhaseFile(TransactionId xid, void *content, int len) pgstat_report_wait_start(WAIT_EVENT_TWOPHASE_FILE_SYNC); if (pg_fsync(fd) != 0) { + int save_errno = errno; + CloseTransientFile(fd); + errno = save_errno; ereport(ERROR, (errcode_for_file_access(), - errmsg("could not fsync two-phase state file: %m"))); + errmsg("could not fsync file \"%s\": %m", path))); } pgstat_report_wait_end(); if (CloseTransientFile(fd) != 0) ereport(ERROR, (errcode_for_file_access(), - errmsg("could not close two-phase state file: %m"))); + errmsg("could not close file \"%s\": %m", path))); } /* @@ -1642,7 +1726,7 @@ RecreateTwoPhaseFile(TransactionId xid, void *content, int len) * possible that GXACTs that were valid at checkpoint start will no longer * exist if we wait a little bit. With typical checkpoint settings this * will be about 3 minutes for an online checkpoint, so as a result we - * we expect that there will be no GXACTs that need to be copied to disk. + * expect that there will be no GXACTs that need to be copied to disk. * * If a GXACT remains valid across multiple checkpoints, it will already * be on disk so we don't bother to repeat that write. 
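The RecreateTwoPhaseFile() hunks above also adopt the usual convention for short writes: clear errno before write(), and if the call comes back short without setting errno, report it as out of disk space. A condensed sketch of that convention with a hypothetical helper name (the code above additionally saves errno across pgstat_report_wait_end() and CloseTransientFile()):

#include "postgres.h"
#include <errno.h>
#include <unistd.h>

/* Hypothetical helper condensing the write-error convention used above. */
static void
write_all_or_error(int fd, const void *data, int len, const char *path)
{
	errno = 0;
	if (write(fd, data, len) != len)
	{
		/* if write didn't set errno, assume the problem is no disk space */
		if (errno == 0)
			errno = ENOSPC;
		ereport(ERROR,
				(errcode_for_file_access(),
				 errmsg("could not write file \"%s\": %m", path)));
	}
}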
@@ -1735,8 +1819,8 @@ restoreTwoPhaseData(void) DIR *cldir; struct dirent *clde; - cldir = AllocateDir(TWOPHASE_DIR); LWLockAcquire(TwoPhaseStateLock, LW_EXCLUSIVE); + cldir = AllocateDir(TWOPHASE_DIR); while ((clde = ReadDir(cldir, TWOPHASE_DIR)) != NULL) { if (strlen(clde->d_name) == 8 && @@ -1752,7 +1836,8 @@ restoreTwoPhaseData(void) if (buf == NULL) continue; - PrepareRedoAdd(buf, InvalidXLogRecPtr, InvalidXLogRecPtr); + PrepareRedoAdd(buf, InvalidXLogRecPtr, + InvalidXLogRecPtr, InvalidRepOriginId); } } LWLockRelease(TwoPhaseStateLock); @@ -1778,6 +1863,10 @@ restoreTwoPhaseData(void) * write a WAL entry, and so there might be no evidence in WAL of those * subxact XIDs. * + * On corrupted two-phase files, fail immediately. Keeping around broken + * entries and let replay continue causes harm on the system, and a new + * backup should be rolled in. + * * Our other responsibility is to determine and return the oldest valid XID * among the prepared xacts (if none, return ShmemVariableCache->nextXid). * This is needed to synchronize pg_subtrans startup properly. @@ -2031,14 +2120,14 @@ ProcessTwoPhaseBuffer(TransactionId xid, if (fromdisk) { ereport(WARNING, - (errmsg("removing stale two-phase state file for \"%u\"", + (errmsg("removing stale two-phase state file for transaction %u", xid))); RemoveTwoPhaseFile(xid, true); } else { ereport(WARNING, - (errmsg("removing stale two-phase state from shared memory for \"%u\"", + (errmsg("removing stale two-phase state from memory for transaction %u", xid))); PrepareRedoRemove(xid, true); } @@ -2051,14 +2140,14 @@ ProcessTwoPhaseBuffer(TransactionId xid, if (fromdisk) { ereport(WARNING, - (errmsg("removing future two-phase state file for \"%u\"", + (errmsg("removing future two-phase state file for transaction %u", xid))); RemoveTwoPhaseFile(xid, true); } else { ereport(WARNING, - (errmsg("removing future two-phase state from memory for \"%u\"", + (errmsg("removing future two-phase state from memory for transaction %u", xid))); PrepareRedoRemove(xid, true); } @@ -2068,15 +2157,7 @@ ProcessTwoPhaseBuffer(TransactionId xid, if (fromdisk) { /* Read and validate file */ - buf = ReadTwoPhaseFile(xid, true); - if (buf == NULL) - { - ereport(WARNING, - (errmsg("removing corrupt two-phase state file for \"%u\"", - xid))); - RemoveTwoPhaseFile(xid, true); - return NULL; - } + buf = ReadTwoPhaseFile(xid, false); } else { @@ -2089,21 +2170,15 @@ ProcessTwoPhaseBuffer(TransactionId xid, if (!TransactionIdEquals(hdr->xid, xid)) { if (fromdisk) - { - ereport(WARNING, - (errmsg("removing corrupt two-phase state file for \"%u\"", + ereport(ERROR, + (errcode(ERRCODE_DATA_CORRUPTED), + errmsg("corrupted two-phase state file for transaction \"%u\"", xid))); - RemoveTwoPhaseFile(xid, true); - } else - { - ereport(WARNING, - (errmsg("removing corrupt two-phase state from memory for \"%u\"", + ereport(ERROR, + (errcode(ERRCODE_DATA_CORRUPTED), + errmsg("corrupted two-phase state in memory for transaction \"%u\"", xid))); - PrepareRedoRemove(xid, true); - } - pfree(buf); - return NULL; } /* @@ -2165,7 +2240,8 @@ RecordTransactionCommitPrepared(TransactionId xid, RelFileNode *rels, int ninvalmsgs, SharedInvalidationMessage *invalmsgs, - bool initfileinval) + bool initfileinval, + const char *gid) { XLogRecPtr recptr; TimestampTz committs = GetCurrentTimestamp(); @@ -2193,7 +2269,7 @@ RecordTransactionCommitPrepared(TransactionId xid, ninvalmsgs, invalmsgs, initfileinval, false, MyXactFlags | XACT_FLAGS_ACQUIREDACCESSEXCLUSIVELOCK, - xid); + xid, gid); if (replorigin) @@ 
-2255,7 +2331,8 @@ RecordTransactionAbortPrepared(TransactionId xid, int nchildren, TransactionId *children, int nrels, - RelFileNode *rels) + RelFileNode *rels, + const char *gid) { XLogRecPtr recptr; @@ -2278,7 +2355,7 @@ RecordTransactionAbortPrepared(TransactionId xid, nchildren, children, nrels, rels, MyXactFlags | XACT_FLAGS_ACQUIREDACCESSEXCLUSIVELOCK, - xid); + xid, gid); /* Always flush, since we're about to remove the 2PC state file */ XLogFlush(recptr); @@ -2309,7 +2386,8 @@ RecordTransactionAbortPrepared(TransactionId xid, * data, the entry is marked as located on disk. */ void -PrepareRedoAdd(char *buf, XLogRecPtr start_lsn, XLogRecPtr end_lsn) +PrepareRedoAdd(char *buf, XLogRecPtr start_lsn, + XLogRecPtr end_lsn, RepOriginId origin_id) { TwoPhaseFileHeader *hdr = (TwoPhaseFileHeader *) buf; char *bufptr; @@ -2358,6 +2436,13 @@ PrepareRedoAdd(char *buf, XLogRecPtr start_lsn, XLogRecPtr end_lsn) Assert(TwoPhaseState->numPrepXacts < max_prepared_xacts); TwoPhaseState->prepXacts[TwoPhaseState->numPrepXacts++] = gxact; + if (origin_id != InvalidRepOriginId) + { + /* recover apply progress */ + replorigin_advance(origin_id, hdr->origin_lsn, end_lsn, + false /* backward */ , false /* WAL */ ); + } + elog(DEBUG2, "added 2PC data in shared memory for transaction %u", gxact->xid); } diff --git a/src/backend/access/transam/twophase_rmgr.c b/src/backend/access/transam/twophase_rmgr.c index 1cd03482d9..6d327e36bc 100644 --- a/src/backend/access/transam/twophase_rmgr.c +++ b/src/backend/access/transam/twophase_rmgr.c @@ -3,7 +3,7 @@ * twophase_rmgr.c * Two-phase-commit resource managers tables * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * diff --git a/src/backend/access/transam/varsup.c b/src/backend/access/transam/varsup.c index 702c8c957f..664735b381 100644 --- a/src/backend/access/transam/varsup.c +++ b/src/backend/access/transam/varsup.c @@ -3,7 +3,7 @@ * varsup.c * postgres OID & XID variables support routines * - * Copyright (c) 2000-2017, PostgreSQL Global Development Group + * Copyright (c) 2000-2018, PostgreSQL Global Development Group * * IDENTIFICATION * src/backend/access/transam/varsup.c @@ -124,14 +124,14 @@ GetNewTransactionId(bool isSubXact) errmsg("database is not accepting commands to avoid wraparound data loss in database \"%s\"", oldest_datname), errhint("Stop the postmaster and vacuum that database in single-user mode.\n" - "You might also need to commit or roll back old prepared transactions."))); + "You might also need to commit or roll back old prepared transactions, or drop stale replication slots."))); else ereport(ERROR, (errcode(ERRCODE_PROGRAM_LIMIT_EXCEEDED), errmsg("database is not accepting commands to avoid wraparound data loss in database with OID %u", oldest_datoid), errhint("Stop the postmaster and vacuum that database in single-user mode.\n" - "You might also need to commit or roll back old prepared transactions."))); + "You might also need to commit or roll back old prepared transactions, or drop stale replication slots."))); } else if (TransactionIdFollowsOrEquals(xid, xidWarnLimit)) { @@ -144,14 +144,14 @@ GetNewTransactionId(bool isSubXact) oldest_datname, xidWrapLimit - xid), errhint("To avoid a database shutdown, execute a database-wide VACUUM in that database.\n" - "You might also need to commit or roll back old prepared transactions."))); + "You might also need to 
commit or roll back old prepared transactions, or drop stale replication slots."))); else ereport(WARNING, (errmsg("database with OID %u must be vacuumed within %u transactions", oldest_datoid, xidWrapLimit - xid), errhint("To avoid a database shutdown, execute a database-wide VACUUM in that database.\n" - "You might also need to commit or roll back old prepared transactions."))); + "You might also need to commit or roll back old prepared transactions, or drop stale replication slots."))); } /* Re-acquire lock and start over */ @@ -186,20 +186,23 @@ GetNewTransactionId(bool isSubXact) * latestCompletedXid is present in the ProcArray, which is essential for * correct OldestXmin tracking; see src/backend/access/transam/README. * - * XXX by storing xid into MyPgXact without acquiring ProcArrayLock, we - * are relying on fetch/store of an xid to be atomic, else other backends - * might see a partially-set xid here. But holding both locks at once - * would be a nasty concurrency hit. So for now, assume atomicity. - * * Note that readers of PGXACT xid fields should be careful to fetch the * value only once, rather than assume they can read a value multiple - * times and get the same answer each time. + * times and get the same answer each time. Note we are assuming that + * TransactionId and int fetch/store are atomic. * * The same comments apply to the subxact xid count and overflow fields. * - * A solution to the atomic-store problem would be to give each PGXACT its - * own spinlock used only for fetching/storing that PGXACT's xid and - * related fields. + * Use of a write barrier prevents dangerous code rearrangement in this + * function; other backends could otherwise e.g. be examining my subxids + * info concurrently, and we don't want them to see an invalid + * intermediate state, such as an incremented nxids before the array entry + * is filled. + * + * Other processes that read nxids should do so before reading xids + * elements with a pg_read_barrier() in between, so that they can be sure + * not to read an uninitialized array element; see + * src/backend/storage/lmgr/README.barrier. * * If there's no room to fit a subtransaction XID into PGPROC, set the * cache-overflowed flag instead. This forces readers to look in @@ -211,31 +214,20 @@ GetNewTransactionId(bool isSubXact) * window *will* include the parent XID, so they will deliver the correct * answer later on when someone does have a reason to inquire.) */ + if (!isSubXact) + MyPgXact->xid = xid; /* LWLockRelease acts as barrier */ + else { - /* - * Use volatile pointer to prevent code rearrangement; other backends - * could be examining my subxids info concurrently, and we don't want - * them to see an invalid intermediate state, such as incrementing - * nxids before filling the array entry. Note we are assuming that - * TransactionId and int fetch/store are atomic. 
- */ - volatile PGPROC *myproc = MyProc; - volatile PGXACT *mypgxact = MyPgXact; + int nxids = MyPgXact->nxids; - if (!isSubXact) - mypgxact->xid = xid; - else + if (nxids < PGPROC_MAX_CACHED_SUBXIDS) { - int nxids = mypgxact->nxids; - - if (nxids < PGPROC_MAX_CACHED_SUBXIDS) - { - myproc->subxids.xids[nxids] = xid; - mypgxact->nxids = nxids + 1; - } - else - mypgxact->overflowed = true; + MyProc->subxids.xids[nxids] = xid; + pg_write_barrier(); + MyPgXact->nxids = nxids + 1; } + else + MyPgXact->overflowed = true; } LWLockRelease(XidGenLock); @@ -403,14 +395,14 @@ SetTransactionIdLimit(TransactionId oldest_datfrozenxid, Oid oldest_datoid) oldest_datname, xidWrapLimit - curXid), errhint("To avoid a database shutdown, execute a database-wide VACUUM in that database.\n" - "You might also need to commit or roll back old prepared transactions."))); + "You might also need to commit or roll back old prepared transactions, or drop stale replication slots."))); else ereport(WARNING, (errmsg("database with OID %u must be vacuumed within %u transactions", oldest_datoid, xidWrapLimit - curXid), errhint("To avoid a database shutdown, execute a database-wide VACUUM in that database.\n" - "You might also need to commit or roll back old prepared transactions."))); + "You might also need to commit or roll back old prepared transactions, or drop stale replication slots."))); } } @@ -476,11 +468,9 @@ GetNewObjectId(void) /* * Check for wraparound of the OID counter. We *must* not return 0 - * (InvalidOid); and as long as we have to check that, it seems a good - * idea to skip over everything below FirstNormalObjectId too. (This - * basically just avoids lots of collisions with bootstrap-assigned OIDs - * right after a wrap occurs, so as to avoid a possibly large number of - * iterations in GetNewOid.) Note we are relying on unsigned comparison. + * (InvalidOid), and in normal operation we mustn't return anything below + * FirstNormalObjectId since that range is reserved for initdb (see + * IsCatalogClass()). Note we are relying on unsigned comparison. * * During initdb, we start the OID generator at FirstBootstrapObjectId, so * we only wrap if before that point when in bootstrap or standalone mode. diff --git a/src/backend/access/transam/xact.c b/src/backend/access/transam/xact.c index 5e7e812200..a979d7e07b 100644 --- a/src/backend/access/transam/xact.c +++ b/src/backend/access/transam/xact.c @@ -5,7 +5,7 @@ * * See src/backend/access/transam/README for more information. * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * @@ -30,8 +30,8 @@ #include "access/xlog.h" #include "access/xloginsert.h" #include "access/xlogutils.h" -#include "catalog/catalog.h" #include "catalog/namespace.h" +#include "catalog/pg_enum.h" #include "catalog/storage.h" #include "commands/async.h" #include "commands/tablecmds.h" @@ -145,6 +145,7 @@ typedef enum TBlockState /* transaction block states */ TBLOCK_BEGIN, /* starting transaction block */ TBLOCK_INPROGRESS, /* live transaction */ + TBLOCK_IMPLICIT_INPROGRESS, /* live transaction after implicit BEGIN */ TBLOCK_PARALLEL_INPROGRESS, /* live transaction inside parallel worker */ TBLOCK_END, /* COMMIT received */ TBLOCK_ABORT, /* failed xact, awaiting ROLLBACK */ @@ -199,27 +200,8 @@ typedef TransactionStateData *TransactionState; * transaction at all, or when in a top-level transaction. 
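The GetNewTransactionId() rewrite above replaces the old volatile-pointer trick with an explicit pg_write_barrier() between filling the subxid slot and advancing nxids. A reader walking another backend's subxid cache needs the matching pg_read_barrier(), roughly as in this simplified sketch; the real readers in procarray.c also coordinate through ProcArrayLock, which this sketch omits:

#include "postgres.h"
#include "access/transam.h"
#include "storage/barrier.h"
#include "storage/proc.h"

/* Simplified reader-side sketch pairing with the write barrier above. */
static bool
subxid_cache_contains(PGPROC *proc, PGXACT *pgxact, TransactionId xid)
{
	int			nxids = pgxact->nxids;
	int			i;

	/* Read nxids before reading the xids[] entries it covers. */
	pg_read_barrier();

	for (i = 0; i < nxids; i++)
	{
		if (TransactionIdEquals(proc->subxids.xids[i], xid))
			return true;
	}
	return false;
}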
*/ static TransactionStateData TopTransactionStateData = { - 0, /* transaction id */ - 0, /* subtransaction id */ - NULL, /* savepoint name */ - 0, /* savepoint level */ - TRANS_DEFAULT, /* transaction state */ - TBLOCK_DEFAULT, /* transaction block state from the client - * perspective */ - 0, /* transaction nesting depth */ - 0, /* GUC context nesting depth */ - NULL, /* cur transaction context */ - NULL, /* cur transaction resource owner */ - NULL, /* subcommitted child Xids */ - 0, /* # of subcommitted child Xids */ - 0, /* allocated size of childXids[] */ - InvalidOid, /* previous CurrentUserId setting */ - 0, /* previous SecurityRestrictionContext */ - false, /* entry-time xact r/o state */ - false, /* startedInRecovery */ - false, /* didLogXid */ - 0, /* parallelMode */ - NULL /* link to parent state block */ + .state = TRANS_DEFAULT, + .blockState = TBLOCK_DEFAULT, }; /* @@ -309,7 +291,7 @@ static void CallSubXactCallbacks(SubXactEvent event, SubTransactionId mySubid, SubTransactionId parentSubid); static void CleanupTransaction(void); -static void CheckTransactionChain(bool isTopLevel, bool throwError, +static void CheckTransactionBlock(bool isTopLevel, bool throwError, const char *stmtType); static void CommitTransaction(void); static TransactionId RecordTransactionAbort(bool isSubXact); @@ -505,7 +487,7 @@ AssignTransactionId(TransactionState s) /* * Ensure parent(s) have XIDs, so that a child always has an XID later - * than its parent. Musn't recurse here, or we might get a stack overflow + * than its parent. Mustn't recurse here, or we might get a stack overflow * if we're at the bottom of a huge stack of subtransactions none of which * have XIDs yet. */ @@ -574,18 +556,10 @@ AssignTransactionId(TransactionState s) * ResourceOwner. */ currentOwner = CurrentResourceOwner; - PG_TRY(); - { - CurrentResourceOwner = s->curTransactionOwner; - XactLockTableInsert(s->transactionId); - } - PG_CATCH(); - { - /* Ensure CurrentResourceOwner is restored on error */ - CurrentResourceOwner = currentOwner; - PG_RE_THROW(); - } - PG_END_TRY(); + CurrentResourceOwner = s->curTransactionOwner; + + XactLockTableInsert(s->transactionId); + CurrentResourceOwner = currentOwner; /* @@ -678,8 +652,8 @@ SubTransactionIsActive(SubTransactionId subxid) /* * GetCurrentCommandId * - * "used" must be TRUE if the caller intends to use the command ID to mark - * inserted/updated/deleted tuples. FALSE means the ID is being fetched + * "used" must be true if the caller intends to use the command ID to mark + * inserted/updated/deleted tuples. false means the ID is being fetched * for read-only purposes (ie, as a snapshot validity cutoff). See * CommandCounterIncrement() for discussion. */ @@ -690,17 +664,33 @@ GetCurrentCommandId(bool used) if (used) { /* - * Forbid setting currentCommandIdUsed in parallel mode, because we - * have no provision for communicating this back to the master. We + * Forbid setting currentCommandIdUsed in a parallel worker, because + * we have no provision for communicating this back to the master. We * could relax this restriction when currentCommandIdUsed was already * true at the start of the parallel operation. */ - Assert(CurrentTransactionState->parallelModeLevel == 0); + Assert(!IsParallelWorker()); currentCommandIdUsed = true; } return currentCommandId; } +/* + * SetParallelStartTimestamps + * + * In a parallel worker, we should inherit the parent transaction's + * timestamps rather than setting our own. 
The parallel worker + * infrastructure must call this to provide those values before + * calling StartTransaction() or SetCurrentStatementStartTimestamp(). + */ +void +SetParallelStartTimestamps(TimestampTz xact_ts, TimestampTz stmt_ts) +{ + Assert(IsParallelWorker()); + xactStartTimestamp = xact_ts; + stmtStartTimestamp = stmt_ts; +} + /* * GetCurrentTransactionStartTimestamp */ @@ -735,11 +725,17 @@ GetCurrentTransactionStopTimestamp(void) /* * SetCurrentStatementStartTimestamp + * + * In a parallel worker, this should already have been provided by a call + * to SetParallelStartTimestamps(). */ void SetCurrentStatementStartTimestamp(void) { - stmtStartTimestamp = GetCurrentTimestamp(); + if (!IsParallelWorker()) + stmtStartTimestamp = GetCurrentTimestamp(); + else + Assert(stmtStartTimestamp != 0); } /* @@ -1171,7 +1167,7 @@ RecordTransactionCommit(void) * vacuum. Hence we emit a bespoke record for the invalidations. We * don't want to use that in case a commit record is emitted, so they * happen synchronously with commits (besides not wanting to emit more - * WAL recoreds). + * WAL records). */ if (nmsgs != 0) { @@ -1233,7 +1229,7 @@ RecordTransactionCommit(void) nmsgs, invalMessages, RelcacheInitFileInval, forceSyncCommit, MyXactFlags, - InvalidTransactionId /* plain commit */ ); + InvalidTransactionId, NULL /* plain commit */ ); if (replorigin) /* Move LSNs forward for this replication origin */ @@ -1585,7 +1581,8 @@ RecordTransactionAbort(bool isSubXact) XactLogAbortRecord(xact_time, nchildren, children, nrels, rels, - MyXactFlags, InvalidTransactionId); + MyXactFlags, InvalidTransactionId, + NULL); /* * Report the latest async abort LSN, so that the WAL writer knows to @@ -1891,14 +1888,26 @@ StartTransaction(void) TRACE_POSTGRESQL_TRANSACTION_START(vxid.localTransactionId); /* - * set transaction_timestamp() (a/k/a now()). We want this to be the same - * as the first command's statement_timestamp(), so don't do a fresh - * GetCurrentTimestamp() call (which'd be expensive anyway). Also, mark - * xactStopTimestamp as unset. + * set transaction_timestamp() (a/k/a now()). Normally, we want this to + * be the same as the first command's statement_timestamp(), so don't do a + * fresh GetCurrentTimestamp() call (which'd be expensive anyway). But + * for transactions started inside procedures (i.e., nonatomic SPI + * contexts), we do need to advance the timestamp. Also, in a parallel + * worker, the timestamp should already have been provided by a call to + * SetParallelStartTimestamps(). */ - xactStartTimestamp = stmtStartTimestamp; - xactStopTimestamp = 0; + if (!IsParallelWorker()) + { + if (!SPI_inside_nonatomic_context()) + xactStartTimestamp = stmtStartTimestamp; + else + xactStartTimestamp = GetCurrentTimestamp(); + } + else + Assert(xactStartTimestamp != 0); pgstat_report_xact_timestamp(xactStartTimestamp); + /* Mark xactStopTimestamp as unset. 
*/ + xactStopTimestamp = 0; /* * initialize current transaction state fields @@ -2025,7 +2034,7 @@ CommitTransaction(void) HOLD_INTERRUPTS(); /* Commit updates to the relation map --- do this as late as possible */ - AtEOXact_RelationMap(true); + AtEOXact_RelationMap(true, is_parallel_worker); /* * set the current transaction state information appropriately during @@ -2127,10 +2136,11 @@ CommitTransaction(void) AtCommit_Notify(); AtEOXact_GUC(true, 1); AtEOXact_SPI(true); + AtEOXact_Enum(); AtEOXact_on_commit_actions(true); AtEOXact_Namespace(true, is_parallel_worker); AtEOXact_SMgr(); - AtEOXact_Files(); + AtEOXact_Files(true); AtEOXact_ComboCid(); AtEOXact_HashTables(true); AtEOXact_PgStat(true); @@ -2405,10 +2415,11 @@ PrepareTransaction(void) /* PREPARE acts the same as COMMIT as far as GUC is concerned */ AtEOXact_GUC(true, 1); AtEOXact_SPI(true); + AtEOXact_Enum(); AtEOXact_on_commit_actions(true); AtEOXact_Namespace(true, false); AtEOXact_SMgr(); - AtEOXact_Files(); + AtEOXact_Files(true); AtEOXact_ComboCid(); AtEOXact_HashTables(true); /* don't call AtEOXact_PgStat here; we fixed pgstat state above */ @@ -2546,7 +2557,7 @@ AbortTransaction(void) AtAbort_Portals(); AtEOXact_LargeObject(false); AtAbort_Notify(); - AtEOXact_RelationMap(false); + AtEOXact_RelationMap(false, is_parallel_worker); AtAbort_Twophase(); /* @@ -2607,10 +2618,11 @@ AbortTransaction(void) AtEOXact_GUC(false, 1); AtEOXact_SPI(false); + AtEOXact_Enum(); AtEOXact_on_commit_actions(false); AtEOXact_Namespace(false, is_parallel_worker); AtEOXact_SMgr(); - AtEOXact_Files(); + AtEOXact_Files(false); AtEOXact_ComboCid(); AtEOXact_HashTables(false); AtEOXact_PgStat(false); @@ -2700,6 +2712,7 @@ StartTransactionCommand(void) * previous CommitTransactionCommand.) */ case TBLOCK_INPROGRESS: + case TBLOCK_IMPLICIT_INPROGRESS: case TBLOCK_SUBINPROGRESS: break; @@ -2790,6 +2803,7 @@ CommitTransactionCommand(void) * counter and return. */ case TBLOCK_INPROGRESS: + case TBLOCK_IMPLICIT_INPROGRESS: case TBLOCK_SUBINPROGRESS: CommandCounterIncrement(); break; @@ -3014,10 +3028,12 @@ AbortCurrentTransaction(void) break; /* - * if we aren't in a transaction block, we just do the basic abort - * & cleanup transaction. + * If we aren't in a transaction block, we just do the basic abort + * & cleanup transaction. For this purpose, we treat an implicit + * transaction block as if it were a simple statement. */ case TBLOCK_STARTED: + case TBLOCK_IMPLICIT_INPROGRESS: AbortTransaction(); CleanupTransaction(); s->blockState = TBLOCK_DEFAULT; @@ -3136,7 +3152,7 @@ AbortCurrentTransaction(void) } /* - * PreventTransactionChain + * PreventInTransactionBlock * * This routine is to be called by statements that must not run inside * a transaction block, typically because they have non-rollback-able @@ -3148,13 +3164,12 @@ AbortCurrentTransaction(void) * completes). Subtransactions are verboten too. * * isTopLevel: passed down from ProcessUtility to determine whether we are - * inside a function or multi-query querystring. (We will always fail if - * this is false, but it's convenient to centralize the check here instead of - * making callers do it.) + * inside a function. (We will always fail if this is false, but it's + * convenient to centralize the check here instead of making callers do it.) * stmtType: statement type name, for error messages. */ void -PreventTransactionChain(bool isTopLevel, const char *stmtType) +PreventInTransactionBlock(bool isTopLevel, const char *stmtType) { /* * xact block already started? 
@@ -3183,8 +3198,7 @@ PreventTransactionChain(bool isTopLevel, const char *stmtType) ereport(ERROR, (errcode(ERRCODE_ACTIVE_SQL_TRANSACTION), /* translator: %s represents an SQL statement name */ - errmsg("%s cannot be executed from a function or multi-command string", - stmtType))); + errmsg("%s cannot be executed from a function", stmtType))); /* If we got past IsTransactionBlock test, should be in default state */ if (CurrentTransactionState->blockState != TBLOCK_DEFAULT && @@ -3194,44 +3208,43 @@ PreventTransactionChain(bool isTopLevel, const char *stmtType) } /* - * These two functions allow for warnings or errors if a command is - * executed outside of a transaction block. + * WarnNoTransactionBlock + * RequireTransactionBlock + * + * These two functions allow for warnings or errors if a command is executed + * outside of a transaction block. This is useful for commands that have no + * effects that persist past transaction end (and so calling them outside a + * transaction block is presumably an error). DECLARE CURSOR is an example. + * While top-level transaction control commands (BEGIN/COMMIT/ABORT) and SET + * that have no effect issue warnings, all other no-effect commands generate + * errors. + * + * If we appear to be running inside a user-defined function, we do not + * issue anything, since the function could issue more commands that make + * use of the current statement's results. Likewise subtransactions. + * Thus these are inverses for PreventInTransactionBlock. + * - * While top-level transaction control commands (BEGIN/COMMIT/ABORT) and - * SET that have no effect issue warnings, all other no-effect commands - * generate errors. + * isTopLevel: passed down from ProcessUtility to determine whether we are + * inside a function. + * stmtType: statement type name, for warning or error messages. */ void -WarnNoTransactionChain(bool isTopLevel, const char *stmtType) +WarnNoTransactionBlock(bool isTopLevel, const char *stmtType) { - CheckTransactionChain(isTopLevel, false, stmtType); + CheckTransactionBlock(isTopLevel, false, stmtType); } void -RequireTransactionChain(bool isTopLevel, const char *stmtType) +RequireTransactionBlock(bool isTopLevel, const char *stmtType) { - CheckTransactionChain(isTopLevel, true, stmtType); + CheckTransactionBlock(isTopLevel, true, stmtType); } /* - * RequireTransactionChain - * - * This routine is to be called by statements that must run inside - * a transaction block, because they have no effects that persist past - * transaction end (and so calling them outside a transaction block - * is presumably an error). DECLARE CURSOR is an example. - * - * If we appear to be running inside a user-defined function, we do not - * issue anything, since the function could issue more commands that make - * use of the current statement's results. Likewise subtransactions. - * Thus this is an inverse for PreventTransactionChain. - * - * isTopLevel: passed down from ProcessUtility to determine whether we are - * inside a function. - * stmtType: statement type name, for warning or error messages. + * This is the implementation of the above two. */ static void -CheckTransactionChain(bool isTopLevel, bool throwError, const char *stmtType) +CheckTransactionBlock(bool isTopLevel, bool throwError, const char *stmtType) { /* * xact block already started? 
@@ -3260,7 +3273,7 @@ CheckTransactionChain(bool isTopLevel, bool throwError, const char *stmtType) } /* - * IsInTransactionChain + * IsInTransactionBlock * * This routine is for statements that need to behave differently inside * a transaction block than when running as single commands. ANALYZE is @@ -3270,11 +3283,11 @@ CheckTransactionChain(bool isTopLevel, bool throwError, const char *stmtType) * inside a function. */ bool -IsInTransactionChain(bool isTopLevel) +IsInTransactionBlock(bool isTopLevel) { /* - * Return true on same conditions that would make PreventTransactionChain - * error out + * Return true on same conditions that would make + * PreventInTransactionBlock error out */ if (IsTransactionBlock()) return true; @@ -3344,7 +3357,7 @@ CallXactCallbacks(XactEvent event) XactCallbackItem *item; for (item = Xact_callbacks; item; item = item->next) - (*item->callback) (event, item->arg); + item->callback(event, item->arg); } @@ -3401,7 +3414,7 @@ CallSubXactCallbacks(SubXactEvent event, SubXactCallbackItem *item; for (item = SubXact_callbacks; item; item = item->next) - (*item->callback) (event, mySubid, parentSubid, item->arg); + item->callback(event, mySubid, parentSubid, item->arg); } @@ -3428,6 +3441,15 @@ BeginTransactionBlock(void) s->blockState = TBLOCK_BEGIN; break; + /* + * BEGIN converts an implicit transaction block to a regular one. + * (Note that we allow this even if we've already done some + * commands, which is a bit odd but matches historical practice.) + */ + case TBLOCK_IMPLICIT_INPROGRESS: + s->blockState = TBLOCK_BEGIN; + break; + /* * Already a transaction block in progress. */ @@ -3466,7 +3488,7 @@ BeginTransactionBlock(void) * This executes a PREPARE command. * * Since PREPARE may actually do a ROLLBACK, the result indicates what - * happened: TRUE for PREPARE, FALSE for ROLLBACK. + * happened: true for PREPARE, false for ROLLBACK. * * Note that we don't actually do anything here except change blockState. * The real work will be done in the upcoming PrepareTransaction(). @@ -3474,7 +3496,7 @@ BeginTransactionBlock(void) * resource owner, etc while executing inside a Portal. */ bool -PrepareTransactionBlock(char *gid) +PrepareTransactionBlock(const char *gid) { TransactionState s; bool result; @@ -3503,7 +3525,8 @@ PrepareTransactionBlock(char *gid) * ignore case where we are not in a transaction; * EndTransactionBlock already issued a warning. */ - Assert(s->blockState == TBLOCK_STARTED); + Assert(s->blockState == TBLOCK_STARTED || + s->blockState == TBLOCK_IMPLICIT_INPROGRESS); /* Don't send back a PREPARE result tag... */ result = false; } @@ -3517,7 +3540,7 @@ PrepareTransactionBlock(char *gid) * This executes a COMMIT command. * * Since COMMIT may actually do a ROLLBACK, the result indicates what - * happened: TRUE for COMMIT, FALSE for ROLLBACK. + * happened: true for COMMIT, false for ROLLBACK. * * Note that we don't actually do anything here except change blockState. * The real work will be done in the upcoming CommitTransactionCommand(). @@ -3541,6 +3564,18 @@ EndTransactionBlock(void) result = true; break; + /* + * In an implicit transaction block, commit, but issue a warning + * because there was no explicit BEGIN before this. + */ + case TBLOCK_IMPLICIT_INPROGRESS: + ereport(WARNING, + (errcode(ERRCODE_NO_ACTIVE_SQL_TRANSACTION), + errmsg("there is no transaction in progress"))); + s->blockState = TBLOCK_END; + result = true; + break; + /* * We are in a failed transaction block. Tell * CommitTransactionCommand it's time to exit the block. 
@@ -3705,8 +3740,14 @@ UserAbortTransactionBlock(void) * WARNING and go to abort state. The upcoming call to * CommitTransactionCommand() will then put us back into the * default state. + * + * We do the same thing with ABORT inside an implicit transaction, + * although in this case we might be rolling back actual database + * state changes. (It's debatable whether we should issue a + * WARNING in this case, but we have done so historically.) */ case TBLOCK_STARTED: + case TBLOCK_IMPLICIT_INPROGRESS: ereport(WARNING, (errcode(ERRCODE_NO_ACTIVE_SQL_TRANSACTION), errmsg("there is no transaction in progress"))); @@ -3743,12 +3784,64 @@ UserAbortTransactionBlock(void) } } +/* + * BeginImplicitTransactionBlock + * Start an implicit transaction block if we're not already in one. + * + * Unlike BeginTransactionBlock, this is called directly from the main loop + * in postgres.c, not within a Portal. So we can just change blockState + * without a lot of ceremony. We do not expect caller to do + * CommitTransactionCommand/StartTransactionCommand. + */ +void +BeginImplicitTransactionBlock(void) +{ + TransactionState s = CurrentTransactionState; + + /* + * If we are in STARTED state (that is, no transaction block is open), + * switch to IMPLICIT_INPROGRESS state, creating an implicit transaction + * block. + * + * For caller convenience, we consider all other transaction states as + * legal here; otherwise the caller would need its own state check, which + * seems rather pointless. + */ + if (s->blockState == TBLOCK_STARTED) + s->blockState = TBLOCK_IMPLICIT_INPROGRESS; +} + +/* + * EndImplicitTransactionBlock + * End an implicit transaction block, if we're in one. + * + * Like EndTransactionBlock, we just make any needed blockState change here. + * The real work will be done in the upcoming CommitTransactionCommand(). + */ +void +EndImplicitTransactionBlock(void) +{ + TransactionState s = CurrentTransactionState; + + /* + * If we are in IMPLICIT_INPROGRESS state, switch back to STARTED state, + * allowing CommitTransactionCommand to commit whatever happened during + * the implicit transaction block as though it were a single statement. + * + * For caller convenience, we consider all other transaction states as + * legal here; otherwise the caller would need its own state check, which + * seems rather pointless. + */ + if (s->blockState == TBLOCK_IMPLICIT_INPROGRESS) + s->blockState = TBLOCK_STARTED; +} + /* * DefineSavepoint * This executes a SAVEPOINT command. */ void -DefineSavepoint(char *name) +DefineSavepoint(const char *name) { TransactionState s = CurrentTransactionState; @@ -3780,6 +3873,28 @@ DefineSavepoint(char *name) s->name = MemoryContextStrdup(TopTransactionContext, name); break; + /* + * We disallow savepoint commands in implicit transaction blocks. + * There would be no great difficulty in allowing them so far as + * this module is concerned, but a savepoint seems inconsistent + * with exec_simple_query's behavior of abandoning the whole query + * string upon error. Also, the point of an implicit transaction + * block (as opposed to a regular one) is to automatically close + * after an error, so it's hard to see how a savepoint would fit + * into that. + * + * The error messages for this are phrased as if there were no + * active transaction block at all, which is historical but + * perhaps could be improved. 
+ */ + case TBLOCK_IMPLICIT_INPROGRESS: + ereport(ERROR, + (errcode(ERRCODE_NO_ACTIVE_SQL_TRANSACTION), + /* translator: %s represents an SQL statement name */ + errmsg("%s can only be used in transaction blocks", + "SAVEPOINT"))); + break; + /* These cases are invalid. */ case TBLOCK_DEFAULT: case TBLOCK_STARTED: @@ -3811,13 +3926,11 @@ DefineSavepoint(char *name) * As above, we don't actually do anything here except change blockState. */ void -ReleaseSavepoint(List *options) +ReleaseSavepoint(const char *name) { TransactionState s = CurrentTransactionState; TransactionState target, xact; - ListCell *cell; - char *name = NULL; /* * Workers synchronize transaction state at the beginning of each parallel @@ -3834,13 +3947,21 @@ ReleaseSavepoint(List *options) switch (s->blockState) { /* - * We can't rollback to a savepoint if there is no savepoint - * defined. + * We can't release a savepoint if there is no savepoint defined. */ case TBLOCK_INPROGRESS: ereport(ERROR, (errcode(ERRCODE_S_E_INVALID_SPECIFICATION), - errmsg("no such savepoint"))); + errmsg("savepoint \"%s\" does not exist", name))); + break; + + case TBLOCK_IMPLICIT_INPROGRESS: + /* See comment about implicit transactions in DefineSavepoint */ + ereport(ERROR, + (errcode(ERRCODE_NO_ACTIVE_SQL_TRANSACTION), + /* translator: %s represents an SQL statement name */ + errmsg("%s can only be used in transaction blocks", + "RELEASE SAVEPOINT"))); break; /* @@ -3873,16 +3994,6 @@ ReleaseSavepoint(List *options) break; } - foreach(cell, options) - { - DefElem *elem = lfirst(cell); - - if (strcmp(elem->defname, "savepoint_name") == 0) - name = strVal(elem->arg); - } - - Assert(PointerIsValid(name)); - for (target = s; PointerIsValid(target); target = target->parent) { if (PointerIsValid(target->name) && strcmp(target->name, name) == 0) @@ -3892,13 +4003,13 @@ ReleaseSavepoint(List *options) if (!PointerIsValid(target)) ereport(ERROR, (errcode(ERRCODE_S_E_INVALID_SPECIFICATION), - errmsg("no such savepoint"))); + errmsg("savepoint \"%s\" does not exist", name))); /* disallow crossing savepoint level boundaries */ if (target->savepointLevel != s->savepointLevel) ereport(ERROR, (errcode(ERRCODE_S_E_INVALID_SPECIFICATION), - errmsg("no such savepoint"))); + errmsg("savepoint \"%s\" does not exist within current savepoint level", name))); /* * Mark "commit pending" all subtransactions up to the target @@ -3924,13 +4035,11 @@ ReleaseSavepoint(List *options) * As above, we don't actually do anything here except change blockState. 
*/ void -RollbackToSavepoint(List *options) +RollbackToSavepoint(const char *name) { TransactionState s = CurrentTransactionState; TransactionState target, xact; - ListCell *cell; - char *name = NULL; /* * Workers synchronize transaction state at the beginning of each parallel @@ -3954,7 +4063,16 @@ RollbackToSavepoint(List *options) case TBLOCK_ABORT: ereport(ERROR, (errcode(ERRCODE_S_E_INVALID_SPECIFICATION), - errmsg("no such savepoint"))); + errmsg("savepoint \"%s\" does not exist", name))); + break; + + case TBLOCK_IMPLICIT_INPROGRESS: + /* See comment about implicit transactions in DefineSavepoint */ + ereport(ERROR, + (errcode(ERRCODE_NO_ACTIVE_SQL_TRANSACTION), + /* translator: %s represents an SQL statement name */ + errmsg("%s can only be used in transaction blocks", + "ROLLBACK TO SAVEPOINT"))); break; /* @@ -3985,16 +4103,6 @@ RollbackToSavepoint(List *options) break; } - foreach(cell, options) - { - DefElem *elem = lfirst(cell); - - if (strcmp(elem->defname, "savepoint_name") == 0) - name = strVal(elem->arg); - } - - Assert(PointerIsValid(name)); - for (target = s; PointerIsValid(target); target = target->parent) { if (PointerIsValid(target->name) && strcmp(target->name, name) == 0) @@ -4004,13 +4112,13 @@ RollbackToSavepoint(List *options) if (!PointerIsValid(target)) ereport(ERROR, (errcode(ERRCODE_S_E_INVALID_SPECIFICATION), - errmsg("no such savepoint"))); + errmsg("savepoint \"%s\" does not exist", name))); /* disallow crossing savepoint level boundaries */ if (target->savepointLevel != s->savepointLevel) ereport(ERROR, (errcode(ERRCODE_S_E_INVALID_SPECIFICATION), - errmsg("no such savepoint"))); + errmsg("savepoint \"%s\" does not exist within current savepoint level", name))); /* * Mark "abort pending" all subtransactions up to the target @@ -4046,14 +4154,15 @@ RollbackToSavepoint(List *options) /* * BeginInternalSubTransaction * This is the same as DefineSavepoint except it allows TBLOCK_STARTED, - * TBLOCK_END, and TBLOCK_PREPARE states, and therefore it can safely be - * used in functions that might be called when not inside a BEGIN block - * or when running deferred triggers at COMMIT/PREPARE time. Also, it - * automatically does CommitTransactionCommand/StartTransactionCommand - * instead of expecting the caller to do it. + * TBLOCK_IMPLICIT_INPROGRESS, TBLOCK_END, and TBLOCK_PREPARE states, + * and therefore it can safely be used in functions that might be called + * when not inside a BEGIN block or when running deferred triggers at + * COMMIT/PREPARE time. Also, it automatically does + * CommitTransactionCommand/StartTransactionCommand instead of expecting + * the caller to do it. 
*/ void -BeginInternalSubTransaction(char *name) +BeginInternalSubTransaction(const char *name) { TransactionState s = CurrentTransactionState; @@ -4076,6 +4185,7 @@ BeginInternalSubTransaction(char *name) { case TBLOCK_STARTED: case TBLOCK_INPROGRESS: + case TBLOCK_IMPLICIT_INPROGRESS: case TBLOCK_END: case TBLOCK_PREPARE: case TBLOCK_SUBINPROGRESS: @@ -4180,6 +4290,7 @@ RollbackAndReleaseCurrentSubTransaction(void) case TBLOCK_DEFAULT: case TBLOCK_STARTED: case TBLOCK_BEGIN: + case TBLOCK_IMPLICIT_INPROGRESS: case TBLOCK_PARALLEL_INPROGRESS: case TBLOCK_SUBBEGIN: case TBLOCK_INPROGRESS: @@ -4211,6 +4322,7 @@ RollbackAndReleaseCurrentSubTransaction(void) s = CurrentTransactionState; /* changed by pop */ AssertState(s->blockState == TBLOCK_SUBINPROGRESS || s->blockState == TBLOCK_INPROGRESS || + s->blockState == TBLOCK_IMPLICIT_INPROGRESS || s->blockState == TBLOCK_STARTED); } @@ -4259,6 +4371,7 @@ AbortOutOfAnyTransaction(void) case TBLOCK_STARTED: case TBLOCK_BEGIN: case TBLOCK_INPROGRESS: + case TBLOCK_IMPLICIT_INPROGRESS: case TBLOCK_PARALLEL_INPROGRESS: case TBLOCK_END: case TBLOCK_ABORT_PENDING: @@ -4369,6 +4482,7 @@ TransactionBlockStatusCode(void) case TBLOCK_BEGIN: case TBLOCK_SUBBEGIN: case TBLOCK_INPROGRESS: + case TBLOCK_IMPLICIT_INPROGRESS: case TBLOCK_PARALLEL_INPROGRESS: case TBLOCK_SUBINPROGRESS: case TBLOCK_END: @@ -4542,6 +4656,7 @@ CommitSubTransaction(void) AtEOSubXact_HashTables(true, s->nestingLevel); AtEOSubXact_PgStat(true, s->nestingLevel); AtSubCommit_Snapshot(s->nestingLevel); + AtEOSubXact_ApplyLauncher(true, s->nestingLevel); /* * We need to restore the upper transaction's read-only state, in case the @@ -4596,6 +4711,9 @@ AbortSubTransaction(void) /* Reset WAL record construction state */ XLogResetInsertion(); + /* Cancel condition variable sleep */ + ConditionVariableCancelSleep(); + /* * Also clean up any open wait for lock, since the lock manager will choke * if we try to wait for another lock before doing this. 
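[Editorial note, not part of the patch.] BeginImplicitTransactionBlock() and EndImplicitTransactionBlock(), added above, are meant to be driven from the outer command loop (exec_simple_query() in postgres.c, per their header comments). A sketch of that calling pattern under stated assumptions: the driver run_multi_statement_string() and its numstmts bookkeeping are hypothetical, while StartTransactionCommand()/CommitTransactionCommand() and the two new functions are the real APIs.

#include "postgres.h"
#include "access/xact.h"

/*
 * Hypothetical driver: run 'numstmts' statements that arrived in a single
 * query string as one implicit transaction block.
 */
static void
run_multi_statement_string(int numstmts)
{
	bool		use_implicit_block = (numstmts > 1);
	int			i;

	for (i = 0; i < numstmts; i++)
	{
		StartTransactionCommand();

		/*
		 * Open (or stay in) the implicit block.  Repeated calls are no-ops,
		 * and an explicit BEGIN in the string just converts the block to a
		 * regular one (the TBLOCK_IMPLICIT_INPROGRESS case added earlier).
		 */
		if (use_implicit_block)
			BeginImplicitTransactionBlock();

		/* ... plan and execute statement i here ... */

		/*
		 * After the last statement, drop back to STARTED so the following
		 * CommitTransactionCommand() really commits the whole block.
		 */
		if (use_implicit_block && i == numstmts - 1)
			EndImplicitTransactionBlock();

		CommitTransactionCommand();
	}
}

An error anywhere in the loop abandons the rest of the query string and aborts the implicit block as a whole, which is why the hunks above reject SAVEPOINT, RELEASE SAVEPOINT and ROLLBACK TO SAVEPOINT in this state.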
@@ -4692,6 +4810,7 @@ AbortSubTransaction(void) AtEOSubXact_HashTables(false, s->nestingLevel); AtEOSubXact_PgStat(false, s->nestingLevel); AtSubAbort_Snapshot(s->nestingLevel); + AtEOSubXact_ApplyLauncher(false, s->nestingLevel); } /* @@ -5036,6 +5155,8 @@ BlockStateAsString(TBlockState blockState) return "BEGIN"; case TBLOCK_INPROGRESS: return "INPROGRESS"; + case TBLOCK_IMPLICIT_INPROGRESS: + return "IMPLICIT_INPROGRESS"; case TBLOCK_PARALLEL_INPROGRESS: return "PARALLEL_INPROGRESS"; case TBLOCK_END: @@ -5043,29 +5164,29 @@ BlockStateAsString(TBlockState blockState) case TBLOCK_ABORT: return "ABORT"; case TBLOCK_ABORT_END: - return "ABORT END"; + return "ABORT_END"; case TBLOCK_ABORT_PENDING: - return "ABORT PEND"; + return "ABORT_PENDING"; case TBLOCK_PREPARE: return "PREPARE"; case TBLOCK_SUBBEGIN: - return "SUB BEGIN"; + return "SUBBEGIN"; case TBLOCK_SUBINPROGRESS: - return "SUB INPROGRS"; + return "SUBINPROGRESS"; case TBLOCK_SUBRELEASE: - return "SUB RELEASE"; + return "SUBRELEASE"; case TBLOCK_SUBCOMMIT: - return "SUB COMMIT"; + return "SUBCOMMIT"; case TBLOCK_SUBABORT: - return "SUB ABORT"; + return "SUBABORT"; case TBLOCK_SUBABORT_END: - return "SUB ABORT END"; + return "SUBABORT_END"; case TBLOCK_SUBABORT_PENDING: - return "SUB ABRT PEND"; + return "SUBABORT_PENDING"; case TBLOCK_SUBRESTART: - return "SUB RESTART"; + return "SUBRESTART"; case TBLOCK_SUBABORT_RESTART: - return "SUB AB RESTRT"; + return "SUBABORT_RESTART"; } return "UNRECOGNIZED"; } @@ -5084,7 +5205,7 @@ TransStateAsString(TransState state) case TRANS_START: return "START"; case TRANS_INPROGRESS: - return "INPROGR"; + return "INPROGRESS"; case TRANS_COMMIT: return "COMMIT"; case TRANS_ABORT: @@ -5134,7 +5255,8 @@ XactLogCommitRecord(TimestampTz commit_time, int nrels, RelFileNode *rels, int nmsgs, SharedInvalidationMessage *msgs, bool relcacheInval, bool forceSync, - int xactflags, TransactionId twophase_xid) + int xactflags, TransactionId twophase_xid, + const char *twophase_gid) { xl_xact_commit xlrec; xl_xact_xinfo xl_xinfo; @@ -5144,7 +5266,6 @@ XactLogCommitRecord(TimestampTz commit_time, xl_xact_invals xl_invals; xl_xact_twophase xl_twophase; xl_xact_origin xl_origin; - uint8 info; Assert(CritSectionCount > 0); @@ -5208,6 +5329,10 @@ XactLogCommitRecord(TimestampTz commit_time, { xl_xinfo.xinfo |= XACT_XINFO_HAS_TWOPHASE; xl_twophase.xid = twophase_xid; + Assert(twophase_gid != NULL); + + if (XLogLogicalInfoActive()) + xl_xinfo.xinfo |= XACT_XINFO_HAS_GID; } /* dump transaction origin information */ @@ -5258,7 +5383,11 @@ XactLogCommitRecord(TimestampTz commit_time, } if (xl_xinfo.xinfo & XACT_XINFO_HAS_TWOPHASE) + { XLogRegisterData((char *) (&xl_twophase), sizeof(xl_xact_twophase)); + if (xl_xinfo.xinfo & XACT_XINFO_HAS_GID) + XLogRegisterData((char *) twophase_gid, strlen(twophase_gid) + 1); + } if (xl_xinfo.xinfo & XACT_XINFO_HAS_ORIGIN) XLogRegisterData((char *) (&xl_origin), sizeof(xl_xact_origin)); @@ -5279,13 +5408,16 @@ XLogRecPtr XactLogAbortRecord(TimestampTz abort_time, int nsubxacts, TransactionId *subxacts, int nrels, RelFileNode *rels, - int xactflags, TransactionId twophase_xid) + int xactflags, TransactionId twophase_xid, + const char *twophase_gid) { xl_xact_abort xlrec; xl_xact_xinfo xl_xinfo; xl_xact_subxacts xl_subxacts; xl_xact_relfilenodes xl_relfilenodes; xl_xact_twophase xl_twophase; + xl_xact_dbinfo xl_dbinfo; + xl_xact_origin xl_origin; uint8 info; @@ -5323,6 +5455,28 @@ XactLogAbortRecord(TimestampTz abort_time, { xl_xinfo.xinfo |= XACT_XINFO_HAS_TWOPHASE; xl_twophase.xid = 
twophase_xid; + Assert(twophase_gid != NULL); + + if (XLogLogicalInfoActive()) + xl_xinfo.xinfo |= XACT_XINFO_HAS_GID; + } + + if (TransactionIdIsValid(twophase_xid) && XLogLogicalInfoActive()) + { + xl_xinfo.xinfo |= XACT_XINFO_HAS_DBINFO; + xl_dbinfo.dbId = MyDatabaseId; + xl_dbinfo.tsId = MyDatabaseTableSpace; + } + + /* dump transaction origin information only for abort prepared */ + if ((replorigin_session_origin != InvalidRepOriginId) && + TransactionIdIsValid(twophase_xid) && + XLogLogicalInfoActive()) + { + xl_xinfo.xinfo |= XACT_XINFO_HAS_ORIGIN; + + xl_origin.origin_lsn = replorigin_session_origin_lsn; + xl_origin.origin_timestamp = replorigin_session_origin_timestamp; } if (xl_xinfo.xinfo != 0) @@ -5337,6 +5491,9 @@ XactLogAbortRecord(TimestampTz abort_time, if (xl_xinfo.xinfo != 0) XLogRegisterData((char *) (&xl_xinfo), sizeof(xl_xinfo)); + if (xl_xinfo.xinfo & XACT_XINFO_HAS_DBINFO) + XLogRegisterData((char *) (&xl_dbinfo), sizeof(xl_dbinfo)); + if (xl_xinfo.xinfo & XACT_XINFO_HAS_SUBXACTS) { XLogRegisterData((char *) (&xl_subxacts), @@ -5354,7 +5511,17 @@ XactLogAbortRecord(TimestampTz abort_time, } if (xl_xinfo.xinfo & XACT_XINFO_HAS_TWOPHASE) + { XLogRegisterData((char *) (&xl_twophase), sizeof(xl_xact_twophase)); + if (xl_xinfo.xinfo & XACT_XINFO_HAS_GID) + XLogRegisterData((char *) twophase_gid, strlen(twophase_gid) + 1); + } + + if (xl_xinfo.xinfo & XACT_XINFO_HAS_ORIGIN) + XLogRegisterData((char *) (&xl_origin), sizeof(xl_xact_origin)); + + if (TransactionIdIsValid(twophase_xid)) + XLogSetRecordFlags(XLOG_INCLUDE_ORIGIN); return XLogInsert(RM_XACT_ID, info); } @@ -5370,7 +5537,6 @@ xact_redo_commit(xl_xact_parsed_commit *parsed, RepOriginId origin_id) { TransactionId max_xid; - int i; TimestampTz commit_time; Assert(TransactionIdIsValid(xid)); @@ -5456,12 +5622,10 @@ xact_redo_commit(xl_xact_parsed_commit *parsed, /* * Release locks, if any. We do this for both two phase and normal one * phase transactions. In effect we are ignoring the prepare phase and - * just going straight to lock release. At commit we release all locks - * via their top-level xid only, so no need to provide subxact list, - * which will save time when replaying commits. + * just going straight to lock release. 
*/ if (parsed->xinfo & XACT_XINFO_HAS_AE_LOCKS) - StandbyReleaseLockTree(xid, 0, NULL); + StandbyReleaseLockTree(xid, parsed->nsubxacts, parsed->subxacts); } if (parsed->xinfo & XACT_XINFO_HAS_ORIGIN) @@ -5491,16 +5655,8 @@ xact_redo_commit(xl_xact_parsed_commit *parsed, */ XLogFlush(lsn); - for (i = 0; i < parsed->nrels; i++) - { - SMgrRelation srel = smgropen(parsed->xnodes[i], InvalidBackendId); - ForkNumber fork; - - for (fork = 0; fork <= MAX_FORKNUM; fork++) - XLogDropRelation(parsed->xnodes[i], fork); - smgrdounlink(srel, true); - smgrclose(srel); - } + /* Make sure files supposed to be dropped are dropped */ + DropRelationFiles(parsed->xnodes, parsed->nrels, true); } /* @@ -5539,7 +5695,6 @@ xact_redo_commit(xl_xact_parsed_commit *parsed, static void xact_redo_abort(xl_xact_parsed_abort *parsed, TransactionId xid) { - int i; TransactionId max_xid; Assert(TransactionIdIsValid(xid)); @@ -5604,16 +5759,7 @@ xact_redo_abort(xl_xact_parsed_abort *parsed, TransactionId xid) } /* Make sure files supposed to be dropped are dropped */ - for (i = 0; i < parsed->nrels; i++) - { - SMgrRelation srel = smgropen(parsed->xnodes[i], InvalidBackendId); - ForkNumber fork; - - for (fork = 0; fork <= MAX_FORKNUM; fork++) - XLogDropRelation(parsed->xnodes[i], fork); - smgrdounlink(srel, true); - smgrclose(srel); - } + DropRelationFiles(parsed->xnodes, parsed->nrels, true); } void @@ -5677,7 +5823,8 @@ xact_redo(XLogReaderState *record) LWLockAcquire(TwoPhaseStateLock, LW_EXCLUSIVE); PrepareRedoAdd(XLogRecGetData(record), record->ReadRecPtr, - record->EndRecPtr); + record->EndRecPtr, + XLogRecGetOrigin(record)); LWLockRelease(TwoPhaseStateLock); } else if (info == XLOG_XACT_ASSIGNMENT) diff --git a/src/backend/access/transam/xlog.c b/src/backend/access/transam/xlog.c index df4843f409..7eed5866d2 100644 --- a/src/backend/access/transam/xlog.c +++ b/src/backend/access/transam/xlog.c @@ -4,7 +4,7 @@ * PostgreSQL write-ahead log manager * * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/backend/access/transam/xlog.c @@ -78,12 +78,6 @@ extern uint32 bootstrap_data_checksum_version; -/* File path names (all relative to $PGDATA) */ -#define RECOVERY_COMMAND_FILE "recovery.conf" -#define RECOVERY_COMMAND_DONE "recovery.done" -#define PROMOTE_SIGNAL_FILE "promote" -#define FALLBACK_PROMOTE_SIGNAL_FILE "fallback_promote" - /* User-settable parameters */ int max_wal_size_mb = 1024; /* 1 GB */ @@ -110,6 +104,8 @@ int wal_retrieve_retry_interval = 5000; bool XLOG_DEBUG = false; #endif +int wal_segment_size = DEFAULT_XLOG_SEG_SIZE; + /* * Number of WAL insertion locks to use. A higher value allows more insertions * to happen concurrently, but adds some CPU overhead to flushing the WAL, @@ -731,14 +727,16 @@ static ControlFileData *ControlFile = NULL; (((recptr) / XLOG_BLCKSZ) % (XLogCtl->XLogCacheBlck + 1)) /* - * These are the number of bytes in a WAL page and segment usable for WAL data. + * These are the number of bytes in a WAL page usable for WAL data. 
*/ #define UsableBytesInPage (XLOG_BLCKSZ - SizeOfXLogShortPHD) -#define UsableBytesInSegment ((XLOG_SEG_SIZE / XLOG_BLCKSZ) * UsableBytesInPage - (SizeOfXLogLongPHD - SizeOfXLogShortPHD)) /* Convert min_wal_size_mb and max wal_size_mb to equivalent segment count */ -#define ConvertToXSegs(x) \ - (x / (XLOG_SEG_SIZE / (1024 * 1024))) +#define ConvertToXSegs(x, segsize) \ + (x / ((segsize) / (1024 * 1024))) + +/* The number of bytes in a WAL segment usable for WAL data. */ +static int UsableBytesInSegment; /* * Private, possibly out-of-date copy of shared LogwrtResult. @@ -817,8 +815,14 @@ static XLogSource XLogReceiptSource = 0; /* XLOG_FROM_* code */ static XLogRecPtr ReadRecPtr; /* start of last record read */ static XLogRecPtr EndRecPtr; /* end+1 of last record read */ -static XLogRecPtr minRecoveryPoint; /* local copy of - * ControlFile->minRecoveryPoint */ +/* + * Local copies of equivalent fields in the control file. When running + * crash recovery, minRecoveryPoint is set to InvalidXLogRecPtr as we + * expect to replay all the WAL available, and updateMinRecoveryPoint is + * switched to false to prevent any updates while replaying records. + * Those values are kept consistent as long as crash recovery runs. + */ +static XLogRecPtr minRecoveryPoint; static TimeLineID minRecoveryPointTLI; static bool updateMinRecoveryPoint = true; @@ -877,8 +881,9 @@ static bool WaitForWALToBecomeAvailable(XLogRecPtr RecPtr, bool randAccess, static int emode_for_corrupt_record(int emode, XLogRecPtr RecPtr); static void XLogFileClose(void); static void PreallocXlogFiles(XLogRecPtr endptr); -static void RemoveOldXlogFiles(XLogSegNo segno, XLogRecPtr PriorRedoPtr, XLogRecPtr endptr); -static void RemoveXlogFile(const char *segname, XLogRecPtr PriorRedoPtr, XLogRecPtr endptr); +static void RemoveTempXlogFiles(void); +static void RemoveOldXlogFiles(XLogSegNo segno, XLogRecPtr RedoRecPtr, XLogRecPtr endptr); +static void RemoveXlogFile(const char *segname, XLogRecPtr RedoRecPtr, XLogRecPtr endptr); static void UpdateLastRemovedPtr(char *filename); static void ValidateXLOGDirectoryStructure(void); static void CleanupBackupHistory(void); @@ -933,7 +938,7 @@ static void WALInsertLockUpdateInsertingAt(XLogRecPtr insertingAt); * * If 'fpw_lsn' is valid, it is the oldest LSN among the pages that this * WAL record applies to, that were not included in the record as full page - * images. If fpw_lsn >= RedoRecPtr, the function does not perform the + * images. If fpw_lsn <= RedoRecPtr, the function does not perform the * insertion and returns InvalidXLogRecPtr. The caller can then recalculate * which pages need a full-page image, and retry. If fpw_lsn is invalid, the * record is always inserted. @@ -966,6 +971,7 @@ XLogInsertRecord(XLogRecData *rdata, info == XLOG_SWITCH); XLogRecPtr StartPos; XLogRecPtr EndPos; + bool prevDoPageWrites = doPageWrites; /* we assume that all of the record header is in the first chunk */ Assert(rdata->len >= SizeOfXLogRecord); @@ -1013,10 +1019,14 @@ XLogInsertRecord(XLogRecData *rdata, WALInsertLockAcquire(); /* - * Check to see if my copy of RedoRecPtr or doPageWrites is out of date. - * If so, may have to go back and have the caller recompute everything. - * This can only happen just after a checkpoint, so it's better to be slow - * in this case and fast otherwise. + * Check to see if my copy of RedoRecPtr is out of date. If so, may have + * to go back and have the caller recompute everything. 
This can only + * happen just after a checkpoint, so it's better to be slow in this case + * and fast otherwise. + * + * Also check to see if fullPageWrites or forcePageWrites was just turned + * on; if we weren't already doing full-page writes then go back and + * recompute. * * If we aren't doing full-page writes then RedoRecPtr doesn't actually * affect the contents of the XLOG record, so we'll update our local copy @@ -1031,7 +1041,9 @@ XLogInsertRecord(XLogRecData *rdata, } doPageWrites = (Insert->fullPageWrites || Insert->forcePageWrites); - if (fpw_lsn != InvalidXLogRecPtr && fpw_lsn <= RedoRecPtr && doPageWrites) + if (doPageWrites && + (!prevDoPageWrites || + (fpw_lsn != InvalidXLogRecPtr && fpw_lsn <= RedoRecPtr))) { /* * Oops, some buffer now needs to be backed up that the caller didn't @@ -1137,7 +1149,9 @@ XLogInsertRecord(XLogRecData *rdata, EndPos = StartPos + SizeOfXLogRecord; if (StartPos / XLOG_BLCKSZ != EndPos / XLOG_BLCKSZ) { - if (EndPos % XLOG_SEG_SIZE == EndPos % XLOG_BLCKSZ) + uint64 offset = XLogSegmentOffset(EndPos, wal_segment_size); + + if (offset == EndPos % XLOG_BLCKSZ) EndPos += SizeOfXLogLongPHD; else EndPos += SizeOfXLogShortPHD; @@ -1170,7 +1184,7 @@ XLogInsertRecord(XLogRecData *rdata, appendBinaryStringInfo(&recordBuf, rdata->data, rdata->len); if (!debug_reader) - debug_reader = XLogReaderAllocate(NULL, NULL); + debug_reader = XLogReaderAllocate(wal_segment_size, NULL, NULL); if (!debug_reader) { @@ -1296,7 +1310,7 @@ ReserveXLogSwitch(XLogRecPtr *StartPos, XLogRecPtr *EndPos, XLogRecPtr *PrevPtr) startbytepos = Insert->CurrBytePos; ptr = XLogBytePosToEndRecPtr(startbytepos); - if (ptr % XLOG_SEG_SIZE == 0) + if (XLogSegmentOffset(ptr, wal_segment_size) == 0) { SpinLockRelease(&Insert->insertpos_lck); *EndPos = *StartPos = ptr; @@ -1309,8 +1323,8 @@ ReserveXLogSwitch(XLogRecPtr *StartPos, XLogRecPtr *EndPos, XLogRecPtr *PrevPtr) *StartPos = XLogBytePosToRecPtr(startbytepos); *EndPos = XLogBytePosToEndRecPtr(endbytepos); - segleft = XLOG_SEG_SIZE - ((*EndPos) % XLOG_SEG_SIZE); - if (segleft != XLOG_SEG_SIZE) + segleft = wal_segment_size - XLogSegmentOffset(*EndPos, wal_segment_size); + if (segleft != wal_segment_size) { /* consume the rest of the segment */ *EndPos += segleft; @@ -1323,7 +1337,7 @@ ReserveXLogSwitch(XLogRecPtr *StartPos, XLogRecPtr *EndPos, XLogRecPtr *PrevPtr) *PrevPtr = XLogBytePosToRecPtr(prevbytepos); - Assert((*EndPos) % XLOG_SEG_SIZE == 0); + Assert(XLogSegmentOffset(*EndPos, wal_segment_size) == 0); Assert(XLogRecPtrToBytePos(*EndPos) == endbytepos); Assert(XLogRecPtrToBytePos(*StartPos) == startbytepos); Assert(XLogRecPtrToBytePos(*PrevPtr) == prevbytepos); @@ -1501,7 +1515,7 @@ CopyXLogRecordToWAL(int write_len, bool isLogSwitch, XLogRecData *rdata, pagehdr->xlp_info |= XLP_FIRST_IS_CONTRECORD; /* skip over the page header */ - if (CurrPos % XLogSegSize == 0) + if (XLogSegmentOffset(CurrPos, wal_segment_size) == 0) { CurrPos += SizeOfXLogLongPHD; currpos += SizeOfXLogLongPHD; @@ -1527,30 +1541,50 @@ CopyXLogRecordToWAL(int write_len, bool isLogSwitch, XLogRecData *rdata, /* * If this was an xlog-switch, it's not enough to write the switch record, - * we also have to consume all the remaining space in the WAL segment. We - * have already reserved it for us, but we still need to make sure it's - * allocated and zeroed in the WAL buffers so that when the caller (or - * someone else) does XLogWrite(), it can really write out all the zeros. + * we also have to consume all the remaining space in the WAL segment. 
We + * have already reserved that space, but we need to actually fill it. */ - if (isLogSwitch && CurrPos % XLOG_SEG_SIZE != 0) + if (isLogSwitch && XLogSegmentOffset(CurrPos, wal_segment_size) != 0) { /* An xlog-switch record doesn't contain any data besides the header */ Assert(write_len == SizeOfXLogRecord); - /* - * We do this one page at a time, to make sure we don't deadlock - * against ourselves if wal_buffers < XLOG_SEG_SIZE. - */ - Assert(EndPos % XLogSegSize == 0); + /* Assert that we did reserve the right amount of space */ + Assert(XLogSegmentOffset(EndPos, wal_segment_size) == 0); - /* Use up all the remaining space on the first page */ + /* Use up all the remaining space on the current page */ CurrPos += freespace; + /* + * Cause all remaining pages in the segment to be flushed, leaving the + * XLog position where it should be, at the start of the next segment. + * We do this one page at a time, to make sure we don't deadlock + * against ourselves if wal_buffers < wal_segment_size. + */ while (CurrPos < EndPos) { - /* initialize the next page (if not initialized already) */ - WALInsertLockUpdateInsertingAt(CurrPos); - AdvanceXLInsertBuffer(CurrPos, false); + /* + * The minimal action to flush the page would be to call + * WALInsertLockUpdateInsertingAt(CurrPos) followed by + * AdvanceXLInsertBuffer(...). The page would be left initialized + * mostly to zeros, except for the page header (always the short + * variant, as this is never a segment's first page). + * + * The large vistas of zeros are good for compressibility, but the + * headers interrupting them every XLOG_BLCKSZ (with values that + * differ from page to page) are not. The effect varies with + * compression tool, but bzip2 for instance compresses about an + * order of magnitude worse if those headers are left in place. + * + * Rather than complicating AdvanceXLInsertBuffer itself (which is + * called in heavily-loaded circumstances as well as this lightly- + * loaded one) with variant behavior, we just use GetXLogBuffer + * (which itself calls the two methods we need) to get the pointer + * and zero most of the page. Then we just zero the page header. + */ + currpos = GetXLogBuffer(CurrPos); + MemSet(currpos, 0, SizeOfXLogShortPHD); + CurrPos += XLOG_BLCKSZ; } } @@ -1866,10 +1900,10 @@ GetXLogBuffer(XLogRecPtr ptr) * the page header. 
*/ if (ptr % XLOG_BLCKSZ == SizeOfXLogShortPHD && - ptr % XLOG_SEG_SIZE > XLOG_BLCKSZ) + XLogSegmentOffset(ptr, wal_segment_size) > XLOG_BLCKSZ) initializedUpto = ptr - SizeOfXLogShortPHD; else if (ptr % XLOG_BLCKSZ == SizeOfXLogLongPHD && - ptr % XLOG_SEG_SIZE < XLOG_BLCKSZ) + XLogSegmentOffset(ptr, wal_segment_size) < XLOG_BLCKSZ) initializedUpto = ptr - SizeOfXLogLongPHD; else initializedUpto = ptr; @@ -1939,7 +1973,7 @@ XLogBytePosToRecPtr(uint64 bytepos) seg_offset += fullpages * XLOG_BLCKSZ + bytesleft + SizeOfXLogShortPHD; } - XLogSegNoOffsetToRecPtr(fullsegs, seg_offset, result); + XLogSegNoOffsetToRecPtr(fullsegs, seg_offset, wal_segment_size, result); return result; } @@ -1985,7 +2019,7 @@ XLogBytePosToEndRecPtr(uint64 bytepos) seg_offset += fullpages * XLOG_BLCKSZ + bytesleft + SizeOfXLogShortPHD; } - XLogSegNoOffsetToRecPtr(fullsegs, seg_offset, result); + XLogSegNoOffsetToRecPtr(fullsegs, seg_offset, wal_segment_size, result); return result; } @@ -2001,9 +2035,9 @@ XLogRecPtrToBytePos(XLogRecPtr ptr) uint32 offset; uint64 result; - XLByteToSeg(ptr, fullsegs); + XLByteToSeg(ptr, fullsegs, wal_segment_size); - fullpages = (ptr % XLOG_SEG_SIZE) / XLOG_BLCKSZ; + fullpages = (XLogSegmentOffset(ptr, wal_segment_size)) / XLOG_BLCKSZ; offset = ptr % XLOG_BLCKSZ; if (fullpages == 0) @@ -2151,7 +2185,7 @@ AdvanceXLInsertBuffer(XLogRecPtr upto, bool opportunistic) /* * If online backup is not in progress, mark the header to indicate - * that* WAL records beginning in this page have removable backup + * that WAL records beginning in this page have removable backup * blocks. This allows the WAL archiver to know whether it is safe to * compress archived WAL data by transforming full-block records into * the non-full-block format. It is sufficient to record this at the @@ -2168,12 +2202,12 @@ AdvanceXLInsertBuffer(XLogRecPtr upto, bool opportunistic) /* * If first page of an XLOG segment file, make it a long header. */ - if ((NewPage->xlp_pageaddr % XLogSegSize) == 0) + if ((XLogSegmentOffset(NewPage->xlp_pageaddr, wal_segment_size)) == 0) { XLogLongPageHeader NewLongPage = (XLogLongPageHeader) NewPage; NewLongPage->xlp_sysid = ControlFile->system_identifier; - NewLongPage->xlp_seg_size = XLogSegSize; + NewLongPage->xlp_seg_size = wal_segment_size; NewLongPage->xlp_xlog_blcksz = XLOG_BLCKSZ; NewPage->xlp_info |= XLP_LONG_HEADER; } @@ -2215,12 +2249,18 @@ CalculateCheckpointSegments(void) * Calculate the distance at which to trigger a checkpoint, to avoid * exceeding max_wal_size_mb. This is based on two assumptions: * - * a) we keep WAL for two checkpoint cycles, back to the "prev" checkpoint. + * a) we keep WAL for only one checkpoint cycle (prior to PG11 we kept + * WAL for two checkpoint cycles to allow us to recover from the + * secondary checkpoint if the first checkpoint failed, though we + * only did this on the master anyway, not on standby. Keeping just + * one checkpoint simplifies processing and reduces disk space in + * many smaller databases.) * b) during checkpoint, we consume checkpoint_completion_target * * number of segments consumed between checkpoints. *------- */ - target = (double) ConvertToXSegs(max_wal_size_mb) / (2.0 + CheckPointCompletionTarget); + target = (double) ConvertToXSegs(max_wal_size_mb, wal_segment_size) / + (1.0 + CheckPointCompletionTarget); /* round down */ CheckPointSegments = (int) target; @@ -2248,7 +2288,7 @@ assign_checkpoint_completion_target(double newval, void *extra) * XLOG segments? Returns the highest segment that should be preallocated. 
*/ static XLogSegNo -XLOGfileslop(XLogRecPtr PriorRedoPtr) +XLOGfileslop(XLogRecPtr RedoRecPtr) { XLogSegNo minSegNo; XLogSegNo maxSegNo; @@ -2260,8 +2300,10 @@ XLOGfileslop(XLogRecPtr PriorRedoPtr) * correspond to. Always recycle enough segments to meet the minimum, and * remove enough segments to stay below the maximum. */ - minSegNo = PriorRedoPtr / XLOG_SEG_SIZE + ConvertToXSegs(min_wal_size_mb) - 1; - maxSegNo = PriorRedoPtr / XLOG_SEG_SIZE + ConvertToXSegs(max_wal_size_mb) - 1; + minSegNo = RedoRecPtr / wal_segment_size + + ConvertToXSegs(min_wal_size_mb, wal_segment_size) - 1; + maxSegNo = RedoRecPtr / wal_segment_size + + ConvertToXSegs(max_wal_size_mb, wal_segment_size) - 1; /* * Between those limits, recycle enough segments to get us through to the @@ -2270,27 +2312,13 @@ XLOGfileslop(XLogRecPtr PriorRedoPtr) * To estimate where the next checkpoint will finish, assume that the * system runs steadily consuming CheckPointDistanceEstimate bytes between * every checkpoint. - * - * The reason this calculation is done from the prior checkpoint, not the - * one that just finished, is that this behaves better if some checkpoint - * cycles are abnormally short, like if you perform a manual checkpoint - * right after a timed one. The manual checkpoint will make almost a full - * cycle's worth of WAL segments available for recycling, because the - * segments from the prior's prior, fully-sized checkpoint cycle are no - * longer needed. However, the next checkpoint will make only few segments - * available for recycling, the ones generated between the timed - * checkpoint and the manual one right after that. If at the manual - * checkpoint we only retained enough segments to get us to the next timed - * one, and removed the rest, then at the next checkpoint we would not - * have enough segments around for recycling, to get us to the checkpoint - * after that. Basing the calculations on the distance from the prior redo - * pointer largely fixes that problem. - */ - distance = (2.0 + CheckPointCompletionTarget) * CheckPointDistanceEstimate; + */ + distance = (1.0 + CheckPointCompletionTarget) * CheckPointDistanceEstimate; /* add 10% for good measure. */ distance *= 1.10; - recycleSegNo = (XLogSegNo) ceil(((double) PriorRedoPtr + distance) / XLOG_SEG_SIZE); + recycleSegNo = (XLogSegNo) ceil(((double) RedoRecPtr + distance) / + wal_segment_size); if (recycleSegNo < minSegNo) recycleSegNo = minSegNo; @@ -2314,7 +2342,7 @@ XLogCheckpointNeeded(XLogSegNo new_segno) { XLogSegNo old_segno; - XLByteToSeg(RedoRecPtr, old_segno); + XLByteToSeg(RedoRecPtr, old_segno, wal_segment_size); if (new_segno >= old_segno + (uint64) (CheckPointSegments - 1)) return true; @@ -2324,7 +2352,7 @@ XLogCheckpointNeeded(XLogSegNo new_segno) /* * Write and/or fsync the log at least as far as WriteRqst indicates. * - * If flexible == TRUE, we don't have to write as far as WriteRqst, but + * If flexible == true, we don't have to write as far as WriteRqst, but * may stop at any convenient boundary (such as a cache or logfile boundary). * This option allows us to avoid uselessly issuing multiple writes when a * single one would do. @@ -2392,7 +2420,8 @@ XLogWrite(XLogwrtRqst WriteRqst, bool flexible) LogwrtResult.Write = EndPtr; ispartialpage = WriteRqst.Write < LogwrtResult.Write; - if (!XLByteInPrevSeg(LogwrtResult.Write, openLogSegNo)) + if (!XLByteInPrevSeg(LogwrtResult.Write, openLogSegNo, + wal_segment_size)) { /* * Switch to new logfile segment. 
We cannot have any pending @@ -2401,7 +2430,8 @@ XLogWrite(XLogwrtRqst WriteRqst, bool flexible) Assert(npages == 0); if (openLogFile >= 0) XLogFileClose(); - XLByteToPrevSeg(LogwrtResult.Write, openLogSegNo); + XLByteToPrevSeg(LogwrtResult.Write, openLogSegNo, + wal_segment_size); /* create/use new log file */ use_existent = true; @@ -2412,7 +2442,8 @@ XLogWrite(XLogwrtRqst WriteRqst, bool flexible) /* Make sure we have the current logfile open */ if (openLogFile < 0) { - XLByteToPrevSeg(LogwrtResult.Write, openLogSegNo); + XLByteToPrevSeg(LogwrtResult.Write, openLogSegNo, + wal_segment_size); openLogFile = XLogFileOpen(openLogSegNo); openLogOff = 0; } @@ -2422,7 +2453,8 @@ XLogWrite(XLogwrtRqst WriteRqst, bool flexible) { /* first of group */ startidx = curridx; - startoffset = (LogwrtResult.Write - XLOG_BLCKSZ) % XLogSegSize; + startoffset = XLogSegmentOffset(LogwrtResult.Write - XLOG_BLCKSZ, + wal_segment_size); } npages++; @@ -2435,7 +2467,7 @@ XLogWrite(XLogwrtRqst WriteRqst, bool flexible) last_iteration = WriteRqst.Write <= LogwrtResult.Write; finishing_seg = !ispartialpage && - (startoffset + npages * XLOG_BLCKSZ) >= XLogSegSize; + (startoffset + npages * XLOG_BLCKSZ) >= wal_segment_size; if (last_iteration || curridx == XLogCtl->XLogCacheBlck || @@ -2446,18 +2478,6 @@ XLogWrite(XLogwrtRqst WriteRqst, bool flexible) Size nleft; int written; - /* Need to seek in the file? */ - if (openLogOff != startoffset) - { - if (lseek(openLogFile, (off_t) startoffset, SEEK_SET) < 0) - ereport(PANIC, - (errcode_for_file_access(), - errmsg("could not seek in log file %s to offset %u: %m", - XLogFileNameP(ThisTimeLineID, openLogSegNo), - startoffset))); - openLogOff = startoffset; - } - /* OK to write the page(s) */ from = XLogCtl->pages + startidx * (Size) XLOG_BLCKSZ; nbytes = npages * (Size) XLOG_BLCKSZ; @@ -2466,7 +2486,7 @@ XLogWrite(XLogwrtRqst WriteRqst, bool flexible) { errno = 0; pgstat_report_wait_start(WAIT_EVENT_WAL_WRITE); - written = write(openLogFile, from, nleft); + written = pg_pwrite(openLogFile, from, nleft, startoffset); pgstat_report_wait_end(); if (written <= 0) { @@ -2481,6 +2501,7 @@ XLogWrite(XLogwrtRqst WriteRqst, bool flexible) } nleft -= written; from += written; + startoffset += written; } while (nleft > 0); /* Update state for write */ @@ -2562,11 +2583,13 @@ XLogWrite(XLogwrtRqst WriteRqst, bool flexible) sync_method != SYNC_METHOD_OPEN_DSYNC) { if (openLogFile >= 0 && - !XLByteInPrevSeg(LogwrtResult.Write, openLogSegNo)) + !XLByteInPrevSeg(LogwrtResult.Write, openLogSegNo, + wal_segment_size)) XLogFileClose(); if (openLogFile < 0) { - XLByteToPrevSeg(LogwrtResult.Write, openLogSegNo); + XLByteToPrevSeg(LogwrtResult.Write, openLogSegNo, + wal_segment_size); openLogFile = XLogFileOpen(openLogSegNo); openLogOff = 0; } @@ -2685,18 +2708,30 @@ UpdateMinRecoveryPoint(XLogRecPtr lsn, bool force) if (!updateMinRecoveryPoint || (!force && lsn <= minRecoveryPoint)) return; + /* + * An invalid minRecoveryPoint means that we need to recover all the WAL, + * i.e., we're doing crash recovery. We never modify the control file's + * value in that case, so we can short-circuit future checks here too. The + * local values of minRecoveryPoint and minRecoveryPointTLI should not be + * updated until crash recovery finishes. We only do this for the startup + * process as it should not update its own reference of minRecoveryPoint + * until it has finished crash recovery to make sure that all WAL + * available is replayed in this case. 
This also saves from extra locks + * taken on the control file from the startup process. + */ + if (XLogRecPtrIsInvalid(minRecoveryPoint) && InRecovery) + { + updateMinRecoveryPoint = false; + return; + } + LWLockAcquire(ControlFileLock, LW_EXCLUSIVE); /* update local copy */ minRecoveryPoint = ControlFile->minRecoveryPoint; minRecoveryPointTLI = ControlFile->minRecoveryPointTLI; - /* - * An invalid minRecoveryPoint means that we need to recover all the WAL, - * i.e., we're doing crash recovery. We never modify the control file's - * value in that case, so we can short-circuit future checks here too. - */ - if (minRecoveryPoint == 0) + if (XLogRecPtrIsInvalid(minRecoveryPoint)) updateMinRecoveryPoint = false; else if (force || minRecoveryPoint < lsn) { @@ -2939,7 +2974,7 @@ XLogFlush(XLogRecPtr record) * * This routine is invoked periodically by the background walwriter process. * - * Returns TRUE if there was any work to do, even if we skipped flushing due + * Returns true if there was any work to do, even if we skipped flushing due * to wal_writer_delay/wal_writer_flush_after. */ bool @@ -2982,7 +3017,8 @@ XLogBackgroundFlush(void) { if (openLogFile >= 0) { - if (!XLByteInPrevSeg(LogwrtResult.Write, openLogSegNo)) + if (!XLByteInPrevSeg(LogwrtResult.Write, openLogSegNo, + wal_segment_size)) { XLogFileClose(); } @@ -3083,7 +3119,18 @@ XLogNeedsFlush(XLogRecPtr record) */ if (RecoveryInProgress()) { - /* Quick exit if already known updated */ + /* + * An invalid minRecoveryPoint means that we need to recover all the + * WAL, i.e., we're doing crash recovery. We never modify the control + * file's value in that case, so we can short-circuit future checks + * here too. This triggers a quick exit path for the startup process, + * which cannot update its local copy of minRecoveryPoint as long as + * it has not replayed all WAL available when doing crash recovery. + */ + if (XLogRecPtrIsInvalid(minRecoveryPoint) && InRecovery) + updateMinRecoveryPoint = false; + + /* Quick exit if already known to be updated or cannot be updated */ if (record <= minRecoveryPoint || !updateMinRecoveryPoint) return false; @@ -3098,12 +3145,11 @@ XLogNeedsFlush(XLogRecPtr record) LWLockRelease(ControlFileLock); /* - * An invalid minRecoveryPoint means that we need to recover all the - * WAL, i.e., we're doing crash recovery. We never modify the control - * file's value in that case, so we can short-circuit future checks - * here too. + * Check minRecoveryPoint for any other process than the startup + * process doing crash recovery, which should not update the control + * file value if crash recovery is still running. */ - if (minRecoveryPoint == 0) + if (XLogRecPtrIsInvalid(minRecoveryPoint)) updateMinRecoveryPoint = false; /* check again */ @@ -3134,12 +3180,12 @@ XLogNeedsFlush(XLogRecPtr record) * * log, seg: identify segment to be created/opened. * - * *use_existent: if TRUE, OK to use a pre-existing file (else, any - * pre-existing file will be deleted). On return, TRUE if a pre-existing + * *use_existent: if true, OK to use a pre-existing file (else, any + * pre-existing file will be deleted). On return, true if a pre-existing * file was used. * - * use_lock: if TRUE, acquire ControlFileLock while moving file into - * place. This should be TRUE except during bootstrap log creation. The + * use_lock: if true, acquire ControlFileLock while moving file into + * place. This should be true except during bootstrap log creation. The * caller must *not* hold the lock at call. * * Returns FD of opened file. 
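[Editorial note, not part of the patch.] Throughout these xlog.c hunks the compile-time XLOG_SEG_SIZE/XLogSegSize constant gives way to the run-time wal_segment_size, so the segment-arithmetic macros (XLByteToSeg, XLByteToPrevSeg, XLogSegmentOffset, XLogFilePath, ConvertToXSegs, ...) all gain a segment-size argument. A standalone sketch of the underlying arithmetic; the types and function names below are local stand-ins rather than PostgreSQL APIs, and the real macros additionally require the size to be a power of two (see the IsValidWalSegSize check at the end of this section).

#include <stdint.h>
#include <stdio.h>

typedef uint64_t XLogRecPtr;	/* stand-in for the real typedef */
typedef uint64_t XLogSegNo;

/* Roughly what XLByteToSeg(lsn, segno, wal_segment_size) computes. */
static XLogSegNo
lsn_to_segno(XLogRecPtr lsn, uint64_t wal_segment_size)
{
	return lsn / wal_segment_size;
}

/* Roughly what XLogSegmentOffset(lsn, wal_segment_size) computes. */
static uint32_t
lsn_to_offset(XLogRecPtr lsn, uint64_t wal_segment_size)
{
	/* the real macro uses & (size - 1), valid because size is a power of 2 */
	return (uint32_t) (lsn % wal_segment_size);
}

int
main(void)
{
	uint64_t	wal_segment_size = UINT64_C(16) * 1024 * 1024;	/* default 16 MB */
	XLogRecPtr	lsn = UINT64_C(0x16B374D848);	/* arbitrary example LSN */

	printf("segment %llu, offset %u\n",
		   (unsigned long long) lsn_to_segno(lsn, wal_segment_size),
		   lsn_to_offset(lsn, wal_segment_size));
	return 0;
}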
@@ -3154,22 +3200,20 @@ XLogFileInit(XLogSegNo logsegno, bool *use_existent, bool use_lock) { char path[MAXPGPATH]; char tmppath[MAXPGPATH]; - char zbuffer_raw[XLOG_BLCKSZ + MAXIMUM_ALIGNOF]; - char *zbuffer; + PGAlignedXLogBlock zbuffer; XLogSegNo installed_segno; XLogSegNo max_segno; int fd; int nbytes; - XLogFilePath(path, ThisTimeLineID, logsegno); + XLogFilePath(path, ThisTimeLineID, logsegno, wal_segment_size); /* * Try to use existent file (checkpoint maker may have created it already) */ if (*use_existent) { - fd = BasicOpenFile(path, O_RDWR | PG_BINARY | get_sync_bit(sync_method), - S_IRUSR | S_IWUSR); + fd = BasicOpenFile(path, O_RDWR | PG_BINARY | get_sync_bit(sync_method)); if (fd < 0) { if (errno != ENOENT) @@ -3194,8 +3238,7 @@ XLogFileInit(XLogSegNo logsegno, bool *use_existent, bool use_lock) unlink(tmppath); /* do not use get_sync_bit() here --- want to fsync only at end of fill */ - fd = BasicOpenFile(tmppath, O_RDWR | O_CREAT | O_EXCL | PG_BINARY, - S_IRUSR | S_IWUSR); + fd = BasicOpenFile(tmppath, O_RDWR | O_CREAT | O_EXCL | PG_BINARY); if (fd < 0) ereport(ERROR, (errcode_for_file_access(), @@ -3209,17 +3252,13 @@ XLogFileInit(XLogSegNo logsegno, bool *use_existent, bool use_lock) * fsync below) that all the indirect blocks are down on disk. Therefore, * fdatasync(2) or O_DSYNC will be sufficient to sync future writes to the * log file. - * - * Note: ensure the buffer is reasonably well-aligned; this may save a few - * cycles transferring data to the kernel. */ - zbuffer = (char *) MAXALIGN(zbuffer_raw); - memset(zbuffer, 0, XLOG_BLCKSZ); - for (nbytes = 0; nbytes < XLogSegSize; nbytes += XLOG_BLCKSZ) + memset(zbuffer.data, 0, XLOG_BLCKSZ); + for (nbytes = 0; nbytes < wal_segment_size; nbytes += XLOG_BLCKSZ) { errno = 0; pgstat_report_wait_start(WAIT_EVENT_WAL_INIT_WRITE); - if ((int) write(fd, zbuffer, XLOG_BLCKSZ) != (int) XLOG_BLCKSZ) + if ((int) write(fd, zbuffer.data, XLOG_BLCKSZ) != (int) XLOG_BLCKSZ) { int save_errno = errno; @@ -3243,7 +3282,10 @@ XLogFileInit(XLogSegNo logsegno, bool *use_existent, bool use_lock) pgstat_report_wait_start(WAIT_EVENT_WAL_INIT_SYNC); if (pg_fsync(fd) != 0) { + int save_errno = errno; + close(fd); + errno = save_errno; ereport(ERROR, (errcode_for_file_access(), errmsg("could not fsync file \"%s\": %m", tmppath))); @@ -3291,8 +3333,7 @@ XLogFileInit(XLogSegNo logsegno, bool *use_existent, bool use_lock) *use_existent = false; /* Now open original target segment (might not be file I just made) */ - fd = BasicOpenFile(path, O_RDWR | PG_BINARY | get_sync_bit(sync_method), - S_IRUSR | S_IWUSR); + fd = BasicOpenFile(path, O_RDWR | PG_BINARY | get_sync_bit(sync_method)); if (fd < 0) ereport(ERROR, (errcode_for_file_access(), @@ -3324,7 +3365,7 @@ XLogFileCopy(XLogSegNo destsegno, TimeLineID srcTLI, XLogSegNo srcsegno, { char path[MAXPGPATH]; char tmppath[MAXPGPATH]; - char buffer[XLOG_BLCKSZ]; + PGAlignedXLogBlock buffer; int srcfd; int fd; int nbytes; @@ -3332,8 +3373,8 @@ XLogFileCopy(XLogSegNo destsegno, TimeLineID srcTLI, XLogSegNo srcsegno, /* * Open the source file */ - XLogFilePath(path, srcTLI, srcsegno); - srcfd = OpenTransientFile(path, O_RDONLY | PG_BINARY, 0); + XLogFilePath(path, srcTLI, srcsegno, wal_segment_size); + srcfd = OpenTransientFile(path, O_RDONLY | PG_BINARY); if (srcfd < 0) ereport(ERROR, (errcode_for_file_access(), @@ -3347,8 +3388,7 @@ XLogFileCopy(XLogSegNo destsegno, TimeLineID srcTLI, XLogSegNo srcsegno, unlink(tmppath); /* do not use get_sync_bit() here --- want to fsync only at end of fill */ - fd = 
OpenTransientFile(tmppath, O_RDWR | O_CREAT | O_EXCL | PG_BINARY, - S_IRUSR | S_IWUSR); + fd = OpenTransientFile(tmppath, O_RDWR | O_CREAT | O_EXCL | PG_BINARY); if (fd < 0) ereport(ERROR, (errcode_for_file_access(), @@ -3357,7 +3397,7 @@ XLogFileCopy(XLogSegNo destsegno, TimeLineID srcTLI, XLogSegNo srcsegno, /* * Do the data copying. */ - for (nbytes = 0; nbytes < XLogSegSize; nbytes += sizeof(buffer)) + for (nbytes = 0; nbytes < wal_segment_size; nbytes += sizeof(buffer)) { int nread; @@ -3368,31 +3408,34 @@ XLogFileCopy(XLogSegNo destsegno, TimeLineID srcTLI, XLogSegNo srcsegno, * zeros. */ if (nread < sizeof(buffer)) - memset(buffer, 0, sizeof(buffer)); + memset(buffer.data, 0, sizeof(buffer)); if (nread > 0) { + int r; + if (nread > sizeof(buffer)) nread = sizeof(buffer); - errno = 0; pgstat_report_wait_start(WAIT_EVENT_WAL_COPY_READ); - if (read(srcfd, buffer, nread) != nread) + r = read(srcfd, buffer.data, nread); + if (r != nread) { - if (errno != 0) + if (r < 0) ereport(ERROR, (errcode_for_file_access(), errmsg("could not read file \"%s\": %m", path))); else ereport(ERROR, - (errmsg("not enough data in file \"%s\"", - path))); + (errcode(ERRCODE_DATA_CORRUPTED), + errmsg("could not read file \"%s\": read %d of %zu", + path, r, (Size) nread))); } pgstat_report_wait_end(); } errno = 0; pgstat_report_wait_start(WAIT_EVENT_WAL_COPY_WRITE); - if ((int) write(fd, buffer, sizeof(buffer)) != (int) sizeof(buffer)) + if ((int) write(fd, buffer.data, sizeof(buffer)) != (int) sizeof(buffer)) { int save_errno = errno; @@ -3438,24 +3481,24 @@ XLogFileCopy(XLogSegNo destsegno, TimeLineID srcTLI, XLogSegNo srcsegno, * filename while it's being created) and to recycle an old segment. * * *segno: identify segment to install as (or first possible target). - * When find_free is TRUE, this is modified on return to indicate the + * When find_free is true, this is modified on return to indicate the * actual installation location or last segment searched. * * tmppath: initial name of file to install. It will be renamed into place. * - * find_free: if TRUE, install the new segment at the first empty segno - * number at or after the passed numbers. If FALSE, install the new segment + * find_free: if true, install the new segment at the first empty segno + * number at or after the passed numbers. If false, install the new segment * exactly where specified, deleting any existing segment file there. * * max_segno: maximum segment number to install the new file as. Fail if no * free slot is found between *segno and max_segno. (Ignored when find_free - * is FALSE.) + * is false.) * - * use_lock: if TRUE, acquire ControlFileLock while moving file into - * place. This should be TRUE except during bootstrap log creation. The + * use_lock: if true, acquire ControlFileLock while moving file into + * place. This should be true except during bootstrap log creation. The * caller must *not* hold the lock at call. * - * Returns TRUE if the file was installed successfully. FALSE indicates that + * Returns true if the file was installed successfully. false indicates that * max_segno limit was exceeded, or an error occurred while renaming the * file into place. */ @@ -3467,7 +3510,7 @@ InstallXLogFileSegment(XLogSegNo *segno, char *tmppath, char path[MAXPGPATH]; struct stat stat_buf; - XLogFilePath(path, ThisTimeLineID, *segno); + XLogFilePath(path, ThisTimeLineID, *segno, wal_segment_size); /* * We want to be sure that only one process does this at a time. 
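[Editorial note, not part of the patch.] XLogFileInit() above keeps the long-standing recipe even as the buffer becomes a PGAlignedXLogBlock and the segment size becomes variable: zero-fill the whole file under a temporary name, fsync it so that the metadata and indirect blocks reach disk (later WAL writes then only need fdatasync or O_DSYNC), and finally install it under its real name. A stripped-down sketch of that recipe using plain POSIX calls; the function name, paths, and block size are placeholders, and the real code additionally reports wait events and takes ControlFileLock while installing the file.

#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

#define BLOCK_SIZE	8192			/* stand-in for XLOG_BLCKSZ */

/* Zero-fill 'segsize' bytes at 'tmppath', fsync, then rename onto 'path'. */
static int
create_zeroed_segment(const char *tmppath, const char *path, size_t segsize)
{
	static const char zbuffer[BLOCK_SIZE];	/* static storage: already zeroed */
	size_t		done;
	int			fd;

	fd = open(tmppath, O_RDWR | O_CREAT | O_EXCL, 0600);
	if (fd < 0)
		return -1;

	for (done = 0; done < segsize; done += BLOCK_SIZE)
	{
		if (write(fd, zbuffer, BLOCK_SIZE) != BLOCK_SIZE)
		{
			int			save_errno = errno;

			close(fd);
			unlink(tmppath);
			/* a short write with errno == 0 conventionally means no space */
			errno = save_errno ? save_errno : ENOSPC;
			return -1;
		}
	}

	/* Preserve errno across close(), as the patched fsync path above does. */
	if (fsync(fd) != 0)
	{
		int			save_errno = errno;

		close(fd);
		unlink(tmppath);
		errno = save_errno;
		return -1;
	}

	if (close(fd) != 0)
		return -1;

	return rename(tmppath, path);
}

In the patch itself the same pattern is followed by XLogFileCopy() for duplicating an existing segment, with its read side hardened to distinguish a short read from a read error.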
@@ -3493,7 +3536,7 @@ InstallXLogFileSegment(XLogSegNo *segno, char *tmppath, return false; } (*segno)++; - XLogFilePath(path, ThisTimeLineID, *segno); + XLogFilePath(path, ThisTimeLineID, *segno, wal_segment_size); } } @@ -3524,14 +3567,13 @@ XLogFileOpen(XLogSegNo segno) char path[MAXPGPATH]; int fd; - XLogFilePath(path, ThisTimeLineID, segno); + XLogFilePath(path, ThisTimeLineID, segno, wal_segment_size); - fd = BasicOpenFile(path, O_RDWR | PG_BINARY | get_sync_bit(sync_method), - S_IRUSR | S_IWUSR); + fd = BasicOpenFile(path, O_RDWR | PG_BINARY | get_sync_bit(sync_method)); if (fd < 0) ereport(PANIC, (errcode_for_file_access(), - errmsg("could not open write-ahead log file \"%s\": %m", path))); + errmsg("could not open file \"%s\": %m", path))); return fd; } @@ -3551,7 +3593,7 @@ XLogFileRead(XLogSegNo segno, int emode, TimeLineID tli, char path[MAXPGPATH]; int fd; - XLogFileName(xlogfname, tli, segno); + XLogFileName(xlogfname, tli, segno, wal_segment_size); switch (source) { @@ -3563,7 +3605,7 @@ XLogFileRead(XLogSegNo segno, int emode, TimeLineID tli, restoredFromArchive = RestoreArchivedFile(path, xlogfname, "RECOVERYXLOG", - XLogSegSize, + wal_segment_size, InRedo); if (!restoredFromArchive) return -1; @@ -3571,7 +3613,7 @@ XLogFileRead(XLogSegNo segno, int emode, TimeLineID tli, case XLOG_FROM_PG_WAL: case XLOG_FROM_STREAM: - XLogFilePath(path, tli, segno); + XLogFilePath(path, tli, segno, wal_segment_size); restoredFromArchive = false; break; @@ -3593,7 +3635,7 @@ XLogFileRead(XLogSegNo segno, int emode, TimeLineID tli, snprintf(path, MAXPGPATH, XLOGDIR "/%s", xlogfname); } - fd = BasicOpenFile(path, O_RDONLY | PG_BINARY, 0); + fd = BasicOpenFile(path, O_RDONLY | PG_BINARY); if (fd >= 0) { /* Success! */ @@ -3690,7 +3732,7 @@ XLogFileReadAnyTLI(XLogSegNo segno, int emode, int source) } /* Couldn't find it. For simplicity, complain about front timeline */ - XLogFilePath(path, recoveryTargetTLI, segno); + XLogFilePath(path, recoveryTargetTLI, segno, wal_segment_size); errno = ENOENT; ereport(emode, (errcode_for_file_access(), @@ -3720,7 +3762,7 @@ XLogFileClose(void) if (close(openLogFile)) ereport(PANIC, (errcode_for_file_access(), - errmsg("could not close log file %s: %m", + errmsg("could not close file \"%s\": %m", XLogFileNameP(ThisTimeLineID, openLogSegNo)))); openLogFile = -1; } @@ -3741,9 +3783,11 @@ PreallocXlogFiles(XLogRecPtr endptr) XLogSegNo _logSegNo; int lf; bool use_existent; + uint64 offset; - XLByteToPrevSeg(endptr, _logSegNo); - if ((endptr - 1) % XLogSegSize >= (uint32) (0.75 * XLogSegSize)) + XLByteToPrevSeg(endptr, _logSegNo, wal_segment_size); + offset = XLogSegmentOffset(endptr - 1, wal_segment_size); + if (offset >= (uint32) (0.75 * wal_segment_size)) { _logSegNo++; use_existent = true; @@ -3760,10 +3804,16 @@ PreallocXlogFiles(XLogRecPtr endptr) * existed while the server has been running, as this function always * succeeds if no WAL segments have been removed since startup. * 'tli' is only used in the error message. + * + * Note: this function guarantees to keep errno unchanged on return. + * This supports callers that use this to possibly deliver a better + * error message about a missing file, while still being able to throw + * a normal file-access error afterwards, if this does return. 
*/ void CheckXLogRemoved(XLogSegNo segno, TimeLineID tli) { + int save_errno = errno; XLogSegNo lastRemovedSegNo; SpinLockAcquire(&XLogCtl->info_lck); @@ -3774,12 +3824,14 @@ CheckXLogRemoved(XLogSegNo segno, TimeLineID tli) { char filename[MAXFNAMELEN]; - XLogFileName(filename, tli, segno); + XLogFileName(filename, tli, segno, wal_segment_size); + errno = save_errno; ereport(ERROR, (errcode_for_file_access(), errmsg("requested WAL segment %s has already been removed", filename))); } + errno = save_errno; } /* @@ -3811,7 +3863,7 @@ UpdateLastRemovedPtr(char *filename) uint32 tli; XLogSegNo segno; - XLogFromFileName(filename, &tli, &segno); + XLogFromFileName(filename, &tli, &segno, wal_segment_size); SpinLockAcquire(&XLogCtl->info_lck); if (segno > XLogCtl->lastRemovedSegNo) @@ -3819,37 +3871,61 @@ UpdateLastRemovedPtr(char *filename) SpinLockRelease(&XLogCtl->info_lck); } +/* + * Remove all temporary log files in pg_wal + * + * This is called at the beginning of recovery after a previous crash, + * at a point where no other processes write fresh WAL data. + */ +static void +RemoveTempXlogFiles(void) +{ + DIR *xldir; + struct dirent *xlde; + + elog(DEBUG2, "removing all temporary WAL segments"); + + xldir = AllocateDir(XLOGDIR); + while ((xlde = ReadDir(xldir, XLOGDIR)) != NULL) + { + char path[MAXPGPATH]; + + if (strncmp(xlde->d_name, "xlogtemp.", 9) != 0) + continue; + + snprintf(path, MAXPGPATH, XLOGDIR "/%s", xlde->d_name); + unlink(path); + elog(DEBUG2, "removed temporary WAL segment \"%s\"", path); + } + FreeDir(xldir); +} + /* * Recycle or remove all log files older or equal to passed segno. * - * endptr is current (or recent) end of xlog, and PriorRedoRecPtr is the - * redo pointer of the previous checkpoint. These are used to determine + * endptr is current (or recent) end of xlog, and RedoRecPtr is the + * redo pointer of the last checkpoint. These are used to determine * whether we want to recycle rather than delete no-longer-wanted log files. */ static void -RemoveOldXlogFiles(XLogSegNo segno, XLogRecPtr PriorRedoPtr, XLogRecPtr endptr) +RemoveOldXlogFiles(XLogSegNo segno, XLogRecPtr RedoRecPtr, XLogRecPtr endptr) { DIR *xldir; struct dirent *xlde; char lastoff[MAXFNAMELEN]; - xldir = AllocateDir(XLOGDIR); - if (xldir == NULL) - ereport(ERROR, - (errcode_for_file_access(), - errmsg("could not open write-ahead log directory \"%s\": %m", - XLOGDIR))); - /* * Construct a filename of the last segment to be kept. The timeline ID * doesn't matter, we ignore that in the comparison. (During recovery, * ThisTimeLineID isn't set, so we can't use that.) 
*/ - XLogFileName(lastoff, 0, segno); + XLogFileName(lastoff, 0, segno, wal_segment_size); elog(DEBUG2, "attempting to remove WAL segments older than log file %s", lastoff); + xldir = AllocateDir(XLOGDIR); + while ((xlde = ReadDir(xldir, XLOGDIR)) != NULL) { /* Ignore files that are not XLOG segments */ @@ -3875,7 +3951,7 @@ RemoveOldXlogFiles(XLogSegNo segno, XLogRecPtr PriorRedoPtr, XLogRecPtr endptr) /* Update the last removed location in shared memory first */ UpdateLastRemovedPtr(xlde->d_name); - RemoveXlogFile(xlde->d_name, PriorRedoPtr, endptr); + RemoveXlogFile(xlde->d_name, RedoRecPtr, endptr); } } } @@ -3906,23 +3982,18 @@ RemoveNonParentXlogFiles(XLogRecPtr switchpoint, TimeLineID newTLI) char switchseg[MAXFNAMELEN]; XLogSegNo endLogSegNo; - XLByteToPrevSeg(switchpoint, endLogSegNo); - - xldir = AllocateDir(XLOGDIR); - if (xldir == NULL) - ereport(ERROR, - (errcode_for_file_access(), - errmsg("could not open write-ahead log directory \"%s\": %m", - XLOGDIR))); + XLByteToPrevSeg(switchpoint, endLogSegNo, wal_segment_size); /* * Construct a filename of the last segment to be kept. */ - XLogFileName(switchseg, newTLI, endLogSegNo); + XLogFileName(switchseg, newTLI, endLogSegNo, wal_segment_size); elog(DEBUG2, "attempting to remove WAL segments newer than log file %s", switchseg); + xldir = AllocateDir(XLOGDIR); + while ((xlde = ReadDir(xldir, XLOGDIR)) != NULL) { /* Ignore files that are not XLOG segments */ @@ -3954,14 +4025,14 @@ RemoveNonParentXlogFiles(XLogRecPtr switchpoint, TimeLineID newTLI) /* * Recycle or remove a log file that's no longer needed. * - * endptr is current (or recent) end of xlog, and PriorRedoRecPtr is the - * redo pointer of the previous checkpoint. These are used to determine + * endptr is current (or recent) end of xlog, and RedoRecPtr is the + * redo pointer of the last checkpoint. These are used to determine * whether we want to recycle rather than delete no-longer-wanted log files. - * If PriorRedoRecPtr is not known, pass invalid, and the function will - * recycle, somewhat arbitrarily, 10 future segments. + * If RedoRecPtr is not known, pass invalid, and the function will recycle, + * somewhat arbitrarily, 10 future segments. */ static void -RemoveXlogFile(const char *segname, XLogRecPtr PriorRedoPtr, XLogRecPtr endptr) +RemoveXlogFile(const char *segname, XLogRecPtr RedoRecPtr, XLogRecPtr endptr) { char path[MAXPGPATH]; #ifdef WIN32 @@ -3974,11 +4045,11 @@ RemoveXlogFile(const char *segname, XLogRecPtr PriorRedoPtr, XLogRecPtr endptr) /* * Initialize info about where to try to recycle to. 
*/ - XLByteToSeg(endptr, endlogSegNo); - if (PriorRedoPtr == InvalidXLogRecPtr) + XLByteToSeg(endptr, endlogSegNo, wal_segment_size); + if (RedoRecPtr == InvalidXLogRecPtr) recycleSegNo = endlogSegNo + 10; else - recycleSegNo = XLOGfileslop(PriorRedoPtr); + recycleSegNo = XLOGfileslop(RedoRecPtr); snprintf(path, MAXPGPATH, XLOGDIR "/%s", segname); @@ -4025,7 +4096,7 @@ RemoveXlogFile(const char *segname, XLogRecPtr PriorRedoPtr, XLogRecPtr endptr) { ereport(LOG, (errcode_for_file_access(), - errmsg("could not rename old write-ahead log file \"%s\": %m", + errmsg("could not rename file \"%s\": %m", path))); return; } @@ -4084,7 +4155,7 @@ ValidateXLOGDirectoryStructure(void) { ereport(LOG, (errmsg("creating missing WAL directory \"%s\"", path))); - if (mkdir(path, S_IRWXU) < 0) + if (MakePGDirectory(path) < 0) ereport(FATAL, (errmsg("could not create missing directory \"%s\": %m", path))); @@ -4104,11 +4175,6 @@ CleanupBackupHistory(void) char path[MAXPGPATH + sizeof(XLOGDIR)]; xldir = AllocateDir(XLOGDIR); - if (xldir == NULL) - ereport(ERROR, - (errcode_for_file_access(), - errmsg("could not open write-ahead log directory \"%s\": %m", - XLOGDIR))); while ((xlde = ReadDir(xldir, XLOGDIR)) != NULL) { @@ -4192,9 +4258,11 @@ ReadRecord(XLogReaderState *xlogreader, XLogRecPtr RecPtr, int emode, XLogSegNo segno; int32 offset; - XLByteToSeg(xlogreader->latestPagePtr, segno); - offset = xlogreader->latestPagePtr % XLogSegSize; - XLogFileName(fname, xlogreader->readPageTLI, segno); + XLByteToSeg(xlogreader->latestPagePtr, segno, wal_segment_size); + offset = XLogSegmentOffset(xlogreader->latestPagePtr, + wal_segment_size); + XLogFileName(fname, xlogreader->readPageTLI, segno, + wal_segment_size); ereport(emode_for_corrupt_record(emode, RecPtr ? RecPtr : EndRecPtr), (errmsg("unexpected timeline ID %u in log segment %s, offset %u", @@ -4247,6 +4315,12 @@ ReadRecord(XLogReaderState *xlogreader, XLogRecPtr RecPtr, int emode, minRecoveryPoint = ControlFile->minRecoveryPoint; minRecoveryPointTLI = ControlFile->minRecoveryPointTLI; + /* + * The startup process can update its local copy of + * minRecoveryPoint from this point. 
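/*
 * Sketch of the segment arithmetic behind the calls that now take
 * wal_segment_size as an argument: the segment number is the LSN divided
 * by the segment size, the in-segment offset is the remainder, and the
 * file name is the usual 24-hex-digit timeline/log/seg triple.  The
 * segments-per-xlogid constant (0x100000000 / segment size) mirrors the
 * convention of the real XLogFileName macro but is an assumption here.
 */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

static void
lsn_to_walfile(uint64_t lsn, unsigned int tli, uint64_t seg_size,
			   char *out, size_t outlen)
{
	uint64_t	segno = lsn / seg_size;
	uint64_t	segs_per_id = UINT64_C(0x100000000) / seg_size;

	snprintf(out, outlen, "%08X%08X%08X",
			 tli,
			 (unsigned int) (segno / segs_per_id),
			 (unsigned int) (segno % segs_per_id));
}

int
main(void)
{
	char		fname[64];
	uint64_t	lsn = UINT64_C(0x000000010000A000);	/* LSN 1/0000A000 */
	uint64_t	seg_size = 16 * 1024 * 1024;	/* 16 MB default */

	lsn_to_walfile(lsn, 1, seg_size, fname, sizeof(fname));
	printf("segno=%" PRIu64 " offset=%" PRIu64 " file=%s\n",
		   lsn / seg_size, lsn % seg_size, fname);
	return 0;
}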
+ */ + updateMinRecoveryPoint = true; + UpdateControlFile(); LWLockRelease(ControlFileLock); @@ -4399,7 +4473,7 @@ WriteControlFile(void) ControlFile->blcksz = BLCKSZ; ControlFile->relseg_size = RELSEG_SIZE; ControlFile->xlog_blcksz = XLOG_BLCKSZ; - ControlFile->xlog_seg_size = XLOG_SEG_SIZE; + ControlFile->xlog_seg_size = wal_segment_size; ControlFile->nameDataLen = NAMEDATALEN; ControlFile->indexMaxKeys = INDEX_MAX_KEYS; @@ -4428,12 +4502,11 @@ WriteControlFile(void) memcpy(buffer, ControlFile, sizeof(ControlFileData)); fd = BasicOpenFile(XLOG_CONTROL_FILE, - O_RDWR | O_CREAT | O_EXCL | PG_BINARY, - S_IRUSR | S_IWUSR); + O_RDWR | O_CREAT | O_EXCL | PG_BINARY); if (fd < 0) ereport(PANIC, (errcode_for_file_access(), - errmsg("could not create control file \"%s\": %m", + errmsg("could not create file \"%s\": %m", XLOG_CONTROL_FILE))); errno = 0; @@ -4445,7 +4518,8 @@ WriteControlFile(void) errno = ENOSPC; ereport(PANIC, (errcode_for_file_access(), - errmsg("could not write to control file: %m"))); + errmsg("could not write to file \"%s\": %m", + XLOG_CONTROL_FILE))); } pgstat_report_wait_end(); @@ -4453,13 +4527,15 @@ WriteControlFile(void) if (pg_fsync(fd) != 0) ereport(PANIC, (errcode_for_file_access(), - errmsg("could not fsync control file: %m"))); + errmsg("could not fsync file \"%s\": %m", + XLOG_CONTROL_FILE))); pgstat_report_wait_end(); if (close(fd)) ereport(PANIC, (errcode_for_file_access(), - errmsg("could not close control file: %m"))); + errmsg("could not close file \"%s\": %m", + XLOG_CONTROL_FILE))); } static void @@ -4467,24 +4543,35 @@ ReadControlFile(void) { pg_crc32c crc; int fd; + static char wal_segsz_str[20]; + int r; /* * Read data... */ fd = BasicOpenFile(XLOG_CONTROL_FILE, - O_RDWR | PG_BINARY, - S_IRUSR | S_IWUSR); + O_RDWR | PG_BINARY); if (fd < 0) ereport(PANIC, (errcode_for_file_access(), - errmsg("could not open control file \"%s\": %m", + errmsg("could not open file \"%s\": %m", XLOG_CONTROL_FILE))); pgstat_report_wait_start(WAIT_EVENT_CONTROL_FILE_READ); - if (read(fd, ControlFile, sizeof(ControlFileData)) != sizeof(ControlFileData)) - ereport(PANIC, - (errcode_for_file_access(), - errmsg("could not read from control file: %m"))); + r = read(fd, ControlFile, sizeof(ControlFileData)); + if (r != sizeof(ControlFileData)) + { + if (r < 0) + ereport(PANIC, + (errcode_for_file_access(), + errmsg("could not read file \"%s\": %m", + XLOG_CONTROL_FILE))); + else + ereport(PANIC, + (errcode(ERRCODE_DATA_CORRUPTED), + errmsg("could not read file \"%s\": read %d of %zu", + XLOG_CONTROL_FILE, r, sizeof(ControlFileData)))); + } pgstat_report_wait_end(); close(fd); @@ -4569,13 +4656,6 @@ ReadControlFile(void) " but the server was compiled with XLOG_BLCKSZ %d.", ControlFile->xlog_blcksz, XLOG_BLCKSZ), errhint("It looks like you need to recompile or initdb."))); - if (ControlFile->xlog_seg_size != XLOG_SEG_SIZE) - ereport(FATAL, - (errmsg("database files are incompatible with server"), - errdetail("The database cluster was initialized with XLOG_SEG_SIZE %d," - " but the server was compiled with XLOG_SEG_SIZE %d.", - ControlFile->xlog_seg_size, XLOG_SEG_SIZE), - errhint("It looks like you need to recompile or initdb."))); if (ControlFile->nameDataLen != NAMEDATALEN) ereport(FATAL, (errmsg("database files are incompatible with server"), @@ -4637,6 +4717,34 @@ ReadControlFile(void) errhint("It looks like you need to recompile or initdb."))); #endif + wal_segment_size = ControlFile->xlog_seg_size; + + if (!IsValidWalSegSize(wal_segment_size)) + ereport(ERROR, 
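/*
 * Sketch of the read() handling pattern used for the control file above:
 * a result that is neither -1 nor the full struct size is a short read
 * and is reported as "read X of Y" instead of being blamed on errno,
 * which a partial read does not set.  The struct below is a stand-in,
 * not the real ControlFileData layout.
 */
#include <errno.h>
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

struct control_data
{
	uint64_t	system_identifier;
	uint32_t	xlog_seg_size;
	/* ... the rest of the on-disk structure ... */
};

static int
read_control(const char *path, struct control_data *out)
{
	int			fd = open(path, O_RDONLY);
	ssize_t		r;

	if (fd < 0)
	{
		fprintf(stderr, "could not open file \"%s\": %s\n",
				path, strerror(errno));
		return -1;
	}

	r = read(fd, out, sizeof(*out));
	if (r != (ssize_t) sizeof(*out))
	{
		if (r < 0)
			fprintf(stderr, "could not read file \"%s\": %s\n",
					path, strerror(errno));
		else
			fprintf(stderr, "could not read file \"%s\": read %zd of %zu\n",
					path, r, sizeof(*out));
		close(fd);
		return -1;
	}
	close(fd);
	return 0;
}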
(errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg_plural("WAL segment size must be a power of two between 1 MB and 1 GB, but the control file specifies %d byte", + "WAL segment size must be a power of two between 1 MB and 1 GB, but the control file specifies %d bytes", + wal_segment_size, + wal_segment_size))); + + snprintf(wal_segsz_str, sizeof(wal_segsz_str), "%d", wal_segment_size); + SetConfigOption("wal_segment_size", wal_segsz_str, PGC_INTERNAL, + PGC_S_OVERRIDE); + + /* check and update variables dependent on wal_segment_size */ + if (ConvertToXSegs(min_wal_size_mb, wal_segment_size) < 2) + ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("\"min_wal_size\" must be at least twice \"wal_segment_size\""))); + + if (ConvertToXSegs(max_wal_size_mb, wal_segment_size) < 2) + ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("\"max_wal_size\" must be at least twice \"wal_segment_size\""))); + + UsableBytesInSegment = + (wal_segment_size / XLOG_BLCKSZ * UsableBytesInPage) - + (SizeOfXLogLongPHD - SizeOfXLogShortPHD); + + CalculateCheckpointSegments(); + /* Make the initdb settings visible as GUC variables, too */ SetConfigOption("data_checksums", DataChecksumsEnabled() ? "yes" : "no", PGC_INTERNAL, PGC_S_OVERRIDE); @@ -4654,13 +4762,11 @@ UpdateControlFile(void) FIN_CRC32C(ControlFile->crc); fd = BasicOpenFile(XLOG_CONTROL_FILE, - O_RDWR | PG_BINARY, - S_IRUSR | S_IWUSR); + O_RDWR | PG_BINARY); if (fd < 0) ereport(PANIC, (errcode_for_file_access(), - errmsg("could not open control file \"%s\": %m", - XLOG_CONTROL_FILE))); + errmsg("could not open file \"%s\": %m", XLOG_CONTROL_FILE))); errno = 0; pgstat_report_wait_start(WAIT_EVENT_CONTROL_FILE_WRITE_UPDATE); @@ -4671,7 +4777,8 @@ UpdateControlFile(void) errno = ENOSPC; ereport(PANIC, (errcode_for_file_access(), - errmsg("could not write to control file: %m"))); + errmsg("could not write to file \"%s\": %m", + XLOG_CONTROL_FILE))); } pgstat_report_wait_end(); @@ -4679,13 +4786,15 @@ UpdateControlFile(void) if (pg_fsync(fd) != 0) ereport(PANIC, (errcode_for_file_access(), - errmsg("could not fsync control file: %m"))); + errmsg("could not fsync file \"%s\": %m", + XLOG_CONTROL_FILE))); pgstat_report_wait_end(); if (close(fd)) ereport(PANIC, (errcode_for_file_access(), - errmsg("could not close control file: %m"))); + errmsg("could not close file \"%s\": %m", + XLOG_CONTROL_FILE))); } /* @@ -4757,8 +4866,8 @@ XLOGChooseNumBuffers(void) int xbuffers; xbuffers = NBuffers / 32; - if (xbuffers > XLOG_SEG_SIZE / XLOG_BLCKSZ) - xbuffers = XLOG_SEG_SIZE / XLOG_BLCKSZ; + if (xbuffers > (wal_segment_size / XLOG_BLCKSZ)) + xbuffers = (wal_segment_size / XLOG_BLCKSZ); if (xbuffers < 8) xbuffers = 8; return xbuffers; @@ -4799,6 +4908,26 @@ check_wal_buffers(int *newval, void **extra, GucSource source) return true; } +/* + * Read the control file, set respective GUCs. + * + * This is to be called during startup, including a crash recovery cycle, + * unless in bootstrap mode, where no control file yet exists. As there's no + * usable shared memory yet (its sizing can depend on the contents of the + * control file!), first store the contents in local memory. XLOGShmemInit() + * will then copy it to shared memory later. + * + * reset just controls whether previous contents are to be expected (in the + * reset case, there's a dangling pointer into old shared memory), or not. 
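/*
 * Sketch of the run-time checks that replace the old compile-time
 * XLOG_SEG_SIZE comparison: the segment size read from pg_control must be
 * a power of two between 1 MB and 1 GB, and min_wal_size/max_wal_size
 * must each cover at least two segments.  The MB-to-segments conversion
 * below is an assumption standing in for ConvertToXSegs().
 */
#include <stdbool.h>
#include <stdint.h>

#define ONE_MB	(1024 * 1024)
#define ONE_GB	(1024 * 1024 * 1024)

static bool
is_valid_wal_segment_size(uint64_t size)
{
	/* a power of two has exactly one bit set */
	return size >= ONE_MB && size <= ONE_GB && (size & (size - 1)) == 0;
}

static bool
wal_size_gucs_ok(int min_wal_size_mb, int max_wal_size_mb, uint64_t seg_size)
{
	uint64_t	min_segs = ((uint64_t) min_wal_size_mb * ONE_MB) / seg_size;
	uint64_t	max_segs = ((uint64_t) max_wal_size_mb * ONE_MB) / seg_size;

	/* both settings must be at least twice the segment size */
	return min_segs >= 2 && max_segs >= 2;
}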
+ */ +void +LocalProcessControlFile(bool reset) +{ + Assert(reset || ControlFile == NULL); + ControlFile = palloc(sizeof(ControlFileData)); + ReadControlFile(); +} + /* * Initialization of shared memory for XLOG */ @@ -4850,6 +4979,7 @@ XLOGShmemInit(void) foundXLog; char *allocptr; int i; + ControlFileData *localControlFile; #ifdef WAL_DEBUG @@ -4867,11 +4997,14 @@ XLOGShmemInit(void) } #endif - ControlFile = (ControlFileData *) - ShmemInitStruct("Control File", sizeof(ControlFileData), &foundCFile); + XLogCtl = (XLogCtlData *) ShmemInitStruct("XLOG Ctl", XLOGShmemSize(), &foundXLog); + localControlFile = ControlFile; + ControlFile = (ControlFileData *) + ShmemInitStruct("Control File", sizeof(ControlFileData), &foundCFile); + if (foundCFile || foundXLog) { /* both should be present or neither */ @@ -4881,10 +5014,23 @@ XLOGShmemInit(void) WALInsertLocks = XLogCtl->Insert.WALInsertLocks; LWLockRegisterTranche(LWTRANCHE_WAL_INSERT, "wal_insert"); + + if (localControlFile) + pfree(localControlFile); return; } memset(XLogCtl, 0, sizeof(XLogCtlData)); + /* + * Already have read control file locally, unless in bootstrap mode. Move + * contents into shared memory. + */ + if (localControlFile) + { + memcpy(ControlFile, localControlFile, sizeof(ControlFileData)); + pfree(localControlFile); + } + /* * Since XLogCtlData contains XLogRecPtr fields, its sizeof should be a * multiple of the alignment for same, so no extra alignment padding is @@ -4933,14 +5079,6 @@ XLOGShmemInit(void) SpinLockInit(&XLogCtl->info_lck); SpinLockInit(&XLogCtl->ulsn_lck); InitSharedLatch(&XLogCtl->recoveryWakeupLatch); - - /* - * If we are not in bootstrap mode, pg_control should already exist. Read - * and validate it immediately (see comments in ReadControlFile() for the - * reasons why). - */ - if (!IsBootstrapProcessingMode()) - ReadControlFile(); } /* @@ -5005,7 +5143,7 @@ BootStrapXLOG(void) * segment with logid=0 logseg=1. The very first WAL segment, 0/0, is not * used, so that we can use 0/0 to mean "before any valid WAL segment". */ - checkPoint.redo = XLogSegSize + SizeOfXLogLongPHD; + checkPoint.redo = wal_segment_size + SizeOfXLogLongPHD; checkPoint.ThisTimeLineID = ThisTimeLineID; checkPoint.PrevTimeLineID = ThisTimeLineID; checkPoint.fullPageWrites = fullPageWrites; @@ -5036,10 +5174,10 @@ BootStrapXLOG(void) page->xlp_magic = XLOG_PAGE_MAGIC; page->xlp_info = XLP_LONG_HEADER; page->xlp_tli = ThisTimeLineID; - page->xlp_pageaddr = XLogSegSize; + page->xlp_pageaddr = wal_segment_size; longpage = (XLogLongPageHeader) page; longpage->xlp_sysid = sysidentifier; - longpage->xlp_seg_size = XLogSegSize; + longpage->xlp_seg_size = wal_segment_size; longpage->xlp_xlog_blcksz = XLOG_BLCKSZ; /* Insert the initial checkpoint record */ @@ -5129,6 +5267,12 @@ BootStrapXLOG(void) BootStrapMultiXact(); pfree(buffer); + + /* + * Force control file to be read - in contrast to normal processing we'd + * otherwise never run the checks and GUC related initializations therein. 
+ */ + ReadControlFile(); } static char * @@ -5265,6 +5409,18 @@ readRecoveryCommandFile(void) { recoveryTarget = RECOVERY_TARGET_TIME; + if (strcmp(item->value, "epoch") == 0 || + strcmp(item->value, "infinity") == 0 || + strcmp(item->value, "-infinity") == 0 || + strcmp(item->value, "now") == 0 || + strcmp(item->value, "today") == 0 || + strcmp(item->value, "tomorrow") == 0 || + strcmp(item->value, "yesterday") == 0) + ereport(FATAL, + (errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("recovery_target_time is not a valid timestamp: \"%s\"", + item->value))); + /* * Convert the time string given by the user to TimestampTz form. */ @@ -5412,7 +5568,7 @@ readRecoveryCommandFile(void) } /* - * Override any inconsistent requests. Not that this is a change of + * Override any inconsistent requests. Note that this is a change of * behaviour in 9.5; prior to this we simply ignored a request to pause if * hot_standby = off, which was surprising behaviour. */ @@ -5503,8 +5659,8 @@ exitArchiveRecovery(TimeLineID endTLI, XLogRecPtr endOfLog) * they are the same, but if the switch happens exactly at a segment * boundary, startLogSegNo will be endLogSegNo + 1. */ - XLByteToPrevSeg(endOfLog, endLogSegNo); - XLByteToSeg(endOfLog, startLogSegNo); + XLByteToPrevSeg(endOfLog, endLogSegNo, wal_segment_size); + XLByteToSeg(endOfLog, startLogSegNo, wal_segment_size); /* * Initialize the starting WAL segment for the new timeline. If the switch @@ -5522,7 +5678,7 @@ exitArchiveRecovery(TimeLineID endTLI, XLogRecPtr endOfLog) * avoid emplacing a bogus file. */ XLogFileCopy(endLogSegNo, endTLI, endLogSegNo, - endOfLog % XLOG_SEG_SIZE); + XLogSegmentOffset(endOfLog, wal_segment_size)); } else { @@ -5538,7 +5694,7 @@ exitArchiveRecovery(TimeLineID endTLI, XLogRecPtr endOfLog) if (close(fd)) ereport(ERROR, (errcode_for_file_access(), - errmsg("could not close log file %s: %m", + errmsg("could not close file \"%s\": %m", XLogFileNameP(ThisTimeLineID, startLogSegNo)))); } @@ -5546,7 +5702,7 @@ exitArchiveRecovery(TimeLineID endTLI, XLogRecPtr endOfLog) * Let's just make real sure there are not .ready or .done flags posted * for the new segment. */ - XLogFileName(xlogfname, ThisTimeLineID, startLogSegNo); + XLogFileName(xlogfname, ThisTimeLineID, startLogSegNo, wal_segment_size); XLogArchiveCleanup(xlogfname); /* @@ -5610,7 +5766,7 @@ getRecordTimestamp(XLogReaderState *record, TimestampTz *recordXtime) * For point-in-time recovery, this function decides whether we want to * stop applying the XLOG before the current record. * - * Returns TRUE if we are stopping, FALSE otherwise. If stopping, some + * Returns true if we are stopping, false otherwise. If stopping, some * information is saved in recoveryStopXid et al for use in annotating the * new timeline's history file. */ @@ -6215,13 +6371,17 @@ StartupXLOG(void) struct stat st; /* - * Read control file and check XLOG status looks valid. - * - * Note: in most control paths, *ControlFile is already valid and we need - * not do ReadControlFile() here, but might as well do it to be sure. + * We should have an aux process resource owner to use, and we should not + * be in a transaction that's installed some other resowner. */ - ReadControlFile(); + Assert(AuxProcessResourceOwner != NULL); + Assert(CurrentResourceOwner == NULL || + CurrentResourceOwner == AuxProcessResourceOwner); + CurrentResourceOwner = AuxProcessResourceOwner; + /* + * Verify XLOG status looks valid. 
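/*
 * Sketch of the recovery_target_time sanity check added in
 * readRecoveryCommandFile() above: the reserved timestamp inputs the
 * server would otherwise accept are rejected outright, since a value
 * such as "now" cannot serve as a fixed recovery target.
 */
#include <stdbool.h>
#include <string.h>

static bool
is_reserved_timestamp_literal(const char *value)
{
	static const char *const reserved[] = {
		"epoch", "infinity", "-infinity", "now",
		"today", "tomorrow", "yesterday", NULL
	};

	for (int i = 0; reserved[i] != NULL; i++)
	{
		if (strcmp(value, reserved[i]) == 0)
			return true;		/* caller raises FATAL with the value */
	}
	return false;
}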
+ */ if (ControlFile->state < DB_SHUTDOWNED || ControlFile->state > DB_IN_PRODUCTION || !XRecOffIsValid(ControlFile->checkPoint)) @@ -6273,17 +6433,25 @@ StartupXLOG(void) */ ValidateXLOGDirectoryStructure(); - /* - * If we previously crashed, there might be data which we had written, - * intending to fsync it, but which we had not actually fsync'd yet. - * Therefore, a power failure in the near future might cause earlier - * unflushed writes to be lost, even though more recent data written to - * disk from here on would be persisted. To avoid that, fsync the entire - * data directory. + /*---------- + * If we previously crashed, perform a couple of actions: + * - The pg_wal directory may still include some temporary WAL segments + * used when creating a new segment, so perform some clean up to not + * bloat this path. This is done first as there is no point to sync this + * temporary data. + * - There might be data which we had written, intending to fsync it, + * but which we had not actually fsync'd yet. Therefore, a power failure + * in the near future might cause earlier unflushed writes to be lost, + * even though more recent data written to disk from here on would be + * persisted. To avoid that, fsync the entire data directory. + *--------- */ if (ControlFile->state != DB_SHUTDOWNED && ControlFile->state != DB_SHUTDOWNED_IN_RECOVERY) + { + RemoveTempXlogFiles(); SyncDataDirectory(); + } /* * Initialize on the assumption we want to recover to the latest timeline @@ -6348,7 +6516,7 @@ StartupXLOG(void) /* Set up XLOG reader facility */ MemSet(&private, 0, sizeof(XLogPageReadPrivate)); - xlogreader = XLogReaderAllocate(&XLogPageRead, &private); + xlogreader = XLogReaderAllocate(wal_segment_size, &XLogPageRead, &private); if (!xlogreader) ereport(ERROR, (errcode(ERRCODE_OUT_OF_MEMORY), @@ -6357,8 +6525,11 @@ StartupXLOG(void) xlogreader->system_identifier = ControlFile->system_identifier; /* - * Allocate pages dedicated to WAL consistency checks, those had better be - * aligned. + * Allocate two page buffers dedicated to WAL consistency checks. We do + * it this way, rather than just making static arrays, for two reasons: + * (1) no need to waste the storage in most instantiations of the backend; + * (2) a static char array isn't guaranteed to have any particular + * alignment, whereas palloc() will provide MAXALIGN'd storage. */ replay_image_masked = (char *) palloc(BLCKSZ); master_image_masked = (char *) palloc(BLCKSZ); @@ -6505,10 +6676,7 @@ StartupXLOG(void) StandbyMode = true; } - /* - * Get the last valid checkpoint record. If the latest one according - * to pg_control is broken, try the next-to-last one. - */ + /* Get the last valid checkpoint record. */ checkPointLoc = ControlFile->checkPoint; RedoStartLSN = ControlFile->checkPointCopy.redo; record = ReadCheckpointRecord(xlogreader, checkPointLoc, 1, true); @@ -6518,30 +6686,17 @@ StartupXLOG(void) (errmsg("checkpoint record is at %X/%X", (uint32) (checkPointLoc >> 32), (uint32) checkPointLoc))); } - else if (StandbyMode) + else { /* - * The last valid checkpoint record required for a streaming - * recovery exists in neither standby nor the primary. + * We used to attempt to go back to a secondary checkpoint record + * here, but only when not in standby_mode. We now just fail if we + * can't read the last checkpoint because this allows us to + * simplify processing around checkpoints. 
*/ ereport(PANIC, (errmsg("could not locate a valid checkpoint record"))); } - else - { - checkPointLoc = ControlFile->prevCheckPoint; - record = ReadCheckpointRecord(xlogreader, checkPointLoc, 2, true); - if (record != NULL) - { - ereport(LOG, - (errmsg("using previous checkpoint record at %X/%X", - (uint32) (checkPointLoc >> 32), (uint32) checkPointLoc))); - InRecovery = true; /* force recovery even if SHUTDOWNED */ - } - else - ereport(PANIC, - (errmsg("could not locate a valid checkpoint record"))); - } memcpy(&checkPoint, XLogRecGetData(xlogreader), sizeof(CheckPoint)); wasShutdown = ((record->xl_info & ~XLR_INFO_MASK) == XLOG_CHECKPOINT_SHUTDOWN); } @@ -6607,7 +6762,7 @@ StartupXLOG(void) ereport(DEBUG1, (errmsg_internal("redo record is at %X/%X; shutdown %s", (uint32) (checkPoint.redo >> 32), (uint32) checkPoint.redo, - wasShutdown ? "TRUE" : "FALSE"))); + wasShutdown ? "true" : "false"))); ereport(DEBUG1, (errmsg_internal("next transaction ID: %u:%u; next OID: %u", checkPoint.nextXidEpoch, checkPoint.nextXid, @@ -6661,11 +6816,12 @@ StartupXLOG(void) StartupMultiXact(); /* - * Ditto commit timestamps. In a standby, we do it if setting is enabled - * in ControlFile; in a master we base the decision on the GUC itself. + * Ditto for commit timestamps. Activate the facility if the setting is + * enabled in the control file, as there should be no tracking of commit + * timestamps done when the setting was disabled. This facility can be + * started or stopped when replaying a XLOG_PARAMETER_CHANGE record. */ - if (ArchiveRecoveryRequested ? - ControlFile->track_commit_timestamp : track_commit_timestamp) + if (ControlFile->track_commit_timestamp) StartupCommitTs(); /* @@ -6770,7 +6926,6 @@ StartupXLOG(void) recoveryTargetTLI))); ControlFile->state = DB_IN_CRASH_RECOVERY; } - ControlFile->prevCheckPoint = ControlFile->checkPoint; ControlFile->checkPoint = checkPointLoc; ControlFile->checkPointCopy = checkPoint; if (InArchiveRecovery) @@ -6818,9 +6973,26 @@ StartupXLOG(void) /* No need to hold ControlFileLock yet, we aren't up far enough */ UpdateControlFile(); - /* initialize our local copy of minRecoveryPoint */ - minRecoveryPoint = ControlFile->minRecoveryPoint; - minRecoveryPointTLI = ControlFile->minRecoveryPointTLI; + /* + * Initialize our local copy of minRecoveryPoint. When doing crash + * recovery we want to replay up to the end of WAL. Particularly, in + * the case of a promoted standby minRecoveryPoint value in the + * control file is only updated after the first checkpoint. However, + * if the instance crashes before the first post-recovery checkpoint + * is completed then recovery will use a stale location causing the + * startup process to think that there are still invalid page + * references when checking for data consistency. + */ + if (InArchiveRecovery) + { + minRecoveryPoint = ControlFile->minRecoveryPoint; + minRecoveryPointTLI = ControlFile->minRecoveryPointTLI; + } + else + { + minRecoveryPoint = InvalidXLogRecPtr; + minRecoveryPointTLI = 0; + } /* * Reset pgstat data, because it may be invalid after recovery. @@ -7388,6 +7560,13 @@ StartupXLOG(void) } } + /* + * Pre-scan prepared transactions to find out the range of XIDs present. + * This information is not quite needed yet, but it is positioned here so + * as potential problems are detected before any on-disk change is done. + */ + oldestActiveXID = PrescanPreparedTransactions(NULL, NULL); + /* * Consider whether we need to assign a new timeline ID. 
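/*
 * Sketch of the local minRecoveryPoint initialization above: only archive
 * recovery trusts the value stored in the control file; plain crash
 * recovery replays to the end of WAL, so it starts from an invalid (zero)
 * recovery point instead of a possibly stale control-file value.
 */
#include <stdbool.h>
#include <stdint.h>

#define INVALID_REC_PTR ((uint64_t) 0)

static void
init_local_min_recovery_point(bool in_archive_recovery,
							  uint64_t ctrl_min_recovery_point,
							  uint32_t ctrl_min_recovery_tli,
							  uint64_t *local_lsn, uint32_t *local_tli)
{
	if (in_archive_recovery)
	{
		*local_lsn = ctrl_min_recovery_point;
		*local_tli = ctrl_min_recovery_tli;
	}
	else
	{
		*local_lsn = INVALID_REC_PTR;
		*local_tli = 0;
	}
}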
* @@ -7442,6 +7621,24 @@ StartupXLOG(void) else snprintf(reason, sizeof(reason), "no recovery target specified"); + /* + * We are now done reading the old WAL. Turn off archive fetching if + * it was active, and make a writable copy of the last WAL segment. + * (Note that we also have a copy of the last block of the old WAL in + * readBuf; we will use that below.) + */ + exitArchiveRecovery(EndOfLogTLI, EndOfLog); + + /* + * Write the timeline history file, and have it archived. After this + * point (or rather, as soon as the file is archived), the timeline + * will appear as "taken" in the WAL archive and to any standby + * servers. If we crash before actually switching to the new + * timeline, standby servers will nevertheless think that we switched + * to the new timeline, and will try to connect to the new timeline. + * To minimize the window for that, try to do as little as possible + * between here and writing the end-of-recovery record. + */ writeTimeLineHistory(ThisTimeLineID, recoveryTargetTLI, EndRecPtr, reason); } @@ -7450,15 +7647,6 @@ StartupXLOG(void) XLogCtl->ThisTimeLineID = ThisTimeLineID; XLogCtl->PrevTimeLineID = PrevTimeLineID; - /* - * We are now done reading the old WAL. Turn off archive fetching if it - * was active, and make a writable copy of the last WAL segment. (Note - * that we also have a copy of the last block of the old WAL in readBuf; - * we will use that below.) - */ - if (ArchiveRecoveryRequested) - exitArchiveRecovery(EndOfLogTLI, EndOfLog); - /* * Prepare to write WAL starting at EndOfLog location, and init xlog * buffer cache using the block containing the last record from the @@ -7481,7 +7669,7 @@ StartupXLOG(void) XLogRecPtr pageBeginPtr; pageBeginPtr = EndOfLog - (EndOfLog % XLOG_BLCKSZ); - Assert(readOff == pageBeginPtr % XLogSegSize); + Assert(readOff == XLogSegmentOffset(pageBeginPtr, wal_segment_size)); firstIdx = XLogRecPtrToBufIdx(EndOfLog); @@ -7511,9 +7699,6 @@ StartupXLOG(void) XLogCtl->LogwrtRqst.Write = EndOfLog; XLogCtl->LogwrtRqst.Flush = EndOfLog; - /* Pre-scan prepared transactions to find out the range of XIDs present */ - oldestActiveXID = PrescanPreparedTransactions(NULL, NULL); - /* * Update full_page_writes in shared memory and write an XLOG_FPW_CHANGE * record before resource manager writes cleanup WAL records or checkpoint @@ -7544,12 +7729,11 @@ StartupXLOG(void) { if (fast_promote) { - checkPointLoc = ControlFile->prevCheckPoint; + checkPointLoc = ControlFile->checkPoint; /* * Confirm the last checkpoint is available for us to recover - * from if we fail. Note that we don't check for the secondary - * checkpoint since that isn't available in most base backups. + * from if we fail. */ record = ReadCheckpointRecord(xlogreader, checkPointLoc, 1, false); if (record != NULL) @@ -7630,13 +7814,14 @@ StartupXLOG(void) * restored from the archive to begin with, it's expected to have a * .done file). 
*/ - if (EndOfLog % XLOG_SEG_SIZE != 0 && XLogArchivingActive()) + if (XLogSegmentOffset(EndOfLog, wal_segment_size) != 0 && + XLogArchivingActive()) { char origfname[MAXFNAMELEN]; XLogSegNo endLogSegNo; - XLByteToPrevSeg(EndOfLog, endLogSegNo); - XLogFileName(origfname, EndOfLogTLI, endLogSegNo); + XLByteToPrevSeg(EndOfLog, endLogSegNo, wal_segment_size); + XLogFileName(origfname, EndOfLogTLI, endLogSegNo, wal_segment_size); if (!XLogArchiveIsReadyOrDone(origfname)) { @@ -7644,7 +7829,7 @@ StartupXLOG(void) char partialfname[MAXFNAMELEN]; char partialpath[MAXPGPATH]; - XLogFilePath(origpath, EndOfLogTLI, endLogSegNo); + XLogFilePath(origpath, EndOfLogTLI, endLogSegNo, wal_segment_size); snprintf(partialfname, MAXFNAMELEN, "%s.partial", origfname); snprintf(partialpath, MAXPGPATH, "%s.partial", origpath); @@ -7787,6 +7972,8 @@ CheckRecoveryConsistency(void) if (XLogRecPtrIsInvalid(minRecoveryPoint)) return; + Assert(InArchiveRecovery); + /* * assume that we are called in the startup process, and hence don't need * a lock to read lastReplayedEndRecPtr @@ -7824,7 +8011,7 @@ CheckRecoveryConsistency(void) /* * Have we passed our safe starting point? Note that minRecoveryPoint is * known to be incorrectly set if ControlFile->backupEndRequired, until - * the XLOG_BACKUP_RECORD arrives to advise us of the correct + * the XLOG_BACKUP_END arrives to advise us of the correct * minRecoveryPoint. All we know prior to that is that we're not * consistent yet. */ @@ -8014,7 +8201,7 @@ LocalSetXLogInsertAllowed(void) * Subroutine to try to fetch and validate a prior checkpoint record. * * whichChkpt identifies the checkpoint (merely for reporting purposes). - * 1 for "primary", 2 for "secondary", 0 for "other" (backup_label) + * 1 for "primary", 0 for "other" (backup_label) */ static XLogRecord * ReadCheckpointRecord(XLogReaderState *xlogreader, XLogRecPtr RecPtr, @@ -8034,10 +8221,6 @@ ReadCheckpointRecord(XLogReaderState *xlogreader, XLogRecPtr RecPtr, ereport(LOG, (errmsg("invalid primary checkpoint link in control file"))); break; - case 2: - ereport(LOG, - (errmsg("invalid secondary checkpoint link in control file"))); - break; default: ereport(LOG, (errmsg("invalid checkpoint link in backup_label file"))); @@ -8059,10 +8242,6 @@ ReadCheckpointRecord(XLogReaderState *xlogreader, XLogRecPtr RecPtr, ereport(LOG, (errmsg("invalid primary checkpoint record"))); break; - case 2: - ereport(LOG, - (errmsg("invalid secondary checkpoint record"))); - break; default: ereport(LOG, (errmsg("invalid checkpoint record"))); @@ -8078,10 +8257,6 @@ ReadCheckpointRecord(XLogReaderState *xlogreader, XLogRecPtr RecPtr, ereport(LOG, (errmsg("invalid resource manager ID in primary checkpoint record"))); break; - case 2: - ereport(LOG, - (errmsg("invalid resource manager ID in secondary checkpoint record"))); - break; default: ereport(LOG, (errmsg("invalid resource manager ID in checkpoint record"))); @@ -8099,10 +8274,6 @@ ReadCheckpointRecord(XLogReaderState *xlogreader, XLogRecPtr RecPtr, ereport(LOG, (errmsg("invalid xl_info in primary checkpoint record"))); break; - case 2: - ereport(LOG, - (errmsg("invalid xl_info in secondary checkpoint record"))); - break; default: ereport(LOG, (errmsg("invalid xl_info in checkpoint record"))); @@ -8118,10 +8289,6 @@ ReadCheckpointRecord(XLogReaderState *xlogreader, XLogRecPtr RecPtr, ereport(LOG, (errmsg("invalid length of primary checkpoint record"))); break; - case 2: - ereport(LOG, - (errmsg("invalid length of secondary checkpoint record"))); - break; default: ereport(LOG, 
(errmsg("invalid length of checkpoint record"))); @@ -8150,6 +8317,9 @@ InitXLOGAccess(void) ThisTimeLineID = XLogCtl->ThisTimeLineID; Assert(ThisTimeLineID != 0 || IsBootstrapProcessingMode()); + /* set wal_segment_size */ + wal_segment_size = ControlFile->xlog_seg_size; + /* Use GetRedoRecPtr to copy the RedoRecPtr safely */ (void) GetRedoRecPtr(); /* Also update our copy of doPageWrites. */ @@ -8325,6 +8495,15 @@ GetNextXidAndEpoch(TransactionId *xid, uint32 *epoch) void ShutdownXLOG(int code, Datum arg) { + /* + * We should have an aux process resource owner to use, and we should not + * be in a transaction that's installed some other resowner. + */ + Assert(AuxProcessResourceOwner != NULL); + Assert(CurrentResourceOwner == NULL || + CurrentResourceOwner == AuxProcessResourceOwner); + CurrentResourceOwner = AuxProcessResourceOwner; + /* Don't be chatty in standalone mode */ ereport(IsPostmasterEnvironment ? LOG : NOTICE, (errmsg("shutting down"))); @@ -8480,7 +8659,7 @@ UpdateCheckPointDistanceEstimate(uint64 nbytes) * more. * * When checkpoints are triggered by max_wal_size, this should converge to - * CheckpointSegments * XLOG_SEG_SIZE, + * CheckpointSegments * wal_segment_size, * * Note: This doesn't pay any attention to what caused the checkpoint. * Checkpoints triggered manually with CHECKPOINT command, or by e.g. @@ -8535,6 +8714,7 @@ CreateCheckPoint(int flags) bool shutdown; CheckPoint checkPoint; XLogRecPtr recptr; + XLogSegNo _logSegNo; XLogCtlInsert *Insert = &XLogCtl->Insert; uint32 freespace; XLogRecPtr PriorRedoPtr; @@ -8646,7 +8826,7 @@ CreateCheckPoint(int flags) LWLockRelease(CheckpointLock); END_CRIT_SECTION(); ereport(DEBUG1, - (errmsg("checkpoint skipped due to an idle system"))); + (errmsg("checkpoint skipped because system is idle"))); return; } } @@ -8679,7 +8859,7 @@ CreateCheckPoint(int flags) freespace = INSERT_FREESPACE(curInsert); if (freespace == 0) { - if (curInsert % XLogSegSize == 0) + if (XLogSegmentOffset(curInsert, wal_segment_size) == 0) curInsert += SizeOfXLogLongPHD; else curInsert += SizeOfXLogShortPHD; @@ -8854,8 +9034,8 @@ CreateCheckPoint(int flags) (errmsg("concurrent write-ahead log activity while database system is shutting down"))); /* - * Remember the prior checkpoint's redo pointer, used later to determine - * the point where the log can be truncated. + * Remember the prior checkpoint's redo ptr for + * UpdateCheckPointDistanceEstimate() */ PriorRedoPtr = ControlFile->checkPointCopy.redo; @@ -8865,7 +9045,6 @@ CreateCheckPoint(int flags) LWLockAcquire(ControlFileLock, LW_EXCLUSIVE); if (shutdown) ControlFile->state = DB_SHUTDOWNED; - ControlFile->prevCheckPoint = ControlFile->checkPoint; ControlFile->checkPoint = ProcLastRecPtr; ControlFile->checkPointCopy = checkPoint; ControlFile->time = (pg_time_t) time(NULL); @@ -8903,21 +9082,20 @@ CreateCheckPoint(int flags) smgrpostckpt(); /* - * Delete old log files (those no longer needed even for previous - * checkpoint or the standbys in XLOG streaming). + * Update the average distance between checkpoints if the prior checkpoint + * exists. */ if (PriorRedoPtr != InvalidXLogRecPtr) - { - XLogSegNo _logSegNo; - - /* Update the average distance between checkpoints. 
*/ UpdateCheckPointDistanceEstimate(RedoRecPtr - PriorRedoPtr); - XLByteToSeg(PriorRedoPtr, _logSegNo); - KeepLogSeg(recptr, &_logSegNo); - _logSegNo--; - RemoveOldXlogFiles(_logSegNo, PriorRedoPtr, recptr); - } + /* + * Delete old log files, those no longer needed for last checkpoint to + * prevent the disk holding the xlog from growing full. + */ + XLByteToSeg(RedoRecPtr, _logSegNo, wal_segment_size); + KeepLogSeg(recptr, &_logSegNo); + _logSegNo--; + RemoveOldXlogFiles(_logSegNo, RedoRecPtr, recptr); /* * Make more log segments if needed. (Do this after recycling old log @@ -9083,6 +9261,11 @@ CreateRestartPoint(int flags) XLogRecPtr lastCheckPointEndPtr; CheckPoint lastCheckPoint; XLogRecPtr PriorRedoPtr; + XLogRecPtr receivePtr; + XLogRecPtr replayPtr; + TimeLineID replayTLI; + XLogRecPtr endptr; + XLogSegNo _logSegNo; TimestampTz xtime; /* @@ -9179,8 +9362,8 @@ CreateRestartPoint(int flags) CheckPointGuts(lastCheckPoint.redo, flags); /* - * Remember the prior checkpoint's redo pointer, used later to determine - * the point at which we can truncate the log. + * Remember the prior checkpoint's redo ptr for + * UpdateCheckPointDistanceEstimate() */ PriorRedoPtr = ControlFile->checkPointCopy.redo; @@ -9194,7 +9377,6 @@ CreateRestartPoint(int flags) if (ControlFile->state == DB_IN_ARCHIVE_RECOVERY && ControlFile->checkPointCopy.redo < lastCheckPoint.redo) { - ControlFile->prevCheckPoint = ControlFile->checkPoint; ControlFile->checkPoint = lastCheckPointRecPtr; ControlFile->checkPointCopy = lastCheckPoint; ControlFile->time = (pg_time_t) time(NULL); @@ -9226,68 +9408,60 @@ CreateRestartPoint(int flags) LWLockRelease(ControlFileLock); /* - * Delete old log files (those no longer needed even for previous - * checkpoint/restartpoint) to prevent the disk holding the xlog from - * growing full. + * Update the average distance between checkpoints/restartpoints if the + * prior checkpoint exists. */ if (PriorRedoPtr != InvalidXLogRecPtr) - { - XLogRecPtr receivePtr; - XLogRecPtr replayPtr; - TimeLineID replayTLI; - XLogRecPtr endptr; - XLogSegNo _logSegNo; - - /* Update the average distance between checkpoints/restartpoints. */ UpdateCheckPointDistanceEstimate(RedoRecPtr - PriorRedoPtr); - XLByteToSeg(PriorRedoPtr, _logSegNo); - - /* - * Get the current end of xlog replayed or received, whichever is - * later. - */ - receivePtr = GetWalRcvWriteRecPtr(NULL, NULL); - replayPtr = GetXLogReplayRecPtr(&replayTLI); - endptr = (receivePtr < replayPtr) ? replayPtr : receivePtr; + /* + * Delete old log files, those no longer needed for last restartpoint to + * prevent the disk holding the xlog from growing full. + */ + XLByteToSeg(RedoRecPtr, _logSegNo, wal_segment_size); - KeepLogSeg(endptr, &_logSegNo); - _logSegNo--; + /* + * Retreat _logSegNo using the current end of xlog replayed or received, + * whichever is later. + */ + receivePtr = GetWalRcvWriteRecPtr(NULL, NULL); + replayPtr = GetXLogReplayRecPtr(&replayTLI); + endptr = (receivePtr < replayPtr) ? replayPtr : receivePtr; + KeepLogSeg(endptr, &_logSegNo); + _logSegNo--; - /* - * Try to recycle segments on a useful timeline. If we've been - * promoted since the beginning of this restartpoint, use the new - * timeline chosen at end of recovery (RecoveryInProgress() sets - * ThisTimeLineID in that case). If we're still in recovery, use the - * timeline we're currently replaying. 
- * - * There is no guarantee that the WAL segments will be useful on the - * current timeline; if recovery proceeds to a new timeline right - * after this, the pre-allocated WAL segments on this timeline will - * not be used, and will go wasted until recycled on the next - * restartpoint. We'll live with that. - */ - if (RecoveryInProgress()) - ThisTimeLineID = replayTLI; + /* + * Try to recycle segments on a useful timeline. If we've been promoted + * since the beginning of this restartpoint, use the new timeline chosen + * at end of recovery (RecoveryInProgress() sets ThisTimeLineID in that + * case). If we're still in recovery, use the timeline we're currently + * replaying. + * + * There is no guarantee that the WAL segments will be useful on the + * current timeline; if recovery proceeds to a new timeline right after + * this, the pre-allocated WAL segments on this timeline will not be used, + * and will go wasted until recycled on the next restartpoint. We'll live + * with that. + */ + if (RecoveryInProgress()) + ThisTimeLineID = replayTLI; - RemoveOldXlogFiles(_logSegNo, PriorRedoPtr, endptr); + RemoveOldXlogFiles(_logSegNo, RedoRecPtr, endptr); - /* - * Make more log segments if needed. (Do this after recycling old log - * segments, since that may supply some of the needed files.) - */ - PreallocXlogFiles(endptr); + /* + * Make more log segments if needed. (Do this after recycling old log + * segments, since that may supply some of the needed files.) + */ + PreallocXlogFiles(endptr); - /* - * ThisTimeLineID is normally not set when we're still in recovery. - * However, recycling/preallocating segments above needed - * ThisTimeLineID to determine which timeline to install the segments - * on. Reset it now, to restore the normal state of affairs for - * debugging purposes. - */ - if (RecoveryInProgress()) - ThisTimeLineID = 0; - } + /* + * ThisTimeLineID is normally not set when we're still in recovery. + * However, recycling/preallocating segments above needed ThisTimeLineID + * to determine which timeline to install the segments on. Reset it now, + * to restore the normal state of affairs for debugging purposes. + */ + if (RecoveryInProgress()) + ThisTimeLineID = 0; /* * Truncate pg_subtrans if possible. We can throw away all data before @@ -9306,7 +9480,7 @@ CreateRestartPoint(int flags) ereport((log_checkpoints ? LOG : DEBUG2), (errmsg("recovery restart point at %X/%X", (uint32) (lastCheckPoint.redo >> 32), (uint32) lastCheckPoint.redo), - xtime ? errdetail("last completed transaction was at log time %s", + xtime ? errdetail("Last completed transaction was at log time %s.", timestamptz_to_str(xtime)) : 0)); LWLockRelease(CheckpointLock); @@ -9336,7 +9510,7 @@ KeepLogSeg(XLogRecPtr recptr, XLogSegNo *logSegNo) XLogSegNo segno; XLogRecPtr keep; - XLByteToSeg(recptr, segno); + XLByteToSeg(recptr, segno, wal_segment_size); keep = XLogGetReplicationSlotMinimumLSN(); /* compute limit for wal_keep_segments first */ @@ -9354,7 +9528,7 @@ KeepLogSeg(XLogRecPtr recptr, XLogSegNo *logSegNo) { XLogSegNo slotSegNo; - XLByteToSeg(keep, slotSegNo); + XLByteToSeg(keep, slotSegNo, wal_segment_size); if (slotSegNo <= 0) segno = 1; @@ -9510,6 +9684,7 @@ void UpdateFullPageWrites(void) { XLogCtlInsert *Insert = &XLogCtl->Insert; + bool recoveryInProgress; /* * Do nothing if full_page_writes has not been changed. 
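/*
 * Approximate sketch of the cutoff computed by KeepLogSeg() and its
 * callers above: start from the segment holding the current end of WAL,
 * hold back wal_keep_segments, and never advance past the minimum LSN a
 * replication slot still needs.  Parts of the clamping logic are elided
 * in the hunk above, so treat this as an approximation rather than the
 * exact backend code.
 */
#include <stdint.h>

static uint64_t
keep_log_seg(uint64_t endptr, uint64_t slot_min_lsn,
			 int wal_keep_segments, uint64_t seg_size)
{
	uint64_t	segno = endptr / seg_size;

	/* hold back wal_keep_segments full segments, if configured */
	if (wal_keep_segments > 0)
	{
		if (segno <= (uint64_t) wal_keep_segments)
			segno = 1;
		else
			segno -= wal_keep_segments;
	}

	/* never remove WAL that a replication slot still needs */
	if (slot_min_lsn != 0)
	{
		uint64_t	slot_segno = slot_min_lsn / seg_size;

		if (slot_segno == 0)
			segno = 1;
		else if (slot_segno < segno)
			segno = slot_segno;
	}

	/* the caller then decrements once and removes older segments */
	return segno;
}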
@@ -9521,6 +9696,13 @@ UpdateFullPageWrites(void) if (fullPageWrites == Insert->fullPageWrites) return; + /* + * Perform this outside critical section so that the WAL insert + * initialization done by RecoveryInProgress() doesn't trigger an + * assertion failure. + */ + recoveryInProgress = RecoveryInProgress(); + START_CRIT_SECTION(); /* @@ -9541,7 +9723,7 @@ UpdateFullPageWrites(void) * Write an XLOG_FPW_CHANGE record. This allows us to keep track of * full_page_writes during archive recovery, if required. */ - if (XLogStandbyInfoActive() && !RecoveryInProgress()) + if (XLogStandbyInfoActive() && !recoveryInProgress) { XLogBeginInsert(); XLogRegisterData((char *) (&fullPageWrites), sizeof(bool)); @@ -9743,11 +9925,20 @@ xlog_redo(XLogReaderState *record) checkPoint.nextXid)) ShmemVariableCache->nextXid = checkPoint.nextXid; LWLockRelease(XidGenLock); - /* ... but still treat OID counter as exact */ - LWLockAcquire(OidGenLock, LW_EXCLUSIVE); - ShmemVariableCache->nextOid = checkPoint.nextOid; - ShmemVariableCache->oidCount = 0; - LWLockRelease(OidGenLock); + + /* + * We ignore the nextOid counter in an ONLINE checkpoint, preferring + * to track OID assignment through XLOG_NEXTOID records. The nextOid + * counter is from the start of the checkpoint and might well be stale + * compared to later XLOG_NEXTOID records. We could try to take the + * maximum of the nextOid counter and our latest value, but since + * there's no particular guarantee about the speed with which the OID + * counter wraps around, that's a risky thing to do. In any case, + * users of the nextOid counter are required to avoid assignment of + * duplicates, so that a somewhat out-of-date value should be safe. + */ + + /* Handle multixact */ MultiXactAdvanceNextMXact(checkPoint.nextMulti, checkPoint.nextMultiOffset); @@ -9885,11 +10076,16 @@ xlog_redo(XLogReaderState *record) * Update minRecoveryPoint to ensure that if recovery is aborted, we * recover back up to this point before allowing hot standby again. * This is important if the max_* settings are decreased, to ensure - * you don't run queries against the WAL preceding the change. + * you don't run queries against the WAL preceding the change. The + * local copies cannot be updated as long as crash recovery is + * happening and we expect all the WAL to be replayed. 
*/ - minRecoveryPoint = ControlFile->minRecoveryPoint; - minRecoveryPointTLI = ControlFile->minRecoveryPointTLI; - if (minRecoveryPoint != 0 && minRecoveryPoint < lsn) + if (InArchiveRecovery) + { + minRecoveryPoint = ControlFile->minRecoveryPoint; + minRecoveryPointTLI = ControlFile->minRecoveryPointTLI; + } + if (minRecoveryPoint != InvalidXLogRecPtr && minRecoveryPoint < lsn) { ControlFile->minRecoveryPoint = lsn; ControlFile->minRecoveryPointTLI = ThisTimeLineID; @@ -10073,7 +10269,7 @@ assign_xlog_sync_method(int new_sync_method, void *extra) if (pg_fsync(openLogFile) != 0) ereport(PANIC, (errcode_for_file_access(), - errmsg("could not fsync log segment %s: %m", + errmsg("could not fsync file \"%s\": %m", XLogFileNameP(ThisTimeLineID, openLogSegNo)))); pgstat_report_wait_end(); if (get_sync_bit(sync_method) != get_sync_bit(new_sync_method)) @@ -10092,13 +10288,14 @@ assign_xlog_sync_method(int new_sync_method, void *extra) void issue_xlog_fsync(int fd, XLogSegNo segno) { + pgstat_report_wait_start(WAIT_EVENT_WAL_SYNC); switch (sync_method) { case SYNC_METHOD_FSYNC: if (pg_fsync_no_writethrough(fd) != 0) ereport(PANIC, (errcode_for_file_access(), - errmsg("could not fsync log file %s: %m", + errmsg("could not fsync file \"%s\": %m", XLogFileNameP(ThisTimeLineID, segno)))); break; #ifdef HAVE_FSYNC_WRITETHROUGH @@ -10106,7 +10303,7 @@ issue_xlog_fsync(int fd, XLogSegNo segno) if (pg_fsync_writethrough(fd) != 0) ereport(PANIC, (errcode_for_file_access(), - errmsg("could not fsync write-through log file %s: %m", + errmsg("could not fsync write-through file \"%s\": %m", XLogFileNameP(ThisTimeLineID, segno)))); break; #endif @@ -10115,7 +10312,7 @@ issue_xlog_fsync(int fd, XLogSegNo segno) if (pg_fdatasync(fd) != 0) ereport(PANIC, (errcode_for_file_access(), - errmsg("could not fdatasync log file %s: %m", + errmsg("could not fdatasync file \"%s\": %m", XLogFileNameP(ThisTimeLineID, segno)))); break; #endif @@ -10127,6 +10324,7 @@ issue_xlog_fsync(int fd, XLogSegNo segno) elog(PANIC, "unrecognized wal_sync_method: %d", sync_method); break; } + pgstat_report_wait_end(); } /* @@ -10137,7 +10335,7 @@ XLogFileNameP(TimeLineID tli, XLogSegNo segno) { char *result = palloc(MAXFNAMELEN); - XLogFileName(result, tli, segno); + XLogFileName(result, tli, segno, wal_segment_size); return result; } @@ -10181,7 +10379,7 @@ XLogFileNameP(TimeLineID tli, XLogSegNo segno) */ XLogRecPtr do_pg_start_backup(const char *backupidstr, bool fast, TimeLineID *starttli_p, - StringInfo labelfile, DIR *tblspcdir, List **tablespaces, + StringInfo labelfile, List **tablespaces, StringInfo tblspcmapfile, bool infotbssize, bool needtblspcmapfile) { @@ -10272,6 +10470,7 @@ do_pg_start_backup(const char *backupidstr, bool fast, TimeLineID *starttli_p, PG_ENSURE_ERROR_CLEANUP(pg_start_backup_callback, (Datum) BoolGetDatum(exclusive)); { bool gotUniqueStartpoint = false; + DIR *tblspcdir; struct dirent *de; tablespaceinfo *ti; int datadirpathlen; @@ -10391,8 +10590,8 @@ do_pg_start_backup(const char *backupidstr, bool fast, TimeLineID *starttli_p, WALInsertLockRelease(); } while (!gotUniqueStartpoint); - XLByteToSeg(startpoint, _logSegNo); - XLogFileName(xlogfilename, starttli, _logSegNo); + XLByteToSeg(startpoint, _logSegNo, wal_segment_size); + XLogFileName(xlogfilename, starttli, _logSegNo, wal_segment_size); /* * Construct tablespace_map file @@ -10403,6 +10602,7 @@ do_pg_start_backup(const char *backupidstr, bool fast, TimeLineID *starttli_p, datadirpathlen = strlen(DataDir); /* Collect information about all tablespaces 
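/*
 * Sketch of the instrumentation pattern added to issue_xlog_fsync()
 * above: the whole sync-method dispatch sits inside one wait-event
 * bracket, so every branch is reported.  The report_* functions are
 * stand-ins for pgstat_report_wait_start()/pgstat_report_wait_end(), and
 * only two of the sync methods are shown.
 */
#include <unistd.h>

enum wal_sync_method
{
	WAL_SYNC_FSYNC,
	WAL_SYNC_FDATASYNC
};

static void report_wait_start(const char *event) { (void) event; }
static void report_wait_end(void) { }

static int
flush_wal_fd(int fd, enum wal_sync_method method)
{
	int			rc = 0;

	report_wait_start("WALSync");
	switch (method)
	{
		case WAL_SYNC_FSYNC:
			rc = fsync(fd);
			break;
		case WAL_SYNC_FDATASYNC:
			rc = fdatasync(fd);
			break;
	}
	report_wait_end();
	return rc;					/* caller reports "could not fsync file" */
}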
*/ + tblspcdir = AllocateDir("pg_tblspc"); while ((de = ReadDir(tblspcdir, "pg_tblspc")) != NULL) { char fullpath[MAXPGPATH + 10]; @@ -10451,7 +10651,6 @@ do_pg_start_backup(const char *backupidstr, bool fast, TimeLineID *starttli_p, appendStringInfoChar(&buflinkpath, *s++); } - /* * Relpath holds the relative path of the tablespace directory * when it's located within PGDATA, or NULL if it's located @@ -10486,6 +10685,7 @@ do_pg_start_backup(const char *backupidstr, bool fast, TimeLineID *starttli_p, errmsg("tablespaces are not supported on this platform"))); #endif } + FreeDir(tblspcdir); /* * Construct backup label file @@ -10508,6 +10708,7 @@ do_pg_start_backup(const char *backupidstr, bool fast, TimeLineID *starttli_p, backup_started_in_recovery ? "standby" : "master"); appendStringInfo(labelfile, "START TIME: %s\n", strfbuf); appendStringInfo(labelfile, "LABEL: %s\n", backupidstr); + appendStringInfo(labelfile, "START TIMELINE: %u\n", starttli); /* * Okay, write the file, or return its contents to caller. @@ -10601,13 +10802,19 @@ do_pg_start_backup(const char *backupidstr, bool fast, TimeLineID *starttli_p, /* * Mark that start phase has correctly finished for an exclusive backup. * Session-level locks are updated as well to reflect that state. + * + * Note that CHECK_FOR_INTERRUPTS() must not occur while updating backup + * counters and session-level lock. Otherwise they can be updated + * inconsistently, and which might cause do_pg_abort_backup() to fail. */ if (exclusive) { WALInsertLockAcquireExclusive(); XLogCtl->Insert.exclusiveBackupState = EXCLUSIVE_BACKUP_IN_PROGRESS; - WALInsertLockRelease(); + + /* Set session-level lock */ sessionBackupState = SESSION_BACKUP_EXCLUSIVE; + WALInsertLockRelease(); } else sessionBackupState = SESSION_BACKUP_NON_EXCLUSIVE; @@ -10811,7 +11018,11 @@ do_pg_stop_backup(char *labelfile, bool waitforarchive, TimeLineID *stoptli_p) } /* - * OK to update backup counters and forcePageWrites + * OK to update backup counters, forcePageWrites and session-level lock. + * + * Note that CHECK_FOR_INTERRUPTS() must not occur while updating them. + * Otherwise they can be updated inconsistently, and which might cause + * do_pg_abort_backup() to fail. */ WALInsertLockAcquireExclusive(); if (exclusive) @@ -10835,11 +11046,20 @@ do_pg_stop_backup(char *labelfile, bool waitforarchive, TimeLineID *stoptli_p) { XLogCtl->Insert.forcePageWrites = false; } - WALInsertLockRelease(); - /* Clean up session-level lock */ + /* + * Clean up session-level lock. + * + * You might think that WALInsertLockRelease() can be called before + * cleaning up session-level lock because session-level lock doesn't need + * to be protected with WAL insertion lock. But since + * CHECK_FOR_INTERRUPTS() can occur in it, session-level lock must be + * cleaned up before it. + */ sessionBackupState = SESSION_BACKUP_NONE; + WALInsertLockRelease(); + /* * Read and parse the START WAL LOCATION line (this code is pretty crude, * but we are not expecting any variability in the file format). 
@@ -10943,8 +11163,8 @@ do_pg_stop_backup(char *labelfile, bool waitforarchive, TimeLineID *stoptli_p) */ RequestXLogSwitch(false); - XLByteToPrevSeg(stoppoint, _logSegNo); - XLogFileName(stopxlogfilename, stoptli, _logSegNo); + XLByteToPrevSeg(stoppoint, _logSegNo, wal_segment_size); + XLogFileName(stopxlogfilename, stoptli, _logSegNo, wal_segment_size); /* Use the log timezone here, not the session timezone */ stamp_time = (pg_time_t) time(NULL); @@ -10955,9 +11175,9 @@ do_pg_stop_backup(char *labelfile, bool waitforarchive, TimeLineID *stoptli_p) /* * Write the backup history file */ - XLByteToSeg(startpoint, _logSegNo); + XLByteToSeg(startpoint, _logSegNo, wal_segment_size); BackupHistoryFilePath(histfilepath, stoptli, _logSegNo, - (uint32) (startpoint % XLogSegSize)); + startpoint, wal_segment_size); fp = AllocateFile(histfilepath, "w"); if (!fp) ereport(ERROR, @@ -10968,9 +11188,14 @@ do_pg_stop_backup(char *labelfile, bool waitforarchive, TimeLineID *stoptli_p) (uint32) (startpoint >> 32), (uint32) startpoint, startxlogfilename); fprintf(fp, "STOP WAL LOCATION: %X/%X (file %s)\n", (uint32) (stoppoint >> 32), (uint32) stoppoint, stopxlogfilename); - /* transfer remaining lines from label to history file */ + + /* + * Transfer remaining lines including label and start timeline to + * history file. + */ fprintf(fp, "%s", remaining); fprintf(fp, "STOP TIME: %s\n", strfbuf); + fprintf(fp, "STOP TIMELINE: %u\n", stoptli); if (fflush(fp) || ferror(fp) || FreeFile(fp)) ereport(ERROR, (errcode_for_file_access(), @@ -11011,12 +11236,12 @@ do_pg_stop_backup(char *labelfile, bool waitforarchive, TimeLineID *stoptli_p) ((!backup_started_in_recovery && XLogArchivingActive()) || (backup_started_in_recovery && XLogArchivingAlways()))) { - XLByteToPrevSeg(stoppoint, _logSegNo); - XLogFileName(lastxlogfilename, stoptli, _logSegNo); + XLByteToPrevSeg(stoppoint, _logSegNo, wal_segment_size); + XLogFileName(lastxlogfilename, stoptli, _logSegNo, wal_segment_size); - XLByteToSeg(startpoint, _logSegNo); + XLByteToSeg(startpoint, _logSegNo, wal_segment_size); BackupHistoryFileName(histfilename, stoptli, _logSegNo, - (uint32) (startpoint % XLogSegSize)); + startpoint, wal_segment_size); seconds_before_warning = 60; waits = 0; @@ -11077,8 +11302,16 @@ do_pg_stop_backup(char *labelfile, bool waitforarchive, TimeLineID *stoptli_p) void do_pg_abort_backup(void) { + /* + * Quick exit if session is not keeping around a non-exclusive backup + * already started. + */ + if (sessionBackupState == SESSION_BACKUP_NONE) + return; + WALInsertLockAcquireExclusive(); Assert(XLogCtl->Insert.nonExclusiveBackups > 0); + Assert(sessionBackupState == SESSION_BACKUP_NON_EXCLUSIVE); XLogCtl->Insert.nonExclusiveBackups--; if (XLogCtl->Insert.exclusiveBackupState == EXCLUSIVE_BACKUP_NONE && @@ -11162,22 +11395,25 @@ GetOldestRestartPoint(XLogRecPtr *oldrecptr, TimeLineID *oldtli) * later than the start of the dump, and so if we rely on it as the start * point, we will fail to restore a consistent database state. * - * Returns TRUE if a backup_label was found (and fills the checkpoint + * Returns true if a backup_label was found (and fills the checkpoint * location and its REDO location into *checkPointLoc and RedoStartLSN, - * respectively); returns FALSE if not. If this backup_label came from a - * streamed backup, *backupEndRequired is set to TRUE. If this backup_label - * was created during recovery, *backupFromStandby is set to TRUE. + * respectively); returns false if not. 
If this backup_label came from a + * streamed backup, *backupEndRequired is set to true. If this backup_label + * was created during recovery, *backupFromStandby is set to true. */ static bool read_backup_label(XLogRecPtr *checkPointLoc, bool *backupEndRequired, bool *backupFromStandby) { char startxlogfilename[MAXFNAMELEN]; - TimeLineID tli; + TimeLineID tli_from_walseg, + tli_from_file; FILE *lfp; char ch; char backuptype[20]; char backupfrom[20]; + char backuplabel[MAXPGPATH]; + char backuptime[128]; uint32 hi, lo; @@ -11204,7 +11440,7 @@ read_backup_label(XLogRecPtr *checkPointLoc, bool *backupEndRequired, * format). */ if (fscanf(lfp, "START WAL LOCATION: %X/%X (file %08X%16s)%c", - &hi, &lo, &tli, startxlogfilename, &ch) != 5 || ch != '\n') + &hi, &lo, &tli_from_walseg, startxlogfilename, &ch) != 5 || ch != '\n') ereport(FATAL, (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE), errmsg("invalid data in file \"%s\"", BACKUP_LABEL_FILE))); @@ -11233,6 +11469,43 @@ read_backup_label(XLogRecPtr *checkPointLoc, bool *backupEndRequired, *backupFromStandby = true; } + /* + * Parse START TIME and LABEL. Those are not mandatory fields for recovery + * but checking for their presence is useful for debugging and the next + * sanity checks. Cope also with the fact that the result buffers have a + * pre-allocated size, hence if the backup_label file has been generated + * with strings longer than the maximum assumed here an incorrect parsing + * happens. That's fine as only minor consistency checks are done + * afterwards. + */ + if (fscanf(lfp, "START TIME: %127[^\n]\n", backuptime) == 1) + ereport(DEBUG1, + (errmsg("backup time %s in file \"%s\"", + backuptime, BACKUP_LABEL_FILE))); + + if (fscanf(lfp, "LABEL: %1023[^\n]\n", backuplabel) == 1) + ereport(DEBUG1, + (errmsg("backup label %s in file \"%s\"", + backuplabel, BACKUP_LABEL_FILE))); + + /* + * START TIMELINE is new as of 11. Its parsing is not mandatory, still use + * it as a sanity check if present. + */ + if (fscanf(lfp, "START TIMELINE: %u\n", &tli_from_file) == 1) + { + if (tli_from_walseg != tli_from_file) + ereport(FATAL, + (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE), + errmsg("invalid data in file \"%s\"", BACKUP_LABEL_FILE), + errdetail("Timeline ID parsed is %u, but expected %u", + tli_from_file, tli_from_walseg))); + + ereport(DEBUG1, + (errmsg("backup timeline %u in file \"%s\"", + tli_from_file, BACKUP_LABEL_FILE))); + } + if (ferror(lfp) || FreeFile(lfp)) ereport(FATAL, (errcode_for_file_access(), @@ -11249,8 +11522,8 @@ read_backup_label(XLogRecPtr *checkPointLoc, bool *backupEndRequired, * recovering from a backup dump file, and we therefore need to create symlinks * as per the information present in tablespace_map file. * - * Returns TRUE if a tablespace_map file was found (and fills the link - * information for all the tablespace links present in file); returns FALSE + * Returns true if a tablespace_map file was found (and fills the link + * information for all the tablespace links present in file); returns false * if not. 
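/*
 * Sketch of the cross-check added to read_backup_label() above: the
 * timeline parsed from the "START WAL LOCATION" file name must agree with
 * the optional "START TIMELINE" line when that line is present.  The
 * input here is assumed to contain only those two lines, so it is a
 * trimmed stand-in for a real backup_label file.
 */
#include <stdbool.h>
#include <stdio.h>

static bool
backup_label_tli_consistent(FILE *lfp)
{
	unsigned int hi,
				lo,
				tli_from_walseg,
				tli_from_file;
	char		startxlogfilename[64];
	char		ch;

	if (fscanf(lfp, "START WAL LOCATION: %X/%X (file %08X%16s)%c",
			   &hi, &lo, &tli_from_walseg, startxlogfilename, &ch) != 5 ||
		ch != '\n')
		return false;			/* the mandatory first line is malformed */

	/* the START TIMELINE line is optional; check it only when present */
	if (fscanf(lfp, "START TIMELINE: %u\n", &tli_from_file) == 1 &&
		tli_from_file != tli_from_walseg)
		return false;

	return true;
}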
*/ static bool @@ -11458,15 +11731,17 @@ XLogPageRead(XLogReaderState *xlogreader, XLogRecPtr targetPagePtr, int reqLen, int emode = private->emode; uint32 targetPageOff; XLogSegNo targetSegNo PG_USED_FOR_ASSERTS_ONLY; + int r; - XLByteToSeg(targetPagePtr, targetSegNo); - targetPageOff = targetPagePtr % XLogSegSize; + XLByteToSeg(targetPagePtr, targetSegNo, wal_segment_size); + targetPageOff = XLogSegmentOffset(targetPagePtr, wal_segment_size); /* * See if we need to switch to a new segment because the requested record * is not in the currently open one. */ - if (readFile >= 0 && !XLByteInSeg(targetPagePtr, readSegNo)) + if (readFile >= 0 && + !XLByteInSeg(targetPagePtr, readSegNo, wal_segment_size)) { /* * Request a restartpoint if we've replayed too much xlog since the @@ -11487,7 +11762,7 @@ XLogPageRead(XLogReaderState *xlogreader, XLogRecPtr targetPagePtr, int reqLen, readSource = 0; } - XLByteToSeg(targetPagePtr, readSegNo); + XLByteToSeg(targetPagePtr, readSegNo, wal_segment_size); retry: /* See if we need to retrieve more data */ @@ -11527,36 +11802,37 @@ XLogPageRead(XLogReaderState *xlogreader, XLogRecPtr targetPagePtr, int reqLen, if (((targetPagePtr) / XLOG_BLCKSZ) != (receivedUpto / XLOG_BLCKSZ)) readLen = XLOG_BLCKSZ; else - readLen = receivedUpto % XLogSegSize - targetPageOff; + readLen = XLogSegmentOffset(receivedUpto, wal_segment_size) - + targetPageOff; } else readLen = XLOG_BLCKSZ; /* Read the requested page */ readOff = targetPageOff; - if (lseek(readFile, (off_t) readOff, SEEK_SET) < 0) - { - char fname[MAXFNAMELEN]; - - XLogFileName(fname, curFileTLI, readSegNo); - ereport(emode_for_corrupt_record(emode, targetPagePtr + reqLen), - (errcode_for_file_access(), - errmsg("could not seek in log segment %s to offset %u: %m", - fname, readOff))); - goto next_record_is_invalid; - } pgstat_report_wait_start(WAIT_EVENT_WAL_READ); - if (read(readFile, readBuf, XLOG_BLCKSZ) != XLOG_BLCKSZ) + r = pg_pread(readFile, readBuf, XLOG_BLCKSZ, (off_t) readOff); + if (r != XLOG_BLCKSZ) { char fname[MAXFNAMELEN]; + int save_errno = errno; pgstat_report_wait_end(); - XLogFileName(fname, curFileTLI, readSegNo); - ereport(emode_for_corrupt_record(emode, targetPagePtr + reqLen), - (errcode_for_file_access(), - errmsg("could not read from log segment %s, offset %u: %m", - fname, readOff))); + XLogFileName(fname, curFileTLI, readSegNo, wal_segment_size); + if (r < 0) + { + errno = save_errno; + ereport(emode_for_corrupt_record(emode, targetPagePtr + reqLen), + (errcode_for_file_access(), + errmsg("could not read from log segment %s, offset %u: %m", + fname, readOff))); + } + else + ereport(emode_for_corrupt_record(emode, targetPagePtr + reqLen), + (errcode(ERRCODE_DATA_CORRUPTED), + errmsg("could not read from log segment %s, offset %u: read %d of %zu", + fname, readOff, r, (Size) XLOG_BLCKSZ))); goto next_record_is_invalid; } pgstat_report_wait_end(); @@ -11566,6 +11842,40 @@ XLogPageRead(XLogReaderState *xlogreader, XLogRecPtr targetPagePtr, int reqLen, Assert(reqLen <= readLen); *readTLI = curFileTLI; + + /* + * Check the page header immediately, so that we can retry immediately if + * it's not valid. This may seem unnecessary, because XLogReadRecord() + * validates the page header anyway, and would propagate the failure up to + * ReadRecord(), which would retry. However, there's a corner case with + * continuation records, if a record is split across two pages such that + * we would need to read the two pages from different sources. 
For + * example, imagine a scenario where a streaming replica is started up, + * and replay reaches a record that's split across two WAL segments. The + * first page is only available locally, in pg_wal, because it's already + * been recycled in the master. The second page, however, is not present + * in pg_wal, and we should stream it from the master. There is a recycled + * WAL segment present in pg_wal, with garbage contents, however. We would + * read the first page from the local WAL segment, but when reading the + * second page, we would read the bogus, recycled, WAL segment. If we + * didn't catch that case here, we would never recover, because + * ReadRecord() would retry reading the whole record from the beginning. + * + * Of course, this only catches errors in the page header, which is what + * happens in the case of a recycled WAL segment. Other kinds of errors or + * corruption still has the same problem. But this at least fixes the + * common case, which can happen as part of normal operation. + * + * Validating the page header is cheap enough that doing it twice + * shouldn't be a big deal from a performance point of view. + */ + if (!XLogReaderValidatePageHeader(xlogreader, targetPagePtr, readBuf)) + { + /* reset any error XLogReaderValidatePageHeader() might have set */ + xlogreader->errormsg_buf[0] = '\0'; + goto next_record_is_invalid; + } + return readLen; next_record_is_invalid: @@ -11682,7 +11992,7 @@ WaitForWALToBecomeAvailable(XLogRecPtr RecPtr, bool randAccess, * If primary_conninfo is set, launch walreceiver to try * to stream the missing WAL. * - * If fetching_ckpt is TRUE, RecPtr points to the initial + * If fetching_ckpt is true, RecPtr points to the initial * checkpoint location. In that case, we use RedoStartLSN * as the streaming start position instead of RecPtr, so * that when we later jump backwards to start redo at @@ -11700,12 +12010,18 @@ WaitForWALToBecomeAvailable(XLogRecPtr RecPtr, bool randAccess, } else { - ptr = tliRecPtr; + ptr = RecPtr; + + /* + * Use the record begin position to determine the + * TLI, rather than the position we're reading. + */ tli = tliOfPointInHistory(tliRecPtr, expectedTLEs); if (curFileTLI > 0 && tli < curFileTLI) elog(ERROR, "according to history file, WAL location %X/%X belongs to timeline %u, but previous recovered WAL file came from timeline %u", - (uint32) (ptr >> 32), (uint32) ptr, + (uint32) (tliRecPtr >> 32), + (uint32) tliRecPtr, tli, curFileTLI); } curFileTLI = tli; diff --git a/src/backend/access/transam/xlogarchive.c b/src/backend/access/transam/xlogarchive.c index 7afb73579b..d40317168e 100644 --- a/src/backend/access/transam/xlogarchive.c +++ b/src/backend/access/transam/xlogarchive.c @@ -4,7 +4,7 @@ * Functions for archiving WAL files and restoring from the archive. * * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/backend/access/transam/xlogarchive.c @@ -33,11 +33,11 @@ * Attempt to retrieve the specified file from off-line archival storage. * If successful, fill "path" with its complete path (note that this will be * a temp file name that doesn't follow the normal naming convention), and - * return TRUE. + * return true. * * If not successful, fill "path" with the name of the normal on-line file * (which may or may not actually exist, but we'll try to use it), and return - * FALSE. + * false. 
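Several hunks in this area thread wal_segment_size through XLByteToSeg, XLogSegmentOffset and XLogFileName instead of relying on a compile-time XLogSegSize. The standalone sketch below re-creates the arithmetic those macros appear to perform for one example LSN; the constants are illustrative and the real definitions live in xlog_internal.h.

#include <stdint.h>
#include <stdio.h>

/* 4 GB of WAL addresses per "xlog ID", split into segments of segsize bytes. */
#define SEGMENTS_PER_XLOGID(segsize)  (UINT64_C(0x100000000) / (segsize))

int main(void)
{
    uint64_t lsn = UINT64_C(0x000000010000A1B2);   /* example LSN 1/0000A1B2 */
    uint64_t wal_segment_size = 16 * 1024 * 1024;  /* 16 MB, the default */
    uint32_t tli = 1;

    uint64_t segno  = lsn / wal_segment_size;      /* what XLByteToSeg yields */
    uint64_t offset = lsn % wal_segment_size;      /* what XLogSegmentOffset yields */
    char     fname[32];

    /* Segment file name: TLI, then the segment number split into two halves. */
    snprintf(fname, sizeof(fname), "%08X%08X%08X",
             tli,
             (uint32_t) (segno / SEGMENTS_PER_XLOGID(wal_segment_size)),
             (uint32_t) (segno % SEGMENTS_PER_XLOGID(wal_segment_size)));

    printf("segno=%llu offset=%llu file=%s\n",
           (unsigned long long) segno, (unsigned long long) offset, fname);
    return 0;
}

With the default 16 MB segments this prints segment 256, offset 0xA1B2 and file name 000000010000000100000000, which is why changing the segment size at initdb time forces every caller above to pass wal_segment_size explicitly.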
* * For fixed-size files, the caller may pass the expected size as an * additional crosscheck on successful recovery. If the file size is not @@ -134,13 +134,14 @@ RestoreArchivedFile(char *path, const char *xlogfname, if (cleanupEnabled) { GetOldestRestartPoint(&restartRedoPtr, &restartTli); - XLByteToSeg(restartRedoPtr, restartSegNo); - XLogFileName(lastRestartPointFname, restartTli, restartSegNo); + XLByteToSeg(restartRedoPtr, restartSegNo, wal_segment_size); + XLogFileName(lastRestartPointFname, restartTli, restartSegNo, + wal_segment_size); /* we shouldn't need anything earlier than last restart point */ Assert(strcmp(lastRestartPointFname, xlogfname) <= 0); } else - XLogFileName(lastRestartPointFname, 0, 0L); + XLogFileName(lastRestartPointFname, 0, 0L, wal_segment_size); /* * construct the command to be executed @@ -326,7 +327,7 @@ RestoreArchivedFile(char *path, const char *xlogfname, * This is currently used for recovery_end_command and archive_cleanup_command. */ void -ExecuteRecoveryCommand(char *command, char *commandName, bool failOnSignal) +ExecuteRecoveryCommand(const char *command, const char *commandName, bool failOnSignal) { char xlogRecoveryCmd[MAXPGPATH]; char lastRestartPointFname[MAXPGPATH]; @@ -347,8 +348,9 @@ ExecuteRecoveryCommand(char *command, char *commandName, bool failOnSignal) * archive, though there is no requirement to do so. */ GetOldestRestartPoint(&restartRedoPtr, &restartTli); - XLByteToSeg(restartRedoPtr, restartSegNo); - XLogFileName(lastRestartPointFname, restartTli, restartSegNo); + XLByteToSeg(restartRedoPtr, restartSegNo, wal_segment_size); + XLogFileName(lastRestartPointFname, restartTli, restartSegNo, + wal_segment_size); /* * construct the command to be executed @@ -420,10 +422,10 @@ ExecuteRecoveryCommand(char *command, char *commandName, bool failOnSignal) /* * A file was restored from the archive under a temporary filename (path), * and now we want to keep it. Rename it under the permanent filename in - * in pg_wal (xlogfname), replacing any existing file with the same name. + * pg_wal (xlogfname), replacing any existing file with the same name. */ void -KeepFileRestoredFromArchive(char *path, char *xlogfname) +KeepFileRestoredFromArchive(const char *path, const char *xlogfname) { char xlogfpath[MAXPGPATH]; bool reload = false; @@ -547,7 +549,7 @@ XLogArchiveNotifySeg(XLogSegNo segno) { char xlog[MAXFNAMELEN]; - XLogFileName(xlog, ThisTimeLineID, segno); + XLogFileName(xlog, ThisTimeLineID, segno, wal_segment_size); XLogArchiveNotify(xlog); } @@ -618,9 +620,16 @@ XLogArchiveCheckDone(const char *xlog) { char archiveStatusPath[MAXPGPATH]; struct stat stat_buf; + bool inRecovery = RecoveryInProgress(); - /* Always deletable if archiving is off */ - if (!XLogArchivingActive()) + /* + * The file is always deletable if archive_mode is "off". On standbys + * archiving is disabled if archive_mode is "on", and enabled with + * "always". On a primary, archiving is enabled if archive_mode is "on" + * or "always". + */ + if (!((XLogArchivingActive() && !inRecovery) || + (XLogArchivingAlways() && inRecovery))) return true; /* First check for .done --- this means archiver is done with it */ diff --git a/src/backend/access/transam/xlogfuncs.c b/src/backend/access/transam/xlogfuncs.c index f9b49ba498..a31adcca5e 100644 --- a/src/backend/access/transam/xlogfuncs.c +++ b/src/backend/access/transam/xlogfuncs.c @@ -7,7 +7,7 @@ * This file contains WAL control and information functions. 
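The new test in XLogArchiveCheckDone above derives "is archiving in effect" from both archive_mode and whether the server is in recovery. Here is a small sketch of that predicate as a pure function, with a truth table printed for every combination; the enum is a stand-in for the real GUC values.

#include <stdbool.h>
#include <stdio.h>

/* Stand-in for the archive_mode GUC values. */
typedef enum { ARCHIVE_OFF, ARCHIVE_ON, ARCHIVE_ALWAYS } ArchiveMode;

/*
 * Mirror of the condition in XLogArchiveCheckDone: archiving is in effect
 * on a primary when archive_mode is "on" or "always", but on a standby
 * only when it is "always".  If archiving is not in effect, the status
 * file (and the segment) is deletable right away.
 */
static bool archiving_in_effect(ArchiveMode mode, bool in_recovery)
{
    bool active = (mode != ARCHIVE_OFF);     /* like XLogArchivingActive() */
    bool always = (mode == ARCHIVE_ALWAYS);  /* like XLogArchivingAlways() */

    return (active && !in_recovery) || (always && in_recovery);
}

int main(void)
{
    const char *names[] = {"off", "on", "always"};

    for (int m = ARCHIVE_OFF; m <= ARCHIVE_ALWAYS; m++)
        for (int rec = 0; rec <= 1; rec++)
            printf("archive_mode=%-6s in_recovery=%d -> %s\n",
                   names[m], rec,
                   archiving_in_effect((ArchiveMode) m, rec)
                   ? "must check .done/.ready"
                   : "deletable immediately");
    return 0;
}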
* * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/backend/access/transam/xlogfuncs.c @@ -16,14 +16,16 @@ */ #include "postgres.h" +#include + #include "access/htup_details.h" #include "access/xlog.h" #include "access/xlog_internal.h" #include "access/xlogutils.h" -#include "catalog/catalog.h" #include "catalog/pg_type.h" #include "funcapi.h" #include "miscadmin.h" +#include "pgstat.h" #include "replication/walreceiver.h" #include "storage/smgr.h" #include "utils/builtins.h" @@ -75,7 +77,6 @@ pg_start_backup(PG_FUNCTION_ARGS) bool exclusive = PG_GETARG_BOOL(2); char *backupidstr; XLogRecPtr startpoint; - DIR *dir; SessionBackupState status = get_backup_status(); backupidstr = text_to_cstring(backupid); @@ -85,16 +86,10 @@ pg_start_backup(PG_FUNCTION_ARGS) (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE), errmsg("a backup is already in progress in this session"))); - /* Make sure we can open the directory with tablespaces in it */ - dir = AllocateDir("pg_tblspc"); - if (!dir) - ereport(ERROR, - (errmsg("could not open directory \"%s\": %m", "pg_tblspc"))); - if (exclusive) { startpoint = do_pg_start_backup(backupidstr, fast, NULL, NULL, - dir, NULL, NULL, false, true); + NULL, NULL, false, true); } else { @@ -110,13 +105,11 @@ pg_start_backup(PG_FUNCTION_ARGS) MemoryContextSwitchTo(oldcontext); startpoint = do_pg_start_backup(backupidstr, fast, NULL, label_file, - dir, NULL, tblspc_map_file, false, true); + NULL, tblspc_map_file, false, true); before_shmem_exit(nonexclusive_base_backup_cleanup, (Datum) 0); } - FreeDir(dir); - PG_RETURN_LSN(startpoint); } @@ -489,8 +482,8 @@ pg_walfile_name_offset(PG_FUNCTION_ARGS) /* * xlogfilename */ - XLByteToPrevSeg(locationpoint, xlogsegno); - XLogFileName(xlogfilename, ThisTimeLineID, xlogsegno); + XLByteToPrevSeg(locationpoint, xlogsegno, wal_segment_size); + XLogFileName(xlogfilename, ThisTimeLineID, xlogsegno, wal_segment_size); values[0] = CStringGetTextDatum(xlogfilename); isnull[0] = false; @@ -498,7 +491,7 @@ pg_walfile_name_offset(PG_FUNCTION_ARGS) /* * offset */ - xrecoff = locationpoint % XLogSegSize; + xrecoff = XLogSegmentOffset(locationpoint, wal_segment_size); values[1] = UInt32GetDatum(xrecoff); isnull[1] = false; @@ -530,8 +523,8 @@ pg_walfile_name(PG_FUNCTION_ARGS) errmsg("recovery is in progress"), errhint("pg_walfile_name() cannot be executed during recovery."))); - XLByteToPrevSeg(locationpoint, xlogsegno); - XLogFileName(xlogfilename, ThisTimeLineID, xlogsegno); + XLByteToPrevSeg(locationpoint, xlogsegno, wal_segment_size); + XLogFileName(xlogfilename, ThisTimeLineID, xlogsegno, wal_segment_size); PG_RETURN_TEXT_P(cstring_to_text(xlogfilename)); } @@ -707,3 +700,77 @@ pg_backup_start_time(PG_FUNCTION_ARGS) PG_RETURN_DATUM(xtime); } + +/* + * Promotes a standby server. + * + * A result of "true" means that promotion has been completed if "wait" is + * "true", or initiated if "wait" is false. 
+ */ +Datum +pg_promote(PG_FUNCTION_ARGS) +{ + bool wait = PG_GETARG_BOOL(0); + int wait_seconds = PG_GETARG_INT32(1); + FILE *promote_file; + int i; + + if (!RecoveryInProgress()) + ereport(ERROR, + (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE), + errmsg("recovery is not in progress"), + errhint("Recovery control functions can only be executed during recovery."))); + + if (wait_seconds <= 0) + ereport(ERROR, + (errcode(ERRCODE_NUMERIC_VALUE_OUT_OF_RANGE), + errmsg("\"wait_seconds\" cannot be negative or equal zero"))); + + /* create the promote signal file */ + promote_file = AllocateFile(PROMOTE_SIGNAL_FILE, "w"); + if (!promote_file) + ereport(ERROR, + (errcode_for_file_access(), + errmsg("could not create file \"%s\": %m", + PROMOTE_SIGNAL_FILE))); + + if (FreeFile(promote_file)) + ereport(ERROR, + (errcode_for_file_access(), + errmsg("could not write file \"%s\": %m", + PROMOTE_SIGNAL_FILE))); + + /* signal the postmaster */ + if (kill(PostmasterPid, SIGUSR1) != 0) + { + ereport(WARNING, + (errmsg("failed to send signal to postmaster: %m"))); + (void) unlink(PROMOTE_SIGNAL_FILE); + PG_RETURN_BOOL(false); + } + + /* return immediately if waiting was not requested */ + if (!wait) + PG_RETURN_BOOL(true); + + /* wait for the amount of time wanted until promotion */ +#define WAITS_PER_SECOND 10 + for (i = 0; i < WAITS_PER_SECOND * wait_seconds; i++) + { + ResetLatch(MyLatch); + + if (!RecoveryInProgress()) + PG_RETURN_BOOL(true); + + CHECK_FOR_INTERRUPTS(); + + WaitLatch(MyLatch, + WL_LATCH_SET | WL_TIMEOUT | WL_POSTMASTER_DEATH, + 1000L / WAITS_PER_SECOND, + WAIT_EVENT_PROMOTE); + } + + ereport(WARNING, + (errmsg("server did not promote within %d seconds", wait_seconds))); + PG_RETURN_BOOL(false); +} diff --git a/src/backend/access/transam/xloginsert.c b/src/backend/access/transam/xloginsert.c index 3af03ecdb1..34d4db4297 100644 --- a/src/backend/access/transam/xloginsert.c +++ b/src/backend/access/transam/xloginsert.c @@ -9,7 +9,7 @@ * of XLogRecData structs by a call to XLogRecordAssemble(). See * access/transam/README for details. * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/backend/access/transam/xloginsert.c @@ -584,7 +584,7 @@ XLogRecordAssemble(RmgrId rmid, uint8 info, if (include_image) { Page page = regbuf->page; - uint16 compressed_len; + uint16 compressed_len = 0; /* * The page needs to be backed up, so calculate its hole length @@ -797,8 +797,8 @@ XLogRecordAssemble(RmgrId rmid, uint8 info, /* * Create a compressed version of a backup block image. * - * Returns FALSE if compression fails (i.e., compressed result is actually - * bigger than original). Otherwise, returns TRUE and sets 'dlen' to + * Returns false if compression fails (i.e., compressed result is actually + * bigger than original). Otherwise, returns true and sets 'dlen' to * the length of compressed block image. 
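pg_promote() above waits for promotion by checking RecoveryInProgress() ten times per second, up to wait_seconds, sleeping on the process latch between checks. Below is a standalone sketch of that bounded polling shape; the latch wait and the recovery check are replaced by nanosleep() and a dummy condition.

#include <stdbool.h>
#include <stdio.h>
#include <time.h>

#define WAITS_PER_SECOND 10     /* poll granularity, as in the hunk above */

/* Placeholder for RecoveryInProgress(); here it simply clears after a while. */
static bool still_in_recovery(void)
{
    static int calls = 0;
    return ++calls < 25;
}

/*
 * Wait up to wait_seconds for the condition to clear, checking it
 * WAITS_PER_SECOND times per second.  Returns true if it cleared in time.
 */
static bool wait_for_promotion(int wait_seconds)
{
    struct timespec tick;

    tick.tv_sec = 0;
    tick.tv_nsec = 1000000000L / WAITS_PER_SECOND;

    for (int i = 0; i < WAITS_PER_SECOND * wait_seconds; i++)
    {
        if (!still_in_recovery())
            return true;
        nanosleep(&tick, NULL);   /* the backend waits on its latch instead */
    }
    return false;
}

int main(void)
{
    printf(wait_for_promotion(60) ? "promoted\n"
                                  : "server did not promote in time\n");
    return 0;
}

Per the PG_GETARG calls in the hunk, the SQL-callable function takes the wait flag and wait_seconds as its two arguments; when wait is false it returns as soon as the signal file is written and the postmaster is signaled.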
*/ static bool @@ -809,12 +809,12 @@ XLogCompressBackupBlock(char *page, uint16 hole_offset, uint16 hole_length, int32 len; int32 extra_bytes = 0; char *source; - char tmp[BLCKSZ]; + PGAlignedBlock tmp; if (hole_length != 0) { /* must skip the hole */ - source = tmp; + source = tmp.data; memcpy(source, page, hole_offset); memcpy(source + hole_offset, page + (hole_offset + hole_length), @@ -917,7 +917,7 @@ XLogSaveBufferForHint(Buffer buffer, bool buffer_std) if (lsn <= RedoRecPtr) { int flags; - char copied_buffer[BLCKSZ]; + PGAlignedBlock copied_buffer; char *origdata = (char *) BufferGetBlock(buffer); RelFileNode rnode; ForkNumber forkno; @@ -935,11 +935,11 @@ XLogSaveBufferForHint(Buffer buffer, bool buffer_std) uint16 lower = ((PageHeader) page)->pd_lower; uint16 upper = ((PageHeader) page)->pd_upper; - memcpy(copied_buffer, origdata, lower); - memcpy(copied_buffer + upper, origdata + upper, BLCKSZ - upper); + memcpy(copied_buffer.data, origdata, lower); + memcpy(copied_buffer.data + upper, origdata + upper, BLCKSZ - upper); } else - memcpy(copied_buffer, origdata, BLCKSZ); + memcpy(copied_buffer.data, origdata, BLCKSZ); XLogBeginInsert(); @@ -948,7 +948,7 @@ XLogSaveBufferForHint(Buffer buffer, bool buffer_std) flags |= REGBUF_STANDARD; BufferGetTag(buffer, &rnode, &forkno, &blkno); - XLogRegisterBlock(0, &rnode, forkno, blkno, copied_buffer, flags); + XLogRegisterBlock(0, &rnode, forkno, blkno, copied_buffer.data, flags); recptr = XLogInsert(RM_XLOG_ID, XLOG_FPI_FOR_HINT); } @@ -965,7 +965,7 @@ XLogSaveBufferForHint(Buffer buffer, bool buffer_std) * log_newpage_buffer instead. * * If the page follows the standard page layout, with a PageHeader and unused - * space between pd_lower and pd_upper, set 'page_std' to TRUE. That allows + * space between pd_lower and pd_upper, set 'page_std' to true. That allows * the unused space to be left out from the WAL record, making it smaller. */ XLogRecPtr @@ -1002,7 +1002,7 @@ log_newpage(RelFileNode *rnode, ForkNumber forkNum, BlockNumber blkno, * function. This function will set the page LSN. * * If the page follows the standard page layout, with a PageHeader and unused - * space between pd_lower and pd_upper, set 'page_std' to TRUE. That allows + * space between pd_lower and pd_upper, set 'page_std' to true. That allows * the unused space to be left out from the WAL record, making it smaller. */ XLogRecPtr diff --git a/src/backend/access/transam/xlogreader.c b/src/backend/access/transam/xlogreader.c index 0781a7b9de..0768ca7822 100644 --- a/src/backend/access/transam/xlogreader.c +++ b/src/backend/access/transam/xlogreader.c @@ -3,7 +3,7 @@ * xlogreader.c * Generic XLog reading facility * - * Portions Copyright (c) 2013-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 2013-2018, PostgreSQL Global Development Group * * IDENTIFICATION * src/backend/access/transam/xlogreader.c @@ -25,10 +25,12 @@ #include "common/pg_lzcompress.h" #include "replication/origin.h" +#ifndef FRONTEND +#include "utils/memutils.h" +#endif + static bool allocate_recordbuf(XLogReaderState *state, uint32 reclength); -static bool ValidXLogPageHeader(XLogReaderState *state, XLogRecPtr recptr, - XLogPageHeader hdr); static bool ValidXLogRecordHeader(XLogReaderState *state, XLogRecPtr RecPtr, XLogRecPtr PrevRecPtr, XLogRecord *record, bool randAccess); static bool ValidXLogRecord(XLogReaderState *state, XLogRecord *record, @@ -64,7 +66,8 @@ report_invalid_record(XLogReaderState *state, const char *fmt,...) 
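XLogCompressBackupBlock and XLogSaveBufferForHint above switch their stack buffers from plain char arrays to PGAlignedBlock so that whole-page copies operate on suitably aligned memory. Below is a simplified stand-in for such a union, combined with the hole-skipping copy done before compression; BLCKSZ and the hole bounds are example values.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define BLCKSZ 8192

/*
 * Simplified stand-in for a PGAlignedBlock-style union: a bare char[] on
 * the stack carries no alignment guarantee, so extra members are added to
 * force alignment suitable for whole-page operations.
 */
typedef union
{
    char    data[BLCKSZ];
    double  force_align_d;
    int64_t force_align_i64;
} aligned_block;

int main(void)
{
    static char    page[BLCKSZ];        /* pretend this is a data page */
    aligned_block  tmp;
    uint16_t       hole_offset = 100;   /* example "hole" between       */
    uint16_t       hole_length = 500;   /* pd_lower and pd_upper        */

    memset(page, 'x', sizeof(page));

    /* Copy the page while skipping the hole, as the compression path does. */
    memcpy(tmp.data, page, hole_offset);
    memcpy(tmp.data + hole_offset,
           page + hole_offset + hole_length,
           BLCKSZ - (hole_offset + hole_length));

    printf("copied %d bytes without the hole\n", BLCKSZ - hole_length);
    return 0;
}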
* Returns NULL if the xlogreader couldn't be allocated. */ XLogReaderState * -XLogReaderAllocate(XLogPageReadCB pagereadfunc, void *private_data) +XLogReaderAllocate(int wal_segment_size, XLogPageReadCB pagereadfunc, + void *private_data) { XLogReaderState *state; @@ -91,6 +94,7 @@ XLogReaderAllocate(XLogPageReadCB pagereadfunc, void *private_data) return NULL; } + state->wal_segment_size = wal_segment_size; state->read_page = pagereadfunc; /* system_identifier initialized to zeroes above */ state->private_data = private_data; @@ -160,6 +164,25 @@ allocate_recordbuf(XLogReaderState *state, uint32 reclength) newSize += XLOG_BLCKSZ - (newSize % XLOG_BLCKSZ); newSize = Max(newSize, 5 * Max(BLCKSZ, XLOG_BLCKSZ)); +#ifndef FRONTEND + + /* + * Note that in much unlucky circumstances, the random data read from a + * recycled segment can cause this routine to be called with a size + * causing a hard failure at allocation. For a standby, this would cause + * the instance to stop suddenly with a hard failure, preventing it to + * retry fetching WAL from one of its sources which could allow it to move + * on with replay without a manual restart. If the data comes from a past + * recycled segment and is still valid, then the allocation may succeed + * but record checks are going to fail so this would be short-lived. If + * the allocation fails because of a memory shortage, then this is not a + * hard failure either per the guarantee given by MCXT_ALLOC_NO_OOM. + */ + if (!AllocSizeIsValid(newSize)) + return false; + +#endif + if (state->readRecordBuf) pfree(state->readRecordBuf); state->readRecordBuf = @@ -466,8 +489,8 @@ XLogReadRecord(XLogReaderState *state, XLogRecPtr RecPtr, char **errormsg) (record->xl_info & ~XLR_INFO_MASK) == XLOG_SWITCH) { /* Pretend it extends to end of segment */ - state->EndRecPtr += XLogSegSize - 1; - state->EndRecPtr -= state->EndRecPtr % XLogSegSize; + state->EndRecPtr += state->wal_segment_size - 1; + state->EndRecPtr -= XLogSegmentOffset(state->EndRecPtr, state->wal_segment_size); } if (DecodeXLogRecord(state, record, errormsg)) @@ -509,8 +532,8 @@ ReadPageInternal(XLogReaderState *state, XLogRecPtr pageptr, int reqLen) Assert((pageptr % XLOG_BLCKSZ) == 0); - XLByteToSeg(pageptr, targetSegNo); - targetPageOff = (pageptr % XLogSegSize); + XLByteToSeg(pageptr, targetSegNo, state->wal_segment_size); + targetPageOff = XLogSegmentOffset(pageptr, state->wal_segment_size); /* check whether we have all the requested data already */ if (targetSegNo == state->readSegNo && targetPageOff == state->readOff && @@ -531,7 +554,6 @@ ReadPageInternal(XLogReaderState *state, XLogRecPtr pageptr, int reqLen) */ if (targetSegNo != state->readSegNo && targetPageOff != 0) { - XLogPageHeader hdr; XLogRecPtr targetSegmentPtr = pageptr - targetPageOff; readLen = state->read_page(state, targetSegmentPtr, XLOG_BLCKSZ, @@ -543,9 +565,8 @@ ReadPageInternal(XLogReaderState *state, XLogRecPtr pageptr, int reqLen) /* we can be sure to have enough WAL available, we scrolled back */ Assert(readLen == XLOG_BLCKSZ); - hdr = (XLogPageHeader) state->readBuf; - - if (!ValidXLogPageHeader(state, targetSegmentPtr, hdr)) + if (!XLogReaderValidatePageHeader(state, targetSegmentPtr, + state->readBuf)) goto err; } @@ -582,7 +603,7 @@ ReadPageInternal(XLogReaderState *state, XLogRecPtr pageptr, int reqLen) /* * Now that we know we have the full header, validate it. 
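allocate_recordbuf above rounds the requested length up to a block multiple, enforces a generous minimum, and now also refuses sizes that fail AllocSizeIsValid(), because a garbage length read from a recycled segment could otherwise demand an absurd allocation and kill the process. Here is a sketch of that sizing logic with malloc standing in for the backend allocator; the cap is an assumption meant to mirror the spirit of MaxAllocSize.

#include <stdio.h>
#include <stdlib.h>

#define XLOG_BLCKSZ 8192
#define MAX_ALLOC   0x3fffffffUL    /* assumed cap, in the spirit of AllocSizeIsValid() */

/*
 * Size (or resize) the record buffer: round up to a block multiple,
 * enforce a minimum of five blocks, and fail softly on absurd requests
 * so the caller can retry from another WAL source instead of aborting.
 */
static char *resize_record_buf(char *old, size_t reclength, size_t *bufsz)
{
    size_t newsize = reclength;

    newsize += XLOG_BLCKSZ - (newsize % XLOG_BLCKSZ);   /* round up */
    if (newsize < 5 * XLOG_BLCKSZ)
        newsize = 5 * XLOG_BLCKSZ;                      /* minimum */

    if (newsize > MAX_ALLOC)
        return NULL;        /* bogus length: report failure, don't abort */

    free(old);
    *bufsz = newsize;
    return malloc(newsize);
}

int main(void)
{
    size_t bufsz = 0;
    char  *buf = resize_record_buf(NULL, 100000, &bufsz);

    printf("allocated %zu bytes: %s\n", bufsz, buf ? "ok" : "failed");
    free(buf);
    return 0;
}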
*/ - if (!ValidXLogPageHeader(state, pageptr, hdr)) + if (!XLogReaderValidatePageHeader(state, pageptr, (char *) hdr)) goto err; /* update read state information */ @@ -707,28 +728,32 @@ ValidXLogRecord(XLogReaderState *state, XLogRecord *record, XLogRecPtr recptr) } /* - * Validate a page header + * Validate a page header. + * + * Check if 'phdr' is valid as the header of the XLog page at position + * 'recptr'. */ -static bool -ValidXLogPageHeader(XLogReaderState *state, XLogRecPtr recptr, - XLogPageHeader hdr) +bool +XLogReaderValidatePageHeader(XLogReaderState *state, XLogRecPtr recptr, + char *phdr) { XLogRecPtr recaddr; XLogSegNo segno; int32 offset; + XLogPageHeader hdr = (XLogPageHeader) phdr; Assert((recptr % XLOG_BLCKSZ) == 0); - XLByteToSeg(recptr, segno); - offset = recptr % XLogSegSize; + XLByteToSeg(recptr, segno, state->wal_segment_size); + offset = XLogSegmentOffset(recptr, state->wal_segment_size); - XLogSegNoOffsetToRecPtr(segno, offset, recaddr); + XLogSegNoOffsetToRecPtr(segno, offset, state->wal_segment_size, recaddr); if (hdr->xlp_magic != XLOG_PAGE_MAGIC) { char fname[MAXFNAMELEN]; - XLogFileName(fname, state->readPageTLI, segno); + XLogFileName(fname, state->readPageTLI, segno, state->wal_segment_size); report_invalid_record(state, "invalid magic number %04X in log segment %s, offset %u", @@ -742,7 +767,7 @@ ValidXLogPageHeader(XLogReaderState *state, XLogRecPtr recptr, { char fname[MAXFNAMELEN]; - XLogFileName(fname, state->readPageTLI, segno); + XLogFileName(fname, state->readPageTLI, segno, state->wal_segment_size); report_invalid_record(state, "invalid info bits %04X in log segment %s, offset %u", @@ -775,10 +800,10 @@ ValidXLogPageHeader(XLogReaderState *state, XLogRecPtr recptr, fhdrident_str, sysident_str); return false; } - else if (longhdr->xlp_seg_size != XLogSegSize) + else if (longhdr->xlp_seg_size != state->wal_segment_size) { report_invalid_record(state, - "WAL file is from different database system: incorrect XLOG_SEG_SIZE in page header"); + "WAL file is from different database system: incorrect segment size in page header"); return false; } else if (longhdr->xlp_xlog_blcksz != XLOG_BLCKSZ) @@ -792,7 +817,7 @@ ValidXLogPageHeader(XLogReaderState *state, XLogRecPtr recptr, { char fname[MAXFNAMELEN]; - XLogFileName(fname, state->readPageTLI, segno); + XLogFileName(fname, state->readPageTLI, segno, state->wal_segment_size); /* hmm, first page of file doesn't have a long header? */ report_invalid_record(state, @@ -803,11 +828,16 @@ ValidXLogPageHeader(XLogReaderState *state, XLogRecPtr recptr, return false; } + /* + * Check that the address on the page agrees with what we expected. This + * check typically fails when an old WAL segment is recycled, and hasn't + * yet been overwritten with new data yet. 
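XLogReaderValidatePageHeader above checks, among other things, the page magic and that the address recorded on the page matches the one computed from the segment number and offset being read; a recycled segment typically trips the second check because its pages still carry their old addresses. Below is a simplified sketch of just those two checks; the header struct and magic constant are placeholders, not the real XLogPageHeaderData layout.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define PAGE_MAGIC 0xD0D0          /* arbitrary placeholder value */

/* Simplified page header; the real one carries several more fields. */
typedef struct
{
    uint16_t xlp_magic;
    uint64_t xlp_pageaddr;
} page_header;

/*
 * Validate a page read from segment 'segno' at byte 'offset': the magic
 * must match, and the self-reported page address must equal the address
 * we computed from where we read it.
 */
static bool validate_page_header(const page_header *hdr,
                                 uint64_t segno, uint32_t offset,
                                 uint64_t segsize)
{
    uint64_t expected_addr = segno * segsize + offset;

    if (hdr->xlp_magic != PAGE_MAGIC)
        return false;
    if (hdr->xlp_pageaddr != expected_addr)
        return false;
    return true;
}

int main(void)
{
    page_header hdr = {PAGE_MAGIC, 256ULL * (16 * 1024 * 1024) + 8192};

    printf("page ok: %d\n",
           validate_page_header(&hdr, 256, 8192, 16 * 1024 * 1024));
    return 0;
}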
+ */ if (hdr->xlp_pageaddr != recaddr) { char fname[MAXFNAMELEN]; - XLogFileName(fname, state->readPageTLI, segno); + XLogFileName(fname, state->readPageTLI, segno, state->wal_segment_size); report_invalid_record(state, "unexpected pageaddr %X/%X in log segment %s, offset %u", @@ -832,7 +862,7 @@ ValidXLogPageHeader(XLogReaderState *state, XLogRecPtr recptr, { char fname[MAXFNAMELEN]; - XLogFileName(fname, state->readPageTLI, segno); + XLogFileName(fname, state->readPageTLI, segno, state->wal_segment_size); report_invalid_record(state, "out-of-sequence timeline ID %u (after %u) in log segment %s, offset %u", @@ -1262,7 +1292,13 @@ DecodeXLogRecord(XLogReaderState *state, XLogRecord *record, char **errormsg) { if (blk->data) pfree(blk->data); - blk->data_bufsz = blk->data_len; + + /* + * Force the initial request to be BLCKSZ so that we don't + * waste time with lots of trips through this stanza as a + * result of WAL compression. + */ + blk->data_bufsz = MAXALIGN(Max(blk->data_len, BLCKSZ)); blk->data = palloc(blk->data_bufsz); } memcpy(blk->data, ptr, blk->data_len); @@ -1277,7 +1313,22 @@ DecodeXLogRecord(XLogReaderState *state, XLogRecord *record, char **errormsg) { if (state->main_data) pfree(state->main_data); - state->main_data_bufsz = state->main_data_len; + + /* + * main_data_bufsz must be MAXALIGN'ed. In many xlog record + * types, we omit trailing struct padding on-disk to save a few + * bytes; but compilers may generate accesses to the xlog struct + * that assume that padding bytes are present. If the palloc + * request is not large enough to include such padding bytes then + * we'll get valgrind complaints due to otherwise-harmless fetches + * of the padding bytes. + * + * In addition, force the initial request to be reasonably large + * so that we don't waste time with lots of trips through this + * stanza. BLCKSZ / 2 seems like a good compromise choice. + */ + state->main_data_bufsz = MAXALIGN(Max(state->main_data_len, + BLCKSZ / 2)); state->main_data = palloc(state->main_data_bufsz); } memcpy(state->main_data, ptr, state->main_data_len); @@ -1300,8 +1351,8 @@ DecodeXLogRecord(XLogReaderState *state, XLogRecord *record, char **errormsg) * Returns information about the block that a block reference refers to. * * If the WAL record contains a block reference with the given ID, *rnode, - * *forknum, and *blknum are filled in (if not NULL), and returns TRUE. - * Otherwise returns FALSE. + * *forknum, and *blknum are filled in (if not NULL), and returns true. + * Otherwise returns false. 
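The DecodeXLogRecord hunk above grows the per-block data buffer only when it is too small and rounds the new size up to MAXALIGN(Max(len, BLCKSZ)), so a stream of slightly different compressed-image lengths does not force a reallocation on every record. A sketch of that grow-only buffer follows; the fixed 8-byte MAXALIGN and malloc/free are simplifications.

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define BLCKSZ 8192
#define MAXALIGN(x) (((uintptr_t) (x) + 7) & ~(uintptr_t) 7)   /* 8-byte variant */

/* A grow-only scratch buffer, as used for decoded block data above. */
typedef struct
{
    char   *data;
    size_t  bufsz;
} scratch_buf;

/*
 * Make sure the buffer can hold 'needed' bytes.  Rounding the request up
 * to at least BLCKSZ (and to alignment) avoids reallocating repeatedly
 * for requests that hover just under a block in size.
 */
static void ensure_capacity(scratch_buf *buf, size_t needed)
{
    if (buf->bufsz >= needed)
        return;

    free(buf->data);
    buf->bufsz = MAXALIGN(needed > BLCKSZ ? needed : BLCKSZ);
    buf->data = malloc(buf->bufsz);
}

int main(void)
{
    scratch_buf buf = {NULL, 0};

    ensure_capacity(&buf, 517);       /* first use: jumps straight to BLCKSZ */
    printf("capacity %zu\n", buf.bufsz);
    ensure_capacity(&buf, 4000);      /* already big enough, no reallocation */
    printf("capacity %zu\n", buf.bufsz);

    free(buf.data);
    return 0;
}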
*/ bool XLogRecGetBlockTag(XLogReaderState *record, uint8 block_id, @@ -1361,7 +1412,7 @@ RestoreBlockImage(XLogReaderState *record, uint8 block_id, char *page) { DecodedBkpBlock *bkpb; char *ptr; - char tmp[BLCKSZ]; + PGAlignedBlock tmp; if (!record->blocks[block_id].in_use) return false; @@ -1374,7 +1425,7 @@ RestoreBlockImage(XLogReaderState *record, uint8 block_id, char *page) if (bkpb->bimg_info & BKPIMAGE_IS_COMPRESSED) { /* If a backup block image is compressed, decompress it */ - if (pglz_decompress(ptr, bkpb->bimg_len, tmp, + if (pglz_decompress(ptr, bkpb->bimg_len, tmp.data, BLCKSZ - bkpb->hole_length) < 0) { report_invalid_record(record, "invalid compressed image at %X/%X, block %d", @@ -1383,7 +1434,7 @@ RestoreBlockImage(XLogReaderState *record, uint8 block_id, char *page) block_id); return false; } - ptr = tmp; + ptr = tmp.data; } /* generate page, taking into account hole if necessary */ diff --git a/src/backend/access/transam/xlogutils.c b/src/backend/access/transam/xlogutils.c index bbae733d65..4ecdc9220f 100644 --- a/src/backend/access/transam/xlogutils.c +++ b/src/backend/access/transam/xlogutils.c @@ -8,7 +8,7 @@ * None of this code is used during normal system operation. * * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/backend/access/transam/xlogutils.c @@ -23,7 +23,6 @@ #include "access/xlog.h" #include "access/xlog_internal.h" #include "access/xlogutils.h" -#include "catalog/catalog.h" #include "miscadmin.h" #include "pgstat.h" #include "storage/smgr.h" @@ -654,7 +653,8 @@ XLogTruncateRelation(RelFileNode rnode, ForkNumber forkNum, * frontend). Probably these should be merged at some point. */ static void -XLogRead(char *buf, TimeLineID tli, XLogRecPtr startptr, Size count) +XLogRead(char *buf, int segsize, TimeLineID tli, XLogRecPtr startptr, + Size count) { char *p; XLogRecPtr recptr; @@ -666,6 +666,8 @@ XLogRead(char *buf, TimeLineID tli, XLogRecPtr startptr, Size count) static TimeLineID sendTLI = 0; static uint32 sendOff = 0; + Assert(segsize == wal_segment_size); + p = buf; recptr = startptr; nbytes = count; @@ -676,10 +678,10 @@ XLogRead(char *buf, TimeLineID tli, XLogRecPtr startptr, Size count) int segbytes; int readbytes; - startoff = recptr % XLogSegSize; + startoff = XLogSegmentOffset(recptr, segsize); /* Do we need to switch to a different xlog segment? 
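RestoreBlockImage above rebuilds a full page from a stored image that omits the "hole" between pd_lower and pd_upper, decompressing it first when needed. The sketch below covers only the reconstruction step implied by the "generate page, taking into account hole" comment: copy the kept pieces back and zero-fill the gap; the sizes are example values.

#include <stdio.h>
#include <string.h>

#define BLCKSZ 8192

/*
 * Restore a page from an image that omits 'hole_length' bytes starting at
 * 'hole_offset': copy the prefix, zero the hole, copy the suffix.
 */
static void restore_with_hole(char *page, const char *image,
                              unsigned hole_offset, unsigned hole_length)
{
    if (hole_length == 0)
    {
        memcpy(page, image, BLCKSZ);
        return;
    }
    memcpy(page, image, hole_offset);
    memset(page + hole_offset, 0, hole_length);
    memcpy(page + hole_offset + hole_length,
           image + hole_offset,
           BLCKSZ - (hole_offset + hole_length));
}

int main(void)
{
    static char image[BLCKSZ];   /* holds BLCKSZ - hole_length useful bytes */
    static char page[BLCKSZ];

    memset(image, 'x', sizeof(image));
    restore_with_hole(page, image, 100, 500);
    printf("hole bytes are zero: %d\n", page[100] == 0 && page[599] == 0);
    return 0;
}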
*/ - if (sendFile < 0 || !XLByteInSeg(recptr, sendSegNo) || + if (sendFile < 0 || !XLByteInSeg(recptr, sendSegNo, segsize) || sendTLI != tli) { char path[MAXPGPATH]; @@ -687,11 +689,11 @@ XLogRead(char *buf, TimeLineID tli, XLogRecPtr startptr, Size count) if (sendFile >= 0) close(sendFile); - XLByteToSeg(recptr, sendSegNo); + XLByteToSeg(recptr, sendSegNo, segsize); - XLogFilePath(path, tli, sendSegNo); + XLogFilePath(path, tli, sendSegNo, segsize); - sendFile = BasicOpenFile(path, O_RDONLY | PG_BINARY, 0); + sendFile = BasicOpenFile(path, O_RDONLY | PG_BINARY); if (sendFile < 0) { @@ -716,9 +718,10 @@ XLogRead(char *buf, TimeLineID tli, XLogRecPtr startptr, Size count) if (lseek(sendFile, (off_t) startoff, SEEK_SET) < 0) { char path[MAXPGPATH]; + int save_errno = errno; - XLogFilePath(path, tli, sendSegNo); - + XLogFilePath(path, tli, sendSegNo, segsize); + errno = save_errno; ereport(ERROR, (errcode_for_file_access(), errmsg("could not seek in log segment %s to offset %u: %m", @@ -728,8 +731,8 @@ XLogRead(char *buf, TimeLineID tli, XLogRecPtr startptr, Size count) } /* How many bytes are within this segment? */ - if (nbytes > (XLogSegSize - startoff)) - segbytes = XLogSegSize - startoff; + if (nbytes > (segsize - startoff)) + segbytes = segsize - startoff; else segbytes = nbytes; @@ -739,9 +742,10 @@ XLogRead(char *buf, TimeLineID tli, XLogRecPtr startptr, Size count) if (readbytes <= 0) { char path[MAXPGPATH]; + int save_errno = errno; - XLogFilePath(path, tli, sendSegNo); - + XLogFilePath(path, tli, sendSegNo, segsize); + errno = save_errno; ereport(ERROR, (errcode_for_file_access(), errmsg("could not read from log segment %s, offset %u, length %lu: %m", @@ -798,7 +802,8 @@ XLogRead(char *buf, TimeLineID tli, XLogRecPtr startptr, Size count) void XLogReadDetermineTimeline(XLogReaderState *state, XLogRecPtr wantPage, uint32 wantLength) { - const XLogRecPtr lastReadPage = state->readSegNo * XLogSegSize + state->readOff; + const XLogRecPtr lastReadPage = state->readSegNo * + state->wal_segment_size + state->readOff; Assert(wantPage != InvalidXLogRecPtr && wantPage % XLOG_BLCKSZ == 0); Assert(wantLength <= XLOG_BLCKSZ); @@ -842,7 +847,8 @@ XLogReadDetermineTimeline(XLogReaderState *state, XLogRecPtr wantPage, uint32 wa if (state->currTLIValidUntil != InvalidXLogRecPtr && state->currTLI != ThisTimeLineID && state->currTLI != 0 && - (wantPage + wantLength) / XLogSegSize < state->currTLIValidUntil / XLogSegSize) + ((wantPage + wantLength) / state->wal_segment_size) < + (state->currTLIValidUntil / state->wal_segment_size)) return; /* @@ -864,9 +870,11 @@ XLogReadDetermineTimeline(XLogReaderState *state, XLogRecPtr wantPage, uint32 wa */ List *timelineHistory = readTimeLineHistory(ThisTimeLineID); - XLogRecPtr endOfSegment = (((wantPage / XLogSegSize) + 1) * XLogSegSize) - 1; + XLogRecPtr endOfSegment = (((wantPage / state->wal_segment_size) + 1) + * state->wal_segment_size) - 1; - Assert(wantPage / XLogSegSize == endOfSegment / XLogSegSize); + Assert(wantPage / state->wal_segment_size == + endOfSegment / state->wal_segment_size); /* * Find the timeline of the last LSN on the segment containing @@ -1014,7 +1022,8 @@ read_local_xlog_page(XLogReaderState *state, XLogRecPtr targetPagePtr, * as 'count', read the whole page anyway. It's guaranteed to be * zero-padded up to the page boundary if it's incomplete. 
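XLogRead above walks a byte range that may span several WAL segments, clamping each read to what remains in the current segment before moving on. Here is a standalone sketch of just that chunking arithmetic, with tiny example sizes and a printf in place of opening, seeking into, and reading the segment file.

#include <stdint.h>
#include <stdio.h>

#define SEG_SIZE 1024u     /* stand-in for wal_segment_size */

/* Split a read starting at 'startptr' into per-segment chunks. */
static void chunked_read(uint64_t startptr, size_t count)
{
    uint64_t recptr = startptr;
    size_t   nbytes = count;

    while (nbytes > 0)
    {
        uint64_t segno    = recptr / SEG_SIZE;     /* which segment        */
        uint32_t startoff = recptr % SEG_SIZE;     /* offset within it     */
        size_t   segbytes = SEG_SIZE - startoff;   /* room left in segment */

        if (segbytes > nbytes)
            segbytes = nbytes;

        /* here the backend opens/seeks/reads the segment file */
        printf("read seg %llu, offset %u, length %zu\n",
               (unsigned long long) segno, (unsigned) startoff, segbytes);

        recptr += segbytes;
        nbytes -= segbytes;
    }
}

int main(void)
{
    chunked_read(1000, 2100);   /* spans several pretend segments */
    return 0;
}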
*/ - XLogRead(cur_page, *pageTLI, targetPagePtr, XLOG_BLCKSZ); + XLogRead(cur_page, state->wal_segment_size, *pageTLI, targetPagePtr, + XLOG_BLCKSZ); /* number of valid bytes in the buffer */ return count; diff --git a/src/backend/bootstrap/bootparse.y b/src/backend/bootstrap/bootparse.y index 2e1fef0350..4c72989cc2 100644 --- a/src/backend/bootstrap/bootparse.y +++ b/src/backend/bootstrap/bootparse.y @@ -4,7 +4,7 @@ * bootparse.y * yacc grammar for the "bootstrap" mode (BKI file format) * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * @@ -105,6 +105,7 @@ static int num_columns_read = 0; List *list; IndexElem *ielem; char *str; + const char *kw; int ival; Oid oidval; } @@ -116,17 +117,17 @@ static int num_columns_read = 0; %type oidspec optoideq optrowtypeoid %token ID -%token OPEN XCLOSE XCREATE INSERT_TUPLE -%token XDECLARE INDEX ON USING XBUILD INDICES UNIQUE XTOAST %token COMMA EQUALS LPAREN RPAREN -%token OBJ_ID XBOOTSTRAP XSHARED_RELATION XWITHOUT_OIDS XROWTYPE_OID NULLVAL -%token XFORCE XNOT XNULL +/* NULLVAL is a reserved keyword */ +%token NULLVAL +/* All the rest are unreserved, and should be handled in boot_ident! */ +%token OPEN XCLOSE XCREATE INSERT_TUPLE +%token XDECLARE INDEX ON USING XBUILD INDICES UNIQUE XTOAST +%token OBJ_ID XBOOTSTRAP XSHARED_RELATION XWITHOUT_OIDS XROWTYPE_OID +%token XFORCE XNOT XNULL %start TopLevel -%nonassoc low -%nonassoc high - %% TopLevel: @@ -160,18 +161,12 @@ Boot_OpenStmt: ; Boot_CloseStmt: - XCLOSE boot_ident %prec low + XCLOSE boot_ident { do_start(); closerel($2); do_end(); } - | XCLOSE %prec high - { - do_start(); - closerel(NULL); - do_end(); - } ; Boot_CreateStmt: @@ -257,6 +252,7 @@ Boot_CreateStmt: false, true, false, + InvalidOid, NULL); elog(DEBUG4, "relation created with OID %u", id); } @@ -292,6 +288,8 @@ Boot_DeclareIndexStmt: IndexStmt *stmt = makeNode(IndexStmt); Oid relationId; + elog(DEBUG4, "creating index \"%s\"", $3); + do_start(); stmt->idxname = $3; @@ -299,6 +297,7 @@ Boot_DeclareIndexStmt: stmt->accessMethod = $8; stmt->tableSpace = NULL; stmt->indexParams = $10; + stmt->indexIncludingParams = NIL; stmt->options = NIL; stmt->whereClause = NULL; stmt->excludeOpNames = NIL; @@ -321,6 +320,8 @@ Boot_DeclareIndexStmt: DefineIndex(relationId, stmt, $4, + InvalidOid, + InvalidOid, false, false, false, @@ -336,6 +337,8 @@ Boot_DeclareUniqueIndexStmt: IndexStmt *stmt = makeNode(IndexStmt); Oid relationId; + elog(DEBUG4, "creating unique index \"%s\"", $4); + do_start(); stmt->idxname = $4; @@ -343,6 +346,7 @@ Boot_DeclareUniqueIndexStmt: stmt->accessMethod = $9; stmt->tableSpace = NULL; stmt->indexParams = $11; + stmt->indexIncludingParams = NIL; stmt->options = NIL; stmt->whereClause = NULL; stmt->excludeOpNames = NIL; @@ -365,6 +369,8 @@ Boot_DeclareUniqueIndexStmt: DefineIndex(relationId, stmt, $5, + InvalidOid, + InvalidOid, false, false, false, @@ -377,6 +383,8 @@ Boot_DeclareUniqueIndexStmt: Boot_DeclareToastStmt: XDECLARE XTOAST oidspec oidspec ON boot_ident { + elog(DEBUG4, "creating toast table for table \"%s\"", $6); + do_start(); BootstrapToastTable($6, $3, $4); @@ -476,8 +484,28 @@ boot_column_val: { InsertOneNull(num_columns_read++); } ; -boot_ident : - ID { $$ = yylval.str; } +boot_ident: + ID { $$ = $1; } + | OPEN { $$ = pstrdup($1); } + | XCLOSE { $$ = pstrdup($1); } + | XCREATE { $$ = pstrdup($1); } + | INSERT_TUPLE { $$ = pstrdup($1); } 
+ | XDECLARE { $$ = pstrdup($1); } + | INDEX { $$ = pstrdup($1); } + | ON { $$ = pstrdup($1); } + | USING { $$ = pstrdup($1); } + | XBUILD { $$ = pstrdup($1); } + | INDICES { $$ = pstrdup($1); } + | UNIQUE { $$ = pstrdup($1); } + | XTOAST { $$ = pstrdup($1); } + | OBJ_ID { $$ = pstrdup($1); } + | XBOOTSTRAP { $$ = pstrdup($1); } + | XSHARED_RELATION { $$ = pstrdup($1); } + | XWITHOUT_OIDS { $$ = pstrdup($1); } + | XROWTYPE_OID { $$ = pstrdup($1); } + | XFORCE { $$ = pstrdup($1); } + | XNOT { $$ = pstrdup($1); } + | XNULL { $$ = pstrdup($1); } ; %% diff --git a/src/backend/bootstrap/bootscanner.l b/src/backend/bootstrap/bootscanner.l index 6467882fa3..739087b786 100644 --- a/src/backend/bootstrap/bootscanner.l +++ b/src/backend/bootstrap/bootscanner.l @@ -4,7 +4,7 @@ * bootscanner.l * a lexical scanner for the bootstrap parser * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * @@ -38,6 +38,7 @@ /* Not needed now that this file is compiled as part of bootparse. */ /* #include "bootparse.h" */ +/* LCOV_EXCL_START */ /* Avoid exit() on fatal scanner errors (a bit ugly -- see yy_fatal_error) */ #undef fprintf @@ -64,76 +65,81 @@ static int yyline = 1; /* line number for error reporting */ %option prefix="boot_yy" -D [0-9] -oct \\{D}{D}{D} -id ([A-Za-z0-9_]|{oct}|\-)+ +id [-A-Za-z0-9_]+ sid \"([^\"])*\" -arrayid [A-Za-z0-9_]+\[{D}*\] + +/* + * Keyword tokens return the keyword text (as a constant string) in yylval.kw, + * just in case that's needed because we want to treat the keyword as an + * unreserved identifier. Note that _null_ is not treated as a keyword + * for this purpose; it's the one "reserved word" in the bootstrap syntax. + * + * Notice that all the keywords are case-sensitive, and for historical + * reasons some must be upper case. + * + * String tokens return a palloc'd string in yylval.str. 
+ */ %% -open { return(OPEN); } +open { yylval.kw = "open"; return OPEN; } + +close { yylval.kw = "close"; return XCLOSE; } -close { return(XCLOSE); } +create { yylval.kw = "create"; return XCREATE; } -create { return(XCREATE); } +OID { yylval.kw = "OID"; return OBJ_ID; } +bootstrap { yylval.kw = "bootstrap"; return XBOOTSTRAP; } +shared_relation { yylval.kw = "shared_relation"; return XSHARED_RELATION; } +without_oids { yylval.kw = "without_oids"; return XWITHOUT_OIDS; } +rowtype_oid { yylval.kw = "rowtype_oid"; return XROWTYPE_OID; } -OID { return(OBJ_ID); } -bootstrap { return(XBOOTSTRAP); } -"shared_relation" { return(XSHARED_RELATION); } -"without_oids" { return(XWITHOUT_OIDS); } -"rowtype_oid" { return(XROWTYPE_OID); } -_null_ { return(NULLVAL); } +insert { yylval.kw = "insert"; return INSERT_TUPLE; } -insert { return(INSERT_TUPLE); } +_null_ { return NULLVAL; } -"," { return(COMMA); } -"=" { return(EQUALS); } -"(" { return(LPAREN); } -")" { return(RPAREN); } +"," { return COMMA; } +"=" { return EQUALS; } +"(" { return LPAREN; } +")" { return RPAREN; } [\n] { yyline++; } -[\t] ; -" " ; - -^\#[^\n]* ; /* drop everything after "#" for comments */ - - -"declare" { return(XDECLARE); } -"build" { return(XBUILD); } -"indices" { return(INDICES); } -"unique" { return(UNIQUE); } -"index" { return(INDEX); } -"on" { return(ON); } -"using" { return(USING); } -"toast" { return(XTOAST); } -"FORCE" { return(XFORCE); } -"NOT" { return(XNOT); } -"NULL" { return(XNULL); } - -{arrayid} { - yylval.str = MapArrayTypeName(yytext); - return(ID); - } +[\r\t ] ; + +^\#[^\n]* ; /* drop everything after "#" for comments */ + +declare { yylval.kw = "declare"; return XDECLARE; } +build { yylval.kw = "build"; return XBUILD; } +indices { yylval.kw = "indices"; return INDICES; } +unique { yylval.kw = "unique"; return UNIQUE; } +index { yylval.kw = "index"; return INDEX; } +on { yylval.kw = "on"; return ON; } +using { yylval.kw = "using"; return USING; } +toast { yylval.kw = "toast"; return XTOAST; } +FORCE { yylval.kw = "FORCE"; return XFORCE; } +NOT { yylval.kw = "NOT"; return XNOT; } +NULL { yylval.kw = "NULL"; return XNULL; } + {id} { yylval.str = scanstr(yytext); - return(ID); + return ID; } {sid} { - yytext[strlen(yytext)-1] = '\0'; /* strip off quotes */ + /* leading and trailing quotes are not passed to scanstr */ + yytext[strlen(yytext) - 1] = '\0'; yylval.str = scanstr(yytext+1); - yytext[strlen(yytext)] = '"'; /* restore quotes */ - return(ID); + yytext[strlen(yytext)] = '"'; /* restore yytext */ + return ID; } . 
{ elog(ERROR, "syntax error at line %d: unexpected character \"%s\"", yyline, yytext); } - - %% +/* LCOV_EXCL_STOP */ + void yyerror(const char *message) { diff --git a/src/backend/bootstrap/bootstrap.c b/src/backend/bootstrap/bootstrap.c index b3f0b3cc92..578af2e66d 100644 --- a/src/backend/bootstrap/bootstrap.c +++ b/src/backend/bootstrap/bootstrap.c @@ -4,7 +4,7 @@ * routines to support running postgres in 'bootstrap' mode * bootstrap mode is used to create the initial template database * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * IDENTIFICATION @@ -19,10 +19,12 @@ #include "access/htup_details.h" #include "access/xact.h" +#include "access/xlog_internal.h" #include "bootstrap/bootstrap.h" #include "catalog/index.h" #include "catalog/pg_collation.h" #include "catalog/pg_type.h" +#include "common/link-canary.h" #include "libpq/pqsignal.h" #include "miscadmin.h" #include "nodes/makefuncs.h" @@ -222,7 +224,7 @@ AuxiliaryProcessMain(int argc, char *argv[]) /* If no -x argument, we are a CheckerProcess */ MyAuxProcType = CheckerProcess; - while ((flag = getopt(argc, argv, "B:c:d:D:Fkr:x:-:")) != -1) + while ((flag = getopt(argc, argv, "B:c:d:D:Fkr:x:X:-:")) != -1) { switch (flag) { @@ -257,6 +259,18 @@ AuxiliaryProcessMain(int argc, char *argv[]) case 'x': MyAuxProcType = atoi(optarg); break; + case 'X': + { + int WalSegSz = strtoul(optarg, NULL, 0); + + if (!IsValidWalSegSize(WalSegSz)) + ereport(ERROR, + (errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("-X requires a power of two value between 1 MB and 1 GB"))); + SetConfigOption("wal_segment_size", optarg, PGC_INTERNAL, + PGC_S_OVERRIDE); + } + break; case 'c': case '-': { @@ -308,19 +322,19 @@ AuxiliaryProcessMain(int argc, char *argv[]) switch (MyAuxProcType) { case StartupProcess: - statmsg = "startup process"; + statmsg = pgstat_get_backend_desc(B_STARTUP); break; case BgWriterProcess: - statmsg = "writer process"; + statmsg = pgstat_get_backend_desc(B_BG_WRITER); break; case CheckpointerProcess: - statmsg = "checkpointer process"; + statmsg = pgstat_get_backend_desc(B_CHECKPOINTER); break; case WalWriterProcess: - statmsg = "wal writer process"; + statmsg = pgstat_get_backend_desc(B_WAL_WRITER); break; case WalReceiverProcess: - statmsg = "wal receiver process"; + statmsg = pgstat_get_backend_desc(B_WAL_RECEIVER); break; default: statmsg = "??? process"; @@ -336,13 +350,15 @@ AuxiliaryProcessMain(int argc, char *argv[]) proc_exit(1); } - /* Validate we have been given a reasonable-looking DataDir */ - Assert(DataDir); - ValidatePgVersion(DataDir); - - /* Change into DataDir (if under postmaster, should be done already) */ + /* + * Validate we have been given a reasonable-looking DataDir and change + * into it (if under postmaster, should be done already). + */ if (!IsUnderPostmaster) + { + checkDataDir(); ChangeToDataDir(); + } /* If standalone, create lockfile for data directory */ if (!IsUnderPostmaster) @@ -388,6 +404,13 @@ AuxiliaryProcessMain(int argc, char *argv[]) /* finish setting up bufmgr.c */ InitBufferPoolBackend(); + /* + * Auxiliary processes don't run transactions, but they may need a + * resource owner anyway to manage buffer pins acquired outside + * transactions (and, perhaps, other things in future). 
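The new -X option in AuxiliaryProcessMain above accepts a WAL segment size only if IsValidWalSegSize() agrees; per the error message, that means a power of two between 1 MB and 1 GB. Below is a standalone sketch of such a check; the function name and the strtoul-based driver are illustrative, not the backend's code.

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

/*
 * A WAL segment size is acceptable only if it is a power of two between
 * 1 MB and 1 GB, mirroring the constraint stated by the -X error message.
 */
static bool is_valid_wal_seg_size(unsigned long size)
{
    const unsigned long min_size = 1024UL * 1024;          /* 1 MB */
    const unsigned long max_size = 1024UL * 1024 * 1024;   /* 1 GB */

    return size >= min_size &&
           size <= max_size &&
           (size & (size - 1)) == 0;      /* power-of-two test */
}

int main(int argc, char **argv)
{
    unsigned long size = (argc > 1) ? strtoul(argv[1], NULL, 0)
                                    : 16UL * 1024 * 1024;

    printf("%lu is %s\n", size,
           is_valid_wal_seg_size(size) ? "valid" : "not valid");
    return 0;
}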
+ */ + CreateAuxProcessResourceOwner(); + /* Initialize backend status information */ pgstat_initialize(); pgstat_bestart(); @@ -480,12 +503,19 @@ BootstrapModeMain(void) Assert(!IsUnderPostmaster); Assert(IsBootstrapProcessingMode()); + /* + * To ensure that src/common/link-canary.c is linked into the backend, we + * must call it from somewhere. Here is as good as anywhere. + */ + if (pg_link_canary_is_frontend()) + elog(ERROR, "backend is incorrectly linked to frontend functions"); + /* * Do backend-like initialization for bootstrap mode */ InitProcess(); - InitPostgres(NULL, InvalidOid, NULL, InvalidOid, NULL); + InitPostgres(NULL, InvalidOid, NULL, InvalidOid, NULL, false); /* Initialize stuff for bootstrap-file processing */ for (i = 0; i < MAXATTR; i++) @@ -603,13 +633,13 @@ boot_openrel(char *relname) relname, (int) ATTRIBUTE_FIXED_PART_SIZE); boot_reldesc = heap_openrv(makeRangeVar(NULL, relname, -1), NoLock); - numattr = boot_reldesc->rd_rel->relnatts; + numattr = RelationGetNumberOfAttributes(boot_reldesc); for (i = 0; i < numattr; i++) { if (attrtypes[i] == NULL) attrtypes[i] = AllocateAttribute(); memmove((char *) attrtypes[i], - (char *) boot_reldesc->rd_att->attrs[i], + (char *) TupleDescAttr(boot_reldesc->rd_att, i), ATTRIBUTE_FIXED_PART_SIZE); { @@ -816,7 +846,7 @@ InsertOneValue(char *value, int i) elog(DEBUG4, "inserting column %d value \"%s\"", i, value); - typoid = boot_reldesc->rd_att->attrs[i]->atttypid; + typoid = TupleDescAttr(boot_reldesc->rd_att, i)->atttypid; boot_get_type_io_data(typoid, &typlen, &typbyval, &typalign, @@ -843,10 +873,10 @@ InsertOneNull(int i) { elog(DEBUG4, "inserting column %d NULL", i); Assert(i >= 0 && i < MAXATTR); - if (boot_reldesc->rd_att->attrs[i]->attnotnull) + if (TupleDescAttr(boot_reldesc->rd_att, i)->attnotnull) elog(ERROR, "NULL value specified for not-null column \"%s\" of relation \"%s\"", - NameStr(boot_reldesc->rd_att->attrs[i]->attname), + NameStr(TupleDescAttr(boot_reldesc->rd_att, i)->attname), RelationGetRelationName(boot_reldesc)); values[i] = PointerGetDatum(NULL); Nulls[i] = true; @@ -1021,36 +1051,6 @@ AllocateAttribute(void) MemoryContextAllocZero(TopMemoryContext, ATTRIBUTE_FIXED_PART_SIZE); } -/* - * MapArrayTypeName - * - * Given a type name, produce the corresponding array type name by prepending - * '_' and truncating as needed to fit in NAMEDATALEN-1 bytes. This is only - * used in bootstrap mode, so we can get away with assuming that the input is - * ASCII and we don't need multibyte-aware truncation. - * - * The given string normally ends with '[]' or '[digits]'; we discard that. - * - * The result is a palloc'd string. - */ -char * -MapArrayTypeName(const char *s) -{ - int i, - j; - char newStr[NAMEDATALEN]; - - newStr[0] = '_'; - j = 1; - for (i = 0; i < NAMEDATALEN - 2 && s[i] != '['; i++, j++) - newStr[j] = s[i]; - - newStr[j] = '\0'; - - return pstrdup(newStr); -} - - /* * index_register() -- record an index that has been set up for building * later. 
@@ -1124,7 +1124,7 @@ build_indices(void) heap = heap_open(ILHead->il_heap, NoLock); ind = index_open(ILHead->il_ind, NoLock); - index_build(heap, ind, ILHead->il_info, false, false); + index_build(heap, ind, ILHead->il_info, false, false, false); index_close(ind, NoLock); heap_close(heap, NoLock); diff --git a/src/backend/catalog/.gitignore b/src/backend/catalog/.gitignore index 557af3c0e5..9abe91d6e6 100644 --- a/src/backend/catalog/.gitignore +++ b/src/backend/catalog/.gitignore @@ -2,3 +2,5 @@ /postgres.description /postgres.shdescription /schemapg.h +/pg_*_d.h +/bki-stamp diff --git a/src/backend/catalog/Catalog.pm b/src/backend/catalog/Catalog.pm index 7abfda3d3a..9699dfd8d5 100644 --- a/src/backend/catalog/Catalog.pm +++ b/src/backend/catalog/Catalog.pm @@ -1,10 +1,10 @@ #---------------------------------------------------------------------- # # Catalog.pm -# Perl module that extracts info from catalog headers into Perl +# Perl module that extracts info from catalog files into Perl # data structures # -# Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group +# Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group # Portions Copyright (c) 1994, Regents of the University of California # # src/backend/catalog/Catalog.pm @@ -16,17 +16,14 @@ package Catalog; use strict; use warnings; -require Exporter; -our @ISA = qw(Exporter); -our @EXPORT = (); -our @EXPORT_OK = qw(Catalogs SplitDataLine RenameTempFile); +use File::Compare; -# Call this function with an array of names of header files to parse. -# Returns a nested data structure describing the data in the headers. -sub Catalogs + +# Parses a catalog header file into a data structure describing the schema +# of the catalog. +sub ParseHeader { - my (%catalogs, $catname, $declaring_attributes, $most_recent); - $catalogs{names} = []; + my $input_file = shift; # There are a few types which are given one name in the C source, but a # different name at the SQL level. These are enumerated here. @@ -36,23 +33,39 @@ sub Catalogs 'int64' => 'int8', 'Oid' => 'oid', 'NameData' => 'name', - 'TransactionId' => 'xid'); + 'TransactionId' => 'xid', + 'XLogRecPtr' => 'pg_lsn'); - foreach my $input_file (@_) - { - my %catalog; - $catalog{columns} = []; - $catalog{data} = []; + my %catalog; + my $declaring_attributes = 0; + my $is_varlen = 0; + my $is_client_code = 0; + + $catalog{columns} = []; + $catalog{toasting} = []; + $catalog{indexing} = []; + $catalog{client_code} = []; - open(my $ifh, '<', $input_file) || die "$input_file: $!"; + open(my $ifh, '<', $input_file) || die "$input_file: $!"; - my ($filename) = ($input_file =~ m/(\w+)\.h$/); - my $natts_pat = "Natts_$filename"; + # Scan the input file. + while (<$ifh>) + { - # Scan the input file. - while (<$ifh>) + # Set appropriate flag when we're in certain code sections. + if (/^#/) { + $is_varlen = 1 if /^#ifdef\s+CATALOG_VARLEN/; + if (/^#ifdef\s+EXPOSE_TO_CLIENT_CODE/) + { + $is_client_code = 1; + next; + } + next if !$is_client_code; + } + if (!$is_client_code) + { # Strip C-style comments. s;/\*(.|\n)*\*/;;g; if (m;/\*;) @@ -66,207 +79,461 @@ sub Catalogs redo; } - # Remember input line number for later. - my $input_line_number = $.; - # Strip useless whitespace and trailing semicolons. chomp; s/^\s+//; s/;\s*$//; s/\s+/ /g; + } - # Push the data into the appropriate data structure. 
- if (/$natts_pat\s+(\d+)/) - { - $catalog{natts} = $1; - } - elsif ( - /^DATA\(insert(\s+OID\s+=\s+(\d+))?\s+\(\s*(.*)\s*\)\s*\)$/) - { - check_natts($filename, $catalog{natts}, $3, $input_file, - $input_line_number); - - push @{ $catalog{data} }, { oid => $2, bki_values => $3 }; - } - elsif (/^DESCR\(\"(.*)\"\)$/) - { - $most_recent = $catalog{data}->[-1]; + # Push the data into the appropriate data structure. + if (/^DECLARE_TOAST\(\s*(\w+),\s*(\d+),\s*(\d+)\)/) + { + push @{ $catalog{toasting} }, + { parent_table => $1, toast_oid => $2, toast_index_oid => $3 }; + } + elsif (/^DECLARE_(UNIQUE_)?INDEX\(\s*(\w+),\s*(\d+),\s*(.+)\)/) + { + push @{ $catalog{indexing} }, + { + is_unique => $1 ? 1 : 0, + index_name => $2, + index_oid => $3, + index_decl => $4 + }; + } + elsif (/^CATALOG\((\w+),(\d+),(\w+)\)/) + { + $catalog{catname} = $1; + $catalog{relation_oid} = $2; + $catalog{relation_oid_macro} = $3; - # this tests if most recent line is not a DATA() statement - if (ref $most_recent ne 'HASH') - { - die "DESCR() does not apply to any catalog ($input_file)"; - } - if (!defined $most_recent->{oid}) - { - die "DESCR() does not apply to any oid ($input_file)"; - } - elsif ($1 ne '') - { - $most_recent->{descr} = $1; - } - } - elsif (/^SHDESCR\(\"(.*)\"\)$/) + $catalog{bootstrap} = /BKI_BOOTSTRAP/ ? ' bootstrap' : ''; + $catalog{shared_relation} = + /BKI_SHARED_RELATION/ ? ' shared_relation' : ''; + $catalog{without_oids} = + /BKI_WITHOUT_OIDS/ ? ' without_oids' : ''; + if (/BKI_ROWTYPE_OID\((\d+),(\w+)\)/) { - $most_recent = $catalog{data}->[-1]; - - # this tests if most recent line is not a DATA() statement - if (ref $most_recent ne 'HASH') - { - die - "SHDESCR() does not apply to any catalog ($input_file)"; - } - if (!defined $most_recent->{oid}) - { - die "SHDESCR() does not apply to any oid ($input_file)"; - } - elsif ($1 ne '') - { - $most_recent->{shdescr} = $1; - } + $catalog{rowtype_oid} = $1; + $catalog{rowtype_oid_clause} = " rowtype_oid $1"; + $catalog{rowtype_oid_macro} = $2; } - elsif (/^DECLARE_TOAST\(\s*(\w+),\s*(\d+),\s*(\d+)\)/) + else { - $catname = 'toasting'; - my ($toast_name, $toast_oid, $index_oid) = ($1, $2, $3); - push @{ $catalog{data} }, - "declare toast $toast_oid $index_oid on $toast_name\n"; + $catalog{rowtype_oid} = ''; + $catalog{rowtype_oid_clause} = ''; + $catalog{rowtype_oid_macro} = ''; } - elsif (/^DECLARE_(UNIQUE_)?INDEX\(\s*(\w+),\s*(\d+),\s*(.+)\)/) + $catalog{schema_macro} = /BKI_SCHEMA_MACRO/ ? 1 : 0; + $declaring_attributes = 1; + } + elsif ($is_client_code) + { + if (/^#endif/) { - $catname = 'indexing'; - my ($is_unique, $index_name, $index_oid, $using) = - ($1, $2, $3, $4); - push @{ $catalog{data} }, - sprintf( - "declare %sindex %s %s %s\n", - $is_unique ? 'unique ' : '', - $index_name, $index_oid, $using); + $is_client_code = 0; } - elsif (/^BUILD_INDICES/) + else { - push @{ $catalog{data} }, "build indices\n"; + push @{ $catalog{client_code} }, $_; } - elsif (/^CATALOG\(([^,]*),(\d+)\)/) + } + elsif ($declaring_attributes) + { + next if (/^{|^$/); + if (/^}/) { - $catname = $1; - $catalog{relation_oid} = $2; - - # Store pg_* catalog names in the same order we receive them - push @{ $catalogs{names} }, $catname; - - $catalog{bootstrap} = /BKI_BOOTSTRAP/ ? ' bootstrap' : ''; - $catalog{shared_relation} = - /BKI_SHARED_RELATION/ ? ' shared_relation' : ''; - $catalog{without_oids} = - /BKI_WITHOUT_OIDS/ ? ' without_oids' : ''; - $catalog{rowtype_oid} = - /BKI_ROWTYPE_OID\((\d+)\)/ ? 
" rowtype_oid $1" : ''; - $catalog{schema_macro} = /BKI_SCHEMA_MACRO/ ? 'True' : ''; - $declaring_attributes = 1; + $declaring_attributes = 0; } - elsif ($declaring_attributes) + else { - next if (/^{|^$/); - next if (/^#/); - if (/^}/) + my %column; + my @attopts = split /\s+/, $_; + my $atttype = shift @attopts; + my $attname = shift @attopts; + die "parse error ($input_file)" + unless ($attname and $atttype); + + if (exists $RENAME_ATTTYPE{$atttype}) { - undef $declaring_attributes; + $atttype = $RENAME_ATTTYPE{$atttype}; } - else + + # If the C name ends with '[]' or '[digits]', we have + # an array type, so we discard that from the name and + # prepend '_' to the type. + if ($attname =~ /(\w+)\[\d*\]/) { - my %row; - my ($atttype, $attname, $attopt) = split /\s+/, $_; - die "parse error ($input_file)" unless $attname; - if (exists $RENAME_ATTTYPE{$atttype}) + $attname = $1; + $atttype = '_' . $atttype; + } + + $column{type} = $atttype; + $column{name} = $attname; + $column{is_varlen} = 1 if $is_varlen; + + foreach my $attopt (@attopts) + { + if ($attopt eq 'BKI_FORCE_NULL') { - $atttype = $RENAME_ATTTYPE{$atttype}; + $column{forcenull} = 1; } - if ($attname =~ /(.*)\[.*\]/) # array attribute + elsif ($attopt eq 'BKI_FORCE_NOT_NULL') { - $attname = $1; - $atttype .= '[]'; # variable-length only + $column{forcenotnull} = 1; } - $row{'type'} = $atttype; - $row{'name'} = $attname; + # We use quotes for values like \0 and \054, to + # make sure all compilers and syntax highlighters + # can recognize them properly. + elsif ($attopt =~ /BKI_DEFAULT\(['"]?([^'"]+)['"]?\)/) + { + $column{default} = $1; + } + elsif ( + $attopt =~ /BKI_ARRAY_DEFAULT\(['"]?([^'"]+)['"]?\)/) + { + $column{array_default} = $1; + } + elsif ($attopt =~ /BKI_LOOKUP\((\w+)\)/) + { + $column{lookup} = $1; + } + else + { + die + "unknown or misformatted column option $attopt on column $attname"; + } - if (defined $attopt) + if ($column{forcenull} and $column{forcenotnull}) { - if ($attopt eq 'BKI_FORCE_NULL') - { - $row{'forcenull'} = 1; - } - elsif ($attopt eq 'BKI_FORCE_NOT_NULL') - { - $row{'forcenotnull'} = 1; - } - else - { - die -"unknown column option $attopt on column $attname"; - } + die "$attname is forced both null and not null"; } - push @{ $catalog{columns} }, \%row; } + push @{ $catalog{columns} }, \%column; } } - $catalogs{$catname} = \%catalog; - close $ifh; } - return \%catalogs; + close $ifh; + return \%catalog; +} + +# Parses a file containing Perl data structure literals, returning live data. +# +# The parameter $preserve_formatting needs to be set for callers that want +# to work with non-data lines in the data files, such as comments and blank +# lines. If a caller just wants to consume the data, leave it unset. +sub ParseData +{ + my ($input_file, $schema, $preserve_formatting) = @_; + + open(my $ifd, '<', $input_file) || die "$input_file: $!"; + $input_file =~ /(\w+)\.dat$/ + or die "Input file $input_file needs to be a .dat file.\n"; + my $catname = $1; + my $data = []; + + # Scan the input file. + while (<$ifd>) + { + my $hash_ref; + + if (/{/) + { + # Capture the hash ref + # NB: Assumes that the next hash ref can't start on the + # same line where the present one ended. + # Not foolproof, but we shouldn't need a full parser, + # since we expect relatively well-behaved input. + + # Quick hack to detect when we have a full hash ref to + # parse. We can't just use a regex because of values in + # pg_aggregate and pg_proc like '{0,0}'. 
This will need + # work if we ever need to allow unbalanced braces within + # a field value. + my $lcnt = tr/{//; + my $rcnt = tr/}//; + + if ($lcnt == $rcnt) + { + # We're treating the input line as a piece of Perl, so we + # need to use string eval here. Tell perlcritic we know what + # we're doing. + eval '$hash_ref = ' . $_; ## no critic (ProhibitStringyEval) + if (!ref $hash_ref) + { + die "$input_file: error parsing line $.:\n$_\n"; + } + + # Annotate each hash with the source line number. + $hash_ref->{line_number} = $.; + + # Expand tuples to their full representation. + AddDefaultValues($hash_ref, $schema, $catname); + } + else + { + my $next_line = <$ifd>; + die "$input_file: file ends within Perl hash\n" + if !defined $next_line; + $_ .= $next_line; + redo; + } + } + + # If we found a hash reference, keep it, unless it is marked as + # autogenerated; in that case it'd duplicate an entry we'll + # autogenerate below. (This makes it safe for reformat_dat_file.pl + # with --full-tuples to print autogenerated entries, which seems like + # useful behavior for debugging.) + # + # Only keep non-data strings if we are told to preserve formatting. + if (defined $hash_ref) + { + push @$data, $hash_ref if !$hash_ref->{autogenerated}; + } + elsif ($preserve_formatting) + { + push @$data, $_; + } + } + close $ifd; + + # If this is pg_type, auto-generate array types too. + GenerateArrayTypes($schema, $data) if $catname eq 'pg_type'; + + return $data; +} + +# Fill in default values of a record using the given schema. +# It's the caller's responsibility to specify other values beforehand. +sub AddDefaultValues +{ + my ($row, $schema, $catname) = @_; + my @missing_fields; + + # Compute special-case column values. + # Note: If you add new cases here, you must also teach + # strip_default_values() in include/catalog/reformat_dat_file.pl + # to delete them. + if ($catname eq 'pg_proc') + { + # pg_proc.pronargs can be derived from proargtypes. + if (defined $row->{proargtypes}) + { + my @proargtypes = split /\s+/, $row->{proargtypes}; + $row->{pronargs} = scalar(@proargtypes); + } + } + + # Now fill in defaults, and note any columns that remain undefined. + foreach my $column (@$schema) + { + my $attname = $column->{name}; + my $atttype = $column->{type}; + + if (defined $row->{$attname}) + { + ; + } + elsif (defined $column->{default}) + { + $row->{$attname} = $column->{default}; + } + else + { + # Failed to find a value. + push @missing_fields, $attname; + } + } + + # Failure to provide all columns is a hard error. + if (@missing_fields) + { + die sprintf "missing values for field(s) %s in %s.dat line %s\n", + join(', ', @missing_fields), $catname, $row->{line_number}; + } } -# Split a DATA line into fields. -# Call this on the bki_values element of a DATA item returned by Catalogs(); -# it returns a list of field values. We don't strip quoting from the fields. -# Note: it should be safe to assign the result to a list of length equal to -# the nominal number of catalog fields, because check_natts already checked -# the number of fields. -sub SplitDataLine +# If a pg_type entry has an array_type_oid metadata field, +# auto-generate an entry for its array type. +sub GenerateArrayTypes { - my $bki_values = shift; - - # This handling of quoted strings might look too simplistic, but it - # matches what bootscanner.l does: that has no provision for quote marks - # inside quoted strings, either. If we don't have a quoted string, just - # snarf everything till next whitespace. 
That will accept some things - # that bootscanner.l will see as erroneous tokens; but it seems wiser - # to do that and let bootscanner.l complain than to silently drop - # non-whitespace characters. - my @result = $bki_values =~ /"[^"]*"|\S+/g; - - return @result; + my $pgtype_schema = shift; + my $types = shift; + my @array_types; + + foreach my $elem_type (@$types) + { + next if !(ref $elem_type eq 'HASH'); + next if !defined($elem_type->{array_type_oid}); + + my %array_type; + + # Set up metadata fields for array type. + $array_type{oid} = $elem_type->{array_type_oid}; + $array_type{autogenerated} = 1; + $array_type{line_number} = $elem_type->{line_number}; + + # Set up column values derived from the element type. + $array_type{typname} = '_' . $elem_type->{typname}; + $array_type{typelem} = $elem_type->{typname}; + + # Arrays require INT alignment, unless the element type requires + # DOUBLE alignment. + $array_type{typalign} = $elem_type->{typalign} eq 'd' ? 'd' : 'i'; + + # Fill in the rest of the array entry's fields. + foreach my $column (@$pgtype_schema) + { + my $attname = $column->{name}; + + # Skip if we already set it above. + next if defined $array_type{$attname}; + + # Apply the BKI_ARRAY_DEFAULT setting if there is one, + # otherwise copy the field from the element type. + if (defined $column->{array_default}) + { + $array_type{$attname} = $column->{array_default}; + } + else + { + $array_type{$attname} = $elem_type->{$attname}; + } + } + + # Lastly, cross-link the array to the element type. + $elem_type->{typarray} = $array_type{typname}; + + push @array_types, \%array_type; + } + + push @$types, @array_types; + + return; } # Rename temporary files to final names. -# Call this function with the final file name and the .tmp extension +# Call this function with the final file name and the .tmp extension. +# +# If the final file already exists and has identical contents, don't +# overwrite it; this behavior avoids unnecessary recompiles due to +# updating the mod date on unchanged header files. +# # Note: recommended extension is ".tmp$$", so that parallel make steps -# can't use the same temp files +# can't use the same temp files. sub RenameTempFile { my $final_name = shift; my $extension = shift; my $temp_name = $final_name . $extension; - print "Writing $final_name\n"; - rename($temp_name, $final_name) || die "rename: $temp_name: $!"; + + if (-f $final_name + && compare($temp_name, $final_name) == 0) + { + unlink($temp_name) || die "unlink: $temp_name: $!"; + } + else + { + rename($temp_name, $final_name) || die "rename: $temp_name: $!"; + } + return; } -# verify the number of fields in the passed-in DATA line -sub check_natts +# Find a symbol defined in a particular header file and extract the value. +# include_path should be the path to src/include/. +sub FindDefinedSymbol { - my ($catname, $natts, $bki_val, $file, $line) = @_; + my ($catalog_header, $include_path, $symbol) = @_; + my $value; - die -"Could not find definition for Natts_${catname} before start of DATA() in $file\n" - unless defined $natts; + # Make sure include path ends in a slash. + if (substr($include_path, -1) ne '/') + { + $include_path .= '/'; + } + my $file = $include_path . 
$catalog_header; + open(my $find_defined_symbol, '<', $file) || die "$file: $!"; + while (<$find_defined_symbol>) + { + if (/^#define\s+\Q$symbol\E\s+(\S+)/) + { + $value = $1; + last; + } + } + close $find_defined_symbol; + return $value if defined $value; + die "$file: no definition found for $symbol\n"; +} + +# Similar to FindDefinedSymbol, but looks in the bootstrap metadata. +sub FindDefinedSymbolFromData +{ + my ($data, $symbol) = @_; + foreach my $row (@{$data}) + { + if ($row->{oid_symbol} eq $symbol) + { + return $row->{oid}; + } + } + die "no definition found for $symbol\n"; +} - my $nfields = scalar(SplitDataLine($bki_val)); +# Extract an array of all the OIDs assigned in the specified catalog headers +# and their associated data files (if any). +# Caution: genbki.pl contains equivalent logic; change it too if you need to +# touch this. +sub FindAllOidsFromHeaders +{ + my @input_files = @_; + + my @oids = (); + + foreach my $header (@input_files) + { + $header =~ /(.+)\.h$/ + or die "Input files need to be header files.\n"; + my $datfile = "$1.dat"; + + my $catalog = Catalog::ParseHeader($header); + + # We ignore the pg_class OID and rowtype OID of bootstrap catalogs, + # as those are expected to appear in the initial data for pg_class + # and pg_type. For regular catalogs, include these OIDs. + if (!$catalog->{bootstrap}) + { + push @oids, $catalog->{relation_oid} + if ($catalog->{relation_oid}); + push @oids, $catalog->{rowtype_oid} if ($catalog->{rowtype_oid}); + } + + # Not all catalogs have a data file. + if (-e $datfile) + { + my $catdata = + Catalog::ParseData($datfile, $catalog->{columns}, 0); + + foreach my $row (@$catdata) + { + push @oids, $row->{oid} if defined $row->{oid}; + } + } + + foreach my $toast (@{ $catalog->{toasting} }) + { + push @oids, $toast->{toast_oid}, $toast->{toast_index_oid}; + } + foreach my $index (@{ $catalog->{indexing} }) + { + push @oids, $index->{index_oid}; + } + } - die sprintf -"Wrong number of attributes in DATA() entry at %s:%d (expected %d but got %d)\n", - $file, $line, $natts, $nfields - unless $natts == $nfields; + return \@oids; } 1; diff --git a/src/backend/catalog/Makefile b/src/backend/catalog/Makefile index fd33426bad..0865240f11 100644 --- a/src/backend/catalog/Makefile +++ b/src/backend/catalog/Makefile @@ -2,6 +2,9 @@ # # Makefile for backend/catalog # +# Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group +# Portions Copyright (c) 1994, Regents of the University of California +# # src/backend/catalog/Makefile # #------------------------------------------------------------------------- @@ -22,13 +25,11 @@ BKIFILES = postgres.bki postgres.description postgres.shdescription include $(top_srcdir)/src/backend/common.mk -all: $(BKIFILES) schemapg.h - -# Note: there are some undocumented dependencies on the ordering in which -# the catalog header files are assembled into postgres.bki. In particular, -# indexing.h had better be last, and toasting.h just before it. - -POSTGRES_BKI_SRCS = $(addprefix $(top_srcdir)/src/include/catalog/,\ +# Note: the order of this list determines the order in which the catalog +# header files are assembled into postgres.bki. BKI_BOOTSTRAP catalogs +# must appear first, and there are reputedly other, undocumented ordering +# dependencies. 
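The OID-gathering helpers above exist so that maintenance scripts do not have to duplicate genbki.pl's scanning of headers and data files. A minimal sketch of a caller in the spirit of duplicate_oids, assuming it runs in src/include/catalog; the glob pattern and the report format are illustrative, not the real script's:

    use lib '../../backend/catalog';
    use Catalog;

    my $oids = Catalog::FindAllOidsFromHeaders(glob('pg_*.h'));
    my %seen;
    $seen{$_}++ foreach @$oids;
    print "$_\n" foreach grep { $seen{$_} > 1 } sort { $a <=> $b } keys %seen;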
+CATALOG_HEADERS := \ pg_proc.h pg_type.h pg_attribute.h pg_class.h \ pg_attrdef.h pg_constraint.h pg_inherits.h pg_index.h pg_operator.h \ pg_opfamily.h pg_opclass.h pg_am.h pg_amop.h pg_amproc.h \ @@ -45,34 +46,65 @@ POSTGRES_BKI_SRCS = $(addprefix $(top_srcdir)/src/include/catalog/,\ pg_default_acl.h pg_init_privs.h pg_seclabel.h pg_shseclabel.h \ pg_collation.h pg_partitioned_table.h pg_range.h pg_transform.h \ pg_sequence.h pg_publication.h pg_publication_rel.h pg_subscription.h \ - pg_subscription_rel.h toasting.h indexing.h \ - toasting.h indexing.h \ - ) + pg_subscription_rel.h + +GENERATED_HEADERS := $(CATALOG_HEADERS:%.h=%_d.h) schemapg.h + +# In the list of headers used to assemble postgres.bki, indexing.h needs +# be last, and toasting.h just before it. This ensures we don't try to +# create indexes or toast tables before their catalogs exist. +POSTGRES_BKI_SRCS := $(addprefix $(top_srcdir)/src/include/catalog/,\ + $(CATALOG_HEADERS) toasting.h indexing.h \ + ) + +# The .dat files we need can just be listed alphabetically. +POSTGRES_BKI_DATA = $(addprefix $(top_srcdir)/src/include/catalog/,\ + pg_aggregate.dat pg_am.dat pg_amop.dat pg_amproc.dat pg_authid.dat \ + pg_cast.dat pg_class.dat pg_collation.dat \ + pg_database.dat pg_language.dat \ + pg_namespace.dat pg_opclass.dat pg_operator.dat pg_opfamily.dat \ + pg_pltemplate.dat pg_proc.dat pg_range.dat pg_tablespace.dat \ + pg_ts_config.dat pg_ts_config_map.dat pg_ts_dict.dat pg_ts_parser.dat \ + pg_ts_template.dat pg_type.dat \ + ) # location of Catalog.pm catalogdir = $(top_srcdir)/src/backend/catalog -# locations of headers that genbki.pl needs to read -pg_includes = -I$(top_srcdir)/src/include/catalog -I$(top_builddir)/src/include/catalog - -# see explanation in ../parser/Makefile -postgres.description: postgres.bki ; - -postgres.shdescription: postgres.bki ; - -schemapg.h: postgres.bki ; - -# Technically, this should depend on Makefile.global, but then -# postgres.bki would need to be rebuilt after every configure run, -# even in distribution tarballs. So this is cheating a bit, but it -# will achieve the goal of updating the version number when it -# changes. -postgres.bki: genbki.pl Catalog.pm $(POSTGRES_BKI_SRCS) $(top_srcdir)/configure $(top_srcdir)/src/include/catalog/duplicate_oids - cd $(top_srcdir)/src/include/catalog && $(PERL) ./duplicate_oids - $(PERL) -I $(catalogdir) $< $(pg_includes) --set-version=$(MAJORVERSION) $(POSTGRES_BKI_SRCS) - +all: distprep generated-header-symlinks + +distprep: bki-stamp + +.PHONY: generated-header-symlinks + +generated-header-symlinks: $(top_builddir)/src/include/catalog/header-stamp + +# bki-stamp records the last time we ran genbki.pl. We don't rely on +# the timestamps of the individual output files, because the Perl script +# won't update them if they didn't change (to avoid unnecessary recompiles). +# Technically, this should depend on Makefile.global which supplies +# $(MAJORVERSION); but then genbki.pl would need to be re-run after every +# configure run, even in distribution tarballs. So depending on configure.in +# instead is cheating a bit, but it will achieve the goal of updating the +# version number when it changes. +bki-stamp: genbki.pl Catalog.pm $(POSTGRES_BKI_SRCS) $(POSTGRES_BKI_DATA) $(top_srcdir)/configure.in + $(PERL) -I $(catalogdir) $< --set-version=$(MAJORVERSION) $(POSTGRES_BKI_SRCS) + touch $@ + +# The generated headers must all be symlinked into builddir/src/include/, +# using absolute links for the reasons explained in src/backend/Makefile. 
+# We use header-stamp to record that we've done this because the symlinks +# themselves may appear older than bki-stamp. +$(top_builddir)/src/include/catalog/header-stamp: bki-stamp + prereqdir=`cd '$(dir $<)' >/dev/null && pwd` && \ + cd '$(dir $@)' && for file in $(GENERATED_HEADERS); do \ + rm -f $$file && $(LN_S) "$$prereqdir/$$file" . ; \ + done + touch $@ + +# Note: installation of generated headers is handled elsewhere .PHONY: install-data -install-data: $(BKIFILES) installdirs +install-data: bki-stamp installdirs $(INSTALL_DATA) $(call vpathsearch,postgres.bki) '$(DESTDIR)$(datadir)/postgres.bki' $(INSTALL_DATA) $(call vpathsearch,postgres.description) '$(DESTDIR)$(datadir)/postgres.description' $(INSTALL_DATA) $(call vpathsearch,postgres.shdescription) '$(DESTDIR)$(datadir)/postgres.shdescription' @@ -87,9 +119,10 @@ installdirs: uninstall-data: rm -f $(addprefix '$(DESTDIR)$(datadir)'/, $(BKIFILES) system_views.sql information_schema.sql sql_features.txt) -# postgres.bki, postgres.description, postgres.shdescription, and schemapg.h -# are in the distribution tarball, so they are not cleaned here. +# postgres.bki, postgres.description, postgres.shdescription, +# and the generated headers are in the distribution tarball, +# so they are not cleaned here. clean: maintainer-clean: clean - rm -f $(BKIFILES) + rm -f bki-stamp $(BKIFILES) $(GENERATED_HEADERS) diff --git a/src/backend/catalog/README b/src/backend/catalog/README deleted file mode 100644 index 7e0ddf312d..0000000000 --- a/src/backend/catalog/README +++ /dev/null @@ -1,111 +0,0 @@ -src/backend/catalog/README - -System Catalog -============== - -This directory contains .c files that manipulate the system catalogs; -src/include/catalog contains the .h files that define the structure -of the system catalogs. - -When the compile-time scripts (Gen_fmgrtab.pl and genbki.pl) -execute, they grep the DATA statements out of the .h files and munge -these in order to generate the postgres.bki file. The .bki file is then -used as input to initdb (which is just a wrapper around postgres -running single-user in bootstrapping mode) in order to generate the -initial (template) system catalog relation files. - ------------------------------------------------------------------ - -People who are going to hose around with the .h files should be aware -of the following facts: - -- It is very important that the DATA statements be properly formatted -(e.g., no broken lines, proper use of white-space and _null_). The -scripts are line-oriented and break easily. In addition, the only -documentation on the proper format for them is the code in the -bootstrap/ directory. Just be careful when adding new DATA -statements. - -- Some catalogs require that OIDs be preallocated to tuples because -of cross-references from other pre-loaded tuples. For example, pg_type -contains pointers into pg_proc (e.g., pg_type.typinput), and pg_proc -contains back-pointers into pg_type (pg_proc.proargtypes). For such -cases, the OID assigned to a tuple may be explicitly set by use of the -"OID = n" clause of the .bki insert statement. If no such pointers are -required to a given tuple, then the OID = n clause may be omitted -(then the system generates an OID in the usual way, or leaves it 0 in a -catalog that has no OIDs). In practice we usually preassign OIDs -for all or none of the pre-loaded tuples in a given catalog, even if only -some of them are actually cross-referenced. 
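In the .dat representation that replaces these DATA() lines, a preassigned OID travels with the row itself as an ordinary oid field, and a row can additionally carry an oid_symbol field, which is what FindDefinedSymbolFromData above looks up when C code needs the value by name. A hypothetical entry, with values invented purely to show the shape:

    { oid => '9876', oid_symbol => 'MY_WIDGET_OID',
      typname => 'widget', typlen => '4', typbyval => 't' },

Columns that are not spelled out are filled from their BKI_DEFAULT settings by AddDefaultValues, when such a default exists, so hand-maintained entries only list the fields that matter for that row.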
- -- We also sometimes preallocate OIDs for catalog tuples whose OIDs must -be known directly in the C code. In such cases, put a #define in the -catalog's .h file, and use the #define symbol in the C code. Writing -the actual numeric value of any OID in C code is considered very bad form. -Direct references to pg_proc OIDs are common enough that there's a special -mechanism to create the necessary #define's automatically: see -backend/utils/Gen_fmgrtab.pl. We also have standard conventions for setting -up #define's for the pg_class OIDs of system catalogs and indexes. For all -the other system catalogs, you have to manually create any #define's you -need. - -- If you need to find a valid OID for a new predefined tuple, -use the unused_oids script. It generates inclusive ranges of -*unused* OIDs (e.g., the line "45-900" means OIDs 45 through 900 have -not been allocated yet). Currently, OIDs 1-9999 are reserved for manual -assignment; the unused_oids script simply looks through the include/catalog -headers to see which ones do not appear in "OID =" clauses in DATA lines. -(As of Postgres 8.1, it also looks at CATALOG and DECLARE_INDEX lines.) -You can also use the duplicate_oids script to check for mistakes. - -- The OID counter starts at 10000 at bootstrap. If a catalog row is in a -table that requires OIDs, but no OID was preassigned by an "OID =" clause, -then it will receive an OID of 10000 or above. - -- To create a "BOOTSTRAP" table you have to do a lot of extra work: these -tables are not created through a normal CREATE TABLE operation, but spring -into existence when first written to during initdb. Therefore, you must -manually create appropriate entries for them in the pre-loaded contents of -pg_class, pg_attribute, and pg_type. Avoid making new catalogs be bootstrap -catalogs if at all possible; generally, only tables that must be written to -in order to create a table should be bootstrapped. - -- Certain BOOTSTRAP tables must be at the start of the Makefile -POSTGRES_BKI_SRCS variable, as these cannot be created through the standard -heap_create_with_catalog process, because it needs these tables to exist -already. The list of files this currently includes is: - pg_proc.h pg_type.h pg_attribute.h pg_class.h -Within this list, pg_type.h must come before pg_attribute.h. -Also, indexing.h must be last, since the indexes can't be created until all -the tables are in place, and toasting.h should probably be next-to-last -(or at least after all the tables that need toast tables). There are -reputedly some other order dependencies in the .bki list, too. - ------------------------------------------------------------------ - -When munging the .c files, you should be aware of certain conventions: - -- The system catalog cache code (and most catalog-munging code in -general) assumes that the fixed-length portions of all system catalog -tuples are in fact present, because it maps C struct declarations onto -them. Thus, the variable-length fields must all be at the end, and -only the variable-length fields of a catalog tuple are permitted to be -NULL. For example, if you set pg_type.typrelid to be NULL, a -piece of code will likely perform "typetup->typrelid" (or, worse, -"typetup->typelem", which follows typrelid). This will result in -random errors or even segmentation violations. Hence, do NOT insert -catalog tuples that contain NULL attributes except in their -variable-length portions! 
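The BKI_FORCE_NOT_NULL and BKI_FORCE_NULL column options parsed above let that expectation be declared on the column itself; the parser records them in the column's hash, roughly as in this sketch (the column name is invented for illustration):

    # For a header line reading "text mycol BKI_FORCE_NOT_NULL;" the parser
    # would record something like:
    { name => 'mycol', type => 'text', forcenotnull => 1 }

A column tagged with both options is rejected outright, since being forced null and not null at once is a contradiction.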
(The bootstrapping code is fairly good about -marking NOT NULL each of the columns that can legally be referenced via -C struct declarations ... but those markings won't be enforced against -DATA commands, so you must get it right in a DATA line.) - -- Modification of the catalogs must be performed with the proper -updating of catalog indexes! That is, most catalogs have indexes -on them; when you munge them using the executor, the executor will -take care of doing the index updates, but if you make direct access -method calls to insert new or modified tuples into a heap, you must -also make the calls to insert the tuple into ALL of its indexes! If -not, the new tuple will generally be "invisible" to the system because -most of the accesses to the catalogs in question will be through the -associated indexes. diff --git a/src/backend/catalog/aclchk.c b/src/backend/catalog/aclchk.c index ccde66a7dd..bd147752ef 100644 --- a/src/backend/catalog/aclchk.c +++ b/src/backend/catalog/aclchk.c @@ -3,7 +3,7 @@ * aclchk.c * Routines to check access control permissions. * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * @@ -86,7 +86,7 @@ typedef struct Oid nspid; /* namespace, or InvalidOid if none */ /* remaining fields are same as in InternalGrant: */ bool is_grant; - GrantObjectType objtype; + ObjectType objtype; bool all_privs; AclMode privileges; List *grantees; @@ -116,8 +116,8 @@ static void ExecGrant_Type(InternalGrant *grantStmt); static void SetDefaultACLsInSchemas(InternalDefaultACL *iacls, List *nspnames); static void SetDefaultACL(InternalDefaultACL *iacls); -static List *objectNamesToOids(GrantObjectType objtype, List *objnames); -static List *objectsInSchemaToOids(GrantObjectType objtype, List *nspnames); +static List *objectNamesToOids(ObjectType objtype, List *objnames); +static List *objectsInSchemaToOids(ObjectType objtype, List *nspnames); static List *getRelationsInNamespace(Oid namespaceId, char relkind); static void expand_col_privileges(List *colnames, Oid table_oid, AclMode this_privileges, @@ -132,9 +132,9 @@ static const char *privilege_to_string(AclMode privilege); static AclMode restrict_and_check_grant(bool is_grant, AclMode avail_goptions, bool all_privs, AclMode privileges, Oid objectId, Oid grantorId, - AclObjectKind objkind, const char *objname, + ObjectType objtype, const char *objname, AttrNumber att_number, const char *colname); -static AclMode pg_aclmask(AclObjectKind objkind, Oid table_oid, AttrNumber attnum, +static AclMode pg_aclmask(ObjectType objtype, Oid table_oid, AttrNumber attnum, Oid roleid, AclMode mask, AclMaskHow how); static void recordExtensionInitPriv(Oid objoid, Oid classoid, int objsubid, Acl *new_acl); @@ -236,56 +236,56 @@ merge_acl_with_grant(Acl *old_acl, bool is_grant, static AclMode restrict_and_check_grant(bool is_grant, AclMode avail_goptions, bool all_privs, AclMode privileges, Oid objectId, Oid grantorId, - AclObjectKind objkind, const char *objname, + ObjectType objtype, const char *objname, AttrNumber att_number, const char *colname) { AclMode this_privileges; AclMode whole_mask; - switch (objkind) + switch (objtype) { - case ACL_KIND_COLUMN: + case OBJECT_COLUMN: whole_mask = ACL_ALL_RIGHTS_COLUMN; break; - case ACL_KIND_CLASS: + case OBJECT_TABLE: whole_mask = ACL_ALL_RIGHTS_RELATION; break; - case ACL_KIND_SEQUENCE: + case OBJECT_SEQUENCE: whole_mask = 
ACL_ALL_RIGHTS_SEQUENCE; break; - case ACL_KIND_DATABASE: + case OBJECT_DATABASE: whole_mask = ACL_ALL_RIGHTS_DATABASE; break; - case ACL_KIND_PROC: + case OBJECT_FUNCTION: whole_mask = ACL_ALL_RIGHTS_FUNCTION; break; - case ACL_KIND_LANGUAGE: + case OBJECT_LANGUAGE: whole_mask = ACL_ALL_RIGHTS_LANGUAGE; break; - case ACL_KIND_LARGEOBJECT: + case OBJECT_LARGEOBJECT: whole_mask = ACL_ALL_RIGHTS_LARGEOBJECT; break; - case ACL_KIND_NAMESPACE: - whole_mask = ACL_ALL_RIGHTS_NAMESPACE; + case OBJECT_SCHEMA: + whole_mask = ACL_ALL_RIGHTS_SCHEMA; break; - case ACL_KIND_TABLESPACE: + case OBJECT_TABLESPACE: whole_mask = ACL_ALL_RIGHTS_TABLESPACE; break; - case ACL_KIND_FDW: + case OBJECT_FDW: whole_mask = ACL_ALL_RIGHTS_FDW; break; - case ACL_KIND_FOREIGN_SERVER: + case OBJECT_FOREIGN_SERVER: whole_mask = ACL_ALL_RIGHTS_FOREIGN_SERVER; break; - case ACL_KIND_EVENT_TRIGGER: + case OBJECT_EVENT_TRIGGER: elog(ERROR, "grantable rights not supported for event triggers"); /* not reached, but keep compiler quiet */ return ACL_NO_RIGHTS; - case ACL_KIND_TYPE: + case OBJECT_TYPE: whole_mask = ACL_ALL_RIGHTS_TYPE; break; default: - elog(ERROR, "unrecognized object kind: %d", objkind); + elog(ERROR, "unrecognized object type: %d", objtype); /* not reached, but keep compiler quiet */ return ACL_NO_RIGHTS; } @@ -297,14 +297,14 @@ restrict_and_check_grant(bool is_grant, AclMode avail_goptions, bool all_privs, */ if (avail_goptions == ACL_NO_RIGHTS) { - if (pg_aclmask(objkind, objectId, att_number, grantorId, + if (pg_aclmask(objtype, objectId, att_number, grantorId, whole_mask | ACL_GRANT_OPTION_FOR(whole_mask), ACLMASK_ANY) == ACL_NO_RIGHTS) { - if (objkind == ACL_KIND_COLUMN && colname) - aclcheck_error_col(ACLCHECK_NO_PRIV, objkind, objname, colname); + if (objtype == OBJECT_COLUMN && colname) + aclcheck_error_col(ACLCHECK_NO_PRIV, objtype, objname, colname); else - aclcheck_error(ACLCHECK_NO_PRIV, objkind, objname); + aclcheck_error(ACLCHECK_NO_PRIV, objtype, objname); } } @@ -320,7 +320,7 @@ restrict_and_check_grant(bool is_grant, AclMode avail_goptions, bool all_privs, { if (this_privileges == 0) { - if (objkind == ACL_KIND_COLUMN && colname) + if (objtype == OBJECT_COLUMN && colname) ereport(WARNING, (errcode(ERRCODE_WARNING_PRIVILEGE_NOT_GRANTED), errmsg("no privileges were granted for column \"%s\" of relation \"%s\"", @@ -333,7 +333,7 @@ restrict_and_check_grant(bool is_grant, AclMode avail_goptions, bool all_privs, } else if (!all_privs && this_privileges != privileges) { - if (objkind == ACL_KIND_COLUMN && colname) + if (objtype == OBJECT_COLUMN && colname) ereport(WARNING, (errcode(ERRCODE_WARNING_PRIVILEGE_NOT_GRANTED), errmsg("not all privileges were granted for column \"%s\" of relation \"%s\"", @@ -349,7 +349,7 @@ restrict_and_check_grant(bool is_grant, AclMode avail_goptions, bool all_privs, { if (this_privileges == 0) { - if (objkind == ACL_KIND_COLUMN && colname) + if (objtype == OBJECT_COLUMN && colname) ereport(WARNING, (errcode(ERRCODE_WARNING_PRIVILEGE_NOT_REVOKED), errmsg("no privileges could be revoked for column \"%s\" of relation \"%s\"", @@ -362,7 +362,7 @@ restrict_and_check_grant(bool is_grant, AclMode avail_goptions, bool all_privs, } else if (!all_privs && this_privileges != privileges) { - if (objkind == ACL_KIND_COLUMN && colname) + if (objtype == OBJECT_COLUMN && colname) ereport(WARNING, (errcode(ERRCODE_WARNING_PRIVILEGE_NOT_REVOKED), errmsg("not all privileges could be revoked for column \"%s\" of relation \"%s\"", @@ -441,60 +441,69 @@ ExecuteGrantStmt(GrantStmt *stmt) /* 
* Convert stmt->privileges, a list of AccessPriv nodes, into an AclMode - * bitmask. Note: objtype can't be ACL_OBJECT_COLUMN. + * bitmask. Note: objtype can't be OBJECT_COLUMN. */ switch (stmt->objtype) { + case OBJECT_TABLE: + /* * Because this might be a sequence, we test both relation and * sequence bits, and later do a more limited test when we know * the object type. */ - case ACL_OBJECT_RELATION: all_privileges = ACL_ALL_RIGHTS_RELATION | ACL_ALL_RIGHTS_SEQUENCE; errormsg = gettext_noop("invalid privilege type %s for relation"); break; - case ACL_OBJECT_SEQUENCE: + case OBJECT_SEQUENCE: all_privileges = ACL_ALL_RIGHTS_SEQUENCE; errormsg = gettext_noop("invalid privilege type %s for sequence"); break; - case ACL_OBJECT_DATABASE: + case OBJECT_DATABASE: all_privileges = ACL_ALL_RIGHTS_DATABASE; errormsg = gettext_noop("invalid privilege type %s for database"); break; - case ACL_OBJECT_DOMAIN: + case OBJECT_DOMAIN: all_privileges = ACL_ALL_RIGHTS_TYPE; errormsg = gettext_noop("invalid privilege type %s for domain"); break; - case ACL_OBJECT_FUNCTION: + case OBJECT_FUNCTION: all_privileges = ACL_ALL_RIGHTS_FUNCTION; errormsg = gettext_noop("invalid privilege type %s for function"); break; - case ACL_OBJECT_LANGUAGE: + case OBJECT_LANGUAGE: all_privileges = ACL_ALL_RIGHTS_LANGUAGE; errormsg = gettext_noop("invalid privilege type %s for language"); break; - case ACL_OBJECT_LARGEOBJECT: + case OBJECT_LARGEOBJECT: all_privileges = ACL_ALL_RIGHTS_LARGEOBJECT; errormsg = gettext_noop("invalid privilege type %s for large object"); break; - case ACL_OBJECT_NAMESPACE: - all_privileges = ACL_ALL_RIGHTS_NAMESPACE; + case OBJECT_SCHEMA: + all_privileges = ACL_ALL_RIGHTS_SCHEMA; errormsg = gettext_noop("invalid privilege type %s for schema"); break; - case ACL_OBJECT_TABLESPACE: + case OBJECT_PROCEDURE: + all_privileges = ACL_ALL_RIGHTS_FUNCTION; + errormsg = gettext_noop("invalid privilege type %s for procedure"); + break; + case OBJECT_ROUTINE: + all_privileges = ACL_ALL_RIGHTS_FUNCTION; + errormsg = gettext_noop("invalid privilege type %s for routine"); + break; + case OBJECT_TABLESPACE: all_privileges = ACL_ALL_RIGHTS_TABLESPACE; errormsg = gettext_noop("invalid privilege type %s for tablespace"); break; - case ACL_OBJECT_TYPE: + case OBJECT_TYPE: all_privileges = ACL_ALL_RIGHTS_TYPE; errormsg = gettext_noop("invalid privilege type %s for type"); break; - case ACL_OBJECT_FDW: + case OBJECT_FDW: all_privileges = ACL_ALL_RIGHTS_FDW; errormsg = gettext_noop("invalid privilege type %s for foreign-data wrapper"); break; - case ACL_OBJECT_FOREIGN_SERVER: + case OBJECT_FOREIGN_SERVER: all_privileges = ACL_ALL_RIGHTS_FOREIGN_SERVER; errormsg = gettext_noop("invalid privilege type %s for foreign server"); break; @@ -532,7 +541,7 @@ ExecuteGrantStmt(GrantStmt *stmt) */ if (privnode->cols) { - if (stmt->objtype != ACL_OBJECT_RELATION) + if (stmt->objtype != OBJECT_TABLE) ereport(ERROR, (errcode(ERRCODE_INVALID_GRANT_OPERATION), errmsg("column privileges are only valid for relations"))); @@ -566,36 +575,38 @@ ExecGrantStmt_oids(InternalGrant *istmt) { switch (istmt->objtype) { - case ACL_OBJECT_RELATION: - case ACL_OBJECT_SEQUENCE: + case OBJECT_TABLE: + case OBJECT_SEQUENCE: ExecGrant_Relation(istmt); break; - case ACL_OBJECT_DATABASE: + case OBJECT_DATABASE: ExecGrant_Database(istmt); break; - case ACL_OBJECT_DOMAIN: - case ACL_OBJECT_TYPE: + case OBJECT_DOMAIN: + case OBJECT_TYPE: ExecGrant_Type(istmt); break; - case ACL_OBJECT_FDW: + case OBJECT_FDW: ExecGrant_Fdw(istmt); break; - case 
ACL_OBJECT_FOREIGN_SERVER: + case OBJECT_FOREIGN_SERVER: ExecGrant_ForeignServer(istmt); break; - case ACL_OBJECT_FUNCTION: + case OBJECT_FUNCTION: + case OBJECT_PROCEDURE: + case OBJECT_ROUTINE: ExecGrant_Function(istmt); break; - case ACL_OBJECT_LANGUAGE: + case OBJECT_LANGUAGE: ExecGrant_Language(istmt); break; - case ACL_OBJECT_LARGEOBJECT: + case OBJECT_LARGEOBJECT: ExecGrant_Largeobject(istmt); break; - case ACL_OBJECT_NAMESPACE: + case OBJECT_SCHEMA: ExecGrant_Namespace(istmt); break; - case ACL_OBJECT_TABLESPACE: + case OBJECT_TABLESPACE: ExecGrant_Tablespace(istmt); break; default: @@ -609,7 +620,7 @@ ExecGrantStmt_oids(InternalGrant *istmt) * the functions a chance to adjust the istmt with privileges actually * granted. */ - if (EventTriggerSupportsGrantObjectType(istmt->objtype)) + if (EventTriggerSupportsObjectType(istmt->objtype)) EventTriggerCollectGrant(istmt); } @@ -624,7 +635,7 @@ ExecGrantStmt_oids(InternalGrant *istmt) * to fail. */ static List * -objectNamesToOids(GrantObjectType objtype, List *objnames) +objectNamesToOids(ObjectType objtype, List *objnames) { List *objects = NIL; ListCell *cell; @@ -633,8 +644,8 @@ objectNamesToOids(GrantObjectType objtype, List *objnames) switch (objtype) { - case ACL_OBJECT_RELATION: - case ACL_OBJECT_SEQUENCE: + case OBJECT_TABLE: + case OBJECT_SEQUENCE: foreach(cell, objnames) { RangeVar *relvar = (RangeVar *) lfirst(cell); @@ -644,7 +655,7 @@ objectNamesToOids(GrantObjectType objtype, List *objnames) objects = lappend_oid(objects, relOid); } break; - case ACL_OBJECT_DATABASE: + case OBJECT_DATABASE: foreach(cell, objnames) { char *dbname = strVal(lfirst(cell)); @@ -654,8 +665,8 @@ objectNamesToOids(GrantObjectType objtype, List *objnames) objects = lappend_oid(objects, dbid); } break; - case ACL_OBJECT_DOMAIN: - case ACL_OBJECT_TYPE: + case OBJECT_DOMAIN: + case OBJECT_TYPE: foreach(cell, objnames) { List *typname = (List *) lfirst(cell); @@ -665,17 +676,17 @@ objectNamesToOids(GrantObjectType objtype, List *objnames) objects = lappend_oid(objects, oid); } break; - case ACL_OBJECT_FUNCTION: + case OBJECT_FUNCTION: foreach(cell, objnames) { ObjectWithArgs *func = (ObjectWithArgs *) lfirst(cell); Oid funcid; - funcid = LookupFuncWithArgs(func, false); + funcid = LookupFuncWithArgs(OBJECT_FUNCTION, func, false); objects = lappend_oid(objects, funcid); } break; - case ACL_OBJECT_LANGUAGE: + case OBJECT_LANGUAGE: foreach(cell, objnames) { char *langname = strVal(lfirst(cell)); @@ -685,7 +696,7 @@ objectNamesToOids(GrantObjectType objtype, List *objnames) objects = lappend_oid(objects, oid); } break; - case ACL_OBJECT_LARGEOBJECT: + case OBJECT_LARGEOBJECT: foreach(cell, objnames) { Oid lobjOid = oidparse(lfirst(cell)); @@ -699,7 +710,7 @@ objectNamesToOids(GrantObjectType objtype, List *objnames) objects = lappend_oid(objects, lobjOid); } break; - case ACL_OBJECT_NAMESPACE: + case OBJECT_SCHEMA: foreach(cell, objnames) { char *nspname = strVal(lfirst(cell)); @@ -709,7 +720,27 @@ objectNamesToOids(GrantObjectType objtype, List *objnames) objects = lappend_oid(objects, oid); } break; - case ACL_OBJECT_TABLESPACE: + case OBJECT_PROCEDURE: + foreach(cell, objnames) + { + ObjectWithArgs *func = (ObjectWithArgs *) lfirst(cell); + Oid procid; + + procid = LookupFuncWithArgs(OBJECT_PROCEDURE, func, false); + objects = lappend_oid(objects, procid); + } + break; + case OBJECT_ROUTINE: + foreach(cell, objnames) + { + ObjectWithArgs *func = (ObjectWithArgs *) lfirst(cell); + Oid routid; + + routid = LookupFuncWithArgs(OBJECT_ROUTINE, func, false); 
+ objects = lappend_oid(objects, routid); + } + break; + case OBJECT_TABLESPACE: foreach(cell, objnames) { char *spcname = strVal(lfirst(cell)); @@ -719,7 +750,7 @@ objectNamesToOids(GrantObjectType objtype, List *objnames) objects = lappend_oid(objects, spcoid); } break; - case ACL_OBJECT_FDW: + case OBJECT_FDW: foreach(cell, objnames) { char *fdwname = strVal(lfirst(cell)); @@ -728,7 +759,7 @@ objectNamesToOids(GrantObjectType objtype, List *objnames) objects = lappend_oid(objects, fdwid); } break; - case ACL_OBJECT_FOREIGN_SERVER: + case OBJECT_FOREIGN_SERVER: foreach(cell, objnames) { char *srvname = strVal(lfirst(cell)); @@ -753,7 +784,7 @@ objectNamesToOids(GrantObjectType objtype, List *objnames) * no privilege checking on the individual objects here. */ static List * -objectsInSchemaToOids(GrantObjectType objtype, List *nspnames) +objectsInSchemaToOids(ObjectType objtype, List *nspnames) { List *objects = NIL; ListCell *cell; @@ -768,7 +799,7 @@ objectsInSchemaToOids(GrantObjectType objtype, List *nspnames) switch (objtype) { - case ACL_OBJECT_RELATION: + case OBJECT_TABLE: objs = getRelationsInNamespace(namespaceId, RELKIND_RELATION); objects = list_concat(objects, objs); objs = getRelationsInNamespace(namespaceId, RELKIND_VIEW); @@ -780,24 +811,40 @@ objectsInSchemaToOids(GrantObjectType objtype, List *nspnames) objs = getRelationsInNamespace(namespaceId, RELKIND_PARTITIONED_TABLE); objects = list_concat(objects, objs); break; - case ACL_OBJECT_SEQUENCE: + case OBJECT_SEQUENCE: objs = getRelationsInNamespace(namespaceId, RELKIND_SEQUENCE); objects = list_concat(objects, objs); break; - case ACL_OBJECT_FUNCTION: + case OBJECT_FUNCTION: + case OBJECT_PROCEDURE: + case OBJECT_ROUTINE: { - ScanKeyData key[1]; + ScanKeyData key[2]; + int keycount; Relation rel; HeapScanDesc scan; HeapTuple tuple; - ScanKeyInit(&key[0], + keycount = 0; + ScanKeyInit(&key[keycount++], Anum_pg_proc_pronamespace, BTEqualStrategyNumber, F_OIDEQ, ObjectIdGetDatum(namespaceId)); + if (objtype == OBJECT_FUNCTION) + /* includes aggregates and window functions */ + ScanKeyInit(&key[keycount++], + Anum_pg_proc_prokind, + BTEqualStrategyNumber, F_CHARNE, + CharGetDatum(PROKIND_PROCEDURE)); + else if (objtype == OBJECT_PROCEDURE) + ScanKeyInit(&key[keycount++], + Anum_pg_proc_prokind, + BTEqualStrategyNumber, F_CHAREQ, + CharGetDatum(PROKIND_PROCEDURE)); + rel = heap_open(ProcedureRelationId, AccessShareLock); - scan = heap_beginscan_catalog(rel, 1, key); + scan = heap_beginscan_catalog(rel, keycount, key); while ((tuple = heap_getnext(scan, ForwardScanDirection)) != NULL) { @@ -943,24 +990,32 @@ ExecAlterDefaultPrivilegesStmt(ParseState *pstate, AlterDefaultPrivilegesStmt *s */ switch (action->objtype) { - case ACL_OBJECT_RELATION: + case OBJECT_TABLE: all_privileges = ACL_ALL_RIGHTS_RELATION; errormsg = gettext_noop("invalid privilege type %s for relation"); break; - case ACL_OBJECT_SEQUENCE: + case OBJECT_SEQUENCE: all_privileges = ACL_ALL_RIGHTS_SEQUENCE; errormsg = gettext_noop("invalid privilege type %s for sequence"); break; - case ACL_OBJECT_FUNCTION: + case OBJECT_FUNCTION: all_privileges = ACL_ALL_RIGHTS_FUNCTION; errormsg = gettext_noop("invalid privilege type %s for function"); break; - case ACL_OBJECT_TYPE: + case OBJECT_PROCEDURE: + all_privileges = ACL_ALL_RIGHTS_FUNCTION; + errormsg = gettext_noop("invalid privilege type %s for procedure"); + break; + case OBJECT_ROUTINE: + all_privileges = ACL_ALL_RIGHTS_FUNCTION; + errormsg = gettext_noop("invalid privilege type %s for routine"); + break; + case 
OBJECT_TYPE: all_privileges = ACL_ALL_RIGHTS_TYPE; errormsg = gettext_noop("invalid privilege type %s for type"); break; - case ACL_OBJECT_NAMESPACE: - all_privileges = ACL_ALL_RIGHTS_NAMESPACE; + case OBJECT_SCHEMA: + all_privileges = ACL_ALL_RIGHTS_SCHEMA; errormsg = gettext_noop("invalid privilege type %s for schema"); break; default: @@ -1126,38 +1181,38 @@ SetDefaultACL(InternalDefaultACL *iacls) */ switch (iacls->objtype) { - case ACL_OBJECT_RELATION: + case OBJECT_TABLE: objtype = DEFACLOBJ_RELATION; if (iacls->all_privs && this_privileges == ACL_NO_RIGHTS) this_privileges = ACL_ALL_RIGHTS_RELATION; break; - case ACL_OBJECT_SEQUENCE: + case OBJECT_SEQUENCE: objtype = DEFACLOBJ_SEQUENCE; if (iacls->all_privs && this_privileges == ACL_NO_RIGHTS) this_privileges = ACL_ALL_RIGHTS_SEQUENCE; break; - case ACL_OBJECT_FUNCTION: + case OBJECT_FUNCTION: objtype = DEFACLOBJ_FUNCTION; if (iacls->all_privs && this_privileges == ACL_NO_RIGHTS) this_privileges = ACL_ALL_RIGHTS_FUNCTION; break; - case ACL_OBJECT_TYPE: + case OBJECT_TYPE: objtype = DEFACLOBJ_TYPE; if (iacls->all_privs && this_privileges == ACL_NO_RIGHTS) this_privileges = ACL_ALL_RIGHTS_TYPE; break; - case ACL_OBJECT_NAMESPACE: + case OBJECT_SCHEMA: if (OidIsValid(iacls->nspid)) ereport(ERROR, (errcode(ERRCODE_INVALID_GRANT_OPERATION), errmsg("cannot use IN SCHEMA clause when using GRANT/REVOKE ON SCHEMAS"))); objtype = DEFACLOBJ_NAMESPACE; if (iacls->all_privs && this_privileges == ACL_NO_RIGHTS) - this_privileges = ACL_ALL_RIGHTS_NAMESPACE; + this_privileges = ACL_ALL_RIGHTS_SCHEMA; break; default: @@ -1372,19 +1427,19 @@ RemoveRoleFromObjectACL(Oid roleid, Oid classid, Oid objid) switch (pg_default_acl_tuple->defaclobjtype) { case DEFACLOBJ_RELATION: - iacls.objtype = ACL_OBJECT_RELATION; + iacls.objtype = OBJECT_TABLE; break; case DEFACLOBJ_SEQUENCE: - iacls.objtype = ACL_OBJECT_SEQUENCE; + iacls.objtype = OBJECT_SEQUENCE; break; case DEFACLOBJ_FUNCTION: - iacls.objtype = ACL_OBJECT_FUNCTION; + iacls.objtype = OBJECT_FUNCTION; break; case DEFACLOBJ_TYPE: - iacls.objtype = ACL_OBJECT_TYPE; + iacls.objtype = OBJECT_TYPE; break; case DEFACLOBJ_NAMESPACE: - iacls.objtype = ACL_OBJECT_NAMESPACE; + iacls.objtype = OBJECT_SCHEMA; break; default: /* Shouldn't get here */ @@ -1413,35 +1468,35 @@ RemoveRoleFromObjectACL(Oid roleid, Oid classid, Oid objid) switch (classid) { case RelationRelationId: - /* it's OK to use RELATION for a sequence */ - istmt.objtype = ACL_OBJECT_RELATION; + /* it's OK to use TABLE for a sequence */ + istmt.objtype = OBJECT_TABLE; break; case DatabaseRelationId: - istmt.objtype = ACL_OBJECT_DATABASE; + istmt.objtype = OBJECT_DATABASE; break; case TypeRelationId: - istmt.objtype = ACL_OBJECT_TYPE; + istmt.objtype = OBJECT_TYPE; break; case ProcedureRelationId: - istmt.objtype = ACL_OBJECT_FUNCTION; + istmt.objtype = OBJECT_ROUTINE; break; case LanguageRelationId: - istmt.objtype = ACL_OBJECT_LANGUAGE; + istmt.objtype = OBJECT_LANGUAGE; break; case LargeObjectRelationId: - istmt.objtype = ACL_OBJECT_LARGEOBJECT; + istmt.objtype = OBJECT_LARGEOBJECT; break; case NamespaceRelationId: - istmt.objtype = ACL_OBJECT_NAMESPACE; + istmt.objtype = OBJECT_SCHEMA; break; case TableSpaceRelationId: - istmt.objtype = ACL_OBJECT_TABLESPACE; + istmt.objtype = OBJECT_TABLESPACE; break; case ForeignServerRelationId: - istmt.objtype = ACL_OBJECT_FOREIGN_SERVER; + istmt.objtype = OBJECT_FOREIGN_SERVER; break; case ForeignDataWrapperRelationId: - istmt.objtype = ACL_OBJECT_FDW; + istmt.objtype = OBJECT_FDW; break; default: 
elog(ERROR, "unexpected object class %u", classid); @@ -1624,7 +1679,7 @@ ExecGrant_Attribute(InternalGrant *istmt, Oid relOid, const char *relname, &isNull); if (isNull) { - old_acl = acldefault(ACL_OBJECT_COLUMN, ownerId); + old_acl = acldefault(OBJECT_COLUMN, ownerId); /* There are no old member roles according to the catalogs */ noldmembers = 0; oldmembers = NULL; @@ -1663,7 +1718,7 @@ ExecGrant_Attribute(InternalGrant *istmt, Oid relOid, const char *relname, restrict_and_check_grant(istmt->is_grant, avail_goptions, (col_privileges == ACL_ALL_RIGHTS_COLUMN), col_privileges, - relOid, grantorId, ACL_KIND_COLUMN, + relOid, grantorId, OBJECT_COLUMN, relname, attnum, NameStr(pg_attribute_tuple->attname)); @@ -1766,7 +1821,8 @@ ExecGrant_Relation(InternalGrant *istmt) pg_class_tuple = (Form_pg_class) GETSTRUCT(tuple); /* Not sensible to grant on an index */ - if (pg_class_tuple->relkind == RELKIND_INDEX) + if (pg_class_tuple->relkind == RELKIND_INDEX || + pg_class_tuple->relkind == RELKIND_PARTITIONED_INDEX) ereport(ERROR, (errcode(ERRCODE_WRONG_OBJECT_TYPE), errmsg("\"%s\" is an index", @@ -1780,7 +1836,7 @@ ExecGrant_Relation(InternalGrant *istmt) NameStr(pg_class_tuple->relname)))); /* Used GRANT SEQUENCE on a non-sequence? */ - if (istmt->objtype == ACL_OBJECT_SEQUENCE && + if (istmt->objtype == OBJECT_SEQUENCE && pg_class_tuple->relkind != RELKIND_SEQUENCE) ereport(ERROR, (errcode(ERRCODE_WRONG_OBJECT_TYPE), @@ -1804,7 +1860,7 @@ ExecGrant_Relation(InternalGrant *istmt) * permissions. The OR of table and sequence permissions were already * checked. */ - if (istmt->objtype == ACL_OBJECT_RELATION) + if (istmt->objtype == OBJECT_TABLE) { if (pg_class_tuple->relkind == RELKIND_SEQUENCE) { @@ -1883,10 +1939,10 @@ ExecGrant_Relation(InternalGrant *istmt) switch (pg_class_tuple->relkind) { case RELKIND_SEQUENCE: - old_acl = acldefault(ACL_OBJECT_SEQUENCE, ownerId); + old_acl = acldefault(OBJECT_SEQUENCE, ownerId); break; default: - old_acl = acldefault(ACL_OBJECT_RELATION, ownerId); + old_acl = acldefault(OBJECT_TABLE, ownerId); break; } /* There are no old member roles according to the catalogs */ @@ -1917,7 +1973,7 @@ ExecGrant_Relation(InternalGrant *istmt) bool replaces[Natts_pg_class]; int nnewmembers; Oid *newmembers; - AclObjectKind aclkind; + ObjectType objtype; /* Determine ID to do the grant as, and available grant options */ select_best_grantor(GetUserId(), this_privileges, @@ -1927,10 +1983,10 @@ ExecGrant_Relation(InternalGrant *istmt) switch (pg_class_tuple->relkind) { case RELKIND_SEQUENCE: - aclkind = ACL_KIND_SEQUENCE; + objtype = OBJECT_SEQUENCE; break; default: - aclkind = ACL_KIND_CLASS; + objtype = OBJECT_TABLE; break; } @@ -1941,7 +1997,7 @@ ExecGrant_Relation(InternalGrant *istmt) this_privileges = restrict_and_check_grant(istmt->is_grant, avail_goptions, istmt->all_privs, this_privileges, - relOid, grantorId, aclkind, + relOid, grantorId, objtype, NameStr(pg_class_tuple->relname), 0, NULL); @@ -2111,7 +2167,7 @@ ExecGrant_Database(InternalGrant *istmt) RelationGetDescr(relation), &isNull); if (isNull) { - old_acl = acldefault(ACL_OBJECT_DATABASE, ownerId); + old_acl = acldefault(OBJECT_DATABASE, ownerId); /* There are no old member roles according to the catalogs */ noldmembers = 0; oldmembers = NULL; @@ -2135,7 +2191,7 @@ ExecGrant_Database(InternalGrant *istmt) this_privileges = restrict_and_check_grant(istmt->is_grant, avail_goptions, istmt->all_privs, istmt->privileges, - datId, grantorId, ACL_KIND_DATABASE, + datId, grantorId, OBJECT_DATABASE, 
NameStr(pg_database_tuple->datname), 0, NULL); @@ -2233,7 +2289,7 @@ ExecGrant_Fdw(InternalGrant *istmt) &isNull); if (isNull) { - old_acl = acldefault(ACL_OBJECT_FDW, ownerId); + old_acl = acldefault(OBJECT_FDW, ownerId); /* There are no old member roles according to the catalogs */ noldmembers = 0; oldmembers = NULL; @@ -2257,7 +2313,7 @@ ExecGrant_Fdw(InternalGrant *istmt) this_privileges = restrict_and_check_grant(istmt->is_grant, avail_goptions, istmt->all_privs, istmt->privileges, - fdwid, grantorId, ACL_KIND_FDW, + fdwid, grantorId, OBJECT_FDW, NameStr(pg_fdw_tuple->fdwname), 0, NULL); @@ -2359,7 +2415,7 @@ ExecGrant_ForeignServer(InternalGrant *istmt) &isNull); if (isNull) { - old_acl = acldefault(ACL_OBJECT_FOREIGN_SERVER, ownerId); + old_acl = acldefault(OBJECT_FOREIGN_SERVER, ownerId); /* There are no old member roles according to the catalogs */ noldmembers = 0; oldmembers = NULL; @@ -2383,7 +2439,7 @@ ExecGrant_ForeignServer(InternalGrant *istmt) this_privileges = restrict_and_check_grant(istmt->is_grant, avail_goptions, istmt->all_privs, istmt->privileges, - srvid, grantorId, ACL_KIND_FOREIGN_SERVER, + srvid, grantorId, OBJECT_FOREIGN_SERVER, NameStr(pg_server_tuple->srvname), 0, NULL); @@ -2483,7 +2539,7 @@ ExecGrant_Function(InternalGrant *istmt) &isNull); if (isNull) { - old_acl = acldefault(ACL_OBJECT_FUNCTION, ownerId); + old_acl = acldefault(OBJECT_FUNCTION, ownerId); /* There are no old member roles according to the catalogs */ noldmembers = 0; oldmembers = NULL; @@ -2507,7 +2563,7 @@ ExecGrant_Function(InternalGrant *istmt) this_privileges = restrict_and_check_grant(istmt->is_grant, avail_goptions, istmt->all_privs, istmt->privileges, - funcId, grantorId, ACL_KIND_PROC, + funcId, grantorId, OBJECT_FUNCTION, NameStr(pg_proc_tuple->proname), 0, NULL); @@ -2614,7 +2670,7 @@ ExecGrant_Language(InternalGrant *istmt) &isNull); if (isNull) { - old_acl = acldefault(ACL_OBJECT_LANGUAGE, ownerId); + old_acl = acldefault(OBJECT_LANGUAGE, ownerId); /* There are no old member roles according to the catalogs */ noldmembers = 0; oldmembers = NULL; @@ -2638,7 +2694,7 @@ ExecGrant_Language(InternalGrant *istmt) this_privileges = restrict_and_check_grant(istmt->is_grant, avail_goptions, istmt->all_privs, istmt->privileges, - langId, grantorId, ACL_KIND_LANGUAGE, + langId, grantorId, OBJECT_LANGUAGE, NameStr(pg_language_tuple->lanname), 0, NULL); @@ -2752,7 +2808,7 @@ ExecGrant_Largeobject(InternalGrant *istmt) RelationGetDescr(relation), &isNull); if (isNull) { - old_acl = acldefault(ACL_OBJECT_LARGEOBJECT, ownerId); + old_acl = acldefault(OBJECT_LARGEOBJECT, ownerId); /* There are no old member roles according to the catalogs */ noldmembers = 0; oldmembers = NULL; @@ -2777,7 +2833,7 @@ ExecGrant_Largeobject(InternalGrant *istmt) this_privileges = restrict_and_check_grant(istmt->is_grant, avail_goptions, istmt->all_privs, istmt->privileges, - loid, grantorId, ACL_KIND_LARGEOBJECT, + loid, grantorId, OBJECT_LARGEOBJECT, loname, 0, NULL); /* @@ -2836,7 +2892,7 @@ ExecGrant_Namespace(InternalGrant *istmt) ListCell *cell; if (istmt->all_privs && istmt->privileges == ACL_NO_RIGHTS) - istmt->privileges = ACL_ALL_RIGHTS_NAMESPACE; + istmt->privileges = ACL_ALL_RIGHTS_SCHEMA; relation = heap_open(NamespaceRelationId, RowExclusiveLock); @@ -2878,7 +2934,7 @@ ExecGrant_Namespace(InternalGrant *istmt) &isNull); if (isNull) { - old_acl = acldefault(ACL_OBJECT_NAMESPACE, ownerId); + old_acl = acldefault(OBJECT_SCHEMA, ownerId); /* There are no old member roles according to the catalogs */ 
noldmembers = 0; oldmembers = NULL; @@ -2902,7 +2958,7 @@ ExecGrant_Namespace(InternalGrant *istmt) this_privileges = restrict_and_check_grant(istmt->is_grant, avail_goptions, istmt->all_privs, istmt->privileges, - nspid, grantorId, ACL_KIND_NAMESPACE, + nspid, grantorId, OBJECT_SCHEMA, NameStr(pg_namespace_tuple->nspname), 0, NULL); @@ -3002,7 +3058,7 @@ ExecGrant_Tablespace(InternalGrant *istmt) RelationGetDescr(relation), &isNull); if (isNull) { - old_acl = acldefault(ACL_OBJECT_TABLESPACE, ownerId); + old_acl = acldefault(OBJECT_TABLESPACE, ownerId); /* There are no old member roles according to the catalogs */ noldmembers = 0; oldmembers = NULL; @@ -3026,7 +3082,7 @@ ExecGrant_Tablespace(InternalGrant *istmt) this_privileges = restrict_and_check_grant(istmt->is_grant, avail_goptions, istmt->all_privs, istmt->privileges, - tblId, grantorId, ACL_KIND_TABLESPACE, + tblId, grantorId, OBJECT_TABLESPACE, NameStr(pg_tablespace_tuple->spcname), 0, NULL); @@ -3120,7 +3176,7 @@ ExecGrant_Type(InternalGrant *istmt) errhint("Set the privileges of the element type instead."))); /* Used GRANT DOMAIN on a non-domain? */ - if (istmt->objtype == ACL_OBJECT_DOMAIN && + if (istmt->objtype == OBJECT_DOMAIN && pg_type_tuple->typtype != TYPTYPE_DOMAIN) ereport(ERROR, (errcode(ERRCODE_WRONG_OBJECT_TYPE), @@ -3160,7 +3216,7 @@ ExecGrant_Type(InternalGrant *istmt) this_privileges = restrict_and_check_grant(istmt->is_grant, avail_goptions, istmt->all_privs, istmt->privileges, - typId, grantorId, ACL_KIND_TYPE, + typId, grantorId, OBJECT_TYPE, NameStr(pg_type_tuple->typname), 0, NULL); @@ -3289,114 +3345,8 @@ privilege_to_string(AclMode privilege) * Note: we do not double-quote the %s's below, because many callers * supply strings that might be already quoted. */ - -static const char *const no_priv_msg[MAX_ACL_KIND] = -{ - /* ACL_KIND_COLUMN */ - gettext_noop("permission denied for column %s"), - /* ACL_KIND_CLASS */ - gettext_noop("permission denied for relation %s"), - /* ACL_KIND_SEQUENCE */ - gettext_noop("permission denied for sequence %s"), - /* ACL_KIND_DATABASE */ - gettext_noop("permission denied for database %s"), - /* ACL_KIND_PROC */ - gettext_noop("permission denied for function %s"), - /* ACL_KIND_OPER */ - gettext_noop("permission denied for operator %s"), - /* ACL_KIND_TYPE */ - gettext_noop("permission denied for type %s"), - /* ACL_KIND_LANGUAGE */ - gettext_noop("permission denied for language %s"), - /* ACL_KIND_LARGEOBJECT */ - gettext_noop("permission denied for large object %s"), - /* ACL_KIND_NAMESPACE */ - gettext_noop("permission denied for schema %s"), - /* ACL_KIND_OPCLASS */ - gettext_noop("permission denied for operator class %s"), - /* ACL_KIND_OPFAMILY */ - gettext_noop("permission denied for operator family %s"), - /* ACL_KIND_COLLATION */ - gettext_noop("permission denied for collation %s"), - /* ACL_KIND_CONVERSION */ - gettext_noop("permission denied for conversion %s"), - /* ACL_KIND_STATISTICS */ - gettext_noop("permission denied for statistics object %s"), - /* ACL_KIND_TABLESPACE */ - gettext_noop("permission denied for tablespace %s"), - /* ACL_KIND_TSDICTIONARY */ - gettext_noop("permission denied for text search dictionary %s"), - /* ACL_KIND_TSCONFIGURATION */ - gettext_noop("permission denied for text search configuration %s"), - /* ACL_KIND_FDW */ - gettext_noop("permission denied for foreign-data wrapper %s"), - /* ACL_KIND_FOREIGN_SERVER */ - gettext_noop("permission denied for foreign server %s"), - /* ACL_KIND_EVENT_TRIGGER */ - gettext_noop("permission denied 
for event trigger %s"), - /* ACL_KIND_EXTENSION */ - gettext_noop("permission denied for extension %s"), - /* ACL_KIND_PUBLICATION */ - gettext_noop("permission denied for publication %s"), - /* ACL_KIND_SUBSCRIPTION */ - gettext_noop("permission denied for subscription %s"), -}; - -static const char *const not_owner_msg[MAX_ACL_KIND] = -{ - /* ACL_KIND_COLUMN */ - gettext_noop("must be owner of relation %s"), - /* ACL_KIND_CLASS */ - gettext_noop("must be owner of relation %s"), - /* ACL_KIND_SEQUENCE */ - gettext_noop("must be owner of sequence %s"), - /* ACL_KIND_DATABASE */ - gettext_noop("must be owner of database %s"), - /* ACL_KIND_PROC */ - gettext_noop("must be owner of function %s"), - /* ACL_KIND_OPER */ - gettext_noop("must be owner of operator %s"), - /* ACL_KIND_TYPE */ - gettext_noop("must be owner of type %s"), - /* ACL_KIND_LANGUAGE */ - gettext_noop("must be owner of language %s"), - /* ACL_KIND_LARGEOBJECT */ - gettext_noop("must be owner of large object %s"), - /* ACL_KIND_NAMESPACE */ - gettext_noop("must be owner of schema %s"), - /* ACL_KIND_OPCLASS */ - gettext_noop("must be owner of operator class %s"), - /* ACL_KIND_OPFAMILY */ - gettext_noop("must be owner of operator family %s"), - /* ACL_KIND_COLLATION */ - gettext_noop("must be owner of collation %s"), - /* ACL_KIND_CONVERSION */ - gettext_noop("must be owner of conversion %s"), - /* ACL_KIND_STATISTICS */ - gettext_noop("must be owner of statistics object %s"), - /* ACL_KIND_TABLESPACE */ - gettext_noop("must be owner of tablespace %s"), - /* ACL_KIND_TSDICTIONARY */ - gettext_noop("must be owner of text search dictionary %s"), - /* ACL_KIND_TSCONFIGURATION */ - gettext_noop("must be owner of text search configuration %s"), - /* ACL_KIND_FDW */ - gettext_noop("must be owner of foreign-data wrapper %s"), - /* ACL_KIND_FOREIGN_SERVER */ - gettext_noop("must be owner of foreign server %s"), - /* ACL_KIND_EVENT_TRIGGER */ - gettext_noop("must be owner of event trigger %s"), - /* ACL_KIND_EXTENSION */ - gettext_noop("must be owner of extension %s"), - /* ACL_KIND_PUBLICATION */ - gettext_noop("must be owner of publication %s"), - /* ACL_KIND_SUBSCRIPTION */ - gettext_noop("must be owner of subscription %s"), -}; - - void -aclcheck_error(AclResult aclerr, AclObjectKind objectkind, +aclcheck_error(AclResult aclerr, ObjectType objtype, const char *objectname) { switch (aclerr) @@ -3405,15 +3355,272 @@ aclcheck_error(AclResult aclerr, AclObjectKind objectkind, /* no error, so return to caller */ break; case ACLCHECK_NO_PRIV: - ereport(ERROR, - (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE), - errmsg(no_priv_msg[objectkind], objectname))); - break; + { + const char *msg = "???"; + + switch (objtype) + { + case OBJECT_AGGREGATE: + msg = gettext_noop("permission denied for aggregate %s"); + break; + case OBJECT_COLLATION: + msg = gettext_noop("permission denied for collation %s"); + break; + case OBJECT_COLUMN: + msg = gettext_noop("permission denied for column %s"); + break; + case OBJECT_CONVERSION: + msg = gettext_noop("permission denied for conversion %s"); + break; + case OBJECT_DATABASE: + msg = gettext_noop("permission denied for database %s"); + break; + case OBJECT_DOMAIN: + msg = gettext_noop("permission denied for domain %s"); + break; + case OBJECT_EVENT_TRIGGER: + msg = gettext_noop("permission denied for event trigger %s"); + break; + case OBJECT_EXTENSION: + msg = gettext_noop("permission denied for extension %s"); + break; + case OBJECT_FDW: + msg = gettext_noop("permission denied for foreign-data wrapper %s"); 
+ break; + case OBJECT_FOREIGN_SERVER: + msg = gettext_noop("permission denied for foreign server %s"); + break; + case OBJECT_FOREIGN_TABLE: + msg = gettext_noop("permission denied for foreign table %s"); + break; + case OBJECT_FUNCTION: + msg = gettext_noop("permission denied for function %s"); + break; + case OBJECT_INDEX: + msg = gettext_noop("permission denied for index %s"); + break; + case OBJECT_LANGUAGE: + msg = gettext_noop("permission denied for language %s"); + break; + case OBJECT_LARGEOBJECT: + msg = gettext_noop("permission denied for large object %s"); + break; + case OBJECT_MATVIEW: + msg = gettext_noop("permission denied for materialized view %s"); + break; + case OBJECT_OPCLASS: + msg = gettext_noop("permission denied for operator class %s"); + break; + case OBJECT_OPERATOR: + msg = gettext_noop("permission denied for operator %s"); + break; + case OBJECT_OPFAMILY: + msg = gettext_noop("permission denied for operator family %s"); + break; + case OBJECT_POLICY: + msg = gettext_noop("permission denied for policy %s"); + break; + case OBJECT_PROCEDURE: + msg = gettext_noop("permission denied for procedure %s"); + break; + case OBJECT_PUBLICATION: + msg = gettext_noop("permission denied for publication %s"); + break; + case OBJECT_ROUTINE: + msg = gettext_noop("permission denied for routine %s"); + break; + case OBJECT_SCHEMA: + msg = gettext_noop("permission denied for schema %s"); + break; + case OBJECT_SEQUENCE: + msg = gettext_noop("permission denied for sequence %s"); + break; + case OBJECT_STATISTIC_EXT: + msg = gettext_noop("permission denied for statistics object %s"); + break; + case OBJECT_SUBSCRIPTION: + msg = gettext_noop("permission denied for subscription %s"); + break; + case OBJECT_TABLE: + msg = gettext_noop("permission denied for table %s"); + break; + case OBJECT_TABLESPACE: + msg = gettext_noop("permission denied for tablespace %s"); + break; + case OBJECT_TSCONFIGURATION: + msg = gettext_noop("permission denied for text search configuration %s"); + break; + case OBJECT_TSDICTIONARY: + msg = gettext_noop("permission denied for text search dictionary %s"); + break; + case OBJECT_TYPE: + msg = gettext_noop("permission denied for type %s"); + break; + case OBJECT_VIEW: + msg = gettext_noop("permission denied for view %s"); + break; + /* these currently aren't used */ + case OBJECT_ACCESS_METHOD: + case OBJECT_AMOP: + case OBJECT_AMPROC: + case OBJECT_ATTRIBUTE: + case OBJECT_CAST: + case OBJECT_DEFAULT: + case OBJECT_DEFACL: + case OBJECT_DOMCONSTRAINT: + case OBJECT_PUBLICATION_REL: + case OBJECT_ROLE: + case OBJECT_RULE: + case OBJECT_TABCONSTRAINT: + case OBJECT_TRANSFORM: + case OBJECT_TRIGGER: + case OBJECT_TSPARSER: + case OBJECT_TSTEMPLATE: + case OBJECT_USER_MAPPING: + elog(ERROR, "unsupported object type %d", objtype); + } + + ereport(ERROR, + (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE), + errmsg(msg, objectname))); + break; + } case ACLCHECK_NOT_OWNER: - ereport(ERROR, - (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE), - errmsg(not_owner_msg[objectkind], objectname))); - break; + { + const char *msg = "???"; + + switch (objtype) + { + case OBJECT_AGGREGATE: + msg = gettext_noop("must be owner of aggregate %s"); + break; + case OBJECT_COLLATION: + msg = gettext_noop("must be owner of collation %s"); + break; + case OBJECT_CONVERSION: + msg = gettext_noop("must be owner of conversion %s"); + break; + case OBJECT_DATABASE: + msg = gettext_noop("must be owner of database %s"); + break; + case OBJECT_DOMAIN: + msg = gettext_noop("must be owner of domain %s"); + 
break; + case OBJECT_EVENT_TRIGGER: + msg = gettext_noop("must be owner of event trigger %s"); + break; + case OBJECT_EXTENSION: + msg = gettext_noop("must be owner of extension %s"); + break; + case OBJECT_FDW: + msg = gettext_noop("must be owner of foreign-data wrapper %s"); + break; + case OBJECT_FOREIGN_SERVER: + msg = gettext_noop("must be owner of foreign server %s"); + break; + case OBJECT_FOREIGN_TABLE: + msg = gettext_noop("must be owner of foreign table %s"); + break; + case OBJECT_FUNCTION: + msg = gettext_noop("must be owner of function %s"); + break; + case OBJECT_INDEX: + msg = gettext_noop("must be owner of index %s"); + break; + case OBJECT_LANGUAGE: + msg = gettext_noop("must be owner of language %s"); + break; + case OBJECT_LARGEOBJECT: + msg = gettext_noop("must be owner of large object %s"); + break; + case OBJECT_MATVIEW: + msg = gettext_noop("must be owner of materialized view %s"); + break; + case OBJECT_OPCLASS: + msg = gettext_noop("must be owner of operator class %s"); + break; + case OBJECT_OPERATOR: + msg = gettext_noop("must be owner of operator %s"); + break; + case OBJECT_OPFAMILY: + msg = gettext_noop("must be owner of operator family %s"); + break; + case OBJECT_PROCEDURE: + msg = gettext_noop("must be owner of procedure %s"); + break; + case OBJECT_PUBLICATION: + msg = gettext_noop("must be owner of publication %s"); + break; + case OBJECT_ROUTINE: + msg = gettext_noop("must be owner of routine %s"); + break; + case OBJECT_SEQUENCE: + msg = gettext_noop("must be owner of sequence %s"); + break; + case OBJECT_SUBSCRIPTION: + msg = gettext_noop("must be owner of subscription %s"); + break; + case OBJECT_TABLE: + msg = gettext_noop("must be owner of table %s"); + break; + case OBJECT_TYPE: + msg = gettext_noop("must be owner of type %s"); + break; + case OBJECT_VIEW: + msg = gettext_noop("must be owner of view %s"); + break; + case OBJECT_SCHEMA: + msg = gettext_noop("must be owner of schema %s"); + break; + case OBJECT_STATISTIC_EXT: + msg = gettext_noop("must be owner of statistics object %s"); + break; + case OBJECT_TABLESPACE: + msg = gettext_noop("must be owner of tablespace %s"); + break; + case OBJECT_TSCONFIGURATION: + msg = gettext_noop("must be owner of text search configuration %s"); + break; + case OBJECT_TSDICTIONARY: + msg = gettext_noop("must be owner of text search dictionary %s"); + break; + + /* + * Special cases: For these, the error message talks + * about "relation", because that's where the + * ownership is attached. See also + * check_object_ownership(). 
+ */ + case OBJECT_COLUMN: + case OBJECT_POLICY: + case OBJECT_RULE: + case OBJECT_TABCONSTRAINT: + case OBJECT_TRIGGER: + msg = gettext_noop("must be owner of relation %s"); + break; + /* these currently aren't used */ + case OBJECT_ACCESS_METHOD: + case OBJECT_AMOP: + case OBJECT_AMPROC: + case OBJECT_ATTRIBUTE: + case OBJECT_CAST: + case OBJECT_DEFAULT: + case OBJECT_DEFACL: + case OBJECT_DOMCONSTRAINT: + case OBJECT_PUBLICATION_REL: + case OBJECT_ROLE: + case OBJECT_TRANSFORM: + case OBJECT_TSPARSER: + case OBJECT_TSTEMPLATE: + case OBJECT_USER_MAPPING: + elog(ERROR, "unsupported object type %d", objtype); + } + + ereport(ERROR, + (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE), + errmsg(msg, objectname))); + break; + } default: elog(ERROR, "unrecognized AclResult: %d", (int) aclerr); break; @@ -3422,7 +3629,7 @@ aclcheck_error(AclResult aclerr, AclObjectKind objectkind, void -aclcheck_error_col(AclResult aclerr, AclObjectKind objectkind, +aclcheck_error_col(AclResult aclerr, ObjectType objtype, const char *objectname, const char *colname) { switch (aclerr) @@ -3438,9 +3645,7 @@ aclcheck_error_col(AclResult aclerr, AclObjectKind objectkind, break; case ACLCHECK_NOT_OWNER: /* relation msg is OK since columns don't have separate owners */ - ereport(ERROR, - (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE), - errmsg(not_owner_msg[objectkind], objectname))); + aclcheck_error(aclerr, objtype, objectname); break; default: elog(ERROR, "unrecognized AclResult: %d", (int) aclerr); @@ -3458,7 +3663,7 @@ aclcheck_error_type(AclResult aclerr, Oid typeOid) { Oid element_type = get_element_type(typeOid); - aclcheck_error(aclerr, ACL_KIND_TYPE, format_type_be(element_type ? element_type : typeOid)); + aclcheck_error(aclerr, OBJECT_TYPE, format_type_be(element_type ? element_type : typeOid)); } @@ -3466,48 +3671,48 @@ aclcheck_error_type(AclResult aclerr, Oid typeOid) * Relay for the various pg_*_mask routines depending on object kind */ static AclMode -pg_aclmask(AclObjectKind objkind, Oid table_oid, AttrNumber attnum, Oid roleid, +pg_aclmask(ObjectType objtype, Oid table_oid, AttrNumber attnum, Oid roleid, AclMode mask, AclMaskHow how) { - switch (objkind) + switch (objtype) { - case ACL_KIND_COLUMN: + case OBJECT_COLUMN: return pg_class_aclmask(table_oid, roleid, mask, how) | pg_attribute_aclmask(table_oid, attnum, roleid, mask, how); - case ACL_KIND_CLASS: - case ACL_KIND_SEQUENCE: + case OBJECT_TABLE: + case OBJECT_SEQUENCE: return pg_class_aclmask(table_oid, roleid, mask, how); - case ACL_KIND_DATABASE: + case OBJECT_DATABASE: return pg_database_aclmask(table_oid, roleid, mask, how); - case ACL_KIND_PROC: + case OBJECT_FUNCTION: return pg_proc_aclmask(table_oid, roleid, mask, how); - case ACL_KIND_LANGUAGE: + case OBJECT_LANGUAGE: return pg_language_aclmask(table_oid, roleid, mask, how); - case ACL_KIND_LARGEOBJECT: + case OBJECT_LARGEOBJECT: return pg_largeobject_aclmask_snapshot(table_oid, roleid, mask, how, NULL); - case ACL_KIND_NAMESPACE: + case OBJECT_SCHEMA: return pg_namespace_aclmask(table_oid, roleid, mask, how); - case ACL_KIND_STATISTICS: + case OBJECT_STATISTIC_EXT: elog(ERROR, "grantable rights not supported for statistics objects"); /* not reached, but keep compiler quiet */ return ACL_NO_RIGHTS; - case ACL_KIND_TABLESPACE: + case OBJECT_TABLESPACE: return pg_tablespace_aclmask(table_oid, roleid, mask, how); - case ACL_KIND_FDW: + case OBJECT_FDW: return pg_foreign_data_wrapper_aclmask(table_oid, roleid, mask, how); - case ACL_KIND_FOREIGN_SERVER: + case OBJECT_FOREIGN_SERVER: return 
pg_foreign_server_aclmask(table_oid, roleid, mask, how); - case ACL_KIND_EVENT_TRIGGER: + case OBJECT_EVENT_TRIGGER: elog(ERROR, "grantable rights not supported for event triggers"); /* not reached, but keep compiler quiet */ return ACL_NO_RIGHTS; - case ACL_KIND_TYPE: + case OBJECT_TYPE: return pg_type_aclmask(table_oid, roleid, mask, how); default: - elog(ERROR, "unrecognized objkind: %d", - (int) objkind); + elog(ERROR, "unrecognized objtype: %d", + (int) objtype); /* not reached, but keep compiler quiet */ return ACL_NO_RIGHTS; } @@ -3686,10 +3891,10 @@ pg_class_aclmask(Oid table_oid, Oid roleid, switch (classForm->relkind) { case RELKIND_SEQUENCE: - acl = acldefault(ACL_OBJECT_SEQUENCE, ownerId); + acl = acldefault(OBJECT_SEQUENCE, ownerId); break; default: - acl = acldefault(ACL_OBJECT_RELATION, ownerId); + acl = acldefault(OBJECT_TABLE, ownerId); break; } aclDatum = (Datum) 0; @@ -3745,7 +3950,7 @@ pg_database_aclmask(Oid db_oid, Oid roleid, if (isNull) { /* No ACL, so build default ACL */ - acl = acldefault(ACL_OBJECT_DATABASE, ownerId); + acl = acldefault(OBJECT_DATABASE, ownerId); aclDatum = (Datum) 0; } else @@ -3799,7 +4004,7 @@ pg_proc_aclmask(Oid proc_oid, Oid roleid, if (isNull) { /* No ACL, so build default ACL */ - acl = acldefault(ACL_OBJECT_FUNCTION, ownerId); + acl = acldefault(OBJECT_FUNCTION, ownerId); aclDatum = (Datum) 0; } else @@ -3853,7 +4058,7 @@ pg_language_aclmask(Oid lang_oid, Oid roleid, if (isNull) { /* No ACL, so build default ACL */ - acl = acldefault(ACL_OBJECT_LANGUAGE, ownerId); + acl = acldefault(OBJECT_LANGUAGE, ownerId); aclDatum = (Datum) 0; } else @@ -3933,7 +4138,7 @@ pg_largeobject_aclmask_snapshot(Oid lobj_oid, Oid roleid, if (isNull) { /* No ACL, so build default ACL */ - acl = acldefault(ACL_OBJECT_LARGEOBJECT, ownerId); + acl = acldefault(OBJECT_LARGEOBJECT, ownerId); aclDatum = (Datum) 0; } else @@ -3996,7 +4201,7 @@ pg_namespace_aclmask(Oid nsp_oid, Oid roleid, { if (pg_database_aclcheck(MyDatabaseId, roleid, ACL_CREATE_TEMP) == ACLCHECK_OK) - return mask & ACL_ALL_RIGHTS_NAMESPACE; + return mask & ACL_ALL_RIGHTS_SCHEMA; else return mask & ACL_USAGE; } @@ -4017,7 +4222,7 @@ pg_namespace_aclmask(Oid nsp_oid, Oid roleid, if (isNull) { /* No ACL, so build default ACL */ - acl = acldefault(ACL_OBJECT_NAMESPACE, ownerId); + acl = acldefault(OBJECT_SCHEMA, ownerId); aclDatum = (Datum) 0; } else @@ -4073,7 +4278,7 @@ pg_tablespace_aclmask(Oid spc_oid, Oid roleid, if (isNull) { /* No ACL, so build default ACL */ - acl = acldefault(ACL_OBJECT_TABLESPACE, ownerId); + acl = acldefault(OBJECT_TABLESPACE, ownerId); aclDatum = (Datum) 0; } else @@ -4135,7 +4340,7 @@ pg_foreign_data_wrapper_aclmask(Oid fdw_oid, Oid roleid, if (isNull) { /* No ACL, so build default ACL */ - acl = acldefault(ACL_OBJECT_FDW, ownerId); + acl = acldefault(OBJECT_FDW, ownerId); aclDatum = (Datum) 0; } else @@ -4197,7 +4402,7 @@ pg_foreign_server_aclmask(Oid srv_oid, Oid roleid, if (isNull) { /* No ACL, so build default ACL */ - acl = acldefault(ACL_OBJECT_FOREIGN_SERVER, ownerId); + acl = acldefault(OBJECT_FOREIGN_SERVER, ownerId); aclDatum = (Datum) 0; } else @@ -4274,7 +4479,7 @@ pg_type_aclmask(Oid type_oid, Oid roleid, AclMode mask, AclMaskHow how) if (isNull) { /* No ACL, so build default ACL */ - acl = acldefault(ACL_OBJECT_TYPE, ownerId); + acl = acldefault(OBJECT_TYPE, ownerId); aclDatum = (Datum) 0; } else @@ -4386,7 +4591,7 @@ pg_attribute_aclcheck_all(Oid table_oid, Oid roleid, AclMode mode, * grants no privileges, so that we can fall out quickly in the very * 
common case where attacl is null. */ - if (heap_attisnull(attTuple, Anum_pg_attribute_attacl)) + if (heap_attisnull(attTuple, Anum_pg_attribute_attacl, NULL)) attmask = 0; else attmask = pg_attribute_aclmask(table_oid, curr_att, roleid, @@ -5078,7 +5283,7 @@ pg_extension_ownercheck(Oid ext_oid, Oid roleid) } /* - * Ownership check for an publication (specified by OID). + * Ownership check for a publication (specified by OID). */ bool pg_publication_ownercheck(Oid pub_oid, Oid roleid) @@ -5240,10 +5445,13 @@ get_default_acl_internal(Oid roleId, Oid nsp_oid, char objtype) /* * Get default permissions for newly created object within given schema * - * Returns NULL if built-in system defaults should be used + * Returns NULL if built-in system defaults should be used. + * + * If the result is not NULL, caller must call recordDependencyOnNewAcl + * once the OID of the new object is known. */ Acl * -get_user_default_acl(GrantObjectType objtype, Oid ownerId, Oid nsp_oid) +get_user_default_acl(ObjectType objtype, Oid ownerId, Oid nsp_oid) { Acl *result; Acl *glob_acl; @@ -5261,23 +5469,23 @@ get_user_default_acl(GrantObjectType objtype, Oid ownerId, Oid nsp_oid) /* Check if object type is supported in pg_default_acl */ switch (objtype) { - case ACL_OBJECT_RELATION: + case OBJECT_TABLE: defaclobjtype = DEFACLOBJ_RELATION; break; - case ACL_OBJECT_SEQUENCE: + case OBJECT_SEQUENCE: defaclobjtype = DEFACLOBJ_SEQUENCE; break; - case ACL_OBJECT_FUNCTION: + case OBJECT_FUNCTION: defaclobjtype = DEFACLOBJ_FUNCTION; break; - case ACL_OBJECT_TYPE: + case OBJECT_TYPE: defaclobjtype = DEFACLOBJ_TYPE; break; - case ACL_OBJECT_NAMESPACE: + case OBJECT_SCHEMA: defaclobjtype = DEFACLOBJ_NAMESPACE; break; @@ -5315,6 +5523,30 @@ get_user_default_acl(GrantObjectType objtype, Oid ownerId, Oid nsp_oid) return result; } +/* + * Record dependencies on roles mentioned in a new object's ACL. + */ +void +recordDependencyOnNewAcl(Oid classId, Oid objectId, int32 objsubId, + Oid ownerId, Acl *acl) +{ + int nmembers; + Oid *members; + + /* Nothing to do if ACL is defaulted */ + if (acl == NULL) + return; + + /* Extract roles mentioned in ACL */ + nmembers = aclmembers(acl, &members); + + /* Update the shared dependency ACL info */ + updateAclDependencies(classId, objectId, objsubId, + ownerId, + 0, NULL, + nmembers, members); +} + /* * Record initial privileges for the top-level object passed in. 
* @@ -5347,7 +5579,8 @@ recordExtObjInitPriv(Oid objoid, Oid classoid) pg_class_tuple = (Form_pg_class) GETSTRUCT(tuple); /* Indexes don't have permissions */ - if (pg_class_tuple->relkind == RELKIND_INDEX) + if (pg_class_tuple->relkind == RELKIND_INDEX || + pg_class_tuple->relkind == RELKIND_PARTITIONED_INDEX) return; /* Composite types don't have permissions either */ @@ -5632,7 +5865,8 @@ removeExtObjInitPriv(Oid objoid, Oid classoid) pg_class_tuple = (Form_pg_class) GETSTRUCT(tuple); /* Indexes don't have permissions */ - if (pg_class_tuple->relkind == RELKIND_INDEX) + if (pg_class_tuple->relkind == RELKIND_INDEX || + pg_class_tuple->relkind == RELKIND_PARTITIONED_INDEX) return; /* Composite types don't have permissions either */ @@ -5765,8 +5999,8 @@ recordExtensionInitPrivWorker(Oid objoid, Oid classoid, int objsubid, Acl *new_a MemSet(nulls, false, sizeof(nulls)); MemSet(replace, false, sizeof(replace)); - values[Anum_pg_init_privs_privs - 1] = PointerGetDatum(new_acl); - replace[Anum_pg_init_privs_privs - 1] = true; + values[Anum_pg_init_privs_initprivs - 1] = PointerGetDatum(new_acl); + replace[Anum_pg_init_privs_initprivs - 1] = true; oldtuple = heap_modify_tuple(oldtuple, RelationGetDescr(relation), values, nulls, replace); @@ -5803,7 +6037,7 @@ recordExtensionInitPrivWorker(Oid objoid, Oid classoid, int objsubid, Acl *new_a values[Anum_pg_init_privs_privtype - 1] = CharGetDatum(INITPRIVS_EXTENSION); - values[Anum_pg_init_privs_privs - 1] = PointerGetDatum(new_acl); + values[Anum_pg_init_privs_initprivs - 1] = PointerGetDatum(new_acl); tuple = heap_form_tuple(RelationGetDescr(relation), values, nulls); diff --git a/src/backend/catalog/catalog.c b/src/backend/catalog/catalog.c index 92d943cac7..6061428bcc 100644 --- a/src/backend/catalog/catalog.c +++ b/src/backend/catalog/catalog.c @@ -5,7 +5,7 @@ * bits of hard-wired knowledge * * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * @@ -120,7 +120,7 @@ IsCatalogClass(Oid relid, Form_pg_class reltuple) * this is noticeably cheaper and doesn't require catalog access. * * This test is safe since even an oid wraparound will preserve this - * property (c.f. GetNewObjectId()) and it has the advantage that it works + * property (cf. GetNewObjectId()) and it has the advantage that it works * correctly even if a user decides to create a relation in the pg_catalog * namespace. 
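
Not part of the patch: a minimal standalone C sketch of the OID-range test that the IsCatalogClass comment above alludes to. FirstNormalObjectId is the usual bootstrap cutoff from transam.h; the helper name below is hypothetical.

    #include <stdbool.h>

    typedef unsigned int Oid;

    /* Bootstrap-assigned OIDs all lie below this cutoff (cf. transam.h). */
    #define FirstNormalObjectId 16384

    /*
     * Hypothetical helper: a relation whose OID is below FirstNormalObjectId
     * was created by initdb, so it can be treated as a system catalog without
     * any catalog access; OID wraparound never hands out values in this range
     * again (cf. GetNewObjectId()), which is what makes the test safe.
     */
    static bool
    relid_is_initdb_assigned(Oid relid)
    {
        return relid < FirstNormalObjectId;
    }
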
* ---- @@ -253,12 +253,24 @@ IsSharedRelation(Oid relationId) relationId == SubscriptionNameIndexId) return true; /* These are their toast tables and toast indexes (see toasting.h) */ - if (relationId == PgShdescriptionToastTable || - relationId == PgShdescriptionToastIndex || + if (relationId == PgAuthidToastTable || + relationId == PgAuthidToastIndex || + relationId == PgDatabaseToastTable || + relationId == PgDatabaseToastIndex || relationId == PgDbRoleSettingToastTable || relationId == PgDbRoleSettingToastIndex || + relationId == PgPlTemplateToastTable || + relationId == PgPlTemplateToastIndex || + relationId == PgReplicationOriginToastTable || + relationId == PgReplicationOriginToastIndex || + relationId == PgShdescriptionToastTable || + relationId == PgShdescriptionToastIndex || relationId == PgShseclabelToastTable || - relationId == PgShseclabelToastIndex) + relationId == PgShseclabelToastIndex || + relationId == PgSubscriptionToastTable || + relationId == PgSubscriptionToastIndex || + relationId == PgTablespaceToastTable || + relationId == PgTablespaceToastIndex) return true; return false; } @@ -282,8 +294,12 @@ IsSharedRelation(Oid relationId) * managed to cycle through 2^32 OIDs and generate the same OID before we * finish inserting our row. This seems unlikely to be a problem. Note * that if we had to *commit* the row to end the race condition, the risk - * would be rather higher; therefore we use SnapshotDirty in the test, - * so that we will see uncommitted rows. + * would be rather higher; therefore we use SnapshotAny in the test, so that + * we will see uncommitted rows. (We used to use SnapshotDirty, but that has + * the disadvantage that it ignores recently-deleted rows, creating a risk + * of transient conflicts for as long as our own MVCC snapshots think a + * recently-deleted row is live. The risk is far higher when selecting TOAST + * OIDs, because SnapshotToast considers dead rows as active indefinitely.) 
*/ Oid GetNewOid(Relation relation) @@ -336,7 +352,6 @@ Oid GetNewOidWithIndex(Relation relation, Oid indexId, AttrNumber oidcolumn) { Oid newOid; - SnapshotData SnapshotDirty; SysScanDesc scan; ScanKeyData key; bool collides; @@ -349,8 +364,6 @@ GetNewOidWithIndex(Relation relation, Oid indexId, AttrNumber oidcolumn) */ Assert(!IsBinaryUpgrade || RelationGetRelid(relation) != TypeRelationId); - InitDirtySnapshot(SnapshotDirty); - /* Generate new OIDs until we find one not in the table */ do { @@ -363,9 +376,9 @@ GetNewOidWithIndex(Relation relation, Oid indexId, AttrNumber oidcolumn) BTEqualStrategyNumber, F_OIDEQ, ObjectIdGetDatum(newOid)); - /* see notes above about using SnapshotDirty */ + /* see notes above about using SnapshotAny */ scan = systable_beginscan(relation, indexId, true, - &SnapshotDirty, 1, &key); + SnapshotAny, 1, &key); collides = HeapTupleIsValid(systable_getnext(scan)); @@ -396,7 +409,6 @@ GetNewRelFileNode(Oid reltablespace, Relation pg_class, char relpersistence) { RelFileNodeBackend rnode; char *rpath; - int fd; bool collides; BackendId backend; @@ -444,12 +456,10 @@ GetNewRelFileNode(Oid reltablespace, Relation pg_class, char relpersistence) /* Check for existing file of same name */ rpath = relpath(rnode, MAIN_FORKNUM); - fd = BasicOpenFile(rpath, O_RDONLY | PG_BINARY, 0); - if (fd >= 0) + if (access(rpath, F_OK) == 0) { /* definite collision */ - close(fd); collides = true; } else @@ -457,13 +467,9 @@ GetNewRelFileNode(Oid reltablespace, Relation pg_class, char relpersistence) /* * Here we have a little bit of a dilemma: if errno is something * other than ENOENT, should we declare a collision and loop? In - * particular one might think this advisable for, say, EPERM. - * However there really shouldn't be any unreadable files in a - * tablespace directory, and if the EPERM is actually complaining - * that we can't read the directory itself, we'd be in an infinite - * loop. In practice it seems best to go ahead regardless of the - * errno. If there is a colliding file we will get an smgr - * failure when we attempt to create the new relation file. + * practice it seems best to go ahead regardless of the errno. If + * there is a colliding file we will get an smgr failure when we + * attempt to create the new relation file. */ collides = false; } diff --git a/src/backend/catalog/dependency.c b/src/backend/catalog/dependency.c index 6fffc290fa..7dfa3278a5 100644 --- a/src/backend/catalog/dependency.c +++ b/src/backend/catalog/dependency.c @@ -4,7 +4,7 @@ * Routines to support inter-object dependencies. 
* * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * IDENTIFICATION @@ -27,11 +27,8 @@ #include "catalog/pg_authid.h" #include "catalog/pg_cast.h" #include "catalog/pg_collation.h" -#include "catalog/pg_collation_fn.h" #include "catalog/pg_constraint.h" -#include "catalog/pg_constraint_fn.h" #include "catalog/pg_conversion.h" -#include "catalog/pg_conversion_fn.h" #include "catalog/pg_database.h" #include "catalog/pg_default_acl.h" #include "catalog/pg_depend.h" @@ -582,6 +579,7 @@ findDependentObjects(const ObjectAddress *object, /* FALL THRU */ case DEPENDENCY_INTERNAL: + case DEPENDENCY_INTERNAL_AUTO: /* * This object is part of the internal implementation of @@ -633,6 +631,14 @@ findDependentObjects(const ObjectAddress *object, * transform this deletion request into a delete of this * owning object. * + * For INTERNAL_AUTO dependencies, we don't enforce this; in + * other words, we don't follow the links back to the owning + * object. + */ + if (foundDep->deptype == DEPENDENCY_INTERNAL_AUTO) + break; + + /* * First, release caller's lock on this object and get * deletion lock on the owning object. (We must release * caller's lock to avoid deadlock against a concurrent @@ -675,6 +681,7 @@ findDependentObjects(const ObjectAddress *object, /* And we're done here. */ systable_endscan(scan); return; + case DEPENDENCY_PIN: /* @@ -762,6 +769,7 @@ findDependentObjects(const ObjectAddress *object, case DEPENDENCY_AUTO_EXTENSION: subflags = DEPFLAG_AUTO; break; + case DEPENDENCY_INTERNAL_AUTO: case DEPENDENCY_INTERNAL: subflags = DEPFLAG_INTERNAL; break; @@ -1109,7 +1117,8 @@ doDeletion(const ObjectAddress *object, int flags) { char relKind = get_rel_relkind(object->objectId); - if (relKind == RELKIND_INDEX) + if (relKind == RELKIND_INDEX || + relKind == RELKIND_PARTITIONED_INDEX) { bool concurrent = ((flags & PERFORM_DELETION_CONCURRENTLY) != 0); @@ -1406,6 +1415,7 @@ recordDependencyOnSingleRelExpr(const ObjectAddress *depender, rte.rtekind = RTE_RELATION; rte.relid = relId; rte.relkind = RELKIND_RELATION; /* no need for exactness here */ + rte.rellockmode = AccessShareLock; context.rtables = list_make1(list_make1(&rte)); @@ -1713,6 +1723,51 @@ find_expr_references_walker(Node *node, /* Extra work needed here if we ever need this case */ elog(ERROR, "already-planned subqueries not supported"); } + else if (IsA(node, FieldSelect)) + { + FieldSelect *fselect = (FieldSelect *) node; + Oid argtype = getBaseType(exprType((Node *) fselect->arg)); + Oid reltype = get_typ_typrelid(argtype); + + /* + * We need a dependency on the specific column named in FieldSelect, + * assuming we can identify the pg_class OID for it. (Probably we + * always can at the moment, but in future it might be possible for + * argtype to be RECORDOID.) If we can make a column dependency then + * we shouldn't need a dependency on the column's type; but if we + * can't, make a dependency on the type, as it might not appear + * anywhere else in the expression. 
+ */ + if (OidIsValid(reltype)) + add_object_address(OCLASS_CLASS, reltype, fselect->fieldnum, + context->addrs); + else + add_object_address(OCLASS_TYPE, fselect->resulttype, 0, + context->addrs); + /* the collation might not be referenced anywhere else, either */ + if (OidIsValid(fselect->resultcollid) && + fselect->resultcollid != DEFAULT_COLLATION_OID) + add_object_address(OCLASS_COLLATION, fselect->resultcollid, 0, + context->addrs); + } + else if (IsA(node, FieldStore)) + { + FieldStore *fstore = (FieldStore *) node; + Oid reltype = get_typ_typrelid(fstore->resulttype); + + /* similar considerations to FieldSelect, but multiple column(s) */ + if (OidIsValid(reltype)) + { + ListCell *l; + + foreach(l, fstore->fieldnums) + add_object_address(OCLASS_CLASS, reltype, lfirst_int(l), + context->addrs); + } + else + add_object_address(OCLASS_TYPE, fstore->resulttype, 0, + context->addrs); + } else if (IsA(node, RelabelType)) { RelabelType *relab = (RelabelType *) node; @@ -1733,16 +1788,24 @@ find_expr_references_walker(Node *node, /* since there is no exposed function, need to depend on type */ add_object_address(OCLASS_TYPE, iocoerce->resulttype, 0, context->addrs); + /* the collation might not be referenced anywhere else, either */ + if (OidIsValid(iocoerce->resultcollid) && + iocoerce->resultcollid != DEFAULT_COLLATION_OID) + add_object_address(OCLASS_COLLATION, iocoerce->resultcollid, 0, + context->addrs); } else if (IsA(node, ArrayCoerceExpr)) { ArrayCoerceExpr *acoerce = (ArrayCoerceExpr *) node; - if (OidIsValid(acoerce->elemfuncid)) - add_object_address(OCLASS_PROC, acoerce->elemfuncid, 0, - context->addrs); + /* as above, depend on type */ add_object_address(OCLASS_TYPE, acoerce->resulttype, 0, context->addrs); + /* the collation might not be referenced anywhere else, either */ + if (OidIsValid(acoerce->resultcollid) && + acoerce->resultcollid != DEFAULT_COLLATION_OID) + add_object_address(OCLASS_COLLATION, acoerce->resultcollid, 0, + context->addrs); /* fall through to examine arguments */ } else if (IsA(node, ConvertRowtypeExpr)) @@ -1818,6 +1881,22 @@ find_expr_references_walker(Node *node, context->addrs); return false; } + else if (IsA(node, WindowClause)) + { + WindowClause *wc = (WindowClause *) node; + + if (OidIsValid(wc->startInRangeFunc)) + add_object_address(OCLASS_PROC, wc->startInRangeFunc, 0, + context->addrs); + if (OidIsValid(wc->endInRangeFunc)) + add_object_address(OCLASS_PROC, wc->endInRangeFunc, 0, + context->addrs); + if (OidIsValid(wc->inRangeColl) && + wc->inRangeColl != DEFAULT_COLLATION_OID) + add_object_address(OCLASS_COLLATION, wc->inRangeColl, 0, + context->addrs); + /* fall through to examine substructure */ + } else if (IsA(node, Query)) { /* Recurse into RTE subquery or not-yet-planned sublink subquery */ diff --git a/src/backend/catalog/genbki.pl b/src/backend/catalog/genbki.pl index 2eebb061b7..649200260a 100644 --- a/src/backend/catalog/genbki.pl +++ b/src/backend/catalog/genbki.pl @@ -3,11 +3,11 @@ # # genbki.pl # Perl script that generates postgres.bki, postgres.description, -# postgres.shdescription, and schemapg.h from specially formatted -# header files. The .bki files are used to initialize the postgres -# template database. +# postgres.shdescription, and symbol definition headers from specially +# formatted header files and data files. The BKI files are used to +# initialize the postgres template database. 
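
Not part of the patch: to make the genbki.pl description above concrete, here is the general shape of a generated pg_*_d.h header. The catalog name pg_foo, the macro names, and all numbers are invented for illustration; only the pattern (a relation-OID macro, per-column Anum_* constants, a Natts_* count, wrapped in an include guard) comes from what the script emits.

    #ifndef PG_FOO_D_H
    #define PG_FOO_D_H

    /* OID of the (hypothetical) catalog itself */
    #define FooRelationId 9999

    /* one Anum_* constant per column, numbered from 1 */
    #define Anum_pg_foo_fooname 1
    #define Anum_pg_foo_foonamespace 2
    #define Anum_pg_foo_fooowner 3

    /* total number of columns */
    #define Natts_pg_foo 3

    #endif							/* PG_FOO_D_H */
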
# -# Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group +# Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group # Portions Copyright (c) 1994, Regents of the University of California # # src/backend/catalog/genbki.pl @@ -20,7 +20,6 @@ use warnings; my @input_files; -our @include_path; my $output_path = ''; my $major_version; @@ -36,10 +35,6 @@ { $output_path = length($arg) > 2 ? substr($arg, 2) : shift @ARGV; } - elsif ($arg =~ /^-I/) - { - push @include_path, length($arg) > 2 ? substr($arg, 2) : shift @ARGV; - } elsif ($arg =~ /^--set-version=(.*)$/) { $major_version = $1; @@ -53,8 +48,7 @@ } # Sanity check arguments. -die "No input files.\n" if !@input_files; -die "No include path; you must specify -I at least once.\n" if !@include_path; +die "No input files.\n" if !@input_files; die "--set-version must be specified.\n" if !defined $major_version; # Make sure output_path ends in a slash. @@ -63,6 +57,197 @@ $output_path .= '/'; } +# Read all the files into internal data structures. +my @catnames; +my %catalogs; +my %catalog_data; +my @toast_decls; +my @index_decls; +my %oidcounts; + +foreach my $header (@input_files) +{ + $header =~ /(.+)\.h$/ + or die "Input files need to be header files.\n"; + my $datfile = "$1.dat"; + + my $catalog = Catalog::ParseHeader($header); + my $catname = $catalog->{catname}; + my $schema = $catalog->{columns}; + + if (defined $catname) + { + push @catnames, $catname; + $catalogs{$catname} = $catalog; + } + + # While checking for duplicated OIDs, we ignore the pg_class OID and + # rowtype OID of bootstrap catalogs, as those are expected to appear + # in the initial data for pg_class and pg_type. For regular catalogs, + # include these OIDs. (See also Catalog::FindAllOidsFromHeaders + # if you change this logic.) + if (!$catalog->{bootstrap}) + { + $oidcounts{ $catalog->{relation_oid} }++ + if ($catalog->{relation_oid}); + $oidcounts{ $catalog->{rowtype_oid} }++ + if ($catalog->{rowtype_oid}); + } + + # Not all catalogs have a data file. + if (-e $datfile) + { + my $data = Catalog::ParseData($datfile, $schema, 0); + $catalog_data{$catname} = $data; + + # Check for duplicated OIDs while we're at it. + foreach my $row (@$data) + { + $oidcounts{ $row->{oid} }++ if defined $row->{oid}; + } + } + + # If the header file contained toast or index info, build BKI + # commands for those, which we'll output later. + foreach my $toast (@{ $catalog->{toasting} }) + { + push @toast_decls, + sprintf "declare toast %s %s on %s\n", + $toast->{toast_oid}, $toast->{toast_index_oid}, + $toast->{parent_table}; + $oidcounts{ $toast->{toast_oid} }++; + $oidcounts{ $toast->{toast_index_oid} }++; + } + foreach my $index (@{ $catalog->{indexing} }) + { + push @index_decls, + sprintf "declare %sindex %s %s %s\n", + $index->{is_unique} ? 'unique ' : '', + $index->{index_name}, $index->{index_oid}, + $index->{index_decl}; + $oidcounts{ $index->{index_oid} }++; + } +} + +# Complain and exit if we found any duplicate OIDs. +# While duplicate OIDs would only cause a failure if they appear in +# the same catalog, our project policy is that manually assigned OIDs +# should be globally unique, to avoid confusion. +my $found = 0; +foreach my $oid (keys %oidcounts) +{ + next unless $oidcounts{$oid} > 1; + print STDERR "Duplicate OIDs detected:\n" if !$found; + print STDERR "$oid\n"; + $found++; +} +die "found $found duplicate OID(s) in catalog data\n" if $found; + +# Fetch some special data that we will substitute into the output file. 
+# CAUTION: be wary about what symbols you substitute into the .bki file here! +# It's okay to substitute things that are expected to be really constant +# within a given Postgres release, such as fixed OIDs. Do not substitute +# anything that could depend on platform or configuration. (The right place +# to handle those sorts of things is in initdb.c's bootstrap_template1().) +my $BOOTSTRAP_SUPERUSERID = + Catalog::FindDefinedSymbolFromData($catalog_data{pg_authid}, + 'BOOTSTRAP_SUPERUSERID'); +my $PG_CATALOG_NAMESPACE = + Catalog::FindDefinedSymbolFromData($catalog_data{pg_namespace}, + 'PG_CATALOG_NAMESPACE'); + + +# Build lookup tables for OID macro substitutions and for pg_attribute +# copies of pg_type values. + +# index access method OID lookup +my %amoids; +foreach my $row (@{ $catalog_data{pg_am} }) +{ + $amoids{ $row->{amname} } = $row->{oid}; +} + +# opclass OID lookup +my %opcoids; +foreach my $row (@{ $catalog_data{pg_opclass} }) +{ + # There is no unique name, so we need to combine access method + # and opclass name. + my $key = sprintf "%s/%s", $row->{opcmethod}, $row->{opcname}; + $opcoids{$key} = $row->{oid}; +} + +# operator OID lookup +my %operoids; +foreach my $row (@{ $catalog_data{pg_operator} }) +{ + # There is no unique name, so we need to invent one that contains + # the relevant type names. + my $key = sprintf "%s(%s,%s)", + $row->{oprname}, $row->{oprleft}, $row->{oprright}; + $operoids{$key} = $row->{oid}; +} + +# opfamily OID lookup +my %opfoids; +foreach my $row (@{ $catalog_data{pg_opfamily} }) +{ + # There is no unique name, so we need to combine access method + # and opfamily name. + my $key = sprintf "%s/%s", $row->{opfmethod}, $row->{opfname}; + $opfoids{$key} = $row->{oid}; +} + +# procedure OID lookup +my %procoids; +foreach my $row (@{ $catalog_data{pg_proc} }) +{ + # Generate an entry under just the proname (corresponds to regproc lookup) + my $prokey = $row->{proname}; + if (defined $procoids{$prokey}) + { + $procoids{$prokey} = 'MULTIPLE'; + } + else + { + $procoids{$prokey} = $row->{oid}; + } + + # Also generate an entry using proname(proargtypes). This is not quite + # identical to regprocedure lookup because we don't worry much about + # special SQL names for types etc; we just use the names in the source + # proargtypes field. These *should* be unique, but do a multiplicity + # check anyway. + $prokey .= '(' . join(',', split(/\s+/, $row->{proargtypes})) . ')'; + if (defined $procoids{$prokey}) + { + $procoids{$prokey} = 'MULTIPLE'; + } + else + { + $procoids{$prokey} = $row->{oid}; + } +} + +# type lookups +my %typeoids; +my %types; +foreach my $row (@{ $catalog_data{pg_type} }) +{ + $typeoids{ $row->{typname} } = $row->{oid}; + $types{ $row->{typname} } = $row; +} + +# Map catalog name to OID lookup. +my %lookup_kind = ( + pg_am => \%amoids, + pg_opclass => \%opcoids, + pg_operator => \%operoids, + pg_opfamily => \%opfoids, + pg_proc => \%procoids, + pg_type => \%typeoids); + + # Open temp files my $tmpext = ".tmp$$"; my $bkifile = $output_path . 'postgres.bki'; @@ -78,23 +263,8 @@ open my $shdescr, '>', $shdescrfile . $tmpext or die "can't open $shdescrfile$tmpext: $!"; -# Fetch some special data that we will substitute into the output file. -# CAUTION: be wary about what symbols you substitute into the .bki file here! -# It's okay to substitute things that are expected to be really constant -# within a given Postgres release, such as fixed OIDs. Do not substitute -# anything that could depend on platform or configuration. 
(The right place -# to handle those sorts of things is in initdb.c's bootstrap_template1().) -# NB: make sure that the files used here are known to be part of the .bki -# file's dependencies by src/backend/catalog/Makefile. -my $BOOTSTRAP_SUPERUSERID = - find_defined_symbol('pg_authid.h', 'BOOTSTRAP_SUPERUSERID'); -my $PG_CATALOG_NAMESPACE = - find_defined_symbol('pg_namespace.h', 'PG_CATALOG_NAMESPACE'); - -# Read all the input header files into internal data structures -my $catalogs = Catalog::Catalogs(@input_files); - -# Generate postgres.bki, postgres.description, and postgres.shdescription +# Generate postgres.bki, postgres.description, postgres.shdescription, +# and pg_*_d.h headers. # version marker for .bki file print $bki "# PostgreSQL $major_version\n"; @@ -102,33 +272,73 @@ # vars to hold data needed for schemapg.h my %schemapg_entries; my @tables_needing_macros; -my %regprocoids; -our @types; # produce output, one catalog at a time -foreach my $catname (@{ $catalogs->{names} }) +foreach my $catname (@catnames) { + my $catalog = $catalogs{$catname}; + + # Create one definition header with macro definitions for each catalog. + my $def_file = $output_path . $catname . '_d.h'; + open my $def, '>', $def_file . $tmpext + or die "can't open $def_file$tmpext: $!"; + + # Opening boilerplate for pg_*_d.h + printf $def <{relation_oid_macro}, $catalog->{relation_oid} + if $catalog->{relation_oid_macro}; + printf $def "#define %s %s\n", + $catalog->{rowtype_oid_macro}, $catalog->{rowtype_oid} + if $catalog->{rowtype_oid_macro}; + print $def "\n"; # .bki CREATE command for this catalog - my $catalog = $catalogs->{$catname}; print $bki "create $catname $catalog->{relation_oid}" . $catalog->{shared_relation} . $catalog->{bootstrap} . $catalog->{without_oids} - . $catalog->{rowtype_oid} . "\n"; + . $catalog->{rowtype_oid_clause}; - my %bki_attr; - my @attnames; my $first = 1; - print $bki " (\n"; - foreach my $column (@{ $catalog->{columns} }) + print $bki "\n (\n"; + my $schema = $catalog->{columns}; + my %attnames; + my $attnum = 0; + foreach my $column (@$schema) { + $attnum++; my $attname = $column->{name}; my $atttype = $column->{type}; - $bki_attr{$attname} = $column; - push @attnames, $attname; + # Build hash of column names for use later + $attnames{$attname} = 1; + + # Emit column definitions if (!$first) { print $bki " ,\n"; @@ -145,181 +355,169 @@ { print $bki " FORCE NULL"; } + + # Emit Anum_* constants + printf $def "#define Anum_%s_%s %s\n", $catname, $attname, $attnum; } print $bki "\n )\n"; - # open it, unless bootstrap case (create bootstrap does this automatically) - if ($catalog->{bootstrap} eq '') + # Emit Natts_* constant + print $def "\n#define Natts_$catname $attnum\n\n"; + + # Emit client code copied from source header + foreach my $line (@{ $catalog->{client_code} }) + { + print $def $line; + } + + # Open it, unless it's a bootstrap catalog (create bootstrap does this + # automatically) + if (!$catalog->{bootstrap}) { print $bki "open $catname\n"; } - if (defined $catalog->{data}) + # For pg_attribute.h, we generate data entries ourselves. + if ($catname eq 'pg_attribute') { + gen_pg_attribute($schema); + } - # Ordinary catalog with DATA line(s) - foreach my $row (@{ $catalog->{data} }) - { + # Ordinary catalog with a data file + foreach my $row (@{ $catalog_data{$catname} }) + { + my %bki_values = %$row; - # Split line into tokens without interpreting their meaning. 
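
Not part of the patch: a standalone C toy showing how the Anum_* constants emitted above are meant to be consumed. Backend code indexes its values[]/nulls[] arrays with Anum_xxx - 1, because attribute numbers are 1-based while C arrays are 0-based (InsertPgAttributeTuple() later in this patch is a real example). The catalog pg_foo and its columns are hypothetical.

    #include <stdio.h>

    /* Hypothetical generated constants, as sketched earlier */
    #define Anum_pg_foo_fooname 1
    #define Anum_pg_foo_foonamespace 2
    #define Natts_pg_foo 3

    int main(void)
    {
        /* Toy stand-in for a catalog row's values[] array */
        const char *values[Natts_pg_foo] = {0};

        values[Anum_pg_foo_fooname - 1] = "some_name";
        values[Anum_pg_foo_foonamespace - 1] = "pg_catalog";
        printf("fooname = %s\n", values[Anum_pg_foo_fooname - 1]);
        return 0;
    }
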
- my %bki_values; - @bki_values{@attnames} = - Catalog::SplitDataLine($row->{bki_values}); + # Complain about unrecognized keys; they are presumably misspelled + foreach my $key (keys %bki_values) + { + next + if $key eq "oid" + || $key eq "oid_symbol" + || $key eq "array_type_oid" + || $key eq "descr" + || $key eq "autogenerated" + || $key eq "line_number"; + die sprintf "unrecognized field name \"%s\" in %s.dat line %s\n", + $key, $catname, $bki_values{line_number} + if (!exists($attnames{$key})); + } - # Perform required substitutions on fields - foreach my $att (keys %bki_values) + # Perform required substitutions on fields + foreach my $column (@$schema) + { + my $attname = $column->{name}; + my $atttype = $column->{type}; + + # Substitute constant values we acquired above. + # (It's intentional that this can apply to parts of a field). + $bki_values{$attname} =~ s/\bPGUID\b/$BOOTSTRAP_SUPERUSERID/g; + $bki_values{$attname} =~ s/\bPGNSP\b/$PG_CATALOG_NAMESPACE/g; + + # Replace OID synonyms with OIDs per the appropriate lookup rule. + # + # If the column type is oidvector or _oid, we have to replace + # each element of the array as per the lookup rule. + if ($column->{lookup}) { + my $lookup = $lookup_kind{ $column->{lookup} }; + my @lookupnames; + my @lookupoids; - # Substitute constant values we acquired above. - # (It's intentional that this can apply to parts of a field). - $bki_values{$att} =~ s/\bPGUID\b/$BOOTSTRAP_SUPERUSERID/g; - $bki_values{$att} =~ s/\bPGNSP\b/$PG_CATALOG_NAMESPACE/g; + die "unrecognized BKI_LOOKUP type " . $column->{lookup} + if !defined($lookup); - # Replace regproc columns' values with OIDs. - # If we don't have a unique value to substitute, - # just do nothing (regprocin will complain). - if ($bki_attr{$att}->{type} eq 'regproc') + if ($atttype eq 'oidvector') { - my $procoid = $regprocoids{ $bki_values{$att} }; - $bki_values{$att} = $procoid - if defined($procoid) && $procoid ne 'MULTIPLE'; + @lookupnames = split /\s+/, $bki_values{$attname}; + @lookupoids = lookup_oids($lookup, $catname, \%bki_values, + @lookupnames); + $bki_values{$attname} = join(' ', @lookupoids); } - } - - # Save pg_proc oids for use in later regproc substitutions. - # This relies on the order we process the files in! - if ($catname eq 'pg_proc') - { - if (defined($regprocoids{ $bki_values{proname} })) + elsif ($atttype eq '_oid') { - $regprocoids{ $bki_values{proname} } = 'MULTIPLE'; + if ($bki_values{$attname} ne '_null_') + { + $bki_values{$attname} =~ s/[{}]//g; + @lookupnames = split /,/, $bki_values{$attname}; + @lookupoids = + lookup_oids($lookup, $catname, \%bki_values, + @lookupnames); + $bki_values{$attname} = sprintf "{%s}", + join(',', @lookupoids); + } } else { - $regprocoids{ $bki_values{proname} } = $row->{oid}; + $lookupnames[0] = $bki_values{$attname}; + @lookupoids = lookup_oids($lookup, $catname, \%bki_values, + @lookupnames); + $bki_values{$attname} = $lookupoids[0]; } } + } - # Save pg_type info for pg_attribute processing below - if ($catname eq 'pg_type') - { - my %type = %bki_values; - $type{oid} = $row->{oid}; - push @types, \%type; - } + # Special hack to generate OID symbols for pg_type entries + # that lack one. + if ($catname eq 'pg_type' and !exists $bki_values{oid_symbol}) + { + my $symbol = form_pg_type_symbol($bki_values{typname}); + $bki_values{oid_symbol} = $symbol + if defined $symbol; + } - # Write to postgres.bki - my $oid = $row->{oid} ? 
"OID = $row->{oid} " : ''; - printf $bki "insert %s( %s )\n", $oid, - join(' ', @bki_values{@attnames}); + # Write to postgres.bki + print_bki_insert(\%bki_values, $schema); - # Write comments to postgres.description and postgres.shdescription - if (defined $row->{descr}) + # Write comments to postgres.description and + # postgres.shdescription + if (defined $bki_values{descr}) + { + if ($catalog->{shared_relation}) { - printf $descr "%s\t%s\t0\t%s\n", $row->{oid}, $catname, - $row->{descr}; + printf $shdescr "%s\t%s\t%s\n", + $bki_values{oid}, $catname, $bki_values{descr}; } - if (defined $row->{shdescr}) + else { - printf $shdescr "%s\t%s\t%s\n", $row->{oid}, $catname, - $row->{shdescr}; + printf $descr "%s\t%s\t0\t%s\n", + $bki_values{oid}, $catname, $bki_values{descr}; } } - } - if ($catname eq 'pg_attribute') - { - # For pg_attribute.h, we generate DATA entries ourselves. - # NB: pg_type.h must come before pg_attribute.h in the input list - # of catalog names, since we use info from pg_type.h here. - foreach my $table_name (@{ $catalogs->{names} }) + # Emit OID symbol + if (defined $bki_values{oid_symbol}) { - my $table = $catalogs->{$table_name}; - - # Currently, all bootstrapped relations also need schemapg.h - # entries, so skip if the relation isn't to be in schemapg.h. - next if $table->{schema_macro} ne 'True'; - - $schemapg_entries{$table_name} = []; - push @tables_needing_macros, $table_name; - my $is_bootstrap = $table->{bootstrap}; - - # Generate entries for user attributes. - my $attnum = 0; - my $priornotnull = 1; - my @user_attrs = @{ $table->{columns} }; - foreach my $attr (@user_attrs) - { - $attnum++; - my $row = emit_pgattr_row($table_name, $attr, $priornotnull); - $row->{attnum} = $attnum; - $row->{attstattarget} = '-1'; - $priornotnull &= ($row->{attnotnull} eq 't'); - - # If it's bootstrapped, put an entry in postgres.bki. - if ($is_bootstrap eq ' bootstrap') - { - bki_insert($row, @attnames); - } - - # Store schemapg entries for later. - $row = - emit_schemapg_row($row, - grep { $bki_attr{$_}{type} eq 'bool' } @attnames); - push @{ $schemapg_entries{$table_name} }, '{ ' - . join( - ', ', grep { defined $_ } - map $row->{$_}, @attnames) . ' }'; - } - - # Generate entries for system attributes. - # We only need postgres.bki entries, not schemapg.h entries. 
- if ($is_bootstrap eq ' bootstrap') - { - $attnum = 0; - my @SYS_ATTRS = ( - { name => 'ctid', type => 'tid' }, - { name => 'oid', type => 'oid' }, - { name => 'xmin', type => 'xid' }, - { name => 'cmin', type => 'cid' }, - { name => 'xmax', type => 'xid' }, - { name => 'cmax', type => 'cid' }, - { name => 'tableoid', type => 'oid' }); - foreach my $attr (@SYS_ATTRS) - { - $attnum--; - my $row = emit_pgattr_row($table_name, $attr, 1); - $row->{attnum} = $attnum; - $row->{attstattarget} = '0'; - - # some catalogs don't have oids - next - if $table->{without_oids} eq ' without_oids' - && $row->{attname} eq 'oid'; - - bki_insert($row, @attnames); - } - } + printf $def "#define %s %s\n", + $bki_values{oid_symbol}, $bki_values{oid}; } } print $bki "close $catname\n"; + printf $def "\n#endif\t\t\t\t\t\t\t/* %s_D_H */\n", uc $catname; + + # Close and rename definition header + close $def; + Catalog::RenameTempFile($def_file, $tmpext); } # Any information needed for the BKI that is not contained in a pg_*.h header # (i.e., not contained in a header with a CATALOG() statement) comes here # Write out declare toast/index statements -foreach my $declaration (@{ $catalogs->{toasting}->{data} }) +foreach my $declaration (@toast_decls) { print $bki $declaration; } -foreach my $declaration (@{ $catalogs->{indexing}->{data} }) +foreach my $declaration (@index_decls) { print $bki $declaration; } +# last command in the BKI file: build the indexes declared above +print $bki "build indices\n"; + # Now generate schemapg.h @@ -330,7 +528,7 @@ * schemapg.h * Schema_pg_xxx macros for use by relcache.c * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * NOTES @@ -338,7 +536,7 @@ * *** DO NOT EDIT THIS FILE! *** * ****************************** * - * It has been GENERATED by $0 + * It has been GENERATED by src/backend/catalog/genbki.pl * *------------------------------------------------------------------------- */ @@ -355,7 +553,7 @@ } # Closing boilerplate for schemapg.h -print $schemapg "\n#endif /* SCHEMAPG_H */\n"; +print $schemapg "\n#endif\t\t\t\t\t\t\t/* SCHEMAPG_H */\n"; # We're done emitting data close $bki; @@ -374,158 +572,278 @@ #################### Subroutines ######################## -# Given a system catalog name and a reference to a key-value pair corresponding -# to the name and type of a column, generate a reference to a hash that -# represents a pg_attribute entry. We must also be told whether preceding -# columns were all not-null. -sub emit_pgattr_row +# For each catalog marked as needing a schema macro, generate the +# per-user-attribute data to be incorporated into schemapg.h. Also, for +# bootstrap catalogs, emit pg_attribute entries into the .bki file +# for both user and system attributes. +sub gen_pg_attribute { - my ($table_name, $attr, $priornotnull) = @_; - my $attname = $attr->{name}; - my $atttype = $attr->{type}; - my %row; - - $row{attrelid} = $catalogs->{$table_name}->{relation_oid}; - $row{attname} = $attname; + my $schema = shift; - # Adjust type name for arrays: foo[] becomes _foo - # so we can look it up in pg_type - if ($atttype =~ /(.+)\[\]$/) + my @attnames; + foreach my $column (@$schema) { - $atttype = '_' . 
$1; + push @attnames, $column->{name}; } - # Copy the type data from pg_type, and add some type-dependent items - foreach my $type (@types) + foreach my $table_name (@catnames) { - if (defined $type->{typname} && $type->{typname} eq $atttype) - { - $row{atttypid} = $type->{oid}; - $row{attlen} = $type->{typlen}; - $row{attbyval} = $type->{typbyval}; - $row{attstorage} = $type->{typstorage}; - $row{attalign} = $type->{typalign}; + my $table = $catalogs{$table_name}; - # set attndims if it's an array type - $row{attndims} = $type->{typcategory} eq 'A' ? '1' : '0'; - $row{attcollation} = $type->{typcollation}; + # Currently, all bootstrap catalogs also need schemapg.h + # entries, so skip if it isn't to be in schemapg.h. + next if !$table->{schema_macro}; - if (defined $attr->{forcenotnull}) - { - $row{attnotnull} = 't'; - } - elsif (defined $attr->{forcenull}) - { - $row{attnotnull} = 'f'; - } - elsif ($priornotnull) - { + $schemapg_entries{$table_name} = []; + push @tables_needing_macros, $table_name; - # attnotnull will automatically be set if the type is - # fixed-width and prior columns are all NOT NULL --- - # compare DefineAttr in bootstrap.c. oidvector and - # int2vector are also treated as not-nullable. - $row{attnotnull} = - $type->{typname} eq 'oidvector' ? 't' - : $type->{typname} eq 'int2vector' ? 't' - : $type->{typlen} eq 'NAMEDATALEN' ? 't' - : $type->{typlen} > 0 ? 't' - : 'f'; - } - else + # Generate entries for user attributes. + my $attnum = 0; + my $priornotnull = 1; + foreach my $attr (@{ $table->{columns} }) + { + $attnum++; + my %row; + $row{attnum} = $attnum; + $row{attrelid} = $table->{relation_oid}; + + morph_row_for_pgattr(\%row, $schema, $attr, $priornotnull); + $priornotnull &= ($row{attnotnull} eq 't'); + + # If it's bootstrapped, put an entry in postgres.bki. + print_bki_insert(\%row, $schema) if $table->{bootstrap}; + + # Store schemapg entries for later. + morph_row_for_schemapg(\%row, $schema); + push @{ $schemapg_entries{$table_name} }, + sprintf "{ %s }", + join(', ', grep { defined $_ } @row{@attnames}); + } + + # Generate entries for system attributes. + # We only need postgres.bki entries, not schemapg.h entries. + if ($table->{bootstrap}) + { + $attnum = 0; + my @SYS_ATTRS = ( + { name => 'ctid', type => 'tid' }, + { name => 'oid', type => 'oid' }, + { name => 'xmin', type => 'xid' }, + { name => 'cmin', type => 'cid' }, + { name => 'xmax', type => 'xid' }, + { name => 'cmax', type => 'cid' }, + { name => 'tableoid', type => 'oid' }); + foreach my $attr (@SYS_ATTRS) { - $row{attnotnull} = 'f'; + $attnum--; + my %row; + $row{attnum} = $attnum; + $row{attrelid} = $table->{relation_oid}; + $row{attstattarget} = '0'; + + # Omit the oid column if the catalog doesn't have them + next + if $table->{without_oids} + && $attr->{name} eq 'oid'; + + morph_row_for_pgattr(\%row, $schema, $attr, 1); + print_bki_insert(\%row, $schema); } - last; } } + return; +} + +# Given $pgattr_schema (the pg_attribute schema for a catalog sufficient for +# AddDefaultValues), $attr (the description of a catalog row), and +# $priornotnull (whether all prior attributes in this catalog are not null), +# modify the $row hashref for print_bki_insert. This includes setting data +# from the corresponding pg_type element and filling in any default values. +# Any value not handled here must be supplied by caller. 
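
Not part of the patch: a standalone C sketch of the attnotnull decision that morph_row_for_pgattr() (below) applies, assuming typlen has already been resolved to a number. The function name and signature are invented; the rule itself (forced settings win, otherwise a column is NOT NULL only if it is fixed-width, or oidvector/int2vector, and every prior column is NOT NULL) mirrors the Perl code and DefineAttr in bootstrap.c.

    #include <stdbool.h>
    #include <string.h>

    static bool
    derive_attnotnull(bool forcenotnull, bool forcenull, bool priornotnull,
                      const char *typname, int typlen)
    {
        if (forcenotnull)
            return true;
        if (forcenull)
            return false;
        if (!priornotnull)
            return false;
        /* fixed-width types (typlen > 0) plus the two vector types */
        return strcmp(typname, "oidvector") == 0 ||
               strcmp(typname, "int2vector") == 0 ||
               typlen > 0;
    }

    int main(void)
    {
        /* int4 (typlen 4) after all-NOT-NULL columns: true */
        bool a = derive_attnotnull(false, false, true, "int4", 4);
        /* text (varlena, typlen -1): false */
        bool b = derive_attnotnull(false, false, true, "text", -1);
        return (a && !b) ? 0 : 1;
    }
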
+sub morph_row_for_pgattr +{ + my ($row, $pgattr_schema, $attr, $priornotnull) = @_; + my $attname = $attr->{name}; + my $atttype = $attr->{type}; + + $row->{attname} = $attname; + + # Copy the type data from pg_type, and add some type-dependent items + my $type = $types{$atttype}; + + $row->{atttypid} = $type->{oid}; + $row->{attlen} = $type->{typlen}; + $row->{attbyval} = $type->{typbyval}; + $row->{attstorage} = $type->{typstorage}; + $row->{attalign} = $type->{typalign}; + + # set attndims if it's an array type + $row->{attndims} = $type->{typcategory} eq 'A' ? '1' : '0'; + $row->{attcollation} = $type->{typcollation}; + + if (defined $attr->{forcenotnull}) + { + $row->{attnotnull} = 't'; + } + elsif (defined $attr->{forcenull}) + { + $row->{attnotnull} = 'f'; + } + elsif ($priornotnull) + { + + # attnotnull will automatically be set if the type is + # fixed-width and prior columns are all NOT NULL --- + # compare DefineAttr in bootstrap.c. oidvector and + # int2vector are also treated as not-nullable. + $row->{attnotnull} = + $type->{typname} eq 'oidvector' ? 't' + : $type->{typname} eq 'int2vector' ? 't' + : $type->{typlen} eq 'NAMEDATALEN' ? 't' + : $type->{typlen} > 0 ? 't' + : 'f'; + } + else + { + $row->{attnotnull} = 'f'; + } - # Add in default values for pg_attribute - my %PGATTR_DEFAULTS = ( - attcacheoff => '-1', - atttypmod => '-1', - atthasdef => 'f', - attidentity => '', - attisdropped => 'f', - attislocal => 't', - attinhcount => '0', - attacl => '_null_', - attoptions => '_null_', - attfdwoptions => '_null_'); - return { %PGATTR_DEFAULTS, %row }; + Catalog::AddDefaultValues($row, $pgattr_schema, 'pg_attribute'); + return; } -# Write a pg_attribute entry to postgres.bki -sub bki_insert +# Write an entry to postgres.bki. +sub print_bki_insert { - my $row = shift; - my @attnames = @_; - my $oid = $row->{oid} ? "OID = $row->{oid} " : ''; - my $bki_values = join ' ', map { $_ eq '' ? '""' : $_ } map $row->{$_}, - @attnames; - printf $bki "insert %s( %s )\n", $oid, $bki_values; + my $row = shift; + my $schema = shift; + + my @bki_values; + my $oid = $row->{oid} ? "OID = $row->{oid} " : ''; + + foreach my $column (@$schema) + { + my $attname = $column->{name}; + my $atttype = $column->{type}; + my $bki_value = $row->{$attname}; + + # Fold backslash-zero to empty string if it's the entire string, + # since that represents a NUL char in C code. + $bki_value = '' if $bki_value eq '\0'; + + # Handle single quotes by doubling them, and double quotes by + # converting them to octal escapes, because that's what the + # bootstrap scanner requires. We do not process backslashes + # specially; this allows escape-string-style backslash escapes + # to be used in catalog data. + $bki_value =~ s/'/''/g; + $bki_value =~ s/"/\\042/g; + + # Quote value if needed. We need not quote values that satisfy + # the "id" pattern in bootscanner.l, currently "[-A-Za-z0-9_]+". + $bki_value = sprintf(qq'"%s"', $bki_value) + if length($bki_value) == 0 + or $bki_value =~ /[^-A-Za-z0-9_]/; + + push @bki_values, $bki_value; + } + printf $bki "insert %s( %s )\n", $oid, join(' ', @bki_values); + return; } +# Given a row reference, modify it so that it becomes a valid entry for +# a catalog schema declaration in schemapg.h. +# # The field values of a Schema_pg_xxx declaration are similar, but not # quite identical, to the corresponding values in postgres.bki. 
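
Not part of the patch: a standalone C restatement of the quoting test used by print_bki_insert() above. A value may appear unquoted in postgres.bki only if it is non-empty and matches the bootstrap scanner's "id" pattern, currently [-A-Za-z0-9_]+; the helper name and the demo values are hypothetical.

    #include <ctype.h>
    #include <stdbool.h>
    #include <stdio.h>

    static bool
    bki_value_needs_quotes(const char *value)
    {
        if (*value == '\0')
            return true;            /* empty string must be quoted */
        for (const char *p = value; *p; p++)
        {
            if (!(isalnum((unsigned char) *p) || *p == '-' || *p == '_'))
                return true;        /* anything outside [-A-Za-z0-9_] */
        }
        return false;
    }

    int main(void)
    {
        printf("%d %d\n",
               bki_value_needs_quotes("pg_catalog"),      /* 0: bare identifier */
               bki_value_needs_quotes("boolean = $1"));   /* 1: needs quoting */
        return 0;
    }
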
-sub emit_schemapg_row +sub morph_row_for_schemapg { - my $row = shift; - my @bool_attrs = @_; - - # Replace empty string by zero char constant - $row->{attidentity} ||= '\0'; - - # Supply appropriate quoting for these fields. - $row->{attname} = q|{"| . $row->{attname} . q|"}|; - $row->{attstorage} = q|'| . $row->{attstorage} . q|'|; - $row->{attalign} = q|'| . $row->{attalign} . q|'|; - $row->{attidentity} = q|'| . $row->{attidentity} . q|'|; - - # We don't emit initializers for the variable length fields at all. - # Only the fixed-size portions of the descriptors are ever used. - delete $row->{attacl}; - delete $row->{attoptions}; - delete $row->{attfdwoptions}; - - # Expand booleans from 'f'/'t' to 'false'/'true'. - # Some values might be other macros (eg FLOAT4PASSBYVAL), don't change. - foreach my $attr (@bool_attrs) + my $row = shift; + my $pgattr_schema = shift; + + foreach my $column (@$pgattr_schema) { - $row->{$attr} = - $row->{$attr} eq 't' ? 'true' - : $row->{$attr} eq 'f' ? 'false' - : $row->{$attr}; + my $attname = $column->{name}; + my $atttype = $column->{type}; + + # Some data types have special formatting rules. + if ($atttype eq 'name') + { + # add {" ... "} quoting + $row->{$attname} = sprintf(qq'{"%s"}', $row->{$attname}); + } + elsif ($atttype eq 'char') + { + # Add single quotes + $row->{$attname} = sprintf("'%s'", $row->{$attname}); + } + + # Expand booleans from 'f'/'t' to 'false'/'true'. + # Some values might be other macros (eg FLOAT4PASSBYVAL), + # don't change. + elsif ($atttype eq 'bool') + { + $row->{$attname} = 'true' if $row->{$attname} eq 't'; + $row->{$attname} = 'false' if $row->{$attname} eq 'f'; + } + + # We don't emit initializers for the variable length fields at all. + # Only the fixed-size portions of the descriptors are ever used. + delete $row->{$attname} if $column->{is_varlen}; } - return $row; + return; } -# Find a symbol defined in a particular header file and extract the value. -sub find_defined_symbol +# Perform OID lookups on an array of OID names. +# If we don't have a unique value to substitute, warn and +# leave the entry unchanged. +# (A warning seems sufficient because the bootstrap backend will reject +# non-numeric values anyway. So we might as well detect multiple problems +# within this genbki.pl run.) +sub lookup_oids { - my ($catalog_header, $symbol) = @_; - for my $path (@include_path) - { + my ($lookup, $catname, $bki_values, @lookupnames) = @_; - # Make sure include path ends in a slash. - if (substr($path, -1) ne '/') + my @lookupoids; + foreach my $lookupname (@lookupnames) + { + my $lookupoid = $lookup->{$lookupname}; + if (defined($lookupoid) and $lookupoid ne 'MULTIPLE') { - $path .= '/'; + push @lookupoids, $lookupoid; } - my $file = $path . $catalog_header; - next if !-f $file; - open(my $find_defined_symbol, '<', $file) || die "$file: $!"; - while (<$find_defined_symbol>) + else { - if (/^#define\s+\Q$symbol\E\s+(\S+)/) - { - return $1; - } + push @lookupoids, $lookupname; + warn sprintf + "unresolved OID reference \"%s\" in %s.dat line %s\n", + $lookupname, $catname, $bki_values->{line_number} + if $lookupname ne '-' and $lookupname ne '0'; } - close $find_defined_symbol; - die "$file: no definition found for $symbol\n"; } - die "$catalog_header: not found in any include directory\n"; + return @lookupoids; +} + +# Determine canonical pg_type OID #define symbol from the type name. 
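
Not part of the patch: a standalone C re-implementation sketch of the naming transform described in the comment above (foo_bar becomes FOO_BAROID, _foo_bar becomes FOO_BARARRAYOID). The real logic lives in the Perl sub that follows; this version exists only to show the transform on a couple of familiar type names.

    #include <ctype.h>
    #include <stdbool.h>
    #include <stdio.h>
    #include <string.h>

    static void
    form_pg_type_symbol(const char *typname, char *buf, size_t buflen)
    {
        bool        is_array = (typname[0] == '_');
        const char *base = is_array ? typname + 1 : typname;
        size_t      i;

        /* upper-case the base name ... */
        for (i = 0; base[i] != '\0' && i + 1 < buflen; i++)
            buf[i] = (char) toupper((unsigned char) base[i]);
        buf[i] = '\0';
        /* ... then append OID, or ARRAYOID for array types */
        strncat(buf, is_array ? "ARRAYOID" : "OID", buflen - i - 1);
    }

    int main(void)
    {
        char buf[64];

        form_pg_type_symbol("int4", buf, sizeof(buf));
        printf("%s\n", buf);        /* INT4OID */
        form_pg_type_symbol("_int4", buf, sizeof(buf));
        printf("%s\n", buf);        /* INT4ARRAYOID */
        return 0;
    }
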
+sub form_pg_type_symbol +{ + my $typename = shift; + + # Skip for rowtypes of bootstrap catalogs, since they have their + # own naming convention defined elsewhere. + return + if $typename eq 'pg_type' + or $typename eq 'pg_proc' + or $typename eq 'pg_attribute' + or $typename eq 'pg_class'; + + # Transform like so: + # foo_bar -> FOO_BAROID + # _foo_bar -> FOO_BARARRAYOID + $typename =~ /(_)?(.+)/; + my $arraystr = $1 ? 'ARRAY' : ''; + my $name = uc $2; + return $name . $arraystr . 'OID'; } sub usage @@ -534,12 +852,12 @@ sub usage Usage: genbki.pl [options] header... Options: - -I path to include files -o output path --set-version PostgreSQL version number for initdb cross-check -genbki.pl generates BKI files from specially formatted -header files. These BKI files are used to initialize the +genbki.pl generates BKI files and symbol definition +headers from specially formatted header files and .dat +files. The BKI files are used to initialize the postgres template database. Report bugs to . diff --git a/src/backend/catalog/heap.c b/src/backend/catalog/heap.c index a376b99f1e..bd4c439ef3 100644 --- a/src/backend/catalog/heap.c +++ b/src/backend/catalog/heap.c @@ -3,7 +3,7 @@ * heap.c * code to create and destroy POSTGRES heap relations * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * @@ -45,7 +45,6 @@ #include "catalog/pg_attrdef.h" #include "catalog/pg_collation.h" #include "catalog/pg_constraint.h" -#include "catalog/pg_constraint_fn.h" #include "catalog/pg_foreign_table.h" #include "catalog/pg_inherits.h" #include "catalog/pg_namespace.h" @@ -55,14 +54,16 @@ #include "catalog/pg_subscription_rel.h" #include "catalog/pg_tablespace.h" #include "catalog/pg_type.h" -#include "catalog/pg_type_fn.h" #include "catalog/storage.h" #include "catalog/storage_xlog.h" #include "commands/tablecmds.h" #include "commands/typecmds.h" +#include "executor/executor.h" #include "miscadmin.h" #include "nodes/nodeFuncs.h" +#include "optimizer/clauses.h" #include "optimizer/var.h" +#include "optimizer/planner.h" #include "parser/parse_coerce.h" #include "parser/parse_collate.h" #include "parser/parse_expr.h" @@ -72,9 +73,11 @@ #include "storage/smgr.h" #include "utils/acl.h" #include "utils/builtins.h" +#include "utils/datum.h" #include "utils/fmgroids.h" #include "utils/inval.h" #include "utils/lsyscache.h" +#include "utils/partcache.h" #include "utils/rel.h" #include "utils/ruleutils.h" #include "utils/snapmgr.h" @@ -103,12 +106,12 @@ static ObjectAddress AddNewRelationType(const char *typeName, Oid new_row_type, Oid new_array_type); static void RelationRemoveInheritance(Oid relid); -static Oid StoreRelCheck(Relation rel, char *ccname, Node *expr, +static Oid StoreRelCheck(Relation rel, const char *ccname, Node *expr, bool is_validated, bool is_local, int inhcount, bool is_no_inherit, bool is_internal); static void StoreConstraints(Relation rel, List *cooked_constraints, bool is_internal); -static bool MergeWithExistingConstraint(Relation rel, char *ccname, Node *expr, +static bool MergeWithExistingConstraint(Relation rel, const char *ccname, Node *expr, bool allow_merge, bool is_local, bool is_initially_valid, bool is_no_inherit); @@ -141,40 +144,88 @@ static List *insert_ordered_unique_oid(List *list, Oid datum); * fixed-size portion of the structure anyway. 
*/ -static FormData_pg_attribute a1 = { - 0, {"ctid"}, TIDOID, 0, sizeof(ItemPointerData), - SelfItemPointerAttributeNumber, 0, -1, -1, - false, 'p', 's', true, false, '\0', false, true, 0 +static const FormData_pg_attribute a1 = { + .attname = {"ctid"}, + .atttypid = TIDOID, + .attlen = sizeof(ItemPointerData), + .attnum = SelfItemPointerAttributeNumber, + .attcacheoff = -1, + .atttypmod = -1, + .attbyval = false, + .attstorage = 'p', + .attalign = 's', + .attnotnull = true, + .attislocal = true, }; -static FormData_pg_attribute a2 = { - 0, {"oid"}, OIDOID, 0, sizeof(Oid), - ObjectIdAttributeNumber, 0, -1, -1, - true, 'p', 'i', true, false, '\0', false, true, 0 +static const FormData_pg_attribute a2 = { + .attname = {"oid"}, + .atttypid = OIDOID, + .attlen = sizeof(Oid), + .attnum = ObjectIdAttributeNumber, + .attcacheoff = -1, + .atttypmod = -1, + .attbyval = true, + .attstorage = 'p', + .attalign = 'i', + .attnotnull = true, + .attislocal = true, }; -static FormData_pg_attribute a3 = { - 0, {"xmin"}, XIDOID, 0, sizeof(TransactionId), - MinTransactionIdAttributeNumber, 0, -1, -1, - true, 'p', 'i', true, false, '\0', false, true, 0 +static const FormData_pg_attribute a3 = { + .attname = {"xmin"}, + .atttypid = XIDOID, + .attlen = sizeof(TransactionId), + .attnum = MinTransactionIdAttributeNumber, + .attcacheoff = -1, + .atttypmod = -1, + .attbyval = true, + .attstorage = 'p', + .attalign = 'i', + .attnotnull = true, + .attislocal = true, }; -static FormData_pg_attribute a4 = { - 0, {"cmin"}, CIDOID, 0, sizeof(CommandId), - MinCommandIdAttributeNumber, 0, -1, -1, - true, 'p', 'i', true, false, '\0', false, true, 0 +static const FormData_pg_attribute a4 = { + .attname = {"cmin"}, + .atttypid = CIDOID, + .attlen = sizeof(CommandId), + .attnum = MinCommandIdAttributeNumber, + .attcacheoff = -1, + .atttypmod = -1, + .attbyval = true, + .attstorage = 'p', + .attalign = 'i', + .attnotnull = true, + .attislocal = true, }; -static FormData_pg_attribute a5 = { - 0, {"xmax"}, XIDOID, 0, sizeof(TransactionId), - MaxTransactionIdAttributeNumber, 0, -1, -1, - true, 'p', 'i', true, false, '\0', false, true, 0 +static const FormData_pg_attribute a5 = { + .attname = {"xmax"}, + .atttypid = XIDOID, + .attlen = sizeof(TransactionId), + .attnum = MaxTransactionIdAttributeNumber, + .attcacheoff = -1, + .atttypmod = -1, + .attbyval = true, + .attstorage = 'p', + .attalign = 'i', + .attnotnull = true, + .attislocal = true, }; -static FormData_pg_attribute a6 = { - 0, {"cmax"}, CIDOID, 0, sizeof(CommandId), - MaxCommandIdAttributeNumber, 0, -1, -1, - true, 'p', 'i', true, false, '\0', false, true, 0 +static const FormData_pg_attribute a6 = { + .attname = {"cmax"}, + .atttypid = CIDOID, + .attlen = sizeof(CommandId), + .attnum = MaxCommandIdAttributeNumber, + .attcacheoff = -1, + .atttypmod = -1, + .attbyval = true, + .attstorage = 'p', + .attalign = 'i', + .attnotnull = true, + .attislocal = true, }; /* @@ -183,20 +234,28 @@ static FormData_pg_attribute a6 = { * table of a particular class/type. In any case table is still the word * used in SQL. 
*/ -static FormData_pg_attribute a7 = { - 0, {"tableoid"}, OIDOID, 0, sizeof(Oid), - TableOidAttributeNumber, 0, -1, -1, - true, 'p', 'i', true, false, '\0', false, true, 0 +static const FormData_pg_attribute a7 = { + .attname = {"tableoid"}, + .atttypid = OIDOID, + .attlen = sizeof(Oid), + .attnum = TableOidAttributeNumber, + .attcacheoff = -1, + .atttypmod = -1, + .attbyval = true, + .attstorage = 'p', + .attalign = 'i', + .attnotnull = true, + .attislocal = true, }; -static const Form_pg_attribute SysAtt[] = {&a1, &a2, &a3, &a4, &a5, &a6, &a7}; +static const FormData_pg_attribute *SysAtt[] = {&a1, &a2, &a3, &a4, &a5, &a6, &a7}; /* * This function returns a Form_pg_attribute pointer for a system attribute. * Note that we elog if the presented attno is invalid, which would only * happen if there's a problem upstream. */ -Form_pg_attribute +const FormData_pg_attribute * SystemAttributeDefinition(AttrNumber attno, bool relhasoids) { if (attno >= 0 || attno < -(int) lengthof(SysAtt)) @@ -210,14 +269,14 @@ SystemAttributeDefinition(AttrNumber attno, bool relhasoids) * If the given name is a system attribute name, return a Form_pg_attribute * pointer for a prototype definition. If not, return NULL. */ -Form_pg_attribute +const FormData_pg_attribute * SystemAttributeByName(const char *attname, bool relhasoids) { int j; for (j = 0; j < (int) lengthof(SysAtt); j++) { - Form_pg_attribute att = SysAtt[j]; + const FormData_pg_attribute *att = SysAtt[j]; if (relhasoids || att->attnum != ObjectIdAttributeNumber) { @@ -302,6 +361,15 @@ heap_create(const char *relname, */ reltablespace = InvalidOid; break; + + case RELKIND_PARTITIONED_INDEX: + /* + * Preserve tablespace so that it's used as tablespace for indexes + * on future partitions. + */ + create_storage = false; + break; + case RELKIND_SEQUENCE: create_storage = true; @@ -431,12 +499,14 @@ CheckAttributeNamesTypes(TupleDesc tupdesc, char relkind, { for (i = 0; i < natts; i++) { - if (SystemAttributeByName(NameStr(tupdesc->attrs[i]->attname), + Form_pg_attribute attr = TupleDescAttr(tupdesc, i); + + if (SystemAttributeByName(NameStr(attr->attname), tupdesc->tdhasoid) != NULL) ereport(ERROR, (errcode(ERRCODE_DUPLICATE_COLUMN), errmsg("column name \"%s\" conflicts with a system column name", - NameStr(tupdesc->attrs[i]->attname)))); + NameStr(attr->attname)))); } } @@ -447,12 +517,12 @@ CheckAttributeNamesTypes(TupleDesc tupdesc, char relkind, { for (j = 0; j < i; j++) { - if (strcmp(NameStr(tupdesc->attrs[j]->attname), - NameStr(tupdesc->attrs[i]->attname)) == 0) + if (strcmp(NameStr(TupleDescAttr(tupdesc, j)->attname), + NameStr(TupleDescAttr(tupdesc, i)->attname)) == 0) ereport(ERROR, (errcode(ERRCODE_DUPLICATE_COLUMN), errmsg("column name \"%s\" specified more than once", - NameStr(tupdesc->attrs[j]->attname)))); + NameStr(TupleDescAttr(tupdesc, j)->attname)))); } } @@ -461,9 +531,9 @@ CheckAttributeNamesTypes(TupleDesc tupdesc, char relkind, */ for (i = 0; i < natts; i++) { - CheckAttributeType(NameStr(tupdesc->attrs[i]->attname), - tupdesc->attrs[i]->atttypid, - tupdesc->attrs[i]->attcollation, + CheckAttributeType(NameStr(TupleDescAttr(tupdesc, i)->attname), + TupleDescAttr(tupdesc, i)->atttypid, + TupleDescAttr(tupdesc, i)->attcollation, NIL, /* assume we're creating a new rowtype */ allow_system_table_mods); } @@ -545,7 +615,7 @@ CheckAttributeType(const char *attname, for (i = 0; i < tupdesc->natts; i++) { - Form_pg_attribute attr = tupdesc->attrs[i]; + Form_pg_attribute attr = TupleDescAttr(tupdesc, i); if (attr->attisdropped) continue; 
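The hunks above replace direct tupdesc->attrs[i] dereferences with the TupleDescAttr() accessor throughout heap.c. A minimal sketch of the resulting access pattern, assuming only a valid TupleDesc from the caller (the loop body and the DEBUG message are illustrative, not taken from the patch):

#include "postgres.h"
#include "access/tupdesc.h"

/* Walk a tuple descriptor through the TupleDescAttr() accessor instead of
 * indexing tupdesc->attrs[] directly; each entry is a Form_pg_attribute. */
static void
walk_attributes(TupleDesc tupdesc)
{
	int			i;

	for (i = 0; i < tupdesc->natts; i++)
	{
		Form_pg_attribute attr = TupleDescAttr(tupdesc, i);

		if (attr->attisdropped)
			continue;			/* skip dropped columns, as the callers above do */

		elog(DEBUG1, "column %d: %s (type %u)",
			 attr->attnum, NameStr(attr->attname), attr->atttypid);
	}
}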
@@ -586,8 +656,8 @@ CheckAttributeType(const char *attname, * Construct and insert a new tuple in pg_attribute. * * Caller has already opened and locked pg_attribute. new_attribute is the - * attribute to insert (but we ignore attacl and attoptions, which are always - * initialized to NULL). + * attribute to insert. attcacheoff is always initialized to -1, attacl and + * attoptions are always initialized to NULL. * * indstate is the index state for CatalogTupleInsertWithInfo. It can be * passed as NULL, in which case we'll fetch the necessary info. (Don't do @@ -614,13 +684,14 @@ InsertPgAttributeTuple(Relation pg_attribute_rel, values[Anum_pg_attribute_attlen - 1] = Int16GetDatum(new_attribute->attlen); values[Anum_pg_attribute_attnum - 1] = Int16GetDatum(new_attribute->attnum); values[Anum_pg_attribute_attndims - 1] = Int32GetDatum(new_attribute->attndims); - values[Anum_pg_attribute_attcacheoff - 1] = Int32GetDatum(new_attribute->attcacheoff); + values[Anum_pg_attribute_attcacheoff - 1] = Int32GetDatum(-1); values[Anum_pg_attribute_atttypmod - 1] = Int32GetDatum(new_attribute->atttypmod); values[Anum_pg_attribute_attbyval - 1] = BoolGetDatum(new_attribute->attbyval); values[Anum_pg_attribute_attstorage - 1] = CharGetDatum(new_attribute->attstorage); values[Anum_pg_attribute_attalign - 1] = CharGetDatum(new_attribute->attalign); values[Anum_pg_attribute_attnotnull - 1] = BoolGetDatum(new_attribute->attnotnull); values[Anum_pg_attribute_atthasdef - 1] = BoolGetDatum(new_attribute->atthasdef); + values[Anum_pg_attribute_atthasmissing - 1] = BoolGetDatum(new_attribute->atthasmissing); values[Anum_pg_attribute_attidentity - 1] = CharGetDatum(new_attribute->attidentity); values[Anum_pg_attribute_attisdropped - 1] = BoolGetDatum(new_attribute->attisdropped); values[Anum_pg_attribute_attislocal - 1] = BoolGetDatum(new_attribute->attislocal); @@ -631,6 +702,7 @@ InsertPgAttributeTuple(Relation pg_attribute_rel, nulls[Anum_pg_attribute_attacl - 1] = true; nulls[Anum_pg_attribute_attoptions - 1] = true; nulls[Anum_pg_attribute_attfdwoptions - 1] = true; + nulls[Anum_pg_attribute_attmissingval - 1] = true; tup = heap_form_tuple(RelationGetDescr(pg_attribute_rel), values, nulls); @@ -678,12 +750,11 @@ AddNewAttributeTuples(Oid new_rel_oid, */ for (i = 0; i < natts; i++) { - attr = tupdesc->attrs[i]; + attr = TupleDescAttr(tupdesc, i); /* Fill in the correct relation OID */ attr->attrelid = new_rel_oid; - /* Make sure these are OK, too */ + /* Make sure this is OK, too */ attr->attstattarget = -1; - attr->attcacheoff = -1; InsertPgAttributeTuple(rel, attr, indstate); @@ -795,7 +866,6 @@ InsertPgClassTuple(Relation pg_class_desc, values[Anum_pg_class_relnatts - 1] = Int16GetDatum(rd_rel->relnatts); values[Anum_pg_class_relchecks - 1] = Int16GetDatum(rd_rel->relchecks); values[Anum_pg_class_relhasoids - 1] = BoolGetDatum(rd_rel->relhasoids); - values[Anum_pg_class_relhaspkey - 1] = BoolGetDatum(rd_rel->relhaspkey); values[Anum_pg_class_relhasrules - 1] = BoolGetDatum(rd_rel->relhasrules); values[Anum_pg_class_relhastriggers - 1] = BoolGetDatum(rd_rel->relhastriggers); values[Anum_pg_class_relrowsecurity - 1] = BoolGetDatum(rd_rel->relrowsecurity); @@ -804,6 +874,7 @@ InsertPgClassTuple(Relation pg_class_desc, values[Anum_pg_class_relispopulated - 1] = BoolGetDatum(rd_rel->relispopulated); values[Anum_pg_class_relreplident - 1] = CharGetDatum(rd_rel->relreplident); values[Anum_pg_class_relispartition - 1] = BoolGetDatum(rd_rel->relispartition); + values[Anum_pg_class_relrewrite - 1] = 
ObjectIdGetDatum(rd_rel->relrewrite); values[Anum_pg_class_relfrozenxid - 1] = TransactionIdGetDatum(rd_rel->relfrozenxid); values[Anum_pg_class_relminmxid - 1] = MultiXactIdGetDatum(rd_rel->relminmxid); if (relacl != (Datum) 0) @@ -998,15 +1069,15 @@ AddNewRelationType(const char *typeName, * cooked_constraints: list of precooked check constraints and defaults * relkind: relkind for new rel * relpersistence: rel's persistence status (permanent, temp, or unlogged) - * shared_relation: TRUE if it's to be a shared relation - * mapped_relation: TRUE if the relation will use the relfilenode map - * oidislocal: TRUE if oid column (if any) should be marked attislocal + * shared_relation: true if it's to be a shared relation + * mapped_relation: true if the relation will use the relfilenode map + * oidislocal: true if oid column (if any) should be marked attislocal * oidinhcount: attinhcount to assign to oid column (if any) * oncommit: ON COMMIT marking (only relevant if it's a temp table) * reloptions: reloptions in Datum form, or (Datum) 0 if none - * use_user_acl: TRUE if should look for user-defined default permissions; - * if FALSE, relacl is always set NULL - * allow_system_table_mods: TRUE to allow creation in system namespaces + * use_user_acl: true if should look for user-defined default permissions; + * if false, relacl is always set NULL + * allow_system_table_mods: true to allow creation in system namespaces * is_internal: is this a system-generated catalog? * * Output parameters: @@ -1036,6 +1107,7 @@ heap_create_with_catalog(const char *relname, bool use_user_acl, bool allow_system_table_mods, bool is_internal, + Oid relrewrite, ObjectAddress *typaddress) { Relation pg_class_desc; @@ -1140,11 +1212,11 @@ heap_create_with_catalog(const char *relname, case RELKIND_MATVIEW: case RELKIND_FOREIGN_TABLE: case RELKIND_PARTITIONED_TABLE: - relacl = get_user_default_acl(ACL_OBJECT_RELATION, ownerid, + relacl = get_user_default_acl(OBJECT_TABLE, ownerid, relnamespace); break; case RELKIND_SEQUENCE: - relacl = get_user_default_acl(ACL_OBJECT_SEQUENCE, ownerid, + relacl = get_user_default_acl(OBJECT_SEQUENCE, ownerid, relnamespace); break; default: @@ -1174,6 +1246,8 @@ heap_create_with_catalog(const char *relname, Assert(relid == RelationGetRelid(new_rel_desc)); + new_rel_desc->rd_rel->relrewrite = relrewrite; + /* * Decide whether to create an array type over the relation's rowtype. 
We * do not create any array types for system catalogs (ie, those made @@ -1301,6 +1375,7 @@ heap_create_with_catalog(const char *relname, myself.classId = RelationRelationId; myself.objectId = relid; myself.objectSubId = 0; + referenced.classId = NamespaceRelationId; referenced.objectId = relnamespace; referenced.objectSubId = 0; @@ -1308,6 +1383,8 @@ heap_create_with_catalog(const char *relname, recordDependencyOnOwner(RelationRelationId, relid, ownerid); + recordDependencyOnNewAcl(RelationRelationId, relid, 0, ownerid, relacl); + recordDependencyOnCurrentExtension(&myself, false); if (reloftypeid) @@ -1317,18 +1394,6 @@ heap_create_with_catalog(const char *relname, referenced.objectSubId = 0; recordDependencyOn(&myself, &referenced, DEPENDENCY_NORMAL); } - - if (relacl != NULL) - { - int nnewmembers; - Oid *newmembers; - - nnewmembers = aclmembers(relacl, &newmembers); - updateAclDependencies(RelationRelationId, relid, 0, - ownerid, - 0, NULL, - nnewmembers, newmembers); - } } /* Post creation hook for new relation */ @@ -1602,6 +1667,29 @@ RemoveAttributeById(Oid relid, AttrNumber attnum) "........pg.dropped.%d........", attnum); namestrcpy(&(attStruct->attname), newattname); + /* clear the missing value if any */ + if (attStruct->atthasmissing) + { + Datum valuesAtt[Natts_pg_attribute]; + bool nullsAtt[Natts_pg_attribute]; + bool replacesAtt[Natts_pg_attribute]; + + /* update the tuple - set atthasmissing and attmissingval */ + MemSet(valuesAtt, 0, sizeof(valuesAtt)); + MemSet(nullsAtt, false, sizeof(nullsAtt)); + MemSet(replacesAtt, false, sizeof(replacesAtt)); + + valuesAtt[Anum_pg_attribute_atthasmissing - 1] = + BoolGetDatum(false); + replacesAtt[Anum_pg_attribute_atthasmissing - 1] = true; + valuesAtt[Anum_pg_attribute_attmissingval - 1] = (Datum) 0; + nullsAtt[Anum_pg_attribute_attmissingval - 1] = true; + replacesAtt[Anum_pg_attribute_attmissingval - 1] = true; + + tuple = heap_modify_tuple(tuple, RelationGetDescr(attr_rel), + valuesAtt, nullsAtt, replacesAtt); + } + CatalogTupleUpdate(attr_rel, &tuple->t_self, tuple); } @@ -1757,7 +1845,8 @@ heap_drop_with_catalog(Oid relid) { Relation rel; HeapTuple tuple; - Oid parentOid = InvalidOid; + Oid parentOid = InvalidOid, + defaultPartOid = InvalidOid; /* * To drop a partition safely, we must grab exclusive lock on its parent, @@ -1766,13 +1855,24 @@ heap_drop_with_catalog(Oid relid) * could attempt to access the just-dropped relation as its partition. We * must therefore take a table lock strong enough to prevent all queries * on the table from proceeding until we commit and send out a - * shared-cache-inval notice that will make them update their index lists. + * shared-cache-inval notice that will make them update their partition + * descriptors. */ tuple = SearchSysCache1(RELOID, ObjectIdGetDatum(relid)); + if (!HeapTupleIsValid(tuple)) + elog(ERROR, "cache lookup failed for relation %u", relid); if (((Form_pg_class) GETSTRUCT(tuple))->relispartition) { parentOid = get_partition_parent(relid); LockRelationOid(parentOid, AccessExclusiveLock); + + /* + * If this is not the default partition, dropping it will change the + * default partition's partition constraint, so we must lock it. 
+ */ + defaultPartOid = get_default_partition_oid(parentOid); + if (OidIsValid(defaultPartOid) && relid != defaultPartOid) + LockRelationOid(defaultPartOid, AccessExclusiveLock); } ReleaseSysCache(tuple); @@ -1823,6 +1923,13 @@ heap_drop_with_catalog(Oid relid) if (rel->rd_rel->relkind == RELKIND_PARTITIONED_TABLE) RemovePartitionKeyByRelId(relid); + /* + * If the relation being dropped is the default partition itself, + * invalidate its entry in pg_partitioned_table. + */ + if (relid == defaultPartOid) + update_default_partition_oid(parentOid, InvalidOid); + /* * Schedule unlinking of the relation's physical files at commit. */ @@ -1882,6 +1989,14 @@ heap_drop_with_catalog(Oid relid) if (OidIsValid(parentOid)) { + /* + * If this is not the default partition, the partition constraint of + * the default partition has changed to include the portion of the key + * space previously covered by the dropped partition. + */ + if (OidIsValid(defaultPartOid) && relid != defaultPartOid) + CacheInvalidateRelcacheByRelid(defaultPartOid); + /* * Invalidate the parent's relcache so that the partition is no longer * included in its partition descriptor. @@ -1892,17 +2007,150 @@ heap_drop_with_catalog(Oid relid) } +/* + * RelationClearMissing + * + * Set atthasmissing and attmissingval to false/null for all attributes + * where they are currently set. This can be safely and usefully done if + * the table is rewritten (e.g. by VACUUM FULL or CLUSTER) where we know there + * are no rows left with less than a full complement of attributes. + * + * The caller must have an AccessExclusive lock on the relation. + */ +void +RelationClearMissing(Relation rel) +{ + Relation attr_rel; + Oid relid = RelationGetRelid(rel); + int natts = RelationGetNumberOfAttributes(rel); + int attnum; + Datum repl_val[Natts_pg_attribute]; + bool repl_null[Natts_pg_attribute]; + bool repl_repl[Natts_pg_attribute]; + Form_pg_attribute attrtuple; + HeapTuple tuple, + newtuple; + + memset(repl_val, 0, sizeof(repl_val)); + memset(repl_null, false, sizeof(repl_null)); + memset(repl_repl, false, sizeof(repl_repl)); + + repl_val[Anum_pg_attribute_atthasmissing - 1] = BoolGetDatum(false); + repl_null[Anum_pg_attribute_attmissingval - 1] = true; + + repl_repl[Anum_pg_attribute_atthasmissing - 1] = true; + repl_repl[Anum_pg_attribute_attmissingval - 1] = true; + + + /* Get a lock on pg_attribute */ + attr_rel = heap_open(AttributeRelationId, RowExclusiveLock); + + /* process each non-system attribute, including any dropped columns */ + for (attnum = 1; attnum <= natts; attnum++) + { + tuple = SearchSysCache2(ATTNUM, + ObjectIdGetDatum(relid), + Int16GetDatum(attnum)); + if (!HeapTupleIsValid(tuple)) /* shouldn't happen */ + elog(ERROR, "cache lookup failed for attribute %d of relation %u", + attnum, relid); + + attrtuple = (Form_pg_attribute) GETSTRUCT(tuple); + + /* ignore any where atthasmissing is not true */ + if (attrtuple->atthasmissing) + { + newtuple = heap_modify_tuple(tuple, RelationGetDescr(attr_rel), + repl_val, repl_null, repl_repl); + + CatalogTupleUpdate(attr_rel, &newtuple->t_self, newtuple); + + heap_freetuple(newtuple); + } + + ReleaseSysCache(tuple); + } + + /* + * Our update of the pg_attribute rows will force a relcache rebuild, so + * there's nothing else to do here. + */ + heap_close(attr_rel, RowExclusiveLock); +} + +/* + * SetAttrMissing + * + * Set the missing value of a single attribute. This should only be used by + * binary upgrade. Takes an AccessExclusive lock on the relation owning the + * attribute. 
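Both RelationClearMissing() above and SetAttrMissing() just below update a handful of pg_attribute columns through the same idiom: a values/nulls/replaces triple passed to heap_modify_tuple(), followed by CatalogTupleUpdate(). A minimal sketch of that idiom, assuming an already-open pg_attribute relation and a fetched tuple (the single column being reset is illustrative):

#include "postgres.h"
#include "access/htup_details.h"
#include "catalog/indexing.h"
#include "catalog/pg_attribute.h"
#include "utils/rel.h"

/* Replace only the atthasmissing column of an existing pg_attribute tuple,
 * leaving every other column untouched. */
static void
reset_atthasmissing(Relation attrrel, HeapTuple oldtup)
{
	Datum		values[Natts_pg_attribute];
	bool		nulls[Natts_pg_attribute];
	bool		replaces[Natts_pg_attribute];
	HeapTuple	newtup;

	memset(values, 0, sizeof(values));
	memset(nulls, false, sizeof(nulls));
	memset(replaces, false, sizeof(replaces));

	values[Anum_pg_attribute_atthasmissing - 1] = BoolGetDatum(false);
	replaces[Anum_pg_attribute_atthasmissing - 1] = true;

	newtup = heap_modify_tuple(oldtup, RelationGetDescr(attrrel),
							   values, nulls, replaces);
	CatalogTupleUpdate(attrrel, &newtup->t_self, newtup);
	heap_freetuple(newtup);
}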
+ */ +void +SetAttrMissing(Oid relid, char *attname, char *value) +{ + Datum valuesAtt[Natts_pg_attribute]; + bool nullsAtt[Natts_pg_attribute]; + bool replacesAtt[Natts_pg_attribute]; + Datum missingval; + Form_pg_attribute attStruct; + Relation attrrel, + tablerel; + HeapTuple atttup, + newtup; + + /* lock the table the attribute belongs to */ + tablerel = heap_open(relid, AccessExclusiveLock); + + /* Lock the attribute row and get the data */ + attrrel = heap_open(AttributeRelationId, RowExclusiveLock); + atttup = SearchSysCacheAttName(relid, attname); + if (!HeapTupleIsValid(atttup)) + elog(ERROR, "cache lookup failed for attribute %s of relation %u", + attname, relid); + attStruct = (Form_pg_attribute) GETSTRUCT(atttup); + + /* get an array value from the value string */ + missingval = OidFunctionCall3(F_ARRAY_IN, + CStringGetDatum(value), + ObjectIdGetDatum(attStruct->atttypid), + Int32GetDatum(attStruct->atttypmod)); + + /* update the tuple - set atthasmissing and attmissingval */ + MemSet(valuesAtt, 0, sizeof(valuesAtt)); + MemSet(nullsAtt, false, sizeof(nullsAtt)); + MemSet(replacesAtt, false, sizeof(replacesAtt)); + + valuesAtt[Anum_pg_attribute_atthasmissing - 1] = BoolGetDatum(true); + replacesAtt[Anum_pg_attribute_atthasmissing - 1] = true; + valuesAtt[Anum_pg_attribute_attmissingval - 1] = missingval; + replacesAtt[Anum_pg_attribute_attmissingval - 1] = true; + + newtup = heap_modify_tuple(atttup, RelationGetDescr(attrrel), + valuesAtt, nullsAtt, replacesAtt); + CatalogTupleUpdate(attrrel, &newtup->t_self, newtup); + + /* clean up */ + ReleaseSysCache(atttup); + heap_close(attrrel, RowExclusiveLock); + heap_close(tablerel, AccessExclusiveLock); +} + /* * Store a default expression for column attnum of relation rel. * * Returns the OID of the new pg_attrdef tuple. + * + * add_column_mode must be true if we are storing the default for a new + * attribute, and false if it's for an already existing attribute. The reason + * for this is that the missing value must never be updated after it is set, + * which can only be when a column is added to the table. Otherwise we would + * in effect be changing existing tuples. */ Oid StoreAttrDefault(Relation rel, AttrNumber attnum, - Node *expr, bool is_internal) + Node *expr, bool is_internal, bool add_column_mode) { char *adbin; - char *adsrc; Relation adrel; HeapTuple tuple; Datum values[4]; @@ -1919,21 +2167,12 @@ StoreAttrDefault(Relation rel, AttrNumber attnum, */ adbin = nodeToString(expr); - /* - * Also deparse it to form the mostly-obsolete adsrc field. - */ - adsrc = deparse_expression(expr, - deparse_context_for(RelationGetRelationName(rel), - RelationGetRelid(rel)), - false, false); - /* * Make the pg_attrdef entry. 
*/ values[Anum_pg_attrdef_adrelid - 1] = RelationGetRelid(rel); values[Anum_pg_attrdef_adnum - 1] = attnum; values[Anum_pg_attrdef_adbin - 1] = CStringGetTextDatum(adbin); - values[Anum_pg_attrdef_adsrc - 1] = CStringGetTextDatum(adsrc); adrel = heap_open(AttrDefaultRelationId, RowExclusiveLock); @@ -1948,10 +2187,8 @@ StoreAttrDefault(Relation rel, AttrNumber attnum, /* now can free some of the stuff allocated above */ pfree(DatumGetPointer(values[Anum_pg_attrdef_adbin - 1])); - pfree(DatumGetPointer(values[Anum_pg_attrdef_adsrc - 1])); heap_freetuple(tuple); pfree(adbin); - pfree(adsrc); /* * Update the pg_attribute entry for the column to show that a default @@ -1967,8 +2204,69 @@ StoreAttrDefault(Relation rel, AttrNumber attnum, attStruct = (Form_pg_attribute) GETSTRUCT(atttup); if (!attStruct->atthasdef) { - attStruct->atthasdef = true; + Form_pg_attribute defAttStruct; + + ExprState *exprState; + Expr *expr2 = (Expr *) expr; + EState *estate = NULL; + ExprContext *econtext; + Datum valuesAtt[Natts_pg_attribute]; + bool nullsAtt[Natts_pg_attribute]; + bool replacesAtt[Natts_pg_attribute]; + Datum missingval = (Datum) 0; + bool missingIsNull = true; + + MemSet(valuesAtt, 0, sizeof(valuesAtt)); + MemSet(nullsAtt, false, sizeof(nullsAtt)); + MemSet(replacesAtt, false, sizeof(replacesAtt)); + valuesAtt[Anum_pg_attribute_atthasdef - 1] = true; + replacesAtt[Anum_pg_attribute_atthasdef - 1] = true; + + if (add_column_mode) + { + expr2 = expression_planner(expr2); + estate = CreateExecutorState(); + exprState = ExecPrepareExpr(expr2, estate); + econtext = GetPerTupleExprContext(estate); + + missingval = ExecEvalExpr(exprState, econtext, + &missingIsNull); + + FreeExecutorState(estate); + + defAttStruct = TupleDescAttr(rel->rd_att, attnum - 1); + + if (missingIsNull) + { + /* if the default evaluates to NULL, just store a NULL array */ + missingval = (Datum) 0; + } + else + { + /* otherwise make a one-element array of the value */ + missingval = PointerGetDatum( + construct_array(&missingval, + 1, + defAttStruct->atttypid, + defAttStruct->attlen, + defAttStruct->attbyval, + defAttStruct->attalign)); + } + + valuesAtt[Anum_pg_attribute_atthasmissing - 1] = !missingIsNull; + replacesAtt[Anum_pg_attribute_atthasmissing - 1] = true; + valuesAtt[Anum_pg_attribute_attmissingval - 1] = missingval; + replacesAtt[Anum_pg_attribute_attmissingval - 1] = true; + nullsAtt[Anum_pg_attribute_attmissingval - 1] = missingIsNull; + } + atttup = heap_modify_tuple(atttup, RelationGetDescr(attrrel), + valuesAtt, nullsAtt, replacesAtt); + CatalogTupleUpdate(attrrel, &atttup->t_self, atttup); + + if (!missingIsNull) + pfree(DatumGetPointer(missingval)); + } heap_close(attrrel, RowExclusiveLock); heap_freetuple(atttup); @@ -2011,12 +2309,11 @@ StoreAttrDefault(Relation rel, AttrNumber attnum, * The OID of the new constraint is returned. */ static Oid -StoreRelCheck(Relation rel, char *ccname, Node *expr, +StoreRelCheck(Relation rel, const char *ccname, Node *expr, bool is_validated, bool is_local, int inhcount, bool is_no_inherit, bool is_internal) { char *ccbin; - char *ccsrc; List *varList; int keycount; int16 *attNos; @@ -2027,14 +2324,6 @@ StoreRelCheck(Relation rel, char *ccname, Node *expr, */ ccbin = nodeToString(expr); - /* - * Also deparse it to form the mostly-obsolete consrc field. 
- */ - ccsrc = deparse_expression(expr, - deparse_context_for(RelationGetRelationName(rel), - RelationGetRelid(rel)), - false, false); - /* * Find columns of rel that are used in expr * @@ -2088,9 +2377,11 @@ StoreRelCheck(Relation rel, char *ccname, Node *expr, false, /* Is Deferrable */ false, /* Is Deferred */ is_validated, + InvalidOid, /* no parent constraint */ RelationGetRelid(rel), /* relation */ attNos, /* attrs in the constraint */ - keycount, /* # attrs in the constraint */ + keycount, /* # key attrs in the constraint */ + keycount, /* # total attrs in the constraint */ InvalidOid, /* not a domain constraint */ InvalidOid, /* no associated index */ InvalidOid, /* Foreign key fields */ @@ -2105,14 +2396,12 @@ StoreRelCheck(Relation rel, char *ccname, Node *expr, NULL, /* not an exclusion constraint */ expr, /* Tree form of check constraint */ ccbin, /* Binary form of check constraint */ - ccsrc, /* Source form of check constraint */ is_local, /* conislocal */ inhcount, /* coninhcount */ is_no_inherit, /* connoinherit */ is_internal); /* internally constructed? */ pfree(ccbin); - pfree(ccsrc); return constrOid; } @@ -2151,7 +2440,7 @@ StoreConstraints(Relation rel, List *cooked_constraints, bool is_internal) { case CONSTR_DEFAULT: con->conoid = StoreAttrDefault(rel, con->attnum, con->expr, - is_internal); + is_internal, false); break; case CONSTR_CHECK: con->conoid = @@ -2182,9 +2471,9 @@ StoreConstraints(Relation rel, List *cooked_constraints, bool is_internal) * rel: relation to be modified * newColDefaults: list of RawColumnDefault structures * newConstraints: list of Constraint nodes - * allow_merge: TRUE if check constraints may be merged with existing ones - * is_local: TRUE if definition is local, FALSE if it's inherited - * is_internal: TRUE if result of some internal process, not a user request + * allow_merge: true if check constraints may be merged with existing ones + * is_local: true if definition is local, false if it's inherited + * is_internal: true if result of some internal process, not a user request * * All entries in newColDefaults will be processed. Entries in newConstraints * will be processed only if they are CONSTR_CHECK type. @@ -2203,7 +2492,8 @@ AddRelationNewConstraints(Relation rel, List *newConstraints, bool allow_merge, bool is_local, - bool is_internal) + bool is_internal, + const char *queryString) { List *cookedConstraints = NIL; TupleDesc tupleDesc; @@ -2232,8 +2522,10 @@ AddRelationNewConstraints(Relation rel, * rangetable entry. We need a ParseState for transformExpr. 
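In the StoreAttrDefault() changes above, the pre-evaluated default is stashed in attmissingval as a one-element array of the column's own type; that is what the construct_array() call with the attribute's typid/typlen/typbyval/typalign builds. A standalone sketch of that wrapping step (function name and parameters are illustrative):

#include "postgres.h"
#include "utils/array.h"

/* Wrap a single already-evaluated Datum into a one-element array, the form
 * expected by an anyarray catalog column such as attmissingval. */
static Datum
wrap_in_singleton_array(Datum value, Oid typid, int16 typlen,
						bool typbyval, char typalign)
{
	ArrayType  *arr = construct_array(&value, 1, typid,
									  typlen, typbyval, typalign);

	return PointerGetDatum(arr);
}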
*/ pstate = make_parsestate(NULL); + pstate->p_sourcetext = queryString; rte = addRangeTableEntryForRelation(pstate, rel, + AccessShareLock, NULL, false, true); @@ -2245,7 +2537,7 @@ AddRelationNewConstraints(Relation rel, foreach(cell, newColDefaults) { RawColumnDefault *colDef = (RawColumnDefault *) lfirst(cell); - Form_pg_attribute atp = rel->rd_att->attrs[colDef->attnum - 1]; + Form_pg_attribute atp = TupleDescAttr(rel->rd_att, colDef->attnum - 1); Oid defOid; expr = cookDefault(pstate, colDef->raw_default, @@ -2267,7 +2559,12 @@ AddRelationNewConstraints(Relation rel, (IsA(expr, Const) &&((Const *) expr)->constisnull)) continue; - defOid = StoreAttrDefault(rel, colDef->attnum, expr, is_internal); + /* If the DEFAULT is volatile we cannot use a missing value */ + if (colDef->missingMode && contain_volatile_functions((Node *) expr)) + colDef->missingMode = false; + + defOid = StoreAttrDefault(rel, colDef->attnum, expr, is_internal, + colDef->missingMode); cooked = (CookedConstraint *) palloc(sizeof(CookedConstraint)); cooked->contype = CONSTR_DEFAULT; @@ -2376,7 +2673,8 @@ AddRelationNewConstraints(Relation rel, if (list_length(vars) == 1) colname = get_attname(RelationGetRelid(rel), - ((Var *) linitial(vars))->varattno); + ((Var *) linitial(vars))->varattno, + true); else colname = NULL; @@ -2429,13 +2727,13 @@ AddRelationNewConstraints(Relation rel, * new one, and either adjust its conislocal/coninhcount settings or throw * error as needed. * - * Returns TRUE if merged (constraint is a duplicate), or FALSE if it's + * Returns true if merged (constraint is a duplicate), or false if it's * got a so-far-unique name, or throws error if conflict. * * XXX See MergeConstraintsIntoExisting too if you change this code. */ static bool -MergeWithExistingConstraint(Relation rel, char *ccname, Node *expr, +MergeWithExistingConstraint(Relation rel, const char *ccname, Node *expr, bool allow_merge, bool is_local, bool is_initially_valid, bool is_no_inherit) @@ -2443,7 +2741,7 @@ MergeWithExistingConstraint(Relation rel, char *ccname, Node *expr, bool found; Relation conDesc; SysScanDesc conscan; - ScanKeyData skey[2]; + ScanKeyData skey[3]; HeapTuple tup; /* Search for a pg_constraint entry with same name and relation */ @@ -2452,120 +2750,120 @@ MergeWithExistingConstraint(Relation rel, char *ccname, Node *expr, found = false; ScanKeyInit(&skey[0], + Anum_pg_constraint_conrelid, + BTEqualStrategyNumber, F_OIDEQ, + ObjectIdGetDatum(RelationGetRelid(rel))); + ScanKeyInit(&skey[1], + Anum_pg_constraint_contypid, + BTEqualStrategyNumber, F_OIDEQ, + ObjectIdGetDatum(InvalidOid)); + ScanKeyInit(&skey[2], Anum_pg_constraint_conname, BTEqualStrategyNumber, F_NAMEEQ, CStringGetDatum(ccname)); - ScanKeyInit(&skey[1], - Anum_pg_constraint_connamespace, - BTEqualStrategyNumber, F_OIDEQ, - ObjectIdGetDatum(RelationGetNamespace(rel))); - - conscan = systable_beginscan(conDesc, ConstraintNameNspIndexId, true, - NULL, 2, skey); + conscan = systable_beginscan(conDesc, ConstraintRelidTypidNameIndexId, true, + NULL, 3, skey); - while (HeapTupleIsValid(tup = systable_getnext(conscan))) + /* There can be at most one matching row */ + if (HeapTupleIsValid(tup = systable_getnext(conscan))) { Form_pg_constraint con = (Form_pg_constraint) GETSTRUCT(tup); - if (con->conrelid == RelationGetRelid(rel)) + /* Found it. Conflicts if not identical check constraint */ + if (con->contype == CONSTRAINT_CHECK) { - /* Found it. 
Conflicts if not identical check constraint */ - if (con->contype == CONSTRAINT_CHECK) - { - Datum val; - bool isnull; - - val = fastgetattr(tup, - Anum_pg_constraint_conbin, - conDesc->rd_att, &isnull); - if (isnull) - elog(ERROR, "null conbin for rel %s", - RelationGetRelationName(rel)); - if (equal(expr, stringToNode(TextDatumGetCString(val)))) - found = true; - } + Datum val; + bool isnull; + + val = fastgetattr(tup, + Anum_pg_constraint_conbin, + conDesc->rd_att, &isnull); + if (isnull) + elog(ERROR, "null conbin for rel %s", + RelationGetRelationName(rel)); + if (equal(expr, stringToNode(TextDatumGetCString(val)))) + found = true; + } - /* - * If the existing constraint is purely inherited (no local - * definition) then interpret addition of a local constraint as a - * legal merge. This allows ALTER ADD CONSTRAINT on parent and - * child tables to be given in either order with same end state. - * However if the relation is a partition, all inherited - * constraints are always non-local, including those that were - * merged. - */ - if (is_local && !con->conislocal && !rel->rd_rel->relispartition) - allow_merge = true; + /* + * If the existing constraint is purely inherited (no local + * definition) then interpret addition of a local constraint as a + * legal merge. This allows ALTER ADD CONSTRAINT on parent and child + * tables to be given in either order with same end state. However if + * the relation is a partition, all inherited constraints are always + * non-local, including those that were merged. + */ + if (is_local && !con->conislocal && !rel->rd_rel->relispartition) + allow_merge = true; - if (!found || !allow_merge) - ereport(ERROR, - (errcode(ERRCODE_DUPLICATE_OBJECT), - errmsg("constraint \"%s\" for relation \"%s\" already exists", - ccname, RelationGetRelationName(rel)))); + if (!found || !allow_merge) + ereport(ERROR, + (errcode(ERRCODE_DUPLICATE_OBJECT), + errmsg("constraint \"%s\" for relation \"%s\" already exists", + ccname, RelationGetRelationName(rel)))); - /* If the child constraint is "no inherit" then cannot merge */ - if (con->connoinherit) - ereport(ERROR, - (errcode(ERRCODE_INVALID_OBJECT_DEFINITION), - errmsg("constraint \"%s\" conflicts with non-inherited constraint on relation \"%s\"", - ccname, RelationGetRelationName(rel)))); + /* If the child constraint is "no inherit" then cannot merge */ + if (con->connoinherit) + ereport(ERROR, + (errcode(ERRCODE_INVALID_OBJECT_DEFINITION), + errmsg("constraint \"%s\" conflicts with non-inherited constraint on relation \"%s\"", + ccname, RelationGetRelationName(rel)))); - /* - * Must not change an existing inherited constraint to "no - * inherit" status. That's because inherited constraints should - * be able to propagate to lower-level children. - */ - if (con->coninhcount > 0 && is_no_inherit) - ereport(ERROR, - (errcode(ERRCODE_INVALID_OBJECT_DEFINITION), - errmsg("constraint \"%s\" conflicts with inherited constraint on relation \"%s\"", - ccname, RelationGetRelationName(rel)))); + /* + * Must not change an existing inherited constraint to "no inherit" + * status. That's because inherited constraints should be able to + * propagate to lower-level children. 
+ */ + if (con->coninhcount > 0 && is_no_inherit) + ereport(ERROR, + (errcode(ERRCODE_INVALID_OBJECT_DEFINITION), + errmsg("constraint \"%s\" conflicts with inherited constraint on relation \"%s\"", + ccname, RelationGetRelationName(rel)))); - /* - * If the child constraint is "not valid" then cannot merge with a - * valid parent constraint - */ - if (is_initially_valid && !con->convalidated) - ereport(ERROR, - (errcode(ERRCODE_INVALID_OBJECT_DEFINITION), - errmsg("constraint \"%s\" conflicts with NOT VALID constraint on relation \"%s\"", - ccname, RelationGetRelationName(rel)))); + /* + * If the child constraint is "not valid" then cannot merge with a + * valid parent constraint. + */ + if (is_initially_valid && !con->convalidated) + ereport(ERROR, + (errcode(ERRCODE_INVALID_OBJECT_DEFINITION), + errmsg("constraint \"%s\" conflicts with NOT VALID constraint on relation \"%s\"", + ccname, RelationGetRelationName(rel)))); - /* OK to update the tuple */ - ereport(NOTICE, - (errmsg("merging constraint \"%s\" with inherited definition", - ccname))); + /* OK to update the tuple */ + ereport(NOTICE, + (errmsg("merging constraint \"%s\" with inherited definition", + ccname))); - tup = heap_copytuple(tup); - con = (Form_pg_constraint) GETSTRUCT(tup); + tup = heap_copytuple(tup); + con = (Form_pg_constraint) GETSTRUCT(tup); - /* - * In case of partitions, an inherited constraint must be - * inherited only once since it cannot have multiple parents and - * it is never considered local. - */ - if (rel->rd_rel->relispartition) - { - con->coninhcount = 1; - con->conislocal = false; - } + /* + * In case of partitions, an inherited constraint must be inherited + * only once since it cannot have multiple parents and it is never + * considered local. + */ + if (rel->rd_rel->relispartition) + { + con->coninhcount = 1; + con->conislocal = false; + } + else + { + if (is_local) + con->conislocal = true; else - { - if (is_local) - con->conislocal = true; - else - con->coninhcount++; - } + con->coninhcount++; + } - if (is_no_inherit) - { - Assert(is_local); - con->connoinherit = true; - } - CatalogTupleUpdate(conDesc, &tup->t_self, tup); - break; + if (is_no_inherit) + { + Assert(is_local); + con->connoinherit = true; } + + CatalogTupleUpdate(conDesc, &tup->t_self, tup); } systable_endscan(conscan); @@ -2632,7 +2930,7 @@ cookDefault(ParseState *pstate, Node *raw_default, Oid atttypid, int32 atttypmod, - char *attname) + const char *attname) { Node *expr; @@ -2812,7 +3110,7 @@ RelationTruncateIndexes(Relation heapRelation) /* Initialize the index and rebuild */ /* Note: we do not need to re-establish pkey setting */ - index_build(heapRelation, currentIndex, indexInfo, false, true); + index_build(heapRelation, currentIndex, indexInfo, false, true, false); /* We're done with this index */ index_close(currentIndex, NoLock); @@ -2874,6 +3172,13 @@ heap_truncate_one_rel(Relation rel) { Oid toastrelid; + /* + * Truncate the relation. Partitioned tables have no storage, so there is + * nothing to do for them here. + */ + if (rel->rd_rel->relkind == RELKIND_PARTITIONED_TABLE) + return; + /* Truncate the actual file (and discard buffers) */ RelationTruncate(rel, 0); @@ -2917,13 +3222,16 @@ heap_truncate_check_FKs(List *relations, bool tempTables) * Build a list of OIDs of the interesting relations. * * If a relation has no triggers, then it can neither have FKs nor be - * referenced by a FK from another table, so we can ignore it. + * referenced by a FK from another table, so we can ignore it. 
For + * partitioned tables, FKs have no triggers, so we must include them + * anyway. */ foreach(cell, relations) { Relation rel = lfirst(cell); - if (rel->rd_rel->relhastriggers) + if (rel->rd_rel->relhastriggers || + rel->rd_rel->relkind == RELKIND_PARTITIONED_TABLE) oids = lappend_oid(oids, RelationGetRelid(rel)); } @@ -3105,9 +3413,6 @@ StorePartitionKey(Relation rel, Assert(rel->rd_rel->relkind == RELKIND_PARTITIONED_TABLE); - tuple = SearchSysCache1(PARTRELID, - ObjectIdGetDatum(RelationGetRelid(rel))); - /* Copy the partition attribute numbers, opclass OIDs into arrays */ partattrs_vec = buildint2vector(partattrs, partnatts); partopclass_vec = buildoidvector(partopclass, partnatts); @@ -3136,6 +3441,7 @@ StorePartitionKey(Relation rel, values[Anum_pg_partitioned_table_partrelid - 1] = ObjectIdGetDatum(RelationGetRelid(rel)); values[Anum_pg_partitioned_table_partstrat - 1] = CharGetDatum(strategy); values[Anum_pg_partitioned_table_partnatts - 1] = Int16GetDatum(partnatts); + values[Anum_pg_partitioned_table_partdefid - 1] = ObjectIdGetDatum(InvalidOid); values[Anum_pg_partitioned_table_partattrs - 1] = PointerGetDatum(partattrs_vec); values[Anum_pg_partitioned_table_partclass - 1] = PointerGetDatum(partopclass_vec); values[Anum_pg_partitioned_table_partcollation - 1] = PointerGetDatum(partcollation_vec); @@ -3220,8 +3526,12 @@ RemovePartitionKeyByRelId(Oid relid) * Update pg_class tuple of rel to store the partition bound and set * relispartition to true * + * If this is the default partition, also update the default partition OID in + * pg_partitioned_table. + * * Also, invalidate the parent's relcache, so that the next rebuild will load - * the new partition's info into its partition descriptor. + * the new partition's info into its partition descriptor. If there is a + * default partition, we must invalidate its relcache entry as well. */ void StorePartitionBound(Relation rel, Relation parent, PartitionBoundSpec *bound) @@ -3232,6 +3542,7 @@ StorePartitionBound(Relation rel, Relation parent, PartitionBoundSpec *bound) Datum new_val[Natts_pg_class]; bool new_null[Natts_pg_class], new_repl[Natts_pg_class]; + Oid defaultPartOid; /* Update pg_class tuple */ classRel = heap_open(RelationRelationId, RowExclusiveLock); @@ -3269,5 +3580,26 @@ StorePartitionBound(Relation rel, Relation parent, PartitionBoundSpec *bound) heap_freetuple(newtuple); heap_close(classRel, RowExclusiveLock); + /* + * If we're storing bounds for the default partition, update + * pg_partitioned_table too. + */ + if (bound->is_default) + update_default_partition_oid(RelationGetRelid(parent), + RelationGetRelid(rel)); + + /* Make these updates visible */ + CommandCounterIncrement(); + + /* + * The partition constraint for the default partition depends on the + * partition bounds of every other partition, so we must invalidate the + * relcache entry for that partition every time a partition is added or + * removed. 
+ */ + defaultPartOid = get_default_oid_from_partdesc(RelationGetPartitionDesc(parent)); + if (OidIsValid(defaultPartOid)) + CacheInvalidateRelcacheByRelid(defaultPartOid); + CacheInvalidateRelcache(parent); } diff --git a/src/backend/catalog/index.c b/src/backend/catalog/index.c index 25c5bead9f..4088286151 100644 --- a/src/backend/catalog/index.c +++ b/src/backend/catalog/index.c @@ -3,7 +3,7 @@ * index.c * code to create and destroy POSTGRES index relations * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * @@ -26,6 +26,7 @@ #include "access/amapi.h" #include "access/multixact.h" #include "access/relscan.h" +#include "access/reloptions.h" #include "access/sysattr.h" #include "access/transam.h" #include "access/visibilitymap.h" @@ -40,7 +41,8 @@ #include "catalog/pg_am.h" #include "catalog/pg_collation.h" #include "catalog/pg_constraint.h" -#include "catalog/pg_constraint_fn.h" +#include "catalog/pg_depend.h" +#include "catalog/pg_inherits.h" #include "catalog/pg_operator.h" #include "catalog/pg_opclass.h" #include "catalog/pg_tablespace.h" @@ -48,13 +50,16 @@ #include "catalog/pg_type.h" #include "catalog/storage.h" #include "commands/tablecmds.h" +#include "commands/event_trigger.h" #include "commands/trigger.h" #include "executor/executor.h" #include "miscadmin.h" #include "nodes/makefuncs.h" #include "nodes/nodeFuncs.h" #include "optimizer/clauses.h" +#include "optimizer/planner.h" #include "parser/parser.h" +#include "rewrite/rewriteManip.h" #include "storage/bufmgr.h" #include "storage/lmgr.h" #include "storage/predicate.h" @@ -86,6 +91,18 @@ typedef struct tups_inserted; } v_i_state; +/* + * Pointer-free representation of variables used when reindexing system + * catalogs; we use this to propagate those values to parallel workers. + */ +typedef struct +{ + Oid currentlyReindexedHeap; + Oid currentlyReindexedIndex; + int numPendingReindexedIndexes; + Oid pendingReindexedIndexes[FLEXIBLE_ARRAY_MEMBER]; +} SerializedReindexState; + /* non-export function prototypes */ static bool relationHasPrimaryKey(Relation rel); static TupleDesc ConstructTupleDescriptor(Relation heapRelation, @@ -98,6 +115,7 @@ static void InitializeAttributeOids(Relation indexRelation, int numatts, Oid indexoid); static void AppendAttributeTuples(Relation indexRelation, int numatts); static void UpdateIndexRelation(Oid indexoid, Oid heapoid, + Oid parentIndexId, IndexInfo *indexInfo, Oid *collationOids, Oid *classOids, @@ -105,9 +123,10 @@ static void UpdateIndexRelation(Oid indexoid, Oid heapoid, bool primary, bool isexclusion, bool immediate, - bool isvalid); + bool isvalid, + bool isready); static void index_update_stats(Relation rel, - bool hasindex, bool isprimary, + bool hasindex, double reltuples); static void IndexCheckExclusion(Relation heapRelation, Relation indexRelation, @@ -194,18 +213,19 @@ relationHasPrimaryKey(Relation rel) void index_check_primary_key(Relation heapRel, IndexInfo *indexInfo, - bool is_alter_table) + bool is_alter_table, + IndexStmt *stmt) { List *cmds; int i; /* - * If ALTER TABLE, check that there isn't already a PRIMARY KEY. In CREATE - * TABLE, we have faith that the parser rejected multiple pkey clauses; - * and CREATE INDEX doesn't have a way to say PRIMARY KEY, so it's no - * problem either. + * If ALTER TABLE and CREATE TABLE .. PARTITION OF, check that there isn't + * already a PRIMARY KEY. 
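SerializedReindexState above carries the reindex-pending state to parallel workers as a single pointer-free blob, which is why it ends in a flexible array member. A sketch of how such a struct is sized and filled, assuming the typedef from the hunk above is in scope (allocation context, helper name and field values are placeholders):

#include "postgres.h"

static SerializedReindexState *
make_reindex_state(Oid heapOid, Oid indexOid, int npending, const Oid *pending)
{
	/* fixed header plus trailing Oid array, sized via offsetof() */
	Size		sz = offsetof(SerializedReindexState, pendingReindexedIndexes) +
		npending * sizeof(Oid);
	SerializedReindexState *state = (SerializedReindexState *) palloc0(sz);

	state->currentlyReindexedHeap = heapOid;
	state->currentlyReindexedIndex = indexOid;
	state->numPendingReindexedIndexes = npending;
	memcpy(state->pendingReindexedIndexes, pending, npending * sizeof(Oid));

	return state;
}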
In CREATE TABLE for an ordinary relations, we + * have faith that the parser rejected multiple pkey clauses; and CREATE + * INDEX doesn't have a way to say PRIMARY KEY, so it's no problem either. */ - if (is_alter_table && + if ((is_alter_table || heapRel->rd_rel->relispartition) && relationHasPrimaryKey(heapRel)) { ereport(ERROR, @@ -219,9 +239,9 @@ index_check_primary_key(Relation heapRel, * null, otherwise attempt to ALTER TABLE .. SET NOT NULL */ cmds = NIL; - for (i = 0; i < indexInfo->ii_NumIndexAttrs; i++) + for (i = 0; i < indexInfo->ii_NumIndexKeyAttrs; i++) { - AttrNumber attnum = indexInfo->ii_KeyAttrNumbers[i]; + AttrNumber attnum = indexInfo->ii_IndexAttrNumbers[i]; HeapTuple atttuple; Form_pg_attribute attform; @@ -262,7 +282,11 @@ index_check_primary_key(Relation heapRel, * unduly. */ if (cmds) + { + EventTriggerAlterTableStart((Node *) stmt); AlterTableInternal(RelationGetRelid(heapRel), cmds, true); + EventTriggerAlterTableEnd(); + } } /* @@ -279,6 +303,7 @@ ConstructTupleDescriptor(Relation heapRelation, Oid *classObjectId) { int numatts = indexInfo->ii_NumIndexAttrs; + int numkeyatts = indexInfo->ii_NumIndexKeyAttrs; ListCell *colnames_item = list_head(indexColNames); ListCell *indexpr_item = list_head(indexInfo->ii_Expressions); IndexAmRoutine *amroutine; @@ -300,23 +325,34 @@ ConstructTupleDescriptor(Relation heapRelation, indexTupDesc = CreateTemplateTupleDesc(numatts, false); /* - * For simple index columns, we copy the pg_attribute row from the parent - * relation and modify it as necessary. For expressions we have to cons - * up a pg_attribute row the hard way. + * Fill in the pg_attribute row. */ for (i = 0; i < numatts; i++) { - AttrNumber atnum = indexInfo->ii_KeyAttrNumbers[i]; - Form_pg_attribute to = indexTupDesc->attrs[i]; + AttrNumber atnum = indexInfo->ii_IndexAttrNumbers[i]; + Form_pg_attribute to = TupleDescAttr(indexTupDesc, i); HeapTuple tuple; Form_pg_type typeTup; Form_pg_opclass opclassTup; Oid keyType; + MemSet(to, 0, ATTRIBUTE_FIXED_PART_SIZE); + to->attnum = i + 1; + to->attstattarget = -1; + to->attcacheoff = -1; + to->attislocal = true; + to->attcollation = (i < numkeyatts) ? + collationObjectId[i] : InvalidOid; + + /* + * For simple index columns, we copy some pg_attribute fields from the + * parent relation. For expressions we have to look at the expression + * result. + */ if (atnum != 0) { /* Simple index column */ - Form_pg_attribute from; + const FormData_pg_attribute *from; if (atnum < 0) { @@ -333,37 +369,24 @@ ConstructTupleDescriptor(Relation heapRelation, */ if (atnum > natts) /* safety check */ elog(ERROR, "invalid column number %d", atnum); - from = heapTupDesc->attrs[AttrNumberGetAttrOffset(atnum)]; + from = TupleDescAttr(heapTupDesc, + AttrNumberGetAttrOffset(atnum)); } - /* - * now that we've determined the "from", let's copy the tuple desc - * data... 
- */ - memcpy(to, from, ATTRIBUTE_FIXED_PART_SIZE); - - /* - * Fix the stuff that should not be the same as the underlying - * attr - */ - to->attnum = i + 1; - - to->attstattarget = -1; - to->attcacheoff = -1; - to->attnotnull = false; - to->atthasdef = false; - to->attidentity = '\0'; - to->attislocal = true; - to->attinhcount = 0; - to->attcollation = collationObjectId[i]; + namecpy(&to->attname, &from->attname); + to->atttypid = from->atttypid; + to->attlen = from->attlen; + to->attndims = from->attndims; + to->atttypmod = from->atttypmod; + to->attbyval = from->attbyval; + to->attstorage = from->attstorage; + to->attalign = from->attalign; } else { /* Expressional index */ Node *indexkey; - MemSet(to, 0, ATTRIBUTE_FIXED_PART_SIZE); - if (indexpr_item == NULL) /* shouldn't happen */ elog(ERROR, "too few entries in indexprs list"); indexkey = (Node *) lfirst(indexpr_item); @@ -379,19 +402,14 @@ ConstructTupleDescriptor(Relation heapRelation, typeTup = (Form_pg_type) GETSTRUCT(tuple); /* - * Assign some of the attributes values. Leave the rest as 0. + * Assign some of the attributes values. Leave the rest. */ - to->attnum = i + 1; to->atttypid = keyType; to->attlen = typeTup->typlen; to->attbyval = typeTup->typbyval; to->attstorage = typeTup->typstorage; to->attalign = typeTup->typalign; - to->attstattarget = -1; - to->attcacheoff = -1; to->atttypmod = exprTypmod(indexkey); - to->attislocal = true; - to->attcollation = collationObjectId[i]; ReleaseSysCache(tuple); @@ -426,32 +444,40 @@ ConstructTupleDescriptor(Relation heapRelation, /* * Check the opclass and index AM to see if either provides a keytype - * (overriding the attribute type). Opclass takes precedence. + * (overriding the attribute type). Opclass (if exists) takes + * precedence. */ - tuple = SearchSysCache1(CLAOID, ObjectIdGetDatum(classObjectId[i])); - if (!HeapTupleIsValid(tuple)) - elog(ERROR, "cache lookup failed for opclass %u", - classObjectId[i]); - opclassTup = (Form_pg_opclass) GETSTRUCT(tuple); - if (OidIsValid(opclassTup->opckeytype)) - keyType = opclassTup->opckeytype; - else - keyType = amroutine->amkeytype; + keyType = amroutine->amkeytype; /* - * If keytype is specified as ANYELEMENT, and opcintype is ANYARRAY, - * then the attribute type must be an array (else it'd not have - * matched this opclass); use its element type. + * Code below is concerned to the opclasses which are not used with + * the included columns. */ - if (keyType == ANYELEMENTOID && opclassTup->opcintype == ANYARRAYOID) + if (i < indexInfo->ii_NumIndexKeyAttrs) { - keyType = get_base_element_type(to->atttypid); - if (!OidIsValid(keyType)) - elog(ERROR, "could not get element type of array type %u", - to->atttypid); - } + tuple = SearchSysCache1(CLAOID, ObjectIdGetDatum(classObjectId[i])); + if (!HeapTupleIsValid(tuple)) + elog(ERROR, "cache lookup failed for opclass %u", + classObjectId[i]); + opclassTup = (Form_pg_opclass) GETSTRUCT(tuple); + if (OidIsValid(opclassTup->opckeytype)) + keyType = opclassTup->opckeytype; - ReleaseSysCache(tuple); + /* + * If keytype is specified as ANYELEMENT, and opcintype is + * ANYARRAY, then the attribute type must be an array (else it'd + * not have matched this opclass); use its element type. 
+ */ + if (keyType == ANYELEMENTOID && opclassTup->opcintype == ANYARRAYOID) + { + keyType = get_base_element_type(to->atttypid); + if (!OidIsValid(keyType)) + elog(ERROR, "could not get element type of array type %u", + to->atttypid); + } + + ReleaseSysCache(tuple); + } /* * If a key type different from the heap value is specified, update @@ -495,7 +521,7 @@ InitializeAttributeOids(Relation indexRelation, tupleDescriptor = RelationGetDescr(indexRelation); for (i = 0; i < numatts; i += 1) - tupleDescriptor->attrs[i]->attrelid = indexoid; + TupleDescAttr(tupleDescriptor, i)->attrelid = indexoid; } /* ---------------------------------------------------------------- @@ -524,14 +550,11 @@ AppendAttributeTuples(Relation indexRelation, int numatts) for (i = 0; i < numatts; i++) { - /* - * There used to be very grotty code here to set these fields, but I - * think it's unnecessary. They should be set already. - */ - Assert(indexTupDesc->attrs[i]->attnum == i + 1); - Assert(indexTupDesc->attrs[i]->attcacheoff == -1); + Form_pg_attribute attr = TupleDescAttr(indexTupDesc, i); + + Assert(attr->attnum == i + 1); - InsertPgAttributeTuple(pg_attribute, indexTupDesc->attrs[i], indstate); + InsertPgAttributeTuple(pg_attribute, attr, indstate); } CatalogCloseIndexes(indstate); @@ -548,6 +571,7 @@ AppendAttributeTuples(Relation indexRelation, int numatts) static void UpdateIndexRelation(Oid indexoid, Oid heapoid, + Oid parentIndexOid, IndexInfo *indexInfo, Oid *collationOids, Oid *classOids, @@ -555,7 +579,8 @@ UpdateIndexRelation(Oid indexoid, bool primary, bool isexclusion, bool immediate, - bool isvalid) + bool isvalid, + bool isready) { int2vector *indkey; oidvector *indcollation; @@ -575,10 +600,10 @@ UpdateIndexRelation(Oid indexoid, */ indkey = buildint2vector(NULL, indexInfo->ii_NumIndexAttrs); for (i = 0; i < indexInfo->ii_NumIndexAttrs; i++) - indkey->values[i] = indexInfo->ii_KeyAttrNumbers[i]; - indcollation = buildoidvector(collationOids, indexInfo->ii_NumIndexAttrs); - indclass = buildoidvector(classOids, indexInfo->ii_NumIndexAttrs); - indoption = buildint2vector(coloptions, indexInfo->ii_NumIndexAttrs); + indkey->values[i] = indexInfo->ii_IndexAttrNumbers[i]; + indcollation = buildoidvector(collationOids, indexInfo->ii_NumIndexKeyAttrs); + indclass = buildoidvector(classOids, indexInfo->ii_NumIndexKeyAttrs); + indoption = buildint2vector(coloptions, indexInfo->ii_NumIndexKeyAttrs); /* * Convert the index expressions (if any) to a text datum @@ -622,6 +647,7 @@ UpdateIndexRelation(Oid indexoid, values[Anum_pg_index_indexrelid - 1] = ObjectIdGetDatum(indexoid); values[Anum_pg_index_indrelid - 1] = ObjectIdGetDatum(heapoid); values[Anum_pg_index_indnatts - 1] = Int16GetDatum(indexInfo->ii_NumIndexAttrs); + values[Anum_pg_index_indnkeyatts - 1] = Int16GetDatum(indexInfo->ii_NumIndexKeyAttrs); values[Anum_pg_index_indisunique - 1] = BoolGetDatum(indexInfo->ii_Unique); values[Anum_pg_index_indisprimary - 1] = BoolGetDatum(primary); values[Anum_pg_index_indisexclusion - 1] = BoolGetDatum(isexclusion); @@ -629,8 +655,7 @@ UpdateIndexRelation(Oid indexoid, values[Anum_pg_index_indisclustered - 1] = BoolGetDatum(false); values[Anum_pg_index_indisvalid - 1] = BoolGetDatum(isvalid); values[Anum_pg_index_indcheckxmin - 1] = BoolGetDatum(false); - /* we set isvalid and isready the same way */ - values[Anum_pg_index_indisready - 1] = BoolGetDatum(isvalid); + values[Anum_pg_index_indisready - 1] = BoolGetDatum(isready); values[Anum_pg_index_indislive - 1] = BoolGetDatum(true); 
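The UpdateIndexRelation() hunk above is where the key/non-key split surfaces in pg_index: indkey is built from ii_NumIndexAttrs, while indcollation, indclass and indoption only cover the first ii_NumIndexKeyAttrs entries, since included columns carry no opclass, collation or per-column options. A small illustration with made-up values, assuming a covering index with two key columns plus one included column (the header list is indicative):

#include "postgres.h"
#include "utils/builtins.h"

static void
show_key_vs_total_attrs(void)
{
	/* ii_NumIndexAttrs = 3, ii_NumIndexKeyAttrs = 2 for this example */
	int16		attnums[3] = {1, 2, 3};		/* a, b, and the included column */
	Oid			opclasses[2] = {InvalidOid, InvalidOid};	/* placeholders */
	int16		options[2] = {0, 0};

	int2vector *indkey = buildint2vector(attnums, 3);		/* all columns */
	oidvector  *indclass = buildoidvector(opclasses, 2);	/* key columns only */
	int2vector *indoption = buildint2vector(options, 2);	/* key columns only */

	(void) indkey;
	(void) indclass;
	(void) indoption;
}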
values[Anum_pg_index_indisreplident - 1] = BoolGetDatum(false); values[Anum_pg_index_indkey - 1] = PointerGetDatum(indkey); @@ -667,6 +692,10 @@ UpdateIndexRelation(Oid indexoid, * indexRelationId: normally, pass InvalidOid to let this routine * generate an OID for the index. During bootstrap this may be * nonzero to specify a preselected OID. + * parentIndexRelid: if creating an index partition, the OID of the + * parent index; otherwise InvalidOid. + * parentConstraintId: if creating a constraint on a partition, the OID + * of the constraint in the parent; otherwise InvalidOid. * relFileNode: normally, pass InvalidOid to get new storage. May be * nonzero to attach an existing valid build. * indexInfo: same info executor uses to insert into the index @@ -677,19 +706,28 @@ UpdateIndexRelation(Oid indexoid, * classObjectId: array of index opclass OIDs, one per index column * coloptions: array of per-index-column indoption settings * reloptions: AM-specific options - * isprimary: index is a PRIMARY KEY - * isconstraint: index is owned by PRIMARY KEY, UNIQUE, or EXCLUSION constraint - * deferrable: constraint is DEFERRABLE - * initdeferred: constraint is INITIALLY DEFERRED + * flags: bitmask that can include any combination of these bits: + * INDEX_CREATE_IS_PRIMARY + * the index is a primary key + * INDEX_CREATE_ADD_CONSTRAINT: + * invoke index_constraint_create also + * INDEX_CREATE_SKIP_BUILD: + * skip the index_build() step for the moment; caller must do it + * later (typically via reindex_index()) + * INDEX_CREATE_CONCURRENT: + * do not lock the table against writers. The index will be + * marked "invalid" and the caller must take additional steps + * to fix it up. + * INDEX_CREATE_IF_NOT_EXISTS: + * do not throw an error if a relation with the same name + * already exists. + * INDEX_CREATE_PARTITIONED: + * create a partitioned index (table must be partitioned) + * constr_flags: flags passed to index_constraint_create + * (only if INDEX_CREATE_ADD_CONSTRAINT is set) * allow_system_table_mods: allow table to be a system catalog - * skip_build: true to skip the index_build() step for the moment; caller - * must do it later (typically via reindex_index()) - * concurrent: if true, do not lock the table against writers. The index - * will be marked "invalid" and the caller must take additional steps - * to fix it up. * is_internal: if true, post creation hook for new index - * if_not_exists: if true, do not throw an error if a relation with - * the same name already exists. + * constraintId: if not NULL, receives OID of created constraint * * Returns the OID of the created index. 
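With the former boolean parameters folded into bitmasks, a primary-key index that should also create its constraint now expresses that through flags and constr_flags; the full parameter list follows just below. A hedged call sketch (the helper name, index name and all caller-supplied arguments are placeholders, and the header list is indicative):

#include "postgres.h"
#include "catalog/index.h"
#include "nodes/execnodes.h"
#include "nodes/pg_list.h"
#include "utils/rel.h"

static Oid
create_pkey_index(Relation heapRel, IndexInfo *indexInfo, List *indexColNames,
				  Oid amOid, Oid tablespaceOid,
				  Oid *collationIds, Oid *opclassIds, int16 *coloptions)
{
	Oid			constraintOid;
	bits16		flags = INDEX_CREATE_IS_PRIMARY | INDEX_CREATE_ADD_CONSTRAINT;
	bits16		constr_flags = 0;	/* could add INDEX_CONSTR_CREATE_DEFERRABLE */

	return index_create(heapRel,
						"t_pkey",		/* hypothetical index name */
						InvalidOid,		/* let index_create pick an OID */
						InvalidOid,		/* no parent index */
						InvalidOid,		/* no parent constraint */
						InvalidOid,		/* new storage */
						indexInfo,
						indexColNames,
						amOid,
						tablespaceOid,
						collationIds,
						opclassIds,
						coloptions,
						(Datum) 0,		/* reloptions */
						flags,
						constr_flags,
						false,			/* allow_system_table_mods */
						false,			/* is_internal */
						&constraintOid);
}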
*/ @@ -697,6 +735,8 @@ Oid index_create(Relation heapRelation, const char *indexRelationName, Oid indexRelationId, + Oid parentIndexRelid, + Oid parentConstraintId, Oid relFileNode, IndexInfo *indexInfo, List *indexColNames, @@ -706,15 +746,11 @@ index_create(Relation heapRelation, Oid *classObjectId, int16 *coloptions, Datum reloptions, - bool isprimary, - bool isconstraint, - bool deferrable, - bool initdeferred, + bits16 flags, + bits16 constr_flags, bool allow_system_table_mods, - bool skip_build, - bool concurrent, bool is_internal, - bool if_not_exists) + Oid *constraintId) { Oid heapRelationId = RelationGetRelid(heapRelation); Relation pg_class; @@ -726,7 +762,19 @@ index_create(Relation heapRelation, Oid namespaceId; int i; char relpersistence; - + bool isprimary = (flags & INDEX_CREATE_IS_PRIMARY) != 0; + bool invalid = (flags & INDEX_CREATE_INVALID) != 0; + bool concurrent = (flags & INDEX_CREATE_CONCURRENT) != 0; + bool partitioned = (flags & INDEX_CREATE_PARTITIONED) != 0; + char relkind; + + /* constraint flags can only be set when a constraint is requested */ + Assert((constr_flags == 0) || + ((flags & INDEX_CREATE_ADD_CONSTRAINT) != 0)); + /* partitioned indexes must never be "built" by themselves */ + Assert(!partitioned || (flags & INDEX_CREATE_SKIP_BUILD)); + + relkind = partitioned ? RELKIND_PARTITIONED_INDEX : RELKIND_INDEX; is_exclusion = (indexInfo->ii_ExclusionOps != NULL); pg_class = heap_open(RelationRelationId, RowExclusiveLock); @@ -789,9 +837,15 @@ index_create(Relation heapRelation, if (shared_relation && tableSpaceId != GLOBALTABLESPACE_OID) elog(ERROR, "shared relations must be placed in pg_global tablespace"); + /* + * Check for duplicate name (both as to the index, and as to the + * associated constraint if any). Such cases would fail on the relevant + * catalogs' unique indexes anyway, but we prefer to give a friendlier + * error message. + */ if (get_relname_relid(indexRelationName, namespaceId)) { - if (if_not_exists) + if ((flags & INDEX_CREATE_IF_NOT_EXISTS) != 0) { ereport(NOTICE, (errcode(ERRCODE_DUPLICATE_TABLE), @@ -807,6 +861,20 @@ index_create(Relation heapRelation, indexRelationName))); } + if ((flags & INDEX_CREATE_ADD_CONSTRAINT) != 0 && + ConstraintNameIsUsed(CONSTRAINT_RELATION, heapRelationId, + indexRelationName)) + { + /* + * INDEX_CREATE_IF_NOT_EXISTS does not apply here, since the + * conflicting constraint is not an index. + */ + ereport(ERROR, + (errcode(ERRCODE_DUPLICATE_OBJECT), + errmsg("constraint \"%s\" for relation \"%s\" already exists", + indexRelationName, RelationGetRelationName(heapRelation)))); + } + /* * construct tuple descriptor for index tuples */ @@ -844,9 +912,9 @@ index_create(Relation heapRelation, } /* - * create the index relation's relcache entry and physical disk file. (If - * we fail further down, it's the smgr's responsibility to remove the disk - * file again.) + * create the index relation's relcache entry and, if necessary, the + * physical disk file. (If we fail further down, it's the smgr's + * responsibility to remove the disk file again, if any.) */ indexRelation = heap_create(indexRelationName, namespaceId, @@ -854,7 +922,7 @@ index_create(Relation heapRelation, indexRelationId, relFileNode, indexTupDesc, - RELKIND_INDEX, + relkind, relpersistence, shared_relation, mapped_relation, @@ -863,7 +931,7 @@ index_create(Relation heapRelation, Assert(indexRelationId == RelationGetRelid(indexRelation)); /* - * Obtain exclusive lock on it. Although no other backends can see it + * Obtain exclusive lock on it. 
Although no other transactions can see it * until we commit, this prevents deadlock-risk complaints from lock * manager in cases such as CLUSTER. */ @@ -878,6 +946,7 @@ index_create(Relation heapRelation, indexRelation->rd_rel->relowner = heapRelation->rd_rel->relowner; indexRelation->rd_rel->relam = accessMethodObjectId; indexRelation->rd_rel->relhasoids = false; + indexRelation->rd_rel->relispartition = OidIsValid(parentIndexRelid); /* * store index's pg_class entry @@ -911,12 +980,27 @@ index_create(Relation heapRelation, * (Or, could define a rule to maintain the predicate) --Nels, Feb '92 * ---------------- */ - UpdateIndexRelation(indexRelationId, heapRelationId, indexInfo, + UpdateIndexRelation(indexRelationId, heapRelationId, parentIndexRelid, + indexInfo, collationObjectId, classObjectId, coloptions, isprimary, is_exclusion, - !deferrable, + (constr_flags & INDEX_CONSTR_CREATE_DEFERRABLE) == 0, + !concurrent && !invalid, !concurrent); + /* + * Register relcache invalidation on the indexes' heap relation, to + * maintain consistency of its index list + */ + CacheInvalidateRelcache(heapRelation); + + /* update pg_inherits and the parent's relhassubclass, if needed */ + if (OidIsValid(parentIndexRelid)) + { + StoreSingleInheritance(indexRelationId, parentIndexRelid, 1); + SetRelationHasSubclass(parentIndexRelid, true); + } + /* * Register constraint and dependencies for the index. * @@ -940,9 +1024,10 @@ index_create(Relation heapRelation, myself.objectId = indexRelationId; myself.objectSubId = 0; - if (isconstraint) + if ((flags & INDEX_CREATE_ADD_CONSTRAINT) != 0) { char constraintType; + ObjectAddress localaddr; if (isprimary) constraintType = CONSTRAINT_PRIMARY; @@ -956,33 +1041,35 @@ index_create(Relation heapRelation, constraintType = 0; /* keep compiler quiet */ } - index_constraint_create(heapRelation, - indexRelationId, - indexInfo, - indexRelationName, - constraintType, - deferrable, - initdeferred, - false, /* already marked primary */ - false, /* pg_index entry is OK */ - false, /* no old dependencies */ - allow_system_table_mods, - is_internal); + localaddr = index_constraint_create(heapRelation, + indexRelationId, + parentConstraintId, + indexInfo, + indexRelationName, + constraintType, + constr_flags, + allow_system_table_mods, + is_internal); + if (constraintId) + *constraintId = localaddr.objectId; } else { bool have_simple_col = false; + DependencyType deptype; + + deptype = OidIsValid(parentIndexRelid) ? 
DEPENDENCY_INTERNAL_AUTO : DEPENDENCY_AUTO; /* Create auto dependencies on simply-referenced columns */ for (i = 0; i < indexInfo->ii_NumIndexAttrs; i++) { - if (indexInfo->ii_KeyAttrNumbers[i] != 0) + if (indexInfo->ii_IndexAttrNumbers[i] != 0) { referenced.classId = RelationRelationId; referenced.objectId = heapRelationId; - referenced.objectSubId = indexInfo->ii_KeyAttrNumbers[i]; + referenced.objectSubId = indexInfo->ii_IndexAttrNumbers[i]; - recordDependencyOn(&myself, &referenced, DEPENDENCY_AUTO); + recordDependencyOn(&myself, &referenced, deptype); have_simple_col = true; } @@ -1000,17 +1087,23 @@ index_create(Relation heapRelation, referenced.objectId = heapRelationId; referenced.objectSubId = 0; - recordDependencyOn(&myself, &referenced, DEPENDENCY_AUTO); + recordDependencyOn(&myself, &referenced, deptype); } + } + + /* Store dependency on parent index, if any */ + if (OidIsValid(parentIndexRelid)) + { + referenced.classId = RelationRelationId; + referenced.objectId = parentIndexRelid; + referenced.objectSubId = 0; - /* Non-constraint indexes can't be deferrable */ - Assert(!deferrable); - Assert(!initdeferred); + recordDependencyOn(&myself, &referenced, DEPENDENCY_INTERNAL_AUTO); } /* Store dependency on collations */ /* The default collation is pinned, so don't bother recording it */ - for (i = 0; i < indexInfo->ii_NumIndexAttrs; i++) + for (i = 0; i < indexInfo->ii_NumIndexKeyAttrs; i++) { if (OidIsValid(collationObjectId[i]) && collationObjectId[i] != DEFAULT_COLLATION_OID) @@ -1024,7 +1117,7 @@ index_create(Relation heapRelation, } /* Store dependency on operator classes */ - for (i = 0; i < indexInfo->ii_NumIndexAttrs; i++) + for (i = 0; i < indexInfo->ii_NumIndexKeyAttrs; i++) { referenced.classId = OperatorClassRelationId; referenced.objectId = classObjectId[i]; @@ -1056,9 +1149,7 @@ index_create(Relation heapRelation, else { /* Bootstrap mode - assert we weren't asked for constraint support */ - Assert(!isconstraint); - Assert(!deferrable); - Assert(!initdeferred); + Assert((flags & INDEX_CREATE_ADD_CONSTRAINT) == 0); } /* Post creation hook for new index */ @@ -1082,19 +1173,22 @@ index_create(Relation heapRelation, else Assert(indexRelation->rd_indexcxt != NULL); + indexRelation->rd_index->indnkeyatts = indexInfo->ii_NumIndexKeyAttrs; + /* * If this is bootstrap (initdb) time, then we don't actually fill in the * index yet. We'll be creating more indexes and classes later, so we * delay filling them in until just before we're done with bootstrapping. - * Similarly, if the caller specified skip_build then filling the index is - * delayed till later (ALTER TABLE can save work in some cases with this). - * Otherwise, we call the AM routine that constructs the index. + * Similarly, if the caller specified to skip the build then filling the + * index is delayed till later (ALTER TABLE can save work in some cases + * with this). Otherwise, we call the AM routine that constructs the + * index. */ if (IsBootstrapProcessingMode()) { index_register(heapRelationId, indexRelationId, indexInfo); } - else if (skip_build) + else if ((flags & INDEX_CREATE_SKIP_BUILD) != 0) { /* * Caller is responsible for filling the index later on. 
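
index_create()'s former boolean arguments are now carried in the two bitmask words consumed above (flags and constr_flags). As a rough caller-side illustration, here is a minimal sketch assuming a DefineIndex-like context supplies the relation, IndexInfo and per-column arrays; everything except the INDEX_CREATE_* / INDEX_CONSTR_CREATE_* macros and index_create() itself is a placeholder, not code from this patch.

    #include "postgres.h"
    #include "catalog/index.h"

    /* Sketch only: arguments other than the flag words come from the caller. */
    static Oid
    create_pkey_index_sketch(Relation heapRel, IndexInfo *indexInfo,
                             const char *indexName, List *indexColNames,
                             Oid amOid, Oid tablespaceOid,
                             Oid *collationIds, Oid *opclassIds,
                             int16 *coloptions, Datum reloptions)
    {
        bits16      flags = INDEX_CREATE_ADD_CONSTRAINT | INDEX_CREATE_IS_PRIMARY;
        bits16      constr_flags = INDEX_CONSTR_CREATE_MARK_AS_PRIMARY;
        Oid         constraintId = InvalidOid;

        return index_create(heapRel, indexName,
                            InvalidOid,     /* let a new index OID be chosen */
                            InvalidOid,     /* parentIndexRelid: not a partition */
                            InvalidOid,     /* parentConstraintId */
                            InvalidOid,     /* relFileNode: assign a new one */
                            indexInfo, indexColNames,
                            amOid, tablespaceOid,
                            collationIds, opclassIds, coloptions, reloptions,
                            flags, constr_flags,
                            false,          /* allow_system_table_mods */
                            false,          /* is_internal */
                            &constraintId); /* receives the pg_constraint OID */
    }
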
However, @@ -1103,14 +1197,14 @@ index_create(Relation heapRelation, */ index_update_stats(heapRelation, true, - isprimary, -1.0); /* Make the above update visible */ CommandCounterIncrement(); } else { - index_build(heapRelation, indexRelation, indexInfo, isprimary, false); + index_build(heapRelation, indexRelation, indexInfo, isprimary, false, + true); } /* @@ -1130,30 +1224,30 @@ index_create(Relation heapRelation, * * heapRelation: table owning the index (must be suitably locked by caller) * indexRelationId: OID of the index + * parentConstraintId: if constraint is on a partition, the OID of the + * constraint in the parent. * indexInfo: same info executor uses to insert into the index * constraintName: what it say (generally, should match name of index) * constraintType: one of CONSTRAINT_PRIMARY, CONSTRAINT_UNIQUE, or * CONSTRAINT_EXCLUSION - * deferrable: constraint is DEFERRABLE - * initdeferred: constraint is INITIALLY DEFERRED - * mark_as_primary: if true, set flags to mark index as primary key - * update_pgindex: if true, update pg_index row (else caller's done that) - * remove_old_dependencies: if true, remove existing dependencies of index - * on table's columns + * flags: bitmask that can include any combination of these bits: + * INDEX_CONSTR_CREATE_MARK_AS_PRIMARY: index is a PRIMARY KEY + * INDEX_CONSTR_CREATE_DEFERRABLE: constraint is DEFERRABLE + * INDEX_CONSTR_CREATE_INIT_DEFERRED: constraint is INITIALLY DEFERRED + * INDEX_CONSTR_CREATE_UPDATE_INDEX: update the pg_index row + * INDEX_CONSTR_CREATE_REMOVE_OLD_DEPS: remove existing dependencies + * of index on table's columns * allow_system_table_mods: allow table to be a system catalog * is_internal: index is constructed due to internal process */ ObjectAddress index_constraint_create(Relation heapRelation, Oid indexRelationId, + Oid parentConstraintId, IndexInfo *indexInfo, const char *constraintName, char constraintType, - bool deferrable, - bool initdeferred, - bool mark_as_primary, - bool update_pgindex, - bool remove_old_dependencies, + bits16 constr_flags, bool allow_system_table_mods, bool is_internal) { @@ -1161,6 +1255,16 @@ index_constraint_create(Relation heapRelation, ObjectAddress myself, referenced; Oid conOid; + bool deferrable; + bool initdeferred; + bool mark_as_primary; + bool islocal; + bool noinherit; + int inhcount; + + deferrable = (constr_flags & INDEX_CONSTR_CREATE_DEFERRABLE) != 0; + initdeferred = (constr_flags & INDEX_CONSTR_CREATE_INIT_DEFERRED) != 0; + mark_as_primary = (constr_flags & INDEX_CONSTR_CREATE_MARK_AS_PRIMARY) != 0; /* constraint creation support doesn't work while bootstrapping */ Assert(!IsBootstrapProcessingMode()); @@ -1187,10 +1291,23 @@ index_constraint_create(Relation heapRelation, * has any expressions or predicate, but we'd never be turning such an * index into a UNIQUE or PRIMARY KEY constraint. */ - if (remove_old_dependencies) + if (constr_flags & INDEX_CONSTR_CREATE_REMOVE_OLD_DEPS) deleteDependencyRecordsForClass(RelationRelationId, indexRelationId, RelationRelationId, DEPENDENCY_AUTO); + if (OidIsValid(parentConstraintId)) + { + islocal = false; + inhcount = 1; + noinherit = false; + } + else + { + islocal = true; + inhcount = 0; + noinherit = true; + } + /* * Construct a pg_constraint entry. 
*/ @@ -1200,8 +1317,10 @@ index_constraint_create(Relation heapRelation, deferrable, initdeferred, true, + parentConstraintId, RelationGetRelid(heapRelation), - indexInfo->ii_KeyAttrNumbers, + indexInfo->ii_IndexAttrNumbers, + indexInfo->ii_NumIndexKeyAttrs, indexInfo->ii_NumIndexAttrs, InvalidOid, /* no domain */ indexRelationId, /* index OID */ @@ -1217,10 +1336,9 @@ index_constraint_create(Relation heapRelation, indexInfo->ii_ExclusionOps, NULL, /* no check constraint */ NULL, - NULL, - true, /* islocal */ - 0, /* inhcount */ - true, /* noinherit */ + islocal, + inhcount, + noinherit, is_internal); /* @@ -1239,6 +1357,18 @@ index_constraint_create(Relation heapRelation, recordDependencyOn(&myself, &referenced, DEPENDENCY_INTERNAL); + /* + * Also, if this is a constraint on a partition, mark it as depending on + * the constraint in the parent. + */ + if (OidIsValid(parentConstraintId)) + { + ObjectAddress parentConstr; + + ObjectAddressSet(parentConstr, ConstraintRelationId, parentConstraintId); + recordDependencyOn(&referenced, &parentConstr, DEPENDENCY_INTERNAL_AUTO); + } + /* * If the constraint is deferrable, create the deferred uniqueness * checking trigger. (The trigger will be given an internal dependency on @@ -1266,24 +1396,10 @@ index_constraint_create(Relation heapRelation, trigger->constrrel = NULL; (void) CreateTrigger(trigger, NULL, RelationGetRelid(heapRelation), - InvalidOid, conOid, indexRelationId, true); + InvalidOid, conOid, indexRelationId, InvalidOid, + InvalidOid, NULL, true, false); } - /* - * If needed, mark the table as having a primary key. We assume it can't - * have been so marked already, so no need to clear the flag in the other - * case. - * - * Note: this might better be done by callers. We do it here to avoid - * exposing index_update_stats() globally, but that wouldn't be necessary - * if relhaspkey went away. - */ - if (mark_as_primary) - index_update_stats(heapRelation, - true, - true, - -1.0); - /* * If needed, mark the index as primary and/or deferred in pg_index. * @@ -1292,7 +1408,8 @@ index_constraint_create(Relation heapRelation, * is a risk that concurrent readers of the table will miss seeing this * index at all. */ - if (update_pgindex && (mark_as_primary || deferrable)) + if ((constr_flags & INDEX_CONSTR_CREATE_UPDATE_INDEX) && + (mark_as_primary || deferrable)) { Relation pg_index; HeapTuple indexTuple; @@ -1549,9 +1666,10 @@ index_drop(Oid indexId, bool concurrent) } /* - * Schedule physical removal of the files + * Schedule physical removal of the files (if any) */ - RelationDropStorage(userIndexRelation); + if (userIndexRelation->rd_rel->relkind != RELKIND_PARTITIONED_INDEX) + RelationDropStorage(userIndexRelation); /* * Close and flush the index's relcache entry, to ensure relcache doesn't @@ -1571,7 +1689,8 @@ index_drop(Oid indexId, bool concurrent) if (!HeapTupleIsValid(tuple)) elog(ERROR, "cache lookup failed for index %u", indexId); - hasexprs = !heap_attisnull(tuple, Anum_pg_index_indexprs); + hasexprs = !heap_attisnull(tuple, Anum_pg_index_indexprs, + RelationGetDescr(indexRelation)); CatalogTupleDelete(indexRelation, &tuple->t_self); @@ -1595,6 +1714,11 @@ index_drop(Oid indexId, bool concurrent) */ DeleteRelationTuple(indexId); + /* + * fix INHERITS relation + */ + DeleteInheritsTuple(indexId, InvalidOid); + /* * We are presently too lazy to attempt to compute the new correct value * of relhasindex (the next VACUUM will fix it if necessary). 
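
index_constraint_create() likewise takes its options as a single bitmask now. A minimal caller-side sketch, loosely modeled on an ALTER TABLE ... ADD CONSTRAINT ... USING INDEX style path; the stmt-derived booleans and other arguments are assumed to come from the caller and are not part of this patch.

    #include "postgres.h"
    #include "catalog/index.h"
    #include "catalog/objectaddress.h"
    #include "catalog/pg_constraint.h"

    /* Sketch only: everything but the flag macros comes from the caller. */
    static ObjectAddress
    attach_constraint_sketch(Relation heapRel, Oid indexOid, IndexInfo *indexInfo,
                             const char *constraintName, bool is_primary,
                             bool is_deferrable, bool init_deferred)
    {
        bits16      constr_flags = INDEX_CONSTR_CREATE_UPDATE_INDEX |
                                   INDEX_CONSTR_CREATE_REMOVE_OLD_DEPS;

        if (is_primary)
            constr_flags |= INDEX_CONSTR_CREATE_MARK_AS_PRIMARY;
        if (is_deferrable)
            constr_flags |= INDEX_CONSTR_CREATE_DEFERRABLE;
        if (init_deferred)
            constr_flags |= INDEX_CONSTR_CREATE_INIT_DEFERRED;

        return index_constraint_create(heapRel,
                                       indexOid,
                                       InvalidOid,  /* no parent constraint */
                                       indexInfo,
                                       constraintName,
                                       is_primary ? CONSTRAINT_PRIMARY
                                                  : CONSTRAINT_UNIQUE,
                                       constr_flags,
                                       false,       /* allow_system_table_mods */
                                       false);      /* is_internal */
    }
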
So there is @@ -1641,16 +1765,20 @@ BuildIndexInfo(Relation index) IndexInfo *ii = makeNode(IndexInfo); Form_pg_index indexStruct = index->rd_index; int i; - int numKeys; + int numAtts; /* check the number of keys, and copy attr numbers into the IndexInfo */ - numKeys = indexStruct->indnatts; - if (numKeys < 1 || numKeys > INDEX_MAX_KEYS) + numAtts = indexStruct->indnatts; + if (numAtts < 1 || numAtts > INDEX_MAX_KEYS) elog(ERROR, "invalid indnatts %d for index %u", - numKeys, RelationGetRelid(index)); - ii->ii_NumIndexAttrs = numKeys; - for (i = 0; i < numKeys; i++) - ii->ii_KeyAttrNumbers[i] = indexStruct->indkey.values[i]; + numAtts, RelationGetRelid(index)); + ii->ii_NumIndexAttrs = numAtts; + ii->ii_NumIndexKeyAttrs = indexStruct->indnkeyatts; + Assert(ii->ii_NumIndexKeyAttrs != 0); + Assert(ii->ii_NumIndexKeyAttrs <= ii->ii_NumIndexAttrs); + + for (i = 0; i < numAtts; i++) + ii->ii_IndexAttrNumbers[i] = indexStruct->indkey.values[i]; /* fetch any expressions needed for expressional indexes */ ii->ii_Expressions = RelationGetIndexExpressions(index); @@ -1686,14 +1814,134 @@ BuildIndexInfo(Relation index) /* initialize index-build state to default */ ii->ii_Concurrent = false; ii->ii_BrokenHotChain = false; + ii->ii_ParallelWorkers = 0; /* set up for possible use by index AM */ + ii->ii_Am = index->rd_rel->relam; ii->ii_AmCache = NULL; ii->ii_Context = CurrentMemoryContext; return ii; } +/* + * CompareIndexInfo + * Return whether the properties of two indexes (in different tables) + * indicate that they have the "same" definitions. + * + * Note: passing collations and opfamilies separately is a kludge. Adding + * them to IndexInfo may result in better coding here and elsewhere. + * + * Use convert_tuples_by_name_map(index2, index1) to build the attmap. + */ +bool +CompareIndexInfo(IndexInfo *info1, IndexInfo *info2, + Oid *collations1, Oid *collations2, + Oid *opfamilies1, Oid *opfamilies2, + AttrNumber *attmap, int maplen) +{ + int i; + + if (info1->ii_Unique != info2->ii_Unique) + return false; + + /* indexes are only equivalent if they have the same access method */ + if (info1->ii_Am != info2->ii_Am) + return false; + + /* and same number of attributes */ + if (info1->ii_NumIndexAttrs != info2->ii_NumIndexAttrs) + return false; + + /* and same number of key attributes */ + if (info1->ii_NumIndexKeyAttrs != info2->ii_NumIndexKeyAttrs) + return false; + + + /* + * and columns match through the attribute map (actual attribute numbers + * might differ!) Note that this implies that index columns that are + * expressions appear in the same positions. We will next compare the + * expressions themselves. + */ + for (i = 0; i < info1->ii_NumIndexAttrs; i++) + { + if (maplen < info2->ii_IndexAttrNumbers[i]) + elog(ERROR, "incorrect attribute map"); + + /* ignore expressions at this stage */ + if ((info1->ii_IndexAttrNumbers[i] != InvalidAttrNumber) && + (attmap[info2->ii_IndexAttrNumbers[i] - 1] != + info1->ii_IndexAttrNumbers[i])) + return false; + + /* collation and opfamily is not valid for including columns */ + if (i >= info1->ii_NumIndexKeyAttrs) + continue; + + if (collations1[i] != collations2[i]) + return false; + if (opfamilies1[i] != opfamilies2[i]) + return false; + } + + /* + * For expression indexes: either both are expression indexes, or neither + * is; if they are, make sure the expressions match. 
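
CompareIndexInfo() is what lets the partitioned-index machinery decide whether an index that already exists on a partition can stand in for the parent's index. A minimal sketch of such a check, assuming the caller already has both tables and both indexes open and locked; the helper name and variables are illustrative only.

    #include "postgres.h"
    #include "access/tupconvert.h"
    #include "catalog/index.h"
    #include "utils/rel.h"

    /* Sketch: does childIdx (on childTbl) match parentIdx (on parentTbl)? */
    static bool
    partition_index_matches(Relation parentTbl, Relation parentIdx,
                            Relation childTbl, Relation childIdx)
    {
        IndexInfo  *parentInfo = BuildIndexInfo(parentIdx);
        IndexInfo  *childInfo = BuildIndexInfo(childIdx);
        AttrNumber *attmap;
        int         maplen = RelationGetDescr(parentTbl)->natts;

        /* attmap[parent attno - 1] = corresponding child attno */
        attmap = convert_tuples_by_name_map(RelationGetDescr(childTbl),
                                            RelationGetDescr(parentTbl),
                                            gettext_noop("could not convert row type"));

        return CompareIndexInfo(childInfo, parentInfo,
                                childIdx->rd_indcollation,
                                parentIdx->rd_indcollation,
                                childIdx->rd_opfamily,
                                parentIdx->rd_opfamily,
                                attmap, maplen);
    }
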
+ */ + if ((info1->ii_Expressions != NIL) != (info2->ii_Expressions != NIL)) + return false; + if (info1->ii_Expressions != NIL) + { + bool found_whole_row; + Node *mapped; + + mapped = map_variable_attnos((Node *) info2->ii_Expressions, + 1, 0, attmap, maplen, + InvalidOid, &found_whole_row); + if (found_whole_row) + { + /* + * we could throw an error here, but seems out of scope for this + * routine. + */ + return false; + } + + if (!equal(info1->ii_Expressions, mapped)) + return false; + } + + /* Partial index predicates must be identical, if they exist */ + if ((info1->ii_Predicate == NULL) != (info2->ii_Predicate == NULL)) + return false; + if (info1->ii_Predicate != NULL) + { + bool found_whole_row; + Node *mapped; + + mapped = map_variable_attnos((Node *) info2->ii_Predicate, + 1, 0, attmap, maplen, + InvalidOid, &found_whole_row); + if (found_whole_row) + { + /* + * we could throw an error here, but seems out of scope for this + * routine. + */ + return false; + } + if (!equal(info1->ii_Predicate, mapped)) + return false; + } + + /* No support currently for comparing exclusion indexes. */ + if (info1->ii_ExclusionOps != NULL || info2->ii_ExclusionOps != NULL) + return false; + + return true; +} + /* ---------------- * BuildSpeculativeIndexInfo * Add extra state to IndexInfo record @@ -1709,9 +1957,11 @@ BuildIndexInfo(Relation index) void BuildSpeculativeIndexInfo(Relation index, IndexInfo *ii) { - int ncols = index->rd_rel->relnatts; + int indnkeyatts; int i; + indnkeyatts = IndexRelationGetNumberOfKeyAttributes(index); + /* * fetch info for checking unique indexes */ @@ -1720,16 +1970,16 @@ BuildSpeculativeIndexInfo(Relation index, IndexInfo *ii) if (index->rd_rel->relam != BTREE_AM_OID) elog(ERROR, "unexpected non-btree speculative unique index"); - ii->ii_UniqueOps = (Oid *) palloc(sizeof(Oid) * ncols); - ii->ii_UniqueProcs = (Oid *) palloc(sizeof(Oid) * ncols); - ii->ii_UniqueStrats = (uint16 *) palloc(sizeof(uint16) * ncols); + ii->ii_UniqueOps = (Oid *) palloc(sizeof(Oid) * indnkeyatts); + ii->ii_UniqueProcs = (Oid *) palloc(sizeof(Oid) * indnkeyatts); + ii->ii_UniqueStrats = (uint16 *) palloc(sizeof(uint16) * indnkeyatts); /* * We have to look up the operator's strategy number. This provides a * cross-check that the operator does match the index. */ /* We need the func OIDs and strategy numbers too */ - for (i = 0; i < ncols; i++) + for (i = 0; i < indnkeyatts; i++) { ii->ii_UniqueStrats[i] = BTEqualStrategyNumber; ii->ii_UniqueOps[i] = @@ -1787,7 +2037,7 @@ FormIndexDatum(IndexInfo *indexInfo, for (i = 0; i < indexInfo->ii_NumIndexAttrs; i++) { - int keycol = indexInfo->ii_KeyAttrNumbers[i]; + int keycol = indexInfo->ii_IndexAttrNumbers[i]; Datum iDatum; bool isNull; @@ -1828,7 +2078,6 @@ FormIndexDatum(IndexInfo *indexInfo, * to ensure we can do all the necessary work in just one update. * * hasindex: set relhasindex to this value - * isprimary: if true, set relhaspkey true; else no change * reltuples: if >= 0, set reltuples to this value; else no change * * If reltuples >= 0, relpages and relallvisible are also updated (using @@ -1845,7 +2094,6 @@ FormIndexDatum(IndexInfo *indexInfo, static void index_update_stats(Relation rel, bool hasindex, - bool isprimary, double reltuples) { Oid relid = RelationGetRelid(rel); @@ -1875,11 +2123,11 @@ index_update_stats(Relation rel, * It is safe to use a non-transactional update even though our * transaction could still fail before committing. 
Setting relhasindex * true is safe even if there are no indexes (VACUUM will eventually fix - * it), likewise for relhaspkey. And of course the new relpages and - * reltuples counts are correct regardless. However, we don't want to - * change relpages (or relallvisible) if the caller isn't providing an - * updated reltuples count, because that would bollix the - * reltuples/relpages ratio which is what's really important. + * it). And of course the new relpages and reltuples counts are correct + * regardless. However, we don't want to change relpages (or + * relallvisible) if the caller isn't providing an updated reltuples + * count, because that would bollix the reltuples/relpages ratio which is + * what's really important. */ pg_class = heap_open(RelationRelationId, RowExclusiveLock); @@ -1916,6 +2164,9 @@ index_update_stats(Relation rel, elog(ERROR, "could not find tuple for relation %u", relid); rd_rel = (Form_pg_class) GETSTRUCT(tuple); + /* Should this be a more comprehensive test? */ + Assert(rd_rel->relkind != RELKIND_PARTITIONED_INDEX); + /* Apply required updates, if any, to copied tuple */ dirty = false; @@ -1924,14 +2175,6 @@ index_update_stats(Relation rel, rd_rel->relhasindex = hasindex; dirty = true; } - if (isprimary) - { - if (!rd_rel->relhaspkey) - { - rd_rel->relhaspkey = true; - dirty = true; - } - } if (reltuples >= 0) { @@ -1991,6 +2234,7 @@ index_update_stats(Relation rel, * * isprimary tells whether to mark the index as a primary-key index. * isreindex indicates we are recreating a previously-existing index. + * parallel indicates if parallelism may be useful. * * Note: when reindexing an existing index, isprimary can be false even if * the index is a PK; it's already properly marked and need not be re-marked. @@ -2004,7 +2248,8 @@ index_build(Relation heapRelation, Relation indexRelation, IndexInfo *indexInfo, bool isprimary, - bool isreindex) + bool isreindex, + bool parallel) { IndexBuildResult *stats; Oid save_userid; @@ -2019,10 +2264,31 @@ index_build(Relation heapRelation, Assert(PointerIsValid(indexRelation->rd_amroutine->ambuild)); Assert(PointerIsValid(indexRelation->rd_amroutine->ambuildempty)); - ereport(DEBUG1, - (errmsg("building index \"%s\" on table \"%s\"", - RelationGetRelationName(indexRelation), - RelationGetRelationName(heapRelation)))); + /* + * Determine worker process details for parallel CREATE INDEX. Currently, + * only btree has support for parallel builds. + * + * Note that planner considers parallel safety for us. 
+ */ + if (parallel && IsNormalProcessingMode() && + indexRelation->rd_rel->relam == BTREE_AM_OID) + indexInfo->ii_ParallelWorkers = + plan_create_index_workers(RelationGetRelid(heapRelation), + RelationGetRelid(indexRelation)); + + if (indexInfo->ii_ParallelWorkers == 0) + ereport(DEBUG1, + (errmsg("building index \"%s\" on table \"%s\" serially", + RelationGetRelationName(indexRelation), + RelationGetRelationName(heapRelation)))); + else + ereport(DEBUG1, + (errmsg_plural("building index \"%s\" on table \"%s\" with request for %d parallel worker", + "building index \"%s\" on table \"%s\" with request for %d parallel workers", + indexInfo->ii_ParallelWorkers, + RelationGetRelationName(indexRelation), + RelationGetRelationName(heapRelation), + indexInfo->ii_ParallelWorkers))); /* * Switch to the table owner's userid, so that any index functions are run @@ -2117,11 +2383,9 @@ index_build(Relation heapRelation, */ index_update_stats(heapRelation, true, - isprimary, stats->heap_tuples); index_update_stats(indexRelation, - false, false, stats->index_tuples); @@ -2155,12 +2419,12 @@ index_build(Relation heapRelation, * things to add it to the new index. After we return, the AM's index * build procedure does whatever cleanup it needs. * - * The total count of heap tuples is returned. This is for updating pg_class - * statistics. (It's annoying not to be able to do that here, but we want - * to merge that update with others; see index_update_stats.) Note that the - * index AM itself must keep track of the number of index tuples; we don't do - * so here because the AM might reject some of the tuples for its own reasons, - * such as being unable to store NULLs. + * The total count of live heap tuples is returned. This is for updating + * pg_class statistics. (It's annoying not to be able to do that here, but we + * want to merge that update with others; see index_update_stats.) Note that + * the index AM itself must keep track of the number of index tuples; we don't + * do so here because the AM might reject some of the tuples for its own + * reasons, such as being unable to store NULLs. * * A side effect is to set indexInfo->ii_BrokenHotChain to true if we detect * any potentially broken HOT chains. Currently, we set this if there are @@ -2174,13 +2438,14 @@ IndexBuildHeapScan(Relation heapRelation, IndexInfo *indexInfo, bool allow_sync, IndexBuildCallback callback, - void *callback_state) + void *callback_state, + HeapScanDesc scan) { return IndexBuildHeapRangeScan(heapRelation, indexRelation, indexInfo, allow_sync, false, 0, InvalidBlockNumber, - callback, callback_state); + callback, callback_state, scan); } /* @@ -2190,8 +2455,8 @@ IndexBuildHeapScan(Relation heapRelation, * to scan cannot be done when requesting syncscan. * * When "anyvisible" mode is requested, all tuples visible to any transaction - * are considered, including those inserted or deleted by transactions that are - * still in progress. + * are indexed and counted as live, including those inserted or deleted by + * transactions that are still in progress. 
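
The new trailing argument lets a parallel worker hand an already-established shared heap scan to IndexBuildHeapScan(), while serial callers keep passing NULL so the function sets up its own scan and, when needed, its own snapshot. A rough sketch of the two call patterns; the callback and build-state names are placeholders, and heap_beginscan_parallel stands in for however the worker obtains its shared scan descriptor.

    #include "postgres.h"
    #include "access/heapam.h"
    #include "access/relscan.h"
    #include "catalog/index.h"

    /* Sketch: serial build, no pre-established scan, snapshot handled inside */
    static double
    serial_scan_sketch(Relation heapRel, Relation indexRel, IndexInfo *indexInfo,
                       IndexBuildCallback callback, void *buildstate)
    {
        return IndexBuildHeapScan(heapRel, indexRel, indexInfo,
                                  true,         /* allow_sync */
                                  callback, buildstate,
                                  NULL);        /* let it begin its own scan */
    }

    /* Sketch: parallel worker, attach to the shared scan and reuse its snapshot */
    static double
    worker_scan_sketch(Relation heapRel, Relation indexRel, IndexInfo *indexInfo,
                       ParallelHeapScanDesc pscan,
                       IndexBuildCallback callback, void *buildstate)
    {
        HeapScanDesc scan = heap_beginscan_parallel(heapRel, pscan);

        return IndexBuildHeapScan(heapRel, indexRel, indexInfo,
                                  true,         /* allow_sync is required here */
                                  callback, buildstate,
                                  scan);
    }
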
*/ double IndexBuildHeapRangeScan(Relation heapRelation, @@ -2202,11 +2467,11 @@ IndexBuildHeapRangeScan(Relation heapRelation, BlockNumber start_blockno, BlockNumber numblocks, IndexBuildCallback callback, - void *callback_state) + void *callback_state, + HeapScanDesc scan) { bool is_system_catalog; bool checking_uniqueness; - HeapScanDesc scan; HeapTuple heapTuple; Datum values[INDEX_MAX_KEYS]; bool isnull[INDEX_MAX_KEYS]; @@ -2216,6 +2481,7 @@ IndexBuildHeapRangeScan(Relation heapRelation, EState *estate; ExprContext *econtext; Snapshot snapshot; + bool need_unregister_snapshot = false; TransactionId OldestXmin; BlockNumber root_blkno = InvalidBlockNumber; OffsetNumber root_offsets[MaxHeapTuplesPerPage]; @@ -2259,27 +2525,59 @@ IndexBuildHeapRangeScan(Relation heapRelation, * concurrent build, or during bootstrap, we take a regular MVCC snapshot * and index whatever's live according to that. */ - if (IsBootstrapProcessingMode() || indexInfo->ii_Concurrent) - { - snapshot = RegisterSnapshot(GetTransactionSnapshot()); - OldestXmin = InvalidTransactionId; /* not used */ + OldestXmin = InvalidTransactionId; + + /* okay to ignore lazy VACUUMs here */ + if (!IsBootstrapProcessingMode() && !indexInfo->ii_Concurrent) + OldestXmin = GetOldestXmin(heapRelation, PROCARRAY_FLAGS_VACUUM); - /* "any visible" mode is not compatible with this */ - Assert(!anyvisible); + if (!scan) + { + /* + * Serial index build. + * + * Must begin our own heap scan in this case. We may also need to + * register a snapshot whose lifetime is under our direct control. + */ + if (!TransactionIdIsValid(OldestXmin)) + { + snapshot = RegisterSnapshot(GetTransactionSnapshot()); + need_unregister_snapshot = true; + } + else + snapshot = SnapshotAny; + + scan = heap_beginscan_strat(heapRelation, /* relation */ + snapshot, /* snapshot */ + 0, /* number of keys */ + NULL, /* scan key */ + true, /* buffer access strategy OK */ + allow_sync); /* syncscan OK? */ } else { - snapshot = SnapshotAny; - /* okay to ignore lazy VACUUMs here */ - OldestXmin = GetOldestXmin(heapRelation, PROCARRAY_FLAGS_VACUUM); + /* + * Parallel index build. + * + * Parallel case never registers/unregisters own snapshot. Snapshot + * is taken from parallel heap scan, and is SnapshotAny or an MVCC + * snapshot, based on same criteria as serial case. + */ + Assert(!IsBootstrapProcessingMode()); + Assert(allow_sync); + snapshot = scan->rs_snapshot; } - scan = heap_beginscan_strat(heapRelation, /* relation */ - snapshot, /* snapshot */ - 0, /* number of keys */ - NULL, /* scan key */ - true, /* buffer access strategy OK */ - allow_sync); /* syncscan OK? */ + /* + * Must call GetOldestXmin() with SnapshotAny. Should never call + * GetOldestXmin() with MVCC snapshot. (It's especially worth checking + * this for parallel builds, since ambuild routines that support parallel + * builds must work these details out for themselves.) + */ + Assert(snapshot == SnapshotAny || IsMVCCSnapshot(snapshot)); + Assert(snapshot == SnapshotAny ? TransactionIdIsValid(OldestXmin) : + !TransactionIdIsValid(OldestXmin)); + Assert(snapshot == SnapshotAny || !anyvisible); /* set our scan endpoints */ if (!allow_sync) @@ -2354,6 +2652,12 @@ IndexBuildHeapRangeScan(Relation heapRelation, */ LockBuffer(scan->rs_cbuf, BUFFER_LOCK_SHARE); + /* + * The criteria for counting a tuple as live in this block need to + * match what analyze.c's acquire_sample_rows() does, otherwise + * CREATE INDEX and ANALYZE may produce wildly different reltuples + * values, e.g. 
when there are many recently-dead tuples. + */ switch (HeapTupleSatisfiesVacuum(heapTuple, OldestXmin, scan->rs_cbuf)) { @@ -2366,6 +2670,8 @@ IndexBuildHeapRangeScan(Relation heapRelation, /* Normal case, index and unique-check it */ indexIt = true; tupleIsAlive = true; + /* Count it as live, too */ + reltuples += 1; break; case HEAPTUPLE_RECENTLY_DEAD: @@ -2379,6 +2685,9 @@ IndexBuildHeapRangeScan(Relation heapRelation, * the live tuple at the end of the HOT-chain. Since this * breaks semantics for pre-existing snapshots, mark the * index as unusable for them. + * + * We don't count recently-dead tuples in reltuples, even + * if we index them; see acquire_sample_rows(). */ if (HeapTupleIsHotUpdated(heapTuple)) { @@ -2401,6 +2710,7 @@ IndexBuildHeapRangeScan(Relation heapRelation, { indexIt = true; tupleIsAlive = true; + reltuples += 1; break; } @@ -2438,6 +2748,15 @@ IndexBuildHeapRangeScan(Relation heapRelation, goto recheck; } } + else + { + /* + * For consistency with acquire_sample_rows(), count + * HEAPTUPLE_INSERT_IN_PROGRESS tuples as live only + * when inserted by our own transaction. + */ + reltuples += 1; + } /* * We must index such tuples, since if the index build @@ -2457,6 +2776,7 @@ IndexBuildHeapRangeScan(Relation heapRelation, { indexIt = true; tupleIsAlive = false; + reltuples += 1; break; } @@ -2500,6 +2820,14 @@ IndexBuildHeapRangeScan(Relation heapRelation, * the same as a RECENTLY_DEAD tuple. */ indexIt = true; + + /* + * Count HEAPTUPLE_DELETE_IN_PROGRESS tuples as live, + * if they were not deleted by the current + * transaction. That's what acquire_sample_rows() + * does, and we want the behavior to be consistent. + */ + reltuples += 1; } else if (HeapTupleIsHotUpdated(heapTuple)) { @@ -2517,8 +2845,8 @@ IndexBuildHeapRangeScan(Relation heapRelation, { /* * It's a regular tuple deleted by our own xact. Index - * it but don't check for uniqueness, the same as a - * RECENTLY_DEAD tuple. + * it, but don't check for uniqueness nor count in + * reltuples, the same as a RECENTLY_DEAD tuple. 
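
The counting rules spread through this switch can be summarized compactly. The helper below is purely illustrative and distills the reltuples decision only, ignoring the "anyvisible" mode and the HOT-chain special cases that the real loop handles.

    #include "postgres.h"
    #include "utils/tqual.h"

    /*
     * Sketch: would this tuple be counted in reltuples?  Mirrors the rules
     * acquire_sample_rows() uses, per the comments in the loop above.
     */
    static bool
    count_tuple_as_live(HTSV_Result vacstatus,
                        bool inserted_by_us, bool deleted_by_us)
    {
        switch (vacstatus)
        {
            case HEAPTUPLE_LIVE:
                return true;
            case HEAPTUPLE_INSERT_IN_PROGRESS:
                return inserted_by_us;  /* only our own insertions count */
            case HEAPTUPLE_DELETE_IN_PROGRESS:
                return !deleted_by_us;  /* still live unless we deleted it */
            case HEAPTUPLE_RECENTLY_DEAD:
            case HEAPTUPLE_DEAD:
            default:
                return false;
        }
    }
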
*/ indexIt = true; } @@ -2540,14 +2868,13 @@ IndexBuildHeapRangeScan(Relation heapRelation, { /* heap_getnext did the time qual check */ tupleIsAlive = true; + reltuples += 1; } - reltuples += 1; - MemoryContextReset(econtext->ecxt_per_tuple_memory); /* Set up for predicate or expression evaluation */ - ExecStoreTuple(heapTuple, slot, InvalidBuffer, false); + ExecStoreHeapTuple(heapTuple, slot, false); /* * In a partial index, discard tuples that don't satisfy the @@ -2589,9 +2916,12 @@ IndexBuildHeapRangeScan(Relation heapRelation, offnum = ItemPointerGetOffsetNumber(&heapTuple->t_self); if (!OffsetNumberIsValid(root_offsets[offnum - 1])) - elog(ERROR, "failed to find parent tuple for heap-only tuple at (%u,%u) in table \"%s\"", - ItemPointerGetBlockNumber(&heapTuple->t_self), - offnum, RelationGetRelationName(heapRelation)); + ereport(ERROR, + (errcode(ERRCODE_DATA_CORRUPTED), + errmsg_internal("failed to find parent tuple for heap-only tuple at (%u,%u) in table \"%s\"", + ItemPointerGetBlockNumber(&heapTuple->t_self), + offnum, + RelationGetRelationName(heapRelation)))); ItemPointerSetOffsetNumber(&rootTuple.t_self, root_offsets[offnum - 1]); @@ -2610,8 +2940,8 @@ IndexBuildHeapRangeScan(Relation heapRelation, heap_endscan(scan); - /* we can now forget our snapshot, if set */ - if (IsBootstrapProcessingMode() || indexInfo->ii_Concurrent) + /* we can now forget our snapshot, if set and registered by us */ + if (need_unregister_snapshot) UnregisterSnapshot(snapshot); ExecDropSingleTupleTableSlot(slot); @@ -2693,7 +3023,7 @@ IndexCheckExclusion(Relation heapRelation, MemoryContextReset(econtext->ecxt_per_tuple_memory); /* Set up for predicate or expression evaluation */ - ExecStoreTuple(heapTuple, slot, InvalidBuffer, false); + ExecStoreHeapTuple(heapTuple, slot, false); /* * In a partial index, ignore tuples that don't satisfy the predicate. @@ -2854,7 +3184,7 @@ validate_index(Oid heapId, Oid indexId, Snapshot snapshot) state.tuplesort = tuplesort_begin_datum(INT8OID, Int8LessOperator, InvalidOid, false, maintenance_work_mem, - false); + NULL, false); state.htups = state.itups = state.tups_inserted = 0; (void) index_bulk_delete(&ivinfo, NULL, @@ -3054,10 +3384,12 @@ validate_index_heapscan(Relation heapRelation, { root_offnum = root_offsets[root_offnum - 1]; if (!OffsetNumberIsValid(root_offnum)) - elog(ERROR, "failed to find parent tuple for heap-only tuple at (%u,%u) in table \"%s\"", - ItemPointerGetBlockNumber(heapcursor), - ItemPointerGetOffsetNumber(heapcursor), - RelationGetRelationName(heapRelation)); + ereport(ERROR, + (errcode(ERRCODE_DATA_CORRUPTED), + errmsg_internal("failed to find parent tuple for heap-only tuple at (%u,%u) in table \"%s\"", + ItemPointerGetBlockNumber(heapcursor), + ItemPointerGetOffsetNumber(heapcursor), + RelationGetRelationName(heapRelation)))); ItemPointerSetOffsetNumber(&rootTuple, root_offnum); } @@ -3112,7 +3444,7 @@ validate_index_heapscan(Relation heapRelation, MemoryContextReset(econtext->ecxt_per_tuple_memory); /* Set up for predicate or expression evaluation */ - ExecStoreTuple(heapTuple, slot, InvalidBuffer, false); + ExecStoreHeapTuple(heapTuple, slot, false); /* * In a partial index, discard tuples that don't satisfy the @@ -3325,6 +3657,14 @@ reindex_index(Oid indexId, bool skip_constraint_checks, char persistence, */ iRel = index_open(indexId, AccessExclusiveLock); + /* + * The case of reindexing partitioned tables and indexes is handled + * differently by upper layers, so this case shouldn't arise. 
+ */ + if (iRel->rd_rel->relkind == RELKIND_PARTITIONED_INDEX) + elog(ERROR, "unsupported relation kind for index \"%s\"", + RelationGetRelationName(iRel)); + /* * Don't allow reindex on temp tables of other backends ... their local * buffer manager is not going to cope. @@ -3371,7 +3711,7 @@ reindex_index(Oid indexId, bool skip_constraint_checks, char persistence, /* Initialize the index and rebuild */ /* Note: we do not need to re-establish pkey setting */ - index_build(heapRelation, iRel, indexInfo, false, true); + index_build(heapRelation, iRel, indexInfo, false, true, true); } PG_CATCH(); { @@ -3524,6 +3864,22 @@ reindex_relation(Oid relid, int flags, int options) */ rel = heap_open(relid, ShareLock); + /* + * This may be useful when implemented someday; but that day is not today. + * For now, avoid erroring out when called in a multi-table context + * (REINDEX SCHEMA) and happen to come across a partitioned table. The + * partitions may be reindexed on their own anyway. + */ + if (rel->rd_rel->relkind == RELKIND_PARTITIONED_TABLE) + { + ereport(WARNING, + (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), + errmsg("REINDEX of partitioned tables is not yet implemented, skipping \"%s\"", + RelationGetRelationName(rel)))); + heap_close(rel, ShareLock); + return false; + } + toast_relid = rel->rd_rel->reltoastrelid; /* @@ -3559,7 +3915,7 @@ reindex_relation(Oid relid, int flags, int options) /* Ensure rd_indexattr is valid; see comments for RelationSetIndexList */ if (is_pg_class) - (void) RelationGetIndexAttrBitmap(rel, INDEX_ATTR_BITMAP_ALL); + (void) RelationGetIndexAttrBitmap(rel, INDEX_ATTR_BITMAP_HOT); PG_TRY(); { @@ -3647,7 +4003,8 @@ reindex_relation(Oid relid, int flags, int options) * When we are busy reindexing a system index, this code provides support * for preventing catalog lookups from using that index. We also make use * of this to catch attempted uses of user indexes during reindexing of - * those indexes. + * those indexes. This information is propagated to parallel workers; + * attempting to change it during a parallel operation is not permitted. * ---------------------------------------------------------------- */ @@ -3713,6 +4070,7 @@ SetReindexProcessing(Oid heapOid, Oid indexOid) static void ResetReindexProcessing(void) { + /* This may be called in leader error path */ currentlyReindexedHeap = InvalidOid; currentlyReindexedIndex = InvalidOid; } @@ -3730,6 +4088,8 @@ SetReindexPending(List *indexes) /* Reindexing is not re-entrant. */ if (pendingReindexedIndexes) elog(ERROR, "cannot reindex while reindexing"); + if (IsInParallelMode()) + elog(ERROR, "cannot modify reindex state during a parallel operation"); pendingReindexedIndexes = list_copy(indexes); } @@ -3740,6 +4100,8 @@ SetReindexPending(List *indexes) static void RemoveReindexPending(Oid indexOid) { + if (IsInParallelMode()) + elog(ERROR, "cannot modify reindex state during a parallel operation"); pendingReindexedIndexes = list_delete_oid(pendingReindexedIndexes, indexOid); } @@ -3751,5 +4113,58 @@ RemoveReindexPending(Oid indexOid) static void ResetReindexPending(void) { + /* This may be called in leader error path */ pendingReindexedIndexes = NIL; } + +/* + * EstimateReindexStateSpace + * Estimate space needed to pass reindex state to parallel workers. + */ +Size +EstimateReindexStateSpace(void) +{ + return offsetof(SerializedReindexState, pendingReindexedIndexes) + + mul_size(sizeof(Oid), list_length(pendingReindexedIndexes)); +} + +/* + * SerializeReindexState + * Serialize reindex state for parallel workers. 
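
These three functions follow the usual Estimate/Serialize/Restore pattern for shipping backend-private state into a parallel worker. The sketch below is a rough illustration of how a leader and a worker might use them; the table-of-contents key, the pcxt/toc handles, and the assumption that the estimate phase already reserved the space are placeholders, not what the parallel infrastructure in this patch actually does.

    #include "postgres.h"
    #include "access/parallel.h"
    #include "catalog/index.h"
    #include "storage/shm_toc.h"

    /* placeholder key; not a real PARALLEL_KEY_* value */
    #define SKETCH_KEY_REINDEX_STATE  UINT64CONST(0xFF00000000000001)

    /* Leader: reserve space in the DSM and serialize the current reindex state.
     * Assumes the estimate phase already accounted for this chunk and key. */
    static void
    leader_store_reindex_state(ParallelContext *pcxt)
    {
        Size    len = EstimateReindexStateSpace();
        char   *ptr = shm_toc_allocate(pcxt->toc, len);

        SerializeReindexState(len, ptr);
        shm_toc_insert(pcxt->toc, SKETCH_KEY_REINDEX_STATE, ptr);
    }

    /* Worker: look the state up and recreate it before doing any index work */
    static void
    worker_restore_reindex_state(shm_toc *toc)
    {
        char   *ptr = shm_toc_lookup(toc, SKETCH_KEY_REINDEX_STATE, false);

        RestoreReindexState(ptr);
    }
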
+ */ +void +SerializeReindexState(Size maxsize, char *start_address) +{ + SerializedReindexState *sistate = (SerializedReindexState *) start_address; + int c = 0; + ListCell *lc; + + sistate->currentlyReindexedHeap = currentlyReindexedHeap; + sistate->currentlyReindexedIndex = currentlyReindexedIndex; + sistate->numPendingReindexedIndexes = list_length(pendingReindexedIndexes); + foreach(lc, pendingReindexedIndexes) + sistate->pendingReindexedIndexes[c++] = lfirst_oid(lc); +} + +/* + * RestoreReindexState + * Restore reindex state in a parallel worker. + */ +void +RestoreReindexState(void *reindexstate) +{ + SerializedReindexState *sistate = (SerializedReindexState *) reindexstate; + int c = 0; + MemoryContext oldcontext; + + currentlyReindexedHeap = sistate->currentlyReindexedHeap; + currentlyReindexedIndex = sistate->currentlyReindexedIndex; + + Assert(pendingReindexedIndexes == NIL); + oldcontext = MemoryContextSwitchTo(TopMemoryContext); + for (c = 0; c < sistate->numPendingReindexedIndexes; ++c) + pendingReindexedIndexes = + lappend_oid(pendingReindexedIndexes, + sistate->pendingReindexedIndexes[c]); + MemoryContextSwitchTo(oldcontext); +} diff --git a/src/backend/catalog/indexing.c b/src/backend/catalog/indexing.c index e5b6bafaff..daf7ae2eb2 100644 --- a/src/backend/catalog/indexing.c +++ b/src/backend/catalog/indexing.c @@ -4,7 +4,7 @@ * This file contains routines to support indexes defined on system * catalogs. * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * @@ -42,7 +42,7 @@ CatalogOpenIndexes(Relation heapRel) ResultRelInfo *resultRelInfo; resultRelInfo = makeNode(ResultRelInfo); - resultRelInfo->ri_RangeTableIndex = 1; /* dummy */ + resultRelInfo->ri_RangeTableIndex = 0; /* dummy */ resultRelInfo->ri_RelationDesc = heapRel; resultRelInfo->ri_TrigDesc = NULL; /* we don't fire triggers */ @@ -96,7 +96,7 @@ CatalogIndexInsert(CatalogIndexState indstate, HeapTuple heapTuple) /* Need a slot to hold the tuple being examined */ slot = MakeSingleTupleTableSlot(RelationGetDescr(heapRelation)); - ExecStoreTuple(heapTuple, slot, InvalidBuffer, false); + ExecStoreHeapTuple(heapTuple, slot, false); /* * for each index, form and insert the index tuple @@ -119,6 +119,7 @@ CatalogIndexInsert(CatalogIndexState indstate, HeapTuple heapTuple) Assert(indexInfo->ii_Predicate == NIL); Assert(indexInfo->ii_ExclusionOps == NULL); Assert(relationDescs[i]->rd_index->indimmediate); + Assert(indexInfo->ii_NumIndexKeyAttrs != 0); /* * FormIndexDatum fills in its values and isnull parameters with the diff --git a/src/backend/catalog/information_schema.sql b/src/backend/catalog/information_schema.sql index 236f6be37e..f4e69f4a26 100644 --- a/src/backend/catalog/information_schema.sql +++ b/src/backend/catalog/information_schema.sql @@ -2,7 +2,7 @@ * SQL Information Schema * as defined in ISO/IEC 9075-11:2011 * - * Copyright (c) 2003-2017, PostgreSQL Global Development Group + * Copyright (c) 2003-2018, PostgreSQL Global Development Group * * src/backend/catalog/information_schema.sql * @@ -1413,7 +1413,8 @@ CREATE VIEW routines AS CAST(current_database() AS sql_identifier) AS routine_catalog, CAST(n.nspname AS sql_identifier) AS routine_schema, CAST(p.proname AS sql_identifier) AS routine_name, - CAST('FUNCTION' AS character_data) AS routine_type, + CAST(CASE p.prokind WHEN 'f' THEN 'FUNCTION' WHEN 'p' THEN 'PROCEDURE' END + AS 
character_data) AS routine_type, CAST(null AS sql_identifier) AS module_catalog, CAST(null AS sql_identifier) AS module_schema, CAST(null AS sql_identifier) AS module_name, @@ -1422,7 +1423,8 @@ CREATE VIEW routines AS CAST(null AS sql_identifier) AS udt_name, CAST( - CASE WHEN t.typelem <> 0 AND t.typlen = -1 THEN 'ARRAY' + CASE WHEN p.prokind = 'p' THEN NULL + WHEN t.typelem <> 0 AND t.typlen = -1 THEN 'ARRAY' WHEN nt.nspname = 'pg_catalog' THEN format_type(t.oid, null) ELSE 'USER-DEFINED' END AS character_data) AS data_type, @@ -1440,14 +1442,14 @@ CREATE VIEW routines AS CAST(null AS cardinal_number) AS datetime_precision, CAST(null AS character_data) AS interval_type, CAST(null AS cardinal_number) AS interval_precision, - CAST(current_database() AS sql_identifier) AS type_udt_catalog, + CAST(CASE WHEN nt.nspname IS NOT NULL THEN current_database() END AS sql_identifier) AS type_udt_catalog, CAST(nt.nspname AS sql_identifier) AS type_udt_schema, CAST(t.typname AS sql_identifier) AS type_udt_name, CAST(null AS sql_identifier) AS scope_catalog, CAST(null AS sql_identifier) AS scope_schema, CAST(null AS sql_identifier) AS scope_name, CAST(null AS cardinal_number) AS maximum_cardinality, - CAST(0 AS sql_identifier) AS dtd_identifier, + CAST(CASE WHEN p.prokind <> 'p' THEN 0 END AS sql_identifier) AS dtd_identifier, CAST(CASE WHEN l.lanname = 'sql' THEN 'SQL' ELSE 'EXTERNAL' END AS character_data) AS routine_body, @@ -1462,7 +1464,8 @@ CREATE VIEW routines AS CAST('GENERAL' AS character_data) AS parameter_style, CAST(CASE WHEN p.provolatile = 'i' THEN 'YES' ELSE 'NO' END AS yes_or_no) AS is_deterministic, CAST('MODIFIES' AS character_data) AS sql_data_access, - CAST(CASE WHEN p.proisstrict THEN 'YES' ELSE 'NO' END AS yes_or_no) AS is_null_call, + CAST(CASE WHEN p.prokind <> 'p' THEN + CASE WHEN p.proisstrict THEN 'YES' ELSE 'NO' END END AS yes_or_no) AS is_null_call, CAST(null AS character_data) AS sql_path, CAST('YES' AS yes_or_no) AS schema_level_routine, CAST(0 AS cardinal_number) AS max_dynamic_result_sets, @@ -1503,13 +1506,15 @@ CREATE VIEW routines AS CAST(null AS cardinal_number) AS result_cast_maximum_cardinality, CAST(null AS sql_identifier) AS result_cast_dtd_identifier - FROM pg_namespace n, pg_proc p, pg_language l, - pg_type t, pg_namespace nt + FROM (pg_namespace n + JOIN pg_proc p ON n.oid = p.pronamespace + JOIN pg_language l ON p.prolang = l.oid) + LEFT JOIN + (pg_type t JOIN pg_namespace nt ON t.typnamespace = nt.oid) + ON p.prorettype = t.oid AND p.prokind <> 'p' - WHERE n.oid = p.pronamespace AND p.prolang = l.oid - AND p.prorettype = t.oid AND t.typnamespace = nt.oid - AND (pg_has_role(p.proowner, 'USAGE') - OR has_function_privilege(p.oid, 'EXECUTE')); + WHERE (pg_has_role(p.proowner, 'USAGE') + OR has_function_privilege(p.oid, 'EXECUTE')); GRANT SELECT ON routines TO PUBLIC; @@ -1778,7 +1783,8 @@ CREATE VIEW table_constraints AS CAST(CASE WHEN c.condeferrable THEN 'YES' ELSE 'NO' END AS yes_or_no) AS is_deferrable, CAST(CASE WHEN c.condeferred THEN 'YES' ELSE 'NO' END AS yes_or_no) - AS initially_deferred + AS initially_deferred, + CAST('YES' AS yes_or_no) AS enforced FROM pg_namespace nc, pg_namespace nr, @@ -1807,7 +1813,8 @@ CREATE VIEW table_constraints AS CAST(r.relname AS sql_identifier) AS table_name, CAST('CHECK' AS character_data) AS constraint_type, CAST('NO' AS yes_or_no) AS is_deferrable, - CAST('NO' AS yes_or_no) AS initially_deferred + CAST('NO' AS yes_or_no) AS initially_deferred, + CAST('YES' AS yes_or_no) AS enforced FROM pg_namespace nr, pg_class r, 
@@ -2079,8 +2086,12 @@ CREATE VIEW triggers AS CAST(current_database() AS sql_identifier) AS event_object_catalog, CAST(n.nspname AS sql_identifier) AS event_object_schema, CAST(c.relname AS sql_identifier) AS event_object_table, - CAST(null AS cardinal_number) AS action_order, - -- XXX strange hacks follow + CAST( + -- To determine action order, partition by schema, table, + -- event_manipulation (INSERT/DELETE/UPDATE), ROW/STATEMENT (1), + -- BEFORE/AFTER (66), then order by trigger name + rank() OVER (PARTITION BY n.oid, c.oid, em.num, t.tgtype & 1, t.tgtype & 66 ORDER BY t.tgname) + AS cardinal_number) AS action_order, CAST( CASE WHEN pg_has_role(c.relowner, 'USAGE') THEN (regexp_match(pg_get_triggerdef(t.oid), E'.{35,} WHEN \\((.+)\\) EXECUTE PROCEDURE'))[1] @@ -2098,8 +2109,8 @@ CREATE VIEW triggers AS -- hard-wired refs to TRIGGER_TYPE_BEFORE, TRIGGER_TYPE_INSTEAD CASE t.tgtype & 66 WHEN 2 THEN 'BEFORE' WHEN 64 THEN 'INSTEAD OF' ELSE 'AFTER' END AS character_data) AS action_timing, - CAST(null AS sql_identifier) AS action_reference_old_table, - CAST(null AS sql_identifier) AS action_reference_new_table, + CAST(tgoldtable AS sql_identifier) AS action_reference_old_table, + CAST(tgnewtable AS sql_identifier) AS action_reference_new_table, CAST(null AS sql_identifier) AS action_reference_old_row, CAST(null AS sql_identifier) AS action_reference_new_row, CAST(null AS time_stamp) AS created diff --git a/src/backend/catalog/namespace.c b/src/backend/catalog/namespace.c index 5d71302ded..5d13e6a3d7 100644 --- a/src/backend/catalog/namespace.c +++ b/src/backend/catalog/namespace.c @@ -9,7 +9,7 @@ * and implementing search-path-controlled searches. * * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * IDENTIFICATION @@ -28,7 +28,6 @@ #include "catalog/pg_authid.h" #include "catalog/pg_collation.h" #include "catalog/pg_conversion.h" -#include "catalog/pg_conversion_fn.h" #include "catalog/pg_namespace.h" #include "catalog/pg_opclass.h" #include "catalog/pg_operator.h" @@ -48,7 +47,7 @@ #include "parser/parse_func.h" #include "storage/ipc.h" #include "storage/lmgr.h" -#include "storage/sinval.h" +#include "storage/sinvaladt.h" #include "utils/acl.h" #include "utils/builtins.h" #include "utils/catcache.h" @@ -95,7 +94,7 @@ * set up until the first attempt to create something in it. (The reason for * klugery is that we can't create the temp namespace outside a transaction, * but initial GUC processing of search_path happens outside a transaction.) - * activeTempCreationPending is TRUE if "pg_temp" appears first in the string + * activeTempCreationPending is true if "pg_temp" appears first in the string * but is not reflected in activeCreationNamespace because the namespace isn't * set up yet. 
* @@ -136,7 +135,7 @@ static List *activeSearchPath = NIL; /* default place to create stuff; if InvalidOid, no default */ static Oid activeCreationNamespace = InvalidOid; -/* if TRUE, activeCreationNamespace is wrong, it should be temp namespace */ +/* if true, activeCreationNamespace is wrong, it should be temp namespace */ static bool activeTempCreationPending = false; /* These variables are the values last derived from namespace_search_path: */ @@ -202,27 +201,41 @@ static bool MatchNamedCall(HeapTuple proctup, int nargs, List *argnames, /* - * RangeVarGetRelid + * RangeVarGetRelidExtended * Given a RangeVar describing an existing relation, * select the proper namespace and look up the relation OID. * - * If the schema or relation is not found, return InvalidOid if missing_ok - * = true, otherwise raise an error. + * If the schema or relation is not found, return InvalidOid if flags contains + * RVR_MISSING_OK, otherwise raise an error. * - * If nowait = true, throw an error if we'd have to wait for a lock. + * If flags contains RVR_NOWAIT, throw an error if we'd have to wait for a + * lock. + * + * If flags contains RVR_SKIP_LOCKED, return InvalidOid if we'd have to wait + * for a lock. + * + * flags cannot contain both RVR_NOWAIT and RVR_SKIP_LOCKED. + * + * Note that if RVR_MISSING_OK and RVR_SKIP_LOCKED are both specified, a + * return value of InvalidOid could either mean the relation is missing or it + * could not be locked. * * Callback allows caller to check permissions or acquire additional locks * prior to grabbing the relation lock. */ Oid RangeVarGetRelidExtended(const RangeVar *relation, LOCKMODE lockmode, - bool missing_ok, bool nowait, + uint32 flags, RangeVarGetRelidCallback callback, void *callback_arg) { uint64 inval_count; Oid relId; Oid oldRelId = InvalidOid; bool retry = false; + bool missing_ok = (flags & RVR_MISSING_OK) != 0; + + /* verify that flags do no conflict */ + Assert(!((flags & RVR_NOWAIT) && (flags & RVR_SKIP_LOCKED))); /* * We check the catalog name and then ignore it. @@ -361,20 +374,24 @@ RangeVarGetRelidExtended(const RangeVar *relation, LOCKMODE lockmode, */ if (!OidIsValid(relId)) AcceptInvalidationMessages(); - else if (!nowait) + else if (!(flags & (RVR_NOWAIT | RVR_SKIP_LOCKED))) LockRelationOid(relId, lockmode); else if (!ConditionalLockRelationOid(relId, lockmode)) { + int elevel = (flags & RVR_SKIP_LOCKED) ? DEBUG1 : ERROR; + if (relation->schemaname) - ereport(ERROR, + ereport(elevel, (errcode(ERRCODE_LOCK_NOT_AVAILABLE), errmsg("could not obtain lock on relation \"%s.%s\"", relation->schemaname, relation->relname))); else - ereport(ERROR, + ereport(elevel, (errcode(ERRCODE_LOCK_NOT_AVAILABLE), errmsg("could not obtain lock on relation \"%s\"", relation->relname))); + + return InvalidOid; } /* @@ -392,15 +409,17 @@ RangeVarGetRelidExtended(const RangeVar *relation, LOCKMODE lockmode, oldRelId = relId; } - if (!OidIsValid(relId) && !missing_ok) + if (!OidIsValid(relId)) { + int elevel = missing_ok ? DEBUG1 : ERROR; + if (relation->schemaname) - ereport(ERROR, + ereport(elevel, (errcode(ERRCODE_UNDEFINED_TABLE), errmsg("relation \"%s.%s\" does not exist", relation->schemaname, relation->relname))); else - ereport(ERROR, + ereport(elevel, (errcode(ERRCODE_UNDEFINED_TABLE), errmsg("relation \"%s\" does not exist", relation->relname))); @@ -560,7 +579,7 @@ RangeVarGetAndCheckCreationNamespace(RangeVar *relation, /* Check namespace permissions. 
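
With the boolean pair replaced by a flags word, a caller that wants "skip it if I cannot lock it right away" behavior combines RVR_SKIP_LOCKED with RVR_MISSING_OK and treats InvalidOid as "nothing to do". A minimal sketch, with the RangeVar and the chosen lock mode assumed to come from the caller.

    #include "postgres.h"
    #include "catalog/namespace.h"
    #include "storage/lockdefs.h"

    /*
     * Sketch: try to resolve and lock a relation, silently giving up.  Returns
     * InvalidOid when the relation is gone or when someone else holds a
     * conflicting lock; with both flags set the two cases are indistinguishable.
     */
    static Oid
    try_resolve_relation(const RangeVar *rv)
    {
        return RangeVarGetRelidExtended(rv, ShareUpdateExclusiveLock,
                                        RVR_MISSING_OK | RVR_SKIP_LOCKED,
                                        NULL, NULL);    /* no permission callback */
    }
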
*/ aclresult = pg_namespace_aclcheck(nspid, GetUserId(), ACL_CREATE); if (aclresult != ACLCHECK_OK) - aclcheck_error(aclresult, ACL_KIND_NAMESPACE, + aclcheck_error(aclresult, OBJECT_SCHEMA, get_namespace_name(nspid)); if (retry) @@ -585,7 +604,7 @@ RangeVarGetAndCheckCreationNamespace(RangeVar *relation, if (lockmode != NoLock && OidIsValid(relid)) { if (!pg_class_ownercheck(relid, GetUserId())) - aclcheck_error(ACLCHECK_NOT_OWNER, ACL_KIND_CLASS, + aclcheck_error(ACLCHECK_NOT_OWNER, get_relkind_objtype(get_rel_relkind(relid)), relation->relname); if (relid != oldrelid) LockRelationOid(relid, lockmode); @@ -2874,7 +2893,7 @@ LookupExplicitNamespace(const char *nspname, bool missing_ok) aclresult = pg_namespace_aclcheck(namespaceId, GetUserId(), ACL_USAGE); if (aclresult != ACLCHECK_OK) - aclcheck_error(aclresult, ACL_KIND_NAMESPACE, + aclcheck_error(aclresult, OBJECT_SCHEMA, nspname); /* Schema search hook for this lookup */ InvokeNamespaceSearchHook(namespaceId, true); @@ -2911,7 +2930,7 @@ LookupCreationNamespace(const char *nspname) aclresult = pg_namespace_aclcheck(namespaceId, GetUserId(), ACL_CREATE); if (aclresult != ACLCHECK_OK) - aclcheck_error(aclresult, ACL_KIND_NAMESPACE, + aclcheck_error(aclresult, OBJECT_SCHEMA, nspname); return namespaceId; @@ -3185,6 +3204,46 @@ isOtherTempNamespace(Oid namespaceId) return isAnyTempNamespace(namespaceId); } +/* + * isTempNamespaceInUse - is the given namespace owned and actively used + * by a backend? + * + * Note: this can be used while scanning relations in pg_class to detect + * orphaned temporary tables or namespaces with a backend connected to a + * given database. The result may be out of date quickly, so the caller + * must be careful how to handle this information. + */ +bool +isTempNamespaceInUse(Oid namespaceId) +{ + PGPROC *proc; + int backendId; + + Assert(OidIsValid(MyDatabaseId)); + + backendId = GetTempNamespaceBackendId(namespaceId); + + if (backendId == InvalidBackendId || + backendId == MyBackendId) + return false; + + /* Is the backend alive? */ + proc = BackendIdGetProc(backendId); + if (proc == NULL) + return false; + + /* Is the backend connected to the same database we are looking at? */ + if (proc->databaseId != MyDatabaseId) + return false; + + /* Does the backend own the temporary namespace? */ + if (proc->tempNamespaceId != namespaceId) + return false; + + /* all good to go */ + return true; +} + /* * GetTempNamespaceBackendId - if the given namespace is a temporary-table * namespace (either my own, or another backend's), return the BackendId @@ -3874,6 +3933,18 @@ InitTempTableNamespace(void) myTempNamespace = namespaceId; myTempToastNamespace = toastspaceId; + /* + * Mark MyProc as owning this namespace which other processes can use to + * decide if a temporary namespace is in use or not. We assume that + * assignment of namespaceId is an atomic operation. Even if it is not, + * the temporary relation which resulted in the creation of this temporary + * namespace is still locked until the current transaction commits, and + * its pg_namespace row is not visible yet. However it does not matter: + * this flag makes the namespace as being in use, so no objects created on + * it would be removed concurrently. + */ + MyProc->tempNamespaceId = namespaceId; + /* It should not be done already. 
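
isTempNamespaceInUse() is aimed at code that scans pg_class and needs to decide whether a temporary relation is orphaned (its owning backend is gone) or still actively owned. A small sketch, assuming classForm was read during such a scan; the helper name is illustrative, and as the comments above note the answer can go stale immediately.

    #include "postgres.h"
    #include "catalog/namespace.h"
    #include "catalog/pg_class.h"

    /* Sketch: is this temp table safe to treat as orphaned right now? */
    static bool
    temp_table_is_orphaned(Form_pg_class classForm)
    {
        if (classForm->relpersistence != RELPERSISTENCE_TEMP)
            return false;       /* not a temp table at all */

        /* "in use" means a live backend of this database still owns it */
        return !isTempNamespaceInUse(classForm->relnamespace);
    }
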
*/ AssertState(myTempNamespaceSubID == InvalidSubTransactionId); myTempNamespaceSubID = GetCurrentSubTransactionId(); @@ -3904,6 +3975,17 @@ AtEOXact_Namespace(bool isCommit, bool parallel) myTempNamespace = InvalidOid; myTempToastNamespace = InvalidOid; baseSearchPathValid = false; /* need to rebuild list */ + + /* + * Reset the temporary namespace flag in MyProc. We assume that + * this operation is atomic. + * + * Because this transaction is aborting, the pg_namespace row is + * not visible to anyone else anyway, but that doesn't matter: + * it's not a problem if objects contained in this namespace are + * removed concurrently. + */ + MyProc->tempNamespaceId = InvalidOid; } myTempNamespaceSubID = InvalidSubTransactionId; } @@ -3956,6 +4038,17 @@ AtEOSubXact_Namespace(bool isCommit, SubTransactionId mySubid, myTempNamespace = InvalidOid; myTempToastNamespace = InvalidOid; baseSearchPathValid = false; /* need to rebuild list */ + + /* + * Reset the temporary namespace flag in MyProc. We assume that + * this operation is atomic. + * + * Because this subtransaction is aborting, the pg_namespace row + * is not visible to anyone else anyway, but that doesn't matter: + * it's not a problem if objects contained in this namespace are + * removed concurrently. + */ + MyProc->tempNamespaceId = InvalidOid; } } diff --git a/src/backend/catalog/objectaccess.c b/src/backend/catalog/objectaccess.c index 9d5eb7b9da..65884699c4 100644 --- a/src/backend/catalog/objectaccess.c +++ b/src/backend/catalog/objectaccess.c @@ -3,7 +3,7 @@ * objectaccess.c * functions for object_access_hook on various events * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * ------------------------------------------------------------------------- diff --git a/src/backend/catalog/objectaddress.c b/src/backend/catalog/objectaddress.c index 6cac2dfd1d..593e6f7022 100644 --- a/src/backend/catalog/objectaddress.c +++ b/src/backend/catalog/objectaddress.c @@ -3,7 +3,7 @@ * objectaddress.c * functions for working with ObjectAddresses * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * @@ -30,7 +30,6 @@ #include "catalog/pg_event_trigger.h" #include "catalog/pg_collation.h" #include "catalog/pg_constraint.h" -#include "catalog/pg_constraint_fn.h" #include "catalog/pg_conversion.h" #include "catalog/pg_database.h" #include "catalog/pg_extension.h" @@ -69,13 +68,13 @@ #include "commands/trigger.h" #include "foreign/foreign.h" #include "funcapi.h" -#include "libpq/be-fsstubs.h" #include "miscadmin.h" #include "nodes/makefuncs.h" #include "parser/parse_func.h" #include "parser/parse_oper.h" #include "parser/parse_type.h" #include "rewrite/rewriteSupport.h" +#include "storage/large_object.h" #include "storage/lmgr.h" #include "storage/sinval.h" #include "utils/builtins.h" @@ -104,7 +103,7 @@ typedef struct AttrNumber attnum_namespace; /* attnum of namespace field */ AttrNumber attnum_owner; /* attnum of owner field */ AttrNumber attnum_acl; /* attnum of acl field */ - AclObjectKind acl_kind; /* ACL_KIND_* of this object type */ + ObjectType objtype; /* OBJECT_* of this object type */ bool is_nsp_name_unique; /* can the nsp/name combination (or name * alone, if there's no namespace) be * considered a 
unique identifier for an @@ -146,7 +145,7 @@ static const ObjectPropertyType ObjectProperty[] = Anum_pg_collation_collnamespace, Anum_pg_collation_collowner, InvalidAttrNumber, - ACL_KIND_COLLATION, + OBJECT_COLLATION, true }, { @@ -170,7 +169,7 @@ static const ObjectPropertyType ObjectProperty[] = Anum_pg_conversion_connamespace, Anum_pg_conversion_conowner, InvalidAttrNumber, - ACL_KIND_CONVERSION, + OBJECT_CONVERSION, true }, { @@ -182,7 +181,7 @@ static const ObjectPropertyType ObjectProperty[] = InvalidAttrNumber, Anum_pg_database_datdba, Anum_pg_database_datacl, - ACL_KIND_DATABASE, + OBJECT_DATABASE, true }, { @@ -194,7 +193,7 @@ static const ObjectPropertyType ObjectProperty[] = InvalidAttrNumber, /* extension doesn't belong to extnamespace */ Anum_pg_extension_extowner, InvalidAttrNumber, - ACL_KIND_EXTENSION, + OBJECT_EXTENSION, true }, { @@ -206,7 +205,7 @@ static const ObjectPropertyType ObjectProperty[] = InvalidAttrNumber, Anum_pg_foreign_data_wrapper_fdwowner, Anum_pg_foreign_data_wrapper_fdwacl, - ACL_KIND_FDW, + OBJECT_FDW, true }, { @@ -218,7 +217,7 @@ static const ObjectPropertyType ObjectProperty[] = InvalidAttrNumber, Anum_pg_foreign_server_srvowner, Anum_pg_foreign_server_srvacl, - ACL_KIND_FOREIGN_SERVER, + OBJECT_FOREIGN_SERVER, true }, { @@ -230,7 +229,7 @@ static const ObjectPropertyType ObjectProperty[] = Anum_pg_proc_pronamespace, Anum_pg_proc_proowner, Anum_pg_proc_proacl, - ACL_KIND_PROC, + OBJECT_FUNCTION, false }, { @@ -242,7 +241,7 @@ static const ObjectPropertyType ObjectProperty[] = InvalidAttrNumber, Anum_pg_language_lanowner, Anum_pg_language_lanacl, - ACL_KIND_LANGUAGE, + OBJECT_LANGUAGE, true }, { @@ -254,7 +253,7 @@ static const ObjectPropertyType ObjectProperty[] = InvalidAttrNumber, Anum_pg_largeobject_metadata_lomowner, Anum_pg_largeobject_metadata_lomacl, - ACL_KIND_LARGEOBJECT, + OBJECT_LARGEOBJECT, false }, { @@ -266,7 +265,7 @@ static const ObjectPropertyType ObjectProperty[] = Anum_pg_opclass_opcnamespace, Anum_pg_opclass_opcowner, InvalidAttrNumber, - ACL_KIND_OPCLASS, + OBJECT_OPCLASS, true }, { @@ -278,7 +277,7 @@ static const ObjectPropertyType ObjectProperty[] = Anum_pg_operator_oprnamespace, Anum_pg_operator_oprowner, InvalidAttrNumber, - ACL_KIND_OPER, + OBJECT_OPERATOR, false }, { @@ -290,7 +289,7 @@ static const ObjectPropertyType ObjectProperty[] = Anum_pg_opfamily_opfnamespace, Anum_pg_opfamily_opfowner, InvalidAttrNumber, - ACL_KIND_OPFAMILY, + OBJECT_OPFAMILY, true }, { @@ -326,7 +325,7 @@ static const ObjectPropertyType ObjectProperty[] = InvalidAttrNumber, Anum_pg_namespace_nspowner, Anum_pg_namespace_nspacl, - ACL_KIND_NAMESPACE, + OBJECT_SCHEMA, true }, { @@ -338,7 +337,7 @@ static const ObjectPropertyType ObjectProperty[] = Anum_pg_class_relnamespace, Anum_pg_class_relowner, Anum_pg_class_relacl, - ACL_KIND_CLASS, + OBJECT_TABLE, true }, { @@ -350,7 +349,7 @@ static const ObjectPropertyType ObjectProperty[] = InvalidAttrNumber, Anum_pg_tablespace_spcowner, Anum_pg_tablespace_spcacl, - ACL_KIND_TABLESPACE, + OBJECT_TABLESPACE, true }, { @@ -392,7 +391,7 @@ static const ObjectPropertyType ObjectProperty[] = InvalidAttrNumber, Anum_pg_event_trigger_evtowner, InvalidAttrNumber, - ACL_KIND_EVENT_TRIGGER, + OBJECT_EVENT_TRIGGER, true }, { @@ -404,7 +403,7 @@ static const ObjectPropertyType ObjectProperty[] = Anum_pg_ts_config_cfgnamespace, Anum_pg_ts_config_cfgowner, InvalidAttrNumber, - ACL_KIND_TSCONFIGURATION, + OBJECT_TSCONFIGURATION, true }, { @@ -416,7 +415,7 @@ static const ObjectPropertyType ObjectProperty[] = 
Anum_pg_ts_dict_dictnamespace, Anum_pg_ts_dict_dictowner, InvalidAttrNumber, - ACL_KIND_TSDICTIONARY, + OBJECT_TSDICTIONARY, true }, { @@ -452,7 +451,7 @@ static const ObjectPropertyType ObjectProperty[] = Anum_pg_type_typnamespace, Anum_pg_type_typowner, Anum_pg_type_typacl, - ACL_KIND_TYPE, + OBJECT_TYPE, true }, { @@ -464,7 +463,7 @@ static const ObjectPropertyType ObjectProperty[] = InvalidAttrNumber, Anum_pg_publication_pubowner, InvalidAttrNumber, - ACL_KIND_PUBLICATION, + OBJECT_PUBLICATION, true }, { @@ -476,7 +475,7 @@ static const ObjectPropertyType ObjectProperty[] = InvalidAttrNumber, Anum_pg_subscription_subowner, InvalidAttrNumber, - ACL_KIND_SUBSCRIPTION, + OBJECT_SUBSCRIPTION, true }, { @@ -488,7 +487,7 @@ static const ObjectPropertyType ObjectProperty[] = Anum_pg_statistic_ext_stxnamespace, Anum_pg_statistic_ext_stxowner, InvalidAttrNumber, /* no ACL (same as relation) */ - ACL_KIND_STATISTICS, + OBJECT_STATISTIC_EXT, true } }; @@ -566,6 +565,9 @@ static const struct object_type_map { "function", OBJECT_FUNCTION }, + { + "procedure", OBJECT_PROCEDURE + }, /* OCLASS_TYPE */ { "type", OBJECT_TYPE @@ -884,13 +886,11 @@ get_object_address(ObjectType objtype, Node *object, address = get_object_address_type(objtype, castNode(TypeName, object), missing_ok); break; case OBJECT_AGGREGATE: - address.classId = ProcedureRelationId; - address.objectId = LookupAggWithArgs(castNode(ObjectWithArgs, object), missing_ok); - address.objectSubId = 0; - break; case OBJECT_FUNCTION: + case OBJECT_PROCEDURE: + case OBJECT_ROUTINE: address.classId = ProcedureRelationId; - address.objectId = LookupFuncWithArgs(castNode(ObjectWithArgs, object), missing_ok); + address.objectId = LookupFuncWithArgs(objtype, castNode(ObjectWithArgs, object), missing_ok); address.objectSubId = 0; break; case OBJECT_OPERATOR: @@ -1216,7 +1216,8 @@ get_relation_by_qualified_name(ObjectType objtype, List *object, switch (objtype) { case OBJECT_INDEX: - if (relation->rd_rel->relkind != RELKIND_INDEX) + if (relation->rd_rel->relkind != RELKIND_INDEX && + relation->rd_rel->relkind != RELKIND_PARTITIONED_INDEX) ereport(ERROR, (errcode(ERRCODE_WRONG_OBJECT_TYPE), errmsg("\"%s\" is not an index", @@ -1591,6 +1592,8 @@ get_object_address_opf_member(ObjectType objtype, famaddr = get_object_address_opcf(OBJECT_OPFAMILY, copy, false); /* find out left/right type names and OIDs */ + typenames[0] = typenames[1] = NULL; + typeoids[0] = typeoids[1] = InvalidOid; i = 0; foreach(cell, lsecond(object)) { @@ -2025,6 +2028,8 @@ pg_get_object_address(PG_FUNCTION_ARGS) */ if (type == OBJECT_AGGREGATE || type == OBJECT_FUNCTION || + type == OBJECT_PROCEDURE || + type == OBJECT_ROUTINE || type == OBJECT_OPERATOR || type == OBJECT_CAST || type == OBJECT_AMOP || @@ -2057,8 +2062,8 @@ pg_get_object_address(PG_FUNCTION_ARGS) } /* - * get_object_address is pretty sensitive to the length its input lists; - * check that they're what it wants. + * get_object_address is pretty sensitive to the length of its input + * lists; check that they're what it wants. 
*/ switch (type) { @@ -2087,6 +2092,7 @@ pg_get_object_address(PG_FUNCTION_ARGS) (errcode(ERRCODE_INVALID_PARAMETER_VALUE), errmsg("name list length must be at least %d", 3))); /* fall through to check args length */ + /* FALLTHROUGH */ case OBJECT_OPERATOR: if (list_length(args) != 2) ereport(ERROR, @@ -2168,6 +2174,8 @@ pg_get_object_address(PG_FUNCTION_ARGS) objnode = (Node *) list_make2(name, args); break; case OBJECT_FUNCTION: + case OBJECT_PROCEDURE: + case OBJECT_ROUTINE: case OBJECT_AGGREGATE: case OBJECT_OPERATOR: { @@ -2236,12 +2244,12 @@ check_object_ownership(Oid roleid, ObjectType objtype, ObjectAddress address, case OBJECT_POLICY: case OBJECT_TABCONSTRAINT: if (!pg_class_ownercheck(RelationGetRelid(relation), roleid)) - aclcheck_error(ACLCHECK_NOT_OWNER, ACL_KIND_CLASS, + aclcheck_error(ACLCHECK_NOT_OWNER, objtype, RelationGetRelationName(relation)); break; case OBJECT_DATABASE: if (!pg_database_ownercheck(address.objectId, roleid)) - aclcheck_error(ACLCHECK_NOT_OWNER, ACL_KIND_DATABASE, + aclcheck_error(ACLCHECK_NOT_OWNER, objtype, strVal((Value *) object)); break; case OBJECT_TYPE: @@ -2253,63 +2261,65 @@ check_object_ownership(Oid roleid, ObjectType objtype, ObjectAddress address, break; case OBJECT_AGGREGATE: case OBJECT_FUNCTION: + case OBJECT_PROCEDURE: + case OBJECT_ROUTINE: if (!pg_proc_ownercheck(address.objectId, roleid)) - aclcheck_error(ACLCHECK_NOT_OWNER, ACL_KIND_PROC, + aclcheck_error(ACLCHECK_NOT_OWNER, objtype, NameListToString((castNode(ObjectWithArgs, object))->objname)); break; case OBJECT_OPERATOR: if (!pg_oper_ownercheck(address.objectId, roleid)) - aclcheck_error(ACLCHECK_NOT_OWNER, ACL_KIND_OPER, + aclcheck_error(ACLCHECK_NOT_OWNER, objtype, NameListToString((castNode(ObjectWithArgs, object))->objname)); break; case OBJECT_SCHEMA: if (!pg_namespace_ownercheck(address.objectId, roleid)) - aclcheck_error(ACLCHECK_NOT_OWNER, ACL_KIND_NAMESPACE, + aclcheck_error(ACLCHECK_NOT_OWNER, objtype, strVal((Value *) object)); break; case OBJECT_COLLATION: if (!pg_collation_ownercheck(address.objectId, roleid)) - aclcheck_error(ACLCHECK_NOT_OWNER, ACL_KIND_COLLATION, + aclcheck_error(ACLCHECK_NOT_OWNER, objtype, NameListToString(castNode(List, object))); break; case OBJECT_CONVERSION: if (!pg_conversion_ownercheck(address.objectId, roleid)) - aclcheck_error(ACLCHECK_NOT_OWNER, ACL_KIND_CONVERSION, + aclcheck_error(ACLCHECK_NOT_OWNER, objtype, NameListToString(castNode(List, object))); break; case OBJECT_EXTENSION: if (!pg_extension_ownercheck(address.objectId, roleid)) - aclcheck_error(ACLCHECK_NOT_OWNER, ACL_KIND_EXTENSION, + aclcheck_error(ACLCHECK_NOT_OWNER, objtype, strVal((Value *) object)); break; case OBJECT_FDW: if (!pg_foreign_data_wrapper_ownercheck(address.objectId, roleid)) - aclcheck_error(ACLCHECK_NOT_OWNER, ACL_KIND_FDW, + aclcheck_error(ACLCHECK_NOT_OWNER, objtype, strVal((Value *) object)); break; case OBJECT_FOREIGN_SERVER: if (!pg_foreign_server_ownercheck(address.objectId, roleid)) - aclcheck_error(ACLCHECK_NOT_OWNER, ACL_KIND_FOREIGN_SERVER, + aclcheck_error(ACLCHECK_NOT_OWNER, objtype, strVal((Value *) object)); break; case OBJECT_EVENT_TRIGGER: if (!pg_event_trigger_ownercheck(address.objectId, roleid)) - aclcheck_error(ACLCHECK_NOT_OWNER, ACL_KIND_EVENT_TRIGGER, + aclcheck_error(ACLCHECK_NOT_OWNER, objtype, strVal((Value *) object)); break; case OBJECT_LANGUAGE: if (!pg_language_ownercheck(address.objectId, roleid)) - aclcheck_error(ACLCHECK_NOT_OWNER, ACL_KIND_LANGUAGE, + aclcheck_error(ACLCHECK_NOT_OWNER, objtype, strVal((Value *) object)); 
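These hunks switch the second argument of aclcheck_error() from the old AclObjectKind constants to ObjectType values. A sketch of the new calling convention for an arbitrary relation, using the get_relkind_objtype() helper this patch introduces (illustrative only; the wrapper name is invented and the prototype is assumed to live in catalog/objectaddress.h):

#include "postgres.h"
#include "catalog/objectaddress.h"
#include "miscadmin.h"
#include "utils/acl.h"
#include "utils/lsyscache.h"

/* Hypothetical wrapper: owner check on an arbitrary relation, new style */
static void
check_relation_ownership(Oid relid)
{
    /*
     * Previously the second argument would have been ACL_KIND_CLASS; now an
     * ObjectType is expected, derived here from the relation's relkind.
     */
    if (!pg_class_ownercheck(relid, GetUserId()))
        aclcheck_error(ACLCHECK_NOT_OWNER,
                       get_relkind_objtype(get_rel_relkind(relid)),
                       get_rel_name(relid));
}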
break; case OBJECT_OPCLASS: if (!pg_opclass_ownercheck(address.objectId, roleid)) - aclcheck_error(ACLCHECK_NOT_OWNER, ACL_KIND_OPCLASS, + aclcheck_error(ACLCHECK_NOT_OWNER, objtype, NameListToString(castNode(List, object))); break; case OBJECT_OPFAMILY: if (!pg_opfamily_ownercheck(address.objectId, roleid)) - aclcheck_error(ACLCHECK_NOT_OWNER, ACL_KIND_OPFAMILY, + aclcheck_error(ACLCHECK_NOT_OWNER, objtype, NameListToString(castNode(List, object))); break; case OBJECT_LARGEOBJECT: @@ -2339,12 +2349,12 @@ check_object_ownership(Oid roleid, ObjectType objtype, ObjectAddress address, break; case OBJECT_PUBLICATION: if (!pg_publication_ownercheck(address.objectId, roleid)) - aclcheck_error(ACLCHECK_NOT_OWNER, ACL_KIND_PUBLICATION, + aclcheck_error(ACLCHECK_NOT_OWNER, objtype, strVal((Value *) object)); break; case OBJECT_SUBSCRIPTION: if (!pg_subscription_ownercheck(address.objectId, roleid)) - aclcheck_error(ACLCHECK_NOT_OWNER, ACL_KIND_SUBSCRIPTION, + aclcheck_error(ACLCHECK_NOT_OWNER, objtype, strVal((Value *) object)); break; case OBJECT_TRANSFORM: @@ -2358,17 +2368,17 @@ check_object_ownership(Oid roleid, ObjectType objtype, ObjectAddress address, break; case OBJECT_TABLESPACE: if (!pg_tablespace_ownercheck(address.objectId, roleid)) - aclcheck_error(ACLCHECK_NOT_OWNER, ACL_KIND_TABLESPACE, + aclcheck_error(ACLCHECK_NOT_OWNER, objtype, strVal((Value *) object)); break; case OBJECT_TSDICTIONARY: if (!pg_ts_dict_ownercheck(address.objectId, roleid)) - aclcheck_error(ACLCHECK_NOT_OWNER, ACL_KIND_TSDICTIONARY, + aclcheck_error(ACLCHECK_NOT_OWNER, objtype, NameListToString(castNode(List, object))); break; case OBJECT_TSCONFIGURATION: if (!pg_ts_config_ownercheck(address.objectId, roleid)) - aclcheck_error(ACLCHECK_NOT_OWNER, ACL_KIND_TSCONFIGURATION, + aclcheck_error(ACLCHECK_NOT_OWNER, objtype, NameListToString(castNode(List, object))); break; case OBJECT_ROLE: @@ -2532,12 +2542,22 @@ get_object_attnum_acl(Oid class_id) return prop->attnum_acl; } -AclObjectKind -get_object_aclkind(Oid class_id) +ObjectType +get_object_type(Oid class_id, Oid object_id) { const ObjectPropertyType *prop = get_object_property_data(class_id); - return prop->acl_kind; + if (prop->objtype == OBJECT_TABLE) + { + /* + * If the property data says it's a table, dig a little deeper to get + * the real relation kind, so that callers can produce more precise + * error messages. 
+ */ + return get_relkind_objtype(get_rel_relkind(object_id)); + } + else + return prop->objtype; } bool @@ -2661,11 +2681,23 @@ getObjectDescription(const ObjectAddress *object) switch (getObjectClass(object)) { case OCLASS_CLASS: - getRelationDescription(&buffer, object->objectId); - if (object->objectSubId != 0) - appendStringInfo(&buffer, _(" column %s"), - get_relid_attribute_name(object->objectId, - object->objectSubId)); + if (object->objectSubId == 0) + getRelationDescription(&buffer, object->objectId); + else + { + /* column, not whole relation */ + StringInfoData rel; + + initStringInfo(&rel); + getRelationDescription(&rel, object->objectId); + /* translator: second %s is, e.g., "table %s" */ + appendStringInfo(&buffer, _("column %s of %s"), + get_attname(object->objectId, + object->objectSubId, + false), + rel.data); + pfree(rel.data); + } break; case OCLASS_PROC: @@ -2717,6 +2749,7 @@ getObjectDescription(const ObjectAddress *object) { HeapTuple collTup; Form_pg_collation coll; + char *nspname; collTup = SearchSysCache1(COLLOID, ObjectIdGetDatum(object->objectId)); @@ -2724,8 +2757,16 @@ getObjectDescription(const ObjectAddress *object) elog(ERROR, "cache lookup failed for collation %u", object->objectId); coll = (Form_pg_collation) GETSTRUCT(collTup); + + /* Qualify the name if not visible in search path */ + if (CollationIsVisible(object->objectId)) + nspname = NULL; + else + nspname = get_namespace_name(coll->collnamespace); + appendStringInfo(&buffer, _("collation %s"), - NameStr(coll->collname)); + quote_qualified_identifier(nspname, + NameStr(coll->collname))); ReleaseSysCache(collTup); break; } @@ -2748,6 +2789,7 @@ getObjectDescription(const ObjectAddress *object) initStringInfo(&rel); getRelationDescription(&rel, con->conrelid); + /* translator: second %s is, e.g., "table %s" */ appendStringInfo(&buffer, _("constraint %s on %s"), NameStr(con->conname), rel.data); pfree(rel.data); @@ -2765,14 +2807,25 @@ getObjectDescription(const ObjectAddress *object) case OCLASS_CONVERSION: { HeapTuple conTup; + Form_pg_conversion conv; + char *nspname; conTup = SearchSysCache1(CONVOID, ObjectIdGetDatum(object->objectId)); if (!HeapTupleIsValid(conTup)) elog(ERROR, "cache lookup failed for conversion %u", object->objectId); + conv = (Form_pg_conversion) GETSTRUCT(conTup); + + /* Qualify the name if not visible in search path */ + if (ConversionIsVisible(object->objectId)) + nspname = NULL; + else + nspname = get_namespace_name(conv->connamespace); + appendStringInfo(&buffer, _("conversion %s"), - NameStr(((Form_pg_conversion) GETSTRUCT(conTup))->conname)); + quote_qualified_identifier(nspname, + NameStr(conv->conname))); ReleaseSysCache(conTup); break; } @@ -2808,7 +2861,8 @@ getObjectDescription(const ObjectAddress *object) colobject.objectId = attrdef->adrelid; colobject.objectSubId = attrdef->adnum; - appendStringInfo(&buffer, _("default for %s"), + /* translator: %s is typically "column %s of table %s" */ + appendStringInfo(&buffer, _("default value for %s"), getObjectDescription(&colobject)); systable_endscan(adscan); @@ -2995,6 +3049,7 @@ getObjectDescription(const ObjectAddress *object) SysScanDesc rcscan; HeapTuple tup; Form_pg_rewrite rule; + StringInfoData rel; ruleDesc = heap_open(RewriteRelationId, AccessShareLock); @@ -3011,13 +3066,15 @@ getObjectDescription(const ObjectAddress *object) if (!HeapTupleIsValid(tup)) elog(ERROR, "could not find tuple for rule %u", object->objectId); - rule = (Form_pg_rewrite) GETSTRUCT(tup); - appendStringInfo(&buffer, _("rule %s on "), - 
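The description hunks above all follow the same pattern: qualify the object name only when it is not visible in the current search_path. A condensed sketch for the collation case (illustrative only; the helper name is invented, and quote_qualified_identifier() simply omits the prefix when the namespace argument is NULL):

#include "postgres.h"
#include "catalog/namespace.h"
#include "utils/builtins.h"
#include "utils/lsyscache.h"

/* Hypothetical helper mirroring the qualification logic used above */
static char *
collation_display_name(Oid colloid, Oid collnamespace, const char *collname)
{
    char   *nspname;

    /* Qualify only when the collation is not visible in search_path */
    if (CollationIsVisible(colloid))
        nspname = NULL;
    else
        nspname = get_namespace_name(collnamespace);

    /* quote_qualified_identifier() drops the prefix when nspname is NULL */
    return quote_qualified_identifier(nspname, collname);
}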
NameStr(rule->rulename)); - getRelationDescription(&buffer, rule->ev_class); + initStringInfo(&rel); + getRelationDescription(&rel, rule->ev_class); + /* translator: second %s is, e.g., "table %s" */ + appendStringInfo(&buffer, _("rule %s on %s"), + NameStr(rule->rulename), rel.data); + pfree(rel.data); systable_endscan(rcscan); heap_close(ruleDesc, AccessShareLock); break; @@ -3030,6 +3087,7 @@ getObjectDescription(const ObjectAddress *object) SysScanDesc tgscan; HeapTuple tup; Form_pg_trigger trig; + StringInfoData rel; trigDesc = heap_open(TriggerRelationId, AccessShareLock); @@ -3046,13 +3104,15 @@ getObjectDescription(const ObjectAddress *object) if (!HeapTupleIsValid(tup)) elog(ERROR, "could not find tuple for trigger %u", object->objectId); - trig = (Form_pg_trigger) GETSTRUCT(tup); - appendStringInfo(&buffer, _("trigger %s on "), - NameStr(trig->tgname)); - getRelationDescription(&buffer, trig->tgrelid); + initStringInfo(&rel); + getRelationDescription(&rel, trig->tgrelid); + /* translator: second %s is, e.g., "table %s" */ + appendStringInfo(&buffer, _("trigger %s on %s"), + NameStr(trig->tgname), rel.data); + pfree(rel.data); systable_endscan(tgscan); heap_close(trigDesc, AccessShareLock); break; @@ -3074,17 +3134,24 @@ getObjectDescription(const ObjectAddress *object) { HeapTuple stxTup; Form_pg_statistic_ext stxForm; + char *nspname; stxTup = SearchSysCache1(STATEXTOID, ObjectIdGetDatum(object->objectId)); if (!HeapTupleIsValid(stxTup)) elog(ERROR, "could not find tuple for statistics object %u", object->objectId); - stxForm = (Form_pg_statistic_ext) GETSTRUCT(stxTup); + /* Qualify the name if not visible in search path */ + if (StatisticsObjIsVisible(object->objectId)) + nspname = NULL; + else + nspname = get_namespace_name(stxForm->stxnamespace); + appendStringInfo(&buffer, _("statistics object %s"), - NameStr(stxForm->stxname)); + quote_qualified_identifier(nspname, + NameStr(stxForm->stxname))); ReleaseSysCache(stxTup); break; @@ -3093,14 +3160,25 @@ getObjectDescription(const ObjectAddress *object) case OCLASS_TSPARSER: { HeapTuple tup; + Form_pg_ts_parser prsForm; + char *nspname; tup = SearchSysCache1(TSPARSEROID, ObjectIdGetDatum(object->objectId)); if (!HeapTupleIsValid(tup)) elog(ERROR, "cache lookup failed for text search parser %u", object->objectId); + prsForm = (Form_pg_ts_parser) GETSTRUCT(tup); + + /* Qualify the name if not visible in search path */ + if (TSParserIsVisible(object->objectId)) + nspname = NULL; + else + nspname = get_namespace_name(prsForm->prsnamespace); + appendStringInfo(&buffer, _("text search parser %s"), - NameStr(((Form_pg_ts_parser) GETSTRUCT(tup))->prsname)); + quote_qualified_identifier(nspname, + NameStr(prsForm->prsname))); ReleaseSysCache(tup); break; } @@ -3108,14 +3186,25 @@ getObjectDescription(const ObjectAddress *object) case OCLASS_TSDICT: { HeapTuple tup; + Form_pg_ts_dict dictForm; + char *nspname; tup = SearchSysCache1(TSDICTOID, ObjectIdGetDatum(object->objectId)); if (!HeapTupleIsValid(tup)) elog(ERROR, "cache lookup failed for text search dictionary %u", object->objectId); + dictForm = (Form_pg_ts_dict) GETSTRUCT(tup); + + /* Qualify the name if not visible in search path */ + if (TSDictionaryIsVisible(object->objectId)) + nspname = NULL; + else + nspname = get_namespace_name(dictForm->dictnamespace); + appendStringInfo(&buffer, _("text search dictionary %s"), - NameStr(((Form_pg_ts_dict) GETSTRUCT(tup))->dictname)); + quote_qualified_identifier(nspname, + NameStr(dictForm->dictname))); ReleaseSysCache(tup); break; } @@ 
-3123,14 +3212,25 @@ getObjectDescription(const ObjectAddress *object) case OCLASS_TSTEMPLATE: { HeapTuple tup; + Form_pg_ts_template tmplForm; + char *nspname; tup = SearchSysCache1(TSTEMPLATEOID, ObjectIdGetDatum(object->objectId)); if (!HeapTupleIsValid(tup)) elog(ERROR, "cache lookup failed for text search template %u", object->objectId); + tmplForm = (Form_pg_ts_template) GETSTRUCT(tup); + + /* Qualify the name if not visible in search path */ + if (TSTemplateIsVisible(object->objectId)) + nspname = NULL; + else + nspname = get_namespace_name(tmplForm->tmplnamespace); + appendStringInfo(&buffer, _("text search template %s"), - NameStr(((Form_pg_ts_template) GETSTRUCT(tup))->tmplname)); + quote_qualified_identifier(nspname, + NameStr(tmplForm->tmplname))); ReleaseSysCache(tup); break; } @@ -3138,14 +3238,25 @@ getObjectDescription(const ObjectAddress *object) case OCLASS_TSCONFIG: { HeapTuple tup; + Form_pg_ts_config cfgForm; + char *nspname; tup = SearchSysCache1(TSCONFIGOID, ObjectIdGetDatum(object->objectId)); if (!HeapTupleIsValid(tup)) elog(ERROR, "cache lookup failed for text search configuration %u", object->objectId); + cfgForm = (Form_pg_ts_config) GETSTRUCT(tup); + + /* Qualify the name if not visible in search path */ + if (TSConfigIsVisible(object->objectId)) + nspname = NULL; + else + nspname = get_namespace_name(cfgForm->cfgnamespace); + appendStringInfo(&buffer, _("text search configuration %s"), - NameStr(((Form_pg_ts_config) GETSTRUCT(tup))->cfgname)); + quote_qualified_identifier(nspname, + NameStr(cfgForm->cfgname))); ReleaseSysCache(tup); break; } @@ -3235,6 +3346,8 @@ getObjectDescription(const ObjectAddress *object) SysScanDesc rcscan; HeapTuple tup; Form_pg_default_acl defacl; + char *rolename; + char *nspname; defaclrel = heap_open(DefaultAclRelationId, AccessShareLock); @@ -3254,48 +3367,74 @@ getObjectDescription(const ObjectAddress *object) defacl = (Form_pg_default_acl) GETSTRUCT(tup); + rolename = GetUserNameFromId(defacl->defaclrole, false); + + if (OidIsValid(defacl->defaclnamespace)) + nspname = get_namespace_name(defacl->defaclnamespace); + else + nspname = NULL; + switch (defacl->defaclobjtype) { case DEFACLOBJ_RELATION: - appendStringInfo(&buffer, - _("default privileges on new relations belonging to role %s"), - GetUserNameFromId(defacl->defaclrole, false)); + if (nspname) + appendStringInfo(&buffer, + _("default privileges on new relations belonging to role %s in schema %s"), + rolename, nspname); + else + appendStringInfo(&buffer, + _("default privileges on new relations belonging to role %s"), + rolename); break; case DEFACLOBJ_SEQUENCE: - appendStringInfo(&buffer, - _("default privileges on new sequences belonging to role %s"), - GetUserNameFromId(defacl->defaclrole, false)); + if (nspname) + appendStringInfo(&buffer, + _("default privileges on new sequences belonging to role %s in schema %s"), + rolename, nspname); + else + appendStringInfo(&buffer, + _("default privileges on new sequences belonging to role %s"), + rolename); break; case DEFACLOBJ_FUNCTION: - appendStringInfo(&buffer, - _("default privileges on new functions belonging to role %s"), - GetUserNameFromId(defacl->defaclrole, false)); + if (nspname) + appendStringInfo(&buffer, + _("default privileges on new functions belonging to role %s in schema %s"), + rolename, nspname); + else + appendStringInfo(&buffer, + _("default privileges on new functions belonging to role %s"), + rolename); break; case DEFACLOBJ_TYPE: - appendStringInfo(&buffer, - _("default privileges on new types 
belonging to role %s"), - GetUserNameFromId(defacl->defaclrole, false)); + if (nspname) + appendStringInfo(&buffer, + _("default privileges on new types belonging to role %s in schema %s"), + rolename, nspname); + else + appendStringInfo(&buffer, + _("default privileges on new types belonging to role %s"), + rolename); break; case DEFACLOBJ_NAMESPACE: + Assert(!nspname); appendStringInfo(&buffer, _("default privileges on new schemas belonging to role %s"), - GetUserNameFromId(defacl->defaclrole, false)); + rolename); break; default: /* shouldn't get here */ - appendStringInfo(&buffer, - _("default privileges belonging to role %s"), - GetUserNameFromId(defacl->defaclrole, false)); + if (nspname) + appendStringInfo(&buffer, + _("default privileges belonging to role %s in schema %s"), + rolename, nspname); + else + appendStringInfo(&buffer, + _("default privileges belonging to role %s"), + rolename); break; } - if (OidIsValid(defacl->defaclnamespace)) - { - appendStringInfo(&buffer, - _(" in schema %s"), - get_namespace_name(defacl->defaclnamespace)); - } - systable_endscan(rcscan); heap_close(defaclrel, AccessShareLock); break; @@ -3335,6 +3474,7 @@ getObjectDescription(const ObjectAddress *object) SysScanDesc sscan; HeapTuple tuple; Form_pg_policy form_policy; + StringInfoData rel; policy_rel = heap_open(PolicyRelationId, AccessShareLock); @@ -3351,13 +3491,15 @@ getObjectDescription(const ObjectAddress *object) if (!HeapTupleIsValid(tuple)) elog(ERROR, "could not find tuple for policy %u", object->objectId); - form_policy = (Form_pg_policy) GETSTRUCT(tuple); - appendStringInfo(&buffer, _("policy %s on "), - NameStr(form_policy->polname)); - getRelationDescription(&buffer, form_policy->polrelid); + initStringInfo(&rel); + getRelationDescription(&rel, form_policy->polrelid); + /* translator: second %s is, e.g., "table %s" */ + appendStringInfo(&buffer, _("policy %s on %s"), + NameStr(form_policy->polname), rel.data); + pfree(rel.data); systable_endscan(sscan); heap_close(policy_rel, AccessShareLock); break; @@ -3366,7 +3508,8 @@ getObjectDescription(const ObjectAddress *object) case OCLASS_PUBLICATION: { appendStringInfo(&buffer, _("publication %s"), - get_publication_name(object->objectId)); + get_publication_name(object->objectId, + false)); break; } @@ -3375,6 +3518,7 @@ getObjectDescription(const ObjectAddress *object) HeapTuple tup; char *pubname; Form_pg_publication_rel prform; + StringInfoData rel; tup = SearchSysCache1(PUBLICATIONREL, ObjectIdGetDatum(object->objectId)); @@ -3383,10 +3527,15 @@ getObjectDescription(const ObjectAddress *object) object->objectId); prform = (Form_pg_publication_rel) GETSTRUCT(tup); - pubname = get_publication_name(prform->prpubid); + pubname = get_publication_name(prform->prpubid, false); - appendStringInfo(&buffer, _("publication table %s in publication %s"), - get_rel_name(prform->prrelid), pubname); + initStringInfo(&rel); + getRelationDescription(&rel, prform->prrelid); + + /* translator: first %s is, e.g., "table %s" */ + appendStringInfo(&buffer, _("publication of %s in publication %s"), + rel.data, pubname); + pfree(rel.data); ReleaseSysCache(tup); break; } @@ -3394,7 +3543,8 @@ getObjectDescription(const ObjectAddress *object) case OCLASS_SUBSCRIPTION: { appendStringInfo(&buffer, _("subscription %s"), - get_subscription_name(object->objectId)); + get_subscription_name(object->objectId, + false)); break; } @@ -3445,6 +3595,8 @@ getObjectDescriptionOids(Oid classid, Oid objid) /* * subroutine for getObjectDescription: describe a relation + * + * 
The result is appended to "buffer". */ static void getRelationDescription(StringInfo buffer, Oid relid) @@ -3476,6 +3628,7 @@ getRelationDescription(StringInfo buffer, Oid relid) relname); break; case RELKIND_INDEX: + case RELKIND_PARTITIONED_INDEX: appendStringInfo(buffer, _("index %s"), relname); break; @@ -3950,6 +4103,7 @@ getRelationTypeDescription(StringInfo buffer, Oid relid, int32 objectSubId) appendStringInfoString(buffer, "table"); break; case RELKIND_INDEX: + case RELKIND_PARTITIONED_INDEX: appendStringInfoString(buffer, "index"); break; case RELKIND_SEQUENCE: @@ -4024,9 +4178,11 @@ getProcedureTypeDescription(StringInfo buffer, Oid procid) elog(ERROR, "cache lookup failed for procedure %u", procid); procForm = (Form_pg_proc) GETSTRUCT(procTup); - if (procForm->proisagg) + if (procForm->prokind == PROKIND_AGGREGATE) appendStringInfoString(buffer, "aggregate"); - else + else if (procForm->prokind == PROKIND_PROCEDURE) + appendStringInfoString(buffer, "procedure"); + else /* function or window function */ appendStringInfoString(buffer, "function"); ReleaseSysCache(procTup); @@ -4081,8 +4237,8 @@ getObjectIdentityParts(const ObjectAddress *object, { char *attr; - attr = get_relid_attribute_name(object->objectId, - object->objectSubId); + attr = get_attname(object->objectId, object->objectSubId, + false); appendStringInfo(&buffer, ".%s", quote_identifier(attr)); if (objname) *objname = lappend(*objname, attr); @@ -4888,7 +5044,7 @@ getObjectIdentityParts(const ObjectAddress *object, { char *pubname; - pubname = get_publication_name(object->objectId); + pubname = get_publication_name(object->objectId, false); appendStringInfoString(&buffer, quote_identifier(pubname)); if (objname) @@ -4909,16 +5065,13 @@ getObjectIdentityParts(const ObjectAddress *object, object->objectId); prform = (Form_pg_publication_rel) GETSTRUCT(tup); - pubname = get_publication_name(prform->prpubid); + pubname = get_publication_name(prform->prpubid, false); - appendStringInfo(&buffer, _("%s in publication %s"), - get_rel_name(prform->prrelid), pubname); + getRelationIdentity(&buffer, prform->prrelid, objname); + appendStringInfo(&buffer, " in publication %s", pubname); - if (objname) - { - getRelationIdentity(&buffer, prform->prrelid, objname); + if (objargs) *objargs = list_make1(pubname); - } ReleaseSysCache(tup); break; @@ -4928,7 +5081,7 @@ getObjectIdentityParts(const ObjectAddress *object, { char *subname; - subname = get_subscription_name(object->objectId); + subname = get_subscription_name(object->objectId, false); appendStringInfoString(&buffer, quote_identifier(subname)); if (objname) @@ -5051,7 +5204,7 @@ getRelationIdentity(StringInfo buffer, Oid relid, List **object) } /* - * Auxiliary function to return a TEXT array out of a list of C-strings. + * Auxiliary function to build a TEXT array out of a list of C-strings. 
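The getProcedureTypeDescription() hunk above reflects the replacement of pg_proc.proisagg by the prokind column. A sketch of how a caller distinguishes the kinds under the new catalog layout (illustrative only; assumes the PROKIND_* constants from pg_proc.h and an invented helper name):

#include "postgres.h"
#include "access/htup_details.h"
#include "catalog/pg_proc.h"

/* Hypothetical helper: classify a pg_proc tuple under the new prokind column */
static const char *
proc_kind_name(HeapTuple procTup)
{
    Form_pg_proc procForm = (Form_pg_proc) GETSTRUCT(procTup);

    switch (procForm->prokind)
    {
        case PROKIND_AGGREGATE:     /* replaces the old proisagg boolean */
            return "aggregate";
        case PROKIND_PROCEDURE:     /* objects created with CREATE PROCEDURE */
            return "procedure";
        case PROKIND_WINDOW:
            return "window function";
        default:                    /* PROKIND_FUNCTION */
            return "function";
    }
}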
*/ ArrayType * strlist_to_textarray(List *list) @@ -5063,12 +5216,14 @@ strlist_to_textarray(List *list) MemoryContext memcxt; MemoryContext oldcxt; + /* Work in a temp context; easier than individually pfree'ing the Datums */ memcxt = AllocSetContextCreate(CurrentMemoryContext, "strlist to array", ALLOCSET_DEFAULT_SIZES); oldcxt = MemoryContextSwitchTo(memcxt); - datums = palloc(sizeof(text *) * list_length(list)); + datums = (Datum *) palloc(sizeof(Datum) * list_length(list)); + foreach(cell, list) { char *name = lfirst(cell); @@ -5080,7 +5235,38 @@ strlist_to_textarray(List *list) arr = construct_array(datums, list_length(list), TEXTOID, -1, false, 'i'); + MemoryContextDelete(memcxt); return arr; } + +ObjectType +get_relkind_objtype(char relkind) +{ + switch (relkind) + { + case RELKIND_RELATION: + case RELKIND_PARTITIONED_TABLE: + return OBJECT_TABLE; + case RELKIND_INDEX: + case RELKIND_PARTITIONED_INDEX: + return OBJECT_INDEX; + case RELKIND_SEQUENCE: + return OBJECT_SEQUENCE; + case RELKIND_VIEW: + return OBJECT_VIEW; + case RELKIND_MATVIEW: + return OBJECT_MATVIEW; + case RELKIND_FOREIGN_TABLE: + return OBJECT_FOREIGN_TABLE; + + /* + * other relkinds are not supported here because they don't map to + * OBJECT_* values + */ + default: + elog(ERROR, "unexpected relkind: %d", relkind); + return 0; + } +} diff --git a/src/backend/catalog/partition.c b/src/backend/catalog/partition.c index c1a307c8d3..558022647c 100644 --- a/src/backend/catalog/partition.c +++ b/src/backend/catalog/partition.c @@ -3,7 +3,7 @@ * partition.c * Partitioning related data structures and functions. * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * @@ -12,830 +12,73 @@ * *------------------------------------------------------------------------- */ - #include "postgres.h" +#include "access/genam.h" #include "access/heapam.h" #include "access/htup_details.h" -#include "access/nbtree.h" +#include "access/tupconvert.h" #include "access/sysattr.h" -#include "catalog/dependency.h" #include "catalog/indexing.h" -#include "catalog/objectaddress.h" #include "catalog/partition.h" -#include "catalog/pg_collation.h" #include "catalog/pg_inherits.h" -#include "catalog/pg_inherits_fn.h" -#include "catalog/pg_opclass.h" -#include "catalog/pg_type.h" -#include "executor/executor.h" -#include "miscadmin.h" +#include "catalog/pg_partitioned_table.h" #include "nodes/makefuncs.h" -#include "nodes/nodeFuncs.h" -#include "nodes/parsenodes.h" #include "optimizer/clauses.h" -#include "optimizer/planmain.h" +#include "optimizer/prep.h" #include "optimizer/var.h" +#include "partitioning/partbounds.h" #include "rewrite/rewriteManip.h" -#include "storage/lmgr.h" -#include "utils/array.h" -#include "utils/builtins.h" -#include "utils/datum.h" -#include "utils/memutils.h" #include "utils/fmgroids.h" -#include "utils/inval.h" -#include "utils/lsyscache.h" +#include "utils/partcache.h" #include "utils/rel.h" -#include "utils/ruleutils.h" #include "utils/syscache.h" -/* - * Information about bounds of a partitioned relation - * - * A list partition datum that is known to be NULL is never put into the - * datums array. Instead, it is tracked using the null_index field. 
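strlist_to_textarray(), shown above, builds a text[] in the caller's memory context from a list of C strings and now frees its temporary context afterwards. A small usage sketch (illustrative only; the helper name is invented and the prototype is assumed to be in catalog/objectaddress.h):

#include "postgres.h"
#include "catalog/objectaddress.h"
#include "nodes/pg_list.h"
#include "utils/array.h"

/* Hypothetical helper: pack two C strings into a text[] Datum */
static Datum
make_name_array(const char *schema, const char *name)
{
    List       *names = NIL;
    ArrayType  *arr;

    names = lappend(names, pstrdup(schema));
    names = lappend(names, pstrdup(name));

    /* the array is built in the caller's context; the list is ours to free */
    arr = strlist_to_textarray(names);
    list_free(names);

    return PointerGetDatum(arr);
}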
- * - * In the case of range partitioning, ndatums will typically be far less than - * 2 * nparts, because a partition's upper bound and the next partition's lower - * bound are the same in most common cases, and we only store one of them (the - * upper bound). - * - * In the case of list partitioning, the indexes array stores one entry for - * every datum, which is the index of the partition that accepts a given datum. - * In case of range partitioning, it stores one entry per distinct range - * datum, which is the index of the partition for which a given datum - * is an upper bound. - */ - -typedef struct PartitionBoundInfoData -{ - char strategy; /* list or range bounds? */ - int ndatums; /* Length of the datums following array */ - Datum **datums; /* Array of datum-tuples with key->partnatts - * datums each */ - PartitionRangeDatumKind **kind; /* The kind of each range bound datum; - * NULL for list partitioned tables */ - int *indexes; /* Partition indexes; one entry per member of - * the datums array (plus one if range - * partitioned table) */ - int null_index; /* Index of the null-accepting partition; -1 - * if there isn't one */ -} PartitionBoundInfoData; - -#define partition_bound_accepts_nulls(bi) ((bi)->null_index != -1) - -/* - * When qsort'ing partition bounds after reading from the catalog, each bound - * is represented with one of the following structs. - */ - -/* One value coming from some (index'th) list partition */ -typedef struct PartitionListValue -{ - int index; - Datum value; -} PartitionListValue; - -/* One bound of a range partition */ -typedef struct PartitionRangeBound -{ - int index; - Datum *datums; /* range bound datums */ - PartitionRangeDatumKind *kind; /* the kind of each datum */ - bool lower; /* this is the lower (vs upper) bound */ -} PartitionRangeBound; - -static int32 qsort_partition_list_value_cmp(const void *a, const void *b, - void *arg); -static int32 qsort_partition_rbound_cmp(const void *a, const void *b, - void *arg); - -static Oid get_partition_operator(PartitionKey key, int col, - StrategyNumber strategy, bool *need_relabel); -static Expr *make_partition_op_expr(PartitionKey key, int keynum, - uint16 strategy, Expr *arg1, Expr *arg2); -static void get_range_key_properties(PartitionKey key, int keynum, - PartitionRangeDatum *ldatum, - PartitionRangeDatum *udatum, - ListCell **partexprs_item, - Expr **keyCol, - Const **lower_val, Const **upper_val); -static List *get_qual_for_list(PartitionKey key, PartitionBoundSpec *spec); -static List *get_qual_for_range(PartitionKey key, PartitionBoundSpec *spec); -static List *generate_partition_qual(Relation rel); - -static PartitionRangeBound *make_one_range_bound(PartitionKey key, int index, - List *datums, bool lower); -static int32 partition_rbound_cmp(PartitionKey key, - Datum *datums1, PartitionRangeDatumKind *kind1, - bool lower1, PartitionRangeBound *b2); -static int32 partition_rbound_datum_cmp(PartitionKey key, - Datum *rb_datums, PartitionRangeDatumKind *rb_kind, - Datum *tuple_datums); - -static int32 partition_bound_cmp(PartitionKey key, - PartitionBoundInfo boundinfo, - int offset, void *probe, bool probe_is_bound); -static int partition_bound_bsearch(PartitionKey key, - PartitionBoundInfo boundinfo, - void *probe, bool probe_is_bound, bool *is_equal); - -/* - * RelationBuildPartitionDesc - * Form rel's partition descriptor - * - * Not flushed from the cache by RelationClearRelation() unless changed because - * of addition or removal of partition. 
- */ -void -RelationBuildPartitionDesc(Relation rel) -{ - List *inhoids, - *partoids; - Oid *oids = NULL; - List *boundspecs = NIL; - ListCell *cell; - int i, - nparts; - PartitionKey key = RelationGetPartitionKey(rel); - PartitionDesc result; - MemoryContext oldcxt; - - int ndatums = 0; - - /* List partitioning specific */ - PartitionListValue **all_values = NULL; - int null_index = -1; - - /* Range partitioning specific */ - PartitionRangeBound **rbounds = NULL; - - /* - * The following could happen in situations where rel has a pg_class entry - * but not the pg_partitioned_table entry yet. - */ - if (key == NULL) - return; - - /* Get partition oids from pg_inherits */ - inhoids = find_inheritance_children(RelationGetRelid(rel), NoLock); - - /* Collect bound spec nodes in a list */ - i = 0; - partoids = NIL; - foreach(cell, inhoids) - { - Oid inhrelid = lfirst_oid(cell); - HeapTuple tuple; - Datum datum; - bool isnull; - Node *boundspec; - - tuple = SearchSysCache1(RELOID, inhrelid); - if (!HeapTupleIsValid(tuple)) - elog(ERROR, "cache lookup failed for relation %u", inhrelid); - - /* - * It is possible that the pg_class tuple of a partition has not been - * updated yet to set its relpartbound field. The only case where - * this happens is when we open the parent relation to check using its - * partition descriptor that a new partition's bound does not overlap - * some existing partition. - */ - if (!((Form_pg_class) GETSTRUCT(tuple))->relispartition) - { - ReleaseSysCache(tuple); - continue; - } - - datum = SysCacheGetAttr(RELOID, tuple, - Anum_pg_class_relpartbound, - &isnull); - Assert(!isnull); - boundspec = (Node *) stringToNode(TextDatumGetCString(datum)); - boundspecs = lappend(boundspecs, boundspec); - partoids = lappend_oid(partoids, inhrelid); - ReleaseSysCache(tuple); - } - - nparts = list_length(partoids); - - if (nparts > 0) - { - oids = (Oid *) palloc(nparts * sizeof(Oid)); - i = 0; - foreach(cell, partoids) - oids[i++] = lfirst_oid(cell); - - /* Convert from node to the internal representation */ - if (key->strategy == PARTITION_STRATEGY_LIST) - { - List *non_null_values = NIL; - - /* - * Create a unified list of non-null values across all partitions. - */ - i = 0; - null_index = -1; - foreach(cell, boundspecs) - { - PartitionBoundSpec *spec = castNode(PartitionBoundSpec, - lfirst(cell)); - ListCell *c; - - if (spec->strategy != PARTITION_STRATEGY_LIST) - elog(ERROR, "invalid strategy in partition bound spec"); - - foreach(c, spec->listdatums) - { - Const *val = castNode(Const, lfirst(c)); - PartitionListValue *list_value = NULL; - - if (!val->constisnull) - { - list_value = (PartitionListValue *) - palloc0(sizeof(PartitionListValue)); - list_value->index = i; - list_value->value = val->constvalue; - } - else - { - /* - * Never put a null into the values array, flag - * instead for the code further down below where we - * construct the actual relcache struct. - */ - if (null_index != -1) - elog(ERROR, "found null more than once"); - null_index = i; - } - - if (list_value) - non_null_values = lappend(non_null_values, - list_value); - } - - i++; - } - - ndatums = list_length(non_null_values); - - /* - * Collect all list values in one array. Alongside the value, we - * also save the index of partition the value comes from. 
- */ - all_values = (PartitionListValue **) palloc(ndatums * - sizeof(PartitionListValue *)); - i = 0; - foreach(cell, non_null_values) - { - PartitionListValue *src = lfirst(cell); - - all_values[i] = (PartitionListValue *) - palloc(sizeof(PartitionListValue)); - all_values[i]->value = src->value; - all_values[i]->index = src->index; - i++; - } - - qsort_arg(all_values, ndatums, sizeof(PartitionListValue *), - qsort_partition_list_value_cmp, (void *) key); - } - else if (key->strategy == PARTITION_STRATEGY_RANGE) - { - int j, - k; - PartitionRangeBound **all_bounds, - *prev; - bool *distinct_indexes; - - all_bounds = (PartitionRangeBound **) palloc0(2 * nparts * - sizeof(PartitionRangeBound *)); - distinct_indexes = (bool *) palloc(2 * nparts * sizeof(bool)); - - /* - * Create a unified list of range bounds across all the - * partitions. - */ - i = j = 0; - foreach(cell, boundspecs) - { - PartitionBoundSpec *spec = castNode(PartitionBoundSpec, - lfirst(cell)); - PartitionRangeBound *lower, - *upper; - - if (spec->strategy != PARTITION_STRATEGY_RANGE) - elog(ERROR, "invalid strategy in partition bound spec"); - - lower = make_one_range_bound(key, i, spec->lowerdatums, - true); - upper = make_one_range_bound(key, i, spec->upperdatums, - false); - all_bounds[j] = lower; - all_bounds[j + 1] = upper; - j += 2; - i++; - } - Assert(j == 2 * nparts); - - /* Sort all the bounds in ascending order */ - qsort_arg(all_bounds, 2 * nparts, - sizeof(PartitionRangeBound *), - qsort_partition_rbound_cmp, - (void *) key); - - /* - * Count the number of distinct bounds to allocate an array of - * that size. - */ - ndatums = 0; - prev = NULL; - for (i = 0; i < 2 * nparts; i++) - { - PartitionRangeBound *cur = all_bounds[i]; - bool is_distinct = false; - int j; - - /* Is the current bound distinct from the previous one? */ - for (j = 0; j < key->partnatts; j++) - { - Datum cmpval; - - if (prev == NULL || cur->kind[j] != prev->kind[j]) - { - is_distinct = true; - break; - } - - /* - * If the bounds are both MINVALUE or MAXVALUE, stop now - * and treat them as equal, since any values after this - * point must be ignored. - */ - if (cur->kind[j] != PARTITION_RANGE_DATUM_VALUE) - break; - - cmpval = FunctionCall2Coll(&key->partsupfunc[j], - key->partcollation[j], - cur->datums[j], - prev->datums[j]); - if (DatumGetInt32(cmpval) != 0) - { - is_distinct = true; - break; - } - } - - /* - * Count the current bound if it is distinct from the previous - * one. Also, store if the index i contains a distinct bound - * that we'd like put in the relcache array. - */ - if (is_distinct) - { - distinct_indexes[i] = true; - ndatums++; - } - else - distinct_indexes[i] = false; - - prev = cur; - } - - /* - * Finally save them in an array from where they will be copied - * into the relcache. 
- */ - rbounds = (PartitionRangeBound **) palloc(ndatums * - sizeof(PartitionRangeBound *)); - k = 0; - for (i = 0; i < 2 * nparts; i++) - { - if (distinct_indexes[i]) - rbounds[k++] = all_bounds[i]; - } - Assert(k == ndatums); - } - else - elog(ERROR, "unexpected partition strategy: %d", - (int) key->strategy); - } - - /* Now build the actual relcache partition descriptor */ - rel->rd_pdcxt = AllocSetContextCreate(CacheMemoryContext, - RelationGetRelationName(rel), - ALLOCSET_DEFAULT_SIZES); - oldcxt = MemoryContextSwitchTo(rel->rd_pdcxt); - - result = (PartitionDescData *) palloc0(sizeof(PartitionDescData)); - result->nparts = nparts; - if (nparts > 0) - { - PartitionBoundInfo boundinfo; - int *mapping; - int next_index = 0; - - result->oids = (Oid *) palloc0(nparts * sizeof(Oid)); - - boundinfo = (PartitionBoundInfoData *) - palloc0(sizeof(PartitionBoundInfoData)); - boundinfo->strategy = key->strategy; - boundinfo->ndatums = ndatums; - boundinfo->null_index = -1; - boundinfo->datums = (Datum **) palloc0(ndatums * sizeof(Datum *)); - - /* Initialize mapping array with invalid values */ - mapping = (int *) palloc(sizeof(int) * nparts); - for (i = 0; i < nparts; i++) - mapping[i] = -1; - - switch (key->strategy) - { - case PARTITION_STRATEGY_LIST: - { - boundinfo->indexes = (int *) palloc(ndatums * sizeof(int)); - - /* - * Copy values. Indexes of individual values are mapped - * to canonical values so that they match for any two list - * partitioned tables with same number of partitions and - * same lists per partition. One way to canonicalize is - * to assign the index in all_values[] of the smallest - * value of each partition, as the index of all of the - * partition's values. - */ - for (i = 0; i < ndatums; i++) - { - boundinfo->datums[i] = (Datum *) palloc(sizeof(Datum)); - boundinfo->datums[i][0] = datumCopy(all_values[i]->value, - key->parttypbyval[0], - key->parttyplen[0]); - - /* If the old index has no mapping, assign one */ - if (mapping[all_values[i]->index] == -1) - mapping[all_values[i]->index] = next_index++; - boundinfo->indexes[i] = mapping[all_values[i]->index]; - } - - /* - * If null-accepting partition has no mapped index yet, - * assign one. This could happen if such partition - * accepts only null and hence not covered in the above - * loop which only handled non-null values. - */ - if (null_index != -1) - { - Assert(null_index >= 0); - if (mapping[null_index] == -1) - mapping[null_index] = next_index++; - boundinfo->null_index = mapping[null_index]; - } - - /* All partition must now have a valid mapping */ - Assert(next_index == nparts); - break; - } - - case PARTITION_STRATEGY_RANGE: - { - boundinfo->kind = (PartitionRangeDatumKind **) - palloc(ndatums * - sizeof(PartitionRangeDatumKind *)); - boundinfo->indexes = (int *) palloc((ndatums + 1) * - sizeof(int)); - - for (i = 0; i < ndatums; i++) - { - int j; - - boundinfo->datums[i] = (Datum *) palloc(key->partnatts * - sizeof(Datum)); - boundinfo->kind[i] = (PartitionRangeDatumKind *) - palloc(key->partnatts * - sizeof(PartitionRangeDatumKind)); - for (j = 0; j < key->partnatts; j++) - { - if (rbounds[i]->kind[j] == PARTITION_RANGE_DATUM_VALUE) - boundinfo->datums[i][j] = - datumCopy(rbounds[i]->datums[j], - key->parttypbyval[j], - key->parttyplen[j]); - boundinfo->kind[i][j] = rbounds[i]->kind[j]; - } - - /* - * There is no mapping for invalid indexes. 
- * - * Any lower bounds in the rbounds array have invalid - * indexes assigned, because the values between the - * previous bound (if there is one) and this (lower) - * bound are not part of the range of any existing - * partition. - */ - if (rbounds[i]->lower) - boundinfo->indexes[i] = -1; - else - { - int orig_index = rbounds[i]->index; - - /* If the old index has no mapping, assign one */ - if (mapping[orig_index] == -1) - mapping[orig_index] = next_index++; - - boundinfo->indexes[i] = mapping[orig_index]; - } - } - boundinfo->indexes[i] = -1; - break; - } - - default: - elog(ERROR, "unexpected partition strategy: %d", - (int) key->strategy); - } - - result->boundinfo = boundinfo; - - /* - * Now assign OIDs from the original array into mapped indexes of the - * result array. Order of OIDs in the former is defined by the - * catalog scan that retrieved them, whereas that in the latter is - * defined by canonicalized representation of the list values or the - * range bounds. - */ - for (i = 0; i < nparts; i++) - result->oids[mapping[i]] = oids[i]; - pfree(mapping); - } - - MemoryContextSwitchTo(oldcxt); - rel->rd_partdesc = result; -} +static Oid get_partition_parent_worker(Relation inhRel, Oid relid); +static void get_partition_ancestors_worker(Relation inhRel, Oid relid, + List **ancestors); /* - * Are two partition bound collections logically equal? + * get_partition_parent + * Obtain direct parent of given relation * - * Used in the keep logic of relcache.c (ie, in RelationClearRelation()). - * This is also useful when b1 and b2 are bound collections of two separate - * relations, respectively, because PartitionBoundInfo is a canonical - * representation of partition bounds. - */ -bool -partition_bounds_equal(int partnatts, int16 *parttyplen, bool *parttypbyval, - PartitionBoundInfo b1, PartitionBoundInfo b2) -{ - int i; - - if (b1->strategy != b2->strategy) - return false; - - if (b1->ndatums != b2->ndatums) - return false; - - if (b1->null_index != b2->null_index) - return false; - - for (i = 0; i < b1->ndatums; i++) - { - int j; - - for (j = 0; j < partnatts; j++) - { - /* For range partitions, the bounds might not be finite. */ - if (b1->kind != NULL) - { - /* The different kinds of bound all differ from each other */ - if (b1->kind[i][j] != b2->kind[i][j]) - return false; - - /* Non-finite bounds are equal without further examination. */ - if (b1->kind[i][j] != PARTITION_RANGE_DATUM_VALUE) - continue; - } - - /* - * Compare the actual values. Note that it would be both incorrect - * and unsafe to invoke the comparison operator derived from the - * partitioning specification here. It would be incorrect because - * we want the relcache entry to be updated for ANY change to the - * partition bounds, not just those that the partitioning operator - * thinks are significant. It would be unsafe because we might - * reach this code in the context of an aborted transaction, and - * an arbitrary partitioning operator might not be safe in that - * context. datumIsEqual() should be simple enough to be safe. 
- */ - if (!datumIsEqual(b1->datums[i][j], b2->datums[i][j], - parttypbyval[j], parttyplen[j])) - return false; - } - - if (b1->indexes[i] != b2->indexes[i]) - return false; - } - - /* There are ndatums+1 indexes in case of range partitions */ - if (b1->strategy == PARTITION_STRATEGY_RANGE && - b1->indexes[i] != b2->indexes[i]) - return false; - - return true; -} - -/* - * check_new_partition_bound + * Returns inheritance parent of a partition by scanning pg_inherits * - * Checks if the new partition's bound overlaps any of the existing partitions - * of parent. Also performs additional checks as necessary per strategy. + * Note: Because this function assumes that the relation whose OID is passed + * as an argument will have precisely one parent, it should only be called + * when it is known that the relation is a partition. */ -void -check_new_partition_bound(char *relname, Relation parent, - PartitionBoundSpec *spec) +Oid +get_partition_parent(Oid relid) { - PartitionKey key = RelationGetPartitionKey(parent); - PartitionDesc partdesc = RelationGetPartitionDesc(parent); - ParseState *pstate = make_parsestate(NULL); - int with = -1; - bool overlap = false; - - switch (key->strategy) - { - case PARTITION_STRATEGY_LIST: - { - Assert(spec->strategy == PARTITION_STRATEGY_LIST); - - if (partdesc->nparts > 0) - { - PartitionBoundInfo boundinfo = partdesc->boundinfo; - ListCell *cell; - - Assert(boundinfo && - boundinfo->strategy == PARTITION_STRATEGY_LIST && - (boundinfo->ndatums > 0 || - partition_bound_accepts_nulls(boundinfo))); - - foreach(cell, spec->listdatums) - { - Const *val = castNode(Const, lfirst(cell)); - - if (!val->constisnull) - { - int offset; - bool equal; - - offset = partition_bound_bsearch(key, boundinfo, - &val->constvalue, - true, &equal); - if (offset >= 0 && equal) - { - overlap = true; - with = boundinfo->indexes[offset]; - break; - } - } - else if (partition_bound_accepts_nulls(boundinfo)) - { - overlap = true; - with = boundinfo->null_index; - break; - } - } - } - - break; - } - - case PARTITION_STRATEGY_RANGE: - { - PartitionRangeBound *lower, - *upper; - - Assert(spec->strategy == PARTITION_STRATEGY_RANGE); - lower = make_one_range_bound(key, -1, spec->lowerdatums, true); - upper = make_one_range_bound(key, -1, spec->upperdatums, false); - - /* - * First check if the resulting range would be empty with - * specified lower and upper bounds - */ - if (partition_rbound_cmp(key, lower->datums, lower->kind, true, - upper) >= 0) - { - ereport(ERROR, - (errcode(ERRCODE_INVALID_OBJECT_DEFINITION), - errmsg("empty range bound specified for partition \"%s\"", - relname), - errdetail("Specified lower bound %s is greater than or equal to upper bound %s.", - get_range_partbound_string(spec->lowerdatums), - get_range_partbound_string(spec->upperdatums)), - parser_errposition(pstate, spec->location))); - } - - if (partdesc->nparts > 0) - { - PartitionBoundInfo boundinfo = partdesc->boundinfo; - int offset; - bool equal; - - Assert(boundinfo && boundinfo->ndatums > 0 && - boundinfo->strategy == PARTITION_STRATEGY_RANGE); - - /* - * Test whether the new lower bound (which is treated - * inclusively as part of the new partition) lies inside - * an existing partition, or in a gap. - * - * If it's inside an existing partition, the bound at - * offset + 1 will be the upper bound of that partition, - * and its index will be >= 0. - * - * If it's in a gap, the bound at offset + 1 will be the - * lower bound of the next partition, and its index will - * be -1. 
This is also true if there is no next partition, - * since the index array is initialised with an extra -1 - * at the end. - */ - offset = partition_bound_bsearch(key, boundinfo, lower, - true, &equal); + Relation catalogRelation; + Oid result; - if (boundinfo->indexes[offset + 1] < 0) - { - /* - * Check that the new partition will fit in the gap. - * For it to fit, the new upper bound must be less - * than or equal to the lower bound of the next - * partition, if there is one. - */ - if (offset + 1 < boundinfo->ndatums) - { - int32 cmpval; + catalogRelation = heap_open(InheritsRelationId, AccessShareLock); - cmpval = partition_bound_cmp(key, boundinfo, - offset + 1, upper, - true); - if (cmpval < 0) - { - /* - * The new partition overlaps with the - * existing partition between offset + 1 and - * offset + 2. - */ - overlap = true; - with = boundinfo->indexes[offset + 2]; - } - } - } - else - { - /* - * The new partition overlaps with the existing - * partition between offset and offset + 1. - */ - overlap = true; - with = boundinfo->indexes[offset + 1]; - } - } + result = get_partition_parent_worker(catalogRelation, relid); - break; - } + if (!OidIsValid(result)) + elog(ERROR, "could not find tuple for parent of relation %u", relid); - default: - elog(ERROR, "unexpected partition strategy: %d", - (int) key->strategy); - } + heap_close(catalogRelation, AccessShareLock); - if (overlap) - { - Assert(with >= 0); - ereport(ERROR, - (errcode(ERRCODE_INVALID_OBJECT_DEFINITION), - errmsg("partition \"%s\" would overlap partition \"%s\"", - relname, get_rel_name(partdesc->oids[with])), - parser_errposition(pstate, spec->location))); - } + return result; } /* - * get_partition_parent - * - * Returns inheritance parent of a partition by scanning pg_inherits - * - * Note: Because this function assumes that the relation whose OID is passed - * as an argument will have precisely one parent, it should only be called - * when it is known that the relation is a partition. + * get_partition_parent_worker + * Scan the pg_inherits relation to return the OID of the parent of the + * given relation */ -Oid -get_partition_parent(Oid relid) +static Oid +get_partition_parent_worker(Relation inhRel, Oid relid) { - Form_pg_inherits form; - Relation catalogRelation; SysScanDesc scan; ScanKeyData key[2]; + Oid result = InvalidOid; HeapTuple tuple; - Oid result; - - catalogRelation = heap_open(InheritsRelationId, AccessShareLock); ScanKeyInit(&key[0], Anum_pg_inherits_inhrelid, @@ -846,62 +89,73 @@ get_partition_parent(Oid relid) BTEqualStrategyNumber, F_INT4EQ, Int32GetDatum(1)); - scan = systable_beginscan(catalogRelation, InheritsRelidSeqnoIndexId, true, + scan = systable_beginscan(inhRel, InheritsRelidSeqnoIndexId, true, NULL, 2, key); - tuple = systable_getnext(scan); - if (!HeapTupleIsValid(tuple)) - elog(ERROR, "could not find tuple for parent of relation %u", relid); + if (HeapTupleIsValid(tuple)) + { + Form_pg_inherits form = (Form_pg_inherits) GETSTRUCT(tuple); - form = (Form_pg_inherits) GETSTRUCT(tuple); - result = form->inhparent; + result = form->inhparent; + } systable_endscan(scan); - heap_close(catalogRelation, AccessShareLock); return result; } /* - * get_qual_from_partbound - * Given a parser node for partition bound, return the list of executable - * expressions as partition constraint + * get_partition_ancestors + * Obtain ancestors of given relation + * + * Returns a list of ancestors of the given relation. 
+ * + * Note: Because this function assumes that the relation whose OID is passed + * as an argument and each ancestor will have precisely one parent, it should + * only be called when it is known that the relation is a partition. */ List * -get_qual_from_partbound(Relation rel, Relation parent, - PartitionBoundSpec *spec) +get_partition_ancestors(Oid relid) { - PartitionKey key = RelationGetPartitionKey(parent); - List *my_qual = NIL; + List *result = NIL; + Relation inhRel; - Assert(key != NULL); + inhRel = heap_open(InheritsRelationId, AccessShareLock); - switch (key->strategy) - { - case PARTITION_STRATEGY_LIST: - Assert(spec->strategy == PARTITION_STRATEGY_LIST); - my_qual = get_qual_for_list(key, spec); - break; + get_partition_ancestors_worker(inhRel, relid, &result); - case PARTITION_STRATEGY_RANGE: - Assert(spec->strategy == PARTITION_STRATEGY_RANGE); - my_qual = get_qual_for_range(key, spec); - break; + heap_close(inhRel, AccessShareLock); - default: - elog(ERROR, "unexpected partition strategy: %d", - (int) key->strategy); - } + return result; +} + +/* + * get_partition_ancestors_worker + * recursive worker for get_partition_ancestors + */ +static void +get_partition_ancestors_worker(Relation inhRel, Oid relid, List **ancestors) +{ + Oid parentOid; + + /* Recursion ends at the topmost level, ie., when there's no parent */ + parentOid = get_partition_parent_worker(inhRel, relid); + if (parentOid == InvalidOid) + return; - return my_qual; + *ancestors = lappend_oid(*ancestors, parentOid); + get_partition_ancestors_worker(inhRel, parentOid, ancestors); } /* * map_partition_varattnos - maps varattno of any Vars in expr from the - * parent attno to partition attno. + * attno's of 'from_rel' to the attno's of 'to_rel' partition, each of which + * may be either a leaf partition or a partitioned table, but both of which + * must be from the same partitioning hierarchy. * - * We must allow for cases where physical attnos of a partition can be - * different from the parent's. + * Even though all of the same column names must be present in all relations + * in the hierarchy, and they must also have the same types, the attnos may + * be different. * * If found_whole_row is not NULL, *found_whole_row returns whether a * whole-row variable was found in the input expression. @@ -911,25 +165,27 @@ get_qual_from_partbound(Relation rel, Relation parent, * are working on Lists, so it's less messy to do the casts internally. 
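get_partition_ancestors() returns the chain of parents ordered from the immediate parent up to the topmost ancestor, so the last element is the root of the partition tree. A usage sketch (illustrative only; the helper name is invented):

#include "postgres.h"
#include "catalog/partition.h"
#include "nodes/pg_list.h"

/* Hypothetical helper: OID of the root of the partition tree containing relid */
static Oid
get_partition_root(Oid relid)
{
    List   *ancestors = get_partition_ancestors(relid);
    Oid     rootoid;

    if (ancestors == NIL)
        return relid;           /* defensive: relid is not a partition */

    /* list is ordered immediate parent first, topmost ancestor last */
    rootoid = llast_oid(ancestors);
    list_free(ancestors);

    return rootoid;
}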
*/ List * -map_partition_varattnos(List *expr, int target_varno, - Relation partrel, Relation parent, +map_partition_varattnos(List *expr, int fromrel_varno, + Relation to_rel, Relation from_rel, bool *found_whole_row) { - AttrNumber *part_attnos; - bool my_found_whole_row; + bool my_found_whole_row = false; - if (expr == NIL) - return NIL; + if (expr != NIL) + { + AttrNumber *part_attnos; + + part_attnos = convert_tuples_by_name_map(RelationGetDescr(to_rel), + RelationGetDescr(from_rel), + gettext_noop("could not convert row type")); + expr = (List *) map_variable_attnos((Node *) expr, + fromrel_varno, 0, + part_attnos, + RelationGetDescr(from_rel)->natts, + RelationGetForm(to_rel)->reltype, + &my_found_whole_row); + } - part_attnos = convert_tuples_by_name_map(RelationGetDescr(partrel), - RelationGetDescr(parent), - gettext_noop("could not convert row type")); - expr = (List *) map_variable_attnos((Node *) expr, - target_varno, 0, - part_attnos, - RelationGetDescr(parent)->natts, - RelationGetForm(partrel)->reltype, - &my_found_whole_row); if (found_whole_row) *found_whole_row = my_found_whole_row; @@ -937,1399 +193,167 @@ map_partition_varattnos(List *expr, int target_varno, } /* - * RelationGetPartitionQual - * - * Returns a list of partition quals + * Checks if any of the 'attnums' is a partition key attribute for rel + * + * Sets *used_in_expr if any of the 'attnums' is found to be referenced in some + * partition key expression. It's possible for a column to be both used + * directly and as part of an expression; if that happens, *used_in_expr may + * end up as either true or false. That's OK for current uses of this + * function, because *used_in_expr is only used to tailor the error message + * text. */ -List * -RelationGetPartitionQual(Relation rel) +bool +has_partition_attrs(Relation rel, Bitmapset *attnums, bool *used_in_expr) { - /* Quick exit */ - if (!rel->rd_rel->relispartition) - return NIL; + PartitionKey key; + int partnatts; + List *partexprs; + ListCell *partexprs_item; + int i; - return generate_partition_qual(rel); -} + if (attnums == NULL || rel->rd_rel->relkind != RELKIND_PARTITIONED_TABLE) + return false; -/* - * get_partition_qual_relid - * - * Returns an expression tree describing the passed-in relation's partition - * constraint. - */ -Expr * -get_partition_qual_relid(Oid relid) -{ - Relation rel = heap_open(relid, AccessShareLock); - Expr *result = NULL; - List *and_args; + key = RelationGetPartitionKey(rel); + partnatts = get_partition_natts(key); + partexprs = get_partition_exprs(key); - /* Do the work only if this relation is a partition. */ - if (rel->rd_rel->relispartition) + partexprs_item = list_head(partexprs); + for (i = 0; i < partnatts; i++) { - and_args = generate_partition_qual(rel); - if (list_length(and_args) > 1) - result = makeBoolExpr(AND_EXPR, and_args, -1); + AttrNumber partattno = get_partition_col_attnum(key, i); + + if (partattno != 0) + { + if (bms_is_member(partattno - FirstLowInvalidHeapAttributeNumber, + attnums)) + { + if (used_in_expr) + *used_in_expr = false; + return true; + } + } else - result = linitial(and_args); - } + { + /* Arbitrary expression */ + Node *expr = (Node *) lfirst(partexprs_item); + Bitmapset *expr_attrs = NULL; - /* Keep the lock. 
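/*
 * Illustrative sketch for map_partition_varattnos() above (the function and
 * variable names below are placeholders): suppose the parent has columns
 * (a, b) with attnums 1 and 2, while the partition, having had an unrelated
 * column dropped before it was attached, holds the same (a, b) as attnums 1
 * and 3.  A parent qual "b > 0" carries a Var with varattno = 2, which must
 * become varattno = 3 before it can be applied to the partition's tuples:
 */
static List *
translate_parent_qual(List *parent_qual, Relation parent, Relation partrel)
{
	bool		found_whole_row;

	/* parent-side Vars are assumed to use varno 1 */
	return map_partition_varattnos(parent_qual, 1,
								   partrel, /* to_rel */
								   parent,	/* from_rel */
								   &found_whole_row);
}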
*/ - heap_close(rel, NoLock); + /* Find all attributes referenced */ + pull_varattnos(expr, 1, &expr_attrs); + partexprs_item = lnext(partexprs_item); - return result; + if (bms_overlap(attnums, expr_attrs)) + { + if (used_in_expr) + *used_in_expr = true; + return true; + } + } + } + + return false; } /* - * Append OIDs of rel's partitions to the list 'partoids' and for each OID, - * append pointer rel to the list 'parents'. + * get_default_oid_from_partdesc + * + * Given a partition descriptor, return the OID of the default partition, if + * one exists; else, return InvalidOid. */ -#define APPEND_REL_PARTITION_OIDS(rel, partoids, parents) \ - do\ - {\ - int i;\ - for (i = 0; i < (rel)->rd_partdesc->nparts; i++)\ - {\ - (partoids) = lappend_oid((partoids), (rel)->rd_partdesc->oids[i]);\ - (parents) = lappend((parents), (rel));\ - }\ - } while(0) +Oid +get_default_oid_from_partdesc(PartitionDesc partdesc) +{ + if (partdesc && partdesc->boundinfo && + partition_bound_has_default(partdesc->boundinfo)) + return partdesc->oids[partdesc->boundinfo->default_index]; + + return InvalidOid; +} /* - * RelationGetPartitionDispatchInfo - * Returns information necessary to route tuples down a partition tree + * get_default_partition_oid * - * All the partitions will be locked with lockmode, unless it is NoLock. - * A list of the OIDs of all the leaf partitions of rel is returned in - * *leaf_part_oids. + * Given a relation OID, return the OID of the default partition, if one + * exists. Use get_default_oid_from_partdesc where possible, for + * efficiency. */ -PartitionDispatch * -RelationGetPartitionDispatchInfo(Relation rel, int lockmode, - int *num_parted, List **leaf_part_oids) +Oid +get_default_partition_oid(Oid parentId) { - PartitionDispatchData **pd; - List *all_parts = NIL, - *all_parents = NIL, - *parted_rels, - *parted_rel_parents; - ListCell *lc1, - *lc2; - int i, - k, - offset; + HeapTuple tuple; + Oid defaultPartId = InvalidOid; - /* - * Lock partitions and make a list of the partitioned ones to prepare - * their PartitionDispatch objects below. - * - * Cannot use find_all_inheritors() here, because then the order of OIDs - * in parted_rels list would be unknown, which does not help, because we - * assign indexes within individual PartitionDispatch in an order that is - * predetermined (determined by the order of OIDs in individual partition - * descriptors). - */ - *num_parted = 1; - parted_rels = list_make1(rel); - /* Root partitioned table has no parent, so NULL for parent */ - parted_rel_parents = list_make1(NULL); - APPEND_REL_PARTITION_OIDS(rel, all_parts, all_parents); - forboth(lc1, all_parts, lc2, all_parents) - { - Relation partrel = heap_open(lfirst_oid(lc1), lockmode); - Relation parent = lfirst(lc2); - PartitionDesc partdesc = RelationGetPartitionDesc(partrel); + tuple = SearchSysCache1(PARTRELID, ObjectIdGetDatum(parentId)); - /* - * If this partition is a partitioned table, add its children to the - * end of the list, so that they are processed as well. - */ - if (partdesc) - { - (*num_parted)++; - parted_rels = lappend(parted_rels, partrel); - parted_rel_parents = lappend(parted_rel_parents, parent); - APPEND_REL_PARTITION_OIDS(partrel, all_parts, all_parents); - } - else - heap_close(partrel, NoLock); + if (HeapTupleIsValid(tuple)) + { + Form_pg_partitioned_table part_table_form; - /* - * We keep the partitioned ones open until we're done using the - * information being collected here (for example, see - * ExecEndModifyTable). 
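/*
 * Illustrative sketch for has_partition_attrs() above (the function name is
 * hypothetical): a caller that wants to know whether a single column takes
 * part in rel's partition key builds the bitmapset with attribute numbers
 * offset by FirstLowInvalidHeapAttributeNumber, matching the bms_is_member()
 * test inside the function.
 */
static void
forbid_dropping_partition_key_column(Relation rel, AttrNumber attnum)
{
	Bitmapset  *attrs = NULL;
	bool		used_in_expr;

	attrs = bms_add_member(attrs,
						   attnum - FirstLowInvalidHeapAttributeNumber);
	if (has_partition_attrs(rel, attrs, &used_in_expr))
		elog(ERROR, "column %d is %s the partition key of \"%s\"",
			 attnum,
			 used_in_expr ? "referenced by an expression in" : "named in",
			 RelationGetRelationName(rel));
}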
- */ + part_table_form = (Form_pg_partitioned_table) GETSTRUCT(tuple); + defaultPartId = part_table_form->partdefid; + ReleaseSysCache(tuple); } - /* - * We want to create two arrays - one for leaf partitions and another for - * partitioned tables (including the root table and internal partitions). - * While we only create the latter here, leaf partition array of suitable - * objects (such as, ResultRelInfo) is created by the caller using the - * list of OIDs we return. Indexes into these arrays get assigned in a - * breadth-first manner, whereby partitions of any given level are placed - * consecutively in the respective arrays. - */ - pd = (PartitionDispatchData **) palloc(*num_parted * - sizeof(PartitionDispatchData *)); - *leaf_part_oids = NIL; - i = k = offset = 0; - forboth(lc1, parted_rels, lc2, parted_rel_parents) - { - Relation partrel = lfirst(lc1); - Relation parent = lfirst(lc2); - PartitionKey partkey = RelationGetPartitionKey(partrel); - TupleDesc tupdesc = RelationGetDescr(partrel); - PartitionDesc partdesc = RelationGetPartitionDesc(partrel); - int j, - m; - - pd[i] = (PartitionDispatch) palloc(sizeof(PartitionDispatchData)); - pd[i]->reldesc = partrel; - pd[i]->key = partkey; - pd[i]->keystate = NIL; - pd[i]->partdesc = partdesc; - if (parent != NULL) - { - /* - * For every partitioned table other than root, we must store a - * tuple table slot initialized with its tuple descriptor and a - * tuple conversion map to convert a tuple from its parent's - * rowtype to its own. That is to make sure that we are looking at - * the correct row using the correct tuple descriptor when - * computing its partition key for tuple routing. - */ - pd[i]->tupslot = MakeSingleTupleTableSlot(tupdesc); - pd[i]->tupmap = convert_tuples_by_name(RelationGetDescr(parent), - tupdesc, - gettext_noop("could not convert row type")); - } - else - { - /* Not required for the root partitioned table */ - pd[i]->tupslot = NULL; - pd[i]->tupmap = NULL; - } - pd[i]->indexes = (int *) palloc(partdesc->nparts * sizeof(int)); - - /* - * Indexes corresponding to the internal partitions are multiplied by - * -1 to distinguish them from those of leaf partitions. Encountering - * an index >= 0 means we found a leaf partition, which is immediately - * returned as the partition we are looking for. A negative index - * means we found a partitioned table, whose PartitionDispatch object - * is located at the above index multiplied back by -1. Using the - * PartitionDispatch object, search is continued further down the - * partition tree. - */ - m = 0; - for (j = 0; j < partdesc->nparts; j++) - { - Oid partrelid = partdesc->oids[j]; - - if (get_rel_relkind(partrelid) != RELKIND_PARTITIONED_TABLE) - { - *leaf_part_oids = lappend_oid(*leaf_part_oids, partrelid); - pd[i]->indexes[j] = k++; - } - else - { - /* - * offset denotes the number of partitioned tables of upper - * levels including those of the current level. Any partition - * of this table must belong to the next level and hence will - * be placed after the last partitioned table of this level. - */ - pd[i]->indexes[j] = -(1 + offset + m); - m++; - } - } - i++; - - /* - * This counts the number of partitioned tables at upper levels - * including those of the current level. - */ - offset += m; - } - - return pd; -} - -/* Module-local functions */ - -/* - * get_partition_operator - * - * Return oid of the operator of given strategy for a given partition key - * column. 
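/*
 * Illustrative example of the indexes[] encoding used by the removed
 * RelationGetPartitionDispatchInfo() above: suppose the root table's
 * partition descriptor lists a leaf partition p0 followed by a partitioned
 * child q, and q in turn has leaf partitions q0 and q1.  The dispatch
 * structures come out as
 *
 *   root (pd[0]): indexes = { 0, -1 }   leaf 0, then descend into pd[1]
 *   q    (pd[1]): indexes = { 1, 2 }    leaves 1 and 2
 *
 * and *leaf_part_oids lists p0, q0, q1 in that order.
 */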
- */ -static Oid -get_partition_operator(PartitionKey key, int col, StrategyNumber strategy, - bool *need_relabel) -{ - Oid operoid; - - /* - * First check if there exists an operator of the given strategy, with - * this column's type as both its lefttype and righttype, in the - * partitioning operator family specified for the column. - */ - operoid = get_opfamily_member(key->partopfamily[col], - key->parttypid[col], - key->parttypid[col], - strategy); - - /* - * If one doesn't exist, we must resort to using an operator in the same - * operator family but with the operator class declared input type. It is - * OK to do so, because the column's type is known to be binary-coercible - * with the operator class input type (otherwise, the operator class in - * question would not have been accepted as the partitioning operator - * class). We must however inform the caller to wrap the non-Const - * expression with a RelabelType node to denote the implicit coercion. It - * ensures that the resulting expression structurally matches similarly - * processed expressions within the optimizer. - */ - if (!OidIsValid(operoid)) - { - operoid = get_opfamily_member(key->partopfamily[col], - key->partopcintype[col], - key->partopcintype[col], - strategy); - if (!OidIsValid(operoid)) - elog(ERROR, "missing operator %d(%u,%u) in opfamily %u", - strategy, key->partopcintype[col], key->partopcintype[col], - key->partopfamily[col]); - *need_relabel = true; - } - else - *need_relabel = false; - - return operoid; -} - -/* - * make_partition_op_expr - * Returns an Expr for the given partition key column with arg1 and - * arg2 as its leftop and rightop, respectively - */ -static Expr * -make_partition_op_expr(PartitionKey key, int keynum, - uint16 strategy, Expr *arg1, Expr *arg2) -{ - Oid operoid; - bool need_relabel = false; - Expr *result = NULL; - - /* Get the correct btree operator for this partitioning column */ - operoid = get_partition_operator(key, keynum, strategy, &need_relabel); - - /* - * Chosen operator may be such that the non-Const operand needs to be - * coerced, so apply the same; see the comment in - * get_partition_operator(). - */ - if (!IsA(arg1, Const) && - (need_relabel || - key->partcollation[keynum] != key->parttypcoll[keynum])) - arg1 = (Expr *) makeRelabelType(arg1, - key->partopcintype[keynum], - -1, - key->partcollation[keynum], - COERCE_EXPLICIT_CAST); - - /* Generate the actual expression */ - switch (key->strategy) - { - case PARTITION_STRATEGY_LIST: - { - ScalarArrayOpExpr *saopexpr; - - /* Build leftop = ANY (rightop) */ - saopexpr = makeNode(ScalarArrayOpExpr); - saopexpr->opno = operoid; - saopexpr->opfuncid = get_opcode(operoid); - saopexpr->useOr = true; - saopexpr->inputcollid = key->partcollation[keynum]; - saopexpr->args = list_make2(arg1, arg2); - saopexpr->location = -1; - - result = (Expr *) saopexpr; - break; - } - - case PARTITION_STRATEGY_RANGE: - result = make_opclause(operoid, - BOOLOID, - false, - arg1, arg2, - InvalidOid, - key->partcollation[keynum]); - break; - - default: - elog(ERROR, "invalid partitioning strategy"); - break; - } - - return result; -} - -/* - * get_qual_for_list - * - * Returns an implicit-AND list of expressions to use as a list partition's - * constraint, given the partition key and bound structures. 
- */ -static List * -get_qual_for_list(PartitionKey key, PartitionBoundSpec *spec) -{ - List *result; - Expr *keyCol; - ArrayExpr *arr; - Expr *opexpr; - NullTest *nulltest; - ListCell *cell; - List *arrelems = NIL; - bool list_has_null = false; - - /* - * Only single-column list partitioning is supported, so we are worried - * only about the partition key with index 0. - */ - Assert(key->partnatts == 1); - - /* Construct Var or expression representing the partition column */ - if (key->partattrs[0] != 0) - keyCol = (Expr *) makeVar(1, - key->partattrs[0], - key->parttypid[0], - key->parttypmod[0], - key->parttypcoll[0], - 0); - else - keyCol = (Expr *) copyObject(linitial(key->partexprs)); - - /* Create list of Consts for the allowed values, excluding any nulls */ - foreach(cell, spec->listdatums) - { - Const *val = castNode(Const, lfirst(cell)); - - if (val->constisnull) - list_has_null = true; - else - arrelems = lappend(arrelems, copyObject(val)); - } - - if (arrelems) - { - /* Construct an ArrayExpr for the non-null partition values */ - arr = makeNode(ArrayExpr); - arr->array_typeid = !type_is_array(key->parttypid[0]) - ? get_array_type(key->parttypid[0]) - : key->parttypid[0]; - arr->array_collid = key->parttypcoll[0]; - arr->element_typeid = key->parttypid[0]; - arr->elements = arrelems; - arr->multidims = false; - arr->location = -1; - - /* Generate the main expression, i.e., keyCol = ANY (arr) */ - opexpr = make_partition_op_expr(key, 0, BTEqualStrategyNumber, - keyCol, (Expr *) arr); - } - else - { - /* If there are no partition values, we don't need an = ANY expr */ - opexpr = NULL; - } - - if (!list_has_null) - { - /* - * Gin up a "col IS NOT NULL" test that will be AND'd with the main - * expression. This might seem redundant, but the partition routing - * machinery needs it. - */ - nulltest = makeNode(NullTest); - nulltest->arg = keyCol; - nulltest->nulltesttype = IS_NOT_NULL; - nulltest->argisrow = false; - nulltest->location = -1; - - result = opexpr ? list_make2(nulltest, opexpr) : list_make1(nulltest); - } - else - { - /* - * Gin up a "col IS NULL" test that will be OR'd with the main - * expression. - */ - nulltest = makeNode(NullTest); - nulltest->arg = keyCol; - nulltest->nulltesttype = IS_NULL; - nulltest->argisrow = false; - nulltest->location = -1; - - if (opexpr) - { - Expr *or; - - or = makeBoolExpr(OR_EXPR, list_make2(nulltest, opexpr), -1); - result = list_make1(or); - } - else - result = list_make1(nulltest); - } - - return result; -} - -/* - * get_range_key_properties - * Returns range partition key information for a given column - * - * This is a subroutine for get_qual_for_range, and its API is pretty - * specialized to that caller. - * - * Constructs an Expr for the key column (returned in *keyCol) and Consts - * for the lower and upper range limits (returned in *lower_val and - * *upper_val). For MINVALUE/MAXVALUE limits, NULL is returned instead of - * a Const. All of these structures are freshly palloc'd. - * - * *partexprs_item points to the cell containing the next expression in - * the key->partexprs list, or NULL. It may be advanced upon return. 
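/*
 * Illustrative examples of what get_qual_for_list() above produces for a
 * single list partition key column "a" (shown as SQL; the function actually
 * returns the corresponding expression trees as an implicit-AND list):
 *
 *   FOR VALUES IN (1, 2)       ->  (a IS NOT NULL) AND (a = ANY (ARRAY[1, 2]))
 *   FOR VALUES IN (1, 2, NULL) ->  (a IS NULL) OR (a = ANY (ARRAY[1, 2]))
 *   FOR VALUES IN (NULL)       ->  (a IS NULL)
 */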
- */ -static void -get_range_key_properties(PartitionKey key, int keynum, - PartitionRangeDatum *ldatum, - PartitionRangeDatum *udatum, - ListCell **partexprs_item, - Expr **keyCol, - Const **lower_val, Const **upper_val) -{ - /* Get partition key expression for this column */ - if (key->partattrs[keynum] != 0) - { - *keyCol = (Expr *) makeVar(1, - key->partattrs[keynum], - key->parttypid[keynum], - key->parttypmod[keynum], - key->parttypcoll[keynum], - 0); - } - else - { - if (*partexprs_item == NULL) - elog(ERROR, "wrong number of partition key expressions"); - *keyCol = copyObject(lfirst(*partexprs_item)); - *partexprs_item = lnext(*partexprs_item); - } - - /* Get appropriate Const nodes for the bounds */ - if (ldatum->kind == PARTITION_RANGE_DATUM_VALUE) - *lower_val = castNode(Const, copyObject(ldatum->value)); - else - *lower_val = NULL; - - if (udatum->kind == PARTITION_RANGE_DATUM_VALUE) - *upper_val = castNode(Const, copyObject(udatum->value)); - else - *upper_val = NULL; -} - -/* - * get_qual_for_range - * - * Returns an implicit-AND list of expressions to use as a range partition's - * constraint, given the partition key and bound structures. - * - * For a multi-column range partition key, say (a, b, c), with (al, bl, cl) - * as the lower bound tuple and (au, bu, cu) as the upper bound tuple, we - * generate an expression tree of the following form: - * - * (a IS NOT NULL) and (b IS NOT NULL) and (c IS NOT NULL) - * AND - * (a > al OR (a = al AND b > bl) OR (a = al AND b = bl AND c >= cl)) - * AND - * (a < au OR (a = au AND b < bu) OR (a = au AND b = bu AND c < cu)) - * - * It is often the case that a prefix of lower and upper bound tuples contains - * the same values, for example, (al = au), in which case, we will emit an - * expression tree of the following form: - * - * (a IS NOT NULL) and (b IS NOT NULL) and (c IS NOT NULL) - * AND - * (a = al) - * AND - * (b > bl OR (b = bl AND c >= cl)) - * AND - * (b < bu) OR (b = bu AND c < cu)) - * - * If a bound datum is either MINVALUE or MAXVALUE, these expressions are - * simplified using the fact that any value is greater than MINVALUE and less - * than MAXVALUE. So, for example, if cu = MAXVALUE, c < cu is automatically - * true, and we need not emit any expression for it, and the last line becomes - * - * (b < bu) OR (b = bu), which is simplified to (b <= bu) - * - * In most common cases with only one partition column, say a, the following - * expression tree will be generated: a IS NOT NULL AND a >= al AND a < au - * - * If we end up with an empty result list, we return a single-member list - * containing a constant TRUE, because callers expect a non-empty list. - */ -static List * -get_qual_for_range(PartitionKey key, PartitionBoundSpec *spec) -{ - List *result = NIL; - ListCell *cell1, - *cell2, - *partexprs_item, - *partexprs_item_saved; - int i, - j; - PartitionRangeDatum *ldatum, - *udatum; - Expr *keyCol; - Const *lower_val, - *upper_val; - NullTest *nulltest; - List *lower_or_arms, - *upper_or_arms; - int num_or_arms, - current_or_arm; - ListCell *lower_or_start_datum, - *upper_or_start_datum; - bool need_next_lower_arm, - need_next_upper_arm; - - lower_or_start_datum = list_head(spec->lowerdatums); - upper_or_start_datum = list_head(spec->upperdatums); - num_or_arms = key->partnatts; - - /* - * A range-partitioned table does not currently allow partition keys to be - * null, so emit an IS NOT NULL expression for each key column. 
- */ - partexprs_item = list_head(key->partexprs); - for (i = 0; i < key->partnatts; i++) - { - Expr *keyCol; - - if (key->partattrs[i] != 0) - { - keyCol = (Expr *) makeVar(1, - key->partattrs[i], - key->parttypid[i], - key->parttypmod[i], - key->parttypcoll[i], - 0); - } - else - { - if (partexprs_item == NULL) - elog(ERROR, "wrong number of partition key expressions"); - keyCol = copyObject(lfirst(partexprs_item)); - partexprs_item = lnext(partexprs_item); - } - - nulltest = makeNode(NullTest); - nulltest->arg = keyCol; - nulltest->nulltesttype = IS_NOT_NULL; - nulltest->argisrow = false; - nulltest->location = -1; - result = lappend(result, nulltest); - } - - /* - * Iterate over the key columns and check if the corresponding lower and - * upper datums are equal using the btree equality operator for the - * column's type. If equal, we emit single keyCol = common_value - * expression. Starting from the first column for which the corresponding - * lower and upper bound datums are not equal, we generate OR expressions - * as shown in the function's header comment. - */ - i = 0; - partexprs_item = list_head(key->partexprs); - partexprs_item_saved = partexprs_item; /* placate compiler */ - forboth(cell1, spec->lowerdatums, cell2, spec->upperdatums) - { - EState *estate; - MemoryContext oldcxt; - Expr *test_expr; - ExprState *test_exprstate; - Datum test_result; - bool isNull; - - ldatum = castNode(PartitionRangeDatum, lfirst(cell1)); - udatum = castNode(PartitionRangeDatum, lfirst(cell2)); - - /* - * Since get_range_key_properties() modifies partexprs_item, and we - * might need to start over from the previous expression in the later - * part of this function, save away the current value. - */ - partexprs_item_saved = partexprs_item; - - get_range_key_properties(key, i, ldatum, udatum, - &partexprs_item, - &keyCol, - &lower_val, &upper_val); - - /* - * If either value is NULL, the corresponding partition bound is - * either MINVALUE or MAXVALUE, and we treat them as unequal, because - * even if they're the same, there is no common value to equate the - * key column with. - */ - if (!lower_val || !upper_val) - break; - - /* Create the test expression */ - estate = CreateExecutorState(); - oldcxt = MemoryContextSwitchTo(estate->es_query_cxt); - test_expr = make_partition_op_expr(key, i, BTEqualStrategyNumber, - (Expr *) lower_val, - (Expr *) upper_val); - fix_opfuncids((Node *) test_expr); - test_exprstate = ExecInitExpr(test_expr, NULL); - test_result = ExecEvalExprSwitchContext(test_exprstate, - GetPerTupleExprContext(estate), - &isNull); - MemoryContextSwitchTo(oldcxt); - FreeExecutorState(estate); - - /* If not equal, go generate the OR expressions */ - if (!DatumGetBool(test_result)) - break; - - /* - * The bounds for the last key column can't be equal, because such a - * range partition would never be allowed to be defined (it would have - * an empty range otherwise). - */ - if (i == key->partnatts - 1) - elog(ERROR, "invalid range bound specification"); - - /* Equal, so generate keyCol = lower_val expression */ - result = lappend(result, - make_partition_op_expr(key, i, BTEqualStrategyNumber, - keyCol, (Expr *) lower_val)); - - i++; - } - - /* First pair of lower_val and upper_val that are not equal. */ - lower_or_start_datum = cell1; - upper_or_start_datum = cell2; - - /* OR will have as many arms as there are key columns left. 
*/ - num_or_arms = key->partnatts - i; - current_or_arm = 0; - lower_or_arms = upper_or_arms = NIL; - need_next_lower_arm = need_next_upper_arm = true; - while (current_or_arm < num_or_arms) - { - List *lower_or_arm_args = NIL, - *upper_or_arm_args = NIL; - - /* Restart scan of columns from the i'th one */ - j = i; - partexprs_item = partexprs_item_saved; - - for_both_cell(cell1, lower_or_start_datum, cell2, upper_or_start_datum) - { - PartitionRangeDatum *ldatum_next = NULL, - *udatum_next = NULL; - - ldatum = castNode(PartitionRangeDatum, lfirst(cell1)); - if (lnext(cell1)) - ldatum_next = castNode(PartitionRangeDatum, - lfirst(lnext(cell1))); - udatum = castNode(PartitionRangeDatum, lfirst(cell2)); - if (lnext(cell2)) - udatum_next = castNode(PartitionRangeDatum, - lfirst(lnext(cell2))); - get_range_key_properties(key, j, ldatum, udatum, - &partexprs_item, - &keyCol, - &lower_val, &upper_val); - - if (need_next_lower_arm && lower_val) - { - uint16 strategy; - - /* - * For the non-last columns of this arm, use the EQ operator. - * For the last column of this arm, use GT, unless this is the - * last column of the whole bound check, or the next bound - * datum is MINVALUE, in which case use GE. - */ - if (j - i < current_or_arm) - strategy = BTEqualStrategyNumber; - else if (j == key->partnatts - 1 || - (ldatum_next && - ldatum_next->kind == PARTITION_RANGE_DATUM_MINVALUE)) - strategy = BTGreaterEqualStrategyNumber; - else - strategy = BTGreaterStrategyNumber; - - lower_or_arm_args = lappend(lower_or_arm_args, - make_partition_op_expr(key, j, - strategy, - keyCol, - (Expr *) lower_val)); - } - - if (need_next_upper_arm && upper_val) - { - uint16 strategy; - - /* - * For the non-last columns of this arm, use the EQ operator. - * For the last column of this arm, use LT, unless the next - * bound datum is MAXVALUE, in which case use LE. - */ - if (j - i < current_or_arm) - strategy = BTEqualStrategyNumber; - else if (udatum_next && - udatum_next->kind == PARTITION_RANGE_DATUM_MAXVALUE) - strategy = BTLessEqualStrategyNumber; - else - strategy = BTLessStrategyNumber; - - upper_or_arm_args = lappend(upper_or_arm_args, - make_partition_op_expr(key, j, - strategy, - keyCol, - (Expr *) upper_val)); - } - - /* - * Did we generate enough of OR's arguments? First arm considers - * the first of the remaining columns, second arm considers first - * two of the remaining columns, and so on. - */ - ++j; - if (j - i > current_or_arm) - { - /* - * We must not emit any more arms if the new column that will - * be considered is unbounded, or this one was. - */ - if (!lower_val || !ldatum_next || - ldatum_next->kind != PARTITION_RANGE_DATUM_VALUE) - need_next_lower_arm = false; - if (!upper_val || !udatum_next || - udatum_next->kind != PARTITION_RANGE_DATUM_VALUE) - need_next_upper_arm = false; - break; - } - } - - if (lower_or_arm_args != NIL) - lower_or_arms = lappend(lower_or_arms, - list_length(lower_or_arm_args) > 1 - ? makeBoolExpr(AND_EXPR, lower_or_arm_args, -1) - : linitial(lower_or_arm_args)); - - if (upper_or_arm_args != NIL) - upper_or_arms = lappend(upper_or_arms, - list_length(upper_or_arm_args) > 1 - ? makeBoolExpr(AND_EXPR, upper_or_arm_args, -1) - : linitial(upper_or_arm_args)); - - /* If no work to do in the next iteration, break away. */ - if (!need_next_lower_arm && !need_next_upper_arm) - break; - - ++current_or_arm; - } - - /* - * Generate the OR expressions for each of lower and upper bounds (if - * required), and append to the list of implicitly ANDed list of - * expressions. 
- */ - if (lower_or_arms != NIL) - result = lappend(result, - list_length(lower_or_arms) > 1 - ? makeBoolExpr(OR_EXPR, lower_or_arms, -1) - : linitial(lower_or_arms)); - if (upper_or_arms != NIL) - result = lappend(result, - list_length(upper_or_arms) > 1 - ? makeBoolExpr(OR_EXPR, upper_or_arms, -1) - : linitial(upper_or_arms)); - - /* As noted above, caller expects the list to be non-empty. */ - if (result == NIL) - result = list_make1(makeBoolConst(true, false)); - - return result; -} + return defaultPartId; +} /* - * generate_partition_qual - * - * Generate partition predicate from rel's partition bound expression + * update_default_partition_oid * - * Result expression tree is stored CacheMemoryContext to ensure it survives - * as long as the relcache entry. But we should be running in a less long-lived - * working context. To avoid leaking cache memory if this routine fails partway - * through, we build in working memory and then copy the completed structure - * into cache memory. + * Update pg_partition_table.partdefid with a new default partition OID. */ -static List * -generate_partition_qual(Relation rel) +void +update_default_partition_oid(Oid parentId, Oid defaultPartId) { HeapTuple tuple; - MemoryContext oldcxt; - Datum boundDatum; - bool isnull; - PartitionBoundSpec *bound; - List *my_qual = NIL, - *result = NIL; - Relation parent; - bool found_whole_row; + Relation pg_partitioned_table; + Form_pg_partitioned_table part_table_form; - /* Guard against stack overflow due to overly deep partition tree */ - check_stack_depth(); + pg_partitioned_table = heap_open(PartitionedRelationId, RowExclusiveLock); - /* Quick copy */ - if (rel->rd_partcheck != NIL) - return copyObject(rel->rd_partcheck); + tuple = SearchSysCacheCopy1(PARTRELID, ObjectIdGetDatum(parentId)); - /* Grab at least an AccessShareLock on the parent table */ - parent = heap_open(get_partition_parent(RelationGetRelid(rel)), - AccessShareLock); - - /* Get pg_class.relpartbound */ - tuple = SearchSysCache1(RELOID, RelationGetRelid(rel)); if (!HeapTupleIsValid(tuple)) - elog(ERROR, "cache lookup failed for relation %u", - RelationGetRelid(rel)); - - boundDatum = SysCacheGetAttr(RELOID, tuple, - Anum_pg_class_relpartbound, - &isnull); - if (isnull) /* should not happen */ - elog(ERROR, "relation \"%s\" has relpartbound = null", - RelationGetRelationName(rel)); - bound = castNode(PartitionBoundSpec, - stringToNode(TextDatumGetCString(boundDatum))); - ReleaseSysCache(tuple); - - my_qual = get_qual_from_partbound(rel, parent, bound); - - /* Add the parent's quals to the list (if any) */ - if (parent->rd_rel->relispartition) - result = list_concat(generate_partition_qual(parent), my_qual); - else - result = my_qual; - - /* - * Change Vars to have partition's attnos instead of the parent's. We do - * this after we concatenate the parent's quals, because we want every Var - * in it to bear this relation's attnos. It's safe to assume varno = 1 - * here. 
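/*
 * Illustrative examples of the constraints generated by get_qual_for_range()
 * above (shown as SQL):
 *
 *   PARTITION BY RANGE (a), FOR VALUES FROM (1) TO (10):
 *     (a IS NOT NULL) AND (a >= 1) AND (a < 10)
 *
 *   PARTITION BY RANGE (a, b), FOR VALUES FROM (1, 10) TO (1, 20):
 *     the bounds share the prefix a = 1, so the result is
 *     (a IS NOT NULL) AND (b IS NOT NULL) AND (a = 1)
 *       AND (b >= 10) AND (b < 20)
 *
 *   PARTITION BY RANGE (a, b), FOR VALUES FROM (1, MINVALUE) TO (2, MAXVALUE):
 *     (a IS NOT NULL) AND (b IS NOT NULL) AND (a >= 1) AND (a <= 2)
 */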
- */ - result = map_partition_varattnos(result, 1, rel, parent, - &found_whole_row); - /* There can never be a whole-row reference here */ - if (found_whole_row) - elog(ERROR, "unexpected whole-row reference found in partition key"); - - /* Save a copy in the relcache */ - oldcxt = MemoryContextSwitchTo(CacheMemoryContext); - rel->rd_partcheck = copyObject(result); - MemoryContextSwitchTo(oldcxt); - - /* Keep the parent locked until commit */ - heap_close(parent, NoLock); - - return result; -} - -/* ---------------- - * FormPartitionKeyDatum - * Construct values[] and isnull[] arrays for the partition key - * of a tuple. - * - * pd Partition dispatch object of the partitioned table - * slot Heap tuple from which to extract partition key - * estate executor state for evaluating any partition key - * expressions (must be non-NULL) - * values Array of partition key Datums (output area) - * isnull Array of is-null indicators (output area) - * - * the ecxt_scantuple slot of estate's per-tuple expr context must point to - * the heap tuple passed in. - * ---------------- - */ -void -FormPartitionKeyDatum(PartitionDispatch pd, - TupleTableSlot *slot, - EState *estate, - Datum *values, - bool *isnull) -{ - ListCell *partexpr_item; - int i; - - if (pd->key->partexprs != NIL && pd->keystate == NIL) - { - /* Check caller has set up context correctly */ - Assert(estate != NULL && - GetPerTupleExprContext(estate)->ecxt_scantuple == slot); - - /* First time through, set up expression evaluation state */ - pd->keystate = ExecPrepareExprList(pd->key->partexprs, estate); - } - - partexpr_item = list_head(pd->keystate); - for (i = 0; i < pd->key->partnatts; i++) - { - AttrNumber keycol = pd->key->partattrs[i]; - Datum datum; - bool isNull; - - if (keycol != 0) - { - /* Plain column; get the value directly from the heap tuple */ - datum = slot_getattr(slot, keycol, &isNull); - } - else - { - /* Expression; need to evaluate it */ - if (partexpr_item == NULL) - elog(ERROR, "wrong number of partition key expressions"); - datum = ExecEvalExprSwitchContext((ExprState *) lfirst(partexpr_item), - GetPerTupleExprContext(estate), - &isNull); - partexpr_item = lnext(partexpr_item); - } - values[i] = datum; - isnull[i] = isNull; - } - - if (partexpr_item != NULL) - elog(ERROR, "wrong number of partition key expressions"); -} - -/* - * get_partition_for_tuple - * Finds a leaf partition for tuple contained in *slot - * - * Returned value is the sequence number of the leaf partition thus found, - * or -1 if no leaf partition is found for the tuple. *failed_at is set - * to the OID of the partitioned table whose partition was not found in - * the latter case. 
- */ -int -get_partition_for_tuple(PartitionDispatch *pd, - TupleTableSlot *slot, - EState *estate, - PartitionDispatchData **failed_at, - TupleTableSlot **failed_slot) -{ - PartitionDispatch parent; - Datum values[PARTITION_MAX_KEYS]; - bool isnull[PARTITION_MAX_KEYS]; - int cur_offset, - cur_index; - int i, - result; - ExprContext *ecxt = GetPerTupleExprContext(estate); - TupleTableSlot *ecxt_scantuple_old = ecxt->ecxt_scantuple; - - /* start with the root partitioned table */ - parent = pd[0]; - while (true) - { - PartitionKey key = parent->key; - PartitionDesc partdesc = parent->partdesc; - TupleTableSlot *myslot = parent->tupslot; - TupleConversionMap *map = parent->tupmap; - - if (myslot != NULL && map != NULL) - { - HeapTuple tuple = ExecFetchSlotTuple(slot); - - ExecClearTuple(myslot); - tuple = do_convert_tuple(tuple, map); - ExecStoreTuple(tuple, myslot, InvalidBuffer, true); - slot = myslot; - } - - /* Quick exit */ - if (partdesc->nparts == 0) - { - *failed_at = parent; - *failed_slot = slot; - result = -1; - goto error_exit; - } - - /* - * Extract partition key from tuple. Expression evaluation machinery - * that FormPartitionKeyDatum() invokes expects ecxt_scantuple to - * point to the correct tuple slot. The slot might have changed from - * what was used for the parent table if the table of the current - * partitioning level has different tuple descriptor from the parent. - * So update ecxt_scantuple accordingly. - */ - ecxt->ecxt_scantuple = slot; - FormPartitionKeyDatum(parent, slot, estate, values, isnull); - - if (key->strategy == PARTITION_STRATEGY_RANGE) - { - /* - * Since we cannot route tuples with NULL partition keys through a - * range-partitioned table, simply return that no partition exists - */ - for (i = 0; i < key->partnatts; i++) - { - if (isnull[i]) - { - *failed_at = parent; - *failed_slot = slot; - result = -1; - goto error_exit; - } - } - } - - /* - * A null partition key is only acceptable if null-accepting list - * partition exists. - */ - cur_index = -1; - if (isnull[0] && partition_bound_accepts_nulls(partdesc->boundinfo)) - cur_index = partdesc->boundinfo->null_index; - else if (!isnull[0]) - { - /* Else bsearch in partdesc->boundinfo */ - bool equal = false; - - cur_offset = partition_bound_bsearch(key, partdesc->boundinfo, - values, false, &equal); - switch (key->strategy) - { - case PARTITION_STRATEGY_LIST: - if (cur_offset >= 0 && equal) - cur_index = partdesc->boundinfo->indexes[cur_offset]; - else - cur_index = -1; - break; - - case PARTITION_STRATEGY_RANGE: - - /* - * Offset returned is such that the bound at offset is - * found to be less or equal with the tuple. So, the bound - * at offset+1 would be the upper bound. - */ - cur_index = partdesc->boundinfo->indexes[cur_offset + 1]; - break; - - default: - elog(ERROR, "unexpected partition strategy: %d", - (int) key->strategy); - } - } - - /* - * cur_index < 0 means we failed to find a partition of this parent. - * cur_index >= 0 means we either found the leaf partition, or the - * next parent to find a partition of. 
- */ - if (cur_index < 0) - { - result = -1; - *failed_at = parent; - *failed_slot = slot; - break; - } - else if (parent->indexes[cur_index] >= 0) - { - result = parent->indexes[cur_index]; - break; - } - else - parent = pd[-parent->indexes[cur_index]]; - } + elog(ERROR, "cache lookup failed for partition key of relation %u", + parentId); -error_exit: - ecxt->ecxt_scantuple = ecxt_scantuple_old; - return result; -} + part_table_form = (Form_pg_partitioned_table) GETSTRUCT(tuple); + part_table_form->partdefid = defaultPartId; + CatalogTupleUpdate(pg_partitioned_table, &tuple->t_self, tuple); -/* - * qsort_partition_list_value_cmp - * - * Compare two list partition bound datums - */ -static int32 -qsort_partition_list_value_cmp(const void *a, const void *b, void *arg) -{ - Datum val1 = (*(const PartitionListValue **) a)->value, - val2 = (*(const PartitionListValue **) b)->value; - PartitionKey key = (PartitionKey) arg; - - return DatumGetInt32(FunctionCall2Coll(&key->partsupfunc[0], - key->partcollation[0], - val1, val2)); + heap_freetuple(tuple); + heap_close(pg_partitioned_table, RowExclusiveLock); } /* - * make_one_range_bound + * get_proposed_default_constraint * - * Return a PartitionRangeBound given a list of PartitionRangeDatum elements - * and a flag telling whether the bound is lower or not. Made into a function - * because there are multiple sites that want to use this facility. + * This function returns the negation of new_part_constraints, which + * would be an integral part of the default partition constraints after + * addition of the partition to which the new_part_constraints belongs. */ -static PartitionRangeBound * -make_one_range_bound(PartitionKey key, int index, List *datums, bool lower) -{ - PartitionRangeBound *bound; - ListCell *lc; - int i; - - bound = (PartitionRangeBound *) palloc0(sizeof(PartitionRangeBound)); - bound->index = index; - bound->datums = (Datum *) palloc0(key->partnatts * sizeof(Datum)); - bound->kind = (PartitionRangeDatumKind *) palloc0(key->partnatts * - sizeof(PartitionRangeDatumKind)); - bound->lower = lower; - - i = 0; - foreach(lc, datums) - { - PartitionRangeDatum *datum = castNode(PartitionRangeDatum, lfirst(lc)); - - /* What's contained in this range datum? */ - bound->kind[i] = datum->kind; - - if (datum->kind == PARTITION_RANGE_DATUM_VALUE) - { - Const *val = castNode(Const, datum->value); - - if (val->constisnull) - elog(ERROR, "invalid range bound datum"); - bound->datums[i] = val->constvalue; - } - - i++; - } - - return bound; -} - -/* Used when sorting range bounds across all range partitions */ -static int32 -qsort_partition_rbound_cmp(const void *a, const void *b, void *arg) -{ - PartitionRangeBound *b1 = (*(PartitionRangeBound *const *) a); - PartitionRangeBound *b2 = (*(PartitionRangeBound *const *) b); - PartitionKey key = (PartitionKey) arg; - - return partition_rbound_cmp(key, b1->datums, b1->kind, b1->lower, b2); -} - -/* - * partition_rbound_cmp - * - * Return for two range bounds whether the 1st one (specified in datum1, - * kind1, and lower1) is <, =, or > the bound specified in *b2. - * - * Note that if the values of the two range bounds compare equal, then we take - * into account whether they are upper or lower bounds, and an upper bound is - * considered to be smaller than a lower bound. This is important to the way - * that RelationBuildPartitionDesc() builds the PartitionBoundInfoData - * structure, which only stores the upper bound of a common boundary between - * two contiguous partitions. 
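/*
 * Illustrative example of the tie-break described in the header comment of
 * partition_rbound_cmp() above: for adjacent range partitions
 * p1 FOR VALUES FROM (0) TO (10) and p2 FOR VALUES FROM (10) TO (20),
 * p1's upper bound and p2's lower bound both carry the datum 10, but the
 * upper bound compares as the smaller of the two, so the shared boundary is
 * kept only once (as the upper bound) in PartitionBoundInfoData.
 */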
- */ -static int32 -partition_rbound_cmp(PartitionKey key, - Datum *datums1, PartitionRangeDatumKind *kind1, - bool lower1, PartitionRangeBound *b2) +List * +get_proposed_default_constraint(List *new_part_constraints) { - int32 cmpval = 0; /* placate compiler */ - int i; - Datum *datums2 = b2->datums; - PartitionRangeDatumKind *kind2 = b2->kind; - bool lower2 = b2->lower; - - for (i = 0; i < key->partnatts; i++) - { - /* - * First, handle cases where the column is unbounded, which should not - * invoke the comparison procedure, and should not consider any later - * columns. Note that the PartitionRangeDatumKind enum elements - * compare the same way as the values they represent. - */ - if (kind1[i] < kind2[i]) - return -1; - else if (kind1[i] > kind2[i]) - return 1; - else if (kind1[i] != PARTITION_RANGE_DATUM_VALUE) - - /* - * The column bounds are both MINVALUE or both MAXVALUE. No later - * columns should be considered, but we still need to compare - * whether they are upper or lower bounds. - */ - break; + Expr *defPartConstraint; - cmpval = DatumGetInt32(FunctionCall2Coll(&key->partsupfunc[i], - key->partcollation[i], - datums1[i], - datums2[i])); - if (cmpval != 0) - break; - } + defPartConstraint = make_ands_explicit(new_part_constraints); /* - * If the comparison is anything other than equal, we're done. If they - * compare equal though, we still have to consider whether the boundaries - * are inclusive or exclusive. Exclusive one is considered smaller of the - * two. + * Derive the partition constraints of default partition by negating the + * given partition constraints. The partition constraint never evaluates + * to NULL, so negating it like this is safe. */ - if (cmpval == 0 && lower1 != lower2) - cmpval = lower1 ? 1 : -1; - - return cmpval; -} + defPartConstraint = makeBoolExpr(NOT_EXPR, + list_make1(defPartConstraint), + -1); -/* - * partition_rbound_datum_cmp - * - * Return whether range bound (specified in rb_datums, rb_kind, and rb_lower) - * is <, =, or > partition key of tuple (tuple_datums) - */ -static int32 -partition_rbound_datum_cmp(PartitionKey key, - Datum *rb_datums, PartitionRangeDatumKind *rb_kind, - Datum *tuple_datums) -{ - int i; - int32 cmpval = -1; - - for (i = 0; i < key->partnatts; i++) - { - if (rb_kind[i] == PARTITION_RANGE_DATUM_MINVALUE) - return -1; - else if (rb_kind[i] == PARTITION_RANGE_DATUM_MAXVALUE) - return 1; - - cmpval = DatumGetInt32(FunctionCall2Coll(&key->partsupfunc[i], - key->partcollation[i], - rb_datums[i], - tuple_datums[i])); - if (cmpval != 0) - break; - } - - return cmpval; -} - -/* - * partition_bound_cmp - * - * Return whether the bound at offset in boundinfo is <, =, or > the argument - * specified in *probe. - */ -static int32 -partition_bound_cmp(PartitionKey key, PartitionBoundInfo boundinfo, - int offset, void *probe, bool probe_is_bound) -{ - Datum *bound_datums = boundinfo->datums[offset]; - int32 cmpval = -1; - - switch (key->strategy) - { - case PARTITION_STRATEGY_LIST: - cmpval = DatumGetInt32(FunctionCall2Coll(&key->partsupfunc[0], - key->partcollation[0], - bound_datums[0], - *(Datum *) probe)); - break; - - case PARTITION_STRATEGY_RANGE: - { - PartitionRangeDatumKind *kind = boundinfo->kind[offset]; - - if (probe_is_bound) - { - /* - * We need to pass whether the existing bound is a lower - * bound, so that two equal-valued lower and upper bounds - * are not regarded equal. 
- */ - bool lower = boundinfo->indexes[offset] < 0; - - cmpval = partition_rbound_cmp(key, - bound_datums, kind, lower, - (PartitionRangeBound *) probe); - } - else - cmpval = partition_rbound_datum_cmp(key, - bound_datums, kind, - (Datum *) probe); - break; - } - - default: - elog(ERROR, "unexpected partition strategy: %d", - (int) key->strategy); - } - - return cmpval; -} - -/* - * Binary search on a collection of partition bounds. Returns greatest - * bound in array boundinfo->datums which is less than or equal to *probe. - * If all bounds in the array are greater than *probe, -1 is returned. - * - * *probe could either be a partition bound or a Datum array representing - * the partition key of a tuple being routed; probe_is_bound tells which. - * We pass that down to the comparison function so that it can interpret the - * contents of *probe accordingly. - * - * *is_equal is set to whether the bound at the returned index is equal with - * *probe. - */ -static int -partition_bound_bsearch(PartitionKey key, PartitionBoundInfo boundinfo, - void *probe, bool probe_is_bound, bool *is_equal) -{ - int lo, - hi, - mid; - - lo = -1; - hi = boundinfo->ndatums - 1; - while (lo < hi) - { - int32 cmpval; - - mid = (lo + hi + 1) / 2; - cmpval = partition_bound_cmp(key, boundinfo, mid, probe, - probe_is_bound); - if (cmpval <= 0) - { - lo = mid; - *is_equal = (cmpval == 0); - - if (*is_equal) - break; - } - else - hi = mid - 1; - } + /* Simplify, to put the negated expression into canonical form */ + defPartConstraint = + (Expr *) eval_const_expressions(NULL, + (Node *) defPartConstraint); + defPartConstraint = canonicalize_qual(defPartConstraint, true); - return lo; + return make_ands_implicit(defPartConstraint); } diff --git a/src/backend/catalog/pg_aggregate.c b/src/backend/catalog/pg_aggregate.c index a9204503d3..246776093e 100644 --- a/src/backend/catalog/pg_aggregate.c +++ b/src/backend/catalog/pg_aggregate.c @@ -3,7 +3,7 @@ * pg_aggregate.c * routines to support manipulation of the pg_aggregate relation * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * @@ -22,7 +22,6 @@ #include "catalog/pg_language.h" #include "catalog/pg_operator.h" #include "catalog/pg_proc.h" -#include "catalog/pg_proc_fn.h" #include "catalog/pg_type.h" #include "miscadmin.h" #include "parser/parse_coerce.h" @@ -65,6 +64,8 @@ AggregateCreate(const char *aggName, List *aggmfinalfnName, bool finalfnExtraArgs, bool mfinalfnExtraArgs, + char finalfnModify, + char mfinalfnModify, List *aggsortopName, Oid aggTransType, int32 aggTransSpace, @@ -409,16 +410,17 @@ AggregateCreate(const char *aggName, Oid combineType; /* - * Combine function must have 2 argument, each of which is the trans - * type + * Combine function must have 2 arguments, each of which is the trans + * type. VARIADIC doesn't affect it. 
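/*
 * Illustrative example for get_proposed_default_constraint() above: if the
 * partition being added carries the implicit-AND constraint list
 *   { a IS NOT NULL, a >= 1, a < 10 }
 * the function returns, again as an implicit-AND list, the canonicalized
 * form of
 *   NOT (a IS NOT NULL AND a >= 1 AND a < 10)
 * which becomes part of the default partition's constraint.
 */

/*
 * Minimal self-contained sketch of the search shape used by the removed
 * partition_bound_bsearch() above: return the index of the greatest element
 * of an ascending-sorted int array that is less than or equal to probe, or
 * -1 if every element is greater.  The real code compares partition bounds
 * rather than ints, but the lo/hi/mid bookkeeping is the same.
 */
static int
greatest_le(const int *bounds, int nbounds, int probe, bool *is_equal)
{
	int			lo = -1,
				hi = nbounds - 1;

	*is_equal = false;
	while (lo < hi)
	{
		int			mid = (lo + hi + 1) / 2;

		if (bounds[mid] <= probe)
		{
			lo = mid;
			*is_equal = (bounds[mid] == probe);
			if (*is_equal)
				break;
		}
		else
			hi = mid - 1;
	}

	return lo;
}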
*/ fnArgs[0] = aggTransType; fnArgs[1] = aggTransType; - combinefn = lookup_agg_function(aggcombinefnName, 2, fnArgs, - variadicArgType, &combineType); + combinefn = lookup_agg_function(aggcombinefnName, 2, + fnArgs, InvalidOid, + &combineType); - /* Ensure the return type matches the aggregates trans type */ + /* Ensure the return type matches the aggregate's trans type */ if (combineType != aggTransType) ereport(ERROR, (errcode(ERRCODE_DATATYPE_MISMATCH), @@ -428,14 +430,14 @@ AggregateCreate(const char *aggName, /* * A combine function to combine INTERNAL states must accept nulls and - * ensure that the returned state is in the correct memory context. + * ensure that the returned state is in the correct memory context. We + * cannot directly check the latter, but we can check the former. */ if (aggTransType == INTERNALOID && func_strict(combinefn)) ereport(ERROR, (errcode(ERRCODE_INVALID_FUNCTION_DEFINITION), errmsg("combine function with transition type %s must not be declared STRICT", format_type_be(aggTransType)))); - } /* @@ -443,10 +445,11 @@ AggregateCreate(const char *aggName, */ if (aggserialfnName) { + /* signature is always serialize(internal) returns bytea */ fnArgs[0] = INTERNALOID; serialfn = lookup_agg_function(aggserialfnName, 1, - fnArgs, variadicArgType, + fnArgs, InvalidOid, &rettype); if (rettype != BYTEAOID) @@ -462,11 +465,12 @@ AggregateCreate(const char *aggName, */ if (aggdeserialfnName) { + /* signature is always deserialize(bytea, internal) returns internal */ fnArgs[0] = BYTEAOID; fnArgs[1] = INTERNALOID; /* dummy argument for type safety */ deserialfn = lookup_agg_function(aggdeserialfnName, 2, - fnArgs, variadicArgType, + fnArgs, InvalidOid, &rettype); if (rettype != INTERNALOID) @@ -613,8 +617,7 @@ AggregateCreate(const char *aggName, InvalidOid, /* no validator */ "aggregate_dummy", /* placeholder proc */ NULL, /* probin */ - true, /* isAgg */ - false, /* isWindowFunc */ + PROKIND_AGGREGATE, false, /* security invoker (currently not * definable for agg) */ false, /* isLeakProof */ @@ -656,6 +659,8 @@ AggregateCreate(const char *aggName, values[Anum_pg_aggregate_aggmfinalfn - 1] = ObjectIdGetDatum(mfinalfn); values[Anum_pg_aggregate_aggfinalextra - 1] = BoolGetDatum(finalfnExtraArgs); values[Anum_pg_aggregate_aggmfinalextra - 1] = BoolGetDatum(mfinalfnExtraArgs); + values[Anum_pg_aggregate_aggfinalmodify - 1] = CharGetDatum(finalfnModify); + values[Anum_pg_aggregate_aggmfinalmodify - 1] = CharGetDatum(mfinalfnModify); values[Anum_pg_aggregate_aggsortop - 1] = ObjectIdGetDatum(sortop); values[Anum_pg_aggregate_aggtranstype - 1] = ObjectIdGetDatum(aggTransType); values[Anum_pg_aggregate_aggtransspace - 1] = Int32GetDatum(aggTransSpace); @@ -768,7 +773,11 @@ AggregateCreate(const char *aggName, /* * lookup_agg_function - * common code for finding transfn, invtransfn, finalfn, and combinefn + * common code for finding aggregate support functions + * + * fnName: possibly-schema-qualified function name + * nargs, input_types: expected function argument types + * variadicArgType: type of variadic argument if any, else InvalidOid * * Returns OID of function, and stores its return type into *rettype * @@ -860,7 +869,7 @@ lookup_agg_function(List *fnName, /* Check aggregate creator has permission to call the function */ aclresult = pg_proc_aclcheck(fnOid, GetUserId(), ACL_EXECUTE); if (aclresult != ACLCHECK_OK) - aclcheck_error(aclresult, ACL_KIND_PROC, get_func_name(fnOid)); + aclcheck_error(aclresult, OBJECT_FUNCTION, get_func_name(fnOid)); return fnOid; } diff 
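/*
 * Illustrative sketch (hypothetical extension code, not core code): a
 * combine function for an aggregate whose transition type is int8 merges
 * two partial states.  As checked above, it must take exactly two arguments
 * of the transition type and return the transition type; it would be wired
 * up with CREATE AGGREGATE ... (COMBINEFUNC = my_sum_combine, ...).
 */
#include "postgres.h"
#include "fmgr.h"

PG_MODULE_MAGIC;

PG_FUNCTION_INFO_V1(my_sum_combine);

Datum
my_sum_combine(PG_FUNCTION_ARGS)
{
	int64		state1 = PG_GETARG_INT64(0);
	int64		state2 = PG_GETARG_INT64(1);

	PG_RETURN_INT64(state1 + state2);
}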
--git a/src/backend/catalog/pg_collation.c b/src/backend/catalog/pg_collation.c index ca62896ecb..ce7e5fb5cc 100644 --- a/src/backend/catalog/pg_collation.c +++ b/src/backend/catalog/pg_collation.c @@ -3,7 +3,7 @@ * pg_collation.c * routines to support manipulation of the pg_collation relation * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * @@ -22,7 +22,6 @@ #include "catalog/indexing.h" #include "catalog/objectaccess.h" #include "catalog/pg_collation.h" -#include "catalog/pg_collation_fn.h" #include "catalog/pg_namespace.h" #include "mb/pg_wchar.h" #include "utils/builtins.h" diff --git a/src/backend/catalog/pg_constraint.c b/src/backend/catalog/pg_constraint.c index 1336c46d3f..1c235b4b29 100644 --- a/src/backend/catalog/pg_constraint.c +++ b/src/backend/catalog/pg_constraint.c @@ -3,7 +3,7 @@ * pg_constraint.c * routines to support manipulation of the pg_constraint relation * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * @@ -18,14 +18,17 @@ #include "access/heapam.h" #include "access/htup_details.h" #include "access/sysattr.h" +#include "access/tupconvert.h" +#include "access/xact.h" #include "catalog/dependency.h" #include "catalog/indexing.h" #include "catalog/objectaccess.h" +#include "catalog/partition.h" #include "catalog/pg_constraint.h" -#include "catalog/pg_constraint_fn.h" #include "catalog/pg_operator.h" #include "catalog/pg_type.h" #include "commands/defrem.h" +#include "commands/tablecmds.h" #include "utils/array.h" #include "utils/builtins.h" #include "utils/fmgroids.h" @@ -35,6 +38,10 @@ #include "utils/tqual.h" +static void clone_fk_constraints(Relation pg_constraint, Relation parentRel, + Relation partRel, List *clone, List **cloned); + + /* * CreateConstraintEntry * Create a constraint table entry. @@ -52,9 +59,11 @@ CreateConstraintEntry(const char *constraintName, bool isDeferrable, bool isDeferred, bool isValidated, + Oid parentConstrId, Oid relId, const int16 *constraintKey, int constraintNKeys, + int constraintNTotalKeys, Oid domainId, Oid indexRelId, Oid foreignRelId, @@ -69,7 +78,6 @@ CreateConstraintEntry(const char *constraintName, const Oid *exclOp, Node *conExpr, const char *conBin, - const char *conSrc, bool conIsLocal, int conInhCount, bool conNoInherit, @@ -170,6 +178,7 @@ CreateConstraintEntry(const char *constraintName, values[Anum_pg_constraint_conrelid - 1] = ObjectIdGetDatum(relId); values[Anum_pg_constraint_contypid - 1] = ObjectIdGetDatum(domainId); values[Anum_pg_constraint_conindid - 1] = ObjectIdGetDatum(indexRelId); + values[Anum_pg_constraint_conparentid - 1] = ObjectIdGetDatum(parentConstrId); values[Anum_pg_constraint_confrelid - 1] = ObjectIdGetDatum(foreignRelId); values[Anum_pg_constraint_confupdtype - 1] = CharGetDatum(foreignUpdateType); values[Anum_pg_constraint_confdeltype - 1] = CharGetDatum(foreignDeleteType); @@ -208,22 +217,11 @@ CreateConstraintEntry(const char *constraintName, else nulls[Anum_pg_constraint_conexclop - 1] = true; - /* - * initialize the binary form of the check constraint. 
- */ if (conBin) values[Anum_pg_constraint_conbin - 1] = CStringGetTextDatum(conBin); else nulls[Anum_pg_constraint_conbin - 1] = true; - /* - * initialize the text form of the check constraint - */ - if (conSrc) - values[Anum_pg_constraint_consrc - 1] = CStringGetTextDatum(conSrc); - else - nulls[Anum_pg_constraint_consrc - 1] = true; - tup = heap_form_tuple(RelationGetDescr(conDesc), values, nulls); conOid = CatalogTupleInsert(conDesc, tup); @@ -244,9 +242,9 @@ CreateConstraintEntry(const char *constraintName, relobject.classId = RelationRelationId; relobject.objectId = relId; - if (constraintNKeys > 0) + if (constraintNTotalKeys > 0) { - for (i = 0; i < constraintNKeys; i++) + for (i = 0; i < constraintNTotalKeys; i++) { relobject.objectSubId = constraintKey[i]; @@ -375,6 +373,390 @@ CreateConstraintEntry(const char *constraintName, return conOid; } +/* + * CloneForeignKeyConstraints + * Clone foreign keys from a partitioned table to a newly acquired + * partition. + * + * relationId is a partition of parentId, so we can be certain that it has the + * same columns with the same datatypes. The columns may be in different + * order, though. + * + * The *cloned list is appended ClonedConstraint elements describing what was + * created. + */ +void +CloneForeignKeyConstraints(Oid parentId, Oid relationId, List **cloned) +{ + Relation pg_constraint; + Relation parentRel; + Relation rel; + ScanKeyData key; + SysScanDesc scan; + HeapTuple tuple; + List *clone = NIL; + + parentRel = heap_open(parentId, NoLock); /* already got lock */ + /* see ATAddForeignKeyConstraint about lock level */ + rel = heap_open(relationId, AccessExclusiveLock); + pg_constraint = heap_open(ConstraintRelationId, RowShareLock); + + /* Obtain the list of constraints to clone or attach */ + ScanKeyInit(&key, + Anum_pg_constraint_conrelid, BTEqualStrategyNumber, + F_OIDEQ, ObjectIdGetDatum(parentId)); + scan = systable_beginscan(pg_constraint, ConstraintRelidTypidNameIndexId, true, + NULL, 1, &key); + while ((tuple = systable_getnext(scan)) != NULL) + clone = lappend_oid(clone, HeapTupleGetOid(tuple)); + systable_endscan(scan); + + /* Do the actual work, recursing to partitions as needed */ + clone_fk_constraints(pg_constraint, parentRel, rel, clone, cloned); + + /* We're done. Clean up */ + heap_close(parentRel, NoLock); + heap_close(rel, NoLock); /* keep lock till commit */ + heap_close(pg_constraint, RowShareLock); +} + +/* + * clone_fk_constraints + * Recursive subroutine for CloneForeignKeyConstraints + * + * Clone the given list of FK constraints when a partition is attached. + * + * When cloning foreign keys to a partition, it may happen that equivalent + * constraints already exist in the partition for some of them. We can skip + * creating a clone in that case, and instead just attach the existing + * constraint to the one in the parent. + * + * This function recurses to partitions, if the new partition is partitioned; + * of course, only do this for FKs that were actually cloned. + */ +static void +clone_fk_constraints(Relation pg_constraint, Relation parentRel, + Relation partRel, List *clone, List **cloned) +{ + TupleDesc tupdesc; + AttrNumber *attmap; + List *partFKs; + List *subclone = NIL; + ListCell *cell; + + tupdesc = RelationGetDescr(pg_constraint); + + /* + * The constraint key may differ, if the columns in the partition are + * different. This map is used to convert them. 
+ */ + attmap = convert_tuples_by_name_map(RelationGetDescr(partRel), + RelationGetDescr(parentRel), + gettext_noop("could not convert row type")); + + partFKs = copyObject(RelationGetFKeyList(partRel)); + + foreach(cell, clone) + { + Oid parentConstrOid = lfirst_oid(cell); + Form_pg_constraint constrForm; + HeapTuple tuple; + AttrNumber conkey[INDEX_MAX_KEYS]; + AttrNumber mapped_conkey[INDEX_MAX_KEYS]; + AttrNumber confkey[INDEX_MAX_KEYS]; + Oid conpfeqop[INDEX_MAX_KEYS]; + Oid conppeqop[INDEX_MAX_KEYS]; + Oid conffeqop[INDEX_MAX_KEYS]; + Constraint *fkconstraint; + bool attach_it; + Oid constrOid; + ObjectAddress parentAddr, + childAddr; + int nelem; + ListCell *cell; + int i; + ArrayType *arr; + Datum datum; + bool isnull; + + tuple = SearchSysCache1(CONSTROID, parentConstrOid); + if (!tuple) + elog(ERROR, "cache lookup failed for constraint %u", + parentConstrOid); + constrForm = (Form_pg_constraint) GETSTRUCT(tuple); + + /* only foreign keys */ + if (constrForm->contype != CONSTRAINT_FOREIGN) + { + ReleaseSysCache(tuple); + continue; + } + + ObjectAddressSet(parentAddr, ConstraintRelationId, parentConstrOid); + + datum = fastgetattr(tuple, Anum_pg_constraint_conkey, + tupdesc, &isnull); + if (isnull) + elog(ERROR, "null conkey"); + arr = DatumGetArrayTypeP(datum); + nelem = ARR_DIMS(arr)[0]; + if (ARR_NDIM(arr) != 1 || + nelem < 1 || + nelem > INDEX_MAX_KEYS || + ARR_HASNULL(arr) || + ARR_ELEMTYPE(arr) != INT2OID) + elog(ERROR, "conkey is not a 1-D smallint array"); + memcpy(conkey, ARR_DATA_PTR(arr), nelem * sizeof(AttrNumber)); + + for (i = 0; i < nelem; i++) + mapped_conkey[i] = attmap[conkey[i] - 1]; + + datum = fastgetattr(tuple, Anum_pg_constraint_confkey, + tupdesc, &isnull); + if (isnull) + elog(ERROR, "null confkey"); + arr = DatumGetArrayTypeP(datum); + nelem = ARR_DIMS(arr)[0]; + if (ARR_NDIM(arr) != 1 || + nelem < 1 || + nelem > INDEX_MAX_KEYS || + ARR_HASNULL(arr) || + ARR_ELEMTYPE(arr) != INT2OID) + elog(ERROR, "confkey is not a 1-D smallint array"); + memcpy(confkey, ARR_DATA_PTR(arr), nelem * sizeof(AttrNumber)); + + datum = fastgetattr(tuple, Anum_pg_constraint_conpfeqop, + tupdesc, &isnull); + if (isnull) + elog(ERROR, "null conpfeqop"); + arr = DatumGetArrayTypeP(datum); + nelem = ARR_DIMS(arr)[0]; + if (ARR_NDIM(arr) != 1 || + nelem < 1 || + nelem > INDEX_MAX_KEYS || + ARR_HASNULL(arr) || + ARR_ELEMTYPE(arr) != OIDOID) + elog(ERROR, "conpfeqop is not a 1-D OID array"); + memcpy(conpfeqop, ARR_DATA_PTR(arr), nelem * sizeof(Oid)); + + datum = fastgetattr(tuple, Anum_pg_constraint_conpfeqop, + tupdesc, &isnull); + if (isnull) + elog(ERROR, "null conpfeqop"); + arr = DatumGetArrayTypeP(datum); + nelem = ARR_DIMS(arr)[0]; + if (ARR_NDIM(arr) != 1 || + nelem < 1 || + nelem > INDEX_MAX_KEYS || + ARR_HASNULL(arr) || + ARR_ELEMTYPE(arr) != OIDOID) + elog(ERROR, "conpfeqop is not a 1-D OID array"); + memcpy(conpfeqop, ARR_DATA_PTR(arr), nelem * sizeof(Oid)); + + datum = fastgetattr(tuple, Anum_pg_constraint_conppeqop, + tupdesc, &isnull); + if (isnull) + elog(ERROR, "null conppeqop"); + arr = DatumGetArrayTypeP(datum); + nelem = ARR_DIMS(arr)[0]; + if (ARR_NDIM(arr) != 1 || + nelem < 1 || + nelem > INDEX_MAX_KEYS || + ARR_HASNULL(arr) || + ARR_ELEMTYPE(arr) != OIDOID) + elog(ERROR, "conppeqop is not a 1-D OID array"); + memcpy(conppeqop, ARR_DATA_PTR(arr), nelem * sizeof(Oid)); + + datum = fastgetattr(tuple, Anum_pg_constraint_conffeqop, + tupdesc, &isnull); + if (isnull) + elog(ERROR, "null conffeqop"); + arr = DatumGetArrayTypeP(datum); + nelem = ARR_DIMS(arr)[0]; + if 
(ARR_NDIM(arr) != 1 || + nelem < 1 || + nelem > INDEX_MAX_KEYS || + ARR_HASNULL(arr) || + ARR_ELEMTYPE(arr) != OIDOID) + elog(ERROR, "conffeqop is not a 1-D OID array"); + memcpy(conffeqop, ARR_DATA_PTR(arr), nelem * sizeof(Oid)); + + /* + * Before creating a new constraint, see whether any existing FKs are + * fit for the purpose. If one is, attach the parent constraint to it, + * and don't clone anything. This way we avoid the expensive + * verification step and don't end up with a duplicate FK. This also + * means we don't consider this constraint when recursing to + * partitions. + */ + attach_it = false; + foreach(cell, partFKs) + { + ForeignKeyCacheInfo *fk = lfirst_node(ForeignKeyCacheInfo, cell); + Form_pg_constraint partConstr; + HeapTuple partcontup; + + attach_it = true; + + /* + * Do some quick & easy initial checks. If any of these fail, we + * cannot use this constraint, but keep looking. + */ + if (fk->confrelid != constrForm->confrelid || fk->nkeys != nelem) + { + attach_it = false; + continue; + } + for (i = 0; i < nelem; i++) + { + if (fk->conkey[i] != mapped_conkey[i] || + fk->confkey[i] != confkey[i] || + fk->conpfeqop[i] != conpfeqop[i]) + { + attach_it = false; + break; + } + } + if (!attach_it) + continue; + + /* + * Looks good so far; do some more extensive checks. Presumably + * the check for 'convalidated' could be dropped, since we don't + * really care about that, but let's be careful for now. + */ + partcontup = SearchSysCache1(CONSTROID, + ObjectIdGetDatum(fk->conoid)); + if (!partcontup) + elog(ERROR, "cache lookup failed for constraint %u", + fk->conoid); + partConstr = (Form_pg_constraint) GETSTRUCT(partcontup); + if (OidIsValid(partConstr->conparentid) || + !partConstr->convalidated || + partConstr->condeferrable != constrForm->condeferrable || + partConstr->condeferred != constrForm->condeferred || + partConstr->confupdtype != constrForm->confupdtype || + partConstr->confdeltype != constrForm->confdeltype || + partConstr->confmatchtype != constrForm->confmatchtype) + { + ReleaseSysCache(partcontup); + attach_it = false; + continue; + } + + ReleaseSysCache(partcontup); + + /* looks good! Attach this constraint */ + ConstraintSetParentConstraint(fk->conoid, + HeapTupleGetOid(tuple)); + CommandCounterIncrement(); + attach_it = true; + break; + } + + /* + * If we attached to an existing constraint, there is no need to + * create a new one. In fact, there's no need to recurse for this + * constraint to partitions, either. 
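A minimal sketch of the "quick & easy" attach-vs-clone test above, written as a standalone predicate. This is illustrative only and not part of the patch: the helper name fk_quick_match is invented, fk is assumed to come from RelationGetFKeyList() on the partition, and the three arrays are assumed to have been built exactly as clone_fk_constraints builds them.

#include "postgres.h"
#include "utils/rel.h"

/*
 * Illustrative only.  Returns true if an existing partition FK (fk) matches
 * the parent constraint described by confrelid, nelem and the key arrays,
 * per the quick checks; the deeper syscache checks (deferrability, match
 * type, ON UPDATE/DELETE actions) still have to run afterwards, since
 * ForeignKeyCacheInfo does not carry those fields.
 */
static bool
fk_quick_match(ForeignKeyCacheInfo *fk, Oid confrelid, int nelem,
			   const AttrNumber *mapped_conkey, const AttrNumber *confkey,
			   const Oid *conpfeqop)
{
	int			i;

	if (fk->confrelid != confrelid || fk->nkeys != nelem)
		return false;

	for (i = 0; i < nelem; i++)
	{
		if (fk->conkey[i] != mapped_conkey[i] ||
			fk->confkey[i] != confkey[i] ||
			fk->conpfeqop[i] != conpfeqop[i])
			return false;
	}
	return true;
}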
+ */ + if (attach_it) + { + ReleaseSysCache(tuple); + continue; + } + + constrOid = + CreateConstraintEntry(NameStr(constrForm->conname), + constrForm->connamespace, + CONSTRAINT_FOREIGN, + constrForm->condeferrable, + constrForm->condeferred, + constrForm->convalidated, + HeapTupleGetOid(tuple), + RelationGetRelid(partRel), + mapped_conkey, + nelem, + nelem, + InvalidOid, /* not a domain constraint */ + constrForm->conindid, /* same index */ + constrForm->confrelid, /* same foreign rel */ + confkey, + conpfeqop, + conppeqop, + conffeqop, + nelem, + constrForm->confupdtype, + constrForm->confdeltype, + constrForm->confmatchtype, + NULL, + NULL, + NULL, + false, + 1, false, true); + subclone = lappend_oid(subclone, constrOid); + + ObjectAddressSet(childAddr, ConstraintRelationId, constrOid); + recordDependencyOn(&childAddr, &parentAddr, DEPENDENCY_INTERNAL_AUTO); + + fkconstraint = makeNode(Constraint); + /* for now this is all we need */ + fkconstraint->conname = pstrdup(NameStr(constrForm->conname)); + fkconstraint->fk_upd_action = constrForm->confupdtype; + fkconstraint->fk_del_action = constrForm->confdeltype; + fkconstraint->deferrable = constrForm->condeferrable; + fkconstraint->initdeferred = constrForm->condeferred; + + createForeignKeyTriggers(partRel, constrForm->confrelid, fkconstraint, + constrOid, constrForm->conindid, false); + + if (cloned) + { + ClonedConstraint *newc; + + /* + * Feed back caller about the constraints we created, so that they + * can set up constraint verification. + */ + newc = palloc(sizeof(ClonedConstraint)); + newc->relid = RelationGetRelid(partRel); + newc->refrelid = constrForm->confrelid; + newc->conindid = constrForm->conindid; + newc->conid = constrOid; + newc->constraint = fkconstraint; + + *cloned = lappend(*cloned, newc); + } + + ReleaseSysCache(tuple); + } + + pfree(attmap); + list_free_deep(partFKs); + + /* + * If the partition is partitioned, recurse to handle any constraints that + * were cloned. + */ + if (partRel->rd_rel->relkind == RELKIND_PARTITIONED_TABLE && + subclone != NIL) + { + PartitionDesc partdesc = RelationGetPartitionDesc(partRel); + int i; + + for (i = 0; i < partdesc->nparts; i++) + { + Relation childRel; + + childRel = heap_open(partdesc->oids[i], AccessExclusiveLock); + clone_fk_constraints(pg_constraint, + partRel, + childRel, + subclone, + cloned); + heap_close(childRel, NoLock); /* keep lock till commit */ + } + } +} /* * Test whether given name is currently used as a constraint name @@ -390,17 +772,58 @@ CreateConstraintEntry(const char *constraintName, */ bool ConstraintNameIsUsed(ConstraintCategory conCat, Oid objId, - Oid objNamespace, const char *conname) + const char *conname) { bool found; Relation conDesc; SysScanDesc conscan; - ScanKeyData skey[2]; - HeapTuple tup; + ScanKeyData skey[3]; conDesc = heap_open(ConstraintRelationId, AccessShareLock); - found = false; + ScanKeyInit(&skey[0], + Anum_pg_constraint_conrelid, + BTEqualStrategyNumber, F_OIDEQ, + ObjectIdGetDatum((conCat == CONSTRAINT_RELATION) + ? objId : InvalidOid)); + ScanKeyInit(&skey[1], + Anum_pg_constraint_contypid, + BTEqualStrategyNumber, F_OIDEQ, + ObjectIdGetDatum((conCat == CONSTRAINT_DOMAIN) + ? 
objId : InvalidOid)); + ScanKeyInit(&skey[2], + Anum_pg_constraint_conname, + BTEqualStrategyNumber, F_NAMEEQ, + CStringGetDatum(conname)); + + conscan = systable_beginscan(conDesc, ConstraintRelidTypidNameIndexId, + true, NULL, 3, skey); + + /* There can be at most one matching row */ + found = (HeapTupleIsValid(systable_getnext(conscan))); + + systable_endscan(conscan); + heap_close(conDesc, AccessShareLock); + + return found; +} + +/* + * Does any constraint of the given name exist in the given namespace? + * + * This is used for code that wants to match ChooseConstraintName's rule + * that we should avoid autogenerating duplicate constraint names within a + * namespace. + */ +bool +ConstraintNameExists(const char *conname, Oid namespaceid) +{ + bool found; + Relation conDesc; + SysScanDesc conscan; + ScanKeyData skey[2]; + + conDesc = heap_open(ConstraintRelationId, AccessShareLock); ScanKeyInit(&skey[0], Anum_pg_constraint_conname, @@ -410,26 +833,12 @@ ConstraintNameIsUsed(ConstraintCategory conCat, Oid objId, ScanKeyInit(&skey[1], Anum_pg_constraint_connamespace, BTEqualStrategyNumber, F_OIDEQ, - ObjectIdGetDatum(objNamespace)); + ObjectIdGetDatum(namespaceid)); conscan = systable_beginscan(conDesc, ConstraintNameNspIndexId, true, NULL, 2, skey); - while (HeapTupleIsValid(tup = systable_getnext(conscan))) - { - Form_pg_constraint con = (Form_pg_constraint) GETSTRUCT(tup); - - if (conCat == CONSTRAINT_RELATION && con->conrelid == objId) - { - found = true; - break; - } - else if (conCat == CONSTRAINT_DOMAIN && con->contypid == objId) - { - found = true; - break; - } - } + found = (HeapTupleIsValid(systable_getnext(conscan))); systable_endscan(conscan); heap_close(conDesc, AccessShareLock); @@ -636,13 +1045,11 @@ RenameConstraintById(Oid conId, const char *newname) con = (Form_pg_constraint) GETSTRUCT(tuple); /* - * We need to check whether the name is already in use --- note that there - * currently is not a unique index that would catch this. + * For user-friendliness, check whether the name is already in use. */ if (OidIsValid(con->conrelid) && ConstraintNameIsUsed(CONSTRAINT_RELATION, con->conrelid, - con->connamespace, newname)) ereport(ERROR, (errcode(ERRCODE_DUPLICATE_OBJECT), @@ -651,7 +1058,6 @@ RenameConstraintById(Oid conId, const char *newname) if (OidIsValid(con->contypid) && ConstraintNameIsUsed(CONSTRAINT_DOMAIN, con->contypid, - con->connamespace, newname)) ereport(ERROR, (errcode(ERRCODE_DUPLICATE_OBJECT), @@ -681,32 +1087,23 @@ AlterConstraintNamespaces(Oid ownerId, Oid oldNspId, Oid newNspId, bool isType, ObjectAddresses *objsMoved) { Relation conRel; - ScanKeyData key[1]; + ScanKeyData key[2]; SysScanDesc scan; HeapTuple tup; conRel = heap_open(ConstraintRelationId, RowExclusiveLock); - if (isType) - { - ScanKeyInit(&key[0], - Anum_pg_constraint_contypid, - BTEqualStrategyNumber, F_OIDEQ, - ObjectIdGetDatum(ownerId)); - - scan = systable_beginscan(conRel, ConstraintTypidIndexId, true, - NULL, 1, key); - } - else - { - ScanKeyInit(&key[0], - Anum_pg_constraint_conrelid, - BTEqualStrategyNumber, F_OIDEQ, - ObjectIdGetDatum(ownerId)); + ScanKeyInit(&key[0], + Anum_pg_constraint_conrelid, + BTEqualStrategyNumber, F_OIDEQ, + ObjectIdGetDatum(isType ? InvalidOid : ownerId)); + ScanKeyInit(&key[1], + Anum_pg_constraint_contypid, + BTEqualStrategyNumber, F_OIDEQ, + ObjectIdGetDatum(isType ? 
ownerId : InvalidOid)); - scan = systable_beginscan(conRel, ConstraintRelidIndexId, true, - NULL, 1, key); - } + scan = systable_beginscan(conRel, ConstraintRelidTypidNameIndexId, true, + NULL, 2, key); while (HeapTupleIsValid((tup = systable_getnext(scan)))) { @@ -747,6 +1144,60 @@ AlterConstraintNamespaces(Oid ownerId, Oid oldNspId, heap_close(conRel, RowExclusiveLock); } +/* + * ConstraintSetParentConstraint + * Set a partition's constraint as child of its parent table's + * + * This updates the constraint's pg_constraint row to show it as inherited, and + * add a dependency to the parent so that it cannot be removed on its own. + */ +void +ConstraintSetParentConstraint(Oid childConstrId, Oid parentConstrId) +{ + Relation constrRel; + Form_pg_constraint constrForm; + HeapTuple tuple, + newtup; + ObjectAddress depender; + ObjectAddress referenced; + + constrRel = heap_open(ConstraintRelationId, RowExclusiveLock); + tuple = SearchSysCache1(CONSTROID, ObjectIdGetDatum(childConstrId)); + if (!HeapTupleIsValid(tuple)) + elog(ERROR, "cache lookup failed for constraint %u", childConstrId); + newtup = heap_copytuple(tuple); + constrForm = (Form_pg_constraint) GETSTRUCT(newtup); + if (OidIsValid(parentConstrId)) + { + constrForm->conislocal = false; + constrForm->coninhcount++; + constrForm->conparentid = parentConstrId; + + CatalogTupleUpdate(constrRel, &tuple->t_self, newtup); + + ObjectAddressSet(referenced, ConstraintRelationId, parentConstrId); + ObjectAddressSet(depender, ConstraintRelationId, childConstrId); + + recordDependencyOn(&depender, &referenced, DEPENDENCY_INTERNAL_AUTO); + } + else + { + constrForm->coninhcount--; + if (constrForm->coninhcount <= 0) + constrForm->conislocal = true; + constrForm->conparentid = InvalidOid; + + deleteDependencyRecordsForClass(ConstraintRelationId, childConstrId, + ConstraintRelationId, + DEPENDENCY_INTERNAL_AUTO); + CatalogTupleUpdate(constrRel, &tuple->t_self, newtup); + } + + ReleaseSysCache(tuple); + heap_close(constrRel, RowExclusiveLock); +} + + /* * get_relation_constraint_oid * Find a constraint on the specified relation with the specified name. @@ -758,43 +1209,128 @@ get_relation_constraint_oid(Oid relid, const char *conname, bool missing_ok) Relation pg_constraint; HeapTuple tuple; SysScanDesc scan; - ScanKeyData skey[1]; + ScanKeyData skey[3]; Oid conOid = InvalidOid; - /* - * Fetch the constraint tuple from pg_constraint. There may be more than - * one match, because constraints are not required to have unique names; - * if so, error out. 
- */ pg_constraint = heap_open(ConstraintRelationId, AccessShareLock); ScanKeyInit(&skey[0], Anum_pg_constraint_conrelid, BTEqualStrategyNumber, F_OIDEQ, ObjectIdGetDatum(relid)); + ScanKeyInit(&skey[1], + Anum_pg_constraint_contypid, + BTEqualStrategyNumber, F_OIDEQ, + ObjectIdGetDatum(InvalidOid)); + ScanKeyInit(&skey[2], + Anum_pg_constraint_conname, + BTEqualStrategyNumber, F_NAMEEQ, + CStringGetDatum(conname)); - scan = systable_beginscan(pg_constraint, ConstraintRelidIndexId, true, - NULL, 1, skey); + scan = systable_beginscan(pg_constraint, ConstraintRelidTypidNameIndexId, true, + NULL, 3, skey); - while (HeapTupleIsValid(tuple = systable_getnext(scan))) + /* There can be at most one matching row */ + if (HeapTupleIsValid(tuple = systable_getnext(scan))) + conOid = HeapTupleGetOid(tuple); + + systable_endscan(scan); + + /* If no such constraint exists, complain */ + if (!OidIsValid(conOid) && !missing_ok) + ereport(ERROR, + (errcode(ERRCODE_UNDEFINED_OBJECT), + errmsg("constraint \"%s\" for table \"%s\" does not exist", + conname, get_rel_name(relid)))); + + heap_close(pg_constraint, AccessShareLock); + + return conOid; +} + +/* + * get_relation_constraint_attnos + * Find a constraint on the specified relation with the specified name + * and return the constrained columns. + * + * Returns a Bitmapset of the column attnos of the constrained columns, with + * attnos being offset by FirstLowInvalidHeapAttributeNumber so that system + * columns can be represented. + * + * *constraintOid is set to the OID of the constraint, or InvalidOid on + * failure. + */ +Bitmapset * +get_relation_constraint_attnos(Oid relid, const char *conname, + bool missing_ok, Oid *constraintOid) +{ + Bitmapset *conattnos = NULL; + Relation pg_constraint; + HeapTuple tuple; + SysScanDesc scan; + ScanKeyData skey[3]; + + /* Set *constraintOid, to avoid complaints about uninitialized vars */ + *constraintOid = InvalidOid; + + pg_constraint = heap_open(ConstraintRelationId, AccessShareLock); + + ScanKeyInit(&skey[0], + Anum_pg_constraint_conrelid, + BTEqualStrategyNumber, F_OIDEQ, + ObjectIdGetDatum(relid)); + ScanKeyInit(&skey[1], + Anum_pg_constraint_contypid, + BTEqualStrategyNumber, F_OIDEQ, + ObjectIdGetDatum(InvalidOid)); + ScanKeyInit(&skey[2], + Anum_pg_constraint_conname, + BTEqualStrategyNumber, F_NAMEEQ, + CStringGetDatum(conname)); + + scan = systable_beginscan(pg_constraint, ConstraintRelidTypidNameIndexId, true, + NULL, 3, skey); + + /* There can be at most one matching row */ + if (HeapTupleIsValid(tuple = systable_getnext(scan))) { - Form_pg_constraint con = (Form_pg_constraint) GETSTRUCT(tuple); + Datum adatum; + bool isNull; - if (strcmp(NameStr(con->conname), conname) == 0) + *constraintOid = HeapTupleGetOid(tuple); + + /* Extract the conkey array, ie, attnums of constrained columns */ + adatum = heap_getattr(tuple, Anum_pg_constraint_conkey, + RelationGetDescr(pg_constraint), &isNull); + if (!isNull) { - if (OidIsValid(conOid)) - ereport(ERROR, - (errcode(ERRCODE_DUPLICATE_OBJECT), - errmsg("table \"%s\" has multiple constraints named \"%s\"", - get_rel_name(relid), conname))); - conOid = HeapTupleGetOid(tuple); + ArrayType *arr; + int numcols; + int16 *attnums; + int i; + + arr = DatumGetArrayTypeP(adatum); /* ensure not toasted */ + numcols = ARR_DIMS(arr)[0]; + if (ARR_NDIM(arr) != 1 || + numcols < 0 || + ARR_HASNULL(arr) || + ARR_ELEMTYPE(arr) != INT2OID) + elog(ERROR, "conkey is not a 1-D smallint array"); + attnums = (int16 *) ARR_DATA_PTR(arr); + + /* Construct the result value */ + for 
(i = 0; i < numcols; i++) + { + conattnos = bms_add_member(conattnos, + attnums[i] - FirstLowInvalidHeapAttributeNumber); + } } } systable_endscan(scan); /* If no such constraint exists, complain */ - if (!OidIsValid(conOid) && !missing_ok) + if (!OidIsValid(*constraintOid) && !missing_ok) ereport(ERROR, (errcode(ERRCODE_UNDEFINED_OBJECT), errmsg("constraint \"%s\" for table \"%s\" does not exist", @@ -802,7 +1338,46 @@ get_relation_constraint_oid(Oid relid, const char *conname, bool missing_ok) heap_close(pg_constraint, AccessShareLock); - return conOid; + return conattnos; +} + +/* + * Return the OID of the constraint associated with the given index in the + * given relation; or InvalidOid if no such index is catalogued. + */ +Oid +get_relation_idx_constraint_oid(Oid relationId, Oid indexId) +{ + Relation pg_constraint; + SysScanDesc scan; + ScanKeyData key; + HeapTuple tuple; + Oid constraintId = InvalidOid; + + pg_constraint = heap_open(ConstraintRelationId, AccessShareLock); + + ScanKeyInit(&key, + Anum_pg_constraint_conrelid, + BTEqualStrategyNumber, + F_OIDEQ, + ObjectIdGetDatum(relationId)); + scan = systable_beginscan(pg_constraint, ConstraintRelidTypidNameIndexId, + true, NULL, 1, &key); + while ((tuple = systable_getnext(scan)) != NULL) + { + Form_pg_constraint constrForm; + + constrForm = (Form_pg_constraint) GETSTRUCT(tuple); + if (constrForm->conindid == indexId) + { + constraintId = HeapTupleGetOid(tuple); + break; + } + } + systable_endscan(scan); + + heap_close(pg_constraint, AccessShareLock); + return constraintId; } /* @@ -816,38 +1391,30 @@ get_domain_constraint_oid(Oid typid, const char *conname, bool missing_ok) Relation pg_constraint; HeapTuple tuple; SysScanDesc scan; - ScanKeyData skey[1]; + ScanKeyData skey[3]; Oid conOid = InvalidOid; - /* - * Fetch the constraint tuple from pg_constraint. There may be more than - * one match, because constraints are not required to have unique names; - * if so, error out. 
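Because get_relation_constraint_attnos() shifts each attnum by FirstLowInvalidHeapAttributeNumber before adding it to the Bitmapset (so that negative system-column attnums become valid set members), callers must undo that shift when reading the set back. A minimal sketch of the decode step; the helper name report_constrained_columns is invented for illustration.

#include "postgres.h"
#include "access/attnum.h"
#include "access/sysattr.h"
#include "nodes/bitmapset.h"

/* Illustrative only: walk the constrained columns encoded as described above. */
static void
report_constrained_columns(Bitmapset *conattnos)
{
	int			x = -1;

	while ((x = bms_next_member(conattnos, x)) >= 0)
	{
		AttrNumber	attno = x + FirstLowInvalidHeapAttributeNumber;

		elog(DEBUG1, "constrained column has attnum %d", attno);
	}
}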
- */ pg_constraint = heap_open(ConstraintRelationId, AccessShareLock); ScanKeyInit(&skey[0], + Anum_pg_constraint_conrelid, + BTEqualStrategyNumber, F_OIDEQ, + ObjectIdGetDatum(InvalidOid)); + ScanKeyInit(&skey[1], Anum_pg_constraint_contypid, BTEqualStrategyNumber, F_OIDEQ, ObjectIdGetDatum(typid)); + ScanKeyInit(&skey[2], + Anum_pg_constraint_conname, + BTEqualStrategyNumber, F_NAMEEQ, + CStringGetDatum(conname)); - scan = systable_beginscan(pg_constraint, ConstraintTypidIndexId, true, - NULL, 1, skey); - - while (HeapTupleIsValid(tuple = systable_getnext(scan))) - { - Form_pg_constraint con = (Form_pg_constraint) GETSTRUCT(tuple); + scan = systable_beginscan(pg_constraint, ConstraintRelidTypidNameIndexId, true, + NULL, 3, skey); - if (strcmp(NameStr(con->conname), conname) == 0) - { - if (OidIsValid(conOid)) - ereport(ERROR, - (errcode(ERRCODE_DUPLICATE_OBJECT), - errmsg("domain %s has multiple constraints named \"%s\"", - format_type_be(typid), conname))); - conOid = HeapTupleGetOid(tuple); - } - } + /* There can be at most one matching row */ + if (HeapTupleIsValid(tuple = systable_getnext(scan))) + conOid = HeapTupleGetOid(tuple); systable_endscan(scan); @@ -897,7 +1464,7 @@ get_primary_key_attnos(Oid relid, bool deferrableOk, Oid *constraintOid) BTEqualStrategyNumber, F_OIDEQ, ObjectIdGetDatum(relid)); - scan = systable_beginscan(pg_constraint, ConstraintRelidIndexId, true, + scan = systable_beginscan(pg_constraint, ConstraintRelidTypidNameIndexId, true, NULL, 1, skey); while (HeapTupleIsValid(tuple = systable_getnext(scan))) @@ -958,7 +1525,7 @@ get_primary_key_attnos(Oid relid, bool deferrableOk, Oid *constraintOid) /* * Determine whether a relation can be proven functionally dependent on - * a set of grouping columns. If so, return TRUE and add the pg_constraint + * a set of grouping columns. If so, return true and add the pg_constraint * OIDs of the constraints needed for the proof to the *constraintDeps list. 
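All of the rewritten lookups above share one convention for the new ConstraintRelidTypidNameIndexId index: pass the real conrelid or contypid, InvalidOid for the other, plus the constraint name, so a three-key scan can match at most one row. A minimal sketch of that shared pattern, factored into a standalone helper; lookup_constraint_oid is an invented name, not a function this patch adds, and the header list is approximate.

#include "postgres.h"
#include "access/genam.h"
#include "access/htup_details.h"
#include "access/skey.h"
#include "catalog/indexing.h"
#include "catalog/pg_constraint.h"
#include "utils/fmgroids.h"
#include "utils/rel.h"

/* Illustrative only: exactly one of conrelid/contypid should be a valid OID. */
static Oid
lookup_constraint_oid(Relation pg_constraint, Oid conrelid, Oid contypid,
					  const char *conname)
{
	ScanKeyData skey[3];
	SysScanDesc scan;
	HeapTuple	tuple;
	Oid			result = InvalidOid;

	ScanKeyInit(&skey[0], Anum_pg_constraint_conrelid,
				BTEqualStrategyNumber, F_OIDEQ, ObjectIdGetDatum(conrelid));
	ScanKeyInit(&skey[1], Anum_pg_constraint_contypid,
				BTEqualStrategyNumber, F_OIDEQ, ObjectIdGetDatum(contypid));
	ScanKeyInit(&skey[2], Anum_pg_constraint_conname,
				BTEqualStrategyNumber, F_NAMEEQ, CStringGetDatum(conname));

	scan = systable_beginscan(pg_constraint, ConstraintRelidTypidNameIndexId,
							  true, NULL, 3, skey);

	/* The index is unique over (conrelid, contypid, conname): at most one row */
	if (HeapTupleIsValid(tuple = systable_getnext(scan)))
		result = HeapTupleGetOid(tuple);

	systable_endscan(scan);
	return result;
}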
* * grouping_columns is a list of grouping expressions, in which columns of diff --git a/src/backend/catalog/pg_conversion.c b/src/backend/catalog/pg_conversion.c index 5746dc349a..fd5c18426b 100644 --- a/src/backend/catalog/pg_conversion.c +++ b/src/backend/catalog/pg_conversion.c @@ -3,7 +3,7 @@ * pg_conversion.c * routines to support manipulation of the pg_conversion relation * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * @@ -21,7 +21,6 @@ #include "catalog/indexing.h" #include "catalog/objectaccess.h" #include "catalog/pg_conversion.h" -#include "catalog/pg_conversion_fn.h" #include "catalog/pg_namespace.h" #include "catalog/pg_proc.h" #include "mb/pg_wchar.h" diff --git a/src/backend/catalog/pg_db_role_setting.c b/src/backend/catalog/pg_db_role_setting.c index 323471bc83..e123691923 100644 --- a/src/backend/catalog/pg_db_role_setting.c +++ b/src/backend/catalog/pg_db_role_setting.c @@ -2,7 +2,7 @@ * pg_db_role_setting.c * Routines to support manipulation of the pg_db_role_setting relation * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * IDENTIFICATION diff --git a/src/backend/catalog/pg_depend.c b/src/backend/catalog/pg_depend.c index dd6ca3e8f7..2ea05f350b 100644 --- a/src/backend/catalog/pg_depend.c +++ b/src/backend/catalog/pg_depend.c @@ -3,7 +3,7 @@ * pg_depend.c * routines to support manipulation of the pg_depend relation * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * @@ -490,7 +490,7 @@ getExtensionOfObject(Oid classId, Oid objectId) * * An ownership marker is an AUTO or INTERNAL dependency from the sequence to the * column. If we find one, store the identity of the owning column - * into *tableId and *colId and return TRUE; else return FALSE. + * into *tableId and *colId and return true; else return false. * * Note: if there's more than one such pg_depend entry then you get * a random one of them returned into the out parameters. This should @@ -656,14 +656,19 @@ get_constraint_index(Oid constraintId) /* * We assume any internal dependency of an index on the constraint - * must be what we are looking for. (The relkind test is just - * paranoia; there shouldn't be any such dependencies otherwise.) + * must be what we are looking for. 
*/ if (deprec->classid == RelationRelationId && deprec->objsubid == 0 && - deprec->deptype == DEPENDENCY_INTERNAL && - get_rel_relkind(deprec->objid) == RELKIND_INDEX) + deprec->deptype == DEPENDENCY_INTERNAL) { + char relkind = get_rel_relkind(deprec->objid); + + /* This is pure paranoia; there shouldn't be any such */ + if (relkind != RELKIND_INDEX && + relkind != RELKIND_PARTITIONED_INDEX) + break; + indexId = deprec->objid; break; } diff --git a/src/backend/catalog/pg_enum.c b/src/backend/catalog/pg_enum.c index fe61d4dacc..ece65587bb 100644 --- a/src/backend/catalog/pg_enum.c +++ b/src/backend/catalog/pg_enum.c @@ -3,7 +3,7 @@ * pg_enum.c * routines to support manipulation of the pg_enum relation * - * Copyright (c) 2006-2017, PostgreSQL Global Development Group + * Copyright (c) 2006-2018, PostgreSQL Global Development Group * * * IDENTIFICATION @@ -28,6 +28,8 @@ #include "utils/builtins.h" #include "utils/catcache.h" #include "utils/fmgroids.h" +#include "utils/hsearch.h" +#include "utils/memutils.h" #include "utils/syscache.h" #include "utils/tqual.h" @@ -35,6 +37,17 @@ /* Potentially set by pg_upgrade_support functions */ Oid binary_upgrade_next_pg_enum_oid = InvalidOid; +/* + * Hash table of enum value OIDs created during the current transaction by + * AddEnumLabel. We disallow using these values until the transaction is + * committed; otherwise, they might get into indexes where we can't clean + * them up, and then if the transaction rolls back we have a broken index. + * (See comments for check_safe_enum_use() in enum.c.) Values created by + * EnumValuesCreate are *not* blacklisted; we assume those are created during + * CREATE TYPE, so they can't go away unless the enum type itself does. + */ +static HTAB *enum_blacklist = NULL; + static void RenumberEnumType(Relation pg_enum, HeapTuple *existing, int nelems); static int sort_order_cmp(const void *p1, const void *p2); @@ -168,6 +181,23 @@ EnumValuesDelete(Oid enumTypeOid) heap_close(pg_enum, RowExclusiveLock); } +/* + * Initialize the enum blacklist for this transaction. + */ +static void +init_enum_blacklist(void) +{ + HASHCTL hash_ctl; + + memset(&hash_ctl, 0, sizeof(hash_ctl)); + hash_ctl.keysize = sizeof(Oid); + hash_ctl.entrysize = sizeof(Oid); + hash_ctl.hcxt = TopTransactionContext; + enum_blacklist = hash_create("Enum value blacklist", + 32, + &hash_ctl, + HASH_ELEM | HASH_BLOBS | HASH_CONTEXT); +} /* * AddEnumLabel @@ -460,6 +490,13 @@ AddEnumLabel(Oid enumTypeOid, heap_freetuple(enum_tup); heap_close(pg_enum, RowExclusiveLock); + + /* Set up the blacklist hash if not already done in this transaction */ + if (enum_blacklist == NULL) + init_enum_blacklist(); + + /* Add the new value to the blacklist */ + (void) hash_search(enum_blacklist, &newOid, HASH_ENTER, NULL); } @@ -547,6 +584,39 @@ RenameEnumLabel(Oid enumTypeOid, } +/* + * Test if the given enum value is on the blacklist + */ +bool +EnumBlacklisted(Oid enum_id) +{ + bool found; + + /* If we've made no blacklist table, all values are safe */ + if (enum_blacklist == NULL) + return false; + + /* Else, is it in the table? */ + (void) hash_search(enum_blacklist, &enum_id, HASH_FIND, &found); + return found; +} + + +/* + * Clean up enum stuff after end of top-level transaction. + */ +void +AtEOXact_Enum(void) +{ + /* + * Reset the blacklist table, as all our enum values are now committed. + * The memory will go away automatically when TopTransactionContext is + * freed; it's sufficient to clear our pointer. 
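A minimal sketch of the caller-side test the blacklist enables. The real check lives in check_safe_enum_use() in enum.c, as the comment above notes, and reports the value and type names; the helper name and the simplified error text here are illustrative only, assuming the EnumBlacklisted() declaration this patch adds to pg_enum.h.

#include "postgres.h"
#include "catalog/pg_enum.h"

/* Illustrative only: reject enum values created in the current transaction. */
static void
reject_uncommitted_enum_value(Oid enum_value_oid)
{
	if (EnumBlacklisted(enum_value_oid))
		ereport(ERROR,
				(errcode(ERRCODE_UNSAFE_NEW_ENUM_VALUE_USAGE),
				 errmsg("unsafe use of new value of enum type"),
				 errhint("New enum values must be committed before they can be used.")));
}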
+ */ + enum_blacklist = NULL; +} + + /* * RenumberEnumType * Renumber existing enum elements to have sort positions 1..n. @@ -620,3 +690,72 @@ sort_order_cmp(const void *p1, const void *p2) else return 0; } + +Size +EstimateEnumBlacklistSpace(void) +{ + size_t entries; + + if (enum_blacklist) + entries = hash_get_num_entries(enum_blacklist); + else + entries = 0; + + /* Add one for the terminator. */ + return sizeof(Oid) * (entries + 1); +} + +void +SerializeEnumBlacklist(void *space, Size size) +{ + Oid *serialized = (Oid *) space; + + /* + * Make sure the hash table hasn't changed in size since the caller + * reserved the space. + */ + Assert(size == EstimateEnumBlacklistSpace()); + + /* Write out all the values from the hash table, if there is one. */ + if (enum_blacklist) + { + HASH_SEQ_STATUS status; + Oid *value; + + hash_seq_init(&status, enum_blacklist); + while ((value = (Oid *) hash_seq_search(&status))) + *serialized++ = *value; + } + + /* Write out the terminator. */ + *serialized = InvalidOid; + + /* + * Make sure the amount of space we actually used matches what was + * estimated. + */ + Assert((char *) (serialized + 1) == ((char *) space) + size); +} + +void +RestoreEnumBlacklist(void *space) +{ + Oid *serialized = (Oid *) space; + + Assert(!enum_blacklist); + + /* + * As a special case, if the list is empty then don't even bother to + * create the hash table. This is the usual case, since enum alteration + * is expected to be rare. + */ + if (!OidIsValid(*serialized)) + return; + + /* Read all the values into a new hash table. */ + init_enum_blacklist(); + do + { + hash_search(enum_blacklist, serialized++, HASH_ENTER, NULL); + } while (OidIsValid(*serialized)); +} diff --git a/src/backend/catalog/pg_inherits.c b/src/backend/catalog/pg_inherits.c index 245a374fc9..85baca54cc 100644 --- a/src/backend/catalog/pg_inherits.c +++ b/src/backend/catalog/pg_inherits.c @@ -8,7 +8,7 @@ * Perhaps someday that code should be moved here, but it'd have to be * disentangled from other stuff such as pg_depend updates. * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * @@ -24,7 +24,6 @@ #include "access/htup_details.h" #include "catalog/indexing.h" #include "catalog/pg_inherits.h" -#include "catalog/pg_inherits_fn.h" #include "parser/parse_type.h" #include "storage/lmgr.h" #include "utils/builtins.h" @@ -301,6 +300,11 @@ has_superclass(Oid relationId) /* * Given two type OIDs, determine whether the first is a complex type * (class type) that inherits from the second. + * + * This essentially asks whether the first type is guaranteed to be coercible + * to the second. Therefore, we allow the first type to be a domain over a + * complex type that inherits from the second; that creates no difficulties. + * But the second type cannot be a domain. 
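The serialized form produced above is simply an array of Oids terminated by InvalidOid. A minimal sketch of how the estimate/serialize/restore trio is meant to fit together; the two helper names are invented, the palloc'd buffer stands in for the shared-memory space the parallel-query leader would really reserve, and RestoreEnumBlacklist() is assumed to run in a freshly started worker where no blacklist exists yet.

#include "postgres.h"
#include "catalog/pg_enum.h"

/* Illustrative only: leader side -- reserve space and fill it. */
static void *
leader_serialize_enum_blacklist(void)
{
	Size		size = EstimateEnumBlacklistSpace();
	void	   *space = palloc(size);	/* stand-in for shared memory */

	SerializeEnumBlacklist(space, size);
	return space;
}

/* Illustrative only: worker side -- rebuild the blacklist at startup. */
static void
worker_restore_enum_blacklist(void *space)
{
	RestoreEnumBlacklist(space);
}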
*/ bool typeInheritsFrom(Oid subclassTypeId, Oid superclassTypeId) @@ -314,9 +318,9 @@ typeInheritsFrom(Oid subclassTypeId, Oid superclassTypeId) ListCell *queue_item; /* We need to work with the associated relation OIDs */ - subclassRelid = typeidTypeRelid(subclassTypeId); + subclassRelid = typeOrDomainTypeRelid(subclassTypeId); if (subclassRelid == InvalidOid) - return false; /* not a complex type */ + return false; /* not a complex type or domain over one */ superclassRelid = typeidTypeRelid(superclassTypeId); if (superclassRelid == InvalidOid) return false; /* not a complex type */ @@ -400,3 +404,83 @@ typeInheritsFrom(Oid subclassTypeId, Oid superclassTypeId) return result; } + +/* + * Create a single pg_inherits row with the given data + */ +void +StoreSingleInheritance(Oid relationId, Oid parentOid, int32 seqNumber) +{ + Datum values[Natts_pg_inherits]; + bool nulls[Natts_pg_inherits]; + HeapTuple tuple; + Relation inhRelation; + + inhRelation = heap_open(InheritsRelationId, RowExclusiveLock); + + /* + * Make the pg_inherits entry + */ + values[Anum_pg_inherits_inhrelid - 1] = ObjectIdGetDatum(relationId); + values[Anum_pg_inherits_inhparent - 1] = ObjectIdGetDatum(parentOid); + values[Anum_pg_inherits_inhseqno - 1] = Int32GetDatum(seqNumber); + + memset(nulls, 0, sizeof(nulls)); + + tuple = heap_form_tuple(RelationGetDescr(inhRelation), values, nulls); + + CatalogTupleInsert(inhRelation, tuple); + + heap_freetuple(tuple); + + heap_close(inhRelation, RowExclusiveLock); +} + +/* + * DeleteInheritsTuple + * + * Delete pg_inherits tuples with the given inhrelid. inhparent may be given + * as InvalidOid, in which case all tuples matching inhrelid are deleted; + * otherwise only delete tuples with the specified inhparent. + * + * Returns whether at least one row was deleted. + */ +bool +DeleteInheritsTuple(Oid inhrelid, Oid inhparent) +{ + bool found = false; + Relation catalogRelation; + ScanKeyData key; + SysScanDesc scan; + HeapTuple inheritsTuple; + + /* + * Find pg_inherits entries by inhrelid. + */ + catalogRelation = heap_open(InheritsRelationId, RowExclusiveLock); + ScanKeyInit(&key, + Anum_pg_inherits_inhrelid, + BTEqualStrategyNumber, F_OIDEQ, + ObjectIdGetDatum(inhrelid)); + scan = systable_beginscan(catalogRelation, InheritsRelidSeqnoIndexId, + true, NULL, 1, &key); + + while (HeapTupleIsValid(inheritsTuple = systable_getnext(scan))) + { + Oid parent; + + /* Compare inhparent if it was given, and do the actual deletion. 
*/ + parent = ((Form_pg_inherits) GETSTRUCT(inheritsTuple))->inhparent; + if (!OidIsValid(inhparent) || parent == inhparent) + { + CatalogTupleDelete(catalogRelation, &inheritsTuple->t_self); + found = true; + } + } + + /* Done */ + systable_endscan(scan); + heap_close(catalogRelation, RowExclusiveLock); + + return found; +} diff --git a/src/backend/catalog/pg_largeobject.c b/src/backend/catalog/pg_largeobject.c index fc4f4f8c9b..a876473976 100644 --- a/src/backend/catalog/pg_largeobject.c +++ b/src/backend/catalog/pg_largeobject.c @@ -3,7 +3,7 @@ * pg_largeobject.c * routines to support manipulation of the pg_largeobject relation * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * diff --git a/src/backend/catalog/pg_namespace.c b/src/backend/catalog/pg_namespace.c index 3e20d051c2..0538e31b3b 100644 --- a/src/backend/catalog/pg_namespace.c +++ b/src/backend/catalog/pg_namespace.c @@ -3,7 +3,7 @@ * pg_namespace.c * routines to support manipulation of the pg_namespace relation * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * @@ -63,7 +63,7 @@ NamespaceCreate(const char *nspName, Oid ownerId, bool isTemp) errmsg("schema \"%s\" already exists", nspName))); if (!isTemp) - nspacl = get_user_default_acl(ACL_OBJECT_NAMESPACE, ownerId, + nspacl = get_user_default_acl(OBJECT_SCHEMA, ownerId, InvalidOid); else nspacl = NULL; @@ -100,6 +100,9 @@ NamespaceCreate(const char *nspName, Oid ownerId, bool isTemp) /* dependency on owner */ recordDependencyOnOwner(NamespaceRelationId, nspoid, ownerId); + /* dependences on roles mentioned in default ACL */ + recordDependencyOnNewAcl(NamespaceRelationId, nspoid, 0, ownerId, nspacl); + /* dependency on extension ... but not for magic temp schemas */ if (!isTemp) recordDependencyOnCurrentExtension(&myself, false); diff --git a/src/backend/catalog/pg_operator.c b/src/backend/catalog/pg_operator.c index ef81102150..6dde75ed25 100644 --- a/src/backend/catalog/pg_operator.c +++ b/src/backend/catalog/pg_operator.c @@ -3,7 +3,7 @@ * pg_operator.c * routines to support manipulation of the pg_operator relation * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * @@ -26,7 +26,6 @@ #include "catalog/objectaccess.h" #include "catalog/pg_namespace.h" #include "catalog/pg_operator.h" -#include "catalog/pg_operator_fn.h" #include "catalog/pg_proc.h" #include "catalog/pg_type.h" #include "miscadmin.h" @@ -124,7 +123,7 @@ validOperatorName(const char *name) * finds an operator given an exact specification (name, namespace, * left and right type IDs). * - * *defined is set TRUE if defined (not a shell) + * *defined is set true if defined (not a shell) */ static Oid OperatorGet(const char *operatorName, @@ -164,7 +163,7 @@ OperatorGet(const char *operatorName, * looks up an operator given a possibly-qualified name and * left and right type IDs. 
* - * *defined is set TRUE if defined (not a shell) + * *defined is set true if defined (not a shell) */ static Oid OperatorLookup(List *operatorName, @@ -425,7 +424,7 @@ OperatorCreate(const char *operatorName, */ if (OidIsValid(operatorObjectId) && !pg_oper_ownercheck(operatorObjectId, GetUserId())) - aclcheck_error(ACLCHECK_NOT_OWNER, ACL_KIND_OPER, + aclcheck_error(ACLCHECK_NOT_OWNER, OBJECT_OPERATOR, operatorName); /* @@ -445,7 +444,7 @@ OperatorCreate(const char *operatorName, /* Permission check: must own other operator */ if (OidIsValid(commutatorId) && !pg_oper_ownercheck(commutatorId, GetUserId())) - aclcheck_error(ACLCHECK_NOT_OWNER, ACL_KIND_OPER, + aclcheck_error(ACLCHECK_NOT_OWNER, OBJECT_OPERATOR, NameListToString(commutatorName)); /* @@ -470,7 +469,7 @@ OperatorCreate(const char *operatorName, /* Permission check: must own other operator */ if (OidIsValid(negatorId) && !pg_oper_ownercheck(negatorId, GetUserId())) - aclcheck_error(ACLCHECK_NOT_OWNER, ACL_KIND_OPER, + aclcheck_error(ACLCHECK_NOT_OWNER, OBJECT_OPERATOR, NameListToString(negatorName)); } else @@ -618,7 +617,7 @@ get_other_operator(List *otherOp, Oid otherLeftTypeId, Oid otherRightTypeId, aclresult = pg_namespace_aclcheck(otherNamespace, GetUserId(), ACL_CREATE); if (aclresult != ACLCHECK_OK) - aclcheck_error(aclresult, ACL_KIND_NAMESPACE, + aclcheck_error(aclresult, OBJECT_SCHEMA, get_namespace_name(otherNamespace)); other_oid = OperatorShellMake(otherName, diff --git a/src/backend/catalog/pg_proc.c b/src/backend/catalog/pg_proc.c index 571856e525..e367da7dba 100644 --- a/src/backend/catalog/pg_proc.c +++ b/src/backend/catalog/pg_proc.c @@ -3,7 +3,7 @@ * pg_proc.c * routines to support manipulation of the pg_proc relation * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * @@ -22,7 +22,6 @@ #include "catalog/pg_language.h" #include "catalog/pg_namespace.h" #include "catalog/pg_proc.h" -#include "catalog/pg_proc_fn.h" #include "catalog/pg_transform.h" #include "catalog/pg_type.h" #include "commands/defrem.h" @@ -60,7 +59,7 @@ static bool match_prosrc_to_literal(const char *prosrc, const char *literal, * * Note: allParameterTypes, parameterModes, parameterNames, trftypes, and proconfig * are either arrays of the proper types or NULL. We declare them Datum, - * not "ArrayType *", to avoid importing array.h into pg_proc_fn.h. + * not "ArrayType *", to avoid importing array.h into pg_proc.h. 
* ---------------------------------------------------------------- */ ObjectAddress @@ -74,8 +73,7 @@ ProcedureCreate(const char *procedureName, Oid languageValidator, const char *prosrc, const char *probin, - bool isAgg, - bool isWindowFunc, + char prokind, bool security_definer, bool isLeakProof, bool isStrict, @@ -110,7 +108,6 @@ ProcedureCreate(const char *procedureName, bool nulls[Natts_pg_proc]; Datum values[Natts_pg_proc]; bool replaces[Natts_pg_proc]; - Oid relid; NameData procname; TupleDesc tupDesc; bool is_update; @@ -256,20 +253,6 @@ ProcedureCreate(const char *procedureName, errmsg("unsafe use of pseudo-type \"internal\""), errdetail("A function returning \"internal\" must have at least one \"internal\" argument."))); - /* - * don't allow functions of complex types that have the same name as - * existing attributes of the type - */ - if (parameterCount == 1 && - OidIsValid(parameterTypes->values[0]) && - (relid = typeidTypeRelid(parameterTypes->values[0])) != InvalidOid && - get_attnum(relid, procedureName) != InvalidAttrNumber) - ereport(ERROR, - (errcode(ERRCODE_DUPLICATE_COLUMN), - errmsg("\"%s\" is already an attribute of type %s", - procedureName, - format_type_be(parameterTypes->values[0])))); - if (paramModes != NULL) { /* @@ -335,8 +318,7 @@ ProcedureCreate(const char *procedureName, values[Anum_pg_proc_prorows - 1] = Float4GetDatum(prorows); values[Anum_pg_proc_provariadic - 1] = ObjectIdGetDatum(variadicType); values[Anum_pg_proc_protransform - 1] = ObjectIdGetDatum(InvalidOid); - values[Anum_pg_proc_proisagg - 1] = BoolGetDatum(isAgg); - values[Anum_pg_proc_proiswindow - 1] = BoolGetDatum(isWindowFunc); + values[Anum_pg_proc_prokind - 1] = CharGetDatum(prokind); values[Anum_pg_proc_prosecdef - 1] = BoolGetDatum(security_definer); values[Anum_pg_proc_proleakproof - 1] = BoolGetDatum(isLeakProof); values[Anum_pg_proc_proisstrict - 1] = BoolGetDatum(isStrict); @@ -393,6 +375,7 @@ ProcedureCreate(const char *procedureName, Form_pg_proc oldproc = (Form_pg_proc) GETSTRUCT(oldtup); Datum proargnames; bool isnull; + const char *dropcmd; if (!replace) ereport(ERROR, @@ -400,19 +383,44 @@ ProcedureCreate(const char *procedureName, errmsg("function \"%s\" already exists with same argument types", procedureName))); if (!pg_proc_ownercheck(HeapTupleGetOid(oldtup), proowner)) - aclcheck_error(ACLCHECK_NOT_OWNER, ACL_KIND_PROC, + aclcheck_error(ACLCHECK_NOT_OWNER, OBJECT_FUNCTION, procedureName); + /* Not okay to change routine kind */ + if (oldproc->prokind != prokind) + ereport(ERROR, + (errcode(ERRCODE_WRONG_OBJECT_TYPE), + errmsg("cannot change routine kind"), + (oldproc->prokind == PROKIND_AGGREGATE ? + errdetail("\"%s\" is an aggregate function.", procedureName) : + oldproc->prokind == PROKIND_FUNCTION ? + errdetail("\"%s\" is a function.", procedureName) : + oldproc->prokind == PROKIND_PROCEDURE ? + errdetail("\"%s\" is a procedure.", procedureName) : + oldproc->prokind == PROKIND_WINDOW ? + errdetail("\"%s\" is a window function.", procedureName) : + 0))); + + dropcmd = (prokind == PROKIND_PROCEDURE ? "DROP PROCEDURE" : "DROP FUNCTION"); + /* * Not okay to change the return type of the existing proc, since * existing rules, views, etc may depend on the return type. + * + * In case of a procedure, a changing return type means that whether + * the procedure has output parameters was changed. Since there is no + * user visible return type, we produce a more specific error message. 
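The new prokind column collapses the old proisagg/proiswindow booleans into a single character, which is what the "cannot change routine kind" errdetail chain above distinguishes. A minimal helper showing those values; prokind_label is an invented name, assuming the PROKIND_* macros from pg_proc.h.

#include "postgres.h"
#include "catalog/pg_proc.h"

/* Illustrative only: human-readable name for a pg_proc.prokind value. */
static const char *
prokind_label(char prokind)
{
	switch (prokind)
	{
		case PROKIND_FUNCTION:
			return "function";
		case PROKIND_AGGREGATE:
			return "aggregate function";
		case PROKIND_PROCEDURE:
			return "procedure";
		case PROKIND_WINDOW:
			return "window function";
		default:
			return "unrecognized routine kind";
	}
}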
*/ if (returnType != oldproc->prorettype || returnsSet != oldproc->proretset) ereport(ERROR, (errcode(ERRCODE_INVALID_FUNCTION_DEFINITION), - errmsg("cannot change return type of existing function"), - errhint("Use DROP FUNCTION %s first.", + prokind == PROKIND_PROCEDURE + ? errmsg("cannot change whether a procedure has output parameters") + : errmsg("cannot change return type of existing function"), + /* translator: first %s is DROP FUNCTION or DROP PROCEDURE */ + errhint("Use %s %s first.", + dropcmd, format_procedure(HeapTupleGetOid(oldtup))))); /* @@ -425,7 +433,8 @@ ProcedureCreate(const char *procedureName, TupleDesc newdesc; olddesc = build_function_result_tupdesc_t(oldtup); - newdesc = build_function_result_tupdesc_d(allParameterTypes, + newdesc = build_function_result_tupdesc_d(prokind, + allParameterTypes, parameterModes, parameterNames); if (olddesc == NULL && newdesc == NULL) @@ -436,7 +445,9 @@ ProcedureCreate(const char *procedureName, (errcode(ERRCODE_INVALID_FUNCTION_DEFINITION), errmsg("cannot change return type of existing function"), errdetail("Row type defined by OUT parameters is different."), - errhint("Use DROP FUNCTION %s first.", + /* translator: first %s is DROP FUNCTION or DROP PROCEDURE */ + errhint("Use %s %s first.", + dropcmd, format_procedure(HeapTupleGetOid(oldtup))))); } @@ -479,7 +490,9 @@ ProcedureCreate(const char *procedureName, (errcode(ERRCODE_INVALID_FUNCTION_DEFINITION), errmsg("cannot change name of input parameter \"%s\"", old_arg_names[j]), - errhint("Use DROP FUNCTION %s first.", + /* translator: first %s is DROP FUNCTION or DROP PROCEDURE */ + errhint("Use %s %s first.", + dropcmd, format_procedure(HeapTupleGetOid(oldtup))))); } } @@ -503,7 +516,9 @@ ProcedureCreate(const char *procedureName, ereport(ERROR, (errcode(ERRCODE_INVALID_FUNCTION_DEFINITION), errmsg("cannot remove parameter defaults from existing function"), - errhint("Use DROP FUNCTION %s first.", + /* translator: first %s is DROP FUNCTION or DROP PROCEDURE */ + errhint("Use %s %s first.", + dropcmd, format_procedure(HeapTupleGetOid(oldtup))))); proargdefaults = SysCacheGetAttr(PROCNAMEARGSNSP, oldtup, @@ -529,40 +544,14 @@ ProcedureCreate(const char *procedureName, ereport(ERROR, (errcode(ERRCODE_INVALID_FUNCTION_DEFINITION), errmsg("cannot change data type of existing parameter default value"), - errhint("Use DROP FUNCTION %s first.", + /* translator: first %s is DROP FUNCTION or DROP PROCEDURE */ + errhint("Use %s %s first.", + dropcmd, format_procedure(HeapTupleGetOid(oldtup))))); newlc = lnext(newlc); } } - /* Can't change aggregate or window-function status, either */ - if (oldproc->proisagg != isAgg) - { - if (oldproc->proisagg) - ereport(ERROR, - (errcode(ERRCODE_WRONG_OBJECT_TYPE), - errmsg("function \"%s\" is an aggregate function", - procedureName))); - else - ereport(ERROR, - (errcode(ERRCODE_WRONG_OBJECT_TYPE), - errmsg("function \"%s\" is not an aggregate function", - procedureName))); - } - if (oldproc->proiswindow != isWindowFunc) - { - if (oldproc->proiswindow) - ereport(ERROR, - (errcode(ERRCODE_WRONG_OBJECT_TYPE), - errmsg("function \"%s\" is a window function", - procedureName))); - else - ereport(ERROR, - (errcode(ERRCODE_WRONG_OBJECT_TYPE), - errmsg("function \"%s\" is not a window function", - procedureName))); - } - /* * Do not change existing ownership or permissions, either. Note * dependency-update code below has to agree with this decision. 
@@ -582,7 +571,7 @@ ProcedureCreate(const char *procedureName, /* Creating a new procedure */ /* First, get default permissions and set up proacl */ - proacl = get_user_default_acl(ACL_OBJECT_FUNCTION, proowner, + proacl = get_user_default_acl(OBJECT_FUNCTION, proowner, procNamespace); if (proacl != NULL) values[Anum_pg_proc_proacl - 1] = PointerGetDatum(proacl); @@ -665,17 +654,9 @@ ProcedureCreate(const char *procedureName, recordDependencyOnOwner(ProcedureRelationId, retval, proowner); /* dependency on any roles mentioned in ACL */ - if (!is_update && proacl != NULL) - { - int nnewmembers; - Oid *newmembers; - - nnewmembers = aclmembers(proacl, &newmembers); - updateAclDependencies(ProcedureRelationId, retval, 0, - proowner, - 0, NULL, - nnewmembers, newmembers); - } + if (!is_update) + recordDependencyOnNewAcl(ProcedureRelationId, retval, 0, + proowner, proacl); /* dependency on extension */ recordDependencyOnCurrentExtension(&myself, is_update); @@ -940,6 +921,7 @@ fmgr_sql_validator(PG_FUNCTION_ARGS) querytree_sublist); } + check_sql_fn_statements(querytree_list); (void) check_sql_fn_retval(funcoid, proc->prorettype, querytree_list, NULL, NULL); diff --git a/src/backend/catalog/pg_publication.c b/src/backend/catalog/pg_publication.c index 3ef7ba8cd5..3ecf6d57bf 100644 --- a/src/backend/catalog/pg_publication.c +++ b/src/backend/catalog/pg_publication.c @@ -3,7 +3,7 @@ * pg_publication.c * publication C API manipulation * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * IDENTIFICATION @@ -105,6 +105,15 @@ is_publishable_class(Oid relid, Form_pg_class reltuple) relid >= FirstNormalObjectId; } +/* + * Another variant of this, taking a Relation. + */ +bool +is_publishable_relation(Relation rel) +{ + return is_publishable_class(RelationGetRelid(rel), rel->rd_rel); +} + /* * SQL-callable variant of the above @@ -367,6 +376,7 @@ GetPublication(Oid pubid) pub->pubactions.pubinsert = pubform->pubinsert; pub->pubactions.pubupdate = pubform->pubupdate; pub->pubactions.pubdelete = pubform->pubdelete; + pub->pubactions.pubtruncate = pubform->pubtruncate; ReleaseSysCache(tup); @@ -417,9 +427,12 @@ get_publication_oid(const char *pubname, bool missing_ok) /* * get_publication_name - given a publication Oid, look up the name + * + * If missing_ok is false, throw an error if name not found. If true, just + * return NULL. 
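get_publication_name() (and, below, get_subscription_name()) now takes a missing_ok flag, so callers can tolerate an object dropped concurrently instead of hitting a cache-lookup error. A minimal sketch of the intended caller pattern; describe_publication and the fallback string are illustrative only.

#include "postgres.h"
#include "catalog/pg_publication.h"

/* Illustrative only: tolerate a publication dropped under us. */
static const char *
describe_publication(Oid pubid)
{
	char	   *name = get_publication_name(pubid, true);

	return name ? name : "(publication no longer exists)";
}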
*/ char * -get_publication_name(Oid pubid) +get_publication_name(Oid pubid, bool missing_ok) { HeapTuple tup; char *pubname; @@ -428,7 +441,11 @@ get_publication_name(Oid pubid) tup = SearchSysCache1(PUBLICATIONOID, ObjectIdGetDatum(pubid)); if (!HeapTupleIsValid(tup)) - elog(ERROR, "cache lookup failed for publication %u", pubid); + { + if (!missing_ok) + elog(ERROR, "cache lookup failed for publication %u", pubid); + return NULL; + } pubform = (Form_pg_publication) GETSTRUCT(tup); pubname = pstrdup(NameStr(pubform->pubname)); diff --git a/src/backend/catalog/pg_range.c b/src/backend/catalog/pg_range.c index a3b0fb8838..c902f98606 100644 --- a/src/backend/catalog/pg_range.c +++ b/src/backend/catalog/pg_range.c @@ -3,7 +3,7 @@ * pg_range.c * routines to support manipulation of the pg_range relation * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * diff --git a/src/backend/catalog/pg_shdepend.c b/src/backend/catalog/pg_shdepend.c index 31b09a1da5..faf42b7640 100644 --- a/src/backend/catalog/pg_shdepend.c +++ b/src/backend/catalog/pg_shdepend.c @@ -3,7 +3,7 @@ * pg_shdepend.c * routines to support manipulation of the pg_shdepend relation * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * diff --git a/src/backend/catalog/pg_subscription.c b/src/backend/catalog/pg_subscription.c index fb53d71cd6..f891ff8054 100644 --- a/src/backend/catalog/pg_subscription.c +++ b/src/backend/catalog/pg_subscription.c @@ -3,7 +3,7 @@ * pg_subscription.c * replication subscriptions * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * IDENTIFICATION @@ -179,9 +179,12 @@ get_subscription_oid(const char *subname, bool missing_ok) /* * get_subscription_name - given a subscription OID, look up the name + * + * If missing_ok is false, throw an error if name not found. If true, just + * return NULL. */ char * -get_subscription_name(Oid subid) +get_subscription_name(Oid subid, bool missing_ok) { HeapTuple tup; char *subname; @@ -190,7 +193,11 @@ get_subscription_name(Oid subid) tup = SearchSysCache1(SUBSCRIPTIONOID, ObjectIdGetDatum(subid)); if (!HeapTupleIsValid(tup)) - elog(ERROR, "cache lookup failed for subscription %u", subid); + { + if (!missing_ok) + elog(ERROR, "cache lookup failed for subscription %u", subid); + return NULL; + } subform = (Form_pg_subscription) GETSTRUCT(tup); subname = pstrdup(NameStr(subform->subname)); @@ -227,24 +234,15 @@ textarray_to_stringlist(ArrayType *textarray) } /* - * Set the state of a subscription table. - * - * If update_only is true and the record for given table doesn't exist, do - * nothing. This can be used to avoid inserting a new record that was deleted - * by someone else. Generally, subscription DDL commands should use false, - * workers should use true. - * - * The insert-or-update logic in this function is not concurrency safe so it - * might raise an error in rare circumstances. But if we took a stronger lock - * such as ShareRowExclusiveLock, we would risk more deadlocks. + * Add new state record for a subscription table. 
*/ Oid -SetSubscriptionRelState(Oid subid, Oid relid, char state, - XLogRecPtr sublsn, bool update_only) +AddSubscriptionRelState(Oid subid, Oid relid, char state, + XLogRecPtr sublsn) { Relation rel; HeapTuple tup; - Oid subrelid = InvalidOid; + Oid subrelid; bool nulls[Natts_pg_subscription_rel]; Datum values[Natts_pg_subscription_rel]; @@ -256,57 +254,81 @@ SetSubscriptionRelState(Oid subid, Oid relid, char state, tup = SearchSysCacheCopy2(SUBSCRIPTIONRELMAP, ObjectIdGetDatum(relid), ObjectIdGetDatum(subid)); + if (HeapTupleIsValid(tup)) + elog(ERROR, "subscription table %u in subscription %u already exists", + relid, subid); - /* - * If the record for given table does not exist yet create new record, - * otherwise update the existing one. - */ - if (!HeapTupleIsValid(tup) && !update_only) - { - /* Form the tuple. */ - memset(values, 0, sizeof(values)); - memset(nulls, false, sizeof(nulls)); - values[Anum_pg_subscription_rel_srsubid - 1] = ObjectIdGetDatum(subid); - values[Anum_pg_subscription_rel_srrelid - 1] = ObjectIdGetDatum(relid); - values[Anum_pg_subscription_rel_srsubstate - 1] = CharGetDatum(state); - if (sublsn != InvalidXLogRecPtr) - values[Anum_pg_subscription_rel_srsublsn - 1] = LSNGetDatum(sublsn); - else - nulls[Anum_pg_subscription_rel_srsublsn - 1] = true; - - tup = heap_form_tuple(RelationGetDescr(rel), values, nulls); - - /* Insert tuple into catalog. */ - subrelid = CatalogTupleInsert(rel, tup); - - heap_freetuple(tup); - } - else if (HeapTupleIsValid(tup)) - { - bool replaces[Natts_pg_subscription_rel]; + /* Form the tuple. */ + memset(values, 0, sizeof(values)); + memset(nulls, false, sizeof(nulls)); + values[Anum_pg_subscription_rel_srsubid - 1] = ObjectIdGetDatum(subid); + values[Anum_pg_subscription_rel_srrelid - 1] = ObjectIdGetDatum(relid); + values[Anum_pg_subscription_rel_srsubstate - 1] = CharGetDatum(state); + if (sublsn != InvalidXLogRecPtr) + values[Anum_pg_subscription_rel_srsublsn - 1] = LSNGetDatum(sublsn); + else + nulls[Anum_pg_subscription_rel_srsublsn - 1] = true; - /* Update the tuple. */ - memset(values, 0, sizeof(values)); - memset(nulls, false, sizeof(nulls)); - memset(replaces, false, sizeof(replaces)); + tup = heap_form_tuple(RelationGetDescr(rel), values, nulls); - replaces[Anum_pg_subscription_rel_srsubstate - 1] = true; - values[Anum_pg_subscription_rel_srsubstate - 1] = CharGetDatum(state); + /* Insert tuple into catalog. */ + subrelid = CatalogTupleInsert(rel, tup); - replaces[Anum_pg_subscription_rel_srsublsn - 1] = true; - if (sublsn != InvalidXLogRecPtr) - values[Anum_pg_subscription_rel_srsublsn - 1] = LSNGetDatum(sublsn); - else - nulls[Anum_pg_subscription_rel_srsublsn - 1] = true; + heap_freetuple(tup); - tup = heap_modify_tuple(tup, RelationGetDescr(rel), values, nulls, - replaces); + /* Cleanup. */ + heap_close(rel, NoLock); - /* Update the catalog. */ - CatalogTupleUpdate(rel, &tup->t_self, tup); + return subrelid; +} - subrelid = HeapTupleGetOid(tup); - } +/* + * Update the state of a subscription table. + */ +Oid +UpdateSubscriptionRelState(Oid subid, Oid relid, char state, + XLogRecPtr sublsn) +{ + Relation rel; + HeapTuple tup; + Oid subrelid; + bool nulls[Natts_pg_subscription_rel]; + Datum values[Natts_pg_subscription_rel]; + bool replaces[Natts_pg_subscription_rel]; + + LockSharedObject(SubscriptionRelationId, subid, 0, AccessShareLock); + + rel = heap_open(SubscriptionRelRelationId, RowExclusiveLock); + + /* Try finding existing mapping. 
*/ + tup = SearchSysCacheCopy2(SUBSCRIPTIONRELMAP, + ObjectIdGetDatum(relid), + ObjectIdGetDatum(subid)); + if (!HeapTupleIsValid(tup)) + elog(ERROR, "subscription table %u in subscription %u does not exist", + relid, subid); + + /* Update the tuple. */ + memset(values, 0, sizeof(values)); + memset(nulls, false, sizeof(nulls)); + memset(replaces, false, sizeof(replaces)); + + replaces[Anum_pg_subscription_rel_srsubstate - 1] = true; + values[Anum_pg_subscription_rel_srsubstate - 1] = CharGetDatum(state); + + replaces[Anum_pg_subscription_rel_srsublsn - 1] = true; + if (sublsn != InvalidXLogRecPtr) + values[Anum_pg_subscription_rel_srsublsn - 1] = LSNGetDatum(sublsn); + else + nulls[Anum_pg_subscription_rel_srsublsn - 1] = true; + + tup = heap_modify_tuple(tup, RelationGetDescr(rel), values, nulls, + replaces); + + /* Update the catalog. */ + CatalogTupleUpdate(rel, &tup->t_self, tup); + + subrelid = HeapTupleGetOid(tup); /* Cleanup. */ heap_close(rel, NoLock); diff --git a/src/backend/catalog/pg_type.c b/src/backend/catalog/pg_type.c index 59ffd2104d..b729e7ec95 100644 --- a/src/backend/catalog/pg_type.c +++ b/src/backend/catalog/pg_type.c @@ -3,7 +3,7 @@ * pg_type.c * routines to support manipulation of the pg_type relation * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * @@ -25,7 +25,6 @@ #include "catalog/pg_namespace.h" #include "catalog/pg_proc.h" #include "catalog/pg_type.h" -#include "catalog/pg_type_fn.h" #include "commands/typecmds.h" #include "miscadmin.h" #include "parser/scansup.h" @@ -148,23 +147,13 @@ TypeShellMake(const char *typeName, Oid typeNamespace, Oid ownerId) * Create dependencies. We can/must skip this in bootstrap mode. */ if (!IsBootstrapProcessingMode()) - GenerateTypeDependencies(typeNamespace, - typoid, - InvalidOid, + GenerateTypeDependencies(typoid, + (Form_pg_type) GETSTRUCT(tup), + NULL, + NULL, 0, - ownerId, - F_SHELL_IN, - F_SHELL_OUT, - InvalidOid, - InvalidOid, - InvalidOid, - InvalidOid, - InvalidOid, - InvalidOid, false, - InvalidOid, - InvalidOid, - NULL, + false, false); /* Post creation hook for new shell type */ @@ -226,14 +215,15 @@ TypeCreate(Oid newTypeOid, { Relation pg_type_desc; Oid typeObjectId; + bool isDependentType; bool rebuildDeps = false; + Acl *typacl; HeapTuple tup; bool nulls[Natts_pg_type]; bool replaces[Natts_pg_type]; Datum values[Natts_pg_type]; NameData name; int i; - Acl *typacl = NULL; ObjectAddress address; /* @@ -321,6 +311,17 @@ TypeCreate(Oid newTypeOid, (errcode(ERRCODE_INVALID_OBJECT_DEFINITION), errmsg("fixed-size types must have storage PLAIN"))); + /* + * This is a dependent type if it's an implicitly-created array type, or + * if it's a relation rowtype that's not a composite type. For such types + * we'll leave the ACL empty, and we'll skip creating some dependency + * records because there will be a dependency already through the + * depended-on type or relation. (Caution: this is closely intertwined + * with some behavior in GenerateTypeDependencies.) 
+ */ + isDependentType = isImplicitArray || + (OidIsValid(relationOid) && relationKind != RELKIND_COMPOSITE_TYPE); + /* * initialize arrays needed for heap_form_tuple or heap_modify_tuple */ @@ -380,8 +381,14 @@ TypeCreate(Oid newTypeOid, else nulls[Anum_pg_type_typdefault - 1] = true; - typacl = get_user_default_acl(ACL_OBJECT_TYPE, ownerId, - typeNamespace); + /* + * Initialize the type's ACL, too. But dependent types don't get one. + */ + if (isDependentType) + typacl = NULL; + else + typacl = get_user_default_acl(OBJECT_TYPE, ownerId, + typeNamespace); if (typacl != NULL) values[Anum_pg_type_typacl - 1] = PointerGetDatum(typacl); else @@ -413,7 +420,7 @@ TypeCreate(Oid newTypeOid, * shell type must have been created by same owner */ if (((Form_pg_type) GETSTRUCT(tup))->typowner != ownerId) - aclcheck_error(ACLCHECK_NOT_OWNER, ACL_KIND_TYPE, typeName); + aclcheck_error(ACLCHECK_NOT_OWNER, OBJECT_TYPE, typeName); /* trouble if caller wanted to force the OID */ if (OidIsValid(newTypeOid)) @@ -463,25 +470,15 @@ TypeCreate(Oid newTypeOid, * Create dependencies. We can/must skip this in bootstrap mode. */ if (!IsBootstrapProcessingMode()) - GenerateTypeDependencies(typeNamespace, - typeObjectId, - relationOid, - relationKind, - ownerId, - inputProcedure, - outputProcedure, - receiveProcedure, - sendProcedure, - typmodinProcedure, - typmodoutProcedure, - analyzeProcedure, - elementType, - isImplicitArray, - baseType, - typeCollation, + GenerateTypeDependencies(typeObjectId, + (Form_pg_type) GETSTRUCT(tup), (defaultTypeBin ? stringToNode(defaultTypeBin) : NULL), + typacl, + relationKind, + isImplicitArray, + isDependentType, rebuildDeps); /* Post creation hook for new type */ @@ -500,6 +497,17 @@ TypeCreate(Oid newTypeOid, /* * GenerateTypeDependencies: build the dependencies needed for a type * + * Most of what this function needs to know about the type is passed as the + * new pg_type row, typeForm. But we can't get at the varlena fields through + * that, so defaultExpr and typacl are passed separately. (typacl is really + * "Acl *", but we declare it "void *" to avoid including acl.h in pg_type.h.) + * + * relationKind and isImplicitArray aren't visible in the pg_type row either, + * so they're also passed separately. + * + * isDependentType is true if this is an implicit array or relation rowtype; + * that means it doesn't need its own dependencies on owner etc. + * * If rebuild is true, we remove existing dependencies and rebuild them * from scratch. This is needed for ALTER TYPE, and also when replacing * a shell type. We don't remove an existing extension dependency, though. @@ -509,23 +517,13 @@ TypeCreate(Oid newTypeOid, * that type will become a member of the extension.) */ void -GenerateTypeDependencies(Oid typeNamespace, - Oid typeObjectId, - Oid relationOid, /* only for relation rowtypes */ - char relationKind, /* ditto */ - Oid owner, - Oid inputProcedure, - Oid outputProcedure, - Oid receiveProcedure, - Oid sendProcedure, - Oid typmodinProcedure, - Oid typmodoutProcedure, - Oid analyzeProcedure, - Oid elementType, - bool isImplicitArray, - Oid baseType, - Oid typeCollation, +GenerateTypeDependencies(Oid typeObjectId, + Form_pg_type typeForm, Node *defaultExpr, + void *typacl, + char relationKind, /* only for relation rowtypes */ + bool isImplicitArray, + bool isDependentType, bool rebuild) { ObjectAddress myself, @@ -543,79 +541,80 @@ GenerateTypeDependencies(Oid typeNamespace, myself.objectSubId = 0; /* - * Make dependencies on namespace, owner, extension. 
+ * Make dependencies on namespace, owner, ACL, extension. * - * For a relation rowtype (that's not a composite type), we should skip - * these because we'll depend on them indirectly through the pg_class - * entry. Likewise, skip for implicit arrays since we'll depend on them - * through the element type. + * Skip these for a dependent type, since it will have such dependencies + * indirectly through its depended-on type or relation. */ - if ((!OidIsValid(relationOid) || relationKind == RELKIND_COMPOSITE_TYPE) && - !isImplicitArray) + if (!isDependentType) { referenced.classId = NamespaceRelationId; - referenced.objectId = typeNamespace; + referenced.objectId = typeForm->typnamespace; referenced.objectSubId = 0; recordDependencyOn(&myself, &referenced, DEPENDENCY_NORMAL); - recordDependencyOnOwner(TypeRelationId, typeObjectId, owner); + recordDependencyOnOwner(TypeRelationId, typeObjectId, + typeForm->typowner); + + recordDependencyOnNewAcl(TypeRelationId, typeObjectId, 0, + typeForm->typowner, typacl); recordDependencyOnCurrentExtension(&myself, rebuild); } /* Normal dependencies on the I/O functions */ - if (OidIsValid(inputProcedure)) + if (OidIsValid(typeForm->typinput)) { referenced.classId = ProcedureRelationId; - referenced.objectId = inputProcedure; + referenced.objectId = typeForm->typinput; referenced.objectSubId = 0; recordDependencyOn(&myself, &referenced, DEPENDENCY_NORMAL); } - if (OidIsValid(outputProcedure)) + if (OidIsValid(typeForm->typoutput)) { referenced.classId = ProcedureRelationId; - referenced.objectId = outputProcedure; + referenced.objectId = typeForm->typoutput; referenced.objectSubId = 0; recordDependencyOn(&myself, &referenced, DEPENDENCY_NORMAL); } - if (OidIsValid(receiveProcedure)) + if (OidIsValid(typeForm->typreceive)) { referenced.classId = ProcedureRelationId; - referenced.objectId = receiveProcedure; + referenced.objectId = typeForm->typreceive; referenced.objectSubId = 0; recordDependencyOn(&myself, &referenced, DEPENDENCY_NORMAL); } - if (OidIsValid(sendProcedure)) + if (OidIsValid(typeForm->typsend)) { referenced.classId = ProcedureRelationId; - referenced.objectId = sendProcedure; + referenced.objectId = typeForm->typsend; referenced.objectSubId = 0; recordDependencyOn(&myself, &referenced, DEPENDENCY_NORMAL); } - if (OidIsValid(typmodinProcedure)) + if (OidIsValid(typeForm->typmodin)) { referenced.classId = ProcedureRelationId; - referenced.objectId = typmodinProcedure; + referenced.objectId = typeForm->typmodin; referenced.objectSubId = 0; recordDependencyOn(&myself, &referenced, DEPENDENCY_NORMAL); } - if (OidIsValid(typmodoutProcedure)) + if (OidIsValid(typeForm->typmodout)) { referenced.classId = ProcedureRelationId; - referenced.objectId = typmodoutProcedure; + referenced.objectId = typeForm->typmodout; referenced.objectSubId = 0; recordDependencyOn(&myself, &referenced, DEPENDENCY_NORMAL); } - if (OidIsValid(analyzeProcedure)) + if (OidIsValid(typeForm->typanalyze)) { referenced.classId = ProcedureRelationId; - referenced.objectId = analyzeProcedure; + referenced.objectId = typeForm->typanalyze; referenced.objectSubId = 0; recordDependencyOn(&myself, &referenced, DEPENDENCY_NORMAL); } @@ -629,10 +628,10 @@ GenerateTypeDependencies(Oid typeNamespace, * relation is, and not otherwise. And in the latter, of course we get the * opposite effect. 
*/ - if (OidIsValid(relationOid)) + if (OidIsValid(typeForm->typrelid)) { referenced.classId = RelationRelationId; - referenced.objectId = relationOid; + referenced.objectId = typeForm->typrelid; referenced.objectSubId = 0; if (relationKind != RELKIND_COMPOSITE_TYPE) @@ -646,30 +645,31 @@ GenerateTypeDependencies(Oid typeNamespace, * dependent on the element type. Otherwise, if it has an element type, * the dependency is a normal one. */ - if (OidIsValid(elementType)) + if (OidIsValid(typeForm->typelem)) { referenced.classId = TypeRelationId; - referenced.objectId = elementType; + referenced.objectId = typeForm->typelem; referenced.objectSubId = 0; recordDependencyOn(&myself, &referenced, isImplicitArray ? DEPENDENCY_INTERNAL : DEPENDENCY_NORMAL); } /* Normal dependency from a domain to its base type. */ - if (OidIsValid(baseType)) + if (OidIsValid(typeForm->typbasetype)) { referenced.classId = TypeRelationId; - referenced.objectId = baseType; + referenced.objectId = typeForm->typbasetype; referenced.objectSubId = 0; recordDependencyOn(&myself, &referenced, DEPENDENCY_NORMAL); } /* Normal dependency from a domain to its collation. */ /* We know the default collation is pinned, so don't bother recording it */ - if (OidIsValid(typeCollation) && typeCollation != DEFAULT_COLLATION_OID) + if (OidIsValid(typeForm->typcollation) && + typeForm->typcollation != DEFAULT_COLLATION_OID) { referenced.classId = CollationRelationId; - referenced.objectId = typeCollation; + referenced.objectId = typeForm->typcollation; referenced.objectSubId = 0; recordDependencyOn(&myself, &referenced, DEPENDENCY_NORMAL); } @@ -821,9 +821,9 @@ makeArrayTypeName(const char *typeName, Oid typeNamespace) * determine the new type's own array type name; else the latter will * certainly pick the same name. * - * Returns TRUE if successfully moved the type, FALSE if not. + * Returns true if successfully moved the type, false if not. * - * We also return TRUE if the given type is a shell type. In this case + * We also return true if the given type is a shell type. In this case * the type has not been renamed out of the way, but nonetheless it can * be expected that TypeCreate will succeed. 
This behavior is convenient * for most callers --- those that need to distinguish the shell-type case diff --git a/src/backend/catalog/sql_features.txt b/src/backend/catalog/sql_features.txt index 8e746f36d4..aeb262a5b0 100644 --- a/src/backend/catalog/sql_features.txt +++ b/src/backend/catalog/sql_features.txt @@ -452,9 +452,9 @@ T301 Functional dependencies NO partially supported T312 OVERLAY function YES T321 Basic SQL-invoked routines NO T321 Basic SQL-invoked routines 01 User-defined functions with no overloading YES -T321 Basic SQL-invoked routines 02 User-defined stored procedures with no overloading NO +T321 Basic SQL-invoked routines 02 User-defined stored procedures with no overloading YES T321 Basic SQL-invoked routines 03 Function invocation YES -T321 Basic SQL-invoked routines 04 CALL statement NO +T321 Basic SQL-invoked routines 04 CALL statement YES T321 Basic SQL-invoked routines 05 RETURN statement NO T321 Basic SQL-invoked routines 06 ROUTINES view YES T321 Basic SQL-invoked routines 07 PARAMETERS view YES @@ -480,7 +480,7 @@ T495 Combined data change and retrieval NO different syntax T501 Enhanced EXISTS predicate YES T502 Period predicates NO T511 Transaction counts NO -T521 Named arguments in CALL statement NO +T521 Named arguments in CALL statement YES T522 Default values for IN parameters of SQL-invoked procedures NO supported except DEFAULT key word in invocation T551 Optional key words for default syntax YES T561 Holdable locators NO @@ -498,7 +498,7 @@ T616 Null treatment option for LEAD and LAG functions NO T617 FIRST_VALUE and LAST_VALUE function YES T618 NTH_VALUE function NO function exists, but some options missing T619 Nested window functions NO -T620 WINDOW clause: GROUPS option NO +T620 WINDOW clause: GROUPS option YES T621 Enhanced numeric functions YES T631 IN predicate with one list element YES T641 Multiple column assignment NO only some syntax variants supported diff --git a/src/backend/catalog/storage.c b/src/backend/catalog/storage.c index 9a5fde00ca..5df4382b7e 100644 --- a/src/backend/catalog/storage.c +++ b/src/backend/catalog/storage.c @@ -3,7 +3,7 @@ * storage.c * code to create and destroy physical storage for relations * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * @@ -24,7 +24,6 @@ #include "access/xlog.h" #include "access/xloginsert.h" #include "access/xlogutils.h" -#include "catalog/catalog.h" #include "catalog/storage.h" #include "catalog/storage_xlog.h" #include "storage/freespace.h" diff --git a/src/backend/catalog/system_views.sql b/src/backend/catalog/system_views.sql index dc40cde424..715995dd88 100644 --- a/src/backend/catalog/system_views.sql +++ b/src/backend/catalog/system_views.sql @@ -1,7 +1,7 @@ /* * PostgreSQL System Views * - * Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Copyright (c) 1996-2018, PostgreSQL Global Development Group * * src/backend/catalog/system_views.sql * @@ -41,7 +41,7 @@ CREATE VIEW pg_shadow AS rolreplication AS userepl, rolbypassrls AS usebypassrls, rolpassword AS passwd, - rolvaliduntil::abstime AS valuntil, + rolvaliduntil AS valuntil, setconfig AS useconfig FROM pg_authid LEFT JOIN pg_db_role_setting s ON (pg_authid.oid = setrole AND setdatabase = 0) @@ -332,9 +332,11 @@ WHERE UNION ALL SELECT l.objoid, l.classoid, l.objsubid, - CASE WHEN pro.proisagg = true THEN 'aggregate'::text - WHEN pro.proisagg = 
false THEN 'function'::text - END AS objtype, + CASE pro.prokind + WHEN 'a' THEN 'aggregate'::text + WHEN 'f' THEN 'function'::text + WHEN 'p' THEN 'procedure'::text + WHEN 'w' THEN 'window'::text END AS objtype, pro.pronamespace AS objnamespace, CASE WHEN pg_function_is_visible(pro.oid) THEN quote_ident(pro.proname) @@ -750,6 +752,8 @@ CREATE VIEW pg_stat_wal_receiver AS s.latest_end_lsn, s.latest_end_time, s.slot_name, + s.sender_host, + s.sender_port, s.conninfo FROM pg_stat_get_wal_receiver() s WHERE s.pid IS NOT NULL; @@ -1023,6 +1027,11 @@ CREATE OR REPLACE FUNCTION pg_stop_backup ( RETURNS SETOF record STRICT VOLATILE LANGUAGE internal as 'pg_stop_backup_v2' PARALLEL RESTRICTED; +CREATE OR REPLACE FUNCTION + pg_promote(wait boolean DEFAULT true, wait_seconds integer DEFAULT 60) + RETURNS boolean STRICT VOLATILE LANGUAGE INTERNAL AS 'pg_promote' + PARALLEL SAFE; + -- legacy definition for compatibility with 9.3 CREATE OR REPLACE FUNCTION json_populate_record(base anyelement, from_json json, use_json_as_text boolean DEFAULT false) @@ -1077,7 +1086,7 @@ AS 'pg_create_physical_replication_slot'; CREATE OR REPLACE FUNCTION pg_create_logical_replication_slot( IN slot_name name, IN plugin name, IN temporary boolean DEFAULT false, - OUT slot_name text, OUT lsn pg_lsn) + OUT slot_name name, OUT lsn pg_lsn) RETURNS RECORD LANGUAGE INTERNAL STRICT VOLATILE @@ -1115,12 +1124,14 @@ LANGUAGE INTERNAL STRICT IMMUTABLE PARALLEL SAFE AS 'jsonb_insert'; +-- -- The default permissions for functions mean that anyone can execute them. -- A number of functions shouldn't be executable by just anyone, but rather -- than use explicit 'superuser()' checks in those functions, we use the GRANT -- system to REVOKE access to those functions at initdb time. Administrators -- can later change who can access these functions, or leave them as only -- available to superuser / cluster owner, if they choose. 
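+-- For example, pg_promote(boolean, integer) is revoked from PUBLIC below; an +-- administrator who wants some non-superuser role to be able to trigger +-- promotion can later run something like +-- GRANT EXECUTE ON FUNCTION pg_promote(boolean, integer) TO failover_role; +-- (failover_role being whatever role they choose).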
+-- REVOKE EXECUTE ON FUNCTION pg_start_backup(text, boolean, boolean) FROM public; REVOKE EXECUTE ON FUNCTION pg_stop_backup() FROM public; REVOKE EXECUTE ON FUNCTION pg_stop_backup(boolean, boolean) FROM public; @@ -1132,16 +1143,45 @@ REVOKE EXECUTE ON FUNCTION pg_rotate_logfile() FROM public; REVOKE EXECUTE ON FUNCTION pg_reload_conf() FROM public; REVOKE EXECUTE ON FUNCTION pg_current_logfile() FROM public; REVOKE EXECUTE ON FUNCTION pg_current_logfile(text) FROM public; +REVOKE EXECUTE ON FUNCTION pg_promote(boolean, integer) FROM public; REVOKE EXECUTE ON FUNCTION pg_stat_reset() FROM public; REVOKE EXECUTE ON FUNCTION pg_stat_reset_shared(text) FROM public; REVOKE EXECUTE ON FUNCTION pg_stat_reset_single_table_counters(oid) FROM public; REVOKE EXECUTE ON FUNCTION pg_stat_reset_single_function_counters(oid) FROM public; +REVOKE EXECUTE ON FUNCTION lo_import(text) FROM public; +REVOKE EXECUTE ON FUNCTION lo_import(text, oid) FROM public; +REVOKE EXECUTE ON FUNCTION lo_export(oid, text) FROM public; + REVOKE EXECUTE ON FUNCTION pg_ls_logdir() FROM public; REVOKE EXECUTE ON FUNCTION pg_ls_waldir() FROM public; +REVOKE EXECUTE ON FUNCTION pg_ls_archive_statusdir() FROM public; +REVOKE EXECUTE ON FUNCTION pg_ls_tmpdir() FROM public; +REVOKE EXECUTE ON FUNCTION pg_ls_tmpdir(oid) FROM public; + +REVOKE EXECUTE ON FUNCTION pg_read_file(text) FROM public; +REVOKE EXECUTE ON FUNCTION pg_read_file(text,bigint,bigint) FROM public; +REVOKE EXECUTE ON FUNCTION pg_read_file(text,bigint,bigint,boolean) FROM public; + +REVOKE EXECUTE ON FUNCTION pg_read_binary_file(text) FROM public; +REVOKE EXECUTE ON FUNCTION pg_read_binary_file(text,bigint,bigint) FROM public; +REVOKE EXECUTE ON FUNCTION pg_read_binary_file(text,bigint,bigint,boolean) FROM public; + +REVOKE EXECUTE ON FUNCTION pg_stat_file(text) FROM public; +REVOKE EXECUTE ON FUNCTION pg_stat_file(text,boolean) FROM public; + +REVOKE EXECUTE ON FUNCTION pg_ls_dir(text) FROM public; +REVOKE EXECUTE ON FUNCTION pg_ls_dir(text,boolean,boolean) FROM public; + +-- +-- We also set up some things as accessible to standard roles. +-- GRANT EXECUTE ON FUNCTION pg_ls_logdir() TO pg_monitor; GRANT EXECUTE ON FUNCTION pg_ls_waldir() TO pg_monitor; +GRANT EXECUTE ON FUNCTION pg_ls_archive_statusdir() TO pg_monitor; +GRANT EXECUTE ON FUNCTION pg_ls_tmpdir() TO pg_monitor; +GRANT EXECUTE ON FUNCTION pg_ls_tmpdir(oid) TO pg_monitor; GRANT pg_read_all_settings TO pg_monitor; GRANT pg_read_all_stats TO pg_monitor; diff --git a/src/backend/catalog/toasting.c b/src/backend/catalog/toasting.c index 29756eb14e..3baaa08238 100644 --- a/src/backend/catalog/toasting.c +++ b/src/backend/catalog/toasting.c @@ -4,7 +4,7 @@ * This file contains routines to support creation of toast tables * * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * IDENTIFICATION @@ -235,9 +235,9 @@ create_toast_table(Relation rel, Oid toastOid, Oid toastIndexOid, * toast :-(. This is essential for chunk_data because type bytea is * toastable; hit the other two just to be sure. 
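+	 * ('p' is the PLAIN storage code: the column is never compressed or moved + * out of line.)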
*/ - tupdesc->attrs[0]->attstorage = 'p'; - tupdesc->attrs[1]->attstorage = 'p'; - tupdesc->attrs[2]->attstorage = 'p'; + TupleDescAttr(tupdesc, 0)->attstorage = 'p'; + TupleDescAttr(tupdesc, 1)->attstorage = 'p'; + TupleDescAttr(tupdesc, 2)->attstorage = 'p'; /* * Toast tables for regular relations go in pg_toast; those for temp @@ -279,6 +279,7 @@ create_toast_table(Relation rel, Oid toastOid, Oid toastIndexOid, false, true, true, + InvalidOid, NULL); Assert(toast_relid != InvalidOid); @@ -302,8 +303,9 @@ create_toast_table(Relation rel, Oid toastOid, Oid toastIndexOid, indexInfo = makeNode(IndexInfo); indexInfo->ii_NumIndexAttrs = 2; - indexInfo->ii_KeyAttrNumbers[0] = 1; - indexInfo->ii_KeyAttrNumbers[1] = 2; + indexInfo->ii_NumIndexKeyAttrs = 2; + indexInfo->ii_IndexAttrNumbers[0] = 1; + indexInfo->ii_IndexAttrNumbers[1] = 2; indexInfo->ii_Expressions = NIL; indexInfo->ii_ExpressionsState = NIL; indexInfo->ii_Predicate = NIL; @@ -315,6 +317,8 @@ create_toast_table(Relation rel, Oid toastOid, Oid toastIndexOid, indexInfo->ii_ReadyForInserts = true; indexInfo->ii_Concurrent = false; indexInfo->ii_BrokenHotChain = false; + indexInfo->ii_ParallelWorkers = 0; + indexInfo->ii_Am = BTREE_AM_OID; indexInfo->ii_AmCache = NULL; indexInfo->ii_Context = CurrentMemoryContext; @@ -328,13 +332,13 @@ create_toast_table(Relation rel, Oid toastOid, Oid toastIndexOid, coloptions[1] = 0; index_create(toast_rel, toast_idxname, toastIndexOid, InvalidOid, + InvalidOid, InvalidOid, indexInfo, list_make2("chunk_id", "chunk_seq"), BTREE_AM_OID, rel->rd_rel->reltablespace, collationObjectId, classObjectId, coloptions, (Datum) 0, - true, false, false, false, - true, false, false, true, false); + INDEX_CREATE_IS_PRIMARY, 0, true, true, NULL); heap_close(toast_rel, NoLock); @@ -394,6 +398,7 @@ create_toast_table(Relation rel, Oid toastOid, Oid toastIndexOid, * (1) there are any toastable attributes, and (2) the maximum length * of a tuple could exceed TOAST_TUPLE_THRESHOLD. (We don't want to * create a toast table for something like "f1 varchar(20)".) + * No need to create a TOAST table for partitioned tables. 
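+	 * (A partitioned table has no storage of its own; each leaf partition gets + * its own TOAST table as needed.)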
*/ static bool needs_toast_table(Relation rel) @@ -402,33 +407,36 @@ needs_toast_table(Relation rel) bool maxlength_unknown = false; bool has_toastable_attrs = false; TupleDesc tupdesc; - Form_pg_attribute *att; int32 tuple_length; int i; + if (rel->rd_rel->relkind == RELKIND_PARTITIONED_TABLE) + return false; + tupdesc = rel->rd_att; - att = tupdesc->attrs; for (i = 0; i < tupdesc->natts; i++) { - if (att[i]->attisdropped) + Form_pg_attribute att = TupleDescAttr(tupdesc, i); + + if (att->attisdropped) continue; - data_length = att_align_nominal(data_length, att[i]->attalign); - if (att[i]->attlen > 0) + data_length = att_align_nominal(data_length, att->attalign); + if (att->attlen > 0) { /* Fixed-length types are never toastable */ - data_length += att[i]->attlen; + data_length += att->attlen; } else { - int32 maxlen = type_maximum_size(att[i]->atttypid, - att[i]->atttypmod); + int32 maxlen = type_maximum_size(att->atttypid, + att->atttypmod); if (maxlen < 0) maxlength_unknown = true; else data_length += maxlen; - if (att[i]->attstorage != 'p') + if (att->attstorage != 'p') has_toastable_attrs = true; } } diff --git a/src/backend/commands/aggregatecmds.c b/src/backend/commands/aggregatecmds.c index a63539ab21..877f658ce7 100644 --- a/src/backend/commands/aggregatecmds.c +++ b/src/backend/commands/aggregatecmds.c @@ -4,7 +4,7 @@ * * Routines for aggregate-manipulation commands * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * @@ -39,6 +39,9 @@ #include "utils/syscache.h" +static char extractModify(DefElem *defel); + + /* * DefineAggregate * @@ -67,6 +70,8 @@ DefineAggregate(ParseState *pstate, List *name, List *args, bool oldstyle, List List *mfinalfuncName = NIL; bool finalfuncExtraArgs = false; bool mfinalfuncExtraArgs = false; + char finalfuncModify = 0; + char mfinalfuncModify = 0; List *sortoperatorName = NIL; TypeName *baseType = NULL; TypeName *transType = NULL; @@ -97,7 +102,7 @@ DefineAggregate(ParseState *pstate, List *name, List *args, bool oldstyle, List /* Check we have creation rights in target namespace */ aclresult = pg_namespace_aclcheck(aggNamespace, GetUserId(), ACL_CREATE); if (aclresult != ACLCHECK_OK) - aclcheck_error(aclresult, ACL_KIND_NAMESPACE, + aclcheck_error(aclresult, OBJECT_SCHEMA, get_namespace_name(aggNamespace)); /* Deconstruct the output of the aggr_args grammar production */ @@ -121,33 +126,37 @@ DefineAggregate(ParseState *pstate, List *name, List *args, bool oldstyle, List * sfunc1, stype1, and initcond1 are accepted as obsolete spellings * for sfunc, stype, initcond. 
*/ - if (pg_strcasecmp(defel->defname, "sfunc") == 0) + if (strcmp(defel->defname, "sfunc") == 0) transfuncName = defGetQualifiedName(defel); - else if (pg_strcasecmp(defel->defname, "sfunc1") == 0) + else if (strcmp(defel->defname, "sfunc1") == 0) transfuncName = defGetQualifiedName(defel); - else if (pg_strcasecmp(defel->defname, "finalfunc") == 0) + else if (strcmp(defel->defname, "finalfunc") == 0) finalfuncName = defGetQualifiedName(defel); - else if (pg_strcasecmp(defel->defname, "combinefunc") == 0) + else if (strcmp(defel->defname, "combinefunc") == 0) combinefuncName = defGetQualifiedName(defel); - else if (pg_strcasecmp(defel->defname, "serialfunc") == 0) + else if (strcmp(defel->defname, "serialfunc") == 0) serialfuncName = defGetQualifiedName(defel); - else if (pg_strcasecmp(defel->defname, "deserialfunc") == 0) + else if (strcmp(defel->defname, "deserialfunc") == 0) deserialfuncName = defGetQualifiedName(defel); - else if (pg_strcasecmp(defel->defname, "msfunc") == 0) + else if (strcmp(defel->defname, "msfunc") == 0) mtransfuncName = defGetQualifiedName(defel); - else if (pg_strcasecmp(defel->defname, "minvfunc") == 0) + else if (strcmp(defel->defname, "minvfunc") == 0) minvtransfuncName = defGetQualifiedName(defel); - else if (pg_strcasecmp(defel->defname, "mfinalfunc") == 0) + else if (strcmp(defel->defname, "mfinalfunc") == 0) mfinalfuncName = defGetQualifiedName(defel); - else if (pg_strcasecmp(defel->defname, "finalfunc_extra") == 0) + else if (strcmp(defel->defname, "finalfunc_extra") == 0) finalfuncExtraArgs = defGetBoolean(defel); - else if (pg_strcasecmp(defel->defname, "mfinalfunc_extra") == 0) + else if (strcmp(defel->defname, "mfinalfunc_extra") == 0) mfinalfuncExtraArgs = defGetBoolean(defel); - else if (pg_strcasecmp(defel->defname, "sortop") == 0) + else if (strcmp(defel->defname, "finalfunc_modify") == 0) + finalfuncModify = extractModify(defel); + else if (strcmp(defel->defname, "mfinalfunc_modify") == 0) + mfinalfuncModify = extractModify(defel); + else if (strcmp(defel->defname, "sortop") == 0) sortoperatorName = defGetQualifiedName(defel); - else if (pg_strcasecmp(defel->defname, "basetype") == 0) + else if (strcmp(defel->defname, "basetype") == 0) baseType = defGetTypeName(defel); - else if (pg_strcasecmp(defel->defname, "hypothetical") == 0) + else if (strcmp(defel->defname, "hypothetical") == 0) { if (defGetBoolean(defel)) { @@ -158,23 +167,23 @@ DefineAggregate(ParseState *pstate, List *name, List *args, bool oldstyle, List aggKind = AGGKIND_HYPOTHETICAL; } } - else if (pg_strcasecmp(defel->defname, "stype") == 0) + else if (strcmp(defel->defname, "stype") == 0) transType = defGetTypeName(defel); - else if (pg_strcasecmp(defel->defname, "stype1") == 0) + else if (strcmp(defel->defname, "stype1") == 0) transType = defGetTypeName(defel); - else if (pg_strcasecmp(defel->defname, "sspace") == 0) + else if (strcmp(defel->defname, "sspace") == 0) transSpace = defGetInt32(defel); - else if (pg_strcasecmp(defel->defname, "mstype") == 0) + else if (strcmp(defel->defname, "mstype") == 0) mtransType = defGetTypeName(defel); - else if (pg_strcasecmp(defel->defname, "msspace") == 0) + else if (strcmp(defel->defname, "msspace") == 0) mtransSpace = defGetInt32(defel); - else if (pg_strcasecmp(defel->defname, "initcond") == 0) + else if (strcmp(defel->defname, "initcond") == 0) initval = defGetString(defel); - else if (pg_strcasecmp(defel->defname, "initcond1") == 0) + else if (strcmp(defel->defname, "initcond1") == 0) initval = defGetString(defel); - else if 
(pg_strcasecmp(defel->defname, "minitcond") == 0) + else if (strcmp(defel->defname, "minitcond") == 0) minitval = defGetString(defel); - else if (pg_strcasecmp(defel->defname, "parallel") == 0) + else if (strcmp(defel->defname, "parallel") == 0) parallel = defGetString(defel); else ereport(WARNING, @@ -235,6 +244,15 @@ DefineAggregate(ParseState *pstate, List *name, List *args, bool oldstyle, List errmsg("aggregate minitcond must not be specified without mstype"))); } + /* + * Default values for modify flags can only be determined once we know the + * aggKind. + */ + if (finalfuncModify == 0) + finalfuncModify = (aggKind == AGGKIND_NORMAL) ? AGGMODIFY_READ_ONLY : AGGMODIFY_READ_WRITE; + if (mfinalfuncModify == 0) + mfinalfuncModify = (aggKind == AGGKIND_NORMAL) ? AGGMODIFY_READ_ONLY : AGGMODIFY_READ_WRITE; + /* * look up the aggregate's input datatype(s). */ @@ -288,7 +306,7 @@ DefineAggregate(ParseState *pstate, List *name, List *args, bool oldstyle, List interpret_function_parameter_list(pstate, args, InvalidOid, - true, /* is an aggregate */ + OBJECT_AGGREGATE, ¶meterTypes, &allParameterTypes, ¶meterModes, @@ -401,11 +419,11 @@ DefineAggregate(ParseState *pstate, List *name, List *args, bool oldstyle, List if (parallel) { - if (pg_strcasecmp(parallel, "safe") == 0) + if (strcmp(parallel, "safe") == 0) proparallel = PROPARALLEL_SAFE; - else if (pg_strcasecmp(parallel, "restricted") == 0) + else if (strcmp(parallel, "restricted") == 0) proparallel = PROPARALLEL_RESTRICTED; - else if (pg_strcasecmp(parallel, "unsafe") == 0) + else if (strcmp(parallel, "unsafe") == 0) proparallel = PROPARALLEL_UNSAFE; else ereport(ERROR, @@ -437,6 +455,8 @@ DefineAggregate(ParseState *pstate, List *name, List *args, bool oldstyle, List mfinalfuncName, /* final function name */ finalfuncExtraArgs, mfinalfuncExtraArgs, + finalfuncModify, + mfinalfuncModify, sortoperatorName, /* sort operator name */ transTypeId, /* transition data type */ transSpace, /* transition space */ @@ -446,3 +466,24 @@ DefineAggregate(ParseState *pstate, List *name, List *args, bool oldstyle, List minitval, /* initial condition */ proparallel); /* parallel safe? 
*/ } + +/* + * Convert the string form of [m]finalfunc_modify to the catalog representation + */ +static char +extractModify(DefElem *defel) +{ + char *val = defGetString(defel); + + if (strcmp(val, "read_only") == 0) + return AGGMODIFY_READ_ONLY; + if (strcmp(val, "shareable") == 0) + return AGGMODIFY_SHAREABLE; + if (strcmp(val, "read_write") == 0) + return AGGMODIFY_READ_WRITE; + ereport(ERROR, + (errcode(ERRCODE_SYNTAX_ERROR), + errmsg("parameter \"%s\" must be READ_ONLY, SHAREABLE, or READ_WRITE", + defel->defname))); + return 0; /* keep compiler quiet */ +} diff --git a/src/backend/commands/alter.c b/src/backend/commands/alter.c index 4f8147907c..eff325cc7d 100644 --- a/src/backend/commands/alter.c +++ b/src/backend/commands/alter.c @@ -3,7 +3,7 @@ * alter.c * Drivers for generic alter commands * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * @@ -171,7 +171,7 @@ AlterObjectRename_internal(Relation rel, Oid objectId, const char *new_name) AttrNumber Anum_name = get_object_attnum_name(classId); AttrNumber Anum_namespace = get_object_attnum_namespace(classId); AttrNumber Anum_owner = get_object_attnum_owner(classId); - AclObjectKind acl_kind = get_object_aclkind(classId); + ObjectType objtype = get_object_type(classId, objectId); HeapTuple oldtup; HeapTuple newtup; Datum datum; @@ -223,7 +223,7 @@ AlterObjectRename_internal(Relation rel, Oid objectId, const char *new_name) ownerId = DatumGetObjectId(datum); if (!has_privs_of_role(GetUserId(), DatumGetObjectId(ownerId))) - aclcheck_error(ACLCHECK_NOT_OWNER, acl_kind, old_name); + aclcheck_error(ACLCHECK_NOT_OWNER, objtype, old_name); /* User must have CREATE privilege on the namespace */ if (OidIsValid(namespaceId)) @@ -231,7 +231,7 @@ AlterObjectRename_internal(Relation rel, Oid objectId, const char *new_name) aclresult = pg_namespace_aclcheck(namespaceId, GetUserId(), ACL_CREATE); if (aclresult != ACLCHECK_OK) - aclcheck_error(aclresult, ACL_KIND_NAMESPACE, + aclcheck_error(aclresult, OBJECT_SCHEMA, get_namespace_name(namespaceId)); } } @@ -378,6 +378,8 @@ ExecRenameStmt(RenameStmt *stmt) case OBJECT_OPCLASS: case OBJECT_OPFAMILY: case OBJECT_LANGUAGE: + case OBJECT_PROCEDURE: + case OBJECT_ROUTINE: case OBJECT_STATISTIC_EXT: case OBJECT_TSCONFIGURATION: case OBJECT_TSDICTIONARY: @@ -495,6 +497,8 @@ ExecAlterObjectSchemaStmt(AlterObjectSchemaStmt *stmt, case OBJECT_OPERATOR: case OBJECT_OPCLASS: case OBJECT_OPFAMILY: + case OBJECT_PROCEDURE: + case OBJECT_ROUTINE: case OBJECT_STATISTIC_EXT: case OBJECT_TSCONFIGURATION: case OBJECT_TSDICTIONARY: @@ -659,7 +663,7 @@ AlterObjectNamespace_internal(Relation rel, Oid objid, Oid nspOid) AttrNumber Anum_name = get_object_attnum_name(classId); AttrNumber Anum_namespace = get_object_attnum_namespace(classId); AttrNumber Anum_owner = get_object_attnum_owner(classId); - AclObjectKind acl_kind = get_object_aclkind(classId); + ObjectType objtype = get_object_type(classId, objid); Oid oldNspOid; Datum name, namespace; @@ -715,13 +719,13 @@ AlterObjectNamespace_internal(Relation rel, Oid objid, Oid nspOid) ownerId = DatumGetObjectId(owner); if (!has_privs_of_role(GetUserId(), ownerId)) - aclcheck_error(ACLCHECK_NOT_OWNER, acl_kind, + aclcheck_error(ACLCHECK_NOT_OWNER, objtype, NameStr(*(DatumGetName(name)))); /* User must have CREATE privilege on new namespace */ aclresult = pg_namespace_aclcheck(nspOid, GetUserId(), 
ACL_CREATE); if (aclresult != ACLCHECK_OK) - aclcheck_error(aclresult, ACL_KIND_NAMESPACE, + aclcheck_error(aclresult, OBJECT_SCHEMA, get_namespace_name(nspOid)); } @@ -842,6 +846,8 @@ ExecAlterOwnerStmt(AlterOwnerStmt *stmt) case OBJECT_OPERATOR: case OBJECT_OPCLASS: case OBJECT_OPFAMILY: + case OBJECT_PROCEDURE: + case OBJECT_ROUTINE: case OBJECT_STATISTIC_EXT: case OBJECT_TABLESPACE: case OBJECT_TSDICTIONARY: @@ -936,7 +942,7 @@ AlterObjectOwner_internal(Relation rel, Oid objectId, Oid new_ownerId) /* Superusers can bypass permission checks */ if (!superuser()) { - AclObjectKind aclkind = get_object_aclkind(classId); + ObjectType objtype = get_object_type(classId, objectId); /* must be owner */ if (!has_privs_of_role(GetUserId(), old_ownerId)) @@ -957,7 +963,7 @@ AlterObjectOwner_internal(Relation rel, Oid objectId, Oid new_ownerId) HeapTupleGetOid(oldtup)); objname = namebuf; } - aclcheck_error(ACLCHECK_NOT_OWNER, aclkind, objname); + aclcheck_error(ACLCHECK_NOT_OWNER, objtype, objname); } /* Must be able to become new owner */ check_is_member_of_role(GetUserId(), new_ownerId); @@ -970,7 +976,7 @@ AlterObjectOwner_internal(Relation rel, Oid objectId, Oid new_ownerId) aclresult = pg_namespace_aclcheck(namespaceId, new_ownerId, ACL_CREATE); if (aclresult != ACLCHECK_OK) - aclcheck_error(aclresult, ACL_KIND_NAMESPACE, + aclcheck_error(aclresult, OBJECT_SCHEMA, get_namespace_name(namespaceId)); } } diff --git a/src/backend/commands/amcmds.c b/src/backend/commands/amcmds.c index 7e0a9aa0fd..f2173450ad 100644 --- a/src/backend/commands/amcmds.c +++ b/src/backend/commands/amcmds.c @@ -3,7 +3,7 @@ * amcmds.c * Routines for SQL commands that manipulate access methods. * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * diff --git a/src/backend/commands/analyze.c b/src/backend/commands/analyze.c index 2b638271b3..8ac868ad73 100644 --- a/src/backend/commands/analyze.c +++ b/src/backend/commands/analyze.c @@ -3,7 +3,7 @@ * analyze.c * the Postgres statistics generator * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * @@ -27,7 +27,7 @@ #include "catalog/index.h" #include "catalog/indexing.h" #include "catalog/pg_collation.h" -#include "catalog/pg_inherits_fn.h" +#include "catalog/pg_inherits.h" #include "catalog/pg_namespace.h" #include "catalog/pg_statistic_ext.h" #include "commands/dbcommands.h" @@ -106,6 +106,10 @@ static Datum ind_fetch_func(VacAttrStatsP stats, int rownum, bool *isNull); /* * analyze_rel() -- analyze one relation + * + * relid identifies the relation to analyze. If relation is supplied, use + * the name therein for reporting any failure to open/lock the rel; do not + * use it once we've successfully opened the rel, since it might be stale. */ void analyze_rel(Oid relid, RangeVar *relation, int options, @@ -137,45 +141,29 @@ analyze_rel(Oid relid, RangeVar *relation, int options, * concurrent VACUUM, which doesn't matter much at the moment but might * matter if we ever try to accumulate stats on dead tuples.) If the rel * has been dropped since we last saw it, we don't need to process it. + * + * Make sure to generate only logs for ANALYZE in this case. 
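+	 * (That is why VACOPT_VACUUM is cleared from the options passed to + * vacuum_open_relation below: a lock-not-available complaint is then + * reported as a skipped ANALYZE rather than a skipped VACUUM.)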
*/ - if (!(options & VACOPT_NOWAIT)) - onerel = try_relation_open(relid, ShareUpdateExclusiveLock); - else if (ConditionalLockRelationOid(relid, ShareUpdateExclusiveLock)) - onerel = try_relation_open(relid, NoLock); - else - { - onerel = NULL; - if (IsAutoVacuumWorkerProcess() && params->log_min_duration >= 0) - ereport(LOG, - (errcode(ERRCODE_LOCK_NOT_AVAILABLE), - errmsg("skipping analyze of \"%s\" --- lock not available", - relation->relname))); - } + onerel = vacuum_open_relation(relid, relation, params, + options & ~(VACOPT_VACUUM), + ShareUpdateExclusiveLock); + + /* leave if relation could not be opened or locked */ if (!onerel) return; /* - * Check permissions --- this should match vacuum's check! + * Check if relation needs to be skipped based on ownership. This check + * happens also when building the relation list to analyze for a manual + * operation, and needs to be done additionally here as ANALYZE could + * happen across multiple transactions where relation ownership could have + * changed in-between. Make sure to generate only logs for ANALYZE in + * this case. */ - if (!(pg_class_ownercheck(RelationGetRelid(onerel), GetUserId()) || - (pg_database_ownercheck(MyDatabaseId, GetUserId()) && !onerel->rd_rel->relisshared))) + if (!vacuum_is_relation_owner(RelationGetRelid(onerel), + onerel->rd_rel, + options & VACOPT_ANALYZE)) { - /* No need for a WARNING if we already complained during VACUUM */ - if (!(options & VACOPT_VACUUM)) - { - if (onerel->rd_rel->relisshared) - ereport(WARNING, - (errmsg("skipping \"%s\" --- only superuser can analyze it", - RelationGetRelationName(onerel)))); - else if (onerel->rd_rel->relnamespace == PG_CATALOG_NAMESPACE) - ereport(WARNING, - (errmsg("skipping \"%s\" --- only superuser or database owner can analyze it", - RelationGetRelationName(onerel)))); - else - ereport(WARNING, - (errmsg("skipping \"%s\" --- only table or database owner can analyze it", - RelationGetRelationName(onerel)))); - } relation_close(onerel, ShareUpdateExclusiveLock); return; } @@ -202,9 +190,7 @@ analyze_rel(Oid relid, RangeVar *relation, int options, } /* - * Check that it's a plain table, materialized view, or foreign table; we - * used to do this in get_rel_oids() but seems safer to check after we've - * locked the relation. + * Check that it's of an analyzable relkind, and set up appropriately. */ if (onerel->rd_rel->relkind == RELKIND_RELATION || onerel->rd_rel->relkind == RELKIND_MATVIEW) @@ -370,10 +356,14 @@ do_analyze_rel(Relation onerel, int options, VacuumParams *params, /* * Determine which columns to analyze * - * Note that system attributes are never analyzed. + * Note that system attributes are never analyzed, so we just reject them + * at the lookup stage. We also reject duplicate column mentions. (We + * could alternatively ignore duplicates, but analyzing a column twice + * won't work; we'd end up making a conflicting update in pg_statistic.) 
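+	 * For example, ANALYZE tab (x, x) is now rejected with a "column ... + * appears more than once" error instead of attempting to analyze column x + * twice.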
*/ if (va_cols != NIL) { + Bitmapset *unique_cols = NULL; ListCell *le; vacattrstats = (VacAttrStats **) palloc(list_length(va_cols) * @@ -389,6 +379,13 @@ do_analyze_rel(Relation onerel, int options, VacuumParams *params, (errcode(ERRCODE_UNDEFINED_COLUMN), errmsg("column \"%s\" of relation \"%s\" does not exist", col, RelationGetRelationName(onerel)))); + if (bms_is_member(i, unique_cols)) + ereport(ERROR, + (errcode(ERRCODE_DUPLICATE_COLUMN), + errmsg("column \"%s\" of relation \"%s\" appears more than once", + col, RelationGetRelationName(onerel)))); + unique_cols = bms_add_member(unique_cols, i); + vacattrstats[tcnt] = examine_attribute(onerel, i, NULL); if (vacattrstats[tcnt] != NULL) tcnt++; @@ -445,7 +442,7 @@ do_analyze_rel(Relation onerel, int options, VacuumParams *params, tcnt = 0; for (i = 0; i < indexInfo->ii_NumIndexAttrs; i++) { - int keycol = indexInfo->ii_KeyAttrNumbers[i]; + int keycol = indexInfo->ii_IndexAttrNumbers[i]; if (keycol == 0) { @@ -526,10 +523,10 @@ do_analyze_rel(Relation onerel, int options, VacuumParams *params, stats->rows = rows; stats->tupDesc = onerel->rd_att; - (*stats->compute_stats) (stats, - std_fetch_func, - numrows, - totalrows); + stats->compute_stats(stats, + std_fetch_func, + numrows, + totalrows); /* * If the appropriate flavor of the n_distinct option is @@ -759,7 +756,7 @@ compute_index_stats(Relation onerel, double totalrows, ResetExprContext(econtext); /* Set up for predicate or expression evaluation */ - ExecStoreTuple(heapTuple, slot, InvalidBuffer, false); + ExecStoreHeapTuple(heapTuple, slot, false); /* If index is partial, check predicate */ if (predicate != NULL) @@ -830,10 +827,10 @@ compute_index_stats(Relation onerel, double totalrows, stats->exprvals = exprvals + i; stats->exprnulls = exprnulls + i; stats->rowstride = attr_cnt; - (*stats->compute_stats) (stats, - ind_fetch_func, - numindexrows, - totalindexrows); + stats->compute_stats(stats, + ind_fetch_func, + numindexrows, + totalindexrows); /* * If the n_distinct option is specified, it overrides the @@ -871,7 +868,7 @@ compute_index_stats(Relation onerel, double totalrows, static VacAttrStats * examine_attribute(Relation onerel, int attnum, Node *index_expr) { - Form_pg_attribute attr = onerel->rd_att->attrs[attnum - 1]; + Form_pg_attribute attr = TupleDescAttr(onerel->rd_att, attnum - 1); HeapTuple typtuple; VacAttrStats *stats; int i; @@ -1199,19 +1196,22 @@ acquire_sample_rows(Relation onerel, int elevel, qsort((void *) rows, numrows, sizeof(HeapTuple), compare_rows); /* - * Estimate total numbers of rows in relation. For live rows, use - * vac_estimate_reltuples; for dead rows, we have no source of old - * information, so we have to assume the density is the same in unseen - * pages as in the pages we scanned. + * Estimate total numbers of live and dead rows in relation, extrapolating + * on the assumption that the average tuple density in pages we didn't + * scan is the same as in the pages we did scan. Since what we scanned is + * a random sample of the pages in the relation, this should be a good + * assumption. 
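+	 * For instance (illustrative numbers only): sampling bs.m = 300 of + * totalblocks = 3000 pages and seeing liverows = 45000 and deadrows = 1500 + * yields *totalrows = floor((45000/300) * 3000 + 0.5) = 450000 and + * *totaldeadrows = floor((1500/300) * 3000 + 0.5) = 15000.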
*/ - *totalrows = vac_estimate_reltuples(onerel, true, - totalblocks, - bs.m, - liverows); if (bs.m > 0) + { + *totalrows = floor((liverows / bs.m) * totalblocks + 0.5); *totaldeadrows = floor((deadrows / bs.m) * totalblocks + 0.5); + } else + { + *totalrows = 0.0; *totaldeadrows = 0.0; + } /* * Emit some interesting relation info @@ -1452,7 +1452,7 @@ acquire_inherited_sample_rows(Relation onerel, int elevel, { HeapTuple newtup; - newtup = do_convert_tuple(rows[numrows + j], map); + newtup = execute_attr_map_tuple(rows[numrows + j], map); heap_freetuple(rows[numrows + j]); rows[numrows + j] = newtup; } @@ -1716,6 +1716,12 @@ static void compute_scalar_stats(VacAttrStatsP stats, double totalrows); static int compare_scalars(const void *a, const void *b, void *arg); static int compare_mcvs(const void *a, const void *b); +static int analyze_mcv_list(int *mcv_counts, + int num_mcv, + double stadistinct, + double stanullfrac, + int samplerows, + double totalrows); /* @@ -2134,9 +2140,7 @@ compute_distinct_stats(VacAttrStatsP stats, * we are able to generate a complete MCV list (all the values in the * sample will fit, and we think these are all the ones in the table), * then do so. Otherwise, store only those values that are - * significantly more common than the (estimated) average. We set the - * threshold rather arbitrarily at 25% more than average, with at - * least 2 instances in the sample. + * significantly more common than the values not in the list. * * Note: the first of these cases is meant to address columns with * small, fixed sets of possible values, such as boolean or enum @@ -2145,8 +2149,7 @@ compute_distinct_stats(VacAttrStatsP stats, * so and thus provide the planner with complete information. But if * the MCV list is not complete, it's generally worth being more * selective, and not just filling it all the way up to the stats - * target. So for an incomplete list, we try to take only MCVs that - * are significantly more common than average. + * target. */ if (track_cnt < track_max && toowide_cnt == 0 && stats->stadistinct > 0 && @@ -2157,28 +2160,22 @@ compute_distinct_stats(VacAttrStatsP stats, } else { - double ndistinct_table = stats->stadistinct; - double avgcount, - mincount; - - /* Re-extract estimate of # distinct nonnull values in table */ - if (ndistinct_table < 0) - ndistinct_table = -ndistinct_table * totalrows; - /* estimate # occurrences in sample of a typical nonnull value */ - avgcount = (double) nonnull_cnt / ndistinct_table; - /* set minimum threshold count to store a value */ - mincount = avgcount * 1.25; - if (mincount < 2) - mincount = 2; + int *mcv_counts; + + /* Incomplete list; decide how many values are worth keeping */ if (num_mcv > track_cnt) num_mcv = track_cnt; - for (i = 0; i < num_mcv; i++) + + if (num_mcv > 0) { - if (track[i].count < mincount) - { - num_mcv = i; - break; - } + mcv_counts = (int *) palloc(num_mcv * sizeof(int)); + for (i = 0; i < num_mcv; i++) + mcv_counts[i] = track[i].count; + + num_mcv = analyze_mcv_list(mcv_counts, num_mcv, + stats->stadistinct, + stats->stanullfrac, + samplerows, totalrows); } } @@ -2508,14 +2505,7 @@ compute_scalar_stats(VacAttrStatsP stats, * we are able to generate a complete MCV list (all the values in the * sample will fit, and we think these are all the ones in the table), * then do so. Otherwise, store only those values that are - * significantly more common than the (estimated) average. 
We set the - * threshold rather arbitrarily at 25% more than average, with at - * least 2 instances in the sample. Also, we won't suppress values - * that have a frequency of at least 1/K where K is the intended - * number of histogram bins; such values might otherwise cause us to - * emit duplicate histogram bin boundaries. (We might end up with - * duplicate histogram entries anyway, if the distribution is skewed; - * but we prefer to treat such values as MCVs if at all possible.) + * significantly more common than the values not in the list. * * Note: the first of these cases is meant to address columns with * small, fixed sets of possible values, such as boolean or enum @@ -2524,8 +2514,7 @@ compute_scalar_stats(VacAttrStatsP stats, * so and thus provide the planner with complete information. But if * the MCV list is not complete, it's generally worth being more * selective, and not just filling it all the way up to the stats - * target. So for an incomplete list, we try to take only MCVs that - * are significantly more common than average. + * target. */ if (track_cnt == ndistinct && toowide_cnt == 0 && stats->stadistinct > 0 && @@ -2536,33 +2525,22 @@ compute_scalar_stats(VacAttrStatsP stats, } else { - double ndistinct_table = stats->stadistinct; - double avgcount, - mincount, - maxmincount; - - /* Re-extract estimate of # distinct nonnull values in table */ - if (ndistinct_table < 0) - ndistinct_table = -ndistinct_table * totalrows; - /* estimate # occurrences in sample of a typical nonnull value */ - avgcount = (double) nonnull_cnt / ndistinct_table; - /* set minimum threshold count to store a value */ - mincount = avgcount * 1.25; - if (mincount < 2) - mincount = 2; - /* don't let threshold exceed 1/K, however */ - maxmincount = (double) values_cnt / (double) num_bins; - if (mincount > maxmincount) - mincount = maxmincount; + int *mcv_counts; + + /* Incomplete list; decide how many values are worth keeping */ if (num_mcv > track_cnt) num_mcv = track_cnt; - for (i = 0; i < num_mcv; i++) + + if (num_mcv > 0) { - if (track[i].count < mincount) - { - num_mcv = i; - break; - } + mcv_counts = (int *) palloc(num_mcv * sizeof(int)); + for (i = 0; i < num_mcv; i++) + mcv_counts[i] = track[i].count; + + num_mcv = analyze_mcv_list(mcv_counts, num_mcv, + stats->stadistinct, + stats->stanullfrac, + samplerows, totalrows); } } @@ -2828,3 +2806,125 @@ compare_mcvs(const void *a, const void *b) return da - db; } + +/* + * Analyze the list of common values in the sample and decide how many are + * worth storing in the table's MCV list. + * + * mcv_counts is assumed to be a list of the counts of the most common values + * seen in the sample, starting with the most common. The return value is the + * number that are significantly more common than the values not in the list, + * and which are therefore deemed worth storing in the table's MCV list. + */ +static int +analyze_mcv_list(int *mcv_counts, + int num_mcv, + double stadistinct, + double stanullfrac, + int samplerows, + double totalrows) +{ + double ndistinct_table; + double sumcount; + int i; + + /* + * If the entire table was sampled, keep the whole list. This also + * protects us against division by zero in the code below. 
+ */ + if (samplerows == totalrows || totalrows <= 1.0) + return num_mcv; + + /* Re-extract the estimated number of distinct nonnull values in table */ + ndistinct_table = stadistinct; + if (ndistinct_table < 0) + ndistinct_table = -ndistinct_table * totalrows; + + /* + * Exclude the least common values from the MCV list, if they are not + * significantly more common than the estimated selectivity they would + * have if they weren't in the list. All non-MCV values are assumed to be + * equally common, after taking into account the frequencies of all the + * values in the MCV list and the number of nulls (c.f. eqsel()). + * + * Here sumcount tracks the total count of all but the last (least common) + * value in the MCV list, allowing us to determine the effect of excluding + * that value from the list. + * + * Note that we deliberately do this by removing values from the full + * list, rather than starting with an empty list and adding values, + * because the latter approach can fail to add any values if all the most + * common values have around the same frequency and make up the majority + * of the table, so that the overall average frequency of all values is + * roughly the same as that of the common values. This would lead to any + * uncommon values being significantly overestimated. + */ + sumcount = 0.0; + for (i = 0; i < num_mcv - 1; i++) + sumcount += mcv_counts[i]; + + while (num_mcv > 0) + { + double selec, + otherdistinct, + N, + n, + K, + variance, + stddev; + + /* + * Estimated selectivity the least common value would have if it + * wasn't in the MCV list (c.f. eqsel()). + */ + selec = 1.0 - sumcount / samplerows - stanullfrac; + if (selec < 0.0) + selec = 0.0; + if (selec > 1.0) + selec = 1.0; + otherdistinct = ndistinct_table - (num_mcv - 1); + if (otherdistinct > 1) + selec /= otherdistinct; + + /* + * If the value is kept in the MCV list, its population frequency is + * assumed to equal its sample frequency. We use the lower end of a + * textbook continuity-corrected Wald-type confidence interval to + * determine if that is significantly more common than the non-MCV + * frequency --- specifically we assume the population frequency is + * highly likely to be within around 2 standard errors of the sample + * frequency, which equates to an interval of 2 standard deviations + * either side of the sample count, plus an additional 0.5 for the + * continuity correction. Since we are sampling without replacement, + * this is a hypergeometric distribution. + * + * XXX: Empirically, this approach seems to work quite well, but it + * may be worth considering more advanced techniques for estimating + * the confidence interval of the hypergeometric distribution. + */ + N = totalrows; + n = samplerows; + K = N * mcv_counts[num_mcv - 1] / n; + variance = n * K * (N - K) * (N - n) / (N * N * (N - 1)); + stddev = sqrt(variance); + + if (mcv_counts[num_mcv - 1] > selec * samplerows + 2 * stddev + 0.5) + { + /* + * The value is significantly more common than the non-MCV + * selectivity would suggest. Keep it, and all the other more + * common values in the list. 
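+			 * Illustrative (made-up) numbers: with N = 1000000, n = 30000, a + * least-common sample count of 90 (so K = 3000) and a non-MCV estimate + * such that selec * samplerows = 60, the variance comes out near 87 and + * stddev near 9.3, so the value is kept because 90 > 60 + 2*9.3 + 0.5.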
+ */ + break; + } + else + { + /* Discard this value and consider the next least common value */ + num_mcv--; + if (num_mcv == 0) + break; + sumcount -= mcv_counts[num_mcv - 1]; + } + } + return num_mcv; +} diff --git a/src/backend/commands/async.c b/src/backend/commands/async.c index bacc08eb84..ee7c6d41b4 100644 --- a/src/backend/commands/async.c +++ b/src/backend/commands/async.c @@ -3,7 +3,7 @@ * async.c * Asynchronous notification: NOTIFY, LISTEN, UNLISTEN * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * IDENTIFICATION @@ -137,7 +137,9 @@ #include "utils/builtins.h" #include "utils/memutils.h" #include "utils/ps_status.h" +#include "utils/snapmgr.h" #include "utils/timestamp.h" +#include "utils/tqual.h" /* @@ -387,7 +389,8 @@ static bool SignalBackends(void); static void asyncQueueReadAllNotifications(void); static bool asyncQueueProcessPageEntries(volatile QueuePosition *current, QueuePosition stop, - char *page_buffer); + char *page_buffer, + Snapshot snapshot); static void asyncQueueAdvanceTail(void); static void ProcessIncomingNotify(void); static bool AsyncExistsPendingNotify(const char *channel, const char *payload); @@ -798,7 +801,7 @@ PreCommit_Notify(void) } } - /* Queue any pending notifies */ + /* Queue any pending notifies (must happen after the above) */ if (pendingNotifies) { ListCell *nextNotify; @@ -987,7 +990,9 @@ Exec_ListenPreCommit(void) * have already committed before we started to LISTEN. * * Note that we are not yet listening on anything, so we won't deliver any - * notification to the frontend. + * notification to the frontend. Also, although our transaction might + * have executed NOTIFY, those message(s) aren't queued yet so we can't + * see them in the queue. * * This will also advance the global tail pointer if possible. */ @@ -1744,6 +1749,7 @@ asyncQueueReadAllNotifications(void) volatile QueuePosition pos; QueuePosition oldpos; QueuePosition head; + Snapshot snapshot; bool advanceTail; /* page_buffer must be adequately aligned, so use a union */ @@ -1767,6 +1773,9 @@ asyncQueueReadAllNotifications(void) return; } + /* Get snapshot we'll use to decide which xacts are still in progress */ + snapshot = RegisterSnapshot(GetLatestSnapshot()); + /*---------- * Note that we deliver everything that we see in the queue and that * matches our _current_ listening state. @@ -1854,7 +1863,8 @@ asyncQueueReadAllNotifications(void) * while sending the notifications to the frontend. 
*/ reachedStop = asyncQueueProcessPageEntries(&pos, head, - page_buffer.buf); + page_buffer.buf, + snapshot); } while (!reachedStop); } PG_CATCH(); @@ -1882,6 +1892,9 @@ asyncQueueReadAllNotifications(void) /* If we were the laziest backend, try to advance the tail pointer */ if (advanceTail) asyncQueueAdvanceTail(); + + /* Done with snapshot */ + UnregisterSnapshot(snapshot); } /* @@ -1903,7 +1916,8 @@ asyncQueueReadAllNotifications(void) static bool asyncQueueProcessPageEntries(volatile QueuePosition *current, QueuePosition stop, - char *page_buffer) + char *page_buffer, + Snapshot snapshot) { bool reachedStop = false; bool reachedEndOfPage; @@ -1928,7 +1942,7 @@ asyncQueueProcessPageEntries(volatile QueuePosition *current, /* Ignore messages destined for other databases */ if (qe->dboid == MyDatabaseId) { - if (TransactionIdIsInProgress(qe->xid)) + if (XidInMVCCSnapshot(qe->xid, snapshot)) { /* * The source transaction is still in progress, so we can't @@ -1939,10 +1953,15 @@ asyncQueueProcessPageEntries(volatile QueuePosition *current, * this advance-then-back-up behavior when dealing with an * uncommitted message.) * - * Note that we must test TransactionIdIsInProgress before we - * test TransactionIdDidCommit, else we might return a message - * from a transaction that is not yet visible to snapshots; - * compare the comments at the head of tqual.c. + * Note that we must test XidInMVCCSnapshot before we test + * TransactionIdDidCommit, else we might return a message from + * a transaction that is not yet visible to snapshots; compare + * the comments at the head of tqual.c. + * + * Also, while our own xact won't be listed in the snapshot, + * we need not check for TransactionIdIsCurrentTransactionId + * because our transaction cannot (yet) have queued any + * messages. */ *current = thisentry; reachedStop = true; @@ -2081,7 +2100,7 @@ NotifyMyFrontEnd(const char *channel, const char *payload, int32 srcPid) StringInfoData buf; pq_beginmessage(&buf, 'A'); - pq_sendint(&buf, srcPid, sizeof(int32)); + pq_sendint32(&buf, srcPid); pq_sendstring(&buf, channel); if (PG_PROTOCOL_MAJOR(FrontendProtocol) >= 3) pq_sendstring(&buf, payload); diff --git a/src/backend/commands/cluster.c b/src/backend/commands/cluster.c index f51f8b9492..68be470977 100644 --- a/src/backend/commands/cluster.c +++ b/src/backend/commands/cluster.c @@ -6,7 +6,7 @@ * There is hardly anything left of Paul Brown's original implementation... * * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994-5, Regents of the University of California * * @@ -115,7 +115,7 @@ cluster(ClusterStmt *stmt, bool isTopLevel) /* Find, lock, and check permissions on the table */ tableOid = RangeVarGetRelidExtended(stmt->relation, AccessExclusiveLock, - false, false, + 0, RangeVarCallbackOwnsTable, NULL); rel = heap_open(tableOid, NoLock); @@ -128,6 +128,14 @@ cluster(ClusterStmt *stmt, bool isTopLevel) (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), errmsg("cannot cluster temporary tables of other sessions"))); + /* + * Reject clustering a partitioned table. + */ + if (rel->rd_rel->relkind == RELKIND_PARTITIONED_TABLE) + ereport(ERROR, + (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), + errmsg("cannot cluster a partitioned table"))); + if (stmt->indexname == NULL) { ListCell *index; @@ -178,7 +186,7 @@ cluster(ClusterStmt *stmt, bool isTopLevel) heap_close(rel, NoLock); /* Do the job. 
*/ - cluster_rel(tableOid, indexOid, false, stmt->verbose); + cluster_rel(tableOid, indexOid, stmt->options); } else { @@ -194,7 +202,7 @@ cluster(ClusterStmt *stmt, bool isTopLevel) * We cannot run this form of CLUSTER inside a user transaction block; * we'd be holding locks way too long. */ - PreventTransactionChain(isTopLevel, "CLUSTER"); + PreventInTransactionBlock(isTopLevel, "CLUSTER"); /* * Create special memory context for cross-transaction storage. @@ -226,7 +234,8 @@ cluster(ClusterStmt *stmt, bool isTopLevel) /* functions in indexes may want a snapshot set */ PushActiveSnapshot(GetTransactionSnapshot()); /* Do the job. */ - cluster_rel(rvtc->tableOid, rvtc->indexOid, true, stmt->verbose); + cluster_rel(rvtc->tableOid, rvtc->indexOid, + stmt->options | CLUOPT_RECHECK); PopActiveSnapshot(); CommitTransactionCommand(); } @@ -257,9 +266,11 @@ cluster(ClusterStmt *stmt, bool isTopLevel) * and error messages should refer to the operation as VACUUM not CLUSTER. */ void -cluster_rel(Oid tableOid, Oid indexOid, bool recheck, bool verbose) +cluster_rel(Oid tableOid, Oid indexOid, int options) { Relation OldHeap; + bool verbose = ((options & CLUOPT_VERBOSE) != 0); + bool recheck = ((options & CLUOPT_RECHECK) != 0); /* Check for user-requested abort. */ CHECK_FOR_INTERRUPTS(); @@ -445,7 +456,7 @@ check_index_is_clusterable(Relation OldHeap, Oid indexOid, bool recheck, LOCKMOD * seqscan pass over the table to copy the missing rows, but that seems * expensive and tedious. */ - if (!heap_attisnull(OldIndex->rd_indextuple, Anum_pg_index_indpred)) + if (!heap_attisnull(OldIndex->rd_indextuple, Anum_pg_index_indpred, NULL)) ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), errmsg("cannot cluster on partial index \"%s\"", @@ -482,6 +493,12 @@ mark_index_clustered(Relation rel, Oid indexOid, bool is_internal) Relation pg_index; ListCell *index; + /* Disallow applying to a partitioned table */ + if (rel->rd_rel->relkind == RELKIND_PARTITIONED_TABLE) + ereport(ERROR, + (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), + errmsg("cannot mark index clustered in partitioned table"))); + /* * If the index is already marked clustered, no need to do anything. */ @@ -678,6 +695,7 @@ make_new_heap(Oid OIDOldHeap, Oid NewTableSpace, char relpersistence, false, true, true, + OIDOldHeap, NULL); Assert(OIDNewHeap != InvalidOid); @@ -738,6 +756,9 @@ copy_heap_data(Oid OIDNewHeap, Oid OIDOldHeap, Oid OIDOldIndex, bool verbose, Relation NewHeap, OldHeap, OldIndex; + Relation relRelation; + HeapTuple reltup; + Form_pg_class relform; TupleDesc oldTupDesc; TupleDesc newTupDesc; int natts; @@ -756,6 +777,7 @@ copy_heap_data(Oid OIDNewHeap, Oid OIDOldHeap, Oid OIDOldIndex, bool verbose, double num_tuples = 0, tups_vacuumed = 0, tups_recently_dead = 0; + BlockNumber num_pages; int elevel = verbose ? 
INFO : DEBUG2; PGRUsage ru0; @@ -891,7 +913,8 @@ copy_heap_data(Oid OIDNewHeap, Oid OIDOldHeap, Oid OIDOldIndex, bool verbose, /* Set up sorting if wanted */ if (use_sort) tuplesort = tuplesort_begin_cluster(oldTupDesc, OldIndex, - maintenance_work_mem, false); + maintenance_work_mem, + NULL, false); else tuplesort = NULL; @@ -1079,6 +1102,8 @@ copy_heap_data(Oid OIDNewHeap, Oid OIDOldHeap, Oid OIDOldIndex, bool verbose, /* Reset rd_toastoid just to be tidy --- it shouldn't be looked at again */ NewHeap->rd_toastoid = InvalidOid; + num_pages = RelationGetNumberOfBlocks(NewHeap); + /* Log what we did */ ereport(elevel, (errmsg("\"%s\": found %.0f removable, %.0f nonremovable row versions in %u pages", @@ -1098,6 +1123,30 @@ copy_heap_data(Oid OIDNewHeap, Oid OIDOldHeap, Oid OIDOldIndex, bool verbose, index_close(OldIndex, NoLock); heap_close(OldHeap, NoLock); heap_close(NewHeap, NoLock); + + /* Update pg_class to reflect the correct values of pages and tuples. */ + relRelation = heap_open(RelationRelationId, RowExclusiveLock); + + reltup = SearchSysCacheCopy1(RELOID, ObjectIdGetDatum(OIDNewHeap)); + if (!HeapTupleIsValid(reltup)) + elog(ERROR, "cache lookup failed for relation %u", OIDNewHeap); + relform = (Form_pg_class) GETSTRUCT(reltup); + + relform->relpages = num_pages; + relform->reltuples = num_tuples; + + /* Don't update the stats for pg_class. See swap_relation_files. */ + if (OIDOldHeap != RelationRelationId) + CatalogTupleUpdate(relRelation, &reltup->t_self, reltup); + else + CacheInvalidateRelcacheByTuple(reltup); + + /* Clean up. */ + heap_freetuple(reltup); + heap_close(relRelation, RowExclusiveLock); + + /* Make the update visible */ + CommandCounterIncrement(); } /* @@ -1493,8 +1542,8 @@ finish_heap_swap(Oid OIDOldHeap, Oid OIDNewHeap, frozenXid, cutoffMulti, mapped_tables); /* - * If it's a system catalog, queue an sinval message to flush all - * catcaches on the catalog when we reach CommandCounterIncrement. + * If it's a system catalog, queue a sinval message to flush all catcaches + * on the catalog when we reach CommandCounterIncrement. */ if (is_system_catalog) CacheInvalidateCatalog(OIDOldHeap); @@ -1535,7 +1584,7 @@ finish_heap_swap(Oid OIDOldHeap, Oid OIDNewHeap, * swap_relation_files()), thus relfrozenxid was not updated. That's * annoying because a potential reason for doing a VACUUM FULL is a * imminent or actual anti-wraparound shutdown. So, now that we can - * access the new relation using it's indices, update relfrozenxid. + * access the new relation using its indices, update relfrozenxid. * pg_class doesn't have a toast relation, so we don't need to update the * corresponding toast relation. 
Not that there's little point moving all * relfrozenxid updates here since swap_relation_files() needs to write to @@ -1623,6 +1672,16 @@ finish_heap_swap(Oid OIDOldHeap, Oid OIDNewHeap, } relation_close(newrel, NoLock); } + + /* if it's not a catalog table, clear any missing attribute settings */ + if (!is_system_catalog) + { + Relation newrel; + + newrel = heap_open(OIDOldHeap, NoLock); + RelationClearMissing(newrel); + relation_close(newrel, NoLock); + } } @@ -1714,7 +1773,7 @@ reform_and_rewrite_tuple(HeapTuple tuple, /* Be sure to null out any dropped columns */ for (i = 0; i < newTupDesc->natts; i++) { - if (newTupDesc->attrs[i]->attisdropped) + if (TupleDescAttr(newTupDesc, i)->attisdropped) isnull[i] = true; } diff --git a/src/backend/commands/collationcmds.c b/src/backend/commands/collationcmds.c index 96a6bc9bf0..8fb51e8c3d 100644 --- a/src/backend/commands/collationcmds.c +++ b/src/backend/commands/collationcmds.c @@ -3,7 +3,7 @@ * collationcmds.c * collation-related commands support code * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * @@ -22,7 +22,6 @@ #include "catalog/namespace.h" #include "catalog/objectaccess.h" #include "catalog/pg_collation.h" -#include "catalog/pg_collation_fn.h" #include "commands/alter.h" #include "commands/collationcmds.h" #include "commands/comment.h" @@ -74,7 +73,7 @@ DefineCollation(ParseState *pstate, List *names, List *parameters, bool if_not_e aclresult = pg_namespace_aclcheck(collNamespace, GetUserId(), ACL_CREATE); if (aclresult != ACLCHECK_OK) - aclcheck_error(aclresult, ACL_KIND_NAMESPACE, + aclcheck_error(aclresult, OBJECT_SCHEMA, get_namespace_name(collNamespace)); foreach(pl, parameters) @@ -82,17 +81,17 @@ DefineCollation(ParseState *pstate, List *names, List *parameters, bool if_not_e DefElem *defel = lfirst_node(DefElem, pl); DefElem **defelp; - if (pg_strcasecmp(defel->defname, "from") == 0) + if (strcmp(defel->defname, "from") == 0) defelp = &fromEl; - else if (pg_strcasecmp(defel->defname, "locale") == 0) + else if (strcmp(defel->defname, "locale") == 0) defelp = &localeEl; - else if (pg_strcasecmp(defel->defname, "lc_collate") == 0) + else if (strcmp(defel->defname, "lc_collate") == 0) defelp = &lccollateEl; - else if (pg_strcasecmp(defel->defname, "lc_ctype") == 0) + else if (strcmp(defel->defname, "lc_ctype") == 0) defelp = &lcctypeEl; - else if (pg_strcasecmp(defel->defname, "provider") == 0) + else if (strcmp(defel->defname, "provider") == 0) defelp = &providerEl; - else if (pg_strcasecmp(defel->defname, "version") == 0) + else if (strcmp(defel->defname, "version") == 0) defelp = &versionEl; else { @@ -278,7 +277,7 @@ AlterCollation(AlterCollationStmt *stmt) collOid = get_collation_oid(stmt->collname, false); if (!pg_collation_ownercheck(collOid, GetUserId())) - aclcheck_error(ACLCHECK_NOT_OWNER, ACL_KIND_COLLATION, + aclcheck_error(ACLCHECK_NOT_OWNER, OBJECT_COLLATION, NameListToString(stmt->collname)); tup = SearchSysCacheCopy1(COLLOID, ObjectIdGetDatum(collOid)); @@ -667,7 +666,16 @@ pg_import_system_collations(PG_FUNCTION_ARGS) } #endif /* READ_LOCALE_A_OUTPUT */ - /* Load collations known to ICU */ + /* + * Load collations known to ICU + * + * We use uloc_countAvailable()/uloc_getAvailable() rather than + * ucol_countAvailable()/ucol_getAvailable(). 
The former returns a full + set of language+region combinations, whereas the latter only returns + language+region combinations if they are distinct from the language's + base collation. So there might not be a de-DE or en-GB, which would be + confusing. + */ #ifdef USE_ICU { int i; @@ -676,37 +684,18 @@ pg_import_system_collations(PG_FUNCTION_ARGS) * Start the loop at -1 to sneak in the root locale without too much * code duplication. */ - for (i = -1; i < ucol_countAvailable(); i++) + for (i = -1; i < uloc_countAvailable(); i++) { - /* - * In ICU 4.2, ucol_getKeywordValuesForLocale() sometimes returns - * values that will not be accepted by uloc_toLanguageTag(). Skip - * loading keyword variants in that version. (Both - * ucol_getKeywordValuesForLocale() and uloc_toLanguageTag() are - * new in ICU 4.2, so older versions are not supported at all.) - * - * XXX We have no information about ICU 4.3 through 4.7, but we - * know the code below works with 4.8. - */ -#if U_ICU_VERSION_MAJOR_NUM > 4 || (U_ICU_VERSION_MAJOR_NUM == 4 && U_ICU_VERSION_MINOR_NUM > 2) -#define LOAD_ICU_KEYWORD_VARIANTS -#endif - const char *name; char *langtag; char *icucomment; const char *collcollate; Oid collid; -#ifdef LOAD_ICU_KEYWORD_VARIANTS - UEnumeration *en; - UErrorCode status; - const char *val; -#endif if (i == -1) name = ""; /* ICU root locale */ else - name = ucol_getAvailable(i); + name = uloc_getAvailable(i); langtag = get_icu_language_tag(name); collcollate = U_ICU_VERSION_MAJOR_NUM >= 54 ? langtag : name; @@ -735,58 +724,6 @@ pg_import_system_collations(PG_FUNCTION_ARGS) CreateComments(collid, CollationRelationId, 0, icucomment); } - - /* - * Add keyword variants, if enabled. - */ -#ifdef LOAD_ICU_KEYWORD_VARIANTS - status = U_ZERO_ERROR; - en = ucol_getKeywordValuesForLocale("collation", name, TRUE, &status); - if (U_FAILURE(status)) - ereport(ERROR, - (errmsg("could not get keyword values for locale \"%s\": %s", - name, u_errorName(status)))); - - status = U_ZERO_ERROR; - uenum_reset(en, &status); - while ((val = uenum_next(en, NULL, &status))) - { - char *localeid = psprintf("%s@collation=%s", name, val); - - langtag = get_icu_language_tag(localeid); - collcollate = U_ICU_VERSION_MAJOR_NUM >= 54 ? langtag : localeid; - - /* - * Be paranoid about not allowing any non-ASCII strings into - * pg_collation - */ - if (!is_all_ascii(langtag) || !is_all_ascii(collcollate)) - continue; - - collid = CollationCreate(psprintf("%s-x-icu", langtag), - nspid, GetUserId(), - COLLPROVIDER_ICU, -1, - collcollate, collcollate, - get_collation_actual_version(COLLPROVIDER_ICU, collcollate), - true, true); - if (OidIsValid(collid)) - { - ncreated++; - - CommandCounterIncrement(); - - icucomment = get_icu_locale_comment(name); - if (icucomment) - CreateComments(collid, CollationRelationId, 0, - icucomment); - } - } - if (U_FAILURE(status)) - ereport(ERROR, - (errmsg("could not get keyword values for locale \"%s\": %s", - name, u_errorName(status)))); - uenum_close(en); -#endif /* LOAD_ICU_KEYWORD_VARIANTS */ } } #endif /* USE_ICU */ diff --git a/src/backend/commands/comment.c b/src/backend/commands/comment.c index 1c17927c49..2f2e69b4a8 100644 --- a/src/backend/commands/comment.c +++ b/src/backend/commands/comment.c @@ -4,7 +4,7 @@ * * PostgreSQL object comments utility code.
* - * Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Copyright (c) 1996-2018, PostgreSQL Global Development Group * * IDENTIFICATION * src/backend/commands/comment.c @@ -139,7 +139,7 @@ CommentObject(CommentStmt *stmt) * existing comment for the specified key. */ void -CreateComments(Oid oid, Oid classoid, int32 subid, char *comment) +CreateComments(Oid oid, Oid classoid, int32 subid, const char *comment) { Relation description; ScanKeyData skey[3]; @@ -234,7 +234,7 @@ CreateComments(Oid oid, Oid classoid, int32 subid, char *comment) * existing comment for the specified key. */ void -CreateSharedComments(Oid oid, Oid classoid, char *comment) +CreateSharedComments(Oid oid, Oid classoid, const char *comment) { Relation shdescription; ScanKeyData skey[2]; diff --git a/src/backend/commands/constraint.c b/src/backend/commands/constraint.c index e2544e51ed..f472355b48 100644 --- a/src/backend/commands/constraint.c +++ b/src/backend/commands/constraint.c @@ -3,7 +3,7 @@ * constraint.c * PostgreSQL CONSTRAINT support code. * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * IDENTIFICATION @@ -124,7 +124,7 @@ unique_key_recheck(PG_FUNCTION_ARGS) */ slot = MakeSingleTupleTableSlot(RelationGetDescr(trigdata->tg_relation)); - ExecStoreTuple(new_row, slot, InvalidBuffer, false); + ExecStoreHeapTuple(new_row, slot, false); /* * Typically the index won't have expressions, but if it does we need an diff --git a/src/backend/commands/conversioncmds.c b/src/backend/commands/conversioncmds.c index 9861d3df22..e36fc23dd8 100644 --- a/src/backend/commands/conversioncmds.c +++ b/src/backend/commands/conversioncmds.c @@ -3,7 +3,7 @@ * conversioncmds.c * conversion creation command support code * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * @@ -18,7 +18,6 @@ #include "catalog/dependency.h" #include "catalog/indexing.h" #include "catalog/pg_conversion.h" -#include "catalog/pg_conversion_fn.h" #include "catalog/pg_type.h" #include "commands/alter.h" #include "commands/conversioncmds.h" @@ -55,7 +54,7 @@ CreateConversionCommand(CreateConversionStmt *stmt) /* Check we have creation rights in target namespace */ aclresult = pg_namespace_aclcheck(namespaceId, GetUserId(), ACL_CREATE); if (aclresult != ACLCHECK_OK) - aclcheck_error(aclresult, ACL_KIND_NAMESPACE, + aclcheck_error(aclresult, OBJECT_SCHEMA, get_namespace_name(namespaceId)); /* Check the encoding names */ @@ -90,7 +89,7 @@ CreateConversionCommand(CreateConversionStmt *stmt) /* Check we have EXECUTE rights for the function */ aclresult = pg_proc_aclcheck(funcoid, GetUserId(), ACL_EXECUTE); if (aclresult != ACLCHECK_OK) - aclcheck_error(aclresult, ACL_KIND_PROC, + aclcheck_error(aclresult, OBJECT_FUNCTION, NameListToString(func_name)); /* diff --git a/src/backend/commands/copy.c b/src/backend/commands/copy.c index a258965c20..b58a74f4e3 100644 --- a/src/backend/commands/copy.c +++ b/src/backend/commands/copy.c @@ -3,7 +3,7 @@ * copy.c * Implements the COPY utility command * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * @@ 
-17,19 +17,22 @@ #include <ctype.h> #include <unistd.h> #include <sys/stat.h> -#include <netinet/in.h> -#include <arpa/inet.h> #include "access/heapam.h" #include "access/htup_details.h" #include "access/sysattr.h" #include "access/xact.h" #include "access/xlog.h" +#include "catalog/dependency.h" +#include "catalog/pg_authid.h" #include "catalog/pg_type.h" #include "commands/copy.h" #include "commands/defrem.h" #include "commands/trigger.h" +#include "executor/execPartition.h" #include "executor/executor.h" +#include "executor/tuptable.h" +#include "foreign/fdwapi.h" #include "libpq/libpq.h" #include "libpq/pqformat.h" #include "mb/pg_wchar.h" @@ -38,12 +41,14 @@ #include "optimizer/planner.h" #include "nodes/makefuncs.h" #include "parser/parse_relation.h" +#include "port/pg_bswap.h" #include "rewrite/rewriteHandler.h" #include "storage/fd.h" #include "tcop/tcopprot.h" #include "utils/builtins.h" #include "utils/lsyscache.h" #include "utils/memutils.h" +#include "utils/partcache.h" #include "utils/portal.h" #include "utils/rel.h" #include "utils/rls.h" @@ -76,6 +81,16 @@ typedef enum EolType EOL_CRNL } EolType; +/* + * Represents the heap insert method to be used during COPY FROM. + */ +typedef enum CopyInsertMethod +{ + CIM_SINGLE, /* use heap_insert or fdw routine */ + CIM_MULTI, /* always use heap_multi_insert */ + CIM_MULTI_CONDITIONAL /* use heap_multi_insert only if valid */ +} CopyInsertMethod; + /* * This struct contains all the state variables used throughout a COPY * operation. For simplicity, we use the same struct for all variants of COPY, @@ -89,7 +104,7 @@ typedef enum EolType * characters, else we might find a false match to a trailing byte. In * supported server encodings, there is no possibility of a false match, and * it's faster to make useless comparisons to trailing bytes than it is to - * invoke pg_encoding_mblen() to skip over them. encoding_embeds_ascii is TRUE + * invoke pg_encoding_mblen() to skip over them. encoding_embeds_ascii is true * when we have to do it the hard way. */ typedef struct CopyStateData @@ -136,7 +151,7 @@ typedef struct CopyStateData /* these are just for error messages, see CopyFromErrorCallback */ const char *cur_relname; /* table name for error messages */ - int cur_lineno; /* line number for error messages */ + uint64 cur_lineno; /* line number for error messages */ const char *cur_attname; /* current att for error messages */ const char *cur_attval; /* current att value for error messages */ @@ -165,14 +180,7 @@ typedef struct CopyStateData bool volatile_defexprs; /* is any of defexprs volatile? */ List *range_table; - PartitionDispatch *partition_dispatch_info; - int num_dispatch; /* Number of entries in the above array */ - int num_partitions; /* Number of members in the following arrays */ - ResultRelInfo *partitions; /* Per partition result relation */ - TupleConversionMap **partition_tupconv_maps; - TupleTableSlot *partition_tuple_slot; TransitionCaptureState *transition_capture; - TupleConversionMap **transition_tupconv_maps; /* * These variables are used to reduce overhead in textual COPY FROM.
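The CopyInsertMethod enum introduced above is only half of the story: the choice between the three methods is made further down, in the rewritten CopyFrom(), from properties of the COPY target. The following standalone sketch condenses that decision tree; the struct and function names here are illustrative stand-ins rather than PostgreSQL declarations, and the transition-table condition is simplified to a single flag.

#include <stdbool.h>

/* Illustrative stand-ins -- not PostgreSQL declarations. */
typedef enum InsertMethodSketch
{
	SKETCH_CIM_SINGLE,				/* one heap_insert()/FDW call per tuple */
	SKETCH_CIM_MULTI,				/* always batch via heap_multi_insert() */
	SKETCH_CIM_MULTI_CONDITIONAL	/* batch per leaf partition, when allowed */
} InsertMethodSketch;

typedef struct TargetRelProps
{
	bool	has_before_or_instead_row_triggers;
	bool	is_partitioned;
	bool	has_insert_transition_tables;	/* statement triggers capturing new rows */
	bool	is_foreign_table;
	bool	has_volatile_default_exprs;
} TargetRelProps;

/*
 * Condensed form of the checks CopyFrom() performs below: anything that can
 * observe or alter rows mid-batch (row triggers, volatile defaults) or that
 * bypasses the local heap (foreign tables) forces single inserts; partitioned
 * targets get the conditional method so each leaf can be re-checked.
 */
InsertMethodSketch
choose_insert_method(const TargetRelProps *rel)
{
	if (rel->has_before_or_instead_row_triggers)
		return SKETCH_CIM_SINGLE;
	if (rel->is_partitioned && rel->has_insert_transition_tables)
		return SKETCH_CIM_SINGLE;
	if (rel->is_foreign_table || rel->has_volatile_default_exprs)
		return SKETCH_CIM_SINGLE;

	return rel->is_partitioned ? SKETCH_CIM_MULTI_CONDITIONAL : SKETCH_CIM_MULTI;
}

In the patch itself the same conditions are tested inline against resultRelInfo->ri_TrigDesc, resultRelInfo->ri_FdwRoutine and cstate->volatile_defexprs rather than through a helper like this.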
@@ -309,7 +317,7 @@ static void CopyFromInsertBatch(CopyState cstate, EState *estate, ResultRelInfo *resultRelInfo, TupleTableSlot *myslot, BulkInsertState bistate, int nBufferedTuples, HeapTuple *bufferedTuples, - int firstBufferedLineNo); + uint64 firstBufferedLineNo); static bool CopyReadLine(CopyState cstate); static bool CopyReadLineText(CopyState cstate); static int CopyReadAttributesText(CopyState cstate); @@ -358,9 +366,9 @@ SendCopyBegin(CopyState cstate) pq_beginmessage(&buf, 'H'); pq_sendbyte(&buf, format); /* overall format */ - pq_sendint(&buf, natts, 2); + pq_sendint16(&buf, natts); for (i = 0; i < natts; i++) - pq_sendint(&buf, format, 2); /* per-column formats */ + pq_sendint16(&buf, format); /* per-column formats */ pq_endmessage(&buf); cstate->copy_dest = COPY_NEW_FE; } @@ -391,9 +399,9 @@ ReceiveCopyBegin(CopyState cstate) pq_beginmessage(&buf, 'G'); pq_sendbyte(&buf, format); /* overall format */ - pq_sendint(&buf, natts, 2); + pq_sendint16(&buf, natts); for (i = 0; i < natts; i++) - pq_sendint(&buf, format, 2); /* per-column formats */ + pq_sendint16(&buf, format); /* per-column formats */ pq_endmessage(&buf); cstate->copy_dest = COPY_NEW_FE; cstate->fe_msgbuf = makeStringInfo(); @@ -671,7 +679,7 @@ CopySendInt32(CopyState cstate, int32 val) { uint32 buf; - buf = htonl((uint32) val); + buf = pg_hton32((uint32) val); CopySendData(cstate, &buf, sizeof(buf)); } @@ -690,7 +698,7 @@ CopyGetInt32(CopyState cstate, int32 *val) *val = 0; /* suppress compiler warning */ return false; } - *val = (int32) ntohl(buf); + *val = (int32) pg_ntoh32(buf); return true; } @@ -702,7 +710,7 @@ CopySendInt16(CopyState cstate, int16 val) { uint16 buf; - buf = htons((uint16) val); + buf = pg_hton16((uint16) val); CopySendData(cstate, &buf, sizeof(buf)); } @@ -719,7 +727,7 @@ CopyGetInt16(CopyState cstate, int16 *val) *val = 0; /* suppress compiler warning */ return false; } - *val = (int16) ntohs(buf); + *val = (int16) pg_ntoh16(buf); return true; } @@ -727,7 +735,7 @@ CopyGetInt16(CopyState cstate, int16 *val) /* * CopyLoadRawBuf loads some more data into raw_buf * - * Returns TRUE if able to obtain at least one more byte, else FALSE. + * Returns true if able to obtain at least one more byte, else false. * * If raw_buf_index < raw_buf_len, the unprocessed bytes are transferred * down to the start of the buffer and then we load more data after that. @@ -764,7 +772,7 @@ CopyLoadRawBuf(CopyState cstate) * DoCopy executes the SQL COPY statement * * Either unload or reload contents of table <relation>, depending on <from>. - * (<from> = TRUE means we are inserting into the table.) In the "TO" case + * (<from> = true means we are inserting into the table.) In the "TO" case * we also support copying the output of an arbitrary SELECT, INSERT, UPDATE * or DELETE query. * @@ -773,8 +781,8 @@ CopyLoadRawBuf(CopyState cstate) * input/output stream. The latter could be either stdin/stdout or a * socket, depending on whether we're running under Postmaster control. * - * Do not allow a Postgres user without superuser privilege to read from - * or write to a file. + * Do not allow a Postgres user without the 'pg_access_server_files' role to + * read from or write to a file. * * Do not allow the copy if user doesn't have proper permission to access * the table or the specifically requested columns. @@ -791,39 +799,56 @@ DoCopy(ParseState *pstate, const CopyStmt *stmt, Oid relid; RawStmt *query = NULL; - /* Disallow COPY to/from file or program except to superusers.
*/ - if (!pipe && !superuser()) + /* + * Disallow COPY to/from file or program except to users with the + * appropriate role. + */ + if (!pipe) { if (stmt->is_program) - ereport(ERROR, - (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE), - errmsg("must be superuser to COPY to or from an external program"), - errhint("Anyone can COPY to stdout or from stdin. " - "psql's \\copy command also works for anyone."))); + { + if (!is_member_of_role(GetUserId(), DEFAULT_ROLE_EXECUTE_SERVER_PROGRAM)) + ereport(ERROR, + (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE), + errmsg("must be superuser or a member of the pg_execute_server_program role to COPY to or from an external program"), + errhint("Anyone can COPY to stdout or from stdin. " + "psql's \\copy command also works for anyone."))); + } else - ereport(ERROR, - (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE), - errmsg("must be superuser to COPY to or from a file"), - errhint("Anyone can COPY to stdout or from stdin. " - "psql's \\copy command also works for anyone."))); + { + if (is_from && !is_member_of_role(GetUserId(), DEFAULT_ROLE_READ_SERVER_FILES)) + ereport(ERROR, + (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE), + errmsg("must be superuser or a member of the pg_read_server_files role to COPY from a file"), + errhint("Anyone can COPY to stdout or from stdin. " + "psql's \\copy command also works for anyone."))); + + if (!is_from && !is_member_of_role(GetUserId(), DEFAULT_ROLE_WRITE_SERVER_FILES)) + ereport(ERROR, + (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE), + errmsg("must be superuser or a member of the pg_write_server_files role to COPY to a file"), + errhint("Anyone can COPY to stdout or from stdin. " + "psql's \\copy command also works for anyone."))); + } } if (stmt->relation) { + LOCKMODE lockmode = is_from ? RowExclusiveLock : AccessShareLock; + RangeTblEntry *rte; TupleDesc tupDesc; List *attnums; ListCell *cur; - RangeTblEntry *rte; Assert(!stmt->query); /* Open and lock the relation, using the appropriate lock type. */ - rel = heap_openrv(stmt->relation, - (is_from ? RowExclusiveLock : AccessShareLock)); + rel = heap_openrv(stmt->relation, lockmode); relid = RelationGetRelid(rel); - rte = addRangeTableEntryForRelation(pstate, rel, NULL, false, false); + rte = addRangeTableEntryForRelation(pstate, rel, lockmode, + NULL, false, false); rte->requiredPerms = (is_from ? ACL_INSERT : ACL_SELECT); tupDesc = RelationGetDescr(rel); @@ -1415,59 +1440,6 @@ BeginCopy(ParseState *pstate, (errcode(ERRCODE_UNDEFINED_COLUMN), errmsg("table \"%s\" does not have OIDs", RelationGetRelationName(cstate->rel)))); - - /* - * If there are any triggers with transition tables on the named - * relation, we need to be prepared to capture transition tuples. - */ - cstate->transition_capture = MakeTransitionCaptureState(rel->trigdesc); - - /* Initialize state for CopyFrom tuple routing. 
*/ - if (is_from && rel->rd_rel->relkind == RELKIND_PARTITIONED_TABLE) - { - PartitionDispatch *partition_dispatch_info; - ResultRelInfo *partitions; - TupleConversionMap **partition_tupconv_maps; - TupleTableSlot *partition_tuple_slot; - int num_parted, - num_partitions; - - ExecSetupPartitionTupleRouting(rel, - 1, - &partition_dispatch_info, - &partitions, - &partition_tupconv_maps, - &partition_tuple_slot, - &num_parted, &num_partitions); - cstate->partition_dispatch_info = partition_dispatch_info; - cstate->num_dispatch = num_parted; - cstate->partitions = partitions; - cstate->num_partitions = num_partitions; - cstate->partition_tupconv_maps = partition_tupconv_maps; - cstate->partition_tuple_slot = partition_tuple_slot; - - /* - * If we are capturing transition tuples, they may need to be - * converted from partition format back to partitioned table - * format (this is only ever necessary if a BEFORE trigger - * modifies the tuple). - */ - if (cstate->transition_capture != NULL) - { - int i; - - cstate->transition_tupconv_maps = (TupleConversionMap **) - palloc0(sizeof(TupleConversionMap *) * - cstate->num_partitions); - for (i = 0; i < cstate->num_partitions; ++i) - { - cstate->transition_tupconv_maps[i] = - convert_tuples_by_name(RelationGetDescr(cstate->partitions[i].ri_RelationDesc), - RelationGetDescr(rel), - gettext_noop("could not convert row type")); - } - } - } } else { @@ -1636,12 +1608,13 @@ BeginCopy(ParseState *pstate, foreach(cur, attnums) { int attnum = lfirst_int(cur); + Form_pg_attribute attr = TupleDescAttr(tupDesc, attnum - 1); if (!list_member_int(cstate->attnumlist, attnum)) ereport(ERROR, (errcode(ERRCODE_INVALID_COLUMN_REFERENCE), errmsg("FORCE_QUOTE column \"%s\" not referenced by COPY", - NameStr(tupDesc->attrs[attnum - 1]->attname)))); + NameStr(attr->attname)))); cstate->force_quote_flags[attnum - 1] = true; } } @@ -1658,12 +1631,13 @@ BeginCopy(ParseState *pstate, foreach(cur, attnums) { int attnum = lfirst_int(cur); + Form_pg_attribute attr = TupleDescAttr(tupDesc, attnum - 1); if (!list_member_int(cstate->attnumlist, attnum)) ereport(ERROR, (errcode(ERRCODE_INVALID_COLUMN_REFERENCE), errmsg("FORCE_NOT_NULL column \"%s\" not referenced by COPY", - NameStr(tupDesc->attrs[attnum - 1]->attname)))); + NameStr(attr->attname)))); cstate->force_notnull_flags[attnum - 1] = true; } } @@ -1680,12 +1654,13 @@ BeginCopy(ParseState *pstate, foreach(cur, attnums) { int attnum = lfirst_int(cur); + Form_pg_attribute attr = TupleDescAttr(tupDesc, attnum - 1); if (!list_member_int(cstate->attnumlist, attnum)) ereport(ERROR, (errcode(ERRCODE_INVALID_COLUMN_REFERENCE), errmsg("FORCE_NULL column \"%s\" not referenced by COPY", - NameStr(tupDesc->attrs[attnum - 1]->attname)))); + NameStr(attr->attname)))); cstate->force_null_flags[attnum - 1] = true; } } @@ -1703,12 +1678,13 @@ BeginCopy(ParseState *pstate, foreach(cur, attnums) { int attnum = lfirst_int(cur); + Form_pg_attribute attr = TupleDescAttr(tupDesc, attnum - 1); if (!list_member_int(cstate->attnumlist, attnum)) ereport(ERROR, (errcode(ERRCODE_INVALID_COLUMN_REFERENCE), errmsg_internal("selected column \"%s\" not referenced by COPY", - NameStr(tupDesc->attrs[attnum - 1]->attname)))); + NameStr(attr->attname)))); cstate->convert_select_flags[attnum - 1] = true; } } @@ -1875,7 +1851,16 @@ BeginCopyTo(ParseState *pstate, errmsg("relative path not allowed for COPY to file"))); oumask = umask(S_IWGRP | S_IWOTH); - cstate->copy_file = AllocateFile(cstate->filename, PG_BINARY_W); + PG_TRY(); + { + cstate->copy_file = 
AllocateFile(cstate->filename, PG_BINARY_W); + } + PG_CATCH(); + { + umask(oumask); + PG_RE_THROW(); + } + PG_END_TRY(); umask(oumask); if (cstate->copy_file == NULL) { @@ -1972,7 +1957,6 @@ CopyTo(CopyState cstate) { TupleDesc tupDesc; int num_phys_attrs; - Form_pg_attribute *attr; ListCell *cur; uint64 processed; @@ -1980,7 +1964,6 @@ CopyTo(CopyState cstate) tupDesc = RelationGetDescr(cstate->rel); else tupDesc = cstate->queryDesc->tupDesc; - attr = tupDesc->attrs; num_phys_attrs = tupDesc->natts; cstate->null_print_client = cstate->null_print; /* default */ @@ -1994,13 +1977,14 @@ CopyTo(CopyState cstate) int attnum = lfirst_int(cur); Oid out_func_oid; bool isvarlena; + Form_pg_attribute attr = TupleDescAttr(tupDesc, attnum - 1); if (cstate->binary) - getTypeBinaryOutputInfo(attr[attnum - 1]->atttypid, + getTypeBinaryOutputInfo(attr->atttypid, &out_func_oid, &isvarlena); else - getTypeOutputInfo(attr[attnum - 1]->atttypid, + getTypeOutputInfo(attr->atttypid, &out_func_oid, &isvarlena); fmgr_info(out_func_oid, &cstate->out_functions[attnum - 1]); @@ -2057,7 +2041,7 @@ CopyTo(CopyState cstate) CopySendChar(cstate, cstate->delim[0]); hdr_delim = true; - colname = NameStr(attr[attnum - 1]->attname); + colname = NameStr(TupleDescAttr(tupDesc, attnum - 1)->attname); CopyAttributeOutCSV(cstate, colname, false, list_length(cstate->attnumlist) == 1); @@ -2218,17 +2202,21 @@ void CopyFromErrorCallback(void *arg) { CopyState cstate = (CopyState) arg; + char curlineno_str[32]; + + snprintf(curlineno_str, sizeof(curlineno_str), UINT64_FORMAT, + cstate->cur_lineno); if (cstate->binary) { /* can't usefully display the data */ if (cstate->cur_attname) - errcontext("COPY %s, line %d, column %s", - cstate->cur_relname, cstate->cur_lineno, + errcontext("COPY %s, line %s, column %s", + cstate->cur_relname, curlineno_str, cstate->cur_attname); else - errcontext("COPY %s, line %d", - cstate->cur_relname, cstate->cur_lineno); + errcontext("COPY %s, line %s", + cstate->cur_relname, curlineno_str); } else { @@ -2238,16 +2226,16 @@ CopyFromErrorCallback(void *arg) char *attval; attval = limit_printout_length(cstate->cur_attval); - errcontext("COPY %s, line %d, column %s: \"%s\"", - cstate->cur_relname, cstate->cur_lineno, + errcontext("COPY %s, line %s, column %s: \"%s\"", + cstate->cur_relname, curlineno_str, cstate->cur_attname, attval); pfree(attval); } else if (cstate->cur_attname) { /* error is relevant to a particular column, value is NULL */ - errcontext("COPY %s, line %d, column %s: null input", - cstate->cur_relname, cstate->cur_lineno, + errcontext("COPY %s, line %s, column %s: null input", + cstate->cur_relname, curlineno_str, cstate->cur_attname); } else @@ -2268,14 +2256,14 @@ CopyFromErrorCallback(void *arg) char *lineval; lineval = limit_printout_length(cstate->line_buf.data); - errcontext("COPY %s, line %d: \"%s\"", - cstate->cur_relname, cstate->cur_lineno, lineval); + errcontext("COPY %s, line %s: \"%s\"", + cstate->cur_relname, curlineno_str, lineval); pfree(lineval); } else { - errcontext("COPY %s, line %d", - cstate->cur_relname, cstate->cur_lineno); + errcontext("COPY %s, line %s", + cstate->cur_relname, curlineno_str); } } } @@ -2327,34 +2315,45 @@ CopyFrom(CopyState cstate) Datum *values; bool *nulls; ResultRelInfo *resultRelInfo; - ResultRelInfo *saved_resultRelInfo = NULL; + ResultRelInfo *target_resultRelInfo; EState *estate = CreateExecutorState(); /* for ExecConstraints() */ + ModifyTableState *mtstate; ExprContext *econtext; TupleTableSlot *myslot; MemoryContext oldcontext = 
CurrentMemoryContext; + PartitionTupleRouting *proute = NULL; + ExprContext *secondaryExprContext = NULL; ErrorContextCallback errcallback; CommandId mycid = GetCurrentCommandId(true); int hi_options = 0; /* start with default heap_insert options */ BulkInsertState bistate; + CopyInsertMethod insertMethod; uint64 processed = 0; - bool useHeapMultiInsert; int nBufferedTuples = 0; int prev_leaf_part_index = -1; + bool has_before_insert_row_trig; + bool has_instead_insert_row_trig; + bool leafpart_use_multi_insert = false; #define MAX_BUFFERED_TUPLES 1000 +#define RECHECK_MULTI_INSERT_THRESHOLD 1000 HeapTuple *bufferedTuples = NULL; /* initialize to silence warning */ Size bufferedTuplesSize = 0; - int firstBufferedLineNo = 0; + uint64 firstBufferedLineNo = 0; + uint64 lastPartitionSampleLineNo = 0; + uint64 nPartitionChanges = 0; + double avgTuplesPerPartChange = 0; Assert(cstate->rel); /* - * The target must be a plain relation or have an INSTEAD OF INSERT row - * trigger. (Currently, such triggers are only allowed on views, so we - * only hint about them in the view case.) + * The target must be a plain, foreign, or partitioned relation, or have + * an INSTEAD OF INSERT row trigger. (Currently, such triggers are only + * allowed on views, so we only hint about them in the view case.) */ if (cstate->rel->rd_rel->relkind != RELKIND_RELATION && + cstate->rel->rd_rel->relkind != RELKIND_FOREIGN_TABLE && cstate->rel->rd_rel->relkind != RELKIND_PARTITIONED_TABLE && !(cstate->rel->trigdesc && cstate->rel->trigdesc->trig_insert_instead_row)) @@ -2370,11 +2369,6 @@ CopyFrom(CopyState cstate) (errcode(ERRCODE_WRONG_OBJECT_TYPE), errmsg("cannot copy to materialized view \"%s\"", RelationGetRelationName(cstate->rel)))); - else if (cstate->rel->rd_rel->relkind == RELKIND_FOREIGN_TABLE) - ereport(ERROR, - (errcode(ERRCODE_WRONG_OBJECT_TYPE), - errmsg("cannot copy to foreign table \"%s\"", - RelationGetRelationName(cstate->rel)))); else if (cstate->rel->rd_rel->relkind == RELKIND_SEQUENCE) ereport(ERROR, (errcode(ERRCODE_WRONG_OBJECT_TYPE), @@ -2436,13 +2430,25 @@ CopyFrom(CopyState cstate) /* * Optimize if new relfilenode was created in this subxact or one of its * committed children and we won't see those rows later as part of an - * earlier scan or command. This ensures that if this subtransaction - * aborts then the frozen rows won't be visible after xact cleanup. Note + * earlier scan or command. The subxact test ensures that if this subxact + * aborts then the frozen rows won't be visible after xact cleanup. Note * that the stronger test of exactly which subtransaction created it is - * crucial for correctness of this optimization. + * crucial for correctness of this optimization. The test for an earlier + * scan or command tolerates false negatives. FREEZE causes other sessions + * to see rows they would not see under MVCC, and a false negative merely + * spreads that anomaly to the current session. */ if (cstate->freeze) { + /* + * Tolerate one registration for the benefit of FirstXactSnapshot. + * Scan-bearing queries generally create at least two registrations, + * though relying on that is fragile, as is ignoring ActiveSnapshot. + * Clear CatalogSnapshot to avoid counting its registration. We'll + * still detect ongoing catalog scans, each of which separately + * registers the snapshot it uses. 
+ */ + InvalidateCatalogSnapshot(); if (!ThereAreNoPriorRegisteredSnapshots() || !ThereAreNoReadyPortals()) ereport(ERROR, (errcode(ERRCODE_INVALID_TRANSACTION_STATE), @@ -2465,49 +2471,160 @@ CopyFrom(CopyState cstate) resultRelInfo = makeNode(ResultRelInfo); InitResultRelInfo(resultRelInfo, cstate->rel, - 1, /* dummy rangetable index */ + 1, /* must match rel's position in range_table */ NULL, 0); + target_resultRelInfo = resultRelInfo; + + /* Verify the named relation is a valid target for INSERT */ + CheckValidResultRel(resultRelInfo, CMD_INSERT); ExecOpenIndices(resultRelInfo, false); estate->es_result_relations = resultRelInfo; estate->es_num_result_relations = 1; estate->es_result_relation_info = resultRelInfo; - estate->es_range_table = cstate->range_table; + + ExecInitRangeTable(estate, cstate->range_table); /* Set up a tuple slot too */ - myslot = ExecInitExtraTupleSlot(estate); - ExecSetSlotDescriptor(myslot, tupDesc); + myslot = ExecInitExtraTupleSlot(estate, tupDesc); /* Triggers might need a slot as well */ - estate->es_trig_tuple_slot = ExecInitExtraTupleSlot(estate); + estate->es_trig_tuple_slot = ExecInitExtraTupleSlot(estate, NULL); + + /* + * Set up a ModifyTableState so we can let FDW(s) init themselves for + * foreign-table result relation(s). + */ + mtstate = makeNode(ModifyTableState); + mtstate->ps.plan = NULL; + mtstate->ps.state = estate; + mtstate->operation = CMD_INSERT; + mtstate->resultRelInfo = estate->es_result_relations; + + if (resultRelInfo->ri_FdwRoutine != NULL && + resultRelInfo->ri_FdwRoutine->BeginForeignInsert != NULL) + resultRelInfo->ri_FdwRoutine->BeginForeignInsert(mtstate, + resultRelInfo); + + /* Prepare to catch AFTER triggers. */ + AfterTriggerBeginQuery(); + + /* + * If there are any triggers with transition tables on the named relation, + * we need to be prepared to capture transition tuples. + */ + cstate->transition_capture = + MakeTransitionCaptureState(cstate->rel->trigdesc, + RelationGetRelid(cstate->rel), + CMD_INSERT); + + /* + * If the named relation is a partitioned table, initialize state for + * CopyFrom tuple routing. + */ + if (cstate->rel->rd_rel->relkind == RELKIND_PARTITIONED_TABLE) + { + proute = ExecSetupPartitionTupleRouting(NULL, cstate->rel); + + /* + * If we are capturing transition tuples, they may need to be + * converted from partition format back to partitioned table format + * (this is only ever necessary if a BEFORE trigger modifies the + * tuple). + */ + if (cstate->transition_capture != NULL) + ExecSetupChildParentMapForLeaf(proute); + } /* * It's more efficient to prepare a bunch of tuples for insertion, and * insert them in one heap_multi_insert() call, than call heap_insert() - * separately for every tuple. However, we can't do that if there are - * BEFORE/INSTEAD OF triggers, or we need to evaluate volatile default - * expressions. Such triggers or expressions might query the table we're - * inserting to, and act differently if the tuples that have already been - * processed and prepared for insertion are not there. We also can't do - * it if the table is partitioned. + * separately for every tuple. However, there are a number of reasons why + * we might not be able to do this. These are explained below. 
*/ - if ((resultRelInfo->ri_TrigDesc != NULL && - (resultRelInfo->ri_TrigDesc->trig_insert_before_row || - resultRelInfo->ri_TrigDesc->trig_insert_instead_row)) || - cstate->partition_dispatch_info != NULL || - cstate->volatile_defexprs) + if (resultRelInfo->ri_TrigDesc != NULL && + (resultRelInfo->ri_TrigDesc->trig_insert_before_row || + resultRelInfo->ri_TrigDesc->trig_insert_instead_row)) { - useHeapMultiInsert = false; + /* + * Can't support multi-inserts when there are any BEFORE/INSTEAD OF + * triggers on the table. Such triggers might query the table we're + * inserting into and act differently if the tuples that have already + * been processed and prepared for insertion are not there. + */ + insertMethod = CIM_SINGLE; + } + else if (proute != NULL && resultRelInfo->ri_TrigDesc != NULL && + resultRelInfo->ri_TrigDesc->trig_insert_new_table) + { + /* + * For partitioned tables we can't support multi-inserts when there + * are any statement level insert triggers. It might be possible to + * allow partitioned tables with such triggers in the future, but for + * now, CopyFromInsertBatch expects that any before row insert and + * statement level insert triggers are on the same relation. + */ + insertMethod = CIM_SINGLE; + } + else if (resultRelInfo->ri_FdwRoutine != NULL || + cstate->volatile_defexprs) + { + /* + * Can't support multi-inserts to foreign tables or if there are any + * volatile default expressions in the table. Similarly to the + * trigger case above, such expressions may query the table we're + * inserting into. + * + * Note: It does not matter if any partitions have any volatile + * default expressions as we use the defaults from the target of the + * COPY command. + */ + insertMethod = CIM_SINGLE; } else { - useHeapMultiInsert = true; + /* + * For partitioned tables, we may still be able to perform bulk + * inserts for sets of consecutive tuples which belong to the same + * partition. However, the possibility of this depends on which types + * of triggers exist on the partition. We must disable bulk inserts + * if the partition is a foreign table or it has any before row insert + * or insert instead triggers (same as we checked above for the parent + * table). Since the partition's resultRelInfos are initialized only + * when we actually need to insert the first tuple into them, we must + * have the intermediate insert method of CIM_MULTI_CONDITIONAL to + * flag that we must later determine if we can use bulk-inserts for + * the partition being inserted into. + * + * Normally, when performing bulk inserts we just flush the insert + * buffer whenever it becomes full, but for the partitioned table + * case, we flush it whenever the current tuple does not belong to the + * same partition as the previous tuple, and since we flush the + * previous partition's buffer once the new tuple has already been + * built, we're unable to reset the estate since we'd free the memory + * in which the new tuple is stored. To work around this we maintain + * a secondary expression context and alternate between these when the + * partition changes. This does mean we do store the first new tuple + * in a different context than subsequent tuples, but that does not + * matter, providing we don't free anything while it's still needed. + */ + if (proute) + { + insertMethod = CIM_MULTI_CONDITIONAL; + secondaryExprContext = CreateExprContext(estate); + } + else + insertMethod = CIM_MULTI; + bufferedTuples = palloc(MAX_BUFFERED_TUPLES * sizeof(HeapTuple)); } - /* Prepare to catch AFTER triggers. 
*/ - AfterTriggerBeginQuery(); + has_before_insert_row_trig = (resultRelInfo->ri_TrigDesc && + resultRelInfo->ri_TrigDesc->trig_insert_before_row); + + has_instead_insert_row_trig = (resultRelInfo->ri_TrigDesc && + resultRelInfo->ri_TrigDesc->trig_insert_instead_row); /* * Check BEFORE STATEMENT insertion triggers. It's debatable whether we @@ -2563,17 +2680,17 @@ CopyFrom(CopyState cstate) * Constraints might reference the tableoid column, so initialize * t_tableOid before evaluating them. */ - tuple->t_tableOid = RelationGetRelid(resultRelInfo->ri_RelationDesc); + tuple->t_tableOid = RelationGetRelid(target_resultRelInfo->ri_RelationDesc); /* Triggers and stuff need to be invoked in query context. */ MemoryContextSwitchTo(oldcontext); /* Place tuple in tuple slot --- but slot shouldn't free it */ slot = myslot; - ExecStoreTuple(tuple, slot, InvalidBuffer, false); + ExecStoreHeapTuple(tuple, slot, false); /* Determine the partition to heap_insert the tuple into */ - if (cstate->partition_dispatch_info) + if (proute) { int leaf_part_index; TupleConversionMap *map; @@ -2586,37 +2703,131 @@ CopyFrom(CopyState cstate) * will get us the ResultRelInfo and TupleConversionMap for the * partition, respectively. */ - leaf_part_index = ExecFindPartition(resultRelInfo, - cstate->partition_dispatch_info, + leaf_part_index = ExecFindPartition(target_resultRelInfo, + proute->partition_dispatch_info, slot, estate); Assert(leaf_part_index >= 0 && - leaf_part_index < cstate->num_partitions); + leaf_part_index < proute->num_partitions); - /* - * If this tuple is mapped to a partition that is not same as the - * previous one, we'd better make the bulk insert mechanism gets a - * new buffer. - */ if (prev_leaf_part_index != leaf_part_index) { + /* Check if we can multi-insert into this partition */ + if (insertMethod == CIM_MULTI_CONDITIONAL) + { + /* + * When performing bulk-inserts into partitioned tables we + * must insert the tuples seen so far to the heap whenever + * the partition changes. + */ + if (nBufferedTuples > 0) + { + ExprContext *swapcontext; + ResultRelInfo *presultRelInfo; + + presultRelInfo = proute->partitions[prev_leaf_part_index]; + + CopyFromInsertBatch(cstate, estate, mycid, hi_options, + presultRelInfo, myslot, bistate, + nBufferedTuples, bufferedTuples, + firstBufferedLineNo); + nBufferedTuples = 0; + bufferedTuplesSize = 0; + + Assert(secondaryExprContext); + + /* + * Normally we reset the per-tuple context whenever + * the bufferedTuples array is empty at the beginning + * of the loop, however, it is possible since we flush + * the buffer here that the buffer is never empty at + * the start of the loop. To prevent the per-tuple + * context from never being reset we maintain a second + * context and alternate between them when the + * partition changes. We can now reset + * secondaryExprContext as this is no longer needed, + * since we just flushed any tuples stored in it. We + * also now switch over to the other context. This + * does mean that the first tuple in the buffer won't + * be in the same context as the others, but that does + * not matter since we only reset it after the flush. + */ + ReScanExprContext(secondaryExprContext); + + swapcontext = secondaryExprContext; + secondaryExprContext = estate->es_per_tuple_exprcontext; + estate->es_per_tuple_exprcontext = swapcontext; + } + + nPartitionChanges++; + + /* + * Here we adaptively enable multi-inserts based on the + * average number of tuples from recent multi-insert + * batches. 
We recalculate the average every + * RECHECK_MULTI_INSERT_THRESHOLD tuples instead of taking + * the average over the whole copy. This allows us to + * enable multi-inserts when we get periods in the copy + * stream that have tuples commonly belonging to the same + * partition, and disable when the partition is changing + * too often. + */ + if (unlikely(lastPartitionSampleLineNo <= (cstate->cur_lineno - + RECHECK_MULTI_INSERT_THRESHOLD) + && cstate->cur_lineno >= RECHECK_MULTI_INSERT_THRESHOLD)) + { + avgTuplesPerPartChange = + (cstate->cur_lineno - lastPartitionSampleLineNo) / + (double) nPartitionChanges; + + lastPartitionSampleLineNo = cstate->cur_lineno; + nPartitionChanges = 0; + } + } + + /* + * Overwrite resultRelInfo with the corresponding partition's + * one. + */ + resultRelInfo = proute->partitions[leaf_part_index]; + if (unlikely(resultRelInfo == NULL)) + { + resultRelInfo = ExecInitPartitionInfo(mtstate, + target_resultRelInfo, + proute, estate, + leaf_part_index); + proute->partitions[leaf_part_index] = resultRelInfo; + Assert(resultRelInfo != NULL); + } + + /* Determine which triggers exist on this partition */ + has_before_insert_row_trig = (resultRelInfo->ri_TrigDesc && + resultRelInfo->ri_TrigDesc->trig_insert_before_row); + + has_instead_insert_row_trig = (resultRelInfo->ri_TrigDesc && + resultRelInfo->ri_TrigDesc->trig_insert_instead_row); + + /* + * Tests have shown that using multi-inserts when the + * partition changes on every tuple slightly decreases the + * performance, however, there are benefits even when only + * some batches have just 2 tuples, so let's enable + * multi-inserts even when the average is quite low. + */ + leafpart_use_multi_insert = insertMethod == CIM_MULTI_CONDITIONAL && + avgTuplesPerPartChange >= 1.3 && + !has_before_insert_row_trig && + !has_instead_insert_row_trig && + resultRelInfo->ri_FdwRoutine == NULL; + + /* + * We'd better make the bulk insert mechanism gets a new + * buffer when the partition being inserted into changes. + */ ReleaseBulkInsertStatePin(bistate); prev_leaf_part_index = leaf_part_index; } - /* - * Save the old ResultRelInfo and switch to the one corresponding - * to the selected partition. - */ - saved_resultRelInfo = resultRelInfo; - resultRelInfo = cstate->partitions + leaf_part_index; - - /* We do not yet have a way to insert into a foreign partition */ - if (resultRelInfo->ri_FdwRoutine) - ereport(ERROR, - (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("cannot route inserted tuples to a foreign table"))); - /* * For ExecInsertIndexTuples() to work on the partition's indexes */ @@ -2628,18 +2839,17 @@ CopyFrom(CopyState cstate) */ if (cstate->transition_capture != NULL) { - if (resultRelInfo->ri_TrigDesc && - (resultRelInfo->ri_TrigDesc->trig_insert_before_row || - resultRelInfo->ri_TrigDesc->trig_insert_instead_row)) + if (has_before_insert_row_trig) { /* - * If there are any BEFORE or INSTEAD triggers on the - * partition, we'll have to be ready to convert their - * result back to tuplestore format. + * If there are any BEFORE triggers on the partition, + * we'll have to be ready to convert their result back to + * tuplestore format. */ cstate->transition_capture->tcs_original_insert_tuple = NULL; cstate->transition_capture->tcs_map = - cstate->transition_tupconv_maps[leaf_part_index]; + TupConvMapForLeaf(proute, target_resultRelInfo, + leaf_part_index); } else { @@ -2656,22 +2866,24 @@ CopyFrom(CopyState cstate) * We might need to convert from the parent rowtype to the * partition rowtype. 
*/ - map = cstate->partition_tupconv_maps[leaf_part_index]; - if (map) + map = proute->parent_child_tupconv_maps[leaf_part_index]; + if (map != NULL) { - Relation partrel = resultRelInfo->ri_RelationDesc; + TupleTableSlot *new_slot; + MemoryContext oldcontext; - tuple = do_convert_tuple(tuple, map); + Assert(proute->partition_tuple_slots != NULL && + proute->partition_tuple_slots[leaf_part_index] != NULL); + new_slot = proute->partition_tuple_slots[leaf_part_index]; + slot = execute_attr_map_slot(map->attrMap, slot, new_slot); /* - * We must use the partition's tuple descriptor from this - * point on. Use a dedicated slot from this point on until - * we're finished dealing with the partition. + * Get the tuple in the per-tuple context, so that it will be + * freed after each batch insert. */ - slot = cstate->partition_tuple_slot; - Assert(slot != NULL); - ExecSetSlotDescriptor(slot, RelationGetDescr(partrel)); - ExecStoreTuple(tuple, slot, InvalidBuffer, true); + oldcontext = MemoryContextSwitchTo(GetPerTupleMemoryContext(estate)); + tuple = ExecCopySlotTuple(slot); + MemoryContextSwitchTo(oldcontext); } tuple->t_tableOid = RelationGetRelid(resultRelInfo->ri_RelationDesc); @@ -2680,8 +2892,7 @@ CopyFrom(CopyState cstate) skip_tuple = false; /* BEFORE ROW INSERT Triggers */ - if (resultRelInfo->ri_TrigDesc && - resultRelInfo->ri_TrigDesc->trig_insert_before_row) + if (has_before_insert_row_trig) { slot = ExecBRInsertTriggers(estate, resultRelInfo, slot); @@ -2693,8 +2904,7 @@ CopyFrom(CopyState cstate) if (!skip_tuple) { - if (resultRelInfo->ri_TrigDesc && - resultRelInfo->ri_TrigDesc->trig_insert_instead_row) + if (has_instead_insert_row_trig) { /* Pass the data to the INSTEAD ROW INSERT trigger */ ExecIRInsertTriggers(estate, resultRelInfo, slot); @@ -2702,26 +2912,29 @@ CopyFrom(CopyState cstate) else { /* - * We always check the partition constraint, including when - * the tuple got here via tuple-routing. However we don't - * need to in the latter case if no BR trigger is defined on - * the partition. Note that a BR trigger might modify the - * tuple such that the partition constraint is no longer - * satisfied, so we need to check in that case. + * If the target is a plain table, check the constraints of + * the tuple. */ - bool check_partition_constr = - (resultRelInfo->ri_PartitionCheck != NIL); - - if (saved_resultRelInfo != NULL && - !(resultRelInfo->ri_TrigDesc && - resultRelInfo->ri_TrigDesc->trig_insert_before_row)) - check_partition_constr = false; - - /* Check the constraints of the tuple */ - if (cstate->rel->rd_att->constr || check_partition_constr) + if (resultRelInfo->ri_FdwRoutine == NULL && + resultRelInfo->ri_RelationDesc->rd_att->constr) ExecConstraints(resultRelInfo, slot, estate); - if (useHeapMultiInsert) + /* + * Also check the tuple against the partition constraint, if + * there is one; except that if we got here via tuple-routing, + * we don't need to if there's no BR trigger defined on the + * partition. + */ + if (resultRelInfo->ri_PartitionCheck && + (proute == NULL || has_before_insert_row_trig)) + ExecPartitionCheck(resultRelInfo, slot, estate, true); + + /* + * Perform multi-inserts when enabled, or when loading a + * partitioned table that can support multi-inserts as + * determined above. 
+ */ + if (insertMethod == CIM_MULTI || leafpart_use_multi_insert) { /* Add this tuple to the tuple buffer */ if (nBufferedTuples == 0) @@ -2750,10 +2963,32 @@ CopyFrom(CopyState cstate) { List *recheckIndexes = NIL; - /* OK, store the tuple and create index entries for it */ - heap_insert(resultRelInfo->ri_RelationDesc, tuple, mycid, - hi_options, bistate); + /* OK, store the tuple */ + if (resultRelInfo->ri_FdwRoutine != NULL) + { + slot = resultRelInfo->ri_FdwRoutine->ExecForeignInsert(estate, + resultRelInfo, + slot, + NULL); + + if (slot == NULL) /* "do nothing" */ + continue; /* next tuple please */ + + /* FDW might have changed tuple */ + tuple = ExecMaterializeSlot(slot); + /* + * AFTER ROW Triggers might reference the tableoid + * column, so initialize t_tableOid before evaluating + * them. + */ + tuple->t_tableOid = RelationGetRelid(resultRelInfo->ri_RelationDesc); + } + else + heap_insert(resultRelInfo->ri_RelationDesc, tuple, + mycid, hi_options, bistate); + + /* And create index entries for it */ if (resultRelInfo->ri_NumIndices > 0) recheckIndexes = ExecInsertIndexTuples(slot, &(tuple->t_self), @@ -2771,26 +3006,34 @@ CopyFrom(CopyState cstate) } /* - * We count only tuples not suppressed by a BEFORE INSERT trigger; - * this is the same definition used by execMain.c for counting - * tuples inserted by an INSERT command. + * We count only tuples not suppressed by a BEFORE INSERT trigger + * or FDW; this is the same definition used by nodeModifyTable.c + * for counting tuples inserted by an INSERT command. */ processed++; - - if (saved_resultRelInfo) - { - resultRelInfo = saved_resultRelInfo; - estate->es_result_relation_info = resultRelInfo; - } } } /* Flush any remaining buffered tuples */ if (nBufferedTuples > 0) - CopyFromInsertBatch(cstate, estate, mycid, hi_options, - resultRelInfo, myslot, bistate, - nBufferedTuples, bufferedTuples, - firstBufferedLineNo); + { + if (insertMethod == CIM_MULTI_CONDITIONAL) + { + ResultRelInfo *presultRelInfo; + + presultRelInfo = proute->partitions[prev_leaf_part_index]; + + CopyFromInsertBatch(cstate, estate, mycid, hi_options, + presultRelInfo, myslot, bistate, + nBufferedTuples, bufferedTuples, + firstBufferedLineNo); + } + else + CopyFromInsertBatch(cstate, estate, mycid, hi_options, + resultRelInfo, myslot, bistate, + nBufferedTuples, bufferedTuples, + firstBufferedLineNo); + } /* Done, clean up */ error_context_stack = errcallback.previous; @@ -2807,7 +3050,7 @@ CopyFrom(CopyState cstate) pq_endmsgread(); /* Execute AFTER STATEMENT insertion triggers */ - ExecASInsertTriggers(estate, resultRelInfo, cstate->transition_capture); + ExecASInsertTriggers(estate, target_resultRelInfo, cstate->transition_capture); /* Handle queued AFTER triggers */ AfterTriggerEndQuery(estate); @@ -2817,37 +3060,17 @@ CopyFrom(CopyState cstate) ExecResetTupleTable(estate->es_tupleTable, false); - ExecCloseIndices(resultRelInfo); + /* Allow the FDW to shut down */ + if (target_resultRelInfo->ri_FdwRoutine != NULL && + target_resultRelInfo->ri_FdwRoutine->EndForeignInsert != NULL) + target_resultRelInfo->ri_FdwRoutine->EndForeignInsert(estate, + target_resultRelInfo); - /* Close all the partitioned tables, leaf partitions, and their indices */ - if (cstate->partition_dispatch_info) - { - int i; + ExecCloseIndices(target_resultRelInfo); - /* - * Remember cstate->partition_dispatch_info[0] corresponds to the root - * partitioned table, which we must not try to close, because it is - * the main target table of COPY that will be closed eventually by - * DoCopy(). 
Also, tupslot is NULL for the root partitioned table. - */ - for (i = 1; i < cstate->num_dispatch; i++) - { - PartitionDispatch pd = cstate->partition_dispatch_info[i]; - - heap_close(pd->reldesc, NoLock); - ExecDropSingleTupleTableSlot(pd->tupslot); - } - for (i = 0; i < cstate->num_partitions; i++) - { - ResultRelInfo *resultRelInfo = cstate->partitions + i; - - ExecCloseIndices(resultRelInfo); - heap_close(resultRelInfo->ri_RelationDesc, NoLock); - } - - /* Release the standalone partition tuple descriptor */ - ExecDropSingleTupleTableSlot(cstate->partition_tuple_slot); - } + /* Close all the partitioned tables, leaf partitions, and their indices */ + if (proute) + ExecCleanupTupleRouting(mtstate, proute); /* Close any trigger target relations */ ExecCleanUpTriggerState(estate); @@ -2874,11 +3097,12 @@ CopyFromInsertBatch(CopyState cstate, EState *estate, CommandId mycid, int hi_options, ResultRelInfo *resultRelInfo, TupleTableSlot *myslot, BulkInsertState bistate, int nBufferedTuples, HeapTuple *bufferedTuples, - int firstBufferedLineNo) + uint64 firstBufferedLineNo) { MemoryContext oldcontext; int i; - int save_cur_lineno; + uint64 save_cur_lineno; + bool line_buf_valid = cstate->line_buf_valid; /* * Print error context information correctly, if one of the operations @@ -2892,7 +3116,7 @@ CopyFromInsertBatch(CopyState cstate, EState *estate, CommandId mycid, * before calling it. */ oldcontext = MemoryContextSwitchTo(GetPerTupleMemoryContext(estate)); - heap_multi_insert(cstate->rel, + heap_multi_insert(resultRelInfo->ri_RelationDesc, bufferedTuples, nBufferedTuples, mycid, @@ -2911,7 +3135,7 @@ CopyFromInsertBatch(CopyState cstate, EState *estate, CommandId mycid, List *recheckIndexes; cstate->cur_lineno = firstBufferedLineNo + i; - ExecStoreTuple(bufferedTuples[i], myslot, InvalidBuffer, false); + ExecStoreHeapTuple(bufferedTuples[i], myslot, false); recheckIndexes = ExecInsertIndexTuples(myslot, &(bufferedTuples[i]->t_self), estate, false, NULL, NIL); @@ -2939,7 +3163,8 @@ CopyFromInsertBatch(CopyState cstate, EState *estate, CommandId mycid, } } - /* reset cur_lineno to where we were */ + /* reset cur_lineno and line_buf_valid to what they were */ + cstate->line_buf_valid = line_buf_valid; cstate->cur_lineno = save_cur_lineno; } @@ -2965,7 +3190,6 @@ BeginCopyFrom(ParseState *pstate, CopyState cstate; bool pipe = (filename == NULL); TupleDesc tupDesc; - Form_pg_attribute *attr; AttrNumber num_phys_attrs, num_defaults; FmgrInfo *in_functions; @@ -3000,7 +3224,6 @@ BeginCopyFrom(ParseState *pstate, cstate->range_table = pstate->p_rtable; tupDesc = RelationGetDescr(cstate->rel); - attr = tupDesc->attrs; num_phys_attrs = tupDesc->natts; num_defaults = 0; volatile_defexprs = false; @@ -3018,16 +3241,18 @@ BeginCopyFrom(ParseState *pstate, for (attnum = 1; attnum <= num_phys_attrs; attnum++) { + Form_pg_attribute att = TupleDescAttr(tupDesc, attnum - 1); + /* We don't need info for dropped attributes */ - if (attr[attnum - 1]->attisdropped) + if (att->attisdropped) continue; /* Fetch the input function and typioparam info */ if (cstate->binary) - getTypeBinaryInputInfo(attr[attnum - 1]->atttypid, + getTypeBinaryInputInfo(att->atttypid, &in_func_oid, &typioparams[attnum - 1]); else - getTypeInputInfo(attr[attnum - 1]->atttypid, + getTypeInputInfo(att->atttypid, &in_func_oid, &typioparams[attnum - 1]); fmgr_info(in_func_oid, &in_functions[attnum - 1]); @@ -3269,7 +3494,6 @@ NextCopyFrom(CopyState cstate, ExprContext *econtext, Datum *values, bool *nulls, Oid *tupleOid) { TupleDesc tupDesc; 
- Form_pg_attribute *attr; AttrNumber num_phys_attrs, attr_count, num_defaults = cstate->num_defaults; @@ -3283,7 +3507,6 @@ NextCopyFrom(CopyState cstate, ExprContext *econtext, ExprState **defexprs = cstate->defexprs; tupDesc = RelationGetDescr(cstate->rel); - attr = tupDesc->attrs; num_phys_attrs = tupDesc->natts; attr_count = list_length(cstate->attnumlist); nfields = file_has_oids ? (attr_count + 1) : attr_count; @@ -3345,12 +3568,13 @@ NextCopyFrom(CopyState cstate, ExprContext *econtext, { int attnum = lfirst_int(cur); int m = attnum - 1; + Form_pg_attribute att = TupleDescAttr(tupDesc, m); if (fieldno >= fldct) ereport(ERROR, (errcode(ERRCODE_BAD_COPY_FILE_FORMAT), errmsg("missing data for column \"%s\"", - NameStr(attr[m]->attname)))); + NameStr(att->attname)))); string = field_strings[fieldno++]; if (cstate->convert_select_flags && @@ -3384,12 +3608,12 @@ NextCopyFrom(CopyState cstate, ExprContext *econtext, } } - cstate->cur_attname = NameStr(attr[m]->attname); + cstate->cur_attname = NameStr(att->attname); cstate->cur_attval = string; values[m] = InputFunctionCall(&in_functions[m], string, typioparams[m], - attr[m]->atttypmod); + att->atttypmod); if (string != NULL) nulls[m] = false; cstate->cur_attname = NULL; @@ -3468,14 +3692,15 @@ NextCopyFrom(CopyState cstate, ExprContext *econtext, { int attnum = lfirst_int(cur); int m = attnum - 1; + Form_pg_attribute att = TupleDescAttr(tupDesc, m); - cstate->cur_attname = NameStr(attr[m]->attname); + cstate->cur_attname = NameStr(att->attname); i++; values[m] = CopyReadBinaryAttribute(cstate, i, &in_functions[m], typioparams[m], - attr[m]->atttypmod, + att->atttypmod, &nulls[m]); cstate->cur_attname = NULL; } @@ -4705,13 +4930,12 @@ CopyGetAttnums(TupleDesc tupDesc, Relation rel, List *attnamelist) if (attnamelist == NIL) { /* Generate default column list */ - Form_pg_attribute *attr = tupDesc->attrs; int attr_count = tupDesc->natts; int i; for (i = 0; i < attr_count; i++) { - if (attr[i]->attisdropped) + if (TupleDescAttr(tupDesc, i)->attisdropped) continue; attnums = lappend_int(attnums, i + 1); } @@ -4731,11 +4955,13 @@ CopyGetAttnums(TupleDesc tupDesc, Relation rel, List *attnamelist) attnum = InvalidAttrNumber; for (i = 0; i < tupDesc->natts; i++) { - if (tupDesc->attrs[i]->attisdropped) + Form_pg_attribute att = TupleDescAttr(tupDesc, i); + + if (att->attisdropped) continue; - if (namestrcmp(&(tupDesc->attrs[i]->attname), name) == 0) + if (namestrcmp(&(att->attname), name) == 0) { - attnum = tupDesc->attrs[i]->attnum; + attnum = att->attnum; break; } } diff --git a/src/backend/commands/createas.c b/src/backend/commands/createas.c index 97f9c55d6e..d5cb62da15 100644 --- a/src/backend/commands/createas.c +++ b/src/backend/commands/createas.c @@ -13,7 +13,7 @@ * we must return a tuples-processed count in the completionTag. (We no * longer do that for CTAS ... WITH NO DATA, however.) 
* - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * @@ -326,8 +326,8 @@ ExecCreateTableAs(CreateTableAsStmt *stmt, const char *queryString, query = linitial_node(Query, rewritten); Assert(query->commandType == CMD_SELECT); - /* plan the query --- note we disallow parallelism */ - plan = pg_plan_query(query, 0, params); + /* plan the query */ + plan = pg_plan_query(query, CURSOR_OPT_PARALLEL_OK, params); /* * Use a snapshot with an updated command ID to ensure this query sees @@ -468,7 +468,7 @@ intorel_startup(DestReceiver *self, int operation, TupleDesc typeinfo) lc = list_head(into->colNames); for (attnum = 0; attnum < typeinfo->natts; attnum++) { - Form_pg_attribute attribute = typeinfo->attrs[attnum]; + Form_pg_attribute attribute = TupleDescAttr(typeinfo, attnum); ColumnDef *col; char *colname; @@ -528,6 +528,7 @@ intorel_startup(DestReceiver *self, int operation, TupleDesc typeinfo) rte->rtekind = RTE_RELATION; rte->relid = intoRelationAddr.objectId; rte->relkind = relkind; + rte->rellockmode = RowExclusiveLock; rte->requiredPerms = ACL_INSERT; for (attnum = 1; attnum <= intoRelationDesc->rd_att->natts; attnum++) diff --git a/src/backend/commands/dbcommands.c b/src/backend/commands/dbcommands.c index e138539035..5342f217c0 100644 --- a/src/backend/commands/dbcommands.c +++ b/src/backend/commands/dbcommands.c @@ -8,7 +8,7 @@ * stepping on each others' toes. Formerly we used table-level locks * on pg_database, but that's too coarse-grained. * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * @@ -422,7 +422,7 @@ createdb(ParseState *pstate, const CreatedbStmt *stmt) aclresult = pg_tablespace_aclcheck(dst_deftablespace, GetUserId(), ACL_CREATE); if (aclresult != ACLCHECK_OK) - aclcheck_error(aclresult, ACL_KIND_TABLESPACE, + aclcheck_error(aclresult, OBJECT_TABLESPACE, tablespacename); /* pg_global must never be the default tablespace */ @@ -822,7 +822,7 @@ dropdb(const char *dbname, bool missing_ok) * Permission checks */ if (!pg_database_ownercheck(db_id, GetUserId())) - aclcheck_error(ACLCHECK_NOT_OWNER, ACL_KIND_DATABASE, + aclcheck_error(ACLCHECK_NOT_OWNER, OBJECT_DATABASE, dbname); /* DROP hook for the database being removed */ @@ -857,8 +857,8 @@ dropdb(const char *dbname, bool missing_ok) (errcode(ERRCODE_OBJECT_IN_USE), errmsg("database \"%s\" is used by an active logical replication slot", dbname), - errdetail_plural("There is %d active slot", - "There are %d active slots", + errdetail_plural("There is %d active slot.", + "There are %d active slots.", nslots_active, nslots_active))); } @@ -997,7 +997,7 @@ RenameDatabase(const char *oldname, const char *newname) /* must be owner */ if (!pg_database_ownercheck(db_id, GetUserId())) - aclcheck_error(ACLCHECK_NOT_OWNER, ACL_KIND_DATABASE, + aclcheck_error(ACLCHECK_NOT_OWNER, OBJECT_DATABASE, oldname); /* must have createdb rights */ @@ -1112,7 +1112,7 @@ movedb(const char *dbname, const char *tblspcname) * Permission checks */ if (!pg_database_ownercheck(db_id, GetUserId())) - aclcheck_error(ACLCHECK_NOT_OWNER, ACL_KIND_DATABASE, + aclcheck_error(ACLCHECK_NOT_OWNER, OBJECT_DATABASE, dbname); /* @@ -1134,7 +1134,7 @@ movedb(const char *dbname, const char *tblspcname) aclresult = 
pg_tablespace_aclcheck(dst_tblspcoid, GetUserId(), ACL_CREATE); if (aclresult != ACLCHECK_OK) - aclcheck_error(aclresult, ACL_KIND_TABLESPACE, + aclcheck_error(aclresult, OBJECT_TABLESPACE, tblspcname); /* @@ -1476,7 +1476,7 @@ AlterDatabase(ParseState *pstate, AlterDatabaseStmt *stmt, bool isTopLevel) dtablespace->defname), parser_errposition(pstate, dtablespace->location))); /* this case isn't allowed within a transaction block */ - PreventTransactionChain(isTopLevel, "ALTER DATABASE SET TABLESPACE"); + PreventInTransactionBlock(isTopLevel, "ALTER DATABASE SET TABLESPACE"); movedb(stmt->dbname, defGetString(dtablespace)); return InvalidOid; } @@ -1515,7 +1515,7 @@ AlterDatabase(ParseState *pstate, AlterDatabaseStmt *stmt, bool isTopLevel) dboid = HeapTupleGetOid(tuple); if (!pg_database_ownercheck(HeapTupleGetOid(tuple), GetUserId())) - aclcheck_error(ACLCHECK_NOT_OWNER, ACL_KIND_DATABASE, + aclcheck_error(ACLCHECK_NOT_OWNER, OBJECT_DATABASE, stmt->dbname); /* @@ -1583,7 +1583,7 @@ AlterDatabaseSet(AlterDatabaseSetStmt *stmt) shdepLockAndCheckObject(DatabaseRelationId, datid); if (!pg_database_ownercheck(datid, GetUserId())) - aclcheck_error(ACLCHECK_NOT_OWNER, ACL_KIND_DATABASE, + aclcheck_error(ACLCHECK_NOT_OWNER, OBJECT_DATABASE, stmt->dbname); AlterSetting(datid, InvalidOid, stmt->setstmt); @@ -1646,7 +1646,7 @@ AlterDatabaseOwner(const char *dbname, Oid newOwnerId) /* Otherwise, must be owner of the existing object */ if (!pg_database_ownercheck(HeapTupleGetOid(tuple), GetUserId())) - aclcheck_error(ACLCHECK_NOT_OWNER, ACL_KIND_DATABASE, + aclcheck_error(ACLCHECK_NOT_OWNER, OBJECT_DATABASE, dbname); /* Must be able to become new owner */ @@ -1718,8 +1718,8 @@ AlterDatabaseOwner(const char *dbname, Oid newOwnerId) /* * Look up info about the database named "name". If the database exists, * obtain the specified lock type on it, fill in any of the remaining - * parameters that aren't NULL, and return TRUE. If no such database, - * return FALSE. + * parameters that aren't NULL, and return true. If no such database, + * return false. */ static bool get_db_info(const char *name, LOCKMODE lockmode, @@ -1923,7 +1923,7 @@ remove_dbtablespaces(Oid db_id) /* * Check for existing files that conflict with a proposed new DB OID; - * return TRUE if there are any + * return true if there are any * * If there were a subdirectory in any tablespace matching the proposed new * OID, we'd get a create failure due to the duplicate name ... and then we'd diff --git a/src/backend/commands/define.c b/src/backend/commands/define.c index 8eff0ad17b..00b5721f85 100644 --- a/src/backend/commands/define.c +++ b/src/backend/commands/define.c @@ -4,7 +4,7 @@ * Support routines for various kinds of object creation. * * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * diff --git a/src/backend/commands/discard.c b/src/backend/commands/discard.c index f0dcd87fb8..01a999c2ac 100644 --- a/src/backend/commands/discard.c +++ b/src/backend/commands/discard.c @@ -3,7 +3,7 @@ * discard.c * The implementation of the DISCARD command * - * Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Copyright (c) 1996-2018, PostgreSQL Global Development Group * * * IDENTIFICATION @@ -63,7 +63,7 @@ DiscardAll(bool isTopLevel) * DISCARD ALL inside a transaction block would leave the transaction * still uncommitted. 
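The dbcommands.c hunks above reflect two interface changes that recur through the rest of the patch: aclcheck_error() now takes an ObjectType (OBJECT_DATABASE, OBJECT_TABLESPACE, and so on) instead of the old ACL_KIND_* constants, and PreventTransactionChain() is now PreventInTransactionBlock(). A minimal sketch of how a utility command would use both; apart from those two functions, pg_database_ownercheck() and GetUserId(), everything here is made up (there is no MY COMMAND statement).

#include "postgres.h"

#include "access/xact.h"
#include "miscadmin.h"
#include "nodes/parsenodes.h"
#include "utils/acl.h"

/*
 * Illustrative utility-command body: must run outside a transaction block,
 * and only the database owner may execute it.
 */
static void
ExecMyDatabaseCommand(Oid db_id, const char *dbname, bool isTopLevel)
{
    /* refuse to run inside an explicit transaction block */
    PreventInTransactionBlock(isTopLevel, "MY COMMAND");

    /* ownership check, reported through the ObjectType-based API */
    if (!pg_database_ownercheck(db_id, GetUserId()))
        aclcheck_error(ACLCHECK_NOT_OWNER, OBJECT_DATABASE, dbname);

    /* ... actual command work would go here ... */
}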
*/ - PreventTransactionChain(isTopLevel, "DISCARD ALL"); + PreventInTransactionBlock(isTopLevel, "DISCARD ALL"); /* Closing portals might run user-defined code, so do that first. */ PortalHashTableDeleteAll(); diff --git a/src/backend/commands/dropcmds.c b/src/backend/commands/dropcmds.c index 2b30677d6f..a2a8e37b3c 100644 --- a/src/backend/commands/dropcmds.c +++ b/src/backend/commands/dropcmds.c @@ -3,12 +3,12 @@ * dropcmds.c * handle various "DROP" operations * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * * IDENTIFICATION - * src/backend/catalog/dropcmds.c + * src/backend/commands/dropcmds.c * *------------------------------------------------------------------------- */ @@ -26,6 +26,7 @@ #include "nodes/makefuncs.h" #include "parser/parse_type.h" #include "utils/builtins.h" +#include "utils/lsyscache.h" #include "utils/syscache.h" @@ -91,21 +92,12 @@ RemoveObjects(DropStmt *stmt) */ if (stmt->removeType == OBJECT_FUNCTION) { - Oid funcOid = address.objectId; - HeapTuple tup; - - tup = SearchSysCache1(PROCOID, ObjectIdGetDatum(funcOid)); - if (!HeapTupleIsValid(tup)) /* should not happen */ - elog(ERROR, "cache lookup failed for function %u", funcOid); - - if (((Form_pg_proc) GETSTRUCT(tup))->proisagg) + if (get_func_prokind(address.objectId) == PROKIND_AGGREGATE) ereport(ERROR, (errcode(ERRCODE_WRONG_OBJECT_TYPE), errmsg("\"%s\" is an aggregate function", NameListToString(castNode(ObjectWithArgs, object)->objname)), errhint("Use DROP AGGREGATE to drop aggregate functions."))); - - ReleaseSysCache(tup); } /* Check permissions. */ @@ -338,6 +330,32 @@ does_not_exist_skipping(ObjectType objtype, Node *object) } break; } + case OBJECT_PROCEDURE: + { + ObjectWithArgs *owa = castNode(ObjectWithArgs, object); + + if (!schema_does_not_exist_skipping(owa->objname, &msg, &name) && + !type_in_list_does_not_exist_skipping(owa->objargs, &msg, &name)) + { + msg = gettext_noop("procedure %s(%s) does not exist, skipping"); + name = NameListToString(owa->objname); + args = TypeNameListToString(owa->objargs); + } + break; + } + case OBJECT_ROUTINE: + { + ObjectWithArgs *owa = castNode(ObjectWithArgs, object); + + if (!schema_does_not_exist_skipping(owa->objname, &msg, &name) && + !type_in_list_does_not_exist_skipping(owa->objargs, &msg, &name)) + { + msg = gettext_noop("routine %s(%s) does not exist, skipping"); + name = NameListToString(owa->objname); + args = TypeNameListToString(owa->objargs); + } + break; + } case OBJECT_AGGREGATE: { ObjectWithArgs *owa = castNode(ObjectWithArgs, object); diff --git a/src/backend/commands/event_trigger.c b/src/backend/commands/event_trigger.c index 938133bbe4..20a3a78692 100644 --- a/src/backend/commands/event_trigger.c +++ b/src/backend/commands/event_trigger.c @@ -3,7 +3,7 @@ * event_trigger.c * PostgreSQL EVENT TRIGGER support code. * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * IDENTIFICATION @@ -85,7 +85,7 @@ typedef enum } event_trigger_command_tag_check_result; /* XXX merge this with ObjectTypeMap? 
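The dropcmds.c hunk above replaces a hand-rolled PROCOID syscache lookup of proisagg with get_func_prokind(), which classifies a pg_proc entry as a plain function, procedure, aggregate, or window function. A minimal sketch of the call; the wrapper name function_is_aggregate is invented for illustration.

#include "postgres.h"

#include "catalog/pg_proc.h"
#include "utils/lsyscache.h"

/* Illustrative wrapper: does this pg_proc OID name an aggregate? */
static bool
function_is_aggregate(Oid funcid)
{
    /* like other lsyscache helpers, this raises an error for an unknown OID */
    return get_func_prokind(funcid) == PROKIND_AGGREGATE;
}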
*/ -static event_trigger_support_data event_trigger_support[] = { +static const event_trigger_support_data event_trigger_support[] = { {"ACCESS METHOD", true}, {"AGGREGATE", true}, {"CAST", true}, @@ -106,8 +106,10 @@ static event_trigger_support_data event_trigger_support[] = { {"OPERATOR CLASS", true}, {"OPERATOR FAMILY", true}, {"POLICY", true}, + {"PROCEDURE", true}, {"PUBLICATION", true}, {"ROLE", false}, + {"ROUTINE", true}, {"RULE", true}, {"SCHEMA", true}, {"SEQUENCE", true}, @@ -152,13 +154,13 @@ static event_trigger_command_tag_check_result check_table_rewrite_ddl_tag( const char *tag); static void error_duplicate_filter_variable(const char *defname); static Datum filter_list_to_array(List *filterlist); -static Oid insert_event_trigger_tuple(char *trigname, char *eventname, +static Oid insert_event_trigger_tuple(const char *trigname, const char *eventname, Oid evtOwner, Oid funcoid, List *tags); static void validate_ddl_tags(const char *filtervar, List *taglist); static void validate_table_rewrite_tags(const char *filtervar, List *taglist); static void EventTriggerInvoke(List *fn_oid_list, EventTriggerData *trigdata); -static const char *stringify_grantobjtype(GrantObjectType objtype); -static const char *stringify_adefprivs_objtype(GrantObjectType objtype); +static const char *stringify_grant_objtype(ObjectType objtype); +static const char *stringify_adefprivs_objtype(ObjectType objtype); /* * Create an event trigger. @@ -280,7 +282,7 @@ static event_trigger_command_tag_check_result check_ddl_tag(const char *tag) { const char *obtypename; - event_trigger_support_data *etsd; + const event_trigger_support_data *etsd; /* * Handle some idiosyncratic special cases. @@ -372,7 +374,7 @@ error_duplicate_filter_variable(const char *defname) * Insert the new pg_event_trigger row and record dependencies. */ static Oid -insert_event_trigger_tuple(char *trigname, char *eventname, Oid evtOwner, +insert_event_trigger_tuple(const char *trigname, const char *eventname, Oid evtOwner, Oid funcoid, List *taglist) { Relation tgrel; @@ -517,7 +519,7 @@ AlterEventTrigger(AlterEventTrigStmt *stmt) trigoid = HeapTupleGetOid(tup); if (!pg_event_trigger_ownercheck(trigoid, GetUserId())) - aclcheck_error(ACLCHECK_NOT_OWNER, ACL_KIND_EVENT_TRIGGER, + aclcheck_error(ACLCHECK_NOT_OWNER, OBJECT_EVENT_TRIGGER, stmt->trigname); /* tuple is a copy, so we can modify it below */ @@ -608,7 +610,7 @@ AlterEventTriggerOwner_internal(Relation rel, HeapTuple tup, Oid newOwnerId) return; if (!pg_event_trigger_ownercheck(HeapTupleGetOid(tup), GetUserId())) - aclcheck_error(ACLCHECK_NOT_OWNER, ACL_KIND_EVENT_TRIGGER, + aclcheck_error(ACLCHECK_NOT_OWNER, OBJECT_EVENT_TRIGGER, NameStr(form->evtname)); /* New owner must be a superuser */ @@ -834,6 +836,19 @@ EventTriggerDDLCommandEnd(Node *parsetree) if (!IsUnderPostmaster) return; + /* + * Also do nothing if our state isn't set up, which it won't be if there + * weren't any relevant event triggers at the start of the current DDL + * command. This test might therefore seem optional, but it's important + * because EventTriggerCommonSetup might find triggers that didn't exist + * at the time the command started. Although this function itself + * wouldn't crash, the event trigger functions would presumably call + * pg_event_trigger_ddl_commands which would fail. Better to do nothing + * until the next command. 
+ */ + if (!currentEventTriggerState) + return; + runlist = EventTriggerCommonSetup(parsetree, EVT_DDLCommandEnd, "ddl_command_end", &trigdata); @@ -885,9 +900,10 @@ EventTriggerSQLDrop(Node *parsetree) &trigdata); /* - * Nothing to do if run list is empty. Note this shouldn't happen, + * Nothing to do if run list is empty. Note this typically can't happen, * because if there are no sql_drop events, then objects-to-drop wouldn't * have been collected in the first place and we would have quit above. + * But it could occur if event triggers were dropped partway through. */ if (runlist == NIL) return; @@ -934,8 +950,6 @@ EventTriggerTableRewrite(Node *parsetree, Oid tableOid, int reason) List *runlist; EventTriggerData trigdata; - elog(DEBUG1, "EventTriggerTableRewrite(%u)", tableOid); - /* * Event Triggers are completely disabled in standalone mode. There are * (at least) two reasons for this: @@ -955,6 +969,16 @@ EventTriggerTableRewrite(Node *parsetree, Oid tableOid, int reason) if (!IsUnderPostmaster) return; + /* + * Also do nothing if our state isn't set up, which it won't be if there + * weren't any relevant event triggers at the start of the current DDL + * command. This test might therefore seem optional, but it's + * *necessary*, because EventTriggerCommonSetup might find triggers that + * didn't exist at the time the command started. + */ + if (!currentEventTriggerState) + return; + runlist = EventTriggerCommonSetup(parsetree, EVT_TableRewrite, "table_rewrite", @@ -1103,8 +1127,10 @@ EventTriggerSupportsObjectType(ObjectType obtype) case OBJECT_OPERATOR: case OBJECT_OPFAMILY: case OBJECT_POLICY: + case OBJECT_PROCEDURE: case OBJECT_PUBLICATION: case OBJECT_PUBLICATION_REL: + case OBJECT_ROUTINE: case OBJECT_RULE: case OBJECT_SCHEMA: case OBJECT_SEQUENCE: @@ -1195,39 +1221,6 @@ EventTriggerSupportsObjectClass(ObjectClass objclass) return false; } -bool -EventTriggerSupportsGrantObjectType(GrantObjectType objtype) -{ - switch (objtype) - { - case ACL_OBJECT_DATABASE: - case ACL_OBJECT_TABLESPACE: - /* no support for global objects */ - return false; - - case ACL_OBJECT_COLUMN: - case ACL_OBJECT_RELATION: - case ACL_OBJECT_SEQUENCE: - case ACL_OBJECT_DOMAIN: - case ACL_OBJECT_FDW: - case ACL_OBJECT_FOREIGN_SERVER: - case ACL_OBJECT_FUNCTION: - case ACL_OBJECT_LANGUAGE: - case ACL_OBJECT_LARGEOBJECT: - case ACL_OBJECT_NAMESPACE: - case ACL_OBJECT_TYPE: - return true; - - /* - * There's intentionally no default: case here; we want the - * compiler to warn if a new ACL class hasn't been handled above. - */ - } - - /* Shouldn't get here, but if we do, say "no support" */ - return false; -} - /* * Prepare event trigger state for a new complete query to run, if necessary; * returns whether this was done. If it was, EventTriggerEndCompleteQuery must @@ -1703,11 +1696,6 @@ EventTriggerCollectSimpleCommand(ObjectAddress address, * Note we don't collect the command immediately; instead we keep it in * currentCommand, and only when we're done processing the subcommands we will * add it to the command list. - * - * XXX -- this API isn't considering the possibility of an ALTER TABLE command - * being called reentrantly by an event trigger function. Do we need stackable - * commands at this level? Perhaps at least we should detect the condition and - * raise an error. 
*/ void EventTriggerAlterTableStart(Node *parsetree) @@ -1732,6 +1720,7 @@ EventTriggerAlterTableStart(Node *parsetree) command->d.alterTable.subcmds = NIL; command->parsetree = copyObject(parsetree); + command->parent = currentEventTriggerState->currentCommand; currentEventTriggerState->currentCommand = command; MemoryContextSwitchTo(oldcxt); @@ -1772,6 +1761,7 @@ EventTriggerCollectAlterTableSubcmd(Node *subcmd, ObjectAddress address) return; Assert(IsA(subcmd, AlterTableCmd)); + Assert(currentEventTriggerState->currentCommand != NULL); Assert(OidIsValid(currentEventTriggerState->currentCommand->d.alterTable.objectId)); oldcxt = MemoryContextSwitchTo(currentEventTriggerState->cxt); @@ -1797,11 +1787,15 @@ EventTriggerCollectAlterTableSubcmd(Node *subcmd, ObjectAddress address) void EventTriggerAlterTableEnd(void) { + CollectedCommand *parent; + /* ignore if event trigger context not set, or collection disabled */ if (!currentEventTriggerState || currentEventTriggerState->commandCollectionInhibited) return; + parent = currentEventTriggerState->currentCommand->parent; + /* If no subcommands, don't collect */ if (list_length(currentEventTriggerState->currentCommand->d.alterTable.subcmds) != 0) { @@ -1812,7 +1806,7 @@ EventTriggerAlterTableEnd(void) else pfree(currentEventTriggerState->currentCommand); - currentEventTriggerState->currentCommand = NULL; + currentEventTriggerState->currentCommand = parent; } /* @@ -2190,8 +2184,8 @@ pg_event_trigger_ddl_commands(PG_FUNCTION_ARGS) values[i++] = CStringGetTextDatum(cmd->d.grant.istmt->is_grant ? "GRANT" : "REVOKE"); /* object_type */ - values[i++] = CStringGetTextDatum(stringify_grantobjtype( - cmd->d.grant.istmt->objtype)); + values[i++] = CStringGetTextDatum(stringify_grant_objtype( + cmd->d.grant.istmt->objtype)); /* schema */ nulls[i++] = true; /* identity */ @@ -2213,84 +2207,164 @@ pg_event_trigger_ddl_commands(PG_FUNCTION_ARGS) } /* - * Return the GrantObjectType as a string, as it would appear in GRANT and + * Return the ObjectType as a string, as it would appear in GRANT and * REVOKE commands. 
*/ static const char * -stringify_grantobjtype(GrantObjectType objtype) +stringify_grant_objtype(ObjectType objtype) { switch (objtype) { - case ACL_OBJECT_COLUMN: + case OBJECT_COLUMN: return "COLUMN"; - case ACL_OBJECT_RELATION: + case OBJECT_TABLE: return "TABLE"; - case ACL_OBJECT_SEQUENCE: + case OBJECT_SEQUENCE: return "SEQUENCE"; - case ACL_OBJECT_DATABASE: + case OBJECT_DATABASE: return "DATABASE"; - case ACL_OBJECT_DOMAIN: + case OBJECT_DOMAIN: return "DOMAIN"; - case ACL_OBJECT_FDW: + case OBJECT_FDW: return "FOREIGN DATA WRAPPER"; - case ACL_OBJECT_FOREIGN_SERVER: + case OBJECT_FOREIGN_SERVER: return "FOREIGN SERVER"; - case ACL_OBJECT_FUNCTION: + case OBJECT_FUNCTION: return "FUNCTION"; - case ACL_OBJECT_LANGUAGE: + case OBJECT_LANGUAGE: return "LANGUAGE"; - case ACL_OBJECT_LARGEOBJECT: + case OBJECT_LARGEOBJECT: return "LARGE OBJECT"; - case ACL_OBJECT_NAMESPACE: + case OBJECT_SCHEMA: return "SCHEMA"; - case ACL_OBJECT_TABLESPACE: + case OBJECT_PROCEDURE: + return "PROCEDURE"; + case OBJECT_ROUTINE: + return "ROUTINE"; + case OBJECT_TABLESPACE: return "TABLESPACE"; - case ACL_OBJECT_TYPE: + case OBJECT_TYPE: return "TYPE"; + /* these currently aren't used */ + case OBJECT_ACCESS_METHOD: + case OBJECT_AGGREGATE: + case OBJECT_AMOP: + case OBJECT_AMPROC: + case OBJECT_ATTRIBUTE: + case OBJECT_CAST: + case OBJECT_COLLATION: + case OBJECT_CONVERSION: + case OBJECT_DEFAULT: + case OBJECT_DEFACL: + case OBJECT_DOMCONSTRAINT: + case OBJECT_EVENT_TRIGGER: + case OBJECT_EXTENSION: + case OBJECT_FOREIGN_TABLE: + case OBJECT_INDEX: + case OBJECT_MATVIEW: + case OBJECT_OPCLASS: + case OBJECT_OPERATOR: + case OBJECT_OPFAMILY: + case OBJECT_POLICY: + case OBJECT_PUBLICATION: + case OBJECT_PUBLICATION_REL: + case OBJECT_ROLE: + case OBJECT_RULE: + case OBJECT_STATISTIC_EXT: + case OBJECT_SUBSCRIPTION: + case OBJECT_TABCONSTRAINT: + case OBJECT_TRANSFORM: + case OBJECT_TRIGGER: + case OBJECT_TSCONFIGURATION: + case OBJECT_TSDICTIONARY: + case OBJECT_TSPARSER: + case OBJECT_TSTEMPLATE: + case OBJECT_USER_MAPPING: + case OBJECT_VIEW: + elog(ERROR, "unsupported object type: %d", (int) objtype); } - elog(ERROR, "unrecognized grant object type: %d", (int) objtype); return "???"; /* keep compiler quiet */ } /* - * Return the GrantObjectType as a string; as above, but use the spelling + * Return the ObjectType as a string; as above, but use the spelling * in ALTER DEFAULT PRIVILEGES commands instead. Generally this is just * the plural. 
*/ static const char * -stringify_adefprivs_objtype(GrantObjectType objtype) +stringify_adefprivs_objtype(ObjectType objtype) { switch (objtype) { - case ACL_OBJECT_COLUMN: + case OBJECT_COLUMN: return "COLUMNS"; - case ACL_OBJECT_RELATION: + case OBJECT_TABLE: return "TABLES"; - case ACL_OBJECT_SEQUENCE: + case OBJECT_SEQUENCE: return "SEQUENCES"; - case ACL_OBJECT_DATABASE: + case OBJECT_DATABASE: return "DATABASES"; - case ACL_OBJECT_DOMAIN: + case OBJECT_DOMAIN: return "DOMAINS"; - case ACL_OBJECT_FDW: + case OBJECT_FDW: return "FOREIGN DATA WRAPPERS"; - case ACL_OBJECT_FOREIGN_SERVER: + case OBJECT_FOREIGN_SERVER: return "FOREIGN SERVERS"; - case ACL_OBJECT_FUNCTION: + case OBJECT_FUNCTION: return "FUNCTIONS"; - case ACL_OBJECT_LANGUAGE: + case OBJECT_LANGUAGE: return "LANGUAGES"; - case ACL_OBJECT_LARGEOBJECT: + case OBJECT_LARGEOBJECT: return "LARGE OBJECTS"; - case ACL_OBJECT_NAMESPACE: + case OBJECT_SCHEMA: return "SCHEMAS"; - case ACL_OBJECT_TABLESPACE: + case OBJECT_PROCEDURE: + return "PROCEDURES"; + case OBJECT_ROUTINE: + return "ROUTINES"; + case OBJECT_TABLESPACE: return "TABLESPACES"; - case ACL_OBJECT_TYPE: + case OBJECT_TYPE: return "TYPES"; + /* these currently aren't used */ + case OBJECT_ACCESS_METHOD: + case OBJECT_AGGREGATE: + case OBJECT_AMOP: + case OBJECT_AMPROC: + case OBJECT_ATTRIBUTE: + case OBJECT_CAST: + case OBJECT_COLLATION: + case OBJECT_CONVERSION: + case OBJECT_DEFAULT: + case OBJECT_DEFACL: + case OBJECT_DOMCONSTRAINT: + case OBJECT_EVENT_TRIGGER: + case OBJECT_EXTENSION: + case OBJECT_FOREIGN_TABLE: + case OBJECT_INDEX: + case OBJECT_MATVIEW: + case OBJECT_OPCLASS: + case OBJECT_OPERATOR: + case OBJECT_OPFAMILY: + case OBJECT_POLICY: + case OBJECT_PUBLICATION: + case OBJECT_PUBLICATION_REL: + case OBJECT_ROLE: + case OBJECT_RULE: + case OBJECT_STATISTIC_EXT: + case OBJECT_SUBSCRIPTION: + case OBJECT_TABCONSTRAINT: + case OBJECT_TRANSFORM: + case OBJECT_TRIGGER: + case OBJECT_TSCONFIGURATION: + case OBJECT_TSDICTIONARY: + case OBJECT_TSPARSER: + case OBJECT_TSTEMPLATE: + case OBJECT_USER_MAPPING: + case OBJECT_VIEW: + elog(ERROR, "unsupported object type: %d", (int) objtype); } - elog(ERROR, "unrecognized grant object type: %d", (int) objtype); return "???"; /* keep compiler quiet */ } diff --git a/src/backend/commands/explain.c b/src/backend/commands/explain.c index 7648201218..799a22e9d5 100644 --- a/src/backend/commands/explain.c +++ b/src/backend/commands/explain.c @@ -3,7 +3,7 @@ * explain.c * Explain query execution plans * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994-5, Regents of the University of California * * IDENTIFICATION @@ -19,8 +19,9 @@ #include "commands/createas.h" #include "commands/defrem.h" #include "commands/prepare.h" -#include "executor/hashjoin.h" +#include "executor/nodeHash.h" #include "foreign/fdwapi.h" +#include "jit/jit.h" #include "nodes/extensible.h" #include "nodes/nodeFuncs.h" #include "optimizer/clauses.h" @@ -107,6 +108,7 @@ static void show_tidbitmap_info(BitmapHeapScanState *planstate, static void show_instrumentation_count(const char *qlabel, int which, PlanState *planstate, ExplainState *es); static void show_foreignscan_info(ForeignScanState *fsstate, ExplainState *es); +static void show_eval_params(Bitmapset *bms_params, ExplainState *es); static const char *explain_get_index_name(Oid indexId); static void show_buffer_usage(ExplainState *es, const BufferUsage *usage); static void 
ExplainIndexScanDetails(Oid indexid, ScanDirection indexorderdir, @@ -116,18 +118,14 @@ static void ExplainModifyTarget(ModifyTable *plan, ExplainState *es); static void ExplainTargetRel(Plan *plan, Index rti, ExplainState *es); static void show_modifytable_info(ModifyTableState *mtstate, List *ancestors, ExplainState *es); -static void ExplainMemberNodes(List *plans, PlanState **planstates, - List *ancestors, ExplainState *es); +static void ExplainMemberNodes(PlanState **planstates, int nsubnodes, + int nplans, List *ancestors, ExplainState *es); static void ExplainSubPlans(List *plans, List *ancestors, const char *relationship, ExplainState *es); static void ExplainCustomChildren(CustomScanState *css, List *ancestors, ExplainState *es); -static void ExplainProperty(const char *qlabel, const char *value, - bool numeric, ExplainState *es); -static void ExplainOpenGroup(const char *objtype, const char *labelname, - bool labeled, ExplainState *es); -static void ExplainCloseGroup(const char *objtype, const char *labelname, - bool labeled, ExplainState *es); +static void ExplainProperty(const char *qlabel, const char *unit, + const char *value, bool numeric, ExplainState *es); static void ExplainDummyGroup(const char *objtype, const char *labelname, ExplainState *es); static void ExplainXMLTag(const char *tagname, int flags, ExplainState *es); @@ -354,7 +352,7 @@ ExplainOneQuery(Query *query, int cursorOptions, /* if an advisor plugin is present, let it manage things */ if (ExplainOneQuery_hook) (*ExplainOneQuery_hook) (query, cursorOptions, into, es, - queryString, params); + queryString, params, queryEnv); else { PlannedStmt *plan; @@ -400,8 +398,6 @@ ExplainOneUtility(Node *utilityStmt, IntoClause *into, ExplainState *es, * We have to rewrite the contained SELECT and then pass it back to * ExplainOneQuery. It's probably not really necessary to copy the * contained parsetree another time, but let's be safe. - * - * Like ExecCreateTableAs, disallow parallelism in the plan. */ CreateTableAsStmt *ctas = (CreateTableAsStmt *) utilityStmt; List *rewritten; @@ -409,7 +405,7 @@ ExplainOneUtility(Node *utilityStmt, IntoClause *into, ExplainState *es, rewritten = QueryRewrite(castNode(Query, copyObject(ctas->query))); Assert(list_length(rewritten) == 1); ExplainOneQuery(linitial_node(Query, rewritten), - 0, ctas->into, es, + CURSOR_OPT_PARALLEL_OK, ctas->into, es, queryString, params, queryEnv); } else if (IsA(utilityStmt, DeclareCursorStmt)) @@ -554,17 +550,22 @@ ExplainOnePlan(PlannedStmt *plannedstmt, IntoClause *into, ExplainState *es, { double plantime = INSTR_TIME_GET_DOUBLE(*planduration); - if (es->format == EXPLAIN_FORMAT_TEXT) - appendStringInfo(es->str, "Planning time: %.3f ms\n", - 1000.0 * plantime); - else - ExplainPropertyFloat("Planning Time", 1000.0 * plantime, 3, es); + ExplainPropertyFloat("Planning Time", "ms", 1000.0 * plantime, 3, es); } /* Print info about runtime of triggers */ if (es->analyze) ExplainPrintTriggers(es, queryDesc); + /* + * Print info about JITing. Tied to es->costs because we don't want to + * display this in regression tests, as it'd cause output differences + * depending on build options. Might want to separate that out from COSTS + * at a later stage. + */ + if (es->costs) + ExplainPrintJITSummary(es, queryDesc); + /* * Close down the query and free resources. Include time for this in the * total execution time (although it should be pretty minimal). 
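Both the createas.c hunk earlier and the ExplainOneUtility hunk above now plan the rewritten query with CURSOR_OPT_PARALLEL_OK instead of 0, so CTAS and EXPLAIN can consider parallel plans. A minimal sketch of the planning call; plan_utility_query is a hypothetical wrapper, not a function in the patch.

#include "postgres.h"

#include "nodes/params.h"
#include "nodes/parsenodes.h"
#include "tcop/tcopprot.h"

/*
 * Illustrative wrapper: plan an already rewritten and analyzed Query,
 * letting the planner consider a parallel plan.
 */
static PlannedStmt *
plan_utility_query(Query *query, ParamListInfo params)
{
    return pg_plan_query(query, CURSOR_OPT_PARALLEL_OK, params);
}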
@@ -590,14 +591,8 @@ ExplainOnePlan(PlannedStmt *plannedstmt, IntoClause *into, ExplainState *es, * the output). By default, ANALYZE sets SUMMARY to true. */ if (es->summary && es->analyze) - { - if (es->format == EXPLAIN_FORMAT_TEXT) - appendStringInfo(es->str, "Execution time: %.3f ms\n", - 1000.0 * totaltime); - else - ExplainPropertyFloat("Execution Time", 1000.0 * totaltime, - 3, es); - } + ExplainPropertyFloat("Execution Time", "ms", 1000.0 * totaltime, 3, + es); ExplainCloseGroup("Query", NULL, true, es); } @@ -656,17 +651,33 @@ ExplainPrintTriggers(ExplainState *es, QueryDesc *queryDesc) ResultRelInfo *rInfo; bool show_relname; int numrels = queryDesc->estate->es_num_result_relations; - List *targrels = queryDesc->estate->es_trig_target_relations; + int numrootrels = queryDesc->estate->es_num_root_result_relations; + List *routerels; + List *targrels; int nr; ListCell *l; + routerels = queryDesc->estate->es_tuple_routing_result_relations; + targrels = queryDesc->estate->es_trig_target_relations; + ExplainOpenGroup("Triggers", "Triggers", false, es); - show_relname = (numrels > 1 || targrels != NIL); + show_relname = (numrels > 1 || numrootrels > 0 || + routerels != NIL || targrels != NIL); rInfo = queryDesc->estate->es_result_relations; for (nr = 0; nr < numrels; rInfo++, nr++) report_triggers(rInfo, show_relname, es); + rInfo = queryDesc->estate->es_root_result_relations; + for (nr = 0; nr < numrootrels; rInfo++, nr++) + report_triggers(rInfo, show_relname, es); + + foreach(l, routerels) + { + rInfo = (ResultRelInfo *) lfirst(l); + report_triggers(rInfo, show_relname, es); + } + foreach(l, targrels) { rInfo = (ResultRelInfo *) lfirst(l); @@ -676,6 +687,131 @@ ExplainPrintTriggers(ExplainState *es, QueryDesc *queryDesc) ExplainCloseGroup("Triggers", "Triggers", false, es); } +/* + * ExplainPrintJITSummary - + * Print summarized JIT instrumentation from leader and workers + */ +void +ExplainPrintJITSummary(ExplainState *es, QueryDesc *queryDesc) +{ + JitInstrumentation ji = {0}; + + if (!(queryDesc->estate->es_jit_flags & PGJIT_PERFORM)) + return; + + /* + * Work with a copy instead of modifying the leader state, since this + * function may be called twice + */ + if (queryDesc->estate->es_jit) + InstrJitAgg(&ji, &queryDesc->estate->es_jit->instr); + + /* If this process has done JIT in parallel workers, merge stats */ + if (queryDesc->estate->es_jit_worker_instr) + InstrJitAgg(&ji, queryDesc->estate->es_jit_worker_instr); + + ExplainPrintJIT(es, queryDesc->estate->es_jit_flags, &ji, -1); +} + +/* + * ExplainPrintJIT - + * Append information about JITing to es->str. + * + * Can be used to print the JIT instrumentation of the backend (worker_num = + * -1) or that of a specific worker (worker_num = ...). 
+ */ +void +ExplainPrintJIT(ExplainState *es, int jit_flags, + JitInstrumentation *ji, int worker_num) +{ + instr_time total_time; + bool for_workers = (worker_num >= 0); + + /* don't print information if no JITing happened */ + if (!ji || ji->created_functions == 0) + return; + + /* calculate total time */ + INSTR_TIME_SET_ZERO(total_time); + INSTR_TIME_ADD(total_time, ji->generation_counter); + INSTR_TIME_ADD(total_time, ji->inlining_counter); + INSTR_TIME_ADD(total_time, ji->optimization_counter); + INSTR_TIME_ADD(total_time, ji->emission_counter); + + ExplainOpenGroup("JIT", "JIT", true, es); + + /* for higher density, open code the text output format */ + if (es->format == EXPLAIN_FORMAT_TEXT) + { + appendStringInfoSpaces(es->str, es->indent * 2); + if (for_workers) + appendStringInfo(es->str, "JIT for worker %u:\n", worker_num); + else + appendStringInfo(es->str, "JIT:\n"); + es->indent += 1; + + ExplainPropertyInteger("Functions", NULL, ji->created_functions, es); + + appendStringInfoSpaces(es->str, es->indent * 2); + appendStringInfo(es->str, "Options: %s %s, %s %s, %s %s, %s %s\n", + "Inlining", jit_flags & PGJIT_INLINE ? "true" : "false", + "Optimization", jit_flags & PGJIT_OPT3 ? "true" : "false", + "Expressions", jit_flags & PGJIT_EXPR ? "true" : "false", + "Deforming", jit_flags & PGJIT_DEFORM ? "true" : "false"); + + if (es->analyze && es->timing) + { + appendStringInfoSpaces(es->str, es->indent * 2); + appendStringInfo(es->str, + "Timing: %s %.3f ms, %s %.3f ms, %s %.3f ms, %s %.3f ms, %s %.3f ms\n", + "Generation", 1000.0 * INSTR_TIME_GET_DOUBLE(ji->generation_counter), + "Inlining", 1000.0 * INSTR_TIME_GET_DOUBLE(ji->inlining_counter), + "Optimization", 1000.0 * INSTR_TIME_GET_DOUBLE(ji->optimization_counter), + "Emission", 1000.0 * INSTR_TIME_GET_DOUBLE(ji->emission_counter), + "Total", 1000.0 * INSTR_TIME_GET_DOUBLE(total_time)); + } + + es->indent -= 1; + } + else + { + ExplainPropertyInteger("Worker Number", NULL, worker_num, es); + ExplainPropertyInteger("Functions", NULL, ji->created_functions, es); + + ExplainOpenGroup("Options", "Options", true, es); + ExplainPropertyBool("Inlining", jit_flags & PGJIT_INLINE, es); + ExplainPropertyBool("Optimization", jit_flags & PGJIT_OPT3, es); + ExplainPropertyBool("Expressions", jit_flags & PGJIT_EXPR, es); + ExplainPropertyBool("Deforming", jit_flags & PGJIT_DEFORM, es); + ExplainCloseGroup("Options", "Options", true, es); + + if (es->analyze && es->timing) + { + ExplainOpenGroup("Timing", "Timing", true, es); + + ExplainPropertyFloat("Generation", "ms", + 1000.0 * INSTR_TIME_GET_DOUBLE(ji->generation_counter), + 3, es); + ExplainPropertyFloat("Inlining", "ms", + 1000.0 * INSTR_TIME_GET_DOUBLE(ji->inlining_counter), + 3, es); + ExplainPropertyFloat("Optimization", "ms", + 1000.0 * INSTR_TIME_GET_DOUBLE(ji->optimization_counter), + 3, es); + ExplainPropertyFloat("Emission", "ms", + 1000.0 * INSTR_TIME_GET_DOUBLE(ji->emission_counter), + 3, es); + ExplainPropertyFloat("Total", "ms", + 1000.0 * INSTR_TIME_GET_DOUBLE(total_time), + 3, es); + + ExplainCloseGroup("Timing", "Timing", true, es); + } + } + + ExplainCloseGroup("JIT", "JIT", true, es); +} + /* * ExplainQueryText - * add a "Query Text" node that contains the actual text of the query @@ -753,8 +889,9 @@ report_triggers(ResultRelInfo *rInfo, bool show_relname, ExplainState *es) ExplainPropertyText("Constraint Name", conname, es); ExplainPropertyText("Relation", relname, es); if (es->timing) - ExplainPropertyFloat("Time", 1000.0 * instr->total, 3, es); - 
ExplainPropertyFloat("Calls", instr->ntuples, 0, es); + ExplainPropertyFloat("Time", "ms", 1000.0 * instr->total, 3, + es); + ExplainPropertyFloat("Calls", NULL, instr->ntuples, 0, es); } if (conname) @@ -1269,10 +1406,14 @@ ExplainNode(PlanState *planstate, List *ancestors, } else { - ExplainPropertyFloat("Startup Cost", plan->startup_cost, 2, es); - ExplainPropertyFloat("Total Cost", plan->total_cost, 2, es); - ExplainPropertyFloat("Plan Rows", plan->plan_rows, 0, es); - ExplainPropertyInteger("Plan Width", plan->plan_width, es); + ExplainPropertyFloat("Startup Cost", NULL, plan->startup_cost, + 2, es); + ExplainPropertyFloat("Total Cost", NULL, plan->total_cost, + 2, es); + ExplainPropertyFloat("Plan Rows", NULL, plan->plan_rows, + 0, es); + ExplainPropertyInteger("Plan Width", NULL, plan->plan_width, + es); } } @@ -1293,8 +1434,8 @@ ExplainNode(PlanState *planstate, List *ancestors, planstate->instrument && planstate->instrument->nloops > 0) { double nloops = planstate->instrument->nloops; - double startup_sec = 1000.0 * planstate->instrument->startup / nloops; - double total_sec = 1000.0 * planstate->instrument->total / nloops; + double startup_ms = 1000.0 * planstate->instrument->startup / nloops; + double total_ms = 1000.0 * planstate->instrument->total / nloops; double rows = planstate->instrument->ntuples / nloops; if (es->format == EXPLAIN_FORMAT_TEXT) @@ -1302,7 +1443,7 @@ ExplainNode(PlanState *planstate, List *ancestors, if (es->timing) appendStringInfo(es->str, " (actual time=%.3f..%.3f rows=%.0f loops=%.0f)", - startup_sec, total_sec, rows, nloops); + startup_ms, total_ms, rows, nloops); else appendStringInfo(es->str, " (actual rows=%.0f loops=%.0f)", @@ -1312,11 +1453,13 @@ ExplainNode(PlanState *planstate, List *ancestors, { if (es->timing) { - ExplainPropertyFloat("Actual Startup Time", startup_sec, 3, es); - ExplainPropertyFloat("Actual Total Time", total_sec, 3, es); + ExplainPropertyFloat("Actual Startup Time", "s", startup_ms, + 3, es); + ExplainPropertyFloat("Actual Total Time", "s", total_ms, + 3, es); } - ExplainPropertyFloat("Actual Rows", rows, 0, es); - ExplainPropertyFloat("Actual Loops", nloops, 0, es); + ExplainPropertyFloat("Actual Rows", NULL, rows, 0, es); + ExplainPropertyFloat("Actual Loops", NULL, nloops, 0, es); } } else if (es->analyze) @@ -1327,11 +1470,11 @@ ExplainNode(PlanState *planstate, List *ancestors, { if (es->timing) { - ExplainPropertyFloat("Actual Startup Time", 0.0, 3, es); - ExplainPropertyFloat("Actual Total Time", 0.0, 3, es); + ExplainPropertyFloat("Actual Startup Time", "ms", 0.0, 3, es); + ExplainPropertyFloat("Actual Total Time", "ms", 0.0, 3, es); } - ExplainPropertyFloat("Actual Rows", 0.0, 0, es); - ExplainPropertyFloat("Actual Loops", 0.0, 0, es); + ExplainPropertyFloat("Actual Rows", NULL, 0.0, 0, es); + ExplainPropertyFloat("Actual Loops", NULL, 0.0, 0, es); } } @@ -1389,8 +1532,8 @@ ExplainNode(PlanState *planstate, List *ancestors, show_instrumentation_count("Rows Removed by Filter", 1, planstate, es); if (es->analyze) - ExplainPropertyLong("Heap Fetches", - ((IndexOnlyScanState *) planstate)->ioss_HeapFetches, es); + ExplainPropertyFloat("Heap Fetches", NULL, + planstate->instrument->ntuples2, 0, es); break; case T_BitmapIndexScan: show_scan_qual(((BitmapIndexScan *) plan)->indexqualorig, @@ -1412,7 +1555,8 @@ ExplainNode(PlanState *planstate, List *ancestors, case T_SampleScan: show_tablesample(((SampleScan *) plan)->tablesample, planstate, ancestors, es); - /* FALL THRU to print additional fields the same as SeqScan */ 
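With ANALYZE and TIMING enabled, the text branch of ExplainPrintJIT added above renders a block shaped like the following; the figures are purely illustrative.

JIT:
  Functions: 9
  Options: Inlining false, Optimization false, Expressions true, Deforming true
  Timing: Generation 1.234 ms, Inlining 0.000 ms, Optimization 0.567 ms, Emission 8.901 ms, Total 10.702 ms

For a parallel worker the heading becomes "JIT for worker N:", per the for_workers branch above.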
+ /* fall through to print additional fields the same as SeqScan */ + /* FALLTHROUGH */ case T_SeqScan: case T_ValuesScan: case T_CteScan: @@ -1432,16 +1576,40 @@ ExplainNode(PlanState *planstate, List *ancestors, if (plan->qual) show_instrumentation_count("Rows Removed by Filter", 1, planstate, es); - ExplainPropertyInteger("Workers Planned", + ExplainPropertyInteger("Workers Planned", NULL, gather->num_workers, es); + + /* Show params evaluated at gather node */ + if (gather->initParam) + show_eval_params(gather->initParam, es); + if (es->analyze) { int nworkers; nworkers = ((GatherState *) planstate)->nworkers_launched; - ExplainPropertyInteger("Workers Launched", + ExplainPropertyInteger("Workers Launched", NULL, nworkers, es); } + + /* + * Print per-worker Jit instrumentation. Use same conditions + * as for the leader's JIT instrumentation, see comment there. + */ + if (es->costs && es->verbose && + outerPlanState(planstate)->worker_jit_instrument) + { + PlanState *child = outerPlanState(planstate); + int n; + SharedJitInstrumentation *w = child->worker_jit_instrument; + + for (n = 0; n < w->num_workers; ++n) + { + ExplainPrintJIT(es, child->state->es_jit_flags, + &w->jit_instr[n], n); + } + } + if (gather->single_copy || es->format != EXPLAIN_FORMAT_TEXT) ExplainPropertyBool("Single Copy", gather->single_copy, es); } @@ -1454,14 +1622,19 @@ ExplainNode(PlanState *planstate, List *ancestors, if (plan->qual) show_instrumentation_count("Rows Removed by Filter", 1, planstate, es); - ExplainPropertyInteger("Workers Planned", + ExplainPropertyInteger("Workers Planned", NULL, gm->num_workers, es); + + /* Show params evaluated at gather-merge node */ + if (gm->initParam) + show_eval_params(gm->initParam, es); + if (es->analyze) { int nworkers; nworkers = ((GatherMergeState *) planstate)->nworkers_launched; - ExplainPropertyInteger("Workers Launched", + ExplainPropertyInteger("Workers Launched", NULL, nworkers, es); } } @@ -1631,14 +1804,14 @@ ExplainNode(PlanState *planstate, List *ancestors, { Instrumentation *instrument = &w->instrument[n]; double nloops = instrument->nloops; - double startup_sec; - double total_sec; + double startup_ms; + double total_ms; double rows; if (nloops <= 0) continue; - startup_sec = 1000.0 * instrument->startup / nloops; - total_sec = 1000.0 * instrument->total / nloops; + startup_ms = 1000.0 * instrument->startup / nloops; + total_ms = 1000.0 * instrument->total / nloops; rows = instrument->ntuples / nloops; if (es->format == EXPLAIN_FORMAT_TEXT) @@ -1648,7 +1821,7 @@ ExplainNode(PlanState *planstate, List *ancestors, if (es->timing) appendStringInfo(es->str, "actual time=%.3f..%.3f rows=%.0f loops=%.0f\n", - startup_sec, total_sec, rows, nloops); + startup_ms, total_ms, rows, nloops); else appendStringInfo(es->str, "actual rows=%.0f loops=%.0f\n", @@ -1666,15 +1839,17 @@ ExplainNode(PlanState *planstate, List *ancestors, opened_group = true; } ExplainOpenGroup("Worker", NULL, true, es); - ExplainPropertyInteger("Worker Number", n, es); + ExplainPropertyInteger("Worker Number", NULL, n, es); if (es->timing) { - ExplainPropertyFloat("Actual Startup Time", startup_sec, 3, es); - ExplainPropertyFloat("Actual Total Time", total_sec, 3, es); + ExplainPropertyFloat("Actual Startup Time", "ms", + startup_ms, 3, es); + ExplainPropertyFloat("Actual Total Time", "ms", + total_ms, 3, es); } - ExplainPropertyFloat("Actual Rows", rows, 0, es); - ExplainPropertyFloat("Actual Loops", nloops, 0, es); + ExplainPropertyFloat("Actual Rows", NULL, rows, 0, es); + 
ExplainPropertyFloat("Actual Loops", NULL, nloops, 0, es); if (es->buffers) show_buffer_usage(es, &instrument->bufusage); @@ -1725,28 +1900,33 @@ ExplainNode(PlanState *planstate, List *ancestors, switch (nodeTag(plan)) { case T_ModifyTable: - ExplainMemberNodes(((ModifyTable *) plan)->plans, - ((ModifyTableState *) planstate)->mt_plans, + ExplainMemberNodes(((ModifyTableState *) planstate)->mt_plans, + ((ModifyTableState *) planstate)->mt_nplans, + list_length(((ModifyTable *) plan)->plans), ancestors, es); break; case T_Append: - ExplainMemberNodes(((Append *) plan)->appendplans, - ((AppendState *) planstate)->appendplans, + ExplainMemberNodes(((AppendState *) planstate)->appendplans, + ((AppendState *) planstate)->as_nplans, + list_length(((Append *) plan)->appendplans), ancestors, es); break; case T_MergeAppend: - ExplainMemberNodes(((MergeAppend *) plan)->mergeplans, - ((MergeAppendState *) planstate)->mergeplans, + ExplainMemberNodes(((MergeAppendState *) planstate)->mergeplans, + ((MergeAppendState *) planstate)->ms_nplans, + list_length(((MergeAppend *) plan)->mergeplans), ancestors, es); break; case T_BitmapAnd: - ExplainMemberNodes(((BitmapAnd *) plan)->bitmapplans, - ((BitmapAndState *) planstate)->bitmapplans, + ExplainMemberNodes(((BitmapAndState *) planstate)->bitmapplans, + ((BitmapAndState *) planstate)->nplans, + list_length(((BitmapAnd *) plan)->bitmapplans), ancestors, es); break; case T_BitmapOr: - ExplainMemberNodes(((BitmapOr *) plan)->bitmapplans, - ((BitmapOrState *) planstate)->bitmapplans, + ExplainMemberNodes(((BitmapOrState *) planstate)->bitmapplans, + ((BitmapOrState *) planstate)->nplans, + list_length(((BitmapOr *) plan)->bitmapplans), ancestors, es); break; case T_SubqueryScan: @@ -2279,15 +2459,21 @@ show_tablesample(TableSampleClause *tsc, PlanState *planstate, static void show_sort_info(SortState *sortstate, ExplainState *es) { - if (es->analyze && sortstate->sort_Done && - sortstate->tuplesortstate != NULL) + if (!es->analyze) + return; + + if (sortstate->sort_Done && sortstate->tuplesortstate != NULL) { Tuplesortstate *state = (Tuplesortstate *) sortstate->tuplesortstate; + TuplesortInstrumentation stats; const char *sortMethod; const char *spaceType; long spaceUsed; - tuplesort_get_stats(state, &sortMethod, &spaceType, &spaceUsed); + tuplesort_get_stats(state, &stats); + sortMethod = tuplesort_method_name(stats.sortMethod); + spaceType = tuplesort_space_type_name(stats.spaceType); + spaceUsed = stats.spaceUsed; if (es->format == EXPLAIN_FORMAT_TEXT) { @@ -2298,10 +2484,55 @@ show_sort_info(SortState *sortstate, ExplainState *es) else { ExplainPropertyText("Sort Method", sortMethod, es); - ExplainPropertyLong("Sort Space Used", spaceUsed, es); + ExplainPropertyInteger("Sort Space Used", "kB", spaceUsed, es); ExplainPropertyText("Sort Space Type", spaceType, es); } } + + if (sortstate->shared_info != NULL) + { + int n; + bool opened_group = false; + + for (n = 0; n < sortstate->shared_info->num_workers; n++) + { + TuplesortInstrumentation *sinstrument; + const char *sortMethod; + const char *spaceType; + long spaceUsed; + + sinstrument = &sortstate->shared_info->sinstrument[n]; + if (sinstrument->sortMethod == SORT_TYPE_STILL_IN_PROGRESS) + continue; /* ignore any unfilled slots */ + sortMethod = tuplesort_method_name(sinstrument->sortMethod); + spaceType = tuplesort_space_type_name(sinstrument->spaceType); + spaceUsed = sinstrument->spaceUsed; + + if (es->format == EXPLAIN_FORMAT_TEXT) + { + appendStringInfoSpaces(es->str, es->indent * 2); + 
appendStringInfo(es->str, + "Worker %d: Sort Method: %s %s: %ldkB\n", + n, sortMethod, spaceType, spaceUsed); + } + else + { + if (!opened_group) + { + ExplainOpenGroup("Workers", "Workers", false, es); + opened_group = true; + } + ExplainOpenGroup("Worker", NULL, true, es); + ExplainPropertyInteger("Worker Number", NULL, n, es); + ExplainPropertyText("Sort Method", sortMethod, es); + ExplainPropertyInteger("Sort Space Used", "kB", spaceUsed, es); + ExplainPropertyText("Sort Space Type", spaceType, es); + ExplainCloseGroup("Worker", NULL, true, es); + } + } + if (opened_group) + ExplainCloseGroup("Workers", "Workers", false, es); + } } /* @@ -2310,34 +2541,90 @@ show_sort_info(SortState *sortstate, ExplainState *es) static void show_hash_info(HashState *hashstate, ExplainState *es) { - HashJoinTable hashtable; + HashInstrumentation hinstrument = {0}; + + /* + * In a parallel query, the leader process may or may not have run the + * hash join, and even if it did it may not have built a hash table due to + * timing (if it started late it might have seen no tuples in the outer + * relation and skipped building the hash table). Therefore we have to be + * prepared to get instrumentation data from all participants. + */ + if (hashstate->hashtable) + ExecHashGetInstrumentation(&hinstrument, hashstate->hashtable); + + /* + * Merge results from workers. In the parallel-oblivious case, the + * results from all participants should be identical, except where + * participants didn't run the join at all so have no data. In the + * parallel-aware case, we need to consider all the results. Each worker + * may have seen a different subset of batches and we want to find the + * highest memory usage for any one batch across all batches. + */ + if (hashstate->shared_info) + { + SharedHashInfo *shared_info = hashstate->shared_info; + int i; + + for (i = 0; i < shared_info->num_workers; ++i) + { + HashInstrumentation *worker_hi = &shared_info->hinstrument[i]; + + if (worker_hi->nbatch > 0) + { + /* + * Every participant should agree on the buckets, so to be + * sure we have a value we'll just overwrite each time. + */ + hinstrument.nbuckets = worker_hi->nbuckets; + hinstrument.nbuckets_original = worker_hi->nbuckets_original; - hashtable = hashstate->hashtable; + /* + * Normally every participant should agree on the number of + * batches too, but it's possible for a backend that started + * late and missed the whole join not to have the final nbatch + * number. So we'll take the largest number. + */ + hinstrument.nbatch = Max(hinstrument.nbatch, worker_hi->nbatch); + hinstrument.nbatch_original = worker_hi->nbatch_original; - if (hashtable) + /* + * In a parallel-aware hash join, for now we report the + * maximum peak memory reported by any worker. 
+ */ + hinstrument.space_peak = + Max(hinstrument.space_peak, worker_hi->space_peak); + } + } + } + + if (hinstrument.nbatch > 0) { - long spacePeakKb = (hashtable->spacePeak + 1023) / 1024; + long spacePeakKb = (hinstrument.space_peak + 1023) / 1024; if (es->format != EXPLAIN_FORMAT_TEXT) { - ExplainPropertyLong("Hash Buckets", hashtable->nbuckets, es); - ExplainPropertyLong("Original Hash Buckets", - hashtable->nbuckets_original, es); - ExplainPropertyLong("Hash Batches", hashtable->nbatch, es); - ExplainPropertyLong("Original Hash Batches", - hashtable->nbatch_original, es); - ExplainPropertyLong("Peak Memory Usage", spacePeakKb, es); + ExplainPropertyInteger("Hash Buckets", NULL, + hinstrument.nbuckets, es); + ExplainPropertyInteger("Original Hash Buckets", NULL, + hinstrument.nbuckets_original, es); + ExplainPropertyInteger("Hash Batches", NULL, + hinstrument.nbatch, es); + ExplainPropertyInteger("Original Hash Batches", NULL, + hinstrument.nbatch_original, es); + ExplainPropertyInteger("Peak Memory Usage", "kB", + spacePeakKb, es); } - else if (hashtable->nbatch_original != hashtable->nbatch || - hashtable->nbuckets_original != hashtable->nbuckets) + else if (hinstrument.nbatch_original != hinstrument.nbatch || + hinstrument.nbuckets_original != hinstrument.nbuckets) { appendStringInfoSpaces(es->str, es->indent * 2); appendStringInfo(es->str, "Buckets: %d (originally %d) Batches: %d (originally %d) Memory Usage: %ldkB\n", - hashtable->nbuckets, - hashtable->nbuckets_original, - hashtable->nbatch, - hashtable->nbatch_original, + hinstrument.nbuckets, + hinstrument.nbuckets_original, + hinstrument.nbatch, + hinstrument.nbatch_original, spacePeakKb); } else @@ -2345,7 +2632,7 @@ show_hash_info(HashState *hashstate, ExplainState *es) appendStringInfoSpaces(es->str, es->indent * 2); appendStringInfo(es->str, "Buckets: %d Batches: %d Memory Usage: %ldkB\n", - hashtable->nbuckets, hashtable->nbatch, + hinstrument.nbuckets, hinstrument.nbatch, spacePeakKb); } } @@ -2359,8 +2646,10 @@ show_tidbitmap_info(BitmapHeapScanState *planstate, ExplainState *es) { if (es->format != EXPLAIN_FORMAT_TEXT) { - ExplainPropertyLong("Exact Heap Blocks", planstate->exact_pages, es); - ExplainPropertyLong("Lossy Heap Blocks", planstate->lossy_pages, es); + ExplainPropertyInteger("Exact Heap Blocks", NULL, + planstate->exact_pages, es); + ExplainPropertyInteger("Lossy Heap Blocks", NULL, + planstate->lossy_pages, es); } else { @@ -2402,9 +2691,9 @@ show_instrumentation_count(const char *qlabel, int which, if (nfiltered > 0 || es->format != EXPLAIN_FORMAT_TEXT) { if (nloops > 0) - ExplainPropertyFloat(qlabel, nfiltered / nloops, 0, es); + ExplainPropertyFloat(qlabel, NULL, nfiltered / nloops, 0, es); else - ExplainPropertyFloat(qlabel, 0.0, 0, es); + ExplainPropertyFloat(qlabel, NULL, 0.0, 0, es); } } @@ -2429,6 +2718,29 @@ show_foreignscan_info(ForeignScanState *fsstate, ExplainState *es) } } +/* + * Show initplan params evaluated at Gather or Gather Merge node. 
+ */ +static void +show_eval_params(Bitmapset *bms_params, ExplainState *es) +{ + int paramid = -1; + List *params = NIL; + + Assert(bms_params); + + while ((paramid = bms_next_member(bms_params, paramid)) >= 0) + { + char param[32]; + + snprintf(param, sizeof(param), "$%d", paramid); + params = lappend(params, pstrdup(param)); + } + + if (params) + ExplainPropertyList("Params Evaluated", params, es); +} + /* * Fetch the name of an index in an EXPLAIN * @@ -2547,20 +2859,34 @@ show_buffer_usage(ExplainState *es, const BufferUsage *usage) } else { - ExplainPropertyLong("Shared Hit Blocks", usage->shared_blks_hit, es); - ExplainPropertyLong("Shared Read Blocks", usage->shared_blks_read, es); - ExplainPropertyLong("Shared Dirtied Blocks", usage->shared_blks_dirtied, es); - ExplainPropertyLong("Shared Written Blocks", usage->shared_blks_written, es); - ExplainPropertyLong("Local Hit Blocks", usage->local_blks_hit, es); - ExplainPropertyLong("Local Read Blocks", usage->local_blks_read, es); - ExplainPropertyLong("Local Dirtied Blocks", usage->local_blks_dirtied, es); - ExplainPropertyLong("Local Written Blocks", usage->local_blks_written, es); - ExplainPropertyLong("Temp Read Blocks", usage->temp_blks_read, es); - ExplainPropertyLong("Temp Written Blocks", usage->temp_blks_written, es); + ExplainPropertyInteger("Shared Hit Blocks", NULL, + usage->shared_blks_hit, es); + ExplainPropertyInteger("Shared Read Blocks", NULL, + usage->shared_blks_read, es); + ExplainPropertyInteger("Shared Dirtied Blocks", NULL, + usage->shared_blks_dirtied, es); + ExplainPropertyInteger("Shared Written Blocks", NULL, + usage->shared_blks_written, es); + ExplainPropertyInteger("Local Hit Blocks", NULL, + usage->local_blks_hit, es); + ExplainPropertyInteger("Local Read Blocks", NULL, + usage->local_blks_read, es); + ExplainPropertyInteger("Local Dirtied Blocks", NULL, + usage->local_blks_dirtied, es); + ExplainPropertyInteger("Local Written Blocks", NULL, + usage->local_blks_written, es); + ExplainPropertyInteger("Temp Read Blocks", NULL, + usage->temp_blks_read, es); + ExplainPropertyInteger("Temp Written Blocks", NULL, + usage->temp_blks_written, es); if (track_io_timing) { - ExplainPropertyFloat("I/O Read Time", INSTR_TIME_GET_MILLISEC(usage->blk_read_time), 3, es); - ExplainPropertyFloat("I/O Write Time", INSTR_TIME_GET_MILLISEC(usage->blk_write_time), 3, es); + ExplainPropertyFloat("I/O Read Time", "ms", + INSTR_TIME_GET_MILLISEC(usage->blk_read_time), + 3, es); + ExplainPropertyFloat("I/O Write Time", "ms", + INSTR_TIME_GET_MILLISEC(usage->blk_write_time), + 3, es); } } } @@ -2860,10 +3186,10 @@ show_modifytable_info(ModifyTableState *mtstate, List *ancestors, if (node->onConflictAction != ONCONFLICT_NONE) { - ExplainProperty("Conflict Resolution", - node->onConflictAction == ONCONFLICT_NOTHING ? - "NOTHING" : "UPDATE", - false, es); + ExplainPropertyText("Conflict Resolution", + node->onConflictAction == ONCONFLICT_NOTHING ? 
+ "NOTHING" : "UPDATE", + es); /* * Don't display arbiter indexes at all when DO NOTHING variant @@ -2891,11 +3217,13 @@ show_modifytable_info(ModifyTableState *mtstate, List *ancestors, /* count the number of source rows */ total = mtstate->mt_plans[0]->instrument->ntuples; - other_path = mtstate->ps.instrument->nfiltered2; + other_path = mtstate->ps.instrument->ntuples2; insert_path = total - other_path; - ExplainPropertyFloat("Tuples Inserted", insert_path, 0, es); - ExplainPropertyFloat("Conflicting Tuples", other_path, 0, es); + ExplainPropertyFloat("Tuples Inserted", NULL, + insert_path, 0, es); + ExplainPropertyFloat("Conflicting Tuples", NULL, + other_path, 0, es); } } @@ -2909,18 +3237,28 @@ show_modifytable_info(ModifyTableState *mtstate, List *ancestors, * * The ancestors list should already contain the immediate parent of these * plans. - * - * Note: we don't actually need to examine the Plan list members, but - * we need the list in order to determine the length of the PlanState array. +* +* nsubnodes indicates the number of items in the planstates array. +* nplans indicates the original number of subnodes in the Plan, some of these +* may have been pruned by the run-time pruning code. */ static void -ExplainMemberNodes(List *plans, PlanState **planstates, +ExplainMemberNodes(PlanState **planstates, int nsubnodes, int nplans, List *ancestors, ExplainState *es) { - int nplans = list_length(plans); int j; - for (j = 0; j < nplans; j++) + /* + * The number of subnodes being lower than the number of subplans that was + * specified in the plan means that some subnodes have been ignored per + * instruction for the partition pruning code during the executor + * initialization. To make this a bit less mysterious, we'll indicate + * here that this has happened. + */ + if (nsubnodes < nplans) + ExplainPropertyInteger("Subplans Removed", NULL, nplans - nsubnodes, es); + + for (j = 0; j < nsubnodes; j++) ExplainNode(planstates[j], ancestors, "Member", NULL, es); } @@ -3098,18 +3436,23 @@ ExplainPropertyListNested(const char *qlabel, List *data, ExplainState *es) * If "numeric" is true, the value is a number (or other value that * doesn't need quoting in JSON). * + * If unit is non-NULL the text format will display it after the value. + * * This usually should not be invoked directly, but via one of the datatype * specific routines ExplainPropertyText, ExplainPropertyInteger, etc. */ static void -ExplainProperty(const char *qlabel, const char *value, bool numeric, - ExplainState *es) +ExplainProperty(const char *qlabel, const char *unit, const char *value, + bool numeric, ExplainState *es) { switch (es->format) { case EXPLAIN_FORMAT_TEXT: appendStringInfoSpaces(es->str, es->indent * 2); - appendStringInfo(es->str, "%s: %s\n", qlabel, value); + if (unit) + appendStringInfo(es->str, "%s: %s %s\n", qlabel, value, unit); + else + appendStringInfo(es->str, "%s: %s\n", qlabel, value); break; case EXPLAIN_FORMAT_XML: @@ -3154,31 +3497,20 @@ ExplainProperty(const char *qlabel, const char *value, bool numeric, void ExplainPropertyText(const char *qlabel, const char *value, ExplainState *es) { - ExplainProperty(qlabel, value, false, es); + ExplainProperty(qlabel, NULL, value, false, es); } /* * Explain an integer-valued property. */ void -ExplainPropertyInteger(const char *qlabel, int value, ExplainState *es) -{ - char buf[32]; - - snprintf(buf, sizeof(buf), "%d", value); - ExplainProperty(qlabel, buf, true, es); -} - -/* - * Explain a long-integer-valued property. 
- */ -void -ExplainPropertyLong(const char *qlabel, long value, ExplainState *es) +ExplainPropertyInteger(const char *qlabel, const char *unit, int64 value, + ExplainState *es) { char buf[32]; - snprintf(buf, sizeof(buf), "%ld", value); - ExplainProperty(qlabel, buf, true, es); + snprintf(buf, sizeof(buf), INT64_FORMAT, value); + ExplainProperty(qlabel, unit, buf, true, es); } /* @@ -3186,13 +3518,14 @@ ExplainPropertyLong(const char *qlabel, long value, ExplainState *es) * fractional digits. */ void -ExplainPropertyFloat(const char *qlabel, double value, int ndigits, - ExplainState *es) +ExplainPropertyFloat(const char *qlabel, const char *unit, double value, + int ndigits, ExplainState *es) { - char buf[256]; + char *buf; - snprintf(buf, sizeof(buf), "%.*f", ndigits, value); - ExplainProperty(qlabel, buf, true, es); + buf = psprintf("%.*f", ndigits, value); + ExplainProperty(qlabel, unit, buf, true, es); + pfree(buf); } /* @@ -3201,7 +3534,7 @@ ExplainPropertyFloat(const char *qlabel, double value, int ndigits, void ExplainPropertyBool(const char *qlabel, bool value, ExplainState *es) { - ExplainProperty(qlabel, value ? "true" : "false", true, es); + ExplainProperty(qlabel, NULL, value ? "true" : "false", true, es); } /* @@ -3213,7 +3546,7 @@ ExplainPropertyBool(const char *qlabel, bool value, ExplainState *es) * If labeled is true, the group members will be labeled properties, * while if it's false, they'll be unlabeled objects. */ -static void +void ExplainOpenGroup(const char *objtype, const char *labelname, bool labeled, ExplainState *es) { @@ -3276,7 +3609,7 @@ ExplainOpenGroup(const char *objtype, const char *labelname, * Close a group of related objects. * Parameters must match the corresponding ExplainOpenGroup call. */ -static void +void ExplainCloseGroup(const char *objtype, const char *labelname, bool labeled, ExplainState *es) { diff --git a/src/backend/commands/extension.c b/src/backend/commands/extension.c index e4340eed8c..2d761a5773 100644 --- a/src/backend/commands/extension.c +++ b/src/backend/commands/extension.c @@ -12,7 +12,7 @@ * postgresql.conf and recovery.conf. An extension also has an installation * script file, containing SQL commands to create the extension's objects. * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * @@ -683,8 +683,6 @@ read_extension_script_file(const ExtensionControlFile *control, /* * Execute given SQL string. * - * filename is used only to report errors. - * * Note: it's tempting to just use SPI to execute the string, but that does * not work very well. The really serious problem is that SPI will parse, * analyze, and plan the whole string before executing any of it; of course @@ -694,7 +692,7 @@ read_extension_script_file(const ExtensionControlFile *control, * could be very long. 
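The explain.c hunks above drop ExplainPropertyLong() and give ExplainPropertyInteger()/ExplainPropertyFloat() a unit argument plus 64-bit and psprintf-built values. As a rough illustration of the post-patch API only, here is a minimal sketch of a helper that an extension or custom plan node might use to emit its own properties; the helper name and the two counters are hypothetical, and only the ExplainProperty* signatures come from the hunks above.

    #include "postgres.h"
    #include "commands/explain.h"

    /*
     * Hypothetical helper: emit two counters through the post-patch
     * ExplainProperty API.  A NULL unit prints a bare value; a non-NULL
     * unit is appended in text format, e.g. "I/O Wait Time: 1.234 ms".
     */
    static void
    show_my_counters(int64 tuples_skipped, double io_wait_ms, ExplainState *es)
    {
        ExplainPropertyInteger("Tuples Skipped", NULL, tuples_skipped, es);
        ExplainPropertyFloat("I/O Wait Time", "ms", io_wait_ms, 3, es);
    }

Per the ExplainProperty() change above, the unit string only affects EXPLAIN's text format; the structured formats emit the value as a plain number, matching the show_buffer_usage() hunks.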
*/ static void -execute_sql_string(const char *sql, const char *filename) +execute_sql_string(const char *sql) { List *raw_parsetree_list; DestReceiver *dest; @@ -921,7 +919,7 @@ execute_extension_script(Oid extensionOid, ExtensionControlFile *control, /* And now back to C string */ c_sql = text_to_cstring(DatumGetTextPP(t_sql)); - execute_sql_string(c_sql, filename); + execute_sql_string(c_sql); } PG_CATCH(); { @@ -1266,8 +1264,8 @@ find_install_path(List *evi_list, ExtensionVersionInfo *evi_target, static ObjectAddress CreateExtensionInternal(char *extensionName, char *schemaName, - char *versionName, - char *oldVersionName, + const char *versionName, + const char *oldVersionName, bool cascade, List *parents, bool is_create) @@ -2704,13 +2702,13 @@ AlterExtensionNamespace(const char *extensionName, const char *newschema, Oid *o * check ownership of the individual member objects ... */ if (!pg_extension_ownercheck(extensionOid, GetUserId())) - aclcheck_error(ACLCHECK_NOT_OWNER, ACL_KIND_EXTENSION, + aclcheck_error(ACLCHECK_NOT_OWNER, OBJECT_EXTENSION, extensionName); /* Permission check: must have creation rights in target namespace */ aclresult = pg_namespace_aclcheck(nspOid, GetUserId(), ACL_CREATE); if (aclresult != ACLCHECK_OK) - aclcheck_error(aclresult, ACL_KIND_NAMESPACE, newschema); + aclcheck_error(aclresult, OBJECT_SCHEMA, newschema); /* * If the schema is currently a member of the extension, disallow moving @@ -2924,7 +2922,7 @@ ExecAlterExtensionStmt(ParseState *pstate, AlterExtensionStmt *stmt) /* Permission check: must own extension */ if (!pg_extension_ownercheck(extensionOid, GetUserId())) - aclcheck_error(ACLCHECK_NOT_OWNER, ACL_KIND_EXTENSION, + aclcheck_error(ACLCHECK_NOT_OWNER, OBJECT_EXTENSION, stmt->extname); /* @@ -3182,7 +3180,7 @@ ExecAlterExtensionContentsStmt(AlterExtensionContentsStmt *stmt, /* Permission check: must own extension */ if (!pg_extension_ownercheck(extension.objectId, GetUserId())) - aclcheck_error(ACLCHECK_NOT_OWNER, ACL_KIND_EXTENSION, + aclcheck_error(ACLCHECK_NOT_OWNER, OBJECT_EXTENSION, stmt->extname); /* diff --git a/src/backend/commands/foreigncmds.c b/src/backend/commands/foreigncmds.c index 9ad991507f..e5dd9958a4 100644 --- a/src/backend/commands/foreigncmds.c +++ b/src/backend/commands/foreigncmds.c @@ -3,7 +3,7 @@ * foreigncmds.c * foreign-data wrapper/server creation/manipulation commands * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * * * IDENTIFICATION @@ -358,7 +358,7 @@ AlterForeignServerOwner_internal(Relation rel, HeapTuple tup, Oid newOwnerId) /* Must be owner */ if (!pg_foreign_server_ownercheck(srvId, GetUserId())) - aclcheck_error(ACLCHECK_NOT_OWNER, ACL_KIND_FOREIGN_SERVER, + aclcheck_error(ACLCHECK_NOT_OWNER, OBJECT_FOREIGN_SERVER, NameStr(form->srvname)); /* Must be able to become new owner */ @@ -370,7 +370,7 @@ AlterForeignServerOwner_internal(Relation rel, HeapTuple tup, Oid newOwnerId) { ForeignDataWrapper *fdw = GetForeignDataWrapper(form->srvfdw); - aclcheck_error(aclresult, ACL_KIND_FDW, fdw->fdwname); + aclcheck_error(aclresult, OBJECT_FDW, fdw->fdwname); } } @@ -907,7 +907,7 @@ CreateForeignServer(CreateForeignServerStmt *stmt) aclresult = pg_foreign_data_wrapper_aclcheck(fdw->fdwid, ownerId, ACL_USAGE); if (aclresult != ACLCHECK_OK) - aclcheck_error(aclresult, ACL_KIND_FDW, fdw->fdwname); + aclcheck_error(aclresult, OBJECT_FDW, fdw->fdwname); /* * Insert tuple into pg_foreign_server. 
@@ -1010,7 +1010,7 @@ AlterForeignServer(AlterForeignServerStmt *stmt) * Only owner or a superuser can ALTER a SERVER. */ if (!pg_foreign_server_ownercheck(srvId, GetUserId())) - aclcheck_error(ACLCHECK_NOT_OWNER, ACL_KIND_FOREIGN_SERVER, + aclcheck_error(ACLCHECK_NOT_OWNER, OBJECT_FOREIGN_SERVER, stmt->servername); memset(repl_val, 0, sizeof(repl_val)); @@ -1119,10 +1119,10 @@ user_mapping_ddl_aclcheck(Oid umuserid, Oid serverid, const char *servername) aclresult = pg_foreign_server_aclcheck(serverid, curuserid, ACL_USAGE); if (aclresult != ACLCHECK_OK) - aclcheck_error(aclresult, ACL_KIND_FOREIGN_SERVER, servername); + aclcheck_error(aclresult, OBJECT_FOREIGN_SERVER, servername); } else - aclcheck_error(ACLCHECK_NOT_OWNER, ACL_KIND_FOREIGN_SERVER, + aclcheck_error(ACLCHECK_NOT_OWNER, OBJECT_FOREIGN_SERVER, servername); } } @@ -1231,8 +1231,12 @@ CreateUserMapping(CreateUserMappingStmt *stmt) recordDependencyOnOwner(UserMappingRelationId, umId, useId); } - /* dependency on extension */ - recordDependencyOnCurrentExtension(&myself, false); + /* + * Perhaps someday there should be a recordDependencyOnCurrentExtension + * call here; but since roles aren't members of extensions, it seems like + * user mappings shouldn't be either. Note that the grammar and pg_dump + * would need to be extended too if we change this. + */ /* Post creation hook for new user mapping */ InvokeObjectPostCreateHook(UserMappingRelationId, umId, 0); @@ -1477,7 +1481,7 @@ CreateForeignTable(CreateForeignTableStmt *stmt, Oid relid) server = GetForeignServerByName(stmt->servername, false); aclresult = pg_foreign_server_aclcheck(server->serverid, ownerId, ACL_USAGE); if (aclresult != ACLCHECK_OK) - aclcheck_error(aclresult, ACL_KIND_FOREIGN_SERVER, server->servername); + aclcheck_error(aclresult, OBJECT_FOREIGN_SERVER, server->servername); fdw = GetForeignDataWrapper(server->fdwid); @@ -1536,7 +1540,7 @@ ImportForeignSchema(ImportForeignSchemaStmt *stmt) server = GetForeignServerByName(stmt->server_name, false); aclresult = pg_foreign_server_aclcheck(server->serverid, GetUserId(), ACL_USAGE); if (aclresult != ACLCHECK_OK) - aclcheck_error(aclresult, ACL_KIND_FOREIGN_SERVER, server->servername); + aclcheck_error(aclresult, OBJECT_FOREIGN_SERVER, server->servername); /* Check that the schema exists and we have CREATE permissions on it */ (void) LookupCreationNamespace(stmt->local_schema); diff --git a/src/backend/commands/functioncmds.c b/src/backend/commands/functioncmds.c index 7de844b2ca..3925fb83a5 100644 --- a/src/backend/commands/functioncmds.c +++ b/src/backend/commands/functioncmds.c @@ -5,7 +5,7 @@ * Routines for CREATE and DROP FUNCTION commands and CREATE and DROP * CAST commands. 
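A recurring mechanical change in these files is that aclcheck_error() now takes an ObjectType value (OBJECT_SCHEMA, OBJECT_FUNCTION, OBJECT_LANGUAGE, and so on) instead of the former ACL_KIND_* constants. A minimal sketch of the post-patch idiom, assuming the usual backend headers; the wrapper function and its argument are illustrative only and not part of the patch.

    #include "postgres.h"
    #include "miscadmin.h"
    #include "utils/acl.h"
    #include "utils/lsyscache.h"

    /*
     * Illustrative permission check using the reworked aclcheck_error():
     * the second argument is now an ObjectType rather than an ACL_KIND_*
     * value.
     */
    static void
    check_schema_create(Oid namespaceId)
    {
        AclResult   aclresult;

        aclresult = pg_namespace_aclcheck(namespaceId, GetUserId(), ACL_CREATE);
        if (aclresult != ACLCHECK_OK)
            aclcheck_error(aclresult, OBJECT_SCHEMA,
                           get_namespace_name(namespaceId));
    }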
* - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * @@ -44,27 +44,32 @@ #include "catalog/pg_language.h" #include "catalog/pg_namespace.h" #include "catalog/pg_proc.h" -#include "catalog/pg_proc_fn.h" #include "catalog/pg_transform.h" #include "catalog/pg_type.h" -#include "catalog/pg_type_fn.h" #include "commands/alter.h" #include "commands/defrem.h" #include "commands/proclang.h" +#include "executor/execdesc.h" +#include "executor/executor.h" +#include "funcapi.h" #include "miscadmin.h" +#include "optimizer/clauses.h" #include "optimizer/var.h" #include "parser/parse_coerce.h" #include "parser/parse_collate.h" #include "parser/parse_expr.h" #include "parser/parse_func.h" #include "parser/parse_type.h" +#include "pgstat.h" #include "utils/acl.h" #include "utils/builtins.h" #include "utils/fmgroids.h" #include "utils/guc.h" #include "utils/lsyscache.h" +#include "utils/memutils.h" #include "utils/rel.h" #include "utils/syscache.h" +#include "utils/typcache.h" #include "utils/tqual.h" /* @@ -144,7 +149,7 @@ compute_return_type(TypeName *returnType, Oid languageOid, aclresult = pg_namespace_aclcheck(namespaceId, GetUserId(), ACL_CREATE); if (aclresult != ACLCHECK_OK) - aclcheck_error(aclresult, ACL_KIND_NAMESPACE, + aclcheck_error(aclresult, OBJECT_SCHEMA, get_namespace_name(namespaceId)); address = TypeShellMake(typname, namespaceId, GetUserId()); rettype = address.objectId; @@ -179,7 +184,7 @@ void interpret_function_parameter_list(ParseState *pstate, List *parameters, Oid languageOid, - bool is_aggregate, + ObjectType objtype, oidvector **parameterTypes, ArrayType **allParameterTypes, ArrayType **parameterModes, @@ -233,7 +238,7 @@ interpret_function_parameter_list(ParseState *pstate, errmsg("SQL function cannot accept shell type %s", TypeNameToString(t)))); /* We don't allow creating aggregates on shell types either */ - else if (is_aggregate) + else if (objtype == OBJECT_AGGREGATE) ereport(ERROR, (errcode(ERRCODE_INVALID_FUNCTION_DEFINITION), errmsg("aggregate cannot accept shell type %s", @@ -262,16 +267,29 @@ interpret_function_parameter_list(ParseState *pstate, if (t->setof) { - if (is_aggregate) + if (objtype == OBJECT_AGGREGATE) ereport(ERROR, (errcode(ERRCODE_INVALID_FUNCTION_DEFINITION), errmsg("aggregates cannot accept set arguments"))); + else if (objtype == OBJECT_PROCEDURE) + ereport(ERROR, + (errcode(ERRCODE_INVALID_FUNCTION_DEFINITION), + errmsg("procedures cannot accept set arguments"))); else ereport(ERROR, (errcode(ERRCODE_INVALID_FUNCTION_DEFINITION), errmsg("functions cannot accept set arguments"))); } + if (objtype == OBJECT_PROCEDURE) + { + if (fp->mode == FUNC_PARAM_OUT) + ereport(ERROR, + (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), + (errmsg("procedures cannot have OUT arguments"), + errhint("INOUT arguments are permitted.")))); + } + /* handle input parameters */ if (fp->mode != FUNC_PARAM_OUT && fp->mode != FUNC_PARAM_TABLE) { @@ -287,7 +305,9 @@ interpret_function_parameter_list(ParseState *pstate, /* handle output parameters */ if (fp->mode != FUNC_PARAM_IN && fp->mode != FUNC_PARAM_VARIADIC) { - if (outCount == 0) /* save first output param's type */ + if (objtype == OBJECT_PROCEDURE) + *requiredResultType = RECORDOID; + else if (outCount == 0) /* save first output param's type */ *requiredResultType = toid; outCount++; } @@ -451,6 +471,7 @@ interpret_function_parameter_list(ParseState 
*pstate, */ static bool compute_common_attribute(ParseState *pstate, + bool is_procedure, DefElem *defel, DefElem **volatility_item, DefElem **strict_item, @@ -463,6 +484,8 @@ compute_common_attribute(ParseState *pstate, { if (strcmp(defel->defname, "volatility") == 0) { + if (is_procedure) + goto procedure_error; if (*volatility_item) goto duplicate_error; @@ -470,6 +493,8 @@ compute_common_attribute(ParseState *pstate, } else if (strcmp(defel->defname, "strict") == 0) { + if (is_procedure) + goto procedure_error; if (*strict_item) goto duplicate_error; @@ -484,6 +509,8 @@ compute_common_attribute(ParseState *pstate, } else if (strcmp(defel->defname, "leakproof") == 0) { + if (is_procedure) + goto procedure_error; if (*leakproof_item) goto duplicate_error; @@ -495,6 +522,8 @@ compute_common_attribute(ParseState *pstate, } else if (strcmp(defel->defname, "cost") == 0) { + if (is_procedure) + goto procedure_error; if (*cost_item) goto duplicate_error; @@ -502,6 +531,8 @@ compute_common_attribute(ParseState *pstate, } else if (strcmp(defel->defname, "rows") == 0) { + if (is_procedure) + goto procedure_error; if (*rows_item) goto duplicate_error; @@ -509,6 +540,8 @@ compute_common_attribute(ParseState *pstate, } else if (strcmp(defel->defname, "parallel") == 0) { + if (is_procedure) + goto procedure_error; if (*parallel_item) goto duplicate_error; @@ -526,6 +559,13 @@ compute_common_attribute(ParseState *pstate, errmsg("conflicting or redundant options"), parser_errposition(pstate, defel->location))); return false; /* keep compiler quiet */ + +procedure_error: + ereport(ERROR, + (errcode(ERRCODE_INVALID_FUNCTION_DEFINITION), + errmsg("invalid attribute in procedure definition"), + parser_errposition(pstate, defel->location))); + return false; } static char @@ -602,20 +642,21 @@ update_proconfig_value(ArrayType *a, List *set_items) * attributes. */ static void -compute_attributes_sql_style(ParseState *pstate, - List *options, - List **as, - char **language, - Node **transform, - bool *windowfunc_p, - char *volatility_p, - bool *strict_p, - bool *security_definer, - bool *leakproof_p, - ArrayType **proconfig, - float4 *procost, - float4 *prorows, - char *parallel_p) +compute_function_attributes(ParseState *pstate, + bool is_procedure, + List *options, + List **as, + char **language, + Node **transform, + bool *windowfunc_p, + char *volatility_p, + bool *strict_p, + bool *security_definer, + bool *leakproof_p, + ArrayType **proconfig, + float4 *procost, + float4 *prorows, + char *parallel_p) { ListCell *option; DefElem *as_item = NULL; @@ -669,9 +710,15 @@ compute_attributes_sql_style(ParseState *pstate, (errcode(ERRCODE_SYNTAX_ERROR), errmsg("conflicting or redundant options"), parser_errposition(pstate, defel->location))); + if (is_procedure) + ereport(ERROR, + (errcode(ERRCODE_INVALID_FUNCTION_DEFINITION), + errmsg("invalid attribute in procedure definition"), + parser_errposition(pstate, defel->location))); windowfunc_item = defel; } else if (compute_common_attribute(pstate, + is_procedure, defel, &volatility_item, &strict_item, @@ -747,47 +794,6 @@ compute_attributes_sql_style(ParseState *pstate, } -/*------------- - * Interpret the parameters *parameters and return their contents via - * *isStrict_p and *volatility_p. - * - * These parameters supply optional information about a function. - * All have defaults if not specified. Parameters: - * - * * isStrict means the function should not be called when any NULL - * inputs are present; instead a NULL result value should be assumed. 
- * - * * volatility tells the optimizer whether the function's result can - * be assumed to be repeatable over multiple evaluations. - *------------ - */ -static void -compute_attributes_with_style(ParseState *pstate, List *parameters, bool *isStrict_p, char *volatility_p) -{ - ListCell *pl; - - foreach(pl, parameters) - { - DefElem *param = (DefElem *) lfirst(pl); - - if (pg_strcasecmp(param->defname, "isstrict") == 0) - *isStrict_p = defGetBoolean(param); - else if (pg_strcasecmp(param->defname, "iscachable") == 0) - { - /* obsolete spelling of isImmutable */ - if (defGetBoolean(param)) - *volatility_p = PROVOLATILE_IMMUTABLE; - } - else - ereport(WARNING, - (errcode(ERRCODE_SYNTAX_ERROR), - errmsg("unrecognized function attribute \"%s\" ignored", - param->defname), - parser_errposition(pstate, param->location))); - } -} - - /* * For a dynamically linked C language object, the form of the clause is * @@ -855,7 +861,7 @@ interpret_AS_clause(Oid languageOid, const char *languageName, /* * CreateFunction - * Execute a CREATE FUNCTION utility statement. + * Execute a CREATE FUNCTION (or CREATE PROCEDURE) utility statement. */ ObjectAddress CreateFunction(ParseState *pstate, CreateFunctionStmt *stmt) @@ -900,10 +906,10 @@ CreateFunction(ParseState *pstate, CreateFunctionStmt *stmt) /* Check we have creation rights in target namespace */ aclresult = pg_namespace_aclcheck(namespaceId, GetUserId(), ACL_CREATE); if (aclresult != ACLCHECK_OK) - aclcheck_error(aclresult, ACL_KIND_NAMESPACE, + aclcheck_error(aclresult, OBJECT_SCHEMA, get_namespace_name(namespaceId)); - /* default attributes */ + /* Set default attributes */ isWindowFunc = false; isStrict = false; security = false; @@ -914,13 +920,14 @@ CreateFunction(ParseState *pstate, CreateFunctionStmt *stmt) prorows = -1; /* indicates not set */ parallel = PROPARALLEL_UNSAFE; - /* override attributes from explicit list */ - compute_attributes_sql_style(pstate, - stmt->options, - &as_clause, &language, &transformDefElem, - &isWindowFunc, &volatility, - &isStrict, &security, &isLeakProof, - &proconfig, &procost, &prorows, ¶llel); + /* Extract non-default attributes from stmt->options list */ + compute_function_attributes(pstate, + stmt->is_procedure, + stmt->options, + &as_clause, &language, &transformDefElem, + &isWindowFunc, &volatility, + &isStrict, &security, &isLeakProof, + &proconfig, &procost, &prorows, ¶llel); /* Look up the language and validate permissions */ languageTuple = SearchSysCache1(LANGNAME, PointerGetDatum(language)); @@ -929,7 +936,7 @@ CreateFunction(ParseState *pstate, CreateFunctionStmt *stmt) (errcode(ERRCODE_UNDEFINED_OBJECT), errmsg("language \"%s\" does not exist", language), (PLTemplateExists(language) ? 
- errhint("Use CREATE LANGUAGE to load the language into the database.") : 0))); + errhint("Use CREATE EXTENSION to load the language into the database.") : 0))); languageOid = HeapTupleGetOid(languageTuple); languageStruct = (Form_pg_language) GETSTRUCT(languageTuple); @@ -941,14 +948,14 @@ CreateFunction(ParseState *pstate, CreateFunctionStmt *stmt) aclresult = pg_language_aclcheck(languageOid, GetUserId(), ACL_USAGE); if (aclresult != ACLCHECK_OK) - aclcheck_error(aclresult, ACL_KIND_LANGUAGE, + aclcheck_error(aclresult, OBJECT_LANGUAGE, NameStr(languageStruct->lanname)); } else { /* if untrusted language, must be superuser */ if (!superuser()) - aclcheck_error(ACLCHECK_NO_PRIV, ACL_KIND_LANGUAGE, + aclcheck_error(ACLCHECK_NO_PRIV, OBJECT_LANGUAGE, NameStr(languageStruct->lanname)); } @@ -990,7 +997,7 @@ CreateFunction(ParseState *pstate, CreateFunctionStmt *stmt) interpret_function_parameter_list(pstate, stmt->parameters, languageOid, - false, /* not an aggregate */ + stmt->is_procedure ? OBJECT_PROCEDURE : OBJECT_FUNCTION, ¶meterTypes, &allParameterTypes, ¶meterModes, @@ -999,7 +1006,13 @@ CreateFunction(ParseState *pstate, CreateFunctionStmt *stmt) &variadicArgType, &requiredResultType); - if (stmt->returnType) + if (stmt->is_procedure) + { + Assert(!stmt->returnType); + prorettype = requiredResultType ? requiredResultType : VOIDOID; + returnsSet = false; + } + else if (stmt->returnType) { /* explicit RETURNS clause */ compute_return_type(stmt->returnType, languageOid, @@ -1045,8 +1058,6 @@ CreateFunction(ParseState *pstate, CreateFunctionStmt *stmt) trftypes = NULL; } - compute_attributes_with_style(pstate, stmt->withClause, &isStrict, &volatility); - interpret_AS_clause(languageOid, language, funcname, as_clause, &prosrc_str, &probin_str); @@ -1090,8 +1101,7 @@ CreateFunction(ParseState *pstate, CreateFunctionStmt *stmt) languageValidator, prosrc_str, /* converted to text later */ probin_str, /* converted to text later */ - false, /* not an aggregate */ - isWindowFunc, + stmt->is_procedure ? PROKIND_PROCEDURE : (isWindowFunc ? PROKIND_WINDOW : PROKIND_FUNCTION), security, isLeakProof, isStrict, @@ -1119,7 +1129,7 @@ RemoveFunctionById(Oid funcOid) { Relation relation; HeapTuple tup; - bool isagg; + char prokind; /* * Delete the pg_proc tuple. @@ -1130,7 +1140,7 @@ RemoveFunctionById(Oid funcOid) if (!HeapTupleIsValid(tup)) /* should not happen */ elog(ERROR, "cache lookup failed for function %u", funcOid); - isagg = ((Form_pg_proc) GETSTRUCT(tup))->proisagg; + prokind = ((Form_pg_proc) GETSTRUCT(tup))->prokind; CatalogTupleDelete(relation, &tup->t_self); @@ -1141,7 +1151,7 @@ RemoveFunctionById(Oid funcOid) /* * If there's a pg_aggregate tuple, delete that too. 
*/ - if (isagg) + if (prokind == PROKIND_AGGREGATE) { relation = heap_open(AggregateRelationId, RowExclusiveLock); @@ -1168,6 +1178,7 @@ AlterFunction(ParseState *pstate, AlterFunctionStmt *stmt) HeapTuple tup; Oid funcOid; Form_pg_proc procForm; + bool is_procedure; Relation rel; ListCell *l; DefElem *volatility_item = NULL; @@ -1182,7 +1193,7 @@ AlterFunction(ParseState *pstate, AlterFunctionStmt *stmt) rel = heap_open(ProcedureRelationId, RowExclusiveLock); - funcOid = LookupFuncWithArgs(stmt->func, false); + funcOid = LookupFuncWithArgs(stmt->objtype, stmt->func, false); tup = SearchSysCacheCopy1(PROCOID, ObjectIdGetDatum(funcOid)); if (!HeapTupleIsValid(tup)) /* should not happen */ @@ -1192,21 +1203,24 @@ AlterFunction(ParseState *pstate, AlterFunctionStmt *stmt) /* Permission check: must own function */ if (!pg_proc_ownercheck(funcOid, GetUserId())) - aclcheck_error(ACLCHECK_NOT_OWNER, ACL_KIND_PROC, + aclcheck_error(ACLCHECK_NOT_OWNER, stmt->objtype, NameListToString(stmt->func->objname)); - if (procForm->proisagg) + if (procForm->prokind == PROKIND_AGGREGATE) ereport(ERROR, (errcode(ERRCODE_WRONG_OBJECT_TYPE), errmsg("\"%s\" is an aggregate function", NameListToString(stmt->func->objname)))); + is_procedure = (procForm->prokind == PROKIND_PROCEDURE); + /* Examine requested actions. */ foreach(l, stmt->actions) { DefElem *defel = (DefElem *) lfirst(l); if (compute_common_attribute(pstate, + is_procedure, defel, &volatility_item, &strict_item, @@ -1472,7 +1486,7 @@ CreateCast(CreateCastStmt *stmt) { Form_pg_proc procstruct; - funcid = LookupFuncWithArgs(stmt->func, false); + funcid = LookupFuncWithArgs(OBJECT_FUNCTION, stmt->func, false); tuple = SearchSysCache1(PROCOID, ObjectIdGetDatum(funcid)); if (!HeapTupleIsValid(tuple)) @@ -1514,14 +1528,10 @@ CreateCast(CreateCastStmt *stmt) (errcode(ERRCODE_INVALID_OBJECT_DEFINITION), errmsg("cast function must not be volatile"))); #endif - if (procstruct->proisagg) - ereport(ERROR, - (errcode(ERRCODE_INVALID_OBJECT_DEFINITION), - errmsg("cast function must not be an aggregate function"))); - if (procstruct->proiswindow) + if (procstruct->prokind != PROKIND_FUNCTION) ereport(ERROR, (errcode(ERRCODE_INVALID_OBJECT_DEFINITION), - errmsg("cast function must not be a window function"))); + errmsg("cast function must be a normal function"))); if (procstruct->proretset) ereport(ERROR, (errcode(ERRCODE_INVALID_OBJECT_DEFINITION), @@ -1766,14 +1776,10 @@ check_transform_function(Form_pg_proc procstruct) ereport(ERROR, (errcode(ERRCODE_INVALID_OBJECT_DEFINITION), errmsg("transform function must not be volatile"))); - if (procstruct->proisagg) + if (procstruct->prokind != PROKIND_FUNCTION) ereport(ERROR, (errcode(ERRCODE_INVALID_OBJECT_DEFINITION), - errmsg("transform function must not be an aggregate function"))); - if (procstruct->proiswindow) - ereport(ERROR, - (errcode(ERRCODE_INVALID_OBJECT_DEFINITION), - errmsg("transform function must not be a window function"))); + errmsg("transform function must be a normal function"))); if (procstruct->proretset) ereport(ERROR, (errcode(ERRCODE_INVALID_OBJECT_DEFINITION), @@ -1846,21 +1852,21 @@ CreateTransform(CreateTransformStmt *stmt) aclresult = pg_language_aclcheck(langid, GetUserId(), ACL_USAGE); if (aclresult != ACLCHECK_OK) - aclcheck_error(aclresult, ACL_KIND_LANGUAGE, stmt->lang); + aclcheck_error(aclresult, OBJECT_LANGUAGE, stmt->lang); /* * Get the functions */ if (stmt->fromsql) { - fromsqlfuncid = LookupFuncWithArgs(stmt->fromsql, false); + fromsqlfuncid = LookupFuncWithArgs(OBJECT_FUNCTION, 
stmt->fromsql, false); if (!pg_proc_ownercheck(fromsqlfuncid, GetUserId())) - aclcheck_error(ACLCHECK_NOT_OWNER, ACL_KIND_PROC, NameListToString(stmt->fromsql->objname)); + aclcheck_error(ACLCHECK_NOT_OWNER, OBJECT_FUNCTION, NameListToString(stmt->fromsql->objname)); aclresult = pg_proc_aclcheck(fromsqlfuncid, GetUserId(), ACL_EXECUTE); if (aclresult != ACLCHECK_OK) - aclcheck_error(aclresult, ACL_KIND_PROC, NameListToString(stmt->fromsql->objname)); + aclcheck_error(aclresult, OBJECT_FUNCTION, NameListToString(stmt->fromsql->objname)); tuple = SearchSysCache1(PROCOID, ObjectIdGetDatum(fromsqlfuncid)); if (!HeapTupleIsValid(tuple)) @@ -1879,14 +1885,14 @@ CreateTransform(CreateTransformStmt *stmt) if (stmt->tosql) { - tosqlfuncid = LookupFuncWithArgs(stmt->tosql, false); + tosqlfuncid = LookupFuncWithArgs(OBJECT_FUNCTION, stmt->tosql, false); if (!pg_proc_ownercheck(tosqlfuncid, GetUserId())) - aclcheck_error(ACLCHECK_NOT_OWNER, ACL_KIND_PROC, NameListToString(stmt->tosql->objname)); + aclcheck_error(ACLCHECK_NOT_OWNER, OBJECT_FUNCTION, NameListToString(stmt->tosql->objname)); aclresult = pg_proc_aclcheck(tosqlfuncid, GetUserId(), ACL_EXECUTE); if (aclresult != ACLCHECK_OK) - aclcheck_error(aclresult, ACL_KIND_PROC, NameListToString(stmt->tosql->objname)); + aclcheck_error(aclresult, OBJECT_FUNCTION, NameListToString(stmt->tosql->objname)); tuple = SearchSysCache1(PROCOID, ObjectIdGetDatum(tosqlfuncid)); if (!HeapTupleIsValid(tuple)) @@ -2071,9 +2077,11 @@ IsThereFunctionInNamespace(const char *proname, int pronargs, /* * ExecuteDoStmt * Execute inline procedural-language code + * + * See at ExecuteCallStmt() about the atomic argument. */ void -ExecuteDoStmt(DoStmt *stmt) +ExecuteDoStmt(DoStmt *stmt, bool atomic) { InlineCodeBlock *codeblock = makeNode(InlineCodeBlock); ListCell *arg; @@ -2130,11 +2138,12 @@ ExecuteDoStmt(DoStmt *stmt) (errcode(ERRCODE_UNDEFINED_OBJECT), errmsg("language \"%s\" does not exist", language), (PLTemplateExists(language) ? - errhint("Use CREATE LANGUAGE to load the language into the database.") : 0))); + errhint("Use CREATE EXTENSION to load the language into the database.") : 0))); codeblock->langOid = HeapTupleGetOid(languageTuple); languageStruct = (Form_pg_language) GETSTRUCT(languageTuple); codeblock->langIsTrusted = languageStruct->lanpltrusted; + codeblock->atomic = atomic; if (languageStruct->lanpltrusted) { @@ -2144,14 +2153,14 @@ ExecuteDoStmt(DoStmt *stmt) aclresult = pg_language_aclcheck(codeblock->langOid, GetUserId(), ACL_USAGE); if (aclresult != ACLCHECK_OK) - aclcheck_error(aclresult, ACL_KIND_LANGUAGE, + aclcheck_error(aclresult, OBJECT_LANGUAGE, NameStr(languageStruct->lanname)); } else { /* if untrusted language, must be superuser */ if (!superuser()) - aclcheck_error(ACLCHECK_NO_PRIV, ACL_KIND_LANGUAGE, + aclcheck_error(ACLCHECK_NO_PRIV, OBJECT_LANGUAGE, NameStr(languageStruct->lanname)); } @@ -2168,3 +2177,216 @@ ExecuteDoStmt(DoStmt *stmt) /* execute the inline handler */ OidFunctionCall1(laninline, PointerGetDatum(codeblock)); } + +/* + * Execute CALL statement + * + * Inside a top-level CALL statement, transaction-terminating commands such as + * COMMIT or a PL-specific equivalent are allowed. The terminology in the SQL + * standard is that CALL establishes a non-atomic execution context. Most + * other commands establish an atomic execution context, in which transaction + * control actions are not allowed. 
If there are nested executions of CALL, + * we want to track the execution context recursively, so that the nested + * CALLs can also do transaction control. Note, however, that for example in + * CALL -> SELECT -> CALL, the second call cannot do transaction control, + * because the SELECT in between establishes an atomic execution context. + * + * So when ExecuteCallStmt() is called from the top level, we pass in atomic = + * false (recall that that means transactions = yes). We then create a + * CallContext node with content atomic = false, which is passed in the + * fcinfo->context field to the procedure invocation. The language + * implementation should then take appropriate measures to allow or prevent + * transaction commands based on that information, e.g., call + * SPI_connect_ext(SPI_OPT_NONATOMIC). The language should also pass on the + * atomic flag to any nested invocations to CALL. + * + * The expression data structures and execution context that we create + * within this function are children of the portalContext of the Portal + * that the CALL utility statement runs in. Therefore, any pass-by-ref + * values that we're passing to the procedure will survive transaction + * commits that might occur inside the procedure. + */ +void +ExecuteCallStmt(CallStmt *stmt, ParamListInfo params, bool atomic, DestReceiver *dest) +{ + ListCell *lc; + FuncExpr *fexpr; + int nargs; + int i; + AclResult aclresult; + FmgrInfo flinfo; + FunctionCallInfoData fcinfo; + CallContext *callcontext; + EState *estate; + ExprContext *econtext; + HeapTuple tp; + PgStat_FunctionCallUsage fcusage; + Datum retval; + + fexpr = stmt->funcexpr; + Assert(fexpr); + Assert(IsA(fexpr, FuncExpr)); + + aclresult = pg_proc_aclcheck(fexpr->funcid, GetUserId(), ACL_EXECUTE); + if (aclresult != ACLCHECK_OK) + aclcheck_error(aclresult, OBJECT_PROCEDURE, get_func_name(fexpr->funcid)); + + /* Prep the context object we'll pass to the procedure */ + callcontext = makeNode(CallContext); + callcontext->atomic = atomic; + + tp = SearchSysCache1(PROCOID, ObjectIdGetDatum(fexpr->funcid)); + if (!HeapTupleIsValid(tp)) + elog(ERROR, "cache lookup failed for function %u", fexpr->funcid); + + /* + * If proconfig is set we can't allow transaction commands because of the + * way the GUC stacking works: The transaction boundary would have to pop + * the proconfig setting off the stack. That restriction could be lifted + * by redesigning the GUC nesting mechanism a bit. + */ + if (!heap_attisnull(tp, Anum_pg_proc_proconfig, NULL)) + callcontext->atomic = true; + + /* + * In security definer procedures, we can't allow transaction commands. + * StartTransaction() insists that the security context stack is empty, + * and AbortTransaction() resets the security context. This could be + * reorganized, but right now it doesn't work. + */ + if (((Form_pg_proc) GETSTRUCT(tp))->prosecdef) + callcontext->atomic = true; + + /* + * Expand named arguments, defaults, etc. We do not want to scribble on + * the passed-in CallStmt parse tree, so first flat-copy fexpr, allowing + * us to replace its args field. (Note that expand_function_arguments + * will not modify any of the passed-in data structure.) 
+ */ + { + FuncExpr *nexpr = makeNode(FuncExpr); + + memcpy(nexpr, fexpr, sizeof(FuncExpr)); + fexpr = nexpr; + } + + fexpr->args = expand_function_arguments(fexpr->args, + fexpr->funcresulttype, + tp); + nargs = list_length(fexpr->args); + + ReleaseSysCache(tp); + + /* safety check; see ExecInitFunc() */ + if (nargs > FUNC_MAX_ARGS) + ereport(ERROR, + (errcode(ERRCODE_TOO_MANY_ARGUMENTS), + errmsg_plural("cannot pass more than %d argument to a procedure", + "cannot pass more than %d arguments to a procedure", + FUNC_MAX_ARGS, + FUNC_MAX_ARGS))); + + /* Initialize function call structure */ + InvokeFunctionExecuteHook(fexpr->funcid); + fmgr_info(fexpr->funcid, &flinfo); + fmgr_info_set_expr((Node *) fexpr, &flinfo); + InitFunctionCallInfoData(fcinfo, &flinfo, nargs, fexpr->inputcollid, (Node *) callcontext, NULL); + + /* + * Evaluate procedure arguments inside a suitable execution context. Note + * we can't free this context till the procedure returns. + */ + estate = CreateExecutorState(); + estate->es_param_list_info = params; + econtext = CreateExprContext(estate); + + i = 0; + foreach(lc, fexpr->args) + { + ExprState *exprstate; + Datum val; + bool isnull; + + exprstate = ExecPrepareExpr(lfirst(lc), estate); + + val = ExecEvalExprSwitchContext(exprstate, econtext, &isnull); + + fcinfo.arg[i] = val; + fcinfo.argnull[i] = isnull; + + i++; + } + + pgstat_init_function_usage(&fcinfo, &fcusage); + retval = FunctionCallInvoke(&fcinfo); + pgstat_end_function_usage(&fcusage, true); + + if (fexpr->funcresulttype == VOIDOID) + { + /* do nothing */ + } + else if (fexpr->funcresulttype == RECORDOID) + { + /* + * send tuple to client + */ + + HeapTupleHeader td; + Oid tupType; + int32 tupTypmod; + TupleDesc retdesc; + HeapTupleData rettupdata; + TupOutputState *tstate; + TupleTableSlot *slot; + + if (fcinfo.isnull) + elog(ERROR, "procedure returned null record"); + + td = DatumGetHeapTupleHeader(retval); + tupType = HeapTupleHeaderGetTypeId(td); + tupTypmod = HeapTupleHeaderGetTypMod(td); + retdesc = lookup_rowtype_tupdesc(tupType, tupTypmod); + + tstate = begin_tup_output_tupdesc(dest, retdesc); + + rettupdata.t_len = HeapTupleHeaderGetDatumLength(td); + ItemPointerSetInvalid(&(rettupdata.t_self)); + rettupdata.t_tableOid = InvalidOid; + rettupdata.t_data = td; + + slot = ExecStoreHeapTuple(&rettupdata, tstate->slot, false); + tstate->dest->receiveSlot(slot, tstate->dest); + + end_tup_output(tstate); + + ReleaseTupleDesc(retdesc); + } + else + elog(ERROR, "unexpected result type for procedure: %u", + fexpr->funcresulttype); + + FreeExecutorState(estate); +} + +/* + * Construct the tuple descriptor for a CALL statement return + */ +TupleDesc +CallStmtResultDesc(CallStmt *stmt) +{ + FuncExpr *fexpr; + HeapTuple tuple; + TupleDesc tupdesc; + + fexpr = stmt->funcexpr; + + tuple = SearchSysCache1(PROCOID, ObjectIdGetDatum(fexpr->funcid)); + if (!HeapTupleIsValid(tuple)) + elog(ERROR, "cache lookup failed for procedure %u", fexpr->funcid); + + tupdesc = build_function_result_tupdesc_t(tuple); + + ReleaseSysCache(tuple); + + return tupdesc; +} diff --git a/src/backend/commands/indexcmds.c b/src/backend/commands/indexcmds.c index 620704ec49..906d711378 100644 --- a/src/backend/commands/indexcmds.c +++ b/src/backend/commands/indexcmds.c @@ -3,7 +3,7 @@ * indexcmds.c * POSTGRES define and remove index code. 
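The ExecuteCallStmt() comment above says the atomic flag reaches the procedure's language handler through a CallContext node in fcinfo->context, and that handlers are expected to react, for example by calling SPI_connect_ext(SPI_OPT_NONATOMIC). A rough sketch of that handler-side check, assuming the PostgreSQL 11 SPI and node headers; the function name is made up, and a real call handler (such as plpgsql's) does considerably more.

    #include "postgres.h"
    #include "executor/spi.h"
    #include "fmgr.h"
    #include "nodes/parsenodes.h"

    /*
     * Hypothetical call-handler fragment: detect a non-atomic CALL context
     * and connect to SPI in non-atomic mode so the procedure may issue
     * COMMIT/ROLLBACK.
     */
    static void
    my_handler_connect(FunctionCallInfo fcinfo)
    {
        bool        nonatomic;

        nonatomic = fcinfo->context &&
            IsA(fcinfo->context, CallContext) &&
            !castNode(CallContext, fcinfo->context)->atomic;

        if (SPI_connect_ext(nonatomic ? SPI_OPT_NONATOMIC : 0) != SPI_OK_CONNECT)
            elog(ERROR, "SPI_connect_ext failed");
    }

As the comment above notes, the handler should also pass the same flag down to any nested CALL it executes, so that a CALL inside an atomic context (for example one reached through SELECT) cannot perform transaction control.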
* - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * @@ -23,7 +23,10 @@ #include "catalog/catalog.h" #include "catalog/index.h" #include "catalog/indexing.h" +#include "catalog/partition.h" #include "catalog/pg_am.h" +#include "catalog/pg_constraint.h" +#include "catalog/pg_inherits.h" #include "catalog/pg_opclass.h" #include "catalog/pg_opfamily.h" #include "catalog/pg_tablespace.h" @@ -31,10 +34,12 @@ #include "commands/comment.h" #include "commands/dbcommands.h" #include "commands/defrem.h" +#include "commands/event_trigger.h" #include "commands/tablecmds.h" #include "commands/tablespace.h" #include "mb/pg_wchar.h" #include "miscadmin.h" +#include "nodes/makefuncs.h" #include "nodes/nodeFuncs.h" #include "optimizer/clauses.h" #include "optimizer/planner.h" @@ -42,6 +47,7 @@ #include "parser/parse_coerce.h" #include "parser/parse_func.h" #include "parser/parse_oper.h" +#include "rewrite/rewriteManip.h" #include "storage/lmgr.h" #include "storage/proc.h" #include "storage/procarray.h" @@ -51,6 +57,7 @@ #include "utils/inval.h" #include "utils/lsyscache.h" #include "utils/memutils.h" +#include "utils/partcache.h" #include "utils/regproc.h" #include "utils/snapmgr.h" #include "utils/syscache.h" @@ -67,7 +74,7 @@ static void ComputeIndexAttrs(IndexInfo *indexInfo, List *attList, List *exclusionOpNames, Oid relId, - char *accessMethodName, Oid accessMethodId, + const char *accessMethodName, Oid accessMethodId, bool amcanorder, bool isconstraint); static char *ChooseIndexName(const char *tabname, Oid namespaceId, @@ -77,6 +84,7 @@ static char *ChooseIndexNameAddition(List *colnames); static List *ChooseIndexColumnNames(List *indexElems); static void RangeVarCallbackForReindexIndex(const RangeVar *relation, Oid relId, Oid oldRelId, void *arg); +static void ReindexPartitionedIndex(Relation parentIdx); /* * CheckIndexCompatible @@ -102,8 +110,10 @@ static void RangeVarCallbackForReindexIndex(const RangeVar *relation, * indexes. We acknowledge this when all operator classes, collations and * exclusion operators match. Though we could further permit intra-opfamily * changes for btree and hash indexes, that adds subtle complexity with no - * concrete benefit for core types. - + * concrete benefit for core types. Note, that INCLUDE columns aren't + * checked by this function, for them it's enough that table rewrite is + * skipped. + * * When a comparison or exclusion operator has a polymorphic input type, the * actual input types must also match. This defends against the possibility * that operators could vary behavior in response to get_fn_expr_argtype(). @@ -115,7 +125,7 @@ static void RangeVarCallbackForReindexIndex(const RangeVar *relation, */ bool CheckIndexCompatible(Oid oldId, - char *accessMethodName, + const char *accessMethodName, List *attributeList, List *exclusionOpNames) { @@ -174,15 +184,20 @@ CheckIndexCompatible(Oid oldId, * the new index, so we can test whether it's compatible with the existing * one. Note that ComputeIndexAttrs might fail here, but that's OK: * DefineIndex would have called this function with the same arguments - * later on, and it would have failed then anyway. + * later on, and it would have failed then anyway. Our attributeList + * contains only key attributes, thus we're filling ii_NumIndexAttrs and + * ii_NumIndexKeyAttrs with same value. 
*/ indexInfo = makeNode(IndexInfo); + indexInfo->ii_NumIndexAttrs = numberOfAttributes; + indexInfo->ii_NumIndexKeyAttrs = numberOfAttributes; indexInfo->ii_Expressions = NIL; indexInfo->ii_ExpressionsState = NIL; indexInfo->ii_PredicateState = NULL; indexInfo->ii_ExclusionOps = NULL; indexInfo->ii_ExclusionProcs = NULL; indexInfo->ii_ExclusionStrats = NULL; + indexInfo->ii_Am = accessMethodId; indexInfo->ii_AmCache = NULL; indexInfo->ii_Context = CurrentMemoryContext; typeObjectId = (Oid *) palloc(numberOfAttributes * sizeof(Oid)); @@ -207,8 +222,8 @@ CheckIndexCompatible(Oid oldId, * We don't assess expressions or predicates; assume incompatibility. * Also, if the index is invalid for any reason, treat it as incompatible. */ - if (!(heap_attisnull(tuple, Anum_pg_index_indpred) && - heap_attisnull(tuple, Anum_pg_index_indexprs) && + if (!(heap_attisnull(tuple, Anum_pg_index_indpred, NULL) && + heap_attisnull(tuple, Anum_pg_index_indexprs, NULL) && IndexIsValid(indexForm))) { ReleaseSysCache(tuple); @@ -216,7 +231,7 @@ CheckIndexCompatible(Oid oldId, } /* Any change in operator class or collation breaks compatibility. */ - old_natts = indexForm->indnatts; + old_natts = indexForm->indnkeyatts; Assert(old_natts == numberOfAttributes); d = SysCacheGetAttr(INDEXRELID, tuple, Anum_pg_index_indcollation, &isnull); @@ -242,7 +257,7 @@ CheckIndexCompatible(Oid oldId, for (i = 0; i < old_natts; i++) { if (IsPolymorphicType(get_opclass_input_type(classObjectId[i])) && - irel->rd_att->attrs[i]->atttypid != typeObjectId[i]) + TupleDescAttr(irel->rd_att, i)->atttypid != typeObjectId[i]) { ret = false; break; @@ -270,7 +285,7 @@ CheckIndexCompatible(Oid oldId, op_input_types(indexInfo->ii_ExclusionOps[i], &left, &right); if ((IsPolymorphicType(left) || IsPolymorphicType(right)) && - irel->rd_att->attrs[i]->atttypid != typeObjectId[i]) + TupleDescAttr(irel->rd_att, i)->atttypid != typeObjectId[i]) { ret = false; break; @@ -292,14 +307,17 @@ CheckIndexCompatible(Oid oldId, * 'stmt': IndexStmt describing the properties of the new index. * 'indexRelationId': normally InvalidOid, but during bootstrap can be * nonzero to specify a preselected OID for the index. + * 'parentIndexId': the OID of the parent index; InvalidOid if not the child + * of a partitioned index. + * 'parentConstraintId': the OID of the parent constraint; InvalidOid if not + * the child of a constraint (only used when recursing) * 'is_alter_table': this is due to an ALTER rather than a CREATE operation. * 'check_rights': check for CREATE rights in namespace and tablespace. (This * should be true except when ALTER is deleting/recreating an index.) * 'check_not_in_use': check for table not already in use in current session. * This should be true unless caller is holding the table open, in which * case the caller had better have checked it earlier. - * 'skip_build': make the catalog entries but leave the index file empty; - * it will be filled later. + * 'skip_build': make the catalog entries but don't create the index files * 'quiet': suppress the NOTICE chatter ordinarily provided for constraints. * * Returns the object address of the created index. 
@@ -308,6 +326,8 @@ ObjectAddress DefineIndex(Oid relationId, IndexStmt *stmt, Oid indexRelationId, + Oid parentIndexId, + Oid parentConstraintId, bool is_alter_table, bool check_rights, bool check_not_in_use, @@ -322,7 +342,9 @@ DefineIndex(Oid relationId, Oid accessMethodId; Oid namespaceId; Oid tablespaceId; + Oid createdConstraintId = InvalidOid; List *indexColNames; + List *allIndexParams; Relation rel; Relation indexRelation; HeapTuple tuple; @@ -330,10 +352,14 @@ DefineIndex(Oid relationId, IndexAmRoutine *amRoutine; bool amcanorder; amoptions_function amoptions; + bool partitioned; Datum reloptions; int16 *coloptions; IndexInfo *indexInfo; + bits16 flags; + bits16 constr_flags; int numberOfAttributes; + int numberOfKeyAttributes; TransactionId limitXmin; VirtualTransactionId *old_snapshots; ObjectAddress address; @@ -345,9 +371,22 @@ DefineIndex(Oid relationId, int i; /* - * count attributes in index + * count key attributes in index */ - numberOfAttributes = list_length(stmt->indexParams); + numberOfKeyAttributes = list_length(stmt->indexParams); + + /* + * Calculate the new list of index columns including both key columns and + * INCLUDE columns. Later we can determine which of these are key + * columns, and which are just part of the INCLUDE list by checking the + * list position. A list item in a position less than ii_NumIndexKeyAttrs + * is part of the key columns, and anything equal to and over is part of + * the INCLUDE columns. + */ + allIndexParams = list_concat(list_copy(stmt->indexParams), + list_copy(stmt->indexIncludingParams)); + numberOfAttributes = list_length(allIndexParams); + if (numberOfAttributes <= 0) ereport(ERROR, (errcode(ERRCODE_INVALID_OBJECT_DEFINITION), @@ -368,6 +407,10 @@ DefineIndex(Oid relationId, * this will typically require the caller to have already locked the * relation. To avoid lock upgrade hazards, that lock should be at least * as strong as the one we take here. + * + * NB: If the lock strength here ever changes, code that is run by + * parallel workers under the control of certain particular ambuild + * functions will need to be updated, too. */ lockmode = stmt->concurrent ? ShareUpdateExclusiveLock : ShareLock; rel = heap_open(relationId, lockmode); @@ -375,10 +418,15 @@ DefineIndex(Oid relationId, relationId = RelationGetRelid(rel); namespaceId = RelationGetNamespace(rel); - if (rel->rd_rel->relkind != RELKIND_RELATION && - rel->rd_rel->relkind != RELKIND_MATVIEW) + /* Ensure that it makes sense to index this kind of relation */ + switch (rel->rd_rel->relkind) { - if (rel->rd_rel->relkind == RELKIND_FOREIGN_TABLE) + case RELKIND_RELATION: + case RELKIND_MATVIEW: + case RELKIND_PARTITIONED_TABLE: + /* OK */ + break; + case RELKIND_FOREIGN_TABLE: /* * Custom error message for FOREIGN TABLE since the term is close @@ -388,15 +436,35 @@ DefineIndex(Oid relationId, (errcode(ERRCODE_WRONG_OBJECT_TYPE), errmsg("cannot create index on foreign table \"%s\"", RelationGetRelationName(rel)))); - else if (rel->rd_rel->relkind == RELKIND_PARTITIONED_TABLE) + break; + default: ereport(ERROR, (errcode(ERRCODE_WRONG_OBJECT_TYPE), - errmsg("cannot create index on partitioned table \"%s\"", + errmsg("\"%s\" is not a table or materialized view", RelationGetRelationName(rel)))); - else + break; + } + + /* + * Establish behavior for partitioned tables, and verify sanity of + * parameters. + * + * We do not build an actual index in this case; we only create a few + * catalog entries. The actual indexes are built by recursing for each + * partition. 
+ */ + partitioned = rel->rd_rel->relkind == RELKIND_PARTITIONED_TABLE; + if (partitioned) + { + if (stmt->concurrent) ereport(ERROR, - (errcode(ERRCODE_WRONG_OBJECT_TYPE), - errmsg("\"%s\" is not a table or materialized view", + (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), + errmsg("cannot create index on partitioned table \"%s\" concurrently", + RelationGetRelationName(rel)))); + if (stmt->excludeOpNames) + ereport(ERROR, + (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), + errmsg("cannot create exclusion constraints on partitioned table \"%s\"", RelationGetRelationName(rel)))); } @@ -430,7 +498,7 @@ DefineIndex(Oid relationId, aclresult = pg_namespace_aclcheck(namespaceId, GetUserId(), ACL_CREATE); if (aclresult != ACLCHECK_OK) - aclcheck_error(aclresult, ACL_KIND_NAMESPACE, + aclcheck_error(aclresult, OBJECT_SCHEMA, get_namespace_name(namespaceId)); } @@ -457,7 +525,7 @@ DefineIndex(Oid relationId, aclresult = pg_tablespace_aclcheck(tablespaceId, GetUserId(), ACL_CREATE); if (aclresult != ACLCHECK_OK) - aclcheck_error(aclresult, ACL_KIND_TABLESPACE, + aclcheck_error(aclresult, OBJECT_TABLESPACE, get_tablespace_name(tablespaceId)); } @@ -476,7 +544,7 @@ DefineIndex(Oid relationId, /* * Choose the index column names. */ - indexColNames = ChooseIndexColumnNames(stmt->indexParams); + indexColNames = ChooseIndexColumnNames(allIndexParams); /* * Select name for index if caller didn't specify @@ -524,6 +592,11 @@ DefineIndex(Oid relationId, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), errmsg("access method \"%s\" does not support unique indexes", accessMethodName))); + if (stmt->indexIncludingParams != NIL && !amRoutine->amcaninclude) + ereport(ERROR, + (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), + errmsg("access method \"%s\" does not support included columns", + accessMethodName))); if (numberOfAttributes > 1 && !amRoutine->amcanmulticol) ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), @@ -561,6 +634,7 @@ DefineIndex(Oid relationId, */ indexInfo = makeNode(IndexInfo); indexInfo->ii_NumIndexAttrs = numberOfAttributes; + indexInfo->ii_NumIndexKeyAttrs = numberOfKeyAttributes; indexInfo->ii_Expressions = NIL; /* for now */ indexInfo->ii_ExpressionsState = NIL; indexInfo->ii_Predicate = make_ands_implicit((Expr *) stmt->whereClause); @@ -573,6 +647,8 @@ DefineIndex(Oid relationId, indexInfo->ii_ReadyForInserts = !stmt->concurrent; indexInfo->ii_Concurrent = stmt->concurrent; indexInfo->ii_BrokenHotChain = false; + indexInfo->ii_ParallelWorkers = 0; + indexInfo->ii_Am = accessMethodId; indexInfo->ii_AmCache = NULL; indexInfo->ii_Context = CurrentMemoryContext; @@ -582,7 +658,7 @@ DefineIndex(Oid relationId, coloptions = (int16 *) palloc(numberOfAttributes * sizeof(int16)); ComputeIndexAttrs(indexInfo, typeObjectId, collationObjectId, classObjectId, - coloptions, stmt->indexParams, + coloptions, allIndexParams, stmt->excludeOpNames, relationId, accessMethodName, accessMethodId, amcanorder, stmt->isconstraint); @@ -591,7 +667,85 @@ DefineIndex(Oid relationId, * Extra checks when creating a PRIMARY KEY index. */ if (stmt->primary) - index_check_primary_key(rel, indexInfo, is_alter_table); + index_check_primary_key(rel, indexInfo, is_alter_table, stmt); + + /* + * If this table is partitioned and we're creating a unique index or a + * primary key, make sure that the indexed columns are part of the + * partition key. Otherwise it would be possible to violate uniqueness by + * putting values that ought to be unique in different partitions. 
+ * + * We could lift this limitation if we had global indexes, but those have + * their own problems, so this is a useful feature combination. + */ + if (partitioned && (stmt->unique || stmt->primary)) + { + PartitionKey key = rel->rd_partkey; + int i; + + /* + * A partitioned table can have unique indexes, as long as all the + * columns in the partition key appear in the unique key. A + * partition-local index can enforce global uniqueness iff the PK + * value completely determines the partition that a row is in. + * + * Thus, verify that all the columns in the partition key appear in + * the unique key definition. + */ + for (i = 0; i < key->partnatts; i++) + { + bool found = false; + int j; + const char *constraint_type; + + if (stmt->primary) + constraint_type = "PRIMARY KEY"; + else if (stmt->unique) + constraint_type = "UNIQUE"; + else if (stmt->excludeOpNames != NIL) + constraint_type = "EXCLUDE"; + else + { + elog(ERROR, "unknown constraint type"); + constraint_type = NULL; /* keep compiler quiet */ + } + + /* + * It may be possible to support UNIQUE constraints when partition + * keys are expressions, but is it worth it? Give up for now. + */ + if (key->partattrs[i] == 0) + ereport(ERROR, + (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), + errmsg("unsupported %s constraint with partition key definition", + constraint_type), + errdetail("%s constraints cannot be used when partition keys include expressions.", + constraint_type))); + + for (j = 0; j < indexInfo->ii_NumIndexAttrs; j++) + { + if (key->partattrs[i] == indexInfo->ii_IndexAttrNumbers[j]) + { + found = true; + break; + } + } + if (!found) + { + Form_pg_attribute att; + + att = TupleDescAttr(RelationGetDescr(rel), key->partattrs[i] - 1); + ereport(ERROR, + (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), + errmsg("insufficient columns in %s constraint definition", + constraint_type), + errdetail("%s constraint on table \"%s\" lacks column \"%s\" which is part of the partition key.", + constraint_type, RelationGetRelationName(rel), + NameStr(att->attname)))); + } + } + } + /* * We disallow indexes on system columns other than OID. They would not @@ -599,7 +753,7 @@ DefineIndex(Oid relationId, */ for (i = 0; i < indexInfo->ii_NumIndexAttrs; i++) { - AttrNumber attno = indexInfo->ii_KeyAttrNumbers[i]; + AttrNumber attno = indexInfo->ii_IndexAttrNumbers[i]; if (attno < 0 && attno != ObjectIdAttributeNumber) ereport(ERROR, @@ -662,20 +816,42 @@ DefineIndex(Oid relationId, Assert(!OidIsValid(stmt->oldNode) || (skip_build && !stmt->concurrent)); /* - * Make the catalog entries for the index, including constraints. Then, if - * not skip_build || concurrent, actually build the index. + * Make the catalog entries for the index, including constraints. This + * step also actually builds the index, except if caller requested not to + * or in concurrent mode, in which case it'll be done later, or doing a + * partitioned index (because those don't have storage). 
*/ + flags = constr_flags = 0; + if (stmt->isconstraint) + flags |= INDEX_CREATE_ADD_CONSTRAINT; + if (skip_build || stmt->concurrent || partitioned) + flags |= INDEX_CREATE_SKIP_BUILD; + if (stmt->if_not_exists) + flags |= INDEX_CREATE_IF_NOT_EXISTS; + if (stmt->concurrent) + flags |= INDEX_CREATE_CONCURRENT; + if (partitioned) + flags |= INDEX_CREATE_PARTITIONED; + if (stmt->primary) + flags |= INDEX_CREATE_IS_PRIMARY; + if (partitioned && stmt->relation && !stmt->relation->inh) + flags |= INDEX_CREATE_INVALID; + + if (stmt->deferrable) + constr_flags |= INDEX_CONSTR_CREATE_DEFERRABLE; + if (stmt->initdeferred) + constr_flags |= INDEX_CONSTR_CREATE_INIT_DEFERRED; + indexRelationId = - index_create(rel, indexRelationName, indexRelationId, stmt->oldNode, - indexInfo, indexColNames, + index_create(rel, indexRelationName, indexRelationId, parentIndexId, + parentConstraintId, + stmt->oldNode, indexInfo, indexColNames, accessMethodId, tablespaceId, collationObjectId, classObjectId, - coloptions, reloptions, stmt->primary, - stmt->isconstraint, stmt->deferrable, stmt->initdeferred, - allowSystemTableMods, - skip_build || stmt->concurrent, - stmt->concurrent, !check_rights, - stmt->if_not_exists); + coloptions, reloptions, + flags, constr_flags, + allowSystemTableMods, !check_rights, + &createdConstraintId); ObjectAddressSet(address, RelationRelationId, indexRelationId); @@ -690,6 +866,209 @@ DefineIndex(Oid relationId, CreateComments(indexRelationId, RelationRelationId, 0, stmt->idxcomment); + if (partitioned) + { + /* + * Unless caller specified to skip this step (via ONLY), process each + * partition to make sure they all contain a corresponding index. + * + * If we're called internally (no stmt->relation), recurse always. + */ + if (!stmt->relation || stmt->relation->inh) + { + PartitionDesc partdesc = RelationGetPartitionDesc(rel); + int nparts = partdesc->nparts; + Oid *part_oids = palloc(sizeof(Oid) * nparts); + bool invalidate_parent = false; + TupleDesc parentDesc; + Oid *opfamOids; + + memcpy(part_oids, partdesc->oids, sizeof(Oid) * nparts); + + parentDesc = CreateTupleDescCopy(RelationGetDescr(rel)); + opfamOids = palloc(sizeof(Oid) * numberOfKeyAttributes); + for (i = 0; i < numberOfKeyAttributes; i++) + opfamOids[i] = get_opclass_family(classObjectId[i]); + + heap_close(rel, NoLock); + + /* + * For each partition, scan all existing indexes; if one matches + * our index definition and is not already attached to some other + * parent index, attach it to the one we just created. + * + * If none matches, build a new index by calling ourselves + * recursively with the same options (except for the index name). 
+ */ + for (i = 0; i < nparts; i++) + { + Oid childRelid = part_oids[i]; + Relation childrel; + List *childidxs; + ListCell *cell; + AttrNumber *attmap; + bool found = false; + int maplen; + + childrel = heap_open(childRelid, lockmode); + childidxs = RelationGetIndexList(childrel); + attmap = + convert_tuples_by_name_map(RelationGetDescr(childrel), + parentDesc, + gettext_noop("could not convert row type")); + maplen = parentDesc->natts; + + + foreach(cell, childidxs) + { + Oid cldidxid = lfirst_oid(cell); + Relation cldidx; + IndexInfo *cldIdxInfo; + + /* this index is already partition of another one */ + if (has_superclass(cldidxid)) + continue; + + cldidx = index_open(cldidxid, lockmode); + cldIdxInfo = BuildIndexInfo(cldidx); + if (CompareIndexInfo(cldIdxInfo, indexInfo, + cldidx->rd_indcollation, + collationObjectId, + cldidx->rd_opfamily, + opfamOids, + attmap, maplen)) + { + Oid cldConstrOid = InvalidOid; + + /* + * Found a match. + * + * If this index is being created in the parent + * because of a constraint, then the child needs to + * have a constraint also, so look for one. If there + * is no such constraint, this index is no good, so + * keep looking. + */ + if (createdConstraintId != InvalidOid) + { + cldConstrOid = + get_relation_idx_constraint_oid(childRelid, + cldidxid); + if (cldConstrOid == InvalidOid) + { + index_close(cldidx, lockmode); + continue; + } + } + + /* Attach index to parent and we're done. */ + IndexSetParentIndex(cldidx, indexRelationId); + if (createdConstraintId != InvalidOid) + ConstraintSetParentConstraint(cldConstrOid, + createdConstraintId); + + if (!IndexIsValid(cldidx->rd_index)) + invalidate_parent = true; + + found = true; + /* keep lock till commit */ + index_close(cldidx, NoLock); + break; + } + + index_close(cldidx, lockmode); + } + + list_free(childidxs); + heap_close(childrel, NoLock); + + /* + * If no matching index was found, create our own. + */ + if (!found) + { + IndexStmt *childStmt = copyObject(stmt); + bool found_whole_row; + ListCell *lc; + + /* + * Adjust any Vars (both in expressions and in the index's + * WHERE clause) to match the partition's column numbering + * in case it's different from the parent's. + */ + foreach(lc, childStmt->indexParams) + { + IndexElem *ielem = lfirst(lc); + + /* + * If the index parameter is an expression, we must + * translate it to contain child Vars. + */ + if (ielem->expr) + { + ielem->expr = + map_variable_attnos((Node *) ielem->expr, + 1, 0, attmap, maplen, + InvalidOid, + &found_whole_row); + if (found_whole_row) + elog(ERROR, "cannot convert whole-row table reference"); + } + } + childStmt->whereClause = + map_variable_attnos(stmt->whereClause, 1, 0, + attmap, maplen, + InvalidOid, &found_whole_row); + if (found_whole_row) + elog(ERROR, "cannot convert whole-row table reference"); + + childStmt->idxname = NULL; + childStmt->relationId = childRelid; + DefineIndex(childRelid, childStmt, + InvalidOid, /* no predefined OID */ + indexRelationId, /* this is our child */ + createdConstraintId, + is_alter_table, check_rights, check_not_in_use, + skip_build, quiet); + } + + pfree(attmap); + } + + /* + * The pg_index row we inserted for this index was marked + * indisvalid=true. But if we attached an existing index that is + * invalid, this is incorrect, so update our row to invalid too. 
+ */ + if (invalidate_parent) + { + Relation pg_index = heap_open(IndexRelationId, RowExclusiveLock); + HeapTuple tup, + newtup; + + tup = SearchSysCache1(INDEXRELID, + ObjectIdGetDatum(indexRelationId)); + if (!tup) + elog(ERROR, "cache lookup failed for index %u", + indexRelationId); + newtup = heap_copytuple(tup); + ((Form_pg_index) GETSTRUCT(newtup))->indisvalid = false; + CatalogTupleUpdate(pg_index, &tup->t_self, newtup); + ReleaseSysCache(tup); + heap_close(pg_index, RowExclusiveLock); + heap_freetuple(newtup); + } + } + else + heap_close(rel, NoLock); + + /* + * Indexes on partitioned tables are not themselves built, so we're + * done here. + */ + return address; + } + if (!stmt->concurrent) { /* Close the heap and we're done, in the non-concurrent case */ @@ -781,7 +1160,7 @@ DefineIndex(Oid relationId, indexInfo->ii_BrokenHotChain = false; /* Now build the index */ - index_build(rel, indexRelation, indexInfo, stmt->primary, false); + index_build(rel, indexRelation, indexInfo, stmt->primary, false, true); /* Close both the relations, but keep the locks */ heap_close(rel, NoLock); @@ -846,6 +1225,20 @@ DefineIndex(Oid relationId, PopActiveSnapshot(); UnregisterSnapshot(snapshot); + /* + * The snapshot subsystem could still contain registered snapshots that + * are holding back our process's advertised xmin; in particular, if + * default_transaction_isolation = serializable, there is a transaction + * snapshot that is still active. The CatalogSnapshot is likewise a + * hazard. To ensure no deadlocks, we must commit and start yet another + * transaction, and do our wait before any snapshot has been taken in it. + */ + CommitTransactionCommand(); + StartTransactionCommand(); + + /* We should now definitely not be advertising any xmin. */ + Assert(MyPgXact->xmin == InvalidTransactionId); + /* * The index is now valid in the sense that it contains all currently * interesting tuples. But since it might not contain tuples deleted just @@ -1001,7 +1394,8 @@ CheckPredicate(Expr *predicate) /* * Compute per-index-column information, including indexed column numbers - * or index expressions, opclasses, and indoptions. + * or index expressions, opclasses, and indoptions. Note, all output vectors + * should be allocated for all columns, including "including" ones. 
*/ static void ComputeIndexAttrs(IndexInfo *indexInfo, @@ -1012,7 +1406,7 @@ ComputeIndexAttrs(IndexInfo *indexInfo, List *attList, /* list of IndexElem's */ List *exclusionOpNames, Oid relId, - char *accessMethodName, + const char *accessMethodName, Oid accessMethodId, bool amcanorder, bool isconstraint) @@ -1020,16 +1414,15 @@ ComputeIndexAttrs(IndexInfo *indexInfo, ListCell *nextExclOp; ListCell *lc; int attn; + int nkeycols = indexInfo->ii_NumIndexKeyAttrs; /* Allocate space for exclusion operator info, if needed */ if (exclusionOpNames) { - int ncols = list_length(attList); - - Assert(list_length(exclusionOpNames) == ncols); - indexInfo->ii_ExclusionOps = (Oid *) palloc(sizeof(Oid) * ncols); - indexInfo->ii_ExclusionProcs = (Oid *) palloc(sizeof(Oid) * ncols); - indexInfo->ii_ExclusionStrats = (uint16 *) palloc(sizeof(uint16) * ncols); + Assert(list_length(exclusionOpNames) == nkeycols); + indexInfo->ii_ExclusionOps = (Oid *) palloc(sizeof(Oid) * nkeycols); + indexInfo->ii_ExclusionProcs = (Oid *) palloc(sizeof(Oid) * nkeycols); + indexInfo->ii_ExclusionStrats = (uint16 *) palloc(sizeof(uint16) * nkeycols); nextExclOp = list_head(exclusionOpNames); } else @@ -1071,7 +1464,7 @@ ComputeIndexAttrs(IndexInfo *indexInfo, attribute->name))); } attform = (Form_pg_attribute) GETSTRUCT(atttuple); - indexInfo->ii_KeyAttrNumbers[attn] = attform->attnum; + indexInfo->ii_IndexAttrNumbers[attn] = attform->attnum; atttype = attform->atttypid; attcollation = attform->attcollation; ReleaseSysCache(atttuple); @@ -1082,6 +1475,11 @@ ComputeIndexAttrs(IndexInfo *indexInfo, Node *expr = attribute->expr; Assert(expr != NULL); + + if (attn >= nkeycols) + ereport(ERROR, + (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), + errmsg("expressions are not supported in included columns"))); atttype = exprType(expr); attcollation = exprCollation(expr); @@ -1099,11 +1497,11 @@ ComputeIndexAttrs(IndexInfo *indexInfo, * User wrote "(column)" or "(column COLLATE something)". * Treat it like simple attribute anyway. */ - indexInfo->ii_KeyAttrNumbers[attn] = ((Var *) expr)->varattno; + indexInfo->ii_IndexAttrNumbers[attn] = ((Var *) expr)->varattno; } else { - indexInfo->ii_KeyAttrNumbers[attn] = 0; /* marks expression */ + indexInfo->ii_IndexAttrNumbers[attn] = 0; /* marks expression */ indexInfo->ii_Expressions = lappend(indexInfo->ii_Expressions, expr); @@ -1128,6 +1526,37 @@ ComputeIndexAttrs(IndexInfo *indexInfo, typeOidP[attn] = atttype; + /* + * Included columns have no collation, no opclass and no ordering + * options. 
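ComputeIndexAttrs now distinguishes the first ii_NumIndexKeyAttrs columns (key columns) from trailing INCLUDE columns, which merely ride along in the index tuple. Below is a small standalone sketch of that split, with illustrative struct and field names rather than PostgreSQL's, showing why per-column options are rejected past nkeycols.

#include <stdio.h>

/* Simplified per-column description; field names are illustrative. */
typedef struct
{
    int attnum;        /* column number in the table */
    int has_opclass;   /* user wrote an explicit operator class? */
    int has_ordering;  /* user wrote ASC/DESC or NULLS FIRST/LAST? */
} IndexColumn;

/*
 * Return 0 on success, -1 if an INCLUDE column carries options that only
 * key columns may have, which is the rule the checks above enforce.
 */
static int check_columns(const IndexColumn *cols, int ncols, int nkeycols)
{
    for (int attn = 0; attn < ncols; attn++)
    {
        if (attn >= nkeycols)
        {
            if (cols[attn].has_opclass || cols[attn].has_ordering)
            {
                fprintf(stderr,
                        "included column %d may not have opclass/ordering options\n",
                        cols[attn].attnum);
                return -1;
            }
        }
        /* Key columns would go on to resolve an opclass, collation, etc. */
    }
    return 0;
}

int main(void)
{
    /* CREATE INDEX ... (a, b) INCLUDE (c): two key columns, one included. */
    IndexColumn cols[] = {{1, 1, 0}, {2, 0, 1}, {3, 0, 0}};

    printf("check: %s\n", check_columns(cols, 3, 2) == 0 ? "ok" : "rejected");
    return 0;
}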
+ */ + if (attn >= nkeycols) + { + if (attribute->collation) + ereport(ERROR, + (errcode(ERRCODE_INVALID_OBJECT_DEFINITION), + errmsg("including column does not support a collation"))); + if (attribute->opclass) + ereport(ERROR, + (errcode(ERRCODE_INVALID_OBJECT_DEFINITION), + errmsg("including column does not support an operator class"))); + if (attribute->ordering != SORTBY_DEFAULT) + ereport(ERROR, + (errcode(ERRCODE_INVALID_OBJECT_DEFINITION), + errmsg("including column does not support ASC/DESC options"))); + if (attribute->nulls_ordering != SORTBY_NULLS_DEFAULT) + ereport(ERROR, + (errcode(ERRCODE_INVALID_OBJECT_DEFINITION), + errmsg("including column does not support NULLS FIRST/LAST options"))); + + classOidP[attn] = InvalidOid; + colOptionP[attn] = 0; + collationOidP[attn] = InvalidOid; + attn++; + + continue; + } + /* * Apply collation override if any */ @@ -1278,7 +1707,7 @@ ComputeIndexAttrs(IndexInfo *indexInfo, */ Oid ResolveOpClass(List *opclass, Oid attrType, - char *accessMethodName, Oid accessMethodId) + const char *accessMethodName, Oid accessMethodId) { char *schemaname; char *opcname; @@ -1477,7 +1906,8 @@ GetDefaultOpClass(Oid type_id, Oid am_id) /* * makeObjectName() * - * Create a name for an implicitly created index, sequence, constraint, etc. + * Create a name for an implicitly created index, sequence, constraint, + * extended statistics, etc. * * The parameters are typically: the original table name, the original field * name, and a "type" string (such as "seq" or "pkey"). The field name @@ -1566,6 +1996,12 @@ makeObjectName(const char *name1, const char *name2, const char *label) * except that the label can't be NULL; digits will be appended to the label * if needed to create a name that is unique within the specified namespace. * + * If isconstraint is true, we also avoid choosing a name matching any + * existing constraint in the same namespace. (This is stricter than what + * Postgres itself requires, but the SQL standard says that constraint names + * should be unique within schemas, so we follow that for autogenerated + * constraint names.) + * * Note: it is theoretically possible to get a collision anyway, if someone * else chooses the same name concurrently. 
This is fairly unlikely to be * a problem in practice, especially if one is holding an exclusive lock on @@ -1577,7 +2013,8 @@ makeObjectName(const char *name1, const char *name2, const char *label) */ char * ChooseRelationName(const char *name1, const char *name2, - const char *label, Oid namespaceid) + const char *label, Oid namespaceid, + bool isconstraint) { int pass = 0; char *relname = NULL; @@ -1591,7 +2028,11 @@ ChooseRelationName(const char *name1, const char *name2, relname = makeObjectName(name1, name2, modlabel); if (!OidIsValid(get_relname_relid(relname, namespaceid))) - break; + { + if (!isconstraint || + !ConstraintNameExists(relname, namespaceid)) + break; + } /* found a conflict, so try a new name component */ pfree(relname); @@ -1619,28 +2060,32 @@ ChooseIndexName(const char *tabname, Oid namespaceId, indexname = ChooseRelationName(tabname, NULL, "pkey", - namespaceId); + namespaceId, + true); } else if (exclusionOpNames != NIL) { indexname = ChooseRelationName(tabname, ChooseIndexNameAddition(colnames), "excl", - namespaceId); + namespaceId, + true); } else if (isconstraint) { indexname = ChooseRelationName(tabname, ChooseIndexNameAddition(colnames), "key", - namespaceId); + namespaceId, + true); } else { indexname = ChooseRelationName(tabname, ChooseIndexNameAddition(colnames), "idx", - namespaceId); + namespaceId, + false); } return indexname; @@ -1653,6 +2098,8 @@ ChooseIndexName(const char *tabname, Oid namespaceId, * * We know that less than NAMEDATALEN characters will actually be used, * so we can truncate the result once we've generated that many. + * + * XXX See also ChooseExtendedStatisticNameAddition. */ static char * ChooseIndexNameAddition(List *colnames) @@ -1746,7 +2193,7 @@ ChooseIndexColumnNames(List *indexElems) * ReindexIndex * Recreate a specific index. */ -Oid +void ReindexIndex(RangeVar *indexRelation, int options) { Oid indOid; @@ -1760,7 +2207,7 @@ ReindexIndex(RangeVar *indexRelation, int options) * used here must match the index lock obtained in reindex_index(). */ indOid = RangeVarGetRelidExtended(indexRelation, AccessExclusiveLock, - false, false, + 0, RangeVarCallbackForReindexIndex, (void *) &heapOid); @@ -1769,12 +2216,17 @@ ReindexIndex(RangeVar *indexRelation, int options) * lock on the index. */ irel = index_open(indOid, NoLock); + + if (irel->rd_rel->relkind == RELKIND_PARTITIONED_INDEX) + { + ReindexPartitionedIndex(irel); + return; + } + persistence = irel->rd_rel->relpersistence; index_close(irel, NoLock); reindex_index(indOid, false, persistence, options); - - return indOid; } /* @@ -1813,14 +2265,15 @@ RangeVarCallbackForReindexIndex(const RangeVar *relation, relkind = get_rel_relkind(relId); if (!relkind) return; - if (relkind != RELKIND_INDEX) + if (relkind != RELKIND_INDEX && + relkind != RELKIND_PARTITIONED_INDEX) ereport(ERROR, (errcode(ERRCODE_WRONG_OBJECT_TYPE), errmsg("\"%s\" is not an index", relation->relname))); /* Check permissions */ if (!pg_class_ownercheck(relId, GetUserId())) - aclcheck_error(ACLCHECK_NOT_OWNER, ACL_KIND_CLASS, relation->relname); + aclcheck_error(ACLCHECK_NOT_OWNER, OBJECT_INDEX, relation->relname); /* Lock heap before index to avoid deadlock. */ if (relId != oldRelId) @@ -1846,7 +2299,7 @@ ReindexTable(RangeVar *relation, int options) Oid heapOid; /* The lock level used here should match reindex_relation(). 
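ChooseRelationName retries candidate names with an appended counter until it finds one that is free, and with the new isconstraint flag it also avoids clashing with existing constraint names. The sketch below follows the shape of that retry loop under simplified assumptions: a single hypothetical name_exists() check stands in for both the relation and constraint lookups, and NAMEDATALEN-style truncation is not modeled.

#include <stdio.h>
#include <string.h>

#define MAXNAMELEN 64   /* stand-in for NAMEDATALEN-style limits */

/* Pretend these names are already taken in the target namespace. */
static int name_exists(const char *name)
{
    return strcmp(name, "orders_pkey") == 0 ||
           strcmp(name, "orders_pkey1") == 0;
}

/*
 * Build "<name>_<label>" and, on conflict, retry with "<label>1", "<label>2",
 * ... until a free name is found.
 */
static void choose_name(const char *name, const char *label, char *out)
{
    for (int pass = 0;; pass++)
    {
        if (pass == 0)
            snprintf(out, MAXNAMELEN, "%s_%s", name, label);
        else
            snprintf(out, MAXNAMELEN, "%s_%s%d", name, label, pass);

        if (!name_exists(out))
            return;     /* no relation (or constraint) with this name */
    }
}

int main(void)
{
    char chosen[MAXNAMELEN];

    choose_name("orders", "pkey", chosen);
    printf("chosen index name: %s\n", chosen);  /* prints orders_pkey2 */
    return 0;
}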
*/ - heapOid = RangeVarGetRelidExtended(relation, ShareLock, false, false, + heapOid = RangeVarGetRelidExtended(relation, ShareLock, 0, RangeVarCallbackOwnsTable, NULL); if (!reindex_relation(heapOid, @@ -1899,7 +2352,7 @@ ReindexMultipleTables(const char *objectName, ReindexObjectType objectKind, objectOid = get_namespace_oid(objectName, false); if (!pg_namespace_ownercheck(objectOid, GetUserId())) - aclcheck_error(ACLCHECK_NOT_OWNER, ACL_KIND_NAMESPACE, + aclcheck_error(ACLCHECK_NOT_OWNER, OBJECT_SCHEMA, objectName); } else @@ -1911,7 +2364,7 @@ ReindexMultipleTables(const char *objectName, ReindexObjectType objectKind, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), errmsg("can only reindex the currently open database"))); if (!pg_database_ownercheck(objectOid, GetUserId())) - aclcheck_error(ACLCHECK_NOT_OWNER, ACL_KIND_DATABASE, + aclcheck_error(ACLCHECK_NOT_OWNER, OBJECT_DATABASE, objectName); } @@ -1957,6 +2410,12 @@ ReindexMultipleTables(const char *objectName, ReindexObjectType objectKind, /* * Only regular tables and matviews can have indexes, so ignore any * other kind of relation. + * + * It is tempting to also consider partitioned tables here, but that + * has the problem that if the children are in the same schema, they + * would be processed twice. Maybe we could have a separate list of + * partitioned tables, and expand that afterwards into relids, + * ignoring any duplicates. */ if (classtuple->relkind != RELKIND_RELATION && classtuple->relkind != RELKIND_MATVIEW) @@ -1972,6 +2431,18 @@ ReindexMultipleTables(const char *objectName, ReindexObjectType objectKind, !IsSystemClass(relid, classtuple)) continue; + /* + * The table can be reindexed if the user is superuser, the table + * owner, or the database/schema owner (but in the latter case, only + * if it's not a shared relation). pg_class_ownercheck includes the + * superuser case, and depending on objectKind we already know that + * the user has permission to run REINDEX on this database or schema + * per the permission checks at the beginning of this routine. + */ + if (classtuple->relisshared && + !pg_class_ownercheck(relid, GetUserId())) + continue; + /* Save the list of relation OIDs in private context */ old = MemoryContextSwitchTo(private_context); @@ -2019,3 +2490,162 @@ ReindexMultipleTables(const char *objectName, ReindexObjectType objectKind, MemoryContextDelete(private_context); } + +/* + * ReindexPartitionedIndex + * Reindex each child of the given partitioned index. + * + * Not yet implemented. + */ +static void +ReindexPartitionedIndex(Relation parentIdx) +{ + ereport(ERROR, + (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), + errmsg("REINDEX is not yet implemented for partitioned indexes"))); +} + +/* + * Insert or delete an appropriate pg_inherits tuple to make the given index + * be a partition of the indicated parent index. + * + * This also corrects the pg_depend information for the affected index. + */ +void +IndexSetParentIndex(Relation partitionIdx, Oid parentOid) +{ + Relation pg_inherits; + ScanKeyData key[2]; + SysScanDesc scan; + Oid partRelid = RelationGetRelid(partitionIdx); + HeapTuple tuple; + bool fix_dependencies; + + /* Make sure this is an index */ + Assert(partitionIdx->rd_rel->relkind == RELKIND_INDEX || + partitionIdx->rd_rel->relkind == RELKIND_PARTITIONED_INDEX); + + /* + * Scan pg_inherits for rows linking our index to some parent. 
+ */ + pg_inherits = relation_open(InheritsRelationId, RowExclusiveLock); + ScanKeyInit(&key[0], + Anum_pg_inherits_inhrelid, + BTEqualStrategyNumber, F_OIDEQ, + ObjectIdGetDatum(partRelid)); + ScanKeyInit(&key[1], + Anum_pg_inherits_inhseqno, + BTEqualStrategyNumber, F_INT4EQ, + Int32GetDatum(1)); + scan = systable_beginscan(pg_inherits, InheritsRelidSeqnoIndexId, true, + NULL, 2, key); + tuple = systable_getnext(scan); + + if (!HeapTupleIsValid(tuple)) + { + if (parentOid == InvalidOid) + { + /* + * No pg_inherits row, and no parent wanted: nothing to do in this + * case. + */ + fix_dependencies = false; + } + else + { + Datum values[Natts_pg_inherits]; + bool isnull[Natts_pg_inherits]; + + /* + * No pg_inherits row exists, and we want a parent for this index, + * so insert it. + */ + values[Anum_pg_inherits_inhrelid - 1] = ObjectIdGetDatum(partRelid); + values[Anum_pg_inherits_inhparent - 1] = + ObjectIdGetDatum(parentOid); + values[Anum_pg_inherits_inhseqno - 1] = Int32GetDatum(1); + memset(isnull, false, sizeof(isnull)); + + tuple = heap_form_tuple(RelationGetDescr(pg_inherits), + values, isnull); + CatalogTupleInsert(pg_inherits, tuple); + + fix_dependencies = true; + } + } + else + { + Form_pg_inherits inhForm = (Form_pg_inherits) GETSTRUCT(tuple); + + if (parentOid == InvalidOid) + { + /* + * There exists a pg_inherits row, which we want to clear; do so. + */ + CatalogTupleDelete(pg_inherits, &tuple->t_self); + fix_dependencies = true; + } + else + { + /* + * A pg_inherits row exists. If it's the same we want, then we're + * good; if it differs, that amounts to a corrupt catalog and + * should not happen. + */ + if (inhForm->inhparent != parentOid) + { + /* unexpected: we should not get called in this case */ + elog(ERROR, "bogus pg_inherit row: inhrelid %u inhparent %u", + inhForm->inhrelid, inhForm->inhparent); + } + + /* already in the right state */ + fix_dependencies = false; + } + } + + /* done with pg_inherits */ + systable_endscan(scan); + relation_close(pg_inherits, RowExclusiveLock); + + /* set relhassubclass if an index partition has been added to the parent */ + if (OidIsValid(parentOid)) + SetRelationHasSubclass(parentOid, true); + + if (fix_dependencies) + { + ObjectAddress partIdx; + + /* + * Insert/delete pg_depend rows. If setting a parent, add an + * INTERNAL_AUTO dependency to the parent index; if making standalone, + * remove all existing rows and put back the regular dependency on the + * table. 
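IndexSetParentIndex boils down to a four-way decision: whether a pg_inherits row already links the index to a parent, and whether a parent is wanted at all. Here is a standalone sketch of just that decision; 0 stands in for InvalidOid, and the case of an existing row pointing at a different parent (which the real code treats as catalog corruption and reports with elog) is collapsed into the do-nothing branch.

#include <stdio.h>

typedef enum { DO_NOTHING, INSERT_LINK, DELETE_LINK } LinkAction;

/*
 * Decide what to do with the child->parent link, mirroring the cases above:
 * no row + no parent wanted, no row + parent wanted, row present + detach
 * requested, row present + same parent already recorded.
 */
static LinkAction decide(unsigned existing_parent, unsigned wanted_parent)
{
    if (existing_parent == 0)
        return (wanted_parent == 0) ? DO_NOTHING : INSERT_LINK;

    if (wanted_parent == 0)
        return DELETE_LINK;

    /* A differing, nonzero parent would be reported as corruption instead. */
    return DO_NOTHING;
}

int main(void)
{
    printf("%d\n", decide(0, 42));  /* INSERT_LINK: attach to parent 42 */
    printf("%d\n", decide(42, 0));  /* DELETE_LINK: make index standalone */
    printf("%d\n", decide(0, 0));   /* DO_NOTHING */
    return 0;
}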
+ */ + ObjectAddressSet(partIdx, RelationRelationId, partRelid); + + if (OidIsValid(parentOid)) + { + ObjectAddress parentIdx; + + ObjectAddressSet(parentIdx, RelationRelationId, parentOid); + recordDependencyOn(&partIdx, &parentIdx, DEPENDENCY_INTERNAL_AUTO); + } + else + { + ObjectAddress partitionTbl; + + ObjectAddressSet(partitionTbl, RelationRelationId, + partitionIdx->rd_index->indrelid); + + deleteDependencyRecordsForClass(RelationRelationId, partRelid, + RelationRelationId, + DEPENDENCY_INTERNAL_AUTO); + + recordDependencyOn(&partIdx, &partitionTbl, DEPENDENCY_AUTO); + } + + /* make our updates visible */ + CommandCounterIncrement(); + } +} diff --git a/src/backend/commands/lockcmds.c b/src/backend/commands/lockcmds.c index 9fe9e022b0..71278b38cf 100644 --- a/src/backend/commands/lockcmds.c +++ b/src/backend/commands/lockcmds.c @@ -3,7 +3,7 @@ * lockcmds.c * LOCK command support code * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * @@ -15,7 +15,7 @@ #include "postgres.h" #include "catalog/namespace.h" -#include "catalog/pg_inherits_fn.h" +#include "catalog/pg_inherits.h" #include "commands/lockcmds.h" #include "miscadmin.h" #include "parser/parse_clause.h" @@ -23,11 +23,15 @@ #include "utils/acl.h" #include "utils/lsyscache.h" #include "utils/syscache.h" +#include "rewrite/rewriteHandler.h" +#include "access/heapam.h" +#include "nodes/nodeFuncs.h" -static void LockTableRecurse(Oid reloid, LOCKMODE lockmode, bool nowait); -static AclResult LockTableAclCheck(Oid relid, LOCKMODE lockmode); +static void LockTableRecurse(Oid reloid, LOCKMODE lockmode, bool nowait, Oid userid); +static AclResult LockTableAclCheck(Oid relid, LOCKMODE lockmode, Oid userid); static void RangeVarCallbackForLockTable(const RangeVar *rv, Oid relid, Oid oldrelid, void *arg); +static void LockViewRecurse(Oid reloid, LOCKMODE lockmode, bool nowait, List *ancestor_views); /* * LOCK TABLE @@ -57,13 +61,15 @@ LockTableCommand(LockStmt *lockstmt) bool recurse = rv->inh; Oid reloid; - reloid = RangeVarGetRelidExtended(rv, lockstmt->mode, false, - lockstmt->nowait, + reloid = RangeVarGetRelidExtended(rv, lockstmt->mode, + lockstmt->nowait ? RVR_NOWAIT : 0, RangeVarCallbackForLockTable, (void *) &lockstmt->mode); - if (recurse) - LockTableRecurse(reloid, lockstmt->mode, lockstmt->nowait); + if (get_rel_relkind(reloid) == RELKIND_VIEW) + LockViewRecurse(reloid, lockstmt->mode, lockstmt->nowait, NIL); + else if (recurse) + LockTableRecurse(reloid, lockstmt->mode, lockstmt->nowait, GetUserId()); } } @@ -86,17 +92,18 @@ RangeVarCallbackForLockTable(const RangeVar *rv, Oid relid, Oid oldrelid, return; /* woops, concurrently dropped; no permissions * check */ - /* Currently, we only allow plain tables to be locked */ - if (relkind != RELKIND_RELATION && relkind != RELKIND_PARTITIONED_TABLE) + /* Currently, we only allow plain tables or views to be locked */ + if (relkind != RELKIND_RELATION && relkind != RELKIND_PARTITIONED_TABLE && + relkind != RELKIND_VIEW) ereport(ERROR, (errcode(ERRCODE_WRONG_OBJECT_TYPE), - errmsg("\"%s\" is not a table", + errmsg("\"%s\" is not a table or a view", rv->relname))); /* Check permissions. 
*/ - aclresult = LockTableAclCheck(relid, lockmode); + aclresult = LockTableAclCheck(relid, lockmode, GetUserId()); if (aclresult != ACLCHECK_OK) - aclcheck_error(aclresult, ACL_KIND_CLASS, rv->relname); + aclcheck_error(aclresult, get_relkind_objtype(get_rel_relkind(relid)), rv->relname); } /* @@ -107,7 +114,7 @@ RangeVarCallbackForLockTable(const RangeVar *rv, Oid relid, Oid oldrelid, * multiply-inheriting children more than once, but that's no problem. */ static void -LockTableRecurse(Oid reloid, LOCKMODE lockmode, bool nowait) +LockTableRecurse(Oid reloid, LOCKMODE lockmode, bool nowait, Oid userid) { List *children; ListCell *lc; @@ -120,14 +127,14 @@ LockTableRecurse(Oid reloid, LOCKMODE lockmode, bool nowait) AclResult aclresult; /* Check permissions before acquiring the lock. */ - aclresult = LockTableAclCheck(childreloid, lockmode); + aclresult = LockTableAclCheck(childreloid, lockmode, userid); if (aclresult != ACLCHECK_OK) { char *relname = get_rel_name(childreloid); if (!relname) continue; /* child concurrently dropped, just skip it */ - aclcheck_error(aclresult, ACL_KIND_CLASS, relname); + aclcheck_error(aclresult, get_relkind_objtype(get_rel_relkind(childreloid)), relname); } /* We have enough rights to lock the relation; do so. */ @@ -157,15 +164,126 @@ LockTableRecurse(Oid reloid, LOCKMODE lockmode, bool nowait) continue; } - LockTableRecurse(childreloid, lockmode, nowait); + LockTableRecurse(childreloid, lockmode, nowait, userid); } } +/* + * Apply LOCK TABLE recursively over a view + * + * All tables and views appearing in the view definition query are locked + * recursively with the same lock mode. + */ + +typedef struct +{ + LOCKMODE lockmode; /* lock mode to use */ + bool nowait; /* no wait mode */ + Oid viewowner; /* view owner for checking the privilege */ + Oid viewoid; /* OID of the view to be locked */ + List *ancestor_views; /* OIDs of ancestor views */ +} LockViewRecurse_context; + +static bool +LockViewRecurse_walker(Node *node, LockViewRecurse_context *context) +{ + if (node == NULL) + return false; + + if (IsA(node, Query)) + { + Query *query = (Query *) node; + ListCell *rtable; + + foreach(rtable, query->rtable) + { + RangeTblEntry *rte = lfirst(rtable); + AclResult aclresult; + + Oid relid = rte->relid; + char relkind = rte->relkind; + char *relname = get_rel_name(relid); + + /* + * The OLD and NEW placeholder entries in the view's rtable are + * skipped. + */ + if (relid == context->viewoid && + (!strcmp(rte->eref->aliasname, "old") || !strcmp(rte->eref->aliasname, "new"))) + continue; + + /* Currently, we only allow plain tables or views to be locked. */ + if (relkind != RELKIND_RELATION && relkind != RELKIND_PARTITIONED_TABLE && + relkind != RELKIND_VIEW) + continue; + + /* Check infinite recursion in the view definition. */ + if (list_member_oid(context->ancestor_views, relid)) + ereport(ERROR, + (errcode(ERRCODE_INVALID_OBJECT_DEFINITION), + errmsg("infinite recursion detected in rules for relation \"%s\"", + get_rel_name(relid)))); + + /* Check permissions with the view owner's privilege. */ + aclresult = LockTableAclCheck(relid, context->lockmode, context->viewowner); + if (aclresult != ACLCHECK_OK) + aclcheck_error(aclresult, get_relkind_objtype(relkind), relname); + + /* We have enough rights to lock the relation; do so. 
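LockViewRecurse_walker carries its parameters in a context struct and guards against self-referential view definitions by tracking ancestor views. The following self-contained sketch mirrors that pattern on a toy data structure; the node types, the fixed-size ancestor array, and the printf stand-ins for LockRelationOid are all illustrative.

#include <stdio.h>
#include <stddef.h>

/* A toy "relation" node: a view references other relations. */
typedef struct Rel
{
    const char  *name;
    int          is_view;
    struct Rel **refs;          /* NULL-terminated list of referenced relations */
} Rel;

typedef struct
{
    int        lockmode;        /* lock mode to propagate */
    const Rel *ancestors[16];   /* crude recursion guard */
    int        nancestors;
} WalkContext;

/* Walk a view definition, "locking" every relation it references. */
static void lock_view_recurse(const Rel *rel, WalkContext *ctx)
{
    for (int i = 0; i < ctx->nancestors; i++)
    {
        if (ctx->ancestors[i] == rel)
        {
            fprintf(stderr, "infinite recursion detected for \"%s\"\n", rel->name);
            return;
        }
    }

    ctx->ancestors[ctx->nancestors++] = rel;
    printf("locking %s with mode %d\n", rel->name, ctx->lockmode);

    if (rel->refs)
    {
        for (Rel **r = rel->refs; *r; r++)
        {
            if ((*r)->is_view)
                lock_view_recurse(*r, ctx);
            else
                printf("locking %s with mode %d\n", (*r)->name, ctx->lockmode);
        }
    }
    ctx->nancestors--;
}

int main(void)
{
    Rel  t = {"base_table", 0, NULL};
    Rel *inner_refs[] = {&t, NULL};
    Rel  inner = {"inner_view", 1, inner_refs};
    Rel *outer_refs[] = {&inner, NULL};
    Rel  outer = {"outer_view", 1, outer_refs};
    WalkContext ctx = {7, {NULL}, 0};

    lock_view_recurse(&outer, &ctx);
    return 0;
}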
*/ + if (!context->nowait) + LockRelationOid(relid, context->lockmode); + else if (!ConditionalLockRelationOid(relid, context->lockmode)) + ereport(ERROR, + (errcode(ERRCODE_LOCK_NOT_AVAILABLE), + errmsg("could not obtain lock on relation \"%s\"", + relname))); + + if (relkind == RELKIND_VIEW) + LockViewRecurse(relid, context->lockmode, context->nowait, context->ancestor_views); + else if (rte->inh) + LockTableRecurse(relid, context->lockmode, context->nowait, context->viewowner); + } + + return query_tree_walker(query, + LockViewRecurse_walker, + context, + QTW_IGNORE_JOINALIASES); + } + + return expression_tree_walker(node, + LockViewRecurse_walker, + context); +} + +static void +LockViewRecurse(Oid reloid, LOCKMODE lockmode, bool nowait, List *ancestor_views) +{ + LockViewRecurse_context context; + + Relation view; + Query *viewquery; + + view = heap_open(reloid, NoLock); + viewquery = get_view_query(view); + + context.lockmode = lockmode; + context.nowait = nowait; + context.viewowner = view->rd_rel->relowner; + context.viewoid = reloid; + context.ancestor_views = lcons_oid(reloid, ancestor_views); + + LockViewRecurse_walker((Node *) viewquery, &context); + + ancestor_views = list_delete_oid(ancestor_views, reloid); + + heap_close(view, NoLock); +} + /* * Check whether the current user is permitted to lock this relation. */ static AclResult -LockTableAclCheck(Oid reloid, LOCKMODE lockmode) +LockTableAclCheck(Oid reloid, LOCKMODE lockmode, Oid userid) { AclResult aclresult; AclMode aclmask; @@ -178,7 +296,7 @@ LockTableAclCheck(Oid reloid, LOCKMODE lockmode) else aclmask = ACL_UPDATE | ACL_DELETE | ACL_TRUNCATE; - aclresult = pg_class_aclcheck(reloid, GetUserId(), aclmask); + aclresult = pg_class_aclcheck(reloid, userid, aclmask); return aclresult; } diff --git a/src/backend/commands/matview.c b/src/backend/commands/matview.c index 7d57f97442..e1eb7c374b 100644 --- a/src/backend/commands/matview.c +++ b/src/backend/commands/matview.c @@ -3,7 +3,7 @@ * matview.c * materialized view support * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * @@ -21,6 +21,8 @@ #include "catalog/catalog.h" #include "catalog/indexing.h" #include "catalog/namespace.h" +#include "catalog/pg_am.h" +#include "catalog/pg_opclass.h" #include "catalog/pg_operator.h" #include "commands/cluster.h" #include "commands/matview.h" @@ -40,7 +42,6 @@ #include "utils/rel.h" #include "utils/snapmgr.h" #include "utils/syscache.h" -#include "utils/typcache.h" typedef struct @@ -62,14 +63,11 @@ static void transientrel_shutdown(DestReceiver *self); static void transientrel_destroy(DestReceiver *self); static uint64 refresh_matview_datafill(DestReceiver *dest, Query *query, const char *queryString); - static char *make_temptable_name_n(char *tempname, int n); -static void mv_GenerateOper(StringInfo buf, Oid opoid); - static void refresh_by_match_merge(Oid matviewOid, Oid tempOid, Oid relowner, int save_sec_context); static void refresh_by_heap_swap(Oid matviewOid, Oid OIDNewHeap, char relpersistence); - +static bool is_usable_unique_index(Relation indexRel); static void OpenMatViewIncrementalMaintenance(void); static void CloseMatViewIncrementalMaintenance(void); @@ -163,7 +161,7 @@ ExecRefreshMatView(RefreshMatViewStmt *stmt, const char *queryString, * Get a lock until end of transaction. 
*/ matviewOid = RangeVarGetRelidExtended(stmt->relation, - lockmode, false, false, + lockmode, 0, RangeVarCallbackOwnsTable, NULL); matviewRel = heap_open(matviewOid, NoLock); @@ -230,23 +228,12 @@ ExecRefreshMatView(RefreshMatViewStmt *stmt, const char *queryString, { Oid indexoid = lfirst_oid(indexoidscan); Relation indexRel; - Form_pg_index indexStruct; indexRel = index_open(indexoid, AccessShareLock); - indexStruct = indexRel->rd_index; - - if (indexStruct->indisunique && - IndexIsValid(indexStruct) && - RelationGetIndexExpressions(indexRel) == NIL && - RelationGetIndexPredicate(indexRel) == NIL && - indexStruct->indnatts > 0) - { - hasUniqueIndex = true; - index_close(indexRel, AccessShareLock); - break; - } - + hasUniqueIndex = is_usable_unique_index(indexRel); index_close(indexRel, AccessShareLock); + if (hasUniqueIndex) + break; } list_free(indexoidlist); @@ -557,25 +544,6 @@ make_temptable_name_n(char *tempname, int n) return namebuf.data; } -static void -mv_GenerateOper(StringInfo buf, Oid opoid) -{ - HeapTuple opertup; - Form_pg_operator operform; - - opertup = SearchSysCache1(OPEROID, ObjectIdGetDatum(opoid)); - if (!HeapTupleIsValid(opertup)) - elog(ERROR, "cache lookup failed for operator %u", opoid); - operform = (Form_pg_operator) GETSTRUCT(opertup); - Assert(operform->oprkind == 'b'); - - appendStringInfo(buf, "OPERATOR(%s.%s)", - quote_identifier(get_namespace_name(operform->oprnamespace)), - NameStr(operform->oprname)); - - ReleaseSysCache(opertup); -} - /* * refresh_by_match_merge * @@ -623,7 +591,7 @@ refresh_by_match_merge(Oid matviewOid, Oid tempOid, Oid relowner, List *indexoidlist; ListCell *indexoidscan; int16 relnatts; - bool *usedForQual; + Oid *opUsedForQual; initStringInfo(&querybuf); matviewRel = heap_open(matviewOid, NoLock); @@ -634,8 +602,7 @@ refresh_by_match_merge(Oid matviewOid, Oid tempOid, Oid relowner, RelationGetRelationName(tempRel)); diffname = make_temptable_name_n(tempname, 2); - relnatts = matviewRel->rd_rel->relnatts; - usedForQual = (bool *) palloc0(sizeof(bool) * relnatts); + relnatts = RelationGetNumberOfAttributes(matviewRel); /* Open SPI context. */ if (SPI_connect() != SPI_OK_CONNECT) @@ -657,10 +624,10 @@ refresh_by_match_merge(Oid matviewOid, Oid tempOid, Oid relowner, appendStringInfo(&querybuf, "SELECT newdata FROM %s newdata " "WHERE newdata IS NOT NULL AND EXISTS " - "(SELECT * FROM %s newdata2 WHERE newdata2 IS NOT NULL " + "(SELECT 1 FROM %s newdata2 WHERE newdata2 IS NOT NULL " "AND newdata2 OPERATOR(pg_catalog.*=) newdata " "AND newdata2.ctid OPERATOR(pg_catalog.<>) " - "newdata.ctid) LIMIT 1", + "newdata.ctid)", tempname, tempname); if (SPI_execute(querybuf.data, false, 1) != SPI_OK_SELECT) elog(ERROR, "SPI_exec failed: %s", querybuf.data); @@ -699,45 +666,82 @@ refresh_by_match_merge(Oid matviewOid, Oid tempOid, Oid relowner, * include all rows. */ tupdesc = matviewRel->rd_att; + opUsedForQual = (Oid *) palloc0(sizeof(Oid) * relnatts); foundUniqueIndex = false; + indexoidlist = RelationGetIndexList(matviewRel); foreach(indexoidscan, indexoidlist) { Oid indexoid = lfirst_oid(indexoidscan); Relation indexRel; - Form_pg_index indexStruct; indexRel = index_open(indexoid, RowExclusiveLock); - indexStruct = indexRel->rd_index; - - /* - * We're only interested if it is unique, valid, contains no - * expressions, and is not partial. 
- */ - if (indexStruct->indisunique && - IndexIsValid(indexStruct) && - RelationGetIndexExpressions(indexRel) == NIL && - RelationGetIndexPredicate(indexRel) == NIL) + if (is_usable_unique_index(indexRel)) { - int numatts = indexStruct->indnatts; + Form_pg_index indexStruct = indexRel->rd_index; + int indnkeyatts = indexStruct->indnkeyatts; + oidvector *indclass; + Datum indclassDatum; + bool isnull; int i; + /* Must get indclass the hard way. */ + indclassDatum = SysCacheGetAttr(INDEXRELID, + indexRel->rd_indextuple, + Anum_pg_index_indclass, + &isnull); + Assert(!isnull); + indclass = (oidvector *) DatumGetPointer(indclassDatum); + /* Add quals for all columns from this index. */ - for (i = 0; i < numatts; i++) + for (i = 0; i < indnkeyatts; i++) { int attnum = indexStruct->indkey.values[i]; - Oid type; + Oid opclass = indclass->values[i]; + Form_pg_attribute attr = TupleDescAttr(tupdesc, attnum - 1); + Oid attrtype = attr->atttypid; + HeapTuple cla_ht; + Form_pg_opclass cla_tup; + Oid opfamily; + Oid opcintype; Oid op; - const char *colname; + const char *leftop; + const char *rightop; /* - * Only include the column once regardless of how many times - * it shows up in how many indexes. + * Identify the equality operator associated with this index + * column. First we need to look up the column's opclass. */ - if (usedForQual[attnum - 1]) + cla_ht = SearchSysCache1(CLAOID, ObjectIdGetDatum(opclass)); + if (!HeapTupleIsValid(cla_ht)) + elog(ERROR, "cache lookup failed for opclass %u", opclass); + cla_tup = (Form_pg_opclass) GETSTRUCT(cla_ht); + Assert(cla_tup->opcmethod == BTREE_AM_OID); + opfamily = cla_tup->opcfamily; + opcintype = cla_tup->opcintype; + ReleaseSysCache(cla_ht); + + op = get_opfamily_member(opfamily, opcintype, opcintype, + BTEqualStrategyNumber); + if (!OidIsValid(op)) + elog(ERROR, "missing operator %d(%u,%u) in opfamily %u", + BTEqualStrategyNumber, opcintype, opcintype, opfamily); + + /* + * If we find the same column with the same equality semantics + * in more than one index, we only need to emit the equality + * clause once. + * + * Since we only remember the last equality operator, this + * code could be fooled into emitting duplicate clauses given + * multiple indexes with several different opclasses ... but + * that's so unlikely it doesn't seem worth spending extra + * code to avoid. + */ + if (opUsedForQual[attnum - 1] == op) continue; - usedForQual[attnum - 1] = true; + opUsedForQual[attnum - 1] = op; /* * Actually add the qual, ANDed with any others. @@ -745,12 +749,15 @@ refresh_by_match_merge(Oid matviewOid, Oid tempOid, Oid relowner, if (foundUniqueIndex) appendStringInfoString(&querybuf, " AND "); - colname = quote_identifier(NameStr((tupdesc->attrs[attnum - 1])->attname)); - appendStringInfo(&querybuf, "newdata.%s ", colname); - type = attnumTypeId(matviewRel, attnum); - op = lookup_type_cache(type, TYPECACHE_EQ_OPR)->eq_opr; - mv_GenerateOper(&querybuf, op); - appendStringInfo(&querybuf, " mv.%s", colname); + leftop = quote_qualified_identifier("newdata", + NameStr(attr->attname)); + rightop = quote_qualified_identifier("mv", + NameStr(attr->attname)); + + generate_operator_clause(&querybuf, + leftop, attrtype, + op, + rightop, attrtype); foundUniqueIndex = true; } @@ -763,11 +770,11 @@ refresh_by_match_merge(Oid matviewOid, Oid tempOid, Oid relowner, list_free(indexoidlist); /* - * There must be at least one unique index on the matview. + * There must be at least one usable unique index on the matview. 
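The loop above derives one equality clause per key column of each usable unique index, resolving the operator through the column's btree opclass and skipping columns already emitted with the same operator. The sketch below reproduces only the string building and per-column dedup; it hard-codes the equality operator instead of looking it up via get_opfamily_member().

#include <stdio.h>
#include <string.h>

int main(void)
{
    /* Key columns gathered from all usable unique indexes; "a" repeats. */
    const char *key_cols[] = {"a", "b", "a"};
    char        qual[256] = "";
    int         first = 1;

    for (int i = 0; i < 3; i++)
    {
        /* Emit each column only once, like the opUsedForQual[] dedup above. */
        int seen = 0;
        for (int j = 0; j < i; j++)
            if (strcmp(key_cols[j], key_cols[i]) == 0)
                seen = 1;
        if (seen)
            continue;

        if (!first)
            strcat(qual, " AND ");
        strcat(qual, "newdata.");
        strcat(qual, key_cols[i]);
        strcat(qual, " OPERATOR(pg_catalog.=) mv.");
        strcat(qual, key_cols[i]);
        first = 0;
    }

    /* Two clauses joined by AND; the repeated "a" is emitted only once. */
    printf("ON %s\n", qual);
    return 0;
}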
* * ExecRefreshMatView() checks that after taking the exclusive lock on the * matview. So at least one unique index is guaranteed to exist here - * because the lock is still being held. + * because the lock is still being held; so an Assert seems sufficient. */ Assert(foundUniqueIndex); @@ -844,6 +851,51 @@ refresh_by_heap_swap(Oid matviewOid, Oid OIDNewHeap, char relpersistence) RecentXmin, ReadNextMultiXactId(), relpersistence); } +/* + * Check whether specified index is usable for match merge. + */ +static bool +is_usable_unique_index(Relation indexRel) +{ + Form_pg_index indexStruct = indexRel->rd_index; + + /* + * Must be unique, valid, immediate, non-partial, and be defined over + * plain user columns (not expressions). We also require it to be a + * btree. Even if we had any other unique index kinds, we'd not know how + * to identify the corresponding equality operator, nor could we be sure + * that the planner could implement the required FULL JOIN with non-btree + * operators. + */ + if (indexStruct->indisunique && + indexStruct->indimmediate && + indexRel->rd_rel->relam == BTREE_AM_OID && + IndexIsValid(indexStruct) && + RelationGetIndexPredicate(indexRel) == NIL && + indexStruct->indnatts > 0) + { + /* + * The point of groveling through the index columns individually is to + * reject both index expressions and system columns. Currently, + * matviews couldn't have OID columns so there's no way to create an + * index on a system column; but maybe someday that wouldn't be true, + * so let's be safe. + */ + int numatts = indexStruct->indnatts; + int i; + + for (i = 0; i < numatts; i++) + { + int attnum = indexStruct->indkey.values[i]; + + if (attnum <= 0) + return false; + } + return true; + } + return false; +} + /* * This should be used to test whether the backend is in a context where it is diff --git a/src/backend/commands/opclasscmds.c b/src/backend/commands/opclasscmds.c index a31b1acb9c..3b5c90e3f4 100644 --- a/src/backend/commands/opclasscmds.c +++ b/src/backend/commands/opclasscmds.c @@ -4,7 +4,7 @@ * * Routines for opclass (and opfamily) manipulation commands * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * @@ -18,6 +18,7 @@ #include #include "access/genam.h" +#include "access/hash.h" #include "access/heapam.h" #include "access/nbtree.h" #include "access/htup_details.h" @@ -238,7 +239,7 @@ get_opclass_oid(Oid amID, List *opclassname, bool missing_ok) * Caller must have done permissions checks etc. already. 
*/ static ObjectAddress -CreateOpFamily(char *amname, char *opfname, Oid namespaceoid, Oid amoid) +CreateOpFamily(const char *amname, const char *opfname, Oid namespaceoid, Oid amoid) { Oid opfamilyoid; Relation rel; @@ -352,7 +353,7 @@ DefineOpClass(CreateOpClassStmt *stmt) /* Check we have creation rights in target namespace */ aclresult = pg_namespace_aclcheck(namespaceoid, GetUserId(), ACL_CREATE); if (aclresult != ACLCHECK_OK) - aclcheck_error(aclresult, ACL_KIND_NAMESPACE, + aclcheck_error(aclresult, OBJECT_SCHEMA, get_namespace_name(namespaceoid)); /* Get necessary info about access method */ @@ -496,11 +497,11 @@ DefineOpClass(CreateOpClassStmt *stmt) /* XXX this is unnecessary given the superuser check above */ /* Caller must own operator and its underlying function */ if (!pg_oper_ownercheck(operOid, GetUserId())) - aclcheck_error(ACLCHECK_NOT_OWNER, ACL_KIND_OPER, + aclcheck_error(ACLCHECK_NOT_OWNER, OBJECT_OPERATOR, get_opname(operOid)); funcOid = get_opcode(operOid); if (!pg_proc_ownercheck(funcOid, GetUserId())) - aclcheck_error(ACLCHECK_NOT_OWNER, ACL_KIND_PROC, + aclcheck_error(ACLCHECK_NOT_OWNER, OBJECT_FUNCTION, get_func_name(funcOid)); #endif @@ -516,15 +517,15 @@ DefineOpClass(CreateOpClassStmt *stmt) if (item->number <= 0 || item->number > maxProcNumber) ereport(ERROR, (errcode(ERRCODE_INVALID_OBJECT_DEFINITION), - errmsg("invalid procedure number %d," + errmsg("invalid function number %d," " must be between 1 and %d", item->number, maxProcNumber))); - funcOid = LookupFuncWithArgs(item->name, false); + funcOid = LookupFuncWithArgs(OBJECT_FUNCTION, item->name, false); #ifdef NOT_USED /* XXX this is unnecessary given the superuser check above */ /* Caller must own function */ if (!pg_proc_ownercheck(funcOid, GetUserId())) - aclcheck_error(ACLCHECK_NOT_OWNER, ACL_KIND_PROC, + aclcheck_error(ACLCHECK_NOT_OWNER, OBJECT_FUNCTION, get_func_name(funcOid)); #endif @@ -729,7 +730,7 @@ DefineOpFamily(CreateOpFamilyStmt *stmt) /* Check we have creation rights in target namespace */ aclresult = pg_namespace_aclcheck(namespaceoid, GetUserId(), ACL_CREATE); if (aclresult != ACLCHECK_OK) - aclcheck_error(aclresult, ACL_KIND_NAMESPACE, + aclcheck_error(aclresult, OBJECT_SCHEMA, get_namespace_name(namespaceoid)); /* Get access method OID, throwing an error if it doesn't exist. 
*/ @@ -870,11 +871,11 @@ AlterOpFamilyAdd(AlterOpFamilyStmt *stmt, Oid amoid, Oid opfamilyoid, /* XXX this is unnecessary given the superuser check above */ /* Caller must own operator and its underlying function */ if (!pg_oper_ownercheck(operOid, GetUserId())) - aclcheck_error(ACLCHECK_NOT_OWNER, ACL_KIND_OPER, + aclcheck_error(ACLCHECK_NOT_OWNER, OBJECT_OPERATOR, get_opname(operOid)); funcOid = get_opcode(operOid); if (!pg_proc_ownercheck(funcOid, GetUserId())) - aclcheck_error(ACLCHECK_NOT_OWNER, ACL_KIND_PROC, + aclcheck_error(ACLCHECK_NOT_OWNER, OBJECT_FUNCTION, get_func_name(funcOid)); #endif @@ -890,15 +891,15 @@ AlterOpFamilyAdd(AlterOpFamilyStmt *stmt, Oid amoid, Oid opfamilyoid, if (item->number <= 0 || item->number > maxProcNumber) ereport(ERROR, (errcode(ERRCODE_INVALID_OBJECT_DEFINITION), - errmsg("invalid procedure number %d," + errmsg("invalid function number %d," " must be between 1 and %d", item->number, maxProcNumber))); - funcOid = LookupFuncWithArgs(item->name, false); + funcOid = LookupFuncWithArgs(OBJECT_FUNCTION, item->name, false); #ifdef NOT_USED /* XXX this is unnecessary given the superuser check above */ /* Caller must own function */ if (!pg_proc_ownercheck(funcOid, GetUserId())) - aclcheck_error(ACLCHECK_NOT_OWNER, ACL_KIND_PROC, + aclcheck_error(ACLCHECK_NOT_OWNER, OBJECT_FUNCTION, get_func_name(funcOid)); #endif @@ -985,7 +986,7 @@ AlterOpFamilyDrop(AlterOpFamilyStmt *stmt, Oid amoid, Oid opfamilyoid, if (item->number <= 0 || item->number > maxProcNumber) ereport(ERROR, (errcode(ERRCODE_INVALID_OBJECT_DEFINITION), - errmsg("invalid procedure number %d," + errmsg("invalid function number %d," " must be between 1 and %d", item->number, maxProcNumber))); processTypesSpec(item->class_args, &lefttype, &righttype); @@ -1127,9 +1128,11 @@ assignProcTypes(OpFamilyMember *member, Oid amoid, Oid typeoid) procform = (Form_pg_proc) GETSTRUCT(proctup); /* - * btree comparison procs must be 2-arg procs returning int4, while btree - * sortsupport procs must take internal and return void. hash support - * procs must be 1-arg procs returning int4. Otherwise we don't know. + * btree comparison procs must be 2-arg procs returning int4. btree + * sortsupport procs must take internal and return void. btree in_range + * procs must be 5-arg procs returning bool. hash support proc 1 must be + * a 1-arg proc returning int4, while proc 2 must be a 2-arg proc + * returning int8. Otherwise we don't know. 
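assignProcTypes validates each support function's argument count and return type according to its support number, as the expanded comment above describes for btree and hash. A standalone, table-driven sketch of that idea follows; the rule table and type names are illustrative, and the real code compares pronargs and prorettype OIDs from pg_proc rather than strings.

#include <stdio.h>
#include <string.h>

/* Expected shape of a support function, keyed by its support number. */
typedef struct
{
    int         number;
    int         nargs;
    const char *rettype;
} ProcRule;

/* Illustrative rules in the spirit of the checks above. */
static const ProcRule hash_rules[] = {
    {1, 1, "integer"},  /* standard hash function */
    {2, 2, "bigint"},   /* extended (seeded) hash function */
};

static int validate(int number, int nargs, const char *rettype)
{
    for (size_t i = 0; i < sizeof(hash_rules) / sizeof(hash_rules[0]); i++)
    {
        if (hash_rules[i].number != number)
            continue;
        if (hash_rules[i].nargs != nargs)
        {
            fprintf(stderr, "hash function %d must have %d argument(s)\n",
                    number, hash_rules[i].nargs);
            return 0;
        }
        if (strcmp(hash_rules[i].rettype, rettype) != 0)
        {
            fprintf(stderr, "hash function %d must return %s\n",
                    number, hash_rules[i].rettype);
            return 0;
        }
        return 1;
    }
    return 1;   /* unknown support numbers are not checked here */
}

int main(void)
{
    printf("%d\n", validate(2, 2, "bigint"));   /* ok */
    printf("%d\n", validate(2, 1, "bigint"));   /* wrong arity, rejected */
    return 0;
}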
*/ if (amoid == BTREE_AM_OID) { @@ -1138,11 +1141,11 @@ assignProcTypes(OpFamilyMember *member, Oid amoid, Oid typeoid) if (procform->pronargs != 2) ereport(ERROR, (errcode(ERRCODE_INVALID_OBJECT_DEFINITION), - errmsg("btree comparison procedures must have two arguments"))); + errmsg("btree comparison functions must have two arguments"))); if (procform->prorettype != INT4OID) ereport(ERROR, (errcode(ERRCODE_INVALID_OBJECT_DEFINITION), - errmsg("btree comparison procedures must return integer"))); + errmsg("btree comparison functions must return integer"))); /* * If lefttype/righttype isn't specified, use the proc's input @@ -1159,27 +1162,61 @@ assignProcTypes(OpFamilyMember *member, Oid amoid, Oid typeoid) procform->proargtypes.values[0] != INTERNALOID) ereport(ERROR, (errcode(ERRCODE_INVALID_OBJECT_DEFINITION), - errmsg("btree sort support procedures must accept type \"internal\""))); + errmsg("btree sort support functions must accept type \"internal\""))); if (procform->prorettype != VOIDOID) ereport(ERROR, (errcode(ERRCODE_INVALID_OBJECT_DEFINITION), - errmsg("btree sort support procedures must return void"))); + errmsg("btree sort support functions must return void"))); /* * Can't infer lefttype/righttype from proc, so use default rule */ } + else if (member->number == BTINRANGE_PROC) + { + if (procform->pronargs != 5) + ereport(ERROR, + (errcode(ERRCODE_INVALID_OBJECT_DEFINITION), + errmsg("btree in_range functions must have five arguments"))); + if (procform->prorettype != BOOLOID) + ereport(ERROR, + (errcode(ERRCODE_INVALID_OBJECT_DEFINITION), + errmsg("btree in_range functions must return boolean"))); + + /* + * If lefttype/righttype isn't specified, use the proc's input + * types (we look at the test-value and offset arguments) + */ + if (!OidIsValid(member->lefttype)) + member->lefttype = procform->proargtypes.values[0]; + if (!OidIsValid(member->righttype)) + member->righttype = procform->proargtypes.values[2]; + } } else if (amoid == HASH_AM_OID) { - if (procform->pronargs != 1) - ereport(ERROR, - (errcode(ERRCODE_INVALID_OBJECT_DEFINITION), - errmsg("hash procedures must have one argument"))); - if (procform->prorettype != INT4OID) - ereport(ERROR, - (errcode(ERRCODE_INVALID_OBJECT_DEFINITION), - errmsg("hash procedures must return integer"))); + if (member->number == HASHSTANDARD_PROC) + { + if (procform->pronargs != 1) + ereport(ERROR, + (errcode(ERRCODE_INVALID_OBJECT_DEFINITION), + errmsg("hash function 1 must have one argument"))); + if (procform->prorettype != INT4OID) + ereport(ERROR, + (errcode(ERRCODE_INVALID_OBJECT_DEFINITION), + errmsg("hash function 1 must return integer"))); + } + else if (member->number == HASHEXTENDED_PROC) + { + if (procform->pronargs != 2) + ereport(ERROR, + (errcode(ERRCODE_INVALID_OBJECT_DEFINITION), + errmsg("hash function 2 must have two arguments"))); + if (procform->prorettype != INT8OID) + ereport(ERROR, + (errcode(ERRCODE_INVALID_OBJECT_DEFINITION), + errmsg("hash function 2 must return bigint"))); + } /* * If lefttype/righttype isn't specified, use the proc's input type @@ -1203,7 +1240,7 @@ assignProcTypes(OpFamilyMember *member, Oid amoid, Oid typeoid) if (!OidIsValid(member->lefttype) || !OidIsValid(member->righttype)) ereport(ERROR, (errcode(ERRCODE_INVALID_OBJECT_DEFINITION), - errmsg("associated data types must be specified for index support procedure"))); + errmsg("associated data types must be specified for index support function"))); ReleaseSysCache(proctup); } @@ -1228,7 +1265,7 @@ addFamilyMember(List **list, 
OpFamilyMember *member, bool isProc) if (isProc) ereport(ERROR, (errcode(ERRCODE_INVALID_OBJECT_DEFINITION), - errmsg("procedure number %d for (%s,%s) appears more than once", + errmsg("function number %d for (%s,%s) appears more than once", member->number, format_type_be(member->lefttype), format_type_be(member->righttype)))); diff --git a/src/backend/commands/operatorcmds.c b/src/backend/commands/operatorcmds.c index 6674b41eec..8fd8f7e8cf 100644 --- a/src/backend/commands/operatorcmds.c +++ b/src/backend/commands/operatorcmds.c @@ -4,7 +4,7 @@ * * Routines for operator manipulation commands * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * @@ -21,7 +21,7 @@ * NOTES * These things must be defined and committed in the following order: * "create function": - * input/output, recv/send procedures + * input/output, recv/send functions * "create type": * type * "create operator": @@ -40,7 +40,6 @@ #include "catalog/indexing.h" #include "catalog/objectaccess.h" #include "catalog/pg_operator.h" -#include "catalog/pg_operator_fn.h" #include "catalog/pg_type.h" #include "commands/alter.h" #include "commands/defrem.h" @@ -80,8 +79,8 @@ DefineOperator(List *names, List *parameters) Oid rettype; List *commutatorName = NIL; /* optional commutator operator name */ List *negatorName = NIL; /* optional negator operator name */ - List *restrictionName = NIL; /* optional restrict. sel. procedure */ - List *joinName = NIL; /* optional join sel. procedure */ + List *restrictionName = NIL; /* optional restrict. sel. function */ + List *joinName = NIL; /* optional join sel. function */ Oid functionOid; /* functions converted to OID */ Oid restrictionOid; Oid joinOid; @@ -95,7 +94,7 @@ DefineOperator(List *names, List *parameters) /* Check we have creation rights in target namespace */ aclresult = pg_namespace_aclcheck(oprNamespace, GetUserId(), ACL_CREATE); if (aclresult != ACLCHECK_OK) - aclcheck_error(aclresult, ACL_KIND_NAMESPACE, + aclcheck_error(aclresult, OBJECT_SCHEMA, get_namespace_name(oprNamespace)); /* @@ -105,7 +104,7 @@ DefineOperator(List *names, List *parameters) { DefElem *defel = (DefElem *) lfirst(pl); - if (pg_strcasecmp(defel->defname, "leftarg") == 0) + if (strcmp(defel->defname, "leftarg") == 0) { typeName1 = defGetTypeName(defel); if (typeName1->setof) @@ -113,7 +112,7 @@ DefineOperator(List *names, List *parameters) (errcode(ERRCODE_INVALID_FUNCTION_DEFINITION), errmsg("SETOF type not allowed for operator argument"))); } - else if (pg_strcasecmp(defel->defname, "rightarg") == 0) + else if (strcmp(defel->defname, "rightarg") == 0) { typeName2 = defGetTypeName(defel); if (typeName2->setof) @@ -121,28 +120,31 @@ DefineOperator(List *names, List *parameters) (errcode(ERRCODE_INVALID_FUNCTION_DEFINITION), errmsg("SETOF type not allowed for operator argument"))); } - else if (pg_strcasecmp(defel->defname, "procedure") == 0) + /* "function" and "procedure" are equivalent here */ + else if (strcmp(defel->defname, "function") == 0) functionName = defGetQualifiedName(defel); - else if (pg_strcasecmp(defel->defname, "commutator") == 0) + else if (strcmp(defel->defname, "procedure") == 0) + functionName = defGetQualifiedName(defel); + else if (strcmp(defel->defname, "commutator") == 0) commutatorName = defGetQualifiedName(defel); - else if (pg_strcasecmp(defel->defname, "negator") == 0) + else if (strcmp(defel->defname, 
"negator") == 0) negatorName = defGetQualifiedName(defel); - else if (pg_strcasecmp(defel->defname, "restrict") == 0) + else if (strcmp(defel->defname, "restrict") == 0) restrictionName = defGetQualifiedName(defel); - else if (pg_strcasecmp(defel->defname, "join") == 0) + else if (strcmp(defel->defname, "join") == 0) joinName = defGetQualifiedName(defel); - else if (pg_strcasecmp(defel->defname, "hashes") == 0) + else if (strcmp(defel->defname, "hashes") == 0) canHash = defGetBoolean(defel); - else if (pg_strcasecmp(defel->defname, "merges") == 0) + else if (strcmp(defel->defname, "merges") == 0) canMerge = defGetBoolean(defel); /* These obsolete options are taken as meaning canMerge */ - else if (pg_strcasecmp(defel->defname, "sort1") == 0) + else if (strcmp(defel->defname, "sort1") == 0) canMerge = true; - else if (pg_strcasecmp(defel->defname, "sort2") == 0) + else if (strcmp(defel->defname, "sort2") == 0) canMerge = true; - else if (pg_strcasecmp(defel->defname, "ltcmp") == 0) + else if (strcmp(defel->defname, "ltcmp") == 0) canMerge = true; - else if (pg_strcasecmp(defel->defname, "gtcmp") == 0) + else if (strcmp(defel->defname, "gtcmp") == 0) canMerge = true; else { @@ -160,7 +162,7 @@ DefineOperator(List *names, List *parameters) if (functionName == NIL) ereport(ERROR, (errcode(ERRCODE_INVALID_FUNCTION_DEFINITION), - errmsg("operator procedure must be specified"))); + errmsg("operator function must be specified"))); /* Transform type names to type OIDs */ if (typeName1) @@ -215,7 +217,7 @@ DefineOperator(List *names, List *parameters) */ aclresult = pg_proc_aclcheck(functionOid, GetUserId(), ACL_EXECUTE); if (aclresult != ACLCHECK_OK) - aclcheck_error(aclresult, ACL_KIND_PROC, + aclcheck_error(aclresult, OBJECT_FUNCTION, NameListToString(functionName)); rettype = get_func_rettype(functionOid); @@ -246,8 +248,8 @@ DefineOperator(List *names, List *parameters) functionOid, /* function for operator */ commutatorName, /* optional commutator operator name */ negatorName, /* optional negator operator name */ - restrictionOid, /* optional restrict. sel. procedure */ - joinOid, /* optional join sel. procedure name */ + restrictionOid, /* optional restrict. sel. function */ + joinOid, /* optional join sel. function name */ canMerge, /* operator merges */ canHash); /* operator hashes */ } @@ -281,7 +283,7 @@ ValidateRestrictionEstimator(List *restrictionName) /* Require EXECUTE rights for the estimator */ aclresult = pg_proc_aclcheck(restrictionOid, GetUserId(), ACL_EXECUTE); if (aclresult != ACLCHECK_OK) - aclcheck_error(aclresult, ACL_KIND_PROC, + aclcheck_error(aclresult, OBJECT_FUNCTION, NameListToString(restrictionName)); return restrictionOid; @@ -327,7 +329,7 @@ ValidateJoinEstimator(List *joinName) /* Require EXECUTE rights for the estimator */ aclresult = pg_proc_aclcheck(joinOid, GetUserId(), ACL_EXECUTE); if (aclresult != ACLCHECK_OK) - aclcheck_error(aclresult, ACL_KIND_PROC, + aclcheck_error(aclresult, OBJECT_FUNCTION, NameListToString(joinName)); return joinOid; @@ -394,10 +396,10 @@ AlterOperator(AlterOperatorStmt *stmt) Datum values[Natts_pg_operator]; bool nulls[Natts_pg_operator]; bool replaces[Natts_pg_operator]; - List *restrictionName = NIL; /* optional restrict. sel. procedure */ + List *restrictionName = NIL; /* optional restrict. sel. function */ bool updateRestriction = false; Oid restrictionOid; - List *joinName = NIL; /* optional join sel. procedure */ + List *joinName = NIL; /* optional join sel. 
function */ bool updateJoin = false; Oid joinOid; @@ -420,12 +422,12 @@ AlterOperator(AlterOperatorStmt *stmt) else param = defGetQualifiedName(defel); - if (pg_strcasecmp(defel->defname, "restrict") == 0) + if (strcmp(defel->defname, "restrict") == 0) { restrictionName = param; updateRestriction = true; } - else if (pg_strcasecmp(defel->defname, "join") == 0) + else if (strcmp(defel->defname, "join") == 0) { joinName = param; updateJoin = true; @@ -435,13 +437,14 @@ AlterOperator(AlterOperatorStmt *stmt) * The rest of the options that CREATE accepts cannot be changed. * Check for them so that we can give a meaningful error message. */ - else if (pg_strcasecmp(defel->defname, "leftarg") == 0 || - pg_strcasecmp(defel->defname, "rightarg") == 0 || - pg_strcasecmp(defel->defname, "procedure") == 0 || - pg_strcasecmp(defel->defname, "commutator") == 0 || - pg_strcasecmp(defel->defname, "negator") == 0 || - pg_strcasecmp(defel->defname, "hashes") == 0 || - pg_strcasecmp(defel->defname, "merges") == 0) + else if (strcmp(defel->defname, "leftarg") == 0 || + strcmp(defel->defname, "rightarg") == 0 || + strcmp(defel->defname, "function") == 0 || + strcmp(defel->defname, "procedure") == 0 || + strcmp(defel->defname, "commutator") == 0 || + strcmp(defel->defname, "negator") == 0 || + strcmp(defel->defname, "hashes") == 0 || + strcmp(defel->defname, "merges") == 0) { ereport(ERROR, (errcode(ERRCODE_SYNTAX_ERROR), @@ -457,7 +460,7 @@ AlterOperator(AlterOperatorStmt *stmt) /* Check permissions. Must be owner. */ if (!pg_oper_ownercheck(oprId, GetUserId())) - aclcheck_error(ACLCHECK_NOT_OWNER, ACL_KIND_OPER, + aclcheck_error(ACLCHECK_NOT_OWNER, OBJECT_OPERATOR, NameStr(oprForm->oprname)); /* diff --git a/src/backend/commands/policy.c b/src/backend/commands/policy.c index 9ced4ee34c..2fd17b24b9 100644 --- a/src/backend/commands/policy.c +++ b/src/backend/commands/policy.c @@ -3,7 +3,7 @@ * policy.c * Commands for manipulating policies. * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/backend/commands/policy.c @@ -78,7 +78,7 @@ RangeVarCallbackForPolicy(const RangeVar *rv, Oid relid, Oid oldrelid, /* Must own relation. */ if (!pg_class_ownercheck(relid, GetUserId())) - aclcheck_error(ACLCHECK_NOT_OWNER, ACL_KIND_CLASS, rv->relname); + aclcheck_error(ACLCHECK_NOT_OWNER, get_relkind_objtype(get_rel_relkind(relid)), rv->relname); /* No system table modifications unless explicitly allowed. 
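The option handling in DefineOperator and AlterOperator now uses plain strcmp because defel->defname arrives already lower-cased from the parser, and "function" is accepted alongside the historical "procedure" spelling. A small standalone sketch of that dispatch style follows, using a made-up option list.

#include <stdio.h>
#include <string.h>

typedef struct
{
    const char *name;   /* already lower-cased by the parser in the real code */
    const char *value;
} OptionDef;

int main(void)
{
    /* Rough analogue of CREATE OPERATOR ... (FUNCTION = int4pl, COMMUTATOR = +). */
    OptionDef opts[] = {
        {"function", "int4pl"},
        {"commutator", "+"},
        {"bogus", "x"},
    };
    const char *func_name = NULL, *commutator = NULL;

    for (size_t i = 0; i < sizeof(opts) / sizeof(opts[0]); i++)
    {
        /* Exact strcmp suffices because option names arrive normalized. */
        if (strcmp(opts[i].name, "function") == 0 ||
            strcmp(opts[i].name, "procedure") == 0)   /* accepted as a synonym */
            func_name = opts[i].value;
        else if (strcmp(opts[i].name, "commutator") == 0)
            commutator = opts[i].value;
        else
            fprintf(stderr, "operator attribute \"%s\" not recognized\n",
                    opts[i].name);
    }

    printf("function=%s commutator=%s\n",
           func_name ? func_name : "(none)",
           commutator ? commutator : "(none)");
    return 0;
}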
*/ if (!allowSystemTableMods && IsSystemClass(relid, classform)) @@ -214,6 +214,9 @@ RelationBuildRowSecurity(Relation relation) SysScanDesc sscan; HeapTuple tuple; + MemoryContextCopyAndSetIdentifier(rscxt, + RelationGetRelationName(relation)); + rsdesc = MemoryContextAllocZero(rscxt, sizeof(RowSecurityDesc)); rsdesc->rscxt = rscxt; @@ -564,7 +567,9 @@ RemoveRoleFromObjectPolicy(Oid roleid, Oid classid, Oid policy_id) qual_expr = stringToNode(qual_value); /* Add this rel to the parsestate's rangetable, for dependencies */ - addRangeTableEntryForRelation(qual_pstate, rel, NULL, false, false); + addRangeTableEntryForRelation(qual_pstate, rel, + AccessShareLock, + NULL, false, false); qual_parse_rtable = qual_pstate->p_rtable; free_parsestate(qual_pstate); @@ -586,8 +591,9 @@ RemoveRoleFromObjectPolicy(Oid roleid, Oid classid, Oid policy_id) with_check_qual = stringToNode(with_check_value); /* Add this rel to the parsestate's rangetable, for dependencies */ - addRangeTableEntryForRelation(with_check_pstate, rel, NULL, false, - false); + addRangeTableEntryForRelation(with_check_pstate, rel, + AccessShareLock, + NULL, false, false); with_check_parse_rtable = with_check_pstate->p_rtable; free_parsestate(with_check_pstate); @@ -740,7 +746,7 @@ CreatePolicy(CreatePolicyStmt *stmt) /* Get id of table. Also handles permissions checks. */ table_id = RangeVarGetRelidExtended(stmt->table, AccessExclusiveLock, - false, false, + 0, RangeVarCallbackForPolicy, (void *) stmt); @@ -749,11 +755,13 @@ CreatePolicy(CreatePolicyStmt *stmt) /* Add for the regular security quals */ rte = addRangeTableEntryForRelation(qual_pstate, target_table, + AccessShareLock, NULL, false, false); addRTEtoQuery(qual_pstate, rte, false, true, true); /* Add for the with-check quals */ rte = addRangeTableEntryForRelation(with_check_pstate, target_table, + AccessShareLock, NULL, false, false); addRTEtoQuery(with_check_pstate, rte, false, true, true); @@ -912,7 +920,7 @@ AlterPolicy(AlterPolicyStmt *stmt) /* Get id of table. Also handles permissions checks. 
*/ table_id = RangeVarGetRelidExtended(stmt->table, AccessExclusiveLock, - false, false, + 0, RangeVarCallbackForPolicy, (void *) stmt); @@ -925,6 +933,7 @@ AlterPolicy(AlterPolicyStmt *stmt) ParseState *qual_pstate = make_parsestate(NULL); rte = addRangeTableEntryForRelation(qual_pstate, target_table, + AccessShareLock, NULL, false, false); addRTEtoQuery(qual_pstate, rte, false, true, true); @@ -947,6 +956,7 @@ AlterPolicy(AlterPolicyStmt *stmt) ParseState *with_check_pstate = make_parsestate(NULL); rte = addRangeTableEntryForRelation(with_check_pstate, target_table, + AccessShareLock, NULL, false, false); addRTEtoQuery(with_check_pstate, rte, false, true, true); @@ -1093,8 +1103,9 @@ AlterPolicy(AlterPolicyStmt *stmt) qual = stringToNode(qual_value); /* Add this rel to the parsestate's rangetable, for dependencies */ - addRangeTableEntryForRelation(qual_pstate, target_table, NULL, - false, false); + addRangeTableEntryForRelation(qual_pstate, target_table, + AccessShareLock, + NULL, false, false); qual_parse_rtable = qual_pstate->p_rtable; free_parsestate(qual_pstate); @@ -1134,8 +1145,9 @@ AlterPolicy(AlterPolicyStmt *stmt) with_check_qual = stringToNode(with_check_value); /* Add this rel to the parsestate's rangetable, for dependencies */ - addRangeTableEntryForRelation(with_check_pstate, target_table, NULL, - false, false); + addRangeTableEntryForRelation(with_check_pstate, target_table, + AccessShareLock, + NULL, false, false); with_check_parse_rtable = with_check_pstate->p_rtable; free_parsestate(with_check_pstate); @@ -1212,7 +1224,7 @@ rename_policy(RenameStmt *stmt) /* Get id of table. Also handles permissions checks. */ table_id = RangeVarGetRelidExtended(stmt->relation, AccessExclusiveLock, - false, false, + 0, RangeVarCallbackForPolicy, (void *) stmt); diff --git a/src/backend/commands/portalcmds.c b/src/backend/commands/portalcmds.c index 46369cf3db..568499761f 100644 --- a/src/backend/commands/portalcmds.c +++ b/src/backend/commands/portalcmds.c @@ -9,7 +9,7 @@ * storage management for portals (but doesn't run any queries in them). * * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * @@ -63,7 +63,7 @@ PerformCursorOpen(DeclareCursorStmt *cstmt, ParamListInfo params, * user-visible effect). */ if (!(cstmt->options & CURSOR_OPT_HOLD)) - RequireTransactionChain(isTopLevel, "DECLARE CURSOR"); + RequireTransactionBlock(isTopLevel, "DECLARE CURSOR"); /* * Parse analysis was done already, but we still have to run the rule @@ -96,7 +96,7 @@ PerformCursorOpen(DeclareCursorStmt *cstmt, ParamListInfo params, */ portal = CreatePortal(cstmt->portalname, false, false); - oldContext = MemoryContextSwitchTo(PortalGetHeapMemory(portal)); + oldContext = MemoryContextSwitchTo(portal->portalContext); plan = copyObject(plan); @@ -277,7 +277,7 @@ PortalCleanup(Portal portal) * since other mechanisms will take care of releasing executor resources, * and we can't be sure that ExecutorEnd itself wouldn't fail. 
*/ - queryDesc = PortalGetQueryDesc(portal); + queryDesc = portal->queryDesc; if (queryDesc) { /* @@ -294,21 +294,13 @@ PortalCleanup(Portal portal) /* We must make the portal's resource owner current */ saveResourceOwner = CurrentResourceOwner; - PG_TRY(); - { - if (portal->resowner) - CurrentResourceOwner = portal->resowner; - ExecutorFinish(queryDesc); - ExecutorEnd(queryDesc); - FreeQueryDesc(queryDesc); - } - PG_CATCH(); - { - /* Ensure CurrentResourceOwner is restored on error */ - CurrentResourceOwner = saveResourceOwner; - PG_RE_THROW(); - } - PG_END_TRY(); + if (portal->resowner) + CurrentResourceOwner = portal->resowner; + + ExecutorFinish(queryDesc); + ExecutorEnd(queryDesc); + FreeQueryDesc(queryDesc); + CurrentResourceOwner = saveResourceOwner; } } @@ -325,7 +317,7 @@ PortalCleanup(Portal portal) void PersistHoldablePortal(Portal portal) { - QueryDesc *queryDesc = PortalGetQueryDesc(portal); + QueryDesc *queryDesc = portal->queryDesc; Portal saveActivePortal; ResourceOwner saveResourceOwner; MemoryContext savePortalContext; @@ -371,7 +363,7 @@ PersistHoldablePortal(Portal portal) ActivePortal = portal; if (portal->resowner) CurrentResourceOwner = portal->resowner; - PortalContext = PortalGetHeapMemory(portal); + PortalContext = portal->portalContext; MemoryContextSwitchTo(PortalContext); @@ -397,7 +389,7 @@ PersistHoldablePortal(Portal portal) /* Fetch the result set into the tuplestore */ ExecutorRun(queryDesc, ForwardScanDirection, 0L, false); - (*queryDesc->dest->rDestroy) (queryDesc->dest); + queryDesc->dest->rDestroy(queryDesc->dest); queryDesc->dest = NULL; /* @@ -458,10 +450,10 @@ PersistHoldablePortal(Portal portal) PopActiveSnapshot(); /* - * We can now release any subsidiary memory of the portal's heap context; - * we'll never use it again. The executor already dropped its context, - * but this will clean up anything that glommed onto the portal's heap via + * We can now release any subsidiary memory of the portal's context; we'll + * never use it again. The executor already dropped its context, but this + * will clean up anything that glommed onto the portal's context via * PortalContext. */ - MemoryContextDeleteChildren(PortalGetHeapMemory(portal)); + MemoryContextDeleteChildren(portal->portalContext); } diff --git a/src/backend/commands/prepare.c b/src/backend/commands/prepare.c index be7222f003..b945b1556a 100644 --- a/src/backend/commands/prepare.c +++ b/src/backend/commands/prepare.c @@ -7,7 +7,7 @@ * accessed via the extended FE/BE query protocol. 
* * - * Copyright (c) 2002-2017, PostgreSQL Global Development Group + * Copyright (c) 2002-2018, PostgreSQL Global Development Group * * IDENTIFICATION * src/backend/commands/prepare.c @@ -239,7 +239,7 @@ ExecuteQuery(ExecuteStmt *stmt, IntoClause *intoClause, portal->visible = false; /* Copy the plan's saved query string into the portal's memory */ - query_string = MemoryContextStrdup(PortalGetHeapMemory(portal), + query_string = MemoryContextStrdup(portal->portalContext, entry->plansource->query_string); /* Replan if needed, and increment plan refcount for portal */ @@ -399,10 +399,11 @@ EvaluateParams(PreparedStatement *pstmt, List *params, /* we have static list of params, so no hooks needed */ paramLI->paramFetch = NULL; paramLI->paramFetchArg = NULL; + paramLI->paramCompile = NULL; + paramLI->paramCompileArg = NULL; paramLI->parserSetup = NULL; paramLI->parserSetupArg = NULL; paramLI->numParams = num_params; - paramLI->paramMask = NULL; i = 0; foreach(l, exprstates) diff --git a/src/backend/commands/proclang.c b/src/backend/commands/proclang.c index 9d2d43fe6b..c900ad9431 100644 --- a/src/backend/commands/proclang.c +++ b/src/backend/commands/proclang.c @@ -3,7 +3,7 @@ * proclang.c * PostgreSQL PROCEDURAL LANGUAGE support code. * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * IDENTIFICATION @@ -24,7 +24,6 @@ #include "catalog/pg_namespace.h" #include "catalog/pg_pltemplate.h" #include "catalog/pg_proc.h" -#include "catalog/pg_proc_fn.h" #include "catalog/pg_type.h" #include "commands/dbcommands.h" #include "commands/defrem.h" @@ -97,7 +96,7 @@ CreateProceduralLanguage(CreatePLangStmt *stmt) errmsg("must be superuser to create procedural language \"%s\"", stmt->plname))); if (!pg_database_ownercheck(MyDatabaseId, GetUserId())) - aclcheck_error(ACLCHECK_NOT_OWNER, ACL_KIND_DATABASE, + aclcheck_error(ACLCHECK_NOT_OWNER, OBJECT_DATABASE, get_database_name(MyDatabaseId)); } @@ -129,8 +128,7 @@ CreateProceduralLanguage(CreatePLangStmt *stmt) F_FMGR_C_VALIDATOR, pltemplate->tmplhandler, pltemplate->tmpllibrary, - false, /* isAgg */ - false, /* isWindowFunc */ + PROKIND_FUNCTION, false, /* security_definer */ false, /* isLeakProof */ false, /* isStrict */ @@ -169,8 +167,7 @@ CreateProceduralLanguage(CreatePLangStmt *stmt) F_FMGR_C_VALIDATOR, pltemplate->tmplinline, pltemplate->tmpllibrary, - false, /* isAgg */ - false, /* isWindowFunc */ + PROKIND_FUNCTION, false, /* security_definer */ false, /* isLeakProof */ true, /* isStrict */ @@ -212,8 +209,7 @@ CreateProceduralLanguage(CreatePLangStmt *stmt) F_FMGR_C_VALIDATOR, pltemplate->tmplvalidator, pltemplate->tmpllibrary, - false, /* isAgg */ - false, /* isWindowFunc */ + PROKIND_FUNCTION, false, /* security_definer */ false, /* isLeakProof */ true, /* isStrict */ @@ -366,7 +362,7 @@ create_proc_lang(const char *languageName, bool replace, (errcode(ERRCODE_DUPLICATE_OBJECT), errmsg("language \"%s\" already exists", languageName))); if (!pg_language_ownercheck(HeapTupleGetOid(oldtup), languageOwner)) - aclcheck_error(ACLCHECK_NOT_OWNER, ACL_KIND_LANGUAGE, + aclcheck_error(ACLCHECK_NOT_OWNER, OBJECT_LANGUAGE, languageName); /* @@ -513,7 +509,7 @@ find_language_template(const char *languageName) /* - * This just returns TRUE if we have a valid template for a given language + * This just returns true if we have a valid template for a given language */ bool 
PLTemplateExists(const char *languageName) diff --git a/src/backend/commands/publicationcmds.c b/src/backend/commands/publicationcmds.c index 610cb499d2..6f7762a906 100644 --- a/src/backend/commands/publicationcmds.c +++ b/src/backend/commands/publicationcmds.c @@ -3,7 +3,7 @@ * publicationcmds.c * publication manipulation * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * IDENTIFICATION @@ -28,7 +28,7 @@ #include "catalog/namespace.h" #include "catalog/objectaccess.h" #include "catalog/objectaddress.h" -#include "catalog/pg_inherits_fn.h" +#include "catalog/pg_inherits.h" #include "catalog/pg_type.h" #include "catalog/pg_publication.h" #include "catalog/pg_publication_rel.h" @@ -62,7 +62,8 @@ parse_publication_options(List *options, bool *publish_given, bool *publish_insert, bool *publish_update, - bool *publish_delete) + bool *publish_delete, + bool *publish_truncate) { ListCell *lc; @@ -72,6 +73,7 @@ parse_publication_options(List *options, *publish_insert = true; *publish_update = true; *publish_delete = true; + *publish_truncate = true; /* Parse options */ foreach(lc, options) @@ -96,6 +98,7 @@ parse_publication_options(List *options, *publish_insert = false; *publish_update = false; *publish_delete = false; + *publish_truncate = false; *publish_given = true; publish = defGetString(defel); @@ -103,7 +106,7 @@ parse_publication_options(List *options, if (!SplitIdentifierString(publish, ',', &publish_list)) ereport(ERROR, (errcode(ERRCODE_SYNTAX_ERROR), - errmsg("invalid publish list"))); + errmsg("invalid list syntax for \"publish\" option"))); /* Process the option list. */ foreach(lc, publish_list) @@ -116,6 +119,8 @@ parse_publication_options(List *options, *publish_update = true; else if (strcmp(publish_opt, "delete") == 0) *publish_delete = true; + else if (strcmp(publish_opt, "truncate") == 0) + *publish_truncate = true; else ereport(ERROR, (errcode(ERRCODE_SYNTAX_ERROR), @@ -145,12 +150,13 @@ CreatePublication(CreatePublicationStmt *stmt) bool publish_insert; bool publish_update; bool publish_delete; + bool publish_truncate; AclResult aclresult; /* must have CREATE privilege on database */ aclresult = pg_database_aclcheck(MyDatabaseId, GetUserId(), ACL_CREATE); if (aclresult != ACLCHECK_OK) - aclcheck_error(aclresult, ACL_KIND_DATABASE, + aclcheck_error(aclresult, OBJECT_DATABASE, get_database_name(MyDatabaseId)); /* FOR ALL TABLES requires superuser */ @@ -181,7 +187,8 @@ CreatePublication(CreatePublicationStmt *stmt) parse_publication_options(stmt->options, &publish_given, &publish_insert, - &publish_update, &publish_delete); + &publish_update, &publish_delete, + &publish_truncate); values[Anum_pg_publication_puballtables - 1] = BoolGetDatum(stmt->for_all_tables); @@ -191,6 +198,8 @@ CreatePublication(CreatePublicationStmt *stmt) BoolGetDatum(publish_update); values[Anum_pg_publication_pubdelete - 1] = BoolGetDatum(publish_delete); + values[Anum_pg_publication_pubtruncate - 1] = + BoolGetDatum(publish_truncate); tup = heap_form_tuple(RelationGetDescr(rel), values, nulls); @@ -237,11 +246,13 @@ AlterPublicationOptions(AlterPublicationStmt *stmt, Relation rel, bool publish_insert; bool publish_update; bool publish_delete; + bool publish_truncate; ObjectAddress obj; parse_publication_options(stmt->options, &publish_given, &publish_insert, - &publish_update, &publish_delete); + &publish_update, 
&publish_delete, + &publish_truncate); /* Everything ok, form a new tuple. */ memset(values, 0, sizeof(values)); @@ -258,6 +269,9 @@ AlterPublicationOptions(AlterPublicationStmt *stmt, Relation rel, values[Anum_pg_publication_pubdelete - 1] = BoolGetDatum(publish_delete); replaces[Anum_pg_publication_pubdelete - 1] = true; + + values[Anum_pg_publication_pubtruncate - 1] = BoolGetDatum(publish_truncate); + replaces[Anum_pg_publication_pubtruncate - 1] = true; } tup = heap_modify_tuple(tup, RelationGetDescr(rel), values, nulls, @@ -403,7 +417,7 @@ AlterPublication(AlterPublicationStmt *stmt) /* must be owner */ if (!pg_publication_ownercheck(HeapTupleGetOid(tup), GetUserId())) - aclcheck_error(ACLCHECK_NOT_OWNER, ACL_KIND_PUBLICATION, + aclcheck_error(ACLCHECK_NOT_OWNER, OBJECT_PUBLICATION, stmt->pubname); if (stmt->options) @@ -582,7 +596,7 @@ PublicationAddTables(Oid pubid, List *rels, bool if_not_exists, /* Must be owner of the table or superuser. */ if (!pg_class_ownercheck(RelationGetRelid(rel), GetUserId())) - aclcheck_error(ACLCHECK_NOT_OWNER, ACL_KIND_CLASS, + aclcheck_error(ACLCHECK_NOT_OWNER, get_relkind_objtype(rel->rd_rel->relkind), RelationGetRelationName(rel)); obj = publication_add_relation(pubid, rel, if_not_exists); @@ -649,7 +663,7 @@ AlterPublicationOwner_internal(Relation rel, HeapTuple tup, Oid newOwnerId) /* Must be owner */ if (!pg_publication_ownercheck(HeapTupleGetOid(tup), GetUserId())) - aclcheck_error(ACLCHECK_NOT_OWNER, ACL_KIND_PUBLICATION, + aclcheck_error(ACLCHECK_NOT_OWNER, OBJECT_PUBLICATION, NameStr(form->pubname)); /* Must be able to become new owner */ @@ -658,7 +672,7 @@ AlterPublicationOwner_internal(Relation rel, HeapTuple tup, Oid newOwnerId) /* New owner must have CREATE privilege on database */ aclresult = pg_database_aclcheck(MyDatabaseId, newOwnerId, ACL_CREATE); if (aclresult != ACLCHECK_OK) - aclcheck_error(aclresult, ACL_KIND_DATABASE, + aclcheck_error(aclresult, OBJECT_DATABASE, get_database_name(MyDatabaseId)); if (form->puballtables && !superuser_arg(newOwnerId)) diff --git a/src/backend/commands/schemacmds.c b/src/backend/commands/schemacmds.c index f9ea73f923..dc6cb46e4e 100644 --- a/src/backend/commands/schemacmds.c +++ b/src/backend/commands/schemacmds.c @@ -3,7 +3,7 @@ * schemacmds.c * schema creation/manipulation commands * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * @@ -94,7 +94,7 @@ CreateSchemaCommand(CreateSchemaStmt *stmt, const char *queryString, */ aclresult = pg_database_aclcheck(MyDatabaseId, saved_uid, ACL_CREATE); if (aclresult != ACLCHECK_OK) - aclcheck_error(aclresult, ACL_KIND_DATABASE, + aclcheck_error(aclresult, OBJECT_DATABASE, get_database_name(MyDatabaseId)); check_is_member_of_role(saved_uid, owner_uid); @@ -265,13 +265,13 @@ RenameSchema(const char *oldname, const char *newname) /* must be owner */ if (!pg_namespace_ownercheck(HeapTupleGetOid(tup), GetUserId())) - aclcheck_error(ACLCHECK_NOT_OWNER, ACL_KIND_NAMESPACE, + aclcheck_error(ACLCHECK_NOT_OWNER, OBJECT_SCHEMA, oldname); /* must have CREATE privilege on database */ aclresult = pg_database_aclcheck(MyDatabaseId, GetUserId(), ACL_CREATE); if (aclresult != ACLCHECK_OK) - aclcheck_error(aclresult, ACL_KIND_DATABASE, + aclcheck_error(aclresult, OBJECT_DATABASE, get_database_name(MyDatabaseId)); if (!allowSystemTableMods && IsReservedName(newname)) @@ -373,7 +373,7 @@ 
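parse_publication_options() above gains a publish_truncate flag (on by default) and accepts "truncate" in the publish list, and AlterPublicationOptions() stores it in the new pubtruncate column. A hedged usage sketch with made-up names:

    CREATE TABLE inventory (id int PRIMARY KEY, qty int);

    CREATE PUBLICATION pub_all_ops FOR TABLE inventory;
        -- publishes insert, update, delete and now also truncate

    CREATE PUBLICATION pub_no_truncate FOR TABLE inventory
        WITH (publish = 'insert, update, delete');     -- opt out of truncate

    ALTER PUBLICATION pub_no_truncate SET (publish = 'insert, truncate');

    -- A value that is not a valid comma-separated list is rejected with the
    -- reworded message: invalid list syntax for "publish" option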
AlterSchemaOwner_internal(HeapTuple tup, Relation rel, Oid newOwnerId) /* Otherwise, must be owner of the existing object */ if (!pg_namespace_ownercheck(HeapTupleGetOid(tup), GetUserId())) - aclcheck_error(ACLCHECK_NOT_OWNER, ACL_KIND_NAMESPACE, + aclcheck_error(ACLCHECK_NOT_OWNER, OBJECT_SCHEMA, NameStr(nspForm->nspname)); /* Must be able to become new owner */ @@ -391,7 +391,7 @@ AlterSchemaOwner_internal(HeapTuple tup, Relation rel, Oid newOwnerId) aclresult = pg_database_aclcheck(MyDatabaseId, GetUserId(), ACL_CREATE); if (aclresult != ACLCHECK_OK) - aclcheck_error(aclresult, ACL_KIND_DATABASE, + aclcheck_error(aclresult, OBJECT_DATABASE, get_database_name(MyDatabaseId)); memset(repl_null, false, sizeof(repl_null)); diff --git a/src/backend/commands/seclabel.c b/src/backend/commands/seclabel.c index 5f16d6cf1c..1ac7756f2a 100644 --- a/src/backend/commands/seclabel.c +++ b/src/backend/commands/seclabel.c @@ -3,7 +3,7 @@ * seclabel.c * routines to support security label feature. * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * ------------------------------------------------------------------------- @@ -122,7 +122,7 @@ ExecSecLabelStmt(SecLabelStmt *stmt) } /* Provider gets control here, may throw ERROR to veto new label. */ - (*provider->hook) (&address, stmt->label); + provider->hook(&address, stmt->label); /* Apply new label. */ SetSecurityLabel(&address, provider->provider_name, stmt->label); @@ -321,7 +321,7 @@ SetSharedSecurityLabel(const ObjectAddress *object, /* * SetSecurityLabel attempts to set the security label for the specified * provider on the specified object to the given value. NULL means that any - * any existing label should be deleted. + * existing label should be deleted. */ void SetSecurityLabel(const ObjectAddress *object, diff --git a/src/backend/commands/sequence.c b/src/backend/commands/sequence.c index 62937124ef..6d89925b23 100644 --- a/src/backend/commands/sequence.c +++ b/src/backend/commands/sequence.c @@ -3,7 +3,7 @@ * sequence.c * PostgreSQL sequences support code. * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * @@ -172,7 +172,6 @@ DefineSequence(ParseState *pstate, CreateSeqStmt *seq) coldef->is_local = true; coldef->is_not_null = true; coldef->is_from_type = false; - coldef->is_from_parent = false; coldef->storage = 0; coldef->raw_default = NULL; coldef->cooked_default = NULL; @@ -432,8 +431,7 @@ AlterSequence(ParseState *pstate, AlterSeqStmt *stmt) /* Open and lock sequence, and check for ownership along the way. */ relid = RangeVarGetRelidExtended(stmt->sequence, ShareRowExclusiveLock, - stmt->missing_ok, - false, + stmt->missing_ok ? 
RVR_MISSING_OK : 0, RangeVarCallbackOwnsRelation, NULL); if (relid == InvalidOid) @@ -1055,18 +1053,10 @@ lock_and_open_sequence(SeqTable seq) ResourceOwner currentOwner; currentOwner = CurrentResourceOwner; - PG_TRY(); - { - CurrentResourceOwner = TopTransactionResourceOwner; - LockRelationOid(seq->relid, RowExclusiveLock); - } - PG_CATCH(); - { - /* Ensure CurrentResourceOwner is restored on error */ - CurrentResourceOwner = currentOwner; - PG_RE_THROW(); - } - PG_END_TRY(); + CurrentResourceOwner = TopTransactionResourceOwner; + + LockRelationOid(seq->relid, RowExclusiveLock); + CurrentResourceOwner = currentOwner; /* Flag that we have a lock in the current xact */ @@ -1760,12 +1750,19 @@ sequence_options(Oid relid) elog(ERROR, "cache lookup failed for sequence %u", relid); pgsform = (Form_pg_sequence) GETSTRUCT(pgstuple); - options = lappend(options, makeDefElem("cache", (Node *) makeInteger(pgsform->seqcache), -1)); - options = lappend(options, makeDefElem("cycle", (Node *) makeInteger(pgsform->seqcycle), -1)); - options = lappend(options, makeDefElem("increment", (Node *) makeInteger(pgsform->seqincrement), -1)); - options = lappend(options, makeDefElem("maxvalue", (Node *) makeInteger(pgsform->seqmax), -1)); - options = lappend(options, makeDefElem("minvalue", (Node *) makeInteger(pgsform->seqmin), -1)); - options = lappend(options, makeDefElem("start", (Node *) makeInteger(pgsform->seqstart), -1)); + /* Use makeFloat() for 64-bit integers, like gram.y does. */ + options = lappend(options, + makeDefElem("cache", (Node *) makeFloat(psprintf(INT64_FORMAT, pgsform->seqcache)), -1)); + options = lappend(options, + makeDefElem("cycle", (Node *) makeInteger(pgsform->seqcycle), -1)); + options = lappend(options, + makeDefElem("increment", (Node *) makeFloat(psprintf(INT64_FORMAT, pgsform->seqincrement)), -1)); + options = lappend(options, + makeDefElem("maxvalue", (Node *) makeFloat(psprintf(INT64_FORMAT, pgsform->seqmax)), -1)); + options = lappend(options, + makeDefElem("minvalue", (Node *) makeFloat(psprintf(INT64_FORMAT, pgsform->seqmin)), -1)); + options = lappend(options, + makeDefElem("start", (Node *) makeFloat(psprintf(INT64_FORMAT, pgsform->seqstart)), -1)); ReleaseSysCache(pgstuple); @@ -1941,7 +1938,7 @@ ResetSequenceCaches(void) void seq_mask(char *page, BlockNumber blkno) { - mask_page_lsn(page); + mask_page_lsn_and_checksum(page); mask_unused_space(page); } diff --git a/src/backend/commands/statscmds.c b/src/backend/commands/statscmds.c index 476505512b..3bb0d24cd2 100644 --- a/src/backend/commands/statscmds.c +++ b/src/backend/commands/statscmds.c @@ -3,7 +3,7 @@ * statscmds.c * Commands for creating and altering extended statistics objects * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * @@ -20,6 +20,7 @@ #include "catalog/namespace.h" #include "catalog/pg_namespace.h" #include "catalog/pg_statistic_ext.h" +#include "commands/comment.h" #include "commands/defrem.h" #include "miscadmin.h" #include "statistics/statistics.h" @@ -31,6 +32,11 @@ #include "utils/typcache.h" +static char *ChooseExtendedStatisticName(const char *name1, const char *name2, + const char *label, Oid namespaceid); +static char *ChooseExtendedStatisticNameAddition(List *exprs); + + /* qsort comparator for the attnums in CreateStatistics */ static int compare_int16(const void *a, const void *b) @@ -75,31 +81,6 @@ 
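The sequence_options() hunk above carries 64-bit sequence parameters as float/string nodes instead of truncating them through makeInteger(). Assuming the usual consumer of this list, copying an identity column's sequence options with LIKE ... INCLUDING IDENTITY, a sketch of values outside the int4 range that must round-trip intact:

    CREATE TABLE events (
        id bigint GENERATED ALWAYS AS IDENTITY
            (START WITH 4000000000 MAXVALUE 9223372036854775807)
    );
    CREATE TABLE events_copy (LIKE events INCLUDING IDENTITY);

    SELECT sequencename, start_value, max_value
    FROM pg_sequences
    WHERE sequencename LIKE 'events%';         -- both copies keep the 64-bit bounds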
CreateStatistics(CreateStatsStmt *stmt) Assert(IsA(stmt, CreateStatsStmt)); - /* resolve the pieces of the name (namespace etc.) */ - namespaceId = QualifiedNameGetCreationNamespace(stmt->defnames, &namestr); - namestrcpy(&stxname, namestr); - - /* - * Deal with the possibility that the statistics object already exists. - */ - if (SearchSysCacheExists2(STATEXTNAMENSP, - NameGetDatum(&stxname), - ObjectIdGetDatum(namespaceId))) - { - if (stmt->if_not_exists) - { - ereport(NOTICE, - (errcode(ERRCODE_DUPLICATE_OBJECT), - errmsg("statistics object \"%s\" already exists, skipping", - namestr))); - return InvalidObjectAddress; - } - - ereport(ERROR, - (errcode(ERRCODE_DUPLICATE_OBJECT), - errmsg("statistics object \"%s\" already exists", namestr))); - } - /* * Examine the FROM clause. Currently, we only allow it to be a single * simple table, but later we'll probably allow multiple tables and JOIN @@ -141,13 +122,55 @@ CreateStatistics(CreateStatsStmt *stmt) /* You must own the relation to create stats on it */ if (!pg_class_ownercheck(RelationGetRelid(rel), stxowner)) - aclcheck_error(ACLCHECK_NOT_OWNER, ACL_KIND_CLASS, + aclcheck_error(ACLCHECK_NOT_OWNER, get_relkind_objtype(rel->rd_rel->relkind), RelationGetRelationName(rel)); } Assert(rel); relid = RelationGetRelid(rel); + /* + * If the node has a name, split it up and determine creation namespace. + * If not (a possibility not considered by the grammar, but one which can + * occur via the "CREATE TABLE ... (LIKE)" command), then we put the + * object in the same namespace as the relation, and cons up a name for + * it. + */ + if (stmt->defnames) + namespaceId = QualifiedNameGetCreationNamespace(stmt->defnames, + &namestr); + else + { + namespaceId = RelationGetNamespace(rel); + namestr = ChooseExtendedStatisticName(RelationGetRelationName(rel), + ChooseExtendedStatisticNameAddition(stmt->exprs), + "stat", + namespaceId); + } + namestrcpy(&stxname, namestr); + + /* + * Deal with the possibility that the statistics object already exists. + */ + if (SearchSysCacheExists2(STATEXTNAMENSP, + CStringGetDatum(namestr), + ObjectIdGetDatum(namespaceId))) + { + if (stmt->if_not_exists) + { + ereport(NOTICE, + (errcode(ERRCODE_DUPLICATE_OBJECT), + errmsg("statistics object \"%s\" already exists, skipping", + namestr))); + relation_close(rel, NoLock); + return InvalidObjectAddress; + } + + ereport(ERROR, + (errcode(ERRCODE_DUPLICATE_OBJECT), + errmsg("statistics object \"%s\" already exists", namestr))); + } + /* * Currently, we only allow simple column references in the expression * list. 
That will change someday, and again the grammar already supports @@ -180,7 +203,7 @@ CreateStatistics(CreateStatsStmt *stmt) if (!HeapTupleIsValid(atttuple)) ereport(ERROR, (errcode(ERRCODE_UNDEFINED_COLUMN), - errmsg("column \"%s\" referenced in statistics does not exist", + errmsg("column \"%s\" does not exist", attname))); attForm = (Form_pg_attribute) GETSTRUCT(atttuple); @@ -195,8 +218,8 @@ CreateStatistics(CreateStatsStmt *stmt) if (type->lt_opr == InvalidOid) ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("column \"%s\" cannot be used in statistics because its type has no default btree operator class", - attname))); + errmsg("column \"%s\" cannot be used in statistics because its type %s has no default btree operator class", + attname, format_type_be(attForm->atttypid)))); /* Make sure no more than STATS_MAX_DIMENSIONS columns are used */ if (numcols >= STATS_MAX_DIMENSIONS) @@ -242,7 +265,7 @@ CreateStatistics(CreateStatsStmt *stmt) stxkeys = buildint2vector(attnums, numcols); /* - * Parse the statistics types. + * Parse the statistics kinds. */ build_ndistinct = false; build_dependencies = false; @@ -263,7 +286,7 @@ CreateStatistics(CreateStatsStmt *stmt) else ereport(ERROR, (errcode(ERRCODE_SYNTAX_ERROR), - errmsg("unrecognized statistic type \"%s\"", + errmsg("unrecognized statistics kind \"%s\"", type))); } /* If no statistic type was specified, build them all. */ @@ -340,6 +363,11 @@ CreateStatistics(CreateStatsStmt *stmt) * STATISTICS, which is more work than it seems worth. */ + /* Add any requested comment */ + if (stmt->stxcomment != NULL) + CreateComments(statoid, StatisticExtRelationId, 0, + stmt->stxcomment); + /* Return stats object's address */ return myself; } @@ -405,3 +433,94 @@ UpdateStatisticsForTypeChange(Oid statsOid, Oid relationOid, int attnum, * Future types of extended stats will likely require us to work harder. */ } + +/* + * Select a nonconflicting name for a new statistics. + * + * name1, name2, and label are used the same way as for makeObjectName(), + * except that the label can't be NULL; digits will be appended to the label + * if needed to create a name that is unique within the specified namespace. + * + * Returns a palloc'd string. + * + * Note: it is theoretically possible to get a collision anyway, if someone + * else chooses the same name concurrently. This is fairly unlikely to be + * a problem in practice, especially if one is holding a share update + * exclusive lock on the relation identified by name1. However, if choosing + * multiple names within a single command, you'd better create the new object + * and do CommandCounterIncrement before choosing the next one! 
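The CreateStatistics() hunks above reword the messages around statistics kinds and, just below, add ChooseExtendedStatisticName() for objects created without an explicit name. A short sketch with hypothetical names:

    CREATE TABLE orders (region int, zip int);

    CREATE STATISTICS orders_dep (dependencies) ON region, zip FROM orders;
    CREATE STATISTICS orders_nd  (ndistinct)    ON region, zip FROM orders;

    -- An unknown kind now reports the new wording:
    --   ERROR:  unrecognized statistics kind "histogram"
    CREATE STATISTICS orders_bad (histogram) ON region, zip FROM orders;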
+ */ +static char * +ChooseExtendedStatisticName(const char *name1, const char *name2, + const char *label, Oid namespaceid) +{ + int pass = 0; + char *stxname = NULL; + char modlabel[NAMEDATALEN]; + + /* try the unmodified label first */ + StrNCpy(modlabel, label, sizeof(modlabel)); + + for (;;) + { + Oid existingstats; + + stxname = makeObjectName(name1, name2, modlabel); + + existingstats = GetSysCacheOid2(STATEXTNAMENSP, + PointerGetDatum(stxname), + ObjectIdGetDatum(namespaceid)); + if (!OidIsValid(existingstats)) + break; + + /* found a conflict, so try a new name component */ + pfree(stxname); + snprintf(modlabel, sizeof(modlabel), "%s%d", label, ++pass); + } + + return stxname; +} + +/* + * Generate "name2" for a new statistics given the list of column names for it + * This will be passed to ChooseExtendedStatisticName along with the parent + * table name and a suitable label. + * + * We know that less than NAMEDATALEN characters will actually be used, + * so we can truncate the result once we've generated that many. + * + * XXX see also ChooseIndexNameAddition. + */ +static char * +ChooseExtendedStatisticNameAddition(List *exprs) +{ + char buf[NAMEDATALEN * 2]; + int buflen = 0; + ListCell *lc; + + buf[0] = '\0'; + foreach(lc, exprs) + { + ColumnRef *cref = (ColumnRef *) lfirst(lc); + const char *name; + + /* It should be one of these, but just skip if it happens not to be */ + if (!IsA(cref, ColumnRef)) + continue; + + name = strVal((Value *) linitial(cref->fields)); + + if (buflen > 0) + buf[buflen++] = '_'; /* insert _ between names */ + + /* + * At this point we have buflen <= NAMEDATALEN. name should be less + * than NAMEDATALEN already, but use strlcpy for paranoia. + */ + strlcpy(buf + buflen, name, NAMEDATALEN); + buflen += strlen(buf + buflen); + if (buflen >= NAMEDATALEN) + break; + } + return pstrdup(buf); +} diff --git a/src/backend/commands/subscriptioncmds.c b/src/backend/commands/subscriptioncmds.c index 9bc1d178fc..f138e61a8d 100644 --- a/src/backend/commands/subscriptioncmds.c +++ b/src/backend/commands/subscriptioncmds.c @@ -3,7 +3,7 @@ * subscriptioncmds.c * subscription catalog manipulation functions * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * IDENTIFICATION @@ -244,7 +244,7 @@ parse_subscription_options(List *options, bool *connect, bool *enabled_given, } /* - * Auxiliary function to return a text array out of a list of String nodes. + * Auxiliary function to build a text array out of a list of String nodes. */ static Datum publicationListToArray(List *publist) @@ -259,12 +259,11 @@ publicationListToArray(List *publist) /* Create memory context for temporary allocations. 
*/ memcxt = AllocSetContextCreate(CurrentMemoryContext, "publicationListToArray to array", - ALLOCSET_DEFAULT_MINSIZE, - ALLOCSET_DEFAULT_INITSIZE, - ALLOCSET_DEFAULT_MAXSIZE); + ALLOCSET_DEFAULT_SIZES); oldcxt = MemoryContextSwitchTo(memcxt); - datums = palloc(sizeof(text *) * list_length(publist)); + datums = (Datum *) palloc(sizeof(Datum) * list_length(publist)); + foreach(cell, publist) { char *name = strVal(lfirst(cell)); @@ -275,7 +274,7 @@ publicationListToArray(List *publist) { char *pname = strVal(lfirst(pcell)); - if (name == pname) + if (pcell == cell) break; if (strcmp(name, pname) == 0) @@ -292,6 +291,7 @@ publicationListToArray(List *publist) arr = construct_array(datums, list_length(publist), TEXTOID, -1, false, 'i'); + MemoryContextDelete(memcxt); return PointerGetDatum(arr); @@ -339,7 +339,7 @@ CreateSubscription(CreateSubscriptionStmt *stmt, bool isTopLevel) * replication slot. */ if (create_slot) - PreventTransactionChain(isTopLevel, "CREATE SUBSCRIPTION ... WITH (create_slot = true)"); + PreventInTransactionBlock(isTopLevel, "CREATE SUBSCRIPTION ... WITH (create_slot = true)"); if (!superuser()) ereport(ERROR, @@ -450,8 +450,8 @@ CreateSubscription(CreateSubscriptionStmt *stmt, bool isTopLevel) CheckSubscriptionRelkind(get_rel_relkind(relid), rv->schemaname, rv->relname); - SetSubscriptionRelState(subid, relid, table_state, - InvalidXLogRecPtr, false); + AddSubscriptionRelState(subid, relid, table_state, + InvalidXLogRecPtr); } /* @@ -569,9 +569,9 @@ AlterSubscription_refresh(Subscription *sub, bool copy_data) if (!bsearch(&relid, subrel_local_oids, list_length(subrel_states), sizeof(Oid), oid_cmp)) { - SetSubscriptionRelState(sub->oid, relid, + AddSubscriptionRelState(sub->oid, relid, copy_data ? SUBREL_STATE_INIT : SUBREL_STATE_READY, - InvalidXLogRecPtr, false); + InvalidXLogRecPtr); ereport(DEBUG1, (errmsg("table \"%s.%s\" added to subscription \"%s\"", rv->schemaname, rv->relname, sub->name))); @@ -635,7 +635,7 @@ AlterSubscription(AlterSubscriptionStmt *stmt) /* must be owner */ if (!pg_subscription_ownercheck(HeapTupleGetOid(tup), GetUserId())) - aclcheck_error(ACLCHECK_NOT_OWNER, ACL_KIND_SUBSCRIPTION, + aclcheck_error(ACLCHECK_NOT_OWNER, OBJECT_SUBSCRIPTION, stmt->subname); subid = HeapTupleGetOid(tup); @@ -854,7 +854,7 @@ DropSubscription(DropSubscriptionStmt *stmt, bool isTopLevel) /* must be owner */ if (!pg_subscription_ownercheck(subid, GetUserId())) - aclcheck_error(ACLCHECK_NOT_OWNER, ACL_KIND_SUBSCRIPTION, + aclcheck_error(ACLCHECK_NOT_OWNER, OBJECT_SUBSCRIPTION, stmt->subname); /* DROP hook for the subscription being removed */ @@ -897,7 +897,7 @@ DropSubscription(DropSubscriptionStmt *stmt, bool isTopLevel) * don't have the proper facilities for that. */ if (slotname) - PreventTransactionChain(isTopLevel, "DROP SUBSCRIPTION"); + PreventInTransactionBlock(isTopLevel, "DROP SUBSCRIPTION"); ObjectAddressSet(myself, SubscriptionRelationId, subid); @@ -909,9 +909,17 @@ DropSubscription(DropSubscriptionStmt *stmt, bool isTopLevel) ReleaseSysCache(tup); /* - * If we are dropping the replication slot, stop all the subscription - * workers immediately, so that the slot becomes accessible. Otherwise - * just schedule the stopping for the end of the transaction. + * Stop all the subscription workers immediately. + * + * This is necessary if we are dropping the replication slot, so that the + * slot becomes accessible. + * + * It is also necessary if the subscription is disabled and was disabled + * in the same transaction. 
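DropSubscription() above keeps using PreventInTransactionBlock() whenever a replication slot is attached, since dropping the remote slot cannot be rolled back; the slot handling itself continues just below. A hedged sketch of the user-visible rule, with a made-up subscription name:

    BEGIN;
    DROP SUBSCRIPTION mysub;
        -- ERROR:  DROP SUBSCRIPTION cannot run inside a transaction block
    ROLLBACK;

    -- Allowed inside a block once the subscription no longer owns a slot:
    ALTER SUBSCRIPTION mysub DISABLE;
    ALTER SUBSCRIPTION mysub SET (slot_name = NONE);
    BEGIN;
    DROP SUBSCRIPTION mysub;
    COMMIT;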
Then the workers haven't seen the disabling + * yet and will still be running, leading to hangs later when we want to + * drop the replication origin. If the subscription was disabled before + * this transaction, then there shouldn't be any workers left, so this + * won't make a difference. * * New workers won't be started because we hold an exclusive lock on the * subscription till the end of the transaction. @@ -923,10 +931,7 @@ DropSubscription(DropSubscriptionStmt *stmt, bool isTopLevel) { LogicalRepWorker *w = (LogicalRepWorker *) lfirst(lc); - if (slotname) - logicalrep_worker_stop(w->subid, w->relid); - else - logicalrep_worker_stop_at_commit(w->subid, w->relid); + logicalrep_worker_stop(w->subid, w->relid); } list_free(subworkers); @@ -959,7 +964,7 @@ DropSubscription(DropSubscriptionStmt *stmt, bool isTopLevel) load_file("libpqwalreceiver", false); initStringInfo(&cmd); - appendStringInfo(&cmd, "DROP_REPLICATION_SLOT %s", quote_identifier(slotname)); + appendStringInfo(&cmd, "DROP_REPLICATION_SLOT %s WAIT", quote_identifier(slotname)); wrconn = walrcv_connect(conninfo, true, subname, &err); if (wrconn == NULL) @@ -1017,7 +1022,7 @@ AlterSubscriptionOwner_internal(Relation rel, HeapTuple tup, Oid newOwnerId) return; if (!pg_subscription_ownercheck(HeapTupleGetOid(tup), GetUserId())) - aclcheck_error(ACLCHECK_NOT_OWNER, ACL_KIND_SUBSCRIPTION, + aclcheck_error(ACLCHECK_NOT_OWNER, OBJECT_SUBSCRIPTION, NameStr(form->subname)); /* New owner must be a superuser */ diff --git a/src/backend/commands/tablecmds.c b/src/backend/commands/tablecmds.c index 513a9ec485..82989158ee 100644 --- a/src/backend/commands/tablecmds.c +++ b/src/backend/commands/tablecmds.c @@ -3,7 +3,7 @@ * tablecmds.c * Commands for creating and altering table structures and settings * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * @@ -16,6 +16,7 @@ #include "access/genam.h" #include "access/heapam.h" +#include "access/heapam_xlog.h" #include "access/multixact.h" #include "access/reloptions.h" #include "access/relscan.h" @@ -34,17 +35,14 @@ #include "catalog/pg_am.h" #include "catalog/pg_collation.h" #include "catalog/pg_constraint.h" -#include "catalog/pg_constraint_fn.h" #include "catalog/pg_depend.h" #include "catalog/pg_foreign_table.h" #include "catalog/pg_inherits.h" -#include "catalog/pg_inherits_fn.h" #include "catalog/pg_namespace.h" #include "catalog/pg_opclass.h" #include "catalog/pg_tablespace.h" #include "catalog/pg_trigger.h" #include "catalog/pg_type.h" -#include "catalog/pg_type_fn.h" #include "catalog/storage.h" #include "catalog/storage_xlog.h" #include "catalog/toasting.h" @@ -79,6 +77,7 @@ #include "parser/parse_type.h" #include "parser/parse_utilcmd.h" #include "parser/parser.h" +#include "partitioning/partbounds.h" #include "pgstat.h" #include "rewrite/rewriteDefine.h" #include "rewrite/rewriteHandler.h" @@ -94,6 +93,7 @@ #include "utils/inval.h" #include "utils/lsyscache.h" #include "utils/memutils.h" +#include "utils/partcache.h" #include "utils/relcache.h" #include "utils/ruleutils.h" #include "utils/snapmgr.h" @@ -168,6 +168,8 @@ typedef struct AlteredTableInfo bool chgPersistence; /* T if SET LOGGED/UNLOGGED is used */ char newrelpersistence; /* if above is true */ Expr *partition_constraint; /* for attach partition validation */ + /* true, if validating default due to some other attach/detach */ + bool validate_default; 
/* Objects to rebuild after completing ALTER TYPE operations */ List *changedConstraintOids; /* OIDs of constraints to rebuild */ List *changedConstraintDefs; /* string definitions of same */ @@ -264,6 +266,12 @@ static const struct dropmsgstrings dropmsgstringarray[] = { gettext_noop("table \"%s\" does not exist, skipping"), gettext_noop("\"%s\" is not a table"), gettext_noop("Use DROP TABLE to remove a table.")}, + {RELKIND_PARTITIONED_INDEX, + ERRCODE_UNDEFINED_OBJECT, + gettext_noop("index \"%s\" does not exist"), + gettext_noop("index \"%s\" does not exist, skipping"), + gettext_noop("\"%s\" is not an index"), + gettext_noop("Use DROP INDEX to remove an index.")}, {'\0', 0, NULL, NULL, NULL, NULL} }; @@ -282,6 +290,7 @@ struct DropRelationCallbackState #define ATT_INDEX 0x0008 #define ATT_COMPOSITE_TYPE 0x0010 #define ATT_FOREIGN_TABLE 0x0020 +#define ATT_PARTITIONED_INDEX 0x0040 /* * Partition tables are expected to be dropped when the parent partitioned @@ -291,7 +300,10 @@ struct DropRelationCallbackState #define child_dependency_type(child_is_partition) \ ((child_is_partition) ? DEPENDENCY_AUTO : DEPENDENCY_NORMAL) -static void truncate_check_rel(Relation rel); +static void truncate_check_rel(Oid relid, Form_pg_class reltuple); +static void truncate_check_activity(Relation rel); +static void RangeVarCallbackForTruncate(const RangeVar *relation, + Oid relId, Oid oldRelId, void *arg); static List *MergeAttributes(List *schema, List *supers, char relpersistence, bool is_partition, List **supOids, List **supconstr, int *supOidCount); @@ -301,7 +313,7 @@ static void MergeConstraintsIntoExisting(Relation child_rel, Relation parent_rel static void StoreCatalogInheritance(Oid relationId, List *supers, bool child_is_partition); static void StoreCatalogInheritance1(Oid relationId, Oid parentOid, - int16 seqNumber, Relation inhRelation, + int32 seqNumber, Relation inhRelation, bool child_is_partition); static int findAttrByName(const char *attributeName, List *schema); static void AlterIndexNamespaces(Relation classRel, Relation rel, @@ -329,9 +341,6 @@ static void validateCheckConstraint(Relation rel, HeapTuple constrtup); static void validateForeignKeyConstraint(char *conname, Relation rel, Relation pkrel, Oid pkindOid, Oid constraintOid); -static void createForeignKeyTriggers(Relation rel, Oid refRelOid, - Constraint *fkconstraint, - Oid constraintOid, Oid indexOid); static void ATController(AlterTableStmt *parsetree, Relation rel, List *cmds, bool recurse, LOCKMODE lockmode); static void ATPrepCmd(List **wqueue, Relation rel, AlterTableCmd *cmd, @@ -375,9 +384,9 @@ static ObjectAddress ATExecAddIdentity(Relation rel, const char *colName, static ObjectAddress ATExecSetIdentity(Relation rel, const char *colName, Node *def, LOCKMODE lockmode); static ObjectAddress ATExecDropIdentity(Relation rel, const char *colName, bool missing_ok, LOCKMODE lockmode); -static void ATPrepSetStatistics(Relation rel, const char *colName, +static void ATPrepSetStatistics(Relation rel, const char *colName, int16 colNum, Node *newValue, LOCKMODE lockmode); -static ObjectAddress ATExecSetStatistics(Relation rel, const char *colName, +static ObjectAddress ATExecSetStatistics(Relation rel, const char *colName, int16 colNum, Node *newValue, LOCKMODE lockmode); static ObjectAddress ATExecSetOptions(Relation rel, const char *colName, Node *options, bool isReset, LOCKMODE lockmode); @@ -402,8 +411,10 @@ static ObjectAddress ATAddCheckConstraint(List **wqueue, Constraint *constr, bool recurse, bool recursing, bool 
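The int16 colNum argument threaded through ATPrepSetStatistics()/ATExecSetStatistics() above lets a column be addressed by position rather than name, which is what ALTER INDEX ... ALTER COLUMN n SET STATISTICS needs for unnamed expression columns. A sketch (names are illustrative):

    CREATE TABLE readings (a int, b int);
    CREATE INDEX readings_sum_idx ON readings ((a + b));

    -- The expression column has no usable name, so it is addressed by number:
    ALTER INDEX readings_sum_idx ALTER COLUMN 1 SET STATISTICS 1000;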
is_readd, LOCKMODE lockmode); -static ObjectAddress ATAddForeignKeyConstraint(AlteredTableInfo *tab, Relation rel, - Constraint *fkconstraint, LOCKMODE lockmode); +static ObjectAddress ATAddForeignKeyConstraint(List **wqueue, AlteredTableInfo *tab, + Relation rel, Constraint *fkconstraint, Oid parentConstr, + bool recurse, bool recursing, + LOCKMODE lockmode); static void ATExecDropConstraint(Relation rel, const char *constrName, DropBehavior behavior, bool recurse, bool recursing, @@ -423,7 +434,8 @@ static void ATPostAlterTypeParse(Oid oldId, Oid oldRelId, Oid refRelId, char *cmd, List **wqueue, LOCKMODE lockmode, bool rewrite); static void RebuildConstraintComment(AlteredTableInfo *tab, int pass, - Oid objid, Relation rel, char *conname); + Oid objid, Relation rel, List *domname, + const char *conname); static void TryReuseIndex(Oid oldId, IndexStmt *stmt); static void TryReuseForeignKey(Oid oldId, Constraint *con); static void change_owner_fix_column_acls(Oid relationOid, @@ -435,14 +447,15 @@ static ObjectAddress ATExecClusterOn(Relation rel, const char *indexName, static void ATExecDropCluster(Relation rel, LOCKMODE lockmode); static bool ATPrepChangePersistence(Relation rel, bool toLogged); static void ATPrepSetTableSpace(AlteredTableInfo *tab, Relation rel, - char *tablespacename, LOCKMODE lockmode); + const char *tablespacename, LOCKMODE lockmode); static void ATExecSetTableSpace(Oid tableOid, Oid newTableSpace, LOCKMODE lockmode); +static void ATExecPartedIdxSetTableSpace(Relation rel, Oid newTableSpace); static void ATExecSetRelOptions(Relation rel, List *defList, AlterTableType operation, LOCKMODE lockmode); -static void ATExecEnableDisableTrigger(Relation rel, char *trigname, +static void ATExecEnableDisableTrigger(Relation rel, const char *trigname, char fires_when, bool skip_system, LOCKMODE lockmode); -static void ATExecEnableDisableRule(Relation rel, char *rulename, +static void ATExecEnableDisableRule(Relation rel, const char *rulename, char fires_when, LOCKMODE lockmode); static void ATPrepAddInherit(Relation child_rel); static ObjectAddress ATExecAddInherit(Relation child_rel, RangeVar *parent, LOCKMODE lockmode); @@ -465,15 +478,26 @@ static void RangeVarCallbackForDropRelation(const RangeVar *rel, Oid relOid, Oid oldRelOid, void *arg); static void RangeVarCallbackForAlterRelation(const RangeVar *rv, Oid relid, Oid oldrelid, void *arg); -static bool is_partition_attr(Relation rel, AttrNumber attnum, bool *used_in_expr); static PartitionSpec *transformPartitionSpec(Relation rel, PartitionSpec *partspec, char *strategy); -static void ComputePartitionAttrs(Relation rel, List *partParams, AttrNumber *partattrs, - List **partexprs, Oid *partopclass, Oid *partcollation); +static void ComputePartitionAttrs(ParseState *pstate, Relation rel, List *partParams, AttrNumber *partattrs, + List **partexprs, Oid *partopclass, Oid *partcollation, char strategy); static void CreateInheritance(Relation child_rel, Relation parent_rel); static void RemoveInheritance(Relation child_rel, Relation parent_rel); static ObjectAddress ATExecAttachPartition(List **wqueue, Relation rel, PartitionCmd *cmd); +static void AttachPartitionEnsureIndexes(Relation rel, Relation attachrel); +static void QueuePartitionConstraintValidation(List **wqueue, Relation scanrel, + List *partConstraint, + bool validate_default); +static void CloneRowTriggersToPartition(Relation parent, Relation partition); static ObjectAddress ATExecDetachPartition(Relation rel, RangeVar *name); +static ObjectAddress 
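Several prototypes added here (AttachPartitionEnsureIndexes, CloneRowTriggersToPartition, and ATExecAttachPartitionIdx, whose declaration continues just below) back the partitioned-index machinery, including ALTER INDEX ... ATTACH PARTITION. A hedged sketch of that statement:

    CREATE TABLE metrics (id int) PARTITION BY RANGE (id);
    CREATE TABLE metrics_1 PARTITION OF metrics FOR VALUES FROM (0) TO (1000);

    CREATE INDEX metrics_1_id_idx ON metrics_1 (id);
    CREATE INDEX metrics_id_idx ON ONLY metrics (id);   -- parent index, not yet valid

    -- Attach the existing child index; the parent index becomes valid once
    -- every partition has an attached index.
    ALTER INDEX metrics_id_idx ATTACH PARTITION metrics_1_id_idx;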
ATExecAttachPartitionIdx(List **wqueue, Relation rel, + RangeVar *name); +static void validatePartitionedIndex(Relation partedIdx, Relation partedTbl); +static void refuseDupeIndexAttach(Relation parentIdx, Relation partIdx, + Relation partitionTbl); +static void update_relispartition(Relation classRel, Oid relationId, + bool newval); /* ---------------------------------------------------------------- @@ -485,6 +509,7 @@ static ObjectAddress ATExecDetachPartition(Relation rel, RangeVar *name); * relkind: relkind to assign to the new relation * ownerId: if not InvalidOid, use this as the new relation's owner. * typaddress: if not null, it's set to the pg_type entry's address. + * queryString: for error reporting * * Note that permissions checks are done against current user regardless of * ownerId. A nonzero ownerId is used when someone is creating a relation @@ -582,7 +607,7 @@ DefineRelation(CreateStmt *stmt, char relkind, Oid ownerId, aclresult = pg_tablespace_aclcheck(tablespaceId, GetUserId(), ACL_CREATE); if (aclresult != ACLCHECK_OK) - aclcheck_error(aclresult, ACL_KIND_TABLESPACE, + aclcheck_error(aclresult, OBJECT_TABLESPACE, get_tablespace_name(tablespaceId)); } @@ -680,8 +705,10 @@ DefineRelation(CreateStmt *stmt, char relkind, Oid ownerId, foreach(listptr, stmt->tableElts) { ColumnDef *colDef = lfirst(listptr); + Form_pg_attribute attr; attnum++; + attr = TupleDescAttr(descriptor, attnum - 1); if (colDef->raw_default != NULL) { @@ -692,8 +719,9 @@ DefineRelation(CreateStmt *stmt, char relkind, Oid ownerId, rawEnt = (RawColumnDefault *) palloc(sizeof(RawColumnDefault)); rawEnt->attnum = attnum; rawEnt->raw_default = colDef->raw_default; + rawEnt->missingMode = false; rawDefaults = lappend(rawDefaults, rawEnt); - descriptor->attrs[attnum - 1]->atthasdef = true; + attr->atthasdef = true; } else if (colDef->cooked_default != NULL) { @@ -710,11 +738,11 @@ DefineRelation(CreateStmt *stmt, char relkind, Oid ownerId, cooked->inhcount = 0; /* ditto */ cooked->is_no_inherit = false; cookedDefaults = lappend(cookedDefaults, cooked); - descriptor->attrs[attnum - 1]->atthasdef = true; + attr->atthasdef = true; } if (colDef->identity) - descriptor->attrs[attnum - 1]->attidentity = colDef->identity; + attr->attidentity = colDef->identity; } /* @@ -743,11 +771,9 @@ DefineRelation(CreateStmt *stmt, char relkind, Oid ownerId, true, allowSystemTableMods, false, + InvalidOid, typaddress); - /* Store inheritance information for new rel. */ - StoreCatalogInheritance(relationId, inheritOids, stmt->partbound != NULL); - /* * We must bump the command counter to make the newly-created relation * tuple visible for opening. @@ -767,8 +793,10 @@ DefineRelation(CreateStmt *stmt, char relkind, Oid ownerId, { PartitionBoundSpec *bound; ParseState *pstate; - Oid parentId = linitial_oid(inheritOids); - Relation parent; + Oid parentId = linitial_oid(inheritOids), + defaultPartOid; + Relation parent, + defaultRel = NULL; /* Already have strong enough lock on the parent */ parent = heap_open(parentId, NoLock); @@ -783,7 +811,31 @@ DefineRelation(CreateStmt *stmt, char relkind, Oid ownerId, errmsg("\"%s\" is not partitioned", RelationGetRelationName(parent)))); - /* Tranform the bound values */ + /* + * The partition constraint of the default partition depends on the + * partition bounds of every other partition. It is possible that + * another backend might be about to execute a query on the default + * partition table, and that the query relies on previously cached + * default partition constraints. 
We must therefore take a table lock + * strong enough to prevent all queries on the default partition from + * proceeding until we commit and send out a shared-cache-inval notice + * that will make them update their index lists. + * + * Order of locking: The relation being added won't be visible to + * other backends until it is committed, hence here in + * DefineRelation() the order of locking the default partition and the + * relation being added does not matter. But at all other places we + * need to lock the default relation before we lock the relation being + * added or removed i.e. we should take the lock in same order at all + * the places such that lock parent, lock default partition and then + * lock the partition so as to avoid a deadlock. + */ + defaultPartOid = + get_default_oid_from_partdesc(RelationGetPartitionDesc(parent)); + if (OidIsValid(defaultPartOid)) + defaultRel = heap_open(defaultPartOid, AccessExclusiveLock); + + /* Transform the bound values */ pstate = make_parsestate(NULL); pstate->p_sourcetext = queryString; @@ -791,30 +843,40 @@ DefineRelation(CreateStmt *stmt, char relkind, Oid ownerId, /* * Check first that the new partition's bound is valid and does not - * overlap with any of existing partitions of the parent - note that - * it does not return on error. + * overlap with any of existing partitions of the parent. */ check_new_partition_bound(relname, parent, bound); + /* + * If the default partition exists, its partition constraints will + * change after the addition of this new partition such that it won't + * allow any row that qualifies for this new partition. So, check that + * the existing data in the default partition satisfies the constraint + * as it will exist after adding this partition. + */ + if (OidIsValid(defaultPartOid)) + { + check_default_partition_contents(parent, defaultRel, bound); + /* Keep the lock until commit. */ + heap_close(defaultRel, NoLock); + } + /* Update the pg_class entry. */ StorePartitionBound(rel, parent, bound); heap_close(parent, NoLock); - - /* - * The code that follows may also update the pg_class tuple to update - * relnumchecks, so bump up the command counter to avoid the "already - * updated by self" error. - */ - CommandCounterIncrement(); } + /* Store inheritance information for new rel. */ + StoreCatalogInheritance(relationId, inheritOids, stmt->partbound != NULL); + /* * Process the partitioning specification (if any) and store the partition * key information into the catalog. 
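The DefineRelation() logic above locks an existing default partition and runs check_default_partition_contents() so that rows already stored there cannot end up violating the tightened default-partition constraint. A sketch of the behaviour being guarded (hypothetical names):

    CREATE TABLE cities (id int) PARTITION BY LIST (id);
    CREATE TABLE cities_default PARTITION OF cities DEFAULT;

    INSERT INTO cities VALUES (1);             -- routed to cities_default

    -- Creating a partition for id = 1 scans the default partition first; the
    -- row above could no longer live there, so this is rejected:
    CREATE TABLE cities_1 PARTITION OF cities FOR VALUES IN (1);

    DELETE FROM cities_default WHERE id = 1;
    CREATE TABLE cities_1 PARTITION OF cities FOR VALUES IN (1);   -- now succeeds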
*/ if (stmt->partspec) { + ParseState *pstate; char strategy; int partnatts; AttrNumber partattrs[PARTITION_MAX_KEYS]; @@ -822,6 +884,9 @@ DefineRelation(CreateStmt *stmt, char relkind, Oid ownerId, Oid partcollation[PARTITION_MAX_KEYS]; List *partexprs = NIL; + pstate = make_parsestate(NULL); + pstate->p_sourcetext = queryString; + partnatts = list_length(stmt->partspec->partParams); /* Protect fixed-size arrays here and in executor */ @@ -840,12 +905,78 @@ DefineRelation(CreateStmt *stmt, char relkind, Oid ownerId, stmt->partspec = transformPartitionSpec(rel, stmt->partspec, &strategy); - ComputePartitionAttrs(rel, stmt->partspec->partParams, + ComputePartitionAttrs(pstate, rel, stmt->partspec->partParams, partattrs, &partexprs, partopclass, - partcollation); + partcollation, strategy); StorePartitionKey(rel, strategy, partnatts, partattrs, partexprs, partopclass, partcollation); + + /* make it all visible */ + CommandCounterIncrement(); + } + + /* + * If we're creating a partition, create now all the indexes, triggers, + * FKs defined in the parent. + * + * We can't do it earlier, because DefineIndex wants to know the partition + * key which we just stored. + */ + if (stmt->partbound) + { + Oid parentId = linitial_oid(inheritOids); + Relation parent; + List *idxlist; + ListCell *cell; + + /* Already have strong enough lock on the parent */ + parent = heap_open(parentId, NoLock); + idxlist = RelationGetIndexList(parent); + + /* + * For each index in the parent table, create one in the partition + */ + foreach(cell, idxlist) + { + Relation idxRel = index_open(lfirst_oid(cell), AccessShareLock); + AttrNumber *attmap; + IndexStmt *idxstmt; + Oid constraintOid; + + attmap = convert_tuples_by_name_map(RelationGetDescr(rel), + RelationGetDescr(parent), + gettext_noop("could not convert row type")); + idxstmt = + generateClonedIndexStmt(NULL, RelationGetRelid(rel), idxRel, + attmap, RelationGetDescr(rel)->natts, + &constraintOid); + DefineIndex(RelationGetRelid(rel), + idxstmt, + InvalidOid, + RelationGetRelid(idxRel), + constraintOid, + false, false, false, false, false); + + index_close(idxRel, AccessShareLock); + } + + list_free(idxlist); + + /* + * If there are any row-level triggers, clone them to the new + * partition. + */ + if (parent->trigdesc != NULL) + CloneRowTriggersToPartition(parent, rel); + + /* + * And foreign keys too. Note that because we're freshly creating the + * table, there is no need to verify these new constraints. + */ + CloneForeignKeyConstraints(parentId, relationId, NULL); + + heap_close(parent, NoLock); } /* @@ -859,7 +990,7 @@ DefineRelation(CreateStmt *stmt, char relkind, Oid ownerId, */ if (rawDefaults || stmt->constraints) AddRelationNewConstraints(rel, rawDefaults, stmt->constraints, - true, true, false); + true, true, false, queryString); ObjectAddressSet(address, RelationRelationId, relationId); @@ -1043,8 +1174,7 @@ RemoveRelations(DropStmt *drop) state.heapOid = InvalidOid; state.partParentOid = InvalidOid; state.concurrent = drop->concurrent; - relOid = RangeVarGetRelidExtended(rel, lockmode, true, - false, + relOid = RangeVarGetRelidExtended(rel, lockmode, RVR_MISSING_OK, RangeVarCallbackForDropRelation, (void *) &state); @@ -1128,10 +1258,13 @@ RangeVarCallbackForDropRelation(const RangeVar *rel, Oid relOid, Oid oldRelOid, * but RemoveRelations() can only pass one relkind for a given relation. * It chooses RELKIND_RELATION for both regular and partitioned tables. 
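The block above makes a freshly created partition pick up the parent's indexes, row-level triggers and foreign keys at CREATE TABLE ... PARTITION OF time. A sketch of the index part (the trailing \d is a psql command, shown only to inspect the cloned index):

    CREATE TABLE measurements (logdate date NOT NULL, reading numeric)
        PARTITION BY RANGE (logdate);
    CREATE INDEX measurements_logdate_idx ON measurements (logdate);

    CREATE TABLE measurements_2018 PARTITION OF measurements
        FOR VALUES FROM ('2018-01-01') TO ('2019-01-01');

    \d measurements_2018       -- lists an automatically created index on (logdate)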
* That means we must be careful before giving the wrong type error when - * the relation is RELKIND_PARTITIONED_TABLE. + * the relation is RELKIND_PARTITIONED_TABLE. An equivalent problem + * exists with indexes. */ if (classform->relkind == RELKIND_PARTITIONED_TABLE) expected_relkind = RELKIND_RELATION; + else if (classform->relkind == RELKIND_PARTITIONED_INDEX) + expected_relkind = RELKIND_INDEX; else expected_relkind = classform->relkind; @@ -1141,7 +1274,7 @@ RangeVarCallbackForDropRelation(const RangeVar *rel, Oid relOid, Oid oldRelOid, /* Allow DROP to either table owner or schema owner */ if (!pg_class_ownercheck(relOid, GetUserId()) && !pg_namespace_ownercheck(classform->relnamespace, GetUserId())) - aclcheck_error(ACLCHECK_NOT_OWNER, ACL_KIND_CLASS, + aclcheck_error(ACLCHECK_NOT_OWNER, get_relkind_objtype(get_rel_relkind(relOid)), rel->relname); if (!allowSystemTableMods && IsSystemClass(relOid, classform)) @@ -1159,7 +1292,8 @@ RangeVarCallbackForDropRelation(const RangeVar *rel, Oid relOid, Oid oldRelOid, * we do it the other way around. No error if we don't find a pg_index * entry, though --- the relation may have been dropped. */ - if (relkind == RELKIND_INDEX && relOid != oldRelOid) + if ((relkind == RELKIND_INDEX || relkind == RELKIND_PARTITIONED_INDEX) && + relOid != oldRelOid) { state->heapOid = IndexGetRelation(relOid, true); if (OidIsValid(state->heapOid)) @@ -1197,11 +1331,7 @@ ExecuteTruncate(TruncateStmt *stmt) { List *rels = NIL; List *relids = NIL; - List *seq_relids = NIL; - EState *estate; - ResultRelInfo *resultRelInfos; - ResultRelInfo *resultRelInfo; - SubTransactionId mySubid; + List *relids_logged = NIL; ListCell *cell; /* @@ -1214,17 +1344,31 @@ ExecuteTruncate(TruncateStmt *stmt) bool recurse = rv->inh; Oid myrelid; - rel = heap_openrv(rv, AccessExclusiveLock); - myrelid = RelationGetRelid(rel); + myrelid = RangeVarGetRelidExtended(rv, AccessExclusiveLock, + 0, RangeVarCallbackForTruncate, + NULL); + + /* open the relation, we already hold a lock on it */ + rel = heap_open(myrelid, NoLock); + /* don't throw error for "TRUNCATE foo, foo" */ if (list_member_oid(relids, myrelid)) { heap_close(rel, AccessExclusiveLock); continue; } - truncate_check_rel(rel); + + /* + * RangeVarGetRelidExtended() has done most checks with its callback, + * but other checks with the now-opened Relation remain. 
+ */ + truncate_check_activity(rel); + rels = lappend(rels, rel); relids = lappend_oid(relids, myrelid); + /* Log this relation only if needed for logical decoding */ + if (RelationIsLogicallyLogged(rel)) + relids_logged = lappend_oid(relids_logged, myrelid); if (recurse) { @@ -1242,19 +1386,64 @@ ExecuteTruncate(TruncateStmt *stmt) /* find_all_inheritors already got lock */ rel = heap_open(childrelid, NoLock); - truncate_check_rel(rel); + truncate_check_rel(RelationGetRelid(rel), rel->rd_rel); + truncate_check_activity(rel); + rels = lappend(rels, rel); relids = lappend_oid(relids, childrelid); + /* Log this relation only if needed for logical decoding */ + if (RelationIsLogicallyLogged(rel)) + relids_logged = lappend_oid(relids_logged, childrelid); } } else if (rel->rd_rel->relkind == RELKIND_PARTITIONED_TABLE) ereport(ERROR, (errcode(ERRCODE_WRONG_OBJECT_TYPE), errmsg("cannot truncate only a partitioned table"), - errhint("Do not specify the ONLY keyword, or use truncate only on the partitions directly."))); + errhint("Do not specify the ONLY keyword, or use TRUNCATE ONLY on the partitions directly."))); + } + + ExecuteTruncateGuts(rels, relids, relids_logged, + stmt->behavior, stmt->restart_seqs); + + /* And close the rels */ + foreach(cell, rels) + { + Relation rel = (Relation) lfirst(cell); + + heap_close(rel, NoLock); } +} + +/* + * ExecuteTruncateGuts + * + * Internal implementation of TRUNCATE. This is called by the actual TRUNCATE + * command (see above) as well as replication subscribers that execute a + * replicated TRUNCATE action. + * + * explicit_rels is the list of Relations to truncate that the command + * specified. relids is the list of Oids corresponding to explicit_rels. + * relids_logged is the list of Oids (a subset of relids) that require + * WAL-logging. This is all a bit redundant, but the existing callers have + * this information handy in this form. + */ +void +ExecuteTruncateGuts(List *explicit_rels, List *relids, List *relids_logged, + DropBehavior behavior, bool restart_seqs) +{ + List *rels; + List *seq_relids = NIL; + EState *estate; + ResultRelInfo *resultRelInfos; + ResultRelInfo *resultRelInfo; + SubTransactionId mySubid; + ListCell *cell; + Oid *logrelids; /* + * Check the explicitly-specified relations. + * * In CASCADE mode, suck in all referencing relations as well. This * requires multiple iterations to find indirectly-dependent relations. At * each phase, we need to exclusive-lock new rels before looking for their @@ -1262,7 +1451,8 @@ ExecuteTruncate(TruncateStmt *stmt) * soon as we open it, to avoid a faux pas such as holding lock for a long * time on a rel we have no permissions for. 
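ExecuteTruncateGuts() above factors the core of TRUNCATE out so that logical-replication subscribers can reuse it, while the reworded hint keeps the rule that ONLY is rejected on a partitioned table but fine on its partitions. Sketch:

    CREATE TABLE sales (id int) PARTITION BY LIST (id);
    CREATE TABLE sales_1 PARTITION OF sales FOR VALUES IN (1);

    TRUNCATE ONLY sales;      -- ERROR:  cannot truncate only a partitioned table
    TRUNCATE sales;           -- OK, recurses into sales_1
    TRUNCATE ONLY sales_1;    -- OK, a leaf partition can be truncated directly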
*/ - if (stmt->behavior == DROP_CASCADE) + rels = list_copy(explicit_rels); + if (behavior == DROP_CASCADE) { for (;;) { @@ -1281,9 +1471,13 @@ ExecuteTruncate(TruncateStmt *stmt) ereport(NOTICE, (errmsg("truncate cascades to table \"%s\"", RelationGetRelationName(rel)))); - truncate_check_rel(rel); + truncate_check_rel(relid, rel->rd_rel); + truncate_check_activity(rel); rels = lappend(rels, rel); relids = lappend_oid(relids, relid); + /* Log this relation only if needed for logical decoding */ + if (RelationIsLogicallyLogged(rel)) + relids_logged = lappend_oid(relids_logged, relid); } } } @@ -1296,7 +1490,7 @@ ExecuteTruncate(TruncateStmt *stmt) #ifdef USE_ASSERT_CHECKING heap_truncate_check_FKs(rels, false); #else - if (stmt->behavior == DROP_RESTRICT) + if (behavior == DROP_RESTRICT) heap_truncate_check_FKs(rels, false); #endif @@ -1306,7 +1500,7 @@ ExecuteTruncate(TruncateStmt *stmt) * We want to do this early since it's pointless to do all the truncation * work only to fail on sequence permissions. */ - if (stmt->restart_seqs) + if (restart_seqs) { foreach(cell, rels) { @@ -1323,7 +1517,7 @@ ExecuteTruncate(TruncateStmt *stmt) /* This check must match AlterSequence! */ if (!pg_class_ownercheck(seq_relid, GetUserId())) - aclcheck_error(ACLCHECK_NOT_OWNER, ACL_KIND_CLASS, + aclcheck_error(ACLCHECK_NOT_OWNER, OBJECT_SEQUENCE, RelationGetRelationName(seq_rel)); seq_relids = lappend_oid(seq_relids, seq_relid); @@ -1461,6 +1655,42 @@ ExecuteTruncate(TruncateStmt *stmt) ResetSequence(seq_relid); } + /* + * Write a WAL record to allow this set of actions to be logically + * decoded. + * + * Assemble an array of relids so we can write a single WAL record for the + * whole action. + */ + if (list_length(relids_logged) > 0) + { + xl_heap_truncate xlrec; + int i = 0; + + /* should only get here if wal_level >= logical */ + Assert(XLogLogicalInfoActive()); + + logrelids = palloc(list_length(relids_logged) * sizeof(Oid)); + foreach(cell, relids_logged) + logrelids[i++] = lfirst_oid(cell); + + xlrec.dbId = MyDatabaseId; + xlrec.nrelids = list_length(relids_logged); + xlrec.flags = 0; + if (behavior == DROP_CASCADE) + xlrec.flags |= XLH_TRUNCATE_CASCADE; + if (restart_seqs) + xlrec.flags |= XLH_TRUNCATE_RESTART_SEQS; + + XLogBeginInsert(); + XLogRegisterData((char *) &xlrec, SizeOfHeapTruncate); + XLogRegisterData((char *) logrelids, list_length(relids_logged) * sizeof(Oid)); + + XLogSetRecordFlags(XLOG_INCLUDE_ORIGIN); + + (void) XLogInsert(RM_HEAP_ID, XLOG_HEAP_TRUNCATE); + } + /* * Process all AFTER STATEMENT TRUNCATE triggers. */ @@ -1478,7 +1708,11 @@ ExecuteTruncate(TruncateStmt *stmt) /* We can clean up the EState now */ FreeExecutorState(estate); - /* And close the rels (can't do this while EState still holds refs) */ + /* + * Close any rels opened by CASCADE (can't do this while EState still + * holds refs) + */ + rels = list_difference_ptr(rels, explicit_rels); foreach(cell, rels) { Relation rel = (Relation) lfirst(cell); @@ -1488,38 +1722,47 @@ ExecuteTruncate(TruncateStmt *stmt) } /* - * Check that a given rel is safe to truncate. Subroutine for ExecuteTruncate + * Check that a given relation is safe to truncate. Subroutine for + * ExecuteTruncate() and RangeVarCallbackForTruncate(). 
*/ static void -truncate_check_rel(Relation rel) +truncate_check_rel(Oid relid, Form_pg_class reltuple) { AclResult aclresult; + char *relname = NameStr(reltuple->relname); /* * Only allow truncate on regular tables and partitioned tables (although, * the latter are only being included here for the following checks; no * physical truncation will occur in their case.) */ - if (rel->rd_rel->relkind != RELKIND_RELATION && - rel->rd_rel->relkind != RELKIND_PARTITIONED_TABLE) + if (reltuple->relkind != RELKIND_RELATION && + reltuple->relkind != RELKIND_PARTITIONED_TABLE) ereport(ERROR, (errcode(ERRCODE_WRONG_OBJECT_TYPE), - errmsg("\"%s\" is not a table", - RelationGetRelationName(rel)))); + errmsg("\"%s\" is not a table", relname))); /* Permissions checks */ - aclresult = pg_class_aclcheck(RelationGetRelid(rel), GetUserId(), - ACL_TRUNCATE); + aclresult = pg_class_aclcheck(relid, GetUserId(), ACL_TRUNCATE); if (aclresult != ACLCHECK_OK) - aclcheck_error(aclresult, ACL_KIND_CLASS, - RelationGetRelationName(rel)); + aclcheck_error(aclresult, get_relkind_objtype(reltuple->relkind), + relname); - if (!allowSystemTableMods && IsSystemRelation(rel)) + if (!allowSystemTableMods && IsSystemClass(relid, reltuple)) ereport(ERROR, (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE), errmsg("permission denied: \"%s\" is a system catalog", - RelationGetRelationName(rel)))); + relname))); +} +/* + * Set of extra sanity checks to check if a given relation is safe to + * truncate. This is split with truncate_check_rel() as + * RangeVarCallbackForTruncate() cannot open a Relation yet. + */ +static void +truncate_check_activity(Relation rel) +{ /* * Don't allow truncate on temp tables of other backends ... their local * buffer manager is not going to cope. @@ -1649,17 +1892,6 @@ MergeAttributes(List *schema, List *supers, char relpersistence, errmsg("tables can have at most %d columns", MaxHeapAttributeNumber))); - /* - * In case of a partition, there are no new column definitions, only dummy - * ColumnDefs created for column constraints. We merge them with the - * constraints inherited from the parent. - */ - if (is_partition) - { - saved_schema = schema; - schema = NIL; - } - /* * Check for duplicate names in the explicit list of attributes. * @@ -1673,17 +1905,19 @@ MergeAttributes(List *schema, List *supers, char relpersistence, ListCell *rest = lnext(entry); ListCell *prev = entry; - if (coldef->typeName == NULL) - + if (!is_partition && coldef->typeName == NULL) + { /* * Typed table column option that does not belong to a column from * the type. This works because the columns from the type come - * first in the list. + * first in the list. (We omit this check for partition column + * lists; those are processed separately below.) */ ereport(ERROR, (errcode(ERRCODE_UNDEFINED_COLUMN), errmsg("column \"%s\" does not exist", coldef->colname))); + } while (rest != NULL) { @@ -1716,6 +1950,17 @@ MergeAttributes(List *schema, List *supers, char relpersistence, } } + /* + * In case of a partition, there are no new column definitions, only dummy + * ColumnDefs created for column constraints. Set them aside for now and + * process them at the end. + */ + if (is_partition) + { + saved_schema = schema; + schema = NIL; + } + /* * Scan the parents left-to-right, and merge their attributes to form a * list of inherited attributes (inhSchema). 
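[Editor's note: an illustrative two-phase model of the check split in this hunk -- what can be validated from the bare catalog row (relkind, ACL, system-catalog status) is separated from checks that need an opened relation. The structs are invented stand-ins.]

/* standalone illustrative model -- not PostgreSQL code */
#include <stdio.h>
#include <stdbool.h>

typedef struct CatalogRow { char relkind; bool is_system; } CatalogRow;
typedef struct OpenRel    { CatalogRow row; bool temp_of_other_backend; } OpenRel;

/* Phase 1: usable from a name-lookup callback, before opening the relation. */
static bool check_rel(const CatalogRow *row)
{
    if (row->relkind != 'r' && row->relkind != 'p')
        return false;               /* "not a table" */
    if (row->is_system)
        return false;               /* "is a system catalog" */
    return true;
}

/* Phase 2: needs the opened relation. */
static bool check_activity(const OpenRel *rel)
{
    return !rel->temp_of_other_backend;
}

int main(void)
{
    OpenRel rel = { .row = { .relkind = 'r', .is_system = false },
                    .temp_of_other_backend = false };

    printf("phase1=%d phase2=%d\n", check_rel(&rel.row), check_activity(&rel));
    return 0;
}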
Also check to see if we need @@ -1750,6 +1995,14 @@ MergeAttributes(List *schema, List *supers, char relpersistence, else relation = heap_openrv(parent, AccessExclusiveLock); + /* + * Check for active uses of the parent partitioned table in the + * current transaction, such as being used in some manner by an + * enclosing command. + */ + if (is_partition) + CheckTableNotInUse(relation, "CREATE TABLE .. PARTITION OF"); + /* * We do not allow partitioned tables and partitions to participate in * regular inheritance. @@ -1773,6 +2026,19 @@ MergeAttributes(List *schema, List *supers, char relpersistence, (errcode(ERRCODE_WRONG_OBJECT_TYPE), errmsg("inherited relation \"%s\" is not a table or foreign table", parent->relname))); + + /* + * If the parent is permanent, so must be all of its partitions. Note + * that inheritance allows that case. + */ + if (is_partition && + relation->rd_rel->relpersistence != RELPERSISTENCE_TEMP && + relpersistence == RELPERSISTENCE_TEMP) + ereport(ERROR, + (errcode(ERRCODE_WRONG_OBJECT_TYPE), + errmsg("cannot create a temporary relation as partition of permanent relation \"%s\"", + RelationGetRelationName(relation)))); + /* Permanent rels cannot inherit from temporary ones */ if (relpersistence != RELPERSISTENCE_TEMP && relation->rd_rel->relpersistence == RELPERSISTENCE_TEMP) @@ -1797,7 +2063,7 @@ MergeAttributes(List *schema, List *supers, char relpersistence, * demand that creator of a child table own the parent. */ if (!pg_class_ownercheck(RelationGetRelid(relation), GetUserId())) - aclcheck_error(ACLCHECK_NOT_OWNER, ACL_KIND_CLASS, + aclcheck_error(ACLCHECK_NOT_OWNER, get_relkind_objtype(relation->rd_rel->relkind), RelationGetRelationName(relation)); /* @@ -1828,7 +2094,8 @@ MergeAttributes(List *schema, List *supers, char relpersistence, for (parent_attno = 1; parent_attno <= tupleDesc->natts; parent_attno++) { - Form_pg_attribute attribute = tupleDesc->attrs[parent_attno - 1]; + Form_pg_attribute attribute = TupleDescAttr(tupleDesc, + parent_attno - 1); char *attributeName = NameStr(attribute->attname); int exist_attno; ColumnDef *def; @@ -1910,7 +2177,6 @@ MergeAttributes(List *schema, List *supers, char relpersistence, def->is_local = false; def->is_not_null = attribute->attnotnull; def->is_from_type = false; - def->is_from_parent = true; def->storage = attribute->attstorage; def->raw_default = NULL; def->cooked_default = NULL; @@ -2163,59 +2429,51 @@ MergeAttributes(List *schema, List *supers, char relpersistence, /* * Now that we have the column definition list for a partition, we can * check whether the columns referenced in the column constraint specs - * actually exist. Also, we merge the constraints into the corresponding - * column definitions. + * actually exist. Also, we merge NOT NULL and defaults into each + * corresponding column definition. */ - if (is_partition && list_length(saved_schema) > 0) + if (is_partition) { - schema = list_concat(schema, saved_schema); - - foreach(entry, schema) + foreach(entry, saved_schema) { - ColumnDef *coldef = lfirst(entry); - ListCell *rest = lnext(entry); - ListCell *prev = entry; + ColumnDef *restdef = lfirst(entry); + bool found = false; + ListCell *l; - /* - * Partition column option that does not belong to a column from - * the parent. This works because the columns from the parent - * come first in the list (see above). 
- */ - if (coldef->typeName == NULL) - ereport(ERROR, - (errcode(ERRCODE_UNDEFINED_COLUMN), - errmsg("column \"%s\" does not exist", - coldef->colname))); - while (rest != NULL) + foreach(l, schema) { - ColumnDef *restdef = lfirst(rest); - ListCell *next = lnext(rest); /* need to save it in case we - * delete it */ + ColumnDef *coldef = lfirst(l); if (strcmp(coldef->colname, restdef->colname) == 0) { + found = true; + coldef->is_not_null |= restdef->is_not_null; + /* - * merge the column options into the column from the - * parent + * Override the parent's default value for this column + * (coldef->cooked_default) with the partition's local + * definition (restdef->raw_default), if there's one. It + * should be physically impossible to get a cooked default + * in the local definition or a raw default in the + * inherited definition, but make sure they're nulls, for + * future-proofing. */ - if (coldef->is_from_parent) + Assert(restdef->cooked_default == NULL); + Assert(coldef->raw_default == NULL); + if (restdef->raw_default) { - coldef->is_not_null = restdef->is_not_null; coldef->raw_default = restdef->raw_default; - coldef->cooked_default = restdef->cooked_default; - coldef->constraints = restdef->constraints; - coldef->is_from_parent = false; - list_delete_cell(schema, rest, prev); + coldef->cooked_default = NULL; } - else - ereport(ERROR, - (errcode(ERRCODE_DUPLICATE_COLUMN), - errmsg("column \"%s\" specified more than once", - coldef->colname))); } - prev = rest; - rest = next; } + + /* complain for constraints on columns not in parent */ + if (!found) + ereport(ERROR, + (errcode(ERRCODE_UNDEFINED_COLUMN), + errmsg("column \"%s\" does not exist", + restdef->colname))); } } @@ -2254,7 +2512,7 @@ MergeAttributes(List *schema, List *supers, char relpersistence, * * constraints is a list of CookedConstraint structs for previous constraints. * - * Returns TRUE if merged (constraint is a duplicate), or FALSE if it's + * Returns true if merged (constraint is a duplicate), or false if it's * got a so-far-unique name, or throws error if conflict. 
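[Editor's note: a compact sketch of the partition column-constraint merge loop in this hunk -- for each constraint-only ColumnDef set aside earlier, the matching inherited column is found, NOT NULL is OR'd in, and a locally supplied default overrides the one cooked from the parent; an unmatched name raises "column does not exist". Field and function names below are invented stand-ins.]

/* standalone illustrative model -- not PostgreSQL code */
#include <stdio.h>
#include <string.h>
#include <stdbool.h>

typedef struct Col {
    const char *name;
    bool        not_null;
    const char *raw_default;     /* partition-local default, if any */
    const char *cooked_default;  /* default inherited from the parent */
} Col;

static bool merge_one(Col *schema, int nschema, const Col *restdef)
{
    for (int i = 0; i < nschema; i++)
    {
        if (strcmp(schema[i].name, restdef->name) != 0)
            continue;
        schema[i].not_null |= restdef->not_null;
        if (restdef->raw_default)                 /* local default wins */
        {
            schema[i].raw_default = restdef->raw_default;
            schema[i].cooked_default = NULL;
        }
        return true;
    }
    return false;                                 /* "column does not exist" */
}

int main(void)
{
    Col schema[] = { { "a", false, NULL, "0" }, { "b", false, NULL, NULL } };
    Col restdef  = { "a", true, "42", NULL };

    if (!merge_one(schema, 2, &restdef))
        fprintf(stderr, "column \"%s\" does not exist\n", restdef.name);
    printf("a: not_null=%d raw=%s\n", schema[0].not_null, schema[0].raw_default);
    return 0;
}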
*/ static bool @@ -2300,7 +2558,7 @@ StoreCatalogInheritance(Oid relationId, List *supers, bool child_is_partition) { Relation relation; - int16 seqNumber; + int32 seqNumber; ListCell *entry; /* @@ -2341,30 +2599,14 @@ StoreCatalogInheritance(Oid relationId, List *supers, */ static void StoreCatalogInheritance1(Oid relationId, Oid parentOid, - int16 seqNumber, Relation inhRelation, + int32 seqNumber, Relation inhRelation, bool child_is_partition) { - TupleDesc desc = RelationGetDescr(inhRelation); - Datum values[Natts_pg_inherits]; - bool nulls[Natts_pg_inherits]; ObjectAddress childobject, parentobject; - HeapTuple tuple; - - /* - * Make the pg_inherits entry - */ - values[Anum_pg_inherits_inhrelid - 1] = ObjectIdGetDatum(relationId); - values[Anum_pg_inherits_inhparent - 1] = ObjectIdGetDatum(parentOid); - values[Anum_pg_inherits_inhseqno - 1] = Int16GetDatum(seqNumber); - - memset(nulls, 0, sizeof(nulls)); - - tuple = heap_form_tuple(desc, values, nulls); - - CatalogTupleInsert(inhRelation, tuple); - heap_freetuple(tuple); + /* store the pg_inherits row */ + StoreSingleInheritance(relationId, parentOid, seqNumber); /* * Store a dependency too @@ -2488,6 +2730,7 @@ renameatt_check(Oid myrelid, Form_pg_class classform, bool recursing) relkind != RELKIND_MATVIEW && relkind != RELKIND_COMPOSITE_TYPE && relkind != RELKIND_INDEX && + relkind != RELKIND_PARTITIONED_INDEX && relkind != RELKIND_FOREIGN_TABLE && relkind != RELKIND_PARTITIONED_TABLE) ereport(ERROR, @@ -2499,7 +2742,7 @@ renameatt_check(Oid myrelid, Form_pg_class classform, bool recursing) * permissions checking. only the owner of a class can change its schema. */ if (!pg_class_ownercheck(myrelid, GetUserId())) - aclcheck_error(ACLCHECK_NOT_OWNER, ACL_KIND_CLASS, + aclcheck_error(ACLCHECK_NOT_OWNER, get_relkind_objtype(get_rel_relkind(myrelid)), NameStr(classform->relname)); if (!allowSystemTableMods && IsSystemClass(myrelid, classform)) ereport(ERROR, @@ -2687,7 +2930,7 @@ renameatt(RenameStmt *stmt) /* lock level taken here should match renameatt_internal */ relid = RangeVarGetRelidExtended(stmt->relation, AccessExclusiveLock, - stmt->missing_ok, false, + stmt->missing_ok ? RVR_MISSING_OK : 0, RangeVarCallbackForRenameAttribute, NULL); @@ -2839,7 +3082,7 @@ RenameConstraint(RenameStmt *stmt) { /* lock level taken here should match rename_constraint_internal */ relid = RangeVarGetRelidExtended(stmt->relation, AccessExclusiveLock, - stmt->missing_ok, false, + stmt->missing_ok ? RVR_MISSING_OK : 0, RangeVarCallbackForRenameAttribute, NULL); if (!OidIsValid(relid)) @@ -2881,7 +3124,7 @@ RenameRelation(RenameStmt *stmt) * escalation. */ relid = RangeVarGetRelidExtended(stmt->relation, AccessExclusiveLock, - stmt->missing_ok, false, + stmt->missing_ok ? RVR_MISSING_OK : 0, RangeVarCallbackForAlterRelation, (void *) stmt); @@ -2903,12 +3146,6 @@ RenameRelation(RenameStmt *stmt) /* * RenameRelationInternal - change the name of a relation - * - * XXX - When renaming sequences, we don't bother to modify the - * sequence name that is stored within the sequence itself - * (this would cause problems with MVCC). In the future, - * the sequence name should probably be removed from the - * sequence, AFAIK there's no need for it to be there. */ void RenameRelationInternal(Oid myrelid, const char *newrelname, bool is_internal) @@ -2967,7 +3204,8 @@ RenameRelationInternal(Oid myrelid, const char *newrelname, bool is_internal) /* * Also rename the associated constraint, if any. 
*/ - if (targetrelation->rd_rel->relkind == RELKIND_INDEX) + if (targetrelation->rd_rel->relkind == RELKIND_INDEX || + targetrelation->rd_rel->relkind == RELKIND_PARTITIONED_INDEX) { Oid constraintId = get_index_constraint(myrelid); @@ -3021,6 +3259,7 @@ CheckTableNotInUse(Relation rel, const char *stmt) stmt, RelationGetRelationName(rel)))); if (rel->rd_rel->relkind != RELKIND_INDEX && + rel->rd_rel->relkind != RELKIND_PARTITIONED_INDEX && AfterTriggerPendingOnRel(RelationGetRelid(rel))) ereport(ERROR, (errcode(ERRCODE_OBJECT_IN_USE), @@ -3038,7 +3277,8 @@ CheckTableNotInUse(Relation rel, const char *stmt) Oid AlterTableLookupRelation(AlterTableStmt *stmt, LOCKMODE lockmode) { - return RangeVarGetRelidExtended(stmt->relation, lockmode, stmt->missing_ok, false, + return RangeVarGetRelidExtended(stmt->relation, lockmode, + stmt->missing_ok ? RVR_MISSING_OK : 0, RangeVarCallbackForAlterRelation, (void *) stmt); } @@ -3267,6 +3507,7 @@ AlterTableGetLockLevel(List *cmds) case AT_ProcessedConstraint: /* becomes AT_AddConstraint */ case AT_AddConstraintRecurse: /* becomes AT_AddConstraint */ case AT_ReAddConstraint: /* becomes AT_AddConstraint */ + case AT_ReAddDomainConstraint: /* becomes AT_AddConstraint */ if (IsA(cmd->def, Constraint)) { Constraint *con = (Constraint *) cmd->def; @@ -3517,7 +3758,7 @@ ATPrepCmd(List **wqueue, Relation rel, AlterTableCmd *cmd, case AT_SetStatistics: /* ALTER COLUMN SET STATISTICS */ ATSimpleRecursion(wqueue, rel, cmd, recurse, lockmode); /* Performs own permission checks */ - ATPrepSetStatistics(rel, cmd->name, cmd->def, lockmode); + ATPrepSetStatistics(rel, cmd->name, cmd->num, cmd->def, lockmode); pass = AT_PASS_MISC; break; case AT_SetOptions: /* ALTER COLUMN SET ( options ) */ @@ -3636,7 +3877,8 @@ ATPrepCmd(List **wqueue, Relation rel, AlterTableCmd *cmd, pass = AT_PASS_DROP; break; case AT_SetTableSpace: /* SET TABLESPACE */ - ATSimplePermissions(rel, ATT_TABLE | ATT_MATVIEW | ATT_INDEX); + ATSimplePermissions(rel, ATT_TABLE | ATT_MATVIEW | ATT_INDEX | + ATT_PARTITIONED_INDEX); /* This command never recurses */ ATPrepSetTableSpace(tab, rel, cmd->name, lockmode); pass = AT_PASS_MISC; /* doesn't actually matter */ @@ -3711,6 +3953,10 @@ ATPrepCmd(List **wqueue, Relation rel, AlterTableCmd *cmd, pass = AT_PASS_MISC; break; case AT_AttachPartition: + ATSimplePermissions(rel, ATT_TABLE | ATT_PARTITIONED_INDEX); + /* No command-specific prep needed */ + pass = AT_PASS_MISC; + break; case AT_DetachPartition: ATSimplePermissions(rel, ATT_TABLE); /* No command-specific prep needed */ @@ -3767,7 +4013,9 @@ ATRewriteCatalogs(List **wqueue, LOCKMODE lockmode) rel = relation_open(tab->relid, NoLock); foreach(lcmd, subcmds) - ATExecCmd(wqueue, tab, rel, (AlterTableCmd *) lfirst(lcmd), lockmode); + ATExecCmd(wqueue, tab, rel, + castNode(AlterTableCmd, lfirst(lcmd)), + lockmode); /* * After the ALTER TYPE pass, do cleanup work (this is not done in @@ -3840,7 +4088,7 @@ ATExecCmd(List **wqueue, AlteredTableInfo *tab, Relation rel, address = ATExecSetNotNull(tab, rel, cmd->name, lockmode); break; case AT_SetStatistics: /* ALTER COLUMN SET STATISTICS */ - address = ATExecSetStatistics(rel, cmd->name, cmd->def, lockmode); + address = ATExecSetStatistics(rel, cmd->name, cmd->num, cmd->def, lockmode); break; case AT_SetOptions: /* ALTER COLUMN SET ( options ) */ address = ATExecSetOptions(rel, cmd->name, cmd->def, false, lockmode); @@ -3884,6 +4132,13 @@ ATExecCmd(List **wqueue, AlteredTableInfo *tab, Relation rel, ATExecAddConstraint(wqueue, tab, rel, (Constraint *) 
cmd->def, true, true, lockmode); break; + case AT_ReAddDomainConstraint: /* Re-add pre-existing domain check + * constraint */ + address = + AlterDomainAddConstraint(((AlterDomainStmt *) cmd->def)->typeName, + ((AlterDomainStmt *) cmd->def)->def, + NULL); + break; case AT_ReAddComment: /* Re-add existing comment */ address = CommentObject((CommentStmt *) cmd->def); break; @@ -3959,10 +4214,13 @@ ATExecCmd(List **wqueue, AlteredTableInfo *tab, Relation rel, */ break; case AT_SetTableSpace: /* SET TABLESPACE */ - /* - * Nothing to do here; Phase 3 does the work + * Only do this for partitioned indexes, for which this is just + * a catalog change. Other relation types are handled by Phase 3. */ + if (rel->rd_rel->relkind == RELKIND_PARTITIONED_INDEX) + ATExecPartedIdxSetTableSpace(rel, tab->newTableSpace); + break; case AT_SetRelOptions: /* SET (...) */ case AT_ResetRelOptions: /* RESET (...) */ @@ -4050,9 +4308,15 @@ ATExecCmd(List **wqueue, AlteredTableInfo *tab, Relation rel, ATExecGenericOptions(rel, (List *) cmd->def); break; case AT_AttachPartition: - ATExecAttachPartition(wqueue, rel, (PartitionCmd *) cmd->def); + if (rel->rd_rel->relkind == RELKIND_PARTITIONED_TABLE) + ATExecAttachPartition(wqueue, rel, (PartitionCmd *) cmd->def); + else + ATExecAttachPartitionIdx(wqueue, rel, + ((PartitionCmd *) cmd->def)->name); break; case AT_DetachPartition: + /* ATPrepCmd ensures it must be a table */ + Assert(rel->rd_rel->relkind == RELKIND_PARTITIONED_TABLE); ATExecDetachPartition(rel, ((PartitionCmd *) cmd->def)->name); break; default: /* oops */ @@ -4086,9 +4350,13 @@ ATRewriteTables(AlterTableStmt *parsetree, List **wqueue, LOCKMODE lockmode) { AlteredTableInfo *tab = (AlteredTableInfo *) lfirst(ltab); - /* Foreign tables have no storage, nor do partitioned tables. */ + /* + * Foreign tables have no storage, nor do partitioned tables and + * indexes. + */ if (tab->relkind == RELKIND_FOREIGN_TABLE || - tab->relkind == RELKIND_PARTITIONED_TABLE) + tab->relkind == RELKIND_PARTITIONED_TABLE || + tab->relkind == RELKIND_PARTITIONED_INDEX) continue; /* @@ -4412,8 +4680,9 @@ ATRewriteTable(AlteredTableInfo *tab, Oid OIDNewHeap, LOCKMODE lockmode) */ for (i = 0; i < newTupDesc->natts; i++) { - if (newTupDesc->attrs[i]->attnotnull && - !newTupDesc->attrs[i]->attisdropped) + Form_pg_attribute attr = TupleDescAttr(newTupDesc, i); + + if (attr->attnotnull && !attr->attisdropped) notnull_attrs = lappend_int(notnull_attrs, i); } if (notnull_attrs) @@ -4477,7 +4746,7 @@ ATRewriteTable(AlteredTableInfo *tab, Oid OIDNewHeap, LOCKMODE lockmode) */ for (i = 0; i < newTupDesc->natts; i++) { - if (newTupDesc->attrs[i]->attisdropped) + if (TupleDescAttr(newTupDesc, i)->attisdropped) dropped_attrs = lappend_int(dropped_attrs, i); } @@ -4513,7 +4782,7 @@ ATRewriteTable(AlteredTableInfo *tab, Oid OIDNewHeap, LOCKMODE lockmode) * Process supplied expressions to replace selected columns. * Expression inputs come from the old tuple. 
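[Editor's note: a rough sketch of the relkind dispatch this hunk adds in the ALTER TABLE executor -- ATTACH PARTITION now targets either a table partition or an index partition depending on the relation's kind, and SET TABLESPACE on a partitioned index is a catalog-only change rather than a Phase 3 rewrite. Everything below is an invented stand-in.]

/* standalone illustrative model -- not PostgreSQL code */
#include <stdio.h>

enum relkind_model { REL_TABLE = 'r', REL_PARTITIONED_TABLE = 'p',
                     REL_INDEX = 'i', REL_PARTITIONED_INDEX = 'I' };

static void attach_partition(char relkind)
{
    if (relkind == REL_PARTITIONED_TABLE)
        puts("attach table partition (validate rows against bound)");
    else
        puts("attach index partition (catalog-only)");
}

static void set_tablespace(char relkind)
{
    if (relkind == REL_PARTITIONED_INDEX)
        puts("update reltablespace only; no storage to move");
    else
        puts("defer to Phase 3: rewrite storage in the new tablespace");
}

int main(void)
{
    attach_partition(REL_PARTITIONED_TABLE);
    attach_partition(REL_PARTITIONED_INDEX);
    set_tablespace(REL_PARTITIONED_INDEX);
    return 0;
}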
*/ - ExecStoreTuple(tuple, oldslot, InvalidBuffer, false); + ExecStoreHeapTuple(tuple, oldslot, false); econtext->ecxt_scantuple = oldslot; foreach(l, tab->newvals) @@ -4543,19 +4812,23 @@ ATRewriteTable(AlteredTableInfo *tab, Oid OIDNewHeap, LOCKMODE lockmode) } /* Now check any constraints on the possibly-changed tuple */ - ExecStoreTuple(tuple, newslot, InvalidBuffer, false); + ExecStoreHeapTuple(tuple, newslot, false); econtext->ecxt_scantuple = newslot; foreach(l, notnull_attrs) { int attn = lfirst_int(l); - if (heap_attisnull(tuple, attn + 1)) + if (heap_attisnull(tuple, attn + 1, newTupDesc)) + { + Form_pg_attribute attr = TupleDescAttr(newTupDesc, attn); + ereport(ERROR, (errcode(ERRCODE_NOT_NULL_VIOLATION), errmsg("column \"%s\" contains null values", - NameStr(newTupDesc->attrs[attn]->attname)), + NameStr(attr->attname)), errtablecol(oldrel, attn + 1))); + } } foreach(l, tab->constraints) @@ -4582,9 +4855,16 @@ ATRewriteTable(AlteredTableInfo *tab, Oid OIDNewHeap, LOCKMODE lockmode) } if (partqualstate && !ExecCheck(partqualstate, econtext)) - ereport(ERROR, - (errcode(ERRCODE_CHECK_VIOLATION), - errmsg("partition constraint is violated by some row"))); + { + if (tab->validate_default) + ereport(ERROR, + (errcode(ERRCODE_CHECK_VIOLATION), + errmsg("updated partition constraint for default partition would be violated by some row"))); + else + ereport(ERROR, + (errcode(ERRCODE_CHECK_VIOLATION), + errmsg("partition constraint is violated by some row"))); + } /* Write the tuple out to the new relation */ if (newrel) @@ -4642,7 +4922,7 @@ ATGetQueueEntry(List **wqueue, Relation rel) tab = (AlteredTableInfo *) palloc0(sizeof(AlteredTableInfo)); tab->relid = relid; tab->relkind = rel->rd_rel->relkind; - tab->oldDesc = CreateTupleDescCopy(RelationGetDescr(rel)); + tab->oldDesc = CreateTupleDescCopyConstr(RelationGetDescr(rel)); tab->newrelpersistence = RELPERSISTENCE_PERMANENT; tab->chgPersistence = false; @@ -4678,6 +4958,9 @@ ATSimplePermissions(Relation rel, int allowed_targets) case RELKIND_INDEX: actual_target = ATT_INDEX; break; + case RELKIND_PARTITIONED_INDEX: + actual_target = ATT_PARTITIONED_INDEX; + break; case RELKIND_COMPOSITE_TYPE: actual_target = ATT_COMPOSITE_TYPE; break; @@ -4695,7 +4978,7 @@ ATSimplePermissions(Relation rel, int allowed_targets) /* Permissions checks */ if (!pg_class_ownercheck(RelationGetRelid(rel), GetUserId())) - aclcheck_error(ACLCHECK_NOT_OWNER, ACL_KIND_CLASS, + aclcheck_error(ACLCHECK_NOT_OWNER, get_relkind_objtype(rel->rd_rel->relkind), RelationGetRelationName(rel)); if (!allowSystemTableMods && IsSystemRelation(rel)) @@ -4922,7 +5205,7 @@ find_composite_type_dependencies(Oid typeOid, Relation origRelation, continue; rel = relation_open(pg_depend->objid, AccessShareLock); - att = rel->rd_att->attrs[pg_depend->objsubid - 1]; + att = TupleDescAttr(rel->rd_att, pg_depend->objsubid - 1); if (rel->rd_rel->relkind == RELKIND_RELATION || rel->rd_rel->relkind == RELKIND_MATVIEW || @@ -5027,6 +5310,8 @@ find_typed_table_dependencies(Oid typeOid, const char *typeName, DropBehavior be * isn't suitable, throw an error. Currently, we require that the type * originated with CREATE TYPE AS. We could support any row type, but doing so * would require handling a number of extra corner cases in the DDL commands. + * (Also, allowing domain-over-composite would open up a can of worms about + * whether and how the domain's constraints should apply to derived tables.) 
*/ void check_of_type(HeapTuple typetuple) @@ -5247,7 +5532,6 @@ ATExecAddColumn(List **wqueue, AlteredTableInfo *tab, Relation rel, attribute.atttypid = typeOid; attribute.attstattarget = (newattnum > 0) ? -1 : 0; attribute.attlen = tform->typlen; - attribute.attcacheoff = -1; attribute.atttypmod = typmod; attribute.attnum = newattnum; attribute.attbyval = tform->typbyval; @@ -5256,6 +5540,7 @@ ATExecAddColumn(List **wqueue, AlteredTableInfo *tab, Relation rel, attribute.attalign = tform->typalign; attribute.attnotnull = colDef->is_not_null; attribute.atthasdef = false; + attribute.atthasmissing = false; attribute.attidentity = colDef->identity; attribute.attisdropped = false; attribute.attislocal = colDef->is_local; @@ -5300,15 +5585,29 @@ ATExecAddColumn(List **wqueue, AlteredTableInfo *tab, Relation rel, rawEnt->attnum = attribute.attnum; rawEnt->raw_default = copyObject(colDef->raw_default); + /* + * Attempt to skip a complete table rewrite by storing the specified + * DEFAULT value outside of the heap. This may be disabled inside + * AddRelationNewConstraints if the optimization cannot be applied. + */ + rawEnt->missingMode = true; + /* * This function is intended for CREATE TABLE, so it processes a * _list_ of defaults, but we just do one. */ AddRelationNewConstraints(rel, list_make1(rawEnt), NIL, - false, true, false); + false, true, false, NULL); /* Make the additional catalog changes visible */ CommandCounterIncrement(); + + /* + * Did the request for a missing value work? If not we'll have to do a + * rewrite + */ + if (!rawEnt->missingMode) + tab->rewrite |= AT_REWRITE_DEFAULT_VAL; } /* @@ -5342,7 +5641,24 @@ ATExecAddColumn(List **wqueue, AlteredTableInfo *tab, Relation rel, if (relkind != RELKIND_VIEW && relkind != RELKIND_COMPOSITE_TYPE && relkind != RELKIND_FOREIGN_TABLE && attribute.attnum > 0) { - defval = (Expr *) build_column_default(rel, attribute.attnum); + /* + * For an identity column, we can't use build_column_default(), + * because the sequence ownership isn't set yet. So do it manually. + */ + if (colDef->identity) + { + NextValueExpr *nve = makeNode(NextValueExpr); + + nve->seqid = RangeVarGetRelid(colDef->identitySequence, NoLock, false); + nve->typeId = typeOid; + + defval = (Expr *) nve; + + /* must do a rewrite for identity columns */ + tab->rewrite |= AT_REWRITE_DEFAULT_VAL; + } + else + defval = (Expr *) build_column_default(rel, attribute.attnum); if (!defval && DomainHasConstraints(typeOid)) { @@ -5375,16 +5691,21 @@ ATExecAddColumn(List **wqueue, AlteredTableInfo *tab, Relation rel, newval->expr = expression_planner(defval); tab->newvals = lappend(tab->newvals, newval); - tab->rewrite |= AT_REWRITE_DEFAULT_VAL; } - /* - * If the new column is NOT NULL, tell Phase 3 it needs to test that. - * (Note we don't do this for an OID column. OID will be marked not - * null, but since it's filled specially, there's no need to test - * anything.) - */ - tab->new_notnull |= colDef->is_not_null; + if (DomainHasConstraints(typeOid)) + tab->rewrite |= AT_REWRITE_DEFAULT_VAL; + + if (!TupleDescAttr(rel->rd_att, attribute.attnum - 1)->atthasmissing) + { + /* + * If the new column is NOT NULL, and there is no missing value, + * tell Phase 3 it needs to test that. (Note we don't do this for + * an OID column. OID will be marked not null, but since it's + * filled specially, there's no need to test anything.) 
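[Editor's note: a minimal sketch of the "can we skip the table rewrite?" decision visible in this ADD COLUMN hunk -- a rewrite is still forced when the stored-missing-value request is refused, for identity columns, and when the column's domain has constraints. The flag value and helper are invented stand-ins.]

/* standalone illustrative model -- not PostgreSQL code */
#include <stdio.h>
#include <stdbool.h>

#define AT_REWRITE_DEFAULT_VAL 0x01

static int add_column_rewrite_flags(bool missing_mode_honored,
                                    bool is_identity,
                                    bool domain_has_constraints)
{
    int rewrite = 0;

    if (!missing_mode_honored)          /* default could not be pre-evaluated */
        rewrite |= AT_REWRITE_DEFAULT_VAL;
    if (is_identity)                    /* nextval() differs per row */
        rewrite |= AT_REWRITE_DEFAULT_VAL;
    if (domain_has_constraints)         /* must check every produced value */
        rewrite |= AT_REWRITE_DEFAULT_VAL;

    return rewrite;
}

int main(void)
{
    printf("plain constant default : rewrite=%d\n",
           add_column_rewrite_flags(true, false, false));
    printf("volatile default       : rewrite=%d\n",
           add_column_rewrite_flags(false, false, false));
    printf("identity column        : rewrite=%d\n",
           add_column_rewrite_flags(true, true, false));
    return 0;
}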
+ */ + tab->new_notnull |= colDef->is_not_null; + } } /* @@ -5602,6 +5923,7 @@ static ObjectAddress ATExecDropNotNull(Relation rel, const char *colName, LOCKMODE lockmode) { HeapTuple tuple; + Form_pg_attribute attTup; AttrNumber attnum; Relation attr_rel; List *indexoidlist; @@ -5614,14 +5936,13 @@ ATExecDropNotNull(Relation rel, const char *colName, LOCKMODE lockmode) attr_rel = heap_open(AttributeRelationId, RowExclusiveLock); tuple = SearchSysCacheCopyAttName(RelationGetRelid(rel), colName); - if (!HeapTupleIsValid(tuple)) ereport(ERROR, (errcode(ERRCODE_UNDEFINED_COLUMN), errmsg("column \"%s\" of relation \"%s\" does not exist", colName, RelationGetRelationName(rel)))); - - attnum = ((Form_pg_attribute) GETSTRUCT(tuple))->attnum; + attTup = (Form_pg_attribute) GETSTRUCT(tuple); + attnum = attTup->attnum; /* Prevent them from altering a system attribute */ if (attnum <= 0) @@ -5630,7 +5951,7 @@ ATExecDropNotNull(Relation rel, const char *colName, LOCKMODE lockmode) errmsg("cannot alter system column \"%s\"", colName))); - if (get_attidentity(RelationGetRelid(rel), attnum)) + if (attTup->attidentity) ereport(ERROR, (errcode(ERRCODE_SYNTAX_ERROR), errmsg("column \"%s\" of relation \"%s\" is an identity column", @@ -5664,7 +5985,7 @@ ATExecDropNotNull(Relation rel, const char *colName, LOCKMODE lockmode) * Loop over each attribute in the primary key and see if it * matches the to-be-altered attribute */ - for (i = 0; i < indexStruct->indnatts; i++) + for (i = 0; i < indexStruct->indnkeyatts; i++) { if (indexStruct->indkey.values[i] == attnum) ereport(ERROR, @@ -5688,7 +6009,7 @@ ATExecDropNotNull(Relation rel, const char *colName, LOCKMODE lockmode) AttrNumber parent_attnum; parent_attnum = get_attnum(parentId, colName); - if (tupDesc->attrs[parent_attnum - 1]->attnotnull) + if (TupleDescAttr(tupDesc, parent_attnum - 1)->attnotnull) ereport(ERROR, (errcode(ERRCODE_INVALID_TABLE_DEFINITION), errmsg("column \"%s\" is marked NOT NULL in parent table", @@ -5699,9 +6020,9 @@ ATExecDropNotNull(Relation rel, const char *colName, LOCKMODE lockmode) /* * Okay, actually perform the catalog change ... if needed */ - if (((Form_pg_attribute) GETSTRUCT(tuple))->attnotnull) + if (attTup->attnotnull) { - ((Form_pg_attribute) GETSTRUCT(tuple))->attnotnull = FALSE; + attTup->attnotnull = false; CatalogTupleUpdate(attr_rel, &tuple->t_self, tuple); @@ -5721,9 +6042,6 @@ ATExecDropNotNull(Relation rel, const char *colName, LOCKMODE lockmode) /* * ALTER TABLE ALTER COLUMN SET NOT NULL - * - * Return the address of the modified column. If the column was already NOT - * NULL, InvalidObjectAddress is returned. */ static void @@ -5746,6 +6064,10 @@ ATPrepSetNotNull(Relation rel, bool recurse, bool recursing) } } +/* + * Return the address of the modified column. If the column was already NOT + * NULL, InvalidObjectAddress is returned. 
+ */ static ObjectAddress ATExecSetNotNull(AlteredTableInfo *tab, Relation rel, const char *colName, LOCKMODE lockmode) @@ -5782,7 +6104,7 @@ ATExecSetNotNull(AlteredTableInfo *tab, Relation rel, */ if (!((Form_pg_attribute) GETSTRUCT(tuple))->attnotnull) { - ((Form_pg_attribute) GETSTRUCT(tuple))->attnotnull = TRUE; + ((Form_pg_attribute) GETSTRUCT(tuple))->attnotnull = true; CatalogTupleUpdate(attr_rel, &tuple->t_self, tuple); @@ -5812,6 +6134,7 @@ static ObjectAddress ATExecColumnDefault(Relation rel, const char *colName, Node *newDefault, LOCKMODE lockmode) { + TupleDesc tupdesc = RelationGetDescr(rel); AttrNumber attnum; ObjectAddress address; @@ -5832,7 +6155,7 @@ ATExecColumnDefault(Relation rel, const char *colName, errmsg("cannot alter system column \"%s\"", colName))); - if (get_attidentity(RelationGetRelid(rel), attnum)) + if (TupleDescAttr(tupdesc, attnum - 1)->attidentity) ereport(ERROR, (errcode(ERRCODE_SYNTAX_ERROR), errmsg("column \"%s\" of relation \"%s\" is an identity column", @@ -5859,13 +6182,14 @@ ATExecColumnDefault(Relation rel, const char *colName, rawEnt = (RawColumnDefault *) palloc(sizeof(RawColumnDefault)); rawEnt->attnum = attnum; rawEnt->raw_default = newDefault; + rawEnt->missingMode = false; /* * This function is intended for CREATE TABLE, so it processes a * _list_ of defaults, but we just do one. */ AddRelationNewConstraints(rel, list_make1(rawEnt), NIL, - false, true, false); + false, true, false, NULL); } ObjectAddressSubSet(address, RelationRelationId, @@ -6107,7 +6431,7 @@ ATExecDropIdentity(Relation rel, const char *colName, bool missing_ok, LOCKMODE * ALTER TABLE ALTER COLUMN SET STATISTICS */ static void -ATPrepSetStatistics(Relation rel, const char *colName, Node *newValue, LOCKMODE lockmode) +ATPrepSetStatistics(Relation rel, const char *colName, int16 colNum, Node *newValue, LOCKMODE lockmode) { /* * We do our own permission checking because (a) we want to allow SET @@ -6118,6 +6442,7 @@ ATPrepSetStatistics(Relation rel, const char *colName, Node *newValue, LOCKMODE if (rel->rd_rel->relkind != RELKIND_RELATION && rel->rd_rel->relkind != RELKIND_MATVIEW && rel->rd_rel->relkind != RELKIND_INDEX && + rel->rd_rel->relkind != RELKIND_PARTITIONED_INDEX && rel->rd_rel->relkind != RELKIND_FOREIGN_TABLE && rel->rd_rel->relkind != RELKIND_PARTITIONED_TABLE) ereport(ERROR, @@ -6125,9 +6450,20 @@ ATPrepSetStatistics(Relation rel, const char *colName, Node *newValue, LOCKMODE errmsg("\"%s\" is not a table, materialized view, index, or foreign table", RelationGetRelationName(rel)))); + /* + * We allow referencing columns by numbers only for indexes, since table + * column numbers could contain gaps if columns are later dropped. 
+ */ + if (rel->rd_rel->relkind != RELKIND_INDEX && + rel->rd_rel->relkind != RELKIND_PARTITIONED_INDEX && + !colName) + ereport(ERROR, + (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), + errmsg("cannot refer to non-index column by number"))); + /* Permissions checks */ if (!pg_class_ownercheck(RelationGetRelid(rel), GetUserId())) - aclcheck_error(ACLCHECK_NOT_OWNER, ACL_KIND_CLASS, + aclcheck_error(ACLCHECK_NOT_OWNER, get_relkind_objtype(rel->rd_rel->relkind), RelationGetRelationName(rel)); } @@ -6135,7 +6471,7 @@ ATPrepSetStatistics(Relation rel, const char *colName, Node *newValue, LOCKMODE * Return value is the address of the modified column */ static ObjectAddress -ATExecSetStatistics(Relation rel, const char *colName, Node *newValue, LOCKMODE lockmode) +ATExecSetStatistics(Relation rel, const char *colName, int16 colNum, Node *newValue, LOCKMODE lockmode) { int newtarget; Relation attrelation; @@ -6168,22 +6504,52 @@ ATExecSetStatistics(Relation rel, const char *colName, Node *newValue, LOCKMODE attrelation = heap_open(AttributeRelationId, RowExclusiveLock); - tuple = SearchSysCacheCopyAttName(RelationGetRelid(rel), colName); - - if (!HeapTupleIsValid(tuple)) - ereport(ERROR, - (errcode(ERRCODE_UNDEFINED_COLUMN), - errmsg("column \"%s\" of relation \"%s\" does not exist", - colName, RelationGetRelationName(rel)))); - attrtuple = (Form_pg_attribute) GETSTRUCT(tuple); + if (colName) + { + tuple = SearchSysCacheCopyAttName(RelationGetRelid(rel), colName); - attnum = attrtuple->attnum; + if (!HeapTupleIsValid(tuple)) + ereport(ERROR, + (errcode(ERRCODE_UNDEFINED_COLUMN), + errmsg("column \"%s\" of relation \"%s\" does not exist", + colName, RelationGetRelationName(rel)))); + } + else + { + tuple = SearchSysCacheCopyAttNum(RelationGetRelid(rel), colNum); + + if (!HeapTupleIsValid(tuple)) + ereport(ERROR, + (errcode(ERRCODE_UNDEFINED_COLUMN), + errmsg("column number %d of relation \"%s\" does not exist", + colNum, RelationGetRelationName(rel)))); + } + + attrtuple = (Form_pg_attribute) GETSTRUCT(tuple); + + attnum = attrtuple->attnum; if (attnum <= 0) ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), errmsg("cannot alter system column \"%s\"", colName))); + if (rel->rd_rel->relkind == RELKIND_INDEX || + rel->rd_rel->relkind == RELKIND_PARTITIONED_INDEX) + { + if (attnum > rel->rd_index->indnkeyatts) + ereport(ERROR, + (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), + errmsg("cannot alter statistics on included column \"%s\" of index \"%s\"", + NameStr(attrtuple->attname), RelationGetRelationName(rel)))); + else if (rel->rd_index->indkey.values[attnum - 1] != 0) + ereport(ERROR, + (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), + errmsg("cannot alter statistics on non-expression column \"%s\" of index \"%s\"", + NameStr(attrtuple->attname), RelationGetRelationName(rel)), + errhint("Alter statistics on table column instead."))); + } + attrtuple->attstattarget = newtarget; CatalogTupleUpdate(attrelation, &tuple->t_self, tuple); @@ -6383,68 +6749,6 @@ ATPrepDropColumn(List **wqueue, Relation rel, bool recurse, bool recursing, cmd->subtype = AT_DropColumnRecurse; } -/* - * Checks if attnum is a partition attribute for rel - * - * Sets *used_in_expr if attnum is found to be referenced in some partition - * key expression. It's possible for a column to be both used directly and - * as part of an expression; if that happens, *used_in_expr may end up as - * either true or false. That's OK for current uses of this function, because - * *used_in_expr is only used to tailor the error message text. 
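[Editor's note: a small sketch of the validation added for ALTER ... ALTER COLUMN <number> SET STATISTICS -- column numbers are accepted only for indexes, INCLUDE columns are rejected, and ordinary key columns must be altered on the table instead. The index descriptor is an invented stand-in.]

/* standalone illustrative model -- not PostgreSQL code */
#include <stdio.h>

typedef struct IndexDesc {
    int nkeyatts;        /* number of key columns (excludes INCLUDE columns) */
    int indkey[4];       /* 0 means "expression column" */
} IndexDesc;

static const char *check_set_statistics(const IndexDesc *idx, int attnum)
{
    if (attnum > idx->nkeyatts)
        return "cannot alter statistics on included column";
    if (idx->indkey[attnum - 1] != 0)
        return "cannot alter statistics on non-expression column; alter the table column instead";
    return "ok";
}

int main(void)
{
    /* key column 1 references table attribute 3; column 2 is an expression */
    IndexDesc idx = { .nkeyatts = 2, .indkey = { 3, 0, 0, 0 } };

    printf("col 1: %s\n", check_set_statistics(&idx, 1));
    printf("col 2: %s\n", check_set_statistics(&idx, 2));
    return 0;
}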
- */ -static bool -is_partition_attr(Relation rel, AttrNumber attnum, bool *used_in_expr) -{ - PartitionKey key; - int partnatts; - List *partexprs; - ListCell *partexprs_item; - int i; - - if (rel->rd_rel->relkind != RELKIND_PARTITIONED_TABLE) - return false; - - key = RelationGetPartitionKey(rel); - partnatts = get_partition_natts(key); - partexprs = get_partition_exprs(key); - - partexprs_item = list_head(partexprs); - for (i = 0; i < partnatts; i++) - { - AttrNumber partattno = get_partition_col_attnum(key, i); - - if (partattno != 0) - { - if (attnum == partattno) - { - if (used_in_expr) - *used_in_expr = false; - return true; - } - } - else - { - /* Arbitrary expression */ - Node *expr = (Node *) lfirst(partexprs_item); - Bitmapset *expr_attrs = NULL; - - /* Find all attributes referenced */ - pull_varattnos(expr, 1, &expr_attrs); - partexprs_item = lnext(partexprs_item); - - if (bms_is_member(attnum - FirstLowInvalidHeapAttributeNumber, - expr_attrs)) - { - if (used_in_expr) - *used_in_expr = true; - return true; - } - } - } - - return false; -} - /* * Return value is the address of the dropped column. */ @@ -6505,7 +6809,9 @@ ATExecDropColumn(List **wqueue, Relation rel, const char *colName, colName))); /* Don't drop columns used in the partition key */ - if (is_partition_attr(rel, attnum, &is_expr)) + if (has_partition_attrs(rel, + bms_make_singleton(attnum - FirstLowInvalidHeapAttributeNumber), + &is_expr)) { if (!is_expr) ereport(ERROR, @@ -6689,6 +6995,8 @@ ATExecAddIndex(AlteredTableInfo *tab, Relation rel, address = DefineIndex(RelationGetRelid(rel), stmt, InvalidOid, /* no predefined OID */ + InvalidOid, /* no parent index */ + InvalidOid, /* no parent constraint */ true, /* is_alter_table */ check_rights, false, /* check_not_in_use - we did it already */ @@ -6728,11 +7036,21 @@ ATExecAddIndexConstraint(AlteredTableInfo *tab, Relation rel, char *constraintName; char constraintType; ObjectAddress address; + bits16 flags; Assert(IsA(stmt, IndexStmt)); Assert(OidIsValid(index_oid)); Assert(stmt->isconstraint); + /* + * Doing this on partitioned tables is not a simple feature to implement, + * so let's punt for now. + */ + if (rel->rd_rel->relkind == RELKIND_PARTITIONED_TABLE) + ereport(ERROR, + (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), + errmsg("ALTER TABLE / ADD CONSTRAINT USING INDEX is not supported on partitioned tables"))); + indexRel = index_open(index_oid, AccessShareLock); indexName = pstrdup(RelationGetRelationName(indexRel)); @@ -6763,7 +7081,7 @@ ATExecAddIndexConstraint(AlteredTableInfo *tab, Relation rel, /* Extra checks needed if making primary key */ if (stmt->primary) - index_check_primary_key(rel, indexInfo, true); + index_check_primary_key(rel, indexInfo, true, stmt); /* Note we currently don't support EXCLUSION constraints here */ if (stmt->primary) @@ -6772,16 +7090,19 @@ ATExecAddIndexConstraint(AlteredTableInfo *tab, Relation rel, constraintType = CONSTRAINT_UNIQUE; /* Create the catalog entries for the constraint */ + flags = INDEX_CONSTR_CREATE_UPDATE_INDEX | + INDEX_CONSTR_CREATE_REMOVE_OLD_DEPS | + (stmt->initdeferred ? INDEX_CONSTR_CREATE_INIT_DEFERRED : 0) | + (stmt->deferrable ? INDEX_CONSTR_CREATE_DEFERRABLE : 0) | + (stmt->primary ? 
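[Editor's note: a loose sketch of the interface change made by removing the per-column helper above -- callers now build a set of attribute numbers (offset so system attributes fit) and ask whether any member is used by the partition key. The tiny bitset below stands in for the real bitmapset machinery and the offset constant is invented.]

/* standalone illustrative model -- not PostgreSQL code */
#include <stdio.h>
#include <stdbool.h>
#include <stdint.h>

#define ATTNUM_OFFSET 8          /* stand-in for -FirstLowInvalidHeapAttributeNumber */

static uint32_t make_singleton(int attnum)
{
    return 1u << (attnum + ATTNUM_OFFSET);
}

/* does any attribute in 'attrs' participate in the partition key? */
static bool has_partition_attrs(uint32_t partkey_attrs, uint32_t attrs)
{
    return (partkey_attrs & attrs) != 0;
}

int main(void)
{
    /* partition key uses table columns 2 and 5 */
    uint32_t partkey = make_singleton(2) | make_singleton(5);

    printf("drop column 2: blocked=%d\n", has_partition_attrs(partkey, make_singleton(2)));
    printf("drop column 3: blocked=%d\n", has_partition_attrs(partkey, make_singleton(3)));
    return 0;
}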
INDEX_CONSTR_CREATE_MARK_AS_PRIMARY : 0); + address = index_constraint_create(rel, index_oid, + InvalidOid, indexInfo, constraintName, constraintType, - stmt->deferrable, - stmt->initdeferred, - stmt->primary, - true, /* update pg_index */ - true, /* remove old dependencies */ + flags, allowSystemTableMods, false); /* is_internal */ @@ -6831,7 +7152,6 @@ ATExecAddConstraint(List **wqueue, AlteredTableInfo *tab, Relation rel, { if (ConstraintNameIsUsed(CONSTRAINT_RELATION, RelationGetRelid(rel), - RelationGetNamespace(rel), newConstraint->conname)) ereport(ERROR, (errcode(ERRCODE_DUPLICATE_OBJECT), @@ -6847,7 +7167,9 @@ ATExecAddConstraint(List **wqueue, AlteredTableInfo *tab, Relation rel, RelationGetNamespace(rel), NIL); - address = ATAddForeignKeyConstraint(tab, rel, newConstraint, + address = ATAddForeignKeyConstraint(wqueue, tab, rel, + newConstraint, InvalidOid, + recurse, false, lockmode); break; @@ -6903,7 +7225,9 @@ ATAddCheckConstraint(List **wqueue, AlteredTableInfo *tab, Relation rel, list_make1(copyObject(constr)), recursing | is_readd, /* allow_merge */ !recursing, /* is_local */ - is_readd); /* is_internal */ + is_readd, /* is_internal */ + NULL); /* queryString not available + * here */ /* we don't expect more than one constraint here */ Assert(list_length(newcons) <= 1); @@ -7002,8 +7326,9 @@ ATAddCheckConstraint(List **wqueue, AlteredTableInfo *tab, Relation rel, * We do permissions checks here, however. */ static ObjectAddress -ATAddForeignKeyConstraint(AlteredTableInfo *tab, Relation rel, - Constraint *fkconstraint, LOCKMODE lockmode) +ATAddForeignKeyConstraint(List **wqueue, AlteredTableInfo *tab, Relation rel, + Constraint *fkconstraint, Oid parentConstr, + bool recurse, bool recursing, LOCKMODE lockmode) { Relation pkrel; int16 pkattnum[INDEX_MAX_KEYS]; @@ -7042,6 +7367,23 @@ ATAddForeignKeyConstraint(AlteredTableInfo *tab, Relation rel, errmsg("cannot reference partitioned table \"%s\"", RelationGetRelationName(pkrel)))); + if (rel->rd_rel->relkind == RELKIND_PARTITIONED_TABLE) + { + if (!recurse) + ereport(ERROR, + (errcode(ERRCODE_WRONG_OBJECT_TYPE), + errmsg("cannot use ONLY for foreign key on partitioned table \"%s\" referencing relation \"%s\"", + RelationGetRelationName(rel), + RelationGetRelationName(pkrel)))); + if (fkconstraint->skip_validation && !fkconstraint->initially_valid) + ereport(ERROR, + (errcode(ERRCODE_WRONG_OBJECT_TYPE), + errmsg("cannot add NOT VALID foreign key on partitioned table \"%s\" referencing relation \"%s\"", + RelationGetRelationName(rel), + RelationGetRelationName(pkrel)), + errdetail("This feature is not yet supported on partitioned tables."))); + } + if (pkrel->rd_rel->relkind != RELKIND_RELATION) ereport(ERROR, (errcode(ERRCODE_WRONG_OBJECT_TYPE), @@ -7281,13 +7623,15 @@ ATAddForeignKeyConstraint(AlteredTableInfo *tab, Relation rel, CoercionPathType new_pathtype; Oid old_castfunc; Oid new_castfunc; + Form_pg_attribute attr = TupleDescAttr(tab->oldDesc, + fkattnum[i] - 1); /* * Identify coercion pathways from each of the old and new FK-side * column types to the right (foreign) operand type of the pfeqop. * We may assume that pg_constraint.conkey is not changing. 
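[Editor's note: a short sketch of the parameter change visible above -- several booleans collapsed into one flags word for the constraint-from-index path. The flag names mirror the patch; the bit values and the rest are invented for the example.]

/* standalone illustrative model -- not PostgreSQL code */
#include <stdio.h>
#include <stdbool.h>
#include <stdint.h>

#define INDEX_CONSTR_CREATE_MARK_AS_PRIMARY  (1 << 0)
#define INDEX_CONSTR_CREATE_DEFERRABLE       (1 << 1)
#define INDEX_CONSTR_CREATE_INIT_DEFERRED    (1 << 2)
#define INDEX_CONSTR_CREATE_UPDATE_INDEX     (1 << 3)
#define INDEX_CONSTR_CREATE_REMOVE_OLD_DEPS  (1 << 4)

static uint16_t build_constraint_flags(bool primary, bool deferrable, bool initdeferred)
{
    uint16_t flags = INDEX_CONSTR_CREATE_UPDATE_INDEX |
                     INDEX_CONSTR_CREATE_REMOVE_OLD_DEPS;

    if (primary)
        flags |= INDEX_CONSTR_CREATE_MARK_AS_PRIMARY;
    if (deferrable)
        flags |= INDEX_CONSTR_CREATE_DEFERRABLE;
    if (initdeferred)
        flags |= INDEX_CONSTR_CREATE_INIT_DEFERRED;
    return flags;
}

int main(void)
{
    printf("PRIMARY KEY USING INDEX -> flags=0x%x\n",
           build_constraint_flags(true, false, false));
    printf("UNIQUE DEFERRABLE       -> flags=0x%x\n",
           build_constraint_flags(false, true, false));
    return 0;
}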
*/ - old_fktype = tab->oldDesc->attrs[fkattnum[i] - 1]->atttypid; + old_fktype = attr->atttypid; new_fktype = fktype; old_pathtype = findFkeyCast(pfeqop_right, old_fktype, &old_castfunc); @@ -7347,9 +7691,11 @@ ATAddForeignKeyConstraint(AlteredTableInfo *tab, Relation rel, fkconstraint->deferrable, fkconstraint->initdeferred, fkconstraint->initially_valid, + parentConstr, RelationGetRelid(rel), fkattnum, numfks, + numfks, InvalidOid, /* not a domain constraint */ indexOid, RelationGetRelid(pkrel), @@ -7364,7 +7710,6 @@ ATAddForeignKeyConstraint(AlteredTableInfo *tab, Relation rel, NULL, /* no exclusion constraint */ NULL, /* no check constraint */ NULL, - NULL, true, /* islocal */ 0, /* inhcount */ true, /* isnoinherit */ @@ -7372,10 +7717,12 @@ ATAddForeignKeyConstraint(AlteredTableInfo *tab, Relation rel, ObjectAddressSet(address, ConstraintRelationId, constrOid); /* - * Create the triggers that will enforce the constraint. + * Create the triggers that will enforce the constraint. We only want the + * action triggers to appear for the parent partitioned relation, even + * though the constraints also exist below. */ createForeignKeyTriggers(rel, RelationGetRelid(pkrel), fkconstraint, - constrOid, indexOid); + constrOid, indexOid, !recursing); /* * Tell Phase 3 to check that the constraint is satisfied by existing @@ -7399,6 +7746,40 @@ ATAddForeignKeyConstraint(AlteredTableInfo *tab, Relation rel, tab->constraints = lappend(tab->constraints, newcon); } + /* + * When called on a partitioned table, recurse to create the constraint on + * the partitions also. + */ + if (recurse && rel->rd_rel->relkind == RELKIND_PARTITIONED_TABLE) + { + PartitionDesc partdesc; + + partdesc = RelationGetPartitionDesc(rel); + + for (i = 0; i < partdesc->nparts; i++) + { + Oid partitionId = partdesc->oids[i]; + Relation partition = heap_open(partitionId, lockmode); + AlteredTableInfo *childtab; + ObjectAddress childAddr; + + CheckTableNotInUse(partition, "ALTER TABLE"); + + /* Find or create work queue entry for this table */ + childtab = ATGetQueueEntry(wqueue, partition); + + childAddr = + ATAddForeignKeyConstraint(wqueue, childtab, partition, + fkconstraint, constrOid, + recurse, true, lockmode); + + /* Record this constraint as dependent on the parent one */ + recordDependencyOn(&childAddr, &address, DEPENDENCY_INTERNAL_AUTO); + + heap_close(partition, NoLock); + } + } + /* * Close pk table, but keep lock until we've committed. 
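[Editor's note: a simplified sketch of the recursion this hunk adds so that a foreign key declared on a partitioned table is re-created on every partition, each child constraint being recorded as internally dependent on its parent's. The data structures and OIDs below are invented stand-ins.]

/* standalone illustrative model -- not PostgreSQL code */
#include <stdio.h>
#include <stdbool.h>

typedef struct Rel {
    unsigned oid;
    bool     is_partitioned;
    int      nparts;
    unsigned part_oids[4];
} Rel;

static unsigned next_constraint_oid = 3000;

static unsigned add_foreign_key(const Rel *rels, int nrels, unsigned reloid,
                                unsigned parent_constraint)
{
    unsigned constr = next_constraint_oid++;

    printf("create FK constraint %u on rel %u (parent constraint %u)\n",
           constr, reloid, parent_constraint);

    for (int i = 0; i < nrels; i++)
    {
        if (rels[i].oid != reloid || !rels[i].is_partitioned)
            continue;
        for (int p = 0; p < rels[i].nparts; p++)
        {
            unsigned child = add_foreign_key(rels, nrels,
                                             rels[i].part_oids[p], constr);
            printf("  dependency: constraint %u -> %u (internal/auto)\n",
                   child, constr);
        }
    }
    return constr;
}

int main(void)
{
    Rel rels[] = {
        { 100, true,  2, { 101, 102 } },
        { 101, false, 0, { 0 } },
        { 102, false, 0, { 0 } },
    };

    add_foreign_key(rels, 3, 100, 0);
    return 0;
}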
*/ @@ -7427,10 +7808,9 @@ ATExecAlterConstraint(Relation rel, AlterTableCmd *cmd, Constraint *cmdcon; Relation conrel; SysScanDesc scan; - ScanKeyData key; + ScanKeyData skey[3]; HeapTuple contuple; - Form_pg_constraint currcon = NULL; - bool found = false; + Form_pg_constraint currcon; ObjectAddress address; cmdcon = castNode(Constraint, cmd->def); @@ -7440,29 +7820,29 @@ ATExecAlterConstraint(Relation rel, AlterTableCmd *cmd, /* * Find and check the target constraint */ - ScanKeyInit(&key, + ScanKeyInit(&skey[0], Anum_pg_constraint_conrelid, BTEqualStrategyNumber, F_OIDEQ, ObjectIdGetDatum(RelationGetRelid(rel))); - scan = systable_beginscan(conrel, ConstraintRelidIndexId, - true, NULL, 1, &key); - - while (HeapTupleIsValid(contuple = systable_getnext(scan))) - { - currcon = (Form_pg_constraint) GETSTRUCT(contuple); - if (strcmp(NameStr(currcon->conname), cmdcon->conname) == 0) - { - found = true; - break; - } - } + ScanKeyInit(&skey[1], + Anum_pg_constraint_contypid, + BTEqualStrategyNumber, F_OIDEQ, + ObjectIdGetDatum(InvalidOid)); + ScanKeyInit(&skey[2], + Anum_pg_constraint_conname, + BTEqualStrategyNumber, F_NAMEEQ, + CStringGetDatum(cmdcon->conname)); + scan = systable_beginscan(conrel, ConstraintRelidTypidNameIndexId, + true, NULL, 3, skey); - if (!found) + /* There can be at most one matching row */ + if (!HeapTupleIsValid(contuple = systable_getnext(scan))) ereport(ERROR, (errcode(ERRCODE_UNDEFINED_OBJECT), errmsg("constraint \"%s\" of relation \"%s\" does not exist", cmdcon->conname, RelationGetRelationName(rel)))); + currcon = (Form_pg_constraint) GETSTRUCT(contuple); if (currcon->contype != CONSTRAINT_FOREIGN) ereport(ERROR, (errcode(ERRCODE_WRONG_OBJECT_TYPE), @@ -7595,10 +7975,9 @@ ATExecValidateConstraint(Relation rel, char *constrName, bool recurse, { Relation conrel; SysScanDesc scan; - ScanKeyData key; + ScanKeyData skey[3]; HeapTuple tuple; - Form_pg_constraint con = NULL; - bool found = false; + Form_pg_constraint con; ObjectAddress address; conrel = heap_open(ConstraintRelationId, RowExclusiveLock); @@ -7606,29 +7985,29 @@ ATExecValidateConstraint(Relation rel, char *constrName, bool recurse, /* * Find and check the target constraint */ - ScanKeyInit(&key, + ScanKeyInit(&skey[0], Anum_pg_constraint_conrelid, BTEqualStrategyNumber, F_OIDEQ, ObjectIdGetDatum(RelationGetRelid(rel))); - scan = systable_beginscan(conrel, ConstraintRelidIndexId, - true, NULL, 1, &key); - - while (HeapTupleIsValid(tuple = systable_getnext(scan))) - { - con = (Form_pg_constraint) GETSTRUCT(tuple); - if (strcmp(NameStr(con->conname), constrName) == 0) - { - found = true; - break; - } - } + ScanKeyInit(&skey[1], + Anum_pg_constraint_contypid, + BTEqualStrategyNumber, F_OIDEQ, + ObjectIdGetDatum(InvalidOid)); + ScanKeyInit(&skey[2], + Anum_pg_constraint_conname, + BTEqualStrategyNumber, F_NAMEEQ, + CStringGetDatum(constrName)); + scan = systable_beginscan(conrel, ConstraintRelidTypidNameIndexId, + true, NULL, 3, skey); - if (!found) + /* There can be at most one matching row */ + if (!HeapTupleIsValid(tuple = systable_getnext(scan))) ereport(ERROR, (errcode(ERRCODE_UNDEFINED_OBJECT), errmsg("constraint \"%s\" of relation \"%s\" does not exist", constrName, RelationGetRelationName(rel)))); + con = (Form_pg_constraint) GETSTRUCT(tuple); if (con->contype != CONSTRAINT_FOREIGN && con->contype != CONSTRAINT_CHECK) ereport(ERROR, @@ -7661,8 +8040,8 @@ ATExecValidateConstraint(Relation rel, char *constrName, bool recurse, heap_close(refrel, NoLock); /* - * Foreign keys do not inherit, so we purposely 
ignore the - * recursion bit here + * We disallow creating invalid foreign keys to or from + * partitioned tables, so ignoring the recursion bit is okay. */ } else if (con->contype == CONSTRAINT_CHECK) @@ -7872,7 +8251,7 @@ transformFkeyGetPrimaryKey(Relation pkrel, Oid *indexOid, * assume a primary key cannot have expressional elements) */ *attnamelist = NIL; - for (i = 0; i < indexStruct->indnatts; i++) + for (i = 0; i < indexStruct->indnkeyatts; i++) { int pkattno = indexStruct->indkey.values[i]; @@ -7950,11 +8329,11 @@ transformFkeyCheckAttrs(Relation pkrel, * partial index; forget it if there are any expressions, too. Invalid * indexes are out as well. */ - if (indexStruct->indnatts == numattrs && + if (indexStruct->indnkeyatts == numattrs && indexStruct->indisunique && IndexIsValid(indexStruct) && - heap_attisnull(indexTuple, Anum_pg_index_indpred) && - heap_attisnull(indexTuple, Anum_pg_index_indexprs)) + heap_attisnull(indexTuple, Anum_pg_index_indpred, NULL) && + heap_attisnull(indexTuple, Anum_pg_index_indexprs, NULL)) { Datum indclassDatum; bool isnull; @@ -8084,7 +8463,7 @@ checkFkeyPermissions(Relation rel, int16 *attnums, int natts) aclresult = pg_attribute_aclcheck(RelationGetRelid(rel), attnums[i], roleid, ACL_REFERENCES); if (aclresult != ACLCHECK_OK) - aclcheck_error(aclresult, ACL_KIND_CLASS, + aclcheck_error(aclresult, get_relkind_objtype(rel->rd_rel->relkind), RelationGetRelationName(rel)); } } @@ -8155,7 +8534,7 @@ validateCheckConstraint(Relation rel, HeapTuple constrtup) while ((tuple = heap_getnext(scan, ForwardScanDirection)) != NULL) { - ExecStoreTuple(tuple, slot, InvalidBuffer, false); + ExecStoreHeapTuple(tuple, slot, false); if (!ExecCheck(exprstate, econtext)) ereport(ERROR, @@ -8202,16 +8581,16 @@ validateForeignKeyConstraint(char *conname, trig.tgoid = InvalidOid; trig.tgname = conname; trig.tgenabled = TRIGGER_FIRES_ON_ORIGIN; - trig.tgisinternal = TRUE; + trig.tgisinternal = true; trig.tgconstrrelid = RelationGetRelid(pkrel); trig.tgconstrindid = pkindOid; trig.tgconstraint = constraintOid; - trig.tgdeferrable = FALSE; - trig.tginitdeferred = FALSE; + trig.tgdeferrable = false; + trig.tginitdeferred = false; /* we needn't fill in remaining fields */ /* - * See if we can do it with a single LEFT JOIN query. A FALSE result + * See if we can do it with a single LEFT JOIN query. A false result * indicates we must proceed with the fire-the-trigger method. */ if (RI_Initial_Check(&trig, rel, pkrel)) @@ -8301,30 +8680,23 @@ CreateFKCheckTrigger(Oid myRelOid, Oid refRelOid, Constraint *fkconstraint, fk_trigger->args = NIL; (void) CreateTrigger(fk_trigger, NULL, myRelOid, refRelOid, constraintOid, - indexOid, true); + indexOid, InvalidOid, InvalidOid, NULL, true, false); /* Make changes-so-far visible */ CommandCounterIncrement(); } /* - * Create the triggers that implement an FK constraint. - * - * NB: if you change any trigger properties here, see also - * ATExecAlterConstraint. + * createForeignKeyActionTriggers + * Create the referenced-side "action" triggers that implement a foreign + * key. 
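[Editor's note: a small sketch of the lookup change repeated in these hunks -- instead of scanning every constraint of the relation and comparing names, the code probes a composite key of (conrelid, contypid, conname), which can match at most one row. The in-memory "catalog" below is an invented stand-in.]

/* standalone illustrative model -- not PostgreSQL code */
#include <stdio.h>
#include <string.h>

typedef struct ConstraintRow {
    unsigned    conrelid;    /* owning relation, or 0 for a domain constraint */
    unsigned    contypid;    /* owning domain, or 0 for a table constraint */
    const char *conname;
} ConstraintRow;

static const ConstraintRow *lookup(const ConstraintRow *rows, int nrows,
                                   unsigned conrelid, unsigned contypid,
                                   const char *conname)
{
    for (int i = 0; i < nrows; i++)
        if (rows[i].conrelid == conrelid &&
            rows[i].contypid == contypid &&
            strcmp(rows[i].conname, conname) == 0)
            return &rows[i];     /* unique: the index enforces at most one */
    return NULL;
}

int main(void)
{
    ConstraintRow cat[] = {
        { 100, 0, "t_fk" },
        { 100, 0, "t_check" },
        { 0, 200, "dom_check" },
    };
    const ConstraintRow *c = lookup(cat, 3, 100, 0, "t_fk");

    printf("%s\n", c ? c->conname : "constraint does not exist");
    return 0;
}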
*/ static void -createForeignKeyTriggers(Relation rel, Oid refRelOid, Constraint *fkconstraint, - Oid constraintOid, Oid indexOid) +createForeignKeyActionTriggers(Relation rel, Oid refRelOid, Constraint *fkconstraint, + Oid constraintOid, Oid indexOid) { - Oid myRelOid; CreateTrigStmt *fk_trigger; - myRelOid = RelationGetRelid(rel); - - /* Make changes-so-far visible */ - CommandCounterIncrement(); - /* * Build and execute a CREATE CONSTRAINT TRIGGER statement for the ON * DELETE action on the referenced table. @@ -8374,8 +8746,9 @@ createForeignKeyTriggers(Relation rel, Oid refRelOid, Constraint *fkconstraint, } fk_trigger->args = NIL; - (void) CreateTrigger(fk_trigger, NULL, refRelOid, myRelOid, constraintOid, - indexOid, true); + (void) CreateTrigger(fk_trigger, NULL, refRelOid, RelationGetRelid(rel), + constraintOid, + indexOid, InvalidOid, InvalidOid, NULL, true, false); /* Make changes-so-far visible */ CommandCounterIncrement(); @@ -8429,22 +8802,58 @@ createForeignKeyTriggers(Relation rel, Oid refRelOid, Constraint *fkconstraint, } fk_trigger->args = NIL; - (void) CreateTrigger(fk_trigger, NULL, refRelOid, myRelOid, constraintOid, - indexOid, true); - - /* Make changes-so-far visible */ - CommandCounterIncrement(); + (void) CreateTrigger(fk_trigger, NULL, refRelOid, RelationGetRelid(rel), + constraintOid, + indexOid, InvalidOid, InvalidOid, NULL, true, false); +} - /* - * Build and execute CREATE CONSTRAINT TRIGGER statements for the CHECK - * action for both INSERTs and UPDATEs on the referencing table. - */ +/* + * createForeignKeyCheckTriggers + * Create the referencing-side "check" triggers that implement a foreign + * key. + */ +static void +createForeignKeyCheckTriggers(Oid myRelOid, Oid refRelOid, + Constraint *fkconstraint, Oid constraintOid, + Oid indexOid) +{ CreateFKCheckTrigger(myRelOid, refRelOid, fkconstraint, constraintOid, indexOid, true); CreateFKCheckTrigger(myRelOid, refRelOid, fkconstraint, constraintOid, indexOid, false); } +/* + * Create the triggers that implement an FK constraint. + * + * NB: if you change any trigger properties here, see also + * ATExecAlterConstraint. + */ +void +createForeignKeyTriggers(Relation rel, Oid refRelOid, Constraint *fkconstraint, + Oid constraintOid, Oid indexOid, bool create_action) +{ + /* + * For the referenced side, create action triggers, if requested. (If the + * referencing side is partitioned, there is still only one trigger, which + * runs on the referenced side and points to the top of the referencing + * hierarchy.) + */ + if (create_action) + createForeignKeyActionTriggers(rel, refRelOid, fkconstraint, constraintOid, + indexOid); + + /* + * For the referencing side, create the check triggers. We only need + * these on the partitions. 
+ */ + if (rel->rd_rel->relkind != RELKIND_PARTITIONED_TABLE) + createForeignKeyCheckTriggers(RelationGetRelid(rel), refRelOid, + fkconstraint, constraintOid, indexOid); + + CommandCounterIncrement(); +} + /* * ALTER TABLE DROP CONSTRAINT * @@ -8461,7 +8870,7 @@ ATExecDropConstraint(Relation rel, const char *constrName, Relation conrel; Form_pg_constraint con; SysScanDesc scan; - ScanKeyData key; + ScanKeyData skey[3]; HeapTuple tuple; bool found = false; bool is_no_inherit_constraint = false; @@ -8475,22 +8884,28 @@ ATExecDropConstraint(Relation rel, const char *constrName, /* * Find and drop the target constraint */ - ScanKeyInit(&key, + ScanKeyInit(&skey[0], Anum_pg_constraint_conrelid, BTEqualStrategyNumber, F_OIDEQ, ObjectIdGetDatum(RelationGetRelid(rel))); - scan = systable_beginscan(conrel, ConstraintRelidIndexId, - true, NULL, 1, &key); + ScanKeyInit(&skey[1], + Anum_pg_constraint_contypid, + BTEqualStrategyNumber, F_OIDEQ, + ObjectIdGetDatum(InvalidOid)); + ScanKeyInit(&skey[2], + Anum_pg_constraint_conname, + BTEqualStrategyNumber, F_NAMEEQ, + CStringGetDatum(constrName)); + scan = systable_beginscan(conrel, ConstraintRelidTypidNameIndexId, + true, NULL, 3, skey); - while (HeapTupleIsValid(tuple = systable_getnext(scan))) + /* There can be at most one matching row */ + if (HeapTupleIsValid(tuple = systable_getnext(scan))) { ObjectAddress conobj; con = (Form_pg_constraint) GETSTRUCT(tuple); - if (strcmp(NameStr(con->conname), constrName) != 0) - continue; - /* Don't drop inherited constraints */ if (con->coninhcount > 0 && !recursing) ereport(ERROR, @@ -8528,9 +8943,6 @@ ATExecDropConstraint(Relation rel, const char *constrName, performDeletion(&conobj, behavior, 0); found = true; - - /* constraint found and dropped -- no need to keep looping */ - break; } systable_endscan(scan); @@ -8586,27 +8998,23 @@ ATExecDropConstraint(Relation rel, const char *constrName, childrel = heap_open(childrelid, NoLock); CheckTableNotInUse(childrel, "ALTER TABLE"); - ScanKeyInit(&key, + ScanKeyInit(&skey[0], Anum_pg_constraint_conrelid, BTEqualStrategyNumber, F_OIDEQ, ObjectIdGetDatum(childrelid)); - scan = systable_beginscan(conrel, ConstraintRelidIndexId, - true, NULL, 1, &key); - - /* scan for matching tuple - there should only be one */ - while (HeapTupleIsValid(tuple = systable_getnext(scan))) - { - con = (Form_pg_constraint) GETSTRUCT(tuple); - - /* Right now only CHECK constraints can be inherited */ - if (con->contype != CONSTRAINT_CHECK) - continue; - - if (strcmp(NameStr(con->conname), constrName) == 0) - break; - } - - if (!HeapTupleIsValid(tuple)) + ScanKeyInit(&skey[1], + Anum_pg_constraint_contypid, + BTEqualStrategyNumber, F_OIDEQ, + ObjectIdGetDatum(InvalidOid)); + ScanKeyInit(&skey[2], + Anum_pg_constraint_conname, + BTEqualStrategyNumber, F_NAMEEQ, + CStringGetDatum(constrName)); + scan = systable_beginscan(conrel, ConstraintRelidTypidNameIndexId, + true, NULL, 3, skey); + + /* There can be at most one matching row */ + if (!HeapTupleIsValid(tuple = systable_getnext(scan))) ereport(ERROR, (errcode(ERRCODE_UNDEFINED_OBJECT), errmsg("constraint \"%s\" of relation \"%s\" does not exist", @@ -8619,6 +9027,10 @@ ATExecDropConstraint(Relation rel, const char *constrName, con = (Form_pg_constraint) GETSTRUCT(copy_tuple); + /* Right now only CHECK constraints can be inherited */ + if (con->contype != CONSTRAINT_CHECK) + elog(ERROR, "inherited constraint is not a CHECK constraint"); + if (con->coninhcount <= 0) /* shouldn't happen */ elog(ERROR, "relation %u has non-inherited constraint 
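[Editor's note: a brief sketch of which FK triggers end up where after the split shown above -- "action" triggers are created once on the referenced side (only for the topmost call), while "check" triggers go only on referencing relations that actually hold rows. Names are invented stand-ins.]

/* standalone illustrative model -- not PostgreSQL code */
#include <stdio.h>
#include <stdbool.h>

static void create_fk_triggers(const char *rel, bool rel_is_partitioned,
                               bool create_action)
{
    if (create_action)
        printf("%s: create ON DELETE / ON UPDATE action triggers on referenced table\n",
               rel);
    if (!rel_is_partitioned)
        printf("%s: create INSERT / UPDATE check triggers on referencing table\n",
               rel);
}

int main(void)
{
    /* parent partitioned table: action triggers only */
    create_fk_triggers("measurements", true, true);
    /* partitions reached by recursion: check triggers only */
    create_fk_triggers("measurements_2018", false, false);
    return 0;
}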
\"%s\"", childrelid, constrName); @@ -8724,7 +9136,9 @@ ATPrepAlterColumnType(List **wqueue, colName))); /* Don't alter columns used in the partition key */ - if (is_partition_attr(rel, attnum, &is_expr)) + if (has_partition_attrs(rel, + bms_make_singleton(attnum - FirstLowInvalidHeapAttributeNumber), + &is_expr)) { if (!is_expr) ereport(ERROR, @@ -8958,7 +9372,8 @@ ATExecAlterColumnType(AlteredTableInfo *tab, Relation rel, ColumnDef *def = (ColumnDef *) cmd->def; TypeName *typeName = def->typeName; HeapTuple heapTup; - Form_pg_attribute attTup; + Form_pg_attribute attTup, + attOldTup; AttrNumber attnum; HeapTuple typeTuple; Form_pg_type tform; @@ -8984,10 +9399,11 @@ ATExecAlterColumnType(AlteredTableInfo *tab, Relation rel, colName, RelationGetRelationName(rel)))); attTup = (Form_pg_attribute) GETSTRUCT(heapTup); attnum = attTup->attnum; + attOldTup = TupleDescAttr(tab->oldDesc, attnum - 1); /* Check for multiple ALTER TYPE on same column --- can't cope */ - if (attTup->atttypid != tab->oldDesc->attrs[attnum - 1]->atttypid || - attTup->atttypmod != tab->oldDesc->attrs[attnum - 1]->atttypmod) + if (attTup->atttypid != attOldTup->atttypid || + attTup->atttypmod != attOldTup->atttypmod) ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), errmsg("cannot alter type of column \"%s\" twice", @@ -9083,7 +9499,8 @@ ATExecAlterColumnType(AlteredTableInfo *tab, Relation rel, { char relKind = get_rel_relkind(foundObject.objectId); - if (relKind == RELKIND_INDEX) + if (relKind == RELKIND_INDEX || + relKind == RELKIND_PARTITIONED_INDEX) { Assert(foundObject.objectSubId == 0); if (!list_member_oid(tab->changedIndexOids, foundObject.objectId)) @@ -9118,33 +9535,12 @@ ATExecAlterColumnType(AlteredTableInfo *tab, Relation rel, { char *defstring = pg_get_constraintdef_command(foundObject.objectId); - /* - * Put NORMAL dependencies at the front of the list and - * AUTO dependencies at the back. This makes sure that - * foreign-key constraints depending on this column will - * be dropped before unique or primary-key constraints of - * the column; which we must have because the FK - * constraints depend on the indexes belonging to the - * unique constraints. - */ - if (foundDep->deptype == DEPENDENCY_NORMAL) - { - tab->changedConstraintOids = - lcons_oid(foundObject.objectId, - tab->changedConstraintOids); - tab->changedConstraintDefs = - lcons(defstring, - tab->changedConstraintDefs); - } - else - { - tab->changedConstraintOids = - lappend_oid(tab->changedConstraintOids, - foundObject.objectId); - tab->changedConstraintDefs = - lappend(tab->changedConstraintDefs, - defstring); - } + tab->changedConstraintOids = + lappend_oid(tab->changedConstraintOids, + foundObject.objectId); + tab->changedConstraintDefs = + lappend(tab->changedConstraintDefs, + defstring); } break; @@ -9355,7 +9751,7 @@ ATExecAlterColumnType(AlteredTableInfo *tab, Relation rel, RemoveAttrDefault(RelationGetRelid(rel), attnum, DROP_RESTRICT, true, true); - StoreAttrDefault(rel, attnum, defaultexpr, true); + StoreAttrDefault(rel, attnum, defaultexpr, true, false); } ObjectAddressSubSet(address, RelationRelationId, @@ -9484,16 +9880,23 @@ static void ATPostAlterTypeCleanup(List **wqueue, AlteredTableInfo *tab, LOCKMODE lockmode) { ObjectAddress obj; + ObjectAddresses *objects; ListCell *def_item; ListCell *oid_item; + /* + * Collect all the constraints and indexes to drop so we can process them + * in a single call. That way we don't have to worry about dependencies + * among them. 
+ */ + objects = new_object_addresses(); + /* * Re-parse the index and constraint definitions, and attach them to the * appropriate work queue entries. We do this before dropping because in * the case of a FOREIGN KEY constraint, we might not yet have exclusive * lock on the table the constraint is attached to, and we need to get - * that before dropping. It's safe because the parser won't actually look - * at the catalogs to detect the existing entry. + * that before reparsing/dropping. * * We can't rely on the output of deparsing to tell us which relation to * operate on, because concurrent activity might have made the name @@ -9509,17 +9912,30 @@ ATPostAlterTypeCleanup(List **wqueue, AlteredTableInfo *tab, LOCKMODE lockmode) Form_pg_constraint con; Oid relid; Oid confrelid; + char contype; bool conislocal; tup = SearchSysCache1(CONSTROID, ObjectIdGetDatum(oldId)); if (!HeapTupleIsValid(tup)) /* should not happen */ elog(ERROR, "cache lookup failed for constraint %u", oldId); con = (Form_pg_constraint) GETSTRUCT(tup); - relid = con->conrelid; + if (OidIsValid(con->conrelid)) + relid = con->conrelid; + else + { + /* must be a domain constraint */ + relid = get_typ_typrelid(getBaseType(con->contypid)); + if (!OidIsValid(relid)) + elog(ERROR, "could not identify relation associated with constraint %u", oldId); + } confrelid = con->confrelid; + contype = con->contype; conislocal = con->conislocal; ReleaseSysCache(tup); + ObjectAddressSet(obj, ConstraintRelationId, oldId); + add_exact_object_address(&obj, objects); + /* * If the constraint is inherited (only), we don't want to inject a * new definition here; it'll get recreated when ATAddCheckConstraint @@ -9529,6 +9945,15 @@ ATPostAlterTypeCleanup(List **wqueue, AlteredTableInfo *tab, LOCKMODE lockmode) if (!conislocal) continue; + /* + * When rebuilding an FK constraint that references the table we're + * modifying, we might not yet have any lock on the FK's table, so get + * one now. We'll need AccessExclusiveLock for the DROP CONSTRAINT + * step, so there's no value in asking for anything weaker. + */ + if (relid != tab->relid && contype == CONSTRAINT_FOREIGN) + LockRelationOid(relid, AccessExclusiveLock); + ATPostAlterTypeParse(oldId, relid, confrelid, (char *) lfirst(def_item), wqueue, lockmode, tab->rewrite); @@ -9543,31 +9968,18 @@ ATPostAlterTypeCleanup(List **wqueue, AlteredTableInfo *tab, LOCKMODE lockmode) ATPostAlterTypeParse(oldId, relid, InvalidOid, (char *) lfirst(def_item), wqueue, lockmode, tab->rewrite); + + ObjectAddressSet(obj, RelationRelationId, oldId); + add_exact_object_address(&obj, objects); } /* - * Now we can drop the existing constraints and indexes --- constraints - * first, since some of them might depend on the indexes. In fact, we - * have to delete FOREIGN KEY constraints before UNIQUE constraints, but - * we already ordered the constraint list to ensure that would happen. It - * should be okay to use DROP_RESTRICT here, since nothing else should be - * depending on these objects. + * It should be okay to use DROP_RESTRICT here, since nothing else should + * be depending on these objects. 
*/ - foreach(oid_item, tab->changedConstraintOids) - { - obj.classId = ConstraintRelationId; - obj.objectId = lfirst_oid(oid_item); - obj.objectSubId = 0; - performDeletion(&obj, DROP_RESTRICT, PERFORM_DELETION_INTERNAL); - } + performMultipleDeletions(objects, DROP_RESTRICT, PERFORM_DELETION_INTERNAL); - foreach(oid_item, tab->changedIndexOids) - { - obj.classId = RelationRelationId; - obj.objectId = lfirst_oid(oid_item); - obj.objectSubId = 0; - performDeletion(&obj, DROP_RESTRICT, PERFORM_DELETION_INTERNAL); - } + free_object_addresses(objects); /* * The objects will get recreated during subsequent passes over the work @@ -9652,7 +10064,7 @@ ATPostAlterTypeParse(Oid oldId, Oid oldRelId, Oid refRelId, char *cmd, foreach(lcmd, stmt->cmds) { - AlterTableCmd *cmd = (AlterTableCmd *) lfirst(lcmd); + AlterTableCmd *cmd = castNode(AlterTableCmd, lfirst(lcmd)); if (cmd->subtype == AT_AddIndex) { @@ -9676,13 +10088,14 @@ ATPostAlterTypeParse(Oid oldId, Oid oldRelId, Oid refRelId, char *cmd, RebuildConstraintComment(tab, AT_PASS_OLD_INDEX, oldId, - rel, indstmt->idxname); + rel, + NIL, + indstmt->idxname); } else if (cmd->subtype == AT_AddConstraint) { - Constraint *con; + Constraint *con = castNode(Constraint, cmd->def); - con = castNode(Constraint, cmd->def); con->old_pktable_oid = refRelId; /* rewriting neither side of a FK */ if (con->contype == CONSTR_FOREIGN && @@ -9696,13 +10109,41 @@ ATPostAlterTypeParse(Oid oldId, Oid oldRelId, Oid refRelId, char *cmd, RebuildConstraintComment(tab, AT_PASS_OLD_CONSTR, oldId, - rel, con->conname); + rel, + NIL, + con->conname); } else elog(ERROR, "unexpected statement subtype: %d", (int) cmd->subtype); } } + else if (IsA(stm, AlterDomainStmt)) + { + AlterDomainStmt *stmt = (AlterDomainStmt *) stm; + + if (stmt->subtype == 'C') /* ADD CONSTRAINT */ + { + Constraint *con = castNode(Constraint, stmt->def); + AlterTableCmd *cmd = makeNode(AlterTableCmd); + + cmd->subtype = AT_ReAddDomainConstraint; + cmd->def = (Node *) stmt; + tab->subcmds[AT_PASS_OLD_CONSTR] = + lappend(tab->subcmds[AT_PASS_OLD_CONSTR], cmd); + + /* recreate any comment on the constraint */ + RebuildConstraintComment(tab, + AT_PASS_OLD_CONSTR, + oldId, + NULL, + stmt->typeName, + con->conname); + } + else + elog(ERROR, "unexpected statement subtype: %d", + (int) stmt->subtype); + } else elog(ERROR, "unexpected statement type: %d", (int) nodeTag(stm)); @@ -9712,12 +10153,19 @@ ATPostAlterTypeParse(Oid oldId, Oid oldRelId, Oid refRelId, char *cmd, } /* - * Subroutine for ATPostAlterTypeParse() to recreate a comment entry for - * a constraint that is being re-added. + * Subroutine for ATPostAlterTypeParse() to recreate any existing comment + * for a table or domain constraint that is being rebuilt. + * + * objid is the OID of the constraint. + * Pass "rel" for a table constraint, or "domname" (domain's qualified name + * as a string list) for a domain constraint. + * (We could dig that info, as well as the conname, out of the pg_constraint + * entry; but callers already have them so might as well pass them.) 
*/ static void RebuildConstraintComment(AlteredTableInfo *tab, int pass, Oid objid, - Relation rel, char *conname) + Relation rel, List *domname, + const char *conname) { CommentStmt *cmd; char *comment_str; @@ -9728,12 +10176,23 @@ RebuildConstraintComment(AlteredTableInfo *tab, int pass, Oid objid, if (comment_str == NULL) return; - /* Build node CommentStmt */ + /* Build CommentStmt node, copying all input data for safety */ cmd = makeNode(CommentStmt); - cmd->objtype = OBJECT_TABCONSTRAINT; - cmd->object = (Node *) list_make3(makeString(get_namespace_name(RelationGetNamespace(rel))), - makeString(pstrdup(RelationGetRelationName(rel))), - makeString(pstrdup(conname))); + if (rel) + { + cmd->objtype = OBJECT_TABCONSTRAINT; + cmd->object = (Node *) + list_make3(makeString(get_namespace_name(RelationGetNamespace(rel))), + makeString(pstrdup(RelationGetRelationName(rel))), + makeString(pstrdup(conname))); + } + else + { + cmd->objtype = OBJECT_DOMCONSTRAINT; + cmd->object = (Node *) + list_make2(makeTypeNameFromNameList(copyObject(domname)), + makeString(pstrdup(conname))); + } cmd->comment = comment_str; /* Append it to list of commands */ @@ -9871,6 +10330,15 @@ ATExecChangeOwner(Oid relationOid, Oid newOwnerId, bool recursing, LOCKMODE lock newOwnerId = tuple_class->relowner; } break; + case RELKIND_PARTITIONED_INDEX: + if (recursing) + break; + ereport(ERROR, + (errcode(ERRCODE_WRONG_OBJECT_TYPE), + errmsg("cannot change owner of index \"%s\"", + NameStr(tuple_class->relname)), + errhint("Change the ownership of the index's table, instead."))); + break; case RELKIND_SEQUENCE: if (!recursing && tuple_class->relowner != newOwnerId) @@ -9935,7 +10403,7 @@ ATExecChangeOwner(Oid relationOid, Oid newOwnerId, bool recursing, LOCKMODE lock /* Otherwise, must be owner of the existing object */ if (!pg_class_ownercheck(relationOid, GetUserId())) - aclcheck_error(ACLCHECK_NOT_OWNER, ACL_KIND_CLASS, + aclcheck_error(ACLCHECK_NOT_OWNER, get_relkind_objtype(get_rel_relkind(relationOid)), RelationGetRelationName(target_rel)); /* Must be able to become new owner */ @@ -9945,7 +10413,7 @@ ATExecChangeOwner(Oid relationOid, Oid newOwnerId, bool recursing, LOCKMODE lock aclresult = pg_namespace_aclcheck(namespaceOid, newOwnerId, ACL_CREATE); if (aclresult != ACLCHECK_OK) - aclcheck_error(aclresult, ACL_KIND_NAMESPACE, + aclcheck_error(aclresult, OBJECT_SCHEMA, get_namespace_name(namespaceOid)); } } @@ -9992,6 +10460,7 @@ ATExecChangeOwner(Oid relationOid, Oid newOwnerId, bool recursing, LOCKMODE lock */ if (tuple_class->relkind != RELKIND_COMPOSITE_TYPE && tuple_class->relkind != RELKIND_INDEX && + tuple_class->relkind != RELKIND_PARTITIONED_INDEX && tuple_class->relkind != RELKIND_TOASTVALUE) changeDependencyOnOwner(RelationRelationId, relationOid, newOwnerId); @@ -9999,7 +10468,8 @@ ATExecChangeOwner(Oid relationOid, Oid newOwnerId, bool recursing, LOCKMODE lock /* * Also change the ownership of the table's row type, if it has one */ - if (tuple_class->relkind != RELKIND_INDEX) + if (tuple_class->relkind != RELKIND_INDEX && + tuple_class->relkind != RELKIND_PARTITIONED_INDEX) AlterTypeOwnerInternal(tuple_class->reltype, newOwnerId); /* @@ -10008,6 +10478,7 @@ ATExecChangeOwner(Oid relationOid, Oid newOwnerId, bool recursing, LOCKMODE lock * relation, as well as its toast table (if it has one). 
*/ if (tuple_class->relkind == RELKIND_RELATION || + tuple_class->relkind == RELKIND_PARTITIONED_TABLE || tuple_class->relkind == RELKIND_MATVIEW || tuple_class->relkind == RELKIND_TOASTVALUE) { @@ -10024,17 +10495,13 @@ ATExecChangeOwner(Oid relationOid, Oid newOwnerId, bool recursing, LOCKMODE lock list_free(index_oid_list); } - if (tuple_class->relkind == RELKIND_RELATION || - tuple_class->relkind == RELKIND_MATVIEW) - { - /* If it has a toast table, recurse to change its ownership */ - if (tuple_class->reltoastrelid != InvalidOid) - ATExecChangeOwner(tuple_class->reltoastrelid, newOwnerId, - true, lockmode); + /* If it has a toast table, recurse to change its ownership */ + if (tuple_class->reltoastrelid != InvalidOid) + ATExecChangeOwner(tuple_class->reltoastrelid, newOwnerId, + true, lockmode); - /* If it has dependent sequences, recurse to change them too */ - change_owner_recurse_to_sequences(relationOid, newOwnerId, lockmode); - } + /* If it has dependent sequences, recurse to change them too */ + change_owner_recurse_to_sequences(relationOid, newOwnerId, lockmode); } InvokeObjectPostAlterHook(RelationRelationId, relationOid, 0); @@ -10226,7 +10693,7 @@ ATExecDropCluster(Relation rel, LOCKMODE lockmode) * ALTER TABLE SET TABLESPACE */ static void -ATPrepSetTableSpace(AlteredTableInfo *tab, Relation rel, char *tablespacename, LOCKMODE lockmode) +ATPrepSetTableSpace(AlteredTableInfo *tab, Relation rel, const char *tablespacename, LOCKMODE lockmode) { Oid tablespaceId; @@ -10240,7 +10707,7 @@ ATPrepSetTableSpace(AlteredTableInfo *tab, Relation rel, char *tablespacename, L aclresult = pg_tablespace_aclcheck(tablespaceId, GetUserId(), ACL_CREATE); if (aclresult != ACLCHECK_OK) - aclcheck_error(aclresult, ACL_KIND_TABLESPACE, tablespacename); + aclcheck_error(aclresult, OBJECT_TABLESPACE, tablespacename); } /* Save info for Phase 3 to do the real work */ @@ -10316,6 +10783,7 @@ ATExecSetRelOptions(Relation rel, List *defList, AlterTableType operation, (void) view_reloptions(newOptions, true); break; case RELKIND_INDEX: + case RELKIND_PARTITIONED_INDEX: (void) index_reloptions(rel->rd_amroutine->amoptions, newOptions, true); break; default: @@ -10338,7 +10806,7 @@ ATExecSetRelOptions(Relation rel, List *defList, AlterTableType operation, { DefElem *defel = (DefElem *) lfirst(cell); - if (pg_strcasecmp(defel->defname, "check_option") == 0) + if (strcmp(defel->defname, "check_option") == 0) check_option = true; } @@ -10355,7 +10823,7 @@ ATExecSetRelOptions(Relation rel, List *defList, AlterTableType operation, ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), errmsg("WITH CHECK OPTION is supported only on automatically updatable views"), - errhint("%s", view_updatable_error))); + errhint("%s", _(view_updatable_error)))); } } @@ -10619,30 +11087,79 @@ ATExecSetTableSpace(Oid tableOid, Oid newTableSpace, LOCKMODE lockmode) } /* - * Alter Table ALL ... SET TABLESPACE - * - * Allows a user to move all objects of some type in a given tablespace in the - * current database to another tablespace. Objects can be chosen based on the - * owner of the object also, to allow users to move only their objects. - * The user must have CREATE rights on the new tablespace, as usual. The main - * permissions handling is done by the lower-level table move function. - * - * All to-be-moved objects are locked first. If NOWAIT is specified and the - * lock can't be acquired then we ereport(ERROR). 
+ * Special handling of ALTER TABLE SET TABLESPACE for partitioned indexes, + * which have no storage (so not handled in Phase 3 like other relation types) */ -Oid -AlterTableMoveAll(AlterTableMoveAllStmt *stmt) +static void +ATExecPartedIdxSetTableSpace(Relation rel, Oid newTableSpace) { - List *relations = NIL; - ListCell *l; - ScanKeyData key[1]; - Relation rel; - HeapScanDesc scan; HeapTuple tuple; - Oid orig_tablespaceoid; - Oid new_tablespaceoid; - List *role_oids = roleSpecsToIds(stmt->roles); - + Oid oldTableSpace; + Relation pg_class; + Form_pg_class rd_rel; + Oid indexOid = RelationGetRelid(rel); + + Assert(rel->rd_rel->relkind == RELKIND_PARTITIONED_INDEX); + + /* + * No work if no change in tablespace. + */ + oldTableSpace = rel->rd_rel->reltablespace; + if (newTableSpace == oldTableSpace || + (newTableSpace == MyDatabaseTableSpace && oldTableSpace == 0)) + { + InvokeObjectPostAlterHook(RelationRelationId, + indexOid, 0); + return; + } + + /* Get a modifiable copy of the relation's pg_class row */ + pg_class = heap_open(RelationRelationId, RowExclusiveLock); + + tuple = SearchSysCacheCopy1(RELOID, ObjectIdGetDatum(indexOid)); + if (!HeapTupleIsValid(tuple)) + elog(ERROR, "cache lookup failed for relation %u", indexOid); + rd_rel = (Form_pg_class) GETSTRUCT(tuple); + + /* update the pg_class row */ + rd_rel->reltablespace = (newTableSpace == MyDatabaseTableSpace) ? InvalidOid : newTableSpace; + CatalogTupleUpdate(pg_class, &tuple->t_self, tuple); + + InvokeObjectPostAlterHook(RelationRelationId, indexOid, 0); + + heap_freetuple(tuple); + + heap_close(pg_class, RowExclusiveLock); + + /* Make sure the reltablespace change is visible */ + CommandCounterIncrement(); +} + +/* + * Alter Table ALL ... SET TABLESPACE + * + * Allows a user to move all objects of some type in a given tablespace in the + * current database to another tablespace. Objects can be chosen based on the + * owner of the object also, to allow users to move only their objects. + * The user must have CREATE rights on the new tablespace, as usual. The main + * permissions handling is done by the lower-level table move function. + * + * All to-be-moved objects are locked first. If NOWAIT is specified and the + * lock can't be acquired then we ereport(ERROR). 
+ */ +Oid +AlterTableMoveAll(AlterTableMoveAllStmt *stmt) +{ + List *relations = NIL; + ListCell *l; + ScanKeyData key[1]; + Relation rel; + HeapScanDesc scan; + HeapTuple tuple; + Oid orig_tablespaceoid; + Oid new_tablespaceoid; + List *role_oids = roleSpecsToIds(stmt->roles); + /* Ensure we were not asked to move something we can't */ if (stmt->objtype != OBJECT_TABLE && stmt->objtype != OBJECT_INDEX && stmt->objtype != OBJECT_MATVIEW) @@ -10674,7 +11191,7 @@ AlterTableMoveAll(AlterTableMoveAllStmt *stmt) aclresult = pg_tablespace_aclcheck(new_tablespaceoid, GetUserId(), ACL_CREATE); if (aclresult != ACLCHECK_OK) - aclcheck_error(aclresult, ACL_KIND_TABLESPACE, + aclcheck_error(aclresult, OBJECT_TABLESPACE, get_tablespace_name(new_tablespaceoid)); } @@ -10728,7 +11245,8 @@ AlterTableMoveAll(AlterTableMoveAllStmt *stmt) relForm->relkind != RELKIND_RELATION && relForm->relkind != RELKIND_PARTITIONED_TABLE) || (stmt->objtype == OBJECT_INDEX && - relForm->relkind != RELKIND_INDEX) || + relForm->relkind != RELKIND_INDEX && + relForm->relkind != RELKIND_PARTITIONED_INDEX) || (stmt->objtype == OBJECT_MATVIEW && relForm->relkind != RELKIND_MATVIEW)) continue; @@ -10745,7 +11263,7 @@ AlterTableMoveAll(AlterTableMoveAllStmt *stmt) * Caller must be considered an owner on the table to move it. */ if (!pg_class_ownercheck(relOid, GetUserId())) - aclcheck_error(ACLCHECK_NOT_OWNER, ACL_KIND_CLASS, + aclcheck_error(ACLCHECK_NOT_OWNER, get_relkind_objtype(get_rel_relkind(relOid)), NameStr(relForm->relname)); if (stmt->nowait && @@ -10799,21 +11317,14 @@ static void copy_relation_data(SMgrRelation src, SMgrRelation dst, ForkNumber forkNum, char relpersistence) { - char *buf; + PGAlignedBlock buf; Page page; bool use_wal; bool copying_initfork; BlockNumber nblocks; BlockNumber blkno; - /* - * palloc the buffer so that it's MAXALIGN'd. If it were just a local - * char[] array, the compiler might align it on any byte boundary, which - * can seriously hurt transfer speed to and from the kernel; not to - * mention possibly making log_newpage's accesses to the page header fail. - */ - buf = (char *) palloc(BLCKSZ); - page = (Page) buf; + page = (Page) buf.data; /* * The init fork for an unlogged relation in many respects has to be @@ -10837,7 +11348,7 @@ copy_relation_data(SMgrRelation src, SMgrRelation dst, /* If we got a cancel signal during the copy of the data, quit */ CHECK_FOR_INTERRUPTS(); - smgrread(src, forkNum, blkno, buf); + smgrread(src, forkNum, blkno, buf.data); if (!PageIsVerified(page, blkno)) ereport(ERROR, @@ -10863,11 +11374,9 @@ copy_relation_data(SMgrRelation src, SMgrRelation dst, * rel, because there's no need for smgr to schedule an fsync for this * write; we'll do it ourselves below. */ - smgrextend(dst, forkNum, blkno, buf, true); + smgrextend(dst, forkNum, blkno, buf.data, true); } - pfree(buf); - /* * If the rel is WAL-logged, must fsync before commit. We use heap_sync * to ensure that the toast table gets fsync'd too. (For a temp or @@ -10893,10 +11402,10 @@ copy_relation_data(SMgrRelation src, SMgrRelation dst, * We just pass this off to trigger.c. 
*/ static void -ATExecEnableDisableTrigger(Relation rel, char *trigname, +ATExecEnableDisableTrigger(Relation rel, const char *trigname, char fires_when, bool skip_system, LOCKMODE lockmode) { - EnableDisableTrigger(rel, trigname, fires_when, skip_system); + EnableDisableTrigger(rel, trigname, fires_when, skip_system, lockmode); } /* @@ -10905,7 +11414,7 @@ ATExecEnableDisableTrigger(Relation rel, char *trigname, * We just pass this off to rewriteDefine.c. */ static void -ATExecEnableDisableRule(Relation rel, char *rulename, +ATExecEnableDisableRule(Relation rel, const char *rulename, char fires_when, LOCKMODE lockmode) { EnableDisableRule(rel, rulename, fires_when); @@ -11204,7 +11713,8 @@ MergeAttributesIntoExisting(Relation child_rel, Relation parent_rel) for (parent_attno = 1; parent_attno <= parent_natts; parent_attno++) { - Form_pg_attribute attribute = tupleDesc->attrs[parent_attno - 1]; + Form_pg_attribute attribute = TupleDescAttr(tupleDesc, + parent_attno - 1); char *attributeName = NameStr(attribute->attname); /* Ignore dropped columns in the parent. */ @@ -11354,7 +11864,7 @@ MergeConstraintsIntoExisting(Relation child_rel, Relation parent_rel) Anum_pg_constraint_conrelid, BTEqualStrategyNumber, F_OIDEQ, ObjectIdGetDatum(RelationGetRelid(parent_rel))); - parent_scan = systable_beginscan(catalog_relation, ConstraintRelidIndexId, + parent_scan = systable_beginscan(catalog_relation, ConstraintRelidTypidNameIndexId, true, NULL, 1, &parent_key); while (HeapTupleIsValid(parent_tuple = systable_getnext(parent_scan))) @@ -11377,7 +11887,7 @@ MergeConstraintsIntoExisting(Relation child_rel, Relation parent_rel) Anum_pg_constraint_conrelid, BTEqualStrategyNumber, F_OIDEQ, ObjectIdGetDatum(RelationGetRelid(child_rel))); - child_scan = systable_beginscan(catalog_relation, ConstraintRelidIndexId, + child_scan = systable_beginscan(catalog_relation, ConstraintRelidTypidNameIndexId, true, NULL, 1, &child_key); while (HeapTupleIsValid(child_tuple = systable_getnext(child_scan))) @@ -11488,12 +11998,12 @@ ATExecDropInherit(Relation rel, RangeVar *parent, LOCKMODE lockmode) /* Off to RemoveInheritance() where most of the work happens */ RemoveInheritance(rel, parent_rel); - /* keep our lock on the parent relation until commit */ - heap_close(parent_rel, NoLock); - ObjectAddressSet(address, RelationRelationId, RelationGetRelid(parent_rel)); + /* keep our lock on the parent relation until commit */ + heap_close(parent_rel, NoLock); + return address; } @@ -11521,45 +12031,18 @@ RemoveInheritance(Relation child_rel, Relation parent_rel) Relation catalogRelation; SysScanDesc scan; ScanKeyData key[3]; - HeapTuple inheritsTuple, - attributeTuple, + HeapTuple attributeTuple, constraintTuple; List *connames; - bool found = false; + bool found; bool child_is_partition = false; /* If parent_rel is a partitioned table, child_rel must be a partition */ if (parent_rel->rd_rel->relkind == RELKIND_PARTITIONED_TABLE) child_is_partition = true; - /* - * Find and destroy the pg_inherits entry linking the two, or error out if - * there is none. 
- */ - catalogRelation = heap_open(InheritsRelationId, RowExclusiveLock); - ScanKeyInit(&key[0], - Anum_pg_inherits_inhrelid, - BTEqualStrategyNumber, F_OIDEQ, - ObjectIdGetDatum(RelationGetRelid(child_rel))); - scan = systable_beginscan(catalogRelation, InheritsRelidSeqnoIndexId, - true, NULL, 1, key); - - while (HeapTupleIsValid(inheritsTuple = systable_getnext(scan))) - { - Oid inhparent; - - inhparent = ((Form_pg_inherits) GETSTRUCT(inheritsTuple))->inhparent; - if (inhparent == RelationGetRelid(parent_rel)) - { - CatalogTupleDelete(catalogRelation, &inheritsTuple->t_self); - found = true; - break; - } - } - - systable_endscan(scan); - heap_close(catalogRelation, RowExclusiveLock); - + found = DeleteInheritsTuple(RelationGetRelid(child_rel), + RelationGetRelid(parent_rel)); if (!found) { if (child_is_partition) @@ -11625,7 +12108,7 @@ RemoveInheritance(Relation child_rel, Relation parent_rel) Anum_pg_constraint_conrelid, BTEqualStrategyNumber, F_OIDEQ, ObjectIdGetDatum(RelationGetRelid(parent_rel))); - scan = systable_beginscan(catalogRelation, ConstraintRelidIndexId, + scan = systable_beginscan(catalogRelation, ConstraintRelidTypidNameIndexId, true, NULL, 1, key); connames = NIL; @@ -11645,7 +12128,7 @@ RemoveInheritance(Relation child_rel, Relation parent_rel) Anum_pg_constraint_conrelid, BTEqualStrategyNumber, F_OIDEQ, ObjectIdGetDatum(RelationGetRelid(child_rel))); - scan = systable_beginscan(catalogRelation, ConstraintRelidIndexId, + scan = systable_beginscan(catalogRelation, ConstraintRelidTypidNameIndexId, true, NULL, 1, key); while (HeapTupleIsValid(constraintTuple = systable_getnext(scan))) @@ -11817,7 +12300,7 @@ ATExecAddOf(Relation rel, const TypeName *ofTypename, LOCKMODE lockmode) *table_attname; /* Get the next non-dropped type attribute. */ - type_attr = typeTupleDesc->attrs[type_attno - 1]; + type_attr = TupleDescAttr(typeTupleDesc, type_attno - 1); if (type_attr->attisdropped) continue; type_attname = NameStr(type_attr->attname); @@ -11830,7 +12313,8 @@ ATExecAddOf(Relation rel, const TypeName *ofTypename, LOCKMODE lockmode) (errcode(ERRCODE_DATATYPE_MISMATCH), errmsg("table is missing column \"%s\"", type_attname))); - table_attr = tableTupleDesc->attrs[table_attno++ - 1]; + table_attr = TupleDescAttr(tableTupleDesc, table_attno - 1); + table_attno++; } while (table_attr->attisdropped); table_attname = NameStr(table_attr->attname); @@ -11855,7 +12339,8 @@ ATExecAddOf(Relation rel, const TypeName *ofTypename, LOCKMODE lockmode) /* Any remaining columns at the end of the table had better be dropped. */ for (; table_attno <= tableTupleDesc->natts; table_attno++) { - Form_pg_attribute table_attr = tableTupleDesc->attrs[table_attno - 1]; + Form_pg_attribute table_attr = TupleDescAttr(tableTupleDesc, + table_attno - 1); if (!table_attr->attisdropped) ereport(ERROR, @@ -12122,7 +12607,7 @@ ATExecReplicaIdentity(Relation rel, ReplicaIdentityStmt *stmt, LOCKMODE lockmode RelationGetRelationName(indexRel)))); /* Check index for nullable columns. 
*/ - for (key = 0; key < indexRel->rd_index->indnatts; key++) + for (key = 0; key < IndexRelationGetNumberOfKeyAttributes(indexRel); key++) { int16 attno = indexRel->rd_index->indkey.values[key]; Form_pg_attribute attr; @@ -12142,7 +12627,7 @@ ATExecReplicaIdentity(Relation rel, ReplicaIdentityStmt *stmt, LOCKMODE lockmode errmsg("index \"%s\" cannot be used as replica identity because column %d is a system column", RelationGetRelationName(indexRel), attno))); - attr = rel->rd_att->attrs[attno - 1]; + attr = TupleDescAttr(rel->rd_att, attno - 1); if (!attr->attnotnull) ereport(ERROR, (errcode(ERRCODE_WRONG_OBJECT_TYPE), @@ -12384,7 +12869,7 @@ ATPrepChangePersistence(Relation rel, bool toLogged) BTEqualStrategyNumber, F_OIDEQ, ObjectIdGetDatum(RelationGetRelid(rel))); scan = systable_beginscan(pg_constraint, - toLogged ? ConstraintRelidIndexId : InvalidOid, + toLogged ? ConstraintRelidTypidNameIndexId : InvalidOid, true, NULL, 1, skey); while (HeapTupleIsValid(tuple = systable_getnext(scan))) @@ -12452,7 +12937,7 @@ AlterTableNamespace(AlterObjectSchemaStmt *stmt, Oid *oldschema) ObjectAddress myself; relid = RangeVarGetRelidExtended(stmt->relation, AccessExclusiveLock, - stmt->missing_ok, false, + stmt->missing_ok ? RVR_MISSING_OK : 0, RangeVarCallbackForAlterRelation, (void *) stmt); @@ -12808,6 +13293,7 @@ PreCommit_on_commit_actions(void) { ListCell *l; List *oids_to_truncate = NIL; + List *oids_to_drop = NIL; foreach(l, on_commits) { @@ -12834,36 +13320,66 @@ PreCommit_on_commit_actions(void) oids_to_truncate = lappend_oid(oids_to_truncate, oc->relid); break; case ONCOMMIT_DROP: - { - ObjectAddress object; - - object.classId = RelationRelationId; - object.objectId = oc->relid; - object.objectSubId = 0; - - /* - * Since this is an automatic drop, rather than one - * directly initiated by the user, we pass the - * PERFORM_DELETION_INTERNAL flag. - */ - performDeletion(&object, - DROP_CASCADE, PERFORM_DELETION_INTERNAL); - - /* - * Note that table deletion will call - * remove_on_commit_action, so the entry should get marked - * as deleted. - */ - Assert(oc->deleting_subid != InvalidSubTransactionId); - break; - } + oids_to_drop = lappend_oid(oids_to_drop, oc->relid); + break; } } + + /* + * Truncate relations before dropping so that all dependencies between + * relations are removed after they are worked on. Doing it like this + * might be a waste as it is possible that a relation being truncated will + * be dropped anyway due to its parent being dropped, but this makes the + * code more robust because of not having to re-check that the relation + * exists at truncation time. + */ if (oids_to_truncate != NIL) { heap_truncate(oids_to_truncate); CommandCounterIncrement(); /* XXX needed? */ } + if (oids_to_drop != NIL) + { + ObjectAddresses *targetObjects = new_object_addresses(); + ListCell *l; + + foreach(l, oids_to_drop) + { + ObjectAddress object; + + object.classId = RelationRelationId; + object.objectId = lfirst_oid(l); + object.objectSubId = 0; + + Assert(!object_address_present(&object, targetObjects)); + + add_exact_object_address(&object, targetObjects); + } + + /* + * Since this is an automatic drop, rather than one directly initiated + * by the user, we pass the PERFORM_DELETION_INTERNAL flag. + */ + performMultipleDeletions(targetObjects, DROP_CASCADE, + PERFORM_DELETION_INTERNAL | PERFORM_DELETION_QUIETLY); + +#ifdef USE_ASSERT_CHECKING + + /* + * Note that table deletion will call remove_on_commit_action, so the + * entry should get marked as deleted. 
+ */ + foreach(l, on_commits) + { + OnCommitItem *oc = (OnCommitItem *) lfirst(l); + + if (oc->oncommit != ONCOMMIT_DROP) + continue; + + Assert(oc->deleting_subid != InvalidSubTransactionId); + } +#endif + } } /* @@ -12987,7 +13503,29 @@ RangeVarCallbackOwnsTable(const RangeVar *relation, /* Check permissions */ if (!pg_class_ownercheck(relId, GetUserId())) - aclcheck_error(ACLCHECK_NOT_OWNER, ACL_KIND_CLASS, relation->relname); + aclcheck_error(ACLCHECK_NOT_OWNER, get_relkind_objtype(get_rel_relkind(relId)), relation->relname); +} + +/* + * Callback to RangeVarGetRelidExtended() for TRUNCATE processing. + */ +static void +RangeVarCallbackForTruncate(const RangeVar *relation, + Oid relId, Oid oldRelId, void *arg) +{ + HeapTuple tuple; + + /* Nothing to do if the relation was not found. */ + if (!OidIsValid(relId)) + return; + + tuple = SearchSysCache1(RELOID, ObjectIdGetDatum(relId)); + if (!HeapTupleIsValid(tuple)) /* should not happen */ + elog(ERROR, "cache lookup failed for relation %u", relId); + + truncate_check_rel(relId, (Form_pg_class) GETSTRUCT(tuple)); + + ReleaseSysCache(tuple); } /* @@ -13009,7 +13547,7 @@ RangeVarCallbackOwnsRelation(const RangeVar *relation, elog(ERROR, "cache lookup failed for relation %u", relId); if (!pg_class_ownercheck(relId, GetUserId())) - aclcheck_error(ACLCHECK_NOT_OWNER, ACL_KIND_CLASS, + aclcheck_error(ACLCHECK_NOT_OWNER, get_relkind_objtype(get_rel_relkind(relId)), relation->relname); if (!allowSystemTableMods && @@ -13045,7 +13583,7 @@ RangeVarCallbackForAlterRelation(const RangeVar *rv, Oid relid, Oid oldrelid, /* Must own relation. */ if (!pg_class_ownercheck(relid, GetUserId())) - aclcheck_error(ACLCHECK_NOT_OWNER, ACL_KIND_CLASS, rv->relname); + aclcheck_error(ACLCHECK_NOT_OWNER, get_relkind_objtype(get_rel_relkind(relid)), rv->relname); /* No system table modifications unless explicitly allowed. 
*/ if (!allowSystemTableMods && IsSystemClass(relid, classform)) @@ -13065,7 +13603,7 @@ RangeVarCallbackForAlterRelation(const RangeVar *rv, Oid relid, Oid oldrelid, aclresult = pg_namespace_aclcheck(classform->relnamespace, GetUserId(), ACL_CREATE); if (aclresult != ACLCHECK_OK) - aclcheck_error(aclresult, ACL_KIND_NAMESPACE, + aclcheck_error(aclresult, OBJECT_SCHEMA, get_namespace_name(classform->relnamespace)); reltype = ((RenameStmt *) stmt)->renameType; } @@ -13076,8 +13614,8 @@ RangeVarCallbackForAlterRelation(const RangeVar *rv, Oid relid, Oid oldrelid, reltype = ((AlterTableStmt *) stmt)->relkind; else { - reltype = OBJECT_TABLE; /* placate compiler */ elog(ERROR, "unrecognized node type: %d", (int) nodeTag(stmt)); + reltype = OBJECT_TABLE; /* placate compiler */ } /* @@ -13112,7 +13650,8 @@ RangeVarCallbackForAlterRelation(const RangeVar *rv, Oid relid, Oid oldrelid, (errcode(ERRCODE_WRONG_OBJECT_TYPE), errmsg("\"%s\" is not a composite type", rv->relname))); - if (reltype == OBJECT_INDEX && relkind != RELKIND_INDEX + if (reltype == OBJECT_INDEX && relkind != RELKIND_INDEX && + relkind != RELKIND_PARTITIONED_INDEX && !IsA(stmt, RenameStmt)) ereport(ERROR, (errcode(ERRCODE_WRONG_OBJECT_TYPE), @@ -13167,7 +13706,9 @@ transformPartitionSpec(Relation rel, PartitionSpec *partspec, char *strategy) newspec->location = partspec->location; /* Parse partitioning strategy name */ - if (pg_strcasecmp(partspec->strategy, "list") == 0) + if (pg_strcasecmp(partspec->strategy, "hash") == 0) + *strategy = PARTITION_STRATEGY_HASH; + else if (pg_strcasecmp(partspec->strategy, "list") == 0) *strategy = PARTITION_STRATEGY_LIST; else if (pg_strcasecmp(partspec->strategy, "range") == 0) *strategy = PARTITION_STRATEGY_RANGE; @@ -13189,28 +13730,14 @@ transformPartitionSpec(Relation rel, PartitionSpec *partspec, char *strategy) * rangetable entry. We need a ParseState for transformExpr. */ pstate = make_parsestate(NULL); - rte = addRangeTableEntryForRelation(pstate, rel, NULL, false, true); + rte = addRangeTableEntryForRelation(pstate, rel, AccessShareLock, + NULL, false, true); addRTEtoQuery(pstate, rte, true, true, true); /* take care of any partition expressions */ foreach(l, partspec->partParams) { PartitionElem *pelem = castNode(PartitionElem, lfirst(l)); - ListCell *lc; - - /* Check for PARTITION BY ... (foo, foo) */ - foreach(lc, newspec->partParams) - { - PartitionElem *pparam = castNode(PartitionElem, lfirst(lc)); - - if (pelem->name && pparam->name && - strcmp(pelem->name, pparam->name) == 0) - ereport(ERROR, - (errcode(ERRCODE_DUPLICATE_COLUMN), - errmsg("column \"%s\" appears more than once in partition key", - pelem->name), - parser_errposition(pstate, pelem->location))); - } if (pelem->expr) { @@ -13236,11 +13763,13 @@ transformPartitionSpec(Relation rel, PartitionSpec *partspec, char *strategy) * Expressions in the PartitionElems must be parse-analyzed already. 
*/ static void -ComputePartitionAttrs(Relation rel, List *partParams, AttrNumber *partattrs, - List **partexprs, Oid *partopclass, Oid *partcollation) +ComputePartitionAttrs(ParseState *pstate, Relation rel, List *partParams, AttrNumber *partattrs, + List **partexprs, Oid *partopclass, Oid *partcollation, + char strategy) { int attn; ListCell *lc; + Oid am_oid; attn = 0; foreach(lc, partParams) @@ -13261,14 +13790,16 @@ ComputePartitionAttrs(Relation rel, List *partParams, AttrNumber *partattrs, ereport(ERROR, (errcode(ERRCODE_UNDEFINED_COLUMN), errmsg("column \"%s\" named in partition key does not exist", - pelem->name))); + pelem->name), + parser_errposition(pstate, pelem->location))); attform = (Form_pg_attribute) GETSTRUCT(atttuple); if (attform->attnum <= 0) ereport(ERROR, (errcode(ERRCODE_INVALID_OBJECT_DEFINITION), errmsg("cannot use system column \"%s\" in partition key", - pelem->name))); + pelem->name), + parser_errposition(pstate, pelem->location))); partattrs[attn] = attform->attnum; atttype = attform->atttypid; @@ -13400,30 +13931,216 @@ ComputePartitionAttrs(Relation rel, List *partParams, AttrNumber *partattrs, partcollation[attn] = attcollation; /* - * Identify a btree opclass to use. Currently, we use only btree - * operators, which seems enough for list and range partitioning. + * Identify the appropriate operator class. For list and range + * partitioning, we use a btree operator class; hash partitioning uses + * a hash operator class. */ + if (strategy == PARTITION_STRATEGY_HASH) + am_oid = HASH_AM_OID; + else + am_oid = BTREE_AM_OID; + if (!pelem->opclass) { - partopclass[attn] = GetDefaultOpClass(atttype, BTREE_AM_OID); + partopclass[attn] = GetDefaultOpClass(atttype, am_oid); if (!OidIsValid(partopclass[attn])) - ereport(ERROR, - (errcode(ERRCODE_UNDEFINED_OBJECT), - errmsg("data type %s has no default btree operator class", - format_type_be(atttype)), - errhint("You must specify a btree operator class or define a default btree operator class for the data type."))); + { + if (strategy == PARTITION_STRATEGY_HASH) + ereport(ERROR, + (errcode(ERRCODE_UNDEFINED_OBJECT), + errmsg("data type %s has no default hash operator class", + format_type_be(atttype)), + errhint("You must specify a hash operator class or define a default hash operator class for the data type."))); + else + ereport(ERROR, + (errcode(ERRCODE_UNDEFINED_OBJECT), + errmsg("data type %s has no default btree operator class", + format_type_be(atttype)), + errhint("You must specify a btree operator class or define a default btree operator class for the data type."))); + + } } else partopclass[attn] = ResolveOpClass(pelem->opclass, atttype, - "btree", - BTREE_AM_OID); + am_oid == HASH_AM_OID ? "hash" : "btree", + am_oid); attn++; } } +/* + * PartConstraintImpliedByRelConstraint + * Do scanrel's existing constraints imply the partition constraint? + * + * "Existing constraints" include its check constraints and column-level + * NOT NULL constraints. partConstraint describes the partition constraint, + * in implicit-AND form. 
+ */ +bool +PartConstraintImpliedByRelConstraint(Relation scanrel, + List *partConstraint) +{ + List *existConstraint = NIL; + TupleConstr *constr = RelationGetDescr(scanrel)->constr; + int num_check, + i; + + if (constr && constr->has_not_null) + { + int natts = scanrel->rd_att->natts; + + for (i = 1; i <= natts; i++) + { + Form_pg_attribute att = TupleDescAttr(scanrel->rd_att, i - 1); + + if (att->attnotnull && !att->attisdropped) + { + NullTest *ntest = makeNode(NullTest); + + ntest->arg = (Expr *) makeVar(1, + i, + att->atttypid, + att->atttypmod, + att->attcollation, + 0); + ntest->nulltesttype = IS_NOT_NULL; + + /* + * argisrow=false is correct even for a composite column, + * because attnotnull does not represent a SQL-spec IS NOT + * NULL test in such a case, just IS DISTINCT FROM NULL. + */ + ntest->argisrow = false; + ntest->location = -1; + existConstraint = lappend(existConstraint, ntest); + } + } + } + + num_check = (constr != NULL) ? constr->num_check : 0; + for (i = 0; i < num_check; i++) + { + Node *cexpr; + + /* + * If this constraint hasn't been fully validated yet, we must ignore + * it here. + */ + if (!constr->check[i].ccvalid) + continue; + + cexpr = stringToNode(constr->check[i].ccbin); + + /* + * Run each expression through const-simplification and + * canonicalization. It is necessary, because we will be comparing it + * to similarly-processed partition constraint expressions, and may + * fail to detect valid matches without this. + */ + cexpr = eval_const_expressions(NULL, cexpr); + cexpr = (Node *) canonicalize_qual((Expr *) cexpr, true); + + existConstraint = list_concat(existConstraint, + make_ands_implicit((Expr *) cexpr)); + } + + /* + * Try to make the proof. Since we are comparing CHECK constraints, we + * need to use weak implication, i.e., we assume existConstraint is + * not-false and try to prove the same for partConstraint. + * + * Note that predicate_implied_by assumes its first argument is known + * immutable. That should always be true for partition constraints, so we + * don't test it here. + */ + return predicate_implied_by(partConstraint, existConstraint, true); +} + +/* + * QueuePartitionConstraintValidation + * + * Add an entry to wqueue to have the given partition constraint validated by + * Phase 3, for the given relation, and all its children. + * + * We first verify whether the given constraint is implied by pre-existing + * relation constraints; if it is, there's no need to scan the table to + * validate, so don't queue in that case. + */ +static void +QueuePartitionConstraintValidation(List **wqueue, Relation scanrel, + List *partConstraint, + bool validate_default) +{ + /* + * Based on the table's existing constraints, determine whether or not we + * may skip scanning the table. + */ + if (PartConstraintImpliedByRelConstraint(scanrel, partConstraint)) + { + if (!validate_default) + ereport(INFO, + (errmsg("partition constraint for table \"%s\" is implied by existing constraints", + RelationGetRelationName(scanrel)))); + else + ereport(INFO, + (errmsg("updated partition constraint for default partition \"%s\" is implied by existing constraints", + RelationGetRelationName(scanrel)))); + return; + } + + /* + * Constraints proved insufficient. For plain relations, queue a + * validation item now; for partitioned tables, recurse to process each + * partition. + */ + if (scanrel->rd_rel->relkind == RELKIND_RELATION) + { + AlteredTableInfo *tab; + + /* Grab a work queue entry. 
*/ + tab = ATGetQueueEntry(wqueue, scanrel); + Assert(tab->partition_constraint == NULL); + tab->partition_constraint = (Expr *) linitial(partConstraint); + tab->validate_default = validate_default; + } + else if (scanrel->rd_rel->relkind == RELKIND_PARTITIONED_TABLE) + { + PartitionDesc partdesc = RelationGetPartitionDesc(scanrel); + int i; + + for (i = 0; i < partdesc->nparts; i++) + { + Relation part_rel; + bool found_whole_row; + List *thisPartConstraint; + + /* + * This is the minimum lock we need to prevent deadlocks. + */ + part_rel = heap_open(partdesc->oids[i], AccessExclusiveLock); + + /* + * Adjust the constraint for scanrel so that it matches this + * partition's attribute numbers. + */ + thisPartConstraint = + map_partition_varattnos(partConstraint, 1, + part_rel, scanrel, &found_whole_row); + /* There can never be a whole-row reference here */ + if (found_whole_row) + elog(ERROR, "unexpected whole-row reference found in partition constraint"); + + QueuePartitionConstraintValidation(wqueue, part_rel, + thisPartConstraint, + validate_default); + heap_close(part_rel, NoLock); /* keep lock till commit */ + } + } +} + /* * ALTER TABLE ATTACH PARTITION FOR VALUES * @@ -13435,21 +14152,36 @@ ATExecAttachPartition(List **wqueue, Relation rel, PartitionCmd *cmd) Relation attachrel, catalog; List *attachrel_children; - TupleConstr *attachrel_constr; - List *partConstraint, - *existConstraint; + List *partConstraint; SysScanDesc scan; ScanKeyData skey; AttrNumber attno; int natts; TupleDesc tupleDesc; - bool skip_validate = false; ObjectAddress address; const char *trigger_name; bool found_whole_row; + Oid defaultPartOid; + List *partBoundConstraint; + List *cloned; + ListCell *l; + + /* + * We must lock the default partition if one exists, because attaching a + * new partition will change its partition constraint. + */ + defaultPartOid = + get_default_oid_from_partdesc(RelationGetPartitionDesc(rel)); + if (OidIsValid(defaultPartOid)) + LockRelationOid(defaultPartOid, AccessExclusiveLock); attachrel = heap_openrv(cmd->name, AccessExclusiveLock); + /* + * XXX I think it'd be a good idea to grab locks on all tables referenced + * by FKs at this point also. + */ + /* * Must be owner of both parent and source table -- parent was checked by * ATSimplePermissions call in ATPrepCmd @@ -13504,7 +14236,7 @@ ATExecAttachPartition(List **wqueue, Relation rel, PartitionCmd *cmd) * Prevent circularity by seeing if rel is a partition of attachrel. (In * particular, this disallows making a rel a partition of itself.) * - * We do that by checking if rel is a member of the list of attachRel's + * We do that by checking if rel is a member of the list of attachrel's * partitions provided the latter is partitioned at all. We want to avoid * having to construct this list again, so we request the strongest lock * on all partitions. We need the strongest lock, because we may decide @@ -13525,6 +14257,14 @@ ATExecAttachPartition(List **wqueue, Relation rel, PartitionCmd *cmd) RelationGetRelationName(rel), RelationGetRelationName(attachrel)))); + /* If the parent is permanent, so must be all of its partitions. 
*/ + if (rel->rd_rel->relpersistence != RELPERSISTENCE_TEMP && + attachrel->rd_rel->relpersistence == RELPERSISTENCE_TEMP) + ereport(ERROR, + (errcode(ERRCODE_WRONG_OBJECT_TYPE), + errmsg("cannot attach a temporary relation as partition of permanent relation \"%s\"", + RelationGetRelationName(rel)))); + /* Temp parent cannot have a partition that is itself not a temp */ if (rel->rd_rel->relpersistence == RELPERSISTENCE_TEMP && attachrel->rd_rel->relpersistence != RELPERSISTENCE_TEMP) @@ -13568,7 +14308,7 @@ ATExecAttachPartition(List **wqueue, Relation rel, PartitionCmd *cmd) natts = tupleDesc->natts; for (attno = 1; attno <= natts; attno++) { - Form_pg_attribute attribute = tupleDesc->attrs[attno - 1]; + Form_pg_attribute attribute = TupleDescAttr(tupleDesc, attno - 1); char *attributeName = NameStr(attribute->attname); /* Ignore dropped */ @@ -13584,7 +14324,7 @@ ATExecAttachPartition(List **wqueue, Relation rel, PartitionCmd *cmd) errmsg("table \"%s\" contains column \"%s\" not found in parent \"%s\"", RelationGetRelationName(attachrel), attributeName, RelationGetRelationName(rel)), - errdetail("New partition should contain only the columns present in parent."))); + errdetail("The new partition may contain only the columns present in parent."))); } /* @@ -13600,9 +14340,6 @@ ATExecAttachPartition(List **wqueue, Relation rel, PartitionCmd *cmd) trigger_name, RelationGetRelationName(attachrel)), errdetail("ROW triggers with transition tables are not supported on partitions"))); - /* OK to create inheritance. Rest of the checks performed there */ - CreateInheritance(attachrel, rel); - /* * Check that the new partition's bound is valid and does not overlap any * of existing partitions of the parent - note that it does not return on @@ -13611,181 +14348,392 @@ ATExecAttachPartition(List **wqueue, Relation rel, PartitionCmd *cmd) check_new_partition_bound(RelationGetRelationName(attachrel), rel, cmd->bound); + /* OK to create inheritance. Rest of the checks performed there */ + CreateInheritance(attachrel, rel); + /* Update the pg_class entry. */ StorePartitionBound(attachrel, rel, cmd->bound); - /* + /* Ensure there exists a correct set of indexes in the partition. */ + AttachPartitionEnsureIndexes(rel, attachrel); + + /* and triggers */ + CloneRowTriggersToPartition(rel, attachrel); + + /* + * Clone foreign key constraints, and setup for Phase 3 to verify them. + */ + cloned = NIL; + CloneForeignKeyConstraints(RelationGetRelid(rel), + RelationGetRelid(attachrel), &cloned); + foreach(l, cloned) + { + ClonedConstraint *clonedcon = lfirst(l); + NewConstraint *newcon; + Relation clonedrel; + AlteredTableInfo *parttab; + + clonedrel = relation_open(clonedcon->relid, NoLock); + parttab = ATGetQueueEntry(wqueue, clonedrel); + + newcon = (NewConstraint *) palloc0(sizeof(NewConstraint)); + newcon->name = clonedcon->constraint->conname; + newcon->contype = CONSTR_FOREIGN; + newcon->refrelid = clonedcon->refrelid; + newcon->refindid = clonedcon->conindid; + newcon->conid = clonedcon->conid; + newcon->qual = (Node *) clonedcon->constraint; + + parttab->constraints = lappend(parttab->constraints, newcon); + + relation_close(clonedrel, NoLock); + } + + /* * Generate partition constraint from the partition bound specification. * If the parent itself is a partition, make sure to include its * constraint as well. 
*/ - partConstraint = list_concat(get_qual_from_partbound(attachrel, rel, - cmd->bound), + partBoundConstraint = get_qual_from_partbound(attachrel, rel, cmd->bound); + partConstraint = list_concat(partBoundConstraint, RelationGetPartitionQual(rel)); - partConstraint = (List *) eval_const_expressions(NULL, - (Node *) partConstraint); - partConstraint = (List *) canonicalize_qual((Expr *) partConstraint); - partConstraint = list_make1(make_ands_explicit(partConstraint)); + + /* Skip validation if there are no constraints to validate. */ + if (partConstraint) + { + /* + * Run the partition quals through const-simplification similar to + * check constraints. We skip canonicalize_qual, though, because + * partition quals should be in canonical form already. + */ + partConstraint = + (List *) eval_const_expressions(NULL, + (Node *) partConstraint); + + /* XXX this sure looks wrong */ + partConstraint = list_make1(make_ands_explicit(partConstraint)); + + /* + * Adjust the generated constraint to match this partition's attribute + * numbers. + */ + partConstraint = map_partition_varattnos(partConstraint, 1, attachrel, + rel, &found_whole_row); + /* There can never be a whole-row reference here */ + if (found_whole_row) + elog(ERROR, + "unexpected whole-row reference found in partition key"); + + /* Validate partition constraints against the table being attached. */ + QueuePartitionConstraintValidation(wqueue, attachrel, partConstraint, + false); + } /* - * Adjust the generated constraint to match this partition's attribute - * numbers. + * If we're attaching a partition other than the default partition and a + * default one exists, then that partition's partition constraint changes, + * so add an entry to the work queue to validate it, too. (We must not do + * this when the partition being attached is the default one; we already + * did it above!) */ - partConstraint = map_partition_varattnos(partConstraint, 1, attachrel, - rel, &found_whole_row); - /* There can never be a whole-row reference here */ - if (found_whole_row) - elog(ERROR, "unexpected whole-row reference found in partition key"); + if (OidIsValid(defaultPartOid)) + { + Relation defaultrel; + List *defPartConstraint; + + Assert(!cmd->bound->is_default); + + /* we already hold a lock on the default partition */ + defaultrel = heap_open(defaultPartOid, NoLock); + defPartConstraint = + get_proposed_default_constraint(partBoundConstraint); + QueuePartitionConstraintValidation(wqueue, defaultrel, + defPartConstraint, true); + + /* keep our lock until commit. */ + heap_close(defaultrel, NoLock); + } + + ObjectAddressSet(address, RelationRelationId, RelationGetRelid(attachrel)); + + /* keep our lock until commit */ + heap_close(attachrel, NoLock); + + return address; +} + +/* + * AttachPartitionEnsureIndexes + * subroutine for ATExecAttachPartition to create/match indexes + * + * Enforce the indexing rule for partitioned tables during ALTER TABLE / ATTACH + * PARTITION: every partition must have an index attached to each index on the + * partitioned table. 
+ */ +static void +AttachPartitionEnsureIndexes(Relation rel, Relation attachrel) +{ + List *idxes; + List *attachRelIdxs; + Relation *attachrelIdxRels; + IndexInfo **attachInfos; + int i; + ListCell *cell; + MemoryContext cxt; + MemoryContext oldcxt; + + cxt = AllocSetContextCreate(CurrentMemoryContext, + "AttachPartitionEnsureIndexes", + ALLOCSET_DEFAULT_SIZES); + oldcxt = MemoryContextSwitchTo(cxt); + + idxes = RelationGetIndexList(rel); + attachRelIdxs = RelationGetIndexList(attachrel); + attachrelIdxRels = palloc(sizeof(Relation) * list_length(attachRelIdxs)); + attachInfos = palloc(sizeof(IndexInfo *) * list_length(attachRelIdxs)); + + /* Build arrays of all existing indexes and their IndexInfos */ + i = 0; + foreach(cell, attachRelIdxs) + { + Oid cldIdxId = lfirst_oid(cell); + + attachrelIdxRels[i] = index_open(cldIdxId, AccessShareLock); + attachInfos[i] = BuildIndexInfo(attachrelIdxRels[i]); + i++; + } /* - * Check if we can do away with having to scan the table being attached to - * validate the partition constraint, by *proving* that the existing - * constraints of the table *imply* the partition predicate. We include - * the table's check constraints and NOT NULL constraints in the list of - * clauses passed to predicate_implied_by(). - * - * There is a case in which we cannot rely on just the result of the - * proof. + * For each index on the partitioned table, find a matching one in the + * partition-to-be; if one is not found, create one. */ - attachrel_constr = tupleDesc->constr; - existConstraint = NIL; - if (attachrel_constr != NULL) + foreach(cell, idxes) { - int num_check = attachrel_constr->num_check; - int i; + Oid idx = lfirst_oid(cell); + Relation idxRel = index_open(idx, AccessShareLock); + IndexInfo *info; + AttrNumber *attmap; + bool found = false; + Oid constraintOid; - if (attachrel_constr->has_not_null) + /* + * Ignore indexes in the partitioned table other than partitioned + * indexes. + */ + if (idxRel->rd_rel->relkind != RELKIND_PARTITIONED_INDEX) { - int natts = attachrel->rd_att->natts; + index_close(idxRel, AccessShareLock); + continue; + } - for (i = 1; i <= natts; i++) - { - Form_pg_attribute att = attachrel->rd_att->attrs[i - 1]; + /* construct an indexinfo to compare existing indexes against */ + info = BuildIndexInfo(idxRel); + attmap = convert_tuples_by_name_map(RelationGetDescr(attachrel), + RelationGetDescr(rel), + gettext_noop("could not convert row type")); + constraintOid = get_relation_idx_constraint_oid(RelationGetRelid(rel), idx); - if (att->attnotnull && !att->attisdropped) - { - NullTest *ntest = makeNode(NullTest); + /* + * Scan the list of existing indexes in the partition-to-be, and mark + * the first matching, unattached one we find, if any, as partition of + * the parent index. If we find one, we're done. + */ + for (i = 0; i < list_length(attachRelIdxs); i++) + { + Oid cldIdxId = RelationGetRelid(attachrelIdxRels[i]); + Oid cldConstrOid = InvalidOid; - ntest->arg = (Expr *) makeVar(1, - i, - att->atttypid, - att->atttypmod, - att->attcollation, - 0); - ntest->nulltesttype = IS_NOT_NULL; + /* does this index have a parent? if so, can't use it */ + if (attachrelIdxRels[i]->rd_rel->relispartition) + continue; - /* - * argisrow=false is correct even for a composite column, - * because attnotnull does not represent a SQL-spec IS NOT - * NULL test in such a case, just IS DISTINCT FROM NULL. 
- */ - ntest->argisrow = false; - ntest->location = -1; - existConstraint = lappend(existConstraint, ntest); + if (CompareIndexInfo(attachInfos[i], info, + attachrelIdxRels[i]->rd_indcollation, + idxRel->rd_indcollation, + attachrelIdxRels[i]->rd_opfamily, + idxRel->rd_opfamily, + attmap, + RelationGetDescr(rel)->natts)) + { + /* + * If this index is being created in the parent because of a + * constraint, then the child needs to have a constraint also, + * so look for one. If there is no such constraint, this + * index is no good, so keep looking. + */ + if (OidIsValid(constraintOid)) + { + cldConstrOid = + get_relation_idx_constraint_oid(RelationGetRelid(attachrel), + cldIdxId); + /* no dice */ + if (!OidIsValid(cldConstrOid)) + continue; } + + /* bingo. */ + IndexSetParentIndex(attachrelIdxRels[i], idx); + if (OidIsValid(constraintOid)) + ConstraintSetParentConstraint(cldConstrOid, constraintOid); + update_relispartition(NULL, cldIdxId, true); + found = true; + break; } } - for (i = 0; i < num_check; i++) + /* + * If no suitable index was found in the partition-to-be, create one + * now. + */ + if (!found) { - Node *cexpr; - - /* - * If this constraint hasn't been fully validated yet, we must - * ignore it here. - */ - if (!attachrel_constr->check[i].ccvalid) - continue; + IndexStmt *stmt; + Oid constraintOid; + + stmt = generateClonedIndexStmt(NULL, RelationGetRelid(attachrel), + idxRel, attmap, + RelationGetDescr(rel)->natts, + &constraintOid); + DefineIndex(RelationGetRelid(attachrel), stmt, InvalidOid, + RelationGetRelid(idxRel), + constraintOid, + true, false, false, false, false); + } - cexpr = stringToNode(attachrel_constr->check[i].ccbin); + index_close(idxRel, AccessShareLock); + } - /* - * Run each expression through const-simplification and - * canonicalization. It is necessary, because we will be - * comparing it to similarly-processed qual clauses, and may fail - * to detect valid matches without this. - */ - cexpr = eval_const_expressions(NULL, cexpr); - cexpr = (Node *) canonicalize_qual((Expr *) cexpr); + /* Clean up. */ + for (i = 0; i < list_length(attachRelIdxs); i++) + index_close(attachrelIdxRels[i], AccessShareLock); + MemoryContextSwitchTo(oldcxt); + MemoryContextDelete(cxt); +} - existConstraint = list_concat(existConstraint, - make_ands_implicit((Expr *) cexpr)); - } +/* + * CloneRowTriggersToPartition + * subroutine for ATExecAttachPartition/DefineRelation to create row + * triggers on partitions + */ +static void +CloneRowTriggersToPartition(Relation parent, Relation partition) +{ + Relation pg_trigger; + ScanKeyData key; + SysScanDesc scan; + HeapTuple tuple; + MemoryContext oldcxt, + perTupCxt; - existConstraint = list_make1(make_ands_explicit(existConstraint)); + ScanKeyInit(&key, Anum_pg_trigger_tgrelid, BTEqualStrategyNumber, + F_OIDEQ, ObjectIdGetDatum(RelationGetRelid(parent))); + pg_trigger = heap_open(TriggerRelationId, RowExclusiveLock); + scan = systable_beginscan(pg_trigger, TriggerRelidNameIndexId, + true, NULL, 1, &key); - /* And away we go ... */ - if (predicate_implied_by(partConstraint, existConstraint, true)) - skip_validate = true; - } + perTupCxt = AllocSetContextCreate(CurrentMemoryContext, + "clone trig", ALLOCSET_SMALL_SIZES); + oldcxt = MemoryContextSwitchTo(perTupCxt); - if (skip_validate) - { - /* No need to scan the table after all. 
*/ - ereport(INFO, - (errmsg("partition constraint for table \"%s\" is implied by existing constraints", - RelationGetRelationName(attachrel)))); - } - else + while (HeapTupleIsValid(tuple = systable_getnext(scan))) { - /* Constraints proved insufficient, so we need to scan the table. */ - ListCell *lc; + Form_pg_trigger trigForm; + CreateTrigStmt *trigStmt; + Node *qual = NULL; + Datum value; + bool isnull; + List *cols = NIL; + + trigForm = (Form_pg_trigger) GETSTRUCT(tuple); + + /* + * Ignore statement-level triggers; those are not cloned. + */ + if (!TRIGGER_FOR_ROW(trigForm->tgtype)) + continue; + + /* We don't clone internal triggers, either */ + if (trigForm->tgisinternal) + continue; - foreach(lc, attachrel_children) + /* + * Complain if we find an unexpected trigger type. + */ + if (!TRIGGER_FOR_AFTER(trigForm->tgtype)) + elog(ERROR, "unexpected trigger \"%s\" found", + NameStr(trigForm->tgname)); + + /* + * If there is a WHEN clause, generate a 'cooked' version of it that's + * appropriate for the partition. + */ + value = heap_getattr(tuple, Anum_pg_trigger_tgqual, + RelationGetDescr(pg_trigger), &isnull); + if (!isnull) { - AlteredTableInfo *tab; - Oid part_relid = lfirst_oid(lc); - Relation part_rel; - List *my_partconstr = partConstraint; + bool found_whole_row; + + qual = stringToNode(TextDatumGetCString(value)); + qual = (Node *) map_partition_varattnos((List *) qual, PRS2_OLD_VARNO, + partition, parent, + &found_whole_row); + if (found_whole_row) + elog(ERROR, "unexpected whole-row reference found in trigger WHEN clause"); + qual = (Node *) map_partition_varattnos((List *) qual, PRS2_NEW_VARNO, + partition, parent, + &found_whole_row); + if (found_whole_row) + elog(ERROR, "unexpected whole-row reference found in trigger WHEN clause"); + } - /* Lock already taken */ - if (part_relid != RelationGetRelid(attachrel)) - part_rel = heap_open(part_relid, NoLock); - else - part_rel = attachrel; + /* + * If there is a column list, transform it to a list of column names. + * Note we don't need to map this list in any way ... + */ + if (trigForm->tgattr.dim1 > 0) + { + int i; - /* - * Skip if the partition is itself a partitioned table. We can - * only ever scan RELKIND_RELATION relations. - */ - if (part_rel->rd_rel->relkind == RELKIND_PARTITIONED_TABLE) + for (i = 0; i < trigForm->tgattr.dim1; i++) { - if (part_rel != attachrel) - heap_close(part_rel, NoLock); - continue; - } + Form_pg_attribute col; - if (part_rel != attachrel) - { - /* - * Adjust the constraint that we constructed above for - * attachRel so that it matches this partition's attribute - * numbers. - */ - my_partconstr = map_partition_varattnos(my_partconstr, 1, - part_rel, attachrel, - &found_whole_row); - /* There can never be a whole-row reference here */ - if (found_whole_row) - elog(ERROR, "unexpected whole-row reference found in partition key"); + col = TupleDescAttr(parent->rd_att, + trigForm->tgattr.values[i] - 1); + cols = lappend(cols, + makeString(pstrdup(NameStr(col->attname)))); } - - /* Grab a work queue entry. 
*/ - tab = ATGetQueueEntry(wqueue, part_rel); - tab->partition_constraint = (Expr *) linitial(my_partconstr); - - /* keep our lock until commit */ - if (part_rel != attachrel) - heap_close(part_rel, NoLock); } - } - ObjectAddressSet(address, RelationRelationId, RelationGetRelid(attachrel)); + trigStmt = makeNode(CreateTrigStmt); + trigStmt->trigname = NameStr(trigForm->tgname); + trigStmt->relation = NULL; + trigStmt->funcname = NULL; /* passed separately */ + trigStmt->args = NULL; /* passed separately */ + trigStmt->row = true; + trigStmt->timing = trigForm->tgtype & TRIGGER_TYPE_TIMING_MASK; + trigStmt->events = trigForm->tgtype & TRIGGER_TYPE_EVENT_MASK; + trigStmt->columns = cols; + trigStmt->whenClause = NULL; /* passed separately */ + trigStmt->isconstraint = OidIsValid(trigForm->tgconstraint); + trigStmt->transitionRels = NIL; /* not supported at present */ + trigStmt->deferrable = trigForm->tgdeferrable; + trigStmt->initdeferred = trigForm->tginitdeferred; + trigStmt->constrrel = NULL; /* passed separately */ + + CreateTrigger(trigStmt, NULL, RelationGetRelid(partition), + trigForm->tgconstrrelid, InvalidOid, InvalidOid, + trigForm->tgfoid, HeapTupleGetOid(tuple), qual, + false, true); + + MemoryContextReset(perTupCxt); + } - /* keep our lock until commit */ - heap_close(attachrel, NoLock); + MemoryContextSwitchTo(oldcxt); + MemoryContextDelete(perTupCxt); - return address; + systable_endscan(scan); + heap_close(pg_trigger, RowExclusiveLock); } /* @@ -13801,10 +14749,22 @@ ATExecDetachPartition(Relation rel, RangeVar *name) HeapTuple tuple, newtuple; Datum new_val[Natts_pg_class]; - bool isnull, - new_null[Natts_pg_class], + bool new_null[Natts_pg_class], new_repl[Natts_pg_class]; ObjectAddress address; + Oid defaultPartOid; + List *indexes; + List *fks; + ListCell *cell; + + /* + * We must lock the default partition, because detaching this partition + * will change its partition constraint. + */ + defaultPartOid = + get_default_oid_from_partdesc(RelationGetPartitionDesc(rel)); + if (OidIsValid(defaultPartOid)) + LockRelationOid(defaultPartOid, AccessExclusiveLock); partRel = heap_openrv(name, AccessShareLock); @@ -13815,12 +14775,11 @@ ATExecDetachPartition(Relation rel, RangeVar *name) classRel = heap_open(RelationRelationId, RowExclusiveLock); tuple = SearchSysCacheCopy1(RELOID, ObjectIdGetDatum(RelationGetRelid(partRel))); + if (!HeapTupleIsValid(tuple)) + elog(ERROR, "cache lookup failed for relation %u", + RelationGetRelid(partRel)); Assert(((Form_pg_class) GETSTRUCT(tuple))->relispartition); - (void) SysCacheGetAttr(RELOID, tuple, Anum_pg_class_relpartbound, - &isnull); - Assert(!isnull); - /* Clear relpartbound and reset relispartition */ memset(new_val, 0, sizeof(new_val)); memset(new_null, false, sizeof(new_null)); @@ -13834,8 +14793,60 @@ ATExecDetachPartition(Relation rel, RangeVar *name) ((Form_pg_class) GETSTRUCT(newtuple))->relispartition = false; CatalogTupleUpdate(classRel, &newtuple->t_self, newtuple); heap_freetuple(newtuple); + + if (OidIsValid(defaultPartOid)) + { + /* + * If the relation being detached is the default partition itself, + * remove it from the parent's pg_partitioned_table entry. + * + * If not, we must invalidate default partition's relcache entry, as + * in StorePartitionBound: its partition constraint depends on every + * other partition's partition constraint. 
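CloneRowTriggersToPartition is what gives newly created or attached partitions copies of the parent's row-level triggers. A hypothetical sketch of the resulting behaviour, reusing the invented partitioned table m:

    CREATE FUNCTION m_audit() RETURNS trigger LANGUAGE plpgsql
      AS $$ BEGIN RETURN NULL; END $$;

    CREATE TRIGGER m_audit_ins AFTER INSERT ON m
      FOR EACH ROW EXECUTE PROCEDURE m_audit();

    CREATE TABLE m_2019 PARTITION OF m
      FOR VALUES FROM ('2019-01-01') TO ('2020-01-01');
    -- m_2019 receives a clone of m_audit_ins, marked tgisinternal, with any
    -- WHEN clause remapped to the partition's attribute numbers.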
+ */ + if (RelationGetRelid(partRel) == defaultPartOid) + update_default_partition_oid(RelationGetRelid(rel), InvalidOid); + else + CacheInvalidateRelcacheByRelid(defaultPartOid); + } + + /* detach indexes too */ + indexes = RelationGetIndexList(partRel); + foreach(cell, indexes) + { + Oid idxid = lfirst_oid(cell); + Relation idx; + + if (!has_superclass(idxid)) + continue; + + Assert((IndexGetRelation(get_partition_parent(idxid), false) == + RelationGetRelid(rel))); + + idx = index_open(idxid, AccessExclusiveLock); + IndexSetParentIndex(idx, InvalidOid); + update_relispartition(classRel, idxid, false); + relation_close(idx, AccessExclusiveLock); + } heap_close(classRel, RowExclusiveLock); + /* Detach foreign keys */ + fks = copyObject(RelationGetFKeyList(partRel)); + foreach(cell, fks) + { + ForeignKeyCacheInfo *fk = lfirst(cell); + HeapTuple contup; + + contup = SearchSysCache1(CONSTROID, ObjectIdGetDatum(fk->conoid)); + if (!contup) + elog(ERROR, "cache lookup failed for constraint %u", fk->conoid); + + ConstraintSetParentConstraint(fk->conoid, InvalidOid); + + ReleaseSysCache(contup); + } + list_free_deep(fks); + /* * Invalidate the parent's relcache so that the partition is no longer * included in its partition descriptor. @@ -13849,3 +14860,387 @@ ATExecDetachPartition(Relation rel, RangeVar *name) return address; } + +/* + * Before acquiring lock on an index, acquire the same lock on the owning + * table. + */ +struct AttachIndexCallbackState +{ + Oid partitionOid; + Oid parentTblOid; + bool lockedParentTbl; +}; + +static void +RangeVarCallbackForAttachIndex(const RangeVar *rv, Oid relOid, Oid oldRelOid, + void *arg) +{ + struct AttachIndexCallbackState *state; + Form_pg_class classform; + HeapTuple tuple; + + state = (struct AttachIndexCallbackState *) arg; + + if (!state->lockedParentTbl) + { + LockRelationOid(state->parentTblOid, AccessShareLock); + state->lockedParentTbl = true; + } + + /* + * If we previously locked some other heap, and the name we're looking up + * no longer refers to an index on that relation, release the now-useless + * lock. XXX maybe we should do *after* we verify whether the index does + * not actually belong to the same relation ... + */ + if (relOid != oldRelOid && OidIsValid(state->partitionOid)) + { + UnlockRelationOid(state->partitionOid, AccessShareLock); + state->partitionOid = InvalidOid; + } + + /* Didn't find a relation, so no need for locking or permission checks. */ + if (!OidIsValid(relOid)) + return; + + tuple = SearchSysCache1(RELOID, ObjectIdGetDatum(relOid)); + if (!HeapTupleIsValid(tuple)) + return; /* concurrently dropped, so nothing to do */ + classform = (Form_pg_class) GETSTRUCT(tuple); + if (classform->relkind != RELKIND_PARTITIONED_INDEX && + classform->relkind != RELKIND_INDEX) + ereport(ERROR, + (errcode(ERRCODE_INVALID_OBJECT_DEFINITION), + errmsg("\"%s\" is not an index", rv->relname))); + ReleaseSysCache(tuple); + + /* + * Since we need only examine the heap's tupledesc, an access share lock + * on it (preventing any DDL) is sufficient. 
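The detach path above unlinks dependent objects instead of dropping them. In SQL terms, continuing the hypothetical m example:

    ALTER TABLE m DETACH PARTITION m_2018;
    -- m_2018 keeps m_2018_logdate_idx, but IndexSetParentIndex(..., InvalidOid)
    -- has turned it into an ordinary standalone index; any foreign-key
    -- constraint cloned from m likewise stays on m_2018, only unlinked from
    -- the parent's constraint.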
+ */ + state->partitionOid = IndexGetRelation(relOid, false); + LockRelationOid(state->partitionOid, AccessShareLock); +} + +/* + * ALTER INDEX i1 ATTACH PARTITION i2 + */ +static ObjectAddress +ATExecAttachPartitionIdx(List **wqueue, Relation parentIdx, RangeVar *name) +{ + Relation partIdx; + Relation partTbl; + Relation parentTbl; + ObjectAddress address; + Oid partIdxId; + Oid currParent; + struct AttachIndexCallbackState state; + + /* + * We need to obtain lock on the index 'name' to modify it, but we also + * need to read its owning table's tuple descriptor -- so we need to lock + * both. To avoid deadlocks, obtain lock on the table before doing so on + * the index. Furthermore, we need to examine the parent table of the + * partition, so lock that one too. + */ + state.partitionOid = InvalidOid; + state.parentTblOid = parentIdx->rd_index->indrelid; + state.lockedParentTbl = false; + partIdxId = + RangeVarGetRelidExtended(name, AccessExclusiveLock, 0, + RangeVarCallbackForAttachIndex, + (void *) &state); + /* Not there? */ + if (!OidIsValid(partIdxId)) + ereport(ERROR, + (errcode(ERRCODE_UNDEFINED_OBJECT), + errmsg("index \"%s\" does not exist", name->relname))); + + /* no deadlock risk: RangeVarGetRelidExtended already acquired the lock */ + partIdx = relation_open(partIdxId, AccessExclusiveLock); + + /* we already hold locks on both tables, so this is safe: */ + parentTbl = relation_open(parentIdx->rd_index->indrelid, AccessShareLock); + partTbl = relation_open(partIdx->rd_index->indrelid, NoLock); + + ObjectAddressSet(address, RelationRelationId, RelationGetRelid(partIdx)); + + /* Silently do nothing if already in the right state */ + currParent = partIdx->rd_rel->relispartition ? + get_partition_parent(partIdxId) : InvalidOid; + if (currParent != RelationGetRelid(parentIdx)) + { + IndexInfo *childInfo; + IndexInfo *parentInfo; + AttrNumber *attmap; + bool found; + int i; + PartitionDesc partDesc; + Oid constraintOid, + cldConstrId = InvalidOid; + + /* + * If this partition already has an index attached, refuse the + * operation. 
+ */ + refuseDupeIndexAttach(parentIdx, partIdx, partTbl); + + if (OidIsValid(currParent)) + ereport(ERROR, + (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE), + errmsg("cannot attach index \"%s\" as a partition of index \"%s\"", + RelationGetRelationName(partIdx), + RelationGetRelationName(parentIdx)), + errdetail("Index \"%s\" is already attached to another index.", + RelationGetRelationName(partIdx)))); + + /* Make sure it indexes a partition of the other index's table */ + partDesc = RelationGetPartitionDesc(parentTbl); + found = false; + for (i = 0; i < partDesc->nparts; i++) + { + if (partDesc->oids[i] == state.partitionOid) + { + found = true; + break; + } + } + if (!found) + ereport(ERROR, + (errmsg("cannot attach index \"%s\" as a partition of index \"%s\"", + RelationGetRelationName(partIdx), + RelationGetRelationName(parentIdx)), + errdetail("Index \"%s\" is not an index on any partition of table \"%s\".", + RelationGetRelationName(partIdx), + RelationGetRelationName(parentTbl)))); + + /* Ensure the indexes are compatible */ + childInfo = BuildIndexInfo(partIdx); + parentInfo = BuildIndexInfo(parentIdx); + attmap = convert_tuples_by_name_map(RelationGetDescr(partTbl), + RelationGetDescr(parentTbl), + gettext_noop("could not convert row type")); + if (!CompareIndexInfo(childInfo, parentInfo, + partIdx->rd_indcollation, + parentIdx->rd_indcollation, + partIdx->rd_opfamily, + parentIdx->rd_opfamily, + attmap, + RelationGetDescr(partTbl)->natts)) + ereport(ERROR, + (errcode(ERRCODE_INVALID_OBJECT_DEFINITION), + errmsg("cannot attach index \"%s\" as a partition of index \"%s\"", + RelationGetRelationName(partIdx), + RelationGetRelationName(parentIdx)), + errdetail("The index definitions do not match."))); + + /* + * If there is a constraint in the parent, make sure there is one in + * the child too. + */ + constraintOid = get_relation_idx_constraint_oid(RelationGetRelid(parentTbl), + RelationGetRelid(parentIdx)); + + if (OidIsValid(constraintOid)) + { + cldConstrId = get_relation_idx_constraint_oid(RelationGetRelid(partTbl), + partIdxId); + if (!OidIsValid(cldConstrId)) + ereport(ERROR, + (errcode(ERRCODE_INVALID_OBJECT_DEFINITION), + errmsg("cannot attach index \"%s\" as a partition of index \"%s\"", + RelationGetRelationName(partIdx), + RelationGetRelationName(parentIdx)), + errdetail("The index \"%s\" belongs to a constraint in table \"%s\" but no constraint exists for index \"%s\".", + RelationGetRelationName(parentIdx), + RelationGetRelationName(parentTbl), + RelationGetRelationName(partIdx)))); + } + + /* All good -- do it */ + IndexSetParentIndex(partIdx, RelationGetRelid(parentIdx)); + if (OidIsValid(constraintOid)) + ConstraintSetParentConstraint(cldConstrId, constraintOid); + update_relispartition(NULL, partIdxId, true); + + pfree(attmap); + + validatePartitionedIndex(parentIdx, parentTbl); + } + + relation_close(parentTbl, AccessShareLock); + /* keep these locks till commit */ + relation_close(partTbl, NoLock); + relation_close(partIdx, NoLock); + + return address; +} + +/* + * Verify whether the given partition already contains an index attached + * to the given partitioned index. If so, raise an error. 
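ATExecAttachPartitionIdx implements the new ALTER INDEX ... ATTACH PARTITION form. A hypothetical session exercising the checks above, assuming m_2018 is attached as a partition of the invented table m:

    CREATE INDEX m_id_idx ON ONLY m (id);        -- parent index, not yet valid
    CREATE INDEX m_2018_id_idx ON m_2018 (id);

    ALTER INDEX m_id_idx ATTACH PARTITION m_2018_id_idx;
    -- accepted only if the definitions match per CompareIndexInfo; trying to
    -- attach a second index for the same partition is rejected by
    -- refuseDupeIndexAttach.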
+ */ +static void +refuseDupeIndexAttach(Relation parentIdx, Relation partIdx, Relation partitionTbl) +{ + Relation pg_inherits; + ScanKeyData key; + HeapTuple tuple; + SysScanDesc scan; + + pg_inherits = heap_open(InheritsRelationId, AccessShareLock); + ScanKeyInit(&key, Anum_pg_inherits_inhparent, + BTEqualStrategyNumber, F_OIDEQ, + ObjectIdGetDatum(RelationGetRelid(parentIdx))); + scan = systable_beginscan(pg_inherits, InheritsParentIndexId, true, + NULL, 1, &key); + while (HeapTupleIsValid(tuple = systable_getnext(scan))) + { + Form_pg_inherits inhForm; + Oid tab; + + inhForm = (Form_pg_inherits) GETSTRUCT(tuple); + tab = IndexGetRelation(inhForm->inhrelid, false); + if (tab == RelationGetRelid(partitionTbl)) + ereport(ERROR, + (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE), + errmsg("cannot attach index \"%s\" as a partition of index \"%s\"", + RelationGetRelationName(partIdx), + RelationGetRelationName(parentIdx)), + errdetail("Another index is already attached for partition \"%s\".", + RelationGetRelationName(partitionTbl)))); + } + + systable_endscan(scan); + heap_close(pg_inherits, AccessShareLock); +} + +/* + * Verify whether the set of attached partition indexes to a parent index on + * a partitioned table is complete. If it is, mark the parent index valid. + * + * This should be called each time a partition index is attached. + */ +static void +validatePartitionedIndex(Relation partedIdx, Relation partedTbl) +{ + Relation inheritsRel; + SysScanDesc scan; + ScanKeyData key; + int tuples = 0; + HeapTuple inhTup; + bool updated = false; + + Assert(partedIdx->rd_rel->relkind == RELKIND_PARTITIONED_INDEX); + + /* + * Scan pg_inherits for this parent index. Count each valid index we find + * (verifying the pg_index entry for each), and if we reach the total + * amount we expect, we can mark this parent index as valid. + */ + inheritsRel = heap_open(InheritsRelationId, AccessShareLock); + ScanKeyInit(&key, Anum_pg_inherits_inhparent, + BTEqualStrategyNumber, F_OIDEQ, + ObjectIdGetDatum(RelationGetRelid(partedIdx))); + scan = systable_beginscan(inheritsRel, InheritsParentIndexId, true, + NULL, 1, &key); + while ((inhTup = systable_getnext(scan)) != NULL) + { + Form_pg_inherits inhForm = (Form_pg_inherits) GETSTRUCT(inhTup); + HeapTuple indTup; + Form_pg_index indexForm; + + indTup = SearchSysCache1(INDEXRELID, + ObjectIdGetDatum(inhForm->inhrelid)); + if (!indTup) + elog(ERROR, "cache lookup failed for index %u", + inhForm->inhrelid); + indexForm = (Form_pg_index) GETSTRUCT(indTup); + if (IndexIsValid(indexForm)) + tuples += 1; + ReleaseSysCache(indTup); + } + + /* Done with pg_inherits */ + systable_endscan(scan); + heap_close(inheritsRel, AccessShareLock); + + /* + * If we found as many inherited indexes as the partitioned table has + * partitions, we're good; update pg_index to set indisvalid. + */ + if (tuples == RelationGetPartitionDesc(partedTbl)->nparts) + { + Relation idxRel; + HeapTuple newtup; + + idxRel = heap_open(IndexRelationId, RowExclusiveLock); + + newtup = heap_copytuple(partedIdx->rd_indextuple); + ((Form_pg_index) GETSTRUCT(newtup))->indisvalid = true; + updated = true; + + CatalogTupleUpdate(idxRel, &partedIdx->rd_indextuple->t_self, newtup); + + heap_close(idxRel, RowExclusiveLock); + } + + /* + * If this index is in turn a partition of a larger index, validating it + * might cause the parent to become valid also. Try that. 
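validatePartitionedIndex is what eventually flips such a parent index to valid. Continuing the same hypothetical setup:

    SELECT indisvalid FROM pg_index
     WHERE indexrelid = 'm_id_idx'::regclass;    -- false right after ON ONLY

    -- After one ALTER INDEX ... ATTACH PARTITION per partition of m, the count
    -- gathered above matches nparts, indisvalid is set to true, and if m_id_idx
    -- is itself a partition of a larger index, that parent is re-checked too.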
+ */ + if (updated && partedIdx->rd_rel->relispartition) + { + Oid parentIdxId, + parentTblId; + Relation parentIdx, + parentTbl; + + /* make sure we see the validation we just did */ + CommandCounterIncrement(); + + parentIdxId = get_partition_parent(RelationGetRelid(partedIdx)); + parentTblId = get_partition_parent(RelationGetRelid(partedTbl)); + parentIdx = relation_open(parentIdxId, AccessExclusiveLock); + parentTbl = relation_open(parentTblId, AccessExclusiveLock); + Assert(!parentIdx->rd_index->indisvalid); + + validatePartitionedIndex(parentIdx, parentTbl); + + relation_close(parentIdx, AccessExclusiveLock); + relation_close(parentTbl, AccessExclusiveLock); + } +} + +/* + * Update the relispartition flag of the given relation to the given value. + * + * classRel is the pg_class relation, already open and suitably locked. + * It can be passed as NULL, in which case it's opened and closed locally. + */ +static void +update_relispartition(Relation classRel, Oid relationId, bool newval) +{ + HeapTuple tup; + HeapTuple newtup; + Form_pg_class classForm; + bool opened = false; + + if (classRel == NULL) + { + classRel = heap_open(RelationRelationId, RowExclusiveLock); + opened = true; + } + + tup = SearchSysCache1(RELOID, ObjectIdGetDatum(relationId)); + newtup = heap_copytuple(tup); + classForm = (Form_pg_class) GETSTRUCT(newtup); + classForm->relispartition = newval; + CatalogTupleUpdate(classRel, &tup->t_self, newtup); + heap_freetuple(newtup); + ReleaseSysCache(tup); + + if (opened) + heap_close(classRel, RowExclusiveLock); +} diff --git a/src/backend/commands/tablespace.c b/src/backend/commands/tablespace.c index 8559c3b6b3..f7e9160a4f 100644 --- a/src/backend/commands/tablespace.c +++ b/src/backend/commands/tablespace.c @@ -35,7 +35,7 @@ * and munge the system catalogs of the new database. * * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * @@ -68,6 +68,7 @@ #include "commands/seclabel.h" #include "commands/tablecmds.h" #include "commands/tablespace.h" +#include "common/file_perm.h" #include "miscadmin.h" #include "postmaster/bgwriter.h" #include "storage/fd.h" @@ -151,7 +152,7 @@ TablespaceCreateDbspace(Oid spcNode, Oid dbNode, bool isRedo) else { /* Directory creation failed? */ - if (mkdir(dir, S_IRWXU) < 0) + if (MakePGDirectory(dir) < 0) { char *parentdir; @@ -173,7 +174,7 @@ TablespaceCreateDbspace(Oid spcNode, Oid dbNode, bool isRedo) get_parent_directory(parentdir); get_parent_directory(parentdir); /* Can't create parent and it doesn't already exist? */ - if (mkdir(parentdir, S_IRWXU) < 0 && errno != EEXIST) + if (MakePGDirectory(parentdir) < 0 && errno != EEXIST) ereport(ERROR, (errcode_for_file_access(), errmsg("could not create directory \"%s\": %m", @@ -184,7 +185,7 @@ TablespaceCreateDbspace(Oid spcNode, Oid dbNode, bool isRedo) parentdir = pstrdup(dir); get_parent_directory(parentdir); /* Can't create parent and it doesn't already exist? 
*/ - if (mkdir(parentdir, S_IRWXU) < 0 && errno != EEXIST) + if (MakePGDirectory(parentdir) < 0 && errno != EEXIST) ereport(ERROR, (errcode_for_file_access(), errmsg("could not create directory \"%s\": %m", @@ -192,7 +193,7 @@ TablespaceCreateDbspace(Oid spcNode, Oid dbNode, bool isRedo) pfree(parentdir); /* Create database directory */ - if (mkdir(dir, S_IRWXU) < 0) + if (MakePGDirectory(dir) < 0) ereport(ERROR, (errcode_for_file_access(), errmsg("could not create directory \"%s\": %m", @@ -279,7 +280,8 @@ CreateTableSpace(CreateTableSpaceStmt *stmt) /* * Check that location isn't too long. Remember that we're going to append * 'PG_XXX//_.'. FYI, we never actually - * reference the whole path here, but mkdir() uses the first two parts. + * reference the whole path here, but MakePGDirectory() uses the first two + * parts. */ if (strlen(location) + 1 + strlen(TABLESPACE_VERSION_DIRECTORY) + 1 + OIDCHARS + 1 + OIDCHARS + 1 + FORKNAMECHARS + 1 + OIDCHARS > MAXPGPATH) @@ -444,13 +446,13 @@ DropTableSpace(DropTableSpaceStmt *stmt) /* Must be tablespace owner */ if (!pg_tablespace_ownercheck(tablespaceoid, GetUserId())) - aclcheck_error(ACLCHECK_NOT_OWNER, ACL_KIND_TABLESPACE, + aclcheck_error(ACLCHECK_NOT_OWNER, OBJECT_TABLESPACE, tablespacename); /* Disallow drop of the standard tablespaces, even by superuser */ if (tablespaceoid == GLOBALTABLESPACE_OID || tablespaceoid == DEFAULTTABLESPACE_OID) - aclcheck_error(ACLCHECK_NO_PRIV, ACL_KIND_TABLESPACE, + aclcheck_error(ACLCHECK_NO_PRIV, OBJECT_TABLESPACE, tablespacename); /* DROP hook for the tablespace being removed */ @@ -574,7 +576,7 @@ create_tablespace_directories(const char *location, const Oid tablespaceoid) * Attempt to coerce target directory to safe permissions. If this fails, * it doesn't exist or has the wrong owner. */ - if (chmod(location, S_IRWXU) != 0) + if (chmod(location, pg_dir_create_mode) != 0) { if (errno == ENOENT) ereport(ERROR, @@ -599,7 +601,7 @@ create_tablespace_directories(const char *location, const Oid tablespaceoid) if (stat(location_with_version_dir, &st) == 0 && S_ISDIR(st.st_mode)) { if (!rmtree(location_with_version_dir, true)) - /* If this failed, mkdir() below is going to error. */ + /* If this failed, MakePGDirectory() below is going to error. */ ereport(WARNING, (errmsg("some useless files may be left behind in old database directory \"%s\"", location_with_version_dir))); @@ -610,7 +612,7 @@ create_tablespace_directories(const char *location, const Oid tablespaceoid) * The creation of the version directory prevents more than one tablespace * in a single location. */ - if (mkdir(location_with_version_dir, S_IRWXU) < 0) + if (MakePGDirectory(location_with_version_dir) < 0) { if (errno == EEXIST) ereport(ERROR, @@ -655,7 +657,7 @@ create_tablespace_directories(const char *location, const Oid tablespaceoid) * does not justify throwing an error that would require manual intervention * to get the database running again. 
* - * Returns TRUE if successful, FALSE if some subdirectory is not empty + * Returns true if successful, false if some subdirectory is not empty */ static bool destroy_tablespace_directories(Oid tablespaceoid, bool redo) @@ -941,7 +943,7 @@ RenameTableSpace(const char *oldname, const char *newname) /* Must be owner */ if (!pg_tablespace_ownercheck(HeapTupleGetOid(newtuple), GetUserId())) - aclcheck_error(ACLCHECK_NO_PRIV, ACL_KIND_TABLESPACE, oldname); + aclcheck_error(ACLCHECK_NO_PRIV, OBJECT_TABLESPACE, oldname); /* Validate new name */ if (!allowSystemTableMods && IsReservedName(newname)) @@ -1017,7 +1019,7 @@ AlterTableSpaceOptions(AlterTableSpaceOptionsStmt *stmt) /* Must be owner of the existing object */ if (!pg_tablespace_ownercheck(HeapTupleGetOid(tup), GetUserId())) - aclcheck_error(ACLCHECK_NOT_OWNER, ACL_KIND_TABLESPACE, + aclcheck_error(ACLCHECK_NOT_OWNER, OBJECT_TABLESPACE, stmt->tablespacename); /* Generate new proposed spcoptions (text array) */ @@ -1232,7 +1234,7 @@ check_temp_tablespaces(char **newval, void **extra, GucSource source) if (aclresult != ACLCHECK_OK) { if (source >= PGC_S_INTERACTIVE) - aclcheck_error(aclresult, ACL_KIND_TABLESPACE, curname); + aclcheck_error(aclresult, OBJECT_TABLESPACE, curname); continue; } diff --git a/src/backend/commands/trigger.c b/src/backend/commands/trigger.c index da0850bfd6..ccb5706c16 100644 --- a/src/backend/commands/trigger.c +++ b/src/backend/commands/trigger.c @@ -3,7 +3,7 @@ * trigger.c * PostgreSQL TRIGGERs support code. * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * IDENTIFICATION @@ -20,11 +20,12 @@ #include "access/xact.h" #include "catalog/catalog.h" #include "catalog/dependency.h" +#include "catalog/index.h" #include "catalog/indexing.h" #include "catalog/objectaccess.h" +#include "catalog/partition.h" #include "catalog/pg_constraint.h" -#include "catalog/pg_constraint_fn.h" -#include "catalog/pg_inherits_fn.h" +#include "catalog/pg_inherits.h" #include "catalog/pg_proc.h" #include "catalog/pg_trigger.h" #include "catalog/pg_type.h" @@ -74,7 +75,7 @@ static int MyTriggerDepth = 0; * to be changed, however. */ #define GetUpdatedColumns(relinfo, estate) \ - (rt_fetch((relinfo)->ri_RangeTableIndex, (estate)->es_range_table)->updatedCols) + (exec_rt_fetch((relinfo)->ri_RangeTableIndex, estate)->updatedCols) /* Local function prototypes */ static void ConvertTriggerToFK(CreateTrigStmt *stmt, Oid funcoid); @@ -100,6 +101,7 @@ static void AfterTriggerSaveEvent(EState *estate, ResultRelInfo *relinfo, List *recheckIndexes, Bitmapset *modifiedCols, TransitionCaptureState *transition_capture); static void AfterTriggerEnlargeQueryState(void); +static bool before_stmt_triggers_fired(Oid relid, CmdType cmdType); /* @@ -122,16 +124,33 @@ static void AfterTriggerEnlargeQueryState(void); * TRIGGER, we build a pg_constraint entry internally.) * * indexOid, if nonzero, is the OID of an index associated with the constraint. - * We do nothing with this except store it into pg_trigger.tgconstrindid. + * We do nothing with this except store it into pg_trigger.tgconstrindid; + * but when creating a trigger for a deferrable unique constraint on a + * partitioned table, its children are looked up. Note we don't cope with + * invalid indexes in that case. + * + * funcoid, if nonzero, is the OID of the function to invoke. 
When this is + * given, stmt->funcname is ignored. + * + * parentTriggerOid, if nonzero, is a trigger that begets this one; so that + * if that trigger is dropped, this one should be too. (This is passed as + * Invalid by most callers; it's set here when recursing on a partition.) + * + * If whenClause is passed, it is an already-transformed expression for + * WHEN. In this case, we ignore any that may come in stmt->whenClause. * * If isInternal is true then this is an internally-generated trigger. * This argument sets the tgisinternal field of the pg_trigger entry, and - * if TRUE causes us to modify the given trigger name to ensure uniqueness. + * if true causes us to modify the given trigger name to ensure uniqueness. * * When isInternal is not true we require ACL_TRIGGER permissions on the * relation, as well as ACL_EXECUTE on the trigger function. For internal * triggers the caller must apply any required permission checks. * + * When called on partitioned tables, this function recurses to create the + * trigger on all the partitions, except if isInternal is true, in which + * case caller is expected to execute recursion on its own. + * * Note: can return InvalidObjectAddress if we decided to not create a trigger * at all, but a foreign-key constraint. This is a kluge for backwards * compatibility. @@ -139,13 +158,13 @@ static void AfterTriggerEnlargeQueryState(void); ObjectAddress CreateTrigger(CreateTrigStmt *stmt, const char *queryString, Oid relOid, Oid refRelOid, Oid constraintOid, Oid indexOid, - bool isInternal) + Oid funcoid, Oid parentTriggerOid, Node *whenClause, + bool isInternal, bool in_partition) { int16 tgtype; int ncolumns; int16 *columns; int2vector *tgattr; - Node *whenClause; List *whenRtable; char *qual; Datum values[Natts_pg_trigger]; @@ -158,7 +177,6 @@ CreateTrigger(CreateTrigStmt *stmt, const char *queryString, Relation pgrel; HeapTuple tuple; Oid fargtypes[1]; /* dummy */ - Oid funcoid; Oid funcrettype; Oid trigoid; char internaltrigname[NAMEDATALEN]; @@ -168,6 +186,7 @@ CreateTrigger(CreateTrigStmt *stmt, const char *queryString, referenced; char *oldtablename = NULL; char *newtablename = NULL; + bool partition_recurse; if (OidIsValid(relOid)) rel = heap_open(relOid, ShareRowExclusiveLock); @@ -178,8 +197,7 @@ CreateTrigger(CreateTrigStmt *stmt, const char *queryString, * Triggers must be on tables or views, and there are additional * relation-type-specific restrictions. 
*/ - if (rel->rd_rel->relkind == RELKIND_RELATION || - rel->rd_rel->relkind == RELKIND_PARTITIONED_TABLE) + if (rel->rd_rel->relkind == RELKIND_RELATION) { /* Tables can't have INSTEAD OF triggers */ if (stmt->timing != TRIGGER_TYPE_BEFORE && @@ -189,13 +207,53 @@ CreateTrigger(CreateTrigStmt *stmt, const char *queryString, errmsg("\"%s\" is a table", RelationGetRelationName(rel)), errdetail("Tables cannot have INSTEAD OF triggers."))); - /* Disallow ROW triggers on partitioned tables */ - if (stmt->row && rel->rd_rel->relkind == RELKIND_PARTITIONED_TABLE) + } + else if (rel->rd_rel->relkind == RELKIND_PARTITIONED_TABLE) + { + /* Partitioned tables can't have INSTEAD OF triggers */ + if (stmt->timing != TRIGGER_TYPE_BEFORE && + stmt->timing != TRIGGER_TYPE_AFTER) ereport(ERROR, (errcode(ERRCODE_WRONG_OBJECT_TYPE), - errmsg("\"%s\" is a partitioned table", + errmsg("\"%s\" is a table", RelationGetRelationName(rel)), - errdetail("Partitioned tables cannot have ROW triggers."))); + errdetail("Tables cannot have INSTEAD OF triggers."))); + + /* + * FOR EACH ROW triggers have further restrictions + */ + if (stmt->row) + { + /* + * BEFORE triggers FOR EACH ROW are forbidden, because they would + * allow the user to direct the row to another partition, which + * isn't implemented in the executor. + */ + if (stmt->timing != TRIGGER_TYPE_AFTER) + ereport(ERROR, + (errcode(ERRCODE_WRONG_OBJECT_TYPE), + errmsg("\"%s\" is a partitioned table", + RelationGetRelationName(rel)), + errdetail("Partitioned tables cannot have BEFORE / FOR EACH ROW triggers."))); + + /* + * Disallow use of transition tables. + * + * Note that we have another restriction about transition tables + * in partitions; search for 'has_superclass' below for an + * explanation. The check here is just to protect from the fact + * that if we allowed it here, the creation would succeed for a + * partitioned table with no partitions, but would be blocked by + * the other restriction when the first partition was created, + * which is very unfriendly behavior. + */ + if (stmt->transitionRels != NIL) + ereport(ERROR, + (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), + errmsg("\"%s\" is a partitioned table", + RelationGetRelationName(rel)), + errdetail("Triggers on partitioned tables cannot have transition tables."))); + } } else if (rel->rd_rel->relkind == RELKIND_VIEW) { @@ -234,6 +292,11 @@ CreateTrigger(CreateTrigStmt *stmt, const char *queryString, RelationGetRelationName(rel)), errdetail("Foreign tables cannot have TRUNCATE triggers."))); + /* + * We disallow constraint triggers to protect the assumption that + * triggers on FKs can't be deferred. See notes with AfterTriggers + * data structures, below. 
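User-visibly, the reworked checks above allow AFTER ... FOR EACH ROW triggers on partitioned tables while rejecting the cases named in the errdetail strings of this hunk. Hypothetical examples against the invented table m:

    CREATE TRIGGER m_before BEFORE INSERT ON m
      FOR EACH ROW EXECUTE PROCEDURE m_audit();
    -- ERROR:  "m" is a partitioned table
    -- DETAIL: Partitioned tables cannot have BEFORE / FOR EACH ROW triggers.

    CREATE TRIGGER m_tt AFTER INSERT ON m
      REFERENCING NEW TABLE AS new_rows
      FOR EACH ROW EXECUTE PROCEDURE m_audit();
    -- ERROR:  "m" is a partitioned table
    -- DETAIL: Triggers on partitioned tables cannot have transition tables.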
+ */ if (stmt->isconstraint) ereport(ERROR, (errcode(ERRCODE_WRONG_OBJECT_TYPE), @@ -278,7 +341,7 @@ CreateTrigger(CreateTrigStmt *stmt, const char *queryString, aclresult = pg_class_aclcheck(RelationGetRelid(rel), GetUserId(), ACL_TRIGGER); if (aclresult != ACLCHECK_OK) - aclcheck_error(aclresult, ACL_KIND_CLASS, + aclcheck_error(aclresult, get_relkind_objtype(rel->rd_rel->relkind), RelationGetRelationName(rel)); if (OidIsValid(constrrelid)) @@ -286,11 +349,23 @@ CreateTrigger(CreateTrigStmt *stmt, const char *queryString, aclresult = pg_class_aclcheck(constrrelid, GetUserId(), ACL_TRIGGER); if (aclresult != ACLCHECK_OK) - aclcheck_error(aclresult, ACL_KIND_CLASS, + aclcheck_error(aclresult, get_relkind_objtype(get_rel_relkind(constrrelid)), get_rel_name(constrrelid)); } } + /* + * When called on a partitioned table to create a FOR EACH ROW trigger + * that's not internal, we create one trigger for each partition, too. + * + * For that, we'd better hold lock on all of them ahead of time. + */ + partition_recurse = !isInternal && stmt->row && + rel->rd_rel->relkind == RELKIND_PARTITIONED_TABLE; + if (partition_recurse) + list_free(find_all_inheritors(RelationGetRelid(rel), + ShareRowExclusiveLock, NULL)); + /* Compute tgtype */ TRIGGER_CLEAR_TYPE(tgtype); if (stmt->row) @@ -416,7 +491,27 @@ CreateTrigger(CreateTrigStmt *stmt, const char *queryString, (TRIGGER_FOR_DELETE(tgtype) ? 1 : 0)) != 1) ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("Transition tables cannot be specified for triggers with more than one event"))); + errmsg("transition tables cannot be specified for triggers with more than one event"))); + + /* + * We currently don't allow column-specific triggers with + * transition tables. Per spec, that seems to require + * accumulating separate transition tables for each combination of + * columns, which is a lot of work for a rather marginal feature. + */ + if (stmt->columns != NIL) + ereport(ERROR, + (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), + errmsg("transition tables cannot be specified for triggers with column lists"))); + + /* + * We disallow constraint triggers with transition tables, to + * protect the assumption that such triggers can't be deferred. + * See notes with AfterTriggers data structures, below. + * + * Currently this is enforced by the grammar, so just Assert here. + */ + Assert(!stmt->isconstraint); if (tt->isNew) { @@ -458,9 +553,14 @@ CreateTrigger(CreateTrigStmt *stmt, const char *queryString, } /* - * Parse the WHEN clause, if any + * Parse the WHEN clause, if any and we weren't passed an already + * transformed one. + * + * Note that as a side effect, we fill whenRtable when parsing. If we got + * an already parsed clause, this does not occur, which is what we want -- + * no point in adding redundant dependencies below. */ - if (stmt->whenClause) + if (!whenClause && stmt->whenClause) { ParseState *pstate; RangeTblEntry *rte; @@ -477,10 +577,12 @@ CreateTrigger(CreateTrigStmt *stmt, const char *queryString, * 'OLD' must always have varno equal to 1 and 'NEW' equal to 2. 
*/ rte = addRangeTableEntryForRelation(pstate, rel, + AccessShareLock, makeAlias("old", NIL), false, false); addRTEtoQuery(pstate, rte, false, true, true); rte = addRangeTableEntryForRelation(pstate, rel, + AccessShareLock, makeAlias("new", NIL), false, false); addRTEtoQuery(pstate, rte, false, true, true); @@ -551,22 +653,28 @@ CreateTrigger(CreateTrigStmt *stmt, const char *queryString, free_parsestate(pstate); } - else + else if (!whenClause) { whenClause = NULL; whenRtable = NIL; qual = NULL; } + else + { + qual = nodeToString(whenClause); + whenRtable = NIL; + } /* * Find and validate the trigger function. */ - funcoid = LookupFuncName(stmt->funcname, 0, fargtypes, false); + if (!OidIsValid(funcoid)) + funcoid = LookupFuncName(stmt->funcname, 0, fargtypes, false); if (!isInternal) { aclresult = pg_proc_aclcheck(funcoid, GetUserId(), ACL_EXECUTE); if (aclresult != ACLCHECK_OK) - aclcheck_error(aclresult, ACL_KIND_PROC, + aclcheck_error(aclresult, OBJECT_FUNCTION, NameListToString(stmt->funcname)); } funcrettype = get_func_rettype(funcoid); @@ -625,9 +733,11 @@ CreateTrigger(CreateTrigStmt *stmt, const char *queryString, stmt->deferrable, stmt->initdeferred, true, + InvalidOid, /* no parent */ RelationGetRelid(rel), NULL, /* no conkey */ 0, + 0, InvalidOid, /* no domain */ InvalidOid, /* no index */ InvalidOid, /* no foreign key */ @@ -642,7 +752,6 @@ CreateTrigger(CreateTrigStmt *stmt, const char *queryString, NULL, /* no exclusion */ NULL, /* no check constraint */ NULL, - NULL, true, /* islocal */ 0, /* inhcount */ true, /* isnoinherit */ @@ -707,6 +816,11 @@ CreateTrigger(CreateTrigStmt *stmt, const char *queryString, /* * Build the new pg_trigger tuple. + * + * When we're creating a trigger in a partition, we mark it as internal, + * even though we don't do the isInternal magic in this function. This + * makes the triggers in partitions identical to the ones in the + * partitioned tables, except that they are marked internal. */ memset(nulls, false, sizeof(nulls)); @@ -716,7 +830,7 @@ CreateTrigger(CreateTrigStmt *stmt, const char *queryString, values[Anum_pg_trigger_tgfoid - 1] = ObjectIdGetDatum(funcoid); values[Anum_pg_trigger_tgtype - 1] = Int16GetDatum(tgtype); values[Anum_pg_trigger_tgenabled - 1] = CharGetDatum(TRIGGER_FIRES_ON_ORIGIN); - values[Anum_pg_trigger_tgisinternal - 1] = BoolGetDatum(isInternal); + values[Anum_pg_trigger_tgisinternal - 1] = BoolGetDatum(isInternal || in_partition); values[Anum_pg_trigger_tgconstrrelid - 1] = ObjectIdGetDatum(constrrelid); values[Anum_pg_trigger_tgconstrindid - 1] = ObjectIdGetDatum(indexOid); values[Anum_pg_trigger_tgconstraint - 1] = ObjectIdGetDatum(constraintOid); @@ -846,9 +960,8 @@ CreateTrigger(CreateTrigStmt *stmt, const char *queryString, pfree(DatumGetPointer(values[Anum_pg_trigger_tgnewtable - 1])); /* - * Update relation's pg_class entry. Crucial side-effect: other backends - * (and this one too!) are sent SI message to make them rebuild relcache - * entries. + * Update relation's pg_class entry; if necessary; and if not, send an SI + * message to make other backends (and this one) rebuild relcache entries. 
*/ pgrel = heap_open(RelationRelationId, RowExclusiveLock); tuple = SearchSysCacheCopy1(RELOID, @@ -856,20 +969,20 @@ CreateTrigger(CreateTrigStmt *stmt, const char *queryString, if (!HeapTupleIsValid(tuple)) elog(ERROR, "cache lookup failed for relation %u", RelationGetRelid(rel)); + if (!((Form_pg_class) GETSTRUCT(tuple))->relhastriggers) + { + ((Form_pg_class) GETSTRUCT(tuple))->relhastriggers = true; - ((Form_pg_class) GETSTRUCT(tuple))->relhastriggers = true; + CatalogTupleUpdate(pgrel, &tuple->t_self, tuple); - CatalogTupleUpdate(pgrel, &tuple->t_self, tuple); + CommandCounterIncrement(); + } + else + CacheInvalidateRelcacheByTuple(tuple); heap_freetuple(tuple); heap_close(pgrel, RowExclusiveLock); - /* - * We used to try to update the rel's relcache entry here, but that's - * fairly pointless since it will happen as a byproduct of the upcoming - * CommandCounterIncrement... - */ - /* * Record dependencies for trigger. Always place a normal dependency on * the function. @@ -902,11 +1015,18 @@ CreateTrigger(CreateTrigStmt *stmt, const char *queryString, * User CREATE TRIGGER, so place dependencies. We make trigger be * auto-dropped if its relation is dropped or if the FK relation is * dropped. (Auto drop is compatible with our pre-7.3 behavior.) + * + * Exception: if this trigger comes from a parent partitioned table, + * then it's not separately drop-able, but goes away if the partition + * does. */ referenced.classId = RelationRelationId; referenced.objectId = RelationGetRelid(rel); referenced.objectSubId = 0; - recordDependencyOn(&myself, &referenced, DEPENDENCY_AUTO); + recordDependencyOn(&myself, &referenced, OidIsValid(parentTriggerOid) ? + DEPENDENCY_INTERNAL_AUTO : + DEPENDENCY_AUTO); + if (OidIsValid(constrrelid)) { referenced.classId = RelationRelationId; @@ -928,6 +1048,13 @@ CreateTrigger(CreateTrigStmt *stmt, const char *queryString, referenced.objectSubId = 0; recordDependencyOn(&referenced, &myself, DEPENDENCY_INTERNAL); } + + /* Depends on the parent trigger, if there is one. */ + if (OidIsValid(parentTriggerOid)) + { + ObjectAddressSet(referenced, TriggerRelationId, parentTriggerOid); + recordDependencyOn(&myself, &referenced, DEPENDENCY_INTERNAL_AUTO); + } } /* If column-specific trigger, add normal dependencies on columns */ @@ -948,7 +1075,7 @@ CreateTrigger(CreateTrigStmt *stmt, const char *queryString, * If it has a WHEN clause, add dependencies on objects mentioned in the * expression (eg, functions, as well as any columns used). */ - if (whenClause != NULL) + if (whenRtable != NIL) recordDependencyOnExpr(&myself, whenClause, whenRtable, DEPENDENCY_NORMAL); @@ -956,6 +1083,112 @@ CreateTrigger(CreateTrigStmt *stmt, const char *queryString, InvokeObjectPostCreateHookArg(TriggerRelationId, trigoid, 0, isInternal); + /* + * Lastly, create the trigger on child relations, if needed. + */ + if (partition_recurse) + { + PartitionDesc partdesc = RelationGetPartitionDesc(rel); + List *idxs = NIL; + List *childTbls = NIL; + ListCell *l; + int i; + MemoryContext oldcxt, + perChildCxt; + + perChildCxt = AllocSetContextCreate(CurrentMemoryContext, + "part trig clone", + ALLOCSET_SMALL_SIZES); + + /* + * When a trigger is being created associated with an index, we'll + * need to associate the trigger in each child partition with the + * corresponding index on it. 
+ */ + if (OidIsValid(indexOid)) + { + ListCell *l; + List *idxs = NIL; + + idxs = find_inheritance_children(indexOid, ShareRowExclusiveLock); + foreach(l, idxs) + childTbls = lappend_oid(childTbls, + IndexGetRelation(lfirst_oid(l), + false)); + } + + oldcxt = MemoryContextSwitchTo(perChildCxt); + + /* Iterate to create the trigger on each existing partition */ + for (i = 0; i < partdesc->nparts; i++) + { + Oid indexOnChild = InvalidOid; + ListCell *l2; + CreateTrigStmt *childStmt; + Relation childTbl; + Node *qual; + bool found_whole_row; + + childTbl = heap_open(partdesc->oids[i], ShareRowExclusiveLock); + + /* Find which of the child indexes is the one on this partition */ + if (OidIsValid(indexOid)) + { + forboth(l, idxs, l2, childTbls) + { + if (lfirst_oid(l2) == partdesc->oids[i]) + { + indexOnChild = lfirst_oid(l); + break; + } + } + if (!OidIsValid(indexOnChild)) + elog(ERROR, "failed to find index matching index \"%s\" in partition \"%s\"", + get_rel_name(indexOid), + get_rel_name(partdesc->oids[i])); + } + + /* + * Initialize our fabricated parse node by copying the original + * one, then resetting fields that we pass separately. + */ + childStmt = (CreateTrigStmt *) copyObject(stmt); + childStmt->funcname = NIL; + childStmt->args = NIL; + childStmt->whenClause = NULL; + + /* If there is a WHEN clause, create a modified copy of it */ + qual = copyObject(whenClause); + qual = (Node *) + map_partition_varattnos((List *) qual, PRS2_OLD_VARNO, + childTbl, rel, + &found_whole_row); + if (found_whole_row) + elog(ERROR, "unexpected whole-row reference found in trigger WHEN clause"); + qual = (Node *) + map_partition_varattnos((List *) qual, PRS2_NEW_VARNO, + childTbl, rel, + &found_whole_row); + if (found_whole_row) + elog(ERROR, "unexpected whole-row reference found in trigger WHEN clause"); + + CreateTrigger(childStmt, queryString, + partdesc->oids[i], refRelOid, + InvalidOid, indexOnChild, + funcoid, trigoid, qual, + isInternal, true); + + heap_close(childTbl, NoLock); + + MemoryContextReset(perChildCxt); + } + + MemoryContextSwitchTo(oldcxt); + MemoryContextDelete(perChildCxt); + list_free(idxs); + list_free(childTbls); + } + /* Keep lock on target rel until end of xact */ heap_close(rel, NoLock); @@ -1396,7 +1629,7 @@ RangeVarCallbackForRenameTrigger(const RangeVar *rv, Oid relid, Oid oldrelid, /* you must own the table to rename one of its triggers */ if (!pg_class_ownercheck(relid, GetUserId())) - aclcheck_error(ACLCHECK_NOT_OWNER, ACL_KIND_CLASS, rv->relname); + aclcheck_error(ACLCHECK_NOT_OWNER, get_relkind_objtype(get_rel_relkind(relid)), rv->relname); if (!allowSystemTableMods && IsSystemClass(relid, form)) ereport(ERROR, (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE), @@ -1436,7 +1669,7 @@ renametrig(RenameStmt *stmt) * release until end of transaction). */ relid = RangeVarGetRelidExtended(stmt->relation, AccessExclusiveLock, - false, false, + 0, RangeVarCallbackForRenameTrigger, NULL); @@ -1553,7 +1786,7 @@ renametrig(RenameStmt *stmt) */ void EnableDisableTrigger(Relation rel, const char *tgname, - char fires_when, bool skip_system) + char fires_when, bool skip_system, LOCKMODE lockmode) { Relation tgrel; int nkeys; @@ -1616,6 +1849,27 @@ EnableDisableTrigger(Relation rel, const char *tgname, heap_freetuple(newtup); + /* + * When altering FOR EACH ROW triggers on a partitioned table, do + * the same on the partitions as well. 
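The partition_recurse loop above means that a row trigger created on a parent which already has partitions is immediately cloned onto each of them; the EnableDisableTrigger change just below recurses to those clones in the same way. A hypothetical sketch:

    -- m already has partitions m_2018 and m_2019
    CREATE TRIGGER m_audit_upd AFTER UPDATE ON m
      FOR EACH ROW EXECUTE PROCEDURE m_audit();

    SELECT tgrelid::regclass, tgisinternal
      FROM pg_trigger WHERE tgname = 'm_audit_upd';
    -- expected: one row for m (tgisinternal = f) plus one per partition
    -- (tgisinternal = t), each clone depending on the parent trigger.

    ALTER TABLE m DISABLE TRIGGER m_audit_upd;
    -- EnableDisableTrigger now recurses, so the clones are disabled as well.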
+ */ + if (rel->rd_rel->relkind == RELKIND_PARTITIONED_TABLE && + (TRIGGER_FOR_ROW(oldtrig->tgtype))) + { + PartitionDesc partdesc = RelationGetPartitionDesc(rel); + int i; + + for (i = 0; i < partdesc->nparts; i++) + { + Relation part; + + part = relation_open(partdesc->oids[i], lockmode); + EnableDisableTrigger(part, NameStr(oldtrig->tgname), + fires_when, skip_system, lockmode); + heap_close(part, NoLock); /* keep lock till commit */ + } + } + changed = true; } @@ -2085,96 +2339,6 @@ FindTriggerIncompatibleWithInheritance(TriggerDesc *trigdesc) return NULL; } -/* - * Make a TransitionCaptureState object from a given TriggerDesc. The - * resulting object holds the flags which control whether transition tuples - * are collected when tables are modified, and the tuplestores themselves. - * Note that we copy the flags from a parent table into this struct (rather - * than using each relation's TriggerDesc directly) so that we can use it to - * control the collection of transition tuples from child tables. - * - * If there are no triggers with transition tables configured for 'trigdesc', - * then return NULL. - * - * The resulting object can be passed to the ExecAR* functions. The caller - * should set tcs_map or tcs_original_insert_tuple as appropriate when dealing - * with child tables. - */ -TransitionCaptureState * -MakeTransitionCaptureState(TriggerDesc *trigdesc) -{ - TransitionCaptureState *state = NULL; - - if (trigdesc != NULL && - (trigdesc->trig_delete_old_table || trigdesc->trig_update_old_table || - trigdesc->trig_update_new_table || trigdesc->trig_insert_new_table)) - { - MemoryContext oldcxt; - ResourceOwner saveResourceOwner; - - /* - * Normally DestroyTransitionCaptureState should be called after - * executing all AFTER triggers for the current statement. - * - * To handle error cleanup, TransitionCaptureState and the tuplestores - * it contains will live in the current [sub]transaction's memory - * context. Likewise for the current resource owner, because we also - * want to clean up temporary files spilled to disk by the tuplestore - * in that scenario. This scope is sufficient, because AFTER triggers - * with transition tables cannot be deferred (only constraint triggers - * can be deferred, and constraint triggers cannot have transition - * tables). The AFTER trigger queue may contain pointers to this - * TransitionCaptureState, but any such entries will be processed or - * discarded before the end of the current [sub]transaction. - * - * If a future release allows deferred triggers with transition - * tables, we'll need to reconsider the scope of the - * TransitionCaptureState object. 
- */ - oldcxt = MemoryContextSwitchTo(CurTransactionContext); - saveResourceOwner = CurrentResourceOwner; - - state = (TransitionCaptureState *) - palloc0(sizeof(TransitionCaptureState)); - state->tcs_delete_old_table = trigdesc->trig_delete_old_table; - state->tcs_update_old_table = trigdesc->trig_update_old_table; - state->tcs_update_new_table = trigdesc->trig_update_new_table; - state->tcs_insert_new_table = trigdesc->trig_insert_new_table; - PG_TRY(); - { - CurrentResourceOwner = CurTransactionResourceOwner; - if (trigdesc->trig_delete_old_table || trigdesc->trig_update_old_table) - state->tcs_old_tuplestore = tuplestore_begin_heap(false, false, work_mem); - if (trigdesc->trig_insert_new_table) - state->tcs_insert_tuplestore = tuplestore_begin_heap(false, false, work_mem); - if (trigdesc->trig_update_new_table) - state->tcs_update_tuplestore = tuplestore_begin_heap(false, false, work_mem); - } - PG_CATCH(); - { - CurrentResourceOwner = saveResourceOwner; - PG_RE_THROW(); - } - PG_END_TRY(); - CurrentResourceOwner = saveResourceOwner; - MemoryContextSwitchTo(oldcxt); - } - - return state; -} - -void -DestroyTransitionCaptureState(TransitionCaptureState *tcs) -{ - if (tcs->tcs_insert_tuplestore != NULL) - tuplestore_end(tcs->tcs_insert_tuplestore); - if (tcs->tcs_update_tuplestore != NULL) - tuplestore_end(tcs->tcs_update_tuplestore); - if (tcs->tcs_old_tuplestore != NULL) - tuplestore_end(tcs->tcs_old_tuplestore); - pfree(tcs); -} - /* * Call a trigger function. * @@ -2294,6 +2458,11 @@ ExecBSInsertTriggers(EState *estate, ResultRelInfo *relinfo) if (!trigdesc->trig_insert_before_statement) return; + /* no-op if we already fired BS triggers in this context */ + if (before_stmt_triggers_fired(RelationGetRelid(relinfo->ri_RelationDesc), + CMD_INSERT)) + return; + LocTriggerData.type = T_TriggerData; LocTriggerData.tg_event = TRIGGER_EVENT_INSERT | TRIGGER_EVENT_BEFORE; @@ -2403,7 +2572,7 @@ ExecBRInsertTriggers(EState *estate, ResultRelInfo *relinfo, if (newslot->tts_tupleDescriptor != tupdesc) ExecSetSlotDescriptor(newslot, tupdesc); - ExecStoreTuple(newtuple, newslot, InvalidBuffer, false); + ExecStoreHeapTuple(newtuple, newslot, false); slot = newslot; } return slot; @@ -2484,7 +2653,7 @@ ExecIRInsertTriggers(EState *estate, ResultRelInfo *relinfo, if (newslot->tts_tupleDescriptor != tupdesc) ExecSetSlotDescriptor(newslot, tupdesc); - ExecStoreTuple(newtuple, newslot, InvalidBuffer, false); + ExecStoreHeapTuple(newtuple, newslot, false); slot = newslot; } return slot; @@ -2504,6 +2673,11 @@ ExecBSDeleteTriggers(EState *estate, ResultRelInfo *relinfo) if (!trigdesc->trig_delete_before_statement) return; + /* no-op if we already fired BS triggers in this context */ + if (before_stmt_triggers_fired(RelationGetRelid(relinfo->ri_RelationDesc), + CMD_DELETE)) + return; + LocTriggerData.type = T_TriggerData; LocTriggerData.tg_event = TRIGGER_EVENT_DELETE | TRIGGER_EVENT_BEFORE; @@ -2553,11 +2727,19 @@ ExecASDeleteTriggers(EState *estate, ResultRelInfo *relinfo, false, NULL, NULL, NIL, NULL, transition_capture); } +/* + * Execute BEFORE ROW DELETE triggers. + * + * True indicates caller can proceed with the delete. False indicates caller + * need to suppress the delete and additionally if requested, we need to pass + * back the concurrently updated tuple if any. 
+ */ bool ExecBRDeleteTriggers(EState *estate, EPQState *epqstate, ResultRelInfo *relinfo, ItemPointer tupleid, - HeapTuple fdw_trigtuple) + HeapTuple fdw_trigtuple, + TupleTableSlot **epqslot) { TriggerDesc *trigdesc = relinfo->ri_TrigDesc; bool result = true; @@ -2574,6 +2756,18 @@ ExecBRDeleteTriggers(EState *estate, EPQState *epqstate, LockTupleExclusive, &newSlot); if (trigtuple == NULL) return false; + + /* + * If the tuple was concurrently updated and the caller of this + * function requested for the updated tuple, skip the trigger + * execution. + */ + if (newSlot != NULL && epqslot != NULL) + { + *epqslot = newSlot; + heap_freetuple(trigtuple); + return false; + } } else trigtuple = fdw_trigtuple; @@ -2716,6 +2910,11 @@ ExecBSUpdateTriggers(EState *estate, ResultRelInfo *relinfo) if (!trigdesc->trig_update_before_statement) return; + /* no-op if we already fired BS triggers in this context */ + if (before_stmt_triggers_fired(RelationGetRelid(relinfo->ri_RelationDesc), + CMD_UPDATE)) + return; + updatedCols = GetUpdatedColumns(relinfo, estate); LocTriggerData.type = T_TriggerData; @@ -2864,7 +3063,7 @@ ExecBRUpdateTriggers(EState *estate, EPQState *epqstate, return NULL; /* "do nothing" */ } } - if (trigtuple != fdw_trigtuple) + if (trigtuple != fdw_trigtuple && trigtuple != newtuple) heap_freetuple(trigtuple); if (newtuple != slottuple) @@ -2880,7 +3079,7 @@ ExecBRUpdateTriggers(EState *estate, EPQState *epqstate, if (newslot->tts_tupleDescriptor != tupdesc) ExecSetSlotDescriptor(newslot, tupdesc); - ExecStoreTuple(newtuple, newslot, InvalidBuffer, false); + ExecStoreHeapTuple(newtuple, newslot, false); slot = newslot; } return slot; @@ -2903,8 +3102,13 @@ ExecARUpdateTriggers(EState *estate, ResultRelInfo *relinfo, { HeapTuple trigtuple; - Assert(HeapTupleIsValid(fdw_trigtuple) ^ ItemPointerIsValid(tupleid)); - if (fdw_trigtuple == NULL) + /* + * Note: if the UPDATE is converted into a DELETE+INSERT as part of + * update-partition-key operation, then this function is also called + * separately for DELETE and INSERT to capture transition table rows. + * In such case, either old tuple or new tuple can be NULL. 
+ */ + if (fdw_trigtuple == NULL && ItemPointerIsValid(tupleid)) trigtuple = GetTupleForTrigger(estate, NULL, relinfo, @@ -2983,7 +3187,7 @@ ExecIRUpdateTriggers(EState *estate, ResultRelInfo *relinfo, if (newslot->tts_tupleDescriptor != tupdesc) ExecSetSlotDescriptor(newslot, tupdesc); - ExecStoreTuple(newtuple, newslot, InvalidBuffer, false); + ExecStoreHeapTuple(newtuple, newslot, false); slot = newslot; } return slot; @@ -3115,6 +3319,11 @@ ltrmark:; ereport(ERROR, (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE), errmsg("could not serialize access due to concurrent update"))); + if (ItemPointerIndicatesMovedPartitions(&hufd.ctid)) + ereport(ERROR, + (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE), + errmsg("tuple to be locked was already moved to another partition due to concurrent update"))); + if (!ItemPointerEquals(&hufd.ctid, &tuple.t_self)) { /* it was updated, so look at the updated version */ @@ -3150,6 +3359,7 @@ ltrmark:; case HeapTupleInvisible: elog(ERROR, "attempted to lock invisible tuple"); + break; default: ReleaseBuffer(buffer); @@ -3187,7 +3397,10 @@ ltrmark:; LockBuffer(buffer, BUFFER_LOCK_UNLOCK); } - result = heap_copytuple(&tuple); + if (HeapTupleHeaderGetNatts(tuple.t_data) < relation->rd_att->natts) + result = heap_expand_tuple(&tuple, relation->rd_att); + else + result = heap_copytuple(&tuple); ReleaseBuffer(buffer); return result; @@ -3295,26 +3508,28 @@ TriggerEnabled(EState *estate, ResultRelInfo *relinfo, if (estate->es_trig_oldtup_slot == NULL) { oldContext = MemoryContextSwitchTo(estate->es_query_cxt); - estate->es_trig_oldtup_slot = ExecInitExtraTupleSlot(estate); + estate->es_trig_oldtup_slot = + ExecInitExtraTupleSlot(estate, NULL); MemoryContextSwitchTo(oldContext); } oldslot = estate->es_trig_oldtup_slot; if (oldslot->tts_tupleDescriptor != tupdesc) ExecSetSlotDescriptor(oldslot, tupdesc); - ExecStoreTuple(oldtup, oldslot, InvalidBuffer, false); + ExecStoreHeapTuple(oldtup, oldslot, false); } if (HeapTupleIsValid(newtup)) { if (estate->es_trig_newtup_slot == NULL) { oldContext = MemoryContextSwitchTo(estate->es_query_cxt); - estate->es_trig_newtup_slot = ExecInitExtraTupleSlot(estate); + estate->es_trig_newtup_slot = + ExecInitExtraTupleSlot(estate, NULL); MemoryContextSwitchTo(oldContext); } newslot = estate->es_trig_newtup_slot; if (newslot->tts_tupleDescriptor != tupdesc) ExecSetSlotDescriptor(newslot, tupdesc); - ExecStoreTuple(newtup, newslot, InvalidBuffer, false); + ExecStoreHeapTuple(newtup, newslot, false); } /* @@ -3338,9 +3553,11 @@ TriggerEnabled(EState *estate, ResultRelInfo *relinfo, * during the current transaction tree. (BEFORE triggers are fired * immediately so we don't need any persistent state about them.) The struct * and most of its subsidiary data are kept in TopTransactionContext; however - * the individual event records are kept in a separate sub-context. This is - * done mainly so that it's easy to tell from a memory context dump how much - * space is being eaten by trigger events. + * some data that can be discarded sooner appears in the CurTransactionContext + * of the relevant subtransaction. Also, the individual event records are + * kept in a separate sub-context of TopTransactionContext. This is done + * mainly so that it's easy to tell from a memory context dump how much space + * is being eaten by trigger events. * * Because the list of pending events can grow large, we go to some * considerable effort to minimize per-event memory consumption. 
The event @@ -3400,6 +3617,13 @@ typedef SetConstraintStateData *SetConstraintState; * tuple(s). This permits storing tuples once regardless of the number of * row-level triggers on a foreign table. * + * Note that we need triggers on foreign tables to be fired in exactly the + * order they were queued, so that the tuples come out of the tuplestore in + * the right order. To ensure that, we forbid deferrable (constraint) + * triggers on foreign tables. This also ensures that such triggers do not + * get deferred into outer trigger query levels, meaning that it's okay to + * destroy the tuplestore at the end of the query level. + * * Statement-level triggers always bear AFTER_TRIGGER_1CTID, though they * require no ctid field. We lack the flag bit space to neatly represent that * distinct case, and it seems unlikely to be worth much trouble. @@ -3433,7 +3657,7 @@ typedef struct AfterTriggerSharedData Oid ats_tgoid; /* the trigger's ID */ Oid ats_relid; /* the relation it's on */ CommandId ats_firing_id; /* ID for firing cycle */ - TransitionCaptureState *ats_transition_capture; + struct AfterTriggersTableData *ats_table; /* transition table access */ } AfterTriggerSharedData; typedef struct AfterTriggerEventData *AfterTriggerEvent; @@ -3505,6 +3729,14 @@ typedef struct AfterTriggerEventList #define for_each_event_chunk(eptr, cptr, evtlist) \ for_each_chunk(cptr, evtlist) for_each_event(eptr, cptr) +/* Macros for iterating from a start point that might not be list start */ +#define for_each_chunk_from(cptr) \ + for (; cptr != NULL; cptr = cptr->next) +#define for_each_event_from(eptr, cptr) \ + for (; \ + (char *) eptr < (cptr)->freeptr; \ + eptr = (AfterTriggerEvent) (((char *) eptr) + SizeofTriggerEvent(eptr))) + /* * All per-transaction data for the AFTER TRIGGERS module. @@ -3529,60 +3761,108 @@ typedef struct AfterTriggerEventList * query_depth is the current depth of nested AfterTriggerBeginQuery calls * (-1 when the stack is empty). * - * query_stack[query_depth] is a list of AFTER trigger events queued by the - * current query (and the query_stack entries below it are lists of trigger - * events queued by calling queries). None of these are valid until the - * matching AfterTriggerEndQuery call occurs. At that point we fire - * immediate-mode triggers, and append any deferred events to the main events - * list. + * query_stack[query_depth] is the per-query-level data, including these fields: + * + * events is a list of AFTER trigger events queued by the current query. + * None of these are valid until the matching AfterTriggerEndQuery call + * occurs. At that point we fire immediate-mode triggers, and append any + * deferred events to the main events list. + * + * fdw_tuplestore is a tuplestore containing the foreign-table tuples + * needed by events queued by the current query. (Note: we use just one + * tuplestore even though more than one foreign table might be involved. + * This is okay because tuplestores don't really care what's in the tuples + * they store; but it's possible that someday it'd break.) + * + * tables is a List of AfterTriggersTableData structs for target tables + * of the current query (see below). * - * fdw_tuplestores[query_depth] is a tuplestore containing the foreign tuples - * needed for the current query. + * maxquerydepth is just the allocated length of query_stack. * - * maxquerydepth is just the allocated length of query_stack and the - * tuplestores. 
+ * trans_stack holds per-subtransaction data, including these fields: * - * state_stack is a stack of pointers to saved copies of the SET CONSTRAINTS - * state data; each subtransaction level that modifies that state first + * state is NULL or a pointer to a saved copy of the SET CONSTRAINTS + * state data. Each subtransaction level that modifies that state first * saves a copy, which we use to restore the state if we abort. * - * events_stack is a stack of copies of the events head/tail pointers, + * events is a copy of the events head/tail pointers, * which we use to restore those values during subtransaction abort. * - * depth_stack is a stack of copies of subtransaction-start-time query_depth, + * query_depth is the subtransaction-start-time value of query_depth, * which we similarly use to clean up at subtransaction abort. * - * firing_stack is a stack of copies of subtransaction-start-time - * firing_counter. We use this to recognize which deferred triggers were - * fired (or marked for firing) within an aborted subtransaction. + * firing_counter is the subtransaction-start-time value of firing_counter. + * We use this to recognize which deferred triggers were fired (or marked + * for firing) within an aborted subtransaction. * * We use GetCurrentTransactionNestLevel() to determine the correct array - * index in these stacks. maxtransdepth is the number of allocated entries in - * each stack. (By not keeping our own stack pointer, we can avoid trouble + * index in trans_stack. maxtransdepth is the number of allocated entries in + * trans_stack. (By not keeping our own stack pointer, we can avoid trouble * in cases where errors during subxact abort cause multiple invocations * of AfterTriggerEndSubXact() at the same nesting depth.) + * + * We create an AfterTriggersTableData struct for each target table of the + * current query, and each operation mode (INSERT/UPDATE/DELETE), that has + * either transition tables or statement-level triggers. This is used to + * hold the relevant transition tables, as well as info tracking whether + * we already queued the statement triggers. (We use that info to prevent + * firing the same statement triggers more than once per statement, or really + * once per transition table set.) These structs, along with the transition + * table tuplestores, live in the (sub)transaction's CurTransactionContext. + * That's sufficient lifespan because we don't allow transition tables to be + * used by deferrable triggers, so they only need to survive until + * AfterTriggerEndQuery. 
*/ +typedef struct AfterTriggersQueryData AfterTriggersQueryData; +typedef struct AfterTriggersTransData AfterTriggersTransData; +typedef struct AfterTriggersTableData AfterTriggersTableData; + typedef struct AfterTriggersData { CommandId firing_counter; /* next firing ID to assign */ SetConstraintState state; /* the active S C state */ AfterTriggerEventList events; /* deferred-event list */ - int query_depth; /* current query list index */ - AfterTriggerEventList *query_stack; /* events pending from each query */ - Tuplestorestate **fdw_tuplestores; /* foreign tuples for one row from - * each query */ - int maxquerydepth; /* allocated len of above array */ MemoryContext event_cxt; /* memory context for events, if any */ - /* these fields are just for resetting at subtrans abort: */ + /* per-query-level data: */ + AfterTriggersQueryData *query_stack; /* array of structs shown below */ + int query_depth; /* current index in above array */ + int maxquerydepth; /* allocated len of above array */ - SetConstraintState *state_stack; /* stacked S C states */ - AfterTriggerEventList *events_stack; /* stacked list pointers */ - int *depth_stack; /* stacked query_depths */ - CommandId *firing_stack; /* stacked firing_counters */ - int maxtransdepth; /* allocated len of above arrays */ + /* per-subtransaction-level data: */ + AfterTriggersTransData *trans_stack; /* array of structs shown below */ + int maxtransdepth; /* allocated len of above array */ } AfterTriggersData; +struct AfterTriggersQueryData +{ + AfterTriggerEventList events; /* events pending from this query */ + Tuplestorestate *fdw_tuplestore; /* foreign tuples for said events */ + List *tables; /* list of AfterTriggersTableData, see below */ +}; + +struct AfterTriggersTransData +{ + /* these fields are just for resetting at subtrans abort: */ + SetConstraintState state; /* saved S C state, or NULL if not yet saved */ + AfterTriggerEventList events; /* saved list pointer */ + int query_depth; /* saved query_depth */ + CommandId firing_counter; /* saved firing_counter */ +}; + +struct AfterTriggersTableData +{ + /* relid + cmdType form the lookup key for these structs: */ + Oid relid; /* target table's OID */ + CmdType cmdType; /* event type, CMD_INSERT/UPDATE/DELETE */ + bool closed; /* true when no longer OK to add tuples */ + bool before_trig_done; /* did we already queue BS triggers? */ + bool after_trig_done; /* did we already queue AS triggers? 
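With this layout, code that needs the current query level's data indexes query_stack by query_depth after making sure the array is long enough; the same guard appears in MakeTransitionCaptureState() and before_stmt_triggers_fired() further down. A sketch of that access pattern, assuming trigger.c's file-local afterTriggers state; the helper name is invented:

    static AfterTriggersQueryData *
    current_trigger_query_level(void)
    {
        if (afterTriggers.query_depth < 0)
            elog(ERROR, "not inside a trigger query level");

        if (afterTriggers.query_depth >= afterTriggers.maxquerydepth)
            AfterTriggerEnlargeQueryState();

        /* the returned qs->events, qs->fdw_tuplestore and qs->tables are valid */
        return &afterTriggers.query_stack[afterTriggers.query_depth];
    }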
*/ + AfterTriggerEventList after_trig_events; /* if so, saved list pointer */ + Tuplestorestate *old_tuplestore; /* "old" transition table, if any */ + Tuplestorestate *new_tuplestore; /* "new" transition table, if any */ +}; + static AfterTriggersData afterTriggers; static void AfterTriggerExecute(AfterTriggerEvent event, @@ -3591,50 +3871,46 @@ static void AfterTriggerExecute(AfterTriggerEvent event, Instrumentation *instr, MemoryContext per_tuple_context, TupleTableSlot *trig_tuple_slot1, - TupleTableSlot *trig_tuple_slot2, - TransitionCaptureState *transition_capture); + TupleTableSlot *trig_tuple_slot2); +static AfterTriggersTableData *GetAfterTriggersTableData(Oid relid, + CmdType cmdType); +static void AfterTriggerFreeQuery(AfterTriggersQueryData *qs); static SetConstraintState SetConstraintStateCreate(int numalloc); static SetConstraintState SetConstraintStateCopy(SetConstraintState state); static SetConstraintState SetConstraintStateAddItem(SetConstraintState state, Oid tgoid, bool tgisdeferred); +static void cancel_prior_stmt_triggers(Oid relid, CmdType cmdType, int tgevent); /* - * Gets a current query transition tuplestore and initializes it if necessary. + * Get the FDW tuplestore for the current trigger query level, creating it + * if necessary. */ static Tuplestorestate * -GetTriggerTransitionTuplestore(Tuplestorestate **tss) +GetCurrentFDWTuplestore(void) { Tuplestorestate *ret; - ret = tss[afterTriggers.query_depth]; + ret = afterTriggers.query_stack[afterTriggers.query_depth].fdw_tuplestore; if (ret == NULL) { MemoryContext oldcxt; ResourceOwner saveResourceOwner; /* - * Make the tuplestore valid until end of transaction. This is the - * allocation lifespan of the associated events list, but we really + * Make the tuplestore valid until end of subtransaction. We really * only need it until AfterTriggerEndQuery(). 
*/ - oldcxt = MemoryContextSwitchTo(TopTransactionContext); + oldcxt = MemoryContextSwitchTo(CurTransactionContext); saveResourceOwner = CurrentResourceOwner; - PG_TRY(); - { - CurrentResourceOwner = TopTransactionResourceOwner; - ret = tuplestore_begin_heap(false, false, work_mem); - } - PG_CATCH(); - { - CurrentResourceOwner = saveResourceOwner; - PG_RE_THROW(); - } - PG_END_TRY(); + CurrentResourceOwner = CurTransactionResourceOwner; + + ret = tuplestore_begin_heap(false, false, work_mem); + CurrentResourceOwner = saveResourceOwner; MemoryContextSwitchTo(oldcxt); - tss[afterTriggers.query_depth] = ret; + afterTriggers.query_stack[afterTriggers.query_depth].fdw_tuplestore = ret; } return ret; @@ -3780,7 +4056,7 @@ afterTriggerAddEvent(AfterTriggerEventList *events, if (newshared->ats_tgoid == evtshared->ats_tgoid && newshared->ats_relid == evtshared->ats_relid && newshared->ats_event == evtshared->ats_event && - newshared->ats_transition_capture == evtshared->ats_transition_capture && + newshared->ats_table == evtshared->ats_table && newshared->ats_firing_id == 0) break; } @@ -3812,14 +4088,12 @@ static void afterTriggerFreeEventList(AfterTriggerEventList *events) { AfterTriggerEventChunk *chunk; - AfterTriggerEventChunk *next_chunk; - for (chunk = events->head; chunk != NULL; chunk = next_chunk) + while ((chunk = events->head) != NULL) { - next_chunk = chunk->next; + events->head = chunk->next; pfree(chunk); } - events->head = NULL; events->tail = NULL; events->tailfree = NULL; } @@ -3863,6 +4137,45 @@ afterTriggerRestoreEventList(AfterTriggerEventList *events, } } +/* ---------- + * afterTriggerDeleteHeadEventChunk() + * + * Remove the first chunk of events from the query level's event list. + * Keep any event list pointers elsewhere in the query level's data + * structures in sync. + * ---------- + */ +static void +afterTriggerDeleteHeadEventChunk(AfterTriggersQueryData *qs) +{ + AfterTriggerEventChunk *target = qs->events.head; + ListCell *lc; + + Assert(target && target->next); + + /* + * First, update any pointers in the per-table data, so that they won't be + * dangling. Resetting obsoleted pointers to NULL will make + * cancel_prior_stmt_triggers start from the list head, which is fine. + */ + foreach(lc, qs->tables) + { + AfterTriggersTableData *table = (AfterTriggersTableData *) lfirst(lc); + + if (table->after_trig_done && + table->after_trig_events.tail == target) + { + table->after_trig_events.head = NULL; + table->after_trig_events.tail = NULL; + table->after_trig_events.tailfree = NULL; + } + } + + /* Now we can flush the head chunk */ + qs->events.head = target->next; + pfree(target); +} + /* ---------- * AfterTriggerExecute() @@ -3892,8 +4205,7 @@ AfterTriggerExecute(AfterTriggerEvent event, FmgrInfo *finfo, Instrumentation *instr, MemoryContext per_tuple_context, TupleTableSlot *trig_tuple_slot1, - TupleTableSlot *trig_tuple_slot2, - TransitionCaptureState *transition_capture) + TupleTableSlot *trig_tuple_slot2) { AfterTriggerShared evtshared = GetTriggerSharedData(event); Oid tgoid = evtshared->ats_tgoid; @@ -3934,9 +4246,7 @@ AfterTriggerExecute(AfterTriggerEvent event, { case AFTER_TRIGGER_FDW_FETCH: { - Tuplestorestate *fdw_tuplestore = - GetTriggerTransitionTuplestore - (afterTriggers.fdw_tuplestores); + Tuplestorestate *fdw_tuplestore = GetCurrentFDWTuplestore(); if (!tuplestore_gettupleslot(fdw_tuplestore, true, false, trig_tuple_slot1)) @@ -4006,36 +4316,25 @@ AfterTriggerExecute(AfterTriggerEvent event, } /* - * Set up the tuplestore information. 
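GetCurrentFDWTuplestore() now gives the tuplestore (sub)transaction lifespan by allocating it in CurTransactionContext under the subtransaction's resource owner; MakeTransitionCaptureState() below uses the same pattern for the transition tables. The pattern, distilled into a standalone backend helper (the function name is invented):

    #include "postgres.h"
    #include "miscadmin.h"
    #include "utils/memutils.h"
    #include "utils/resowner.h"
    #include "utils/tuplestore.h"

    /* Create a tuplestore that lives until the end of the current
     * (sub)transaction rather than only the current query. */
    static Tuplestorestate *
    make_subxact_tuplestore(void)
    {
        MemoryContext oldcxt;
        ResourceOwner saveowner;
        Tuplestorestate *ts;

        oldcxt = MemoryContextSwitchTo(CurTransactionContext);
        saveowner = CurrentResourceOwner;
        CurrentResourceOwner = CurTransactionResourceOwner;

        ts = tuplestore_begin_heap(false, false, work_mem);

        CurrentResourceOwner = saveowner;
        MemoryContextSwitchTo(oldcxt);

        return ts;
    }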
+ * Set up the tuplestore information to let the trigger have access to + * transition tables. When we first make a transition table available to + * a trigger, mark it "closed" so that it cannot change anymore. If any + * additional events of the same type get queued in the current trigger + * query level, they'll go into new transition tables. */ LocTriggerData.tg_oldtable = LocTriggerData.tg_newtable = NULL; - if (transition_capture != NULL) + if (evtshared->ats_table) { if (LocTriggerData.tg_trigger->tgoldtable) - LocTriggerData.tg_oldtable = transition_capture->tcs_old_tuplestore; - if (LocTriggerData.tg_trigger->tgnewtable) { - /* - * Currently a trigger with transition tables may only be defined - * for a single event type (here AFTER INSERT or AFTER UPDATE, but - * not AFTER INSERT OR ...). - */ - Assert((TRIGGER_FOR_INSERT(LocTriggerData.tg_trigger->tgtype) != 0) ^ - (TRIGGER_FOR_UPDATE(LocTriggerData.tg_trigger->tgtype) != 0)); + LocTriggerData.tg_oldtable = evtshared->ats_table->old_tuplestore; + evtshared->ats_table->closed = true; + } - /* - * Show either the insert or update new tuple images, depending on - * which event type the trigger was registered for. A single - * statement may have produced both in the case of INSERT ... ON - * CONFLICT ... DO UPDATE, and in that case the event determines - * which tuplestore the trigger sees as the NEW TABLE. - */ - if (TRIGGER_FOR_INSERT(LocTriggerData.tg_trigger->tgtype)) - LocTriggerData.tg_newtable = - transition_capture->tcs_insert_tuplestore; - else - LocTriggerData.tg_newtable = - transition_capture->tcs_update_tuplestore; + if (LocTriggerData.tg_trigger->tgnewtable) + { + LocTriggerData.tg_newtable = evtshared->ats_table->new_tuplestore; + evtshared->ats_table->closed = true; } } @@ -4089,10 +4388,10 @@ AfterTriggerExecute(AfterTriggerEvent event, * If move_list isn't NULL, events that are not to be invoked now are * transferred to move_list. * - * When immediate_only is TRUE, do not invoke currently-deferred triggers. - * (This will be FALSE only at main transaction exit.) + * When immediate_only is true, do not invoke currently-deferred triggers. + * (This will be false only at main transaction exit.) * - * Returns TRUE if any invokable events were found. + * Returns true if any invokable events were found. */ static bool afterTriggerMarkEvents(AfterTriggerEventList *events, @@ -4156,14 +4455,14 @@ afterTriggerMarkEvents(AfterTriggerEventList *events, * make one locally to cache the info in case there are multiple trigger * events per rel. * - * When delete_ok is TRUE, it's safe to delete fully-processed events. + * When delete_ok is true, it's safe to delete fully-processed events. * (We are not very tense about that: we simply reset a chunk to be empty * if all its events got fired. The objective here is just to avoid useless * rescanning of events when a trigger queues new events during transaction * end, so it's not necessary to worry much about the case where only * some events are fired.) * - * Returns TRUE if no unfired events remain in the list (this allows us + * Returns true if no unfired events remain in the list (this allows us * to avoid repeating afterTriggerMarkEvents). */ static bool @@ -4245,8 +4544,7 @@ afterTriggerInvokeEvents(AfterTriggerEventList *events, * won't try to re-fire it. */ AfterTriggerExecute(event, rel, trigdesc, finfo, instr, - per_tuple_context, slot1, slot2, - evtshared->ats_transition_capture); + per_tuple_context, slot1, slot2); /* * Mark the event as done. 
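On the consuming side, a trigger function sees these tuplestores as TriggerData.tg_oldtable / tg_newtable and can expose them to SQL through SPI_register_trigger_data(), which registers them under the names given in the trigger's REFERENCING clause. A sketch of such a C trigger, assuming it was created with REFERENCING NEW TABLE AS newrows; the function and table names are made up:

    #include "postgres.h"
    #include "commands/trigger.h"
    #include "executor/spi.h"
    #include "fmgr.h"

    PG_MODULE_MAGIC;

    PG_FUNCTION_INFO_V1(count_new_rows_trig);

    Datum
    count_new_rows_trig(PG_FUNCTION_ARGS)
    {
        TriggerData *trigdata;

        if (!CALLED_AS_TRIGGER(fcinfo))
            elog(ERROR, "count_new_rows_trig: not called as trigger");
        trigdata = (TriggerData *) fcinfo->context;

        if (SPI_connect() != SPI_OK_CONNECT)
            elog(ERROR, "SPI_connect failed");

        /* make tg_oldtable/tg_newtable queryable under their REFERENCING names */
        SPI_register_trigger_data(trigdata);

        /* "newrows" is whatever the REFERENCING NEW TABLE AS clause named */
        SPI_exec("SELECT count(*) FROM newrows", 0);
        elog(NOTICE, "transition table holds " UINT64_FORMAT " rows",
             SPI_processed);

        SPI_finish();

        return PointerGetDatum(NULL);
    }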
@@ -4270,7 +4568,7 @@ afterTriggerInvokeEvents(AfterTriggerEventList *events, /* * If it's last chunk, must sync event list's tailfree too. Note * that delete_ok must NOT be passed as true if there could be - * stacked AfterTriggerEventList values pointing at this event + * additional AfterTriggerEventList values pointing at this event * list, since we'd fail to fix their copies of tailfree. */ if (chunk == events->tail) @@ -4296,6 +4594,159 @@ afterTriggerInvokeEvents(AfterTriggerEventList *events, } +/* + * GetAfterTriggersTableData + * + * Find or create an AfterTriggersTableData struct for the specified + * trigger event (relation + operation type). Ignore existing structs + * marked "closed"; we don't want to put any additional tuples into them, + * nor change their stmt-triggers-fired state. + * + * Note: the AfterTriggersTableData list is allocated in the current + * (sub)transaction's CurTransactionContext. This is OK because + * we don't need it to live past AfterTriggerEndQuery. + */ +static AfterTriggersTableData * +GetAfterTriggersTableData(Oid relid, CmdType cmdType) +{ + AfterTriggersTableData *table; + AfterTriggersQueryData *qs; + MemoryContext oldcxt; + ListCell *lc; + + /* Caller should have ensured query_depth is OK. */ + Assert(afterTriggers.query_depth >= 0 && + afterTriggers.query_depth < afterTriggers.maxquerydepth); + qs = &afterTriggers.query_stack[afterTriggers.query_depth]; + + foreach(lc, qs->tables) + { + table = (AfterTriggersTableData *) lfirst(lc); + if (table->relid == relid && table->cmdType == cmdType && + !table->closed) + return table; + } + + oldcxt = MemoryContextSwitchTo(CurTransactionContext); + + table = (AfterTriggersTableData *) palloc0(sizeof(AfterTriggersTableData)); + table->relid = relid; + table->cmdType = cmdType; + qs->tables = lappend(qs->tables, table); + + MemoryContextSwitchTo(oldcxt); + + return table; +} + + +/* + * MakeTransitionCaptureState + * + * Make a TransitionCaptureState object for the given TriggerDesc, target + * relation, and operation type. The TCS object holds all the state needed + * to decide whether to capture tuples in transition tables. + * + * If there are no triggers in 'trigdesc' that request relevant transition + * tables, then return NULL. + * + * The resulting object can be passed to the ExecAR* functions. The caller + * should set tcs_map or tcs_original_insert_tuple as appropriate when dealing + * with child tables. + * + * Note that we copy the flags from a parent table into this struct (rather + * than subsequently using the relation's TriggerDesc directly) so that we can + * use it to control collection of transition tuples from child tables. + * + * Per SQL spec, all operations of the same kind (INSERT/UPDATE/DELETE) + * on the same table during one query should share one transition table. + * Therefore, the Tuplestores are owned by an AfterTriggersTableData struct + * looked up using the table OID + CmdType, and are merely referenced by + * the TransitionCaptureState objects we hand out to callers. + */ +TransitionCaptureState * +MakeTransitionCaptureState(TriggerDesc *trigdesc, Oid relid, CmdType cmdType) +{ + TransitionCaptureState *state; + bool need_old, + need_new; + AfterTriggersTableData *table; + MemoryContext oldcxt; + ResourceOwner saveResourceOwner; + + if (trigdesc == NULL) + return NULL; + + /* Detect which table(s) we need. 
*/ + switch (cmdType) + { + case CMD_INSERT: + need_old = false; + need_new = trigdesc->trig_insert_new_table; + break; + case CMD_UPDATE: + need_old = trigdesc->trig_update_old_table; + need_new = trigdesc->trig_update_new_table; + break; + case CMD_DELETE: + need_old = trigdesc->trig_delete_old_table; + need_new = false; + break; + default: + elog(ERROR, "unexpected CmdType: %d", (int) cmdType); + need_old = need_new = false; /* keep compiler quiet */ + break; + } + if (!need_old && !need_new) + return NULL; + + /* Check state, like AfterTriggerSaveEvent. */ + if (afterTriggers.query_depth < 0) + elog(ERROR, "MakeTransitionCaptureState() called outside of query"); + + /* Be sure we have enough space to record events at this query depth. */ + if (afterTriggers.query_depth >= afterTriggers.maxquerydepth) + AfterTriggerEnlargeQueryState(); + + /* + * Find or create an AfterTriggersTableData struct to hold the + * tuplestore(s). If there's a matching struct but it's marked closed, + * ignore it; we need a newer one. + * + * Note: the AfterTriggersTableData list, as well as the tuplestores, are + * allocated in the current (sub)transaction's CurTransactionContext, and + * the tuplestores are managed by the (sub)transaction's resource owner. + * This is sufficient lifespan because we do not allow triggers using + * transition tables to be deferrable; they will be fired during + * AfterTriggerEndQuery, after which it's okay to delete the data. + */ + table = GetAfterTriggersTableData(relid, cmdType); + + /* Now create required tuplestore(s), if we don't have them already. */ + oldcxt = MemoryContextSwitchTo(CurTransactionContext); + saveResourceOwner = CurrentResourceOwner; + CurrentResourceOwner = CurTransactionResourceOwner; + + if (need_old && table->old_tuplestore == NULL) + table->old_tuplestore = tuplestore_begin_heap(false, false, work_mem); + if (need_new && table->new_tuplestore == NULL) + table->new_tuplestore = tuplestore_begin_heap(false, false, work_mem); + + CurrentResourceOwner = saveResourceOwner; + MemoryContextSwitchTo(oldcxt); + + /* Now build the TransitionCaptureState struct, in caller's context */ + state = (TransitionCaptureState *) palloc0(sizeof(TransitionCaptureState)); + state->tcs_delete_old_table = trigdesc->trig_delete_old_table; + state->tcs_update_old_table = trigdesc->trig_update_old_table; + state->tcs_update_new_table = trigdesc->trig_update_new_table; + state->tcs_insert_new_table = trigdesc->trig_insert_new_table; + state->tcs_private = table; + + return state; +} + + /* ---------- * AfterTriggerBeginXact() * @@ -4319,14 +4770,10 @@ AfterTriggerBeginXact(void) */ Assert(afterTriggers.state == NULL); Assert(afterTriggers.query_stack == NULL); - Assert(afterTriggers.fdw_tuplestores == NULL); Assert(afterTriggers.maxquerydepth == 0); Assert(afterTriggers.event_cxt == NULL); Assert(afterTriggers.events.head == NULL); - Assert(afterTriggers.state_stack == NULL); - Assert(afterTriggers.events_stack == NULL); - Assert(afterTriggers.depth_stack == NULL); - Assert(afterTriggers.firing_stack == NULL); + Assert(afterTriggers.trans_stack == NULL); Assert(afterTriggers.maxtransdepth == 0); } @@ -4362,8 +4809,7 @@ AfterTriggerBeginQuery(void) void AfterTriggerEndQuery(EState *estate) { - AfterTriggerEventList *events; - Tuplestorestate *fdw_tuplestore; + AfterTriggersQueryData *qs; /* Must be inside a query, too */ Assert(afterTriggers.query_depth >= 0); @@ -4393,38 +4839,103 @@ AfterTriggerEndQuery(EState *estate) * will instead fire any triggers in a dedicated query 
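The caller side of MakeTransitionCaptureState() is not shown in this hunk; roughly, the executor asks for one capture state per target table and operation when initializing the plan and keeps it for the life of the statement. A sketch under those assumptions (resultRelInfo stands for the executor's ResultRelInfo for the target table; the variable names are illustrative, not from the patch):

    TransitionCaptureState *transition_capture;

    transition_capture =
        MakeTransitionCaptureState(resultRelInfo->ri_TrigDesc,
                                   RelationGetRelid(resultRelInfo->ri_RelationDesc),
                                   CMD_INSERT);

    /*
     * NULL means no trigger wants transition tables for INSERT on this
     * table.  Otherwise the same pointer is passed to the ExecAR* calls for
     * every row, and tcs_map / tcs_original_insert_tuple are filled in by
     * the caller when rows are routed into child tables or partitions.
     */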
level. Foreign key * enforcement triggers do add to the current query level, thanks to their * passing fire_triggers = false to SPI_execute_snapshot(). Other - * C-language triggers might do likewise. Be careful here: firing a - * trigger could result in query_stack being repalloc'd, so we can't save - * its address across afterTriggerInvokeEvents calls. + * C-language triggers might do likewise. * * If we find no firable events, we don't have to increment * firing_counter. */ + qs = &afterTriggers.query_stack[afterTriggers.query_depth]; + for (;;) { - events = &afterTriggers.query_stack[afterTriggers.query_depth]; - if (afterTriggerMarkEvents(events, &afterTriggers.events, true)) + if (afterTriggerMarkEvents(&qs->events, &afterTriggers.events, true)) { CommandId firing_id = afterTriggers.firing_counter++; + AfterTriggerEventChunk *oldtail = qs->events.tail; - /* OK to delete the immediate events after processing them */ - if (afterTriggerInvokeEvents(events, firing_id, estate, true)) + if (afterTriggerInvokeEvents(&qs->events, firing_id, estate, false)) break; /* all fired */ + + /* + * Firing a trigger could result in query_stack being repalloc'd, + * so we must recalculate qs after each afterTriggerInvokeEvents + * call. Furthermore, it's unsafe to pass delete_ok = true here, + * because that could cause afterTriggerInvokeEvents to try to + * access qs->events after the stack has been repalloc'd. + */ + qs = &afterTriggers.query_stack[afterTriggers.query_depth]; + + /* + * We'll need to scan the events list again. To reduce the cost + * of doing so, get rid of completely-fired chunks. We know that + * all events were marked IN_PROGRESS or DONE at the conclusion of + * afterTriggerMarkEvents, so any still-interesting events must + * have been added after that, and so must be in the chunk that + * was then the tail chunk, or in later chunks. So, zap all + * chunks before oldtail. This is approximately the same set of + * events we would have gotten rid of by passing delete_ok = true. + */ + Assert(oldtail != NULL); + while (qs->events.head != oldtail) + afterTriggerDeleteHeadEventChunk(qs); } else break; } - /* Release query-local storage for events, including tuplestore if any */ - fdw_tuplestore = afterTriggers.fdw_tuplestores[afterTriggers.query_depth]; - if (fdw_tuplestore) + /* Release query-level-local storage, including tuplestores if any */ + AfterTriggerFreeQuery(&afterTriggers.query_stack[afterTriggers.query_depth]); + + afterTriggers.query_depth--; +} + + +/* + * AfterTriggerFreeQuery + * Release subsidiary storage for a trigger query level. + * This includes closing down tuplestores. + * Note: it's important for this to be safe if interrupted by an error + * and then called again for the same query level. 
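The refetch of qs in the loop above guards against a general hazard: afterTriggers.query_stack can be repalloc'd (and therefore move) whenever a fired trigger opens a new trigger query level, so a pointer into it must not be cached across such calls. In miniature, with illustrative placement:

    AfterTriggersQueryData *qs;

    qs = &afterTriggers.query_stack[afterTriggers.query_depth];

    /* ... anything that might call AfterTriggerEnlargeQueryState(),
     * e.g. firing a trigger that begins a new query level ... */

    /* query_stack may have moved; recompute the pointer before reuse */
    qs = &afterTriggers.query_stack[afterTriggers.query_depth];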
+ */ +static void +AfterTriggerFreeQuery(AfterTriggersQueryData *qs) +{ + Tuplestorestate *ts; + List *tables; + ListCell *lc; + + /* Drop the trigger events */ + afterTriggerFreeEventList(&qs->events); + + /* Drop FDW tuplestore if any */ + ts = qs->fdw_tuplestore; + qs->fdw_tuplestore = NULL; + if (ts) + tuplestore_end(ts); + + /* Release per-table subsidiary storage */ + tables = qs->tables; + foreach(lc, tables) { - tuplestore_end(fdw_tuplestore); - afterTriggers.fdw_tuplestores[afterTriggers.query_depth] = NULL; + AfterTriggersTableData *table = (AfterTriggersTableData *) lfirst(lc); + + ts = table->old_tuplestore; + table->old_tuplestore = NULL; + if (ts) + tuplestore_end(ts); + ts = table->new_tuplestore; + table->new_tuplestore = NULL; + if (ts) + tuplestore_end(ts); } - afterTriggerFreeEventList(&afterTriggers.query_stack[afterTriggers.query_depth]); - afterTriggers.query_depth--; + /* + * Now free the AfterTriggersTableData structs and list cells. Reset list + * pointer first; if list_free_deep somehow gets an error, better to leak + * that storage than have an infinite loop. + */ + qs->tables = NIL; + list_free_deep(tables); } @@ -4521,10 +5032,7 @@ AfterTriggerEndXact(bool isCommit) * large, we let the eventual reset of TopTransactionContext free the * memory instead of doing it here. */ - afterTriggers.state_stack = NULL; - afterTriggers.events_stack = NULL; - afterTriggers.depth_stack = NULL; - afterTriggers.firing_stack = NULL; + afterTriggers.trans_stack = NULL; afterTriggers.maxtransdepth = 0; @@ -4534,7 +5042,6 @@ AfterTriggerEndXact(bool isCommit) * memory here. */ afterTriggers.query_stack = NULL; - afterTriggers.fdw_tuplestores = NULL; afterTriggers.maxquerydepth = 0; afterTriggers.state = NULL; @@ -4553,48 +5060,28 @@ AfterTriggerBeginSubXact(void) int my_level = GetCurrentTransactionNestLevel(); /* - * Allocate more space in the stacks if needed. (Note: because the + * Allocate more space in the trans_stack if needed. (Note: because the * minimum nest level of a subtransaction is 2, we waste the first couple - * entries of each array; not worth the notational effort to avoid it.) + * entries of the array; not worth the notational effort to avoid it.) 
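AfterTriggerFreeQuery() consistently detaches each pointer before releasing what it points to, so that if an error interrupts the cleanup, a repeated call cannot free the same object twice. The idiom, distilled (the helper name is invented; the types are trigger.c-local):

    static void
    free_table_tuplestores(AfterTriggersTableData *table)
    {
        Tuplestorestate *ts;

        ts = table->old_tuplestore;
        table->old_tuplestore = NULL;   /* detach first ... */
        if (ts)
            tuplestore_end(ts);         /* ... so a retry cannot free it again */

        ts = table->new_tuplestore;
        table->new_tuplestore = NULL;
        if (ts)
            tuplestore_end(ts);
    }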
*/ while (my_level >= afterTriggers.maxtransdepth) { if (afterTriggers.maxtransdepth == 0) { - MemoryContext old_cxt; - - old_cxt = MemoryContextSwitchTo(TopTransactionContext); - -#define DEFTRIG_INITALLOC 8 - afterTriggers.state_stack = (SetConstraintState *) - palloc(DEFTRIG_INITALLOC * sizeof(SetConstraintState)); - afterTriggers.events_stack = (AfterTriggerEventList *) - palloc(DEFTRIG_INITALLOC * sizeof(AfterTriggerEventList)); - afterTriggers.depth_stack = (int *) - palloc(DEFTRIG_INITALLOC * sizeof(int)); - afterTriggers.firing_stack = (CommandId *) - palloc(DEFTRIG_INITALLOC * sizeof(CommandId)); - afterTriggers.maxtransdepth = DEFTRIG_INITALLOC; - - MemoryContextSwitchTo(old_cxt); + /* Arbitrarily initialize for max of 8 subtransaction levels */ + afterTriggers.trans_stack = (AfterTriggersTransData *) + MemoryContextAlloc(TopTransactionContext, + 8 * sizeof(AfterTriggersTransData)); + afterTriggers.maxtransdepth = 8; } else { - /* repalloc will keep the stacks in the same context */ + /* repalloc will keep the stack in the same context */ int new_alloc = afterTriggers.maxtransdepth * 2; - afterTriggers.state_stack = (SetConstraintState *) - repalloc(afterTriggers.state_stack, - new_alloc * sizeof(SetConstraintState)); - afterTriggers.events_stack = (AfterTriggerEventList *) - repalloc(afterTriggers.events_stack, - new_alloc * sizeof(AfterTriggerEventList)); - afterTriggers.depth_stack = (int *) - repalloc(afterTriggers.depth_stack, - new_alloc * sizeof(int)); - afterTriggers.firing_stack = (CommandId *) - repalloc(afterTriggers.firing_stack, - new_alloc * sizeof(CommandId)); + afterTriggers.trans_stack = (AfterTriggersTransData *) + repalloc(afterTriggers.trans_stack, + new_alloc * sizeof(AfterTriggersTransData)); afterTriggers.maxtransdepth = new_alloc; } } @@ -4604,10 +5091,10 @@ AfterTriggerBeginSubXact(void) * is not saved until/unless changed. Likewise, we don't make a * per-subtransaction event context until needed. */ - afterTriggers.state_stack[my_level] = NULL; - afterTriggers.events_stack[my_level] = afterTriggers.events; - afterTriggers.depth_stack[my_level] = afterTriggers.query_depth; - afterTriggers.firing_stack[my_level] = afterTriggers.firing_counter; + afterTriggers.trans_stack[my_level].state = NULL; + afterTriggers.trans_stack[my_level].events = afterTriggers.events; + afterTriggers.trans_stack[my_level].query_depth = afterTriggers.query_depth; + afterTriggers.trans_stack[my_level].firing_counter = afterTriggers.firing_counter; } /* @@ -4631,70 +5118,58 @@ AfterTriggerEndSubXact(bool isCommit) { Assert(my_level < afterTriggers.maxtransdepth); /* If we saved a prior state, we don't need it anymore */ - state = afterTriggers.state_stack[my_level]; + state = afterTriggers.trans_stack[my_level].state; if (state != NULL) pfree(state); /* this avoids double pfree if error later: */ - afterTriggers.state_stack[my_level] = NULL; + afterTriggers.trans_stack[my_level].state = NULL; Assert(afterTriggers.query_depth == - afterTriggers.depth_stack[my_level]); + afterTriggers.trans_stack[my_level].query_depth); } else { /* * Aborting. It is possible subxact start failed before calling * AfterTriggerBeginSubXact, in which case we mustn't risk touching - * stack levels that aren't there. + * trans_stack levels that aren't there. */ if (my_level >= afterTriggers.maxtransdepth) return; /* - * Release any event lists from queries being aborted, and restore + * Release query-level storage for queries being aborted, and restore * query_depth to its pre-subxact value. 
This assumes that a * subtransaction will not add events to query levels started in a * earlier transaction state. */ - while (afterTriggers.query_depth > afterTriggers.depth_stack[my_level]) + while (afterTriggers.query_depth > afterTriggers.trans_stack[my_level].query_depth) { if (afterTriggers.query_depth < afterTriggers.maxquerydepth) - { - Tuplestorestate *ts; - - ts = afterTriggers.fdw_tuplestores[afterTriggers.query_depth]; - if (ts) - { - tuplestore_end(ts); - afterTriggers.fdw_tuplestores[afterTriggers.query_depth] = NULL; - } - - afterTriggerFreeEventList(&afterTriggers.query_stack[afterTriggers.query_depth]); - } - + AfterTriggerFreeQuery(&afterTriggers.query_stack[afterTriggers.query_depth]); afterTriggers.query_depth--; } Assert(afterTriggers.query_depth == - afterTriggers.depth_stack[my_level]); + afterTriggers.trans_stack[my_level].query_depth); /* * Restore the global deferred-event list to its former length, * discarding any events queued by the subxact. */ afterTriggerRestoreEventList(&afterTriggers.events, - &afterTriggers.events_stack[my_level]); + &afterTriggers.trans_stack[my_level].events); /* * Restore the trigger state. If the saved state is NULL, then this * subxact didn't save it, so it doesn't need restoring. */ - state = afterTriggers.state_stack[my_level]; + state = afterTriggers.trans_stack[my_level].state; if (state != NULL) { pfree(afterTriggers.state); afterTriggers.state = state; } /* this avoids double pfree if error later: */ - afterTriggers.state_stack[my_level] = NULL; + afterTriggers.trans_stack[my_level].state = NULL; /* * Scan for any remaining deferred events that were marked DONE or IN @@ -4704,7 +5179,7 @@ AfterTriggerEndSubXact(bool isCommit) * (This essentially assumes that the current subxact includes all * subxacts started after it.) */ - subxact_firing_id = afterTriggers.firing_stack[my_level]; + subxact_firing_id = afterTriggers.trans_stack[my_level].firing_counter; for_each_event_chunk(event, chunk, afterTriggers.events) { AfterTriggerShared evtshared = GetTriggerSharedData(event); @@ -4740,12 +5215,9 @@ AfterTriggerEnlargeQueryState(void) { int new_alloc = Max(afterTriggers.query_depth + 1, 8); - afterTriggers.query_stack = (AfterTriggerEventList *) + afterTriggers.query_stack = (AfterTriggersQueryData *) MemoryContextAlloc(TopTransactionContext, - new_alloc * sizeof(AfterTriggerEventList)); - afterTriggers.fdw_tuplestores = (Tuplestorestate **) - MemoryContextAllocZero(TopTransactionContext, - new_alloc * sizeof(Tuplestorestate *)); + new_alloc * sizeof(AfterTriggersQueryData)); afterTriggers.maxquerydepth = new_alloc; } else @@ -4755,27 +5227,22 @@ AfterTriggerEnlargeQueryState(void) int new_alloc = Max(afterTriggers.query_depth + 1, old_alloc * 2); - afterTriggers.query_stack = (AfterTriggerEventList *) + afterTriggers.query_stack = (AfterTriggersQueryData *) repalloc(afterTriggers.query_stack, - new_alloc * sizeof(AfterTriggerEventList)); - afterTriggers.fdw_tuplestores = (Tuplestorestate **) - repalloc(afterTriggers.fdw_tuplestores, - new_alloc * sizeof(Tuplestorestate *)); - /* Clear newly-allocated slots for subsequent lazy initialization. 
*/ - memset(afterTriggers.fdw_tuplestores + old_alloc, - 0, (new_alloc - old_alloc) * sizeof(Tuplestorestate *)); + new_alloc * sizeof(AfterTriggersQueryData)); afterTriggers.maxquerydepth = new_alloc; } - /* Initialize new query lists to empty */ + /* Initialize new array entries to empty */ while (init_depth < afterTriggers.maxquerydepth) { - AfterTriggerEventList *events; + AfterTriggersQueryData *qs = &afterTriggers.query_stack[init_depth]; - events = &afterTriggers.query_stack[init_depth]; - events->head = NULL; - events->tail = NULL; - events->tailfree = NULL; + qs->events.head = NULL; + qs->events.tail = NULL; + qs->events.tailfree = NULL; + qs->fdw_tuplestore = NULL; + qs->tables = NIL; ++init_depth; } @@ -4873,9 +5340,9 @@ AfterTriggerSetState(ConstraintsSetStmt *stmt) * save it so it can be restored if the subtransaction aborts. */ if (my_level > 1 && - afterTriggers.state_stack[my_level] == NULL) + afterTriggers.trans_stack[my_level].state == NULL) { - afterTriggers.state_stack[my_level] = + afterTriggers.trans_stack[my_level].state = SetConstraintStateCopy(afterTriggers.state); } @@ -4913,6 +5380,9 @@ AfterTriggerSetState(ConstraintsSetStmt *stmt) * constraints within the first search-path schema that has any * matches, but disregard matches in schemas beyond the first match. * (This is a bit odd but it's the historical behavior.) + * + * A constraint in a partitioned table may have corresponding + * constraints in the partitions. Grab those too. */ conrel = heap_open(ConstraintRelationId, AccessShareLock); @@ -5007,6 +5477,32 @@ AfterTriggerSetState(ConstraintsSetStmt *stmt) constraint->relname))); } + /* + * Scan for any possible descendants of the constraints. We append + * whatever we find to the same list that we're scanning; this has the + * effect that we create new scans for those, too, so if there are + * further descendents, we'll also catch them. + */ + foreach(lc, conoidlist) + { + Oid parent = lfirst_oid(lc); + ScanKeyData key; + SysScanDesc scan; + HeapTuple tuple; + + ScanKeyInit(&key, + Anum_pg_constraint_conparentid, + BTEqualStrategyNumber, F_OIDEQ, + ObjectIdGetDatum(parent)); + + scan = systable_beginscan(conrel, ConstraintParentIndexId, true, NULL, 1, &key); + + while (HeapTupleIsValid(tuple = systable_getnext(scan))) + conoidlist = lappend_oid(conoidlist, HeapTupleGetOid(tuple)); + + systable_endscan(scan); + } + heap_close(conrel, AccessShareLock); /* @@ -5184,7 +5680,7 @@ AfterTriggerPendingOnRel(Oid relid) */ for (depth = 0; depth <= afterTriggers.query_depth && depth < afterTriggers.maxquerydepth; depth++) { - for_each_event_chunk(event, chunk, afterTriggers.query_stack[depth]) + for_each_event_chunk(event, chunk, afterTriggers.query_stack[depth].events) { AfterTriggerShared evtshared = GetTriggerSharedData(event); @@ -5211,7 +5707,12 @@ AfterTriggerPendingOnRel(Oid relid) * triggers actually need to be queued. It is also called after each row, * even if there are no triggers for that event, if there are any AFTER * STATEMENT triggers for the statement which use transition tables, so that - * the transition tuplestores can be built. + * the transition tuplestores can be built. Furthermore, if the transition + * capture is happening for UPDATEd rows being moved to another partition due + * to the partition-key being changed, then this function is called once when + * the row is deleted (to capture OLD row), and once when the row is inserted + * into another partition (to capture NEW row). 
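The descendant search above appends to conoidlist while a foreach() over that same list is in progress; with this List implementation the loop follows each cell's next pointer, so newly appended children are visited on later iterations and grandchildren are found without explicit recursion. The shape of that traversal, assuming a hypothetical find_child_oids() helper standing in for the pg_constraint.conparentid index scan shown above:

    static List *
    collect_descendant_oids(Oid root_oid)
    {
        List       *all = list_make1_oid(root_oid);
        ListCell   *lc;

        foreach(lc, all)
        {
            Oid         parent = lfirst_oid(lc);
            List       *children = find_child_oids(parent); /* hypothetical */
            ListCell   *c;

            foreach(c, children)
                all = lappend_oid(all, lfirst_oid(c));
        }

        return all;             /* root plus every descendant, breadth-first */
    }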
This is done separately because + * DELETE and INSERT happen on different tables. * * Transition tuplestores are built now, rather than when events are pulled * off of the queue because AFTER ROW triggers are allowed to select from the @@ -5229,7 +5730,7 @@ AfterTriggerSaveEvent(EState *estate, ResultRelInfo *relinfo, TriggerDesc *trigdesc = relinfo->ri_TrigDesc; AfterTriggerEventData new_event; AfterTriggerSharedData new_shared; - char relkind = relinfo->ri_RelationDesc->rd_rel->relkind; + char relkind = rel->rd_rel->relkind; int tgtype_event; int tgtype_level; int i; @@ -5260,17 +5761,31 @@ AfterTriggerSaveEvent(EState *estate, ResultRelInfo *relinfo, bool update_new_table = transition_capture->tcs_update_new_table; bool insert_new_table = transition_capture->tcs_insert_new_table;; - if ((event == TRIGGER_EVENT_DELETE && delete_old_table) || - (event == TRIGGER_EVENT_UPDATE && update_old_table)) + /* + * For INSERT events newtup should be non-NULL, for DELETE events + * oldtup should be non-NULL, whereas for UPDATE events normally both + * oldtup and newtup are non-NULL. But for UPDATE events fired for + * capturing transition tuples during UPDATE partition-key row + * movement, oldtup is NULL when the event is for a row being + * inserted, whereas newtup is NULL when the event is for a row being + * deleted. + */ + Assert(!(event == TRIGGER_EVENT_DELETE && delete_old_table && + oldtup == NULL)); + Assert(!(event == TRIGGER_EVENT_INSERT && insert_new_table && + newtup == NULL)); + + if (oldtup != NULL && + ((event == TRIGGER_EVENT_DELETE && delete_old_table) || + (event == TRIGGER_EVENT_UPDATE && update_old_table))) { Tuplestorestate *old_tuplestore; - Assert(oldtup != NULL); - old_tuplestore = transition_capture->tcs_old_tuplestore; + old_tuplestore = transition_capture->tcs_private->old_tuplestore; if (map != NULL) { - HeapTuple converted = do_convert_tuple(oldtup, map); + HeapTuple converted = execute_attr_map_tuple(oldtup, map); tuplestore_puttuple(old_tuplestore, converted); pfree(converted); @@ -5278,22 +5793,19 @@ AfterTriggerSaveEvent(EState *estate, ResultRelInfo *relinfo, else tuplestore_puttuple(old_tuplestore, oldtup); } - if ((event == TRIGGER_EVENT_INSERT && insert_new_table) || - (event == TRIGGER_EVENT_UPDATE && update_new_table)) + if (newtup != NULL && + ((event == TRIGGER_EVENT_INSERT && insert_new_table) || + (event == TRIGGER_EVENT_UPDATE && update_new_table))) { Tuplestorestate *new_tuplestore; - Assert(newtup != NULL); - if (event == TRIGGER_EVENT_INSERT) - new_tuplestore = transition_capture->tcs_insert_tuplestore; - else - new_tuplestore = transition_capture->tcs_update_tuplestore; + new_tuplestore = transition_capture->tcs_private->new_tuplestore; if (original_insert_tuple != NULL) tuplestore_puttuple(new_tuplestore, original_insert_tuple); else if (map != NULL) { - HeapTuple converted = do_convert_tuple(newtup, map); + HeapTuple converted = execute_attr_map_tuple(newtup, map); tuplestore_puttuple(new_tuplestore, converted); pfree(converted); @@ -5302,11 +5814,18 @@ AfterTriggerSaveEvent(EState *estate, ResultRelInfo *relinfo, tuplestore_puttuple(new_tuplestore, newtup); } - /* If transition tables are the only reason we're here, return. */ + /* + * If transition tables are the only reason we're here, return. As + * mentioned above, we can also be here during update tuple routing in + * presence of transition tables, in which case this function is + * called separately for oldtup and newtup, so we expect exactly one + * of them to be NULL. 
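When the captured row comes from a child table or partition, it is first converted to the parent rowtype through the transition capture state's tcs_map, and only then appended to the transition tuplestore. The capture step, distilled into a standalone backend helper (the function name is invented):

    #include "postgres.h"
    #include "access/htup_details.h"
    #include "access/tupconvert.h"
    #include "utils/tuplestore.h"

    /* Append one captured tuple, converting child rowtype to parent rowtype
     * when a conversion map is in use. */
    static void
    capture_transition_tuple(Tuplestorestate *store, HeapTuple tuple,
                             TupleConversionMap *map)
    {
        if (map != NULL)
        {
            HeapTuple   converted = execute_attr_map_tuple(tuple, map);

            tuplestore_puttuple(store, converted);
            pfree(converted);
        }
        else
            tuplestore_puttuple(store, tuple);
    }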
+ */ if (trigdesc == NULL || (event == TRIGGER_EVENT_DELETE && !trigdesc->trig_delete_after_row) || (event == TRIGGER_EVENT_INSERT && !trigdesc->trig_insert_after_row) || - (event == TRIGGER_EVENT_UPDATE && !trigdesc->trig_update_after_row)) + (event == TRIGGER_EVENT_UPDATE && !trigdesc->trig_update_after_row) || + (event == TRIGGER_EVENT_UPDATE && ((oldtup == NULL) ^ (newtup == NULL)))) return; } @@ -5316,6 +5835,11 @@ AfterTriggerSaveEvent(EState *estate, ResultRelInfo *relinfo, * The event code will be used both as a bitmask and an array offset, so * validation is important to make sure we don't walk off the edge of our * arrays. + * + * Also, if we're considering statement-level triggers, check whether we + * already queued a set of them for this event, and cancel the prior set + * if so. This preserves the behavior that statement-level triggers fire + * just once per statement and fire after row-level triggers. */ switch (event) { @@ -5334,6 +5858,8 @@ AfterTriggerSaveEvent(EState *estate, ResultRelInfo *relinfo, Assert(newtup == NULL); ItemPointerSetInvalid(&(new_event.ate_ctid1)); ItemPointerSetInvalid(&(new_event.ate_ctid2)); + cancel_prior_stmt_triggers(RelationGetRelid(rel), + CMD_INSERT, event); } break; case TRIGGER_EVENT_DELETE: @@ -5351,6 +5877,8 @@ AfterTriggerSaveEvent(EState *estate, ResultRelInfo *relinfo, Assert(newtup == NULL); ItemPointerSetInvalid(&(new_event.ate_ctid1)); ItemPointerSetInvalid(&(new_event.ate_ctid2)); + cancel_prior_stmt_triggers(RelationGetRelid(rel), + CMD_DELETE, event); } break; case TRIGGER_EVENT_UPDATE: @@ -5368,6 +5896,8 @@ AfterTriggerSaveEvent(EState *estate, ResultRelInfo *relinfo, Assert(newtup == NULL); ItemPointerSetInvalid(&(new_event.ate_ctid1)); ItemPointerSetInvalid(&(new_event.ate_ctid2)); + cancel_prior_stmt_triggers(RelationGetRelid(rel), + CMD_UPDATE, event); } break; case TRIGGER_EVENT_TRUNCATE: @@ -5407,9 +5937,7 @@ AfterTriggerSaveEvent(EState *estate, ResultRelInfo *relinfo, { if (fdw_tuplestore == NULL) { - fdw_tuplestore = - GetTriggerTransitionTuplestore - (afterTriggers.fdw_tuplestores); + fdw_tuplestore = GetCurrentFDWTuplestore(); new_event.ate_flags = AFTER_TRIGGER_FDW_FETCH; } else @@ -5422,12 +5950,12 @@ AfterTriggerSaveEvent(EState *estate, ResultRelInfo *relinfo, * certain cases where we can skip queueing the event because we can * tell by inspection that the FK constraint will still pass. */ - if (TRIGGER_FIRED_BY_UPDATE(event)) + if (TRIGGER_FIRED_BY_UPDATE(event) || TRIGGER_FIRED_BY_DELETE(event)) { switch (RI_FKey_trigger_type(trigger->tgfoid)) { case RI_TRIGGER_PK: - /* Update on trigger's PK table */ + /* Update or delete on trigger's PK table */ if (!RI_FKey_pk_upd_check_required(trigger, rel, oldtup, newtup)) { @@ -5465,6 +5993,8 @@ AfterTriggerSaveEvent(EState *estate, ResultRelInfo *relinfo, /* * Fill in event structure and add it to the current query's queue. + * Note we set ats_table to NULL whenever this trigger doesn't use + * transition tables, to improve sharability of the shared event data. 
*/ new_shared.ats_event = (event & TRIGGER_EVENT_OPMASK) | @@ -5474,9 +6004,13 @@ AfterTriggerSaveEvent(EState *estate, ResultRelInfo *relinfo, new_shared.ats_tgoid = trigger->tgoid; new_shared.ats_relid = RelationGetRelid(rel); new_shared.ats_firing_id = 0; - new_shared.ats_transition_capture = transition_capture; + if ((trigger->tgoldtable || trigger->tgnewtable) && + transition_capture != NULL) + new_shared.ats_table = transition_capture->tcs_private; + else + new_shared.ats_table = NULL; - afterTriggerAddEvent(&afterTriggers.query_stack[afterTriggers.query_depth], + afterTriggerAddEvent(&afterTriggers.query_stack[afterTriggers.query_depth].events, &new_event, &new_shared); } @@ -5494,6 +6028,131 @@ AfterTriggerSaveEvent(EState *estate, ResultRelInfo *relinfo, } } +/* + * Detect whether we already queued BEFORE STATEMENT triggers for the given + * relation + operation, and set the flag so the next call will report "true". + */ +static bool +before_stmt_triggers_fired(Oid relid, CmdType cmdType) +{ + bool result; + AfterTriggersTableData *table; + + /* Check state, like AfterTriggerSaveEvent. */ + if (afterTriggers.query_depth < 0) + elog(ERROR, "before_stmt_triggers_fired() called outside of query"); + + /* Be sure we have enough space to record events at this query depth. */ + if (afterTriggers.query_depth >= afterTriggers.maxquerydepth) + AfterTriggerEnlargeQueryState(); + + /* + * We keep this state in the AfterTriggersTableData that also holds + * transition tables for the relation + operation. In this way, if we are + * forced to make a new set of transition tables because more tuples get + * entered after we've already fired triggers, we will allow a new set of + * statement triggers to get queued. + */ + table = GetAfterTriggersTableData(relid, cmdType); + result = table->before_trig_done; + table->before_trig_done = true; + return result; +} + +/* + * If we previously queued a set of AFTER STATEMENT triggers for the given + * relation + operation, and they've not been fired yet, cancel them. The + * caller will queue a fresh set that's after any row-level triggers that may + * have been queued by the current sub-statement, preserving (as much as + * possible) the property that AFTER ROW triggers fire before AFTER STATEMENT + * triggers, and that the latter only fire once. This deals with the + * situation where several FK enforcement triggers sequentially queue triggers + * for the same table into the same trigger query level. We can't fully + * prevent odd behavior though: if there are AFTER ROW triggers taking + * transition tables, we don't want to change the transition tables once the + * first such trigger has seen them. In such a case, any additional events + * will result in creating new transition tables and allowing new firings of + * statement triggers. + * + * This also saves the current event list location so that a later invocation + * of this function can cheaply find the triggers we're about to queue and + * cancel them. + */ +static void +cancel_prior_stmt_triggers(Oid relid, CmdType cmdType, int tgevent) +{ + AfterTriggersTableData *table; + AfterTriggersQueryData *qs = &afterTriggers.query_stack[afterTriggers.query_depth]; + + /* + * We keep this state in the AfterTriggersTableData that also holds + * transition tables for the relation + operation. 
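before_stmt_triggers_fired() is intended for the code paths that queue BEFORE STATEMENT triggers, so that when the same table and operation are reached more than once within a query level (FK enforcement, cross-partition row movement), the statement triggers still fire only once. A rough sketch of such a caller inside trigger.c; the function shape and name are assumed, not taken from the patch:

    static void
    fire_insert_statement_triggers(EState *estate, ResultRelInfo *relinfo)
    {
        Relation    rel = relinfo->ri_RelationDesc;
        TriggerDesc *trigdesc = relinfo->ri_TrigDesc;

        if (trigdesc == NULL || !trigdesc->trig_insert_before_statement)
            return;

        /* fire only once per table + operation within this query level */
        if (before_stmt_triggers_fired(RelationGetRelid(rel), CMD_INSERT))
            return;

        /* ... build TriggerData for a statement-level event and call the
         * BEFORE STATEMENT triggers here ... */
    }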
In this way, if we are + * forced to make a new set of transition tables because more tuples get + * entered after we've already fired triggers, we will allow a new set of + * statement triggers to get queued without canceling the old ones. + */ + table = GetAfterTriggersTableData(relid, cmdType); + + if (table->after_trig_done) + { + /* + * We want to start scanning from the tail location that existed just + * before we inserted any statement triggers. But the events list + * might've been entirely empty then, in which case scan from the + * current head. + */ + AfterTriggerEvent event; + AfterTriggerEventChunk *chunk; + + if (table->after_trig_events.tail) + { + chunk = table->after_trig_events.tail; + event = (AfterTriggerEvent) table->after_trig_events.tailfree; + } + else + { + chunk = qs->events.head; + event = NULL; + } + + for_each_chunk_from(chunk) + { + if (event == NULL) + event = (AfterTriggerEvent) CHUNK_DATA_START(chunk); + for_each_event_from(event, chunk) + { + AfterTriggerShared evtshared = GetTriggerSharedData(event); + + /* + * Exit loop when we reach events that aren't AS triggers for + * the target relation. + */ + if (evtshared->ats_relid != relid) + goto done; + if ((evtshared->ats_event & TRIGGER_EVENT_OPMASK) != tgevent) + goto done; + if (!TRIGGER_FIRED_FOR_STATEMENT(evtshared->ats_event)) + goto done; + if (!TRIGGER_FIRED_AFTER(evtshared->ats_event)) + goto done; + /* OK, mark it DONE */ + event->ate_flags &= ~AFTER_TRIGGER_IN_PROGRESS; + event->ate_flags |= AFTER_TRIGGER_DONE; + } + /* signal we must reinitialize event ptr for next chunk */ + event = NULL; + } + } +done: + + /* In any case, save current insertion point for next time */ + table->after_trig_done = true; + table->after_trig_events = qs->events; +} + +/* + * SQL function pg_trigger_depth() + */ Datum pg_trigger_depth(PG_FUNCTION_ARGS) { diff --git a/src/backend/commands/tsearchcmds.c b/src/backend/commands/tsearchcmds.c index adc7cd67a7..3a843512d1 100644 --- a/src/backend/commands/tsearchcmds.c +++ b/src/backend/commands/tsearchcmds.c @@ -4,7 +4,7 @@ * * Routines for tsearch manipulation commands * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * @@ -209,27 +209,27 @@ DefineTSParser(List *names, List *parameters) { DefElem *defel = (DefElem *) lfirst(pl); - if (pg_strcasecmp(defel->defname, "start") == 0) + if (strcmp(defel->defname, "start") == 0) { values[Anum_pg_ts_parser_prsstart - 1] = get_ts_parser_func(defel, Anum_pg_ts_parser_prsstart); } - else if (pg_strcasecmp(defel->defname, "gettoken") == 0) + else if (strcmp(defel->defname, "gettoken") == 0) { values[Anum_pg_ts_parser_prstoken - 1] = get_ts_parser_func(defel, Anum_pg_ts_parser_prstoken); } - else if (pg_strcasecmp(defel->defname, "end") == 0) + else if (strcmp(defel->defname, "end") == 0) { values[Anum_pg_ts_parser_prsend - 1] = get_ts_parser_func(defel, Anum_pg_ts_parser_prsend); } - else if (pg_strcasecmp(defel->defname, "headline") == 0) + else if (strcmp(defel->defname, "headline") == 0) { values[Anum_pg_ts_parser_prsheadline - 1] = get_ts_parser_func(defel, Anum_pg_ts_parser_prsheadline); } - else if (pg_strcasecmp(defel->defname, "lextypes") == 0) + else if (strcmp(defel->defname, "lextypes") == 0) { values[Anum_pg_ts_parser_prslextype - 1] = get_ts_parser_func(defel, Anum_pg_ts_parser_prslextype); @@ -428,7 +428,7 @@ DefineTSDictionary(List *names, List 
*parameters) /* Check we have creation rights in target namespace */ aclresult = pg_namespace_aclcheck(namespaceoid, GetUserId(), ACL_CREATE); if (aclresult != ACLCHECK_OK) - aclcheck_error(aclresult, ACL_KIND_NAMESPACE, + aclcheck_error(aclresult, OBJECT_SCHEMA, get_namespace_name(namespaceoid)); /* @@ -438,7 +438,7 @@ DefineTSDictionary(List *names, List *parameters) { DefElem *defel = (DefElem *) lfirst(pl); - if (pg_strcasecmp(defel->defname, "template") == 0) + if (strcmp(defel->defname, "template") == 0) { templId = get_ts_template_oid(defGetQualifiedName(defel), false); } @@ -549,7 +549,7 @@ AlterTSDictionary(AlterTSDictionaryStmt *stmt) /* must be owner */ if (!pg_ts_dict_ownercheck(dictId, GetUserId())) - aclcheck_error(ACLCHECK_NOT_OWNER, ACL_KIND_TSDICTIONARY, + aclcheck_error(ACLCHECK_NOT_OWNER, OBJECT_TSDICTIONARY, NameListToString(stmt->dictname)); /* deserialize the existing set of options */ @@ -580,7 +580,7 @@ AlterTSDictionary(AlterTSDictionaryStmt *stmt) DefElem *oldel = (DefElem *) lfirst(cell); next = lnext(cell); - if (pg_strcasecmp(oldel->defname, defel->defname) == 0) + if (strcmp(oldel->defname, defel->defname) == 0) dictoptions = list_delete_cell(dictoptions, cell, prev); else prev = cell; @@ -765,13 +765,13 @@ DefineTSTemplate(List *names, List *parameters) { DefElem *defel = (DefElem *) lfirst(pl); - if (pg_strcasecmp(defel->defname, "init") == 0) + if (strcmp(defel->defname, "init") == 0) { values[Anum_pg_ts_template_tmplinit - 1] = get_ts_template_func(defel, Anum_pg_ts_template_tmplinit); nulls[Anum_pg_ts_template_tmplinit - 1] = false; } - else if (pg_strcasecmp(defel->defname, "lexize") == 0) + else if (strcmp(defel->defname, "lexize") == 0) { values[Anum_pg_ts_template_tmpllexize - 1] = get_ts_template_func(defel, Anum_pg_ts_template_tmpllexize); @@ -980,7 +980,7 @@ DefineTSConfiguration(List *names, List *parameters, ObjectAddress *copied) /* Check we have creation rights in target namespace */ aclresult = pg_namespace_aclcheck(namespaceoid, GetUserId(), ACL_CREATE); if (aclresult != ACLCHECK_OK) - aclcheck_error(aclresult, ACL_KIND_NAMESPACE, + aclcheck_error(aclresult, OBJECT_SCHEMA, get_namespace_name(namespaceoid)); /* @@ -990,9 +990,9 @@ DefineTSConfiguration(List *names, List *parameters, ObjectAddress *copied) { DefElem *defel = (DefElem *) lfirst(pl); - if (pg_strcasecmp(defel->defname, "parser") == 0) + if (strcmp(defel->defname, "parser") == 0) prsOid = get_ts_parser_oid(defGetQualifiedName(defel), false); - else if (pg_strcasecmp(defel->defname, "copy") == 0) + else if (strcmp(defel->defname, "copy") == 0) sourceOid = get_ts_config_oid(defGetQualifiedName(defel), false); else ereport(ERROR, @@ -1189,7 +1189,7 @@ AlterTSConfiguration(AlterTSConfigurationStmt *stmt) /* must be owner */ if (!pg_ts_config_ownercheck(HeapTupleGetOid(tup), GetUserId())) - aclcheck_error(ACLCHECK_NOT_OWNER, ACL_KIND_TSCONFIGURATION, + aclcheck_error(ACLCHECK_NOT_OWNER, OBJECT_TSCONFIGURATION, NameListToString(stmt->cfgname)); relMap = heap_open(TSConfigMapRelationId, RowExclusiveLock); @@ -1251,7 +1251,6 @@ getTokenTypes(Oid prsId, List *tokennames) j = 0; while (list && list[j].lexid) { - /* XXX should we use pg_strcasecmp here? 
*/ if (strcmp(strVal(val), list[j].alias) == 0) { res[i] = list[j].lexid; diff --git a/src/backend/commands/typecmds.c b/src/backend/commands/typecmds.c index 29ac5d569d..66f7c57726 100644 --- a/src/backend/commands/typecmds.c +++ b/src/backend/commands/typecmds.c @@ -3,7 +3,7 @@ * typecmds.c * Routines for SQL commands that manipulate types (and domains). * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * @@ -41,16 +41,13 @@ #include "catalog/pg_authid.h" #include "catalog/pg_collation.h" #include "catalog/pg_constraint.h" -#include "catalog/pg_constraint_fn.h" #include "catalog/pg_depend.h" #include "catalog/pg_enum.h" #include "catalog/pg_language.h" #include "catalog/pg_namespace.h" #include "catalog/pg_proc.h" -#include "catalog/pg_proc_fn.h" #include "catalog/pg_range.h" #include "catalog/pg_type.h" -#include "catalog/pg_type_fn.h" #include "commands/defrem.h" #include "commands/tablecmds.h" #include "commands/typecmds.h" @@ -103,7 +100,7 @@ static void checkEnumOwner(HeapTuple tup); static char *domainAddConstraint(Oid domainOid, Oid domainNamespace, Oid baseTypeOid, int typMod, Constraint *constr, - char *domainName, ObjectAddress *constrAddr); + const char *domainName, ObjectAddress *constrAddr); static Node *replace_domain_constraint_value(ParseState *pstate, ColumnRef *cref); @@ -190,7 +187,7 @@ DefineType(ParseState *pstate, List *names, List *parameters) /* Check we have creation rights in target namespace */ aclresult = pg_namespace_aclcheck(typeNamespace, GetUserId(), ACL_CREATE); if (aclresult != ACLCHECK_OK) - aclcheck_error(aclresult, ACL_KIND_NAMESPACE, + aclcheck_error(aclresult, OBJECT_SCHEMA, get_namespace_name(typeNamespace)); #endif @@ -245,42 +242,42 @@ DefineType(ParseState *pstate, List *names, List *parameters) DefElem *defel = (DefElem *) lfirst(pl); DefElem **defelp; - if (pg_strcasecmp(defel->defname, "like") == 0) + if (strcmp(defel->defname, "like") == 0) defelp = &likeTypeEl; - else if (pg_strcasecmp(defel->defname, "internallength") == 0) + else if (strcmp(defel->defname, "internallength") == 0) defelp = &internalLengthEl; - else if (pg_strcasecmp(defel->defname, "input") == 0) + else if (strcmp(defel->defname, "input") == 0) defelp = &inputNameEl; - else if (pg_strcasecmp(defel->defname, "output") == 0) + else if (strcmp(defel->defname, "output") == 0) defelp = &outputNameEl; - else if (pg_strcasecmp(defel->defname, "receive") == 0) + else if (strcmp(defel->defname, "receive") == 0) defelp = &receiveNameEl; - else if (pg_strcasecmp(defel->defname, "send") == 0) + else if (strcmp(defel->defname, "send") == 0) defelp = &sendNameEl; - else if (pg_strcasecmp(defel->defname, "typmod_in") == 0) + else if (strcmp(defel->defname, "typmod_in") == 0) defelp = &typmodinNameEl; - else if (pg_strcasecmp(defel->defname, "typmod_out") == 0) + else if (strcmp(defel->defname, "typmod_out") == 0) defelp = &typmodoutNameEl; - else if (pg_strcasecmp(defel->defname, "analyze") == 0 || - pg_strcasecmp(defel->defname, "analyse") == 0) + else if (strcmp(defel->defname, "analyze") == 0 || + strcmp(defel->defname, "analyse") == 0) defelp = &analyzeNameEl; - else if (pg_strcasecmp(defel->defname, "category") == 0) + else if (strcmp(defel->defname, "category") == 0) defelp = &categoryEl; - else if (pg_strcasecmp(defel->defname, "preferred") == 0) + else if (strcmp(defel->defname, "preferred") == 0) defelp = 
&preferredEl; - else if (pg_strcasecmp(defel->defname, "delimiter") == 0) + else if (strcmp(defel->defname, "delimiter") == 0) defelp = &delimiterEl; - else if (pg_strcasecmp(defel->defname, "element") == 0) + else if (strcmp(defel->defname, "element") == 0) defelp = &elemTypeEl; - else if (pg_strcasecmp(defel->defname, "default") == 0) + else if (strcmp(defel->defname, "default") == 0) defelp = &defaultValueEl; - else if (pg_strcasecmp(defel->defname, "passedbyvalue") == 0) + else if (strcmp(defel->defname, "passedbyvalue") == 0) defelp = &byValueEl; - else if (pg_strcasecmp(defel->defname, "alignment") == 0) + else if (strcmp(defel->defname, "alignment") == 0) defelp = &alignmentEl; - else if (pg_strcasecmp(defel->defname, "storage") == 0) + else if (strcmp(defel->defname, "storage") == 0) defelp = &storageEl; - else if (pg_strcasecmp(defel->defname, "collatable") == 0) + else if (strcmp(defel->defname, "collatable") == 0) defelp = &collatableEl; else { @@ -526,25 +523,25 @@ DefineType(ParseState *pstate, List *names, List *parameters) #ifdef NOT_USED /* XXX this is unnecessary given the superuser check above */ if (inputOid && !pg_proc_ownercheck(inputOid, GetUserId())) - aclcheck_error(ACLCHECK_NOT_OWNER, ACL_KIND_PROC, + aclcheck_error(ACLCHECK_NOT_OWNER, OBJECT_FUNCTION, NameListToString(inputName)); if (outputOid && !pg_proc_ownercheck(outputOid, GetUserId())) - aclcheck_error(ACLCHECK_NOT_OWNER, ACL_KIND_PROC, + aclcheck_error(ACLCHECK_NOT_OWNER, OBJECT_FUNCTION, NameListToString(outputName)); if (receiveOid && !pg_proc_ownercheck(receiveOid, GetUserId())) - aclcheck_error(ACLCHECK_NOT_OWNER, ACL_KIND_PROC, + aclcheck_error(ACLCHECK_NOT_OWNER, OBJECT_FUNCTION, NameListToString(receiveName)); if (sendOid && !pg_proc_ownercheck(sendOid, GetUserId())) - aclcheck_error(ACLCHECK_NOT_OWNER, ACL_KIND_PROC, + aclcheck_error(ACLCHECK_NOT_OWNER, OBJECT_FUNCTION, NameListToString(sendName)); if (typmodinOid && !pg_proc_ownercheck(typmodinOid, GetUserId())) - aclcheck_error(ACLCHECK_NOT_OWNER, ACL_KIND_PROC, + aclcheck_error(ACLCHECK_NOT_OWNER, OBJECT_FUNCTION, NameListToString(typmodinName)); if (typmodoutOid && !pg_proc_ownercheck(typmodoutOid, GetUserId())) - aclcheck_error(ACLCHECK_NOT_OWNER, ACL_KIND_PROC, + aclcheck_error(ACLCHECK_NOT_OWNER, OBJECT_FUNCTION, NameListToString(typmodoutName)); if (analyzeOid && !pg_proc_ownercheck(analyzeOid, GetUserId())) - aclcheck_error(ACLCHECK_NOT_OWNER, ACL_KIND_PROC, + aclcheck_error(ACLCHECK_NOT_OWNER, OBJECT_FUNCTION, NameListToString(analyzeName)); #endif @@ -729,6 +726,7 @@ ObjectAddress DefineDomain(CreateDomainStmt *stmt) { char *domainName; + char *domainArrayName; Oid domainNamespace; AclResult aclresult; int16 internalLength; @@ -757,6 +755,7 @@ DefineDomain(CreateDomainStmt *stmt) Oid basetypeoid; Oid old_type_oid; Oid domaincoll; + Oid domainArrayOid; Form_pg_type baseType; int32 basetypeMod; Oid baseColl; @@ -770,7 +769,7 @@ DefineDomain(CreateDomainStmt *stmt) aclresult = pg_namespace_aclcheck(domainNamespace, GetUserId(), ACL_CREATE); if (aclresult != ACLCHECK_OK) - aclcheck_error(aclresult, ACL_KIND_NAMESPACE, + aclcheck_error(aclresult, OBJECT_SCHEMA, get_namespace_name(domainNamespace)); /* @@ -796,13 +795,16 @@ DefineDomain(CreateDomainStmt *stmt) basetypeoid = HeapTupleGetOid(typeTup); /* - * Base type must be a plain base type, another domain, an enum or a range - * type. Domains over pseudotypes would create a security hole. Domains - * over composite types might be made to work in the future, but not - * today. 
+ * Base type must be a plain base type, a composite type, another domain, + * an enum or a range type. Domains over pseudotypes would create a + * security hole. (It would be shorter to code this to just check for + * pseudotypes; but it seems safer to call out the specific typtypes that + * are supported, rather than assume that all future typtypes would be + * automatically supported.) */ typtype = baseType->typtype; if (typtype != TYPTYPE_BASE && + typtype != TYPTYPE_COMPOSITE && typtype != TYPTYPE_DOMAIN && typtype != TYPTYPE_ENUM && typtype != TYPTYPE_RANGE) @@ -1027,6 +1029,9 @@ DefineDomain(CreateDomainStmt *stmt) } } + /* Allocate OID for array type */ + domainArrayOid = AssignTypeArrayOid(); + /* * Have TypeCreate do all the real work. */ @@ -1051,7 +1056,7 @@ DefineDomain(CreateDomainStmt *stmt) analyzeProcedure, /* analyze procedure */ InvalidOid, /* no array element type */ false, /* this isn't an array */ - InvalidOid, /* no arrays for domains (yet) */ + domainArrayOid, /* array type we are about to create */ basetypeoid, /* base type ID */ defaultValue, /* default type value (text) */ defaultValueBin, /* default type value (binary) */ @@ -1063,6 +1068,48 @@ DefineDomain(CreateDomainStmt *stmt) typNotNull, /* Type NOT NULL */ domaincoll); /* type's collation */ + /* + * Create the array type that goes with it. + */ + domainArrayName = makeArrayTypeName(domainName, domainNamespace); + + /* alignment must be 'i' or 'd' for arrays */ + alignment = (alignment == 'd') ? 'd' : 'i'; + + TypeCreate(domainArrayOid, /* force assignment of this type OID */ + domainArrayName, /* type name */ + domainNamespace, /* namespace */ + InvalidOid, /* relation oid (n/a here) */ + 0, /* relation kind (ditto) */ + GetUserId(), /* owner's ID */ + -1, /* internal size (always varlena) */ + TYPTYPE_BASE, /* type-type (base type) */ + TYPCATEGORY_ARRAY, /* type-category (array) */ + false, /* array types are never preferred */ + delimiter, /* array element delimiter */ + F_ARRAY_IN, /* input procedure */ + F_ARRAY_OUT, /* output procedure */ + F_ARRAY_RECV, /* receive procedure */ + F_ARRAY_SEND, /* send procedure */ + InvalidOid, /* typmodin procedure - none */ + InvalidOid, /* typmodout procedure - none */ + F_ARRAY_TYPANALYZE, /* analyze procedure */ + address.objectId, /* element type ID */ + true, /* yes this is an array type */ + InvalidOid, /* no further array type */ + InvalidOid, /* base type ID */ + NULL, /* never a default type value */ + NULL, /* binary default isn't sent either */ + false, /* never passed by value */ + alignment, /* see above */ + 'x', /* ARRAY is always toastable */ + -1, /* typMod (Domains only) */ + 0, /* Array dimensions of typbasetype */ + false, /* Type NOT NULL */ + domaincoll); /* type's collation */ + + pfree(domainArrayName); + /* * Process constraints which refer to the domain ID returned by TypeCreate */ @@ -1121,7 +1168,7 @@ DefineEnum(CreateEnumStmt *stmt) /* Check we have creation rights in target namespace */ aclresult = pg_namespace_aclcheck(enumNamespace, GetUserId(), ACL_CREATE); if (aclresult != ACLCHECK_OK) - aclcheck_error(aclresult, ACL_KIND_NAMESPACE, + aclcheck_error(aclresult, OBJECT_SCHEMA, get_namespace_name(enumNamespace)); /* @@ -1139,6 +1186,7 @@ DefineEnum(CreateEnumStmt *stmt) errmsg("type \"%s\" already exists", enumName))); } + /* Allocate OID for array type */ enumArrayOid = AssignTypeArrayOid(); /* Create the pg_type entry */ @@ -1326,7 +1374,7 @@ DefineRange(CreateRangeStmt *stmt) /* Check we have creation rights in target namespace */ 
aclresult = pg_namespace_aclcheck(typeNamespace, GetUserId(), ACL_CREATE); if (aclresult != ACLCHECK_OK) - aclcheck_error(aclresult, ACL_KIND_NAMESPACE, + aclcheck_error(aclresult, OBJECT_SCHEMA, get_namespace_name(typeNamespace)); /* @@ -1367,7 +1415,7 @@ DefineRange(CreateRangeStmt *stmt) { DefElem *defel = (DefElem *) lfirst(lc); - if (pg_strcasecmp(defel->defname, "subtype") == 0) + if (strcmp(defel->defname, "subtype") == 0) { if (OidIsValid(rangeSubtype)) ereport(ERROR, @@ -1376,7 +1424,7 @@ DefineRange(CreateRangeStmt *stmt) /* we can look up the subtype name immediately */ rangeSubtype = typenameTypeId(NULL, defGetTypeName(defel)); } - else if (pg_strcasecmp(defel->defname, "subtype_opclass") == 0) + else if (strcmp(defel->defname, "subtype_opclass") == 0) { if (rangeSubOpclassName != NIL) ereport(ERROR, @@ -1384,7 +1432,7 @@ DefineRange(CreateRangeStmt *stmt) errmsg("conflicting or redundant options"))); rangeSubOpclassName = defGetQualifiedName(defel); } - else if (pg_strcasecmp(defel->defname, "collation") == 0) + else if (strcmp(defel->defname, "collation") == 0) { if (rangeCollationName != NIL) ereport(ERROR, @@ -1392,7 +1440,7 @@ DefineRange(CreateRangeStmt *stmt) errmsg("conflicting or redundant options"))); rangeCollationName = defGetQualifiedName(defel); } - else if (pg_strcasecmp(defel->defname, "canonical") == 0) + else if (strcmp(defel->defname, "canonical") == 0) { if (rangeCanonicalName != NIL) ereport(ERROR, @@ -1400,7 +1448,7 @@ DefineRange(CreateRangeStmt *stmt) errmsg("conflicting or redundant options"))); rangeCanonicalName = defGetQualifiedName(defel); } - else if (pg_strcasecmp(defel->defname, "subtype_diff") == 0) + else if (strcmp(defel->defname, "subtype_diff") == 0) { if (rangeSubtypeDiffName != NIL) ereport(ERROR, @@ -1600,8 +1648,7 @@ makeRangeConstructors(const char *name, Oid namespace, F_FMGR_INTERNAL_VALIDATOR, /* language validator */ prosrc[i], /* prosrc */ NULL, /* probin */ - false, /* isAgg */ - false, /* isWindowFunc */ + PROKIND_FUNCTION, false, /* security_definer */ false, /* leakproof */ false, /* isStrict */ @@ -1922,7 +1969,7 @@ findRangeSubOpclass(List *opcname, Oid subtype) opcid = GetDefaultOpClass(subtype, BTREE_AM_OID); if (!OidIsValid(opcid)) { - /* We spell the error message identically to GetIndexOpClass */ + /* We spell the error message identically to ResolveOpClass */ ereport(ERROR, (errcode(ERRCODE_UNDEFINED_OBJECT), errmsg("data type %s has no default operator class for access method \"%s\"", @@ -1970,7 +2017,7 @@ findRangeCanonicalFunction(List *procname, Oid typeOid) /* Also, range type's creator must have permission to call function */ aclresult = pg_proc_aclcheck(procOid, GetUserId(), ACL_EXECUTE); if (aclresult != ACLCHECK_OK) - aclcheck_error(aclresult, ACL_KIND_PROC, get_func_name(procOid)); + aclcheck_error(aclresult, OBJECT_FUNCTION, get_func_name(procOid)); return procOid; } @@ -2013,7 +2060,7 @@ findRangeSubtypeDiffFunction(List *procname, Oid subtype) /* Also, range type's creator must have permission to call function */ aclresult = pg_proc_aclcheck(procOid, GetUserId(), ACL_EXECUTE); if (aclresult != ACLCHECK_OK) - aclcheck_error(aclresult, ACL_KIND_PROC, get_func_name(procOid)); + aclcheck_error(aclresult, OBJECT_FUNCTION, get_func_name(procOid)); return procOid; } @@ -2132,6 +2179,9 @@ AlterDomainDefault(List *names, Node *defaultRaw) Relation rel; char *defaultValue; Node *defaultExpr = NULL; /* NULL if no default specified */ + Acl *typacl; + Datum aclDatum; + bool isNull; Datum new_record[Natts_pg_type]; bool 
new_record_nulls[Natts_pg_type]; bool new_record_repl[Natts_pg_type]; @@ -2223,25 +2273,23 @@ AlterDomainDefault(List *names, Node *defaultRaw) CatalogTupleUpdate(rel, &tup->t_self, newtuple); + /* Must extract ACL for use of GenerateTypeDependencies */ + aclDatum = heap_getattr(newtuple, Anum_pg_type_typacl, + RelationGetDescr(rel), &isNull); + if (isNull) + typacl = NULL; + else + typacl = DatumGetAclPCopy(aclDatum); + /* Rebuild dependencies */ - GenerateTypeDependencies(typTup->typnamespace, - domainoid, - InvalidOid, /* typrelid is n/a */ + GenerateTypeDependencies(domainoid, + (Form_pg_type) GETSTRUCT(newtuple), + defaultExpr, + typacl, 0, /* relation kind is n/a */ - typTup->typowner, - typTup->typinput, - typTup->typoutput, - typTup->typreceive, - typTup->typsend, - typTup->typmodin, - typTup->typmodout, - typTup->typanalyze, - InvalidOid, false, /* a domain isn't an implicit array */ - typTup->typbasetype, - typTup->typcollation, - defaultExpr, - true); /* Rebuild is true */ + false, /* nor is it any kind of dependent type */ + true); /* We do need to rebuild dependencies */ InvokeObjectPostAlterHook(TypeRelationId, domainoid, 0); @@ -2324,8 +2372,9 @@ AlterDomainNotNull(List *names, bool notNull) for (i = 0; i < rtc->natts; i++) { int attnum = rtc->atts[i]; + Form_pg_attribute attr = TupleDescAttr(tupdesc, attnum - 1); - if (heap_attisnull(tuple, attnum)) + if (heap_attisnull(tuple, attnum, tupdesc)) { /* * In principle the auxiliary information for this @@ -2338,7 +2387,7 @@ AlterDomainNotNull(List *names, bool notNull) ereport(ERROR, (errcode(ERRCODE_NOT_NULL_VIOLATION), errmsg("column \"%s\" of table \"%s\" contains null values", - NameStr(tupdesc->attrs[attnum - 1]->attname), + NameStr(attr->attname), RelationGetRelationName(testrel)), errtablecol(testrel, attnum))); } @@ -2375,6 +2424,8 @@ AlterDomainNotNull(List *names, bool notNull) * AlterDomainDropConstraint * * Implements the ALTER DOMAIN DROP CONSTRAINT statement + * + * Returns ObjectAddress of the modified domain. */ ObjectAddress AlterDomainDropConstraint(List *names, const char *constrName, @@ -2386,10 +2437,10 @@ AlterDomainDropConstraint(List *names, const char *constrName, Relation rel; Relation conrel; SysScanDesc conscan; - ScanKeyData key[1]; + ScanKeyData skey[3]; HeapTuple contup; bool found = false; - ObjectAddress address = InvalidObjectAddress; + ObjectAddress address; /* Make a TypeName so we can use standard type lookup machinery */ typename = makeTypeNameFromNameList(names); @@ -2408,37 +2459,36 @@ AlterDomainDropConstraint(List *names, const char *constrName, /* Grab an appropriate lock on the pg_constraint relation */ conrel = heap_open(ConstraintRelationId, RowExclusiveLock); - /* Use the index to scan only constraints of the target relation */ - ScanKeyInit(&key[0], + /* Find and remove the target constraint */ + ScanKeyInit(&skey[0], + Anum_pg_constraint_conrelid, + BTEqualStrategyNumber, F_OIDEQ, + ObjectIdGetDatum(InvalidOid)); + ScanKeyInit(&skey[1], Anum_pg_constraint_contypid, BTEqualStrategyNumber, F_OIDEQ, - ObjectIdGetDatum(HeapTupleGetOid(tup))); + ObjectIdGetDatum(domainoid)); + ScanKeyInit(&skey[2], + Anum_pg_constraint_conname, + BTEqualStrategyNumber, F_NAMEEQ, + CStringGetDatum(constrName)); - conscan = systable_beginscan(conrel, ConstraintTypidIndexId, true, - NULL, 1, key); + conscan = systable_beginscan(conrel, ConstraintRelidTypidNameIndexId, true, + NULL, 3, skey); - /* - * Scan over the result set, removing any matching entries. 
- */ - while ((contup = systable_getnext(conscan)) != NULL) + /* There can be at most one matching row */ + if ((contup = systable_getnext(conscan)) != NULL) { - Form_pg_constraint con = (Form_pg_constraint) GETSTRUCT(contup); - - if (strcmp(NameStr(con->conname), constrName) == 0) - { - ObjectAddress conobj; + ObjectAddress conobj; - conobj.classId = ConstraintRelationId; - conobj.objectId = HeapTupleGetOid(contup); - conobj.objectSubId = 0; + conobj.classId = ConstraintRelationId; + conobj.objectId = HeapTupleGetOid(contup); + conobj.objectSubId = 0; - performDeletion(&conobj, behavior, 0); - found = true; - } + performDeletion(&conobj, behavior, 0); + found = true; } - ObjectAddressSet(address, TypeRelationId, domainoid); - /* Clean up after the scan */ systable_endscan(conscan); heap_close(conrel, RowExclusiveLock); @@ -2458,6 +2508,8 @@ AlterDomainDropConstraint(List *names, const char *constrName, constrName, TypeNameToString(typename)))); } + ObjectAddressSet(address, TypeRelationId, domainoid); + return address; } @@ -2576,23 +2628,22 @@ AlterDomainAddConstraint(List *names, Node *newConstraint, * Implements the ALTER DOMAIN .. VALIDATE CONSTRAINT statement. */ ObjectAddress -AlterDomainValidateConstraint(List *names, char *constrName) +AlterDomainValidateConstraint(List *names, const char *constrName) { TypeName *typename; Oid domainoid; Relation typrel; Relation conrel; HeapTuple tup; - Form_pg_constraint con = NULL; + Form_pg_constraint con; Form_pg_constraint copy_con; char *conbin; SysScanDesc scan; Datum val; - bool found = false; bool isnull; HeapTuple tuple; HeapTuple copyTuple; - ScanKeyData key; + ScanKeyData skey[3]; ObjectAddress address; /* Make a TypeName so we can use standard type lookup machinery */ @@ -2613,29 +2664,31 @@ AlterDomainValidateConstraint(List *names, char *constrName) * Find and check the target constraint */ conrel = heap_open(ConstraintRelationId, RowExclusiveLock); - ScanKeyInit(&key, + + ScanKeyInit(&skey[0], + Anum_pg_constraint_conrelid, + BTEqualStrategyNumber, F_OIDEQ, + ObjectIdGetDatum(InvalidOid)); + ScanKeyInit(&skey[1], Anum_pg_constraint_contypid, BTEqualStrategyNumber, F_OIDEQ, ObjectIdGetDatum(domainoid)); - scan = systable_beginscan(conrel, ConstraintTypidIndexId, - true, NULL, 1, &key); + ScanKeyInit(&skey[2], + Anum_pg_constraint_conname, + BTEqualStrategyNumber, F_NAMEEQ, + CStringGetDatum(constrName)); - while (HeapTupleIsValid(tuple = systable_getnext(scan))) - { - con = (Form_pg_constraint) GETSTRUCT(tuple); - if (strcmp(NameStr(con->conname), constrName) == 0) - { - found = true; - break; - } - } + scan = systable_beginscan(conrel, ConstraintRelidTypidNameIndexId, true, + NULL, 3, skey); - if (!found) + /* There can be at most one matching row */ + if (!HeapTupleIsValid(tuple = systable_getnext(scan))) ereport(ERROR, (errcode(ERRCODE_UNDEFINED_OBJECT), errmsg("constraint \"%s\" of domain \"%s\" does not exist", constrName, TypeNameToString(typename)))); + con = (Form_pg_constraint) GETSTRUCT(tuple); if (con->contype != CONSTRAINT_CHECK) ereport(ERROR, (errcode(ERRCODE_WRONG_OBJECT_TYPE), @@ -2722,6 +2775,7 @@ validateDomainConstraint(Oid domainoid, char *ccbin) Datum d; bool isNull; Datum conResult; + Form_pg_attribute attr = TupleDescAttr(tupdesc, attnum - 1); d = heap_getattr(tuple, attnum, tupdesc, &isNull); @@ -2745,7 +2799,7 @@ validateDomainConstraint(Oid domainoid, char *ccbin) ereport(ERROR, (errcode(ERRCODE_CHECK_VIOLATION), errmsg("column \"%s\" of table \"%s\" contains values that violate the new constraint", - 
NameStr(tupdesc->attrs[attnum - 1]->attname), + NameStr(attr->attname), RelationGetRelationName(testrel)), errtablecol(testrel, attnum))); } @@ -2930,7 +2984,7 @@ get_rels_with_domain(Oid domainOid, LOCKMODE lockmode) */ if (pg_depend->objsubid > RelationGetNumberOfAttributes(rtc->rel)) continue; - pg_att = rtc->rel->rd_att->attrs[pg_depend->objsubid - 1]; + pg_att = TupleDescAttr(rtc->rel->rd_att, pg_depend->objsubid - 1); if (pg_att->attisdropped || pg_att->atttypid != domainOid) continue; @@ -2986,10 +3040,9 @@ checkDomainOwner(HeapTuple tup) static char * domainAddConstraint(Oid domainOid, Oid domainNamespace, Oid baseTypeOid, int typMod, Constraint *constr, - char *domainName, ObjectAddress *constrAddr) + const char *domainName, ObjectAddress *constrAddr) { Node *expr; - char *ccsrc; char *ccbin; ParseState *pstate; CoerceToDomainValue *domVal; @@ -3002,7 +3055,6 @@ domainAddConstraint(Oid domainOid, Oid domainNamespace, Oid baseTypeOid, { if (ConstraintNameIsUsed(CONSTRAINT_DOMAIN, domainOid, - domainNamespace, constr->conname)) ereport(ERROR, (errcode(ERRCODE_DUPLICATE_OBJECT), @@ -3064,12 +3116,6 @@ domainAddConstraint(Oid domainOid, Oid domainNamespace, Oid baseTypeOid, */ ccbin = nodeToString(expr); - /* - * Deparse it to produce text for consrc. - */ - ccsrc = deparse_expression(expr, - NIL, false, false); - /* * Store the constraint in pg_constraint */ @@ -3080,9 +3126,11 @@ domainAddConstraint(Oid domainOid, Oid domainNamespace, Oid baseTypeOid, false, /* Is Deferrable */ false, /* Is Deferred */ !constr->skip_validation, /* Is Validated */ + InvalidOid, /* no parent constraint */ InvalidOid, /* not a relation constraint */ NULL, 0, + 0, domainOid, /* domain constraint */ InvalidOid, /* no associated index */ InvalidOid, /* Foreign key fields */ @@ -3097,7 +3145,6 @@ domainAddConstraint(Oid domainOid, Oid domainNamespace, Oid baseTypeOid, NULL, /* not an exclusion constraint */ expr, /* Tree form of check constraint */ ccbin, /* Binary form of check constraint */ - ccsrc, /* Source form of check constraint */ true, /* is local */ 0, /* inhcount */ false, /* connoinherit */ @@ -3306,7 +3353,7 @@ AlterTypeOwner(List *names, Oid newOwnerId, ObjectType objecttype) newOwnerId, ACL_CREATE); if (aclresult != ACLCHECK_OK) - aclcheck_error(aclresult, ACL_KIND_NAMESPACE, + aclcheck_error(aclresult, OBJECT_SCHEMA, get_namespace_name(typTup->typnamespace)); } @@ -3325,9 +3372,9 @@ AlterTypeOwner(List *names, Oid newOwnerId, ObjectType objecttype) * AlterTypeOwner_oid - change type owner unconditionally * * This function recurses to handle a pg_class entry, if necessary. It - * invokes any necessary access object hooks. If hasDependEntry is TRUE, this + * invokes any necessary access object hooks. If hasDependEntry is true, this * function modifies the pg_shdepend entry appropriately (this should be - * passed as FALSE only for table rowtypes and array types). + * passed as false only for table rowtypes and array types). * * This is used by ALTER TABLE/TYPE OWNER commands, as well as by REASSIGN * OWNED BY. It assumes the caller has done all needed check. @@ -3493,10 +3540,10 @@ AlterTypeNamespace_oid(Oid typeOid, Oid nspOid, ObjectAddresses *objsMoved) * Caller must have already checked privileges. * * The function automatically recurses to process the type's array type, - * if any. isImplicitArray should be TRUE only when doing this internal + * if any. 
isImplicitArray should be true only when doing this internal * recursion (outside callers must never try to move an array type directly). * - * If errorOnTableType is TRUE, the function errors out if the type is + * If errorOnTableType is true, the function errors out if the type is * a table type. ALTER TABLE has to be used to move a table to a new * namespace. * diff --git a/src/backend/commands/user.c b/src/backend/commands/user.c index f2941352d7..71c5caa41b 100644 --- a/src/backend/commands/user.c +++ b/src/backend/commands/user.c @@ -3,7 +3,7 @@ * user.c * Commands for manipulating roles (formerly called users). * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/backend/commands/user.c @@ -939,7 +939,7 @@ AlterRoleSet(AlterRoleSetStmt *stmt) * ALTER DATABASE ... SET, so use the same permission check. */ if (!pg_database_ownercheck(databaseid, GetUserId())) - aclcheck_error(ACLCHECK_NOT_OWNER, ACL_KIND_DATABASE, + aclcheck_error(ACLCHECK_NOT_OWNER, OBJECT_DATABASE, stmt->database); } } diff --git a/src/backend/commands/vacuum.c b/src/backend/commands/vacuum.c index faa181207a..a86963fc86 100644 --- a/src/backend/commands/vacuum.c +++ b/src/backend/commands/vacuum.c @@ -9,7 +9,7 @@ * in cluster.c. * * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * @@ -32,11 +32,12 @@ #include "access/xact.h" #include "catalog/namespace.h" #include "catalog/pg_database.h" -#include "catalog/pg_inherits_fn.h" +#include "catalog/pg_inherits.h" #include "catalog/pg_namespace.h" #include "commands/cluster.h" #include "commands/vacuum.h" #include "miscadmin.h" +#include "nodes/makefuncs.h" #include "pgstat.h" #include "postmaster/autovacuum.h" #include "storage/bufmgr.h" @@ -67,7 +68,8 @@ static BufferAccessStrategy vac_strategy; /* non-export function prototypes */ -static List *get_rel_oids(Oid relid, const RangeVar *vacrel); +static List *expand_vacuum_rel(VacuumRelation *vrel, int options); +static List *get_all_vacuum_rels(int options); static void vac_truncate_clog(TransactionId frozenXID, MultiXactId minMulti, TransactionId lastSaneFrozenXid, @@ -90,9 +92,26 @@ ExecVacuum(VacuumStmt *vacstmt, bool isTopLevel) Assert(vacstmt->options & (VACOPT_VACUUM | VACOPT_ANALYZE)); Assert((vacstmt->options & VACOPT_VACUUM) || !(vacstmt->options & (VACOPT_FULL | VACOPT_FREEZE))); - Assert((vacstmt->options & VACOPT_ANALYZE) || vacstmt->va_cols == NIL); Assert(!(vacstmt->options & VACOPT_SKIPTOAST)); + /* + * Make sure VACOPT_ANALYZE is specified if any column lists are present. + */ + if (!(vacstmt->options & VACOPT_ANALYZE)) + { + ListCell *lc; + + foreach(lc, vacstmt->rels) + { + VacuumRelation *vrel = lfirst_node(VacuumRelation, lc); + + if (vrel->va_cols != NIL) + ereport(ERROR, + (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), + errmsg("ANALYZE option must be specified when a column list is provided"))); + } + } + /* * All freeze ages are zero if the FREEZE option is given; otherwise pass * them as -1 which means to use the default values. 
@@ -119,24 +138,22 @@ ExecVacuum(VacuumStmt *vacstmt, bool isTopLevel) params.log_min_duration = -1; /* Now go through the common routine */ - vacuum(vacstmt->options, vacstmt->relation, InvalidOid, &params, - vacstmt->va_cols, NULL, isTopLevel); + vacuum(vacstmt->options, vacstmt->rels, &params, NULL, isTopLevel); } /* - * Primary entry point for VACUUM and ANALYZE commands. + * Internal entry point for VACUUM and ANALYZE commands. * * options is a bitmask of VacuumOption flags, indicating what to do. * - * relid, if not InvalidOid, indicate the relation to process; otherwise, - * the RangeVar is used. (The latter must always be passed, because it's - * used for error messages.) + * relations, if not NIL, is a list of VacuumRelation to process; otherwise, + * we process all relevant tables in the database. For each VacuumRelation, + * if a valid OID is supplied, the table with that OID is what to process; + * otherwise, the VacuumRelation's RangeVar indicates what to process. * * params contains a set of parameters that can be used to customize the * behavior. * - * va_cols is a list of columns to analyze, or NIL to process them all. - * * bstrategy is normally given as NULL, but in autovacuum it can be passed * in to use the same buffer strategy object across multiple vacuum() calls. * @@ -146,14 +163,14 @@ ExecVacuum(VacuumStmt *vacstmt, bool isTopLevel) * memory context that will not disappear at transaction commit. */ void -vacuum(int options, RangeVar *relation, Oid relid, VacuumParams *params, - List *va_cols, BufferAccessStrategy bstrategy, bool isTopLevel) +vacuum(int options, List *relations, VacuumParams *params, + BufferAccessStrategy bstrategy, bool isTopLevel) { + static bool in_vacuum = false; + const char *stmttype; volatile bool in_outer_xact, use_own_xacts; - List *relations; - static bool in_vacuum = false; Assert(params != NULL); @@ -169,11 +186,11 @@ vacuum(int options, RangeVar *relation, Oid relid, VacuumParams *params, */ if (options & VACOPT_VACUUM) { - PreventTransactionChain(isTopLevel, stmttype); + PreventInTransactionBlock(isTopLevel, stmttype); in_outer_xact = false; } else - in_outer_xact = IsInTransactionChain(isTopLevel); + in_outer_xact = IsInTransactionBlock(isTopLevel); /* * Due to static variables vac_context, anl_context and vac_strategy, @@ -226,10 +243,29 @@ vacuum(int options, RangeVar *relation, Oid relid, VacuumParams *params, vac_strategy = bstrategy; /* - * Build list of relations to process, unless caller gave us one. (If we - * build one, we put it in vac_context for safekeeping.) + * Build list of relation(s) to process, putting any new data in + * vac_context for safekeeping. */ - relations = get_rel_oids(relid, relation); + if (relations != NIL) + { + List *newrels = NIL; + ListCell *lc; + + foreach(lc, relations) + { + VacuumRelation *vrel = lfirst_node(VacuumRelation, lc); + List *sublist; + MemoryContext old_context; + + sublist = expand_vacuum_rel(vrel, options); + old_context = MemoryContextSwitchTo(vac_context); + newrels = list_concat(newrels, sublist); + MemoryContextSwitchTo(old_context); + } + relations = newrels; + } + else + relations = get_all_vacuum_rels(options); /* * Decide whether we need to start/commit our own transactions. 
@@ -280,7 +316,7 @@ vacuum(int options, RangeVar *relation, Oid relid, VacuumParams *params, CommitTransactionCommand(); } - /* Turn vacuum cost accounting on or off */ + /* Turn vacuum cost accounting on or off, and set/clear in_vacuum */ PG_TRY(); { ListCell *cur; @@ -297,11 +333,11 @@ vacuum(int options, RangeVar *relation, Oid relid, VacuumParams *params, */ foreach(cur, relations) { - Oid relid = lfirst_oid(cur); + VacuumRelation *vrel = lfirst_node(VacuumRelation, cur); if (options & VACOPT_VACUUM) { - if (!vacuum_rel(relid, relation, options, params)) + if (!vacuum_rel(vrel->oid, vrel->relation, options, params)) continue; } @@ -318,8 +354,8 @@ vacuum(int options, RangeVar *relation, Oid relid, VacuumParams *params, PushActiveSnapshot(GetTransactionSnapshot()); } - analyze_rel(relid, relation, options, params, - va_cols, in_outer_xact, vac_strategy); + analyze_rel(vrel->oid, vrel->relation, options, params, + vrel->va_cols, in_outer_xact, vac_strategy); if (use_own_xacts) { @@ -373,107 +409,386 @@ vacuum(int options, RangeVar *relation, Oid relid, VacuumParams *params, } /* - * Build a list of Oids for each relation to be processed + * Check if a given relation can be safely vacuumed or analyzed. If the + * user is not the relation owner, issue a WARNING log message and return + * false to let the caller decide what to do with this relation. This + * routine is used to decide if a relation can be processed for VACUUM or + * ANALYZE. + */ +bool +vacuum_is_relation_owner(Oid relid, Form_pg_class reltuple, int options) +{ + char *relname; + + Assert((options & (VACOPT_VACUUM | VACOPT_ANALYZE)) != 0); + + /* + * Check permissions. + * + * We allow the user to vacuum or analyze a table if he is superuser, the + * table owner, or the database owner (but in the latter case, only if + * it's not a shared relation). pg_class_ownercheck includes the + * superuser case. + * + * Note we choose to treat permissions failure as a WARNING and keep + * trying to vacuum or analyze the rest of the DB --- is this appropriate? + */ + if (pg_class_ownercheck(relid, GetUserId()) || + (pg_database_ownercheck(MyDatabaseId, GetUserId()) && !reltuple->relisshared)) + return true; + + relname = NameStr(reltuple->relname); + + if ((options & VACOPT_VACUUM) != 0) + { + if (reltuple->relisshared) + ereport(WARNING, + (errmsg("skipping \"%s\" --- only superuser can vacuum it", + relname))); + else if (reltuple->relnamespace == PG_CATALOG_NAMESPACE) + ereport(WARNING, + (errmsg("skipping \"%s\" --- only superuser or database owner can vacuum it", + relname))); + else + ereport(WARNING, + (errmsg("skipping \"%s\" --- only table or database owner can vacuum it", + relname))); + + /* + * For VACUUM ANALYZE, both logs could show up, but just generate + * information for VACUUM as that would be the first one to be + * processed. + */ + return false; + } + + if ((options & VACOPT_ANALYZE) != 0) + { + if (reltuple->relisshared) + ereport(WARNING, + (errmsg("skipping \"%s\" --- only superuser can analyze it", + relname))); + else if (reltuple->relnamespace == PG_CATALOG_NAMESPACE) + ereport(WARNING, + (errmsg("skipping \"%s\" --- only superuser or database owner can analyze it", + relname))); + else + ereport(WARNING, + (errmsg("skipping \"%s\" --- only table or database owner can analyze it", + relname))); + } + + return false; +} + + +/* + * vacuum_open_relation * - * The list is built in vac_context so that it will survive across our - * per-relation transactions. 
+ * This routine is used for attempting to open and lock a relation which + * is going to be vacuumed or analyzed. If the relation cannot be opened + * or locked, a log is emitted if possible. + */ +Relation +vacuum_open_relation(Oid relid, RangeVar *relation, VacuumParams *params, + int options, LOCKMODE lmode) +{ + Relation onerel; + bool rel_lock = true; + int elevel; + + Assert(params != NULL); + Assert((options & (VACOPT_VACUUM | VACOPT_ANALYZE)) != 0); + + /* + * Open the relation and get the appropriate lock on it. + * + * There's a race condition here: the relation may have gone away since + * the last time we saw it. If so, we don't need to vacuum or analyze it. + * + * If we've been asked not to wait for the relation lock, acquire it first + * in non-blocking mode, before calling try_relation_open(). + */ + if (!(options & VACOPT_SKIP_LOCKED)) + onerel = try_relation_open(relid, lmode); + else if (ConditionalLockRelationOid(relid, lmode)) + onerel = try_relation_open(relid, NoLock); + else + { + onerel = NULL; + rel_lock = false; + } + + /* if relation is opened, leave */ + if (onerel) + return onerel; + + /* + * Relation could not be opened, hence generate if possible a log + * informing on the situation. + * + * If the RangeVar is not defined, we do not have enough information to + * provide a meaningful log statement. Chances are that the caller has + * intentionally not provided this information so that this logging is + * skipped, anyway. + */ + if (relation == NULL) + return NULL; + + /* + * Determine the log level. + * + * For autovacuum logs, we emit a LOG if log_autovacuum_min_duration is + * not disabled. For manual VACUUM or ANALYZE, we emit a WARNING to match + * the log statements in the permission checks. + */ + if (!IsAutoVacuumWorkerProcess()) + elevel = WARNING; + else if (params->log_min_duration >= 0) + elevel = LOG; + else + return NULL; + + if ((options & VACOPT_VACUUM) != 0) + { + if (!rel_lock) + ereport(elevel, + (errcode(ERRCODE_LOCK_NOT_AVAILABLE), + errmsg("skipping vacuum of \"%s\" --- lock not available", + relation->relname))); + else + ereport(elevel, + (errcode(ERRCODE_UNDEFINED_TABLE), + errmsg("skipping vacuum of \"%s\" --- relation no longer exists", + relation->relname))); + + /* + * For VACUUM ANALYZE, both logs could show up, but just generate + * information for VACUUM as that would be the first one to be + * processed. + */ + return NULL; + } + + if ((options & VACOPT_ANALYZE) != 0) + { + if (!rel_lock) + ereport(elevel, + (errcode(ERRCODE_LOCK_NOT_AVAILABLE), + errmsg("skipping analyze of \"%s\" --- lock not available", + relation->relname))); + else + ereport(elevel, + (errcode(ERRCODE_UNDEFINED_TABLE), + errmsg("skipping analyze of \"%s\" --- relation no longer exists", + relation->relname))); + } + + return NULL; +} + + +/* + * Given a VacuumRelation, fill in the table OID if it wasn't specified, + * and optionally add VacuumRelations for partitions of the table. + * + * If a VacuumRelation does not have an OID supplied and is a partitioned + * table, an extra entry will be added to the output for each partition. + * Presently, only autovacuum supplies OIDs when calling vacuum(), and + * it does not want us to expand partitioned tables. + * + * We take care not to modify the input data structure, but instead build + * new VacuumRelation(s) to return. (But note that they will reference + * unmodified parts of the input, eg column lists.) New data structures + * are made in vac_context. 
*/ static List * -get_rel_oids(Oid relid, const RangeVar *vacrel) +expand_vacuum_rel(VacuumRelation *vrel, int options) { - List *oid_list = NIL; + List *vacrels = NIL; MemoryContext oldcontext; - /* OID supplied by VACUUM's caller? */ - if (OidIsValid(relid)) + /* If caller supplied OID, there's nothing we need do here. */ + if (OidIsValid(vrel->oid)) { oldcontext = MemoryContextSwitchTo(vac_context); - oid_list = lappend_oid(oid_list, relid); + vacrels = lappend(vacrels, vrel); MemoryContextSwitchTo(oldcontext); } - else if (vacrel) + else { - /* Process a specific relation */ + /* Process a specific relation, and possibly partitions thereof */ Oid relid; HeapTuple tuple; Form_pg_class classForm; bool include_parts; + int rvr_opts; + + /* + * Since autovacuum workers supply OIDs when calling vacuum(), no + * autovacuum worker should reach this code. + */ + Assert(!IsAutoVacuumWorkerProcess()); + + /* + * We transiently take AccessShareLock to protect the syscache lookup + * below, as well as find_all_inheritors's expectation that the caller + * holds some lock on the starting relation. + */ + rvr_opts = (options & VACOPT_SKIP_LOCKED) ? RVR_SKIP_LOCKED : 0; + relid = RangeVarGetRelidExtended(vrel->relation, + AccessShareLock, + rvr_opts, + NULL, NULL); /* - * Since we don't take a lock here, the relation might be gone, or the - * RangeVar might no longer refer to the OID we look up here. In the - * former case, VACUUM will do nothing; in the latter case, it will - * process the OID we looked up here, rather than the new one. Neither - * is ideal, but there's little practical alternative, since we're - * going to commit this transaction and begin a new one between now - * and then. + * If the lock is unavailable, emit the same log statement that + * vacuum_rel() and analyze_rel() would. */ - relid = RangeVarGetRelid(vacrel, NoLock, false); + if (!OidIsValid(relid)) + { + if (options & VACOPT_VACUUM) + ereport(WARNING, + (errcode(ERRCODE_LOCK_NOT_AVAILABLE), + errmsg("skipping vacuum of \"%s\" --- lock not available", + vrel->relation->relname))); + else + ereport(WARNING, + (errcode(ERRCODE_LOCK_NOT_AVAILABLE), + errmsg("skipping analyze of \"%s\" --- lock not available", + vrel->relation->relname))); + return vacrels; + } /* - * To check whether the relation is a partitioned table, fetch its - * syscache entry. + * To check whether the relation is a partitioned table and its + * ownership, fetch its syscache entry. */ tuple = SearchSysCache1(RELOID, ObjectIdGetDatum(relid)); if (!HeapTupleIsValid(tuple)) elog(ERROR, "cache lookup failed for relation %u", relid); classForm = (Form_pg_class) GETSTRUCT(tuple); + + /* + * Make a returnable VacuumRelation for this rel if user is a proper + * owner. + */ + if (vacuum_is_relation_owner(relid, classForm, options)) + { + oldcontext = MemoryContextSwitchTo(vac_context); + vacrels = lappend(vacrels, makeVacuumRelation(vrel->relation, + relid, + vrel->va_cols)); + MemoryContextSwitchTo(oldcontext); + } + + include_parts = (classForm->relkind == RELKIND_PARTITIONED_TABLE); ReleaseSysCache(tuple); /* - * Make relation list entries for this guy and its partitions, if any. - * Note that the list returned by find_all_inheritors() include the - * passed-in OID at its head. Also note that we did not request a - * lock to be taken to match what would be done otherwise. + * If it is, make relation list entries for its partitions. Note that + * the list returned by find_all_inheritors() includes the passed-in + * OID, so we have to skip that. 
There's no point in taking locks on + * the individual partitions yet, and doing so would just add + * unnecessary deadlock risk. For this last reason we do not check + * yet the ownership of the partitions, which get added to the list to + * process. Ownership will be checked later on anyway. */ - oldcontext = MemoryContextSwitchTo(vac_context); if (include_parts) - oid_list = list_concat(oid_list, - find_all_inheritors(relid, NoLock, NULL)); - else - oid_list = lappend_oid(oid_list, relid); - MemoryContextSwitchTo(oldcontext); - } - else - { + { + List *part_oids = find_all_inheritors(relid, NoLock, NULL); + ListCell *part_lc; + + foreach(part_lc, part_oids) + { + Oid part_oid = lfirst_oid(part_lc); + + if (part_oid == relid) + continue; /* ignore original table */ + + /* + * We omit a RangeVar since it wouldn't be appropriate to + * complain about failure to open one of these relations + * later. + */ + oldcontext = MemoryContextSwitchTo(vac_context); + vacrels = lappend(vacrels, makeVacuumRelation(NULL, + part_oid, + vrel->va_cols)); + MemoryContextSwitchTo(oldcontext); + } + } + /* - * Process all plain relations and materialized views listed in - * pg_class + * Release lock again. This means that by the time we actually try to + * process the table, it might be gone or renamed. In the former case + * we'll silently ignore it; in the latter case we'll process it + * anyway, but we must beware that the RangeVar doesn't necessarily + * identify it anymore. This isn't ideal, perhaps, but there's little + * practical alternative, since we're typically going to commit this + * transaction and begin a new one between now and then. Moreover, + * holding locks on multiple relations would create significant risk + * of deadlock. */ - Relation pgclass; - HeapScanDesc scan; - HeapTuple tuple; + UnlockRelationOid(relid, AccessShareLock); + } + + return vacrels; +} + +/* + * Construct a list of VacuumRelations for all vacuumable rels in + * the current database. The list is built in vac_context. + */ +static List * +get_all_vacuum_rels(int options) +{ + List *vacrels = NIL; + Relation pgclass; + HeapScanDesc scan; + HeapTuple tuple; - pgclass = heap_open(RelationRelationId, AccessShareLock); + pgclass = heap_open(RelationRelationId, AccessShareLock); - scan = heap_beginscan_catalog(pgclass, 0, NULL); + scan = heap_beginscan_catalog(pgclass, 0, NULL); - while ((tuple = heap_getnext(scan, ForwardScanDirection)) != NULL) - { - Form_pg_class classForm = (Form_pg_class) GETSTRUCT(tuple); - - /* - * We include partitioned tables here; depending on which - * operation is to be performed, caller will decide whether to - * process or ignore them. 
- */ - if (classForm->relkind != RELKIND_RELATION && - classForm->relkind != RELKIND_MATVIEW && - classForm->relkind != RELKIND_PARTITIONED_TABLE) - continue; - - /* Make a relation list entry for this guy */ - oldcontext = MemoryContextSwitchTo(vac_context); - oid_list = lappend_oid(oid_list, HeapTupleGetOid(tuple)); - MemoryContextSwitchTo(oldcontext); - } + while ((tuple = heap_getnext(scan, ForwardScanDirection)) != NULL) + { + Form_pg_class classForm = (Form_pg_class) GETSTRUCT(tuple); + MemoryContext oldcontext; + Oid relid = HeapTupleGetOid(tuple); - heap_endscan(scan); - heap_close(pgclass, AccessShareLock); + /* check permissions of relation */ + if (!vacuum_is_relation_owner(relid, classForm, options)) + continue; + + /* + * We include partitioned tables here; depending on which operation is + * to be performed, caller will decide whether to process or ignore + * them. + */ + if (classForm->relkind != RELKIND_RELATION && + classForm->relkind != RELKIND_MATVIEW && + classForm->relkind != RELKIND_PARTITIONED_TABLE) + continue; + + /* + * Build VacuumRelation(s) specifying the table OIDs to be processed. + * We omit a RangeVar since it wouldn't be appropriate to complain + * about failure to open one of these relations later. + */ + oldcontext = MemoryContextSwitchTo(vac_context); + vacrels = lappend(vacrels, makeVacuumRelation(NULL, + relid, + NIL)); + MemoryContextSwitchTo(oldcontext); } - return oid_list; + heap_endscan(scan); + heap_close(pgclass, AccessShareLock); + + return vacrels; } /* @@ -563,7 +878,8 @@ vacuum_set_xid_limits(Relation rel, { ereport(WARNING, (errmsg("oldest xmin is far in the past"), - errhint("Close open transactions soon to avoid wraparound problems."))); + errhint("Close open transactions soon to avoid wraparound problems.\n" + "You might also need to commit or roll back old prepared transactions, or drop stale replication slots."))); limit = *oldestXmin; } @@ -673,16 +989,17 @@ vacuum_set_xid_limits(Relation rel, * vac_estimate_reltuples() -- estimate the new value for pg_class.reltuples * * If we scanned the whole relation then we should just use the count of - * live tuples seen; but if we did not, we should not trust the count - * unreservedly, especially not in VACUUM, which may have scanned a quite - * nonrandom subset of the table. When we have only partial information, - * we take the old value of pg_class.reltuples as a measurement of the + * live tuples seen; but if we did not, we should not blindly extrapolate + * from that number, since VACUUM may have scanned a quite nonrandom + * subset of the table. When we have only partial information, we take + * the old value of pg_class.reltuples as a measurement of the * tuple density in the unscanned pages. * - * This routine is shared by VACUUM and ANALYZE. + * Note: scanned_tuples should count only *live* tuples, since + * pg_class.reltuples is defined that way. 
*/ double -vac_estimate_reltuples(Relation relation, bool is_analyze, +vac_estimate_reltuples(Relation relation, BlockNumber total_pages, BlockNumber scanned_pages, double scanned_tuples) @@ -690,9 +1007,8 @@ vac_estimate_reltuples(Relation relation, bool is_analyze, BlockNumber old_rel_pages = relation->rd_rel->relpages; double old_rel_tuples = relation->rd_rel->reltuples; double old_density; - double new_density; - double multiplier; - double updated_density; + double unscanned_pages; + double total_tuples; /* If we did scan the whole table, just use the count as-is */ if (scanned_pages >= total_pages) @@ -716,31 +1032,14 @@ vac_estimate_reltuples(Relation relation, bool is_analyze, /* * Okay, we've covered the corner cases. The normal calculation is to - * convert the old measurement to a density (tuples per page), then update - * the density using an exponential-moving-average approach, and finally - * compute reltuples as updated_density * total_pages. - * - * For ANALYZE, the moving average multiplier is just the fraction of the - * table's pages we scanned. This is equivalent to assuming that the - * tuple density in the unscanned pages didn't change. Of course, it - * probably did, if the new density measurement is different. But over - * repeated cycles, the value of reltuples will converge towards the - * correct value, if repeated measurements show the same new density. - * - * For VACUUM, the situation is a bit different: we have looked at a - * nonrandom sample of pages, but we know for certain that the pages we - * didn't look at are precisely the ones that haven't changed lately. - * Thus, there is a reasonable argument for doing exactly the same thing - * as for the ANALYZE case, that is use the old density measurement as the - * value for the unscanned pages. - * - * This logic could probably use further refinement. + * convert the old measurement to a density (tuples per page), then + * estimate the number of tuples in the unscanned pages using that figure, + * and finally add on the number of tuples in the scanned pages. */ old_density = old_rel_tuples / old_rel_pages; - new_density = scanned_tuples / scanned_pages; - multiplier = (double) scanned_pages / (double) total_pages; - updated_density = old_density + (new_density - old_density) * multiplier; - return floor(updated_density * total_pages + 0.5); + unscanned_pages = (double) total_pages - (double) scanned_pages; + total_tuples = old_density * unscanned_pages + scanned_tuples; + return floor(total_tuples + 0.5); } @@ -779,6 +1078,9 @@ vac_estimate_reltuples(Relation relation, bool is_analyze, * transaction. This is OK since postponing the flag maintenance is * always allowable. * + * Note: num_tuples should count only *live* tuples, since + * pg_class.reltuples is defined that way. + * * This routine is shared by VACUUM and ANALYZE. */ void @@ -836,16 +1138,6 @@ vac_update_relstats(Relation relation, dirty = true; } - /* - * If we have discovered that there are no indexes, then there's no - * primary key either. This could be done more thoroughly... - */ - if (pgcform->relhaspkey && !hasindex) - { - pgcform->relhaspkey = false; - dirty = true; - } - /* We also clear relhasrules and relhastriggers if needed */ if (pgcform->relhasrules && relation->rd_rules == NULL) { @@ -1212,6 +1504,14 @@ vac_truncate_clog(TransactionId frozenXID, /* * vacuum_rel() -- vacuum one heap relation * + * relid identifies the relation to vacuum. 
If relation is supplied, + * use the name therein for reporting any failure to open/lock the rel; + * do not use it once we've successfully opened the rel, since it might + * be stale. + * + * Returns true if it's okay to proceed with a requested ANALYZE + * operation on this table. + * * Doing one heap at a time incurs extra overhead, since we need to * check that the heap exists again just before we vacuum it. The * reason that we do this is so that vacuuming can be spread across @@ -1284,29 +1584,10 @@ vacuum_rel(Oid relid, RangeVar *relation, int options, VacuumParams *params) */ lmode = (options & VACOPT_FULL) ? AccessExclusiveLock : ShareUpdateExclusiveLock; - /* - * Open the relation and get the appropriate lock on it. - * - * There's a race condition here: the rel may have gone away since the - * last time we saw it. If so, we don't need to vacuum it. - * - * If we've been asked not to wait for the relation lock, acquire it first - * in non-blocking mode, before calling try_relation_open(). - */ - if (!(options & VACOPT_NOWAIT)) - onerel = try_relation_open(relid, lmode); - else if (ConditionalLockRelationOid(relid, lmode)) - onerel = try_relation_open(relid, NoLock); - else - { - onerel = NULL; - if (IsAutoVacuumWorkerProcess() && params->log_min_duration >= 0) - ereport(LOG, - (errcode(ERRCODE_LOCK_NOT_AVAILABLE), - errmsg("skipping vacuum of \"%s\" --- lock not available", - relation->relname))); - } + /* open the relation and get the appropriate lock on it */ + onerel = vacuum_open_relation(relid, relation, params, options, lmode); + /* leave if relation could not be opened or locked */ if (!onerel) { PopActiveSnapshot(); @@ -1315,30 +1596,17 @@ vacuum_rel(Oid relid, RangeVar *relation, int options, VacuumParams *params) } /* - * Check permissions. - * - * We allow the user to vacuum a table if he is superuser, the table - * owner, or the database owner (but in the latter case, only if it's not - * a shared relation). pg_class_ownercheck includes the superuser case. - * - * Note we choose to treat permissions failure as a WARNING and keep - * trying to vacuum the rest of the DB --- is this appropriate? + * Check if relation needs to be skipped based on ownership. This check + * happens also when building the relation list to vacuum for a manual + * operation, and needs to be done additionally here as VACUUM could + * happen across multiple transactions where relation ownership could have + * changed in-between. Make sure to only generate logs for VACUUM in this + * case. 
*/ - if (!(pg_class_ownercheck(RelationGetRelid(onerel), GetUserId()) || - (pg_database_ownercheck(MyDatabaseId, GetUserId()) && !onerel->rd_rel->relisshared))) + if (!vacuum_is_relation_owner(RelationGetRelid(onerel), + onerel->rd_rel, + options & VACOPT_VACUUM)) { - if (onerel->rd_rel->relisshared) - ereport(WARNING, - (errmsg("skipping \"%s\" --- only superuser can vacuum it", - RelationGetRelationName(onerel)))); - else if (onerel->rd_rel->relnamespace == PG_CATALOG_NAMESPACE) - ereport(WARNING, - (errmsg("skipping \"%s\" --- only superuser or database owner can vacuum it", - RelationGetRelationName(onerel)))); - else - ereport(WARNING, - (errmsg("skipping \"%s\" --- only table or database owner can vacuum it", - RelationGetRelationName(onerel)))); relation_close(onerel, lmode); PopActiveSnapshot(); CommitTransactionCommand(); @@ -1346,9 +1614,7 @@ vacuum_rel(Oid relid, RangeVar *relation, int options, VacuumParams *params) } /* - * Check that it's a vacuumable relation; we used to do this in - * get_rel_oids() but seems safer to check after we've locked the - * relation. + * Check that it's of a vacuumable relkind. */ if (onerel->rd_rel->relkind != RELKIND_RELATION && onerel->rd_rel->relkind != RELKIND_MATVIEW && @@ -1380,17 +1646,16 @@ vacuum_rel(Oid relid, RangeVar *relation, int options, VacuumParams *params) } /* - * Ignore partitioned tables as there is no work to be done. Since we - * release the lock here, it's possible that any partitions added from - * this point on will not get processed, but that seems harmless. + * Silently ignore partitioned tables as there is no work to be done. The + * useful work is on their child partitions, which have been queued up for + * us separately. */ if (onerel->rd_rel->relkind == RELKIND_PARTITIONED_TABLE) { relation_close(onerel, lmode); PopActiveSnapshot(); CommitTransactionCommand(); - - /* It's OK for other commands to look at this table */ + /* It's OK to proceed with ANALYZE on this table */ return true; } @@ -1433,13 +1698,17 @@ vacuum_rel(Oid relid, RangeVar *relation, int options, VacuumParams *params) */ if (options & VACOPT_FULL) { + int cluster_options = 0; + /* close relation before vacuuming, but hold lock until commit */ relation_close(onerel, NoLock); onerel = NULL; + if ((options & VACOPT_VERBOSE) != 0) + cluster_options |= CLUOPT_VERBOSE; + /* VACUUM FULL is now a variant of CLUSTER; see cluster.c */ - cluster_rel(relid, InvalidOid, false, - (options & VACOPT_VERBOSE) != 0); + cluster_rel(relid, InvalidOid, cluster_options); } else lazy_vacuum_rel(onerel, options, params, vac_strategy); @@ -1468,7 +1737,7 @@ vacuum_rel(Oid relid, RangeVar *relation, int options, VacuumParams *params) * totally unimportant for toast relations. */ if (toast_relid != InvalidOid) - vacuum_rel(toast_relid, relation, options, params); + vacuum_rel(toast_relid, NULL, options, params); /* * Now release the session-level lock on the master table. diff --git a/src/backend/commands/vacuumlazy.c b/src/backend/commands/vacuumlazy.c index e9b4045fe5..8996d366e9 100644 --- a/src/backend/commands/vacuumlazy.c +++ b/src/backend/commands/vacuumlazy.c @@ -23,7 +23,7 @@ * the TID array, just enough to hold as many heap tuples as fit on one page. 
* * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * @@ -44,7 +44,6 @@ #include "access/transam.h" #include "access/visibilitymap.h" #include "access/xlog.h" -#include "catalog/catalog.h" #include "catalog/storage.h" #include "commands/dbcommands.h" #include "commands/progress.h" @@ -84,6 +83,15 @@ #define VACUUM_TRUNCATE_LOCK_WAIT_INTERVAL 50 /* ms */ #define VACUUM_TRUNCATE_LOCK_TIMEOUT 5000 /* ms */ +/* + * When a table has no indexes, vacuum the FSM after every 8GB, approximately + * (it won't be exact because we only vacuum FSM after processing a heap page + * that has some removable tuples). When there are indexes, this is ignored, + * and we vacuum FSM after each index/heap cleaning pass. + */ +#define VACUUM_FSM_EVERY_PAGES \ + ((BlockNumber) (((uint64) 8 * 1024 * 1024 * 1024) / BLCKSZ)) + /* * Guesstimation of number of dead tuples per page. This is used to * provide an upper limit to memory allocated when vacuuming small @@ -114,9 +122,9 @@ typedef struct LVRelStats BlockNumber pinskipped_pages; /* # of pages we skipped due to a pin */ BlockNumber frozenskipped_pages; /* # of frozen pages we skipped */ BlockNumber tupcount_pages; /* pages whose tuples we counted */ - double scanned_tuples; /* counts only tuples on tupcount_pages */ - double old_rel_tuples; /* previous value of pg_class.reltuples */ + double old_live_tuples; /* previous value of pg_class.reltuples */ double new_rel_tuples; /* new estimated total # of tuples */ + double new_live_tuples; /* new estimated total # of live tuples */ double new_dead_tuples; /* new estimated total # of dead tuples */ BlockNumber pages_removed; double tuples_deleted; @@ -196,7 +204,6 @@ lazy_vacuum_rel(Relation onerel, int options, VacuumParams *params, TransactionId xidFullScanLimit; MultiXactId mxactFullScanLimit; BlockNumber new_rel_pages; - double new_rel_tuples; BlockNumber new_rel_allvisible; double new_live_tuples; TransactionId new_frozen_xid; @@ -245,7 +252,7 @@ lazy_vacuum_rel(Relation onerel, int options, VacuumParams *params, vacrelstats = (LVRelStats *) palloc0(sizeof(LVRelStats)); vacrelstats->old_rel_pages = onerel->rd_rel->relpages; - vacrelstats->old_rel_tuples = onerel->rd_rel->reltuples; + vacrelstats->old_live_tuples = onerel->rd_rel->reltuples; vacrelstats->num_index_scans = 0; vacrelstats->pages_removed = 0; vacrelstats->lock_waiter_detected = false; @@ -286,9 +293,6 @@ lazy_vacuum_rel(Relation onerel, int options, VacuumParams *params, pgstat_progress_update_param(PROGRESS_VACUUM_PHASE, PROGRESS_VACUUM_PHASE_FINAL_CLEANUP); - /* Vacuum the Free Space Map */ - FreeSpaceMapVacuum(onerel); - /* * Update statistics in pg_class. * @@ -311,11 +315,11 @@ lazy_vacuum_rel(Relation onerel, int options, VacuumParams *params, * since then we don't know for certain that all tuples have a newer xmin. 
*/ new_rel_pages = vacrelstats->rel_pages; - new_rel_tuples = vacrelstats->new_rel_tuples; + new_live_tuples = vacrelstats->new_live_tuples; if (vacrelstats->tupcount_pages == 0 && new_rel_pages > 0) { new_rel_pages = vacrelstats->old_rel_pages; - new_rel_tuples = vacrelstats->old_rel_tuples; + new_live_tuples = vacrelstats->old_live_tuples; } visibilitymap_count(onerel, &new_rel_allvisible, NULL); @@ -327,7 +331,7 @@ lazy_vacuum_rel(Relation onerel, int options, VacuumParams *params, vac_update_relstats(onerel, new_rel_pages, - new_rel_tuples, + new_live_tuples, new_rel_allvisible, vacrelstats->hasindex, new_frozen_xid, @@ -335,10 +339,6 @@ lazy_vacuum_rel(Relation onerel, int options, VacuumParams *params, false); /* report results to the stats collector, too */ - new_live_tuples = new_rel_tuples - vacrelstats->new_dead_tuples; - if (new_live_tuples < 0) - new_live_tuples = 0; /* just in case */ - pgstat_report_vacuum(RelationGetRelid(onerel), onerel->rd_rel->relisshared, new_live_tuples, @@ -355,6 +355,7 @@ lazy_vacuum_rel(Relation onerel, int options, VacuumParams *params, params->log_min_duration)) { StringInfoData buf; + char *msgfmt; TimestampDifference(starttime, endtime, &secs, &usecs); @@ -373,7 +374,21 @@ lazy_vacuum_rel(Relation onerel, int options, VacuumParams *params, * emitting individual parts of the message when not applicable. */ initStringInfo(&buf); - appendStringInfo(&buf, _("automatic vacuum of table \"%s.%s.%s\": index scans: %d\n"), + if (params->is_wraparound) + { + if (aggressive) + msgfmt = _("automatic aggressive vacuum to prevent wraparound of table \"%s.%s.%s\": index scans: %d\n"); + else + msgfmt = _("automatic vacuum to prevent wraparound of table \"%s.%s.%s\": index scans: %d\n"); + } + else + { + if (aggressive) + msgfmt = _("automatic aggressive vacuum of table \"%s.%s.%s\": index scans: %d\n"); + else + msgfmt = _("automatic vacuum of table \"%s.%s.%s\": index scans: %d\n"); + } + appendStringInfo(&buf, msgfmt, get_database_name(MyDatabaseId), get_namespace_name(RelationGetNamespace(onerel)), RelationGetRelationName(onerel), @@ -462,12 +477,16 @@ lazy_scan_heap(Relation onerel, int options, LVRelStats *vacrelstats, blkno; HeapTupleData tuple; char *relname; + TransactionId relfrozenxid = onerel->rd_rel->relfrozenxid; + TransactionId relminmxid = onerel->rd_rel->relminmxid; BlockNumber empty_pages, - vacuumed_pages; - double num_tuples, - tups_vacuumed, - nkeep, - nunused; + vacuumed_pages, + next_fsm_block_to_vacuum; + double num_tuples, /* total number of nonremovable tuples */ + live_tuples, /* live tuples (reltuples estimate) */ + tups_vacuumed, /* tuples cleaned up by vacuum */ + nkeep, /* dead-but-not-removable tuples */ + nunused; /* unused item pointers */ IndexBulkDeleteResult **indstats; int i; PGRUsage ru0; @@ -486,13 +505,20 @@ lazy_scan_heap(Relation onerel, int options, LVRelStats *vacrelstats, pg_rusage_init(&ru0); relname = RelationGetRelationName(onerel); - ereport(elevel, - (errmsg("vacuuming \"%s.%s\"", - get_namespace_name(RelationGetNamespace(onerel)), - relname))); + if (aggressive) + ereport(elevel, + (errmsg("aggressively vacuuming \"%s.%s\"", + get_namespace_name(RelationGetNamespace(onerel)), + relname))); + else + ereport(elevel, + (errmsg("vacuuming \"%s.%s\"", + get_namespace_name(RelationGetNamespace(onerel)), + relname))); empty_pages = vacuumed_pages = 0; - num_tuples = tups_vacuumed = nkeep = nunused = 0; + next_fsm_block_to_vacuum = (BlockNumber) 0; + num_tuples = live_tuples = tups_vacuumed = nkeep = nunused = 0; 
indstats = (IndexBulkDeleteResult **) palloc0(nindexes * sizeof(IndexBulkDeleteResult *)); @@ -743,6 +769,13 @@ lazy_scan_heap(Relation onerel, int options, LVRelStats *vacrelstats, vacrelstats->num_dead_tuples = 0; vacrelstats->num_index_scans++; + /* + * Vacuum the Free Space Map to make newly-freed space visible on + * upper-level FSM pages. Note we have not yet processed blkno. + */ + FreeSpaceMapVacuumRange(onerel, next_fsm_block_to_vacuum, blkno); + next_fsm_block_to_vacuum = blkno; + /* Report that we are once again scanning the heap */ pgstat_progress_update_param(PROGRESS_VACUUM_PHASE, PROGRESS_VACUUM_PHASE_SCAN_HEAP); @@ -975,6 +1008,17 @@ lazy_scan_heap(Relation onerel, int options, LVRelStats *vacrelstats, tupgone = false; + /* + * The criteria for counting a tuple as live in this block need to + * match what analyze.c's acquire_sample_rows() does, otherwise + * VACUUM and ANALYZE may produce wildly different reltuples + * values, e.g. when there are many recently-dead tuples. + * + * The logic here is a bit simpler than acquire_sample_rows(), as + * VACUUM can't run inside a transaction block, which makes some + * cases impossible (e.g. in-progress insert from the same + * transaction). + */ switch (HeapTupleSatisfiesVacuum(&tuple, OldestXmin, buf)) { case HEAPTUPLE_DEAD: @@ -993,6 +1037,13 @@ lazy_scan_heap(Relation onerel, int options, LVRelStats *vacrelstats, * tuple, we choose to keep it, because it'll be a lot * cheaper to get rid of it in the next pruning pass than * to treat it like an indexed tuple. + * + * If this were to happen for a tuple that actually needed + * to be deleted, we'd be in trouble, because it'd + * possibly leave a tuple below the relation's xmin + * horizon alive. heap_prepare_freeze_tuple() is prepared + * to detect that case and abort the transaction, + * preventing corruption. */ if (HeapTupleIsHotUpdated(&tuple) || HeapTupleIsHeapOnly(&tuple)) @@ -1008,6 +1059,12 @@ lazy_scan_heap(Relation onerel, int options, LVRelStats *vacrelstats, elog(WARNING, "relation \"%s\" TID %u/%u: OID is invalid", relname, blkno, offnum); + /* + * Count it as live. Not only is this natural, but it's + * also what acquire_sample_rows() does. + */ + live_tuples += 1; + /* * Is the tuple definitely visible to all transactions? * @@ -1053,12 +1110,29 @@ lazy_scan_heap(Relation onerel, int options, LVRelStats *vacrelstats, all_visible = false; break; case HEAPTUPLE_INSERT_IN_PROGRESS: - /* This is an expected case during concurrent vacuum */ + + /* + * This is an expected case during concurrent vacuum. + * + * We do not count these rows as live, because we expect + * the inserting transaction to update the counters at + * commit, and we assume that will happen only after we + * report our results. This assumption is a bit shaky, + * but it is what acquire_sample_rows() does, so be + * consistent. + */ all_visible = false; break; case HEAPTUPLE_DELETE_IN_PROGRESS: /* This is an expected case during concurrent vacuum */ all_visible = false; + + /* + * Count such rows as live. As above, we assume the + * deleting transaction will commit and update the + * counters after we report. + */ + live_tuples += 1; break; default: elog(ERROR, "unexpected HeapTupleSatisfiesVacuum result"); @@ -1084,8 +1158,10 @@ lazy_scan_heap(Relation onerel, int options, LVRelStats *vacrelstats, * Each non-removable tuple must be checked to see if it needs * freezing. Note we already have exclusive buffer lock. 
*/ - if (heap_prepare_freeze_tuple(tuple.t_data, FreezeLimit, - MultiXactCutoff, &frozen[nfrozen], + if (heap_prepare_freeze_tuple(tuple.t_data, + relfrozenxid, relminmxid, + FreezeLimit, MultiXactCutoff, + &frozen[nfrozen], &tuple_totally_frozen)) frozen[nfrozen++].offset = offnum; @@ -1148,6 +1224,19 @@ lazy_scan_heap(Relation onerel, int options, LVRelStats *vacrelstats, */ vacrelstats->num_dead_tuples = 0; vacuumed_pages++; + + /* + * Periodically do incremental FSM vacuuming to make newly-freed + * space visible on upper FSM pages. Note: although we've cleaned + * the current block, we haven't yet updated its FSM entry (that + * happens further down), so passing end == blkno is correct. + */ + if (blkno - next_fsm_block_to_vacuum >= VACUUM_FSM_EVERY_PAGES) + { + FreeSpaceMapVacuumRange(onerel, next_fsm_block_to_vacuum, + blkno); + next_fsm_block_to_vacuum = blkno; + } } freespace = PageGetHeapFreeSpace(page); @@ -1259,15 +1348,18 @@ lazy_scan_heap(Relation onerel, int options, LVRelStats *vacrelstats, pfree(frozen); /* save stats for use later */ - vacrelstats->scanned_tuples = num_tuples; vacrelstats->tuples_deleted = tups_vacuumed; vacrelstats->new_dead_tuples = nkeep; /* now we can compute the new value for pg_class.reltuples */ - vacrelstats->new_rel_tuples = vac_estimate_reltuples(onerel, false, - nblocks, - vacrelstats->tupcount_pages, - num_tuples); + vacrelstats->new_live_tuples = vac_estimate_reltuples(onerel, + nblocks, + vacrelstats->tupcount_pages, + live_tuples); + + /* also compute total number of surviving heap entries */ + vacrelstats->new_rel_tuples = + vacrelstats->new_live_tuples + vacrelstats->new_dead_tuples; /* * Release any remaining pin on visibility map page. @@ -1313,6 +1405,13 @@ lazy_scan_heap(Relation onerel, int options, LVRelStats *vacrelstats, vacrelstats->num_index_scans++; } + /* + * Vacuum the remainder of the Free Space Map. We must do this whether or + * not there were indexes. + */ + if (blkno > next_fsm_block_to_vacuum) + FreeSpaceMapVacuumRange(onerel, next_fsm_block_to_vacuum, blkno); + /* report all blocks vacuumed; and that we're cleaning up */ pgstat_progress_update_param(PROGRESS_VACUUM_HEAP_BLKS_VACUUMED, blkno); pgstat_progress_update_param(PROGRESS_VACUUM_PHASE, @@ -1351,7 +1450,7 @@ lazy_scan_heap(Relation onerel, int options, LVRelStats *vacrelstats, "%u pages are entirely empty.\n", empty_pages), empty_pages); - appendStringInfo(&buf, "%s.", pg_rusage_show(&ru0)); + appendStringInfo(&buf, _("%s."), pg_rusage_show(&ru0)); ereport(elevel, (errmsg("\"%s\": found %.0f removable, %.0f nonremovable row versions in %u out of %u pages", @@ -1603,7 +1702,8 @@ lazy_vacuum_index(Relation indrel, ivinfo.analyze_only = false; ivinfo.estimated_count = true; ivinfo.message_level = elevel; - ivinfo.num_heap_tuples = vacrelstats->old_rel_tuples; + /* We can only provide an approximate value of num_heap_tuples here */ + ivinfo.num_heap_tuples = vacrelstats->old_live_tuples; ivinfo.strategy = vac_strategy; /* Do bulk deletion */ @@ -1634,6 +1734,12 @@ lazy_cleanup_index(Relation indrel, ivinfo.analyze_only = false; ivinfo.estimated_count = (vacrelstats->tupcount_pages < vacrelstats->rel_pages); ivinfo.message_level = elevel; + + /* + * Now we can provide a better estimate of total number of surviving + * tuples (we assume indexes are more interested in that than in the + * number of nominally live tuples). 
+ */ ivinfo.num_heap_tuples = vacrelstats->new_rel_tuples; ivinfo.strategy = vac_strategy; diff --git a/src/backend/commands/variable.c b/src/backend/commands/variable.c index 3ed1c56e82..c2d7a5bebf 100644 --- a/src/backend/commands/variable.c +++ b/src/backend/commands/variable.c @@ -4,7 +4,7 @@ * Routines for handling specialized SET variables. * * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * @@ -522,32 +522,9 @@ check_transaction_read_only(bool *newval, void **extra, GucSource source) * As in check_transaction_read_only, allow it if not inside a transaction. */ bool -check_XactIsoLevel(char **newval, void **extra, GucSource source) +check_XactIsoLevel(int *newval, void **extra, GucSource source) { - int newXactIsoLevel; - - if (strcmp(*newval, "serializable") == 0) - { - newXactIsoLevel = XACT_SERIALIZABLE; - } - else if (strcmp(*newval, "repeatable read") == 0) - { - newXactIsoLevel = XACT_REPEATABLE_READ; - } - else if (strcmp(*newval, "read committed") == 0) - { - newXactIsoLevel = XACT_READ_COMMITTED; - } - else if (strcmp(*newval, "read uncommitted") == 0) - { - newXactIsoLevel = XACT_READ_UNCOMMITTED; - } - else if (strcmp(*newval, "default") == 0) - { - newXactIsoLevel = DefaultXactIsoLevel; - } - else - return false; + int newXactIsoLevel = *newval; if (newXactIsoLevel != XactIsoLevel && IsTransactionState()) { @@ -574,39 +551,9 @@ check_XactIsoLevel(char **newval, void **extra, GucSource source) } } - *extra = malloc(sizeof(int)); - if (!*extra) - return false; - *((int *) *extra) = newXactIsoLevel; - return true; } -void -assign_XactIsoLevel(const char *newval, void *extra) -{ - XactIsoLevel = *((int *) extra); -} - -const char * -show_XactIsoLevel(void) -{ - /* We need this because we don't want to show "default". */ - switch (XactIsoLevel) - { - case XACT_READ_UNCOMMITTED: - return "read uncommitted"; - case XACT_READ_COMMITTED: - return "read committed"; - case XACT_REPEATABLE_READ: - return "repeatable read"; - case XACT_SERIALIZABLE: - return "serializable"; - default: - return "bogus"; - } -} - /* * SET TRANSACTION [NOT] DEFERRABLE */ diff --git a/src/backend/commands/view.c b/src/backend/commands/view.c index f25a5658d6..b670cad8b1 100644 --- a/src/backend/commands/view.c +++ b/src/backend/commands/view.c @@ -3,7 +3,7 @@ * view.c * use rewrite rules to construct views * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * @@ -43,11 +43,11 @@ static void checkViewTupleDesc(TupleDesc newdesc, TupleDesc olddesc); * are "local" and "cascaded". */ void -validateWithCheckOption(char *value) +validateWithCheckOption(const char *value) { if (value == NULL || - (pg_strcasecmp(value, "local") != 0 && - pg_strcasecmp(value, "cascaded") != 0)) + (strcmp(value, "local") != 0 && + strcmp(value, "cascaded") != 0)) { ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), @@ -61,6 +61,8 @@ validateWithCheckOption(char *value) * * Create a view relation and use the rules system to store the query * for the view. + * + * EventTriggerAlterTableStart must have been called already. 
*--------------------------------------------------------------------- */ static ObjectAddress @@ -186,6 +188,7 @@ DefineVirtualRelation(RangeVar *relation, List *tlist, bool replace, atcmds = lappend(atcmds, atcmd); } + /* EventTriggerAlterTableStart called by ProcessUtilitySlow */ AlterTableInternal(viewOid, atcmds, true); /* Make the new view columns visible */ @@ -217,6 +220,7 @@ DefineVirtualRelation(RangeVar *relation, List *tlist, bool replace, atcmd->def = (Node *) options; atcmds = list_make1(atcmd); + /* EventTriggerAlterTableStart called by ProcessUtilitySlow */ AlterTableInternal(viewOid, atcmds, true); ObjectAddressSet(address, RelationRelationId, viewOid); @@ -283,8 +287,8 @@ checkViewTupleDesc(TupleDesc newdesc, TupleDesc olddesc) for (i = 0; i < olddesc->natts; i++) { - Form_pg_attribute newattr = newdesc->attrs[i]; - Form_pg_attribute oldattr = olddesc->attrs[i]; + Form_pg_attribute newattr = TupleDescAttr(newdesc, i); + Form_pg_attribute oldattr = TupleDescAttr(olddesc, i); /* XXX msg not right, but we don't support DROP COL on view anyway */ if (newattr->attisdropped != oldattr->attisdropped) @@ -353,7 +357,7 @@ DefineViewRules(Oid viewOid, Query *viewParse, bool replace) * by 2... * * These extra RT entries are not actually used in the query, - * except for run-time permission checking. + * except for run-time locking and permission checking. *--------------------------------------------------------------- */ static Query * @@ -386,9 +390,11 @@ UpdateRangeTableOfViewParse(Oid viewOid, Query *viewParse) * OLD first, then NEW.... */ rt_entry1 = addRangeTableEntryForRelation(pstate, viewRel, + AccessShareLock, makeAlias("old", NIL), false, false); rt_entry2 = addRangeTableEntryForRelation(pstate, viewRel, + AccessShareLock, makeAlias("new", NIL), false, false); /* Must override addRangeTableEntry's default access-check flags */ @@ -485,7 +491,7 @@ DefineView(ViewStmt *stmt, const char *queryString, { DefElem *defel = (DefElem *) lfirst(cell); - if (pg_strcasecmp(defel->defname, "check_option") == 0) + if (strcmp(defel->defname, "check_option") == 0) check_option = true; } @@ -502,7 +508,7 @@ DefineView(ViewStmt *stmt, const char *queryString, ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), errmsg("WITH CHECK OPTION is supported only on automatically updatable views"), - errhint("%s", view_updatable_error))); + errhint("%s", _(view_updatable_error)))); } /* diff --git a/src/backend/common.mk b/src/backend/common.mk index 5d599dbd0c..663e9f886c 100644 --- a/src/backend/common.mk +++ b/src/backend/common.mk @@ -8,13 +8,7 @@ # this directory and SUBDIRS to subdirectories containing more things # to build. -ifdef PARTIAL_LINKING -# old style: linking using SUBSYS.o -subsysfilename = SUBSYS.o -else -# new style: linking all object files at once subsysfilename = objfiles.txt -endif SUBDIROBJS = $(SUBDIRS:%=%/$(subsysfilename)) @@ -30,6 +24,10 @@ objfiles.txt: Makefile $(SUBDIROBJS) $(OBJS) # Don't rebuild the list if only the OBJS have changed. 
$(if $(filter-out $(OBJS),$?),( $(if $(SUBDIROBJS),cat $(SUBDIROBJS); )echo $(addprefix $(subdir)/,$(OBJS)) ) >$@,touch $@) +ifeq ($(with_llvm), yes) +objfiles.txt: $(patsubst %.o,%.bc, $(OBJS)) +endif + # make function to expand objfiles.txt contents expand_subsys = $(foreach file,$(1),$(if $(filter %/objfiles.txt,$(file)),$(patsubst ../../src/backend/%,%,$(addprefix $(top_builddir)/,$(shell cat $(file)))),$(file))) @@ -43,6 +41,7 @@ $(SUBDIRS:%=%-recursive): $(call recurse,clean) clean: clean-local clean-local: - rm -f $(subsysfilename) $(OBJS) + rm -f $(subsysfilename) $(OBJS) $(patsubst %.o,%.bc, $(OBJS)) $(call recurse,coverage) +$(call recurse,install) diff --git a/src/backend/executor/Makefile b/src/backend/executor/Makefile index 083b20f3fe..cc09895fa5 100644 --- a/src/backend/executor/Makefile +++ b/src/backend/executor/Makefile @@ -14,7 +14,7 @@ include $(top_builddir)/src/Makefile.global OBJS = execAmi.o execCurrent.o execExpr.o execExprInterp.o \ execGrouping.o execIndexing.o execJunk.o \ - execMain.o execParallel.o execProcnode.o \ + execMain.o execParallel.o execPartition.o execProcnode.o \ execReplication.o execScan.o execSRF.o execTuples.o \ execUtils.o functions.o instrument.o nodeAppend.o nodeAgg.o \ nodeBitmapAnd.o nodeBitmapOr.o \ diff --git a/src/backend/executor/README b/src/backend/executor/README index a0045067fb..0d7cd552eb 100644 --- a/src/backend/executor/README +++ b/src/backend/executor/README @@ -241,11 +241,13 @@ This is a sketch of control flow for full query processing: CreateExecutorState creates per-query context switch to per-query context to run ExecInitNode + AfterTriggerBeginQuery ExecInitNode --- recursively scans plan tree + ExecInitNode + recurse into subsidiary nodes CreateExprContext creates per-tuple context ExecInitExpr - AfterTriggerBeginQuery ExecutorRun ExecProcNode --- recursively called in per-query context diff --git a/src/backend/executor/execAmi.c b/src/backend/executor/execAmi.c index f1636a5b88..9e78421978 100644 --- a/src/backend/executor/execAmi.c +++ b/src/backend/executor/execAmi.c @@ -3,7 +3,7 @@ * execAmi.c * miscellaneous executor access method routines * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/backend/executor/execAmi.c diff --git a/src/backend/executor/execCurrent.c b/src/backend/executor/execCurrent.c index f00fce5913..aadf749382 100644 --- a/src/backend/executor/execCurrent.c +++ b/src/backend/executor/execCurrent.c @@ -3,7 +3,7 @@ * execCurrent.c * executor support for WHERE CURRENT OF cursor * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/backend/executor/execCurrent.c @@ -12,6 +12,7 @@ */ #include "postgres.h" +#include "access/relscan.h" #include "access/sysattr.h" #include "catalog/pg_type.h" #include "executor/executor.h" @@ -22,7 +23,8 @@ static char *fetch_cursor_param_value(ExprContext *econtext, int paramId); -static ScanState *search_plan_tree(PlanState *node, Oid table_oid); +static ScanState *search_plan_tree(PlanState *node, Oid table_oid, + bool *pending_rescan); /* @@ -32,7 +34,7 @@ static ScanState *search_plan_tree(PlanState *node, Oid table_oid); * of the table is currently being scanned by the cursor named by CURRENT OF, * and return the row's 
TID into *current_tid. * - * Returns TRUE if a row was identified. Returns FALSE if the cursor is valid + * Returns true if a row was identified. Returns false if the cursor is valid * for the table but is not currently scanning a row of the table (this is a * legal situation in inheritance cases). Raises error if cursor is not a * valid updatable scan of the specified table. @@ -75,7 +77,7 @@ execCurrentOf(CurrentOfExpr *cexpr, (errcode(ERRCODE_INVALID_CURSOR_STATE), errmsg("cursor \"%s\" is not a SELECT query", cursor_name))); - queryDesc = PortalGetQueryDesc(portal); + queryDesc = portal->queryDesc; if (queryDesc == NULL || queryDesc->estate == NULL) ereport(ERROR, (errcode(ERRCODE_INVALID_CURSOR_STATE), @@ -89,21 +91,22 @@ execCurrentOf(CurrentOfExpr *cexpr, * the other code can't, while the non-FOR-UPDATE case allows use of WHERE * CURRENT OF with an insensitive cursor. */ - if (queryDesc->estate->es_rowMarks) + if (queryDesc->estate->es_rowmarks) { ExecRowMark *erm; - ListCell *lc; + Index i; /* * Here, the query must have exactly one FOR UPDATE/SHARE reference to * the target table, and we dig the ctid info out of that. */ erm = NULL; - foreach(lc, queryDesc->estate->es_rowMarks) + for (i = 0; i < queryDesc->estate->es_range_table_size; i++) { - ExecRowMark *thiserm = (ExecRowMark *) lfirst(lc); + ExecRowMark *thiserm = queryDesc->estate->es_rowmarks[i]; - if (!RowMarkRequiresRowShareLock(thiserm->markType)) + if (thiserm == NULL || + !RowMarkRequiresRowShareLock(thiserm->markType)) continue; /* ignore non-FOR UPDATE/SHARE items */ if (thiserm->relid == table_oid) @@ -149,17 +152,16 @@ execCurrentOf(CurrentOfExpr *cexpr, } else { - ScanState *scanstate; - bool lisnull; - Oid tuple_tableoid PG_USED_FOR_ASSERTS_ONLY; - ItemPointer tuple_tid; - /* * Without FOR UPDATE, we dig through the cursor's plan to find the * scan node. Fail if it's not there or buried underneath * aggregation. */ - scanstate = search_plan_tree(queryDesc->planstate, table_oid); + ScanState *scanstate; + bool pending_rescan = false; + + scanstate = search_plan_tree(queryDesc->planstate, table_oid, + &pending_rescan); if (!scanstate) ereport(ERROR, (errcode(ERRCODE_INVALID_CURSOR_STATE), @@ -179,25 +181,70 @@ execCurrentOf(CurrentOfExpr *cexpr, errmsg("cursor \"%s\" is not positioned on a row", cursor_name))); - /* Now OK to return false if we found an inactive scan */ - if (TupIsNull(scanstate->ss_ScanTupleSlot)) + /* + * Now OK to return false if we found an inactive scan. It is + * inactive either if it's not positioned on a row, or there's a + * rescan pending for it. + */ + if (TupIsNull(scanstate->ss_ScanTupleSlot) || pending_rescan) return false; - /* Use slot_getattr to catch any possible mistakes */ - tuple_tableoid = - DatumGetObjectId(slot_getattr(scanstate->ss_ScanTupleSlot, - TableOidAttributeNumber, - &lisnull)); - Assert(!lisnull); - tuple_tid = (ItemPointer) - DatumGetPointer(slot_getattr(scanstate->ss_ScanTupleSlot, - SelfItemPointerAttributeNumber, - &lisnull)); - Assert(!lisnull); + /* + * Extract TID of the scan's current row. The mechanism for this is + * in principle scan-type-dependent, but for most scan types, we can + * just dig the TID out of the physical scan tuple. + */ + if (IsA(scanstate, IndexOnlyScanState)) + { + /* + * For IndexOnlyScan, the tuple stored in ss_ScanTupleSlot may be + * a virtual tuple that does not have the ctid column, so we have + * to get the TID from xs_ctup.t_self. 
+ */ + IndexScanDesc scan = ((IndexOnlyScanState *) scanstate)->ioss_ScanDesc; + + *current_tid = scan->xs_ctup.t_self; + } + else + { + /* + * Default case: try to fetch TID from the scan node's current + * tuple. As an extra cross-check, verify tableoid in the current + * tuple. If the scan hasn't provided a physical tuple, we have + * to fail. + */ + Datum ldatum; + bool lisnull; + ItemPointer tuple_tid; + +#ifdef USE_ASSERT_CHECKING + if (!slot_getsysattr(scanstate->ss_ScanTupleSlot, + TableOidAttributeNumber, + &ldatum, + &lisnull)) + ereport(ERROR, + (errcode(ERRCODE_INVALID_CURSOR_STATE), + errmsg("cursor \"%s\" is not a simply updatable scan of table \"%s\"", + cursor_name, table_name))); + Assert(!lisnull); + Assert(DatumGetObjectId(ldatum) == table_oid); +#endif + + if (!slot_getsysattr(scanstate->ss_ScanTupleSlot, + SelfItemPointerAttributeNumber, + &ldatum, + &lisnull)) + ereport(ERROR, + (errcode(ERRCODE_INVALID_CURSOR_STATE), + errmsg("cursor \"%s\" is not a simply updatable scan of table \"%s\"", + cursor_name, table_name))); + Assert(!lisnull); + tuple_tid = (ItemPointer) DatumGetPointer(ldatum); - Assert(tuple_tableoid == table_oid); + *current_tid = *tuple_tid; + } - *current_tid = *tuple_tid; + Assert(ItemPointerIsValid(current_tid)); return true; } @@ -216,11 +263,14 @@ fetch_cursor_param_value(ExprContext *econtext, int paramId) if (paramInfo && paramId > 0 && paramId <= paramInfo->numParams) { - ParamExternData *prm = &paramInfo->params[paramId - 1]; + ParamExternData *prm; + ParamExternData prmdata; /* give hook a chance in case parameter is dynamic */ - if (!OidIsValid(prm->ptype) && paramInfo->paramFetch != NULL) - (*paramInfo->paramFetch) (paramInfo, paramId); + if (paramInfo->paramFetch != NULL) + prm = paramInfo->paramFetch(paramInfo, paramId, false, &prmdata); + else + prm = &paramInfo->params[paramId - 1]; if (OidIsValid(prm->ptype) && !prm->isnull) { @@ -249,10 +299,20 @@ fetch_cursor_param_value(ExprContext *econtext, int paramId) * * Search through a PlanState tree for a scan node on the specified table. * Return NULL if not found or multiple candidates. + * + * If a candidate is found, set *pending_rescan to true if that candidate + * or any node above it has a pending rescan action, i.e. chgParam != NULL. + * That indicates that we shouldn't consider the node to be positioned on a + * valid tuple, even if its own state would indicate that it is. (Caller + * must initialize *pending_rescan to false, and should not trust its state + * if multiple candidates are found.)
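The new pending_rescan output of search_plan_tree() is computed bottom-up: return the unique matching scan found below, and on the way back up set the flag whenever the current node is marked as having an unserviced rescan (chgParam != NULL). A standalone sketch of that shape, assuming an invented Node type with dirty and is_target fields in place of the executor's real structures:

    #include <stdbool.h>
    #include <stdio.h>

    typedef struct Node
    {
        int          id;
        bool         dirty;        /* stand-in for chgParam != NULL */
        bool         is_target;    /* stand-in for "scan of the wanted table" */
        struct Node *left;
        struct Node *right;
    } Node;

    /*
     * Find the unique target node under "node".  When one is found, *pending
     * is set to true if that node or any node above it (up to the starting
     * point) is marked dirty.  Caller must initialize *pending to false.
     */
    static Node *
    search(Node *node, bool *pending)
    {
        Node *result = NULL;

        if (node == NULL)
            return NULL;

        if (node->is_target)
            result = node;
        else
        {
            Node *l = search(node->left, pending);
            Node *r = search(node->right, pending);

            if (l && r)
                return NULL;        /* multiple matches: give up */
            result = l ? l : r;
        }

        /* A dirty mark here means a pending rescan affects the candidate. */
        if (result && node->dirty)
            *pending = true;

        return result;
    }

    int
    main(void)
    {
        Node  leaf = {3, false, true, NULL, NULL};
        Node  mid = {2, true, false, &leaf, NULL};   /* dirty ancestor */
        Node  root = {1, false, false, &mid, NULL};
        bool  pending = false;
        Node *found = search(&root, &pending);

        printf("found=%d pending=%s\n", found ? found->id : -1,
               pending ? "true" : "false");
        return 0;
    }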
*/ static ScanState * -search_plan_tree(PlanState *node, Oid table_oid) +search_plan_tree(PlanState *node, Oid table_oid, + bool *pending_rescan) { + ScanState *result = NULL; + if (node == NULL) return NULL; switch (nodeTag(node)) @@ -272,7 +332,7 @@ search_plan_tree(PlanState *node, Oid table_oid) ScanState *sstate = (ScanState *) node; if (RelationGetRelid(sstate->ss_currentRelation) == table_oid) - return sstate; + result = sstate; break; } @@ -283,13 +343,13 @@ search_plan_tree(PlanState *node, Oid table_oid) case T_AppendState: { AppendState *astate = (AppendState *) node; - ScanState *result = NULL; int i; for (i = 0; i < astate->as_nplans; i++) { ScanState *elem = search_plan_tree(astate->appendplans[i], - table_oid); + table_oid, + pending_rescan); if (!elem) continue; @@ -297,7 +357,7 @@ search_plan_tree(PlanState *node, Oid table_oid) return NULL; /* multiple matches */ result = elem; } - return result; + break; } /* @@ -306,13 +366,13 @@ search_plan_tree(PlanState *node, Oid table_oid) case T_MergeAppendState: { MergeAppendState *mstate = (MergeAppendState *) node; - ScanState *result = NULL; int i; for (i = 0; i < mstate->ms_nplans; i++) { ScanState *elem = search_plan_tree(mstate->mergeplans[i], - table_oid); + table_oid, + pending_rescan); if (!elem) continue; @@ -320,7 +380,7 @@ search_plan_tree(PlanState *node, Oid table_oid) return NULL; /* multiple matches */ result = elem; } - return result; + break; } /* @@ -329,18 +389,31 @@ search_plan_tree(PlanState *node, Oid table_oid) */ case T_ResultState: case T_LimitState: - return search_plan_tree(node->lefttree, table_oid); + result = search_plan_tree(node->lefttree, + table_oid, + pending_rescan); + break; /* * SubqueryScan too, but it keeps the child in a different place */ case T_SubqueryScanState: - return search_plan_tree(((SubqueryScanState *) node)->subplan, - table_oid); + result = search_plan_tree(((SubqueryScanState *) node)->subplan, + table_oid, + pending_rescan); + break; default: /* Otherwise, assume we can't descend through it */ break; } - return NULL; + + /* + * If we found a candidate at or below this node, then this node's + * chgParam indicates a pending rescan that will affect the candidate. + */ + if (result && node->chgParam != NULL) + *pending_rescan = true; + + return result; } diff --git a/src/backend/executor/execExpr.c b/src/backend/executor/execExpr.c index 7496189fab..885da18306 100644 --- a/src/backend/executor/execExpr.c +++ b/src/backend/executor/execExpr.c @@ -19,7 +19,7 @@ * and "Expression Evaluation" sections. 
* * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * @@ -36,6 +36,7 @@ #include "executor/execExpr.h" #include "executor/nodeSubplan.h" #include "funcapi.h" +#include "jit/jit.h" #include "miscadmin.h" #include "nodes/makefuncs.h" #include "nodes/nodeFuncs.h" @@ -43,6 +44,7 @@ #include "optimizer/planner.h" #include "pgstat.h" #include "utils/builtins.h" +#include "utils/datum.h" #include "utils/lsyscache.h" #include "utils/typcache.h" @@ -55,23 +57,27 @@ typedef struct LastAttnumInfo } LastAttnumInfo; static void ExecReadyExpr(ExprState *state); -static void ExecInitExprRec(Expr *node, PlanState *parent, ExprState *state, +static void ExecInitExprRec(Expr *node, ExprState *state, Datum *resv, bool *resnull); -static void ExprEvalPushStep(ExprState *es, const ExprEvalStep *s); static void ExecInitFunc(ExprEvalStep *scratch, Expr *node, List *args, - Oid funcid, Oid inputcollid, PlanState *parent, + Oid funcid, Oid inputcollid, ExprState *state); static void ExecInitExprSlots(ExprState *state, Node *node); +static void ExecPushExprSlots(ExprState *state, LastAttnumInfo *info); static bool get_last_attnums_walker(Node *node, LastAttnumInfo *info); static void ExecInitWholeRowVar(ExprEvalStep *scratch, Var *variable, - PlanState *parent); + ExprState *state); static void ExecInitArrayRef(ExprEvalStep *scratch, ArrayRef *aref, - PlanState *parent, ExprState *state, + ExprState *state, Datum *resv, bool *resnull); static bool isAssignmentIndirectionExpr(Expr *expr); static void ExecInitCoerceToDomain(ExprEvalStep *scratch, CoerceToDomain *ctest, - PlanState *parent, ExprState *state, + ExprState *state, Datum *resv, bool *resnull); +static void ExecBuildAggTransCall(ExprState *state, AggState *aggstate, + ExprEvalStep *scratch, + FunctionCallInfo fcinfo, AggStatePerTrans pertrans, + int transno, int setno, int setoff, bool ishash); /* @@ -113,7 +119,7 @@ ExprState * ExecInitExpr(Expr *node, PlanState *parent) { ExprState *state; - ExprEvalStep scratch; + ExprEvalStep scratch = {0}; /* Special case: NULL expression produces a NULL ExprState pointer */ if (node == NULL) @@ -122,12 +128,51 @@ ExecInitExpr(Expr *node, PlanState *parent) /* Initialize ExprState with empty step list */ state = makeNode(ExprState); state->expr = node; + state->parent = parent; + state->ext_params = NULL; /* Insert EEOP_*_FETCHSOME steps as needed */ ExecInitExprSlots(state, (Node *) node); /* Compile the expression proper */ - ExecInitExprRec(node, parent, state, &state->resvalue, &state->resnull); + ExecInitExprRec(node, state, &state->resvalue, &state->resnull); + + /* Finally, append a DONE step */ + scratch.opcode = EEOP_DONE; + ExprEvalPushStep(state, &scratch); + + ExecReadyExpr(state); + + return state; +} + +/* + * ExecInitExprWithParams: prepare a standalone expression tree for execution + * + * This is the same as ExecInitExpr, except that there is no parent PlanState, + * and instead we may have a ParamListInfo describing PARAM_EXTERN Params. 
+ */ +ExprState * +ExecInitExprWithParams(Expr *node, ParamListInfo ext_params) +{ + ExprState *state; + ExprEvalStep scratch = {0}; + + /* Special case: NULL expression produces a NULL ExprState pointer */ + if (node == NULL) + return NULL; + + /* Initialize ExprState with empty step list */ + state = makeNode(ExprState); + state->expr = node; + state->parent = NULL; + state->ext_params = ext_params; + + /* Insert EEOP_*_FETCHSOME steps as needed */ + ExecInitExprSlots(state, (Node *) node); + + /* Compile the expression proper */ + ExecInitExprRec(node, state, &state->resvalue, &state->resnull); /* Finally, append a DONE step */ scratch.opcode = EEOP_DONE; @@ -160,7 +205,7 @@ ExprState * ExecInitQual(List *qual, PlanState *parent) { ExprState *state; - ExprEvalStep scratch; + ExprEvalStep scratch = {0}; List *adjust_jumps = NIL; ListCell *lc; @@ -172,6 +217,9 @@ ExecInitQual(List *qual, PlanState *parent) state = makeNode(ExprState); state->expr = (Expr *) qual; + state->parent = parent; + state->ext_params = NULL; + /* mark expression as to be used with ExecQual() */ state->flags = EEO_FLAG_IS_QUAL; @@ -198,7 +246,7 @@ ExecInitQual(List *qual, PlanState *parent) Expr *node = (Expr *) lfirst(lc); /* first evaluate expression */ - ExecInitExprRec(node, parent, state, &state->resvalue, &state->resnull); + ExecInitExprRec(node, state, &state->resvalue, &state->resnull); /* then emit EEOP_QUAL to detect if it's false (or null) */ scratch.d.qualexpr.jumpdone = -1; @@ -306,7 +354,7 @@ ExecBuildProjectionInfo(List *targetList, { ProjectionInfo *projInfo = makeNode(ProjectionInfo); ExprState *state; - ExprEvalStep scratch; + ExprEvalStep scratch = {0}; ListCell *lc; projInfo->pi_exprContext = econtext; @@ -314,6 +362,9 @@ ExecBuildProjectionInfo(List *targetList, projInfo->pi_state.tag.type = T_ExprState; state = &projInfo->pi_state; state->expr = (Expr *) targetList; + state->parent = parent; + state->ext_params = NULL; + state->resultslot = slot; /* Insert EEOP_*_FETCHSOME steps as needed */ @@ -347,7 +398,7 @@ ExecBuildProjectionInfo(List *targetList, isSafeVar = true; /* can't check, just assume OK */ else if (attnum <= inputDesc->natts) { - Form_pg_attribute attr = inputDesc->attrs[attnum - 1]; + Form_pg_attribute attr = TupleDescAttr(inputDesc, attnum - 1); /* * If user attribute is dropped or has a type mismatch, don't @@ -398,7 +449,7 @@ ExecBuildProjectionInfo(List *targetList, * matter) can change between executions. We instead evaluate * into the ExprState's resvalue/resnull and then move. */ - ExecInitExprRec(tle->expr, parent, state, + ExecInitExprRec(tle->expr, state, &state->resvalue, &state->resnull); /* @@ -573,6 +624,9 @@ ExecCheck(ExprState *state, ExprContext *econtext) static void ExecReadyExpr(ExprState *state) { + if (jit_compile_expr(state)) + return; + ExecReadyInterpretedExpr(state); } @@ -581,15 +635,14 @@ ExecReadyExpr(ExprState *state) * possibly recursing into sub-expressions of node. 
* * node - expression to evaluate - * parent - parent executor node (or NULL if a standalone expression) * state - ExprState to whose ->steps to append the necessary operations * resv / resnull - where to store the result of the node into */ static void -ExecInitExprRec(Expr *node, PlanState *parent, ExprState *state, +ExecInitExprRec(Expr *node, ExprState *state, Datum *resv, bool *resnull) { - ExprEvalStep scratch; + ExprEvalStep scratch = {0}; /* Guard against stack overflow due to overly complex expressions */ check_stack_depth(); @@ -609,7 +662,7 @@ ExecInitExprRec(Expr *node, PlanState *parent, ExprState *state, if (variable->varattno == InvalidAttrNumber) { /* whole-row Var */ - ExecInitWholeRowVar(&scratch, variable, parent); + ExecInitWholeRowVar(&scratch, variable, state); } else if (variable->varattno <= 0) { @@ -637,20 +690,19 @@ ExecInitExprRec(Expr *node, PlanState *parent, ExprState *state, /* regular user column */ scratch.d.var.attnum = variable->varattno - 1; scratch.d.var.vartype = variable->vartype; - /* select EEOP_*_FIRST opcode to force one-time checks */ switch (variable->varno) { case INNER_VAR: - scratch.opcode = EEOP_INNER_VAR_FIRST; + scratch.opcode = EEOP_INNER_VAR; break; case OUTER_VAR: - scratch.opcode = EEOP_OUTER_VAR_FIRST; + scratch.opcode = EEOP_OUTER_VAR; break; /* INDEX_VAR is handled by default case */ default: - scratch.opcode = EEOP_SCAN_VAR_FIRST; + scratch.opcode = EEOP_SCAN_VAR; break; } } @@ -674,6 +726,7 @@ ExecInitExprRec(Expr *node, PlanState *parent, ExprState *state, case T_Param: { Param *param = (Param *) node; + ParamListInfo params; switch (param->paramkind) { @@ -681,19 +734,41 @@ ExecInitExprRec(Expr *node, PlanState *parent, ExprState *state, scratch.opcode = EEOP_PARAM_EXEC; scratch.d.param.paramid = param->paramid; scratch.d.param.paramtype = param->paramtype; + ExprEvalPushStep(state, &scratch); break; case PARAM_EXTERN: - scratch.opcode = EEOP_PARAM_EXTERN; - scratch.d.param.paramid = param->paramid; - scratch.d.param.paramtype = param->paramtype; + + /* + * If we have a relevant ParamCompileHook, use it; + * otherwise compile a standard EEOP_PARAM_EXTERN + * step. ext_params, if supplied, takes precedence + * over info from the parent node's EState (if any). 
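The PARAM_EXTERN handling described here is a hook-with-fallback dispatch: if the chosen ParamListInfo supplies a paramCompile callback, that callback emits whatever specialized step it prefers; only otherwise is the generic EEOP_PARAM_EXTERN step pushed. A minimal sketch of that dispatch shape, with invented names (ParamSource, compile_hook, emit_generic_step) rather than the executor's types:

    #include <stdio.h>

    typedef struct ParamSource
    {
        /* Optional specialization hook; NULL means "use the generic path". */
        void (*compile_hook) (struct ParamSource *src, int paramid);
    } ParamSource;

    static void
    emit_generic_step(int paramid)
    {
        printf("emit generic step for param %d\n", paramid);
    }

    static void
    compile_param(ParamSource *src, int paramid)
    {
        if (src && src->compile_hook)
            src->compile_hook(src, paramid);    /* specialized code path */
        else
            emit_generic_step(paramid);         /* generic fallback */
    }

    static void
    fast_hook(ParamSource *src, int paramid)
    {
        (void) src;
        printf("emit specialized step for param %d\n", paramid);
    }

    int
    main(void)
    {
        ParamSource with_hook = {fast_hook};
        ParamSource without_hook = {NULL};

        compile_param(&with_hook, 1);
        compile_param(&without_hook, 2);
        return 0;
    }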
+ */ + if (state->ext_params) + params = state->ext_params; + else if (state->parent && + state->parent->state) + params = state->parent->state->es_param_list_info; + else + params = NULL; + if (params && params->paramCompile) + { + params->paramCompile(params, param, state, + resv, resnull); + } + else + { + scratch.opcode = EEOP_PARAM_EXTERN; + scratch.d.param.paramid = param->paramid; + scratch.d.param.paramtype = param->paramtype; + ExprEvalPushStep(state, &scratch); + } break; default: elog(ERROR, "unrecognized paramkind: %d", (int) param->paramkind); break; } - - ExprEvalPushStep(state, &scratch); break; } @@ -706,9 +781,9 @@ ExecInitExprRec(Expr *node, PlanState *parent, ExprState *state, scratch.d.aggref.astate = astate; astate->aggref = aggref; - if (parent && IsA(parent, AggState)) + if (state->parent && IsA(state->parent, AggState)) { - AggState *aggstate = (AggState *) parent; + AggState *aggstate = (AggState *) state->parent; aggstate->aggs = lcons(astate, aggstate->aggs); aggstate->numaggs++; @@ -728,14 +803,14 @@ ExecInitExprRec(Expr *node, PlanState *parent, ExprState *state, GroupingFunc *grp_node = (GroupingFunc *) node; Agg *agg; - if (!parent || !IsA(parent, AggState) || - !IsA(parent->plan, Agg)) + if (!state->parent || !IsA(state->parent, AggState) || + !IsA(state->parent->plan, Agg)) elog(ERROR, "GroupingFunc found in non-Agg plan node"); scratch.opcode = EEOP_GROUPING_FUNC; - scratch.d.grouping_func.parent = (AggState *) parent; + scratch.d.grouping_func.parent = (AggState *) state->parent; - agg = (Agg *) (parent->plan); + agg = (Agg *) (state->parent->plan); if (agg->groupingSets) scratch.d.grouping_func.clauses = grp_node->cols; @@ -753,9 +828,9 @@ ExecInitExprRec(Expr *node, PlanState *parent, ExprState *state, wfstate->wfunc = wfunc; - if (parent && IsA(parent, WindowAggState)) + if (state->parent && IsA(state->parent, WindowAggState)) { - WindowAggState *winstate = (WindowAggState *) parent; + WindowAggState *winstate = (WindowAggState *) state->parent; int nfuncs; winstate->funcs = lcons(wfstate, winstate->funcs); @@ -764,9 +839,10 @@ ExecInitExprRec(Expr *node, PlanState *parent, ExprState *state, winstate->numaggs++; /* for now initialize agg using old style expressions */ - wfstate->args = ExecInitExprList(wfunc->args, parent); + wfstate->args = ExecInitExprList(wfunc->args, + state->parent); wfstate->aggfilter = ExecInitExpr(wfunc->aggfilter, - parent); + state->parent); /* * Complain if the windowfunc's arguments contain any @@ -795,7 +871,7 @@ ExecInitExprRec(Expr *node, PlanState *parent, ExprState *state, { ArrayRef *aref = (ArrayRef *) node; - ExecInitArrayRef(&scratch, aref, parent, state, resv, resnull); + ExecInitArrayRef(&scratch, aref, state, resv, resnull); break; } @@ -805,7 +881,7 @@ ExecInitExprRec(Expr *node, PlanState *parent, ExprState *state, ExecInitFunc(&scratch, node, func->args, func->funcid, func->inputcollid, - parent, state); + state); ExprEvalPushStep(state, &scratch); break; } @@ -816,7 +892,7 @@ ExecInitExprRec(Expr *node, PlanState *parent, ExprState *state, ExecInitFunc(&scratch, node, op->args, op->opfuncid, op->inputcollid, - parent, state); + state); ExprEvalPushStep(state, &scratch); break; } @@ -827,7 +903,7 @@ ExecInitExprRec(Expr *node, PlanState *parent, ExprState *state, ExecInitFunc(&scratch, node, op->args, op->opfuncid, op->inputcollid, - parent, state); + state); /* * Change opcode of call instruction to EEOP_DISTINCT. 
@@ -849,7 +925,7 @@ ExecInitExprRec(Expr *node, PlanState *parent, ExprState *state, ExecInitFunc(&scratch, node, op->args, op->opfuncid, op->inputcollid, - parent, state); + state); /* * Change opcode of call instruction to EEOP_NULLIF. @@ -883,7 +959,7 @@ ExecInitExprRec(Expr *node, PlanState *parent, ExprState *state, GetUserId(), ACL_EXECUTE); if (aclresult != ACLCHECK_OK) - aclcheck_error(aclresult, ACL_KIND_PROC, + aclcheck_error(aclresult, OBJECT_FUNCTION, get_func_name(opexpr->opfuncid)); InvokeFunctionExecuteHook(opexpr->opfuncid); @@ -896,7 +972,7 @@ ExecInitExprRec(Expr *node, PlanState *parent, ExprState *state, opexpr->inputcollid, NULL, NULL); /* Evaluate scalar directly into left function argument */ - ExecInitExprRec(scalararg, parent, state, + ExecInitExprRec(scalararg, state, &fcinfo->arg[0], &fcinfo->argnull[0]); /* @@ -905,7 +981,7 @@ ExecInitExprRec(Expr *node, PlanState *parent, ExprState *state, * be overwritten by EEOP_SCALARARRAYOP, and will not be * passed to any other expression. */ - ExecInitExprRec(arrayarg, parent, state, resv, resnull); + ExecInitExprRec(arrayarg, state, resv, resnull); /* And perform the operation */ scratch.opcode = EEOP_SCALARARRAYOP; @@ -949,7 +1025,7 @@ ExecInitExprRec(Expr *node, PlanState *parent, ExprState *state, Expr *arg = (Expr *) lfirst(lc); /* Evaluate argument into our output variable */ - ExecInitExprRec(arg, parent, state, resv, resnull); + ExecInitExprRec(arg, state, resv, resnull); /* Perform the appropriate step type */ switch (boolexpr->boolop) @@ -1009,13 +1085,14 @@ ExecInitExprRec(Expr *node, PlanState *parent, ExprState *state, SubPlan *subplan = (SubPlan *) node; SubPlanState *sstate; - if (!parent) + if (!state->parent) elog(ERROR, "SubPlan found with no parent plan"); - sstate = ExecInitSubPlan(subplan, parent); + sstate = ExecInitSubPlan(subplan, state->parent); - /* add SubPlanState nodes to parent->subPlan */ - parent->subPlan = lappend(parent->subPlan, sstate); + /* add SubPlanState nodes to state->parent->subPlan */ + state->parent->subPlan = lappend(state->parent->subPlan, + sstate); scratch.opcode = EEOP_SUBPLAN; scratch.d.subplan.sstate = sstate; @@ -1029,10 +1106,10 @@ ExecInitExprRec(Expr *node, PlanState *parent, ExprState *state, AlternativeSubPlan *asplan = (AlternativeSubPlan *) node; AlternativeSubPlanState *asstate; - if (!parent) + if (!state->parent) elog(ERROR, "AlternativeSubPlan found with no parent plan"); - asstate = ExecInitAlternativeSubPlan(asplan, parent); + asstate = ExecInitAlternativeSubPlan(asplan, state->parent); scratch.opcode = EEOP_ALTERNATIVE_SUBPLAN; scratch.d.alternative_subplan.asstate = asstate; @@ -1046,7 +1123,7 @@ ExecInitExprRec(Expr *node, PlanState *parent, ExprState *state, FieldSelect *fselect = (FieldSelect *) node; /* evaluate row/record argument into result area */ - ExecInitExprRec(fselect->arg, parent, state, resv, resnull); + ExecInitExprRec(fselect->arg, state, resv, resnull); /* and extract field */ scratch.opcode = EEOP_FIELDSELECT; @@ -1083,7 +1160,7 @@ ExecInitExprRec(Expr *node, PlanState *parent, ExprState *state, *descp = NULL; /* emit code to evaluate the composite input value */ - ExecInitExprRec(fstore->arg, parent, state, resv, resnull); + ExecInitExprRec(fstore->arg, state, resv, resnull); /* next, deform the input tuple into our workspace */ scratch.opcode = EEOP_FIELDSTORE_DEFORM; @@ -1134,7 +1211,7 @@ ExecInitExprRec(Expr *node, PlanState *parent, ExprState *state, state->innermost_caseval = &values[fieldnum - 1]; state->innermost_casenull = 
&nulls[fieldnum - 1]; - ExecInitExprRec(e, parent, state, + ExecInitExprRec(e, state, &values[fieldnum - 1], &nulls[fieldnum - 1]); @@ -1158,7 +1235,7 @@ ExecInitExprRec(Expr *node, PlanState *parent, ExprState *state, /* relabel doesn't need to do anything at runtime */ RelabelType *relabel = (RelabelType *) node; - ExecInitExprRec(relabel->arg, parent, state, resv, resnull); + ExecInitExprRec(relabel->arg, state, resv, resnull); break; } @@ -1171,7 +1248,7 @@ ExecInitExprRec(Expr *node, PlanState *parent, ExprState *state, FunctionCallInfo fcinfo_in; /* evaluate argument into step's result area */ - ExecInitExprRec(iocoerce->arg, parent, state, resv, resnull); + ExecInitExprRec(iocoerce->arg, state, resv, resnull); /* * Prepare both output and input function calls, to be @@ -1225,51 +1302,62 @@ ExecInitExprRec(Expr *node, PlanState *parent, ExprState *state, { ArrayCoerceExpr *acoerce = (ArrayCoerceExpr *) node; Oid resultelemtype; + ExprState *elemstate; /* evaluate argument into step's result area */ - ExecInitExprRec(acoerce->arg, parent, state, resv, resnull); + ExecInitExprRec(acoerce->arg, state, resv, resnull); resultelemtype = get_element_type(acoerce->resulttype); if (!OidIsValid(resultelemtype)) ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), errmsg("target type is not an array"))); - /* Arrays over domains aren't supported yet */ - Assert(getBaseType(resultelemtype) == resultelemtype); - scratch.opcode = EEOP_ARRAYCOERCE; - scratch.d.arraycoerce.coerceexpr = acoerce; - scratch.d.arraycoerce.resultelemtype = resultelemtype; + /* + * Construct a sub-expression for the per-element expression; + * but don't ready it until after we check it for triviality. + * We assume it hasn't any Var references, but does have a + * CaseTestExpr representing the source array element values. 
+ */ + elemstate = makeNode(ExprState); + elemstate->expr = acoerce->elemexpr; + elemstate->parent = state->parent; + elemstate->ext_params = state->ext_params; - if (OidIsValid(acoerce->elemfuncid)) - { - AclResult aclresult; + elemstate->innermost_caseval = (Datum *) palloc(sizeof(Datum)); + elemstate->innermost_casenull = (bool *) palloc(sizeof(bool)); - /* Check permission to call function */ - aclresult = pg_proc_aclcheck(acoerce->elemfuncid, - GetUserId(), - ACL_EXECUTE); - if (aclresult != ACLCHECK_OK) - aclcheck_error(aclresult, ACL_KIND_PROC, - get_func_name(acoerce->elemfuncid)); - InvokeFunctionExecuteHook(acoerce->elemfuncid); + ExecInitExprRec(acoerce->elemexpr, elemstate, + &elemstate->resvalue, &elemstate->resnull); - /* Set up the primary fmgr lookup information */ - scratch.d.arraycoerce.elemfunc = - (FmgrInfo *) palloc0(sizeof(FmgrInfo)); - fmgr_info(acoerce->elemfuncid, - scratch.d.arraycoerce.elemfunc); - fmgr_info_set_expr((Node *) acoerce, - scratch.d.arraycoerce.elemfunc); + if (elemstate->steps_len == 1 && + elemstate->steps[0].opcode == EEOP_CASE_TESTVAL) + { + /* Trivial, so we need no per-element work at runtime */ + elemstate = NULL; + } + else + { + /* Not trivial, so append a DONE step */ + scratch.opcode = EEOP_DONE; + ExprEvalPushStep(elemstate, &scratch); + /* and ready the subexpression */ + ExecReadyExpr(elemstate); + } + + scratch.opcode = EEOP_ARRAYCOERCE; + scratch.d.arraycoerce.elemexprstate = elemstate; + scratch.d.arraycoerce.resultelemtype = resultelemtype; + if (elemstate) + { /* Set up workspace for array_map */ scratch.d.arraycoerce.amstate = (ArrayMapState *) palloc0(sizeof(ArrayMapState)); } else { - /* Don't need workspace if there's no conversion func */ - scratch.d.arraycoerce.elemfunc = NULL; + /* Don't need workspace if there's no subexpression */ scratch.d.arraycoerce.amstate = NULL; } @@ -1282,7 +1370,7 @@ ExecInitExprRec(Expr *node, PlanState *parent, ExprState *state, ConvertRowtypeExpr *convert = (ConvertRowtypeExpr *) node; /* evaluate argument into step's result area */ - ExecInitExprRec(convert->arg, parent, state, resv, resnull); + ExecInitExprRec(convert->arg, state, resv, resnull); /* and push conversion step */ scratch.opcode = EEOP_CONVERT_ROWTYPE; @@ -1316,7 +1404,7 @@ ExecInitExprRec(Expr *node, PlanState *parent, ExprState *state, caseval = palloc(sizeof(Datum)); casenull = palloc(sizeof(bool)); - ExecInitExprRec(caseExpr->arg, parent, state, + ExecInitExprRec(caseExpr->arg, state, caseval, casenull); /* @@ -1367,7 +1455,7 @@ ExecInitExprRec(Expr *node, PlanState *parent, ExprState *state, state->innermost_casenull = casenull; /* evaluate condition into CASE's result variables */ - ExecInitExprRec(when->expr, parent, state, resv, resnull); + ExecInitExprRec(when->expr, state, resv, resnull); state->innermost_caseval = save_innermost_caseval; state->innermost_casenull = save_innermost_casenull; @@ -1382,7 +1470,7 @@ ExecInitExprRec(Expr *node, PlanState *parent, ExprState *state, * If WHEN result is true, evaluate THEN result, storing * it into the CASE's result variables. 
*/ - ExecInitExprRec(when->result, parent, state, resv, resnull); + ExecInitExprRec(when->result, state, resv, resnull); /* Emit JUMP step to jump to end of CASE's code */ scratch.opcode = EEOP_JUMP; @@ -1407,7 +1495,7 @@ ExecInitExprRec(Expr *node, PlanState *parent, ExprState *state, Assert(caseExpr->defresult); /* evaluate ELSE expr into CASE's result variables */ - ExecInitExprRec(caseExpr->defresult, parent, state, + ExecInitExprRec(caseExpr->defresult, state, resv, resnull); /* adjust jump targets */ @@ -1428,10 +1516,11 @@ ExecInitExprRec(Expr *node, PlanState *parent, ExprState *state, /* * Read from location identified by innermost_caseval. Note * that innermost_caseval could be NULL, if this node isn't - * actually within a CASE structure; some parts of the system - * abuse CaseTestExpr to cause a read of a value externally - * supplied in econtext->caseValue_datum. We'll take care of - * that scenario at runtime. + * actually within a CaseExpr, ArrayCoerceExpr, etc structure. + * That can happen because some parts of the system abuse + * CaseTestExpr to cause a read of a value externally supplied + * in econtext->caseValue_datum. We'll take care of that + * scenario at runtime. */ scratch.opcode = EEOP_CASE_TESTVAL; scratch.d.casetest.value = state->innermost_caseval; @@ -1476,7 +1565,7 @@ ExecInitExprRec(Expr *node, PlanState *parent, ExprState *state, { Expr *e = (Expr *) lfirst(lc); - ExecInitExprRec(e, parent, state, + ExecInitExprRec(e, state, &scratch.d.arrayexpr.elemvalues[elemoff], &scratch.d.arrayexpr.elemnulls[elemoff]); elemoff++; @@ -1492,7 +1581,6 @@ ExecInitExprRec(Expr *node, PlanState *parent, ExprState *state, RowExpr *rowexpr = (RowExpr *) node; int nelems = list_length(rowexpr->args); TupleDesc tupdesc; - Form_pg_attribute *attrs; int i; ListCell *l; @@ -1539,13 +1627,13 @@ ExecInitExprRec(Expr *node, PlanState *parent, ExprState *state, memset(scratch.d.row.elemnulls, true, sizeof(bool) * nelems); /* Set up evaluation, skipping any deleted columns */ - attrs = tupdesc->attrs; i = 0; foreach(l, rowexpr->args) { + Form_pg_attribute att = TupleDescAttr(tupdesc, i); Expr *e = (Expr *) lfirst(l); - if (!attrs[i]->attisdropped) + if (!att->attisdropped) { /* * Guard against ALTER COLUMN TYPE on rowtype since @@ -1553,12 +1641,12 @@ ExecInitExprRec(Expr *node, PlanState *parent, ExprState *state, * typmod too? Not sure we can be sure it'll be the * same. 
*/ - if (exprType((Node *) e) != attrs[i]->atttypid) + if (exprType((Node *) e) != att->atttypid) ereport(ERROR, (errcode(ERRCODE_DATATYPE_MISMATCH), errmsg("ROW() column has type %s instead of type %s", format_type_be(exprType((Node *) e)), - format_type_be(attrs[i]->atttypid)))); + format_type_be(att->atttypid)))); } else { @@ -1571,7 +1659,7 @@ ExecInitExprRec(Expr *node, PlanState *parent, ExprState *state, } /* Evaluate column expr into appropriate workspace slot */ - ExecInitExprRec(e, parent, state, + ExecInitExprRec(e, state, &scratch.d.row.elemvalues[i], &scratch.d.row.elemnulls[i]); i++; @@ -1660,9 +1748,9 @@ ExecInitExprRec(Expr *node, PlanState *parent, ExprState *state, */ /* evaluate left and right args directly into fcinfo */ - ExecInitExprRec(left_expr, parent, state, + ExecInitExprRec(left_expr, state, &fcinfo->arg[0], &fcinfo->argnull[0]); - ExecInitExprRec(right_expr, parent, state, + ExecInitExprRec(right_expr, state, &fcinfo->arg[1], &fcinfo->argnull[1]); scratch.opcode = EEOP_ROWCOMPARE_STEP; @@ -1731,7 +1819,7 @@ ExecInitExprRec(Expr *node, PlanState *parent, ExprState *state, Expr *e = (Expr *) lfirst(lc); /* evaluate argument, directly into result datum */ - ExecInitExprRec(e, parent, state, resv, resnull); + ExecInitExprRec(e, state, resv, resnull); /* if it's not null, skip to end of COALESCE expr */ scratch.opcode = EEOP_JUMP_IF_NOT_NULL; @@ -1813,7 +1901,7 @@ ExecInitExprRec(Expr *node, PlanState *parent, ExprState *state, { Expr *e = (Expr *) lfirst(lc); - ExecInitExprRec(e, parent, state, + ExecInitExprRec(e, state, &scratch.d.minmax.values[off], &scratch.d.minmax.nulls[off]); off++; @@ -1879,7 +1967,7 @@ ExecInitExprRec(Expr *node, PlanState *parent, ExprState *state, { Expr *e = (Expr *) lfirst(arg); - ExecInitExprRec(e, parent, state, + ExecInitExprRec(e, state, &scratch.d.xmlexpr.named_argvalue[off], &scratch.d.xmlexpr.named_argnull[off]); off++; @@ -1890,7 +1978,7 @@ ExecInitExprRec(Expr *node, PlanState *parent, ExprState *state, { Expr *e = (Expr *) lfirst(arg); - ExecInitExprRec(e, parent, state, + ExecInitExprRec(e, state, &scratch.d.xmlexpr.argvalue[off], &scratch.d.xmlexpr.argnull[off]); off++; @@ -1928,7 +2016,7 @@ ExecInitExprRec(Expr *node, PlanState *parent, ExprState *state, scratch.d.nulltest_row.argdesc = NULL; /* first evaluate argument into result variable */ - ExecInitExprRec(ntest->arg, parent, state, + ExecInitExprRec(ntest->arg, state, resv, resnull); /* then push the test of that argument */ @@ -1946,7 +2034,7 @@ ExecInitExprRec(Expr *node, PlanState *parent, ExprState *state, * and will get overwritten by the below EEOP_BOOLTEST_IS_* * step. */ - ExecInitExprRec(btest->arg, parent, state, resv, resnull); + ExecInitExprRec(btest->arg, state, resv, resnull); switch (btest->booltesttype) { @@ -1983,7 +2071,7 @@ ExecInitExprRec(Expr *node, PlanState *parent, ExprState *state, { CoerceToDomain *ctest = (CoerceToDomain *) node; - ExecInitCoerceToDomain(&scratch, ctest, parent, state, + ExecInitCoerceToDomain(&scratch, ctest, state, resv, resnull); break; } @@ -2039,7 +2127,7 @@ ExecInitExprRec(Expr *node, PlanState *parent, ExprState *state, * Note that this potentially re-allocates es->steps, therefore no pointer * into that array may be used while the expression is still being built. 
*/ -static void +void ExprEvalPushStep(ExprState *es, const ExprEvalStep *s) { if (es->steps_alloc == 0) @@ -2067,7 +2155,7 @@ ExprEvalPushStep(ExprState *es, const ExprEvalStep *s) */ static void ExecInitFunc(ExprEvalStep *scratch, Expr *node, List *args, Oid funcid, - Oid inputcollid, PlanState *parent, ExprState *state) + Oid inputcollid, ExprState *state) { int nargs = list_length(args); AclResult aclresult; @@ -2079,7 +2167,7 @@ ExecInitFunc(ExprEvalStep *scratch, Expr *node, List *args, Oid funcid, /* Check permission to call function */ aclresult = pg_proc_aclcheck(funcid, GetUserId(), ACL_EXECUTE); if (aclresult != ACLCHECK_OK) - aclcheck_error(aclresult, ACL_KIND_PROC, get_func_name(funcid)); + aclcheck_error(aclresult, OBJECT_FUNCTION, get_func_name(funcid)); InvokeFunctionExecuteHook(funcid); /* @@ -2119,8 +2207,9 @@ ExecInitFunc(ExprEvalStep *scratch, Expr *node, List *args, Oid funcid, ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), errmsg("set-valued function called in context that cannot accept a set"), - parent ? executor_errposition(parent->state, - exprLocation((Node *) node)) : 0)); + state->parent ? + executor_errposition(state->parent->state, + exprLocation((Node *) node)) : 0)); /* Build code to evaluate arguments directly into the fcinfo struct */ argno = 0; @@ -2141,7 +2230,7 @@ ExecInitFunc(ExprEvalStep *scratch, Expr *node, List *args, Oid funcid, } else { - ExecInitExprRec(arg, parent, state, + ExecInitExprRec(arg, state, &fcinfo->arg[argno], &fcinfo->argnull[argno]); } argno++; @@ -2172,30 +2261,48 @@ static void ExecInitExprSlots(ExprState *state, Node *node) { LastAttnumInfo info = {0, 0, 0}; - ExprEvalStep scratch; /* * Figure out which attributes we're going to need. */ get_last_attnums_walker(node, &info); + ExecPushExprSlots(state, &info); +} + +/* + * Add steps deforming the ExprState's inner/out/scan slots as much as + * indicated by info. This is useful when building an ExprState covering more + * than one expression. + */ +static void +ExecPushExprSlots(ExprState *state, LastAttnumInfo *info) +{ + ExprEvalStep scratch = {0}; + + scratch.resvalue = NULL; + scratch.resnull = NULL; + /* Emit steps as needed */ - if (info.last_inner > 0) + if (info->last_inner > 0) { scratch.opcode = EEOP_INNER_FETCHSOME; - scratch.d.fetch.last_var = info.last_inner; + scratch.d.fetch.last_var = info->last_inner; + scratch.d.fetch.known_desc = NULL; ExprEvalPushStep(state, &scratch); } - if (info.last_outer > 0) + if (info->last_outer > 0) { scratch.opcode = EEOP_OUTER_FETCHSOME; - scratch.d.fetch.last_var = info.last_outer; + scratch.d.fetch.last_var = info->last_outer; + scratch.d.fetch.known_desc = NULL; ExprEvalPushStep(state, &scratch); } - if (info.last_scan > 0) + if (info->last_scan > 0) { scratch.opcode = EEOP_SCAN_FETCHSOME; - scratch.d.fetch.last_var = info.last_scan; + scratch.d.fetch.last_var = info->last_scan; + scratch.d.fetch.known_desc = NULL; ExprEvalPushStep(state, &scratch); } } @@ -2253,8 +2360,10 @@ get_last_attnums_walker(Node *node, LastAttnumInfo *info) * The caller still has to push the step. 
*/ static void -ExecInitWholeRowVar(ExprEvalStep *scratch, Var *variable, PlanState *parent) +ExecInitWholeRowVar(ExprEvalStep *scratch, Var *variable, ExprState *state) { + PlanState *parent = state->parent; + /* fill in all but the target */ scratch->opcode = EEOP_WHOLEROW; scratch->d.wholerow.var = variable; @@ -2314,7 +2423,7 @@ ExecInitWholeRowVar(ExprEvalStep *scratch, Var *variable, PlanState *parent) scratch->d.wholerow.junkFilter = ExecInitJunkFilter(subplan->plan->targetlist, ExecGetResultType(subplan)->tdhasoid, - ExecInitExtraTupleSlot(parent->state)); + ExecInitExtraTupleSlot(parent->state, NULL)); } } } @@ -2324,7 +2433,7 @@ ExecInitWholeRowVar(ExprEvalStep *scratch, Var *variable, PlanState *parent) * Prepare evaluation of an ArrayRef expression. */ static void -ExecInitArrayRef(ExprEvalStep *scratch, ArrayRef *aref, PlanState *parent, +ExecInitArrayRef(ExprEvalStep *scratch, ArrayRef *aref, ExprState *state, Datum *resv, bool *resnull) { bool isAssignment = (aref->refassgnexpr != NULL); @@ -2348,7 +2457,7 @@ ExecInitArrayRef(ExprEvalStep *scratch, ArrayRef *aref, PlanState *parent, * be overwritten by the final EEOP_ARRAYREF_FETCH/ASSIGN step, which is * pushed last. */ - ExecInitExprRec(aref->refexpr, parent, state, resv, resnull); + ExecInitExprRec(aref->refexpr, state, resv, resnull); /* * If refexpr yields NULL, and it's a fetch, then result is NULL. We can @@ -2394,7 +2503,7 @@ ExecInitArrayRef(ExprEvalStep *scratch, ArrayRef *aref, PlanState *parent, arefstate->upperprovided[i] = true; /* Each subscript is evaluated into subscriptvalue/subscriptnull */ - ExecInitExprRec(e, parent, state, + ExecInitExprRec(e, state, &arefstate->subscriptvalue, &arefstate->subscriptnull); /* ... and then ARRAYREF_SUBSCRIPT saves it into step's workspace */ @@ -2427,7 +2536,7 @@ ExecInitArrayRef(ExprEvalStep *scratch, ArrayRef *aref, PlanState *parent, arefstate->lowerprovided[i] = true; /* Each subscript is evaluated into subscriptvalue/subscriptnull */ - ExecInitExprRec(e, parent, state, + ExecInitExprRec(e, state, &arefstate->subscriptvalue, &arefstate->subscriptnull); /* ... and then ARRAYREF_SUBSCRIPT saves it into step's workspace */ @@ -2481,7 +2590,7 @@ ExecInitArrayRef(ExprEvalStep *scratch, ArrayRef *aref, PlanState *parent, state->innermost_casenull = &arefstate->prevnull; /* evaluate replacement value into replacevalue/replacenull */ - ExecInitExprRec(aref->refassgnexpr, parent, state, + ExecInitExprRec(aref->refassgnexpr, state, &arefstate->replacevalue, &arefstate->replacenull); state->innermost_caseval = save_innermost_caseval; @@ -2559,10 +2668,9 @@ isAssignmentIndirectionExpr(Expr *expr) */ static void ExecInitCoerceToDomain(ExprEvalStep *scratch, CoerceToDomain *ctest, - PlanState *parent, ExprState *state, - Datum *resv, bool *resnull) + ExprState *state, Datum *resv, bool *resnull) { - ExprEvalStep scratch2; + ExprEvalStep scratch2 = {0}; DomainConstraintRef *constraint_ref; Datum *domainval = NULL; bool *domainnull = NULL; @@ -2580,7 +2688,7 @@ ExecInitCoerceToDomain(ExprEvalStep *scratch, CoerceToDomain *ctest, * if there's constraint failures there'll be errors, otherwise it's what * needs to be returned. 
*/ - ExecInitExprRec(ctest->arg, parent, state, resv, resnull); + ExecInitExprRec(ctest->arg, state, resv, resnull); /* * Note: if the argument is of varlena type, it could be a R/W expanded @@ -2677,7 +2785,7 @@ ExecInitCoerceToDomain(ExprEvalStep *scratch, CoerceToDomain *ctest, state->innermost_domainnull = domainnull; /* evaluate check expression value */ - ExecInitExprRec(con->check_expr, parent, state, + ExecInitExprRec(con->check_expr, state, scratch->d.domaincheck.checkvalue, scratch->d.domaincheck.checknull); @@ -2696,3 +2804,544 @@ ExecInitCoerceToDomain(ExprEvalStep *scratch, CoerceToDomain *ctest, } } } + +/* + * Build transition/combine function invocations for all aggregate transition + * / combination function invocations in a grouping sets phase. This has to + * invoke all sort based transitions in a phase (if doSort is true), all hash + * based transitions (if doHash is true), or both (both true). + * + * The resulting expression will, for each set of transition values, first + * check for filters, evaluate aggregate input, check that that input is not + * NULL for a strict transition function, and then finally invoke the + * transition for each of the concurrently computed grouping sets. + */ +ExprState * +ExecBuildAggTrans(AggState *aggstate, AggStatePerPhase phase, + bool doSort, bool doHash) +{ + ExprState *state = makeNode(ExprState); + PlanState *parent = &aggstate->ss.ps; + ExprEvalStep scratch = {0}; + int transno = 0; + int setoff = 0; + bool isCombine = DO_AGGSPLIT_COMBINE(aggstate->aggsplit); + LastAttnumInfo deform = {0, 0, 0}; + + state->expr = (Expr *) aggstate; + state->parent = parent; + + scratch.resvalue = &state->resvalue; + scratch.resnull = &state->resnull; + + /* + * First figure out which slots, and how many columns from each, we're + * going to need. + */ + for (transno = 0; transno < aggstate->numtrans; transno++) + { + AggStatePerTrans pertrans = &aggstate->pertrans[transno]; + + get_last_attnums_walker((Node *) pertrans->aggref->aggdirectargs, + &deform); + get_last_attnums_walker((Node *) pertrans->aggref->args, + &deform); + get_last_attnums_walker((Node *) pertrans->aggref->aggorder, + &deform); + get_last_attnums_walker((Node *) pertrans->aggref->aggdistinct, + &deform); + get_last_attnums_walker((Node *) pertrans->aggref->aggfilter, + &deform); + } + ExecPushExprSlots(state, &deform); + + /* + * Emit instructions for each transition value / grouping set combination. + */ + for (transno = 0; transno < aggstate->numtrans; transno++) + { + AggStatePerTrans pertrans = &aggstate->pertrans[transno]; + int argno; + int setno; + FunctionCallInfo trans_fcinfo = &pertrans->transfn_fcinfo; + ListCell *arg; + ListCell *bail; + List *adjust_bailout = NIL; + bool *strictnulls = NULL; + + /* + * If filter present, emit. Do so before evaluating the input, to + * avoid potentially unneeded computations, or even worse, unintended + * side-effects. When combining, all the necessary filtering has + * already been done. + */ + if (pertrans->aggref->aggfilter && !isCombine) + { + /* evaluate filter expression */ + ExecInitExprRec(pertrans->aggref->aggfilter, state, + &state->resvalue, &state->resnull); + /* and jump out if false */ + scratch.opcode = EEOP_JUMP_IF_NOT_TRUE; + scratch.d.jump.jumpdone = -1; /* adjust later */ + ExprEvalPushStep(state, &scratch); + adjust_bailout = lappend_int(adjust_bailout, + state->steps_len - 1); + } + + /* + * Evaluate arguments to aggregate/combine function. 
+ */ + argno = 0; + if (isCombine) + { + /* + * Combining two aggregate transition values. Instead of directly + * coming from a tuple the input is a, potentially deserialized, + * transition value. + */ + TargetEntry *source_tle; + + Assert(pertrans->numSortCols == 0); + Assert(list_length(pertrans->aggref->args) == 1); + + strictnulls = trans_fcinfo->argnull + 1; + source_tle = (TargetEntry *) linitial(pertrans->aggref->args); + + /* + * deserialfn_oid will be set if we must deserialize the input + * state before calling the combine function. + */ + if (!OidIsValid(pertrans->deserialfn_oid)) + { + /* + * Start from 1, since the 0th arg will be the transition + * value + */ + ExecInitExprRec(source_tle->expr, state, + &trans_fcinfo->arg[argno + 1], + &trans_fcinfo->argnull[argno + 1]); + } + else + { + FunctionCallInfo ds_fcinfo = &pertrans->deserialfn_fcinfo; + + /* evaluate argument */ + ExecInitExprRec(source_tle->expr, state, + &ds_fcinfo->arg[0], + &ds_fcinfo->argnull[0]); + + /* Dummy second argument for type-safety reasons */ + ds_fcinfo->arg[1] = PointerGetDatum(NULL); + ds_fcinfo->argnull[1] = false; + + /* + * Don't call a strict deserialization function with NULL + * input + */ + if (pertrans->deserialfn.fn_strict) + scratch.opcode = EEOP_AGG_STRICT_DESERIALIZE; + else + scratch.opcode = EEOP_AGG_DESERIALIZE; + + scratch.d.agg_deserialize.aggstate = aggstate; + scratch.d.agg_deserialize.fcinfo_data = ds_fcinfo; + scratch.d.agg_deserialize.jumpnull = -1; /* adjust later */ + scratch.resvalue = &trans_fcinfo->arg[argno + 1]; + scratch.resnull = &trans_fcinfo->argnull[argno + 1]; + + ExprEvalPushStep(state, &scratch); + adjust_bailout = lappend_int(adjust_bailout, + state->steps_len - 1); + + /* restore normal settings of scratch fields */ + scratch.resvalue = &state->resvalue; + scratch.resnull = &state->resnull; + } + argno++; + } + else if (pertrans->numSortCols == 0) + { + /* + * Normal transition function without ORDER BY / DISTINCT. + */ + strictnulls = trans_fcinfo->argnull + 1; + + foreach(arg, pertrans->aggref->args) + { + TargetEntry *source_tle = (TargetEntry *) lfirst(arg); + + /* + * Start from 1, since the 0th arg will be the transition + * value + */ + ExecInitExprRec(source_tle->expr, state, + &trans_fcinfo->arg[argno + 1], + &trans_fcinfo->argnull[argno + 1]); + argno++; + } + } + else if (pertrans->numInputs == 1) + { + /* + * DISTINCT and/or ORDER BY case, with a single column sorted on. + */ + TargetEntry *source_tle = + (TargetEntry *) linitial(pertrans->aggref->args); + + Assert(list_length(pertrans->aggref->args) == 1); + + ExecInitExprRec(source_tle->expr, state, + &state->resvalue, + &state->resnull); + strictnulls = &state->resnull; + argno++; + } + else + { + /* + * DISTINCT and/or ORDER BY case, with multiple columns sorted on. + */ + Datum *values = pertrans->sortslot->tts_values; + bool *nulls = pertrans->sortslot->tts_isnull; + + strictnulls = nulls; + + foreach(arg, pertrans->aggref->args) + { + TargetEntry *source_tle = (TargetEntry *) lfirst(arg); + + ExecInitExprRec(source_tle->expr, state, + &values[argno], &nulls[argno]); + argno++; + } + } + Assert(pertrans->numInputs == argno); + + /* + * For a strict transfn, nothing happens when there's a NULL input; we + * just keep the prior transValue. This is true for both plain and + * sorted/distinct aggregates. 
+ */ + if (trans_fcinfo->flinfo->fn_strict && pertrans->numTransInputs > 0) + { + scratch.opcode = EEOP_AGG_STRICT_INPUT_CHECK; + scratch.d.agg_strict_input_check.nulls = strictnulls; + scratch.d.agg_strict_input_check.jumpnull = -1; /* adjust later */ + scratch.d.agg_strict_input_check.nargs = pertrans->numTransInputs; + ExprEvalPushStep(state, &scratch); + adjust_bailout = lappend_int(adjust_bailout, + state->steps_len - 1); + } + + /* + * Call transition function (once for each concurrently evaluated + * grouping set). Do so for both sort and hash based computations, as + * applicable. + */ + setoff = 0; + if (doSort) + { + int processGroupingSets = Max(phase->numsets, 1); + + for (setno = 0; setno < processGroupingSets; setno++) + { + ExecBuildAggTransCall(state, aggstate, &scratch, trans_fcinfo, + pertrans, transno, setno, setoff, false); + setoff++; + } + } + + if (doHash) + { + int numHashes = aggstate->num_hashes; + + /* in MIXED mode, there'll be preceding transition values */ + if (aggstate->aggstrategy != AGG_HASHED) + setoff = aggstate->maxsets; + else + setoff = 0; + + for (setno = 0; setno < numHashes; setno++) + { + ExecBuildAggTransCall(state, aggstate, &scratch, trans_fcinfo, + pertrans, transno, setno, setoff, true); + setoff++; + } + } + + /* adjust early bail out jump target(s) */ + foreach(bail, adjust_bailout) + { + ExprEvalStep *as = &state->steps[lfirst_int(bail)]; + + if (as->opcode == EEOP_JUMP_IF_NOT_TRUE) + { + Assert(as->d.jump.jumpdone == -1); + as->d.jump.jumpdone = state->steps_len; + } + else if (as->opcode == EEOP_AGG_STRICT_INPUT_CHECK) + { + Assert(as->d.agg_strict_input_check.jumpnull == -1); + as->d.agg_strict_input_check.jumpnull = state->steps_len; + } + else if (as->opcode == EEOP_AGG_STRICT_DESERIALIZE) + { + Assert(as->d.agg_deserialize.jumpnull == -1); + as->d.agg_deserialize.jumpnull = state->steps_len; + } + } + } + + scratch.resvalue = NULL; + scratch.resnull = NULL; + scratch.opcode = EEOP_DONE; + ExprEvalPushStep(state, &scratch); + + ExecReadyExpr(state); + + return state; +} + +/* + * Build transition/combine function invocation for a single transition + * value. This is separated from ExecBuildAggTrans() because there are + * multiple callsites (hash and sort in some grouping set cases). + */ +static void +ExecBuildAggTransCall(ExprState *state, AggState *aggstate, + ExprEvalStep *scratch, + FunctionCallInfo fcinfo, AggStatePerTrans pertrans, + int transno, int setno, int setoff, bool ishash) +{ + int adjust_init_jumpnull = -1; + int adjust_strict_jumpnull = -1; + ExprContext *aggcontext; + + if (ishash) + aggcontext = aggstate->hashcontext; + else + aggcontext = aggstate->aggcontexts[setno]; + + /* + * If the initial value for the transition state doesn't exist in the + * pg_aggregate table then we will let the first non-NULL value returned + * from the outer procNode become the initial value. (This is useful for + * aggregates like max() and min().) The noTransValue flag signals that we + * still need to do this. 
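Each transition above is emitted once per concurrently maintained grouping set, and the interpreter later addresses its state as all_pergroups[setoff][transno]: the sort-based sets occupy the first slots, and in a mixed (sort plus hash) plan the hashed sets follow, starting at maxsets. A simplified sketch of how those setoff values line up, with invented names standing in for the AggState fields:

#include <stdio.h>

typedef enum {STRAT_SORTED, STRAT_HASHED, STRAT_MIXED} strategy_t;

int
main(void)
{
    strategy_t  strategy = STRAT_MIXED;
    int         numsets = 2;    /* sort-based grouping sets in this phase */
    int         maxsets = 2;    /* where the hashed sets start in MIXED mode */
    int         num_hashes = 3; /* concurrently filled hash tables */
    int         setoff;
    int         setno;

    /* sort-based transitions use setoff 0 .. numsets-1 */
    for (setoff = 0, setno = 0; setno < numsets; setno++, setoff++)
        printf("sort set %d -> pergroup row %d\n", setno, setoff);

    /* hashed transitions follow; in a MIXED plan they start at maxsets */
    setoff = (strategy != STRAT_HASHED) ? maxsets : 0;
    for (setno = 0; setno < num_hashes; setno++, setoff++)
        printf("hash set %d -> pergroup row %d\n", setno, setoff);
    return 0;
}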
+ */ + if (pertrans->numSortCols == 0 && + fcinfo->flinfo->fn_strict && + pertrans->initValueIsNull) + { + scratch->opcode = EEOP_AGG_INIT_TRANS; + scratch->d.agg_init_trans.aggstate = aggstate; + scratch->d.agg_init_trans.pertrans = pertrans; + scratch->d.agg_init_trans.setno = setno; + scratch->d.agg_init_trans.setoff = setoff; + scratch->d.agg_init_trans.transno = transno; + scratch->d.agg_init_trans.aggcontext = aggcontext; + scratch->d.agg_init_trans.jumpnull = -1; /* adjust later */ + ExprEvalPushStep(state, scratch); + + /* see comment about jumping out below */ + adjust_init_jumpnull = state->steps_len - 1; + } + + if (pertrans->numSortCols == 0 && + fcinfo->flinfo->fn_strict) + { + scratch->opcode = EEOP_AGG_STRICT_TRANS_CHECK; + scratch->d.agg_strict_trans_check.aggstate = aggstate; + scratch->d.agg_strict_trans_check.setno = setno; + scratch->d.agg_strict_trans_check.setoff = setoff; + scratch->d.agg_strict_trans_check.transno = transno; + scratch->d.agg_strict_trans_check.jumpnull = -1; /* adjust later */ + ExprEvalPushStep(state, scratch); + + /* + * Note, we don't push into adjust_bailout here - those jump to the + * end of all transition value computations. Here a single transition + * value is NULL, so just skip processing the individual value. + */ + adjust_strict_jumpnull = state->steps_len - 1; + } + + /* invoke appropriate transition implementation */ + if (pertrans->numSortCols == 0 && pertrans->transtypeByVal) + scratch->opcode = EEOP_AGG_PLAIN_TRANS_BYVAL; + else if (pertrans->numSortCols == 0) + scratch->opcode = EEOP_AGG_PLAIN_TRANS; + else if (pertrans->numInputs == 1) + scratch->opcode = EEOP_AGG_ORDERED_TRANS_DATUM; + else + scratch->opcode = EEOP_AGG_ORDERED_TRANS_TUPLE; + + scratch->d.agg_trans.aggstate = aggstate; + scratch->d.agg_trans.pertrans = pertrans; + scratch->d.agg_trans.setno = setno; + scratch->d.agg_trans.setoff = setoff; + scratch->d.agg_trans.transno = transno; + scratch->d.agg_trans.aggcontext = aggcontext; + ExprEvalPushStep(state, scratch); + + /* adjust jumps so they jump till after transition invocation */ + if (adjust_init_jumpnull != -1) + { + ExprEvalStep *as = &state->steps[adjust_init_jumpnull]; + + Assert(as->d.agg_init_trans.jumpnull == -1); + as->d.agg_init_trans.jumpnull = state->steps_len; + } + if (adjust_strict_jumpnull != -1) + { + ExprEvalStep *as = &state->steps[adjust_strict_jumpnull]; + + Assert(as->d.agg_strict_trans_check.jumpnull == -1); + as->d.agg_strict_trans_check.jumpnull = state->steps_len; + } +} + +/* + * Build equality expression that can be evaluated using ExecQual(), returning + * true if the expression context's inner/outer tuple are NOT DISTINCT. I.e + * two nulls match, a null and a not-null don't match. + * + * desc: tuple descriptor of the to-be-compared tuples + * numCols: the number of attributes to be examined + * keyColIdx: array of attribute column numbers + * eqFunctions: array of function oids of the equality functions to use + * parent: parent executor node + */ +ExprState * +ExecBuildGroupingEqual(TupleDesc ldesc, TupleDesc rdesc, + int numCols, + AttrNumber *keyColIdx, + Oid *eqfunctions, + PlanState *parent) +{ + ExprState *state = makeNode(ExprState); + ExprEvalStep scratch = {0}; + int natt; + int maxatt = -1; + List *adjust_jumps = NIL; + ListCell *lc; + + /* + * When no columns are actually compared, the result's always true. See + * special case in ExecQual(). 
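The comment above spells out the IS NOT DISTINCT FROM semantics that the per-column EEOP_NOT_DISTINCT steps implement: two NULLs match, a NULL against a non-NULL does not, and otherwise the column type's equality function decides. A tiny self-contained restatement, with int_eq standing in for the equality function looked up from eqfunctions[]:

#include <stdbool.h>
#include <stdio.h>

typedef bool (*eq_fn) (int a, int b);

static bool
int_eq(int a, int b)
{
    return a == b;
}

/* IS NOT DISTINCT FROM, per the semantics described above */
static bool
not_distinct(bool anull, int a, bool bnull, int b, eq_fn eq)
{
    if (anull && bnull)
        return true;            /* two NULLs match */
    if (anull || bnull)
        return false;           /* NULL vs. non-NULL never matches */
    return eq(a, b);            /* otherwise defer to the equality operator */
}

int
main(void)
{
    printf("%d %d %d\n",
           not_distinct(true, 0, true, 0, int_eq),     /* 1: NULL vs NULL */
           not_distinct(true, 0, false, 5, int_eq),    /* 0: NULL vs 5 */
           not_distinct(false, 5, false, 5, int_eq));  /* 1: 5 vs 5 */
    return 0;
}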
+ */ + if (numCols == 0) + return NULL; + + state->expr = NULL; + state->flags = EEO_FLAG_IS_QUAL; + state->parent = parent; + + scratch.resvalue = &state->resvalue; + scratch.resnull = &state->resnull; + + /* compute max needed attribute */ + for (natt = 0; natt < numCols; natt++) + { + int attno = keyColIdx[natt]; + + if (attno > maxatt) + maxatt = attno; + } + Assert(maxatt >= 0); + + /* push deform steps */ + scratch.opcode = EEOP_INNER_FETCHSOME; + scratch.d.fetch.last_var = maxatt; + scratch.d.fetch.known_desc = ldesc; + ExprEvalPushStep(state, &scratch); + + scratch.opcode = EEOP_OUTER_FETCHSOME; + scratch.d.fetch.last_var = maxatt; + scratch.d.fetch.known_desc = rdesc; + ExprEvalPushStep(state, &scratch); + + /* + * Start comparing at the last field (least significant sort key). That's + * the most likely to be different if we are dealing with sorted input. + */ + for (natt = numCols; --natt >= 0;) + { + int attno = keyColIdx[natt]; + Form_pg_attribute latt = TupleDescAttr(ldesc, attno - 1); + Form_pg_attribute ratt = TupleDescAttr(rdesc, attno - 1); + Oid foid = eqfunctions[natt]; + FmgrInfo *finfo; + FunctionCallInfo fcinfo; + AclResult aclresult; + + /* Check permission to call function */ + aclresult = pg_proc_aclcheck(foid, GetUserId(), ACL_EXECUTE); + if (aclresult != ACLCHECK_OK) + aclcheck_error(aclresult, OBJECT_FUNCTION, get_func_name(foid)); + + InvokeFunctionExecuteHook(foid); + + /* Set up the primary fmgr lookup information */ + finfo = palloc0(sizeof(FmgrInfo)); + fcinfo = palloc0(sizeof(FunctionCallInfoData)); + fmgr_info(foid, finfo); + fmgr_info_set_expr(NULL, finfo); + InitFunctionCallInfoData(*fcinfo, finfo, 2, + InvalidOid, NULL, NULL); + + /* left arg */ + scratch.opcode = EEOP_INNER_VAR; + scratch.d.var.attnum = attno - 1; + scratch.d.var.vartype = latt->atttypid; + scratch.resvalue = &fcinfo->arg[0]; + scratch.resnull = &fcinfo->argnull[0]; + ExprEvalPushStep(state, &scratch); + + /* right arg */ + scratch.opcode = EEOP_OUTER_VAR; + scratch.d.var.attnum = attno - 1; + scratch.d.var.vartype = ratt->atttypid; + scratch.resvalue = &fcinfo->arg[1]; + scratch.resnull = &fcinfo->argnull[1]; + ExprEvalPushStep(state, &scratch); + + /* evaluate distinctness */ + scratch.opcode = EEOP_NOT_DISTINCT; + scratch.d.func.finfo = finfo; + scratch.d.func.fcinfo_data = fcinfo; + scratch.d.func.fn_addr = finfo->fn_addr; + scratch.d.func.nargs = 2; + scratch.resvalue = &state->resvalue; + scratch.resnull = &state->resnull; + ExprEvalPushStep(state, &scratch); + + /* then emit EEOP_QUAL to detect if result is false (or null) */ + scratch.opcode = EEOP_QUAL; + scratch.d.qualexpr.jumpdone = -1; + scratch.resvalue = &state->resvalue; + scratch.resnull = &state->resnull; + ExprEvalPushStep(state, &scratch); + adjust_jumps = lappend_int(adjust_jumps, + state->steps_len - 1); + } + + /* adjust jump targets */ + foreach(lc, adjust_jumps) + { + ExprEvalStep *as = &state->steps[lfirst_int(lc)]; + + Assert(as->opcode == EEOP_QUAL); + Assert(as->d.qualexpr.jumpdone == -1); + as->d.qualexpr.jumpdone = state->steps_len; + } + + scratch.resvalue = NULL; + scratch.resnull = NULL; + scratch.opcode = EEOP_DONE; + ExprEvalPushStep(state, &scratch); + + ExecReadyExpr(state); + + return state; +} diff --git a/src/backend/executor/execExprInterp.c b/src/backend/executor/execExprInterp.c index f2a52f6213..f7eac2a572 100644 --- a/src/backend/executor/execExprInterp.c +++ b/src/backend/executor/execExprInterp.c @@ -34,10 +34,8 @@ * * For very simple instructions the overhead of the full interpreter * 
"startup", as minimal as it is, is noticeable. Therefore - * ExecReadyInterpretedExpr will choose to implement simple scalar Var - * and Const expressions using special fast-path routines (ExecJust*). - * Benchmarking shows anything more complex than those may as well use the - * "full interpreter". + * ExecReadyInterpretedExpr will choose to implement certain simple + * opcode patterns using special fast-path routines (ExecJust*). * * Complex or uncommon instructions are not implemented in-line in * ExecInterpExpr(), rather we call out to a helper function appearing later @@ -48,7 +46,7 @@ * exported rather than being "static" in this file.) * * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * IDENTIFICATION @@ -64,12 +62,15 @@ #include "executor/execExpr.h" #include "executor/nodeSubplan.h" #include "funcapi.h" +#include "utils/memutils.h" #include "miscadmin.h" #include "nodes/nodeFuncs.h" #include "parser/parsetree.h" #include "pgstat.h" #include "utils/builtins.h" #include "utils/date.h" +#include "utils/datum.h" +#include "utils/expandedrecord.h" #include "utils/lsyscache.h" #include "utils/timestamp.h" #include "utils/typcache.h" @@ -97,9 +98,19 @@ */ #if defined(EEO_USE_COMPUTED_GOTO) +/* struct for jump target -> opcode lookup table */ +typedef struct ExprEvalOpLookup +{ + const void *opcode; + ExprEvalOp op; +} ExprEvalOpLookup; + /* to make dispatch_table accessible outside ExecInterpExpr() */ static const void **dispatch_table = NULL; +/* jump target -> opcode lookup table */ +static ExprEvalOpLookup reverse_dispatch_table[EEOP_LAST]; + #define EEO_SWITCH() #define EEO_CASE(name) CASE_##name: #define EEO_DISPATCH() goto *((void *) op->opcode) @@ -139,16 +150,14 @@ static void ExecEvalRowNullInt(ExprState *state, ExprEvalStep *op, ExprContext *econtext, bool checkisnull); /* fast-path evaluation functions */ -static Datum ExecJustInnerVarFirst(ExprState *state, ExprContext *econtext, bool *isnull); static Datum ExecJustInnerVar(ExprState *state, ExprContext *econtext, bool *isnull); -static Datum ExecJustOuterVarFirst(ExprState *state, ExprContext *econtext, bool *isnull); static Datum ExecJustOuterVar(ExprState *state, ExprContext *econtext, bool *isnull); -static Datum ExecJustScanVarFirst(ExprState *state, ExprContext *econtext, bool *isnull); static Datum ExecJustScanVar(ExprState *state, ExprContext *econtext, bool *isnull); static Datum ExecJustConst(ExprState *state, ExprContext *econtext, bool *isnull); static Datum ExecJustAssignInnerVar(ExprState *state, ExprContext *econtext, bool *isnull); static Datum ExecJustAssignOuterVar(ExprState *state, ExprContext *econtext, bool *isnull); static Datum ExecJustAssignScanVar(ExprState *state, ExprContext *econtext, bool *isnull); +static Datum ExecJustApplyFuncToCase(ExprState *state, ExprContext *econtext, bool *isnull); /* @@ -172,6 +181,14 @@ ExecReadyInterpretedExpr(ExprState *state) if (state->flags & EEO_FLAG_INTERPRETER_INITIALIZED) return; + /* + * First time through, check whether attribute matches Var. Might not be + * ok anymore, due to schema changes. We do that by setting up a callback + * that does checking on the first call, which then sets the evalfunc + * callback to the actual method of execution. 
+ */ + state->evalfunc = ExecInterpExprStillValid; + /* DIRECT_THREADED should not already be set */ Assert((state->flags & EEO_FLAG_DIRECT_THREADED) == 0); @@ -184,10 +201,8 @@ ExecReadyInterpretedExpr(ExprState *state) /* * Select fast-path evalfuncs for very simple expressions. "Starting up" - * the full interpreter is a measurable overhead for these. Plain Vars - * and Const seem to be the only ones where the intrinsic cost is small - * enough that the overhead of ExecInterpExpr matters. For more complex - * expressions it's cheaper to use ExecInterpExpr always. + * the full interpreter is a measurable overhead for these, and these + * patterns occur often enough to be worth optimizing. */ if (state->steps_len == 3) { @@ -195,46 +210,53 @@ ExecReadyInterpretedExpr(ExprState *state) ExprEvalOp step1 = state->steps[1].opcode; if (step0 == EEOP_INNER_FETCHSOME && - step1 == EEOP_INNER_VAR_FIRST) + step1 == EEOP_INNER_VAR) { - state->evalfunc = ExecJustInnerVarFirst; + state->evalfunc_private = (void *) ExecJustInnerVar; return; } else if (step0 == EEOP_OUTER_FETCHSOME && - step1 == EEOP_OUTER_VAR_FIRST) + step1 == EEOP_OUTER_VAR) { - state->evalfunc = ExecJustOuterVarFirst; + state->evalfunc_private = (void *) ExecJustOuterVar; return; } else if (step0 == EEOP_SCAN_FETCHSOME && - step1 == EEOP_SCAN_VAR_FIRST) + step1 == EEOP_SCAN_VAR) { - state->evalfunc = ExecJustScanVarFirst; + state->evalfunc_private = (void *) ExecJustScanVar; return; } else if (step0 == EEOP_INNER_FETCHSOME && step1 == EEOP_ASSIGN_INNER_VAR) { - state->evalfunc = ExecJustAssignInnerVar; + state->evalfunc_private = (void *) ExecJustAssignInnerVar; return; } else if (step0 == EEOP_OUTER_FETCHSOME && step1 == EEOP_ASSIGN_OUTER_VAR) { - state->evalfunc = ExecJustAssignOuterVar; + state->evalfunc_private = (void *) ExecJustAssignOuterVar; return; } else if (step0 == EEOP_SCAN_FETCHSOME && step1 == EEOP_ASSIGN_SCAN_VAR) { - state->evalfunc = ExecJustAssignScanVar; + state->evalfunc_private = (void *) ExecJustAssignScanVar; + return; + } + else if (step0 == EEOP_CASE_TESTVAL && + step1 == EEOP_FUNCEXPR_STRICT && + state->steps[0].d.casetest.value) + { + state->evalfunc_private = (void *) ExecJustApplyFuncToCase; return; } } else if (state->steps_len == 2 && state->steps[0].opcode == EEOP_CONST) { - state->evalfunc = ExecJustConst; + state->evalfunc_private = (void *) ExecJustConst; return; } @@ -258,7 +280,7 @@ ExecReadyInterpretedExpr(ExprState *state) } #endif /* EEO_USE_COMPUTED_GOTO */ - state->evalfunc = ExecInterpExpr; + state->evalfunc_private = (void *) ExecInterpExpr; } @@ -289,11 +311,8 @@ ExecInterpExpr(ExprState *state, ExprContext *econtext, bool *isnull) &&CASE_EEOP_INNER_FETCHSOME, &&CASE_EEOP_OUTER_FETCHSOME, &&CASE_EEOP_SCAN_FETCHSOME, - &&CASE_EEOP_INNER_VAR_FIRST, &&CASE_EEOP_INNER_VAR, - &&CASE_EEOP_OUTER_VAR_FIRST, &&CASE_EEOP_OUTER_VAR, - &&CASE_EEOP_SCAN_VAR_FIRST, &&CASE_EEOP_SCAN_VAR, &&CASE_EEOP_INNER_SYSVAR, &&CASE_EEOP_OUTER_SYSVAR, @@ -331,10 +350,12 @@ ExecInterpExpr(ExprState *state, ExprContext *econtext, bool *isnull) &&CASE_EEOP_BOOLTEST_IS_NOT_FALSE, &&CASE_EEOP_PARAM_EXEC, &&CASE_EEOP_PARAM_EXTERN, + &&CASE_EEOP_PARAM_CALLBACK, &&CASE_EEOP_CASE_TESTVAL, &&CASE_EEOP_MAKE_READONLY, &&CASE_EEOP_IOCOERCE, &&CASE_EEOP_DISTINCT, + &&CASE_EEOP_NOT_DISTINCT, &&CASE_EEOP_NULLIF, &&CASE_EEOP_SQLVALUEFUNCTION, &&CASE_EEOP_CURRENTOFEXPR, @@ -363,6 +384,15 @@ ExecInterpExpr(ExprState *state, ExprContext *econtext, bool *isnull) &&CASE_EEOP_WINDOW_FUNC, &&CASE_EEOP_SUBPLAN, 
&&CASE_EEOP_ALTERNATIVE_SUBPLAN, + &&CASE_EEOP_AGG_STRICT_DESERIALIZE, + &&CASE_EEOP_AGG_DESERIALIZE, + &&CASE_EEOP_AGG_STRICT_INPUT_CHECK, + &&CASE_EEOP_AGG_INIT_TRANS, + &&CASE_EEOP_AGG_STRICT_TRANS_CHECK, + &&CASE_EEOP_AGG_PLAIN_TRANS_BYVAL, + &&CASE_EEOP_AGG_PLAIN_TRANS, + &&CASE_EEOP_AGG_ORDERED_TRANS_DATUM, + &&CASE_EEOP_AGG_ORDERED_TRANS_TUPLE, &&CASE_EEOP_LAST }; @@ -415,22 +445,6 @@ ExecInterpExpr(ExprState *state, ExprContext *econtext, bool *isnull) EEO_NEXT(); } - EEO_CASE(EEOP_INNER_VAR_FIRST) - { - int attnum = op->d.var.attnum; - - /* - * First time through, check whether attribute matches Var. Might - * not be ok anymore, due to schema changes. - */ - CheckVarSlotCompatibility(innerslot, attnum + 1, op->d.var.vartype); - - /* Skip that check on subsequent evaluations */ - op->opcode = EEO_OPCODE(EEOP_INNER_VAR); - - /* FALL THROUGH to EEOP_INNER_VAR */ - } - EEO_CASE(EEOP_INNER_VAR) { int attnum = op->d.var.attnum; @@ -448,18 +462,6 @@ ExecInterpExpr(ExprState *state, ExprContext *econtext, bool *isnull) EEO_NEXT(); } - EEO_CASE(EEOP_OUTER_VAR_FIRST) - { - int attnum = op->d.var.attnum; - - /* See EEOP_INNER_VAR_FIRST comments */ - - CheckVarSlotCompatibility(outerslot, attnum + 1, op->d.var.vartype); - op->opcode = EEO_OPCODE(EEOP_OUTER_VAR); - - /* FALL THROUGH to EEOP_OUTER_VAR */ - } - EEO_CASE(EEOP_OUTER_VAR) { int attnum = op->d.var.attnum; @@ -473,18 +475,6 @@ ExecInterpExpr(ExprState *state, ExprContext *econtext, bool *isnull) EEO_NEXT(); } - EEO_CASE(EEOP_SCAN_VAR_FIRST) - { - int attnum = op->d.var.attnum; - - /* See EEOP_INNER_VAR_FIRST comments */ - - CheckVarSlotCompatibility(scanslot, attnum + 1, op->d.var.vartype); - op->opcode = EEO_OPCODE(EEOP_SCAN_VAR); - - /* FALL THROUGH to EEOP_SCAN_VAR */ - } - EEO_CASE(EEOP_SCAN_VAR) { int attnum = op->d.var.attnum; @@ -500,49 +490,19 @@ ExecInterpExpr(ExprState *state, ExprContext *econtext, bool *isnull) EEO_CASE(EEOP_INNER_SYSVAR) { - int attnum = op->d.var.attnum; - - /* these asserts must match defenses in slot_getattr */ - Assert(innerslot->tts_tuple != NULL); - Assert(innerslot->tts_tuple != &(innerslot->tts_minhdr)); - /* heap_getsysattr has sufficient defenses against bad attnums */ - - *op->resvalue = heap_getsysattr(innerslot->tts_tuple, attnum, - innerslot->tts_tupleDescriptor, - op->resnull); - + ExecEvalSysVar(state, op, econtext, innerslot); EEO_NEXT(); } EEO_CASE(EEOP_OUTER_SYSVAR) { - int attnum = op->d.var.attnum; - - /* these asserts must match defenses in slot_getattr */ - Assert(outerslot->tts_tuple != NULL); - Assert(outerslot->tts_tuple != &(outerslot->tts_minhdr)); - - /* heap_getsysattr has sufficient defenses against bad attnums */ - *op->resvalue = heap_getsysattr(outerslot->tts_tuple, attnum, - outerslot->tts_tupleDescriptor, - op->resnull); - + ExecEvalSysVar(state, op, econtext, outerslot); EEO_NEXT(); } EEO_CASE(EEOP_SCAN_SYSVAR) { - int attnum = op->d.var.attnum; - - /* these asserts must match defenses in slot_getattr */ - Assert(scanslot->tts_tuple != NULL); - Assert(scanslot->tts_tuple != &(scanslot->tts_minhdr)); - /* heap_getsysattr has sufficient defenses against bad attnums */ - - *op->resvalue = heap_getsysattr(scanslot->tts_tuple, attnum, - scanslot->tts_tupleDescriptor, - op->resnull); - + ExecEvalSysVar(state, op, econtext, scanslot); EEO_NEXT(); } @@ -641,13 +601,22 @@ ExecInterpExpr(ExprState *state, ExprContext *econtext, bool *isnull) * As both STRICT checks and function-usage are noticeable performance * wise, and function calls are a very hot-path (they also back * 
operators!), it's worth having so many separate opcodes. + * + * Note: the reason for using a temporary variable "d", here and in + * other places, is that some compilers think "*op->resvalue = f();" + * requires them to evaluate op->resvalue into a register before + * calling f(), just in case f() is able to modify op->resvalue + * somehow. The extra line of code can save a useless register spill + * and reload across the function call. */ EEO_CASE(EEOP_FUNCEXPR) { FunctionCallInfo fcinfo = op->d.func.fcinfo_data; + Datum d; fcinfo->isnull = false; - *op->resvalue = (op->d.func.fn_addr) (fcinfo); + d = op->d.func.fn_addr(fcinfo); + *op->resvalue = d; *op->resnull = fcinfo->isnull; EEO_NEXT(); @@ -658,6 +627,7 @@ ExecInterpExpr(ExprState *state, ExprContext *econtext, bool *isnull) FunctionCallInfo fcinfo = op->d.func.fcinfo_data; bool *argnull = fcinfo->argnull; int argno; + Datum d; /* strict function, so check for NULL args */ for (argno = 0; argno < op->d.func.nargs; argno++) @@ -669,7 +639,8 @@ ExecInterpExpr(ExprState *state, ExprContext *econtext, bool *isnull) } } fcinfo->isnull = false; - *op->resvalue = (op->d.func.fn_addr) (fcinfo); + d = op->d.func.fn_addr(fcinfo); + *op->resvalue = d; *op->resnull = fcinfo->isnull; strictfail: @@ -678,46 +649,17 @@ ExecInterpExpr(ExprState *state, ExprContext *econtext, bool *isnull) EEO_CASE(EEOP_FUNCEXPR_FUSAGE) { - FunctionCallInfo fcinfo = op->d.func.fcinfo_data; - PgStat_FunctionCallUsage fcusage; - - pgstat_init_function_usage(fcinfo, &fcusage); - - fcinfo->isnull = false; - *op->resvalue = (op->d.func.fn_addr) (fcinfo); - *op->resnull = fcinfo->isnull; - - pgstat_end_function_usage(&fcusage, true); + /* not common enough to inline */ + ExecEvalFuncExprFusage(state, op, econtext); EEO_NEXT(); } EEO_CASE(EEOP_FUNCEXPR_STRICT_FUSAGE) { - FunctionCallInfo fcinfo = op->d.func.fcinfo_data; - PgStat_FunctionCallUsage fcusage; - bool *argnull = fcinfo->argnull; - int argno; + /* not common enough to inline */ + ExecEvalFuncExprStrictFusage(state, op, econtext); - /* strict function, so check for NULL args */ - for (argno = 0; argno < op->d.func.nargs; argno++) - { - if (argnull[argno]) - { - *op->resnull = true; - goto strictfail_fusage; - } - } - - pgstat_init_function_usage(fcinfo, &fcusage); - - fcinfo->isnull = false; - *op->resvalue = (op->d.func.fn_addr) (fcinfo); - *op->resnull = fcinfo->isnull; - - pgstat_end_function_usage(&fcusage, true); - - strictfail_fusage: EEO_NEXT(); } @@ -1022,6 +964,13 @@ ExecInterpExpr(ExprState *state, ExprContext *econtext, bool *isnull) EEO_NEXT(); } + EEO_CASE(EEOP_PARAM_CALLBACK) + { + /* allow an extension module to supply a PARAM_EXTERN value */ + op->d.cparam.paramfunc(state, op, econtext); + EEO_NEXT(); + } + EEO_CASE(EEOP_CASE_TESTVAL) { /* @@ -1113,6 +1062,7 @@ ExecInterpExpr(ExprState *state, ExprContext *econtext, bool *isnull) if (!op->d.iocoerce.finfo_in->fn_strict || str != NULL) { FunctionCallInfo fcinfo_in; + Datum d; fcinfo_in = op->d.iocoerce.fcinfo_data_in; fcinfo_in->arg[0] = PointerGetDatum(str); @@ -1120,7 +1070,8 @@ ExecInterpExpr(ExprState *state, ExprContext *econtext, bool *isnull) /* second and third arguments are already set up */ fcinfo_in->isnull = false; - *op->resvalue = FunctionCallInvoke(fcinfo_in); + d = FunctionCallInvoke(fcinfo_in); + *op->resvalue = d; /* Should get null result if and only if str is NULL */ if (str == NULL) @@ -1170,7 +1121,7 @@ ExecInterpExpr(ExprState *state, ExprContext *econtext, bool *isnull) Datum eqresult; fcinfo->isnull = false; - eqresult = 
(op->d.func.fn_addr) (fcinfo); + eqresult = op->d.func.fn_addr(fcinfo); /* Must invert result of "="; safe to do even if null */ *op->resvalue = BoolGetDatum(!DatumGetBool(eqresult)); *op->resnull = fcinfo->isnull; @@ -1179,6 +1130,34 @@ ExecInterpExpr(ExprState *state, ExprContext *econtext, bool *isnull) EEO_NEXT(); } + /* see EEOP_DISTINCT for comments, this is just inverted */ + EEO_CASE(EEOP_NOT_DISTINCT) + { + FunctionCallInfo fcinfo = op->d.func.fcinfo_data; + + if (fcinfo->argnull[0] && fcinfo->argnull[1]) + { + *op->resvalue = BoolGetDatum(true); + *op->resnull = false; + } + else if (fcinfo->argnull[0] || fcinfo->argnull[1]) + { + *op->resvalue = BoolGetDatum(false); + *op->resnull = false; + } + else + { + Datum eqresult; + + fcinfo->isnull = false; + eqresult = op->d.func.fn_addr(fcinfo); + *op->resvalue = eqresult; + *op->resnull = fcinfo->isnull; + } + + EEO_NEXT(); + } + EEO_CASE(EEOP_NULLIF) { /* @@ -1192,7 +1171,7 @@ ExecInterpExpr(ExprState *state, ExprContext *econtext, bool *isnull) Datum result; fcinfo->isnull = false; - result = (op->d.func.fn_addr) (fcinfo); + result = op->d.func.fn_addr(fcinfo); /* if the arguments are equal return null */ if (!fcinfo->isnull && DatumGetBool(result)) @@ -1252,7 +1231,7 @@ ExecInterpExpr(ExprState *state, ExprContext *econtext, bool *isnull) EEO_CASE(EEOP_ARRAYCOERCE) { /* too complex for an inline implementation */ - ExecEvalArrayCoerce(state, op); + ExecEvalArrayCoerce(state, op, econtext); EEO_NEXT(); } @@ -1268,6 +1247,7 @@ ExecInterpExpr(ExprState *state, ExprContext *econtext, bool *isnull) EEO_CASE(EEOP_ROWCOMPARE_STEP) { FunctionCallInfo fcinfo = op->d.rowcompare_step.fcinfo_data; + Datum d; /* force NULL result if strict fn and NULL input */ if (op->d.rowcompare_step.finfo->fn_strict && @@ -1279,7 +1259,8 @@ ExecInterpExpr(ExprState *state, ExprContext *econtext, bool *isnull) /* Apply comparison function */ fcinfo->isnull = false; - *op->resvalue = (op->d.rowcompare_step.fn_addr) (fcinfo); + d = op->d.rowcompare_step.fn_addr(fcinfo); + *op->resvalue = d; /* force NULL result if NULL function result */ if (fcinfo->isnull) @@ -1506,6 +1487,235 @@ ExecInterpExpr(ExprState *state, ExprContext *econtext, bool *isnull) EEO_NEXT(); } + /* evaluate a strict aggregate deserialization function */ + EEO_CASE(EEOP_AGG_STRICT_DESERIALIZE) + { + bool *argnull = op->d.agg_deserialize.fcinfo_data->argnull; + + /* Don't call a strict deserialization function with NULL input */ + if (argnull[0]) + EEO_JUMP(op->d.agg_deserialize.jumpnull); + + /* fallthrough */ + } + + /* evaluate aggregate deserialization function (non-strict portion) */ + EEO_CASE(EEOP_AGG_DESERIALIZE) + { + FunctionCallInfo fcinfo = op->d.agg_deserialize.fcinfo_data; + AggState *aggstate = op->d.agg_deserialize.aggstate; + MemoryContext oldContext; + + /* + * We run the deserialization functions in per-input-tuple memory + * context. + */ + oldContext = MemoryContextSwitchTo(aggstate->tmpcontext->ecxt_per_tuple_memory); + fcinfo->isnull = false; + *op->resvalue = FunctionCallInvoke(fcinfo); + *op->resnull = fcinfo->isnull; + MemoryContextSwitchTo(oldContext); + + EEO_NEXT(); + } + + /* + * Check that a strict aggregate transition / combination function's + * input is not NULL. 
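EEOP_AGG_STRICT_DESERIALIZE above performs only the strict NULL check and then falls through into the shared EEOP_AGG_DESERIALIZE body. With computed gotos the adjacent EEO_CASE labels make that fall-through implicit; in a build without them the same shape is roughly a switch with an intentional fallthrough, as in this simplified sketch (opcode names shortened, the deserialization call replaced by a printf):

#include <stdbool.h>
#include <stdio.h>

enum opcode {STRICT_DESERIALIZE, DESERIALIZE};

/* Dispatch one step; returns true if the strict check skipped the call. */
static bool
exec_step(enum opcode op, bool argnull)
{
    switch (op)
    {
        case STRICT_DESERIALIZE:
            if (argnull)
                return true;    /* like EEO_JUMP(jumpnull): skip the call */
            /* FALLTHROUGH into the shared, non-strict body */
        case DESERIALIZE:
            printf("deserialization function called\n");
            return false;
    }
    return false;
}

int
main(void)
{
    exec_step(STRICT_DESERIALIZE, true);    /* skipped */
    exec_step(STRICT_DESERIALIZE, false);   /* falls through and calls */
    exec_step(DESERIALIZE, false);          /* non-strict path */
    return 0;
}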
+ */ + EEO_CASE(EEOP_AGG_STRICT_INPUT_CHECK) + { + int argno; + bool *nulls = op->d.agg_strict_input_check.nulls; + int nargs = op->d.agg_strict_input_check.nargs; + + for (argno = 0; argno < nargs; argno++) + { + if (nulls[argno]) + EEO_JUMP(op->d.agg_strict_input_check.jumpnull); + } + EEO_NEXT(); + } + + /* + * Initialize an aggregate's first value if necessary. + */ + EEO_CASE(EEOP_AGG_INIT_TRANS) + { + AggState *aggstate; + AggStatePerGroup pergroup; + + aggstate = op->d.agg_init_trans.aggstate; + pergroup = &aggstate->all_pergroups + [op->d.agg_init_trans.setoff] + [op->d.agg_init_trans.transno]; + + /* If transValue has not yet been initialized, do so now. */ + if (pergroup->noTransValue) + { + AggStatePerTrans pertrans = op->d.agg_init_trans.pertrans; + + aggstate->curaggcontext = op->d.agg_init_trans.aggcontext; + aggstate->current_set = op->d.agg_init_trans.setno; + + ExecAggInitGroup(aggstate, pertrans, pergroup); + + /* copied trans value from input, done this round */ + EEO_JUMP(op->d.agg_init_trans.jumpnull); + } + + EEO_NEXT(); + } + + /* check that a strict aggregate's input isn't NULL */ + EEO_CASE(EEOP_AGG_STRICT_TRANS_CHECK) + { + AggState *aggstate; + AggStatePerGroup pergroup; + + aggstate = op->d.agg_strict_trans_check.aggstate; + pergroup = &aggstate->all_pergroups + [op->d.agg_strict_trans_check.setoff] + [op->d.agg_strict_trans_check.transno]; + + if (unlikely(pergroup->transValueIsNull)) + EEO_JUMP(op->d.agg_strict_trans_check.jumpnull); + + EEO_NEXT(); + } + + /* + * Evaluate aggregate transition / combine function that has a + * by-value transition type. That's a separate case from the + * by-reference implementation because it's a bit simpler. + */ + EEO_CASE(EEOP_AGG_PLAIN_TRANS_BYVAL) + { + AggState *aggstate; + AggStatePerTrans pertrans; + AggStatePerGroup pergroup; + FunctionCallInfo fcinfo; + MemoryContext oldContext; + Datum newVal; + + aggstate = op->d.agg_trans.aggstate; + pertrans = op->d.agg_trans.pertrans; + + pergroup = &aggstate->all_pergroups + [op->d.agg_trans.setoff] + [op->d.agg_trans.transno]; + + Assert(pertrans->transtypeByVal); + + fcinfo = &pertrans->transfn_fcinfo; + + /* cf. select_current_set() */ + aggstate->curaggcontext = op->d.agg_trans.aggcontext; + aggstate->current_set = op->d.agg_trans.setno; + + /* set up aggstate->curpertrans for AggGetAggref() */ + aggstate->curpertrans = pertrans; + + /* invoke transition function in per-tuple context */ + oldContext = MemoryContextSwitchTo(aggstate->tmpcontext->ecxt_per_tuple_memory); + + fcinfo->arg[0] = pergroup->transValue; + fcinfo->argnull[0] = pergroup->transValueIsNull; + fcinfo->isnull = false; /* just in case transfn doesn't set it */ + + newVal = FunctionCallInvoke(fcinfo); + + pergroup->transValue = newVal; + pergroup->transValueIsNull = fcinfo->isnull; + + MemoryContextSwitchTo(oldContext); + + EEO_NEXT(); + } + + /* + * Evaluate aggregate transition / combine function that has a + * by-reference transition type. + * + * Could optimize a bit further by splitting off by-reference + * fixed-length types, but currently that doesn't seem worth it. 
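Taken together, the aggregate opcodes above implement the usual strict-transition rules: a NULL input is skipped (EEOP_AGG_STRICT_INPUT_CHECK), the first non-NULL input is adopted as the initial state when there is no initcond (EEOP_AGG_INIT_TRANS), a NULL stored state short-circuits the call (EEOP_AGG_STRICT_TRANS_CHECK), and only then is the transition function invoked. A condensed, purely illustrative model of that decision sequence for a max()-like aggregate over ints, with group_state as an invented stand-in for AggStatePerGroup:

#include <stdbool.h>
#include <stdio.h>

typedef struct
{
    int     trans_value;
    bool    trans_is_null;
    bool    no_trans_value;     /* no initcond and nothing adopted yet */
} group_state;

/* One strict transition step for a max()-like aggregate. */
static void
advance(group_state *g, int input, bool input_is_null)
{
    if (input_is_null)
        return;                         /* EEOP_AGG_STRICT_INPUT_CHECK */
    if (g->no_trans_value)
    {
        g->trans_value = input;         /* EEOP_AGG_INIT_TRANS: adopt input */
        g->trans_is_null = false;
        g->no_trans_value = false;
        return;
    }
    if (g->trans_is_null)
        return;                         /* EEOP_AGG_STRICT_TRANS_CHECK */
    if (input > g->trans_value)         /* EEOP_AGG_PLAIN_TRANS_BYVAL body */
        g->trans_value = input;
}

int
main(void)
{
    group_state g = {0, true, true};
    int         vals[] = {3, 7, 5};
    int         i;

    advance(&g, 0, true);               /* NULL input: ignored */
    for (i = 0; i < 3; i++)
        advance(&g, vals[i], false);
    printf("max = %d\n", g.trans_value);    /* prints 7 */
    return 0;
}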
+ */ + EEO_CASE(EEOP_AGG_PLAIN_TRANS) + { + AggState *aggstate; + AggStatePerTrans pertrans; + AggStatePerGroup pergroup; + FunctionCallInfo fcinfo; + MemoryContext oldContext; + Datum newVal; + + aggstate = op->d.agg_trans.aggstate; + pertrans = op->d.agg_trans.pertrans; + + pergroup = &aggstate->all_pergroups + [op->d.agg_trans.setoff] + [op->d.agg_trans.transno]; + + Assert(!pertrans->transtypeByVal); + + fcinfo = &pertrans->transfn_fcinfo; + + /* cf. select_current_set() */ + aggstate->curaggcontext = op->d.agg_trans.aggcontext; + aggstate->current_set = op->d.agg_trans.setno; + + /* set up aggstate->curpertrans for AggGetAggref() */ + aggstate->curpertrans = pertrans; + + /* invoke transition function in per-tuple context */ + oldContext = MemoryContextSwitchTo(aggstate->tmpcontext->ecxt_per_tuple_memory); + + fcinfo->arg[0] = pergroup->transValue; + fcinfo->argnull[0] = pergroup->transValueIsNull; + fcinfo->isnull = false; /* just in case transfn doesn't set it */ + + newVal = FunctionCallInvoke(fcinfo); + + /* + * For pass-by-ref datatype, must copy the new value into + * aggcontext and free the prior transValue. But if transfn + * returned a pointer to its first input, we don't need to do + * anything. Also, if transfn returned a pointer to a R/W + * expanded object that is already a child of the aggcontext, + * assume we can adopt that value without copying it. + */ + if (DatumGetPointer(newVal) != DatumGetPointer(pergroup->transValue)) + newVal = ExecAggTransReparent(aggstate, pertrans, + newVal, fcinfo->isnull, + pergroup->transValue, + pergroup->transValueIsNull); + + pergroup->transValue = newVal; + pergroup->transValueIsNull = fcinfo->isnull; + + MemoryContextSwitchTo(oldContext); + + EEO_NEXT(); + } + + /* process single-column ordered aggregate datum */ + EEO_CASE(EEOP_AGG_ORDERED_TRANS_DATUM) + { + /* too complex for an inline implementation */ + ExecEvalAggOrderedTransDatum(state, op, econtext); + + EEO_NEXT(); + } + + /* process multi-column ordered aggregate tuple */ + EEO_CASE(EEOP_AGG_ORDERED_TRANS_TUPLE) + { + /* too complex for an inline implementation */ + ExecEvalAggOrderedTransTuple(state, op, econtext); + + EEO_NEXT(); + } + EEO_CASE(EEOP_LAST) { /* unreachable */ @@ -1519,6 +1729,78 @@ ExecInterpExpr(ExprState *state, ExprContext *econtext, bool *isnull) return state->resvalue; } +/* + * Expression evaluation callback that performs extra checks before executing + * the expression. Declared extern so other methods of execution can use it + * too. + */ +Datum +ExecInterpExprStillValid(ExprState *state, ExprContext *econtext, bool *isNull) +{ + /* + * First time through, check whether attribute matches Var. Might not be + * ok anymore, due to schema changes. + */ + CheckExprStillValid(state, econtext); + + /* skip the check during further executions */ + state->evalfunc = (ExprStateEvalFunc) state->evalfunc_private; + + /* and actually execute */ + return state->evalfunc(state, econtext, isNull); +} + +/* + * Check that an expression is still valid in the face of potential schema + * changes since the plan has been created. 
+ */ +void +CheckExprStillValid(ExprState *state, ExprContext *econtext) +{ + int i = 0; + TupleTableSlot *innerslot; + TupleTableSlot *outerslot; + TupleTableSlot *scanslot; + + innerslot = econtext->ecxt_innertuple; + outerslot = econtext->ecxt_outertuple; + scanslot = econtext->ecxt_scantuple; + + for (i = 0; i < state->steps_len; i++) + { + ExprEvalStep *op = &state->steps[i]; + + switch (ExecEvalStepOp(state, op)) + { + case EEOP_INNER_VAR: + { + int attnum = op->d.var.attnum; + + CheckVarSlotCompatibility(innerslot, attnum + 1, op->d.var.vartype); + break; + } + + case EEOP_OUTER_VAR: + { + int attnum = op->d.var.attnum; + + CheckVarSlotCompatibility(outerslot, attnum + 1, op->d.var.vartype); + break; + } + + case EEOP_SCAN_VAR: + { + int attnum = op->d.var.attnum; + + CheckVarSlotCompatibility(scanslot, attnum + 1, op->d.var.vartype); + break; + } + default: + break; + } + } +} + /* * Check whether a user attribute in a slot can be referenced by a Var * expression. This should succeed unless there have been schema changes @@ -1553,7 +1835,7 @@ CheckVarSlotCompatibility(TupleTableSlot *slot, int attnum, Oid vartype) elog(ERROR, "attribute number %d exceeds number of columns %d", attnum, slot_tupdesc->natts); - attr = slot_tupdesc->attrs[attnum - 1]; + attr = TupleDescAttr(slot_tupdesc, attnum - 1); if (attr->attisdropped) ereport(ERROR, @@ -1631,20 +1913,14 @@ ShutdownTupleDescRef(Datum arg) * Fast-path functions, for very simple expressions */ -/* Simple reference to inner Var, first time through */ +/* Simple reference to inner Var */ static Datum -ExecJustInnerVarFirst(ExprState *state, ExprContext *econtext, bool *isnull) +ExecJustInnerVar(ExprState *state, ExprContext *econtext, bool *isnull) { ExprEvalStep *op = &state->steps[1]; int attnum = op->d.var.attnum + 1; TupleTableSlot *slot = econtext->ecxt_innertuple; - /* See ExecInterpExpr()'s comments for EEOP_INNER_VAR_FIRST */ - - CheckVarSlotCompatibility(slot, attnum, op->d.var.vartype); - op->opcode = EEOP_INNER_VAR; /* just for cleanliness */ - state->evalfunc = ExecJustInnerVar; - /* * Since we use slot_getattr(), we don't need to implement the FETCHSOME * step explicitly, and we also needn't Assert that the attnum is in range @@ -1653,34 +1929,6 @@ ExecJustInnerVarFirst(ExprState *state, ExprContext *econtext, bool *isnull) return slot_getattr(slot, attnum, isnull); } -/* Simple reference to inner Var */ -static Datum -ExecJustInnerVar(ExprState *state, ExprContext *econtext, bool *isnull) -{ - ExprEvalStep *op = &state->steps[1]; - int attnum = op->d.var.attnum + 1; - TupleTableSlot *slot = econtext->ecxt_innertuple; - - /* See comments in ExecJustInnerVarFirst */ - return slot_getattr(slot, attnum, isnull); -} - -/* Simple reference to outer Var, first time through */ -static Datum -ExecJustOuterVarFirst(ExprState *state, ExprContext *econtext, bool *isnull) -{ - ExprEvalStep *op = &state->steps[1]; - int attnum = op->d.var.attnum + 1; - TupleTableSlot *slot = econtext->ecxt_outertuple; - - CheckVarSlotCompatibility(slot, attnum, op->d.var.vartype); - op->opcode = EEOP_OUTER_VAR; /* just for cleanliness */ - state->evalfunc = ExecJustOuterVar; - - /* See comments in ExecJustInnerVarFirst */ - return slot_getattr(slot, attnum, isnull); -} - /* Simple reference to outer Var */ static Datum ExecJustOuterVar(ExprState *state, ExprContext *econtext, bool *isnull) @@ -1689,23 +1937,7 @@ ExecJustOuterVar(ExprState *state, ExprContext *econtext, bool *isnull) int attnum = op->d.var.attnum + 1; TupleTableSlot *slot = 
econtext->ecxt_outertuple; - /* See comments in ExecJustInnerVarFirst */ - return slot_getattr(slot, attnum, isnull); -} - -/* Simple reference to scan Var, first time through */ -static Datum -ExecJustScanVarFirst(ExprState *state, ExprContext *econtext, bool *isnull) -{ - ExprEvalStep *op = &state->steps[1]; - int attnum = op->d.var.attnum + 1; - TupleTableSlot *slot = econtext->ecxt_scantuple; - - CheckVarSlotCompatibility(slot, attnum, op->d.var.vartype); - op->opcode = EEOP_SCAN_VAR; /* just for cleanliness */ - state->evalfunc = ExecJustScanVar; - - /* See comments in ExecJustInnerVarFirst */ + /* See comments in ExecJustInnerVar */ return slot_getattr(slot, attnum, isnull); } @@ -1717,7 +1949,7 @@ ExecJustScanVar(ExprState *state, ExprContext *econtext, bool *isnull) int attnum = op->d.var.attnum + 1; TupleTableSlot *slot = econtext->ecxt_scantuple; - /* See comments in ExecJustInnerVarFirst */ + /* See comments in ExecJustInnerVar */ return slot_getattr(slot, attnum, isnull); } @@ -1786,6 +2018,61 @@ ExecJustAssignScanVar(ExprState *state, ExprContext *econtext, bool *isnull) return 0; } +/* Evaluate CASE_TESTVAL and apply a strict function to it */ +static Datum +ExecJustApplyFuncToCase(ExprState *state, ExprContext *econtext, bool *isnull) +{ + ExprEvalStep *op = &state->steps[0]; + FunctionCallInfo fcinfo; + bool *argnull; + int argno; + Datum d; + + /* + * XXX with some redesign of the CaseTestExpr mechanism, maybe we could + * get rid of this data shuffling? + */ + *op->resvalue = *op->d.casetest.value; + *op->resnull = *op->d.casetest.isnull; + + op++; + + fcinfo = op->d.func.fcinfo_data; + argnull = fcinfo->argnull; + + /* strict function, so check for NULL args */ + for (argno = 0; argno < op->d.func.nargs; argno++) + { + if (argnull[argno]) + { + *isnull = true; + return (Datum) 0; + } + } + fcinfo->isnull = false; + d = op->d.func.fn_addr(fcinfo); + *isnull = fcinfo->isnull; + return d; +} + +#if defined(EEO_USE_COMPUTED_GOTO) +/* + * Comparator used when building address->opcode lookup table for + * ExecEvalStepOp() in the threaded dispatch case. + */ +static int +dispatch_compare_ptr(const void *a, const void *b) +{ + const ExprEvalOpLookup *la = (const ExprEvalOpLookup *) a; + const ExprEvalOpLookup *lb = (const ExprEvalOpLookup *) b; + + if (la->opcode < lb->opcode) + return -1; + else if (la->opcode > lb->opcode) + return 1; + return 0; +} +#endif /* * Do one-time initialization of interpretation machinery. @@ -1796,8 +2083,25 @@ ExecInitInterpreter(void) #if defined(EEO_USE_COMPUTED_GOTO) /* Set up externally-visible pointer to dispatch table */ if (dispatch_table == NULL) + { + int i; + dispatch_table = (const void **) DatumGetPointer(ExecInterpExpr(NULL, NULL, NULL)); + + /* build reverse lookup table */ + for (i = 0; i < EEOP_LAST; i++) + { + reverse_dispatch_table[i].opcode = dispatch_table[i]; + reverse_dispatch_table[i].op = (ExprEvalOp) i; + } + + /* make it bsearch()able */ + qsort(reverse_dispatch_table, + EEOP_LAST /* nmembers */ , + sizeof(ExprEvalOpLookup), + dispatch_compare_ptr); + } #endif } @@ -1806,10 +2110,6 @@ ExecInitInterpreter(void) * * When direct-threading is in use, ExprState->opcode isn't easily * decipherable. This function returns the appropriate enum member. - * - * This currently is only supposed to be used in paths that aren't critical - * performance-wise. If that changes, we could add an inverse dispatch_table - * that's sorted on the address, so a binary search can be performed. 
*/ ExprEvalOp ExecEvalStepOp(ExprState *state, ExprEvalStep *op) @@ -1817,16 +2117,17 @@ ExecEvalStepOp(ExprState *state, ExprEvalStep *op) #if defined(EEO_USE_COMPUTED_GOTO) if (state->flags & EEO_FLAG_DIRECT_THREADED) { - int i; - - for (i = 0; i < EEOP_LAST; i++) - { - if ((void *) op->opcode == dispatch_table[i]) - { - return (ExprEvalOp) i; - } - } - elog(ERROR, "unknown opcode"); + ExprEvalOpLookup key; + ExprEvalOpLookup *res; + + key.opcode = (void *) op->opcode; + res = bsearch(&key, + reverse_dispatch_table, + EEOP_LAST /* nmembers */ , + sizeof(ExprEvalOpLookup), + dispatch_compare_ptr); + Assert(res); /* unknown ops shouldn't get looked up */ + return res->op; } #endif return (ExprEvalOp) op->opcode; @@ -1837,6 +2138,61 @@ ExecEvalStepOp(ExprState *state, ExprEvalStep *op) * Out-of-line helper functions for complex instructions. */ +/* + * Evaluate EEOP_FUNCEXPR_FUSAGE + */ +void +ExecEvalFuncExprFusage(ExprState *state, ExprEvalStep *op, + ExprContext *econtext) +{ + FunctionCallInfo fcinfo = op->d.func.fcinfo_data; + PgStat_FunctionCallUsage fcusage; + Datum d; + + pgstat_init_function_usage(fcinfo, &fcusage); + + fcinfo->isnull = false; + d = op->d.func.fn_addr(fcinfo); + *op->resvalue = d; + *op->resnull = fcinfo->isnull; + + pgstat_end_function_usage(&fcusage, true); +} + +/* + * Evaluate EEOP_FUNCEXPR_STRICT_FUSAGE + */ +void +ExecEvalFuncExprStrictFusage(ExprState *state, ExprEvalStep *op, + ExprContext *econtext) +{ + + FunctionCallInfo fcinfo = op->d.func.fcinfo_data; + PgStat_FunctionCallUsage fcusage; + bool *argnull = fcinfo->argnull; + int argno; + Datum d; + + /* strict function, so check for NULL args */ + for (argno = 0; argno < op->d.func.nargs; argno++) + { + if (argnull[argno]) + { + *op->resnull = true; + return; + } + } + + pgstat_init_function_usage(fcinfo, &fcusage); + + fcinfo->isnull = false; + d = op->d.func.fn_addr(fcinfo); + *op->resvalue = d; + *op->resnull = fcinfo->isnull; + + pgstat_end_function_usage(&fcusage, true); +} + /* * Evaluate a PARAM_EXEC parameter. 
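The reverse_dispatch_table consulted by ExecEvalStepOp above is the standard "sort once, bsearch many times" mapping from an opaque key (here, a computed-goto jump address) back to its enum value. A self-contained sketch of the same idiom with arbitrary pointer keys (lookup_ent and anchors are invented for the example):

#include <stdio.h>
#include <stdlib.h>

typedef struct
{
    const void *key;    /* e.g. a computed-goto jump address */
    int         op;     /* the enum value it decodes to */
} lookup_ent;

static int
cmp_ptr(const void *a, const void *b)
{
    const lookup_ent *la = (const lookup_ent *) a;
    const lookup_ent *lb = (const lookup_ent *) b;

    if (la->key < lb->key)
        return -1;
    if (la->key > lb->key)
        return 1;
    return 0;
}

int
main(void)
{
    static int  anchors[3];     /* stand-ins for jump targets */
    lookup_ent  table[3];
    lookup_ent  key,
               *res;
    int         i;

    for (i = 0; i < 3; i++)
    {
        table[i].key = &anchors[i];
        table[i].op = i;
    }
    qsort(table, 3, sizeof(lookup_ent), cmp_ptr);   /* build once */

    key.key = &anchors[1];
    res = bsearch(&key, table, 3, sizeof(lookup_ent), cmp_ptr);
    printf("decoded op = %d\n", res ? res->op : -1);
    return 0;
}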
* @@ -1874,11 +2230,14 @@ ExecEvalParamExtern(ExprState *state, ExprEvalStep *op, ExprContext *econtext) if (likely(paramInfo && paramId > 0 && paramId <= paramInfo->numParams)) { - ParamExternData *prm = ¶mInfo->params[paramId - 1]; + ParamExternData *prm; + ParamExternData prmdata; /* give hook a chance in case parameter is dynamic */ - if (!OidIsValid(prm->ptype) && paramInfo->paramFetch != NULL) - (*paramInfo->paramFetch) (paramInfo, paramId); + if (paramInfo->paramFetch != NULL) + prm = paramInfo->paramFetch(paramInfo, paramId, false, &prmdata); + else + prm = ¶mInfo->params[paramId - 1]; if (likely(OidIsValid(prm->ptype))) { @@ -2081,9 +2440,9 @@ ExecEvalRowNullInt(ExprState *state, ExprEvalStep *op, for (att = 1; att <= tupDesc->natts; att++) { /* ignore dropped columns */ - if (tupDesc->attrs[att - 1]->attisdropped) + if (TupleDescAttr(tupDesc, att - 1)->attisdropped) continue; - if (heap_attisnull(&tmptup, att)) + if (heap_attisnull(&tmptup, att, tupDesc)) { /* null field disproves IS NOT NULL */ if (!checkisnull) @@ -2131,14 +2490,6 @@ ExecEvalArrayExpr(ExprState *state, ExprEvalStep *op) Datum *dvalues = op->d.arrayexpr.elemvalues; bool *dnulls = op->d.arrayexpr.elemnulls; - /* Shouldn't happen here, but if length is 0, return empty array */ - if (nelems == 0) - { - *op->resvalue = - PointerGetDatum(construct_empty_array(element_type)); - return; - } - /* setup for 1-D array of the given length */ ndims = 1; dims[0] = nelems; @@ -2328,11 +2679,9 @@ ExecEvalArrayExpr(ExprState *state, ExprEvalStep *op) * Source array is in step's result variable. */ void -ExecEvalArrayCoerce(ExprState *state, ExprEvalStep *op) +ExecEvalArrayCoerce(ExprState *state, ExprEvalStep *op, ExprContext *econtext) { - ArrayCoerceExpr *acoerce = op->d.arraycoerce.coerceexpr; Datum arraydatum; - FunctionCallInfoData locfcinfo; /* NULL array -> NULL result */ if (*op->resnull) @@ -2344,7 +2693,7 @@ ExecEvalArrayCoerce(ExprState *state, ExprEvalStep *op) * If it's binary-compatible, modify the element type in the array header, * but otherwise leave the array as we received it. */ - if (!OidIsValid(acoerce->elemfuncid)) + if (op->d.arraycoerce.elemexprstate == NULL) { /* Detoast input array if necessary, and copy in any case */ ArrayType *array = DatumGetArrayTypePCopy(arraydatum); @@ -2355,23 +2704,12 @@ ExecEvalArrayCoerce(ExprState *state, ExprEvalStep *op) } /* - * Use array_map to apply the function to each array element. - * - * We pass on the desttypmod and isExplicit flags whether or not the - * function wants them. - * - * Note: coercion functions are assumed to not use collation. + * Use array_map to apply the sub-expression to each array element. 
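The ExecEvalParamExtern hunk above changes the PARAM_EXTERN protocol: when a paramFetch hook is installed it is now consulted unconditionally, and it may either return a pointer into the params[] array or fill in the caller-supplied workspace (prmdata) and return that. A hedged sketch of that "return a pointer or fill the caller's workspace" convention, with invented types and with the real hook's extra boolean flag omitted:

#include <stdio.h>

typedef struct
{
    int     value;
    int     ptype;              /* 0 = not valid */
} param_data;

typedef struct param_list
{
    /* hook may fill *workspace and return it, or point into params[] */
    param_data *(*fetch) (struct param_list *pl, int id, param_data *workspace);
    param_data  params[4];
    int         nparams;
} param_list;

static param_data *
dynamic_fetch(param_list *pl, int id, param_data *workspace)
{
    (void) pl;
    workspace->value = id * 10;     /* computed on demand */
    workspace->ptype = 1;
    return workspace;
}

int
main(void)
{
    param_list  pl = {dynamic_fetch, {{0}}, 4};
    param_data  prmdata;
    param_data *prm;
    int         id = 3;

    /* mirrors the new call pattern: prefer the hook when present */
    if (pl.fetch != NULL)
        prm = pl.fetch(&pl, id, &prmdata);
    else
        prm = &pl.params[id - 1];

    printf("param %d = %d\n", id, prm->value);
    return 0;
}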
*/ - InitFunctionCallInfoData(locfcinfo, op->d.arraycoerce.elemfunc, 3, - InvalidOid, NULL, NULL); - locfcinfo.arg[0] = arraydatum; - locfcinfo.arg[1] = Int32GetDatum(acoerce->resulttypmod); - locfcinfo.arg[2] = BoolGetDatum(acoerce->isExplicit); - locfcinfo.argnull[0] = false; - locfcinfo.argnull[1] = false; - locfcinfo.argnull[2] = false; - - *op->resvalue = array_map(&locfcinfo, op->d.arraycoerce.resultelemtype, + *op->resvalue = array_map(arraydatum, + op->d.arraycoerce.elemexprstate, + econtext, + op->d.arraycoerce.resultelemtype, op->d.arraycoerce.amstate); } @@ -2471,57 +2809,105 @@ ExecEvalFieldSelect(ExprState *state, ExprEvalStep *op, ExprContext *econtext) if (*op->resnull) return; - /* Get the composite datum and extract its type fields */ tupDatum = *op->resvalue; - tuple = DatumGetHeapTupleHeader(tupDatum); - tupType = HeapTupleHeaderGetTypeId(tuple); - tupTypmod = HeapTupleHeaderGetTypMod(tuple); + /* We can special-case expanded records for speed */ + if (VARATT_IS_EXTERNAL_EXPANDED(DatumGetPointer(tupDatum))) + { + ExpandedRecordHeader *erh = (ExpandedRecordHeader *) DatumGetEOHP(tupDatum); - /* Lookup tupdesc if first time through or if type changes */ - tupDesc = get_cached_rowtype(tupType, tupTypmod, - &op->d.fieldselect.argdesc, - econtext); + Assert(erh->er_magic == ER_MAGIC); - /* - * Find field's attr record. Note we don't support system columns here: a - * datum tuple doesn't have valid values for most of the interesting - * system columns anyway. - */ - if (fieldnum <= 0) /* should never happen */ - elog(ERROR, "unsupported reference to system column %d in FieldSelect", - fieldnum); - if (fieldnum > tupDesc->natts) /* should never happen */ - elog(ERROR, "attribute number %d exceeds number of columns %d", - fieldnum, tupDesc->natts); - attr = tupDesc->attrs[fieldnum - 1]; - - /* Check for dropped column, and force a NULL result if so */ - if (attr->attisdropped) - { - *op->resnull = true; - return; + /* Extract record's TupleDesc */ + tupDesc = expanded_record_get_tupdesc(erh); + + /* + * Find field's attr record. Note we don't support system columns + * here: a datum tuple doesn't have valid values for most of the + * interesting system columns anyway. + */ + if (fieldnum <= 0) /* should never happen */ + elog(ERROR, "unsupported reference to system column %d in FieldSelect", + fieldnum); + if (fieldnum > tupDesc->natts) /* should never happen */ + elog(ERROR, "attribute number %d exceeds number of columns %d", + fieldnum, tupDesc->natts); + attr = TupleDescAttr(tupDesc, fieldnum - 1); + + /* Check for dropped column, and force a NULL result if so */ + if (attr->attisdropped) + { + *op->resnull = true; + return; + } + + /* Check for type mismatch --- possible after ALTER COLUMN TYPE? */ + /* As in CheckVarSlotCompatibility, we should but can't check typmod */ + if (op->d.fieldselect.resulttype != attr->atttypid) + ereport(ERROR, + (errcode(ERRCODE_DATATYPE_MISMATCH), + errmsg("attribute %d has wrong type", fieldnum), + errdetail("Table has type %s, but query expects %s.", + format_type_be(attr->atttypid), + format_type_be(op->d.fieldselect.resulttype)))); + + /* extract the field */ + *op->resvalue = expanded_record_get_field(erh, fieldnum, + op->resnull); } + else + { + /* Get the composite datum and extract its type fields */ + tuple = DatumGetHeapTupleHeader(tupDatum); - /* Check for type mismatch --- possible after ALTER COLUMN TYPE? 
*/ - /* As in CheckVarSlotCompatibility, we should but can't check typmod */ - if (op->d.fieldselect.resulttype != attr->atttypid) - ereport(ERROR, - (errcode(ERRCODE_DATATYPE_MISMATCH), - errmsg("attribute %d has wrong type", fieldnum), - errdetail("Table has type %s, but query expects %s.", - format_type_be(attr->atttypid), - format_type_be(op->d.fieldselect.resulttype)))); + tupType = HeapTupleHeaderGetTypeId(tuple); + tupTypmod = HeapTupleHeaderGetTypMod(tuple); - /* heap_getattr needs a HeapTuple not a bare HeapTupleHeader */ - tmptup.t_len = HeapTupleHeaderGetDatumLength(tuple); - tmptup.t_data = tuple; + /* Lookup tupdesc if first time through or if type changes */ + tupDesc = get_cached_rowtype(tupType, tupTypmod, + &op->d.fieldselect.argdesc, + econtext); - /* extract the field */ - *op->resvalue = heap_getattr(&tmptup, - fieldnum, - tupDesc, - op->resnull); + /* + * Find field's attr record. Note we don't support system columns + * here: a datum tuple doesn't have valid values for most of the + * interesting system columns anyway. + */ + if (fieldnum <= 0) /* should never happen */ + elog(ERROR, "unsupported reference to system column %d in FieldSelect", + fieldnum); + if (fieldnum > tupDesc->natts) /* should never happen */ + elog(ERROR, "attribute number %d exceeds number of columns %d", + fieldnum, tupDesc->natts); + attr = TupleDescAttr(tupDesc, fieldnum - 1); + + /* Check for dropped column, and force a NULL result if so */ + if (attr->attisdropped) + { + *op->resnull = true; + return; + } + + /* Check for type mismatch --- possible after ALTER COLUMN TYPE? */ + /* As in CheckVarSlotCompatibility, we should but can't check typmod */ + if (op->d.fieldselect.resulttype != attr->atttypid) + ereport(ERROR, + (errcode(ERRCODE_DATATYPE_MISMATCH), + errmsg("attribute %d has wrong type", fieldnum), + errdetail("Table has type %s, but query expects %s.", + format_type_be(attr->atttypid), + format_type_be(op->d.fieldselect.resulttype)))); + + /* heap_getattr needs a HeapTuple not a bare HeapTupleHeader */ + tmptup.t_len = HeapTupleHeaderGetDatumLength(tuple); + tmptup.t_data = tuple; + + /* extract the field */ + *op->resvalue = heap_getattr(&tmptup, + fieldnum, + tupDesc, + op->resnull); + } } /* @@ -2863,7 +3249,7 @@ ExecEvalConvertRowtype(ExprState *state, ExprEvalStep *op, ExprContext *econtext if (op->d.convert_rowtype.map != NULL) { /* Full conversion with attribute rearrangement needed */ - result = do_convert_tuple(&tmptup, op->d.convert_rowtype.map); + result = execute_attr_map_tuple(&tmptup, op->d.convert_rowtype.map); /* Result already has appropriate composite-datum header fields */ *op->resvalue = HeapTupleGetDatum(result); } @@ -3000,7 +3386,7 @@ ExecEvalScalarArrayOp(ExprState *state, ExprEvalStep *op) else { fcinfo->isnull = false; - thisresult = (op->d.scalararrayop.fn_addr) (fcinfo); + thisresult = op->d.scalararrayop.fn_addr(fcinfo); } /* Combine results per OR or AND semantics */ @@ -3424,8 +3810,12 @@ ExecEvalWholeRowVar(ExprState *state, ExprEvalStep *op, ExprContext *econtext) * generates an INT4 NULL regardless of the dropped column type). * If we find a dropped column and cannot verify that case (1) * holds, we have to use the slow path to check (2) for each row. + * + * If vartype is a domain over composite, just look through that + * to the base composite type. 
*/ - var_tupdesc = lookup_rowtype_tupdesc(variable->vartype, -1); + var_tupdesc = lookup_rowtype_tupdesc_domain(variable->vartype, + -1, false); slot_tupdesc = slot->tts_tupleDescriptor; @@ -3441,8 +3831,8 @@ ExecEvalWholeRowVar(ExprState *state, ExprEvalStep *op, ExprContext *econtext) for (i = 0; i < var_tupdesc->natts; i++) { - Form_pg_attribute vattr = var_tupdesc->attrs[i]; - Form_pg_attribute sattr = slot_tupdesc->attrs[i]; + Form_pg_attribute vattr = TupleDescAttr(var_tupdesc, i); + Form_pg_attribute sattr = TupleDescAttr(slot_tupdesc, i); if (vattr->atttypid == sattr->atttypid) continue; /* no worries */ @@ -3508,10 +3898,10 @@ ExecEvalWholeRowVar(ExprState *state, ExprEvalStep *op, ExprContext *econtext) * perhaps other places.) */ if (econtext->ecxt_estate && - variable->varno <= list_length(econtext->ecxt_estate->es_range_table)) + variable->varno <= econtext->ecxt_estate->es_range_table_size) { - RangeTblEntry *rte = rt_fetch(variable->varno, - econtext->ecxt_estate->es_range_table); + RangeTblEntry *rte = exec_rt_fetch(variable->varno, + econtext->ecxt_estate); if (rte->eref) ExecTypeSetColNames(output_tupdesc, rte->eref->colnames); @@ -3540,8 +3930,8 @@ ExecEvalWholeRowVar(ExprState *state, ExprEvalStep *op, ExprContext *econtext) for (i = 0; i < var_tupdesc->natts; i++) { - Form_pg_attribute vattr = var_tupdesc->attrs[i]; - Form_pg_attribute sattr = tupleDesc->attrs[i]; + Form_pg_attribute vattr = TupleDescAttr(var_tupdesc, i); + Form_pg_attribute sattr = TupleDescAttr(tupleDesc, i); if (!vattr->attisdropped) continue; /* already checked non-dropped cols */ @@ -3579,3 +3969,112 @@ ExecEvalWholeRowVar(ExprState *state, ExprEvalStep *op, ExprContext *econtext) *op->resvalue = PointerGetDatum(dtuple); *op->resnull = false; } + +void +ExecEvalSysVar(ExprState *state, ExprEvalStep *op, ExprContext *econtext, + TupleTableSlot *slot) +{ + bool success; + + /* slot_getsysattr has sufficient defenses against bad attnums */ + success = slot_getsysattr(slot, + op->d.var.attnum, + op->resvalue, + op->resnull); + /* this ought to be unreachable, but it's cheap enough to check */ + if (unlikely(!success)) + elog(ERROR, "failed to fetch attribute from slot"); +} + +/* + * Transition value has not been initialized. This is the first non-NULL input + * value for a group. We use it as the initial value for transValue. + */ +void +ExecAggInitGroup(AggState *aggstate, AggStatePerTrans pertrans, AggStatePerGroup pergroup) +{ + FunctionCallInfo fcinfo = &pertrans->transfn_fcinfo; + MemoryContext oldContext; + + /* + * We must copy the datum into aggcontext if it is pass-by-ref. We do not + * need to pfree the old transValue, since it's NULL. (We already checked + * that the agg's input type is binary-compatible with its transtype, so + * straight copy here is OK.) + */ + oldContext = MemoryContextSwitchTo( + aggstate->curaggcontext->ecxt_per_tuple_memory); + pergroup->transValue = datumCopy(fcinfo->arg[1], + pertrans->transtypeByVal, + pertrans->transtypeLen); + pergroup->transValueIsNull = false; + pergroup->noTransValue = false; + MemoryContextSwitchTo(oldContext); +} + +/* + * Ensure that the current transition value is a child of the aggcontext, + * rather than the per-tuple context. + * + * NB: This can change the current memory context. 
+ */ +Datum +ExecAggTransReparent(AggState *aggstate, AggStatePerTrans pertrans, + Datum newValue, bool newValueIsNull, + Datum oldValue, bool oldValueIsNull) +{ + if (!newValueIsNull) + { + MemoryContextSwitchTo(aggstate->curaggcontext->ecxt_per_tuple_memory); + if (DatumIsReadWriteExpandedObject(newValue, + false, + pertrans->transtypeLen) && + MemoryContextGetParent(DatumGetEOHP(newValue)->eoh_context) == CurrentMemoryContext) + /* do nothing */ ; + else + newValue = datumCopy(newValue, + pertrans->transtypeByVal, + pertrans->transtypeLen); + } + if (!oldValueIsNull) + { + if (DatumIsReadWriteExpandedObject(oldValue, + false, + pertrans->transtypeLen)) + DeleteExpandedObject(oldValue); + else + pfree(DatumGetPointer(oldValue)); + } + + return newValue; +} + +/* + * Invoke ordered transition function, with a datum argument. + */ +void +ExecEvalAggOrderedTransDatum(ExprState *state, ExprEvalStep *op, + ExprContext *econtext) +{ + AggStatePerTrans pertrans = op->d.agg_trans.pertrans; + int setno = op->d.agg_trans.setno; + + tuplesort_putdatum(pertrans->sortstates[setno], + *op->resvalue, *op->resnull); +} + +/* + * Invoke ordered transition function, with a tuple argument. + */ +void +ExecEvalAggOrderedTransTuple(ExprState *state, ExprEvalStep *op, + ExprContext *econtext) +{ + AggStatePerTrans pertrans = op->d.agg_trans.pertrans; + int setno = op->d.agg_trans.setno; + + ExecClearTuple(pertrans->sortslot); + pertrans->sortslot->tts_nvalid = pertrans->numInputs; + ExecStoreVirtualTuple(pertrans->sortslot); + tuplesort_puttupleslot(pertrans->sortstates[setno], pertrans->sortslot); +} diff --git a/src/backend/executor/execGrouping.c b/src/backend/executor/execGrouping.c index 07c8852fca..c4d0e04058 100644 --- a/src/backend/executor/execGrouping.c +++ b/src/backend/executor/execGrouping.c @@ -7,7 +7,7 @@ * collation-sensitive, so the code in this file has no support for passing * collation settings through from callers. That may have to change someday. * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * @@ -23,6 +23,7 @@ #include "executor/executor.h" #include "miscadmin.h" #include "utils/lsyscache.h" +#include "utils/hashutils.h" #include "utils/memutils.h" static uint32 TupleHashTableHash(struct tuplehash_hash *tb, const MinimalTuple tuple); @@ -50,173 +51,34 @@ static int TupleHashTableMatch(struct tuplehash_hash *tb, const MinimalTuple tup * Utility routines for grouping tuples together *****************************************************************************/ -/* - * execTuplesMatch - * Return true if two tuples match in all the indicated fields. - * - * This actually implements SQL's notion of "not distinct". Two nulls - * match, a null and a not-null don't match. - * - * slot1, slot2: the tuples to compare (must have same columns!) - * numCols: the number of attributes to be examined - * matchColIdx: array of attribute column numbers - * eqFunctions: array of fmgr lookup info for the equality functions to use - * evalContext: short-term memory context for executing the functions - * - * NB: evalContext is reset each time! - */ -bool -execTuplesMatch(TupleTableSlot *slot1, - TupleTableSlot *slot2, - int numCols, - AttrNumber *matchColIdx, - FmgrInfo *eqfunctions, - MemoryContext evalContext) -{ - MemoryContext oldContext; - bool result; - int i; - - /* Reset and switch into the temp context. 
*/ - MemoryContextReset(evalContext); - oldContext = MemoryContextSwitchTo(evalContext); - - /* - * We cannot report a match without checking all the fields, but we can - * report a non-match as soon as we find unequal fields. So, start - * comparing at the last field (least significant sort key). That's the - * most likely to be different if we are dealing with sorted input. - */ - result = true; - - for (i = numCols; --i >= 0;) - { - AttrNumber att = matchColIdx[i]; - Datum attr1, - attr2; - bool isNull1, - isNull2; - - attr1 = slot_getattr(slot1, att, &isNull1); - - attr2 = slot_getattr(slot2, att, &isNull2); - - if (isNull1 != isNull2) - { - result = false; /* one null and one not; they aren't equal */ - break; - } - - if (isNull1) - continue; /* both are null, treat as equal */ - - /* Apply the type-specific equality function */ - - if (!DatumGetBool(FunctionCall2(&eqfunctions[i], - attr1, attr2))) - { - result = false; /* they aren't equal */ - break; - } - } - - MemoryContextSwitchTo(oldContext); - - return result; -} - -/* - * execTuplesUnequal - * Return true if two tuples are definitely unequal in the indicated - * fields. - * - * Nulls are neither equal nor unequal to anything else. A true result - * is obtained only if there are non-null fields that compare not-equal. - * - * Parameters are identical to execTuplesMatch. - */ -bool -execTuplesUnequal(TupleTableSlot *slot1, - TupleTableSlot *slot2, - int numCols, - AttrNumber *matchColIdx, - FmgrInfo *eqfunctions, - MemoryContext evalContext) -{ - MemoryContext oldContext; - bool result; - int i; - - /* Reset and switch into the temp context. */ - MemoryContextReset(evalContext); - oldContext = MemoryContextSwitchTo(evalContext); - - /* - * We cannot report a match without checking all the fields, but we can - * report a non-match as soon as we find unequal fields. So, start - * comparing at the last field (least significant sort key). That's the - * most likely to be different if we are dealing with sorted input. - */ - result = false; - - for (i = numCols; --i >= 0;) - { - AttrNumber att = matchColIdx[i]; - Datum attr1, - attr2; - bool isNull1, - isNull2; - - attr1 = slot_getattr(slot1, att, &isNull1); - - if (isNull1) - continue; /* can't prove anything here */ - - attr2 = slot_getattr(slot2, att, &isNull2); - - if (isNull2) - continue; /* can't prove anything here */ - - /* Apply the type-specific equality function */ - - if (!DatumGetBool(FunctionCall2(&eqfunctions[i], - attr1, attr2))) - { - result = true; /* they are unequal */ - break; - } - } - - MemoryContextSwitchTo(oldContext); - - return result; -} - - /* * execTuplesMatchPrepare - * Look up the equality functions needed for execTuplesMatch or - * execTuplesUnequal, given an array of equality operator OIDs. - * - * The result is a palloc'd array. 
+ * Build expression that can be evaluated using ExecQual(), returning + * whether an ExprContext's inner/outer tuples are NOT DISTINCT */ -FmgrInfo * -execTuplesMatchPrepare(int numCols, - Oid *eqOperators) +ExprState * +execTuplesMatchPrepare(TupleDesc desc, + int numCols, + AttrNumber *keyColIdx, + Oid *eqOperators, + PlanState *parent) { - FmgrInfo *eqFunctions = (FmgrInfo *) palloc(numCols * sizeof(FmgrInfo)); + Oid *eqFunctions = (Oid *) palloc(numCols * sizeof(Oid)); int i; + ExprState *expr; + + if (numCols == 0) + return NULL; + /* lookup equality functions */ for (i = 0; i < numCols; i++) - { - Oid eq_opr = eqOperators[i]; - Oid eq_function; + eqFunctions[i] = get_opcode(eqOperators[i]); - eq_function = get_opcode(eq_opr); - fmgr_info(eq_function, &eqFunctions[i]); - } + /* build actual expression */ + expr = ExecBuildGroupingEqual(desc, desc, numCols, keyColIdx, eqFunctions, + parent); - return eqFunctions; + return expr; } /* @@ -232,12 +94,12 @@ execTuplesMatchPrepare(int numCols, void execTuplesHashPrepare(int numCols, Oid *eqOperators, - FmgrInfo **eqFunctions, + Oid **eqFuncOids, FmgrInfo **hashFunctions) { int i; - *eqFunctions = (FmgrInfo *) palloc(numCols * sizeof(FmgrInfo)); + *eqFuncOids = (Oid *) palloc(numCols * sizeof(Oid)); *hashFunctions = (FmgrInfo *) palloc(numCols * sizeof(FmgrInfo)); for (i = 0; i < numCols; i++) @@ -254,7 +116,7 @@ execTuplesHashPrepare(int numCols, eq_opr); /* We're not supporting cross-type cases here */ Assert(left_hash_function == right_hash_function); - fmgr_info(eq_function, &(*eqFunctions)[i]); + (*eqFuncOids)[i] = eq_function; fmgr_info(right_hash_function, &(*hashFunctions)[i]); } } @@ -287,8 +149,10 @@ execTuplesHashPrepare(int numCols, * storage that will live as long as the hashtable does. */ TupleHashTable -BuildTupleHashTable(int numCols, AttrNumber *keyColIdx, - FmgrInfo *eqfunctions, +BuildTupleHashTable(PlanState *parent, + TupleDesc inputDesc, + int numCols, AttrNumber *keyColIdx, + Oid *eqfuncoids, FmgrInfo *hashfunctions, long nbuckets, Size additionalsize, MemoryContext tablecxt, MemoryContext tempcxt, @@ -296,6 +160,7 @@ BuildTupleHashTable(int numCols, AttrNumber *keyColIdx, { TupleHashTable hashtable; Size entrysize = sizeof(TupleHashEntryData) + additionalsize; + MemoryContext oldcontext; Assert(nbuckets > 0); @@ -308,14 +173,13 @@ BuildTupleHashTable(int numCols, AttrNumber *keyColIdx, hashtable->numCols = numCols; hashtable->keyColIdx = keyColIdx; hashtable->tab_hash_funcs = hashfunctions; - hashtable->tab_eq_funcs = eqfunctions; hashtable->tablecxt = tablecxt; hashtable->tempcxt = tempcxt; hashtable->entrysize = entrysize; hashtable->tableslot = NULL; /* will be made on first lookup */ hashtable->inputslot = NULL; hashtable->in_hash_funcs = NULL; - hashtable->cur_eq_funcs = NULL; + hashtable->cur_eq_func = NULL; /* * If parallelism is in use, even if the master backend is performing the @@ -326,12 +190,30 @@ BuildTupleHashTable(int numCols, AttrNumber *keyColIdx, * underestimated. */ if (use_variable_hash_iv) - hashtable->hash_iv = hash_uint32(ParallelWorkerNumber); + hashtable->hash_iv = murmurhash32(ParallelWorkerNumber); else hashtable->hash_iv = 0; hashtable->hashtab = tuplehash_create(tablecxt, nbuckets, hashtable); + oldcontext = MemoryContextSwitchTo(hashtable->tablecxt); + + /* + * We copy the input tuple descriptor just for safety --- we assume all + * input tuples will have equivalent descriptors. 
+ */ + hashtable->tableslot = MakeSingleTupleTableSlot(CreateTupleDescCopy(inputDesc)); + + /* build comparator for all columns */ + hashtable->tab_eq_func = ExecBuildGroupingEqual(inputDesc, inputDesc, + numCols, + keyColIdx, eqfuncoids, + parent); + + MemoryContextSwitchTo(oldcontext); + + hashtable->exprcontext = CreateExprContext(parent->state); + return hashtable; } @@ -356,29 +238,13 @@ LookupTupleHashEntry(TupleHashTable hashtable, TupleTableSlot *slot, bool found; MinimalTuple key; - /* If first time through, clone the input slot to make table slot */ - if (hashtable->tableslot == NULL) - { - TupleDesc tupdesc; - - oldContext = MemoryContextSwitchTo(hashtable->tablecxt); - - /* - * We copy the input tuple descriptor just for safety --- we assume - * all input tuples will have equivalent descriptors. - */ - tupdesc = CreateTupleDescCopy(slot->tts_tupleDescriptor); - hashtable->tableslot = MakeSingleTupleTableSlot(tupdesc); - MemoryContextSwitchTo(oldContext); - } - /* Need to run the hash functions in short-lived context */ oldContext = MemoryContextSwitchTo(hashtable->tempcxt); /* set up data needed by hash and match functions */ hashtable->inputslot = slot; hashtable->in_hash_funcs = hashtable->tab_hash_funcs; - hashtable->cur_eq_funcs = hashtable->tab_eq_funcs; + hashtable->cur_eq_func = hashtable->tab_eq_func; key = NULL; /* flag to reference inputslot */ @@ -423,7 +289,7 @@ LookupTupleHashEntry(TupleHashTable hashtable, TupleTableSlot *slot, */ TupleHashEntry FindTupleHashEntry(TupleHashTable hashtable, TupleTableSlot *slot, - FmgrInfo *eqfunctions, + ExprState *eqcomp, FmgrInfo *hashfunctions) { TupleHashEntry entry; @@ -436,7 +302,7 @@ FindTupleHashEntry(TupleHashTable hashtable, TupleTableSlot *slot, /* Set up data needed by hash and match functions */ hashtable->inputslot = slot; hashtable->in_hash_funcs = hashfunctions; - hashtable->cur_eq_funcs = eqfunctions; + hashtable->cur_eq_func = eqcomp; /* Search the hash table */ key = NULL; /* flag to reference inputslot */ @@ -510,16 +376,19 @@ TupleHashTableHash(struct tuplehash_hash *tb, const MinimalTuple tuple) } } - return hashkey; + /* + * The way hashes are combined above, among each other and with the IV, + * doesn't lead to good bit perturbation. As the IV's goal is to lead to + * achieve that, perform a round of hashing of the combined hash - + * resulting in near perfect perturbation. + */ + return murmurhash32(hashkey); } /* * See whether two tuples (presumably of the same hash value) match * * As above, the passed pointers are pointers to TupleHashEntryData. - * - * Also, the caller must select an appropriate memory context for running - * the compare functions. (dynahash.c doesn't change CurrentMemoryContext.) 
*/ static int TupleHashTableMatch(struct tuplehash_hash *tb, const MinimalTuple tuple1, const MinimalTuple tuple2) @@ -527,6 +396,7 @@ TupleHashTableMatch(struct tuplehash_hash *tb, const MinimalTuple tuple1, const TupleTableSlot *slot1; TupleTableSlot *slot2; TupleHashTable hashtable = (TupleHashTable) tb->private_data; + ExprContext *econtext = hashtable->exprcontext; /* * We assume that simplehash.h will only ever call us with the first @@ -541,13 +411,7 @@ TupleHashTableMatch(struct tuplehash_hash *tb, const MinimalTuple tuple1, const slot2 = hashtable->inputslot; /* For crosstype comparisons, the inputslot must be first */ - if (execTuplesMatch(slot2, - slot1, - hashtable->numCols, - hashtable->keyColIdx, - hashtable->cur_eq_funcs, - hashtable->tempcxt)) - return 0; - else - return 1; + econtext->ecxt_innertuple = slot2; + econtext->ecxt_outertuple = slot1; + return !ExecQualAndReset(hashtable->cur_eq_func, econtext); } diff --git a/src/backend/executor/execIndexing.c b/src/backend/executor/execIndexing.c index 89e189fa71..9927ad70e6 100644 --- a/src/backend/executor/execIndexing.c +++ b/src/backend/executor/execIndexing.c @@ -95,7 +95,7 @@ * with the higher XID backs out. * * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * @@ -648,7 +648,7 @@ check_exclusion_or_unique_constraint(Relation heap, Relation index, Oid *constr_procs; uint16 *constr_strats; Oid *index_collations = index->rd_indcollation; - int index_natts = index->rd_index->indnatts; + int indnkeyatts = IndexRelationGetNumberOfKeyAttributes(index); IndexScanDesc index_scan; HeapTuple tup; ScanKeyData scankeys[INDEX_MAX_KEYS]; @@ -675,7 +675,7 @@ check_exclusion_or_unique_constraint(Relation heap, Relation index, * If any of the input values are NULL, the constraint check is assumed to * pass (i.e., we assume the operators are strict). */ - for (i = 0; i < index_natts; i++) + for (i = 0; i < indnkeyatts; i++) { if (isnull[i]) return true; @@ -687,7 +687,7 @@ check_exclusion_or_unique_constraint(Relation heap, Relation index, */ InitDirtySnapshot(DirtySnapshot); - for (i = 0; i < index_natts; i++) + for (i = 0; i < indnkeyatts; i++) { ScanKeyEntryInitialize(&scankeys[i], 0, @@ -719,8 +719,8 @@ check_exclusion_or_unique_constraint(Relation heap, Relation index, retry: conflict = false; found_self = false; - index_scan = index_beginscan(heap, index, &DirtySnapshot, index_natts, 0); - index_rescan(index_scan, scankeys, index_natts, NULL, 0); + index_scan = index_beginscan(heap, index, &DirtySnapshot, indnkeyatts, 0); + index_rescan(index_scan, scankeys, indnkeyatts, NULL, 0); while ((tup = index_getnext(index_scan, ForwardScanDirection)) != NULL) @@ -750,7 +750,7 @@ check_exclusion_or_unique_constraint(Relation heap, Relation index, * Extract the index column values and isnull flags from the existing * tuple. 
*/ - ExecStoreTuple(tup, existing_slot, InvalidBuffer, false); + ExecStoreHeapTuple(tup, existing_slot, false); FormIndexDatum(indexInfo, existing_slot, estate, existing_values, existing_isnull); @@ -881,10 +881,10 @@ index_recheck_constraint(Relation index, Oid *constr_procs, Datum *existing_values, bool *existing_isnull, Datum *new_values) { - int index_natts = index->rd_index->indnatts; + int indnkeyatts = IndexRelationGetNumberOfKeyAttributes(index); int i; - for (i = 0; i < index_natts; i++) + for (i = 0; i < indnkeyatts; i++) { /* Assume the exclusion operators are strict */ if (existing_isnull[i]) diff --git a/src/backend/executor/execJunk.c b/src/backend/executor/execJunk.c index a422327c88..57d74e57c1 100644 --- a/src/backend/executor/execJunk.c +++ b/src/backend/executor/execJunk.c @@ -3,7 +3,7 @@ * execJunk.c * Junk attribute support stuff.... * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * @@ -168,7 +168,7 @@ ExecInitJunkFilterConversion(List *targetList, t = list_head(targetList); for (i = 0; i < cleanLength; i++) { - if (cleanTupType->attrs[i]->attisdropped) + if (TupleDescAttr(cleanTupType, i)->attisdropped) continue; /* map entry is already zero */ for (;;) { diff --git a/src/backend/executor/execMain.c b/src/backend/executor/execMain.c index 6671a25ffb..ba156f8c5f 100644 --- a/src/backend/executor/execMain.c +++ b/src/backend/executor/execMain.c @@ -26,7 +26,7 @@ * before ExecutorEnd. This can be omitted only in case of EXPLAIN, * which should also omit ExecutorRun. * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * @@ -42,12 +42,13 @@ #include "access/transam.h" #include "access/xact.h" #include "catalog/namespace.h" -#include "catalog/partition.h" #include "catalog/pg_publication.h" #include "commands/matview.h" #include "commands/trigger.h" #include "executor/execdebug.h" +#include "executor/nodeSubplan.h" #include "foreign/fdwapi.h" +#include "jit/jit.h" #include "mb/pg_wchar.h" #include "miscadmin.h" #include "optimizer/clauses.h" @@ -59,6 +60,7 @@ #include "utils/acl.h" #include "utils/lsyscache.h" #include "utils/memutils.h" +#include "utils/partcache.h" #include "utils/rls.h" #include "utils/ruleutils.h" #include "utils/snapmgr.h" @@ -97,14 +99,8 @@ static char *ExecBuildSlotValueDescription(Oid reloid, TupleDesc tupdesc, Bitmapset *modifiedCols, int maxfieldlen); -static char *ExecBuildSlotPartitionKeyDescription(Relation rel, - Datum *values, - bool *isnull, - int maxfieldlen); static void EvalPlanQualStart(EPQState *epqstate, EState *parentestate, Plan *planTree); -static void ExecPartitionCheck(ResultRelInfo *resultRelInfo, - TupleTableSlot *slot, EState *estate); /* * Note that GetUpdatedColumns() also exists in commands/trigger.c. There does @@ -113,9 +109,9 @@ static void ExecPartitionCheck(ResultRelInfo *resultRelInfo, * to be changed, however. 
*/ #define GetInsertedColumns(relinfo, estate) \ - (rt_fetch((relinfo)->ri_RangeTableIndex, (estate)->es_range_table)->insertedCols) + (exec_rt_fetch((relinfo)->ri_RangeTableIndex, estate)->insertedCols) #define GetUpdatedColumns(relinfo, estate) \ - (rt_fetch((relinfo)->ri_RangeTableIndex, (estate)->es_range_table)->updatedCols) + (exec_rt_fetch((relinfo)->ri_RangeTableIndex, estate)->updatedCols) /* end of local decls */ @@ -194,9 +190,14 @@ standard_ExecutorStart(QueryDesc *queryDesc, int eflags) */ estate->es_param_list_info = queryDesc->params; - if (queryDesc->plannedstmt->nParamExec > 0) + if (queryDesc->plannedstmt->paramExecTypes != NIL) + { + int nParamExec; + + nParamExec = list_length(queryDesc->plannedstmt->paramExecTypes); estate->es_param_exec_vals = (ParamExecData *) - palloc0(queryDesc->plannedstmt->nParamExec * sizeof(ParamExecData)); + palloc0(nParamExec * sizeof(ParamExecData)); + } estate->es_sourceText = queryDesc->sourceText; @@ -249,11 +250,7 @@ standard_ExecutorStart(QueryDesc *queryDesc, int eflags) estate->es_crosscheck_snapshot = RegisterSnapshot(queryDesc->crosscheck_snapshot); estate->es_top_eflags = eflags; estate->es_instrument = queryDesc->instrument_options; - - /* - * Initialize the plan state tree - */ - InitPlan(queryDesc, eflags); + estate->es_jit_flags = queryDesc->plannedstmt->jitFlags; /* * Set up an AFTER-trigger statement context, unless told not to, or @@ -262,6 +259,11 @@ standard_ExecutorStart(QueryDesc *queryDesc, int eflags) if (!(eflags & (EXEC_FLAG_SKIP_TRIGGERS | EXEC_FLAG_EXPLAIN_ONLY))) AfterTriggerBeginQuery(); + /* + * Initialize the plan state tree + */ + InitPlan(queryDesc, eflags); + MemoryContextSwitchTo(oldcontext); } @@ -348,7 +350,7 @@ standard_ExecutorRun(QueryDesc *queryDesc, queryDesc->plannedstmt->hasReturning); if (sendTuples) - (*dest->rStartup) (dest, operation, queryDesc->tupDesc); + dest->rStartup(dest, operation, queryDesc->tupDesc); /* * run plan @@ -374,7 +376,7 @@ standard_ExecutorRun(QueryDesc *queryDesc, * shutdown tuple receiver, if we started it */ if (sendTuples) - (*dest->rShutdown) (dest); + dest->rShutdown(dest); if (queryDesc->totaltime) InstrStopNode(queryDesc->totaltime, estate->es_processed); @@ -580,7 +582,7 @@ ExecCheckRTPerms(List *rangeTable, bool ereport_on_violation) { Assert(rte->rtekind == RTE_RELATION); if (ereport_on_violation) - aclcheck_error(ACLCHECK_NO_PRIV, ACL_KIND_CLASS, + aclcheck_error(ACLCHECK_NO_PRIV, get_relkind_objtype(get_rel_relkind(rte->relid)), get_rel_name(rte->relid)); return false; } @@ -821,14 +823,12 @@ InitPlan(QueryDesc *queryDesc, int eflags) /* * initialize the node's execution state */ - estate->es_range_table = rangeTable; + ExecInitRangeTable(estate, rangeTable); + estate->es_plannedstmt = plannedstmt; /* - * initialize result relation stuff, and open/lock the result rels. - * - * We must do this before initializing the plan tree, else we might try to - * do a lock upgrade if a result rel is also a source rel. + * Initialize ResultRelInfo data structures, and open the result rels. 
*/ if (plannedstmt->resultRelations) { @@ -843,12 +843,10 @@ InitPlan(QueryDesc *queryDesc, int eflags) foreach(l, resultRelations) { Index resultRelationIndex = lfirst_int(l); - Oid resultRelationOid; Relation resultRelation; - resultRelationOid = getrelid(resultRelationIndex, rangeTable); - resultRelation = heap_open(resultRelationOid, RowExclusiveLock); - + resultRelation = ExecGetRangeTableRelation(estate, + resultRelationIndex); InitResultRelInfo(resultRelInfo, resultRelation, resultRelationIndex, @@ -858,39 +856,32 @@ InitPlan(QueryDesc *queryDesc, int eflags) } estate->es_result_relations = resultRelInfos; estate->es_num_result_relations = numResultRelations; + /* es_result_relation_info is NULL except when within ModifyTable */ estate->es_result_relation_info = NULL; /* - * In the partitioned result relation case, lock the non-leaf result - * relations too. A subset of these are the roots of respective - * partitioned tables, for which we also allocate ResulRelInfos. + * In the partitioned result relation case, also build ResultRelInfos + * for all the partitioned table roots, because we will need them to + * fire statement-level triggers, if any. */ - estate->es_root_result_relations = NULL; - estate->es_num_root_result_relations = 0; - if (plannedstmt->nonleafResultRelations) + if (plannedstmt->rootResultRelations) { int num_roots = list_length(plannedstmt->rootResultRelations); - /* - * Firstly, build ResultRelInfos for all the partitioned table - * roots, because we will need them to fire the statement-level - * triggers, if any. - */ resultRelInfos = (ResultRelInfo *) palloc(num_roots * sizeof(ResultRelInfo)); resultRelInfo = resultRelInfos; foreach(l, plannedstmt->rootResultRelations) { Index resultRelIndex = lfirst_int(l); - Oid resultRelOid; Relation resultRelDesc; - resultRelOid = getrelid(resultRelIndex, rangeTable); - resultRelDesc = heap_open(resultRelOid, RowExclusiveLock); + resultRelDesc = ExecGetRangeTableRelation(estate, + resultRelIndex); InitResultRelInfo(resultRelInfo, resultRelDesc, - lfirst_int(l), + resultRelIndex, NULL, estate->es_instrument); resultRelInfo++; @@ -898,18 +889,11 @@ InitPlan(QueryDesc *queryDesc, int eflags) estate->es_root_result_relations = resultRelInfos; estate->es_num_root_result_relations = num_roots; - - /* Simply lock the rest of them. */ - foreach(l, plannedstmt->nonleafResultRelations) - { - Index resultRelIndex = lfirst_int(l); - - /* We locked the roots above. */ - if (!list_member_int(plannedstmt->rootResultRelations, - resultRelIndex)) - LockRelationOid(getrelid(resultRelIndex, rangeTable), - RowExclusiveLock); - } + } + else + { + estate->es_root_result_relations = NULL; + estate->es_num_root_result_relations = 0; } } else @@ -925,71 +909,68 @@ InitPlan(QueryDesc *queryDesc, int eflags) } /* - * Similarly, we have to lock relations selected FOR [KEY] UPDATE/SHARE - * before we initialize the plan tree, else we'd be risking lock upgrades. - * While we are at it, build the ExecRowMark list. Any partitioned child - * tables are ignored here (because isParent=true) and will be locked by - * the first Append or MergeAppend node that references them. (Note that - * the RowMarks corresponding to partitioned child tables are present in - * the same list as the rest, i.e., plannedstmt->rowMarks.) + * Next, build the ExecRowMark array from the PlanRowMark(s), if any. 
*/ - estate->es_rowMarks = NIL; - foreach(l, plannedstmt->rowMarks) + if (plannedstmt->rowMarks) { - PlanRowMark *rc = (PlanRowMark *) lfirst(l); - Oid relid; - Relation relation; - ExecRowMark *erm; + estate->es_rowmarks = (ExecRowMark **) + palloc0(estate->es_range_table_size * sizeof(ExecRowMark *)); + foreach(l, plannedstmt->rowMarks) + { + PlanRowMark *rc = (PlanRowMark *) lfirst(l); + Oid relid; + Relation relation; + ExecRowMark *erm; - /* ignore "parent" rowmarks; they are irrelevant at runtime */ - if (rc->isParent) - continue; + /* ignore "parent" rowmarks; they are irrelevant at runtime */ + if (rc->isParent) + continue; - /* get relation's OID (will produce InvalidOid if subquery) */ - relid = getrelid(rc->rti, rangeTable); + /* get relation's OID (will produce InvalidOid if subquery) */ + relid = exec_rt_fetch(rc->rti, estate)->relid; - /* - * If you change the conditions under which rel locks are acquired - * here, be sure to adjust ExecOpenScanRelation to match. - */ - switch (rc->markType) - { - case ROW_MARK_EXCLUSIVE: - case ROW_MARK_NOKEYEXCLUSIVE: - case ROW_MARK_SHARE: - case ROW_MARK_KEYSHARE: - relation = heap_open(relid, RowShareLock); - break; - case ROW_MARK_REFERENCE: - relation = heap_open(relid, AccessShareLock); - break; - case ROW_MARK_COPY: - /* no physical table access is required */ - relation = NULL; - break; - default: - elog(ERROR, "unrecognized markType: %d", rc->markType); - relation = NULL; /* keep compiler quiet */ - break; - } + /* open relation, if we need to access it for this mark type */ + switch (rc->markType) + { + case ROW_MARK_EXCLUSIVE: + case ROW_MARK_NOKEYEXCLUSIVE: + case ROW_MARK_SHARE: + case ROW_MARK_KEYSHARE: + case ROW_MARK_REFERENCE: + relation = ExecGetRangeTableRelation(estate, rc->rti); + break; + case ROW_MARK_COPY: + /* no physical table access is required */ + relation = NULL; + break; + default: + elog(ERROR, "unrecognized markType: %d", rc->markType); + relation = NULL; /* keep compiler quiet */ + break; + } - /* Check that relation is a legal target for marking */ - if (relation) - CheckValidRowMarkRel(relation, rc->markType); - - erm = (ExecRowMark *) palloc(sizeof(ExecRowMark)); - erm->relation = relation; - erm->relid = relid; - erm->rti = rc->rti; - erm->prti = rc->prti; - erm->rowmarkId = rc->rowmarkId; - erm->markType = rc->markType; - erm->strength = rc->strength; - erm->waitPolicy = rc->waitPolicy; - erm->ermActive = false; - ItemPointerSetInvalid(&(erm->curCtid)); - erm->ermExtra = NULL; - estate->es_rowMarks = lappend(estate->es_rowMarks, erm); + /* Check that relation is a legal target for marking */ + if (relation) + CheckValidRowMarkRel(relation, rc->markType); + + erm = (ExecRowMark *) palloc(sizeof(ExecRowMark)); + erm->relation = relation; + erm->relid = relid; + erm->rti = rc->rti; + erm->prti = rc->prti; + erm->rowmarkId = rc->rowmarkId; + erm->markType = rc->markType; + erm->strength = rc->strength; + erm->waitPolicy = rc->waitPolicy; + erm->ermActive = false; + ItemPointerSetInvalid(&(erm->curCtid)); + erm->ermExtra = NULL; + + Assert(erm->rti > 0 && erm->rti <= estate->es_range_table_size && + estate->es_rowmarks[erm->rti - 1] == NULL); + + estate->es_rowmarks[erm->rti - 1] = erm; + } } /* @@ -1074,7 +1055,7 @@ InitPlan(QueryDesc *queryDesc, int eflags) j = ExecInitJunkFilter(planstate->plan->targetlist, tupType->tdhasoid, - ExecInitExtraTupleSlot(estate)); + ExecInitExtraTupleSlot(estate, NULL)); estate->es_junkFilter = j; /* Want to return the cleaned tuple type */ @@ -1096,8 +1077,9 @@ 
InitPlan(QueryDesc *queryDesc, int eflags) * CheckValidRowMarkRel. */ void -CheckValidResultRel(Relation resultRel, CmdType operation) +CheckValidResultRel(ResultRelInfo *resultRelInfo, CmdType operation) { + Relation resultRel = resultRelInfo->ri_RelationDesc; TriggerDesc *trigDesc = resultRel->trigdesc; FdwRoutine *fdwroutine; @@ -1168,7 +1150,7 @@ CheckValidResultRel(Relation resultRel, CmdType operation) break; case RELKIND_FOREIGN_TABLE: /* Okay only if the FDW supports it */ - fdwroutine = GetFdwRoutineForRelation(resultRel, false); + fdwroutine = resultRelInfo->ri_FdwRoutine; switch (operation) { case CMD_INSERT: @@ -1334,11 +1316,15 @@ InitResultRelInfo(ResultRelInfo *resultRelInfo, resultRelInfo->ri_FdwRoutine = GetFdwRoutineForRelation(resultRelationDesc, true); else resultRelInfo->ri_FdwRoutine = NULL; + + /* The following fields are set later if needed */ resultRelInfo->ri_FdwState = NULL; resultRelInfo->ri_usesFdwDirectModify = false; resultRelInfo->ri_ConstraintExprs = NULL; resultRelInfo->ri_junkFilter = NULL; resultRelInfo->ri_projectReturning = NULL; + resultRelInfo->ri_onConflictArbiterIndexes = NIL; + resultRelInfo->ri_onConflict = NULL; /* * Partition constraint, which also includes the partition constraint of @@ -1357,23 +1343,28 @@ InitResultRelInfo(ResultRelInfo *resultRelInfo, resultRelInfo->ri_PartitionCheck = partition_check; resultRelInfo->ri_PartitionRoot = partition_root; + resultRelInfo->ri_PartitionReadyForRouting = false; } /* - * ExecGetTriggerResultRel - * - * Get a ResultRelInfo for a trigger target relation. Most of the time, - * triggers are fired on one of the result relations of the query, and so - * we can just return a member of the es_result_relations array. (Note: in - * self-join situations there might be multiple members with the same OID; - * if so it doesn't matter which one we pick.) However, it is sometimes - * necessary to fire triggers on other relations; this happens mainly when an - * RI update trigger queues additional triggers on other relations, which will - * be processed in the context of the outer query. For efficiency's sake, - * we want to have a ResultRelInfo for those triggers too; that can avoid - * repeated re-opening of the relation. (It also provides a way for EXPLAIN - * ANALYZE to report the runtimes of such triggers.) So we make additional - * ResultRelInfo's as needed, and save them in es_trig_target_relations. + * ExecGetTriggerResultRel + * Get a ResultRelInfo for a trigger target relation. + * + * Most of the time, triggers are fired on one of the result relations of the + * query, and so we can just return a member of the es_result_relations array, + * or the es_root_result_relations array (if any), or the + * es_tuple_routing_result_relations list (if any). (Note: in self-join + * situations there might be multiple members with the same OID; if so it + * doesn't matter which one we pick.) + * + * However, it is sometimes necessary to fire triggers on other relations; + * this happens mainly when an RI update trigger queues additional triggers + * on other relations, which will be processed in the context of the outer + * query. For efficiency's sake, we want to have a ResultRelInfo for those + * triggers too; that can avoid repeated re-opening of the relation. (It + * also provides a way for EXPLAIN ANALYZE to report the runtimes of such + * triggers.) So we make additional ResultRelInfo's as needed, and save them + * in es_trig_target_relations. 
*/ ResultRelInfo * ExecGetTriggerResultRel(EState *estate, Oid relid) @@ -1394,6 +1385,28 @@ ExecGetTriggerResultRel(EState *estate, Oid relid) rInfo++; nr--; } + /* Second, search through the root result relations, if any */ + rInfo = estate->es_root_result_relations; + nr = estate->es_num_root_result_relations; + while (nr > 0) + { + if (RelationGetRelid(rInfo->ri_RelationDesc) == relid) + return rInfo; + rInfo++; + nr--; + } + + /* + * Third, search through the result relations that were created during + * tuple routing, if any. + */ + foreach(l, estate->es_tuple_routing_result_relations) + { + rInfo = (ResultRelInfo *) lfirst(l); + if (RelationGetRelid(rInfo->ri_RelationDesc) == relid) + return rInfo; + } + /* Nope, but maybe we already made an extra ResultRelInfo for it */ foreach(l, estate->es_trig_target_relations) { @@ -1445,8 +1458,19 @@ ExecCleanUpTriggerState(EState *estate) { ResultRelInfo *resultRelInfo = (ResultRelInfo *) lfirst(l); - /* Close indices and then the relation itself */ - ExecCloseIndices(resultRelInfo); + /* + * Assert this is a "dummy" ResultRelInfo, see above. Otherwise we + * might be issuing a duplicate close against a Relation opened by + * ExecGetRangeTableRelation. + */ + Assert(resultRelInfo->ri_RangeTableIndex == 0); + + /* + * Since ExecGetTriggerResultRel doesn't call ExecOpenIndices for + * these rels, we needn't call ExecCloseIndices either. + */ + Assert(resultRelInfo->ri_NumIndices == 0); + heap_close(resultRelInfo->ri_RelationDesc, NoLock); } } @@ -1459,8 +1483,8 @@ ExecCleanUpTriggerState(EState *estate) * going to be stored into a relation that has OIDs. In other contexts * we are free to choose whether to leave space for OIDs in result tuples * (we generally don't want to, but we do if a physical-tlist optimization - * is possible). This routine checks the plan context and returns TRUE if the - * choice is forced, FALSE if the choice is not forced. In the TRUE case, + * is possible). This routine checks the plan context and returns true if the + * choice is forced, false if the choice is not forced. In the true case, * *hasoids is set to the required value. * * One reason this is ugly is that all plan nodes in the plan tree will emit @@ -1569,7 +1593,8 @@ static void ExecEndPlan(PlanState *planstate, EState *estate) { ResultRelInfo *resultRelInfo; - int i; + Index num_relations; + Index i; ListCell *l; /* @@ -1596,39 +1621,29 @@ ExecEndPlan(PlanState *planstate, EState *estate) ExecResetTupleTable(estate->es_tupleTable, false); /* - * close the result relation(s) if any, but hold locks until xact commit. + * close indexes of result relation(s) if any. (Rels themselves get + * closed next.) */ resultRelInfo = estate->es_result_relations; for (i = estate->es_num_result_relations; i > 0; i--) { - /* Close indices and then the relation itself */ ExecCloseIndices(resultRelInfo); - heap_close(resultRelInfo->ri_RelationDesc, NoLock); resultRelInfo++; } - /* Close the root target relation(s). */ - resultRelInfo = estate->es_root_result_relations; - for (i = estate->es_num_root_result_relations; i > 0; i--) + /* + * close whatever rangetable Relations have been opened. We do not + * release any locks we might hold on those rels. 
+ */ + num_relations = estate->es_range_table_size; + for (i = 0; i < num_relations; i++) { - heap_close(resultRelInfo->ri_RelationDesc, NoLock); - resultRelInfo++; + if (estate->es_relations[i]) + heap_close(estate->es_relations[i], NoLock); } /* likewise close any trigger target relations */ ExecCleanUpTriggerState(estate); - - /* - * close any relations selected FOR [KEY] UPDATE/SHARE, again keeping - * locks - */ - foreach(l, estate->es_rowMarks) - { - ExecRowMark *erm = (ExecRowMark *) lfirst(l); - - if (erm->relation) - heap_close(erm->relation, NoLock); - } } /* ---------------------------------------------------------------- @@ -1669,13 +1684,12 @@ ExecutePlan(EState *estate, /* * If the plan might potentially be executed multiple times, we must force - * it to run without parallelism, because we might exit early. Also - * disable parallelism when writing into a relation, because no database - * changes are allowed in parallel mode. + * it to run without parallelism, because we might exit early. */ - if (!execute_once || dest->mydest == DestIntoRel) + if (!execute_once) use_parallel_mode = false; + estate->es_use_parallel_mode = use_parallel_mode; if (use_parallel_mode) EnterParallelMode(); @@ -1698,8 +1712,12 @@ ExecutePlan(EState *estate, */ if (TupIsNull(slot)) { - /* Allow nodes to release or shut down resources. */ - (void) ExecShutdownNode(planstate); + /* + * If we know we won't need to back up, we can release resources + * at this point. + */ + if (!(estate->es_top_eflags & EXEC_FLAG_BACKWARD)) + (void) ExecShutdownNode(planstate); break; } @@ -1725,7 +1743,7 @@ ExecutePlan(EState *estate, * has closed and no more tuples can be sent. If that's the case, * end the loop. */ - if (!((*dest->receiveSlot) (slot, dest))) + if (!dest->receiveSlot(slot, dest)) break; } @@ -1745,8 +1763,12 @@ ExecutePlan(EState *estate, current_tuple_count++; if (numberTuples && numberTuples == current_tuple_count) { - /* Allow nodes to release or shut down resources. */ - (void) ExecShutdownNode(planstate); + /* + * If we know we won't need to back up, we can release resources + * at this point. + */ + if (!(estate->es_top_eflags & EXEC_FLAG_BACKWARD)) + (void) ExecShutdownNode(planstate); break; } } @@ -1822,17 +1844,17 @@ ExecRelCheck(ResultRelInfo *resultRelInfo, /* * ExecPartitionCheck --- check that tuple meets the partition constraint. + * + * Returns true if it meets the partition constraint. If the constraint + * fails and we're asked to emit to error, do so and don't return; otherwise + * return false. */ -static void +bool ExecPartitionCheck(ResultRelInfo *resultRelInfo, TupleTableSlot *slot, - EState *estate) + EState *estate, bool emitError) { - Relation rel = resultRelInfo->ri_RelationDesc; - TupleDesc tupdesc = RelationGetDescr(rel); - Bitmapset *modifiedCols; - Bitmapset *insertedCols; - Bitmapset *updatedCols; ExprContext *econtext; + bool success; /* * If first time through, build expression state tree for the partition @@ -1859,56 +1881,81 @@ ExecPartitionCheck(ResultRelInfo *resultRelInfo, TupleTableSlot *slot, * As in case of the catalogued constraints, we treat a NULL result as * success here, not a failure. 
*/ - if (!ExecCheck(resultRelInfo->ri_PartitionCheckExpr, econtext)) + success = ExecCheck(resultRelInfo->ri_PartitionCheckExpr, econtext); + + /* if asked to emit error, don't actually return on failure */ + if (!success && emitError) + ExecPartitionCheckEmitError(resultRelInfo, slot, estate); + + return success; +} + +/* + * ExecPartitionCheckEmitError - Form and emit an error message after a failed + * partition constraint check. + */ +void +ExecPartitionCheckEmitError(ResultRelInfo *resultRelInfo, + TupleTableSlot *slot, + EState *estate) +{ + Relation rel = resultRelInfo->ri_RelationDesc; + Relation orig_rel = rel; + TupleDesc tupdesc = RelationGetDescr(rel); + char *val_desc; + Bitmapset *modifiedCols; + Bitmapset *insertedCols; + Bitmapset *updatedCols; + + /* + * Need to first convert the tuple to the root partitioned table's row + * type. For details, check similar comments in ExecConstraints(). + */ + if (resultRelInfo->ri_PartitionRoot) { - char *val_desc; - Relation orig_rel = rel; + TupleDesc old_tupdesc = RelationGetDescr(rel); + AttrNumber *map; - /* See the comment above. */ - if (resultRelInfo->ri_PartitionRoot) - { - HeapTuple tuple = ExecFetchSlotTuple(slot); - TupleDesc old_tupdesc = RelationGetDescr(rel); - TupleConversionMap *map; - - rel = resultRelInfo->ri_PartitionRoot; - tupdesc = RelationGetDescr(rel); - /* a reverse map */ - map = convert_tuples_by_name(old_tupdesc, tupdesc, - gettext_noop("could not convert row type")); - if (map != NULL) - { - tuple = do_convert_tuple(tuple, map); - ExecSetSlotDescriptor(slot, tupdesc); - ExecStoreTuple(tuple, slot, InvalidBuffer, false); - } - } + rel = resultRelInfo->ri_PartitionRoot; + tupdesc = RelationGetDescr(rel); + /* a reverse map */ + map = convert_tuples_by_name_map_if_req(old_tupdesc, tupdesc, + gettext_noop("could not convert row type")); - insertedCols = GetInsertedColumns(resultRelInfo, estate); - updatedCols = GetUpdatedColumns(resultRelInfo, estate); - modifiedCols = bms_union(insertedCols, updatedCols); - val_desc = ExecBuildSlotValueDescription(RelationGetRelid(rel), - slot, - tupdesc, - modifiedCols, - 64); - ereport(ERROR, - (errcode(ERRCODE_CHECK_VIOLATION), - errmsg("new row for relation \"%s\" violates partition constraint", - RelationGetRelationName(orig_rel)), - val_desc ? errdetail("Failing row contains %s.", val_desc) : 0)); + /* + * Partition-specific slot's tupdesc can't be changed, so allocate a + * new one. + */ + if (map != NULL) + slot = execute_attr_map_slot(map, slot, + MakeTupleTableSlot(tupdesc)); } + + insertedCols = GetInsertedColumns(resultRelInfo, estate); + updatedCols = GetUpdatedColumns(resultRelInfo, estate); + modifiedCols = bms_union(insertedCols, updatedCols); + val_desc = ExecBuildSlotValueDescription(RelationGetRelid(rel), + slot, + tupdesc, + modifiedCols, + 64); + ereport(ERROR, + (errcode(ERRCODE_CHECK_VIOLATION), + errmsg("new row for relation \"%s\" violates partition constraint", + RelationGetRelationName(orig_rel)), + val_desc ? errdetail("Failing row contains %s.", val_desc) : 0)); } /* * ExecConstraints - check constraints of the tuple in 'slot' * - * This checks the traditional NOT NULL and check constraints, as well as - * the partition constraint, if any. + * This checks the traditional NOT NULL and check constraints. + * + * The partition constraint is *NOT* checked. * * Note: 'slot' contains the tuple to check the constraints of, which may * have been converted from the original input tuple after tuple routing. 
- * 'resultRelInfo' is the original result relation, before tuple routing. + * 'resultRelInfo' is the final result relation, after tuple routing. */ void ExecConstraints(ResultRelInfo *resultRelInfo, @@ -1930,8 +1977,9 @@ ExecConstraints(ResultRelInfo *resultRelInfo, for (attrChk = 1; attrChk <= natts; attrChk++) { - if (tupdesc->attrs[attrChk - 1]->attnotnull && - slot_attisnull(slot, attrChk)) + Form_pg_attribute att = TupleDescAttr(tupdesc, attrChk - 1); + + if (att->attnotnull && slot_attisnull(slot, attrChk)) { char *val_desc; Relation orig_rel = rel; @@ -1946,20 +1994,22 @@ ExecConstraints(ResultRelInfo *resultRelInfo, */ if (resultRelInfo->ri_PartitionRoot) { - HeapTuple tuple = ExecFetchSlotTuple(slot); - TupleConversionMap *map; + AttrNumber *map; rel = resultRelInfo->ri_PartitionRoot; tupdesc = RelationGetDescr(rel); /* a reverse map */ - map = convert_tuples_by_name(orig_tupdesc, tupdesc, - gettext_noop("could not convert row type")); + map = convert_tuples_by_name_map_if_req(orig_tupdesc, + tupdesc, + gettext_noop("could not convert row type")); + + /* + * Partition-specific slot's tupdesc can't be changed, so + * allocate a new one. + */ if (map != NULL) - { - tuple = do_convert_tuple(tuple, map); - ExecSetSlotDescriptor(slot, tupdesc); - ExecStoreTuple(tuple, slot, InvalidBuffer, false); - } + slot = execute_attr_map_slot(map, slot, + MakeTupleTableSlot(tupdesc)); } insertedCols = GetInsertedColumns(resultRelInfo, estate); @@ -1974,7 +2024,7 @@ ExecConstraints(ResultRelInfo *resultRelInfo, ereport(ERROR, (errcode(ERRCODE_NOT_NULL_VIOLATION), errmsg("null value in column \"%s\" violates not-null constraint", - NameStr(orig_tupdesc->attrs[attrChk - 1]->attname)), + NameStr(att->attname)), val_desc ? errdetail("Failing row contains %s.", val_desc) : 0, errtablecol(orig_rel, attrChk))); } @@ -1993,21 +2043,23 @@ ExecConstraints(ResultRelInfo *resultRelInfo, /* See the comment above. */ if (resultRelInfo->ri_PartitionRoot) { - HeapTuple tuple = ExecFetchSlotTuple(slot); TupleDesc old_tupdesc = RelationGetDescr(rel); - TupleConversionMap *map; + AttrNumber *map; rel = resultRelInfo->ri_PartitionRoot; tupdesc = RelationGetDescr(rel); /* a reverse map */ - map = convert_tuples_by_name(old_tupdesc, tupdesc, - gettext_noop("could not convert row type")); + map = convert_tuples_by_name_map_if_req(old_tupdesc, + tupdesc, + gettext_noop("could not convert row type")); + + /* + * Partition-specific slot's tupdesc can't be changed, so + * allocate a new one. + */ if (map != NULL) - { - tuple = do_convert_tuple(tuple, map); - ExecSetSlotDescriptor(slot, tupdesc); - ExecStoreTuple(tuple, slot, InvalidBuffer, false); - } + slot = execute_attr_map_slot(map, slot, + MakeTupleTableSlot(tupdesc)); } insertedCols = GetInsertedColumns(resultRelInfo, estate); @@ -2026,12 +2078,8 @@ ExecConstraints(ResultRelInfo *resultRelInfo, errtableconstraint(orig_rel, failed))); } } - - if (resultRelInfo->ri_PartitionCheck) - ExecPartitionCheck(resultRelInfo, slot, estate); } - /* * ExecWithCheckOptions -- check that tuple satisfies any WITH CHECK OPTIONs * of the specified kind. @@ -2103,21 +2151,23 @@ ExecWithCheckOptions(WCOKind kind, ResultRelInfo *resultRelInfo, /* See the comment in ExecConstraints(). 
*/ if (resultRelInfo->ri_PartitionRoot) { - HeapTuple tuple = ExecFetchSlotTuple(slot); TupleDesc old_tupdesc = RelationGetDescr(rel); - TupleConversionMap *map; + AttrNumber *map; rel = resultRelInfo->ri_PartitionRoot; tupdesc = RelationGetDescr(rel); /* a reverse map */ - map = convert_tuples_by_name(old_tupdesc, tupdesc, - gettext_noop("could not convert row type")); + map = convert_tuples_by_name_map_if_req(old_tupdesc, + tupdesc, + gettext_noop("could not convert row type")); + + /* + * Partition-specific slot's tupdesc can't be changed, + * so allocate a new one. + */ if (map != NULL) - { - tuple = do_convert_tuple(tuple, map); - ExecSetSlotDescriptor(slot, tupdesc); - ExecStoreTuple(tuple, slot, InvalidBuffer, false); - } + slot = execute_attr_map_slot(map, slot, + MakeTupleTableSlot(tupdesc)); } insertedCols = GetInsertedColumns(resultRelInfo, estate); @@ -2241,9 +2291,10 @@ ExecBuildSlotValueDescription(Oid reloid, bool column_perm = false; char *val; int vallen; + Form_pg_attribute att = TupleDescAttr(tupdesc, i); /* ignore dropped columns */ - if (tupdesc->attrs[i]->attisdropped) + if (att->attisdropped) continue; if (!table_perm) @@ -2254,9 +2305,9 @@ ExecBuildSlotValueDescription(Oid reloid, * for the column. If not, omit this column from the error * message. */ - aclresult = pg_attribute_aclcheck(reloid, tupdesc->attrs[i]->attnum, + aclresult = pg_attribute_aclcheck(reloid, att->attnum, GetUserId(), ACL_SELECT); - if (bms_is_member(tupdesc->attrs[i]->attnum - FirstLowInvalidHeapAttributeNumber, + if (bms_is_member(att->attnum - FirstLowInvalidHeapAttributeNumber, modifiedCols) || aclresult == ACLCHECK_OK) { column_perm = any_perm = true; @@ -2266,7 +2317,7 @@ ExecBuildSlotValueDescription(Oid reloid, else write_comma_collist = true; - appendStringInfoString(&collist, NameStr(tupdesc->attrs[i]->attname)); + appendStringInfoString(&collist, NameStr(att->attname)); } } @@ -2279,7 +2330,7 @@ ExecBuildSlotValueDescription(Oid reloid, Oid foutoid; bool typisvarlena; - getTypeOutputInfo(tupdesc->attrs[i]->atttypid, + getTypeOutputInfo(att->atttypid, &foutoid, &typisvarlena); val = OidOutputFunctionCall(foutoid, slot->tts_values[i]); } @@ -2353,13 +2404,12 @@ ExecUpdateLockMode(EState *estate, ResultRelInfo *relinfo) ExecRowMark * ExecFindRowMark(EState *estate, Index rti, bool missing_ok) { - ListCell *lc; - - foreach(lc, estate->es_rowMarks) + if (rti > 0 && rti <= estate->es_range_table_size && + estate->es_rowmarks != NULL) { - ExecRowMark *erm = (ExecRowMark *) lfirst(lc); + ExecRowMark *erm = estate->es_rowmarks[rti - 1]; - if (erm->rti == rti) + if (erm) return erm; } if (!missing_ok) @@ -2665,6 +2715,10 @@ EvalPlanQualFetch(EState *estate, Relation relation, int lockmode, ereport(ERROR, (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE), errmsg("could not serialize access due to concurrent update"))); + if (ItemPointerIndicatesMovedPartitions(&hufd.ctid)) + ereport(ERROR, + (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE), + errmsg("tuple to be locked was already moved to another partition due to concurrent update"))); /* Should not encounter speculative tuple on recheck */ Assert(!HeapTupleHeaderIsSpeculative(tuple.t_data)); @@ -2685,6 +2739,7 @@ EvalPlanQualFetch(EState *estate, Relation relation, int lockmode, case HeapTupleInvisible: elog(ERROR, "attempted to lock invisible tuple"); + break; default: ReleaseBuffer(buffer); @@ -2733,6 +2788,14 @@ EvalPlanQualFetch(EState *estate, Relation relation, int lockmode, * As above, it should be safe to examine xmax and t_ctid without the * 
buffer content lock, because they can't be changing. */ + + /* check whether next version would be in a different partition */ + if (HeapTupleHeaderIndicatesMovedPartitions(tuple.t_data)) + ereport(ERROR, + (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE), + errmsg("tuple to be locked was already moved to another partition due to concurrent update"))); + + /* check whether tuple has been deleted */ if (ItemPointerEquals(&tuple.t_self, &tuple.t_data->t_ctid)) { /* deleted, so forget about it */ @@ -2997,15 +3060,25 @@ EvalPlanQualBegin(EPQState *epqstate, EState *parentestate) /* * We already have a suitable child EPQ tree, so just reset it. */ - int rtsize = list_length(parentestate->es_range_table); + Index rtsize = parentestate->es_range_table_size; PlanState *planstate = epqstate->planstate; MemSet(estate->es_epqScanDone, 0, rtsize * sizeof(bool)); /* Recopy current values of parent parameters */ - if (parentestate->es_plannedstmt->nParamExec > 0) + if (parentestate->es_plannedstmt->paramExecTypes != NIL) { - int i = parentestate->es_plannedstmt->nParamExec; + int i; + + /* + * Force evaluation of any InitPlan outputs that could be needed + * by the subplan, just in case they got reset since + * EvalPlanQualStart (see comments therein). + */ + ExecSetParamPlanMulti(planstate->plan->extParam, + GetPerTupleExprContext(parentestate)); + + i = list_length(parentestate->es_plannedstmt->paramExecTypes); while (--i >= 0) { @@ -3036,11 +3109,11 @@ static void EvalPlanQualStart(EPQState *epqstate, EState *parentestate, Plan *planTree) { EState *estate; - int rtsize; + Index rtsize; MemoryContext oldcontext; ListCell *l; - rtsize = list_length(parentestate->es_range_table); + rtsize = parentestate->es_range_table_size; epqstate->estate = estate = CreateExecutorState(); @@ -3064,6 +3137,10 @@ EvalPlanQualStart(EPQState *epqstate, EState *parentestate, Plan *planTree) estate->es_snapshot = parentestate->es_snapshot; estate->es_crosscheck_snapshot = parentestate->es_crosscheck_snapshot; estate->es_range_table = parentestate->es_range_table; + estate->es_range_table_array = parentestate->es_range_table_array; + estate->es_range_table_size = parentestate->es_range_table_size; + estate->es_relations = parentestate->es_relations; + estate->es_rowmarks = parentestate->es_rowmarks; estate->es_plannedstmt = parentestate->es_plannedstmt; estate->es_junkFilter = parentestate->es_junkFilter; estate->es_output_cid = parentestate->es_output_cid; @@ -3081,7 +3158,6 @@ EvalPlanQualStart(EPQState *epqstate, EState *parentestate, Plan *planTree) } /* es_result_relation_info must NOT be copied */ /* es_trig_target_relations must NOT be copied */ - estate->es_rowMarks = parentestate->es_rowMarks; estate->es_top_eflags = parentestate->es_top_eflags; estate->es_instrument = parentestate->es_instrument; /* es_auxmodifytables must NOT be copied */ @@ -3093,12 +3169,36 @@ EvalPlanQualStart(EPQState *epqstate, EState *parentestate, Plan *planTree) * already set from other parts of the parent's plan tree. */ estate->es_param_list_info = parentestate->es_param_list_info; - if (parentestate->es_plannedstmt->nParamExec > 0) + if (parentestate->es_plannedstmt->paramExecTypes != NIL) { - int i = parentestate->es_plannedstmt->nParamExec; + int i; + + /* + * Force evaluation of any InitPlan outputs that could be needed by + * the subplan. 
(With more complexity, maybe we could postpone this + * till the subplan actually demands them, but it doesn't seem worth + * the trouble; this is a corner case already, since usually the + * InitPlans would have been evaluated before reaching EvalPlanQual.) + * + * This will not touch output params of InitPlans that occur somewhere + * within the subplan tree, only those that are attached to the + * ModifyTable node or above it and are referenced within the subplan. + * That's OK though, because the planner would only attach such + * InitPlans to a lower-level SubqueryScan node, and EPQ execution + * will not descend into a SubqueryScan. + * + * The EState's per-output-tuple econtext is sufficiently short-lived + * for this, since it should get reset before there is any chance of + * doing EvalPlanQual again. + */ + ExecSetParamPlanMulti(planTree->extParam, + GetPerTupleExprContext(parentestate)); + /* now make the internal param workspace ... */ + i = list_length(parentestate->es_plannedstmt->paramExecTypes); estate->es_param_exec_vals = (ParamExecData *) palloc0(i * sizeof(ParamExecData)); + /* ... and copy down all values, whether really needed or not */ while (--i >= 0) { /* copy value if any, but not execPlan link */ @@ -3208,250 +3308,3 @@ EvalPlanQualEnd(EPQState *epqstate) epqstate->planstate = NULL; epqstate->origslot = NULL; } - -/* - * ExecSetupPartitionTupleRouting - set up information needed during - * tuple routing for partitioned tables - * - * Output arguments: - * 'pd' receives an array of PartitionDispatch objects with one entry for - * every partitioned table in the partition tree - * 'partitions' receives an array of ResultRelInfo objects with one entry for - * every leaf partition in the partition tree - * 'tup_conv_maps' receives an array of TupleConversionMap objects with one - * entry for every leaf partition (required to convert input tuple based - * on the root table's rowtype to a leaf partition's rowtype after tuple - * routing is done) - * 'partition_tuple_slot' receives a standalone TupleTableSlot to be used - * to manipulate any given leaf partition's rowtype after that partition - * is chosen by tuple-routing. - * 'num_parted' receives the number of partitioned tables in the partition - * tree (= the number of entries in the 'pd' output array) - * 'num_partitions' receives the number of leaf partitions in the partition - * tree (= the number of entries in the 'partitions' and 'tup_conv_maps' - * output arrays - * - * Note that all the relations in the partition tree are locked using the - * RowExclusiveLock mode upon return from this function. - */ -void -ExecSetupPartitionTupleRouting(Relation rel, - Index resultRTindex, - PartitionDispatch **pd, - ResultRelInfo **partitions, - TupleConversionMap ***tup_conv_maps, - TupleTableSlot **partition_tuple_slot, - int *num_parted, int *num_partitions) -{ - TupleDesc tupDesc = RelationGetDescr(rel); - List *leaf_parts; - ListCell *cell; - int i; - ResultRelInfo *leaf_part_rri; - - /* Get the tuple-routing information and lock partitions */ - *pd = RelationGetPartitionDispatchInfo(rel, RowExclusiveLock, num_parted, - &leaf_parts); - *num_partitions = list_length(leaf_parts); - *partitions = (ResultRelInfo *) palloc(*num_partitions * - sizeof(ResultRelInfo)); - *tup_conv_maps = (TupleConversionMap **) palloc0(*num_partitions * - sizeof(TupleConversionMap *)); - - /* - * Initialize an empty slot that will be used to manipulate tuples of any - * given partition's rowtype. 
It is attached to the caller-specified node - * (such as ModifyTableState) and released when the node finishes - * processing. - */ - *partition_tuple_slot = MakeTupleTableSlot(); - - leaf_part_rri = *partitions; - i = 0; - foreach(cell, leaf_parts) - { - Relation partrel; - TupleDesc part_tupdesc; - - /* - * We locked all the partitions above including the leaf partitions. - * Note that each of the relations in *partitions are eventually - * closed by the caller. - */ - partrel = heap_open(lfirst_oid(cell), NoLock); - part_tupdesc = RelationGetDescr(partrel); - - /* - * Verify result relation is a valid target for the current operation. - */ - CheckValidResultRel(partrel, CMD_INSERT); - - /* - * Save a tuple conversion map to convert a tuple routed to this - * partition from the parent's type to the partition's. - */ - (*tup_conv_maps)[i] = convert_tuples_by_name(tupDesc, part_tupdesc, - gettext_noop("could not convert row type")); - - InitResultRelInfo(leaf_part_rri, - partrel, - resultRTindex, - rel, - 0); - - /* - * Open partition indices (remember we do not support ON CONFLICT in - * case of partitioned tables, so we do not need support information - * for speculative insertion) - */ - if (leaf_part_rri->ri_RelationDesc->rd_rel->relhasindex && - leaf_part_rri->ri_IndexRelationDescs == NULL) - ExecOpenIndices(leaf_part_rri, false); - - leaf_part_rri++; - i++; - } -} - -/* - * ExecFindPartition -- Find a leaf partition in the partition tree rooted - * at parent, for the heap tuple contained in *slot - * - * estate must be non-NULL; we'll need it to compute any expressions in the - * partition key(s) - * - * If no leaf partition is found, this routine errors out with the appropriate - * error message, else it returns the leaf partition sequence number returned - * by get_partition_for_tuple() unchanged. - */ -int -ExecFindPartition(ResultRelInfo *resultRelInfo, PartitionDispatch *pd, - TupleTableSlot *slot, EState *estate) -{ - int result; - PartitionDispatchData *failed_at; - TupleTableSlot *failed_slot; - - /* - * First check the root table's partition constraint, if any. No point in - * routing the tuple if it doesn't belong in the root table itself. - */ - if (resultRelInfo->ri_PartitionCheck) - ExecPartitionCheck(resultRelInfo, slot, estate); - - result = get_partition_for_tuple(pd, slot, estate, - &failed_at, &failed_slot); - if (result < 0) - { - Relation failed_rel; - Datum key_values[PARTITION_MAX_KEYS]; - bool key_isnull[PARTITION_MAX_KEYS]; - char *val_desc; - ExprContext *ecxt = GetPerTupleExprContext(estate); - - failed_rel = failed_at->reldesc; - ecxt->ecxt_scantuple = failed_slot; - FormPartitionKeyDatum(failed_at, failed_slot, estate, - key_values, key_isnull); - val_desc = ExecBuildSlotPartitionKeyDescription(failed_rel, - key_values, - key_isnull, - 64); - Assert(OidIsValid(RelationGetRelid(failed_rel))); - ereport(ERROR, - (errcode(ERRCODE_CHECK_VIOLATION), - errmsg("no partition of relation \"%s\" found for row", - RelationGetRelationName(failed_rel)), - val_desc ? errdetail("Partition key of the failing row contains %s.", val_desc) : 0)); - } - - return result; -} - -/* - * BuildSlotPartitionKeyDescription - * - * This works very much like BuildIndexValueDescription() and is currently - * used for building error messages when ExecFindPartition() fails to find - * partition for a row. 
- */ -static char * -ExecBuildSlotPartitionKeyDescription(Relation rel, - Datum *values, - bool *isnull, - int maxfieldlen) -{ - StringInfoData buf; - PartitionKey key = RelationGetPartitionKey(rel); - int partnatts = get_partition_natts(key); - int i; - Oid relid = RelationGetRelid(rel); - AclResult aclresult; - - if (check_enable_rls(relid, InvalidOid, true) == RLS_ENABLED) - return NULL; - - /* If the user has table-level access, just go build the description. */ - aclresult = pg_class_aclcheck(relid, GetUserId(), ACL_SELECT); - if (aclresult != ACLCHECK_OK) - { - /* - * Step through the columns of the partition key and make sure the - * user has SELECT rights on all of them. - */ - for (i = 0; i < partnatts; i++) - { - AttrNumber attnum = get_partition_col_attnum(key, i); - - /* - * If this partition key column is an expression, we return no - * detail rather than try to figure out what column(s) the - * expression includes and if the user has SELECT rights on them. - */ - if (attnum == InvalidAttrNumber || - pg_attribute_aclcheck(relid, attnum, GetUserId(), - ACL_SELECT) != ACLCHECK_OK) - return NULL; - } - } - - initStringInfo(&buf); - appendStringInfo(&buf, "(%s) = (", - pg_get_partkeydef_columns(relid, true)); - - for (i = 0; i < partnatts; i++) - { - char *val; - int vallen; - - if (isnull[i]) - val = "null"; - else - { - Oid foutoid; - bool typisvarlena; - - getTypeOutputInfo(get_partition_col_typid(key, i), - &foutoid, &typisvarlena); - val = OidOutputFunctionCall(foutoid, values[i]); - } - - if (i > 0) - appendStringInfoString(&buf, ", "); - - /* truncate if needed */ - vallen = strlen(val); - if (vallen <= maxfieldlen) - appendStringInfoString(&buf, val); - else - { - vallen = pg_mbcliplen(val, vallen, maxfieldlen); - appendBinaryStringInfo(&buf, val, vallen); - appendStringInfoString(&buf, "..."); - } - } - - appendStringInfoChar(&buf, ')'); - - return buf.data; -} diff --git a/src/backend/executor/execParallel.c b/src/backend/executor/execParallel.c index ce47f1d4a8..13ef232d39 100644 --- a/src/backend/executor/execParallel.c +++ b/src/backend/executor/execParallel.c @@ -3,7 +3,7 @@ * execParallel.c * Support routines for parallel execution. * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * This file contains routines that are intended to support setting up, @@ -25,19 +25,27 @@ #include "executor/execParallel.h" #include "executor/executor.h" +#include "executor/nodeAppend.h" #include "executor/nodeBitmapHeapscan.h" #include "executor/nodeCustom.h" #include "executor/nodeForeignscan.h" -#include "executor/nodeSeqscan.h" +#include "executor/nodeHash.h" +#include "executor/nodeHashjoin.h" #include "executor/nodeIndexscan.h" #include "executor/nodeIndexonlyscan.h" +#include "executor/nodeSeqscan.h" +#include "executor/nodeSort.h" +#include "executor/nodeSubplan.h" #include "executor/tqueue.h" +#include "jit/jit.h" #include "nodes/nodeFuncs.h" #include "optimizer/planmain.h" #include "optimizer/planner.h" #include "storage/spin.h" #include "tcop/tcopprot.h" +#include "utils/datum.h" #include "utils/dsa.h" +#include "utils/lsyscache.h" #include "utils/memutils.h" #include "utils/snapmgr.h" #include "pgstat.h" @@ -47,16 +55,29 @@ * greater than any 32-bit integer here so that values < 2^32 can be used * by individual parallel nodes to store their own state. 
*/ -#define PARALLEL_KEY_PLANNEDSTMT UINT64CONST(0xE000000000000001) -#define PARALLEL_KEY_PARAMS UINT64CONST(0xE000000000000002) -#define PARALLEL_KEY_BUFFER_USAGE UINT64CONST(0xE000000000000003) -#define PARALLEL_KEY_TUPLE_QUEUE UINT64CONST(0xE000000000000004) -#define PARALLEL_KEY_INSTRUMENTATION UINT64CONST(0xE000000000000005) -#define PARALLEL_KEY_DSA UINT64CONST(0xE000000000000006) -#define PARALLEL_KEY_QUERY_TEXT UINT64CONST(0xE000000000000007) +#define PARALLEL_KEY_EXECUTOR_FIXED UINT64CONST(0xE000000000000001) +#define PARALLEL_KEY_PLANNEDSTMT UINT64CONST(0xE000000000000002) +#define PARALLEL_KEY_PARAMLISTINFO UINT64CONST(0xE000000000000003) +#define PARALLEL_KEY_BUFFER_USAGE UINT64CONST(0xE000000000000004) +#define PARALLEL_KEY_TUPLE_QUEUE UINT64CONST(0xE000000000000005) +#define PARALLEL_KEY_INSTRUMENTATION UINT64CONST(0xE000000000000006) +#define PARALLEL_KEY_DSA UINT64CONST(0xE000000000000007) +#define PARALLEL_KEY_QUERY_TEXT UINT64CONST(0xE000000000000008) +#define PARALLEL_KEY_JIT_INSTRUMENTATION UINT64CONST(0xE000000000000009) #define PARALLEL_TUPLE_QUEUE_SIZE 65536 +/* + * Fixed-size random stuff that we need to pass to parallel workers. + */ +typedef struct FixedParallelExecutorState +{ + int64 tuples_needed; /* tuple bound, see ExecSetTupleBound */ + dsa_pointer param_exec; + int eflags; + int jit_flags; +} FixedParallelExecutorState; + /* * DSM structure for accumulating per-PlanState instrumentation. * @@ -109,6 +130,8 @@ static bool ExecParallelInitializeDSM(PlanState *node, ExecParallelInitializeDSMContext *d); static shm_mq_handle **ExecParallelSetupTupleQueues(ParallelContext *pcxt, bool reinitialize); +static bool ExecParallelReInitializeDSM(PlanState *planstate, + ParallelContext *pcxt); static bool ExecParallelRetrieveInstrumentation(PlanState *planstate, SharedExecutorInstrumentation *instrumentation); @@ -150,7 +173,7 @@ ExecSerializePlan(Plan *plan, EState *estate) */ pstmt = makeNode(PlannedStmt); pstmt->commandType = CMD_SELECT; - pstmt->queryId = 0; + pstmt->queryId = UINT64CONST(0); pstmt->hasReturning = false; pstmt->hasModifyingCTE = false; pstmt->canSetTag = true; @@ -160,7 +183,6 @@ ExecSerializePlan(Plan *plan, EState *estate) pstmt->planTree = plan; pstmt->rtable = estate->es_range_table; pstmt->resultRelations = NIL; - pstmt->nonleafResultRelations = NIL; /* * Transfer only parallel-safe subplans, leaving a NULL "hole" in the list @@ -183,7 +205,7 @@ ExecSerializePlan(Plan *plan, EState *estate) pstmt->rowMarks = NIL; pstmt->relationOids = NIL; pstmt->invalItems = NIL; /* workers can't replan anyway... */ - pstmt->nParamExec = estate->es_plannedstmt->nParamExec; + pstmt->paramExecTypes = estate->es_plannedstmt->paramExecTypes; pstmt->utilityStmt = NULL; pstmt->stmt_location = -1; pstmt->stmt_len = -1; @@ -193,10 +215,10 @@ ExecSerializePlan(Plan *plan, EState *estate) } /* - * Ordinary plan nodes won't do anything here, but parallel-aware plan nodes - * may need some state which is shared across all parallel workers. Before - * we size the DSM, give them a chance to call shm_toc_estimate_chunk or - * shm_toc_estimate_keys on &pcxt->estimator. + * Parallel-aware plan nodes (and occasionally others) may need some state + * which is shared across all parallel workers. Before we size the DSM, give + * them a chance to call shm_toc_estimate_chunk or shm_toc_estimate_keys on + * &pcxt->estimator. * * While we're at it, count the number of PlanState nodes in the tree, so * we know how many SharedPlanStateInstrumentation structures we need. 
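
A minimal sketch (not part of the patch) of the estimate/allocate/insert pattern that the per-node ExecXXXEstimate / ExecXXXInitializeDSM / ExecXXXInitializeWorker callbacks dispatched below are expected to follow. MyNodeSharedState, MY_NODE_SHM_KEY, and the function names are hypothetical placeholders; a real node would also receive its PlanState. Per-node TOC keys only need to stay below 2^32 so they cannot collide with the PARALLEL_KEY_* values reserved above.

    #define MY_NODE_SHM_KEY 1           /* hypothetical per-node TOC key (< 2^32) */

    typedef struct MyNodeSharedState
    {
        int         nworkers_expected;  /* hypothetical shared field */
    } MyNodeSharedState;

    static void
    MyNodeEstimateDSM(ParallelContext *pcxt)
    {
        /* estimate phase: reserve space for the chunk plus one TOC key */
        shm_toc_estimate_chunk(&pcxt->estimator, sizeof(MyNodeSharedState));
        shm_toc_estimate_keys(&pcxt->estimator, 1);
    }

    static void
    MyNodeInitializeDSM(ParallelContext *pcxt)
    {
        /* initialize phase: claim the space estimated above, publish the key */
        MyNodeSharedState *state;

        state = shm_toc_allocate(pcxt->toc, sizeof(MyNodeSharedState));
        state->nworkers_expected = pcxt->nworkers;
        shm_toc_insert(pcxt->toc, MY_NODE_SHM_KEY, state);
    }

    static void
    MyNodeInitializeWorker(ParallelWorkerContext *pwcxt)
    {
        /* worker side: look the chunk up again under the same key */
        MyNodeSharedState *state;

        state = shm_toc_lookup(pwcxt->toc, MY_NODE_SHM_KEY, false);
        (void) state;           /* node-specific attach logic would go here */
    }
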
@@ -210,43 +232,191 @@ ExecParallelEstimate(PlanState *planstate, ExecParallelEstimateContext *e) /* Count this node. */ e->nnodes++; - /* Call estimators for parallel-aware nodes. */ - if (planstate->plan->parallel_aware) + switch (nodeTag(planstate)) { - switch (nodeTag(planstate)) - { - case T_SeqScanState: + case T_SeqScanState: + if (planstate->plan->parallel_aware) ExecSeqScanEstimate((SeqScanState *) planstate, e->pcxt); - break; - case T_IndexScanState: + break; + case T_IndexScanState: + if (planstate->plan->parallel_aware) ExecIndexScanEstimate((IndexScanState *) planstate, e->pcxt); - break; - case T_IndexOnlyScanState: + break; + case T_IndexOnlyScanState: + if (planstate->plan->parallel_aware) ExecIndexOnlyScanEstimate((IndexOnlyScanState *) planstate, e->pcxt); - break; - case T_ForeignScanState: + break; + case T_ForeignScanState: + if (planstate->plan->parallel_aware) ExecForeignScanEstimate((ForeignScanState *) planstate, e->pcxt); - break; - case T_CustomScanState: + break; + case T_AppendState: + if (planstate->plan->parallel_aware) + ExecAppendEstimate((AppendState *) planstate, + e->pcxt); + break; + case T_CustomScanState: + if (planstate->plan->parallel_aware) ExecCustomScanEstimate((CustomScanState *) planstate, e->pcxt); - break; - case T_BitmapHeapScanState: + break; + case T_BitmapHeapScanState: + if (planstate->plan->parallel_aware) ExecBitmapHeapEstimate((BitmapHeapScanState *) planstate, e->pcxt); - break; - default: - break; - } + break; + case T_HashJoinState: + if (planstate->plan->parallel_aware) + ExecHashJoinEstimate((HashJoinState *) planstate, + e->pcxt); + break; + case T_HashState: + /* even when not parallel-aware, for EXPLAIN ANALYZE */ + ExecHashEstimate((HashState *) planstate, e->pcxt); + break; + case T_SortState: + /* even when not parallel-aware, for EXPLAIN ANALYZE */ + ExecSortEstimate((SortState *) planstate, e->pcxt); + break; + + default: + break; } return planstate_tree_walker(planstate, ExecParallelEstimate, e); } +/* + * Estimate the amount of space required to serialize the indicated parameters. + */ +static Size +EstimateParamExecSpace(EState *estate, Bitmapset *params) +{ + int paramid; + Size sz = sizeof(int); + + paramid = -1; + while ((paramid = bms_next_member(params, paramid)) >= 0) + { + Oid typeOid; + int16 typLen; + bool typByVal; + ParamExecData *prm; + + prm = &(estate->es_param_exec_vals[paramid]); + typeOid = list_nth_oid(estate->es_plannedstmt->paramExecTypes, + paramid); + + sz = add_size(sz, sizeof(int)); /* space for paramid */ + + /* space for datum/isnull */ + if (OidIsValid(typeOid)) + get_typlenbyval(typeOid, &typLen, &typByVal); + else + { + /* If no type OID, assume by-value, like copyParamList does. */ + typLen = sizeof(Datum); + typByVal = true; + } + sz = add_size(sz, + datumEstimateSpace(prm->value, prm->isnull, + typByVal, typLen)); + } + return sz; +} + +/* + * Serialize specified PARAM_EXEC parameters. + * + * We write the number of parameters first, as a 4-byte integer, and then + * write details for each parameter in turn. The details for each parameter + * consist of a 4-byte paramid (location of param in execution time internal + * parameter array) and then the datum as serialized by datumSerialize(). + */ +static dsa_pointer +SerializeParamExecParams(EState *estate, Bitmapset *params, dsa_area *area) +{ + Size size; + int nparams; + int paramid; + ParamExecData *prm; + dsa_pointer handle; + char *start_address; + + /* Allocate enough space for the current parameter values. 
*/ + size = EstimateParamExecSpace(estate, params); + handle = dsa_allocate(area, size); + start_address = dsa_get_address(area, handle); + + /* First write the number of parameters as a 4-byte integer. */ + nparams = bms_num_members(params); + memcpy(start_address, &nparams, sizeof(int)); + start_address += sizeof(int); + + /* Write details for each parameter in turn. */ + paramid = -1; + while ((paramid = bms_next_member(params, paramid)) >= 0) + { + Oid typeOid; + int16 typLen; + bool typByVal; + + prm = &(estate->es_param_exec_vals[paramid]); + typeOid = list_nth_oid(estate->es_plannedstmt->paramExecTypes, + paramid); + + /* Write paramid. */ + memcpy(start_address, ¶mid, sizeof(int)); + start_address += sizeof(int); + + /* Write datum/isnull */ + if (OidIsValid(typeOid)) + get_typlenbyval(typeOid, &typLen, &typByVal); + else + { + /* If no type OID, assume by-value, like copyParamList does. */ + typLen = sizeof(Datum); + typByVal = true; + } + datumSerialize(prm->value, prm->isnull, typByVal, typLen, + &start_address); + } + + return handle; +} + +/* + * Restore specified PARAM_EXEC parameters. + */ +static void +RestoreParamExecParams(char *start_address, EState *estate) +{ + int nparams; + int i; + int paramid; + + memcpy(&nparams, start_address, sizeof(int)); + start_address += sizeof(int); + + for (i = 0; i < nparams; i++) + { + ParamExecData *prm; + + /* Read paramid */ + memcpy(¶mid, start_address, sizeof(int)); + start_address += sizeof(int); + prm = &(estate->es_param_exec_vals[paramid]); + + /* Read datum/isnull. */ + prm->value = datumRestore(&start_address, &prm->isnull); + prm->execPlan = NULL; + } +} + /* * Initialize the dynamic shared memory segment that will be used to control * parallel execution. @@ -267,46 +437,67 @@ ExecParallelInitializeDSM(PlanState *planstate, d->nnodes++; /* - * Call initializers for parallel-aware plan nodes. + * Call initializers for DSM-using plan nodes. * - * Ordinary plan nodes won't do anything here, but parallel-aware plan - * nodes may need to initialize shared state in the DSM before parallel - * workers are available. They can allocate the space they previously + * Most plan nodes won't do anything here, but plan nodes that allocated + * DSM may need to initialize shared state in the DSM before parallel + * workers are launched. They can allocate the space they previously * estimated using shm_toc_allocate, and add the keys they previously * estimated using shm_toc_insert, in each case targeting pcxt->toc. 
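
The Serialize/RestoreParamExecParams helpers above build directly on the datum serialization primitives from utils/datum.h, which this file now includes. A minimal round-trip sketch for a single by-value int4 datum, illustrative only and not part of the patch:

    static void
    datum_roundtrip_sketch(void)
    {
        Datum       val = Int32GetDatum(42);
        bool        isnull = false;
        Size        sz;
        char       *buf;
        char       *wptr;
        char       *rptr;
        bool        out_isnull;
        Datum       out;

        /* leader side: size the buffer, then serialize into it */
        /* int4 is pass-by-value with length 4 */
        sz = datumEstimateSpace(val, isnull, true, sizeof(int32));
        buf = palloc(sz);
        wptr = buf;
        datumSerialize(val, isnull, true, sizeof(int32), &wptr);

        /* worker side: read the same bytes back */
        rptr = buf;
        out = datumRestore(&rptr, &out_isnull);
        Assert(!out_isnull && DatumGetInt32(out) == 42);

        pfree(buf);
    }
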
*/ - if (planstate->plan->parallel_aware) + switch (nodeTag(planstate)) { - switch (nodeTag(planstate)) - { - case T_SeqScanState: + case T_SeqScanState: + if (planstate->plan->parallel_aware) ExecSeqScanInitializeDSM((SeqScanState *) planstate, d->pcxt); - break; - case T_IndexScanState: + break; + case T_IndexScanState: + if (planstate->plan->parallel_aware) ExecIndexScanInitializeDSM((IndexScanState *) planstate, d->pcxt); - break; - case T_IndexOnlyScanState: + break; + case T_IndexOnlyScanState: + if (planstate->plan->parallel_aware) ExecIndexOnlyScanInitializeDSM((IndexOnlyScanState *) planstate, d->pcxt); - break; - case T_ForeignScanState: + break; + case T_ForeignScanState: + if (planstate->plan->parallel_aware) ExecForeignScanInitializeDSM((ForeignScanState *) planstate, d->pcxt); - break; - case T_CustomScanState: + break; + case T_AppendState: + if (planstate->plan->parallel_aware) + ExecAppendInitializeDSM((AppendState *) planstate, + d->pcxt); + break; + case T_CustomScanState: + if (planstate->plan->parallel_aware) ExecCustomScanInitializeDSM((CustomScanState *) planstate, d->pcxt); - break; - case T_BitmapHeapScanState: + break; + case T_BitmapHeapScanState: + if (planstate->plan->parallel_aware) ExecBitmapHeapInitializeDSM((BitmapHeapScanState *) planstate, d->pcxt); - break; + break; + case T_HashJoinState: + if (planstate->plan->parallel_aware) + ExecHashJoinInitializeDSM((HashJoinState *) planstate, + d->pcxt); + break; + case T_HashState: + /* even when not parallel-aware, for EXPLAIN ANALYZE */ + ExecHashInitializeDSM((HashState *) planstate, d->pcxt); + break; + case T_SortState: + /* even when not parallel-aware, for EXPLAIN ANALYZE */ + ExecSortInitializeDSM((SortState *) planstate, d->pcxt); + break; - default: - break; - } + default: + break; } return planstate_tree_walker(planstate, ExecParallelInitializeDSM, d); @@ -364,42 +555,48 @@ ExecParallelSetupTupleQueues(ParallelContext *pcxt, bool reinitialize) return responseq; } -/* - * Re-initialize the parallel executor info such that it can be reused by - * workers. - */ -void -ExecParallelReinitialize(ParallelExecutorInfo *pei) -{ - ReinitializeParallelDSM(pei->pcxt); - pei->tqueue = ExecParallelSetupTupleQueues(pei->pcxt, true); - pei->finished = false; -} - /* * Sets up the required infrastructure for backend workers to perform * execution and return results to the main backend. */ ParallelExecutorInfo * -ExecInitParallelPlan(PlanState *planstate, EState *estate, int nworkers) +ExecInitParallelPlan(PlanState *planstate, EState *estate, + Bitmapset *sendParams, int nworkers, + int64 tuples_needed) { ParallelExecutorInfo *pei; ParallelContext *pcxt; ExecParallelEstimateContext e; ExecParallelInitializeDSMContext d; + FixedParallelExecutorState *fpes; char *pstmt_data; char *pstmt_space; - char *param_space; + char *paramlistinfo_space; BufferUsage *bufusage_space; SharedExecutorInstrumentation *instrumentation = NULL; + SharedJitInstrumentation *jit_instrumentation = NULL; int pstmt_len; - int param_len; + int paramlistinfo_len; int instrumentation_len = 0; + int jit_instrumentation_len = 0; int instrument_offset = 0; Size dsa_minsize = dsa_minimum_size(); char *query_string; int query_len; + /* + * Force any initplan outputs that we're going to pass to workers to be + * evaluated, if they weren't already. + * + * For simplicity, we use the EState's per-output-tuple ExprContext here. 
+ * That risks intra-query memory leakage, since we might pass through here + * many times before that ExprContext gets reset; but ExecSetParamPlan + * doesn't normally leak any memory in the context (see its comments), so + * it doesn't seem worth complicating this function's API to pass it a + * shorter-lived ExprContext. This might need to change someday. + */ + ExecSetParamPlanMulti(sendParams, GetPerTupleExprContext(estate)); + /* Allocate object for return value. */ pei = palloc0(sizeof(ParallelExecutorInfo)); pei->finished = false; @@ -409,7 +606,7 @@ ExecInitParallelPlan(PlanState *planstate, EState *estate, int nworkers) pstmt_data = ExecSerializePlan(planstate->plan, estate); /* Create a parallel context. */ - pcxt = CreateParallelContext("postgres", "ParallelQueryMain", nworkers); + pcxt = CreateParallelContext("postgres", "ParallelQueryMain", nworkers, false); pei->pcxt = pcxt; /* @@ -418,9 +615,14 @@ ExecInitParallelPlan(PlanState *planstate, EState *estate, int nworkers) * for the various things we need to store. */ + /* Estimate space for fixed-size state. */ + shm_toc_estimate_chunk(&pcxt->estimator, + sizeof(FixedParallelExecutorState)); + shm_toc_estimate_keys(&pcxt->estimator, 1); + /* Estimate space for query text. */ query_len = strlen(estate->es_sourceText); - shm_toc_estimate_chunk(&pcxt->estimator, query_len); + shm_toc_estimate_chunk(&pcxt->estimator, query_len + 1); shm_toc_estimate_keys(&pcxt->estimator, 1); /* Estimate space for serialized PlannedStmt. */ @@ -429,8 +631,8 @@ ExecInitParallelPlan(PlanState *planstate, EState *estate, int nworkers) shm_toc_estimate_keys(&pcxt->estimator, 1); /* Estimate space for serialized ParamListInfo. */ - param_len = EstimateParamListSpace(estate->es_param_list_info); - shm_toc_estimate_chunk(&pcxt->estimator, param_len); + paramlistinfo_len = EstimateParamListSpace(estate->es_param_list_info); + shm_toc_estimate_chunk(&pcxt->estimator, paramlistinfo_len); shm_toc_estimate_keys(&pcxt->estimator, 1); /* @@ -470,6 +672,16 @@ ExecInitParallelPlan(PlanState *planstate, EState *estate, int nworkers) mul_size(e.nnodes, nworkers)); shm_toc_estimate_chunk(&pcxt->estimator, instrumentation_len); shm_toc_estimate_keys(&pcxt->estimator, 1); + + /* Estimate space for JIT instrumentation, if required. */ + if (estate->es_jit_flags != PGJIT_NONE) + { + jit_instrumentation_len = + offsetof(SharedJitInstrumentation, jit_instr) + + sizeof(JitInstrumentation) * nworkers; + shm_toc_estimate_chunk(&pcxt->estimator, jit_instrumentation_len); + shm_toc_estimate_keys(&pcxt->estimator, 1); + } } /* Estimate space for DSA area. */ @@ -487,9 +699,17 @@ ExecInitParallelPlan(PlanState *planstate, EState *estate, int nworkers) * asked for has been allocated or initialized yet, though, so do that. */ + /* Store fixed-size state. */ + fpes = shm_toc_allocate(pcxt->toc, sizeof(FixedParallelExecutorState)); + fpes->tuples_needed = tuples_needed; + fpes->param_exec = InvalidDsaPointer; + fpes->eflags = estate->es_top_eflags; + fpes->jit_flags = estate->es_jit_flags; + shm_toc_insert(pcxt->toc, PARALLEL_KEY_EXECUTOR_FIXED, fpes); + /* Store query string */ - query_string = shm_toc_allocate(pcxt->toc, query_len); - memcpy(query_string, estate->es_sourceText, query_len); + query_string = shm_toc_allocate(pcxt->toc, query_len + 1); + memcpy(query_string, estate->es_sourceText, query_len + 1); shm_toc_insert(pcxt->toc, PARALLEL_KEY_QUERY_TEXT, query_string); /* Store serialized PlannedStmt. 
*/ @@ -498,9 +718,9 @@ ExecInitParallelPlan(PlanState *planstate, EState *estate, int nworkers) shm_toc_insert(pcxt->toc, PARALLEL_KEY_PLANNEDSTMT, pstmt_space); /* Store serialized ParamListInfo. */ - param_space = shm_toc_allocate(pcxt->toc, param_len); - shm_toc_insert(pcxt->toc, PARALLEL_KEY_PARAMS, param_space); - SerializeParamList(estate->es_param_list_info, ¶m_space); + paramlistinfo_space = shm_toc_allocate(pcxt->toc, paramlistinfo_len); + shm_toc_insert(pcxt->toc, PARALLEL_KEY_PARAMLISTINFO, paramlistinfo_space); + SerializeParamList(estate->es_param_list_info, ¶mlistinfo_space); /* Allocate space for each worker's BufferUsage; no need to initialize. */ bufusage_space = shm_toc_allocate(pcxt->toc, @@ -508,9 +728,12 @@ ExecInitParallelPlan(PlanState *planstate, EState *estate, int nworkers) shm_toc_insert(pcxt->toc, PARALLEL_KEY_BUFFER_USAGE, bufusage_space); pei->buffer_usage = bufusage_space; - /* Set up tuple queues. */ + /* Set up the tuple queues that the workers will write into. */ pei->tqueue = ExecParallelSetupTupleQueues(pcxt, false); + /* We don't need the TupleQueueReaders yet, though. */ + pei->reader = NULL; + /* * If instrumentation options were supplied, allocate space for the data. * It only gets partially initialized here; the rest happens during @@ -532,6 +755,18 @@ ExecInitParallelPlan(PlanState *planstate, EState *estate, int nworkers) shm_toc_insert(pcxt->toc, PARALLEL_KEY_INSTRUMENTATION, instrumentation); pei->instrumentation = instrumentation; + + if (estate->es_jit_flags != PGJIT_NONE) + { + jit_instrumentation = shm_toc_allocate(pcxt->toc, + jit_instrumentation_len); + jit_instrumentation->num_workers = nworkers; + memset(jit_instrumentation->jit_instr, 0, + sizeof(JitInstrumentation) * nworkers); + shm_toc_insert(pcxt->toc, PARALLEL_KEY_JIT_INSTRUMENTATION, + jit_instrumentation); + pei->jit_instrumentation = jit_instrumentation; + } } /* @@ -548,13 +783,20 @@ ExecInitParallelPlan(PlanState *planstate, EState *estate, int nworkers) pei->area = dsa_create_in_place(area_space, dsa_minsize, LWTRANCHE_PARALLEL_QUERY_DSA, pcxt->seg); - } - /* - * Make the area available to executor nodes running in the leader. See - * also ParallelQueryMain which makes it available to workers. - */ - estate->es_query_dsa = pei->area; + /* + * Serialize parameters, if any, using DSA storage. We don't dare use + * the main parallel query DSM for this because we might relaunch + * workers after the values have changed (and thus the amount of + * storage required has changed). + */ + if (!bms_is_empty(sendParams)) + { + pei->param_exec = SerializeParamExecParams(estate, sendParams, + pei->area); + fpes->param_exec = pei->param_exec; + } + } /* * Give parallel-aware nodes a chance to initialize their shared data. @@ -564,10 +806,14 @@ ExecInitParallelPlan(PlanState *planstate, EState *estate, int nworkers) d.pcxt = pcxt; d.instrumentation = instrumentation; d.nnodes = 0; + + /* Install our DSA area while initializing the plan. */ + estate->es_query_dsa = pei->area; ExecParallelInitializeDSM(planstate, &d); + estate->es_query_dsa = NULL; /* - * Make sure that the world hasn't shifted under our feat. This could + * Make sure that the world hasn't shifted under our feet. This could * probably just be an Assert(), but let's be conservative for now. */ if (e.nnodes != d.nnodes) @@ -577,6 +823,150 @@ ExecInitParallelPlan(PlanState *planstate, EState *estate, int nworkers) return pei; } +/* + * Set up tuple queue readers to read the results of a parallel subplan. 
+ * + * This is separate from ExecInitParallelPlan() because we can launch the + * worker processes and let them start doing something before we do this. + */ +void +ExecParallelCreateReaders(ParallelExecutorInfo *pei) +{ + int nworkers = pei->pcxt->nworkers_launched; + int i; + + Assert(pei->reader == NULL); + + if (nworkers > 0) + { + pei->reader = (TupleQueueReader **) + palloc(nworkers * sizeof(TupleQueueReader *)); + + for (i = 0; i < nworkers; i++) + { + shm_mq_set_handle(pei->tqueue[i], + pei->pcxt->worker[i].bgwhandle); + pei->reader[i] = CreateTupleQueueReader(pei->tqueue[i]); + } + } +} + +/* + * Re-initialize the parallel executor shared memory state before launching + * a fresh batch of workers. + */ +void +ExecParallelReinitialize(PlanState *planstate, + ParallelExecutorInfo *pei, + Bitmapset *sendParams) +{ + EState *estate = planstate->state; + FixedParallelExecutorState *fpes; + + /* Old workers must already be shut down */ + Assert(pei->finished); + + /* + * Force any initplan outputs that we're going to pass to workers to be + * evaluated, if they weren't already (see comments in + * ExecInitParallelPlan). + */ + ExecSetParamPlanMulti(sendParams, GetPerTupleExprContext(estate)); + + ReinitializeParallelDSM(pei->pcxt); + pei->tqueue = ExecParallelSetupTupleQueues(pei->pcxt, true); + pei->reader = NULL; + pei->finished = false; + + fpes = shm_toc_lookup(pei->pcxt->toc, PARALLEL_KEY_EXECUTOR_FIXED, false); + + /* Free any serialized parameters from the last round. */ + if (DsaPointerIsValid(fpes->param_exec)) + { + dsa_free(pei->area, fpes->param_exec); + fpes->param_exec = InvalidDsaPointer; + } + + /* Serialize current parameter values if required. */ + if (!bms_is_empty(sendParams)) + { + pei->param_exec = SerializeParamExecParams(estate, sendParams, + pei->area); + fpes->param_exec = pei->param_exec; + } + + /* Traverse plan tree and let each child node reset associated state. */ + estate->es_query_dsa = pei->area; + ExecParallelReInitializeDSM(planstate, pei->pcxt); + estate->es_query_dsa = NULL; +} + +/* + * Traverse plan tree to reinitialize per-node dynamic shared memory state + */ +static bool +ExecParallelReInitializeDSM(PlanState *planstate, + ParallelContext *pcxt) +{ + if (planstate == NULL) + return false; + + /* + * Call reinitializers for DSM-using plan nodes. 
+ */ + switch (nodeTag(planstate)) + { + case T_SeqScanState: + if (planstate->plan->parallel_aware) + ExecSeqScanReInitializeDSM((SeqScanState *) planstate, + pcxt); + break; + case T_IndexScanState: + if (planstate->plan->parallel_aware) + ExecIndexScanReInitializeDSM((IndexScanState *) planstate, + pcxt); + break; + case T_IndexOnlyScanState: + if (planstate->plan->parallel_aware) + ExecIndexOnlyScanReInitializeDSM((IndexOnlyScanState *) planstate, + pcxt); + break; + case T_ForeignScanState: + if (planstate->plan->parallel_aware) + ExecForeignScanReInitializeDSM((ForeignScanState *) planstate, + pcxt); + break; + case T_AppendState: + if (planstate->plan->parallel_aware) + ExecAppendReInitializeDSM((AppendState *) planstate, pcxt); + break; + case T_CustomScanState: + if (planstate->plan->parallel_aware) + ExecCustomScanReInitializeDSM((CustomScanState *) planstate, + pcxt); + break; + case T_BitmapHeapScanState: + if (planstate->plan->parallel_aware) + ExecBitmapHeapReInitializeDSM((BitmapHeapScanState *) planstate, + pcxt); + break; + case T_HashJoinState: + if (planstate->plan->parallel_aware) + ExecHashJoinReInitializeDSM((HashJoinState *) planstate, + pcxt); + break; + case T_HashState: + case T_SortState: + /* these nodes have DSM state, but no reinitialization is required */ + break; + + default: + break; + } + + return planstate_tree_walker(planstate, ExecParallelReInitializeDSM, pcxt); +} + /* * Copy instrumentation information about this node and its descendants from * dynamic shared memory. @@ -621,46 +1011,138 @@ ExecParallelRetrieveInstrumentation(PlanState *planstate, planstate->worker_instrument->num_workers = instrumentation->num_workers; memcpy(&planstate->worker_instrument->instrument, instrument, ibytes); + /* Perform any node-type-specific work that needs to be done. */ + switch (nodeTag(planstate)) + { + case T_SortState: + ExecSortRetrieveInstrumentation((SortState *) planstate); + break; + case T_HashState: + ExecHashRetrieveInstrumentation((HashState *) planstate); + break; + default: + break; + } + return planstate_tree_walker(planstate, ExecParallelRetrieveInstrumentation, instrumentation); } +/* + * Add up the workers' JIT instrumentation from dynamic shared memory. + */ +static void +ExecParallelRetrieveJitInstrumentation(PlanState *planstate, + SharedJitInstrumentation *shared_jit) +{ + JitInstrumentation *combined; + int ibytes; + + int n; + + /* + * Accumulate worker JIT instrumentation into the combined JIT + * instrumentation, allocating it if required. + */ + if (!planstate->state->es_jit_worker_instr) + planstate->state->es_jit_worker_instr = + MemoryContextAllocZero(planstate->state->es_query_cxt, sizeof(JitInstrumentation)); + combined = planstate->state->es_jit_worker_instr; + + /* Accummulate all the workers' instrumentations. */ + for (n = 0; n < shared_jit->num_workers; ++n) + InstrJitAgg(combined, &shared_jit->jit_instr[n]); + + /* + * Store the per-worker detail. + * + * Similar to ExecParallelRetrieveInstrumentation(), allocate the + * instrumentation in per-query context. + */ + ibytes = offsetof(SharedJitInstrumentation, jit_instr) + + mul_size(shared_jit->num_workers, sizeof(JitInstrumentation)); + planstate->worker_jit_instrument = + MemoryContextAlloc(planstate->state->es_query_cxt, ibytes); + + memcpy(planstate->worker_jit_instrument, shared_jit, ibytes); +} + /* * Finish parallel execution. We wait for parallel workers to finish, and - * accumulate their buffer usage and instrumentation. + * accumulate their buffer usage. 
*/ void ExecParallelFinish(ParallelExecutorInfo *pei) { + int nworkers = pei->pcxt->nworkers_launched; int i; + /* Make this be a no-op if called twice in a row. */ if (pei->finished) return; - /* First, wait for the workers to finish. */ + /* + * Detach from tuple queues ASAP, so that any still-active workers will + * notice that no further results are wanted. + */ + if (pei->tqueue != NULL) + { + for (i = 0; i < nworkers; i++) + shm_mq_detach(pei->tqueue[i]); + pfree(pei->tqueue); + pei->tqueue = NULL; + } + + /* + * While we're waiting for the workers to finish, let's get rid of the + * tuple queue readers. (Any other local cleanup could be done here too.) + */ + if (pei->reader != NULL) + { + for (i = 0; i < nworkers; i++) + DestroyTupleQueueReader(pei->reader[i]); + pfree(pei->reader); + pei->reader = NULL; + } + + /* Now wait for the workers to finish. */ WaitForParallelWorkersToFinish(pei->pcxt); - /* Next, accumulate buffer usage. */ - for (i = 0; i < pei->pcxt->nworkers_launched; ++i) + /* + * Next, accumulate buffer usage. (This must wait for the workers to + * finish, or we might get incomplete data.) + */ + for (i = 0; i < nworkers; i++) InstrAccumParallelQuery(&pei->buffer_usage[i]); - /* Finally, accumulate instrumentation, if any. */ - if (pei->instrumentation) - ExecParallelRetrieveInstrumentation(pei->planstate, - pei->instrumentation); - pei->finished = true; } /* - * Clean up whatever ParallelExecutorInfo resources still exist after - * ExecParallelFinish. We separate these routines because someone might - * want to examine the contents of the DSM after ExecParallelFinish and - * before calling this routine. + * Accumulate instrumentation, and then clean up whatever ParallelExecutorInfo + * resources still exist after ExecParallelFinish. We separate these + * routines because someone might want to examine the contents of the DSM + * after ExecParallelFinish and before calling this routine. */ void ExecParallelCleanup(ParallelExecutorInfo *pei) { + /* Accumulate instrumentation, if any. */ + if (pei->instrumentation) + ExecParallelRetrieveInstrumentation(pei->planstate, + pei->instrumentation); + + /* Accumulate JIT instrumentation, if any. */ + if (pei->jit_instrumentation) + ExecParallelRetrieveJitInstrumentation(pei->planstate, + pei->jit_instrumentation); + + /* Free any serialized parameters. */ + if (DsaPointerIsValid(pei->param_exec)) + { + dsa_free(pei->area, pei->param_exec); + pei->param_exec = InvalidDsaPointer; + } if (pei->area != NULL) { dsa_detach(pei->area); @@ -712,17 +1194,10 @@ ExecParallelGetQueryDesc(shm_toc *toc, DestReceiver *receiver, pstmt = (PlannedStmt *) stringToNode(pstmtspace); /* Reconstruct ParamListInfo. */ - paramspace = shm_toc_lookup(toc, PARALLEL_KEY_PARAMS, false); + paramspace = shm_toc_lookup(toc, PARALLEL_KEY_PARAMLISTINFO, false); paramLI = RestoreParamList(¶mspace); - /* - * Create a QueryDesc for the query. - * - * It's not obvious how to obtain the query string from here; and even if - * we could copying it would take more cycles than not copying it. But - * it's a bit unsatisfying to just use a dummy string here, so consider - * revising this someday. - */ + /* Create a QueryDesc for the query. */ return CreateQueryDesc(pstmt, queryString, GetActiveSnapshot(), InvalidSnapshot, @@ -775,43 +1250,66 @@ ExecParallelReportInstrumentation(PlanState *planstate, * is allocated and initialized by executor; that is, after ExecutorStart(). 
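
Taken together, the leader-side entry points above (ExecInitParallelPlan, ExecParallelCreateReaders, ExecParallelReinitialize, ExecParallelFinish, ExecParallelCleanup) form one lifecycle. A condensed sketch of the intended call order from a Gather-like caller, with error handling and the actual tuple reading elided; the function name is illustrative and not part of the patch:

    static void
    parallel_leader_lifecycle_sketch(PlanState *planstate, EState *estate,
                                     Bitmapset *sendParams, int nworkers,
                                     int64 tuples_needed)
    {
        ParallelExecutorInfo *pei;

        pei = ExecInitParallelPlan(planstate, estate, sendParams,
                                   nworkers, tuples_needed);
        LaunchParallelWorkers(pei->pcxt);
        ExecParallelCreateReaders(pei);     /* attach tuple queues to the workers */

        /* ... fetch tuples through pei->reader[] until no more are wanted ... */

        ExecParallelFinish(pei);    /* detach queues, wait, accumulate buffer usage */

        /*
         * A rescan would call ExecParallelReinitialize(planstate, pei,
         * sendParams) at this point and then launch workers and create
         * readers again.
         */

        ExecParallelCleanup(pei);   /* retrieve instrumentation, release shared resources */
    }
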
*/ static bool -ExecParallelInitializeWorker(PlanState *planstate, shm_toc *toc) +ExecParallelInitializeWorker(PlanState *planstate, ParallelWorkerContext *pwcxt) { if (planstate == NULL) return false; - /* Call initializers for parallel-aware plan nodes. */ - if (planstate->plan->parallel_aware) + switch (nodeTag(planstate)) { - switch (nodeTag(planstate)) - { - case T_SeqScanState: - ExecSeqScanInitializeWorker((SeqScanState *) planstate, toc); - break; - case T_IndexScanState: - ExecIndexScanInitializeWorker((IndexScanState *) planstate, toc); - break; - case T_IndexOnlyScanState: - ExecIndexOnlyScanInitializeWorker((IndexOnlyScanState *) planstate, toc); - break; - case T_ForeignScanState: + case T_SeqScanState: + if (planstate->plan->parallel_aware) + ExecSeqScanInitializeWorker((SeqScanState *) planstate, pwcxt); + break; + case T_IndexScanState: + if (planstate->plan->parallel_aware) + ExecIndexScanInitializeWorker((IndexScanState *) planstate, + pwcxt); + break; + case T_IndexOnlyScanState: + if (planstate->plan->parallel_aware) + ExecIndexOnlyScanInitializeWorker((IndexOnlyScanState *) planstate, + pwcxt); + break; + case T_ForeignScanState: + if (planstate->plan->parallel_aware) ExecForeignScanInitializeWorker((ForeignScanState *) planstate, - toc); - break; - case T_CustomScanState: + pwcxt); + break; + case T_AppendState: + if (planstate->plan->parallel_aware) + ExecAppendInitializeWorker((AppendState *) planstate, pwcxt); + break; + case T_CustomScanState: + if (planstate->plan->parallel_aware) ExecCustomScanInitializeWorker((CustomScanState *) planstate, - toc); - break; - case T_BitmapHeapScanState: - ExecBitmapHeapInitializeWorker( - (BitmapHeapScanState *) planstate, toc); - break; - default: - break; - } + pwcxt); + break; + case T_BitmapHeapScanState: + if (planstate->plan->parallel_aware) + ExecBitmapHeapInitializeWorker((BitmapHeapScanState *) planstate, + pwcxt); + break; + case T_HashJoinState: + if (planstate->plan->parallel_aware) + ExecHashJoinInitializeWorker((HashJoinState *) planstate, + pwcxt); + break; + case T_HashState: + /* even when not parallel-aware, for EXPLAIN ANALYZE */ + ExecHashInitializeWorker((HashState *) planstate, pwcxt); + break; + case T_SortState: + /* even when not parallel-aware, for EXPLAIN ANALYZE */ + ExecSortInitializeWorker((SortState *) planstate, pwcxt); + break; + + default: + break; } - return planstate_tree_walker(planstate, ExecParallelInitializeWorker, toc); + return planstate_tree_walker(planstate, ExecParallelInitializeWorker, + pwcxt); } /* @@ -833,19 +1331,27 @@ ExecParallelInitializeWorker(PlanState *planstate, shm_toc *toc) void ParallelQueryMain(dsm_segment *seg, shm_toc *toc) { + FixedParallelExecutorState *fpes; BufferUsage *buffer_usage; DestReceiver *receiver; QueryDesc *queryDesc; SharedExecutorInstrumentation *instrumentation; + SharedJitInstrumentation *jit_instrumentation; int instrument_options = 0; void *area_space; dsa_area *area; + ParallelWorkerContext pwcxt; + + /* Get fixed-size state. */ + fpes = shm_toc_lookup(toc, PARALLEL_KEY_EXECUTOR_FIXED, false); /* Set up DestReceiver, SharedExecutorInstrumentation, and QueryDesc. 
*/ receiver = ExecParallelGetReceiver(seg, toc); instrumentation = shm_toc_lookup(toc, PARALLEL_KEY_INSTRUMENTATION, true); if (instrumentation != NULL) instrument_options = instrumentation->instrument_options; + jit_instrumentation = shm_toc_lookup(toc, PARALLEL_KEY_JIT_INSTRUMENTATION, + true); queryDesc = ExecParallelGetQueryDesc(toc, receiver, instrument_options); /* Setting debug_query_string for individual workers */ @@ -854,22 +1360,48 @@ ParallelQueryMain(dsm_segment *seg, shm_toc *toc) /* Report workers' query for monitoring purposes */ pgstat_report_activity(STATE_RUNNING, debug_query_string); - /* Prepare to track buffer usage during query execution. */ - InstrStartParallelQuery(); - /* Attach to the dynamic shared memory area. */ area_space = shm_toc_lookup(toc, PARALLEL_KEY_DSA, false); area = dsa_attach_in_place(area_space, seg); /* Start up the executor */ - ExecutorStart(queryDesc, 0); + queryDesc->plannedstmt->jitFlags = fpes->jit_flags; + ExecutorStart(queryDesc, fpes->eflags); /* Special executor initialization steps for parallel workers */ queryDesc->planstate->state->es_query_dsa = area; - ExecParallelInitializeWorker(queryDesc->planstate, toc); + if (DsaPointerIsValid(fpes->param_exec)) + { + char *paramexec_space; + + paramexec_space = dsa_get_address(area, fpes->param_exec); + RestoreParamExecParams(paramexec_space, queryDesc->estate); + + } + pwcxt.toc = toc; + pwcxt.seg = seg; + ExecParallelInitializeWorker(queryDesc->planstate, &pwcxt); - /* Run the plan */ - ExecutorRun(queryDesc, ForwardScanDirection, 0L, true); + /* Pass down any tuple bound */ + ExecSetTupleBound(fpes->tuples_needed, queryDesc->planstate); + + /* + * Prepare to track buffer usage during query execution. + * + * We do this after starting up the executor to match what happens in the + * leader, which also doesn't count buffer accesses that occur during + * executor startup. + */ + InstrStartParallelQuery(); + + /* + * Run the plan. If we specified a tuple bound, be careful not to demand + * more tuples than that. + */ + ExecutorRun(queryDesc, + ForwardScanDirection, + fpes->tuples_needed < 0 ? (int64) 0 : fpes->tuples_needed, + true); /* Shut down the executor */ ExecutorFinish(queryDesc); @@ -883,11 +1415,19 @@ ParallelQueryMain(dsm_segment *seg, shm_toc *toc) ExecParallelReportInstrumentation(queryDesc->planstate, instrumentation); + /* Report JIT instrumentation data if any */ + if (queryDesc->estate->es_jit && jit_instrumentation != NULL) + { + Assert(ParallelWorkerNumber < jit_instrumentation->num_workers); + jit_instrumentation->jit_instr[ParallelWorkerNumber] = + queryDesc->estate->es_jit->instr; + } + /* Must do this after capturing instrumentation. */ ExecutorEnd(queryDesc); /* Cleanup. */ dsa_detach(area); FreeQueryDesc(queryDesc); - (*receiver->rDestroy) (receiver); + receiver->rDestroy(receiver); } diff --git a/src/backend/executor/execPartition.c b/src/backend/executor/execPartition.c new file mode 100644 index 0000000000..1e72e9fb3f --- /dev/null +++ b/src/backend/executor/execPartition.c @@ -0,0 +1,1874 @@ +/*------------------------------------------------------------------------- + * + * execPartition.c + * Support routines for partitioning. 
+ * + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group + * Portions Copyright (c) 1994, Regents of the University of California + * + * IDENTIFICATION + * src/backend/executor/execPartition.c + * + *------------------------------------------------------------------------- + */ +#include "postgres.h" + +#include "catalog/partition.h" +#include "catalog/pg_inherits.h" +#include "catalog/pg_type.h" +#include "executor/execPartition.h" +#include "executor/executor.h" +#include "foreign/fdwapi.h" +#include "mb/pg_wchar.h" +#include "miscadmin.h" +#include "nodes/makefuncs.h" +#include "partitioning/partbounds.h" +#include "partitioning/partprune.h" +#include "rewrite/rewriteManip.h" +#include "utils/lsyscache.h" +#include "utils/partcache.h" +#include "utils/rel.h" +#include "utils/rls.h" +#include "utils/ruleutils.h" + + +/*----------------------- + * PartitionDispatch - information about one partitioned table in a partition + * hierarchy required to route a tuple to one of its partitions + * + * reldesc Relation descriptor of the table + * key Partition key information of the table + * keystate Execution state required for expressions in the partition key + * partdesc Partition descriptor of the table + * tupslot A standalone TupleTableSlot initialized with this table's tuple + * descriptor + * tupmap TupleConversionMap to convert from the parent's rowtype to + * this table's rowtype (when extracting the partition key of a + * tuple just before routing it through this table) + * indexes Array with partdesc->nparts members (for details on what + * individual members represent, see how they are set in + * get_partition_dispatch_recurse()) + *----------------------- + */ +typedef struct PartitionDispatchData +{ + Relation reldesc; + PartitionKey key; + List *keystate; /* list of ExprState */ + PartitionDesc partdesc; + TupleTableSlot *tupslot; + AttrNumber *tupmap; + int *indexes; +} PartitionDispatchData; + + +static PartitionDispatch *RelationGetPartitionDispatchInfo(Relation rel, + int *num_parted, List **leaf_part_oids); +static void get_partition_dispatch_recurse(Relation rel, Relation parent, + List **pds, List **leaf_part_oids); +static void FormPartitionKeyDatum(PartitionDispatch pd, + TupleTableSlot *slot, + EState *estate, + Datum *values, + bool *isnull); +static int get_partition_for_tuple(PartitionDispatch pd, Datum *values, + bool *isnull); +static char *ExecBuildSlotPartitionKeyDescription(Relation rel, + Datum *values, + bool *isnull, + int maxfieldlen); +static List *adjust_partition_tlist(List *tlist, TupleConversionMap *map); +static void find_matching_subplans_recurse(PartitionPruningData *prunedata, + PartitionedRelPruningData *pprune, + bool initial_prune, + Bitmapset **validsubplans); + + +/* + * ExecSetupPartitionTupleRouting - sets up information needed during + * tuple routing for partitioned tables, encapsulates it in + * PartitionTupleRouting, and returns it. + * + * Note that all the relations in the partition tree are locked using the + * RowExclusiveLock mode upon return from this function. + * + * While we allocate the arrays of pointers of ResultRelInfo and + * TupleConversionMap for all partitions here, actual objects themselves are + * lazily allocated for a given partition if a tuple is actually routed to it; + * see ExecInitPartitionInfo. 
However, if the function is invoked for update + * tuple routing, caller would already have initialized ResultRelInfo's for + * some of the partitions, which are reused and assigned to their respective + * slot in the aforementioned array. For such partitions, we delay setting + * up objects such as TupleConversionMap until those are actually chosen as + * the partitions to route tuples to. See ExecPrepareTupleRouting. + */ +PartitionTupleRouting * +ExecSetupPartitionTupleRouting(ModifyTableState *mtstate, Relation rel) +{ + List *leaf_parts; + ListCell *cell; + int i; + ResultRelInfo *update_rri = NULL; + int num_update_rri = 0, + update_rri_index = 0; + PartitionTupleRouting *proute; + int nparts; + ModifyTable *node = mtstate ? (ModifyTable *) mtstate->ps.plan : NULL; + + /* + * Get the information about the partition tree after locking all the + * partitions. + */ + (void) find_all_inheritors(RelationGetRelid(rel), RowExclusiveLock, NULL); + proute = (PartitionTupleRouting *) palloc0(sizeof(PartitionTupleRouting)); + proute->partition_dispatch_info = + RelationGetPartitionDispatchInfo(rel, &proute->num_dispatch, + &leaf_parts); + proute->num_partitions = nparts = list_length(leaf_parts); + proute->partitions = + (ResultRelInfo **) palloc(nparts * sizeof(ResultRelInfo *)); + proute->parent_child_tupconv_maps = + (TupleConversionMap **) palloc0(nparts * sizeof(TupleConversionMap *)); + proute->partition_oids = (Oid *) palloc(nparts * sizeof(Oid)); + + /* Set up details specific to the type of tuple routing we are doing. */ + if (node && node->operation == CMD_UPDATE) + { + update_rri = mtstate->resultRelInfo; + num_update_rri = list_length(node->plans); + proute->subplan_partition_offsets = + palloc(num_update_rri * sizeof(int)); + proute->num_subplan_partition_offsets = num_update_rri; + + /* + * We need an additional tuple slot for storing transient tuples that + * are converted to the root table descriptor. + */ + proute->root_tuple_slot = MakeTupleTableSlot(RelationGetDescr(rel)); + } + + i = 0; + foreach(cell, leaf_parts) + { + ResultRelInfo *leaf_part_rri = NULL; + Oid leaf_oid = lfirst_oid(cell); + + proute->partition_oids[i] = leaf_oid; + + /* + * If the leaf partition is already present in the per-subplan result + * rels, we re-use that rather than initialize a new result rel. The + * per-subplan resultrels and the resultrels of the leaf partitions + * are both in the same canonical order. So while going through the + * leaf partition oids, we need to keep track of the next per-subplan + * result rel to be looked for in the leaf partition resultrels. + */ + if (update_rri_index < num_update_rri && + RelationGetRelid(update_rri[update_rri_index].ri_RelationDesc) == leaf_oid) + { + leaf_part_rri = &update_rri[update_rri_index]; + + /* + * This is required in order to convert the partition's tuple to + * be compatible with the root partitioned table's tuple + * descriptor. When generating the per-subplan result rels, this + * was not set. + */ + leaf_part_rri->ri_PartitionRoot = rel; + + /* Remember the subplan offset for this ResultRelInfo */ + proute->subplan_partition_offsets[update_rri_index] = i; + + update_rri_index++; + } + + proute->partitions[i] = leaf_part_rri; + i++; + } + + /* + * For UPDATE, we should have found all the per-subplan resultrels in the + * leaf partitions. (If this is an INSERT, both values will be zero.) 
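
The comments above describe a lazy protocol: ExecSetupPartitionTupleRouting only fills in the arrays, and per-partition objects are built on first use. A minimal sketch (not part of the patch; the function name is illustrative) of how a caller combines it with ExecFindPartition and ExecInitPartitionInfo, both defined further down in this file:

    static ResultRelInfo *
    route_one_tuple_sketch(ModifyTableState *mtstate,
                           ResultRelInfo *rootResultRelInfo,
                           PartitionTupleRouting *proute,
                           TupleTableSlot *slot, EState *estate)
    {
        int         partidx;

        /* pick the leaf partition for this tuple; errors out if there is none */
        partidx = ExecFindPartition(rootResultRelInfo,
                                    proute->partition_dispatch_info,
                                    slot, estate);

        /* first tuple routed to this partition?  build its ResultRelInfo now */
        if (proute->partitions[partidx] == NULL)
            ExecInitPartitionInfo(mtstate, rootResultRelInfo,
                                  proute, estate, partidx);

        return proute->partitions[partidx];
    }
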
+ */ + Assert(update_rri_index == num_update_rri); + + return proute; +} + +/* + * ExecFindPartition -- Find a leaf partition in the partition tree rooted + * at parent, for the heap tuple contained in *slot + * + * estate must be non-NULL; we'll need it to compute any expressions in the + * partition key(s) + * + * If no leaf partition is found, this routine errors out with the appropriate + * error message, else it returns the leaf partition sequence number + * as an index into the array of (ResultRelInfos of) all leaf partitions in + * the partition tree. + */ +int +ExecFindPartition(ResultRelInfo *resultRelInfo, PartitionDispatch *pd, + TupleTableSlot *slot, EState *estate) +{ + int result; + Datum values[PARTITION_MAX_KEYS]; + bool isnull[PARTITION_MAX_KEYS]; + Relation rel; + PartitionDispatch dispatch; + ExprContext *ecxt = GetPerTupleExprContext(estate); + TupleTableSlot *ecxt_scantuple_old = ecxt->ecxt_scantuple; + TupleTableSlot *myslot = NULL; + MemoryContext oldcxt; + + /* use per-tuple context here to avoid leaking memory */ + oldcxt = MemoryContextSwitchTo(GetPerTupleMemoryContext(estate)); + + /* + * First check the root table's partition constraint, if any. No point in + * routing the tuple if it doesn't belong in the root table itself. + */ + if (resultRelInfo->ri_PartitionCheck) + ExecPartitionCheck(resultRelInfo, slot, estate, true); + + /* start with the root partitioned table */ + dispatch = pd[0]; + while (true) + { + AttrNumber *map = dispatch->tupmap; + int cur_index = -1; + + rel = dispatch->reldesc; + + /* + * Convert the tuple to this parent's layout, if different from the + * current relation. + */ + myslot = dispatch->tupslot; + if (myslot != NULL && map != NULL) + slot = execute_attr_map_slot(map, slot, myslot); + + /* + * Extract partition key from tuple. Expression evaluation machinery + * that FormPartitionKeyDatum() invokes expects ecxt_scantuple to + * point to the correct tuple slot. The slot might have changed from + * what was used for the parent table if the table of the current + * partitioning level has different tuple descriptor from the parent. + * So update ecxt_scantuple accordingly. + */ + ecxt->ecxt_scantuple = slot; + FormPartitionKeyDatum(dispatch, slot, estate, values, isnull); + + /* + * Nothing for get_partition_for_tuple() to do if there are no + * partitions to begin with. + */ + if (dispatch->partdesc->nparts == 0) + { + result = -1; + break; + } + + cur_index = get_partition_for_tuple(dispatch, values, isnull); + + /* + * cur_index < 0 means we failed to find a partition of this parent. + * cur_index >= 0 means we either found the leaf partition, or the + * next parent to find a partition of. + */ + if (cur_index < 0) + { + result = -1; + break; + } + else if (dispatch->indexes[cur_index] >= 0) + { + result = dispatch->indexes[cur_index]; + /* success! */ + break; + } + else + { + /* move down one level */ + dispatch = pd[-dispatch->indexes[cur_index]]; + } + } + + /* Release the tuple in the lowest parent's dedicated slot. */ + if (slot == myslot) + ExecClearTuple(myslot); + + /* A partition was not found. */ + if (result < 0) + { + char *val_desc; + + val_desc = ExecBuildSlotPartitionKeyDescription(rel, + values, isnull, 64); + Assert(OidIsValid(RelationGetRelid(rel))); + ereport(ERROR, + (errcode(ERRCODE_CHECK_VIOLATION), + errmsg("no partition of relation \"%s\" found for row", + RelationGetRelationName(rel)), + val_desc ? 
errdetail("Partition key of the failing row contains %s.", val_desc) : 0)); + } + + MemoryContextSwitchTo(oldcxt); + ecxt->ecxt_scantuple = ecxt_scantuple_old; + + return result; +} + +/* + * ExecInitPartitionInfo + * Initialize ResultRelInfo and other information for a partition + * + * Returns the ResultRelInfo + */ +ResultRelInfo * +ExecInitPartitionInfo(ModifyTableState *mtstate, + ResultRelInfo *resultRelInfo, + PartitionTupleRouting *proute, + EState *estate, int partidx) +{ + ModifyTable *node = (ModifyTable *) mtstate->ps.plan; + Relation rootrel = resultRelInfo->ri_RelationDesc, + partrel; + Relation firstResultRel = mtstate->resultRelInfo[0].ri_RelationDesc; + ResultRelInfo *leaf_part_rri; + MemoryContext oldContext; + AttrNumber *part_attnos = NULL; + bool found_whole_row; + + /* + * We locked all the partitions in ExecSetupPartitionTupleRouting + * including the leaf partitions. + */ + partrel = heap_open(proute->partition_oids[partidx], NoLock); + + /* + * Keep ResultRelInfo and other information for this partition in the + * per-query memory context so they'll survive throughout the query. + */ + oldContext = MemoryContextSwitchTo(estate->es_query_cxt); + + leaf_part_rri = makeNode(ResultRelInfo); + InitResultRelInfo(leaf_part_rri, + partrel, + node ? node->rootRelation : 1, + rootrel, + estate->es_instrument); + + /* + * Verify result relation is a valid target for an INSERT. An UPDATE of a + * partition-key becomes a DELETE+INSERT operation, so this check is still + * required when the operation is CMD_UPDATE. + */ + CheckValidResultRel(leaf_part_rri, CMD_INSERT); + + /* + * Since we've just initialized this ResultRelInfo, it's not in any list + * attached to the estate as yet. Add it, so that it can be found later. + * + * Note that the entries in this list appear in no predetermined order, + * because partition result rels are initialized as and when they're + * needed. + */ + estate->es_tuple_routing_result_relations = + lappend(estate->es_tuple_routing_result_relations, + leaf_part_rri); + + /* + * Open partition indices. The user may have asked to check for conflicts + * within this leaf partition and do "nothing" instead of throwing an + * error. Be prepared in that case by initializing the index information + * needed by ExecInsert() to perform speculative insertions. + */ + if (partrel->rd_rel->relhasindex && + leaf_part_rri->ri_IndexRelationDescs == NULL) + ExecOpenIndices(leaf_part_rri, + (node != NULL && + node->onConflictAction != ONCONFLICT_NONE)); + + /* + * Build WITH CHECK OPTION constraints for the partition. Note that we + * didn't build the withCheckOptionList for partitions within the planner, + * but simple translation of varattnos will suffice. This only occurs for + * the INSERT case or in the case of UPDATE tuple routing where we didn't + * find a result rel to reuse in ExecSetupPartitionTupleRouting(). + */ + if (node && node->withCheckOptionLists != NIL) + { + List *wcoList; + List *wcoExprs = NIL; + ListCell *ll; + int firstVarno = mtstate->resultRelInfo[0].ri_RangeTableIndex; + + /* + * In the case of INSERT on a partitioned table, there is only one + * plan. Likewise, there is only one WCO list, not one per partition. + * For UPDATE, there are as many WCO lists as there are plans. 
+ */ + Assert((node->operation == CMD_INSERT && + list_length(node->withCheckOptionLists) == 1 && + list_length(node->plans) == 1) || + (node->operation == CMD_UPDATE && + list_length(node->withCheckOptionLists) == + list_length(node->plans))); + + /* + * Use the WCO list of the first plan as a reference to calculate + * attno's for the WCO list of this partition. In the INSERT case, + * that refers to the root partitioned table, whereas in the UPDATE + * tuple routing case, that refers to the first partition in the + * mtstate->resultRelInfo array. In any case, both that relation and + * this partition should have the same columns, so we should be able + * to map attributes successfully. + */ + wcoList = linitial(node->withCheckOptionLists); + + /* + * Convert Vars in it to contain this partition's attribute numbers. + */ + part_attnos = + convert_tuples_by_name_map(RelationGetDescr(partrel), + RelationGetDescr(firstResultRel), + gettext_noop("could not convert row type")); + wcoList = (List *) + map_variable_attnos((Node *) wcoList, + firstVarno, 0, + part_attnos, + RelationGetDescr(firstResultRel)->natts, + RelationGetForm(partrel)->reltype, + &found_whole_row); + /* We ignore the value of found_whole_row. */ + + foreach(ll, wcoList) + { + WithCheckOption *wco = castNode(WithCheckOption, lfirst(ll)); + ExprState *wcoExpr = ExecInitQual(castNode(List, wco->qual), + &mtstate->ps); + + wcoExprs = lappend(wcoExprs, wcoExpr); + } + + leaf_part_rri->ri_WithCheckOptions = wcoList; + leaf_part_rri->ri_WithCheckOptionExprs = wcoExprs; + } + + /* + * Build the RETURNING projection for the partition. Note that we didn't + * build the returningList for partitions within the planner, but simple + * translation of varattnos will suffice. This only occurs for the INSERT + * case or in the case of UPDATE tuple routing where we didn't find a + * result rel to reuse in ExecSetupPartitionTupleRouting(). + */ + if (node && node->returningLists != NIL) + { + TupleTableSlot *slot; + ExprContext *econtext; + List *returningList; + int firstVarno = mtstate->resultRelInfo[0].ri_RangeTableIndex; + + /* See the comment above for WCO lists. */ + Assert((node->operation == CMD_INSERT && + list_length(node->returningLists) == 1 && + list_length(node->plans) == 1) || + (node->operation == CMD_UPDATE && + list_length(node->returningLists) == + list_length(node->plans))); + + /* + * Use the RETURNING list of the first plan as a reference to + * calculate attno's for the RETURNING list of this partition. See + * the comment above for WCO lists for more details on why this is + * okay. + */ + returningList = linitial(node->returningLists); + + /* + * Convert Vars in it to contain this partition's attribute numbers. + */ + if (part_attnos == NULL) + part_attnos = + convert_tuples_by_name_map(RelationGetDescr(partrel), + RelationGetDescr(firstResultRel), + gettext_noop("could not convert row type")); + returningList = (List *) + map_variable_attnos((Node *) returningList, + firstVarno, 0, + part_attnos, + RelationGetDescr(firstResultRel)->natts, + RelationGetForm(partrel)->reltype, + &found_whole_row); + /* We ignore the value of found_whole_row. */ + + leaf_part_rri->ri_returningList = returningList; + + /* + * Initialize the projection itself. + * + * Use the slot and the expression context that would have been set up + * in ExecInitModifyTable() for projection's output. 
+ */ + Assert(mtstate->ps.ps_ResultTupleSlot != NULL); + slot = mtstate->ps.ps_ResultTupleSlot; + Assert(mtstate->ps.ps_ExprContext != NULL); + econtext = mtstate->ps.ps_ExprContext; + leaf_part_rri->ri_projectReturning = + ExecBuildProjectionInfo(returningList, econtext, slot, + &mtstate->ps, RelationGetDescr(partrel)); + } + + /* Set up information needed for routing tuples to the partition. */ + ExecInitRoutingInfo(mtstate, estate, proute, leaf_part_rri, partidx); + + /* + * If there is an ON CONFLICT clause, initialize state for it. + */ + if (node && node->onConflictAction != ONCONFLICT_NONE) + { + TupleConversionMap *map = proute->parent_child_tupconv_maps[partidx]; + int firstVarno = mtstate->resultRelInfo[0].ri_RangeTableIndex; + TupleDesc partrelDesc = RelationGetDescr(partrel); + ExprContext *econtext = mtstate->ps.ps_ExprContext; + ListCell *lc; + List *arbiterIndexes = NIL; + + /* + * If there is a list of arbiter indexes, map it to a list of indexes + * in the partition. We do that by scanning the partition's index + * list and searching for ancestry relationships to each index in the + * ancestor table. + */ + if (list_length(resultRelInfo->ri_onConflictArbiterIndexes) > 0) + { + List *childIdxs; + + childIdxs = RelationGetIndexList(leaf_part_rri->ri_RelationDesc); + + foreach(lc, childIdxs) + { + Oid childIdx = lfirst_oid(lc); + List *ancestors; + ListCell *lc2; + + ancestors = get_partition_ancestors(childIdx); + foreach(lc2, resultRelInfo->ri_onConflictArbiterIndexes) + { + if (list_member_oid(ancestors, lfirst_oid(lc2))) + arbiterIndexes = lappend_oid(arbiterIndexes, childIdx); + } + list_free(ancestors); + } + } + + /* + * If the resulting lists are of inequal length, something is wrong. + * (This shouldn't happen, since arbiter index selection should not + * pick up an invalid index.) + */ + if (list_length(resultRelInfo->ri_onConflictArbiterIndexes) != + list_length(arbiterIndexes)) + elog(ERROR, "invalid arbiter index list"); + leaf_part_rri->ri_onConflictArbiterIndexes = arbiterIndexes; + + /* + * In the DO UPDATE case, we have some more state to initialize. + */ + if (node->onConflictAction == ONCONFLICT_UPDATE) + { + Assert(node->onConflictSet != NIL); + Assert(resultRelInfo->ri_onConflict != NULL); + + /* + * If the partition's tuple descriptor matches exactly the root + * parent (the common case), we can simply re-use the parent's ON + * CONFLICT SET state, skipping a bunch of work. Otherwise, we + * need to create state specific to this partition. + */ + if (map == NULL) + leaf_part_rri->ri_onConflict = resultRelInfo->ri_onConflict; + else + { + List *onconflset; + TupleDesc tupDesc; + bool found_whole_row; + + leaf_part_rri->ri_onConflict = makeNode(OnConflictSetState); + + /* + * Translate expressions in onConflictSet to account for + * different attribute numbers. For that, map partition + * varattnos twice: first to catch the EXCLUDED + * pseudo-relation (INNER_VAR), and second to handle the main + * target relation (firstVarno). + */ + onconflset = (List *) copyObject((Node *) node->onConflictSet); + if (part_attnos == NULL) + part_attnos = + convert_tuples_by_name_map(RelationGetDescr(partrel), + RelationGetDescr(firstResultRel), + gettext_noop("could not convert row type")); + onconflset = (List *) + map_variable_attnos((Node *) onconflset, + INNER_VAR, 0, + part_attnos, + RelationGetDescr(firstResultRel)->natts, + RelationGetForm(partrel)->reltype, + &found_whole_row); + /* We ignore the value of found_whole_row. 
*/ + onconflset = (List *) + map_variable_attnos((Node *) onconflset, + firstVarno, 0, + part_attnos, + RelationGetDescr(firstResultRel)->natts, + RelationGetForm(partrel)->reltype, + &found_whole_row); + /* We ignore the value of found_whole_row. */ + + /* Finally, adjust this tlist to match the partition. */ + onconflset = adjust_partition_tlist(onconflset, map); + + /* + * Build UPDATE SET's projection info. The user of this + * projection is responsible for setting the slot's tupdesc! + * We set aside a tupdesc that's good for the common case of a + * partition that's tupdesc-equal to the partitioned table; + * partitions of different tupdescs must generate their own. + */ + tupDesc = ExecTypeFromTL(onconflset, partrelDesc->tdhasoid); + ExecSetSlotDescriptor(mtstate->mt_conflproj, tupDesc); + leaf_part_rri->ri_onConflict->oc_ProjInfo = + ExecBuildProjectionInfo(onconflset, econtext, + mtstate->mt_conflproj, + &mtstate->ps, partrelDesc); + leaf_part_rri->ri_onConflict->oc_ProjTupdesc = tupDesc; + + /* + * If there is a WHERE clause, initialize state where it will + * be evaluated, mapping the attribute numbers appropriately. + * As with onConflictSet, we need to map partition varattnos + * to the partition's tupdesc. + */ + if (node->onConflictWhere) + { + List *clause; + + clause = copyObject((List *) node->onConflictWhere); + clause = (List *) + map_variable_attnos((Node *) clause, + INNER_VAR, 0, + part_attnos, + RelationGetDescr(firstResultRel)->natts, + RelationGetForm(partrel)->reltype, + &found_whole_row); + /* We ignore the value of found_whole_row. */ + clause = (List *) + map_variable_attnos((Node *) clause, + firstVarno, 0, + part_attnos, + RelationGetDescr(firstResultRel)->natts, + RelationGetForm(partrel)->reltype, + &found_whole_row); + /* We ignore the value of found_whole_row. */ + leaf_part_rri->ri_onConflict->oc_WhereClause = + ExecInitQual((List *) clause, &mtstate->ps); + } + } + } + } + + Assert(proute->partitions[partidx] == NULL); + proute->partitions[partidx] = leaf_part_rri; + + MemoryContextSwitchTo(oldContext); + + return leaf_part_rri; +} + +/* + * ExecInitRoutingInfo + * Set up information needed for routing tuples to a leaf partition + */ +void +ExecInitRoutingInfo(ModifyTableState *mtstate, + EState *estate, + PartitionTupleRouting *proute, + ResultRelInfo *partRelInfo, + int partidx) +{ + MemoryContext oldContext; + + /* + * Switch into per-query memory context. + */ + oldContext = MemoryContextSwitchTo(estate->es_query_cxt); + + /* + * Set up a tuple conversion map to convert a tuple routed to the + * partition from the parent's type to the partition's. + */ + proute->parent_child_tupconv_maps[partidx] = + convert_tuples_by_name(RelationGetDescr(partRelInfo->ri_PartitionRoot), + RelationGetDescr(partRelInfo->ri_RelationDesc), + gettext_noop("could not convert row type")); + + /* + * If a partition has a different rowtype than the root parent, initialize + * a slot dedicated to storing this partition's tuples. The slot is used + * for various operations that are applied to tuples after routing, such + * as checking constraints. + */ + if (proute->parent_child_tupconv_maps[partidx] != NULL) + { + Relation partrel = partRelInfo->ri_RelationDesc; + + /* + * Initialize the array in proute where these slots are stored, if not + * already done. 
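+		 *
+		 * (We only get here when the conversion map built above is
+		 * non-NULL; for example, a partition created or attached with its
+		 * columns in a different order than the root's, or with extra
+		 * dropped columns.  A partition whose descriptor matches the
+		 * root's exactly gets a NULL map and needs no dedicated slot.)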
+ */ + if (proute->partition_tuple_slots == NULL) + proute->partition_tuple_slots = (TupleTableSlot **) + palloc0(proute->num_partitions * + sizeof(TupleTableSlot *)); + + /* + * Initialize the slot itself setting its descriptor to this + * partition's TupleDesc; TupleDesc reference will be released at the + * end of the command. + */ + proute->partition_tuple_slots[partidx] = + ExecInitExtraTupleSlot(estate, + RelationGetDescr(partrel)); + } + + /* + * If the partition is a foreign table, let the FDW init itself for + * routing tuples to the partition. + */ + if (partRelInfo->ri_FdwRoutine != NULL && + partRelInfo->ri_FdwRoutine->BeginForeignInsert != NULL) + partRelInfo->ri_FdwRoutine->BeginForeignInsert(mtstate, partRelInfo); + + MemoryContextSwitchTo(oldContext); + + partRelInfo->ri_PartitionReadyForRouting = true; +} + +/* + * ExecSetupChildParentMapForLeaf -- Initialize the per-leaf-partition + * child-to-root tuple conversion map array. + * + * This map is required for capturing transition tuples when the target table + * is a partitioned table. For a tuple that is routed by an INSERT or UPDATE, + * we need to convert it from the leaf partition to the target table + * descriptor. + */ +void +ExecSetupChildParentMapForLeaf(PartitionTupleRouting *proute) +{ + Assert(proute != NULL); + + /* + * These array elements get filled up with maps on an on-demand basis. + * Initially just set all of them to NULL. + */ + proute->child_parent_tupconv_maps = + (TupleConversionMap **) palloc0(sizeof(TupleConversionMap *) * + proute->num_partitions); + + /* Same is the case for this array. All the values are set to false */ + proute->child_parent_map_not_required = + (bool *) palloc0(sizeof(bool) * proute->num_partitions); +} + +/* + * TupConvMapForLeaf -- Get the tuple conversion map for a given leaf partition + * index. + */ +TupleConversionMap * +TupConvMapForLeaf(PartitionTupleRouting *proute, + ResultRelInfo *rootRelInfo, int leaf_index) +{ + ResultRelInfo **resultRelInfos = proute->partitions; + TupleConversionMap **map; + TupleDesc tupdesc; + + /* Don't call this if we're not supposed to be using this type of map. */ + Assert(proute->child_parent_tupconv_maps != NULL); + + /* If it's already known that we don't need a map, return NULL. */ + if (proute->child_parent_map_not_required[leaf_index]) + return NULL; + + /* If we've already got a map, return it. */ + map = &proute->child_parent_tupconv_maps[leaf_index]; + if (*map != NULL) + return *map; + + /* No map yet; try to create one. */ + tupdesc = RelationGetDescr(resultRelInfos[leaf_index]->ri_RelationDesc); + *map = + convert_tuples_by_name(tupdesc, + RelationGetDescr(rootRelInfo->ri_RelationDesc), + gettext_noop("could not convert row type")); + + /* If it turns out no map is needed, remember for next time. */ + proute->child_parent_map_not_required[leaf_index] = (*map == NULL); + + return *map; +} + +/* + * ExecCleanupTupleRouting -- Clean up objects allocated for partition tuple + * routing. + * + * Close all the partitioned tables, leaf partitions, and their indices. + */ +void +ExecCleanupTupleRouting(ModifyTableState *mtstate, + PartitionTupleRouting *proute) +{ + int i; + int subplan_index = 0; + + /* + * Remember, proute->partition_dispatch_info[0] corresponds to the root + * partitioned table, which we must not try to close, because it is the + * main target table of the query that will be closed by callers such as + * ExecEndPlan() or DoCopy(). Also, tupslot is NULL for the root + * partitioned table. 
+ */ + for (i = 1; i < proute->num_dispatch; i++) + { + PartitionDispatch pd = proute->partition_dispatch_info[i]; + + heap_close(pd->reldesc, NoLock); + ExecDropSingleTupleTableSlot(pd->tupslot); + } + + for (i = 0; i < proute->num_partitions; i++) + { + ResultRelInfo *resultRelInfo = proute->partitions[i]; + + /* skip further processing for uninitialized partitions */ + if (resultRelInfo == NULL) + continue; + + /* Allow any FDWs to shut down if they've been exercised */ + if (resultRelInfo->ri_PartitionReadyForRouting && + resultRelInfo->ri_FdwRoutine != NULL && + resultRelInfo->ri_FdwRoutine->EndForeignInsert != NULL) + resultRelInfo->ri_FdwRoutine->EndForeignInsert(mtstate->ps.state, + resultRelInfo); + + /* + * If this result rel is one of the UPDATE subplan result rels, let + * ExecEndPlan() close it. For INSERT or COPY, + * proute->subplan_partition_offsets will always be NULL. Note that + * the subplan_partition_offsets array and the partitions array have + * the partitions in the same order. So, while we iterate over + * partitions array, we also iterate over the + * subplan_partition_offsets array in order to figure out which of the + * result rels are present in the UPDATE subplans. + */ + if (proute->subplan_partition_offsets && + subplan_index < proute->num_subplan_partition_offsets && + proute->subplan_partition_offsets[subplan_index] == i) + { + subplan_index++; + continue; + } + + ExecCloseIndices(resultRelInfo); + heap_close(resultRelInfo->ri_RelationDesc, NoLock); + } + + /* Release the standalone partition tuple descriptors, if any */ + if (proute->root_tuple_slot) + ExecDropSingleTupleTableSlot(proute->root_tuple_slot); +} + +/* + * RelationGetPartitionDispatchInfo + * Returns information necessary to route tuples down a partition tree + * + * The number of elements in the returned array (that is, the number of + * PartitionDispatch objects for the partitioned tables in the partition tree) + * is returned in *num_parted and a list of the OIDs of all the leaf + * partitions of rel is returned in *leaf_part_oids. + * + * All the relations in the partition tree (including 'rel') must have been + * locked (using at least the AccessShareLock) by the caller. + */ +static PartitionDispatch * +RelationGetPartitionDispatchInfo(Relation rel, + int *num_parted, List **leaf_part_oids) +{ + List *pdlist = NIL; + PartitionDispatchData **pd; + ListCell *lc; + int i; + + Assert(rel->rd_rel->relkind == RELKIND_PARTITIONED_TABLE); + + *num_parted = 0; + *leaf_part_oids = NIL; + + get_partition_dispatch_recurse(rel, NULL, &pdlist, leaf_part_oids); + *num_parted = list_length(pdlist); + pd = (PartitionDispatchData **) palloc(*num_parted * + sizeof(PartitionDispatchData *)); + i = 0; + foreach(lc, pdlist) + { + pd[i++] = lfirst(lc); + } + + return pd; +} + +/* + * get_partition_dispatch_recurse + * Recursively expand partition tree rooted at rel + * + * As the partition tree is expanded in a depth-first manner, we maintain two + * global lists: of PartitionDispatch objects corresponding to partitioned + * tables in *pds and of the leaf partition OIDs in *leaf_part_oids. + * + * Note that the order of OIDs of leaf partitions in leaf_part_oids matches + * the order in which the planner's expand_partitioned_rtentry() processes + * them. 
It's not necessarily the case that the offsets match up exactly, + * because constraint exclusion might prune away some partitions on the + * planner side, whereas we'll always have the complete list; but unpruned + * partitions will appear in the same order in the plan as they are returned + * here. + */ +static void +get_partition_dispatch_recurse(Relation rel, Relation parent, + List **pds, List **leaf_part_oids) +{ + TupleDesc tupdesc = RelationGetDescr(rel); + PartitionDesc partdesc = RelationGetPartitionDesc(rel); + PartitionKey partkey = RelationGetPartitionKey(rel); + PartitionDispatch pd; + int i; + + check_stack_depth(); + + /* Build a PartitionDispatch for this table and add it to *pds. */ + pd = (PartitionDispatch) palloc(sizeof(PartitionDispatchData)); + *pds = lappend(*pds, pd); + pd->reldesc = rel; + pd->key = partkey; + pd->keystate = NIL; + pd->partdesc = partdesc; + if (parent != NULL) + { + /* + * For every partitioned table other than the root, we must store a + * tuple table slot initialized with its tuple descriptor and a tuple + * conversion map to convert a tuple from its parent's rowtype to its + * own. That is to make sure that we are looking at the correct row + * using the correct tuple descriptor when computing its partition key + * for tuple routing. + */ + pd->tupslot = MakeSingleTupleTableSlot(tupdesc); + pd->tupmap = convert_tuples_by_name_map_if_req(RelationGetDescr(parent), + tupdesc, + gettext_noop("could not convert row type")); + } + else + { + /* Not required for the root partitioned table */ + pd->tupslot = NULL; + pd->tupmap = NULL; + } + + /* + * Go look at each partition of this table. If it's a leaf partition, + * simply add its OID to *leaf_part_oids. If it's a partitioned table, + * recursively call get_partition_dispatch_recurse(), so that its + * partitions are processed as well and a corresponding PartitionDispatch + * object gets added to *pds. + * + * The 'indexes' array is used when searching for a partition matching a + * given tuple. The actual value we store here depends on whether the + * array element belongs to a leaf partition or a subpartitioned table. + * For leaf partitions we store the index into *leaf_part_oids, and for + * sub-partitioned tables we store a negative version of the index into + * the *pds list. Both indexes are 0-based, but the first element of the + * *pds list is the root partition, so 0 always means the first leaf. When + * searching, if we see a negative value, the search must continue in the + * corresponding sub-partition; otherwise, we've identified the correct + * partition. + */ + pd->indexes = (int *) palloc(partdesc->nparts * sizeof(int)); + for (i = 0; i < partdesc->nparts; i++) + { + Oid partrelid = partdesc->oids[i]; + + if (get_rel_relkind(partrelid) != RELKIND_PARTITIONED_TABLE) + { + *leaf_part_oids = lappend_oid(*leaf_part_oids, partrelid); + pd->indexes[i] = list_length(*leaf_part_oids) - 1; + } + else + { + /* + * We assume all tables in the partition tree were already locked + * by the caller. + */ + Relation partrel = heap_open(partrelid, NoLock); + + pd->indexes[i] = -list_length(*pds); + get_partition_dispatch_recurse(partrel, rel, pds, leaf_part_oids); + } + } +} + +/* ---------------- + * FormPartitionKeyDatum + * Construct values[] and isnull[] arrays for the partition key + * of a tuple. 
+ * + * pd Partition dispatch object of the partitioned table + * slot Heap tuple from which to extract partition key + * estate executor state for evaluating any partition key + * expressions (must be non-NULL) + * values Array of partition key Datums (output area) + * isnull Array of is-null indicators (output area) + * + * the ecxt_scantuple slot of estate's per-tuple expr context must point to + * the heap tuple passed in. + * ---------------- + */ +static void +FormPartitionKeyDatum(PartitionDispatch pd, + TupleTableSlot *slot, + EState *estate, + Datum *values, + bool *isnull) +{ + ListCell *partexpr_item; + int i; + + if (pd->key->partexprs != NIL && pd->keystate == NIL) + { + /* Check caller has set up context correctly */ + Assert(estate != NULL && + GetPerTupleExprContext(estate)->ecxt_scantuple == slot); + + /* First time through, set up expression evaluation state */ + pd->keystate = ExecPrepareExprList(pd->key->partexprs, estate); + } + + partexpr_item = list_head(pd->keystate); + for (i = 0; i < pd->key->partnatts; i++) + { + AttrNumber keycol = pd->key->partattrs[i]; + Datum datum; + bool isNull; + + if (keycol != 0) + { + /* Plain column; get the value directly from the heap tuple */ + datum = slot_getattr(slot, keycol, &isNull); + } + else + { + /* Expression; need to evaluate it */ + if (partexpr_item == NULL) + elog(ERROR, "wrong number of partition key expressions"); + datum = ExecEvalExprSwitchContext((ExprState *) lfirst(partexpr_item), + GetPerTupleExprContext(estate), + &isNull); + partexpr_item = lnext(partexpr_item); + } + values[i] = datum; + isnull[i] = isNull; + } + + if (partexpr_item != NULL) + elog(ERROR, "wrong number of partition key expressions"); +} + +/* + * get_partition_for_tuple + * Finds partition of relation which accepts the partition key specified + * in values and isnull + * + * Return value is index of the partition (>= 0 and < partdesc->nparts) if one + * found or -1 if none found. + */ +static int +get_partition_for_tuple(PartitionDispatch pd, Datum *values, bool *isnull) +{ + int bound_offset; + int part_index = -1; + PartitionKey key = pd->key; + PartitionDesc partdesc = pd->partdesc; + PartitionBoundInfo boundinfo = partdesc->boundinfo; + + /* Route as appropriate based on partitioning strategy. */ + switch (key->strategy) + { + case PARTITION_STRATEGY_HASH: + { + int greatest_modulus; + uint64 rowHash; + + greatest_modulus = get_hash_partition_greatest_modulus(boundinfo); + rowHash = compute_partition_hash_value(key->partnatts, + key->partsupfunc, + values, isnull); + + part_index = boundinfo->indexes[rowHash % greatest_modulus]; + } + break; + + case PARTITION_STRATEGY_LIST: + if (isnull[0]) + { + if (partition_bound_accepts_nulls(boundinfo)) + part_index = boundinfo->null_index; + } + else + { + bool equal = false; + + bound_offset = partition_list_bsearch(key->partsupfunc, + key->partcollation, + boundinfo, + values[0], &equal); + if (bound_offset >= 0 && equal) + part_index = boundinfo->indexes[bound_offset]; + } + break; + + case PARTITION_STRATEGY_RANGE: + { + bool equal = false, + range_partkey_has_null = false; + int i; + + /* + * No range includes NULL, so this will be accepted by the + * default partition if there is one, and otherwise rejected. 
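+				 * For example, with PARTITION BY RANGE (a, b), a row with
+				 * b = NULL can only be routed to a DEFAULT partition.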
+ */ + for (i = 0; i < key->partnatts; i++) + { + if (isnull[i]) + { + range_partkey_has_null = true; + break; + } + } + + if (!range_partkey_has_null) + { + bound_offset = partition_range_datum_bsearch(key->partsupfunc, + key->partcollation, + boundinfo, + key->partnatts, + values, + &equal); + + /* + * The bound at bound_offset is less than or equal to the + * tuple value, so the bound at offset+1 is the upper + * bound of the partition we're looking for, if there + * actually exists one. + */ + part_index = boundinfo->indexes[bound_offset + 1]; + } + } + break; + + default: + elog(ERROR, "unexpected partition strategy: %d", + (int) key->strategy); + } + + /* + * part_index < 0 means we failed to find a partition of this parent. Use + * the default partition, if there is one. + */ + if (part_index < 0) + part_index = boundinfo->default_index; + + return part_index; +} + +/* + * ExecBuildSlotPartitionKeyDescription + * + * This works very much like BuildIndexValueDescription() and is currently + * used for building error messages when ExecFindPartition() fails to find + * partition for a row. + */ +static char * +ExecBuildSlotPartitionKeyDescription(Relation rel, + Datum *values, + bool *isnull, + int maxfieldlen) +{ + StringInfoData buf; + PartitionKey key = RelationGetPartitionKey(rel); + int partnatts = get_partition_natts(key); + int i; + Oid relid = RelationGetRelid(rel); + AclResult aclresult; + + if (check_enable_rls(relid, InvalidOid, true) == RLS_ENABLED) + return NULL; + + /* If the user has table-level access, just go build the description. */ + aclresult = pg_class_aclcheck(relid, GetUserId(), ACL_SELECT); + if (aclresult != ACLCHECK_OK) + { + /* + * Step through the columns of the partition key and make sure the + * user has SELECT rights on all of them. + */ + for (i = 0; i < partnatts; i++) + { + AttrNumber attnum = get_partition_col_attnum(key, i); + + /* + * If this partition key column is an expression, we return no + * detail rather than try to figure out what column(s) the + * expression includes and if the user has SELECT rights on them. + */ + if (attnum == InvalidAttrNumber || + pg_attribute_aclcheck(relid, attnum, GetUserId(), + ACL_SELECT) != ACLCHECK_OK) + return NULL; + } + } + + initStringInfo(&buf); + appendStringInfo(&buf, "(%s) = (", + pg_get_partkeydef_columns(relid, true)); + + for (i = 0; i < partnatts; i++) + { + char *val; + int vallen; + + if (isnull[i]) + val = "null"; + else + { + Oid foutoid; + bool typisvarlena; + + getTypeOutputInfo(get_partition_col_typid(key, i), + &foutoid, &typisvarlena); + val = OidOutputFunctionCall(foutoid, values[i]); + } + + if (i > 0) + appendStringInfoString(&buf, ", "); + + /* truncate if needed */ + vallen = strlen(val); + if (vallen <= maxfieldlen) + appendStringInfoString(&buf, val); + else + { + vallen = pg_mbcliplen(val, vallen, maxfieldlen); + appendBinaryStringInfo(&buf, val, vallen); + appendStringInfoString(&buf, "..."); + } + } + + appendStringInfoChar(&buf, ')'); + + return buf.data; +} + +/* + * adjust_partition_tlist + * Adjust the targetlist entries for a given partition to account for + * attribute differences between parent and the partition + * + * The expressions have already been fixed, but here we fix the list to make + * target resnos match the partition's attribute numbers. 
This results in a + * copy of the original target list in which the entries appear in resno + * order, including both the existing entries (that may have their resno + * changed in-place) and the newly added entries for columns that don't exist + * in the parent. + * + * Scribbles on the input tlist, so callers must make sure to make a copy + * before passing it to us. + */ +static List * +adjust_partition_tlist(List *tlist, TupleConversionMap *map) +{ + List *new_tlist = NIL; + TupleDesc tupdesc = map->outdesc; + AttrNumber *attrMap = map->attrMap; + AttrNumber attrno; + + for (attrno = 1; attrno <= tupdesc->natts; attrno++) + { + Form_pg_attribute att_tup = TupleDescAttr(tupdesc, attrno - 1); + TargetEntry *tle; + + if (attrMap[attrno - 1] != InvalidAttrNumber) + { + Assert(!att_tup->attisdropped); + + /* + * Use the corresponding entry from the parent's tlist, adjusting + * the resno the match the partition's attno. + */ + tle = (TargetEntry *) list_nth(tlist, attrMap[attrno - 1] - 1); + tle->resno = attrno; + } + else + { + Const *expr; + + /* + * For a dropped attribute in the partition, generate a dummy + * entry with resno matching the partition's attno. + */ + Assert(att_tup->attisdropped); + expr = makeConst(INT4OID, + -1, + InvalidOid, + sizeof(int32), + (Datum) 0, + true, /* isnull */ + true /* byval */ ); + tle = makeTargetEntry((Expr *) expr, + attrno, + pstrdup(NameStr(att_tup->attname)), + false); + } + + new_tlist = lappend(new_tlist, tle); + } + + return new_tlist; +} + +/*------------------------------------------------------------------------- + * Run-Time Partition Pruning Support. + * + * The following series of functions exist to support the removal of unneeded + * subplans for queries against partitioned tables. The supporting functions + * here are designed to work with any plan type which supports an arbitrary + * number of subplans, e.g. Append, MergeAppend. + * + * When pruning involves comparison of a partition key to a constant, it's + * done by the planner. However, if we have a comparison to a non-constant + * but not volatile expression, that presents an opportunity for run-time + * pruning by the executor, allowing irrelevant partitions to be skipped + * dynamically. + * + * We must distinguish expressions containing PARAM_EXEC Params from + * expressions that don't contain those. Even though a PARAM_EXEC Param is + * considered to be a stable expression, it can change value from one plan + * node scan to the next during query execution. Stable comparison + * expressions that don't involve such Params allow partition pruning to be + * done once during executor startup. Expressions that do involve such Params + * require us to prune separately for each scan of the parent plan node. + * + * Note that pruning away unneeded subplans during executor startup has the + * added benefit of not having to initialize the unneeded subplans at all. + * + * + * Functions: + * + * ExecCreatePartitionPruneState: + * Creates the PartitionPruneState required by each of the two pruning + * functions. Details stored include how to map the partition index + * returned by the partition pruning code into subplan indexes. + * + * ExecFindInitialMatchingSubPlans: + * Returns indexes of matching subplans. Partition pruning is attempted + * without any evaluation of expressions containing PARAM_EXEC Params. + * This function must be called during executor startup for the parent + * plan before the subplans themselves are initialized. 
Subplans which + * are found not to match by this function must be removed from the + * plan's list of subplans during execution, as this function performs a + * remap of the partition index to subplan index map and the newly + * created map provides indexes only for subplans which remain after + * calling this function. + * + * ExecFindMatchingSubPlans: + * Returns indexes of matching subplans after evaluating all available + * expressions. This function can only be called during execution and + * must be called again each time the value of a Param listed in + * PartitionPruneState's 'execparamids' changes. + *------------------------------------------------------------------------- + */ + +/* + * ExecCreatePartitionPruneState + * Build the data structure required for calling + * ExecFindInitialMatchingSubPlans and ExecFindMatchingSubPlans. + * + * 'planstate' is the parent plan node's execution state. + * + * 'partitionpruneinfo' is a PartitionPruneInfo as generated by + * make_partition_pruneinfo. Here we build a PartitionPruneState containing a + * PartitionPruningData for each partitioning hierarchy (i.e., each sublist of + * partitionpruneinfo->prune_infos), each of which contains a + * PartitionedRelPruningData for each PartitionedRelPruneInfo appearing in + * that sublist. This two-level system is needed to keep from confusing the + * different hierarchies when a UNION ALL contains multiple partitioned tables + * as children. The data stored in each PartitionedRelPruningData can be + * re-used each time we re-evaluate which partitions match the pruning steps + * provided in each PartitionedRelPruneInfo. + */ +PartitionPruneState * +ExecCreatePartitionPruneState(PlanState *planstate, + PartitionPruneInfo *partitionpruneinfo) +{ + EState *estate = planstate->state; + PartitionPruneState *prunestate; + int n_part_hierarchies; + ListCell *lc; + int i; + + n_part_hierarchies = list_length(partitionpruneinfo->prune_infos); + Assert(n_part_hierarchies > 0); + + /* + * Allocate the data structure + */ + prunestate = (PartitionPruneState *) + palloc(offsetof(PartitionPruneState, partprunedata) + + sizeof(PartitionPruningData *) * n_part_hierarchies); + + prunestate->execparamids = NULL; + /* other_subplans can change at runtime, so we need our own copy */ + prunestate->other_subplans = bms_copy(partitionpruneinfo->other_subplans); + prunestate->do_initial_prune = false; /* may be set below */ + prunestate->do_exec_prune = false; /* may be set below */ + prunestate->num_partprunedata = n_part_hierarchies; + + /* + * Create a short-term memory context which we'll use when making calls to + * the partition pruning functions. This avoids possible memory leaks, + * since the pruning functions call comparison functions that aren't under + * our control. 
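+	 *
+	 * The pruning functions below use it in the usual short-lived way,
+	 * roughly:
+	 *
+	 *		oldcontext = MemoryContextSwitchTo(prunestate->prune_context);
+	 *		... evaluate pruning steps into "result" ...
+	 *		MemoryContextSwitchTo(oldcontext);
+	 *		result = bms_copy(result);
+	 *		MemoryContextReset(prunestate->prune_context);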
+ */ + prunestate->prune_context = + AllocSetContextCreate(CurrentMemoryContext, + "Partition Prune", + ALLOCSET_DEFAULT_SIZES); + + i = 0; + foreach(lc, partitionpruneinfo->prune_infos) + { + List *partrelpruneinfos = lfirst_node(List, lc); + int npartrelpruneinfos = list_length(partrelpruneinfos); + PartitionPruningData *prunedata; + ListCell *lc2; + int j; + + prunedata = (PartitionPruningData *) + palloc(offsetof(PartitionPruningData, partrelprunedata) + + npartrelpruneinfos * sizeof(PartitionedRelPruningData)); + prunestate->partprunedata[i] = prunedata; + prunedata->num_partrelprunedata = npartrelpruneinfos; + + j = 0; + foreach(lc2, partrelpruneinfos) + { + PartitionedRelPruneInfo *pinfo = lfirst_node(PartitionedRelPruneInfo, lc2); + PartitionedRelPruningData *pprune = &prunedata->partrelprunedata[j]; + PartitionPruneContext *context = &pprune->context; + Relation partrel; + PartitionDesc partdesc; + PartitionKey partkey; + int partnatts; + int n_steps; + ListCell *lc3; + + /* + * We must copy the subplan_map rather than pointing directly to + * the plan's version, as we may end up making modifications to it + * later. + */ + pprune->subplan_map = palloc(sizeof(int) * pinfo->nparts); + memcpy(pprune->subplan_map, pinfo->subplan_map, + sizeof(int) * pinfo->nparts); + + /* We can use the subpart_map verbatim, since we never modify it */ + pprune->subpart_map = pinfo->subpart_map; + + /* present_parts is also subject to later modification */ + pprune->present_parts = bms_copy(pinfo->present_parts); + + /* + * We can rely on the copies of the partitioned table's partition + * key and partition descriptor appearing in its relcache entry, + * because that entry will be held open and locked for the + * duration of this executor run. + */ + partrel = ExecGetRangeTableRelation(estate, pinfo->rtindex); + partkey = RelationGetPartitionKey(partrel); + partdesc = RelationGetPartitionDesc(partrel); + + n_steps = list_length(pinfo->pruning_steps); + + context->strategy = partkey->strategy; + context->partnatts = partnatts = partkey->partnatts; + context->nparts = pinfo->nparts; + context->boundinfo = partdesc->boundinfo; + context->partcollation = partkey->partcollation; + context->partsupfunc = partkey->partsupfunc; + + /* We'll look up type-specific support functions as needed */ + context->stepcmpfuncs = (FmgrInfo *) + palloc0(sizeof(FmgrInfo) * n_steps * partnatts); + + context->ppccontext = CurrentMemoryContext; + context->planstate = planstate; + + /* Initialize expression state for each expression we need */ + context->exprstates = (ExprState **) + palloc0(sizeof(ExprState *) * n_steps * partnatts); + foreach(lc3, pinfo->pruning_steps) + { + PartitionPruneStepOp *step = (PartitionPruneStepOp *) lfirst(lc3); + ListCell *lc4; + int keyno; + + /* not needed for other step kinds */ + if (!IsA(step, PartitionPruneStepOp)) + continue; + + Assert(list_length(step->exprs) <= partnatts); + + keyno = 0; + foreach(lc4, step->exprs) + { + Expr *expr = (Expr *) lfirst(lc4); + + /* not needed for Consts */ + if (!IsA(expr, Const)) + { + int stateidx = PruneCxtStateIdx(partnatts, + step->step.step_id, + keyno); + + context->exprstates[stateidx] = + ExecInitExpr(expr, context->planstate); + } + keyno++; + } + } + + /* Array is not modified at runtime, so just point to plan's copy */ + context->exprhasexecparam = pinfo->hasexecparam; + + pprune->pruning_steps = pinfo->pruning_steps; + pprune->do_initial_prune = pinfo->do_initial_prune; + pprune->do_exec_prune = pinfo->do_exec_prune; + + /* Record if pruning 
would be useful at any level */ + prunestate->do_initial_prune |= pinfo->do_initial_prune; + prunestate->do_exec_prune |= pinfo->do_exec_prune; + + /* + * Accumulate the IDs of all PARAM_EXEC Params affecting the + * partitioning decisions at this plan node. + */ + prunestate->execparamids = bms_add_members(prunestate->execparamids, + pinfo->execparamids); + + j++; + } + i++; + } + + return prunestate; +} + +/* + * ExecFindInitialMatchingSubPlans + * Identify the set of subplans that cannot be eliminated by initial + * pruning (disregarding any pruning constraints involving PARAM_EXEC + * Params). Also re-map the translation matrix which allows conversion + * of partition indexes into subplan indexes to account for the unneeded + * subplans having been removed. + * + * Must only be called once per 'prunestate', and only if initial pruning + * is required. + * + * 'nsubplans' must be passed as the total number of unpruned subplans. + */ +Bitmapset * +ExecFindInitialMatchingSubPlans(PartitionPruneState *prunestate, int nsubplans) +{ + Bitmapset *result = NULL; + MemoryContext oldcontext; + int i; + + Assert(prunestate->do_initial_prune); + + /* + * Switch to a temp context to avoid leaking memory in the executor's + * memory context. + */ + oldcontext = MemoryContextSwitchTo(prunestate->prune_context); + + /* + * For each hierarchy, do the pruning tests, and add deletable subplans' + * indexes to "result". + */ + for (i = 0; i < prunestate->num_partprunedata; i++) + { + PartitionPruningData *prunedata; + PartitionedRelPruningData *pprune; + + prunedata = prunestate->partprunedata[i]; + pprune = &prunedata->partrelprunedata[0]; + + /* Perform pruning without using PARAM_EXEC Params */ + find_matching_subplans_recurse(prunedata, pprune, true, &result); + + /* Expression eval may have used space in node's ps_ExprContext too */ + ResetExprContext(pprune->context.planstate->ps_ExprContext); + } + + MemoryContextSwitchTo(oldcontext); + + /* Copy result out of the temp context before we reset it */ + result = bms_copy(result); + + /* Add in any subplans that partition pruning didn't account for */ + result = bms_add_members(result, prunestate->other_subplans); + + MemoryContextReset(prunestate->prune_context); + + /* + * If any subplans were pruned, we must re-sequence the subplan indexes so + * that ExecFindMatchingSubPlans properly returns the indexes from the + * subplans which will remain after execution of this function. + */ + if (bms_num_members(result) < nsubplans) + { + int *new_subplan_indexes; + Bitmapset *new_other_subplans; + int i; + int newidx; + + /* + * First we must build a temporary array which maps old subplan + * indexes to new ones. While we're at it, also recompute the + * other_subplans set, since indexes in it may change. + */ + new_subplan_indexes = (int *) palloc(sizeof(int) * nsubplans); + new_other_subplans = NULL; + newidx = 0; + for (i = 0; i < nsubplans; i++) + { + if (bms_is_member(i, result)) + new_subplan_indexes[i] = newidx++; + else + new_subplan_indexes[i] = -1; /* Newly pruned */ + + if (bms_is_member(i, prunestate->other_subplans)) + new_other_subplans = bms_add_member(new_other_subplans, + new_subplan_indexes[i]); + } + bms_free(prunestate->other_subplans); + prunestate->other_subplans = new_other_subplans; + + /* + * Now we can update each PartitionedRelPruneInfo's subplan_map with + * new subplan indexes. We must also recompute its present_parts + * bitmap. 
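+		 *
+		 * For example (with made-up numbers), if subplans 0..4 were
+		 * initialized and only {0, 2, 4} survive, new_subplan_indexes is
+		 * {0, -1, 1, -1, 2}: a subplan_map entry of 2 becomes 1, an entry
+		 * of 4 becomes 2, and entries for pruned subplans become -1.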
+ */ + for (i = 0; i < prunestate->num_partprunedata; i++) + { + PartitionPruningData *prunedata = prunestate->partprunedata[i]; + int j; + + /* + * Within each hierarchy, we perform this loop in back-to-front + * order so that we determine present_parts for the lowest-level + * partitioned tables first. This way we can tell whether a + * sub-partitioned table's partitions were entirely pruned so we + * can exclude that from 'present_parts'. + */ + for (j = prunedata->num_partrelprunedata - 1; j >= 0; j--) + { + PartitionedRelPruningData *pprune = &prunedata->partrelprunedata[j]; + int nparts = pprune->context.nparts; + int k; + + /* We just rebuild present_parts from scratch */ + bms_free(pprune->present_parts); + pprune->present_parts = NULL; + + for (k = 0; k < nparts; k++) + { + int oldidx = pprune->subplan_map[k]; + int subidx; + + /* + * If this partition existed as a subplan then change the + * old subplan index to the new subplan index. The new + * index may become -1 if the partition was pruned above, + * or it may just come earlier in the subplan list due to + * some subplans being removed earlier in the list. If + * it's a subpartition, add it to present_parts unless + * it's entirely pruned. + */ + if (oldidx >= 0) + { + Assert(oldidx < nsubplans); + pprune->subplan_map[k] = new_subplan_indexes[oldidx]; + + if (new_subplan_indexes[oldidx] >= 0) + pprune->present_parts = + bms_add_member(pprune->present_parts, k); + } + else if ((subidx = pprune->subpart_map[k]) >= 0) + { + PartitionedRelPruningData *subprune; + + subprune = &prunedata->partrelprunedata[subidx]; + + if (!bms_is_empty(subprune->present_parts)) + pprune->present_parts = + bms_add_member(pprune->present_parts, k); + } + } + } + } + + pfree(new_subplan_indexes); + } + + return result; +} + +/* + * ExecFindMatchingSubPlans + * Determine which subplans match the pruning steps detailed in + * 'prunestate' for the current comparison expression values. + * + * Here we assume we may evaluate PARAM_EXEC Params. + */ +Bitmapset * +ExecFindMatchingSubPlans(PartitionPruneState *prunestate) +{ + Bitmapset *result = NULL; + MemoryContext oldcontext; + int i; + + /* + * Switch to a temp context to avoid leaking memory in the executor's + * memory context. + */ + oldcontext = MemoryContextSwitchTo(prunestate->prune_context); + + /* + * For each hierarchy, do the pruning tests, and add deletable subplans' + * indexes to "result". 
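+	 *
+	 * (For instance, when a UNION ALL has two partitioned children, each
+	 * child is its own hierarchy: this loop runs once per hierarchy and
+	 * "result" accumulates the surviving subplans of both, with
+	 * other_subplans added afterwards.)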
+ */ + for (i = 0; i < prunestate->num_partprunedata; i++) + { + PartitionPruningData *prunedata; + PartitionedRelPruningData *pprune; + + prunedata = prunestate->partprunedata[i]; + pprune = &prunedata->partrelprunedata[0]; + + find_matching_subplans_recurse(prunedata, pprune, false, &result); + + /* Expression eval may have used space in node's ps_ExprContext too */ + ResetExprContext(pprune->context.planstate->ps_ExprContext); + } + + MemoryContextSwitchTo(oldcontext); + + /* Copy result out of the temp context before we reset it */ + result = bms_copy(result); + + /* Add in any subplans that partition pruning didn't account for */ + result = bms_add_members(result, prunestate->other_subplans); + + MemoryContextReset(prunestate->prune_context); + + return result; +} + +/* + * find_matching_subplans_recurse + * Recursive worker function for ExecFindMatchingSubPlans and + * ExecFindInitialMatchingSubPlans + * + * Adds valid (non-prunable) subplan IDs to *validsubplans + */ +static void +find_matching_subplans_recurse(PartitionPruningData *prunedata, + PartitionedRelPruningData *pprune, + bool initial_prune, + Bitmapset **validsubplans) +{ + Bitmapset *partset; + int i; + + /* Guard against stack overflow due to overly deep partition hierarchy. */ + check_stack_depth(); + + /* Only prune if pruning would be useful at this level. */ + if (initial_prune ? pprune->do_initial_prune : pprune->do_exec_prune) + { + PartitionPruneContext *context = &pprune->context; + + /* Set whether we can evaluate PARAM_EXEC Params or not */ + context->evalexecparams = !initial_prune; + + partset = get_matching_partitions(context, + pprune->pruning_steps); + } + else + { + /* + * If no pruning is to be done, just include all partitions at this + * level. + */ + partset = pprune->present_parts; + } + + /* Translate partset into subplan indexes */ + i = -1; + while ((i = bms_next_member(partset, i)) >= 0) + { + if (pprune->subplan_map[i] >= 0) + *validsubplans = bms_add_member(*validsubplans, + pprune->subplan_map[i]); + else + { + int partidx = pprune->subpart_map[i]; + + if (partidx >= 0) + find_matching_subplans_recurse(prunedata, + &prunedata->partrelprunedata[partidx], + initial_prune, validsubplans); + else + { + /* + * We get here if the planner already pruned all the sub- + * partitions for this partition. Silently ignore this + * partition in this case. The end result is the same: we + * would have pruned all partitions just the same, but we + * don't have any pruning steps to execute to verify this. + */ + } + } + } +} diff --git a/src/backend/executor/execProcnode.c b/src/backend/executor/execProcnode.c index 36d2914249..610cb4b8a9 100644 --- a/src/backend/executor/execProcnode.c +++ b/src/backend/executor/execProcnode.c @@ -7,7 +7,7 @@ * ExecProcNode, or ExecEndNode on its subnodes and do the appropriate * processing. * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * @@ -370,12 +370,7 @@ ExecInitNode(Plan *node, EState *estate, int eflags) break; } - /* - * Add a wrapper around the ExecProcNode callback that checks stack depth - * during the first execution. - */ - result->ExecProcNodeReal = result->ExecProcNode; - result->ExecProcNode = ExecProcNodeFirst; + ExecSetExecProcNode(result, result->ExecProcNode); /* * Initialize any initPlans present in this node. 
The planner put them in @@ -401,6 +396,26 @@ ExecInitNode(Plan *node, EState *estate, int eflags) } +/* + * If a node wants to change its ExecProcNode function after ExecInitNode() + * has finished, it should do so with this function. That way any wrapper + * functions can be reinstalled, without the node having to know how that + * works. + */ +void +ExecSetExecProcNode(PlanState *node, ExecProcNodeMtd function) +{ + /* + * Add a wrapper around the ExecProcNode callback that checks stack depth + * during the first execution and maybe adds an instrumentation wrapper. + * When the callback is changed after execution has already begun that + * means we'll superfluously execute ExecProcNodeFirst, but that seems ok. + */ + node->ExecProcNodeReal = function; + node->ExecProcNode = ExecProcNodeFirst; +} + + /* * ExecProcNode wrapper that performs some one-time checks, before calling * the relevant node method (possibly via an instrumentation wrapper). @@ -721,11 +736,7 @@ ExecEndNode(PlanState *node) * ExecShutdownNode * * Give execution nodes a chance to stop asynchronous resource consumption - * and release any resources still held. Currently, this is only used for - * parallel query, but we might want to extend it to other cases also (e.g. - * FDW). We might also want to call it sooner, as soon as it's evident that - * no more rows will be needed (e.g. when a Limit is filled) rather than only - * at the end of ExecutorRun. + * and release any resources still held. */ bool ExecShutdownNode(PlanState *node) @@ -737,6 +748,19 @@ ExecShutdownNode(PlanState *node) planstate_tree_walker(node, ExecShutdownNode, NULL); + /* + * Treat the node as running while we shut it down, but only if it's run + * at least once already. We don't expect much CPU consumption during + * node shutdown, but in the case of Gather or Gather Merge, we may shut + * down workers at this stage. If so, their buffer usage will get + * propagated into pgBufferUsage at this point, and we want to make sure + * that it gets associated with the Gather node. We skip this if the node + * has never been executed, so as to avoid incorrectly making it appear + * that it has. + */ + if (node->instrument && node->instrument->running) + InstrStartNode(node->instrument); + switch (nodeTag(node)) { case T_GatherState: @@ -751,9 +775,140 @@ ExecShutdownNode(PlanState *node) case T_GatherMergeState: ExecShutdownGatherMerge((GatherMergeState *) node); break; + case T_HashState: + ExecShutdownHash((HashState *) node); + break; + case T_HashJoinState: + ExecShutdownHashJoin((HashJoinState *) node); + break; default: break; } + /* Stop the node if we started it above, reporting 0 tuples. */ + if (node->instrument && node->instrument->running) + InstrStopNode(node->instrument, 0); + return false; } + +/* + * ExecSetTupleBound + * + * Set a tuple bound for a planstate node. This lets child plan nodes + * optimize based on the knowledge that the maximum number of tuples that + * their parent will demand is limited. The tuple bound for a node may + * only be changed between scans (i.e., after node initialization or just + * before an ExecReScan call). + * + * Any negative tuples_needed value means "no limit", which should be the + * default assumption when this is not called at all for a particular node. + * + * Note: if this is called repeatedly on a plan tree, the exact same set + * of nodes must be updated with the new limit each time; be careful that + * only unchanging conditions are tested here. 
+ */ +void +ExecSetTupleBound(int64 tuples_needed, PlanState *child_node) +{ + /* + * Since this function recurses, in principle we should check stack depth + * here. In practice, it's probably pointless since the earlier node + * initialization tree traversal would surely have consumed more stack. + */ + + if (IsA(child_node, SortState)) + { + /* + * If it is a Sort node, notify it that it can use bounded sort. + * + * Note: it is the responsibility of nodeSort.c to react properly to + * changes of these parameters. If we ever redesign this, it'd be a + * good idea to integrate this signaling with the parameter-change + * mechanism. + */ + SortState *sortState = (SortState *) child_node; + + if (tuples_needed < 0) + { + /* make sure flag gets reset if needed upon rescan */ + sortState->bounded = false; + } + else + { + sortState->bounded = true; + sortState->bound = tuples_needed; + } + } + else if (IsA(child_node, MergeAppendState)) + { + /* + * If it is a MergeAppend, we can apply the bound to any nodes that + * are children of the MergeAppend, since the MergeAppend surely need + * read no more than that many tuples from any one input. + */ + MergeAppendState *maState = (MergeAppendState *) child_node; + int i; + + for (i = 0; i < maState->ms_nplans; i++) + ExecSetTupleBound(tuples_needed, maState->mergeplans[i]); + } + else if (IsA(child_node, ResultState)) + { + /* + * Similarly, for a projecting Result, we can apply the bound to its + * child node. + * + * If Result supported qual checking, we'd have to punt on seeing a + * qual. Note that having a resconstantqual is not a showstopper: if + * that condition succeeds it affects nothing, while if it fails, no + * rows will be demanded from the Result child anyway. + */ + if (outerPlanState(child_node)) + ExecSetTupleBound(tuples_needed, outerPlanState(child_node)); + } + else if (IsA(child_node, SubqueryScanState)) + { + /* + * We can also descend through SubqueryScan, but only if it has no + * qual (otherwise it might discard rows). + */ + SubqueryScanState *subqueryState = (SubqueryScanState *) child_node; + + if (subqueryState->ss.ps.qual == NULL) + ExecSetTupleBound(tuples_needed, subqueryState->subplan); + } + else if (IsA(child_node, GatherState)) + { + /* + * A Gather node can propagate the bound to its workers. As with + * MergeAppend, no one worker could possibly need to return more + * tuples than the Gather itself needs to. + * + * Note: As with Sort, the Gather node is responsible for reacting + * properly to changes to this parameter. + */ + GatherState *gstate = (GatherState *) child_node; + + gstate->tuples_needed = tuples_needed; + + /* Also pass down the bound to our own copy of the child plan */ + ExecSetTupleBound(tuples_needed, outerPlanState(child_node)); + } + else if (IsA(child_node, GatherMergeState)) + { + /* Same comments as for Gather */ + GatherMergeState *gstate = (GatherMergeState *) child_node; + + gstate->tuples_needed = tuples_needed; + + ExecSetTupleBound(tuples_needed, outerPlanState(child_node)); + } + + /* + * In principle we could descend through any plan node type that is + * certain not to discard or combine input rows; but on seeing a node that + * can do that, we can't propagate the bound any further. For the moment + * it's unclear that any other cases are worth checking here. 
+ */ +} diff --git a/src/backend/executor/execReplication.c b/src/backend/executor/execReplication.c index 3819de28ad..25ba93e03c 100644 --- a/src/backend/executor/execReplication.c +++ b/src/backend/executor/execReplication.c @@ -3,7 +3,7 @@ * execReplication.c * miscellaneous executor routines for logical replication * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * IDENTIFICATION @@ -63,7 +63,7 @@ build_replindex_scan_key(ScanKey skey, Relation rel, Relation idxrel, opclass = (oidvector *) DatumGetPointer(indclassDatum); /* Build scankey for every attribute in the index. */ - for (attoff = 0; attoff < RelationGetNumberOfAttributes(idxrel); attoff++) + for (attoff = 0; attoff < IndexRelationGetNumberOfKeyAttributes(idxrel); attoff++) { Oid operator; Oid opfamily; @@ -131,7 +131,7 @@ RelationFindReplTupleByIndex(Relation rel, Oid idxoid, /* Start an index scan. */ InitDirtySnapshot(snap); scan = index_beginscan(rel, idxrel, &snap, - RelationGetNumberOfAttributes(idxrel), + IndexRelationGetNumberOfKeyAttributes(idxrel), 0); /* Build scan key. */ @@ -140,13 +140,13 @@ RelationFindReplTupleByIndex(Relation rel, Oid idxoid, retry: found = false; - index_rescan(scan, skey, RelationGetNumberOfAttributes(idxrel), NULL, 0); + index_rescan(scan, skey, IndexRelationGetNumberOfKeyAttributes(idxrel), NULL, 0); /* Try to find the tuple */ if ((scantuple = index_getnext(scan, ForwardScanDirection)) != NULL) { found = true; - ExecStoreTuple(scantuple, outslot, InvalidBuffer, false); + ExecStoreHeapTuple(scantuple, outslot, false); ExecMaterializeSlot(outslot); xwait = TransactionIdIsValid(snap.xmin) ? @@ -191,12 +191,18 @@ RelationFindReplTupleByIndex(Relation rel, Oid idxoid, break; case HeapTupleUpdated: /* XXX: Improve handling here */ - ereport(LOG, - (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE), - errmsg("concurrent update, retrying"))); + if (ItemPointerIndicatesMovedPartitions(&hufd.ctid)) + ereport(LOG, + (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE), + errmsg("tuple to be locked was already moved to another partition due to concurrent update, retrying"))); + else + ereport(LOG, + (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE), + errmsg("concurrent update, retrying"))); goto retry; case HeapTupleInvisible: elog(ERROR, "attempted to lock invisible tuple"); + break; default: elog(ERROR, "unexpected heap_lock_tuple status: %u", res); break; @@ -247,7 +253,7 @@ tuple_equals_slot(TupleDesc desc, HeapTuple tup, TupleTableSlot *slot) if (isnull[attrnum]) continue; - att = desc->attrs[attrnum]; + att = TupleDescAttr(desc, attrnum); typentry = lookup_type_cache(att->atttypid, TYPECACHE_EQ_OPR_FINFO); if (!OidIsValid(typentry->eq_opr_finfo.fn_oid)) @@ -288,7 +294,7 @@ RelationFindReplTupleSeq(Relation rel, LockTupleMode lockmode, Assert(equalTupleDescs(desc, outslot->tts_tupleDescriptor)); - /* Start an index scan. */ + /* Start a heap scan. */ InitDirtySnapshot(snap); scan = heap_beginscan(rel, &snap, 0, NULL); @@ -304,7 +310,7 @@ RelationFindReplTupleSeq(Relation rel, LockTupleMode lockmode, continue; found = true; - ExecStoreTuple(scantuple, outslot, InvalidBuffer, false); + ExecStoreHeapTuple(scantuple, outslot, false); ExecMaterializeSlot(outslot); xwait = TransactionIdIsValid(snap.xmin) ? 
@@ -349,12 +355,18 @@ RelationFindReplTupleSeq(Relation rel, LockTupleMode lockmode, break; case HeapTupleUpdated: /* XXX: Improve handling here */ - ereport(LOG, - (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE), - errmsg("concurrent update, retrying"))); + if (ItemPointerIndicatesMovedPartitions(&hufd.ctid)) + ereport(LOG, + (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE), + errmsg("tuple to be locked was already moved to another partition due to concurrent update, retrying"))); + else + ereport(LOG, + (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE), + errmsg("concurrent update, retrying"))); goto retry; case HeapTupleInvisible: elog(ERROR, "attempted to lock invisible tuple"); + break; default: elog(ERROR, "unexpected heap_lock_tuple status: %u", res); break; @@ -402,8 +414,10 @@ ExecSimpleRelationInsert(EState *estate, TupleTableSlot *slot) /* Check the constraints of the tuple */ if (rel->rd_att->constr) ExecConstraints(resultRelInfo, slot, estate); + if (resultRelInfo->ri_PartitionCheck) + ExecPartitionCheck(resultRelInfo, slot, estate, true); - /* Store the slot into tuple that we can inspect. */ + /* Materialize slot into a tuple that we can scribble upon. */ tuple = ExecMaterializeSlot(slot); /* OK, store the tuple and create index entries for it */ @@ -448,7 +462,7 @@ ExecSimpleRelationUpdate(EState *estate, EPQState *epqstate, CheckCmdReplicaIdentity(rel, CMD_UPDATE); - /* BEFORE ROW INSERT Triggers */ + /* BEFORE ROW UPDATE Triggers */ if (resultRelInfo->ri_TrigDesc && resultRelInfo->ri_TrigDesc->trig_update_before_row) { @@ -467,8 +481,10 @@ ExecSimpleRelationUpdate(EState *estate, EPQState *epqstate, /* Check the constraints of the tuple */ if (rel->rd_att->constr) ExecConstraints(resultRelInfo, slot, estate); + if (resultRelInfo->ri_PartitionCheck) + ExecPartitionCheck(resultRelInfo, slot, estate, true); - /* Store the slot into tuple that we can write. */ + /* Materialize slot into a tuple that we can scribble upon. 
*/ tuple = ExecMaterializeSlot(slot); /* OK, update the tuple and index entries for it */ @@ -509,13 +525,13 @@ ExecSimpleRelationDelete(EState *estate, EPQState *epqstate, CheckCmdReplicaIdentity(rel, CMD_DELETE); - /* BEFORE ROW INSERT Triggers */ + /* BEFORE ROW DELETE Triggers */ if (resultRelInfo->ri_TrigDesc && - resultRelInfo->ri_TrigDesc->trig_update_before_row) + resultRelInfo->ri_TrigDesc->trig_delete_before_row) { skip_tuple = !ExecBRDeleteTriggers(estate, epqstate, resultRelInfo, &searchslot->tts_tuple->t_self, - NULL); + NULL, NULL); } if (!skip_tuple) @@ -559,13 +575,13 @@ CheckCmdReplicaIdentity(Relation rel, CmdType cmd) if (cmd == CMD_UPDATE && pubactions->pubupdate) ereport(ERROR, (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE), - errmsg("cannot update table \"%s\" because it does not have replica identity and publishes updates", + errmsg("cannot update table \"%s\" because it does not have a replica identity and publishes updates", RelationGetRelationName(rel)), errhint("To enable updating the table, set REPLICA IDENTITY using ALTER TABLE."))); else if (cmd == CMD_DELETE && pubactions->pubdelete) ereport(ERROR, (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE), - errmsg("cannot delete from table \"%s\" because it does not have replica identity and publishes deletes", + errmsg("cannot delete from table \"%s\" because it does not have a replica identity and publishes deletes", RelationGetRelationName(rel)), errhint("To enable deleting from the table, set REPLICA IDENTITY using ALTER TABLE."))); } diff --git a/src/backend/executor/execSRF.c b/src/backend/executor/execSRF.c index 138e86ac67..b97b8d797e 100644 --- a/src/backend/executor/execSRF.c +++ b/src/backend/executor/execSRF.c @@ -7,7 +7,7 @@ * common code for calling set-returning functions according to the * ReturnSetInfo API. * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * @@ -467,11 +467,16 @@ ExecInitFunctionResultSet(Expr *expr, * function itself. The argument expressions may not contain set-returning * functions (the planner is supposed to have separated evaluation for those). * + * This should be called in a short-lived (per-tuple) context, argContext + * needs to live until all rows have been returned (i.e. *isDone set to + * ExprEndResult or ExprSingleResult). + * * This is used by nodeProjectSet.c. */ Datum ExecMakeFunctionResultSet(SetExprState *fcache, ExprContext *econtext, + MemoryContext argContext, bool *isNull, ExprDoneCond *isDone) { @@ -495,8 +500,21 @@ ExecMakeFunctionResultSet(SetExprState *fcache, */ if (fcache->funcResultStore) { - if (tuplestore_gettupleslot(fcache->funcResultStore, true, false, - fcache->funcResultSlot)) + TupleTableSlot *slot = fcache->funcResultSlot; + MemoryContext oldContext; + bool foundTup; + + /* + * Have to make sure tuple in slot lives long enough, otherwise + * clearing the slot could end up trying to free something already + * freed. + */ + oldContext = MemoryContextSwitchTo(slot->tts_mcxt); + foundTup = tuplestore_gettupleslot(fcache->funcResultStore, true, false, + fcache->funcResultSlot); + MemoryContextSwitchTo(oldContext); + + if (foundTup) { *isDone = ExprMultipleResult; if (fcache->funcReturnsTuple) @@ -524,11 +542,20 @@ ExecMakeFunctionResultSet(SetExprState *fcache, * function manager. 
We skip the evaluation if it was already done in the * previous call (ie, we are continuing the evaluation of a set-valued * function). Otherwise, collect the current argument values into fcinfo. + * + * The arguments have to live in a context that lives at least until all + * rows from this SRF have been returned, otherwise ValuePerCall SRFs + * would reference freed memory after the first returned row. */ fcinfo = &fcache->fcinfo_data; arguments = fcache->args; if (!fcache->setArgsValid) + { + MemoryContext oldContext = MemoryContextSwitchTo(argContext); + ExecEvalFuncArgs(fcinfo, arguments, econtext); + MemoryContextSwitchTo(oldContext); + } else { /* Reset flag (we may set it again below) */ @@ -655,7 +682,7 @@ init_sexpr(Oid foid, Oid input_collation, Expr *node, /* Check permission to call function */ aclresult = pg_proc_aclcheck(foid, GetUserId(), ACL_EXECUTE); if (aclresult != ACLCHECK_OK) - aclcheck_error(aclresult, ACL_KIND_PROC, get_func_name(foid)); + aclcheck_error(aclresult, OBJECT_FUNCTION, get_func_name(foid)); InvokeFunctionExecuteHook(foid); /* @@ -707,7 +734,8 @@ init_sexpr(Oid foid, Oid input_collation, Expr *node, /* Must save tupdesc in sexpr's context */ oldcontext = MemoryContextSwitchTo(sexprCxt); - if (functypclass == TYPEFUNC_COMPOSITE) + if (functypclass == TYPEFUNC_COMPOSITE || + functypclass == TYPEFUNC_COMPOSITE_DOMAIN) { /* Composite data type, e.g. a table's row type */ Assert(tupdesc); @@ -903,8 +931,8 @@ tupledesc_match(TupleDesc dst_tupdesc, TupleDesc src_tupdesc) for (i = 0; i < dst_tupdesc->natts; i++) { - Form_pg_attribute dattr = dst_tupdesc->attrs[i]; - Form_pg_attribute sattr = src_tupdesc->attrs[i]; + Form_pg_attribute dattr = TupleDescAttr(dst_tupdesc, i); + Form_pg_attribute sattr = TupleDescAttr(src_tupdesc, i); if (IsBinaryCoercible(sattr->atttypid, dattr->atttypid)) continue; /* no worries */ diff --git a/src/backend/executor/execScan.c b/src/backend/executor/execScan.c index 4f131b3ee0..233cc28060 100644 --- a/src/backend/executor/execScan.c +++ b/src/backend/executor/execScan.c @@ -7,7 +7,7 @@ * stuff - checking the qualification and projecting the tuple * appropriately. * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * @@ -23,11 +23,9 @@ #include "utils/memutils.h" -static bool tlist_matches_tupdesc(PlanState *ps, List *tlist, Index varno, TupleDesc tupdesc); - /* - * ExecScanFetch -- fetch next potential tuple + * ExecScanFetch -- check interrupts & fetch next potential tuple * * This routine is concerned with substituting a test tuple if we are * inside an EvalPlanQual recheck. 
If we aren't, just execute @@ -40,6 +38,8 @@ ExecScanFetch(ScanState *node, { EState *estate = node->ps.state; + CHECK_FOR_INTERRUPTS(); + if (estate->es_epqTuple != NULL) { /* @@ -78,8 +78,8 @@ ExecScanFetch(ScanState *node, return ExecClearTuple(slot); /* Store test tuple in the plan node's scan slot */ - ExecStoreTuple(estate->es_epqTuple[scanrelid - 1], - slot, InvalidBuffer, false); + ExecStoreHeapTuple(estate->es_epqTuple[scanrelid - 1], + slot, false); /* Check if it meets the access-method conditions */ if (!(*recheckMtd) (node, slot)) @@ -133,6 +133,8 @@ ExecScan(ScanState *node, projInfo = node->ps.ps_ProjInfo; econtext = node->ps.ps_ExprContext; + /* interrupt checks are in ExecScanFetch */ + /* * If we have neither a qual to check nor a projection to do, just skip * all the overhead and return the raw scan tuple. @@ -157,8 +159,6 @@ ExecScan(ScanState *node, { TupleTableSlot *slot; - CHECK_FOR_INTERRUPTS(); - slot = ExecScanFetch(node, accessMtd, recheckMtd); /* @@ -229,14 +229,15 @@ ExecScan(ScanState *node, * the scan node, because the planner will preferentially generate a matching * tlist. * - * ExecAssignScanType must have been called already. + * The scan slot's descriptor must have been set already. */ void ExecAssignScanProjectionInfo(ScanState *node) { Scan *scan = (Scan *) node->ps.plan; + TupleDesc tupdesc = node->ss_ScanTupleSlot->tts_tupleDescriptor; - ExecAssignScanProjectionInfoWithVarno(node, scan->scanrelid); + ExecConditionalAssignProjectionInfo(&node->ps, tupdesc, scan->scanrelid); } /* @@ -246,75 +247,9 @@ ExecAssignScanProjectionInfo(ScanState *node) void ExecAssignScanProjectionInfoWithVarno(ScanState *node, Index varno) { - Scan *scan = (Scan *) node->ps.plan; + TupleDesc tupdesc = node->ss_ScanTupleSlot->tts_tupleDescriptor; - if (tlist_matches_tupdesc(&node->ps, - scan->plan.targetlist, - varno, - node->ss_ScanTupleSlot->tts_tupleDescriptor)) - node->ps.ps_ProjInfo = NULL; - else - ExecAssignProjectionInfo(&node->ps, - node->ss_ScanTupleSlot->tts_tupleDescriptor); -} - -static bool -tlist_matches_tupdesc(PlanState *ps, List *tlist, Index varno, TupleDesc tupdesc) -{ - int numattrs = tupdesc->natts; - int attrno; - bool hasoid; - ListCell *tlist_item = list_head(tlist); - - /* Check the tlist attributes */ - for (attrno = 1; attrno <= numattrs; attrno++) - { - Form_pg_attribute att_tup = tupdesc->attrs[attrno - 1]; - Var *var; - - if (tlist_item == NULL) - return false; /* tlist too short */ - var = (Var *) ((TargetEntry *) lfirst(tlist_item))->expr; - if (!var || !IsA(var, Var)) - return false; /* tlist item not a Var */ - /* if these Asserts fail, planner messed up */ - Assert(var->varno == varno); - Assert(var->varlevelsup == 0); - if (var->varattno != attrno) - return false; /* out of order */ - if (att_tup->attisdropped) - return false; /* table contains dropped columns */ - - /* - * Note: usually the Var's type should match the tupdesc exactly, but - * in situations involving unions of columns that have different - * typmods, the Var may have come from above the union and hence have - * typmod -1. This is a legitimate situation since the Var still - * describes the column, just not as exactly as the tupdesc does. We - * could change the planner to prevent it, but it'd then insert - * projection steps just to convert from specific typmod to typmod -1, - * which is pretty silly. 
- */ - if (var->vartype != att_tup->atttypid || - (var->vartypmod != att_tup->atttypmod && - var->vartypmod != -1)) - return false; /* type mismatch */ - - tlist_item = lnext(tlist_item); - } - - if (tlist_item) - return false; /* tlist too long */ - - /* - * If the plan context requires a particular hasoid setting, then that has - * to match, too. - */ - if (ExecContextForcesOids(ps, &hasoid) && - hasoid != tupdesc->tdhasoid) - return false; - - return true; + ExecConditionalAssignProjectionInfo(&node->ps, tupdesc, varno); } /* @@ -328,6 +263,12 @@ ExecScanReScan(ScanState *node) { EState *estate = node->ps.state; + /* + * We must clear the scan tuple so that observers (e.g., execCurrent.c) + * can tell that this plan node is not positioned on a tuple. + */ + ExecClearTuple(node->ss_ScanTupleSlot); + /* Rescan EvalPlanQual tuple if we're inside an EvalPlanQual recheck */ if (estate->es_epqScanDone != NULL) { diff --git a/src/backend/executor/execTuples.c b/src/backend/executor/execTuples.c index 7ae70a877a..9f0d9daa82 100644 --- a/src/backend/executor/execTuples.c +++ b/src/backend/executor/execTuples.c @@ -12,44 +12,6 @@ * This information is needed by routines manipulating tuples * (getattribute, formtuple, etc.). * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group - * Portions Copyright (c) 1994, Regents of the University of California - * - * - * IDENTIFICATION - * src/backend/executor/execTuples.c - * - *------------------------------------------------------------------------- - */ -/* - * INTERFACE ROUTINES - * - * SLOT CREATION/DESTRUCTION - * MakeTupleTableSlot - create an empty slot - * ExecAllocTableSlot - create a slot within a tuple table - * ExecResetTupleTable - clear and optionally delete a tuple table - * MakeSingleTupleTableSlot - make a standalone slot, set its descriptor - * ExecDropSingleTupleTableSlot - destroy a standalone slot - * - * SLOT ACCESSORS - * ExecSetSlotDescriptor - set a slot's tuple descriptor - * ExecStoreTuple - store a physical tuple in the slot - * ExecStoreMinimalTuple - store a minimal physical tuple in the slot - * ExecClearTuple - clear contents of a slot - * ExecStoreVirtualTuple - mark slot as containing a virtual tuple - * ExecCopySlotTuple - build a physical tuple from a slot - * ExecCopySlotMinimalTuple - build a minimal physical tuple from a slot - * ExecMaterializeSlot - convert virtual to physical storage - * ExecCopySlot - copy one slot's contents to another - * - * CONVENIENCE INITIALIZATION ROUTINES - * ExecInitResultTupleSlot \ convenience routines to initialize - * ExecInitScanTupleSlot \ the various tuple slots for nodes - * ExecInitExtraTupleSlot / which store copies of tuples. - * ExecInitNullTupleSlot / - * - * Routines that probably belong somewhere else: - * ExecTypeFromTL - form a TupleDesc from a target list * * EXAMPLE OF HOW TABLE ROUTINES WORK * Suppose we have a query such as SELECT emp.name FROM emp and we have @@ -57,18 +19,22 @@ * * At ExecutorStart() * ---------------- - * - ExecInitSeqScan() calls ExecInitScanTupleSlot() and - * ExecInitResultTupleSlot() to construct TupleTableSlots - * for the tuples returned by the access methods and the - * tuples resulting from performing target list projections. + + * - ExecInitSeqScan() calls ExecInitScanTupleSlot() to construct a + * TupleTableSlots for the tuples returned by the access method, and + * ExecInitResultTypeTL() to define the node's return + * type. 
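To make the revised start-up sequence above concrete, here is a hedged sketch of a scan node's initialization under the new API, including ExecAssignScanProjectionInfo(), which is described next; init_scan_slots is an invented helper name, and real nodes keep this logic inside their ExecInit functions.

    #include "postgres.h"
    #include "executor/executor.h"
    #include "utils/rel.h"

    /* Illustrative fragment only; mirrors the sequence used by ExecInitSeqScan(). */
    static void
    init_scan_slots(ScanState *scanstate, EState *estate, int eflags)
    {
        Scan   *plan = (Scan *) scanstate->ps.plan;

        /* expression context first, so projection info can be built later */
        ExecAssignExprContext(estate, &scanstate->ps);

        /* open the relation and give the scan slot its row type */
        scanstate->ss_currentRelation =
            ExecOpenScanRelation(estate, plan->scanrelid, eflags);
        ExecInitScanTupleSlot(estate, scanstate,
                              RelationGetDescr(scanstate->ss_currentRelation));

        /* declare the node's result row type from its targetlist */
        ExecInitResultTypeTL(&scanstate->ps);

        /* build a projection (and a result slot) only if one is really needed */
        ExecAssignScanProjectionInfo(scanstate);
    }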
ExecAssignScanProjectionInfo() will, if necessary, create + * another TupleTableSlot for the tuples resulting from performing + * target list projections. * * During ExecutorRun() * ---------------- - * - SeqNext() calls ExecStoreTuple() to place the tuple returned - * by the access methods into the scan tuple slot. + * - SeqNext() calls ExecStoreBufferHeapTuple() to place the tuple + * returned by the access method into the scan tuple slot. * - * - ExecSeqScan() calls ExecStoreTuple() to take the result - * tuple from ExecProject() and place it into the result tuple slot. + * - ExecSeqScan() (via ExecScan), if necessary, calls ExecProject(), + * putting the result of the projection in the result tuple slot. If + * not necessary, it directly returns the slot returned by SeqNext(). * * - ExecutePlan() calls the output function. * @@ -78,10 +44,21 @@ * (such as whether or not a tuple should be pfreed, what buffer contains * this tuple, the tuple's tuple descriptor, etc). It also allows us * to avoid physically constructing projection tuples in many cases. + * + * + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group + * Portions Copyright (c) 1994, Regents of the University of California + * + * + * IDENTIFICATION + * src/backend/executor/execTuples.c + * + *------------------------------------------------------------------------- */ #include "postgres.h" #include "access/htup_details.h" +#include "access/tupdesc_details.h" #include "access/tuptoaster.h" #include "funcapi.h" #include "catalog/pg_type.h" @@ -104,19 +81,35 @@ static TupleDesc ExecTypeFromTLInternal(List *targetList, /* -------------------------------- * MakeTupleTableSlot * - * Basic routine to make an empty TupleTableSlot. + * Basic routine to make an empty TupleTableSlot. If tupleDesc is + * specified the slot's descriptor is fixed for it's lifetime, gaining + * some efficiency. If that's undesirable, pass NULL. * -------------------------------- */ TupleTableSlot * -MakeTupleTableSlot(void) +MakeTupleTableSlot(TupleDesc tupleDesc) { - TupleTableSlot *slot = makeNode(TupleTableSlot); + Size sz; + TupleTableSlot *slot; + + /* + * When a fixed descriptor is specified, we can reduce overhead by + * allocating the entire slot in one go. 
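A small illustration of the fixed-descriptor behaviour described above, using a standalone slot; the two-column layout and the make_fixed_slot helper are made up for the example.

    #include "postgres.h"
    #include "access/tupdesc.h"
    #include "catalog/pg_type.h"
    #include "executor/tuptable.h"

    /* Illustrative: the descriptor passed here is now fixed for the slot's lifetime. */
    static TupleTableSlot *
    make_fixed_slot(void)
    {
        TupleDesc   tupdesc = CreateTemplateTupleDesc(2, false);

        TupleDescInitEntry(tupdesc, (AttrNumber) 1, "id", INT4OID, -1, 0);
        TupleDescInitEntry(tupdesc, (AttrNumber) 2, "name", TEXTOID, -1, 0);

        /*
         * MakeSingleTupleTableSlot() now produces a TTS_FIXED slot; calling
         * ExecSetSlotDescriptor() on it would trip the new Assert, so create
         * a separate slot if a different row type is needed.
         */
        return MakeSingleTupleTableSlot(tupdesc);
    }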
+ */ + if (tupleDesc) + sz = MAXALIGN(sizeof(TupleTableSlot)) + + MAXALIGN(tupleDesc->natts * sizeof(Datum)) + + MAXALIGN(tupleDesc->natts * sizeof(bool)); + else + sz = sizeof(TupleTableSlot); - slot->tts_isempty = true; - slot->tts_shouldFree = false; - slot->tts_shouldFreeMin = false; + slot = palloc0(sz); + slot->type = T_TupleTableSlot; + slot->tts_flags |= TTS_FLAG_EMPTY; + if (tupleDesc != NULL) + slot->tts_flags |= TTS_FLAG_FIXED; slot->tts_tuple = NULL; - slot->tts_tupleDescriptor = NULL; + slot->tts_tupleDescriptor = tupleDesc; slot->tts_mcxt = CurrentMemoryContext; slot->tts_buffer = InvalidBuffer; slot->tts_nvalid = 0; @@ -124,6 +117,19 @@ MakeTupleTableSlot(void) slot->tts_isnull = NULL; slot->tts_mintuple = NULL; + if (tupleDesc != NULL) + { + slot->tts_values = (Datum *) + (((char *) slot) + + MAXALIGN(sizeof(TupleTableSlot))); + slot->tts_isnull = (bool *) + (((char *) slot) + + MAXALIGN(sizeof(TupleTableSlot)) + + MAXALIGN(tupleDesc->natts * sizeof(Datum))); + + PinTupleDesc(tupleDesc); + } + return slot; } @@ -134,9 +140,9 @@ MakeTupleTableSlot(void) * -------------------------------- */ TupleTableSlot * -ExecAllocTableSlot(List **tupleTable) +ExecAllocTableSlot(List **tupleTable, TupleDesc desc) { - TupleTableSlot *slot = MakeTupleTableSlot(); + TupleTableSlot *slot = MakeTupleTableSlot(desc); *tupleTable = lappend(*tupleTable, slot); @@ -173,10 +179,13 @@ ExecResetTupleTable(List *tupleTable, /* tuple table */ /* If shouldFree, release memory occupied by the slot itself */ if (shouldFree) { - if (slot->tts_values) - pfree(slot->tts_values); - if (slot->tts_isnull) - pfree(slot->tts_isnull); + if (!TTS_FIXED(slot)) + { + if (slot->tts_values) + pfree(slot->tts_values); + if (slot->tts_isnull) + pfree(slot->tts_isnull); + } pfree(slot); } } @@ -198,9 +207,7 @@ ExecResetTupleTable(List *tupleTable, /* tuple table */ TupleTableSlot * MakeSingleTupleTableSlot(TupleDesc tupdesc) { - TupleTableSlot *slot = MakeTupleTableSlot(); - - ExecSetSlotDescriptor(slot, tupdesc); + TupleTableSlot *slot = MakeTupleTableSlot(tupdesc); return slot; } @@ -220,10 +227,13 @@ ExecDropSingleTupleTableSlot(TupleTableSlot *slot) ExecClearTuple(slot); if (slot->tts_tupleDescriptor) ReleaseTupleDesc(slot->tts_tupleDescriptor); - if (slot->tts_values) - pfree(slot->tts_values); - if (slot->tts_isnull) - pfree(slot->tts_isnull); + if (!TTS_FIXED(slot)) + { + if (slot->tts_values) + pfree(slot->tts_values); + if (slot->tts_isnull) + pfree(slot->tts_isnull); + } pfree(slot); } @@ -247,6 +257,8 @@ void ExecSetSlotDescriptor(TupleTableSlot *slot, /* slot to change */ TupleDesc tupdesc) /* new tuple descriptor */ { + Assert(!TTS_FIXED(slot)); + /* For safety, make sure slot is empty before changing it */ ExecClearTuple(slot); @@ -279,48 +291,94 @@ ExecSetSlotDescriptor(TupleTableSlot *slot, /* slot to change */ } /* -------------------------------- - * ExecStoreTuple + * ExecStoreHeapTuple * - * This function is used to store a physical tuple into a specified + * This function is used to store an on-the-fly physical tuple into a specified * slot in the tuple table. * * tuple: tuple to store * slot: slot to store it in - * buffer: disk buffer if tuple is in a disk page, else InvalidBuffer * shouldFree: true if ExecClearTuple should pfree() the tuple * when done with it * - * If 'buffer' is not InvalidBuffer, the tuple table code acquires a pin - * on the buffer which is held until the slot is cleared, so that the tuple - * won't go away on us. 
+ * shouldFree is normally set 'true' for tuples constructed on-the-fly. But it + * can be 'false' when the referenced tuple is held in a tuple table slot + * belonging to a lower-level executor Proc node. In this case the lower-level + * slot retains ownership and responsibility for eventually releasing the + * tuple. When this method is used, we must be certain that the upper-level + * Proc node will lose interest in the tuple sooner than the lower-level one + * does! If you're not certain, copy the lower-level tuple with heap_copytuple + * and let the upper-level table slot assume ownership of the copy! + * + * Return value is just the passed-in slot pointer. + * -------------------------------- + */ +TupleTableSlot * +ExecStoreHeapTuple(HeapTuple tuple, + TupleTableSlot *slot, + bool shouldFree) +{ + /* + * sanity checks + */ + Assert(tuple != NULL); + Assert(slot != NULL); + Assert(slot->tts_tupleDescriptor != NULL); + + /* + * Free any old physical tuple belonging to the slot. + */ + if (TTS_SHOULDFREE(slot)) + { + heap_freetuple(slot->tts_tuple); + slot->tts_flags &= ~TTS_FLAG_SHOULDFREE; + } + if (TTS_SHOULDFREEMIN(slot)) + { + heap_free_minimal_tuple(slot->tts_mintuple); + slot->tts_flags &= ~TTS_FLAG_SHOULDFREEMIN; + } + + /* + * Store the new tuple into the specified slot. + */ + slot->tts_flags &= ~TTS_FLAG_EMPTY; + if (shouldFree) + slot->tts_flags |= TTS_FLAG_SHOULDFREE; + slot->tts_tuple = tuple; + slot->tts_mintuple = NULL; + + /* Mark extracted state invalid */ + slot->tts_nvalid = 0; + + /* Unpin any buffer pinned by the slot. */ + if (BufferIsValid(slot->tts_buffer)) + ReleaseBuffer(slot->tts_buffer); + slot->tts_buffer = InvalidBuffer; + + return slot; +} + +/* -------------------------------- + * ExecStoreBufferHeapTuple + * + * This function is used to store an on-disk physical tuple from a buffer + * into a specified slot in the tuple table. * - * shouldFree is normally set 'true' for tuples constructed on-the-fly. - * It must always be 'false' for tuples that are stored in disk pages, - * since we don't want to try to pfree those. + * tuple: tuple to store + * slot: slot to store it in + * buffer: disk buffer if tuple is in a disk page, else InvalidBuffer * - * Another case where it is 'false' is when the referenced tuple is held - * in a tuple table slot belonging to a lower-level executor Proc node. - * In this case the lower-level slot retains ownership and responsibility - * for eventually releasing the tuple. When this method is used, we must - * be certain that the upper-level Proc node will lose interest in the tuple - * sooner than the lower-level one does! If you're not certain, copy the - * lower-level tuple with heap_copytuple and let the upper-level table - * slot assume ownership of the copy! + * The tuple table code acquires a pin on the buffer which is held until the + * slot is cleared, so that the tuple won't go away on us. * * Return value is just the passed-in slot pointer. - * - * NOTE: before PostgreSQL 8.1, this function would accept a NULL tuple - * pointer and effectively behave like ExecClearTuple (though you could - * still specify a buffer to pin, which would be an odd combination). - * This saved a couple lines of code in a few places, but seemed more likely - * to mask logic errors than to be really useful, so it's now disallowed. 
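For extension authors, a hedged migration sketch for the ExecStoreTuple() split documented above; store_examples and its parameters are hypothetical.

    #include "postgres.h"
    #include "executor/tuptable.h"

    static void
    store_examples(TupleTableSlot *slot,
                   HeapTuple palloced_tuple,    /* constructed on the fly */
                   HeapTuple buffer_tuple,      /* still on a pinned page */
                   Buffer buffer)
    {
        /* was: ExecStoreTuple(palloced_tuple, slot, InvalidBuffer, true); */
        ExecStoreHeapTuple(palloced_tuple, slot, true);

        /* was: ExecStoreTuple(buffer_tuple, slot, buffer, false); */
        ExecStoreBufferHeapTuple(buffer_tuple, slot, buffer);
    }

Storing the buffer tuple releases the previously stored tuple (the slot owned it via shouldFree), which matches the previous behaviour of the combined function.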
* -------------------------------- */ TupleTableSlot * -ExecStoreTuple(HeapTuple tuple, - TupleTableSlot *slot, - Buffer buffer, - bool shouldFree) +ExecStoreBufferHeapTuple(HeapTuple tuple, + TupleTableSlot *slot, + Buffer buffer) { /* * sanity checks @@ -328,23 +386,26 @@ ExecStoreTuple(HeapTuple tuple, Assert(tuple != NULL); Assert(slot != NULL); Assert(slot->tts_tupleDescriptor != NULL); - /* passing shouldFree=true for a tuple on a disk page is not sane */ - Assert(BufferIsValid(buffer) ? (!shouldFree) : true); + Assert(BufferIsValid(buffer)); /* * Free any old physical tuple belonging to the slot. */ - if (slot->tts_shouldFree) + if (TTS_SHOULDFREE(slot)) + { heap_freetuple(slot->tts_tuple); - if (slot->tts_shouldFreeMin) + slot->tts_flags &= ~TTS_FLAG_SHOULDFREE; + } + if (TTS_SHOULDFREEMIN(slot)) + { heap_free_minimal_tuple(slot->tts_mintuple); + slot->tts_flags &= ~TTS_FLAG_SHOULDFREEMIN; + } /* * Store the new tuple into the specified slot. */ - slot->tts_isempty = false; - slot->tts_shouldFree = shouldFree; - slot->tts_shouldFreeMin = false; + slot->tts_flags &= ~TTS_FLAG_EMPTY; slot->tts_tuple = tuple; slot->tts_mintuple = NULL; @@ -352,21 +413,20 @@ ExecStoreTuple(HeapTuple tuple, slot->tts_nvalid = 0; /* - * If tuple is on a disk page, keep the page pinned as long as we hold a - * pointer into it. We assume the caller already has such a pin. + * Keep the disk page containing the given tuple pinned as long as we hold + * a pointer into it. We assume the caller already has such a pin. * * This is coded to optimize the case where the slot previously held a - * tuple on the same disk page: in that case releasing and re-acquiring - * the pin is a waste of cycles. This is a common situation during - * seqscans, so it's worth troubling over. + * tuple on the same disk page: in that case releasing and re-acquiring the + * pin is a waste of cycles. This is a common situation during seqscans, + * so it's worth troubling over. */ if (slot->tts_buffer != buffer) { if (BufferIsValid(slot->tts_buffer)) ReleaseBuffer(slot->tts_buffer); slot->tts_buffer = buffer; - if (BufferIsValid(buffer)) - IncrBufferRefCount(buffer); + IncrBufferRefCount(buffer); } return slot; @@ -375,7 +435,7 @@ ExecStoreTuple(HeapTuple tuple, /* -------------------------------- * ExecStoreMinimalTuple * - * Like ExecStoreTuple, but insert a "minimal" tuple into the slot. + * Like ExecStoreHeapTuple, but insert a "minimal" tuple into the slot. * * No 'buffer' parameter since minimal tuples are never stored in relations. * -------------------------------- @@ -395,10 +455,16 @@ ExecStoreMinimalTuple(MinimalTuple mtup, /* * Free any old physical tuple belonging to the slot. */ - if (slot->tts_shouldFree) + if (TTS_SHOULDFREE(slot)) + { heap_freetuple(slot->tts_tuple); - if (slot->tts_shouldFreeMin) + slot->tts_flags &= ~TTS_FLAG_SHOULDFREE; + } + if (TTS_SHOULDFREEMIN(slot)) + { heap_free_minimal_tuple(slot->tts_mintuple); + slot->tts_flags &= ~TTS_FLAG_SHOULDFREEMIN; + } /* * Drop the pin on the referenced buffer, if there is one. @@ -411,9 +477,9 @@ ExecStoreMinimalTuple(MinimalTuple mtup, /* * Store the new tuple into the specified slot. 
*/ - slot->tts_isempty = false; - slot->tts_shouldFree = false; - slot->tts_shouldFreeMin = shouldFree; + slot->tts_flags &= ~TTS_FLAG_EMPTY; + if (shouldFree) + slot->tts_flags |= TTS_FLAG_SHOULDFREEMIN; slot->tts_tuple = &slot->tts_minhdr; slot->tts_mintuple = mtup; @@ -446,15 +512,19 @@ ExecClearTuple(TupleTableSlot *slot) /* slot in which to store tuple */ /* * Free the old physical tuple if necessary. */ - if (slot->tts_shouldFree) + if (TTS_SHOULDFREE(slot)) + { heap_freetuple(slot->tts_tuple); - if (slot->tts_shouldFreeMin) + slot->tts_flags &= ~TTS_FLAG_SHOULDFREE; + } + if (TTS_SHOULDFREEMIN(slot)) + { heap_free_minimal_tuple(slot->tts_mintuple); + slot->tts_flags &= ~TTS_FLAG_SHOULDFREEMIN; + } slot->tts_tuple = NULL; slot->tts_mintuple = NULL; - slot->tts_shouldFree = false; - slot->tts_shouldFreeMin = false; /* * Drop the pin on the referenced buffer, if there is one. @@ -467,7 +537,7 @@ ExecClearTuple(TupleTableSlot *slot) /* slot in which to store tuple */ /* * Mark it empty. */ - slot->tts_isempty = true; + slot->tts_flags |= TTS_FLAG_EMPTY; slot->tts_nvalid = 0; return slot; @@ -492,9 +562,9 @@ ExecStoreVirtualTuple(TupleTableSlot *slot) */ Assert(slot != NULL); Assert(slot->tts_tupleDescriptor != NULL); - Assert(slot->tts_isempty); + Assert(TTS_EMPTY(slot)); - slot->tts_isempty = false; + slot->tts_flags &= ~TTS_FLAG_EMPTY; slot->tts_nvalid = slot->tts_tupleDescriptor->natts; return slot; @@ -548,7 +618,7 @@ ExecCopySlotTuple(TupleTableSlot *slot) * sanity checks */ Assert(slot != NULL); - Assert(!slot->tts_isempty); + Assert(!TTS_EMPTY(slot)); /* * If we have a physical tuple (either format) then just copy it. @@ -580,7 +650,7 @@ ExecCopySlotMinimalTuple(TupleTableSlot *slot) * sanity checks */ Assert(slot != NULL); - Assert(!slot->tts_isempty); + Assert(!TTS_EMPTY(slot)); /* * If we have a physical tuple then just copy it. Prefer to copy @@ -589,7 +659,14 @@ ExecCopySlotMinimalTuple(TupleTableSlot *slot) if (slot->tts_mintuple) return heap_copy_minimal_tuple(slot->tts_mintuple); if (slot->tts_tuple) - return minimal_tuple_from_heap_tuple(slot->tts_tuple); + { + if (HeapTupleHeaderGetNatts(slot->tts_tuple->t_data) + < slot->tts_tupleDescriptor->natts) + return minimal_expand_tuple(slot->tts_tuple, + slot->tts_tupleDescriptor); + else + return minimal_tuple_from_heap_tuple(slot->tts_tuple); + } /* * Otherwise we need to build a tuple from the Datum array. @@ -621,13 +698,26 @@ ExecFetchSlotTuple(TupleTableSlot *slot) * sanity checks */ Assert(slot != NULL); - Assert(!slot->tts_isempty); + Assert(!TTS_EMPTY(slot)); /* * If we have a regular physical tuple then just return it. */ if (TTS_HAS_PHYSICAL_TUPLE(slot)) + { + if (HeapTupleHeaderGetNatts(slot->tts_tuple->t_data) < + slot->tts_tupleDescriptor->natts) + { + HeapTuple tuple; + MemoryContext oldContext = MemoryContextSwitchTo(slot->tts_mcxt); + + tuple = heap_expand_tuple(slot->tts_tuple, + slot->tts_tupleDescriptor); + MemoryContextSwitchTo(oldContext); + slot = ExecStoreHeapTuple(tuple, slot, true); + } return slot->tts_tuple; + } /* * Otherwise materialize the slot... @@ -657,7 +747,8 @@ ExecFetchSlotMinimalTuple(TupleTableSlot *slot) * sanity checks */ Assert(slot != NULL); - Assert(!slot->tts_isempty); + Assert(!TTS_EMPTY(slot)); + /* * If we have a minimal physical tuple (local or not) then just return it. 
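Since the tts_isempty/tts_shouldFree/tts_shouldFreeMin booleans are folded into tts_flags above, external code that read those fields needs the accessor macros instead; a tiny hypothetical helper:

    #include "postgres.h"
    #include "executor/tuptable.h"

    static bool
    slot_holds_private_tuple(TupleTableSlot *slot)
    {
        /* was: return !slot->tts_isempty && slot->tts_shouldFree; */
        return !TTS_EMPTY(slot) && TTS_SHOULDFREE(slot);
    }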
@@ -674,7 +765,7 @@ ExecFetchSlotMinimalTuple(TupleTableSlot *slot) */ oldContext = MemoryContextSwitchTo(slot->tts_mcxt); slot->tts_mintuple = ExecCopySlotMinimalTuple(slot); - slot->tts_shouldFreeMin = true; + slot->tts_flags |= TTS_FLAG_SHOULDFREEMIN; MemoryContextSwitchTo(oldContext); /* @@ -730,13 +821,13 @@ ExecMaterializeSlot(TupleTableSlot *slot) * sanity checks */ Assert(slot != NULL); - Assert(!slot->tts_isempty); + Assert(!TTS_EMPTY(slot)); /* * If we have a regular physical tuple, and it's locally palloc'd, we have * nothing to do. */ - if (slot->tts_tuple && slot->tts_shouldFree) + if (slot->tts_tuple && TTS_SHOULDFREE(slot)) return slot->tts_tuple; /* @@ -748,7 +839,7 @@ ExecMaterializeSlot(TupleTableSlot *slot) */ oldContext = MemoryContextSwitchTo(slot->tts_mcxt); slot->tts_tuple = ExecCopySlotTuple(slot); - slot->tts_shouldFree = true; + slot->tts_flags |= TTS_FLAG_SHOULDFREE; MemoryContextSwitchTo(oldContext); /* @@ -775,7 +866,7 @@ ExecMaterializeSlot(TupleTableSlot *slot) * storage, we must not pfree it now, since callers might have already * fetched datum pointers referencing it.) */ - if (!slot->tts_shouldFreeMin) + if (!TTS_SHOULDFREEMIN(slot)) slot->tts_mintuple = NULL; return slot->tts_tuple; @@ -806,7 +897,7 @@ ExecCopySlot(TupleTableSlot *dstslot, TupleTableSlot *srcslot) newTuple = ExecCopySlotTuple(srcslot); MemoryContextSwitchTo(oldContext); - return ExecStoreTuple(newTuple, dstslot, InvalidBuffer, true); + return ExecStoreHeapTuple(newTuple, dstslot, true); } @@ -815,8 +906,34 @@ ExecCopySlot(TupleTableSlot *dstslot, TupleTableSlot *srcslot) * ---------------------------------------------------------------- */ +/* ---------------- + * ExecInitResultTypeTL + * + * Initialize result type, using the plan node's targetlist. + * ---------------- + */ +void +ExecInitResultTypeTL(PlanState *planstate) +{ + bool hasoid; + TupleDesc tupDesc; + + if (ExecContextForcesOids(planstate, &hasoid)) + { + /* context forces OID choice; hasoid is now set correctly */ + } + else + { + /* given free choice, don't leave space for OIDs in result tuples */ + hasoid = false; + } + + tupDesc = ExecTypeFromTL(planstate->plan->targetlist, hasoid); + planstate->ps_ResultTupleDesc = tupDesc; +} + /* -------------------------------- - * ExecInit{Result,Scan,Extra}TupleSlot + * ExecInit{Result,Scan,Extra}TupleSlot[TL] * * These are convenience routines to initialize the specified slot * in nodes inheriting the appropriate state. ExecInitExtraTupleSlot @@ -825,13 +942,33 @@ ExecCopySlot(TupleTableSlot *dstslot, TupleTableSlot *srcslot) */ /* ---------------- - * ExecInitResultTupleSlot + * ExecInitResultTupleSlotTL + * + * Initialize result tuple slot, using the tuple descriptor previously + * computed with ExecInitResultTypeTL(). * ---------------- */ void -ExecInitResultTupleSlot(EState *estate, PlanState *planstate) +ExecInitResultSlot(PlanState *planstate) { - planstate->ps_ResultTupleSlot = ExecAllocTableSlot(&estate->es_tupleTable); + TupleTableSlot *slot; + + slot = ExecAllocTableSlot(&planstate->state->es_tupleTable, + planstate->ps_ResultTupleDesc); + planstate->ps_ResultTupleSlot = slot; +} + +/* ---------------- + * ExecInitResultTupleSlotTL + * + * Initialize result tuple slot, using the plan node's targetlist. 
+ * ---------------- + */ +void +ExecInitResultTupleSlotTL(PlanState *planstate) +{ + ExecInitResultTypeTL(planstate); + ExecInitResultSlot(planstate); } /* ---------------- @@ -839,19 +976,25 @@ ExecInitResultTupleSlot(EState *estate, PlanState *planstate) * ---------------- */ void -ExecInitScanTupleSlot(EState *estate, ScanState *scanstate) +ExecInitScanTupleSlot(EState *estate, ScanState *scanstate, TupleDesc tupledesc) { - scanstate->ss_ScanTupleSlot = ExecAllocTableSlot(&estate->es_tupleTable); + scanstate->ss_ScanTupleSlot = ExecAllocTableSlot(&estate->es_tupleTable, + tupledesc); + scanstate->ps.scandesc = tupledesc; } /* ---------------- * ExecInitExtraTupleSlot + * + * Return a newly created slot. If tupledesc is non-NULL the slot will have + * that as its fixed tupledesc. Otherwise the caller needs to use + * ExecSetSlotDescriptor() to set the descriptor before use. * ---------------- */ TupleTableSlot * -ExecInitExtraTupleSlot(EState *estate) +ExecInitExtraTupleSlot(EState *estate, TupleDesc tupledesc) { - return ExecAllocTableSlot(&estate->es_tupleTable); + return ExecAllocTableSlot(&estate->es_tupleTable, tupledesc); } /* ---------------- @@ -865,13 +1008,189 @@ ExecInitExtraTupleSlot(EState *estate) TupleTableSlot * ExecInitNullTupleSlot(EState *estate, TupleDesc tupType) { - TupleTableSlot *slot = ExecInitExtraTupleSlot(estate); - - ExecSetSlotDescriptor(slot, tupType); + TupleTableSlot *slot = ExecInitExtraTupleSlot(estate, tupType); return ExecStoreAllNullTuple(slot); } +/* --------------------------------------------------------------- + * Routines for setting/accessing attributes in a slot. + * --------------------------------------------------------------- + */ + +/* + * Fill in missing values for a TupleTableSlot. + * + * This is only exposed because it's needed for JIT compiled tuple + * deforming. That exception aside, there should be no callers outside of this + * file. + */ +void +slot_getmissingattrs(TupleTableSlot *slot, int startAttNum, int lastAttNum) +{ + AttrMissing *attrmiss = NULL; + int missattnum; + + if (slot->tts_tupleDescriptor->constr) + attrmiss = slot->tts_tupleDescriptor->constr->missing; + + if (!attrmiss) + { + /* no missing values array at all, so just fill everything in as NULL */ + memset(slot->tts_values + startAttNum, 0, + (lastAttNum - startAttNum) * sizeof(Datum)); + memset(slot->tts_isnull + startAttNum, 1, + (lastAttNum - startAttNum) * sizeof(bool)); + } + else + { + /* if there is a missing values array we must process them one by one */ + for (missattnum = startAttNum; + missattnum < lastAttNum; + missattnum++) + { + slot->tts_values[missattnum] = attrmiss[missattnum].am_value; + slot->tts_isnull[missattnum] = !attrmiss[missattnum].am_present; + } + } +} + +/* + * slot_getattr + * This function fetches an attribute of the slot's current tuple. + * It is functionally equivalent to heap_getattr, but fetches of + * multiple attributes of the same tuple will be optimized better, + * because we avoid O(N^2) behavior from multiple calls of + * nocachegetattr(), even when attcacheoff isn't usable. + * + * A difference from raw heap_getattr is that attnums beyond the + * slot's tupdesc's last attribute will be considered NULL even + * when the physical tuple is longer than the tupdesc. 
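A short hypothetical use of slot_getattr() as documented above; sum_int4_column and its attnum argument are invented, and the column is assumed to be of type int4.

    #include "postgres.h"
    #include "executor/tuptable.h"

    /* Accumulate one int4 column of whatever tuple the slot currently holds. */
    static int64
    sum_int4_column(TupleTableSlot *slot, int attnum, int64 acc)
    {
        bool    isnull;
        Datum   value = slot_getattr(slot, attnum, &isnull);

        if (!isnull)
            acc += DatumGetInt32(value);
        return acc;
    }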
+ */ +Datum +slot_getattr(TupleTableSlot *slot, int attnum, bool *isnull) +{ + HeapTuple tuple = slot->tts_tuple; + TupleDesc tupleDesc = slot->tts_tupleDescriptor; + HeapTupleHeader tup; + + /* + * system attributes are handled by heap_getsysattr + */ + if (attnum <= 0) + { + if (tuple == NULL) /* internal error */ + elog(ERROR, "cannot extract system attribute from virtual tuple"); + if (tuple == &(slot->tts_minhdr)) /* internal error */ + elog(ERROR, "cannot extract system attribute from minimal tuple"); + return heap_getsysattr(tuple, attnum, tupleDesc, isnull); + } + + /* + * fast path if desired attribute already cached + */ + if (attnum <= slot->tts_nvalid) + { + *isnull = slot->tts_isnull[attnum - 1]; + return slot->tts_values[attnum - 1]; + } + + /* + * return NULL if attnum is out of range according to the tupdesc + */ + if (attnum > tupleDesc->natts) + { + *isnull = true; + return (Datum) 0; + } + + /* + * otherwise we had better have a physical tuple (tts_nvalid should equal + * natts in all virtual-tuple cases) + */ + if (tuple == NULL) /* internal error */ + elog(ERROR, "cannot extract attribute from empty tuple slot"); + + /* + * return NULL or missing value if attnum is out of range according to the + * tuple + * + * (We have to check this separately because of various inheritance and + * table-alteration scenarios: the tuple could be either longer or shorter + * than the tupdesc.) + */ + tup = tuple->t_data; + if (attnum > HeapTupleHeaderGetNatts(tup)) + return getmissingattr(slot->tts_tupleDescriptor, attnum, isnull); + + /* + * check if target attribute is null: no point in groveling through tuple + */ + if (HeapTupleHasNulls(tuple) && att_isnull(attnum - 1, tup->t_bits)) + { + *isnull = true; + return (Datum) 0; + } + + /* + * Extract the attribute, along with any preceding attributes. + */ + slot_deform_tuple(slot, attnum); + + /* + * The result is acquired from tts_values array. + */ + *isnull = slot->tts_isnull[attnum - 1]; + return slot->tts_values[attnum - 1]; +} + +/* + * slot_getsomeattrs + * This function forces the entries of the slot's Datum/isnull + * arrays to be valid at least up through the attnum'th entry. 
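A companion sketch for slot_getsomeattrs(), whose contract is described just above: deform once, then read the arrays directly; read_leading_columns is an invented name.

    #include "postgres.h"
    #include "executor/tuptable.h"

    static void
    read_leading_columns(TupleTableSlot *slot, int natts)
    {
        int     i;

        /* ensure tts_values/tts_isnull are valid for attributes 1..natts */
        slot_getsomeattrs(slot, natts);

        for (i = 0; i < natts; i++)
        {
            if (slot->tts_isnull[i])
                continue;
            /* use slot->tts_values[i] here; short rows were filled in above */
        }
    }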
+ */ +void +slot_getsomeattrs(TupleTableSlot *slot, int attnum) +{ + HeapTuple tuple; + int attno; + + /* Quick out if we have 'em all already */ + if (slot->tts_nvalid >= attnum) + return; + + /* Check for caller error */ + if (attnum <= 0 || attnum > slot->tts_tupleDescriptor->natts) + elog(ERROR, "invalid attribute number %d", attnum); + + /* + * otherwise we had better have a physical tuple (tts_nvalid should equal + * natts in all virtual-tuple cases) + */ + tuple = slot->tts_tuple; + if (tuple == NULL) /* internal error */ + elog(ERROR, "cannot extract attribute from empty tuple slot"); + + /* + * load up any slots available from physical tuple + */ + attno = HeapTupleHeaderGetNatts(tuple->t_data); + attno = Min(attno, attnum); + + slot_deform_tuple(slot, attno); + + attno = slot->tts_nvalid; + + /* + * If tuple doesn't have all the atts indicated by attnum, read the rest + * as NULLs or missing values + */ + if (attno < attnum) + slot_getmissingattrs(slot, attno, attnum); + + slot->tts_nvalid = attnum; +} + /* ---------------------------------------------------------------- * ExecTypeFromTL * @@ -997,7 +1316,8 @@ ExecTypeSetColNames(TupleDesc typeInfo, List *namesList) /* Guard against too-long names list */ if (colno >= typeInfo->natts) break; - attr = typeInfo->attrs[colno++]; + attr = TupleDescAttr(typeInfo, colno); + colno++; /* Ignore empty aliases (these must be for dropped columns) */ if (cname[0] == '\0') @@ -1037,28 +1357,6 @@ BlessTupleDesc(TupleDesc tupdesc) return tupdesc; /* just for notational convenience */ } -/* - * TupleDescGetSlot - Initialize a slot based on the supplied tupledesc - * - * Note: this is obsolete; it is sufficient to call BlessTupleDesc on - * the tupdesc. We keep it around just for backwards compatibility with - * existing user-written SRFs. - */ -TupleTableSlot * -TupleDescGetSlot(TupleDesc tupdesc) -{ - TupleTableSlot *slot; - - /* The useful work is here */ - BlessTupleDesc(tupdesc); - - /* Make a standalone slot */ - slot = MakeSingleTupleTableSlot(tupdesc); - - /* Return the slot */ - return slot; -} - /* * TupleDescGetAttInMetadata - Build an AttInMetadata structure based on the * supplied TupleDesc. AttInMetadata can be used in conjunction with C strings @@ -1090,13 +1388,15 @@ TupleDescGetAttInMetadata(TupleDesc tupdesc) for (i = 0; i < natts; i++) { + Form_pg_attribute att = TupleDescAttr(tupdesc, i); + /* Ignore dropped attributes */ - if (!tupdesc->attrs[i]->attisdropped) + if (!att->attisdropped) { - atttypeid = tupdesc->attrs[i]->atttypid; + atttypeid = att->atttypid; getTypeInputInfo(atttypeid, &attinfuncid, &attioparams[i]); fmgr_info(attinfuncid, &attinfuncinfo[i]); - atttypmods[i] = tupdesc->attrs[i]->atttypmod; + atttypmods[i] = att->atttypmod; } } attinmeta->attinfuncs = attinfuncinfo; @@ -1124,10 +1424,13 @@ BuildTupleFromCStrings(AttInMetadata *attinmeta, char **values) dvalues = (Datum *) palloc(natts * sizeof(Datum)); nulls = (bool *) palloc(natts * sizeof(bool)); - /* Call the "in" function for each non-dropped attribute */ + /* + * Call the "in" function for each non-dropped attribute, even for nulls, + * to support domains. 
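A hypothetical SRF-style fragment showing the behaviour change above: the NULL entry is still run through the column's input function, so a column whose type is a NOT NULL domain would now raise an error here. build_row and the assumed two-column layout are inventions of the example.

    #include "postgres.h"
    #include "funcapi.h"

    /* tupdesc is assumed to describe exactly two columns */
    static HeapTuple
    build_row(TupleDesc tupdesc, char *id_text)
    {
        AttInMetadata *attinmeta = TupleDescGetAttInMetadata(tupdesc);
        char       *values[2];

        values[0] = id_text;    /* e.g. "42" */
        values[1] = NULL;       /* SQL NULL; its input function still runs */

        return BuildTupleFromCStrings(attinmeta, values);
    }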
+ */ for (i = 0; i < natts; i++) { - if (!tupdesc->attrs[i]->attisdropped) + if (!TupleDescAttr(tupdesc, i)->attisdropped) { /* Non-dropped attributes */ dvalues[i] = InputFunctionCall(&attinmeta->attinfuncs[i], @@ -1238,7 +1541,7 @@ begin_tup_output_tupdesc(DestReceiver *dest, TupleDesc tupdesc) tstate->slot = MakeSingleTupleTableSlot(tupdesc); tstate->dest = dest; - (*tstate->dest->rStartup) (tstate->dest, (int) CMD_SELECT, tupdesc); + tstate->dest->rStartup(tstate->dest, (int) CMD_SELECT, tupdesc); return tstate; } @@ -1263,7 +1566,7 @@ do_tup_output(TupOutputState *tstate, Datum *values, bool *isnull) ExecStoreVirtualTuple(slot); /* send the tuple to the receiver */ - (void) (*tstate->dest->receiveSlot) (slot, tstate->dest); + (void) tstate->dest->receiveSlot(slot, tstate->dest); /* clean up */ ExecClearTuple(slot); @@ -1307,7 +1610,7 @@ do_text_output_multiline(TupOutputState *tstate, const char *txt) void end_tup_output(TupOutputState *tstate) { - (*tstate->dest->rShutdown) (tstate->dest); + tstate->dest->rShutdown(tstate->dest); /* note that destroying the dest is not ours to do */ ExecDropSingleTupleTableSlot(tstate->slot); pfree(tstate); diff --git a/src/backend/executor/execUtils.c b/src/backend/executor/execUtils.c index 25772fc603..f9e7bb479f 100644 --- a/src/backend/executor/execUtils.c +++ b/src/backend/executor/execUtils.c @@ -3,7 +3,7 @@ * execUtils.c * miscellaneous executor utility routines * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * @@ -22,11 +22,13 @@ * ReScanExprContext * * ExecAssignExprContext Common code for plan node init routines. - * ExecAssignResultType * etc * * ExecOpenScanRelation Common code for scan node init routines. - * ExecCloseScanRelation + * + * ExecInitRangeTable Set up executor's range-table-related data. + * + * ExecGetRangeTableRelation Fetch Relation for a rangetable entry. * * executor_errposition Report syntactic position of an error. 
* @@ -43,9 +45,11 @@ #include "postgres.h" +#include "access/parallel.h" #include "access/relscan.h" #include "access/transam.h" #include "executor/executor.h" +#include "jit/jit.h" #include "mb/pg_wchar.h" #include "nodes/nodeFuncs.h" #include "parser/parsetree.h" @@ -56,6 +60,7 @@ #include "utils/typcache.h" +static bool tlist_matches_tupdesc(PlanState *ps, List *tlist, Index varno, TupleDesc tupdesc); static void ShutdownExprContext(ExprContext *econtext, bool isCommit); @@ -105,6 +110,10 @@ CreateExecutorState(void) estate->es_snapshot = InvalidSnapshot; /* caller must initialize this */ estate->es_crosscheck_snapshot = InvalidSnapshot; /* no crosscheck */ estate->es_range_table = NIL; + estate->es_range_table_array = NULL; + estate->es_range_table_size = 0; + estate->es_relations = NULL; + estate->es_rowmarks = NULL; estate->es_plannedstmt = NULL; estate->es_junkFilter = NULL; @@ -115,6 +124,11 @@ CreateExecutorState(void) estate->es_num_result_relations = 0; estate->es_result_relation_info = NULL; + estate->es_root_result_relations = NULL; + estate->es_num_root_result_relations = 0; + + estate->es_tuple_routing_result_relations = NIL; + estate->es_trig_target_relations = NIL; estate->es_trig_tuple_slot = NULL; estate->es_trig_oldtup_slot = NULL; @@ -129,8 +143,6 @@ CreateExecutorState(void) estate->es_tupleTable = NIL; - estate->es_rowMarks = NIL; - estate->es_processed = 0; estate->es_lastoid = InvalidOid; @@ -151,6 +163,11 @@ CreateExecutorState(void) estate->es_epqScanDone = NULL; estate->es_sourceText = NULL; + estate->es_use_parallel_mode = false; + + estate->es_jit_flags = 0; + estate->es_jit = NULL; + /* * Return the executor state structure */ @@ -164,11 +181,11 @@ CreateExecutorState(void) * * Release an EState along with all remaining working storage. * - * Note: this is not responsible for releasing non-memory resources, - * such as open relations or buffer pins. But it will shut down any - * still-active ExprContexts within the EState. That is sufficient - * cleanup for situations where the EState has only been used for expression - * evaluation, and not to run a complete Plan. + * Note: this is not responsible for releasing non-memory resources, such as + * open relations or buffer pins. But it will shut down any still-active + * ExprContexts within the EState and deallocate associated JITed expressions. + * That is sufficient cleanup for situations where the EState has only been + * used for expression evaluation, and not to run a complete Plan. * * This can be called in any memory context ... so long as it's not one * of the ones to be freed. @@ -194,6 +211,13 @@ FreeExecutorState(EState *estate) /* FreeExprContext removed the list link for us */ } + /* release JIT context, if allocated */ + if (estate->es_jit) + { + jit_release_context(estate->es_jit); + estate->es_jit = NULL; + } + /* * Free the per-query memory context, thereby releasing all working * memory, including the EState node itself. 
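The "only been used for expression evaluation" case mentioned in the FreeExecutorState() comment above looks roughly like the planner's evaluate_expr(); a condensed, hedged sketch (eval_expr_once is not a real function, and the expression is assumed to be fully planned):

    #include "postgres.h"
    #include "executor/executor.h"
    #include "nodes/nodeFuncs.h"
    #include "utils/datum.h"
    #include "utils/lsyscache.h"

    static Datum
    eval_expr_once(Expr *expr, bool *isnull)
    {
        EState     *estate = CreateExecutorState();
        ExprState  *exprstate = ExecInitExpr(expr, NULL);
        Datum       result;
        int16       typlen;
        bool        typbyval;

        result = ExecEvalExprSwitchContext(exprstate,
                                           GetPerTupleExprContext(estate),
                                           isnull);

        /* copy pass-by-reference results out of executor memory first */
        get_typlenbyval(exprType((Node *) expr), &typlen, &typbyval);
        if (!*isnull)
            result = datumCopy(result, typbyval, typlen);

        /* shuts down ExprContexts and releases any JIT context */
        FreeExecutorState(estate);
        return result;
    }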
@@ -420,47 +444,6 @@ ExecAssignExprContext(EState *estate, PlanState *planstate) planstate->ps_ExprContext = CreateExprContext(estate); } -/* ---------------- - * ExecAssignResultType - * ---------------- - */ -void -ExecAssignResultType(PlanState *planstate, TupleDesc tupDesc) -{ - TupleTableSlot *slot = planstate->ps_ResultTupleSlot; - - ExecSetSlotDescriptor(slot, tupDesc); -} - -/* ---------------- - * ExecAssignResultTypeFromTL - * ---------------- - */ -void -ExecAssignResultTypeFromTL(PlanState *planstate) -{ - bool hasoid; - TupleDesc tupDesc; - - if (ExecContextForcesOids(planstate, &hasoid)) - { - /* context forces OID choice; hasoid is now set correctly */ - } - else - { - /* given free choice, don't leave space for OIDs in result tuples */ - hasoid = false; - } - - /* - * ExecTypeFromTL needs the parse-time representation of the tlist, not a - * list of ExprStates. This is good because some plan nodes don't bother - * to set up planstate->targetlist ... - */ - tupDesc = ExecTypeFromTL(planstate->plan->targetlist, hasoid); - ExecAssignResultType(planstate, tupDesc); -} - /* ---------------- * ExecGetResultType * ---------------- @@ -468,9 +451,7 @@ ExecAssignResultTypeFromTL(PlanState *planstate) TupleDesc ExecGetResultType(PlanState *planstate) { - TupleTableSlot *slot = planstate->ps_ResultTupleSlot; - - return slot->tts_tupleDescriptor; + return planstate->ps_ResultTupleDesc; } @@ -496,6 +477,91 @@ ExecAssignProjectionInfo(PlanState *planstate, } +/* ---------------- + * ExecConditionalAssignProjectionInfo + * + * as ExecAssignProjectionInfo, but store NULL rather than building projection + * info if no projection is required + * ---------------- + */ +void +ExecConditionalAssignProjectionInfo(PlanState *planstate, TupleDesc inputDesc, + Index varno) +{ + if (tlist_matches_tupdesc(planstate, + planstate->plan->targetlist, + varno, + inputDesc)) + planstate->ps_ProjInfo = NULL; + else + { + if (!planstate->ps_ResultTupleSlot) + ExecInitResultSlot(planstate); + ExecAssignProjectionInfo(planstate, inputDesc); + } +} + +static bool +tlist_matches_tupdesc(PlanState *ps, List *tlist, Index varno, TupleDesc tupdesc) +{ + int numattrs = tupdesc->natts; + int attrno; + bool hasoid; + ListCell *tlist_item = list_head(tlist); + + /* Check the tlist attributes */ + for (attrno = 1; attrno <= numattrs; attrno++) + { + Form_pg_attribute att_tup = TupleDescAttr(tupdesc, attrno - 1); + Var *var; + + if (tlist_item == NULL) + return false; /* tlist too short */ + var = (Var *) ((TargetEntry *) lfirst(tlist_item))->expr; + if (!var || !IsA(var, Var)) + return false; /* tlist item not a Var */ + /* if these Asserts fail, planner messed up */ + Assert(var->varno == varno); + Assert(var->varlevelsup == 0); + if (var->varattno != attrno) + return false; /* out of order */ + if (att_tup->attisdropped) + return false; /* table contains dropped columns */ + if (att_tup->atthasmissing) + return false; /* table contains cols with missing values */ + + /* + * Note: usually the Var's type should match the tupdesc exactly, but + * in situations involving unions of columns that have different + * typmods, the Var may have come from above the union and hence have + * typmod -1. This is a legitimate situation since the Var still + * describes the column, just not as exactly as the tupdesc does. We + * could change the planner to prevent it, but it'd then insert + * projection steps just to convert from specific typmod to typmod -1, + * which is pretty silly. 
+ */ + if (var->vartype != att_tup->atttypid || + (var->vartypmod != att_tup->atttypmod && + var->vartypmod != -1)) + return false; /* type mismatch */ + + tlist_item = lnext(tlist_item); + } + + if (tlist_item) + return false; /* tlist too long */ + + /* + * If the plan context requires a particular hasoid setting, then that has + * to match, too. + */ + if (ExecContextForcesOids(ps, &hasoid) && + hasoid != tupdesc->tdhasoid) + return false; + + return true; +} + /* ---------------- * ExecFreeExprContext * @@ -522,13 +588,9 @@ ExecFreeExprContext(PlanState *planstate) planstate->ps_ExprContext = NULL; } + /* ---------------------------------------------------------------- - * the following scan type support functions are for - * those nodes which are stubborn and return tuples in - * their Scan tuple slot instead of their Result tuple - * slot.. luck fur us, these nodes do not do projections - * so we don't have to worry about getting the ProjectionInfo - * right for them... -cim 6/3/91 + * Scan node support * ---------------------------------------------------------------- */ @@ -545,11 +607,11 @@ ExecAssignScanType(ScanState *scanstate, TupleDesc tupDesc) } /* ---------------- - * ExecAssignScanTypeFromOuterPlan + * ExecCreateSlotFromOuterPlan * ---------------- */ void -ExecAssignScanTypeFromOuterPlan(ScanState *scanstate) +ExecCreateScanSlotFromOuterPlan(EState *estate, ScanState *scanstate) { PlanState *outerPlan; TupleDesc tupDesc; @@ -557,15 +619,9 @@ ExecAssignScanTypeFromOuterPlan(ScanState *scanstate) outerPlan = outerPlanState(scanstate); tupDesc = ExecGetResultType(outerPlan); - ExecAssignScanType(scanstate, tupDesc); + ExecInitScanTupleSlot(estate, scanstate, tupDesc); } - -/* ---------------------------------------------------------------- - * Scan node support - * ---------------------------------------------------------------- - */ - /* ---------------------------------------------------------------- * ExecRelationIsTargetRelation * @@ -593,39 +649,15 @@ ExecRelationIsTargetRelation(EState *estate, Index scanrelid) * * Open the heap relation to be scanned by a base-level scan plan node. * This should be called during the node's ExecInit routine. - * - * By default, this acquires AccessShareLock on the relation. However, - * if the relation was already locked by InitPlan, we don't need to acquire - * any additional lock. This saves trips to the shared lock manager. * ---------------------------------------------------------------- */ Relation ExecOpenScanRelation(EState *estate, Index scanrelid, int eflags) { Relation rel; - Oid reloid; - LOCKMODE lockmode; - - /* - * Determine the lock type we need. First, scan to see if target relation - * is a result relation. If not, check if it's a FOR UPDATE/FOR SHARE - * relation. In either of those cases, we got the lock already. - */ - lockmode = AccessShareLock; - if (ExecRelationIsTargetRelation(estate, scanrelid)) - lockmode = NoLock; - else - { - /* Keep this check in sync with InitPlan! */ - ExecRowMark *erm = ExecFindRowMark(estate, scanrelid, true); - if (erm != NULL && erm->relation != NULL) - lockmode = NoLock; - } - - /* Open the relation and acquire lock as needed */ - reloid = getrelid(scanrelid, estate->es_range_table); - rel = heap_open(reloid, lockmode); + /* Open the relation. 
*/ + rel = ExecGetRangeTableRelation(estate, scanrelid); /* * Complain if we're attempting a scan of an unscannable relation, except @@ -643,24 +675,97 @@ ExecOpenScanRelation(EState *estate, Index scanrelid, int eflags) return rel; } -/* ---------------------------------------------------------------- - * ExecCloseScanRelation - * - * Close the heap relation scanned by a base-level scan plan node. - * This should be called during the node's ExecEnd routine. - * - * Currently, we do not release the lock acquired by ExecOpenScanRelation. - * This lock should be held till end of transaction. (There is a faction - * that considers this too much locking, however.) +/* + * ExecInitRangeTable + * Set up executor's range-table-related data * - * If we did want to release the lock, we'd have to repeat the logic in - * ExecOpenScanRelation in order to figure out what to release. - * ---------------------------------------------------------------- + * We build an array from the range table list to allow faster lookup by RTI. + * (The es_range_table field is now somewhat redundant, but we keep it to + * avoid breaking external code unnecessarily.) + * This is also a convenient place to set up the parallel es_relations array. */ void -ExecCloseScanRelation(Relation scanrel) +ExecInitRangeTable(EState *estate, List *rangeTable) { - heap_close(scanrel, NoLock); + Index rti; + ListCell *lc; + + /* Remember the range table List as-is */ + estate->es_range_table = rangeTable; + + /* Set up the equivalent array representation */ + estate->es_range_table_size = list_length(rangeTable); + estate->es_range_table_array = (RangeTblEntry **) + palloc(estate->es_range_table_size * sizeof(RangeTblEntry *)); + rti = 0; + foreach(lc, rangeTable) + { + estate->es_range_table_array[rti++] = lfirst_node(RangeTblEntry, lc); + } + + /* + * Allocate an array to store an open Relation corresponding to each + * rangetable entry, and initialize entries to NULL. Relations are opened + * and stored here as needed. + */ + estate->es_relations = (Relation *) + palloc0(estate->es_range_table_size * sizeof(Relation)); + + /* + * es_rowmarks is also parallel to the es_range_table_array, but it's + * allocated only if needed. + */ + estate->es_rowmarks = NULL; +} + +/* + * ExecGetRangeTableRelation + * Open the Relation for a range table entry, if not already done + * + * The Relations will be closed again in ExecEndPlan(). + */ +Relation +ExecGetRangeTableRelation(EState *estate, Index rti) +{ + Relation rel; + + Assert(rti > 0 && rti <= estate->es_range_table_size); + + rel = estate->es_relations[rti - 1]; + if (rel == NULL) + { + /* First time through, so open the relation */ + RangeTblEntry *rte = exec_rt_fetch(rti, estate); + + Assert(rte->rtekind == RTE_RELATION); + + if (!IsParallelWorker()) + { + /* + * In a normal query, we should already have the appropriate lock, + * but verify that through an Assert. Since there's already an + * Assert inside heap_open that insists on holding some lock, it + * seems sufficient to check this only when rellockmode is higher + * than the minimum. + */ + rel = heap_open(rte->relid, NoLock); + Assert(rte->rellockmode == AccessShareLock || + CheckRelationLockedByMe(rel, rte->rellockmode, false)); + } + else + { + /* + * If we are a parallel worker, we need to obtain our own local + * lock on the relation. This ensures sane behavior in case the + * parent process exits before we do. 
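Usage-wise, a scan node now obtains its relation through the EState cache that ExecInitRangeTable() sets up, instead of choosing a lock mode itself; the open_scan_rel wrapper below is purely illustrative.

    #include "postgres.h"
    #include "executor/executor.h"
    #include "utils/rel.h"

    /* InitPlan is assumed to have called ExecInitRangeTable(estate, rangeTable). */
    static Relation
    open_scan_rel(EState *estate, Index scanrelid)
    {
        /* replaces: heap_open(getrelid(scanrelid, estate->es_range_table), lockmode) */
        return ExecGetRangeTableRelation(estate, scanrelid);
    }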
+ */ + rel = heap_open(rte->relid, rte->rellockmode); + } + + estate->es_relations[rti - 1] = rel; + } + + return rel; } /* @@ -808,68 +913,13 @@ ShutdownExprContext(ExprContext *econtext, bool isCommit) { econtext->ecxt_callbacks = ecxt_callback->next; if (isCommit) - (*ecxt_callback->function) (ecxt_callback->arg); + ecxt_callback->function(ecxt_callback->arg); pfree(ecxt_callback); } MemoryContextSwitchTo(oldcontext); } -/* - * ExecLockNonLeafAppendTables - * - * Locks, if necessary, the tables indicated by the RT indexes contained in - * the partitioned_rels list. These are the non-leaf tables in the partition - * tree controlled by a given Append or MergeAppend node. - */ -void -ExecLockNonLeafAppendTables(List *partitioned_rels, EState *estate) -{ - PlannedStmt *stmt = estate->es_plannedstmt; - ListCell *lc; - - foreach(lc, partitioned_rels) - { - ListCell *l; - Index rti = lfirst_int(lc); - bool is_result_rel = false; - Oid relid = getrelid(rti, estate->es_range_table); - - /* If this is a result relation, already locked in InitPlan */ - foreach(l, stmt->nonleafResultRelations) - { - if (rti == lfirst_int(l)) - { - is_result_rel = true; - break; - } - } - - /* - * Not a result relation; check if there is a RowMark that requires - * taking a RowShareLock on this rel. - */ - if (!is_result_rel) - { - PlanRowMark *rc = NULL; - - foreach(l, stmt->rowMarks) - { - if (((PlanRowMark *) lfirst(l))->rti == rti) - { - rc = lfirst(l); - break; - } - } - - if (rc && RowMarkRequiresRowShareLock(rc->markType)) - LockRelationOid(relid, RowShareLock); - else - LockRelationOid(relid, AccessShareLock); - } - } -} - /* * GetAttributeByName * GetAttributeByNum @@ -912,9 +962,11 @@ GetAttributeByName(HeapTupleHeader tuple, const char *attname, bool *isNull) attrno = InvalidAttrNumber; for (i = 0; i < tupDesc->natts; i++) { - if (namestrcmp(&(tupDesc->attrs[i]->attname), attname) == 0) + Form_pg_attribute att = TupleDescAttr(tupDesc, i); + + if (namestrcmp(&(att->attname), attname) == 0) { - attrno = tupDesc->attrs[i]->attnum; + attrno = att->attnum; break; } } diff --git a/src/backend/executor/functions.c b/src/backend/executor/functions.c index 3630f5d966..23545896d4 100644 --- a/src/backend/executor/functions.c +++ b/src/backend/executor/functions.c @@ -3,7 +3,7 @@ * functions.c * Execution of SQL-language functions * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * @@ -390,6 +390,7 @@ sql_fn_post_column_ref(ParseState *pstate, ColumnRef *cref, Node *var) list_make1(param), pstate->p_last_srf, NULL, + false, cref->location); } @@ -572,8 +573,7 @@ init_execution_state(List *queryTree_list, * * Note: don't set setsResult if the function returns VOID, as evidenced * by not having made a junkfilter. This ensures we'll throw away any - * output from a utility statement that check_sql_fn_retval deemed to not - * have output. + * output from the last statement in such a function. */ if (lasttages && fcache->junkFilter) { @@ -612,7 +612,7 @@ init_sql_fcache(FmgrInfo *finfo, Oid collation, bool lazyEvalOK) * must be a child of whatever context holds the FmgrInfo. 
*/ fcontext = AllocSetContextCreate(finfo->fn_mcxt, - "SQL function data", + "SQL function", ALLOCSET_DEFAULT_SIZES); oldcontext = MemoryContextSwitchTo(fcontext); @@ -635,9 +635,11 @@ init_sql_fcache(FmgrInfo *finfo, Oid collation, bool lazyEvalOK) procedureStruct = (Form_pg_proc) GETSTRUCT(procedureTuple); /* - * copy function name immediately for use by error reporting callback + * copy function name immediately for use by error reporting callback, and + * for use as memory context identifier */ fcache->fname = pstrdup(NameStr(procedureStruct->proname)); + MemoryContextSetIdentifier(fcontext, fcache->fname); /* * get the result type from the procedure tuple, and check for polymorphic @@ -721,6 +723,8 @@ init_sql_fcache(FmgrInfo *finfo, Oid collation, bool lazyEvalOK) list_copy(queryTree_sublist)); } + check_sql_fn_statements(flat_query_list); + /* * Check that the function returns the type it claims to. Although in * simple cases this was already done when the function was defined, we @@ -886,7 +890,7 @@ postquel_end(execution_state *es) ExecutorEnd(es->qd); } - (*es->qd->dest->rDestroy) (es->qd->dest); + es->qd->dest->rDestroy(es->qd->dest); FreeQueryDesc(es->qd); es->qd = NULL; @@ -912,10 +916,11 @@ postquel_sub_params(SQLFunctionCachePtr fcache, /* we have static list of params, so no hooks needed */ paramLI->paramFetch = NULL; paramLI->paramFetchArg = NULL; + paramLI->paramCompile = NULL; + paramLI->paramCompileArg = NULL; paramLI->parserSetup = NULL; paramLI->parserSetupArg = NULL; paramLI->numParams = nargs; - paramLI->paramMask = NULL; fcache->paramLI = paramLI; } else @@ -1321,7 +1326,7 @@ fmgr_sql(PG_FUNCTION_ARGS) } else { - /* Should only get here for VOID functions */ + /* Should only get here for VOID functions and procedures */ Assert(fcache->rettype == VOIDOID); fcinfo->isnull = true; result = (Datum) 0; @@ -1485,6 +1490,55 @@ ShutdownSQLFunction(Datum arg) fcache->shutdown_reg = false; } +/* + * check_sql_fn_statements + * + * Check statements in an SQL function. Error out if there is anything that + * is not acceptable. + */ +void +check_sql_fn_statements(List *queryTreeList) +{ + ListCell *lc; + + foreach(lc, queryTreeList) + { + Query *query = lfirst_node(Query, lc); + + /* + * Disallow procedures with output arguments. The current + * implementation would just throw the output values away, unless the + * statement is the last one. Per SQL standard, we should assign the + * output values by name. By disallowing this here, we preserve an + * opportunity for future improvement. + */ + if (query->commandType == CMD_UTILITY && + IsA(query->utilityStmt, CallStmt)) + { + CallStmt *stmt = castNode(CallStmt, query->utilityStmt); + HeapTuple tuple; + int numargs; + Oid *argtypes; + char **argnames; + char *argmodes; + int i; + + tuple = SearchSysCache1(PROCOID, ObjectIdGetDatum(stmt->funcexpr->funcid)); + if (!HeapTupleIsValid(tuple)) + elog(ERROR, "cache lookup failed for function %u", stmt->funcexpr->funcid); + numargs = get_func_arg_info(tuple, &argtypes, &argnames, &argmodes); + ReleaseSysCache(tuple); + + for (i = 0; i < numargs; i++) + { + if (argmodes && (argmodes[i] == PROARGMODE_INOUT || argmodes[i] == PROARGMODE_OUT)) + ereport(ERROR, + (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), + errmsg("calling procedures with output arguments is not supported in SQL functions"))); + } + } + } +} /* * check_sql_fn_retval() -- check return value of a list of sql parse trees. 
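Earlier in this hunk, init_sql_fcache()'s context is renamed to the constant "SQL function" and tagged with MemoryContextSetIdentifier(); extension caches can follow the same convention. A hypothetical sketch (make_cache_context and "my extension cache" are invented):

    #include "postgres.h"
    #include "utils/memutils.h"

    static MemoryContext
    make_cache_context(MemoryContext parent, const char *instance_name)
    {
        MemoryContext cxt = AllocSetContextCreate(parent,
                                                  "my extension cache",
                                                  ALLOCSET_DEFAULT_SIZES);

        /* keep the identifier string in the context it labels */
        MemoryContextSetIdentifier(cxt, MemoryContextStrdup(cxt, instance_name));
        return cxt;
    }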
@@ -1548,6 +1602,13 @@ check_sql_fn_retval(Oid func_id, Oid rettype, List *queryTreeList, if (junkFilter) *junkFilter = NULL; /* initialize in case of VOID result */ + /* + * If it's declared to return VOID, we don't care what's in the function. + * (This takes care of the procedure case, as well.) + */ + if (rettype == VOIDOID) + return false; + /* * Find the last canSetTag query in the list. This isn't necessarily the * last parsetree, because rule rewriting can insert queries after what @@ -1591,21 +1652,17 @@ check_sql_fn_retval(Oid func_id, Oid rettype, List *queryTreeList, else { /* Empty function body, or last statement is a utility command */ - if (rettype != VOIDOID) - ereport(ERROR, - (errcode(ERRCODE_INVALID_FUNCTION_DEFINITION), - errmsg("return type mismatch in function declared to return %s", - format_type_be(rettype)), - errdetail("Function's final statement must be SELECT or INSERT/UPDATE/DELETE RETURNING."))); - return false; + ereport(ERROR, + (errcode(ERRCODE_INVALID_FUNCTION_DEFINITION), + errmsg("return type mismatch in function declared to return %s", + format_type_be(rettype)), + errdetail("Function's final statement must be SELECT or INSERT/UPDATE/DELETE RETURNING."))); + return false; /* keep compiler quiet */ } /* * OK, check that the targetlist returns something matching the declared - * type. (We used to insist that the declared type not be VOID in this - * case, but that makes it hard to write a void function that exits after - * calling another void function. Instead, we insist that the tlist - * return void ... so void is treated as if it were a scalar type below.) + * type. */ /* @@ -1618,8 +1675,7 @@ check_sql_fn_retval(Oid func_id, Oid rettype, List *queryTreeList, if (fn_typtype == TYPTYPE_BASE || fn_typtype == TYPTYPE_DOMAIN || fn_typtype == TYPTYPE_ENUM || - fn_typtype == TYPTYPE_RANGE || - rettype == VOIDOID) + fn_typtype == TYPTYPE_RANGE) { /* * For scalar-type returns, the target list must have exactly one @@ -1665,7 +1721,15 @@ check_sql_fn_retval(Oid func_id, Oid rettype, List *queryTreeList, } else if (fn_typtype == TYPTYPE_COMPOSITE || rettype == RECORDOID) { - /* Returns a rowtype */ + /* + * Returns a rowtype. + * + * Note that we will not consider a domain over composite to be a + * "rowtype" return type; it goes through the scalar case above. This + * is because SQL functions don't provide any implicit casting to the + * result type, so there is no way to produce a domain-over-composite + * result except by computing it as an explicit single-column result. + */ TupleDesc tupdesc; int tupnatts; /* physical number of columns in tuple */ int tuplogcols; /* # of nondeleted columns in tuple */ @@ -1711,7 +1775,10 @@ check_sql_fn_retval(Oid func_id, Oid rettype, List *queryTreeList, } } - /* Is the rowtype fixed, or determined only at runtime? */ + /* + * Is the rowtype fixed, or determined only at runtime? (Note we + * cannot see TYPEFUNC_COMPOSITE_DOMAIN here.) 
+ */ if (get_func_result_type(func_id, NULL, &tupdesc) != TYPEFUNC_COMPOSITE) { /* @@ -1759,7 +1826,7 @@ check_sql_fn_retval(Oid func_id, Oid rettype, List *queryTreeList, errmsg("return type mismatch in function declared to return %s", format_type_be(rettype)), errdetail("Final statement returns too many columns."))); - attr = tupdesc->attrs[colindex - 1]; + attr = TupleDescAttr(tupdesc, colindex - 1); if (attr->attisdropped && modifyTargetList) { Expr *null_expr; @@ -1816,7 +1883,7 @@ check_sql_fn_retval(Oid func_id, Oid rettype, List *queryTreeList, /* remaining columns in tupdesc had better all be dropped */ for (colindex++; colindex <= tupnatts; colindex++) { - if (!tupdesc->attrs[colindex - 1]->attisdropped) + if (!TupleDescAttr(tupdesc, colindex - 1)->attisdropped) ereport(ERROR, (errcode(ERRCODE_INVALID_FUNCTION_DEFINITION), errmsg("return type mismatch in function declared to return %s", diff --git a/src/backend/executor/instrument.c b/src/backend/executor/instrument.c index 6ec96ec371..fe5d55904d 100644 --- a/src/backend/executor/instrument.c +++ b/src/backend/executor/instrument.c @@ -4,7 +4,7 @@ * functions for instrumentation of plan execution * * - * Copyright (c) 2001-2017, PostgreSQL Global Development Group + * Copyright (c) 2001-2018, PostgreSQL Global Development Group * * IDENTIFICATION * src/backend/executor/instrument.c @@ -49,7 +49,7 @@ InstrAlloc(int n, int instrument_options) return instr; } -/* Initialize an pre-allocated instrumentation structure. */ +/* Initialize a pre-allocated instrumentation structure. */ void InstrInit(Instrumentation *instr, int instrument_options) { @@ -156,6 +156,7 @@ InstrAggNode(Instrumentation *dst, Instrumentation *add) dst->startup += add->startup; dst->total += add->total; dst->ntuples += add->ntuples; + dst->ntuples2 += add->ntuples2; dst->nloops += add->nloops; dst->nfiltered1 += add->nfiltered1; dst->nfiltered2 += add->nfiltered2; diff --git a/src/backend/executor/nodeAgg.c b/src/backend/executor/nodeAgg.c index 6a26773a49..85f1ec7140 100644 --- a/src/backend/executor/nodeAgg.c +++ b/src/backend/executor/nodeAgg.c @@ -90,7 +90,7 @@ * but in the aggregate case we know the left input is either the initial * transition value or a previous function result, and in either case its * value need not be preserved. See int8inc() for an example. Notice that - * advance_transition_function() is coded to avoid a data copy step when + * the EEOP_AGG_PLAIN_TRANS step is coded to avoid a data copy step when * the previous transition value pointer is returned. It is also possible * to avoid repeated data copying when the transition value is an expanded * object: to do that, the transition function must take care to return @@ -194,8 +194,18 @@ * transition values. hashcontext is the single context created to support * all hash tables. * + * Transition / Combine function invocation: * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * For performance reasons transition functions, including combine + * functions, aren't invoked one-by-one from nodeAgg.c after computing + * arguments using the expression evaluation engine. Instead + * ExecBuildAggTrans() builds one large expression that does both argument + * evaluation and transition function invocation. That avoids performance + * issues due to repeated uses of expression evaluation, complications due + * to filter expressions having to be evaluated early, and allows to JIT + * the entire expression into one native function. 
+ * + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * IDENTIFICATION @@ -229,310 +239,16 @@ #include "utils/datum.h" -/* - * AggStatePerTransData - per aggregate state value information - * - * Working state for updating the aggregate's state value, by calling the - * transition function with an input row. This struct does not store the - * information needed to produce the final aggregate result from the transition - * state, that's stored in AggStatePerAggData instead. This separation allows - * multiple aggregate results to be produced from a single state value. - */ -typedef struct AggStatePerTransData -{ - /* - * These values are set up during ExecInitAgg() and do not change - * thereafter: - */ - - /* - * Link to an Aggref expr this state value is for. - * - * There can be multiple Aggref's sharing the same state value, as long as - * the inputs and transition function are identical. This points to the - * first one of them. - */ - Aggref *aggref; - - /* - * Nominal number of arguments for aggregate function. For plain aggs, - * this excludes any ORDER BY expressions. For ordered-set aggs, this - * counts both the direct and aggregated (ORDER BY) arguments. - */ - int numArguments; - - /* - * Number of aggregated input columns. This includes ORDER BY expressions - * in both the plain-agg and ordered-set cases. Ordered-set direct args - * are not counted, though. - */ - int numInputs; - - /* offset of input columns in AggState->evalslot */ - int inputoff; - - /* - * Number of aggregated input columns to pass to the transfn. This - * includes the ORDER BY columns for ordered-set aggs, but not for plain - * aggs. (This doesn't count the transition state value!) - */ - int numTransInputs; - - /* Oid of the state transition or combine function */ - Oid transfn_oid; - - /* Oid of the serialization function or InvalidOid */ - Oid serialfn_oid; - - /* Oid of the deserialization function or InvalidOid */ - Oid deserialfn_oid; - - /* Oid of state value's datatype */ - Oid aggtranstype; - - /* ExprStates of the FILTER and argument expressions. */ - ExprState *aggfilter; /* state of FILTER expression, if any */ - List *aggdirectargs; /* states of direct-argument expressions */ - - /* - * fmgr lookup data for transition function or combine function. Note in - * particular that the fn_strict flag is kept here. - */ - FmgrInfo transfn; - - /* fmgr lookup data for serialization function */ - FmgrInfo serialfn; - - /* fmgr lookup data for deserialization function */ - FmgrInfo deserialfn; - - /* Input collation derived for aggregate */ - Oid aggCollation; - - /* number of sorting columns */ - int numSortCols; - - /* number of sorting columns to consider in DISTINCT comparisons */ - /* (this is either zero or the same as numSortCols) */ - int numDistinctCols; - - /* deconstructed sorting information (arrays of length numSortCols) */ - AttrNumber *sortColIdx; - Oid *sortOperators; - Oid *sortCollations; - bool *sortNullsFirst; - - /* - * fmgr lookup data for input columns' equality operators --- only - * set/used when aggregate has DISTINCT flag. Note that these are in - * order of sort column index, not parameter index. 
- */ - FmgrInfo *equalfns; /* array of length numDistinctCols */ - - /* - * initial value from pg_aggregate entry - */ - Datum initValue; - bool initValueIsNull; - - /* - * We need the len and byval info for the agg's input and transition data - * types in order to know how to copy/delete values. - * - * Note that the info for the input type is used only when handling - * DISTINCT aggs with just one argument, so there is only one input type. - */ - int16 inputtypeLen, - transtypeLen; - bool inputtypeByVal, - transtypeByVal; - - /* - * Stuff for evaluation of aggregate inputs in cases where the aggregate - * requires sorted input. The arguments themselves will be evaluated via - * AggState->evalslot/evalproj for all aggregates at once, but we only - * want to sort the relevant columns for individual aggregates. - */ - TupleDesc sortdesc; /* descriptor of input tuples */ - - /* - * Slots for holding the evaluated input arguments. These are set up - * during ExecInitAgg() and then used for each input row requiring - * processing besides what's done in AggState->evalproj. - */ - TupleTableSlot *sortslot; /* current input tuple */ - TupleTableSlot *uniqslot; /* used for multi-column DISTINCT */ - - /* - * These values are working state that is initialized at the start of an - * input tuple group and updated for each input tuple. - * - * For a simple (non DISTINCT/ORDER BY) aggregate, we just feed the input - * values straight to the transition function. If it's DISTINCT or - * requires ORDER BY, we pass the input values into a Tuplesort object; - * then at completion of the input tuple group, we scan the sorted values, - * eliminate duplicates if needed, and run the transition function on the - * rest. - * - * We need a separate tuplesort for each grouping set. - */ - - Tuplesortstate **sortstates; /* sort objects, if DISTINCT or ORDER BY */ - - /* - * This field is a pre-initialized FunctionCallInfo struct used for - * calling this aggregate's transfn. We save a few cycles per row by not - * re-initializing the unchanging fields; which isn't much, but it seems - * worth the extra space consumption. - */ - FunctionCallInfoData transfn_fcinfo; - - /* Likewise for serialization and deserialization functions */ - FunctionCallInfoData serialfn_fcinfo; - - FunctionCallInfoData deserialfn_fcinfo; -} AggStatePerTransData; - -/* - * AggStatePerAggData - per-aggregate information - * - * This contains the information needed to call the final function, to produce - * a final aggregate result from the state value. If there are multiple - * identical Aggrefs in the query, they can all share the same per-agg data. - * - * These values are set up during ExecInitAgg() and do not change thereafter. - */ -typedef struct AggStatePerAggData -{ - /* - * Link to an Aggref expr this state value is for. - * - * There can be multiple identical Aggref's sharing the same per-agg. This - * points to the first one of them. - */ - Aggref *aggref; - - /* index to the state value which this agg should use */ - int transno; - - /* Optional Oid of final function (may be InvalidOid) */ - Oid finalfn_oid; - - /* - * fmgr lookup data for final function --- only valid when finalfn_oid oid - * is not InvalidOid. - */ - FmgrInfo finalfn; - - /* - * Number of arguments to pass to the finalfn. This is always at least 1 - * (the transition state value) plus any ordered-set direct args. If the - * finalfn wants extra args then we pass nulls corresponding to the - * aggregated input columns. 
- */ - int numFinalArgs; - - /* - * We need the len and byval info for the agg's result data type in order - * to know how to copy/delete values. - */ - int16 resulttypeLen; - bool resulttypeByVal; - -} AggStatePerAggData; - -/* - * AggStatePerGroupData - per-aggregate-per-group working state - * - * These values are working state that is initialized at the start of - * an input tuple group and updated for each input tuple. - * - * In AGG_PLAIN and AGG_SORTED modes, we have a single array of these - * structs (pointed to by aggstate->pergroup); we re-use the array for - * each input group, if it's AGG_SORTED mode. In AGG_HASHED mode, the - * hash table contains an array of these structs for each tuple group. - * - * Logically, the sortstate field belongs in this struct, but we do not - * keep it here for space reasons: we don't support DISTINCT aggregates - * in AGG_HASHED mode, so there's no reason to use up a pointer field - * in every entry of the hashtable. - */ -typedef struct AggStatePerGroupData -{ - Datum transValue; /* current transition value */ - bool transValueIsNull; - - bool noTransValue; /* true if transValue not set yet */ - - /* - * Note: noTransValue initially has the same value as transValueIsNull, - * and if true both are cleared to false at the same time. They are not - * the same though: if transfn later returns a NULL, we want to keep that - * NULL and not auto-replace it with a later input value. Only the first - * non-NULL input will be auto-substituted. - */ -} AggStatePerGroupData; - -/* - * AggStatePerPhaseData - per-grouping-set-phase state - * - * Grouping sets are divided into "phases", where a single phase can be - * processed in one pass over the input. If there is more than one phase, then - * at the end of input from the current phase, state is reset and another pass - * taken over the data which has been re-sorted in the mean time. - * - * Accordingly, each phase specifies a list of grouping sets and group clause - * information, plus each phase after the first also has a sort order. - */ -typedef struct AggStatePerPhaseData -{ - AggStrategy aggstrategy; /* strategy for this phase */ - int numsets; /* number of grouping sets (or 0) */ - int *gset_lengths; /* lengths of grouping sets */ - Bitmapset **grouped_cols; /* column groupings for rollup */ - FmgrInfo *eqfunctions; /* per-grouping-field equality fns */ - Agg *aggnode; /* Agg node for phase data */ - Sort *sortnode; /* Sort node for input ordering for phase */ -} AggStatePerPhaseData; - -/* - * AggStatePerHashData - per-hashtable state - * - * When doing grouping sets with hashing, we have one of these for each - * grouping set. (When doing hashing without grouping sets, we have just one of - * them.) - */ -typedef struct AggStatePerHashData -{ - TupleHashTable hashtable; /* hash table with one entry per group */ - TupleHashIterator hashiter; /* for iterating through hash table */ - TupleTableSlot *hashslot; /* slot for loading hash table */ - FmgrInfo *hashfunctions; /* per-grouping-field hash fns */ - FmgrInfo *eqfunctions; /* per-grouping-field equality fns */ - int numCols; /* number of hash key columns */ - int numhashGrpCols; /* number of columns in hash table */ - int largestGrpColIdx; /* largest col required for hashing */ - AttrNumber *hashGrpColIdxInput; /* hash col indices in input slot */ - AttrNumber *hashGrpColIdxHash; /* indices in hashtbl tuples */ - Agg *aggnode; /* original Agg node, for numGroups etc. 
*/ -} AggStatePerHashData; - - static void select_current_set(AggState *aggstate, int setno, bool is_hash); static void initialize_phase(AggState *aggstate, int newphase); static TupleTableSlot *fetch_input_tuple(AggState *aggstate); static void initialize_aggregates(AggState *aggstate, - AggStatePerGroup pergroup, + AggStatePerGroup *pergroups, int numReset); static void advance_transition_function(AggState *aggstate, AggStatePerTrans pertrans, AggStatePerGroup pergroupstate); -static void advance_aggregates(AggState *aggstate, AggStatePerGroup pergroup, - AggStatePerGroup *pergroups); -static void advance_combine_function(AggState *aggstate, - AggStatePerTrans pertrans, - AggStatePerGroup pergroupstate); -static void combine_aggregates(AggState *aggstate, AggStatePerGroup pergroup); +static void advance_aggregates(AggState *aggstate); static void process_ordered_aggregate_single(AggState *aggstate, AggStatePerTrans pertrans, AggStatePerGroup pergroupstate); @@ -558,7 +274,7 @@ static Bitmapset *find_unaggregated_cols(AggState *aggstate); static bool find_unaggregated_cols_walker(Node *node, Bitmapset **colnos); static void build_hash_table(AggState *aggstate); static TupleHashEntryData *lookup_hash_entry(AggState *aggstate); -static AggStatePerGroup *lookup_hash_entries(AggState *aggstate); +static void lookup_hash_entries(AggState *aggstate); static TupleTableSlot *agg_retrieve_direct(AggState *aggstate); static void agg_fill_hash_table(AggState *aggstate); static TupleTableSlot *agg_retrieve_hash_table(AggState *aggstate); @@ -572,6 +288,7 @@ static void build_pertrans_for_aggref(AggStatePerTrans pertrans, static int find_compatible_peragg(Aggref *newagg, AggState *aggstate, int lastaggno, List **same_input_transnos); static int find_compatible_pertrans(AggState *aggstate, Aggref *newagg, + bool shareable, Oid aggtransfn, Oid aggtranstype, Oid aggserialfn, Oid aggdeserialfn, Datum initValue, bool initValueIsNull, @@ -585,6 +302,7 @@ static int find_compatible_pertrans(AggState *aggstate, Aggref *newagg, static void select_current_set(AggState *aggstate, int setno, bool is_hash) { + /* when changing this, also adapt ExecInterpExpr() and friends */ if (is_hash) aggstate->curaggcontext = aggstate->hashcontext; else @@ -655,7 +373,7 @@ initialize_phase(AggState *aggstate, int newphase) sortnode->collations, sortnode->nullsFirst, work_mem, - false); + NULL, false); } aggstate->current_phase = newphase; @@ -724,12 +442,16 @@ initialize_aggregate(AggState *aggstate, AggStatePerTrans pertrans, * process_ordered_aggregate_single.) */ if (pertrans->numInputs == 1) + { + Form_pg_attribute attr = TupleDescAttr(pertrans->sortdesc, 0); + pertrans->sortstates[aggstate->current_set] = - tuplesort_begin_datum(pertrans->sortdesc->attrs[0]->atttypid, + tuplesort_begin_datum(attr->atttypid, pertrans->sortOperators[0], pertrans->sortCollations[0], pertrans->sortNullsFirst[0], - work_mem, false); + work_mem, NULL, false); + } else pertrans->sortstates[aggstate->current_set] = tuplesort_begin_heap(pertrans->sortdesc, @@ -738,7 +460,7 @@ initialize_aggregate(AggState *aggstate, AggStatePerTrans pertrans, pertrans->sortOperators, pertrans->sortCollations, pertrans->sortNullsFirst, - work_mem, false); + work_mem, NULL, false); } /* @@ -778,14 +500,16 @@ initialize_aggregate(AggState *aggstate, AggStatePerTrans pertrans, * If there are multiple grouping sets, we initialize only the first numReset * of them (the grouping sets are ordered so that the most specific one, which * is reset most often, is first). 
As a convenience, if numReset is 0, we - * reinitialize all sets. numReset is -1 to initialize a hashtable entry, in - * which case the caller must have used select_current_set appropriately. + * reinitialize all sets. + * + * NB: This cannot be used for hash aggregates, as for those the grouping set + * number has to be specified from further up. * * When called, CurrentMemoryContext should be the per-query context. */ static void initialize_aggregates(AggState *aggstate, - AggStatePerGroup pergroup, + AggStatePerGroup *pergroups, int numReset) { int transno; @@ -797,30 +521,18 @@ initialize_aggregates(AggState *aggstate, if (numReset == 0) numReset = numGroupingSets; - for (transno = 0; transno < numTrans; transno++) + for (setno = 0; setno < numReset; setno++) { - AggStatePerTrans pertrans = &transstates[transno]; - - if (numReset < 0) - { - AggStatePerGroup pergroupstate; + AggStatePerGroup pergroup = pergroups[setno]; - pergroupstate = &pergroup[transno]; + select_current_set(aggstate, setno, false); - initialize_aggregate(aggstate, pertrans, pergroupstate); - } - else + for (transno = 0; transno < numTrans; transno++) { - for (setno = 0; setno < numReset; setno++) - { - AggStatePerGroup pergroupstate; - - pergroupstate = &pergroup[transno + (setno * numTrans)]; + AggStatePerTrans pertrans = &transstates[transno]; + AggStatePerGroup pergroupstate = &pergroup[transno]; - select_current_set(aggstate, setno, false); - - initialize_aggregate(aggstate, pertrans, pergroupstate); - } + initialize_aggregate(aggstate, pertrans, pergroupstate); } } } @@ -961,321 +673,15 @@ advance_transition_function(AggState *aggstate, * When called, CurrentMemoryContext should be the per-query context. */ static void -advance_aggregates(AggState *aggstate, AggStatePerGroup pergroup, AggStatePerGroup *pergroups) +advance_aggregates(AggState *aggstate) { - int transno; - int setno = 0; - int numGroupingSets = Max(aggstate->phase->numsets, 1); - int numHashes = aggstate->num_hashes; - int numTrans = aggstate->numtrans; - TupleTableSlot *slot = aggstate->evalslot; - - /* compute input for all aggregates */ - if (aggstate->evalproj) - aggstate->evalslot = ExecProject(aggstate->evalproj); - - for (transno = 0; transno < numTrans; transno++) - { - AggStatePerTrans pertrans = &aggstate->pertrans[transno]; - ExprState *filter = pertrans->aggfilter; - int numTransInputs = pertrans->numTransInputs; - int i; - int inputoff = pertrans->inputoff; - - /* Skip anything FILTERed out */ - if (filter) - { - Datum res; - bool isnull; - - res = ExecEvalExprSwitchContext(filter, aggstate->tmpcontext, - &isnull); - if (isnull || !DatumGetBool(res)) - continue; - } - - if (pertrans->numSortCols > 0) - { - /* DISTINCT and/or ORDER BY case */ - Assert(slot->tts_nvalid >= (pertrans->numInputs + inputoff)); - Assert(!pergroups); - - /* - * If the transfn is strict, we want to check for nullity before - * storing the row in the sorter, to save space if there are a lot - * of nulls. Note that we must only check numTransInputs columns, - * not numInputs, since nullity in columns used only for sorting - * is not relevant here. 
- */ - if (pertrans->transfn.fn_strict) - { - for (i = 0; i < numTransInputs; i++) - { - if (slot->tts_isnull[i + inputoff]) - break; - } - if (i < numTransInputs) - continue; - } - - for (setno = 0; setno < numGroupingSets; setno++) - { - /* OK, put the tuple into the tuplesort object */ - if (pertrans->numInputs == 1) - tuplesort_putdatum(pertrans->sortstates[setno], - slot->tts_values[inputoff], - slot->tts_isnull[inputoff]); - else - { - /* - * Copy slot contents, starting from inputoff, into sort - * slot. - */ - ExecClearTuple(pertrans->sortslot); - memcpy(pertrans->sortslot->tts_values, - &slot->tts_values[inputoff], - pertrans->numInputs * sizeof(Datum)); - memcpy(pertrans->sortslot->tts_isnull, - &slot->tts_isnull[inputoff], - pertrans->numInputs * sizeof(bool)); - pertrans->sortslot->tts_nvalid = pertrans->numInputs; - ExecStoreVirtualTuple(pertrans->sortslot); - tuplesort_puttupleslot(pertrans->sortstates[setno], pertrans->sortslot); - } - } - } - else - { - /* We can apply the transition function immediately */ - FunctionCallInfo fcinfo = &pertrans->transfn_fcinfo; - - /* Load values into fcinfo */ - /* Start from 1, since the 0th arg will be the transition value */ - Assert(slot->tts_nvalid >= (numTransInputs + inputoff)); - - for (i = 0; i < numTransInputs; i++) - { - fcinfo->arg[i + 1] = slot->tts_values[i + inputoff]; - fcinfo->argnull[i + 1] = slot->tts_isnull[i + inputoff]; - } - - if (pergroup) - { - /* advance transition states for ordered grouping */ - - for (setno = 0; setno < numGroupingSets; setno++) - { - AggStatePerGroup pergroupstate; - - select_current_set(aggstate, setno, false); - - pergroupstate = &pergroup[transno + (setno * numTrans)]; - - advance_transition_function(aggstate, pertrans, pergroupstate); - } - } - - if (pergroups) - { - /* advance transition states for hashed grouping */ + bool dummynull; - for (setno = 0; setno < numHashes; setno++) - { - AggStatePerGroup pergroupstate; - - select_current_set(aggstate, setno, true); - - pergroupstate = &pergroups[setno][transno]; - - advance_transition_function(aggstate, pertrans, pergroupstate); - } - } - } - } + ExecEvalExprSwitchContext(aggstate->phase->evaltrans, + aggstate->tmpcontext, + &dummynull); } -/* - * combine_aggregates replaces advance_aggregates in DO_AGGSPLIT_COMBINE - * mode. The principal difference is that here we may need to apply the - * deserialization function before running the transfn (which, in this mode, - * is actually the aggregate's combinefn). Also, we know we don't need to - * handle FILTER, DISTINCT, ORDER BY, or grouping sets. 
- */ -static void -combine_aggregates(AggState *aggstate, AggStatePerGroup pergroup) -{ - int transno; - int numTrans = aggstate->numtrans; - TupleTableSlot *slot; - - /* combine not supported with grouping sets */ - Assert(aggstate->phase->numsets <= 1); - - /* compute input for all aggregates */ - slot = ExecProject(aggstate->evalproj); - - for (transno = 0; transno < numTrans; transno++) - { - AggStatePerTrans pertrans = &aggstate->pertrans[transno]; - AggStatePerGroup pergroupstate = &pergroup[transno]; - FunctionCallInfo fcinfo = &pertrans->transfn_fcinfo; - int inputoff = pertrans->inputoff; - - Assert(slot->tts_nvalid > inputoff); - - /* - * deserialfn_oid will be set if we must deserialize the input state - * before calling the combine function - */ - if (OidIsValid(pertrans->deserialfn_oid)) - { - /* Don't call a strict deserialization function with NULL input */ - if (pertrans->deserialfn.fn_strict && slot->tts_isnull[inputoff]) - { - fcinfo->arg[1] = slot->tts_values[inputoff]; - fcinfo->argnull[1] = slot->tts_isnull[inputoff]; - } - else - { - FunctionCallInfo dsinfo = &pertrans->deserialfn_fcinfo; - MemoryContext oldContext; - - dsinfo->arg[0] = slot->tts_values[inputoff]; - dsinfo->argnull[0] = slot->tts_isnull[inputoff]; - /* Dummy second argument for type-safety reasons */ - dsinfo->arg[1] = PointerGetDatum(NULL); - dsinfo->argnull[1] = false; - - /* - * We run the deserialization functions in per-input-tuple - * memory context. - */ - oldContext = MemoryContextSwitchTo(aggstate->tmpcontext->ecxt_per_tuple_memory); - - fcinfo->arg[1] = FunctionCallInvoke(dsinfo); - fcinfo->argnull[1] = dsinfo->isnull; - - MemoryContextSwitchTo(oldContext); - } - } - else - { - fcinfo->arg[1] = slot->tts_values[inputoff]; - fcinfo->argnull[1] = slot->tts_isnull[inputoff]; - } - - advance_combine_function(aggstate, pertrans, pergroupstate); - } -} - -/* - * Perform combination of states between 2 aggregate states. Effectively this - * 'adds' two states together by whichever logic is defined in the aggregate - * function's combine function. - * - * Note that in this case transfn is set to the combination function. This - * perhaps should be changed to avoid confusion, but one field is ok for now - * as they'll never be needed at the same time. - */ -static void -advance_combine_function(AggState *aggstate, - AggStatePerTrans pertrans, - AggStatePerGroup pergroupstate) -{ - FunctionCallInfo fcinfo = &pertrans->transfn_fcinfo; - MemoryContext oldContext; - Datum newVal; - - if (pertrans->transfn.fn_strict) - { - /* if we're asked to merge to a NULL state, then do nothing */ - if (fcinfo->argnull[1]) - return; - - if (pergroupstate->noTransValue) - { - /* - * transValue has not yet been initialized. If pass-by-ref - * datatype we must copy the combining state value into - * aggcontext. 
- */ - if (!pertrans->transtypeByVal) - { - oldContext = MemoryContextSwitchTo( - aggstate->curaggcontext->ecxt_per_tuple_memory); - pergroupstate->transValue = datumCopy(fcinfo->arg[1], - pertrans->transtypeByVal, - pertrans->transtypeLen); - MemoryContextSwitchTo(oldContext); - } - else - pergroupstate->transValue = fcinfo->arg[1]; - - pergroupstate->transValueIsNull = false; - pergroupstate->noTransValue = false; - return; - } - } - - /* We run the combine functions in per-input-tuple memory context */ - oldContext = MemoryContextSwitchTo(aggstate->tmpcontext->ecxt_per_tuple_memory); - - /* set up aggstate->curpertrans for AggGetAggref() */ - aggstate->curpertrans = pertrans; - - /* - * OK to call the combine function - */ - fcinfo->arg[0] = pergroupstate->transValue; - fcinfo->argnull[0] = pergroupstate->transValueIsNull; - fcinfo->isnull = false; /* just in case combine func doesn't set it */ - - newVal = FunctionCallInvoke(fcinfo); - - aggstate->curpertrans = NULL; - - /* - * If pass-by-ref datatype, must copy the new value into aggcontext and - * free the prior transValue. But if the combine function returned a - * pointer to its first input, we don't need to do anything. Also, if the - * combine function returned a pointer to a R/W expanded object that is - * already a child of the aggcontext, assume we can adopt that value - * without copying it. - */ - if (!pertrans->transtypeByVal && - DatumGetPointer(newVal) != DatumGetPointer(pergroupstate->transValue)) - { - if (!fcinfo->isnull) - { - MemoryContextSwitchTo(aggstate->curaggcontext->ecxt_per_tuple_memory); - if (DatumIsReadWriteExpandedObject(newVal, - false, - pertrans->transtypeLen) && - MemoryContextGetParent(DatumGetEOHP(newVal)->eoh_context) == CurrentMemoryContext) - /* do nothing */ ; - else - newVal = datumCopy(newVal, - pertrans->transtypeByVal, - pertrans->transtypeLen); - } - if (!pergroupstate->transValueIsNull) - { - if (DatumIsReadWriteExpandedObject(pergroupstate->transValue, - false, - pertrans->transtypeLen)) - DeleteExpandedObject(pergroupstate->transValue); - else - pfree(DatumGetPointer(pergroupstate->transValue)); - } - } - - pergroupstate->transValue = newVal; - pergroupstate->transValueIsNull = fcinfo->isnull; - - MemoryContextSwitchTo(oldContext); -} - - /* * Run the transition function for a DISTINCT or ORDER BY aggregate * with only one input. 
This is called after we have completed @@ -1349,7 +755,7 @@ process_ordered_aggregate_single(AggState *aggstate, ((oldIsNull && *isNull) || (!oldIsNull && !*isNull && oldAbbrevVal == newAbbrevVal && - DatumGetBool(FunctionCall2(&pertrans->equalfns[0], + DatumGetBool(FunctionCall2(&pertrans->equalfnOne, oldVal, *newVal))))) { /* equal to prior, so forget this one */ @@ -1396,7 +802,7 @@ process_ordered_aggregate_multi(AggState *aggstate, AggStatePerTrans pertrans, AggStatePerGroup pergroupstate) { - MemoryContext workcontext = aggstate->tmpcontext->ecxt_per_tuple_memory; + ExprContext *tmpcontext = aggstate->tmpcontext; FunctionCallInfo fcinfo = &pertrans->transfn_fcinfo; TupleTableSlot *slot1 = pertrans->sortslot; TupleTableSlot *slot2 = pertrans->uniqslot; @@ -1405,6 +811,7 @@ process_ordered_aggregate_multi(AggState *aggstate, Datum newAbbrevVal = (Datum) 0; Datum oldAbbrevVal = (Datum) 0; bool haveOldValue = false; + TupleTableSlot *save = aggstate->tmpcontext->ecxt_outertuple; int i; tuplesort_performsort(pertrans->sortstates[aggstate->current_set]); @@ -1418,22 +825,20 @@ process_ordered_aggregate_multi(AggState *aggstate, { CHECK_FOR_INTERRUPTS(); - /* - * Extract the first numTransInputs columns as datums to pass to the - * transfn. (This will help execTuplesMatch too, so we do it - * immediately.) - */ - slot_getsomeattrs(slot1, numTransInputs); + tmpcontext->ecxt_outertuple = slot1; + tmpcontext->ecxt_innertuple = slot2; if (numDistinctCols == 0 || !haveOldValue || newAbbrevVal != oldAbbrevVal || - !execTuplesMatch(slot1, slot2, - numDistinctCols, - pertrans->sortColIdx, - pertrans->equalfns, - workcontext)) + !ExecQual(pertrans->equalfnMulti, tmpcontext)) { + /* + * Extract the first numTransInputs columns as datums to pass to + * the transfn. + */ + slot_getsomeattrs(slot1, numTransInputs); + /* Load values into fcinfo */ /* Start from 1, since the 0th arg will be the transition value */ for (i = 0; i < numTransInputs; i++) @@ -1451,15 +856,14 @@ process_ordered_aggregate_multi(AggState *aggstate, slot2 = slot1; slot1 = tmpslot; - /* avoid execTuplesMatch() calls by reusing abbreviated keys */ + /* avoid ExecQual() calls by reusing abbreviated keys */ oldAbbrevVal = newAbbrevVal; haveOldValue = true; } } - /* Reset context each time, unless execTuplesMatch did it for us */ - if (numDistinctCols == 0) - MemoryContextReset(workcontext); + /* Reset context each time */ + ResetExprContext(tmpcontext); ExecClearTuple(slot1); } @@ -1469,6 +873,9 @@ process_ordered_aggregate_multi(AggState *aggstate, tuplesort_end(pertrans->sortstates[aggstate->current_set]); pertrans->sortstates[aggstate->current_set] = NULL; + + /* restore previous slot, potentially in use for grouping sets */ + tmpcontext->ecxt_outertuple = save; } /* @@ -1506,7 +913,7 @@ finalize_aggregate(AggState *aggstate, * for the transition state value. 
*/ i = 1; - foreach(lc, pertrans->aggdirectargs) + foreach(lc, peragg->aggdirectargs) { ExprState *expr = (ExprState *) lfirst(lc); @@ -1524,8 +931,8 @@ finalize_aggregate(AggState *aggstate, { int numFinalArgs = peragg->numFinalArgs; - /* set up aggstate->curpertrans for AggGetAggref() */ - aggstate->curpertrans = pertrans; + /* set up aggstate->curperagg for AggGetAggref() */ + aggstate->curperagg = peragg; InitFunctionCallInfoData(fcinfo, &peragg->finalfn, numFinalArgs, @@ -1558,7 +965,7 @@ finalize_aggregate(AggState *aggstate, *resultVal = FunctionCallInvoke(&fcinfo); *resultIsNull = fcinfo.isnull; } - aggstate->curpertrans = NULL; + aggstate->curperagg = NULL; } else { @@ -1673,7 +1080,7 @@ prepare_projection_slot(AggState *aggstate, TupleTableSlot *slot, int currentSet aggstate->grouped_cols = grouped_cols; - if (slot->tts_isempty) + if (TTS_EMPTY(slot)) { /* * Force all values to be NULL if working on an empty input tuple @@ -1870,9 +1277,11 @@ build_hash_table(AggState *aggstate) Assert(perhash->aggnode->numGroups > 0); - perhash->hashtable = BuildTupleHashTable(perhash->numCols, + perhash->hashtable = BuildTupleHashTable(&aggstate->ss.ps, + perhash->hashslot->tts_tupleDescriptor, + perhash->numCols, perhash->hashGrpColIdxHash, - perhash->eqfunctions, + perhash->eqfuncoids, perhash->hashfunctions, perhash->aggnode->numGroups, additionalsize, @@ -1908,6 +1317,7 @@ find_hash_columns(AggState *aggstate) Bitmapset *base_colnos; List *outerTlist = outerPlanState(aggstate)->plan->targetlist; int numHashes = aggstate->num_hashes; + EState *estate = aggstate->ss.ps.state; int j; /* Find Vars that will be needed in tlist and qual */ @@ -1987,7 +1397,13 @@ find_hash_columns(AggState *aggstate) } hashDesc = ExecTypeFromTL(hashTlist, false); - ExecSetSlotDescriptor(perhash->hashslot, hashDesc); + + execTuplesHashPrepare(perhash->numCols, + perhash->aggnode->grpOperators, + &perhash->eqfuncoids, + &perhash->hashfunctions); + perhash->hashslot = + ExecAllocTableSlot(&estate->es_tupleTable, hashDesc); list_free(hashTlist); bms_free(colnos); @@ -2053,12 +1469,25 @@ lookup_hash_entry(AggState *aggstate) if (isnew) { - entry->additional = (AggStatePerGroup) + AggStatePerGroup pergroup; + int transno; + + pergroup = (AggStatePerGroup) MemoryContextAlloc(perhash->hashtable->tablecxt, sizeof(AggStatePerGroupData) * aggstate->numtrans); - /* initialize aggregates for new tuple group */ - initialize_aggregates(aggstate, (AggStatePerGroup) entry->additional, - -1); + entry->additional = pergroup; + + /* + * Initialize aggregates for new tuple group, lookup_hash_entries() + * already has selected the relevant grouping set. + */ + for (transno = 0; transno < aggstate->numtrans; transno++) + { + AggStatePerTrans pertrans = &aggstate->pertrans[transno]; + AggStatePerGroup pergroupstate = &pergroup[transno]; + + initialize_aggregate(aggstate, pertrans, pergroupstate); + } } return entry; @@ -2070,7 +1499,7 @@ lookup_hash_entry(AggState *aggstate) * * Be aware that lookup_hash_entry can reset the tmpcontext. 
*/ -static AggStatePerGroup * +static void lookup_hash_entries(AggState *aggstate) { int numHashes = aggstate->num_hashes; @@ -2082,8 +1511,6 @@ lookup_hash_entries(AggState *aggstate) select_current_set(aggstate, setno, true); pergroup[setno] = lookup_hash_entry(aggstate)->additional; } - - return pergroup; } /* @@ -2142,8 +1569,7 @@ agg_retrieve_direct(AggState *aggstate) ExprContext *econtext; ExprContext *tmpcontext; AggStatePerAgg peragg; - AggStatePerGroup pergroup; - AggStatePerGroup *hash_pergroups = NULL; + AggStatePerGroup *pergroups; TupleTableSlot *outerslot; TupleTableSlot *firstSlot; TupleTableSlot *result; @@ -2165,7 +1591,7 @@ agg_retrieve_direct(AggState *aggstate) tmpcontext = aggstate->tmpcontext; peragg = aggstate->peragg; - pergroup = aggstate->pergroup; + pergroups = aggstate->pergroups; firstSlot = aggstate->ss.ss_ScanTupleSlot; /* @@ -2278,17 +1704,14 @@ agg_retrieve_direct(AggState *aggstate) * of the next grouping set *---------- */ + tmpcontext->ecxt_innertuple = econtext->ecxt_outertuple; if (aggstate->input_done || (node->aggstrategy != AGG_PLAIN && aggstate->projected_set != -1 && aggstate->projected_set < (numGroupingSets - 1) && nextSetSize > 0 && - !execTuplesMatch(econtext->ecxt_outertuple, - tmpcontext->ecxt_outertuple, - nextSetSize, - node->grpColIdx, - aggstate->phase->eqfunctions, - tmpcontext->ecxt_per_tuple_memory))) + !ExecQualAndReset(aggstate->phase->eqfunctions[nextSetSize - 1], + tmpcontext))) { aggstate->projected_set += 1; @@ -2367,7 +1790,7 @@ agg_retrieve_direct(AggState *aggstate) /* * Initialize working state for a new input tuple group. */ - initialize_aggregates(aggstate, pergroup, numReset); + initialize_aggregates(aggstate, pergroups, numReset); if (aggstate->grp_firstTuple != NULL) { @@ -2376,10 +1799,9 @@ agg_retrieve_direct(AggState *aggstate) * reserved for it. The tuple will be deleted when it is * cleared from the slot. 
*/ - ExecStoreTuple(aggstate->grp_firstTuple, - firstSlot, - InvalidBuffer, - true); + ExecStoreHeapTuple(aggstate->grp_firstTuple, + firstSlot, + true); aggstate->grp_firstTuple = NULL; /* don't keep two pointers */ /* set up for first advance_aggregates call */ @@ -2398,15 +1820,11 @@ agg_retrieve_direct(AggState *aggstate) if (aggstate->aggstrategy == AGG_MIXED && aggstate->current_phase == 1) { - hash_pergroups = lookup_hash_entries(aggstate); + lookup_hash_entries(aggstate); } - else - hash_pergroups = NULL; - if (DO_AGGSPLIT_COMBINE(aggstate->aggsplit)) - combine_aggregates(aggstate, pergroup); - else - advance_aggregates(aggstate, pergroup, hash_pergroups); + /* Advance the aggregates (or combine functions) */ + advance_aggregates(aggstate); /* Reset per-input-tuple context after each tuple */ ResetExprContext(tmpcontext); @@ -2435,12 +1853,9 @@ agg_retrieve_direct(AggState *aggstate) */ if (node->aggstrategy != AGG_PLAIN) { - if (!execTuplesMatch(firstSlot, - outerslot, - node->numCols, - node->grpColIdx, - aggstate->phase->eqfunctions, - tmpcontext->ecxt_per_tuple_memory)) + tmpcontext->ecxt_innertuple = firstSlot; + if (!ExecQual(aggstate->phase->eqfunctions[node->numCols - 1], + tmpcontext)) { aggstate->grp_firstTuple = ExecCopySlotTuple(outerslot); break; @@ -2470,7 +1885,7 @@ agg_retrieve_direct(AggState *aggstate) finalize_aggregates(aggstate, peragg, - pergroup + (currentSet * aggstate->numtrans)); + pergroups[currentSet]); /* * If there's no row to project right now, we must continue rather @@ -2500,8 +1915,6 @@ agg_fill_hash_table(AggState *aggstate) */ for (;;) { - AggStatePerGroup *pergroups; - outerslot = fetch_input_tuple(aggstate); if (TupIsNull(outerslot)) break; @@ -2510,13 +1923,10 @@ agg_fill_hash_table(AggState *aggstate) tmpcontext->ecxt_outertuple = outerslot; /* Find or build hashtable entries */ - pergroups = lookup_hash_entries(aggstate); + lookup_hash_entries(aggstate); - /* Advance the aggregates */ - if (DO_AGGSPLIT_COMBINE(aggstate->aggsplit)) - combine_aggregates(aggstate, pergroups[0]); - else - advance_aggregates(aggstate, NULL, pergroups); + /* Advance the aggregates (or combine functions) */ + advance_aggregates(aggstate); /* * Reset per-input-tuple context after each tuple, but note that the @@ -2597,7 +2007,7 @@ agg_retrieve_hash_table(AggState *aggstate) else { /* No more hashtables, so done */ - aggstate->agg_done = TRUE; + aggstate->agg_done = true; return NULL; } } @@ -2668,20 +2078,20 @@ ExecInitAgg(Agg *node, EState *estate, int eflags) AggState *aggstate; AggStatePerAgg peraggs; AggStatePerTrans pertransstates; + AggStatePerGroup *pergroups; Plan *outerPlan; ExprContext *econtext; + TupleDesc scanDesc; int numaggs, transno, aggno; int phase; int phaseidx; - List *combined_inputeval; ListCell *l; Bitmapset *all_grouped_cols = NULL; int numGroupingSets = 1; int numPhases; int numHashes; - int column_offset; int i = 0; int j = 0; bool use_hashing = (node->aggstrategy == AGG_HASHED || @@ -2708,10 +2118,11 @@ ExecInitAgg(Agg *node, EState *estate, int eflags) aggstate->current_set = 0; aggstate->peragg = NULL; aggstate->pertrans = NULL; + aggstate->curperagg = NULL; aggstate->curpertrans = NULL; aggstate->input_done = false; aggstate->agg_done = false; - aggstate->pergroup = NULL; + aggstate->pergroups = NULL; aggstate->grp_firstTuple = NULL; aggstate->sort_in = NULL; aggstate->sort_out = NULL; @@ -2786,30 +2197,6 @@ ExecInitAgg(Agg *node, EState *estate, int eflags) ExecAssignExprContext(estate, &aggstate->ss.ps); - /* - * tuple table 
initialization. - * - * For hashtables, we create some additional slots below. - */ - ExecInitScanTupleSlot(estate, &aggstate->ss); - ExecInitResultTupleSlot(estate, &aggstate->ss.ps); - aggstate->sort_slot = ExecInitExtraTupleSlot(estate); - - /* - * initialize child expressions - * - * We rely on the parser to have checked that no aggs contain other agg - * calls in their arguments. This would make no sense under SQL semantics - * (and it's forbidden by the spec). Because it is true, we don't need to - * worry about evaluating the aggs in any particular order. - * - * Note: execExpr.c finds Aggrefs for us, and adds their AggrefExprState - * nodes to aggstate->aggs. Aggrefs in the qual are found here; Aggrefs - * in the targetlist are found during ExecAssignProjectionInfo, below. - */ - aggstate->ss.ps.qual = - ExecInitQual(node->plan.qual, (PlanState *) aggstate); - /* * Initialize child nodes. * @@ -2824,33 +2211,38 @@ ExecInitAgg(Agg *node, EState *estate, int eflags) /* * initialize source tuple type. */ - ExecAssignScanTypeFromOuterPlan(&aggstate->ss); + ExecCreateScanSlotFromOuterPlan(estate, &aggstate->ss); + scanDesc = aggstate->ss.ss_ScanTupleSlot->tts_tupleDescriptor; if (node->chain) - ExecSetSlotDescriptor(aggstate->sort_slot, - aggstate->ss.ss_ScanTupleSlot->tts_tupleDescriptor); + aggstate->sort_slot = ExecInitExtraTupleSlot(estate, scanDesc); /* - * Initialize result tuple type and projection info. + * Initialize result type, slot and projection. */ - ExecAssignResultTypeFromTL(&aggstate->ss.ps); + ExecInitResultTupleSlotTL(&aggstate->ss.ps); ExecAssignProjectionInfo(&aggstate->ss.ps, NULL); + /* + * initialize child expressions + * + * We expect the parser to have checked that no aggs contain other agg + * calls in their arguments (and just to be sure, we verify it again while + * initializing the plan node). This would make no sense under SQL + * semantics, and it's forbidden by the spec. Because it is true, we + * don't need to worry about evaluating the aggs in any particular order. + * + * Note: execExpr.c finds Aggrefs for us, and adds their AggrefExprState + * nodes to aggstate->aggs. Aggrefs in the qual are found here; Aggrefs + * in the targetlist are found during ExecAssignProjectionInfo, below. + */ + aggstate->ss.ps.qual = + ExecInitQual(node->plan.qual, (PlanState *) aggstate); + /* * We should now have found all Aggrefs in the targetlist and quals. */ numaggs = aggstate->numaggs; Assert(numaggs == list_length(aggstate->aggs)); - if (numaggs <= 0) - { - /* - * This is not an error condition: we might be using the Agg node just - * to do hash-based grouping. Even in the regular case, - * constant-expression simplification could optimize away all of the - * Aggrefs in the targetlist and qual. So keep going, but force local - * copy of numaggs positive so that palloc()s below don't choke. - */ - numaggs = 1; - } /* * For each phase, prepare grouping set data and fmgr lookup data for @@ -2958,11 +2350,43 @@ ExecInitAgg(Agg *node, EState *estate, int eflags) */ if (aggnode->aggstrategy == AGG_SORTED) { + int i = 0; + Assert(aggnode->numCols > 0); + /* + * Build a separate function for each subset of columns that + * need to be compared. 
+ */ phasedata->eqfunctions = - execTuplesMatchPrepare(aggnode->numCols, - aggnode->grpOperators); + (ExprState **) palloc0(aggnode->numCols * sizeof(ExprState *)); + + /* for each grouping set */ + for (i = 0; i < phasedata->numsets; i++) + { + int length = phasedata->gset_lengths[i]; + + if (phasedata->eqfunctions[length - 1] != NULL) + continue; + + phasedata->eqfunctions[length - 1] = + execTuplesMatchPrepare(scanDesc, + length, + aggnode->grpColIdx, + aggnode->grpOperators, + (PlanState *) aggstate); + } + + /* and for all grouped columns, unless already computed */ + if (phasedata->eqfunctions[aggnode->numCols - 1] == NULL) + { + phasedata->eqfunctions[aggnode->numCols - 1] = + execTuplesMatchPrepare(scanDesc, + aggnode->numCols, + aggnode->grpColIdx, + aggnode->grpOperators, + (PlanState *) aggstate); + } } phasedata->aggnode = aggnode; @@ -2992,40 +2416,37 @@ ExecInitAgg(Agg *node, EState *estate, int eflags) aggstate->peragg = peraggs; aggstate->pertrans = pertransstates; + + aggstate->all_pergroups = + (AggStatePerGroup *) palloc0(sizeof(AggStatePerGroup) + * (numGroupingSets + numHashes)); + pergroups = aggstate->all_pergroups; + + if (node->aggstrategy != AGG_HASHED) + { + for (i = 0; i < numGroupingSets; i++) + { + pergroups[i] = (AggStatePerGroup) palloc0(sizeof(AggStatePerGroupData) + * numaggs); + } + + aggstate->pergroups = pergroups; + pergroups += numGroupingSets; + } + /* * Hashing can only appear in the initial phase. */ if (use_hashing) { - for (i = 0; i < numHashes; ++i) - { - aggstate->perhash[i].hashslot = ExecInitExtraTupleSlot(estate); - - execTuplesHashPrepare(aggstate->perhash[i].numCols, - aggstate->perhash[i].aggnode->grpOperators, - &aggstate->perhash[i].eqfunctions, - &aggstate->perhash[i].hashfunctions); - } - /* this is an array of pointers, not structures */ - aggstate->hash_pergroup = palloc0(sizeof(AggStatePerGroup) * numHashes); + aggstate->hash_pergroup = pergroups; find_hash_columns(aggstate); build_hash_table(aggstate); aggstate->table_filled = false; } - if (node->aggstrategy != AGG_HASHED) - { - AggStatePerGroup pergroup; - - pergroup = (AggStatePerGroup) palloc0(sizeof(AggStatePerGroupData) - * numaggs - * numGroupingSets); - - aggstate->pergroup = pergroup; - } - /* * Initialize current phase-dependent values to initial phase. The initial * phase is 1 (first sort pass) for all strategies that use sorting (if @@ -3056,27 +2477,29 @@ ExecInitAgg(Agg *node, EState *estate, int eflags) * * Scenarios: * - * 1. An aggregate function appears more than once in query: + * 1. Identical aggregate function calls appear in the query: * * SELECT SUM(x) FROM ... HAVING SUM(x) > 0 * - * Since the aggregates are the identical, we only need to calculate - * the calculate it once. Both aggregates will share the same 'aggno' - * value. + * Since these aggregates are identical, we only need to calculate + * the value once. Both aggregates will share the same 'aggno' value. * * 2. Two different aggregate functions appear in the query, but the - * aggregates have the same transition function and initial value, but - * different final function: + * aggregates have the same arguments, transition functions and + * initial values (and, presumably, different final functions): * - * SELECT SUM(x), AVG(x) FROM ... + * SELECT AVG(x), STDDEV(x) FROM ... * * In this case we must create a new peragg for the varying aggregate, - * and need to call the final functions separately, but can share the - * same transition state. 
+ * and we need to call the final functions separately, but we need + * only run the transition function once. (This requires that the + * final functions be nondestructive of the transition state, but + * that's required anyway for other reasons.) * - * For either of these optimizations to be valid, the aggregate's - * arguments must be the same, including any modifiers such as ORDER BY, - * DISTINCT and FILTER, and they mustn't contain any volatile functions. + * For either of these optimizations to be valid, all aggregate properties + * used in the transition phase must be the same, including any modifiers + * such as ORDER BY, DISTINCT and FILTER, and the arguments mustn't + * contain any volatile functions. * ----------------- */ aggno = -1; @@ -3098,6 +2521,7 @@ ExecInitAgg(Agg *node, EState *estate, int eflags) AclResult aclresult; Oid transfn_oid, finalfn_oid; + bool shareable; Oid serialfn_oid, deserialfn_oid; Expr *finalfnexpr; @@ -3141,7 +2565,7 @@ ExecInitAgg(Agg *node, EState *estate, int eflags) aclresult = pg_proc_aclcheck(aggref->aggfnoid, GetUserId(), ACL_EXECUTE); if (aclresult != ACLCHECK_OK) - aclcheck_error(aclresult, ACL_KIND_PROC, + aclcheck_error(aclresult, OBJECT_AGGREGATE, get_func_name(aggref->aggfnoid)); InvokeFunctionExecuteHook(aggref->aggfnoid); @@ -3170,6 +2594,15 @@ ExecInitAgg(Agg *node, EState *estate, int eflags) else peragg->finalfn_oid = finalfn_oid = aggform->aggfinalfn; + /* + * If finalfn is marked read-write, we can't share transition states; + * but it is okay to share states for AGGMODIFY_SHAREABLE aggs. Also, + * if we're not executing the finalfn here, we can share regardless. + */ + shareable = (aggform->aggfinalmodify != AGGMODIFY_READ_WRITE) || + (finalfn_oid == InvalidOid); + peragg->shareable = shareable; + serialfn_oid = InvalidOid; deserialfn_oid = InvalidOid; @@ -3222,7 +2655,7 @@ ExecInitAgg(Agg *node, EState *estate, int eflags) aclresult = pg_proc_aclcheck(transfn_oid, aggOwner, ACL_EXECUTE); if (aclresult != ACLCHECK_OK) - aclcheck_error(aclresult, ACL_KIND_PROC, + aclcheck_error(aclresult, OBJECT_FUNCTION, get_func_name(transfn_oid)); InvokeFunctionExecuteHook(transfn_oid); if (OidIsValid(finalfn_oid)) @@ -3230,7 +2663,7 @@ ExecInitAgg(Agg *node, EState *estate, int eflags) aclresult = pg_proc_aclcheck(finalfn_oid, aggOwner, ACL_EXECUTE); if (aclresult != ACLCHECK_OK) - aclcheck_error(aclresult, ACL_KIND_PROC, + aclcheck_error(aclresult, OBJECT_FUNCTION, get_func_name(finalfn_oid)); InvokeFunctionExecuteHook(finalfn_oid); } @@ -3239,7 +2672,7 @@ ExecInitAgg(Agg *node, EState *estate, int eflags) aclresult = pg_proc_aclcheck(serialfn_oid, aggOwner, ACL_EXECUTE); if (aclresult != ACLCHECK_OK) - aclcheck_error(aclresult, ACL_KIND_PROC, + aclcheck_error(aclresult, OBJECT_FUNCTION, get_func_name(serialfn_oid)); InvokeFunctionExecuteHook(serialfn_oid); } @@ -3248,7 +2681,7 @@ ExecInitAgg(Agg *node, EState *estate, int eflags) aclresult = pg_proc_aclcheck(deserialfn_oid, aggOwner, ACL_EXECUTE); if (aclresult != ACLCHECK_OK) - aclcheck_error(aclresult, ACL_KIND_PROC, + aclcheck_error(aclresult, OBJECT_FUNCTION, get_func_name(deserialfn_oid)); InvokeFunctionExecuteHook(deserialfn_oid); } @@ -3270,6 +2703,10 @@ ExecInitAgg(Agg *node, EState *estate, int eflags) else peragg->numFinalArgs = numDirectArgs + 1; + /* Initialize any direct-argument expressions */ + peragg->aggdirectargs = ExecInitExprList(aggref->aggdirectargs, + (PlanState *) aggstate); + /* * build expression trees using actual argument & result types for the * finalfn, if it 
exists and is required. @@ -3308,11 +2745,12 @@ ExecInitAgg(Agg *node, EState *estate, int eflags) * 2. Build working state for invoking the transition function, or * look up previously initialized working state, if we can share it. * - * find_compatible_peragg() already collected a list of per-Trans's - * with the same inputs. Check if any of them have the same transition - * function and initial value. + * find_compatible_peragg() already collected a list of shareable + * per-Trans's with the same inputs. Check if any of them have the + * same transition function and initial value. */ existing_transno = find_compatible_pertrans(aggstate, aggref, + shareable, transfn_oid, aggtranstype, serialfn_oid, deserialfn_oid, initValue, initValueIsNull, @@ -3321,9 +2759,10 @@ ExecInitAgg(Agg *node, EState *estate, int eflags) { /* * Existing compatible trans found, so just point the 'peragg' to - * the same per-trans struct. + * the same per-trans struct, and mark the trans state as shared. */ pertrans = &pertransstates[existing_transno]; + pertrans->aggshared = true; peragg->transno = existing_transno; } else @@ -3340,56 +2779,79 @@ ExecInitAgg(Agg *node, EState *estate, int eflags) } /* - * Update numaggs to match the number of unique aggregates found. Also set - * numstates to the number of unique aggregate states found. + * Update aggstate->numaggs to be the number of unique aggregates found. + * Also set numstates to the number of unique transition states found. */ aggstate->numaggs = aggno + 1; aggstate->numtrans = transno + 1; /* - * Build a single projection computing the aggregate arguments for all - * aggregates at once, that's considerably faster than doing it separately - * for each. - * - * First create a targetlist combining the targetlist of all the - * transitions. + * Last, check whether any more aggregates got added onto the node while + * we processed the expressions for the aggregate arguments (including not + * only the regular arguments and FILTER expressions handled immediately + * above, but any direct arguments we might've handled earlier). If so, + * we have nested aggregate functions, which is semantically nonsensical, + * so complain. (This should have been caught by the parser, so we don't + * need to work hard on a helpful error message; but we defend against it + * here anyway, just to be sure.) */ - combined_inputeval = NIL; - column_offset = 0; - for (transno = 0; transno < aggstate->numtrans; transno++) + if (numaggs != list_length(aggstate->aggs)) + ereport(ERROR, + (errcode(ERRCODE_GROUPING_ERROR), + errmsg("aggregate function calls cannot be nested"))); + + /* + * Build expressions doing all the transition work at once. We build a + * different one for each phase, as the number of transition function + * invocation can differ between phases. Note this'll work both for + * transition and combination functions (although there'll only be one + * phase in the latter case). + */ + for (phaseidx = 0; phaseidx < aggstate->numphases; phaseidx++) { - AggStatePerTrans pertrans = &pertransstates[transno]; - ListCell *arg; + AggStatePerPhase phase = &aggstate->phases[phaseidx]; + bool dohash = false; + bool dosort = false; - pertrans->inputoff = column_offset; + /* phase 0 doesn't necessarily exist */ + if (!phase->aggnode) + continue; - /* - * Adjust resno in a copied target entries, to point into the combined - * slot. 
- */ - foreach(arg, pertrans->aggref->args) + if (aggstate->aggstrategy == AGG_MIXED && phaseidx == 1) { - TargetEntry *source_tle = lfirst_node(TargetEntry, arg); - TargetEntry *tle; - - tle = flatCopyTargetEntry(source_tle); - tle->resno += column_offset; - - combined_inputeval = lappend(combined_inputeval, tle); + /* + * Phase one, and only phase one, in a mixed agg performs both + * sorting and aggregation. + */ + dohash = true; + dosort = true; + } + else if (aggstate->aggstrategy == AGG_MIXED && phaseidx == 0) + { + /* + * No need to compute a transition function for an AGG_MIXED phase + * 0 - the contents of the hashtables will have been computed + * during phase 1. + */ + continue; + } + else if (phase->aggstrategy == AGG_PLAIN || + phase->aggstrategy == AGG_SORTED) + { + dohash = false; + dosort = true; } + else if (phase->aggstrategy == AGG_HASHED) + { + dohash = true; + dosort = false; + } + else + Assert(false); - column_offset += list_length(pertrans->aggref->args); - } + phase->evaltrans = ExecBuildAggTrans(aggstate, phase, dosort, dohash); - /* and then create a projection for that targetlist */ - aggstate->evaldesc = ExecTypeFromTL(combined_inputeval, false); - aggstate->evalslot = ExecInitExtraTupleSlot(estate); - aggstate->evalproj = ExecBuildProjectionInfo(combined_inputeval, - aggstate->tmpcontext, - aggstate->evalslot, - &aggstate->ss.ps, - NULL); - ExecSetSlotDescriptor(aggstate->evalslot, aggstate->evaldesc); + } return aggstate; } @@ -3420,11 +2882,11 @@ build_pertrans_for_aggref(AggStatePerTrans pertrans, List *sortlist; int numSortCols; int numDistinctCols; - int naggs; int i; /* Begin filling in the pertrans data */ pertrans->aggref = aggref; + pertrans->aggshared = false; pertrans->aggCollation = aggref->inputcollid; pertrans->transfn_oid = aggtransfn; pertrans->serialfn_oid = aggserialfn; @@ -3477,8 +2939,8 @@ build_pertrans_for_aggref(AggStatePerTrans pertrans, if (pertrans->transfn.fn_strict && aggtranstype == INTERNALOID) ereport(ERROR, (errcode(ERRCODE_INVALID_FUNCTION_DEFINITION), - errmsg("combine function for aggregate %u must be declared as STRICT", - aggref->aggfnoid))); + errmsg("combine function with transition type %s must not be declared STRICT", + format_type_be(aggtranstype)))); } else { @@ -3561,23 +3023,6 @@ build_pertrans_for_aggref(AggStatePerTrans pertrans, } - /* Initialize the input and FILTER expressions */ - naggs = aggstate->numaggs; - pertrans->aggfilter = ExecInitExpr(aggref->aggfilter, - (PlanState *) aggstate); - pertrans->aggdirectargs = ExecInitExprList(aggref->aggdirectargs, - (PlanState *) aggstate); - - /* - * Complain if the aggregate's arguments contain any aggregates; nested - * agg functions are semantically nonsensical. (This should have been - * caught earlier, but we defend against it here anyway.) - */ - if (naggs != aggstate->numaggs) - ereport(ERROR, - (errcode(ERRCODE_GROUPING_ERROR), - errmsg("aggregate function calls cannot be nested"))); - /* * If we're doing either DISTINCT or ORDER BY for a plain agg, then we * have a list of SortGroupClause nodes; fish out the data in them and @@ -3608,16 +3053,20 @@ build_pertrans_for_aggref(AggStatePerTrans pertrans, pertrans->numSortCols = numSortCols; pertrans->numDistinctCols = numDistinctCols; - if (numSortCols > 0) + /* + * If we have either sorting or filtering to do, create a tupledesc and + * slot corresponding to the aggregated inputs (including sort + * expressions) of the agg. 
+ */ + if (numSortCols > 0 || aggref->aggfilter) { - /* - * Get a tupledesc and slot corresponding to the aggregated inputs - * (including sort expressions) of the agg. - */ pertrans->sortdesc = ExecTypeFromTL(aggref->args, false); - pertrans->sortslot = ExecInitExtraTupleSlot(estate); - ExecSetSlotDescriptor(pertrans->sortslot, pertrans->sortdesc); + pertrans->sortslot = + ExecInitExtraTupleSlot(estate, pertrans->sortdesc); + } + if (numSortCols > 0) + { /* * We don't implement DISTINCT or ORDER BY aggs in the HASHED case * (yet) @@ -3634,9 +3083,8 @@ build_pertrans_for_aggref(AggStatePerTrans pertrans, else if (numDistinctCols > 0) { /* we will need an extra slot to store prior values */ - pertrans->uniqslot = ExecInitExtraTupleSlot(estate); - ExecSetSlotDescriptor(pertrans->uniqslot, - pertrans->sortdesc); + pertrans->uniqslot = + ExecInitExtraTupleSlot(estate, pertrans->sortdesc); } /* Extract the sort information for use later */ @@ -3669,24 +3117,28 @@ build_pertrans_for_aggref(AggStatePerTrans pertrans, if (aggref->aggdistinct) { + Oid *ops; + Assert(numArguments > 0); + Assert(list_length(aggref->aggdistinct) == numDistinctCols); - /* - * We need the equal function for each DISTINCT comparison we will - * make. - */ - pertrans->equalfns = - (FmgrInfo *) palloc(numDistinctCols * sizeof(FmgrInfo)); + ops = palloc(numDistinctCols * sizeof(Oid)); i = 0; foreach(lc, aggref->aggdistinct) - { - SortGroupClause *sortcl = (SortGroupClause *) lfirst(lc); + ops[i++] = ((SortGroupClause *) lfirst(lc))->eqop; - fmgr_info(get_opcode(sortcl->eqop), &pertrans->equalfns[i]); - i++; - } - Assert(i == numDistinctCols); + /* lookup / build the necessary comparators */ + if (numDistinctCols == 1) + fmgr_info(get_opcode(ops[0]), &pertrans->equalfnOne); + else + pertrans->equalfnMulti = + execTuplesMatchPrepare(pertrans->sortdesc, + numDistinctCols, + pertrans->sortColIdx, + ops, + &aggstate->ss.ps); + pfree(ops); } pertrans->sortstates = (Tuplesortstate **) @@ -3717,10 +3169,10 @@ GetAggInitVal(Datum textInitVal, Oid transtype) * with this one, with the same input parameters. If no compatible aggregate * can be found, returns -1. * - * As a side-effect, this also collects a list of existing per-Trans structs - * with matching inputs. If no identical Aggref is found, the list is passed - * later to find_compatible_perstate, to see if we can at least reuse the - * state value of another aggregate. + * As a side-effect, this also collects a list of existing, shareable per-Trans + * structs with matching inputs. If no identical Aggref is found, the list is + * passed later to find_compatible_pertrans, to see if we can at least reuse + * the state value of another aggregate. */ static int find_compatible_peragg(Aggref *newagg, AggState *aggstate, @@ -3739,11 +3191,12 @@ find_compatible_peragg(Aggref *newagg, AggState *aggstate, /* * Search through the list of already seen aggregates. If we find an - * existing aggregate with the same aggregate function and input - * parameters as an existing one, then we can re-use that one. While + * existing identical aggregate call, then we can re-use that one. While * searching, we'll also collect a list of Aggrefs with the same input * parameters. If no matching Aggref is found, the caller can potentially - * still re-use the transition state of one of them. + * still re-use the transition state of one of them. (At this stage we + * just compare the parsetrees; whether different aggregates share the + * same transition function will be checked later.) 
*/ for (aggno = 0; aggno <= lastaggno; aggno++) { @@ -3759,7 +3212,6 @@ find_compatible_peragg(Aggref *newagg, AggState *aggstate, newagg->aggstar != existingRef->aggstar || newagg->aggvariadic != existingRef->aggvariadic || newagg->aggkind != existingRef->aggkind || - !equal(newagg->aggdirectargs, existingRef->aggdirectargs) || !equal(newagg->args, existingRef->args) || !equal(newagg->aggorder, existingRef->aggorder) || !equal(newagg->aggdistinct, existingRef->aggdistinct) || @@ -3769,7 +3221,8 @@ find_compatible_peragg(Aggref *newagg, AggState *aggstate, /* if it's the same aggregate function then report exact match */ if (newagg->aggfnoid == existingRef->aggfnoid && newagg->aggtype == existingRef->aggtype && - newagg->aggcollid == existingRef->aggcollid) + newagg->aggcollid == existingRef->aggcollid && + equal(newagg->aggdirectargs, existingRef->aggdirectargs)) { list_free(*same_input_transnos); *same_input_transnos = NIL; @@ -3777,11 +3230,15 @@ find_compatible_peragg(Aggref *newagg, AggState *aggstate, } /* - * Not identical, but it had the same inputs. Return it to the caller, - * in case we can re-use its per-trans state. + * Not identical, but it had the same inputs. If the final function + * permits sharing, return its transno to the caller, in case we can + * re-use its per-trans state. (If there's already sharing going on, + * we might report a transno more than once. find_compatible_pertrans + * is cheap enough that it's not worth spending cycles to avoid that.) */ - *same_input_transnos = lappend_int(*same_input_transnos, - peragg->transno); + if (peragg->shareable) + *same_input_transnos = lappend_int(*same_input_transnos, + peragg->transno); } return -1; @@ -3792,11 +3249,11 @@ find_compatible_peragg(Aggref *newagg, AggState *aggstate, * struct * * Searches the list of transnos for a per-Trans struct with the same - * transition state and initial condition. (The inputs have already been + * transition function and initial condition. (The inputs have already been * verified to match.) */ static int -find_compatible_pertrans(AggState *aggstate, Aggref *newagg, +find_compatible_pertrans(AggState *aggstate, Aggref *newagg, bool shareable, Oid aggtransfn, Oid aggtranstype, Oid aggserialfn, Oid aggdeserialfn, Datum initValue, bool initValueIsNull, @@ -3804,6 +3261,10 @@ find_compatible_pertrans(AggState *aggstate, Aggref *newagg, { ListCell *lc; + /* If this aggregate can't share transition states, give up */ + if (!shareable) + return -1; + foreach(lc, transnos) { int transno = lfirst_int(lc); @@ -3828,16 +3289,16 @@ find_compatible_pertrans(AggState *aggstate, Aggref *newagg, aggdeserialfn != pertrans->deserialfn_oid) continue; - /* Check that the initial condition matches, too. */ + /* + * Check that the initial condition matches, too. 
+ */ if (initValueIsNull && pertrans->initValueIsNull) return transno; if (!initValueIsNull && !pertrans->initValueIsNull && datumIsEqual(initValue, pertrans->initValue, pertrans->transtypeByVal, pertrans->transtypeLen)) - { return transno; - } } return -1; } @@ -3986,8 +3447,11 @@ ExecReScanAgg(AggState *node) /* * Reset the per-group state (in particular, mark transvalues null) */ - MemSet(node->pergroup, 0, - sizeof(AggStatePerGroupData) * node->numaggs * numGroupingSets); + for (setno = 0; setno < numGroupingSets; setno++) + { + MemSet(node->pergroups[setno], 0, + sizeof(AggStatePerGroupData) * node->numaggs); + } /* reset to phase 1 */ initialize_phase(node, 1); @@ -4056,6 +3520,13 @@ AggCheckCallContext(FunctionCallInfo fcinfo, MemoryContext *aggcontext) * If the function is being called as an aggregate support function, * return the Aggref node for the aggregate call. Otherwise, return NULL. * + * Aggregates sharing the same inputs and transition functions can get + * merged into a single transition calculation. If the transition function + * calls AggGetAggref, it will get some one of the Aggrefs for which it is + * executing. It must therefore not pay attention to the Aggref fields that + * relate to the final function, as those are indeterminate. But if a final + * function calls AggGetAggref, it will get a precise result. + * * Note that if an aggregate is being used as a window function, this will * return NULL. We could provide a similar function to return the relevant * WindowFunc node in such cases, but it's not needed yet. @@ -4065,9 +3536,18 @@ AggGetAggref(FunctionCallInfo fcinfo) { if (fcinfo->context && IsA(fcinfo->context, AggState)) { + AggState *aggstate = (AggState *) fcinfo->context; + AggStatePerAgg curperagg; AggStatePerTrans curpertrans; - curpertrans = ((AggState *) fcinfo->context)->curpertrans; + /* check curperagg (valid when in a final function) */ + curperagg = aggstate->curperagg; + + if (curperagg) + return curperagg->aggref; + + /* check curpertrans (valid when in a transition function) */ + curpertrans = aggstate->curpertrans; if (curpertrans) return curpertrans->aggref; @@ -4097,6 +3577,44 @@ AggGetTempMemoryContext(FunctionCallInfo fcinfo) return NULL; } +/* + * AggStateIsShared - find out whether transition state is shared + * + * If the function is being called as an aggregate support function, + * return true if the aggregate's transition state is shared across + * multiple aggregates, false if it is not. + * + * Returns true if not called as an aggregate support function. + * This is intended as a conservative answer, ie "no you'd better not + * scribble on your input". In particular, will return true if the + * aggregate is being used as a window function, which is a scenario + * in which changing the transition state is a bad idea. We might + * want to refine the behavior for the window case in future. 
+ */ +bool +AggStateIsShared(FunctionCallInfo fcinfo) +{ + if (fcinfo->context && IsA(fcinfo->context, AggState)) + { + AggState *aggstate = (AggState *) fcinfo->context; + AggStatePerAgg curperagg; + AggStatePerTrans curpertrans; + + /* check curperagg (valid when in a final function) */ + curperagg = aggstate->curperagg; + + if (curperagg) + return aggstate->pertrans[curperagg->transno].aggshared; + + /* check curpertrans (valid when in a transition function) */ + curpertrans = aggstate->curpertrans; + + if (curpertrans) + return curpertrans->aggshared; + } + return true; +} + /* * AggRegisterCallback - register a cleanup callback for an aggregate * diff --git a/src/backend/executor/nodeAppend.c b/src/backend/executor/nodeAppend.c index bed9bb8713..94a17c7c67 100644 --- a/src/backend/executor/nodeAppend.c +++ b/src/backend/executor/nodeAppend.c @@ -3,7 +3,7 @@ * nodeAppend.c * routines to handle append nodes. * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * @@ -58,54 +58,33 @@ #include "postgres.h" #include "executor/execdebug.h" +#include "executor/execPartition.h" #include "executor/nodeAppend.h" #include "miscadmin.h" -static TupleTableSlot *ExecAppend(PlanState *pstate); -static bool exec_append_initialize_next(AppendState *appendstate); - - -/* ---------------------------------------------------------------- - * exec_append_initialize_next - * - * Sets up the append state node for the "next" scan. - * - * Returns t iff there is a "next" scan to process. - * ---------------------------------------------------------------- - */ -static bool -exec_append_initialize_next(AppendState *appendstate) +/* Shared state for parallel-aware Append. */ +struct ParallelAppendState { - int whichplan; + LWLock pa_lock; /* mutual exclusion to choose next subplan */ + int pa_next_plan; /* next plan to choose by any worker */ /* - * get information from the append node + * pa_finished[i] should be true if no more workers should select subplan + * i. for a non-partial plan, this should be set to true as soon as a + * worker selects the plan; for a partial plan, it remains false until + * some worker executes the plan to completion. */ - whichplan = appendstate->as_whichplan; + bool pa_finished[FLEXIBLE_ARRAY_MEMBER]; +}; - if (whichplan < 0) - { - /* - * if scanning in reverse, we start at the last scan in the list and - * then proceed back to the first.. in any case we inform ExecAppend - * that we are at the end of the line by returning FALSE - */ - appendstate->as_whichplan = 0; - return FALSE; - } - else if (whichplan >= appendstate->as_nplans) - { - /* - * as above, end the scan if we go beyond the last scan in our list.. 
- */ - appendstate->as_whichplan = appendstate->as_nplans - 1; - return FALSE; - } - else - { - return TRUE; - } -} +#define INVALID_SUBPLAN_INDEX -1 +#define NO_MATCHING_SUBPLANS -2 + +static TupleTableSlot *ExecAppend(PlanState *pstate); +static bool choose_next_subplan_locally(AppendState *node); +static bool choose_next_subplan_for_leader(AppendState *node); +static bool choose_next_subplan_for_worker(AppendState *node); +static void mark_invalid_subplans_as_finished(AppendState *node); /* ---------------------------------------------------------------- * ExecInitAppend @@ -123,72 +102,143 @@ ExecInitAppend(Append *node, EState *estate, int eflags) { AppendState *appendstate = makeNode(AppendState); PlanState **appendplanstates; + Bitmapset *validsubplans; int nplans; - int i; + int firstvalid; + int i, + j; ListCell *lc; /* check for unsupported flags */ Assert(!(eflags & EXEC_FLAG_MARK)); - /* - * Lock the non-leaf tables in the partition tree controlled by this node. - * It's a no-op for non-partitioned parent tables. - */ - ExecLockNonLeafAppendTables(node->partitioned_rels, estate); - - /* - * Set up empty vector of subplan states - */ - nplans = list_length(node->appendplans); - - appendplanstates = (PlanState **) palloc0(nplans * sizeof(PlanState *)); - /* * create new AppendState for our append node */ appendstate->ps.plan = (Plan *) node; appendstate->ps.state = estate; appendstate->ps.ExecProcNode = ExecAppend; - appendstate->appendplans = appendplanstates; - appendstate->as_nplans = nplans; - /* - * Miscellaneous initialization - * - * Append plans don't have expression contexts because they never call - * ExecQual or ExecProject. - */ + /* Let choose_next_subplan_* function handle setting the first subplan */ + appendstate->as_whichplan = INVALID_SUBPLAN_INDEX; + + /* If run-time partition pruning is enabled, then set that up now */ + if (node->part_prune_info != NULL) + { + PartitionPruneState *prunestate; + + /* We may need an expression context to evaluate partition exprs */ + ExecAssignExprContext(estate, &appendstate->ps); + + /* Create the working data structure for pruning. */ + prunestate = ExecCreatePartitionPruneState(&appendstate->ps, + node->part_prune_info); + appendstate->as_prune_state = prunestate; + + /* Perform an initial partition prune, if required. */ + if (prunestate->do_initial_prune) + { + /* Determine which subplans survive initial pruning */ + validsubplans = ExecFindInitialMatchingSubPlans(prunestate, + list_length(node->appendplans)); + + /* + * The case where no subplans survive pruning must be handled + * specially. The problem here is that code in explain.c requires + * an Append to have at least one subplan in order for it to + * properly determine the Vars in that subplan's targetlist. We + * sidestep this issue by just initializing the first subplan and + * setting as_whichplan to NO_MATCHING_SUBPLANS to indicate that + * we don't really need to scan any subnodes. + */ + if (bms_is_empty(validsubplans)) + { + appendstate->as_whichplan = NO_MATCHING_SUBPLANS; + + /* Mark the first as valid so that it's initialized below */ + validsubplans = bms_make_singleton(0); + } + + nplans = bms_num_members(validsubplans); + } + else + { + /* We'll need to initialize all subplans */ + nplans = list_length(node->appendplans); + Assert(nplans > 0); + validsubplans = bms_add_range(NULL, 0, nplans - 1); + } + + /* + * If no runtime pruning is required, we can fill as_valid_subplans + * immediately, preventing later calls to ExecFindMatchingSubPlans. 
+ */ + if (!prunestate->do_exec_prune) + { + Assert(nplans > 0); + appendstate->as_valid_subplans = bms_add_range(NULL, 0, nplans - 1); + } + } + else + { + nplans = list_length(node->appendplans); + + /* + * When run-time partition pruning is not enabled we can just mark all + * subplans as valid; they must also all be initialized. + */ + Assert(nplans > 0); + appendstate->as_valid_subplans = validsubplans = + bms_add_range(NULL, 0, nplans - 1); + appendstate->as_prune_state = NULL; + } /* - * append nodes still have Result slots, which hold pointers to tuples, so - * we have to initialize them. + * Initialize result tuple type and slot. */ - ExecInitResultTupleSlot(estate, &appendstate->ps); + ExecInitResultTupleSlotTL(&appendstate->ps); + + appendplanstates = (PlanState **) palloc(nplans * + sizeof(PlanState *)); /* - * call ExecInitNode on each of the plans to be executed and save the - * results into the array "appendplans". + * call ExecInitNode on each of the valid plans to be executed and save + * the results into the appendplanstates array. + * + * While at it, find out the first valid partial plan. */ - i = 0; + j = i = 0; + firstvalid = nplans; foreach(lc, node->appendplans) { - Plan *initNode = (Plan *) lfirst(lc); + if (bms_is_member(i, validsubplans)) + { + Plan *initNode = (Plan *) lfirst(lc); - appendplanstates[i] = ExecInitNode(initNode, estate, eflags); + /* + * Record the lowest appendplans index which is a valid partial + * plan. + */ + if (i >= node->first_partial_plan && j < firstvalid) + firstvalid = j; + + appendplanstates[j++] = ExecInitNode(initNode, estate, eflags); + } i++; } + appendstate->as_first_partial_plan = firstvalid; + appendstate->appendplans = appendplanstates; + appendstate->as_nplans = nplans; + /* - * initialize output tuple type + * Miscellaneous initialization */ - ExecAssignResultTypeFromTL(&appendstate->ps); + appendstate->ps.ps_ProjInfo = NULL; - /* - * initialize to scan first subplan - */ - appendstate->as_whichplan = 0; - exec_append_initialize_next(appendstate); + /* For parallel query, this will be overridden later. */ + appendstate->choose_next_subplan = choose_next_subplan_locally; return appendstate; } @@ -204,6 +254,21 @@ ExecAppend(PlanState *pstate) { AppendState *node = castNode(AppendState, pstate); + if (node->as_whichplan < 0) + { + /* + * If no subplan has been chosen, we must choose one before + * proceeding. + */ + if (node->as_whichplan == INVALID_SUBPLAN_INDEX && + !node->choose_next_subplan(node)) + return ExecClearTuple(node->ps.ps_ResultTupleSlot); + + /* Nothing to do if there are no matching subplans */ + else if (node->as_whichplan == NO_MATCHING_SUBPLANS) + return ExecClearTuple(node->ps.ps_ResultTupleSlot); + } + for (;;) { PlanState *subnode; @@ -214,6 +279,7 @@ ExecAppend(PlanState *pstate) /* * figure out which subplan we are currently processing */ + Assert(node->as_whichplan >= 0 && node->as_whichplan < node->as_nplans); subnode = node->appendplans[node->as_whichplan]; /* @@ -231,19 +297,9 @@ ExecAppend(PlanState *pstate) return result; } - /* - * Go on to the "next" subplan in the appropriate direction. If no - * more subplans, return the empty slot set up for us by - * ExecInitAppend. 
- */ - if (ScanDirectionIsForward(node->ps.state->es_direction)) - node->as_whichplan++; - else - node->as_whichplan--; - if (!exec_append_initialize_next(node)) + /* choose new subplan; if none, we're done */ + if (!node->choose_next_subplan(node)) return ExecClearTuple(node->ps.ps_ResultTupleSlot); - - /* Else loop back and try to get a tuple from the new subplan */ } } @@ -280,6 +336,19 @@ ExecReScanAppend(AppendState *node) { int i; + /* + * If any PARAM_EXEC Params used in pruning expressions have changed, then + * we'd better unset the valid subplans so that they are reselected for + * the new parameter values. + */ + if (node->as_prune_state && + bms_overlap(node->ps.chgParam, + node->as_prune_state->execparamids)) + { + bms_free(node->as_valid_subplans); + node->as_valid_subplans = NULL; + } + for (i = 0; i < node->as_nplans; i++) { PlanState *subnode = node->appendplans[i]; @@ -298,6 +367,366 @@ ExecReScanAppend(AppendState *node) if (subnode->chgParam == NULL) ExecReScan(subnode); } - node->as_whichplan = 0; - exec_append_initialize_next(node); + + /* Let choose_next_subplan_* function handle setting the first subplan */ + node->as_whichplan = INVALID_SUBPLAN_INDEX; +} + +/* ---------------------------------------------------------------- + * Parallel Append Support + * ---------------------------------------------------------------- + */ + +/* ---------------------------------------------------------------- + * ExecAppendEstimate + * + * Compute the amount of space we'll need in the parallel + * query DSM, and inform pcxt->estimator about our needs. + * ---------------------------------------------------------------- + */ +void +ExecAppendEstimate(AppendState *node, + ParallelContext *pcxt) +{ + node->pstate_len = + add_size(offsetof(ParallelAppendState, pa_finished), + sizeof(bool) * node->as_nplans); + + shm_toc_estimate_chunk(&pcxt->estimator, node->pstate_len); + shm_toc_estimate_keys(&pcxt->estimator, 1); +} + + +/* ---------------------------------------------------------------- + * ExecAppendInitializeDSM + * + * Set up shared state for Parallel Append. + * ---------------------------------------------------------------- + */ +void +ExecAppendInitializeDSM(AppendState *node, + ParallelContext *pcxt) +{ + ParallelAppendState *pstate; + + pstate = shm_toc_allocate(pcxt->toc, node->pstate_len); + memset(pstate, 0, node->pstate_len); + LWLockInitialize(&pstate->pa_lock, LWTRANCHE_PARALLEL_APPEND); + shm_toc_insert(pcxt->toc, node->ps.plan->plan_node_id, pstate); + + node->as_pstate = pstate; + node->choose_next_subplan = choose_next_subplan_for_leader; +} + +/* ---------------------------------------------------------------- + * ExecAppendReInitializeDSM + * + * Reset shared state before beginning a fresh scan. + * ---------------------------------------------------------------- + */ +void +ExecAppendReInitializeDSM(AppendState *node, ParallelContext *pcxt) +{ + ParallelAppendState *pstate = node->as_pstate; + + pstate->pa_next_plan = 0; + memset(pstate->pa_finished, 0, sizeof(bool) * node->as_nplans); +} + +/* ---------------------------------------------------------------- + * ExecAppendInitializeWorker + * + * Copy relevant information from TOC into planstate, and initialize + * whatever is required to choose and execute the optimal subplan. 
+ * ---------------------------------------------------------------- + */ +void +ExecAppendInitializeWorker(AppendState *node, ParallelWorkerContext *pwcxt) +{ + node->as_pstate = shm_toc_lookup(pwcxt->toc, node->ps.plan->plan_node_id, false); + node->choose_next_subplan = choose_next_subplan_for_worker; +} + +/* ---------------------------------------------------------------- + * choose_next_subplan_locally + * + * Choose next subplan for a non-parallel-aware Append, + * returning false if there are no more. + * ---------------------------------------------------------------- + */ +static bool +choose_next_subplan_locally(AppendState *node) +{ + int whichplan = node->as_whichplan; + int nextplan; + + /* We should never be called when there are no subplans */ + Assert(whichplan != NO_MATCHING_SUBPLANS); + + /* + * If first call then have the bms member function choose the first valid + * subplan by initializing whichplan to -1. If there happen to be no + * valid subplans then the bms member function will handle that by + * returning a negative number which will allow us to exit returning a + * false value. + */ + if (whichplan == INVALID_SUBPLAN_INDEX) + { + if (node->as_valid_subplans == NULL) + node->as_valid_subplans = + ExecFindMatchingSubPlans(node->as_prune_state); + + whichplan = -1; + } + + /* Ensure whichplan is within the expected range */ + Assert(whichplan >= -1 && whichplan <= node->as_nplans); + + if (ScanDirectionIsForward(node->ps.state->es_direction)) + nextplan = bms_next_member(node->as_valid_subplans, whichplan); + else + nextplan = bms_prev_member(node->as_valid_subplans, whichplan); + + if (nextplan < 0) + return false; + + node->as_whichplan = nextplan; + + return true; +} + +/* ---------------------------------------------------------------- + * choose_next_subplan_for_leader + * + * Try to pick a plan which doesn't commit us to doing much + * work locally, so that as much work as possible is done in + * the workers. Cheapest subplans are at the end. + * ---------------------------------------------------------------- + */ +static bool +choose_next_subplan_for_leader(AppendState *node) +{ + ParallelAppendState *pstate = node->as_pstate; + + /* Backward scan is not supported by parallel-aware plans */ + Assert(ScanDirectionIsForward(node->ps.state->es_direction)); + + /* We should never be called when there are no subplans */ + Assert(node->as_whichplan != NO_MATCHING_SUBPLANS); + + LWLockAcquire(&pstate->pa_lock, LW_EXCLUSIVE); + + if (node->as_whichplan != INVALID_SUBPLAN_INDEX) + { + /* Mark just-completed subplan as finished. */ + node->as_pstate->pa_finished[node->as_whichplan] = true; + } + else + { + /* Start with last subplan. */ + node->as_whichplan = node->as_nplans - 1; + + /* + * If we've yet to determine the valid subplans then do so now. If + * run-time pruning is disabled then the valid subplans will always be + * set to all subplans. + */ + if (node->as_valid_subplans == NULL) + { + node->as_valid_subplans = + ExecFindMatchingSubPlans(node->as_prune_state); + + /* + * Mark each invalid plan as finished to allow the loop below to + * select the first valid subplan. + */ + mark_invalid_subplans_as_finished(node); + } + } + + /* Loop until we find a subplan to execute. 
*/ + while (pstate->pa_finished[node->as_whichplan]) + { + if (node->as_whichplan == 0) + { + pstate->pa_next_plan = INVALID_SUBPLAN_INDEX; + node->as_whichplan = INVALID_SUBPLAN_INDEX; + LWLockRelease(&pstate->pa_lock); + return false; + } + + /* + * We needn't pay attention to as_valid_subplans here as all invalid + * plans have been marked as finished. + */ + node->as_whichplan--; + } + + /* If non-partial, immediately mark as finished. */ + if (node->as_whichplan < node->as_first_partial_plan) + node->as_pstate->pa_finished[node->as_whichplan] = true; + + LWLockRelease(&pstate->pa_lock); + + return true; +} + +/* ---------------------------------------------------------------- + * choose_next_subplan_for_worker + * + * Choose next subplan for a parallel-aware Append, returning + * false if there are no more. + * + * We start from the first plan and advance through the list; + * when we get back to the end, we loop back to the first + * partial plan. This assigns the non-partial plans first in + * order of descending cost and then spreads out the workers + * as evenly as possible across the remaining partial plans. + * ---------------------------------------------------------------- + */ +static bool +choose_next_subplan_for_worker(AppendState *node) +{ + ParallelAppendState *pstate = node->as_pstate; + + /* Backward scan is not supported by parallel-aware plans */ + Assert(ScanDirectionIsForward(node->ps.state->es_direction)); + + /* We should never be called when there are no subplans */ + Assert(node->as_whichplan != NO_MATCHING_SUBPLANS); + + LWLockAcquire(&pstate->pa_lock, LW_EXCLUSIVE); + + /* Mark just-completed subplan as finished. */ + if (node->as_whichplan != INVALID_SUBPLAN_INDEX) + node->as_pstate->pa_finished[node->as_whichplan] = true; + + /* + * If we've yet to determine the valid subplans then do so now. If + * run-time pruning is disabled then the valid subplans will always be set + * to all subplans. + */ + else if (node->as_valid_subplans == NULL) + { + node->as_valid_subplans = + ExecFindMatchingSubPlans(node->as_prune_state); + mark_invalid_subplans_as_finished(node); + } + + /* If all the plans are already done, we have nothing to do */ + if (pstate->pa_next_plan == INVALID_SUBPLAN_INDEX) + { + LWLockRelease(&pstate->pa_lock); + return false; + } + + /* Save the plan from which we are starting the search. */ + node->as_whichplan = pstate->pa_next_plan; + + /* Loop until we find a valid subplan to execute. */ + while (pstate->pa_finished[pstate->pa_next_plan]) + { + int nextplan; + + nextplan = bms_next_member(node->as_valid_subplans, + pstate->pa_next_plan); + if (nextplan >= 0) + { + /* Advance to the next valid plan. */ + pstate->pa_next_plan = nextplan; + } + else if (node->as_whichplan > node->as_first_partial_plan) + { + /* + * Try looping back to the first valid partial plan, if there is + * one. If there isn't, arrange to bail out below. + */ + nextplan = bms_next_member(node->as_valid_subplans, + node->as_first_partial_plan - 1); + pstate->pa_next_plan = + nextplan < 0 ? node->as_whichplan : nextplan; + } + else + { + /* + * At last plan, and either there are no partial plans or we've + * tried them all. Arrange to bail out. + */ + pstate->pa_next_plan = node->as_whichplan; + } + + if (pstate->pa_next_plan == node->as_whichplan) + { + /* We've tried everything! */ + pstate->pa_next_plan = INVALID_SUBPLAN_INDEX; + LWLockRelease(&pstate->pa_lock); + return false; + } + } + + /* Pick the plan we found, and advance pa_next_plan one more time. 
*/ + node->as_whichplan = pstate->pa_next_plan; + pstate->pa_next_plan = bms_next_member(node->as_valid_subplans, + pstate->pa_next_plan); + + /* + * If there are no more valid plans then try setting the next plan to the + * first valid partial plan. + */ + if (pstate->pa_next_plan < 0) + { + int nextplan = bms_next_member(node->as_valid_subplans, + node->as_first_partial_plan - 1); + + if (nextplan >= 0) + pstate->pa_next_plan = nextplan; + else + { + /* + * There are no valid partial plans, and we already chose the last + * non-partial plan; so flag that there's nothing more for our + * fellow workers to do. + */ + pstate->pa_next_plan = INVALID_SUBPLAN_INDEX; + } + } + + /* If non-partial, immediately mark as finished. */ + if (node->as_whichplan < node->as_first_partial_plan) + node->as_pstate->pa_finished[node->as_whichplan] = true; + + LWLockRelease(&pstate->pa_lock); + + return true; +} + +/* + * mark_invalid_subplans_as_finished + * Marks the ParallelAppendState's pa_finished as true for each invalid + * subplan. + * + * This function should only be called for parallel Append with run-time + * pruning enabled. + */ +static void +mark_invalid_subplans_as_finished(AppendState *node) +{ + int i; + + /* Only valid to call this while in parallel Append mode */ + Assert(node->as_pstate); + + /* Shouldn't have been called when run-time pruning is not enabled */ + Assert(node->as_prune_state); + + /* Nothing to do if all plans are valid */ + if (bms_num_members(node->as_valid_subplans) == node->as_nplans) + return; + + /* Mark all non-valid plans as finished */ + for (i = 0; i < node->as_nplans; i++) + { + if (!bms_is_member(i, node->as_valid_subplans)) + node->as_pstate->pa_finished[i] = true; + } } diff --git a/src/backend/executor/nodeBitmapAnd.c b/src/backend/executor/nodeBitmapAnd.c index 1c5c312c95..23d0d94326 100644 --- a/src/backend/executor/nodeBitmapAnd.c +++ b/src/backend/executor/nodeBitmapAnd.c @@ -3,7 +3,7 @@ * nodeBitmapAnd.c * routines to handle BitmapAnd nodes. * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * @@ -80,13 +80,6 @@ ExecInitBitmapAnd(BitmapAnd *node, EState *estate, int eflags) bitmapandstate->bitmapplans = bitmapplanstates; bitmapandstate->nplans = nplans; - /* - * Miscellaneous initialization - * - * BitmapAnd plans don't have expression contexts because they never call - * ExecQual or ExecProject. They don't need any tuple slots either. - */ - /* * call ExecInitNode on each of the plans to be executed and save the * results into the array "bitmapplanstates". @@ -99,6 +92,13 @@ ExecInitBitmapAnd(BitmapAnd *node, EState *estate, int eflags) i++; } + /* + * Miscellaneous initialization + * + * BitmapAnd plans don't have expression contexts because they never call + * ExecQual or ExecProject. They don't need any tuple slots either. + */ + return bitmapandstate; } diff --git a/src/backend/executor/nodeBitmapHeapscan.c b/src/backend/executor/nodeBitmapHeapscan.c index 79f534e4e9..c153d74f41 100644 --- a/src/backend/executor/nodeBitmapHeapscan.c +++ b/src/backend/executor/nodeBitmapHeapscan.c @@ -16,7 +16,7 @@ * required index qual conditions. 
* * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * @@ -39,6 +39,7 @@ #include "access/relscan.h" #include "access/transam.h" +#include "access/visibilitymap.h" #include "executor/execdebug.h" #include "executor/nodeBitmapHeapscan.h" #include "miscadmin.h" @@ -225,9 +226,31 @@ BitmapHeapNext(BitmapHeapScanState *node) } /* - * Fetch the current heap page and identify candidate tuples. + * We can skip fetching the heap page if we don't need any fields + * from the heap, and the bitmap entries don't need rechecking, + * and all tuples on the page are visible to our transaction. */ - bitgetpage(scan, tbmres); + node->skip_fetch = (node->can_skip_fetch && + !tbmres->recheck && + VM_ALL_VISIBLE(node->ss.ss_currentRelation, + tbmres->blockno, + &node->vmbuffer)); + + if (node->skip_fetch) + { + /* + * The number of tuples on this page is put into + * scan->rs_ntuples; note we don't fill scan->rs_vistuples. + */ + scan->rs_ntuples = tbmres->ntuples; + } + else + { + /* + * Fetch the current heap page and identify candidate tuples. + */ + bitgetpage(scan, tbmres); + } if (tbmres->ntuples >= 0) node->exact_pages++; @@ -289,45 +312,52 @@ BitmapHeapNext(BitmapHeapScanState *node) */ BitmapPrefetch(node, scan); - /* - * Okay to fetch the tuple - */ - targoffset = scan->rs_vistuples[scan->rs_cindex]; - dp = (Page) BufferGetPage(scan->rs_cbuf); - lp = PageGetItemId(dp, targoffset); - Assert(ItemIdIsNormal(lp)); - - scan->rs_ctup.t_data = (HeapTupleHeader) PageGetItem((Page) dp, lp); - scan->rs_ctup.t_len = ItemIdGetLength(lp); - scan->rs_ctup.t_tableOid = scan->rs_rd->rd_id; - ItemPointerSet(&scan->rs_ctup.t_self, tbmres->blockno, targoffset); + if (node->skip_fetch) + { + /* + * If we don't have to fetch the tuple, just return nulls. + */ + ExecStoreAllNullTuple(slot); + } + else + { + /* + * Okay to fetch the tuple. + */ + targoffset = scan->rs_vistuples[scan->rs_cindex]; + dp = (Page) BufferGetPage(scan->rs_cbuf); + lp = PageGetItemId(dp, targoffset); + Assert(ItemIdIsNormal(lp)); - pgstat_count_heap_fetch(scan->rs_rd); + scan->rs_ctup.t_data = (HeapTupleHeader) PageGetItem((Page) dp, lp); + scan->rs_ctup.t_len = ItemIdGetLength(lp); + scan->rs_ctup.t_tableOid = scan->rs_rd->rd_id; + ItemPointerSet(&scan->rs_ctup.t_self, tbmres->blockno, targoffset); - /* - * Set up the result slot to point to this tuple. Note that the slot - * acquires a pin on the buffer. - */ - ExecStoreTuple(&scan->rs_ctup, - slot, - scan->rs_cbuf, - false); + pgstat_count_heap_fetch(scan->rs_rd); - /* - * If we are using lossy info, we have to recheck the qual conditions - * at every tuple. - */ - if (tbmres->recheck) - { - econtext->ecxt_scantuple = slot; - ResetExprContext(econtext); + /* + * Set up the result slot to point to this tuple. Note that the + * slot acquires a pin on the buffer. + */ + ExecStoreBufferHeapTuple(&scan->rs_ctup, + slot, + scan->rs_cbuf); - if (!ExecQual(node->bitmapqualorig, econtext)) + /* + * If we are using lossy info, we have to recheck the qual + * conditions at every tuple. 
+ */ + if (tbmres->recheck) { - /* Fails recheck, so drop it and loop back for another */ - InstrCountFiltered2(node, 1); - ExecClearTuple(slot); - continue; + econtext->ecxt_scantuple = slot; + if (!ExecQualAndReset(node->bitmapqualorig, econtext)) + { + /* Fails recheck, so drop it and loop back for another */ + InstrCountFiltered2(node, 1); + ExecClearTuple(slot); + continue; + } } } @@ -582,6 +612,7 @@ BitmapPrefetch(BitmapHeapScanState *node, HeapScanDesc scan) while (node->prefetch_pages < node->prefetch_target) { TBMIterateResult *tbmpre = tbm_iterate(prefetch_iterator); + bool skip_fetch; if (tbmpre == NULL) { @@ -591,7 +622,26 @@ BitmapPrefetch(BitmapHeapScanState *node, HeapScanDesc scan) break; } node->prefetch_pages++; - PrefetchBuffer(scan->rs_rd, MAIN_FORKNUM, tbmpre->blockno); + + /* + * If we expect not to have to actually read this heap page, + * skip this prefetch call, but continue to run the prefetch + * logic normally. (Would it be better not to increment + * prefetch_pages?) + * + * This depends on the assumption that the index AM will + * report the same recheck flag for this future heap page as + * it did for the current heap page; which is not a certainty + * but is true in many cases. + */ + skip_fetch = (node->can_skip_fetch && + (node->tbmres ? !node->tbmres->recheck : false) && + VM_ALL_VISIBLE(node->ss.ss_currentRelation, + tbmpre->blockno, + &node->pvmbuffer)); + + if (!skip_fetch) + PrefetchBuffer(scan->rs_rd, MAIN_FORKNUM, tbmpre->blockno); } } @@ -608,6 +658,7 @@ BitmapPrefetch(BitmapHeapScanState *node, HeapScanDesc scan) { TBMIterateResult *tbmpre; bool do_prefetch = false; + bool skip_fetch; /* * Recheck under the mutex. If some other process has already @@ -633,7 +684,15 @@ BitmapPrefetch(BitmapHeapScanState *node, HeapScanDesc scan) break; } - PrefetchBuffer(scan->rs_rd, MAIN_FORKNUM, tbmpre->blockno); + /* As above, skip prefetch if we expect not to need page */ + skip_fetch = (node->can_skip_fetch && + (node->tbmres ? !node->tbmres->recheck : false) && + VM_ALL_VISIBLE(node->ss.ss_currentRelation, + tbmpre->blockno, + &node->pvmbuffer)); + + if (!skip_fetch) + PrefetchBuffer(scan->rs_rd, MAIN_FORKNUM, tbmpre->blockno); } } } @@ -655,10 +714,7 @@ BitmapHeapRecheck(BitmapHeapScanState *node, TupleTableSlot *slot) /* Does the tuple meet the original qual conditions? 
*/ econtext->ecxt_scantuple = slot; - - ResetExprContext(econtext); - - return ExecQual(node->bitmapqualorig, econtext); + return ExecQualAndReset(node->bitmapqualorig, econtext); } /* ---------------------------------------------------------------- @@ -687,6 +743,7 @@ ExecReScanBitmapHeapScan(BitmapHeapScanState *node) /* rescan to release any page pin */ heap_rescan(node->ss.ss_currentScanDesc, NULL); + /* release bitmaps and buffers if any */ if (node->tbmiterator) tbm_end_iterate(node->tbmiterator); if (node->prefetch_iterator) @@ -697,6 +754,10 @@ ExecReScanBitmapHeapScan(BitmapHeapScanState *node) tbm_end_shared_iterate(node->shared_prefetch_iterator); if (node->tbm) tbm_free(node->tbm); + if (node->vmbuffer != InvalidBuffer) + ReleaseBuffer(node->vmbuffer); + if (node->pvmbuffer != InvalidBuffer) + ReleaseBuffer(node->pvmbuffer); node->tbm = NULL; node->tbmiterator = NULL; node->tbmres = NULL; @@ -704,23 +765,8 @@ ExecReScanBitmapHeapScan(BitmapHeapScanState *node) node->initialized = false; node->shared_tbmiterator = NULL; node->shared_prefetch_iterator = NULL; - - /* Reset parallel bitmap state, if present */ - if (node->pstate) - { - dsa_area *dsa = node->ss.ps.state->es_query_dsa; - - node->pstate->state = BM_INITIAL; - - if (DsaPointerIsValid(node->pstate->tbmiterator)) - tbm_free_shared_area(dsa, node->pstate->tbmiterator); - - if (DsaPointerIsValid(node->pstate->prefetch_iterator)) - tbm_free_shared_area(dsa, node->pstate->prefetch_iterator); - - node->pstate->tbmiterator = InvalidDsaPointer; - node->pstate->prefetch_iterator = InvalidDsaPointer; - } + node->vmbuffer = InvalidBuffer; + node->pvmbuffer = InvalidBuffer; ExecScanReScan(&node->ss); @@ -739,13 +785,11 @@ ExecReScanBitmapHeapScan(BitmapHeapScanState *node) void ExecEndBitmapHeapScan(BitmapHeapScanState *node) { - Relation relation; HeapScanDesc scanDesc; /* * extract information from the node */ - relation = node->ss.ss_currentRelation; scanDesc = node->ss.ss_currentScanDesc; /* @@ -756,7 +800,8 @@ ExecEndBitmapHeapScan(BitmapHeapScanState *node) /* * clear out tuple table slots */ - ExecClearTuple(node->ss.ps.ps_ResultTupleSlot); + if (node->ss.ps.ps_ResultTupleSlot) + ExecClearTuple(node->ss.ps.ps_ResultTupleSlot); ExecClearTuple(node->ss.ss_ScanTupleSlot); /* @@ -765,7 +810,7 @@ ExecEndBitmapHeapScan(BitmapHeapScanState *node) ExecEndNode(outerPlanState(node)); /* - * release bitmap if any + * release bitmaps and buffers if any */ if (node->tbmiterator) tbm_end_iterate(node->tbmiterator); @@ -777,16 +822,15 @@ ExecEndBitmapHeapScan(BitmapHeapScanState *node) tbm_end_shared_iterate(node->shared_tbmiterator); if (node->shared_prefetch_iterator) tbm_end_shared_iterate(node->shared_prefetch_iterator); + if (node->vmbuffer != InvalidBuffer) + ReleaseBuffer(node->vmbuffer); + if (node->pvmbuffer != InvalidBuffer) + ReleaseBuffer(node->pvmbuffer); /* * close heap scan */ heap_endscan(scanDesc); - - /* - * close the heap relation. 
- */ - ExecCloseScanRelation(relation); } /* ---------------------------------------------------------------- @@ -822,6 +866,9 @@ ExecInitBitmapHeapScan(BitmapHeapScan *node, EState *estate, int eflags) scanstate->tbm = NULL; scanstate->tbmiterator = NULL; scanstate->tbmres = NULL; + scanstate->skip_fetch = false; + scanstate->vmbuffer = InvalidBuffer; + scanstate->pvmbuffer = InvalidBuffer; scanstate->exact_pages = 0; scanstate->lossy_pages = 0; scanstate->prefetch_iterator = NULL; @@ -832,8 +879,19 @@ ExecInitBitmapHeapScan(BitmapHeapScan *node, EState *estate, int eflags) scanstate->pscan_len = 0; scanstate->initialized = false; scanstate->shared_tbmiterator = NULL; + scanstate->shared_prefetch_iterator = NULL; scanstate->pstate = NULL; + /* + * We can potentially skip fetching heap pages if we do not need any + * columns of the table, either for checking non-indexable quals or for + * returning data. This test is a bit simplistic, as it checks the + * stronger condition that there's no qual or return tlist at all. But in + * most cases it's probably not worth working harder than that. + */ + scanstate->can_skip_fetch = (node->scan.plan.qual == NIL && + node->scan.plan.targetlist == NIL); + /* * Miscellaneous initialization * @@ -842,23 +900,35 @@ ExecInitBitmapHeapScan(BitmapHeapScan *node, EState *estate, int eflags) ExecAssignExprContext(estate, &scanstate->ss.ps); /* - * initialize child expressions + * open the scan relation */ - scanstate->ss.ps.qual = - ExecInitQual(node->scan.plan.qual, (PlanState *) scanstate); - scanstate->bitmapqualorig = - ExecInitQual(node->bitmapqualorig, (PlanState *) scanstate); + currentRelation = ExecOpenScanRelation(estate, node->scan.scanrelid, eflags); /* - * tuple table initialization + * initialize child nodes */ - ExecInitResultTupleSlot(estate, &scanstate->ss.ps); - ExecInitScanTupleSlot(estate, &scanstate->ss); + outerPlanState(scanstate) = ExecInitNode(outerPlan(node), estate, eflags); /* - * open the base relation and acquire appropriate lock on it. + * get the scan type from the relation descriptor. */ - currentRelation = ExecOpenScanRelation(estate, node->scan.scanrelid, eflags); + ExecInitScanTupleSlot(estate, &scanstate->ss, + RelationGetDescr(currentRelation)); + + + /* + * Initialize result type and projection. + */ + ExecInitResultTypeTL(&scanstate->ss.ps); + ExecAssignScanProjectionInfo(&scanstate->ss); + + /* + * initialize child expressions + */ + scanstate->ss.ps.qual = + ExecInitQual(node->scan.plan.qual, (PlanState *) scanstate); + scanstate->bitmapqualorig = + ExecInitQual(node->bitmapqualorig, (PlanState *) scanstate); /* * Determine the maximum for prefetch_target. If the tablespace has a @@ -887,26 +957,6 @@ ExecInitBitmapHeapScan(BitmapHeapScan *node, EState *estate, int eflags) 0, NULL); - /* - * get the scan type from the relation descriptor. - */ - ExecAssignScanType(&scanstate->ss, RelationGetDescr(currentRelation)); - - /* - * Initialize result tuple type and projection info. - */ - ExecAssignResultTypeFromTL(&scanstate->ss.ps); - ExecAssignScanProjectionInfo(&scanstate->ss); - - /* - * initialize child nodes - * - * We do this last because the child nodes will open indexscans on our - * relation's indexes, and we want to be sure we have acquired a lock on - * the relation first. - */ - outerPlanState(scanstate) = ExecInitNode(outerPlan(node), estate, eflags); - /* * all done. 
*/ @@ -951,7 +1001,8 @@ BitmapShouldInitializeSharedState(ParallelBitmapHeapState *pstate) /* ---------------------------------------------------------------- * ExecBitmapHeapEstimate * - * estimates the space required to serialize bitmap scan node. + * Compute the amount of space we'll need in the parallel + * query DSM, and inform pcxt->estimator about our needs. * ---------------------------------------------------------------- */ void @@ -980,6 +1031,11 @@ ExecBitmapHeapInitializeDSM(BitmapHeapScanState *node, { ParallelBitmapHeapState *pstate; EState *estate = node->ss.ps.state; + dsa_area *dsa = node->ss.ps.state->es_query_dsa; + + /* If there's no DSA, there are no workers; initialize nothing. */ + if (dsa == NULL) + return; pstate = shm_toc_allocate(pcxt->toc, node->pscan_len); @@ -999,6 +1055,35 @@ ExecBitmapHeapInitializeDSM(BitmapHeapScanState *node, node->pstate = pstate; } +/* ---------------------------------------------------------------- + * ExecBitmapHeapReInitializeDSM + * + * Reset shared state before beginning a fresh scan. + * ---------------------------------------------------------------- + */ +void +ExecBitmapHeapReInitializeDSM(BitmapHeapScanState *node, + ParallelContext *pcxt) +{ + ParallelBitmapHeapState *pstate = node->pstate; + dsa_area *dsa = node->ss.ps.state->es_query_dsa; + + /* If there's no DSA, there are no workers; do nothing. */ + if (dsa == NULL) + return; + + pstate->state = BM_INITIAL; + + if (DsaPointerIsValid(pstate->tbmiterator)) + tbm_free_shared_area(dsa, pstate->tbmiterator); + + if (DsaPointerIsValid(pstate->prefetch_iterator)) + tbm_free_shared_area(dsa, pstate->prefetch_iterator); + + pstate->tbmiterator = InvalidDsaPointer; + pstate->prefetch_iterator = InvalidDsaPointer; +} + /* ---------------------------------------------------------------- * ExecBitmapHeapInitializeWorker * @@ -1006,12 +1091,15 @@ ExecBitmapHeapInitializeDSM(BitmapHeapScanState *node, * ---------------------------------------------------------------- */ void -ExecBitmapHeapInitializeWorker(BitmapHeapScanState *node, shm_toc *toc) +ExecBitmapHeapInitializeWorker(BitmapHeapScanState *node, + ParallelWorkerContext *pwcxt) { ParallelBitmapHeapState *pstate; Snapshot snapshot; - pstate = shm_toc_lookup(toc, node->ss.ps.plan->plan_node_id, false); + Assert(node->ss.ps.state->es_query_dsa != NULL); + + pstate = shm_toc_lookup(pwcxt->toc, node->ss.ps.plan->plan_node_id, false); node->pstate = pstate; snapshot = RestoreSnapshot(pstate->phs_snapshot_data); diff --git a/src/backend/executor/nodeBitmapIndexscan.c b/src/backend/executor/nodeBitmapIndexscan.c index 6feb70f4ae..d04f4901b4 100644 --- a/src/backend/executor/nodeBitmapIndexscan.c +++ b/src/backend/executor/nodeBitmapIndexscan.c @@ -3,7 +3,7 @@ * nodeBitmapIndexscan.c * Routines to support bitmapped index scans of relations * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * @@ -226,6 +226,15 @@ ExecInitBitmapIndexScan(BitmapIndexScan *node, EState *estate, int eflags) /* normally we don't make the result bitmap till runtime */ indexstate->biss_result = NULL; + /* + * We do not open or lock the base relation here. We assume that an + * ancestor BitmapHeapScan node is holding AccessShareLock (or better) on + * the heap relation throughout the execution of the plan tree. 
+ */ + + indexstate->ss.ss_currentRelation = NULL; + indexstate->ss.ss_currentScanDesc = NULL; + /* * Miscellaneous initialization * @@ -242,15 +251,6 @@ ExecInitBitmapIndexScan(BitmapIndexScan *node, EState *estate, int eflags) * sub-parts corresponding to runtime keys (see below). */ - /* - * We do not open or lock the base relation here. We assume that an - * ancestor BitmapHeapScan node is holding AccessShareLock (or better) on - * the heap relation throughout the execution of the plan tree. - */ - - indexstate->ss.ss_currentRelation = NULL; - indexstate->ss.ss_currentScanDesc = NULL; - /* * If we are just doing EXPLAIN (ie, aren't going to run the plan), stop * here. This allows an index-advisor plugin to EXPLAIN a plan containing diff --git a/src/backend/executor/nodeBitmapOr.c b/src/backend/executor/nodeBitmapOr.c index 66a7a89a8b..3f0a0a0544 100644 --- a/src/backend/executor/nodeBitmapOr.c +++ b/src/backend/executor/nodeBitmapOr.c @@ -3,7 +3,7 @@ * nodeBitmapOr.c * routines to handle BitmapOr nodes. * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * @@ -81,13 +81,6 @@ ExecInitBitmapOr(BitmapOr *node, EState *estate, int eflags) bitmaporstate->bitmapplans = bitmapplanstates; bitmaporstate->nplans = nplans; - /* - * Miscellaneous initialization - * - * BitmapOr plans don't have expression contexts because they never call - * ExecQual or ExecProject. They don't need any tuple slots either. - */ - /* * call ExecInitNode on each of the plans to be executed and save the * results into the array "bitmapplanstates". @@ -100,6 +93,13 @@ ExecInitBitmapOr(BitmapOr *node, EState *estate, int eflags) i++; } + /* + * Miscellaneous initialization + * + * BitmapOr plans don't have expression contexts because they never call + * ExecQual or ExecProject. They don't need any tuple slots either. + */ + return bitmaporstate; } diff --git a/src/backend/executor/nodeCtescan.c b/src/backend/executor/nodeCtescan.c index 79676ca978..017b877277 100644 --- a/src/backend/executor/nodeCtescan.c +++ b/src/backend/executor/nodeCtescan.c @@ -3,7 +3,7 @@ * nodeCtescan.c * routines to handle CteScan nodes. * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * @@ -107,6 +107,13 @@ CteScanNext(CteScanState *node) return NULL; } + /* + * There are corner cases where the subplan could change which + * tuplestore read pointer is active, so be sure to reselect ours + * before storing the tuple we got. + */ + tuplestore_select_read_pointer(tuplestorestate, node->readptr); + /* * Append a copy of the returned tuple to tuplestore. NOTE: because * our read pointer is certainly in EOF state, its read position will @@ -178,6 +185,12 @@ ExecInitCteScan(CteScan *node, EState *estate, int eflags) * we might be asked to rescan the CTE even though upper levels didn't * tell us to be prepared to do it efficiently. Annoying, since this * prevents truncation of the tuplestore. XXX FIXME + * + * Note: if we are in an EPQ recheck plan tree, it's likely that no access + * to the tuplestore is needed at all, making this even more annoying. 
+ * It's not worth improving that as long as all the read pointers would + * have REWIND anyway, but if we ever improve this logic then that aspect + * should be considered too. */ eflags |= EXEC_FLAG_REWIND; @@ -242,31 +255,25 @@ ExecInitCteScan(CteScan *node, EState *estate, int eflags) */ ExecAssignExprContext(estate, &scanstate->ss.ps); - /* - * initialize child expressions - */ - scanstate->ss.ps.qual = - ExecInitQual(node->scan.plan.qual, (PlanState *) scanstate); - - /* - * tuple table initialization - */ - ExecInitResultTupleSlot(estate, &scanstate->ss.ps); - ExecInitScanTupleSlot(estate, &scanstate->ss); - /* * The scan tuple type (ie, the rowtype we expect to find in the work * table) is the same as the result rowtype of the CTE query. */ - ExecAssignScanType(&scanstate->ss, - ExecGetResultType(scanstate->cteplanstate)); + ExecInitScanTupleSlot(estate, &scanstate->ss, + ExecGetResultType(scanstate->cteplanstate)); /* - * Initialize result tuple type and projection info. + * Initialize result type and projection. */ - ExecAssignResultTypeFromTL(&scanstate->ss.ps); + ExecInitResultTypeTL(&scanstate->ss.ps); ExecAssignScanProjectionInfo(&scanstate->ss); + /* + * initialize child expressions + */ + scanstate->ss.ps.qual = + ExecInitQual(node->scan.plan.qual, (PlanState *) scanstate); + return scanstate; } @@ -287,7 +294,8 @@ ExecEndCteScan(CteScanState *node) /* * clean out the tuple table */ - ExecClearTuple(node->ss.ps.ps_ResultTupleSlot); + if (node->ss.ps.ps_ResultTupleSlot) + ExecClearTuple(node->ss.ps.ps_ResultTupleSlot); ExecClearTuple(node->ss.ss_ScanTupleSlot); /* @@ -311,7 +319,8 @@ ExecReScanCteScan(CteScanState *node) { Tuplestorestate *tuplestorestate = node->leader->cte_table; - ExecClearTuple(node->ss.ps.ps_ResultTupleSlot); + if (node->ss.ps.ps_ResultTupleSlot) + ExecClearTuple(node->ss.ps.ps_ResultTupleSlot); ExecScanReScan(&node->ss); diff --git a/src/backend/executor/nodeCustom.c b/src/backend/executor/nodeCustom.c index fb7645b1f4..ab3e34790e 100644 --- a/src/backend/executor/nodeCustom.c +++ b/src/backend/executor/nodeCustom.c @@ -3,7 +3,7 @@ * nodeCustom.c * Routines to handle execution of custom scan node * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * ------------------------------------------------------------------------ @@ -54,16 +54,8 @@ ExecInitCustomScan(CustomScan *cscan, EState *estate, int eflags) /* create expression context for node */ ExecAssignExprContext(estate, &css->ss.ps); - /* initialize child expressions */ - css->ss.ps.qual = - ExecInitQual(cscan->scan.plan.qual, (PlanState *) css); - - /* tuple table initialization */ - ExecInitScanTupleSlot(estate, &css->ss); - ExecInitResultTupleSlot(estate, &css->ss.ps); - /* - * open the base relation, if any, and acquire an appropriate lock on it + * open the scan relation, if any */ if (scanrelid > 0) { @@ -81,23 +73,27 @@ ExecInitCustomScan(CustomScan *cscan, EState *estate, int eflags) TupleDesc scan_tupdesc; scan_tupdesc = ExecTypeFromTL(cscan->custom_scan_tlist, false); - ExecAssignScanType(&css->ss, scan_tupdesc); + ExecInitScanTupleSlot(estate, &css->ss, scan_tupdesc); /* Node's targetlist will contain Vars with varno = INDEX_VAR */ tlistvarno = INDEX_VAR; } else { - ExecAssignScanType(&css->ss, RelationGetDescr(scan_rel)); + ExecInitScanTupleSlot(estate, &css->ss, RelationGetDescr(scan_rel)); /* Node's targetlist will 
contain Vars with varno = scanrelid */ tlistvarno = scanrelid; } /* - * Initialize result tuple type and projection info. + * Initialize result slot, type and projection. */ - ExecAssignResultTypeFromTL(&css->ss.ps); + ExecInitResultTupleSlotTL(&css->ss.ps); ExecAssignScanProjectionInfoWithVarno(&css->ss, tlistvarno); + /* initialize child expressions */ + css->ss.ps.qual = + ExecInitQual(cscan->scan.plan.qual, (PlanState *) css); + /* * The callback of custom-scan provider applies the final initialization * of the custom-scan-state node according to its logic. @@ -130,10 +126,6 @@ ExecEndCustomScan(CustomScanState *node) /* Clean out the tuple table */ ExecClearTuple(node->ss.ps.ps_ResultTupleSlot); ExecClearTuple(node->ss.ss_ScanTupleSlot); - - /* Close the heap relation */ - if (node->ss.ss_currentRelation) - ExecCloseScanRelation(node->ss.ss_currentRelation); } void @@ -195,7 +187,23 @@ ExecCustomScanInitializeDSM(CustomScanState *node, ParallelContext *pcxt) } void -ExecCustomScanInitializeWorker(CustomScanState *node, shm_toc *toc) +ExecCustomScanReInitializeDSM(CustomScanState *node, ParallelContext *pcxt) +{ + const CustomExecMethods *methods = node->methods; + + if (methods->ReInitializeDSMCustomScan) + { + int plan_node_id = node->ss.ps.plan->plan_node_id; + void *coordinate; + + coordinate = shm_toc_lookup(pcxt->toc, plan_node_id, false); + methods->ReInitializeDSMCustomScan(node, pcxt, coordinate); + } +} + +void +ExecCustomScanInitializeWorker(CustomScanState *node, + ParallelWorkerContext *pwcxt) { const CustomExecMethods *methods = node->methods; @@ -204,8 +212,8 @@ ExecCustomScanInitializeWorker(CustomScanState *node, shm_toc *toc) int plan_node_id = node->ss.ps.plan->plan_node_id; void *coordinate; - coordinate = shm_toc_lookup(toc, plan_node_id, false); - methods->InitializeWorkerCustomScan(node, toc, coordinate); + coordinate = shm_toc_lookup(pwcxt->toc, plan_node_id, false); + methods->InitializeWorkerCustomScan(node, pwcxt->toc, coordinate); } } diff --git a/src/backend/executor/nodeForeignscan.c b/src/backend/executor/nodeForeignscan.c index 140e82ef5e..5d2cd0ed71 100644 --- a/src/backend/executor/nodeForeignscan.c +++ b/src/backend/executor/nodeForeignscan.c @@ -3,7 +3,7 @@ * nodeForeignscan.c * Routines to support scans of foreign tables * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * @@ -156,22 +156,8 @@ ExecInitForeignScan(ForeignScan *node, EState *estate, int eflags) ExecAssignExprContext(estate, &scanstate->ss.ps); /* - * initialize child expressions - */ - scanstate->ss.ps.qual = - ExecInitQual(node->scan.plan.qual, (PlanState *) scanstate); - scanstate->fdw_recheck_quals = - ExecInitQual(node->fdw_recheck_quals, (PlanState *) scanstate); - - /* - * tuple table initialization - */ - ExecInitResultTupleSlot(estate, &scanstate->ss.ps); - ExecInitScanTupleSlot(estate, &scanstate->ss); - - /* - * open the base relation, if any, and acquire an appropriate lock on it; - * also acquire function pointers from the FDW's handler + * open the scan relation, if any; also acquire function pointers from the + * FDW's handler */ if (scanrelid > 0) { @@ -194,23 +180,35 @@ ExecInitForeignScan(ForeignScan *node, EState *estate, int eflags) TupleDesc scan_tupdesc; scan_tupdesc = ExecTypeFromTL(node->fdw_scan_tlist, false); - ExecAssignScanType(&scanstate->ss, scan_tupdesc); + ExecInitScanTupleSlot(estate, 
&scanstate->ss, scan_tupdesc); /* Node's targetlist will contain Vars with varno = INDEX_VAR */ tlistvarno = INDEX_VAR; } else { - ExecAssignScanType(&scanstate->ss, RelationGetDescr(currentRelation)); + TupleDesc scan_tupdesc; + + /* don't trust FDWs to return tuples fulfilling NOT NULL constraints */ + scan_tupdesc = CreateTupleDescCopy(RelationGetDescr(currentRelation)); + ExecInitScanTupleSlot(estate, &scanstate->ss, scan_tupdesc); /* Node's targetlist will contain Vars with varno = scanrelid */ tlistvarno = scanrelid; } /* - * Initialize result tuple type and projection info. + * Initialize result slot, type and projection. */ - ExecAssignResultTypeFromTL(&scanstate->ss.ps); + ExecInitResultTypeTL(&scanstate->ss.ps); ExecAssignScanProjectionInfoWithVarno(&scanstate->ss, tlistvarno); + /* + * initialize child expressions + */ + scanstate->ss.ps.qual = + ExecInitQual(node->scan.plan.qual, (PlanState *) scanstate); + scanstate->fdw_recheck_quals = + ExecInitQual(node->fdw_recheck_quals, (PlanState *) scanstate); + /* * Initialize FDW-related state. */ @@ -258,12 +256,9 @@ ExecEndForeignScan(ForeignScanState *node) ExecFreeExprContext(&node->ss.ps); /* clean out the tuple table */ - ExecClearTuple(node->ss.ps.ps_ResultTupleSlot); + if (node->ss.ps.ps_ResultTupleSlot) + ExecClearTuple(node->ss.ps.ps_ResultTupleSlot); ExecClearTuple(node->ss.ss_ScanTupleSlot); - - /* close the relation. */ - if (node->ss.ss_currentRelation) - ExecCloseScanRelation(node->ss.ss_currentRelation); } /* ---------------------------------------------------------------- @@ -332,13 +327,35 @@ ExecForeignScanInitializeDSM(ForeignScanState *node, ParallelContext *pcxt) } /* ---------------------------------------------------------------- - * ExecForeignScanInitializeDSM + * ExecForeignScanReInitializeDSM + * + * Reset shared state before beginning a fresh scan. + * ---------------------------------------------------------------- + */ +void +ExecForeignScanReInitializeDSM(ForeignScanState *node, ParallelContext *pcxt) +{ + FdwRoutine *fdwroutine = node->fdwroutine; + + if (fdwroutine->ReInitializeDSMForeignScan) + { + int plan_node_id = node->ss.ps.plan->plan_node_id; + void *coordinate; + + coordinate = shm_toc_lookup(pcxt->toc, plan_node_id, false); + fdwroutine->ReInitializeDSMForeignScan(node, pcxt, coordinate); + } +} + +/* ---------------------------------------------------------------- + * ExecForeignScanInitializeWorker * * Initialization according to the parallel coordination information * ---------------------------------------------------------------- */ void -ExecForeignScanInitializeWorker(ForeignScanState *node, shm_toc *toc) +ExecForeignScanInitializeWorker(ForeignScanState *node, + ParallelWorkerContext *pwcxt) { FdwRoutine *fdwroutine = node->fdwroutine; @@ -347,8 +364,8 @@ ExecForeignScanInitializeWorker(ForeignScanState *node, shm_toc *toc) int plan_node_id = node->ss.ps.plan->plan_node_id; void *coordinate; - coordinate = shm_toc_lookup(toc, plan_node_id, false); - fdwroutine->InitializeWorkerForeignScan(node, toc, coordinate); + coordinate = shm_toc_lookup(pwcxt->toc, plan_node_id, false); + fdwroutine->InitializeWorkerForeignScan(node, pwcxt->toc, coordinate); } } diff --git a/src/backend/executor/nodeFunctionscan.c b/src/backend/executor/nodeFunctionscan.c index 9f87a7e5cd..0596adbb2f 100644 --- a/src/backend/executor/nodeFunctionscan.c +++ b/src/backend/executor/nodeFunctionscan.c @@ -3,7 +3,7 @@ * nodeFunctionscan.c * Support routines for scanning RangeFunctions (functions in rangetable). 
* - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * @@ -334,18 +334,6 @@ ExecInitFunctionScan(FunctionScan *node, EState *estate, int eflags) */ ExecAssignExprContext(estate, &scanstate->ss.ps); - /* - * tuple table initialization - */ - ExecInitResultTupleSlot(estate, &scanstate->ss.ps); - ExecInitScanTupleSlot(estate, &scanstate->ss); - - /* - * initialize child expressions - */ - scanstate->ss.ps.qual = - ExecInitQual(node->scan.plan.qual, (PlanState *) scanstate); - scanstate->funcstates = palloc(nfuncs * sizeof(FunctionScanPerFuncState)); natts = 0; @@ -383,7 +371,8 @@ ExecInitFunctionScan(FunctionScan *node, EState *estate, int eflags) &funcrettype, &tupdesc); - if (functypclass == TYPEFUNC_COMPOSITE) + if (functypclass == TYPEFUNC_COMPOSITE || + functypclass == TYPEFUNC_COMPOSITE_DOMAIN) { /* Composite data type, e.g. a table's row type */ Assert(tupdesc); @@ -435,8 +424,7 @@ ExecInitFunctionScan(FunctionScan *node, EState *estate, int eflags) */ if (!scanstate->simple) { - fs->func_slot = ExecInitExtraTupleSlot(estate); - ExecSetSlotDescriptor(fs->func_slot, fs->tupdesc); + fs->func_slot = ExecInitExtraTupleSlot(estate, fs->tupdesc); } else fs->func_slot = NULL; @@ -491,14 +479,23 @@ ExecInitFunctionScan(FunctionScan *node, EState *estate, int eflags) Assert(attno == natts); } - ExecAssignScanType(&scanstate->ss, scan_tupdesc); + /* + * Initialize scan slot and type. + */ + ExecInitScanTupleSlot(estate, &scanstate->ss, scan_tupdesc); /* - * Initialize result tuple type and projection info. + * Initialize result slot, type and projection. */ - ExecAssignResultTypeFromTL(&scanstate->ss.ps); + ExecInitResultTypeTL(&scanstate->ss.ps); ExecAssignScanProjectionInfo(&scanstate->ss); + /* + * initialize child expressions + */ + scanstate->ss.ps.qual = + ExecInitQual(node->scan.plan.qual, (PlanState *) scanstate); + /* * Create a memory context that ExecMakeTableFunctionResult can use to * evaluate function arguments in. We can't use the per-tuple context for @@ -532,7 +529,8 @@ ExecEndFunctionScan(FunctionScanState *node) /* * clean out the tuple table */ - ExecClearTuple(node->ss.ps.ps_ResultTupleSlot); + if (node->ss.ps.ps_ResultTupleSlot) + ExecClearTuple(node->ss.ps.ps_ResultTupleSlot); ExecClearTuple(node->ss.ss_ScanTupleSlot); /* @@ -566,7 +564,8 @@ ExecReScanFunctionScan(FunctionScanState *node) int i; Bitmapset *chgparam = node->ss.ps.chgParam; - ExecClearTuple(node->ss.ps.ps_ResultTupleSlot); + if (node->ss.ps.ps_ResultTupleSlot) + ExecClearTuple(node->ss.ps.ps_ResultTupleSlot); for (i = 0; i < node->nfuncs; i++) { FunctionScanPerFuncState *fs = &node->funcstates[i]; diff --git a/src/backend/executor/nodeGather.c b/src/backend/executor/nodeGather.c index e8d94ee6f3..afddb0a039 100644 --- a/src/backend/executor/nodeGather.c +++ b/src/backend/executor/nodeGather.c @@ -3,7 +3,7 @@ * nodeGather.c * Support routines for scanning a plan via multiple workers. 
* - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * A Gather executor launches parallel workers to run multiple copies of a @@ -38,6 +38,7 @@ #include "executor/nodeSubplan.h" #include "executor/tqueue.h" #include "miscadmin.h" +#include "optimizer/planmain.h" #include "pgstat.h" #include "utils/memutils.h" #include "utils/rel.h" @@ -58,7 +59,6 @@ ExecInitGather(Gather *node, EState *estate, int eflags) { GatherState *gatherstate; Plan *outerNode; - bool hasoid; TupleDesc tupDesc; /* Gather node doesn't have innerPlan node. */ @@ -71,7 +71,11 @@ ExecInitGather(Gather *node, EState *estate, int eflags) gatherstate->ps.plan = (Plan *) node; gatherstate->ps.state = estate; gatherstate->ps.ExecProcNode = ExecGather; - gatherstate->need_to_scan_locally = !node->single_copy; + + gatherstate->initialized = false; + gatherstate->need_to_scan_locally = + !node->single_copy && parallel_leader_participation; + gatherstate->tuples_needed = -1; /* * Miscellaneous initialization @@ -80,37 +84,29 @@ ExecInitGather(Gather *node, EState *estate, int eflags) */ ExecAssignExprContext(estate, &gatherstate->ps); - /* - * initialize child expressions - */ - gatherstate->ps.qual = - ExecInitQual(node->plan.qual, (PlanState *) gatherstate); - - /* - * tuple table initialization - */ - gatherstate->funnel_slot = ExecInitExtraTupleSlot(estate); - ExecInitResultTupleSlot(estate, &gatherstate->ps); - /* * now initialize outer plan */ outerNode = outerPlan(node); outerPlanState(gatherstate) = ExecInitNode(outerNode, estate, eflags); + tupDesc = ExecGetResultType(outerPlanState(gatherstate)); /* - * Initialize result tuple type and projection info. + * Initialize result type and projection. */ - ExecAssignResultTypeFromTL(&gatherstate->ps); - ExecAssignProjectionInfo(&gatherstate->ps, NULL); + ExecInitResultTypeTL(&gatherstate->ps); + ExecConditionalAssignProjectionInfo(&gatherstate->ps, tupDesc, OUTER_VAR); /* * Initialize funnel slot to same tuple descriptor as outer plan. */ - if (!ExecContextForcesOids(&gatherstate->ps, &hasoid)) - hasoid = false; - tupDesc = ExecTypeFromTL(outerNode->targetlist, hasoid); - ExecSetSlotDescriptor(gatherstate->funnel_slot, tupDesc); + gatherstate->funnel_slot = ExecInitExtraTupleSlot(estate, tupDesc); + + /* + * Gather doesn't support checking a qual (it's always more efficient to + * do it in the child node). + */ + Assert(!node->plan.qual); return gatherstate; } @@ -126,8 +122,6 @@ static TupleTableSlot * ExecGather(PlanState *pstate) { GatherState *node = castNode(GatherState, pstate); - TupleTableSlot *fslot = node->funnel_slot; - int i; TupleTableSlot *slot; ExprContext *econtext; @@ -148,15 +142,21 @@ ExecGather(PlanState *pstate) * Sometimes we might have to run without parallelism; but if parallel * mode is active then we can try to fire up some workers. */ - if (gather->num_workers > 0 && IsInParallelMode()) + if (gather->num_workers > 0 && estate->es_use_parallel_mode) { ParallelContext *pcxt; - /* Initialize the workers required to execute Gather node. */ + /* Initialize, or re-initialize, shared state needed by workers. 
*/ if (!node->pei) node->pei = ExecInitParallelPlan(node->ps.lefttree, estate, - gather->num_workers); + gather->initParam, + gather->num_workers, + node->tuples_needed); + else + ExecParallelReinitialize(node->ps.lefttree, + node->pei, + gather->initParam); /* * Register backend workers. We might not get as many as we @@ -164,45 +164,39 @@ ExecGather(PlanState *pstate) */ pcxt = node->pei->pcxt; LaunchParallelWorkers(pcxt); + /* We save # workers launched for the benefit of EXPLAIN */ node->nworkers_launched = pcxt->nworkers_launched; /* Set up tuple queue readers to read the results. */ if (pcxt->nworkers_launched > 0) { - node->nreaders = 0; - node->nextreader = 0; - node->reader = - palloc(pcxt->nworkers_launched * sizeof(TupleQueueReader *)); - - for (i = 0; i < pcxt->nworkers_launched; ++i) - { - shm_mq_set_handle(node->pei->tqueue[i], - pcxt->worker[i].bgwhandle); - node->reader[node->nreaders++] = - CreateTupleQueueReader(node->pei->tqueue[i], - fslot->tts_tupleDescriptor); - } + ExecParallelCreateReaders(node->pei); + /* Make a working array showing the active readers */ + node->nreaders = pcxt->nworkers_launched; + node->reader = (TupleQueueReader **) + palloc(node->nreaders * sizeof(TupleQueueReader *)); + memcpy(node->reader, node->pei->reader, + node->nreaders * sizeof(TupleQueueReader *)); } else { /* No workers? Then never mind. */ - ExecShutdownGatherWorkers(node); + node->nreaders = 0; + node->reader = NULL; } + node->nextreader = 0; } - /* Run plan locally if no workers or not single-copy. */ - node->need_to_scan_locally = (node->reader == NULL) - || !gather->single_copy; + /* Run plan locally if no workers or enabled and not single-copy. */ + node->need_to_scan_locally = (node->nreaders == 0) + || (!gather->single_copy && parallel_leader_participation); node->initialized = true; } /* * Reset per-tuple memory context to free any expression evaluation - * storage allocated in the previous tuple cycle. This will also clear - * any previous tuple returned by a TupleQueueReader; to make sure we - * don't leave a dangling pointer around, clear the working slot first. + * storage allocated in the previous tuple cycle. */ - ExecClearTuple(fslot); econtext = node->ps.ps_ExprContext; ResetExprContext(econtext); @@ -214,6 +208,10 @@ ExecGather(PlanState *pstate) if (TupIsNull(slot)) return NULL; + /* If no projection is required, we're done. */ + if (node->ps.ps_ProjInfo == NULL) + return slot; + /* * Form the result tuple using ExecProject(), and return it. 
*/ @@ -233,7 +231,8 @@ ExecEndGather(GatherState *node) ExecEndNode(outerPlanState(node)); /* let children clean up first */ ExecShutdownGather(node); ExecFreeExprContext(&node->ps); - ExecClearTuple(node->ps.ps_ResultTupleSlot); + if (node->ps.ps_ResultTupleSlot) + ExecClearTuple(node->ps.ps_ResultTupleSlot); } /* @@ -247,36 +246,34 @@ gather_getnext(GatherState *gatherstate) PlanState *outerPlan = outerPlanState(gatherstate); TupleTableSlot *outerTupleSlot; TupleTableSlot *fslot = gatherstate->funnel_slot; - MemoryContext tupleContext = gatherstate->ps.ps_ExprContext->ecxt_per_tuple_memory; HeapTuple tup; - while (gatherstate->reader != NULL || gatherstate->need_to_scan_locally) + while (gatherstate->nreaders > 0 || gatherstate->need_to_scan_locally) { CHECK_FOR_INTERRUPTS(); - if (gatherstate->reader != NULL) + if (gatherstate->nreaders > 0) { - MemoryContext oldContext; - - /* Run TupleQueueReaders in per-tuple context */ - oldContext = MemoryContextSwitchTo(tupleContext); tup = gather_readnext(gatherstate); - MemoryContextSwitchTo(oldContext); if (HeapTupleIsValid(tup)) { - ExecStoreTuple(tup, /* tuple to store */ - fslot, /* slot in which to store the tuple */ - InvalidBuffer, /* buffer associated with this - * tuple */ - false); /* slot should not pfree tuple */ + ExecStoreHeapTuple(tup, /* tuple to store */ + fslot, /* slot to store the tuple */ + true); /* pfree tuple when done with it */ return fslot; } } if (gatherstate->need_to_scan_locally) { + EState *estate = gatherstate->ps.state; + + /* Install our DSA area while executing the plan. */ + estate->es_query_dsa = + gatherstate->pei ? gatherstate->pei->area : NULL; outerTupleSlot = ExecProcNode(outerPlan); + estate->es_query_dsa = NULL; if (!TupIsNull(outerTupleSlot)) return outerTupleSlot; @@ -305,19 +302,25 @@ gather_readnext(GatherState *gatherstate) /* Check for async events, particularly messages from workers. */ CHECK_FOR_INTERRUPTS(); - /* Attempt to read a tuple, but don't block if none is available. */ + /* + * Attempt to read a tuple, but don't block if none is available. + * + * Note that TupleQueueReaderNext will just return NULL for a worker + * which fails to initialize. We'll treat that worker as having + * produced no tuples; WaitForParallelWorkersToFinish will error out + * when we get there. + */ Assert(gatherstate->nextreader < gatherstate->nreaders); reader = gatherstate->reader[gatherstate->nextreader]; tup = TupleQueueReaderNext(reader, true, &readerdone); /* - * If this reader is done, remove it. If all readers are done, clean - * up remaining worker state. + * If this reader is done, remove it from our working array of active + * readers. If all readers are done, we're outta here. */ if (readerdone) { Assert(!tup); - DestroyTupleQueueReader(reader); --gatherstate->nreaders; if (gatherstate->nreaders == 0) { @@ -370,37 +373,25 @@ gather_readnext(GatherState *gatherstate) /* ---------------------------------------------------------------- * ExecShutdownGatherWorkers * - * Destroy the parallel workers. Collect all the stats after - * workers are stopped, else some work done by workers won't be - * accounted. + * Stop all the parallel workers. * ---------------------------------------------------------------- */ static void ExecShutdownGatherWorkers(GatherState *node) { - /* Shut down tuple queue readers before shutting down workers. 
*/ - if (node->reader != NULL) - { - int i; - - for (i = 0; i < node->nreaders; ++i) - DestroyTupleQueueReader(node->reader[i]); - - pfree(node->reader); - node->reader = NULL; - } - - /* Now shut down the workers. */ if (node->pei != NULL) ExecParallelFinish(node->pei); + + /* Flush local copy of reader array */ + if (node->reader) + pfree(node->reader); + node->reader = NULL; } /* ---------------------------------------------------------------- * ExecShutdownGather * * Destroy the setup for parallel workers including parallel context. - * Collect all the stats after workers are stopped, else some work - * done by workers won't be accounted. * ---------------------------------------------------------------- */ void @@ -424,24 +415,42 @@ ExecShutdownGather(GatherState *node) /* ---------------------------------------------------------------- * ExecReScanGather * - * Re-initialize the workers and rescans a relation via them. + * Prepare to re-scan the result of a Gather. * ---------------------------------------------------------------- */ void ExecReScanGather(GatherState *node) { - /* - * Re-initialize the parallel workers to perform rescan of relation. We - * want to gracefully shutdown all the workers so that they should be able - * to propagate any error or other information to master backend before - * dying. Parallel context will be reused for rescan. - */ + Gather *gather = (Gather *) node->ps.plan; + PlanState *outerPlan = outerPlanState(node); + + /* Make sure any existing workers are gracefully shut down */ ExecShutdownGatherWorkers(node); + /* Mark node so that shared state will be rebuilt at next call */ node->initialized = false; - if (node->pei) - ExecParallelReinitialize(node->pei); + /* + * Set child node's chgParam to tell it that the next scan might deliver a + * different set of rows within the leader process. (The overall rowset + * shouldn't change, but the leader process's subset might; hence nodes + * between here and the parallel table scan node mustn't optimize on the + * assumption of an unchanging rowset.) + */ + if (gather->rescan_param >= 0) + outerPlan->chgParam = bms_add_member(outerPlan->chgParam, + gather->rescan_param); - ExecReScan(node->ps.lefttree); + /* + * If chgParam of subnode is not null then plan will be re-scanned by + * first ExecProcNode. Note: because this does nothing if we have a + * rescan_param, it's currently guaranteed that parallel-aware child nodes + * will not see a ReScan call until after they get a ReInitializeDSM call. + * That ordering might not be something to rely on, though. A good rule + * of thumb is that ReInitializeDSM should reset only shared state, ReScan + * should reset only local state, and anything that depends on both of + * those steps being finished must wait until the first ExecProcNode call. + */ + if (outerPlan->chgParam == NULL) + ExecReScan(outerPlan); } diff --git a/src/backend/executor/nodeGatherMerge.c b/src/backend/executor/nodeGatherMerge.c index 9a81e22510..7ae067f9eb 100644 --- a/src/backend/executor/nodeGatherMerge.c +++ b/src/backend/executor/nodeGatherMerge.c @@ -3,7 +3,7 @@ * nodeGatherMerge.c * Scan a plan in multiple workers, and do order-preserving merge. 
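The rescan logic above leans on the executor's chgParam convention: flagging the child's rescan parameter makes the child re-scan itself lazily at its next ExecProcNode call rather than immediately. A minimal sketch of that pattern for a hypothetical node type; ExampleState and example_rescan_param are illustrative names, not from the patch:

#include "postgres.h"
#include "executor/executor.h"
#include "nodes/bitmapset.h"

typedef struct ExampleState
{
	PlanState	ps;				/* first field must be the PlanState */
} ExampleState;

static void
ExecReScanExample(ExampleState *node, int example_rescan_param)
{
	PlanState  *outerPlan = outerPlanState(node);

	/* Invalidate whatever local state the previous scan built up here. */

	/* Flag the parameter change; the child re-scans at its next call. */
	if (example_rescan_param >= 0)
		outerPlan->chgParam = bms_add_member(outerPlan->chgParam,
											 example_rescan_param);

	/* Re-scan immediately only when no parameter change is pending. */
	if (outerPlan->chgParam == NULL)
		ExecReScan(outerPlan);
}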
* - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * IDENTIFICATION @@ -23,37 +23,46 @@ #include "executor/tqueue.h" #include "lib/binaryheap.h" #include "miscadmin.h" +#include "optimizer/planmain.h" #include "utils/memutils.h" #include "utils/rel.h" -/* - * Tuple array for each worker - */ -typedef struct GMReaderTupleBuffer -{ - HeapTuple *tuple; - int readCounter; - int nTuples; - bool done; -} GMReaderTupleBuffer; - /* * When we read tuples from workers, it's a good idea to read several at once * for efficiency when possible: this minimizes context-switching overhead. * But reading too many at a time wastes memory without improving performance. + * We'll read up to MAX_TUPLE_STORE tuples (in addition to the first one). */ #define MAX_TUPLE_STORE 10 +/* + * Pending-tuple array for each worker. This holds additional tuples that + * we were able to fetch from the worker, but can't process yet. In addition, + * this struct holds the "done" flag indicating the worker is known to have + * no more tuples. (We do not use this struct for the leader; we don't keep + * any pending tuples for the leader, and the need_to_scan_locally flag serves + * as its "done" indicator.) + */ +typedef struct GMReaderTupleBuffer +{ + HeapTuple *tuple; /* array of length MAX_TUPLE_STORE */ + int nTuples; /* number of tuples currently stored */ + int readCounter; /* index of next tuple to extract */ + bool done; /* true if reader is known exhausted */ +} GMReaderTupleBuffer; + static TupleTableSlot *ExecGatherMerge(PlanState *pstate); static int32 heap_compare_slots(Datum a, Datum b, void *arg); static TupleTableSlot *gather_merge_getnext(GatherMergeState *gm_state); static HeapTuple gm_readnext_tuple(GatherMergeState *gm_state, int nreader, bool nowait, bool *done); -static void gather_merge_init(GatherMergeState *gm_state); static void ExecShutdownGatherMergeWorkers(GatherMergeState *node); +static void gather_merge_setup(GatherMergeState *gm_state); +static void gather_merge_init(GatherMergeState *gm_state); +static void gather_merge_clear_tuples(GatherMergeState *gm_state); static bool gather_merge_readnext(GatherMergeState *gm_state, int reader, bool nowait); -static void form_tuple_array(GatherMergeState *gm_state, int reader); +static void load_tuple_array(GatherMergeState *gm_state, int reader); /* ---------------------------------------------------------------- * ExecInitGather @@ -64,7 +73,6 @@ ExecInitGatherMerge(GatherMerge *node, EState *estate, int eflags) { GatherMergeState *gm_state; Plan *outerNode; - bool hasoid; TupleDesc tupDesc; /* Gather merge node doesn't have innerPlan node. */ @@ -78,6 +86,10 @@ ExecInitGatherMerge(GatherMerge *node, EState *estate, int eflags) gm_state->ps.state = estate; gm_state->ps.ExecProcNode = ExecGatherMerge; + gm_state->initialized = false; + gm_state->gm_initialized = false; + gm_state->tuples_needed = -1; + /* * Miscellaneous initialization * @@ -86,15 +98,10 @@ ExecInitGatherMerge(GatherMerge *node, EState *estate, int eflags) ExecAssignExprContext(estate, &gm_state->ps); /* - * initialize child expressions + * GatherMerge doesn't support checking a qual (it's always more efficient + * to do it in the child node). 
*/ - gm_state->ps.qual = - ExecInitQual(node->plan.qual, &gm_state->ps); - - /* - * tuple table initialization - */ - ExecInitResultTupleSlot(estate, &gm_state->ps); + Assert(!node->plan.qual); /* * now initialize outer plan @@ -103,12 +110,17 @@ ExecInitGatherMerge(GatherMerge *node, EState *estate, int eflags) outerPlanState(gm_state) = ExecInitNode(outerNode, estate, eflags); /* - * Initialize result tuple type and projection info. + * Store the tuple descriptor into gather merge state, so we can use it + * while initializing the gather merge slots. */ - ExecAssignResultTypeFromTL(&gm_state->ps); - ExecAssignProjectionInfo(&gm_state->ps, NULL); + tupDesc = ExecGetResultType(outerPlanState(gm_state)); + gm_state->tupDesc = tupDesc; - gm_state->gm_initialized = false; + /* + * Initialize result type and projection. + */ + ExecInitResultTypeTL(&gm_state->ps); + ExecConditionalAssignProjectionInfo(&gm_state->ps, tupDesc, OUTER_VAR); /* * initialize sort-key information @@ -140,14 +152,8 @@ ExecInitGatherMerge(GatherMerge *node, EState *estate, int eflags) } } - /* - * store the tuple descriptor into gather merge state, so we can use it - * later while initializing the gather merge slots. - */ - if (!ExecContextForcesOids(&gm_state->ps, &hasoid)) - hasoid = false; - tupDesc = ExecTypeFromTL(outerNode->targetlist, hasoid); - gm_state->tupDesc = tupDesc; + /* Now allocate the workspace for gather merge */ + gather_merge_setup(gm_state); return gm_state; } @@ -165,7 +171,6 @@ ExecGatherMerge(PlanState *pstate) GatherMergeState *node = castNode(GatherMergeState, pstate); TupleTableSlot *slot; ExprContext *econtext; - int i; CHECK_FOR_INTERRUPTS(); @@ -176,54 +181,56 @@ ExecGatherMerge(PlanState *pstate) if (!node->initialized) { EState *estate = node->ps.state; - GatherMerge *gm = (GatherMerge *) node->ps.plan; + GatherMerge *gm = castNode(GatherMerge, node->ps.plan); /* * Sometimes we might have to run without parallelism; but if parallel * mode is active then we can try to fire up some workers. */ - if (gm->num_workers > 0 && IsInParallelMode()) + if (gm->num_workers > 0 && estate->es_use_parallel_mode) { ParallelContext *pcxt; - /* Initialize data structures for workers. */ + /* Initialize, or re-initialize, shared state needed by workers. */ if (!node->pei) node->pei = ExecInitParallelPlan(node->ps.lefttree, estate, - gm->num_workers); + gm->initParam, + gm->num_workers, + node->tuples_needed); + else + ExecParallelReinitialize(node->ps.lefttree, + node->pei, + gm->initParam); /* Try to launch workers. */ pcxt = node->pei->pcxt; LaunchParallelWorkers(pcxt); + /* We save # workers launched for the benefit of EXPLAIN */ node->nworkers_launched = pcxt->nworkers_launched; /* Set up tuple queue readers to read the results. */ if (pcxt->nworkers_launched > 0) { - node->nreaders = 0; - node->reader = palloc(pcxt->nworkers_launched * - sizeof(TupleQueueReader *)); - - Assert(gm->numCols); - - for (i = 0; i < pcxt->nworkers_launched; ++i) - { - shm_mq_set_handle(node->pei->tqueue[i], - pcxt->worker[i].bgwhandle); - node->reader[node->nreaders++] = - CreateTupleQueueReader(node->pei->tqueue[i], - node->tupDesc); - } + ExecParallelCreateReaders(node->pei); + /* Make a working array showing the active readers */ + node->nreaders = pcxt->nworkers_launched; + node->reader = (TupleQueueReader **) + palloc(node->nreaders * sizeof(TupleQueueReader *)); + memcpy(node->reader, node->pei->reader, + node->nreaders * sizeof(TupleQueueReader *)); } else { /* No workers? Then never mind. 
*/ - ExecShutdownGatherMergeWorkers(node); + node->nreaders = 0; + node->reader = NULL; } } - /* always allow leader to participate */ - node->need_to_scan_locally = true; + /* allow leader to participate if enabled or no choice */ + if (parallel_leader_participation || node->nreaders == 0) + node->need_to_scan_locally = true; node->initialized = true; } @@ -242,10 +249,12 @@ ExecGatherMerge(PlanState *pstate) if (TupIsNull(slot)) return NULL; + /* If no projection is required, we're done. */ + if (node->ps.ps_ProjInfo == NULL) + return slot; + /* - * form the result tuple using ExecProject(), and return it --- unless the - * projection produces an empty set, in which case we must loop back - * around for another tuple + * Form the result tuple using ExecProject(), and return it. */ econtext->ecxt_outertuple = slot; return ExecProject(node->ps.ps_ProjInfo); @@ -263,15 +272,14 @@ ExecEndGatherMerge(GatherMergeState *node) ExecEndNode(outerPlanState(node)); /* let children clean up first */ ExecShutdownGatherMerge(node); ExecFreeExprContext(&node->ps); - ExecClearTuple(node->ps.ps_ResultTupleSlot); + if (node->ps.ps_ResultTupleSlot) + ExecClearTuple(node->ps.ps_ResultTupleSlot); } /* ---------------------------------------------------------------- * ExecShutdownGatherMerge * * Destroy the setup for parallel workers including parallel context. - * Collect all the stats after workers are stopped, else some work - * done by workers won't be accounted. * ---------------------------------------------------------------- */ void @@ -290,159 +298,225 @@ ExecShutdownGatherMerge(GatherMergeState *node) /* ---------------------------------------------------------------- * ExecShutdownGatherMergeWorkers * - * Destroy the parallel workers. Collect all the stats after - * workers are stopped, else some work done by workers won't be - * accounted. + * Stop all the parallel workers. * ---------------------------------------------------------------- */ static void ExecShutdownGatherMergeWorkers(GatherMergeState *node) { - /* Shut down tuple queue readers before shutting down workers. */ - if (node->reader != NULL) - { - int i; - - for (i = 0; i < node->nreaders; ++i) - if (node->reader[i]) - DestroyTupleQueueReader(node->reader[i]); - - pfree(node->reader); - node->reader = NULL; - } - - /* Now shut down the workers. */ if (node->pei != NULL) ExecParallelFinish(node->pei); + + /* Flush local copy of reader array */ + if (node->reader) + pfree(node->reader); + node->reader = NULL; } /* ---------------------------------------------------------------- * ExecReScanGatherMerge * - * Re-initialize the workers and rescans a relation via them. + * Prepare to re-scan the result of a GatherMerge. * ---------------------------------------------------------------- */ void ExecReScanGatherMerge(GatherMergeState *node) { - /* - * Re-initialize the parallel workers to perform rescan of relation. We - * want to gracefully shutdown all the workers so that they should be able - * to propagate any error or other information to master backend before - * dying. Parallel context will be reused for rescan. 
- */ + GatherMerge *gm = (GatherMerge *) node->ps.plan; + PlanState *outerPlan = outerPlanState(node); + + /* Make sure any existing workers are gracefully shut down */ ExecShutdownGatherMergeWorkers(node); + /* Free any unused tuples, so we don't leak memory across rescans */ + gather_merge_clear_tuples(node); + + /* Mark node so that shared state will be rebuilt at next call */ node->initialized = false; + node->gm_initialized = false; - if (node->pei) - ExecParallelReinitialize(node->pei); + /* + * Set child node's chgParam to tell it that the next scan might deliver a + * different set of rows within the leader process. (The overall rowset + * shouldn't change, but the leader process's subset might; hence nodes + * between here and the parallel table scan node mustn't optimize on the + * assumption of an unchanging rowset.) + */ + if (gm->rescan_param >= 0) + outerPlan->chgParam = bms_add_member(outerPlan->chgParam, + gm->rescan_param); - ExecReScan(node->ps.lefttree); + /* + * If chgParam of subnode is not null then plan will be re-scanned by + * first ExecProcNode. Note: because this does nothing if we have a + * rescan_param, it's currently guaranteed that parallel-aware child nodes + * will not see a ReScan call until after they get a ReInitializeDSM call. + * That ordering might not be something to rely on, though. A good rule + * of thumb is that ReInitializeDSM should reset only shared state, ReScan + * should reset only local state, and anything that depends on both of + * those steps being finished must wait until the first ExecProcNode call. + */ + if (outerPlan->chgParam == NULL) + ExecReScan(outerPlan); } /* - * Initialize the Gather merge tuple read. + * Set up the data structures that we'll need for Gather Merge. + * + * We allocate these once on the basis of gm->num_workers, which is an + * upper bound for the number of workers we'll actually have. During + * a rescan, we reset the structures to empty. This approach simplifies + * not leaking memory across rescans. * - * Pull at least a single tuple from each worker + leader and set up the heap. + * In the gm_slots[] array, index 0 is for the leader, and indexes 1 to n + * are for workers. The values placed into gm_heap correspond to indexes + * in gm_slots[]. The gm_tuple_buffers[] array, however, is indexed from + * 0 to n-1; it has no entry for the leader. */ static void -gather_merge_init(GatherMergeState *gm_state) +gather_merge_setup(GatherMergeState *gm_state) { - int nreaders = gm_state->nreaders; - bool initialize = true; + GatherMerge *gm = castNode(GatherMerge, gm_state->ps.plan); + int nreaders = gm->num_workers; int i; /* - * Allocate gm_slots for the number of worker + one more slot for leader. - * Last slot is always for leader. Leader always calls ExecProcNode() to - * read the tuple which will return the TupleTableSlot. Later it will - * directly get assigned to gm_slot. So just initialize leader gm_slot - * with NULL. For other slots below code will call - * ExecInitExtraTupleSlot() which will do the initialization of worker - * slots. + * Allocate gm_slots for the number of workers + one more slot for leader. + * Slot 0 is always for the leader. Leader always calls ExecProcNode() to + * read the tuple, and then stores it directly into its gm_slots entry. + * For other slots, code below will call ExecInitExtraTupleSlot() to + * create a slot for the worker's results. 
Note that during any single + * scan, we might have fewer than num_workers available workers, in which + * case the extra array entries go unused. */ - gm_state->gm_slots = - palloc((gm_state->nreaders + 1) * sizeof(TupleTableSlot *)); - gm_state->gm_slots[gm_state->nreaders] = NULL; - - /* Initialize the tuple slot and tuple array for each worker */ - gm_state->gm_tuple_buffers = - (GMReaderTupleBuffer *) palloc0(sizeof(GMReaderTupleBuffer) * - (gm_state->nreaders + 1)); - for (i = 0; i < gm_state->nreaders; i++) + gm_state->gm_slots = (TupleTableSlot **) + palloc0((nreaders + 1) * sizeof(TupleTableSlot *)); + + /* Allocate the tuple slot and tuple array for each worker */ + gm_state->gm_tuple_buffers = (GMReaderTupleBuffer *) + palloc0(nreaders * sizeof(GMReaderTupleBuffer)); + + for (i = 0; i < nreaders; i++) { - /* Allocate the tuple array with MAX_TUPLE_STORE size */ + /* Allocate the tuple array with length MAX_TUPLE_STORE */ gm_state->gm_tuple_buffers[i].tuple = (HeapTuple *) palloc0(sizeof(HeapTuple) * MAX_TUPLE_STORE); - /* Initialize slot for worker */ - gm_state->gm_slots[i] = ExecInitExtraTupleSlot(gm_state->ps.state); - ExecSetSlotDescriptor(gm_state->gm_slots[i], - gm_state->tupDesc); + /* Initialize tuple slot for worker */ + gm_state->gm_slots[i + 1] = + ExecInitExtraTupleSlot(gm_state->ps.state, gm_state->tupDesc); } /* Allocate the resources for the merge */ - gm_state->gm_heap = binaryheap_allocate(gm_state->nreaders + 1, + gm_state->gm_heap = binaryheap_allocate(nreaders + 1, heap_compare_slots, gm_state); +} + +/* + * Initialize the Gather Merge. + * + * Reset data structures to ensure they're empty. Then pull at least one + * tuple from leader + each worker (or set its "done" indicator), and set up + * the heap. + */ +static void +gather_merge_init(GatherMergeState *gm_state) +{ + int nreaders = gm_state->nreaders; + bool nowait = true; + int i; + + /* Assert that gather_merge_setup made enough space */ + Assert(nreaders <= castNode(GatherMerge, gm_state->ps.plan)->num_workers); + + /* Reset leader's tuple slot to empty */ + gm_state->gm_slots[0] = NULL; + + /* Reset the tuple slot and tuple array for each worker */ + for (i = 0; i < nreaders; i++) + { + /* Reset tuple array to empty */ + gm_state->gm_tuple_buffers[i].nTuples = 0; + gm_state->gm_tuple_buffers[i].readCounter = 0; + /* Reset done flag to not-done */ + gm_state->gm_tuple_buffers[i].done = false; + /* Ensure output slot is empty */ + ExecClearTuple(gm_state->gm_slots[i + 1]); + } + + /* Reset binary heap to empty */ + binaryheap_reset(gm_state->gm_heap); /* * First, try to read a tuple from each worker (including leader) in - * nowait mode, so that we initialize read from each worker as well as - * leader. After this, if all active workers are unable to produce a - * tuple, then re-read and this time use wait mode. For workers that were - * able to produce a tuple in the earlier loop and are still active, just - * try to fill the tuple array if more tuples are avaiable. + * nowait mode. After this, if not all workers were able to produce a + * tuple (or a "done" indication), then re-read from remaining workers, + * this time using wait mode. Add all live readers (those producing at + * least one tuple) to the heap. */ reread: - for (i = 0; i < nreaders + 1; i++) + for (i = 0; i <= nreaders; i++) { CHECK_FOR_INTERRUPTS(); - if (!gm_state->gm_tuple_buffers[i].done && - (TupIsNull(gm_state->gm_slots[i]) || - gm_state->gm_slots[i]->tts_isempty)) + /* skip this source if already known done */ + if ((i == 0) ? 
gm_state->need_to_scan_locally : + !gm_state->gm_tuple_buffers[i - 1].done) { - if (gather_merge_readnext(gm_state, i, initialize)) + if (TupIsNull(gm_state->gm_slots[i])) { - binaryheap_add_unordered(gm_state->gm_heap, - Int32GetDatum(i)); + /* Don't have a tuple yet, try to get one */ + if (gather_merge_readnext(gm_state, i, nowait)) + binaryheap_add_unordered(gm_state->gm_heap, + Int32GetDatum(i)); + } + else + { + /* + * We already got at least one tuple from this worker, but + * might as well see if it has any more ready by now. + */ + load_tuple_array(gm_state, i); } } - else - form_tuple_array(gm_state, i); } - initialize = false; - for (i = 0; i < nreaders; i++) - if (!gm_state->gm_tuple_buffers[i].done && - (TupIsNull(gm_state->gm_slots[i]) || - gm_state->gm_slots[i]->tts_isempty)) + /* need not recheck leader, since nowait doesn't matter for it */ + for (i = 1; i <= nreaders; i++) + { + if (!gm_state->gm_tuple_buffers[i - 1].done && + TupIsNull(gm_state->gm_slots[i])) + { + nowait = false; goto reread; + } + } + /* Now heapify the heap. */ binaryheap_build(gm_state->gm_heap); + gm_state->gm_initialized = true; } /* - * Clear out the tuple table slots for each gather merge input. + * Clear out the tuple table slot, and any unused pending tuples, + * for each gather merge input. */ static void -gather_merge_clear_slots(GatherMergeState *gm_state) +gather_merge_clear_tuples(GatherMergeState *gm_state) { int i; for (i = 0; i < gm_state->nreaders; i++) { - pfree(gm_state->gm_tuple_buffers[i].tuple); - gm_state->gm_slots[i] = ExecClearTuple(gm_state->gm_slots[i]); - } + GMReaderTupleBuffer *tuple_buffer = &gm_state->gm_tuple_buffers[i]; - /* Free tuple array as we don't need it any more */ - pfree(gm_state->gm_tuple_buffers); - /* Free the binaryheap, which was created for sort */ - binaryheap_free(gm_state->gm_heap); + while (tuple_buffer->readCounter < tuple_buffer->nTuples) + heap_freetuple(tuple_buffer->tuple[tuple_buffer->readCounter++]); + + ExecClearTuple(gm_state->gm_slots[i + 1]); + } } /* @@ -476,13 +550,16 @@ gather_merge_getnext(GatherMergeState *gm_state) if (gather_merge_readnext(gm_state, i, false)) binaryheap_replace_first(gm_state->gm_heap, Int32GetDatum(i)); else + { + /* reader exhausted, remove it from heap */ (void) binaryheap_remove_first(gm_state->gm_heap); + } } if (binaryheap_empty(gm_state->gm_heap)) { /* All the queues are exhausted, and so is the heap */ - gather_merge_clear_slots(gm_state); + gather_merge_clear_tuples(gm_state); return NULL; } else @@ -494,37 +571,37 @@ gather_merge_getnext(GatherMergeState *gm_state) } /* - * Read the tuple for given reader in nowait mode, and form the tuple array. + * Read tuple(s) for given reader in nowait mode, and load into its tuple + * array, until we have MAX_TUPLE_STORE of them or would have to block. */ static void -form_tuple_array(GatherMergeState *gm_state, int reader) +load_tuple_array(GatherMergeState *gm_state, int reader) { - GMReaderTupleBuffer *tuple_buffer = &gm_state->gm_tuple_buffers[reader]; + GMReaderTupleBuffer *tuple_buffer; int i; - /* Last slot is for leader and we don't build tuple array for leader */ - if (reader == gm_state->nreaders) + /* Don't do anything if this is the leader. */ + if (reader == 0) return; - /* - * We here because we already read all the tuples from the tuple array, so - * initialize the counter to zero. - */ + tuple_buffer = &gm_state->gm_tuple_buffers[reader - 1]; + + /* If there's nothing in the array, reset the counters to zero. 
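Restating the indexing convention established in gather_merge_setup above: source 0 is the leader and sources 1..nreaders are workers, so a worker's pending-tuple buffer sits one slot lower than its gm_slots entry. A tiny helper making that mapping explicit, assuming it lived inside nodeGatherMerge.c; example_buffer_for_source is an illustrative name, not part of the patch:

/* Pending-tuple buffer for a non-leader source i (1..nreaders). */
static inline GMReaderTupleBuffer *
example_buffer_for_source(GatherMergeState *gm_state, int i)
{
	Assert(i > 0);				/* source 0 is the leader; it has no buffer */
	return &gm_state->gm_tuple_buffers[i - 1];
}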
*/ if (tuple_buffer->nTuples == tuple_buffer->readCounter) tuple_buffer->nTuples = tuple_buffer->readCounter = 0; - /* Tuple array is already full? */ - if (tuple_buffer->nTuples == MAX_TUPLE_STORE) - return; - + /* Try to fill additional slots in the array. */ for (i = tuple_buffer->nTuples; i < MAX_TUPLE_STORE; i++) { - tuple_buffer->tuple[i] = heap_copytuple(gm_readnext_tuple(gm_state, - reader, - false, - &tuple_buffer->done)); - if (!HeapTupleIsValid(tuple_buffer->tuple[i])) + HeapTuple tuple; + + tuple = gm_readnext_tuple(gm_state, + reader, + true, + &tuple_buffer->done); + if (!HeapTupleIsValid(tuple)) break; + tuple_buffer->tuple[i] = tuple; tuple_buffer->nTuples++; } } @@ -532,111 +609,108 @@ form_tuple_array(GatherMergeState *gm_state, int reader) /* * Store the next tuple for a given reader into the appropriate slot. * - * Returns false if the reader is exhausted, and true otherwise. + * Returns true if successful, false if not (either reader is exhausted, + * or we didn't want to wait for a tuple). Sets done flag if reader + * is found to be exhausted. */ static bool gather_merge_readnext(GatherMergeState *gm_state, int reader, bool nowait) { GMReaderTupleBuffer *tuple_buffer; - HeapTuple tup = NULL; + HeapTuple tup; /* * If we're being asked to generate a tuple from the leader, then we just * call ExecProcNode as normal to produce one. */ - if (gm_state->nreaders == reader) + if (reader == 0) { if (gm_state->need_to_scan_locally) { PlanState *outerPlan = outerPlanState(gm_state); TupleTableSlot *outerTupleSlot; + EState *estate = gm_state->ps.state; + /* Install our DSA area while executing the plan. */ + estate->es_query_dsa = gm_state->pei ? gm_state->pei->area : NULL; outerTupleSlot = ExecProcNode(outerPlan); + estate->es_query_dsa = NULL; if (!TupIsNull(outerTupleSlot)) { - gm_state->gm_slots[reader] = outerTupleSlot; + gm_state->gm_slots[0] = outerTupleSlot; return true; } - gm_state->gm_tuple_buffers[reader].done = true; + /* need_to_scan_locally serves as "done" flag for leader */ gm_state->need_to_scan_locally = false; } return false; } /* Otherwise, check the state of the relevant tuple buffer. */ - tuple_buffer = &gm_state->gm_tuple_buffers[reader]; + tuple_buffer = &gm_state->gm_tuple_buffers[reader - 1]; if (tuple_buffer->nTuples > tuple_buffer->readCounter) { /* Return any tuple previously read that is still buffered. */ - tuple_buffer = &gm_state->gm_tuple_buffers[reader]; tup = tuple_buffer->tuple[tuple_buffer->readCounter++]; } else if (tuple_buffer->done) { /* Reader is known to be exhausted. */ - DestroyTupleQueueReader(gm_state->reader[reader]); - gm_state->reader[reader] = NULL; return false; } else { /* Read and buffer next tuple. */ - tup = heap_copytuple(gm_readnext_tuple(gm_state, - reader, - nowait, - &tuple_buffer->done)); + tup = gm_readnext_tuple(gm_state, + reader, + nowait, + &tuple_buffer->done); + if (!HeapTupleIsValid(tup)) + return false; /* * Attempt to read more tuples in nowait mode and store them in the - * tuple array. + * pending-tuple array for the reader. 
*/ - if (HeapTupleIsValid(tup)) - form_tuple_array(gm_state, reader); - else - return false; + load_tuple_array(gm_state, reader); } Assert(HeapTupleIsValid(tup)); /* Build the TupleTableSlot for the given tuple */ - ExecStoreTuple(tup, /* tuple to store */ - gm_state->gm_slots[reader], /* slot in which to store the - * tuple */ - InvalidBuffer, /* buffer associated with this tuple */ - true); /* pfree this pointer if not from heap */ + ExecStoreHeapTuple(tup, /* tuple to store */ + gm_state->gm_slots[reader], /* slot in which to store + * the tuple */ + true); /* pfree tuple when done with it */ return true; } /* - * Attempt to read a tuple from given reader. + * Attempt to read a tuple from given worker. */ static HeapTuple gm_readnext_tuple(GatherMergeState *gm_state, int nreader, bool nowait, bool *done) { TupleQueueReader *reader; - HeapTuple tup = NULL; - MemoryContext oldContext; - MemoryContext tupleContext; - - tupleContext = gm_state->ps.ps_ExprContext->ecxt_per_tuple_memory; - - if (done != NULL) - *done = false; + HeapTuple tup; /* Check for async events, particularly messages from workers. */ CHECK_FOR_INTERRUPTS(); - /* Attempt to read a tuple. */ - reader = gm_state->reader[nreader]; - - /* Run TupleQueueReaders in per-tuple context */ - oldContext = MemoryContextSwitchTo(tupleContext); + /* + * Attempt to read a tuple. + * + * Note that TupleQueueReaderNext will just return NULL for a worker which + * fails to initialize. We'll treat that worker as having produced no + * tuples; WaitForParallelWorkersToFinish will error out when we get + * there. + */ + reader = gm_state->reader[nreader - 1]; tup = TupleQueueReaderNext(reader, nowait, done); - MemoryContextSwitchTo(oldContext); return tup; } @@ -682,7 +756,10 @@ heap_compare_slots(Datum a, Datum b, void *arg) datum2, isNull2, sortKey); if (compare != 0) - return -compare; + { + INVERT_COMPARE_RESULT(compare); + return compare; + } } return 0; } diff --git a/src/backend/executor/nodeGroup.c b/src/backend/executor/nodeGroup.c index ab4ae24a6b..9c1e51bc95 100644 --- a/src/backend/executor/nodeGroup.c +++ b/src/backend/executor/nodeGroup.c @@ -3,7 +3,7 @@ * nodeGroup.c * Routines to handle group nodes (used for queries with GROUP BY clause). * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * @@ -25,6 +25,7 @@ #include "executor/executor.h" #include "executor/nodeGroup.h" #include "miscadmin.h" +#include "utils/memutils.h" /* @@ -37,8 +38,6 @@ ExecGroup(PlanState *pstate) { GroupState *node = castNode(GroupState, pstate); ExprContext *econtext; - int numCols; - AttrNumber *grpColIdx; TupleTableSlot *firsttupleslot; TupleTableSlot *outerslot; @@ -50,8 +49,6 @@ ExecGroup(PlanState *pstate) if (node->grp_done) return NULL; econtext = node->ss.ps.ps_ExprContext; - numCols = ((Group *) node->ss.ps.plan)->numCols; - grpColIdx = ((Group *) node->ss.ps.plan)->grpColIdx; /* * The ScanTupleSlot holds the (copied) first tuple of each group. @@ -59,7 +56,7 @@ ExecGroup(PlanState *pstate) firsttupleslot = node->ss.ss_ScanTupleSlot; /* - * We need not call ResetExprContext here because execTuplesMatch will + * We need not call ResetExprContext here because ExecQualAndReset() will * reset the per-tuple memory context once per input tuple. 
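On the heap_compare_slots change above: returning -compare is unsafe when a comparison function returns INT_MIN, since negating INT_MIN overflows. INVERT_COMPARE_RESULT() sidesteps that by mapping every negative value to 1 and negating otherwise; its definition in c.h amounts to the following, shown here with an EXAMPLE_ prefix to keep it clearly illustrative:

/* Safely invert the sign of a comparison result, even for INT_MIN. */
#define EXAMPLE_INVERT_COMPARE_RESULT(var) \
	((var) = ((var) < 0) ? 1 : -(var))

/* usage: EXAMPLE_INVERT_COMPARE_RESULT(compare); return compare; */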
*/ @@ -73,7 +70,7 @@ ExecGroup(PlanState *pstate) if (TupIsNull(outerslot)) { /* empty input, so return nothing */ - node->grp_done = TRUE; + node->grp_done = true; return NULL; } /* Copy tuple into firsttupleslot */ @@ -116,7 +113,7 @@ ExecGroup(PlanState *pstate) if (TupIsNull(outerslot)) { /* no more groups, so we're done */ - node->grp_done = TRUE; + node->grp_done = true; return NULL; } @@ -124,10 +121,9 @@ ExecGroup(PlanState *pstate) * Compare with first tuple and see if this tuple is of the same * group. If so, ignore it and keep scanning. */ - if (!execTuplesMatch(firsttupleslot, outerslot, - numCols, grpColIdx, - node->eqfunctions, - econtext->ecxt_per_tuple_memory)) + econtext->ecxt_innertuple = firsttupleslot; + econtext->ecxt_outertuple = outerslot; + if (!ExecQualAndReset(node->eqfunction, econtext)) break; } @@ -177,47 +173,44 @@ ExecInitGroup(Group *node, EState *estate, int eflags) grpstate->ss.ps.plan = (Plan *) node; grpstate->ss.ps.state = estate; grpstate->ss.ps.ExecProcNode = ExecGroup; - grpstate->grp_done = FALSE; + grpstate->grp_done = false; /* * create expression context */ ExecAssignExprContext(estate, &grpstate->ss.ps); - /* - * tuple table initialization - */ - ExecInitScanTupleSlot(estate, &grpstate->ss); - ExecInitResultTupleSlot(estate, &grpstate->ss.ps); - - /* - * initialize child expressions - */ - grpstate->ss.ps.qual = - ExecInitQual(node->plan.qual, (PlanState *) grpstate); - /* * initialize child nodes */ outerPlanState(grpstate) = ExecInitNode(outerPlan(node), estate, eflags); /* - * initialize tuple type. + * Initialize scan slot and type. */ - ExecAssignScanTypeFromOuterPlan(&grpstate->ss); + ExecCreateScanSlotFromOuterPlan(estate, &grpstate->ss); /* - * Initialize result tuple type and projection info. + * Initialize result slot, type and projection. */ - ExecAssignResultTypeFromTL(&grpstate->ss.ps); + ExecInitResultTupleSlotTL(&grpstate->ss.ps); ExecAssignProjectionInfo(&grpstate->ss.ps, NULL); + /* + * initialize child expressions + */ + grpstate->ss.ps.qual = + ExecInitQual(node->plan.qual, (PlanState *) grpstate); + /* * Precompute fmgr lookup data for inner loop */ - grpstate->eqfunctions = - execTuplesMatchPrepare(node->numCols, - node->grpOperators); + grpstate->eqfunction = + execTuplesMatchPrepare(ExecGetResultType(outerPlanState(grpstate)), + node->numCols, + node->grpColIdx, + node->grpOperators, + &grpstate->ss.ps); return grpstate; } @@ -246,7 +239,7 @@ ExecReScanGroup(GroupState *node) { PlanState *outerPlan = outerPlanState(node); - node->grp_done = FALSE; + node->grp_done = false; /* must clear first tuple */ ExecClearTuple(node->ss.ss_ScanTupleSlot); diff --git a/src/backend/executor/nodeHash.c b/src/backend/executor/nodeHash.c index d10d94ccc2..a9f812d66b 100644 --- a/src/backend/executor/nodeHash.c +++ b/src/backend/executor/nodeHash.c @@ -3,13 +3,15 @@ * nodeHash.c * Routines to hash relations for hashjoin * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * * IDENTIFICATION * src/backend/executor/nodeHash.c * + * See note on parallelism in nodeHashjoin.c. 
+ * *------------------------------------------------------------------------- */ /* @@ -25,6 +27,7 @@ #include #include "access/htup_details.h" +#include "access/parallel.h" #include "catalog/pg_statistic.h" #include "commands/tablespace.h" #include "executor/execdebug.h" @@ -32,6 +35,8 @@ #include "executor/nodeHash.h" #include "executor/nodeHashjoin.h" #include "miscadmin.h" +#include "pgstat.h" +#include "port/atomics.h" #include "utils/dynahash.h" #include "utils/memutils.h" #include "utils/lsyscache.h" @@ -40,6 +45,8 @@ static void ExecHashIncreaseNumBatches(HashJoinTable hashtable); static void ExecHashIncreaseNumBuckets(HashJoinTable hashtable); +static void ExecParallelHashIncreaseNumBatches(HashJoinTable hashtable); +static void ExecParallelHashIncreaseNumBuckets(HashJoinTable hashtable); static void ExecHashBuildSkewHash(HashJoinTable hashtable, Hash *node, int mcvsToUse); static void ExecHashSkewTableInsert(HashJoinTable hashtable, @@ -49,6 +56,30 @@ static void ExecHashSkewTableInsert(HashJoinTable hashtable, static void ExecHashRemoveNextSkewBucket(HashJoinTable hashtable); static void *dense_alloc(HashJoinTable hashtable, Size size); +static HashJoinTuple ExecParallelHashTupleAlloc(HashJoinTable hashtable, + size_t size, + dsa_pointer *shared); +static void MultiExecPrivateHash(HashState *node); +static void MultiExecParallelHash(HashState *node); +static inline HashJoinTuple ExecParallelHashFirstTuple(HashJoinTable table, + int bucketno); +static inline HashJoinTuple ExecParallelHashNextTuple(HashJoinTable table, + HashJoinTuple tuple); +static inline void ExecParallelHashPushTuple(dsa_pointer_atomic *head, + HashJoinTuple tuple, + dsa_pointer tuple_shared); +static void ExecParallelHashJoinSetUpBatches(HashJoinTable hashtable, int nbatch); +static void ExecParallelHashEnsureBatchAccessors(HashJoinTable hashtable); +static void ExecParallelHashRepartitionFirst(HashJoinTable hashtable); +static void ExecParallelHashRepartitionRest(HashJoinTable hashtable); +static HashMemoryChunk ExecParallelHashPopChunkQueue(HashJoinTable table, + dsa_pointer *shared); +static bool ExecParallelHashTuplePrealloc(HashJoinTable hashtable, + int batchno, + size_t size); +static void ExecParallelHashMergeCounters(HashJoinTable hashtable); +static void ExecParallelHashCloseBatchAccessors(HashJoinTable hashtable); + /* ---------------------------------------------------------------- * ExecHash @@ -72,6 +103,39 @@ ExecHash(PlanState *pstate) */ Node * MultiExecHash(HashState *node) +{ + /* must provide our own instrumentation support */ + if (node->ps.instrument) + InstrStartNode(node->ps.instrument); + + if (node->parallel_state != NULL) + MultiExecParallelHash(node); + else + MultiExecPrivateHash(node); + + /* must provide our own instrumentation support */ + if (node->ps.instrument) + InstrStopNode(node->ps.instrument, node->hashtable->partialTuples); + + /* + * We do not return the hash table directly because it's not a subtype of + * Node, and so would violate the MultiExecProcNode API. Instead, our + * parent Hashjoin node is expected to know how to fish it out of our node + * state. Ugly but not really worth cleaning up, since Hashjoin knows + * quite a bit more about Hash besides that. + */ + return NULL; +} + +/* ---------------------------------------------------------------- + * MultiExecPrivateHash + * + * parallel-oblivious version, building a backend-private + * hash table and (if necessary) batch files. 
+ * ---------------------------------------------------------------- + */ +static void +MultiExecPrivateHash(HashState *node) { PlanState *outerNode; List *hashkeys; @@ -80,10 +144,6 @@ MultiExecHash(HashState *node) ExprContext *econtext; uint32 hashvalue; - /* must provide our own instrumentation support */ - if (node->ps.instrument) - InstrStartNode(node->ps.instrument); - /* * get state info from node */ @@ -138,18 +198,148 @@ MultiExecHash(HashState *node) if (hashtable->spaceUsed > hashtable->spacePeak) hashtable->spacePeak = hashtable->spaceUsed; - /* must provide our own instrumentation support */ - if (node->ps.instrument) - InstrStopNode(node->ps.instrument, hashtable->totalTuples); + hashtable->partialTuples = hashtable->totalTuples; +} + +/* ---------------------------------------------------------------- + * MultiExecParallelHash + * + * parallel-aware version, building a shared hash table and + * (if necessary) batch files using the combined effort of + * a set of co-operating backends. + * ---------------------------------------------------------------- + */ +static void +MultiExecParallelHash(HashState *node) +{ + ParallelHashJoinState *pstate; + PlanState *outerNode; + List *hashkeys; + HashJoinTable hashtable; + TupleTableSlot *slot; + ExprContext *econtext; + uint32 hashvalue; + Barrier *build_barrier; + int i; /* - * We do not return the hash table directly because it's not a subtype of - * Node, and so would violate the MultiExecProcNode API. Instead, our - * parent Hashjoin node is expected to know how to fish it out of our node - * state. Ugly but not really worth cleaning up, since Hashjoin knows - * quite a bit more about Hash besides that. + * get state info from node */ - return NULL; + outerNode = outerPlanState(node); + hashtable = node->hashtable; + + /* + * set expression context + */ + hashkeys = node->hashkeys; + econtext = node->ps.ps_ExprContext; + + /* + * Synchronize the parallel hash table build. At this stage we know that + * the shared hash table has been or is being set up by + * ExecHashTableCreate(), but we don't know if our peers have returned + * from there or are here in MultiExecParallelHash(), and if so how far + * through they are. To find out, we check the build_barrier phase then + * and jump to the right step in the build algorithm. + */ + pstate = hashtable->parallel_state; + build_barrier = &pstate->build_barrier; + Assert(BarrierPhase(build_barrier) >= PHJ_BUILD_ALLOCATING); + switch (BarrierPhase(build_barrier)) + { + case PHJ_BUILD_ALLOCATING: + + /* + * Either I just allocated the initial hash table in + * ExecHashTableCreate(), or someone else is doing that. Either + * way, wait for everyone to arrive here so we can proceed. + */ + BarrierArriveAndWait(build_barrier, WAIT_EVENT_HASH_BUILD_ALLOCATING); + /* Fall through. */ + + case PHJ_BUILD_HASHING_INNER: + + /* + * It's time to begin hashing, or if we just arrived here then + * hashing is already underway, so join in that effort. While + * hashing we have to be prepared to help increase the number of + * batches or buckets at any time, and if we arrived here when + * that was already underway we'll have to help complete that work + * immediately so that it's safe to access batches and buckets + * below. 
+ */ + if (PHJ_GROW_BATCHES_PHASE(BarrierAttach(&pstate->grow_batches_barrier)) != + PHJ_GROW_BATCHES_ELECTING) + ExecParallelHashIncreaseNumBatches(hashtable); + if (PHJ_GROW_BUCKETS_PHASE(BarrierAttach(&pstate->grow_buckets_barrier)) != + PHJ_GROW_BUCKETS_ELECTING) + ExecParallelHashIncreaseNumBuckets(hashtable); + ExecParallelHashEnsureBatchAccessors(hashtable); + ExecParallelHashTableSetCurrentBatch(hashtable, 0); + for (;;) + { + slot = ExecProcNode(outerNode); + if (TupIsNull(slot)) + break; + econtext->ecxt_innertuple = slot; + if (ExecHashGetHashValue(hashtable, econtext, hashkeys, + false, hashtable->keepNulls, + &hashvalue)) + ExecParallelHashTableInsert(hashtable, slot, hashvalue); + hashtable->partialTuples++; + } + + /* + * Make sure that any tuples we wrote to disk are visible to + * others before anyone tries to load them. + */ + for (i = 0; i < hashtable->nbatch; ++i) + sts_end_write(hashtable->batches[i].inner_tuples); + + /* + * Update shared counters. We need an accurate total tuple count + * to control the empty table optimization. + */ + ExecParallelHashMergeCounters(hashtable); + + BarrierDetach(&pstate->grow_buckets_barrier); + BarrierDetach(&pstate->grow_batches_barrier); + + /* + * Wait for everyone to finish building and flushing files and + * counters. + */ + if (BarrierArriveAndWait(build_barrier, + WAIT_EVENT_HASH_BUILD_HASHING_INNER)) + { + /* + * Elect one backend to disable any further growth. Batches + * are now fixed. While building them we made sure they'd fit + * in our memory budget when we load them back in later (or we + * tried to do that and gave up because we detected extreme + * skew). + */ + pstate->growth = PHJ_GROWTH_DISABLED; + } + } + + /* + * We're not yet attached to a batch. We all agree on the dimensions and + * number of inner tuples (for the empty table optimization). + */ + hashtable->curbatch = -1; + hashtable->nbuckets = pstate->nbuckets; + hashtable->log2_nbuckets = my_log2(hashtable->nbuckets); + hashtable->totalTuples = pstate->total_tuples; + ExecParallelHashEnsureBatchAccessors(hashtable); + + /* + * The next synchronization point is in ExecHashJoin's HJ_BUILD_HASHTABLE + * case, which will bring the build phase to PHJ_BUILD_DONE (if it isn't + * there already). + */ + Assert(BarrierPhase(build_barrier) == PHJ_BUILD_HASHING_OUTER || + BarrierPhase(build_barrier) == PHJ_BUILD_DONE); } /* ---------------------------------------------------------------- @@ -183,29 +373,24 @@ ExecInitHash(Hash *node, EState *estate, int eflags) */ ExecAssignExprContext(estate, &hashstate->ps); - /* - * initialize our result slot - */ - ExecInitResultTupleSlot(estate, &hashstate->ps); - - /* - * initialize child expressions - */ - hashstate->ps.qual = - ExecInitQual(node->plan.qual, (PlanState *) hashstate); - /* * initialize child nodes */ outerPlanState(hashstate) = ExecInitNode(outerPlan(node), estate, eflags); /* - * initialize tuple type. no need to initialize projection info because - * this node doesn't do projections + * initialize our result slot and type. No need to build projection + * because this node doesn't do projections. 
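MultiExecParallelHash above drives the shared build through phases of pstate->build_barrier. The Barrier primitive it relies on (storage/barrier.h) follows an attach / arrive-and-wait / detach pattern in which exactly one waiter is elected at each phase change. A minimal usage sketch; the placeholder comments stand in for real per-phase work, and a real wait-event constant would normally be passed instead of 0:

#include "postgres.h"
#include "storage/barrier.h"

static void
example_barrier_participant(Barrier *barrier)
{
	int			phase = BarrierAttach(barrier); /* join; returns current phase */

	(void) phase;

	/* ... contribute to the work of the current phase ... */

	/*
	 * Wait until every attached participant has arrived.  Exactly one
	 * caller sees a true result and can run the serial part of the phase
	 * transition (e.g. allocating shared structures).
	 */
	if (BarrierArriveAndWait(barrier, 0))
	{
		/* elected participant's serial work goes here */
	}

	BarrierDetach(barrier);		/* stop participating */
}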
*/ - ExecAssignResultTypeFromTL(&hashstate->ps); + ExecInitResultTupleSlotTL(&hashstate->ps); hashstate->ps.ps_ProjInfo = NULL; + /* + * initialize child expressions + */ + hashstate->ps.qual = + ExecInitQual(node->plan.qual, (PlanState *) hashstate); + return hashstate; } @@ -240,12 +425,15 @@ ExecEndHash(HashState *node) * ---------------------------------------------------------------- */ HashJoinTable -ExecHashTableCreate(Hash *node, List *hashOperators, bool keepNulls) +ExecHashTableCreate(HashState *state, List *hashOperators, bool keepNulls) { + Hash *node; HashJoinTable hashtable; Plan *outerNode; + size_t space_allowed; int nbuckets; int nbatch; + double rows; int num_skew_mcvs; int log2_nbuckets; int nkeys; @@ -258,10 +446,22 @@ ExecHashTableCreate(Hash *node, List *hashOperators, bool keepNulls) * "outer" subtree of this node, but the inner relation of the hashjoin). * Compute the appropriate size of the hash table. */ + node = (Hash *) state->ps.plan; outerNode = outerPlan(node); - ExecChooseHashTableSize(outerNode->plan_rows, outerNode->plan_width, + /* + * If this is shared hash table with a partial plan, then we can't use + * outerNode->plan_rows to estimate its size. We need an estimate of the + * total number of rows across all copies of the partial plan. + */ + rows = node->plan.parallel_aware ? node->rows_total : outerNode->plan_rows; + + ExecChooseHashTableSize(rows, outerNode->plan_width, OidIsValid(node->skewTable), + state->parallel_state != NULL, + state->parallel_state != NULL ? + state->parallel_state->nparticipants - 1 : 0, + &space_allowed, &nbuckets, &nbatch, &num_skew_mcvs); /* nbuckets must be a power of 2 */ @@ -272,7 +472,8 @@ ExecHashTableCreate(Hash *node, List *hashOperators, bool keepNulls) * Initialize the hash table control block. * * The hashtable control block is just palloc'd from the executor's - * per-query memory context. + * per-query memory context. Everything else should be kept inside the + * subsidiary hashCxt or batchCxt. */ hashtable = (HashJoinTable) palloc(sizeof(HashJoinTableData)); hashtable->nbuckets = nbuckets; @@ -280,7 +481,7 @@ ExecHashTableCreate(Hash *node, List *hashOperators, bool keepNulls) hashtable->nbuckets_optimal = nbuckets; hashtable->log2_nbuckets = log2_nbuckets; hashtable->log2_nbuckets_optimal = log2_nbuckets; - hashtable->buckets = NULL; + hashtable->buckets.unshared = NULL; hashtable->keepNulls = keepNulls; hashtable->skewEnabled = false; hashtable->skewBucket = NULL; @@ -293,22 +494,43 @@ ExecHashTableCreate(Hash *node, List *hashOperators, bool keepNulls) hashtable->nbatch_outstart = nbatch; hashtable->growEnabled = true; hashtable->totalTuples = 0; + hashtable->partialTuples = 0; hashtable->skewTuples = 0; hashtable->innerBatchFile = NULL; hashtable->outerBatchFile = NULL; hashtable->spaceUsed = 0; hashtable->spacePeak = 0; - hashtable->spaceAllowed = work_mem * 1024L; + hashtable->spaceAllowed = space_allowed; hashtable->spaceUsedSkew = 0; hashtable->spaceAllowedSkew = hashtable->spaceAllowed * SKEW_WORK_MEM_PERCENT / 100; hashtable->chunks = NULL; + hashtable->current_chunk = NULL; + hashtable->parallel_state = state->parallel_state; + hashtable->area = state->ps.state->es_query_dsa; + hashtable->batches = NULL; #ifdef HJDEBUG printf("Hashjoin %p: initial nbatch = %d, nbuckets = %d\n", hashtable, nbatch, nbuckets); #endif + /* + * Create temporary memory contexts in which to keep the hashtable working + * storage. See notes in executor/hashjoin.h. 
+ */ + hashtable->hashCxt = AllocSetContextCreate(CurrentMemoryContext, + "HashTableContext", + ALLOCSET_DEFAULT_SIZES); + + hashtable->batchCxt = AllocSetContextCreate(hashtable->hashCxt, + "HashBatchContext", + ALLOCSET_DEFAULT_SIZES); + + /* Allocate data that will live for the life of the hashjoin */ + + oldcxt = MemoryContextSwitchTo(hashtable->hashCxt); + /* * Get info about the hash functions to be used for each hash key. Also * remember whether the join operators are strict. @@ -335,26 +557,11 @@ ExecHashTableCreate(Hash *node, List *hashOperators, bool keepNulls) i++; } - /* - * Create temporary memory contexts in which to keep the hashtable working - * storage. See notes in executor/hashjoin.h. - */ - hashtable->hashCxt = AllocSetContextCreate(CurrentMemoryContext, - "HashTableContext", - ALLOCSET_DEFAULT_SIZES); - - hashtable->batchCxt = AllocSetContextCreate(hashtable->hashCxt, - "HashBatchContext", - ALLOCSET_DEFAULT_SIZES); - - /* Allocate data that will live for the life of the hashjoin */ - - oldcxt = MemoryContextSwitchTo(hashtable->hashCxt); - - if (nbatch > 1) + if (nbatch > 1 && hashtable->parallel_state == NULL) { /* - * allocate and initialize the file arrays in hashCxt + * allocate and initialize the file arrays in hashCxt (not needed for + * parallel case which uses shared tuplestores instead of raw files) */ hashtable->innerBatchFile = (BufFile **) palloc0(nbatch * sizeof(BufFile *)); @@ -365,23 +572,77 @@ ExecHashTableCreate(Hash *node, List *hashOperators, bool keepNulls) PrepareTempTablespaces(); } - /* - * Prepare context for the first-scan space allocations; allocate the - * hashbucket array therein, and set each bucket "empty". - */ - MemoryContextSwitchTo(hashtable->batchCxt); + MemoryContextSwitchTo(oldcxt); - hashtable->buckets = (HashJoinTuple *) - palloc0(nbuckets * sizeof(HashJoinTuple)); + if (hashtable->parallel_state) + { + ParallelHashJoinState *pstate = hashtable->parallel_state; + Barrier *build_barrier; - /* - * Set up for skew optimization, if possible and there's a need for more - * than one batch. (In a one-batch join, there's no point in it.) - */ - if (nbatch > 1) - ExecHashBuildSkewHash(hashtable, node, num_skew_mcvs); + /* + * Attach to the build barrier. The corresponding detach operation is + * in ExecHashTableDetach. Note that we won't attach to the + * batch_barrier for batch 0 yet. We'll attach later and start it out + * in PHJ_BATCH_PROBING phase, because batch 0 is allocated up front + * and then loaded while hashing (the standard hybrid hash join + * algorithm), and we'll coordinate that using build_barrier. + */ + build_barrier = &pstate->build_barrier; + BarrierAttach(build_barrier); - MemoryContextSwitchTo(oldcxt); + /* + * So far we have no idea whether there are any other participants, + * and if so, what phase they are working on. The only thing we care + * about at this point is whether someone has already created the + * SharedHashJoinBatch objects and the hash table for batch 0. One + * backend will be elected to do that now if necessary. + */ + if (BarrierPhase(build_barrier) == PHJ_BUILD_ELECTING && + BarrierArriveAndWait(build_barrier, WAIT_EVENT_HASH_BUILD_ELECTING)) + { + pstate->nbatch = nbatch; + pstate->space_allowed = space_allowed; + pstate->growth = PHJ_GROWTH_OK; + + /* Set up the shared state for coordinating batches. */ + ExecParallelHashJoinSetUpBatches(hashtable, nbatch); + + /* + * Allocate batch 0's hash table up front so we can load it + * directly while hashing. 
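The "elect one participant" idiom above relies on BarrierArriveAndWait() returning true in exactly one backend. For illustration only (not part of this patch), a minimal standalone pthreads sketch of the same idiom, where pthread_barrier_wait()'s PTHREAD_BARRIER_SERIAL_THREAD return value plays the role of BarrierArriveAndWait() returning true; the participant count and names are made up:

#include <pthread.h>
#include <stdio.h>

#define NPARTICIPANTS 4

static pthread_barrier_t build_barrier;

static void *
participant(void *arg)
{
	/* ... each participant does its share of the parallel work ... */

	/* exactly one caller gets PTHREAD_BARRIER_SERIAL_THREAD back */
	if (pthread_barrier_wait(&build_barrier) == PTHREAD_BARRIER_SERIAL_THREAD)
		printf("participant %ld elected to run the serial set-up step\n",
			   (long) arg);

	/* ... everyone then continues with the next phase ... */
	return NULL;
}

int
main(void)
{
	pthread_t	threads[NPARTICIPANTS];
	long		i;

	pthread_barrier_init(&build_barrier, NULL, NPARTICIPANTS);
	for (i = 0; i < NPARTICIPANTS; i++)
		pthread_create(&threads[i], NULL, participant, (void *) i);
	for (i = 0; i < NPARTICIPANTS; i++)
		pthread_join(threads[i], NULL);
	pthread_barrier_destroy(&build_barrier);
	return 0;
}

Build with -pthread; the point is only that exactly one participant performs the serial step (creating shared batches, or later disabling growth) while the others wait and then proceed.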
+ */ + pstate->nbuckets = nbuckets; + ExecParallelHashTableAlloc(hashtable, 0); + } + + /* + * The next Parallel Hash synchronization point is in + * MultiExecParallelHash(), which will progress it all the way to + * PHJ_BUILD_DONE. The caller must not return control from this + * executor node between now and then. + */ + } + else + { + /* + * Prepare context for the first-scan space allocations; allocate the + * hashbucket array therein, and set each bucket "empty". + */ + MemoryContextSwitchTo(hashtable->batchCxt); + + hashtable->buckets.unshared = (HashJoinTuple *) + palloc0(nbuckets * sizeof(HashJoinTuple)); + + /* + * Set up for skew optimization, if possible and there's a need for + * more than one batch. (In a one-batch join, there's no point in + * it.) + */ + if (nbatch > 1) + ExecHashBuildSkewHash(hashtable, node, num_skew_mcvs); + + MemoryContextSwitchTo(oldcxt); + } return hashtable; } @@ -399,6 +660,9 @@ ExecHashTableCreate(Hash *node, List *hashOperators, bool keepNulls) void ExecChooseHashTableSize(double ntuples, int tupwidth, bool useskew, + bool try_combined_work_mem, + int parallel_workers, + size_t *space_allowed, int *numbuckets, int *numbatches, int *num_skew_mcvs) @@ -433,6 +697,16 @@ ExecChooseHashTableSize(double ntuples, int tupwidth, bool useskew, */ hash_table_bytes = work_mem * 1024L; + /* + * Parallel Hash tries to use the combined work_mem of all workers to + * avoid the need to batch. If that won't work, it falls back to work_mem + * per worker and tries to process batches in parallel. + */ + if (try_combined_work_mem) + hash_table_bytes += hash_table_bytes * parallel_workers; + + *space_allowed = hash_table_bytes; + /* * If skew optimization is possible, estimate the number of skew buckets * that will fit in the memory allowed, and decrement the assumed space @@ -478,7 +752,7 @@ ExecChooseHashTableSize(double ntuples, int tupwidth, bool useskew, * Note that both nbuckets and nbatch must be powers of 2 to make * ExecHashGetBucketAndBatch fast. */ - max_pointers = (work_mem * 1024L) / sizeof(HashJoinTuple); + max_pointers = *space_allowed / sizeof(HashJoinTuple); max_pointers = Min(max_pointers, MaxAllocSize / sizeof(HashJoinTuple)); /* If max_pointers isn't a power of 2, must round it down to one */ mppow2 = 1L << my_log2(max_pointers); @@ -510,6 +784,21 @@ ExecChooseHashTableSize(double ntuples, int tupwidth, bool useskew, int minbatch; long bucket_size; + /* + * If Parallel Hash with combined work_mem would still need multiple + * batches, we'll have to fall back to regular work_mem budget. + */ + if (try_combined_work_mem) + { + ExecChooseHashTableSize(ntuples, tupwidth, useskew, + false, parallel_workers, + space_allowed, + numbuckets, + numbatches, + num_skew_mcvs); + return; + } + /* * Estimate the number of buckets we'll want to have when work_mem is * entirely full. Each bucket will contain a bucket pointer plus @@ -564,14 +853,17 @@ ExecHashTableDestroy(HashJoinTable hashtable) /* * Make sure all the temp files are closed. We skip batch 0, since it * can't have any temp files (and the arrays might not even exist if - * nbatch is only 1). + * nbatch is only 1). Parallel hash joins don't use these files. 
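The combined-work_mem arithmetic above (hash_table_bytes += hash_table_bytes * parallel_workers) is simple but easy to misread. A standalone sketch of the budget calculation, not part of this patch, with illustrative numbers and work_mem expressed in kilobytes as the GUC is:

#include <stdio.h>

/* stand-ins for the work_mem GUC (kB) and the planner's worker count */
static size_t
combined_hash_budget(size_t work_mem_kb, int parallel_workers)
{
	size_t		hash_table_bytes = work_mem_kb * 1024;

	/* leader plus each worker contributes one work_mem's worth */
	hash_table_bytes += hash_table_bytes * parallel_workers;
	return hash_table_bytes;
}

int
main(void)
{
	/* work_mem = 4MB and 3 workers: a shared 16MB no-batch budget */
	printf("%zu bytes\n", combined_hash_budget(4096, 3));
	return 0;
}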
*/ - for (i = 1; i < hashtable->nbatch; i++) + if (hashtable->innerBatchFile != NULL) { - if (hashtable->innerBatchFile[i]) - BufFileClose(hashtable->innerBatchFile[i]); - if (hashtable->outerBatchFile[i]) - BufFileClose(hashtable->outerBatchFile[i]); + for (i = 1; i < hashtable->nbatch; i++) + { + if (hashtable->innerBatchFile[i]) + BufFileClose(hashtable->innerBatchFile[i]); + if (hashtable->outerBatchFile[i]) + BufFileClose(hashtable->outerBatchFile[i]); + } } /* Release working memory (batchCxt is a child, so it goes away too) */ @@ -657,8 +949,9 @@ ExecHashIncreaseNumBatches(HashJoinTable hashtable) hashtable->nbuckets = hashtable->nbuckets_optimal; hashtable->log2_nbuckets = hashtable->log2_nbuckets_optimal; - hashtable->buckets = repalloc(hashtable->buckets, - sizeof(HashJoinTuple) * hashtable->nbuckets); + hashtable->buckets.unshared = + repalloc(hashtable->buckets.unshared, + sizeof(HashJoinTuple) * hashtable->nbuckets); } /* @@ -666,14 +959,15 @@ ExecHashIncreaseNumBatches(HashJoinTable hashtable) * buckets now and not have to keep track which tuples in the buckets have * already been processed. We will free the old chunks as we go. */ - memset(hashtable->buckets, 0, sizeof(HashJoinTuple) * hashtable->nbuckets); + memset(hashtable->buckets.unshared, 0, + sizeof(HashJoinTuple) * hashtable->nbuckets); oldchunks = hashtable->chunks; hashtable->chunks = NULL; /* so, let's scan through the old chunks, and all tuples in each chunk */ while (oldchunks != NULL) { - HashMemoryChunk nextchunk = oldchunks->next; + HashMemoryChunk nextchunk = oldchunks->next.unshared; /* position within the buffer (up to oldchunks->used) */ size_t idx = 0; @@ -681,7 +975,7 @@ ExecHashIncreaseNumBatches(HashJoinTable hashtable) /* process all tuples stored in this chunk (and then free it) */ while (idx < oldchunks->used) { - HashJoinTuple hashTuple = (HashJoinTuple) (oldchunks->data + idx); + HashJoinTuple hashTuple = (HashJoinTuple) (HASH_CHUNK_DATA(oldchunks) + idx); MinimalTuple tuple = HJTUPLE_MINTUPLE(hashTuple); int hashTupleSize = (HJTUPLE_OVERHEAD + tuple->t_len); int bucketno; @@ -700,8 +994,8 @@ ExecHashIncreaseNumBatches(HashJoinTable hashtable) memcpy(copyTuple, hashTuple, hashTupleSize); /* and add it back to the appropriate bucket */ - copyTuple->next = hashtable->buckets[bucketno]; - hashtable->buckets[bucketno] = copyTuple; + copyTuple->next.unshared = hashtable->buckets.unshared[bucketno]; + hashtable->buckets.unshared[bucketno] = copyTuple; } else { @@ -751,73 +1045,535 @@ ExecHashIncreaseNumBatches(HashJoinTable hashtable) } /* - * ExecHashIncreaseNumBuckets - * increase the original number of buckets in order to reduce - * number of tuples per bucket + * ExecParallelHashIncreaseNumBatches + * Every participant attached to grow_barrier must run this function + * when it observes growth == PHJ_GROWTH_NEED_MORE_BATCHES. 
*/ static void -ExecHashIncreaseNumBuckets(HashJoinTable hashtable) +ExecParallelHashIncreaseNumBatches(HashJoinTable hashtable) { - HashMemoryChunk chunk; + ParallelHashJoinState *pstate = hashtable->parallel_state; + int i; - /* do nothing if not an increase (it's called increase for a reason) */ - if (hashtable->nbuckets >= hashtable->nbuckets_optimal) - return; + Assert(BarrierPhase(&pstate->build_barrier) == PHJ_BUILD_HASHING_INNER); -#ifdef HJDEBUG - printf("Hashjoin %p: increasing nbuckets %d => %d\n", - hashtable, hashtable->nbuckets, hashtable->nbuckets_optimal); -#endif + /* + * It's unlikely, but we need to be prepared for new participants to show + * up while we're in the middle of this operation so we need to switch on + * barrier phase here. + */ + switch (PHJ_GROW_BATCHES_PHASE(BarrierPhase(&pstate->grow_batches_barrier))) + { + case PHJ_GROW_BATCHES_ELECTING: - hashtable->nbuckets = hashtable->nbuckets_optimal; - hashtable->log2_nbuckets = hashtable->log2_nbuckets_optimal; + /* + * Elect one participant to prepare to grow the number of batches. + * This involves reallocating or resetting the buckets of batch 0 + * in preparation for all participants to begin repartitioning the + * tuples. + */ + if (BarrierArriveAndWait(&pstate->grow_batches_barrier, + WAIT_EVENT_HASH_GROW_BATCHES_ELECTING)) + { + dsa_pointer_atomic *buckets; + ParallelHashJoinBatch *old_batch0; + int new_nbatch; + int i; + + /* Move the old batch out of the way. */ + old_batch0 = hashtable->batches[0].shared; + pstate->old_batches = pstate->batches; + pstate->old_nbatch = hashtable->nbatch; + pstate->batches = InvalidDsaPointer; + + /* Free this backend's old accessors. */ + ExecParallelHashCloseBatchAccessors(hashtable); + + /* Figure out how many batches to use. */ + if (hashtable->nbatch == 1) + { + /* + * We are going from single-batch to multi-batch. We need + * to switch from one large combined memory budget to the + * regular work_mem budget. + */ + pstate->space_allowed = work_mem * 1024L; + + /* + * The combined work_mem of all participants wasn't + * enough. Therefore one batch per participant would be + * approximately equivalent and would probably also be + * insufficient. So try two batches per particiant, + * rounded up to a power of two. + */ + new_nbatch = 1 << my_log2(pstate->nparticipants * 2); + } + else + { + /* + * We were already multi-batched. Try doubling the number + * of batches. + */ + new_nbatch = hashtable->nbatch * 2; + } + + /* Allocate new larger generation of batches. */ + Assert(hashtable->nbatch == pstate->nbatch); + ExecParallelHashJoinSetUpBatches(hashtable, new_nbatch); + Assert(hashtable->nbatch == pstate->nbatch); + + /* Replace or recycle batch 0's bucket array. */ + if (pstate->old_nbatch == 1) + { + double dtuples; + double dbuckets; + int new_nbuckets; + + /* + * We probably also need a smaller bucket array. How many + * tuples do we expect per batch, assuming we have only + * half of them so far? Normally we don't need to change + * the bucket array's size, because the size of each batch + * stays the same as we add more batches, but in this + * special case we move from a large batch to many smaller + * batches and it would be wasteful to keep the large + * array. 
+ */ + dtuples = (old_batch0->ntuples * 2.0) / new_nbatch; + dbuckets = ceil(dtuples / NTUP_PER_BUCKET); + dbuckets = Min(dbuckets, + MaxAllocSize / sizeof(dsa_pointer_atomic)); + new_nbuckets = (int) dbuckets; + new_nbuckets = Max(new_nbuckets, 1024); + new_nbuckets = 1 << my_log2(new_nbuckets); + dsa_free(hashtable->area, old_batch0->buckets); + hashtable->batches[0].shared->buckets = + dsa_allocate(hashtable->area, + sizeof(dsa_pointer_atomic) * new_nbuckets); + buckets = (dsa_pointer_atomic *) + dsa_get_address(hashtable->area, + hashtable->batches[0].shared->buckets); + for (i = 0; i < new_nbuckets; ++i) + dsa_pointer_atomic_init(&buckets[i], InvalidDsaPointer); + pstate->nbuckets = new_nbuckets; + } + else + { + /* Recycle the existing bucket array. */ + hashtable->batches[0].shared->buckets = old_batch0->buckets; + buckets = (dsa_pointer_atomic *) + dsa_get_address(hashtable->area, old_batch0->buckets); + for (i = 0; i < hashtable->nbuckets; ++i) + dsa_pointer_atomic_write(&buckets[i], InvalidDsaPointer); + } + + /* Move all chunks to the work queue for parallel processing. */ + pstate->chunk_work_queue = old_batch0->chunks; + + /* Disable further growth temporarily while we're growing. */ + pstate->growth = PHJ_GROWTH_DISABLED; + } + else + { + /* All other participants just flush their tuples to disk. */ + ExecParallelHashCloseBatchAccessors(hashtable); + } + /* Fall through. */ + + case PHJ_GROW_BATCHES_ALLOCATING: + /* Wait for the above to be finished. */ + BarrierArriveAndWait(&pstate->grow_batches_barrier, + WAIT_EVENT_HASH_GROW_BATCHES_ALLOCATING); + /* Fall through. */ + + case PHJ_GROW_BATCHES_REPARTITIONING: + /* Make sure that we have the current dimensions and buckets. */ + ExecParallelHashEnsureBatchAccessors(hashtable); + ExecParallelHashTableSetCurrentBatch(hashtable, 0); + /* Then partition, flush counters. */ + ExecParallelHashRepartitionFirst(hashtable); + ExecParallelHashRepartitionRest(hashtable); + ExecParallelHashMergeCounters(hashtable); + /* Wait for the above to be finished. */ + BarrierArriveAndWait(&pstate->grow_batches_barrier, + WAIT_EVENT_HASH_GROW_BATCHES_REPARTITIONING); + /* Fall through. */ + + case PHJ_GROW_BATCHES_DECIDING: - Assert(hashtable->nbuckets > 1); - Assert(hashtable->nbuckets <= (INT_MAX / 2)); - Assert(hashtable->nbuckets == (1 << hashtable->log2_nbuckets)); + /* + * Elect one participant to clean up and decide whether further + * repartitioning is needed, or should be disabled because it's + * not helping. + */ + if (BarrierArriveAndWait(&pstate->grow_batches_barrier, + WAIT_EVENT_HASH_GROW_BATCHES_DECIDING)) + { + bool space_exhausted = false; + bool extreme_skew_detected = false; + + /* Make sure that we have the current dimensions and buckets. */ + ExecParallelHashEnsureBatchAccessors(hashtable); + ExecParallelHashTableSetCurrentBatch(hashtable, 0); + + /* Are any of the new generation of batches exhausted? */ + for (i = 0; i < hashtable->nbatch; ++i) + { + ParallelHashJoinBatch *batch = hashtable->batches[i].shared; + + if (batch->space_exhausted || + batch->estimated_size > pstate->space_allowed) + { + int parent; + + space_exhausted = true; + + /* + * Did this batch receive ALL of the tuples from its + * parent batch? That would indicate that further + * repartitioning isn't going to help (the hash values + * are probably all the same). 
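A standalone sketch of the sizing arithmetic used when going from one big shared batch to many smaller ones, not part of this patch. It assumes NTUP_PER_BUCKET is 1 (its definition elsewhere in nodeHash.c is not shown in this hunk) and uses ceil_log2() as a stand-in for my_log2(); the input numbers are illustrative:

#include <math.h>
#include <stdio.h>

#define NTUP_PER_BUCKET 1		/* assumed to match nodeHash.c */

/* stand-in for my_log2(): smallest i such that 2^i >= n */
static int
ceil_log2(long n)
{
	int			i = 0;

	while ((1L << i) < n)
		i++;
	return i;
}

int
main(void)
{
	double		batch0_ntuples = 1e6;	/* tuples hashed so far (example) */
	int			nparticipants = 4;
	int			new_nbatch;
	int			new_nbuckets;
	double		dtuples;
	double		dbuckets;

	/* one combined batch wasn't enough: try two batches per participant */
	new_nbatch = 1 << ceil_log2(nparticipants * 2);

	/* assume we've seen half the tuples, spread the total over the batches */
	dtuples = (batch0_ntuples * 2.0) / new_nbatch;
	dbuckets = ceil(dtuples / NTUP_PER_BUCKET);
	if (dbuckets < 1024)
		dbuckets = 1024;
	new_nbuckets = 1 << ceil_log2((long) dbuckets);

	printf("new_nbatch = %d, new_nbuckets = %d\n", new_nbatch, new_nbuckets);
	return 0;
}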
+ */ + parent = i % pstate->old_nbatch; + if (batch->ntuples == hashtable->batches[parent].shared->old_ntuples) + extreme_skew_detected = true; + } + } + + /* Don't keep growing if it's not helping or we'd overflow. */ + if (extreme_skew_detected || hashtable->nbatch >= INT_MAX / 2) + pstate->growth = PHJ_GROWTH_DISABLED; + else if (space_exhausted) + pstate->growth = PHJ_GROWTH_NEED_MORE_BATCHES; + else + pstate->growth = PHJ_GROWTH_OK; + + /* Free the old batches in shared memory. */ + dsa_free(hashtable->area, pstate->old_batches); + pstate->old_batches = InvalidDsaPointer; + } + /* Fall through. */ - /* - * Just reallocate the proper number of buckets - we don't need to walk - * through them - we can walk the dense-allocated chunks (just like in - * ExecHashIncreaseNumBatches, but without all the copying into new - * chunks) - */ - hashtable->buckets = - (HashJoinTuple *) repalloc(hashtable->buckets, - hashtable->nbuckets * sizeof(HashJoinTuple)); + case PHJ_GROW_BATCHES_FINISHING: + /* Wait for the above to complete. */ + BarrierArriveAndWait(&pstate->grow_batches_barrier, + WAIT_EVENT_HASH_GROW_BATCHES_FINISHING); + } +} - memset(hashtable->buckets, 0, hashtable->nbuckets * sizeof(HashJoinTuple)); +/* + * Repartition the tuples currently loaded into memory for inner batch 0 + * because the number of batches has been increased. Some tuples are retained + * in memory and some are written out to a later batch. + */ +static void +ExecParallelHashRepartitionFirst(HashJoinTable hashtable) +{ + dsa_pointer chunk_shared; + HashMemoryChunk chunk; - /* scan through all tuples in all chunks to rebuild the hash table */ - for (chunk = hashtable->chunks; chunk != NULL; chunk = chunk->next) + Assert(hashtable->nbatch == hashtable->parallel_state->nbatch); + + while ((chunk = ExecParallelHashPopChunkQueue(hashtable, &chunk_shared))) { - /* process all tuples stored in this chunk */ size_t idx = 0; + /* Repartition all tuples in this chunk. */ while (idx < chunk->used) { - HashJoinTuple hashTuple = (HashJoinTuple) (chunk->data + idx); + HashJoinTuple hashTuple = (HashJoinTuple) (HASH_CHUNK_DATA(chunk) + idx); + MinimalTuple tuple = HJTUPLE_MINTUPLE(hashTuple); + HashJoinTuple copyTuple; + dsa_pointer shared; int bucketno; int batchno; ExecHashGetBucketAndBatch(hashtable, hashTuple->hashvalue, &bucketno, &batchno); - /* add the tuple to the proper bucket */ - hashTuple->next = hashtable->buckets[bucketno]; - hashtable->buckets[bucketno] = hashTuple; + Assert(batchno < hashtable->nbatch); + if (batchno == 0) + { + /* It still belongs in batch 0. Copy to a new chunk. */ + copyTuple = + ExecParallelHashTupleAlloc(hashtable, + HJTUPLE_OVERHEAD + tuple->t_len, + &shared); + copyTuple->hashvalue = hashTuple->hashvalue; + memcpy(HJTUPLE_MINTUPLE(copyTuple), tuple, tuple->t_len); + ExecParallelHashPushTuple(&hashtable->buckets.shared[bucketno], + copyTuple, shared); + } + else + { + size_t tuple_size = + MAXALIGN(HJTUPLE_OVERHEAD + tuple->t_len); + + /* It belongs in a later batch. */ + hashtable->batches[batchno].estimated_size += tuple_size; + sts_puttuple(hashtable->batches[batchno].inner_tuples, + &hashTuple->hashvalue, tuple); + } + + /* Count this tuple. */ + ++hashtable->batches[0].old_ntuples; + ++hashtable->batches[batchno].ntuples; - /* advance index past the tuple */ idx += MAXALIGN(HJTUPLE_OVERHEAD + HJTUPLE_MINTUPLE(hashTuple)->t_len); } - /* allow this loop to be cancellable */ + /* Free this chunk. 
*/ + dsa_free(hashtable->area, chunk_shared); + CHECK_FOR_INTERRUPTS(); } } - +/* + * Help repartition inner batches 1..n. + */ +static void +ExecParallelHashRepartitionRest(HashJoinTable hashtable) +{ + ParallelHashJoinState *pstate = hashtable->parallel_state; + int old_nbatch = pstate->old_nbatch; + SharedTuplestoreAccessor **old_inner_tuples; + ParallelHashJoinBatch *old_batches; + int i; + + /* Get our hands on the previous generation of batches. */ + old_batches = (ParallelHashJoinBatch *) + dsa_get_address(hashtable->area, pstate->old_batches); + old_inner_tuples = palloc0(sizeof(SharedTuplestoreAccessor *) * old_nbatch); + for (i = 1; i < old_nbatch; ++i) + { + ParallelHashJoinBatch *shared = + NthParallelHashJoinBatch(old_batches, i); + + old_inner_tuples[i] = sts_attach(ParallelHashJoinBatchInner(shared), + ParallelWorkerNumber + 1, + &pstate->fileset); + } + + /* Join in the effort to repartition them. */ + for (i = 1; i < old_nbatch; ++i) + { + MinimalTuple tuple; + uint32 hashvalue; + + /* Scan one partition from the previous generation. */ + sts_begin_parallel_scan(old_inner_tuples[i]); + while ((tuple = sts_parallel_scan_next(old_inner_tuples[i], &hashvalue))) + { + size_t tuple_size = MAXALIGN(HJTUPLE_OVERHEAD + tuple->t_len); + int bucketno; + int batchno; + + /* Decide which partition it goes to in the new generation. */ + ExecHashGetBucketAndBatch(hashtable, hashvalue, &bucketno, + &batchno); + + hashtable->batches[batchno].estimated_size += tuple_size; + ++hashtable->batches[batchno].ntuples; + ++hashtable->batches[i].old_ntuples; + + /* Store the tuple its new batch. */ + sts_puttuple(hashtable->batches[batchno].inner_tuples, + &hashvalue, tuple); + + CHECK_FOR_INTERRUPTS(); + } + sts_end_parallel_scan(old_inner_tuples[i]); + } + + pfree(old_inner_tuples); +} + +/* + * Transfer the backend-local per-batch counters to the shared totals. 
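The repartitioning code above leans on ExecHashGetBucketAndBatch(), whose body is not part of this diff. The following standalone sketch shows the power-of-two masking scheme it is understood to rely on (low hash bits select the bucket, the next bits select the batch); it also illustrates why a tuple's new batch number modulo the old batch count is its parent batch, which the skew check above assumes:

#include <stdint.h>
#include <stdio.h>

/* assumed scheme: nbuckets and nbatch are powers of two */
static void
bucket_and_batch(uint32_t hashvalue, int nbuckets, int log2_nbuckets,
				 int nbatch, int *bucketno, int *batchno)
{
	*bucketno = hashvalue & (nbuckets - 1);
	*batchno = (hashvalue >> log2_nbuckets) & (nbatch - 1);
}

int
main(void)
{
	uint32_t	hashvalue = 0xDEADBEEF;
	int			bucketno;
	int			batchno;

	/*
	 * Doubling nbatch from 4 to 8 only adds one bit to the batch number, so
	 * new_batch % 4 is always the old ("parent") batch number.
	 */
	bucket_and_batch(hashvalue, 1024, 10, 4, &bucketno, &batchno);
	printf("nbatch=4: bucket=%d batch=%d\n", bucketno, batchno);
	bucket_and_batch(hashvalue, 1024, 10, 8, &bucketno, &batchno);
	printf("nbatch=8: bucket=%d batch=%d\n", bucketno, batchno);
	return 0;
}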
+ */ +static void +ExecParallelHashMergeCounters(HashJoinTable hashtable) +{ + ParallelHashJoinState *pstate = hashtable->parallel_state; + int i; + + LWLockAcquire(&pstate->lock, LW_EXCLUSIVE); + pstate->total_tuples = 0; + for (i = 0; i < hashtable->nbatch; ++i) + { + ParallelHashJoinBatchAccessor *batch = &hashtable->batches[i]; + + batch->shared->size += batch->size; + batch->shared->estimated_size += batch->estimated_size; + batch->shared->ntuples += batch->ntuples; + batch->shared->old_ntuples += batch->old_ntuples; + batch->size = 0; + batch->estimated_size = 0; + batch->ntuples = 0; + batch->old_ntuples = 0; + pstate->total_tuples += batch->shared->ntuples; + } + LWLockRelease(&pstate->lock); +} + +/* + * ExecHashIncreaseNumBuckets + * increase the original number of buckets in order to reduce + * number of tuples per bucket + */ +static void +ExecHashIncreaseNumBuckets(HashJoinTable hashtable) +{ + HashMemoryChunk chunk; + + /* do nothing if not an increase (it's called increase for a reason) */ + if (hashtable->nbuckets >= hashtable->nbuckets_optimal) + return; + +#ifdef HJDEBUG + printf("Hashjoin %p: increasing nbuckets %d => %d\n", + hashtable, hashtable->nbuckets, hashtable->nbuckets_optimal); +#endif + + hashtable->nbuckets = hashtable->nbuckets_optimal; + hashtable->log2_nbuckets = hashtable->log2_nbuckets_optimal; + + Assert(hashtable->nbuckets > 1); + Assert(hashtable->nbuckets <= (INT_MAX / 2)); + Assert(hashtable->nbuckets == (1 << hashtable->log2_nbuckets)); + + /* + * Just reallocate the proper number of buckets - we don't need to walk + * through them - we can walk the dense-allocated chunks (just like in + * ExecHashIncreaseNumBatches, but without all the copying into new + * chunks) + */ + hashtable->buckets.unshared = + (HashJoinTuple *) repalloc(hashtable->buckets.unshared, + hashtable->nbuckets * sizeof(HashJoinTuple)); + + memset(hashtable->buckets.unshared, 0, + hashtable->nbuckets * sizeof(HashJoinTuple)); + + /* scan through all tuples in all chunks to rebuild the hash table */ + for (chunk = hashtable->chunks; chunk != NULL; chunk = chunk->next.unshared) + { + /* process all tuples stored in this chunk */ + size_t idx = 0; + + while (idx < chunk->used) + { + HashJoinTuple hashTuple = (HashJoinTuple) (HASH_CHUNK_DATA(chunk) + idx); + int bucketno; + int batchno; + + ExecHashGetBucketAndBatch(hashtable, hashTuple->hashvalue, + &bucketno, &batchno); + + /* add the tuple to the proper bucket */ + hashTuple->next.unshared = hashtable->buckets.unshared[bucketno]; + hashtable->buckets.unshared[bucketno] = hashTuple; + + /* advance index past the tuple */ + idx += MAXALIGN(HJTUPLE_OVERHEAD + + HJTUPLE_MINTUPLE(hashTuple)->t_len); + } + + /* allow this loop to be cancellable */ + CHECK_FOR_INTERRUPTS(); + } +} + +static void +ExecParallelHashIncreaseNumBuckets(HashJoinTable hashtable) +{ + ParallelHashJoinState *pstate = hashtable->parallel_state; + int i; + HashMemoryChunk chunk; + dsa_pointer chunk_s; + + Assert(BarrierPhase(&pstate->build_barrier) == PHJ_BUILD_HASHING_INNER); + + /* + * It's unlikely, but we need to be prepared for new participants to show + * up while we're in the middle of this operation so we need to switch on + * barrier phase here. + */ + switch (PHJ_GROW_BUCKETS_PHASE(BarrierPhase(&pstate->grow_buckets_barrier))) + { + case PHJ_GROW_BUCKETS_ELECTING: + /* Elect one participant to prepare to increase nbuckets. 
*/ + if (BarrierArriveAndWait(&pstate->grow_buckets_barrier, + WAIT_EVENT_HASH_GROW_BUCKETS_ELECTING)) + { + size_t size; + dsa_pointer_atomic *buckets; + + /* Double the size of the bucket array. */ + pstate->nbuckets *= 2; + size = pstate->nbuckets * sizeof(dsa_pointer_atomic); + hashtable->batches[0].shared->size += size / 2; + dsa_free(hashtable->area, hashtable->batches[0].shared->buckets); + hashtable->batches[0].shared->buckets = + dsa_allocate(hashtable->area, size); + buckets = (dsa_pointer_atomic *) + dsa_get_address(hashtable->area, + hashtable->batches[0].shared->buckets); + for (i = 0; i < pstate->nbuckets; ++i) + dsa_pointer_atomic_init(&buckets[i], InvalidDsaPointer); + + /* Put the chunk list onto the work queue. */ + pstate->chunk_work_queue = hashtable->batches[0].shared->chunks; + + /* Clear the flag. */ + pstate->growth = PHJ_GROWTH_OK; + } + /* Fall through. */ + + case PHJ_GROW_BUCKETS_ALLOCATING: + /* Wait for the above to complete. */ + BarrierArriveAndWait(&pstate->grow_buckets_barrier, + WAIT_EVENT_HASH_GROW_BUCKETS_ALLOCATING); + /* Fall through. */ + + case PHJ_GROW_BUCKETS_REINSERTING: + /* Reinsert all tuples into the hash table. */ + ExecParallelHashEnsureBatchAccessors(hashtable); + ExecParallelHashTableSetCurrentBatch(hashtable, 0); + while ((chunk = ExecParallelHashPopChunkQueue(hashtable, &chunk_s))) + { + size_t idx = 0; + + while (idx < chunk->used) + { + HashJoinTuple hashTuple = (HashJoinTuple) (HASH_CHUNK_DATA(chunk) + idx); + dsa_pointer shared = chunk_s + HASH_CHUNK_HEADER_SIZE + idx; + int bucketno; + int batchno; + + ExecHashGetBucketAndBatch(hashtable, hashTuple->hashvalue, + &bucketno, &batchno); + Assert(batchno == 0); + + /* add the tuple to the proper bucket */ + ExecParallelHashPushTuple(&hashtable->buckets.shared[bucketno], + hashTuple, shared); + + /* advance index past the tuple */ + idx += MAXALIGN(HJTUPLE_OVERHEAD + + HJTUPLE_MINTUPLE(hashTuple)->t_len); + } + + /* allow this loop to be cancellable */ + CHECK_FOR_INTERRUPTS(); + } + BarrierArriveAndWait(&pstate->grow_buckets_barrier, + WAIT_EVENT_HASH_GROW_BUCKETS_REINSERTING); + } +} + /* * ExecHashTableInsert * insert a tuple into the hash table depending on the hash value @@ -869,8 +1625,8 @@ ExecHashTableInsert(HashJoinTable hashtable, HeapTupleHeaderClearMatch(HJTUPLE_MINTUPLE(hashTuple)); /* Push it onto the front of the bucket's list */ - hashTuple->next = hashtable->buckets[bucketno]; - hashtable->buckets[bucketno] = hashTuple; + hashTuple->next.unshared = hashtable->buckets.unshared[bucketno]; + hashtable->buckets.unshared[bucketno] = hashTuple; /* * Increase the (optimal) number of buckets if we just exceeded the @@ -910,6 +1666,94 @@ ExecHashTableInsert(HashJoinTable hashtable, } } +/* + * ExecHashTableParallelInsert + * insert a tuple into a shared hash table or shared batch tuplestore + */ +void +ExecParallelHashTableInsert(HashJoinTable hashtable, + TupleTableSlot *slot, + uint32 hashvalue) +{ + MinimalTuple tuple = ExecFetchSlotMinimalTuple(slot); + dsa_pointer shared; + int bucketno; + int batchno; + +retry: + ExecHashGetBucketAndBatch(hashtable, hashvalue, &bucketno, &batchno); + + if (batchno == 0) + { + HashJoinTuple hashTuple; + + /* Try to load it into memory. */ + Assert(BarrierPhase(&hashtable->parallel_state->build_barrier) == + PHJ_BUILD_HASHING_INNER); + hashTuple = ExecParallelHashTupleAlloc(hashtable, + HJTUPLE_OVERHEAD + tuple->t_len, + &shared); + if (hashTuple == NULL) + goto retry; + + /* Store the hash value in the HashJoinTuple header. 
*/ + hashTuple->hashvalue = hashvalue; + memcpy(HJTUPLE_MINTUPLE(hashTuple), tuple, tuple->t_len); + + /* Push it onto the front of the bucket's list */ + ExecParallelHashPushTuple(&hashtable->buckets.shared[bucketno], + hashTuple, shared); + } + else + { + size_t tuple_size = MAXALIGN(HJTUPLE_OVERHEAD + tuple->t_len); + + Assert(batchno > 0); + + /* Try to preallocate space in the batch if necessary. */ + if (hashtable->batches[batchno].preallocated < tuple_size) + { + if (!ExecParallelHashTuplePrealloc(hashtable, batchno, tuple_size)) + goto retry; + } + + Assert(hashtable->batches[batchno].preallocated >= tuple_size); + hashtable->batches[batchno].preallocated -= tuple_size; + sts_puttuple(hashtable->batches[batchno].inner_tuples, &hashvalue, + tuple); + } + ++hashtable->batches[batchno].ntuples; +} + +/* + * Insert a tuple into the current hash table. Unlike + * ExecParallelHashTableInsert, this version is not prepared to send the tuple + * to other batches or to run out of memory, and should only be called with + * tuples that belong in the current batch once growth has been disabled. + */ +void +ExecParallelHashTableInsertCurrentBatch(HashJoinTable hashtable, + TupleTableSlot *slot, + uint32 hashvalue) +{ + MinimalTuple tuple = ExecFetchSlotMinimalTuple(slot); + HashJoinTuple hashTuple; + dsa_pointer shared; + int batchno; + int bucketno; + + ExecHashGetBucketAndBatch(hashtable, hashvalue, &bucketno, &batchno); + Assert(batchno == hashtable->curbatch); + hashTuple = ExecParallelHashTupleAlloc(hashtable, + HJTUPLE_OVERHEAD + tuple->t_len, + &shared); + hashTuple->hashvalue = hashvalue; + memcpy(HJTUPLE_MINTUPLE(hashTuple), tuple, tuple->t_len); + HeapTupleHeaderClearMatch(HJTUPLE_MINTUPLE(hashTuple)); + ExecParallelHashPushTuple(&hashtable->buckets.shared[bucketno], + hashTuple, shared); +} + /* * ExecHashGetHashValue * Compute the hash value for a tuple @@ -918,10 +1762,10 @@ ExecHashTableInsert(HashJoinTable hashtable, * econtext->ecxt_innertuple. Vars in the hashkeys expressions should have * varno either OUTER_VAR or INNER_VAR. * - * A TRUE result means the tuple's hash value has been successfully computed - * and stored at *hashvalue. A FALSE result means the tuple cannot match + * A true result means the tuple's hash value has been successfully computed + * and stored at *hashvalue. A false result means the tuple cannot match * because it contains a null attribute, and hence it should be discarded - * immediately. (If keep_nulls is true then FALSE is never returned.) + * immediately. (If keep_nulls is true then false is never returned.) */ bool ExecHashGetHashValue(HashJoinTable hashtable, @@ -1076,11 +1920,11 @@ ExecScanHashBucket(HashJoinState *hjstate, * otherwise scan the standard hashtable bucket. 
*/ if (hashTuple != NULL) - hashTuple = hashTuple->next; + hashTuple = hashTuple->next.unshared; else if (hjstate->hj_CurSkewBucketNo != INVALID_SKEW_BUCKET_NO) hashTuple = hashtable->skewBucket[hjstate->hj_CurSkewBucketNo]->tuples; else - hashTuple = hashtable->buckets[hjstate->hj_CurBucketNo]; + hashTuple = hashtable->buckets.unshared[hjstate->hj_CurBucketNo]; while (hashTuple != NULL) { @@ -1094,17 +1938,71 @@ ExecScanHashBucket(HashJoinState *hjstate, false); /* do not pfree */ econtext->ecxt_innertuple = inntuple; - /* reset temp memory each time to avoid leaks from qual expr */ - ResetExprContext(econtext); + if (ExecQualAndReset(hjclauses, econtext)) + { + hjstate->hj_CurTuple = hashTuple; + return true; + } + } + + hashTuple = hashTuple->next.unshared; + } + + /* + * no match + */ + return false; +} + +/* + * ExecParallelScanHashBucket + * scan a hash bucket for matches to the current outer tuple + * + * The current outer tuple must be stored in econtext->ecxt_outertuple. + * + * On success, the inner tuple is stored into hjstate->hj_CurTuple and + * econtext->ecxt_innertuple, using hjstate->hj_HashTupleSlot as the slot + * for the latter. + */ +bool +ExecParallelScanHashBucket(HashJoinState *hjstate, + ExprContext *econtext) +{ + ExprState *hjclauses = hjstate->hashclauses; + HashJoinTable hashtable = hjstate->hj_HashTable; + HashJoinTuple hashTuple = hjstate->hj_CurTuple; + uint32 hashvalue = hjstate->hj_CurHashValue; + + /* + * hj_CurTuple is the address of the tuple last returned from the current + * bucket, or NULL if it's time to start scanning a new bucket. + */ + if (hashTuple != NULL) + hashTuple = ExecParallelHashNextTuple(hashtable, hashTuple); + else + hashTuple = ExecParallelHashFirstTuple(hashtable, + hjstate->hj_CurBucketNo); + + while (hashTuple != NULL) + { + if (hashTuple->hashvalue == hashvalue) + { + TupleTableSlot *inntuple; - if (ExecQual(hjclauses, econtext)) + /* insert hashtable's tuple into exec slot so ExecQual sees it */ + inntuple = ExecStoreMinimalTuple(HJTUPLE_MINTUPLE(hashTuple), + hjstate->hj_HashTupleSlot, + false); /* do not pfree */ + econtext->ecxt_innertuple = inntuple; + + if (ExecQualAndReset(hjclauses, econtext)) { hjstate->hj_CurTuple = hashTuple; return true; } } - hashTuple = hashTuple->next; + hashTuple = ExecParallelHashNextTuple(hashtable, hashTuple); } /* @@ -1155,10 +2053,10 @@ ExecScanHashTableForUnmatched(HashJoinState *hjstate, ExprContext *econtext) * bucket. */ if (hashTuple != NULL) - hashTuple = hashTuple->next; + hashTuple = hashTuple->next.unshared; else if (hjstate->hj_CurBucketNo < hashtable->nbuckets) { - hashTuple = hashtable->buckets[hjstate->hj_CurBucketNo]; + hashTuple = hashtable->buckets.unshared[hjstate->hj_CurBucketNo]; hjstate->hj_CurBucketNo++; } else if (hjstate->hj_CurSkewBucketNo < hashtable->nSkewBuckets) @@ -1194,7 +2092,7 @@ ExecScanHashTableForUnmatched(HashJoinState *hjstate, ExprContext *econtext) return true; } - hashTuple = hashTuple->next; + hashTuple = hashTuple->next.unshared; } /* allow this loop to be cancellable */ @@ -1226,7 +2124,7 @@ ExecHashTableReset(HashJoinTable hashtable) oldcxt = MemoryContextSwitchTo(hashtable->batchCxt); /* Reallocate and reinitialize the hash bucket headers. */ - hashtable->buckets = (HashJoinTuple *) + hashtable->buckets.unshared = (HashJoinTuple *) palloc0(nbuckets * sizeof(HashJoinTuple)); hashtable->spaceUsed = 0; @@ -1250,7 +2148,8 @@ ExecHashTableResetMatchFlags(HashJoinTable hashtable) /* Reset all flags in the main table ... 
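A minimal standalone sketch (not part of this patch) of the probe-loop pattern used by ExecScanHashBucket() and ExecParallelScanHashBucket(): compare the cheap stored hash value first and only run the expensive match test, which is the join-qual evaluation in the real code and a plain string comparison here, on candidates that pass:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* simplified stand-in for a HashJoinTuple chained into a bucket */
typedef struct BucketEntry
{
	struct BucketEntry *next;
	uint32_t	hashvalue;
	const char *key;
} BucketEntry;

static const BucketEntry *
scan_bucket(const BucketEntry *bucket, uint32_t hashvalue, const char *key)
{
	for (; bucket != NULL; bucket = bucket->next)
	{
		/* cheap filter first, expensive comparison only on hash matches */
		if (bucket->hashvalue == hashvalue && strcmp(bucket->key, key) == 0)
			return bucket;
	}
	return NULL;
}

int
main(void)
{
	BucketEntry entry = {NULL, 42, "inner tuple"};
	const BucketEntry *hit = scan_bucket(&entry, 42, "inner tuple");

	printf("%s\n", hit ? hit->key : "no match");
	return 0;
}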
*/ for (i = 0; i < hashtable->nbuckets; i++) { - for (tuple = hashtable->buckets[i]; tuple != NULL; tuple = tuple->next) + for (tuple = hashtable->buckets.unshared[i]; tuple != NULL; + tuple = tuple->next.unshared) HeapTupleHeaderClearMatch(HJTUPLE_MINTUPLE(tuple)); } @@ -1260,7 +2159,7 @@ ExecHashTableResetMatchFlags(HashJoinTable hashtable) int j = hashtable->skewBucketNums[i]; HashSkewBucket *skewBucket = hashtable->skewBucket[j]; - for (tuple = skewBucket->tuples; tuple != NULL; tuple = tuple->next) + for (tuple = skewBucket->tuples; tuple != NULL; tuple = tuple->next.unshared) HeapTupleHeaderClearMatch(HJTUPLE_MINTUPLE(tuple)); } } @@ -1505,8 +2404,9 @@ ExecHashSkewTableInsert(HashJoinTable hashtable, HeapTupleHeaderClearMatch(HJTUPLE_MINTUPLE(hashTuple)); /* Push it onto the front of the skew bucket's list */ - hashTuple->next = hashtable->skewBucket[bucketNumber]->tuples; + hashTuple->next.unshared = hashtable->skewBucket[bucketNumber]->tuples; hashtable->skewBucket[bucketNumber]->tuples = hashTuple; + Assert(hashTuple != hashTuple->next.unshared); /* Account for space used, and back off if we've used too much */ hashtable->spaceUsed += hashTupleSize; @@ -1554,7 +2454,7 @@ ExecHashRemoveNextSkewBucket(HashJoinTable hashtable) hashTuple = bucket->tuples; while (hashTuple != NULL) { - HashJoinTuple nextHashTuple = hashTuple->next; + HashJoinTuple nextHashTuple = hashTuple->next.unshared; MinimalTuple tuple; Size tupleSize; @@ -1580,8 +2480,8 @@ ExecHashRemoveNextSkewBucket(HashJoinTable hashtable) memcpy(copyTuple, hashTuple, tupleSize); pfree(hashTuple); - copyTuple->next = hashtable->buckets[bucketno]; - hashtable->buckets[bucketno] = copyTuple; + copyTuple->next.unshared = hashtable->buckets.unshared[bucketno]; + hashtable->buckets.unshared[bucketno] = copyTuple; /* We have reduced skew space, but overall space doesn't change */ hashtable->spaceUsedSkew -= tupleSize; @@ -1637,6 +2537,112 @@ ExecHashRemoveNextSkewBucket(HashJoinTable hashtable) } } +/* + * Reserve space in the DSM segment for instrumentation data. + */ +void +ExecHashEstimate(HashState *node, ParallelContext *pcxt) +{ + size_t size; + + /* don't need this if not instrumenting or no workers */ + if (!node->ps.instrument || pcxt->nworkers == 0) + return; + + size = mul_size(pcxt->nworkers, sizeof(HashInstrumentation)); + size = add_size(size, offsetof(SharedHashInfo, hinstrument)); + shm_toc_estimate_chunk(&pcxt->estimator, size); + shm_toc_estimate_keys(&pcxt->estimator, 1); +} + +/* + * Set up a space in the DSM for all workers to record instrumentation data + * about their hash table. + */ +void +ExecHashInitializeDSM(HashState *node, ParallelContext *pcxt) +{ + size_t size; + + /* don't need this if not instrumenting or no workers */ + if (!node->ps.instrument || pcxt->nworkers == 0) + return; + + size = offsetof(SharedHashInfo, hinstrument) + + pcxt->nworkers * sizeof(HashInstrumentation); + node->shared_info = (SharedHashInfo *) shm_toc_allocate(pcxt->toc, size); + memset(node->shared_info, 0, size); + node->shared_info->num_workers = pcxt->nworkers; + shm_toc_insert(pcxt->toc, node->ps.plan->plan_node_id, + node->shared_info); +} + +/* + * Locate the DSM space for hash table instrumentation data that we'll write + * to at shutdown time. 
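The instrumentation sizing above is the usual flexible-array-member arithmetic. A standalone sketch, not part of this patch, with plain malloc() standing in for shm_toc_allocate() and made-up struct names in place of SharedHashInfo and HashInstrumentation:

#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* illustrative stand-ins for HashInstrumentation and SharedHashInfo */
typedef struct WorkerStats
{
	int			nbuckets;
	int			nbatch;
} WorkerStats;

typedef struct SharedStats
{
	int			num_workers;
	WorkerStats hinstrument[];	/* one slot per worker */
} SharedStats;

int
main(void)
{
	int			nworkers = 3;
	size_t		size;
	SharedStats *shared;

	/* same shape as offsetof(SharedHashInfo, hinstrument) + n * sizeof(...) */
	size = offsetof(SharedStats, hinstrument) + nworkers * sizeof(WorkerStats);
	shared = malloc(size);
	memset(shared, 0, size);
	shared->num_workers = nworkers;
	printf("allocated %zu bytes for %d workers\n", size, nworkers);
	free(shared);
	return 0;
}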
+ */ +void +ExecHashInitializeWorker(HashState *node, ParallelWorkerContext *pwcxt) +{ + SharedHashInfo *shared_info; + + /* don't need this if not instrumenting */ + if (!node->ps.instrument) + return; + + shared_info = (SharedHashInfo *) + shm_toc_lookup(pwcxt->toc, node->ps.plan->plan_node_id, false); + node->hinstrument = &shared_info->hinstrument[ParallelWorkerNumber]; +} + +/* + * Copy instrumentation data from this worker's hash table (if it built one) + * to DSM memory so the leader can retrieve it. This must be done in an + * ExecShutdownHash() rather than ExecEndHash() because the latter runs after + * we've detached from the DSM segment. + */ +void +ExecShutdownHash(HashState *node) +{ + if (node->hinstrument && node->hashtable) + ExecHashGetInstrumentation(node->hinstrument, node->hashtable); +} + +/* + * Retrieve instrumentation data from workers before the DSM segment is + * detached, so that EXPLAIN can access it. + */ +void +ExecHashRetrieveInstrumentation(HashState *node) +{ + SharedHashInfo *shared_info = node->shared_info; + size_t size; + + if (shared_info == NULL) + return; + + /* Replace node->shared_info with a copy in backend-local memory. */ + size = offsetof(SharedHashInfo, hinstrument) + + shared_info->num_workers * sizeof(HashInstrumentation); + node->shared_info = palloc(size); + memcpy(node->shared_info, shared_info, size); +} + +/* + * Copy the instrumentation data from 'hashtable' into a HashInstrumentation + * struct. + */ +void +ExecHashGetInstrumentation(HashInstrumentation *instrument, + HashJoinTable hashtable) +{ + instrument->nbuckets = hashtable->nbuckets; + instrument->nbuckets_original = hashtable->nbuckets_original; + instrument->nbatch = hashtable->nbatch; + instrument->nbatch_original = hashtable->nbatch_original; + instrument->space_peak = hashtable->spacePeak; +} + /* * Allocate 'size' bytes from the currently active HashMemoryChunk */ @@ -1650,17 +2656,16 @@ dense_alloc(HashJoinTable hashtable, Size size) size = MAXALIGN(size); /* - * If tuple size is larger than of 1/4 of chunk size, allocate a separate - * chunk. + * If tuple size is larger than threshold, allocate a separate chunk. 
*/ if (size > HASH_CHUNK_THRESHOLD) { /* allocate new chunk and put it at the beginning of the list */ newChunk = (HashMemoryChunk) MemoryContextAlloc(hashtable->batchCxt, - offsetof(HashMemoryChunkData, data) + size); + HASH_CHUNK_HEADER_SIZE + size); newChunk->maxlen = size; - newChunk->used = 0; - newChunk->ntuples = 0; + newChunk->used = size; + newChunk->ntuples = 1; /* * Add this chunk to the list after the first existing chunk, so that @@ -1669,18 +2674,15 @@ dense_alloc(HashJoinTable hashtable, Size size) if (hashtable->chunks != NULL) { newChunk->next = hashtable->chunks->next; - hashtable->chunks->next = newChunk; + hashtable->chunks->next.unshared = newChunk; } else { - newChunk->next = hashtable->chunks; + newChunk->next.unshared = hashtable->chunks; hashtable->chunks = newChunk; } - newChunk->used += size; - newChunk->ntuples += 1; - - return newChunk->data; + return HASH_CHUNK_DATA(newChunk); } /* @@ -1692,23 +2694,619 @@ dense_alloc(HashJoinTable hashtable, Size size) { /* allocate new chunk and put it at the beginning of the list */ newChunk = (HashMemoryChunk) MemoryContextAlloc(hashtable->batchCxt, - offsetof(HashMemoryChunkData, data) + HASH_CHUNK_SIZE); + HASH_CHUNK_HEADER_SIZE + HASH_CHUNK_SIZE); newChunk->maxlen = HASH_CHUNK_SIZE; newChunk->used = size; newChunk->ntuples = 1; - newChunk->next = hashtable->chunks; + newChunk->next.unshared = hashtable->chunks; hashtable->chunks = newChunk; - return newChunk->data; + return HASH_CHUNK_DATA(newChunk); } /* There is enough space in the current chunk, let's add the tuple */ - ptr = hashtable->chunks->data + hashtable->chunks->used; + ptr = HASH_CHUNK_DATA(hashtable->chunks) + hashtable->chunks->used; hashtable->chunks->used += size; hashtable->chunks->ntuples += 1; /* return pointer to the start of the tuple memory */ return ptr; } + +/* + * Allocate space for a tuple in shared dense storage. This is equivalent to + * dense_alloc but for Parallel Hash using shared memory. + * + * While loading a tuple into shared memory, we might run out of memory and + * decide to repartition, or determine that the load factor is too high and + * decide to expand the bucket array, or discover that another participant has + * commanded us to help do that. Return NULL if number of buckets or batches + * has changed, indicating that the caller must retry (considering the + * possibility that the tuple no longer belongs in the same batch). + */ +static HashJoinTuple +ExecParallelHashTupleAlloc(HashJoinTable hashtable, size_t size, + dsa_pointer *shared) +{ + ParallelHashJoinState *pstate = hashtable->parallel_state; + dsa_pointer chunk_shared; + HashMemoryChunk chunk; + Size chunk_size; + HashJoinTuple result; + int curbatch = hashtable->curbatch; + + size = MAXALIGN(size); + + /* + * Fast path: if there is enough space in this backend's current chunk, + * then we can allocate without any locking. + */ + chunk = hashtable->current_chunk; + if (chunk != NULL && + size <= HASH_CHUNK_THRESHOLD && + chunk->maxlen - chunk->used >= size) + { + + chunk_shared = hashtable->current_chunk_shared; + Assert(chunk == dsa_get_address(hashtable->area, chunk_shared)); + *shared = chunk_shared + HASH_CHUNK_HEADER_SIZE + chunk->used; + result = (HashJoinTuple) (HASH_CHUNK_DATA(chunk) + chunk->used); + chunk->used += size; + + Assert(chunk->used <= chunk->maxlen); + Assert(result == dsa_get_address(hashtable->area, *shared)); + + return result; + } + + /* Slow path: try to allocate a new chunk. 
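A simplified standalone sketch, not part of this patch, of the chunked "dense" allocation scheme that dense_alloc() and ExecParallelHashTupleAlloc() share: requests are bump-allocated out of large chunks, oversized requests get a chunk of their own, and the chunk list makes it possible to walk every stored tuple later. The constants are illustrative rather than the real HASH_CHUNK_SIZE / HASH_CHUNK_THRESHOLD values, and the real code is more careful to keep a partially filled chunk current:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define CHUNK_SIZE		(32 * 1024)
#define CHUNK_THRESHOLD (CHUNK_SIZE / 4)
#define ALIGN8(x)		(((x) + 7) & ~(size_t) 7)

typedef struct Chunk
{
	struct Chunk *next;			/* list of all chunks, for later walking */
	size_t		maxlen;
	size_t		used;
	char		data[];
} Chunk;

static Chunk *chunks;			/* head doubles as the "current" chunk */

static void *
dense_alloc_sketch(size_t size)
{
	Chunk	   *chunk;

	size = ALIGN8(size);

	/* start a fresh chunk for oversized requests or when out of space */
	if (size > CHUNK_THRESHOLD || chunks == NULL ||
		chunks->maxlen - chunks->used < size)
	{
		size_t		len = size > CHUNK_THRESHOLD ? size : CHUNK_SIZE;

		chunk = malloc(sizeof(Chunk) + len);
		chunk->maxlen = len;
		chunk->used = size;
		chunk->next = chunks;
		chunks = chunk;
		return chunk->data;
	}

	/* bump-allocate out of the current chunk */
	chunk = chunks;
	chunk->used += size;
	return chunk->data + (chunk->used - size);
}

int
main(void)
{
	char	   *a = dense_alloc_sketch(100);
	char	   *b = dense_alloc_sketch(100);

	memset(a, 0, 100);
	memset(b, 0, 100);
	printf("a=%p b=%p (same chunk, %zu bytes apart)\n",
		   (void *) a, (void *) b, (size_t) (b - a));
	return 0;
}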
*/ + LWLockAcquire(&pstate->lock, LW_EXCLUSIVE); + + /* + * Check if we need to help increase the number of buckets or batches. + */ + if (pstate->growth == PHJ_GROWTH_NEED_MORE_BATCHES || + pstate->growth == PHJ_GROWTH_NEED_MORE_BUCKETS) + { + ParallelHashGrowth growth = pstate->growth; + + hashtable->current_chunk = NULL; + LWLockRelease(&pstate->lock); + + /* Another participant has commanded us to help grow. */ + if (growth == PHJ_GROWTH_NEED_MORE_BATCHES) + ExecParallelHashIncreaseNumBatches(hashtable); + else if (growth == PHJ_GROWTH_NEED_MORE_BUCKETS) + ExecParallelHashIncreaseNumBuckets(hashtable); + + /* The caller must retry. */ + return NULL; + } + + /* Oversized tuples get their own chunk. */ + if (size > HASH_CHUNK_THRESHOLD) + chunk_size = size + HASH_CHUNK_HEADER_SIZE; + else + chunk_size = HASH_CHUNK_SIZE; + + /* Check if it's time to grow batches or buckets. */ + if (pstate->growth != PHJ_GROWTH_DISABLED) + { + Assert(curbatch == 0); + Assert(BarrierPhase(&pstate->build_barrier) == PHJ_BUILD_HASHING_INNER); + + /* + * Check if our space limit would be exceeded. To avoid choking on + * very large tuples or very low work_mem setting, we'll always allow + * each backend to allocate at least one chunk. + */ + if (hashtable->batches[0].at_least_one_chunk && + hashtable->batches[0].shared->size + + chunk_size > pstate->space_allowed) + { + pstate->growth = PHJ_GROWTH_NEED_MORE_BATCHES; + hashtable->batches[0].shared->space_exhausted = true; + LWLockRelease(&pstate->lock); + + return NULL; + } + + /* Check if our load factor limit would be exceeded. */ + if (hashtable->nbatch == 1) + { + hashtable->batches[0].shared->ntuples += hashtable->batches[0].ntuples; + hashtable->batches[0].ntuples = 0; + /* Guard against integer overflow and alloc size overflow */ + if (hashtable->batches[0].shared->ntuples + 1 > + hashtable->nbuckets * NTUP_PER_BUCKET && + hashtable->nbuckets < (INT_MAX / 2) && + hashtable->nbuckets * 2 <= + MaxAllocSize / sizeof(dsa_pointer_atomic)) + { + pstate->growth = PHJ_GROWTH_NEED_MORE_BUCKETS; + LWLockRelease(&pstate->lock); + + return NULL; + } + } + } + + /* We are cleared to allocate a new chunk. */ + chunk_shared = dsa_allocate(hashtable->area, chunk_size); + hashtable->batches[curbatch].shared->size += chunk_size; + hashtable->batches[curbatch].at_least_one_chunk = true; + + /* Set up the chunk. */ + chunk = (HashMemoryChunk) dsa_get_address(hashtable->area, chunk_shared); + *shared = chunk_shared + HASH_CHUNK_HEADER_SIZE; + chunk->maxlen = chunk_size - HASH_CHUNK_HEADER_SIZE; + chunk->used = size; + + /* + * Push it onto the list of chunks, so that it can be found if we need to + * increase the number of buckets or batches (batch 0 only) and later for + * freeing the memory (all batches). + */ + chunk->next.shared = hashtable->batches[curbatch].shared->chunks; + hashtable->batches[curbatch].shared->chunks = chunk_shared; + + if (size <= HASH_CHUNK_THRESHOLD) + { + /* + * Make this the current chunk so that we can use the fast path to + * fill the rest of it up in future calls. + */ + hashtable->current_chunk = chunk; + hashtable->current_chunk_shared = chunk_shared; + } + LWLockRelease(&pstate->lock); + + Assert(HASH_CHUNK_DATA(chunk) == dsa_get_address(hashtable->area, *shared)); + result = (HashJoinTuple) HASH_CHUNK_DATA(chunk); + + return result; +} + +/* + * One backend needs to set up the shared batch state including tuplestores. 
+ * Other backends will ensure they have correctly configured accessors by + * called ExecParallelHashEnsureBatchAccessors(). + */ +static void +ExecParallelHashJoinSetUpBatches(HashJoinTable hashtable, int nbatch) +{ + ParallelHashJoinState *pstate = hashtable->parallel_state; + ParallelHashJoinBatch *batches; + MemoryContext oldcxt; + int i; + + Assert(hashtable->batches == NULL); + + /* Allocate space. */ + pstate->batches = + dsa_allocate0(hashtable->area, + EstimateParallelHashJoinBatch(hashtable) * nbatch); + pstate->nbatch = nbatch; + batches = dsa_get_address(hashtable->area, pstate->batches); + + /* Use hash join memory context. */ + oldcxt = MemoryContextSwitchTo(hashtable->hashCxt); + + /* Allocate this backend's accessor array. */ + hashtable->nbatch = nbatch; + hashtable->batches = (ParallelHashJoinBatchAccessor *) + palloc0(sizeof(ParallelHashJoinBatchAccessor) * hashtable->nbatch); + + /* Set up the shared state, tuplestores and backend-local accessors. */ + for (i = 0; i < hashtable->nbatch; ++i) + { + ParallelHashJoinBatchAccessor *accessor = &hashtable->batches[i]; + ParallelHashJoinBatch *shared = NthParallelHashJoinBatch(batches, i); + char name[MAXPGPATH]; + + /* + * All members of shared were zero-initialized. We just need to set + * up the Barrier. + */ + BarrierInit(&shared->batch_barrier, 0); + if (i == 0) + { + /* Batch 0 doesn't need to be loaded. */ + BarrierAttach(&shared->batch_barrier); + while (BarrierPhase(&shared->batch_barrier) < PHJ_BATCH_PROBING) + BarrierArriveAndWait(&shared->batch_barrier, 0); + BarrierDetach(&shared->batch_barrier); + } + + /* Initialize accessor state. All members were zero-initialized. */ + accessor->shared = shared; + + /* Initialize the shared tuplestores. */ + snprintf(name, sizeof(name), "i%dof%d", i, hashtable->nbatch); + accessor->inner_tuples = + sts_initialize(ParallelHashJoinBatchInner(shared), + pstate->nparticipants, + ParallelWorkerNumber + 1, + sizeof(uint32), + SHARED_TUPLESTORE_SINGLE_PASS, + &pstate->fileset, + name); + snprintf(name, sizeof(name), "o%dof%d", i, hashtable->nbatch); + accessor->outer_tuples = + sts_initialize(ParallelHashJoinBatchOuter(shared, + pstate->nparticipants), + pstate->nparticipants, + ParallelWorkerNumber + 1, + sizeof(uint32), + SHARED_TUPLESTORE_SINGLE_PASS, + &pstate->fileset, + name); + } + + MemoryContextSwitchTo(oldcxt); +} + +/* + * Free the current set of ParallelHashJoinBatchAccessor objects. + */ +static void +ExecParallelHashCloseBatchAccessors(HashJoinTable hashtable) +{ + int i; + + for (i = 0; i < hashtable->nbatch; ++i) + { + /* Make sure no files are left open. */ + sts_end_write(hashtable->batches[i].inner_tuples); + sts_end_write(hashtable->batches[i].outer_tuples); + sts_end_parallel_scan(hashtable->batches[i].inner_tuples); + sts_end_parallel_scan(hashtable->batches[i].outer_tuples); + } + pfree(hashtable->batches); + hashtable->batches = NULL; +} + +/* + * Make sure this backend has up-to-date accessors for the current set of + * batches. 
+ */ +static void +ExecParallelHashEnsureBatchAccessors(HashJoinTable hashtable) +{ + ParallelHashJoinState *pstate = hashtable->parallel_state; + ParallelHashJoinBatch *batches; + MemoryContext oldcxt; + int i; + + if (hashtable->batches != NULL) + { + if (hashtable->nbatch == pstate->nbatch) + return; + ExecParallelHashCloseBatchAccessors(hashtable); + } + + /* + * It's possible for a backend to start up very late so that the whole + * join is finished and the shm state for tracking batches has already + * been freed by ExecHashTableDetach(). In that case we'll just leave + * hashtable->batches as NULL so that ExecParallelHashJoinNewBatch() gives + * up early. + */ + if (!DsaPointerIsValid(pstate->batches)) + return; + + /* Use hash join memory context. */ + oldcxt = MemoryContextSwitchTo(hashtable->hashCxt); + + /* Allocate this backend's accessor array. */ + hashtable->nbatch = pstate->nbatch; + hashtable->batches = (ParallelHashJoinBatchAccessor *) + palloc0(sizeof(ParallelHashJoinBatchAccessor) * hashtable->nbatch); + + /* Find the base of the pseudo-array of ParallelHashJoinBatch objects. */ + batches = (ParallelHashJoinBatch *) + dsa_get_address(hashtable->area, pstate->batches); + + /* Set up the accessor array and attach to the tuplestores. */ + for (i = 0; i < hashtable->nbatch; ++i) + { + ParallelHashJoinBatchAccessor *accessor = &hashtable->batches[i]; + ParallelHashJoinBatch *shared = NthParallelHashJoinBatch(batches, i); + + accessor->shared = shared; + accessor->preallocated = 0; + accessor->done = false; + accessor->inner_tuples = + sts_attach(ParallelHashJoinBatchInner(shared), + ParallelWorkerNumber + 1, + &pstate->fileset); + accessor->outer_tuples = + sts_attach(ParallelHashJoinBatchOuter(shared, + pstate->nparticipants), + ParallelWorkerNumber + 1, + &pstate->fileset); + } + + MemoryContextSwitchTo(oldcxt); +} + +/* + * Allocate an empty shared memory hash table for a given batch. + */ +void +ExecParallelHashTableAlloc(HashJoinTable hashtable, int batchno) +{ + ParallelHashJoinBatch *batch = hashtable->batches[batchno].shared; + dsa_pointer_atomic *buckets; + int nbuckets = hashtable->parallel_state->nbuckets; + int i; + + batch->buckets = + dsa_allocate(hashtable->area, sizeof(dsa_pointer_atomic) * nbuckets); + buckets = (dsa_pointer_atomic *) + dsa_get_address(hashtable->area, batch->buckets); + for (i = 0; i < nbuckets; ++i) + dsa_pointer_atomic_init(&buckets[i], InvalidDsaPointer); +} + +/* + * If we are currently attached to a shared hash join batch, detach. If we + * are last to detach, clean up. + */ +void +ExecHashTableDetachBatch(HashJoinTable hashtable) +{ + if (hashtable->parallel_state != NULL && + hashtable->curbatch >= 0) + { + int curbatch = hashtable->curbatch; + ParallelHashJoinBatch *batch = hashtable->batches[curbatch].shared; + + /* Make sure any temporary files are closed. */ + sts_end_parallel_scan(hashtable->batches[curbatch].inner_tuples); + sts_end_parallel_scan(hashtable->batches[curbatch].outer_tuples); + + /* Detach from the batch we were last working on. */ + if (BarrierArriveAndDetach(&batch->batch_barrier)) + { + /* + * Technically we shouldn't access the barrier because we're no + * longer attached, but since there is no way it's moving after + * this point it seems safe to make the following assertion. + */ + Assert(BarrierPhase(&batch->batch_barrier) == PHJ_BATCH_DONE); + + /* Free shared chunks and buckets. 
*/ + while (DsaPointerIsValid(batch->chunks)) + { + HashMemoryChunk chunk = + dsa_get_address(hashtable->area, batch->chunks); + dsa_pointer next = chunk->next.shared; + + dsa_free(hashtable->area, batch->chunks); + batch->chunks = next; + } + if (DsaPointerIsValid(batch->buckets)) + { + dsa_free(hashtable->area, batch->buckets); + batch->buckets = InvalidDsaPointer; + } + } + + /* + * Track the largest batch we've been attached to. Though each + * backend might see a different subset of batches, explain.c will + * scan the results from all backends to find the largest value. + */ + hashtable->spacePeak = + Max(hashtable->spacePeak, + batch->size + sizeof(dsa_pointer_atomic) * hashtable->nbuckets); + + /* Remember that we are not attached to a batch. */ + hashtable->curbatch = -1; + } +} + +/* + * Detach from all shared resources. If we are last to detach, clean up. + */ +void +ExecHashTableDetach(HashJoinTable hashtable) +{ + if (hashtable->parallel_state) + { + ParallelHashJoinState *pstate = hashtable->parallel_state; + int i; + + /* Make sure any temporary files are closed. */ + if (hashtable->batches) + { + for (i = 0; i < hashtable->nbatch; ++i) + { + sts_end_write(hashtable->batches[i].inner_tuples); + sts_end_write(hashtable->batches[i].outer_tuples); + sts_end_parallel_scan(hashtable->batches[i].inner_tuples); + sts_end_parallel_scan(hashtable->batches[i].outer_tuples); + } + } + + /* If we're last to detach, clean up shared memory. */ + if (BarrierDetach(&pstate->build_barrier)) + { + if (DsaPointerIsValid(pstate->batches)) + { + dsa_free(hashtable->area, pstate->batches); + pstate->batches = InvalidDsaPointer; + } + } + + hashtable->parallel_state = NULL; + } +} + +/* + * Get the first tuple in a given bucket identified by number. + */ +static inline HashJoinTuple +ExecParallelHashFirstTuple(HashJoinTable hashtable, int bucketno) +{ + HashJoinTuple tuple; + dsa_pointer p; + + Assert(hashtable->parallel_state); + p = dsa_pointer_atomic_read(&hashtable->buckets.shared[bucketno]); + tuple = (HashJoinTuple) dsa_get_address(hashtable->area, p); + + return tuple; +} + +/* + * Get the next tuple in the same bucket as 'tuple'. + */ +static inline HashJoinTuple +ExecParallelHashNextTuple(HashJoinTable hashtable, HashJoinTuple tuple) +{ + HashJoinTuple next; + + Assert(hashtable->parallel_state); + next = (HashJoinTuple) dsa_get_address(hashtable->area, tuple->next.shared); + + return next; +} + +/* + * Insert a tuple at the front of a chain of tuples in DSA memory atomically. + */ +static inline void +ExecParallelHashPushTuple(dsa_pointer_atomic *head, + HashJoinTuple tuple, + dsa_pointer tuple_shared) +{ + for (;;) + { + tuple->next.shared = dsa_pointer_atomic_read(head); + if (dsa_pointer_atomic_compare_exchange(head, + &tuple->next.shared, + tuple_shared)) + break; + } +} + +/* + * Prepare to work on a given batch. 
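ExecParallelHashPushTuple() above is a classic lock-free list push. For illustration only, the same retry loop written with standalone C11 atomics, with plain pointers standing in for dsa_pointer values:

#include <stdatomic.h>
#include <stdio.h>

/* simplified stand-in for a HashJoinTuple in a shared bucket chain */
typedef struct Tuple
{
	struct Tuple *next;
	unsigned	hashvalue;
} Tuple;

static void
push_tuple(_Atomic(Tuple *) *head, Tuple *tuple)
{
	Tuple	   *old_head = atomic_load(head);

	/* retry until no other participant changed the bucket head meanwhile */
	do
	{
		tuple->next = old_head;
	} while (!atomic_compare_exchange_weak(head, &old_head, tuple));
}

int
main(void)
{
	static _Atomic(Tuple *) bucket = NULL;
	Tuple		t1 = {NULL, 42};

	push_tuple(&bucket, &t1);
	printf("bucket head hash = %u\n", atomic_load(&bucket)->hashvalue);
	return 0;
}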
+ */ +void +ExecParallelHashTableSetCurrentBatch(HashJoinTable hashtable, int batchno) +{ + Assert(hashtable->batches[batchno].shared->buckets != InvalidDsaPointer); + + hashtable->curbatch = batchno; + hashtable->buckets.shared = (dsa_pointer_atomic *) + dsa_get_address(hashtable->area, + hashtable->batches[batchno].shared->buckets); + hashtable->nbuckets = hashtable->parallel_state->nbuckets; + hashtable->log2_nbuckets = my_log2(hashtable->nbuckets); + hashtable->current_chunk = NULL; + hashtable->current_chunk_shared = InvalidDsaPointer; + hashtable->batches[batchno].at_least_one_chunk = false; +} + +/* + * Take the next available chunk from the queue of chunks being worked on in + * parallel. Return NULL if there are none left. Otherwise return a pointer + * to the chunk, and set *shared to the DSA pointer to the chunk. + */ +static HashMemoryChunk +ExecParallelHashPopChunkQueue(HashJoinTable hashtable, dsa_pointer *shared) +{ + ParallelHashJoinState *pstate = hashtable->parallel_state; + HashMemoryChunk chunk; + + LWLockAcquire(&pstate->lock, LW_EXCLUSIVE); + if (DsaPointerIsValid(pstate->chunk_work_queue)) + { + *shared = pstate->chunk_work_queue; + chunk = (HashMemoryChunk) + dsa_get_address(hashtable->area, *shared); + pstate->chunk_work_queue = chunk->next.shared; + } + else + chunk = NULL; + LWLockRelease(&pstate->lock); + + return chunk; +} + +/* + * Increase the space preallocated in this backend for a given inner batch by + * at least a given amount. This allows us to track whether a given batch + * would fit in memory when loaded back in. Also increase the number of + * batches or buckets if required. + * + * This maintains a running estimation of how much space will be taken when we + * load the batch back into memory by simulating the way chunks will be handed + * out to workers. It's not perfectly accurate because the tuples will be + * packed into memory chunks differently by ExecParallelHashTupleAlloc(), but + * it should be pretty close. It tends to overestimate by a fraction of a + * chunk per worker since all workers gang up to preallocate during hashing, + * but workers tend to reload batches alone if there are enough to go around, + * leaving fewer partially filled chunks. This effect is bounded by + * nparticipants. + * + * Return false if the number of batches or buckets has changed, and the + * caller should reconsider which batch a given tuple now belongs in and call + * again. + */ +static bool +ExecParallelHashTuplePrealloc(HashJoinTable hashtable, int batchno, size_t size) +{ + ParallelHashJoinState *pstate = hashtable->parallel_state; + ParallelHashJoinBatchAccessor *batch = &hashtable->batches[batchno]; + size_t want = Max(size, HASH_CHUNK_SIZE - HASH_CHUNK_HEADER_SIZE); + + Assert(batchno > 0); + Assert(batchno < hashtable->nbatch); + Assert(size == MAXALIGN(size)); + + LWLockAcquire(&pstate->lock, LW_EXCLUSIVE); + + /* Has another participant commanded us to help grow? 
*/ + if (pstate->growth == PHJ_GROWTH_NEED_MORE_BATCHES || + pstate->growth == PHJ_GROWTH_NEED_MORE_BUCKETS) + { + ParallelHashGrowth growth = pstate->growth; + + LWLockRelease(&pstate->lock); + if (growth == PHJ_GROWTH_NEED_MORE_BATCHES) + ExecParallelHashIncreaseNumBatches(hashtable); + else if (growth == PHJ_GROWTH_NEED_MORE_BUCKETS) + ExecParallelHashIncreaseNumBuckets(hashtable); + + return false; + } + + if (pstate->growth != PHJ_GROWTH_DISABLED && + batch->at_least_one_chunk && + (batch->shared->estimated_size + want + HASH_CHUNK_HEADER_SIZE + > pstate->space_allowed)) + { + /* + * We have determined that this batch would exceed the space budget if + * loaded into memory. Command all participants to help repartition. + */ + batch->shared->space_exhausted = true; + pstate->growth = PHJ_GROWTH_NEED_MORE_BATCHES; + LWLockRelease(&pstate->lock); + + return false; + } + + batch->at_least_one_chunk = true; + batch->shared->estimated_size += want + HASH_CHUNK_HEADER_SIZE; + batch->preallocated = want; + LWLockRelease(&pstate->lock); + + return true; +} diff --git a/src/backend/executor/nodeHashjoin.c b/src/backend/executor/nodeHashjoin.c index ab1632cc13..08a8bb3426 100644 --- a/src/backend/executor/nodeHashjoin.c +++ b/src/backend/executor/nodeHashjoin.c @@ -3,25 +3,119 @@ * nodeHashjoin.c * Routines to handle hash join nodes * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * * IDENTIFICATION * src/backend/executor/nodeHashjoin.c * + * PARALLELISM + * + * Hash joins can participate in parallel query execution in several ways. A + * parallel-oblivious hash join is one where the node is unaware that it is + * part of a parallel plan. In this case, a copy of the inner plan is used to + * build a copy of the hash table in every backend, and the outer plan could + * either be built from a partial or complete path, so that the results of the + * hash join are correspondingly either partial or complete. A parallel-aware + * hash join is one that behaves differently, coordinating work between + * backends, and appears as Parallel Hash Join in EXPLAIN output. A Parallel + * Hash Join always appears with a Parallel Hash node. + * + * Parallel-aware hash joins use the same per-backend state machine to track + * progress through the hash join algorithm as parallel-oblivious hash joins. + * In a parallel-aware hash join, there is also a shared state machine that + * co-operating backends use to synchronize their local state machines and + * program counters. The shared state machine is managed with a Barrier IPC + * primitive. When all attached participants arrive at a barrier, the phase + * advances and all waiting participants are released. + * + * When a participant begins working on a parallel hash join, it must first + * figure out how much progress has already been made, because participants + * don't wait for each other to begin. For this reason there are switch + * statements at key points in the code where we have to synchronize our local + * state machine with the phase, and then jump to the correct part of the + * algorithm so that we can get started. + * + * One barrier called build_barrier is used to coordinate the hashing phases. 
+ * The phase is represented by an integer which begins at zero and increments + * one by one, but in the code it is referred to by symbolic names as follows: + * + * PHJ_BUILD_ELECTING -- initial state + * PHJ_BUILD_ALLOCATING -- one sets up the batches and table 0 + * PHJ_BUILD_HASHING_INNER -- all hash the inner rel + * PHJ_BUILD_HASHING_OUTER -- (multi-batch only) all hash the outer + * PHJ_BUILD_DONE -- building done, probing can begin + * + * While in the phase PHJ_BUILD_HASHING_INNER a separate pair of barriers may + * be used repeatedly as required to coordinate expansions in the number of + * batches or buckets. Their phases are as follows: + * + * PHJ_GROW_BATCHES_ELECTING -- initial state + * PHJ_GROW_BATCHES_ALLOCATING -- one allocates new batches + * PHJ_GROW_BATCHES_REPARTITIONING -- all repartition + * PHJ_GROW_BATCHES_FINISHING -- one cleans up, detects skew + * + * PHJ_GROW_BUCKETS_ELECTING -- initial state + * PHJ_GROW_BUCKETS_ALLOCATING -- one allocates new buckets + * PHJ_GROW_BUCKETS_REINSERTING -- all insert tuples + * + * If the planner got the number of batches and buckets right, those won't be + * necessary, but on the other hand we might finish up needing to expand the + * buckets or batches multiple times while hashing the inner relation to stay + * within our memory budget and load factor target. For that reason it's a + * separate pair of barriers using circular phases. + * + * The PHJ_BUILD_HASHING_OUTER phase is required only for multi-batch joins, + * because we need to divide the outer relation into batches up front in order + * to be able to process batches entirely independently. In contrast, the + * parallel-oblivious algorithm simply throws tuples 'forward' to 'later' + * batches whenever it encounters them while scanning and probing, which it + * can do because it processes batches in serial order. + * + * Once PHJ_BUILD_DONE is reached, backends then split up and process + * different batches, or gang up and work together on probing batches if there + * aren't enough to go around. For each batch there is a separate barrier + * with the following phases: + * + * PHJ_BATCH_ELECTING -- initial state + * PHJ_BATCH_ALLOCATING -- one allocates buckets + * PHJ_BATCH_LOADING -- all load the hash table from disk + * PHJ_BATCH_PROBING -- all probe + * PHJ_BATCH_DONE -- end + * + * Batch 0 is a special case, because it starts out in phase + * PHJ_BATCH_PROBING; populating batch 0's hash table is done during + * PHJ_BUILD_HASHING_INNER so we can skip loading. + * + * Initially we try to plan for a single-batch hash join using the combined + * work_mem of all participants to create a large shared hash table. If that + * turns out either at planning or execution time to be impossible then we + * fall back to regular work_mem sized hash tables. + * + * To avoid deadlocks, we never wait for any barrier unless it is known that + * all other backends attached to it are actively executing the node or have + * already arrived. Practically, that means that we never return a tuple + * while attached to a barrier, unless the barrier has reached its final + * state. In the slightly special case of the per-batch barrier, we return + * tuples while in PHJ_BATCH_PROBING phase, but that's OK because we use + * BarrierArriveAndDetach() to advance it to PHJ_BATCH_DONE without waiting. 
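As an illustration only (this sketch is not taken from the patch), the synchronize-then-jump pattern described above looks roughly like the following.  The PHJ_BUILD_* phase names are the ones listed above; the WAIT_EVENT_* constants are assumed to follow the naming visible elsewhere in the patch (e.g. WAIT_EVENT_HASH_BUILD_HASHING_OUTER), and do_allocate() / do_hash_inner() are placeholders for the per-phase work.

    switch (BarrierAttach(&pstate->build_barrier))
    {
        case PHJ_BUILD_ELECTING:
            if (BarrierArriveAndWait(&pstate->build_barrier,
                                     WAIT_EVENT_HASH_BUILD_ELECTING))
                do_allocate();      // exactly one participant is elected
            // fall through
        case PHJ_BUILD_ALLOCATING:
            BarrierArriveAndWait(&pstate->build_barrier,
                                 WAIT_EVENT_HASH_BUILD_ALLOCATING);
            // fall through
        case PHJ_BUILD_HASHING_INNER:
            do_hash_inner();        // all attached participants can help
            break;
        default:
            break;                  // arrived too late to help; use results
    }

A participant that attaches during a later phase simply skips the earlier cases, which is how backends that start at different times agree on how much of the build has already happened.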
+ * *------------------------------------------------------------------------- */ #include "postgres.h" #include "access/htup_details.h" +#include "access/parallel.h" #include "executor/executor.h" #include "executor/hashjoin.h" #include "executor/nodeHash.h" #include "executor/nodeHashjoin.h" #include "miscadmin.h" +#include "pgstat.h" #include "utils/memutils.h" +#include "utils/sharedtuplestore.h" /* @@ -42,24 +136,33 @@ static TupleTableSlot *ExecHashJoinOuterGetTuple(PlanState *outerNode, HashJoinState *hjstate, uint32 *hashvalue); +static TupleTableSlot *ExecParallelHashJoinOuterGetTuple(PlanState *outerNode, + HashJoinState *hjstate, + uint32 *hashvalue); static TupleTableSlot *ExecHashJoinGetSavedTuple(HashJoinState *hjstate, BufFile *file, uint32 *hashvalue, TupleTableSlot *tupleSlot); static bool ExecHashJoinNewBatch(HashJoinState *hjstate); +static bool ExecParallelHashJoinNewBatch(HashJoinState *hjstate); +static void ExecParallelHashJoinPartitionOuter(HashJoinState *node); /* ---------------------------------------------------------------- - * ExecHashJoin + * ExecHashJoinImpl * - * This function implements the Hybrid Hashjoin algorithm. + * This function implements the Hybrid Hashjoin algorithm. It is marked + * with an always-inline attribute so that ExecHashJoin() and + * ExecParallelHashJoin() can inline it. Compilers that respect the + * attribute should create versions specialized for parallel == true and + * parallel == false with unnecessary branches removed. * * Note: the relation we build hash table on is the "inner" * the other one is "outer". * ---------------------------------------------------------------- */ -static TupleTableSlot * /* return: a tuple or NULL */ -ExecHashJoin(PlanState *pstate) +static pg_attribute_always_inline TupleTableSlot * +ExecHashJoinImpl(PlanState *pstate, bool parallel) { HashJoinState *node = castNode(HashJoinState, pstate); PlanState *outerNode; @@ -71,6 +174,7 @@ ExecHashJoin(PlanState *pstate) TupleTableSlot *outerTupleSlot; uint32 hashvalue; int batchno; + ParallelHashJoinState *parallel_state; /* * get information from HashJoin node @@ -81,6 +185,7 @@ ExecHashJoin(PlanState *pstate) outerNode = outerPlanState(node); hashtable = node->hj_HashTable; econtext = node->js.ps.ps_ExprContext; + parallel_state = hashNode->parallel_state; /* * Reset per-tuple memory context to free any expression evaluation @@ -138,6 +243,18 @@ ExecHashJoin(PlanState *pstate) /* no chance to not build the hash table */ node->hj_FirstOuterTupleSlot = NULL; } + else if (parallel) + { + /* + * The empty-outer optimization is not implemented for + * shared hash tables, because no one participant can + * determine that there are no outer tuples, and it's not + * yet clear that it's worth the synchronization overhead + * of reaching consensus to figure that out. So we have + * to build the hash table. + */ + node->hj_FirstOuterTupleSlot = NULL; + } else if (HJ_FILL_OUTER(node) || (outerNode->plan->startup_cost < hashNode->ps.plan->total_cost && !node->hj_OuterNotEmpty)) @@ -155,15 +272,19 @@ ExecHashJoin(PlanState *pstate) node->hj_FirstOuterTupleSlot = NULL; /* - * create the hash table + * Create the hash table. If using Parallel Hash, then + * whoever gets here first will create the hash table and any + * later arrivals will merely attach to it. 
*/ - hashtable = ExecHashTableCreate((Hash *) hashNode->ps.plan, + hashtable = ExecHashTableCreate(hashNode, node->hj_HashOperators, HJ_FILL_INNER(node)); node->hj_HashTable = hashtable; /* - * execute the Hash node, to build the hash table + * Execute the Hash node, to build the hash table. If using + * Parallel Hash, then we'll try to help hashing unless we + * arrived too late. */ hashNode->hashtable = hashtable; (void) MultiExecProcNode((PlanState *) hashNode); @@ -189,7 +310,34 @@ ExecHashJoin(PlanState *pstate) */ node->hj_OuterNotEmpty = false; - node->hj_JoinState = HJ_NEED_NEW_OUTER; + if (parallel) + { + Barrier *build_barrier; + + build_barrier = ¶llel_state->build_barrier; + Assert(BarrierPhase(build_barrier) == PHJ_BUILD_HASHING_OUTER || + BarrierPhase(build_barrier) == PHJ_BUILD_DONE); + if (BarrierPhase(build_barrier) == PHJ_BUILD_HASHING_OUTER) + { + /* + * If multi-batch, we need to hash the outer relation + * up front. + */ + if (hashtable->nbatch > 1) + ExecParallelHashJoinPartitionOuter(node); + BarrierArriveAndWait(build_barrier, + WAIT_EVENT_HASH_BUILD_HASHING_OUTER); + } + Assert(BarrierPhase(build_barrier) == PHJ_BUILD_DONE); + + /* Each backend should now select a batch to work on. */ + hashtable->curbatch = -1; + node->hj_JoinState = HJ_NEED_NEW_BATCH; + + continue; + } + else + node->hj_JoinState = HJ_NEED_NEW_OUTER; /* FALL THRU */ @@ -198,9 +346,14 @@ ExecHashJoin(PlanState *pstate) /* * We don't have an outer tuple, try to get the next one */ - outerTupleSlot = ExecHashJoinOuterGetTuple(outerNode, - node, - &hashvalue); + if (parallel) + outerTupleSlot = + ExecParallelHashJoinOuterGetTuple(outerNode, node, + &hashvalue); + else + outerTupleSlot = + ExecHashJoinOuterGetTuple(outerNode, node, &hashvalue); + if (TupIsNull(outerTupleSlot)) { /* end of batch, or maybe whole join */ @@ -240,10 +393,12 @@ ExecHashJoin(PlanState *pstate) * Need to postpone this outer tuple to a later batch. * Save it in the corresponding outer-batch file. */ + Assert(parallel_state == NULL); Assert(batchno > hashtable->curbatch); ExecHashJoinSaveTuple(ExecFetchSlotMinimalTuple(outerTupleSlot), hashvalue, &hashtable->outerBatchFile[batchno]); + /* Loop around, staying in HJ_NEED_NEW_OUTER state */ continue; } @@ -258,11 +413,23 @@ ExecHashJoin(PlanState *pstate) /* * Scan the selected hash bucket for matches to current outer */ - if (!ExecScanHashBucket(node, econtext)) + if (parallel) { - /* out of matches; check for possible outer-join fill */ - node->hj_JoinState = HJ_FILL_OUTER_TUPLE; - continue; + if (!ExecParallelScanHashBucket(node, econtext)) + { + /* out of matches; check for possible outer-join fill */ + node->hj_JoinState = HJ_FILL_OUTER_TUPLE; + continue; + } + } + else + { + if (!ExecScanHashBucket(node, econtext)) + { + /* out of matches; check for possible outer-join fill */ + node->hj_JoinState = HJ_FILL_OUTER_TUPLE; + continue; + } } /* @@ -362,8 +529,16 @@ ExecHashJoin(PlanState *pstate) /* * Try to advance to next batch. Done if there are no more. */ - if (!ExecHashJoinNewBatch(node)) - return NULL; /* end of join */ + if (parallel) + { + if (!ExecParallelHashJoinNewBatch(node)) + return NULL; /* end of parallel-aware join */ + } + else + { + if (!ExecHashJoinNewBatch(node)) + return NULL; /* end of parallel-oblivious join */ + } node->hj_JoinState = HJ_NEED_NEW_OUTER; break; @@ -374,6 +549,38 @@ ExecHashJoin(PlanState *pstate) } } +/* ---------------------------------------------------------------- + * ExecHashJoin + * + * Parallel-oblivious version. 
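As an illustration only (not part of the patch), the specialization idiom relied on by this wrapper and by ExecParallelHashJoin() below, in miniature: given a compile-time constant flag, an always-inline implementation should be folded into two copies with the untaken branches removed.  The names impl(), serial_version() and parallel_version() are invented for the example.

    static pg_attribute_always_inline int
    impl(int x, bool parallel)
    {
        return parallel ? x * 2 : x + 1;    // stands in for the two paths
    }

    static int serial_version(int x)   { return impl(x, false); }
    static int parallel_version(int x) { return impl(x, true); }

Whether the branches really disappear depends on the compiler honouring the attribute, as the wrapper comments note.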
+ * ---------------------------------------------------------------- + */ +static TupleTableSlot * /* return: a tuple or NULL */ +ExecHashJoin(PlanState *pstate) +{ + /* + * On sufficiently smart compilers this should be inlined with the + * parallel-aware branches removed. + */ + return ExecHashJoinImpl(pstate, false); +} + +/* ---------------------------------------------------------------- + * ExecParallelHashJoin + * + * Parallel-aware version. + * ---------------------------------------------------------------- + */ +static TupleTableSlot * /* return: a tuple or NULL */ +ExecParallelHashJoin(PlanState *pstate) +{ + /* + * On sufficiently smart compilers this should be inlined with the + * parallel-oblivious branches removed. + */ + return ExecHashJoinImpl(pstate, true); +} + /* ---------------------------------------------------------------- * ExecInitHashJoin * @@ -388,7 +595,10 @@ ExecInitHashJoin(HashJoin *node, EState *estate, int eflags) Hash *hashNode; List *lclauses; List *rclauses; + List *rhclauses; List *hoperators; + TupleDesc outerDesc, + innerDesc; ListCell *l; /* check for unsupported flags */ @@ -400,7 +610,14 @@ ExecInitHashJoin(HashJoin *node, EState *estate, int eflags) hjstate = makeNode(HashJoinState); hjstate->js.ps.plan = (Plan *) node; hjstate->js.ps.state = estate; + + /* + * See ExecHashJoinInitializeDSM() and ExecHashJoinInitializeWorker() + * where this function may be replaced with a parallel version, if we + * managed to launch a parallel query. + */ hjstate->js.ps.ExecProcNode = ExecHashJoin; + hjstate->js.jointype = node->join.jointype; /* * Miscellaneous initialization @@ -409,17 +626,6 @@ ExecInitHashJoin(HashJoin *node, EState *estate, int eflags) */ ExecAssignExprContext(estate, &hjstate->js.ps); - /* - * initialize child expressions - */ - hjstate->js.ps.qual = - ExecInitQual(node->join.plan.qual, (PlanState *) hjstate); - hjstate->js.jointype = node->join.jointype; - hjstate->js.joinqual = - ExecInitQual(node->join.joinqual, (PlanState *) hjstate); - hjstate->hashclauses = - ExecInitQual(node->hashclauses, (PlanState *) hjstate); - /* * initialize child nodes * @@ -431,13 +637,20 @@ ExecInitHashJoin(HashJoin *node, EState *estate, int eflags) hashNode = (Hash *) innerPlan(node); outerPlanState(hjstate) = ExecInitNode(outerNode, estate, eflags); + outerDesc = ExecGetResultType(outerPlanState(hjstate)); innerPlanState(hjstate) = ExecInitNode((Plan *) hashNode, estate, eflags); + innerDesc = ExecGetResultType(innerPlanState(hjstate)); + + /* + * Initialize result slot, type and projection. 
+ */ + ExecInitResultTupleSlotTL(&hjstate->js.ps); + ExecAssignProjectionInfo(&hjstate->js.ps, NULL); /* * tuple table initialization */ - ExecInitResultTupleSlot(estate, &hjstate->js.ps); - hjstate->hj_OuterTupleSlot = ExecInitExtraTupleSlot(estate); + hjstate->hj_OuterTupleSlot = ExecInitExtraTupleSlot(estate, outerDesc); /* * detect whether we need only consider the first matching inner tuple @@ -454,21 +667,17 @@ ExecInitHashJoin(HashJoin *node, EState *estate, int eflags) case JOIN_LEFT: case JOIN_ANTI: hjstate->hj_NullInnerTupleSlot = - ExecInitNullTupleSlot(estate, - ExecGetResultType(innerPlanState(hjstate))); + ExecInitNullTupleSlot(estate, innerDesc); break; case JOIN_RIGHT: hjstate->hj_NullOuterTupleSlot = - ExecInitNullTupleSlot(estate, - ExecGetResultType(outerPlanState(hjstate))); + ExecInitNullTupleSlot(estate, outerDesc); break; case JOIN_FULL: hjstate->hj_NullOuterTupleSlot = - ExecInitNullTupleSlot(estate, - ExecGetResultType(outerPlanState(hjstate))); + ExecInitNullTupleSlot(estate, outerDesc); hjstate->hj_NullInnerTupleSlot = - ExecInitNullTupleSlot(estate, - ExecGetResultType(innerPlanState(hjstate))); + ExecInitNullTupleSlot(estate, innerDesc); break; default: elog(ERROR, "unrecognized join type: %d", @@ -490,13 +699,14 @@ ExecInitHashJoin(HashJoin *node, EState *estate, int eflags) } /* - * initialize tuple type and projection info + * initialize child expressions */ - ExecAssignResultTypeFromTL(&hjstate->js.ps); - ExecAssignProjectionInfo(&hjstate->js.ps, NULL); - - ExecSetSlotDescriptor(hjstate->hj_OuterTupleSlot, - ExecGetResultType(outerPlanState(hjstate))); + hjstate->js.ps.qual = + ExecInitQual(node->join.plan.qual, (PlanState *) hjstate); + hjstate->js.joinqual = + ExecInitQual(node->join.joinqual, (PlanState *) hjstate); + hjstate->hashclauses = + ExecInitQual(node->hashclauses, (PlanState *) hjstate); /* * initialize hash-specific info @@ -517,6 +727,7 @@ ExecInitHashJoin(HashJoin *node, EState *estate, int eflags) */ lclauses = NIL; rclauses = NIL; + rhclauses = NIL; hoperators = NIL; foreach(l, node->hashclauses) { @@ -526,13 +737,15 @@ ExecInitHashJoin(HashJoin *node, EState *estate, int eflags) (PlanState *) hjstate)); rclauses = lappend(rclauses, ExecInitExpr(lsecond(hclause->args), (PlanState *) hjstate)); + rhclauses = lappend(rhclauses, ExecInitExpr(lsecond(hclause->args), + innerPlanState(hjstate))); hoperators = lappend_oid(hoperators, hclause->opno); } hjstate->hj_OuterHashKeys = lclauses; hjstate->hj_InnerHashKeys = rclauses; hjstate->hj_HashOperators = hoperators; /* child Hash node needs to evaluate inner hash keys, too */ - ((HashState *) innerPlanState(hjstate))->hashkeys = rclauses; + ((HashState *) innerPlanState(hjstate))->hashkeys = rhclauses; hjstate->hj_JoinState = HJ_BUILD_HASHTABLE; hjstate->hj_MatchedOuter = false; @@ -581,9 +794,9 @@ ExecEndHashJoin(HashJoinState *node) /* * ExecHashJoinOuterGetTuple * - * get the next outer tuple for hashjoin: either by - * executing the outer plan node in the first pass, or from - * the temp files for the hashjoin batches. + * get the next outer tuple for a parallel oblivious hashjoin: either by + * executing the outer plan node in the first pass, or from the temp + * files for the hashjoin batches. * * Returns a null slot if no more outer tuples (within the current batch). * @@ -661,6 +874,67 @@ ExecHashJoinOuterGetTuple(PlanState *outerNode, return NULL; } +/* + * ExecHashJoinOuterGetTuple variant for the parallel case. 
+ */ +static TupleTableSlot * +ExecParallelHashJoinOuterGetTuple(PlanState *outerNode, + HashJoinState *hjstate, + uint32 *hashvalue) +{ + HashJoinTable hashtable = hjstate->hj_HashTable; + int curbatch = hashtable->curbatch; + TupleTableSlot *slot; + + /* + * In the Parallel Hash case we only run the outer plan directly for + * single-batch hash joins. Otherwise we have to go to batch files, even + * for batch 0. + */ + if (curbatch == 0 && hashtable->nbatch == 1) + { + slot = ExecProcNode(outerNode); + + while (!TupIsNull(slot)) + { + ExprContext *econtext = hjstate->js.ps.ps_ExprContext; + + econtext->ecxt_outertuple = slot; + if (ExecHashGetHashValue(hashtable, econtext, + hjstate->hj_OuterHashKeys, + true, /* outer tuple */ + HJ_FILL_OUTER(hjstate), + hashvalue)) + return slot; + + /* + * That tuple couldn't match because of a NULL, so discard it and + * continue with the next one. + */ + slot = ExecProcNode(outerNode); + } + } + else if (curbatch < hashtable->nbatch) + { + MinimalTuple tuple; + + tuple = sts_parallel_scan_next(hashtable->batches[curbatch].outer_tuples, + hashvalue); + if (tuple != NULL) + { + slot = ExecStoreMinimalTuple(tuple, + hjstate->hj_OuterTupleSlot, + false); + return slot; + } + else + ExecClearTuple(hjstate->hj_OuterTupleSlot); + } + + /* End of this batch */ + return NULL; +} + /* * ExecHashJoinNewBatch * switch to a new hashjoin batch @@ -803,6 +1077,129 @@ ExecHashJoinNewBatch(HashJoinState *hjstate) return true; } +/* + * Choose a batch to work on, and attach to it. Returns true if successful, + * false if there are no more batches. + */ +static bool +ExecParallelHashJoinNewBatch(HashJoinState *hjstate) +{ + HashJoinTable hashtable = hjstate->hj_HashTable; + int start_batchno; + int batchno; + + /* + * If we started up so late that the batch tracking array has been freed + * already by ExecHashTableDetach(), then we are finished. See also + * ExecParallelHashEnsureBatchAccessors(). + */ + if (hashtable->batches == NULL) + return false; + + /* + * If we were already attached to a batch, remember not to bother checking + * it again, and detach from it (possibly freeing the hash table if we are + * last to detach). + */ + if (hashtable->curbatch >= 0) + { + hashtable->batches[hashtable->curbatch].done = true; + ExecHashTableDetachBatch(hashtable); + } + + /* + * Search for a batch that isn't done. We use an atomic counter to start + * our search at a different batch in every participant when there are + * more batches than participants. + */ + batchno = start_batchno = + pg_atomic_fetch_add_u32(&hashtable->parallel_state->distributor, 1) % + hashtable->nbatch; + do + { + uint32 hashvalue; + MinimalTuple tuple; + TupleTableSlot *slot; + + if (!hashtable->batches[batchno].done) + { + SharedTuplestoreAccessor *inner_tuples; + Barrier *batch_barrier = + &hashtable->batches[batchno].shared->batch_barrier; + + switch (BarrierAttach(batch_barrier)) + { + case PHJ_BATCH_ELECTING: + + /* One backend allocates the hash table. */ + if (BarrierArriveAndWait(batch_barrier, + WAIT_EVENT_HASH_BATCH_ELECTING)) + ExecParallelHashTableAlloc(hashtable, batchno); + /* Fall through. */ + + case PHJ_BATCH_ALLOCATING: + /* Wait for allocation to complete. */ + BarrierArriveAndWait(batch_barrier, + WAIT_EVENT_HASH_BATCH_ALLOCATING); + /* Fall through. */ + + case PHJ_BATCH_LOADING: + /* Start (or join in) loading tuples. 
*/ + ExecParallelHashTableSetCurrentBatch(hashtable, batchno); + inner_tuples = hashtable->batches[batchno].inner_tuples; + sts_begin_parallel_scan(inner_tuples); + while ((tuple = sts_parallel_scan_next(inner_tuples, + &hashvalue))) + { + slot = ExecStoreMinimalTuple(tuple, + hjstate->hj_HashTupleSlot, + false); + ExecParallelHashTableInsertCurrentBatch(hashtable, slot, + hashvalue); + } + sts_end_parallel_scan(inner_tuples); + BarrierArriveAndWait(batch_barrier, + WAIT_EVENT_HASH_BATCH_LOADING); + /* Fall through. */ + + case PHJ_BATCH_PROBING: + + /* + * This batch is ready to probe. Return control to + * caller. We stay attached to batch_barrier so that the + * hash table stays alive until everyone's finished + * probing it, but no participant is allowed to wait at + * this barrier again (or else a deadlock could occur). + * All attached participants must eventually call + * BarrierArriveAndDetach() so that the final phase + * PHJ_BATCH_DONE can be reached. + */ + ExecParallelHashTableSetCurrentBatch(hashtable, batchno); + sts_begin_parallel_scan(hashtable->batches[batchno].outer_tuples); + return true; + + case PHJ_BATCH_DONE: + + /* + * Already done. Detach and go around again (if any + * remain). + */ + BarrierDetach(batch_barrier); + hashtable->batches[batchno].done = true; + hashtable->curbatch = -1; + break; + + default: + elog(ERROR, "unexpected batch phase %d", + BarrierPhase(batch_barrier)); + } + } + batchno = (batchno + 1) % hashtable->nbatch; + } while (batchno != start_batchno); + + return false; +} + /* * ExecHashJoinSaveTuple * save a tuple to a batch file. @@ -964,3 +1361,176 @@ ExecReScanHashJoin(HashJoinState *node) if (node->js.ps.lefttree->chgParam == NULL) ExecReScan(node->js.ps.lefttree); } + +void +ExecShutdownHashJoin(HashJoinState *node) +{ + if (node->hj_HashTable) + { + /* + * Detach from shared state before DSM memory goes away. This makes + * sure that we don't have any pointers into DSM memory by the time + * ExecEndHashJoin runs. + */ + ExecHashTableDetachBatch(node->hj_HashTable); + ExecHashTableDetach(node->hj_HashTable); + } +} + +static void +ExecParallelHashJoinPartitionOuter(HashJoinState *hjstate) +{ + PlanState *outerState = outerPlanState(hjstate); + ExprContext *econtext = hjstate->js.ps.ps_ExprContext; + HashJoinTable hashtable = hjstate->hj_HashTable; + TupleTableSlot *slot; + uint32 hashvalue; + int i; + + Assert(hjstate->hj_FirstOuterTupleSlot == NULL); + + /* Execute outer plan, writing all tuples to shared tuplestores. */ + for (;;) + { + slot = ExecProcNode(outerState); + if (TupIsNull(slot)) + break; + econtext->ecxt_outertuple = slot; + if (ExecHashGetHashValue(hashtable, econtext, + hjstate->hj_OuterHashKeys, + true, /* outer tuple */ + HJ_FILL_OUTER(hjstate), + &hashvalue)) + { + int batchno; + int bucketno; + + ExecHashGetBucketAndBatch(hashtable, hashvalue, &bucketno, + &batchno); + sts_puttuple(hashtable->batches[batchno].outer_tuples, + &hashvalue, ExecFetchSlotMinimalTuple(slot)); + } + CHECK_FOR_INTERRUPTS(); + } + + /* Make sure all outer partitions are readable by any backend. 
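As an illustration only (not part of the patch), the SharedTuplestore handoff used here works in two steps: every participant writes tuples tagged with their hash value and then ends its write, after which any participant may run a parallel scan over the same accessor.  'accessor', 'mtuple' and 'hashvalue' are assumed to be in scope.

    sts_puttuple(accessor, &hashvalue, mtuple);   // during partitioning
    sts_end_write(accessor);                      // make the data readable

    sts_begin_parallel_scan(accessor);            // later, e.g. while probing
    while ((mtuple = sts_parallel_scan_next(accessor, &hashvalue)) != NULL)
    {
        // process one saved tuple
    }
    sts_end_parallel_scan(accessor);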
*/ + for (i = 0; i < hashtable->nbatch; ++i) + sts_end_write(hashtable->batches[i].outer_tuples); +} + +void +ExecHashJoinEstimate(HashJoinState *state, ParallelContext *pcxt) +{ + shm_toc_estimate_chunk(&pcxt->estimator, sizeof(ParallelHashJoinState)); + shm_toc_estimate_keys(&pcxt->estimator, 1); +} + +void +ExecHashJoinInitializeDSM(HashJoinState *state, ParallelContext *pcxt) +{ + int plan_node_id = state->js.ps.plan->plan_node_id; + HashState *hashNode; + ParallelHashJoinState *pstate; + + /* + * Disable shared hash table mode if we failed to create a real DSM + * segment, because that means that we don't have a DSA area to work with. + */ + if (pcxt->seg == NULL) + return; + + ExecSetExecProcNode(&state->js.ps, ExecParallelHashJoin); + + /* + * Set up the state needed to coordinate access to the shared hash + * table(s), using the plan node ID as the toc key. + */ + pstate = shm_toc_allocate(pcxt->toc, sizeof(ParallelHashJoinState)); + shm_toc_insert(pcxt->toc, plan_node_id, pstate); + + /* + * Set up the shared hash join state with no batches initially. + * ExecHashTableCreate() will prepare at least one later and set nbatch + * and space_allowed. + */ + pstate->nbatch = 0; + pstate->space_allowed = 0; + pstate->batches = InvalidDsaPointer; + pstate->old_batches = InvalidDsaPointer; + pstate->nbuckets = 0; + pstate->growth = PHJ_GROWTH_OK; + pstate->chunk_work_queue = InvalidDsaPointer; + pg_atomic_init_u32(&pstate->distributor, 0); + pstate->nparticipants = pcxt->nworkers + 1; + pstate->total_tuples = 0; + LWLockInitialize(&pstate->lock, + LWTRANCHE_PARALLEL_HASH_JOIN); + BarrierInit(&pstate->build_barrier, 0); + BarrierInit(&pstate->grow_batches_barrier, 0); + BarrierInit(&pstate->grow_buckets_barrier, 0); + + /* Set up the space we'll use for shared temporary files. */ + SharedFileSetInit(&pstate->fileset, pcxt->seg); + + /* Initialize the shared state in the hash node. */ + hashNode = (HashState *) innerPlanState(state); + hashNode->parallel_state = pstate; +} + +/* ---------------------------------------------------------------- + * ExecHashJoinReInitializeDSM + * + * Reset shared state before beginning a fresh scan. + * ---------------------------------------------------------------- + */ +void +ExecHashJoinReInitializeDSM(HashJoinState *state, ParallelContext *cxt) +{ + int plan_node_id = state->js.ps.plan->plan_node_id; + ParallelHashJoinState *pstate = + shm_toc_lookup(cxt->toc, plan_node_id, false); + + /* + * It would be possible to reuse the shared hash table in single-batch + * cases by resetting and then fast-forwarding build_barrier to + * PHJ_BUILD_DONE and batch 0's batch_barrier to PHJ_BATCH_PROBING, but + * currently shared hash tables are already freed by now (by the last + * participant to detach from the batch). We could consider keeping it + * around for single-batch joins. We'd also need to adjust + * finalize_plan() so that it doesn't record a dummy dependency for + * Parallel Hash nodes, preventing the rescan optimization. For now we + * don't try. + */ + + /* Detach, freeing any remaining shared memory. */ + if (state->hj_HashTable != NULL) + { + ExecHashTableDetachBatch(state->hj_HashTable); + ExecHashTableDetach(state->hj_HashTable); + } + + /* Clear any shared batch files. */ + SharedFileSetDeleteAll(&pstate->fileset); + + /* Reset build_barrier to PHJ_BUILD_ELECTING so we can go around again. 
*/ + BarrierInit(&pstate->build_barrier, 0); +} + +void +ExecHashJoinInitializeWorker(HashJoinState *state, + ParallelWorkerContext *pwcxt) +{ + HashState *hashNode; + int plan_node_id = state->js.ps.plan->plan_node_id; + ParallelHashJoinState *pstate = + shm_toc_lookup(pwcxt->toc, plan_node_id, false); + + /* Attach to the space for shared temporary files. */ + SharedFileSetAttach(&pstate->fileset, pwcxt->seg); + + /* Attach to the shared state in the hash node. */ + hashNode = (HashState *) innerPlanState(state); + hashNode->parallel_state = pstate; + + ExecSetExecProcNode(&state->js.ps, ExecParallelHashJoin); +} diff --git a/src/backend/executor/nodeIndexonlyscan.c b/src/backend/executor/nodeIndexonlyscan.c index fe7ba3f1a4..865a056c02 100644 --- a/src/backend/executor/nodeIndexonlyscan.c +++ b/src/backend/executor/nodeIndexonlyscan.c @@ -3,7 +3,7 @@ * nodeIndexonlyscan.c * Routines to support index-only scans * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * @@ -25,6 +25,7 @@ * parallel index-only scan * ExecIndexOnlyScanInitializeDSM initialize DSM for parallel * index-only scan + * ExecIndexOnlyScanReInitializeDSM reinitialize DSM for fresh scan * ExecIndexOnlyScanInitializeWorker attach to DSM info in parallel worker */ #include "postgres.h" @@ -83,8 +84,8 @@ IndexOnlyNext(IndexOnlyScanState *node) { /* * We reach here if the index only scan is not parallel, or if we're - * executing a index only scan that was intended to be parallel - * serially. + * serially executing an index only scan that was planned to be + * parallel. */ scandesc = index_beginscan(node->ss.ss_currentRelation, node->ioss_RelationDesc, @@ -161,7 +162,7 @@ IndexOnlyNext(IndexOnlyScanState *node) /* * Rats, we have to visit the heap to check visibility. */ - node->ioss_HeapFetches++; + InstrCountTuples2(node, 1); tuple = index_fetch_heap(scandesc); if (tuple == NULL) continue; /* no visible tuple, try next index entry */ @@ -198,7 +199,7 @@ IndexOnlyNext(IndexOnlyScanState *node) */ Assert(slot->tts_tupleDescriptor->natts == scandesc->xs_hitupdesc->natts); - ExecStoreTuple(scandesc->xs_hitup, slot, InvalidBuffer, false); + ExecStoreHeapTuple(scandesc->xs_hitup, slot, false); } else if (scandesc->xs_itup) StoreIndexTuple(slot, scandesc->xs_itup, scandesc->xs_itupdesc); @@ -213,8 +214,7 @@ IndexOnlyNext(IndexOnlyScanState *node) if (scandesc->xs_recheck) { econtext->ecxt_scantuple = slot; - ResetExprContext(econtext); - if (!ExecQual(node->indexqual, econtext)) + if (!ExecQualAndReset(node->indexqual, econtext)) { /* Fails recheck, so drop it and loop back for another */ InstrCountFiltered2(node, 1); @@ -336,16 +336,6 @@ ExecIndexOnlyScan(PlanState *pstate) void ExecReScanIndexOnlyScan(IndexOnlyScanState *node) { - bool reset_parallel_scan = true; - - /* - * If we are here to just update the scan keys, then don't reset parallel - * scan. For detailed reason behind this look in the comments for - * ExecReScanIndexScan. - */ - if (node->ioss_NumRuntimeKeys != 0 && !node->ioss_RuntimeKeysReady) - reset_parallel_scan = false; - /* * If we are doing runtime key calculations (ie, any of the index key * values weren't simple Consts), compute the new key values. 
But first, @@ -366,15 +356,10 @@ ExecReScanIndexOnlyScan(IndexOnlyScanState *node) /* reset index scan */ if (node->ioss_ScanDesc) - { - index_rescan(node->ioss_ScanDesc, node->ioss_ScanKeys, node->ioss_NumScanKeys, node->ioss_OrderByKeys, node->ioss_NumOrderByKeys); - if (reset_parallel_scan && node->ioss_ScanDesc->parallel_scan) - index_parallelrescan(node->ioss_ScanDesc); - } ExecScanReScan(&node->ss); } @@ -388,14 +373,12 @@ ExecEndIndexOnlyScan(IndexOnlyScanState *node) { Relation indexRelationDesc; IndexScanDesc indexScanDesc; - Relation relation; /* * extract information from the node */ indexRelationDesc = node->ioss_RelationDesc; indexScanDesc = node->ioss_ScanDesc; - relation = node->ss.ss_currentRelation; /* Release VM buffer pin, if any. */ if (node->ioss_VMBuffer != InvalidBuffer) @@ -416,7 +399,8 @@ ExecEndIndexOnlyScan(IndexOnlyScanState *node) /* * clear out tuple table slots */ - ExecClearTuple(node->ss.ps.ps_ResultTupleSlot); + if (node->ss.ps.ps_ResultTupleSlot) + ExecClearTuple(node->ss.ps.ps_ResultTupleSlot); ExecClearTuple(node->ss.ss_ScanTupleSlot); /* @@ -426,20 +410,43 @@ ExecEndIndexOnlyScan(IndexOnlyScanState *node) index_endscan(indexScanDesc); if (indexRelationDesc) index_close(indexRelationDesc, NoLock); - - /* - * close the heap relation. - */ - ExecCloseScanRelation(relation); } /* ---------------------------------------------------------------- * ExecIndexOnlyMarkPos + * + * Note: we assume that no caller attempts to set a mark before having read + * at least one tuple. Otherwise, ioss_ScanDesc might still be NULL. * ---------------------------------------------------------------- */ void ExecIndexOnlyMarkPos(IndexOnlyScanState *node) { + EState *estate = node->ss.ps.state; + + if (estate->es_epqTuple != NULL) + { + /* + * We are inside an EvalPlanQual recheck. If a test tuple exists for + * this relation, then we shouldn't access the index at all. We would + * instead need to save, and later restore, the state of the + * es_epqScanDone flag, so that re-fetching the test tuple is + * possible. However, given the assumption that no caller sets a mark + * at the start of the scan, we can only get here with es_epqScanDone + * already set, and so no state need be saved. 
+ */ + Index scanrelid = ((Scan *) node->ss.ps.plan)->scanrelid; + + Assert(scanrelid > 0); + if (estate->es_epqTupleSet[scanrelid - 1]) + { + /* Verify the claim above */ + if (!estate->es_epqScanDone[scanrelid - 1]) + elog(ERROR, "unexpected ExecIndexOnlyMarkPos call in EPQ recheck"); + return; + } + } + index_markpos(node->ioss_ScanDesc); } @@ -450,6 +457,23 @@ ExecIndexOnlyMarkPos(IndexOnlyScanState *node) void ExecIndexOnlyRestrPos(IndexOnlyScanState *node) { + EState *estate = node->ss.ps.state; + + if (estate->es_epqTuple != NULL) + { + /* See comments in ExecIndexOnlyMarkPos */ + Index scanrelid = ((Scan *) node->ss.ps.plan)->scanrelid; + + Assert(scanrelid > 0); + if (estate->es_epqTupleSet[scanrelid - 1]) + { + /* Verify the claim above */ + if (!estate->es_epqScanDone[scanrelid - 1]) + elog(ERROR, "unexpected ExecIndexOnlyRestrPos call in EPQ recheck"); + return; + } + } + index_restrpos(node->ioss_ScanDesc); } @@ -479,7 +503,6 @@ ExecInitIndexOnlyScan(IndexOnlyScan *node, EState *estate, int eflags) indexstate->ss.ps.plan = (Plan *) node; indexstate->ss.ps.state = estate; indexstate->ss.ps.ExecProcNode = ExecIndexOnlyScan; - indexstate->ioss_HeapFetches = 0; /* * Miscellaneous initialization @@ -489,24 +512,7 @@ ExecInitIndexOnlyScan(IndexOnlyScan *node, EState *estate, int eflags) ExecAssignExprContext(estate, &indexstate->ss.ps); /* - * initialize child expressions - * - * Note: we don't initialize all of the indexorderby expression, only the - * sub-parts corresponding to runtime keys (see below). - */ - indexstate->ss.ps.qual = - ExecInitQual(node->scan.plan.qual, (PlanState *) indexstate); - indexstate->indexqual = - ExecInitQual(node->indexqual, (PlanState *) indexstate); - - /* - * tuple table initialization - */ - ExecInitResultTupleSlot(estate, &indexstate->ss.ps); - ExecInitScanTupleSlot(estate, &indexstate->ss); - - /* - * open the base relation and acquire appropriate lock on it. + * open the scan relation */ currentRelation = ExecOpenScanRelation(estate, node->scan.scanrelid, eflags); @@ -521,16 +527,26 @@ ExecInitIndexOnlyScan(IndexOnlyScan *node, EState *estate, int eflags) * suitable data anyway.) */ tupDesc = ExecTypeFromTL(node->indextlist, false); - ExecAssignScanType(&indexstate->ss, tupDesc); + ExecInitScanTupleSlot(estate, &indexstate->ss, tupDesc); /* - * Initialize result tuple type and projection info. The node's - * targetlist will contain Vars with varno = INDEX_VAR, referencing the - * scan tuple. + * Initialize result type and projection info. The node's targetlist will + * contain Vars with varno = INDEX_VAR, referencing the scan tuple. */ - ExecAssignResultTypeFromTL(&indexstate->ss.ps); + ExecInitResultTypeTL(&indexstate->ss.ps); ExecAssignScanProjectionInfoWithVarno(&indexstate->ss, INDEX_VAR); + /* + * initialize child expressions + * + * Note: we don't initialize all of the indexorderby expression, only the + * sub-parts corresponding to runtime keys (see below). + */ + indexstate->ss.ps.qual = + ExecInitQual(node->scan.plan.qual, (PlanState *) indexstate); + indexstate->indexqual = + ExecInitQual(node->indexqual, (PlanState *) indexstate); + /* * If we are just doing EXPLAIN (ie, aren't going to run the plan), stop * here. This allows an index-advisor plugin to EXPLAIN a plan containing @@ -618,7 +634,8 @@ ExecInitIndexOnlyScan(IndexOnlyScan *node, EState *estate, int eflags) /* ---------------------------------------------------------------- * ExecIndexOnlyScanEstimate * - * estimates the space required to serialize index-only scan node. 
+ * Compute the amount of space we'll need in the parallel + * query DSM, and inform pcxt->estimator about our needs. * ---------------------------------------------------------------- */ void @@ -671,6 +688,19 @@ ExecIndexOnlyScanInitializeDSM(IndexOnlyScanState *node, node->ioss_OrderByKeys, node->ioss_NumOrderByKeys); } +/* ---------------------------------------------------------------- + * ExecIndexOnlyScanReInitializeDSM + * + * Reset shared state before beginning a fresh scan. + * ---------------------------------------------------------------- + */ +void +ExecIndexOnlyScanReInitializeDSM(IndexOnlyScanState *node, + ParallelContext *pcxt) +{ + index_parallelrescan(node->ioss_ScanDesc); +} + /* ---------------------------------------------------------------- * ExecIndexOnlyScanInitializeWorker * @@ -678,11 +708,12 @@ ExecIndexOnlyScanInitializeDSM(IndexOnlyScanState *node, * ---------------------------------------------------------------- */ void -ExecIndexOnlyScanInitializeWorker(IndexOnlyScanState *node, shm_toc *toc) +ExecIndexOnlyScanInitializeWorker(IndexOnlyScanState *node, + ParallelWorkerContext *pwcxt) { ParallelIndexScanDesc piscan; - piscan = shm_toc_lookup(toc, node->ss.ps.plan->plan_node_id, false); + piscan = shm_toc_lookup(pwcxt->toc, node->ss.ps.plan->plan_node_id, false); node->ioss_ScanDesc = index_beginscan_parallel(node->ss.ss_currentRelation, node->ioss_RelationDesc, diff --git a/src/backend/executor/nodeIndexscan.c b/src/backend/executor/nodeIndexscan.c index 404076d593..8593c0e305 100644 --- a/src/backend/executor/nodeIndexscan.c +++ b/src/backend/executor/nodeIndexscan.c @@ -3,7 +3,7 @@ * nodeIndexscan.c * Routines to support indexed scans of relations * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * @@ -24,6 +24,7 @@ * ExecIndexRestrPos restores scan position. * ExecIndexScanEstimate estimates DSM space needed for parallel index scan * ExecIndexScanInitializeDSM initialize DSM for parallel indexscan + * ExecIndexScanReInitializeDSM reinitialize DSM for fresh scan * ExecIndexScanInitializeWorker attach to DSM info in parallel worker */ #include "postgres.h" @@ -107,7 +108,7 @@ IndexNext(IndexScanState *node) { /* * We reach here if the index scan is not parallel, or if we're - * executing a index scan that was intended to be parallel serially. + * serially executing an index scan that was planned to be parallel. */ scandesc = index_beginscan(node->ss.ss_currentRelation, node->iss_RelationDesc, @@ -139,10 +140,10 @@ IndexNext(IndexScanState *node) * Note: we pass 'false' because tuples returned by amgetnext are * pointers onto disk pages and must not be pfree()'d. 
*/ - ExecStoreTuple(tuple, /* tuple to store */ - slot, /* slot to store in */ - scandesc->xs_cbuf, /* buffer containing tuple */ - false); /* don't pfree */ + ExecStoreBufferHeapTuple(tuple, /* tuple to store */ + slot, /* slot to store in */ + scandesc->xs_cbuf); /* buffer containing + * tuple */ /* * If the index was lossy, we have to recheck the index quals using @@ -151,8 +152,7 @@ IndexNext(IndexScanState *node) if (scandesc->xs_recheck) { econtext->ecxt_scantuple = slot; - ResetExprContext(econtext); - if (!ExecQual(node->indexqualorig, econtext)) + if (!ExecQualAndReset(node->indexqualorig, econtext)) { /* Fails recheck, so drop it and loop back for another */ InstrCountFiltered2(node, 1); @@ -214,7 +214,7 @@ IndexNextWithReorder(IndexScanState *node) { /* * We reach here if the index scan is not parallel, or if we're - * executing a index scan that was intended to be parallel serially. + * serially executing an index scan that was planned to be parallel. */ scandesc = index_beginscan(node->ss.ss_currentRelation, node->iss_RelationDesc, @@ -257,7 +257,7 @@ IndexNextWithReorder(IndexScanState *node) tuple = reorderqueue_pop(node); /* Pass 'true', as the tuple in the queue is a palloc'd copy */ - ExecStoreTuple(tuple, slot, InvalidBuffer, true); + ExecStoreHeapTuple(tuple, slot, true); return slot; } } @@ -284,13 +284,11 @@ IndexNextWithReorder(IndexScanState *node) /* * Store the scanned tuple in the scan tuple slot of the scan state. - * Note: we pass 'false' because tuples returned by amgetnext are - * pointers onto disk pages and must not be pfree()'d. */ - ExecStoreTuple(tuple, /* tuple to store */ - slot, /* slot to store in */ - scandesc->xs_cbuf, /* buffer containing tuple */ - false); /* don't pfree */ + ExecStoreBufferHeapTuple(tuple, /* tuple to store */ + slot, /* slot to store in */ + scandesc->xs_cbuf); /* buffer containing + * tuple */ /* * If the index was lossy, we have to recheck the index quals and @@ -299,8 +297,7 @@ IndexNextWithReorder(IndexScanState *node) if (scandesc->xs_recheck) { econtext->ecxt_scantuple = slot; - ResetExprContext(econtext); - if (!ExecQual(node->indexqualorig, econtext)) + if (!ExecQualAndReset(node->indexqualorig, econtext)) { /* Fails recheck, so drop it and loop back for another */ InstrCountFiltered2(node, 1); @@ -419,10 +416,7 @@ IndexRecheck(IndexScanState *node, TupleTableSlot *slot) /* Does the tuple meet the indexqual condition? */ econtext->ecxt_scantuple = slot; - - ResetExprContext(econtext); - - return ExecQual(node->indexqualorig, econtext); + return ExecQualAndReset(node->indexqualorig, econtext); } @@ -473,9 +467,10 @@ reorderqueue_cmp(const pairingheap_node *a, const pairingheap_node *b, ReorderTuple *rtb = (ReorderTuple *) b; IndexScanState *node = (IndexScanState *) arg; - return -cmp_orderbyvals(rta->orderbyvals, rta->orderbynulls, - rtb->orderbyvals, rtb->orderbynulls, - node); + /* exchange argument order to invert the sort order */ + return cmp_orderbyvals(rtb->orderbyvals, rtb->orderbynulls, + rta->orderbyvals, rta->orderbynulls, + node); } /* @@ -577,18 +572,6 @@ ExecIndexScan(PlanState *pstate) void ExecReScanIndexScan(IndexScanState *node) { - bool reset_parallel_scan = true; - - /* - * If we are here to just update the scan keys, then don't reset parallel - * scan. We don't want each of the participating process in the parallel - * scan to update the shared parallel scan state at the start of the scan. 
- * It is quite possible that one of the participants has already begun - * scanning the index when another has yet to start it. - */ - if (node->iss_NumRuntimeKeys != 0 && !node->iss_RuntimeKeysReady) - reset_parallel_scan = false; - /* * If we are doing runtime key calculations (ie, any of the index key * values weren't simple Consts), compute the new key values. But first, @@ -614,21 +597,11 @@ ExecReScanIndexScan(IndexScanState *node) reorderqueue_pop(node); } - /* - * Reset (parallel) index scan. For parallel-aware nodes, the scan - * descriptor is initialized during actual execution of node and we can - * reach here before that (ex. during execution of nest loop join). So, - * avoid updating the scan descriptor at that time. - */ + /* reset index scan */ if (node->iss_ScanDesc) - { index_rescan(node->iss_ScanDesc, node->iss_ScanKeys, node->iss_NumScanKeys, node->iss_OrderByKeys, node->iss_NumOrderByKeys); - - if (reset_parallel_scan && node->iss_ScanDesc->parallel_scan) - index_parallelrescan(node->iss_ScanDesc); - } node->iss_ReachedEnd = false; ExecScanReScan(&node->ss); @@ -697,8 +670,8 @@ ExecIndexEvalRuntimeKeys(ExprContext *econtext, * ExecIndexEvalArrayKeys * Evaluate any array key values, and set up to iterate through arrays. * - * Returns TRUE if there are array elements to consider; FALSE means there - * is at least one null or empty array, so no match is possible. On TRUE + * Returns true if there are array elements to consider; false means there + * is at least one null or empty array, so no match is possible. On true * result, the scankeys are initialized with the first elements of the arrays. */ bool @@ -777,8 +750,8 @@ ExecIndexEvalArrayKeys(ExprContext *econtext, * ExecIndexAdvanceArrayKeys * Advance to the next set of array key values, if any. * - * Returns TRUE if there is another set of values to consider, FALSE if not. - * On TRUE result, the scankeys are initialized with the next set of values. + * Returns true if there is another set of values to consider, false if not. + * On true result, the scankeys are initialized with the next set of values. */ bool ExecIndexAdvanceArrayKeys(IndexArrayKeyInfo *arrayKeys, int numArrayKeys) @@ -830,14 +803,12 @@ ExecEndIndexScan(IndexScanState *node) { Relation indexRelationDesc; IndexScanDesc indexScanDesc; - Relation relation; /* * extract information from the node */ indexRelationDesc = node->iss_RelationDesc; indexScanDesc = node->iss_ScanDesc; - relation = node->ss.ss_currentRelation; /* * Free the exprcontext(s) ... now dead code, see ExecFreeExprContext @@ -851,7 +822,8 @@ ExecEndIndexScan(IndexScanState *node) /* * clear out tuple table slots */ - ExecClearTuple(node->ss.ps.ps_ResultTupleSlot); + if (node->ss.ps.ps_ResultTupleSlot) + ExecClearTuple(node->ss.ps.ps_ResultTupleSlot); ExecClearTuple(node->ss.ss_ScanTupleSlot); /* @@ -861,20 +833,43 @@ ExecEndIndexScan(IndexScanState *node) index_endscan(indexScanDesc); if (indexRelationDesc) index_close(indexRelationDesc, NoLock); - - /* - * close the heap relation. - */ - ExecCloseScanRelation(relation); } /* ---------------------------------------------------------------- * ExecIndexMarkPos + * + * Note: we assume that no caller attempts to set a mark before having read + * at least one tuple. Otherwise, iss_ScanDesc might still be NULL. 
* ---------------------------------------------------------------- */ void ExecIndexMarkPos(IndexScanState *node) { + EState *estate = node->ss.ps.state; + + if (estate->es_epqTuple != NULL) + { + /* + * We are inside an EvalPlanQual recheck. If a test tuple exists for + * this relation, then we shouldn't access the index at all. We would + * instead need to save, and later restore, the state of the + * es_epqScanDone flag, so that re-fetching the test tuple is + * possible. However, given the assumption that no caller sets a mark + * at the start of the scan, we can only get here with es_epqScanDone + * already set, and so no state need be saved. + */ + Index scanrelid = ((Scan *) node->ss.ps.plan)->scanrelid; + + Assert(scanrelid > 0); + if (estate->es_epqTupleSet[scanrelid - 1]) + { + /* Verify the claim above */ + if (!estate->es_epqScanDone[scanrelid - 1]) + elog(ERROR, "unexpected ExecIndexMarkPos call in EPQ recheck"); + return; + } + } + index_markpos(node->iss_ScanDesc); } @@ -885,6 +880,23 @@ ExecIndexMarkPos(IndexScanState *node) void ExecIndexRestrPos(IndexScanState *node) { + EState *estate = node->ss.ps.state; + + if (estate->es_epqTuple != NULL) + { + /* See comments in ExecIndexMarkPos */ + Index scanrelid = ((Scan *) node->ss.ps.plan)->scanrelid; + + Assert(scanrelid > 0); + if (estate->es_epqTupleSet[scanrelid - 1]) + { + /* Verify the claim above */ + if (!estate->es_epqScanDone[scanrelid - 1]) + elog(ERROR, "unexpected ExecIndexRestrPos call in EPQ recheck"); + return; + } + } + index_restrpos(node->iss_ScanDesc); } @@ -921,6 +933,26 @@ ExecInitIndexScan(IndexScan *node, EState *estate, int eflags) */ ExecAssignExprContext(estate, &indexstate->ss.ps); + /* + * open the scan relation + */ + currentRelation = ExecOpenScanRelation(estate, node->scan.scanrelid, eflags); + + indexstate->ss.ss_currentRelation = currentRelation; + indexstate->ss.ss_currentScanDesc = NULL; /* no heap scan here */ + + /* + * get the scan type from the relation descriptor. + */ + ExecInitScanTupleSlot(estate, &indexstate->ss, + RelationGetDescr(currentRelation)); + + /* + * Initialize result type and projection. + */ + ExecInitResultTypeTL(&indexstate->ss.ps); + ExecAssignScanProjectionInfo(&indexstate->ss); + /* * initialize child expressions * @@ -938,31 +970,6 @@ ExecInitIndexScan(IndexScan *node, EState *estate, int eflags) indexstate->indexorderbyorig = ExecInitExprList(node->indexorderbyorig, (PlanState *) indexstate); - /* - * tuple table initialization - */ - ExecInitResultTupleSlot(estate, &indexstate->ss.ps); - ExecInitScanTupleSlot(estate, &indexstate->ss); - - /* - * open the base relation and acquire appropriate lock on it. - */ - currentRelation = ExecOpenScanRelation(estate, node->scan.scanrelid, eflags); - - indexstate->ss.ss_currentRelation = currentRelation; - indexstate->ss.ss_currentScanDesc = NULL; /* no heap scan here */ - - /* - * get the scan type from the relation descriptor. - */ - ExecAssignScanType(&indexstate->ss, RelationGetDescr(currentRelation)); - - /* - * Initialize result tuple type and projection info. - */ - ExecAssignResultTypeFromTL(&indexstate->ss.ps); - ExecAssignScanProjectionInfo(&indexstate->ss); - /* * If we are just doing EXPLAIN (ie, aren't going to run the plan), stop * here. This allows an index-advisor plugin to EXPLAIN a plan containing @@ -1213,7 +1220,9 @@ ExecIndexBuildScanKeys(PlanState *planstate, Relation index, Expr *leftop; /* expr on lhs of operator */ Expr *rightop; /* expr on rhs ... 
*/ AttrNumber varattno; /* att number used in scan */ + int indnkeyatts; + indnkeyatts = IndexRelationGetNumberOfKeyAttributes(index); if (IsA(clause, OpExpr)) { /* indexkey op const or indexkey op expression */ @@ -1238,7 +1247,7 @@ ExecIndexBuildScanKeys(PlanState *planstate, Relation index, elog(ERROR, "indexqual doesn't have key on left side"); varattno = ((Var *) leftop)->varattno; - if (varattno < 1 || varattno > index->rd_index->indnatts) + if (varattno < 1 || varattno > indnkeyatts) elog(ERROR, "bogus index qualification"); /* @@ -1361,7 +1370,7 @@ ExecIndexBuildScanKeys(PlanState *planstate, Relation index, opnos_cell = lnext(opnos_cell); if (index->rd_rel->relam != BTREE_AM_OID || - varattno < 1 || varattno > index->rd_index->indnatts) + varattno < 1 || varattno > indnkeyatts) elog(ERROR, "bogus RowCompare index qualification"); opfamily = index->rd_opfamily[varattno - 1]; @@ -1485,7 +1494,7 @@ ExecIndexBuildScanKeys(PlanState *planstate, Relation index, elog(ERROR, "indexqual doesn't have key on left side"); varattno = ((Var *) leftop)->varattno; - if (varattno < 1 || varattno > index->rd_index->indnatts) + if (varattno < 1 || varattno > indnkeyatts) elog(ERROR, "bogus index qualification"); /* @@ -1665,7 +1674,8 @@ ExecIndexBuildScanKeys(PlanState *planstate, Relation index, /* ---------------------------------------------------------------- * ExecIndexScanEstimate * - * estimates the space required to serialize indexscan node. + * Compute the amount of space we'll need in the parallel + * query DSM, and inform pcxt->estimator about our needs. * ---------------------------------------------------------------- */ void @@ -1716,6 +1726,19 @@ ExecIndexScanInitializeDSM(IndexScanState *node, node->iss_OrderByKeys, node->iss_NumOrderByKeys); } +/* ---------------------------------------------------------------- + * ExecIndexScanReInitializeDSM + * + * Reset shared state before beginning a fresh scan. 
+ * ---------------------------------------------------------------- + */ +void +ExecIndexScanReInitializeDSM(IndexScanState *node, + ParallelContext *pcxt) +{ + index_parallelrescan(node->iss_ScanDesc); +} + /* ---------------------------------------------------------------- * ExecIndexScanInitializeWorker * @@ -1723,11 +1746,12 @@ ExecIndexScanInitializeDSM(IndexScanState *node, * ---------------------------------------------------------------- */ void -ExecIndexScanInitializeWorker(IndexScanState *node, shm_toc *toc) +ExecIndexScanInitializeWorker(IndexScanState *node, + ParallelWorkerContext *pwcxt) { ParallelIndexScanDesc piscan; - piscan = shm_toc_lookup(toc, node->ss.ps.plan->plan_node_id, false); + piscan = shm_toc_lookup(pwcxt->toc, node->ss.ps.plan->plan_node_id, false); node->iss_ScanDesc = index_beginscan_parallel(node->ss.ss_currentRelation, node->iss_RelationDesc, diff --git a/src/backend/executor/nodeLimit.c b/src/backend/executor/nodeLimit.c index ac5a2ff0e6..f0b6819140 100644 --- a/src/backend/executor/nodeLimit.c +++ b/src/backend/executor/nodeLimit.c @@ -3,7 +3,7 @@ * nodeLimit.c * Routines to handle limiting of query results where appropriate * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * @@ -27,7 +27,7 @@ #include "nodes/nodeFuncs.h" static void recompute_limits(LimitState *node); -static void pass_down_bound(LimitState *node, PlanState *child_node); +static int64 compute_tuples_needed(LimitState *node); /* ---------------------------------------------------------------- @@ -134,6 +134,14 @@ ExecLimit(PlanState *pstate) node->position - node->offset >= node->count) { node->lstate = LIMIT_WINDOWEND; + + /* + * If we know we won't need to back up, we can release + * resources at this point. + */ + if (!(node->ps.state->es_top_eflags & EXEC_FLAG_BACKWARD)) + (void) ExecShutdownNode(outerPlan); + return NULL; } @@ -297,64 +305,26 @@ recompute_limits(LimitState *node) /* Set state-machine state */ node->lstate = LIMIT_RESCAN; - /* Notify child node about limit, if useful */ - pass_down_bound(node, outerPlanState(node)); + /* + * Notify child node about limit. Note: think not to "optimize" by + * skipping ExecSetTupleBound if compute_tuples_needed returns < 0. We + * must update the child node anyway, in case this is a rescan and the + * previous time we got a different result. + */ + ExecSetTupleBound(compute_tuples_needed(node), outerPlanState(node)); } /* - * If we have a COUNT, and our input is a Sort node, notify it that it can - * use bounded sort. Also, if our input is a MergeAppend, we can apply the - * same bound to any Sorts that are direct children of the MergeAppend, - * since the MergeAppend surely need read no more than that many tuples from - * any one input. We also have to be prepared to look through a Result, - * since the planner might stick one atop MergeAppend for projection purposes. - * - * This is a bit of a kluge, but we don't have any more-abstract way of - * communicating between the two nodes; and it doesn't seem worth trying - * to invent one without some more examples of special communication needs. - * - * Note: it is the responsibility of nodeSort.c to react properly to - * changes of these parameters. If we ever do redesign this, it'd be a - * good idea to integrate this signaling with the parameter-change mechanism. 
+ * Compute the maximum number of tuples needed to satisfy this Limit node. + * Return a negative value if there is not a determinable limit. */ -static void -pass_down_bound(LimitState *node, PlanState *child_node) +static int64 +compute_tuples_needed(LimitState *node) { - if (IsA(child_node, SortState)) - { - SortState *sortState = (SortState *) child_node; - int64 tuples_needed = node->count + node->offset; - - /* negative test checks for overflow in sum */ - if (node->noCount || tuples_needed < 0) - { - /* make sure flag gets reset if needed upon rescan */ - sortState->bounded = false; - } - else - { - sortState->bounded = true; - sortState->bound = tuples_needed; - } - } - else if (IsA(child_node, MergeAppendState)) - { - MergeAppendState *maState = (MergeAppendState *) child_node; - int i; - - for (i = 0; i < maState->ms_nplans; i++) - pass_down_bound(node, maState->mergeplans[i]); - } - else if (IsA(child_node, ResultState)) - { - /* - * If Result supported qual checking, we'd have to punt on seeing a - * qual. Note that having a resconstantqual is not a showstopper: if - * that fails we're not getting any rows at all. - */ - if (outerPlanState(child_node)) - pass_down_bound(node, outerPlanState(child_node)); - } + if (node->noCount) + return -1; + /* Note: if this overflows, we'll return a negative value, which is OK */ + return node->count + node->offset; } /* ---------------------------------------------------------------- @@ -391,6 +361,12 @@ ExecInitLimit(Limit *node, EState *estate, int eflags) */ ExecAssignExprContext(estate, &limitstate->ps); + /* + * initialize outer plan + */ + outerPlan = outerPlan(node); + outerPlanState(limitstate) = ExecInitNode(outerPlan, estate, eflags); + /* * initialize child expressions */ @@ -400,21 +376,14 @@ ExecInitLimit(Limit *node, EState *estate, int eflags) (PlanState *) limitstate); /* - * Tuple table initialization (XXX not actually used...) - */ - ExecInitResultTupleSlot(estate, &limitstate->ps); - - /* - * then initialize outer plan + * Initialize result type. 
*/ - outerPlan = outerPlan(node); - outerPlanState(limitstate) = ExecInitNode(outerPlan, estate, eflags); + ExecInitResultTypeTL(&limitstate->ps); /* * limit nodes do no projections, so initialize projection info for this * node appropriately */ - ExecAssignResultTypeFromTL(&limitstate->ps); limitstate->ps.ps_ProjInfo = NULL; return limitstate; diff --git a/src/backend/executor/nodeLockRows.c b/src/backend/executor/nodeLockRows.c index 93895600a5..961798cecb 100644 --- a/src/backend/executor/nodeLockRows.c +++ b/src/backend/executor/nodeLockRows.c @@ -3,7 +3,7 @@ * nodeLockRows.c * Routines to handle FOR UPDATE/FOR SHARE row locking * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * @@ -218,6 +218,11 @@ ExecLockRows(PlanState *pstate) ereport(ERROR, (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE), errmsg("could not serialize access due to concurrent update"))); + if (ItemPointerIndicatesMovedPartitions(&hufd.ctid)) + ereport(ERROR, + (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE), + errmsg("tuple to be locked was already moved to another partition due to concurrent update"))); + if (ItemPointerEquals(&hufd.ctid, &tuple.t_self)) { /* Tuple was deleted, so don't return it */ @@ -251,6 +256,7 @@ ExecLockRows(PlanState *pstate) case HeapTupleInvisible: elog(ERROR, "attempted to lock invisible tuple"); + break; default: elog(ERROR, "unrecognized heap_lock_tuple status: %u", @@ -370,13 +376,14 @@ ExecInitLockRows(LockRows *node, EState *estate, int eflags) /* * Miscellaneous initialization * - * LockRows nodes never call ExecQual or ExecProject. + * LockRows nodes never call ExecQual or ExecProject, therefore no + * ExprContext is needed. */ /* - * Tuple table initialization (XXX not actually used...) + * Initialize result type. */ - ExecInitResultTupleSlot(estate, &lrstate->ps); + ExecInitResultTypeTL(&lrstate->ps); /* * then initialize outer plan @@ -387,13 +394,12 @@ ExecInitLockRows(LockRows *node, EState *estate, int eflags) * LockRows nodes do no projections, so initialize projection info for * this node appropriately */ - ExecAssignResultTypeFromTL(&lrstate->ps); lrstate->ps.ps_ProjInfo = NULL; /* * Create workspace in which we can remember per-RTE locked tuples */ - lrstate->lr_ntables = list_length(estate->es_range_table); + lrstate->lr_ntables = estate->es_range_table_size; lrstate->lr_curtuples = (HeapTuple *) palloc0(lrstate->lr_ntables * sizeof(HeapTuple)); diff --git a/src/backend/executor/nodeMaterial.c b/src/backend/executor/nodeMaterial.c index 91178f1019..4ede428f90 100644 --- a/src/backend/executor/nodeMaterial.c +++ b/src/backend/executor/nodeMaterial.c @@ -3,7 +3,7 @@ * nodeMaterial.c * Routines to handle materialization nodes. * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * @@ -206,14 +206,6 @@ ExecInitMaterial(Material *node, EState *estate, int eflags) * ExecQual or ExecProject. */ - /* - * tuple table initialization - * - * material nodes only return tuples from their materialized relation. 
- */ - ExecInitResultTupleSlot(estate, &matstate->ss.ps); - ExecInitScanTupleSlot(estate, &matstate->ss); - /* * initialize child nodes * @@ -226,13 +218,19 @@ ExecInitMaterial(Material *node, EState *estate, int eflags) outerPlanState(matstate) = ExecInitNode(outerPlan, estate, eflags); /* - * initialize tuple type. no need to initialize projection info because - * this node doesn't do projections. + * Initialize result type and slot. No need to initialize projection info + * because this node doesn't do projections. + * + * material nodes only return tuples from their materialized relation. */ - ExecAssignResultTypeFromTL(&matstate->ss.ps); - ExecAssignScanTypeFromOuterPlan(&matstate->ss); + ExecInitResultTupleSlotTL(&matstate->ss.ps); matstate->ss.ps.ps_ProjInfo = NULL; + /* + * initialize tuple type. + */ + ExecCreateScanSlotFromOuterPlan(estate, &matstate->ss); + return matstate; } diff --git a/src/backend/executor/nodeMergeAppend.c b/src/backend/executor/nodeMergeAppend.c index 6bf490bd70..dbed667d16 100644 --- a/src/backend/executor/nodeMergeAppend.c +++ b/src/backend/executor/nodeMergeAppend.c @@ -3,7 +3,7 @@ * nodeMergeAppend.c * routines to handle MergeAppend nodes. * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * @@ -39,6 +39,7 @@ #include "postgres.h" #include "executor/execdebug.h" +#include "executor/execPartition.h" #include "executor/nodeMergeAppend.h" #include "lib/binaryheap.h" #include "miscadmin.h" @@ -65,32 +66,94 @@ ExecInitMergeAppend(MergeAppend *node, EState *estate, int eflags) { MergeAppendState *mergestate = makeNode(MergeAppendState); PlanState **mergeplanstates; + Bitmapset *validsubplans; int nplans; - int i; + int i, + j; ListCell *lc; /* check for unsupported flags */ Assert(!(eflags & (EXEC_FLAG_BACKWARD | EXEC_FLAG_MARK))); - /* - * Lock the non-leaf tables in the partition tree controlled by this node. - * It's a no-op for non-partitioned parent tables. - */ - ExecLockNonLeafAppendTables(node->partitioned_rels, estate); - - /* - * Set up empty vector of subplan states - */ - nplans = list_length(node->mergeplans); - - mergeplanstates = (PlanState **) palloc0(nplans * sizeof(PlanState *)); - /* * create new MergeAppendState for our node */ mergestate->ps.plan = (Plan *) node; mergestate->ps.state = estate; mergestate->ps.ExecProcNode = ExecMergeAppend; + mergestate->ms_noopscan = false; + + /* If run-time partition pruning is enabled, then set that up now */ + if (node->part_prune_info != NULL) + { + PartitionPruneState *prunestate; + + /* We may need an expression context to evaluate partition exprs */ + ExecAssignExprContext(estate, &mergestate->ps); + + prunestate = ExecCreatePartitionPruneState(&mergestate->ps, + node->part_prune_info); + mergestate->ms_prune_state = prunestate; + + /* Perform an initial partition prune, if required. */ + if (prunestate->do_initial_prune) + { + /* Determine which subplans survive initial pruning */ + validsubplans = ExecFindInitialMatchingSubPlans(prunestate, + list_length(node->mergeplans)); + + /* + * The case where no subplans survive pruning must be handled + * specially. The problem here is that code in explain.c requires + * a MergeAppend to have at least one subplan in order for it to + * properly determine the Vars in that subplan's targetlist. 
We + * sidestep this issue by just initializing the first subplan and + * setting ms_noopscan to true to indicate that we don't really + * need to scan any subnodes. + */ + if (bms_is_empty(validsubplans)) + { + mergestate->ms_noopscan = true; + + /* Mark the first as valid so that it's initialized below */ + validsubplans = bms_make_singleton(0); + } + + nplans = bms_num_members(validsubplans); + } + else + { + /* We'll need to initialize all subplans */ + nplans = list_length(node->mergeplans); + Assert(nplans > 0); + validsubplans = bms_add_range(NULL, 0, nplans - 1); + } + + /* + * If no runtime pruning is required, we can fill ms_valid_subplans + * immediately, preventing later calls to ExecFindMatchingSubPlans. + */ + if (!prunestate->do_exec_prune) + { + Assert(nplans > 0); + mergestate->ms_valid_subplans = bms_add_range(NULL, 0, nplans - 1); + } + } + else + { + nplans = list_length(node->mergeplans); + + /* + * When run-time partition pruning is not enabled we can just mark all + * subplans as valid; they must also all be initialized. + */ + Assert(nplans > 0); + mergestate->ms_valid_subplans = validsubplans = + bms_add_range(NULL, 0, nplans - 1); + mergestate->ms_prune_state = NULL; + } + + mergeplanstates = (PlanState **) palloc(nplans * sizeof(PlanState *)); mergestate->mergeplans = mergeplanstates; mergestate->ms_nplans = nplans; @@ -101,33 +164,27 @@ ExecInitMergeAppend(MergeAppend *node, EState *estate, int eflags) /* * Miscellaneous initialization * - * MergeAppend plans don't have expression contexts because they never - * call ExecQual or ExecProject. - */ - - /* * MergeAppend nodes do have Result slots, which hold pointers to tuples, - * so we have to initialize them. + * so we have to initialize them. FIXME */ - ExecInitResultTupleSlot(estate, &mergestate->ps); + ExecInitResultTupleSlotTL(&mergestate->ps); /* - * call ExecInitNode on each of the plans to be executed and save the - * results into the array "mergeplans". + * call ExecInitNode on each of the valid plans to be executed and save + * the results into the mergeplanstates array. */ - i = 0; + j = i = 0; foreach(lc, node->mergeplans) { - Plan *initNode = (Plan *) lfirst(lc); + if (bms_is_member(i, validsubplans)) + { + Plan *initNode = (Plan *) lfirst(lc); - mergeplanstates[i] = ExecInitNode(initNode, estate, eflags); + mergeplanstates[j++] = ExecInitNode(initNode, estate, eflags); + } i++; } - /* - * initialize output tuple type - */ - ExecAssignResultTypeFromTL(&mergestate->ps); mergestate->ps.ps_ProjInfo = NULL; /* @@ -182,11 +239,25 @@ ExecMergeAppend(PlanState *pstate) if (!node->ms_initialized) { + /* Nothing to do if all subplans were pruned */ + if (node->ms_noopscan) + return ExecClearTuple(node->ps.ps_ResultTupleSlot); + + /* + * If we've yet to determine the valid subplans then do so now. If + * run-time pruning is disabled then the valid subplans will always be + * set to all subplans. + */ + if (node->ms_valid_subplans == NULL) + node->ms_valid_subplans = + ExecFindMatchingSubPlans(node->ms_prune_state); + /* - * First time through: pull the first tuple from each subplan, and set - * up the heap. + * First time through: pull the first tuple from each valid subplan, + * and set up the heap. 
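The first-tuple loop just below no longer iterates 0 .. ms_nplans - 1; it walks only the members of ms_valid_subplans with bms_next_member(), so pruned subplans are never touched. A small, generic illustration of that iteration idiom (the function and log message are illustrative only, not part of the patch):

    #include "postgres.h"

    #include "nodes/bitmapset.h"

    /* Visit every member of a set of surviving subplan indexes. */
    static void
    visit_valid_subplans(Bitmapset *valid_subplans)
    {
        int         i = -1;

        while ((i = bms_next_member(valid_subplans, i)) >= 0)
            elog(DEBUG1, "subplan %d survived pruning", i);
    }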
*/ - for (i = 0; i < node->ms_nplans; i++) + i = -1; + while ((i = bms_next_member(node->ms_valid_subplans, i)) >= 0) { node->ms_slots[i] = ExecProcNode(node->mergeplans[i]); if (!TupIsNull(node->ms_slots[i])) @@ -261,7 +332,10 @@ heap_compare_slots(Datum a, Datum b, void *arg) datum2, isNull2, sortKey); if (compare != 0) - return -compare; + { + INVERT_COMPARE_RESULT(compare); + return compare; + } } return 0; } @@ -299,6 +373,19 @@ ExecReScanMergeAppend(MergeAppendState *node) { int i; + /* + * If any PARAM_EXEC Params used in pruning expressions have changed, then + * we'd better unset the valid subplans so that they are reselected for + * the new parameter values. + */ + if (node->ms_prune_state && + bms_overlap(node->ps.chgParam, + node->ms_prune_state->execparamids)) + { + bms_free(node->ms_valid_subplans); + node->ms_valid_subplans = NULL; + } + for (i = 0; i < node->ms_nplans; i++) { PlanState *subnode = node->mergeplans[i]; diff --git a/src/backend/executor/nodeMergejoin.c b/src/backend/executor/nodeMergejoin.c index 925b4cf553..9c97831331 100644 --- a/src/backend/executor/nodeMergejoin.c +++ b/src/backend/executor/nodeMergejoin.c @@ -3,7 +3,7 @@ * nodeMergejoin.c * routines supporting merge joins * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * @@ -510,7 +510,7 @@ MJFillInner(MergeJoinState *node) /* * Check that a qual condition is constant true or constant false. - * If it is constant false (or null), set *is_const_false to TRUE. + * If it is constant false (or null), set *is_const_false to true. * * Constant true would normally be represented by a NIL list, but we allow an * actual bool Const as well. We do expect that the planner will have thrown @@ -1436,6 +1436,8 @@ MergeJoinState * ExecInitMergeJoin(MergeJoin *node, EState *estate, int eflags) { MergeJoinState *mergestate; + TupleDesc outerDesc, + innerDesc; /* check for unsupported flags */ Assert(!(eflags & (EXEC_FLAG_BACKWARD | EXEC_FLAG_MARK))); @@ -1450,6 +1452,8 @@ ExecInitMergeJoin(MergeJoin *node, EState *estate, int eflags) mergestate->js.ps.plan = (Plan *) node; mergestate->js.ps.state = estate; mergestate->js.ps.ExecProcNode = ExecMergeJoin; + mergestate->js.jointype = node->join.jointype; + mergestate->mj_ConstFalseJoin = false; /* * Miscellaneous initialization @@ -1466,17 +1470,6 @@ ExecInitMergeJoin(MergeJoin *node, EState *estate, int eflags) mergestate->mj_OuterEContext = CreateExprContext(estate); mergestate->mj_InnerEContext = CreateExprContext(estate); - /* - * initialize child expressions - */ - mergestate->js.ps.qual = - ExecInitQual(node->join.plan.qual, (PlanState *) mergestate); - mergestate->js.jointype = node->join.jointype; - mergestate->js.joinqual = - ExecInitQual(node->join.joinqual, (PlanState *) mergestate); - mergestate->mj_ConstFalseJoin = false; - /* mergeclauses are handled below */ - /* * initialize child nodes * @@ -1488,10 +1481,12 @@ ExecInitMergeJoin(MergeJoin *node, EState *estate, int eflags) mergestate->mj_SkipMarkRestore = node->skip_mark_restore; outerPlanState(mergestate) = ExecInitNode(outerPlan(node), estate, eflags); + outerDesc = ExecGetResultType(outerPlanState(mergestate)); innerPlanState(mergestate) = ExecInitNode(innerPlan(node), estate, mergestate->mj_SkipMarkRestore ? 
eflags : (eflags | EXEC_FLAG_MARK)); + innerDesc = ExecGetResultType(innerPlanState(mergestate)); /* * For certain types of inner child nodes, it is advantageous to issue @@ -1502,6 +1497,10 @@ ExecInitMergeJoin(MergeJoin *node, EState *estate, int eflags) * * Currently, only Material wants the extra MARKs, and it will be helpful * only if eflags doesn't specify REWIND. + * + * Note that for IndexScan and IndexOnlyScan, it is *necessary* that we + * not set mj_ExtraMarks; otherwise we might attempt to set a mark before + * the first inner tuple, which they do not support. */ if (IsA(innerPlan(node), Material) && (eflags & EXEC_FLAG_REWIND) == 0 && @@ -1510,14 +1509,25 @@ ExecInitMergeJoin(MergeJoin *node, EState *estate, int eflags) else mergestate->mj_ExtraMarks = false; + /* + * Initialize result slot, type and projection. + */ + ExecInitResultTupleSlotTL(&mergestate->js.ps); + ExecAssignProjectionInfo(&mergestate->js.ps, NULL); + /* * tuple table initialization */ - ExecInitResultTupleSlot(estate, &mergestate->js.ps); + mergestate->mj_MarkedTupleSlot = ExecInitExtraTupleSlot(estate, innerDesc); - mergestate->mj_MarkedTupleSlot = ExecInitExtraTupleSlot(estate); - ExecSetSlotDescriptor(mergestate->mj_MarkedTupleSlot, - ExecGetResultType(innerPlanState(mergestate))); + /* + * initialize child expressions + */ + mergestate->js.ps.qual = + ExecInitQual(node->join.plan.qual, (PlanState *) mergestate); + mergestate->js.joinqual = + ExecInitQual(node->join.joinqual, (PlanState *) mergestate); + /* mergeclauses are handled below */ /* * detect whether we need only consider the first matching inner tuple @@ -1538,15 +1548,13 @@ ExecInitMergeJoin(MergeJoin *node, EState *estate, int eflags) mergestate->mj_FillOuter = true; mergestate->mj_FillInner = false; mergestate->mj_NullInnerTupleSlot = - ExecInitNullTupleSlot(estate, - ExecGetResultType(innerPlanState(mergestate))); + ExecInitNullTupleSlot(estate, innerDesc); break; case JOIN_RIGHT: mergestate->mj_FillOuter = false; mergestate->mj_FillInner = true; mergestate->mj_NullOuterTupleSlot = - ExecInitNullTupleSlot(estate, - ExecGetResultType(outerPlanState(mergestate))); + ExecInitNullTupleSlot(estate, outerDesc); /* * Can't handle right or full join with non-constant extra @@ -1562,11 +1570,9 @@ ExecInitMergeJoin(MergeJoin *node, EState *estate, int eflags) mergestate->mj_FillOuter = true; mergestate->mj_FillInner = true; mergestate->mj_NullOuterTupleSlot = - ExecInitNullTupleSlot(estate, - ExecGetResultType(outerPlanState(mergestate))); + ExecInitNullTupleSlot(estate, outerDesc); mergestate->mj_NullInnerTupleSlot = - ExecInitNullTupleSlot(estate, - ExecGetResultType(innerPlanState(mergestate))); + ExecInitNullTupleSlot(estate, innerDesc); /* * Can't handle right or full join with non-constant extra @@ -1583,12 +1589,6 @@ ExecInitMergeJoin(MergeJoin *node, EState *estate, int eflags) (int) node->join.jointype); } - /* - * initialize tuple type and projection info - */ - ExecAssignResultTypeFromTL(&mergestate->js.ps); - ExecAssignProjectionInfo(&mergestate->js.ps, NULL); - /* * preprocess the merge clauses */ diff --git a/src/backend/executor/nodeModifyTable.c b/src/backend/executor/nodeModifyTable.c index 36b2b43bc6..e2836b75ff 100644 --- a/src/backend/executor/nodeModifyTable.c +++ b/src/backend/executor/nodeModifyTable.c @@ -3,7 +3,7 @@ * nodeModifyTable.c * routines to handle ModifyTable nodes. 
* - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * @@ -40,12 +40,12 @@ #include "access/htup_details.h" #include "access/xact.h" #include "commands/trigger.h" +#include "executor/execPartition.h" #include "executor/executor.h" #include "executor/nodeModifyTable.h" #include "foreign/fdwapi.h" #include "miscadmin.h" #include "nodes/nodeFuncs.h" -#include "parser/parsetree.h" #include "storage/bufmgr.h" #include "storage/lmgr.h" #include "utils/builtins.h" @@ -62,6 +62,16 @@ static bool ExecOnConflictUpdate(ModifyTableState *mtstate, EState *estate, bool canSetTag, TupleTableSlot **returning); +static TupleTableSlot *ExecPrepareTupleRouting(ModifyTableState *mtstate, + EState *estate, + PartitionTupleRouting *proute, + ResultRelInfo *targetRelInfo, + TupleTableSlot *slot); +static ResultRelInfo *getTargetResultRelInfo(ModifyTableState *node); +static void ExecSetupChildParentMapForTcs(ModifyTableState *mtstate); +static void ExecSetupChildParentMapForSubplan(ModifyTableState *mtstate); +static TupleConversionMap *tupconv_map_for_subplan(ModifyTableState *node, + int whichplan); /* * Verify that the tuples to be produced by INSERT or UPDATE match the @@ -95,7 +105,8 @@ ExecCheckPlanOutput(Relation resultRel, List *targetList) (errcode(ERRCODE_DATATYPE_MISMATCH), errmsg("table row type and query-specified row type do not match"), errdetail("Query has too many columns."))); - attr = resultDesc->attrs[attno++]; + attr = TupleDescAttr(resultDesc, attno); + attno++; if (!attr->attisdropped) { @@ -135,7 +146,7 @@ ExecCheckPlanOutput(Relation resultRel, List *targetList) /* * ExecProcessReturning --- evaluate a RETURNING list * - * projectReturning: RETURNING projection info for current result rel + * resultRelInfo: current result rel * tupleSlot: slot holding tuple actually inserted/updated/deleted * planSlot: slot holding tuple returned by top subplan node * @@ -252,18 +263,18 @@ static TupleTableSlot * ExecInsert(ModifyTableState *mtstate, TupleTableSlot *slot, TupleTableSlot *planSlot, - List *arbiterIndexes, - OnConflictAction onconflict, EState *estate, bool canSetTag) { HeapTuple tuple; ResultRelInfo *resultRelInfo; - ResultRelInfo *saved_resultRelInfo = NULL; Relation resultRelationDesc; Oid newId; List *recheckIndexes = NIL; TupleTableSlot *result = NULL; + TransitionCaptureState *ar_insert_trig_tcs; + ModifyTable *node = (ModifyTable *) mtstate->ps.plan; + OnConflictAction onconflict = node->onConflictAction; /* * get the heap tuple out of the tuple table slot, making sure we have a @@ -275,97 +286,6 @@ ExecInsert(ModifyTableState *mtstate, * get information on the (current) result relation */ resultRelInfo = estate->es_result_relation_info; - - /* Determine the partition to heap_insert the tuple into */ - if (mtstate->mt_partition_dispatch_info) - { - int leaf_part_index; - TupleConversionMap *map; - - /* - * Away we go ... If we end up not finding a partition after all, - * ExecFindPartition() does not return and errors out instead. - * Otherwise, the returned value is to be used as an index into arrays - * mt_partitions[] and mt_partition_tupconv_maps[] that will get us - * the ResultRelInfo and TupleConversionMap for the partition, - * respectively. 
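The ExecCheckPlanOutput hunk above switches from indexing resultDesc->attrs[] directly to the TupleDescAttr() accessor, which insulates callers from how a TupleDesc stores its attribute array. A hedged sketch of walking attributes that way (the helper is illustrative, not part of the patch):

    #include "postgres.h"

    #include "access/tupdesc.h"
    #include "catalog/pg_attribute.h"

    /* Count the non-dropped columns of a tuple descriptor. */
    static int
    count_live_columns(TupleDesc tupdesc)
    {
        int         live = 0;
        int         attno;

        for (attno = 0; attno < tupdesc->natts; attno++)
        {
            Form_pg_attribute attr = TupleDescAttr(tupdesc, attno);

            if (!attr->attisdropped)
                live++;
        }
        return live;
    }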
- */ - leaf_part_index = ExecFindPartition(resultRelInfo, - mtstate->mt_partition_dispatch_info, - slot, - estate); - Assert(leaf_part_index >= 0 && - leaf_part_index < mtstate->mt_num_partitions); - - /* - * Save the old ResultRelInfo and switch to the one corresponding to - * the selected partition. - */ - saved_resultRelInfo = resultRelInfo; - resultRelInfo = mtstate->mt_partitions + leaf_part_index; - - /* We do not yet have a way to insert into a foreign partition */ - if (resultRelInfo->ri_FdwRoutine) - ereport(ERROR, - (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("cannot route inserted tuples to a foreign table"))); - - /* For ExecInsertIndexTuples() to work on the partition's indexes */ - estate->es_result_relation_info = resultRelInfo; - - /* - * If we're capturing transition tuples, we might need to convert from - * the partition rowtype to parent rowtype. - */ - if (mtstate->mt_transition_capture != NULL) - { - if (resultRelInfo->ri_TrigDesc && - (resultRelInfo->ri_TrigDesc->trig_insert_before_row || - resultRelInfo->ri_TrigDesc->trig_insert_instead_row)) - { - /* - * If there are any BEFORE or INSTEAD triggers on the - * partition, we'll have to be ready to convert their result - * back to tuplestore format. - */ - mtstate->mt_transition_capture->tcs_original_insert_tuple = NULL; - mtstate->mt_transition_capture->tcs_map = - mtstate->mt_transition_tupconv_maps[leaf_part_index]; - } - else - { - /* - * Otherwise, just remember the original unconverted tuple, to - * avoid a needless round trip conversion. - */ - mtstate->mt_transition_capture->tcs_original_insert_tuple = tuple; - mtstate->mt_transition_capture->tcs_map = NULL; - } - } - - /* - * We might need to convert from the parent rowtype to the partition - * rowtype. - */ - map = mtstate->mt_partition_tupconv_maps[leaf_part_index]; - if (map) - { - Relation partrel = resultRelInfo->ri_RelationDesc; - - tuple = do_convert_tuple(tuple, map); - - /* - * We must use the partition's tuple descriptor from this point - * on, until we're finished dealing with the partition. Use the - * dedicated slot for that. - */ - slot = mtstate->mt_partition_tuple_slot; - Assert(slot != NULL); - ExecSetSlotDescriptor(slot, RelationGetDescr(partrel)); - ExecStoreTuple(tuple, slot, InvalidBuffer, true); - } - } - resultRelationDesc = resultRelInfo->ri_RelationDesc; /* @@ -444,15 +364,7 @@ ExecInsert(ModifyTableState *mtstate, } else { - /* - * We always check the partition constraint, including when the tuple - * got here via tuple-routing. However we don't need to in the latter - * case if no BR trigger is defined on the partition. Note that a BR - * trigger might modify the tuple such that the partition constraint - * is no longer satisfied, so we need to check in that case. - */ - bool check_partition_constr = - (resultRelInfo->ri_PartitionCheck != NIL); + WCOKind wco_kind; /* * Constraints might reference the tableoid column, so initialize @@ -461,34 +373,50 @@ ExecInsert(ModifyTableState *mtstate, tuple->t_tableOid = RelationGetRelid(resultRelationDesc); /* - * Check any RLS INSERT WITH CHECK policies + * Check any RLS WITH CHECK policies. * + * Normally we should check INSERT policies. But if the insert is the + * result of a partition key update that moved the tuple to a new + * partition, we should instead check UPDATE policies, because we are + * executing policies defined on the target table, and not those + * defined on the child partitions. + */ + wco_kind = (mtstate->operation == CMD_UPDATE) ? 
+ WCO_RLS_UPDATE_CHECK : WCO_RLS_INSERT_CHECK; + + /* * ExecWithCheckOptions() will skip any WCOs which are not of the kind * we are looking for at this point. */ if (resultRelInfo->ri_WithCheckOptions != NIL) - ExecWithCheckOptions(WCO_RLS_INSERT_CHECK, - resultRelInfo, slot, estate); + ExecWithCheckOptions(wco_kind, resultRelInfo, slot, estate); /* - * No need though if the tuple has been routed, and a BR trigger - * doesn't exist. + * Check the constraints of the tuple. */ - if (saved_resultRelInfo != NULL && - !(resultRelInfo->ri_TrigDesc && - resultRelInfo->ri_TrigDesc->trig_insert_before_row)) - check_partition_constr = false; - - /* Check the constraints of the tuple */ - if (resultRelationDesc->rd_att->constr || check_partition_constr) + if (resultRelationDesc->rd_att->constr) ExecConstraints(resultRelInfo, slot, estate); + /* + * Also check the tuple against the partition constraint, if there is + * one; except that if we got here via tuple-routing, we don't need to + * if there's no BR trigger defined on the partition. + */ + if (resultRelInfo->ri_PartitionCheck && + (resultRelInfo->ri_PartitionRoot == NULL || + (resultRelInfo->ri_TrigDesc && + resultRelInfo->ri_TrigDesc->trig_insert_before_row))) + ExecPartitionCheck(resultRelInfo, slot, estate, true); + if (onconflict != ONCONFLICT_NONE && resultRelInfo->ri_NumIndices > 0) { /* Perform a speculative insertion. */ uint32 specToken; ItemPointerData conflictTid; bool specConflict; + List *arbiterIndexes; + + arbiterIndexes = resultRelInfo->ri_onConflictArbiterIndexes; /* * Do a non-conclusive check for conflicts first. @@ -522,7 +450,7 @@ ExecInsert(ModifyTableState *mtstate, &conflictTid, planSlot, slot, estate, canSetTag, &returning)) { - InstrCountFiltered2(&mtstate->ps, 1); + InstrCountTuples2(&mtstate->ps, 1); return returning; } else @@ -537,7 +465,7 @@ ExecInsert(ModifyTableState *mtstate, */ Assert(onconflict == ONCONFLICT_NOTHING); ExecCheckTIDVisible(estate, resultRelInfo, &conflictTid); - InstrCountFiltered2(&mtstate->ps, 1); + InstrCountTuples2(&mtstate->ps, 1); return NULL; } } @@ -606,7 +534,7 @@ ExecInsert(ModifyTableState *mtstate, if (resultRelInfo->ri_NumIndices > 0) recheckIndexes = ExecInsertIndexTuples(slot, &(tuple->t_self), estate, false, NULL, - arbiterIndexes); + NIL); } } @@ -617,9 +545,32 @@ ExecInsert(ModifyTableState *mtstate, setLastTid(&(tuple->t_self)); } + /* + * If this insert is the result of a partition key update that moved the + * tuple to a new partition, put this row into the transition NEW TABLE, + * if there is one. We need to do this separately for DELETE and INSERT + * because they happen on different tables. + */ + ar_insert_trig_tcs = mtstate->mt_transition_capture; + if (mtstate->operation == CMD_UPDATE && mtstate->mt_transition_capture + && mtstate->mt_transition_capture->tcs_update_new_table) + { + ExecARUpdateTriggers(estate, resultRelInfo, NULL, + NULL, + tuple, + NULL, + mtstate->mt_transition_capture); + + /* + * We've already captured the NEW TABLE row, so make sure any AR + * INSERT trigger fired below doesn't capture it again. 
+ */ + ar_insert_trig_tcs = NULL; + } + /* AFTER ROW INSERT Triggers */ ExecARInsertTriggers(estate, resultRelInfo, tuple, recheckIndexes, - mtstate->mt_transition_capture); + ar_insert_trig_tcs); list_free(recheckIndexes); @@ -642,9 +593,6 @@ ExecInsert(ModifyTableState *mtstate, if (resultRelInfo->ri_projectReturning) result = ExecProcessReturning(resultRelInfo, slot, planSlot); - if (saved_resultRelInfo) - estate->es_result_relation_info = saved_resultRelInfo; - return result; } @@ -661,7 +609,11 @@ ExecInsert(ModifyTableState *mtstate, * foreign table, tupleid is invalid; the FDW has to figure out * which row to delete using data from the planSlot. oldtuple is * passed to foreign table triggers; it is NULL when the foreign - * table has no relevant triggers. + * table has no relevant triggers. We use tupleDeleted to indicate + * whether the tuple is actually deleted, callers can use it to + * decide whether to continue the operation. When this DELETE is a + * part of an UPDATE of partition-key, then the slot returned by + * EvalPlanQual() is passed back using output parameter epqslot. * * Returns RETURNING result if any, otherwise NULL. * ---------------------------------------------------------------- @@ -673,13 +625,21 @@ ExecDelete(ModifyTableState *mtstate, TupleTableSlot *planSlot, EPQState *epqstate, EState *estate, - bool canSetTag) + bool processReturning, + bool canSetTag, + bool changingPart, + bool *tupleDeleted, + TupleTableSlot **epqslot) { ResultRelInfo *resultRelInfo; Relation resultRelationDesc; HTSU_Result result; HeapUpdateFailureData hufd; TupleTableSlot *slot = NULL; + TransitionCaptureState *ar_delete_trig_tcs; + + if (tupleDeleted) + *tupleDeleted = false; /* * get information on the (current) result relation @@ -694,7 +654,7 @@ ExecDelete(ModifyTableState *mtstate, bool dodelete; dodelete = ExecBRDeleteTriggers(estate, epqstate, resultRelInfo, - tupleid, oldtuple); + tupleid, oldtuple, epqslot); if (!dodelete) /* "do nothing" */ return NULL; @@ -739,7 +699,7 @@ ExecDelete(ModifyTableState *mtstate, * RETURNING expressions might reference the tableoid column, so * initialize t_tableOid before evaluating them. */ - if (slot->tts_isempty) + if (TTS_EMPTY(slot)) ExecStoreAllNullTuple(slot); tuple = ExecMaterializeSlot(slot); tuple->t_tableOid = RelationGetRelid(resultRelationDesc); @@ -760,7 +720,8 @@ ldelete:; estate->es_output_cid, estate->es_crosscheck_snapshot, true /* wait for commit */ , - &hufd); + &hufd, + changingPart); switch (result) { case HeapTupleSelfUpdated: @@ -806,21 +767,37 @@ ldelete:; ereport(ERROR, (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE), errmsg("could not serialize access due to concurrent update"))); + if (ItemPointerIndicatesMovedPartitions(&hufd.ctid)) + ereport(ERROR, + (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE), + errmsg("tuple to be deleted was already moved to another partition due to concurrent update"))); + if (!ItemPointerEquals(tupleid, &hufd.ctid)) { - TupleTableSlot *epqslot; - - epqslot = EvalPlanQual(estate, - epqstate, - resultRelationDesc, - resultRelInfo->ri_RangeTableIndex, - LockTupleExclusive, - &hufd.ctid, - hufd.xmax); - if (!TupIsNull(epqslot)) + TupleTableSlot *my_epqslot; + + my_epqslot = EvalPlanQual(estate, + epqstate, + resultRelationDesc, + resultRelInfo->ri_RangeTableIndex, + LockTupleExclusive, + &hufd.ctid, + hufd.xmax); + if (!TupIsNull(my_epqslot)) { *tupleid = hufd.ctid; - goto ldelete; + + /* + * If requested, skip delete and pass back the updated + * row. 
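Several hunks above (here and in nodeLockRows.c) now distinguish "the row was moved to another partition" from an ordinary concurrent update; under REPEATABLE READ or stronger both are reported as serialization failures, but with different messages. The move is signalled through a reserved value stored in the returned ctid. A hedged sketch of testing for it (the helper name is illustrative):

    #include "postgres.h"

    #include "storage/itemptr.h"

    /* Report a serialization failure, distinguishing cross-partition moves. */
    static void
    report_concurrent_update(ItemPointer ctid)
    {
        if (ItemPointerIndicatesMovedPartitions(ctid))
            ereport(ERROR,
                    (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
                     errmsg("tuple to be locked was already moved to another partition due to concurrent update")));
        else
            ereport(ERROR,
                    (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
                     errmsg("could not serialize access due to concurrent update")));
    }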
+ */ + if (epqslot) + { + *epqslot = my_epqslot; + return NULL; + } + else + goto ldelete; } } /* tuple already deleted; nothing to do */ @@ -844,12 +821,40 @@ ldelete:; if (canSetTag) (estate->es_processed)++; + /* Tell caller that the delete actually happened. */ + if (tupleDeleted) + *tupleDeleted = true; + + /* + * If this delete is the result of a partition key update that moved the + * tuple to a new partition, put this row into the transition OLD TABLE, + * if there is one. We need to do this separately for DELETE and INSERT + * because they happen on different tables. + */ + ar_delete_trig_tcs = mtstate->mt_transition_capture; + if (mtstate->operation == CMD_UPDATE && mtstate->mt_transition_capture + && mtstate->mt_transition_capture->tcs_update_old_table) + { + ExecARUpdateTriggers(estate, resultRelInfo, + tupleid, + oldtuple, + NULL, + NULL, + mtstate->mt_transition_capture); + + /* + * We've already captured the NEW TABLE row, so make sure any AR + * DELETE trigger fired below doesn't capture it again. + */ + ar_delete_trig_tcs = NULL; + } + /* AFTER ROW DELETE Triggers */ ExecARDeleteTriggers(estate, resultRelInfo, tupleid, oldtuple, - mtstate->mt_transition_capture); + ar_delete_trig_tcs); - /* Process RETURNING if present */ - if (resultRelInfo->ri_projectReturning) + /* Process RETURNING if present and if requested */ + if (processReturning && resultRelInfo->ri_projectReturning) { /* * We have to put the target tuple into a slot, which means first we @@ -883,7 +888,7 @@ ldelete:; if (slot->tts_tupleDescriptor != RelationGetDescr(resultRelationDesc)) ExecSetSlotDescriptor(slot, RelationGetDescr(resultRelationDesc)); - ExecStoreTuple(&deltuple, slot, InvalidBuffer, false); + ExecStoreHeapTuple(&deltuple, slot, false); } rslot = ExecProcessReturning(resultRelInfo, slot, planSlot); @@ -942,6 +947,7 @@ ExecUpdate(ModifyTableState *mtstate, HTSU_Result result; HeapUpdateFailureData hufd; List *recheckIndexes = NIL; + TupleConversionMap *saved_tcs_map = NULL; /* * abort the operation if not running transactions @@ -1013,6 +1019,7 @@ ExecUpdate(ModifyTableState *mtstate, else { LockTupleMode lockmode; + bool partition_constraint_failed; /* * Constraints might reference the tableoid column, so initialize @@ -1028,21 +1035,165 @@ ExecUpdate(ModifyTableState *mtstate, * (We don't need to redo triggers, however. If there are any BEFORE * triggers then trigger.c will have done heap_lock_tuple to lock the * correct tuple, so there's no need to do them again.) - * - * ExecWithCheckOptions() will skip any WCOs which are not of the kind - * we are looking for at this point. */ lreplace:; - if (resultRelInfo->ri_WithCheckOptions != NIL) + + /* + * If partition constraint fails, this row might get moved to another + * partition, in which case we should check the RLS CHECK policy just + * before inserting into the new partition, rather than doing it here. + * This is because a trigger on that partition might again change the + * row. So skip the WCO checks if the partition constraint fails. + */ + partition_constraint_failed = + resultRelInfo->ri_PartitionCheck && + !ExecPartitionCheck(resultRelInfo, slot, estate, false); + + if (!partition_constraint_failed && + resultRelInfo->ri_WithCheckOptions != NIL) + { + /* + * ExecWithCheckOptions() will skip any WCOs which are not of the + * kind we are looking for at this point. + */ ExecWithCheckOptions(WCO_RLS_UPDATE_CHECK, resultRelInfo, slot, estate); + } + + /* + * If a partition check failed, try to move the row into the right + * partition. 
+ */ + if (partition_constraint_failed) + { + bool tuple_deleted; + TupleTableSlot *ret_slot; + TupleTableSlot *epqslot = NULL; + PartitionTupleRouting *proute = mtstate->mt_partition_tuple_routing; + int map_index; + TupleConversionMap *tupconv_map; + + /* + * Disallow an INSERT ON CONFLICT DO UPDATE that causes the + * original row to migrate to a different partition. Maybe this + * can be implemented some day, but it seems a fringe feature with + * little redeeming value. + */ + if (((ModifyTable *) mtstate->ps.plan)->onConflictAction == ONCONFLICT_UPDATE) + ereport(ERROR, + (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), + errmsg("invalid ON UPDATE specification"), + errdetail("The result tuple would appear in a different partition than the original tuple."))); + + /* + * When an UPDATE is run on a leaf partition, we will not have + * partition tuple routing set up. In that case, fail with + * partition constraint violation error. + */ + if (proute == NULL) + ExecPartitionCheckEmitError(resultRelInfo, slot, estate); + + /* + * Row movement, part 1. Delete the tuple, but skip RETURNING + * processing. We want to return rows from INSERT. + */ + ExecDelete(mtstate, tupleid, oldtuple, planSlot, epqstate, + estate, false, false /* canSetTag */ , + true /* changingPart */ , &tuple_deleted, &epqslot); + + /* + * For some reason if DELETE didn't happen (e.g. trigger prevented + * it, or it was already deleted by self, or it was concurrently + * deleted by another transaction), then we should skip the insert + * as well; otherwise, an UPDATE could cause an increase in the + * total number of rows across all partitions, which is clearly + * wrong. + * + * For a normal UPDATE, the case where the tuple has been the + * subject of a concurrent UPDATE or DELETE would be handled by + * the EvalPlanQual machinery, but for an UPDATE that we've + * translated into a DELETE from this partition and an INSERT into + * some other partition, that's not available, because CTID chains + * can't span relation boundaries. We mimic the semantics to a + * limited extent by skipping the INSERT if the DELETE fails to + * find a tuple. This ensures that two concurrent attempts to + * UPDATE the same tuple at the same time can't turn one tuple + * into two, and that an UPDATE of a just-deleted tuple can't + * resurrect it. + */ + if (!tuple_deleted) + { + /* + * epqslot will be typically NULL. But when ExecDelete() + * finds that another transaction has concurrently updated the + * same row, it re-fetches the row, skips the delete, and + * epqslot is set to the re-fetched tuple slot. In that case, + * we need to do all the checks again. + */ + if (TupIsNull(epqslot)) + return NULL; + else + { + slot = ExecFilterJunk(resultRelInfo->ri_junkFilter, epqslot); + tuple = ExecMaterializeSlot(slot); + goto lreplace; + } + } + + /* + * Updates set the transition capture map only when a new subplan + * is chosen. But for inserts, it is set for each row. So after + * INSERT, we need to revert back to the map created for UPDATE; + * otherwise the next UPDATE will incorrectly use the one created + * for INSERT. So first save the one created for UPDATE. + */ + if (mtstate->mt_transition_capture) + saved_tcs_map = mtstate->mt_transition_capture->tcs_map; + + /* + * resultRelInfo is one of the per-subplan resultRelInfos. So we + * should convert the tuple into root's tuple descriptor, since + * ExecInsert() starts the search from root. 
The tuple conversion + * map list is in the order of mtstate->resultRelInfo[], so to + * retrieve the one for this resultRel, we need to know the + * position of the resultRel in mtstate->resultRelInfo[]. + */ + map_index = resultRelInfo - mtstate->resultRelInfo; + Assert(map_index >= 0 && map_index < mtstate->mt_nplans); + tupconv_map = tupconv_map_for_subplan(mtstate, map_index); + if (tupconv_map != NULL) + slot = execute_attr_map_slot(tupconv_map->attrMap, + slot, proute->root_tuple_slot); + + /* + * Prepare for tuple routing, making it look like we're inserting + * into the root. + */ + Assert(mtstate->rootResultRelInfo != NULL); + slot = ExecPrepareTupleRouting(mtstate, estate, proute, + mtstate->rootResultRelInfo, slot); + + ret_slot = ExecInsert(mtstate, slot, planSlot, + estate, canSetTag); + + /* Revert ExecPrepareTupleRouting's node change. */ + estate->es_result_relation_info = resultRelInfo; + if (mtstate->mt_transition_capture) + { + mtstate->mt_transition_capture->tcs_original_insert_tuple = NULL; + mtstate->mt_transition_capture->tcs_map = saved_tcs_map; + } + + return ret_slot; + } /* - * Check the constraints of the tuple. Note that we pass the same - * slot for the orig_slot argument, because unlike ExecInsert(), no - * tuple-routing is performed here, hence the slot remains unchanged. + * Check the constraints of the tuple. We've already checked the + * partition constraint above; however, we must still ensure the tuple + * passes all other constraints, so we will call ExecConstraints() and + * have it validate all remaining checks. */ - if (resultRelationDesc->rd_att->constr || resultRelInfo->ri_PartitionCheck) + if (resultRelationDesc->rd_att->constr) ExecConstraints(resultRelInfo, slot, estate); /* @@ -1103,6 +1254,11 @@ lreplace:; ereport(ERROR, (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE), errmsg("could not serialize access due to concurrent update"))); + if (ItemPointerIndicatesMovedPartitions(&hufd.ctid)) + ereport(ERROR, + (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE), + errmsg("tuple to be updated was already moved to another partition due to concurrent update"))); + if (!ItemPointerEquals(tupleid, &hufd.ctid)) { TupleTableSlot *epqslot; @@ -1157,6 +1313,8 @@ lreplace:; /* AFTER ROW UPDATE Triggers */ ExecARUpdateTriggers(estate, resultRelInfo, tupleid, oldtuple, tuple, recheckIndexes, + mtstate->operation == CMD_INSERT ? + mtstate->mt_oc_transition_capture : mtstate->mt_transition_capture); list_free(recheckIndexes); @@ -1188,7 +1346,7 @@ lreplace:; * (but still lock row, even though it may not satisfy estate's * snapshot). * - * Returns true if if we're done (with or without an update), or false if + * Returns true if we're done (with or without an update), or false if * the caller must retry the INSERT from scratch. */ static bool @@ -1203,7 +1361,7 @@ ExecOnConflictUpdate(ModifyTableState *mtstate, { ExprContext *econtext = mtstate->ps.ps_ExprContext; Relation relation = resultRelInfo->ri_RelationDesc; - ExprState *onConflictSetWhere = resultRelInfo->ri_onConflictSetWhere; + ExprState *onConflictSetWhere = resultRelInfo->ri_onConflict->oc_WhereClause; HeapTupleData tuple; HeapUpdateFailureData hufd; LockTupleMode lockmode; @@ -1255,6 +1413,7 @@ ExecOnConflictUpdate(ModifyTableState *mtstate, /* This shouldn't happen */ elog(ERROR, "attempted to lock invisible tuple"); + break; case HeapTupleSelfUpdated: @@ -1264,6 +1423,7 @@ ExecOnConflictUpdate(ModifyTableState *mtstate, * seen this row to conflict with. 
*/ elog(ERROR, "unexpected self-updated tuple"); + break; case HeapTupleUpdated: if (IsolationUsesXactSnapshot()) @@ -1271,6 +1431,14 @@ ExecOnConflictUpdate(ModifyTableState *mtstate, (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE), errmsg("could not serialize access due to concurrent update"))); + /* + * As long as we don't support an UPDATE of INSERT ON CONFLICT for + * a partitioned table we shouldn't reach to a case where tuple to + * be lock is moved to another partition due to concurrent update + * of the partition key. + */ + Assert(!ItemPointerIndicatesMovedPartitions(&hufd.ctid)); + /* * Tell caller to try again from the very start. * @@ -1309,7 +1477,7 @@ ExecOnConflictUpdate(ModifyTableState *mtstate, ExecCheckHeapTupleVisible(estate, &tuple, buffer); /* Store target's existing tuple in the state's dedicated slot */ - ExecStoreTuple(&tuple, mtstate->mt_existing, buffer, false); + ExecStoreBufferHeapTuple(&tuple, mtstate->mt_existing, buffer); /* * Make tuple and any needed join variables available to ExecQual and @@ -1352,7 +1520,7 @@ ExecOnConflictUpdate(ModifyTableState *mtstate, } /* Project the new tuple version */ - ExecProject(resultRelInfo->ri_onConflictSetProj); + ExecProject(resultRelInfo->ri_onConflict->oc_ProjInfo); /* * Note that it is possible that the target tuple has been modified in @@ -1380,6 +1548,7 @@ ExecOnConflictUpdate(ModifyTableState *mtstate, static void fireBSTriggers(ModifyTableState *node) { + ModifyTable *plan = (ModifyTable *) node->ps.plan; ResultRelInfo *resultRelInfo = node->resultRelInfo; /* @@ -1394,7 +1563,7 @@ fireBSTriggers(ModifyTableState *node) { case CMD_INSERT: ExecBSInsertTriggers(node->ps.state, resultRelInfo); - if (node->mt_onconflict == ONCONFLICT_UPDATE) + if (plan->onConflictAction == ONCONFLICT_UPDATE) ExecBSUpdateTriggers(node->ps.state, resultRelInfo); break; @@ -1411,17 +1580,20 @@ fireBSTriggers(ModifyTableState *node) } /* - * Return the ResultRelInfo for which we will fire AFTER STATEMENT triggers. - * This is also the relation into whose tuple format all captured transition - * tuples must be converted. + * Return the target rel ResultRelInfo. + * + * This relation is the same as : + * - the relation for which we will fire AFTER STATEMENT triggers. + * - the relation into whose tuple format all captured transition tuples must + * be converted. + * - the root partitioned table. */ static ResultRelInfo * -getASTriggerResultRelInfo(ModifyTableState *node) +getTargetResultRelInfo(ModifyTableState *node) { /* - * If the node modifies a partitioned table, we must fire its triggers. - * Note that in that case, node->resultRelInfo points to the first leaf - * partition, not the root table. + * Note that if the node modifies a partitioned table, node->resultRelInfo + * points to the first leaf partition, not the root table. 
*/ if (node->rootResultRelInfo != NULL) return node->rootResultRelInfo; @@ -1435,15 +1607,16 @@ getASTriggerResultRelInfo(ModifyTableState *node) static void fireASTriggers(ModifyTableState *node) { - ResultRelInfo *resultRelInfo = getASTriggerResultRelInfo(node); + ModifyTable *plan = (ModifyTable *) node->ps.plan; + ResultRelInfo *resultRelInfo = getTargetResultRelInfo(node); switch (node->operation) { case CMD_INSERT: - if (node->mt_onconflict == ONCONFLICT_UPDATE) + if (plan->onConflictAction == ONCONFLICT_UPDATE) ExecASUpdateTriggers(node->ps.state, resultRelInfo, - node->mt_transition_capture); + node->mt_oc_transition_capture); ExecASInsertTriggers(node->ps.state, resultRelInfo, node->mt_transition_capture); break; @@ -1468,63 +1641,303 @@ fireASTriggers(ModifyTableState *node) static void ExecSetupTransitionCaptureState(ModifyTableState *mtstate, EState *estate) { - ResultRelInfo *targetRelInfo = getASTriggerResultRelInfo(mtstate); - int i; + ModifyTable *plan = (ModifyTable *) mtstate->ps.plan; + ResultRelInfo *targetRelInfo = getTargetResultRelInfo(mtstate); /* Check for transition tables on the directly targeted relation. */ mtstate->mt_transition_capture = - MakeTransitionCaptureState(targetRelInfo->ri_TrigDesc); + MakeTransitionCaptureState(targetRelInfo->ri_TrigDesc, + RelationGetRelid(targetRelInfo->ri_RelationDesc), + mtstate->operation); + if (plan->operation == CMD_INSERT && + plan->onConflictAction == ONCONFLICT_UPDATE) + mtstate->mt_oc_transition_capture = + MakeTransitionCaptureState(targetRelInfo->ri_TrigDesc, + RelationGetRelid(targetRelInfo->ri_RelationDesc), + CMD_UPDATE); /* * If we found that we need to collect transition tuples then we may also * need tuple conversion maps for any children that have TupleDescs that - * aren't compatible with the tuplestores. + * aren't compatible with the tuplestores. (We can share these maps + * between the regular and ON CONFLICT cases.) */ - if (mtstate->mt_transition_capture != NULL) + if (mtstate->mt_transition_capture != NULL || + mtstate->mt_oc_transition_capture != NULL) + { + ExecSetupChildParentMapForTcs(mtstate); + + /* + * Install the conversion map for the first plan for UPDATE and DELETE + * operations. It will be advanced each time we switch to the next + * plan. (INSERT operations set it every time, so we need not update + * mtstate->mt_oc_transition_capture here.) + */ + if (mtstate->mt_transition_capture && mtstate->operation != CMD_INSERT) + mtstate->mt_transition_capture->tcs_map = + tupconv_map_for_subplan(mtstate, 0); + } +} + +/* + * ExecPrepareTupleRouting --- prepare for routing one tuple + * + * Determine the partition in which the tuple in slot is to be inserted, + * and modify mtstate and estate to prepare for it. + * + * Caller must revert the estate changes after executing the insertion! + * In mtstate, transition capture changes may also need to be reverted. + * + * Returns a slot holding the tuple of the partition rowtype. + */ +static TupleTableSlot * +ExecPrepareTupleRouting(ModifyTableState *mtstate, + EState *estate, + PartitionTupleRouting *proute, + ResultRelInfo *targetRelInfo, + TupleTableSlot *slot) +{ + ModifyTable *node; + int partidx; + ResultRelInfo *partrel; + HeapTuple tuple; + TupleConversionMap *map; + + /* + * Determine the target partition. If ExecFindPartition does not find a + * partition after all, it doesn't return here; otherwise, the returned + * value is to be used as an index into the arrays for the ResultRelInfo + * and TupleConversionMap for the partition. 
+ */ + partidx = ExecFindPartition(targetRelInfo, + proute->partition_dispatch_info, + slot, + estate); + Assert(partidx >= 0 && partidx < proute->num_partitions); + + /* + * Get the ResultRelInfo corresponding to the selected partition; if not + * yet there, initialize it. + */ + partrel = proute->partitions[partidx]; + if (partrel == NULL) + partrel = ExecInitPartitionInfo(mtstate, targetRelInfo, + proute, estate, + partidx); + + /* + * Check whether the partition is routable if we didn't yet + * + * Note: an UPDATE of a partition key invokes an INSERT that moves the + * tuple to a new partition. This check would be applied to a subplan + * partition of such an UPDATE that is chosen as the partition to route + * the tuple to. The reason we do this check here rather than in + * ExecSetupPartitionTupleRouting is to avoid aborting such an UPDATE + * unnecessarily due to non-routable subplan partitions that may not be + * chosen for update tuple movement after all. + */ + if (!partrel->ri_PartitionReadyForRouting) { - ResultRelInfo *resultRelInfos; - int numResultRelInfos; + /* Verify the partition is a valid target for INSERT. */ + CheckValidResultRel(partrel, CMD_INSERT); - /* Find the set of partitions so that we can find their TupleDescs. */ - if (mtstate->mt_partition_dispatch_info != NULL) + /* Set up information needed for routing tuples to the partition. */ + ExecInitRoutingInfo(mtstate, estate, proute, partrel, partidx); + } + + /* + * Make it look like we are inserting into the partition. + */ + estate->es_result_relation_info = partrel; + + /* Get the heap tuple out of the given slot. */ + tuple = ExecMaterializeSlot(slot); + + /* + * If we're capturing transition tuples, we might need to convert from the + * partition rowtype to parent rowtype. + */ + if (mtstate->mt_transition_capture != NULL) + { + if (partrel->ri_TrigDesc && + partrel->ri_TrigDesc->trig_insert_before_row) { /* - * For INSERT via partitioned table, so we need TupleDescs based - * on the partition routing table. + * If there are any BEFORE triggers on the partition, we'll have + * to be ready to convert their result back to tuplestore format. */ - resultRelInfos = mtstate->mt_partitions; - numResultRelInfos = mtstate->mt_num_partitions; + mtstate->mt_transition_capture->tcs_original_insert_tuple = NULL; + mtstate->mt_transition_capture->tcs_map = + TupConvMapForLeaf(proute, targetRelInfo, partidx); } else { - /* Otherwise we need the ResultRelInfo for each subplan. */ - resultRelInfos = mtstate->resultRelInfo; - numResultRelInfos = mtstate->mt_nplans; + /* + * Otherwise, just remember the original unconverted tuple, to + * avoid a needless round trip conversion. + */ + mtstate->mt_transition_capture->tcs_original_insert_tuple = tuple; + mtstate->mt_transition_capture->tcs_map = NULL; } + } + if (mtstate->mt_oc_transition_capture != NULL) + { + mtstate->mt_oc_transition_capture->tcs_map = + TupConvMapForLeaf(proute, targetRelInfo, partidx); + } + + /* + * Convert the tuple, if necessary. + */ + map = proute->parent_child_tupconv_maps[partidx]; + if (map != NULL) + { + TupleTableSlot *new_slot; + + Assert(proute->partition_tuple_slots != NULL && + proute->partition_tuple_slots[partidx] != NULL); + new_slot = proute->partition_tuple_slots[partidx]; + slot = execute_attr_map_slot(map->attrMap, slot, new_slot); + } + + /* Initialize information needed to handle ON CONFLICT DO UPDATE. 
*/ + Assert(mtstate != NULL); + node = (ModifyTable *) mtstate->ps.plan; + if (node->onConflictAction == ONCONFLICT_UPDATE) + { + Assert(mtstate->mt_existing != NULL); + ExecSetSlotDescriptor(mtstate->mt_existing, + RelationGetDescr(partrel->ri_RelationDesc)); + Assert(mtstate->mt_conflproj != NULL); + ExecSetSlotDescriptor(mtstate->mt_conflproj, + partrel->ri_onConflict->oc_ProjTupdesc); + } + return slot; +} + +/* + * Initialize the child-to-root tuple conversion map array for UPDATE subplans. + * + * This map array is required to convert the tuple from the subplan result rel + * to the target table descriptor. This requirement arises for two independent + * scenarios: + * 1. For update-tuple-routing. + * 2. For capturing tuples in transition tables. + */ +static void +ExecSetupChildParentMapForSubplan(ModifyTableState *mtstate) +{ + ResultRelInfo *targetRelInfo = getTargetResultRelInfo(mtstate); + ResultRelInfo *resultRelInfos = mtstate->resultRelInfo; + TupleDesc outdesc; + int numResultRelInfos = mtstate->mt_nplans; + int i; + + /* + * First check if there is already a per-subplan array allocated. Even if + * there is already a per-leaf map array, we won't require a per-subplan + * one, since we will use the subplan offset array to convert the subplan + * index to per-leaf index. + */ + if (mtstate->mt_per_subplan_tupconv_maps || + (mtstate->mt_partition_tuple_routing && + mtstate->mt_partition_tuple_routing->child_parent_tupconv_maps)) + return; + + /* + * Build array of conversion maps from each child's TupleDesc to the one + * used in the target relation. The map pointers may be NULL when no + * conversion is necessary, which is hopefully a common case. + */ + + /* Get tuple descriptor of the target rel. */ + outdesc = RelationGetDescr(targetRelInfo->ri_RelationDesc); + + mtstate->mt_per_subplan_tupconv_maps = (TupleConversionMap **) + palloc(sizeof(TupleConversionMap *) * numResultRelInfos); + + for (i = 0; i < numResultRelInfos; ++i) + { + mtstate->mt_per_subplan_tupconv_maps[i] = + convert_tuples_by_name(RelationGetDescr(resultRelInfos[i].ri_RelationDesc), + outdesc, + gettext_noop("could not convert row type")); + } +} + +/* + * Initialize the child-to-root tuple conversion map array required for + * capturing transition tuples. + * + * The map array can be indexed either by subplan index or by leaf-partition + * index. For transition tables, we need a subplan-indexed access to the map, + * and where tuple-routing is present, we also require a leaf-indexed access. + */ +static void +ExecSetupChildParentMapForTcs(ModifyTableState *mtstate) +{ + PartitionTupleRouting *proute = mtstate->mt_partition_tuple_routing; + + /* + * If partition tuple routing is set up, we will require partition-indexed + * access. In that case, create the map array indexed by partition; we + * will still be able to access the maps using a subplan index by + * converting the subplan index to a partition index using + * subplan_partition_offsets. If tuple routing is not set up, it means we + * don't require partition-indexed access. In that case, create just a + * subplan-indexed map. + */ + if (proute) + { /* - * Build array of conversion maps from each child's TupleDesc to the - * one used in the tuplestore. The map pointers may be NULL when no - * conversion is necessary, which is hopefully a common case for - * partitions. + * If a partition-indexed map array is to be created, the subplan map + * array has to be NULL. 
If the subplan map array is already created, + * we won't be able to access the map using a partition index. */ - mtstate->mt_transition_tupconv_maps = (TupleConversionMap **) - palloc0(sizeof(TupleConversionMap *) * numResultRelInfos); - for (i = 0; i < numResultRelInfos; ++i) - { - mtstate->mt_transition_tupconv_maps[i] = - convert_tuples_by_name(RelationGetDescr(resultRelInfos[i].ri_RelationDesc), - RelationGetDescr(targetRelInfo->ri_RelationDesc), - gettext_noop("could not convert row type")); - } + Assert(mtstate->mt_per_subplan_tupconv_maps == NULL); + + ExecSetupChildParentMapForLeaf(proute); + } + else + ExecSetupChildParentMapForSubplan(mtstate); +} + +/* + * For a given subplan index, get the tuple conversion map. + */ +static TupleConversionMap * +tupconv_map_for_subplan(ModifyTableState *mtstate, int whichplan) +{ + /* + * If a partition-index tuple conversion map array is allocated, we need + * to first get the index into the partition array. Exactly *one* of the + * two arrays is allocated. This is because if there is a partition array + * required, we don't require subplan-indexed array since we can translate + * subplan index into partition index. And, we create a subplan-indexed + * array *only* if partition-indexed array is not required. + */ + if (mtstate->mt_per_subplan_tupconv_maps == NULL) + { + int leaf_index; + PartitionTupleRouting *proute = mtstate->mt_partition_tuple_routing; /* - * Install the conversion map for the first plan for UPDATE and DELETE - * operations. It will be advanced each time we switch to the next - * plan. (INSERT operations set it every time.) + * If subplan-indexed array is NULL, things should have been arranged + * to convert the subplan index to partition index. */ - mtstate->mt_transition_capture->tcs_map = - mtstate->mt_transition_tupconv_maps[0]; + Assert(proute && proute->subplan_partition_offsets != NULL && + whichplan < proute->num_subplan_partition_offsets); + + leaf_index = proute->subplan_partition_offsets[whichplan]; + + return TupConvMapForLeaf(proute, getTargetResultRelInfo(mtstate), + leaf_index); + } + else + { + Assert(whichplan >= 0 && whichplan < mtstate->mt_nplans); + return mtstate->mt_per_subplan_tupconv_maps[whichplan]; } } @@ -1539,6 +1952,7 @@ static TupleTableSlot * ExecModifyTable(PlanState *pstate) { ModifyTableState *node = castNode(ModifyTableState, pstate); + PartitionTupleRouting *proute = node->mt_partition_tuple_routing; EState *estate = node->ps.state; CmdType operation = node->operation; ResultRelInfo *saved_resultRelInfo; @@ -1547,7 +1961,7 @@ ExecModifyTable(PlanState *pstate) JunkFilter *junkfilter; TupleTableSlot *slot; TupleTableSlot *planSlot; - ItemPointer tupleid = NULL; + ItemPointer tupleid; ItemPointerData tuple_ctid; HeapTupleData oldtupdata; HeapTuple oldtuple; @@ -1628,12 +2042,16 @@ ExecModifyTable(PlanState *pstate) estate->es_result_relation_info = resultRelInfo; EvalPlanQualSetPlan(&node->mt_epqstate, subplanstate->plan, node->mt_arowmarks[node->mt_whichplan]); + /* Prepare to convert transition tuples from this child. */ if (node->mt_transition_capture != NULL) { - /* Prepare to convert transition tuples from this child. 
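The per-subplan maps consulted by tupconv_map_for_subplan() above are built with convert_tuples_by_name(), which returns NULL when the child and root descriptors already match column-for-column, so conversion work is paid for only when it is actually needed. A hedged sketch of the one-shot form of that pattern for a single tuple (both relations assumed already open; the helper name is illustrative, and in this era of the code do_convert_tuple() is the per-tuple conversion routine):

    #include "postgres.h"

    #include "access/htup_details.h"
    #include "access/tupconvert.h"
    #include "utils/rel.h"

    /* Convert a child-relation tuple to the root relation's row type, if needed. */
    static HeapTuple
    convert_child_to_root(Relation childrel, Relation rootrel, HeapTuple tuple)
    {
        TupleConversionMap *map;

        map = convert_tuples_by_name(RelationGetDescr(childrel),
                                     RelationGetDescr(rootrel),
                                     gettext_noop("could not convert row type"));
        if (map == NULL)
            return tuple;           /* descriptors already match */

        tuple = do_convert_tuple(tuple, map);
        free_conversion_map(map);
        return tuple;
    }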
*/ - Assert(node->mt_transition_tupconv_maps != NULL); node->mt_transition_capture->tcs_map = - node->mt_transition_tupconv_maps[node->mt_whichplan]; + tupconv_map_for_subplan(node, node->mt_whichplan); + } + if (node->mt_oc_transition_capture != NULL) + { + node->mt_oc_transition_capture->tcs_map = + tupconv_map_for_subplan(node, node->mt_whichplan); } continue; } @@ -1664,6 +2082,7 @@ ExecModifyTable(PlanState *pstate) EvalPlanQualSetSlot(&node->mt_epqstate, planSlot); slot = planSlot; + tupleid = NULL; oldtuple = NULL; if (junkfilter != NULL) { @@ -1739,9 +2158,15 @@ ExecModifyTable(PlanState *pstate) switch (operation) { case CMD_INSERT: + /* Prepare for tuple routing if needed. */ + if (proute) + slot = ExecPrepareTupleRouting(node, estate, proute, + resultRelInfo, slot); slot = ExecInsert(node, slot, planSlot, - node->mt_arbiterindexes, node->mt_onconflict, estate, node->canSetTag); + /* Revert ExecPrepareTupleRouting's state change. */ + if (proute) + estate->es_result_relation_info = resultRelInfo; break; case CMD_UPDATE: slot = ExecUpdate(node, tupleid, oldtuple, slot, planSlot, @@ -1749,7 +2174,9 @@ ExecModifyTable(PlanState *pstate) break; case CMD_DELETE: slot = ExecDelete(node, tupleid, oldtuple, planSlot, - &node->mt_epqstate, estate, node->canSetTag); + &node->mt_epqstate, estate, + true, node->canSetTag, + false /* changingPart */ , NULL, NULL); break; default: elog(ERROR, "unknown operation"); @@ -1792,11 +2219,11 @@ ExecInitModifyTable(ModifyTable *node, EState *estate, int eflags) int nplans = list_length(node->plans); ResultRelInfo *saved_resultRelInfo; ResultRelInfo *resultRelInfo; - TupleDesc tupDesc; Plan *subplan; ListCell *l; int i; Relation rel; + bool update_tuple_routing_needed = node->partColsUpdated; /* check for unsupported flags */ Assert(!(eflags & (EXEC_FLAG_BACKWARD | EXEC_FLAG_MARK))); @@ -1823,8 +2250,6 @@ ExecInitModifyTable(ModifyTable *node, EState *estate, int eflags) mtstate->mt_arowmarks = (List **) palloc0(sizeof(List *) * nplans); mtstate->mt_nplans = nplans; - mtstate->mt_onconflict = node->onConflictAction; - mtstate->mt_arbiterindexes = node->arbiterIndexes; /* set up epqstate with dummy subplan data for the moment */ EvalPlanQualInit(&mtstate->mt_epqstate, estate, NULL, NIL, node->epqParam); @@ -1853,7 +2278,7 @@ ExecInitModifyTable(ModifyTable *node, EState *estate, int eflags) /* * Verify result relation is a valid target for the current operation */ - CheckValidResultRel(resultRelInfo->ri_RelationDesc, operation); + CheckValidResultRel(resultRelInfo, operation); /* * If there are indices on the result relation, open them and save @@ -1867,7 +2292,18 @@ ExecInitModifyTable(ModifyTable *node, EState *estate, int eflags) if (resultRelInfo->ri_RelationDesc->rd_rel->relhasindex && operation != CMD_DELETE && resultRelInfo->ri_IndexRelationDescs == NULL) - ExecOpenIndices(resultRelInfo, mtstate->mt_onconflict != ONCONFLICT_NONE); + ExecOpenIndices(resultRelInfo, + node->onConflictAction != ONCONFLICT_NONE); + + /* + * If this is an UPDATE and a BEFORE UPDATE trigger is present, the + * trigger itself might modify the partition-key values. So arrange + * for tuple routing. 
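/*
 * Editorial aside: a minimal standalone sketch (hypothetical names, not part
 * of this patch) of the index-translation scheme used by
 * tupconv_map_for_subplan above. When only a leaf-indexed map array exists,
 * a subplan index is first translated to a leaf-partition index through an
 * offsets array; otherwise the subplan-indexed array is consulted directly.
 */
#include <assert.h>
#include <stdio.h>

typedef const char *FakeMap;	/* stands in for TupleConversionMap * */

static FakeMap
map_for_subplan(FakeMap *per_subplan, FakeMap *per_leaf,
				const int *subplan_offsets, int nsubplans, int whichplan)
{
	assert(whichplan >= 0 && whichplan < nsubplans);
	if (per_subplan != NULL)
		return per_subplan[whichplan];	/* direct, subplan-indexed access */
	/* only the leaf-indexed array exists; translate the subplan index */
	return per_leaf[subplan_offsets[whichplan]];
}

int
main(void)
{
	FakeMap		per_leaf[] = {"leaf0", "leaf1", "leaf2", "leaf3"};
	int			offsets[] = {1, 3};	/* subplan 0 -> leaf 1, subplan 1 -> leaf 3 */

	printf("%s\n", map_for_subplan(NULL, per_leaf, offsets, 2, 1));	/* leaf3 */
	return 0;
}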
+ */ + if (resultRelInfo->ri_TrigDesc && + resultRelInfo->ri_TrigDesc->trig_update_before_row && + operation == CMD_UPDATE) + update_tuple_routing_needed = true; /* Now init the plan for this result rel */ estate->es_result_relation_info = resultRelInfo; @@ -1893,47 +2329,42 @@ ExecInitModifyTable(ModifyTable *node, EState *estate, int eflags) estate->es_result_relation_info = saved_resultRelInfo; - /* The root table RT index is at the head of the partitioned_rels list */ - if (node->partitioned_rels) - { - Index root_rti; - Oid root_oid; + /* Get the target relation */ + rel = (getTargetResultRelInfo(mtstate))->ri_RelationDesc; - root_rti = linitial_int(node->partitioned_rels); - root_oid = getrelid(root_rti, estate->es_range_table); - rel = heap_open(root_oid, NoLock); /* locked by InitPlan */ - } - else - rel = mtstate->resultRelInfo->ri_RelationDesc; + /* + * If it's not a partitioned table after all, UPDATE tuple routing should + * not be attempted. + */ + if (rel->rd_rel->relkind != RELKIND_PARTITIONED_TABLE) + update_tuple_routing_needed = false; - /* Build state for INSERT tuple routing */ - if (operation == CMD_INSERT && - rel->rd_rel->relkind == RELKIND_PARTITIONED_TABLE) - { - PartitionDispatch *partition_dispatch_info; - ResultRelInfo *partitions; - TupleConversionMap **partition_tupconv_maps; - TupleTableSlot *partition_tuple_slot; - int num_parted, - num_partitions; - - ExecSetupPartitionTupleRouting(rel, - node->nominalRelation, - &partition_dispatch_info, - &partitions, - &partition_tupconv_maps, - &partition_tuple_slot, - &num_parted, &num_partitions); - mtstate->mt_partition_dispatch_info = partition_dispatch_info; - mtstate->mt_num_dispatch = num_parted; - mtstate->mt_partitions = partitions; - mtstate->mt_num_partitions = num_partitions; - mtstate->mt_partition_tupconv_maps = partition_tupconv_maps; - mtstate->mt_partition_tuple_slot = partition_tuple_slot; - } + /* + * Build state for tuple routing if it's an INSERT or if it's an UPDATE of + * partition key. + */ + if (rel->rd_rel->relkind == RELKIND_PARTITIONED_TABLE && + (operation == CMD_INSERT || update_tuple_routing_needed)) + mtstate->mt_partition_tuple_routing = + ExecSetupPartitionTupleRouting(mtstate, rel); - /* Build state for collecting transition tuples */ - ExecSetupTransitionCaptureState(mtstate, estate); + /* + * Build state for collecting transition tuples. This requires having a + * valid trigger query context, so skip it in explain-only mode. + */ + if (!(eflags & EXEC_FLAG_EXPLAIN_ONLY)) + ExecSetupTransitionCaptureState(mtstate, estate); + + /* + * Construct mapping from each of the per-subplan partition attnos to the + * root attno. This is required when during update row movement the tuple + * descriptor of a source partition does not match the root partitioned + * table descriptor. In such a case we need to convert tuples to the root + * tuple descriptor, because the search for destination partition starts + * from the root. Skip this setup if it's not a partition key update. + */ + if (update_tuple_routing_needed) + ExecSetupChildParentMapForSubplan(mtstate); /* * Initialize any WITH CHECK OPTION constraints if needed. @@ -1961,57 +2392,6 @@ ExecInitModifyTable(ModifyTable *node, EState *estate, int eflags) i++; } - /* - * Build WITH CHECK OPTION constraints for each leaf partition rel. Note - * that we didn't build the withCheckOptionList for each partition within - * the planner, but simple translation of the varattnos for each partition - * will suffice. 
This only occurs for the INSERT case; UPDATE/DELETE - * cases are handled above. - */ - if (node->withCheckOptionLists != NIL && mtstate->mt_num_partitions > 0) - { - List *wcoList; - PlanState *plan; - - /* - * In case of INSERT on partitioned tables, there is only one plan. - * Likewise, there is only one WITH CHECK OPTIONS list, not one per - * partition. We make a copy of the WCO qual for each partition; note - * that, if there are SubPlans in there, they all end up attached to - * the one parent Plan node. - */ - Assert(operation == CMD_INSERT && - list_length(node->withCheckOptionLists) == 1 && - mtstate->mt_nplans == 1); - wcoList = linitial(node->withCheckOptionLists); - plan = mtstate->mt_plans[0]; - resultRelInfo = mtstate->mt_partitions; - for (i = 0; i < mtstate->mt_num_partitions; i++) - { - Relation partrel = resultRelInfo->ri_RelationDesc; - List *mapped_wcoList; - List *wcoExprs = NIL; - ListCell *ll; - - /* varno = node->nominalRelation */ - mapped_wcoList = map_partition_varattnos(wcoList, - node->nominalRelation, - partrel, rel, NULL); - foreach(ll, mapped_wcoList) - { - WithCheckOption *wco = castNode(WithCheckOption, lfirst(ll)); - ExprState *wcoExpr = ExecInitQual(castNode(List, wco->qual), - plan); - - wcoExprs = lappend(wcoExprs, wcoExpr); - } - - resultRelInfo->ri_WithCheckOptions = mapped_wcoList; - resultRelInfo->ri_WithCheckOptionExprs = wcoExprs; - resultRelInfo++; - } - } - /* * Initialize RETURNING projections if needed. */ @@ -2019,18 +2399,15 @@ ExecInitModifyTable(ModifyTable *node, EState *estate, int eflags) { TupleTableSlot *slot; ExprContext *econtext; - List *returningList; /* * Initialize result tuple slot and assign its rowtype using the first * RETURNING list. We assume the rest will look the same. */ - tupDesc = ExecTypeFromTL((List *) linitial(node->returningLists), - false); + mtstate->ps.plan->targetlist = (List *) linitial(node->returningLists); /* Set up a slot for the output of the RETURNING projection(s) */ - ExecInitResultTupleSlot(estate, &mtstate->ps); - ExecAssignResultType(&mtstate->ps, tupDesc); + ExecInitResultTupleSlotTL(&mtstate->ps); slot = mtstate->ps.ps_ResultTupleSlot; /* Need an econtext too */ @@ -2046,30 +2423,7 @@ ExecInitModifyTable(ModifyTable *node, EState *estate, int eflags) { List *rlist = (List *) lfirst(l); - resultRelInfo->ri_projectReturning = - ExecBuildProjectionInfo(rlist, econtext, slot, &mtstate->ps, - resultRelInfo->ri_RelationDesc->rd_att); - resultRelInfo++; - } - - /* - * Build a projection for each leaf partition rel. Note that we - * didn't build the returningList for each partition within the - * planner, but simple translation of the varattnos for each partition - * will suffice. This only occurs for the INSERT case; UPDATE/DELETE - * are handled above. - */ - resultRelInfo = mtstate->mt_partitions; - returningList = linitial(node->returningLists); - for (i = 0; i < mtstate->mt_num_partitions; i++) - { - Relation partrel = resultRelInfo->ri_RelationDesc; - List *rlist; - - /* varno = node->nominalRelation */ - rlist = map_partition_varattnos(returningList, - node->nominalRelation, - partrel, rel, NULL); + resultRelInfo->ri_returningList = rlist; resultRelInfo->ri_projectReturning = ExecBuildProjectionInfo(rlist, econtext, slot, &mtstate->ps, resultRelInfo->ri_RelationDesc->rd_att); @@ -2082,25 +2436,25 @@ ExecInitModifyTable(ModifyTable *node, EState *estate, int eflags) * We still must construct a dummy result tuple type, because InitPlan * expects one (maybe should change that?). 
*/ - tupDesc = ExecTypeFromTL(NIL, false); - ExecInitResultTupleSlot(estate, &mtstate->ps); - ExecAssignResultType(&mtstate->ps, tupDesc); + mtstate->ps.plan->targetlist = NIL; + ExecInitResultTypeTL(&mtstate->ps); mtstate->ps.ps_ExprContext = NULL; } - /* Close the root partitioned rel if we opened it above. */ - if (rel != mtstate->resultRelInfo->ri_RelationDesc) - heap_close(rel, NoLock); + /* Set the list of arbiter indexes if needed for ON CONFLICT */ + resultRelInfo = mtstate->resultRelInfo; + if (node->onConflictAction != ONCONFLICT_NONE) + resultRelInfo->ri_onConflictArbiterIndexes = node->arbiterIndexes; /* * If needed, Initialize target list, projection and qual for ON CONFLICT * DO UPDATE. */ - resultRelInfo = mtstate->resultRelInfo; if (node->onConflictAction == ONCONFLICT_UPDATE) { ExprContext *econtext; + TupleDesc relationDesc; TupleDesc tupDesc; /* insert may only have one plan, inheritance is not expanded */ @@ -2111,36 +2465,56 @@ ExecInitModifyTable(ModifyTable *node, EState *estate, int eflags) ExecAssignExprContext(estate, &mtstate->ps); econtext = mtstate->ps.ps_ExprContext; + relationDesc = resultRelInfo->ri_RelationDesc->rd_att; - /* initialize slot for the existing tuple */ - mtstate->mt_existing = ExecInitExtraTupleSlot(mtstate->ps.state); - ExecSetSlotDescriptor(mtstate->mt_existing, - resultRelInfo->ri_RelationDesc->rd_att); + /* + * Initialize slot for the existing tuple. If we'll be performing + * tuple routing, the tuple descriptor to use for this will be + * determined based on which relation the update is actually applied + * to, so we don't set its tuple descriptor here. + */ + mtstate->mt_existing = + ExecInitExtraTupleSlot(mtstate->ps.state, + mtstate->mt_partition_tuple_routing ? + NULL : relationDesc); /* carried forward solely for the benefit of explain */ mtstate->mt_excludedtlist = node->exclRelTlist; - /* create target slot for UPDATE SET projection */ + /* create state for DO UPDATE SET operation */ + resultRelInfo->ri_onConflict = makeNode(OnConflictSetState); + + /* + * Create the tuple slot for the UPDATE SET projection. + * + * Just like mt_existing above, we leave it without a tuple descriptor + * in the case of partitioning tuple routing, so that it can be + * changed by ExecPrepareTupleRouting. In that case, we still save + * the tupdesc in the parent's state: it can be reused by partitions + * with an identical descriptor to the parent. + */ tupDesc = ExecTypeFromTL((List *) node->onConflictSet, - resultRelInfo->ri_RelationDesc->rd_rel->relhasoids); - mtstate->mt_conflproj = ExecInitExtraTupleSlot(mtstate->ps.state); - ExecSetSlotDescriptor(mtstate->mt_conflproj, tupDesc); + relationDesc->tdhasoid); + mtstate->mt_conflproj = + ExecInitExtraTupleSlot(mtstate->ps.state, + mtstate->mt_partition_tuple_routing ? 
+ NULL : tupDesc); + resultRelInfo->ri_onConflict->oc_ProjTupdesc = tupDesc; /* build UPDATE SET projection state */ - resultRelInfo->ri_onConflictSetProj = + resultRelInfo->ri_onConflict->oc_ProjInfo = ExecBuildProjectionInfo(node->onConflictSet, econtext, mtstate->mt_conflproj, &mtstate->ps, - resultRelInfo->ri_RelationDesc->rd_att); + relationDesc); - /* build DO UPDATE WHERE clause expression */ + /* initialize state to evaluate the WHERE clause, if any */ if (node->onConflictWhere) { ExprState *qualexpr; qualexpr = ExecInitQual((List *) node->onConflictWhere, &mtstate->ps); - - resultRelInfo->ri_onConflictSetWhere = qualexpr; + resultRelInfo->ri_onConflict->oc_WhereClause = qualexpr; } } @@ -2235,7 +2609,7 @@ ExecInitModifyTable(ModifyTable *node, EState *estate, int eflags) j = ExecInitJunkFilter(subplan->targetlist, resultRelInfo->ri_RelationDesc->rd_att->tdhasoid, - ExecInitExtraTupleSlot(estate)); + ExecInitExtraTupleSlot(estate, NULL)); if (operation == CMD_UPDATE || operation == CMD_DELETE) { @@ -2285,7 +2659,7 @@ ExecInitModifyTable(ModifyTable *node, EState *estate, int eflags) * we keep it in the estate. */ if (estate->es_trig_tuple_slot == NULL) - estate->es_trig_tuple_slot = ExecInitExtraTupleSlot(estate); + estate->es_trig_tuple_slot = ExecInitExtraTupleSlot(estate, NULL); /* * Lastly, if this is not the primary (canSetTag) ModifyTable node, add it @@ -2316,10 +2690,6 @@ ExecEndModifyTable(ModifyTableState *node) { int i; - /* Free transition tables */ - if (node->mt_transition_capture != NULL) - DestroyTransitionCaptureState(node->mt_transition_capture); - /* * Allow any FDWs to shut down */ @@ -2334,32 +2704,9 @@ ExecEndModifyTable(ModifyTableState *node) resultRelInfo); } - /* - * Close all the partitioned tables, leaf partitions, and their indices - * - * Remember node->mt_partition_dispatch_info[0] corresponds to the root - * partitioned table, which we must not try to close, because it is the - * main target table of the query that will be closed by ExecEndPlan(). - * Also, tupslot is NULL for the root partitioned table. - */ - for (i = 1; i < node->mt_num_dispatch; i++) - { - PartitionDispatch pd = node->mt_partition_dispatch_info[i]; - - heap_close(pd->reldesc, NoLock); - ExecDropSingleTupleTableSlot(pd->tupslot); - } - for (i = 0; i < node->mt_num_partitions; i++) - { - ResultRelInfo *resultRelInfo = node->mt_partitions + i; - - ExecCloseIndices(resultRelInfo); - heap_close(resultRelInfo->ri_RelationDesc, NoLock); - } - - /* Release the standalone partition tuple descriptor, if any */ - if (node->mt_partition_tuple_slot) - ExecDropSingleTupleTableSlot(node->mt_partition_tuple_slot); + /* Close all the partitioned tables, leaf partitions, and their indices */ + if (node->mt_partition_tuple_routing) + ExecCleanupTupleRouting(node, node->mt_partition_tuple_routing); /* * Free the exprcontext @@ -2369,7 +2716,8 @@ ExecEndModifyTable(ModifyTableState *node) /* * clean out the tuple table */ - ExecClearTuple(node->ps.ps_ResultTupleSlot); + if (node->ps.ps_ResultTupleSlot) + ExecClearTuple(node->ps.ps_ResultTupleSlot); /* * Terminate EPQ execution if active diff --git a/src/backend/executor/nodeNamedtuplestorescan.c b/src/backend/executor/nodeNamedtuplestorescan.c index 3a65b9f5dc..cf1b7b4f87 100644 --- a/src/backend/executor/nodeNamedtuplestorescan.c +++ b/src/backend/executor/nodeNamedtuplestorescan.c @@ -3,7 +3,7 @@ * nodeNamedtuplestorescan.c * routines to handle NamedTuplestoreScan nodes. 
* - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * @@ -40,6 +40,7 @@ NamedTuplestoreScanNext(NamedTuplestoreScanState *node) * Get the next tuple from tuplestore. Return NULL if no more tuples. */ slot = node->ss.ss_ScanTupleSlot; + tuplestore_select_read_pointer(node->relation, node->readptr); (void) tuplestore_gettupleslot(node->relation, true, false, slot); return slot; } @@ -116,6 +117,7 @@ ExecInitNamedTuplestoreScan(NamedTuplestoreScan *node, EState *estate, int eflag * The new read pointer copies its position from read pointer 0, which * could be anywhere, so explicitly rewind it. */ + tuplestore_select_read_pointer(scanstate->relation, scanstate->readptr); tuplestore_rescan(scanstate->relation); /* @@ -133,27 +135,21 @@ ExecInitNamedTuplestoreScan(NamedTuplestoreScan *node, EState *estate, int eflag ExecAssignExprContext(estate, &scanstate->ss.ps); /* - * initialize child expressions + * The scan tuple type is specified for the tuplestore. */ - scanstate->ss.ps.qual = - ExecInitQual(node->scan.plan.qual, (PlanState *) scanstate); + ExecInitScanTupleSlot(estate, &scanstate->ss, scanstate->tupdesc); /* - * tuple table initialization + * Initialize result type and projection. */ - ExecInitResultTupleSlot(estate, &scanstate->ss.ps); - ExecInitScanTupleSlot(estate, &scanstate->ss); - - /* - * The scan tuple type is specified for the tuplestore. - */ - ExecAssignScanType(&scanstate->ss, scanstate->tupdesc); + ExecInitResultTypeTL(&scanstate->ss.ps); + ExecAssignScanProjectionInfo(&scanstate->ss); /* - * Initialize result tuple type and projection info. + * initialize child expressions */ - ExecAssignResultTypeFromTL(&scanstate->ss.ps); - ExecAssignScanProjectionInfo(&scanstate->ss); + scanstate->ss.ps.qual = + ExecInitQual(node->scan.plan.qual, (PlanState *) scanstate); return scanstate; } @@ -175,7 +171,8 @@ ExecEndNamedTuplestoreScan(NamedTuplestoreScanState *node) /* * clean out the tuple table */ - ExecClearTuple(node->ss.ps.ps_ResultTupleSlot); + if (node->ss.ps.ps_ResultTupleSlot) + ExecClearTuple(node->ss.ps.ps_ResultTupleSlot); ExecClearTuple(node->ss.ss_ScanTupleSlot); } @@ -190,7 +187,8 @@ ExecReScanNamedTuplestoreScan(NamedTuplestoreScanState *node) { Tuplestorestate *tuplestorestate = node->relation; - ExecClearTuple(node->ss.ps.ps_ResultTupleSlot); + if (node->ss.ps.ps_ResultTupleSlot) + ExecClearTuple(node->ss.ps.ps_ResultTupleSlot); ExecScanReScan(&node->ss); diff --git a/src/backend/executor/nodeNestloop.c b/src/backend/executor/nodeNestloop.c index 4447b7c051..8dbec685eb 100644 --- a/src/backend/executor/nodeNestloop.c +++ b/src/backend/executor/nodeNestloop.c @@ -3,7 +3,7 @@ * nodeNestloop.c * routines to support nest-loop joins * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * @@ -285,15 +285,6 @@ ExecInitNestLoop(NestLoop *node, EState *estate, int eflags) */ ExecAssignExprContext(estate, &nlstate->js.ps); - /* - * initialize child expressions - */ - nlstate->js.ps.qual = - ExecInitQual(node->join.plan.qual, (PlanState *) nlstate); - nlstate->js.jointype = node->join.jointype; - nlstate->js.joinqual = - ExecInitQual(node->join.joinqual, (PlanState *) nlstate); - /* * initialize child nodes * @@ -311,9 +302,19 @@ 
ExecInitNestLoop(NestLoop *node, EState *estate, int eflags) innerPlanState(nlstate) = ExecInitNode(innerPlan(node), estate, eflags); /* - * tuple table initialization + * Initialize result slot, type and projection. */ - ExecInitResultTupleSlot(estate, &nlstate->js.ps); + ExecInitResultTupleSlotTL(&nlstate->js.ps); + ExecAssignProjectionInfo(&nlstate->js.ps, NULL); + + /* + * initialize child expressions + */ + nlstate->js.ps.qual = + ExecInitQual(node->join.plan.qual, (PlanState *) nlstate); + nlstate->js.jointype = node->join.jointype; + nlstate->js.joinqual = + ExecInitQual(node->join.joinqual, (PlanState *) nlstate); /* * detect whether we need only consider the first matching inner tuple @@ -338,12 +339,6 @@ ExecInitNestLoop(NestLoop *node, EState *estate, int eflags) (int) node->join.jointype); } - /* - * initialize tuple type and projection info - */ - ExecAssignResultTypeFromTL(&nlstate->js.ps); - ExecAssignProjectionInfo(&nlstate->js.ps, NULL); - /* * finally, wipe the current outer tuple clean. */ diff --git a/src/backend/executor/nodeProjectSet.c b/src/backend/executor/nodeProjectSet.c index d93462c542..e4dd414217 100644 --- a/src/backend/executor/nodeProjectSet.c +++ b/src/backend/executor/nodeProjectSet.c @@ -11,7 +11,7 @@ * can't be inside more-complex expressions. If that'd otherwise be * the case, the planner adds additional ProjectSet nodes. * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * IDENTIFICATION @@ -52,6 +52,13 @@ ExecProjectSet(PlanState *pstate) econtext = node->ps.ps_ExprContext; + /* + * Reset per-tuple context to free expression-evaluation storage allocated + * for a potentially previously returned tuple. Note that the SRF argument + * context has a different lifetime and is reset below. + */ + ResetExprContext(econtext); + /* * Check to see if we're still projecting out tuples from a previous scan * tuple (because there is a function-returning-set in the projection @@ -66,11 +73,13 @@ ExecProjectSet(PlanState *pstate) } /* - * Reset per-tuple memory context to free any expression evaluation - * storage allocated in the previous tuple cycle. Note this can't happen - * until we're done projecting out tuples from a scan tuple. + * Reset argument context to free any expression evaluation storage + * allocated in the previous tuple cycle. Note this can't happen until + * we're done projecting out tuples from a scan tuple, as ValuePerCall + * functions are allowed to reference the arguments for each returned + * tuple. */ - ResetExprContext(econtext); + MemoryContextReset(node->argcontext); /* * Get another input tuple and project SRFs from it. @@ -124,12 +133,16 @@ ExecProjectSRF(ProjectSetState *node, bool continuing) { TupleTableSlot *resultSlot = node->ps.ps_ResultTupleSlot; ExprContext *econtext = node->ps.ps_ExprContext; + MemoryContext oldcontext; bool hassrf PG_USED_FOR_ASSERTS_ONLY; bool hasresult; int argno; ExecClearTuple(resultSlot); + /* Call SRFs, as well as plain expressions, in per-tuple context */ + oldcontext = MemoryContextSwitchTo(econtext->ecxt_per_tuple_memory); + /* * Assume no further tuples are produced unless an ExprMultipleResult is * encountered from a set returning function. @@ -160,7 +173,8 @@ ExecProjectSRF(ProjectSetState *node, bool continuing) * Evaluate SRF - possibly continuing previously started output. 
*/ *result = ExecMakeFunctionResultSet((SetExprState *) elem, - econtext, isnull, isdone); + econtext, node->argcontext, + isnull, isdone); if (*isdone != ExprEndResult) hasresult = true; @@ -176,6 +190,8 @@ ExecProjectSRF(ProjectSetState *node, bool continuing) } } + MemoryContextSwitchTo(oldcontext); + /* ProjectSet should not be used if there's no SRFs */ Assert(hassrf); @@ -227,14 +243,6 @@ ExecInitProjectSet(ProjectSet *node, EState *estate, int eflags) */ ExecAssignExprContext(estate, &state->ps); - /* - * tuple table initialization - */ - ExecInitResultTupleSlot(estate, &state->ps); - - /* We don't support any qual on ProjectSet nodes */ - Assert(node->plan.qual == NIL); - /* * initialize child nodes */ @@ -246,9 +254,9 @@ ExecInitProjectSet(ProjectSet *node, EState *estate, int eflags) Assert(innerPlan(node) == NULL); /* - * initialize tuple type and projection info + * tuple table and result type initialization */ - ExecAssignResultTypeFromTL(&state->ps); + ExecInitResultTupleSlotTL(&state->ps); /* Create workspace for per-tlist-entry expr state & SRF-is-done state */ state->nelems = list_length(node->plan.targetlist); @@ -285,6 +293,20 @@ ExecInitProjectSet(ProjectSet *node, EState *estate, int eflags) off++; } + /* We don't support any qual on ProjectSet nodes */ + Assert(node->plan.qual == NIL); + + /* + * Create a memory context that ExecMakeFunctionResult can use to evaluate + * function arguments in. We can't use the per-tuple context for this + * because it gets reset too often; but we don't want to leak evaluation + * results into the query-lifespan context either. We use one context for + * the arguments of all tSRFs, as they have roughly equivalent lifetimes. + */ + state->argcontext = AllocSetContextCreate(CurrentMemoryContext, + "tSRF function arguments", + ALLOCSET_DEFAULT_SIZES); + return state; } diff --git a/src/backend/executor/nodeRecursiveunion.c b/src/backend/executor/nodeRecursiveunion.c index a64dd1397a..2d26cec831 100644 --- a/src/backend/executor/nodeRecursiveunion.c +++ b/src/backend/executor/nodeRecursiveunion.c @@ -7,7 +7,7 @@ * already seen. The hash key is computed from the grouping columns. * * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * @@ -32,13 +32,16 @@ static void build_hash_table(RecursiveUnionState *rustate) { RecursiveUnion *node = (RecursiveUnion *) rustate->ps.plan; + TupleDesc desc = ExecGetResultType(outerPlanState(rustate)); Assert(node->numCols > 0); Assert(node->numGroups > 0); - rustate->hashtable = BuildTupleHashTable(node->numCols, + rustate->hashtable = BuildTupleHashTable(&rustate->ps, + desc, + node->numCols, node->dupColIdx, - rustate->eqfunctions, + rustate->eqfuncoids, rustate->hashfunctions, node->numGroups, 0, @@ -175,7 +178,7 @@ ExecInitRecursiveUnion(RecursiveUnion *node, EState *estate, int eflags) rustate->ps.state = estate; rustate->ps.ExecProcNode = ExecRecursiveUnion; - rustate->eqfunctions = NULL; + rustate->eqfuncoids = NULL; rustate->hashfunctions = NULL; rustate->hashtable = NULL; rustate->tempContext = NULL; @@ -226,14 +229,13 @@ ExecInitRecursiveUnion(RecursiveUnion *node, EState *estate, int eflags) * RecursiveUnion nodes still have Result slots, which hold pointers to * tuples, so we have to initialize them. 
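/*
 * Editorial aside: a minimal standalone sketch (hypothetical arena type, not
 * part of this patch) of the two-context scheme used by ExecProjectSet
 * earlier in this file's hunks. One scratch arena is reset for every emitted
 * row, while a second arena holding the SRF arguments is reset only when a
 * new input row is fetched, so ValuePerCall SRFs can keep referencing their
 * arguments across several emitted rows.
 */
#include <stdio.h>

typedef struct Arena { int live_allocs; } Arena;

static void arena_alloc(Arena *a) { a->live_allocs++; }	/* pretend allocation */
static void arena_reset(Arena *a) { a->live_allocs = 0; }

int
main(void)
{
	Arena		per_row = {0};	/* reset before each emitted row */
	Arena		srf_args = {0}; /* reset only once per input row */

	for (int input = 0; input < 2; input++)
	{
		arena_reset(&srf_args);
		arena_alloc(&srf_args);	/* evaluate SRF arguments once per input row */
		for (int out = 0; out < 3; out++)
		{
			arena_reset(&per_row);
			arena_alloc(&per_row);	/* per-output-row expression storage */
			printf("input %d out %d: args live=%d, row live=%d\n",
				   input, out, srf_args.live_allocs, per_row.live_allocs);
		}
	}
	return 0;
}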
*/ - ExecInitResultTupleSlot(estate, &rustate->ps); + ExecInitResultTypeTL(&rustate->ps); /* - * Initialize result tuple type and projection info. (Note: we have to - * set up the result type before initializing child nodes, because - * nodeWorktablescan.c expects it to be valid.) + * Initialize result tuple type. (Note: we have to set up the result type + * before initializing child nodes, because nodeWorktablescan.c expects it + * to be valid.) */ - ExecAssignResultTypeFromTL(&rustate->ps); rustate->ps.ps_ProjInfo = NULL; /* @@ -250,7 +252,7 @@ ExecInitRecursiveUnion(RecursiveUnion *node, EState *estate, int eflags) { execTuplesHashPrepare(node->numCols, node->dupOperators, - &rustate->eqfunctions, + &rustate->eqfuncoids, &rustate->hashfunctions); build_hash_table(rustate); } @@ -277,11 +279,6 @@ ExecEndRecursiveUnion(RecursiveUnionState *node) if (node->tableContext) MemoryContextDelete(node->tableContext); - /* - * clean out the upper tuple table - */ - ExecClearTuple(node->ps.ps_ResultTupleSlot); - /* * close down subplans */ diff --git a/src/backend/executor/nodeResult.c b/src/backend/executor/nodeResult.c index 4c879d8765..2bbb2e7884 100644 --- a/src/backend/executor/nodeResult.c +++ b/src/backend/executor/nodeResult.c @@ -34,7 +34,7 @@ * plan normally and pass back the results. * * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * IDENTIFICATION @@ -204,19 +204,6 @@ ExecInitResult(Result *node, EState *estate, int eflags) */ ExecAssignExprContext(estate, &resstate->ps); - /* - * tuple table initialization - */ - ExecInitResultTupleSlot(estate, &resstate->ps); - - /* - * initialize child expressions - */ - resstate->ps.qual = - ExecInitQual(node->plan.qual, (PlanState *) resstate); - resstate->resconstantqual = - ExecInitQual((List *) node->resconstantqual, (PlanState *) resstate); - /* * initialize child nodes */ @@ -228,11 +215,19 @@ ExecInitResult(Result *node, EState *estate, int eflags) Assert(innerPlan(node) == NULL); /* - * initialize tuple type and projection info + * Initialize result slot, type and projection. */ - ExecAssignResultTypeFromTL(&resstate->ps); + ExecInitResultTupleSlotTL(&resstate->ps); ExecAssignProjectionInfo(&resstate->ps, NULL); + /* + * initialize child expressions + */ + resstate->ps.qual = + ExecInitQual(node->plan.qual, (PlanState *) resstate); + resstate->resconstantqual = + ExecInitQual((List *) node->resconstantqual, (PlanState *) resstate); + return resstate; } diff --git a/src/backend/executor/nodeSamplescan.c b/src/backend/executor/nodeSamplescan.c index 9c74a836e4..cfa26535d7 100644 --- a/src/backend/executor/nodeSamplescan.c +++ b/src/backend/executor/nodeSamplescan.c @@ -3,7 +3,7 @@ * nodeSamplescan.c * Support routines for sample scans of relations (table sampling). 
* - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * @@ -26,7 +26,6 @@ #include "utils/rel.h" #include "utils/tqual.h" -static void InitScanRelation(SampleScanState *node, EState *estate, int eflags); static TupleTableSlot *SampleNext(SampleScanState *node); static void tablesample_init(SampleScanState *scanstate); static HeapTuple tablesample_getnext(SampleScanState *scanstate); @@ -64,10 +63,9 @@ SampleNext(SampleScanState *node) slot = node->ss.ss_ScanTupleSlot; if (tuple) - ExecStoreTuple(tuple, /* tuple to store */ - slot, /* slot to store in */ - node->ss.ss_currentScanDesc->rs_cbuf, /* tuple's buffer */ - false); /* don't pfree this pointer */ + ExecStoreBufferHeapTuple(tuple, /* tuple to store */ + slot, /* slot to store in */ + node->ss.ss_currentScanDesc->rs_cbuf); /* tuple's buffer */ else ExecClearTuple(slot); @@ -106,35 +104,6 @@ ExecSampleScan(PlanState *pstate) (ExecScanRecheckMtd) SampleRecheck); } -/* ---------------------------------------------------------------- - * InitScanRelation - * - * Set up to access the scan relation. - * ---------------------------------------------------------------- - */ -static void -InitScanRelation(SampleScanState *node, EState *estate, int eflags) -{ - Relation currentRelation; - - /* - * get the relation object id from the relid'th entry in the range table, - * open that relation and acquire appropriate lock on it. - */ - currentRelation = ExecOpenScanRelation(estate, - ((SampleScan *) node->ss.ps.plan)->scan.scanrelid, - eflags); - - node->ss.ss_currentRelation = currentRelation; - - /* we won't set up the HeapScanDesc till later */ - node->ss.ss_currentScanDesc = NULL; - - /* and report the scan tuple slot's rowtype */ - ExecAssignScanType(&node->ss, RelationGetDescr(currentRelation)); -} - - /* ---------------------------------------------------------------- * ExecInitSampleScan * ---------------------------------------------------------------- @@ -165,31 +134,35 @@ ExecInitSampleScan(SampleScan *node, EState *estate, int eflags) ExecAssignExprContext(estate, &scanstate->ss.ps); /* - * initialize child expressions + * open the scan relation */ - scanstate->ss.ps.qual = - ExecInitQual(node->scan.plan.qual, (PlanState *) scanstate); + scanstate->ss.ss_currentRelation = + ExecOpenScanRelation(estate, + node->scan.scanrelid, + eflags); - scanstate->args = ExecInitExprList(tsc->args, (PlanState *) scanstate); - scanstate->repeatable = - ExecInitExpr(tsc->repeatable, (PlanState *) scanstate); + /* we won't set up the HeapScanDesc till later */ + scanstate->ss.ss_currentScanDesc = NULL; - /* - * tuple table initialization - */ - ExecInitResultTupleSlot(estate, &scanstate->ss.ps); - ExecInitScanTupleSlot(estate, &scanstate->ss); + /* and create slot with appropriate rowtype */ + ExecInitScanTupleSlot(estate, &scanstate->ss, + RelationGetDescr(scanstate->ss.ss_currentRelation)); /* - * initialize scan relation + * Initialize result type and projection. */ - InitScanRelation(scanstate, estate, eflags); + ExecInitResultTypeTL(&scanstate->ss.ps); + ExecAssignScanProjectionInfo(&scanstate->ss); /* - * Initialize result tuple type and projection info. 
+ * initialize child expressions */ - ExecAssignResultTypeFromTL(&scanstate->ss.ps); - ExecAssignScanProjectionInfo(&scanstate->ss); + scanstate->ss.ps.qual = + ExecInitQual(node->scan.plan.qual, (PlanState *) scanstate); + + scanstate->args = ExecInitExprList(tsc->args, (PlanState *) scanstate); + scanstate->repeatable = + ExecInitExpr(tsc->repeatable, (PlanState *) scanstate); /* * If we don't have a REPEATABLE clause, select a random seed. We want to @@ -237,7 +210,8 @@ ExecEndSampleScan(SampleScanState *node) /* * clean out the tuple table */ - ExecClearTuple(node->ss.ps.ps_ResultTupleSlot); + if (node->ss.ps.ps_ResultTupleSlot) + ExecClearTuple(node->ss.ps.ps_ResultTupleSlot); ExecClearTuple(node->ss.ss_ScanTupleSlot); /* @@ -245,11 +219,6 @@ ExecEndSampleScan(SampleScanState *node) */ if (node->ss.ss_currentScanDesc) heap_endscan(node->ss.ss_currentScanDesc); - - /* - * close the heap relation. - */ - ExecCloseScanRelation(node->ss.ss_currentRelation); } /* ---------------------------------------------------------------- diff --git a/src/backend/executor/nodeSeqscan.c b/src/backend/executor/nodeSeqscan.c index 5c49d4ca8a..b4bea67610 100644 --- a/src/backend/executor/nodeSeqscan.c +++ b/src/backend/executor/nodeSeqscan.c @@ -3,7 +3,7 @@ * nodeSeqscan.c * Support routines for sequential scans of relations. * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * @@ -22,6 +22,7 @@ * * ExecSeqScanEstimate estimates DSM space needed for parallel scan * ExecSeqScanInitializeDSM initialize DSM for parallel scan + * ExecSeqScanReInitializeDSM reinitialize DSM for fresh parallel scan * ExecSeqScanInitializeWorker attach to DSM info in parallel worker */ #include "postgres.h" @@ -31,7 +32,6 @@ #include "executor/nodeSeqscan.h" #include "utils/rel.h" -static void InitScanRelation(SeqScanState *node, EState *estate, int eflags); static TupleTableSlot *SeqNext(SeqScanState *node); /* ---------------------------------------------------------------- @@ -65,8 +65,8 @@ SeqNext(SeqScanState *node) if (scandesc == NULL) { /* - * We reach here if the scan is not parallel, or if we're executing a - * scan that was intended to be parallel serially. + * We reach here if the scan is not parallel, or if we're serially + * executing a scan that was planned to be parallel. */ scandesc = heap_beginscan(node->ss.ss_currentRelation, estate->es_snapshot, @@ -84,15 +84,14 @@ SeqNext(SeqScanState *node) * our scan tuple slot and return the slot. Note: we pass 'false' because * tuples returned by heap_getnext() are pointers onto disk pages and were * not created with palloc() and so should not be pfree()'d. Note also - * that ExecStoreTuple will increment the refcount of the buffer; the + * that ExecStoreHeapTuple will increment the refcount of the buffer; the * refcount will not be dropped until the tuple table slot is cleared. 
*/ if (tuple) - ExecStoreTuple(tuple, /* tuple to store */ - slot, /* slot to store in */ - scandesc->rs_cbuf, /* buffer associated with this - * tuple */ - false); /* don't pfree this pointer */ + ExecStoreBufferHeapTuple(tuple, /* tuple to store */ + slot, /* slot to store in */ + scandesc->rs_cbuf); /* buffer associated + * with this tuple */ else ExecClearTuple(slot); @@ -131,31 +130,6 @@ ExecSeqScan(PlanState *pstate) (ExecScanRecheckMtd) SeqRecheck); } -/* ---------------------------------------------------------------- - * InitScanRelation - * - * Set up to access the scan relation. - * ---------------------------------------------------------------- - */ -static void -InitScanRelation(SeqScanState *node, EState *estate, int eflags) -{ - Relation currentRelation; - - /* - * get the relation object id from the relid'th entry in the range table, - * open that relation and acquire appropriate lock on it. - */ - currentRelation = ExecOpenScanRelation(estate, - ((SeqScan *) node->ss.ps.plan)->scanrelid, - eflags); - - node->ss.ss_currentRelation = currentRelation; - - /* and report the scan tuple slot's rowtype */ - ExecAssignScanType(&node->ss, RelationGetDescr(currentRelation)); -} - /* ---------------------------------------------------------------- * ExecInitSeqScan @@ -189,27 +163,28 @@ ExecInitSeqScan(SeqScan *node, EState *estate, int eflags) ExecAssignExprContext(estate, &scanstate->ss.ps); /* - * initialize child expressions + * open the scan relation */ - scanstate->ss.ps.qual = - ExecInitQual(node->plan.qual, (PlanState *) scanstate); + scanstate->ss.ss_currentRelation = + ExecOpenScanRelation(estate, + node->scanrelid, + eflags); - /* - * tuple table initialization - */ - ExecInitResultTupleSlot(estate, &scanstate->ss.ps); - ExecInitScanTupleSlot(estate, &scanstate->ss); + /* and create slot with the appropriate rowtype */ + ExecInitScanTupleSlot(estate, &scanstate->ss, + RelationGetDescr(scanstate->ss.ss_currentRelation)); /* - * initialize scan relation + * Initialize result type and projection. */ - InitScanRelation(scanstate, estate, eflags); + ExecInitResultTypeTL(&scanstate->ss.ps); + ExecAssignScanProjectionInfo(&scanstate->ss); /* - * Initialize result tuple type and projection info. + * initialize child expressions */ - ExecAssignResultTypeFromTL(&scanstate->ss.ps); - ExecAssignScanProjectionInfo(&scanstate->ss); + scanstate->ss.ps.qual = + ExecInitQual(node->plan.qual, (PlanState *) scanstate); return scanstate; } @@ -223,13 +198,11 @@ ExecInitSeqScan(SeqScan *node, EState *estate, int eflags) void ExecEndSeqScan(SeqScanState *node) { - Relation relation; HeapScanDesc scanDesc; /* * get information from node */ - relation = node->ss.ss_currentRelation; scanDesc = node->ss.ss_currentScanDesc; /* @@ -240,7 +213,8 @@ ExecEndSeqScan(SeqScanState *node) /* * clean out the tuple table */ - ExecClearTuple(node->ss.ps.ps_ResultTupleSlot); + if (node->ss.ps.ps_ResultTupleSlot) + ExecClearTuple(node->ss.ps.ps_ResultTupleSlot); ExecClearTuple(node->ss.ss_ScanTupleSlot); /* @@ -248,11 +222,6 @@ ExecEndSeqScan(SeqScanState *node) */ if (scanDesc != NULL) heap_endscan(scanDesc); - - /* - * close the heap relation. - */ - ExecCloseScanRelation(relation); } /* ---------------------------------------------------------------- @@ -288,7 +257,8 @@ ExecReScanSeqScan(SeqScanState *node) /* ---------------------------------------------------------------- * ExecSeqScanEstimate * - * estimates the space required to serialize seqscan node. 
+ * Compute the amount of space we'll need in the parallel + * query DSM, and inform pcxt->estimator about our needs. * ---------------------------------------------------------------- */ void @@ -324,6 +294,21 @@ ExecSeqScanInitializeDSM(SeqScanState *node, heap_beginscan_parallel(node->ss.ss_currentRelation, pscan); } +/* ---------------------------------------------------------------- + * ExecSeqScanReInitializeDSM + * + * Reset shared state before beginning a fresh scan. + * ---------------------------------------------------------------- + */ +void +ExecSeqScanReInitializeDSM(SeqScanState *node, + ParallelContext *pcxt) +{ + HeapScanDesc scan = node->ss.ss_currentScanDesc; + + heap_parallelscan_reinitialize(scan->rs_parallel); +} + /* ---------------------------------------------------------------- * ExecSeqScanInitializeWorker * @@ -331,11 +316,12 @@ ExecSeqScanInitializeDSM(SeqScanState *node, * ---------------------------------------------------------------- */ void -ExecSeqScanInitializeWorker(SeqScanState *node, shm_toc *toc) +ExecSeqScanInitializeWorker(SeqScanState *node, + ParallelWorkerContext *pwcxt) { ParallelHeapScanDesc pscan; - pscan = shm_toc_lookup(toc, node->ss.ps.plan->plan_node_id, false); + pscan = shm_toc_lookup(pwcxt->toc, node->ss.ps.plan->plan_node_id, false); node->ss.ss_currentScanDesc = heap_beginscan_parallel(node->ss.ss_currentRelation, pscan); } diff --git a/src/backend/executor/nodeSetOp.c b/src/backend/executor/nodeSetOp.c index 571cbf86b1..46bf77775c 100644 --- a/src/backend/executor/nodeSetOp.c +++ b/src/backend/executor/nodeSetOp.c @@ -32,7 +32,7 @@ * input group. * * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * @@ -120,18 +120,22 @@ static void build_hash_table(SetOpState *setopstate) { SetOp *node = (SetOp *) setopstate->ps.plan; + ExprContext *econtext = setopstate->ps.ps_ExprContext; + TupleDesc desc = ExecGetResultType(outerPlanState(setopstate)); Assert(node->strategy == SETOP_HASHED); Assert(node->numGroups > 0); - setopstate->hashtable = BuildTupleHashTable(node->numCols, + setopstate->hashtable = BuildTupleHashTable(&setopstate->ps, + desc, + node->numCols, node->dupColIdx, - setopstate->eqfunctions, + setopstate->eqfuncoids, setopstate->hashfunctions, node->numGroups, 0, setopstate->tableContext, - setopstate->tempContext, + econtext->ecxt_per_tuple_memory, false); } @@ -220,11 +224,11 @@ ExecSetOp(PlanState *pstate) static TupleTableSlot * setop_retrieve_direct(SetOpState *setopstate) { - SetOp *node = (SetOp *) setopstate->ps.plan; PlanState *outerPlan; SetOpStatePerGroup pergroup; TupleTableSlot *outerslot; TupleTableSlot *resultTupleSlot; + ExprContext *econtext = setopstate->ps.ps_ExprContext; /* * get state info from node @@ -263,10 +267,9 @@ setop_retrieve_direct(SetOpState *setopstate) * for it. The tuple will be deleted when it is cleared from the * slot. */ - ExecStoreTuple(setopstate->grp_firstTuple, - resultTupleSlot, - InvalidBuffer, - true); + ExecStoreHeapTuple(setopstate->grp_firstTuple, + resultTupleSlot, + true); setopstate->grp_firstTuple = NULL; /* don't keep two pointers */ /* Initialize working state for a new input tuple group */ @@ -292,11 +295,10 @@ setop_retrieve_direct(SetOpState *setopstate) /* * Check whether we've crossed a group boundary. 
*/ - if (!execTuplesMatch(resultTupleSlot, - outerslot, - node->numCols, node->dupColIdx, - setopstate->eqfunctions, - setopstate->tempContext)) + econtext->ecxt_outertuple = resultTupleSlot; + econtext->ecxt_innertuple = outerslot; + + if (!ExecQualAndReset(setopstate->eqfunction, econtext)) { /* * Save the first input tuple of the next group. @@ -338,6 +340,7 @@ setop_fill_hash_table(SetOpState *setopstate) PlanState *outerPlan; int firstFlag; bool in_first_rel PG_USED_FOR_ASSERTS_ONLY; + ExprContext *econtext = setopstate->ps.ps_ExprContext; /* * get state info from node @@ -404,8 +407,8 @@ setop_fill_hash_table(SetOpState *setopstate) advance_counts((SetOpStatePerGroup) entry->additional, flag); } - /* Must reset temp context after each hashtable lookup */ - MemoryContextReset(setopstate->tempContext); + /* Must reset expression context after each hashtable lookup */ + ResetExprContext(econtext); } setopstate->table_filled = true; @@ -476,6 +479,7 @@ SetOpState * ExecInitSetOp(SetOp *node, EState *estate, int eflags) { SetOpState *setopstate; + TupleDesc outerDesc; /* check for unsupported flags */ Assert(!(eflags & (EXEC_FLAG_BACKWARD | EXEC_FLAG_MARK))); @@ -488,7 +492,7 @@ ExecInitSetOp(SetOp *node, EState *estate, int eflags) setopstate->ps.state = estate; setopstate->ps.ExecProcNode = ExecSetOp; - setopstate->eqfunctions = NULL; + setopstate->eqfuncoids = NULL; setopstate->hashfunctions = NULL; setopstate->setop_done = false; setopstate->numOutput = 0; @@ -498,16 +502,9 @@ ExecInitSetOp(SetOp *node, EState *estate, int eflags) setopstate->tableContext = NULL; /* - * Miscellaneous initialization - * - * SetOp nodes have no ExprContext initialization because they never call - * ExecQual or ExecProject. But they do need a per-tuple memory context - * anyway for calling execTuplesMatch. + * create expression context */ - setopstate->tempContext = - AllocSetContextCreate(CurrentMemoryContext, - "SetOp", - ALLOCSET_DEFAULT_SIZES); + ExecAssignExprContext(estate, &setopstate->ps); /* * If hashing, we also need a longer-lived context to store the hash @@ -520,11 +517,6 @@ ExecInitSetOp(SetOp *node, EState *estate, int eflags) "SetOp hash table", ALLOCSET_DEFAULT_SIZES); - /* - * Tuple table initialization - */ - ExecInitResultTupleSlot(estate, &setopstate->ps); - /* * initialize child nodes * @@ -534,12 +526,13 @@ ExecInitSetOp(SetOp *node, EState *estate, int eflags) if (node->strategy == SETOP_HASHED) eflags &= ~EXEC_FLAG_REWIND; outerPlanState(setopstate) = ExecInitNode(outerPlan(node), estate, eflags); + outerDesc = ExecGetResultType(outerPlanState(setopstate)); /* - * setop nodes do no projections, so initialize projection info for this - * node appropriately + * Initialize result slot and type. Setop nodes do no projections, so + * initialize projection info for this node appropriately. 
*/ - ExecAssignResultTypeFromTL(&setopstate->ps); + ExecInitResultTupleSlotTL(&setopstate->ps); setopstate->ps.ps_ProjInfo = NULL; /* @@ -550,12 +543,15 @@ ExecInitSetOp(SetOp *node, EState *estate, int eflags) if (node->strategy == SETOP_HASHED) execTuplesHashPrepare(node->numCols, node->dupOperators, - &setopstate->eqfunctions, + &setopstate->eqfuncoids, &setopstate->hashfunctions); else - setopstate->eqfunctions = - execTuplesMatchPrepare(node->numCols, - node->dupOperators); + setopstate->eqfunction = + execTuplesMatchPrepare(outerDesc, + node->numCols, + node->dupColIdx, + node->dupOperators, + &setopstate->ps); if (node->strategy == SETOP_HASHED) { @@ -585,9 +581,9 @@ ExecEndSetOp(SetOpState *node) ExecClearTuple(node->ps.ps_ResultTupleSlot); /* free subsidiary stuff including hashtable */ - MemoryContextDelete(node->tempContext); if (node->tableContext) MemoryContextDelete(node->tableContext); + ExecFreeExprContext(&node->ps); ExecEndNode(outerPlanState(node)); } diff --git a/src/backend/executor/nodeSort.c b/src/backend/executor/nodeSort.c index aae4150e2c..5492cd4557 100644 --- a/src/backend/executor/nodeSort.c +++ b/src/backend/executor/nodeSort.c @@ -3,7 +3,7 @@ * nodeSort.c * Routines to handle sorting of relations. * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * @@ -15,6 +15,7 @@ #include "postgres.h" +#include "access/parallel.h" #include "executor/execdebug.h" #include "executor/nodeSort.h" #include "miscadmin.h" @@ -92,7 +93,7 @@ ExecSort(PlanState *pstate) plannode->collations, plannode->nullsFirst, work_mem, - node->randomAccess); + NULL, node->randomAccess); if (node->bounded) tuplesort_set_bound(tuplesortstate, node->bound); node->tuplesortstate = (void *) tuplesortstate; @@ -127,6 +128,15 @@ ExecSort(PlanState *pstate) node->sort_Done = true; node->bounded_Done = node->bounded; node->bound_Done = node->bound; + if (node->shared_info && node->am_worker) + { + TuplesortInstrumentation *si; + + Assert(IsParallelWorker()); + Assert(ParallelWorkerNumber <= node->shared_info->num_workers); + si = &node->shared_info->sinstrument[ParallelWorkerNumber]; + tuplesort_get_stats(tuplesortstate, si); + } SO1_printf("ExecSort: %s\n", "sorting done"); } @@ -188,14 +198,6 @@ ExecInitSort(Sort *node, EState *estate, int eflags) * ExecQual or ExecProject. */ - /* - * tuple table initialization - * - * sort nodes only return scan tuples from their sorted relation. - */ - ExecInitResultTupleSlot(estate, &sortstate->ss.ps); - ExecInitScanTupleSlot(estate, &sortstate->ss); - /* * initialize child nodes * @@ -207,11 +209,15 @@ ExecInitSort(Sort *node, EState *estate, int eflags) outerPlanState(sortstate) = ExecInitNode(outerPlan(node), estate, eflags); /* - * initialize tuple type. no need to initialize projection info because - * this node doesn't do projections. + * Initialize scan slot and type. */ - ExecAssignResultTypeFromTL(&sortstate->ss.ps); - ExecAssignScanTypeFromOuterPlan(&sortstate->ss); + ExecCreateScanSlotFromOuterPlan(estate, &sortstate->ss); + + /* + * Initialize return slot and type. No need to initialize projection info + * because this node doesn't do projections. 
+ */ + ExecInitResultTupleSlotTL(&sortstate->ss.ps); sortstate->ss.ps.ps_ProjInfo = NULL; SO1_printf("ExecInitSort: %s\n", @@ -334,3 +340,90 @@ ExecReScanSort(SortState *node) else tuplesort_rescan((Tuplesortstate *) node->tuplesortstate); } + +/* ---------------------------------------------------------------- + * Parallel Query Support + * ---------------------------------------------------------------- + */ + +/* ---------------------------------------------------------------- + * ExecSortEstimate + * + * Estimate space required to propagate sort statistics. + * ---------------------------------------------------------------- + */ +void +ExecSortEstimate(SortState *node, ParallelContext *pcxt) +{ + Size size; + + /* don't need this if not instrumenting or no workers */ + if (!node->ss.ps.instrument || pcxt->nworkers == 0) + return; + + size = mul_size(pcxt->nworkers, sizeof(TuplesortInstrumentation)); + size = add_size(size, offsetof(SharedSortInfo, sinstrument)); + shm_toc_estimate_chunk(&pcxt->estimator, size); + shm_toc_estimate_keys(&pcxt->estimator, 1); +} + +/* ---------------------------------------------------------------- + * ExecSortInitializeDSM + * + * Initialize DSM space for sort statistics. + * ---------------------------------------------------------------- + */ +void +ExecSortInitializeDSM(SortState *node, ParallelContext *pcxt) +{ + Size size; + + /* don't need this if not instrumenting or no workers */ + if (!node->ss.ps.instrument || pcxt->nworkers == 0) + return; + + size = offsetof(SharedSortInfo, sinstrument) + + pcxt->nworkers * sizeof(TuplesortInstrumentation); + node->shared_info = shm_toc_allocate(pcxt->toc, size); + /* ensure any unfilled slots will contain zeroes */ + memset(node->shared_info, 0, size); + node->shared_info->num_workers = pcxt->nworkers; + shm_toc_insert(pcxt->toc, node->ss.ps.plan->plan_node_id, + node->shared_info); +} + +/* ---------------------------------------------------------------- + * ExecSortInitializeWorker + * + * Attach worker to DSM space for sort statistics. + * ---------------------------------------------------------------- + */ +void +ExecSortInitializeWorker(SortState *node, ParallelWorkerContext *pwcxt) +{ + node->shared_info = + shm_toc_lookup(pwcxt->toc, node->ss.ps.plan->plan_node_id, true); + node->am_worker = true; +} + +/* ---------------------------------------------------------------- + * ExecSortRetrieveInstrumentation + * + * Transfer sort statistics from DSM to private memory. + * ---------------------------------------------------------------- + */ +void +ExecSortRetrieveInstrumentation(SortState *node) +{ + Size size; + SharedSortInfo *si; + + if (node->shared_info == NULL) + return; + + size = offsetof(SharedSortInfo, sinstrument) + + node->shared_info->num_workers * sizeof(TuplesortInstrumentation); + si = palloc(size); + memcpy(si, node->shared_info, size); + node->shared_info = si; +} diff --git a/src/backend/executor/nodeSubplan.c b/src/backend/executor/nodeSubplan.c index fe10e809df..63de981034 100644 --- a/src/backend/executor/nodeSubplan.c +++ b/src/backend/executor/nodeSubplan.c @@ -11,7 +11,7 @@ * subplans, which are re-evaluated every time their result is required. 
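/*
 * Editorial aside: a minimal standalone sketch (hypothetical types, not part
 * of this patch) of the offsetof-based sizing used above by
 * ExecSortInitializeDSM and ExecSortRetrieveInstrumentation: a fixed header
 * plus nworkers trailing instrumentation slots, allocated and zeroed as one
 * chunk so unfilled worker slots read as zeroes.
 */
#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

typedef struct FakeSortStats { long spaceUsed; } FakeSortStats;
typedef struct FakeSharedInfo
{
	int			num_workers;
	FakeSortStats sinstrument[];	/* variable-length array, one per worker */
} FakeSharedInfo;

int
main(void)
{
	int			nworkers = 4;
	size_t		size = offsetof(FakeSharedInfo, sinstrument)
		+ nworkers * sizeof(FakeSortStats);
	FakeSharedInfo *info = malloc(size);

	if (info == NULL)
		return 1;
	memset(info, 0, size);		/* ensure unfilled slots contain zeroes */
	info->num_workers = nworkers;
	printf("allocated %zu bytes for %d workers\n", size, info->num_workers);
	free(info);
	return 0;
}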
* * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * IDENTIFICATION @@ -65,6 +65,9 @@ ExecSubPlan(SubPlanState *node, bool *isNull) { SubPlan *subplan = node->subplan; + EState *estate = node->planstate->state; + ScanDirection dir = estate->es_direction; + Datum retval; CHECK_FOR_INTERRUPTS(); @@ -77,11 +80,19 @@ ExecSubPlan(SubPlanState *node, if (subplan->setParam != NIL && subplan->subLinkType != MULTIEXPR_SUBLINK) elog(ERROR, "cannot set parent params from subquery"); + /* Force forward-scan mode for evaluation */ + estate->es_direction = ForwardScanDirection; + /* Select appropriate evaluation strategy */ if (subplan->useHashTable) - return ExecHashSubPlan(node, econtext, isNull); + retval = ExecHashSubPlan(node, econtext, isNull); else - return ExecScanSubPlan(node, econtext, isNull); + retval = ExecScanSubPlan(node, econtext, isNull); + + /* restore scan direction */ + estate->es_direction = dir; + + return retval; } /* @@ -149,7 +160,7 @@ ExecHashSubPlan(SubPlanState *node, if (node->havehashrows && FindTupleHashEntry(node->hashtable, slot, - node->cur_eq_funcs, + node->cur_eq_comp, node->lhs_hash_funcs) != NULL) { ExecClearTuple(slot); @@ -220,7 +231,7 @@ ExecScanSubPlan(SubPlanState *node, MemoryContext oldcontext; TupleTableSlot *slot; Datum result; - bool found = false; /* TRUE if got at least one subplan tuple */ + bool found = false; /* true if got at least one subplan tuple */ ListCell *pvar; ListCell *l; ArrayBuildStateAny *astate = NULL; @@ -360,7 +371,7 @@ ExecScanSubPlan(SubPlanState *node, found = true; /* stash away current value */ - Assert(subplan->firstColType == tdesc->attrs[0]->atttypid); + Assert(subplan->firstColType == TupleDescAttr(tdesc, 0)->atttypid); dvalue = slot_getattr(slot, 1, &disnull); astate = accumArrayResultAny(astate, dvalue, disnull, subplan->firstColType, oldcontext); @@ -494,9 +505,11 @@ buildSubPlanHash(SubPlanState *node, ExprContext *econtext) if (nbuckets < 1) nbuckets = 1; - node->hashtable = BuildTupleHashTable(ncols, + node->hashtable = BuildTupleHashTable(node->parent, + node->descRight, + ncols, node->keyColIdx, - node->tab_eq_funcs, + node->tab_eq_funcoids, node->tab_hash_funcs, nbuckets, 0, @@ -514,9 +527,11 @@ buildSubPlanHash(SubPlanState *node, ExprContext *econtext) if (nbuckets < 1) nbuckets = 1; } - node->hashnulls = BuildTupleHashTable(ncols, + node->hashnulls = BuildTupleHashTable(node->parent, + node->descRight, + ncols, node->keyColIdx, - node->tab_eq_funcs, + node->tab_eq_funcoids, node->tab_hash_funcs, nbuckets, 0, @@ -598,6 +613,77 @@ buildSubPlanHash(SubPlanState *node, ExprContext *econtext) MemoryContextSwitchTo(oldcontext); } +/* + * execTuplesUnequal + * Return true if two tuples are definitely unequal in the indicated + * fields. + * + * Nulls are neither equal nor unequal to anything else. A true result + * is obtained only if there are non-null fields that compare not-equal. + * + * slot1, slot2: the tuples to compare (must have same columns!) 
+ * numCols: the number of attributes to be examined + * matchColIdx: array of attribute column numbers + * eqFunctions: array of fmgr lookup info for the equality functions to use + * evalContext: short-term memory context for executing the functions + */ +static bool +execTuplesUnequal(TupleTableSlot *slot1, + TupleTableSlot *slot2, + int numCols, + AttrNumber *matchColIdx, + FmgrInfo *eqfunctions, + MemoryContext evalContext) +{ + MemoryContext oldContext; + bool result; + int i; + + /* Reset and switch into the temp context. */ + MemoryContextReset(evalContext); + oldContext = MemoryContextSwitchTo(evalContext); + + /* + * We cannot report a match without checking all the fields, but we can + * report a non-match as soon as we find unequal fields. So, start + * comparing at the last field (least significant sort key). That's the + * most likely to be different if we are dealing with sorted input. + */ + result = false; + + for (i = numCols; --i >= 0;) + { + AttrNumber att = matchColIdx[i]; + Datum attr1, + attr2; + bool isNull1, + isNull2; + + attr1 = slot_getattr(slot1, att, &isNull1); + + if (isNull1) + continue; /* can't prove anything here */ + + attr2 = slot_getattr(slot2, att, &isNull2); + + if (isNull2) + continue; /* can't prove anything here */ + + /* Apply the type-specific equality function */ + + if (!DatumGetBool(FunctionCall2(&eqfunctions[i], + attr1, attr2))) + { + result = true; /* they are unequal */ + break; + } + } + + MemoryContextSwitchTo(oldContext); + + return result; +} + /* * findPartialMatch: does the hashtable contain an entry that is not * provably distinct from the tuple? @@ -719,6 +805,7 @@ ExecInitSubPlan(SubPlan *subplan, PlanState *parent) sstate->hashtempcxt = NULL; sstate->innerecontext = NULL; sstate->keyColIdx = NULL; + sstate->tab_eq_funcoids = NULL; sstate->tab_hash_funcs = NULL; sstate->tab_eq_funcs = NULL; sstate->lhs_hash_funcs = NULL; @@ -757,7 +844,8 @@ ExecInitSubPlan(SubPlan *subplan, PlanState *parent) { int ncols, i; - TupleDesc tupDesc; + TupleDesc tupDescLeft; + TupleDesc tupDescRight; TupleTableSlot *slot; List *oplist, *lefttlist, @@ -815,6 +903,7 @@ ExecInitSubPlan(SubPlan *subplan, PlanState *parent) Assert(list_length(oplist) == ncols); lefttlist = righttlist = NIL; + sstate->tab_eq_funcoids = (Oid *) palloc(ncols * sizeof(Oid)); sstate->tab_hash_funcs = (FmgrInfo *) palloc(ncols * sizeof(FmgrInfo)); sstate->tab_eq_funcs = (FmgrInfo *) palloc(ncols * sizeof(FmgrInfo)); sstate->lhs_hash_funcs = (FmgrInfo *) palloc(ncols * sizeof(FmgrInfo)); @@ -848,6 +937,7 @@ ExecInitSubPlan(SubPlan *subplan, PlanState *parent) righttlist = lappend(righttlist, tle); /* Lookup the equality function (potentially cross-type) */ + sstate->tab_eq_funcoids[i - 1] = opexpr->opfuncid; fmgr_info(opexpr->opfuncid, &sstate->cur_eq_funcs[i - 1]); fmgr_info_set_expr((Node *) opexpr, &sstate->cur_eq_funcs[i - 1]); @@ -877,23 +967,32 @@ ExecInitSubPlan(SubPlan *subplan, PlanState *parent) * (hack alert!). The righthand expressions will be evaluated in our * own innerecontext. 
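/*
 * Editorial aside: a minimal standalone sketch (hypothetical data layout, not
 * part of this patch) of the comparison order used by execTuplesUnequal
 * above: scan columns from the last (least significant sort key) toward the
 * first, report "definitely unequal" as soon as two non-null values differ,
 * and let null columns prove nothing either way.
 */
#include <stdbool.h>
#include <stdio.h>

static bool
rows_definitely_unequal(const int *row1, const bool *null1,
						const int *row2, const bool *null2, int ncols)
{
	for (int i = ncols; --i >= 0;)
	{
		if (null1[i] || null2[i])
			continue;			/* a null can't prove anything */
		if (row1[i] != row2[i])
			return true;		/* found a provable difference */
	}
	return false;				/* no column proved the rows unequal */
}

int
main(void)
{
	int			a[] = {1, 2, 3};
	int			b[] = {1, 2, 4};
	bool		nn[] = {false, false, false};

	printf("%d\n", rows_definitely_unequal(a, nn, b, nn, 3));	/* prints 1 */
	return 0;
}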
*/ - tupDesc = ExecTypeFromTL(lefttlist, false); - slot = ExecInitExtraTupleSlot(estate); - ExecSetSlotDescriptor(slot, tupDesc); + tupDescLeft = ExecTypeFromTL(lefttlist, false); + slot = ExecInitExtraTupleSlot(estate, tupDescLeft); sstate->projLeft = ExecBuildProjectionInfo(lefttlist, NULL, slot, parent, NULL); - tupDesc = ExecTypeFromTL(righttlist, false); - slot = ExecInitExtraTupleSlot(estate); - ExecSetSlotDescriptor(slot, tupDesc); + sstate->descRight = tupDescRight = ExecTypeFromTL(righttlist, false); + slot = ExecInitExtraTupleSlot(estate, tupDescRight); sstate->projRight = ExecBuildProjectionInfo(righttlist, sstate->innerecontext, slot, sstate->planstate, NULL); + + /* + * Create comparator for lookups of rows in the table (potentially + * across-type comparison). + */ + sstate->cur_eq_comp = ExecBuildGroupingEqual(tupDescLeft, tupDescRight, + ncols, + sstate->keyColIdx, + sstate->tab_eq_funcoids, + parent); + } return sstate; @@ -910,6 +1009,17 @@ ExecInitSubPlan(SubPlan *subplan, PlanState *parent) * of initplans: we don't run the subplan until/unless we need its output. * Note that this routine MUST clear the execPlan fields of the plan's * output parameters after evaluating them! + * + * The results of this function are stored in the EState associated with the + * ExprContext (particularly, its ecxt_param_exec_vals); any pass-by-ref + * result Datums are allocated in the EState's per-query memory. The passed + * econtext can be any ExprContext belonging to that EState; which one is + * important only to the extent that the ExprContext's per-tuple memory + * context is used to evaluate any parameters passed down to the subplan. + * (Thus in principle, the shorter-lived the ExprContext the better, since + * that data isn't needed after we return. In practice, because initplan + * parameters are never more complex than Vars, Aggrefs, etc, evaluating them + * currently never leaks any memory anyway.) * ---------------------------------------------------------------- */ void @@ -918,6 +1028,8 @@ ExecSetParamPlan(SubPlanState *node, ExprContext *econtext) SubPlan *subplan = node->subplan; PlanState *planstate = node->planstate; SubLinkType subLinkType = subplan->subLinkType; + EState *estate = planstate->state; + ScanDirection dir = estate->es_direction; MemoryContext oldcontext; TupleTableSlot *slot; ListCell *pvar; @@ -931,6 +1043,12 @@ ExecSetParamPlan(SubPlanState *node, ExprContext *econtext) if (subLinkType == CTE_SUBLINK) elog(ERROR, "CTE subplans should not be executed via ExecSetParamPlan"); + /* + * Enforce forward scan direction regardless of caller. It's hard but not + * impossible to get here in backward scan, so make it work anyway. 
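The comment above describes how ExecSetParamPlan caches initplan results: a parameter whose execPlan field is still set has not been evaluated yet, and evaluating it clears that field so later lookups reuse the stored value. A rough sketch of that evaluate-at-most-once pattern, using invented Param/eval names rather than the real ParamExecData layout:

#include <stdio.h>

/* Invented stand-ins for ParamExecData and an initplan; not PostgreSQL types. */
typedef struct Param
{
    int   (*eval)(void);        /* non-NULL means "not evaluated yet" */
    int     value;              /* cached result once eval has run */
} Param;

static int
expensive_initplan(void)
{
    printf("initplan actually executed\n");
    return 7;
}

/* Fetch a parameter, running its initplan at most once. */
static int
get_param(Param *p)
{
    if (p->eval != NULL)
    {
        p->value = p->eval();   /* run the subplan ... */
        p->eval = NULL;         /* ... and mark it as evaluated */
    }
    return p->value;
}

int
main(void)
{
    Param p = {expensive_initplan, 0};

    printf("%d\n", get_param(&p));  /* runs the initplan */
    printf("%d\n", get_param(&p));  /* uses the cached value */
    return 0;
}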
+ */ + estate->es_direction = ForwardScanDirection; + /* Initialize ArrayBuildStateAny in caller's context, if needed */ if (subLinkType == ARRAY_SUBLINK) astate = initArrayResultAny(subplan->firstColType, @@ -992,7 +1110,7 @@ ExecSetParamPlan(SubPlanState *node, ExprContext *econtext) found = true; /* stash away current value */ - Assert(subplan->firstColType == tdesc->attrs[0]->atttypid); + Assert(subplan->firstColType == TupleDescAttr(tdesc, 0)->atttypid); dvalue = slot_getattr(slot, 1, &disnull); astate = accumArrayResultAny(astate, dvalue, disnull, subplan->firstColType, oldcontext); @@ -1083,6 +1201,40 @@ ExecSetParamPlan(SubPlanState *node, ExprContext *econtext) } MemoryContextSwitchTo(oldcontext); + + /* restore scan direction */ + estate->es_direction = dir; +} + +/* + * ExecSetParamPlanMulti + * + * Apply ExecSetParamPlan to evaluate any not-yet-evaluated initplan output + * parameters whose ParamIDs are listed in "params". Any listed params that + * are not initplan outputs are ignored. + * + * As with ExecSetParamPlan, any ExprContext belonging to the current EState + * can be used, but in principle a shorter-lived ExprContext is better than a + * longer-lived one. + */ +void +ExecSetParamPlanMulti(const Bitmapset *params, ExprContext *econtext) +{ + int paramid; + + paramid = -1; + while ((paramid = bms_next_member(params, paramid)) >= 0) + { + ParamExecData *prm = &(econtext->ecxt_param_exec_vals[paramid]); + + if (prm->execPlan != NULL) + { + /* Parameter not evaluated yet, so go do it */ + ExecSetParamPlan(prm->execPlan, econtext); + /* ExecSetParamPlan should have processed this param... */ + Assert(prm->execPlan == NULL); + } + } } /* diff --git a/src/backend/executor/nodeSubqueryscan.c b/src/backend/executor/nodeSubqueryscan.c index 088c92992e..b84c6892d5 100644 --- a/src/backend/executor/nodeSubqueryscan.c +++ b/src/backend/executor/nodeSubqueryscan.c @@ -7,7 +7,7 @@ * we need two sets of code. Ought to look at trying to unify the cases. * * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * @@ -120,35 +120,29 @@ ExecInitSubqueryScan(SubqueryScan *node, EState *estate, int eflags) */ ExecAssignExprContext(estate, &subquerystate->ss.ps); - /* - * initialize child expressions - */ - subquerystate->ss.ps.qual = - ExecInitQual(node->scan.plan.qual, (PlanState *) subquerystate); - - /* - * tuple table initialization - */ - ExecInitResultTupleSlot(estate, &subquerystate->ss.ps); - ExecInitScanTupleSlot(estate, &subquerystate->ss); - /* * initialize subquery */ subquerystate->subplan = ExecInitNode(node->subplan, estate, eflags); /* - * Initialize scan tuple type (needed by ExecAssignScanProjectionInfo) + * Initialize scan slot and type (needed by ExecAssignScanProjectionInfo) */ - ExecAssignScanType(&subquerystate->ss, - ExecGetResultType(subquerystate->subplan)); + ExecInitScanTupleSlot(estate, &subquerystate->ss, + ExecGetResultType(subquerystate->subplan)); /* - * Initialize result tuple type and projection info. + * Initialize result type and projection. 
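ExecSetParamPlanMulti above iterates a Bitmapset of parameter IDs with bms_next_member and evaluates only the ones still pending. A small sketch of the same iterate-and-fill loop using one 64-bit word as the set; next_member is an invented helper, not the PostgreSQL bitmapset API:

#include <stdint.h>
#include <stdio.h>

/*
 * Return the smallest member of 'set' that is greater than 'prev',
 * or -1 if there is none.  prev = -1 starts the iteration.
 */
static int
next_member(uint64_t set, int prev)
{
    for (int i = prev + 1; i < 64; i++)
    {
        if (set & (UINT64_C(1) << i))
            return i;
    }
    return -1;
}

int
main(void)
{
    uint64_t params = (UINT64_C(1) << 2) | (UINT64_C(1) << 5) | (UINT64_C(1) << 40);
    int      evaluated[64] = {0};

    /* Visit each listed parameter and "evaluate" it if not done yet. */
    for (int id = next_member(params, -1); id >= 0; id = next_member(params, id))
    {
        if (!evaluated[id])
        {
            evaluated[id] = 1;              /* pretend to run its initplan */
            printf("evaluated param %d\n", id);
        }
    }
    return 0;
}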
*/ - ExecAssignResultTypeFromTL(&subquerystate->ss.ps); + ExecInitResultTypeTL(&subquerystate->ss.ps); ExecAssignScanProjectionInfo(&subquerystate->ss); + /* + * initialize child expressions + */ + subquerystate->ss.ps.qual = + ExecInitQual(node->scan.plan.qual, (PlanState *) subquerystate); + return subquerystate; } @@ -169,7 +163,8 @@ ExecEndSubqueryScan(SubqueryScanState *node) /* * clean out the upper tuple table */ - ExecClearTuple(node->ss.ps.ps_ResultTupleSlot); + if (node->ss.ps.ps_ResultTupleSlot) + ExecClearTuple(node->ss.ps.ps_ResultTupleSlot); ExecClearTuple(node->ss.ss_ScanTupleSlot); /* diff --git a/src/backend/executor/nodeTableFuncscan.c b/src/backend/executor/nodeTableFuncscan.c index b03d2ef762..b0c94d7e06 100644 --- a/src/backend/executor/nodeTableFuncscan.c +++ b/src/backend/executor/nodeTableFuncscan.c @@ -3,7 +3,7 @@ * nodeTableFuncscan.c * Support routines for scanning RangeTableFunc (XMLTABLE like functions). * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * @@ -139,18 +139,6 @@ ExecInitTableFuncScan(TableFuncScan *node, EState *estate, int eflags) */ ExecAssignExprContext(estate, &scanstate->ss.ps); - /* - * initialize child expressions - */ - scanstate->ss.ps.qual = - ExecInitQual(node->scan.plan.qual, &scanstate->ss.ps); - - /* - * tuple table initialization - */ - ExecInitResultTupleSlot(estate, &scanstate->ss.ps); - ExecInitScanTupleSlot(estate, &scanstate->ss); - /* * initialize source tuple type */ @@ -158,19 +146,25 @@ ExecInitTableFuncScan(TableFuncScan *node, EState *estate, int eflags) tf->coltypes, tf->coltypmods, tf->colcollations); - - ExecAssignScanType(&scanstate->ss, tupdesc); + /* and the corresponding scan slot */ + ExecInitScanTupleSlot(estate, &scanstate->ss, tupdesc); /* - * Initialize result tuple type and projection info. + * Initialize result type and projection. 
*/ - ExecAssignResultTypeFromTL(&scanstate->ss.ps); + ExecInitResultTypeTL(&scanstate->ss.ps); ExecAssignScanProjectionInfo(&scanstate->ss); + /* + * initialize child expressions + */ + scanstate->ss.ps.qual = + ExecInitQual(node->scan.plan.qual, &scanstate->ss.ps); + /* Only XMLTABLE is supported currently */ scanstate->routine = &XmlTableRoutine; - scanstate->perValueCxt = + scanstate->perTableCxt = AllocSetContextCreate(CurrentMemoryContext, "TableFunc per value context", ALLOCSET_DEFAULT_SIZES); @@ -202,7 +196,7 @@ ExecInitTableFuncScan(TableFuncScan *node, EState *estate, int eflags) { Oid in_funcid; - getTypeInputInfo(tupdesc->attrs[i]->atttypid, + getTypeInputInfo(TupleDescAttr(tupdesc, i)->atttypid, &in_funcid, &scanstate->typioparams[i]); fmgr_info(in_funcid, &scanstate->in_functions[i]); } @@ -227,7 +221,8 @@ ExecEndTableFuncScan(TableFuncScanState *node) /* * clean out the tuple table */ - ExecClearTuple(node->ss.ps.ps_ResultTupleSlot); + if (node->ss.ps.ps_ResultTupleSlot) + ExecClearTuple(node->ss.ps.ps_ResultTupleSlot); ExecClearTuple(node->ss.ss_ScanTupleSlot); /* @@ -249,7 +244,8 @@ ExecReScanTableFuncScan(TableFuncScanState *node) { Bitmapset *chgparam = node->ss.ps.chgParam; - ExecClearTuple(node->ss.ps.ps_ResultTupleSlot); + if (node->ss.ps.ps_ResultTupleSlot) + ExecClearTuple(node->ss.ps.ps_ResultTupleSlot); ExecScanReScan(&node->ss); /* @@ -288,6 +284,16 @@ tfuncFetchRows(TableFuncScanState *tstate, ExprContext *econtext) oldcxt = MemoryContextSwitchTo(econtext->ecxt_per_query_memory); tstate->tupstore = tuplestore_begin_heap(false, false, work_mem); + /* + * Each call to fetch a new set of rows - of which there may be very many + * if XMLTABLE is being used in a lateral join - will allocate a possibly + * substantial amount of memory, so we cannot use the per-query context + * here. perTableCxt now serves the same function as "argcontext" does in + * FunctionScan - a place to store per-one-call (i.e. one result table) + * lifetime data (as opposed to per-query or per-result-tuple). + */ + MemoryContextSwitchTo(tstate->perTableCxt); + PG_TRY(); { routine->InitOpaque(tstate, @@ -319,8 +325,7 @@ tfuncFetchRows(TableFuncScanState *tstate, ExprContext *econtext) } PG_END_TRY(); - /* return to original memory context, and clean up */ - MemoryContextSwitchTo(oldcxt); + /* clean up and return to original memory context */ if (tstate->opaque != NULL) { @@ -328,6 +333,9 @@ tfuncFetchRows(TableFuncScanState *tstate, ExprContext *econtext) tstate->opaque = NULL; } + MemoryContextSwitchTo(oldcxt); + MemoryContextReset(tstate->perTableCxt); + return; } @@ -358,8 +366,9 @@ tfuncInitialize(TableFuncScanState *tstate, ExprContext *econtext, Datum doc) forboth(lc1, tstate->ns_uris, lc2, tstate->ns_names) { ExprState *expr = (ExprState *) lfirst(lc1); - char *ns_name = strVal(lfirst(lc2)); + Value *ns_node = (Value *) lfirst(lc2); char *ns_uri; + char *ns_name; value = ExecEvalExpr((ExprState *) expr, econtext, &isnull); if (isnull) @@ -368,6 +377,9 @@ tfuncInitialize(TableFuncScanState *tstate, ExprContext *econtext, Datum doc) errmsg("namespace URI must not be null"))); ns_uri = TextDatumGetCString(value); + /* DEFAULT is passed down to SetNamespace as NULL */ + ns_name = ns_node ? 
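The tfuncFetchRows comment above explains why per-call data goes into perTableCxt: each fetch of a result table can allocate a substantial amount of memory, so it must be released per call rather than at end of query. A toy illustration of that reset-per-batch idea with a bump allocator standing in for a memory context (Arena and its functions are invented):

#include <stdio.h>
#include <string.h>

/* A tiny bump allocator standing in for a resettable memory context. */
typedef struct Arena
{
    char   buf[4096];
    size_t used;
} Arena;

static void *
arena_alloc(Arena *a, size_t n)
{
    if (a->used + n > sizeof(a->buf))
        return NULL;            /* out of space in this toy arena */
    void *p = a->buf + a->used;
    a->used += n;
    return p;
}

static void
arena_reset(Arena *a)
{
    a->used = 0;                /* everything allocated so far is discarded */
}

/* Build one "result table" worth of scratch data in the per-batch arena. */
static void
fetch_one_batch(Arena *per_batch, int batchno)
{
    char *scratch = arena_alloc(per_batch, 1024);

    if (scratch != NULL)
        snprintf(scratch, 1024, "batch %d scratch data", batchno);
    printf("%s (arena used: %zu bytes)\n",
           scratch ? scratch : "(alloc failed)", per_batch->used);
}

int
main(void)
{
    Arena per_batch = {{0}, 0};

    /* Many batches (e.g. a lateral join) never accumulate memory, because
     * the arena is reset after each one rather than at end of query. */
    for (int i = 0; i < 5; i++)
    {
        fetch_one_batch(&per_batch, i);
        arena_reset(&per_batch);
    }
    return 0;
}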
strVal(ns_node) : NULL; + routine->SetNamespace(tstate, ns_name, ns_uri); } @@ -390,6 +402,7 @@ tfuncInitialize(TableFuncScanState *tstate, ExprContext *econtext, Datum doc) foreach(lc1, tstate->colexprs) { char *colfilter; + Form_pg_attribute att = TupleDescAttr(tupdesc, colno); if (colno != ordinalitycol) { @@ -403,11 +416,11 @@ tfuncInitialize(TableFuncScanState *tstate, ExprContext *econtext, Datum doc) (errcode(ERRCODE_NULL_VALUE_NOT_ALLOWED), errmsg("column filter expression must not be null"), errdetail("Filter for column \"%s\" is null.", - NameStr(tupdesc->attrs[colno]->attname)))); + NameStr(att->attname)))); colfilter = TextDatumGetCString(value); } else - colfilter = NameStr(tupdesc->attrs[colno]->attname); + colfilter = NameStr(att->attname); routine->SetColumnFilter(tstate, colfilter, colno); } @@ -433,7 +446,14 @@ tfuncLoadRows(TableFuncScanState *tstate, ExprContext *econtext) ordinalitycol = ((TableFuncScan *) (tstate->ss.ps.plan))->tablefunc->ordinalitycol; - oldcxt = MemoryContextSwitchTo(tstate->perValueCxt); + + /* + * We need a short-lived memory context that we can clean up each time + * around the loop, to avoid wasting space. Our default per-tuple context + * is fine for the job, since we won't have used it for anything yet in + * this tuple cycle. + */ + oldcxt = MemoryContextSwitchTo(econtext->ecxt_per_tuple_memory); /* * Keep requesting rows from the table builder until there aren't any. @@ -453,6 +473,8 @@ tfuncLoadRows(TableFuncScanState *tstate, ExprContext *econtext) */ for (colno = 0; colno < natts; colno++) { + Form_pg_attribute att = TupleDescAttr(tupdesc, colno); + if (colno == ordinalitycol) { /* Fast path for ordinality column */ @@ -465,8 +487,8 @@ tfuncLoadRows(TableFuncScanState *tstate, ExprContext *econtext) values[colno] = routine->GetValue(tstate, colno, - tupdesc->attrs[colno]->atttypid, - tupdesc->attrs[colno]->atttypmod, + att->atttypid, + att->atttypmod, &isnull); /* No value? Evaluate and apply the default, if any */ @@ -484,7 +506,7 @@ tfuncLoadRows(TableFuncScanState *tstate, ExprContext *econtext) ereport(ERROR, (errcode(ERRCODE_NULL_VALUE_NOT_ALLOWED), errmsg("null is not allowed in column \"%s\"", - NameStr(tupdesc->attrs[colno]->attname)))); + NameStr(att->attname)))); nulls[colno] = isnull; } @@ -496,7 +518,7 @@ tfuncLoadRows(TableFuncScanState *tstate, ExprContext *econtext) tuplestore_putvalues(tstate->tupstore, tupdesc, values, nulls); - MemoryContextReset(tstate->perValueCxt); + MemoryContextReset(econtext->ecxt_per_tuple_memory); } MemoryContextSwitchTo(oldcxt); diff --git a/src/backend/executor/nodeTidscan.c b/src/backend/executor/nodeTidscan.c index 0ee76e7d25..bc859e3d51 100644 --- a/src/backend/executor/nodeTidscan.c +++ b/src/backend/executor/nodeTidscan.c @@ -3,7 +3,7 @@ * nodeTidscan.c * Routines to support direct tid scans of relations * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * @@ -377,20 +377,18 @@ TidNext(TidScanState *node) if (heap_fetch(heapRelation, snapshot, tuple, &buffer, false, NULL)) { /* - * store the scanned tuple in the scan tuple slot of the scan + * Store the scanned tuple in the scan tuple slot of the scan * state. Eventually we will only do this and not return a tuple. 
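tfuncLoadRows above assembles each output row column by column: use the fetched value if present, otherwise evaluate the column default, and reject a remaining null when the column is marked NOT NULL. A simplified sketch of that per-column fallback logic over plain arrays (ColDef and the error handling are invented stand-ins):

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

/* Invented per-column metadata; not PostgreSQL catalog structures. */
typedef struct ColDef
{
    const char *name;
    bool        has_default;
    int         default_val;
    bool        not_null;
} ColDef;

/*
 * Fill values[]/nulls[] for one row: take the fetched value if present,
 * fall back to the column default, and reject nulls in NOT NULL columns.
 */
static void
assemble_row(const ColDef *cols, int ncols,
             const int *fetched, const bool *missing,
             int *values, bool *nulls)
{
    for (int c = 0; c < ncols; c++)
    {
        if (!missing[c])
        {
            values[c] = fetched[c];
            nulls[c] = false;
        }
        else if (cols[c].has_default)
        {
            values[c] = cols[c].default_val;
            nulls[c] = false;
        }
        else
            nulls[c] = true;

        if (nulls[c] && cols[c].not_null)
        {
            fprintf(stderr, "null is not allowed in column \"%s\"\n",
                    cols[c].name);
            exit(1);
        }
    }
}

int
main(void)
{
    ColDef cols[2] = {{"id", false, 0, true}, {"qty", true, 1, false}};
    int    fetched[2] = {10, 0};
    bool   missing[2] = {false, true};  /* qty absent: default applies */
    int    values[2];
    bool   nulls[2];

    assemble_row(cols, 2, fetched, missing, values, nulls);
    printf("id=%d qty=%d\n", values[0], values[1]);
    return 0;
}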
- * Note: we pass 'false' because tuples returned by amgetnext are - * pointers onto disk pages and were not created with palloc() and - * so should not be pfree()'d. */ - ExecStoreTuple(tuple, /* tuple to store */ - slot, /* slot to store in */ - buffer, /* buffer associated with tuple */ - false); /* don't pfree */ + ExecStoreBufferHeapTuple(tuple, /* tuple to store */ + slot, /* slot to store in */ + buffer); /* buffer associated with + * tuple */ /* * At this point we have an extra pin on the buffer, because - * ExecStoreTuple incremented the pin count. Drop our local pin. + * ExecStoreHeapTuple incremented the pin count. Drop our local + * pin. */ ReleaseBuffer(buffer); @@ -489,13 +487,9 @@ ExecEndTidScan(TidScanState *node) /* * clear out tuple table slots */ - ExecClearTuple(node->ss.ps.ps_ResultTupleSlot); + if (node->ss.ps.ps_ResultTupleSlot) + ExecClearTuple(node->ss.ps.ps_ResultTupleSlot); ExecClearTuple(node->ss.ss_ScanTupleSlot); - - /* - * close the heap relation. - */ - ExecCloseScanRelation(node->ss.ss_currentRelation); } /* ---------------------------------------------------------------- @@ -530,20 +524,6 @@ ExecInitTidScan(TidScan *node, EState *estate, int eflags) */ ExecAssignExprContext(estate, &tidstate->ss.ps); - /* - * initialize child expressions - */ - tidstate->ss.ps.qual = - ExecInitQual(node->scan.plan.qual, (PlanState *) tidstate); - - TidExprListCreate(tidstate); - - /* - * tuple table initialization - */ - ExecInitResultTupleSlot(estate, &tidstate->ss.ps); - ExecInitScanTupleSlot(estate, &tidstate->ss); - /* * mark tid list as not computed yet */ @@ -552,7 +532,7 @@ ExecInitTidScan(TidScan *node, EState *estate, int eflags) tidstate->tss_TidPtr = -1; /* - * open the base relation and acquire appropriate lock on it. + * open the scan relation */ currentRelation = ExecOpenScanRelation(estate, node->scan.scanrelid, eflags); @@ -562,14 +542,23 @@ ExecInitTidScan(TidScan *node, EState *estate, int eflags) /* * get the scan type from the relation descriptor. */ - ExecAssignScanType(&tidstate->ss, RelationGetDescr(currentRelation)); + ExecInitScanTupleSlot(estate, &tidstate->ss, + RelationGetDescr(currentRelation)); /* - * Initialize result tuple type and projection info. + * Initialize result type and projection. */ - ExecAssignResultTypeFromTL(&tidstate->ss.ps); + ExecInitResultTypeTL(&tidstate->ss.ps); ExecAssignScanProjectionInfo(&tidstate->ss); + /* + * initialize child expressions + */ + tidstate->ss.ps.qual = + ExecInitQual(node->scan.plan.qual, (PlanState *) tidstate); + + TidExprListCreate(tidstate); + /* * all done. */ diff --git a/src/backend/executor/nodeUnique.c b/src/backend/executor/nodeUnique.c index 621fdd9b9c..c791f89b48 100644 --- a/src/backend/executor/nodeUnique.c +++ b/src/backend/executor/nodeUnique.c @@ -11,7 +11,7 @@ * (It's debatable whether the savings justifies carrying two plan node * types, though.) 
* - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * @@ -47,7 +47,7 @@ static TupleTableSlot * /* return: a tuple or NULL */ ExecUnique(PlanState *pstate) { UniqueState *node = castNode(UniqueState, pstate); - Unique *plannode = (Unique *) node->ps.plan; + ExprContext *econtext = node->ps.ps_ExprContext; TupleTableSlot *resultTupleSlot; TupleTableSlot *slot; PlanState *outerPlan; @@ -89,10 +89,9 @@ ExecUnique(PlanState *pstate) * If so then we loop back and fetch another new tuple from the * subplan. */ - if (!execTuplesMatch(slot, resultTupleSlot, - plannode->numCols, plannode->uniqColIdx, - node->eqfunctions, - node->tempContext)) + econtext->ecxt_innertuple = slot; + econtext->ecxt_outertuple = resultTupleSlot; + if (!ExecQualAndReset(node->eqfunction, econtext)) break; } @@ -129,21 +128,9 @@ ExecInitUnique(Unique *node, EState *estate, int eflags) uniquestate->ps.ExecProcNode = ExecUnique; /* - * Miscellaneous initialization - * - * Unique nodes have no ExprContext initialization because they never call - * ExecQual or ExecProject. But they do need a per-tuple memory context - * anyway for calling execTuplesMatch. + * create expression context */ - uniquestate->tempContext = - AllocSetContextCreate(CurrentMemoryContext, - "Unique", - ALLOCSET_DEFAULT_SIZES); - - /* - * Tuple table initialization - */ - ExecInitResultTupleSlot(estate, &uniquestate->ps); + ExecAssignExprContext(estate, &uniquestate->ps); /* * then initialize outer plan @@ -151,18 +138,21 @@ ExecInitUnique(Unique *node, EState *estate, int eflags) outerPlanState(uniquestate) = ExecInitNode(outerPlan(node), estate, eflags); /* - * unique nodes do no projections, so initialize projection info for this - * node appropriately + * Initialize result slot and type. Unique nodes do no projections, so + * initialize projection info for this node appropriately. */ - ExecAssignResultTypeFromTL(&uniquestate->ps); + ExecInitResultTupleSlotTL(&uniquestate->ps); uniquestate->ps.ps_ProjInfo = NULL; /* * Precompute fmgr lookup data for inner loop */ - uniquestate->eqfunctions = - execTuplesMatchPrepare(node->numCols, - node->uniqOperators); + uniquestate->eqfunction = + execTuplesMatchPrepare(ExecGetResultType(outerPlanState(uniquestate)), + node->numCols, + node->uniqColIdx, + node->uniqOperators, + &uniquestate->ps); return uniquestate; } @@ -180,7 +170,7 @@ ExecEndUnique(UniqueState *node) /* clean up tuple table */ ExecClearTuple(node->ps.ps_ResultTupleSlot); - MemoryContextDelete(node->tempContext); + ExecFreeExprContext(&node->ps); ExecEndNode(outerPlanState(node)); } diff --git a/src/backend/executor/nodeValuesscan.c b/src/backend/executor/nodeValuesscan.c index 6eacaed8bb..fa49d0470f 100644 --- a/src/backend/executor/nodeValuesscan.c +++ b/src/backend/executor/nodeValuesscan.c @@ -4,7 +4,7 @@ * Support routines for scanning Values lists * ("VALUES (...), (...), ..." in rangetable). 
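The reworked ExecUnique above compares each tuple pulled from the subplan against the last tuple it returned and emits it only when the comparison fails. A compact sketch of that emit-on-change filter over a sorted integer array (not the executor's slot-based implementation):

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

/*
 * Emit-on-change filter over a sorted input: a row is returned only when it
 * differs from the last row returned, mirroring how Unique compares each
 * fetched tuple against its saved result tuple.
 */
static size_t
unique_filter(const int *sorted_in, size_t n, int *out)
{
    size_t nout = 0;
    bool   have_prev = false;
    int    prev = 0;

    for (size_t i = 0; i < n; i++)
    {
        if (have_prev && sorted_in[i] == prev)
            continue;           /* duplicate of the last emitted row */
        prev = sorted_in[i];
        have_prev = true;
        out[nout++] = prev;
    }
    return nout;
}

int
main(void)
{
    int    in[] = {1, 1, 2, 2, 2, 3, 5, 5};
    int    out[8];
    size_t n = unique_filter(in, 8, out);

    for (size_t i = 0; i < n; i++)
        printf("%d ", out[i]);
    printf("\n");               /* prints: 1 2 3 5 */
    return 0;
}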
* - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * @@ -25,6 +25,7 @@ #include "executor/executor.h" #include "executor/nodeValuesscan.h" +#include "jit/jit.h" #include "utils/expandeddatum.h" @@ -92,12 +93,13 @@ ValuesNext(ValuesScanState *node) if (exprlist) { MemoryContext oldContext; + List *oldsubplans; List *exprstatelist; Datum *values; bool *isnull; - Form_pg_attribute *att; ListCell *lc; int resind; + int saved_jit_flags; /* * Get rid of any prior cycle's leftovers. We use ReScanExprContext @@ -115,12 +117,30 @@ ValuesNext(ValuesScanState *node) oldContext = MemoryContextSwitchTo(econtext->ecxt_per_tuple_memory); /* - * Pass NULL, not my plan node, because we don't want anything in this - * transient state linking into permanent state. The only possibility - * is a SubPlan, and there shouldn't be any (any subselects in the - * VALUES list should be InitPlans). + * The expressions might contain SubPlans (this is currently only + * possible if there's a sub-select containing a LATERAL reference, + * otherwise sub-selects in a VALUES list should be InitPlans). Those + * subplans will want to hook themselves into our subPlan list, which + * would result in a corrupted list after we delete the eval state. We + * can work around this by saving and restoring the subPlan list. + * (There's no need for the functionality that would be enabled by + * having the list entries, since the SubPlans aren't going to be + * re-executed anyway.) */ - exprstatelist = ExecInitExprList(exprlist, NULL); + oldsubplans = node->ss.ps.subPlan; + node->ss.ps.subPlan = NIL; + + /* + * As the expressions are only ever used once, disable JIT for them. + * This is worthwhile because it's common to insert significant + * amounts of data via VALUES(). + */ + saved_jit_flags = econtext->ecxt_estate->es_jit_flags; + econtext->ecxt_estate->es_jit_flags = PGJIT_NONE; + exprstatelist = ExecInitExprList(exprlist, &node->ss.ps); + econtext->ecxt_estate->es_jit_flags = saved_jit_flags; + + node->ss.ps.subPlan = oldsubplans; /* parser should have checked all sublists are the same length */ Assert(list_length(exprstatelist) == slot->tts_tupleDescriptor->natts); @@ -131,12 +151,13 @@ ValuesNext(ValuesScanState *node) */ values = slot->tts_values; isnull = slot->tts_isnull; - att = slot->tts_tupleDescriptor->attrs; resind = 0; foreach(lc, exprstatelist) { ExprState *estate = (ExprState *) lfirst(lc); + Form_pg_attribute attr = TupleDescAttr(slot->tts_tupleDescriptor, + resind); values[resind] = ExecEvalExpr(estate, econtext, @@ -150,7 +171,7 @@ ValuesNext(ValuesScanState *node) */ values[resind] = MakeExpandedObjectReadOnly(values[resind], isnull[resind], - att[resind]->attlen); + attr->attlen); resind++; } @@ -237,23 +258,22 @@ ExecInitValuesScan(ValuesScan *node, EState *estate, int eflags) ExecAssignExprContext(estate, planstate); /* - * tuple table initialization + * Get info about values list, initialize scan slot with it. */ - ExecInitResultTupleSlot(estate, &scanstate->ss.ps); - ExecInitScanTupleSlot(estate, &scanstate->ss); + tupdesc = ExecTypeFromExprList((List *) linitial(node->values_lists)); + ExecInitScanTupleSlot(estate, &scanstate->ss, tupdesc); /* - * initialize child expressions + * Initialize result type and projection. 
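ValuesNext above disables JIT for expressions that run exactly once, since compiling them can never pay for itself. A sketch of the underlying cost argument; the cost numbers and the worth_compiling helper are invented for illustration and are not PostgreSQL's actual JIT costing:

#include <stdbool.h>
#include <stdio.h>

/*
 * Decide whether compiling an expression is worth it: compilation pays off
 * only when its one-time cost is recovered across the expected number of
 * executions.  The numbers below are invented for illustration.
 */
static bool
worth_compiling(double compile_cost, double interp_cost_per_exec,
                double compiled_cost_per_exec, long expected_execs)
{
    double saved_per_exec = interp_cost_per_exec - compiled_cost_per_exec;

    if (saved_per_exec <= 0.0)
        return false;
    return saved_per_exec * (double) expected_execs > compile_cost;
}

int
main(void)
{
    /* A VALUES row expression runs exactly once: never worth compiling. */
    printf("one-shot: %d\n",
           worth_compiling(1000.0, 1.0, 0.1, 1));       /* 0 */

    /* The same expression inside a million-row scan: compile it. */
    printf("hot loop: %d\n",
           worth_compiling(1000.0, 1.0, 0.1, 1000000)); /* 1 */
    return 0;
}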
*/ - scanstate->ss.ps.qual = - ExecInitQual(node->scan.plan.qual, (PlanState *) scanstate); + ExecInitResultTypeTL(&scanstate->ss.ps); + ExecAssignScanProjectionInfo(&scanstate->ss); /* - * get info about values list + * initialize child expressions */ - tupdesc = ExecTypeFromExprList((List *) linitial(node->values_lists)); - - ExecAssignScanType(&scanstate->ss, tupdesc); + scanstate->ss.ps.qual = + ExecInitQual(node->scan.plan.qual, (PlanState *) scanstate); /* * Other node-specific setup @@ -270,12 +290,6 @@ ExecInitValuesScan(ValuesScan *node, EState *estate, int eflags) scanstate->exprlists[i++] = (List *) lfirst(vtl); } - /* - * Initialize result tuple type and projection info. - */ - ExecAssignResultTypeFromTL(&scanstate->ss.ps); - ExecAssignScanProjectionInfo(&scanstate->ss); - return scanstate; } @@ -298,7 +312,8 @@ ExecEndValuesScan(ValuesScanState *node) /* * clean out the tuple table */ - ExecClearTuple(node->ss.ps.ps_ResultTupleSlot); + if (node->ss.ps.ps_ResultTupleSlot) + ExecClearTuple(node->ss.ps.ps_ResultTupleSlot); ExecClearTuple(node->ss.ss_ScanTupleSlot); } @@ -311,7 +326,8 @@ ExecEndValuesScan(ValuesScanState *node) void ExecReScanValuesScan(ValuesScanState *node) { - ExecClearTuple(node->ss.ps.ps_ResultTupleSlot); + if (node->ss.ps.ps_ResultTupleSlot) + ExecClearTuple(node->ss.ps.ps_ResultTupleSlot); ExecScanReScan(&node->ss); diff --git a/src/backend/executor/nodeWindowAgg.c b/src/backend/executor/nodeWindowAgg.c index 80be46029f..6e597e8285 100644 --- a/src/backend/executor/nodeWindowAgg.c +++ b/src/backend/executor/nodeWindowAgg.c @@ -23,7 +23,7 @@ * aggregate function over all rows in the current row's window frame. * * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * IDENTIFICATION @@ -49,6 +49,7 @@ #include "utils/datum.h" #include "utils/lsyscache.h" #include "utils/memutils.h" +#include "utils/regproc.h" #include "utils/syscache.h" #include "windowapi.h" @@ -179,10 +180,11 @@ static void begin_partition(WindowAggState *winstate); static void spool_tuples(WindowAggState *winstate, int64 pos); static void release_partition(WindowAggState *winstate); -static bool row_is_in_frame(WindowAggState *winstate, int64 pos, +static int row_is_in_frame(WindowAggState *winstate, int64 pos, TupleTableSlot *slot); -static void update_frameheadpos(WindowObject winobj, TupleTableSlot *slot); -static void update_frametailpos(WindowObject winobj, TupleTableSlot *slot); +static void update_frameheadpos(WindowAggState *winstate); +static void update_frametailpos(WindowAggState *winstate); +static void update_grouptailpos(WindowAggState *winstate); static WindowStatePerAggData *initialize_peragg(WindowAggState *winstate, WindowFunc *wfunc, @@ -682,11 +684,9 @@ eval_windowaggregates(WindowAggState *winstate) temp_slot = winstate->temp_slot_1; /* - * Currently, we support only a subset of the SQL-standard window framing - * rules. - * - * If the frame start is UNBOUNDED_PRECEDING, the window frame consists of - * a contiguous group of rows extending forward from the start of the + * If the window's frame start clause is UNBOUNDED_PRECEDING and no + * exclusion clause is specified, then the window frame consists of a + * contiguous group of rows extending forward from the start of the * partition, and rows only enter the frame, never exit it, as the current * row advances forward. 
This makes it possible to use an incremental * strategy for evaluating aggregates: we run the transition function for @@ -709,6 +709,11 @@ eval_windowaggregates(WindowAggState *winstate) * must perform the aggregation all over again for all tuples within the * new frame boundaries. * + * If there's any exclusion clause, then we may have to aggregate over a + * non-contiguous set of rows, so we punt and recalculate for every row. + * (For some frame end choices, it might be that the frame is always + * contiguous anyway, but that's an optimization to investigate later.) + * * In many common cases, multiple rows share the same frame and hence the * same aggregate value. (In particular, if there's no ORDER BY in a RANGE * window, then all rows are peers and so they all have window frame equal @@ -727,7 +732,7 @@ eval_windowaggregates(WindowAggState *winstate) * The frame head should never move backwards, and the code below wouldn't * cope if it did, so for safety we complain if it does. */ - update_frameheadpos(agg_winobj, temp_slot); + update_frameheadpos(winstate); if (winstate->frameheadpos < winstate->aggregatedbase) elog(ERROR, "window frame head moved backward"); @@ -736,15 +741,16 @@ eval_windowaggregates(WindowAggState *winstate) * the result values that were previously saved at the bottom of this * function. Since we don't know the current frame's end yet, this is not * possible to check for fully. But if the frame end mode is UNBOUNDED - * FOLLOWING or CURRENT ROW, and the current row lies within the previous - * row's frame, then the two frames' ends must coincide. Note that on the - * first row aggregatedbase == aggregatedupto, meaning this test must - * fail, so we don't need to check the "there was no previous row" case - * explicitly here. + * FOLLOWING or CURRENT ROW, no exclusion clause is specified, and the + * current row lies within the previous row's frame, then the two frames' + * ends must coincide. Note that on the first row aggregatedbase == + * aggregatedupto, meaning this test must fail, so we don't need to check + * the "there was no previous row" case explicitly here. 
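The comments above describe the incremental aggregation strategy: rows entering the frame are added with the transition function, rows leaving through the frame head are removed with the inverse transition function when one exists, and otherwise (or with an exclusion clause) the aggregate is restarted from scratch. A small sketch of the incremental path only, as a moving sum over an integer array:

#include <stdio.h>

/* Running state for an aggregate that supports an inverse transition. */
typedef struct MovingSum
{
    long sum;
    int  frame_head;            /* first row index currently in the state */
    int  frame_end;             /* one past the last row added */
} MovingSum;

/* Advance the frame to [new_head, new_end) over 'rows' incrementally. */
static void
advance_frame(MovingSum *st, const int *rows, int new_head, int new_end)
{
    /* Inverse transition: subtract rows that left through the head. */
    while (st->frame_head < new_head)
        st->sum -= rows[st->frame_head++];
    /* Forward transition: add rows that entered through the end. */
    while (st->frame_end < new_end)
        st->sum += rows[st->frame_end++];
}

int
main(void)
{
    int       rows[] = {5, 1, 4, 2, 8, 3};
    MovingSum st = {0, 0, 0};

    /* Frame "2 preceding to current row" as the current row advances. */
    for (int cur = 0; cur < 6; cur++)
    {
        int head = cur - 2 < 0 ? 0 : cur - 2;

        advance_frame(&st, rows, head, cur + 1);
        printf("row %d: sum = %ld\n", cur, st.sum);
    }
    return 0;
}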
*/ if (winstate->aggregatedbase == winstate->frameheadpos && (winstate->frameOptions & (FRAMEOPTION_END_UNBOUNDED_FOLLOWING | FRAMEOPTION_END_CURRENT_ROW)) && + !(winstate->frameOptions & FRAMEOPTION_EXCLUSION) && winstate->aggregatedbase <= winstate->currentpos && winstate->aggregatedupto > winstate->currentpos) { @@ -765,6 +771,7 @@ eval_windowaggregates(WindowAggState *winstate) * - if we're processing the first row in the partition, or * - if the frame's head moved and we cannot use an inverse * transition function, or + * - we have an EXCLUSION clause, or * - if the new frame doesn't overlap the old one * * Note that we don't strictly need to restart in the last case, but if @@ -779,6 +786,7 @@ eval_windowaggregates(WindowAggState *winstate) if (winstate->currentpos == 0 || (winstate->aggregatedbase != winstate->frameheadpos && !OidIsValid(peraggstate->invtransfn_oid)) || + (winstate->frameOptions & FRAMEOPTION_EXCLUSION) || winstate->aggregatedupto <= winstate->frameheadpos) { peraggstate->restart = true; @@ -919,6 +927,8 @@ eval_windowaggregates(WindowAggState *winstate) */ for (;;) { + int ret; + /* Fetch next row if we didn't already */ if (TupIsNull(agg_row_slot)) { @@ -927,9 +937,15 @@ eval_windowaggregates(WindowAggState *winstate) break; /* must be end of partition */ } - /* Exit loop (for now) if not in frame */ - if (!row_is_in_frame(winstate, winstate->aggregatedupto, agg_row_slot)) + /* + * Exit loop if no more rows can be in frame. Skip aggregation if + * current row is not in frame but there might be more in the frame. + */ + ret = row_is_in_frame(winstate, winstate->aggregatedupto, agg_row_slot); + if (ret < 0) break; + if (ret == 0) + goto next_tuple; /* Set tuple context for evaluation of aggregate arguments */ winstate->tmpcontext->ecxt_outertuple = agg_row_slot; @@ -950,6 +966,7 @@ eval_windowaggregates(WindowAggState *winstate) peraggstate); } +next_tuple: /* Reset per-input-tuple context after each tuple */ ResetExprContext(winstate->tmpcontext); @@ -1060,18 +1077,30 @@ eval_windowfunction(WindowAggState *winstate, WindowStatePerFunc perfuncstate, static void begin_partition(WindowAggState *winstate) { + WindowAgg *node = (WindowAgg *) winstate->ss.ps.plan; PlanState *outerPlan = outerPlanState(winstate); + int frameOptions = winstate->frameOptions; int numfuncs = winstate->numfuncs; int i; winstate->partition_spooled = false; winstate->framehead_valid = false; winstate->frametail_valid = false; + winstate->grouptail_valid = false; winstate->spooled_rows = 0; winstate->currentpos = 0; winstate->frameheadpos = 0; - winstate->frametailpos = -1; + winstate->frametailpos = 0; + winstate->currentgroup = 0; + winstate->frameheadgroup = 0; + winstate->frametailgroup = 0; + winstate->groupheadpos = 0; + winstate->grouptailpos = -1; /* see update_grouptailpos */ ExecClearTuple(winstate->agg_row_slot); + if (winstate->framehead_slot) + ExecClearTuple(winstate->framehead_slot); + if (winstate->frametail_slot) + ExecClearTuple(winstate->frametail_slot); /* * If this is the very first partition, we need to fetch the first input @@ -1098,7 +1127,7 @@ begin_partition(WindowAggState *winstate) /* * Set up read pointers for the tuplestore. The current pointer doesn't * need BACKWARD capability, but the per-window-function read pointers do, - * and the aggregate pointer does if frame start is movable. + * and the aggregate pointer does if we might need to restart aggregation. 
*/ winstate->current_ptr = 0; /* read pointer 0 is pre-allocated */ @@ -1111,10 +1140,14 @@ begin_partition(WindowAggState *winstate) WindowObject agg_winobj = winstate->agg_winobj; int readptr_flags = 0; - /* If the frame head is potentially movable ... */ - if (!(winstate->frameOptions & FRAMEOPTION_START_UNBOUNDED_PRECEDING)) + /* + * If the frame head is potentially movable, or we have an EXCLUSION + * clause, we might need to restart aggregation ... + */ + if (!(frameOptions & FRAMEOPTION_START_UNBOUNDED_PRECEDING) || + (frameOptions & FRAMEOPTION_EXCLUSION)) { - /* ... create a mark pointer to track the frame head */ + /* ... so create a mark pointer to track the frame head */ agg_winobj->markptr = tuplestore_alloc_read_pointer(winstate->buffer, 0); /* and the read pointer will need BACKWARD capability */ readptr_flags |= EXEC_FLAG_BACKWARD; @@ -1148,6 +1181,47 @@ begin_partition(WindowAggState *winstate) } } + /* + * If we are in RANGE or GROUPS mode, then determining frame boundaries + * requires physical access to the frame endpoint rows, except in certain + * degenerate cases. We create read pointers to point to those rows, to + * simplify access and ensure that the tuplestore doesn't discard the + * endpoint rows prematurely. (Must create pointers in exactly the same + * cases that update_frameheadpos and update_frametailpos need them.) + */ + winstate->framehead_ptr = winstate->frametail_ptr = -1; /* if not used */ + + if (frameOptions & (FRAMEOPTION_RANGE | FRAMEOPTION_GROUPS)) + { + if (((frameOptions & FRAMEOPTION_START_CURRENT_ROW) && + node->ordNumCols != 0) || + (frameOptions & FRAMEOPTION_START_OFFSET)) + winstate->framehead_ptr = + tuplestore_alloc_read_pointer(winstate->buffer, 0); + if (((frameOptions & FRAMEOPTION_END_CURRENT_ROW) && + node->ordNumCols != 0) || + (frameOptions & FRAMEOPTION_END_OFFSET)) + winstate->frametail_ptr = + tuplestore_alloc_read_pointer(winstate->buffer, 0); + } + + /* + * If we have an exclusion clause that requires knowing the boundaries of + * the current row's peer group, we create a read pointer to track the + * tail position of the peer group (i.e., first row of the next peer + * group). The head position does not require its own pointer because we + * maintain that as a side effect of advancing the current row. + */ + winstate->grouptail_ptr = -1; + + if ((frameOptions & (FRAMEOPTION_EXCLUDE_GROUP | + FRAMEOPTION_EXCLUDE_TIES)) && + node->ordNumCols != 0) + { + winstate->grouptail_ptr = + tuplestore_alloc_read_pointer(winstate->buffer, 0); + } + /* * Store the first tuple into the tuplestore (it's always available now; * we either read it above, or saved it at the end of previous partition) @@ -1202,12 +1276,13 @@ spool_tuples(WindowAggState *winstate, int64 pos) if (node->partNumCols > 0) { + ExprContext *econtext = winstate->tmpcontext; + + econtext->ecxt_innertuple = winstate->first_part_slot; + econtext->ecxt_outertuple = outerslot; + /* Check if this tuple still belongs to the current partition */ - if (!execTuplesMatch(winstate->first_part_slot, - outerslot, - node->partNumCols, node->partColIdx, - winstate->partEqfunctions, - winstate->tmpcontext->ecxt_per_tuple_memory)) + if (!ExecQualAndReset(winstate->partEqfunction, econtext)) { /* * end of partition; copy the tuple for the next cycle. @@ -1274,119 +1349,127 @@ release_partition(WindowAggState *winstate) * The caller must have already determined that the row is in the partition * and fetched it into a slot. This function just encapsulates the framing * rules. 
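begin_partition above allocates extra tuplestore read pointers so the frame head, frame tail, and group tail can each be tracked with its own cursor without discarding rows another cursor still needs. A toy spool with independent cursors illustrating that idea (Spool and Cursor are invented, not the tuplestore API):

#include <stdio.h>

#define MAX_ROWS 16

/* A toy spool: rows produced once, read through independent cursors. */
typedef struct Spool
{
    int rows[MAX_ROWS];
    int nrows;                  /* rows spooled so far */
} Spool;

typedef struct Cursor
{
    int pos;                    /* next row this cursor will read */
} Cursor;

/* Make sure at least 'upto'+1 rows exist, producing more on demand. */
static void
spool_upto(Spool *s, int upto)
{
    while (s->nrows <= upto && s->nrows < MAX_ROWS)
    {
        s->rows[s->nrows] = s->nrows * 10;      /* fake "next input row" */
        s->nrows++;
    }
}

/* Read the next row through one cursor; returns 0 at end of spool. */
static int
cursor_next(Spool *s, Cursor *c, int *row)
{
    spool_upto(s, c->pos);
    if (c->pos >= s->nrows)
        return 0;
    *row = s->rows[c->pos++];
    return 1;
}

int
main(void)
{
    Spool  spool = {{0}, 0};
    Cursor current = {0}, frame_tail = {0};
    int    row;

    /* The frame-tail cursor can run ahead of the current-row cursor
     * without disturbing it; both see the same spooled rows. */
    cursor_next(&spool, &frame_tail, &row);
    cursor_next(&spool, &frame_tail, &row);
    printf("frame tail looked ahead to %d\n", row);

    cursor_next(&spool, &current, &row);
    printf("current row is still %d\n", row);
    return 0;
}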
+ * + * Returns: + * -1, if the row is out of frame and no succeeding rows can be in frame + * 0, if the row is out of frame but succeeding rows might be in frame + * 1, if the row is in frame + * + * May clobber winstate->temp_slot_2. */ -static bool +static int row_is_in_frame(WindowAggState *winstate, int64 pos, TupleTableSlot *slot) { int frameOptions = winstate->frameOptions; Assert(pos >= 0); /* else caller error */ - /* First, check frame starting conditions */ - if (frameOptions & FRAMEOPTION_START_CURRENT_ROW) - { - if (frameOptions & FRAMEOPTION_ROWS) - { - /* rows before current row are out of frame */ - if (pos < winstate->currentpos) - return false; - } - else if (frameOptions & FRAMEOPTION_RANGE) - { - /* preceding row that is not peer is out of frame */ - if (pos < winstate->currentpos && - !are_peers(winstate, slot, winstate->ss.ss_ScanTupleSlot)) - return false; - } - else - Assert(false); - } - else if (frameOptions & FRAMEOPTION_START_VALUE) - { - if (frameOptions & FRAMEOPTION_ROWS) - { - int64 offset = DatumGetInt64(winstate->startOffsetValue); - - /* rows before current row + offset are out of frame */ - if (frameOptions & FRAMEOPTION_START_VALUE_PRECEDING) - offset = -offset; - - if (pos < winstate->currentpos + offset) - return false; - } - else if (frameOptions & FRAMEOPTION_RANGE) - { - /* parser should have rejected this */ - elog(ERROR, "window frame with value offset is not implemented"); - } - else - Assert(false); - } + /* + * First, check frame starting conditions. We might as well delegate this + * to update_frameheadpos always; it doesn't add any notable cost. + */ + update_frameheadpos(winstate); + if (pos < winstate->frameheadpos) + return 0; - /* Okay so far, now check frame ending conditions */ + /* + * Okay so far, now check frame ending conditions. Here, we avoid calling + * update_frametailpos in simple cases, so as not to spool tuples further + * ahead than necessary. 
+ */ if (frameOptions & FRAMEOPTION_END_CURRENT_ROW) { if (frameOptions & FRAMEOPTION_ROWS) { /* rows after current row are out of frame */ if (pos > winstate->currentpos) - return false; + return -1; } - else if (frameOptions & FRAMEOPTION_RANGE) + else if (frameOptions & (FRAMEOPTION_RANGE | FRAMEOPTION_GROUPS)) { /* following row that is not peer is out of frame */ if (pos > winstate->currentpos && !are_peers(winstate, slot, winstate->ss.ss_ScanTupleSlot)) - return false; + return -1; } else Assert(false); } - else if (frameOptions & FRAMEOPTION_END_VALUE) + else if (frameOptions & FRAMEOPTION_END_OFFSET) { if (frameOptions & FRAMEOPTION_ROWS) { int64 offset = DatumGetInt64(winstate->endOffsetValue); /* rows after current row + offset are out of frame */ - if (frameOptions & FRAMEOPTION_END_VALUE_PRECEDING) + if (frameOptions & FRAMEOPTION_END_OFFSET_PRECEDING) offset = -offset; if (pos > winstate->currentpos + offset) - return false; + return -1; } - else if (frameOptions & FRAMEOPTION_RANGE) + else if (frameOptions & (FRAMEOPTION_RANGE | FRAMEOPTION_GROUPS)) { - /* parser should have rejected this */ - elog(ERROR, "window frame with value offset is not implemented"); + /* hard cases, so delegate to update_frametailpos */ + update_frametailpos(winstate); + if (pos >= winstate->frametailpos) + return -1; } else Assert(false); } + /* Check exclusion clause */ + if (frameOptions & FRAMEOPTION_EXCLUDE_CURRENT_ROW) + { + if (pos == winstate->currentpos) + return 0; + } + else if ((frameOptions & FRAMEOPTION_EXCLUDE_GROUP) || + ((frameOptions & FRAMEOPTION_EXCLUDE_TIES) && + pos != winstate->currentpos)) + { + WindowAgg *node = (WindowAgg *) winstate->ss.ps.plan; + + /* If no ORDER BY, all rows are peers with each other */ + if (node->ordNumCols == 0) + return 0; + /* Otherwise, check the group boundaries */ + if (pos >= winstate->groupheadpos) + { + update_grouptailpos(winstate); + if (pos < winstate->grouptailpos) + return 0; + } + } + /* If we get here, it's in frame */ - return true; + return 1; } /* * update_frameheadpos * make frameheadpos valid for the current row * - * Uses the winobj's read pointer for any required fetches; hence, if the - * frame mode is one that requires row comparisons, the winobj's mark must - * not be past the currently known frame head. Also uses the specified slot - * for any required fetches. + * Note that frameheadpos is computed without regard for any window exclusion + * clause; the current row and/or its peers are considered part of the frame + * for this purpose even if they must be excluded later. + * + * May clobber winstate->temp_slot_2. 
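row_is_in_frame now distinguishes three outcomes, and the aggregation loop breaks on -1, skips on 0, and aggregates on 1. A self-contained sketch of such a tri-state check driving a loop, using a half-open frame with the current row excluded (plain positions stand in for tuples):

#include <stdio.h>

/*
 * Classify a row position against a frame [head, tail) with the current
 * row excluded:  1 = aggregate it,  0 = skip it but keep scanning,
 * -1 = stop, nothing later can be in frame.
 */
static int
row_in_frame(long pos, long head, long tail, long current, int exclude_current)
{
    if (pos >= tail)
        return -1;              /* past the frame end: no later row qualifies */
    if (pos < head)
        return 0;               /* before the head: later rows may qualify */
    if (exclude_current && pos == current)
        return 0;               /* excluded, but rows after it still count */
    return 1;
}

int
main(void)
{
    long sum = 0;
    long head = 1, tail = 5, current = 3;

    for (long pos = 0;; pos++)
    {
        int ret = row_in_frame(pos, head, tail, current, 1);

        if (ret < 0)
            break;              /* end of frame reached */
        if (ret == 0)
            continue;           /* not in frame, try the next row */
        sum += pos;             /* "aggregate" the in-frame row */
    }
    printf("aggregated positions sum to %ld\n", sum);   /* 1+2+4 = 7 */
    return 0;
}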
*/ static void -update_frameheadpos(WindowObject winobj, TupleTableSlot *slot) +update_frameheadpos(WindowAggState *winstate) { - WindowAggState *winstate = winobj->winstate; WindowAgg *node = (WindowAgg *) winstate->ss.ps.plan; int frameOptions = winstate->frameOptions; + MemoryContext oldcontext; if (winstate->framehead_valid) return; /* already known for current row */ + /* We may be called in a short-lived context */ + oldcontext = MemoryContextSwitchTo(winstate->ss.ps.ps_ExprContext->ecxt_per_query_memory); + if (frameOptions & FRAMEOPTION_START_UNBOUNDED_PRECEDING) { /* In UNBOUNDED PRECEDING mode, frame head is always row 0 */ @@ -1401,58 +1484,67 @@ update_frameheadpos(WindowObject winobj, TupleTableSlot *slot) winstate->frameheadpos = winstate->currentpos; winstate->framehead_valid = true; } - else if (frameOptions & FRAMEOPTION_RANGE) + else if (frameOptions & (FRAMEOPTION_RANGE | FRAMEOPTION_GROUPS)) { - int64 fhprev; - /* If no ORDER BY, all rows are peers with each other */ if (node->ordNumCols == 0) { winstate->frameheadpos = 0; winstate->framehead_valid = true; + MemoryContextSwitchTo(oldcontext); return; } /* - * In RANGE START_CURRENT mode, frame head is the first row that - * is a peer of current row. We search backwards from current, - * which could be a bit inefficient if peer sets are large. Might - * be better to have a separate read pointer that moves forward - * tracking the frame head. + * In RANGE or GROUPS START_CURRENT_ROW mode, frame head is the + * first row that is a peer of current row. We keep a copy of the + * last-known frame head row in framehead_slot, and advance as + * necessary. Note that if we reach end of partition, we will + * leave frameheadpos = end+1 and framehead_slot empty. */ - fhprev = winstate->currentpos - 1; - for (;;) + tuplestore_select_read_pointer(winstate->buffer, + winstate->framehead_ptr); + if (winstate->frameheadpos == 0 && + TupIsNull(winstate->framehead_slot)) { - /* assume the frame head can't go backwards */ - if (fhprev < winstate->frameheadpos) - break; - if (!window_gettupleslot(winobj, fhprev, slot)) - break; /* start of partition */ - if (!are_peers(winstate, slot, winstate->ss.ss_ScanTupleSlot)) - break; /* not peer of current row */ - fhprev--; + /* fetch first row into framehead_slot, if we didn't already */ + if (!tuplestore_gettupleslot(winstate->buffer, true, true, + winstate->framehead_slot)) + elog(ERROR, "unexpected end of tuplestore"); + } + + while (!TupIsNull(winstate->framehead_slot)) + { + if (are_peers(winstate, winstate->framehead_slot, + winstate->ss.ss_ScanTupleSlot)) + break; /* this row is the correct frame head */ + /* Note we advance frameheadpos even if the fetch fails */ + winstate->frameheadpos++; + spool_tuples(winstate, winstate->frameheadpos); + if (!tuplestore_gettupleslot(winstate->buffer, true, true, + winstate->framehead_slot)) + break; /* end of partition */ } - winstate->frameheadpos = fhprev + 1; winstate->framehead_valid = true; } else Assert(false); } - else if (frameOptions & FRAMEOPTION_START_VALUE) + else if (frameOptions & FRAMEOPTION_START_OFFSET) { if (frameOptions & FRAMEOPTION_ROWS) { /* In ROWS mode, bound is physically n before/after current */ int64 offset = DatumGetInt64(winstate->startOffsetValue); - if (frameOptions & FRAMEOPTION_START_VALUE_PRECEDING) + if (frameOptions & FRAMEOPTION_START_OFFSET_PRECEDING) offset = -offset; winstate->frameheadpos = winstate->currentpos + offset; /* frame head can't go before first row */ if (winstate->frameheadpos < 0) 
winstate->frameheadpos = 0; - else if (winstate->frameheadpos > winstate->currentpos) + else if (winstate->frameheadpos > winstate->currentpos + 1) { /* make sure frameheadpos is not past end of partition */ spool_tuples(winstate, winstate->frameheadpos - 1); @@ -1463,40 +1555,176 @@ update_frameheadpos(WindowObject winobj, TupleTableSlot *slot) } else if (frameOptions & FRAMEOPTION_RANGE) { - /* parser should have rejected this */ - elog(ERROR, "window frame with value offset is not implemented"); + /* + * In RANGE START_OFFSET mode, frame head is the first row that + * satisfies the in_range constraint relative to the current row. + * We keep a copy of the last-known frame head row in + * framehead_slot, and advance as necessary. Note that if we + * reach end of partition, we will leave frameheadpos = end+1 and + * framehead_slot empty. + */ + int sortCol = node->ordColIdx[0]; + bool sub, + less; + + /* We must have an ordering column */ + Assert(node->ordNumCols == 1); + + /* Precompute flags for in_range checks */ + if (frameOptions & FRAMEOPTION_START_OFFSET_PRECEDING) + sub = true; /* subtract startOffset from current row */ + else + sub = false; /* add it */ + less = false; /* normally, we want frame head >= sum */ + /* If sort order is descending, flip both flags */ + if (!winstate->inRangeAsc) + { + sub = !sub; + less = true; + } + + tuplestore_select_read_pointer(winstate->buffer, + winstate->framehead_ptr); + if (winstate->frameheadpos == 0 && + TupIsNull(winstate->framehead_slot)) + { + /* fetch first row into framehead_slot, if we didn't already */ + if (!tuplestore_gettupleslot(winstate->buffer, true, true, + winstate->framehead_slot)) + elog(ERROR, "unexpected end of tuplestore"); + } + + while (!TupIsNull(winstate->framehead_slot)) + { + Datum headval, + currval; + bool headisnull, + currisnull; + + headval = slot_getattr(winstate->framehead_slot, sortCol, + &headisnull); + currval = slot_getattr(winstate->ss.ss_ScanTupleSlot, sortCol, + &currisnull); + if (headisnull || currisnull) + { + /* order of the rows depends only on nulls_first */ + if (winstate->inRangeNullsFirst) + { + /* advance head if head is null and curr is not */ + if (!headisnull || currisnull) + break; + } + else + { + /* advance head if head is not null and curr is null */ + if (headisnull || !currisnull) + break; + } + } + else + { + if (DatumGetBool(FunctionCall5Coll(&winstate->startInRangeFunc, + winstate->inRangeColl, + headval, + currval, + winstate->startOffsetValue, + BoolGetDatum(sub), + BoolGetDatum(less)))) + break; /* this row is the correct frame head */ + } + /* Note we advance frameheadpos even if the fetch fails */ + winstate->frameheadpos++; + spool_tuples(winstate, winstate->frameheadpos); + if (!tuplestore_gettupleslot(winstate->buffer, true, true, + winstate->framehead_slot)) + break; /* end of partition */ + } + winstate->framehead_valid = true; + } + else if (frameOptions & FRAMEOPTION_GROUPS) + { + /* + * In GROUPS START_OFFSET mode, frame head is the first row of the + * first peer group whose number satisfies the offset constraint. + * We keep a copy of the last-known frame head row in + * framehead_slot, and advance as necessary. Note that if we + * reach end of partition, we will leave frameheadpos = end+1 and + * framehead_slot empty. 
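The RANGE START_OFFSET branch above precomputes two flags for the in_range checks: sub selects base minus versus base plus the offset, and less selects a <= versus >= comparison, with both flipped for descending sort order. A simplified integer-only illustration of a check with that flag shape; it is not the actual in_range support function:

#include <stdbool.h>
#include <stdio.h>

/*
 * Simplified integer version of an "in_range" test: compute base +/- offset
 * and ask whether val lies on the requested side of that bound.
 *   sub  = true  -> bound is base - offset, else base + offset
 *   less = true  -> require val <= bound, else require val >= bound
 */
static bool
int_in_range(long val, long base, long offset, bool sub, bool less)
{
    long bound = sub ? base - offset : base + offset;

    return less ? (val <= bound) : (val >= bound);
}

int
main(void)
{
    long current = 50, offset = 10;

    /* Ascending order, "10 PRECEDING": frame head is the first row >= 40. */
    printf("%d\n", int_in_range(45, current, offset, true, false));  /* 1 */
    printf("%d\n", int_in_range(35, current, offset, true, false));  /* 0 */

    /* Descending order flips both flags: head is the first row <= 60. */
    printf("%d\n", int_in_range(55, current, offset, false, true));  /* 1 */
    printf("%d\n", int_in_range(65, current, offset, false, true));  /* 0 */
    return 0;
}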
+ */ + int64 offset = DatumGetInt64(winstate->startOffsetValue); + int64 minheadgroup; + + if (frameOptions & FRAMEOPTION_START_OFFSET_PRECEDING) + minheadgroup = winstate->currentgroup - offset; + else + minheadgroup = winstate->currentgroup + offset; + + tuplestore_select_read_pointer(winstate->buffer, + winstate->framehead_ptr); + if (winstate->frameheadpos == 0 && + TupIsNull(winstate->framehead_slot)) + { + /* fetch first row into framehead_slot, if we didn't already */ + if (!tuplestore_gettupleslot(winstate->buffer, true, true, + winstate->framehead_slot)) + elog(ERROR, "unexpected end of tuplestore"); + } + + while (!TupIsNull(winstate->framehead_slot)) + { + if (winstate->frameheadgroup >= minheadgroup) + break; /* this row is the correct frame head */ + ExecCopySlot(winstate->temp_slot_2, winstate->framehead_slot); + /* Note we advance frameheadpos even if the fetch fails */ + winstate->frameheadpos++; + spool_tuples(winstate, winstate->frameheadpos); + if (!tuplestore_gettupleslot(winstate->buffer, true, true, + winstate->framehead_slot)) + break; /* end of partition */ + if (!are_peers(winstate, winstate->temp_slot_2, + winstate->framehead_slot)) + winstate->frameheadgroup++; + } + ExecClearTuple(winstate->temp_slot_2); + winstate->framehead_valid = true; } else Assert(false); } else Assert(false); + + MemoryContextSwitchTo(oldcontext); } /* * update_frametailpos * make frametailpos valid for the current row * - * Uses the winobj's read pointer for any required fetches; hence, if the - * frame mode is one that requires row comparisons, the winobj's mark must - * not be past the currently known frame tail. Also uses the specified slot - * for any required fetches. + * Note that frametailpos is computed without regard for any window exclusion + * clause; the current row and/or its peers are considered part of the frame + * for this purpose even if they must be excluded later. + * + * May clobber winstate->temp_slot_2. 
*/ static void -update_frametailpos(WindowObject winobj, TupleTableSlot *slot) +update_frametailpos(WindowAggState *winstate) { - WindowAggState *winstate = winobj->winstate; WindowAgg *node = (WindowAgg *) winstate->ss.ps.plan; int frameOptions = winstate->frameOptions; + MemoryContext oldcontext; if (winstate->frametail_valid) return; /* already known for current row */ + /* We may be called in a short-lived context */ + oldcontext = MemoryContextSwitchTo(winstate->ss.ps.ps_ExprContext->ecxt_per_query_memory); + if (frameOptions & FRAMEOPTION_END_UNBOUNDED_FOLLOWING) { /* In UNBOUNDED FOLLOWING mode, all partition rows are in frame */ spool_tuples(winstate, -1); - winstate->frametailpos = winstate->spooled_rows - 1; + winstate->frametailpos = winstate->spooled_rows; winstate->frametail_valid = true; } else if (frameOptions & FRAMEOPTION_END_CURRENT_ROW) @@ -1504,77 +1732,280 @@ update_frametailpos(WindowObject winobj, TupleTableSlot *slot) if (frameOptions & FRAMEOPTION_ROWS) { /* In ROWS mode, exactly the rows up to current are in frame */ - winstate->frametailpos = winstate->currentpos; + winstate->frametailpos = winstate->currentpos + 1; winstate->frametail_valid = true; } - else if (frameOptions & FRAMEOPTION_RANGE) + else if (frameOptions & (FRAMEOPTION_RANGE | FRAMEOPTION_GROUPS)) { - int64 ftnext; - /* If no ORDER BY, all rows are peers with each other */ if (node->ordNumCols == 0) { spool_tuples(winstate, -1); - winstate->frametailpos = winstate->spooled_rows - 1; + winstate->frametailpos = winstate->spooled_rows; winstate->frametail_valid = true; + MemoryContextSwitchTo(oldcontext); return; } /* - * Else we have to search for the first non-peer of the current - * row. We assume the current value of frametailpos is a lower - * bound on the possible frame tail location, ie, frame tail never - * goes backward, and that currentpos is also a lower bound, ie, - * frame end always >= current row. + * In RANGE or GROUPS END_CURRENT_ROW mode, frame end is the last + * row that is a peer of current row, frame tail is the row after + * that (if any). We keep a copy of the last-known frame tail row + * in frametail_slot, and advance as necessary. Note that if we + * reach end of partition, we will leave frametailpos = end+1 and + * frametail_slot empty. 
*/ - ftnext = Max(winstate->frametailpos, winstate->currentpos) + 1; - for (;;) + tuplestore_select_read_pointer(winstate->buffer, + winstate->frametail_ptr); + if (winstate->frametailpos == 0 && + TupIsNull(winstate->frametail_slot)) + { + /* fetch first row into frametail_slot, if we didn't already */ + if (!tuplestore_gettupleslot(winstate->buffer, true, true, + winstate->frametail_slot)) + elog(ERROR, "unexpected end of tuplestore"); + } + + while (!TupIsNull(winstate->frametail_slot)) { - if (!window_gettupleslot(winobj, ftnext, slot)) + if (winstate->frametailpos > winstate->currentpos && + !are_peers(winstate, winstate->frametail_slot, + winstate->ss.ss_ScanTupleSlot)) + break; /* this row is the frame tail */ + /* Note we advance frametailpos even if the fetch fails */ + winstate->frametailpos++; + spool_tuples(winstate, winstate->frametailpos); + if (!tuplestore_gettupleslot(winstate->buffer, true, true, + winstate->frametail_slot)) break; /* end of partition */ - if (!are_peers(winstate, slot, winstate->ss.ss_ScanTupleSlot)) - break; /* not peer of current row */ - ftnext++; } - winstate->frametailpos = ftnext - 1; winstate->frametail_valid = true; } else Assert(false); } - else if (frameOptions & FRAMEOPTION_END_VALUE) + else if (frameOptions & FRAMEOPTION_END_OFFSET) { if (frameOptions & FRAMEOPTION_ROWS) { /* In ROWS mode, bound is physically n before/after current */ int64 offset = DatumGetInt64(winstate->endOffsetValue); - if (frameOptions & FRAMEOPTION_END_VALUE_PRECEDING) + if (frameOptions & FRAMEOPTION_END_OFFSET_PRECEDING) offset = -offset; - winstate->frametailpos = winstate->currentpos + offset; - /* smallest allowable value of frametailpos is -1 */ + winstate->frametailpos = winstate->currentpos + offset + 1; + /* smallest allowable value of frametailpos is 0 */ if (winstate->frametailpos < 0) - winstate->frametailpos = -1; - else if (winstate->frametailpos > winstate->currentpos) + winstate->frametailpos = 0; + else if (winstate->frametailpos > winstate->currentpos + 1) { - /* make sure frametailpos is not past last row of partition */ - spool_tuples(winstate, winstate->frametailpos); - if (winstate->frametailpos >= winstate->spooled_rows) - winstate->frametailpos = winstate->spooled_rows - 1; + /* make sure frametailpos is not past end of partition */ + spool_tuples(winstate, winstate->frametailpos - 1); + if (winstate->frametailpos > winstate->spooled_rows) + winstate->frametailpos = winstate->spooled_rows; } winstate->frametail_valid = true; } else if (frameOptions & FRAMEOPTION_RANGE) { - /* parser should have rejected this */ - elog(ERROR, "window frame with value offset is not implemented"); + /* + * In RANGE END_OFFSET mode, frame end is the last row that + * satisfies the in_range constraint relative to the current row, + * frame tail is the row after that (if any). We keep a copy of + * the last-known frame tail row in frametail_slot, and advance as + * necessary. Note that if we reach end of partition, we will + * leave frametailpos = end+1 and frametail_slot empty. 
+ */ + int sortCol = node->ordColIdx[0]; + bool sub, + less; + + /* We must have an ordering column */ + Assert(node->ordNumCols == 1); + + /* Precompute flags for in_range checks */ + if (frameOptions & FRAMEOPTION_END_OFFSET_PRECEDING) + sub = true; /* subtract endOffset from current row */ + else + sub = false; /* add it */ + less = true; /* normally, we want frame tail <= sum */ + /* If sort order is descending, flip both flags */ + if (!winstate->inRangeAsc) + { + sub = !sub; + less = false; + } + + tuplestore_select_read_pointer(winstate->buffer, + winstate->frametail_ptr); + if (winstate->frametailpos == 0 && + TupIsNull(winstate->frametail_slot)) + { + /* fetch first row into frametail_slot, if we didn't already */ + if (!tuplestore_gettupleslot(winstate->buffer, true, true, + winstate->frametail_slot)) + elog(ERROR, "unexpected end of tuplestore"); + } + + while (!TupIsNull(winstate->frametail_slot)) + { + Datum tailval, + currval; + bool tailisnull, + currisnull; + + tailval = slot_getattr(winstate->frametail_slot, sortCol, + &tailisnull); + currval = slot_getattr(winstate->ss.ss_ScanTupleSlot, sortCol, + &currisnull); + if (tailisnull || currisnull) + { + /* order of the rows depends only on nulls_first */ + if (winstate->inRangeNullsFirst) + { + /* advance tail if tail is null or curr is not */ + if (!tailisnull) + break; + } + else + { + /* advance tail if tail is not null or curr is null */ + if (!currisnull) + break; + } + } + else + { + if (!DatumGetBool(FunctionCall5Coll(&winstate->endInRangeFunc, + winstate->inRangeColl, + tailval, + currval, + winstate->endOffsetValue, + BoolGetDatum(sub), + BoolGetDatum(less)))) + break; /* this row is the correct frame tail */ + } + /* Note we advance frametailpos even if the fetch fails */ + winstate->frametailpos++; + spool_tuples(winstate, winstate->frametailpos); + if (!tuplestore_gettupleslot(winstate->buffer, true, true, + winstate->frametail_slot)) + break; /* end of partition */ + } + winstate->frametail_valid = true; + } + else if (frameOptions & FRAMEOPTION_GROUPS) + { + /* + * In GROUPS END_OFFSET mode, frame end is the last row of the + * last peer group whose number satisfies the offset constraint, + * and frame tail is the row after that (if any). We keep a copy + * of the last-known frame tail row in frametail_slot, and advance + * as necessary. Note that if we reach end of partition, we will + * leave frametailpos = end+1 and frametail_slot empty. 
+ */ + int64 offset = DatumGetInt64(winstate->endOffsetValue); + int64 maxtailgroup; + + if (frameOptions & FRAMEOPTION_END_OFFSET_PRECEDING) + maxtailgroup = winstate->currentgroup - offset; + else + maxtailgroup = winstate->currentgroup + offset; + + tuplestore_select_read_pointer(winstate->buffer, + winstate->frametail_ptr); + if (winstate->frametailpos == 0 && + TupIsNull(winstate->frametail_slot)) + { + /* fetch first row into frametail_slot, if we didn't already */ + if (!tuplestore_gettupleslot(winstate->buffer, true, true, + winstate->frametail_slot)) + elog(ERROR, "unexpected end of tuplestore"); + } + + while (!TupIsNull(winstate->frametail_slot)) + { + if (winstate->frametailgroup > maxtailgroup) + break; /* this row is the correct frame tail */ + ExecCopySlot(winstate->temp_slot_2, winstate->frametail_slot); + /* Note we advance frametailpos even if the fetch fails */ + winstate->frametailpos++; + spool_tuples(winstate, winstate->frametailpos); + if (!tuplestore_gettupleslot(winstate->buffer, true, true, + winstate->frametail_slot)) + break; /* end of partition */ + if (!are_peers(winstate, winstate->temp_slot_2, + winstate->frametail_slot)) + winstate->frametailgroup++; + } + ExecClearTuple(winstate->temp_slot_2); + winstate->frametail_valid = true; } else Assert(false); } else Assert(false); + + MemoryContextSwitchTo(oldcontext); +} + +/* + * update_grouptailpos + * make grouptailpos valid for the current row + * + * May clobber winstate->temp_slot_2. + */ +static void +update_grouptailpos(WindowAggState *winstate) +{ + WindowAgg *node = (WindowAgg *) winstate->ss.ps.plan; + MemoryContext oldcontext; + + if (winstate->grouptail_valid) + return; /* already known for current row */ + + /* We may be called in a short-lived context */ + oldcontext = MemoryContextSwitchTo(winstate->ss.ps.ps_ExprContext->ecxt_per_query_memory); + + /* If no ORDER BY, all rows are peers with each other */ + if (node->ordNumCols == 0) + { + spool_tuples(winstate, -1); + winstate->grouptailpos = winstate->spooled_rows; + winstate->grouptail_valid = true; + MemoryContextSwitchTo(oldcontext); + return; + } + + /* + * Because grouptail_valid is reset only when current row advances into a + * new peer group, we always reach here knowing that grouptailpos needs to + * be advanced by at least one row. Hence, unlike the otherwise similar + * case for frame tail tracking, we do not need persistent storage of the + * group tail row. + */ + Assert(winstate->grouptailpos <= winstate->currentpos); + tuplestore_select_read_pointer(winstate->buffer, + winstate->grouptail_ptr); + for (;;) + { + /* Note we advance grouptailpos even if the fetch fails */ + winstate->grouptailpos++; + spool_tuples(winstate, winstate->grouptailpos); + if (!tuplestore_gettupleslot(winstate->buffer, true, true, + winstate->temp_slot_2)) + break; /* end of partition */ + if (winstate->grouptailpos > winstate->currentpos && + !are_peers(winstate, winstate->temp_slot_2, + winstate->ss.ss_ScanTupleSlot)) + break; /* this row is the group tail */ + } + ExecClearTuple(winstate->temp_slot_2); + winstate->grouptail_valid = true; + + MemoryContextSwitchTo(oldcontext); } @@ -1601,7 +2032,9 @@ ExecWindowAgg(PlanState *pstate) return NULL; /* - * Compute frame offset values, if any, during first call. + * Compute frame offset values, if any, during first call (or after a + * rescan). These are assumed to hold constant throughout the scan; if + * user gives us a volatile expression, we'll only use its initial value. 
*/ if (winstate->all_first) { @@ -1612,7 +2045,7 @@ ExecWindowAgg(PlanState *pstate) int16 len; bool byval; - if (frameOptions & FRAMEOPTION_START_VALUE) + if (frameOptions & FRAMEOPTION_START_OFFSET) { Assert(winstate->startOffset != NULL); value = ExecEvalExprSwitchContext(winstate->startOffset, @@ -1626,18 +2059,18 @@ ExecWindowAgg(PlanState *pstate) get_typlenbyval(exprType((Node *) winstate->startOffset->expr), &len, &byval); winstate->startOffsetValue = datumCopy(value, byval, len); - if (frameOptions & FRAMEOPTION_ROWS) + if (frameOptions & (FRAMEOPTION_ROWS | FRAMEOPTION_GROUPS)) { /* value is known to be int8 */ int64 offset = DatumGetInt64(value); if (offset < 0) ereport(ERROR, - (errcode(ERRCODE_INVALID_PARAMETER_VALUE), + (errcode(ERRCODE_INVALID_PRECEDING_OR_FOLLOWING_SIZE), errmsg("frame starting offset must not be negative"))); } } - if (frameOptions & FRAMEOPTION_END_VALUE) + if (frameOptions & FRAMEOPTION_END_OFFSET) { Assert(winstate->endOffset != NULL); value = ExecEvalExprSwitchContext(winstate->endOffset, @@ -1651,14 +2084,14 @@ ExecWindowAgg(PlanState *pstate) get_typlenbyval(exprType((Node *) winstate->endOffset->expr), &len, &byval); winstate->endOffsetValue = datumCopy(value, byval, len); - if (frameOptions & FRAMEOPTION_ROWS) + if (frameOptions & (FRAMEOPTION_ROWS | FRAMEOPTION_GROUPS)) { /* value is known to be int8 */ int64 offset = DatumGetInt64(value); if (offset < 0) ereport(ERROR, - (errcode(ERRCODE_INVALID_PARAMETER_VALUE), + (errcode(ERRCODE_INVALID_PRECEDING_OR_FOLLOWING_SIZE), errmsg("frame ending offset must not be negative"))); } } @@ -1678,6 +2111,7 @@ ExecWindowAgg(PlanState *pstate) /* This might mean that the frame moves, too */ winstate->framehead_valid = false; winstate->frametail_valid = false; + /* we don't need to invalidate grouptail here; see below */ } /* @@ -1717,12 +2151,38 @@ ExecWindowAgg(PlanState *pstate) * out of the tuplestore, since window function evaluation might cause the * tuplestore to dump its state to disk.) * + * In GROUPS mode, or when tracking a group-oriented exclusion clause, we + * must also detect entering a new peer group and update associated state + * when that happens. We use temp_slot_2 to temporarily hold the previous + * row for this purpose. + * * Current row must be in the tuplestore, since we spooled it above. 
*/ tuplestore_select_read_pointer(winstate->buffer, winstate->current_ptr); - if (!tuplestore_gettupleslot(winstate->buffer, true, true, - winstate->ss.ss_ScanTupleSlot)) - elog(ERROR, "unexpected end of tuplestore"); + if ((winstate->frameOptions & (FRAMEOPTION_GROUPS | + FRAMEOPTION_EXCLUDE_GROUP | + FRAMEOPTION_EXCLUDE_TIES)) && + winstate->currentpos > 0) + { + ExecCopySlot(winstate->temp_slot_2, winstate->ss.ss_ScanTupleSlot); + if (!tuplestore_gettupleslot(winstate->buffer, true, true, + winstate->ss.ss_ScanTupleSlot)) + elog(ERROR, "unexpected end of tuplestore"); + if (!are_peers(winstate, winstate->temp_slot_2, + winstate->ss.ss_ScanTupleSlot)) + { + winstate->currentgroup++; + winstate->groupheadpos = winstate->currentpos; + winstate->grouptail_valid = false; + } + ExecClearTuple(winstate->temp_slot_2); + } + else + { + if (!tuplestore_gettupleslot(winstate->buffer, true, true, + winstate->ss.ss_ScanTupleSlot)) + elog(ERROR, "unexpected end of tuplestore"); + } /* * Evaluate true window functions @@ -1745,6 +2205,23 @@ ExecWindowAgg(PlanState *pstate) if (winstate->numaggs > 0) eval_windowaggregates(winstate); + /* + * If we have created auxiliary read pointers for the frame or group + * boundaries, force them to be kept up-to-date, because we don't know + * whether the window function(s) will do anything that requires that. + * Failing to advance the pointers would result in being unable to trim + * data from the tuplestore, which is bad. (If we could know in advance + * whether the window functions will use frame boundary info, we could + * skip creating these pointers in the first place ... but unfortunately + * the window function API doesn't require that.) + */ + if (winstate->framehead_ptr >= 0) + update_frameheadpos(winstate); + if (winstate->frametail_ptr >= 0) + update_frametailpos(winstate); + if (winstate->grouptail_ptr >= 0) + update_grouptailpos(winstate); + /* * Truncate any no-longer-needed rows from the tuplestore. */ @@ -1776,10 +2253,12 @@ ExecInitWindowAgg(WindowAgg *node, EState *estate, int eflags) ExprContext *tmpcontext; WindowStatePerFunc perfunc; WindowStatePerAgg peragg; + int frameOptions = node->frameOptions; int numfuncs, wfuncno, numaggs, aggno; + TupleDesc scanDesc; ListCell *l; /* check for unsupported flags */ @@ -1820,16 +2299,6 @@ ExecInitWindowAgg(WindowAgg *node, EState *estate, int eflags) "WindowAgg Aggregates", ALLOCSET_DEFAULT_SIZES); - /* - * tuple table initialization - */ - ExecInitScanTupleSlot(estate, &winstate->ss); - ExecInitResultTupleSlot(estate, &winstate->ss.ps); - winstate->first_part_slot = ExecInitExtraTupleSlot(estate); - winstate->agg_row_slot = ExecInitExtraTupleSlot(estate); - winstate->temp_slot_1 = ExecInitExtraTupleSlot(estate); - winstate->temp_slot_2 = ExecInitExtraTupleSlot(estate); - /* * WindowAgg nodes never have quals, since they can only occur at the * logical top level of a query (ie, after any WHERE or HAVING filters) @@ -1847,30 +2316,58 @@ ExecInitWindowAgg(WindowAgg *node, EState *estate, int eflags) * initialize source tuple type (which is also the tuple type that we'll * store in the tuplestore and use in all our working slots). 
*/ - ExecAssignScanTypeFromOuterPlan(&winstate->ss); + ExecCreateScanSlotFromOuterPlan(estate, &winstate->ss); + scanDesc = winstate->ss.ss_ScanTupleSlot->tts_tupleDescriptor; + + /* + * tuple table initialization + */ + winstate->first_part_slot = ExecInitExtraTupleSlot(estate, scanDesc); + winstate->agg_row_slot = ExecInitExtraTupleSlot(estate, scanDesc); + winstate->temp_slot_1 = ExecInitExtraTupleSlot(estate, scanDesc); + winstate->temp_slot_2 = ExecInitExtraTupleSlot(estate, scanDesc); - ExecSetSlotDescriptor(winstate->first_part_slot, - winstate->ss.ss_ScanTupleSlot->tts_tupleDescriptor); - ExecSetSlotDescriptor(winstate->agg_row_slot, - winstate->ss.ss_ScanTupleSlot->tts_tupleDescriptor); - ExecSetSlotDescriptor(winstate->temp_slot_1, - winstate->ss.ss_ScanTupleSlot->tts_tupleDescriptor); - ExecSetSlotDescriptor(winstate->temp_slot_2, - winstate->ss.ss_ScanTupleSlot->tts_tupleDescriptor); + /* + * create frame head and tail slots only if needed (must create slots in + * exactly the same cases that update_frameheadpos and update_frametailpos + * need them) + */ + winstate->framehead_slot = winstate->frametail_slot = NULL; + + if (frameOptions & (FRAMEOPTION_RANGE | FRAMEOPTION_GROUPS)) + { + if (((frameOptions & FRAMEOPTION_START_CURRENT_ROW) && + node->ordNumCols != 0) || + (frameOptions & FRAMEOPTION_START_OFFSET)) + winstate->framehead_slot = ExecInitExtraTupleSlot(estate, scanDesc); + if (((frameOptions & FRAMEOPTION_END_CURRENT_ROW) && + node->ordNumCols != 0) || + (frameOptions & FRAMEOPTION_END_OFFSET)) + winstate->frametail_slot = ExecInitExtraTupleSlot(estate, scanDesc); + } /* - * Initialize result tuple type and projection info. + * Initialize result slot, type and projection. */ - ExecAssignResultTypeFromTL(&winstate->ss.ps); + ExecInitResultTupleSlotTL(&winstate->ss.ps); ExecAssignProjectionInfo(&winstate->ss.ps, NULL); /* Set up data for comparing tuples */ if (node->partNumCols > 0) - winstate->partEqfunctions = execTuplesMatchPrepare(node->partNumCols, - node->partOperators); + winstate->partEqfunction = + execTuplesMatchPrepare(scanDesc, + node->partNumCols, + node->partColIdx, + node->partOperators, + &winstate->ss.ps); + if (node->ordNumCols > 0) - winstate->ordEqfunctions = execTuplesMatchPrepare(node->ordNumCols, - node->ordOperators); + winstate->ordEqfunction = + execTuplesMatchPrepare(scanDesc, + node->ordNumCols, + node->ordColIdx, + node->ordOperators, + &winstate->ss.ps); /* * WindowAgg nodes use aggvalues and aggnulls as well as Agg nodes. 
@@ -1927,7 +2424,7 @@ ExecInitWindowAgg(WindowAgg *node, EState *estate, int eflags) aclresult = pg_proc_aclcheck(wfunc->winfnoid, GetUserId(), ACL_EXECUTE); if (aclresult != ACLCHECK_OK) - aclcheck_error(aclresult, ACL_KIND_PROC, + aclcheck_error(aclresult, OBJECT_FUNCTION, get_func_name(wfunc->winfnoid)); InvokeFunctionExecuteHook(wfunc->winfnoid); @@ -1990,7 +2487,7 @@ ExecInitWindowAgg(WindowAgg *node, EState *estate, int eflags) } /* copy frame options to state node for easy access */ - winstate->frameOptions = node->frameOptions; + winstate->frameOptions = frameOptions; /* initialize frame bound offset expressions */ winstate->startOffset = ExecInitExpr((Expr *) node->startOffset, @@ -1998,6 +2495,15 @@ ExecInitWindowAgg(WindowAgg *node, EState *estate, int eflags) winstate->endOffset = ExecInitExpr((Expr *) node->endOffset, (PlanState *) winstate); + /* Lookup in_range support functions if needed */ + if (OidIsValid(node->startInRangeFunc)) + fmgr_info(node->startInRangeFunc, &winstate->startInRangeFunc); + if (OidIsValid(node->endInRangeFunc)) + fmgr_info(node->endInRangeFunc, &winstate->endInRangeFunc); + winstate->inRangeColl = node->inRangeColl; + winstate->inRangeAsc = node->inRangeAsc; + winstate->inRangeNullsFirst = node->inRangeNullsFirst; + winstate->all_first = true; winstate->partition_spooled = false; winstate->more_partitions = false; @@ -2022,6 +2528,10 @@ ExecEndWindowAgg(WindowAggState *node) ExecClearTuple(node->agg_row_slot); ExecClearTuple(node->temp_slot_1); ExecClearTuple(node->temp_slot_2); + if (node->framehead_slot) + ExecClearTuple(node->framehead_slot); + if (node->frametail_slot) + ExecClearTuple(node->frametail_slot); /* * Free both the expr contexts. @@ -2067,6 +2577,10 @@ ExecReScanWindowAgg(WindowAggState *node) ExecClearTuple(node->agg_row_slot); ExecClearTuple(node->temp_slot_1); ExecClearTuple(node->temp_slot_2); + if (node->framehead_slot) + ExecClearTuple(node->framehead_slot); + if (node->frametail_slot) + ExecClearTuple(node->frametail_slot); /* Forget current wfunc values */ MemSet(econtext->ecxt_aggvalues, 0, sizeof(Datum) * node->numfuncs); @@ -2096,10 +2610,12 @@ initialize_peragg(WindowAggState *winstate, WindowFunc *wfunc, Oid aggtranstype; AttrNumber initvalAttNo; AclResult aclresult; + bool use_ma_code; Oid transfn_oid, invtransfn_oid, finalfn_oid; bool finalextra; + char finalmodify; Expr *transfnexpr, *invtransfnexpr, *finalfnexpr; @@ -2125,20 +2641,32 @@ initialize_peragg(WindowAggState *winstate, WindowFunc *wfunc, * Figure out whether we want to use the moving-aggregate implementation, * and collect the right set of fields from the pg_attribute entry. * - * If the frame head can't move, we don't need moving-aggregate code. Even - * if we'd like to use it, don't do so if the aggregate's arguments (and - * FILTER clause if any) contain any calls to volatile functions. - * Otherwise, the difference between restarting and not restarting the - * aggregation would be user-visible. + * It's possible that an aggregate would supply a safe moving-aggregate + * implementation and an unsafe normal one, in which case our hand is + * forced. Otherwise, if the frame head can't move, we don't need + * moving-aggregate code. Even if we'd like to use it, don't do so if the + * aggregate's arguments (and FILTER clause if any) contain any calls to + * volatile functions. Otherwise, the difference between restarting and + * not restarting the aggregation would be user-visible. 
*/ - if (OidIsValid(aggform->aggminvtransfn) && - !(winstate->frameOptions & FRAMEOPTION_START_UNBOUNDED_PRECEDING) && - !contain_volatile_functions((Node *) wfunc)) + if (!OidIsValid(aggform->aggminvtransfn)) + use_ma_code = false; /* sine qua non */ + else if (aggform->aggmfinalmodify == AGGMODIFY_READ_ONLY && + aggform->aggfinalmodify != AGGMODIFY_READ_ONLY) + use_ma_code = true; /* decision forced by safety */ + else if (winstate->frameOptions & FRAMEOPTION_START_UNBOUNDED_PRECEDING) + use_ma_code = false; /* non-moving frame head */ + else if (contain_volatile_functions((Node *) wfunc)) + use_ma_code = false; /* avoid possible behavioral change */ + else + use_ma_code = true; /* yes, let's use it */ + if (use_ma_code) { peraggstate->transfn_oid = transfn_oid = aggform->aggmtransfn; peraggstate->invtransfn_oid = invtransfn_oid = aggform->aggminvtransfn; peraggstate->finalfn_oid = finalfn_oid = aggform->aggmfinalfn; finalextra = aggform->aggmfinalextra; + finalmodify = aggform->aggmfinalmodify; aggtranstype = aggform->aggmtranstype; initvalAttNo = Anum_pg_aggregate_aggminitval; } @@ -2148,6 +2676,7 @@ initialize_peragg(WindowAggState *winstate, WindowFunc *wfunc, peraggstate->invtransfn_oid = invtransfn_oid = InvalidOid; peraggstate->finalfn_oid = finalfn_oid = aggform->aggfinalfn; finalextra = aggform->aggfinalextra; + finalmodify = aggform->aggfinalmodify; aggtranstype = aggform->aggtranstype; initvalAttNo = Anum_pg_aggregate_agginitval; } @@ -2173,7 +2702,7 @@ initialize_peragg(WindowAggState *winstate, WindowFunc *wfunc, aclresult = pg_proc_aclcheck(transfn_oid, aggOwner, ACL_EXECUTE); if (aclresult != ACLCHECK_OK) - aclcheck_error(aclresult, ACL_KIND_PROC, + aclcheck_error(aclresult, OBJECT_FUNCTION, get_func_name(transfn_oid)); InvokeFunctionExecuteHook(transfn_oid); @@ -2182,7 +2711,7 @@ initialize_peragg(WindowAggState *winstate, WindowFunc *wfunc, aclresult = pg_proc_aclcheck(invtransfn_oid, aggOwner, ACL_EXECUTE); if (aclresult != ACLCHECK_OK) - aclcheck_error(aclresult, ACL_KIND_PROC, + aclcheck_error(aclresult, OBJECT_FUNCTION, get_func_name(invtransfn_oid)); InvokeFunctionExecuteHook(invtransfn_oid); } @@ -2192,12 +2721,23 @@ initialize_peragg(WindowAggState *winstate, WindowFunc *wfunc, aclresult = pg_proc_aclcheck(finalfn_oid, aggOwner, ACL_EXECUTE); if (aclresult != ACLCHECK_OK) - aclcheck_error(aclresult, ACL_KIND_PROC, + aclcheck_error(aclresult, OBJECT_FUNCTION, get_func_name(finalfn_oid)); InvokeFunctionExecuteHook(finalfn_oid); } } + /* + * If the selected finalfn isn't read-only, we can't run this aggregate as + * a window function. This is a user-facing error, so we take a bit more + * care with the error message than elsewhere in this function. 
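As a concrete illustration of what the use_ma_code path above selects, here is a minimal sketch of the forward and inverse transition functions a moving-sum aggregate might provide. The names are hypothetical and module boilerplate is omitted; such an aggregate would be registered with CREATE AGGREGATE using MSFUNC/MINVFUNC/MSTYPE and a zero MINITCOND so the state is never NULL:

#include "postgres.h"
#include "fmgr.h"

PG_FUNCTION_INFO_V1(mysum_trans);
PG_FUNCTION_INFO_V1(mysum_inv);

/* Forward transition: add the incoming frame row's value into the state. */
Datum
mysum_trans(PG_FUNCTION_ARGS)
{
    int64       state = PG_GETARG_INT64(0);

    if (!PG_ARGISNULL(1))
        state += PG_GETARG_INT64(1);
    PG_RETURN_INT64(state);
}

/* Inverse transition: remove a value that has left the moving frame. */
Datum
mysum_inv(PG_FUNCTION_ARGS)
{
    int64       state = PG_GETARG_INT64(0);

    if (!PG_ARGISNULL(1))
        state -= PG_GETARG_INT64(1);
    PG_RETURN_INT64(state);
}

Note that, given the finalmodify check added just below, any final function such an aggregate supplies must be declared read-only for the aggregate to remain usable as a window function.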
+ */ + if (finalmodify != AGGMODIFY_READ_ONLY) + ereport(ERROR, + (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), + errmsg("aggregate function %s does not support use as a window function", + format_procedure(wfunc->winfnoid)))); + /* Detect how many arguments to pass to the finalfn */ if (finalextra) peraggstate->numFinalArgs = numArguments + 1; @@ -2351,15 +2891,15 @@ are_peers(WindowAggState *winstate, TupleTableSlot *slot1, TupleTableSlot *slot2) { WindowAgg *node = (WindowAgg *) winstate->ss.ps.plan; + ExprContext *econtext = winstate->tmpcontext; /* If no ORDER BY, all rows are peers with each other */ if (node->ordNumCols == 0) return true; - return execTuplesMatch(slot1, slot2, - node->ordNumCols, node->ordColIdx, - winstate->ordEqfunctions, - winstate->tmpcontext->ecxt_per_tuple_memory); + econtext->ecxt_outertuple = slot1; + econtext->ecxt_innertuple = slot2; + return ExecQualAndReset(winstate->ordEqfunction, econtext); } /* @@ -2547,7 +3087,7 @@ WinSetMarkPosition(WindowObject winobj, int64 markpos) /* * WinRowsArePeers - * Compare two rows (specified by absolute position in window) to see + * Compare two rows (specified by absolute position in partition) to see * if they are equal according to the ORDER BY clause. * * NB: this does not consider the window frame mode. @@ -2569,6 +3109,10 @@ WinRowsArePeers(WindowObject winobj, int64 pos1, int64 pos2) if (node->ordNumCols == 0) return true; + /* + * Note: OK to use temp_slot_2 here because we aren't calling any + * frame-related functions (those tend to clobber temp_slot_2). + */ slot1 = winstate->temp_slot_1; slot2 = winstate->temp_slot_2; @@ -2653,30 +3197,7 @@ WinGetFuncArgInPartition(WindowObject winobj, int argno, if (isout) *isout = false; if (set_mark) - { - int frameOptions = winstate->frameOptions; - int64 mark_pos = abs_pos; - - /* - * In RANGE mode with a moving frame head, we must not let the - * mark advance past frameheadpos, since that row has to be - * fetchable during future update_frameheadpos calls. - * - * XXX it is very ugly to pollute window functions' marks with - * this consideration; it could for instance mask a logic bug that - * lets a window function fetch rows before what it had claimed - * was its mark. Perhaps use a separate mark for frame head - * probes? - */ - if ((frameOptions & FRAMEOPTION_RANGE) && - !(frameOptions & FRAMEOPTION_START_UNBOUNDED_PRECEDING)) - { - update_frameheadpos(winobj, winstate->temp_slot_2); - if (mark_pos > winstate->frameheadpos) - mark_pos = winstate->frameheadpos; - } - WinSetMarkPosition(winobj, mark_pos); - } + WinSetMarkPosition(winobj, abs_pos); econtext->ecxt_outertuple = slot; return ExecEvalExpr((ExprState *) list_nth(winobj->argstates, argno), econtext, isnull); @@ -2687,19 +3208,34 @@ WinGetFuncArgInPartition(WindowObject winobj, int argno, * WinGetFuncArgInFrame * Evaluate a window function's argument expression on a specified * row of the window frame. The row is identified in lseek(2) style, - * i.e. relative to the current, first, or last row. + * i.e. relative to the first or last row of the frame. (We do not + * support WINDOW_SEEK_CURRENT here, because it's not very clear what + * that should mean if the current row isn't part of the frame.) * * argno: argument number to evaluate (counted from 0) * relpos: signed rowcount offset from the seek position - * seektype: WINDOW_SEEK_CURRENT, WINDOW_SEEK_HEAD, or WINDOW_SEEK_TAIL - * set_mark: If the row is found and set_mark is true, the mark is moved to - * the row as a side-effect. 
+ * seektype: WINDOW_SEEK_HEAD or WINDOW_SEEK_TAIL + * set_mark: If the row is found/in frame and set_mark is true, the mark is + * moved to the row as a side-effect. * isnull: output argument, receives isnull status of result * isout: output argument, set to indicate whether target row position * is out of frame (can pass NULL if caller doesn't care about this) * - * Specifying a nonexistent row is not an error, it just causes a null result - * (plus setting *isout true, if isout isn't NULL). + * Specifying a nonexistent or not-in-frame row is not an error, it just + * causes a null result (plus setting *isout true, if isout isn't NULL). + * + * Note that some exclusion-clause options lead to situations where the + * rows that are in-frame are not consecutive in the partition. But we + * count only in-frame rows when measuring relpos. + * + * The set_mark flag is interpreted as meaning that the caller will specify + * a constant (or, perhaps, monotonically increasing) relpos in successive + * calls, so that *if there is no exclusion clause* there will be no need + * to fetch a row before the previously fetched row. But we do not expect + * the caller to know how to account for exclusion clauses. Therefore, + * if there is an exclusion clause we take responsibility for adjusting the + * mark request to something that will be safe given the above assumption + * about relpos. */ Datum WinGetFuncArgInFrame(WindowObject winobj, int argno, @@ -2709,8 +3245,8 @@ WinGetFuncArgInFrame(WindowObject winobj, int argno, WindowAggState *winstate; ExprContext *econtext; TupleTableSlot *slot; - bool gottuple; int64 abs_pos; + int64 mark_pos; Assert(WindowObjectIsValid(winobj)); winstate = winobj->winstate; @@ -2720,66 +3256,167 @@ WinGetFuncArgInFrame(WindowObject winobj, int argno, switch (seektype) { case WINDOW_SEEK_CURRENT: - abs_pos = winstate->currentpos + relpos; + elog(ERROR, "WINDOW_SEEK_CURRENT is not supported for WinGetFuncArgInFrame"); + abs_pos = mark_pos = 0; /* keep compiler quiet */ break; case WINDOW_SEEK_HEAD: - update_frameheadpos(winobj, slot); + /* rejecting relpos < 0 is easy and simplifies code below */ + if (relpos < 0) + goto out_of_frame; + update_frameheadpos(winstate); abs_pos = winstate->frameheadpos + relpos; + mark_pos = abs_pos; + + /* + * Account for exclusion option if one is active, but advance only + * abs_pos not mark_pos. This prevents changes of the current + * row's peer group from resulting in trying to fetch a row before + * some previous mark position. + * + * Note that in some corner cases such as current row being + * outside frame, these calculations are theoretically too simple, + * but it doesn't matter because we'll end up deciding the row is + * out of frame. We do not attempt to avoid fetching rows past + * end of frame; that would happen in some cases anyway. 
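The revised WinGetFuncArgInFrame() contract described above can be exercised with a pair of minimal C window functions, sketched here under hypothetical names (PG_MODULE_MAGIC and the CREATE FUNCTION ... WINDOW declarations are omitted). They mirror how the built-in first_value/last_value drive the API:

#include "postgres.h"
#include "fmgr.h"
#include "windowapi.h"

PG_FUNCTION_INFO_V1(frame_first_arg);
PG_FUNCTION_INFO_V1(frame_last_arg);

/* Return the argument's value in the first row of the frame, or NULL if
 * the frame is empty. */
Datum
frame_first_arg(PG_FUNCTION_ARGS)
{
    WindowObject winobj = PG_WINDOW_OBJECT();
    Datum       result;
    bool        isnull,
                isout;

    result = WinGetFuncArgInFrame(winobj, 0,
                                  0, WINDOW_SEEK_HEAD, true,
                                  &isnull, &isout);
    if (isout || isnull)
        PG_RETURN_NULL();
    PG_RETURN_DATUM(result);
}

/* Same, but relative to the frame tail. */
Datum
frame_last_arg(PG_FUNCTION_ARGS)
{
    WindowObject winobj = PG_WINDOW_OBJECT();
    Datum       result;
    bool        isnull,
                isout;

    result = WinGetFuncArgInFrame(winobj, 0,
                                  0, WINDOW_SEEK_TAIL, true,
                                  &isnull, &isout);
    if (isout || isnull)
        PG_RETURN_NULL();
    PG_RETURN_DATUM(result);
}

Because the executor now accounts for exclusion clauses itself, callers like these need no special handling for excluded rows; a request that falls outside the frame simply comes back with *isout set.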
+ */ + switch (winstate->frameOptions & FRAMEOPTION_EXCLUSION) + { + case 0: + /* no adjustment needed */ + break; + case FRAMEOPTION_EXCLUDE_CURRENT_ROW: + if (abs_pos >= winstate->currentpos && + winstate->currentpos >= winstate->frameheadpos) + abs_pos++; + break; + case FRAMEOPTION_EXCLUDE_GROUP: + update_grouptailpos(winstate); + if (abs_pos >= winstate->groupheadpos && + winstate->grouptailpos > winstate->frameheadpos) + { + int64 overlapstart = Max(winstate->groupheadpos, + winstate->frameheadpos); + + abs_pos += winstate->grouptailpos - overlapstart; + } + break; + case FRAMEOPTION_EXCLUDE_TIES: + update_grouptailpos(winstate); + if (abs_pos >= winstate->groupheadpos && + winstate->grouptailpos > winstate->frameheadpos) + { + int64 overlapstart = Max(winstate->groupheadpos, + winstate->frameheadpos); + + if (abs_pos == overlapstart) + abs_pos = winstate->currentpos; + else + abs_pos += winstate->grouptailpos - overlapstart - 1; + } + break; + default: + elog(ERROR, "unrecognized frame option state: 0x%x", + winstate->frameOptions); + break; + } break; case WINDOW_SEEK_TAIL: - update_frametailpos(winobj, slot); - abs_pos = winstate->frametailpos + relpos; + /* rejecting relpos > 0 is easy and simplifies code below */ + if (relpos > 0) + goto out_of_frame; + update_frametailpos(winstate); + abs_pos = winstate->frametailpos - 1 + relpos; + + /* + * Account for exclusion option if one is active. If there is no + * exclusion, we can safely set the mark at the accessed row. But + * if there is, we can only mark the frame start, because we can't + * be sure how far back in the frame the exclusion might cause us + * to fetch in future. Furthermore, we have to actually check + * against frameheadpos here, since it's unsafe to try to fetch a + * row before frame start if the mark might be there already. 
+ */ + switch (winstate->frameOptions & FRAMEOPTION_EXCLUSION) + { + case 0: + /* no adjustment needed */ + mark_pos = abs_pos; + break; + case FRAMEOPTION_EXCLUDE_CURRENT_ROW: + if (abs_pos <= winstate->currentpos && + winstate->currentpos < winstate->frametailpos) + abs_pos--; + update_frameheadpos(winstate); + if (abs_pos < winstate->frameheadpos) + goto out_of_frame; + mark_pos = winstate->frameheadpos; + break; + case FRAMEOPTION_EXCLUDE_GROUP: + update_grouptailpos(winstate); + if (abs_pos < winstate->grouptailpos && + winstate->groupheadpos < winstate->frametailpos) + { + int64 overlapend = Min(winstate->grouptailpos, + winstate->frametailpos); + + abs_pos -= overlapend - winstate->groupheadpos; + } + update_frameheadpos(winstate); + if (abs_pos < winstate->frameheadpos) + goto out_of_frame; + mark_pos = winstate->frameheadpos; + break; + case FRAMEOPTION_EXCLUDE_TIES: + update_grouptailpos(winstate); + if (abs_pos < winstate->grouptailpos && + winstate->groupheadpos < winstate->frametailpos) + { + int64 overlapend = Min(winstate->grouptailpos, + winstate->frametailpos); + + if (abs_pos == overlapend - 1) + abs_pos = winstate->currentpos; + else + abs_pos -= overlapend - 1 - winstate->groupheadpos; + } + update_frameheadpos(winstate); + if (abs_pos < winstate->frameheadpos) + goto out_of_frame; + mark_pos = winstate->frameheadpos; + break; + default: + elog(ERROR, "unrecognized frame option state: 0x%x", + winstate->frameOptions); + mark_pos = 0; /* keep compiler quiet */ + break; + } break; default: elog(ERROR, "unrecognized window seek type: %d", seektype); - abs_pos = 0; /* keep compiler quiet */ + abs_pos = mark_pos = 0; /* keep compiler quiet */ break; } - gottuple = window_gettupleslot(winobj, abs_pos, slot); - if (gottuple) - gottuple = row_is_in_frame(winstate, abs_pos, slot); + if (!window_gettupleslot(winobj, abs_pos, slot)) + goto out_of_frame; - if (!gottuple) - { - if (isout) - *isout = true; - *isnull = true; - return (Datum) 0; - } - else - { - if (isout) - *isout = false; - if (set_mark) - { - int frameOptions = winstate->frameOptions; - int64 mark_pos = abs_pos; + /* The code above does not detect all out-of-frame cases, so check */ + if (row_is_in_frame(winstate, abs_pos, slot) <= 0) + goto out_of_frame; - /* - * In RANGE mode with a moving frame head, we must not let the - * mark advance past frameheadpos, since that row has to be - * fetchable during future update_frameheadpos calls. - * - * XXX it is very ugly to pollute window functions' marks with - * this consideration; it could for instance mask a logic bug that - * lets a window function fetch rows before what it had claimed - * was its mark. Perhaps use a separate mark for frame head - * probes? 
- */ - if ((frameOptions & FRAMEOPTION_RANGE) && - !(frameOptions & FRAMEOPTION_START_UNBOUNDED_PRECEDING)) - { - update_frameheadpos(winobj, winstate->temp_slot_2); - if (mark_pos > winstate->frameheadpos) - mark_pos = winstate->frameheadpos; - } - WinSetMarkPosition(winobj, mark_pos); - } - econtext->ecxt_outertuple = slot; - return ExecEvalExpr((ExprState *) list_nth(winobj->argstates, argno), - econtext, isnull); - } + if (isout) + *isout = false; + if (set_mark) + WinSetMarkPosition(winobj, mark_pos); + econtext->ecxt_outertuple = slot; + return ExecEvalExpr((ExprState *) list_nth(winobj->argstates, argno), + econtext, isnull); + +out_of_frame: + if (isout) + *isout = true; + *isnull = true; + return (Datum) 0; } /* diff --git a/src/backend/executor/nodeWorktablescan.c b/src/backend/executor/nodeWorktablescan.c index d5ffadda3e..1ce8ae9f02 100644 --- a/src/backend/executor/nodeWorktablescan.c +++ b/src/backend/executor/nodeWorktablescan.c @@ -3,7 +3,7 @@ * nodeWorktablescan.c * routines to handle WorkTableScan nodes. * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * @@ -157,21 +157,21 @@ ExecInitWorkTableScan(WorkTableScan *node, EState *estate, int eflags) ExecAssignExprContext(estate, &scanstate->ss.ps); /* - * initialize child expressions + * tuple table initialization */ - scanstate->ss.ps.qual = - ExecInitQual(node->scan.plan.qual, (PlanState *) scanstate); + ExecInitResultTypeTL(&scanstate->ss.ps); + ExecInitScanTupleSlot(estate, &scanstate->ss, NULL); /* - * tuple table initialization + * initialize child expressions */ - ExecInitResultTupleSlot(estate, &scanstate->ss.ps); - ExecInitScanTupleSlot(estate, &scanstate->ss); + scanstate->ss.ps.qual = + ExecInitQual(node->scan.plan.qual, (PlanState *) scanstate); /* - * Initialize result tuple type, but not yet projection info. + * Do not yet initialize projection info, see ExecWorkTableScan() for + * details. */ - ExecAssignResultTypeFromTL(&scanstate->ss.ps); return scanstate; } @@ -193,7 +193,8 @@ ExecEndWorkTableScan(WorkTableScanState *node) /* * clean out the tuple table */ - ExecClearTuple(node->ss.ps.ps_ResultTupleSlot); + if (node->ss.ps.ps_ResultTupleSlot) + ExecClearTuple(node->ss.ps.ps_ResultTupleSlot); ExecClearTuple(node->ss.ss_ScanTupleSlot); } @@ -206,7 +207,8 @@ ExecEndWorkTableScan(WorkTableScanState *node) void ExecReScanWorkTableScan(WorkTableScanState *node) { - ExecClearTuple(node->ss.ps.ps_ResultTupleSlot); + if (node->ss.ps.ps_ResultTupleSlot) + ExecClearTuple(node->ss.ps.ps_ResultTupleSlot); ExecScanReScan(&node->ss); diff --git a/src/backend/executor/spi.c b/src/backend/executor/spi.c index cd00a6d9f2..1921273856 100644 --- a/src/backend/executor/spi.c +++ b/src/backend/executor/spi.c @@ -3,7 +3,7 @@ * spi.c * Server Programming Interface * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * @@ -36,10 +36,16 @@ #include "utils/typcache.h" +/* + * These global variables are part of the API for various SPI functions + * (a horrible API choice, but it's too late now). To reduce the risk of + * interference between different SPI callers, we save and restore them + * when entering/exiting a SPI nesting level. 
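Because SPI_finish() now restores the outer caller's SPI_processed, SPI_lastoid, SPI_tuptable and SPI_result, a nested helper along these lines (hypothetical, shown only to illustrate the save/restore behavior) no longer clobbers the state of the SPI user that invoked it:

#include "postgres.h"
#include "executor/spi.h"

/* Count the rows produced by a read-only query via a nested SPI call. */
static uint64
count_rows(const char *query)
{
    uint64      n;

    if (SPI_connect() != SPI_OK_CONNECT)
        elog(ERROR, "SPI_connect failed");
    if (SPI_execute(query, true, 0) != SPI_OK_SELECT)
        elog(ERROR, "SPI_execute failed: %s", query);
    n = SPI_processed;
    SPI_finish();               /* the caller's SPI_* globals come back here */
    return n;
}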
+ */ uint64 SPI_processed = 0; Oid SPI_lastoid = InvalidOid; SPITupleTable *SPI_tuptable = NULL; -int SPI_result; +int SPI_result = 0; static _SPI_connection *_SPI_stack = NULL; static _SPI_connection *_SPI_current = NULL; @@ -71,8 +77,8 @@ static void _SPI_cursor_operation(Portal portal, static SPIPlanPtr _SPI_make_plan_non_temp(SPIPlanPtr plan); static SPIPlanPtr _SPI_save_plan(SPIPlanPtr plan); -static int _SPI_begin_call(bool execmem); -static int _SPI_end_call(bool procmem); +static int _SPI_begin_call(bool use_exec); +static int _SPI_end_call(bool use_exec); static MemoryContext _SPI_execmem(void); static MemoryContext _SPI_procmem(void); static bool _SPI_checktuples(void); @@ -82,6 +88,12 @@ static bool _SPI_checktuples(void); int SPI_connect(void) +{ + return SPI_connect_ext(0); +} + +int +SPI_connect_ext(int options) { int newdepth; @@ -92,7 +104,7 @@ SPI_connect(void) elog(ERROR, "SPI stack corrupted"); newdepth = 16; _SPI_stack = (_SPI_connection *) - MemoryContextAlloc(TopTransactionContext, + MemoryContextAlloc(TopMemoryContext, newdepth * sizeof(_SPI_connection)); _SPI_stack_depth = newdepth; } @@ -118,29 +130,49 @@ SPI_connect(void) _SPI_current->processed = 0; _SPI_current->lastoid = InvalidOid; _SPI_current->tuptable = NULL; + _SPI_current->execSubid = InvalidSubTransactionId; slist_init(&_SPI_current->tuptables); _SPI_current->procCxt = NULL; /* in case we fail to create 'em */ _SPI_current->execCxt = NULL; _SPI_current->connectSubid = GetCurrentSubTransactionId(); _SPI_current->queryEnv = NULL; + _SPI_current->atomic = (options & SPI_OPT_NONATOMIC ? false : true); + _SPI_current->internal_xact = false; + _SPI_current->outer_processed = SPI_processed; + _SPI_current->outer_lastoid = SPI_lastoid; + _SPI_current->outer_tuptable = SPI_tuptable; + _SPI_current->outer_result = SPI_result; /* * Create memory contexts for this procedure * - * XXX it would be better to use PortalContext as the parent context, but - * we may not be inside a portal (consider deferred-trigger execution). - * Perhaps CurTransactionContext would do? For now it doesn't matter - * because we clean up explicitly in AtEOSubXact_SPI(). + * In atomic contexts (the normal case), we use TopTransactionContext, + * otherwise PortalContext, so that it lives across transaction + * boundaries. + * + * XXX It could be better to use PortalContext as the parent context in + * all cases, but we may not be inside a portal (consider deferred-trigger + * execution). Perhaps CurTransactionContext could be an option? For now + * it doesn't matter because we clean up explicitly in AtEOSubXact_SPI(). */ - _SPI_current->procCxt = AllocSetContextCreate(TopTransactionContext, + _SPI_current->procCxt = AllocSetContextCreate(_SPI_current->atomic ? TopTransactionContext : PortalContext, "SPI Proc", ALLOCSET_DEFAULT_SIZES); - _SPI_current->execCxt = AllocSetContextCreate(TopTransactionContext, + _SPI_current->execCxt = AllocSetContextCreate(_SPI_current->atomic ? TopTransactionContext : _SPI_current->procCxt, "SPI Exec", ALLOCSET_DEFAULT_SIZES); /* ... and switch to procedure's context */ _SPI_current->savedcxt = MemoryContextSwitchTo(_SPI_current->procCxt); + /* + * Reset API global variables so that current caller cannot accidentally + * depend on state of an outer caller. 
+ */ + SPI_processed = 0; + SPI_lastoid = InvalidOid; + SPI_tuptable = NULL; + SPI_result = 0; + return SPI_OK_CONNECT; } @@ -149,7 +181,7 @@ SPI_finish(void) { int res; - res = _SPI_begin_call(false); /* live in procedure memory */ + res = _SPI_begin_call(false); /* just check we're connected */ if (res < 0) return res; @@ -163,12 +195,13 @@ SPI_finish(void) _SPI_current->procCxt = NULL; /* - * Reset result variables, especially SPI_tuptable which is probably + * Restore outer API variables, especially SPI_tuptable which is probably * pointing at a just-deleted tuptable */ - SPI_processed = 0; - SPI_lastoid = InvalidOid; - SPI_tuptable = NULL; + SPI_processed = _SPI_current->outer_processed; + SPI_lastoid = _SPI_current->outer_lastoid; + SPI_tuptable = _SPI_current->outer_tuptable; + SPI_result = _SPI_current->outer_result; /* Exit stack level */ _SPI_connected--; @@ -180,29 +213,111 @@ SPI_finish(void) return SPI_OK_FINISH; } +void +SPI_start_transaction(void) +{ + MemoryContext oldcontext = CurrentMemoryContext; + + StartTransactionCommand(); + MemoryContextSwitchTo(oldcontext); +} + +void +SPI_commit(void) +{ + MemoryContext oldcontext = CurrentMemoryContext; + + if (_SPI_current->atomic) + ereport(ERROR, + (errcode(ERRCODE_INVALID_TRANSACTION_TERMINATION), + errmsg("invalid transaction termination"))); + + /* + * This restriction is required by PLs implemented on top of SPI. They + * use subtransactions to establish exception blocks that are supposed to + * be rolled back together if there is an error. Terminating the + * top-level transaction in such a block violates that idea. A future PL + * implementation might have different ideas about this, in which case + * this restriction would have to be refined or the check possibly be + * moved out of SPI into the PLs. + */ + if (IsSubTransaction()) + ereport(ERROR, + (errcode(ERRCODE_INVALID_TRANSACTION_TERMINATION), + errmsg("cannot commit while a subtransaction is active"))); + + _SPI_current->internal_xact = true; + + /* + * Before committing, pop all active snapshots to avoid error about + * "snapshot %p still active". + */ + while (ActiveSnapshotSet()) + PopActiveSnapshot(); + + CommitTransactionCommand(); + MemoryContextSwitchTo(oldcontext); + + _SPI_current->internal_xact = false; +} + +void +SPI_rollback(void) +{ + MemoryContext oldcontext = CurrentMemoryContext; + + if (_SPI_current->atomic) + ereport(ERROR, + (errcode(ERRCODE_INVALID_TRANSACTION_TERMINATION), + errmsg("invalid transaction termination"))); + + /* see under SPI_commit() */ + if (IsSubTransaction()) + ereport(ERROR, + (errcode(ERRCODE_INVALID_TRANSACTION_TERMINATION), + errmsg("cannot roll back while a subtransaction is active"))); + + _SPI_current->internal_xact = true; + + AbortCurrentTransaction(); + MemoryContextSwitchTo(oldcontext); + + _SPI_current->internal_xact = false; +} + +/* + * Clean up SPI state. Called on transaction end (of non-SPI-internal + * transactions) and when returning to the main loop on error. + */ +void +SPICleanup(void) +{ + _SPI_current = NULL; + _SPI_connected = -1; + /* Reset API global variables, too */ + SPI_processed = 0; + SPI_lastoid = InvalidOid; + SPI_tuptable = NULL; + SPI_result = 0; +} + /* * Clean up SPI state at transaction commit or abort. */ void AtEOXact_SPI(bool isCommit) { - /* - * Note that memory contexts belonging to SPI stack entries will be freed - * automatically, so we can ignore them here. We just need to restore our - * static variables to initial state. 
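A hedged sketch of how a procedural-language call handler might drive the new nonatomic entry point and the transaction-control calls introduced above; the handler and the run_statements() placeholder are hypothetical, but the call sequence follows the pattern these functions are designed for (it is the basis of PL/pgSQL's COMMIT support):

#include "postgres.h"
#include "executor/spi.h"

static void
execute_procedure_body(bool nonatomic)
{
    if (SPI_connect_ext(nonatomic ? SPI_OPT_NONATOMIC : 0) != SPI_OK_CONNECT)
        elog(ERROR, "SPI_connect_ext failed");

    /* ... run_statements(); ... */

    if (nonatomic)
    {
        /*
         * Commit the surrounding transaction and open a new one, which is
         * only permitted in a nonatomic SPI context.
         */
        SPI_commit();
        SPI_start_transaction();
    }

    SPI_finish();
}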
- */ + /* Do nothing if the transaction end was initiated by SPI. */ + if (_SPI_current && _SPI_current->internal_xact) + return; + if (isCommit && _SPI_connected != -1) ereport(WARNING, (errcode(ERRCODE_WARNING), errmsg("transaction left non-empty SPI stack"), errhint("Check for missing \"SPI_finish\" calls."))); - _SPI_current = _SPI_stack = NULL; - _SPI_stack_depth = 0; - _SPI_connected = -1; - SPI_processed = 0; - SPI_lastoid = InvalidOid; - SPI_tuptable = NULL; + SPICleanup(); } /* @@ -223,6 +338,9 @@ AtEOSubXact_SPI(bool isCommit, SubTransactionId mySubid) if (connection->connectSubid != mySubid) break; /* couldn't be any underneath it either */ + if (connection->internal_xact) + break; + found = true; /* @@ -240,18 +358,20 @@ AtEOSubXact_SPI(bool isCommit, SubTransactionId mySubid) } /* - * Pop the stack entry and reset global variables. Unlike + * Restore outer global variables and pop the stack entry. Unlike * SPI_finish(), we don't risk switching to memory contexts that might * be already gone. */ + SPI_processed = connection->outer_processed; + SPI_lastoid = connection->outer_lastoid; + SPI_tuptable = connection->outer_tuptable; + SPI_result = connection->outer_result; + _SPI_connected--; if (_SPI_connected < 0) _SPI_current = NULL; else _SPI_current = &(_SPI_stack[_SPI_connected]); - SPI_processed = 0; - SPI_lastoid = InvalidOid; - SPI_tuptable = NULL; } if (found && isCommit) @@ -268,8 +388,15 @@ AtEOSubXact_SPI(bool isCommit, SubTransactionId mySubid) { slist_mutable_iter siter; - /* free Executor memory the same as _SPI_end_call would do */ - MemoryContextResetAndDeleteChildren(_SPI_current->execCxt); + /* + * Throw away executor state if current executor operation was started + * within current subxact (essentially, force a _SPI_end_call(true)). + */ + if (_SPI_current->execSubid >= mySubid) + { + _SPI_current->execSubid = InvalidSubTransactionId; + MemoryContextResetAndDeleteChildren(_SPI_current->execCxt); + } /* throw away any tuple tables created within current subxact */ slist_foreach_modify(siter, &_SPI_current->tuptables) @@ -293,11 +420,22 @@ AtEOSubXact_SPI(bool isCommit, SubTransactionId mySubid) MemoryContextDelete(tuptable->tuptabcxt); } } - /* in particular we should have gotten rid of any in-progress table */ - Assert(_SPI_current->tuptable == NULL); } } +/* + * Are we executing inside a procedure (that is, a nonatomic SPI context)? 
+ */ +bool +SPI_inside_nonatomic_context(void) +{ + if (_SPI_current == NULL) + return false; /* not in any SPI context at all */ + if (_SPI_current->atomic) + return false; /* it's atomic (ie function not procedure) */ + return true; +} + /* Parse, plan, and execute a query string */ int @@ -761,12 +899,14 @@ int SPI_fnumber(TupleDesc tupdesc, const char *fname) { int res; - Form_pg_attribute sysatt; + const FormData_pg_attribute *sysatt; for (res = 0; res < tupdesc->natts; res++) { - if (namestrcmp(&tupdesc->attrs[res]->attname, fname) == 0 && - !tupdesc->attrs[res]->attisdropped) + Form_pg_attribute attr = TupleDescAttr(tupdesc, res); + + if (namestrcmp(&attr->attname, fname) == 0 && + !attr->attisdropped) return res + 1; } @@ -781,7 +921,7 @@ SPI_fnumber(TupleDesc tupdesc, const char *fname) char * SPI_fname(TupleDesc tupdesc, int fnumber) { - Form_pg_attribute att; + const FormData_pg_attribute *att; SPI_result = 0; @@ -793,7 +933,7 @@ SPI_fname(TupleDesc tupdesc, int fnumber) } if (fnumber > 0) - att = tupdesc->attrs[fnumber - 1]; + att = TupleDescAttr(tupdesc, fnumber - 1); else att = SystemAttributeDefinition(fnumber, true); @@ -823,7 +963,7 @@ SPI_getvalue(HeapTuple tuple, TupleDesc tupdesc, int fnumber) return NULL; if (fnumber > 0) - typoid = tupdesc->attrs[fnumber - 1]->atttypid; + typoid = TupleDescAttr(tupdesc, fnumber - 1)->atttypid; else typoid = (SystemAttributeDefinition(fnumber, true))->atttypid; @@ -865,7 +1005,7 @@ SPI_gettype(TupleDesc tupdesc, int fnumber) } if (fnumber > 0) - typoid = tupdesc->attrs[fnumber - 1]->atttypid; + typoid = TupleDescAttr(tupdesc, fnumber - 1)->atttypid; else typoid = (SystemAttributeDefinition(fnumber, true))->atttypid; @@ -901,7 +1041,7 @@ SPI_gettypeid(TupleDesc tupdesc, int fnumber) } if (fnumber > 0) - return tupdesc->attrs[fnumber - 1]->atttypid; + return TupleDescAttr(tupdesc, fnumber - 1)->atttypid; else return (SystemAttributeDefinition(fnumber, true))->atttypid; } @@ -1175,7 +1315,7 @@ SPI_cursor_open_internal(const char *name, SPIPlanPtr plan, } /* Copy the plan's query string into the portal */ - query_string = MemoryContextStrdup(PortalGetHeapMemory(portal), + query_string = MemoryContextStrdup(portal->portalContext, plansource->query_string); /* @@ -1205,7 +1345,7 @@ SPI_cursor_open_internal(const char *name, SPIPlanPtr plan, * will result in leaking our refcount on the plan, but it doesn't * matter because the plan is unsaved and hence transient anyway. */ - oldcontext = MemoryContextSwitchTo(PortalGetHeapMemory(portal)); + oldcontext = MemoryContextSwitchTo(portal->portalContext); stmt_list = copyObject(stmt_list); MemoryContextSwitchTo(oldcontext); ReleaseCachedPlan(cplan, false); @@ -1303,7 +1443,7 @@ SPI_cursor_open_internal(const char *name, SPIPlanPtr plan, */ if (paramLI) { - oldcontext = MemoryContextSwitchTo(PortalGetHeapMemory(portal)); + oldcontext = MemoryContextSwitchTo(portal->portalContext); paramLI = copyParamList(paramLI); MemoryContextSwitchTo(oldcontext); } @@ -1899,9 +2039,9 @@ _SPI_prepare_oneshot_plan(const char *src, SPIPlanPtr plan) * snapshot: query snapshot to use, or InvalidSnapshot for the normal * behavior of taking a new snapshot for each query. 
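The SPI_fnumber()/SPI_fname() hunks above switch to the TupleDescAttr() accessor, which insulates callers from the change in tuple-descriptor layout. A standalone helper in the same style (hypothetical, for illustration only) looks like this:

#include "postgres.h"
#include "access/tupdesc.h"
#include "executor/spi.h"
#include "utils/builtins.h"

/* Find a column by name, returning its 1-based SPI column number. */
static int
find_column(TupleDesc tupdesc, const char *fname)
{
    int         i;

    for (i = 0; i < tupdesc->natts; i++)
    {
        Form_pg_attribute attr = TupleDescAttr(tupdesc, i);

        if (!attr->attisdropped &&
            namestrcmp(&attr->attname, fname) == 0)
            return i + 1;
    }
    return SPI_ERROR_NOATTRIBUTE;
}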
* crosscheck_snapshot: for RI use, all others pass InvalidSnapshot - * read_only: TRUE for read-only execution (no CommandCounterIncrement) - * fire_triggers: TRUE to fire AFTER triggers at end of query (normal case); - * FALSE means any AFTER triggers are postponed to end of outer query + * read_only: true for read-only execution (no CommandCounterIncrement) + * fire_triggers: true to fire AFTER triggers at end of query (normal case); + * false means any AFTER triggers are postponed to end of outer query * tcount: execution tuple-count limit, or 0 for none */ static int @@ -1945,8 +2085,11 @@ _SPI_execute_plan(SPIPlanPtr plan, ParamListInfo paramLI, * * In the first two cases, we can just push the snap onto the stack once * for the whole plan list. + * + * But if the plan has no_snapshots set to true, then don't manage + * snapshots at all. The caller should then take care of that. */ - if (snapshot != InvalidSnapshot) + if (snapshot != InvalidSnapshot && !plan->no_snapshots) { if (read_only) { @@ -2025,7 +2168,7 @@ _SPI_execute_plan(SPIPlanPtr plan, ParamListInfo paramLI, * In the default non-read-only case, get a new snapshot, replacing * any that we pushed in a previous cycle. */ - if (snapshot == InvalidSnapshot && !read_only) + if (snapshot == InvalidSnapshot && !read_only && !plan->no_snapshots) { if (pushed_active_snap) PopActiveSnapshot(); @@ -2076,7 +2219,7 @@ _SPI_execute_plan(SPIPlanPtr plan, ParamListInfo paramLI, * If not read-only mode, advance the command counter before each * command and update the snapshot. */ - if (!read_only) + if (!read_only && !plan->no_snapshots) { CommandCounterIncrement(); UpdateActiveSnapshotCommandId(); @@ -2107,10 +2250,23 @@ _SPI_execute_plan(SPIPlanPtr plan, ParamListInfo paramLI, else { char completionTag[COMPLETION_TAG_BUFSIZE]; + ProcessUtilityContext context; + + /* + * If the SPI context is atomic, or we are asked to manage + * snapshots, then we are in an atomic execution context. + * Conversely, to propagate a nonatomic execution context, the + * caller must be in a nonatomic SPI context and manage + * snapshots itself. + */ + if (_SPI_current->atomic || !plan->no_snapshots) + context = PROCESS_UTILITY_QUERY; + else + context = PROCESS_UTILITY_QUERY_NONATOMIC; ProcessUtility(stmt, plansource->query_string, - PROCESS_UTILITY_QUERY, + context, paramLI, _SPI_current->queryEnv, dest, @@ -2251,10 +2407,11 @@ _SPI_convert_params(int nargs, Oid *argtypes, /* we have static list of params, so no hooks needed */ paramLI->paramFetch = NULL; paramLI->paramFetchArg = NULL; + paramLI->paramCompile = NULL; + paramLI->paramCompileArg = NULL; paramLI->parserSetup = NULL; paramLI->parserSetupArg = NULL; paramLI->numParams = nargs; - paramLI->paramMask = NULL; for (i = 0; i < nargs; i++) { @@ -2359,6 +2516,9 @@ _SPI_error_callback(void *arg) const char *query = (const char *) arg; int syntaxerrposition; + if (query == NULL) /* in case arg wasn't set yet */ + return; + /* * If there is a syntax error position, convert to internal syntax error; * otherwise treat the query as an item of context stack @@ -2444,15 +2604,24 @@ _SPI_procmem(void) /* * _SPI_begin_call: begin a SPI operation within a connected procedure + * + * use_exec is true if we intend to make use of the procedure's execCxt + * during this SPI operation. We'll switch into that context, and arrange + * for it to be cleaned up at _SPI_end_call or if an error occurs. 
*/ static int -_SPI_begin_call(bool execmem) +_SPI_begin_call(bool use_exec) { if (_SPI_current == NULL) return SPI_ERROR_UNCONNECTED; - if (execmem) /* switch to the Executor memory context */ + if (use_exec) + { + /* remember when the Executor operation started */ + _SPI_current->execSubid = GetCurrentSubTransactionId(); + /* switch to the Executor memory context */ _SPI_execmem(); + } return 0; } @@ -2460,14 +2629,19 @@ _SPI_begin_call(bool execmem) /* * _SPI_end_call: end a SPI operation within a connected procedure * + * use_exec must be the same as in the previous _SPI_begin_call + * * Note: this currently has no failure return cases, so callers don't check */ static int -_SPI_end_call(bool procmem) +_SPI_end_call(bool use_exec) { - if (procmem) /* switch to the procedure memory context */ + if (use_exec) { + /* switch to the procedure memory context */ _SPI_procmem(); + /* mark Executor context no longer in use */ + _SPI_current->execSubid = InvalidSubTransactionId; /* and free Executor memory */ MemoryContextResetAndDeleteChildren(_SPI_current->execCxt); } @@ -2524,11 +2698,8 @@ _SPI_make_plan_non_temp(SPIPlanPtr plan) oldcxt = MemoryContextSwitchTo(plancxt); /* Copy the SPI_plan struct and subsidiary data into the new context */ - newplan = (SPIPlanPtr) palloc(sizeof(_SPI_plan)); + newplan = (SPIPlanPtr) palloc0(sizeof(_SPI_plan)); newplan->magic = _SPI_PLAN_MAGIC; - newplan->saved = false; - newplan->oneshot = false; - newplan->plancache_list = NIL; newplan->plancxt = plancxt; newplan->cursor_options = plan->cursor_options; newplan->nargs = plan->nargs; @@ -2591,11 +2762,8 @@ _SPI_save_plan(SPIPlanPtr plan) oldcxt = MemoryContextSwitchTo(plancxt); /* Copy the SPI plan into its own context */ - newplan = (SPIPlanPtr) palloc(sizeof(_SPI_plan)); + newplan = (SPIPlanPtr) palloc0(sizeof(_SPI_plan)); newplan->magic = _SPI_PLAN_MAGIC; - newplan->saved = false; - newplan->oneshot = false; - newplan->plancache_list = NIL; newplan->plancxt = plancxt; newplan->cursor_options = plan->cursor_options; newplan->nargs = plan->nargs; diff --git a/src/backend/executor/tqueue.c b/src/backend/executor/tqueue.c index a4cfe9685a..ecdbe7f79f 100644 --- a/src/backend/executor/tqueue.c +++ b/src/backend/executor/tqueue.c @@ -3,27 +3,12 @@ * tqueue.c * Use shm_mq to send & receive tuples between parallel backends * - * Most of the complexity in this module arises from transient RECORD types, - * which all have type RECORDOID and are distinguished by typmod numbers - * that are managed per-backend (see src/backend/utils/cache/typcache.c). - * The sender's set of RECORD typmod assignments probably doesn't match the - * receiver's. To deal with this, we make the sender send a description - * of each transient RECORD type appearing in the data it sends. The - * receiver finds or creates a matching type in its own typcache, and then - * maps the sender's typmod for that type to its own typmod. - * * A DestReceiver of type DestTupleQueue, which is a TQueueDestReceiver - * under the hood, writes tuples from the executor to a shm_mq. If - * necessary, it also writes control messages describing transient - * record types used within the tuple. + * under the hood, writes tuples from the executor to a shm_mq. * - * A TupleQueueReader reads tuples, and control messages if any are sent, - * from a shm_mq and returns the tuples. 
If transient record types are - * in use, it registers those types locally based on the control messages - * and rewrites the typmods sent by the remote side to the corresponding - * local record typmods. + * A TupleQueueReader reads tuples from a shm_mq and returns the tuples. * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * IDENTIFICATION @@ -35,275 +20,43 @@ #include "postgres.h" #include "access/htup_details.h" -#include "catalog/pg_type.h" #include "executor/tqueue.h" -#include "funcapi.h" -#include "lib/stringinfo.h" -#include "miscadmin.h" -#include "utils/array.h" -#include "utils/lsyscache.h" -#include "utils/memutils.h" -#include "utils/rangetypes.h" -#include "utils/syscache.h" -#include "utils/typcache.h" - - -/* - * The data transferred through the shm_mq is divided into messages. - * One-byte messages are mode-switch messages, telling the receiver to switch - * between "control" and "data" modes. (We always start up in "data" mode.) - * Otherwise, when in "data" mode, each message is a tuple. When in "control" - * mode, each message defines one transient-typmod-to-tupledesc mapping to - * let us interpret future tuples. Both of those cases certainly require - * more than one byte, so no confusion is possible. - */ -#define TUPLE_QUEUE_MODE_CONTROL 'c' /* mode-switch message contents */ -#define TUPLE_QUEUE_MODE_DATA 'd' - -/* - * Both the sender and receiver build trees of TupleRemapInfo nodes to help - * them identify which (sub) fields of transmitted tuples are composite and - * may thus need remap processing. We might need to look within arrays and - * ranges, not only composites, to find composite sub-fields. A NULL - * TupleRemapInfo pointer indicates that it is known that the described field - * is not composite and has no composite substructure. - * - * Note that we currently have to look at each composite field at runtime, - * even if we believe it's of a named composite type (i.e., not RECORD). - * This is because we allow the actual value to be a compatible transient - * RECORD type. That's grossly inefficient, and it would be good to get - * rid of the requirement, but it's not clear what would need to change. - * - * Also, we allow the top-level tuple structure, as well as the actual - * structure of composite subfields, to change from one tuple to the next - * at runtime. This may well be entirely historical, but it's mostly free - * to support given the previous requirement; and other places in the system - * also permit this, so it's not entirely clear if we could drop it. 
- */ - -typedef enum -{ - TQUEUE_REMAP_ARRAY, /* array */ - TQUEUE_REMAP_RANGE, /* range */ - TQUEUE_REMAP_RECORD /* composite type, named or transient */ -} TupleRemapClass; - -typedef struct TupleRemapInfo TupleRemapInfo; - -typedef struct ArrayRemapInfo -{ - int16 typlen; /* array element type's storage properties */ - bool typbyval; - char typalign; - TupleRemapInfo *element_remap; /* array element type's remap info */ -} ArrayRemapInfo; - -typedef struct RangeRemapInfo -{ - TypeCacheEntry *typcache; /* range type's typcache entry */ - TupleRemapInfo *bound_remap; /* range bound type's remap info */ -} RangeRemapInfo; - -typedef struct RecordRemapInfo -{ - /* Original (remote) type ID info last seen for this composite field */ - Oid rectypid; - int32 rectypmod; - /* Local RECORD typmod, or -1 if unset; not used on sender side */ - int32 localtypmod; - /* If no fields of the record require remapping, these are NULL: */ - TupleDesc tupledesc; /* copy of record's tupdesc */ - TupleRemapInfo **field_remap; /* each field's remap info */ -} RecordRemapInfo; - -struct TupleRemapInfo -{ - TupleRemapClass remapclass; - union - { - ArrayRemapInfo arr; - RangeRemapInfo rng; - RecordRemapInfo rec; - } u; -}; /* * DestReceiver object's private contents * - * queue and tupledesc are pointers to data supplied by DestReceiver's caller. - * The recordhtab and remap info are owned by the DestReceiver and are kept - * in mycontext. tmpcontext is a tuple-lifespan context to hold cruft - * created while traversing each tuple to find record subfields. + * queue is a pointer to data supplied by DestReceiver's caller. */ typedef struct TQueueDestReceiver { DestReceiver pub; /* public fields */ shm_mq_handle *queue; /* shm_mq to send to */ - MemoryContext mycontext; /* context containing TQueueDestReceiver */ - MemoryContext tmpcontext; /* per-tuple context, if needed */ - HTAB *recordhtab; /* table of transmitted typmods, if needed */ - char mode; /* current message mode */ - TupleDesc tupledesc; /* current top-level tuple descriptor */ - TupleRemapInfo **field_remapinfo; /* current top-level remap info */ } TQueueDestReceiver; -/* - * Hash table entries for mapping remote to local typmods. - */ -typedef struct RecordTypmodMap -{ - int32 remotetypmod; /* hash key (must be first!) */ - int32 localtypmod; -} RecordTypmodMap; - /* * TupleQueueReader object's private contents * - * queue and tupledesc are pointers to data supplied by reader's caller. - * The typmodmap and remap info are owned by the TupleQueueReader and - * are kept in mycontext. + * queue is a pointer to data supplied by reader's caller. 
* * "typedef struct TupleQueueReader TupleQueueReader" is in tqueue.h */ struct TupleQueueReader { shm_mq_handle *queue; /* shm_mq to receive from */ - MemoryContext mycontext; /* context containing TupleQueueReader */ - HTAB *typmodmap; /* RecordTypmodMap hashtable, if needed */ - char mode; /* current message mode */ - TupleDesc tupledesc; /* current top-level tuple descriptor */ - TupleRemapInfo **field_remapinfo; /* current top-level remap info */ }; -/* Local function prototypes */ -static void TQExamine(TQueueDestReceiver *tqueue, - TupleRemapInfo *remapinfo, - Datum value); -static void TQExamineArray(TQueueDestReceiver *tqueue, - ArrayRemapInfo *remapinfo, - Datum value); -static void TQExamineRange(TQueueDestReceiver *tqueue, - RangeRemapInfo *remapinfo, - Datum value); -static void TQExamineRecord(TQueueDestReceiver *tqueue, - RecordRemapInfo *remapinfo, - Datum value); -static void TQSendRecordInfo(TQueueDestReceiver *tqueue, int32 typmod, - TupleDesc tupledesc); -static void TupleQueueHandleControlMessage(TupleQueueReader *reader, - Size nbytes, char *data); -static HeapTuple TupleQueueHandleDataMessage(TupleQueueReader *reader, - Size nbytes, HeapTupleHeader data); -static HeapTuple TQRemapTuple(TupleQueueReader *reader, - TupleDesc tupledesc, - TupleRemapInfo **field_remapinfo, - HeapTuple tuple); -static Datum TQRemap(TupleQueueReader *reader, TupleRemapInfo *remapinfo, - Datum value, bool *changed); -static Datum TQRemapArray(TupleQueueReader *reader, ArrayRemapInfo *remapinfo, - Datum value, bool *changed); -static Datum TQRemapRange(TupleQueueReader *reader, RangeRemapInfo *remapinfo, - Datum value, bool *changed); -static Datum TQRemapRecord(TupleQueueReader *reader, RecordRemapInfo *remapinfo, - Datum value, bool *changed); -static TupleRemapInfo *BuildTupleRemapInfo(Oid typid, MemoryContext mycontext); -static TupleRemapInfo *BuildArrayRemapInfo(Oid elemtypid, - MemoryContext mycontext); -static TupleRemapInfo *BuildRangeRemapInfo(Oid rngtypid, - MemoryContext mycontext); -static TupleRemapInfo **BuildFieldRemapInfo(TupleDesc tupledesc, - MemoryContext mycontext); - - /* * Receive a tuple from a query, and send it to the designated shm_mq. * - * Returns TRUE if successful, FALSE if shm_mq has been detached. + * Returns true if successful, false if shm_mq has been detached. */ static bool tqueueReceiveSlot(TupleTableSlot *slot, DestReceiver *self) { TQueueDestReceiver *tqueue = (TQueueDestReceiver *) self; - TupleDesc tupledesc = slot->tts_tupleDescriptor; HeapTuple tuple; shm_mq_result result; - /* - * If first time through, compute remapping info for the top-level fields. - * On later calls, if the tupledesc has changed, set up for the new - * tupledesc. (This is a strange test both because the executor really - * shouldn't change the tupledesc, and also because it would be unsafe if - * the old tupledesc could be freed and a new one allocated at the same - * address. But since some very old code in printtup.c uses a similar - * approach, we adopt it here as well.) - * - * Here and elsewhere in this module, when replacing remapping info we - * pfree the top-level object because that's easy, but we don't bother to - * recursively free any substructure. This would lead to query-lifespan - * memory leaks if the mapping info actually changed frequently, but since - * we don't expect that to happen, it doesn't seem worth expending code to - * prevent it. - */ - if (tqueue->tupledesc != tupledesc) - { - /* Is it worth trying to free substructure of the remap tree? 
*/ - if (tqueue->field_remapinfo != NULL) - pfree(tqueue->field_remapinfo); - tqueue->field_remapinfo = BuildFieldRemapInfo(tupledesc, - tqueue->mycontext); - tqueue->tupledesc = tupledesc; - } - - /* - * When, because of the types being transmitted, no record typmod mapping - * can be needed, we can skip a good deal of work. - */ - if (tqueue->field_remapinfo != NULL) - { - TupleRemapInfo **remapinfo = tqueue->field_remapinfo; - int i; - MemoryContext oldcontext = NULL; - - /* Deform the tuple so we can examine fields, if not done already. */ - slot_getallattrs(slot); - - /* Iterate over each attribute and search it for transient typmods. */ - for (i = 0; i < tupledesc->natts; i++) - { - /* Ignore nulls and types that don't need special handling. */ - if (slot->tts_isnull[i] || remapinfo[i] == NULL) - continue; - - /* Switch to temporary memory context to avoid leaking. */ - if (oldcontext == NULL) - { - if (tqueue->tmpcontext == NULL) - tqueue->tmpcontext = - AllocSetContextCreate(tqueue->mycontext, - "tqueue sender temp context", - ALLOCSET_DEFAULT_SIZES); - oldcontext = MemoryContextSwitchTo(tqueue->tmpcontext); - } - - /* Examine the value. */ - TQExamine(tqueue, remapinfo[i], slot->tts_values[i]); - } - - /* If we used the temp context, reset it and restore prior context. */ - if (oldcontext != NULL) - { - MemoryContextSwitchTo(oldcontext); - MemoryContextReset(tqueue->tmpcontext); - } - - /* If we entered control mode, switch back to data mode. */ - if (tqueue->mode != TUPLE_QUEUE_MODE_DATA) - { - tqueue->mode = TUPLE_QUEUE_MODE_DATA; - shm_mq_send(tqueue->queue, sizeof(char), &tqueue->mode, false); - } - } - /* Send the tuple itself. */ tuple = ExecMaterializeSlot(slot); result = shm_mq_send(tqueue->queue, tuple->t_len, tuple->t_data, false); @@ -319,248 +72,6 @@ tqueueReceiveSlot(TupleTableSlot *slot, DestReceiver *self) return true; } -/* - * Examine the given datum and send any necessary control messages for - * transient record types contained in it. - * - * remapinfo is previously-computed remapping info about the datum's type. - * - * This function just dispatches based on the remap class. - */ -static void -TQExamine(TQueueDestReceiver *tqueue, TupleRemapInfo *remapinfo, Datum value) -{ - /* This is recursive, so it could be driven to stack overflow. */ - check_stack_depth(); - - switch (remapinfo->remapclass) - { - case TQUEUE_REMAP_ARRAY: - TQExamineArray(tqueue, &remapinfo->u.arr, value); - break; - case TQUEUE_REMAP_RANGE: - TQExamineRange(tqueue, &remapinfo->u.rng, value); - break; - case TQUEUE_REMAP_RECORD: - TQExamineRecord(tqueue, &remapinfo->u.rec, value); - break; - } -} - -/* - * Examine a record datum and send any necessary control messages for - * transient record types contained in it. - */ -static void -TQExamineRecord(TQueueDestReceiver *tqueue, RecordRemapInfo *remapinfo, - Datum value) -{ - HeapTupleHeader tup; - Oid typid; - int32 typmod; - TupleDesc tupledesc; - - /* Extract type OID and typmod from tuple. */ - tup = DatumGetHeapTupleHeader(value); - typid = HeapTupleHeaderGetTypeId(tup); - typmod = HeapTupleHeaderGetTypMod(tup); - - /* - * If first time through, or if this isn't the same composite type as last - * time, consider sending a control message, and then look up the - * necessary information for examining the fields. - */ - if (typid != remapinfo->rectypid || typmod != remapinfo->rectypmod) - { - /* Free any old data. 
*/ - if (remapinfo->tupledesc != NULL) - FreeTupleDesc(remapinfo->tupledesc); - /* Is it worth trying to free substructure of the remap tree? */ - if (remapinfo->field_remap != NULL) - pfree(remapinfo->field_remap); - - /* Look up tuple descriptor in typcache. */ - tupledesc = lookup_rowtype_tupdesc(typid, typmod); - - /* - * If this is a transient record type, send the tupledesc in a control - * message. (TQSendRecordInfo is smart enough to do this only once - * per typmod.) - */ - if (typid == RECORDOID) - TQSendRecordInfo(tqueue, typmod, tupledesc); - - /* Figure out whether fields need recursive processing. */ - remapinfo->field_remap = BuildFieldRemapInfo(tupledesc, - tqueue->mycontext); - if (remapinfo->field_remap != NULL) - { - /* - * We need to inspect the record contents, so save a copy of the - * tupdesc. (We could possibly just reference the typcache's - * copy, but then it's problematic when to release the refcount.) - */ - MemoryContext oldcontext = MemoryContextSwitchTo(tqueue->mycontext); - - remapinfo->tupledesc = CreateTupleDescCopy(tupledesc); - MemoryContextSwitchTo(oldcontext); - } - else - { - /* No fields of the record require remapping. */ - remapinfo->tupledesc = NULL; - } - remapinfo->rectypid = typid; - remapinfo->rectypmod = typmod; - - /* Release reference count acquired by lookup_rowtype_tupdesc. */ - DecrTupleDescRefCount(tupledesc); - } - - /* - * If field remapping is required, deform the tuple and examine each - * field. - */ - if (remapinfo->field_remap != NULL) - { - Datum *values; - bool *isnull; - HeapTupleData tdata; - int i; - - /* Deform the tuple so we can check each column within. */ - tupledesc = remapinfo->tupledesc; - values = (Datum *) palloc(tupledesc->natts * sizeof(Datum)); - isnull = (bool *) palloc(tupledesc->natts * sizeof(bool)); - tdata.t_len = HeapTupleHeaderGetDatumLength(tup); - ItemPointerSetInvalid(&(tdata.t_self)); - tdata.t_tableOid = InvalidOid; - tdata.t_data = tup; - heap_deform_tuple(&tdata, tupledesc, values, isnull); - - /* Recursively check each interesting non-NULL attribute. */ - for (i = 0; i < tupledesc->natts; i++) - { - if (!isnull[i] && remapinfo->field_remap[i]) - TQExamine(tqueue, remapinfo->field_remap[i], values[i]); - } - - /* Need not clean up, since we're in a short-lived context. */ - } -} - -/* - * Examine an array datum and send any necessary control messages for - * transient record types contained in it. - */ -static void -TQExamineArray(TQueueDestReceiver *tqueue, ArrayRemapInfo *remapinfo, - Datum value) -{ - ArrayType *arr = DatumGetArrayTypeP(value); - Oid typid = ARR_ELEMTYPE(arr); - Datum *elem_values; - bool *elem_nulls; - int num_elems; - int i; - - /* Deconstruct the array. */ - deconstruct_array(arr, typid, remapinfo->typlen, - remapinfo->typbyval, remapinfo->typalign, - &elem_values, &elem_nulls, &num_elems); - - /* Examine each element. */ - for (i = 0; i < num_elems; i++) - { - if (!elem_nulls[i]) - TQExamine(tqueue, remapinfo->element_remap, elem_values[i]); - } -} - -/* - * Examine a range datum and send any necessary control messages for - * transient record types contained in it. - */ -static void -TQExamineRange(TQueueDestReceiver *tqueue, RangeRemapInfo *remapinfo, - Datum value) -{ - RangeType *range = DatumGetRangeType(value); - RangeBound lower; - RangeBound upper; - bool empty; - - /* Extract the lower and upper bounds. */ - range_deserialize(remapinfo->typcache, range, &lower, &upper, &empty); - - /* Nothing to do for an empty range. 
*/ - if (empty) - return; - - /* Examine each bound, if present. */ - if (!upper.infinite) - TQExamine(tqueue, remapinfo->bound_remap, upper.val); - if (!lower.infinite) - TQExamine(tqueue, remapinfo->bound_remap, lower.val); -} - -/* - * Send tuple descriptor information for a transient typmod, unless we've - * already done so previously. - */ -static void -TQSendRecordInfo(TQueueDestReceiver *tqueue, int32 typmod, TupleDesc tupledesc) -{ - StringInfoData buf; - bool found; - int i; - - /* Initialize hash table if not done yet. */ - if (tqueue->recordhtab == NULL) - { - HASHCTL ctl; - - MemSet(&ctl, 0, sizeof(ctl)); - /* Hash table entries are just typmods */ - ctl.keysize = sizeof(int32); - ctl.entrysize = sizeof(int32); - ctl.hcxt = tqueue->mycontext; - tqueue->recordhtab = hash_create("tqueue sender record type hashtable", - 100, &ctl, - HASH_ELEM | HASH_BLOBS | HASH_CONTEXT); - } - - /* Have we already seen this record type? If not, must report it. */ - hash_search(tqueue->recordhtab, &typmod, HASH_ENTER, &found); - if (found) - return; - - elog(DEBUG3, "sending tqueue control message for record typmod %d", typmod); - - /* If message queue is in data mode, switch to control mode. */ - if (tqueue->mode != TUPLE_QUEUE_MODE_CONTROL) - { - tqueue->mode = TUPLE_QUEUE_MODE_CONTROL; - shm_mq_send(tqueue->queue, sizeof(char), &tqueue->mode, false); - } - - /* Assemble a control message. */ - initStringInfo(&buf); - appendBinaryStringInfo(&buf, (char *) &typmod, sizeof(int32)); - appendBinaryStringInfo(&buf, (char *) &tupledesc->natts, sizeof(int)); - appendBinaryStringInfo(&buf, (char *) &tupledesc->tdhasoid, sizeof(bool)); - for (i = 0; i < tupledesc->natts; i++) - { - appendBinaryStringInfo(&buf, (char *) tupledesc->attrs[i], - sizeof(FormData_pg_attribute)); - } - - /* Send control message. */ - shm_mq_send(tqueue->queue, buf.len, buf.data, false); - - /* We assume it's OK to leak buf because we're in a short-lived context. */ -} - /* * Prepare to receive tuples from executor. */ @@ -578,7 +89,9 @@ tqueueShutdownReceiver(DestReceiver *self) { TQueueDestReceiver *tqueue = (TQueueDestReceiver *) self; - shm_mq_detach(shm_mq_get_queue(tqueue->queue)); + if (tqueue->queue != NULL) + shm_mq_detach(tqueue->queue); + tqueue->queue = NULL; } /* @@ -589,13 +102,9 @@ tqueueDestroyReceiver(DestReceiver *self) { TQueueDestReceiver *tqueue = (TQueueDestReceiver *) self; - if (tqueue->tmpcontext != NULL) - MemoryContextDelete(tqueue->tmpcontext); - if (tqueue->recordhtab != NULL) - hash_destroy(tqueue->recordhtab); - /* Is it worth trying to free substructure of the remap tree? */ - if (tqueue->field_remapinfo != NULL) - pfree(tqueue->field_remapinfo); + /* We probably already detached from queue, but let's be sure */ + if (tqueue->queue != NULL) + shm_mq_detach(tqueue->queue); pfree(self); } @@ -615,13 +124,6 @@ CreateTupleQueueDestReceiver(shm_mq_handle *handle) self->pub.rDestroy = tqueueDestroyReceiver; self->pub.mydest = DestTupleQueue; self->queue = handle; - self->mycontext = CurrentMemoryContext; - self->tmpcontext = NULL; - self->recordhtab = NULL; - self->mode = TUPLE_QUEUE_MODE_DATA; - /* Top-level tupledesc is not known yet */ - self->tupledesc = NULL; - self->field_remapinfo = NULL; return (DestReceiver *) self; } @@ -630,32 +132,24 @@ CreateTupleQueueDestReceiver(shm_mq_handle *handle) * Create a tuple queue reader. 
*/ TupleQueueReader * -CreateTupleQueueReader(shm_mq_handle *handle, TupleDesc tupledesc) +CreateTupleQueueReader(shm_mq_handle *handle) { TupleQueueReader *reader = palloc0(sizeof(TupleQueueReader)); reader->queue = handle; - reader->mycontext = CurrentMemoryContext; - reader->typmodmap = NULL; - reader->mode = TUPLE_QUEUE_MODE_DATA; - reader->tupledesc = tupledesc; - reader->field_remapinfo = BuildFieldRemapInfo(tupledesc, reader->mycontext); return reader; } /* * Destroy a tuple queue reader. + * + * Note: cleaning up the underlying shm_mq is the caller's responsibility. + * We won't access it here, as it may be detached already. */ void DestroyTupleQueueReader(TupleQueueReader *reader) { - shm_mq_detach(shm_mq_get_queue(reader->queue)); - if (reader->typmodmap != NULL) - hash_destroy(reader->typmodmap); - /* Is it worth trying to free substructure of the remap tree? */ - if (reader->field_remapinfo != NULL) - pfree(reader->field_remapinfo); pfree(reader); } @@ -667,9 +161,8 @@ DestroyTupleQueueReader(TupleQueueReader *reader) * is set to true when there are no remaining tuples and otherwise to false. * * The returned tuple, if any, is allocated in CurrentMemoryContext. - * That should be a short-lived (tuple-lifespan) context, because we are - * pretty cavalier about leaking memory in that context if we have to do - * tuple remapping. + * Note that this routine must not leak memory! (We used to allow that, + * but not any more.) * * Even when shm_mq_receive() returns SHM_MQ_WOULD_BLOCK, this can still * accumulate bytes from a partially-read message, so it's useful to call @@ -678,64 +171,29 @@ DestroyTupleQueueReader(TupleQueueReader *reader) HeapTuple TupleQueueReaderNext(TupleQueueReader *reader, bool nowait, bool *done) { + HeapTupleData htup; shm_mq_result result; + Size nbytes; + void *data; if (done != NULL) *done = false; - for (;;) - { - Size nbytes; - void *data; - - /* Attempt to read a message. */ - result = shm_mq_receive(reader->queue, &nbytes, &data, nowait); - - /* If queue is detached, set *done and return NULL. */ - if (result == SHM_MQ_DETACHED) - { - if (done != NULL) - *done = true; - return NULL; - } - - /* In non-blocking mode, bail out if no message ready yet. */ - if (result == SHM_MQ_WOULD_BLOCK) - return NULL; - Assert(result == SHM_MQ_SUCCESS); + /* Attempt to read a message. */ + result = shm_mq_receive(reader->queue, &nbytes, &data, nowait); - /* - * We got a message (see message spec at top of file). Process it. - */ - if (nbytes == 1) - { - /* Mode switch message. */ - reader->mode = ((char *) data)[0]; - } - else if (reader->mode == TUPLE_QUEUE_MODE_DATA) - { - /* Tuple data. */ - return TupleQueueHandleDataMessage(reader, nbytes, data); - } - else if (reader->mode == TUPLE_QUEUE_MODE_CONTROL) - { - /* Control message, describing a transient record type. */ - TupleQueueHandleControlMessage(reader, nbytes, data); - } - else - elog(ERROR, "unrecognized tqueue mode: %d", (int) reader->mode); + /* If queue is detached, set *done and return NULL. */ + if (result == SHM_MQ_DETACHED) + { + if (done != NULL) + *done = true; + return NULL; } -} -/* - * Handle a data message - that is, a tuple - from the remote side. - */ -static HeapTuple -TupleQueueHandleDataMessage(TupleQueueReader *reader, - Size nbytes, - HeapTupleHeader data) -{ - HeapTupleData htup; + /* In non-blocking mode, bail out if no message ready yet. 
*/ + if (result == SHM_MQ_WOULD_BLOCK) + return NULL; + Assert(result == SHM_MQ_SUCCESS); /* * Set up a dummy HeapTupleData pointing to the data from the shm_mq @@ -746,531 +204,5 @@ TupleQueueHandleDataMessage(TupleQueueReader *reader, htup.t_len = nbytes; htup.t_data = data; - /* - * Either just copy the data into a regular palloc'd tuple, or remap it, - * as required. - */ - return TQRemapTuple(reader, - reader->tupledesc, - reader->field_remapinfo, - &htup); -} - -/* - * Copy the given tuple, remapping any transient typmods contained in it. - */ -static HeapTuple -TQRemapTuple(TupleQueueReader *reader, - TupleDesc tupledesc, - TupleRemapInfo **field_remapinfo, - HeapTuple tuple) -{ - Datum *values; - bool *isnull; - bool changed = false; - int i; - - /* - * If no remapping is necessary, just copy the tuple into a single - * palloc'd chunk, as caller will expect. - */ - if (field_remapinfo == NULL) - return heap_copytuple(tuple); - - /* Deform tuple so we can remap record typmods for individual attrs. */ - values = (Datum *) palloc(tupledesc->natts * sizeof(Datum)); - isnull = (bool *) palloc(tupledesc->natts * sizeof(bool)); - heap_deform_tuple(tuple, tupledesc, values, isnull); - - /* Recursively process each interesting non-NULL attribute. */ - for (i = 0; i < tupledesc->natts; i++) - { - if (isnull[i] || field_remapinfo[i] == NULL) - continue; - values[i] = TQRemap(reader, field_remapinfo[i], values[i], &changed); - } - - /* Reconstruct the modified tuple, if anything was modified. */ - if (changed) - return heap_form_tuple(tupledesc, values, isnull); - else - return heap_copytuple(tuple); -} - -/* - * Process the given datum and replace any transient record typmods - * contained in it. Set *changed to TRUE if we actually changed the datum. - * - * remapinfo is previously-computed remapping info about the datum's type. - * - * This function just dispatches based on the remap class. - */ -static Datum -TQRemap(TupleQueueReader *reader, TupleRemapInfo *remapinfo, - Datum value, bool *changed) -{ - /* This is recursive, so it could be driven to stack overflow. */ - check_stack_depth(); - - switch (remapinfo->remapclass) - { - case TQUEUE_REMAP_ARRAY: - return TQRemapArray(reader, &remapinfo->u.arr, value, changed); - - case TQUEUE_REMAP_RANGE: - return TQRemapRange(reader, &remapinfo->u.rng, value, changed); - - case TQUEUE_REMAP_RECORD: - return TQRemapRecord(reader, &remapinfo->u.rec, value, changed); - } - - elog(ERROR, "unrecognized tqueue remap class: %d", - (int) remapinfo->remapclass); - return (Datum) 0; -} - -/* - * Process the given array datum and replace any transient record typmods - * contained in it. Set *changed to TRUE if we actually changed the datum. - */ -static Datum -TQRemapArray(TupleQueueReader *reader, ArrayRemapInfo *remapinfo, - Datum value, bool *changed) -{ - ArrayType *arr = DatumGetArrayTypeP(value); - Oid typid = ARR_ELEMTYPE(arr); - bool element_changed = false; - Datum *elem_values; - bool *elem_nulls; - int num_elems; - int i; - - /* Deconstruct the array. */ - deconstruct_array(arr, typid, remapinfo->typlen, - remapinfo->typbyval, remapinfo->typalign, - &elem_values, &elem_nulls, &num_elems); - - /* Remap each element. */ - for (i = 0; i < num_elems; i++) - { - if (!elem_nulls[i]) - elem_values[i] = TQRemap(reader, - remapinfo->element_remap, - elem_values[i], - &element_changed); - } - - if (element_changed) - { - /* Reconstruct and return the array. 
*/ - *changed = true; - arr = construct_md_array(elem_values, elem_nulls, - ARR_NDIM(arr), ARR_DIMS(arr), ARR_LBOUND(arr), - typid, remapinfo->typlen, - remapinfo->typbyval, remapinfo->typalign); - return PointerGetDatum(arr); - } - - /* Else just return the value as-is. */ - return value; -} - -/* - * Process the given range datum and replace any transient record typmods - * contained in it. Set *changed to TRUE if we actually changed the datum. - */ -static Datum -TQRemapRange(TupleQueueReader *reader, RangeRemapInfo *remapinfo, - Datum value, bool *changed) -{ - RangeType *range = DatumGetRangeType(value); - bool bound_changed = false; - RangeBound lower; - RangeBound upper; - bool empty; - - /* Extract the lower and upper bounds. */ - range_deserialize(remapinfo->typcache, range, &lower, &upper, &empty); - - /* Nothing to do for an empty range. */ - if (empty) - return value; - - /* Remap each bound, if present. */ - if (!upper.infinite) - upper.val = TQRemap(reader, remapinfo->bound_remap, - upper.val, &bound_changed); - if (!lower.infinite) - lower.val = TQRemap(reader, remapinfo->bound_remap, - lower.val, &bound_changed); - - if (bound_changed) - { - /* Reserialize. */ - *changed = true; - range = range_serialize(remapinfo->typcache, &lower, &upper, empty); - return RangeTypeGetDatum(range); - } - - /* Else just return the value as-is. */ - return value; -} - -/* - * Process the given record datum and replace any transient record typmods - * contained in it. Set *changed to TRUE if we actually changed the datum. - */ -static Datum -TQRemapRecord(TupleQueueReader *reader, RecordRemapInfo *remapinfo, - Datum value, bool *changed) -{ - HeapTupleHeader tup; - Oid typid; - int32 typmod; - bool changed_typmod; - TupleDesc tupledesc; - - /* Extract type OID and typmod from tuple. */ - tup = DatumGetHeapTupleHeader(value); - typid = HeapTupleHeaderGetTypeId(tup); - typmod = HeapTupleHeaderGetTypMod(tup); - - /* - * If first time through, or if this isn't the same composite type as last - * time, identify the required typmod mapping, and then look up the - * necessary information for processing the fields. - */ - if (typid != remapinfo->rectypid || typmod != remapinfo->rectypmod) - { - /* Free any old data. */ - if (remapinfo->tupledesc != NULL) - FreeTupleDesc(remapinfo->tupledesc); - /* Is it worth trying to free substructure of the remap tree? */ - if (remapinfo->field_remap != NULL) - pfree(remapinfo->field_remap); - - /* If transient record type, look up matching local typmod. */ - if (typid == RECORDOID) - { - RecordTypmodMap *mapent; - - Assert(reader->typmodmap != NULL); - mapent = hash_search(reader->typmodmap, &typmod, - HASH_FIND, NULL); - if (mapent == NULL) - elog(ERROR, "tqueue received unrecognized remote typmod %d", - typmod); - remapinfo->localtypmod = mapent->localtypmod; - } - else - remapinfo->localtypmod = -1; - - /* Look up tuple descriptor in typcache. */ - tupledesc = lookup_rowtype_tupdesc(typid, remapinfo->localtypmod); - - /* Figure out whether fields need recursive processing. */ - remapinfo->field_remap = BuildFieldRemapInfo(tupledesc, - reader->mycontext); - if (remapinfo->field_remap != NULL) - { - /* - * We need to inspect the record contents, so save a copy of the - * tupdesc. (We could possibly just reference the typcache's - * copy, but then it's problematic when to release the refcount.) 
- */ - MemoryContext oldcontext = MemoryContextSwitchTo(reader->mycontext); - - remapinfo->tupledesc = CreateTupleDescCopy(tupledesc); - MemoryContextSwitchTo(oldcontext); - } - else - { - /* No fields of the record require remapping. */ - remapinfo->tupledesc = NULL; - } - remapinfo->rectypid = typid; - remapinfo->rectypmod = typmod; - - /* Release reference count acquired by lookup_rowtype_tupdesc. */ - DecrTupleDescRefCount(tupledesc); - } - - /* If transient record, replace remote typmod with local typmod. */ - if (typid == RECORDOID && typmod != remapinfo->localtypmod) - { - typmod = remapinfo->localtypmod; - changed_typmod = true; - } - else - changed_typmod = false; - - /* - * If we need to change the typmod, or if there are any potentially - * remappable fields, replace the tuple. - */ - if (changed_typmod || remapinfo->field_remap != NULL) - { - HeapTupleData htup; - HeapTuple atup; - - /* For now, assume we always need to change the tuple in this case. */ - *changed = true; - - /* Copy tuple, possibly remapping contained fields. */ - ItemPointerSetInvalid(&htup.t_self); - htup.t_tableOid = InvalidOid; - htup.t_len = HeapTupleHeaderGetDatumLength(tup); - htup.t_data = tup; - atup = TQRemapTuple(reader, - remapinfo->tupledesc, - remapinfo->field_remap, - &htup); - - /* Apply the correct labeling for a local Datum. */ - HeapTupleHeaderSetTypeId(atup->t_data, typid); - HeapTupleHeaderSetTypMod(atup->t_data, typmod); - HeapTupleHeaderSetDatumLength(atup->t_data, htup.t_len); - - /* And return the results. */ - return HeapTupleHeaderGetDatum(atup->t_data); - } - - /* Else just return the value as-is. */ - return value; -} - -/* - * Handle a control message from the tuple queue reader. - * - * Control messages are sent when the remote side is sending tuples that - * contain transient record types. We need to arrange to bless those - * record types locally and translate between remote and local typmods. - */ -static void -TupleQueueHandleControlMessage(TupleQueueReader *reader, Size nbytes, - char *data) -{ - int32 remotetypmod; - int natts; - bool hasoid; - Size offset = 0; - Form_pg_attribute *attrs; - TupleDesc tupledesc; - RecordTypmodMap *mapent; - bool found; - int i; - - /* Extract remote typmod. */ - memcpy(&remotetypmod, &data[offset], sizeof(int32)); - offset += sizeof(int32); - - /* Extract attribute count. */ - memcpy(&natts, &data[offset], sizeof(int)); - offset += sizeof(int); - - /* Extract hasoid flag. */ - memcpy(&hasoid, &data[offset], sizeof(bool)); - offset += sizeof(bool); - - /* Extract attribute details. The tupledesc made here is just transient. */ - attrs = palloc(natts * sizeof(Form_pg_attribute)); - for (i = 0; i < natts; i++) - { - attrs[i] = palloc(sizeof(FormData_pg_attribute)); - memcpy(attrs[i], &data[offset], sizeof(FormData_pg_attribute)); - offset += sizeof(FormData_pg_attribute); - } - - /* We should have read the whole message. */ - Assert(offset == nbytes); - - /* Construct TupleDesc, and assign a local typmod. */ - tupledesc = CreateTupleDesc(natts, hasoid, attrs); - tupledesc = BlessTupleDesc(tupledesc); - - /* Create mapping hashtable if it doesn't exist already. */ - if (reader->typmodmap == NULL) - { - HASHCTL ctl; - - MemSet(&ctl, 0, sizeof(ctl)); - ctl.keysize = sizeof(int32); - ctl.entrysize = sizeof(RecordTypmodMap); - ctl.hcxt = reader->mycontext; - reader->typmodmap = hash_create("tqueue receiver record type hashtable", - 100, &ctl, - HASH_ELEM | HASH_BLOBS | HASH_CONTEXT); - } - - /* Create map entry. 
*/ - mapent = hash_search(reader->typmodmap, &remotetypmod, HASH_ENTER, - &found); - if (found) - elog(ERROR, "duplicate tqueue control message for typmod %d", - remotetypmod); - mapent->localtypmod = tupledesc->tdtypmod; - - elog(DEBUG3, "tqueue mapping remote typmod %d to local typmod %d", - remotetypmod, mapent->localtypmod); -} - -/* - * Build remap info for the specified data type, storing it in mycontext. - * Returns NULL if neither the type nor any subtype could require remapping. - */ -static TupleRemapInfo * -BuildTupleRemapInfo(Oid typid, MemoryContext mycontext) -{ - HeapTuple tup; - Form_pg_type typ; - - /* This is recursive, so it could be driven to stack overflow. */ - check_stack_depth(); - -restart: - tup = SearchSysCache1(TYPEOID, ObjectIdGetDatum(typid)); - if (!HeapTupleIsValid(tup)) - elog(ERROR, "cache lookup failed for type %u", typid); - typ = (Form_pg_type) GETSTRUCT(tup); - - /* Look through domains to underlying base type. */ - if (typ->typtype == TYPTYPE_DOMAIN) - { - typid = typ->typbasetype; - ReleaseSysCache(tup); - goto restart; - } - - /* If it's a true array type, deal with it that way. */ - if (OidIsValid(typ->typelem) && typ->typlen == -1) - { - typid = typ->typelem; - ReleaseSysCache(tup); - return BuildArrayRemapInfo(typid, mycontext); - } - - /* Similarly, deal with ranges appropriately. */ - if (typ->typtype == TYPTYPE_RANGE) - { - ReleaseSysCache(tup); - return BuildRangeRemapInfo(typid, mycontext); - } - - /* - * If it's a composite type (including RECORD), set up for remapping. We - * don't attempt to determine the status of subfields here, since we do - * not have enough information yet; just mark everything invalid. - */ - if (typ->typtype == TYPTYPE_COMPOSITE || typid == RECORDOID) - { - TupleRemapInfo *remapinfo; - - remapinfo = (TupleRemapInfo *) - MemoryContextAlloc(mycontext, sizeof(TupleRemapInfo)); - remapinfo->remapclass = TQUEUE_REMAP_RECORD; - remapinfo->u.rec.rectypid = InvalidOid; - remapinfo->u.rec.rectypmod = -1; - remapinfo->u.rec.localtypmod = -1; - remapinfo->u.rec.tupledesc = NULL; - remapinfo->u.rec.field_remap = NULL; - ReleaseSysCache(tup); - return remapinfo; - } - - /* Nothing else can possibly need remapping attention. */ - ReleaseSysCache(tup); - return NULL; -} - -static TupleRemapInfo * -BuildArrayRemapInfo(Oid elemtypid, MemoryContext mycontext) -{ - TupleRemapInfo *remapinfo; - TupleRemapInfo *element_remapinfo; - - /* See if element type requires remapping. */ - element_remapinfo = BuildTupleRemapInfo(elemtypid, mycontext); - /* If not, the array doesn't either. */ - if (element_remapinfo == NULL) - return NULL; - /* OK, set up to remap the array. */ - remapinfo = (TupleRemapInfo *) - MemoryContextAlloc(mycontext, sizeof(TupleRemapInfo)); - remapinfo->remapclass = TQUEUE_REMAP_ARRAY; - get_typlenbyvalalign(elemtypid, - &remapinfo->u.arr.typlen, - &remapinfo->u.arr.typbyval, - &remapinfo->u.arr.typalign); - remapinfo->u.arr.element_remap = element_remapinfo; - return remapinfo; -} - -static TupleRemapInfo * -BuildRangeRemapInfo(Oid rngtypid, MemoryContext mycontext) -{ - TupleRemapInfo *remapinfo; - TupleRemapInfo *bound_remapinfo; - TypeCacheEntry *typcache; - - /* - * Get range info from the typcache. We assume this pointer will stay - * valid for the duration of the query. - */ - typcache = lookup_type_cache(rngtypid, TYPECACHE_RANGE_INFO); - if (typcache->rngelemtype == NULL) - elog(ERROR, "type %u is not a range type", rngtypid); - - /* See if range bound type requires remapping. 
*/ - bound_remapinfo = BuildTupleRemapInfo(typcache->rngelemtype->type_id, - mycontext); - /* If not, the range doesn't either. */ - if (bound_remapinfo == NULL) - return NULL; - /* OK, set up to remap the range. */ - remapinfo = (TupleRemapInfo *) - MemoryContextAlloc(mycontext, sizeof(TupleRemapInfo)); - remapinfo->remapclass = TQUEUE_REMAP_RANGE; - remapinfo->u.rng.typcache = typcache; - remapinfo->u.rng.bound_remap = bound_remapinfo; - return remapinfo; -} - -/* - * Build remap info for fields of the type described by the given tupdesc. - * Returns an array of TupleRemapInfo pointers, or NULL if no field - * requires remapping. Data is allocated in mycontext. - */ -static TupleRemapInfo ** -BuildFieldRemapInfo(TupleDesc tupledesc, MemoryContext mycontext) -{ - TupleRemapInfo **remapinfo; - bool noop = true; - int i; - - /* Recursively determine the remapping status of each field. */ - remapinfo = (TupleRemapInfo **) - MemoryContextAlloc(mycontext, - tupledesc->natts * sizeof(TupleRemapInfo *)); - for (i = 0; i < tupledesc->natts; i++) - { - Form_pg_attribute attr = tupledesc->attrs[i]; - - if (attr->attisdropped) - { - remapinfo[i] = NULL; - continue; - } - remapinfo[i] = BuildTupleRemapInfo(attr->atttypid, mycontext); - if (remapinfo[i] != NULL) - noop = false; - } - - /* If no fields require remapping, report that by returning NULL. */ - if (noop) - { - pfree(remapinfo); - remapinfo = NULL; - } - - return remapinfo; + return heap_copytuple(&htup); } diff --git a/src/backend/executor/tstoreReceiver.c b/src/backend/executor/tstoreReceiver.c index eda38b1de1..d02ca3afd1 100644 --- a/src/backend/executor/tstoreReceiver.c +++ b/src/backend/executor/tstoreReceiver.c @@ -9,7 +9,7 @@ * data even if the underlying table is dropped. * * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * IDENTIFICATION @@ -49,7 +49,6 @@ tstoreStartupReceiver(DestReceiver *self, int operation, TupleDesc typeinfo) { TStoreState *myState = (TStoreState *) self; bool needtoast = false; - Form_pg_attribute *attrs = typeinfo->attrs; int natts = typeinfo->natts; int i; @@ -58,9 +57,11 @@ tstoreStartupReceiver(DestReceiver *self, int operation, TupleDesc typeinfo) { for (i = 0; i < natts; i++) { - if (attrs[i]->attisdropped) + Form_pg_attribute attr = TupleDescAttr(typeinfo, i); + + if (attr->attisdropped) continue; - if (attrs[i]->attlen == -1) + if (attr->attlen == -1) { needtoast = true; break; @@ -109,7 +110,6 @@ tstoreReceiveSlot_detoast(TupleTableSlot *slot, DestReceiver *self) { TStoreState *myState = (TStoreState *) self; TupleDesc typeinfo = slot->tts_tupleDescriptor; - Form_pg_attribute *attrs = typeinfo->attrs; int natts = typeinfo->natts; int nfree; int i; @@ -127,10 +127,9 @@ tstoreReceiveSlot_detoast(TupleTableSlot *slot, DestReceiver *self) for (i = 0; i < natts; i++) { Datum val = slot->tts_values[i]; + Form_pg_attribute attr = TupleDescAttr(typeinfo, i); - if (!attrs[i]->attisdropped && - attrs[i]->attlen == -1 && - !slot->tts_isnull[i]) + if (!attr->attisdropped && attr->attlen == -1 && !slot->tts_isnull[i]) { if (VARATT_IS_EXTERNAL(DatumGetPointer(val))) { diff --git a/src/backend/foreign/foreign.c b/src/backend/foreign/foreign.c index a113bf540d..a0bcc042ce 100644 --- a/src/backend/foreign/foreign.c +++ b/src/backend/foreign/foreign.c @@ -3,7 +3,7 @@ * foreign.c * support for foreign-data wrappers, servers and user mappings. 
 * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * * IDENTIFICATION * src/backend/foreign/foreign.c @@ -428,7 +428,7 @@ GetFdwRoutineForRelation(Relation relation, bool makecopy) /* * IsImportableForeignTable - filter table names for IMPORT FOREIGN SCHEMA * - * Returns TRUE if given table name should be imported according to the + * Returns true if given table name should be imported according to the * statement's import filter options. */ bool @@ -560,7 +560,7 @@ struct ConnectionOption * * The list is small - don't bother with bsearch if it stays so. */ -static struct ConnectionOption libpq_conninfo_options[] = { +static const struct ConnectionOption libpq_conninfo_options[] = { {"authtype", ForeignServerRelationId}, {"service", ForeignServerRelationId}, {"user", UserMappingRelationId}, @@ -587,7 +587,7 @@ static struct ConnectionOption libpq_conninfo_options[] = { static bool is_conninfo_option(const char *option, Oid context) { - struct ConnectionOption *opt; + const struct ConnectionOption *opt; for (opt = libpq_conninfo_options; opt->optname; opt++) if (context == opt->optcontext && strcmp(opt->optname, option) == 0) @@ -622,7 +622,7 @@ postgresql_fdw_validator(PG_FUNCTION_ARGS) if (!is_conninfo_option(def->defname, catalog)) { - struct ConnectionOption *opt; + const struct ConnectionOption *opt; StringInfoData buf; /* @@ -712,7 +712,7 @@ get_foreign_server_oid(const char *servername, bool missing_ok) * path list in RelOptInfo is anyway sorted by total cost we are likely to * choose the most efficient path, which is all for the best. */ -extern Path * +Path * GetExistingLocalJoinPath(RelOptInfo *joinrel) { ListCell *lc; diff --git a/src/backend/jit/Makefile b/src/backend/jit/Makefile new file mode 100644 index 0000000000..cdb9009ec1 --- /dev/null +++ b/src/backend/jit/Makefile @@ -0,0 +1,22 @@ +#------------------------------------------------------------------------- +# +# Makefile-- +# Makefile for JIT code that's provider independent. +# +# Note that the LLVM JIT provider is recursed into by src/Makefile, +# not from here. +# +# IDENTIFICATION +# src/backend/jit/Makefile +# +#------------------------------------------------------------------------- + +subdir = src/backend/jit +top_builddir = ../../.. +include $(top_builddir)/src/Makefile.global + +override CPPFLAGS += -DDLSUFFIX=\"$(DLSUFFIX)\" + +OBJS = jit.o + +include $(top_srcdir)/src/backend/common.mk diff --git a/src/backend/jit/README b/src/backend/jit/README new file mode 100644 index 0000000000..e2fac8558e --- /dev/null +++ b/src/backend/jit/README @@ -0,0 +1,295 @@ +What is Just-in-Time Compilation? +================================= + +Just-in-Time compilation (JIT) is the process of turning some form of +interpreted program evaluation into a native program, and doing so at +runtime. + +For example, instead of using a facility that can evaluate arbitrary +SQL expressions to evaluate an SQL predicate like WHERE a.col = 3, it +is possible to generate a function that can be natively executed by +the CPU that just handles that expression, yielding a speedup. + +That this is done at query execution time, possibly even only in cases +where the relevant task is done a number of times, makes it JIT, +rather than ahead-of-time (AOT). Given the way JIT compilation is used +in PostgreSQL, the lines between interpretation, AOT and JIT are +somewhat blurry. 
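To make that concrete, the sketch below contrasts a generic, interpreted evaluator with the kind of specialized function JIT compilation can emit for a predicate like WHERE a.col = 3. It is only a schematic C illustration: ExampleExpr, example_eval() and the flat array-of-long row representation are hypothetical, not PostgreSQL's actual ExprState/ExprEvalStep machinery.

    /* Generic evaluation: dispatch on every expression node, for any expression. */
    typedef struct ExampleExpr
    {
        enum { EX_VAR, EX_CONST, EX_EQ } kind;
        int         attnum;         /* EX_VAR: which column to read */
        long        constval;       /* EX_CONST: the literal value */
        struct ExampleExpr *left;   /* EX_EQ: operands */
        struct ExampleExpr *right;
    } ExampleExpr;

    static long
    example_eval(const ExampleExpr *e, const long *row)
    {
        switch (e->kind)
        {
            case EX_VAR:
                return row[e->attnum];
            case EX_CONST:
                return e->constval;
            case EX_EQ:
                return example_eval(e->left, row) == example_eval(e->right, row);
        }
        return 0;
    }

    /* What a JIT-emitted function for "a.col = 3" effectively collapses to. */
    static long
    example_eval_col_eq_3(const long *row)
    {
        return row[0] == 3;
    }

The generic path pays for the dispatch, recursion and pointer chasing on every row; the specialized function is what removing those indirect jumps and branches amounts to.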
+ +Note that the interpreted program turned into a native program does +not necessarily have to be a program in the classical sense. E.g. it +is highly beneficial to JIT compile tuple deforming into a native +function just handling a specific type of table, despite tuple +deforming not commonly being understood as a "program". + + +Why JIT? +======== + +Parts of PostgreSQL are commonly bottlenecked by comparatively small +pieces of CPU intensive code. In a number of cases that is because the +relevant code has to be very generic (e.g. handling arbitrary SQL +level expressions, over arbitrary tables, with arbitrary extensions +installed). This often leads to a large number of indirect jumps and +unpredictable branches, and generally a high number of instructions +for a given task. E.g. just evaluating an expression comparing a +column in a database to an integer ends up needing several hundred +cycles. + +By generating native code large numbers of indirect jumps can be +removed by either making them into direct branches (e.g. replacing the +indirect call to an SQL operator's implementation with a direct call +to that function), or by removing it entirely (e.g. by evaluating the +branch at compile time because the input is constant). Similarly a lot +of branches can be entirely removed (e.g. by again evaluating the +branch at compile time because the input is constant). The latter is +particularly beneficial for removing branches during tuple deforming. + + +How to JIT +========== + +PostgreSQL, by default, uses LLVM to perform JIT. LLVM was chosen +because it is developed by several large corporations and therefore +unlikely to be discontinued, because it has a license compatible with +PostgreSQL, and because its IR can be generated from C using the Clang +compiler. + + +Shared Library Separation +------------------------- + +To avoid the main PostgreSQL binary directly depending on LLVM, which +would prevent LLVM support being independently installed by OS package +managers, the LLVM dependent code is located in a shared library that +is loaded on-demand. + +An additional benefit of doing so is that it is relatively easy to +evaluate JIT compilation that does not use LLVM, by changing out the +shared library used to provide JIT compilation. + +To achieve this, code intending to perform JIT (e.g. expression evaluation) +calls an LLVM independent wrapper located in jit.c to do so. If the +shared library providing JIT support can be loaded (i.e. PostgreSQL was +compiled with LLVM support and the shared library is installed), the task +of JIT compiling an expression gets handed off to the shared library. This +obviously requires that the function in jit.c is allowed to fail in case +no JIT provider can be loaded. + +Which shared library is loaded is determined by the jit_provider GUC, +defaulting to "llvmjit". + +Cloistering code performing JIT into a shared library unfortunately +also means that code doing JIT compilation for various parts of code +has to be located separately from the code doing so without +JIT. E.g. the JIT version of execExprInterp.c is located in jit/llvm/ +rather than executor/. + + +JIT Context +----------- + +For performance and convenience reasons it is useful to allow JITed +functions to be emitted and deallocated together. It is e.g. very +common to create a number of functions at query initialization time, +use them during query execution, and then deallocate all of them +together at the end of the query. + +Lifetimes of JITed functions are managed via JITContext. 
Exactly one +such context should be created for work in which all created JITed +functions should have the same lifetime. E.g. there's exactly one +JITContext for each query executed, in the query's EState. Only the +release of a JITContext is exposed to the provider independent +facility, as the creation of one is done on-demand by the JIT +implementations. + +Emitting individual functions separately is more expensive than +emitting several functions at once, and emitting them together can +provide additional optimization opportunities. To facilitate that, the +LLVM provider separates defining functions from optimizing and +emitting functions in an executable manner. + +Creating functions into the current mutable module (a module +is essentially LLVM's equivalent of a translation unit in C) is done +using + extern LLVMModuleRef llvm_mutable_module(LLVMJitContext *context); +into which the caller can then emit as much code using the LLVM APIs as it +wants. Whenever a function actually needs to be called, + extern void *llvm_get_function(LLVMJitContext *context, const char *funcname); +returns a pointer to it. + +E.g. in the expression evaluation case this setup allows most +functions in a query to be emitted during ExecInitNode(), delaying the +function emission until the first time a function is actually +used. + + +Error Handling +-------------- + +There are two aspects of error handling. Firstly, generated (LLVM IR) +and emitted functions (mmap()ed segments) need to be cleaned up both +after a successful query execution and after an error. This is done by +registering each created JITContext with the current resource owner, +and cleaning it up on error / end of transaction. If it is desirable +to release resources earlier, jit_release_context() can be used. + +The second, less pretty, aspect of error handling is OOM handling +inside LLVM itself. The above resowner-based mechanism takes care of +cleaning up emitted code upon ERROR, but there's also the chance that +LLVM itself runs out of memory. LLVM by default does *not* use any C++ +exceptions. Its allocations are primarily funneled through the +standard "new" handlers, and some direct use of malloc() and +mmap(). For the former a 'new handler' exists: +http://en.cppreference.com/w/cpp/memory/new/set_new_handler +For the latter LLVM provides callbacks that get called upon failure +(unfortunately mmap() failures are treated as fatal rather than OOM errors). +What we've chosen to do for now is have two functions that LLVM-using code +must use: +extern void llvm_enter_fatal_on_oom(void); +extern void llvm_leave_fatal_on_oom(void); +before interacting with LLVM code. + +When a libstdc++ new or LLVM error occurs, the handlers set up by the +above functions trigger a FATAL error. We have to use FATAL rather +than ERROR, as we *cannot* reliably throw ERROR inside a foreign +library without risking corrupting its internal state. + +Users of the above sections do *not* have to use PG_TRY/CATCH blocks; +the handlers are instead reset at the toplevel sigsetjmp() level. + +Using a relatively small enter/leave protected section of code, rather +than setting up these handlers globally, avoids negative interactions +with extensions that might use C++, such as PostGIS. As LLVM code +generation should never execute arbitrary code, just setting these +handlers temporarily ought to suffice. 
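The following is a rough sketch of how provider-internal code is expected to combine the pieces above (the mutable module, deferred emission, and the fatal-on-OOM bracket). The helper name example_emit_one_function() is hypothetical, the IR-emission step is elided, and jit/llvmjit.h (included by llvmjit.c in this patch) is assumed to declare the functions used.

    #include "postgres.h"

    #include <llvm-c/Core.h>

    #include "jit/llvmjit.h"

    /* Hypothetical provider-internal helper; not part of the actual provider. */
    static void *
    example_emit_one_function(LLVMJitContext *context)
    {
        LLVMModuleRef mod;
        char       *funcname;
        void       *fn;

        /* All interaction with LLVM happens inside a fatal-on-OOM section. */
        llvm_enter_fatal_on_oom();

        /* Get (or lazily create) the context's current mutable module. */
        mod = llvm_mutable_module(context);

        /* Pick a non-conflicting name for the new function. */
        funcname = llvm_expand_funcname(context, "example");

        /* ... emit IR for the new function into "mod" via the LLVM C API ... */
        (void) mod;

        /*
         * Looking the symbol up forces any pending module to be optimized and
         * emitted first.
         */
        fn = llvm_get_function(context, funcname);

        llvm_leave_fatal_on_oom();

        return fn;
    }

In the real expression-compilation path the lookup is deferred until the compiled expression is first executed, so that all functions created for a query can be optimized and emitted together.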
+ + +Type Synchronization +-------------------- + +To be able to generate code that can perform tasks done by "interpreted" +PostgreSQL, it obviously is required that code generation knows about at +least a few PostgreSQL types. While it is possible to inform LLVM about +type definitions by recreating them manually in C code, that is failure +prone and labor intensive. + +Instead there is one small file (llvmjit_types.c) which references each of +the types required for JITing. That file is translated to bitcode at +compile time, and loaded when LLVM is initialized in a backend. + +That works very well to synchronize the type definition, but unfortunately +it does *not* synchronize offsets as the IR level representation doesn't +know field names. Instead, required offsets are maintained as defines in +the original struct definition, like so: +#define FIELDNO_TUPLETABLESLOT_NVALID 9 + int tts_nvalid; /* # of valid values in tts_values */ +While that still needs to be defined, it's only required for a +relatively small number of fields, and it's bunched together with the +struct definition, so it's easily kept synchronized. + + +Inlining +-------- + +One big advantage of JITing expressions is that it can significantly +reduce the overhead of PostgreSQL's extensible function/operator +mechanism, by inlining the body of called functions/operators. + +It obviously is undesirable to maintain a second implementation of +commonly used functions, just for inlining purposes. Instead we take +advantage of the fact that the Clang compiler can emit LLVM IR. + +The ability to do so allows us to get the LLVM IR for all operators +(e.g. int8eq, float8pl etc), without maintaining two copies. These +bitcode files get installed into the server's + $pkglibdir/bitcode/postgres/ +Using existing LLVM functionality (for parallel LTO compilation), +additionally an index over these is stored to +$pkglibdir/bitcode/postgres.index.bc + +Similarly extensions can install code into + $pkglibdir/bitcode/[extension]/ +accompanied by + $pkglibdir/bitcode/[extension].index.bc + +just alongside the actual library. An extension's index will be used +to look up symbols when located in the corresponding shared +library. Symbols that are used inside the extension, when inlined, +will be first looked up in the main binary and then the extension's. + + +Caching +------- + +Currently it is not yet possible to cache generated functions, even +though that'd be desirable from a performance point of view. The +problem is that the generated functions commonly contain pointers into +per-execution memory. The expression evaluation machinery needs to +be redesigned a bit to avoid that. Basically all per-execution memory +needs to be referenced as an offset to one block of memory stored in +an ExprState, rather than absolute pointers into memory. + +Once that is addressed, adding an LRU cache that's keyed by the +generated LLVM IR will allow optimized functions to be used even for +faster queries. + +A longer term project is to move expression compilation to the planner +stage, allowing e.g. compiled expressions to be tied to prepared +statements. + +An even more advanced approach would be to use JIT with few +optimizations initially, and build an optimized version in the +background. But that's even further off. + + +What to JIT +=========== + +Currently expression evaluation and tuple deforming are JITed. 
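For the tuple-deforming side, a similarly schematic sketch of what the specialization buys: the generic loop has to branch on per-column properties for every row, while a deformer generated for one known layout of two fixed-width, NOT NULL columns reduces to straight-line loads. The types and byte layout below are hypothetical, not the actual heap tuple format or slot_deform_tuple() logic.

    /* Hypothetical per-column metadata; the real code works from a TupleDesc. */
    typedef struct ExampleColumn
    {
        int         offset;         /* byte offset of the column's data */
        int         length;         /* attribute length in bytes: 4 or 8 here */
    } ExampleColumn;

    /* Generic deforming: per-row, per-column branching on column properties. */
    static void
    example_deform_generic(const char *tupdata, const ExampleColumn *cols,
                           int ncols, long *values)
    {
        for (int i = 0; i < ncols; i++)
        {
            if (cols[i].length == 4)
                values[i] = *(const int *) (tupdata + cols[i].offset);
            else
                values[i] = *(const long *) (tupdata + cols[i].offset);
        }
    }

    /* Specialized deformer for one known layout: offsets and widths baked in. */
    static void
    example_deform_two_int4(const char *tupdata, long *values)
    {
        values[0] = *(const int *) (tupdata + 0);
        values[1] = *(const int *) (tupdata + 4);
    }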
Expression evaluation and tuple deforming +were chosen because they commonly are major CPU bottlenecks in +analytics queries, but are by no means the only potentially beneficial cases. + +For JITing to be beneficial a piece of code first and foremost has to +be a CPU bottleneck. But also importantly, JITing can only be +beneficial if overhead can be removed by doing so. E.g. in the tuple +deforming case the knowledge about the number of columns and their +types can remove a significant number of branches, and in the +expression evaluation case a lot of indirect jumps/calls can be +removed. If neither of these is the case, JITing is a waste of +resources. + +Future avenues for JITing are tuple sorting, COPY parsing/output +generation, and later compiling larger parts of queries. + + +When to JIT +=========== + +Currently there are a number of GUCs that influence JITing: + +- jit_above_cost = -1, 0-DBL_MAX - all queries with a higher total cost + get JITed, *without* optimization (expensive part), corresponding to + -O0. This commonly already results in significant speedups if + expression/deforming is a bottleneck (removing dynamic branches + mostly). +- jit_optimize_above_cost = -1, 0-DBL_MAX - all queries with a higher total cost + get JITed, *with* optimization (expensive part). +- jit_inline_above_cost = -1, 0-DBL_MAX - inlining is tried if query has + higher cost. + +Whenever a query's total cost is above these limits, JITing is +performed. + +Alternative costing models, e.g. by generating separate paths for +parts of a query with lower cpu_* costs, are also a possibility, but +it's doubtful the overhead of doing so is sufficient. Another +alternative would be to count the number of times individual +expressions are estimated to be evaluated, and perform JITing of these +individual expressions. + +The obvious-seeming approach of JITing expressions individually after +a number of executions turns out not to work too well. Primarily +because emitting many small functions individually has significant +overhead. Secondarily because the time until JITing occurs causes +relative slowdowns that eat into the gain of JIT compilation. diff --git a/src/backend/jit/jit.c b/src/backend/jit/jit.c new file mode 100644 index 0000000000..5d1f2e57be --- /dev/null +++ b/src/backend/jit/jit.c @@ -0,0 +1,211 @@ +/*------------------------------------------------------------------------- + * + * jit.c + * Provider independent JIT infrastructure. + * + * Code related to loading JIT providers, redirecting calls into JIT providers + * and error handling. No code specific to a specific JIT implementation + * should end up here. 
+ * + * + * Copyright (c) 2016-2018, PostgreSQL Global Development Group + * + * IDENTIFICATION + * src/backend/jit/jit.c + * + *------------------------------------------------------------------------- + */ +#include "postgres.h" + + +#include +#include +#include + + +#include "fmgr.h" +#include "executor/execExpr.h" +#include "jit/jit.h" +#include "miscadmin.h" +#include "utils/resowner_private.h" +#include "utils/fmgrprotos.h" + + +/* GUCs */ +bool jit_enabled = true; +char *jit_provider = NULL; +bool jit_debugging_support = false; +bool jit_dump_bitcode = false; +bool jit_expressions = true; +bool jit_profiling_support = false; +bool jit_tuple_deforming = true; +double jit_above_cost = 100000; +double jit_inline_above_cost = 500000; +double jit_optimize_above_cost = 500000; + +static JitProviderCallbacks provider; +static bool provider_successfully_loaded = false; +static bool provider_failed_loading = false; + + +static bool provider_init(void); +static bool file_exists(const char *name); + + +/* + * SQL level function returning whether JIT is available in the current + * backend. Will attempt to load JIT provider if necessary. + */ +Datum +pg_jit_available(PG_FUNCTION_ARGS) +{ + PG_RETURN_BOOL(provider_init()); +} + + +/* + * Return whether a JIT provider has successfully been loaded, caching the + * result. + */ +static bool +provider_init(void) +{ + char path[MAXPGPATH]; + JitProviderInit init; + + /* don't even try to load if not enabled */ + if (!jit_enabled) + return false; + + /* + * Don't retry loading after failing - attempting to load JIT provider + * isn't cheap. + */ + if (provider_failed_loading) + return false; + if (provider_successfully_loaded) + return true; + + /* + * Check whether shared library exists. We do that check before actually + * attempting to load the shared library (via load_external_function()), + * because that'd error out in case the shlib isn't available. + */ + snprintf(path, MAXPGPATH, "%s/%s%s", pkglib_path, jit_provider, DLSUFFIX); + elog(DEBUG1, "probing availability of JIT provider at %s", path); + if (!file_exists(path)) + { + elog(DEBUG1, + "provider not available, disabling JIT for current session"); + provider_failed_loading = true; + return false; + } + + /* + * If loading functions fails, signal failure. We do so because + * load_external_function() might error out despite the above check if + * e.g. the library's dependencies aren't installed. We want to signal + * ERROR in that case, so the user is notified, but we don't want to + * continually retry. + */ + provider_failed_loading = true; + + /* and initialize */ + init = (JitProviderInit) + load_external_function(path, "_PG_jit_provider_init", true, NULL); + init(&provider); + + provider_successfully_loaded = true; + provider_failed_loading = false; + + elog(DEBUG1, "successfully loaded JIT provider in current session"); + + return true; +} + +/* + * Reset JIT provider's error handling. This'll be called after an error has + * been thrown and the main-loop has re-established control. + */ +void +jit_reset_after_error(void) +{ + if (provider_successfully_loaded) + provider.reset_after_error(); +} + +/* + * Release resources required by one JIT context. + */ +void +jit_release_context(JitContext *context) +{ + if (provider_successfully_loaded) + provider.release_context(context); + + ResourceOwnerForgetJIT(context->resowner, PointerGetDatum(context)); + pfree(context); +} + +/* + * Ask provider to JIT compile an expression. + * + * Returns true if successful, false if not. 
+ */ +bool +jit_compile_expr(struct ExprState *state) +{ + /* + * We can easily create a one-off context for functions without an + * associated PlanState (and thus EState). But because there's no executor + * shutdown callback that could deallocate the created function, they'd + * live to the end of the transactions, where they'd be cleaned up by the + * resowner machinery. That can lead to a noticeable amount of memory + * usage, and worse, trigger some quadratic behaviour in gdb. Therefore, + * at least for now, don't create a JITed function in those circumstances. + */ + if (!state->parent) + return false; + + /* if no jitting should be performed at all */ + if (!(state->parent->state->es_jit_flags & PGJIT_PERFORM)) + return false; + + /* or if expressions aren't JITed */ + if (!(state->parent->state->es_jit_flags & PGJIT_EXPR)) + return false; + + /* this also takes !jit_enabled into account */ + if (provider_init()) + return provider.compile_expr(state); + + return false; +} + +/* Aggregate JIT instrumentation information */ +void +InstrJitAgg(JitInstrumentation *dst, JitInstrumentation *add) +{ + dst->created_functions += add->created_functions; + INSTR_TIME_ADD(dst->generation_counter, add->generation_counter); + INSTR_TIME_ADD(dst->inlining_counter, add->inlining_counter); + INSTR_TIME_ADD(dst->optimization_counter, add->optimization_counter); + INSTR_TIME_ADD(dst->emission_counter, add->emission_counter); +} + +static bool +file_exists(const char *name) +{ + struct stat st; + + AssertArg(name != NULL); + + if (stat(name, &st) == 0) + return S_ISDIR(st.st_mode) ? false : true; + else if (!(errno == ENOENT || errno == ENOTDIR)) + ereport(ERROR, + (errcode_for_file_access(), + errmsg("could not access file \"%s\": %m", name))); + + return false; +} diff --git a/src/backend/jit/llvm/Makefile b/src/backend/jit/llvm/Makefile new file mode 100644 index 0000000000..e2db4cea65 --- /dev/null +++ b/src/backend/jit/llvm/Makefile @@ -0,0 +1,62 @@ +#------------------------------------------------------------------------- +# +# Makefile-- +# Makefile the LLVM JIT provider, building it into a shared library. +# +# Note that this file is recursed into from src/Makefile, not by the +# parent directory.. +# +# IDENTIFICATION +# src/backend/jit/llvm/Makefile +# +#------------------------------------------------------------------------- + +subdir = src/backend/jit/llvm +top_builddir = ../../../.. +include $(top_builddir)/src/Makefile.global + +ifneq ($(with_llvm), yes) + $(error "not building with LLVM support") +endif + +PGFILEDESC = "llvmjit - JIT using LLVM" +NAME = llvmjit + +# All files in this directy use LLVM. +CFLAGS += $(LLVM_CFLAGS) +CXXFLAGS += $(LLVM_CXXFLAGS) +override CPPFLAGS := $(LLVM_CPPFLAGS) $(CPPFLAGS) +SHLIB_LINK += $(LLVM_LIBS) + +# Because this module includes C++ files, we need to use a C++ +# compiler for linking. Makefile.shlib uses $(COMPILER) to build +# loadable modules. 
+override COMPILER = $(CXX) $(CFLAGS) + +OBJS=$(WIN32RES) + +# Infrastructure +OBJS += llvmjit.o llvmjit_error.o llvmjit_inline.o llvmjit_wrap.o +# Code generation +OBJS += llvmjit_expr.o llvmjit_deform.o + +all: all-shared-lib llvmjit_types.bc + +install: all installdirs install-lib install-types + +installdirs: installdirs-lib + +uninstall: uninstall-lib uninstall-types + +# Note this is intentionally not in bitcodedir, as it's not for inlining */ +install-types: llvmjit_types.bc + $(INSTALL_DATA) llvmjit_types.bc '$(DESTDIR)$(pkglibdir)' + +uninstall-types: + rm -f '$(DESTDIR)$(pkglibdir)/llvmjit_types.bc' + +include $(top_srcdir)/src/Makefile.shlib + +clean distclean maintainer-clean: clean-lib + rm -f $(OBJS) + rm -f llvmjit_types.bc diff --git a/src/backend/jit/llvm/llvmjit.c b/src/backend/jit/llvm/llvmjit.c new file mode 100644 index 0000000000..168072afd2 --- /dev/null +++ b/src/backend/jit/llvm/llvmjit.c @@ -0,0 +1,914 @@ +/*------------------------------------------------------------------------- + * + * llvmjit.c + * Core part of the LLVM JIT provider. + * + * Copyright (c) 2016-2018, PostgreSQL Global Development Group + * + * IDENTIFICATION + * src/backend/jit/llvm/llvmjit.c + * + *------------------------------------------------------------------------- + */ + +#include "postgres.h" + +#include "jit/llvmjit.h" +#include "jit/llvmjit_emit.h" + +#include "miscadmin.h" + +#include "utils/memutils.h" +#include "utils/resowner_private.h" +#include "portability/instr_time.h" +#include "storage/ipc.h" + + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#if LLVM_VERSION_MAJOR > 6 +#include +#endif + + +/* Handle of a module emitted via ORC JIT */ +typedef struct LLVMJitHandle +{ + LLVMOrcJITStackRef stack; + LLVMOrcModuleHandle orc_handle; +} LLVMJitHandle; + + +/* types & functions commonly needed for JITing */ +LLVMTypeRef TypeSizeT; +LLVMTypeRef TypeParamBool; +LLVMTypeRef TypeStorageBool; +LLVMTypeRef TypePGFunction; +LLVMTypeRef StructHeapTupleFieldsField3; +LLVMTypeRef StructHeapTupleFields; +LLVMTypeRef StructHeapTupleHeaderData; +LLVMTypeRef StructHeapTupleDataChoice; +LLVMTypeRef StructHeapTupleData; +LLVMTypeRef StructMinimalTupleData; +LLVMTypeRef StructItemPointerData; +LLVMTypeRef StructBlockId; +LLVMTypeRef StructFormPgAttribute; +LLVMTypeRef StructTupleConstr; +LLVMTypeRef StructtupleDesc; +LLVMTypeRef StructTupleTableSlot; +LLVMTypeRef StructMemoryContextData; +LLVMTypeRef StructPGFinfoRecord; +LLVMTypeRef StructFmgrInfo; +LLVMTypeRef StructFunctionCallInfoData; +LLVMTypeRef StructExprContext; +LLVMTypeRef StructExprEvalStep; +LLVMTypeRef StructExprState; +LLVMTypeRef StructAggState; +LLVMTypeRef StructAggStatePerGroupData; +LLVMTypeRef StructAggStatePerTransData; + +LLVMValueRef AttributeTemplate; +LLVMValueRef FuncStrlen; +LLVMValueRef FuncVarsizeAny; +LLVMValueRef FuncSlotGetsomeattrs; +LLVMValueRef FuncSlotGetmissingattrs; +LLVMValueRef FuncMakeExpandedObjectReadOnlyInternal; +LLVMValueRef FuncExecEvalArrayRefSubscript; +LLVMValueRef FuncExecEvalSysVar; +LLVMValueRef FuncExecAggTransReparent; +LLVMValueRef FuncExecAggInitGroup; + + +static bool llvm_session_initialized = false; +static size_t llvm_generation = 0; +static const char *llvm_triple = NULL; +static const char *llvm_layout = NULL; + + +static LLVMTargetMachineRef llvm_opt0_targetmachine; +static LLVMTargetMachineRef llvm_opt3_targetmachine; + +static LLVMTargetRef llvm_targetref; +static LLVMOrcJITStackRef llvm_opt0_orc; +static 
LLVMOrcJITStackRef llvm_opt3_orc; + + +static void llvm_release_context(JitContext *context); +static void llvm_session_initialize(void); +static void llvm_shutdown(int code, Datum arg); +static void llvm_compile_module(LLVMJitContext *context); +static void llvm_optimize_module(LLVMJitContext *context, LLVMModuleRef module); + +static void llvm_create_types(void); +static uint64_t llvm_resolve_symbol(const char *name, void *ctx); + + +PG_MODULE_MAGIC; + + +/* + * Initialize LLVM JIT provider. + */ +void +_PG_jit_provider_init(JitProviderCallbacks *cb) +{ + cb->reset_after_error = llvm_reset_after_error; + cb->release_context = llvm_release_context; + cb->compile_expr = llvm_compile_expr; +} + +/* + * Create a context for JITing work. + * + * The context, including subsidiary resources, will be cleaned up either when + * the context is explicitly released, or when the lifetime of + * CurrentResourceOwner ends (usually the end of the current [sub]xact). + */ +LLVMJitContext * +llvm_create_context(int jitFlags) +{ + LLVMJitContext *context; + + llvm_assert_in_fatal_section(); + + llvm_session_initialize(); + + ResourceOwnerEnlargeJIT(CurrentResourceOwner); + + context = MemoryContextAllocZero(TopMemoryContext, + sizeof(LLVMJitContext)); + context->base.flags = jitFlags; + + /* ensure cleanup */ + context->base.resowner = CurrentResourceOwner; + ResourceOwnerRememberJIT(CurrentResourceOwner, PointerGetDatum(context)); + + return context; +} + +/* + * Release resources required by one llvm context. + */ +static void +llvm_release_context(JitContext *context) +{ + LLVMJitContext *llvm_context = (LLVMJitContext *) context; + + llvm_enter_fatal_on_oom(); + + /* + * When this backend is exiting, don't clean up LLVM. As an error might + * have occurred from within LLVM, we do not want to risk reentering. All + * resource cleanup is going to happen through process exit. + */ + if (!proc_exit_inprogress) + { + if (llvm_context->module) + { + LLVMDisposeModule(llvm_context->module); + llvm_context->module = NULL; + } + + while (llvm_context->handles != NIL) + { + LLVMJitHandle *jit_handle; + + jit_handle = (LLVMJitHandle *) linitial(llvm_context->handles); + llvm_context->handles = list_delete_first(llvm_context->handles); + + LLVMOrcRemoveModule(jit_handle->stack, jit_handle->orc_handle); + pfree(jit_handle); + } + } +} + +/* + * Return module which may be modified, e.g. by creating new functions. + */ +LLVMModuleRef +llvm_mutable_module(LLVMJitContext *context) +{ + llvm_assert_in_fatal_section(); + + /* + * If there's no in-progress module, create a new one. + */ + if (!context->module) + { + context->compiled = false; + context->module_generation = llvm_generation++; + context->module = LLVMModuleCreateWithName("pg"); + LLVMSetTarget(context->module, llvm_triple); + LLVMSetDataLayout(context->module, llvm_layout); + } + + return context->module; +} + +/* + * Expand function name to be non-conflicting. This should be used by code + * generating code, when adding new externally visible function definitions to + * a Module. + */ +char * +llvm_expand_funcname(struct LLVMJitContext *context, const char *basename) +{ + Assert(context->module != NULL); + + context->base.instr.created_functions++; + + /* + * Previously we used dots to separate, but turns out some tools, e.g. + * GDB, don't like that and truncate name. + */ + return psprintf("%s_%zu_%d", + basename, + context->module_generation, + context->counter++); +} + +/* + * Return pointer to function funcname, which has to exist. 
If there's pending + * code to be optimized and emitted, do so first. + */ +void * +llvm_get_function(LLVMJitContext *context, const char *funcname) +{ + LLVMOrcTargetAddress addr = 0; +#if defined(HAVE_DECL_LLVMORCGETSYMBOLADDRESSIN) && HAVE_DECL_LLVMORCGETSYMBOLADDRESSIN + ListCell *lc; +#endif + + llvm_assert_in_fatal_section(); + + /* + * If there is a pending / not emitted module, compile and emit now. + * Otherwise we might not find the [correct] function. + */ + if (!context->compiled) + { + llvm_compile_module(context); + } + + /* + * ORC's symbol table is of *unmangled* symbols. Therefore we don't need + * to mangle here. + */ + +#if defined(HAVE_DECL_LLVMORCGETSYMBOLADDRESSIN) && HAVE_DECL_LLVMORCGETSYMBOLADDRESSIN + foreach(lc, context->handles) + { + LLVMJitHandle *handle = (LLVMJitHandle *) lfirst(lc); + + addr = 0; + if (LLVMOrcGetSymbolAddressIn(handle->stack, &addr, handle->orc_handle, funcname)) + elog(ERROR, "failed to look up symbol \"%s\"", funcname); + if (addr) + return (void *) (uintptr_t) addr; + } + +#else + +#if LLVM_VERSION_MAJOR < 5 + if ((addr = LLVMOrcGetSymbolAddress(llvm_opt0_orc, funcname))) + return (void *) (uintptr_t) addr; + if ((addr = LLVMOrcGetSymbolAddress(llvm_opt3_orc, funcname))) + return (void *) (uintptr_t) addr; +#else + if (LLVMOrcGetSymbolAddress(llvm_opt0_orc, &addr, funcname)) + elog(ERROR, "failed to look up symbol \"%s\"", funcname); + if (addr) + return (void *) (uintptr_t) addr; + if (LLVMOrcGetSymbolAddress(llvm_opt3_orc, &addr, funcname)) + elog(ERROR, "failed to look up symbol \"%s\"", funcname); + if (addr) + return (void *) (uintptr_t) addr; +#endif /* LLVM_VERSION_MAJOR */ + +#endif /* HAVE_DECL_LLVMORCGETSYMBOLADDRESSIN */ + + elog(ERROR, "failed to JIT: %s", funcname); + + return NULL; +} + +/* + * Return declaration for passed function, adding it to the module if + * necessary. + * + * This is used to make functions imported by llvm_create_types() known to the + * module that's currently being worked on. + */ +LLVMValueRef +llvm_get_decl(LLVMModuleRef mod, LLVMValueRef v_src) +{ + LLVMValueRef v_fn; + + /* don't repeatedly add function */ + v_fn = LLVMGetNamedFunction(mod, LLVMGetValueName(v_src)); + if (v_fn) + return v_fn; + + v_fn = LLVMAddFunction(mod, + LLVMGetValueName(v_src), + LLVMGetElementType(LLVMTypeOf(v_src))); + llvm_copy_attributes(v_src, v_fn); + + return v_fn; +} + +/* + * Copy attributes from one function to another. + */ +void +llvm_copy_attributes(LLVMValueRef v_from, LLVMValueRef v_to) +{ + int num_attributes; + int attno; + LLVMAttributeRef *attrs; + + num_attributes = + LLVMGetAttributeCountAtIndex(v_from, LLVMAttributeFunctionIndex); + + attrs = palloc(sizeof(LLVMAttributeRef) * num_attributes); + LLVMGetAttributesAtIndex(v_from, LLVMAttributeFunctionIndex, attrs); + + for (attno = 0; attno < num_attributes; attno++) + { + LLVMAddAttributeAtIndex(v_to, LLVMAttributeFunctionIndex, + attrs[attno]); + } +} + +/* + * Return a callable LLVMValueRef for fcinfo. 
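+ *
+ * As a rough usage sketch (variable names are illustrative only), callers
+ * such as BuildV1Call() in llvmjit_expr.c do essentially:
+ *
+ *     v_fn = llvm_function_reference(context, b, mod, fcinfo);
+ *     v_ret = LLVMBuildCall(b, v_fn, v_args, nargs, "funccall");
+ *
+ * Depending on what fmgr_symbol() reports, the returned value is either a
+ * named function ("pgextern.$module.$funcname" for extension functions, the
+ * bare symbol name for built-ins) or a load from a global constant holding
+ * the function pointer.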
+ */ +LLVMValueRef +llvm_function_reference(LLVMJitContext *context, + LLVMBuilderRef builder, + LLVMModuleRef mod, + FunctionCallInfo fcinfo) +{ + char *modname; + char *basename; + char *funcname; + + LLVMValueRef v_fn; + + fmgr_symbol(fcinfo->flinfo->fn_oid, &modname, &basename); + + if (modname != NULL && basename != NULL) + { + /* external function in loadable library */ + funcname = psprintf("pgextern.%s.%s", modname, basename); + } + else if (basename != NULL) + { + /* internal function */ + funcname = psprintf("%s", basename); + } + else + { + /* + * Function we don't know to handle, return pointer. We do so by + * creating a global constant containing a pointer to the function. + * Makes IR more readable. + */ + LLVMValueRef v_fn_addr; + + funcname = psprintf("pgoidextern.%u", + fcinfo->flinfo->fn_oid); + v_fn = LLVMGetNamedGlobal(mod, funcname); + if (v_fn != 0) + return LLVMBuildLoad(builder, v_fn, ""); + + v_fn_addr = l_ptr_const(fcinfo->flinfo->fn_addr, TypePGFunction); + + v_fn = LLVMAddGlobal(mod, TypePGFunction, funcname); + LLVMSetInitializer(v_fn, v_fn_addr); + LLVMSetGlobalConstant(v_fn, true); + + return LLVMBuildLoad(builder, v_fn, ""); + } + + /* check if function already has been added */ + v_fn = LLVMGetNamedFunction(mod, funcname); + if (v_fn != 0) + return v_fn; + + v_fn = LLVMAddFunction(mod, funcname, LLVMGetElementType(TypePGFunction)); + + return v_fn; +} + +/* + * Optimize code in module using the flags set in context. + */ +static void +llvm_optimize_module(LLVMJitContext *context, LLVMModuleRef module) +{ + LLVMPassManagerBuilderRef llvm_pmb; + LLVMPassManagerRef llvm_mpm; + LLVMPassManagerRef llvm_fpm; + LLVMValueRef func; + int compile_optlevel; + + if (context->base.flags & PGJIT_OPT3) + compile_optlevel = 3; + else + compile_optlevel = 0; + + /* + * Have to create a new pass manager builder every pass through, as the + * inliner has some per-builder state. Otherwise one ends up only inlining + * a function the first time though. + */ + llvm_pmb = LLVMPassManagerBuilderCreate(); + LLVMPassManagerBuilderSetOptLevel(llvm_pmb, compile_optlevel); + llvm_fpm = LLVMCreateFunctionPassManagerForModule(module); + + if (context->base.flags & PGJIT_OPT3) + { + /* TODO: Unscientifically determined threshold */ + LLVMPassManagerBuilderUseInlinerWithThreshold(llvm_pmb, 512); + } + else + { + /* we rely on mem2reg heavily, so emit even in the O0 case */ + LLVMAddPromoteMemoryToRegisterPass(llvm_fpm); + } + + LLVMPassManagerBuilderPopulateFunctionPassManager(llvm_pmb, llvm_fpm); + + /* + * Do function level optimization. This could be moved to the point where + * functions are emitted, to reduce memory usage a bit. + */ + LLVMInitializeFunctionPassManager(llvm_fpm); + for (func = LLVMGetFirstFunction(context->module); + func != NULL; + func = LLVMGetNextFunction(func)) + LLVMRunFunctionPassManager(llvm_fpm, func); + LLVMFinalizeFunctionPassManager(llvm_fpm); + LLVMDisposePassManager(llvm_fpm); + + /* + * Perform module level optimization. We do so even in the non-optimized + * case, so always-inline functions etc get inlined. It's cheap enough. 
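+ *
+ * Concretely, given the flag handling above: with PGJIT_OPT3 the pass
+ * manager builder is configured roughly like an -O3 pipeline with an
+ * inlining threshold of 512, while without it only cheap passes (mem2reg
+ * and the always-inliner) are run.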
+ */ + llvm_mpm = LLVMCreatePassManager(); + LLVMPassManagerBuilderPopulateModulePassManager(llvm_pmb, + llvm_mpm); + /* always use always-inliner pass */ + if (!(context->base.flags & PGJIT_OPT3)) + LLVMAddAlwaysInlinerPass(llvm_mpm); + /* if doing inlining, but no expensive optimization, add inlining pass */ + if (context->base.flags & PGJIT_INLINE + && !(context->base.flags & PGJIT_OPT3)) + LLVMAddFunctionInliningPass(llvm_mpm); + LLVMRunPassManager(llvm_mpm, context->module); + LLVMDisposePassManager(llvm_mpm); + + LLVMPassManagerBuilderDispose(llvm_pmb); +} + +/* + * Emit code for the currently pending module. + */ +static void +llvm_compile_module(LLVMJitContext *context) +{ + LLVMOrcModuleHandle orc_handle; + MemoryContext oldcontext; + static LLVMOrcJITStackRef compile_orc; + instr_time starttime; + instr_time endtime; + + if (context->base.flags & PGJIT_OPT3) + compile_orc = llvm_opt3_orc; + else + compile_orc = llvm_opt0_orc; + + /* perform inlining */ + if (context->base.flags & PGJIT_INLINE) + { + INSTR_TIME_SET_CURRENT(starttime); + llvm_inline(context->module); + INSTR_TIME_SET_CURRENT(endtime); + INSTR_TIME_ACCUM_DIFF(context->base.instr.inlining_counter, + endtime, starttime); + } + + if (jit_dump_bitcode) + { + char *filename; + + filename = psprintf("%u.%zu.bc", + MyProcPid, + context->module_generation); + LLVMWriteBitcodeToFile(context->module, filename); + pfree(filename); + } + + + /* optimize according to the chosen optimization settings */ + INSTR_TIME_SET_CURRENT(starttime); + llvm_optimize_module(context, context->module); + INSTR_TIME_SET_CURRENT(endtime); + INSTR_TIME_ACCUM_DIFF(context->base.instr.optimization_counter, + endtime, starttime); + + if (jit_dump_bitcode) + { + char *filename; + + filename = psprintf("%u.%zu.optimized.bc", + MyProcPid, + context->module_generation); + LLVMWriteBitcodeToFile(context->module, filename); + pfree(filename); + } + + /* + * Emit the code. Note that this can, depending on the optimization + * settings, take noticeable resources as code emission executes low-level + * instruction combining/selection passes etc. Without optimization a + * faster instruction selection mechanism is used. 
+ */ + INSTR_TIME_SET_CURRENT(starttime); +#if LLVM_VERSION_MAJOR > 6 + { + if (LLVMOrcAddEagerlyCompiledIR(compile_orc, &orc_handle, context->module, + llvm_resolve_symbol, NULL)) + { + elog(ERROR, "failed to JIT module"); + } + + /* LLVMOrcAddEagerlyCompiledIR takes ownership of the module */ + } +#elif LLVM_VERSION_MAJOR > 4 + { + LLVMSharedModuleRef smod; + + smod = LLVMOrcMakeSharedModule(context->module); + if (LLVMOrcAddEagerlyCompiledIR(compile_orc, &orc_handle, smod, + llvm_resolve_symbol, NULL)) + { + elog(ERROR, "failed to JIT module"); + } + LLVMOrcDisposeSharedModuleRef(smod); + } +#else /* LLVM 4.0 and 3.9 */ + { + orc_handle = LLVMOrcAddEagerlyCompiledIR(compile_orc, context->module, + llvm_resolve_symbol, NULL); + LLVMDisposeModule(context->module); + } +#endif + INSTR_TIME_SET_CURRENT(endtime); + INSTR_TIME_ACCUM_DIFF(context->base.instr.emission_counter, + endtime, starttime); + + context->module = NULL; + context->compiled = true; + + /* remember emitted code for cleanup and lookups */ + oldcontext = MemoryContextSwitchTo(TopMemoryContext); + { + LLVMJitHandle *handle; + + handle = (LLVMJitHandle *) palloc(sizeof(LLVMJitHandle)); + handle->stack = compile_orc; + handle->orc_handle = orc_handle; + + context->handles = lappend(context->handles, handle); + } + MemoryContextSwitchTo(oldcontext); + + ereport(DEBUG1, + (errmsg("time to inline: %.3fs, opt: %.3fs, emit: %.3fs", + INSTR_TIME_GET_DOUBLE(context->base.instr.inlining_counter), + INSTR_TIME_GET_DOUBLE(context->base.instr.optimization_counter), + INSTR_TIME_GET_DOUBLE(context->base.instr.emission_counter)), + errhidestmt(true), + errhidecontext(true))); +} + +/* + * Per session initialization. + */ +static void +llvm_session_initialize(void) +{ + MemoryContext oldcontext; + char *error = NULL; + char *cpu = NULL; + char *features = NULL; + + if (llvm_session_initialized) + return; + + oldcontext = MemoryContextSwitchTo(TopMemoryContext); + + LLVMInitializeNativeTarget(); + LLVMInitializeNativeAsmPrinter(); + LLVMInitializeNativeAsmParser(); + + /* + * Synchronize types early, as that also includes inferring the target + * triple. + */ + llvm_create_types(); + + if (LLVMGetTargetFromTriple(llvm_triple, &llvm_targetref, &error) != 0) + { + elog(FATAL, "failed to query triple %s\n", error); + } + + /* + * We want the generated code to use all available features. Therefore + * grab the host CPU string and detect features of the current CPU. The + * latter is needed because some CPU architectures default to enabling + * features not all CPUs have (weird, huh). 
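+ *
+ * For illustration only (the values are host dependent): LLVMGetHostCPUName()
+ * returns something like "skylake", and LLVMGetHostCPUFeatures() a comma
+ * separated list along the lines of "+sse2,+sse4.2,+avx,-avx512f,...".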
+ */ + cpu = LLVMGetHostCPUName(); + features = LLVMGetHostCPUFeatures(); + elog(DEBUG2, "LLVMJIT detected CPU \"%s\", with features \"%s\"", + cpu, features); + + llvm_opt0_targetmachine = + LLVMCreateTargetMachine(llvm_targetref, llvm_triple, cpu, features, + LLVMCodeGenLevelNone, + LLVMRelocDefault, + LLVMCodeModelJITDefault); + llvm_opt3_targetmachine = + LLVMCreateTargetMachine(llvm_targetref, llvm_triple, cpu, features, + LLVMCodeGenLevelAggressive, + LLVMRelocDefault, + LLVMCodeModelJITDefault); + + LLVMDisposeMessage(cpu); + cpu = NULL; + LLVMDisposeMessage(features); + features = NULL; + + /* force symbols in main binary to be loaded */ + LLVMLoadLibraryPermanently(NULL); + + llvm_opt0_orc = LLVMOrcCreateInstance(llvm_opt0_targetmachine); + llvm_opt3_orc = LLVMOrcCreateInstance(llvm_opt3_targetmachine); + +#if defined(HAVE_DECL_LLVMCREATEGDBREGISTRATIONLISTENER) && HAVE_DECL_LLVMCREATEGDBREGISTRATIONLISTENER + if (jit_debugging_support) + { + LLVMJITEventListenerRef l = LLVMCreateGDBRegistrationListener(); + + LLVMOrcRegisterJITEventListener(llvm_opt0_orc, l); + LLVMOrcRegisterJITEventListener(llvm_opt3_orc, l); + } +#endif +#if defined(HAVE_DECL_LLVMCREATEPERFJITEVENTLISTENER) && HAVE_DECL_LLVMCREATEPERFJITEVENTLISTENER + if (jit_profiling_support) + { + LLVMJITEventListenerRef l = LLVMCreatePerfJITEventListener(); + + LLVMOrcRegisterJITEventListener(llvm_opt0_orc, l); + LLVMOrcRegisterJITEventListener(llvm_opt3_orc, l); + } +#endif + + before_shmem_exit(llvm_shutdown, 0); + + llvm_session_initialized = true; + + MemoryContextSwitchTo(oldcontext); +} + +static void +llvm_shutdown(int code, Datum arg) +{ + /* unregister profiling support, needs to be flushed to be useful */ + + if (llvm_opt3_orc) + { +#if defined(HAVE_DECL_LLVMORCREGISTERPERF) && HAVE_DECL_LLVMORCREGISTERPERF + if (jit_profiling_support) + LLVMOrcUnregisterPerf(llvm_opt3_orc); +#endif + LLVMOrcDisposeInstance(llvm_opt3_orc); + llvm_opt3_orc = NULL; + } + + if (llvm_opt0_orc) + { +#if defined(HAVE_DECL_LLVMORCREGISTERPERF) && HAVE_DECL_LLVMORCREGISTERPERF + if (jit_profiling_support) + LLVMOrcUnregisterPerf(llvm_opt0_orc); +#endif + LLVMOrcDisposeInstance(llvm_opt0_orc); + llvm_opt0_orc = NULL; + } +} + +/* helper for llvm_create_types, returning a global var's type */ +static LLVMTypeRef +load_type(LLVMModuleRef mod, const char *name) +{ + LLVMValueRef value; + LLVMTypeRef typ; + + /* this'll return a *pointer* to the global */ + value = LLVMGetNamedGlobal(mod, name); + if (!value) + elog(ERROR, "type %s is unknown", name); + + /* therefore look at the contained type and return that */ + typ = LLVMTypeOf(value); + Assert(typ != NULL); + typ = LLVMGetElementType(typ); + Assert(typ != NULL); + return typ; +} + +/* helper for llvm_create_types, returning a function's return type */ +static LLVMTypeRef +load_return_type(LLVMModuleRef mod, const char *name) +{ + LLVMValueRef value; + LLVMTypeRef typ; + + /* this'll return a *pointer* to the function */ + value = LLVMGetNamedFunction(mod, name); + if (!value) + elog(ERROR, "function %s is unknown", name); + + /* get type of function pointer */ + typ = LLVMTypeOf(value); + Assert(typ != NULL); + /* dereference pointer */ + typ = LLVMGetElementType(typ); + Assert(typ != NULL); + /* and look at return type */ + typ = LLVMGetReturnType(typ); + Assert(typ != NULL); + + return typ; +} + +/* + * Load required information, types, function signatures from llvmjit_types.c + * and make them available in global variables. 
+ * + * Those global variables are then used while emitting code. + */ +static void +llvm_create_types(void) +{ + char path[MAXPGPATH]; + LLVMMemoryBufferRef buf; + char *msg; + LLVMModuleRef mod = NULL; + + snprintf(path, MAXPGPATH, "%s/%s", pkglib_path, "llvmjit_types.bc"); + + /* open file */ + if (LLVMCreateMemoryBufferWithContentsOfFile(path, &buf, &msg)) + { + elog(ERROR, "LLVMCreateMemoryBufferWithContentsOfFile(%s) failed: %s", + path, msg); + } + + /* eagerly load contents, going to need it all */ + if (LLVMParseBitcode2(buf, &mod)) + { + elog(ERROR, "LLVMParseBitcode2 of %s failed", path); + } + LLVMDisposeMemoryBuffer(buf); + + /* + * Load triple & layout from clang emitted file so we're guaranteed to be + * compatible. + */ + llvm_triple = pstrdup(LLVMGetTarget(mod)); + llvm_layout = pstrdup(LLVMGetDataLayoutStr(mod)); + + TypeSizeT = load_type(mod, "TypeSizeT"); + TypeParamBool = load_return_type(mod, "FunctionReturningBool"); + TypeStorageBool = load_type(mod, "TypeStorageBool"); + TypePGFunction = load_type(mod, "TypePGFunction"); + StructExprContext = load_type(mod, "StructExprContext"); + StructExprEvalStep = load_type(mod, "StructExprEvalStep"); + StructExprState = load_type(mod, "StructExprState"); + StructFunctionCallInfoData = load_type(mod, "StructFunctionCallInfoData"); + StructMemoryContextData = load_type(mod, "StructMemoryContextData"); + StructTupleTableSlot = load_type(mod, "StructTupleTableSlot"); + StructHeapTupleData = load_type(mod, "StructHeapTupleData"); + StructtupleDesc = load_type(mod, "StructtupleDesc"); + StructAggState = load_type(mod, "StructAggState"); + StructAggStatePerGroupData = load_type(mod, "StructAggStatePerGroupData"); + StructAggStatePerTransData = load_type(mod, "StructAggStatePerTransData"); + + AttributeTemplate = LLVMGetNamedFunction(mod, "AttributeTemplate"); + FuncStrlen = LLVMGetNamedFunction(mod, "strlen"); + FuncVarsizeAny = LLVMGetNamedFunction(mod, "varsize_any"); + FuncSlotGetsomeattrs = LLVMGetNamedFunction(mod, "slot_getsomeattrs"); + FuncSlotGetmissingattrs = LLVMGetNamedFunction(mod, "slot_getmissingattrs"); + FuncMakeExpandedObjectReadOnlyInternal = LLVMGetNamedFunction(mod, "MakeExpandedObjectReadOnlyInternal"); + FuncExecEvalArrayRefSubscript = LLVMGetNamedFunction(mod, "ExecEvalArrayRefSubscript"); + FuncExecEvalSysVar = LLVMGetNamedFunction(mod, "ExecEvalSysVar"); + FuncExecAggTransReparent = LLVMGetNamedFunction(mod, "ExecAggTransReparent"); + FuncExecAggInitGroup = LLVMGetNamedFunction(mod, "ExecAggInitGroup"); + + /* + * Leave the module alive, otherwise references to function would be + * dangling. + */ + + return; +} + +/* + * Split a symbol into module / function parts. If the function is in the + * main binary (or an external library) *modname will be NULL. + */ +void +llvm_split_symbol_name(const char *name, char **modname, char **funcname) +{ + *modname = NULL; + *funcname = NULL; + + /* + * Module function names are pgextern.$module.$funcname + */ + if (strncmp(name, "pgextern.", strlen("pgextern.")) == 0) + { + /* + * Symbol names cannot contain a ., therefore we can split based on + * first and last occurrence of one. + */ + *funcname = rindex(name, '.'); + (*funcname)++; /* jump over . */ + + *modname = pnstrdup(name + strlen("pgextern."), + *funcname - name - strlen("pgextern.") - 1); + Assert(funcname); + + *funcname = pstrdup(*funcname); + } + else + { + *modname = NULL; + *funcname = pstrdup(name); + } +} + +/* + * Attempt to resolve symbol, so LLVM can emit a reference to it. 
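+ *
+ * For example (the extension name is made up): a symbol named
+ * "pgextern.cube.cube_in" is resolved by loading the "cube" module and
+ * looking up "cube_in" in it, whereas a plain symbol such as
+ * "slot_getsomeattrs" is looked up in the running binary via
+ * LLVMSearchForAddressOfSymbol().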
+ */ +static uint64_t +llvm_resolve_symbol(const char *symname, void *ctx) +{ + uintptr_t addr; + char *funcname; + char *modname; + + /* + * macOS prefixes all object level symbols with an underscore. But neither + * dlsym() nor PG's inliner expect that. So undo. + */ +#if defined(__darwin__) + if (symname[0] != '_') + elog(ERROR, "expected prefixed symbol name, but got \"%s\"", symname); + symname++; +#endif + + llvm_split_symbol_name(symname, &modname, &funcname); + + /* functions that aren't resolved to names shouldn't ever get here */ + Assert(funcname); + + if (modname) + addr = (uintptr_t) load_external_function(modname, funcname, + true, NULL); + else + addr = (uintptr_t) LLVMSearchForAddressOfSymbol(symname); + + pfree(funcname); + if (modname) + pfree(modname); + + /* let LLVM will error out - should never happen */ + if (!addr) + elog(WARNING, "failed to resolve name %s", symname); + + return (uint64_t) addr; +} diff --git a/src/backend/jit/llvm/llvmjit_deform.c b/src/backend/jit/llvm/llvmjit_deform.c new file mode 100644 index 0000000000..59e38d2d95 --- /dev/null +++ b/src/backend/jit/llvm/llvmjit_deform.c @@ -0,0 +1,707 @@ +/*------------------------------------------------------------------------- + * + * llvmjit_deform.c + * Generate code for deforming a heap tuple. + * + * This gains performance benefits over unJITed deforming from compile-time + * knowledge of the tuple descriptor. Fixed column widths, NOT NULLness, etc + * can be taken advantage of. + * + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group + * Portions Copyright (c) 1994, Regents of the University of California + * + * IDENTIFICATION + * src/backend/jit/llvm/llvmjit_deform.c + * + *------------------------------------------------------------------------- + */ + +#include "postgres.h" + +#include + +#include "access/htup_details.h" +#include "access/tupdesc_details.h" +#include "executor/tuptable.h" +#include "jit/llvmjit.h" +#include "jit/llvmjit_emit.h" + + +/* + * Create a function that deforms a tuple of type desc up to natts columns. 
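+ *
+ * The generated function is essentially a slot_getsomeattrs() specialized
+ * for one tuple descriptor; its C-level signature is simply
+ *
+ *     void deform_<generation>_<counter>(TupleTableSlot *slot);
+ *
+ * with the name produced by llvm_expand_funcname() (the suffix placeholders
+ * here are illustrative).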
+ */ +LLVMValueRef +slot_compile_deform(LLVMJitContext *context, TupleDesc desc, int natts) +{ + char *funcname; + + LLVMModuleRef mod; + LLVMBuilderRef b; + + LLVMTypeRef deform_sig; + LLVMValueRef v_deform_fn; + + LLVMBasicBlockRef b_entry; + LLVMBasicBlockRef b_adjust_unavail_cols; + LLVMBasicBlockRef b_find_start; + + LLVMBasicBlockRef b_out; + LLVMBasicBlockRef b_dead; + LLVMBasicBlockRef *attcheckattnoblocks; + LLVMBasicBlockRef *attstartblocks; + LLVMBasicBlockRef *attisnullblocks; + LLVMBasicBlockRef *attcheckalignblocks; + LLVMBasicBlockRef *attalignblocks; + LLVMBasicBlockRef *attstoreblocks; + + LLVMValueRef v_offp; + + LLVMValueRef v_tupdata_base; + LLVMValueRef v_tts_values; + LLVMValueRef v_tts_nulls; + LLVMValueRef v_slotoffp; + LLVMValueRef v_flagsp; + LLVMValueRef v_nvalidp; + LLVMValueRef v_nvalid; + LLVMValueRef v_maxatt; + + LLVMValueRef v_slot; + + LLVMValueRef v_tupleheaderp; + LLVMValueRef v_tuplep; + LLVMValueRef v_infomask1; + LLVMValueRef v_infomask2; + LLVMValueRef v_bits; + + LLVMValueRef v_hoff; + + LLVMValueRef v_hasnulls; + + /* last column (0 indexed) guaranteed to exist */ + int guaranteed_column_number = -1; + + /* current known alignment */ + int known_alignment = 0; + + /* if true, known_alignment describes definite offset of column */ + bool attguaranteedalign = true; + + int attnum; + + mod = llvm_mutable_module(context); + + funcname = llvm_expand_funcname(context, "deform"); + + /* + * Check which columns do have to exist, so we don't have to check the + * rows natts unnecessarily. + */ + for (attnum = 0; attnum < desc->natts; attnum++) + { + Form_pg_attribute att = TupleDescAttr(desc, attnum); + + /* + * If the column is possibly missing, we can't rely on its (or + * subsequent) NOT NULL constraints to indicate minimum attributes in + * the tuple, so stop here. + */ + if (att->atthasmissing) + break; + + /* + * Column is NOT NULL and there've been no preceding missing columns, + * it's guaranteed that all columns up to here exist at least in the + * NULL bitmap. 
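+ *
+ * For example, for a hypothetical table (a int4 NOT NULL, b int4 NOT NULL,
+ * c text) with no missing attributes, this loop ends with
+ * guaranteed_column_number = 1: every tuple must contain columns 0 and 1
+ * at least in the NULL bitmap, whereas column 2 still needs the natts
+ * check further down.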
+ */ + if (att->attnotnull) + guaranteed_column_number = attnum; + } + + /* Create the signature and function */ + { + LLVMTypeRef param_types[1]; + + param_types[0] = l_ptr(StructTupleTableSlot); + + deform_sig = LLVMFunctionType(LLVMVoidType(), param_types, + lengthof(param_types), 0); + } + v_deform_fn = LLVMAddFunction(mod, funcname, deform_sig); + LLVMSetLinkage(v_deform_fn, LLVMInternalLinkage); + LLVMSetParamAlignment(LLVMGetParam(v_deform_fn, 0), MAXIMUM_ALIGNOF); + llvm_copy_attributes(AttributeTemplate, v_deform_fn); + + b_entry = + LLVMAppendBasicBlock(v_deform_fn, "entry"); + b_adjust_unavail_cols = + LLVMAppendBasicBlock(v_deform_fn, "adjust_unavail_cols"); + b_find_start = + LLVMAppendBasicBlock(v_deform_fn, "find_startblock"); + b_out = + LLVMAppendBasicBlock(v_deform_fn, "outblock"); + b_dead = + LLVMAppendBasicBlock(v_deform_fn, "deadblock"); + + b = LLVMCreateBuilder(); + + attcheckattnoblocks = palloc(sizeof(LLVMBasicBlockRef) * natts); + attstartblocks = palloc(sizeof(LLVMBasicBlockRef) * natts); + attisnullblocks = palloc(sizeof(LLVMBasicBlockRef) * natts); + attcheckalignblocks = palloc(sizeof(LLVMBasicBlockRef) * natts); + attalignblocks = palloc(sizeof(LLVMBasicBlockRef) * natts); + attstoreblocks = palloc(sizeof(LLVMBasicBlockRef) * natts); + + known_alignment = 0; + + LLVMPositionBuilderAtEnd(b, b_entry); + + /* perform allocas first, llvm only converts those to registers */ + v_offp = LLVMBuildAlloca(b, TypeSizeT, "v_offp"); + + v_slot = LLVMGetParam(v_deform_fn, 0); + + v_tts_values = + l_load_struct_gep(b, v_slot, FIELDNO_TUPLETABLESLOT_VALUES, + "tts_values"); + v_tts_nulls = + l_load_struct_gep(b, v_slot, FIELDNO_TUPLETABLESLOT_ISNULL, + "tts_ISNULL"); + + v_slotoffp = LLVMBuildStructGEP(b, v_slot, FIELDNO_TUPLETABLESLOT_OFF, ""); + v_flagsp = LLVMBuildStructGEP(b, v_slot, FIELDNO_TUPLETABLESLOT_FLAGS, ""); + v_nvalidp = LLVMBuildStructGEP(b, v_slot, FIELDNO_TUPLETABLESLOT_NVALID, ""); + + v_tupleheaderp = + l_load_struct_gep(b, v_slot, FIELDNO_TUPLETABLESLOT_TUPLE, + "tupleheader"); + v_tuplep = + l_load_struct_gep(b, v_tupleheaderp, FIELDNO_HEAPTUPLEDATA_DATA, + "tuple"); + v_bits = + LLVMBuildBitCast(b, + LLVMBuildStructGEP(b, v_tuplep, + FIELDNO_HEAPTUPLEHEADERDATA_BITS, + ""), + l_ptr(LLVMInt8Type()), + "t_bits"); + v_infomask1 = + l_load_struct_gep(b, v_tuplep, + FIELDNO_HEAPTUPLEHEADERDATA_INFOMASK, + "infomask1"); + v_infomask2 = + l_load_struct_gep(b, + v_tuplep, FIELDNO_HEAPTUPLEHEADERDATA_INFOMASK2, + "infomask2"); + + /* t_infomask & HEAP_HASNULL */ + v_hasnulls = + LLVMBuildICmp(b, LLVMIntNE, + LLVMBuildAnd(b, + l_int16_const(HEAP_HASNULL), + v_infomask1, ""), + l_int16_const(0), + "hasnulls"); + + /* t_infomask2 & HEAP_NATTS_MASK */ + v_maxatt = LLVMBuildAnd(b, + l_int16_const(HEAP_NATTS_MASK), + v_infomask2, + "maxatt"); + + v_hoff = + l_load_struct_gep(b, v_tuplep, + FIELDNO_HEAPTUPLEHEADERDATA_HOFF, + "t_hoff"); + + v_tupdata_base = + LLVMBuildGEP(b, + LLVMBuildBitCast(b, + v_tuplep, + l_ptr(LLVMInt8Type()), + ""), + &v_hoff, 1, + "v_tupdata_base"); + + /* + * Load tuple start offset from slot. Will be reset below in case there's + * no existing deformed columns in slot. 
+ */ + { + LLVMValueRef v_off_start; + + v_off_start = LLVMBuildLoad(b, v_slotoffp, "v_slot_off"); + v_off_start = LLVMBuildZExt(b, v_off_start, TypeSizeT, ""); + LLVMBuildStore(b, v_off_start, v_offp); + } + + /* build the basic block for each attribute, need them as jump target */ + for (attnum = 0; attnum < natts; attnum++) + { + attcheckattnoblocks[attnum] = + l_bb_append_v(v_deform_fn, "block.attr.%d.attcheckattno", attnum); + attstartblocks[attnum] = + l_bb_append_v(v_deform_fn, "block.attr.%d.start", attnum); + attisnullblocks[attnum] = + l_bb_append_v(v_deform_fn, "block.attr.%d.attisnull", attnum); + attcheckalignblocks[attnum] = + l_bb_append_v(v_deform_fn, "block.attr.%d.attcheckalign", attnum); + attalignblocks[attnum] = + l_bb_append_v(v_deform_fn, "block.attr.%d.align", attnum); + attstoreblocks[attnum] = + l_bb_append_v(v_deform_fn, "block.attr.%d.store", attnum); + } + + /* + * Check if's guaranteed the all the desired attributes are available in + * tuple. If so, we can start deforming. If not, need to make sure to + * fetch the missing columns. + */ + if ((natts - 1) <= guaranteed_column_number) + { + /* just skip through unnecessary blocks */ + LLVMBuildBr(b, b_adjust_unavail_cols); + LLVMPositionBuilderAtEnd(b, b_adjust_unavail_cols); + LLVMBuildBr(b, b_find_start); + } + else + { + LLVMValueRef v_params[3]; + + /* branch if not all columns available */ + LLVMBuildCondBr(b, + LLVMBuildICmp(b, LLVMIntULT, + v_maxatt, + l_int16_const(natts), + ""), + b_adjust_unavail_cols, + b_find_start); + + /* if not, memset tts_isnull of relevant cols to true */ + LLVMPositionBuilderAtEnd(b, b_adjust_unavail_cols); + + v_params[0] = v_slot; + v_params[1] = LLVMBuildZExt(b, v_maxatt, LLVMInt32Type(), ""); + v_params[2] = l_int32_const(natts); + LLVMBuildCall(b, llvm_get_decl(mod, FuncSlotGetmissingattrs), + v_params, lengthof(v_params), ""); + LLVMBuildBr(b, b_find_start); + } + + LLVMPositionBuilderAtEnd(b, b_find_start); + + v_nvalid = LLVMBuildLoad(b, v_nvalidp, ""); + + /* + * Build switch to go from nvalid to the right startblock. Callers + * currently don't have the knowledge, but it'd be good for performance to + * avoid this check when it's known that the slot is empty (e.g. in scan + * nodes). + */ + if (true) + { + LLVMValueRef v_switch = LLVMBuildSwitch(b, v_nvalid, + b_dead, natts); + + for (attnum = 0; attnum < natts; attnum++) + { + LLVMValueRef v_attno = l_int16_const(attnum); + + LLVMAddCase(v_switch, v_attno, attcheckattnoblocks[attnum]); + } + + } + else + { + /* jump from entry block to first block */ + LLVMBuildBr(b, attcheckattnoblocks[0]); + } + + LLVMPositionBuilderAtEnd(b, b_dead); + LLVMBuildUnreachable(b); + + /* + * Iterate over each attribute that needs to be deformed, build code to + * deform it. + */ + for (attnum = 0; attnum < natts; attnum++) + { + Form_pg_attribute att = TupleDescAttr(desc, attnum); + LLVMValueRef v_incby; + int alignto; + LLVMValueRef l_attno = l_int16_const(attnum); + LLVMValueRef v_attdatap; + LLVMValueRef v_resultp; + + /* build block checking whether we did all the necessary attributes */ + LLVMPositionBuilderAtEnd(b, attcheckattnoblocks[attnum]); + + /* + * If this is the first attribute, slot->tts_nvalid was 0. Therefore + * reset offset to 0 to, it be from a previous execution. + */ + if (attnum == 0) + { + LLVMBuildStore(b, l_sizet_const(0), v_offp); + } + + /* + * Build check whether column is available (i.e. whether the tuple has + * that many columns stored). 
We can avoid the branch if we know + * there's a subsequent NOT NULL column. + */ + if (attnum <= guaranteed_column_number) + { + LLVMBuildBr(b, attstartblocks[attnum]); + } + else + { + LLVMValueRef v_islast; + + v_islast = LLVMBuildICmp(b, LLVMIntUGE, + l_attno, + v_maxatt, + "heap_natts"); + LLVMBuildCondBr(b, v_islast, b_out, attstartblocks[attnum]); + } + LLVMPositionBuilderAtEnd(b, attstartblocks[attnum]); + + /* + * Check for nulls if necessary. No need to take missing attributes + * into account, because in case they're present the heaptuple's natts + * would have indicated that a slot_getmissingattrs() is needed. + */ + if (!att->attnotnull) + { + LLVMBasicBlockRef b_ifnotnull; + LLVMBasicBlockRef b_ifnull; + LLVMBasicBlockRef b_next; + LLVMValueRef v_attisnull; + LLVMValueRef v_nullbyteno; + LLVMValueRef v_nullbytemask; + LLVMValueRef v_nullbyte; + LLVMValueRef v_nullbit; + + b_ifnotnull = attcheckalignblocks[attnum]; + b_ifnull = attisnullblocks[attnum]; + + if (attnum + 1 == natts) + b_next = b_out; + else + b_next = attcheckattnoblocks[attnum + 1]; + + v_nullbyteno = l_int32_const(attnum >> 3); + v_nullbytemask = l_int8_const(1 << ((attnum) & 0x07)); + v_nullbyte = l_load_gep1(b, v_bits, v_nullbyteno, "attnullbyte"); + + v_nullbit = LLVMBuildICmp(b, + LLVMIntEQ, + LLVMBuildAnd(b, v_nullbyte, v_nullbytemask, ""), + l_int8_const(0), + "attisnull"); + + v_attisnull = LLVMBuildAnd(b, v_hasnulls, v_nullbit, ""); + + LLVMBuildCondBr(b, v_attisnull, b_ifnull, b_ifnotnull); + + LLVMPositionBuilderAtEnd(b, b_ifnull); + + /* store null-byte */ + LLVMBuildStore(b, + l_int8_const(1), + LLVMBuildGEP(b, v_tts_nulls, &l_attno, 1, "")); + /* store zero datum */ + LLVMBuildStore(b, + l_sizet_const(0), + LLVMBuildGEP(b, v_tts_values, &l_attno, 1, "")); + + LLVMBuildBr(b, b_next); + attguaranteedalign = false; + } + else + { + /* nothing to do */ + LLVMBuildBr(b, attcheckalignblocks[attnum]); + LLVMPositionBuilderAtEnd(b, attisnullblocks[attnum]); + LLVMBuildBr(b, attcheckalignblocks[attnum]); + } + LLVMPositionBuilderAtEnd(b, attcheckalignblocks[attnum]); + + /* determine required alignment */ + if (att->attalign == 'i') + alignto = ALIGNOF_INT; + else if (att->attalign == 'c') + alignto = 1; + else if (att->attalign == 'd') + alignto = ALIGNOF_DOUBLE; + else if (att->attalign == 's') + alignto = ALIGNOF_SHORT; + else + { + elog(ERROR, "unknown alignment"); + alignto = 0; + } + + /* ------ + * Even if alignment is required, we can skip doing it if provably + * unnecessary: + * - first column is guaranteed to be aligned + * - columns following a NOT NULL fixed width datum have known + * alignment, can skip alignment computation if that known alignment + * is compatible with current column. + * ------ + */ + if (alignto > 1 && + (known_alignment < 0 || known_alignment != TYPEALIGN(alignto, known_alignment))) + { + /* + * When accessing a varlena field we have to "peek" to see if we + * are looking at a pad byte or the first byte of a 1-byte-header + * datum. A zero byte must be either a pad byte, or the first + * byte of a correctly aligned 4-byte length word; in either case + * we can align safely. A non-zero byte must be either a 1-byte + * length word, or the first byte of a correctly aligned 4-byte + * length word; in either case we need not align. 
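+ *
+ * As a worked example of the alignment computation emitted below (cf.
+ * TYPEALIGN()): aligning offset 5 to a 4 byte boundary yields
+ * (5 + 3) & ~3 = 8, while an already aligned offset such as 8 stays 8.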
+ */ + if (att->attlen == -1) + { + LLVMValueRef v_possible_padbyte; + LLVMValueRef v_ispad; + LLVMValueRef v_off; + + /* don't know if short varlena or not */ + attguaranteedalign = false; + + v_off = LLVMBuildLoad(b, v_offp, ""); + + v_possible_padbyte = + l_load_gep1(b, v_tupdata_base, v_off, "padbyte"); + v_ispad = + LLVMBuildICmp(b, LLVMIntEQ, + v_possible_padbyte, l_int8_const(0), + "ispadbyte"); + LLVMBuildCondBr(b, v_ispad, + attalignblocks[attnum], + attstoreblocks[attnum]); + } + else + { + LLVMBuildBr(b, attalignblocks[attnum]); + } + + LLVMPositionBuilderAtEnd(b, attalignblocks[attnum]); + + /* translation of alignment code (cf TYPEALIGN()) */ + { + LLVMValueRef v_off_aligned; + LLVMValueRef v_off = LLVMBuildLoad(b, v_offp, ""); + + /* ((ALIGNVAL) - 1) */ + LLVMValueRef v_alignval = l_sizet_const(alignto - 1); + + /* ((uintptr_t) (LEN) + ((ALIGNVAL) - 1)) */ + LLVMValueRef v_lh = LLVMBuildAdd(b, v_off, v_alignval, ""); + + /* ~((uintptr_t) ((ALIGNVAL) - 1)) */ + LLVMValueRef v_rh = l_sizet_const(~(alignto - 1)); + + v_off_aligned = LLVMBuildAnd(b, v_lh, v_rh, "aligned_offset"); + + LLVMBuildStore(b, v_off_aligned, v_offp); + } + + /* + * As alignment either was unnecessary or has been performed, we + * now know the current alignment. This is only safe because this + * value isn't used for varlena and nullable columns. + */ + if (known_alignment >= 0) + { + Assert(known_alignment != 0); + known_alignment = TYPEALIGN(alignto, known_alignment); + } + + LLVMBuildBr(b, attstoreblocks[attnum]); + LLVMPositionBuilderAtEnd(b, attstoreblocks[attnum]); + } + else + { + LLVMPositionBuilderAtEnd(b, attcheckalignblocks[attnum]); + LLVMBuildBr(b, attalignblocks[attnum]); + LLVMPositionBuilderAtEnd(b, attalignblocks[attnum]); + LLVMBuildBr(b, attstoreblocks[attnum]); + } + LLVMPositionBuilderAtEnd(b, attstoreblocks[attnum]); + + /* + * Store the current offset if known to be constant. That allows LLVM + * to generate better code. Without that LLVM can't figure out that + * the offset might be constant due to the jumps for previously + * decoded columns. + */ + if (attguaranteedalign) + { + Assert(known_alignment >= 0); + LLVMBuildStore(b, l_sizet_const(known_alignment), v_offp); + } + + /* compute what following columns are aligned to */ + if (att->attlen < 0) + { + /* can't guarantee any alignment after variable length field */ + known_alignment = -1; + attguaranteedalign = false; + } + else if (att->attnotnull && attguaranteedalign && known_alignment >= 0) + { + /* + * If the offset to the column was previously known a NOT NULL & + * fixed width column guarantees that alignment is just the + * previous alignment plus column width. + */ + Assert(att->attlen > 0); + known_alignment += att->attlen; + } + else if (att->attnotnull && (att->attlen % alignto) == 0) + { + /* + * After a NOT NULL fixed-width column with a length that is a + * multiple of its alignment requirement, we know the following + * column is aligned to at least the current column's alignment. 
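+ *
+ * For instance, after an int4 NOT NULL column (attlen 4, 4 byte alignment)
+ * the next column is known to start on a 4 byte boundary, even if its
+ * absolute offset is not known at compile time.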
+ */ + Assert(att->attlen > 0); + known_alignment = alignto; + Assert(known_alignment > 0); + attguaranteedalign = false; + } + else + { + known_alignment = -1; + attguaranteedalign = false; + } + + + /* compute address to load data from */ + { + LLVMValueRef v_off = LLVMBuildLoad(b, v_offp, ""); + + v_attdatap = + LLVMBuildGEP(b, v_tupdata_base, &v_off, 1, ""); + } + + /* compute address to store value at */ + v_resultp = LLVMBuildGEP(b, v_tts_values, &l_attno, 1, ""); + + /* store null-byte (false) */ + LLVMBuildStore(b, l_int8_const(0), + LLVMBuildGEP(b, v_tts_nulls, &l_attno, 1, "")); + + /* + * Store datum. For byval datums copy the value, extend to Datum's + * width, and store. For byref types, store pointer to data. + */ + if (att->attbyval) + { + LLVMValueRef v_tmp_loaddata; + LLVMTypeRef vartypep = + LLVMPointerType(LLVMIntType(att->attlen * 8), 0); + + v_tmp_loaddata = + LLVMBuildPointerCast(b, v_attdatap, vartypep, ""); + v_tmp_loaddata = LLVMBuildLoad(b, v_tmp_loaddata, "attr_byval"); + v_tmp_loaddata = LLVMBuildZExt(b, v_tmp_loaddata, TypeSizeT, ""); + + LLVMBuildStore(b, v_tmp_loaddata, v_resultp); + } + else + { + LLVMValueRef v_tmp_loaddata; + + /* store pointer */ + v_tmp_loaddata = + LLVMBuildPtrToInt(b, + v_attdatap, + TypeSizeT, + "attr_ptr"); + LLVMBuildStore(b, v_tmp_loaddata, v_resultp); + } + + /* increment data pointer */ + if (att->attlen > 0) + { + v_incby = l_sizet_const(att->attlen); + } + else if (att->attlen == -1) + { + v_incby = LLVMBuildCall(b, + llvm_get_decl(mod, FuncVarsizeAny), + &v_attdatap, 1, + "varsize_any"); + l_callsite_ro(v_incby); + l_callsite_alwaysinline(v_incby); + } + else if (att->attlen == -2) + { + v_incby = LLVMBuildCall(b, + llvm_get_decl(mod, FuncStrlen), + &v_attdatap, 1, "strlen"); + + l_callsite_ro(v_incby); + + /* add 1 for NUL byte */ + v_incby = LLVMBuildAdd(b, v_incby, l_sizet_const(1), ""); + } + else + { + Assert(false); + v_incby = NULL; /* silence compiler */ + } + + if (attguaranteedalign) + { + Assert(known_alignment >= 0); + LLVMBuildStore(b, l_sizet_const(known_alignment), v_offp); + } + else + { + LLVMValueRef v_off = LLVMBuildLoad(b, v_offp, ""); + + v_off = LLVMBuildAdd(b, v_off, v_incby, "increment_offset"); + LLVMBuildStore(b, v_off, v_offp); + } + + /* + * jump to next block, unless last possible column, or all desired + * (available) attributes have been fetched. 
+ */ + if (attnum + 1 == natts) + { + /* jump out */ + LLVMBuildBr(b, b_out); + } + else + { + LLVMBuildBr(b, attcheckattnoblocks[attnum + 1]); + } + } + + + /* build block that returns */ + LLVMPositionBuilderAtEnd(b, b_out); + + { + LLVMValueRef v_off = LLVMBuildLoad(b, v_offp, ""); + LLVMValueRef v_flags; + + LLVMBuildStore(b, l_int16_const(natts), v_nvalidp); + v_off = LLVMBuildTrunc(b, v_off, LLVMInt32Type(), ""); + LLVMBuildStore(b, v_off, v_slotoffp); + v_flags = LLVMBuildLoad(b, v_flagsp, "tts_flags"); + v_flags = LLVMBuildOr(b, v_flags, l_int16_const(TTS_FLAG_SLOW), ""); + LLVMBuildStore(b, v_flags, v_flagsp); + LLVMBuildRetVoid(b); + } + + LLVMDisposeBuilder(b); + + return v_deform_fn; +} diff --git a/src/backend/jit/llvm/llvmjit_error.cpp b/src/backend/jit/llvm/llvmjit_error.cpp new file mode 100644 index 0000000000..a2bdfe3fb8 --- /dev/null +++ b/src/backend/jit/llvm/llvmjit_error.cpp @@ -0,0 +1,141 @@ +/*------------------------------------------------------------------------- + * + * llvmjit_error.cpp + * LLVM error related handling that requires interfacing with C++ + * + * Unfortunately neither (re)setting the C++ new handler, nor the LLVM OOM + * handler are exposed to C. Therefore this file wraps the necessary code. + * + * Copyright (c) 2016-2018, PostgreSQL Global Development Group + * + * IDENTIFICATION + * src/backend/jit/llvm/llvmjit_error.c + * + *------------------------------------------------------------------------- + */ + +extern "C" +{ +#include "postgres.h" +} + +#include + +#include "jit/llvmjit.h" + + +static int fatal_new_handler_depth = 0; +static std::new_handler old_new_handler = NULL; + +static void fatal_system_new_handler(void); +#if LLVM_VERSION_MAJOR > 4 +static void fatal_llvm_new_handler(void *user_data, const std::string& reason, bool gen_crash_diag); +#endif +static void fatal_llvm_error_handler(void *user_data, const std::string& reason, bool gen_crash_diag); + + +/* + * Enter a section in which C++ and LLVM errors are treated as FATAL errors. + * + * This is necessary for LLVM as LLVM's error handling for such cases + * (exit()ing, throwing std::bad_alloc() if compiled with exceptions, abort()) + * isn't compatible with postgres error handling. Thus in sections where LLVM + * code, not LLVM generated functions!, is executing, standard new, LLVM OOM + * and LLVM fatal errors (some OOM errors masquerade as those) are redirected + * to our own error handlers. + * + * These error handlers use FATAL, because there's no reliable way from within + * LLVM to throw an error that's guaranteed not to corrupt LLVM's state. + * + * To avoid disturbing extensions using C++ and/or LLVM, these handlers are + * unset when not executing LLVM code. There is no need to call + * llvm_leave_fatal_on_oom() when ERRORing out, error recovery resets the + * handlers in that case. + */ +void +llvm_enter_fatal_on_oom(void) +{ + if (fatal_new_handler_depth == 0) + { + old_new_handler = std::set_new_handler(fatal_system_new_handler); +#if LLVM_VERSION_MAJOR > 4 + llvm::install_bad_alloc_error_handler(fatal_llvm_new_handler); +#endif + llvm::install_fatal_error_handler(fatal_llvm_error_handler); + } + fatal_new_handler_depth++; +} + +/* + * Leave fatal error section started with llvm_enter_fatal_on_oom(). 
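+ *
+ * The expected call pattern is simply (sketch):
+ *
+ *     llvm_enter_fatal_on_oom();
+ *     ... calls into LLVM ...
+ *     llvm_leave_fatal_on_oom();
+ *
+ * with llvm_reset_after_error() restoring the handlers if an ERROR is
+ * thrown in between.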
+ */ +void +llvm_leave_fatal_on_oom(void) +{ + fatal_new_handler_depth--; + if (fatal_new_handler_depth == 0) + { + std::set_new_handler(old_new_handler); +#if LLVM_VERSION_MAJOR > 4 + llvm::remove_bad_alloc_error_handler(); +#endif + llvm::remove_fatal_error_handler(); + } +} + +/* + * Reset fatal error handling. This should only be called in error recovery + * loops like PostgresMain()'s. + */ +void +llvm_reset_after_error(void) +{ + if (fatal_new_handler_depth != 0) + { + std::set_new_handler(old_new_handler); +#if LLVM_VERSION_MAJOR > 4 + llvm::remove_bad_alloc_error_handler(); +#endif + llvm::remove_fatal_error_handler(); + } + fatal_new_handler_depth = 0; +} + +void +llvm_assert_in_fatal_section(void) +{ + Assert(fatal_new_handler_depth > 0); +} + +static void +fatal_system_new_handler(void) +{ + ereport(FATAL, + (errcode(ERRCODE_OUT_OF_MEMORY), + errmsg("out of memory"), + errdetail("while in LLVM"))); +} + +#if LLVM_VERSION_MAJOR > 4 +static void +fatal_llvm_new_handler(void *user_data, + const std::string& reason, + bool gen_crash_diag) +{ + ereport(FATAL, + (errcode(ERRCODE_OUT_OF_MEMORY), + errmsg("out of memory"), + errdetail("While in LLVM: %s", reason.c_str()))); +} +#endif + +static void +fatal_llvm_error_handler(void *user_data, + const std::string& reason, + bool gen_crash_diag) +{ + ereport(FATAL, + (errcode(ERRCODE_OUT_OF_MEMORY), + errmsg("fatal llvm error: %s", + reason.c_str()))); +} diff --git a/src/backend/jit/llvm/llvmjit_expr.c b/src/backend/jit/llvm/llvmjit_expr.c new file mode 100644 index 0000000000..0dbc1e4106 --- /dev/null +++ b/src/backend/jit/llvm/llvmjit_expr.c @@ -0,0 +1,2687 @@ +/*------------------------------------------------------------------------- + * + * llvmjit_expr.c + * JIT compile expressions. + * + * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1994, Regents of the University of California + * + * + * IDENTIFICATION + * src/backend/jit/llvm/llvmjit_expr.c + * + *------------------------------------------------------------------------- + */ + +#include "postgres.h" + +#include +#include + +#include "access/htup_details.h" +#include "access/nbtree.h" +#include "access/tupconvert.h" +#include "catalog/objectaccess.h" +#include "catalog/pg_type.h" +#include "executor/execdebug.h" +#include "executor/nodeAgg.h" +#include "executor/nodeSubplan.h" +#include "executor/execExpr.h" +#include "funcapi.h" +#include "jit/llvmjit.h" +#include "jit/llvmjit_emit.h" +#include "miscadmin.h" +#include "nodes/makefuncs.h" +#include "nodes/nodeFuncs.h" +#include "optimizer/planner.h" +#include "parser/parse_coerce.h" +#include "parser/parsetree.h" +#include "pgstat.h" +#include "utils/acl.h" +#include "utils/builtins.h" +#include "utils/date.h" +#include "utils/fmgrtab.h" +#include "utils/lsyscache.h" +#include "utils/memutils.h" +#include "utils/timestamp.h" +#include "utils/typcache.h" +#include "utils/xml.h" + + +typedef struct CompiledExprState +{ + LLVMJitContext *context; + const char *funcname; +} CompiledExprState; + + +static Datum ExecRunCompiledExpr(ExprState *state, ExprContext *econtext, bool *isNull); + +static LLVMValueRef BuildV1Call(LLVMJitContext *context, LLVMBuilderRef b, + LLVMModuleRef mod, FunctionCallInfo fcinfo, + LLVMValueRef *v_fcinfo_isnull); +static void build_EvalXFunc(LLVMBuilderRef b, LLVMModuleRef mod, + const char *funcname, + LLVMValueRef v_state, LLVMValueRef v_econtext, + ExprEvalStep *op); +static LLVMValueRef create_LifetimeEnd(LLVMModuleRef mod); + + +/* + * JIT compile 
expression. + */ +bool +llvm_compile_expr(ExprState *state) +{ + PlanState *parent = state->parent; + int i; + char *funcname; + + LLVMJitContext *context = NULL; + + LLVMBuilderRef b; + LLVMModuleRef mod; + LLVMTypeRef eval_sig; + LLVMValueRef eval_fn; + LLVMBasicBlockRef entry; + LLVMBasicBlockRef *opblocks; + + /* state itself */ + LLVMValueRef v_state; + LLVMValueRef v_econtext; + + /* returnvalue */ + LLVMValueRef v_isnullp; + + /* tmp vars in state */ + LLVMValueRef v_tmpvaluep; + LLVMValueRef v_tmpisnullp; + + /* slots */ + LLVMValueRef v_innerslot; + LLVMValueRef v_outerslot; + LLVMValueRef v_scanslot; + LLVMValueRef v_resultslot; + + /* nulls/values of slots */ + LLVMValueRef v_innervalues; + LLVMValueRef v_innernulls; + LLVMValueRef v_outervalues; + LLVMValueRef v_outernulls; + LLVMValueRef v_scanvalues; + LLVMValueRef v_scannulls; + LLVMValueRef v_resultvalues; + LLVMValueRef v_resultnulls; + + /* stuff in econtext */ + LLVMValueRef v_aggvalues; + LLVMValueRef v_aggnulls; + + instr_time starttime; + instr_time endtime; + + llvm_enter_fatal_on_oom(); + + /* get or create JIT context */ + if (parent && parent->state->es_jit) + { + context = (LLVMJitContext *) parent->state->es_jit; + } + else + { + context = llvm_create_context(parent->state->es_jit_flags); + + if (parent) + { + parent->state->es_jit = &context->base; + } + + } + + INSTR_TIME_SET_CURRENT(starttime); + + mod = llvm_mutable_module(context); + + b = LLVMCreateBuilder(); + + funcname = llvm_expand_funcname(context, "evalexpr"); + + /* Create the signature and function */ + { + LLVMTypeRef param_types[3]; + + param_types[0] = l_ptr(StructExprState); /* state */ + param_types[1] = l_ptr(StructExprContext); /* econtext */ + param_types[2] = l_ptr(TypeParamBool); /* isnull */ + + eval_sig = LLVMFunctionType(TypeSizeT, + param_types, lengthof(param_types), + false); + } + eval_fn = LLVMAddFunction(mod, funcname, eval_sig); + LLVMSetLinkage(eval_fn, LLVMExternalLinkage); + LLVMSetVisibility(eval_fn, LLVMDefaultVisibility); + llvm_copy_attributes(AttributeTemplate, eval_fn); + + entry = LLVMAppendBasicBlock(eval_fn, "entry"); + + /* build state */ + v_state = LLVMGetParam(eval_fn, 0); + v_econtext = LLVMGetParam(eval_fn, 1); + v_isnullp = LLVMGetParam(eval_fn, 2); + + LLVMPositionBuilderAtEnd(b, entry); + + v_tmpvaluep = LLVMBuildStructGEP(b, v_state, + FIELDNO_EXPRSTATE_RESVALUE, + "v.state.resvalue"); + v_tmpisnullp = LLVMBuildStructGEP(b, v_state, + FIELDNO_EXPRSTATE_RESNULL, + "v.state.resnull"); + + /* build global slots */ + v_scanslot = l_load_struct_gep(b, v_econtext, + FIELDNO_EXPRCONTEXT_SCANTUPLE, + "v_scanslot"); + v_innerslot = l_load_struct_gep(b, v_econtext, + FIELDNO_EXPRCONTEXT_INNERTUPLE, + "v_innerslot"); + v_outerslot = l_load_struct_gep(b, v_econtext, + FIELDNO_EXPRCONTEXT_OUTERTUPLE, + "v_outerslot"); + v_resultslot = l_load_struct_gep(b, v_state, + FIELDNO_EXPRSTATE_RESULTSLOT, + "v_resultslot"); + + /* build global values/isnull pointers */ + v_scanvalues = l_load_struct_gep(b, v_scanslot, + FIELDNO_TUPLETABLESLOT_VALUES, + "v_scanvalues"); + v_scannulls = l_load_struct_gep(b, v_scanslot, + FIELDNO_TUPLETABLESLOT_ISNULL, + "v_scannulls"); + v_innervalues = l_load_struct_gep(b, v_innerslot, + FIELDNO_TUPLETABLESLOT_VALUES, + "v_innervalues"); + v_innernulls = l_load_struct_gep(b, v_innerslot, + FIELDNO_TUPLETABLESLOT_ISNULL, + "v_innernulls"); + v_outervalues = l_load_struct_gep(b, v_outerslot, + FIELDNO_TUPLETABLESLOT_VALUES, + "v_outervalues"); + v_outernulls = l_load_struct_gep(b, v_outerslot, + 
FIELDNO_TUPLETABLESLOT_ISNULL, + "v_outernulls"); + v_resultvalues = l_load_struct_gep(b, v_resultslot, + FIELDNO_TUPLETABLESLOT_VALUES, + "v_resultvalues"); + v_resultnulls = l_load_struct_gep(b, v_resultslot, + FIELDNO_TUPLETABLESLOT_ISNULL, + "v_resultnulls"); + + /* aggvalues/aggnulls */ + v_aggvalues = l_load_struct_gep(b, v_econtext, + FIELDNO_EXPRCONTEXT_AGGVALUES, + "v.econtext.aggvalues"); + v_aggnulls = l_load_struct_gep(b, v_econtext, + FIELDNO_EXPRCONTEXT_AGGNULLS, + "v.econtext.aggnulls"); + + /* allocate blocks for each op upfront, so we can do jumps easily */ + opblocks = palloc(sizeof(LLVMBasicBlockRef) * state->steps_len); + for (i = 0; i < state->steps_len; i++) + opblocks[i] = l_bb_append_v(eval_fn, "b.op.%d.start", i); + + /* jump from entry to first block */ + LLVMBuildBr(b, opblocks[0]); + + for (i = 0; i < state->steps_len; i++) + { + ExprEvalStep *op; + ExprEvalOp opcode; + LLVMValueRef v_resvaluep; + LLVMValueRef v_resnullp; + + LLVMPositionBuilderAtEnd(b, opblocks[i]); + + op = &state->steps[i]; + opcode = ExecEvalStepOp(state, op); + + v_resvaluep = l_ptr_const(op->resvalue, l_ptr(TypeSizeT)); + v_resnullp = l_ptr_const(op->resnull, l_ptr(TypeStorageBool)); + + switch (opcode) + { + case EEOP_DONE: + { + LLVMValueRef v_tmpisnull, + v_tmpvalue; + + v_tmpvalue = LLVMBuildLoad(b, v_tmpvaluep, ""); + v_tmpisnull = LLVMBuildLoad(b, v_tmpisnullp, ""); + v_tmpisnull = + LLVMBuildTrunc(b, v_tmpisnull, TypeParamBool, ""); + + LLVMBuildStore(b, v_tmpisnull, v_isnullp); + + LLVMBuildRet(b, v_tmpvalue); + break; + } + + case EEOP_INNER_FETCHSOME: + case EEOP_OUTER_FETCHSOME: + case EEOP_SCAN_FETCHSOME: + { + TupleDesc desc = NULL; + LLVMValueRef v_slot; + LLVMBasicBlockRef b_fetch; + LLVMValueRef v_nvalid; + + b_fetch = l_bb_before_v(opblocks[i + 1], + "op.%d.fetch", i); + + if (op->d.fetch.known_desc) + desc = op->d.fetch.known_desc; + + if (opcode == EEOP_INNER_FETCHSOME) + { + PlanState *is = innerPlanState(parent); + + v_slot = v_innerslot; + + if (!desc && + is && + is->ps_ResultTupleSlot && + TTS_FIXED(is->ps_ResultTupleSlot)) + desc = is->ps_ResultTupleSlot->tts_tupleDescriptor; + } + else if (opcode == EEOP_OUTER_FETCHSOME) + { + PlanState *os = outerPlanState(parent); + + v_slot = v_outerslot; + + if (!desc && + os && + os->ps_ResultTupleSlot && + TTS_FIXED(os->ps_ResultTupleSlot)) + desc = os->ps_ResultTupleSlot->tts_tupleDescriptor; + } + else + { + v_slot = v_scanslot; + if (!desc && parent) + desc = parent->scandesc; + } + + /* + * Check if all required attributes are available, or + * whether deforming is required. + */ + v_nvalid = + l_load_struct_gep(b, v_slot, + FIELDNO_TUPLETABLESLOT_NVALID, + ""); + LLVMBuildCondBr(b, + LLVMBuildICmp(b, LLVMIntUGE, v_nvalid, + l_int16_const(op->d.fetch.last_var), + ""), + opblocks[i + 1], b_fetch); + + LLVMPositionBuilderAtEnd(b, b_fetch); + + /* + * If the tupledesc of the to-be-deformed tuple is known, + * and JITing of deforming is enabled, build deform + * function specific to tupledesc and the exact number of + * to-be-extracted attributes. 
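+ *
+ * E.g. for a scan slot with last_var = 3 this emits a call to a deform
+ * function specialized for the first three attributes of the scan
+ * descriptor; without PGJIT_DEFORM (or without a known descriptor) the
+ * fallback below calls slot_getsomeattrs(slot, 3) instead.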
+ */ + if (desc && (context->base.flags & PGJIT_DEFORM)) + { + LLVMValueRef params[1]; + LLVMValueRef l_jit_deform; + + l_jit_deform = + slot_compile_deform(context, desc, + op->d.fetch.last_var); + params[0] = v_slot; + + LLVMBuildCall(b, l_jit_deform, + params, lengthof(params), ""); + + } + else + { + LLVMValueRef params[2]; + + params[0] = v_slot; + params[1] = l_int32_const(op->d.fetch.last_var); + + LLVMBuildCall(b, + llvm_get_decl(mod, FuncSlotGetsomeattrs), + params, lengthof(params), ""); + } + + LLVMBuildBr(b, opblocks[i + 1]); + break; + } + + case EEOP_INNER_VAR: + case EEOP_OUTER_VAR: + case EEOP_SCAN_VAR: + { + LLVMValueRef value, + isnull; + LLVMValueRef v_attnum; + LLVMValueRef v_values; + LLVMValueRef v_nulls; + + if (opcode == EEOP_INNER_VAR) + { + v_values = v_innervalues; + v_nulls = v_innernulls; + } + else if (opcode == EEOP_OUTER_VAR) + { + v_values = v_outervalues; + v_nulls = v_outernulls; + } + else + { + v_values = v_scanvalues; + v_nulls = v_scannulls; + } + + v_attnum = l_int32_const(op->d.var.attnum); + value = l_load_gep1(b, v_values, v_attnum, ""); + isnull = l_load_gep1(b, v_nulls, v_attnum, ""); + LLVMBuildStore(b, value, v_resvaluep); + LLVMBuildStore(b, isnull, v_resnullp); + + LLVMBuildBr(b, opblocks[i + 1]); + break; + } + + case EEOP_INNER_SYSVAR: + case EEOP_OUTER_SYSVAR: + case EEOP_SCAN_SYSVAR: + { + LLVMValueRef v_slot; + LLVMValueRef v_params[4]; + + if (opcode == EEOP_INNER_SYSVAR) + v_slot = v_innerslot; + else if (opcode == EEOP_OUTER_SYSVAR) + v_slot = v_outerslot; + else + v_slot = v_scanslot; + + v_params[0] = v_state; + v_params[1] = l_ptr_const(op, l_ptr(StructExprEvalStep)); + v_params[2] = v_econtext; + v_params[3] = v_slot; + + LLVMBuildCall(b, + llvm_get_decl(mod, FuncExecEvalSysVar), + v_params, lengthof(v_params), ""); + + LLVMBuildBr(b, opblocks[i + 1]); + break; + } + + case EEOP_WHOLEROW: + build_EvalXFunc(b, mod, "ExecEvalWholeRowVar", + v_state, v_econtext, op); + LLVMBuildBr(b, opblocks[i + 1]); + break; + + case EEOP_ASSIGN_INNER_VAR: + case EEOP_ASSIGN_OUTER_VAR: + case EEOP_ASSIGN_SCAN_VAR: + { + LLVMValueRef v_value, + v_isnull; + LLVMValueRef v_rvaluep, + v_risnullp; + LLVMValueRef v_attnum, + v_resultnum; + LLVMValueRef v_values; + LLVMValueRef v_nulls; + + if (opcode == EEOP_ASSIGN_INNER_VAR) + { + v_values = v_innervalues; + v_nulls = v_innernulls; + } + else if (opcode == EEOP_ASSIGN_OUTER_VAR) + { + v_values = v_outervalues; + v_nulls = v_outernulls; + } + else + { + v_values = v_scanvalues; + v_nulls = v_scannulls; + } + + /* load data */ + v_attnum = l_int32_const(op->d.assign_var.attnum); + v_value = l_load_gep1(b, v_values, v_attnum, ""); + v_isnull = l_load_gep1(b, v_nulls, v_attnum, ""); + + /* compute addresses of targets */ + v_resultnum = l_int32_const(op->d.assign_var.resultnum); + v_rvaluep = LLVMBuildGEP(b, v_resultvalues, + &v_resultnum, 1, ""); + v_risnullp = LLVMBuildGEP(b, v_resultnulls, + &v_resultnum, 1, ""); + + /* and store */ + LLVMBuildStore(b, v_value, v_rvaluep); + LLVMBuildStore(b, v_isnull, v_risnullp); + + LLVMBuildBr(b, opblocks[i + 1]); + break; + } + + case EEOP_ASSIGN_TMP: + { + LLVMValueRef v_value, + v_isnull; + LLVMValueRef v_rvaluep, + v_risnullp; + LLVMValueRef v_resultnum; + size_t resultnum = op->d.assign_tmp.resultnum; + + /* load data */ + v_value = LLVMBuildLoad(b, v_tmpvaluep, ""); + v_isnull = LLVMBuildLoad(b, v_tmpisnullp, ""); + + /* compute addresses of targets */ + v_resultnum = l_int32_const(resultnum); + v_rvaluep = + LLVMBuildGEP(b, v_resultvalues, &v_resultnum, 1, 
""); + v_risnullp = + LLVMBuildGEP(b, v_resultnulls, &v_resultnum, 1, ""); + + /* and store */ + LLVMBuildStore(b, v_value, v_rvaluep); + LLVMBuildStore(b, v_isnull, v_risnullp); + + LLVMBuildBr(b, opblocks[i + 1]); + break; + } + + case EEOP_ASSIGN_TMP_MAKE_RO: + { + LLVMBasicBlockRef b_notnull; + LLVMValueRef v_params[1]; + LLVMValueRef v_ret; + LLVMValueRef v_value, + v_isnull; + LLVMValueRef v_rvaluep, + v_risnullp; + LLVMValueRef v_resultnum; + size_t resultnum = op->d.assign_tmp.resultnum; + + b_notnull = l_bb_before_v(opblocks[i + 1], + "op.%d.assign_tmp.notnull", i); + + /* load data */ + v_value = LLVMBuildLoad(b, v_tmpvaluep, ""); + v_isnull = LLVMBuildLoad(b, v_tmpisnullp, ""); + + /* compute addresses of targets */ + v_resultnum = l_int32_const(resultnum); + v_rvaluep = LLVMBuildGEP(b, v_resultvalues, + &v_resultnum, 1, ""); + v_risnullp = LLVMBuildGEP(b, v_resultnulls, + &v_resultnum, 1, ""); + + /* store nullness */ + LLVMBuildStore(b, v_isnull, v_risnullp); + + /* check if value is NULL */ + LLVMBuildCondBr(b, + LLVMBuildICmp(b, LLVMIntEQ, v_isnull, + l_sbool_const(0), ""), + b_notnull, opblocks[i + 1]); + + /* if value is not null, convert to RO datum */ + LLVMPositionBuilderAtEnd(b, b_notnull); + v_params[0] = v_value; + v_ret = + LLVMBuildCall(b, + llvm_get_decl(mod, FuncMakeExpandedObjectReadOnlyInternal), + v_params, lengthof(v_params), ""); + + /* store value */ + LLVMBuildStore(b, v_ret, v_rvaluep); + + LLVMBuildBr(b, opblocks[i + 1]); + break; + } + + case EEOP_CONST: + { + LLVMValueRef v_constvalue, + v_constnull; + + v_constvalue = l_sizet_const(op->d.constval.value); + v_constnull = l_sbool_const(op->d.constval.isnull); + + LLVMBuildStore(b, v_constvalue, v_resvaluep); + LLVMBuildStore(b, v_constnull, v_resnullp); + + LLVMBuildBr(b, opblocks[i + 1]); + break; + } + + case EEOP_FUNCEXPR_STRICT: + { + FunctionCallInfo fcinfo = op->d.func.fcinfo_data; + LLVMBasicBlockRef b_nonull; + int argno; + LLVMValueRef v_fcinfo; + LLVMValueRef v_argnullp; + LLVMBasicBlockRef *b_checkargnulls; + + /* + * Block for the actual function call, if args are + * non-NULL. 
+ */ + b_nonull = l_bb_before_v(opblocks[i + 1], + "b.%d.no-null-args", i); + + /* should make sure they're optimized beforehand */ + if (op->d.func.nargs == 0) + elog(ERROR, "argumentless strict functions are pointless"); + + v_fcinfo = + l_ptr_const(fcinfo, l_ptr(StructFunctionCallInfoData)); + + v_argnullp = + LLVMBuildStructGEP(b, + v_fcinfo, + FIELDNO_FUNCTIONCALLINFODATA_ARGNULL, + "v_argnullp"); + + /* + * set resnull to true, if the function is actually + * called, it'll be reset + */ + LLVMBuildStore(b, l_sbool_const(1), v_resnullp); + + /* create blocks for checking args, one for each */ + b_checkargnulls = + palloc(sizeof(LLVMBasicBlockRef *) * op->d.func.nargs); + for (argno = 0; argno < op->d.func.nargs; argno++) + b_checkargnulls[argno] = + l_bb_before_v(b_nonull, "b.%d.isnull.%d", i, argno); + + /* jump to check of first argument */ + LLVMBuildBr(b, b_checkargnulls[0]); + + /* check each arg for NULLness */ + for (argno = 0; argno < op->d.func.nargs; argno++) + { + LLVMValueRef v_argisnull; + LLVMBasicBlockRef b_argnotnull; + + LLVMPositionBuilderAtEnd(b, b_checkargnulls[argno]); + + /* compute block to jump to if argument is not null */ + if (argno + 1 == op->d.func.nargs) + b_argnotnull = b_nonull; + else + b_argnotnull = b_checkargnulls[argno + 1]; + + /* and finally load & check NULLness of arg */ + v_argisnull = l_load_struct_gep(b, v_argnullp, + argno, ""); + LLVMBuildCondBr(b, + LLVMBuildICmp(b, LLVMIntEQ, + v_argisnull, + l_sbool_const(1), + ""), + opblocks[i + 1], + b_argnotnull); + } + + LLVMPositionBuilderAtEnd(b, b_nonull); + } + /* FALLTHROUGH */ + + case EEOP_FUNCEXPR: + { + FunctionCallInfo fcinfo = op->d.func.fcinfo_data; + LLVMValueRef v_fcinfo_isnull; + LLVMValueRef v_retval; + + v_retval = BuildV1Call(context, b, mod, fcinfo, + &v_fcinfo_isnull); + LLVMBuildStore(b, v_retval, v_resvaluep); + LLVMBuildStore(b, v_fcinfo_isnull, v_resnullp); + + LLVMBuildBr(b, opblocks[i + 1]); + break; + } + + case EEOP_FUNCEXPR_FUSAGE: + build_EvalXFunc(b, mod, "ExecEvalFuncExprFusage", + v_state, v_econtext, op); + LLVMBuildBr(b, opblocks[i + 1]); + break; + + + case EEOP_FUNCEXPR_STRICT_FUSAGE: + build_EvalXFunc(b, mod, "ExecEvalFuncExprStrictFusage", + v_state, v_econtext, op); + LLVMBuildBr(b, opblocks[i + 1]); + break; + + case EEOP_BOOL_AND_STEP_FIRST: + { + LLVMValueRef v_boolanynullp; + + v_boolanynullp = l_ptr_const(op->d.boolexpr.anynull, + l_ptr(TypeStorageBool)); + LLVMBuildStore(b, l_sbool_const(0), v_boolanynullp); + + } + /* FALLTHROUGH */ + + /* + * Treat them the same for now, optimizer can remove + * redundancy. Could be worthwhile to optimize during emission + * though. 
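As a sketch of the three-valued logic the following AND-step blocks implement (anynull stands for the flag behind op->d.boolexpr.anynull, and the label is a hypothetical stand-in for the jumpdone target):

    if (*op->resnull)
        *anynull = true;                     /* NULL input: remember it, keep evaluating */
    else if (!DatumGetBool(*op->resvalue))
        goto and_done;                       /* FALSE short-circuits the whole AND */
    /* after the last step: if anynull was ever set, the AND yields NULL, else TRUE */

The OR steps further down are the exact dual, short-circuiting on TRUE instead of FALSE.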
+ */ + case EEOP_BOOL_AND_STEP_LAST: + case EEOP_BOOL_AND_STEP: + { + LLVMValueRef v_boolvalue; + LLVMValueRef v_boolnull; + LLVMValueRef v_boolanynullp, + v_boolanynull; + LLVMBasicBlockRef b_boolisnull; + LLVMBasicBlockRef b_boolcheckfalse; + LLVMBasicBlockRef b_boolisfalse; + LLVMBasicBlockRef b_boolcont; + LLVMBasicBlockRef b_boolisanynull; + + b_boolisnull = l_bb_before_v(opblocks[i + 1], + "b.%d.boolisnull", i); + b_boolcheckfalse = l_bb_before_v(opblocks[i + 1], + "b.%d.boolcheckfalse", i); + b_boolisfalse = l_bb_before_v(opblocks[i + 1], + "b.%d.boolisfalse", i); + b_boolisanynull = l_bb_before_v(opblocks[i + 1], + "b.%d.boolisanynull", i); + b_boolcont = l_bb_before_v(opblocks[i + 1], + "b.%d.boolcont", i); + + v_boolanynullp = l_ptr_const(op->d.boolexpr.anynull, + l_ptr(TypeStorageBool)); + + v_boolnull = LLVMBuildLoad(b, v_resnullp, ""); + v_boolvalue = LLVMBuildLoad(b, v_resvaluep, ""); + + /* set resnull to boolnull */ + LLVMBuildStore(b, v_boolnull, v_resnullp); + /* set revalue to boolvalue */ + LLVMBuildStore(b, v_boolvalue, v_resvaluep); + + /* check if current input is NULL */ + LLVMBuildCondBr(b, + LLVMBuildICmp(b, LLVMIntEQ, v_boolnull, + l_sbool_const(1), ""), + b_boolisnull, + b_boolcheckfalse); + + /* build block that sets anynull */ + LLVMPositionBuilderAtEnd(b, b_boolisnull); + /* set boolanynull to true */ + LLVMBuildStore(b, l_sbool_const(1), v_boolanynullp); + /* and jump to next block */ + LLVMBuildBr(b, b_boolcont); + + /* build block checking for false */ + LLVMPositionBuilderAtEnd(b, b_boolcheckfalse); + LLVMBuildCondBr(b, + LLVMBuildICmp(b, LLVMIntEQ, v_boolvalue, + l_sizet_const(0), ""), + b_boolisfalse, + b_boolcont); + + /* + * Build block handling FALSE. Value is false, so short + * circuit. + */ + LLVMPositionBuilderAtEnd(b, b_boolisfalse); + /* result is already set to FALSE, need not change it */ + /* and jump to the end of the AND expression */ + LLVMBuildBr(b, opblocks[op->d.boolexpr.jumpdone]); + + /* Build block that continues if bool is TRUE. */ + LLVMPositionBuilderAtEnd(b, b_boolcont); + + v_boolanynull = LLVMBuildLoad(b, v_boolanynullp, ""); + + /* set value to NULL if any previous values were NULL */ + LLVMBuildCondBr(b, + LLVMBuildICmp(b, LLVMIntEQ, v_boolanynull, + l_sbool_const(0), ""), + opblocks[i + 1], b_boolisanynull); + + LLVMPositionBuilderAtEnd(b, b_boolisanynull); + /* set resnull to true */ + LLVMBuildStore(b, l_sbool_const(1), v_resnullp); + /* reset resvalue */ + LLVMBuildStore(b, l_sizet_const(0), v_resvaluep); + + LLVMBuildBr(b, opblocks[i + 1]); + break; + } + case EEOP_BOOL_OR_STEP_FIRST: + { + LLVMValueRef v_boolanynullp; + + v_boolanynullp = l_ptr_const(op->d.boolexpr.anynull, + l_ptr(TypeStorageBool)); + LLVMBuildStore(b, l_sbool_const(0), v_boolanynullp); + } + /* FALLTHROUGH */ + + /* + * Treat them the same for now, optimizer can remove + * redundancy. Could be worthwhile to optimize during emission + * though. 
+ */ + case EEOP_BOOL_OR_STEP_LAST: + case EEOP_BOOL_OR_STEP: + { + LLVMValueRef v_boolvalue; + LLVMValueRef v_boolnull; + LLVMValueRef v_boolanynullp, + v_boolanynull; + + LLVMBasicBlockRef b_boolisnull; + LLVMBasicBlockRef b_boolchecktrue; + LLVMBasicBlockRef b_boolistrue; + LLVMBasicBlockRef b_boolcont; + LLVMBasicBlockRef b_boolisanynull; + + b_boolisnull = l_bb_before_v(opblocks[i + 1], + "b.%d.boolisnull", i); + b_boolchecktrue = l_bb_before_v(opblocks[i + 1], + "b.%d.boolchecktrue", i); + b_boolistrue = l_bb_before_v(opblocks[i + 1], + "b.%d.boolistrue", i); + b_boolisanynull = l_bb_before_v(opblocks[i + 1], + "b.%d.boolisanynull", i); + b_boolcont = l_bb_before_v(opblocks[i + 1], + "b.%d.boolcont", i); + + v_boolanynullp = l_ptr_const(op->d.boolexpr.anynull, + l_ptr(TypeStorageBool)); + + v_boolnull = LLVMBuildLoad(b, v_resnullp, ""); + v_boolvalue = LLVMBuildLoad(b, v_resvaluep, ""); + + /* set resnull to boolnull */ + LLVMBuildStore(b, v_boolnull, v_resnullp); + /* set revalue to boolvalue */ + LLVMBuildStore(b, v_boolvalue, v_resvaluep); + + LLVMBuildCondBr(b, + LLVMBuildICmp(b, LLVMIntEQ, v_boolnull, + l_sbool_const(1), ""), + b_boolisnull, + b_boolchecktrue); + + /* build block that sets anynull */ + LLVMPositionBuilderAtEnd(b, b_boolisnull); + /* set boolanynull to true */ + LLVMBuildStore(b, l_sbool_const(1), v_boolanynullp); + /* and jump to next block */ + LLVMBuildBr(b, b_boolcont); + + /* build block checking for true */ + LLVMPositionBuilderAtEnd(b, b_boolchecktrue); + LLVMBuildCondBr(b, + LLVMBuildICmp(b, LLVMIntEQ, v_boolvalue, + l_sizet_const(1), ""), + b_boolistrue, + b_boolcont); + + /* + * Build block handling True. Value is true, so short + * circuit. + */ + LLVMPositionBuilderAtEnd(b, b_boolistrue); + /* result is already set to TRUE, need not change it */ + /* and jump to the end of the OR expression */ + LLVMBuildBr(b, opblocks[op->d.boolexpr.jumpdone]); + + /* build block that continues if bool is FALSE */ + LLVMPositionBuilderAtEnd(b, b_boolcont); + + v_boolanynull = LLVMBuildLoad(b, v_boolanynullp, ""); + + /* set value to NULL if any previous values were NULL */ + LLVMBuildCondBr(b, + LLVMBuildICmp(b, LLVMIntEQ, v_boolanynull, + l_sbool_const(0), ""), + opblocks[i + 1], b_boolisanynull); + + LLVMPositionBuilderAtEnd(b, b_boolisanynull); + /* set resnull to true */ + LLVMBuildStore(b, l_sbool_const(1), v_resnullp); + /* reset resvalue */ + LLVMBuildStore(b, l_sizet_const(0), v_resvaluep); + + LLVMBuildBr(b, opblocks[i + 1]); + break; + } + + case EEOP_BOOL_NOT_STEP: + { + LLVMValueRef v_boolvalue; + LLVMValueRef v_boolnull; + LLVMValueRef v_negbool; + + v_boolnull = LLVMBuildLoad(b, v_resnullp, ""); + v_boolvalue = LLVMBuildLoad(b, v_resvaluep, ""); + + v_negbool = LLVMBuildZExt(b, + LLVMBuildICmp(b, LLVMIntEQ, + v_boolvalue, + l_sizet_const(0), + ""), + TypeSizeT, ""); + /* set resnull to boolnull */ + LLVMBuildStore(b, v_boolnull, v_resnullp); + /* set revalue to !boolvalue */ + LLVMBuildStore(b, v_negbool, v_resvaluep); + + LLVMBuildBr(b, opblocks[i + 1]); + break; + } + + case EEOP_QUAL: + { + LLVMValueRef v_resnull; + LLVMValueRef v_resvalue; + LLVMValueRef v_nullorfalse; + LLVMBasicBlockRef b_qualfail; + + b_qualfail = l_bb_before_v(opblocks[i + 1], + "op.%d.qualfail", i); + + v_resvalue = LLVMBuildLoad(b, v_resvaluep, ""); + v_resnull = LLVMBuildLoad(b, v_resnullp, ""); + + v_nullorfalse = + LLVMBuildOr(b, + LLVMBuildICmp(b, LLVMIntEQ, v_resnull, + l_sbool_const(1), ""), + LLVMBuildICmp(b, LLVMIntEQ, v_resvalue, + l_sizet_const(0), ""), + ""); + + 
LLVMBuildCondBr(b, + v_nullorfalse, + b_qualfail, + opblocks[i + 1]); + + /* build block handling NULL or false */ + LLVMPositionBuilderAtEnd(b, b_qualfail); + /* set resnull to false */ + LLVMBuildStore(b, l_sbool_const(0), v_resnullp); + /* set resvalue to false */ + LLVMBuildStore(b, l_sizet_const(0), v_resvaluep); + /* and jump out */ + LLVMBuildBr(b, opblocks[op->d.qualexpr.jumpdone]); + break; + } + + case EEOP_JUMP: + { + LLVMBuildBr(b, opblocks[op->d.jump.jumpdone]); + break; + } + + case EEOP_JUMP_IF_NULL: + { + LLVMValueRef v_resnull; + + /* Transfer control if current result is null */ + + v_resnull = LLVMBuildLoad(b, v_resnullp, ""); + + LLVMBuildCondBr(b, + LLVMBuildICmp(b, LLVMIntEQ, v_resnull, + l_sbool_const(1), ""), + opblocks[op->d.jump.jumpdone], + opblocks[i + 1]); + break; + } + + case EEOP_JUMP_IF_NOT_NULL: + { + LLVMValueRef v_resnull; + + /* Transfer control if current result is non-null */ + + v_resnull = LLVMBuildLoad(b, v_resnullp, ""); + + LLVMBuildCondBr(b, + LLVMBuildICmp(b, LLVMIntEQ, v_resnull, + l_sbool_const(0), ""), + opblocks[op->d.jump.jumpdone], + opblocks[i + 1]); + break; + } + + + case EEOP_JUMP_IF_NOT_TRUE: + { + LLVMValueRef v_resnull; + LLVMValueRef v_resvalue; + LLVMValueRef v_nullorfalse; + + /* Transfer control if current result is null or false */ + + v_resvalue = LLVMBuildLoad(b, v_resvaluep, ""); + v_resnull = LLVMBuildLoad(b, v_resnullp, ""); + + v_nullorfalse = + LLVMBuildOr(b, + LLVMBuildICmp(b, LLVMIntEQ, v_resnull, + l_sbool_const(1), ""), + LLVMBuildICmp(b, LLVMIntEQ, v_resvalue, + l_sizet_const(0), ""), + ""); + + LLVMBuildCondBr(b, + v_nullorfalse, + opblocks[op->d.jump.jumpdone], + opblocks[i + 1]); + break; + } + + case EEOP_NULLTEST_ISNULL: + { + LLVMValueRef v_resnull = LLVMBuildLoad(b, v_resnullp, ""); + LLVMValueRef v_resvalue; + + v_resvalue = + LLVMBuildSelect(b, + LLVMBuildICmp(b, LLVMIntEQ, v_resnull, + l_sbool_const(1), ""), + l_sizet_const(1), + l_sizet_const(0), + ""); + LLVMBuildStore(b, v_resvalue, v_resvaluep); + LLVMBuildStore(b, l_sbool_const(0), v_resnullp); + + LLVMBuildBr(b, opblocks[i + 1]); + break; + } + + case EEOP_NULLTEST_ISNOTNULL: + { + LLVMValueRef v_resnull = LLVMBuildLoad(b, v_resnullp, ""); + LLVMValueRef v_resvalue; + + v_resvalue = + LLVMBuildSelect(b, + LLVMBuildICmp(b, LLVMIntEQ, v_resnull, + l_sbool_const(1), ""), + l_sizet_const(0), + l_sizet_const(1), + ""); + LLVMBuildStore(b, v_resvalue, v_resvaluep); + LLVMBuildStore(b, l_sbool_const(0), v_resnullp); + + LLVMBuildBr(b, opblocks[i + 1]); + break; + } + + case EEOP_NULLTEST_ROWISNULL: + build_EvalXFunc(b, mod, "ExecEvalRowNull", + v_state, v_econtext, op); + LLVMBuildBr(b, opblocks[i + 1]); + break; + + case EEOP_NULLTEST_ROWISNOTNULL: + build_EvalXFunc(b, mod, "ExecEvalRowNotNull", + v_state, v_econtext, op); + LLVMBuildBr(b, opblocks[i + 1]); + break; + + case EEOP_BOOLTEST_IS_TRUE: + case EEOP_BOOLTEST_IS_NOT_FALSE: + case EEOP_BOOLTEST_IS_FALSE: + case EEOP_BOOLTEST_IS_NOT_TRUE: + { + LLVMBasicBlockRef b_isnull, + b_notnull; + LLVMValueRef v_resnull = LLVMBuildLoad(b, v_resnullp, ""); + + b_isnull = l_bb_before_v(opblocks[i + 1], + "op.%d.isnull", i); + b_notnull = l_bb_before_v(opblocks[i + 1], + "op.%d.isnotnull", i); + + /* check if value is NULL */ + LLVMBuildCondBr(b, + LLVMBuildICmp(b, LLVMIntEQ, v_resnull, + l_sbool_const(1), ""), + b_isnull, b_notnull); + + /* if value is NULL, return false */ + LLVMPositionBuilderAtEnd(b, b_isnull); + + /* result is not null */ + LLVMBuildStore(b, l_sbool_const(0), v_resnullp); + + if (opcode == 
EEOP_BOOLTEST_IS_TRUE || + opcode == EEOP_BOOLTEST_IS_FALSE) + { + LLVMBuildStore(b, l_sizet_const(0), v_resvaluep); + } + else + { + LLVMBuildStore(b, l_sizet_const(1), v_resvaluep); + } + + LLVMBuildBr(b, opblocks[i + 1]); + + LLVMPositionBuilderAtEnd(b, b_notnull); + + if (opcode == EEOP_BOOLTEST_IS_TRUE || + opcode == EEOP_BOOLTEST_IS_NOT_FALSE) + { + /* + * if value is not null NULL, return value (already + * set) + */ + } + else + { + LLVMValueRef v_value = + LLVMBuildLoad(b, v_resvaluep, ""); + + v_value = LLVMBuildZExt(b, + LLVMBuildICmp(b, LLVMIntEQ, + v_value, + l_sizet_const(0), + ""), + TypeSizeT, ""); + LLVMBuildStore(b, v_value, v_resvaluep); + } + LLVMBuildBr(b, opblocks[i + 1]); + break; + } + + case EEOP_PARAM_EXEC: + build_EvalXFunc(b, mod, "ExecEvalParamExec", + v_state, v_econtext, op); + LLVMBuildBr(b, opblocks[i + 1]); + break; + + case EEOP_PARAM_EXTERN: + build_EvalXFunc(b, mod, "ExecEvalParamExtern", + v_state, v_econtext, op); + LLVMBuildBr(b, opblocks[i + 1]); + break; + + case EEOP_PARAM_CALLBACK: + { + LLVMTypeRef param_types[3]; + LLVMValueRef v_params[3]; + LLVMTypeRef v_functype; + LLVMValueRef v_func; + + param_types[0] = l_ptr(StructExprState); + param_types[1] = l_ptr(TypeSizeT); + param_types[2] = l_ptr(StructExprContext); + + v_functype = LLVMFunctionType(LLVMVoidType(), + param_types, + lengthof(param_types), + false); + v_func = l_ptr_const(op->d.cparam.paramfunc, + l_ptr(v_functype)); + + v_params[0] = v_state; + v_params[1] = l_ptr_const(op, l_ptr(TypeSizeT)); + v_params[2] = v_econtext; + LLVMBuildCall(b, + v_func, + v_params, lengthof(v_params), ""); + + LLVMBuildBr(b, opblocks[i + 1]); + break; + } + + case EEOP_ARRAYREF_OLD: + build_EvalXFunc(b, mod, "ExecEvalArrayRefOld", + v_state, v_econtext, op); + LLVMBuildBr(b, opblocks[i + 1]); + break; + + case EEOP_ARRAYREF_ASSIGN: + build_EvalXFunc(b, mod, "ExecEvalArrayRefAssign", + v_state, v_econtext, op); + LLVMBuildBr(b, opblocks[i + 1]); + break; + + case EEOP_ARRAYREF_FETCH: + build_EvalXFunc(b, mod, "ExecEvalArrayRefFetch", + v_state, v_econtext, op); + LLVMBuildBr(b, opblocks[i + 1]); + break; + + case EEOP_CASE_TESTVAL: + { + LLVMBasicBlockRef b_avail, + b_notavail; + LLVMValueRef v_casevaluep, + v_casevalue; + LLVMValueRef v_casenullp, + v_casenull; + LLVMValueRef v_casevaluenull; + + b_avail = l_bb_before_v(opblocks[i + 1], + "op.%d.avail", i); + b_notavail = l_bb_before_v(opblocks[i + 1], + "op.%d.notavail", i); + + v_casevaluep = l_ptr_const(op->d.casetest.value, + l_ptr(TypeSizeT)); + v_casenullp = l_ptr_const(op->d.casetest.isnull, + l_ptr(TypeStorageBool)); + + v_casevaluenull = + LLVMBuildICmp(b, LLVMIntEQ, + LLVMBuildPtrToInt(b, v_casevaluep, + TypeSizeT, ""), + l_sizet_const(0), ""); + LLVMBuildCondBr(b, v_casevaluenull, b_notavail, b_avail); + + /* if casetest != NULL */ + LLVMPositionBuilderAtEnd(b, b_avail); + v_casevalue = LLVMBuildLoad(b, v_casevaluep, ""); + v_casenull = LLVMBuildLoad(b, v_casenullp, ""); + LLVMBuildStore(b, v_casevalue, v_resvaluep); + LLVMBuildStore(b, v_casenull, v_resnullp); + LLVMBuildBr(b, opblocks[i + 1]); + + /* if casetest == NULL */ + LLVMPositionBuilderAtEnd(b, b_notavail); + v_casevalue = + l_load_struct_gep(b, v_econtext, + FIELDNO_EXPRCONTEXT_CASEDATUM, ""); + v_casenull = + l_load_struct_gep(b, v_econtext, + FIELDNO_EXPRCONTEXT_CASENULL, ""); + LLVMBuildStore(b, v_casevalue, v_resvaluep); + LLVMBuildStore(b, v_casenull, v_resnullp); + + LLVMBuildBr(b, opblocks[i + 1]); + break; + } + + case EEOP_MAKE_READONLY: + { + LLVMBasicBlockRef b_notnull; 
+ LLVMValueRef v_params[1]; + LLVMValueRef v_ret; + LLVMValueRef v_nullp; + LLVMValueRef v_valuep; + LLVMValueRef v_null; + LLVMValueRef v_value; + + b_notnull = l_bb_before_v(opblocks[i + 1], + "op.%d.readonly.notnull", i); + + v_nullp = l_ptr_const(op->d.make_readonly.isnull, + l_ptr(TypeStorageBool)); + + v_null = LLVMBuildLoad(b, v_nullp, ""); + + /* store null isnull value in result */ + LLVMBuildStore(b, v_null, v_resnullp); + + /* check if value is NULL */ + LLVMBuildCondBr(b, + LLVMBuildICmp(b, LLVMIntEQ, v_null, + l_sbool_const(1), ""), + opblocks[i + 1], b_notnull); + + /* if value is not null, convert to RO datum */ + LLVMPositionBuilderAtEnd(b, b_notnull); + + v_valuep = l_ptr_const(op->d.make_readonly.value, + l_ptr(TypeSizeT)); + + v_value = LLVMBuildLoad(b, v_valuep, ""); + + v_params[0] = v_value; + v_ret = + LLVMBuildCall(b, + llvm_get_decl(mod, FuncMakeExpandedObjectReadOnlyInternal), + v_params, lengthof(v_params), ""); + LLVMBuildStore(b, v_ret, v_resvaluep); + + LLVMBuildBr(b, opblocks[i + 1]); + break; + } + + case EEOP_IOCOERCE: + { + FunctionCallInfo fcinfo_out, + fcinfo_in; + LLVMValueRef v_fcinfo_out, + v_fcinfo_in; + LLVMValueRef v_fn_addr_out, + v_fn_addr_in; + LLVMValueRef v_fcinfo_in_isnullp; + LLVMValueRef v_in_argp, + v_out_argp; + LLVMValueRef v_in_argnullp, + v_out_argnullp; + LLVMValueRef v_retval; + LLVMValueRef v_resvalue; + LLVMValueRef v_resnull; + + LLVMValueRef v_output_skip; + LLVMValueRef v_output; + + LLVMBasicBlockRef b_skipoutput; + LLVMBasicBlockRef b_calloutput; + LLVMBasicBlockRef b_input; + LLVMBasicBlockRef b_inputcall; + + fcinfo_out = op->d.iocoerce.fcinfo_data_out; + fcinfo_in = op->d.iocoerce.fcinfo_data_in; + + b_skipoutput = l_bb_before_v(opblocks[i + 1], + "op.%d.skipoutputnull", i); + b_calloutput = l_bb_before_v(opblocks[i + 1], + "op.%d.calloutput", i); + b_input = l_bb_before_v(opblocks[i + 1], + "op.%d.input", i); + b_inputcall = l_bb_before_v(opblocks[i + 1], + "op.%d.inputcall", i); + + v_fcinfo_out = l_ptr_const(fcinfo_out, l_ptr(StructFunctionCallInfoData)); + v_fcinfo_in = l_ptr_const(fcinfo_in, l_ptr(StructFunctionCallInfoData)); + v_fn_addr_out = l_ptr_const(fcinfo_out->flinfo->fn_addr, TypePGFunction); + v_fn_addr_in = l_ptr_const(fcinfo_in->flinfo->fn_addr, TypePGFunction); + + v_fcinfo_in_isnullp = + LLVMBuildStructGEP(b, v_fcinfo_in, + FIELDNO_FUNCTIONCALLINFODATA_ISNULL, + "v_fcinfo_in_isnull"); + v_out_argnullp = + LLVMBuildStructGEP(b, v_fcinfo_out, + FIELDNO_FUNCTIONCALLINFODATA_ARGNULL, + "v_fcinfo_out_argnullp"); + v_in_argnullp = + LLVMBuildStructGEP(b, v_fcinfo_in, + FIELDNO_FUNCTIONCALLINFODATA_ARGNULL, + "v_fcinfo_in_argnullp"); + v_out_argp = + LLVMBuildStructGEP(b, v_fcinfo_out, + FIELDNO_FUNCTIONCALLINFODATA_ARG, + "v_fcinfo_out_argp"); + v_in_argp = + LLVMBuildStructGEP(b, v_fcinfo_in, + FIELDNO_FUNCTIONCALLINFODATA_ARG, + "v_fcinfo_in_argp"); + + /* output functions are not called on nulls */ + v_resnull = LLVMBuildLoad(b, v_resnullp, ""); + LLVMBuildCondBr(b, + LLVMBuildICmp(b, LLVMIntEQ, v_resnull, + l_sbool_const(1), ""), + b_skipoutput, + b_calloutput); + + LLVMPositionBuilderAtEnd(b, b_skipoutput); + v_output_skip = l_sizet_const(0); + LLVMBuildBr(b, b_input); + + LLVMPositionBuilderAtEnd(b, b_calloutput); + v_resvalue = LLVMBuildLoad(b, v_resvaluep, ""); + + /* set arg[0] */ + LLVMBuildStore(b, + v_resvalue, + LLVMBuildStructGEP(b, v_out_argp, 0, "")); + LLVMBuildStore(b, + l_sbool_const(0), + LLVMBuildStructGEP(b, v_out_argnullp, + 0, "")); + /* and call output function (can never return NULL) 
*/ + v_output = LLVMBuildCall(b, v_fn_addr_out, &v_fcinfo_out, + 1, "funccall_coerce_out"); + LLVMBuildBr(b, b_input); + + /* build block handling input function call */ + LLVMPositionBuilderAtEnd(b, b_input); + + /* phi between resnull and output function call branches */ + { + LLVMValueRef incoming_values[2]; + LLVMBasicBlockRef incoming_blocks[2]; + + incoming_values[0] = v_output_skip; + incoming_blocks[0] = b_skipoutput; + + incoming_values[1] = v_output; + incoming_blocks[1] = b_calloutput; + + v_output = LLVMBuildPhi(b, TypeSizeT, "output"); + LLVMAddIncoming(v_output, + incoming_values, incoming_blocks, + lengthof(incoming_blocks)); + } + + /* + * If input function is strict, skip if input string is + * NULL. + */ + if (op->d.iocoerce.finfo_in->fn_strict) + { + LLVMBuildCondBr(b, + LLVMBuildICmp(b, LLVMIntEQ, v_output, + l_sizet_const(0), ""), + opblocks[i + 1], + b_inputcall); + } + else + { + LLVMBuildBr(b, b_inputcall); + } + + LLVMPositionBuilderAtEnd(b, b_inputcall); + /* set arguments */ + /* arg0: output */ + LLVMBuildStore(b, v_output, + LLVMBuildStructGEP(b, v_in_argp, 0, "")); + LLVMBuildStore(b, v_resnull, + LLVMBuildStructGEP(b, v_in_argnullp, 0, "")); + + /* arg1: ioparam: preset in execExpr.c */ + /* arg2: typmod: preset in execExpr.c */ + + /* reset fcinfo_in->isnull */ + LLVMBuildStore(b, l_sbool_const(0), v_fcinfo_in_isnullp); + /* and call function */ + v_retval = LLVMBuildCall(b, v_fn_addr_in, &v_fcinfo_in, 1, + "funccall_iocoerce_in"); + + LLVMBuildStore(b, v_retval, v_resvaluep); + + LLVMBuildBr(b, opblocks[i + 1]); + break; + } + + case EEOP_DISTINCT: + case EEOP_NOT_DISTINCT: + { + FunctionCallInfo fcinfo = op->d.func.fcinfo_data; + + LLVMValueRef v_fcinfo; + LLVMValueRef v_fcinfo_isnull; + + LLVMValueRef v_argnullp; + LLVMValueRef v_argnull0, + v_argisnull0; + LLVMValueRef v_argnull1, + v_argisnull1; + + LLVMValueRef v_anyargisnull; + LLVMValueRef v_bothargisnull; + + LLVMValueRef v_result; + + LLVMBasicBlockRef b_noargnull; + LLVMBasicBlockRef b_checkbothargnull; + LLVMBasicBlockRef b_bothargnull; + LLVMBasicBlockRef b_anyargnull; + + b_noargnull = l_bb_before_v(opblocks[i + 1], "op.%d.noargnull", i); + b_checkbothargnull = l_bb_before_v(opblocks[i + 1], "op.%d.checkbothargnull", i); + b_bothargnull = l_bb_before_v(opblocks[i + 1], "op.%d.bothargnull", i); + b_anyargnull = l_bb_before_v(opblocks[i + 1], "op.%d.anyargnull", i); + + v_fcinfo = l_ptr_const(fcinfo, l_ptr(StructFunctionCallInfoData)); + + v_argnullp = + LLVMBuildStructGEP(b, + v_fcinfo, + FIELDNO_FUNCTIONCALLINFODATA_ARGNULL, + "v_argnullp"); + + /* load argnull[0|1] for both arguments */ + v_argnull0 = l_load_struct_gep(b, v_argnullp, 0, ""); + v_argisnull0 = LLVMBuildICmp(b, LLVMIntEQ, v_argnull0, + l_sbool_const(1), ""); + + v_argnull1 = l_load_struct_gep(b, v_argnullp, 1, ""); + v_argisnull1 = LLVMBuildICmp(b, LLVMIntEQ, v_argnull1, + l_sbool_const(1), ""); + + v_anyargisnull = LLVMBuildOr(b, v_argisnull0, v_argisnull1, ""); + v_bothargisnull = LLVMBuildAnd(b, v_argisnull0, v_argisnull1, ""); + + /* + * Check function arguments for NULLness: If either is + * NULL, we check if both args are NULL. Otherwise call + * comparator. + */ + LLVMBuildCondBr(b, v_anyargisnull, b_checkbothargnull, + b_noargnull); + + /* + * build block checking if any arg is null + */ + LLVMPositionBuilderAtEnd(b, b_checkbothargnull); + LLVMBuildCondBr(b, v_bothargisnull, b_bothargnull, + b_anyargnull); + + + /* Both NULL? Then is not distinct... 
*/ + LLVMPositionBuilderAtEnd(b, b_bothargnull); + LLVMBuildStore(b, l_sbool_const(0), v_resnullp); + if (opcode == EEOP_NOT_DISTINCT) + LLVMBuildStore(b, l_sizet_const(1), v_resvaluep); + else + LLVMBuildStore(b, l_sizet_const(0), v_resvaluep); + + LLVMBuildBr(b, opblocks[i + 1]); + + /* Only one is NULL? Then is distinct... */ + LLVMPositionBuilderAtEnd(b, b_anyargnull); + LLVMBuildStore(b, l_sbool_const(0), v_resnullp); + if (opcode == EEOP_NOT_DISTINCT) + LLVMBuildStore(b, l_sizet_const(0), v_resvaluep); + else + LLVMBuildStore(b, l_sizet_const(1), v_resvaluep); + LLVMBuildBr(b, opblocks[i + 1]); + + /* neither argument is null: compare */ + LLVMPositionBuilderAtEnd(b, b_noargnull); + + v_result = BuildV1Call(context, b, mod, fcinfo, + &v_fcinfo_isnull); + + if (opcode == EEOP_DISTINCT) + { + /* Must invert result of "=" */ + v_result = + LLVMBuildZExt(b, + LLVMBuildICmp(b, LLVMIntEQ, + v_result, + l_sizet_const(0), ""), + TypeSizeT, ""); + } + + LLVMBuildStore(b, v_fcinfo_isnull, v_resnullp); + LLVMBuildStore(b, v_result, v_resvaluep); + + LLVMBuildBr(b, opblocks[i + 1]); + break; + } + + case EEOP_NULLIF: + { + FunctionCallInfo fcinfo = op->d.func.fcinfo_data; + + LLVMValueRef v_fcinfo; + LLVMValueRef v_fcinfo_isnull; + LLVMValueRef v_argnullp; + LLVMValueRef v_argnull0; + LLVMValueRef v_argnull1; + LLVMValueRef v_anyargisnull; + LLVMValueRef v_argp; + LLVMValueRef v_arg0; + LLVMBasicBlockRef b_hasnull; + LLVMBasicBlockRef b_nonull; + LLVMBasicBlockRef b_argsequal; + LLVMValueRef v_retval; + LLVMValueRef v_argsequal; + + b_hasnull = l_bb_before_v(opblocks[i + 1], + "b.%d.null-args", i); + b_nonull = l_bb_before_v(opblocks[i + 1], + "b.%d.no-null-args", i); + b_argsequal = l_bb_before_v(opblocks[i + 1], + "b.%d.argsequal", i); + + v_fcinfo = l_ptr_const(fcinfo, l_ptr(StructFunctionCallInfoData)); + + v_argnullp = + LLVMBuildStructGEP(b, + v_fcinfo, + FIELDNO_FUNCTIONCALLINFODATA_ARGNULL, + "v_argnullp"); + + v_argp = + LLVMBuildStructGEP(b, + v_fcinfo, + FIELDNO_FUNCTIONCALLINFODATA_ARG, + "v_argp"); + + /* if either argument is NULL they can't be equal */ + v_argnull0 = l_load_struct_gep(b, v_argnullp, 0, ""); + v_argnull1 = l_load_struct_gep(b, v_argnullp, 1, ""); + + v_anyargisnull = + LLVMBuildOr(b, + LLVMBuildICmp(b, LLVMIntEQ, v_argnull0, + l_sbool_const(1), ""), + LLVMBuildICmp(b, LLVMIntEQ, v_argnull1, + l_sbool_const(1), ""), + ""); + + LLVMBuildCondBr(b, v_anyargisnull, b_hasnull, b_nonull); + + /* one (or both) of the arguments are null, return arg[0] */ + LLVMPositionBuilderAtEnd(b, b_hasnull); + v_arg0 = l_load_struct_gep(b, v_argp, 0, ""); + LLVMBuildStore(b, v_argnull0, v_resnullp); + LLVMBuildStore(b, v_arg0, v_resvaluep); + LLVMBuildBr(b, opblocks[i + 1]); + + /* build block to invoke function and check result */ + LLVMPositionBuilderAtEnd(b, b_nonull); + + v_retval = BuildV1Call(context, b, mod, fcinfo, &v_fcinfo_isnull); + + /* + * If result not null, and arguments are equal return null + * (same result as if there'd been NULLs, hence reuse + * b_hasnull). 
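Put differently, the blocks assembled here implement the usual NULLIF definition. A plain-C sketch, where arg0/arg1 and their null flags are hypothetical stand-ins for fcinfo->arg[0|1] and fcinfo->argnull[0|1]:

    if (arg0_isnull || arg1_isnull)
    {
        *op->resvalue = arg0;                /* any NULL argument: return the first argument as-is */
        *op->resnull = arg0_isnull;
    }
    else if (DatumGetBool(FunctionCallInvoke(fcinfo)))
    {
        *op->resnull = true;                 /* arguments compare equal: NULLIF yields NULL */
    }
    else
    {
        *op->resvalue = arg0;                /* not equal: return the first argument */
        *op->resnull = false;
    }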
+ */ + v_argsequal = LLVMBuildAnd(b, + LLVMBuildICmp(b, LLVMIntEQ, + v_fcinfo_isnull, + l_sbool_const(0), + ""), + LLVMBuildICmp(b, LLVMIntEQ, + v_retval, + l_sizet_const(1), + ""), + ""); + LLVMBuildCondBr(b, v_argsequal, b_argsequal, b_hasnull); + + /* build block setting result to NULL, if args are equal */ + LLVMPositionBuilderAtEnd(b, b_argsequal); + LLVMBuildStore(b, l_sbool_const(1), v_resnullp); + LLVMBuildStore(b, l_sizet_const(0), v_resvaluep); + LLVMBuildStore(b, v_retval, v_resvaluep); + + LLVMBuildBr(b, opblocks[i + 1]); + break; + } + + case EEOP_SQLVALUEFUNCTION: + build_EvalXFunc(b, mod, "ExecEvalSQLValueFunction", + v_state, v_econtext, op); + LLVMBuildBr(b, opblocks[i + 1]); + break; + + case EEOP_CURRENTOFEXPR: + build_EvalXFunc(b, mod, "ExecEvalCurrentOfExpr", + v_state, v_econtext, op); + LLVMBuildBr(b, opblocks[i + 1]); + break; + + case EEOP_NEXTVALUEEXPR: + build_EvalXFunc(b, mod, "ExecEvalNextValueExpr", + v_state, v_econtext, op); + LLVMBuildBr(b, opblocks[i + 1]); + break; + + case EEOP_ARRAYEXPR: + build_EvalXFunc(b, mod, "ExecEvalArrayExpr", + v_state, v_econtext, op); + LLVMBuildBr(b, opblocks[i + 1]); + break; + + case EEOP_ARRAYCOERCE: + build_EvalXFunc(b, mod, "ExecEvalArrayCoerce", + v_state, v_econtext, op); + LLVMBuildBr(b, opblocks[i + 1]); + break; + + case EEOP_ROW: + build_EvalXFunc(b, mod, "ExecEvalRow", + v_state, v_econtext, op); + LLVMBuildBr(b, opblocks[i + 1]); + break; + + case EEOP_ROWCOMPARE_STEP: + { + FunctionCallInfo fcinfo = op->d.rowcompare_step.fcinfo_data; + LLVMValueRef v_fcinfo_isnull; + LLVMBasicBlockRef b_null; + LLVMBasicBlockRef b_compare; + LLVMBasicBlockRef b_compare_result; + + LLVMValueRef v_retval; + + b_null = l_bb_before_v(opblocks[i + 1], + "op.%d.row-null", i); + b_compare = l_bb_before_v(opblocks[i + 1], + "op.%d.row-compare", i); + b_compare_result = + l_bb_before_v(opblocks[i + 1], + "op.%d.row-compare-result", + i); + + /* + * If function is strict, and either arg is null, we're + * done. 
+ */ + if (op->d.rowcompare_step.finfo->fn_strict) + { + LLVMValueRef v_fcinfo; + LLVMValueRef v_argnullp; + LLVMValueRef v_argnull0; + LLVMValueRef v_argnull1; + LLVMValueRef v_anyargisnull; + + v_fcinfo = l_ptr_const(fcinfo, + l_ptr(StructFunctionCallInfoData)); + + v_argnullp = + LLVMBuildStructGEP(b, v_fcinfo, + FIELDNO_FUNCTIONCALLINFODATA_ARGNULL, + "v_argnullp"); + + v_argnull0 = l_load_struct_gep(b, v_argnullp, 0, ""); + v_argnull1 = l_load_struct_gep(b, v_argnullp, 1, ""); + + v_anyargisnull = + LLVMBuildOr(b, + LLVMBuildICmp(b, + LLVMIntEQ, + v_argnull0, + l_sbool_const(1), + ""), + LLVMBuildICmp(b, LLVMIntEQ, + v_argnull1, + l_sbool_const(1), ""), + ""); + + LLVMBuildCondBr(b, v_anyargisnull, b_null, b_compare); + } + else + { + LLVMBuildBr(b, b_compare); + } + + /* build block invoking comparison function */ + LLVMPositionBuilderAtEnd(b, b_compare); + + /* call function */ + v_retval = BuildV1Call(context, b, mod, fcinfo, + &v_fcinfo_isnull); + LLVMBuildStore(b, v_retval, v_resvaluep); + + /* if result of function is NULL, force NULL result */ + LLVMBuildCondBr(b, + LLVMBuildICmp(b, + LLVMIntEQ, + v_fcinfo_isnull, + l_sbool_const(0), + ""), + b_compare_result, + b_null); + + /* build block analyzing the !NULL comparator result */ + LLVMPositionBuilderAtEnd(b, b_compare_result); + + /* if results equal, compare next, otherwise done */ + LLVMBuildCondBr(b, + LLVMBuildICmp(b, + LLVMIntEQ, + v_retval, + l_sizet_const(0), ""), + opblocks[i + 1], + opblocks[op->d.rowcompare_step.jumpdone]); + + /* + * Build block handling NULL input or NULL comparator + * result. + */ + LLVMPositionBuilderAtEnd(b, b_null); + LLVMBuildStore(b, l_sbool_const(1), v_resnullp); + LLVMBuildBr(b, opblocks[op->d.rowcompare_step.jumpnull]); + + break; + } + + case EEOP_ROWCOMPARE_FINAL: + { + RowCompareType rctype = op->d.rowcompare_final.rctype; + + LLVMValueRef v_cmpresult; + LLVMValueRef v_result; + LLVMIntPredicate predicate; + + /* + * Btree comparators return 32 bit results, need to be + * careful about sign (used as a 64 bit value it's + * otherwise wrong). 
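The truncation below is the C-level equivalent of the following sketch, shown for the ROWCOMPARE_LT case (cmp is a hypothetical local temporary):

    int32 cmp = DatumGetInt32(*op->resvalue);   /* recover the comparator's signed 32 bit result */
    *op->resvalue = BoolGetDatum(cmp < 0);      /* comparing the raw 64 bit Datum instead could misjudge the sign */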
+ */ + v_cmpresult = + LLVMBuildTrunc(b, + LLVMBuildLoad(b, v_resvaluep, ""), + LLVMInt32Type(), ""); + + switch (rctype) + { + case ROWCOMPARE_LT: + predicate = LLVMIntSLT; + break; + case ROWCOMPARE_LE: + predicate = LLVMIntSLE; + break; + case ROWCOMPARE_GT: + predicate = LLVMIntSGT; + break; + case ROWCOMPARE_GE: + predicate = LLVMIntSGE; + break; + default: + /* EQ and NE cases aren't allowed here */ + Assert(false); + predicate = 0; /* prevent compiler warning */ + break; + } + + v_result = LLVMBuildICmp(b, + predicate, + v_cmpresult, + l_int32_const(0), + ""); + v_result = LLVMBuildZExt(b, v_result, TypeSizeT, ""); + + LLVMBuildStore(b, l_sbool_const(0), v_resnullp); + LLVMBuildStore(b, v_result, v_resvaluep); + + LLVMBuildBr(b, opblocks[i + 1]); + break; + } + + case EEOP_MINMAX: + build_EvalXFunc(b, mod, "ExecEvalMinMax", + v_state, v_econtext, op); + LLVMBuildBr(b, opblocks[i + 1]); + break; + + case EEOP_FIELDSELECT: + build_EvalXFunc(b, mod, "ExecEvalFieldSelect", + v_state, v_econtext, op); + LLVMBuildBr(b, opblocks[i + 1]); + break; + + case EEOP_FIELDSTORE_DEFORM: + build_EvalXFunc(b, mod, "ExecEvalFieldStoreDeForm", + v_state, v_econtext, op); + LLVMBuildBr(b, opblocks[i + 1]); + break; + + case EEOP_FIELDSTORE_FORM: + build_EvalXFunc(b, mod, "ExecEvalFieldStoreForm", + v_state, v_econtext, op); + LLVMBuildBr(b, opblocks[i + 1]); + break; + + case EEOP_ARRAYREF_SUBSCRIPT: + { + LLVMValueRef v_fn; + int jumpdone = op->d.arrayref_subscript.jumpdone; + LLVMValueRef v_params[2]; + LLVMValueRef v_ret; + + v_fn = llvm_get_decl(mod, FuncExecEvalArrayRefSubscript); + + v_params[0] = v_state; + v_params[1] = l_ptr_const(op, l_ptr(StructExprEvalStep)); + v_ret = LLVMBuildCall(b, v_fn, + v_params, lengthof(v_params), ""); + v_ret = LLVMBuildZExt(b, v_ret, TypeStorageBool, ""); + + LLVMBuildCondBr(b, + LLVMBuildICmp(b, LLVMIntEQ, v_ret, + l_sbool_const(1), ""), + opblocks[i + 1], + opblocks[jumpdone]); + break; + } + + case EEOP_DOMAIN_TESTVAL: + { + LLVMBasicBlockRef b_avail, + b_notavail; + LLVMValueRef v_casevaluep, + v_casevalue; + LLVMValueRef v_casenullp, + v_casenull; + LLVMValueRef v_casevaluenull; + + b_avail = l_bb_before_v(opblocks[i + 1], + "op.%d.avail", i); + b_notavail = l_bb_before_v(opblocks[i + 1], + "op.%d.notavail", i); + + v_casevaluep = l_ptr_const(op->d.casetest.value, + l_ptr(TypeSizeT)); + v_casenullp = l_ptr_const(op->d.casetest.isnull, + l_ptr(TypeStorageBool)); + + v_casevaluenull = + LLVMBuildICmp(b, LLVMIntEQ, + LLVMBuildPtrToInt(b, v_casevaluep, + TypeSizeT, ""), + l_sizet_const(0), ""); + LLVMBuildCondBr(b, + v_casevaluenull, + b_notavail, b_avail); + + /* if casetest != NULL */ + LLVMPositionBuilderAtEnd(b, b_avail); + v_casevalue = LLVMBuildLoad(b, v_casevaluep, ""); + v_casenull = LLVMBuildLoad(b, v_casenullp, ""); + LLVMBuildStore(b, v_casevalue, v_resvaluep); + LLVMBuildStore(b, v_casenull, v_resnullp); + LLVMBuildBr(b, opblocks[i + 1]); + + /* if casetest == NULL */ + LLVMPositionBuilderAtEnd(b, b_notavail); + v_casevalue = + l_load_struct_gep(b, v_econtext, + FIELDNO_EXPRCONTEXT_DOMAINDATUM, + ""); + v_casenull = + l_load_struct_gep(b, v_econtext, + FIELDNO_EXPRCONTEXT_DOMAINNULL, + ""); + LLVMBuildStore(b, v_casevalue, v_resvaluep); + LLVMBuildStore(b, v_casenull, v_resnullp); + + LLVMBuildBr(b, opblocks[i + 1]); + break; + } + + case EEOP_DOMAIN_NOTNULL: + build_EvalXFunc(b, mod, "ExecEvalConstraintNotNull", + v_state, v_econtext, op); + LLVMBuildBr(b, opblocks[i + 1]); + break; + + case EEOP_DOMAIN_CHECK: + build_EvalXFunc(b, mod, 
"ExecEvalConstraintCheck", + v_state, v_econtext, op); + LLVMBuildBr(b, opblocks[i + 1]); + break; + + case EEOP_CONVERT_ROWTYPE: + build_EvalXFunc(b, mod, "ExecEvalConvertRowtype", + v_state, v_econtext, op); + LLVMBuildBr(b, opblocks[i + 1]); + break; + + case EEOP_SCALARARRAYOP: + build_EvalXFunc(b, mod, "ExecEvalScalarArrayOp", + v_state, v_econtext, op); + LLVMBuildBr(b, opblocks[i + 1]); + break; + + case EEOP_XMLEXPR: + build_EvalXFunc(b, mod, "ExecEvalXmlExpr", + v_state, v_econtext, op); + LLVMBuildBr(b, opblocks[i + 1]); + break; + + case EEOP_AGGREF: + { + AggrefExprState *aggref = op->d.aggref.astate; + LLVMValueRef v_aggnop; + LLVMValueRef v_aggno; + LLVMValueRef value, + isnull; + + /* + * At this point aggref->aggno is not yet set (it's set up + * in ExecInitAgg() after initializing the expression). So + * load it from memory each time round. + */ + v_aggnop = l_ptr_const(&aggref->aggno, + l_ptr(LLVMInt32Type())); + v_aggno = LLVMBuildLoad(b, v_aggnop, "v_aggno"); + + /* load agg value / null */ + value = l_load_gep1(b, v_aggvalues, v_aggno, "aggvalue"); + isnull = l_load_gep1(b, v_aggnulls, v_aggno, "aggnull"); + + /* and store result */ + LLVMBuildStore(b, value, v_resvaluep); + LLVMBuildStore(b, isnull, v_resnullp); + + LLVMBuildBr(b, opblocks[i + 1]); + break; + } + + case EEOP_GROUPING_FUNC: + build_EvalXFunc(b, mod, "ExecEvalGroupingFunc", + v_state, v_econtext, op); + LLVMBuildBr(b, opblocks[i + 1]); + break; + + case EEOP_WINDOW_FUNC: + { + WindowFuncExprState *wfunc = op->d.window_func.wfstate; + LLVMValueRef v_wfuncnop; + LLVMValueRef v_wfuncno; + LLVMValueRef value, + isnull; + + /* + * At this point aggref->wfuncno is not yet set (it's set + * up in ExecInitWindowAgg() after initializing the + * expression). So load it from memory each time round. 
+ */ + v_wfuncnop = l_ptr_const(&wfunc->wfuncno, + l_ptr(LLVMInt32Type())); + v_wfuncno = LLVMBuildLoad(b, v_wfuncnop, "v_wfuncno"); + + /* load window func value / null */ + value = l_load_gep1(b, v_aggvalues, v_wfuncno, + "windowvalue"); + isnull = l_load_gep1(b, v_aggnulls, v_wfuncno, + "windownull"); + + LLVMBuildStore(b, value, v_resvaluep); + LLVMBuildStore(b, isnull, v_resnullp); + + LLVMBuildBr(b, opblocks[i + 1]); + break; + } + + case EEOP_SUBPLAN: + build_EvalXFunc(b, mod, "ExecEvalSubPlan", + v_state, v_econtext, op); + LLVMBuildBr(b, opblocks[i + 1]); + break; + + case EEOP_ALTERNATIVE_SUBPLAN: + build_EvalXFunc(b, mod, "ExecEvalAlternativeSubPlan", + v_state, v_econtext, op); + LLVMBuildBr(b, opblocks[i + 1]); + break; + + case EEOP_AGG_STRICT_DESERIALIZE: + { + FunctionCallInfo fcinfo = op->d.agg_deserialize.fcinfo_data; + LLVMValueRef v_fcinfo; + LLVMValueRef v_argnullp; + LLVMValueRef v_argnull0; + LLVMBasicBlockRef b_deserialize; + + b_deserialize = l_bb_before_v(opblocks[i + 1], + "op.%d.deserialize", i); + + v_fcinfo = l_ptr_const(fcinfo, + l_ptr(StructFunctionCallInfoData)); + + v_argnullp = + LLVMBuildStructGEP(b, + v_fcinfo, + FIELDNO_FUNCTIONCALLINFODATA_ARGNULL, + "v_argnullp"); + v_argnull0 = + l_load_struct_gep(b, v_argnullp, 0, "v_argnull0"); + + LLVMBuildCondBr(b, + LLVMBuildICmp(b, + LLVMIntEQ, + v_argnull0, + l_sbool_const(1), + ""), + opblocks[op->d.agg_deserialize.jumpnull], + b_deserialize); + LLVMPositionBuilderAtEnd(b, b_deserialize); + } + /* FALLTHROUGH */ + + case EEOP_AGG_DESERIALIZE: + { + AggState *aggstate; + FunctionCallInfo fcinfo; + + LLVMValueRef v_retval; + LLVMValueRef v_fcinfo_isnull; + LLVMValueRef v_tmpcontext; + LLVMValueRef v_oldcontext; + + aggstate = op->d.agg_deserialize.aggstate; + fcinfo = op->d.agg_deserialize.fcinfo_data; + + v_tmpcontext = + l_ptr_const(aggstate->tmpcontext->ecxt_per_tuple_memory, + l_ptr(StructMemoryContextData)); + v_oldcontext = l_mcxt_switch(mod, b, v_tmpcontext); + v_retval = BuildV1Call(context, b, mod, fcinfo, + &v_fcinfo_isnull); + l_mcxt_switch(mod, b, v_oldcontext); + + LLVMBuildStore(b, v_retval, v_resvaluep); + LLVMBuildStore(b, v_fcinfo_isnull, v_resnullp); + + LLVMBuildBr(b, opblocks[i + 1]); + break; + } + + case EEOP_AGG_STRICT_INPUT_CHECK: + { + int nargs = op->d.agg_strict_input_check.nargs; + bool *nulls = op->d.agg_strict_input_check.nulls; + int jumpnull; + int argno; + + LLVMValueRef v_nullp; + LLVMBasicBlockRef *b_checknulls; + + Assert(nargs > 0); + + jumpnull = op->d.agg_strict_input_check.jumpnull; + v_nullp = l_ptr_const(nulls, l_ptr(TypeStorageBool)); + + /* create blocks for checking args */ + b_checknulls = palloc(sizeof(LLVMBasicBlockRef *) * nargs); + for (argno = 0; argno < nargs; argno++) + { + b_checknulls[argno] = + l_bb_before_v(opblocks[i + 1], + "op.%d.check-null.%d", + i, argno); + } + + LLVMBuildBr(b, b_checknulls[0]); + + /* strict function, check for NULL args */ + for (argno = 0; argno < nargs; argno++) + { + LLVMValueRef v_argno = l_int32_const(argno); + LLVMValueRef v_argisnull; + LLVMBasicBlockRef b_argnotnull; + + LLVMPositionBuilderAtEnd(b, b_checknulls[argno]); + + if (argno + 1 == nargs) + b_argnotnull = opblocks[i + 1]; + else + b_argnotnull = b_checknulls[argno + 1]; + + v_argisnull = l_load_gep1(b, v_nullp, v_argno, ""); + + LLVMBuildCondBr(b, + LLVMBuildICmp(b, + LLVMIntEQ, + v_argisnull, + l_sbool_const(1), ""), + opblocks[jumpnull], + b_argnotnull); + } + + break; + } + + case EEOP_AGG_INIT_TRANS: + { + AggState *aggstate; + AggStatePerTrans pertrans; + + 
LLVMValueRef v_aggstatep; + LLVMValueRef v_pertransp; + + LLVMValueRef v_allpergroupsp; + + LLVMValueRef v_pergroupp; + + LLVMValueRef v_setoff, + v_transno; + + LLVMValueRef v_notransvalue; + + LLVMBasicBlockRef b_init; + + aggstate = op->d.agg_init_trans.aggstate; + pertrans = op->d.agg_init_trans.pertrans; + + v_aggstatep = l_ptr_const(aggstate, + l_ptr(StructAggState)); + v_pertransp = l_ptr_const(pertrans, + l_ptr(StructAggStatePerTransData)); + + /* + * pergroup = &aggstate->all_pergroups + * [op->d.agg_init_trans_check.setoff] + * [op->d.agg_init_trans_check.transno]; + */ + v_allpergroupsp = + l_load_struct_gep(b, v_aggstatep, + FIELDNO_AGGSTATE_ALL_PERGROUPS, + "aggstate.all_pergroups"); + v_setoff = l_int32_const(op->d.agg_init_trans.setoff); + v_transno = l_int32_const(op->d.agg_init_trans.transno); + v_pergroupp = + LLVMBuildGEP(b, + l_load_gep1(b, v_allpergroupsp, v_setoff, ""), + &v_transno, 1, ""); + + v_notransvalue = + l_load_struct_gep(b, v_pergroupp, + FIELDNO_AGGSTATEPERGROUPDATA_NOTRANSVALUE, + "notransvalue"); + + b_init = l_bb_before_v(opblocks[i + 1], + "op.%d.inittrans", i); + + LLVMBuildCondBr(b, + LLVMBuildICmp(b, LLVMIntEQ, v_notransvalue, + l_sbool_const(1), ""), + b_init, + opblocks[i + 1]); + + LLVMPositionBuilderAtEnd(b, b_init); + + { + LLVMValueRef params[3]; + LLVMValueRef v_curaggcontext; + LLVMValueRef v_current_set; + LLVMValueRef v_aggcontext; + + v_aggcontext = l_ptr_const(op->d.agg_init_trans.aggcontext, + l_ptr(StructExprContext)); + + v_current_set = + LLVMBuildStructGEP(b, + v_aggstatep, + FIELDNO_AGGSTATE_CURRENT_SET, + "aggstate.current_set"); + v_curaggcontext = + LLVMBuildStructGEP(b, + v_aggstatep, + FIELDNO_AGGSTATE_CURAGGCONTEXT, + "aggstate.curaggcontext"); + + LLVMBuildStore(b, l_int32_const(op->d.agg_init_trans.setno), + v_current_set); + LLVMBuildStore(b, v_aggcontext, + v_curaggcontext); + + params[0] = v_aggstatep; + params[1] = v_pertransp; + params[2] = v_pergroupp; + + LLVMBuildCall(b, + llvm_get_decl(mod, FuncExecAggInitGroup), + params, lengthof(params), + ""); + } + LLVMBuildBr(b, opblocks[op->d.agg_init_trans.jumpnull]); + + break; + } + + case EEOP_AGG_STRICT_TRANS_CHECK: + { + AggState *aggstate; + LLVMValueRef v_setoff, + v_transno; + + LLVMValueRef v_aggstatep; + LLVMValueRef v_allpergroupsp; + + LLVMValueRef v_transnull; + LLVMValueRef v_pergroupp; + + int jumpnull = op->d.agg_strict_trans_check.jumpnull; + + aggstate = op->d.agg_strict_trans_check.aggstate; + v_aggstatep = l_ptr_const(aggstate, l_ptr(StructAggState)); + + /* + * pergroup = &aggstate->all_pergroups + * [op->d.agg_strict_trans_check.setoff] + * [op->d.agg_init_trans_check.transno]; + */ + v_allpergroupsp = + l_load_struct_gep(b, v_aggstatep, + FIELDNO_AGGSTATE_ALL_PERGROUPS, + "aggstate.all_pergroups"); + v_setoff = + l_int32_const(op->d.agg_strict_trans_check.setoff); + v_transno = + l_int32_const(op->d.agg_strict_trans_check.transno); + v_pergroupp = + LLVMBuildGEP(b, + l_load_gep1(b, v_allpergroupsp, v_setoff, ""), + &v_transno, 1, ""); + + v_transnull = + l_load_struct_gep(b, v_pergroupp, + FIELDNO_AGGSTATEPERGROUPDATA_TRANSVALUEISNULL, + "transnull"); + + LLVMBuildCondBr(b, + LLVMBuildICmp(b, LLVMIntEQ, v_transnull, + l_sbool_const(1), ""), + opblocks[jumpnull], + opblocks[i + 1]); + + break; + } + + case EEOP_AGG_PLAIN_TRANS_BYVAL: + case EEOP_AGG_PLAIN_TRANS: + { + AggState *aggstate; + AggStatePerTrans pertrans; + FunctionCallInfo fcinfo; + + LLVMValueRef v_aggstatep; + LLVMValueRef v_fcinfo; + LLVMValueRef v_fcinfo_isnull; + LLVMValueRef v_argp, + 
v_argnullp; + + LLVMValueRef v_transvaluep; + LLVMValueRef v_transnullp; + + LLVMValueRef v_setoff; + LLVMValueRef v_transno; + + LLVMValueRef v_aggcontext; + + LLVMValueRef v_allpergroupsp; + LLVMValueRef v_current_setp; + LLVMValueRef v_current_pertransp; + LLVMValueRef v_curaggcontext; + + LLVMValueRef v_pertransp; + + LLVMValueRef v_pergroupp; + + LLVMValueRef v_retval; + + LLVMValueRef v_tmpcontext; + LLVMValueRef v_oldcontext; + + aggstate = op->d.agg_trans.aggstate; + pertrans = op->d.agg_trans.pertrans; + + fcinfo = &pertrans->transfn_fcinfo; + + v_aggstatep = l_ptr_const(aggstate, + l_ptr(StructAggState)); + v_pertransp = l_ptr_const(pertrans, + l_ptr(StructAggStatePerTransData)); + + /* + * pergroup = &aggstate->all_pergroups + * [op->d.agg_strict_trans_check.setoff] + * [op->d.agg_init_trans_check.transno]; + */ + v_allpergroupsp = + l_load_struct_gep(b, v_aggstatep, + FIELDNO_AGGSTATE_ALL_PERGROUPS, + "aggstate.all_pergroups"); + v_setoff = l_int32_const(op->d.agg_trans.setoff); + v_transno = l_int32_const(op->d.agg_trans.transno); + v_pergroupp = + LLVMBuildGEP(b, + l_load_gep1(b, v_allpergroupsp, v_setoff, ""), + &v_transno, 1, ""); + + v_fcinfo = l_ptr_const(fcinfo, + l_ptr(StructFunctionCallInfoData)); + + v_argnullp = + LLVMBuildStructGEP(b, + v_fcinfo, + FIELDNO_FUNCTIONCALLINFODATA_ARGNULL, + "v_argnullp"); + v_argp = + LLVMBuildStructGEP(b, + v_fcinfo, + FIELDNO_FUNCTIONCALLINFODATA_ARG, + "v_argp"); + + v_aggcontext = l_ptr_const(op->d.agg_trans.aggcontext, + l_ptr(StructExprContext)); + + v_current_setp = + LLVMBuildStructGEP(b, + v_aggstatep, + FIELDNO_AGGSTATE_CURRENT_SET, + "aggstate.current_set"); + v_curaggcontext = + LLVMBuildStructGEP(b, + v_aggstatep, + FIELDNO_AGGSTATE_CURAGGCONTEXT, + "aggstate.curaggcontext"); + v_current_pertransp = + LLVMBuildStructGEP(b, + v_aggstatep, + FIELDNO_AGGSTATE_CURPERTRANS, + "aggstate.curpertrans"); + + /* set aggstate globals */ + LLVMBuildStore(b, v_aggcontext, v_curaggcontext); + LLVMBuildStore(b, l_int32_const(op->d.agg_trans.setno), + v_current_setp); + LLVMBuildStore(b, v_pertransp, v_current_pertransp); + + /* invoke transition function in per-tuple context */ + v_tmpcontext = + l_ptr_const(aggstate->tmpcontext->ecxt_per_tuple_memory, + l_ptr(StructMemoryContextData)); + v_oldcontext = l_mcxt_switch(mod, b, v_tmpcontext); + + /* store transvalue in fcinfo->arg/argnull[0] */ + v_transvaluep = + LLVMBuildStructGEP(b, v_pergroupp, + FIELDNO_AGGSTATEPERGROUPDATA_TRANSVALUE, + "transvalue"); + v_transnullp = + LLVMBuildStructGEP(b, v_pergroupp, + FIELDNO_AGGSTATEPERGROUPDATA_TRANSVALUEISNULL, + "transnullp"); + LLVMBuildStore(b, + LLVMBuildLoad(b, v_transvaluep, + "transvalue"), + LLVMBuildStructGEP(b, v_argp, 0, "")); + LLVMBuildStore(b, + LLVMBuildLoad(b, v_transnullp, "transnull"), + LLVMBuildStructGEP(b, v_argnullp, 0, "")); + + /* and invoke transition function */ + v_retval = BuildV1Call(context, b, mod, fcinfo, + &v_fcinfo_isnull); + + /* + * For pass-by-ref datatype, must copy the new value into + * aggcontext and free the prior transValue. But if + * transfn returned a pointer to its first input, we don't + * need to do anything. Also, if transfn returned a + * pointer to a R/W expanded object that is already a + * child of the aggcontext, assume we can adopt that value + * without copying it. 
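The branch emitted below mirrors what the interpreter does for by-ref transition values; roughly, as a sketch (pergroup refers to the per-group state located above, cf. ExecAggTransReparent):

    Datum newVal = FunctionCallInvoke(fcinfo);

    if (DatumGetPointer(newVal) != DatumGetPointer(pergroup->transValue))
        newVal = ExecAggTransReparent(aggstate, pertrans,
                                      newVal, fcinfo->isnull,
                                      pergroup->transValue,
                                      pergroup->transValueIsNull);

    pergroup->transValue = newVal;
    pergroup->transValueIsNull = fcinfo->isnull;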
+ */ + if (opcode == EEOP_AGG_PLAIN_TRANS) + { + LLVMBasicBlockRef b_call; + LLVMBasicBlockRef b_nocall; + LLVMValueRef v_fn; + LLVMValueRef v_transvalue; + LLVMValueRef v_transnull; + LLVMValueRef v_newval; + LLVMValueRef params[6]; + + b_call = l_bb_before_v(opblocks[i + 1], + "op.%d.transcall", i); + b_nocall = l_bb_before_v(opblocks[i + 1], + "op.%d.transnocall", i); + + v_transvalue = LLVMBuildLoad(b, v_transvaluep, ""); + v_transnull = LLVMBuildLoad(b, v_transnullp, ""); + + /* + * DatumGetPointer(newVal) != + * DatumGetPointer(pergroup->transValue)) + */ + LLVMBuildCondBr(b, + LLVMBuildICmp(b, LLVMIntEQ, + v_transvalue, + v_retval, ""), + b_nocall, b_call); + + /* returned datum not passed datum, reparent */ + LLVMPositionBuilderAtEnd(b, b_call); + + params[0] = v_aggstatep; + params[1] = v_pertransp; + params[2] = v_retval; + params[3] = LLVMBuildTrunc(b, v_fcinfo_isnull, + TypeParamBool, ""); + params[4] = v_transvalue; + params[5] = LLVMBuildTrunc(b, v_transnull, + TypeParamBool, ""); + + v_fn = llvm_get_decl(mod, FuncExecAggTransReparent); + v_newval = + LLVMBuildCall(b, v_fn, + params, lengthof(params), + ""); + + /* store trans value */ + LLVMBuildStore(b, v_newval, v_transvaluep); + LLVMBuildStore(b, v_fcinfo_isnull, v_transnullp); + + l_mcxt_switch(mod, b, v_oldcontext); + LLVMBuildBr(b, opblocks[i + 1]); + + /* returned datum passed datum, no need to reparent */ + LLVMPositionBuilderAtEnd(b, b_nocall); + } + + /* store trans value */ + LLVMBuildStore(b, v_retval, v_transvaluep); + LLVMBuildStore(b, v_fcinfo_isnull, v_transnullp); + + l_mcxt_switch(mod, b, v_oldcontext); + + LLVMBuildBr(b, opblocks[i + 1]); + break; + } + + case EEOP_AGG_ORDERED_TRANS_DATUM: + build_EvalXFunc(b, mod, "ExecEvalAggOrderedTransDatum", + v_state, v_econtext, op); + LLVMBuildBr(b, opblocks[i + 1]); + break; + + case EEOP_AGG_ORDERED_TRANS_TUPLE: + build_EvalXFunc(b, mod, "ExecEvalAggOrderedTransTuple", + v_state, v_econtext, op); + LLVMBuildBr(b, opblocks[i + 1]); + break; + + case EEOP_LAST: + Assert(false); + break; + } + } + + LLVMDisposeBuilder(b); + + /* + * Don't immediately emit function, instead do so the first time the + * expression is actually evaluated. That allows emitting a lot of + * functions together, avoiding a lot of repeated llvm and memory + * remapping overhead. + */ + { + + CompiledExprState *cstate = palloc0(sizeof(CompiledExprState)); + + cstate->context = context; + cstate->funcname = funcname; + + state->evalfunc = ExecRunCompiledExpr; + state->evalfunc_private = cstate; + } + + llvm_leave_fatal_on_oom(); + + INSTR_TIME_SET_CURRENT(endtime); + INSTR_TIME_ACCUM_DIFF(context->base.instr.generation_counter, + endtime, starttime); + + return true; +} + +/* + * Run compiled expression. + * + * This will only be called the first time a JITed expression is called. We + * first make sure the expression is still up to date, and then get a pointer to + * the emitted function. The latter can be the first thing that triggers + * optimizing and emitting all the generated functions.
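From a caller's perspective the deferral looks roughly like this (a hypothetical usage sketch, not part of the emitted code):

    /* first evaluation goes through ExecRunCompiledExpr, triggering optimization and emission */
    Datum d1 = ExecEvalExpr(state, econtext, &isnull);
    /* subsequent evaluations call the emitted native function directly */
    Datum d2 = ExecEvalExpr(state, econtext, &isnull);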
+ */ +static Datum +ExecRunCompiledExpr(ExprState *state, ExprContext *econtext, bool *isNull) +{ + CompiledExprState *cstate = state->evalfunc_private; + ExprStateEvalFunc func; + + CheckExprStillValid(state, econtext); + + llvm_enter_fatal_on_oom(); + func = (ExprStateEvalFunc) llvm_get_function(cstate->context, + cstate->funcname); + llvm_leave_fatal_on_oom(); + Assert(func); + + /* remove indirection via this function for future calls */ + state->evalfunc = func; + + return func(state, econtext, isNull); +} + +static LLVMValueRef +BuildV1Call(LLVMJitContext *context, LLVMBuilderRef b, + LLVMModuleRef mod, FunctionCallInfo fcinfo, + LLVMValueRef *v_fcinfo_isnull) +{ + LLVMValueRef v_fn; + LLVMValueRef v_fcinfo_isnullp; + LLVMValueRef v_retval; + LLVMValueRef v_fcinfo; + + v_fn = llvm_function_reference(context, b, mod, fcinfo); + + v_fcinfo = l_ptr_const(fcinfo, l_ptr(StructFunctionCallInfoData)); + v_fcinfo_isnullp = LLVMBuildStructGEP(b, v_fcinfo, + FIELDNO_FUNCTIONCALLINFODATA_ISNULL, + "v_fcinfo_isnull"); + LLVMBuildStore(b, l_sbool_const(0), v_fcinfo_isnullp); + + v_retval = LLVMBuildCall(b, v_fn, &v_fcinfo, 1, "funccall"); + + if (v_fcinfo_isnull) + *v_fcinfo_isnull = LLVMBuildLoad(b, v_fcinfo_isnullp, ""); + + /* + * Add lifetime-end annotation, signalling that writes to memory don't + * have to be retained (important for inlining potential). + */ + { + LLVMValueRef v_lifetime = create_LifetimeEnd(mod); + LLVMValueRef params[2]; + + params[0] = l_int64_const(sizeof(fcinfo->arg)); + params[1] = l_ptr_const(fcinfo->arg, l_ptr(LLVMInt8Type())); + LLVMBuildCall(b, v_lifetime, params, lengthof(params), ""); + + params[0] = l_int64_const(sizeof(fcinfo->argnull)); + params[1] = l_ptr_const(fcinfo->argnull, l_ptr(LLVMInt8Type())); + LLVMBuildCall(b, v_lifetime, params, lengthof(params), ""); + + params[0] = l_int64_const(sizeof(fcinfo->isnull)); + params[1] = l_ptr_const(&fcinfo->isnull, l_ptr(LLVMInt8Type())); + LLVMBuildCall(b, v_lifetime, params, lengthof(params), ""); + } + + return v_retval; +} + +/* + * Implement an expression step by calling the function funcname. 
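The ExecEval* helpers routed through build_EvalXFunc share the out-of-line step signature that the parameter types below encode; for reference, a sketch of one such declaration:

    void ExecEvalWholeRowVar(ExprState *state, ExprEvalStep *op, ExprContext *econtext);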
+ */ +static void +build_EvalXFunc(LLVMBuilderRef b, LLVMModuleRef mod, const char *funcname, + LLVMValueRef v_state, LLVMValueRef v_econtext, + ExprEvalStep *op) +{ + LLVMTypeRef sig; + LLVMValueRef v_fn; + LLVMTypeRef param_types[3]; + LLVMValueRef params[3]; + + v_fn = LLVMGetNamedFunction(mod, funcname); + if (!v_fn) + { + param_types[0] = l_ptr(StructExprState); + param_types[1] = l_ptr(StructExprEvalStep); + param_types[2] = l_ptr(StructExprContext); + + sig = LLVMFunctionType(LLVMVoidType(), + param_types, lengthof(param_types), + false); + v_fn = LLVMAddFunction(mod, funcname, sig); + } + + params[0] = v_state; + params[1] = l_ptr_const(op, l_ptr(StructExprEvalStep)); + params[2] = v_econtext; + + LLVMBuildCall(b, + v_fn, + params, lengthof(params), ""); +} + +static LLVMValueRef +create_LifetimeEnd(LLVMModuleRef mod) +{ + LLVMTypeRef sig; + LLVMValueRef fn; + LLVMTypeRef param_types[2]; + + /* LLVM 5+ has a variadic pointer argument */ +#if LLVM_VERSION_MAJOR < 5 + const char *nm = "llvm.lifetime.end"; +#else + const char *nm = "llvm.lifetime.end.p0i8"; +#endif + + fn = LLVMGetNamedFunction(mod, nm); + if (fn) + return fn; + + param_types[0] = LLVMInt64Type(); + param_types[1] = l_ptr(LLVMInt8Type()); + + sig = LLVMFunctionType(LLVMVoidType(), + param_types, lengthof(param_types), + false); + fn = LLVMAddFunction(mod, nm, sig); + + LLVMSetFunctionCallConv(fn, LLVMCCallConv); + + Assert(LLVMGetIntrinsicID(fn)); + + return fn; +} diff --git a/src/backend/jit/llvm/llvmjit_inline.cpp b/src/backend/jit/llvm/llvmjit_inline.cpp new file mode 100644 index 0000000000..b33a32141d --- /dev/null +++ b/src/backend/jit/llvm/llvmjit_inline.cpp @@ -0,0 +1,876 @@ +/*------------------------------------------------------------------------- + * + * llvmjit_inline.cpp + * Cross module inlining suitable for postgres' JIT + * + * The inliner iterates over external functions referenced from the passed + * module and attempts to inline those. It does so by utilizing pre-built + * indexes over both postgres core code and extension modules. When a match + * for an external function is found - not guaranteed! - the index will then + * be used to judge its instruction count / inline worthiness. After doing + * so for all external functions, all the referenced functions (and + * prerequisites) will be imported. + * + * Copyright (c) 2016-2018, PostgreSQL Global Development Group + * + * IDENTIFICATION + * src/backend/jit/llvm/llvmjit_inline.cpp + * + *------------------------------------------------------------------------- + */ + +extern "C" +{ +#include "postgres.h" +} + +#include "jit/llvmjit.h" + +extern "C" +{ +#include +#include +#include +#include +#include + +#include "common/string.h" +#include "miscadmin.h" +#include "storage/fd.h" +} + +#include +#include + +#include +#include +#include +#include +#if LLVM_VERSION_MAJOR > 3 +#include +#else +#include +#include +#endif +#include +#include +#include +#include +#include +#include +#include +#include + + +/* + * Type used to represent modules InlineWorkListItem's subject is searched for + * in. + */ +typedef llvm::SmallVector InlineSearchPath; + +/* + * Item in queue of to-be-checked symbols and corresponding queue. + */ +typedef struct InlineWorkListItem +{ + llvm::StringRef symbolName; + llvm::SmallVector searchpath; +} InlineWorkListItem; +typedef llvm::SmallVector InlineWorkList; + +/* + * Information about symbols processed during inlining. Used to prevent + * repeated searches and provide additional information.
+ */ +typedef struct FunctionInlineState +{ + int costLimit; + bool processed; + bool inlined; + bool allowReconsidering; +} FunctionInlineState; +typedef llvm::StringMap FunctionInlineStates; + +/* + * Map of modules that should be inlined, with a list of the to-be inlined + * symbols. + */ +typedef llvm::StringMap > ImportMapTy; + + +const float inline_cost_decay_factor = 0.5; +const int inline_initial_cost = 150; + +/* + * These are managed statics so LLVM knows to deallocate them during an + * LLVMShutdown(), rather than after (which'd cause crashes). + */ +typedef llvm::StringMap > ModuleCache; +llvm::ManagedStatic module_cache; +typedef llvm::StringMap > SummaryCache; +llvm::ManagedStatic summary_cache; + + +static std::unique_ptr llvm_build_inline_plan(llvm::Module *mod); +static void llvm_execute_inline_plan(llvm::Module *mod, + ImportMapTy *globalsToInline); + +static llvm::Module* load_module_cached(llvm::StringRef modPath); +static std::unique_ptr load_module(llvm::StringRef Identifier); +static std::unique_ptr llvm_load_summary(llvm::StringRef path); + + +static llvm::Function* create_redirection_function(std::unique_ptr &importMod, + llvm::Function *F, + llvm::StringRef Name); + +static bool function_inlinable(llvm::Function &F, + int threshold, + FunctionInlineStates &functionState, + InlineWorkList &worklist, + InlineSearchPath &searchpath, + llvm::SmallPtrSet &visitedFunctions, + int &running_instcount, + llvm::StringSet<> &importVars); +static void function_references(llvm::Function &F, + int &running_instcount, + llvm::SmallPtrSet &referencedVars, + llvm::SmallPtrSet &referencedFunctions); + +static void add_module_to_inline_search_path(InlineSearchPath& path, llvm::StringRef modpath); +static llvm::SmallVector +summaries_for_guid(const InlineSearchPath& path, llvm::GlobalValue::GUID guid); + +/* verbose debugging for inliner development */ +/* #define INLINE_DEBUG */ +#ifdef INLINE_DEBUG +#define ilog elog +#else +#define ilog(...) (void) 0 +#endif + +/* + * Perform inlining of external function references in M based on a simple + * cost based analysis. + */ +void +llvm_inline(LLVMModuleRef M) +{ + llvm::Module *mod = llvm::unwrap(M); + + std::unique_ptr globalsToInline = llvm_build_inline_plan(mod); + if (!globalsToInline) + return; + llvm_execute_inline_plan(mod, globalsToInline.get()); +} + +/* + * Build information necessary for inlining external function references in + * mod. + */ +static std::unique_ptr +llvm_build_inline_plan(llvm::Module *mod) +{ + std::unique_ptr globalsToInline = llvm::make_unique(); + FunctionInlineStates functionStates; + InlineWorkList worklist; + + InlineSearchPath defaultSearchPath; + + /* attempt to add module to search path */ + add_module_to_inline_search_path(defaultSearchPath, "$libdir/postgres"); + /* if postgres isn't available, no point continuing */ + if (defaultSearchPath.empty()) + return nullptr; + + /* + * Start inlining with current references to external functions by putting + * them on the inlining worklist. If, during inlining of those, new extern + * functions need to be inlined, they'll also be put there, with a lower + * priority. 
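+ *
+ * (Worked example, using the constants defined above:) a function
+ * referenced directly from the module is considered with a budget of
+ * inline_initial_cost = 150 instructions; anything it references is
+ * considered with 150 * inline_cost_decay_factor = 75, the next level
+ * with 37, and so on, so deep dependency chains stop being considered
+ * fairly quickly.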
+ */ + for (const llvm::Function &funcDecl : mod->functions()) + { + InlineWorkListItem item = {}; + FunctionInlineState inlineState = {}; + + /* already has a definition */ + if (!funcDecl.isDeclaration()) + continue; + + /* llvm provides implementation */ + if (funcDecl.isIntrinsic()) + continue; + + item.symbolName = funcDecl.getName(); + item.searchpath = defaultSearchPath; + worklist.push_back(item); + inlineState.costLimit = inline_initial_cost; + inlineState.processed = false; + inlineState.inlined = false; + inlineState.allowReconsidering = false; + functionStates[funcDecl.getName()] = inlineState; + } + + /* + * Iterate over pending worklist items, look them up in index, check + * whether they should be inlined. + */ + while (!worklist.empty()) + { + InlineWorkListItem item = worklist.pop_back_val(); + llvm::StringRef symbolName = item.symbolName; + char *cmodname; + char *cfuncname; + FunctionInlineState &inlineState = functionStates[symbolName]; + llvm::GlobalValue::GUID funcGUID; + + llvm_split_symbol_name(symbolName.data(), &cmodname, &cfuncname); + + funcGUID = llvm::GlobalValue::getGUID(cfuncname); + + /* already processed */ + if (inlineState.processed) + continue; + + + if (cmodname) + add_module_to_inline_search_path(item.searchpath, cmodname); + + /* + * Iterate over all known definitions of function, via the index. Then + * look up module(s), check if function actually is defined (there + * could be hash conflicts). + */ + for (const auto &gvs : summaries_for_guid(item.searchpath, funcGUID)) + { + const llvm::FunctionSummary *fs; + llvm::StringRef modPath = gvs->modulePath(); + llvm::Module *defMod; + llvm::Function *funcDef; + + fs = llvm::cast(gvs); + +#if LLVM_VERSION_MAJOR > 3 + if (gvs->notEligibleToImport()) + { + ilog(DEBUG1, "ineligibile to import %s due to summary", + symbolName.data()); + continue; + } +#endif + + if ((int) fs->instCount() > inlineState.costLimit) + { + ilog(DEBUG1, "ineligibile to import %s due to early threshold: %u vs %u", + symbolName.data(), fs->instCount(), inlineState.costLimit); + inlineState.allowReconsidering = true; + continue; + } + + defMod = load_module_cached(modPath); + if (defMod->materializeMetadata()) + elog(FATAL, "failed to materialize metadata"); + + funcDef = defMod->getFunction(cfuncname); + + /* + * This can happen e.g. in case of a hash collision of the + * function's name. + */ + if (!funcDef) + continue; + + if (funcDef->materialize()) + elog(FATAL, "failed to materialize metadata"); + + Assert(!funcDef->isDeclaration()); + Assert(funcDef->hasExternalLinkage()); + + llvm::StringSet<> importVars; + llvm::SmallPtrSet visitedFunctions; + int running_instcount = 0; + + /* + * Check whether function, and objects it depends on, are + * inlinable. + */ + if (function_inlinable(*funcDef, + inlineState.costLimit, + functionStates, + worklist, + item.searchpath, + visitedFunctions, + running_instcount, + importVars)) + { + /* + * Check whether function and all its dependencies are too + * big. Dependencies already counted for other functions that + * will get inlined are not counted again. While this make + * things somewhat order dependant, I can't quite see a point + * in a different behaviour. 
+ */ + if (running_instcount > inlineState.costLimit) + { + ilog(DEBUG1, "skipping inlining of %s due to late threshold %d vs %d", + symbolName.data(), running_instcount, inlineState.costLimit); + inlineState.allowReconsidering = true; + continue; + } + + ilog(DEBUG1, "inline top function %s total_instcount: %d, partial: %d", + symbolName.data(), running_instcount, fs->instCount()); + + /* import referenced function itself */ + importVars.insert(symbolName); + + { + llvm::StringSet<> &modGlobalsToInline = (*globalsToInline)[modPath]; + for (auto& importVar : importVars) + modGlobalsToInline.insert(importVar.first()); + Assert(modGlobalsToInline.size() > 0); + } + + /* mark function as inlined */ + inlineState.inlined = true; + + /* + * Found definition to inline, don't look for further + * potential definitions. + */ + break; + } + else + { + ilog(DEBUG1, "had to skip inlining %s", + symbolName.data()); + + /* It's possible there's another definition that's inlinable. */ + } + } + + /* + * Signal that we're done with symbol, whether successful (inlined = + * true above) or not. + */ + inlineState.processed = true; + } + + return globalsToInline; +} + +/* + * Perform the actual inlining of external functions (and their dependencies) + * into mod. + */ +static void +llvm_execute_inline_plan(llvm::Module *mod, ImportMapTy *globalsToInline) +{ + llvm::IRMover Mover(*mod); + + for (const auto& toInline : *globalsToInline) + { + const llvm::StringRef& modPath = toInline.first(); + const llvm::StringSet<>& modGlobalsToInline = toInline.second; + llvm::SetVector GlobalsToImport; + + Assert(module_cache->count(modPath)); + std::unique_ptr importMod(std::move((*module_cache)[modPath])); + module_cache->erase(modPath); + + if (modGlobalsToInline.empty()) + continue; + + for (auto &glob: modGlobalsToInline) + { + llvm::StringRef SymbolName = glob.first(); + char *modname; + char *funcname; + + llvm_split_symbol_name(SymbolName.data(), &modname, &funcname); + + llvm::GlobalValue *valueToImport = importMod->getNamedValue(funcname); + + if (!valueToImport) + elog(FATAL, "didn't refind value %s to import", SymbolName.data()); + + /* + * For functions (global vars are only inlined if already static), + * mark imported variables as being clones from other + * functions. That a) avoids symbol conflicts b) allows the + * optimizer to perform inlining. + */ + if (llvm::isa(valueToImport)) + { + llvm::Function *F = llvm::dyn_cast(valueToImport); + typedef llvm::GlobalValue::LinkageTypes LinkageTypes; + + /* + * Per-function info isn't necessarily stripped yet, as the + * module is lazy-loaded when stripped above. + */ + llvm::stripDebugInfo(*F); + + /* + * If the to-be-imported function is one referenced including + * its module name, create a tiny inline function that just + * forwards the call. One might think a GlobalAlias would do + * the trick, but a) IRMover doesn't override a declaration + * with an alias pointing to a definition (instead renaming + * it), b) Aliases can't be AvailableExternally. 
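+ *
+ * Conceptually (an illustrative sketch only; "myext.myfunc" and "myfunc"
+ * are placeholder names), the wrapper built by
+ * create_redirection_function() behaves like
+ *
+ *		available_externally myext.myfunc(args):
+ *			return myfunc(args)            (call marked always-inline)
+ *
+ * so the optimizer can collapse the extra hop, while IRMover still sees
+ * an ordinary function definition under the dotted name.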
+ */ + if (modname) + { + llvm::Function *AF; + + AF = create_redirection_function(importMod, F, SymbolName); + + GlobalsToImport.insert(AF); + llvm::stripDebugInfo(*AF); + } + + if (valueToImport->hasExternalLinkage()) + { + valueToImport->setLinkage(LinkageTypes::AvailableExternallyLinkage); + } + } + + GlobalsToImport.insert(valueToImport); + ilog(DEBUG1, "performing import of %s %s", + modPath.data(), SymbolName.data()); + + } + +#if LLVM_VERSION_MAJOR > 4 +#define IRMOVE_PARAMS , /*IsPerformingImport=*/false +#elif LLVM_VERSION_MAJOR > 3 +#define IRMOVE_PARAMS , /*LinkModuleInlineAsm=*/false, /*IsPerformingImport=*/false +#else +#define IRMOVE_PARAMS +#endif + if (Mover.move(std::move(importMod), GlobalsToImport.getArrayRef(), + [](llvm::GlobalValue &, llvm::IRMover::ValueAdder) {} + IRMOVE_PARAMS)) + elog(FATAL, "function import failed with linker error"); + } +} + +/* + * Return a module identified by modPath, caching it in memory. + * + * Note that such a module may *not* be modified without copying, otherwise + * the cache state would get corrupted. + */ +static llvm::Module* +load_module_cached(llvm::StringRef modPath) +{ + auto it = module_cache->find(modPath); + if (it == module_cache->end()) + { + it = module_cache->insert( + std::make_pair(modPath, load_module(modPath))).first; + } + + return it->second.get(); +} + +static std::unique_ptr +load_module(llvm::StringRef Identifier) +{ + LLVMMemoryBufferRef buf; + LLVMModuleRef mod; + char path[MAXPGPATH]; + char *msg; + + snprintf(path, MAXPGPATH,"%s/bitcode/%s", pkglib_path, Identifier.data()); + + if (LLVMCreateMemoryBufferWithContentsOfFile(path, &buf, &msg)) + elog(FATAL, "failed to open bitcode file \"%s\": %s", + path, msg); + if (LLVMGetBitcodeModuleInContext2(LLVMGetGlobalContext(), buf, &mod)) + elog(FATAL, "failed to parse bitcode in file \"%s\"", path); + + /* + * Currently there's no use in more detailed debug info for JITed + * code. Until that changes, not much point in wasting memory and cycles + * on processing debuginfo. + */ + llvm::StripDebugInfo(*llvm::unwrap(mod)); + + return std::unique_ptr(llvm::unwrap(mod)); +} + +/* + * Compute list of referenced variables, functions and the instruction count + * for a function. + */ +static void +function_references(llvm::Function &F, + int &running_instcount, + llvm::SmallPtrSet &referencedVars, + llvm::SmallPtrSet &referencedFunctions) +{ + llvm::SmallPtrSet Visited; + + for (llvm::BasicBlock &BB : F) + { + for (llvm::Instruction &I : BB) + { + if (llvm::isa(I)) + continue; + + llvm::SmallVector Worklist; + Worklist.push_back(&I); + + running_instcount++; + + while (!Worklist.empty()) { + llvm::User *U = Worklist.pop_back_val(); + + /* visited before */ + if (!Visited.insert(U).second) + continue; + + for (auto &OI : U->operands()) { + llvm::User *Operand = llvm::dyn_cast(OI); + if (!Operand) + continue; + if (llvm::isa(Operand)) + continue; + if (auto *GV = llvm::dyn_cast(Operand)) { + referencedVars.insert(GV); + if (GV->hasInitializer()) + Worklist.push_back(GV->getInitializer()); + continue; + } + if (auto *CF = llvm::dyn_cast(Operand)) { + referencedFunctions.insert(CF); + continue; + } + Worklist.push_back(Operand); + } + } + } + } +} + +/* + * Check whether function F is inlinable and, if so, what globals need to be + * imported. + * + * References to external functions from, potentially recursively, inlined + * functions are added to the passed in worklist. 
+ */ +static bool +function_inlinable(llvm::Function &F, + int threshold, + FunctionInlineStates &functionStates, + InlineWorkList &worklist, + InlineSearchPath &searchpath, + llvm::SmallPtrSet &visitedFunctions, + int &running_instcount, + llvm::StringSet<> &importVars) +{ + int subThreshold = threshold * inline_cost_decay_factor; + llvm::SmallPtrSet referencedVars; + llvm::SmallPtrSet referencedFunctions; + + /* can't rely on what may be inlined */ + if (F.isInterposable()) + return false; + + /* + * Can't rely on function being present. Alternatively we could create a + * static version of these functions? + */ + if (F.hasAvailableExternallyLinkage()) + return false; + + ilog(DEBUG1, "checking inlinability of %s", F.getName().data()); + + if (F.materialize()) + elog(FATAL, "failed to materialize metadata"); + + if (F.getAttributes().hasFnAttribute(llvm::Attribute::NoInline)) + { + ilog(DEBUG1, "ineligibile to import %s due to noinline", + F.getName().data()); + return false; + } + + function_references(F, running_instcount, referencedVars, referencedFunctions); + + for (llvm::GlobalVariable* rv: referencedVars) + { + if (rv->materialize()) + elog(FATAL, "failed to materialize metadata"); + + /* + * Never want to inline externally visible vars, cheap enough to + * reference. + */ + if (rv->hasExternalLinkage() || rv->hasAvailableExternallyLinkage()) + continue; + + /* + * If variable is file-local, we need to inline it, to be able to + * inline the function itself. Can't do that if the variable can be + * modified, because they'd obviously get out of sync. + * + * XXX: Currently not a problem, but there'd be problems with + * nontrivial initializers if they were allowed for postgres. + */ + if (!rv->isConstant()) + { + ilog(DEBUG1, "cannot inline %s due to uncloneable variable %s", + F.getName().data(), rv->getName().data()); + return false; + } + + ilog(DEBUG1, "memorizing global var %s linkage %d for inlining", + rv->getName().data(), (int)rv->getLinkage()); + + importVars.insert(rv->getName()); + /* small cost attributed to each cloned global */ + running_instcount += 5; + } + + visitedFunctions.insert(&F); + + /* + * Check referenced functions. Check whether used static ones are + * inlinable, and remember external ones for inlining. + */ + for (llvm::Function* referencedFunction: referencedFunctions) + { + llvm::StringSet<> recImportVars; + + if (referencedFunction->materialize()) + elog(FATAL, "failed to materialize metadata"); + + if (referencedFunction->isIntrinsic()) + continue; + + /* if already visited skip, otherwise remember */ + if (!visitedFunctions.insert(referencedFunction).second) + continue; + + /* + * We don't inline external functions directly here, instead we put + * them on the worklist if appropriate and check them from + * llvm_build_inline_plan(). + */ + if (referencedFunction->hasExternalLinkage()) + { + llvm::StringRef funcName = referencedFunction->getName(); + + /* + * Don't bother checking for inlining if remaining cost budget is + * very small. 
+ */ + if (subThreshold < 5) + continue; + + auto it = functionStates.find(funcName); + if (it == functionStates.end()) + { + FunctionInlineState inlineState; + + inlineState.costLimit = subThreshold; + inlineState.processed = false; + inlineState.inlined = false; + inlineState.allowReconsidering = false; + + functionStates[funcName] = inlineState; + worklist.push_back({funcName, searchpath}); + + ilog(DEBUG1, + "considering extern function %s at %d for inlining", + funcName.data(), subThreshold); + } + else if (!it->second.inlined && + (!it->second.processed || it->second.allowReconsidering) && + it->second.costLimit < subThreshold) + { + /* + * Update inlining threshold if higher. Need to re-queue + * to be processed if already processed with lower + * threshold. + */ + if (it->second.processed) + { + ilog(DEBUG1, + "reconsidering extern function %s at %d for inlining, increasing from %d", + funcName.data(), subThreshold, it->second.costLimit); + + it->second.processed = false; + it->second.allowReconsidering = false; + worklist.push_back({funcName, searchpath}); + } + it->second.costLimit = subThreshold; + } + continue; + } + + /* can't rely on what may be inlined */ + if (referencedFunction->isInterposable()) + return false; + + if (!function_inlinable(*referencedFunction, + subThreshold, + functionStates, + worklist, + searchpath, + visitedFunctions, + running_instcount, + recImportVars)) + { + ilog(DEBUG1, + "cannot inline %s due to required function %s not being inlinable", + F.getName().data(), referencedFunction->getName().data()); + return false; + } + + /* import referenced function itself */ + importVars.insert(referencedFunction->getName()); + + /* import referenced function and its dependants */ + for (auto& recImportVar : recImportVars) + importVars.insert(recImportVar.first()); + } + + return true; +} + +/* + * Attempt to load module summary located at path. Return empty pointer when + * loading fails. + */ +static std::unique_ptr +llvm_load_summary(llvm::StringRef path) +{ + llvm::ErrorOr > MBOrErr = + llvm::MemoryBuffer::getFile(path); + + if (std::error_code EC = MBOrErr.getError()) + { + ilog(DEBUG1, "failed to open %s: %s", path.data(), + EC.message().c_str()); + } + else + { + llvm::MemoryBufferRef ref(*MBOrErr.get().get()); + +#if LLVM_VERSION_MAJOR > 3 + llvm::Expected > IndexOrErr = + llvm::getModuleSummaryIndex(ref); + if (IndexOrErr) + return std::move(IndexOrErr.get()); + elog(FATAL, "failed to load summary \"%s\": %s", + path.data(), + toString(IndexOrErr.takeError()).c_str()); +#else + llvm::ErrorOr > IndexOrErr = + llvm::getModuleSummaryIndex(ref, [](const llvm::DiagnosticInfo &) {}); + if (IndexOrErr) + return std::move(IndexOrErr.get()); + elog(FATAL, "failed to load summary \"%s\": %s", + path.data(), + IndexOrErr.getError().message().c_str()); +#endif + } + return nullptr; +} + +/* + * Attempt to add modpath to the search path. 
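+ *
+ * For example (path shown for illustration only), a reference to
+ * "$libdir/postgres" is resolved to the pre-built summary file
+ * "<pkglib_path>/bitcode/postgres.index.bc" before being added to the
+ * search path.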
+ */ +static void +add_module_to_inline_search_path(InlineSearchPath& searchpath, llvm::StringRef modpath) +{ + /* only extension in libdir are candidates for inlining for now */ + if (!modpath.startswith("$libdir/")) + return; + + /* if there's no match, attempt to load */ + auto it = summary_cache->find(modpath); + if (it == summary_cache->end()) + { + std::string path(modpath); + path = path.replace(0, strlen("$libdir"), std::string(pkglib_path) + "/bitcode"); + path += ".index.bc"; + (*summary_cache)[modpath] = llvm_load_summary(path); + it = summary_cache->find(modpath); + } + + Assert(it != summary_cache->end()); + + /* if the entry isn't NULL, it's validly loaded */ + if (it->second) + searchpath.push_back(it->second.get()); +} + +/* + * Search for all references for functions hashing to guid in the search path, + * and return them in search path order. + */ +static llvm::SmallVector +summaries_for_guid(const InlineSearchPath& path, llvm::GlobalValue::GUID guid) +{ + llvm::SmallVector matches; + + for (auto index : path) + { +#if LLVM_VERSION_MAJOR > 4 + llvm::ValueInfo funcVI = index->getValueInfo(guid); + + /* if index doesn't know function, we don't have a body, continue */ + if (funcVI) + for (auto &gv : funcVI.getSummaryList()) + matches.push_back(gv.get()); +#else + const llvm::const_gvsummary_iterator &I = + index->findGlobalValueSummaryList(guid); + if (I != index->end()) + { + for (auto &gv : I->second) + matches.push_back(gv.get()); + } +#endif + } + + return matches; +} + +/* + * Create inline wrapper with the name Name, redirecting the call to F. + */ +static llvm::Function* +create_redirection_function(std::unique_ptr &importMod, + llvm::Function *F, + llvm::StringRef Name) +{ + typedef llvm::GlobalValue::LinkageTypes LinkageTypes; + + llvm::LLVMContext &Context = F->getContext(); + llvm::IRBuilder<> Builder(Context); + llvm::Function *AF; + llvm::BasicBlock *BB; + llvm::CallInst *fwdcall; + llvm::Attribute inlineAttribute; + + AF = llvm::Function::Create(F->getFunctionType(), + LinkageTypes::AvailableExternallyLinkage, + Name, importMod.get()); + BB = llvm::BasicBlock::Create(Context, "entry", AF); + + Builder.SetInsertPoint(BB); + fwdcall = Builder.CreateCall(F, &*AF->arg_begin()); + inlineAttribute = llvm::Attribute::get(Context, + llvm::Attribute::AlwaysInline); + fwdcall->addAttribute(~0U, inlineAttribute); + Builder.CreateRet(fwdcall); + + return AF; +} diff --git a/src/backend/jit/llvm/llvmjit_types.c b/src/backend/jit/llvm/llvmjit_types.c new file mode 100644 index 0000000000..855a6977ee --- /dev/null +++ b/src/backend/jit/llvm/llvmjit_types.c @@ -0,0 +1,107 @@ +/*------------------------------------------------------------------------- + * + * llvmjit_types.c + * List of types needed by JIT emitting code. + * + * JIT emitting code often needs to access struct elements, create functions + * with the correct signature etc. To allow synchronizing these types with a + * low chance of definitions getting out of sync, this file lists types and + * functions that directly need to be accessed from LLVM. + * + * When LLVM is first used in a backend, a bitcode version of this file will + * be loaded. The needed types and signatures will be stored into Struct*, + * Type*, Func* variables. + * + * NB: This file will not be linked into the server, it's just converted to + * bitcode. 
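+ *
+ * For example (illustrative; the same call appears in llvmjit_expr.c),
+ * emitting an access to FunctionCallInfoData.isnull relies on the
+ * StructFunctionCallInfoData type recorded here matching the layout the C
+ * compiler used:
+ *
+ *		LLVMBuildStructGEP(b, v_fcinfo,
+ *						   FIELDNO_FUNCTIONCALLINFODATA_ISNULL,
+ *						   "v_fcinfo_isnull");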
+ * + * + * Copyright (c) 2016-2018, PostgreSQL Global Development Group + * + * IDENTIFICATION + * src/backend/jit/llvm/llvmjit_types.c + * + *------------------------------------------------------------------------- + */ + +#include "postgres.h" + +#include "access/htup.h" +#include "access/htup_details.h" +#include "access/tupdesc.h" +#include "catalog/pg_attribute.h" +#include "executor/execExpr.h" +#include "executor/nodeAgg.h" +#include "executor/tuptable.h" +#include "fmgr.h" +#include "nodes/execnodes.h" +#include "nodes/memnodes.h" +#include "utils/expandeddatum.h" +#include "utils/palloc.h" + + +/* + * List of types needed for JITing. These have to be non-static, otherwise + * clang/LLVM will omit them. As this file will never be linked into + * anything, that's harmless. + */ +PGFunction TypePGFunction; +size_t TypeSizeT; +bool TypeStorageBool; + +AggState StructAggState; +AggStatePerGroupData StructAggStatePerGroupData; +AggStatePerTransData StructAggStatePerTransData; +ExprContext StructExprContext; +ExprEvalStep StructExprEvalStep; +ExprState StructExprState; +FunctionCallInfoData StructFunctionCallInfoData; +HeapTupleData StructHeapTupleData; +MemoryContextData StructMemoryContextData; +TupleTableSlot StructTupleTableSlot; +struct tupleDesc StructtupleDesc; + + +/* + * To determine which attributes functions need to have (depends e.g. on + * compiler version and settings) to be compatible for inlining, we simply + * copy the attributes of this function. + */ +extern Datum AttributeTemplate(PG_FUNCTION_ARGS); +Datum +AttributeTemplate(PG_FUNCTION_ARGS) +{ + PG_RETURN_NULL(); +} + +/* + * Clang represents stdbool.h style booleans that are returned by functions + * differently (as i1) than stored ones (as i8). Therefore we do not just need + * TypeBool (above), but also a way to determine the width of a returned + * integer. This allows us to keep compatible with non-stdbool using + * architectures. + */ +extern bool FunctionReturningBool(void); +bool +FunctionReturningBool(void) +{ + return false; +} + +/* + * To force signatures of functions used during JITing to be present, + * reference the functions required. This again has to be non-static, to avoid + * being removed as unnecessary. + */ +void *referenced_functions[] = +{ + strlen, + varsize_any, + slot_getsomeattrs, + slot_getmissingattrs, + MakeExpandedObjectReadOnlyInternal, + ExecEvalArrayRefSubscript, + ExecEvalSysVar, + ExecAggTransReparent, + ExecAggInitGroup +}; diff --git a/src/backend/jit/llvm/llvmjit_wrap.cpp b/src/backend/jit/llvm/llvmjit_wrap.cpp new file mode 100644 index 0000000000..4d0c3f680f --- /dev/null +++ b/src/backend/jit/llvm/llvmjit_wrap.cpp @@ -0,0 +1,46 @@ +/*------------------------------------------------------------------------- + * + * llvmjit_wrap.cpp + * Parts of the LLVM interface not (yet) exposed to C. + * + * Copyright (c) 2016-2018, PostgreSQL Global Development Group + * + * IDENTIFICATION + * src/backend/lib/llvm/llvmjit_wrap.c + * + *------------------------------------------------------------------------- + */ + +extern "C" +{ +#include "postgres.h" +} + +#include +#include + +#include "jit/llvmjit.h" + + +/* + * C-API extensions. 
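+ *
+ * (Illustrative use only, assuming the usual LLVM C API; the wrappers
+ * below strdup() their results, so free() is appropriate here:)
+ *
+ *		char	   *cpu = LLVMGetHostCPUName();
+ *		char	   *features = LLVMGetHostCPUFeatures();
+ *
+ *		... pass cpu and features to LLVMCreateTargetMachine() ...
+ *
+ *		free(cpu);
+ *		free(features);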
+ */ +#if defined(HAVE_DECL_LLVMGETHOSTCPUNAME) && !HAVE_DECL_LLVMGETHOSTCPUNAME +char *LLVMGetHostCPUName(void) { + return strdup(llvm::sys::getHostCPUName().data()); +} +#endif + + +#if defined(HAVE_DECL_LLVMGETHOSTCPUFEATURES) && !HAVE_DECL_LLVMGETHOSTCPUFEATURES +char *LLVMGetHostCPUFeatures(void) { + llvm::SubtargetFeatures Features; + llvm::StringMap HostFeatures; + + if (llvm::sys::getHostCPUFeatures(HostFeatures)) + for (auto &F : HostFeatures) + Features.AddFeature(F.first(), F.second); + + return strdup(Features.getString().c_str()); +} +#endif diff --git a/src/backend/lib/Makefile b/src/backend/lib/Makefile index f222c6c20d..191ea9bca2 100644 --- a/src/backend/lib/Makefile +++ b/src/backend/lib/Makefile @@ -12,7 +12,7 @@ subdir = src/backend/lib top_builddir = ../../.. include $(top_builddir)/src/Makefile.global -OBJS = binaryheap.o bipartite_match.o hyperloglog.o ilist.o knapsack.o \ - pairingheap.o rbtree.o stringinfo.o +OBJS = binaryheap.o bipartite_match.o bloomfilter.o dshash.o hyperloglog.o \ + ilist.o knapsack.o pairingheap.o rbtree.o stringinfo.o include $(top_srcdir)/src/backend/common.mk diff --git a/src/backend/lib/README b/src/backend/lib/README index 5e5ba5e437..ae5debe1bc 100644 --- a/src/backend/lib/README +++ b/src/backend/lib/README @@ -3,14 +3,22 @@ in the backend: binaryheap.c - a binary heap +bipartite_match.c - Hopcroft-Karp maximum cardinality algorithm for bipartite graphs + +bloomfilter.c - probabilistic, space-efficient set membership testing + +dshash.c - concurrent hash tables backed by dynamic shared memory areas + hyperloglog.c - a streaming cardinality estimator +ilist.c - single and double-linked lists + +knapsack.c - knapsack problem solver + pairingheap.c - a pairing heap rbtree.c - a red-black tree -ilist.c - single and double-linked lists. - stringinfo.c - an extensible string type diff --git a/src/backend/lib/binaryheap.c b/src/backend/lib/binaryheap.c index ce4331ccde..a8adf065a9 100644 --- a/src/backend/lib/binaryheap.c +++ b/src/backend/lib/binaryheap.c @@ -3,7 +3,7 @@ * binaryheap.c * A simple binary heap implementation * - * Portions Copyright (c) 2012-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 2012-2018, PostgreSQL Global Development Group * * IDENTIFICATION * src/backend/lib/binaryheap.c diff --git a/src/backend/lib/bipartite_match.c b/src/backend/lib/bipartite_match.c index 4564a463d5..2870b9e063 100644 --- a/src/backend/lib/bipartite_match.c +++ b/src/backend/lib/bipartite_match.c @@ -5,9 +5,9 @@ * * This implementation is based on pseudocode found at: * - * http://en.wikipedia.org/w/index.php?title=Hopcroft%E2%80%93Karp_algorithm&oldid=593898016 + * https://en.wikipedia.org/w/index.php?title=Hopcroft%E2%80%93Karp_algorithm&oldid=593898016 * - * Copyright (c) 2015-2017, PostgreSQL Global Development Group + * Copyright (c) 2015-2018, PostgreSQL Global Development Group * * IDENTIFICATION * src/backend/lib/bipartite_match.c diff --git a/src/backend/lib/bloomfilter.c b/src/backend/lib/bloomfilter.c new file mode 100644 index 0000000000..1a8bc2c36c --- /dev/null +++ b/src/backend/lib/bloomfilter.c @@ -0,0 +1,306 @@ +/*------------------------------------------------------------------------- + * + * bloomfilter.c + * Space-efficient set membership testing + * + * A Bloom filter is a probabilistic data structure that is used to test an + * element's membership of a set. 
False positives are possible, but false + * negatives are not; a test of membership of the set returns either "possibly + * in set" or "definitely not in set". This is typically very space efficient, + * which can be a decisive advantage. + * + * Elements can be added to the set, but not removed. The more elements that + * are added, the larger the probability of false positives. Caller must hint + * an estimated total size of the set when the Bloom filter is initialized. + * This is used to balance the use of memory against the final false positive + * rate. + * + * The implementation is well suited to data synchronization problems between + * unordered sets, especially where predictable performance is important and + * some false positives are acceptable. It's also well suited to cache + * filtering problems where a relatively small and/or low cardinality set is + * fingerprinted, especially when many subsequent membership tests end up + * indicating that values of interest are not present. That should save the + * caller many authoritative lookups, such as expensive probes of a much larger + * on-disk structure. + * + * Copyright (c) 2018, PostgreSQL Global Development Group + * + * IDENTIFICATION + * src/backend/lib/bloomfilter.c + * + *------------------------------------------------------------------------- + */ +#include "postgres.h" + +#include + +#include "access/hash.h" +#include "lib/bloomfilter.h" + +#define MAX_HASH_FUNCS 10 + +struct bloom_filter +{ + /* K hash functions are used, seeded by caller's seed */ + int k_hash_funcs; + uint64 seed; + /* m is bitset size, in bits. Must be a power of two <= 2^32. */ + uint64 m; + unsigned char bitset[FLEXIBLE_ARRAY_MEMBER]; +}; + +static int my_bloom_power(uint64 target_bitset_bits); +static int optimal_k(uint64 bitset_bits, int64 total_elems); +static void k_hashes(bloom_filter *filter, uint32 *hashes, unsigned char *elem, + size_t len); +static inline uint32 mod_m(uint32 a, uint64 m); + +/* + * Create Bloom filter in caller's memory context. We aim for a false positive + * rate of between 1% and 2% when bitset size is not constrained by memory + * availability. + * + * total_elems is an estimate of the final size of the set. It should be + * approximately correct, but the implementation can cope well with it being + * off by perhaps a factor of five or more. See "Bloom Filters in + * Probabilistic Verification" (Dillinger & Manolios, 2004) for details of why + * this is the case. + * + * bloom_work_mem is sized in KB, in line with the general work_mem convention. + * This determines the size of the underlying bitset (trivial bookkeeping space + * isn't counted). The bitset is always sized as a power of two number of + * bits, and the largest possible bitset is 512MB (2^32 bits). The + * implementation allocates only enough memory to target its standard false + * positive rate, using a simple formula with caller's total_elems estimate as + * an input. The bitset might be as small as 1MB, even when bloom_work_mem is + * much higher. + * + * The Bloom filter is seeded using a value provided by the caller. Using a + * distinct seed value on every call makes it unlikely that the same false + * positives will reoccur when the same set is fingerprinted a second time. + * Callers that don't care about this pass a constant as their seed, typically + * 0. Callers can use a pseudo-random seed in the range of 0 - INT_MAX by + * calling random(). 
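+ *
+ * A minimal usage sketch (illustrative only; elem/len stand for whatever
+ * bytes the caller fingerprints, work_mem_kb for the caller's budget):
+ *
+ *		bloom_filter *filter = bloom_create(total_elems, work_mem_kb, 0);
+ *
+ *		bloom_add_element(filter, elem, len);
+ *		...
+ *		if (bloom_lacks_element(filter, elem, len))
+ *			(definitely absent, skip the expensive lookup)
+ *		else
+ *			(possibly present, do the authoritative check)
+ *
+ *		bloom_free(filter);
+ *
+ * As a worked sizing example: with total_elems = 1000000 and ample
+ * bloom_work_mem, the target is 2 bytes per element = 16000000 bits,
+ * which rounds down to 2^23 bits, i.e. a 1MB bitset, for which
+ * optimal_k() picks 6 hash functions.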
+ */ +bloom_filter * +bloom_create(int64 total_elems, int bloom_work_mem, uint64 seed) +{ + bloom_filter *filter; + int bloom_power; + uint64 bitset_bytes; + uint64 bitset_bits; + + /* + * Aim for two bytes per element; this is sufficient to get a false + * positive rate below 1%, independent of the size of the bitset or total + * number of elements. Also, if rounding down the size of the bitset to + * the next lowest power of two turns out to be a significant drop, the + * false positive rate still won't exceed 2% in almost all cases. + */ + bitset_bytes = Min(bloom_work_mem * UINT64CONST(1024), total_elems * 2); + bitset_bytes = Max(1024 * 1024, bitset_bytes); + + /* + * Size in bits should be the highest power of two <= target. bitset_bits + * is uint64 because PG_UINT32_MAX is 2^32 - 1, not 2^32 + */ + bloom_power = my_bloom_power(bitset_bytes * BITS_PER_BYTE); + bitset_bits = UINT64CONST(1) << bloom_power; + bitset_bytes = bitset_bits / BITS_PER_BYTE; + + /* Allocate bloom filter with unset bitset */ + filter = palloc0(offsetof(bloom_filter, bitset) + + sizeof(unsigned char) * bitset_bytes); + filter->k_hash_funcs = optimal_k(bitset_bits, total_elems); + filter->seed = seed; + filter->m = bitset_bits; + + return filter; +} + +/* + * Free Bloom filter + */ +void +bloom_free(bloom_filter *filter) +{ + pfree(filter); +} + +/* + * Add element to Bloom filter + */ +void +bloom_add_element(bloom_filter *filter, unsigned char *elem, size_t len) +{ + uint32 hashes[MAX_HASH_FUNCS]; + int i; + + k_hashes(filter, hashes, elem, len); + + /* Map a bit-wise address to a byte-wise address + bit offset */ + for (i = 0; i < filter->k_hash_funcs; i++) + { + filter->bitset[hashes[i] >> 3] |= 1 << (hashes[i] & 7); + } +} + +/* + * Test if Bloom filter definitely lacks element. + * + * Returns true if the element is definitely not in the set of elements + * observed by bloom_add_element(). Otherwise, returns false, indicating that + * element is probably present in set. + */ +bool +bloom_lacks_element(bloom_filter *filter, unsigned char *elem, size_t len) +{ + uint32 hashes[MAX_HASH_FUNCS]; + int i; + + k_hashes(filter, hashes, elem, len); + + /* Map a bit-wise address to a byte-wise address + bit offset */ + for (i = 0; i < filter->k_hash_funcs; i++) + { + if (!(filter->bitset[hashes[i] >> 3] & (1 << (hashes[i] & 7)))) + return true; + } + + return false; +} + +/* + * What proportion of bits are currently set? + * + * Returns proportion, expressed as a multiplier of filter size. That should + * generally be close to 0.5, even when we have more than enough memory to + * ensure a false positive rate within target 1% to 2% band, since more hash + * functions are used as more memory is available per element. + * + * This is the only instrumentation that is low overhead enough to appear in + * debug traces. When debugging Bloom filter code, it's likely to be far more + * interesting to directly test the false positive rate. + */ +double +bloom_prop_bits_set(bloom_filter *filter) +{ + int bitset_bytes = filter->m / BITS_PER_BYTE; + uint64 bits_set = 0; + int i; + + for (i = 0; i < bitset_bytes; i++) + { + unsigned char byte = filter->bitset[i]; + + while (byte) + { + bits_set++; + byte &= (byte - 1); + } + } + + return bits_set / (double) filter->m; +} + +/* + * Which element in the sequence of powers of two is less than or equal to + * target_bitset_bits? + * + * Value returned here must be generally safe as the basis for actual bitset + * size. + * + * Bitset is never allowed to exceed 2 ^ 32 bits (512MB). 
This is sufficient + * for the needs of all current callers, and allows us to use 32-bit hash + * functions. It also makes it easy to stay under the MaxAllocSize restriction + * (caller needs to leave room for non-bitset fields that appear before + * flexible array member, so a 1GB bitset would use an allocation that just + * exceeds MaxAllocSize). + */ +static int +my_bloom_power(uint64 target_bitset_bits) +{ + int bloom_power = -1; + + while (target_bitset_bits > 0 && bloom_power < 32) + { + bloom_power++; + target_bitset_bits >>= 1; + } + + return bloom_power; +} + +/* + * Determine optimal number of hash functions based on size of filter in bits, + * and projected total number of elements. The optimal number is the number + * that minimizes the false positive rate. + */ +static int +optimal_k(uint64 bitset_bits, int64 total_elems) +{ + int k = rint(log(2.0) * bitset_bits / total_elems); + + return Max(1, Min(k, MAX_HASH_FUNCS)); +} + +/* + * Generate k hash values for element. + * + * Caller passes array, which is filled-in with k values determined by hashing + * caller's element. + * + * Only 2 real independent hash functions are actually used to support an + * interface of up to MAX_HASH_FUNCS hash functions; enhanced double hashing is + * used to make this work. The main reason we prefer enhanced double hashing + * to classic double hashing is that the latter has an issue with collisions + * when using power of two sized bitsets. See Dillinger & Manolios for full + * details. + */ +static void +k_hashes(bloom_filter *filter, uint32 *hashes, unsigned char *elem, size_t len) +{ + uint64 hash; + uint32 x, + y; + uint64 m; + int i; + + /* Use 64-bit hashing to get two independent 32-bit hashes */ + hash = DatumGetUInt64(hash_any_extended(elem, len, filter->seed)); + x = (uint32) hash; + y = (uint32) (hash >> 32); + m = filter->m; + + x = mod_m(x, m); + y = mod_m(y, m); + + /* Accumulate hashes */ + hashes[0] = x; + for (i = 1; i < filter->k_hash_funcs; i++) + { + x = mod_m(x + y, m); + y = mod_m(y + i, m); + + hashes[i] = x; + } +} + +/* + * Calculate "val MOD m" inexpensively. + * + * Assumes that m (which is bitset size) is a power of two. + * + * Using a power of two number of bits for bitset size allows us to use bitwise + * AND operations to calculate the modulo of a hash value. It's also a simple + * way of avoiding the modulo bias effect. + */ +static inline uint32 +mod_m(uint32 val, uint64 m) +{ + Assert(m <= PG_UINT32_MAX + UINT64CONST(1)); + Assert(((m - 1) & m) == 0); + + return val & (m - 1); +} diff --git a/src/backend/lib/dshash.c b/src/backend/lib/dshash.c new file mode 100644 index 0000000000..b2b8fe60e1 --- /dev/null +++ b/src/backend/lib/dshash.c @@ -0,0 +1,899 @@ +/*------------------------------------------------------------------------- + * + * dshash.c + * Concurrent hash tables backed by dynamic shared memory areas. + * + * This is an open hashing hash table, with a linked list at each table + * entry. It supports dynamic resizing, as required to prevent the linked + * lists from growing too long on average. Currently, only growing is + * supported: the hash table never becomes smaller. + * + * To deal with concurrency, it has a fixed size set of partitions, each of + * which is independently locked. Each bucket maps to a partition; so insert, + * find and iterate operations normally only acquire one lock. Therefore, + * good concurrency is achieved whenever such operations don't collide at the + * lock partition level. 
However, when a resize operation begins, all + * partition locks must be acquired simultaneously for a brief period. This + * is only expected to happen a small number of times until a stable size is + * found, since growth is geometric. + * + * Future versions may support iterators and incremental resizing; for now + * the implementation is minimalist. + * + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group + * Portions Copyright (c) 1994, Regents of the University of California + * + * IDENTIFICATION + * src/backend/lib/dshash.c + * + *------------------------------------------------------------------------- + */ + +#include "postgres.h" + +#include "lib/dshash.h" +#include "storage/ipc.h" +#include "storage/lwlock.h" +#include "utils/dsa.h" +#include "utils/hsearch.h" +#include "utils/memutils.h" + +/* + * An item in the hash table. This wraps the user's entry object in an + * envelop that holds a pointer back to the bucket and a pointer to the next + * item in the bucket. + */ +struct dshash_table_item +{ + /* The next item in the same bucket. */ + dsa_pointer next; + /* The hashed key, to avoid having to recompute it. */ + dshash_hash hash; + /* The user's entry object follows here. See ENTRY_FROM_ITEM(item). */ +}; + +/* + * The number of partitions for locking purposes. This is set to match + * NUM_BUFFER_PARTITIONS for now, on the basis that whatever's good enough for + * the buffer pool must be good enough for any other purpose. This could + * become a runtime parameter in future. + */ +#define DSHASH_NUM_PARTITIONS_LOG2 7 +#define DSHASH_NUM_PARTITIONS (1 << DSHASH_NUM_PARTITIONS_LOG2) + +/* A magic value used to identify our hash tables. */ +#define DSHASH_MAGIC 0x75ff6a20 + +/* + * Tracking information for each lock partition. Initially, each partition + * corresponds to one bucket, but each time the hash table grows, the buckets + * covered by each partition split so the number of buckets covered doubles. + * + * We might want to add padding here so that each partition is on a different + * cache line, but doing so would bloat this structure considerably. + */ +typedef struct dshash_partition +{ + LWLock lock; /* Protects all buckets in this partition. */ + size_t count; /* # of items in this partition's buckets */ +} dshash_partition; + +/* + * The head object for a hash table. This will be stored in dynamic shared + * memory. + */ +typedef struct dshash_table_control +{ + dshash_table_handle handle; + uint32 magic; + dshash_partition partitions[DSHASH_NUM_PARTITIONS]; + int lwlock_tranche_id; + + /* + * The following members are written to only when ALL partitions locks are + * held. They can be read when any one partition lock is held. + */ + + /* Number of buckets expressed as power of 2 (8 = 256 buckets). */ + size_t size_log2; /* log2(number of buckets) */ + dsa_pointer buckets; /* current bucket array */ +} dshash_table_control; + +/* + * Per-backend state for a dynamic hash table. + */ +struct dshash_table +{ + dsa_area *area; /* Backing dynamic shared memory area. */ + dshash_parameters params; /* Parameters. */ + void *arg; /* User-supplied data pointer. */ + dshash_table_control *control; /* Control object in DSM. */ + dsa_pointer *buckets; /* Current bucket pointers in DSM. */ + size_t size_log2; /* log2(number of buckets) */ + bool find_locked; /* Is any partition lock held by 'find'? */ + bool find_exclusively_locked; /* ... exclusively? */ +}; + +/* Given a pointer to an item, find the entry (user data) it holds. 
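+ *
+ * (Illustrative layout: an allocated item is [dshash_table_item header]
+ * [padding up to MAXALIGN] [caller's entry], so the two macros below just
+ * hop over the MAXALIGN'd header in either direction.)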
*/ +#define ENTRY_FROM_ITEM(item) \ + ((char *)(item) + MAXALIGN(sizeof(dshash_table_item))) + +/* Given a pointer to an entry, find the item that holds it. */ +#define ITEM_FROM_ENTRY(entry) \ + ((dshash_table_item *)((char *)(entry) - \ + MAXALIGN(sizeof(dshash_table_item)))) + +/* How many resize operations (bucket splits) have there been? */ +#define NUM_SPLITS(size_log2) \ + (size_log2 - DSHASH_NUM_PARTITIONS_LOG2) + +/* How many buckets are there in each partition at a given size? */ +#define BUCKETS_PER_PARTITION(size_log2) \ + (((size_t) 1) << NUM_SPLITS(size_log2)) + +/* Max entries before we need to grow. Half + quarter = 75% load factor. */ +#define MAX_COUNT_PER_PARTITION(hash_table) \ + (BUCKETS_PER_PARTITION(hash_table->size_log2) / 2 + \ + BUCKETS_PER_PARTITION(hash_table->size_log2) / 4) + +/* Choose partition based on the highest order bits of the hash. */ +#define PARTITION_FOR_HASH(hash) \ + (hash >> ((sizeof(dshash_hash) * CHAR_BIT) - DSHASH_NUM_PARTITIONS_LOG2)) + +/* + * Find the bucket index for a given hash and table size. Each time the table + * doubles in size, the appropriate bucket for a given hash value doubles and + * possibly adds one, depending on the newly revealed bit, so that all buckets + * are split. + */ +#define BUCKET_INDEX_FOR_HASH_AND_SIZE(hash, size_log2) \ + (hash >> ((sizeof(dshash_hash) * CHAR_BIT) - (size_log2))) + +/* The index of the first bucket in a given partition. */ +#define BUCKET_INDEX_FOR_PARTITION(partition, size_log2) \ + ((partition) << NUM_SPLITS(size_log2)) + +/* The head of the active bucket for a given hash value (lvalue). */ +#define BUCKET_FOR_HASH(hash_table, hash) \ + (hash_table->buckets[ \ + BUCKET_INDEX_FOR_HASH_AND_SIZE(hash, \ + hash_table->size_log2)]) + +static void delete_item(dshash_table *hash_table, + dshash_table_item *item); +static void resize(dshash_table *hash_table, size_t new_size); +static inline void ensure_valid_bucket_pointers(dshash_table *hash_table); +static inline dshash_table_item *find_in_bucket(dshash_table *hash_table, + const void *key, + dsa_pointer item_pointer); +static void insert_item_into_bucket(dshash_table *hash_table, + dsa_pointer item_pointer, + dshash_table_item *item, + dsa_pointer *bucket); +static dshash_table_item *insert_into_bucket(dshash_table *hash_table, + const void *key, + dsa_pointer *bucket); +static bool delete_key_from_bucket(dshash_table *hash_table, + const void *key, + dsa_pointer *bucket_head); +static bool delete_item_from_bucket(dshash_table *hash_table, + dshash_table_item *item, + dsa_pointer *bucket_head); +static inline dshash_hash hash_key(dshash_table *hash_table, const void *key); +static inline bool equal_keys(dshash_table *hash_table, + const void *a, const void *b); + +#define PARTITION_LOCK(hash_table, i) \ + (&(hash_table)->control->partitions[(i)].lock) + +/* + * Create a new hash table backed by the given dynamic shared area, with the + * given parameters. The returned object is allocated in backend-local memory + * using the current MemoryContext. 'arg' will be passed through to the + * compare and hash functions. + */ +dshash_table * +dshash_create(dsa_area *area, const dshash_parameters *params, void *arg) +{ + dshash_table *hash_table; + dsa_pointer control; + + /* Allocate the backend-local object representing the hash table. */ + hash_table = palloc(sizeof(dshash_table)); + + /* Allocate the control object in shared memory. 
*/ + control = dsa_allocate(area, sizeof(dshash_table_control)); + + /* Set up the local and shared hash table structs. */ + hash_table->area = area; + hash_table->params = *params; + hash_table->arg = arg; + hash_table->control = dsa_get_address(area, control); + hash_table->control->handle = control; + hash_table->control->magic = DSHASH_MAGIC; + hash_table->control->lwlock_tranche_id = params->tranche_id; + + /* Set up the array of lock partitions. */ + { + dshash_partition *partitions = hash_table->control->partitions; + int tranche_id = hash_table->control->lwlock_tranche_id; + int i; + + for (i = 0; i < DSHASH_NUM_PARTITIONS; ++i) + { + LWLockInitialize(&partitions[i].lock, tranche_id); + partitions[i].count = 0; + } + } + + hash_table->find_locked = false; + hash_table->find_exclusively_locked = false; + + /* + * Set up the initial array of buckets. Our initial size is the same as + * the number of partitions. + */ + hash_table->control->size_log2 = DSHASH_NUM_PARTITIONS_LOG2; + hash_table->control->buckets = + dsa_allocate_extended(area, + sizeof(dsa_pointer) * DSHASH_NUM_PARTITIONS, + DSA_ALLOC_NO_OOM | DSA_ALLOC_ZERO); + if (!DsaPointerIsValid(hash_table->control->buckets)) + { + dsa_free(area, control); + ereport(ERROR, + (errcode(ERRCODE_OUT_OF_MEMORY), + errmsg("out of memory"), + errdetail("Failed on DSA request of size %zu.", + sizeof(dsa_pointer) * DSHASH_NUM_PARTITIONS))); + } + hash_table->buckets = dsa_get_address(area, + hash_table->control->buckets); + hash_table->size_log2 = hash_table->control->size_log2; + + return hash_table; +} + +/* + * Attach to an existing hash table using a handle. The returned object is + * allocated in backend-local memory using the current MemoryContext. 'arg' + * will be passed through to the compare and hash functions. + */ +dshash_table * +dshash_attach(dsa_area *area, const dshash_parameters *params, + dshash_table_handle handle, void *arg) +{ + dshash_table *hash_table; + dsa_pointer control; + + /* Allocate the backend-local object representing the hash table. */ + hash_table = palloc(sizeof(dshash_table)); + + /* Find the control object in shared memory. */ + control = handle; + + /* Set up the local hash table struct. */ + hash_table->area = area; + hash_table->params = *params; + hash_table->arg = arg; + hash_table->control = dsa_get_address(area, control); + hash_table->find_locked = false; + hash_table->find_exclusively_locked = false; + Assert(hash_table->control->magic == DSHASH_MAGIC); + + /* + * These will later be set to the correct values by + * ensure_valid_bucket_pointers(), at which time we'll be holding a + * partition lock for interlocking against concurrent resizing. + */ + hash_table->buckets = NULL; + hash_table->size_log2 = 0; + + return hash_table; +} + +/* + * Detach from a hash table. This frees backend-local resources associated + * with the hash table, but the hash table will continue to exist until it is + * either explicitly destroyed (by a backend that is still attached to it), or + * the area that backs it is returned to the operating system. + */ +void +dshash_detach(dshash_table *hash_table) +{ + Assert(!hash_table->find_locked); + + /* The hash table may have been destroyed. Just free local memory. */ + pfree(hash_table); +} + +/* + * Destroy a hash table, returning all memory to the area. The caller must be + * certain that no other backend will attempt to access the hash table before + * calling this function. 
Other backend must explicitly call dshash_detach to + * free up backend-local memory associated with the hash table. The backend + * that calls dshash_destroy must not call dshash_detach. + */ +void +dshash_destroy(dshash_table *hash_table) +{ + size_t size; + size_t i; + + Assert(hash_table->control->magic == DSHASH_MAGIC); + ensure_valid_bucket_pointers(hash_table); + + /* Free all the entries. */ + size = ((size_t) 1) << hash_table->size_log2; + for (i = 0; i < size; ++i) + { + dsa_pointer item_pointer = hash_table->buckets[i]; + + while (DsaPointerIsValid(item_pointer)) + { + dshash_table_item *item; + dsa_pointer next_item_pointer; + + item = dsa_get_address(hash_table->area, item_pointer); + next_item_pointer = item->next; + dsa_free(hash_table->area, item_pointer); + item_pointer = next_item_pointer; + } + } + + /* + * Vandalize the control block to help catch programming errors where + * other backends access the memory formerly occupied by this hash table. + */ + hash_table->control->magic = 0; + + /* Free the active table and control object. */ + dsa_free(hash_table->area, hash_table->control->buckets); + dsa_free(hash_table->area, hash_table->control->handle); + + pfree(hash_table); +} + +/* + * Get a handle that can be used by other processes to attach to this hash + * table. + */ +dshash_table_handle +dshash_get_hash_table_handle(dshash_table *hash_table) +{ + Assert(hash_table->control->magic == DSHASH_MAGIC); + + return hash_table->control->handle; +} + +/* + * Look up an entry, given a key. Returns a pointer to an entry if one can be + * found with the given key. Returns NULL if the key is not found. If a + * non-NULL value is returned, the entry is locked and must be released by + * calling dshash_release_lock. If an error is raised before + * dshash_release_lock is called, the lock will be released automatically, but + * the caller must take care to ensure that the entry is not left corrupted. + * The lock mode is either shared or exclusive depending on 'exclusive'. + * + * The caller must not lock a lock already. + * + * Note that the lock held is in fact an LWLock, so interrupts will be held on + * return from this function, and not resumed until dshash_release_lock is + * called. It is a very good idea for the caller to release the lock quickly. + */ +void * +dshash_find(dshash_table *hash_table, const void *key, bool exclusive) +{ + dshash_hash hash; + size_t partition; + dshash_table_item *item; + + hash = hash_key(hash_table, key); + partition = PARTITION_FOR_HASH(hash); + + Assert(hash_table->control->magic == DSHASH_MAGIC); + Assert(!hash_table->find_locked); + + LWLockAcquire(PARTITION_LOCK(hash_table, partition), + exclusive ? LW_EXCLUSIVE : LW_SHARED); + ensure_valid_bucket_pointers(hash_table); + + /* Search the active bucket. */ + item = find_in_bucket(hash_table, key, BUCKET_FOR_HASH(hash_table, hash)); + + if (!item) + { + /* Not found. */ + LWLockRelease(PARTITION_LOCK(hash_table, partition)); + return NULL; + } + else + { + /* The caller will free the lock by calling dshash_release. */ + hash_table->find_locked = true; + hash_table->find_exclusively_locked = exclusive; + return ENTRY_FROM_ITEM(item); + } +} + +/* + * Returns a pointer to an exclusively locked item which must be released with + * dshash_release_lock. If the key is found in the hash table, 'found' is set + * to true and a pointer to the existing entry is returned. If the key is not + * found, 'found' is set to false, and a pointer to a newly created entry is + * returned. 
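+ *
+ * A minimal usage sketch (illustrative; MyEntry and key are placeholders,
+ * and ht is a table obtained from dshash_create or dshash_attach):
+ *
+ *		MyEntry    *entry = dshash_find_or_insert(ht, &key, &found);
+ *
+ *		if (!found)
+ *			(initialize the parts of *entry other than the key)
+ *		(use *entry while the partition lock is held)
+ *		dshash_release_lock(ht, entry);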
+ * + * Notes above dshash_find() regarding locking and error handling equally + * apply here. + */ +void * +dshash_find_or_insert(dshash_table *hash_table, + const void *key, + bool *found) +{ + dshash_hash hash; + size_t partition_index; + dshash_partition *partition; + dshash_table_item *item; + + hash = hash_key(hash_table, key); + partition_index = PARTITION_FOR_HASH(hash); + partition = &hash_table->control->partitions[partition_index]; + + Assert(hash_table->control->magic == DSHASH_MAGIC); + Assert(!hash_table->find_locked); + +restart: + LWLockAcquire(PARTITION_LOCK(hash_table, partition_index), + LW_EXCLUSIVE); + ensure_valid_bucket_pointers(hash_table); + + /* Search the active bucket. */ + item = find_in_bucket(hash_table, key, BUCKET_FOR_HASH(hash_table, hash)); + + if (item) + *found = true; + else + { + *found = false; + + /* Check if we are getting too full. */ + if (partition->count > MAX_COUNT_PER_PARTITION(hash_table)) + { + /* + * The load factor (= keys / buckets) for all buckets protected by + * this partition is > 0.75. Presumably the same applies + * generally across the whole hash table (though we don't attempt + * to track that directly to avoid contention on some kind of + * central counter; we just assume that this partition is + * representative). This is a good time to resize. + * + * Give up our existing lock first, because resizing needs to + * reacquire all the locks in the right order to avoid deadlocks. + */ + LWLockRelease(PARTITION_LOCK(hash_table, partition_index)); + resize(hash_table, hash_table->size_log2 + 1); + + goto restart; + } + + /* Finally we can try to insert the new item. */ + item = insert_into_bucket(hash_table, key, + &BUCKET_FOR_HASH(hash_table, hash)); + item->hash = hash; + /* Adjust per-lock-partition counter for load factor knowledge. */ + ++partition->count; + } + + /* The caller must release the lock with dshash_release_lock. */ + hash_table->find_locked = true; + hash_table->find_exclusively_locked = true; + return ENTRY_FROM_ITEM(item); +} + +/* + * Remove an entry by key. Returns true if the key was found and the + * corresponding entry was removed. + * + * To delete an entry that you already have a pointer to, see + * dshash_delete_entry. + */ +bool +dshash_delete_key(dshash_table *hash_table, const void *key) +{ + dshash_hash hash; + size_t partition; + bool found; + + Assert(hash_table->control->magic == DSHASH_MAGIC); + Assert(!hash_table->find_locked); + + hash = hash_key(hash_table, key); + partition = PARTITION_FOR_HASH(hash); + + LWLockAcquire(PARTITION_LOCK(hash_table, partition), LW_EXCLUSIVE); + ensure_valid_bucket_pointers(hash_table); + + if (delete_key_from_bucket(hash_table, key, + &BUCKET_FOR_HASH(hash_table, hash))) + { + Assert(hash_table->control->partitions[partition].count > 0); + found = true; + --hash_table->control->partitions[partition].count; + } + else + found = false; + + LWLockRelease(PARTITION_LOCK(hash_table, partition)); + + return found; +} + +/* + * Remove an entry. The entry must already be exclusively locked, and must + * have been obtained by dshash_find or dshash_find_or_insert. Note that this + * function releases the lock just like dshash_release_lock. + * + * To delete an entry by key, see dshash_delete_key. 
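+ *
+ * For example (illustrative), deleting an entry found earlier:
+ *
+ *		entry = dshash_find(hash_table, &key, true);
+ *		if (entry)
+ *			dshash_delete_entry(hash_table, entry);
+ *
+ * where the exclusive lock taken by dshash_find is what permits the
+ * delete, and no separate dshash_release_lock call is needed afterwards.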
+ */ +void +dshash_delete_entry(dshash_table *hash_table, void *entry) +{ + dshash_table_item *item = ITEM_FROM_ENTRY(entry); + size_t partition = PARTITION_FOR_HASH(item->hash); + + Assert(hash_table->control->magic == DSHASH_MAGIC); + Assert(hash_table->find_locked); + Assert(hash_table->find_exclusively_locked); + Assert(LWLockHeldByMeInMode(PARTITION_LOCK(hash_table, partition), + LW_EXCLUSIVE)); + + delete_item(hash_table, item); + hash_table->find_locked = false; + hash_table->find_exclusively_locked = false; + LWLockRelease(PARTITION_LOCK(hash_table, partition)); +} + +/* + * Unlock an entry which was locked by dshash_find or dshash_find_or_insert. + */ +void +dshash_release_lock(dshash_table *hash_table, void *entry) +{ + dshash_table_item *item = ITEM_FROM_ENTRY(entry); + size_t partition_index = PARTITION_FOR_HASH(item->hash); + + Assert(hash_table->control->magic == DSHASH_MAGIC); + Assert(hash_table->find_locked); + Assert(LWLockHeldByMeInMode(PARTITION_LOCK(hash_table, partition_index), + hash_table->find_exclusively_locked + ? LW_EXCLUSIVE : LW_SHARED)); + + hash_table->find_locked = false; + hash_table->find_exclusively_locked = false; + LWLockRelease(PARTITION_LOCK(hash_table, partition_index)); +} + +/* + * A compare function that forwards to memcmp. + */ +int +dshash_memcmp(const void *a, const void *b, size_t size, void *arg) +{ + return memcmp(a, b, size); +} + +/* + * A hash function that forwards to tag_hash. + */ +dshash_hash +dshash_memhash(const void *v, size_t size, void *arg) +{ + return tag_hash(v, size); +} + +/* + * Print debugging information about the internal state of the hash table to + * stderr. The caller must hold no partition locks. + */ +void +dshash_dump(dshash_table *hash_table) +{ + size_t i; + size_t j; + + Assert(hash_table->control->magic == DSHASH_MAGIC); + Assert(!hash_table->find_locked); + + for (i = 0; i < DSHASH_NUM_PARTITIONS; ++i) + { + Assert(!LWLockHeldByMe(PARTITION_LOCK(hash_table, i))); + LWLockAcquire(PARTITION_LOCK(hash_table, i), LW_SHARED); + } + + ensure_valid_bucket_pointers(hash_table); + + fprintf(stderr, + "hash table size = %zu\n", (size_t) 1 << hash_table->size_log2); + for (i = 0; i < DSHASH_NUM_PARTITIONS; ++i) + { + dshash_partition *partition = &hash_table->control->partitions[i]; + size_t begin = BUCKET_INDEX_FOR_PARTITION(i, hash_table->size_log2); + size_t end = BUCKET_INDEX_FOR_PARTITION(i + 1, hash_table->size_log2); + + fprintf(stderr, " partition %zu\n", i); + fprintf(stderr, + " active buckets (key count = %zu)\n", partition->count); + + for (j = begin; j < end; ++j) + { + size_t count = 0; + dsa_pointer bucket = hash_table->buckets[j]; + + while (DsaPointerIsValid(bucket)) + { + dshash_table_item *item; + + item = dsa_get_address(hash_table->area, bucket); + + bucket = item->next; + ++count; + } + fprintf(stderr, " bucket %zu (key count = %zu)\n", j, count); + } + } + + for (i = 0; i < DSHASH_NUM_PARTITIONS; ++i) + LWLockRelease(PARTITION_LOCK(hash_table, i)); +} + +/* + * Delete a locked item to which we have a pointer. 
+ */ +static void +delete_item(dshash_table *hash_table, dshash_table_item *item) +{ + size_t hash = item->hash; + size_t partition = PARTITION_FOR_HASH(hash); + + Assert(LWLockHeldByMe(PARTITION_LOCK(hash_table, partition))); + + if (delete_item_from_bucket(hash_table, item, + &BUCKET_FOR_HASH(hash_table, hash))) + { + Assert(hash_table->control->partitions[partition].count > 0); + --hash_table->control->partitions[partition].count; + } + else + { + Assert(false); + } +} + +/* + * Grow the hash table if necessary to the requested number of buckets. The + * requested size must be double some previously observed size. + * + * Must be called without any partition lock held. + */ +static void +resize(dshash_table *hash_table, size_t new_size_log2) +{ + dsa_pointer old_buckets; + dsa_pointer new_buckets_shared; + dsa_pointer *new_buckets; + size_t size; + size_t new_size = ((size_t) 1) << new_size_log2; + size_t i; + + /* + * Acquire the locks for all lock partitions. This is expensive, but we + * shouldn't have to do it many times. + */ + for (i = 0; i < DSHASH_NUM_PARTITIONS; ++i) + { + Assert(!LWLockHeldByMe(PARTITION_LOCK(hash_table, i))); + + LWLockAcquire(PARTITION_LOCK(hash_table, i), LW_EXCLUSIVE); + if (i == 0 && hash_table->control->size_log2 >= new_size_log2) + { + /* + * Another backend has already increased the size; we can avoid + * obtaining all the locks and return early. + */ + LWLockRelease(PARTITION_LOCK(hash_table, 0)); + return; + } + } + + Assert(new_size_log2 == hash_table->control->size_log2 + 1); + + /* Allocate the space for the new table. */ + new_buckets_shared = dsa_allocate0(hash_table->area, + sizeof(dsa_pointer) * new_size); + new_buckets = dsa_get_address(hash_table->area, new_buckets_shared); + + /* + * We've allocated the new bucket array; all that remains to do now is to + * reinsert all items, which amounts to adjusting all the pointers. + */ + size = ((size_t) 1) << hash_table->control->size_log2; + for (i = 0; i < size; ++i) + { + dsa_pointer item_pointer = hash_table->buckets[i]; + + while (DsaPointerIsValid(item_pointer)) + { + dshash_table_item *item; + dsa_pointer next_item_pointer; + + item = dsa_get_address(hash_table->area, item_pointer); + next_item_pointer = item->next; + insert_item_into_bucket(hash_table, item_pointer, item, + &new_buckets[BUCKET_INDEX_FOR_HASH_AND_SIZE(item->hash, + new_size_log2)]); + item_pointer = next_item_pointer; + } + } + + /* Swap the hash table into place and free the old one. */ + old_buckets = hash_table->control->buckets; + hash_table->control->buckets = new_buckets_shared; + hash_table->control->size_log2 = new_size_log2; + hash_table->buckets = new_buckets; + dsa_free(hash_table->area, old_buckets); + + /* Release all the locks. */ + for (i = 0; i < DSHASH_NUM_PARTITIONS; ++i) + LWLockRelease(PARTITION_LOCK(hash_table, i)); +} + +/* + * Make sure that our backend-local bucket pointers are up to date. The + * caller must have locked one lock partition, which prevents resize() from + * running concurrently. + */ +static inline void +ensure_valid_bucket_pointers(dshash_table *hash_table) +{ + if (hash_table->size_log2 != hash_table->control->size_log2) + { + hash_table->buckets = dsa_get_address(hash_table->area, + hash_table->control->buckets); + hash_table->size_log2 = hash_table->control->size_log2; + } +} + +/* + * Scan a locked bucket for a match, using the provided compare function. 
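The pointer-only reinsertion in resize() works because the table size is always a power of two, so one extra bit of the hash decides which of two new buckets an item lands in. A standalone sketch, assuming the bucket index is taken from the high-order bits of the hash (as the contiguous per-partition bucket ranges in dshash_dump() suggest; the real BUCKET_INDEX_FOR_HASH_AND_SIZE() macro is defined elsewhere in this file):

    #include <limits.h>
    #include <stdint.h>
    #include <stdio.h>

    typedef uint32_t dshash_hash;   /* assumed width, for the sketch only */

    /* assumed definition: bucket index = top size_log2 bits of the hash */
    static size_t
    bucket_index(dshash_hash hash, int size_log2)
    {
        return (size_t) (hash >> (sizeof(dshash_hash) * CHAR_BIT - size_log2));
    }

    int
    main(void)
    {
        dshash_hash hash = 0xdeadbeefU;
        size_t      before = bucket_index(hash, 10);    /* 1024 buckets */
        size_t      after = bucket_index(hash, 11);     /* 2048 buckets */

        /* "after" is always 2 * before or 2 * before + 1 */
        printf("%zu -> %zu\n", before, after);
        return 0;
    }

Either way the hash is never recomputed during a resize; only the item->next pointers are rewritten.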
+ */ +static inline dshash_table_item * +find_in_bucket(dshash_table *hash_table, const void *key, + dsa_pointer item_pointer) +{ + while (DsaPointerIsValid(item_pointer)) + { + dshash_table_item *item; + + item = dsa_get_address(hash_table->area, item_pointer); + if (equal_keys(hash_table, key, ENTRY_FROM_ITEM(item))) + return item; + item_pointer = item->next; + } + return NULL; +} + +/* + * Insert an already-allocated item into a bucket. + */ +static void +insert_item_into_bucket(dshash_table *hash_table, + dsa_pointer item_pointer, + dshash_table_item *item, + dsa_pointer *bucket) +{ + Assert(item == dsa_get_address(hash_table->area, item_pointer)); + + item->next = *bucket; + *bucket = item_pointer; +} + +/* + * Allocate space for an entry with the given key and insert it into the + * provided bucket. + */ +static dshash_table_item * +insert_into_bucket(dshash_table *hash_table, + const void *key, + dsa_pointer *bucket) +{ + dsa_pointer item_pointer; + dshash_table_item *item; + + item_pointer = dsa_allocate(hash_table->area, + hash_table->params.entry_size + + MAXALIGN(sizeof(dshash_table_item))); + item = dsa_get_address(hash_table->area, item_pointer); + memcpy(ENTRY_FROM_ITEM(item), key, hash_table->params.key_size); + insert_item_into_bucket(hash_table, item_pointer, item, bucket); + return item; +} + +/* + * Search a bucket for a matching key and delete it. + */ +static bool +delete_key_from_bucket(dshash_table *hash_table, + const void *key, + dsa_pointer *bucket_head) +{ + while (DsaPointerIsValid(*bucket_head)) + { + dshash_table_item *item; + + item = dsa_get_address(hash_table->area, *bucket_head); + + if (equal_keys(hash_table, key, ENTRY_FROM_ITEM(item))) + { + dsa_pointer next; + + next = item->next; + dsa_free(hash_table->area, *bucket_head); + *bucket_head = next; + + return true; + } + bucket_head = &item->next; + } + return false; +} + +/* + * Delete the specified item from the bucket. + */ +static bool +delete_item_from_bucket(dshash_table *hash_table, + dshash_table_item *item, + dsa_pointer *bucket_head) +{ + while (DsaPointerIsValid(*bucket_head)) + { + dshash_table_item *bucket_item; + + bucket_item = dsa_get_address(hash_table->area, *bucket_head); + + if (bucket_item == item) + { + dsa_pointer next; + + next = item->next; + dsa_free(hash_table->area, *bucket_head); + *bucket_head = next; + return true; + } + bucket_head = &bucket_item->next; + } + return false; +} + +/* + * Compute the hash value for a key. + */ +static inline dshash_hash +hash_key(dshash_table *hash_table, const void *key) +{ + return hash_table->params.hash_function(key, + hash_table->params.key_size, + hash_table->arg); +} + +/* + * Check whether two keys compare equal. + */ +static inline bool +equal_keys(dshash_table *hash_table, const void *a, const void *b) +{ + return hash_table->params.compare_function(a, b, + hash_table->params.key_size, + hash_table->arg) == 0; +} diff --git a/src/backend/lib/hyperloglog.c b/src/backend/lib/hyperloglog.c index df7a67e7dc..3c50375a92 100644 --- a/src/backend/lib/hyperloglog.c +++ b/src/backend/lib/hyperloglog.c @@ -3,7 +3,7 @@ * hyperloglog.c * HyperLogLog cardinality estimator * - * Portions Copyright (c) 2014-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 2014-2018, PostgreSQL Global Development Group * * Based on Hideaki Ohno's C++ implementation. 
This is probably not ideally * suited to estimating the cardinality of very large sets; in particular, we diff --git a/src/backend/lib/ilist.c b/src/backend/lib/ilist.c index af8d656d3e..58bee57c76 100644 --- a/src/backend/lib/ilist.c +++ b/src/backend/lib/ilist.c @@ -3,7 +3,7 @@ * ilist.c * support for integrated/inline doubly- and singly- linked lists * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * diff --git a/src/backend/lib/knapsack.c b/src/backend/lib/knapsack.c index ddf2b9afa3..c7d9c4d8d2 100644 --- a/src/backend/lib/knapsack.c +++ b/src/backend/lib/knapsack.c @@ -15,7 +15,7 @@ * allows approximate solutions in polynomial time (the general case of the * exact problem is NP-hard). * - * Copyright (c) 2017, PostgreSQL Global Development Group + * Copyright (c) 2017-2018, PostgreSQL Global Development Group * * IDENTIFICATION * src/backend/lib/knapsack.c @@ -32,7 +32,6 @@ #include "nodes/bitmapset.h" #include "utils/builtins.h" #include "utils/memutils.h" -#include "utils/palloc.h" /* * DiscreteKnapsack @@ -57,9 +56,7 @@ DiscreteKnapsack(int max_weight, int num_items, { MemoryContext local_ctx = AllocSetContextCreate(CurrentMemoryContext, "Knapsack", - ALLOCSET_SMALL_MINSIZE, - ALLOCSET_SMALL_INITSIZE, - ALLOCSET_SMALL_MAXSIZE); + ALLOCSET_SMALL_SIZES); MemoryContext oldctx = MemoryContextSwitchTo(local_ctx); double *values; Bitmapset **sets; diff --git a/src/backend/lib/pairingheap.c b/src/backend/lib/pairingheap.c index fd871408f3..89d0f62f8f 100644 --- a/src/backend/lib/pairingheap.c +++ b/src/backend/lib/pairingheap.c @@ -14,7 +14,7 @@ * The pairing heap: a new form of self-adjusting heap. * Algorithmica 1, 1 (January 1986), pages 111-129. DOI: 10.1007/BF01840439 * - * Portions Copyright (c) 2012-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 2012-2018, PostgreSQL Global Development Group * * IDENTIFICATION * src/backend/lib/pairingheap.c diff --git a/src/backend/lib/rbtree.c b/src/backend/lib/rbtree.c index 3d80090a8c..c35928daad 100644 --- a/src/backend/lib/rbtree.c +++ b/src/backend/lib/rbtree.c @@ -17,7 +17,7 @@ * longest path from root to leaf is only about twice as long as the shortest, * so lookups are guaranteed to run in O(lg n) time. 
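Back to the knapsack.c hunk above: ALLOCSET_SMALL_SIZES collapses the old MINSIZE/INITSIZE/MAXSIZE triple into a single macro. A minimal sketch of the usual lifecycle of such a scratch context (the context name and the work done in it are illustrative; utils/memutils.h is assumed to be included):

    static void
    do_scratch_work(void)
    {
        MemoryContext scratch;
        MemoryContext oldctx;

        scratch = AllocSetContextCreate(CurrentMemoryContext,
                                        "scratch",
                                        ALLOCSET_SMALL_SIZES);
        oldctx = MemoryContextSwitchTo(scratch);

        /* ... palloc() freely here; everything lives in "scratch" ... */

        MemoryContextSwitchTo(oldctx);
        MemoryContextDelete(scratch);   /* frees everything at once */
    }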
* - * Copyright (c) 2009-2017, PostgreSQL Global Development Group + * Copyright (c) 2009-2018, PostgreSQL Global Development Group * * IDENTIFICATION * src/backend/lib/rbtree.c @@ -30,26 +30,26 @@ /* - * Colors of nodes (values of RBNode.color) + * Colors of nodes (values of RBTNode.color) */ -#define RBBLACK (0) -#define RBRED (1) +#define RBTBLACK (0) +#define RBTRED (1) /* * RBTree control structure */ struct RBTree { - RBNode *root; /* root node, or RBNIL if tree is empty */ + RBTNode *root; /* root node, or RBTNIL if tree is empty */ - /* Remaining fields are constant after rb_create */ + /* Remaining fields are constant after rbt_create */ Size node_size; /* actual size of tree nodes */ /* The caller-supplied manipulation functions */ - rb_comparator comparator; - rb_combiner combiner; - rb_allocfunc allocfunc; - rb_freefunc freefunc; + rbt_comparator comparator; + rbt_combiner combiner; + rbt_allocfunc allocfunc; + rbt_freefunc freefunc; /* Passthrough arg passed to all manipulation functions */ void *arg; }; @@ -58,39 +58,31 @@ struct RBTree * all leafs are sentinels, use customized NIL name to prevent * collision with system-wide constant NIL which is actually NULL */ -#define RBNIL (&sentinel) +#define RBTNIL (&sentinel) -static RBNode sentinel = {RBBLACK, RBNIL, RBNIL, NULL}; - -/* - * Values used in the RBTreeIterator.next_state field, with an - * InvertedWalk iterator. - */ -typedef enum InvertedWalkNextStep +static RBTNode sentinel = { - NextStepBegin, - NextStepUp, - NextStepLeft, - NextStepRight -} InvertedWalkNextStep; + RBTBLACK, RBTNIL, RBTNIL, NULL +}; + /* - * rb_create: create an empty RBTree + * rbt_create: create an empty RBTree * * Arguments are: - * node_size: actual size of tree nodes (> sizeof(RBNode)) + * node_size: actual size of tree nodes (> sizeof(RBTNode)) * The manipulation functions: - * comparator: compare two RBNodes for less/equal/greater + * comparator: compare two RBTNodes for less/equal/greater * combiner: merge an existing tree entry with a new one - * allocfunc: allocate a new RBNode - * freefunc: free an old RBNode + * allocfunc: allocate a new RBTNode + * freefunc: free an old RBTNode * arg: passthrough pointer that will be passed to the manipulation functions * * Note that the combiner's righthand argument will be a "proposed" tree node, - * ie the input to rb_insert, in which the RBNode fields themselves aren't + * ie the input to rbt_insert, in which the RBTNode fields themselves aren't * valid. Similarly, either input to the comparator may be a "proposed" node. * This shouldn't matter since the functions aren't supposed to look at the - * RBNode fields, only the extra fields of the struct the RBNode is embedded + * RBTNode fields, only the extra fields of the struct the RBTNode is embedded * in. * * The freefunc should just be pfree or equivalent; it should NOT attempt @@ -107,18 +99,18 @@ typedef enum InvertedWalkNextStep * the RBTree node if you feel the urge. 
*/ RBTree * -rb_create(Size node_size, - rb_comparator comparator, - rb_combiner combiner, - rb_allocfunc allocfunc, - rb_freefunc freefunc, - void *arg) +rbt_create(Size node_size, + rbt_comparator comparator, + rbt_combiner combiner, + rbt_allocfunc allocfunc, + rbt_freefunc freefunc, + void *arg) { RBTree *tree = (RBTree *) palloc(sizeof(RBTree)); - Assert(node_size > sizeof(RBNode)); + Assert(node_size > sizeof(RBTNode)); - tree->root = RBNIL; + tree->root = RBTNIL; tree->node_size = node_size; tree->comparator = comparator; tree->combiner = combiner; @@ -130,11 +122,11 @@ rb_create(Size node_size, return tree; } -/* Copy the additional data fields from one RBNode to another */ +/* Copy the additional data fields from one RBTNode to another */ static inline void -rb_copy_data(RBTree *rb, RBNode *dest, const RBNode *src) +rbt_copy_data(RBTree *rbt, RBTNode *dest, const RBTNode *src) { - memcpy(dest + 1, src + 1, rb->node_size - sizeof(RBNode)); + memcpy(dest + 1, src + 1, rbt->node_size - sizeof(RBTNode)); } /********************************************************************** @@ -142,21 +134,21 @@ rb_copy_data(RBTree *rb, RBNode *dest, const RBNode *src) **********************************************************************/ /* - * rb_find: search for a value in an RBTree + * rbt_find: search for a value in an RBTree * - * data represents the value to try to find. Its RBNode fields need not + * data represents the value to try to find. Its RBTNode fields need not * be valid, it's the extra data in the larger struct that is of interest. * * Returns the matching tree entry, or NULL if no match is found. */ -RBNode * -rb_find(RBTree *rb, const RBNode *data) +RBTNode * +rbt_find(RBTree *rbt, const RBTNode *data) { - RBNode *node = rb->root; + RBTNode *node = rbt->root; - while (node != RBNIL) + while (node != RBTNIL) { - int cmp = rb->comparator(data, node, rb->arg); + int cmp = rbt->comparator(data, node, rbt->arg); if (cmp == 0) return node; @@ -170,26 +162,26 @@ rb_find(RBTree *rb, const RBNode *data) } /* - * rb_leftmost: fetch the leftmost (smallest-valued) tree node. + * rbt_leftmost: fetch the leftmost (smallest-valued) tree node. * Returns NULL if tree is empty. * * Note: in the original implementation this included an unlink step, but - * that's a bit awkward. Just call rb_delete on the result if that's what + * that's a bit awkward. Just call rbt_delete on the result if that's what * you want. */ -RBNode * -rb_leftmost(RBTree *rb) +RBTNode * +rbt_leftmost(RBTree *rbt) { - RBNode *node = rb->root; - RBNode *leftmost = rb->root; + RBTNode *node = rbt->root; + RBTNode *leftmost = rbt->root; - while (node != RBNIL) + while (node != RBTNIL) { leftmost = node; node = node->left; } - if (leftmost != RBNIL) + if (leftmost != RBTNIL) return leftmost; return NULL; @@ -206,17 +198,17 @@ rb_leftmost(RBTree *rb) * child of that node. */ static void -rb_rotate_left(RBTree *rb, RBNode *x) +rbt_rotate_left(RBTree *rbt, RBTNode *x) { - RBNode *y = x->right; + RBTNode *y = x->right; /* establish x->right link */ x->right = y->left; - if (y->left != RBNIL) + if (y->left != RBTNIL) y->left->parent = x; /* establish y->parent link */ - if (y != RBNIL) + if (y != RBTNIL) y->parent = x->parent; if (x->parent) { @@ -227,12 +219,12 @@ rb_rotate_left(RBTree *rb, RBNode *x) } else { - rb->root = y; + rbt->root = y; } /* link x and y */ y->left = x; - if (x != RBNIL) + if (x != RBTNIL) x->parent = y; } @@ -243,17 +235,17 @@ rb_rotate_left(RBTree *rb, RBNode *x) * child of that node. 
*/ static void -rb_rotate_right(RBTree *rb, RBNode *x) +rbt_rotate_right(RBTree *rbt, RBTNode *x) { - RBNode *y = x->left; + RBTNode *y = x->left; /* establish x->left link */ x->left = y->right; - if (y->right != RBNIL) + if (y->right != RBTNIL) y->right->parent = x; /* establish y->parent link */ - if (y != RBNIL) + if (y != RBTNIL) y->parent = x->parent; if (x->parent) { @@ -264,12 +256,12 @@ rb_rotate_right(RBTree *rb, RBNode *x) } else { - rb->root = y; + rbt->root = y; } /* link x and y */ y->right = x; - if (x != RBNIL) + if (x != RBTNIL) x->parent = y; } @@ -287,13 +279,13 @@ rb_rotate_right(RBTree *rb, RBNode *x) * the invariant that every leaf has equal black-height.) */ static void -rb_insert_fixup(RBTree *rb, RBNode *x) +rbt_insert_fixup(RBTree *rbt, RBTNode *x) { /* * x is always a red node. Initially, it is the newly inserted node. Each * iteration of this loop moves it higher up in the tree. */ - while (x != rb->root && x->parent->color == RBRED) + while (x != rbt->root && x->parent->color == RBTRED) { /* * x and x->parent are both red. Fix depends on whether x->parent is @@ -313,60 +305,60 @@ rb_insert_fixup(RBTree *rb, RBNode *x) */ if (x->parent == x->parent->parent->left) { - RBNode *y = x->parent->parent->right; + RBTNode *y = x->parent->parent->right; - if (y->color == RBRED) + if (y->color == RBTRED) { - /* uncle is RBRED */ - x->parent->color = RBBLACK; - y->color = RBBLACK; - x->parent->parent->color = RBRED; + /* uncle is RBTRED */ + x->parent->color = RBTBLACK; + y->color = RBTBLACK; + x->parent->parent->color = RBTRED; x = x->parent->parent; } else { - /* uncle is RBBLACK */ + /* uncle is RBTBLACK */ if (x == x->parent->right) { /* make x a left child */ x = x->parent; - rb_rotate_left(rb, x); + rbt_rotate_left(rbt, x); } /* recolor and rotate */ - x->parent->color = RBBLACK; - x->parent->parent->color = RBRED; + x->parent->color = RBTBLACK; + x->parent->parent->color = RBTRED; - rb_rotate_right(rb, x->parent->parent); + rbt_rotate_right(rbt, x->parent->parent); } } else { /* mirror image of above code */ - RBNode *y = x->parent->parent->left; + RBTNode *y = x->parent->parent->left; - if (y->color == RBRED) + if (y->color == RBTRED) { - /* uncle is RBRED */ - x->parent->color = RBBLACK; - y->color = RBBLACK; - x->parent->parent->color = RBRED; + /* uncle is RBTRED */ + x->parent->color = RBTBLACK; + y->color = RBTBLACK; + x->parent->parent->color = RBTRED; x = x->parent->parent; } else { - /* uncle is RBBLACK */ + /* uncle is RBTBLACK */ if (x == x->parent->left) { x = x->parent; - rb_rotate_right(rb, x); + rbt_rotate_right(rbt, x); } - x->parent->color = RBBLACK; - x->parent->parent->color = RBRED; + x->parent->color = RBTBLACK; + x->parent->parent->color = RBTRED; - rb_rotate_left(rb, x->parent->parent); + rbt_rotate_left(rbt, x->parent->parent); } } } @@ -375,13 +367,13 @@ rb_insert_fixup(RBTree *rb, RBNode *x) * The root may already have been black; if not, the black-height of every * node in the tree increases by one. */ - rb->root->color = RBBLACK; + rbt->root->color = RBTBLACK; } /* - * rb_insert: insert a new value into the tree. + * rbt_insert: insert a new value into the tree. * - * data represents the value to insert. Its RBNode fields need not + * data represents the value to insert. Its RBTNode fields need not * be valid, it's the extra data in the larger struct that is of interest. 
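A minimal sketch of the embedding pattern that the rbt_create()/rbt_insert() comments above describe: the RBTNode is the first member of the caller's node type, and the callbacks only touch the fields that follow it. The node type and callback bodies are illustrative; the exact callback typedefs live in rbtree.h.

    typedef struct IntCountNode
    {
        RBTNode     rbtnode;        /* must be first */
        int         value;
        int         count;
    } IntCountNode;

    static int
    intcount_cmp(const RBTNode *a, const RBTNode *b, void *arg)
    {
        int         av = ((const IntCountNode *) a)->value;
        int         bv = ((const IntCountNode *) b)->value;

        return (av > bv) - (av < bv);
    }

    static void
    intcount_combine(RBTNode *existing, const RBTNode *newdata, void *arg)
    {
        ((IntCountNode *) existing)->count += ((const IntCountNode *) newdata)->count;
    }

    static RBTNode *
    intcount_alloc(void *arg)
    {
        return (RBTNode *) palloc(sizeof(IntCountNode));
    }

    static void
    intcount_free(RBTNode *node, void *arg)
    {
        pfree(node);
    }

    static RBTree *
    create_intcount_tree(void)
    {
        return rbt_create(sizeof(IntCountNode),
                          intcount_cmp,
                          intcount_combine,
                          intcount_alloc,
                          intcount_free,
                          NULL);
    }

    static void
    count_value(RBTree *tree, int value)
    {
        IntCountNode proposed;
        bool        isNew;

        /* Only the fields after the RBTNode need to be valid in "proposed". */
        proposed.value = value;
        proposed.count = 1;
        (void) rbt_insert(tree, (const RBTNode *) &proposed, &isNew);
    }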
* * If the value represented by "data" is not present in the tree, then @@ -395,28 +387,28 @@ rb_insert_fixup(RBTree *rb, RBNode *x) * "data" is unmodified in either case; it's typically just a local * variable in the caller. */ -RBNode * -rb_insert(RBTree *rb, const RBNode *data, bool *isNew) +RBTNode * +rbt_insert(RBTree *rbt, const RBTNode *data, bool *isNew) { - RBNode *current, + RBTNode *current, *parent, *x; int cmp; /* find where node belongs */ - current = rb->root; + current = rbt->root; parent = NULL; cmp = 0; /* just to prevent compiler warning */ - while (current != RBNIL) + while (current != RBTNIL) { - cmp = rb->comparator(data, current, rb->arg); + cmp = rbt->comparator(data, current, rbt->arg); if (cmp == 0) { /* * Found node with given key. Apply combiner. */ - rb->combiner(current, data, rb->arg); + rbt->combiner(current, data, rbt->arg); *isNew = false; return current; } @@ -429,14 +421,14 @@ rb_insert(RBTree *rb, const RBNode *data, bool *isNew) */ *isNew = true; - x = rb->allocfunc(rb->arg); + x = rbt->allocfunc(rbt->arg); - x->color = RBRED; + x->color = RBTRED; - x->left = RBNIL; - x->right = RBNIL; + x->left = RBTNIL; + x->right = RBTNIL; x->parent = parent; - rb_copy_data(rb, x, data); + rbt_copy_data(rbt, x, data); /* insert node in tree */ if (parent) @@ -448,10 +440,10 @@ rb_insert(RBTree *rb, const RBNode *data, bool *isNew) } else { - rb->root = x; + rbt->root = x; } - rb_insert_fixup(rb, x); + rbt_insert_fixup(rbt, x); return x; } @@ -464,14 +456,14 @@ rb_insert(RBTree *rb, const RBNode *data, bool *isNew) * Maintain Red-Black tree balance after deleting a black node. */ static void -rb_delete_fixup(RBTree *rb, RBNode *x) +rbt_delete_fixup(RBTree *rbt, RBTNode *x) { /* * x is always a black node. Initially, it is the former child of the * deleted node. Each iteration of this loop moves it higher up in the * tree. */ - while (x != rb->root && x->color == RBBLACK) + while (x != rbt->root && x->color == RBTBLACK) { /* * Left and right cases are symmetric. Any nodes that are children of @@ -482,92 +474,93 @@ rb_delete_fixup(RBTree *rb, RBNode *x) */ if (x == x->parent->left) { - RBNode *w = x->parent->right; + RBTNode *w = x->parent->right; - if (w->color == RBRED) + if (w->color == RBTRED) { - w->color = RBBLACK; - x->parent->color = RBRED; + w->color = RBTBLACK; + x->parent->color = RBTRED; - rb_rotate_left(rb, x->parent); + rbt_rotate_left(rbt, x->parent); w = x->parent->right; } - if (w->left->color == RBBLACK && w->right->color == RBBLACK) + if (w->left->color == RBTBLACK && w->right->color == RBTBLACK) { - w->color = RBRED; + w->color = RBTRED; x = x->parent; } else { - if (w->right->color == RBBLACK) + if (w->right->color == RBTBLACK) { - w->left->color = RBBLACK; - w->color = RBRED; + w->left->color = RBTBLACK; + w->color = RBTRED; - rb_rotate_right(rb, w); + rbt_rotate_right(rbt, w); w = x->parent->right; } w->color = x->parent->color; - x->parent->color = RBBLACK; - w->right->color = RBBLACK; + x->parent->color = RBTBLACK; + w->right->color = RBTBLACK; - rb_rotate_left(rb, x->parent); - x = rb->root; /* Arrange for loop to terminate. */ + rbt_rotate_left(rbt, x->parent); + x = rbt->root; /* Arrange for loop to terminate. 
*/ } } else { - RBNode *w = x->parent->left; + RBTNode *w = x->parent->left; - if (w->color == RBRED) + if (w->color == RBTRED) { - w->color = RBBLACK; - x->parent->color = RBRED; + w->color = RBTBLACK; + x->parent->color = RBTRED; - rb_rotate_right(rb, x->parent); + rbt_rotate_right(rbt, x->parent); w = x->parent->left; } - if (w->right->color == RBBLACK && w->left->color == RBBLACK) + if (w->right->color == RBTBLACK && w->left->color == RBTBLACK) { - w->color = RBRED; + w->color = RBTRED; x = x->parent; } else { - if (w->left->color == RBBLACK) + if (w->left->color == RBTBLACK) { - w->right->color = RBBLACK; - w->color = RBRED; + w->right->color = RBTBLACK; + w->color = RBTRED; - rb_rotate_left(rb, w); + rbt_rotate_left(rbt, w); w = x->parent->left; } w->color = x->parent->color; - x->parent->color = RBBLACK; - w->left->color = RBBLACK; + x->parent->color = RBTBLACK; + w->left->color = RBTBLACK; - rb_rotate_right(rb, x->parent); - x = rb->root; /* Arrange for loop to terminate. */ + rbt_rotate_right(rbt, x->parent); + x = rbt->root; /* Arrange for loop to terminate. */ } } } - x->color = RBBLACK; + x->color = RBTBLACK; } /* * Delete node z from tree. */ static void -rb_delete_node(RBTree *rb, RBNode *z) +rbt_delete_node(RBTree *rbt, RBTNode *z) { - RBNode *x, + RBTNode *x, *y; - if (!z || z == RBNIL) + /* This is just paranoia: we should only get called on a valid node */ + if (!z || z == RBTNIL) return; /* @@ -575,21 +568,21 @@ rb_delete_node(RBTree *rb, RBNode *z) * be z if z has fewer than two children, or the tree successor of z * otherwise. */ - if (z->left == RBNIL || z->right == RBNIL) + if (z->left == RBTNIL || z->right == RBTNIL) { - /* y has a RBNIL node as a child */ + /* y has a RBTNIL node as a child */ y = z; } else { /* find tree successor */ y = z->right; - while (y->left != RBNIL) + while (y->left != RBTNIL) y = y->left; } /* x is y's only child */ - if (y->left != RBNIL) + if (y->left != RBTNIL) x = y->left; else x = y->right; @@ -605,7 +598,7 @@ rb_delete_node(RBTree *rb, RBNode *z) } else { - rb->root = x; + rbt->root = x; } /* @@ -613,55 +606,55 @@ rb_delete_node(RBTree *rb, RBNode *z) * the data for the removed node to the one we were supposed to remove. */ if (y != z) - rb_copy_data(rb, z, y); + rbt_copy_data(rbt, z, y); /* * Removing a black node might make some paths from root to leaf contain * fewer black nodes than others, or it might make two red nodes adjacent. */ - if (y->color == RBBLACK) - rb_delete_fixup(rb, x); + if (y->color == RBTBLACK) + rbt_delete_fixup(rbt, x); /* Now we can recycle the y node */ - if (rb->freefunc) - rb->freefunc(y, rb->arg); + if (rbt->freefunc) + rbt->freefunc(y, rbt->arg); } /* - * rb_delete: remove the given tree entry + * rbt_delete: remove the given tree entry * - * "node" must have previously been found via rb_find or rb_leftmost. + * "node" must have previously been found via rbt_find or rbt_leftmost. * It is caller's responsibility to free any subsidiary data attached - * to the node before calling rb_delete. (Do *not* try to push that + * to the node before calling rbt_delete. (Do *not* try to push that * responsibility off to the freefunc, as some other physical node * may be the one actually freed!) 
*/ void -rb_delete(RBTree *rb, RBNode *node) +rbt_delete(RBTree *rbt, RBTNode *node) { - rb_delete_node(rb, node); + rbt_delete_node(rbt, node); } /********************************************************************** * Traverse * **********************************************************************/ -static RBNode * -rb_left_right_iterator(RBTreeIterator *iter) +static RBTNode * +rbt_left_right_iterator(RBTreeIterator *iter) { if (iter->last_visited == NULL) { - iter->last_visited = iter->rb->root; - while (iter->last_visited->left != RBNIL) + iter->last_visited = iter->rbt->root; + while (iter->last_visited->left != RBTNIL) iter->last_visited = iter->last_visited->left; return iter->last_visited; } - if (iter->last_visited->right != RBNIL) + if (iter->last_visited->right != RBTNIL) { iter->last_visited = iter->last_visited->right; - while (iter->last_visited->left != RBNIL) + while (iter->last_visited->left != RBTNIL) iter->last_visited = iter->last_visited->left; return iter->last_visited; @@ -669,7 +662,7 @@ rb_left_right_iterator(RBTreeIterator *iter) for (;;) { - RBNode *came_from = iter->last_visited; + RBTNode *came_from = iter->last_visited; iter->last_visited = iter->last_visited->parent; if (iter->last_visited == NULL) @@ -688,22 +681,22 @@ rb_left_right_iterator(RBTreeIterator *iter) return iter->last_visited; } -static RBNode * -rb_right_left_iterator(RBTreeIterator *iter) +static RBTNode * +rbt_right_left_iterator(RBTreeIterator *iter) { if (iter->last_visited == NULL) { - iter->last_visited = iter->rb->root; - while (iter->last_visited->right != RBNIL) + iter->last_visited = iter->rbt->root; + while (iter->last_visited->right != RBTNIL) iter->last_visited = iter->last_visited->right; return iter->last_visited; } - if (iter->last_visited->left != RBNIL) + if (iter->last_visited->left != RBTNIL) { iter->last_visited = iter->last_visited->left; - while (iter->last_visited->right != RBNIL) + while (iter->last_visited->right != RBTNIL) iter->last_visited = iter->last_visited->right; return iter->last_visited; @@ -711,7 +704,7 @@ rb_right_left_iterator(RBTreeIterator *iter) for (;;) { - RBNode *came_from = iter->last_visited; + RBTNode *came_from = iter->last_visited; iter->last_visited = iter->last_visited->parent; if (iter->last_visited == NULL) @@ -730,149 +723,34 @@ rb_right_left_iterator(RBTreeIterator *iter) return iter->last_visited; } -static RBNode * -rb_direct_iterator(RBTreeIterator *iter) -{ - if (iter->last_visited == NULL) - { - iter->last_visited = iter->rb->root; - return iter->last_visited; - } - - if (iter->last_visited->left != RBNIL) - { - iter->last_visited = iter->last_visited->left; - return iter->last_visited; - } - - do - { - if (iter->last_visited->right != RBNIL) - { - iter->last_visited = iter->last_visited->right; - break; - } - - /* go up and one step right */ - for (;;) - { - RBNode *came_from = iter->last_visited; - - iter->last_visited = iter->last_visited->parent; - if (iter->last_visited == NULL) - { - iter->is_over = true; - break; - } - - if ((iter->last_visited->right != came_from) && (iter->last_visited->right != RBNIL)) - { - iter->last_visited = iter->last_visited->right; - return iter->last_visited; - } - } - } - while (iter->last_visited != NULL); - - return iter->last_visited; -} - -static RBNode * -rb_inverted_iterator(RBTreeIterator *iter) -{ - RBNode *came_from; - RBNode *current; - - current = iter->last_visited; - -loop: - switch ((InvertedWalkNextStep) iter->next_step) - { - /* First call, begin from root */ - case NextStepBegin: 
- current = iter->rb->root; - iter->next_step = NextStepLeft; - goto loop; - - case NextStepLeft: - while (current->left != RBNIL) - current = current->left; - - iter->next_step = NextStepRight; - goto loop; - - case NextStepRight: - if (current->right != RBNIL) - { - current = current->right; - iter->next_step = NextStepLeft; - goto loop; - } - else /* not moved - return current, then go up */ - iter->next_step = NextStepUp; - break; - - case NextStepUp: - came_from = current; - current = current->parent; - if (current == NULL) - { - iter->is_over = true; - break; /* end of iteration */ - } - else if (came_from == current->right) - { - /* return current, then continue to go up */ - break; - } - else - { - /* otherwise we came from the left */ - Assert(came_from == current->left); - iter->next_step = NextStepRight; - goto loop; - } - } - - iter->last_visited = current; - return current; -} - /* - * rb_begin_iterate: prepare to traverse the tree in any of several orders + * rbt_begin_iterate: prepare to traverse the tree in any of several orders * - * After calling rb_begin_iterate, call rb_iterate repeatedly until it + * After calling rbt_begin_iterate, call rbt_iterate repeatedly until it * returns NULL or the traversal stops being of interest. * * If the tree is changed during traversal, results of further calls to - * rb_iterate are unspecified. Multiple concurrent iterators on the same + * rbt_iterate are unspecified. Multiple concurrent iterators on the same * tree are allowed. * * The iterator state is stored in the 'iter' struct. The caller should - * treat it as opaque struct. + * treat it as an opaque struct. */ void -rb_begin_iterate(RBTree *rb, RBOrderControl ctrl, RBTreeIterator *iter) +rbt_begin_iterate(RBTree *rbt, RBTOrderControl ctrl, RBTreeIterator *iter) { /* Common initialization for all traversal orders */ - iter->rb = rb; + iter->rbt = rbt; iter->last_visited = NULL; - iter->is_over = (rb->root == RBNIL); + iter->is_over = (rbt->root == RBTNIL); switch (ctrl) { case LeftRightWalk: /* visit left, then self, then right */ - iter->iterate = rb_left_right_iterator; + iter->iterate = rbt_left_right_iterator; break; case RightLeftWalk: /* visit right, then self, then left */ - iter->iterate = rb_right_left_iterator; - break; - case DirectWalk: /* visit self, then left, then right */ - iter->iterate = rb_direct_iterator; - break; - case InvertedWalk: /* visit left, then right, then self */ - iter->iterate = rb_inverted_iterator; - iter->next_step = NextStepBegin; + iter->iterate = rbt_right_left_iterator; break; default: elog(ERROR, "unrecognized rbtree iteration order: %d", ctrl); @@ -880,10 +758,10 @@ rb_begin_iterate(RBTree *rb, RBOrderControl ctrl, RBTreeIterator *iter) } /* - * rb_iterate: return the next node in traversal order, or NULL if no more + * rbt_iterate: return the next node in traversal order, or NULL if no more */ -RBNode * -rb_iterate(RBTreeIterator *iter) +RBTNode * +rbt_iterate(RBTreeIterator *iter) { if (iter->is_over) return NULL; diff --git a/src/backend/lib/stringinfo.c b/src/backend/lib/stringinfo.c index fd15567144..df7e01f76d 100644 --- a/src/backend/lib/stringinfo.c +++ b/src/backend/lib/stringinfo.c @@ -6,7 +6,7 @@ * It can be used to buffer either ordinary C strings (null-terminated text) * or arbitrary binary data. All storage is allocated with palloc(). 
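Returning to the iterator renames above: a short sketch of an in-order walk with the new names, reusing the IntCountNode type from the earlier rbt_insert sketch (illustrative only):

    static void
    print_counts(RBTree *tree)
    {
        RBTreeIterator iter;
        RBTNode    *node;

        rbt_begin_iterate(tree, LeftRightWalk, &iter);
        while ((node = rbt_iterate(&iter)) != NULL)
        {
            IntCountNode *n = (IntCountNode *) node;

            elog(DEBUG1, "value %d seen %d times", n->value, n->count);
        }
    }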
* - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/backend/lib/stringinfo.c @@ -77,12 +77,15 @@ resetStringInfo(StringInfo str) void appendStringInfo(StringInfo str, const char *fmt,...) { + int save_errno = errno; + for (;;) { va_list args; int needed; /* Try to format the data. */ + errno = save_errno; va_start(args, fmt); needed = appendStringInfoVA(str, fmt, args); va_end(args); @@ -105,6 +108,9 @@ appendStringInfo(StringInfo str, const char *fmt,...) * pass the return value to enlargeStringInfo() before trying again; see * appendStringInfo for standard usage pattern. * + * Caution: callers must be sure to preserve their entry-time errno + * when looping, in case the fmt contains "%m". + * * XXX This API is ugly, but there seems no alternative given the C spec's * restrictions on what can portably be done with va_list arguments: you have * to redo va_start before you can rescan the argument list, and we can't do @@ -202,7 +208,7 @@ appendStringInfoSpaces(StringInfo str, int count) * appendBinaryStringInfo * * Append arbitrary binary data to a StringInfo, allocating more space - * if necessary. + * if necessary. Ensures that a trailing null byte is present. */ void appendBinaryStringInfo(StringInfo str, const char *data, int datalen) @@ -224,6 +230,25 @@ appendBinaryStringInfo(StringInfo str, const char *data, int datalen) str->data[str->len] = '\0'; } +/* + * appendBinaryStringInfoNT + * + * Append arbitrary binary data to a StringInfo, allocating more space + * if necessary. Does not ensure a trailing null-byte exists. + */ +void +appendBinaryStringInfoNT(StringInfo str, const char *data, int datalen) +{ + Assert(str != NULL); + + /* Make more room if needed */ + enlargeStringInfo(str, datalen); + + /* OK, append the data */ + memcpy(str->data + str->len, data, datalen); + str->len += datalen; +} + /* * enlargeStringInfo * diff --git a/src/backend/libpq/Makefile b/src/backend/libpq/Makefile index 7fa2b02743..3dbec23e30 100644 --- a/src/backend/libpq/Makefile +++ b/src/backend/libpq/Makefile @@ -14,7 +14,7 @@ include $(top_builddir)/src/Makefile.global # be-fsstubs is here for historical reasons, probably belongs elsewhere -OBJS = be-fsstubs.o be-secure.o auth.o crypt.o hba.o ifaddr.o pqcomm.o \ +OBJS = be-fsstubs.o be-secure.o be-secure-common.o auth.o crypt.o hba.o ifaddr.o pqcomm.o \ pqformat.o pqmq.o pqsignal.o auth-scram.o ifeq ($(with_openssl),yes) diff --git a/src/backend/libpq/README.SSL b/src/backend/libpq/README.SSL index 53dc9dd005..d84a434a6e 100644 --- a/src/backend/libpq/README.SSL +++ b/src/backend/libpq/README.SSL @@ -58,3 +58,25 @@ SSL Fail with unknown --------------------------------------------------------------------------- + +Ephemeral DH +============ + +Since the server static private key ($DataDir/server.key) will +normally be stored unencrypted so that the database backend can +restart automatically, it is important that we select an algorithm +that continues to provide confidentiality even if the attacker has the +server's private key. Ephemeral DH (EDH) keys provide this and more +(Perfect Forward Secrecy aka PFS). + +N.B., the static private key should still be protected to the largest +extent possible, to minimize the risk of impersonations. + +Another benefit of EDH is that it allows the backend and clients to +use DSA keys. 
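Back to the appendBinaryStringInfoNT() addition above: skipping the terminator pays off when many chunks are appended in a row. A minimal sketch (the function and parameter names are illustrative):

    static void
    pack_chunks(StringInfo buf, const char **chunks, const int *lens, int nchunks)
    {
        int         i;

        for (i = 0; i < nchunks; i++)
        {
            if (i < nchunks - 1)
                appendBinaryStringInfoNT(buf, chunks[i], lens[i]);  /* no '\0' each time */
            else
                appendBinaryStringInfo(buf, chunks[i], lens[i]);    /* terminate once */
        }
    }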
DSA keys can only provide digital signatures, not +encryption, and are often acceptable in jurisdictions where RSA keys +are unacceptable. + +The downside to EDH is that it makes it impossible to use ssldump(1) +if there's a problem establishing an SSL session. In this case you'll +need to temporarily disable EDH (see initialize_dh()). diff --git a/src/backend/libpq/auth-scram.c b/src/backend/libpq/auth-scram.c index 0b69f106f1..e997c94600 100644 --- a/src/backend/libpq/auth-scram.c +++ b/src/backend/libpq/auth-scram.c @@ -17,7 +17,18 @@ * by the SASLprep profile, we skip the SASLprep pre-processing and use * the raw bytes in calculating the hash. * - * - Channel binding is not supported yet. + * - If channel binding is used, the channel binding type is always + * "tls-server-end-point". The spec says the default is "tls-unique" + * (RFC 5802, section 6.1. Default Channel Binding), but there are some + * problems with that. Firstly, not all SSL libraries provide an API to + * get the TLS Finished message, required to use "tls-unique". Secondly, + * "tls-unique" is not specified for TLS v1.3, and as of this writing, + * it's not clear if there will be a replacement. We could support both + * "tls-server-end-point" and "tls-unique", but for our use case, + * "tls-unique" doesn't really have any advantages. The main advantage + * of "tls-unique" would be that it works even if the server doesn't + * have a certificate, but PostgreSQL requires a server certificate + * whenever SSL is used, anyway. * * * The password stored in pg_authid consists of the iteration count, salt, @@ -69,7 +80,7 @@ * general, after logging in, but let's do what we can here. * * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/backend/libpq/auth-scram.c @@ -112,12 +123,16 @@ typedef struct const char *username; /* username from startup packet */ + Port *port; + bool channel_binding_in_use; + int iterations; char *salt; /* base64-encoded */ uint8 StoredKey[SCRAM_KEY_LEN]; uint8 ServerKey[SCRAM_KEY_LEN]; /* Fields of the first message from client */ + char cbind_flag; char *client_first_message_bare; char *client_username; char *client_nonce; @@ -153,8 +168,38 @@ static void mock_scram_verifier(const char *username, int *iterations, char **salt, uint8 *stored_key, uint8 *server_key); static bool is_scram_printable(char *p); static char *sanitize_char(char c); +static char *sanitize_str(const char *s); static char *scram_mock_salt(const char *username); +/* + * pg_be_scram_get_mechanisms + * + * Get a list of SASL mechanisms that this module supports. + * + * For the convenience of building the FE/BE packet that lists the + * mechanisms, the names are appended to the given StringInfo buffer, + * separated by '\0' bytes. + */ +void +pg_be_scram_get_mechanisms(Port *port, StringInfo buf) +{ + /* + * Advertise the mechanisms in decreasing order of importance. So the + * channel-binding variants go first, if they are supported. Channel + * binding is only supported with SSL, and only if the SSL implementation + * has a function to get the certificate's hash. 
+ */ +#ifdef HAVE_BE_TLS_GET_CERTIFICATE_HASH + if (port->ssl_in_use) + { + appendStringInfoString(buf, SCRAM_SHA_256_PLUS_NAME); + appendStringInfoChar(buf, '\0'); + } +#endif + appendStringInfoString(buf, SCRAM_SHA_256_NAME); + appendStringInfoChar(buf, '\0'); +} + /* * pg_be_scram_init * @@ -162,20 +207,48 @@ static char *scram_mock_salt(const char *username); * needs to be called before doing any exchange. It will be filled later * after the beginning of the exchange with verifier data. * - * 'username' is the username provided by the client in the startup message. + * 'selected_mech' identifies the SASL mechanism that the client selected. + * It should be one of the mechanisms that we support, as returned by + * pg_be_scram_get_mechanisms(). + * * 'shadow_pass' is the role's password verifier, from pg_authid.rolpassword. - * If 'shadow_pass' is NULL, we still perform an authentication exchange, but - * it will fail, as if an incorrect password was given. + * The username was provided by the client in the startup message, and is + * available in port->user_name. If 'shadow_pass' is NULL, we still perform + * an authentication exchange, but it will fail, as if an incorrect password + * was given. */ void * -pg_be_scram_init(const char *username, const char *shadow_pass) +pg_be_scram_init(Port *port, + const char *selected_mech, + const char *shadow_pass) { scram_state *state; bool got_verifier; state = (scram_state *) palloc0(sizeof(scram_state)); + state->port = port; state->state = SCRAM_AUTH_INIT; - state->username = username; + + /* + * Parse the selected mechanism. + * + * Note that if we don't support channel binding, either because the SSL + * implementation doesn't support it or we're not using SSL at all, we + * would not have advertised the PLUS variant in the first place. If the + * client nevertheless tries to select it, it's a protocol violation like + * selecting any other SASL mechanism we don't support. + */ +#ifdef HAVE_BE_TLS_GET_CERTIFICATE_HASH + if (strcmp(selected_mech, SCRAM_SHA_256_PLUS_NAME) == 0 && port->ssl_in_use) + state->channel_binding_in_use = true; + else +#endif + if (strcmp(selected_mech, SCRAM_SHA_256_NAME) == 0) + state->channel_binding_in_use = false; + else + ereport(ERROR, + (errcode(ERRCODE_PROTOCOL_VIOLATION), + errmsg("client selected an invalid SASL authentication mechanism"))); /* * Parse the stored password verifier. @@ -197,7 +270,7 @@ pg_be_scram_init(const char *username, const char *shadow_pass) */ ereport(LOG, (errmsg("invalid SCRAM verifier for user \"%s\"", - username))); + state->port->user_name))); got_verifier = false; } } @@ -208,7 +281,7 @@ pg_be_scram_init(const char *username, const char *shadow_pass) * authentication with an MD5 hash.) 
*/ state->logdetail = psprintf(_("User \"%s\" does not have a valid SCRAM verifier."), - state->username); + state->port->user_name); got_verifier = false; } } @@ -230,8 +303,8 @@ pg_be_scram_init(const char *username, const char *shadow_pass) */ if (!got_verifier) { - mock_scram_verifier(username, &state->iterations, &state->salt, - state->StoredKey, state->ServerKey); + mock_scram_verifier(state->port->user_name, &state->iterations, + &state->salt, state->StoredKey, state->ServerKey); state->doomed = true; } @@ -380,7 +453,7 @@ pg_be_scram_exchange(void *opaq, char *input, int inputlen, char * pg_be_scram_build_verifier(const char *password) { - char *prep_password = NULL; + char *prep_password; pg_saslprep_rc rc; char saltbuf[SCRAM_DEFAULT_SALT_LEN]; char *result; @@ -426,7 +499,7 @@ scram_verify_plain_password(const char *username, const char *password, uint8 stored_key[SCRAM_KEY_LEN]; uint8 server_key[SCRAM_KEY_LEN]; uint8 computed_key[SCRAM_KEY_LEN]; - char *prep_password = NULL; + char *prep_password; pg_saslprep_rc rc; if (!parse_scram_verifier(verifier, &iterations, &encoded_salt, @@ -573,7 +646,7 @@ mock_scram_verifier(const char *username, int *iterations, char **salt, } /* - * Read the value in a given SASL exchange message for given attribute. + * Read the value in a given SCRAM exchange message for given attribute. */ static char * read_attr_value(char **input, char attr) @@ -585,7 +658,7 @@ read_attr_value(char **input, char attr) ereport(ERROR, (errcode(ERRCODE_PROTOCOL_VIOLATION), errmsg("malformed SCRAM message"), - errdetail("Expected attribute '%c' but found %s.", + errdetail("Expected attribute \"%c\" but found \"%s\".", attr, sanitize_char(*begin)))); begin++; @@ -593,7 +666,7 @@ read_attr_value(char **input, char attr) ereport(ERROR, (errcode(ERRCODE_PROTOCOL_VIOLATION), errmsg("malformed SCRAM message"), - errdetail("Expected character = for attribute %c.", attr))); + errdetail("Expected character \"=\" for attribute \"%c\".", attr))); begin++; end = begin; @@ -652,7 +725,37 @@ sanitize_char(char c) } /* - * Read the next attribute and value in a SASL exchange message. + * Convert an arbitrary string to printable form, for error messages. + * + * Anything that's not a printable ASCII character is replaced with + * '?', and the string is truncated at 30 characters. + * + * The returned pointer points to a static buffer. + */ +static char * +sanitize_str(const char *s) +{ + static char buf[30 + 1]; + int i; + + for (i = 0; i < sizeof(buf) - 1; i++) + { + char c = s[i]; + + if (c == '\0') + break; + + if (c >= 0x21 && c <= 0x7E) + buf[i] = c; + else + buf[i] = '?'; + } + buf[i] = '\0'; + return buf; +} + +/* + * Read the next attribute and value in a SCRAM exchange message. * * Returns NULL if there is attribute. 
*/ @@ -674,7 +777,7 @@ read_any_attr(char **input, char *attr_p) ereport(ERROR, (errcode(ERRCODE_PROTOCOL_VIOLATION), errmsg("malformed SCRAM message"), - errdetail("Attribute expected, but found invalid character %s.", + errdetail("Attribute expected, but found invalid character \"%s\".", sanitize_char(attr)))); if (attr_p) *attr_p = attr; @@ -684,7 +787,7 @@ read_any_attr(char **input, char *attr_p) ereport(ERROR, (errcode(ERRCODE_PROTOCOL_VIOLATION), errmsg("malformed SCRAM message"), - errdetail("Expected character = for attribute %c.", attr))); + errdetail("Expected character \"=\" for attribute \"%c\".", attr))); begin++; end = begin; @@ -703,7 +806,7 @@ read_any_attr(char **input, char *attr_p) } /* - * Read and parse the first message from client in the context of a SASL + * Read and parse the first message from client in the context of a SCRAM * authentication exchange message. * * At this stage, any errors will be reported directly with ereport(ERROR). @@ -711,6 +814,8 @@ read_any_attr(char **input, char *attr_p) static void read_client_first_message(scram_state *state, char *input) { + char *channel_binding_type; + input = pstrdup(input); /*------ @@ -773,45 +878,95 @@ read_client_first_message(scram_state *state, char *input) *------ */ - /* read gs2-cbind-flag */ + /* + * Read gs2-cbind-flag. (For details see also RFC 5802 Section 6 "Channel + * Binding".) + */ + state->cbind_flag = *input; switch (*input) { case 'n': - /* Client does not support channel binding */ + + /* + * The client does not support channel binding or has simply + * decided to not use it. In that case just let it go. + */ + if (state->channel_binding_in_use) + ereport(ERROR, + (errcode(ERRCODE_PROTOCOL_VIOLATION), + errmsg("malformed SCRAM message"), + errdetail("The client selected SCRAM-SHA-256-PLUS, but the SCRAM message does not include channel binding data."))); + + input++; + if (*input != ',') + ereport(ERROR, + (errcode(ERRCODE_PROTOCOL_VIOLATION), + errmsg("malformed SCRAM message"), + errdetail("Comma expected, but found character \"%s\".", + sanitize_char(*input)))); input++; break; case 'y': - /* Client supports channel binding, but we're not doing it today */ + + /* + * The client supports channel binding and thinks that the server + * does not. In this case, the server must fail authentication if + * it supports channel binding. + */ + if (state->channel_binding_in_use) + ereport(ERROR, + (errcode(ERRCODE_PROTOCOL_VIOLATION), + errmsg("malformed SCRAM message"), + errdetail("The client selected SCRAM-SHA-256-PLUS, but the SCRAM message does not include channel binding data."))); + +#ifdef HAVE_BE_TLS_GET_CERTIFICATE_HASH + if (state->port->ssl_in_use) + ereport(ERROR, + (errcode(ERRCODE_INVALID_AUTHORIZATION_SPECIFICATION), + errmsg("SCRAM channel binding negotiation error"), + errdetail("The client supports SCRAM channel binding but thinks the server does not. " + "However, this server does support channel binding."))); +#endif + input++; + if (*input != ',') + ereport(ERROR, + (errcode(ERRCODE_PROTOCOL_VIOLATION), + errmsg("malformed SCRAM message"), + errdetail("Comma expected, but found character \"%s\".", + sanitize_char(*input)))); input++; break; case 'p': /* - * Client requires channel binding. We don't support it. - * - * RFC 5802 specifies a particular error code, - * e=server-does-support-channel-binding, for this. 
But it can - * only be sent in the server-final message, and we don't want to - * go through the motions of the authentication, knowing it will - * fail, just to send that error message. + * The client requires channel binding. Channel binding type + * follows, e.g., "p=tls-server-end-point". */ - ereport(ERROR, - (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("client requires SCRAM channel binding, but it is not supported"))); + if (!state->channel_binding_in_use) + ereport(ERROR, + (errcode(ERRCODE_PROTOCOL_VIOLATION), + errmsg("malformed SCRAM message"), + errdetail("The client selected SCRAM-SHA-256 without channel binding, but the SCRAM message includes channel binding data."))); + + channel_binding_type = read_attr_value(&input, 'p'); + + /* + * The only channel binding type we support is + * tls-server-end-point. + */ + if (strcmp(channel_binding_type, "tls-server-end-point") != 0) + ereport(ERROR, + (errcode(ERRCODE_PROTOCOL_VIOLATION), + (errmsg("unsupported SCRAM channel-binding type \"%s\"", + sanitize_str(channel_binding_type))))); + break; default: ereport(ERROR, (errcode(ERRCODE_PROTOCOL_VIOLATION), errmsg("malformed SCRAM message"), - errdetail("Unexpected channel-binding flag %s.", + errdetail("Unexpected channel-binding flag \"%s\".", sanitize_char(*input)))); } - if (*input != ',') - ereport(ERROR, - (errcode(ERRCODE_PROTOCOL_VIOLATION), - errmsg("malformed SCRAM message"), - errdetail("Comma expected, but found character %s.", - sanitize_char(*input)))); - input++; /* * Forbid optional authzid (authorization identity). We don't support it. @@ -824,7 +979,7 @@ read_client_first_message(scram_state *state, char *input) ereport(ERROR, (errcode(ERRCODE_PROTOCOL_VIOLATION), errmsg("malformed SCRAM message"), - errdetail("Unexpected attribute %s in client-first-message.", + errdetail("Unexpected attribute \"%s\" in client-first-message.", sanitize_char(*input)))); input++; @@ -929,7 +1084,7 @@ verify_client_proof(scram_state *state) } /* - * Build the first server-side message sent to the client in a SASL + * Build the first server-side message sent to the client in a SCRAM * communication exchange. */ static char * @@ -1032,14 +1187,70 @@ read_client_final_message(scram_state *state, char *input) */ /* - * Read channel-binding. We don't support channel binding, so it's - * expected to always be "biws", which is "n,,", base64-encoded. + * Read channel binding. This repeats the channel-binding flags and is + * then followed by the actual binding data depending on the type. 
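For reference, RFC 5802-style client-first-messages for the three gs2-cbind-flag cases handled above; the username and nonce are the RFC's illustrative values, not anything this patch mandates:

    static const char *const gs2_examples[] = {
        /* 'n': the client does not use channel binding */
        "n,,n=user,r=fyko+d2lbbFgONRv9qkxdawL",
        /* 'y': the client could bind but believes the server cannot */
        "y,,n=user,r=fyko+d2lbbFgONRv9qkxdawL",
        /* 'p': SCRAM-SHA-256-PLUS with the only binding type we accept */
        "p=tls-server-end-point,,n=user,r=fyko+d2lbbFgONRv9qkxdawL",
    };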
*/ channel_binding = read_attr_value(&p, 'c'); - if (strcmp(channel_binding, "biws") != 0) - ereport(ERROR, - (errcode(ERRCODE_PROTOCOL_VIOLATION), - (errmsg("unexpected SCRAM channel-binding attribute in client-final-message")))); + if (state->channel_binding_in_use) + { +#ifdef HAVE_BE_TLS_GET_CERTIFICATE_HASH + const char *cbind_data = NULL; + size_t cbind_data_len = 0; + size_t cbind_header_len; + char *cbind_input; + size_t cbind_input_len; + char *b64_message; + int b64_message_len; + + Assert(state->cbind_flag == 'p'); + + /* Fetch hash data of server's SSL certificate */ + cbind_data = be_tls_get_certificate_hash(state->port, + &cbind_data_len); + + /* should not happen */ + if (cbind_data == NULL || cbind_data_len == 0) + elog(ERROR, "could not get server certificate hash"); + + cbind_header_len = strlen("p=tls-server-end-point,,"); /* p=type,, */ + cbind_input_len = cbind_header_len + cbind_data_len; + cbind_input = palloc(cbind_input_len); + snprintf(cbind_input, cbind_input_len, "p=tls-server-end-point,,"); + memcpy(cbind_input + cbind_header_len, cbind_data, cbind_data_len); + + b64_message = palloc(pg_b64_enc_len(cbind_input_len) + 1); + b64_message_len = pg_b64_encode(cbind_input, cbind_input_len, + b64_message); + b64_message[b64_message_len] = '\0'; + + /* + * Compare the value sent by the client with the value expected by the + * server. + */ + if (strcmp(channel_binding, b64_message) != 0) + ereport(ERROR, + (errcode(ERRCODE_INVALID_AUTHORIZATION_SPECIFICATION), + (errmsg("SCRAM channel binding check failed")))); +#else + /* shouldn't happen, because we checked this earlier already */ + elog(ERROR, "channel binding not supported by this build"); +#endif + } + else + { + /* + * If we are not using channel binding, the binding data is expected + * to always be "biws", which is "n,," base64-encoded, or "eSws", + * which is "y,,". We also have to check whether the flag is the same + * one that the client originally sent. + */ + if (!(strcmp(channel_binding, "biws") == 0 && state->cbind_flag == 'n') && + !(strcmp(channel_binding, "eSws") == 0 && state->cbind_flag == 'y')) + ereport(ERROR, + (errcode(ERRCODE_PROTOCOL_VIOLATION), + (errmsg("unexpected SCRAM channel-binding attribute in client-final-message")))); + } + state->client_final_nonce = read_attr_value(&p, 'r'); /* ignore optional extensions */ @@ -1116,7 +1327,7 @@ build_server_final_message(scram_state *state) /* - * Determinisitcally generate salt for mock authentication, using a SHA256 + * Deterministically generate salt for mock authentication, using a SHA256 * hash based on the username and a cluster-level secret key. Returns a * pointer to a static buffer of size SCRAM_DEFAULT_SALT_LEN. 
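The literal constants compared above can be double-checked with the same base64 routine the patch uses. A small sketch, assuming the usual includes (illustrative only):

    static void
    show_gs2_base64(void)
    {
        const char *headers[] = {"n,,", "y,,"};
        int         i;

        for (i = 0; i < 2; i++)
        {
            int         rawlen = (int) strlen(headers[i]);
            char       *encoded = palloc(pg_b64_enc_len(rawlen) + 1);
            int         len = pg_b64_encode(headers[i], rawlen, encoded);

            encoded[len] = '\0';
            /* logs "biws" for "n,," and "eSws" for "y,," */
            elog(DEBUG1, "\"%s\" encodes to \"%s\"", headers[i], encoded);
        }
    }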
*/ diff --git a/src/backend/libpq/auth.c b/src/backend/libpq/auth.c index cb30fc7b71..8517565535 100644 --- a/src/backend/libpq/auth.c +++ b/src/backend/libpq/auth.c @@ -3,7 +3,7 @@ * auth.c * Routines to handle network authentication * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * @@ -18,7 +18,6 @@ #include #include #include -#include #include #ifdef HAVE_SYS_SELECT_H #include @@ -27,12 +26,14 @@ #include "commands/user.h" #include "common/ip.h" #include "common/md5.h" +#include "common/scram-common.h" #include "libpq/auth.h" #include "libpq/crypt.h" #include "libpq/libpq.h" #include "libpq/pqformat.h" #include "libpq/scram.h" #include "miscadmin.h" +#include "port/pg_bswap.h" #include "replication/walsender.h" #include "storage/ipc.h" #include "utils/backend_random.h" @@ -43,7 +44,7 @@ * Global authentication functions *---------------------------------------------------------------- */ -static void sendAuthRequest(Port *port, AuthRequest areq, char *extradata, +static void sendAuthRequest(Port *port, AuthRequest areq, const char *extradata, int extralen); static void auth_failed(Port *port, int status, char *logdetail); static char *recv_password_packet(Port *port); @@ -91,7 +92,7 @@ static int auth_peer(hbaPort *port); #define PGSQL_PAM_SERVICE "postgresql" /* Service name passed to PAM */ -static int CheckPAMAuth(Port *port, char *user, char *password); +static int CheckPAMAuth(Port *port, const char *user, const char *password); static int pam_passwd_conv_proc(int num_msg, const struct pam_message **msg, struct pam_response **resp, void *appdata_ptr); @@ -100,7 +101,8 @@ static struct pam_conv pam_passw_conv = { NULL }; -static char *pam_passwd = NULL; /* Workaround for Solaris 2.6 brokenness */ +static const char *pam_passwd = NULL; /* Workaround for Solaris 2.6 + * brokenness */ static Port *pam_port_cludge; /* Workaround for passing "Port *port" into * pam_passwd_conv_proc */ #endif /* USE_PAM */ @@ -141,6 +143,12 @@ ULONG (*__ldap_start_tls_sA) ( #endif static int CheckLDAPAuth(Port *port); + +/* LDAP_OPT_DIAGNOSTIC_MESSAGE is the newer spelling */ +#ifndef LDAP_OPT_DIAGNOSTIC_MESSAGE +#define LDAP_OPT_DIAGNOSTIC_MESSAGE LDAP_OPT_ERROR_STRING +#endif + #endif /* USE_LDAP */ /*---------------------------------------------------------------- @@ -196,7 +204,7 @@ static int pg_SSPI_make_upn(char *accountname, *---------------------------------------------------------------- */ static int CheckRADIUSAuth(Port *port); -static int PerformRadiusTransaction(char *server, char *secret, char *portstr, char *identifier, char *user_name, char *passwd); +static int PerformRadiusTransaction(const char *server, const char *secret, const char *portstr, const char *identifier, const char *user_name, const char *passwd); /* @@ -606,14 +614,14 @@ ClientAuthentication(Port *port) * Send an authentication request packet to the frontend. 
*/ static void -sendAuthRequest(Port *port, AuthRequest areq, char *extradata, int extralen) +sendAuthRequest(Port *port, AuthRequest areq, const char *extradata, int extralen) { StringInfoData buf; CHECK_FOR_INTERRUPTS(); pq_beginmessage(&buf, 'R'); - pq_sendint(&buf, (int32) areq, sizeof(int32)); + pq_sendint32(&buf, (int32) areq); if (extralen > 0) pq_sendbytes(&buf, extradata, extralen); @@ -854,9 +862,10 @@ CheckMD5Auth(Port *port, char *shadow_pass, char **logdetail) static int CheckSCRAMAuth(Port *port, char *shadow_pass, char **logdetail) { + StringInfoData sasl_mechs; int mtype; StringInfoData buf; - void *scram_opaq; + void *scram_opaq = NULL; char *output = NULL; int outputlen = 0; char *input; @@ -879,25 +888,16 @@ CheckSCRAMAuth(Port *port, char *shadow_pass, char **logdetail) /* * Send the SASL authentication request to user. It includes the list of - * authentication mechanisms (which is trivial, because we only support - * SCRAM-SHA-256 at the moment). The extra "\0" is for an empty string to - * terminate the list. + * authentication mechanisms that are supported. */ - sendAuthRequest(port, AUTH_REQ_SASL, SCRAM_SHA256_NAME "\0", - strlen(SCRAM_SHA256_NAME) + 2); + initStringInfo(&sasl_mechs); - /* - * Initialize the status tracker for message exchanges. - * - * If the user doesn't exist, or doesn't have a valid password, or it's - * expired, we still go through the motions of SASL authentication, but - * tell the authentication method that the authentication is "doomed". - * That is, it's going to fail, no matter what. - * - * This is because we don't want to reveal to an attacker what usernames - * are valid, nor which users have a valid password. - */ - scram_opaq = pg_be_scram_init(port->user_name, shadow_pass); + pg_be_scram_get_mechanisms(port, &sasl_mechs); + /* Put another '\0' to mark that list is finished. */ + appendStringInfoChar(&sasl_mechs, '\0'); + + sendAuthRequest(port, AUTH_REQ_SASL, sasl_mechs.data, sasl_mechs.len); + pfree(sasl_mechs.data); /* * Loop through SASL message exchange. This exchange can consist of @@ -945,17 +945,21 @@ CheckSCRAMAuth(Port *port, char *shadow_pass, char **logdetail) { const char *selected_mech; + selected_mech = pq_getmsgrawstring(&buf); + /* - * We only support SCRAM-SHA-256 at the moment, so anything else - * is an error. + * Initialize the status tracker for message exchanges. + * + * If the user doesn't exist, or doesn't have a valid password, or + * it's expired, we still go through the motions of SASL + * authentication, but tell the authentication method that the + * authentication is "doomed". That is, it's going to fail, no + * matter what. + * + * This is because we don't want to reveal to an attacker what + * usernames are valid, nor which users have a valid password. */ - selected_mech = pq_getmsgrawstring(&buf); - if (strcmp(selected_mech, SCRAM_SHA256_NAME) != 0) - { - ereport(ERROR, - (errcode(ERRCODE_PROTOCOL_VIOLATION), - errmsg("client selected an invalid SASL authentication mechanism"))); - } + scram_opaq = pg_be_scram_init(port, selected_mech, shadow_pass); inputlen = pq_getmsgint(&buf, 4); if (inputlen == -1) @@ -1033,8 +1037,12 @@ static GSS_DLLIMP gss_OID GSS_C_NT_USER_NAME = &GSS_C_NT_USER_NAME_desc; #endif +/* + * Generate an error for GSSAPI authentication. The caller should apply + * _() to errmsg to make it translatable. 
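For reference, when channel binding is available the AUTH_REQ_SASL payload built above is a '\0'-separated list with one extra terminating '\0'. Assuming the mechanism-name macros expand to the standard SASL names, it looks like this (the string literal supplies the final byte):

    static const char example_sasl_mechanism_list[] =
        "SCRAM-SHA-256-PLUS\0SCRAM-SHA-256\0";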
+ */ static void -pg_GSS_error(int severity, char *errmsg, OM_uint32 maj_stat, OM_uint32 min_stat) +pg_GSS_error(int severity, const char *errmsg, OM_uint32 maj_stat, OM_uint32 min_stat) { gss_buffer_desc gmsg; OM_uint32 lmin_s, @@ -1223,7 +1231,7 @@ pg_GSS_recvauth(Port *port) { gss_delete_sec_context(&lmin_s, &port->gss->ctx, GSS_C_NO_BUFFER); pg_GSS_error(ERROR, - gettext_noop("accepting GSS security context failed"), + _("accepting GSS security context failed"), maj_stat, min_stat); } @@ -1249,7 +1257,7 @@ pg_GSS_recvauth(Port *port) maj_stat = gss_display_name(&min_stat, port->gss->name, &gbuf, NULL); if (maj_stat != GSS_S_COMPLETE) pg_GSS_error(ERROR, - gettext_noop("retrieving GSS user name failed"), + _("retrieving GSS user name failed"), maj_stat, min_stat); /* @@ -1313,6 +1321,11 @@ pg_GSS_recvauth(Port *port) *---------------------------------------------------------------- */ #ifdef ENABLE_SSPI + +/* + * Generate an error for SSPI authentication. The caller should apply + * _() to errmsg to make it translatable. + */ static void pg_SSPI_error(int severity, const char *errmsg, SECURITY_STATUS r) { @@ -2017,10 +2030,12 @@ auth_peer(hbaPort *port) pw = getpwuid(uid); if (!pw) { + int save_errno = errno; + ereport(LOG, (errmsg("could not look up local user ID %ld: %s", (long) uid, - errno ? strerror(errno) : _("user does not exist")))); + save_errno ? strerror(save_errno) : _("user does not exist")))); return STATUS_ERROR; } @@ -2045,7 +2060,7 @@ static int pam_passwd_conv_proc(int num_msg, const struct pam_message **msg, struct pam_response **resp, void *appdata_ptr) { - char *passwd; + const char *passwd; struct pam_response *reply; int i; @@ -2143,7 +2158,7 @@ pam_passwd_conv_proc(int num_msg, const struct pam_message **msg, * Check authentication against PAM. */ static int -CheckPAMAuth(Port *port, char *user, char *password) +CheckPAMAuth(Port *port, const char *user, const char *password) { int retval; pam_handle_t *pamh = NULL; @@ -2305,6 +2320,8 @@ CheckBSDAuth(Port *port, char *user) */ #ifdef USE_LDAP +static int errdetail_for_ldap(LDAP *ldap); + /* * Initialize a connection to the LDAP server, including setting up * TLS if requested. 
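
The hunks below switch LDAP connection setup to a URI of the form scheme://host:port and, further down, default the port from the scheme. A minimal standalone sketch of that construction (helper name and host are illustrative, not part of the patch):

#include <stdio.h>
#include <string.h>

#define LDAP_PORT   389
#define LDAPS_PORT  636

/*
 * Choose the default port for the scheme and build the URI that would be
 * handed to ldap_initialize().
 */
static void
build_ldap_uri(char *out, size_t outsize,
               const char *scheme, const char *host, int port)
{
    if (scheme == NULL)
        scheme = "ldap";
    if (port == 0)
        port = (strcmp(scheme, "ldaps") == 0) ? LDAPS_PORT : LDAP_PORT;
    snprintf(out, outsize, "%s://%s:%d", scheme, host, port);
}

int
main(void)
{
    char        uri[256];

    build_ldap_uri(uri, sizeof(uri), "ldaps", "ldap.example.net", 0);
    puts(uri);                      /* ldaps://ldap.example.net:636 */
    return 0;
}
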
@@ -2312,28 +2329,70 @@ CheckBSDAuth(Port *port, char *user) static int InitializeLDAPConnection(Port *port, LDAP **ldap) { + const char *scheme; int ldapversion = LDAP_VERSION3; int r; - *ldap = ldap_init(port->hba->ldapserver, port->hba->ldapport); + scheme = port->hba->ldapscheme; + if (scheme == NULL) + scheme = "ldap"; +#ifdef WIN32 + if (strcmp(scheme, "ldaps") == 0) + *ldap = ldap_sslinit(port->hba->ldapserver, port->hba->ldapport, 1); + else + *ldap = ldap_init(port->hba->ldapserver, port->hba->ldapport); if (!*ldap) { -#ifndef WIN32 - ereport(LOG, - (errmsg("could not initialize LDAP: %m"))); -#else ereport(LOG, (errmsg("could not initialize LDAP: error code %d", (int) LdapGetLastError()))); -#endif + + return STATUS_ERROR; + } +#else +#ifdef HAVE_LDAP_INITIALIZE + { + char *uri; + + uri = psprintf("%s://%s:%d", scheme, port->hba->ldapserver, + port->hba->ldapport); + r = ldap_initialize(ldap, uri); + pfree(uri); + if (r != LDAP_SUCCESS) + { + ereport(LOG, + (errmsg("could not initialize LDAP: %s", + ldap_err2string(r)))); + + return STATUS_ERROR; + } + } +#else + if (strcmp(scheme, "ldaps") == 0) + { + ereport(LOG, + (errmsg("ldaps not supported with this LDAP library"))); + + return STATUS_ERROR; + } + *ldap = ldap_init(port->hba->ldapserver, port->hba->ldapport); + if (!*ldap) + { + ereport(LOG, + (errmsg("could not initialize LDAP: %m"))); + return STATUS_ERROR; } +#endif +#endif if ((r = ldap_set_option(*ldap, LDAP_OPT_PROTOCOL_VERSION, &ldapversion)) != LDAP_SUCCESS) { - ldap_unbind(*ldap); ereport(LOG, - (errmsg("could not set LDAP protocol version: %s", ldap_err2string(r)))); + (errmsg("could not set LDAP protocol version: %s", + ldap_err2string(r)), + errdetail_for_ldap(*ldap))); + ldap_unbind(*ldap); return STATUS_ERROR; } @@ -2360,18 +2419,18 @@ InitializeLDAPConnection(Port *port, LDAP **ldap) * should never happen since we import other files from * wldap32, but check anyway */ - ldap_unbind(*ldap); ereport(LOG, (errmsg("could not load wldap32.dll"))); + ldap_unbind(*ldap); return STATUS_ERROR; } _ldap_start_tls_sA = (__ldap_start_tls_sA) GetProcAddress(ldaphandle, "ldap_start_tls_sA"); if (_ldap_start_tls_sA == NULL) { - ldap_unbind(*ldap); ereport(LOG, (errmsg("could not load function _ldap_start_tls_sA in wldap32.dll"), errdetail("LDAP over SSL is not supported on this platform."))); + ldap_unbind(*ldap); return STATUS_ERROR; } @@ -2384,9 +2443,11 @@ InitializeLDAPConnection(Port *port, LDAP **ldap) if ((r = _ldap_start_tls_sA(*ldap, NULL, NULL, NULL, NULL)) != LDAP_SUCCESS) #endif { - ldap_unbind(*ldap); ereport(LOG, - (errmsg("could not start LDAP TLS session: %s", ldap_err2string(r)))); + (errmsg("could not start LDAP TLS session: %s", + ldap_err2string(r)), + errdetail_for_ldap(*ldap))); + ldap_unbind(*ldap); return STATUS_ERROR; } } @@ -2394,6 +2455,44 @@ InitializeLDAPConnection(Port *port, LDAP **ldap) return STATUS_OK; } +/* Placeholders recognized by FormatSearchFilter. For now just one. */ +#define LPH_USERNAME "$username" +#define LPH_USERNAME_LEN (sizeof(LPH_USERNAME) - 1) + +/* Not all LDAP implementations define this. */ +#ifndef LDAP_NO_ATTRS +#define LDAP_NO_ATTRS "1.1" +#endif + +/* Not all LDAP implementations define this. */ +#ifndef LDAPS_PORT +#define LDAPS_PORT 636 +#endif + +/* + * Return a newly allocated C string copied from "pattern" with all + * occurrences of the placeholder "$username" replaced with "user_name". 
+ */ +static char * +FormatSearchFilter(const char *pattern, const char *user_name) +{ + StringInfoData output; + + initStringInfo(&output); + while (*pattern != '\0') + { + if (strncmp(pattern, LPH_USERNAME, LPH_USERNAME_LEN) == 0) + { + appendStringInfoString(&output, user_name); + pattern += LPH_USERNAME_LEN; + } + else + appendStringInfoChar(&output, *pattern++); + } + + return output.data; +} + /* * Perform LDAP authentication */ @@ -2413,7 +2512,13 @@ CheckLDAPAuth(Port *port) } if (port->hba->ldapport == 0) - port->hba->ldapport = LDAP_PORT; + { + if (port->hba->ldapscheme != NULL && + strcmp(port->hba->ldapscheme, "ldaps") == 0) + port->hba->ldapport = LDAPS_PORT; + else + port->hba->ldapport = LDAP_PORT; + } sendAuthRequest(port, AUTH_REQ_PASSWORD, NULL, 0); @@ -2437,7 +2542,7 @@ CheckLDAPAuth(Port *port) char *filter; LDAPMessage *search_message; LDAPMessage *entry; - char *attributes[2]; + char *attributes[] = {LDAP_NO_ATTRS, NULL}; char *dn; char *c; int count; @@ -2458,6 +2563,7 @@ CheckLDAPAuth(Port *port) { ereport(LOG, (errmsg("invalid character in user name for LDAP authentication"))); + ldap_unbind(ldap); pfree(passwd); return STATUS_ERROR; } @@ -2474,18 +2580,22 @@ CheckLDAPAuth(Port *port) { ereport(LOG, (errmsg("could not perform initial LDAP bind for ldapbinddn \"%s\" on server \"%s\": %s", - port->hba->ldapbinddn, port->hba->ldapserver, ldap_err2string(r)))); + port->hba->ldapbinddn ? port->hba->ldapbinddn : "", + port->hba->ldapserver, + ldap_err2string(r)), + errdetail_for_ldap(ldap))); + ldap_unbind(ldap); pfree(passwd); return STATUS_ERROR; } - /* Fetch just one attribute, else *all* attributes are returned */ - attributes[0] = port->hba->ldapsearchattribute ? port->hba->ldapsearchattribute : "uid"; - attributes[1] = NULL; - - filter = psprintf("(%s=%s)", - attributes[0], - port->user_name); + /* Build a custom filter or a single attribute filter? 
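
FormatSearchFilter() above expands every $username placeholder, and the hunk that follows picks between that custom filter, a single-attribute filter, and the uid default. A simplified standalone sketch of the substitution, using a fixed-size buffer instead of a StringInfo; the pattern and user name are illustrative:

#include <stdio.h>
#include <string.h>

#define PLACEHOLDER     "$username"
#define PLACEHOLDER_LEN (sizeof(PLACEHOLDER) - 1)

/* Replace each occurrence of the placeholder in pattern with user_name. */
static void
format_filter(char *out, size_t outsize,
              const char *pattern, const char *user_name)
{
    size_t      len = 0;

    while (*pattern && len + 1 < outsize)
    {
        if (strncmp(pattern, PLACEHOLDER, PLACEHOLDER_LEN) == 0)
        {
            len += (size_t) snprintf(out + len, outsize - len, "%s", user_name);
            pattern += PLACEHOLDER_LEN;
        }
        else
            out[len++] = *pattern++;
    }
    out[len < outsize ? len : outsize - 1] = '\0';
}

int
main(void)
{
    char        filter[256];

    format_filter(filter, sizeof(filter),
                  "(|(uid=$username)(mail=$username))", "alice");
    puts(filter);                   /* (|(uid=alice)(mail=alice)) */
    return 0;
}
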
*/ + if (port->hba->ldapsearchfilter) + filter = FormatSearchFilter(port->hba->ldapsearchfilter, port->user_name); + else if (port->hba->ldapsearchattribute) + filter = psprintf("(%s=%s)", port->hba->ldapsearchattribute, port->user_name); + else + filter = psprintf("(uid=%s)", port->user_name); r = ldap_search_s(ldap, port->hba->ldapbasedn, @@ -2499,7 +2609,9 @@ CheckLDAPAuth(Port *port) { ereport(LOG, (errmsg("could not search LDAP for filter \"%s\" on server \"%s\": %s", - filter, port->hba->ldapserver, ldap_err2string(r)))); + filter, port->hba->ldapserver, ldap_err2string(r)), + errdetail_for_ldap(ldap))); + ldap_unbind(ldap); pfree(passwd); pfree(filter); return STATUS_ERROR; @@ -2521,6 +2633,7 @@ CheckLDAPAuth(Port *port) count, filter, port->hba->ldapserver, count))); + ldap_unbind(ldap); pfree(passwd); pfree(filter); ldap_msgfree(search_message); @@ -2536,7 +2649,10 @@ CheckLDAPAuth(Port *port) (void) ldap_get_option(ldap, LDAP_OPT_ERROR_NUMBER, &error); ereport(LOG, (errmsg("could not get dn for the first entry matching \"%s\" on server \"%s\": %s", - filter, port->hba->ldapserver, ldap_err2string(error)))); + filter, port->hba->ldapserver, + ldap_err2string(error)), + errdetail_for_ldap(ldap))); + ldap_unbind(ldap); pfree(passwd); pfree(filter); ldap_msgfree(search_message); @@ -2552,12 +2668,9 @@ CheckLDAPAuth(Port *port) r = ldap_unbind_s(ldap); if (r != LDAP_SUCCESS) { - int error; - - (void) ldap_get_option(ldap, LDAP_OPT_ERROR_NUMBER, &error); ereport(LOG, - (errmsg("could not unbind after searching for user \"%s\" on server \"%s\": %s", - fulluser, port->hba->ldapserver, ldap_err2string(error)))); + (errmsg("could not unbind after searching for user \"%s\" on server \"%s\"", + fulluser, port->hba->ldapserver))); pfree(passwd); pfree(fulluser); return STATUS_ERROR; @@ -2583,23 +2696,46 @@ CheckLDAPAuth(Port *port) port->hba->ldapsuffix ? port->hba->ldapsuffix : ""); r = ldap_simple_bind_s(ldap, fulluser, passwd); - ldap_unbind(ldap); if (r != LDAP_SUCCESS) { ereport(LOG, (errmsg("LDAP login failed for user \"%s\" on server \"%s\": %s", - fulluser, port->hba->ldapserver, ldap_err2string(r)))); + fulluser, port->hba->ldapserver, ldap_err2string(r)), + errdetail_for_ldap(ldap))); + ldap_unbind(ldap); pfree(passwd); pfree(fulluser); return STATUS_ERROR; } + ldap_unbind(ldap); pfree(passwd); pfree(fulluser); return STATUS_OK; } + +/* + * Add a detail error message text to the current error if one can be + * constructed from the LDAP 'diagnostic message'. 
+ */ +static int +errdetail_for_ldap(LDAP *ldap) +{ + char *message; + int rc; + + rc = ldap_get_option(ldap, LDAP_OPT_DIAGNOSTIC_MESSAGE, &message); + if (rc == LDAP_SUCCESS && message != NULL) + { + errdetail("LDAP diagnostics: %s", message); + ldap_memfree(message); + } + + return 0; +} + #endif /* USE_LDAP */ @@ -2667,7 +2803,7 @@ typedef struct #define RADIUS_ACCESS_ACCEPT 2 #define RADIUS_ACCESS_REJECT 3 -/* RAIDUS attributes */ +/* RADIUS attributes */ #define RADIUS_USER_NAME 1 #define RADIUS_PASSWORD 2 #define RADIUS_SERVICE_TYPE 6 @@ -2799,7 +2935,7 @@ CheckRADIUSAuth(Port *port) } static int -PerformRadiusTransaction(char *server, char *secret, char *portstr, char *identifier, char *user_name, char *passwd) +PerformRadiusTransaction(const char *server, const char *secret, const char *portstr, const char *identifier, const char *user_name, const char *passwd) { radius_packet radius_send_pack; radius_packet radius_recv_pack; @@ -2807,7 +2943,7 @@ PerformRadiusTransaction(char *server, char *secret, char *portstr, char *identi radius_packet *receivepacket = &radius_recv_pack; char *radius_buffer = (char *) &radius_send_pack; char *receive_buffer = (char *) &radius_recv_pack; - int32 service = htonl(RADIUS_AUTHENTICATE_ONLY); + int32 service = pg_hton32(RADIUS_AUTHENTICATE_ONLY); uint8 *cryptvector; int encryptedpasswordlen; uint8 encryptedpassword[RADIUS_MAX_PASSWORD_LENGTH]; @@ -2866,9 +3002,9 @@ PerformRadiusTransaction(char *server, char *secret, char *portstr, char *identi return STATUS_ERROR; } packet->id = packet->vector[0]; - radius_add_attribute(packet, RADIUS_SERVICE_TYPE, (unsigned char *) &service, sizeof(service)); - radius_add_attribute(packet, RADIUS_USER_NAME, (unsigned char *) user_name, strlen(user_name)); - radius_add_attribute(packet, RADIUS_NAS_IDENTIFIER, (unsigned char *) identifier, strlen(identifier)); + radius_add_attribute(packet, RADIUS_SERVICE_TYPE, (const unsigned char *) &service, sizeof(service)); + radius_add_attribute(packet, RADIUS_USER_NAME, (const unsigned char *) user_name, strlen(user_name)); + radius_add_attribute(packet, RADIUS_NAS_IDENTIFIER, (const unsigned char *) identifier, strlen(identifier)); /* * RADIUS password attributes are calculated as: e[0] = p[0] XOR @@ -2915,7 +3051,7 @@ PerformRadiusTransaction(char *server, char *secret, char *portstr, char *identi /* Length needs to be in network order on the wire */ packetlength = packet->length; - packet->length = htons(packet->length); + packet->length = pg_hton16(packet->length); sock = socket(serveraddrs[0].ai_family, SOCK_DGRAM, 0); if (sock == PGINVALID_SOCKET) @@ -3041,19 +3177,19 @@ PerformRadiusTransaction(char *server, char *secret, char *portstr, char *identi } #ifdef HAVE_IPV6 - if (remoteaddr.sin6_port != htons(port)) + if (remoteaddr.sin6_port != pg_hton16(port)) #else - if (remoteaddr.sin_port != htons(port)) + if (remoteaddr.sin_port != pg_hton16(port)) #endif { #ifdef HAVE_IPV6 ereport(LOG, (errmsg("RADIUS response from %s was sent from incorrect port: %d", - server, ntohs(remoteaddr.sin6_port)))); + server, pg_ntoh16(remoteaddr.sin6_port)))); #else ereport(LOG, (errmsg("RADIUS response from %s was sent from incorrect port: %d", - server, ntohs(remoteaddr.sin_port)))); + server, pg_ntoh16(remoteaddr.sin_port)))); #endif continue; } @@ -3065,11 +3201,11 @@ PerformRadiusTransaction(char *server, char *secret, char *portstr, char *identi continue; } - if (packetlength != ntohs(receivepacket->length)) + if (packetlength != pg_ntoh16(receivepacket->length)) { ereport(LOG, 
(errmsg("RADIUS response from %s has corrupt length: %d (actual length %d)", - server, ntohs(receivepacket->length), packetlength))); + server, pg_ntoh16(receivepacket->length), packetlength))); continue; } diff --git a/src/backend/libpq/be-fsstubs.c b/src/backend/libpq/be-fsstubs.c index bf45461b2f..0b802b54e4 100644 --- a/src/backend/libpq/be-fsstubs.c +++ b/src/backend/libpq/be-fsstubs.c @@ -3,7 +3,7 @@ * be-fsstubs.c * Builtin functions for open/close/read/write operations on large objects * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * @@ -51,11 +51,6 @@ #include "utils/builtins.h" #include "utils/memutils.h" -/* - * compatibility flag for permission checks - */ -bool lo_compat_privileges; - /* define this to enable debug logging */ /* #define FSDB 1 */ /* chunk size for lo_import/lo_export transfers */ @@ -108,14 +103,6 @@ be_lo_open(PG_FUNCTION_ARGS) lobjDesc = inv_open(lobjId, mode, fscxt); - if (lobjDesc == NULL) - { /* lookup failed */ -#if FSDB - elog(DEBUG4, "could not open large object %u", lobjId); -#endif - PG_RETURN_INT32(-1); - } - fd = newLOfd(lobjDesc); PG_RETURN_INT32(fd); @@ -163,22 +150,16 @@ lo_read(int fd, char *buf, int len) errmsg("invalid large-object descriptor: %d", fd))); lobj = cookies[fd]; - /* We don't bother to check IFS_RDLOCK, since it's always set */ - - /* Permission checks --- first time through only */ - if ((lobj->flags & IFS_RD_PERM_OK) == 0) - { - if (!lo_compat_privileges && - pg_largeobject_aclcheck_snapshot(lobj->id, - GetUserId(), - ACL_SELECT, - lobj->snapshot) != ACLCHECK_OK) - ereport(ERROR, - (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE), - errmsg("permission denied for large object %u", - lobj->id))); - lobj->flags |= IFS_RD_PERM_OK; - } + /* + * Check state. inv_read() would throw an error anyway, but we want the + * error to be about the FD's state not the underlying privilege; it might + * be that the privilege exists but user forgot to ask for read mode. + */ + if ((lobj->flags & IFS_RDLOCK) == 0) + ereport(ERROR, + (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE), + errmsg("large object descriptor %d was not opened for reading", + fd))); status = inv_read(lobj, buf, len); @@ -197,27 +178,13 @@ lo_write(int fd, const char *buf, int len) errmsg("invalid large-object descriptor: %d", fd))); lobj = cookies[fd]; + /* see comment in lo_read() */ if ((lobj->flags & IFS_WRLOCK) == 0) ereport(ERROR, (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE), errmsg("large object descriptor %d was not opened for writing", fd))); - /* Permission checks --- first time through only */ - if ((lobj->flags & IFS_WR_PERM_OK) == 0) - { - if (!lo_compat_privileges && - pg_largeobject_aclcheck_snapshot(lobj->id, - GetUserId(), - ACL_UPDATE, - lobj->snapshot) != ACLCHECK_OK) - ereport(ERROR, - (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE), - errmsg("permission denied for large object %u", - lobj->id))); - lobj->flags |= IFS_WR_PERM_OK; - } - status = inv_write(lobj, buf, len); return status; @@ -342,7 +309,11 @@ be_lo_unlink(PG_FUNCTION_ARGS) { Oid lobjId = PG_GETARG_OID(0); - /* Must be owner of the largeobject */ + /* + * Must be owner of the large object. It would be cleaner to check this + * in inv_drop(), but we want to throw the error before not after closing + * relevant FDs. 
+ */ if (!lo_compat_privileges && !pg_largeobject_ownercheck(lobjId, GetUserId())) ereport(ERROR, @@ -448,21 +419,13 @@ lo_import_internal(text *filename, Oid lobjOid) LargeObjectDesc *lobj; Oid oid; -#ifndef ALLOW_DANGEROUS_LO_FUNCTIONS - if (!superuser()) - ereport(ERROR, - (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE), - errmsg("must be superuser to use server-side lo_import()"), - errhint("Anyone can use the client-side lo_import() provided by libpq."))); -#endif - CreateFSContext(); /* * open the file to be read in */ text_to_cstring_buffer(filename, fnamebuf, sizeof(fnamebuf)); - fd = OpenTransientFile(fnamebuf, O_RDONLY | PG_BINARY, S_IRWXU); + fd = OpenTransientFile(fnamebuf, O_RDONLY | PG_BINARY); if (fd < 0) ereport(ERROR, (errcode_for_file_access(), @@ -514,14 +477,6 @@ be_lo_export(PG_FUNCTION_ARGS) LargeObjectDesc *lobj; mode_t oumask; -#ifndef ALLOW_DANGEROUS_LO_FUNCTIONS - if (!superuser()) - ereport(ERROR, - (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE), - errmsg("must be superuser to use server-side lo_export()"), - errhint("Anyone can use the client-side lo_export() provided by libpq."))); -#endif - CreateFSContext(); /* @@ -538,8 +493,17 @@ be_lo_export(PG_FUNCTION_ARGS) */ text_to_cstring_buffer(filename, fnamebuf, sizeof(fnamebuf)); oumask = umask(S_IWGRP | S_IWOTH); - fd = OpenTransientFile(fnamebuf, O_CREAT | O_WRONLY | O_TRUNC | PG_BINARY, - S_IRUSR | S_IWUSR | S_IRGRP | S_IROTH); + PG_TRY(); + { + fd = OpenTransientFilePerm(fnamebuf, O_CREAT | O_WRONLY | O_TRUNC | PG_BINARY, + S_IRUSR | S_IWUSR | S_IRGRP | S_IROTH); + } + PG_CATCH(); + { + umask(oumask); + PG_RE_THROW(); + } + PG_END_TRY(); umask(oumask); if (fd < 0) ereport(ERROR, @@ -581,27 +545,13 @@ lo_truncate_internal(int32 fd, int64 len) errmsg("invalid large-object descriptor: %d", fd))); lobj = cookies[fd]; + /* see comment in lo_read() */ if ((lobj->flags & IFS_WRLOCK) == 0) ereport(ERROR, (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE), errmsg("large object descriptor %d was not opened for writing", fd))); - /* Permission checks --- first time through only */ - if ((lobj->flags & IFS_WR_PERM_OK) == 0) - { - if (!lo_compat_privileges && - pg_largeobject_aclcheck_snapshot(lobj->id, - GetUserId(), - ACL_UPDATE, - lobj->snapshot) != ACLCHECK_OK) - ereport(ERROR, - (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE), - errmsg("permission denied for large object %u", - lobj->id))); - lobj->flags |= IFS_WR_PERM_OK; - } - inv_truncate(lobj, len); } @@ -777,17 +727,6 @@ lo_get_fragment_internal(Oid loOid, int64 offset, int32 nbytes) loDesc = inv_open(loOid, INV_READ, fscxt); - /* Permission check */ - if (!lo_compat_privileges && - pg_largeobject_aclcheck_snapshot(loDesc->id, - GetUserId(), - ACL_SELECT, - loDesc->snapshot) != ACLCHECK_OK) - ereport(ERROR, - (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE), - errmsg("permission denied for large object %u", - loDesc->id))); - /* * Compute number of bytes we'll actually read, accommodating nbytes == -1 * and reads beyond the end of the LO. 
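
The be_lo_export() hunk above wraps the file open in PG_TRY/PG_CATCH so that the temporarily tightened creation mask is restored even if an error is thrown inside the open. A plain-C sketch of the same save-and-restore pattern, where failure is reported by return value instead of a longjmp; path, flags, and mode are illustrative:

#include <errno.h>
#include <fcntl.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <unistd.h>

/*
 * Create a file with group/other write blocked during creation, restoring
 * the previous umask on every path.
 */
static int
open_with_tight_umask(const char *path)
{
    mode_t      oumask = umask(S_IWGRP | S_IWOTH);
    int         fd = open(path, O_CREAT | O_WRONLY | O_TRUNC, 0644);
    int         save_errno = errno;

    umask(oumask);                  /* always restore the previous mask */
    errno = save_errno;
    return fd;
}
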
diff --git a/src/backend/libpq/be-secure-common.c b/src/backend/libpq/be-secure-common.c new file mode 100644 index 0000000000..a3edf27e86 --- /dev/null +++ b/src/backend/libpq/be-secure-common.c @@ -0,0 +1,194 @@ +/*------------------------------------------------------------------------- + * + * be-secure-common.c + * + * common implementation-independent SSL support code + * + * While be-secure.c contains the interfaces that the rest of the + * communications code calls, this file contains support routines that are + * used by the library-specific implementations such as be-secure-openssl.c. + * + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group + * Portions Copyright (c) 1994, Regents of the University of California + * + * IDENTIFICATION + * src/backend/libpq/be-secure-common.c + * + *------------------------------------------------------------------------- + */ + +#include "postgres.h" + +#include +#include + +#include "libpq/libpq.h" +#include "storage/fd.h" + +/* + * Run ssl_passphrase_command + * + * prompt will be substituted for %p. is_server_start determines the loglevel + * of error messages. + * + * The result will be put in buffer buf, which is of size size. The return + * value is the length of the actual result. + */ +int +run_ssl_passphrase_command(const char *prompt, bool is_server_start, char *buf, int size) +{ + int loglevel = is_server_start ? ERROR : LOG; + StringInfoData command; + char *p; + FILE *fh; + int pclose_rc; + size_t len = 0; + + Assert(prompt); + Assert(size > 0); + buf[0] = '\0'; + + initStringInfo(&command); + + for (p = ssl_passphrase_command; *p; p++) + { + if (p[0] == '%') + { + switch (p[1]) + { + case 'p': + appendStringInfoString(&command, prompt); + p++; + break; + case '%': + appendStringInfoChar(&command, '%'); + p++; + break; + default: + appendStringInfoChar(&command, p[0]); + } + } + else + appendStringInfoChar(&command, p[0]); + } + + fh = OpenPipeStream(command.data, "r"); + if (fh == NULL) + { + ereport(loglevel, + (errcode_for_file_access(), + errmsg("could not execute command \"%s\": %m", + command.data))); + goto error; + } + + if (!fgets(buf, size, fh)) + { + if (ferror(fh)) + { + ereport(loglevel, + (errcode_for_file_access(), + errmsg("could not read from command \"%s\": %m", + command.data))); + goto error; + } + } + + pclose_rc = ClosePipeStream(fh); + if (pclose_rc == -1) + { + ereport(loglevel, + (errcode_for_file_access(), + errmsg("could not close pipe to external command: %m"))); + goto error; + } + else if (pclose_rc != 0) + { + ereport(loglevel, + (errcode_for_file_access(), + errmsg("command \"%s\" failed", + command.data), + errdetail_internal("%s", wait_result_to_str(pclose_rc)))); + goto error; + } + + /* strip trailing newline */ + len = strlen(buf); + if (len > 0 && buf[len - 1] == '\n') + buf[--len] = '\0'; + +error: + pfree(command.data); + return len; +} + + +/* + * Check permissions for SSL key files. + */ +bool +check_ssl_key_file_permissions(const char *ssl_key_file, bool isServerStart) +{ + int loglevel = isServerStart ? 
FATAL : LOG; + struct stat buf; + + if (stat(ssl_key_file, &buf) != 0) + { + ereport(loglevel, + (errcode_for_file_access(), + errmsg("could not access private key file \"%s\": %m", + ssl_key_file))); + return false; + } + + if (!S_ISREG(buf.st_mode)) + { + ereport(loglevel, + (errcode(ERRCODE_CONFIG_FILE_ERROR), + errmsg("private key file \"%s\" is not a regular file", + ssl_key_file))); + return false; + } + + /* + * Refuse to load key files owned by users other than us or root. + * + * XXX surely we can check this on Windows somehow, too. + */ +#if !defined(WIN32) && !defined(__CYGWIN__) + if (buf.st_uid != geteuid() && buf.st_uid != 0) + { + ereport(loglevel, + (errcode(ERRCODE_CONFIG_FILE_ERROR), + errmsg("private key file \"%s\" must be owned by the database user or root", + ssl_key_file))); + return false; + } +#endif + + /* + * Require no public access to key file. If the file is owned by us, + * require mode 0600 or less. If owned by root, require 0640 or less to + * allow read access through our gid, or a supplementary gid that allows + * to read system-wide certificates. + * + * XXX temporarily suppress check when on Windows, because there may not + * be proper support for Unix-y file permissions. Need to think of a + * reasonable check to apply on Windows. (See also the data directory + * permission check in postmaster.c) + */ +#if !defined(WIN32) && !defined(__CYGWIN__) + if ((buf.st_uid == geteuid() && buf.st_mode & (S_IRWXG | S_IRWXO)) || + (buf.st_uid == 0 && buf.st_mode & (S_IWGRP | S_IXGRP | S_IRWXO))) + { + ereport(loglevel, + (errcode(ERRCODE_CONFIG_FILE_ERROR), + errmsg("private key file \"%s\" has group or world access", + ssl_key_file), + errdetail("File must have permissions u=rw (0600) or less if owned by the database user, or permissions u=rw,g=r (0640) or less if owned by root."))); + return false; + } +#endif + + return true; +} diff --git a/src/backend/libpq/be-secure-openssl.c b/src/backend/libpq/be-secure-openssl.c index fe15227a77..6a576572bb 100644 --- a/src/backend/libpq/be-secure-openssl.c +++ b/src/backend/libpq/be-secure-openssl.c @@ -4,35 +4,13 @@ * functions for OpenSSL support in the backend. * * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * * IDENTIFICATION * src/backend/libpq/be-secure-openssl.c * - * Since the server static private key ($DataDir/server.key) - * will normally be stored unencrypted so that the database - * backend can restart automatically, it is important that - * we select an algorithm that continues to provide confidentiality - * even if the attacker has the server's private key. Ephemeral - * DH (EDH) keys provide this and more (Perfect Forward Secrecy - * aka PFS). - * - * N.B., the static private key should still be protected to - * the largest extent possible, to minimize the risk of - * impersonations. - * - * Another benefit of EDH is that it allows the backend and - * clients to use DSA keys. DSA keys can only provide digital - * signatures, not encryption, and are often acceptable in - * jurisdictions where RSA keys are unacceptable. - * - * The downside to EDH is that it makes it impossible to - * use ssldump(1) if there's a problem establishing an SSL - * session. In this case you'll need to temporarily disable - * EDH by commenting out the callback. 
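
run_ssl_passphrase_command() above performs a small template expansion: %p becomes the prompt, %% a literal percent, and everything else is copied through. A standalone sketch of that expansion with a fixed-size buffer; the askpass command line in the example is hypothetical:

#include <stdio.h>
#include <string.h>

/* Expand %p (prompt) and %% in a command template. */
static void
expand_command(const char *tmpl, const char *prompt, char *out, size_t outsize)
{
    size_t      len = 0;

    for (const char *p = tmpl; *p && len + 1 < outsize; p++)
    {
        if (p[0] == '%' && p[1] == 'p')
        {
            len += (size_t) snprintf(out + len, outsize - len, "%s", prompt);
            p++;
        }
        else if (p[0] == '%' && p[1] == '%')
        {
            out[len++] = '%';
            p++;
        }
        else
            out[len++] = *p;
    }
    out[len < outsize ? len : outsize - 1] = '\0';
}

int
main(void)
{
    char        cmd[256];

    expand_command("askpass --prompt '%p' --escape '%%'",
                   "Enter PEM pass phrase:", cmd, sizeof(cmd));
    puts(cmd);      /* askpass --prompt 'Enter PEM pass phrase:' --escape '%' */
    return 0;
}
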
- * *------------------------------------------------------------------------- */ @@ -74,7 +52,8 @@ static int my_SSL_set_fd(Port *port, int fd); static DH *load_dh_file(char *filename, bool isServerStart); static DH *load_dh_buffer(const char *, size_t); -static int ssl_passwd_cb(char *buf, int size, int rwflag, void *userdata); +static int ssl_external_passwd_cb(char *buf, int size, int rwflag, void *userdata); +static int dummy_ssl_passwd_cb(char *buf, int size, int rwflag, void *userdata); static int verify_cb(int, X509_STORE_CTX *); static void info_cb(const SSL *ssl, int type, int args); static bool initialize_dh(SSL_CTX *context, bool isServerStart); @@ -85,60 +64,19 @@ static char *X509_NAME_to_cstring(X509_NAME *name); static SSL_CTX *SSL_context = NULL; static bool SSL_initialized = false; -static bool ssl_passwd_cb_called = false; - -/* ------------------------------------------------------------ */ -/* Hardcoded values */ -/* ------------------------------------------------------------ */ - -/* - * Hardcoded DH parameters, used in ephemeral DH keying. - * As discussed above, EDH protects the confidentiality of - * sessions even if the static private key is compromised, - * so we are *highly* motivated to ensure that we can use - * EDH even if the DBA has not provided custom DH parameters. - * - * We could refuse SSL connections unless a good DH parameter - * file exists, but some clients may quietly renegotiate an - * unsecured connection without fully informing the user. - * Very uncool. Alternatively, the system could refuse to start - * if a DH parameters is not specified, but this would tend to - * piss off DBAs. - * - * If you want to create your own hardcoded DH parameters - * for fun and profit, review "Assigned Number for SKIP - * Protocols" (http://www.skip-vpn.org/spec/numbers.html) - * for suggestions. - */ - -static const char file_dh2048[] = -"-----BEGIN DH PARAMETERS-----\n\ -MIIBCAKCAQEA9kJXtwh/CBdyorrWqULzBej5UxE5T7bxbrlLOCDaAadWoxTpj0BV\n\ -89AHxstDqZSt90xkhkn4DIO9ZekX1KHTUPj1WV/cdlJPPT2N286Z4VeSWc39uK50\n\ -T8X8dryDxUcwYc58yWb/Ffm7/ZFexwGq01uejaClcjrUGvC/RgBYK+X0iP1YTknb\n\ -zSC0neSRBzZrM2w4DUUdD3yIsxx8Wy2O9vPJI8BD8KVbGI2Ou1WMuF040zT9fBdX\n\ -Q6MdGGzeMyEstSr/POGxKUAYEY18hKcKctaGxAMZyAcpesqVDNmWn6vQClCbAkbT\n\ -CD1mpF1Bn5x8vYlLIhkmuquiXsNV6TILOwIBAg==\n\ ------END DH PARAMETERS-----\n"; +static bool dummy_ssl_passwd_cb_called = false; +static bool ssl_is_server_start; /* ------------------------------------------------------------ */ /* Public interface */ /* ------------------------------------------------------------ */ -/* - * Initialize global SSL context. - * - * If isServerStart is true, report any errors as FATAL (so we don't return). - * Otherwise, log errors at LOG level and return -1 to indicate trouble, - * preserving the old SSL state if any. Returns 0 if OK. - */ int be_tls_init(bool isServerStart) { STACK_OF(X509_NAME) *root_cert_list = NULL; SSL_CTX *context; - struct stat buf; /* This stuff need be done only once. */ if (!SSL_initialized) @@ -175,95 +113,55 @@ be_tls_init(bool isServerStart) SSL_CTX_set_mode(context, SSL_MODE_ACCEPT_MOVING_WRITE_BUFFER); /* - * If reloading, override OpenSSL's default handling of - * passphrase-protected files, because we don't want to prompt for a - * passphrase in an already-running server. (Not that the default - * handling is very desirable during server start either, but some people - * insist we need to keep it.) 
- */ - if (!isServerStart) - SSL_CTX_set_default_passwd_cb(context, ssl_passwd_cb); - - /* - * Load and verify server's certificate and private key + * Set password callback */ - if (SSL_CTX_use_certificate_chain_file(context, ssl_cert_file) != 1) + if (isServerStart) { - ereport(isServerStart ? FATAL : LOG, - (errcode(ERRCODE_CONFIG_FILE_ERROR), - errmsg("could not load server certificate file \"%s\": %s", - ssl_cert_file, SSLerrmessage(ERR_get_error())))); - goto error; + if (ssl_passphrase_command[0]) + SSL_CTX_set_default_passwd_cb(context, ssl_external_passwd_cb); } - - if (stat(ssl_key_file, &buf) != 0) + else { - ereport(isServerStart ? FATAL : LOG, - (errcode_for_file_access(), - errmsg("could not access private key file \"%s\": %m", - ssl_key_file))); - goto error; - } + if (ssl_passphrase_command[0] && ssl_passphrase_command_supports_reload) + SSL_CTX_set_default_passwd_cb(context, ssl_external_passwd_cb); + else - if (!S_ISREG(buf.st_mode)) - { - ereport(isServerStart ? FATAL : LOG, - (errcode(ERRCODE_CONFIG_FILE_ERROR), - errmsg("private key file \"%s\" is not a regular file", - ssl_key_file))); - goto error; + /* + * If reloading and no external command is configured, override + * OpenSSL's default handling of passphrase-protected files, + * because we don't want to prompt for a passphrase in an + * already-running server. + */ + SSL_CTX_set_default_passwd_cb(context, dummy_ssl_passwd_cb); } + /* used by the callback */ + ssl_is_server_start = isServerStart; /* - * Refuse to load key files owned by users other than us or root. - * - * XXX surely we can check this on Windows somehow, too. + * Load and verify server's certificate and private key */ -#if !defined(WIN32) && !defined(__CYGWIN__) - if (buf.st_uid != geteuid() && buf.st_uid != 0) + if (SSL_CTX_use_certificate_chain_file(context, ssl_cert_file) != 1) { ereport(isServerStart ? FATAL : LOG, (errcode(ERRCODE_CONFIG_FILE_ERROR), - errmsg("private key file \"%s\" must be owned by the database user or root", - ssl_key_file))); + errmsg("could not load server certificate file \"%s\": %s", + ssl_cert_file, SSLerrmessage(ERR_get_error())))); goto error; } -#endif - /* - * Require no public access to key file. If the file is owned by us, - * require mode 0600 or less. If owned by root, require 0640 or less to - * allow read access through our gid, or a supplementary gid that allows - * to read system-wide certificates. - * - * XXX temporarily suppress check when on Windows, because there may not - * be proper support for Unix-y file permissions. Need to think of a - * reasonable check to apply on Windows. (See also the data directory - * permission check in postmaster.c) - */ -#if !defined(WIN32) && !defined(__CYGWIN__) - if ((buf.st_uid == geteuid() && buf.st_mode & (S_IRWXG | S_IRWXO)) || - (buf.st_uid == 0 && buf.st_mode & (S_IWGRP | S_IXGRP | S_IRWXO))) - { - ereport(isServerStart ? FATAL : LOG, - (errcode(ERRCODE_CONFIG_FILE_ERROR), - errmsg("private key file \"%s\" has group or world access", - ssl_key_file), - errdetail("File must have permissions u=rw (0600) or less if owned by the database user, or permissions u=rw,g=r (0640) or less if owned by root."))); + if (!check_ssl_key_file_permissions(ssl_key_file, isServerStart)) goto error; - } -#endif /* * OK, try to load the private key file. 
*/ - ssl_passwd_cb_called = false; + dummy_ssl_passwd_cb_called = false; if (SSL_CTX_use_PrivateKey_file(context, ssl_key_file, SSL_FILETYPE_PEM) != 1) { - if (ssl_passwd_cb_called) + if (dummy_ssl_passwd_cb_called) ereport(isServerStart ? FATAL : LOG, (errcode(ERRCODE_CONFIG_FILE_ERROR), errmsg("private key file \"%s\" cannot be reloaded because it requires a passphrase", @@ -289,7 +187,7 @@ be_tls_init(bool isServerStart) SSL_CTX_set_options(context, SSL_OP_NO_SSLv2 | SSL_OP_NO_SSLv3); /* disallow SSL session tickets */ -#ifdef SSL_OP_NO_TICKET /* added in openssl 0.9.8f */ +#ifdef SSL_OP_NO_TICKET /* added in OpenSSL 0.9.8f */ SSL_CTX_set_options(context, SSL_OP_NO_TICKET); #endif @@ -412,9 +310,6 @@ be_tls_init(bool isServerStart) return -1; } -/* - * Destroy global SSL context, if any. - */ void be_tls_destroy(void) { @@ -424,9 +319,6 @@ be_tls_destroy(void) ssl_loaded_verify_locations = false; } -/* - * Attempt to negotiate SSL connection. - */ int be_tls_open_server(Port *port) { @@ -585,19 +477,12 @@ be_tls_open_server(Port *port) port->peer_cert_valid = true; } - ereport(DEBUG2, - (errmsg("SSL connection from \"%s\"", - port->peer_cn ? port->peer_cn : "(anonymous)"))); - /* set up debugging/info callback */ SSL_CTX_set_info_callback(SSL_context, info_cb); return 0; } -/* - * Close SSL connection. - */ void be_tls_close(Port *port) { @@ -622,9 +507,6 @@ be_tls_close(Port *port) } } -/* - * Read data from a secure connection. - */ ssize_t be_tls_read(Port *port, void *ptr, size_t len, int *waitfor) { @@ -684,9 +566,6 @@ be_tls_read(Port *port, void *ptr, size_t len, int *waitfor) return n; } -/* - * Write data to a secure connection. - */ ssize_t be_tls_write(Port *port, void *ptr, size_t len, int *waitfor) { @@ -732,7 +611,7 @@ be_tls_write(Port *port, void *ptr, size_t len, int *waitfor) case SSL_ERROR_ZERO_RETURN: /* - * the SSL connnection was closed, leave it to the caller to + * the SSL connection was closed, leave it to the caller to * ereport it */ errno = ECONNRESET; @@ -759,7 +638,7 @@ be_tls_write(Port *port, void *ptr, size_t len, int *waitfor) * Private substitute BIO: this does the sending and receiving using send() and * recv() instead. This is so that we can enable and disable interrupts * just while calling recv(). We cannot have interrupts occurring while - * the bulk of openssl runs, because it uses malloc() and possibly other + * the bulk of OpenSSL runs, because it uses malloc() and possibly other * non-reentrant libc facilities. We also need to call send() and recv() * directly so it gets passed through the socket/signals layer on Win32. * @@ -857,7 +736,7 @@ my_BIO_s_socket(void) return my_bio_methods; } -/* This should exactly match openssl's SSL_set_fd except for using my BIO */ +/* This should exactly match OpenSSL's SSL_set_fd except for using my BIO */ static int my_SSL_set_fd(Port *port, int fd) { @@ -977,7 +856,21 @@ load_dh_buffer(const char *buffer, size_t len) } /* - * Passphrase collection callback + * Passphrase collection callback using ssl_passphrase_command + */ +static int +ssl_external_passwd_cb(char *buf, int size, int rwflag, void *userdata) +{ + /* same prompt as OpenSSL uses internally */ + const char *prompt = "Enter PEM pass phrase:"; + + Assert(rwflag == 0); + + return run_ssl_passphrase_command(prompt, ssl_is_server_start, buf, size); +} + +/* + * Dummy passphrase callback * * If OpenSSL is told to use a passphrase-protected server key, by default * it will issue a prompt on /dev/tty and try to read a key from there. 
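
The dummy callback described above relies on OpenSSL's pem_password_cb contract: handing back an empty passphrase makes loading an encrypted key fail instead of prompting on the terminal. A minimal sketch of such a refusing callback (names are illustrative):

#include <openssl/ssl.h>

/* A pem_password_cb that always supplies an empty passphrase. */
static int
refuse_passphrase_cb(char *buf, int size, int rwflag, void *userdata)
{
    (void) rwflag;
    (void) userdata;

    if (size > 0)
        buf[0] = '\0';
    return 0;
}

/* e.g. SSL_CTX_set_default_passwd_cb(ctx, refuse_passphrase_cb); */
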
@@ -986,10 +879,10 @@ load_dh_buffer(const char *buffer, size_t len) * function that just returns an empty passphrase, guaranteeing failure. */ static int -ssl_passwd_cb(char *buf, int size, int rwflag, void *userdata) +dummy_ssl_passwd_cb(char *buf, int size, int rwflag, void *userdata) { /* Set flag to change the error message we'll report */ - ssl_passwd_cb_called = true; + dummy_ssl_passwd_cb_called = true; /* And return empty string */ Assert(size > 0); buf[0] = '\0'; @@ -1063,8 +956,8 @@ info_cb(const SSL *ssl, int type, int args) * precomputed. * * Since few sites will bother to create a parameter file, we also - * also provide a fallback to the parameters provided by the - * OpenSSL project. + * provide a fallback to the parameters provided by the OpenSSL + * project. * * These values can be static (once loaded or computed) since the * OpenSSL library can efficiently generate random keys from the @@ -1080,7 +973,7 @@ initialize_dh(SSL_CTX *context, bool isServerStart) if (ssl_dh_params_file[0]) dh = load_dh_file(ssl_dh_params_file, isServerStart); if (!dh) - dh = load_dh_buffer(file_dh2048, sizeof file_dh2048); + dh = load_dh_buffer(FILE_DH2048, sizeof(FILE_DH2048)); if (!dh) { ereport(isServerStart ? FATAL : LOG, @@ -1151,7 +1044,7 @@ static const char * SSLerrmessage(unsigned long ecode) { const char *errreason; - static char errbuf[32]; + static char errbuf[36]; if (ecode == 0) return _("no SSL error reported"); @@ -1162,9 +1055,6 @@ SSLerrmessage(unsigned long ecode) return errbuf; } -/* - * Return information about the SSL connection - */ int be_tls_get_cipher_bits(Port *port) { @@ -1188,22 +1078,22 @@ be_tls_get_compression(Port *port) return false; } -void -be_tls_get_version(Port *port, char *ptr, size_t len) +const char * +be_tls_get_version(Port *port) { if (port->ssl) - strlcpy(ptr, SSL_get_version(port->ssl), len); + return SSL_get_version(port->ssl); else - ptr[0] = '\0'; + return NULL; } -void -be_tls_get_cipher(Port *port, char *ptr, size_t len) +const char * +be_tls_get_cipher(Port *port) { if (port->ssl) - strlcpy(ptr, SSL_get_cipher(port->ssl), len); + return SSL_get_cipher(port->ssl); else - ptr[0] = '\0'; + return NULL; } void @@ -1215,6 +1105,62 @@ be_tls_get_peerdn_name(Port *port, char *ptr, size_t len) ptr[0] = '\0'; } +#ifdef HAVE_X509_GET_SIGNATURE_NID +char * +be_tls_get_certificate_hash(Port *port, size_t *len) +{ + X509 *server_cert; + char *cert_hash; + const EVP_MD *algo_type = NULL; + unsigned char hash[EVP_MAX_MD_SIZE]; /* size for SHA-512 */ + unsigned int hash_size; + int algo_nid; + + *len = 0; + server_cert = SSL_get_certificate(port->ssl); + if (server_cert == NULL) + return NULL; + + /* + * Get the signature algorithm of the certificate to determine the hash + * algorithm to use for the result. + */ + if (!OBJ_find_sigid_algs(X509_get_signature_nid(server_cert), + &algo_nid, NULL)) + elog(ERROR, "could not determine server certificate signature algorithm"); + + /* + * The TLS server's certificate bytes need to be hashed with SHA-256 if + * its signature algorithm is MD5 or SHA-1 as per RFC 5929 + * (https://tools.ietf.org/html/rfc5929#section-4.1). If something else + * is used, the same hash as the signature algorithm is used. 
+ */ + switch (algo_nid) + { + case NID_md5: + case NID_sha1: + algo_type = EVP_sha256(); + break; + default: + algo_type = EVP_get_digestbynid(algo_nid); + if (algo_type == NULL) + elog(ERROR, "could not find digest for NID %s", + OBJ_nid2sn(algo_nid)); + break; + } + + /* generate and save the certificate hash */ + if (!X509_digest(server_cert, algo_type, hash, &hash_size)) + elog(ERROR, "could not generate server certificate hash"); + + cert_hash = palloc(hash_size); + memcpy(cert_hash, hash, hash_size); + *len = hash_size; + + return cert_hash; +} +#endif + /* * Convert an X509 subject name to a cstring. * diff --git a/src/backend/libpq/be-secure.c b/src/backend/libpq/be-secure.c index 53fefd1b29..4eb21fe89d 100644 --- a/src/backend/libpq/be-secure.c +++ b/src/backend/libpq/be-secure.c @@ -6,7 +6,7 @@ * message integrity and endpoint authentication. * * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * @@ -18,12 +18,10 @@ #include "postgres.h" -#include #include #include #include #include -#include #include #include #ifdef HAVE_NETINET_TCP_H @@ -40,11 +38,14 @@ #include "storage/proc.h" +char *ssl_library; char *ssl_cert_file; char *ssl_key_file; char *ssl_ca_file; char *ssl_crl_file; char *ssl_dh_params_file; +char *ssl_passphrase_command; +bool ssl_passphrase_command_supports_reload; #ifdef USE_SSL bool ssl_loaded_verify_locations = false; @@ -114,6 +115,10 @@ secure_open_server(Port *port) #ifdef USE_SSL r = be_tls_open_server(port); + + ereport(DEBUG2, + (errmsg("SSL connection from \"%s\"", + port->peer_cn ? port->peer_cn : "(anonymous)"))); #endif return r; @@ -140,6 +145,9 @@ secure_read(Port *port, void *ptr, size_t len) ssize_t n; int waitfor; + /* Deal with any already-pending interrupt condition. */ + ProcessClientReadInterrupt(false); + retry: #ifdef USE_SSL waitfor = 0; @@ -204,9 +212,8 @@ secure_read(Port *port, void *ptr, size_t len) } /* - * Process interrupts that happened while (or before) receiving. Note that - * we signal that we're not blocking, which will prevent some types of - * interrupts from being processed. + * Process interrupts that happened during a successful (or non-blocking, + * or hard-failed) read. */ ProcessClientReadInterrupt(false); @@ -243,6 +250,9 @@ secure_write(Port *port, void *ptr, size_t len) ssize_t n; int waitfor; + /* Deal with any already-pending interrupt condition. */ + ProcessClientWriteInterrupt(false); + retry: waitfor = 0; #ifdef USE_SSL @@ -282,17 +292,16 @@ secure_write(Port *port, void *ptr, size_t len) /* * We'll retry the write. Most likely it will return immediately - * because there's still no data available, and we'll wait for the - * socket to become ready again. + * because there's still no buffer space available, and we'll wait + * for the socket to become ready again. */ } goto retry; } /* - * Process interrupts that happened while (or before) sending. Note that - * we signal that we're not blocking, which will prevent some types of - * interrupts from being processed. + * Process interrupts that happened during a successful (or non-blocking, + * or hard-failed) write. 
*/ ProcessClientWriteInterrupt(false); diff --git a/src/backend/libpq/crypt.c b/src/backend/libpq/crypt.c index 1715c52462..2c5ce4a47e 100644 --- a/src/backend/libpq/crypt.c +++ b/src/backend/libpq/crypt.c @@ -4,7 +4,7 @@ * Functions for dealing with encrypted passwords stored in * pg_authid.rolpassword. * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/backend/libpq/crypt.c diff --git a/src/backend/libpq/hba.c b/src/backend/libpq/hba.c index 42afead9fd..1a65ec87bd 100644 --- a/src/backend/libpq/hba.c +++ b/src/backend/libpq/hba.c @@ -5,7 +5,7 @@ * wherein you authenticate a user by seeing what IP address the system * says he comes from and choosing authentication method based on it). * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * @@ -126,7 +126,7 @@ static const char *const UserAuthName[] = "ident", "password", "md5", - "scram-sha256", + "scram-sha-256", "gss", "sspi", "pam", @@ -144,8 +144,8 @@ static List *tokenize_inc_file(List *tokens, const char *outer_filename, const char *inc_filename, int elevel, char **err_msg); static bool parse_hba_auth_opt(char *name, char *val, HbaLine *hbaline, int elevel, char **err_msg); -static bool verify_option_list_length(List *options, char *optionname, - List *masters, char *mastername, int line_num); +static bool verify_option_list_length(List *options, const char *optionname, + List *masters, const char *mastername, int line_num); static ArrayType *gethba_options(HbaLine *hba); static void fill_hba_line(Tuplestorestate *tuple_store, TupleDesc tupdesc, int lineno, HbaLine *hba, const char *err_msg); @@ -187,9 +187,9 @@ pg_isblank(const char c) * set *err_msg to a string describing the error. Currently the only * possible error is token too long for buf. * - * If successful: store null-terminated token at *buf and return TRUE. - * If no more tokens on line: set *buf = '\0' and return FALSE. - * If error: fill buf with truncated or misformatted token and return FALSE. + * If successful: store null-terminated token at *buf and return true. + * If no more tokens on line: set *buf = '\0' and return false. + * If error: fill buf with truncated or misformatted token and return false. */ static bool next_token(char **lineptr, char *buf, int bufsz, @@ -1505,22 +1505,24 @@ parse_hba_line(TokenizedLine *tok_line, int elevel) /* * LDAP can operate in two modes: either with a direct bind, using * ldapprefix and ldapsuffix, or using a search+bind, using - * ldapbasedn, ldapbinddn, ldapbindpasswd and ldapsearchattribute. - * Disallow mixing these parameters. + * ldapbasedn, ldapbinddn, ldapbindpasswd and one of + * ldapsearchattribute or ldapsearchfilter. Disallow mixing these + * parameters. 
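
The two LDAP modes this comment distinguishes look roughly like this in pg_hba.conf; server names, DNs, and the filter are illustrative:

# search+bind, with the new ldapsearchfilter option in place of a single search attribute
host  all  all  0.0.0.0/0  ldap  ldapserver=ldap.example.net ldapbasedn="dc=example,dc=net" ldapsearchfilter="(|(uid=$username)(mail=$username))"
# simple bind, building the DN from ldapprefix/ldapsuffix
host  all  all  0.0.0.0/0  ldap  ldapserver=ldap.example.net ldapprefix="cn=" ldapsuffix=", dc=example, dc=net"
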
*/ if (parsedline->ldapprefix || parsedline->ldapsuffix) { if (parsedline->ldapbasedn || parsedline->ldapbinddn || parsedline->ldapbindpasswd || - parsedline->ldapsearchattribute) + parsedline->ldapsearchattribute || + parsedline->ldapsearchfilter) { ereport(elevel, (errcode(ERRCODE_CONFIG_FILE_ERROR), - errmsg("cannot use ldapbasedn, ldapbinddn, ldapbindpasswd, ldapsearchattribute, or ldapurl together with ldapprefix"), + errmsg("cannot use ldapbasedn, ldapbinddn, ldapbindpasswd, ldapsearchattribute, ldapsearchfilter, or ldapurl together with ldapprefix"), errcontext("line %d of configuration file \"%s\"", line_num, HbaFileName))); - *err_msg = "cannot use ldapbasedn, ldapbinddn, ldapbindpasswd, ldapsearchattribute, or ldapurl together with ldapprefix"; + *err_msg = "cannot use ldapbasedn, ldapbinddn, ldapbindpasswd, ldapsearchattribute, ldapsearchfilter, or ldapurl together with ldapprefix"; return NULL; } } @@ -1534,6 +1536,22 @@ parse_hba_line(TokenizedLine *tok_line, int elevel) *err_msg = "authentication method \"ldap\" requires argument \"ldapbasedn\", \"ldapprefix\", or \"ldapsuffix\" to be set"; return NULL; } + + /* + * When using search+bind, you can either use a simple attribute + * (defaulting to "uid") or a fully custom search filter. You can't + * do both. + */ + if (parsedline->ldapsearchattribute && parsedline->ldapsearchfilter) + { + ereport(elevel, + (errcode(ERRCODE_CONFIG_FILE_ERROR), + errmsg("cannot use ldapsearchattribute together with ldapsearchfilter"), + errcontext("line %d of configuration file \"%s\"", + line_num, HbaFileName))); + *err_msg = "cannot use ldapsearchattribute together with ldapsearchfilter"; + return NULL; + } } if (parsedline->auth_method == uaRADIUS) @@ -1599,7 +1617,7 @@ parse_hba_line(TokenizedLine *tok_line, int elevel) static bool -verify_option_list_length(List *options, char *optionname, List *masters, char *mastername, int line_num) +verify_option_list_length(List *options, const char *optionname, List *masters, const char *mastername, int line_num) { if (list_length(options) == 0 || list_length(options) == 1 || @@ -1608,7 +1626,7 @@ verify_option_list_length(List *options, char *optionname, List *masters, char * ereport(LOG, (errcode(ERRCODE_CONFIG_FILE_ERROR), - errmsg("the number of %s (%i) must be 1 or the same as the number of %s (%i)", + errmsg("the number of %s (%d) must be 1 or the same as the number of %s (%d)", optionname, list_length(options), mastername, @@ -1710,7 +1728,8 @@ parse_hba_auth_opt(char *name, char *val, HbaLine *hbaline, return false; } - if (strcmp(urldata->lud_scheme, "ldap") != 0) + if (strcmp(urldata->lud_scheme, "ldap") != 0 && + strcmp(urldata->lud_scheme, "ldaps") != 0) { ereport(elevel, (errcode(ERRCODE_CONFIG_FILE_ERROR), @@ -1721,22 +1740,19 @@ parse_hba_auth_opt(char *name, char *val, HbaLine *hbaline, return false; } - hbaline->ldapserver = pstrdup(urldata->lud_host); + if (urldata->lud_scheme) + hbaline->ldapscheme = pstrdup(urldata->lud_scheme); + if (urldata->lud_host) + hbaline->ldapserver = pstrdup(urldata->lud_host); hbaline->ldapport = urldata->lud_port; - hbaline->ldapbasedn = pstrdup(urldata->lud_dn); + if (urldata->lud_dn) + hbaline->ldapbasedn = pstrdup(urldata->lud_dn); if (urldata->lud_attrs) hbaline->ldapsearchattribute = pstrdup(urldata->lud_attrs[0]); /* only use first one */ hbaline->ldapscope = urldata->lud_scope; if (urldata->lud_filter) - { - ereport(elevel, - (errcode(ERRCODE_CONFIG_FILE_ERROR), - errmsg("filters not supported in LDAP URLs"))); - *err_msg = "filters not supported 
in LDAP URLs"; - ldap_free_urldesc(urldata); - return false; - } + hbaline->ldapsearchfilter = pstrdup(urldata->lud_filter); ldap_free_urldesc(urldata); #else /* not OpenLDAP */ ereport(elevel, @@ -1753,6 +1769,17 @@ parse_hba_auth_opt(char *name, char *val, HbaLine *hbaline, else hbaline->ldaptls = false; } + else if (strcmp(name, "ldapscheme") == 0) + { + REQUIRE_AUTH_OPTION(uaLDAP, "ldapscheme", "ldap"); + if (strcmp(val, "ldap") != 0 && strcmp(val, "ldaps") != 0) + ereport(elevel, + (errcode(ERRCODE_CONFIG_FILE_ERROR), + errmsg("invalid ldapscheme value: \"%s\"", val), + errcontext("line %d of configuration file \"%s\"", + line_num, HbaFileName))); + hbaline->ldapscheme = pstrdup(val); + } else if (strcmp(name, "ldapserver") == 0) { REQUIRE_AUTH_OPTION(uaLDAP, "ldapserver", "ldap"); @@ -1788,6 +1815,11 @@ parse_hba_auth_opt(char *name, char *val, HbaLine *hbaline, REQUIRE_AUTH_OPTION(uaLDAP, "ldapsearchattribute", "ldap"); hbaline->ldapsearchattribute = pstrdup(val); } + else if (strcmp(name, "ldapsearchfilter") == 0) + { + REQUIRE_AUTH_OPTION(uaLDAP, "ldapsearchfilter", "ldap"); + hbaline->ldapsearchfilter = pstrdup(val); + } else if (strcmp(name, "ldapbasedn") == 0) { REQUIRE_AUTH_OPTION(uaLDAP, "ldapbasedn", "ldap"); @@ -2266,6 +2298,11 @@ gethba_options(HbaLine *hba) CStringGetTextDatum(psprintf("ldapsearchattribute=%s", hba->ldapsearchattribute)); + if (hba->ldapsearchfilter) + options[noptions++] = + CStringGetTextDatum(psprintf("ldapsearchfilter=%s", + hba->ldapsearchfilter)); + if (hba->ldapscope) options[noptions++] = CStringGetTextDatum(psprintf("ldapscope=%d", hba->ldapscope)); diff --git a/src/backend/libpq/ifaddr.c b/src/backend/libpq/ifaddr.c index 53bf6bcd80..274c084362 100644 --- a/src/backend/libpq/ifaddr.c +++ b/src/backend/libpq/ifaddr.c @@ -3,7 +3,7 @@ * ifaddr.c * IP netmask calculations, and enumerating network interfaces. * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * @@ -27,10 +27,10 @@ #ifdef HAVE_NETINET_TCP_H #include #endif -#include #include #include "libpq/ifaddr.h" +#include "port/pg_bswap.h" static int range_sockaddr_AF_INET(const struct sockaddr_in *addr, const struct sockaddr_in *netaddr, @@ -144,7 +144,7 @@ pg_sockaddr_cidr_mask(struct sockaddr_storage *mask, char *numbits, int family) & 0xffffffffUL; else maskl = 0; - mask4.sin_addr.s_addr = htonl(maskl); + mask4.sin_addr.s_addr = pg_hton32(maskl); memcpy(mask, &mask4, sizeof(mask4)); break; } @@ -568,7 +568,7 @@ pg_foreach_ifaddr(PgIfAddrCallback callback, void *cb_data) /* addr 127.0.0.1/8 */ memset(&addr, 0, sizeof(addr)); addr.sin_family = AF_INET; - addr.sin_addr.s_addr = ntohl(0x7f000001); + addr.sin_addr.s_addr = pg_ntoh32(0x7f000001); memset(&mask, 0, sizeof(mask)); pg_sockaddr_cidr_mask(&mask, "8", AF_INET); run_ifaddr_callback(callback, cb_data, diff --git a/src/backend/libpq/pqcomm.c b/src/backend/libpq/pqcomm.c index 4452ea4228..0c9593d4cc 100644 --- a/src/backend/libpq/pqcomm.c +++ b/src/backend/libpq/pqcomm.c @@ -27,7 +27,7 @@ * the backend's "backend/libpq" is quite separate from "interfaces/libpq". * All that remains is similarities of names to trap the unwary... 
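
The ifaddr.c hunk above stores the computed IPv4 netmask with pg_hton32(). A standalone sketch of the prefix-length-to-mask computation it feeds, IPv4 only:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Host-order IPv4 netmask for a CIDR prefix length. */
static uint32_t
cidr_mask(int bits)
{
    if (bits <= 0)
        return 0;
    if (bits >= 32)
        return 0xFFFFFFFFu;
    return 0xFFFFFFFFu << (32 - bits);
}

int
main(void)
{
    printf("/8  -> 0x%08" PRIx32 "\n", cidr_mask(8));   /* 0xff000000 */
    printf("/24 -> 0x%08" PRIx32 "\n", cidr_mask(24));  /* 0xffffff00 */
    return 0;
}
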
* - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/backend/libpq/pqcomm.c @@ -81,7 +81,6 @@ #ifdef HAVE_NETINET_TCP_H #include #endif -#include #ifdef HAVE_UTIME_H #include #endif @@ -92,6 +91,7 @@ #include "common/ip.h" #include "libpq/libpq.h" #include "miscadmin.h" +#include "port/pg_bswap.h" #include "storage/ipc.h" #include "utils/guc.h" #include "utils/memutils.h" @@ -170,7 +170,7 @@ static int Lock_AF_UNIX(char *unixSocketDir, char *unixSocketPath); static int Setup_AF_UNIX(char *sock_path); #endif /* HAVE_UNIX_SOCKETS */ -static PQcommMethods PqCommSocketMethods = { +static const PQcommMethods PqCommSocketMethods = { socket_comm_reset, socket_flush, socket_flush_if_writable, @@ -181,7 +181,7 @@ static PQcommMethods PqCommSocketMethods = { socket_endcopyout }; -PQcommMethods *PqCommMethods = &PqCommSocketMethods; +const PQcommMethods *PqCommMethods = &PqCommSocketMethods; WaitEventSet *FeBeWaitSet; @@ -914,7 +914,7 @@ RemoveSocketFiles(void) /* -------------------------------- * socket_set_nonblocking - set socket blocking/non-blocking * - * Sets the socket non-blocking if nonblocking is TRUE, or sets it + * Sets the socket non-blocking if nonblocking is true, or sets it * blocking otherwise. * -------------------------------- */ @@ -1286,7 +1286,7 @@ pq_getmessage(StringInfo s, int maxlen) return EOF; } - len = ntohl(len); + len = pg_ntoh32(len); if (len < 4 || (maxlen > 0 && len > maxlen)) @@ -1569,7 +1569,7 @@ socket_putmessage(char msgtype, const char *s, size_t len) { uint32 n32; - n32 = htonl((uint32) (len + 4)); + n32 = pg_hton32((uint32) (len + 4)); if (internal_putbytes((char *) &n32, 4)) goto fail; } diff --git a/src/backend/libpq/pqformat.c b/src/backend/libpq/pqformat.c index c8cf67c041..1c7e99019d 100644 --- a/src/backend/libpq/pqformat.c +++ b/src/backend/libpq/pqformat.c @@ -21,7 +21,7 @@ * are different. * * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/backend/libpq/pqformat.c @@ -72,12 +72,11 @@ #include "postgres.h" #include -#include -#include #include "libpq/libpq.h" #include "libpq/pqformat.h" #include "mb/pg_wchar.h" +#include "port/pg_bswap.h" /* -------------------------------- @@ -98,13 +97,24 @@ pq_beginmessage(StringInfo buf, char msgtype) } /* -------------------------------- - * pq_sendbyte - append a raw byte to a StringInfo buffer + + * pq_beginmessage_reuse - initialize for sending a message, reuse buffer + * + * This requires the buffer to be allocated in a sufficiently long-lived + * memory context. * -------------------------------- */ void -pq_sendbyte(StringInfo buf, int byt) +pq_beginmessage_reuse(StringInfo buf, char msgtype) { - appendStringInfoCharMacro(buf, byt); + resetStringInfo(buf); + + /* + * We stash the message type into the buffer's cursor field, expecting + * that the pq_sendXXX routines won't touch it. We could alternatively + * make it the first byte of the buffer contents, but this seems easier. 
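
For context on the length handling in socket_putmessage() and pq_getmessage() above: a frontend/backend protocol message is one type byte followed by a 4-byte big-endian length that counts itself plus the payload. A standalone sketch of that framing (buffer sizes illustrative):

#include <stdint.h>
#include <string.h>

/* Frame one protocol message into out; returns the framed size, or 0 if it
 * does not fit. */
static size_t
frame_message(uint8_t *out, size_t outsize,
              char msgtype, const uint8_t *payload, uint32_t paylen)
{
    uint32_t    n = paylen + 4;     /* length field counts itself */

    if (outsize < 1 + 4 + (size_t) paylen)
        return 0;
    out[0] = (uint8_t) msgtype;
    out[1] = (uint8_t) (n >> 24);
    out[2] = (uint8_t) (n >> 16);
    out[3] = (uint8_t) (n >> 8);
    out[4] = (uint8_t) n;
    memcpy(out + 5, payload, paylen);
    return 1 + 4 + paylen;
}
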
+ */ + buf->cursor = msgtype; } /* -------------------------------- @@ -114,6 +124,7 @@ pq_sendbyte(StringInfo buf, int byt) void pq_sendbytes(StringInfo buf, const char *data, int datalen) { + /* use variant that maintains a trailing null-byte, out of caution */ appendBinaryStringInfo(buf, data, datalen); } @@ -138,14 +149,14 @@ pq_sendcountedtext(StringInfo buf, const char *str, int slen, if (p != str) /* actual conversion has been done? */ { slen = strlen(p); - pq_sendint(buf, slen + extra, 4); - appendBinaryStringInfo(buf, p, slen); + pq_sendint32(buf, slen + extra); + appendBinaryStringInfoNT(buf, p, slen); pfree(p); } else { - pq_sendint(buf, slen + extra, 4); - appendBinaryStringInfo(buf, str, slen); + pq_sendint32(buf, slen + extra); + appendBinaryStringInfoNT(buf, str, slen); } } @@ -192,11 +203,11 @@ pq_sendstring(StringInfo buf, const char *str) if (p != str) /* actual conversion has been done? */ { slen = strlen(p); - appendBinaryStringInfo(buf, p, slen + 1); + appendBinaryStringInfoNT(buf, p, slen + 1); pfree(p); } else - appendBinaryStringInfo(buf, str, slen + 1); + appendBinaryStringInfoNT(buf, str, slen + 1); } /* -------------------------------- @@ -228,61 +239,6 @@ pq_send_ascii_string(StringInfo buf, const char *str) appendStringInfoChar(buf, '\0'); } -/* -------------------------------- - * pq_sendint - append a binary integer to a StringInfo buffer - * -------------------------------- - */ -void -pq_sendint(StringInfo buf, int i, int b) -{ - unsigned char n8; - uint16 n16; - uint32 n32; - - switch (b) - { - case 1: - n8 = (unsigned char) i; - appendBinaryStringInfo(buf, (char *) &n8, 1); - break; - case 2: - n16 = htons((uint16) i); - appendBinaryStringInfo(buf, (char *) &n16, 2); - break; - case 4: - n32 = htonl((uint32) i); - appendBinaryStringInfo(buf, (char *) &n32, 4); - break; - default: - elog(ERROR, "unsupported integer size %d", b); - break; - } -} - -/* -------------------------------- - * pq_sendint64 - append a binary 8-byte int to a StringInfo buffer - * - * It is tempting to merge this with pq_sendint, but we'd have to make the - * argument int64 for all data widths --- that could be a big performance - * hit on machines where int64 isn't efficient. - * -------------------------------- - */ -void -pq_sendint64(StringInfo buf, int64 i) -{ - uint32 n32; - - /* High order half first, since we're doing MSB-first */ - n32 = (uint32) (i >> 32); - n32 = htonl(n32); - appendBinaryStringInfo(buf, (char *) &n32, 4); - - /* Now the low order half */ - n32 = (uint32) i; - n32 = htonl(n32); - appendBinaryStringInfo(buf, (char *) &n32, 4); -} - /* -------------------------------- * pq_sendfloat4 - append a float4 to a StringInfo buffer * @@ -304,9 +260,7 @@ pq_sendfloat4(StringInfo buf, float4 f) } swap; swap.f = f; - swap.i = htonl(swap.i); - - appendBinaryStringInfo(buf, (char *) &swap.i, 4); + pq_sendint32(buf, swap.i); } /* -------------------------------- @@ -350,6 +304,21 @@ pq_endmessage(StringInfo buf) buf->data = NULL; } +/* -------------------------------- + * pq_endmessage_reuse - send the completed message to the frontend + * + * The data buffer is *not* freed, allowing to reuse the buffer with + * pg_beginmessage_reuse. 
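
The removed pq_sendint64() above built the big-endian representation from two htonl'd halves; pg_hton64/pg_ntoh64 produce the same wire format in one step. A portable standalone sketch of that encoding and its inverse, independent of host endianness:

#include <stdint.h>

/* Write v into p in network (big-endian) byte order. */
static void
put_u64_be(uint8_t *p, uint64_t v)
{
    for (int i = 7; i >= 0; i--)
    {
        p[i] = (uint8_t) v;
        v >>= 8;
    }
}

/* Read a network-order 64-bit value from p. */
static uint64_t
get_u64_be(const uint8_t *p)
{
    uint64_t    v = 0;

    for (int i = 0; i < 8; i++)
        v = (v << 8) | p[i];
    return v;
}
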
+ -------------------------------- + */ + +void +pq_endmessage_reuse(StringInfo buf) +{ + /* msgtype was saved in cursor field */ + (void) pq_putmessage(buf->cursor, buf->data, buf->len); +} + /* -------------------------------- * pq_begintypsend - initialize for constructing a bytea result @@ -460,11 +429,11 @@ pq_getmsgint(StringInfo msg, int b) break; case 2: pq_copymsgbytes(msg, (char *) &n16, 2); - result = ntohs(n16); + result = pg_ntoh16(n16); break; case 4: pq_copymsgbytes(msg, (char *) &n32, 4); - result = ntohl(n32); + result = pg_ntoh32(n32); break; default: elog(ERROR, "unsupported integer size %d", b); @@ -485,20 +454,11 @@ pq_getmsgint(StringInfo msg, int b) int64 pq_getmsgint64(StringInfo msg) { - int64 result; - uint32 h32; - uint32 l32; - - pq_copymsgbytes(msg, (char *) &h32, 4); - pq_copymsgbytes(msg, (char *) &l32, 4); - h32 = ntohl(h32); - l32 = ntohl(l32); + uint64 n64; - result = h32; - result <<= 32; - result |= l32; + pq_copymsgbytes(msg, (char *) &n64, sizeof(n64)); - return result; + return pg_ntoh64(n64); } /* -------------------------------- diff --git a/src/backend/libpq/pqmq.c b/src/backend/libpq/pqmq.c index 8fbc03819d..6eaed5bf0c 100644 --- a/src/backend/libpq/pqmq.c +++ b/src/backend/libpq/pqmq.c @@ -3,7 +3,7 @@ * pqmq.c * Use the frontend/backend protocol for communication over a shm_mq * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/backend/libpq/pqmq.c @@ -21,7 +21,6 @@ #include "tcop/tcopprot.h" #include "utils/builtins.h" -static shm_mq *pq_mq; static shm_mq_handle *pq_mq_handle; static bool pq_mq_busy = false; static pid_t pq_mq_parallel_master_pid = 0; @@ -37,7 +36,7 @@ static void mq_putmessage_noblock(char msgtype, const char *s, size_t len); static void mq_startcopyout(void); static void mq_endcopyout(bool errorAbort); -static PQcommMethods PqCommMqMethods = { +static const PQcommMethods PqCommMqMethods = { mq_comm_reset, mq_flush, mq_flush_if_writable, @@ -56,7 +55,6 @@ void pq_redirect_to_shm_mq(dsm_segment *seg, shm_mq_handle *mqh) { PqCommMethods = &PqCommMqMethods; - pq_mq = shm_mq_get_queue(mqh); pq_mq_handle = mqh; whereToSendOutput = DestRemote; FrontendProtocol = PG_PROTOCOL_LATEST; @@ -70,7 +68,6 @@ pq_redirect_to_shm_mq(dsm_segment *seg, shm_mq_handle *mqh) static void pq_cleanup_redirect_to_shm_mq(dsm_segment *seg, Datum arg) { - pq_mq = NULL; pq_mq_handle = NULL; whereToSendOutput = DestNone; } @@ -135,9 +132,8 @@ mq_putmessage(char msgtype, const char *s, size_t len) */ if (pq_mq_busy) { - if (pq_mq != NULL) - shm_mq_detach(pq_mq); - pq_mq = NULL; + if (pq_mq_handle != NULL) + shm_mq_detach(pq_mq_handle); pq_mq_handle = NULL; return EOF; } @@ -148,7 +144,7 @@ mq_putmessage(char msgtype, const char *s, size_t len) * be generated late in the shutdown sequence, after all DSMs have already * been detached. 
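The pq_beginmessage_reuse/pq_endmessage_reuse pair added above is aimed at hot paths that emit many messages of the same shape, such as per-row output. Below is a minimal sketch of the intended call pattern, assuming a StringInfoData that lives in a sufficiently long-lived memory context; the function name, the 'D' message type, and the payload are illustrative only.

#include "postgres.h"
#include "lib/stringinfo.h"
#include "libpq/pqformat.h"

static StringInfoData reuse_buf;    /* must survive across messages, per the comment above */

static void
send_rows_sketch(void)
{
    int         i;

    initStringInfo(&reuse_buf);     /* one-time allocation of the reusable data buffer */

    for (i = 0; i < 3; i++)
    {
        pq_beginmessage_reuse(&reuse_buf, 'D');   /* msgtype is stashed in buf->cursor */
        pq_sendint32(&reuse_buf, (uint32) i);     /* message payload */
        pq_endmessage_reuse(&reuse_buf);          /* sent; the data buffer is kept, not freed */
    }
}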
*/ - if (pq_mq == NULL) + if (pq_mq_handle == NULL) return 0; pq_mq_busy = true; @@ -290,10 +286,10 @@ pq_parse_errornotice(StringInfo msg, ErrorData *edata) edata->hint = pstrdup(value); break; case PG_DIAG_STATEMENT_POSITION: - edata->cursorpos = pg_atoi(value, sizeof(int), '\0'); + edata->cursorpos = pg_strtoint32(value); break; case PG_DIAG_INTERNAL_POSITION: - edata->internalpos = pg_atoi(value, sizeof(int), '\0'); + edata->internalpos = pg_strtoint32(value); break; case PG_DIAG_INTERNAL_QUERY: edata->internalquery = pstrdup(value); @@ -320,7 +316,7 @@ pq_parse_errornotice(StringInfo msg, ErrorData *edata) edata->filename = pstrdup(value); break; case PG_DIAG_SOURCE_LINE: - edata->lineno = pg_atoi(value, sizeof(int), '\0'); + edata->lineno = pg_strtoint32(value); break; case PG_DIAG_SOURCE_FUNCTION: edata->funcname = pstrdup(value); diff --git a/src/backend/libpq/pqsignal.c b/src/backend/libpq/pqsignal.c index 476e883a68..a24de5d410 100644 --- a/src/backend/libpq/pqsignal.c +++ b/src/backend/libpq/pqsignal.c @@ -3,7 +3,7 @@ * pqsignal.c * Backend signal(2) support (see also src/port/pqsignal.c) * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * diff --git a/src/backend/main/main.c b/src/backend/main/main.c index 87b7d3bf65..38853e38eb 100644 --- a/src/backend/main/main.c +++ b/src/backend/main/main.c @@ -9,7 +9,7 @@ * proper FooMain() routine for the incarnation. * * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * @@ -61,6 +61,14 @@ main(int argc, char *argv[]) { bool do_check_root = true; + /* + * If supported on the current platform, set up a handler to be called if + * the backend/postmaster crashes with a fatal signal or exception. + */ +#if defined(WIN32) && defined(HAVE_MINIDUMP_TYPE) + pgwin32_install_crashdump_handler(); +#endif + progname = get_progname(argv[0]); /* @@ -81,14 +89,6 @@ main(int argc, char *argv[]) */ argv = save_ps_display_args(argc, argv); - /* - * If supported on the current platform, set up a handler to be called if - * the backend/postmaster crashes with a fatal signal or exception. - */ -#if defined(WIN32) && defined(HAVE_MINIDUMP_TYPE) - pgwin32_install_crashdump_handler(); -#endif - /* * Fire up essential subsystems: error and memory management * diff --git a/src/backend/nls.mk b/src/backend/nls.mk index 627492df17..5b245aaccf 100644 --- a/src/backend/nls.mk +++ b/src/backend/nls.mk @@ -1,6 +1,6 @@ # src/backend/nls.mk CATALOG_NAME = postgres -AVAIL_LANGUAGES = de es fr id it ja ko pl pt_BR ru zh_CN +AVAIL_LANGUAGES = de es fr id it ja ko pl pt_BR ru sv zh_CN GETTEXT_FILES = + gettext-files GETTEXT_TRIGGERS = $(BACKEND_COMMON_GETTEXT_TRIGGERS) \ GUC_check_errmsg GUC_check_errdetail GUC_check_errhint \ diff --git a/src/backend/nodes/bitmapset.c b/src/backend/nodes/bitmapset.c index bf8545d437..8ce253c88d 100644 --- a/src/backend/nodes/bitmapset.c +++ b/src/backend/nodes/bitmapset.c @@ -11,7 +11,7 @@ * bms_is_empty() in preference to testing for NULL.) 
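The ntohl()/htonl() call sites above now go through port/pg_bswap.h, which also offers 16- and 64-bit variants; that is what lets pq_getmsgint64 do a single 64-bit swap instead of stitching two 32-bit halves together. A rough sketch of the helpers as used here (the values are made up):

#include "postgres.h"
#include "port/pg_bswap.h"

static void
bswap_sketch(void)
{
    uint32      len = 123;
    uint32      wire32;
    uint64      wire64 = 0;

    wire32 = pg_hton32(len + 4);    /* host -> big-endian, as in socket_putmessage */
    len = pg_ntoh32(wire32);        /* big-endian -> host, as in pq_getmessage */
    (void) pg_ntoh64(wire64);       /* 64-bit variant used by the simplified pq_getmsgint64 */
}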
* * - * Copyright (c) 2003-2017, PostgreSQL Global Development Group + * Copyright (c) 2003-2018, PostgreSQL Global Development Group * * IDENTIFICATION * src/backend/nodes/bitmapset.c @@ -58,6 +58,9 @@ * rightmost_one_pos[x] gives the bit number (0-7) of the rightmost one bit * in a nonzero byte value x. The entry for x=0 is never used. * + * leftmost_one_pos[x] gives the bit number (0-7) of the leftmost one bit in a + * nonzero byte value x. The entry for x=0 is never used. + * * number_of_ones[x] gives the number of one-bits (0-8) in a byte value x. * * We could make these tables larger and reduce the number of iterations @@ -84,6 +87,25 @@ static const uint8 rightmost_one_pos[256] = { 4, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0 }; +static const uint8 leftmost_one_pos[256] = { + 0, 0, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3, + 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, + 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, + 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, + 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, + 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, + 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, + 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, + 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, + 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, + 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, + 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, + 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, + 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, + 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, + 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7 +}; + static const uint8 number_of_ones[256] = { 0, 1, 1, 2, 1, 2, 2, 3, 1, 2, 2, 3, 2, 3, 3, 4, 1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5, @@ -172,6 +194,50 @@ bms_equal(const Bitmapset *a, const Bitmapset *b) return true; } +/* + * bms_compare - qsort-style comparator for bitmapsets + * + * This guarantees to report values as equal iff bms_equal would say they are + * equal. Otherwise, the highest-numbered bit that is set in one value but + * not the other determines the result. (This rule means that, for example, + * {6} is greater than {5}, which seems plausible.) + */ +int +bms_compare(const Bitmapset *a, const Bitmapset *b) +{ + int shortlen; + int i; + + /* Handle cases where either input is NULL */ + if (a == NULL) + return bms_is_empty(b) ? 0 : -1; + else if (b == NULL) + return bms_is_empty(a) ? 0 : +1; + /* Handle cases where one input is longer than the other */ + shortlen = Min(a->nwords, b->nwords); + for (i = shortlen; i < a->nwords; i++) + { + if (a->words[i] != 0) + return +1; + } + for (i = shortlen; i < b->nwords; i++) + { + if (b->words[i] != 0) + return -1; + } + /* Process words in common */ + i = shortlen; + while (--i >= 0) + { + bitmapword aw = a->words[i]; + bitmapword bw = b->words[i]; + + if (aw != bw) + return (aw > bw) ? +1 : -1; + } + return 0; +} + /* * bms_make_singleton - build a bitmapset containing a single member */ @@ -558,8 +624,8 @@ bms_singleton_member(const Bitmapset *a) * bms_get_singleton_member * * Test whether the given set is a singleton. - * If so, set *member to the value of its sole member, and return TRUE. - * If not, return FALSE, without changing *member. + * If so, set *member to the value of its sole member, and return true. + * If not, return false, without changing *member. 
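A short sketch of the ordering rule documented for bms_compare above, built with bms_make_singleton; the particular members just mirror the {6} versus {5} example in the comment.

#include "postgres.h"
#include "nodes/bitmapset.h"

static void
bms_compare_sketch(void)
{
    Bitmapset  *five = bms_make_singleton(5);
    Bitmapset  *six = bms_make_singleton(6);

    Assert(bms_compare(five, five) == 0);   /* equal exactly when bms_equal says so */
    Assert(bms_compare(six, five) > 0);     /* highest differing bit decides */
    Assert(bms_compare(NULL, five) < 0);    /* NULL is treated as the empty set */
}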
* * This is more convenient and faster than calling bms_membership() and then * bms_singleton_member(), if we don't care about distinguishing empty sets @@ -784,6 +850,79 @@ bms_add_members(Bitmapset *a, const Bitmapset *b) return result; } +/* + * bms_add_range + * Add members in the range of 'lower' to 'upper' to the set. + * + * Note this could also be done by calling bms_add_member in a loop, however, + * using this function will be faster when the range is large as we work at + * the bitmapword level rather than at bit level. + */ +Bitmapset * +bms_add_range(Bitmapset *a, int lower, int upper) +{ + int lwordnum, + lbitnum, + uwordnum, + ushiftbits, + wordnum; + + /* do nothing if nothing is called for, without further checking */ + if (upper < lower) + return a; + + if (lower < 0) + elog(ERROR, "negative bitmapset member not allowed"); + uwordnum = WORDNUM(upper); + + if (a == NULL) + { + a = (Bitmapset *) palloc0(BITMAPSET_SIZE(uwordnum + 1)); + a->nwords = uwordnum + 1; + } + else if (uwordnum >= a->nwords) + { + int oldnwords = a->nwords; + int i; + + /* ensure we have enough words to store the upper bit */ + a = (Bitmapset *) repalloc(a, BITMAPSET_SIZE(uwordnum + 1)); + a->nwords = uwordnum + 1; + /* zero out the enlarged portion */ + for (i = oldnwords; i < a->nwords; i++) + a->words[i] = 0; + } + + wordnum = lwordnum = WORDNUM(lower); + + lbitnum = BITNUM(lower); + ushiftbits = BITS_PER_BITMAPWORD - (BITNUM(upper) + 1); + + /* + * Special case when lwordnum is the same as uwordnum we must perform the + * upper and lower masking on the word. + */ + if (lwordnum == uwordnum) + { + a->words[lwordnum] |= ~(bitmapword) (((bitmapword) 1 << lbitnum) - 1) + & (~(bitmapword) 0) >> ushiftbits; + } + else + { + /* turn on lbitnum and all bits left of it */ + a->words[wordnum++] |= ~(bitmapword) (((bitmapword) 1 << lbitnum) - 1); + + /* turn on all bits for any intermediate words */ + while (wordnum < uwordnum) + a->words[wordnum++] = ~(bitmapword) 0; + + /* turn on upper's bit and all bits right of it. */ + a->words[uwordnum] |= (~(bitmapword) 0) >> ushiftbits; + } + + return a; +} + /* * bms_int_members - like bms_intersect, but left input is recycled */ @@ -972,6 +1111,80 @@ bms_next_member(const Bitmapset *a, int prevbit) return -2; } +/* + * bms_prev_member - find prev member of a set + * + * Returns largest member less than "prevbit", or -2 if there is none. + * "prevbit" must NOT be more than one above the highest possible bit that can + * be set at the Bitmapset at its current size. + * + * To ease finding the highest set bit for the initial loop, the special + * prevbit value of -1 can be passed to have the function find the highest + * valued member in the set. + * + * This is intended as support for iterating through the members of a set in + * reverse. The typical pattern is + * + * x = -1; + * while ((x = bms_prev_member(inputset, x)) >= 0) + * process member x; + * + * Notice that when there are no more members, we return -2, not -1 as you + * might expect. The rationale for that is to allow distinguishing the + * loop-not-started state (x == -1) from the loop-completed state (x == -2). + * It makes no difference in simple loop usage, but complex iteration logic + * might need such an ability. + */ + +int +bms_prev_member(const Bitmapset *a, int prevbit) +{ + int wordnum; + int ushiftbits; + bitmapword mask; + + /* + * If set is NULL or if there are no more bits to the right then we've + * nothing to do. 
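As its comment says, bms_add_range is just a faster equivalent of calling bms_add_member over the whole range, working a bitmapword at a time. A minimal equivalence sketch (the range 3..200 is arbitrary):

#include "postgres.h"
#include "nodes/bitmapset.h"

static void
bms_add_range_sketch(void)
{
    Bitmapset  *a = NULL;
    Bitmapset  *b = NULL;
    int         i;

    a = bms_add_range(a, 3, 200);   /* set bits 3..200 in one pass */

    for (i = 3; i <= 200; i++)      /* same result, one bit at a time */
        b = bms_add_member(b, i);

    Assert(bms_equal(a, b));
}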
+ */ + if (a == NULL || prevbit == 0) + return -2; + + /* transform -1 to the highest possible bit we could have set */ + if (prevbit == -1) + prevbit = a->nwords * BITS_PER_BITMAPWORD - 1; + else + prevbit--; + + ushiftbits = BITS_PER_BITMAPWORD - (BITNUM(prevbit) + 1); + mask = (~(bitmapword) 0) >> ushiftbits; + for (wordnum = WORDNUM(prevbit); wordnum >= 0; wordnum--) + { + bitmapword w = a->words[wordnum]; + + /* mask out bits left of prevbit */ + w &= mask; + + if (w != 0) + { + int result; + int shift = BITS_PER_BITMAPWORD - 8; + + result = wordnum * BITS_PER_BITMAPWORD; + + while ((w >> shift) == 0) + shift -= 8; + + result += shift + leftmost_one_pos[(w >> shift) & 255]; + return result; + } + + /* in subsequent words, consider all bits */ + mask = (~(bitmapword) 0); + } + return -2; +} + /* * bms_hash_value - compute a hash key for a Bitmapset * diff --git a/src/backend/nodes/copyfuncs.c b/src/backend/nodes/copyfuncs.c index 72041693df..db49968409 100644 --- a/src/backend/nodes/copyfuncs.c +++ b/src/backend/nodes/copyfuncs.c @@ -11,7 +11,7 @@ * be handled easily in a simple depth-first traversal. * * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * IDENTIFICATION @@ -87,17 +87,17 @@ _copyPlannedStmt(const PlannedStmt *from) COPY_SCALAR_FIELD(transientPlan); COPY_SCALAR_FIELD(dependsOnRole); COPY_SCALAR_FIELD(parallelModeNeeded); + COPY_SCALAR_FIELD(jitFlags); COPY_NODE_FIELD(planTree); COPY_NODE_FIELD(rtable); COPY_NODE_FIELD(resultRelations); - COPY_NODE_FIELD(nonleafResultRelations); COPY_NODE_FIELD(rootResultRelations); COPY_NODE_FIELD(subplans); COPY_BITMAPSET_FIELD(rewindPlanIDs); COPY_NODE_FIELD(rowMarks); COPY_NODE_FIELD(relationOids); COPY_NODE_FIELD(invalItems); - COPY_SCALAR_FIELD(nParamExec); + COPY_NODE_FIELD(paramExecTypes); COPY_NODE_FIELD(utilityStmt); COPY_LOCATION_FIELD(stmt_location); COPY_LOCATION_FIELD(stmt_len); @@ -203,7 +203,8 @@ _copyModifyTable(const ModifyTable *from) COPY_SCALAR_FIELD(operation); COPY_SCALAR_FIELD(canSetTag); COPY_SCALAR_FIELD(nominalRelation); - COPY_NODE_FIELD(partitioned_rels); + COPY_SCALAR_FIELD(rootRelation); + COPY_SCALAR_FIELD(partColsUpdated); COPY_NODE_FIELD(resultRelations); COPY_SCALAR_FIELD(resultRelIndex); COPY_SCALAR_FIELD(rootResultRelIndex); @@ -240,8 +241,9 @@ _copyAppend(const Append *from) /* * copy remainder of node */ - COPY_NODE_FIELD(partitioned_rels); COPY_NODE_FIELD(appendplans); + COPY_SCALAR_FIELD(first_partial_plan); + COPY_NODE_FIELD(part_prune_info); return newnode; } @@ -262,13 +264,13 @@ _copyMergeAppend(const MergeAppend *from) /* * copy remainder of node */ - COPY_NODE_FIELD(partitioned_rels); COPY_NODE_FIELD(mergeplans); COPY_SCALAR_FIELD(numCols); COPY_POINTER_FIELD(sortColIdx, from->numCols * sizeof(AttrNumber)); COPY_POINTER_FIELD(sortOperators, from->numCols * sizeof(Oid)); COPY_POINTER_FIELD(collations, from->numCols * sizeof(Oid)); COPY_POINTER_FIELD(nullsFirst, from->numCols * sizeof(bool)); + COPY_NODE_FIELD(part_prune_info); return newnode; } @@ -361,8 +363,10 @@ _copyGather(const Gather *from) * copy remainder of node */ COPY_SCALAR_FIELD(num_workers); + COPY_SCALAR_FIELD(rescan_param); COPY_SCALAR_FIELD(single_copy); COPY_SCALAR_FIELD(invisible); + COPY_BITMAPSET_FIELD(initParam); return newnode; } @@ -384,11 +388,13 @@ _copyGatherMerge(const GatherMerge *from) * copy remainder of node */ COPY_SCALAR_FIELD(num_workers); 
+ COPY_SCALAR_FIELD(rescan_param); COPY_SCALAR_FIELD(numCols); COPY_POINTER_FIELD(sortColIdx, from->numCols * sizeof(AttrNumber)); COPY_POINTER_FIELD(sortOperators, from->numCols * sizeof(Oid)); COPY_POINTER_FIELD(collations, from->numCols * sizeof(Oid)); COPY_POINTER_FIELD(nullsFirst, from->numCols * sizeof(bool)); + COPY_BITMAPSET_FIELD(initParam); return newnode; } @@ -1006,6 +1012,11 @@ _copyWindowAgg(const WindowAgg *from) COPY_SCALAR_FIELD(frameOptions); COPY_NODE_FIELD(startOffset); COPY_NODE_FIELD(endOffset); + COPY_SCALAR_FIELD(startInRangeFunc); + COPY_SCALAR_FIELD(endInRangeFunc); + COPY_SCALAR_FIELD(inRangeColl); + COPY_SCALAR_FIELD(inRangeAsc); + COPY_SCALAR_FIELD(inRangeNullsFirst); return newnode; } @@ -1052,6 +1063,7 @@ _copyHash(const Hash *from) COPY_SCALAR_FIELD(skewTable); COPY_SCALAR_FIELD(skewColumn); COPY_SCALAR_FIELD(skewInherit); + COPY_SCALAR_FIELD(rows_total); return newnode; } @@ -1162,6 +1174,69 @@ _copyPlanRowMark(const PlanRowMark *from) return newnode; } +static PartitionPruneInfo * +_copyPartitionPruneInfo(const PartitionPruneInfo *from) +{ + PartitionPruneInfo *newnode = makeNode(PartitionPruneInfo); + + COPY_NODE_FIELD(prune_infos); + COPY_BITMAPSET_FIELD(other_subplans); + + return newnode; +} + +static PartitionedRelPruneInfo * +_copyPartitionedRelPruneInfo(const PartitionedRelPruneInfo *from) +{ + PartitionedRelPruneInfo *newnode = makeNode(PartitionedRelPruneInfo); + + COPY_SCALAR_FIELD(rtindex); + COPY_NODE_FIELD(pruning_steps); + COPY_BITMAPSET_FIELD(present_parts); + COPY_SCALAR_FIELD(nparts); + COPY_SCALAR_FIELD(nexprs); + COPY_POINTER_FIELD(subplan_map, from->nparts * sizeof(int)); + COPY_POINTER_FIELD(subpart_map, from->nparts * sizeof(int)); + COPY_POINTER_FIELD(hasexecparam, from->nexprs * sizeof(bool)); + COPY_SCALAR_FIELD(do_initial_prune); + COPY_SCALAR_FIELD(do_exec_prune); + COPY_BITMAPSET_FIELD(execparamids); + + return newnode; +} + +/* + * _copyPartitionPruneStepOp + */ +static PartitionPruneStepOp * +_copyPartitionPruneStepOp(const PartitionPruneStepOp *from) +{ + PartitionPruneStepOp *newnode = makeNode(PartitionPruneStepOp); + + COPY_SCALAR_FIELD(step.step_id); + COPY_SCALAR_FIELD(opstrategy); + COPY_NODE_FIELD(exprs); + COPY_NODE_FIELD(cmpfns); + COPY_BITMAPSET_FIELD(nullkeys); + + return newnode; +} + +/* + * _copyPartitionPruneStepCombine + */ +static PartitionPruneStepCombine * +_copyPartitionPruneStepCombine(const PartitionPruneStepCombine *from) +{ + PartitionPruneStepCombine *newnode = makeNode(PartitionPruneStepCombine); + + COPY_SCALAR_FIELD(step.step_id); + COPY_SCALAR_FIELD(combineOp); + COPY_NODE_FIELD(source_stepids); + + return newnode; +} + /* * _copyPlanInvalItem */ @@ -1696,11 +1771,10 @@ _copyArrayCoerceExpr(const ArrayCoerceExpr *from) ArrayCoerceExpr *newnode = makeNode(ArrayCoerceExpr); COPY_NODE_FIELD(arg); - COPY_SCALAR_FIELD(elemfuncid); + COPY_NODE_FIELD(elemexpr); COPY_SCALAR_FIELD(resulttype); COPY_SCALAR_FIELD(resulttypmod); COPY_SCALAR_FIELD(resultcollid); - COPY_SCALAR_FIELD(isExplicit); COPY_SCALAR_FIELD(coerceformat); COPY_LOCATION_FIELD(location); @@ -2248,20 +2322,6 @@ _copyAppendRelInfo(const AppendRelInfo *from) return newnode; } -/* - * _copyPartitionedChildRelInfo - */ -static PartitionedChildRelInfo * -_copyPartitionedChildRelInfo(const PartitionedChildRelInfo *from) -{ - PartitionedChildRelInfo *newnode = makeNode(PartitionedChildRelInfo); - - COPY_SCALAR_FIELD(parent_relid); - COPY_NODE_FIELD(child_rels); - - return newnode; -} - /* * _copyPlaceHolderInfo */ @@ -2293,6 +2353,7 @@ 
_copyRangeTblEntry(const RangeTblEntry *from) COPY_SCALAR_FIELD(rtekind); COPY_SCALAR_FIELD(relid); COPY_SCALAR_FIELD(relkind); + COPY_SCALAR_FIELD(rellockmode); COPY_NODE_FIELD(tablesample); COPY_NODE_FIELD(subquery); COPY_SCALAR_FIELD(security_barrier); @@ -2405,6 +2466,11 @@ _copyWindowClause(const WindowClause *from) COPY_SCALAR_FIELD(frameOptions); COPY_NODE_FIELD(startOffset); COPY_NODE_FIELD(endOffset); + COPY_SCALAR_FIELD(startInRangeFunc); + COPY_SCALAR_FIELD(endInRangeFunc); + COPY_SCALAR_FIELD(inRangeColl); + COPY_SCALAR_FIELD(inRangeAsc); + COPY_SCALAR_FIELD(inRangeNullsFirst); COPY_SCALAR_FIELD(winref); COPY_SCALAR_FIELD(copiedOrder); @@ -2807,11 +2873,11 @@ _copyColumnDef(const ColumnDef *from) COPY_SCALAR_FIELD(is_local); COPY_SCALAR_FIELD(is_not_null); COPY_SCALAR_FIELD(is_from_type); - COPY_SCALAR_FIELD(is_from_parent); COPY_SCALAR_FIELD(storage); COPY_NODE_FIELD(raw_default); COPY_NODE_FIELD(cooked_default); COPY_SCALAR_FIELD(identity); + COPY_NODE_FIELD(identitySequence); COPY_NODE_FIELD(collClause); COPY_SCALAR_FIELD(collOid); COPY_NODE_FIELD(constraints); @@ -2836,6 +2902,7 @@ _copyConstraint(const Constraint *from) COPY_STRING_FIELD(cooked_expr); COPY_SCALAR_FIELD(generated_when); COPY_NODE_FIELD(keys); + COPY_NODE_FIELD(including); COPY_NODE_FIELD(exclusions); COPY_NODE_FIELD(options); COPY_STRING_FIELD(indexname); @@ -3085,6 +3152,7 @@ _copyAlterTableCmd(const AlterTableCmd *from) COPY_SCALAR_FIELD(subtype); COPY_STRING_FIELD(name); + COPY_SCALAR_FIELD(num); COPY_NODE_FIELD(newowner); COPY_NODE_FIELD(def); COPY_SCALAR_FIELD(behavior); @@ -3206,6 +3274,17 @@ _copyClosePortalStmt(const ClosePortalStmt *from) return newnode; } +static CallStmt * +_copyCallStmt(const CallStmt *from) +{ + CallStmt *newnode = makeNode(CallStmt); + + COPY_NODE_FIELD(funccall); + COPY_NODE_FIELD(funcexpr); + + return newnode; +} + static ClusterStmt * _copyClusterStmt(const ClusterStmt *from) { @@ -3213,7 +3292,7 @@ _copyClusterStmt(const ClusterStmt *from) COPY_NODE_FIELD(relation); COPY_STRING_FIELD(indexname); - COPY_SCALAR_FIELD(verbose); + COPY_SCALAR_FIELD(options); return newnode; } @@ -3363,9 +3442,11 @@ _copyIndexStmt(const IndexStmt *from) COPY_STRING_FIELD(idxname); COPY_NODE_FIELD(relation); + COPY_SCALAR_FIELD(relationId); COPY_STRING_FIELD(accessMethod); COPY_STRING_FIELD(tableSpace); COPY_NODE_FIELD(indexParams); + COPY_NODE_FIELD(indexIncludingParams); COPY_NODE_FIELD(options); COPY_NODE_FIELD(whereClause); COPY_NODE_FIELD(excludeOpNames); @@ -3393,6 +3474,7 @@ _copyCreateStatsStmt(const CreateStatsStmt *from) COPY_NODE_FIELD(stat_types); COPY_NODE_FIELD(exprs); COPY_NODE_FIELD(relations); + COPY_STRING_FIELD(stxcomment); COPY_SCALAR_FIELD(if_not_exists); return newnode; @@ -3403,12 +3485,12 @@ _copyCreateFunctionStmt(const CreateFunctionStmt *from) { CreateFunctionStmt *newnode = makeNode(CreateFunctionStmt); + COPY_SCALAR_FIELD(is_procedure); COPY_SCALAR_FIELD(replace); COPY_NODE_FIELD(funcname); COPY_NODE_FIELD(parameters); COPY_NODE_FIELD(returnType); COPY_NODE_FIELD(options); - COPY_NODE_FIELD(withClause); return newnode; } @@ -3431,6 +3513,7 @@ _copyAlterFunctionStmt(const AlterFunctionStmt *from) { AlterFunctionStmt *newnode = makeNode(AlterFunctionStmt); + COPY_SCALAR_FIELD(objtype); COPY_NODE_FIELD(func); COPY_NODE_FIELD(actions); @@ -3569,6 +3652,7 @@ _copyTransactionStmt(const TransactionStmt *from) COPY_SCALAR_FIELD(kind); COPY_NODE_FIELD(options); + COPY_STRING_FIELD(savepoint_name); COPY_STRING_FIELD(gid); return newnode; @@ -3764,7 +3848,18 @@ 
_copyVacuumStmt(const VacuumStmt *from) VacuumStmt *newnode = makeNode(VacuumStmt); COPY_SCALAR_FIELD(options); + COPY_NODE_FIELD(rels); + + return newnode; +} + +static VacuumRelation * +_copyVacuumRelation(const VacuumRelation *from) +{ + VacuumRelation *newnode = makeNode(VacuumRelation); + COPY_NODE_FIELD(relation); + COPY_SCALAR_FIELD(oid); COPY_NODE_FIELD(va_cols); return newnode; @@ -4447,6 +4542,9 @@ _copyPartitionBoundSpec(const PartitionBoundSpec *from) PartitionBoundSpec *newnode = makeNode(PartitionBoundSpec); COPY_SCALAR_FIELD(strategy); + COPY_SCALAR_FIELD(is_default); + COPY_SCALAR_FIELD(modulus); + COPY_SCALAR_FIELD(remainder); COPY_NODE_FIELD(listdatums); COPY_NODE_FIELD(lowerdatums); COPY_NODE_FIELD(upperdatums); @@ -4646,6 +4744,7 @@ _copyForeignKeyCacheInfo(const ForeignKeyCacheInfo *from) { ForeignKeyCacheInfo *newnode = makeNode(ForeignKeyCacheInfo); + COPY_SCALAR_FIELD(conoid); COPY_SCALAR_FIELD(conrelid); COPY_SCALAR_FIELD(confrelid); COPY_SCALAR_FIELD(nkeys); @@ -4815,6 +4914,18 @@ copyObjectImpl(const void *from) case T_PlanRowMark: retval = _copyPlanRowMark(from); break; + case T_PartitionPruneInfo: + retval = _copyPartitionPruneInfo(from); + break; + case T_PartitionedRelPruneInfo: + retval = _copyPartitionedRelPruneInfo(from); + break; + case T_PartitionPruneStepOp: + retval = _copyPartitionPruneStepOp(from); + break; + case T_PartitionPruneStepCombine: + retval = _copyPartitionPruneStepCombine(from); + break; case T_PlanInvalItem: retval = _copyPlanInvalItem(from); break; @@ -4994,9 +5105,6 @@ copyObjectImpl(const void *from) case T_AppendRelInfo: retval = _copyAppendRelInfo(from); break; - case T_PartitionedChildRelInfo: - retval = _copyPartitionedChildRelInfo(from); - break; case T_PlaceHolderInfo: retval = _copyPlaceHolderInfo(from); break; @@ -5086,6 +5194,9 @@ copyObjectImpl(const void *from) case T_ClosePortalStmt: retval = _copyClosePortalStmt(from); break; + case T_CallStmt: + retval = _copyCallStmt(from); + break; case T_ClusterStmt: retval = _copyClusterStmt(from); break; @@ -5212,6 +5323,9 @@ copyObjectImpl(const void *from) case T_VacuumStmt: retval = _copyVacuumStmt(from); break; + case T_VacuumRelation: + retval = _copyVacuumRelation(from); + break; case T_ExplainStmt: retval = _copyExplainStmt(from); break; diff --git a/src/backend/nodes/equalfuncs.c b/src/backend/nodes/equalfuncs.c index 8d92c03633..3a084b4d1f 100644 --- a/src/backend/nodes/equalfuncs.c +++ b/src/backend/nodes/equalfuncs.c @@ -18,7 +18,7 @@ * "x" to be considered equal() to another reference to "x" in the query. 
* * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * IDENTIFICATION @@ -513,11 +513,10 @@ static bool _equalArrayCoerceExpr(const ArrayCoerceExpr *a, const ArrayCoerceExpr *b) { COMPARE_NODE_FIELD(arg); - COMPARE_SCALAR_FIELD(elemfuncid); + COMPARE_NODE_FIELD(elemexpr); COMPARE_SCALAR_FIELD(resulttype); COMPARE_SCALAR_FIELD(resulttypmod); COMPARE_SCALAR_FIELD(resultcollid); - COMPARE_SCALAR_FIELD(isExplicit); COMPARE_COERCIONFORM_FIELD(coerceformat); COMPARE_LOCATION_FIELD(location); @@ -904,15 +903,6 @@ _equalAppendRelInfo(const AppendRelInfo *a, const AppendRelInfo *b) return true; } -static bool -_equalPartitionedChildRelInfo(const PartitionedChildRelInfo *a, const PartitionedChildRelInfo *b) -{ - COMPARE_SCALAR_FIELD(parent_relid); - COMPARE_NODE_FIELD(child_rels); - - return true; -} - static bool _equalPlaceHolderInfo(const PlaceHolderInfo *a, const PlaceHolderInfo *b) { @@ -1098,6 +1088,7 @@ _equalAlterTableCmd(const AlterTableCmd *a, const AlterTableCmd *b) { COMPARE_SCALAR_FIELD(subtype); COMPARE_STRING_FIELD(name); + COMPARE_SCALAR_FIELD(num); COMPARE_NODE_FIELD(newowner); COMPARE_NODE_FIELD(def); COMPARE_SCALAR_FIELD(behavior); @@ -1201,12 +1192,21 @@ _equalClosePortalStmt(const ClosePortalStmt *a, const ClosePortalStmt *b) return true; } +static bool +_equalCallStmt(const CallStmt *a, const CallStmt *b) +{ + COMPARE_NODE_FIELD(funccall); + COMPARE_NODE_FIELD(funcexpr); + + return true; +} + static bool _equalClusterStmt(const ClusterStmt *a, const ClusterStmt *b) { COMPARE_NODE_FIELD(relation); COMPARE_STRING_FIELD(indexname); - COMPARE_SCALAR_FIELD(verbose); + COMPARE_SCALAR_FIELD(options); return true; } @@ -1324,9 +1324,11 @@ _equalIndexStmt(const IndexStmt *a, const IndexStmt *b) { COMPARE_STRING_FIELD(idxname); COMPARE_NODE_FIELD(relation); + COMPARE_SCALAR_FIELD(relationId); COMPARE_STRING_FIELD(accessMethod); COMPARE_STRING_FIELD(tableSpace); COMPARE_NODE_FIELD(indexParams); + COMPARE_NODE_FIELD(indexIncludingParams); COMPARE_NODE_FIELD(options); COMPARE_NODE_FIELD(whereClause); COMPARE_NODE_FIELD(excludeOpNames); @@ -1352,6 +1354,7 @@ _equalCreateStatsStmt(const CreateStatsStmt *a, const CreateStatsStmt *b) COMPARE_NODE_FIELD(stat_types); COMPARE_NODE_FIELD(exprs); COMPARE_NODE_FIELD(relations); + COMPARE_STRING_FIELD(stxcomment); COMPARE_SCALAR_FIELD(if_not_exists); return true; @@ -1360,12 +1363,12 @@ _equalCreateStatsStmt(const CreateStatsStmt *a, const CreateStatsStmt *b) static bool _equalCreateFunctionStmt(const CreateFunctionStmt *a, const CreateFunctionStmt *b) { + COMPARE_SCALAR_FIELD(is_procedure); COMPARE_SCALAR_FIELD(replace); COMPARE_NODE_FIELD(funcname); COMPARE_NODE_FIELD(parameters); COMPARE_NODE_FIELD(returnType); COMPARE_NODE_FIELD(options); - COMPARE_NODE_FIELD(withClause); return true; } @@ -1384,6 +1387,7 @@ _equalFunctionParameter(const FunctionParameter *a, const FunctionParameter *b) static bool _equalAlterFunctionStmt(const AlterFunctionStmt *a, const AlterFunctionStmt *b) { + COMPARE_SCALAR_FIELD(objtype); COMPARE_NODE_FIELD(func); COMPARE_NODE_FIELD(actions); @@ -1500,6 +1504,7 @@ _equalTransactionStmt(const TransactionStmt *a, const TransactionStmt *b) { COMPARE_SCALAR_FIELD(kind); COMPARE_NODE_FIELD(options); + COMPARE_STRING_FIELD(savepoint_name); COMPARE_STRING_FIELD(gid); return true; @@ -1663,7 +1668,16 @@ static bool _equalVacuumStmt(const VacuumStmt *a, const 
VacuumStmt *b) { COMPARE_SCALAR_FIELD(options); + COMPARE_NODE_FIELD(rels); + + return true; +} + +static bool +_equalVacuumRelation(const VacuumRelation *a, const VacuumRelation *b) +{ COMPARE_NODE_FIELD(relation); + COMPARE_SCALAR_FIELD(oid); COMPARE_NODE_FIELD(va_cols); return true; @@ -2539,11 +2553,11 @@ _equalColumnDef(const ColumnDef *a, const ColumnDef *b) COMPARE_SCALAR_FIELD(is_local); COMPARE_SCALAR_FIELD(is_not_null); COMPARE_SCALAR_FIELD(is_from_type); - COMPARE_SCALAR_FIELD(is_from_parent); COMPARE_SCALAR_FIELD(storage); COMPARE_NODE_FIELD(raw_default); COMPARE_NODE_FIELD(cooked_default); COMPARE_SCALAR_FIELD(identity); + COMPARE_NODE_FIELD(identitySequence); COMPARE_NODE_FIELD(collClause); COMPARE_SCALAR_FIELD(collOid); COMPARE_NODE_FIELD(constraints); @@ -2566,6 +2580,7 @@ _equalConstraint(const Constraint *a, const Constraint *b) COMPARE_STRING_FIELD(cooked_expr); COMPARE_SCALAR_FIELD(generated_when); COMPARE_NODE_FIELD(keys); + COMPARE_NODE_FIELD(including); COMPARE_NODE_FIELD(exclusions); COMPARE_NODE_FIELD(options); COMPARE_STRING_FIELD(indexname); @@ -2614,6 +2629,7 @@ _equalRangeTblEntry(const RangeTblEntry *a, const RangeTblEntry *b) COMPARE_SCALAR_FIELD(rtekind); COMPARE_SCALAR_FIELD(relid); COMPARE_SCALAR_FIELD(relkind); + COMPARE_SCALAR_FIELD(rellockmode); COMPARE_NODE_FIELD(tablesample); COMPARE_NODE_FIELD(subquery); COMPARE_SCALAR_FIELD(security_barrier); @@ -2714,6 +2730,11 @@ _equalWindowClause(const WindowClause *a, const WindowClause *b) COMPARE_SCALAR_FIELD(frameOptions); COMPARE_NODE_FIELD(startOffset); COMPARE_NODE_FIELD(endOffset); + COMPARE_SCALAR_FIELD(startInRangeFunc); + COMPARE_SCALAR_FIELD(endInRangeFunc); + COMPARE_SCALAR_FIELD(inRangeColl); + COMPARE_SCALAR_FIELD(inRangeAsc); + COMPARE_SCALAR_FIELD(inRangeNullsFirst); COMPARE_SCALAR_FIELD(winref); COMPARE_SCALAR_FIELD(copiedOrder); @@ -2838,6 +2859,9 @@ static bool _equalPartitionBoundSpec(const PartitionBoundSpec *a, const PartitionBoundSpec *b) { COMPARE_SCALAR_FIELD(strategy); + COMPARE_SCALAR_FIELD(is_default); + COMPARE_SCALAR_FIELD(modulus); + COMPARE_SCALAR_FIELD(remainder); COMPARE_NODE_FIELD(listdatums); COMPARE_NODE_FIELD(lowerdatums); COMPARE_NODE_FIELD(upperdatums); @@ -3155,9 +3179,6 @@ equal(const void *a, const void *b) case T_AppendRelInfo: retval = _equalAppendRelInfo(a, b); break; - case T_PartitionedChildRelInfo: - retval = _equalPartitionedChildRelInfo(a, b); - break; case T_PlaceHolderInfo: retval = _equalPlaceHolderInfo(a, b); break; @@ -3234,6 +3255,9 @@ equal(const void *a, const void *b) case T_ClosePortalStmt: retval = _equalClosePortalStmt(a, b); break; + case T_CallStmt: + retval = _equalCallStmt(a, b); + break; case T_ClusterStmt: retval = _equalClusterStmt(a, b); break; @@ -3360,6 +3384,9 @@ equal(const void *a, const void *b) case T_VacuumStmt: retval = _equalVacuumStmt(a, b); break; + case T_VacuumRelation: + retval = _equalVacuumRelation(a, b); + break; case T_ExplainStmt: retval = _equalExplainStmt(a, b); break; diff --git a/src/backend/nodes/extensible.c b/src/backend/nodes/extensible.c index 01cd3c84fb..f301c11fa9 100644 --- a/src/backend/nodes/extensible.c +++ b/src/backend/nodes/extensible.c @@ -10,7 +10,7 @@ * and GetExtensibleNodeMethods to get information about a previously * registered type of extensible node. 
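These equalfuncs.c additions track the copyfuncs.c changes earlier in the patch field for field. A hedged sketch of the invariant the two files maintain together, using the new VacuumRelation node as an example (the helper name is made up):

#include "postgres.h"
#include "nodes/nodes.h"
#include "nodes/parsenodes.h"

static void
copy_equal_sketch(VacuumRelation *vrel)
{
    VacuumRelation *copy = copyObject(vrel);

    /* every field copied by _copyVacuumRelation must also be compared by _equalVacuumRelation */
    Assert(equal(vrel, copy));
}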
* - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * IDENTIFICATION diff --git a/src/backend/nodes/list.c b/src/backend/nodes/list.c index acaf4b5315..55fd4c359b 100644 --- a/src/backend/nodes/list.c +++ b/src/backend/nodes/list.c @@ -4,7 +4,7 @@ * implementation for PostgreSQL generic linked list package * * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * @@ -1011,8 +1011,11 @@ list_append_unique_oid(List *list, Oid datum) * via equal(). * * This is almost the same functionality as list_union(), but list1 is - * modified in-place rather than being copied. Note also that list2's cells - * are not inserted in list1, so the analogy to list_concat() isn't perfect. + * modified in-place rather than being copied. However, callers of this + * function may have strict ordering expectations -- i.e. that the relative + * order of those list2 elements that are not duplicates is preserved. Note + * also that list2's cells are not inserted in list1, so the analogy to + * list_concat() isn't perfect. */ List * list_concat_unique(List *list1, List *list2) @@ -1249,6 +1252,68 @@ list_copy_tail(const List *oldlist, int nskip) return newlist; } +/* + * Sort a list as though by qsort. + * + * A new list is built and returned. Like list_copy, this doesn't make + * fresh copies of any pointed-to data. + * + * The comparator function receives arguments of type ListCell **. + */ +List * +list_qsort(const List *list, list_qsort_comparator cmp) +{ + int len = list_length(list); + ListCell **list_arr; + List *newlist; + ListCell *newlist_prev; + ListCell *cell; + int i; + + /* Empty list is easy */ + if (len == 0) + return NIL; + + /* Flatten list cells into an array, so we can use qsort */ + list_arr = (ListCell **) palloc(sizeof(ListCell *) * len); + i = 0; + foreach(cell, list) + list_arr[i++] = cell; + + qsort(list_arr, len, sizeof(ListCell *), cmp); + + /* Construct new list (this code is much like list_copy) */ + newlist = new_list(list->type); + newlist->length = len; + + /* + * Copy over the data in the first cell; new_list() has already allocated + * the head cell itself + */ + newlist->head->data = list_arr[0]->data; + + newlist_prev = newlist->head; + for (i = 1; i < len; i++) + { + ListCell *newlist_cur; + + newlist_cur = (ListCell *) palloc(sizeof(*newlist_cur)); + newlist_cur->data = list_arr[i]->data; + newlist_prev->next = newlist_cur; + + newlist_prev = newlist_cur; + } + + newlist_prev->next = NULL; + newlist->tail = newlist_prev; + + /* Might as well free the workspace array */ + pfree(list_arr); + + check_list_invariants(newlist); + return newlist; +} + /* * Temporary compatibility functions * diff --git a/src/backend/nodes/makefuncs.c b/src/backend/nodes/makefuncs.c index 0755039da9..4a2e669a86 100644 --- a/src/backend/nodes/makefuncs.c +++ b/src/backend/nodes/makefuncs.c @@ -4,7 +4,7 @@ * creator functions for primitive nodes. The functions here are for * the most frequently created nodes. 
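Since list_qsort above hands the collected cells straight to qsort, the comparator has the ordinary qsort signature and each argument really points to a ListCell pointer. A sketch for a list of integers built with lappend_int (the comparator and wrapper names are invented):

#include "postgres.h"
#include "nodes/pg_list.h"

/* qsort-style comparator; each argument is really a ListCell ** */
static int
cmp_int_cells(const void *a, const void *b)
{
    int         av = lfirst_int(*(ListCell *const *) a);
    int         bv = lfirst_int(*(ListCell *const *) b);

    return (av > bv) - (av < bv);
}

static List *
sort_int_list_sketch(List *ints)
{
    /* a new list is returned; the pointed-to data is not copied */
    return list_qsort(ints, cmp_int_cells);
}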
* - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * @@ -120,8 +120,10 @@ makeVarFromTargetEntry(Index varno, * table entry, and varattno == 0 to signal that it references the whole * tuple. (Use of zero here is unclean, since it could easily be confused * with error cases, but it's not worth changing now.) The vartype indicates - * a rowtype; either a named composite type, or RECORD. This function - * encapsulates the logic for determining the correct rowtype OID to use. + * a rowtype; either a named composite type, or a domain over a named + * composite type (only possible if the RTE is a function returning that), + * or RECORD. This function encapsulates the logic for determining the + * correct rowtype OID to use. * * If allowScalar is true, then for the case where the RTE is a single function * returning a non-composite result type, we produce a normal Var referencing @@ -494,7 +496,6 @@ makeColumnDef(const char *colname, Oid typeOid, int32 typmod, Oid collOid) n->is_local = true; n->is_not_null = false; n->is_from_type = false; - n->is_from_parent = false; n->storage = 0; n->raw_default = NULL; n->cooked_default = NULL; @@ -611,3 +612,18 @@ makeGroupingSet(GroupingSetKind kind, List *content, int location) n->location = location; return n; } + +/* + * makeVacuumRelation - + * create a VacuumRelation node + */ +VacuumRelation * +makeVacuumRelation(RangeVar *relation, Oid oid, List *va_cols) +{ + VacuumRelation *v = makeNode(VacuumRelation); + + v->relation = relation; + v->oid = oid; + v->va_cols = va_cols; + return v; +} diff --git a/src/backend/nodes/nodeFuncs.c b/src/backend/nodes/nodeFuncs.c index e3eb0c5788..a10014f755 100644 --- a/src/backend/nodes/nodeFuncs.c +++ b/src/backend/nodes/nodeFuncs.c @@ -3,7 +3,7 @@ * nodeFuncs.c * Various general-purpose manipulations of Node trees * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * @@ -30,7 +30,7 @@ static int leftmostLoc(int loc1, int loc2); static bool fix_opfuncids_walker(Node *node, void *context); static bool planstate_walk_subplans(List *plans, bool (*walker) (), void *context); -static bool planstate_walk_members(List *plans, PlanState **planstates, +static bool planstate_walk_members(PlanState **planstates, int nplans, bool (*walker) (), void *context); @@ -663,7 +663,7 @@ strip_implicit_coercions(Node *node) * Test whether an expression returns a set result. * * Because we use expression_tree_walker(), this can also be applied to - * whole targetlists; it'll produce TRUE if any one of the tlist items + * whole targetlists; it'll produce true if any one of the tlist items * returns a set. */ bool @@ -1632,9 +1632,9 @@ set_sa_opfuncid(ScalarArrayOpExpr *opexpr) * check_functions_in_node - * apply checker() to each function OID contained in given expression node * - * Returns TRUE if the checker() function does; for nodes representing more - * than one function call, returns TRUE if the checker() function does so - * for any of those functions. Returns FALSE if node does not invoke any + * Returns true if the checker() function does; for nodes representing more + * than one function call, returns true if the checker() function does so + * for any of those functions. 
Returns false if node does not invoke any * SQL-visible function. Caller must not pass node == NULL. * * This function examines only the given node; it does not recurse into any @@ -1717,15 +1717,6 @@ check_functions_in_node(Node *node, check_function_callback checker, return true; } break; - case T_ArrayCoerceExpr: - { - ArrayCoerceExpr *expr = (ArrayCoerceExpr *) node; - - if (OidIsValid(expr->elemfuncid) && - checker(expr->elemfuncid, context)) - return true; - } - break; case T_RowCompareExpr: { RowCompareExpr *rcexpr = (RowCompareExpr *) node; @@ -2023,7 +2014,15 @@ expression_tree_walker(Node *node, case T_CoerceViaIO: return walker(((CoerceViaIO *) node)->arg, context); case T_ArrayCoerceExpr: - return walker(((ArrayCoerceExpr *) node)->arg, context); + { + ArrayCoerceExpr *acoerce = (ArrayCoerceExpr *) node; + + if (walker(acoerce->arg, context)) + return true; + if (walker(acoerce->elemexpr, context)) + return true; + } + break; case T_ConvertRowtypeExpr: return walker(((ConvertRowtypeExpr *) node)->arg, context); case T_CollateExpr: @@ -2147,6 +2146,17 @@ expression_tree_walker(Node *node, return true; } break; + case T_PartitionPruneStepOp: + { + PartitionPruneStepOp *opstep = (PartitionPruneStepOp *) node; + + if (walker((Node *) opstep->exprs, context)) + return true; + } + break; + case T_PartitionPruneStepCombine: + /* no expression subnodes */ + break; case T_JoinExpr: { JoinExpr *join = (JoinExpr *) node; @@ -2705,6 +2715,7 @@ expression_tree_mutator(Node *node, FLATCOPY(newnode, acoerce, ArrayCoerceExpr); MUTATE(newnode->arg, acoerce->arg, Expr *); + MUTATE(newnode->elemexpr, acoerce->elemexpr, Expr *); return (Node *) newnode; } break; @@ -2932,6 +2943,20 @@ expression_tree_mutator(Node *node, return (Node *) newnode; } break; + case T_PartitionPruneStepOp: + { + PartitionPruneStepOp *opstep = (PartitionPruneStepOp *) node; + PartitionPruneStepOp *newnode; + + FLATCOPY(newnode, opstep, PartitionPruneStepOp); + MUTATE(newnode->exprs, opstep->exprs, List *); + + return (Node *) newnode; + } + break; + case T_PartitionPruneStepCombine: + /* no expression sub-nodes */ + return (Node *) copyObject(node); case T_JoinExpr: { JoinExpr *join = (JoinExpr *) node; @@ -3723,32 +3748,32 @@ planstate_tree_walker(PlanState *planstate, switch (nodeTag(plan)) { case T_ModifyTable: - if (planstate_walk_members(((ModifyTable *) plan)->plans, - ((ModifyTableState *) planstate)->mt_plans, + if (planstate_walk_members(((ModifyTableState *) planstate)->mt_plans, + ((ModifyTableState *) planstate)->mt_nplans, walker, context)) return true; break; case T_Append: - if (planstate_walk_members(((Append *) plan)->appendplans, - ((AppendState *) planstate)->appendplans, + if (planstate_walk_members(((AppendState *) planstate)->appendplans, + ((AppendState *) planstate)->as_nplans, walker, context)) return true; break; case T_MergeAppend: - if (planstate_walk_members(((MergeAppend *) plan)->mergeplans, - ((MergeAppendState *) planstate)->mergeplans, + if (planstate_walk_members(((MergeAppendState *) planstate)->mergeplans, + ((MergeAppendState *) planstate)->ms_nplans, walker, context)) return true; break; case T_BitmapAnd: - if (planstate_walk_members(((BitmapAnd *) plan)->bitmapplans, - ((BitmapAndState *) planstate)->bitmapplans, + if (planstate_walk_members(((BitmapAndState *) planstate)->bitmapplans, + ((BitmapAndState *) planstate)->nplans, walker, context)) return true; break; case T_BitmapOr: - if (planstate_walk_members(((BitmapOr *) plan)->bitmapplans, - ((BitmapOrState *) 
planstate)->bitmapplans, + if (planstate_walk_members(((BitmapOrState *) planstate)->bitmapplans, + ((BitmapOrState *) planstate)->nplans, walker, context)) return true; break; @@ -3798,15 +3823,11 @@ planstate_walk_subplans(List *plans, /* * Walk the constituent plans of a ModifyTable, Append, MergeAppend, * BitmapAnd, or BitmapOr node. - * - * Note: we don't actually need to examine the Plan list members, but - * we need the list in order to determine the length of the PlanState array. */ static bool -planstate_walk_members(List *plans, PlanState **planstates, +planstate_walk_members(PlanState **planstates, int nplans, bool (*walker) (), void *context) { - int nplans = list_length(plans); int j; for (j = 0; j < nplans; j++) diff --git a/src/backend/nodes/nodes.c b/src/backend/nodes/nodes.c index d3345aae6d..f5ede390e0 100644 --- a/src/backend/nodes/nodes.c +++ b/src/backend/nodes/nodes.c @@ -4,7 +4,7 @@ * support code for nodes (now that we have removed the home-brew * inheritance system, our support code for nodes is much simpler) * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * diff --git a/src/backend/nodes/outfuncs.c b/src/backend/nodes/outfuncs.c index 5ce3c7c599..f0c396530d 100644 --- a/src/backend/nodes/outfuncs.c +++ b/src/backend/nodes/outfuncs.c @@ -3,7 +3,7 @@ * outfuncs.c * Output functions for Postgres tree nodes. * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * @@ -15,9 +15,13 @@ * have an output function defined here (as well as an input function * in readfuncs.c). In addition, plan nodes should have input and * output functions so that they can be sent to parallel workers. + * * For use in debugging, we also provide output functions for nodes - * that appear in raw parsetrees and path. These nodes however need - * not have input functions. + * that appear in raw parsetrees and planner Paths. These node types + * need not have input functions. Output support for raw parsetrees + * is somewhat incomplete, too; in particular, utility statements are + * almost entirely unsupported. We try to support everything that can + * appear in a raw SELECT, though. 
* *------------------------------------------------------------------------- */ @@ -54,6 +58,11 @@ static void outChar(StringInfo str, char c); #define WRITE_UINT_FIELD(fldname) \ appendStringInfo(str, " :" CppAsString(fldname) " %u", node->fldname) +/* Write an unsigned integer field (anything written with UINT64_FORMAT) */ +#define WRITE_UINT64_FIELD(fldname) \ + appendStringInfo(str, " :" CppAsString(fldname) " " UINT64_FORMAT, \ + node->fldname) + /* Write an OID field (don't hard-wire assumption that OID is same as uint) */ #define WRITE_OID_FIELD(fldname) \ appendStringInfo(str, " :" CppAsString(fldname) " %u", node->fldname) @@ -260,24 +269,24 @@ _outPlannedStmt(StringInfo str, const PlannedStmt *node) WRITE_NODE_TYPE("PLANNEDSTMT"); WRITE_ENUM_FIELD(commandType, CmdType); - WRITE_UINT_FIELD(queryId); + WRITE_UINT64_FIELD(queryId); WRITE_BOOL_FIELD(hasReturning); WRITE_BOOL_FIELD(hasModifyingCTE); WRITE_BOOL_FIELD(canSetTag); WRITE_BOOL_FIELD(transientPlan); WRITE_BOOL_FIELD(dependsOnRole); WRITE_BOOL_FIELD(parallelModeNeeded); + WRITE_INT_FIELD(jitFlags); WRITE_NODE_FIELD(planTree); WRITE_NODE_FIELD(rtable); WRITE_NODE_FIELD(resultRelations); - WRITE_NODE_FIELD(nonleafResultRelations); WRITE_NODE_FIELD(rootResultRelations); WRITE_NODE_FIELD(subplans); WRITE_BITMAPSET_FIELD(rewindPlanIDs); WRITE_NODE_FIELD(rowMarks); WRITE_NODE_FIELD(relationOids); WRITE_NODE_FIELD(invalItems); - WRITE_INT_FIELD(nParamExec); + WRITE_NODE_FIELD(paramExecTypes); WRITE_NODE_FIELD(utilityStmt); WRITE_LOCATION_FIELD(stmt_location); WRITE_LOCATION_FIELD(stmt_len); @@ -366,7 +375,8 @@ _outModifyTable(StringInfo str, const ModifyTable *node) WRITE_ENUM_FIELD(operation, CmdType); WRITE_BOOL_FIELD(canSetTag); WRITE_UINT_FIELD(nominalRelation); - WRITE_NODE_FIELD(partitioned_rels); + WRITE_UINT_FIELD(rootRelation); + WRITE_BOOL_FIELD(partColsUpdated); WRITE_NODE_FIELD(resultRelations); WRITE_INT_FIELD(resultRelIndex); WRITE_INT_FIELD(rootResultRelIndex); @@ -392,8 +402,9 @@ _outAppend(StringInfo str, const Append *node) _outPlanInfo(str, (const Plan *) node); - WRITE_NODE_FIELD(partitioned_rels); WRITE_NODE_FIELD(appendplans); + WRITE_INT_FIELD(first_partial_plan); + WRITE_NODE_FIELD(part_prune_info); } static void @@ -405,7 +416,6 @@ _outMergeAppend(StringInfo str, const MergeAppend *node) _outPlanInfo(str, (const Plan *) node); - WRITE_NODE_FIELD(partitioned_rels); WRITE_NODE_FIELD(mergeplans); WRITE_INT_FIELD(numCols); @@ -425,6 +435,8 @@ _outMergeAppend(StringInfo str, const MergeAppend *node) appendStringInfoString(str, " :nullsFirst"); for (i = 0; i < node->numCols; i++) appendStringInfo(str, " %s", booltostr(node->nullsFirst[i])); + + WRITE_NODE_FIELD(part_prune_info); } static void @@ -479,8 +491,10 @@ _outGather(StringInfo str, const Gather *node) _outPlanInfo(str, (const Plan *) node); WRITE_INT_FIELD(num_workers); + WRITE_INT_FIELD(rescan_param); WRITE_BOOL_FIELD(single_copy); WRITE_BOOL_FIELD(invisible); + WRITE_BITMAPSET_FIELD(initParam); } static void @@ -493,6 +507,7 @@ _outGatherMerge(StringInfo str, const GatherMerge *node) _outPlanInfo(str, (const Plan *) node); WRITE_INT_FIELD(num_workers); + WRITE_INT_FIELD(rescan_param); WRITE_INT_FIELD(numCols); appendStringInfoString(str, " :sortColIdx"); @@ -510,6 +525,8 @@ _outGatherMerge(StringInfo str, const GatherMerge *node) appendStringInfoString(str, " :nullsFirst"); for (i = 0; i < node->numCols; i++) appendStringInfo(str, " %s", booltostr(node->nullsFirst[i])); + + WRITE_BITMAPSET_FIELD(initParam); } static void @@ -828,6 +845,11 @@ 
_outWindowAgg(StringInfo str, const WindowAgg *node) WRITE_INT_FIELD(frameOptions); WRITE_NODE_FIELD(startOffset); WRITE_NODE_FIELD(endOffset); + WRITE_OID_FIELD(startInRangeFunc); + WRITE_OID_FIELD(endInRangeFunc); + WRITE_OID_FIELD(inRangeColl); + WRITE_BOOL_FIELD(inRangeAsc); + WRITE_BOOL_FIELD(inRangeNullsFirst); } static void @@ -916,6 +938,7 @@ _outHash(StringInfo str, const Hash *node) WRITE_OID_FIELD(skewTable); WRITE_INT_FIELD(skewColumn); WRITE_BOOL_FIELD(skewInherit); + WRITE_FLOAT_FIELD(rows_total, "%.0f"); } static void @@ -990,6 +1013,67 @@ _outPlanRowMark(StringInfo str, const PlanRowMark *node) WRITE_BOOL_FIELD(isParent); } +static void +_outPartitionPruneInfo(StringInfo str, const PartitionPruneInfo *node) +{ + WRITE_NODE_TYPE("PARTITIONPRUNEINFO"); + + WRITE_NODE_FIELD(prune_infos); + WRITE_BITMAPSET_FIELD(other_subplans); +} + +static void +_outPartitionedRelPruneInfo(StringInfo str, const PartitionedRelPruneInfo *node) +{ + int i; + + WRITE_NODE_TYPE("PARTITIONEDRELPRUNEINFO"); + + WRITE_UINT_FIELD(rtindex); + WRITE_NODE_FIELD(pruning_steps); + WRITE_BITMAPSET_FIELD(present_parts); + WRITE_INT_FIELD(nparts); + WRITE_INT_FIELD(nexprs); + + appendStringInfoString(str, " :subplan_map"); + for (i = 0; i < node->nparts; i++) + appendStringInfo(str, " %d", node->subplan_map[i]); + + appendStringInfoString(str, " :subpart_map"); + for (i = 0; i < node->nparts; i++) + appendStringInfo(str, " %d", node->subpart_map[i]); + + appendStringInfoString(str, " :hasexecparam"); + for (i = 0; i < node->nexprs; i++) + appendStringInfo(str, " %s", booltostr(node->hasexecparam[i])); + + WRITE_BOOL_FIELD(do_initial_prune); + WRITE_BOOL_FIELD(do_exec_prune); + WRITE_BITMAPSET_FIELD(execparamids); +} + +static void +_outPartitionPruneStepOp(StringInfo str, const PartitionPruneStepOp *node) +{ + WRITE_NODE_TYPE("PARTITIONPRUNESTEPOP"); + + WRITE_INT_FIELD(step.step_id); + WRITE_INT_FIELD(opstrategy); + WRITE_NODE_FIELD(exprs); + WRITE_NODE_FIELD(cmpfns); + WRITE_BITMAPSET_FIELD(nullkeys); +} + +static void +_outPartitionPruneStepCombine(StringInfo str, const PartitionPruneStepCombine *node) +{ + WRITE_NODE_TYPE("PARTITIONPRUNESTEPCOMBINE"); + + WRITE_INT_FIELD(step.step_id); + WRITE_ENUM_FIELD(combineOp, PartitionPruneCombineOp); + WRITE_NODE_FIELD(source_stepids); +} + static void _outPlanInvalItem(StringInfo str, const PlanInvalItem *node) { @@ -1392,11 +1476,10 @@ _outArrayCoerceExpr(StringInfo str, const ArrayCoerceExpr *node) WRITE_NODE_TYPE("ARRAYCOERCEEXPR"); WRITE_NODE_FIELD(arg); - WRITE_OID_FIELD(elemfuncid); + WRITE_NODE_FIELD(elemexpr); WRITE_OID_FIELD(resulttype); WRITE_INT_FIELD(resulttypmod); WRITE_OID_FIELD(resultcollid); - WRITE_BOOL_FIELD(isExplicit); WRITE_ENUM_FIELD(coerceformat, CoercionForm); WRITE_LOCATION_FIELD(location); } @@ -1855,6 +1938,7 @@ _outAppendPath(StringInfo str, const AppendPath *node) WRITE_NODE_FIELD(partitioned_rels); WRITE_NODE_FIELD(subpaths); + WRITE_INT_FIELD(first_partial_path); } static void @@ -2038,7 +2122,6 @@ _outWindowAggPath(StringInfo str, const WindowAggPath *node) WRITE_NODE_FIELD(subpath); WRITE_NODE_FIELD(winclause); - WRITE_NODE_FIELD(winpathkeys); } static void @@ -2093,7 +2176,8 @@ _outModifyTablePath(StringInfo str, const ModifyTablePath *node) WRITE_ENUM_FIELD(operation, CmdType); WRITE_BOOL_FIELD(canSetTag); WRITE_UINT_FIELD(nominalRelation); - WRITE_NODE_FIELD(partitioned_rels); + WRITE_UINT_FIELD(rootRelation); + WRITE_BOOL_FIELD(partColsUpdated); WRITE_NODE_FIELD(resultRelations); WRITE_NODE_FIELD(subpaths); 
WRITE_NODE_FIELD(subroots); @@ -2158,6 +2242,7 @@ _outHashPath(StringInfo str, const HashPath *node) WRITE_NODE_FIELD(path_hashclauses); WRITE_INT_FIELD(num_batches); + WRITE_FLOAT_FIELD(inner_rows_total, "%.0f"); } static void @@ -2171,11 +2256,10 @@ _outPlannerGlobal(StringInfo str, const PlannerGlobal *node) WRITE_NODE_FIELD(finalrtable); WRITE_NODE_FIELD(finalrowmarks); WRITE_NODE_FIELD(resultRelations); - WRITE_NODE_FIELD(nonleafResultRelations); WRITE_NODE_FIELD(rootResultRelations); WRITE_NODE_FIELD(relationOids); WRITE_NODE_FIELD(invalItems); - WRITE_INT_FIELD(nParamExec); + WRITE_NODE_FIELD(paramExecTypes); WRITE_UINT_FIELD(lastPHId); WRITE_UINT_FIELD(lastRowMarkId); WRITE_INT_FIELD(lastPlanNodeId); @@ -2211,7 +2295,6 @@ _outPlannerInfo(StringInfo str, const PlannerInfo *node) WRITE_NODE_FIELD(full_join_clauses); WRITE_NODE_FIELD(join_info_list); WRITE_NODE_FIELD(append_rel_list); - WRITE_NODE_FIELD(pcinfo_list); WRITE_NODE_FIELD(rowMarks); WRITE_NODE_FIELD(placeholder_list); WRITE_NODE_FIELD(fkey_list); @@ -2226,7 +2309,7 @@ _outPlannerInfo(StringInfo str, const PlannerInfo *node) WRITE_FLOAT_FIELD(tuple_fraction, "%.4f"); WRITE_FLOAT_FIELD(limit_tuples, "%.0f"); WRITE_UINT_FIELD(qual_security_level); - WRITE_BOOL_FIELD(hasInheritedTarget); + WRITE_ENUM_FIELD(inhTargetKind, InheritanceKind); WRITE_BOOL_FIELD(hasJoinRTEs); WRITE_BOOL_FIELD(hasLateralRTEs); WRITE_BOOL_FIELD(hasDeletedRTEs); @@ -2236,6 +2319,7 @@ _outPlannerInfo(StringInfo str, const PlannerInfo *node) WRITE_INT_FIELD(wt_param_id); WRITE_BITMAPSET_FIELD(curOuterRels); WRITE_NODE_FIELD(curOuterParams); + WRITE_BOOL_FIELD(partColsUpdated); } static void @@ -2284,7 +2368,9 @@ _outRelOptInfo(StringInfo str, const RelOptInfo *node) WRITE_UINT_FIELD(baserestrict_min_security); WRITE_NODE_FIELD(joininfo); WRITE_BOOL_FIELD(has_eclass_joins); + WRITE_BOOL_FIELD(consider_partitionwise_join); WRITE_BITMAPSET_FIELD(top_parent_relids); + WRITE_NODE_FIELD(partitioned_child_rels); } static void @@ -2509,15 +2595,6 @@ _outAppendRelInfo(StringInfo str, const AppendRelInfo *node) WRITE_OID_FIELD(parent_reloid); } -static void -_outPartitionedChildRelInfo(StringInfo str, const PartitionedChildRelInfo *node) -{ - WRITE_NODE_TYPE("PARTITIONEDCHILDRELINFO"); - - WRITE_UINT_FIELD(parent_relid); - WRITE_NODE_FIELD(child_rels); -} - static void _outPlaceHolderInfo(StringInfo str, const PlaceHolderInfo *node) { @@ -2639,9 +2716,11 @@ _outIndexStmt(StringInfo str, const IndexStmt *node) WRITE_STRING_FIELD(idxname); WRITE_NODE_FIELD(relation); + WRITE_OID_FIELD(relationId); WRITE_STRING_FIELD(accessMethod); WRITE_STRING_FIELD(tableSpace); WRITE_NODE_FIELD(indexParams); + WRITE_NODE_FIELD(indexIncludingParams); WRITE_NODE_FIELD(options); WRITE_NODE_FIELD(whereClause); WRITE_NODE_FIELD(excludeOpNames); @@ -2667,6 +2746,7 @@ _outCreateStatsStmt(StringInfo str, const CreateStatsStmt *node) WRITE_NODE_FIELD(stat_types); WRITE_NODE_FIELD(exprs); WRITE_NODE_FIELD(relations); + WRITE_STRING_FIELD(stxcomment); WRITE_BOOL_FIELD(if_not_exists); } @@ -2794,11 +2874,11 @@ _outColumnDef(StringInfo str, const ColumnDef *node) WRITE_BOOL_FIELD(is_local); WRITE_BOOL_FIELD(is_not_null); WRITE_BOOL_FIELD(is_from_type); - WRITE_BOOL_FIELD(is_from_parent); WRITE_CHAR_FIELD(storage); WRITE_NODE_FIELD(raw_default); WRITE_NODE_FIELD(cooked_default); WRITE_CHAR_FIELD(identity); + WRITE_NODE_FIELD(identitySequence); WRITE_NODE_FIELD(collClause); WRITE_OID_FIELD(collOid); WRITE_NODE_FIELD(constraints); @@ -2918,7 +2998,7 @@ _outQuery(StringInfo str, const Query *node) 
WRITE_NODE_FIELD(rowMarks); WRITE_NODE_FIELD(setOperations); WRITE_NODE_FIELD(constraintDeps); - /* withCheckOptions intentionally omitted, see comment in parsenodes.h */ + WRITE_NODE_FIELD(withCheckOptions); WRITE_LOCATION_FIELD(stmt_location); WRITE_LOCATION_FIELD(stmt_len); } @@ -2969,6 +3049,11 @@ _outWindowClause(StringInfo str, const WindowClause *node) WRITE_INT_FIELD(frameOptions); WRITE_NODE_FIELD(startOffset); WRITE_NODE_FIELD(endOffset); + WRITE_OID_FIELD(startInRangeFunc); + WRITE_OID_FIELD(endInRangeFunc); + WRITE_OID_FIELD(inRangeColl); + WRITE_BOOL_FIELD(inRangeAsc); + WRITE_BOOL_FIELD(inRangeNullsFirst); WRITE_UINT_FIELD(winref); WRITE_BOOL_FIELD(copiedOrder); } @@ -3041,6 +3126,7 @@ _outRangeTblEntry(StringInfo str, const RangeTblEntry *node) case RTE_RELATION: WRITE_OID_FIELD(relid); WRITE_CHAR_FIELD(relkind); + WRITE_INT_FIELD(rellockmode); WRITE_NODE_FIELD(tablesample); break; case RTE_SUBQUERY: @@ -3208,7 +3294,7 @@ _outValue(StringInfo str, const Value *value) switch (value->type) { case T_Integer: - appendStringInfo(str, "%ld", value->val.ival); + appendStringInfo(str, "%d", value->val.ival); break; case T_Float: @@ -3261,6 +3347,20 @@ _outParamRef(StringInfo str, const ParamRef *node) WRITE_LOCATION_FIELD(location); } +/* + * Node types found in raw parse trees (supported for debug purposes) + */ + +static void +_outRawStmt(StringInfo str, const RawStmt *node) +{ + WRITE_NODE_TYPE("RAWSTMT"); + + WRITE_NODE_FIELD(stmt); + WRITE_LOCATION_FIELD(stmt_location); + WRITE_INT_FIELD(stmt_len); +} + static void _outAConst(StringInfo str, const A_Const *node) { @@ -3460,6 +3560,7 @@ _outConstraint(StringInfo str, const Constraint *node) case CONSTR_PRIMARY: appendStringInfoString(str, "PRIMARY_KEY"); WRITE_NODE_FIELD(keys); + WRITE_NODE_FIELD(including); WRITE_NODE_FIELD(options); WRITE_STRING_FIELD(indexname); WRITE_STRING_FIELD(indexspace); @@ -3469,6 +3570,7 @@ _outConstraint(StringInfo str, const Constraint *node) case CONSTR_UNIQUE: appendStringInfoString(str, "UNIQUE"); WRITE_NODE_FIELD(keys); + WRITE_NODE_FIELD(including); WRITE_NODE_FIELD(options); WRITE_STRING_FIELD(indexname); WRITE_STRING_FIELD(indexspace); @@ -3478,6 +3580,7 @@ _outConstraint(StringInfo str, const Constraint *node) case CONSTR_EXCLUSION: appendStringInfoString(str, "EXCLUSION"); WRITE_NODE_FIELD(exclusions); + WRITE_NODE_FIELD(including); WRITE_NODE_FIELD(options); WRITE_STRING_FIELD(indexname); WRITE_STRING_FIELD(indexspace); @@ -3529,6 +3632,7 @@ _outForeignKeyCacheInfo(StringInfo str, const ForeignKeyCacheInfo *node) WRITE_NODE_TYPE("FOREIGNKEYCACHEINFO"); + WRITE_OID_FIELD(conoid); WRITE_OID_FIELD(conrelid); WRITE_OID_FIELD(confrelid); WRITE_INT_FIELD(nkeys); @@ -3571,6 +3675,9 @@ _outPartitionBoundSpec(StringInfo str, const PartitionBoundSpec *node) WRITE_NODE_TYPE("PARTITIONBOUNDSPEC"); WRITE_CHAR_FIELD(strategy); + WRITE_BOOL_FIELD(is_default); + WRITE_INT_FIELD(modulus); + WRITE_INT_FIELD(remainder); WRITE_NODE_FIELD(listdatums); WRITE_NODE_FIELD(lowerdatums); WRITE_NODE_FIELD(upperdatums); @@ -3746,6 +3853,18 @@ outNode(StringInfo str, const void *obj) case T_PlanRowMark: _outPlanRowMark(str, obj); break; + case T_PartitionPruneInfo: + _outPartitionPruneInfo(str, obj); + break; + case T_PartitionedRelPruneInfo: + _outPartitionedRelPruneInfo(str, obj); + break; + case T_PartitionPruneStepOp: + _outPartitionPruneStepOp(str, obj); + break; + case T_PartitionPruneStepCombine: + _outPartitionPruneStepCombine(str, obj); + break; case T_PlanInvalItem: _outPlanInvalItem(str, obj); break; @@ 
-4043,9 +4162,6 @@ outNode(StringInfo str, const void *obj) case T_AppendRelInfo: _outAppendRelInfo(str, obj); break; - case T_PartitionedChildRelInfo: - _outPartitionedChildRelInfo(str, obj); - break; case T_PlaceHolderInfo: _outPlaceHolderInfo(str, obj); break; @@ -4151,6 +4267,9 @@ outNode(StringInfo str, const void *obj) case T_ParamRef: _outParamRef(str, obj); break; + case T_RawStmt: + _outRawStmt(str, obj); + break; case T_A_Const: _outAConst(str, obj); break; diff --git a/src/backend/nodes/params.c b/src/backend/nodes/params.c index 110732081b..79197b18b4 100644 --- a/src/backend/nodes/params.c +++ b/src/backend/nodes/params.c @@ -4,7 +4,7 @@ * Support for finding the values associated with Param nodes. * * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * IDENTIFICATION @@ -48,32 +48,25 @@ copyParamList(ParamListInfo from) retval = (ParamListInfo) palloc(size); retval->paramFetch = NULL; retval->paramFetchArg = NULL; + retval->paramCompile = NULL; + retval->paramCompileArg = NULL; retval->parserSetup = NULL; retval->parserSetupArg = NULL; retval->numParams = from->numParams; - retval->paramMask = NULL; for (i = 0; i < from->numParams; i++) { - ParamExternData *oprm = &from->params[i]; + ParamExternData *oprm; ParamExternData *nprm = &retval->params[i]; + ParamExternData prmdata; int16 typLen; bool typByVal; - /* Ignore parameters we don't need, to save cycles and space. */ - if (from->paramMask != NULL && - !bms_is_member(i, from->paramMask)) - { - nprm->value = (Datum) 0; - nprm->isnull = true; - nprm->pflags = 0; - nprm->ptype = InvalidOid; - continue; - } - /* give hook a chance in case parameter is dynamic */ - if (!OidIsValid(oprm->ptype) && from->paramFetch != NULL) - (*from->paramFetch) (from, i + 1); + if (from->paramFetch != NULL) + oprm = from->paramFetch(from, i + 1, false, &prmdata); + else + oprm = &from->params[i]; /* flat-copy the parameter info */ *nprm = *oprm; @@ -102,22 +95,19 @@ EstimateParamListSpace(ParamListInfo paramLI) for (i = 0; i < paramLI->numParams; i++) { - ParamExternData *prm = ¶mLI->params[i]; + ParamExternData *prm; + ParamExternData prmdata; Oid typeOid; int16 typLen; bool typByVal; - /* Ignore parameters we don't need, to save cycles and space. */ - if (paramLI->paramMask != NULL && - !bms_is_member(i, paramLI->paramMask)) - typeOid = InvalidOid; + /* give hook a chance in case parameter is dynamic */ + if (paramLI->paramFetch != NULL) + prm = paramLI->paramFetch(paramLI, i + 1, false, &prmdata); else - { - /* give hook a chance in case parameter is dynamic */ - if (!OidIsValid(prm->ptype) && paramLI->paramFetch != NULL) - (*paramLI->paramFetch) (paramLI, i + 1); - typeOid = prm->ptype; - } + prm = ¶mLI->params[i]; + + typeOid = prm->ptype; sz = add_size(sz, sizeof(Oid)); /* space for type OID */ sz = add_size(sz, sizeof(uint16)); /* space for pflags */ @@ -171,22 +161,19 @@ SerializeParamList(ParamListInfo paramLI, char **start_address) /* Write each parameter in turn. */ for (i = 0; i < nparams; i++) { - ParamExternData *prm = ¶mLI->params[i]; + ParamExternData *prm; + ParamExternData prmdata; Oid typeOid; int16 typLen; bool typByVal; - /* Ignore parameters we don't need, to save cycles and space. 
*/ - if (paramLI->paramMask != NULL && - !bms_is_member(i, paramLI->paramMask)) - typeOid = InvalidOid; + /* give hook a chance in case parameter is dynamic */ + if (paramLI->paramFetch != NULL) + prm = paramLI->paramFetch(paramLI, i + 1, false, &prmdata); else - { - /* give hook a chance in case parameter is dynamic */ - if (!OidIsValid(prm->ptype) && paramLI->paramFetch != NULL) - (*paramLI->paramFetch) (paramLI, i + 1); - typeOid = prm->ptype; - } + prm = ¶mLI->params[i]; + + typeOid = prm->ptype; /* Write type OID. */ memcpy(*start_address, &typeOid, sizeof(Oid)); @@ -237,10 +224,11 @@ RestoreParamList(char **start_address) paramLI = (ParamListInfo) palloc(size); paramLI->paramFetch = NULL; paramLI->paramFetchArg = NULL; + paramLI->paramCompile = NULL; + paramLI->paramCompileArg = NULL; paramLI->parserSetup = NULL; paramLI->parserSetupArg = NULL; paramLI->numParams = nparams; - paramLI->paramMask = NULL; for (i = 0; i < nparams; i++) { diff --git a/src/backend/nodes/print.c b/src/backend/nodes/print.c index 380e8b71f2..b9bad5eacc 100644 --- a/src/backend/nodes/print.c +++ b/src/backend/nodes/print.c @@ -3,7 +3,7 @@ * print.c * various print routines (used mostly for debugging) * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * diff --git a/src/backend/nodes/read.c b/src/backend/nodes/read.c index b56f28e15f..99ed2f248a 100644 --- a/src/backend/nodes/read.c +++ b/src/backend/nodes/read.c @@ -4,7 +4,7 @@ * routines to convert a string (legal ascii representation of node) back * to nodes * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * @@ -21,24 +21,37 @@ #include +#include "common/string.h" #include "nodes/pg_list.h" #include "nodes/readfuncs.h" #include "nodes/value.h" /* Static state for pg_strtok */ -static char *pg_strtok_ptr = NULL; +static const char *pg_strtok_ptr = NULL; + +/* State flag that determines how readfuncs.c should treat location fields */ +#ifdef WRITE_READ_PARSE_PLAN_TREES +bool restore_location_fields = false; +#endif /* * stringToNode - - * returns a Node with a given legal ASCII representation + * builds a Node tree from its string representation (assumed valid) + * + * restore_loc_fields instructs readfuncs.c whether to restore location + * fields rather than set them to -1. This is currently only supported + * in builds with the WRITE_READ_PARSE_PLAN_TREES debugging flag set. */ -void * -stringToNode(char *str) +static void * +stringToNodeInternal(const char *str, bool restore_loc_fields) { - char *save_strtok; void *retval; + const char *save_strtok; +#ifdef WRITE_READ_PARSE_PLAN_TREES + bool save_restore_location_fields; +#endif /* * We save and restore the pre-existing state of pg_strtok. This makes the @@ -50,13 +63,45 @@ stringToNode(char *str) pg_strtok_ptr = str; /* point pg_strtok at the string to read */ + /* + * If enabled, likewise save/restore the location field handling flag. 
+ */ +#ifdef WRITE_READ_PARSE_PLAN_TREES + save_restore_location_fields = restore_location_fields; + restore_location_fields = restore_loc_fields; +#endif + retval = nodeRead(NULL, 0); /* do the reading */ pg_strtok_ptr = save_strtok; +#ifdef WRITE_READ_PARSE_PLAN_TREES + restore_location_fields = save_restore_location_fields; +#endif + return retval; } +/* + * Externally visible entry points + */ +void * +stringToNode(const char *str) +{ + return stringToNodeInternal(str, false); +} + +#ifdef WRITE_READ_PARSE_PLAN_TREES + +void * +stringToNodeWithLocations(const char *str) +{ + return stringToNodeInternal(str, true); +} + +#endif + + /***************************************************************************** * * the lisp token parser @@ -103,11 +148,11 @@ stringToNode(char *str) * code should add backslashes to a string constant to ensure it is treated * as a single token. */ -char * +const char * pg_strtok(int *length) { - char *local_str; /* working pointer to string */ - char *ret_str; /* start of token to return */ + const char *local_str; /* working pointer to string */ + const char *ret_str; /* start of token to return */ local_str = pg_strtok_ptr; @@ -165,7 +210,7 @@ pg_strtok(int *length) * any protective backslashes in the token are removed. */ char * -debackslash(char *token, int length) +debackslash(const char *token, int length) { char *result = palloc(length + 1); char *ptr = result; @@ -197,10 +242,10 @@ debackslash(char *token, int length) * Assumption: the ascii representation is legal */ static NodeTag -nodeTokenType(char *token, int length) +nodeTokenType(const char *token, int length) { NodeTag retval; - char *numptr; + const char *numptr; int numlen; /* @@ -215,22 +260,15 @@ nodeTokenType(char *token, int length) { /* * Yes. Figure out whether it is integral or float; this requires - * both a syntax check and a range check. strtol() can do both for us. - * We know the token will end at a character that strtol will stop at, - * so we do not need to modify the string. + * both a syntax check and a range check. strtoint() can do both for + * us. We know the token will end at a character that strtoint will + * stop at, so we do not need to modify the string. */ - long val; char *endptr; errno = 0; - val = strtol(token, &endptr, 10); - (void) val; /* avoid compiler warning if unused */ - if (endptr != token + length || errno == ERANGE -#ifdef HAVE_LONG_INT_64 - /* if long > 32 bits, check for overflow of int4 */ - || val != (long) ((int32) val) -#endif - ) + (void) strtoint(token, &endptr, 10); + if (endptr != token + length || errno == ERANGE) return T_Float; return T_Integer; } @@ -275,7 +313,7 @@ nodeTokenType(char *token, int length) * this should only be invoked from within a stringToNode operation). */ void * -nodeRead(char *token, int tok_len) +nodeRead(const char *token, int tok_len) { Node *result; NodeTag type; @@ -387,9 +425,9 @@ nodeRead(char *token, int tok_len) case T_Integer: /* - * we know that the token terminates on a char atol will stop at + * we know that the token terminates on a char atoi will stop at */ - result = (Node *) makeInteger(atol(token)); + result = (Node *) makeInteger(atoi(token)); break; case T_Float: { diff --git a/src/backend/nodes/readfuncs.c b/src/backend/nodes/readfuncs.c index 86c811de49..e117867de5 100644 --- a/src/backend/nodes/readfuncs.c +++ b/src/backend/nodes/readfuncs.c @@ -3,7 +3,7 @@ * readfuncs.c * Reader functions for Postgres tree nodes. 
* - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * @@ -17,10 +17,14 @@ * never read executor state trees, either. * * Parse location fields are written out by outfuncs.c, but only for - * possible debugging use. When reading a location field, we discard + * debugging use. When reading a location field, we normally discard * the stored value and set the location field to -1 (ie, "unknown"). * This is because nodes coming from a stored rule should not be thought * to have a known location in the current query's text. + * However, if restore_location_fields is true, we do restore location + * fields from the string. This is currently intended only for use by the + * WRITE_READ_PARSE_PLAN_TREES test code, which doesn't want to cause + * any change in the node contents. * *------------------------------------------------------------------------- */ @@ -33,6 +37,7 @@ #include "nodes/parsenodes.h" #include "nodes/plannodes.h" #include "nodes/readfuncs.h" +#include "utils/builtins.h" /* @@ -50,7 +55,7 @@ /* And a few guys need only the pg_strtok support fields */ #define READ_TEMP_LOCALS() \ - char *token; \ + const char *token; \ int length /* ... but most need both */ @@ -70,7 +75,13 @@ token = pg_strtok(&length); /* get field value */ \ local_node->fldname = atoui(token) -/* Read an long integer field (anything written as ":fldname %ld") */ +/* Read an unsigned integer field (anything written using UINT64_FORMAT) */ +#define READ_UINT64_FIELD(fldname) \ + token = pg_strtok(&length); /* skip :fldname */ \ + token = pg_strtok(&length); /* get field value */ \ + local_node->fldname = pg_strtouint64(token, NULL, 10) + +/* Read a long integer field (anything written as ":fldname %ld") */ #define READ_LONG_FIELD(fldname) \ token = pg_strtok(&length); /* skip :fldname */ \ token = pg_strtok(&length); /* get field value */ \ @@ -113,12 +124,19 @@ token = pg_strtok(&length); /* get field value */ \ local_node->fldname = nullable_string(token, length) -/* Read a parse location field (and throw away the value, per notes above) */ +/* Read a parse location field (and possibly throw away the value) */ +#ifdef WRITE_READ_PARSE_PLAN_TREES +#define READ_LOCATION_FIELD(fldname) \ + token = pg_strtok(&length); /* skip :fldname */ \ + token = pg_strtok(&length); /* get field value */ \ + local_node->fldname = restore_location_fields ? 
atoi(token) : -1 +#else #define READ_LOCATION_FIELD(fldname) \ token = pg_strtok(&length); /* skip :fldname */ \ token = pg_strtok(&length); /* get field value */ \ (void) token; /* in case not used elsewhere */ \ local_node->fldname = -1 /* set field to "unknown" */ +#endif /* Read a Node field */ #define READ_NODE_FIELD(fldname) \ @@ -231,7 +249,7 @@ _readQuery(void) READ_ENUM_FIELD(commandType, CmdType); READ_ENUM_FIELD(querySource, QuerySource); - local_node->queryId = 0; /* not saved in output format */ + local_node->queryId = UINT64CONST(0); /* not saved in output format */ READ_BOOL_FIELD(canSetTag); READ_NODE_FIELD(utilityStmt); READ_INT_FIELD(resultRelation); @@ -262,7 +280,7 @@ _readQuery(void) READ_NODE_FIELD(rowMarks); READ_NODE_FIELD(setOperations); READ_NODE_FIELD(constraintDeps); - /* withCheckOptions intentionally omitted, see comment in parsenodes.h */ + READ_NODE_FIELD(withCheckOptions); READ_LOCATION_FIELD(stmt_location); READ_LOCATION_FIELD(stmt_len); @@ -362,6 +380,11 @@ _readWindowClause(void) READ_INT_FIELD(frameOptions); READ_NODE_FIELD(startOffset); READ_NODE_FIELD(endOffset); + READ_OID_FIELD(startInRangeFunc); + READ_OID_FIELD(endInRangeFunc); + READ_OID_FIELD(inRangeColl); + READ_BOOL_FIELD(inRangeAsc); + READ_BOOL_FIELD(inRangeNullsFirst); READ_UINT_FIELD(winref); READ_BOOL_FIELD(copiedOrder); @@ -892,11 +915,10 @@ _readArrayCoerceExpr(void) READ_LOCALS(ArrayCoerceExpr); READ_NODE_FIELD(arg); - READ_OID_FIELD(elemfuncid); + READ_NODE_FIELD(elemexpr); READ_OID_FIELD(resulttype); READ_INT_FIELD(resulttypmod); READ_OID_FIELD(resultcollid); - READ_BOOL_FIELD(isExplicit); READ_ENUM_FIELD(coerceformat, CoercionForm); READ_LOCATION_FIELD(location); @@ -1339,6 +1361,7 @@ _readRangeTblEntry(void) case RTE_RELATION: READ_OID_FIELD(relid); READ_CHAR_FIELD(relkind); + READ_INT_FIELD(rellockmode); READ_NODE_FIELD(tablesample); break; case RTE_SUBQUERY: @@ -1355,6 +1378,15 @@ _readRangeTblEntry(void) break; case RTE_TABLEFUNC: READ_NODE_FIELD(tablefunc); + /* The RTE must have a copy of the column type info, if any */ + if (local_node->tablefunc) + { + TableFunc *tf = local_node->tablefunc; + + local_node->coltypes = tf->coltypes; + local_node->coltypmods = tf->coltypmods; + local_node->colcollations = tf->colcollations; + } break; case RTE_VALUES: READ_NODE_FIELD(values_lists); @@ -1448,6 +1480,10 @@ _readDefElem(void) READ_DONE(); } +/* + * Stuff from plannodes.h. 
+ */ + /* * _readPlannedStmt */ @@ -1457,24 +1493,24 @@ _readPlannedStmt(void) READ_LOCALS(PlannedStmt); READ_ENUM_FIELD(commandType, CmdType); - READ_UINT_FIELD(queryId); + READ_UINT64_FIELD(queryId); READ_BOOL_FIELD(hasReturning); READ_BOOL_FIELD(hasModifyingCTE); READ_BOOL_FIELD(canSetTag); READ_BOOL_FIELD(transientPlan); READ_BOOL_FIELD(dependsOnRole); READ_BOOL_FIELD(parallelModeNeeded); + READ_INT_FIELD(jitFlags); READ_NODE_FIELD(planTree); READ_NODE_FIELD(rtable); READ_NODE_FIELD(resultRelations); - READ_NODE_FIELD(nonleafResultRelations); READ_NODE_FIELD(rootResultRelations); READ_NODE_FIELD(subplans); READ_BITMAPSET_FIELD(rewindPlanIDs); READ_NODE_FIELD(rowMarks); READ_NODE_FIELD(relationOids); READ_NODE_FIELD(invalItems); - READ_INT_FIELD(nParamExec); + READ_NODE_FIELD(paramExecTypes); READ_NODE_FIELD(utilityStmt); READ_LOCATION_FIELD(stmt_location); READ_LOCATION_FIELD(stmt_len); @@ -1561,7 +1597,8 @@ _readModifyTable(void) READ_ENUM_FIELD(operation, CmdType); READ_BOOL_FIELD(canSetTag); READ_UINT_FIELD(nominalRelation); - READ_NODE_FIELD(partitioned_rels); + READ_UINT_FIELD(rootRelation); + READ_BOOL_FIELD(partColsUpdated); READ_NODE_FIELD(resultRelations); READ_INT_FIELD(resultRelIndex); READ_INT_FIELD(rootResultRelIndex); @@ -1592,8 +1629,9 @@ _readAppend(void) ReadCommonPlan(&local_node->plan); - READ_NODE_FIELD(partitioned_rels); READ_NODE_FIELD(appendplans); + READ_INT_FIELD(first_partial_plan); + READ_NODE_FIELD(part_prune_info); READ_DONE(); } @@ -1608,13 +1646,13 @@ _readMergeAppend(void) ReadCommonPlan(&local_node->plan); - READ_NODE_FIELD(partitioned_rels); READ_NODE_FIELD(mergeplans); READ_INT_FIELD(numCols); READ_ATTRNUMBER_ARRAY(sortColIdx, local_node->numCols); READ_OID_ARRAY(sortOperators, local_node->numCols); READ_OID_ARRAY(collations, local_node->numCols); READ_BOOL_ARRAY(nullsFirst, local_node->numCols); + READ_NODE_FIELD(part_prune_info); READ_DONE(); } @@ -1889,6 +1927,21 @@ _readCteScan(void) READ_DONE(); } +/* + * _readNamedTuplestoreScan + */ +static NamedTuplestoreScan * +_readNamedTuplestoreScan(void) +{ + READ_LOCALS(NamedTuplestoreScan); + + ReadCommonScan(&local_node->scan); + + READ_STRING_FIELD(enrname); + + READ_DONE(); +} + /* * _readWorkTableScan */ @@ -2131,6 +2184,11 @@ _readWindowAgg(void) READ_INT_FIELD(frameOptions); READ_NODE_FIELD(startOffset); READ_NODE_FIELD(endOffset); + READ_OID_FIELD(startInRangeFunc); + READ_OID_FIELD(endInRangeFunc); + READ_OID_FIELD(inRangeColl); + READ_BOOL_FIELD(inRangeAsc); + READ_BOOL_FIELD(inRangeNullsFirst); READ_DONE(); } @@ -2163,8 +2221,10 @@ _readGather(void) ReadCommonPlan(&local_node->plan); READ_INT_FIELD(num_workers); + READ_INT_FIELD(rescan_param); READ_BOOL_FIELD(single_copy); READ_BOOL_FIELD(invisible); + READ_BITMAPSET_FIELD(initParam); READ_DONE(); } @@ -2180,11 +2240,13 @@ _readGatherMerge(void) ReadCommonPlan(&local_node->plan); READ_INT_FIELD(num_workers); + READ_INT_FIELD(rescan_param); READ_INT_FIELD(numCols); READ_ATTRNUMBER_ARRAY(sortColIdx, local_node->numCols); READ_OID_ARRAY(sortOperators, local_node->numCols); READ_OID_ARRAY(collations, local_node->numCols); READ_BOOL_ARRAY(nullsFirst, local_node->numCols); + READ_BITMAPSET_FIELD(initParam); READ_DONE(); } @@ -2202,6 +2264,7 @@ _readHash(void) READ_OID_FIELD(skewTable); READ_INT_FIELD(skewColumn); READ_BOOL_FIELD(skewInherit); + READ_FLOAT_FIELD(rows_total); READ_DONE(); } @@ -2294,6 +2357,63 @@ _readPlanRowMark(void) READ_DONE(); } +static PartitionPruneInfo * +_readPartitionPruneInfo(void) +{ + READ_LOCALS(PartitionPruneInfo); + + 
READ_NODE_FIELD(prune_infos); + READ_BITMAPSET_FIELD(other_subplans); + + READ_DONE(); +} + +static PartitionedRelPruneInfo * +_readPartitionedRelPruneInfo(void) +{ + READ_LOCALS(PartitionedRelPruneInfo); + + READ_UINT_FIELD(rtindex); + READ_NODE_FIELD(pruning_steps); + READ_BITMAPSET_FIELD(present_parts); + READ_INT_FIELD(nparts); + READ_INT_FIELD(nexprs); + READ_INT_ARRAY(subplan_map, local_node->nparts); + READ_INT_ARRAY(subpart_map, local_node->nparts); + READ_BOOL_ARRAY(hasexecparam, local_node->nexprs); + READ_BOOL_FIELD(do_initial_prune); + READ_BOOL_FIELD(do_exec_prune); + READ_BITMAPSET_FIELD(execparamids); + + READ_DONE(); +} + +static PartitionPruneStepOp * +_readPartitionPruneStepOp(void) +{ + READ_LOCALS(PartitionPruneStepOp); + + READ_INT_FIELD(step.step_id); + READ_INT_FIELD(opstrategy); + READ_NODE_FIELD(exprs); + READ_NODE_FIELD(cmpfns); + READ_BITMAPSET_FIELD(nullkeys); + + READ_DONE(); +} + +static PartitionPruneStepCombine * +_readPartitionPruneStepCombine(void) +{ + READ_LOCALS(PartitionPruneStepCombine); + + READ_INT_FIELD(step.step_id); + READ_ENUM_FIELD(combineOp, PartitionPruneCombineOp); + READ_NODE_FIELD(source_stepids); + + READ_DONE(); +} + /* * _readPlanInvalItem */ @@ -2388,6 +2508,9 @@ _readPartitionBoundSpec(void) READ_LOCALS(PartitionBoundSpec); READ_CHAR_FIELD(strategy); + READ_BOOL_FIELD(is_default); + READ_INT_FIELD(modulus); + READ_INT_FIELD(remainder); READ_NODE_FIELD(listdatums); READ_NODE_FIELD(lowerdatums); READ_NODE_FIELD(upperdatums); @@ -2603,6 +2726,8 @@ parseNodeString(void) return_value = _readTableFuncScan(); else if (MATCH("CTESCAN", 7)) return_value = _readCteScan(); + else if (MATCH("NAMEDTUPLESTORESCAN", 19)) + return_value = _readNamedTuplestoreScan(); else if (MATCH("WORKTABLESCAN", 13)) return_value = _readWorkTableScan(); else if (MATCH("FOREIGNSCAN", 11)) @@ -2645,6 +2770,14 @@ parseNodeString(void) return_value = _readNestLoopParam(); else if (MATCH("PLANROWMARK", 11)) return_value = _readPlanRowMark(); + else if (MATCH("PARTITIONPRUNEINFO", 18)) + return_value = _readPartitionPruneInfo(); + else if (MATCH("PARTITIONEDRELPRUNEINFO", 23)) + return_value = _readPartitionedRelPruneInfo(); + else if (MATCH("PARTITIONPRUNESTEPOP", 20)) + return_value = _readPartitionPruneStepOp(); + else if (MATCH("PARTITIONPRUNESTEPCOMBINE", 25)) + return_value = _readPartitionPruneStepCombine(); else if (MATCH("PLANINVALITEM", 13)) return_value = _readPlanInvalItem(); else if (MATCH("SUBPLAN", 7)) @@ -2680,7 +2813,7 @@ readDatum(bool typbyval) Size length, i; int tokenLength; - char *token; + const char *token; Datum res; char *s; @@ -2693,7 +2826,7 @@ readDatum(bool typbyval) token = pg_strtok(&tokenLength); /* read the '[' */ if (token == NULL || token[0] != '[') elog(ERROR, "expected \"[\" to start datum, but got \"%s\"; length = %zu", - token ? (const char *) token : "[NULL]", length); + token ? token : "[NULL]", length); if (typbyval) { @@ -2723,7 +2856,7 @@ readDatum(bool typbyval) token = pg_strtok(&tokenLength); /* read the ']' */ if (token == NULL || token[0] != ']') elog(ERROR, "expected \"]\" to end datum, but got \"%s\"; length = %zu", - token ? (const char *) token : "[NULL]", length); + token ? 
token : "[NULL]", length); return res; } @@ -2736,7 +2869,7 @@ readAttrNumberCols(int numCols) { int tokenLength, i; - char *token; + const char *token; AttrNumber *attr_vals; if (numCols <= 0) @@ -2760,7 +2893,7 @@ readOidCols(int numCols) { int tokenLength, i; - char *token; + const char *token; Oid *oid_vals; if (numCols <= 0) @@ -2784,7 +2917,7 @@ readIntCols(int numCols) { int tokenLength, i; - char *token; + const char *token; int *int_vals; if (numCols <= 0) @@ -2808,7 +2941,7 @@ readBoolCols(int numCols) { int tokenLength, i; - char *token; + const char *token; bool *bool_vals; if (numCols <= 0) diff --git a/src/backend/nodes/tidbitmap.c b/src/backend/nodes/tidbitmap.c index c4e53adb0c..17dc53898f 100644 --- a/src/backend/nodes/tidbitmap.c +++ b/src/backend/nodes/tidbitmap.c @@ -29,7 +29,7 @@ * and a non-lossy page. * * - * Copyright (c) 2003-2017, PostgreSQL Global Development Group + * Copyright (c) 2003-2018, PostgreSQL Global Development Group * * IDENTIFICATION * src/backend/nodes/tidbitmap.c @@ -45,6 +45,7 @@ #include "nodes/tidbitmap.h" #include "storage/lwlock.h" #include "utils/dsa.h" +#include "utils/hashutils.h" /* * The maximum number of tuples per page is not large (typically 256 with @@ -237,30 +238,13 @@ static int tbm_comparator(const void *left, const void *right); static int tbm_shared_comparator(const void *left, const void *right, void *arg); -/* - * Simple inline murmur hash implementation for the exact width required, for - * performance. - */ -static inline uint32 -hash_blockno(BlockNumber b) -{ - uint32 h = b; - - h ^= h >> 16; - h *= 0x85ebca6b; - h ^= h >> 13; - h *= 0xc2b2ae35; - h ^= h >> 16; - return h; -} - /* define hashtable mapping block numbers to PagetableEntry's */ #define SH_USE_NONDEFAULT_ALLOCATOR #define SH_PREFIX pagetable #define SH_ELEMENT_TYPE PagetableEntry #define SH_KEY_TYPE BlockNumber #define SH_KEY blockno -#define SH_HASH_KEY(tb, key) hash_blockno(key) +#define SH_HASH_KEY(tb, key) murmurhash32(key) #define SH_EQUAL(tb, a, b) a == b #define SH_SCOPE static inline #define SH_DEFINE @@ -281,7 +265,6 @@ TIDBitmap * tbm_create(long maxbytes, dsa_area *dsa) { TIDBitmap *tbm; - long nbuckets; /* Create the TIDBitmap struct and zero all its fields */ tbm = makeNode(TIDBitmap); @@ -289,17 +272,7 @@ tbm_create(long maxbytes, dsa_area *dsa) tbm->mcxt = CurrentMemoryContext; tbm->status = TBM_EMPTY; - /* - * Estimate number of hashtable entries we can have within maxbytes. This - * estimates the hash cost as sizeof(PagetableEntry), which is good enough - * for our purpose. Also count an extra Pointer per entry for the arrays - * created during iteration readout. 
- */ - nbuckets = maxbytes / - (sizeof(PagetableEntry) + sizeof(Pointer) + sizeof(Pointer)); - nbuckets = Min(nbuckets, INT_MAX - 1); /* safety limit */ - nbuckets = Max(nbuckets, 16); /* sanity limit */ - tbm->maxentries = (int) nbuckets; + tbm->maxentries = (int) tbm_calculate_entries(maxbytes); tbm->lossify_start = 0; tbm->dsa = dsa; tbm->dsapagetable = InvalidDsaPointer; @@ -609,7 +582,7 @@ tbm_intersect(TIDBitmap *a, const TIDBitmap *b) /* * Process one page of a during an intersection op * - * Returns TRUE if apage is now empty and should be deleted from a + * Returns true if apage is now empty and should be deleted from a */ static bool tbm_intersect_page(TIDBitmap *a, PagetableEntry *apage, const TIDBitmap *b) @@ -1562,3 +1535,27 @@ pagetable_free(pagetable_hash *pagetable, void *pointer) tbm->dsapagetableold = InvalidDsaPointer; } } + +/* + * tbm_calculate_entries + * + * Estimate number of hashtable entries we can have within maxbytes. + */ +long +tbm_calculate_entries(double maxbytes) +{ + long nbuckets; + + /* + * Estimate number of hashtable entries we can have within maxbytes. This + * estimates the hash cost as sizeof(PagetableEntry), which is good enough + * for our purpose. Also count an extra Pointer per entry for the arrays + * created during iteration readout. + */ + nbuckets = maxbytes / + (sizeof(PagetableEntry) + sizeof(Pointer) + sizeof(Pointer)); + nbuckets = Min(nbuckets, INT_MAX - 1); /* safety limit */ + nbuckets = Max(nbuckets, 16); /* sanity limit */ + + return nbuckets; +} diff --git a/src/backend/nodes/value.c b/src/backend/nodes/value.c index 5d2f96c103..2a30307baf 100644 --- a/src/backend/nodes/value.c +++ b/src/backend/nodes/value.c @@ -4,7 +4,7 @@ * implementation of Value nodes * * - * Copyright (c) 2003-2017, PostgreSQL Global Development Group + * Copyright (c) 2003-2018, PostgreSQL Global Development Group * * * IDENTIFICATION @@ -20,7 +20,7 @@ * makeInteger */ Value * -makeInteger(long i) +makeInteger(int i) { Value *v = makeNode(Value); diff --git a/src/backend/optimizer/README b/src/backend/optimizer/README index fc0fca4107..9c852a15ef 100644 --- a/src/backend/optimizer/README +++ b/src/backend/optimizer/README @@ -320,7 +320,7 @@ set up for recursive handling of subqueries split up the qual into restrictions (a=1) and joins (b=c) find qual clauses that enable merge and hash joins ----make_one_rel() - set_base_rel_pathlist() + set_base_rel_pathlists() find seqscan and all index paths for each base relation find selectivity of columns used in joins make_rel_from_joinlist() @@ -374,6 +374,7 @@ RelOptInfo - a relation or joined relations MaterialPath - a Material plan node UniquePath - remove duplicate rows (either by hashing or sorting) GatherPath - collect the results of parallel workers + GatherMergePath - collect parallel results, preserving their common sort order ProjectionPath - a Result plan node with child (used for projection) ProjectSetPath - a ProjectSet plan node applied to some sub-path SortPath - a Sort plan node applied to some sub-path @@ -564,10 +565,12 @@ of scanning the relation and the resulting ordering of the tuples. Sequential scan Paths have NIL pathkeys, indicating no known ordering. Index scans have Path.pathkeys that represent the chosen index's ordering, if any. A single-key index would create a single-PathKey list, while a -multi-column index generates a list with one element per index column. 
-(Actually, since an index can be scanned either forward or backward, there -are two possible sort orders and two possible PathKey lists it can -generate.) +multi-column index generates a list with one element per key index column. +Non-key columns specified in the INCLUDE clause of covering indexes don't +have corresponding PathKeys in the list, because they have no influence on +index ordering. (Actually, since an index can be scanned either forward or +backward, there are two possible sort orders and two possible PathKey lists +it can generate.) Note that a bitmap scan has NIL pathkeys since we can say nothing about the overall order of its result. Also, an indexscan on an unordered type @@ -997,6 +1000,7 @@ considered useful for each step. Currently, we may create these types of additional RelOptInfos during upper-level planning: UPPERREL_SETOP result of UNION/INTERSECT/EXCEPT, if any +UPPERREL_PARTIAL_GROUP_AGG result of partial grouping/aggregation, if any UPPERREL_GROUP_AGG result of grouping/aggregation, if any UPPERREL_WINDOW result of window functions, if any UPPERREL_DISTINCT result of "SELECT DISTINCT", if any @@ -1030,7 +1034,7 @@ either by an entire query or some portion of the query in such a way that some of that work can be done by one or more worker processes, which are called parallel workers. Parallel workers are a subtype of dynamic background workers; see src/backend/access/transam/README.parallel for a -fuller description. Academic literature on parallel query suggests that +fuller description. The academic literature on parallel query suggests that parallel execution strategies can be divided into essentially two categories: pipelined parallelism, where the execution of the query is divided into multiple stages and each stage is handled by a separate @@ -1046,16 +1050,14 @@ that the underlying table be partitioned. It only requires that (1) there is some method of dividing the data from at least one of the base tables involved in the relation across multiple processes, (2) allowing each process to handle its own portion of the data, and then (3) -collecting the results. Requirements (2) and (3) is satisfied by the -executor node Gather, which launches any number of worker processes and -executes its single child plan in all of them (and perhaps in the leader -also, if the children aren't generating enough data to keep the leader -busy). Requirement (1) is handled by the SeqScan node: when invoked -with parallel_aware = true, this node will, in effect, partition the -table on a block by block basis, returning a subset of the tuples from -the relation in each worker where that SeqScan is executed. A similar -scheme could be (and probably should be) implemented for bitmap heap -scans. +collecting the results. Requirements (2) and (3) are satisfied by the +executor node Gather (or GatherMerge), which launches any number of worker +processes and executes its single child plan in all of them, and perhaps +in the leader also, if the children aren't generating enough data to keep +the leader busy. Requirement (1) is handled by the table scan node: when +invoked with parallel_aware = true, this node will, in effect, partition +the table on a block by block basis, returning a subset of the tuples from +the relation in each worker where that scan node is executed. Just as we do for non-parallel access methods, we build Paths to represent access strategies that can be used in a parallel plan.
These @@ -1076,3 +1078,43 @@ be desirable to postpone the Gather stage until as near to the top of the plan as possible. Expanding the range of cases in which more work can be pushed below the Gather (and costing them accurately) is likely to keep us busy for a long time to come. + +Partitionwise joins +------------------- + +A join between two similarly partitioned tables can be broken down into joins +between their matching partitions if there exists an equi-join condition +between the partition keys of the joining tables. The equi-join between +partition keys implies that all join partners for a given row in one +partitioned table must be in the corresponding partition of the other +partitioned table. Because of this, the join between partitioned tables can be +broken into joins between the matching partitions. The resultant join is +partitioned in the same way as the joining relations, thus allowing an N-way +join between similarly partitioned tables having an equi-join condition between +their partition keys to be broken down into N-way joins between their matching +partitions. This technique of breaking down a join between partitioned tables +into joins between their partitions is called partitionwise join. We will use the +term "partitioned relation" for either a partitioned table or a join between +compatibly partitioned tables. + +The partitioning properties of a partitioned relation are stored in its +RelOptInfo. The information about data types of partition keys is stored in +the PartitionSchemeData structure. The planner maintains a list of canonical +partition schemes (distinct PartitionSchemeData objects) so that the RelOptInfos of +any two partitioned relations with the same partitioning scheme point to the same +PartitionSchemeData object. This reduces memory consumed by +PartitionSchemeData objects and makes it easy to compare the partition schemes +of joining relations. + +Partitionwise aggregates/grouping +--------------------------------- + +If the GROUP BY clause contains all of the partition keys, all the rows +that belong to a given group must come from a single partition; therefore, +aggregation can be done completely separately for each partition. Otherwise, +partial aggregates can be computed for each partition, and then finalized +after appending the results from the individual partitions. This technique of +breaking down aggregation or grouping over a partitioned relation into +aggregation or grouping over its partitions is called partitionwise +aggregation. Especially when the partition keys match the GROUP BY clause, +this can be significantly faster than the regular method.
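As a minimal SQL sketch of the behavior described above, assuming two hypothetical tables orders and payments hash-partitioned on a cust_id column (the table and column names are illustrative only): with enable_partitionwise_join and enable_partitionwise_aggregate turned on (both settings default to off), a join and GROUP BY on the partition key can be planned per matching partition pair.

    -- Two compatibly partitioned tables, partitioned on the join/grouping key.
    CREATE TABLE orders   (cust_id int, total  numeric) PARTITION BY HASH (cust_id);
    CREATE TABLE payments (cust_id int, amount numeric) PARTITION BY HASH (cust_id);
    CREATE TABLE orders_p0   PARTITION OF orders   FOR VALUES WITH (MODULUS 2, REMAINDER 0);
    CREATE TABLE orders_p1   PARTITION OF orders   FOR VALUES WITH (MODULUS 2, REMAINDER 1);
    CREATE TABLE payments_p0 PARTITION OF payments FOR VALUES WITH (MODULUS 2, REMAINDER 0);
    CREATE TABLE payments_p1 PARTITION OF payments FOR VALUES WITH (MODULUS 2, REMAINDER 1);

    SET enable_partitionwise_join = on;
    SET enable_partitionwise_aggregate = on;

    -- The equi-join is on the partition key, so matching partitions can be
    -- joined pairwise; GROUP BY contains the partition key, so each partition
    -- can also be aggregated separately.
    EXPLAIN
    SELECT o.cust_id, sum(p.amount)
    FROM orders o JOIN payments p USING (cust_id)
    GROUP BY o.cust_id;

With both settings enabled, the resulting plan should show the join (and the grouping) performed once per matching partition pair underneath an Append, rather than a single join over the two whole tables.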
diff --git a/src/backend/optimizer/geqo/geqo_copy.c b/src/backend/optimizer/geqo/geqo_copy.c index 8fd20c5986..111caa2a2a 100644 --- a/src/backend/optimizer/geqo/geqo_copy.c +++ b/src/backend/optimizer/geqo/geqo_copy.c @@ -2,7 +2,7 @@ * * geqo_copy.c * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/backend/optimizer/geqo/geqo_copy.c diff --git a/src/backend/optimizer/geqo/geqo_eval.c b/src/backend/optimizer/geqo/geqo_eval.c index b5cab0c351..3ef7d7d8aa 100644 --- a/src/backend/optimizer/geqo/geqo_eval.c +++ b/src/backend/optimizer/geqo/geqo_eval.c @@ -3,7 +3,7 @@ * geqo_eval.c * Routines to evaluate query trees * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/backend/optimizer/geqo/geqo_eval.c @@ -40,7 +40,7 @@ typedef struct } Clump; static List *merge_clump(PlannerInfo *root, List *clumps, Clump *new_clump, - bool force); + int num_gene, bool force); static bool desirable_join(PlannerInfo *root, RelOptInfo *outer_rel, RelOptInfo *inner_rel); @@ -196,7 +196,7 @@ gimme_tree(PlannerInfo *root, Gene *tour, int num_gene) cur_clump->size = 1; /* Merge it into the clumps list, using only desirable joins */ - clumps = merge_clump(root, clumps, cur_clump, false); + clumps = merge_clump(root, clumps, cur_clump, num_gene, false); } if (list_length(clumps) > 1) @@ -210,7 +210,7 @@ gimme_tree(PlannerInfo *root, Gene *tour, int num_gene) { Clump *clump = (Clump *) lfirst(lc); - fclumps = merge_clump(root, fclumps, clump, true); + fclumps = merge_clump(root, fclumps, clump, num_gene, true); } clumps = fclumps; } @@ -235,7 +235,8 @@ gimme_tree(PlannerInfo *root, Gene *tour, int num_gene) * "desirable" joins. */ static List * -merge_clump(PlannerInfo *root, List *clumps, Clump *new_clump, bool force) +merge_clump(PlannerInfo *root, List *clumps, Clump *new_clump, int num_gene, + bool force) { ListCell *prev; ListCell *lc; @@ -264,8 +265,17 @@ merge_clump(PlannerInfo *root, List *clumps, Clump *new_clump, bool force) /* Keep searching if join order is not valid */ if (joinrel) { - /* Create GatherPaths for any useful partial paths for rel */ - generate_gather_paths(root, joinrel); + /* Create paths for partitionwise joins. */ + generate_partitionwise_join_paths(root, joinrel); + + /* + * Except for the topmost scan/join rel, consider gathering + * partial paths. We'll do the same for the topmost scan/join + * rel once we know the final targetlist (see + * grouping_planner). + */ + if (old_clump->size + new_clump->size < num_gene) + generate_gather_paths(root, joinrel, false); /* Find and save the cheapest paths for this joinrel */ set_cheapest(joinrel); @@ -283,7 +293,7 @@ merge_clump(PlannerInfo *root, List *clumps, Clump *new_clump, bool force) * others. When no further merge is possible, we'll reinsert * it into the list. 
*/ - return merge_clump(root, clumps, old_clump, force); + return merge_clump(root, clumps, old_clump, num_gene, force); } } prev = lc; diff --git a/src/backend/optimizer/geqo/geqo_main.c b/src/backend/optimizer/geqo/geqo_main.c index 86213ac5a0..3eb8bcb76f 100644 --- a/src/backend/optimizer/geqo/geqo_main.c +++ b/src/backend/optimizer/geqo/geqo_main.c @@ -4,7 +4,7 @@ * solution to the query optimization problem * by means of a Genetic Algorithm (GA) * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/backend/optimizer/geqo/geqo_main.c diff --git a/src/backend/optimizer/geqo/geqo_misc.c b/src/backend/optimizer/geqo/geqo_misc.c index 937cb5fe0f..0f96912e49 100644 --- a/src/backend/optimizer/geqo/geqo_misc.c +++ b/src/backend/optimizer/geqo/geqo_misc.c @@ -3,7 +3,7 @@ * geqo_misc.c * misc. printout and debug stuff * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/backend/optimizer/geqo/geqo_misc.c @@ -92,7 +92,7 @@ print_gen(FILE *fp, Pool *pool, int generation) { int lowest; - /* Get index to lowest ranking gene in poplulation. */ + /* Get index to lowest ranking gene in population. */ /* Use 2nd to last since last is buffer. */ lowest = pool->size > 1 ? pool->size - 2 : 0; diff --git a/src/backend/optimizer/geqo/geqo_pool.c b/src/backend/optimizer/geqo/geqo_pool.c index 596a2cda20..b2c9f31c8b 100644 --- a/src/backend/optimizer/geqo/geqo_pool.c +++ b/src/backend/optimizer/geqo/geqo_pool.c @@ -3,7 +3,7 @@ * geqo_pool.c * Genetic Algorithm (GA) pool stuff * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/backend/optimizer/geqo/geqo_pool.c diff --git a/src/backend/optimizer/geqo/geqo_random.c b/src/backend/optimizer/geqo/geqo_random.c index 6f3500649c..850bfe5ebe 100644 --- a/src/backend/optimizer/geqo/geqo_random.c +++ b/src/backend/optimizer/geqo/geqo_random.c @@ -3,7 +3,7 @@ * geqo_random.c * random number generator * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/backend/optimizer/geqo/geqo_random.c diff --git a/src/backend/optimizer/geqo/geqo_selection.c b/src/backend/optimizer/geqo/geqo_selection.c index 4d0f6b0881..ebd34b6db2 100644 --- a/src/backend/optimizer/geqo/geqo_selection.c +++ b/src/backend/optimizer/geqo/geqo_selection.c @@ -3,7 +3,7 @@ * geqo_selection.c * linear selection scheme for the genetic query optimizer * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/backend/optimizer/geqo/geqo_selection.c diff --git a/src/backend/optimizer/path/allpaths.c b/src/backend/optimizer/path/allpaths.c index 2d7e1d84d0..738bb30848 100644 --- a/src/backend/optimizer/path/allpaths.c +++ b/src/backend/optimizer/path/allpaths.c @@ -3,7 +3,7 @@ * allpaths.c * Routines to find possible search paths for processing a 
query * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * @@ -24,6 +24,7 @@ #include "catalog/pg_operator.h" #include "catalog/pg_proc.h" #include "foreign/fdwapi.h" +#include "miscadmin.h" #include "nodes/makefuncs.h" #include "nodes/nodeFuncs.h" #ifdef OPTIMIZER_DEBUG @@ -42,6 +43,7 @@ #include "optimizer/var.h" #include "parser/parse_clause.h" #include "parser/parsetree.h" +#include "partitioning/partprune.h" #include "rewrite/rewriteManip.h" #include "utils/lsyscache.h" @@ -100,7 +102,8 @@ static void generate_mergeappend_paths(PlannerInfo *root, RelOptInfo *rel, static Path *get_cheapest_parameterized_child_path(PlannerInfo *root, RelOptInfo *rel, Relids required_outer); -static List *accumulate_append_subpath(List *subpaths, Path *path); +static void accumulate_append_subpath(Path *path, + List **subpaths, List **special_subpaths); static void set_subquery_pathlist(PlannerInfo *root, RelOptInfo *rel, Index rti, RangeTblEntry *rte); static void set_function_pathlist(PlannerInfo *root, RelOptInfo *rel, @@ -132,8 +135,6 @@ static void subquery_push_qual(Query *subquery, static void recurse_push_qual(Node *setOp, Query *topquery, RangeTblEntry *rte, Index rti, Node *qual); static void remove_unused_subquery_outputs(Query *subquery, RelOptInfo *rel); -static void add_paths_to_append_rel(PlannerInfo *root, RelOptInfo *rel, - List *live_childrels); /* @@ -146,6 +147,7 @@ make_one_rel(PlannerInfo *root, List *joinlist) { RelOptInfo *rel; Index rti; + double total_pages; /* * Construct the all_baserels Relids set. @@ -172,10 +174,45 @@ make_one_rel(PlannerInfo *root, List *joinlist) set_base_rel_consider_startup(root); /* - * Compute size estimates and consider_parallel flags for each base rel, - * then generate access paths. + * Compute size estimates and consider_parallel flags for each base rel. */ set_base_rel_sizes(root); + + /* + * We should now have size estimates for every actual table involved in + * the query, and we also know which if any have been deleted from the + * query by join removal, pruned by partition pruning, or eliminated by + * constraint exclusion. So we can now compute total_table_pages. + * + * Note that appendrels are not double-counted here, even though we don't + * bother to distinguish RelOptInfos for appendrel parents, because the + * parents will have pages = 0. + * + * XXX if a table is self-joined, we will count it once per appearance, + * which perhaps is the wrong thing ... but that's not completely clear, + * and detecting self-joins here is difficult, so ignore it for now. + */ + total_pages = 0; + for (rti = 1; rti < root->simple_rel_array_size; rti++) + { + RelOptInfo *brel = root->simple_rel_array[rti]; + + if (brel == NULL) + continue; + + Assert(brel->relid == rti); /* sanity check on array */ + + if (IS_DUMMY_REL(brel)) + continue; + + if (IS_SIMPLE_REL(brel)) + total_pages += (double) brel->pages; + } + root->total_table_pages = total_pages; + + /* + * Generate access paths for each base rel. + */ set_base_rel_pathlists(root); /* @@ -352,8 +389,8 @@ set_rel_size(PlannerInfo *root, RelOptInfo *rel, else if (rte->relkind == RELKIND_PARTITIONED_TABLE) { /* - * A partitioned table without leaf partitions is marked - * as a dummy rel. + * A partitioned table without any partitions is marked as + * a dummy rel. 
*/ set_dummy_rel_pathlist(rel); } @@ -479,14 +516,21 @@ set_rel_pathlist(PlannerInfo *root, RelOptInfo *rel, } /* - * If this is a baserel, consider gathering any partial paths we may have - * created for it. (If we tried to gather inheritance children, we could + * If this is a baserel, we should normally consider gathering any partial + * paths we may have created for it. + * + * However, if this is an inheritance child, skip it. Otherwise, we could end up with a very large number of gather nodes, each trying to grab - * its own pool of workers, so don't do this for otherrels. Instead, - * we'll consider gathering partial paths for the parent appendrel.) + * its own pool of workers. Instead, we'll consider gathering partial + * paths for the parent appendrel. + * + * Also, if this is the topmost scan/join rel (that is, the only baserel), + * we postpone this until the final scan/join targetlist is available (see + * grouping_planner). */ - if (rel->reloptkind == RELOPT_BASEREL) - generate_gather_paths(root, rel); + if (rel->reloptkind == RELOPT_BASEREL && + bms_membership(root->all_baserels) != BMS_SINGLETON) + generate_gather_paths(root, rel, false); /* * Allow a plugin to editorialize on the set of Paths for this base @@ -612,7 +656,20 @@ set_rel_consider_parallel(PlannerInfo *root, RelOptInfo *rel, * the SubqueryScanPath as not parallel-safe. (Note that * set_subquery_pathlist() might push some of these quals down * into the subquery itself, but that doesn't change anything.) + * + * We can't push sub-select containing LIMIT/OFFSET to workers as + * there is no guarantee that the row order will be fully + * deterministic, and applying LIMIT/OFFSET will lead to + * inconsistent results at the top-level. (In some cases, where + * the result is ordered, we could relax this restriction. But it + * doesn't currently seem worth expending extra effort to do so.) */ + { + Query *subquery = castNode(Query, rte->subquery); + + if (limit_needed(subquery)) + return; + } break; case RTE_JOIN: @@ -718,7 +775,8 @@ create_plain_partial_paths(PlannerInfo *root, RelOptInfo *rel) { int parallel_workers; - parallel_workers = compute_parallel_worker(rel, rel->pages, -1); + parallel_workers = compute_parallel_worker(rel, rel->pages, -1, + max_parallel_workers_per_gather); /* If any limit was set to zero, the user doesn't want a parallel scan. */ if (parallel_workers <= 0) @@ -866,9 +924,51 @@ set_append_rel_size(PlannerInfo *root, RelOptInfo *rel, double *parent_attrsizes; int nattrs; ListCell *l; + Relids live_children = NULL; + bool did_pruning = false; + + /* Guard against stack overflow due to overly deep inheritance tree. */ + check_stack_depth(); Assert(IS_SIMPLE_REL(rel)); + /* + * Initialize partitioned_child_rels to contain this RT index. + * + * Note that during the set_append_rel_pathlist() phase, we will bubble up + * the indexes of partitioned relations that appear down in the tree, so + * that when we've created Paths for all the children, the root + * partitioned table's list will contain all such indexes. + */ + if (rte->relkind == RELKIND_PARTITIONED_TABLE) + rel->partitioned_child_rels = list_make1_int(rti); + + /* + * If the partitioned relation has any baserestrictinfo quals then we + * attempt to use these quals to prune away partitions that cannot + * possibly contain any tuples matching these quals. In this case we'll + * store the relids of all partitions which could possibly contain a + * matching tuple, and skip anything else in the loop below.
+ */ + if (enable_partition_pruning && + rte->relkind == RELKIND_PARTITIONED_TABLE && + rel->baserestrictinfo != NIL) + { + live_children = prune_append_rel_partitions(rel); + did_pruning = true; + } + + /* + * If this is a partitioned baserel, set the consider_partitionwise_join + * flag; currently, we only consider partitionwise joins with the baserel + * if its targetlist doesn't contain a whole-row Var. + */ + if (enable_partitionwise_join && + rel->reloptkind == RELOPT_BASEREL && + rte->relkind == RELKIND_PARTITIONED_TABLE && + rel->attr_needed[InvalidAttrNumber - rel->min_attr] == NULL) + rel->consider_partitionwise_join = true; + /* * Initialize to compute size estimates for whole append relation. * @@ -917,11 +1017,42 @@ set_append_rel_size(PlannerInfo *root, RelOptInfo *rel, Assert(childrel->reloptkind == RELOPT_OTHER_MEMBER_REL); /* - * We have to copy the parent's targetlist and quals to the child, - * with appropriate substitution of variables. However, only the - * baserestrictinfo quals are needed before we can check for - * constraint exclusion; so do that first and then check to see if we - * can disregard this child. + * Copy/Modify targetlist. Even if this child is deemed empty, we need + * its targetlist in case it falls on nullable side in a child-join + * because of partitionwise join. + * + * NB: the resulting childrel->reltarget->exprs may contain arbitrary + * expressions, which otherwise would not occur in a rel's targetlist. + * Code that might be looking at an appendrel child must cope with + * such. (Normally, a rel's targetlist would only include Vars and + * PlaceHolderVars.) XXX we do not bother to update the cost or width + * fields of childrel->reltarget; not clear if that would be useful. + */ + childrel->reltarget->exprs = (List *) + adjust_appendrel_attrs(root, + (Node *) rel->reltarget->exprs, + 1, &appinfo); + + /* + * We have to make child entries in the EquivalenceClass data + * structures as well. This is needed either if the parent + * participates in some eclass joins (because we will want to consider + * inner-indexscan joins on the individual children) or if the parent + * has useful pathkeys (because we should try to build MergeAppend + * paths that produce those sort orderings). Even if this child is + * deemed dummy, it may fall on nullable side in a child-join, which + * in turn may participate in a MergeAppend, where we will need the + * EquivalenceClass data structures. + */ + if (rel->has_eclass_joins || has_useful_pathkeys(root, rel)) + add_child_rel_equivalences(root, appinfo, rel, childrel); + childrel->has_eclass_joins = rel->has_eclass_joins; + + /* + * We have to copy the parent's quals to the child, with appropriate + * substitution of variables. However, only the baserestrictinfo + * quals are needed before we can check for constraint exclusion; so + * do that first and then check to see if we can disregard this child. * * The child rel's targetlist might contain non-Var expressions, which * means that substitution into the quals could produce opportunities @@ -1038,6 +1169,13 @@ set_append_rel_size(PlannerInfo *root, RelOptInfo *rel, continue; } + if (did_pruning && !bms_is_member(appinfo->child_relid, live_children)) + { + /* This partition was pruned; skip it. 
*/ + set_dummy_rel_pathlist(childrel); + continue; + } + if (relation_excluded_by_constraints(root, childrel, childRTE)) { /* @@ -1048,36 +1186,11 @@ set_append_rel_size(PlannerInfo *root, RelOptInfo *rel, continue; } - /* - * CE failed, so finish copying/modifying targetlist and join quals. - * - * NB: the resulting childrel->reltarget->exprs may contain arbitrary - * expressions, which otherwise would not occur in a rel's targetlist. - * Code that might be looking at an appendrel child must cope with - * such. (Normally, a rel's targetlist would only include Vars and - * PlaceHolderVars.) XXX we do not bother to update the cost or width - * fields of childrel->reltarget; not clear if that would be useful. - */ + /* CE failed, so finish copying/modifying join quals. */ childrel->joininfo = (List *) adjust_appendrel_attrs(root, (Node *) rel->joininfo, 1, &appinfo); - childrel->reltarget->exprs = (List *) - adjust_appendrel_attrs(root, - (Node *) rel->reltarget->exprs, - 1, &appinfo); - - /* - * We have to make child entries in the EquivalenceClass data - * structures as well. This is needed either if the parent - * participates in some eclass joins (because we will want to consider - * inner-indexscan joins on the individual children) or if the parent - * has useful pathkeys (because we should try to build MergeAppend - * paths that produce those sort orderings). - */ - if (rel->has_eclass_joins || has_useful_pathkeys(root, rel)) - add_child_rel_equivalences(root, appinfo, rel, childrel); - childrel->has_eclass_joins = rel->has_eclass_joins; /* * Note: we could compute appropriate attr_needed data for the child's @@ -1087,6 +1200,14 @@ set_append_rel_size(PlannerInfo *root, RelOptInfo *rel, * otherrels. So we just leave the child's attr_needed empty. */ + /* + * If we consider partitionwise joins with the parent rel, do the same + * for partitioned child rels. + */ + if (rel->consider_partitionwise_join && + childRTE->relkind == RELKIND_PARTITIONED_TABLE) + childrel->consider_partitionwise_join = true; + /* * If parallelism is allowable for this query in general, see whether * it's allowable for this childrel in particular. But if we've @@ -1186,6 +1307,11 @@ set_append_rel_size(PlannerInfo *root, RelOptInfo *rel, * because some places assume rel->tuples is valid for any baserel. */ rel->tuples = parent_rows; + + /* + * Note that we leave rel->pages as zero; this is important to avoid + * double-counting the appendrel tree in total_table_pages. + */ } else { @@ -1252,20 +1378,26 @@ set_append_rel_pathlist(PlannerInfo *root, RelOptInfo *rel, if (IS_DUMMY_REL(childrel)) continue; + /* Bubble up childrel's partitioned children. */ + if (rel->part_scheme) + rel->partitioned_child_rels = + list_concat(rel->partitioned_child_rels, + list_copy(childrel->partitioned_child_rels)); + /* * Child is live, so add it to the live_childrels list for use below. */ live_childrels = lappend(live_childrels, childrel); } - /* Add paths to the "append" relation. */ + /* Add paths to the append relation. */ add_paths_to_append_rel(root, rel, live_childrels); } /* * add_paths_to_append_rel - * Generate paths for given "append" relation given the set of non-dummy + * Generate paths for the given append relation given the set of non-dummy * child rels. * * The function collects all parameterizations and orderings supported by the @@ -1274,25 +1406,71 @@ set_append_rel_pathlist(PlannerInfo *root, RelOptInfo *rel, * parameterization or ordering. 
Similarly it collects partial paths from * non-dummy children to create partial append paths. */ -static void +void add_paths_to_append_rel(PlannerInfo *root, RelOptInfo *rel, List *live_childrels) { List *subpaths = NIL; bool subpaths_valid = true; List *partial_subpaths = NIL; + List *pa_partial_subpaths = NIL; + List *pa_nonpartial_subpaths = NIL; bool partial_subpaths_valid = true; + bool pa_subpaths_valid; List *all_child_pathkeys = NIL; List *all_child_outers = NIL; ListCell *l; List *partitioned_rels = NIL; - RangeTblEntry *rte; + double partial_rows = -1; + + /* If appropriate, consider parallel append */ + pa_subpaths_valid = enable_parallel_append && rel->consider_parallel; + + /* + * AppendPath generated for partitioned tables must record the RT indexes + * of partitioned tables that are direct or indirect children of this + * Append rel. + * + * AppendPath may be for a sub-query RTE (UNION ALL), in which case, 'rel' + * itself does not represent a partitioned relation, but the child sub- + * queries may contain references to partitioned relations. The loop + * below will look for such children and collect them in a list to be + * passed to the path creation function. (This assumes that we don't need + * to look through multiple levels of subquery RTEs; if we ever do, we + * could consider stuffing the list we generate here into sub-query RTE's + * RelOptInfo, just like we do for partitioned rels, which would be used + * when populating our parent rel with paths. For the present, that + * appears to be unnecessary.) + */ + if (rel->part_scheme != NULL) + { + if (IS_SIMPLE_REL(rel)) + partitioned_rels = list_make1(rel->partitioned_child_rels); + else if (IS_JOIN_REL(rel)) + { + int relid = -1; + List *partrels = NIL; + + /* + * For a partitioned joinrel, concatenate the component rels' + * partitioned_child_rels lists. + */ + while ((relid = bms_next_member(rel->relids, relid)) >= 0) + { + RelOptInfo *component; + + Assert(relid >= 1 && relid < root->simple_rel_array_size); + component = root->simple_rel_array[relid]; + Assert(component->part_scheme != NULL); + Assert(list_length(component->partitioned_child_rels) >= 1); + partrels = + list_concat(partrels, + list_copy(component->partitioned_child_rels)); + } + + partitioned_rels = list_make1(partrels); + } - rte = planner_rt_fetch(rel->relid, root); - if (rte->relkind == RELKIND_PARTITIONED_TABLE) - { - partitioned_rels = get_partitioned_child_rels(root, rel->relid); - /* The root partitioned table is included as a child rel */ Assert(list_length(partitioned_rels) >= 1); } @@ -1305,25 +1483,89 @@ add_paths_to_append_rel(PlannerInfo *root, RelOptInfo *rel, { RelOptInfo *childrel = lfirst(l); ListCell *lcp; + Path *cheapest_partial_path = NULL; + + /* + * For UNION ALLs with non-empty partitioned_child_rels, accumulate + * the Lists of child relations. + */ + if (rel->rtekind == RTE_SUBQUERY && childrel->partitioned_child_rels != NIL) + partitioned_rels = lappend(partitioned_rels, + childrel->partitioned_child_rels); /* * If child has an unparameterized cheapest-total path, add that to * the unparameterized Append path we are constructing for the parent. * If not, there's no workable unparameterized path. + * + * With partitionwise aggregates, the child rel's pathlist may be + * empty, so don't assume that a path exists here. 
*/ - if (childrel->cheapest_total_path->param_info == NULL) - subpaths = accumulate_append_subpath(subpaths, - childrel->cheapest_total_path); + if (childrel->pathlist != NIL && + childrel->cheapest_total_path->param_info == NULL) + accumulate_append_subpath(childrel->cheapest_total_path, + &subpaths, NULL); else subpaths_valid = false; /* Same idea, but for a partial plan. */ if (childrel->partial_pathlist != NIL) - partial_subpaths = accumulate_append_subpath(partial_subpaths, - linitial(childrel->partial_pathlist)); + { + cheapest_partial_path = linitial(childrel->partial_pathlist); + accumulate_append_subpath(cheapest_partial_path, + &partial_subpaths, NULL); + } else partial_subpaths_valid = false; + /* + * Same idea, but for a parallel append mixing partial and non-partial + * paths. + */ + if (pa_subpaths_valid) + { + Path *nppath = NULL; + + nppath = + get_cheapest_parallel_safe_total_inner(childrel->pathlist); + + if (cheapest_partial_path == NULL && nppath == NULL) + { + /* Neither a partial nor a parallel-safe path? Forget it. */ + pa_subpaths_valid = false; + } + else if (nppath == NULL || + (cheapest_partial_path != NULL && + cheapest_partial_path->total_cost < nppath->total_cost)) + { + /* Partial path is cheaper or the only option. */ + Assert(cheapest_partial_path != NULL); + accumulate_append_subpath(cheapest_partial_path, + &pa_partial_subpaths, + &pa_nonpartial_subpaths); + + } + else + { + /* + * Either we've got only a non-partial path, or we think that + * a single backend can execute the best non-partial path + * faster than all the parallel backends working together can + * execute the best partial path. + * + * It might make sense to be more aggressive here. Even if + * the best non-partial path is more expensive than the best + * partial path, it could still be better to choose the + * non-partial path if there are several such paths that can + * be given to different workers. For now, we don't try to + * figure that out. + */ + accumulate_append_subpath(nppath, + &pa_nonpartial_subpaths, + NULL); + } + } + /* * Collect lists of all the available path orderings and * parameterizations for all the children. We use these as a @@ -1395,11 +1637,13 @@ add_paths_to_append_rel(PlannerInfo *root, RelOptInfo *rel, * if we have zero or one live subpath due to constraint exclusion.) */ if (subpaths_valid) - add_path(rel, (Path *) create_append_path(rel, subpaths, NULL, 0, - partitioned_rels)); + add_path(rel, (Path *) create_append_path(root, rel, subpaths, NIL, + NULL, 0, false, + partitioned_rels, -1)); /* - * Consider an append of partial unordered, unparameterized partial paths. + * Consider an append of unordered, unparameterized partial paths. Make + * it parallel-aware if possible. */ if (partial_subpaths_valid) { @@ -1407,12 +1651,7 @@ add_paths_to_append_rel(PlannerInfo *root, RelOptInfo *rel, ListCell *lc; int parallel_workers = 0; - /* - * Decide on the number of workers to request for this append path. - * For now, we just use the maximum value from among the members. It - * might be useful to use a higher number if the Append node were - * smart enough to spread out the workers, but it currently isn't. - */ + /* Find the highest number of workers requested for any subpath. */ foreach(lc, partial_subpaths) { Path *path = lfirst(lc); @@ -1421,9 +1660,78 @@ add_paths_to_append_rel(PlannerInfo *root, RelOptInfo *rel, } Assert(parallel_workers > 0); + /* + * If the use of parallel append is permitted, always request at least + * log2(# of children) workers. 
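The per-child logic above decides whether a Parallel Append should consume the child's cheapest partial path or its cheapest parallel-safe non-partial path, based purely on total cost, and gives up on the parallel append when the child has neither. A compact sketch of that three-way decision, with bare cost values standing in for Path nodes (a negative cost meaning "no such path exists"):

#include <stdio.h>

typedef enum { USE_PARTIAL, USE_NONPARTIAL, NOT_VALID } PAChoice;

/*
 * Decide how a child feeds a Parallel Append: prefer the cheaper of the
 * cheapest partial path and the cheapest parallel-safe non-partial path.
 * A cost < 0 means the child has no path of that kind.
 */
static PAChoice
choose_child_path(double partial_cost, double nonpartial_cost)
{
    if (partial_cost < 0 && nonpartial_cost < 0)
        return NOT_VALID;           /* neither kind exists: give up on PA */
    if (nonpartial_cost < 0 ||
        (partial_cost >= 0 && partial_cost < nonpartial_cost))
        return USE_PARTIAL;         /* partial path cheaper or only option */
    return USE_NONPARTIAL;          /* one worker runs this child alone */
}

int
main(void)
{
    printf("%d\n", choose_child_path(120.0, 90.0));    /* USE_NONPARTIAL */
    printf("%d\n", choose_child_path(80.0, -1.0));     /* USE_PARTIAL */
    printf("%d\n", choose_child_path(-1.0, -1.0));     /* NOT_VALID */
    return 0;
}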
We assume it can be useful to have + * extra workers in this case because they will be spread out across + * the children. The precise formula is just a guess, but we don't + * want to end up with a radically different answer for a table with N + * partitions vs. an unpartitioned table with the same data, so the + * use of some kind of log-scaling here seems to make some sense. + */ + if (enable_parallel_append) + { + parallel_workers = Max(parallel_workers, + fls(list_length(live_childrels))); + parallel_workers = Min(parallel_workers, + max_parallel_workers_per_gather); + } + Assert(parallel_workers > 0); + /* Generate a partial append path. */ - appendpath = create_append_path(rel, partial_subpaths, NULL, - parallel_workers, partitioned_rels); + appendpath = create_append_path(root, rel, NIL, partial_subpaths, + NULL, parallel_workers, + enable_parallel_append, + partitioned_rels, -1); + + /* + * Make sure any subsequent partial paths use the same row count + * estimate. + */ + partial_rows = appendpath->path.rows; + + /* Add the path. */ + add_partial_path(rel, (Path *) appendpath); + } + + /* + * Consider a parallel-aware append using a mix of partial and non-partial + * paths. (This only makes sense if there's at least one child which has + * a non-partial path that is substantially cheaper than any partial path; + * otherwise, we should use the append path added in the previous step.) + */ + if (pa_subpaths_valid && pa_nonpartial_subpaths != NIL) + { + AppendPath *appendpath; + ListCell *lc; + int parallel_workers = 0; + + /* + * Find the highest number of workers requested for any partial + * subpath. + */ + foreach(lc, pa_partial_subpaths) + { + Path *path = lfirst(lc); + + parallel_workers = Max(parallel_workers, path->parallel_workers); + } + + /* + * Same formula here as above. It's even more important in this + * instance because the non-partial paths won't contribute anything to + * the planned number of parallel workers. 
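The worker-count formula above takes the highest per-child request, raises it to a log2(#children) floor, and clamps the result to the per-gather limit. A self-contained sketch of the same arithmetic; my_fls below imitates fls() (position of the highest set bit), since fls() is not part of standard C:

#include <stdio.h>

#define Max(a,b) ((a) > (b) ? (a) : (b))
#define Min(a,b) ((a) < (b) ? (a) : (b))

/* Position of the most significant set bit, 1-based; 0 for input 0. */
static int
my_fls(int x)
{
    int         pos = 0;

    while (x > 0)
    {
        pos++;
        x >>= 1;
    }
    return pos;
}

/*
 * Combine the highest worker count requested by any child with a
 * log2(#children) floor, then clamp to the configured per-gather maximum.
 */
static int
append_parallel_workers(int max_child_workers, int nchildren, int per_gather_limit)
{
    int         workers = Max(max_child_workers, my_fls(nchildren));

    return Min(workers, per_gather_limit);
}

int
main(void)
{
    /* 100 partitions, children asked for at most 2 workers, GUC limit 8. */
    printf("%d workers\n", append_parallel_workers(2, 100, 8));
    return 0;
}

With 100 children whose paths each requested only 2 workers and a limit of 8, the log-scaling floor wins (my_fls(100) is 7) and 7 workers are requested.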
+ */ + parallel_workers = Max(parallel_workers, + fls(list_length(live_childrels))); + parallel_workers = Min(parallel_workers, + max_parallel_workers_per_gather); + Assert(parallel_workers > 0); + + appendpath = create_append_path(root, rel, pa_nonpartial_subpaths, + pa_partial_subpaths, + NULL, parallel_workers, true, + partitioned_rels, partial_rows); add_partial_path(rel, (Path *) appendpath); } @@ -1462,6 +1770,13 @@ add_paths_to_append_rel(PlannerInfo *root, RelOptInfo *rel, RelOptInfo *childrel = (RelOptInfo *) lfirst(lcr); Path *subpath; + if (childrel->pathlist == NIL) + { + /* failed to make a suitable path for this child */ + subpaths_valid = false; + break; + } + subpath = get_cheapest_parameterized_child_path(root, childrel, required_outer); @@ -1471,13 +1786,14 @@ add_paths_to_append_rel(PlannerInfo *root, RelOptInfo *rel, subpaths_valid = false; break; } - subpaths = accumulate_append_subpath(subpaths, subpath); + accumulate_append_subpath(subpath, &subpaths, NULL); } if (subpaths_valid) add_path(rel, (Path *) - create_append_path(rel, subpaths, required_outer, 0, - partitioned_rels)); + create_append_path(root, rel, subpaths, NIL, + required_outer, 0, false, + partitioned_rels, -1)); } } @@ -1561,10 +1877,10 @@ generate_mergeappend_paths(PlannerInfo *root, RelOptInfo *rel, if (cheapest_startup != cheapest_total) startup_neq_total = true; - startup_subpaths = - accumulate_append_subpath(startup_subpaths, cheapest_startup); - total_subpaths = - accumulate_append_subpath(total_subpaths, cheapest_total); + accumulate_append_subpath(cheapest_startup, + &startup_subpaths, NULL); + accumulate_append_subpath(cheapest_total, + &total_subpaths, NULL); } /* ... and build the MergeAppend paths */ @@ -1660,7 +1976,7 @@ get_cheapest_parameterized_child_path(PlannerInfo *root, RelOptInfo *rel, /* * accumulate_append_subpath - * Add a subpath to the list being built for an Append or MergeAppend + * Add a subpath to the list being built for an Append or MergeAppend. * * It's possible that the child is itself an Append or MergeAppend path, in * which case we can "cut out the middleman" and just add its child paths to @@ -1671,26 +1987,54 @@ get_cheapest_parameterized_child_path(PlannerInfo *root, RelOptInfo *rel, * omitting a sort step, which seems fine: if the parent is to be an Append, * its result would be unsorted anyway, while if the parent is to be a * MergeAppend, there's no point in a separate sort on a child. + * its result would be unsorted anyway. + * + * Normally, either path is a partial path and subpaths is a list of partial + * paths, or else path is a non-partial plan and subpaths is a list of those. + * However, if path is a parallel-aware Append, then we add its partial path + * children to subpaths and the rest to special_subpaths. If the latter is + * NULL, we don't flatten the path at all (unless it contains only partial + * paths). 
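The rewritten accumulate_append_subpath above flattens a child Append into the caller's lists and, when the child is a parallel-aware Append, splits its subpaths at first_partial_path: the leading non-partial subpaths go to the special (non-partial) list and the remainder to the partial list. A simplified sketch with plain arrays in place of List; ToyAppend and the helper name are stand-ins, not planner types:

#include <stdio.h>

#define MAXPATHS 16

/* Toy model: a child Append is an array of subpath ids plus a split point. */
typedef struct ToyAppend
{
    int         ids[MAXPATHS];
    int         nsubpaths;
    int         first_partial;  /* index of the first partial subpath */
} ToyAppend;

/*
 * Flatten a parallel-aware child Append: entries before first_partial go to
 * the caller's non-partial list, the rest to the partial list.
 */
static void
flatten_parallel_append(const ToyAppend *child,
                        int *nonpartial, int *n_nonpartial,
                        int *partial, int *n_partial)
{
    for (int i = 0; i < child->nsubpaths; i++)
    {
        if (i < child->first_partial)
            nonpartial[(*n_nonpartial)++] = child->ids[i];
        else
            partial[(*n_partial)++] = child->ids[i];
    }
}

int
main(void)
{
    ToyAppend   child = {{11, 12, 13, 14}, 4, 2};
    int         nonpartial[MAXPATHS];
    int         partial[MAXPATHS];
    int         n_nonpartial = 0;
    int         n_partial = 0;

    flatten_parallel_append(&child, nonpartial, &n_nonpartial,
                            partial, &n_partial);
    printf("non-partial: %d paths, partial: %d paths\n", n_nonpartial, n_partial);
    return 0;
}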
*/ -static List * -accumulate_append_subpath(List *subpaths, Path *path) +static void +accumulate_append_subpath(Path *path, List **subpaths, List **special_subpaths) { if (IsA(path, AppendPath)) { AppendPath *apath = (AppendPath *) path; - /* list_copy is important here to avoid sharing list substructure */ - return list_concat(subpaths, list_copy(apath->subpaths)); + if (!apath->path.parallel_aware || apath->first_partial_path == 0) + { + /* list_copy is important here to avoid sharing list substructure */ + *subpaths = list_concat(*subpaths, list_copy(apath->subpaths)); + return; + } + else if (special_subpaths != NULL) + { + List *new_special_subpaths; + + /* Split Parallel Append into partial and non-partial subpaths */ + *subpaths = list_concat(*subpaths, + list_copy_tail(apath->subpaths, + apath->first_partial_path)); + new_special_subpaths = + list_truncate(list_copy(apath->subpaths), + apath->first_partial_path); + *special_subpaths = list_concat(*special_subpaths, + new_special_subpaths); + return; + } } else if (IsA(path, MergeAppendPath)) { MergeAppendPath *mpath = (MergeAppendPath *) path; /* list_copy is important here to avoid sharing list substructure */ - return list_concat(subpaths, list_copy(mpath->subpaths)); + *subpaths = list_concat(*subpaths, list_copy(mpath->subpaths)); + return; } - else - return lappend(subpaths, path); + + *subpaths = lappend(*subpaths, path); } /* @@ -1713,7 +2057,8 @@ set_dummy_rel_pathlist(RelOptInfo *rel) rel->pathlist = NIL; rel->partial_pathlist = NIL; - add_path(rel, (Path *) create_append_path(rel, NIL, NULL, 0, NIL)); + add_path(rel, (Path *) create_append_path(NULL, rel, NIL, NIL, NULL, + 0, false, NIL, -1)); /* * We set the cheapest path immediately, to ensure that IS_DUMMY_REL() @@ -1788,7 +2133,7 @@ set_subquery_pathlist(PlannerInfo *root, RelOptInfo *rel, * Zero out result area for subquery_is_pushdown_safe, so that it can set * flags as needed while recursing. In particular, we need a workspace * for keeping track of unsafe-to-reference columns. unsafeColumns[i] - * will be set TRUE if we find that output column i of the subquery is + * will be set true if we find that output column i of the subquery is * unsafe to use in a pushed-down qual. */ memset(&safetyInfo, 0, sizeof(safetyInfo)); @@ -1929,6 +2274,33 @@ set_subquery_pathlist(PlannerInfo *root, RelOptInfo *rel, create_subqueryscan_path(root, rel, subpath, pathkeys, required_outer)); } + + /* If outer rel allows parallelism, do same for partial paths. */ + if (rel->consider_parallel && bms_is_empty(required_outer)) + { + /* If consider_parallel is false, there should be no partial paths. */ + Assert(sub_final_rel->consider_parallel || + sub_final_rel->partial_pathlist == NIL); + + /* Same for partial paths. */ + foreach(lc, sub_final_rel->partial_pathlist) + { + Path *subpath = (Path *) lfirst(lc); + List *pathkeys; + + /* Convert subpath's pathkeys to outer representation */ + pathkeys = convert_subquery_pathkeys(root, + rel, + subpath->pathkeys, + make_tlist_from_pathtarget(subpath->pathtarget)); + + /* Generate outer path using this subpath */ + add_partial_path(rel, (Path *) + create_subqueryscan_path(root, rel, subpath, + pathkeys, + required_outer)); + } + } } /* @@ -2194,27 +2566,42 @@ set_worktable_pathlist(PlannerInfo *root, RelOptInfo *rel, RangeTblEntry *rte) * This must not be called until after we're done creating all partial paths * for the specified relation. 
(Otherwise, add_partial_path might delete a * path that some GatherPath or GatherMergePath has a reference to.) + * + * If we're generating paths for a scan or join relation, override_rows will + * be false, and we'll just use the relation's size estimate. When we're + * being called for a partially-grouped path, though, we need to override + * the rowcount estimate. (It's not clear that the particular value we're + * using here is actually best, but the underlying rel has no estimate so + * we must do something.) */ void -generate_gather_paths(PlannerInfo *root, RelOptInfo *rel) +generate_gather_paths(PlannerInfo *root, RelOptInfo *rel, bool override_rows) { Path *cheapest_partial_path; Path *simple_gather_path; ListCell *lc; + double rows; + double *rowsp = NULL; /* If there are no partial paths, there's nothing to do here. */ if (rel->partial_pathlist == NIL) return; + /* Should we override the rel's rowcount estimate? */ + if (override_rows) + rowsp = &rows; + /* * The output of Gather is always unsorted, so there's only one partial * path of interest: the cheapest one. That will be the one at the front * of partial_pathlist because of the way add_partial_path works. */ cheapest_partial_path = linitial(rel->partial_pathlist); + rows = + cheapest_partial_path->rows * cheapest_partial_path->parallel_workers; simple_gather_path = (Path *) create_gather_path(root, rel, cheapest_partial_path, rel->reltarget, - NULL, NULL); + NULL, rowsp); add_path(rel, simple_gather_path); /* @@ -2229,8 +2616,9 @@ generate_gather_paths(PlannerInfo *root, RelOptInfo *rel) if (subpath->pathkeys == NIL) continue; + rows = subpath->rows * subpath->parallel_workers; path = create_gather_merge_path(root, rel, subpath, rel->reltarget, - subpath->pathkeys, NULL, NULL); + subpath->pathkeys, NULL, rowsp); add_path(rel, &path->path); } } @@ -2386,18 +2774,28 @@ standard_join_search(PlannerInfo *root, int levels_needed, List *initial_rels) join_search_one_level(root, lev); /* - * Run generate_gather_paths() for each just-processed joinrel. We - * could not do this earlier because both regular and partial paths - * can get added to a particular joinrel at multiple times within - * join_search_one_level. After that, we're done creating paths for - * the joinrel, so run set_cheapest(). + * Run generate_partitionwise_join_paths() and generate_gather_paths() + * for each just-processed joinrel. We could not do this earlier + * because both regular and partial paths can get added to a + * particular joinrel at multiple times within join_search_one_level. + * + * After that, we're done creating paths for the joinrel, so run + * set_cheapest(). */ foreach(lc, root->join_rel_level[lev]) { rel = (RelOptInfo *) lfirst(lc); - /* Create GatherPaths for any useful partial paths for rel */ - generate_gather_paths(root, rel); + /* Create paths for partitionwise joins. */ + generate_partitionwise_join_paths(root, rel); + + /* + * Except for the topmost scan/join rel, consider gathering + * partial paths. We'll do the same for the topmost scan/join rel + * once we know the final targetlist (see grouping_planner). + */ + if (lev < levels_needed) + generate_gather_paths(root, rel, false); /* Find and save the cheapest paths for this rel */ set_cheapest(rel); @@ -2464,7 +2862,7 @@ standard_join_search(PlannerInfo *root, int levels_needed, List *initial_rels) * In addition, we make several checks on the subquery's output columns to see * if it is safe to reference them in pushed-down quals. 
If output column k * is found to be unsafe to reference, we set safetyInfo->unsafeColumns[k] - * to TRUE, but we don't reject the subquery overall since column k might not + * to true, but we don't reject the subquery overall since column k might not * be referenced by some/all quals. The unsafeColumns[] array will be * consulted later by qual_is_pushdown_safe(). It's better to do it this way * than to make the checks directly in qual_is_pushdown_safe(), because when @@ -2586,7 +2984,7 @@ recurse_pushdown_safe(Node *setOp, Query *topquery, * * There are several cases in which it's unsafe to push down an upper-level * qual if it references a particular output column of a subquery. We check - * each output column of the subquery and set unsafeColumns[k] to TRUE if + * each output column of the subquery and set unsafeColumns[k] to true if * that column is unsafe for a pushed-down qual to reference. The conditions * checked here are: * @@ -2738,9 +3136,11 @@ targetIsInAllPartitionLists(TargetEntry *tle, Query *query) * * Conditions checked here: * - * 1. The qual must not contain any subselects (mainly because I'm not sure - * it will work correctly: sublinks will already have been transformed into - * subplans in the qual, but not in the subquery). + * 1. The qual must not contain any SubPlans (mainly because I'm not sure + * it will work correctly: SubLinks will already have been transformed into + * SubPlans in the qual, but not in the subquery). Note that SubLinks that + * transform to initplans are safe, and will be accepted here because what + * we'll see in the qual is just a Param referencing the initplan output. * * 2. If unsafeVolatile is set, the qual must not contain any volatile * functions. @@ -3042,7 +3442,8 @@ create_partial_bitmap_paths(PlannerInfo *root, RelOptInfo *rel, pages_fetched = compute_bitmap_pages(root, rel, bitmapqual, 1.0, NULL, NULL); - parallel_workers = compute_parallel_worker(rel, pages_fetched, -1); + parallel_workers = compute_parallel_worker(rel, pages_fetched, -1, + max_parallel_workers_per_gather); if (parallel_workers <= 0) return; @@ -3062,9 +3463,13 @@ create_partial_bitmap_paths(PlannerInfo *root, RelOptInfo *rel, * * "index_pages" is the number of pages from the index that we expect to scan, or * -1 if we don't expect to scan any. + * + * "max_workers" is caller's limit on the number of workers. This typically + * comes from a GUC. */ int -compute_parallel_worker(RelOptInfo *rel, double heap_pages, double index_pages) +compute_parallel_worker(RelOptInfo *rel, double heap_pages, double index_pages, + int max_workers) { int parallel_workers = 0; @@ -3135,14 +3540,81 @@ compute_parallel_worker(RelOptInfo *rel, double heap_pages, double index_pages) } } - /* - * In no case use more than max_parallel_workers_per_gather workers. - */ - parallel_workers = Min(parallel_workers, max_parallel_workers_per_gather); + /* In no case use more than caller supplied maximum number of workers */ + parallel_workers = Min(parallel_workers, max_workers); return parallel_workers; } +/* + * generate_partitionwise_join_paths + * Create paths representing partitionwise join for given partitioned + * join relation. + * + * This must not be called until after we are done adding paths for all + * child-joins. Otherwise, add_path might delete a path to which some path + * generated here has a reference. 
+ */ +void +generate_partitionwise_join_paths(PlannerInfo *root, RelOptInfo *rel) +{ + List *live_children = NIL; + int cnt_parts; + int num_parts; + RelOptInfo **part_rels; + + /* Handle only join relations here. */ + if (!IS_JOIN_REL(rel)) + return; + + /* We've nothing to do if the relation is not partitioned. */ + if (!IS_PARTITIONED_REL(rel)) + return; + + /* The relation should have consider_partitionwise_join set. */ + Assert(rel->consider_partitionwise_join); + + /* Guard against stack overflow due to overly deep partition hierarchy. */ + check_stack_depth(); + + num_parts = rel->nparts; + part_rels = rel->part_rels; + + /* Collect non-dummy child-joins. */ + for (cnt_parts = 0; cnt_parts < num_parts; cnt_parts++) + { + RelOptInfo *child_rel = part_rels[cnt_parts]; + + Assert(child_rel != NULL); + + /* Add partitionwise join paths for partitioned child-joins. */ + generate_partitionwise_join_paths(root, child_rel); + + /* Dummy children will not be scanned, so ignore those. */ + if (IS_DUMMY_REL(child_rel)) + continue; + + set_cheapest(child_rel); + +#ifdef OPTIMIZER_DEBUG + debug_print_rel(root, child_rel); +#endif + + live_children = lappend(live_children, child_rel); + } + + /* If all child-joins are dummy, parent join is also dummy. */ + if (!live_children) + { + mark_dummy_rel(rel); + return; + } + + /* Build additional paths for this rel from child-join paths. */ + add_paths_to_append_rel(root, rel, live_children); + list_free(live_children); +} + /***************************************************************************** * DEBUG SUPPORT @@ -3248,6 +3720,21 @@ print_path(PlannerInfo *root, Path *path, int indent) case T_ForeignPath: ptype = "ForeignScan"; break; + case T_CustomPath: + ptype = "CustomScan"; + break; + case T_NestPath: + ptype = "NestLoop"; + join = true; + break; + case T_MergePath: + ptype = "MergeJoin"; + join = true; + break; + case T_HashPath: + ptype = "HashJoin"; + join = true; + break; case T_AppendPath: ptype = "Append"; break; @@ -3269,6 +3756,10 @@ print_path(PlannerInfo *root, Path *path, int indent) ptype = "Gather"; subpath = ((GatherPath *) path)->subpath; break; + case T_GatherMergePath: + ptype = "GatherMerge"; + subpath = ((GatherMergePath *) path)->subpath; + break; case T_ProjectionPath: ptype = "Projection"; subpath = ((ProjectionPath *) path)->subpath; @@ -3322,18 +3813,6 @@ print_path(PlannerInfo *root, Path *path, int indent) ptype = "Limit"; subpath = ((LimitPath *) path)->subpath; break; - case T_NestPath: - ptype = "NestLoop"; - join = true; - break; - case T_MergePath: - ptype = "MergeJoin"; - join = true; - break; - case T_HashPath: - ptype = "HashJoin"; - join = true; - break; default: ptype = "???Path"; break; diff --git a/src/backend/optimizer/path/clausesel.c b/src/backend/optimizer/path/clausesel.c index 9d340255c3..f4717942c3 100644 --- a/src/backend/optimizer/path/clausesel.c +++ b/src/backend/optimizer/path/clausesel.c @@ -3,7 +3,7 @@ * clausesel.c * Routines to compute clause selectivities * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * @@ -71,7 +71,7 @@ static RelOptInfo *find_single_rel_for_clauses(PlannerInfo *root, * * We also recognize "range queries", such as "x > 34 AND x < 42". 
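generate_partitionwise_join_paths above recurses into each partitioned child-join, skips dummy children, and marks the joinrel itself dummy when no live child remains. A toy recursion over a hypothetical partition tree showing the same control flow; the Rel layout and function names are stand-ins, and path building is reduced to a counter:

#include <stdio.h>
#include <stdbool.h>
#include <stddef.h>

#define MAXPARTS 8

typedef struct Rel
{
    bool        dummy;          /* proven empty */
    int         nparts;         /* 0 for a leaf */
    struct Rel *parts[MAXPARTS];
} Rel;

/* Recurse into child-joins; a partitioned rel is dummy iff all children are. */
static void
generate_partitionwise_paths(Rel *rel)
{
    int         nlive = 0;

    if (rel->nparts == 0)
        return;                 /* leaf: nothing to recurse into */

    for (int i = 0; i < rel->nparts; i++)
    {
        Rel        *child = rel->parts[i];

        generate_partitionwise_paths(child);    /* grandchildren first */
        if (child->dummy)
            continue;                           /* dummies won't be scanned */
        nlive++;                                /* would be appended below */
    }

    if (nlive == 0)
        rel->dummy = true;      /* all children dummy => parent dummy */
    /* else: build Append paths for the parent from the live children */
}

int
main(void)
{
    Rel         leaf1 = {true, 0, {NULL}};
    Rel         leaf2 = {true, 0, {NULL}};
    Rel         parent = {false, 2, {&leaf1, &leaf2}};

    generate_partitionwise_paths(&parent);
    printf("parent dummy = %s\n", parent.dummy ? "yes" : "no");
    return 0;
}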
Clauses * are recognized as possible range query components if they are restriction - * opclauses whose operators have scalarltsel() or scalargtsel() as their + * opclauses whose operators have scalarltsel or a related function as their * restriction selectivity estimator. We pair up clauses of this form that * refer to the same variable. An unpairable clause of this kind is simply * multiplied into the selectivity product in the normal way. But when we @@ -92,8 +92,8 @@ static RelOptInfo *find_single_rel_for_clauses(PlannerInfo *root, * A free side-effect is that we can recognize redundant inequalities such * as "x < 4 AND x < 5"; only the tighter constraint will be counted. * - * Of course this is all very dependent on the behavior of - * scalarltsel/scalargtsel; perhaps some day we can generalize the approach. + * Of course this is all very dependent on the behavior of the inequality + * selectivity functions; perhaps some day we can generalize the approach. */ Selectivity clauselist_selectivity(PlannerInfo *root, @@ -218,17 +218,19 @@ clauselist_selectivity(PlannerInfo *root, if (ok) { /* - * If it's not a "<" or ">" operator, just merge the + * If it's not a "<"/"<="/">"/">=" operator, just merge the * selectivity in generically. But if it's the right oprrest, * add the clause to rqlist for later processing. */ switch (get_oprrest(expr->opno)) { case F_SCALARLTSEL: + case F_SCALARLESEL: addRangeClause(&rqlist, clause, varonleft, true, s2); break; case F_SCALARGTSEL: + case F_SCALARGESEL: addRangeClause(&rqlist, clause, varonleft, false, s2); break; @@ -368,7 +370,7 @@ addRangeClause(RangeQueryClause **rqlist, Node *clause, /*------ * We have found two similar clauses, such as - * x < y AND x < z. + * x < y AND x <= z. * Keep only the more restrictive one. *------ */ @@ -388,7 +390,7 @@ addRangeClause(RangeQueryClause **rqlist, Node *clause, /*------ * We have found two similar clauses, such as - * x > y AND x > z. + * x > y AND x >= z. * Keep only the more restrictive one. *------ */ diff --git a/src/backend/optimizer/path/costsize.c b/src/backend/optimizer/path/costsize.c index 051a8544b0..7bf67a0529 100644 --- a/src/backend/optimizer/path/costsize.c +++ b/src/backend/optimizer/path/costsize.c @@ -60,7 +60,7 @@ * values. * * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * IDENTIFICATION @@ -71,9 +71,6 @@ #include "postgres.h" -#ifdef _MSC_VER -#include /* for _isnan */ -#endif #include #include "access/amapi.h" @@ -100,6 +97,13 @@ #define LOG2(x) (log(x) / 0.693147180559945) +/* + * Append and MergeAppend nodes are less expensive than some other operations + * which use cpu_tuple_cost; instead of adding a separate GUC, estimate the + * per-tuple cost as cpu_tuple_cost multiplied by this value. 
+ */ +#define APPEND_CPU_COST_MULTIPLIER 0.5 + double seq_page_cost = DEFAULT_SEQ_PAGE_COST; double random_page_cost = DEFAULT_RANDOM_PAGE_COST; @@ -127,6 +131,11 @@ bool enable_material = true; bool enable_mergejoin = true; bool enable_hashjoin = true; bool enable_gathermerge = true; +bool enable_partitionwise_join = false; +bool enable_partitionwise_aggregate = false; +bool enable_parallel_append = true; +bool enable_parallel_hash = true; +bool enable_partition_pruning = true; typedef struct { @@ -148,6 +157,7 @@ static bool has_indexed_join_quals(NestPath *joinpath); static double approx_tuple_count(PlannerInfo *root, JoinPath *path, List *quals); static double calc_joinrel_size_estimate(PlannerInfo *root, + RelOptInfo *joinrel, RelOptInfo *outer_rel, RelOptInfo *inner_rel, double outer_rows, @@ -159,6 +169,8 @@ static Selectivity get_foreign_key_join_selectivity(PlannerInfo *root, Relids inner_relids, SpecialJoinInfo *sjinfo, List **restrictlist); +static Cost append_nonpartial_cost(List *subpaths, int numpaths, + int parallel_workers); static void set_rel_width(PlannerInfo *root, RelOptInfo *rel); static double relation_byte_size(double tuples, int width); static double page_size(double tuples, int width); @@ -677,7 +689,9 @@ cost_index(IndexPath *path, PlannerInfo *root, double loop_count, * order. */ path->path.parallel_workers = compute_parallel_worker(baserel, - rand_heap_pages, index_pages); + rand_heap_pages, + index_pages, + max_parallel_workers_per_gather); /* * Fall out if workers can't be assigned for parallel scan, because in @@ -1740,6 +1754,178 @@ cost_sort(Path *path, PlannerInfo *root, path->total_cost = startup_cost + run_cost; } +/* + * append_nonpartial_cost + * Estimate the cost of the non-partial paths in a Parallel Append. + * The non-partial paths are assumed to be the first "numpaths" paths + * from the subpaths list, and to be in order of decreasing cost. + */ +static Cost +append_nonpartial_cost(List *subpaths, int numpaths, int parallel_workers) +{ + Cost *costarr; + int arrlen; + ListCell *l; + ListCell *cell; + int i; + int path_index; + int min_index; + int max_index; + + if (numpaths == 0) + return 0; + + /* + * Array length is number of workers or number of relevants paths, + * whichever is less. + */ + arrlen = Min(parallel_workers, numpaths); + costarr = (Cost *) palloc(sizeof(Cost) * arrlen); + + /* The first few paths will each be claimed by a different worker. */ + path_index = 0; + foreach(cell, subpaths) + { + Path *subpath = (Path *) lfirst(cell); + + if (path_index == arrlen) + break; + costarr[path_index++] = subpath->total_cost; + } + + /* + * Since subpaths are sorted by decreasing cost, the last one will have + * the minimum cost. + */ + min_index = arrlen - 1; + + /* + * For each of the remaining subpaths, add its cost to the array element + * with minimum cost. + */ + for_each_cell(l, cell) + { + Path *subpath = (Path *) lfirst(l); + int i; + + /* Consider only the non-partial paths */ + if (path_index++ == numpaths) + break; + + costarr[min_index] += subpath->total_cost; + + /* Update the new min cost array index */ + for (min_index = i = 0; i < arrlen; i++) + { + if (costarr[i] < costarr[min_index]) + min_index = i; + } + } + + /* Return the highest cost from the array */ + for (max_index = i = 0; i < arrlen; i++) + { + if (costarr[i] > costarr[max_index]) + max_index = i; + } + + return costarr[max_index]; +} + +/* + * cost_append + * Determines and returns the cost of an Append node. 
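append_nonpartial_cost above spreads the non-partial subpaths, which arrive sorted by decreasing cost, over the available workers: the first few each claim a worker, every remaining path is handed to the currently cheapest worker, and the node's cost is the busiest worker's total. A standalone restatement of that greedy schedule over a plain cost array (capped at 64 workers for simplicity):

#include <stdio.h>

#define Min(a,b) ((a) < (b) ? (a) : (b))

/*
 * Estimated completion cost of 'numpaths' non-partial subpaths, whose costs
 * are given in decreasing order, spread over 'workers' workers.
 */
static double
nonpartial_append_cost(const double *costs, int numpaths, int workers)
{
    double      buckets[64];
    int         nbuckets;
    int         i, b, minb, maxb;

    if (numpaths == 0)
        return 0.0;

    nbuckets = Min(Min(workers, numpaths), 64);

    /* The first nbuckets paths each get a worker to themselves. */
    for (i = 0; i < nbuckets; i++)
        buckets[i] = costs[i];

    /* Every remaining path goes to whichever worker is cheapest so far. */
    for (; i < numpaths; i++)
    {
        minb = 0;
        for (b = 1; b < nbuckets; b++)
            if (buckets[b] < buckets[minb])
                minb = b;
        buckets[minb] += costs[i];
    }

    /* The node finishes when its busiest worker does. */
    maxb = 0;
    for (b = 1; b < nbuckets; b++)
        if (buckets[b] > buckets[maxb])
            maxb = b;
    return buckets[maxb];
}

int
main(void)
{
    double      costs[] = {100.0, 80.0, 60.0, 40.0, 20.0};

    /* Two workers: start {100, 80}; 60, 40, 20 fill the gaps => max 160. */
    printf("%.0f\n", nonpartial_append_cost(costs, 5, 2));
    return 0;
}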
+ */ +void +cost_append(AppendPath *apath) +{ + ListCell *l; + + apath->path.startup_cost = 0; + apath->path.total_cost = 0; + + if (apath->subpaths == NIL) + return; + + if (!apath->path.parallel_aware) + { + Path *subpath = (Path *) linitial(apath->subpaths); + + /* + * Startup cost of non-parallel-aware Append is the startup cost of + * first subpath. + */ + apath->path.startup_cost = subpath->startup_cost; + + /* Compute rows and costs as sums of subplan rows and costs. */ + foreach(l, apath->subpaths) + { + Path *subpath = (Path *) lfirst(l); + + apath->path.rows += subpath->rows; + apath->path.total_cost += subpath->total_cost; + } + } + else /* parallel-aware */ + { + int i = 0; + double parallel_divisor = get_parallel_divisor(&apath->path); + + /* Calculate startup cost. */ + foreach(l, apath->subpaths) + { + Path *subpath = (Path *) lfirst(l); + + /* + * Append will start returning tuples when the child node having + * lowest startup cost is done setting up. We consider only the + * first few subplans that immediately get a worker assigned. + */ + if (i == 0) + apath->path.startup_cost = subpath->startup_cost; + else if (i < apath->path.parallel_workers) + apath->path.startup_cost = Min(apath->path.startup_cost, + subpath->startup_cost); + + /* + * Apply parallel divisor to subpaths. Scale the number of rows + * for each partial subpath based on the ratio of the parallel + * divisor originally used for the subpath to the one we adopted. + * Also add the cost of partial paths to the total cost, but + * ignore non-partial paths for now. + */ + if (i < apath->first_partial_path) + apath->path.rows += subpath->rows / parallel_divisor; + else + { + double subpath_parallel_divisor; + + subpath_parallel_divisor = get_parallel_divisor(subpath); + apath->path.rows += subpath->rows * (subpath_parallel_divisor / + parallel_divisor); + apath->path.total_cost += subpath->total_cost; + } + + apath->path.rows = clamp_row_est(apath->path.rows); + + i++; + } + + /* Add cost for non-partial subpaths. */ + apath->path.total_cost += + append_nonpartial_cost(apath->subpaths, + apath->first_partial_path, + apath->path.parallel_workers); + } + + /* + * Although Append does not do any selection or projection, it's not free; + * add a small per-tuple overhead. + */ + apath->path.total_cost += + cpu_tuple_cost * APPEND_CPU_COST_MULTIPLIER * apath->path.rows; +} + /* * cost_merge_append * Determines and returns the cost of a MergeAppend node. @@ -1792,12 +1978,10 @@ cost_merge_append(Path *path, PlannerInfo *root, run_cost += tuples * comparison_cost * logN; /* - * Also charge a small amount (arbitrarily set equal to operator cost) per - * extracted tuple. We don't charge cpu_tuple_cost because a MergeAppend - * node doesn't do qual-checking or projection, so it has less overhead - * than most plan nodes. + * Although MergeAppend does not do any selection or projection, it's not + * free; add a small per-tuple overhead. 
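For a non-parallel Append, cost_append above sums the children's rows and total costs, takes the first child's startup cost, and then charges a reduced per-tuple overhead of cpu_tuple_cost times APPEND_CPU_COST_MULTIPLIER; the same per-tuple charge is applied to MergeAppend just below. A numeric sketch of the non-parallel case, with cpu_tuple_cost hard-coded to its usual 0.01 default as an assumption:

#include <stdio.h>

#define CPU_TUPLE_COST              0.01    /* assumed default GUC value */
#define APPEND_CPU_COST_MULTIPLIER  0.5

typedef struct SubCost { double rows; double startup; double total; } SubCost;

/* Non-parallel Append costing: sum children, add a cheap per-tuple charge. */
static void
cost_append_sketch(const SubCost *subs, int nsubs,
                   double *rows, double *startup, double *total)
{
    *rows = 0.0;
    *startup = (nsubs > 0) ? subs[0].startup : 0.0; /* first child's startup */
    *total = 0.0;

    for (int i = 0; i < nsubs; i++)
    {
        *rows += subs[i].rows;
        *total += subs[i].total;
    }

    /* Append does no quals or projection, but it is not completely free. */
    *total += CPU_TUPLE_COST * APPEND_CPU_COST_MULTIPLIER * (*rows);
}

int
main(void)
{
    SubCost     subs[] = {{1000, 0.0, 25.0}, {2000, 0.0, 50.0}};
    double      rows, startup, total;

    cost_append_sketch(subs, 2, &rows, &startup, &total);
    printf("rows=%.0f startup=%.2f total=%.2f\n", rows, startup, total);
    return 0;
}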
*/ - run_cost += cpu_operator_cost * tuples; + run_cost += cpu_tuple_cost * APPEND_CPU_COST_MULTIPLIER * tuples; path->startup_cost = startup_cost + input_startup_cost; path->total_cost = startup_cost + run_cost + input_total_cost; @@ -1873,6 +2057,7 @@ void cost_agg(Path *path, PlannerInfo *root, AggStrategy aggstrategy, const AggClauseCosts *aggcosts, int numGroupCols, double numGroups, + List *quals, Cost input_startup_cost, Cost input_total_cost, double input_tuples) { @@ -1954,6 +2139,26 @@ cost_agg(Path *path, PlannerInfo *root, output_tuples = numGroups; } + /* + * If there are quals (HAVING quals), account for their cost and + * selectivity. + */ + if (quals) + { + QualCost qual_cost; + + cost_qual_eval(&qual_cost, quals, root); + startup_cost += qual_cost.startup; + total_cost += qual_cost.startup + output_tuples * qual_cost.per_tuple; + + output_tuples = clamp_row_est(output_tuples * + clauselist_selectivity(root, + quals, + 0, + JOIN_INNER, + NULL)); + } + path->rows = output_tuples; path->startup_cost = startup_cost; path->total_cost = total_cost; @@ -2039,12 +2244,15 @@ cost_windowagg(Path *path, PlannerInfo *root, void cost_group(Path *path, PlannerInfo *root, int numGroupCols, double numGroups, + List *quals, Cost input_startup_cost, Cost input_total_cost, double input_tuples) { + double output_tuples; Cost startup_cost; Cost total_cost; + output_tuples = numGroups; startup_cost = input_startup_cost; total_cost = input_total_cost; @@ -2054,7 +2262,27 @@ cost_group(Path *path, PlannerInfo *root, */ total_cost += cpu_operator_cost * input_tuples * numGroupCols; - path->rows = numGroups; + /* + * If there are quals (HAVING quals), account for their cost and + * selectivity. + */ + if (quals) + { + QualCost qual_cost; + + cost_qual_eval(&qual_cost, quals, root); + startup_cost += qual_cost.startup; + total_cost += qual_cost.startup + output_tuples * qual_cost.per_tuple; + + output_tuples = clamp_row_est(output_tuples * + clauselist_selectivity(root, + quals, + 0, + JOIN_INNER, + NULL)); + } + + path->rows = output_tuples; path->startup_cost = startup_cost; path->total_cost = total_cost; } @@ -2915,22 +3143,27 @@ cached_scansel(PlannerInfo *root, RestrictInfo *rinfo, PathKey *pathkey) * 'outer_path' is the outer input to the join * 'inner_path' is the inner input to the join * 'extra' contains miscellaneous information about the join + * 'parallel_hash' indicates that inner_path is partial and that a shared + * hash table will be built in parallel */ void initial_cost_hashjoin(PlannerInfo *root, JoinCostWorkspace *workspace, JoinType jointype, List *hashclauses, Path *outer_path, Path *inner_path, - JoinPathExtraData *extra) + JoinPathExtraData *extra, + bool parallel_hash) { Cost startup_cost = 0; Cost run_cost = 0; double outer_path_rows = outer_path->rows; double inner_path_rows = inner_path->rows; + double inner_path_rows_total = inner_path_rows; int num_hashclauses = list_length(hashclauses); int numbuckets; int numbatches; int num_skew_mcvs; + size_t space_allowed; /* unused */ /* cost of source data */ startup_cost += outer_path->startup_cost; @@ -2951,6 +3184,15 @@ initial_cost_hashjoin(PlannerInfo *root, JoinCostWorkspace *workspace, * inner_path_rows; run_cost += cpu_operator_cost * num_hashclauses * outer_path_rows; + /* + * If this is a parallel hash build, then the value we have for + * inner_rows_total currently refers only to the rows returned by each + * participant. 
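The cost_agg and cost_group changes above charge any HAVING quals once per produced group and then scale the output row count by the quals' selectivity. A minimal numeric sketch of that adjustment, with flat per-group qual costs and a given selectivity standing in for cost_qual_eval and clauselist_selectivity:

#include <stdio.h>

/* Apply HAVING-qual cost and selectivity to an aggregation estimate. */
static void
apply_having_quals(double *startup_cost, double *total_cost,
                   double *output_groups,
                   double qual_startup, double qual_per_group,
                   double qual_selectivity)
{
    *startup_cost += qual_startup;
    *total_cost += qual_startup + qual_per_group * (*output_groups);

    /* Only the groups passing the HAVING clause are emitted. */
    *output_groups *= qual_selectivity;
    if (*output_groups < 1.0)
        *output_groups = 1.0;   /* mirrors clamp_row_est() */
}

int
main(void)
{
    double      startup = 100.0;
    double      total = 500.0;
    double      groups = 1000.0;

    /* HAVING costs 0.0025 per group and keeps ~10% of the groups. */
    apply_having_quals(&startup, &total, &groups, 0.0, 0.0025, 0.10);
    printf("startup=%.2f total=%.2f groups=%.0f\n", startup, total, groups);
    return 0;
}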
For shared hash table size estimation, we need the total + * number, so we need to undo the division. + */ + if (parallel_hash) + inner_path_rows_total *= get_parallel_divisor(inner_path); + /* * Get hash table size that executor would use for inner relation. * @@ -2961,9 +3203,12 @@ initial_cost_hashjoin(PlannerInfo *root, JoinCostWorkspace *workspace, * XXX at some point it might be interesting to try to account for skew * optimization in the cost estimate, but for now, we don't. */ - ExecChooseHashTableSize(inner_path_rows, + ExecChooseHashTableSize(inner_path_rows_total, inner_path->pathtarget->width, true, /* useskew */ + parallel_hash, /* try_combined_work_mem */ + outer_path->parallel_workers, + &space_allowed, &numbuckets, &numbatches, &num_skew_mcvs); @@ -2995,6 +3240,7 @@ initial_cost_hashjoin(PlannerInfo *root, JoinCostWorkspace *workspace, workspace->run_cost = run_cost; workspace->numbuckets = numbuckets; workspace->numbatches = numbatches; + workspace->inner_rows_total = inner_path_rows_total; } /* @@ -3017,6 +3263,7 @@ final_cost_hashjoin(PlannerInfo *root, HashPath *path, Path *inner_path = path->jpath.innerjoinpath; double outer_path_rows = outer_path->rows; double inner_path_rows = inner_path->rows; + double inner_path_rows_total = workspace->inner_rows_total; List *hashclauses = path->path_hashclauses; Cost startup_cost = workspace->startup_cost; Cost run_cost = workspace->run_cost; @@ -3057,6 +3304,9 @@ final_cost_hashjoin(PlannerInfo *root, HashPath *path, /* mark the path with estimated # of batches */ path->num_batches = numbatches; + /* store the total number of tuples (sum of partial row estimates) */ + path->inner_rows_total = inner_path_rows_total; + /* and compute the number of "virtual" buckets in the whole join */ virtualbuckets = (double) numbuckets * (double) numbatches; @@ -3204,10 +3454,10 @@ final_cost_hashjoin(PlannerInfo *root, HashPath *path, clamp_row_est(inner_path_rows / virtualbuckets) * 0.05; /* Get # of tuples that will pass the basic join */ - if (path->jpath.jointype == JOIN_SEMI) - hashjointuples = outer_matched_rows; - else + if (path->jpath.jointype == JOIN_ANTI) hashjointuples = outer_path_rows - outer_matched_rows; + else + hashjointuples = outer_matched_rows; } else { @@ -3632,11 +3882,14 @@ cost_qual_eval_walker(Node *node, cost_qual_eval_context *context) else if (IsA(node, ArrayCoerceExpr)) { ArrayCoerceExpr *acoerce = (ArrayCoerceExpr *) node; - Node *arraynode = (Node *) acoerce->arg; - - if (OidIsValid(acoerce->elemfuncid)) - context->total.per_tuple += get_func_cost(acoerce->elemfuncid) * - cpu_operator_cost * estimate_array_length(arraynode); + QualCost perelemcost; + + cost_qual_eval_node(&perelemcost, (Node *) acoerce->elemexpr, + context->root); + context->total.startup += perelemcost.startup; + if (perelemcost.per_tuple > 0) + context->total.per_tuple += perelemcost.per_tuple * + estimate_array_length((Node *) acoerce->arg); } else if (IsA(node, RowCompareExpr)) { @@ -3768,6 +4021,7 @@ get_restriction_qual_cost(PlannerInfo *root, RelOptInfo *baserel, * them to all the join cost estimation functions. 
* * Input parameters: + * joinrel: join relation under consideration * outerrel: outer relation under consideration * innerrel: inner relation under consideration * jointype: if not JOIN_SEMI or JOIN_ANTI, we assume it's inner_unique @@ -3778,6 +4032,7 @@ get_restriction_qual_cost(PlannerInfo *root, RelOptInfo *baserel, */ void compute_semi_anti_join_factors(PlannerInfo *root, + RelOptInfo *joinrel, RelOptInfo *outerrel, RelOptInfo *innerrel, JoinType jointype, @@ -3806,7 +4061,7 @@ compute_semi_anti_join_factors(PlannerInfo *root, { RestrictInfo *rinfo = lfirst_node(RestrictInfo, l); - if (!rinfo->is_pushed_down) + if (!RINFO_IS_PUSHED_DOWN(rinfo, joinrel->relids)) joinquals = lappend(joinquals, rinfo); } } @@ -4121,6 +4376,7 @@ set_joinrel_size_estimates(PlannerInfo *root, RelOptInfo *rel, List *restrictlist) { rel->rows = calc_joinrel_size_estimate(root, + rel, outer_rel, inner_rel, outer_rel->rows, @@ -4163,6 +4419,7 @@ get_parameterized_joinrel_size(PlannerInfo *root, RelOptInfo *rel, * estimate for any pair with the same parameterization. */ nrows = calc_joinrel_size_estimate(root, + rel, outer_path->parent, inner_path->parent, outer_path->rows, @@ -4186,6 +4443,7 @@ get_parameterized_joinrel_size(PlannerInfo *root, RelOptInfo *rel, */ static double calc_joinrel_size_estimate(PlannerInfo *root, + RelOptInfo *joinrel, RelOptInfo *outer_rel, RelOptInfo *inner_rel, double outer_rows, @@ -4238,7 +4496,7 @@ calc_joinrel_size_estimate(PlannerInfo *root, { RestrictInfo *rinfo = lfirst_node(RestrictInfo, l); - if (rinfo->is_pushed_down) + if (RINFO_IS_PUSHED_DOWN(rinfo, joinrel->relids)) pushedquals = lappend(pushedquals, rinfo); else joinquals = lappend(joinquals, rinfo); @@ -4550,15 +4808,11 @@ set_subquery_size_estimates(PlannerInfo *root, RelOptInfo *rel) { PlannerInfo *subroot = rel->subroot; RelOptInfo *sub_final_rel; - RangeTblEntry *rte PG_USED_FOR_ASSERTS_ONLY; ListCell *lc; /* Should only be applied to base relations that are subqueries */ Assert(rel->relid > 0); -#ifdef USE_ASSERT_CHECKING - rte = planner_rt_fetch(rel->relid, root); - Assert(rte->rtekind == RTE_SUBQUERY); -#endif + Assert(planner_rt_fetch(rel->relid, root)->rtekind == RTE_SUBQUERY); /* * Copy raw number of output rows from subquery. All of its paths should @@ -4670,14 +4924,9 @@ set_function_size_estimates(PlannerInfo *root, RelOptInfo *rel) void set_tablefunc_size_estimates(PlannerInfo *root, RelOptInfo *rel) { - RangeTblEntry *rte PG_USED_FOR_ASSERTS_ONLY; - /* Should only be applied to base relations that are functions */ Assert(rel->relid > 0); -#ifdef USE_ASSERT_CHECKING - rte = planner_rt_fetch(rel->relid, root); - Assert(rte->rtekind == RTE_TABLEFUNC); -#endif + Assert(planner_rt_fetch(rel->relid, root)->rtekind == RTE_TABLEFUNC); rel->tuples = 100; @@ -5098,7 +5347,6 @@ static double get_parallel_divisor(Path *path) { double parallel_divisor = path->parallel_workers; - double leader_contribution; /* * Early experience with parallel query suggests that when there is only @@ -5111,9 +5359,14 @@ get_parallel_divisor(Path *path) * its time servicing each worker, and the remainder executing the * parallel plan. 
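The get_parallel_divisor change completed just below adds the leader's contribution only when parallel_leader_participation is on, using the 1.0 - 0.3 * workers discount from the surrounding comment; the parallel-hash costing earlier in this file multiplies a per-participant row estimate by this divisor to recover the total row count used to size the shared hash table. A standalone sketch of both steps, with the GUC modeled as a plain bool:

#include <stdio.h>
#include <stdbool.h>

/*
 * Effective number of row-producing participants for a partial path:
 * the workers, plus whatever fraction of its time the leader can spare.
 */
static double
parallel_divisor(int parallel_workers, bool leader_participation)
{
    double      divisor = parallel_workers;

    if (leader_participation)
    {
        double      leader_contribution = 1.0 - 0.3 * parallel_workers;

        if (leader_contribution > 0)
            divisor += leader_contribution;
    }
    return divisor;
}

int
main(void)
{
    double      per_worker_rows = 10000.0;  /* rows each participant returns */
    int         workers = 2;
    double      divisor = parallel_divisor(workers, true);

    /* Undo the per-participant division to size a shared (parallel) hash. */
    printf("divisor=%.2f total rows=%.0f\n",
           divisor, per_worker_rows * divisor);
    return 0;
}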
*/ - leader_contribution = 1.0 - (0.3 * path->parallel_workers); - if (leader_contribution > 0) - parallel_divisor += leader_contribution; + if (parallel_leader_participation) + { + double leader_contribution; + + leader_contribution = 1.0 - (0.3 * path->parallel_workers); + if (leader_contribution > 0) + parallel_divisor += leader_contribution; + } return parallel_divisor; } @@ -5132,6 +5385,8 @@ compute_bitmap_pages(PlannerInfo *root, RelOptInfo *baserel, Path *bitmapqual, double T; double pages_fetched; double tuples_fetched; + double heap_pages; + long maxentries; /* * Fetch total cost of obtaining the bitmap, as well as its total @@ -5146,6 +5401,24 @@ compute_bitmap_pages(PlannerInfo *root, RelOptInfo *baserel, Path *bitmapqual, T = (baserel->pages > 1) ? (double) baserel->pages : 1.0; + /* + * For a single scan, the number of heap pages that need to be fetched is + * the same as the Mackert and Lohman formula for the case T <= b (ie, no + * re-reads needed). + */ + pages_fetched = (2.0 * T * tuples_fetched) / (2.0 * T + tuples_fetched); + + /* + * Calculate the number of pages fetched from the heap. Then based on + * current work_mem estimate get the estimated maxentries in the bitmap. + * (Note that we always do this calculation based on the number of pages + * that would be fetched in a single iteration, even if loop_count > 1. + * That's correct, because only that number of entries will be stored in + * the bitmap at one time.) + */ + heap_pages = Min(pages_fetched, baserel->pages); + maxentries = tbm_calculate_entries(work_mem * 1024L); + if (loop_count > 1) { /* @@ -5160,22 +5433,41 @@ compute_bitmap_pages(PlannerInfo *root, RelOptInfo *baserel, Path *bitmapqual, root); pages_fetched /= loop_count; } - else - { - /* - * For a single scan, the number of heap pages that need to be fetched - * is the same as the Mackert and Lohman formula for the case T <= b - * (ie, no re-reads needed). - */ - pages_fetched = - (2.0 * T * tuples_fetched) / (2.0 * T + tuples_fetched); - } if (pages_fetched >= T) pages_fetched = T; else pages_fetched = ceil(pages_fetched); + if (maxentries < heap_pages) + { + double exact_pages; + double lossy_pages; + + /* + * Crude approximation of the number of lossy pages. Because of the + * way tbm_lossify() is coded, the number of lossy pages increases + * very sharply as soon as we run short of memory; this formula has + * that property and seems to perform adequately in testing, but it's + * possible we could do better somehow. + */ + lossy_pages = Max(0, heap_pages - maxentries / 2); + exact_pages = heap_pages - lossy_pages; + + /* + * If there are lossy pages then recompute the number of tuples + * processed by the bitmap heap node. We assume here that the chance + * of a given tuple coming from an exact page is the same as the + * chance that a given page is exact. This might not be true, but + * it's not clear how we can do any better. + */ + if (lossy_pages > 0) + tuples_fetched = + clamp_row_est(indexSelectivity * + (exact_pages / heap_pages) * baserel->tuples + + (lossy_pages / heap_pages) * baserel->tuples); + } + if (cost) *cost = indexTotalCost; if (tuple) diff --git a/src/backend/optimizer/path/equivclass.c b/src/backend/optimizer/path/equivclass.c index 7997f50c18..b22b36ec0e 100644 --- a/src/backend/optimizer/path/equivclass.c +++ b/src/backend/optimizer/path/equivclass.c @@ -6,7 +6,7 @@ * See src/backend/optimizer/README for discussion of EquivalenceClasses. 
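The compute_bitmap_pages change above estimates how many heap pages turn lossy once the TID bitmap exceeds its work_mem-derived entry budget, and then rescales the fetched-tuple estimate by the exact/lossy page split (every tuple on a lossy page must be rechecked). A numeric sketch of those two formulas, with made-up inputs in place of tbm_calculate_entries and the Mackert-Lohman page estimate:

#include <stdio.h>

#define Max(a,b) ((a) > (b) ? (a) : (b))
#define Min(a,b) ((a) < (b) ? (a) : (b))

int
main(void)
{
    double      pages_fetched = 5000.0; /* heap pages one bitmap scan touches */
    double      rel_pages = 10000.0;    /* total pages in the relation */
    double      rel_tuples = 1000000.0; /* total tuples in the relation */
    double      index_sel = 0.05;       /* fraction of tuples the quals accept */
    double      maxentries = 4000.0;    /* bitmap capacity, per work_mem */

    double      heap_pages = Min(pages_fetched, rel_pages);
    double      tuples_fetched = index_sel * rel_tuples;

    if (maxentries < heap_pages)
    {
        /* Crude model: lossiness sets in sharply once memory runs short. */
        double      lossy_pages = Max(0.0, heap_pages - maxentries / 2);
        double      exact_pages = heap_pages - lossy_pages;

        /*
         * Tuples on exact pages are filtered by the index selectivity;
         * every tuple on a lossy page has to be visited.
         */
        tuples_fetched = index_sel * (exact_pages / heap_pages) * rel_tuples
            + (lossy_pages / heap_pages) * rel_tuples;
    }

    printf("heap_pages=%.0f tuples_fetched=%.0f\n", heap_pages, tuples_fetched);
    return 0;
}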
* * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * IDENTIFICATION @@ -27,6 +27,7 @@ #include "optimizer/paths.h" #include "optimizer/planmain.h" #include "optimizer/prep.h" +#include "optimizer/restrictinfo.h" #include "optimizer/var.h" #include "utils/lsyscache.h" @@ -71,8 +72,14 @@ static bool reconsider_full_join_clause(PlannerInfo *root, * any delay by an outer join, so its two sides can be considered equal * anywhere they are both computable; moreover that equality can be * extended transitively. Record this knowledge in the EquivalenceClass - * data structure. Returns TRUE if successful, FALSE if not (in which - * case caller should treat the clause as ordinary, not an equivalence). + * data structure, if applicable. Returns true if successful, false if not + * (in which case caller should treat the clause as ordinary, not an + * equivalence). + * + * In some cases, although we cannot convert a clause into EquivalenceClass + * knowledge, we can still modify it to a more useful form than the original. + * Then, *p_restrictinfo will be replaced by a new RestrictInfo, which is what + * the caller should use for further processing. * * If below_outer_join is true, then the clause was found below the nullable * side of an outer join, so its sides might validly be both NULL rather than @@ -104,9 +111,11 @@ static bool reconsider_full_join_clause(PlannerInfo *root, * memory context. */ bool -process_equivalence(PlannerInfo *root, RestrictInfo *restrictinfo, +process_equivalence(PlannerInfo *root, + RestrictInfo **p_restrictinfo, bool below_outer_join) { + RestrictInfo *restrictinfo = *p_restrictinfo; Expr *clause = restrictinfo->clause; Oid opno, collation, @@ -154,16 +163,45 @@ process_equivalence(PlannerInfo *root, RestrictInfo *restrictinfo, collation); /* - * Reject clauses of the form X=X. These are not as redundant as they - * might seem at first glance: assuming the operator is strict, this is - * really an expensive way to write X IS NOT NULL. So we must not risk - * just losing the clause, which would be possible if there is already a - * single-element EquivalenceClass containing X. The case is not common - * enough to be worth contorting the EC machinery for, so just reject the - * clause and let it be processed as a normal restriction clause. + * Clauses of the form X=X cannot be translated into EquivalenceClasses. + * We'd either end up with a single-entry EC, losing the knowledge that + * the clause was present at all, or else make an EC with duplicate + * entries, causing other issues. */ if (equal(item1, item2)) - return false; /* X=X is not a useful equivalence */ + { + /* + * If the operator is strict, then the clause can be treated as just + * "X IS NOT NULL". (Since we know we are considering a top-level + * qual, we can ignore the difference between FALSE and NULL results.) + * It's worth making the conversion because we'll typically get a much + * better selectivity estimate than we would for X=X. + * + * If the operator is not strict, we can't be sure what it will do + * with NULLs, so don't attempt to optimize it. 
+ */ + set_opfuncid((OpExpr *) clause); + if (func_strict(((OpExpr *) clause)->opfuncid)) + { + NullTest *ntest = makeNode(NullTest); + + ntest->arg = item1; + ntest->nulltesttype = IS_NOT_NULL; + ntest->argisrow = false; /* correct even if composite arg */ + ntest->location = -1; + + *p_restrictinfo = + make_restrictinfo((Expr *) ntest, + restrictinfo->is_pushed_down, + restrictinfo->outerjoin_delayed, + restrictinfo->pseudoconstant, + restrictinfo->security_level, + NULL, + restrictinfo->outer_relids, + restrictinfo->nullable_relids); + } + return false; + } /* * If below outer join, check for strictness, else reject. @@ -459,8 +497,9 @@ canonicalize_ec_expression(Expr *expr, Oid req_type, Oid req_collation) /* * For a polymorphic-input-type opclass, just keep the same exposed type. + * RECORD opclasses work like polymorphic-type ones for this purpose. */ - if (IsPolymorphicType(req_type)) + if (IsPolymorphicType(req_type) || req_type == RECORDOID) req_type = expr_type; /* @@ -564,8 +603,8 @@ add_eq_member(EquivalenceClass *ec, Expr *expr, Relids relids, * so for now we live with just reporting the first match. See also * generate_implied_equalities_for_column and match_pathkeys_to_index.) * - * If create_it is TRUE, we'll build a new EquivalenceClass when there is no - * match. If create_it is FALSE, we just return NULL when no match. + * If create_it is true, we'll build a new EquivalenceClass when there is no + * match. If create_it is false, we just return NULL when no match. * * This can be used safely both before and after EquivalenceClass merging; * since it never causes merging it does not invalidate any existing ECs @@ -1637,7 +1676,7 @@ reconsider_outer_join_clauses(PlannerInfo *root) /* * reconsider_outer_join_clauses for a single LEFT/RIGHT JOIN clause * - * Returns TRUE if we were able to propagate a constant through the clause. + * Returns true if we were able to propagate a constant through the clause. */ static bool reconsider_outer_join_clause(PlannerInfo *root, RestrictInfo *rinfo, @@ -1741,7 +1780,7 @@ reconsider_outer_join_clause(PlannerInfo *root, RestrictInfo *rinfo, bms_copy(inner_relids), bms_copy(inner_nullable_relids), cur_ec->ec_min_security); - if (process_equivalence(root, newrinfo, true)) + if (process_equivalence(root, &newrinfo, true)) match = true; } @@ -1762,7 +1801,7 @@ reconsider_outer_join_clause(PlannerInfo *root, RestrictInfo *rinfo, /* * reconsider_outer_join_clauses for a single FULL JOIN clause * - * Returns TRUE if we were able to propagate a constant through the clause. + * Returns true if we were able to propagate a constant through the clause. 
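process_equivalence above now rewrites a strict X = X clause into X IS NOT NULL instead of simply rejecting it, which yields a much better selectivity estimate than treating it as an ordinary equality. A toy sketch of just that decision, using a flat stand-in Clause struct rather than PostgreSQL's expression nodes:

#include <stdio.h>
#include <stdbool.h>
#include <string.h>

typedef enum { CLAUSE_OPEXPR, CLAUSE_IS_NOT_NULL } ClauseKind;

typedef struct Clause
{
    ClauseKind  kind;
    char        leftvar[16];
    char        rightvar[16];   /* unused once rewritten to IS NOT NULL */
    bool        op_is_strict;
} Clause;

/*
 * If a clause is "X op X" with a strict operator, rewrite it as
 * "X IS NOT NULL"; return true if the rewrite happened.
 */
static bool
rewrite_self_equality(Clause *c)
{
    if (c->kind != CLAUSE_OPEXPR ||
        strcmp(c->leftvar, c->rightvar) != 0 ||
        !c->op_is_strict)
        return false;           /* not X=X, or operator may accept NULLs */

    c->kind = CLAUSE_IS_NOT_NULL;
    return true;
}

int
main(void)
{
    Clause      c = {CLAUSE_OPEXPR, "x", "x", true};

    if (rewrite_self_equality(&c))
        printf("rewritten to: %s IS NOT NULL\n", c.leftvar);
    return 0;
}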
*/ static bool reconsider_full_join_clause(PlannerInfo *root, RestrictInfo *rinfo) @@ -1884,7 +1923,7 @@ reconsider_full_join_clause(PlannerInfo *root, RestrictInfo *rinfo) bms_copy(left_relids), bms_copy(left_nullable_relids), cur_ec->ec_min_security); - if (process_equivalence(root, newrinfo, true)) + if (process_equivalence(root, &newrinfo, true)) matchleft = true; } eq_op = select_equality_operator(cur_ec, @@ -1899,7 +1938,7 @@ reconsider_full_join_clause(PlannerInfo *root, RestrictInfo *rinfo) bms_copy(right_relids), bms_copy(right_nullable_relids), cur_ec->ec_min_security); - if (process_equivalence(root, newrinfo, true)) + if (process_equivalence(root, &newrinfo, true)) matchright = true; } } diff --git a/src/backend/optimizer/path/indxpath.c b/src/backend/optimizer/path/indxpath.c index f35380391a..f295558f76 100644 --- a/src/backend/optimizer/path/indxpath.c +++ b/src/backend/optimizer/path/indxpath.c @@ -4,7 +4,7 @@ * Routines to determine which indexes are usable for scanning a * given relation, and create Paths accordingly. * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * @@ -40,9 +40,7 @@ #include "utils/selfuncs.h" -#define IsBooleanOpfamily(opfamily) \ - ((opfamily) == BOOL_BTREE_FAM_OID || (opfamily) == BOOL_HASH_FAM_OID) - +/* XXX see PartCollMatchesExprColl */ #define IndexCollMatchesExprColl(idxcollation, exprcollation) \ ((idxcollation) == InvalidOid || (idxcollation) == (exprcollation)) @@ -838,12 +836,12 @@ get_index_paths(PlannerInfo *root, RelOptInfo *rel, * * If skip_nonnative_saop is non-NULL, we ignore ScalarArrayOpExpr clauses * unless the index AM supports them directly, and we set *skip_nonnative_saop - * to TRUE if we found any such clauses (caller must initialize the variable - * to FALSE). If it's NULL, we do not ignore ScalarArrayOpExpr clauses. + * to true if we found any such clauses (caller must initialize the variable + * to false). If it's NULL, we do not ignore ScalarArrayOpExpr clauses. * * If skip_lower_saop is non-NULL, we ignore ScalarArrayOpExpr clauses for - * non-first index columns, and we set *skip_lower_saop to TRUE if we found - * any such clauses (caller must initialize the variable to FALSE). If it's + * non-first index columns, and we set *skip_lower_saop to true if we found + * any such clauses (caller must initialize the variable to false). If it's * NULL, we do not ignore non-first ScalarArrayOpExpr clauses, but they will * result in considering the scan's output to be unordered. * @@ -1866,6 +1864,7 @@ check_index_only(RelOptInfo *rel, IndexOptInfo *index) bool result; Bitmapset *attrs_used = NULL; Bitmapset *index_canreturn_attrs = NULL; + Bitmapset *index_cannotreturn_attrs = NULL; ListCell *lc; int i; @@ -1905,7 +1904,11 @@ check_index_only(RelOptInfo *rel, IndexOptInfo *index) /* * Construct a bitmapset of columns that the index can return back in an - * index-only scan. + * index-only scan. If there are multiple index columns containing the + * same attribute, all of them must be capable of returning the value, + * since we might recheck operators on any of them. (Potentially we could + * be smarter about that, but it's such a weird situation that it doesn't + * seem worth spending a lot of sweat on.) 
*/ for (i = 0; i < index->ncolumns; i++) { @@ -1922,13 +1925,21 @@ check_index_only(RelOptInfo *rel, IndexOptInfo *index) index_canreturn_attrs = bms_add_member(index_canreturn_attrs, attno - FirstLowInvalidHeapAttributeNumber); + else + index_cannotreturn_attrs = + bms_add_member(index_cannotreturn_attrs, + attno - FirstLowInvalidHeapAttributeNumber); } + index_canreturn_attrs = bms_del_members(index_canreturn_attrs, + index_cannotreturn_attrs); + /* Do we have all the necessary attributes? */ result = bms_is_subset(attrs_used, index_canreturn_attrs); bms_free(attrs_used); bms_free(index_canreturn_attrs); + bms_free(index_cannotreturn_attrs); return result; } @@ -2151,7 +2162,7 @@ match_eclass_clauses_to_index(PlannerInfo *root, IndexOptInfo *index, if (!index->rel->has_eclass_joins) return; - for (indexcol = 0; indexcol < index->ncolumns; indexcol++) + for (indexcol = 0; indexcol < index->nkeycolumns; indexcol++) { ec_member_matches_arg arg; List *clauses; @@ -2233,8 +2244,8 @@ match_clause_to_index(IndexOptInfo *index, if (!restriction_is_securely_promotable(rinfo, index->rel)) return; - /* OK, check each index column for a match */ - for (indexcol = 0; indexcol < index->ncolumns; indexcol++) + /* OK, check each index key column for a match */ + for (indexcol = 0; indexcol < index->nkeycolumns; indexcol++) { if (match_clause_to_indexcol(index, indexcol, @@ -2318,8 +2329,8 @@ match_clause_to_indexcol(IndexOptInfo *index, { Expr *clause = rinfo->clause; Index index_relid = index->rel->relid; - Oid opfamily = index->opfamily[indexcol]; - Oid idxcollation = index->indexcollations[indexcol]; + Oid opfamily; + Oid idxcollation; Node *leftop, *rightop; Relids left_relids; @@ -2328,6 +2339,11 @@ match_clause_to_indexcol(IndexOptInfo *index, Oid expr_coll; bool plain_op; + Assert(indexcol < index->nkeycolumns); + + opfamily = index->opfamily[indexcol]; + idxcollation = index->indexcollations[indexcol]; + /* First check for boolean-index cases. */ if (IsBooleanOpfamily(opfamily)) { @@ -2667,8 +2683,8 @@ match_clause_to_ordering_op(IndexOptInfo *index, Expr *clause, Oid pk_opfamily) { - Oid opfamily = index->opfamily[indexcol]; - Oid idxcollation = index->indexcollations[indexcol]; + Oid opfamily; + Oid idxcollation; Node *leftop, *rightop; Oid expr_op; @@ -2676,6 +2692,11 @@ match_clause_to_ordering_op(IndexOptInfo *index, Oid sortfamily; bool commuted; + Assert(indexcol < index->nkeycolumns); + + opfamily = index->opfamily[indexcol]; + idxcollation = index->indexcollations[indexcol]; + /* * Clause must be a binary opclause. 
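The check_index_only change above also collects the attributes that some index column cannot return and removes them from the returnable set before the subset test, so a value is only fetched from the index when every index column holding it can return it. A bitmask sketch of that final step, with a 64-bit mask standing in for Bitmapset:

#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

/* Bitmapset stand-in: is every member of a also in b? */
static bool
is_subset(uint64_t a, uint64_t b)
{
    return (a & ~b) == 0;
}

int
main(void)
{
    uint64_t    attrs_used = 0x03;    /* query needs attributes 1 and 2 */
    uint64_t    canreturn = 0x07;     /* some index column can return 1,2,3 */
    uint64_t    cannotreturn = 0x02;  /* another index column can't return 2 */

    /* An attribute is only safe if no index column fails to return it. */
    canreturn &= ~cannotreturn;

    printf("index-only scan possible: %s\n",
           is_subset(attrs_used, canreturn) ? "yes" : "no");
    return 0;
}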
*/ @@ -2910,8 +2931,13 @@ ec_member_matches_indexcol(PlannerInfo *root, RelOptInfo *rel, { IndexOptInfo *index = ((ec_member_matches_arg *) arg)->index; int indexcol = ((ec_member_matches_arg *) arg)->indexcol; - Oid curFamily = index->opfamily[indexcol]; - Oid curCollation = index->indexcollations[indexcol]; + Oid curFamily; + Oid curCollation; + + Assert(indexcol < index->nkeycolumns); + + curFamily = index->opfamily[indexcol]; + curCollation = index->indexcollations[indexcol]; /* * If it's a btree index, we can reject it if its opfamily isn't @@ -3537,8 +3563,13 @@ expand_indexqual_conditions(IndexOptInfo *index, RestrictInfo *rinfo = (RestrictInfo *) lfirst(lcc); int indexcol = lfirst_int(lci); Expr *clause = rinfo->clause; - Oid curFamily = index->opfamily[indexcol]; - Oid curCollation = index->indexcollations[indexcol]; + Oid curFamily; + Oid curCollation; + + Assert(indexcol < index->nkeycolumns); + + curFamily = index->opfamily[indexcol]; + curCollation = index->indexcollations[indexcol]; /* First check for boolean cases */ if (IsBooleanOpfamily(curFamily)) @@ -3907,13 +3938,14 @@ adjust_rowcompare_for_index(RowCompareExpr *clause, /* * The Var side can match any column of the index. */ - for (i = 0; i < index->ncolumns; i++) + for (i = 0; i < index->nkeycolumns; i++) { if (match_index_to_operand(varop, i, index) && get_op_opfamily_strategy(expr_op, index->opfamily[i]) == op_strategy && IndexCollMatchesExprColl(index->indexcollations[i], lfirst_oid(collids_cell))) + break; } if (i >= index->ncolumns) diff --git a/src/backend/optimizer/path/joinpath.c b/src/backend/optimizer/path/joinpath.c index 43833ea9c9..642f951093 100644 --- a/src/backend/optimizer/path/joinpath.c +++ b/src/backend/optimizer/path/joinpath.c @@ -3,7 +3,7 @@ * joinpath.c * Routines to find all possible paths for processing a set of joins * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * @@ -26,9 +26,19 @@ /* Hook for plugins to get control in add_paths_to_joinrel() */ set_join_pathlist_hook_type set_join_pathlist_hook = NULL; -#define PATH_PARAM_BY_REL(path, rel) \ +/* + * Paths parameterized by the parent can be considered to be parameterized by + * any of its child. + */ +#define PATH_PARAM_BY_PARENT(path, rel) \ + ((path)->param_info && bms_overlap(PATH_REQ_OUTER(path), \ + (rel)->top_parent_relids)) +#define PATH_PARAM_BY_REL_SELF(path, rel) \ ((path)->param_info && bms_overlap(PATH_REQ_OUTER(path), (rel)->relids)) +#define PATH_PARAM_BY_REL(path, rel) \ + (PATH_PARAM_BY_REL_SELF(path, rel) || PATH_PARAM_BY_PARENT(path, rel)) + static void try_partial_mergejoin_path(PlannerInfo *root, RelOptInfo *joinrel, Path *outer_path, @@ -115,6 +125,19 @@ add_paths_to_joinrel(PlannerInfo *root, JoinPathExtraData extra; bool mergejoin_allowed = true; ListCell *lc; + Relids joinrelids; + + /* + * PlannerInfo doesn't contain the SpecialJoinInfos created for joins + * between child relations, even if there is a SpecialJoinInfo node for + * the join between the topmost parents. So, while calculating Relids set + * representing the restriction, consider relids of topmost parent of + * partitions. 
+ */ + if (joinrel->reloptkind == RELOPT_OTHER_JOINREL) + joinrelids = joinrel->top_parent_relids; + else + joinrelids = joinrel->relids; extra.restrictlist = restrictlist; extra.mergeclause_list = NIL; @@ -148,6 +171,7 @@ add_paths_to_joinrel(PlannerInfo *root, break; case JOIN_UNIQUE_OUTER: extra.inner_unique = innerrel_is_unique(root, + joinrel->relids, outerrel->relids, innerrel, JOIN_INNER, @@ -156,6 +180,7 @@ add_paths_to_joinrel(PlannerInfo *root, break; default: extra.inner_unique = innerrel_is_unique(root, + joinrel->relids, outerrel->relids, innerrel, jointype, @@ -184,7 +209,7 @@ add_paths_to_joinrel(PlannerInfo *root, * for cost estimation. These will be the same for all paths. */ if (jointype == JOIN_SEMI || jointype == JOIN_ANTI || extra.inner_unique) - compute_semi_anti_join_factors(root, outerrel, innerrel, + compute_semi_anti_join_factors(root, joinrel, outerrel, innerrel, jointype, sjinfo, restrictlist, &extra.semifactors); @@ -211,16 +236,16 @@ add_paths_to_joinrel(PlannerInfo *root, * join has already been proven legal.) If the SJ is relevant, it * presents constraints for joining to anything not in its RHS. */ - if (bms_overlap(joinrel->relids, sjinfo2->min_righthand) && - !bms_overlap(joinrel->relids, sjinfo2->min_lefthand)) + if (bms_overlap(joinrelids, sjinfo2->min_righthand) && + !bms_overlap(joinrelids, sjinfo2->min_lefthand)) extra.param_source_rels = bms_join(extra.param_source_rels, bms_difference(root->all_baserels, sjinfo2->min_righthand)); /* full joins constrain both sides symmetrically */ if (sjinfo2->jointype == JOIN_FULL && - bms_overlap(joinrel->relids, sjinfo2->min_lefthand) && - !bms_overlap(joinrel->relids, sjinfo2->min_righthand)) + bms_overlap(joinrelids, sjinfo2->min_lefthand) && + !bms_overlap(joinrelids, sjinfo2->min_righthand)) extra.param_source_rels = bms_join(extra.param_source_rels, bms_difference(root->all_baserels, sjinfo2->min_lefthand)); @@ -313,7 +338,7 @@ add_paths_to_joinrel(PlannerInfo *root, * across joins unless there's a join-order-constraint-based reason to do so. * So we ignore the param_source_rels restriction when this case applies. * - * allow_star_schema_join() returns TRUE if the param_source_rels restriction + * allow_star_schema_join() returns true if the param_source_rels restriction * should be overridden, ie, it's okay to perform this join. */ static inline bool @@ -347,11 +372,25 @@ try_nestloop_path(PlannerInfo *root, JoinCostWorkspace workspace; RelOptInfo *innerrel = inner_path->parent; RelOptInfo *outerrel = outer_path->parent; - Relids innerrelids = innerrel->relids; - Relids outerrelids = outerrel->relids; + Relids innerrelids; + Relids outerrelids; Relids inner_paramrels = PATH_REQ_OUTER(inner_path); Relids outer_paramrels = PATH_REQ_OUTER(outer_path); + /* + * Paths are parameterized by top-level parents, so run parameterization + * tests on the parent relids. 
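/*
 * A minimal sketch of the rule stated above: for child (partition) rels,
 * parameterization tests must be run against the topmost parent's relids,
 * and an inner path that turns out to be parameterized by the parent has to
 * be translated to the child -- or the join path abandoned -- before use.
 * The struct and helpers are illustrative stand-ins; translate_to_child()
 * plays the role of reparameterize_path_by_child().
 */
typedef unsigned int RelidsMask;	/* stand-in for Relids */

typedef struct SketchRel
{
	RelidsMask	relids;
	RelidsMask	top_parent_relids;	/* zero unless this is a child rel */
} SketchRel;

typedef struct SketchPath SketchPath;	/* opaque for this sketch */

extern SketchPath *translate_to_child(SketchPath *path, const SketchRel *child);
extern int	path_param_by_parent(const SketchPath *path, const SketchRel *rel);

/* Relids to use when checking a path's parameterization against this rel. */
static RelidsMask
sketch_param_test_relids(const SketchRel *rel)
{
	return rel->top_parent_relids != 0 ? rel->top_parent_relids : rel->relids;
}

/* Translate a parent-parameterized inner path, or report failure (0/NULL). */
static SketchPath *
sketch_fix_inner_param(SketchPath *inner, const SketchRel *outer)
{
	if (path_param_by_parent(inner, outer))
		return translate_to_child(inner, outer);	/* may return NULL */
	return inner;
}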
+ */ + if (innerrel->top_parent_relids) + innerrelids = innerrel->top_parent_relids; + else + innerrelids = innerrel->relids; + + if (outerrel->top_parent_relids) + outerrelids = outerrel->top_parent_relids; + else + outerrelids = outerrel->relids; + /* * Check to see if proposed path is still parameterized, and reject if the * parameterization wouldn't be sensible --- unless allow_star_schema_join @@ -387,6 +426,27 @@ try_nestloop_path(PlannerInfo *root, workspace.startup_cost, workspace.total_cost, pathkeys, required_outer)) { + /* + * If the inner path is parameterized, it is parameterized by the + * topmost parent of the outer rel, not the outer rel itself. Fix + * that. + */ + if (PATH_PARAM_BY_PARENT(inner_path, outer_path->parent)) + { + inner_path = reparameterize_path_by_child(root, inner_path, + outer_path->parent); + + /* + * If we could not translate the path, we can't create nest loop + * path. + */ + if (!inner_path) + { + bms_free(required_outer); + return; + } + } + add_path(joinrel, (Path *) create_nestloop_path(root, joinrel, @@ -432,8 +492,20 @@ try_partial_nestloop_path(PlannerInfo *root, if (inner_path->param_info != NULL) { Relids inner_paramrels = inner_path->param_info->ppi_req_outer; + RelOptInfo *outerrel = outer_path->parent; + Relids outerrelids; - if (!bms_is_subset(inner_paramrels, outer_path->parent->relids)) + /* + * The inner and outer paths are parameterized, if at all, by the top + * level parents, not the child relations, so we must use those relids + * for our parameterization tests. + */ + if (outerrel->top_parent_relids) + outerrelids = outerrel->top_parent_relids; + else + outerrelids = outerrel->relids; + + if (!bms_is_subset(inner_paramrels, outerrelids)) return; } @@ -446,6 +518,22 @@ try_partial_nestloop_path(PlannerInfo *root, if (!add_partial_path_precheck(joinrel, workspace.total_cost, pathkeys)) return; + /* + * If the inner path is parameterized, it is parameterized by the topmost + * parent of the outer rel, not the outer rel itself. Fix that. + */ + if (PATH_PARAM_BY_PARENT(inner_path, outer_path->parent)) + { + inner_path = reparameterize_path_by_child(root, inner_path, + outer_path->parent); + + /* + * If we could not translate the path, we can't create nest loop path. + */ + if (!inner_path) + return; + } + /* Might be good enough to be worth trying, so let's try it. */ add_partial_path(joinrel, (Path *) create_nestloop_path(root, @@ -661,7 +749,7 @@ try_hashjoin_path(PlannerInfo *root, * never have any output pathkeys, per comments in create_hashjoin_path. */ initial_cost_hashjoin(root, &workspace, jointype, hashclauses, - outer_path, inner_path, extra); + outer_path, inner_path, extra, false); if (add_path_precheck(joinrel, workspace.startup_cost, workspace.total_cost, @@ -675,6 +763,7 @@ try_hashjoin_path(PlannerInfo *root, extra, outer_path, inner_path, + false, /* parallel_hash */ extra->restrictlist, required_outer, hashclauses)); @@ -690,6 +779,10 @@ try_hashjoin_path(PlannerInfo *root, * try_partial_hashjoin_path * Consider a partial hashjoin join path; if it appears useful, push it into * the joinrel's partial_pathlist via add_partial_path(). + * The outer side is partial. If parallel_hash is true, then the inner path + * must be partial and will be run in parallel to create one or more shared + * hash tables; otherwise the inner path must be complete and a copy of it + * is run in every process to create separate identical private hash tables. 
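/*
 * A simplified sketch of the two partial-hashjoin shapes described above.
 * "enable_parallel_hash" mirrors the GUC used in the hunk below; the struct
 * and the sketch_try_partial_hashjoin() helper are illustrative stand-ins.
 * Both candidates may be generated for the same join, as in the real code.
 */
#include <stdbool.h>

typedef struct SketchJoinInput
{
	bool		inner_has_partial_path; /* inner side scannable in parallel? */
	bool		inner_has_safe_full_path;	/* parallel-safe complete inner? */
} SketchJoinInput;

extern void sketch_try_partial_hashjoin(bool parallel_hash);

static void
sketch_consider_partial_hashjoins(const SketchJoinInput *in,
								  bool enable_parallel_hash)
{
	/* Shared hash table: partial inner path, built by all workers together. */
	if (in->inner_has_partial_path && enable_parallel_hash)
		sketch_try_partial_hashjoin(true);

	/* Private hash tables: each worker builds its own copy of the inner. */
	if (in->inner_has_safe_full_path)
		sketch_try_partial_hashjoin(false);
}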
*/ static void try_partial_hashjoin_path(PlannerInfo *root, @@ -698,7 +791,8 @@ try_partial_hashjoin_path(PlannerInfo *root, Path *inner_path, List *hashclauses, JoinType jointype, - JoinPathExtraData *extra) + JoinPathExtraData *extra, + bool parallel_hash) { JoinCostWorkspace workspace; @@ -722,7 +816,7 @@ try_partial_hashjoin_path(PlannerInfo *root, * cost. Bail out right away if it looks terrible. */ initial_cost_hashjoin(root, &workspace, jointype, hashclauses, - outer_path, inner_path, extra); + outer_path, inner_path, extra, parallel_hash); if (!add_partial_path_precheck(joinrel, workspace.total_cost, NIL)) return; @@ -735,6 +829,7 @@ try_partial_hashjoin_path(PlannerInfo *root, extra, outer_path, inner_path, + parallel_hash, extra->restrictlist, NULL, hashclauses)); @@ -916,10 +1011,10 @@ sort_inner_and_outer(PlannerInfo *root, outerkeys = all_pathkeys; /* no work at first one... */ /* Sort the mergeclauses into the corresponding ordering */ - cur_mergeclauses = find_mergeclauses_for_pathkeys(root, - outerkeys, - true, - extra->mergeclause_list); + cur_mergeclauses = + find_mergeclauses_for_outer_pathkeys(root, + outerkeys, + extra->mergeclause_list); /* Should have used them all... */ Assert(list_length(cur_mergeclauses) == list_length(extra->mergeclause_list)); @@ -1009,10 +1104,10 @@ generate_mergejoin_paths(PlannerInfo *root, jointype = JOIN_INNER; /* Look for useful mergeclauses (if any) */ - mergeclauses = find_mergeclauses_for_pathkeys(root, - outerpath->pathkeys, - true, - extra->mergeclause_list); + mergeclauses = + find_mergeclauses_for_outer_pathkeys(root, + outerpath->pathkeys, + extra->mergeclause_list); /* * Done with this outer path if no chance for a mergejoin. @@ -1135,10 +1230,9 @@ generate_mergejoin_paths(PlannerInfo *root, if (sortkeycnt < num_sortkeys) { newclauses = - find_mergeclauses_for_pathkeys(root, - trialsortkeys, - false, - mergeclauses); + trim_mergeclauses_for_inner_pathkeys(root, + mergeclauses, + trialsortkeys); Assert(newclauses != NIL); } else @@ -1179,10 +1273,9 @@ generate_mergejoin_paths(PlannerInfo *root, if (sortkeycnt < num_sortkeys) { newclauses = - find_mergeclauses_for_pathkeys(root, - trialsortkeys, - false, - mergeclauses); + trim_mergeclauses_for_inner_pathkeys(root, + mergeclauses, + trialsortkeys); Assert(newclauses != NIL); } else @@ -1609,7 +1702,7 @@ hash_inner_and_outer(PlannerInfo *root, * If processing an outer join, only use its own join clauses for * hashing. For inner joins we need not be so picky. */ - if (isouterjoin && restrictinfo->is_pushed_down) + if (isouterjoin && RINFO_IS_PUSHED_DOWN(restrictinfo, joinrel->relids)) continue; if (!restrictinfo->can_join || @@ -1753,6 +1846,10 @@ hash_inner_and_outer(PlannerInfo *root, * able to properly guarantee uniqueness. Similarly, we can't handle * JOIN_FULL and JOIN_RIGHT, because they can produce false null * extended rows. Also, the resulting path must not be parameterized. + * We would be able to support JOIN_FULL and JOIN_RIGHT for Parallel + * Hash, since in that case we're back to a single hash table with a + * single set of match bits for each batch, but that will require + * figuring out a deadlock-free way to wait for the probe to finish. 
*/ if (joinrel->consider_parallel && save_jointype != JOIN_UNIQUE_OUTER && @@ -1762,11 +1859,27 @@ hash_inner_and_outer(PlannerInfo *root, bms_is_empty(joinrel->lateral_relids)) { Path *cheapest_partial_outer; + Path *cheapest_partial_inner = NULL; Path *cheapest_safe_inner = NULL; cheapest_partial_outer = (Path *) linitial(outerrel->partial_pathlist); + /* + * Can we use a partial inner plan too, so that we can build a + * shared hash table in parallel? + */ + if (innerrel->partial_pathlist != NIL && enable_parallel_hash) + { + cheapest_partial_inner = + (Path *) linitial(innerrel->partial_pathlist); + try_partial_hashjoin_path(root, joinrel, + cheapest_partial_outer, + cheapest_partial_inner, + hashclauses, jointype, extra, + true /* parallel_hash */ ); + } + /* * Normally, given that the joinrel is parallel-safe, the cheapest * total inner path will also be parallel-safe, but if not, we'll @@ -1784,7 +1897,8 @@ hash_inner_and_outer(PlannerInfo *root, try_partial_hashjoin_path(root, joinrel, cheapest_partial_outer, cheapest_safe_inner, - hashclauses, jointype, extra); + hashclauses, jointype, extra, + false /* parallel_hash */ ); } } } @@ -1794,7 +1908,7 @@ hash_inner_and_outer(PlannerInfo *root, * Select mergejoin clauses that are usable for a particular join. * Returns a list of RestrictInfo nodes for those clauses. * - * *mergejoin_allowed is normally set to TRUE, but it is set to FALSE if + * *mergejoin_allowed is normally set to true, but it is set to false if * this is a right/full join and there are nonmergejoinable join clauses. * The executor's mergejoin machinery cannot handle such cases, so we have * to avoid generating a mergejoin plan. (Note that this flag does NOT @@ -1835,7 +1949,7 @@ select_mergejoin_clauses(PlannerInfo *root, * we don't set have_nonmergeable_joinclause here because pushed-down * clauses will become otherquals not joinquals.) 
*/ - if (isouterjoin && restrictinfo->is_pushed_down) + if (isouterjoin && RINFO_IS_PUSHED_DOWN(restrictinfo, joinrel->relids)) continue; /* Check that clause is a mergeable operator clause */ diff --git a/src/backend/optimizer/path/joinrels.c b/src/backend/optimizer/path/joinrels.c index 6ee23509c5..d3d21fed5d 100644 --- a/src/backend/optimizer/path/joinrels.c +++ b/src/backend/optimizer/path/joinrels.c @@ -3,7 +3,7 @@ * joinrels.c * Routines to determine which relations should be joined * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * @@ -14,9 +14,14 @@ */ #include "postgres.h" +#include "miscadmin.h" +#include "optimizer/clauses.h" #include "optimizer/joininfo.h" #include "optimizer/pathnode.h" #include "optimizer/paths.h" +#include "optimizer/prep.h" +#include "partitioning/partbounds.h" +#include "utils/lsyscache.h" #include "utils/memutils.h" @@ -29,12 +34,18 @@ static void make_rels_by_clauseless_joins(PlannerInfo *root, static bool has_join_restriction(PlannerInfo *root, RelOptInfo *rel); static bool has_legal_joinclause(PlannerInfo *root, RelOptInfo *rel); static bool is_dummy_rel(RelOptInfo *rel); -static void mark_dummy_rel(RelOptInfo *rel); static bool restriction_is_constant_false(List *restrictlist, + RelOptInfo *joinrel, bool only_pushed_down); static void populate_joinrel_with_paths(PlannerInfo *root, RelOptInfo *rel1, RelOptInfo *rel2, RelOptInfo *joinrel, SpecialJoinInfo *sjinfo, List *restrictlist); +static void try_partitionwise_join(PlannerInfo *root, RelOptInfo *rel1, + RelOptInfo *rel2, RelOptInfo *joinrel, + SpecialJoinInfo *parent_sjinfo, + List *parent_restrictlist); +static int match_expr_to_partition_keys(Expr *expr, RelOptInfo *rel, + bool strict_op); /* @@ -323,7 +334,7 @@ make_rels_by_clauseless_joins(PlannerInfo *root, * * On success, *sjinfo_p is set to NULL if this is to be a plain inner join, * else it's set to point to the associated SpecialJoinInfo node. Also, - * *reversed_p is set TRUE if the given relations need to be swapped to + * *reversed_p is set true if the given relations need to be swapped to * match the SpecialJoinInfo node. 
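/*
 * A standalone sketch of the test the RINFO_IS_PUSHED_DOWN() calls above
 * stand for: relative to a particular join, a clause counts as pushed down
 * if it is marked pushed down or if its required relids are not fully
 * contained in that join's relids -- which matters for child joins, whose
 * relids differ from the parent's.  The bitmask is a stand-in for Bitmapset;
 * the real macro definition is not part of this excerpt.
 */
#include <stdbool.h>

typedef unsigned int RelidsMask;

typedef struct SketchRestrictInfo
{
	bool		is_pushed_down;
	RelidsMask	required_relids;
} SketchRestrictInfo;

static bool
sketch_rinfo_is_pushed_down(const SketchRestrictInfo *rinfo,
							RelidsMask joinrelids)
{
	/* "subset" for bitmasks: no required bit falls outside joinrelids */
	bool		is_subset = (rinfo->required_relids & ~joinrelids) == 0;

	return rinfo->is_pushed_down || !is_subset;
}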
*/ static bool @@ -770,7 +781,7 @@ populate_joinrel_with_paths(PlannerInfo *root, RelOptInfo *rel1, { case JOIN_INNER: if (is_dummy_rel(rel1) || is_dummy_rel(rel2) || - restriction_is_constant_false(restrictlist, false)) + restriction_is_constant_false(restrictlist, joinrel, false)) { mark_dummy_rel(joinrel); break; @@ -784,12 +795,12 @@ populate_joinrel_with_paths(PlannerInfo *root, RelOptInfo *rel1, break; case JOIN_LEFT: if (is_dummy_rel(rel1) || - restriction_is_constant_false(restrictlist, true)) + restriction_is_constant_false(restrictlist, joinrel, true)) { mark_dummy_rel(joinrel); break; } - if (restriction_is_constant_false(restrictlist, false) && + if (restriction_is_constant_false(restrictlist, joinrel, false) && bms_is_subset(rel2->relids, sjinfo->syn_righthand)) mark_dummy_rel(rel2); add_paths_to_joinrel(root, joinrel, rel1, rel2, @@ -801,7 +812,7 @@ populate_joinrel_with_paths(PlannerInfo *root, RelOptInfo *rel1, break; case JOIN_FULL: if ((is_dummy_rel(rel1) && is_dummy_rel(rel2)) || - restriction_is_constant_false(restrictlist, true)) + restriction_is_constant_false(restrictlist, joinrel, true)) { mark_dummy_rel(joinrel); break; @@ -837,7 +848,7 @@ populate_joinrel_with_paths(PlannerInfo *root, RelOptInfo *rel1, bms_is_subset(sjinfo->min_righthand, rel2->relids)) { if (is_dummy_rel(rel1) || is_dummy_rel(rel2) || - restriction_is_constant_false(restrictlist, false)) + restriction_is_constant_false(restrictlist, joinrel, false)) { mark_dummy_rel(joinrel); break; @@ -860,7 +871,7 @@ populate_joinrel_with_paths(PlannerInfo *root, RelOptInfo *rel1, sjinfo) != NULL) { if (is_dummy_rel(rel1) || is_dummy_rel(rel2) || - restriction_is_constant_false(restrictlist, false)) + restriction_is_constant_false(restrictlist, joinrel, false)) { mark_dummy_rel(joinrel); break; @@ -875,12 +886,12 @@ populate_joinrel_with_paths(PlannerInfo *root, RelOptInfo *rel1, break; case JOIN_ANTI: if (is_dummy_rel(rel1) || - restriction_is_constant_false(restrictlist, true)) + restriction_is_constant_false(restrictlist, joinrel, true)) { mark_dummy_rel(joinrel); break; } - if (restriction_is_constant_false(restrictlist, false) && + if (restriction_is_constant_false(restrictlist, joinrel, false) && bms_is_subset(rel2->relids, sjinfo->syn_righthand)) mark_dummy_rel(rel2); add_paths_to_joinrel(root, joinrel, rel1, rel2, @@ -892,6 +903,9 @@ populate_joinrel_with_paths(PlannerInfo *root, RelOptInfo *rel1, elog(ERROR, "unrecognized join type: %d", (int) sjinfo->jointype); break; } + + /* Apply partitionwise join technique, if possible. */ + try_partitionwise_join(root, rel1, rel2, joinrel, sjinfo, restrictlist); } @@ -1197,7 +1211,7 @@ is_dummy_rel(RelOptInfo *rel) * is that the best solution is to explicitly make the dummy path in the same * context the given RelOptInfo is in. */ -static void +void mark_dummy_rel(RelOptInfo *rel) { MemoryContext oldcontext; @@ -1217,7 +1231,8 @@ mark_dummy_rel(RelOptInfo *rel) rel->partial_pathlist = NIL; /* Set up the dummy path */ - add_path(rel, (Path *) create_append_path(rel, NIL, NULL, 0, NIL)); + add_path(rel, (Path *) create_append_path(NULL, rel, NIL, NIL, NULL, + 0, false, NIL, -1)); /* Set or update cheapest_total_path and related fields */ set_cheapest(rel); @@ -1235,10 +1250,13 @@ mark_dummy_rel(RelOptInfo *rel) * decide there's no match for an outer row, which is pretty stupid. So, * we need to detect the case. * - * If only_pushed_down is TRUE, then consider only pushed-down quals. 
+ * If only_pushed_down is true, then consider only quals that are pushed-down + * from the point of view of the joinrel. */ static bool -restriction_is_constant_false(List *restrictlist, bool only_pushed_down) +restriction_is_constant_false(List *restrictlist, + RelOptInfo *joinrel, + bool only_pushed_down) { ListCell *lc; @@ -1252,7 +1270,7 @@ restriction_is_constant_false(List *restrictlist, bool only_pushed_down) { RestrictInfo *rinfo = lfirst_node(RestrictInfo, lc); - if (only_pushed_down && !rinfo->is_pushed_down) + if (only_pushed_down && !RINFO_IS_PUSHED_DOWN(rinfo, joinrel->relids)) continue; if (rinfo->clause && IsA(rinfo->clause, Const)) @@ -1268,3 +1286,299 @@ restriction_is_constant_false(List *restrictlist, bool only_pushed_down) } return false; } + +/* + * Assess whether join between given two partitioned relations can be broken + * down into joins between matching partitions; a technique called + * "partitionwise join" + * + * Partitionwise join is possible when a. Joining relations have same + * partitioning scheme b. There exists an equi-join between the partition keys + * of the two relations. + * + * Partitionwise join is planned as follows (details: optimizer/README.) + * + * 1. Create the RelOptInfos for joins between matching partitions i.e + * child-joins and add paths to them. + * + * 2. Construct Append or MergeAppend paths across the set of child joins. + * This second phase is implemented by generate_partitionwise_join_paths(). + * + * The RelOptInfo, SpecialJoinInfo and restrictlist for each child join are + * obtained by translating the respective parent join structures. + */ +static void +try_partitionwise_join(PlannerInfo *root, RelOptInfo *rel1, RelOptInfo *rel2, + RelOptInfo *joinrel, SpecialJoinInfo *parent_sjinfo, + List *parent_restrictlist) +{ + int nparts; + int cnt_parts; + + /* Guard against stack overflow due to overly deep partition hierarchy. */ + check_stack_depth(); + + /* Nothing to do, if the join relation is not partitioned. */ + if (!IS_PARTITIONED_REL(joinrel)) + return; + + /* The join relation should have consider_partitionwise_join set. */ + Assert(joinrel->consider_partitionwise_join); + + /* + * Since this join relation is partitioned, all the base relations + * participating in this join must be partitioned and so are all the + * intermediate join relations. + */ + Assert(IS_PARTITIONED_REL(rel1) && IS_PARTITIONED_REL(rel2)); + Assert(REL_HAS_ALL_PART_PROPS(rel1) && REL_HAS_ALL_PART_PROPS(rel2)); + + /* The joining relations should have consider_partitionwise_join set. */ + Assert(rel1->consider_partitionwise_join && + rel2->consider_partitionwise_join); + + /* + * The partition scheme of the join relation should match that of the + * joining relations. + */ + Assert(joinrel->part_scheme == rel1->part_scheme && + joinrel->part_scheme == rel2->part_scheme); + + /* + * Since we allow partitionwise join only when the partition bounds of the + * joining relations exactly match, the partition bounds of the join + * should match those of the joining relations. + */ + Assert(partition_bounds_equal(joinrel->part_scheme->partnatts, + joinrel->part_scheme->parttyplen, + joinrel->part_scheme->parttypbyval, + joinrel->boundinfo, rel1->boundinfo)); + Assert(partition_bounds_equal(joinrel->part_scheme->partnatts, + joinrel->part_scheme->parttyplen, + joinrel->part_scheme->parttypbyval, + joinrel->boundinfo, rel2->boundinfo)); + + nparts = joinrel->nparts; + + /* + * Create child-join relations for this partitioned join, if those don't + * exist. 
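/*
 * A high-level standalone sketch of the two phases of partitionwise join
 * described above: join the matching partitions pairwise, then combine the
 * child-join results with an Append-like step.  All types and helpers here
 * are illustrative stand-ins, not planner APIs.
 */
extern void *sketch_join_children(void *child_rel1, void *child_rel2);
extern void *sketch_append_children(void **child_joins, int nparts);

static void *
sketch_partitionwise_join(void **rel1_parts, void **rel2_parts,
						  void **child_joins, int nparts)
{
	int			i;

	/* Phase 1: build a child join for each pair of matching partitions. */
	for (i = 0; i < nparts; i++)
		child_joins[i] = sketch_join_children(rel1_parts[i], rel2_parts[i]);

	/* Phase 2: stitch the child joins together (Append / MergeAppend). */
	return sketch_append_children(child_joins, nparts);
}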
Add paths to child-joins for a pair of child relations + * corresponding to the given pair of parent relations. + */ + for (cnt_parts = 0; cnt_parts < nparts; cnt_parts++) + { + RelOptInfo *child_rel1 = rel1->part_rels[cnt_parts]; + RelOptInfo *child_rel2 = rel2->part_rels[cnt_parts]; + SpecialJoinInfo *child_sjinfo; + List *child_restrictlist; + RelOptInfo *child_joinrel; + Relids child_joinrelids; + AppendRelInfo **appinfos; + int nappinfos; + + /* We should never try to join two overlapping sets of rels. */ + Assert(!bms_overlap(child_rel1->relids, child_rel2->relids)); + child_joinrelids = bms_union(child_rel1->relids, child_rel2->relids); + appinfos = find_appinfos_by_relids(root, child_joinrelids, &nappinfos); + + /* + * Construct SpecialJoinInfo from parent join relations's + * SpecialJoinInfo. + */ + child_sjinfo = build_child_join_sjinfo(root, parent_sjinfo, + child_rel1->relids, + child_rel2->relids); + + /* + * Construct restrictions applicable to the child join from those + * applicable to the parent join. + */ + child_restrictlist = + (List *) adjust_appendrel_attrs(root, + (Node *) parent_restrictlist, + nappinfos, appinfos); + pfree(appinfos); + + child_joinrel = joinrel->part_rels[cnt_parts]; + if (!child_joinrel) + { + child_joinrel = build_child_join_rel(root, child_rel1, child_rel2, + joinrel, child_restrictlist, + child_sjinfo, + child_sjinfo->jointype); + joinrel->part_rels[cnt_parts] = child_joinrel; + } + + Assert(bms_equal(child_joinrel->relids, child_joinrelids)); + + populate_joinrel_with_paths(root, child_rel1, child_rel2, + child_joinrel, child_sjinfo, + child_restrictlist); + } +} + +/* + * Returns true if there exists an equi-join condition for each pair of + * partition keys from given relations being joined. + */ +bool +have_partkey_equi_join(RelOptInfo *joinrel, + RelOptInfo *rel1, RelOptInfo *rel2, + JoinType jointype, List *restrictlist) +{ + PartitionScheme part_scheme = rel1->part_scheme; + ListCell *lc; + int cnt_pks; + bool pk_has_clause[PARTITION_MAX_KEYS]; + bool strict_op; + + /* + * This function should be called when the joining relations have same + * partitioning scheme. + */ + Assert(rel1->part_scheme == rel2->part_scheme); + Assert(part_scheme); + + memset(pk_has_clause, 0, sizeof(pk_has_clause)); + foreach(lc, restrictlist) + { + RestrictInfo *rinfo = lfirst_node(RestrictInfo, lc); + OpExpr *opexpr; + Expr *expr1; + Expr *expr2; + int ipk1; + int ipk2; + + /* If processing an outer join, only use its own join clauses. */ + if (IS_OUTER_JOIN(jointype) && + RINFO_IS_PUSHED_DOWN(rinfo, joinrel->relids)) + continue; + + /* Skip clauses which can not be used for a join. */ + if (!rinfo->can_join) + continue; + + /* Skip clauses which are not equality conditions. */ + if (!rinfo->mergeopfamilies && !OidIsValid(rinfo->hashjoinoperator)) + continue; + + opexpr = (OpExpr *) rinfo->clause; + Assert(is_opclause(opexpr)); + + /* + * The equi-join between partition keys is strict if equi-join between + * at least one partition key is using a strict operator. See + * explanation about outer join reordering identity 3 in + * optimizer/README + */ + strict_op = op_strict(opexpr->opno); + + /* Match the operands to the relation. 
*/ + if (bms_is_subset(rinfo->left_relids, rel1->relids) && + bms_is_subset(rinfo->right_relids, rel2->relids)) + { + expr1 = linitial(opexpr->args); + expr2 = lsecond(opexpr->args); + } + else if (bms_is_subset(rinfo->left_relids, rel2->relids) && + bms_is_subset(rinfo->right_relids, rel1->relids)) + { + expr1 = lsecond(opexpr->args); + expr2 = linitial(opexpr->args); + } + else + continue; + + /* + * Only clauses referencing the partition keys are useful for + * partitionwise join. + */ + ipk1 = match_expr_to_partition_keys(expr1, rel1, strict_op); + if (ipk1 < 0) + continue; + ipk2 = match_expr_to_partition_keys(expr2, rel2, strict_op); + if (ipk2 < 0) + continue; + + /* + * If the clause refers to keys at different ordinal positions, it can + * not be used for partitionwise join. + */ + if (ipk1 != ipk2) + continue; + + /* + * The clause allows partitionwise join if only it uses the same + * operator family as that specified by the partition key. + */ + if (rel1->part_scheme->strategy == PARTITION_STRATEGY_HASH) + { + if (!op_in_opfamily(rinfo->hashjoinoperator, + part_scheme->partopfamily[ipk1])) + continue; + } + else if (!list_member_oid(rinfo->mergeopfamilies, + part_scheme->partopfamily[ipk1])) + continue; + + /* Mark the partition key as having an equi-join clause. */ + pk_has_clause[ipk1] = true; + } + + /* Check whether every partition key has an equi-join condition. */ + for (cnt_pks = 0; cnt_pks < part_scheme->partnatts; cnt_pks++) + { + if (!pk_has_clause[cnt_pks]) + return false; + } + + return true; +} + +/* + * Find the partition key from the given relation matching the given + * expression. If found, return the index of the partition key, else return -1. + */ +static int +match_expr_to_partition_keys(Expr *expr, RelOptInfo *rel, bool strict_op) +{ + int cnt; + + /* This function should be called only for partitioned relations. */ + Assert(rel->part_scheme); + + /* Remove any relabel decorations. */ + while (IsA(expr, RelabelType)) + expr = (Expr *) (castNode(RelabelType, expr))->arg; + + for (cnt = 0; cnt < rel->part_scheme->partnatts; cnt++) + { + ListCell *lc; + + Assert(rel->partexprs); + foreach(lc, rel->partexprs[cnt]) + { + if (equal(lfirst(lc), expr)) + return cnt; + } + + if (!strict_op) + continue; + + /* + * If it's a strict equi-join a NULL partition key on one side will + * not join a NULL partition key on the other side. So, rows with NULL + * partition key from a partition on one side can not join with those + * from a non-matching partition on the other side. So, search the + * nullable partition keys as well. + */ + Assert(rel->nullable_partexprs); + foreach(lc, rel->nullable_partexprs[cnt]) + { + if (equal(lfirst(lc), expr)) + return cnt; + } + } + + return -1; +} diff --git a/src/backend/optimizer/path/pathkeys.c b/src/backend/optimizer/path/pathkeys.c index 9d83a5ca62..ec66cb9c3c 100644 --- a/src/backend/optimizer/path/pathkeys.c +++ b/src/backend/optimizer/path/pathkeys.c @@ -7,7 +7,7 @@ * the nature and use of path keys. * * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * IDENTIFICATION @@ -162,8 +162,8 @@ pathkey_is_redundant(PathKey *new_pathkey, List *pathkeys) * considered. Otherwise child members are ignored. (See the comments for * get_eclass_for_sort_expr.) * - * create_it is TRUE if we should create any missing EquivalenceClass - * needed to represent the sort key. 
If it's FALSE, we return NULL if the + * create_it is true if we should create any missing EquivalenceClass + * needed to represent the sort key. If it's false, we return NULL if the * sort key isn't already present in any EquivalenceClass. */ static PathKey * @@ -447,8 +447,10 @@ get_cheapest_parallel_safe_total_inner(List *paths) * If 'scandir' is BackwardScanDirection, build pathkeys representing a * backwards scan of the index. * - * The result is canonical, meaning that redundant pathkeys are removed; - * it may therefore have fewer entries than there are index columns. + * We iterate only key columns of covering indexes, since non-key columns + * don't influence index ordering. The result is canonical, meaning that + * redundant pathkeys are removed; it may therefore have fewer entries than + * there are key columns in the index. * * Another reason for stopping early is that we may be able to tell that * an index column's sort order is uninteresting for this query. However, @@ -477,6 +479,13 @@ build_index_pathkeys(PlannerInfo *root, bool nulls_first; PathKey *cpathkey; + /* + * INCLUDE columns are stored in index unordered, so they don't + * support ordered index scan. + */ + if (i >= index->nkeycolumns) + break; + /* We assume we don't need to make a copy of the tlist item */ indexkey = indextle->expr; @@ -981,16 +990,14 @@ update_mergeclause_eclasses(PlannerInfo *root, RestrictInfo *restrictinfo) } /* - * find_mergeclauses_for_pathkeys - * This routine attempts to find a set of mergeclauses that can be - * used with a specified ordering for one of the input relations. + * find_mergeclauses_for_outer_pathkeys + * This routine attempts to find a list of mergeclauses that can be + * used with a specified ordering for the join's outer relation. * If successful, it returns a list of mergeclauses. * - * 'pathkeys' is a pathkeys list showing the ordering of an input path. - * 'outer_keys' is TRUE if these keys are for the outer input path, - * FALSE if for inner. + * 'pathkeys' is a pathkeys list showing the ordering of an outer-rel path. * 'restrictinfos' is a list of mergejoinable restriction clauses for the - * join relation being formed. + * join relation being formed, in no particular order. * * The restrictinfos must be marked (via outer_is_left) to show which side * of each clause is associated with the current outer path. (See @@ -998,12 +1005,12 @@ update_mergeclause_eclasses(PlannerInfo *root, RestrictInfo *restrictinfo) * * The result is NIL if no merge can be done, else a maximal list of * usable mergeclauses (represented as a list of their restrictinfo nodes). + * The list is ordered to match the pathkeys, as required for execution. */ List * -find_mergeclauses_for_pathkeys(PlannerInfo *root, - List *pathkeys, - bool outer_keys, - List *restrictinfos) +find_mergeclauses_for_outer_pathkeys(PlannerInfo *root, + List *pathkeys, + List *restrictinfos) { List *mergeclauses = NIL; ListCell *i; @@ -1044,19 +1051,20 @@ find_mergeclauses_for_pathkeys(PlannerInfo *root, * * It's possible that multiple matching clauses might have different * ECs on the other side, in which case the order we put them into our - * result makes a difference in the pathkeys required for the other - * input path. However this routine hasn't got any info about which + * result makes a difference in the pathkeys required for the inner + * input rel. However this routine hasn't got any info about which * order would be best, so we don't worry about that. 
* * It's also possible that the selected mergejoin clauses produce - * a noncanonical ordering of pathkeys for the other side, ie, we + * a noncanonical ordering of pathkeys for the inner side, ie, we * might select clauses that reference b.v1, b.v2, b.v1 in that * order. This is not harmful in itself, though it suggests that - * the clauses are partially redundant. Since it happens only with - * redundant query conditions, we don't bother to eliminate it. - * make_inner_pathkeys_for_merge() has to delete duplicates when - * it constructs the canonical pathkeys list, and we also have to - * deal with the case in create_mergejoin_plan(). + * the clauses are partially redundant. Since the alternative is + * to omit mergejoin clauses and thereby possibly fail to generate a + * plan altogether, we live with it. make_inner_pathkeys_for_merge() + * has to delete duplicates when it constructs the inner pathkeys + * list, and we also have to deal with such cases specially in + * create_mergejoin_plan(). *---------- */ foreach(j, restrictinfos) @@ -1064,12 +1072,8 @@ find_mergeclauses_for_pathkeys(PlannerInfo *root, RestrictInfo *rinfo = (RestrictInfo *) lfirst(j); EquivalenceClass *clause_ec; - if (outer_keys) - clause_ec = rinfo->outer_is_left ? - rinfo->left_ec : rinfo->right_ec; - else - clause_ec = rinfo->outer_is_left ? - rinfo->right_ec : rinfo->left_ec; + clause_ec = rinfo->outer_is_left ? + rinfo->left_ec : rinfo->right_ec; if (clause_ec == pathkey_ec) matched_restrictinfos = lappend(matched_restrictinfos, rinfo); } @@ -1273,8 +1277,8 @@ select_outer_pathkeys_for_merge(PlannerInfo *root, * must be applied to an inner path to make it usable with the * given mergeclauses. * - * 'mergeclauses' is a list of RestrictInfos for mergejoin clauses - * that will be used in a merge join. + * 'mergeclauses' is a list of RestrictInfos for the mergejoin clauses + * that will be used in a merge join, in order. * 'outer_pathkeys' are the already-known canonical pathkeys for the outer * side of the join. * @@ -1351,8 +1355,13 @@ make_inner_pathkeys_for_merge(PlannerInfo *root, opathkey->pk_nulls_first); /* - * Don't generate redundant pathkeys (can happen if multiple - * mergeclauses refer to same EC). + * Don't generate redundant pathkeys (which can happen if multiple + * mergeclauses refer to the same EC). Because we do this, the output + * pathkey list isn't necessarily ordered like the mergeclauses, which + * complicates life for create_mergejoin_plan(). But if we didn't, + * we'd have a noncanonical sort key list, which would be bad; for one + * reason, it certainly wouldn't match any available sort order for + * the input relation. */ if (!pathkey_is_redundant(pathkey, pathkeys)) pathkeys = lappend(pathkeys, pathkey); @@ -1361,6 +1370,98 @@ make_inner_pathkeys_for_merge(PlannerInfo *root, return pathkeys; } +/* + * trim_mergeclauses_for_inner_pathkeys + * This routine trims a list of mergeclauses to include just those that + * work with a specified ordering for the join's inner relation. + * + * 'mergeclauses' is a list of RestrictInfos for mergejoin clauses for the + * join relation being formed, in an order known to work for the + * currently-considered sort ordering of the join's outer rel. + * 'pathkeys' is a pathkeys list showing the ordering of an inner-rel path; + * it should be equal to, or a truncation of, the result of + * make_inner_pathkeys_for_merge for these mergeclauses. + * + * What we return will be a prefix of the given mergeclauses list. 
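/*
 * A worked standalone example of why the trimmed result must be a prefix.
 * Suppose the outer-ordered mergeclauses reference inner equivalence classes
 * (1, 2, 1) -- i.e. b.v1, b.v2, b.v1 -- so the deduplicated inner pathkeys
 * are (1, 2).  If the inner sort is truncated to just (1), only the first
 * clause remains usable: the third also references EC 1, but it sits to the
 * right of a clause whose inner pathkey was dropped, so it must be dropped
 * too.  Plain ints stand in for equivalence classes here.
 */
static int
sketch_trim_to_prefix(const int *clause_ecs, int nclauses,
					  const int *inner_pathkey_ecs, int npathkeys)
{
	int			kept = 0;
	int			pk = 0;
	int			matched_this_pk = 0;

	if (npathkeys == 0)
		return 0;

	while (kept < nclauses)
	{
		if (clause_ecs[kept] == inner_pathkey_ecs[pk])
		{
			matched_this_pk = 1;
			kept++;
			continue;
		}
		/* Clause doesn't match current pathkey: advance if we can. */
		if (!matched_this_pk || pk + 1 >= npathkeys)
			break;
		pk++;
		matched_this_pk = 0;
	}
	return kept;				/* number of usable leading mergeclauses */
}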
+ * + * We need this logic because make_inner_pathkeys_for_merge's result isn't + * necessarily in the same order as the mergeclauses. That means that if we + * consider an inner-rel pathkey list that is a truncation of that result, + * we might need to drop mergeclauses even though they match a surviving inner + * pathkey. This happens when they are to the right of a mergeclause that + * matches a removed inner pathkey. + * + * The mergeclauses must be marked (via outer_is_left) to show which side + * of each clause is associated with the current outer path. (See + * select_mergejoin_clauses()) + */ +List * +trim_mergeclauses_for_inner_pathkeys(PlannerInfo *root, + List *mergeclauses, + List *pathkeys) +{ + List *new_mergeclauses = NIL; + PathKey *pathkey; + EquivalenceClass *pathkey_ec; + bool matched_pathkey; + ListCell *lip; + ListCell *i; + + /* No pathkeys => no mergeclauses (though we don't expect this case) */ + if (pathkeys == NIL) + return NIL; + /* Initialize to consider first pathkey */ + lip = list_head(pathkeys); + pathkey = (PathKey *) lfirst(lip); + pathkey_ec = pathkey->pk_eclass; + lip = lnext(lip); + matched_pathkey = false; + + /* Scan mergeclauses to see how many we can use */ + foreach(i, mergeclauses) + { + RestrictInfo *rinfo = (RestrictInfo *) lfirst(i); + EquivalenceClass *clause_ec; + + /* Assume we needn't do update_mergeclause_eclasses again here */ + + /* Check clause's inner-rel EC against current pathkey */ + clause_ec = rinfo->outer_is_left ? + rinfo->right_ec : rinfo->left_ec; + + /* If we don't have a match, attempt to advance to next pathkey */ + if (clause_ec != pathkey_ec) + { + /* If we had no clauses matching this inner pathkey, must stop */ + if (!matched_pathkey) + break; + + /* Advance to next inner pathkey, if any */ + if (lip == NULL) + break; + pathkey = (PathKey *) lfirst(lip); + pathkey_ec = pathkey->pk_eclass; + lip = lnext(lip); + matched_pathkey = false; + } + + /* If mergeclause matches current inner pathkey, we can use it */ + if (clause_ec == pathkey_ec) + { + new_mergeclauses = lappend(new_mergeclauses, rinfo); + matched_pathkey = true; + } + else + { + /* Else, no hope of adding any more mergeclauses */ + break; + } + } + + return new_mergeclauses; +} + + /**************************************************************************** * PATHKEY USEFULNESS CHECKS * diff --git a/src/backend/optimizer/path/tidpath.c b/src/backend/optimizer/path/tidpath.c index a2fe661075..3bb5b8def6 100644 --- a/src/backend/optimizer/path/tidpath.c +++ b/src/backend/optimizer/path/tidpath.c @@ -25,7 +25,7 @@ * for that. * * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * diff --git a/src/backend/optimizer/plan/analyzejoins.c b/src/backend/optimizer/plan/analyzejoins.c index 34317fe778..0e73f9cf4c 100644 --- a/src/backend/optimizer/plan/analyzejoins.c +++ b/src/backend/optimizer/plan/analyzejoins.c @@ -11,7 +11,7 @@ * is that we have to work harder to clean up after ourselves when we modify * the query, since the derived data structures have to be updated too. 
* - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * @@ -42,6 +42,7 @@ static bool rel_is_distinct_for(PlannerInfo *root, RelOptInfo *rel, List *clause_list); static Oid distinct_col_search(int colno, List *colnos, List *opids); static bool is_innerrel_unique_for(PlannerInfo *root, + Relids joinrelids, Relids outerrelids, RelOptInfo *innerrel, JoinType jointype, @@ -253,8 +254,7 @@ join_is_removable(PlannerInfo *root, SpecialJoinInfo *sjinfo) * above the outer join, even if it references no other rels (it might * be from WHERE, for example). */ - if (restrictinfo->is_pushed_down || - !bms_equal(restrictinfo->required_relids, joinrelids)) + if (RINFO_IS_PUSHED_DOWN(restrictinfo, joinrelids)) { /* * If such a clause actually references the inner rel then join @@ -422,8 +422,7 @@ remove_rel_from_query(PlannerInfo *root, int relid, Relids joinrelids) remove_join_clause_from_rels(root, rinfo, rinfo->required_relids); - if (rinfo->is_pushed_down || - !bms_equal(rinfo->required_relids, joinrelids)) + if (RINFO_IS_PUSHED_DOWN(rinfo, joinrelids)) { /* Recheck that qual doesn't actually reference the target rel */ Assert(!bms_is_member(relid, rinfo->clause_relids)); @@ -567,7 +566,8 @@ reduce_unique_semijoins(PlannerInfo *root) innerrel->joininfo); /* Test whether the innerrel is unique for those clauses. */ - if (!innerrel_is_unique(root, sjinfo->min_lefthand, innerrel, + if (!innerrel_is_unique(root, + joinrelids, sjinfo->min_lefthand, innerrel, JOIN_SEMI, restrictlist, true)) continue; @@ -582,7 +582,7 @@ reduce_unique_semijoins(PlannerInfo *root) * Could the relation possibly be proven distinct on some set of columns? * * This is effectively a pre-checking function for rel_is_distinct_for(). - * It must return TRUE if rel_is_distinct_for() could possibly return TRUE + * It must return true if rel_is_distinct_for() could possibly return true * with this rel, but it should not expend a lot of cycles. The idea is * that callers can avoid doing possibly-expensive processing to compute * rel_is_distinct_for()'s argument lists if the call could not possibly @@ -703,6 +703,14 @@ rel_is_distinct_for(PlannerInfo *root, RelOptInfo *rel, List *clause_list) else var = (Var *) get_leftop(rinfo->clause); + /* + * We may ignore any RelabelType node above the operand. (There + * won't be more than one, since eval_const_expressions() has been + * applied already.) + */ + if (var && IsA(var, RelabelType)) + var = (Var *) ((RelabelType *) var)->arg; + /* * If inner side isn't a Var referencing a subquery output column, * this clause doesn't help us. @@ -727,7 +735,7 @@ rel_is_distinct_for(PlannerInfo *root, RelOptInfo *rel, List *clause_list) * on some set of output columns? * * This is effectively a pre-checking function for query_is_distinct_for(). - * It must return TRUE if query_is_distinct_for() could possibly return TRUE + * It must return true if query_is_distinct_for() could possibly return true * with this query, but it should not expend a lot of cycles. 
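/*
 * A simplified sketch of the reordered distinctness test above: DISTINCT can
 * prove uniqueness even when the targetlist contains set-returning functions
 * (SRFs are expanded before de-duplication), so the SRF bail-out now comes
 * only after the DISTINCT check.  Inputs are boiled down to booleans; the
 * GROUP BY, set-operation, and LIMIT 1 cases are omitted.
 */
#include <stdbool.h>

static bool
sketch_query_is_distinct_for(bool has_distinct,
							 bool distinct_cols_covered_with_matching_ops,
							 bool has_target_srfs)
{
	/* DISTINCT (or DISTINCT ON) settles it, SRFs or not. */
	if (has_distinct && distinct_cols_covered_with_matching_ops)
		return true;

	/* Otherwise an SRF in the tlist can duplicate rows: give up. */
	if (has_target_srfs)
		return false;

	/* ... GROUP BY, set operations, and LIMIT 1 checks would follow ... */
	return false;
}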
The idea is * that callers can avoid doing possibly-expensive processing to compute * query_is_distinct_for()'s argument lists if the call could not possibly @@ -736,8 +744,8 @@ rel_is_distinct_for(PlannerInfo *root, RelOptInfo *rel, List *clause_list) bool query_supports_distinctness(Query *query) { - /* we don't cope with SRFs, see comment below */ - if (query->hasTargetSRFs) + /* SRFs break distinctness except with DISTINCT, see below */ + if (query->hasTargetSRFs && query->distinctClause == NIL) return false; /* check for features we can prove distinctness with */ @@ -778,21 +786,11 @@ query_is_distinct_for(Query *query, List *colnos, List *opids) Assert(list_length(colnos) == list_length(opids)); - /* - * A set-returning function in the query's targetlist can result in - * returning duplicate rows, if the SRF is evaluated after the - * de-duplication step; so we play it safe and say "no" if there are any - * SRFs. (We could be certain that it's okay if SRFs appear only in the - * specified columns, since those must be evaluated before de-duplication; - * but it doesn't presently seem worth the complication to check that.) - */ - if (query->hasTargetSRFs) - return false; - /* * DISTINCT (including DISTINCT ON) guarantees uniqueness if all the * columns in the DISTINCT clause appear in colnos and operator semantics - * match. + * match. This is true even if there are SRFs in the DISTINCT columns or + * elsewhere in the tlist. */ if (query->distinctClause) { @@ -811,6 +809,16 @@ query_is_distinct_for(Query *query, List *colnos, List *opids) return true; } + /* + * Otherwise, a set-returning function in the query's targetlist can + * result in returning duplicate rows, despite any grouping that might + * occur before tlist evaluation. (If all tlist SRFs are within GROUP BY + * columns, it would be safe because they'd be expanded before grouping. + * But it doesn't currently seem worth the effort to check for that.) + */ + if (query->hasTargetSRFs) + return false; + /* * Similarly, GROUP BY without GROUPING SETS guarantees uniqueness if all * the grouped columns appear in colnos and operator semantics match. @@ -941,7 +949,8 @@ distinct_col_search(int colno, List *colnos, List *opids) * * We need an actual RelOptInfo for the innerrel, but it's sufficient to * identify the outerrel by its Relids. This asymmetry supports use of this - * function before joinrels have been built. + * function before joinrels have been built. (The caller is expected to + * also supply the joinrelids, just to save recalculating that.) * * The proof must be made based only on clauses that will be "joinquals" * rather than "otherquals" at execution. For an inner join there's no @@ -960,6 +969,7 @@ distinct_col_search(int colno, List *colnos, List *opids) */ bool innerrel_is_unique(PlannerInfo *root, + Relids joinrelids, Relids outerrelids, RelOptInfo *innerrel, JoinType jointype, @@ -1008,7 +1018,7 @@ innerrel_is_unique(PlannerInfo *root, } /* No cached information, so try to make the proof. */ - if (is_innerrel_unique_for(root, outerrelids, innerrel, + if (is_innerrel_unique_for(root, joinrelids, outerrelids, innerrel, jointype, restrictlist)) { /* @@ -1067,6 +1077,7 @@ innerrel_is_unique(PlannerInfo *root, */ static bool is_innerrel_unique_for(PlannerInfo *root, + Relids joinrelids, Relids outerrelids, RelOptInfo *innerrel, JoinType jointype, @@ -1090,7 +1101,8 @@ is_innerrel_unique_for(PlannerInfo *root, * As noted above, if it's a pushed-down clause and we're at an outer * join, we can't use it. 
*/ - if (restrictinfo->is_pushed_down && IS_OUTER_JOIN(jointype)) + if (IS_OUTER_JOIN(jointype) && + RINFO_IS_PUSHED_DOWN(restrictinfo, joinrelids)) continue; /* Ignore if it's not a mergejoinable clause */ diff --git a/src/backend/optimizer/plan/createplan.c b/src/backend/optimizer/plan/createplan.c index 5c934f223d..da7a92081a 100644 --- a/src/backend/optimizer/plan/createplan.c +++ b/src/backend/optimizer/plan/createplan.c @@ -5,7 +5,7 @@ * Planning is complete, we just need to convert the selected * Path into a Plan. * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * @@ -41,6 +41,7 @@ #include "optimizer/var.h" #include "parser/parse_clause.h" #include "parser/parsetree.h" +#include "partitioning/partprune.h" #include "utils/lsyscache.h" @@ -62,10 +63,14 @@ * any sortgrouprefs specified in its pathtarget, with appropriate * ressortgroupref labels. This is passed down by parent nodes such as Sort * and Group, which need these values to be available in their inputs. + * + * CP_IGNORE_TLIST specifies that the caller plans to replace the targetlist, + * and therefore it doesn't matter a bit what target list gets generated. */ #define CP_EXACT_TLIST 0x0001 /* Plan must return specified tlist */ #define CP_SMALL_TLIST 0x0002 /* Prefer narrower tlists */ #define CP_LABEL_TLIST 0x0004 /* tlist must contain sortgrouprefs */ +#define CP_IGNORE_TLIST 0x0008 /* caller will replace tlist */ static Plan *create_plan_recurse(PlannerInfo *root, Path *best_path, @@ -87,7 +92,9 @@ static Material *create_material_plan(PlannerInfo *root, MaterialPath *best_path static Plan *create_unique_plan(PlannerInfo *root, UniquePath *best_path, int flags); static Gather *create_gather_plan(PlannerInfo *root, GatherPath *best_path); -static Plan *create_projection_plan(PlannerInfo *root, ProjectionPath *best_path); +static Plan *create_projection_plan(PlannerInfo *root, + ProjectionPath *best_path, + int flags); static Plan *inject_projection_plan(Plan *subplan, List *tlist, bool parallel_safe); static Sort *create_sort_plan(PlannerInfo *root, SortPath *best_path, int flags); static Group *create_group_plan(PlannerInfo *root, GroupPath *best_path); @@ -100,15 +107,6 @@ static WindowAgg *create_windowagg_plan(PlannerInfo *root, WindowAggPath *best_p static SetOp *create_setop_plan(PlannerInfo *root, SetOpPath *best_path, int flags); static RecursiveUnion *create_recursiveunion_plan(PlannerInfo *root, RecursiveUnionPath *best_path); -static void get_column_info_for_window(PlannerInfo *root, WindowClause *wc, - List *tlist, - int numSortCols, AttrNumber *sortColIdx, - int *partNumCols, - AttrNumber **partColIdx, - Oid **partOperators, - int *ordNumCols, - AttrNumber **ordColIdx, - Oid **ordOperators); static LockRows *create_lockrows_plan(PlannerInfo *root, LockRowsPath *best_path, int flags); static ModifyTable *create_modifytable_plan(PlannerInfo *root, ModifyTablePath *best_path); @@ -203,7 +201,8 @@ static NamedTuplestoreScan *make_namedtuplestorescan(List *qptlist, List *qpqual Index scanrelid, char *enrname); static WorkTableScan *make_worktablescan(List *qptlist, List *qpqual, Index scanrelid, int wtParam); -static Append *make_append(List *appendplans, List *tlist, List *partitioned_rels); +static Append *make_append(List *appendplans, int first_partial_plan, + List *tlist, PartitionPruneInfo *partpruneinfo); static RecursiveUnion 
*make_recursive_union(List *tlist, Plan *lefttree, Plan *righttree, @@ -250,7 +249,8 @@ static Plan *prepare_sort_from_pathkeys(Plan *lefttree, List *pathkeys, static EquivalenceMember *find_ec_member_for_tle(EquivalenceClass *ec, TargetEntry *tle, Relids relids); -static Sort *make_sort_from_pathkeys(Plan *lefttree, List *pathkeys); +static Sort *make_sort_from_pathkeys(Plan *lefttree, List *pathkeys, + Relids relids); static Sort *make_sort_from_groupcols(List *groupcls, AttrNumber *grpColIdx, Plan *lefttree); @@ -259,6 +259,8 @@ static WindowAgg *make_windowagg(List *tlist, Index winref, int partNumCols, AttrNumber *partColIdx, Oid *partOperators, int ordNumCols, AttrNumber *ordColIdx, Oid *ordOperators, int frameOptions, Node *startOffset, Node *endOffset, + Oid startInRangeFunc, Oid endInRangeFunc, + Oid inRangeColl, bool inRangeAsc, bool inRangeNullsFirst, Plan *lefttree); static Group *make_group(List *tlist, List *qual, int numGroupCols, AttrNumber *grpColIdx, Oid *grpOperators, @@ -267,7 +269,7 @@ static Unique *make_unique_from_sortclauses(Plan *lefttree, List *distinctList); static Unique *make_unique_from_pathkeys(Plan *lefttree, List *pathkeys, int numCols); static Gather *make_gather(List *qptlist, List *qpqual, - int nworkers, bool single_copy, Plan *subplan); + int nworkers, int rescan_param, bool single_copy, Plan *subplan); static SetOp *make_setop(SetOpCmd cmd, SetOpStrategy strategy, Plan *lefttree, List *distinctList, AttrNumber flagColIdx, int firstFlag, long numGroups); @@ -276,8 +278,9 @@ static Result *make_result(List *tlist, Node *resconstantqual, Plan *subplan); static ProjectSet *make_project_set(List *tlist, Plan *subplan); static ModifyTable *make_modifytable(PlannerInfo *root, CmdType operation, bool canSetTag, - Index nominalRelation, List *partitioned_rels, - List *resultRelations, List *subplans, + Index nominalRelation, Index rootRelation, + bool partColsUpdated, + List *resultRelations, List *subplans, List *subroots, List *withCheckOptionLists, List *returningLists, List *rowMarks, OnConflictExpr *onconflict, int epqParam); static GatherMerge *create_gather_merge_plan(PlannerInfo *root, @@ -355,6 +358,9 @@ create_plan_recurse(PlannerInfo *root, Path *best_path, int flags) { Plan *plan; + /* Guard against stack overflow due to overly complex plans */ + check_stack_depth(); + switch (best_path->pathtype) { case T_SeqScan: @@ -392,7 +398,8 @@ create_plan_recurse(PlannerInfo *root, Path *best_path, int flags) if (IsA(best_path, ProjectionPath)) { plan = create_projection_plan(root, - (ProjectionPath *) best_path); + (ProjectionPath *) best_path, + flags); } else if (IsA(best_path, MinMaxAggPath)) { @@ -555,8 +562,16 @@ create_scan_plan(PlannerInfo *root, Path *best_path, int flags) * only those Vars actually needed by the query), we prefer to generate a * tlist containing all Vars in order. This will allow the executor to * optimize away projection of the table tuples, if possible. + * + * But if the caller is going to ignore our tlist anyway, then don't + * bother generating one at all. We use an exact equality test here, so + * that this only applies when CP_IGNORE_TLIST is the only flag set. 
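/*
 * A small sketch of the flag convention introduced above: CP_IGNORE_TLIST is
 * a bit like the other CP_* flags, but the "skip building a tlist" shortcut
 * applies only when it is the *only* bit set, hence the exact-equality test
 * rather than a bitwise AND.  The SK_ values mirror the CP_ definitions
 * earlier in this file; the function itself is illustrative only.
 */
#include <stdbool.h>

#define SK_CP_EXACT_TLIST	0x0001
#define SK_CP_SMALL_TLIST	0x0002
#define SK_CP_LABEL_TLIST	0x0004
#define SK_CP_IGNORE_TLIST	0x0008

static bool
sketch_can_skip_tlist(int flags)
{
	/* "==" rather than "&": any other request still needs a tlist. */
	return flags == SK_CP_IGNORE_TLIST;
}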
*/ - if (use_physical_tlist(root, best_path, flags)) + if (flags == CP_IGNORE_TLIST) + { + tlist = NULL; + } + else if (use_physical_tlist(root, best_path, flags)) { if (best_path->pathtype == T_IndexOnlyScan) { @@ -564,10 +579,10 @@ create_scan_plan(PlannerInfo *root, Path *best_path, int flags) tlist = copyObject(((IndexPath *) best_path)->indexinfo->indextlist); /* - * Transfer any sortgroupref data to the replacement tlist, unless - * we don't care because the gating Result will handle it. + * Transfer sortgroupref data to the replacement tlist, if + * requested (use_physical_tlist checked that this will work). */ - if (!gating_clauses) + if (flags & CP_LABEL_TLIST) apply_pathtarget_labeling_to_tlist(tlist, best_path->pathtarget); } else @@ -581,7 +596,7 @@ create_scan_plan(PlannerInfo *root, Path *best_path, int flags) else { /* As above, transfer sortgroupref data to replacement tlist */ - if (!gating_clauses) + if (flags & CP_LABEL_TLIST) apply_pathtarget_labeling_to_tlist(tlist, best_path->pathtarget); } } @@ -806,6 +821,15 @@ use_physical_tlist(PlannerInfo *root, Path *path, int flags) if (IsA(path, CustomPath)) return false; + /* + * If a bitmap scan's tlist is empty, keep it as-is. This may allow the + * executor to skip heap page fetches, and in any case, the benefit of + * using a physical tlist instead would be minimal. + */ + if (IsA(path, BitmapHeapPath) && + path->pathtarget->exprs == NIL) + return false; + /* * Can't do it if any system columns or whole-row Vars are requested. * (This could possibly be fixed but would take some fragile assumptions @@ -1005,6 +1029,8 @@ create_append_plan(PlannerInfo *root, AppendPath *best_path) List *tlist = build_path_tlist(root, &best_path->path); List *subplans = NIL; ListCell *subpaths; + RelOptInfo *rel = best_path->path.parent; + PartitionPruneInfo *partpruneinfo = NULL; /* * The subpaths list could be empty, if every child was proven empty by @@ -1042,6 +1068,38 @@ create_append_plan(PlannerInfo *root, AppendPath *best_path) subplans = lappend(subplans, subplan); } + /* + * If any quals exist, they may be useful to perform further partition + * pruning during execution. Gather information needed by the executor to + * do partition pruning. + */ + if (enable_partition_pruning && + rel->reloptkind == RELOPT_BASEREL && + best_path->partitioned_rels != NIL) + { + List *prunequal; + + prunequal = extract_actual_clauses(rel->baserestrictinfo, false); + + if (best_path->path.param_info) + { + List *prmquals = best_path->path.param_info->ppi_clauses; + + prmquals = extract_actual_clauses(prmquals, false); + prmquals = (List *) replace_nestloop_params(root, + (Node *) prmquals); + + prunequal = list_concat(prunequal, prmquals); + } + + if (prunequal != NIL) + partpruneinfo = + make_partition_pruneinfo(root, rel, + best_path->subpaths, + best_path->partitioned_rels, + prunequal); + } + /* * XXX ideally, if there's just one child, we'd not bother to generate an * Append node but just return the single child. At the moment this does @@ -1049,7 +1107,8 @@ create_append_plan(PlannerInfo *root, AppendPath *best_path) * parent-rel Vars it'll be asked to emit. 
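/*
 * A standalone sketch of the qual-gathering step above: the rel's
 * baserestrict quals are combined with any clauses coming from the path's
 * parameterization, and only if something remains is executor-time pruning
 * information built.  Lists are reduced to counted arrays and the helpers
 * are stand-ins for extract_actual_clauses()/make_partition_pruneinfo().
 */
#include <stddef.h>

typedef struct SketchQualSet
{
	void	  **quals;
	int			nquals;
} SketchQualSet;

extern void *sketch_build_pruneinfo(const SketchQualSet *quals);

static void *
sketch_gather_prune_quals(const SketchQualSet *baserestrict,
						  const SketchQualSet *param_clauses,	/* may be NULL */
						  void **workspace)
{
	SketchQualSet all;
	int			i;

	all.quals = workspace;
	all.nquals = 0;
	for (i = 0; i < baserestrict->nquals; i++)
		all.quals[all.nquals++] = baserestrict->quals[i];
	if (param_clauses != NULL)
	{
		for (i = 0; i < param_clauses->nquals; i++)
			all.quals[all.nquals++] = param_clauses->quals[i];
	}

	/* No usable quals => no run-time pruning structure at all. */
	if (all.nquals == 0)
		return NULL;
	return sketch_build_pruneinfo(&all);
}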
*/ - plan = make_append(subplans, tlist, best_path->partitioned_rels); + plan = make_append(subplans, best_path->first_partial_path, + tlist, partpruneinfo); copy_generic_path_info(&plan->plan, (Path *) best_path); @@ -1072,6 +1131,8 @@ create_merge_append_plan(PlannerInfo *root, MergeAppendPath *best_path) List *pathkeys = best_path->path.pathkeys; List *subplans = NIL; ListCell *subpaths; + RelOptInfo *rel = best_path->path.parent; + PartitionPruneInfo *partpruneinfo = NULL; /* * We don't have the actual creation of the MergeAppend node split out @@ -1157,8 +1218,40 @@ create_merge_append_plan(PlannerInfo *root, MergeAppendPath *best_path) subplans = lappend(subplans, subplan); } - node->partitioned_rels = best_path->partitioned_rels; + /* + * If any quals exist, they may be useful to perform further partition + * pruning during execution. Gather information needed by the executor to + * do partition pruning. + */ + if (enable_partition_pruning && + rel->reloptkind == RELOPT_BASEREL && + best_path->partitioned_rels != NIL) + { + List *prunequal; + + prunequal = extract_actual_clauses(rel->baserestrictinfo, false); + + if (best_path->path.param_info) + { + + List *prmquals = best_path->path.param_info->ppi_clauses; + + prmquals = extract_actual_clauses(prmquals, false); + prmquals = (List *) replace_nestloop_params(root, + (Node *) prmquals); + + prunequal = list_concat(prunequal, prmquals); + } + + if (prunequal != NIL) + partpruneinfo = make_partition_pruneinfo(root, rel, + best_path->subpaths, + best_path->partitioned_rels, + prunequal); + } + node->mergeplans = subplans; + node->part_prune_info = partpruneinfo; return (Plan *) node; } @@ -1471,6 +1564,7 @@ create_gather_plan(PlannerInfo *root, GatherPath *best_path) gather_plan = make_gather(tlist, NIL, best_path->num_workers, + SS_assign_special_param(root), best_path->single_copy, subplan); @@ -1505,6 +1599,9 @@ create_gather_merge_plan(PlannerInfo *root, GatherMergePath *best_path) gm_plan->num_workers = best_path->num_workers; copy_generic_path_info(&gm_plan->plan, &best_path->path); + /* Assign the rescan Param. */ + gm_plan->rescan_param = SS_assign_special_param(root); + /* Gather Merge is pointless with no pathkeys; use Gather instead. */ Assert(pathkeys != NIL); @@ -1545,34 +1642,71 @@ create_gather_merge_plan(PlannerInfo *root, GatherMergePath *best_path) * but sometimes we can just let the subplan do the work. */ static Plan * -create_projection_plan(PlannerInfo *root, ProjectionPath *best_path) +create_projection_plan(PlannerInfo *root, ProjectionPath *best_path, int flags) { Plan *plan; Plan *subplan; List *tlist; + bool needs_result_node = false; - /* Since we intend to project, we don't need to constrain child tlist */ - subplan = create_plan_recurse(root, best_path->subpath, 0); - - tlist = build_path_tlist(root, &best_path->path); + /* + * Convert our subpath to a Plan and determine whether we need a Result + * node. + * + * In most cases where we don't need to project, creation_projection_path + * will have set dummypp, but not always. First, some createplan.c + * routines change the tlists of their nodes. (An example is that + * create_merge_append_plan might add resjunk sort columns to a + * MergeAppend.) Second, create_projection_path has no way of knowing + * what path node will be placed on top of the projection path and + * therefore can't predict whether it will require an exact tlist. For + * both of these reasons, we have to recheck here. 
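/*
 * A compact sketch of the three-way choice the rewritten projection-plan
 * logic below makes, boiled down to booleans.  The enum and helper are
 * illustrative only, not planner code.
 */
#include <stdbool.h>

typedef enum SketchProjChoice
{
	SK_USE_SUBPLAN_TLIST,		/* caller doesn't care: keep child tlist */
	SK_PUSH_TLIST_INTO_SUBPLAN, /* child can project: no Result needed */
	SK_ADD_RESULT_NODE			/* separate Result node required */
} SketchProjChoice;

static SketchProjChoice
sketch_projection_choice(bool caller_accepts_any_tlist,
						 bool subplan_can_project,
						 bool tlists_already_match)
{
	if (caller_accepts_any_tlist)
		return SK_USE_SUBPLAN_TLIST;
	if (subplan_can_project)
		return SK_PUSH_TLIST_INTO_SUBPLAN;
	return tlists_already_match ? SK_USE_SUBPLAN_TLIST : SK_ADD_RESULT_NODE;
}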
+ */ + if (use_physical_tlist(root, &best_path->path, flags)) + { + /* + * Our caller doesn't really care what tlist we return, so we don't + * actually need to project. However, we may still need to ensure + * proper sortgroupref labels, if the caller cares about those. + */ + subplan = create_plan_recurse(root, best_path->subpath, 0); + tlist = subplan->targetlist; + if (flags & CP_LABEL_TLIST) + apply_pathtarget_labeling_to_tlist(tlist, + best_path->path.pathtarget); + } + else if (is_projection_capable_path(best_path->subpath)) + { + /* + * Our caller requires that we return the exact tlist, but no separate + * result node is needed because the subpath is projection-capable. + * Tell create_plan_recurse that we're going to ignore the tlist it + * produces. + */ + subplan = create_plan_recurse(root, best_path->subpath, + CP_IGNORE_TLIST); + tlist = build_path_tlist(root, &best_path->path); + } + else + { + /* + * It looks like we need a result node, unless by good fortune the + * requested tlist is exactly the one the child wants to produce. + */ + subplan = create_plan_recurse(root, best_path->subpath, 0); + tlist = build_path_tlist(root, &best_path->path); + needs_result_node = !tlist_same_exprs(tlist, subplan->targetlist); + } /* - * We might not really need a Result node here, either because the subplan - * can project or because it's returning the right list of expressions - * anyway. Usually create_projection_path will have detected that and set - * dummypp if we don't need a Result; but its decision can't be final, - * because some createplan.c routines change the tlists of their nodes. - * (An example is that create_merge_append_plan might add resjunk sort - * columns to a MergeAppend.) So we have to recheck here. If we do - * arrive at a different answer than create_projection_path did, we'll - * have made slightly wrong cost estimates; but label the plan with the - * cost estimates we actually used, not "corrected" ones. (XXX this could - * be cleaned up if we moved more of the sortcolumn setup logic into Path - * creation, but that would add expense to creating Paths we might end up - * not using.) + * If we make a different decision about whether to include a Result node + * than create_projection_path did, we'll have made slightly wrong cost + * estimates; but label the plan with the cost estimates we actually used, + * not "corrected" ones. (XXX this could be cleaned up if we moved more + * of the sortcolumn setup logic into Path creation, but that would add + * expense to creating Paths we might end up not using.) */ - if (is_projection_capable_path(best_path->subpath) || - tlist_same_exprs(tlist, subplan->targetlist)) + if (!needs_result_node) { /* Don't need a separate Result, just assign tlist to subplan */ plan = subplan; @@ -1648,7 +1782,15 @@ create_sort_plan(PlannerInfo *root, SortPath *best_path, int flags) subplan = create_plan_recurse(root, best_path->subpath, flags | CP_SMALL_TLIST); - plan = make_sort_from_pathkeys(subplan, best_path->path.pathkeys); + /* + * make_sort_from_pathkeys() indirectly calls find_ec_member_for_tle(), + * which will ignore any child EC members that don't belong to the given + * relids. Thus, if this sort path is based on a child relation, we must + * pass its relids. + */ + plan = make_sort_from_pathkeys(subplan, best_path->path.pathkeys, + IS_OTHER_REL(best_path->subpath->parent) ? 
+ best_path->path.parent->relids : NULL); copy_generic_path_info(&plan->plan, (Path *) best_path); @@ -1861,7 +2003,7 @@ create_groupingsets_plan(PlannerInfo *root, GroupingSetsPath *best_path) * create_modifytable_plan). Fortunately we can't be because there would * never be grouping in an UPDATE/DELETE; but let's Assert that. */ - Assert(!root->hasInheritedTarget); + Assert(root->inhTargetKind == INHKIND_NONE); Assert(root->grouping_map == NULL); root->grouping_map = grouping_map; @@ -2023,7 +2165,7 @@ create_minmaxagg_plan(PlannerInfo *root, MinMaxAggPath *best_path) * create_modifytable_plan). Fortunately we can't be because there would * never be aggregates in an UPDATE/DELETE; but let's Assert that. */ - Assert(!root->hasInheritedTarget); + Assert(root->inhTargetKind == INHKIND_NONE); Assert(root->minmax_aggs == NIL); root->minmax_aggs = best_path->mmaggregates; @@ -2041,19 +2183,17 @@ create_windowagg_plan(PlannerInfo *root, WindowAggPath *best_path) { WindowAgg *plan; WindowClause *wc = best_path->winclause; + int numPart = list_length(wc->partitionClause); + int numOrder = list_length(wc->orderClause); Plan *subplan; List *tlist; - int numsortkeys; - AttrNumber *sortColIdx; - Oid *sortOperators; - Oid *collations; - bool *nullsFirst; int partNumCols; AttrNumber *partColIdx; Oid *partOperators; int ordNumCols; AttrNumber *ordColIdx; Oid *ordOperators; + ListCell *lc; /* * WindowAgg can project, so no need to be terribly picky about child @@ -2064,32 +2204,43 @@ create_windowagg_plan(PlannerInfo *root, WindowAggPath *best_path) tlist = build_path_tlist(root, &best_path->path); /* - * We shouldn't need to actually sort, but it's convenient to use - * prepare_sort_from_pathkeys to identify the input's sort columns. + * Convert SortGroupClause lists into arrays of attr indexes and equality + * operators, as wanted by executor. (Note: in principle, it's possible + * to drop some of the sort columns, if they were proved redundant by + * pathkey logic. However, it doesn't seem worth going out of our way to + * optimize such cases. In any case, we must *not* remove the ordering + * column for RANGE OFFSET cases, as the executor needs that for in_range + * tests even if it's known to be equal to some partitioning column.) 
*/ - subplan = prepare_sort_from_pathkeys(subplan, - best_path->winpathkeys, - NULL, - NULL, - false, - &numsortkeys, - &sortColIdx, - &sortOperators, - &collations, - &nullsFirst); - - /* Now deconstruct that into partition and ordering portions */ - get_column_info_for_window(root, - wc, - subplan->targetlist, - numsortkeys, - sortColIdx, - &partNumCols, - &partColIdx, - &partOperators, - &ordNumCols, - &ordColIdx, - &ordOperators); + partColIdx = (AttrNumber *) palloc(sizeof(AttrNumber) * numPart); + partOperators = (Oid *) palloc(sizeof(Oid) * numPart); + + partNumCols = 0; + foreach(lc, wc->partitionClause) + { + SortGroupClause *sgc = (SortGroupClause *) lfirst(lc); + TargetEntry *tle = get_sortgroupclause_tle(sgc, subplan->targetlist); + + Assert(OidIsValid(sgc->eqop)); + partColIdx[partNumCols] = tle->resno; + partOperators[partNumCols] = sgc->eqop; + partNumCols++; + } + + ordColIdx = (AttrNumber *) palloc(sizeof(AttrNumber) * numOrder); + ordOperators = (Oid *) palloc(sizeof(Oid) * numOrder); + + ordNumCols = 0; + foreach(lc, wc->orderClause) + { + SortGroupClause *sgc = (SortGroupClause *) lfirst(lc); + TargetEntry *tle = get_sortgroupclause_tle(sgc, subplan->targetlist); + + Assert(OidIsValid(sgc->eqop)); + ordColIdx[ordNumCols] = tle->resno; + ordOperators[ordNumCols] = sgc->eqop; + ordNumCols++; + } /* And finally we can make the WindowAgg node */ plan = make_windowagg(tlist, @@ -2103,6 +2254,11 @@ create_windowagg_plan(PlannerInfo *root, WindowAggPath *best_path) wc->frameOptions, wc->startOffset, wc->endOffset, + wc->startInRangeFunc, + wc->endInRangeFunc, + wc->inRangeColl, + wc->inRangeAsc, + wc->inRangeNullsFirst, subplan); copy_generic_path_info(&plan->plan, (Path *) best_path); @@ -2110,112 +2266,6 @@ create_windowagg_plan(PlannerInfo *root, WindowAggPath *best_path) return plan; } -/* - * get_column_info_for_window - * Get the partitioning/ordering column numbers and equality operators - * for a WindowAgg node. - * - * This depends on the behavior of planner.c's make_pathkeys_for_window! - * - * We are given the target WindowClause and an array of the input column - * numbers associated with the resulting pathkeys. In the easy case, there - * are the same number of pathkey columns as partitioning + ordering columns - * and we just have to copy some data around. However, it's possible that - * some of the original partitioning + ordering columns were eliminated as - * redundant during the transformation to pathkeys. (This can happen even - * though the parser gets rid of obvious duplicates. A typical scenario is a - * window specification "PARTITION BY x ORDER BY y" coupled with a clause - * "WHERE x = y" that causes the two sort columns to be recognized as - * redundant.) In that unusual case, we have to work a lot harder to - * determine which keys are significant. - * - * The method used here is a bit brute-force: add the sort columns to a list - * one at a time and note when the resulting pathkey list gets longer. But - * it's a sufficiently uncommon case that a faster way doesn't seem worth - * the amount of code refactoring that'd be needed. 
- */ -static void -get_column_info_for_window(PlannerInfo *root, WindowClause *wc, List *tlist, - int numSortCols, AttrNumber *sortColIdx, - int *partNumCols, - AttrNumber **partColIdx, - Oid **partOperators, - int *ordNumCols, - AttrNumber **ordColIdx, - Oid **ordOperators) -{ - int numPart = list_length(wc->partitionClause); - int numOrder = list_length(wc->orderClause); - - if (numSortCols == numPart + numOrder) - { - /* easy case */ - *partNumCols = numPart; - *partColIdx = sortColIdx; - *partOperators = extract_grouping_ops(wc->partitionClause); - *ordNumCols = numOrder; - *ordColIdx = sortColIdx + numPart; - *ordOperators = extract_grouping_ops(wc->orderClause); - } - else - { - List *sortclauses; - List *pathkeys; - int scidx; - ListCell *lc; - - /* first, allocate what's certainly enough space for the arrays */ - *partNumCols = 0; - *partColIdx = (AttrNumber *) palloc(numPart * sizeof(AttrNumber)); - *partOperators = (Oid *) palloc(numPart * sizeof(Oid)); - *ordNumCols = 0; - *ordColIdx = (AttrNumber *) palloc(numOrder * sizeof(AttrNumber)); - *ordOperators = (Oid *) palloc(numOrder * sizeof(Oid)); - sortclauses = NIL; - pathkeys = NIL; - scidx = 0; - foreach(lc, wc->partitionClause) - { - SortGroupClause *sgc = (SortGroupClause *) lfirst(lc); - List *new_pathkeys; - - sortclauses = lappend(sortclauses, sgc); - new_pathkeys = make_pathkeys_for_sortclauses(root, - sortclauses, - tlist); - if (list_length(new_pathkeys) > list_length(pathkeys)) - { - /* this sort clause is actually significant */ - (*partColIdx)[*partNumCols] = sortColIdx[scidx++]; - (*partOperators)[*partNumCols] = sgc->eqop; - (*partNumCols)++; - pathkeys = new_pathkeys; - } - } - foreach(lc, wc->orderClause) - { - SortGroupClause *sgc = (SortGroupClause *) lfirst(lc); - List *new_pathkeys; - - sortclauses = lappend(sortclauses, sgc); - new_pathkeys = make_pathkeys_for_sortclauses(root, - sortclauses, - tlist); - if (list_length(new_pathkeys) > list_length(pathkeys)) - { - /* this sort clause is actually significant */ - (*ordColIdx)[*ordNumCols] = sortColIdx[scidx++]; - (*ordOperators)[*ordNumCols] = sgc->eqop; - (*ordNumCols)++; - pathkeys = new_pathkeys; - } - } - /* complain if we didn't eat exactly the right number of sort cols */ - if (scidx != numSortCols) - elog(ERROR, "failed to deconstruct sort operators into partitioning/ordering operators"); - } -} - /* * create_setop_plan * @@ -2356,9 +2406,11 @@ create_modifytable_plan(PlannerInfo *root, ModifyTablePath *best_path) best_path->operation, best_path->canSetTag, best_path->nominalRelation, - best_path->partitioned_rels, + best_path->rootRelation, + best_path->partColsUpdated, best_path->resultRelations, subplans, + best_path->subroots, best_path->withCheckOptionLists, best_path->returningLists, best_path->rowMarks, @@ -3484,7 +3536,7 @@ create_foreignscan_plan(PlannerInfo *root, ForeignPath *best_path, * upper rel doesn't have relids set, but it covers all the base relations * participating in the underlying scan, so use root's all_baserels. 
*/ - if (IS_UPPER_REL(rel)) + if (rel->reloptkind == RELOPT_UPPER_REL) scan_plan->fs_relids = root->all_baserels; else scan_plan->fs_relids = best_path->path.parent->relids; @@ -3678,6 +3730,7 @@ create_nestloop_plan(PlannerInfo *root, if (IS_OUTER_JOIN(best_path->jointype)) { extract_actual_join_clauses(joinrestrictclauses, + best_path->path.parent->relids, &joinclauses, &otherclauses); } else @@ -3763,10 +3816,14 @@ create_mergejoin_plan(PlannerInfo *root, Oid *mergecollations; int *mergestrategies; bool *mergenullsfirst; + PathKey *opathkey; + EquivalenceClass *opeclass; int i; ListCell *lc; ListCell *lop; ListCell *lip; + Path *outer_path = best_path->jpath.outerjoinpath; + Path *inner_path = best_path->jpath.innerjoinpath; /* * MergeJoin can project, so we don't have to demand exact tlists from the @@ -3789,6 +3846,7 @@ create_mergejoin_plan(PlannerInfo *root, if (IS_OUTER_JOIN(best_path->jpath.jointype)) { extract_actual_join_clauses(joinclauses, + best_path->jpath.path.parent->relids, &joinclauses, &otherclauses); } else @@ -3830,8 +3888,10 @@ create_mergejoin_plan(PlannerInfo *root, */ if (best_path->outersortkeys) { + Relids outer_relids = outer_path->parent->relids; Sort *sort = make_sort_from_pathkeys(outer_plan, - best_path->outersortkeys); + best_path->outersortkeys, + outer_relids); label_sort_with_costsize(root, sort, -1.0); outer_plan = (Plan *) sort; @@ -3842,8 +3902,10 @@ create_mergejoin_plan(PlannerInfo *root, if (best_path->innersortkeys) { + Relids inner_relids = inner_path->parent->relids; Sort *sort = make_sort_from_pathkeys(inner_plan, - best_path->innersortkeys); + best_path->innersortkeys, + inner_relids); label_sort_with_costsize(root, sort, -1.0); inner_plan = (Plan *) sort; @@ -3875,7 +3937,8 @@ create_mergejoin_plan(PlannerInfo *root, * Compute the opfamily/collation/strategy/nullsfirst arrays needed by the * executor. The information is in the pathkeys for the two inputs, but * we need to be careful about the possibility of mergeclauses sharing a - * pathkey (compare find_mergeclauses_for_pathkeys()). + * pathkey, as well as the possibility that the inner pathkeys are not in + * an order matching the mergeclauses. */ nClauses = list_length(mergeclauses); Assert(nClauses == list_length(best_path->path_mergeclauses)); @@ -3884,6 +3947,8 @@ create_mergejoin_plan(PlannerInfo *root, mergestrategies = (int *) palloc(nClauses * sizeof(int)); mergenullsfirst = (bool *) palloc(nClauses * sizeof(bool)); + opathkey = NULL; + opeclass = NULL; lop = list_head(outerpathkeys); lip = list_head(innerpathkeys); i = 0; @@ -3892,11 +3957,9 @@ create_mergejoin_plan(PlannerInfo *root, RestrictInfo *rinfo = lfirst_node(RestrictInfo, lc); EquivalenceClass *oeclass; EquivalenceClass *ieclass; - PathKey *opathkey; - PathKey *ipathkey; - EquivalenceClass *opeclass; - EquivalenceClass *ipeclass; - ListCell *l2; + PathKey *ipathkey = NULL; + EquivalenceClass *ipeclass = NULL; + bool first_inner_match = false; /* fetch outer/inner eclass from mergeclause */ if (rinfo->outer_is_left) @@ -3913,104 +3976,96 @@ create_mergejoin_plan(PlannerInfo *root, Assert(ieclass != NULL); /* - * For debugging purposes, we check that the eclasses match the paths' - * pathkeys. In typical cases the merge clauses are one-to-one with - * the pathkeys, but when dealing with partially redundant query - * conditions, we might have clauses that re-reference earlier path - * keys. The case that we need to reject is where a pathkey is - * entirely skipped over. 
+ * We must identify the pathkey elements associated with this clause + * by matching the eclasses (which should give a unique match, since + * the pathkey lists should be canonical). In typical cases the merge + * clauses are one-to-one with the pathkeys, but when dealing with + * partially redundant query conditions, things are more complicated. + * + * lop and lip reference the first as-yet-unmatched pathkey elements. + * If they're NULL then all pathkey elements have been matched. * - * lop and lip reference the first as-yet-unused pathkey elements; - * it's okay to match them, or any element before them. If they're - * NULL then we have found all pathkey elements to be used. + * The ordering of the outer pathkeys should match the mergeclauses, + * by construction (see find_mergeclauses_for_outer_pathkeys()). There + * could be more than one mergeclause for the same outer pathkey, but + * no pathkey may be entirely skipped over. */ - if (lop) + if (oeclass != opeclass) /* multiple matches are not interesting */ { + /* doesn't match the current opathkey, so must match the next */ + if (lop == NULL) + elog(ERROR, "outer pathkeys do not match mergeclauses"); opathkey = (PathKey *) lfirst(lop); opeclass = opathkey->pk_eclass; - if (oeclass == opeclass) - { - /* fast path for typical case */ - lop = lnext(lop); - } - else - { - /* redundant clauses ... must match something before lop */ - foreach(l2, outerpathkeys) - { - if (l2 == lop) - break; - opathkey = (PathKey *) lfirst(l2); - opeclass = opathkey->pk_eclass; - if (oeclass == opeclass) - break; - } - if (oeclass != opeclass) - elog(ERROR, "outer pathkeys do not match mergeclauses"); - } - } - else - { - /* redundant clauses ... must match some already-used pathkey */ - opathkey = NULL; - opeclass = NULL; - foreach(l2, outerpathkeys) - { - opathkey = (PathKey *) lfirst(l2); - opeclass = opathkey->pk_eclass; - if (oeclass == opeclass) - break; - } - if (l2 == NULL) + lop = lnext(lop); + if (oeclass != opeclass) elog(ERROR, "outer pathkeys do not match mergeclauses"); } + /* + * The inner pathkeys likewise should not have skipped-over keys, but + * it's possible for a mergeclause to reference some earlier inner + * pathkey if we had redundant pathkeys. For example we might have + * mergeclauses like "o.a = i.x AND o.b = i.y AND o.c = i.x". The + * implied inner ordering is then "ORDER BY x, y, x", but the pathkey + * mechanism drops the second sort by x as redundant, and this code + * must cope. + * + * It's also possible for the implied inner-rel ordering to be like + * "ORDER BY x, y, x DESC". We still drop the second instance of x as + * redundant; but this means that the sort ordering of a redundant + * inner pathkey should not be considered significant. So we must + * detect whether this is the first clause matching an inner pathkey. + */ if (lip) { ipathkey = (PathKey *) lfirst(lip); ipeclass = ipathkey->pk_eclass; if (ieclass == ipeclass) { - /* fast path for typical case */ + /* successful first match to this inner pathkey */ lip = lnext(lip); - } - else - { - /* redundant clauses ... must match something before lip */ - foreach(l2, innerpathkeys) - { - if (l2 == lip) - break; - ipathkey = (PathKey *) lfirst(l2); - ipeclass = ipathkey->pk_eclass; - if (ieclass == ipeclass) - break; - } - if (ieclass != ipeclass) - elog(ERROR, "inner pathkeys do not match mergeclauses"); + first_inner_match = true; } } - else + if (!first_inner_match) { - /* redundant clauses ... 
must match some already-used pathkey */ - ipathkey = NULL; - ipeclass = NULL; + /* redundant clause ... must match something before lip */ + ListCell *l2; + foreach(l2, innerpathkeys) { + if (l2 == lip) + break; ipathkey = (PathKey *) lfirst(l2); ipeclass = ipathkey->pk_eclass; if (ieclass == ipeclass) break; } - if (l2 == NULL) + if (ieclass != ipeclass) elog(ERROR, "inner pathkeys do not match mergeclauses"); } - /* pathkeys should match each other too (more debugging) */ + /* + * The pathkeys should always match each other as to opfamily and + * collation (which affect equality), but if we're considering a + * redundant inner pathkey, its sort ordering might not match. In + * such cases we may ignore the inner pathkey's sort ordering and use + * the outer's. (In effect, we're lying to the executor about the + * sort direction of this inner column, but it does not matter since + * the run-time row comparisons would only reach this column when + * there's equality for the earlier column containing the same eclass. + * There could be only one value in this column for the range of inner + * rows having a given value in the earlier column, so it does not + * matter which way we imagine this column to be ordered.) But a + * non-redundant inner pathkey had better match outer's ordering too. + */ if (opathkey->pk_opfamily != ipathkey->pk_opfamily || - opathkey->pk_eclass->ec_collation != ipathkey->pk_eclass->ec_collation || - opathkey->pk_strategy != ipathkey->pk_strategy || - opathkey->pk_nulls_first != ipathkey->pk_nulls_first) + opathkey->pk_eclass->ec_collation != ipathkey->pk_eclass->ec_collation) + elog(ERROR, "left and right pathkeys do not match in mergejoin"); + if (first_inner_match && + (opathkey->pk_strategy != ipathkey->pk_strategy || + opathkey->pk_nulls_first != ipathkey->pk_nulls_first)) elog(ERROR, "left and right pathkeys do not match in mergejoin"); /* OK, save info for executor */ @@ -4088,6 +4143,7 @@ create_hashjoin_plan(PlannerInfo *root, if (IS_OUTER_JOIN(best_path->jpath.jointype)) { extract_actual_join_clauses(joinclauses, + best_path->jpath.path.parent->relids, &joinclauses, &otherclauses); } else @@ -4170,6 +4226,17 @@ create_hashjoin_plan(PlannerInfo *root, copy_plan_costsize(&hash_plan->plan, inner_plan); hash_plan->plan.startup_cost = hash_plan->plan.total_cost; + /* + * If parallel-aware, the executor will also need an estimate of the total + * number of rows expected from all participants so that it can size the + * shared hash table. 
+ */ + if (best_path->jpath.path.parallel_aware) + { + hash_plan->plan.parallel_aware = true; + hash_plan->rows_total = best_path->inner_rows_total; + } + join_plan = make_hashjoin(tlist, joinclauses, otherclauses, @@ -4922,7 +4989,11 @@ bitmap_subplan_mark_shared(Plan *plan) bitmap_subplan_mark_shared( linitial(((BitmapAnd *) plan)->bitmapplans)); else if (IsA(plan, BitmapOr)) + { ((BitmapOr *) plan)->isshared = true; + bitmap_subplan_mark_shared( + linitial(((BitmapOr *) plan)->bitmapplans)); + } else if (IsA(plan, BitmapIndexScan)) ((BitmapIndexScan *) plan)->isshared = true; else @@ -5270,7 +5341,8 @@ make_foreignscan(List *qptlist, } static Append * -make_append(List *appendplans, List *tlist, List *partitioned_rels) +make_append(List *appendplans, int first_partial_plan, + List *tlist, PartitionPruneInfo *partpruneinfo) { Append *node = makeNode(Append); Plan *plan = &node->plan; @@ -5279,9 +5351,9 @@ make_append(List *appendplans, List *tlist, List *partitioned_rels) plan->qual = NIL; plan->lefttree = NULL; plan->righttree = NULL; - node->partitioned_rels = partitioned_rels; node->appendplans = appendplans; - + node->first_partial_plan = first_partial_plan; + node->part_prune_info = partpruneinfo; return node; } @@ -5513,7 +5585,7 @@ make_sort(Plan *lefttree, int numCols, * 'pathkeys' is the list of pathkeys by which the result is to be sorted * 'relids' identifies the child relation being sorted, if any * 'reqColIdx' is NULL or an array of required sort key column numbers - * 'adjust_tlist_in_place' is TRUE if lefttree must be modified in-place + * 'adjust_tlist_in_place' is true if lefttree must be modified in-place * * We must convert the pathkey information into arrays of sort key column * numbers, sort operator OIDs, collation OIDs, and nulls-first flags, @@ -5521,8 +5593,9 @@ make_sort(Plan *lefttree, int numCols, * the output parameters *p_numsortkeys etc. * * When looking for matches to an EquivalenceClass's members, we will only - * consider child EC members if they match 'relids'. This protects against - * possible incorrect matches to child expressions that contain no Vars. + * consider child EC members if they belong to given 'relids'. This protects + * against possible incorrect matches to child expressions that contain no + * Vars. * * If reqColIdx isn't NULL then it contains sort key column numbers that * we should match. This is used when making child plans for a MergeAppend; @@ -5533,7 +5606,7 @@ make_sort(Plan *lefttree, int numCols, * compute these expressions, since a Sort or MergeAppend node itself won't * do any such calculations. If the input plan type isn't one that can do * projections, this means adding a Result node just to do the projection. - * However, the caller can pass adjust_tlist_in_place = TRUE to force the + * However, the caller can pass adjust_tlist_in_place = true to force the * lefttree tlist to be modified in-place regardless of whether the node type * can project --- we use this for fixing the tlist of MergeAppend itself. * @@ -5677,11 +5750,11 @@ prepare_sort_from_pathkeys(Plan *lefttree, List *pathkeys, continue; /* - * Ignore child members unless they match the rel being + * Ignore child members unless they belong to the rel being * sorted. 
*/ if (em->em_is_child && - !bms_equal(em->em_relids, relids)) + !bms_is_subset(em->em_relids, relids)) continue; sortexpr = em->em_expr; @@ -5765,7 +5838,7 @@ prepare_sort_from_pathkeys(Plan *lefttree, List *pathkeys, * find_ec_member_for_tle * Locate an EquivalenceClass member matching the given TLE, if any * - * Child EC members are ignored unless they match 'relids'. + * Child EC members are ignored unless they belong to given 'relids'. */ static EquivalenceMember * find_ec_member_for_tle(EquivalenceClass *ec, @@ -5793,10 +5866,10 @@ find_ec_member_for_tle(EquivalenceClass *ec, continue; /* - * Ignore child members unless they match the rel being sorted. + * Ignore child members unless they belong to the rel being sorted. */ if (em->em_is_child && - !bms_equal(em->em_relids, relids)) + !bms_is_subset(em->em_relids, relids)) continue; /* Match if same expression (after stripping relabel) */ @@ -5817,9 +5890,10 @@ find_ec_member_for_tle(EquivalenceClass *ec, * * 'lefttree' is the node which yields input tuples * 'pathkeys' is the list of pathkeys by which the result is to be sorted + * 'relids' is the set of relations required by prepare_sort_from_pathkeys() */ static Sort * -make_sort_from_pathkeys(Plan *lefttree, List *pathkeys) +make_sort_from_pathkeys(Plan *lefttree, List *pathkeys, Relids relids) { int numsortkeys; AttrNumber *sortColIdx; @@ -5829,7 +5903,7 @@ make_sort_from_pathkeys(Plan *lefttree, List *pathkeys) /* Compute sort column info, and adjust lefttree as needed */ lefttree = prepare_sort_from_pathkeys(lefttree, pathkeys, - NULL, + relids, NULL, false, &numsortkeys, @@ -6034,6 +6108,8 @@ make_windowagg(List *tlist, Index winref, int partNumCols, AttrNumber *partColIdx, Oid *partOperators, int ordNumCols, AttrNumber *ordColIdx, Oid *ordOperators, int frameOptions, Node *startOffset, Node *endOffset, + Oid startInRangeFunc, Oid endInRangeFunc, + Oid inRangeColl, bool inRangeAsc, bool inRangeNullsFirst, Plan *lefttree) { WindowAgg *node = makeNode(WindowAgg); @@ -6049,6 +6125,11 @@ make_windowagg(List *tlist, Index winref, node->frameOptions = frameOptions; node->startOffset = startOffset; node->endOffset = endOffset; + node->startInRangeFunc = startInRangeFunc; + node->endInRangeFunc = endInRangeFunc; + node->inRangeColl = inRangeColl; + node->inRangeAsc = inRangeAsc; + node->inRangeNullsFirst = inRangeNullsFirst; plan->targetlist = tlist; plan->lefttree = lefttree; @@ -6238,6 +6319,7 @@ static Gather * make_gather(List *qptlist, List *qpqual, int nworkers, + int rescan_param, bool single_copy, Plan *subplan) { @@ -6249,8 +6331,10 @@ make_gather(List *qptlist, plan->lefttree = subplan; plan->righttree = NULL; node->num_workers = nworkers; + node->rescan_param = rescan_param; node->single_copy = single_copy; node->invisible = false; + node->initParam = NULL; return node; } @@ -6282,7 +6366,6 @@ make_setop(SetOpCmd cmd, SetOpStrategy strategy, Plan *lefttree, * convert SortGroupClause list into arrays of attr indexes and equality * operators, as wanted by executor */ - Assert(numCols > 0); dupColIdx = (AttrNumber *) palloc(sizeof(AttrNumber) * numCols); dupOperators = (Oid *) palloc(sizeof(Oid) * numCols); @@ -6398,8 +6481,9 @@ make_project_set(List *tlist, static ModifyTable * make_modifytable(PlannerInfo *root, CmdType operation, bool canSetTag, - Index nominalRelation, List *partitioned_rels, - List *resultRelations, List *subplans, + Index nominalRelation, Index rootRelation, + bool partColsUpdated, + List *resultRelations, List *subplans, List *subroots, List 
*withCheckOptionLists, List *returningLists, List *rowMarks, OnConflictExpr *onconflict, int epqParam) { @@ -6407,9 +6491,11 @@ make_modifytable(PlannerInfo *root, List *fdw_private_list; Bitmapset *direct_modify_plans; ListCell *lc; + ListCell *lc2; int i; Assert(list_length(resultRelations) == list_length(subplans)); + Assert(list_length(resultRelations) == list_length(subroots)); Assert(withCheckOptionLists == NIL || list_length(resultRelations) == list_length(withCheckOptionLists)); Assert(returningLists == NIL || @@ -6424,7 +6510,8 @@ make_modifytable(PlannerInfo *root, node->operation = operation; node->canSetTag = canSetTag; node->nominalRelation = nominalRelation; - node->partitioned_rels = partitioned_rels; + node->rootRelation = rootRelation; + node->partColsUpdated = partColsUpdated; node->resultRelations = resultRelations; node->resultRelIndex = -1; /* will be set correctly in setrefs.c */ node->rootResultRelIndex = -1; /* will be set correctly in setrefs.c */ @@ -6467,9 +6554,10 @@ make_modifytable(PlannerInfo *root, fdw_private_list = NIL; direct_modify_plans = NULL; i = 0; - foreach(lc, resultRelations) + forboth(lc, resultRelations, lc2, subroots) { Index rti = lfirst_int(lc); + PlannerInfo *subroot = lfirst_node(PlannerInfo, lc2); FdwRoutine *fdwroutine; List *fdw_private; bool direct_modify; @@ -6481,16 +6569,16 @@ make_modifytable(PlannerInfo *root, * so it's not a baserel; and there are also corner cases for * updatable views where the target rel isn't a baserel.) */ - if (rti < root->simple_rel_array_size && - root->simple_rel_array[rti] != NULL) + if (rti < subroot->simple_rel_array_size && + subroot->simple_rel_array[rti] != NULL) { - RelOptInfo *resultRel = root->simple_rel_array[rti]; + RelOptInfo *resultRel = subroot->simple_rel_array[rti]; fdwroutine = resultRel->fdwroutine; } else { - RangeTblEntry *rte = planner_rt_fetch(rti, root); + RangeTblEntry *rte = planner_rt_fetch(rti, subroot); Assert(rte->rtekind == RTE_RELATION); if (rte->relkind == RELKIND_FOREIGN_TABLE) @@ -6512,15 +6600,15 @@ make_modifytable(PlannerInfo *root, fdwroutine->IterateDirectModify != NULL && fdwroutine->EndDirectModify != NULL && withCheckOptionLists == NIL && - !has_row_triggers(root, rti, operation)) - direct_modify = fdwroutine->PlanDirectModify(root, node, rti, i); + !has_row_triggers(subroot, rti, operation)) + direct_modify = fdwroutine->PlanDirectModify(subroot, node, rti, i); if (direct_modify) direct_modify_plans = bms_add_member(direct_modify_plans, i); if (!direct_modify && fdwroutine != NULL && fdwroutine->PlanForeignModify != NULL) - fdw_private = fdwroutine->PlanForeignModify(root, node, rti, i); + fdw_private = fdwroutine->PlanForeignModify(subroot, node, rti, i); else fdw_private = NIL; fdw_private_list = lappend(fdw_private_list, fdw_private); diff --git a/src/backend/optimizer/plan/initsplan.c b/src/backend/optimizer/plan/initsplan.c index 987c20ac9f..01335db511 100644 --- a/src/backend/optimizer/plan/initsplan.c +++ b/src/backend/optimizer/plan/initsplan.c @@ -3,7 +3,7 @@ * initsplan.c * Target list, qualification, joininfo initialization routines * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * @@ -15,6 +15,7 @@ #include "postgres.h" #include "catalog/pg_type.h" +#include "catalog/pg_class.h" #include "nodes/nodeFuncs.h" #include "optimizer/clauses.h" #include "optimizer/cost.h" @@ -629,11 
+630,31 @@ create_lateral_join_info(PlannerInfo *root) for (rti = 1; rti < root->simple_rel_array_size; rti++) { RelOptInfo *brel = root->simple_rel_array[rti]; + RangeTblEntry *brte = root->simple_rte_array[rti]; - if (brel == NULL || brel->reloptkind != RELOPT_BASEREL) + /* + * Skip empty slots. Also skip non-simple relations i.e. dead + * relations. + */ + if (brel == NULL || !IS_SIMPLE_REL(brel)) continue; - if (root->simple_rte_array[rti]->inh) + /* + * In the case of table inheritance, the parent RTE is directly linked + * to every child table via an AppendRelInfo. In the case of table + * partitioning, the inheritance hierarchy is expanded one level at a + * time rather than flattened. Therefore, an other member rel that is + * a partitioned table may have children of its own, and must + * therefore be marked with the appropriate lateral info so that those + * children eventually get marked also. + */ + Assert(brte); + if (brel->reloptkind == RELOPT_OTHER_MEMBER_REL && + (brte->rtekind != RTE_RELATION || + brte->relkind != RELKIND_PARTITIONED_TABLE)) + continue; + + if (brte->inh) { foreach(lc, root->append_rel_list) { @@ -719,7 +740,7 @@ deconstruct_jointree(PlannerInfo *root) * * Inputs: * jtnode is the jointree node to examine - * below_outer_join is TRUE if this node is within the nullable side of a + * below_outer_join is true if this node is within the nullable side of a * higher-level outer join * Outputs: * *qualscope gets the set of base Relids syntactically included in this @@ -1588,8 +1609,8 @@ compute_semijoin_info(SpecialJoinInfo *sjinfo, List *clause) * as belonging to a higher join level, just add it to postponed_qual_list. * * 'clause': the qual clause to be distributed - * 'is_deduced': TRUE if the qual came from implied-equality deduction - * 'below_outer_join': TRUE if the qual is from a JOIN/ON that is below the + * 'is_deduced': true if the qual came from implied-equality deduction + * 'below_outer_join': true if the qual is from a JOIN/ON that is below the * nullable side of a higher-level outer join * 'jointype': type of join the qual is from (JOIN_INNER for a WHERE clause) * 'security_level': security_level to assign to the qual @@ -1600,7 +1621,7 @@ compute_semijoin_info(SpecialJoinInfo *sjinfo, List *clause) * baserels appearing on the outer (nonnullable) side of the join * (for FULL JOIN this includes both sides of the join, and must in fact * equal qualscope) - * 'deduced_nullable_relids': if is_deduced is TRUE, the nullable relids to + * 'deduced_nullable_relids': if is_deduced is true, the nullable relids to * impute to the clause; otherwise NULL * 'postponed_qual_list': list of PostponedQual structs, which we can add * this qual to if it turns out to belong to a higher join level. @@ -1610,9 +1631,9 @@ compute_semijoin_info(SpecialJoinInfo *sjinfo, List *clause) * 'ojscope' is needed if we decide to force the qual up to the outer-join * level, which will be ojscope not necessarily qualscope. * - * In normal use (when is_deduced is FALSE), at the time this is called, + * In normal use (when is_deduced is false), at the time this is called, * root->join_info_list must contain entries for all and only those special - * joins that are syntactically below this qual. But when is_deduced is TRUE, + * joins that are syntactically below this qual. But when is_deduced is true, * we are adding new deduced clauses after completion of deconstruct_jointree, * so it cannot be assumed that root->join_info_list has anything to do with * qual placement. 
@@ -1754,6 +1775,11 @@ distribute_qual_to_rels(PlannerInfo *root, Node *clause, * attach quals to the lowest level where they can be evaluated. But * if we were ever to re-introduce a mechanism for delaying evaluation * of "expensive" quals, this area would need work. + * + * Note: generally, use of is_pushed_down has to go through the macro + * RINFO_IS_PUSHED_DOWN, because that flag alone is not always sufficient + * to tell whether a clause must be treated as pushed-down in context. + * This seems like another reason why it should perhaps be rethought. *---------- */ if (is_deduced) @@ -1943,10 +1969,11 @@ distribute_qual_to_rels(PlannerInfo *root, Node *clause, if (maybe_equivalence) { if (check_equivalence_delay(root, restrictinfo) && - process_equivalence(root, restrictinfo, below_outer_join)) + process_equivalence(root, &restrictinfo, below_outer_join)) return; /* EC rejected it, so set left_ec/right_ec the hard way ... */ - initialize_mergeclause_eclasses(root, restrictinfo); + if (restrictinfo->mergeopfamilies) /* EC might have changed this */ + initialize_mergeclause_eclasses(root, restrictinfo); /* ... and fall through to distribute_restrictinfo_to_rels */ } else if (maybe_outer_join && restrictinfo->can_join) @@ -2001,8 +2028,8 @@ distribute_qual_to_rels(PlannerInfo *root, Node *clause, * may force extra delay of higher-level outer joins. * * If the qual must be delayed, add relids to *relids_p to reflect the lowest - * safe level for evaluating the qual, and return TRUE. Any extra delay for - * higher-level joins is reflected by setting delay_upper_joins to TRUE in + * safe level for evaluating the qual, and return true. Any extra delay for + * higher-level joins is reflected by setting delay_upper_joins to true in * SpecialJoinInfo structs. We also compute nullable_relids, the set of * referenced relids that are nullable by lower outer joins (note that this * can be nonempty even for a non-delayed qual). @@ -2034,7 +2061,7 @@ distribute_qual_to_rels(PlannerInfo *root, Node *clause, * Lastly, a pushed-down qual that references the nullable side of any current * join_info_list member and has to be evaluated above that OJ (because its * required relids overlap the LHS too) causes that OJ's delay_upper_joins - * flag to be set TRUE. This will prevent any higher-level OJs from + * flag to be set true. This will prevent any higher-level OJs from * being interchanged with that OJ, which would result in not having any * correct place to evaluate the qual. (The case we care about here is a * sub-select WHERE clause within the RHS of some outer join. The WHERE @@ -2118,7 +2145,7 @@ check_outerjoin_delay(PlannerInfo *root, /* * check_equivalence_delay * Detect whether a potential equivalence clause is rendered unsafe - * by outer-join-delay considerations. Return TRUE if it's safe. + * by outer-join-delay considerations. Return true if it's safe. * * The initial tests in distribute_qual_to_rels will consider a mergejoinable * clause to be a potential equivalence clause if it is not outerjoin_delayed. diff --git a/src/backend/optimizer/plan/planagg.c b/src/backend/optimizer/plan/planagg.c index bba8a1ff58..95cbffbd69 100644 --- a/src/backend/optimizer/plan/planagg.c +++ b/src/backend/optimizer/plan/planagg.c @@ -17,7 +17,7 @@ * scan all the rows anyway. 
* * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * @@ -232,9 +232,9 @@ preprocess_minmax_aggregates(PlannerInfo *root, List *tlist) * that each one is a MIN/MAX aggregate. If so, build a list of the * distinct aggregate calls in the tree. * - * Returns TRUE if a non-MIN/MAX aggregate is found, FALSE otherwise. + * Returns true if a non-MIN/MAX aggregate is found, false otherwise. * (This seemingly-backward definition is used because expression_tree_walker - * aborts the scan on TRUE return, which is what we want.) + * aborts the scan on true return, which is what we want.) * * Found aggregates are added to the list at *context; it's up to the caller * to initialize the list to NIL. @@ -335,8 +335,8 @@ find_minmax_aggs_walker(Node *node, List **context) * Given a MIN/MAX aggregate, try to build an indexscan Path it can be * optimized with. * - * If successful, stash the best path in *mminfo and return TRUE. - * Otherwise, return FALSE. + * If successful, stash the best path in *mminfo and return true. + * Otherwise, return false. */ static bool build_minmax_path(PlannerInfo *root, MinMaxAggInfo *mminfo, diff --git a/src/backend/optimizer/plan/planmain.c b/src/backend/optimizer/plan/planmain.c index f4e0a6ea3d..9b6cc9e10f 100644 --- a/src/backend/optimizer/plan/planmain.c +++ b/src/backend/optimizer/plan/planmain.c @@ -9,7 +9,7 @@ * shorn of features like subselects, inheritance, aggregates, grouping, * and so on. (Those are the things planner.c deals with.) * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * @@ -57,8 +57,6 @@ query_planner(PlannerInfo *root, List *tlist, Query *parse = root->parse; List *joinlist; RelOptInfo *final_rel; - Index rti; - double total_pages; /* * If the query has an empty join tree, then it's something easy like @@ -124,6 +122,12 @@ query_planner(PlannerInfo *root, List *tlist, */ setup_simple_rel_arrays(root); + /* + * Populate append_rel_array with each AppendRelInfo to allow direct + * lookups by child relid. + */ + setup_append_rel_array(root); + /* * Construct RelOptInfo nodes for all base relations in query, and * indirectly for all appendrel member relations ("other rels"). This @@ -225,34 +229,6 @@ query_planner(PlannerInfo *root, List *tlist, */ extract_restriction_or_clauses(root); - /* - * We should now have size estimates for every actual table involved in - * the query, and we also know which if any have been deleted from the - * query by join removal; so we can compute total_table_pages. - * - * Note that appendrels are not double-counted here, even though we don't - * bother to distinguish RelOptInfos for appendrel parents, because the - * parents will still have size zero. - * - * XXX if a table is self-joined, we will count it once per appearance, - * which perhaps is the wrong thing ... but that's not completely clear, - * and detecting self-joins here is difficult, so ignore it for now. 
- */ - total_pages = 0; - for (rti = 1; rti < root->simple_rel_array_size; rti++) - { - RelOptInfo *brel = root->simple_rel_array[rti]; - - if (brel == NULL) - continue; - - Assert(brel->relid == rti); /* sanity check on array */ - - if (IS_SIMPLE_REL(brel)) - total_pages += (double) brel->pages; - } - root->total_table_pages = total_pages; - /* * Ready to do the primary planning. */ diff --git a/src/backend/optimizer/plan/planner.c b/src/backend/optimizer/plan/planner.c index 407df9ae79..c729a99f8b 100644 --- a/src/backend/optimizer/plan/planner.c +++ b/src/backend/optimizer/plan/planner.c @@ -3,7 +3,7 @@ * planner.c * The query optimizer external interface. * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * @@ -22,13 +22,14 @@ #include "access/parallel.h" #include "access/sysattr.h" #include "access/xact.h" -#include "catalog/pg_constraint_fn.h" +#include "catalog/pg_constraint.h" #include "catalog/pg_proc.h" #include "catalog/pg_type.h" #include "executor/executor.h" #include "executor/nodeAgg.h" #include "foreign/fdwapi.h" #include "miscadmin.h" +#include "jit/jit.h" #include "lib/bipartite_match.h" #include "lib/knapsack.h" #include "nodes/makefuncs.h" @@ -61,6 +62,7 @@ /* GUC parameters */ double cursor_tuple_fraction = DEFAULT_CURSOR_TUPLE_FRACTION; int force_parallel_mode = FORCE_PARALLEL_OFF; +bool parallel_leader_participation = true; /* Hook for plugins to get control in planner() */ planner_hook_type planner_hook = NULL; @@ -108,6 +110,17 @@ typedef struct int *tleref_to_colnum_map; } grouping_sets_data; +/* + * Temporary structure for use during WindowClause reordering in order to be + * be able to sort WindowClauses on partitioning/ordering prefix. 
+ */ +typedef struct +{ + WindowClause *wc; + List *uniqueOrder; /* A List of unique ordering/partitioning + * clauses per Window */ +} WindowClauseSortData; + /* Local functions */ static Node *preprocess_expression(PlannerInfo *root, Node *expr, int kind); static void preprocess_qual_conditions(PlannerInfo *root, Node *jtnode); @@ -121,7 +134,6 @@ static void preprocess_rowmarks(PlannerInfo *root); static double preprocess_limit(PlannerInfo *root, double tuple_fraction, int64 *offset_est, int64 *count_est); -static bool limit_needed(Query *parse); static void remove_useless_groupby_columns(PlannerInfo *root); static List *preprocess_groupclause(PlannerInfo *root, List *force); static List *extract_rollup_sets(List *groupingSets); @@ -129,21 +141,36 @@ static List *reorder_grouping_sets(List *groupingSets, List *sortclause); static void standard_qp_callback(PlannerInfo *root, void *extra); static double get_number_of_groups(PlannerInfo *root, double path_rows, - grouping_sets_data *gd); + grouping_sets_data *gd, + List *target_list); static Size estimate_hashagg_tablesize(Path *path, const AggClauseCosts *agg_costs, double dNumGroups); static RelOptInfo *create_grouping_paths(PlannerInfo *root, RelOptInfo *input_rel, PathTarget *target, + bool target_parallel_safe, const AggClauseCosts *agg_costs, grouping_sets_data *gd); +static bool is_degenerate_grouping(PlannerInfo *root); +static void create_degenerate_grouping_paths(PlannerInfo *root, + RelOptInfo *input_rel, + RelOptInfo *grouped_rel); +static RelOptInfo *make_grouping_rel(PlannerInfo *root, RelOptInfo *input_rel, + PathTarget *target, bool target_parallel_safe, + Node *havingQual); +static void create_ordinary_grouping_paths(PlannerInfo *root, + RelOptInfo *input_rel, + RelOptInfo *grouped_rel, + const AggClauseCosts *agg_costs, + grouping_sets_data *gd, + GroupPathExtraData *extra, + RelOptInfo **partially_grouped_rel_p); static void consider_groupingsets_paths(PlannerInfo *root, RelOptInfo *grouped_rel, Path *path, bool is_sorted, bool can_hash, - PathTarget *target, grouping_sets_data *gd, const AggClauseCosts *agg_costs, double dNumGroups); @@ -151,6 +178,7 @@ static RelOptInfo *create_window_paths(PlannerInfo *root, RelOptInfo *input_rel, PathTarget *input_target, PathTarget *output_target, + bool output_target_parallel_safe, List *tlist, WindowFuncLists *wflists, List *activeWindows); @@ -167,11 +195,13 @@ static RelOptInfo *create_distinct_paths(PlannerInfo *root, static RelOptInfo *create_ordered_paths(PlannerInfo *root, RelOptInfo *input_rel, PathTarget *target, + bool target_parallel_safe, double limit_tuples); static PathTarget *make_group_input_target(PlannerInfo *root, PathTarget *final_target); static PathTarget *make_partial_grouping_target(PlannerInfo *root, - PathTarget *grouping_target); + PathTarget *grouping_target, + Node *havingQual); static List *postprocess_setop_tlist(List *new_tlist, List *orig_tlist); static List *select_active_windows(PlannerInfo *root, WindowFuncLists *wflists); static PathTarget *make_window_input_target(PlannerInfo *root, @@ -184,6 +214,40 @@ static PathTarget *make_sort_input_target(PlannerInfo *root, bool *have_postponed_srfs); static void adjust_paths_for_srfs(PlannerInfo *root, RelOptInfo *rel, List *targets, List *targets_contain_srfs); +static void add_paths_to_grouping_rel(PlannerInfo *root, RelOptInfo *input_rel, + RelOptInfo *grouped_rel, + RelOptInfo *partially_grouped_rel, + const AggClauseCosts *agg_costs, + grouping_sets_data *gd, + double dNumGroups, + 
GroupPathExtraData *extra); +static RelOptInfo *create_partial_grouping_paths(PlannerInfo *root, + RelOptInfo *grouped_rel, + RelOptInfo *input_rel, + grouping_sets_data *gd, + GroupPathExtraData *extra, + bool force_rel_creation); +static void gather_grouping_paths(PlannerInfo *root, RelOptInfo *rel); +static bool can_partial_agg(PlannerInfo *root, + const AggClauseCosts *agg_costs); +static void apply_scanjoin_target_to_paths(PlannerInfo *root, + RelOptInfo *rel, + List *scanjoin_targets, + List *scanjoin_targets_contain_srfs, + bool scanjoin_target_parallel_safe, + bool tlist_same_exprs); +static void create_partitionwise_grouping_paths(PlannerInfo *root, + RelOptInfo *input_rel, + RelOptInfo *grouped_rel, + RelOptInfo *partially_grouped_rel, + const AggClauseCosts *agg_costs, + grouping_sets_data *gd, + PartitionwiseAggregateType patype, + GroupPathExtraData *extra); +static bool group_by_has_partkey(RelOptInfo *input_rel, + List *targetList, + List *groupClause); +static int common_prefix_cmp(const void *a, const void *b); /***************************************************************************** @@ -239,11 +303,10 @@ standard_planner(Query *parse, int cursorOptions, ParamListInfo boundParams) glob->finalrtable = NIL; glob->finalrowmarks = NIL; glob->resultRelations = NIL; - glob->nonleafResultRelations = NIL; glob->rootResultRelations = NIL; glob->relationOids = NIL; glob->invalItems = NIL; - glob->nParamExec = 0; + glob->paramExecTypes = NIL; glob->lastPHId = 0; glob->lastRowMarkId = 0; glob->lastPlanNodeId = 0; @@ -257,6 +320,16 @@ standard_planner(Query *parse, int cursorOptions, ParamListInfo boundParams) * to values that don't permit parallelism, or if parallel-unsafe * functions are present in the query tree. * + * (Note that we do allow CREATE TABLE AS, SELECT INTO, and CREATE + * MATERIALIZED VIEW to use parallel plans, but this is safe only because + * the command is writing into a completely new table which workers won't + * be able to see. If the workers could see the table, the fact that + * group locking would cause them to ignore the leader's heavyweight + * relation extension lock and GIN page locks would make this unsafe. + * We'll have to fix that somehow if we want to allow parallel inserts in + * general; updates and deletes have additional problems especially around + * combo CIDs.) + * * For now, we don't try to use parallel mode if we're running inside a * parallel worker. We might eventually be able to relax this * restriction, but for now it seems best not to have parallel workers @@ -272,7 +345,6 @@ standard_planner(Query *parse, int cursorOptions, ParamListInfo boundParams) */ if ((cursorOptions & CURSOR_OPT_PARALLEL_OK) != 0 && IsUnderPostmaster && - dynamic_shared_memory_type != DSM_IMPL_NONE && parse->commandType == CMD_SELECT && !parse->hasModifyingCTE && max_parallel_workers_per_gather > 0 && @@ -291,13 +363,21 @@ standard_planner(Query *parse, int cursorOptions, ParamListInfo boundParams) } /* - * glob->parallelModeNeeded should tell us whether it's necessary to - * impose the parallel mode restrictions, but we don't actually want to - * impose them unless we choose a parallel plan, so it is normally set - * only if a parallel plan is chosen (see create_gather_plan). That way, - * people who mislabel their functions but don't use parallelism anyway - * aren't harmed. But when force_parallel_mode is set, we enable the - * restrictions whenever possible for testing purposes. 
+ * glob->parallelModeNeeded is normally set to false here and changed to + * true during plan creation if a Gather or Gather Merge plan is actually + * created (cf. create_gather_plan, create_gather_merge_plan). + * + * However, if force_parallel_mode = on or force_parallel_mode = regress, + * then we impose parallel mode whenever it's safe to do so, even if the + * final plan doesn't use parallelism. It's not safe to do so if the + * query contains anything parallel-unsafe; parallelModeOK will be false + * in that case. Note that parallelModeOK can't change after this point. + * Otherwise, everything in the query is either parallel-safe or + * parallel-restricted, and in either case it should be OK to impose + * parallel-mode restrictions. If that ends up breaking something, then + * either some function the user included in the query is incorrectly + * labelled as parallel-safe or parallel-restricted when in reality it's + * parallel-unsafe, or else the query planner itself has a bug. */ glob->parallelModeNeeded = glob->parallelModeOK && (force_parallel_mode != FORCE_PARALLEL_OFF); @@ -358,6 +438,14 @@ standard_planner(Query *parse, int cursorOptions, ParamListInfo boundParams) { Gather *gather = makeNode(Gather); + /* + * If there are any initPlans attached to the formerly-top plan node, + * move them up to the Gather node; same as we do for Material node in + * materialize_finished_plan. + */ + gather->plan.initPlan = top_plan->initPlan; + top_plan->initPlan = NIL; + gather->plan.targetlist = top_plan->targetlist; gather->plan.qual = NIL; gather->plan.lefttree = top_plan; @@ -366,6 +454,12 @@ standard_planner(Query *parse, int cursorOptions, ParamListInfo boundParams) gather->single_copy = true; gather->invisible = (force_parallel_mode == FORCE_PARALLEL_REGRESS); + /* + * Since this Gather has no parallel-aware descendants to signal to, + * we don't need a rescan Param. + */ + gather->rescan_param = -1; + /* * Ideally we'd use cost_gather here, but setting up dummy path data * to satisfy it doesn't seem much cleaner than knowing what it does. @@ -391,13 +485,13 @@ standard_planner(Query *parse, int cursorOptions, ParamListInfo boundParams) * set_plan_references' tree traversal, but for now it has to be separate * because we need to visit subplans before not after main plan. */ - if (glob->nParamExec > 0) + if (glob->paramExecTypes != NIL) { Assert(list_length(glob->subplans) == list_length(glob->subroots)); forboth(lp, glob->subplans, lr, glob->subroots) { Plan *subplan = (Plan *) lfirst(lp); - PlannerInfo *subroot = (PlannerInfo *) lfirst(lr); + PlannerInfo *subroot = lfirst_node(PlannerInfo, lr); SS_finalize_plan(subroot, subplan); } @@ -408,7 +502,6 @@ standard_planner(Query *parse, int cursorOptions, ParamListInfo boundParams) Assert(glob->finalrtable == NIL); Assert(glob->finalrowmarks == NIL); Assert(glob->resultRelations == NIL); - Assert(glob->nonleafResultRelations == NIL); Assert(glob->rootResultRelations == NIL); top_plan = set_plan_references(root, top_plan); /* ... 
and the subplans (both regular subplans and initplans) */ @@ -416,7 +509,7 @@ standard_planner(Query *parse, int cursorOptions, ParamListInfo boundParams) forboth(lp, glob->subplans, lr, glob->subroots) { Plan *subplan = (Plan *) lfirst(lp); - PlannerInfo *subroot = (PlannerInfo *) lfirst(lr); + PlannerInfo *subroot = lfirst_node(PlannerInfo, lr); lfirst(lp) = set_plan_references(subroot, subplan); } @@ -435,19 +528,43 @@ standard_planner(Query *parse, int cursorOptions, ParamListInfo boundParams) result->planTree = top_plan; result->rtable = glob->finalrtable; result->resultRelations = glob->resultRelations; - result->nonleafResultRelations = glob->nonleafResultRelations; result->rootResultRelations = glob->rootResultRelations; result->subplans = glob->subplans; result->rewindPlanIDs = glob->rewindPlanIDs; result->rowMarks = glob->finalrowmarks; result->relationOids = glob->relationOids; result->invalItems = glob->invalItems; - result->nParamExec = glob->nParamExec; + result->paramExecTypes = glob->paramExecTypes; /* utilityStmt should be null, but we might as well copy it */ result->utilityStmt = parse->utilityStmt; result->stmt_location = parse->stmt_location; result->stmt_len = parse->stmt_len; + result->jitFlags = PGJIT_NONE; + if (jit_enabled && jit_above_cost >= 0 && + top_plan->total_cost > jit_above_cost) + { + result->jitFlags |= PGJIT_PERFORM; + + /* + * Decide how much effort should be put into generating better code. + */ + if (jit_optimize_above_cost >= 0 && + top_plan->total_cost > jit_optimize_above_cost) + result->jitFlags |= PGJIT_OPT3; + if (jit_inline_above_cost >= 0 && + top_plan->total_cost > jit_inline_above_cost) + result->jitFlags |= PGJIT_INLINE; + + /* + * Decide which operations should be JITed. + */ + if (jit_expressions) + result->jitFlags |= PGJIT_EXPR; + if (jit_tuple_deforming) + result->jitFlags |= PGJIT_DEFORM; + } + return result; } @@ -506,7 +623,6 @@ subquery_planner(PlannerGlobal *glob, Query *parse, root->multiexpr_params = NIL; root->eq_classes = NIL; root->append_rel_list = NIL; - root->pcinfo_list = NIL; root->rowMarks = NIL; memset(root->upper_rels, 0, sizeof(root->upper_rels)); memset(root->upper_targets, 0, sizeof(root->upper_targets)); @@ -514,13 +630,14 @@ subquery_planner(PlannerGlobal *glob, Query *parse, root->grouping_map = NULL; root->minmax_aggs = NIL; root->qual_security_level = 0; - root->hasInheritedTarget = false; + root->inhTargetKind = INHKIND_NONE; root->hasRecursion = hasRecursion; if (hasRecursion) root->wt_param_id = SS_assign_special_param(root); else root->wt_param_id = -1; root->non_recursive_path = NULL; + root->partColsUpdated = false; /* * If there is a WITH list, process each WITH query and build an initplan @@ -572,7 +689,7 @@ subquery_planner(PlannerGlobal *glob, Query *parse, hasOuterJoins = false; foreach(l, parse->rtable) { - RangeTblEntry *rte = (RangeTblEntry *) lfirst(l); + RangeTblEntry *rte = lfirst_node(RangeTblEntry, l); if (rte->rtekind == RTE_JOIN) { @@ -629,7 +746,7 @@ subquery_planner(PlannerGlobal *glob, Query *parse, newWithCheckOptions = NIL; foreach(l, parse->withCheckOptions) { - WithCheckOption *wco = (WithCheckOption *) lfirst(l); + WithCheckOption *wco = lfirst_node(WithCheckOption, l); wco->qual = preprocess_expression(root, wco->qual, EXPRKIND_QUAL); @@ -649,7 +766,7 @@ subquery_planner(PlannerGlobal *glob, Query *parse, foreach(l, parse->windowClause) { - WindowClause *wc = (WindowClause *) lfirst(l); + WindowClause *wc = lfirst_node(WindowClause, l); /* partitionClause/orderClause are 
sort/group expressions */ wc->startOffset = preprocess_expression(root, wc->startOffset, @@ -691,7 +808,7 @@ subquery_planner(PlannerGlobal *glob, Query *parse, /* Also need to preprocess expressions within RTEs */ foreach(l, parse->rtable) { - RangeTblEntry *rte = (RangeTblEntry *) lfirst(l); + RangeTblEntry *rte = lfirst_node(RangeTblEntry, l); int kind; ListCell *lcsq; @@ -752,6 +869,27 @@ subquery_planner(PlannerGlobal *glob, Query *parse, } } + /* + * Now that we are done preprocessing expressions, and in particular done + * flattening join alias variables, get rid of the joinaliasvars lists. + * They no longer match what expressions in the rest of the tree look + * like, because we have not preprocessed expressions in those lists (and + * do not want to; for example, expanding a SubLink there would result in + * a useless unreferenced subplan). Leaving them in place simply creates + * a hazard for later scans of the tree. We could try to prevent that by + * using QTW_IGNORE_JOINALIASES in every tree scan done after this point, + * but that doesn't sound very reliable. + */ + if (root->hasJoinRTEs) + { + foreach(l, parse->rtable) + { + RangeTblEntry *rte = lfirst_node(RangeTblEntry, l); + + rte->joinaliasvars = NIL; + } + } + /* * In some cases we may want to transfer a HAVING clause into WHERE. We * cannot do so if the HAVING clause contains aggregates (obviously) or @@ -878,11 +1016,12 @@ preprocess_expression(PlannerInfo *root, Node *expr, int kind) /* * If the query has any join RTEs, replace join alias variables with - * base-relation variables. We must do this before sublink processing, - * else sublinks expanded out from join aliases would not get processed. - * We can skip it in non-lateral RTE functions, VALUES lists, and - * TABLESAMPLE clauses, however, since they can't contain any Vars of the - * current query level. + * base-relation variables. We must do this first, since any expressions + * we may extract from the joinaliasvars lists have not been preprocessed. + * For example, if we did this after sublink processing, sublinks expanded + * out from join aliases would not get processed. But we can skip this in + * non-lateral RTE functions, VALUES lists, and TABLESAMPLE clauses, since + * they can't contain any Vars of the current query level. 
*/ if (root->hasJoinRTEs && !(kind == EXPRKIND_RTFUNC || @@ -913,7 +1052,7 @@ preprocess_expression(PlannerInfo *root, Node *expr, int kind) */ if (kind == EXPRKIND_QUAL) { - expr = (Node *) canonicalize_qual((Expr *) expr); + expr = (Node *) canonicalize_qual((Expr *) expr, false); #ifdef OPTIMIZER_DEBUG printf("After canonicalize_qual()\n"); @@ -1024,13 +1163,15 @@ static void inheritance_planner(PlannerInfo *root) { Query *parse = root->parse; - int parentRTindex = parse->resultRelation; + int top_parentRTindex = parse->resultRelation; Bitmapset *subqueryRTindexes; Bitmapset *modifiableARIindexes; int nominalRelation = -1; + Index rootRelation = 0; List *final_rtable = NIL; int save_rel_array_size = 0; RelOptInfo **save_rel_array = NULL; + AppendRelInfo **save_append_rel_array = NULL; List *subpaths = NIL; List *subroots = NIL; List *resultRelations = NIL; @@ -1041,7 +1182,10 @@ inheritance_planner(PlannerInfo *root) ListCell *lc; Index rti; RangeTblEntry *parent_rte; - List *partitioned_rels = NIL; + PlannerInfo *parent_root; + Query *parent_parse; + Bitmapset *parent_relids = bms_make_singleton(top_parentRTindex); + PlannerInfo **parent_roots = NULL; Assert(parse->commandType != CMD_INSERT); @@ -1066,7 +1210,7 @@ inheritance_planner(PlannerInfo *root) rti = 1; foreach(lc, parse->rtable) { - RangeTblEntry *rte = (RangeTblEntry *) lfirst(lc); + RangeTblEntry *rte = lfirst_node(RangeTblEntry, lc); if (rte->rtekind == RTE_SUBQUERY) subqueryRTindexes = bms_add_member(subqueryRTindexes, rti); @@ -1088,7 +1232,7 @@ inheritance_planner(PlannerInfo *root) { foreach(lc, root->append_rel_list) { - AppendRelInfo *appinfo = (AppendRelInfo *) lfirst(lc); + AppendRelInfo *appinfo = lfirst_node(AppendRelInfo, lc); if (bms_is_member(appinfo->parent_relid, subqueryRTindexes) || bms_is_member(appinfo->child_relid, subqueryRTindexes) || @@ -1101,37 +1245,61 @@ inheritance_planner(PlannerInfo *root) /* * If the parent RTE is a partitioned table, we should use that as the - * nominal relation, because the RTEs added for partitioned tables + * nominal target relation, because the RTEs added for partitioned tables * (including the root parent) as child members of the inheritance set do - * not appear anywhere else in the plan. The situation is exactly the - * opposite in the case of non-partitioned inheritance parent as described - * below. + * not appear anywhere else in the plan, so the confusion explained below + * for non-partitioning inheritance cases is not possible. */ - parent_rte = rt_fetch(parentRTindex, root->parse->rtable); + parent_rte = rt_fetch(top_parentRTindex, root->parse->rtable); if (parent_rte->relkind == RELKIND_PARTITIONED_TABLE) - nominalRelation = parentRTindex; + { + nominalRelation = top_parentRTindex; + rootRelation = top_parentRTindex; + } + + /* + * The PlannerInfo for each child is obtained by translating the relevant + * members of the PlannerInfo for its immediate parent, which we find + * using the parent_relid in its AppendRelInfo. We save the PlannerInfo + * for each parent in an array indexed by relid for fast retrieval. Since + * the maximum number of parents is limited by the number of RTEs in the + * query, we use that number to allocate the array. An extra entry is + * needed since relids start from 1. + */ + parent_roots = (PlannerInfo **) palloc0((list_length(parse->rtable) + 1) * + sizeof(PlannerInfo *)); + parent_roots[top_parentRTindex] = root; /* * And now we can get on with generating a plan for each child table. 
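The relid-indexed parent_roots lookup allocated above (one extra slot because relids start at 1) can be sketched standalone as below; ParentState is a hypothetical stand-in for the per-parent PlannerInfo and calloc stands in for palloc0.

#include <stdio.h>
#include <stdlib.h>

/* Hypothetical per-parent planning state. */
typedef struct ParentState
{
	int			relid;
	int			depth;			/* distance from the top parent */
} ParentState;

int
main(void)
{
	int			n_rtes = 5;		/* pretend the query has 5 range-table entries */
	int			top_parent = 2; /* pretend RTE 2 is the target parent */

	/*
	 * Range-table indexes start at 1, so allocate n_rtes + 1 slots and leave
	 * slot 0 unused; calloc gives the NULL "not a parent" default.
	 */
	ParentState **parents = calloc(n_rtes + 1, sizeof(ParentState *));

	parents[top_parent] = malloc(sizeof(ParentState));
	parents[top_parent]->relid = top_parent;
	parents[top_parent]->depth = 0;

	/* A child of RTE 2 (say RTE 4) derives its state from its immediate parent. */
	parents[4] = malloc(sizeof(ParentState));
	parents[4]->relid = 4;
	parents[4]->depth = parents[top_parent]->depth + 1;

	for (int rti = 1; rti <= n_rtes; rti++)
		printf("rti %d: %s\n", rti,
			   parents[rti] ? "has parent state" : "not a parent");

	for (int rti = 0; rti <= n_rtes; rti++)
		free(parents[rti]);
	free(parents);
	return 0;
}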
*/ foreach(lc, root->append_rel_list) { - AppendRelInfo *appinfo = (AppendRelInfo *) lfirst(lc); + AppendRelInfo *appinfo = lfirst_node(AppendRelInfo, lc); PlannerInfo *subroot; RangeTblEntry *child_rte; RelOptInfo *sub_final_rel; Path *subpath; /* append_rel_list contains all append rels; ignore others */ - if (appinfo->parent_relid != parentRTindex) + if (!bms_is_member(appinfo->parent_relid, parent_relids)) continue; + /* + * expand_inherited_rtentry() always processes a parent before any of + * that parent's children, so the parent_root for this relation should + * already be available. + */ + parent_root = parent_roots[appinfo->parent_relid]; + Assert(parent_root != NULL); + parent_parse = parent_root->parse; + /* * We need a working copy of the PlannerInfo so that we can control * propagation of information back to the main copy. */ subroot = makeNode(PlannerInfo); - memcpy(subroot, root, sizeof(PlannerInfo)); + memcpy(subroot, parent_root, sizeof(PlannerInfo)); /* * Generate modified query with this rel as target. We first apply @@ -1140,26 +1308,79 @@ inheritance_planner(PlannerInfo *root) * then fool around with subquery RTEs. */ subroot->parse = (Query *) - adjust_appendrel_attrs(root, - (Node *) parse, + adjust_appendrel_attrs(parent_root, + (Node *) parent_parse, 1, &appinfo); /* * If there are securityQuals attached to the parent, move them to the * child rel (they've already been transformed properly for that). */ - parent_rte = rt_fetch(parentRTindex, subroot->parse->rtable); + parent_rte = rt_fetch(appinfo->parent_relid, subroot->parse->rtable); child_rte = rt_fetch(appinfo->child_relid, subroot->parse->rtable); child_rte->securityQuals = parent_rte->securityQuals; parent_rte->securityQuals = NIL; + /* + * Mark whether we're planning a query to a partitioned table or an + * inheritance parent. + */ + subroot->inhTargetKind = + (rootRelation != 0) ? INHKIND_PARTITIONED : INHKIND_INHERITED; + + /* + * If this child is further partitioned, remember it as a parent. + * Since a partitioned table does not have any data, we don't need to + * create a plan for it, and we can stop processing it here. We do, + * however, need to remember its modified PlannerInfo for use when + * processing its children, since we'll update their varnos based on + * the delta from immediate parent to child, not from top to child. + * + * Note: a very non-obvious point is that we have not yet added + * duplicate subquery RTEs to the subroot's rtable. We mustn't, + * because then its children would have two sets of duplicates, + * confusing matters. + */ + if (child_rte->inh) + { + Assert(child_rte->relkind == RELKIND_PARTITIONED_TABLE); + parent_relids = bms_add_member(parent_relids, appinfo->child_relid); + parent_roots[appinfo->child_relid] = subroot; + + continue; + } + + /* + * Set the nominal target relation of the ModifyTable node if not + * already done. If the target is a partitioned table, we already set + * nominalRelation to refer to the partition root, above. For + * non-partitioned inheritance cases, we'll use the first child + * relation (even if it's excluded) as the nominal target relation. + * Because of the way expand_inherited_rtentry works, that should be + * the RTE representing the parent table in its role as a simple + * member of the inheritance set. + * + * It would be logically cleaner to *always* use the inheritance + * parent RTE as the nominal relation; but that RTE is not otherwise + * referenced in the plan in the non-partitioned inheritance case. 
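A toy version of the set-driven walk just shown: append-rel entries whose parent is not yet in the "known parents" set are skipped, and an intermediate partitioned child is only recorded as a parent so its own children get visited later. A 64-bit mask stands in for the Bitmapset, the data is made up, and it relies on the same parents-before-children ordering the comment above attributes to expand_inherited_rtentry().

#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

/* Hypothetical append-rel entry. */
typedef struct
{
	int			child_relid;
	int			parent_relid;
	bool		child_is_partitioned;
} AppendRelStub;

int
main(void)
{
	/* RTE 1 is the top parent; RTE 3 is a partitioned child with its own child 4. */
	AppendRelStub rels[] = {
		{2, 1, false},
		{3, 1, true},
		{4, 3, false},
		{5, 9, false},			/* belongs to some unrelated parent */
	};
	uint64_t	parent_relids = UINT64_C(1) << 1;	/* start with the top parent */

	for (size_t i = 0; i < sizeof(rels) / sizeof(rels[0]); i++)
	{
		AppendRelStub *ari = &rels[i];

		/* the list contains all append rels; ignore the others */
		if ((parent_relids & (UINT64_C(1) << ari->parent_relid)) == 0)
		{
			printf("skip child %d (parent %d not of interest)\n",
				   ari->child_relid, ari->parent_relid);
			continue;
		}

		if (ari->child_is_partitioned)
		{
			/* no data of its own; just remember it so its children are visited */
			parent_relids |= UINT64_C(1) << ari->child_relid;
			printf("child %d is itself partitioned; recorded as a parent\n",
				   ari->child_relid);
			continue;
		}

		printf("plan child %d (translated from immediate parent %d)\n",
			   ari->child_relid, ari->parent_relid);
	}
	return 0;
}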
+ * Instead the duplicate child RTE created by expand_inherited_rtentry + * is used elsewhere in the plan, so using the original parent RTE + * would give rise to confusing use of multiple aliases in EXPLAIN + * output for what the user will think is the "same" table. OTOH, + * it's not a problem in the partitioned inheritance case, because the + * duplicate child RTE added for the parent does not appear anywhere + * else in the plan tree. + */ + if (nominalRelation < 0) + nominalRelation = appinfo->child_relid; + /* * The rowMarks list might contain references to subquery RTEs, so * make a copy that we can apply ChangeVarNodes to. (Fortunately, the * executor doesn't need to see the modified copies --- we can just * pass it the original rowMarks list.) */ - subroot->rowMarks = copyObject(root->rowMarks); + subroot->rowMarks = copyObject(parent_root->rowMarks); /* * The append_rel_list likewise might contain references to subquery @@ -1176,9 +1397,9 @@ inheritance_planner(PlannerInfo *root) ListCell *lc2; subroot->append_rel_list = NIL; - foreach(lc2, root->append_rel_list) + foreach(lc2, parent_root->append_rel_list) { - AppendRelInfo *appinfo2 = (AppendRelInfo *) lfirst(lc2); + AppendRelInfo *appinfo2 = lfirst_node(AppendRelInfo, lc2); if (bms_is_member(appinfo2->child_relid, modifiableARIindexes)) appinfo2 = copyObject(appinfo2); @@ -1211,9 +1432,9 @@ inheritance_planner(PlannerInfo *root) ListCell *lr; rti = 1; - foreach(lr, parse->rtable) + foreach(lr, parent_parse->rtable) { - RangeTblEntry *rte = (RangeTblEntry *) lfirst(lr); + RangeTblEntry *rte = lfirst_node(RangeTblEntry, lr); if (bms_is_member(rti, subqueryRTindexes)) { @@ -1235,7 +1456,7 @@ inheritance_planner(PlannerInfo *root) foreach(lc2, subroot->append_rel_list) { - AppendRelInfo *appinfo2 = (AppendRelInfo *) lfirst(lc2); + AppendRelInfo *appinfo2 = lfirst_node(AppendRelInfo, lc2); if (bms_is_member(appinfo2->child_relid, modifiableARIindexes)) @@ -1255,36 +1476,10 @@ inheritance_planner(PlannerInfo *root) Assert(subroot->join_info_list == NIL); /* and we haven't created PlaceHolderInfos, either */ Assert(subroot->placeholder_list == NIL); - /* hack to mark target relation as an inheritance partition */ - subroot->hasInheritedTarget = true; /* Generate Path(s) for accessing this result relation */ grouping_planner(subroot, true, 0.0 /* retrieve all tuples */ ); - /* - * Set the nomimal target relation of the ModifyTable node if not - * already done. We use the inheritance parent RTE as the nominal - * target relation if it's a partitioned table (see just above this - * loop). In the non-partitioned parent case, we'll use the first - * child relation (even if it's excluded) as the nominal target - * relation. Because of the way expand_inherited_rtentry works, the - * latter should be the RTE representing the parent table in its role - * as a simple member of the inheritance set. - * - * It would be logically cleaner to *always* use the inheritance - * parent RTE as the nominal relation; but that RTE is not otherwise - * referenced in the plan in the non-partitioned inheritance case. - * Instead the duplicate child RTE created by expand_inherited_rtentry - * is used elsewhere in the plan, so using the original parent RTE - * would give rise to confusing use of multiple aliases in EXPLAIN - * output for what the user will think is the "same" table. OTOH, - * it's not a problem in the partitioned inheritance case, because the - * duplicate child RTE added for the parent does not appear anywhere - * else in the plan tree. 
- */ - if (nominalRelation < 0) - nominalRelation = appinfo->child_relid; - /* * Select cheapest path in case there's more than one. We always run * modification queries to conclusion, so we care only for the @@ -1330,6 +1525,7 @@ inheritance_planner(PlannerInfo *root) } save_rel_array_size = subroot->simple_rel_array_size; save_rel_array = subroot->simple_rel_array; + save_append_rel_array = subroot->append_rel_array; /* Make sure any initplans from this rel get into the outer list */ root->init_plans = subroot->init_plans; @@ -1354,13 +1550,6 @@ inheritance_planner(PlannerInfo *root) Assert(!parse->onConflict); } - if (parent_rte->relkind == RELKIND_PARTITIONED_TABLE) - { - partitioned_rels = get_partitioned_child_rels(root, parentRTindex); - /* The root partitioned table is included as a child rel */ - Assert(list_length(partitioned_rels) >= 1); - } - /* Result path must go into outer query's FINAL upperrel */ final_rel = fetch_upper_rel(root, UPPERREL_FINAL, NULL); @@ -1387,13 +1576,15 @@ inheritance_planner(PlannerInfo *root) parse->rtable = final_rtable; root->simple_rel_array_size = save_rel_array_size; root->simple_rel_array = save_rel_array; + root->append_rel_array = save_append_rel_array; + /* Must reconstruct master's simple_rte_array, too */ root->simple_rte_array = (RangeTblEntry **) palloc0((list_length(final_rtable) + 1) * sizeof(RangeTblEntry *)); rti = 1; foreach(lc, final_rtable) { - RangeTblEntry *rte = (RangeTblEntry *) lfirst(lc); + RangeTblEntry *rte = lfirst_node(RangeTblEntry, lc); root->simple_rte_array[rti++] = rte; } @@ -1414,7 +1605,8 @@ inheritance_planner(PlannerInfo *root) parse->commandType, parse->canSetTag, nominalRelation, - partitioned_rels, + rootRelation, + root->partColsUpdated, resultRelations, subpaths, subroots, @@ -1458,7 +1650,7 @@ grouping_planner(PlannerInfo *root, bool inheritance_update, double tuple_fraction) { Query *parse = root->parse; - List *tlist = parse->targetList; + List *tlist; int64 offset_est = 0; int64 count_est = 0; double limit_tuples = -1.0; @@ -1466,6 +1658,7 @@ grouping_planner(PlannerInfo *root, bool inheritance_update, PathTarget *final_target; List *final_targets; List *final_targets_contain_srfs; + bool final_target_parallel_safe; RelOptInfo *current_rel; RelOptInfo *final_rel; ListCell *lc; @@ -1528,6 +1721,10 @@ grouping_planner(PlannerInfo *root, bool inheritance_update, /* Also extract the PathTarget form of the setop result tlist */ final_target = current_rel->cheapest_total_path->pathtarget; + /* And check whether it's parallel safe */ + final_target_parallel_safe = + is_parallel_safe(root, (Node *) final_target->exprs); + /* The setop result tlist couldn't contain any SRFs */ Assert(!parse->hasTargetSRFs); final_targets = final_targets_contain_srfs = NIL; @@ -1542,8 +1739,8 @@ grouping_planner(PlannerInfo *root, bool inheritance_update, /*------ translator: %s is a SQL row locking clause such as FOR UPDATE */ errmsg("%s is not allowed with UNION/INTERSECT/EXCEPT", - LCS_asString(((RowMarkClause *) - linitial(parse->rowMarks))->strength)))); + LCS_asString(linitial_node(RowMarkClause, + parse->rowMarks)->strength)))); /* * Calculate pathkeys that represent result ordering requirements @@ -1559,12 +1756,16 @@ grouping_planner(PlannerInfo *root, bool inheritance_update, PathTarget *sort_input_target; List *sort_input_targets; List *sort_input_targets_contain_srfs; + bool sort_input_target_parallel_safe; PathTarget *grouping_target; List *grouping_targets; List *grouping_targets_contain_srfs; + bool 
grouping_target_parallel_safe; PathTarget *scanjoin_target; List *scanjoin_targets; List *scanjoin_targets_contain_srfs; + bool scanjoin_target_parallel_safe; + bool scanjoin_target_same_exprs; bool have_grouping; AggClauseCosts agg_costs; WindowFuncLists *wflists = NULL; @@ -1588,13 +1789,7 @@ grouping_planner(PlannerInfo *root, bool inheritance_update, } /* Preprocess targetlist */ - tlist = preprocess_targetlist(root, tlist); - - if (parse->onConflict) - parse->onConflict->onConflictSet = - preprocess_onconflict_targetlist(parse->onConflict->onConflictSet, - parse->resultRelation, - parse->rtable); + tlist = preprocess_targetlist(root); /* * We are now done hacking up the query's targetlist. Most of the @@ -1673,7 +1868,7 @@ grouping_planner(PlannerInfo *root, bool inheritance_update, qp_extra.tlist = tlist; qp_extra.activeWindows = activeWindows; qp_extra.groupClause = (gset_data - ? (gset_data->rollups ? ((RollupData *) linitial(gset_data->rollups))->groupClause : NIL) + ? (gset_data->rollups ? linitial_node(RollupData, gset_data->rollups)->groupClause : NIL) : parse->groupClause); /* @@ -1694,6 +1889,8 @@ grouping_planner(PlannerInfo *root, bool inheritance_update, * that were obtained within query_planner(). */ final_target = create_pathtarget(root, tlist); + final_target_parallel_safe = + is_parallel_safe(root, (Node *) final_target->exprs); /* * If ORDER BY was given, consider whether we should use a post-sort @@ -1701,11 +1898,18 @@ grouping_planner(PlannerInfo *root, bool inheritance_update, * so. */ if (parse->sortClause) + { sort_input_target = make_sort_input_target(root, final_target, &have_postponed_srfs); + sort_input_target_parallel_safe = + is_parallel_safe(root, (Node *) sort_input_target->exprs); + } else + { sort_input_target = final_target; + sort_input_target_parallel_safe = final_target_parallel_safe; + } /* * If we have window functions to deal with, the output from any @@ -1713,11 +1917,18 @@ grouping_planner(PlannerInfo *root, bool inheritance_update, * otherwise, it should be sort_input_target. */ if (activeWindows) + { grouping_target = make_window_input_target(root, final_target, activeWindows); + grouping_target_parallel_safe = + is_parallel_safe(root, (Node *) grouping_target->exprs); + } else + { grouping_target = sort_input_target; + grouping_target_parallel_safe = sort_input_target_parallel_safe; + } /* * If we have grouping or aggregation to do, the topmost scan/join @@ -1727,9 +1938,16 @@ grouping_planner(PlannerInfo *root, bool inheritance_update, have_grouping = (parse->groupClause || parse->groupingSets || parse->hasAggs || root->hasHavingQual); if (have_grouping) + { scanjoin_target = make_group_input_target(root, final_target); + scanjoin_target_parallel_safe = + is_parallel_safe(root, (Node *) grouping_target->exprs); + } else + { scanjoin_target = grouping_target; + scanjoin_target_parallel_safe = grouping_target_parallel_safe; + } /* * If there are any SRFs in the targetlist, we must separate each of @@ -1743,114 +1961,44 @@ grouping_planner(PlannerInfo *root, bool inheritance_update, split_pathtarget_at_srfs(root, final_target, sort_input_target, &final_targets, &final_targets_contain_srfs); - final_target = (PathTarget *) linitial(final_targets); + final_target = linitial_node(PathTarget, final_targets); Assert(!linitial_int(final_targets_contain_srfs)); /* likewise for sort_input_target vs. 
grouping_target */ split_pathtarget_at_srfs(root, sort_input_target, grouping_target, &sort_input_targets, &sort_input_targets_contain_srfs); - sort_input_target = (PathTarget *) linitial(sort_input_targets); + sort_input_target = linitial_node(PathTarget, sort_input_targets); Assert(!linitial_int(sort_input_targets_contain_srfs)); /* likewise for grouping_target vs. scanjoin_target */ split_pathtarget_at_srfs(root, grouping_target, scanjoin_target, &grouping_targets, &grouping_targets_contain_srfs); - grouping_target = (PathTarget *) linitial(grouping_targets); + grouping_target = linitial_node(PathTarget, grouping_targets); Assert(!linitial_int(grouping_targets_contain_srfs)); /* scanjoin_target will not have any SRFs precomputed for it */ split_pathtarget_at_srfs(root, scanjoin_target, NULL, &scanjoin_targets, &scanjoin_targets_contain_srfs); - scanjoin_target = (PathTarget *) linitial(scanjoin_targets); + scanjoin_target = linitial_node(PathTarget, scanjoin_targets); Assert(!linitial_int(scanjoin_targets_contain_srfs)); } else { - /* initialize lists, just to keep compiler quiet */ + /* initialize lists; for most of these, dummy values are OK */ final_targets = final_targets_contain_srfs = NIL; sort_input_targets = sort_input_targets_contain_srfs = NIL; grouping_targets = grouping_targets_contain_srfs = NIL; - scanjoin_targets = scanjoin_targets_contain_srfs = NIL; - } - - /* - * Forcibly apply SRF-free scan/join target to all the Paths for the - * scan/join rel. - * - * In principle we should re-run set_cheapest() here to identify the - * cheapest path, but it seems unlikely that adding the same tlist - * eval costs to all the paths would change that, so we don't bother. - * Instead, just assume that the cheapest-startup and cheapest-total - * paths remain so. (There should be no parameterized paths anymore, - * so we needn't worry about updating cheapest_parameterized_paths.) - */ - foreach(lc, current_rel->pathlist) - { - Path *subpath = (Path *) lfirst(lc); - Path *path; - - Assert(subpath->param_info == NULL); - path = apply_projection_to_path(root, current_rel, - subpath, scanjoin_target); - /* If we had to add a Result, path is different from subpath */ - if (path != subpath) - { - lfirst(lc) = path; - if (subpath == current_rel->cheapest_startup_path) - current_rel->cheapest_startup_path = path; - if (subpath == current_rel->cheapest_total_path) - current_rel->cheapest_total_path = path; - } - } - - /* - * Upper planning steps which make use of the top scan/join rel's - * partial pathlist will expect partial paths for that rel to produce - * the same output as complete paths ... and we just changed the - * output for the complete paths, so we'll need to do the same thing - * for partial paths. But only parallel-safe expressions can be - * computed by partial paths. - */ - if (current_rel->partial_pathlist && - is_parallel_safe(root, (Node *) scanjoin_target->exprs)) - { - /* Apply the scan/join target to each partial path */ - foreach(lc, current_rel->partial_pathlist) - { - Path *subpath = (Path *) lfirst(lc); - Path *newpath; - - /* Shouldn't have any parameterized paths anymore */ - Assert(subpath->param_info == NULL); - - /* - * Don't use apply_projection_to_path() here, because there - * could be other pointers to these paths, and therefore we - * mustn't modify them in place. 
- */ - newpath = (Path *) create_projection_path(root, - current_rel, - subpath, - scanjoin_target); - lfirst(lc) = newpath; - } - } - else - { - /* - * In the unfortunate event that scanjoin_target is not - * parallel-safe, we can't apply it to the partial paths; in that - * case, we'll need to forget about the partial paths, which - * aren't valid input for upper planning steps. - */ - current_rel->partial_pathlist = NIL; + scanjoin_targets = list_make1(scanjoin_target); + scanjoin_targets_contain_srfs = NIL; } - /* Now fix things up if scan/join target contains SRFs */ - if (parse->hasTargetSRFs) - adjust_paths_for_srfs(root, current_rel, - scanjoin_targets, - scanjoin_targets_contain_srfs); + /* Apply scan/join target. */ + scanjoin_target_same_exprs = list_length(scanjoin_targets) == 1 + && equal(scanjoin_target->exprs, current_rel->reltarget->exprs); + apply_scanjoin_target_to_paths(root, current_rel, scanjoin_targets, + scanjoin_targets_contain_srfs, + scanjoin_target_parallel_safe, + scanjoin_target_same_exprs); /* * Save the various upper-rel PathTargets we just computed into @@ -1873,6 +2021,7 @@ grouping_planner(PlannerInfo *root, bool inheritance_update, current_rel = create_grouping_paths(root, current_rel, grouping_target, + grouping_target_parallel_safe, &agg_costs, gset_data); /* Fix things up if grouping_target contains SRFs */ @@ -1892,6 +2041,7 @@ grouping_planner(PlannerInfo *root, bool inheritance_update, current_rel, grouping_target, sort_input_target, + sort_input_target_parallel_safe, tlist, wflists, activeWindows); @@ -1925,6 +2075,7 @@ grouping_planner(PlannerInfo *root, bool inheritance_update, current_rel = create_ordered_paths(root, current_rel, final_target, + final_target_parallel_safe, have_postponed_srfs ? -1.0 : limit_tuples); /* Fix things up if final_target contains SRFs */ @@ -1998,10 +2149,21 @@ grouping_planner(PlannerInfo *root, bool inheritance_update, */ if (parse->commandType != CMD_SELECT && !inheritance_update) { + Index rootRelation; List *withCheckOptionLists; List *returningLists; List *rowMarks; + /* + * If target is a partition root table, we need to mark the + * ModifyTable node appropriately for that. + */ + if (rt_fetch(parse->resultRelation, parse->rtable)->relkind == + RELKIND_PARTITIONED_TABLE) + rootRelation = parse->resultRelation; + else + rootRelation = 0; + /* * Set up the WITH CHECK OPTION and RETURNING lists-of-lists, if * needed. @@ -2031,7 +2193,8 @@ grouping_planner(PlannerInfo *root, bool inheritance_update, parse->commandType, parse->canSetTag, parse->resultRelation, - NIL, + rootRelation, + false, list_make1_int(parse->resultRelation), list_make1(path), list_make1(root), @@ -2046,6 +2209,22 @@ grouping_planner(PlannerInfo *root, bool inheritance_update, add_path(final_rel, path); } + /* + * Generate partial paths for final_rel, too, if outer query levels might + * be able to make use of them. + */ + if (final_rel->consider_parallel && root->query_level > 1 && + !limit_needed(parse)) + { + Assert(!parse->rowMarks && parse->commandType == CMD_SELECT); + foreach(lc, current_rel->partial_pathlist) + { + Path *partial_path = (Path *) lfirst(lc); + + add_partial_path(final_rel, partial_path); + } + } + /* * If there is an FDW that's responsible for all baserels of the query, * let it consider adding ForeignPaths. 
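The block removed above spells out two rules worth keeping in mind for the retargeting step: a path that other code may still point at gets a new projection wrapper rather than being edited in place, and partial paths are simply discarded when the new target is not parallel-safe. A small standalone sketch of both, with invented PathStub/project names; it says nothing about the internals of the new apply_scanjoin_target_to_paths().

#include <stdio.h>
#include <stdlib.h>
#include <stdbool.h>

/* Hypothetical path node: a base path, or a projection over another path. */
typedef struct PathStub
{
	const char *target;
	struct PathStub *subpath;	/* non-NULL for a projection wrapper */
} PathStub;

/* Wrap rather than mutate: the original node stays valid for other referents. */
static PathStub *
project(PathStub *subpath, const char *new_target)
{
	PathStub   *p = malloc(sizeof(PathStub));

	p->target = new_target;
	p->subpath = subpath;
	return p;
}

int
main(void)
{
	PathStub	base = {"scan tlist", NULL};
	PathStub   *partial_paths[2] = {&base, &base};
	int			n_partial = 2;
	bool		target_parallel_safe = false;	/* pretend it has a parallel-restricted call */

	/* Complete paths: retarget by wrapping. */
	PathStub   *retargeted = project(&base, "scan/join target");

	printf("wrapper target: %s, underneath: %s\n",
		   retargeted->target, retargeted->subpath->target);

	/* Partial paths: only usable if every target expression is parallel-safe. */
	if (!target_parallel_safe)
		n_partial = 0;			/* forget them; wrong input for upper planning steps */
	else
		for (int i = 0; i < n_partial; i++)
			partial_paths[i] = project(partial_paths[i], "scan/join target");

	if (n_partial > 0)
		printf("partial paths kept: %d (target: %s)\n",
			   n_partial, partial_paths[0]->target);
	else
		printf("partial paths dropped\n");

	free(retargeted);
	return 0;
}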
@@ -2053,12 +2232,13 @@ grouping_planner(PlannerInfo *root, bool inheritance_update, if (final_rel->fdwroutine && final_rel->fdwroutine->GetForeignUpperPaths) final_rel->fdwroutine->GetForeignUpperPaths(root, UPPERREL_FINAL, - current_rel, final_rel); + current_rel, final_rel, + NULL); /* Let extensions possibly add some more paths */ if (create_upper_paths_hook) (*create_upper_paths_hook) (root, UPPERREL_FINAL, - current_rel, final_rel); + current_rel, final_rel, NULL); /* Note: currently, we leave it to callers to do set_cheapest() */ } @@ -2092,7 +2272,7 @@ preprocess_grouping_sets(PlannerInfo *root) foreach(lc, parse->groupClause) { - SortGroupClause *gc = lfirst(lc); + SortGroupClause *gc = lfirst_node(SortGroupClause, lc); Index ref = gc->tleSortGroupRef; if (ref > maxref) @@ -2121,7 +2301,7 @@ preprocess_grouping_sets(PlannerInfo *root) foreach(lc, parse->groupingSets) { - List *gset = lfirst(lc); + List *gset = (List *) lfirst(lc); if (bms_overlap_list(gd->unsortable_refs, gset)) { @@ -2180,7 +2360,7 @@ preprocess_grouping_sets(PlannerInfo *root) /* * Get the initial (and therefore largest) grouping set. */ - gs = linitial(current_sets); + gs = linitial_node(GroupingSetData, current_sets); /* * Order the groupClause appropriately. If the first grouping set is @@ -2255,7 +2435,7 @@ remap_to_groupclause_idx(List *groupClause, foreach(lc, groupClause) { - SortGroupClause *gc = lfirst(lc); + SortGroupClause *gc = lfirst_node(SortGroupClause, lc); tleref_to_colnum_map[gc->tleSortGroupRef] = ref++; } @@ -2264,7 +2444,7 @@ remap_to_groupclause_idx(List *groupClause, { List *set = NIL; ListCell *lc2; - GroupingSetData *gs = lfirst(lc); + GroupingSetData *gs = lfirst_node(GroupingSetData, lc); foreach(lc2, gs->set) { @@ -2330,8 +2510,8 @@ preprocess_rowmarks(PlannerInfo *root) * CTIDs invalid. This is also checked at parse time, but that's * insufficient because of rule substitution, query pullup, etc. */ - CheckSelectLocking(parse, ((RowMarkClause *) - linitial(parse->rowMarks))->strength); + CheckSelectLocking(parse, linitial_node(RowMarkClause, + parse->rowMarks)->strength); } else { @@ -2359,7 +2539,7 @@ preprocess_rowmarks(PlannerInfo *root) prowmarks = NIL; foreach(l, parse->rowMarks) { - RowMarkClause *rc = (RowMarkClause *) lfirst(l); + RowMarkClause *rc = lfirst_node(RowMarkClause, l); RangeTblEntry *rte = rt_fetch(rc->rti, parse->rtable); PlanRowMark *newrc; @@ -2399,7 +2579,7 @@ preprocess_rowmarks(PlannerInfo *root) i = 0; foreach(l, parse->rtable) { - RangeTblEntry *rte = (RangeTblEntry *) lfirst(l); + RangeTblEntry *rte = lfirst_node(RangeTblEntry, l); PlanRowMark *newrc; i++; @@ -2675,7 +2855,7 @@ preprocess_limit(PlannerInfo *root, double tuple_fraction, * a key distinction: here we need hard constants in OFFSET/LIMIT, whereas * in preprocess_limit it's good enough to consider estimated values. 
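The hard-constants point in the comment above can be sketched as a standalone predicate. This is a simplified, assumed reading of limit_needed(): anything non-constant must be treated as constraining, while the specific no-op cases shown (a constant NULL LIMIT, a constant zero OFFSET) are assumptions of the sketch rather than quotes from the function.

#include <stdio.h>
#include <stdbool.h>

/*
 * Hypothetical LIMIT/OFFSET clause: absent, a known constant (possibly NULL),
 * or an expression that would have to be evaluated at run time.
 */
typedef struct
{
	bool		present;
	bool		is_constant;
	bool		is_null;
	long		value;			/* meaningful only for non-null constants */
} ClauseStub;

static bool
limit_clause_needed(ClauseStub limit, ClauseStub offset)
{
	if (limit.present)
	{
		if (!limit.is_constant)
			return true;		/* can't prove it away without a hard constant */
		if (!limit.is_null)
			return true;		/* LIMIT <n>; a constant NULL means no limit */
	}
	if (offset.present)
	{
		if (!offset.is_constant)
			return true;
		if (!offset.is_null && offset.value != 0)
			return true;		/* a constant zero or NULL offset is a no-op */
	}
	return false;
}

int
main(void)
{
	ClauseStub	none = {false, false, false, 0};
	ClauseStub	limit_ten = {true, true, false, 10};
	ClauseStub	offset_zero = {true, true, false, 0};
	ClauseStub	offset_param = {true, false, false, 0};

	printf("LIMIT 10:       %d\n", limit_clause_needed(limit_ten, none));
	printf("OFFSET 0 only:  %d\n", limit_clause_needed(none, offset_zero));
	printf("OFFSET $1 only: %d\n", limit_clause_needed(none, offset_param));
	return 0;
}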
*/ -static bool +bool limit_needed(Query *parse) { Node *node; @@ -2758,7 +2938,7 @@ remove_useless_groupby_columns(PlannerInfo *root) (list_length(parse->rtable) + 1)); foreach(lc, parse->groupClause) { - SortGroupClause *sgc = (SortGroupClause *) lfirst(lc); + SortGroupClause *sgc = lfirst_node(SortGroupClause, lc); TargetEntry *tle = get_sortgroupclause_tle(sgc, parse->targetList); Var *var = (Var *) tle->expr; @@ -2791,7 +2971,7 @@ remove_useless_groupby_columns(PlannerInfo *root) relid = 0; foreach(lc, parse->rtable) { - RangeTblEntry *rte = (RangeTblEntry *) lfirst(lc); + RangeTblEntry *rte = lfirst_node(RangeTblEntry, lc); Bitmapset *relattnos; Bitmapset *pkattnos; Oid constraintOid; @@ -2849,7 +3029,7 @@ remove_useless_groupby_columns(PlannerInfo *root) foreach(lc, parse->groupClause) { - SortGroupClause *sgc = (SortGroupClause *) lfirst(lc); + SortGroupClause *sgc = lfirst_node(SortGroupClause, lc); TargetEntry *tle = get_sortgroupclause_tle(sgc, parse->targetList); Var *var = (Var *) tle->expr; @@ -2924,11 +3104,11 @@ preprocess_groupclause(PlannerInfo *root, List *force) */ foreach(sl, parse->sortClause) { - SortGroupClause *sc = (SortGroupClause *) lfirst(sl); + SortGroupClause *sc = lfirst_node(SortGroupClause, sl); foreach(gl, parse->groupClause) { - SortGroupClause *gc = (SortGroupClause *) lfirst(gl); + SortGroupClause *gc = lfirst_node(SortGroupClause, gl); if (equal(gc, sc)) { @@ -2957,7 +3137,7 @@ preprocess_groupclause(PlannerInfo *root, List *force) */ foreach(gl, parse->groupClause) { - SortGroupClause *gc = (SortGroupClause *) lfirst(gl); + SortGroupClause *gc = lfirst_node(SortGroupClause, gl); if (list_member_ptr(new_groupclause, gc)) continue; /* it matched an ORDER BY item */ @@ -3057,7 +3237,7 @@ extract_rollup_sets(List *groupingSets) for_each_cell(lc, lc1) { - List *candidate = lfirst(lc); + List *candidate = (List *) lfirst(lc); Bitmapset *candidate_set = NULL; ListCell *lc2; int dup_of = 0; @@ -3214,7 +3394,7 @@ reorder_grouping_sets(List *groupingsets, List *sortclause) foreach(lc, groupingsets) { - List *candidate = lfirst(lc); + List *candidate = (List *) lfirst(lc); List *new_elems = list_difference_int(candidate, previous); GroupingSetData *gs = makeNode(GroupingSetData); @@ -3282,7 +3462,7 @@ standard_qp_callback(PlannerInfo *root, void *extra) /* We consider only the first (bottom) window in pathkeys logic */ if (activeWindows != NIL) { - WindowClause *wc = (WindowClause *) linitial(activeWindows); + WindowClause *wc = linitial_node(WindowClause, activeWindows); root->window_pathkeys = make_pathkeys_for_window(root, wc, @@ -3340,7 +3520,8 @@ standard_qp_callback(PlannerInfo *root, void *extra) * Estimate number of groups produced by grouping clauses (1 if not grouping) * * path_rows: number of output rows from scan/join step - * gsets: grouping set data, or NULL if not doing grouping sets + * gd: grouping sets data including list of grouping sets and their clauses + * target_list: target list containing group clause references * * If doing grouping sets, we also annotate the gsets data with the estimates * for each set and each individual rollup list, with a view to later @@ -3349,7 +3530,8 @@ standard_qp_callback(PlannerInfo *root, void *extra) static double get_number_of_groups(PlannerInfo *root, double path_rows, - grouping_sets_data *gd) + grouping_sets_data *gd, + List *target_list) { Query *parse = root->parse; double dNumGroups; @@ -3370,18 +3552,18 @@ get_number_of_groups(PlannerInfo *root, foreach(lc, gd->rollups) { - RollupData *rollup = 
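The GROUP BY pruning touched above appears, going by the relattnos/pkattnos variables, to reduce to a per-relation subset test: once a table's primary-key columns are all grouped, its remaining grouped columns are functionally dependent and can be dropped. A bitmask sketch of that idea with made-up column numbers; this is an interpretation, not a transcription of remove_useless_groupby_columns().

#include <stdio.h>
#include <stdint.h>

/* Column-number sets for one table, as bitmasks (bit n = attribute n). */
typedef struct
{
	const char *relname;
	uint32_t	grouped_cols;	/* columns of this table in GROUP BY */
	uint32_t	pk_cols;		/* columns of its primary key */
} RelGroupInfo;

int
main(void)
{
	RelGroupInfo rels[] = {
		/* GROUP BY covers the whole single-column PK, plus columns 2 and 3 */
		{"orders", (1u << 1) | (1u << 2) | (1u << 3), 1u << 1},
		/* GROUP BY covers only part of a two-column PK */
		{"lines", (1u << 1) | (1u << 4), (1u << 1) | (1u << 2)},
	};

	for (size_t i = 0; i < sizeof(rels) / sizeof(rels[0]); i++)
	{
		RelGroupInfo *r = &rels[i];

		/* PK fully grouped? Then every other grouped column is redundant. */
		if ((r->pk_cols & r->grouped_cols) == r->pk_cols)
			printf("%s: keep only PK columns 0x%x, drop 0x%x\n",
				   r->relname, (unsigned) r->pk_cols,
				   (unsigned) (r->grouped_cols & ~r->pk_cols));
		else
			printf("%s: PK not fully grouped, keep all of 0x%x\n",
				   r->relname, (unsigned) r->grouped_cols);
	}
	return 0;
}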
lfirst(lc); + RollupData *rollup = lfirst_node(RollupData, lc); ListCell *lc; groupExprs = get_sortgrouplist_exprs(rollup->groupClause, - parse->targetList); + target_list); rollup->numGroups = 0.0; forboth(lc, rollup->gsets, lc2, rollup->gsets_data) { List *gset = (List *) lfirst(lc); - GroupingSetData *gs = lfirst(lc2); + GroupingSetData *gs = lfirst_node(GroupingSetData, lc2); double numGroups = estimate_num_groups(root, groupExprs, path_rows, @@ -3401,12 +3583,12 @@ get_number_of_groups(PlannerInfo *root, gd->dNumHashGroups = 0; groupExprs = get_sortgrouplist_exprs(parse->groupClause, - parse->targetList); + target_list); forboth(lc, gd->hash_sets_idx, lc2, gd->unsortable_sets) { List *gset = (List *) lfirst(lc); - GroupingSetData *gs = lfirst(lc2); + GroupingSetData *gs = lfirst_node(GroupingSetData, lc2); double numGroups = estimate_num_groups(root, groupExprs, path_rows, @@ -3423,7 +3605,7 @@ get_number_of_groups(PlannerInfo *root, { /* Plain GROUP BY */ groupExprs = get_sortgrouplist_exprs(parse->groupClause, - parse->targetList); + target_list); dNumGroups = estimate_num_groups(root, groupExprs, path_rows, NULL); @@ -3486,55 +3668,163 @@ estimate_hashagg_tablesize(Path *path, const AggClauseCosts *agg_costs, * create_grouping_paths * * Build a new upperrel containing Paths for grouping and/or aggregation. + * Along the way, we also build an upperrel for Paths which are partially + * grouped and/or aggregated. A partially grouped and/or aggregated path + * needs a FinalizeAggregate node to complete the aggregation. Currently, + * the only partially grouped paths we build are also partial paths; that + * is, they need a Gather and then a FinalizeAggregate. * * input_rel: contains the source-data Paths * target: the pathtarget for the result Paths to compute * agg_costs: cost info about all aggregates in query (in AGGSPLIT_SIMPLE mode) - * rollup_lists: list of grouping sets, or NIL if not doing grouping sets - * rollup_groupclauses: list of grouping clauses for grouping sets, - * or NIL if not doing grouping sets + * gd: grouping sets data including list of grouping sets and their clauses * * Note: all Paths in input_rel are expected to return the target computed * by make_group_input_target. - * - * We need to consider sorted and hashed aggregation in the same function, - * because otherwise (1) it would be harder to throw an appropriate error - * message if neither way works, and (2) we should not allow hashtable size - * considerations to dissuade us from using hashing if sorting is not possible. */ static RelOptInfo * create_grouping_paths(PlannerInfo *root, RelOptInfo *input_rel, PathTarget *target, + bool target_parallel_safe, const AggClauseCosts *agg_costs, grouping_sets_data *gd) { Query *parse = root->parse; - Path *cheapest_path = input_rel->cheapest_total_path; RelOptInfo *grouped_rel; - PathTarget *partial_grouping_target = NULL; - AggClauseCosts agg_partial_costs; /* parallel only */ - AggClauseCosts agg_final_costs; /* parallel only */ - Size hashaggtablesize; - double dNumGroups; - double dNumPartialGroups = 0; - bool can_hash; - bool can_sort; - bool try_parallel_aggregation; + RelOptInfo *partially_grouped_rel; - ListCell *lc; + /* + * Create grouping relation to hold fully aggregated grouping and/or + * aggregation paths. + */ + grouped_rel = make_grouping_rel(root, input_rel, target, + target_parallel_safe, parse->havingQual); + + /* + * Create either paths for a degenerate grouping or paths for ordinary + * grouping, as appropriate. 
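As a plain illustration of why a partially aggregated path still needs a FinalizeAggregate on top, per the new header comment above: each worker produces only per-group transition state, and the real result comes from combining and finalizing the gathered states. A standalone sketch for avg(), with fixed arrays standing in for the workers' input chunks.

#include <stdio.h>

/* Partial state for avg(): the classic (sum, count) transition pair. */
typedef struct
{
	double		sum;
	long		count;
} AvgPartial;

/* "Partial aggregate": each worker folds its chunk into a local state. */
static AvgPartial
partial_avg(const double *vals, int n)
{
	AvgPartial	st = {0.0, 0};

	for (int i = 0; i < n; i++)
	{
		st.sum += vals[i];
		st.count++;
	}
	return st;
}

/* "Finalize aggregate": combine the gathered partial states, then finish. */
static double
finalize_avg(const AvgPartial *parts, int nparts)
{
	AvgPartial	total = {0.0, 0};

	for (int i = 0; i < nparts; i++)
	{
		total.sum += parts[i].sum;
		total.count += parts[i].count;
	}
	return total.count ? total.sum / total.count : 0.0;
}

int
main(void)
{
	double		chunk_a[] = {1.0, 2.0, 3.0};
	double		chunk_b[] = {10.0, 20.0};
	AvgPartial	parts[2];

	parts[0] = partial_avg(chunk_a, 3);
	parts[1] = partial_avg(chunk_b, 2);

	/* Averaging the two partial averages would be wrong; finalizing is not. */
	printf("avg = %.2f\n", finalize_avg(parts, 2));
	return 0;
}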
+ */ + if (is_degenerate_grouping(root)) + create_degenerate_grouping_paths(root, input_rel, grouped_rel); + else + { + int flags = 0; + GroupPathExtraData extra; + + /* + * Determine whether it's possible to perform sort-based + * implementations of grouping. (Note that if groupClause is empty, + * grouping_is_sortable() is trivially true, and all the + * pathkeys_contained_in() tests will succeed too, so that we'll + * consider every surviving input path.) + * + * If we have grouping sets, we might be able to sort some but not all + * of them; in this case, we need can_sort to be true as long as we + * must consider any sorted-input plan. + */ + if ((gd && gd->rollups != NIL) + || grouping_is_sortable(parse->groupClause)) + flags |= GROUPING_CAN_USE_SORT; + + /* + * Determine whether we should consider hash-based implementations of + * grouping. + * + * Hashed aggregation only applies if we're grouping. If we have + * grouping sets, some groups might be hashable but others not; in + * this case we set can_hash true as long as there is nothing globally + * preventing us from hashing (and we should therefore consider plans + * with hashes). + * + * Executor doesn't support hashed aggregation with DISTINCT or ORDER + * BY aggregates. (Doing so would imply storing *all* the input + * values in the hash table, and/or running many sorts in parallel, + * either of which seems like a certain loser.) We similarly don't + * support ordered-set aggregates in hashed aggregation, but that case + * is also included in the numOrderedAggs count. + * + * Note: grouping_is_hashable() is much more expensive to check than + * the other gating conditions, so we want to do it last. + */ + if ((parse->groupClause != NIL && + agg_costs->numOrderedAggs == 0 && + (gd ? gd->any_hashable : grouping_is_hashable(parse->groupClause)))) + flags |= GROUPING_CAN_USE_HASH; + + /* + * Determine whether partial aggregation is possible. + */ + if (can_partial_agg(root, agg_costs)) + flags |= GROUPING_CAN_PARTIAL_AGG; + + extra.flags = flags; + extra.target_parallel_safe = target_parallel_safe; + extra.havingQual = parse->havingQual; + extra.targetList = parse->targetList; + extra.partial_costs_set = false; + + /* + * Determine whether partitionwise aggregation is in theory possible. + * It can be disabled by the user, and for now, we don't try to + * support grouping sets. create_ordinary_grouping_paths() will check + * additional conditions, such as whether input_rel is partitioned. + */ + if (enable_partitionwise_aggregate && !parse->groupingSets) + extra.patype = PARTITIONWISE_AGGREGATE_FULL; + else + extra.patype = PARTITIONWISE_AGGREGATE_NONE; + + create_ordinary_grouping_paths(root, input_rel, grouped_rel, + agg_costs, gd, &extra, + &partially_grouped_rel); + } + + set_cheapest(grouped_rel); + return grouped_rel; +} + +/* + * make_grouping_rel + * + * Create a new grouping rel and set basic properties. + * + * input_rel represents the underlying scan/join relation. + * target is the output expected from the grouping relation. 
+ */ +static RelOptInfo * +make_grouping_rel(PlannerInfo *root, RelOptInfo *input_rel, + PathTarget *target, bool target_parallel_safe, + Node *havingQual) +{ + RelOptInfo *grouped_rel; - /* For now, do all work in the (GROUP_AGG, NULL) upperrel */ - grouped_rel = fetch_upper_rel(root, UPPERREL_GROUP_AGG, NULL); + if (IS_OTHER_REL(input_rel)) + { + grouped_rel = fetch_upper_rel(root, UPPERREL_GROUP_AGG, + input_rel->relids); + grouped_rel->reloptkind = RELOPT_OTHER_UPPER_REL; + } + else + { + /* + * By tradition, the relids set for the main grouping relation is + * NULL. (This could be changed, but might require adjustments + * elsewhere.) + */ + grouped_rel = fetch_upper_rel(root, UPPERREL_GROUP_AGG, NULL); + } + + /* Set target. */ + grouped_rel->reltarget = target; /* * If the input relation is not parallel-safe, then the grouped relation * can't be parallel-safe, either. Otherwise, it's parallel-safe if the * target list and HAVING quals are parallel-safe. */ - if (input_rel->consider_parallel && - is_parallel_safe(root, (Node *) target->exprs) && - is_parallel_safe(root, (Node *) parse->havingQual)) + if (input_rel->consider_parallel && target_parallel_safe && + is_parallel_safe(root, (Node *) havingQual)) grouped_rel->consider_parallel = true; /* @@ -3545,555 +3835,208 @@ create_grouping_paths(PlannerInfo *root, grouped_rel->useridiscurrent = input_rel->useridiscurrent; grouped_rel->fdwroutine = input_rel->fdwroutine; - /* - * Check for degenerate grouping. - */ - if ((root->hasHavingQual || parse->groupingSets) && - !parse->hasAggs && parse->groupClause == NIL) + return grouped_rel; +} + +/* + * is_degenerate_grouping + * + * A degenerate grouping is one in which the query has a HAVING qual and/or + * grouping sets, but no aggregates and no GROUP BY (which implies that the + * grouping sets are all empty). + */ +static bool +is_degenerate_grouping(PlannerInfo *root) +{ + Query *parse = root->parse; + + return (root->hasHavingQual || parse->groupingSets) && + !parse->hasAggs && parse->groupClause == NIL; +} + +/* + * create_degenerate_grouping_paths + * + * When the grouping is degenerate (see is_degenerate_grouping), we are + * supposed to emit either zero or one row for each grouping set depending on + * whether HAVING succeeds. Furthermore, there cannot be any variables in + * either HAVING or the targetlist, so we actually do not need the FROM table + * at all! We can just throw away the plan-so-far and generate a Result node. + * This is a sufficiently unusual corner case that it's not worth contorting + * the structure of this module to avoid having to generate the earlier paths + * in the first place. + */ +static void +create_degenerate_grouping_paths(PlannerInfo *root, RelOptInfo *input_rel, + RelOptInfo *grouped_rel) +{ + Query *parse = root->parse; + int nrows; + Path *path; + + nrows = list_length(parse->groupingSets); + if (nrows > 1) { /* - * We have a HAVING qual and/or grouping sets, but no aggregates and - * no GROUP BY (which implies that the grouping sets are all empty). - * - * This is a degenerate case in which we are supposed to emit either - * zero or one row for each grouping set depending on whether HAVING - * succeeds. Furthermore, there cannot be any variables in either - * HAVING or the targetlist, so we actually do not need the FROM table - * at all! We can just throw away the plan-so-far and generate a - * Result node. 
This is a sufficiently unusual corner case that it's - * not worth contorting the structure of this module to avoid having - * to generate the earlier paths in the first place. + * Doesn't seem worthwhile writing code to cons up a generate_series + * or a values scan to emit multiple rows. Instead just make N clones + * and append them. (With a volatile HAVING clause, this means you + * might get between 0 and N output rows. Offhand I think that's + * desired.) */ - int nrows = list_length(parse->groupingSets); - Path *path; + List *paths = NIL; - if (nrows > 1) + while (--nrows >= 0) { - /* - * Doesn't seem worthwhile writing code to cons up a - * generate_series or a values scan to emit multiple rows. Instead - * just make N clones and append them. (With a volatile HAVING - * clause, this means you might get between 0 and N output rows. - * Offhand I think that's desired.) - */ - List *paths = NIL; - - while (--nrows >= 0) - { - path = (Path *) - create_result_path(root, grouped_rel, - target, - (List *) parse->havingQual); - paths = lappend(paths, path); - } - path = (Path *) - create_append_path(grouped_rel, - paths, - NULL, - 0, - NIL); - path->pathtarget = target; - } - else - { - /* No grouping sets, or just one, so one output row */ path = (Path *) create_result_path(root, grouped_rel, - target, + grouped_rel->reltarget, (List *) parse->havingQual); + paths = lappend(paths, path); } + path = (Path *) + create_append_path(root, + grouped_rel, + paths, + NIL, + NULL, + 0, + false, + NIL, + -1); + } + else + { + /* No grouping sets, or just one, so one output row */ + path = (Path *) + create_result_path(root, grouped_rel, + grouped_rel->reltarget, + (List *) parse->havingQual); + } - add_path(grouped_rel, path); - - /* No need to consider any other alternatives. */ - set_cheapest(grouped_rel); + add_path(grouped_rel, path); +} - return grouped_rel; - } +/* + * create_ordinary_grouping_paths + * + * Create grouping paths for the ordinary (that is, non-degenerate) case. + * + * We need to consider sorted and hashed aggregation in the same function, + * because otherwise (1) it would be harder to throw an appropriate error + * message if neither way works, and (2) we should not allow hashtable size + * considerations to dissuade us from using hashing if sorting is not possible. + * + * *partially_grouped_rel_p will be set to the partially grouped rel which this + * function creates, or to NULL if it doesn't create one. + */ +static void +create_ordinary_grouping_paths(PlannerInfo *root, RelOptInfo *input_rel, + RelOptInfo *grouped_rel, + const AggClauseCosts *agg_costs, + grouping_sets_data *gd, + GroupPathExtraData *extra, + RelOptInfo **partially_grouped_rel_p) +{ + Path *cheapest_path = input_rel->cheapest_total_path; + RelOptInfo *partially_grouped_rel = NULL; + double dNumGroups; + PartitionwiseAggregateType patype = PARTITIONWISE_AGGREGATE_NONE; /* - * Estimate number of groups. + * If this is the topmost grouping relation or if the parent relation is + * doing some form of partitionwise aggregation, then we may be able to do + * it at this level also. However, if the input relation is not + * partitioned, partitionwise aggregate is impossible, and if it is dummy, + * partitionwise aggregate is pointless. */ - dNumGroups = get_number_of_groups(root, - cheapest_path->rows, - gd); - - /* - * Determine whether it's possible to perform sort-based implementations - * of grouping. 
(Note that if groupClause is empty, - * grouping_is_sortable() is trivially true, and all the - * pathkeys_contained_in() tests will succeed too, so that we'll consider - * every surviving input path.) - * - * If we have grouping sets, we might be able to sort some but not all of - * them; in this case, we need can_sort to be true as long as we must - * consider any sorted-input plan. - */ - can_sort = (gd && gd->rollups != NIL) - || grouping_is_sortable(parse->groupClause); - - /* - * Determine whether we should consider hash-based implementations of - * grouping. - * - * Hashed aggregation only applies if we're grouping. If we have grouping - * sets, some groups might be hashable but others not; in this case we set - * can_hash true as long as there is nothing globally preventing us from - * hashing (and we should therefore consider plans with hashes). - * - * Executor doesn't support hashed aggregation with DISTINCT or ORDER BY - * aggregates. (Doing so would imply storing *all* the input values in - * the hash table, and/or running many sorts in parallel, either of which - * seems like a certain loser.) We similarly don't support ordered-set - * aggregates in hashed aggregation, but that case is also included in the - * numOrderedAggs count. - * - * Note: grouping_is_hashable() is much more expensive to check than the - * other gating conditions, so we want to do it last. - */ - can_hash = (parse->groupClause != NIL && - agg_costs->numOrderedAggs == 0 && - (gd ? gd->any_hashable : grouping_is_hashable(parse->groupClause))); - - /* - * If grouped_rel->consider_parallel is true, then paths that we generate - * for this grouping relation could be run inside of a worker, but that - * doesn't mean we can actually use the PartialAggregate/FinalizeAggregate - * execution strategy. Figure that out. - */ - if (!grouped_rel->consider_parallel) - { - /* Not even parallel-safe. */ - try_parallel_aggregation = false; - } - else if (input_rel->partial_pathlist == NIL) - { - /* Nothing to use as input for partial aggregate. */ - try_parallel_aggregation = false; - } - else if (!parse->hasAggs && parse->groupClause == NIL) + if (extra->patype != PARTITIONWISE_AGGREGATE_NONE && + input_rel->part_scheme && input_rel->part_rels && + !IS_DUMMY_REL(input_rel)) { /* - * We don't know how to do parallel aggregation unless we have either - * some aggregates or a grouping clause. + * If this is the topmost relation or if the parent relation is doing + * full partitionwise aggregation, then we can do full partitionwise + * aggregation provided that the GROUP BY clause contains all of the + * partitioning columns at this level. Otherwise, we can do at most + * partial partitionwise aggregation. But if partial aggregation is + * not supported in general then we can't use it for partitionwise + * aggregation either. */ - try_parallel_aggregation = false; - } - else if (parse->groupingSets) - { - /* We don't know how to do grouping sets in parallel. */ - try_parallel_aggregation = false; - } - else if (agg_costs->hasNonPartial || agg_costs->hasNonSerial) - { - /* Insufficient support for partial mode. */ - try_parallel_aggregation = false; - } - else - { - /* Everything looks good. 
*/ - try_parallel_aggregation = true; + if (extra->patype == PARTITIONWISE_AGGREGATE_FULL && + group_by_has_partkey(input_rel, extra->targetList, + root->parse->groupClause)) + patype = PARTITIONWISE_AGGREGATE_FULL; + else if ((extra->flags & GROUPING_CAN_PARTIAL_AGG) != 0) + patype = PARTITIONWISE_AGGREGATE_PARTIAL; + else + patype = PARTITIONWISE_AGGREGATE_NONE; } /* * Before generating paths for grouped_rel, we first generate any possible - * partial paths; that way, later code can easily consider both parallel - * and non-parallel approaches to grouping. Note that the partial paths - * we generate here are also partially aggregated, so simply pushing a - * Gather node on top is insufficient to create a final path, as would be - * the case for a scan/join rel. + * partially grouped paths; that way, later code can easily consider both + * parallel and non-parallel approaches to grouping. */ - if (try_parallel_aggregation) + if ((extra->flags & GROUPING_CAN_PARTIAL_AGG) != 0) { - Path *cheapest_partial_path = linitial(input_rel->partial_pathlist); - - /* - * Build target list for partial aggregate paths. These paths cannot - * just emit the same tlist as regular aggregate paths, because (1) we - * must include Vars and Aggrefs needed in HAVING, which might not - * appear in the result tlist, and (2) the Aggrefs must be set in - * partial mode. - */ - partial_grouping_target = make_partial_grouping_target(root, target); - - /* Estimate number of partial groups. */ - dNumPartialGroups = get_number_of_groups(root, - cheapest_partial_path->rows, - gd); + bool force_rel_creation; /* - * Collect statistics about aggregates for estimating costs of - * performing aggregation in parallel. + * If we're doing partitionwise aggregation at this level, force + * creation of a partially_grouped_rel so we can add partitionwise + * paths to it. */ - MemSet(&agg_partial_costs, 0, sizeof(AggClauseCosts)); - MemSet(&agg_final_costs, 0, sizeof(AggClauseCosts)); - if (parse->hasAggs) - { - /* partial phase */ - get_agg_clause_costs(root, (Node *) partial_grouping_target->exprs, - AGGSPLIT_INITIAL_SERIAL, - &agg_partial_costs); - - /* final phase */ - get_agg_clause_costs(root, (Node *) target->exprs, - AGGSPLIT_FINAL_DESERIAL, - &agg_final_costs); - get_agg_clause_costs(root, parse->havingQual, - AGGSPLIT_FINAL_DESERIAL, - &agg_final_costs); - } - - if (can_sort) - { - /* This was checked before setting try_parallel_aggregation */ - Assert(parse->hasAggs || parse->groupClause); - - /* - * Use any available suitably-sorted path as input, and also - * consider sorting the cheapest partial path. - */ - foreach(lc, input_rel->partial_pathlist) - { - Path *path = (Path *) lfirst(lc); - bool is_sorted; - - is_sorted = pathkeys_contained_in(root->group_pathkeys, - path->pathkeys); - if (path == cheapest_partial_path || is_sorted) - { - /* Sort the cheapest partial path, if it isn't already */ - if (!is_sorted) - path = (Path *) create_sort_path(root, - grouped_rel, - path, - root->group_pathkeys, - -1.0); - - if (parse->hasAggs) - add_partial_path(grouped_rel, (Path *) - create_agg_path(root, - grouped_rel, - path, - partial_grouping_target, - parse->groupClause ? 
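The three-way patype choice just above reduces to a small decision function. The sketch below restates it over plain arrays, with a naive "GROUP BY lists every partition key column" test standing in for group_by_has_partkey(); names and data are invented for the example.

#include <stdio.h>
#include <stdbool.h>

typedef enum
{
	PWAGG_NONE,
	PWAGG_PARTIAL,
	PWAGG_FULL
} PartitionwiseAggChoice;

/* Does every partition key column appear among the grouped columns? */
static bool
groupby_covers_partkey(const int *partkey, int npartkey,
					   const int *groupcols, int ngroupcols)
{
	for (int i = 0; i < npartkey; i++)
	{
		bool		found = false;

		for (int j = 0; j < ngroupcols; j++)
			if (groupcols[j] == partkey[i])
			{
				found = true;
				break;
			}
		if (!found)
			return false;
	}
	return true;
}

static PartitionwiseAggChoice
choose_partitionwise_agg(bool parent_allows_full, bool partial_agg_possible,
						 const int *partkey, int npartkey,
						 const int *groupcols, int ngroupcols)
{
	if (parent_allows_full &&
		groupby_covers_partkey(partkey, npartkey, groupcols, ngroupcols))
		return PWAGG_FULL;		/* each partition yields disjoint groups */
	if (partial_agg_possible)
		return PWAGG_PARTIAL;	/* per-partition partials, finalized above */
	return PWAGG_NONE;
}

int
main(void)
{
	int			partkey[] = {1};
	int			gb_with_key[] = {1, 3};
	int			gb_without_key[] = {3};

	printf("GROUP BY includes the key: %d\n",
		   choose_partitionwise_agg(true, true, partkey, 1, gb_with_key, 2));
	printf("GROUP BY misses the key:   %d\n",
		   choose_partitionwise_agg(true, true, partkey, 1, gb_without_key, 1));
	return 0;
}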
AGG_SORTED : AGG_PLAIN, - AGGSPLIT_INITIAL_SERIAL, - parse->groupClause, - NIL, - &agg_partial_costs, - dNumPartialGroups)); - else - add_partial_path(grouped_rel, (Path *) - create_group_path(root, - grouped_rel, - path, - partial_grouping_target, - parse->groupClause, - NIL, - dNumPartialGroups)); - } - } - } - - if (can_hash) - { - /* Checked above */ - Assert(parse->hasAggs || parse->groupClause); - - hashaggtablesize = - estimate_hashagg_tablesize(cheapest_partial_path, - &agg_partial_costs, - dNumPartialGroups); + force_rel_creation = (patype == PARTITIONWISE_AGGREGATE_PARTIAL); - /* - * Tentatively produce a partial HashAgg Path, depending on if it - * looks as if the hash table will fit in work_mem. - */ - if (hashaggtablesize < work_mem * 1024L) - { - add_partial_path(grouped_rel, (Path *) - create_agg_path(root, - grouped_rel, - cheapest_partial_path, - partial_grouping_target, - AGG_HASHED, - AGGSPLIT_INITIAL_SERIAL, - parse->groupClause, - NIL, - &agg_partial_costs, - dNumPartialGroups)); - } - } + partially_grouped_rel = + create_partial_grouping_paths(root, + grouped_rel, + input_rel, + gd, + extra, + force_rel_creation); } - /* Build final grouping paths */ - if (can_sort) - { - /* - * Use any available suitably-sorted path as input, and also consider - * sorting the cheapest-total path. - */ - foreach(lc, input_rel->pathlist) - { - Path *path = (Path *) lfirst(lc); - bool is_sorted; - - is_sorted = pathkeys_contained_in(root->group_pathkeys, - path->pathkeys); - if (path == cheapest_path || is_sorted) - { - /* Sort the cheapest-total path if it isn't already sorted */ - if (!is_sorted) - path = (Path *) create_sort_path(root, - grouped_rel, - path, - root->group_pathkeys, - -1.0); - - /* Now decide what to stick atop it */ - if (parse->groupingSets) - { - consider_groupingsets_paths(root, grouped_rel, - path, true, can_hash, target, - gd, agg_costs, dNumGroups); - } - else if (parse->hasAggs) - { - /* - * We have aggregation, possibly with plain GROUP BY. Make - * an AggPath. - */ - add_path(grouped_rel, (Path *) - create_agg_path(root, - grouped_rel, - path, - target, - parse->groupClause ? AGG_SORTED : AGG_PLAIN, - AGGSPLIT_SIMPLE, - parse->groupClause, - (List *) parse->havingQual, - agg_costs, - dNumGroups)); - } - else if (parse->groupClause) - { - /* - * We have GROUP BY without aggregation or grouping sets. - * Make a GroupPath. - */ - add_path(grouped_rel, (Path *) - create_group_path(root, - grouped_rel, - path, - target, - parse->groupClause, - (List *) parse->havingQual, - dNumGroups)); - } - else - { - /* Other cases should have been handled above */ - Assert(false); - } - } - } - - /* - * Now generate a complete GroupAgg Path atop of the cheapest partial - * path. We can do this using either Gather or Gather Merge. - */ - if (grouped_rel->partial_pathlist) - { - Path *path = (Path *) linitial(grouped_rel->partial_pathlist); - double total_groups = path->rows * path->parallel_workers; - - path = (Path *) create_gather_path(root, - grouped_rel, - path, - partial_grouping_target, - NULL, - &total_groups); - - /* - * Since Gather's output is always unsorted, we'll need to sort, - * unless there's no GROUP BY clause or a degenerate (constant) - * one, in which case there will only be a single group. - */ - if (root->group_pathkeys) - path = (Path *) create_sort_path(root, - grouped_rel, - path, - root->group_pathkeys, - -1.0); + /* Set out parameter. 
*/ + *partially_grouped_rel_p = partially_grouped_rel; - if (parse->hasAggs) - add_path(grouped_rel, (Path *) - create_agg_path(root, - grouped_rel, - path, - target, - parse->groupClause ? AGG_SORTED : AGG_PLAIN, - AGGSPLIT_FINAL_DESERIAL, - parse->groupClause, - (List *) parse->havingQual, - &agg_final_costs, - dNumGroups)); - else - add_path(grouped_rel, (Path *) - create_group_path(root, - grouped_rel, - path, - target, - parse->groupClause, - (List *) parse->havingQual, - dNumGroups)); + /* Apply partitionwise aggregation technique, if possible. */ + if (patype != PARTITIONWISE_AGGREGATE_NONE) + create_partitionwise_grouping_paths(root, input_rel, grouped_rel, + partially_grouped_rel, agg_costs, + gd, patype, extra); - /* - * The point of using Gather Merge rather than Gather is that it - * can preserve the ordering of the input path, so there's no - * reason to try it unless (1) it's possible to produce more than - * one output row and (2) we want the output path to be ordered. - */ - if (parse->groupClause != NIL && root->group_pathkeys != NIL) - { - foreach(lc, grouped_rel->partial_pathlist) - { - Path *subpath = (Path *) lfirst(lc); - Path *gmpath; - double total_groups; + /* If we are doing partial aggregation only, return. */ + if (extra->patype == PARTITIONWISE_AGGREGATE_PARTIAL) + { + Assert(partially_grouped_rel); - /* - * It's useful to consider paths that are already properly - * ordered for Gather Merge, because those don't need a - * sort. It's also useful to consider the cheapest path, - * because sorting it in parallel and then doing Gather - * Merge may be better than doing an unordered Gather - * followed by a sort. But there's no point in - * considering non-cheapest paths that aren't already - * sorted correctly. - */ - if (path != subpath && - !pathkeys_contained_in(root->group_pathkeys, - subpath->pathkeys)) - continue; + if (partially_grouped_rel->pathlist) + set_cheapest(partially_grouped_rel); - total_groups = subpath->rows * subpath->parallel_workers; - - gmpath = (Path *) - create_gather_merge_path(root, - grouped_rel, - subpath, - partial_grouping_target, - root->group_pathkeys, - NULL, - &total_groups); - - if (parse->hasAggs) - add_path(grouped_rel, (Path *) - create_agg_path(root, - grouped_rel, - gmpath, - target, - parse->groupClause ? AGG_SORTED : AGG_PLAIN, - AGGSPLIT_FINAL_DESERIAL, - parse->groupClause, - (List *) parse->havingQual, - &agg_final_costs, - dNumGroups)); - else - add_path(grouped_rel, (Path *) - create_group_path(root, - grouped_rel, - gmpath, - target, - parse->groupClause, - (List *) parse->havingQual, - dNumGroups)); - } - } - } + return; } - if (can_hash) + /* Gather any partially grouped partial paths. */ + if (partially_grouped_rel && partially_grouped_rel->partial_pathlist) { - if (parse->groupingSets) - { - /* - * Try for a hash-only groupingsets path over unsorted input. - */ - consider_groupingsets_paths(root, grouped_rel, - cheapest_path, false, true, target, - gd, agg_costs, dNumGroups); - } - else - { - hashaggtablesize = estimate_hashagg_tablesize(cheapest_path, - agg_costs, - dNumGroups); - - /* - * Provided that the estimated size of the hashtable does not - * exceed work_mem, we'll generate a HashAgg Path, although if we - * were unable to sort above, then we'd better generate a Path, so - * that we at least have one. - */ - if (hashaggtablesize < work_mem * 1024L || - grouped_rel->pathlist == NIL) - { - /* - * We just need an Agg over the cheapest-total input path, - * since input order won't matter. 
- */ - add_path(grouped_rel, (Path *) - create_agg_path(root, grouped_rel, - cheapest_path, - target, - AGG_HASHED, - AGGSPLIT_SIMPLE, - parse->groupClause, - (List *) parse->havingQual, - agg_costs, - dNumGroups)); - } - } - - /* - * Generate a HashAgg Path atop of the cheapest partial path. Once - * again, we'll only do this if it looks as though the hash table - * won't exceed work_mem. - */ - if (grouped_rel->partial_pathlist) - { - Path *path = (Path *) linitial(grouped_rel->partial_pathlist); - - hashaggtablesize = estimate_hashagg_tablesize(path, - &agg_final_costs, - dNumGroups); - - if (hashaggtablesize < work_mem * 1024L) - { - double total_groups = path->rows * path->parallel_workers; + gather_grouping_paths(root, partially_grouped_rel); + set_cheapest(partially_grouped_rel); + } - path = (Path *) create_gather_path(root, - grouped_rel, - path, - partial_grouping_target, - NULL, - &total_groups); + /* + * Estimate number of groups. + */ + dNumGroups = get_number_of_groups(root, + cheapest_path->rows, + gd, + extra->targetList); - add_path(grouped_rel, (Path *) - create_agg_path(root, - grouped_rel, - path, - target, - AGG_HASHED, - AGGSPLIT_FINAL_DESERIAL, - parse->groupClause, - (List *) parse->havingQual, - &agg_final_costs, - dNumGroups)); - } - } - } + /* Build final grouping paths */ + add_paths_to_grouping_rel(root, input_rel, grouped_rel, + partially_grouped_rel, agg_costs, gd, + dNumGroups, extra); /* Give a helpful error if we failed to find any implementation */ if (grouped_rel->pathlist == NIL) @@ -4109,30 +4052,16 @@ create_grouping_paths(PlannerInfo *root, if (grouped_rel->fdwroutine && grouped_rel->fdwroutine->GetForeignUpperPaths) grouped_rel->fdwroutine->GetForeignUpperPaths(root, UPPERREL_GROUP_AGG, - input_rel, grouped_rel); + input_rel, grouped_rel, + extra); /* Let extensions possibly add some more paths */ if (create_upper_paths_hook) (*create_upper_paths_hook) (root, UPPERREL_GROUP_AGG, - input_rel, grouped_rel); - - /* Now choose the best path(s) */ - set_cheapest(grouped_rel); - - /* - * We've been using the partial pathlist for the grouped relation to hold - * partially aggregated paths, but that's actually a little bit bogus - * because it's unsafe for later planning stages -- like ordered_rel --- - * to get the idea that they can use these partial paths as if they didn't - * need a FinalizeAggregate step. Zap the partial pathlist at this stage - * so we don't get confused. - */ - grouped_rel->partial_pathlist = NIL; - - return grouped_rel; + input_rel, grouped_rel, + extra); } - /* * For a given input path, consider the possible ways of doing grouping sets on * it, by combinations of hashing and sorting. This can be called multiple @@ -4145,7 +4074,6 @@ consider_groupingsets_paths(PlannerInfo *root, Path *path, bool is_sorted, bool can_hash, - PathTarget *target, grouping_sets_data *gd, const AggClauseCosts *agg_costs, double dNumGroups) @@ -4178,9 +4106,30 @@ consider_groupingsets_paths(PlannerInfo *root, Assert(can_hash); - if (pathkeys_contained_in(root->group_pathkeys, path->pathkeys)) + /* + * If the input is coincidentally sorted usefully (which can happen + * even if is_sorted is false, since that only means that our caller + * has set up the sorting for us), then save some hashtable space by + * making use of that. But we need to watch out for degenerate cases: + * + * 1) If there are any empty grouping sets, then group_pathkeys might + * be NIL if all non-empty grouping sets are unsortable. 
In this case, + * there will be a rollup containing only empty groups, and the + * pathkeys_contained_in test is vacuously true; this is ok. + * + * XXX: the above relies on the fact that group_pathkeys is generated + * from the first rollup. If we add the ability to consider multiple + * sort orders for grouping input, this assumption might fail. + * + * 2) If there are no empty sets and only unsortable sets, then the + * rollups list will be empty (and thus l_start == NULL), and + * group_pathkeys will be NIL; we must ensure that the vacuously-true + * pathkeys_contain_in test doesn't cause us to crash. + */ + if (l_start != NULL && + pathkeys_contained_in(root->group_pathkeys, path->pathkeys)) { - unhashed_rollup = lfirst(l_start); + unhashed_rollup = lfirst_node(RollupData, l_start); exclude_groups = unhashed_rollup->numGroups; l_start = lnext(l_start); } @@ -4205,7 +4154,7 @@ consider_groupingsets_paths(PlannerInfo *root, for_each_cell(lc, l_start) { - RollupData *rollup = lfirst(lc); + RollupData *rollup = lfirst_node(RollupData, lc); /* * If we find an unhashable rollup that's not been skipped by the @@ -4225,7 +4174,7 @@ consider_groupingsets_paths(PlannerInfo *root, } foreach(lc, sets_data) { - GroupingSetData *gs = lfirst(lc); + GroupingSetData *gs = lfirst_node(GroupingSetData, lc); List *gset = gs->set; RollupData *rollup; @@ -4287,7 +4236,6 @@ consider_groupingsets_paths(PlannerInfo *root, create_groupingsets_path(root, grouped_rel, path, - target, (List *) parse->havingQual, strat, new_rollups, @@ -4367,7 +4315,7 @@ consider_groupingsets_paths(PlannerInfo *root, i = 0; for_each_cell(lc, lnext(list_head(gd->rollups))) { - RollupData *rollup = lfirst(lc); + RollupData *rollup = lfirst_node(RollupData, lc); if (rollup->hashable) { @@ -4401,7 +4349,7 @@ consider_groupingsets_paths(PlannerInfo *root, i = 0; for_each_cell(lc, lnext(list_head(gd->rollups))) { - RollupData *rollup = lfirst(lc); + RollupData *rollup = lfirst_node(RollupData, lc); if (rollup->hashable) { @@ -4423,7 +4371,7 @@ consider_groupingsets_paths(PlannerInfo *root, foreach(lc, hash_sets) { - GroupingSetData *gs = lfirst(lc); + GroupingSetData *gs = lfirst_node(GroupingSetData, lc); RollupData *rollup = makeNode(RollupData); Assert(gs->set != NIL); @@ -4445,7 +4393,6 @@ consider_groupingsets_paths(PlannerInfo *root, create_groupingsets_path(root, grouped_rel, path, - target, (List *) parse->havingQual, AGG_MIXED, rollups, @@ -4462,7 +4409,6 @@ consider_groupingsets_paths(PlannerInfo *root, create_groupingsets_path(root, grouped_rel, path, - target, (List *) parse->havingQual, AGG_SORTED, gd->rollups, @@ -4489,6 +4435,7 @@ create_window_paths(PlannerInfo *root, RelOptInfo *input_rel, PathTarget *input_target, PathTarget *output_target, + bool output_target_parallel_safe, List *tlist, WindowFuncLists *wflists, List *activeWindows) @@ -4504,8 +4451,7 @@ create_window_paths(PlannerInfo *root, * can't be parallel-safe, either. Otherwise, we need to examine the * target list and active windows for non-parallel-safe constructs. 
*/ - if (input_rel->consider_parallel && - is_parallel_safe(root, (Node *) output_target->exprs) && + if (input_rel->consider_parallel && output_target_parallel_safe && is_parallel_safe(root, (Node *) activeWindows)) window_rel->consider_parallel = true; @@ -4545,12 +4491,13 @@ create_window_paths(PlannerInfo *root, if (window_rel->fdwroutine && window_rel->fdwroutine->GetForeignUpperPaths) window_rel->fdwroutine->GetForeignUpperPaths(root, UPPERREL_WINDOW, - input_rel, window_rel); + input_rel, window_rel, + NULL); /* Let extensions possibly add some more paths */ if (create_upper_paths_hook) (*create_upper_paths_hook) (root, UPPERREL_WINDOW, - input_rel, window_rel); + input_rel, window_rel, NULL); /* Now choose the best path(s) */ set_cheapest(window_rel); @@ -4602,7 +4549,7 @@ create_one_window_path(PlannerInfo *root, foreach(l, activeWindows) { - WindowClause *wc = (WindowClause *) lfirst(l); + WindowClause *wc = lfirst_node(WindowClause, l); List *window_pathkeys; window_pathkeys = make_pathkeys_for_window(root, @@ -4648,8 +4595,7 @@ create_one_window_path(PlannerInfo *root, path = (Path *) create_windowagg_path(root, window_rel, path, window_target, wflists->windowFuncs[wc->winref], - wc, - window_pathkeys); + wc); } add_path(window_rel, path); @@ -4849,12 +4795,13 @@ create_distinct_paths(PlannerInfo *root, if (distinct_rel->fdwroutine && distinct_rel->fdwroutine->GetForeignUpperPaths) distinct_rel->fdwroutine->GetForeignUpperPaths(root, UPPERREL_DISTINCT, - input_rel, distinct_rel); + input_rel, distinct_rel, + NULL); /* Let extensions possibly add some more paths */ if (create_upper_paths_hook) (*create_upper_paths_hook) (root, UPPERREL_DISTINCT, - input_rel, distinct_rel); + input_rel, distinct_rel, NULL); /* Now choose the best path(s) */ set_cheapest(distinct_rel); @@ -4880,6 +4827,7 @@ static RelOptInfo * create_ordered_paths(PlannerInfo *root, RelOptInfo *input_rel, PathTarget *target, + bool target_parallel_safe, double limit_tuples) { Path *cheapest_input_path = input_rel->cheapest_total_path; @@ -4894,8 +4842,7 @@ create_ordered_paths(PlannerInfo *root, * can't be parallel-safe, either. Otherwise, it's parallel-safe if the * target list is parallel-safe. */ - if (input_rel->consider_parallel && - is_parallel_safe(root, (Node *) target->exprs)) + if (input_rel->consider_parallel && target_parallel_safe) ordered_rel->consider_parallel = true; /* @@ -4965,14 +4912,15 @@ create_ordered_paths(PlannerInfo *root, ordered_rel, cheapest_partial_path, root->sort_pathkeys, - -1.0); + limit_tuples); total_groups = cheapest_partial_path->rows * cheapest_partial_path->parallel_workers; path = (Path *) create_gather_merge_path(root, ordered_rel, path, - target, root->sort_pathkeys, NULL, + path->pathtarget, + root->sort_pathkeys, NULL, &total_groups); /* Add projection step if needed */ @@ -4991,12 +4939,13 @@ create_ordered_paths(PlannerInfo *root, if (ordered_rel->fdwroutine && ordered_rel->fdwroutine->GetForeignUpperPaths) ordered_rel->fdwroutine->GetForeignUpperPaths(root, UPPERREL_ORDERED, - input_rel, ordered_rel); + input_rel, ordered_rel, + NULL); /* Let extensions possibly add some more paths */ if (create_upper_paths_hook) (*create_upper_paths_hook) (root, UPPERREL_ORDERED, - input_rel, ordered_rel); + input_rel, ordered_rel, NULL); /* * No need to bother with set_cheapest here; grouping_planner does not @@ -5120,10 +5069,12 @@ make_group_input_target(PlannerInfo *root, PathTarget *final_target) * these would be Vars that are grouped by or used in grouping expressions.) 
* * grouping_target is the tlist to be emitted by the topmost aggregation step. - * We get the HAVING clause out of *root. + * havingQual represents the HAVING clause. */ static PathTarget * -make_partial_grouping_target(PlannerInfo *root, PathTarget *grouping_target) +make_partial_grouping_target(PlannerInfo *root, + PathTarget *grouping_target, + Node *havingQual) { Query *parse = root->parse; PathTarget *partial_target; @@ -5165,8 +5116,8 @@ make_partial_grouping_target(PlannerInfo *root, PathTarget *grouping_target) /* * If there's a HAVING clause, we'll need the Vars/Aggrefs it uses, too. */ - if (parse->havingQual) - non_group_cols = lappend(non_group_cols, parse->havingQual); + if (havingQual) + non_group_cols = lappend(non_group_cols, havingQual); /* * Pull out all the Vars, PlaceHolderVars, and Aggrefs mentioned in @@ -5266,7 +5217,7 @@ postprocess_setop_tlist(List *new_tlist, List *orig_tlist) foreach(l, new_tlist) { - TargetEntry *new_tle = (TargetEntry *) lfirst(l); + TargetEntry *new_tle = lfirst_node(TargetEntry, l); TargetEntry *orig_tle; /* ignore resjunk columns in setop result */ @@ -5274,7 +5225,7 @@ postprocess_setop_tlist(List *new_tlist, List *orig_tlist) continue; Assert(orig_tlist_item != NULL); - orig_tle = (TargetEntry *) lfirst(orig_tlist_item); + orig_tle = lfirst_node(TargetEntry, orig_tlist_item); orig_tlist_item = lnext(orig_tlist_item); if (orig_tle->resjunk) /* should not happen */ elog(ERROR, "resjunk output columns are not implemented"); @@ -5294,65 +5245,117 @@ postprocess_setop_tlist(List *new_tlist, List *orig_tlist) static List * select_active_windows(PlannerInfo *root, WindowFuncLists *wflists) { - List *result; - List *actives; + List *windowClause = root->parse->windowClause; + List *result = NIL; ListCell *lc; + int nActive = 0; + WindowClauseSortData *actives = palloc(sizeof(WindowClauseSortData) + * list_length(windowClause)); - /* First, make a list of the active windows */ - actives = NIL; - foreach(lc, root->parse->windowClause) + /* First, construct an array of the active windows */ + foreach(lc, windowClause) { - WindowClause *wc = (WindowClause *) lfirst(lc); + WindowClause *wc = lfirst_node(WindowClause, lc); /* It's only active if wflists shows some related WindowFuncs */ Assert(wc->winref <= wflists->maxWinRef); - if (wflists->windowFuncs[wc->winref] != NIL) - actives = lappend(actives, wc); + if (wflists->windowFuncs[wc->winref] == NIL) + continue; + + actives[nActive].wc = wc; /* original clause */ + + /* + * For sorting, we want the list of partition keys followed by the + * list of sort keys. But pathkeys construction will remove duplicates + * between the two, so we can as well (even though we can't detect all + * of the duplicates, since some may come from ECs - that might mean + * we miss optimization chances here). We must, however, ensure that + * the order of entries is preserved with respect to the ones we do + * keep. + * + * partitionClause and orderClause had their own duplicates removed in + * parse analysis, so we're only concerned here with removing + * orderClause entries that also appear in partitionClause. + */ + actives[nActive].uniqueOrder = + list_concat_unique(list_copy(wc->partitionClause), + wc->orderClause); + nActive++; } /* - * Now, ensure that windows with identical partitioning/ordering clauses - * are adjacent in the list. This is required by the SQL standard, which - * says that only one sort is to be used for such windows, even if they - * are otherwise distinct (eg, different names or framing clauses). 
+ * Sort active windows by their partitioning/ordering clauses, ignoring + * any framing clauses, so that the windows that need the same sorting are + * adjacent in the list. When we come to generate paths, this will avoid + * inserting additional Sort nodes. * - * There is room to be much smarter here, for example detecting whether - * one window's sort keys are a prefix of another's (so that sorting for - * the latter would do for the former), or putting windows first that - * match a sort order available for the underlying query. For the moment - * we are content with meeting the spec. - */ - result = NIL; - while (actives != NIL) - { - WindowClause *wc = (WindowClause *) linitial(actives); - ListCell *prev; - ListCell *next; + * This is how we implement a specific requirement from the SQL standard, + * which says that when two or more windows are order-equivalent (i.e. + * have matching partition and order clauses, even if their names or + * framing clauses differ), then all peer rows must be presented in the + * same order in all of them. If we allowed multiple sort nodes for such + * cases, we'd risk having the peer rows end up in different orders in + * equivalent windows due to sort instability. (See General Rule 4 of + * in SQL2008 - SQL2016.) + * + * Additionally, if the entire list of clauses of one window is a prefix + * of another, put first the window with stronger sorting requirements. + * This way we will first sort for stronger window, and won't have to sort + * again for the weaker one. + */ + qsort(actives, nActive, sizeof(WindowClauseSortData), common_prefix_cmp); - /* Move wc from actives to result */ - actives = list_delete_first(actives); - result = lappend(result, wc); + /* build ordered list of the original WindowClause nodes */ + for (int i = 0; i < nActive; i++) + result = lappend(result, actives[i].wc); - /* Now move any matching windows from actives to result */ - prev = NULL; - for (lc = list_head(actives); lc; lc = next) - { - WindowClause *wc2 = (WindowClause *) lfirst(lc); + pfree(actives); - next = lnext(lc); - /* framing options are NOT to be compared here! */ - if (equal(wc->partitionClause, wc2->partitionClause) && - equal(wc->orderClause, wc2->orderClause)) - { - actives = list_delete_cell(actives, lc, prev); - result = lappend(result, wc2); - } - else - prev = lc; - } + return result; +} + +/* + * common_prefix_cmp + * QSort comparison function for WindowClauseSortData + * + * Sort the windows by the required sorting clauses. First, compare the sort + * clauses themselves. Second, if one window's clauses are a prefix of another + * one's clauses, put the window with more sort clauses first. 
+ */ +static int +common_prefix_cmp(const void *a, const void *b) +{ + const WindowClauseSortData *wcsa = a; + const WindowClauseSortData *wcsb = b; + ListCell *item_a; + ListCell *item_b; + + forboth(item_a, wcsa->uniqueOrder, item_b, wcsb->uniqueOrder) + { + SortGroupClause *sca = lfirst_node(SortGroupClause, item_a); + SortGroupClause *scb = lfirst_node(SortGroupClause, item_b); + + if (sca->tleSortGroupRef > scb->tleSortGroupRef) + return -1; + else if (sca->tleSortGroupRef < scb->tleSortGroupRef) + return 1; + else if (sca->sortop > scb->sortop) + return -1; + else if (sca->sortop < scb->sortop) + return 1; + else if (sca->nulls_first && !scb->nulls_first) + return -1; + else if (!sca->nulls_first && scb->nulls_first) + return 1; + /* no need to compare eqop, since it is fully determined by sortop */ } - return result; + if (list_length(wcsa->uniqueOrder) > list_length(wcsb->uniqueOrder)) + return -1; + else if (list_length(wcsa->uniqueOrder) < list_length(wcsb->uniqueOrder)) + return 1; + + return 0; } /* @@ -5410,18 +5413,18 @@ make_window_input_target(PlannerInfo *root, sgrefs = NULL; foreach(lc, activeWindows) { - WindowClause *wc = (WindowClause *) lfirst(lc); + WindowClause *wc = lfirst_node(WindowClause, lc); ListCell *lc2; foreach(lc2, wc->partitionClause) { - SortGroupClause *sortcl = (SortGroupClause *) lfirst(lc2); + SortGroupClause *sortcl = lfirst_node(SortGroupClause, lc2); sgrefs = bms_add_member(sgrefs, sortcl->tleSortGroupRef); } foreach(lc2, wc->orderClause) { - SortGroupClause *sortcl = (SortGroupClause *) lfirst(lc2); + SortGroupClause *sortcl = lfirst_node(SortGroupClause, lc2); sgrefs = bms_add_member(sgrefs, sortcl->tleSortGroupRef); } @@ -5430,7 +5433,7 @@ make_window_input_target(PlannerInfo *root, /* Add in sortgroupref numbers of GROUP BY clauses, too */ foreach(lc, parse->groupClause) { - SortGroupClause *grpcl = (SortGroupClause *) lfirst(lc); + SortGroupClause *grpcl = lfirst_node(SortGroupClause, lc); sgrefs = bms_add_member(sgrefs, grpcl->tleSortGroupRef); } @@ -5505,8 +5508,6 @@ make_window_input_target(PlannerInfo *root, * The required ordering is first the PARTITION keys, then the ORDER keys. * In the future we might try to implement windowing using hashing, in which * case the ordering could be relaxed, but for now we always sort. - * - * Caution: if you change this, see createplan.c's get_column_info_for_window! */ static List * make_pathkeys_for_window(PlannerInfo *root, WindowClause *wc, @@ -5600,7 +5601,7 @@ make_pathkeys_for_window(PlannerInfo *root, WindowClause *wc, * below the Sort step (and the Distinct step, if any). This will be * exactly final_target if we decide a projection step wouldn't be helpful. * - * In addition, *have_postponed_srfs is set to TRUE if we choose to postpone + * In addition, *have_postponed_srfs is set to true if we choose to postpone * any set-returning functions to after the Sort. 
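/*
 * A minimal standalone sketch (not PostgreSQL code; the types and key
 * encoding are hypothetical) of the ordering property that common_prefix_cmp
 * above establishes: windows are compared key-by-key, and when one key list
 * is a prefix of another, the longer ("stronger") sort is placed first so
 * that the weaker window can reuse the same sort.
 */
#include <stdio.h>
#include <stdlib.h>

typedef struct DemoWindow
{
	const char *name;
	int			keys[4];		/* sort-key identifiers, in order */
	int			nkeys;
} DemoWindow;

static int
demo_prefix_cmp(const void *a, const void *b)
{
	const DemoWindow *wa = a;
	const DemoWindow *wb = b;
	int			n = wa->nkeys < wb->nkeys ? wa->nkeys : wb->nkeys;

	for (int i = 0; i < n; i++)
	{
		if (wa->keys[i] != wb->keys[i])
			return wa->keys[i] < wb->keys[i] ? 1 : -1;
	}
	/* equal over the common prefix: the longer key list sorts first */
	return wb->nkeys - wa->nkeys;
}

int
main(void)
{
	DemoWindow	wins[] = {
		{"w1 (PARTITION BY a)", {1}, 1},
		{"w2 (PARTITION BY a ORDER BY b)", {1, 2}, 2},
		{"w3 (PARTITION BY a ORDER BY b, c)", {1, 2, 3}, 3},
	};

	qsort(wins, 3, sizeof(DemoWindow), demo_prefix_cmp);

	/* prints w3, then w2, then w1: a single sort can serve all three */
	for (int i = 0; i < 3; i++)
		printf("%s\n", wins[i].name);
	return 0;
}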
*/ static PathTarget * @@ -5850,7 +5851,7 @@ adjust_paths_for_srfs(PlannerInfo *root, RelOptInfo *rel, Assert(subpath->param_info == NULL); forboth(lc1, targets, lc2, targets_contain_srfs) { - PathTarget *thistarget = (PathTarget *) lfirst(lc1); + PathTarget *thistarget = lfirst_node(PathTarget, lc1); bool contains_srfs = (bool) lfirst_int(lc2); /* If this level doesn't contain SRFs, do regular projection */ @@ -5883,7 +5884,7 @@ adjust_paths_for_srfs(PlannerInfo *root, RelOptInfo *rel, Assert(subpath->param_info == NULL); forboth(lc1, targets, lc2, targets_contain_srfs) { - PathTarget *thistarget = (PathTarget *) lfirst(lc1); + PathTarget *thistarget = lfirst_node(PathTarget, lc1); bool contains_srfs = (bool) lfirst_int(lc2); /* If this level doesn't contain SRFs, do regular projection */ @@ -5952,7 +5953,7 @@ expression_planner(Expr *expr) * tableOid is the OID of a table to be clustered on its index indexOid * (which is already known to be a btree index). Decide whether it's * cheaper to do an indexscan or a seqscan-plus-sort to execute the CLUSTER. - * Return TRUE to use sorting, FALSE to use an indexscan. + * Return true to use sorting, false to use an indexscan. * * Note: caller had better already hold some type of lock on the table. */ @@ -5994,6 +5995,7 @@ plan_cluster_use_sort(Oid tableOid, Oid indexOid) rte->rtekind = RTE_RELATION; rte->relid = tableOid; rte->relkind = RELKIND_RELATION; /* Don't be too picky. */ + rte->rellockmode = AccessShareLock; rte->lateral = false; rte->inh = false; rte->inFromCl = true; @@ -6009,7 +6011,7 @@ plan_cluster_use_sort(Oid tableOid, Oid indexOid) indexInfo = NULL; foreach(lc, rel->indexlist) { - indexInfo = (IndexOptInfo *) lfirst(lc); + indexInfo = lfirst_node(IndexOptInfo, lc); if (indexInfo->indexoid == indexOid) break; } @@ -6058,31 +6060,1177 @@ plan_cluster_use_sort(Oid tableOid, Oid indexOid) } /* - * get_partitioned_child_rels - * Returns a list of the RT indexes of the partitioned child relations - * with rti as the root parent RT index. + * plan_create_index_workers + * Use the planner to decide how many parallel worker processes + * CREATE INDEX should request for use + * + * tableOid is the table on which the index is to be built. indexOid is the + * OID of an index to be created or reindexed (which must be a btree index). * - * Note: Only call this function on RTEs known to be partitioned tables. + * Return value is the number of parallel worker processes to request. It + * may be unsafe to proceed if this is 0. Note that this does not include the + * leader participating as a worker (value is always a number of parallel + * worker processes). + * + * Note: caller had better already hold some type of lock on the table and + * index. 
*/ -List * -get_partitioned_child_rels(PlannerInfo *root, Index rti) +int +plan_create_index_workers(Oid tableOid, Oid indexOid) { - List *result = NIL; - ListCell *l; + PlannerInfo *root; + Query *query; + PlannerGlobal *glob; + RangeTblEntry *rte; + Relation heap; + Relation index; + RelOptInfo *rel; + int parallel_workers; + BlockNumber heap_blocks; + double reltuples; + double allvisfrac; + + /* Return immediately when parallelism disabled */ + if (max_parallel_maintenance_workers == 0) + return 0; + + /* Set up largely-dummy planner state */ + query = makeNode(Query); + query->commandType = CMD_SELECT; + + glob = makeNode(PlannerGlobal); + + root = makeNode(PlannerInfo); + root->parse = query; + root->glob = glob; + root->query_level = 1; + root->planner_cxt = CurrentMemoryContext; + root->wt_param_id = -1; + + /* + * Build a minimal RTE. + * + * Set the target's table to be an inheritance parent. This is a kludge + * that prevents problems within get_relation_info(), which does not + * expect that any IndexOptInfo is currently undergoing REINDEX. + */ + rte = makeNode(RangeTblEntry); + rte->rtekind = RTE_RELATION; + rte->relid = tableOid; + rte->relkind = RELKIND_RELATION; /* Don't be too picky. */ + rte->rellockmode = AccessShareLock; + rte->lateral = false; + rte->inh = true; + rte->inFromCl = true; + query->rtable = list_make1(rte); + + /* Set up RTE/RelOptInfo arrays */ + setup_simple_rel_arrays(root); + + /* Build RelOptInfo */ + rel = build_simple_rel(root, 1, NULL); + + heap = heap_open(tableOid, NoLock); + index = index_open(indexOid, NoLock); + + /* + * Determine if it's safe to proceed. + * + * Currently, parallel workers can't access the leader's temporary tables. + * Furthermore, any index predicate or index expressions must be parallel + * safe. + */ + if (heap->rd_rel->relpersistence == RELPERSISTENCE_TEMP || + !is_parallel_safe(root, (Node *) RelationGetIndexExpressions(index)) || + !is_parallel_safe(root, (Node *) RelationGetIndexPredicate(index))) + { + parallel_workers = 0; + goto done; + } - foreach(l, root->pcinfo_list) + /* + * If parallel_workers storage parameter is set for the table, accept that + * as the number of parallel worker processes to launch (though still cap + * at max_parallel_maintenance_workers). Note that we deliberately do not + * consider any other factor when parallel_workers is set. (e.g., memory + * use by workers.) + */ + if (rel->rel_parallel_workers != -1) { - PartitionedChildRelInfo *pc = lfirst(l); + parallel_workers = Min(rel->rel_parallel_workers, + max_parallel_maintenance_workers); + goto done; + } + + /* + * Estimate heap relation size ourselves, since rel->pages cannot be + * trusted (heap RTE was marked as inheritance parent) + */ + estimate_rel_size(heap, NULL, &heap_blocks, &reltuples, &allvisfrac); + + /* + * Determine number of workers to scan the heap relation using generic + * model + */ + parallel_workers = compute_parallel_worker(rel, heap_blocks, -1, + max_parallel_maintenance_workers); + + /* + * Cap workers based on available maintenance_work_mem as needed. + * + * Note that each tuplesort participant receives an even share of the + * total maintenance_work_mem budget. Aim to leave participants + * (including the leader as a participant) with no less than 32MB of + * memory. This leaves cases where maintenance_work_mem is set to 64MB + * immediately past the threshold of being capable of launching a single + * parallel worker to sort. 
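/*
 * A standalone sketch (not PostgreSQL code; names are illustrative) of the
 * capping rule applied by the loop that follows: the budget is in kilobytes,
 * each participant -- the leader plus the workers -- gets an even share, and
 * workers are shed until every share is at least 32MB (32768kB).
 */
#include <stdio.h>

static int
cap_workers_by_mem(int workers, long budget_kb)
{
	while (workers > 0 && budget_kb / (workers + 1) < 32768L)
		workers--;
	return workers;
}

int
main(void)
{
	/* 64MB: leader + 1 worker get exactly 32768kB each, so 1 survives */
	printf("%d\n", cap_workers_by_mem(4, 65536L));
	/* just below 64MB: 64512kB / 2 < 32768kB, so no workers remain */
	printf("%d\n", cap_workers_by_mem(4, 64512L));
	return 0;
}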
+ */ + while (parallel_workers > 0 && + maintenance_work_mem / (parallel_workers + 1) < 32768L) + parallel_workers--; + +done: + index_close(index, NoLock); + heap_close(heap, NoLock); + + return parallel_workers; +} + +/* + * add_paths_to_grouping_rel + * + * Add non-partial paths to grouping relation. + */ +static void +add_paths_to_grouping_rel(PlannerInfo *root, RelOptInfo *input_rel, + RelOptInfo *grouped_rel, + RelOptInfo *partially_grouped_rel, + const AggClauseCosts *agg_costs, + grouping_sets_data *gd, double dNumGroups, + GroupPathExtraData *extra) +{ + Query *parse = root->parse; + Path *cheapest_path = input_rel->cheapest_total_path; + ListCell *lc; + bool can_hash = (extra->flags & GROUPING_CAN_USE_HASH) != 0; + bool can_sort = (extra->flags & GROUPING_CAN_USE_SORT) != 0; + List *havingQual = (List *) extra->havingQual; + AggClauseCosts *agg_final_costs = &extra->agg_final_costs; - if (pc->parent_relid == rti) + if (can_sort) + { + /* + * Use any available suitably-sorted path as input, and also consider + * sorting the cheapest-total path. + */ + foreach(lc, input_rel->pathlist) { - result = pc->child_rels; - break; + Path *path = (Path *) lfirst(lc); + bool is_sorted; + + is_sorted = pathkeys_contained_in(root->group_pathkeys, + path->pathkeys); + if (path == cheapest_path || is_sorted) + { + /* Sort the cheapest-total path if it isn't already sorted */ + if (!is_sorted) + path = (Path *) create_sort_path(root, + grouped_rel, + path, + root->group_pathkeys, + -1.0); + + /* Now decide what to stick atop it */ + if (parse->groupingSets) + { + consider_groupingsets_paths(root, grouped_rel, + path, true, can_hash, + gd, agg_costs, dNumGroups); + } + else if (parse->hasAggs) + { + /* + * We have aggregation, possibly with plain GROUP BY. Make + * an AggPath. + */ + add_path(grouped_rel, (Path *) + create_agg_path(root, + grouped_rel, + path, + grouped_rel->reltarget, + parse->groupClause ? AGG_SORTED : AGG_PLAIN, + AGGSPLIT_SIMPLE, + parse->groupClause, + havingQual, + agg_costs, + dNumGroups)); + } + else if (parse->groupClause) + { + /* + * We have GROUP BY without aggregation or grouping sets. + * Make a GroupPath. + */ + add_path(grouped_rel, (Path *) + create_group_path(root, + grouped_rel, + path, + parse->groupClause, + havingQual, + dNumGroups)); + } + else + { + /* Other cases should have been handled above */ + Assert(false); + } + } + } + + /* + * Instead of operating directly on the input relation, we can + * consider finalizing a partially aggregated path. + */ + if (partially_grouped_rel != NULL) + { + foreach(lc, partially_grouped_rel->pathlist) + { + Path *path = (Path *) lfirst(lc); + + /* + * Insert a Sort node, if required. But there's no point in + * sorting anything but the cheapest path. + */ + if (!pathkeys_contained_in(root->group_pathkeys, path->pathkeys)) + { + if (path != partially_grouped_rel->cheapest_total_path) + continue; + path = (Path *) create_sort_path(root, + grouped_rel, + path, + root->group_pathkeys, + -1.0); + } + + if (parse->hasAggs) + add_path(grouped_rel, (Path *) + create_agg_path(root, + grouped_rel, + path, + grouped_rel->reltarget, + parse->groupClause ? 
AGG_SORTED : AGG_PLAIN, + AGGSPLIT_FINAL_DESERIAL, + parse->groupClause, + havingQual, + agg_final_costs, + dNumGroups)); + else + add_path(grouped_rel, (Path *) + create_group_path(root, + grouped_rel, + path, + parse->groupClause, + havingQual, + dNumGroups)); + } } } - /* The root partitioned table is included as a child rel */ - Assert(list_length(result) >= 1); + if (can_hash) + { + Size hashaggtablesize; - return result; + if (parse->groupingSets) + { + /* + * Try for a hash-only groupingsets path over unsorted input. + */ + consider_groupingsets_paths(root, grouped_rel, + cheapest_path, false, true, + gd, agg_costs, dNumGroups); + } + else + { + hashaggtablesize = estimate_hashagg_tablesize(cheapest_path, + agg_costs, + dNumGroups); + + /* + * Provided that the estimated size of the hashtable does not + * exceed work_mem, we'll generate a HashAgg Path, although if we + * were unable to sort above, then we'd better generate a Path, so + * that we at least have one. + */ + if (hashaggtablesize < work_mem * 1024L || + grouped_rel->pathlist == NIL) + { + /* + * We just need an Agg over the cheapest-total input path, + * since input order won't matter. + */ + add_path(grouped_rel, (Path *) + create_agg_path(root, grouped_rel, + cheapest_path, + grouped_rel->reltarget, + AGG_HASHED, + AGGSPLIT_SIMPLE, + parse->groupClause, + havingQual, + agg_costs, + dNumGroups)); + } + } + + /* + * Generate a Finalize HashAgg Path atop of the cheapest partially + * grouped path, assuming there is one. Once again, we'll only do this + * if it looks as though the hash table won't exceed work_mem. + */ + if (partially_grouped_rel && partially_grouped_rel->pathlist) + { + Path *path = partially_grouped_rel->cheapest_total_path; + + hashaggtablesize = estimate_hashagg_tablesize(path, + agg_final_costs, + dNumGroups); + + if (hashaggtablesize < work_mem * 1024L) + add_path(grouped_rel, (Path *) + create_agg_path(root, + grouped_rel, + path, + grouped_rel->reltarget, + AGG_HASHED, + AGGSPLIT_FINAL_DESERIAL, + parse->groupClause, + havingQual, + agg_final_costs, + dNumGroups)); + } + } + + /* + * When partitionwise aggregate is used, we might have fully aggregated + * paths in the partial pathlist, because add_paths_to_append_rel() will + * consider a path for grouped_rel consisting of a Parallel Append of + * non-partial paths from each child. + */ + if (grouped_rel->partial_pathlist != NIL) + gather_grouping_paths(root, grouped_rel); +} + +/* + * create_partial_grouping_paths + * + * Create a new upper relation representing the result of partial aggregation + * and populate it with appropriate paths. Note that we don't finalize the + * lists of paths here, so the caller can add additional partial or non-partial + * paths and must afterward call gather_grouping_paths and set_cheapest on + * the returned upper relation. + * + * All paths for this new upper relation -- both partial and non-partial -- + * have been partially aggregated but require a subsequent FinalizeAggregate + * step. + * + * NB: This function is allowed to return NULL if it determines that there is + * no real need to create a new RelOptInfo. 
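/*
 * A minimal sketch (not PostgreSQL code; all names are hypothetical) of the
 * two-phase aggregation that the paths built below represent: the partial
 * phase emits a transition state per producer instead of a final value, and
 * a later finalize step combines those states.  AVG is the classic case --
 * the state is (sum, count).
 */
#include <stdio.h>

typedef struct AvgState
{
	double		sum;
	long		count;
} AvgState;

/* partial phase: fold one chunk of input into a transition state */
static AvgState
partial_avg(const double *vals, int n)
{
	AvgState	st = {0.0, 0};

	for (int i = 0; i < n; i++)
	{
		st.sum += vals[i];
		st.count++;
	}
	return st;
}

/* finalize phase: combine the partial states and compute the result */
static double
final_avg(const AvgState *states, int nstates)
{
	AvgState	total = {0.0, 0};

	for (int i = 0; i < nstates; i++)
	{
		total.sum += states[i].sum;
		total.count += states[i].count;
	}
	return total.count > 0 ? total.sum / total.count : 0.0;
}

int
main(void)
{
	double		chunk1[] = {1.0, 2.0, 3.0};
	double		chunk2[] = {4.0, 5.0};
	AvgState	states[2];

	states[0] = partial_avg(chunk1, 3);
	states[1] = partial_avg(chunk2, 2);
	printf("avg = %g\n", final_avg(states, 2));		/* prints 3 */
	return 0;
}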
+ */ +static RelOptInfo * +create_partial_grouping_paths(PlannerInfo *root, + RelOptInfo *grouped_rel, + RelOptInfo *input_rel, + grouping_sets_data *gd, + GroupPathExtraData *extra, + bool force_rel_creation) +{ + Query *parse = root->parse; + RelOptInfo *partially_grouped_rel; + AggClauseCosts *agg_partial_costs = &extra->agg_partial_costs; + AggClauseCosts *agg_final_costs = &extra->agg_final_costs; + Path *cheapest_partial_path = NULL; + Path *cheapest_total_path = NULL; + double dNumPartialGroups = 0; + double dNumPartialPartialGroups = 0; + ListCell *lc; + bool can_hash = (extra->flags & GROUPING_CAN_USE_HASH) != 0; + bool can_sort = (extra->flags & GROUPING_CAN_USE_SORT) != 0; + + /* + * Consider whether we should generate partially aggregated non-partial + * paths. We can only do this if we have a non-partial path, and only if + * the parent of the input rel is performing partial partitionwise + * aggregation. (Note that extra->patype is the type of partitionwise + * aggregation being used at the parent level, not this level.) + */ + if (input_rel->pathlist != NIL && + extra->patype == PARTITIONWISE_AGGREGATE_PARTIAL) + cheapest_total_path = input_rel->cheapest_total_path; + + /* + * If parallelism is possible for grouped_rel, then we should consider + * generating partially-grouped partial paths. However, if the input rel + * has no partial paths, then we can't. + */ + if (grouped_rel->consider_parallel && input_rel->partial_pathlist != NIL) + cheapest_partial_path = linitial(input_rel->partial_pathlist); + + /* + * If we can't partially aggregate partial paths, and we can't partially + * aggregate non-partial paths, then don't bother creating the new + * RelOptInfo at all, unless the caller specified force_rel_creation. + */ + if (cheapest_total_path == NULL && + cheapest_partial_path == NULL && + !force_rel_creation) + return NULL; + + /* + * Build a new upper relation to represent the result of partially + * aggregating the rows from the input relation. + */ + partially_grouped_rel = fetch_upper_rel(root, + UPPERREL_PARTIAL_GROUP_AGG, + grouped_rel->relids); + partially_grouped_rel->consider_parallel = + grouped_rel->consider_parallel; + partially_grouped_rel->reloptkind = grouped_rel->reloptkind; + partially_grouped_rel->serverid = grouped_rel->serverid; + partially_grouped_rel->userid = grouped_rel->userid; + partially_grouped_rel->useridiscurrent = grouped_rel->useridiscurrent; + partially_grouped_rel->fdwroutine = grouped_rel->fdwroutine; + + /* + * Build target list for partial aggregate paths. These paths cannot just + * emit the same tlist as regular aggregate paths, because (1) we must + * include Vars and Aggrefs needed in HAVING, which might not appear in + * the result tlist, and (2) the Aggrefs must be set in partial mode. + */ + partially_grouped_rel->reltarget = + make_partial_grouping_target(root, grouped_rel->reltarget, + extra->havingQual); + + if (!extra->partial_costs_set) + { + /* + * Collect statistics about aggregates for estimating costs of + * performing aggregation in parallel. 
+ */ + MemSet(agg_partial_costs, 0, sizeof(AggClauseCosts)); + MemSet(agg_final_costs, 0, sizeof(AggClauseCosts)); + if (parse->hasAggs) + { + List *partial_target_exprs; + + /* partial phase */ + partial_target_exprs = partially_grouped_rel->reltarget->exprs; + get_agg_clause_costs(root, (Node *) partial_target_exprs, + AGGSPLIT_INITIAL_SERIAL, + agg_partial_costs); + + /* final phase */ + get_agg_clause_costs(root, (Node *) grouped_rel->reltarget->exprs, + AGGSPLIT_FINAL_DESERIAL, + agg_final_costs); + get_agg_clause_costs(root, extra->havingQual, + AGGSPLIT_FINAL_DESERIAL, + agg_final_costs); + } + + extra->partial_costs_set = true; + } + + /* Estimate number of partial groups. */ + if (cheapest_total_path != NULL) + dNumPartialGroups = + get_number_of_groups(root, + cheapest_total_path->rows, + gd, + extra->targetList); + if (cheapest_partial_path != NULL) + dNumPartialPartialGroups = + get_number_of_groups(root, + cheapest_partial_path->rows, + gd, + extra->targetList); + + if (can_sort && cheapest_total_path != NULL) + { + /* This should have been checked previously */ + Assert(parse->hasAggs || parse->groupClause); + + /* + * Use any available suitably-sorted path as input, and also consider + * sorting the cheapest partial path. + */ + foreach(lc, input_rel->pathlist) + { + Path *path = (Path *) lfirst(lc); + bool is_sorted; + + is_sorted = pathkeys_contained_in(root->group_pathkeys, + path->pathkeys); + if (path == cheapest_total_path || is_sorted) + { + /* Sort the cheapest partial path, if it isn't already */ + if (!is_sorted) + path = (Path *) create_sort_path(root, + partially_grouped_rel, + path, + root->group_pathkeys, + -1.0); + + if (parse->hasAggs) + add_path(partially_grouped_rel, (Path *) + create_agg_path(root, + partially_grouped_rel, + path, + partially_grouped_rel->reltarget, + parse->groupClause ? AGG_SORTED : AGG_PLAIN, + AGGSPLIT_INITIAL_SERIAL, + parse->groupClause, + NIL, + agg_partial_costs, + dNumPartialGroups)); + else + add_path(partially_grouped_rel, (Path *) + create_group_path(root, + partially_grouped_rel, + path, + parse->groupClause, + NIL, + dNumPartialGroups)); + } + } + } + + if (can_sort && cheapest_partial_path != NULL) + { + /* Similar to above logic, but for partial paths. */ + foreach(lc, input_rel->partial_pathlist) + { + Path *path = (Path *) lfirst(lc); + bool is_sorted; + + is_sorted = pathkeys_contained_in(root->group_pathkeys, + path->pathkeys); + if (path == cheapest_partial_path || is_sorted) + { + /* Sort the cheapest partial path, if it isn't already */ + if (!is_sorted) + path = (Path *) create_sort_path(root, + partially_grouped_rel, + path, + root->group_pathkeys, + -1.0); + + if (parse->hasAggs) + add_partial_path(partially_grouped_rel, (Path *) + create_agg_path(root, + partially_grouped_rel, + path, + partially_grouped_rel->reltarget, + parse->groupClause ? 
AGG_SORTED : AGG_PLAIN, + AGGSPLIT_INITIAL_SERIAL, + parse->groupClause, + NIL, + agg_partial_costs, + dNumPartialPartialGroups)); + else + add_partial_path(partially_grouped_rel, (Path *) + create_group_path(root, + partially_grouped_rel, + path, + parse->groupClause, + NIL, + dNumPartialPartialGroups)); + } + } + } + + if (can_hash && cheapest_total_path != NULL) + { + Size hashaggtablesize; + + /* Checked above */ + Assert(parse->hasAggs || parse->groupClause); + + hashaggtablesize = + estimate_hashagg_tablesize(cheapest_total_path, + agg_partial_costs, + dNumPartialGroups); + + /* + * Tentatively produce a partial HashAgg Path, depending on if it + * looks as if the hash table will fit in work_mem. + */ + if (hashaggtablesize < work_mem * 1024L && + cheapest_total_path != NULL) + { + add_path(partially_grouped_rel, (Path *) + create_agg_path(root, + partially_grouped_rel, + cheapest_total_path, + partially_grouped_rel->reltarget, + AGG_HASHED, + AGGSPLIT_INITIAL_SERIAL, + parse->groupClause, + NIL, + agg_partial_costs, + dNumPartialGroups)); + } + } + + if (can_hash && cheapest_partial_path != NULL) + { + Size hashaggtablesize; + + hashaggtablesize = + estimate_hashagg_tablesize(cheapest_partial_path, + agg_partial_costs, + dNumPartialPartialGroups); + + /* Do the same for partial paths. */ + if (hashaggtablesize < work_mem * 1024L && + cheapest_partial_path != NULL) + { + add_partial_path(partially_grouped_rel, (Path *) + create_agg_path(root, + partially_grouped_rel, + cheapest_partial_path, + partially_grouped_rel->reltarget, + AGG_HASHED, + AGGSPLIT_INITIAL_SERIAL, + parse->groupClause, + NIL, + agg_partial_costs, + dNumPartialPartialGroups)); + } + } + + /* + * If there is an FDW that's responsible for all baserels of the query, + * let it consider adding partially grouped ForeignPaths. + */ + if (partially_grouped_rel->fdwroutine && + partially_grouped_rel->fdwroutine->GetForeignUpperPaths) + { + FdwRoutine *fdwroutine = partially_grouped_rel->fdwroutine; + + fdwroutine->GetForeignUpperPaths(root, + UPPERREL_PARTIAL_GROUP_AGG, + input_rel, partially_grouped_rel, + extra); + } + + return partially_grouped_rel; +} + +/* + * Generate Gather and Gather Merge paths for a grouping relation or partial + * grouping relation. + * + * generate_gather_paths does most of the work, but we also consider a special + * case: we could try sorting the data by the group_pathkeys and then applying + * Gather Merge. + * + * NB: This function shouldn't be used for anything other than a grouped or + * partially grouped relation not only because of the fact that it explicitly + * references group_pathkeys but we pass "true" as the third argument to + * generate_gather_paths(). + */ +static void +gather_grouping_paths(PlannerInfo *root, RelOptInfo *rel) +{ + Path *cheapest_partial_path; + + /* Try Gather for unordered paths and Gather Merge for ordered ones. */ + generate_gather_paths(root, rel, true); + + /* Try cheapest partial path + explicit Sort + Gather Merge. 
*/ + cheapest_partial_path = linitial(rel->partial_pathlist); + if (!pathkeys_contained_in(root->group_pathkeys, + cheapest_partial_path->pathkeys)) + { + Path *path; + double total_groups; + + total_groups = + cheapest_partial_path->rows * cheapest_partial_path->parallel_workers; + path = (Path *) create_sort_path(root, rel, cheapest_partial_path, + root->group_pathkeys, + -1.0); + path = (Path *) + create_gather_merge_path(root, + rel, + path, + rel->reltarget, + root->group_pathkeys, + NULL, + &total_groups); + + add_path(rel, path); + } +} + +/* + * can_partial_agg + * + * Determines whether or not partial grouping and/or aggregation is possible. + * Returns true when possible, false otherwise. + */ +static bool +can_partial_agg(PlannerInfo *root, const AggClauseCosts *agg_costs) +{ + Query *parse = root->parse; + + if (!parse->hasAggs && parse->groupClause == NIL) + { + /* + * We don't know how to do parallel aggregation unless we have either + * some aggregates or a grouping clause. + */ + return false; + } + else if (parse->groupingSets) + { + /* We don't know how to do grouping sets in parallel. */ + return false; + } + else if (agg_costs->hasNonPartial || agg_costs->hasNonSerial) + { + /* Insufficient support for partial mode. */ + return false; + } + + /* Everything looks good. */ + return true; +} + +/* + * apply_scanjoin_target_to_paths + * + * Adjust the final scan/join relation, and recursively all of its children, + * to generate the final scan/join target. It would be more correct to model + * this as a separate planning step with a new RelOptInfo at the toplevel and + * for each child relation, but doing it this way is noticeably cheaper. + * Maybe that problem can be solved at some point, but for now we do this. + * + * If tlist_same_exprs is true, then the scan/join target to be applied has + * the same expressions as the existing reltarget, so we need only insert the + * appropriate sortgroupref information. By avoiding the creation of + * projection paths we save effort both immediately and at plan creation time. + */ +static void +apply_scanjoin_target_to_paths(PlannerInfo *root, + RelOptInfo *rel, + List *scanjoin_targets, + List *scanjoin_targets_contain_srfs, + bool scanjoin_target_parallel_safe, + bool tlist_same_exprs) +{ + ListCell *lc; + PathTarget *scanjoin_target; + bool is_dummy_rel = IS_DUMMY_REL(rel); + + check_stack_depth(); + + /* + * If the scan/join target is not parallel-safe, partial paths cannot + * generate it. + */ + if (!scanjoin_target_parallel_safe) + { + /* + * Since we can't generate the final scan/join target, this is our + * last opportunity to use any partial paths that exist. We don't do + * this if the case where the target is parallel-safe, since we will + * be able to generate superior paths by doing it after the final + * scan/join target has been applied. + * + * Note that this may invalidate rel->cheapest_total_path, so we must + * not rely on it after this point without first calling set_cheapest. + */ + generate_gather_paths(root, rel, false); + + /* Can't use parallel query above this level. */ + rel->partial_pathlist = NIL; + rel->consider_parallel = false; + } + + /* + * Update the reltarget. This may not be strictly necessary in all cases, + * but it is at least necessary when create_append_path() gets called + * below directly or indirectly, since that function uses the reltarget as + * the pathtarget for the resulting path. It seems like a good idea to do + * it unconditionally. 
+ */ + rel->reltarget = llast_node(PathTarget, scanjoin_targets); + + /* Special case: handle dummy relations separately. */ + if (is_dummy_rel) + { + /* + * Since this is a dummy rel, it's got a single Append path with no + * child paths. Replace it with a new path having the final scan/join + * target. (Note that since Append is not projection-capable, it + * would be bad to handle this using the general purpose code below; + * we'd end up putting a ProjectionPath on top of the existing Append + * node, which would cause this relation to stop appearing to be a + * dummy rel.) + */ + rel->pathlist = list_make1(create_append_path(root, rel, NIL, NIL, + NULL, 0, false, NIL, + -1)); + rel->partial_pathlist = NIL; + set_cheapest(rel); + Assert(IS_DUMMY_REL(rel)); + + /* + * Forget about any child relations. There's no point in adjusting + * them and no point in using them for later planning stages (in + * particular, partitionwise aggregate). + */ + rel->nparts = 0; + rel->part_rels = NULL; + rel->boundinfo = NULL; + + return; + } + + /* Extract SRF-free scan/join target. */ + scanjoin_target = linitial_node(PathTarget, scanjoin_targets); + + /* + * Adjust each input path. If the tlist exprs are the same, we can just + * inject the sortgroupref information into the existing pathtarget. + * Otherwise, replace each path with a projection path that generates the + * SRF-free scan/join target. This can't change the ordering of paths + * within rel->pathlist, so we just modify the list in place. + */ + foreach(lc, rel->pathlist) + { + Path *subpath = (Path *) lfirst(lc); + Path *newpath; + + Assert(subpath->param_info == NULL); + + if (tlist_same_exprs) + subpath->pathtarget->sortgrouprefs = + scanjoin_target->sortgrouprefs; + else + { + newpath = (Path *) create_projection_path(root, rel, subpath, + scanjoin_target); + lfirst(lc) = newpath; + } + } + + /* Same for partial paths. */ + foreach(lc, rel->partial_pathlist) + { + Path *subpath = (Path *) lfirst(lc); + Path *newpath; + + /* Shouldn't have any parameterized paths anymore */ + Assert(subpath->param_info == NULL); + + if (tlist_same_exprs) + subpath->pathtarget->sortgrouprefs = + scanjoin_target->sortgrouprefs; + else + { + newpath = (Path *) create_projection_path(root, + rel, + subpath, + scanjoin_target); + lfirst(lc) = newpath; + } + } + + /* Now fix things up if scan/join target contains SRFs */ + if (root->parse->hasTargetSRFs) + adjust_paths_for_srfs(root, rel, + scanjoin_targets, + scanjoin_targets_contain_srfs); + + /* + * If the relation is partitioned, recursively apply the same changes to + * all partitions and generate new Append paths. Since Append is not + * projection-capable, that might save a separate Result node, and it also + * is important for partitionwise aggregate. + */ + if (rel->part_scheme && rel->part_rels) + { + int partition_idx; + List *live_children = NIL; + + /* Adjust each partition. */ + for (partition_idx = 0; partition_idx < rel->nparts; partition_idx++) + { + RelOptInfo *child_rel = rel->part_rels[partition_idx]; + ListCell *lc; + AppendRelInfo **appinfos; + int nappinfos; + List *child_scanjoin_targets = NIL; + + /* Translate scan/join targets for this child. 
*/ + appinfos = find_appinfos_by_relids(root, child_rel->relids, + &nappinfos); + foreach(lc, scanjoin_targets) + { + PathTarget *target = lfirst_node(PathTarget, lc); + + target = copy_pathtarget(target); + target->exprs = (List *) + adjust_appendrel_attrs(root, + (Node *) target->exprs, + nappinfos, appinfos); + child_scanjoin_targets = lappend(child_scanjoin_targets, + target); + } + pfree(appinfos); + + /* Recursion does the real work. */ + apply_scanjoin_target_to_paths(root, child_rel, + child_scanjoin_targets, + scanjoin_targets_contain_srfs, + scanjoin_target_parallel_safe, + tlist_same_exprs); + + /* Save non-dummy children for Append paths. */ + if (!IS_DUMMY_REL(child_rel)) + live_children = lappend(live_children, child_rel); + } + + /* Build new paths for this relation by appending child paths. */ + if (live_children != NIL) + add_paths_to_append_rel(root, rel, live_children); + } + + /* + * Consider generating Gather or Gather Merge paths. We must only do this + * if the relation is parallel safe, and we don't do it for child rels to + * avoid creating multiple Gather nodes within the same plan. We must do + * this after all paths have been generated and before set_cheapest, since + * one of the generated paths may turn out to be the cheapest one. + */ + if (rel->consider_parallel && !IS_OTHER_REL(rel)) + generate_gather_paths(root, rel, false); + + /* + * Reassess which paths are the cheapest, now that we've potentially added + * new Gather (or Gather Merge) and/or Append (or MergeAppend) paths to + * this relation. + */ + set_cheapest(rel); +} + +/* + * create_partitionwise_grouping_paths + * + * If the partition keys of input relation are part of the GROUP BY clause, all + * the rows belonging to a given group come from a single partition. This + * allows aggregation/grouping over a partitioned relation to be broken down + * into aggregation/grouping on each partition. This should be no worse, and + * often better, than the normal approach. + * + * However, if the GROUP BY clause does not contain all the partition keys, + * rows from a given group may be spread across multiple partitions. In that + * case, we perform partial aggregation for each group, append the results, + * and then finalize aggregation. This is less certain to win than the + * previous case. It may win if the PartialAggregate stage greatly reduces + * the number of groups, because fewer rows will pass through the Append node. + * It may lose if we have lots of small groups. + */ +static void +create_partitionwise_grouping_paths(PlannerInfo *root, + RelOptInfo *input_rel, + RelOptInfo *grouped_rel, + RelOptInfo *partially_grouped_rel, + const AggClauseCosts *agg_costs, + grouping_sets_data *gd, + PartitionwiseAggregateType patype, + GroupPathExtraData *extra) +{ + int nparts = input_rel->nparts; + int cnt_parts; + List *grouped_live_children = NIL; + List *partially_grouped_live_children = NIL; + PathTarget *target = grouped_rel->reltarget; + bool partial_grouping_valid = true; + + Assert(patype != PARTITIONWISE_AGGREGATE_NONE); + Assert(patype != PARTITIONWISE_AGGREGATE_PARTIAL || + partially_grouped_rel != NULL); + + /* Add paths for partitionwise aggregation/grouping. 
*/ + for (cnt_parts = 0; cnt_parts < nparts; cnt_parts++) + { + RelOptInfo *child_input_rel = input_rel->part_rels[cnt_parts]; + PathTarget *child_target = copy_pathtarget(target); + AppendRelInfo **appinfos; + int nappinfos; + GroupPathExtraData child_extra; + RelOptInfo *child_grouped_rel; + RelOptInfo *child_partially_grouped_rel; + + /* Input child rel must have a path */ + Assert(child_input_rel->pathlist != NIL); + + /* + * Copy the given "extra" structure as is and then override the + * members specific to this child. + */ + memcpy(&child_extra, extra, sizeof(child_extra)); + + appinfos = find_appinfos_by_relids(root, child_input_rel->relids, + &nappinfos); + + child_target->exprs = (List *) + adjust_appendrel_attrs(root, + (Node *) target->exprs, + nappinfos, appinfos); + + /* Translate havingQual and targetList. */ + child_extra.havingQual = (Node *) + adjust_appendrel_attrs(root, + extra->havingQual, + nappinfos, appinfos); + child_extra.targetList = (List *) + adjust_appendrel_attrs(root, + (Node *) extra->targetList, + nappinfos, appinfos); + + /* + * extra->patype was the value computed for our parent rel; patype is + * the value for this relation. For the child, our value is its + * parent rel's value. + */ + child_extra.patype = patype; + + /* + * Create grouping relation to hold fully aggregated grouping and/or + * aggregation paths for the child. + */ + child_grouped_rel = make_grouping_rel(root, child_input_rel, + child_target, + extra->target_parallel_safe, + child_extra.havingQual); + + /* Ignore empty children. They contribute nothing. */ + if (IS_DUMMY_REL(child_input_rel)) + { + mark_dummy_rel(child_grouped_rel); + + continue; + } + + /* Create grouping paths for this child relation. */ + create_ordinary_grouping_paths(root, child_input_rel, + child_grouped_rel, + agg_costs, gd, &child_extra, + &child_partially_grouped_rel); + + if (child_partially_grouped_rel) + { + partially_grouped_live_children = + lappend(partially_grouped_live_children, + child_partially_grouped_rel); + } + else + partial_grouping_valid = false; + + if (patype == PARTITIONWISE_AGGREGATE_FULL) + { + set_cheapest(child_grouped_rel); + grouped_live_children = lappend(grouped_live_children, + child_grouped_rel); + } + + pfree(appinfos); + } + + /* + * Try to create append paths for partially grouped children. For full + * partitionwise aggregation, we might have paths in the partial_pathlist + * if parallel aggregation is possible. For partial partitionwise + * aggregation, we may have paths in both pathlist and partial_pathlist. + * + * NB: We must have a partially grouped path for every child in order to + * generate a partially grouped path for this relation. + */ + if (partially_grouped_rel && partial_grouping_valid) + { + Assert(partially_grouped_live_children != NIL); + + add_paths_to_append_rel(root, partially_grouped_rel, + partially_grouped_live_children); + + /* + * We need call set_cheapest, since the finalization step will use the + * cheapest path from the rel. + */ + if (partially_grouped_rel->pathlist) + set_cheapest(partially_grouped_rel); + } + + /* If possible, create append paths for fully grouped children. */ + if (patype == PARTITIONWISE_AGGREGATE_FULL) + { + Assert(grouped_live_children != NIL); + + add_paths_to_append_rel(root, grouped_rel, grouped_live_children); + } +} + +/* + * group_by_has_partkey + * + * Returns true, if all the partition keys of the given relation are part of + * the GROUP BY clauses, false otherwise. 
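/*
 * The check implemented by the function below reduces to a subset test:
 * every partition key expression must match some GROUP BY expression, in
 * which case all rows of any one group fall into a single partition.  A
 * plain-string sketch of that test (not PostgreSQL code; names are
 * illustrative):
 */
#include <stdbool.h>
#include <stdio.h>
#include <string.h>

static bool
groupby_covers_partkey(const char **partkeys, int npartkeys,
					   const char **groupexprs, int ngroupexprs)
{
	for (int i = 0; i < npartkeys; i++)
	{
		bool		found = false;

		for (int j = 0; j < ngroupexprs; j++)
		{
			if (strcmp(partkeys[i], groupexprs[j]) == 0)
			{
				found = true;
				break;
			}
		}
		/* a single uncovered partition key is enough to reject */
		if (!found)
			return false;
	}
	return true;
}

int
main(void)
{
	const char *partkeys[] = {"a", "b"};
	const char *group1[] = {"a", "b", "c"};		/* covers both keys */
	const char *group2[] = {"a", "c"};			/* misses "b" */

	printf("%d %d\n",
		   groupby_covers_partkey(partkeys, 2, group1, 3),
		   groupby_covers_partkey(partkeys, 2, group2, 2));	/* 1 0 */
	return 0;
}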
+ */ +static bool +group_by_has_partkey(RelOptInfo *input_rel, + List *targetList, + List *groupClause) +{ + List *groupexprs = get_sortgrouplist_exprs(groupClause, targetList); + int cnt = 0; + int partnatts; + + /* Input relation should be partitioned. */ + Assert(input_rel->part_scheme); + + /* Rule out early, if there are no partition keys present. */ + if (!input_rel->partexprs) + return false; + + partnatts = input_rel->part_scheme->partnatts; + + for (cnt = 0; cnt < partnatts; cnt++) + { + List *partexprs = input_rel->partexprs[cnt]; + ListCell *lc; + bool found = false; + + foreach(lc, partexprs) + { + Expr *partexpr = lfirst(lc); + + if (list_member(groupexprs, partexpr)) + { + found = true; + break; + } + } + + /* + * If none of the partition key expressions match with any of the + * GROUP BY expression, return false. + */ + if (!found) + return false; + } + + return true; } diff --git a/src/backend/optimizer/plan/setrefs.c b/src/backend/optimizer/plan/setrefs.c index b0c9e94459..6d6ef1c376 100644 --- a/src/backend/optimizer/plan/setrefs.c +++ b/src/backend/optimizer/plan/setrefs.c @@ -4,7 +4,7 @@ * Post-processing of a completed plan tree: fix references to subplan * vars, compute regproc values for operators, etc * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * @@ -104,6 +104,7 @@ static Node *fix_scan_expr_mutator(Node *node, fix_scan_expr_context *context); static bool fix_scan_expr_walker(Node *node, fix_scan_expr_context *context); static void set_join_references(PlannerInfo *root, Join *join, int rtoffset); static void set_upper_references(PlannerInfo *root, Plan *plan, int rtoffset); +static void set_param_references(PlannerInfo *root, Plan *plan); static Node *convert_combining_aggrefs(Node *node, void *context); static void set_dummy_tlist_references(Plan *plan, int rtoffset); static indexed_tlist *build_tlist_index(List *tlist); @@ -628,7 +629,10 @@ set_plan_refs(PlannerInfo *root, Plan *plan, int rtoffset) case T_Gather: case T_GatherMerge: - set_upper_references(root, plan, rtoffset); + { + set_upper_references(root, plan, rtoffset); + set_param_references(root, plan); + } break; case T_Hash: @@ -844,12 +848,10 @@ set_plan_refs(PlannerInfo *root, Plan *plan, int rtoffset) } splan->nominalRelation += rtoffset; + if (splan->rootRelation) + splan->rootRelation += rtoffset; splan->exclRelRTI += rtoffset; - foreach(l, splan->partitioned_rels) - { - lfirst_int(l) += rtoffset; - } foreach(l, splan->resultRelations) { lfirst_int(l) += rtoffset; @@ -880,24 +882,17 @@ set_plan_refs(PlannerInfo *root, Plan *plan, int rtoffset) list_copy(splan->resultRelations)); /* - * If the main target relation is a partitioned table, the - * following list contains the RT indexes of partitioned child - * relations including the root, which are not included in the - * above list. We also keep RT indexes of the roots - * separately to be identitied as such during the executor - * initialization. + * If the main target relation is a partitioned table, also + * add the partition root's RT index to rootResultRelations, + * and remember its index in that list in rootResultRelIndex. 
*/ - if (splan->partitioned_rels != NIL) + if (splan->rootRelation) { - root->glob->nonleafResultRelations = - list_concat(root->glob->nonleafResultRelations, - list_copy(splan->partitioned_rels)); - /* Remember where this root will be in the global list. */ splan->rootResultRelIndex = list_length(root->glob->rootResultRelations); root->glob->rootResultRelations = lappend_int(root->glob->rootResultRelations, - linitial_int(splan->partitioned_rels)); + splan->rootRelation); } } break; @@ -911,16 +906,27 @@ set_plan_refs(PlannerInfo *root, Plan *plan, int rtoffset) */ set_dummy_tlist_references(plan, rtoffset); Assert(splan->plan.qual == NIL); - foreach(l, splan->partitioned_rels) - { - lfirst_int(l) += rtoffset; - } foreach(l, splan->appendplans) { lfirst(l) = set_plan_refs(root, (Plan *) lfirst(l), rtoffset); } + if (splan->part_prune_info) + { + foreach(l, splan->part_prune_info->prune_infos) + { + List *prune_infos = lfirst(l); + ListCell *l2; + + foreach(l2, prune_infos) + { + PartitionedRelPruneInfo *pinfo = lfirst(l2); + + pinfo->rtindex += rtoffset; + } + } + } } break; case T_MergeAppend: @@ -933,16 +939,27 @@ set_plan_refs(PlannerInfo *root, Plan *plan, int rtoffset) */ set_dummy_tlist_references(plan, rtoffset); Assert(splan->plan.qual == NIL); - foreach(l, splan->partitioned_rels) - { - lfirst_int(l) += rtoffset; - } foreach(l, splan->mergeplans) { lfirst(l) = set_plan_refs(root, (Plan *) lfirst(l), rtoffset); } + if (splan->part_prune_info) + { + foreach(l, splan->part_prune_info->prune_infos) + { + List *prune_infos = lfirst(l); + ListCell *l2; + + foreach(l2, prune_infos) + { + PartitionedRelPruneInfo *pinfo = lfirst(l2); + + pinfo->rtindex += rtoffset; + } + } + } } break; case T_RecursiveUnion: @@ -1395,12 +1412,6 @@ fix_expr_common(PlannerInfo *root, Node *node) record_plan_function_dependency(root, ((ScalarArrayOpExpr *) node)->opfuncid); } - else if (IsA(node, ArrayCoerceExpr)) - { - if (OidIsValid(((ArrayCoerceExpr *) node)->elemfuncid)) - record_plan_function_dependency(root, - ((ArrayCoerceExpr *) node)->elemfuncid); - } else if (IsA(node, Const)) { Const *con = (Const *) node; @@ -1746,8 +1757,8 @@ set_upper_references(PlannerInfo *root, Plan *plan, int rtoffset) TargetEntry *tle = (TargetEntry *) lfirst(l); Node *newexpr; - /* If it's a non-Var sort/group item, first try to match by sortref */ - if (tle->ressortgroupref != 0 && !IsA(tle->expr, Var)) + /* If it's a sort/group item, first try to match by sortref */ + if (tle->ressortgroupref != 0) { newexpr = (Node *) search_indexed_tlist_for_sortgroupref(tle->expr, @@ -1783,6 +1794,51 @@ set_upper_references(PlannerInfo *root, Plan *plan, int rtoffset) pfree(subplan_itlist); } +/* + * set_param_references + * Initialize the initParam list in Gather or Gather merge node such that + * it contains reference of all the params that needs to be evaluated + * before execution of the node. It contains the initplan params that are + * being passed to the plan nodes below it. 
+ */ +static void +set_param_references(PlannerInfo *root, Plan *plan) +{ + Assert(IsA(plan, Gather) ||IsA(plan, GatherMerge)); + + if (plan->lefttree->extParam) + { + PlannerInfo *proot; + Bitmapset *initSetParam = NULL; + ListCell *l; + + for (proot = root; proot != NULL; proot = proot->parent_root) + { + foreach(l, proot->init_plans) + { + SubPlan *initsubplan = (SubPlan *) lfirst(l); + ListCell *l2; + + foreach(l2, initsubplan->setParam) + { + initSetParam = bms_add_member(initSetParam, lfirst_int(l2)); + } + } + } + + /* + * Remember the list of all external initplan params that are used by + * the children of Gather or Gather merge node. + */ + if (IsA(plan, Gather)) + ((Gather *) plan)->initParam = + bms_intersect(plan->lefttree->extParam, initSetParam); + else + ((GatherMerge *) plan)->initParam = + bms_intersect(plan->lefttree->extParam, initSetParam); + } +} + /* * Recursively scan an expression tree and convert Aggrefs to the proper * intermediate form for combining aggregates. This means (1) replacing each @@ -2108,7 +2164,6 @@ search_indexed_tlist_for_non_var(Expr *node, /* * search_indexed_tlist_for_sortgroupref --- find a sort/group expression - * (which is assumed not to be just a Var) * * If a match is found, return a Var constructed to reference the tlist item. * If no match, return NULL. diff --git a/src/backend/optimizer/plan/subselect.c b/src/backend/optimizer/plan/subselect.c index ffbd3eeed7..83008d7661 100644 --- a/src/backend/optimizer/plan/subselect.c +++ b/src/backend/optimizer/plan/subselect.c @@ -3,7 +3,7 @@ * subselect.c * Planning routines for subselects and parameters. * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * IDENTIFICATION @@ -79,6 +79,7 @@ static Node *process_sublinks_mutator(Node *node, process_sublinks_context *context); static Bitmapset *finalize_plan(PlannerInfo *root, Plan *plan, + int gather_param, Bitmapset *valid_params, Bitmapset *scan_params); static bool finalize_primnode(Node *node, finalize_primnode_context *context); @@ -130,7 +131,9 @@ assign_param_for_var(PlannerInfo *root, Var *var) pitem = makeNode(PlannerParamItem); pitem->item = (Node *) var; - pitem->paramId = root->glob->nParamExec++; + pitem->paramId = list_length(root->glob->paramExecTypes); + root->glob->paramExecTypes = lappend_oid(root->glob->paramExecTypes, + var->vartype); root->plan_params = lappend(root->plan_params, pitem); @@ -233,7 +236,9 @@ assign_param_for_placeholdervar(PlannerInfo *root, PlaceHolderVar *phv) pitem = makeNode(PlannerParamItem); pitem->item = (Node *) phv; - pitem->paramId = root->glob->nParamExec++; + pitem->paramId = list_length(root->glob->paramExecTypes); + root->glob->paramExecTypes = lappend_oid(root->glob->paramExecTypes, + exprType((Node *) phv->phexpr)); root->plan_params = lappend(root->plan_params, pitem); @@ -322,7 +327,9 @@ replace_outer_agg(PlannerInfo *root, Aggref *agg) pitem = makeNode(PlannerParamItem); pitem->item = (Node *) agg; - pitem->paramId = root->glob->nParamExec++; + pitem->paramId = list_length(root->glob->paramExecTypes); + root->glob->paramExecTypes = lappend_oid(root->glob->paramExecTypes, + agg->aggtype); root->plan_params = lappend(root->plan_params, pitem); @@ -347,6 +354,7 @@ replace_outer_grouping(PlannerInfo *root, GroupingFunc *grp) Param *retval; PlannerParamItem *pitem; Index levelsup; + Oid ptype; Assert(grp->agglevelsup > 0 && 
grp->agglevelsup < root->query_level); @@ -361,17 +369,20 @@ replace_outer_grouping(PlannerInfo *root, GroupingFunc *grp) grp = copyObject(grp); IncrementVarSublevelsUp((Node *) grp, -((int) grp->agglevelsup), 0); Assert(grp->agglevelsup == 0); + ptype = exprType((Node *) grp); pitem = makeNode(PlannerParamItem); pitem->item = (Node *) grp; - pitem->paramId = root->glob->nParamExec++; + pitem->paramId = list_length(root->glob->paramExecTypes); + root->glob->paramExecTypes = lappend_oid(root->glob->paramExecTypes, + ptype); root->plan_params = lappend(root->plan_params, pitem); retval = makeNode(Param); retval->paramkind = PARAM_EXEC; retval->paramid = pitem->paramId; - retval->paramtype = exprType((Node *) grp); + retval->paramtype = ptype; retval->paramtypmod = -1; retval->paramcollid = InvalidOid; retval->location = grp->location; @@ -384,7 +395,8 @@ replace_outer_grouping(PlannerInfo *root, GroupingFunc *grp) * * This is used to create Params representing subplan outputs. * We don't need to build a PlannerParamItem for such a Param, but we do - * need to record the PARAM_EXEC slot number as being allocated. + * need to make sure we record the type in paramExecTypes (otherwise, + * there won't be a slot allocated for it). */ static Param * generate_new_param(PlannerInfo *root, Oid paramtype, int32 paramtypmod, @@ -394,7 +406,9 @@ generate_new_param(PlannerInfo *root, Oid paramtype, int32 paramtypmod, retval = makeNode(Param); retval->paramkind = PARAM_EXEC; - retval->paramid = root->glob->nParamExec++; + retval->paramid = list_length(root->glob->paramExecTypes); + root->glob->paramExecTypes = lappend_oid(root->glob->paramExecTypes, + paramtype); retval->paramtype = paramtype; retval->paramtypmod = paramtypmod; retval->paramcollid = paramcollation; @@ -414,7 +428,11 @@ generate_new_param(PlannerInfo *root, Oid paramtype, int32 paramtypmod, int SS_assign_special_param(PlannerInfo *root) { - return root->glob->nParamExec++; + int paramId = list_length(root->glob->paramExecTypes); + + root->glob->paramExecTypes = lappend_oid(root->glob->paramExecTypes, + InvalidOid); + return paramId; } /* @@ -1562,7 +1580,7 @@ convert_EXISTS_sublink_to_join(PlannerInfo *root, SubLink *sublink, * won't occur, nor will other side-effects of volatile functions. This seems * unlikely to bother anyone in practice. * - * Returns TRUE if was able to discard the targetlist, else FALSE. + * Returns true if was able to discard the targetlist, else false. */ static bool simplify_EXISTS_query(PlannerInfo *root, Query *query) @@ -1722,7 +1740,7 @@ convert_EXISTS_to_ANY(PlannerInfo *root, Query *subselect, * subroot. */ whereClause = eval_const_expressions(root, whereClause); - whereClause = (Node *) canonicalize_qual((Expr *) whereClause); + whereClause = (Node *) canonicalize_qual((Expr *) whereClause, false); whereClause = (Node *) make_ands_implicit((Expr *) whereClause); /* @@ -2097,7 +2115,7 @@ SS_identify_outer_params(PlannerInfo *root) * If no parameters have been assigned anywhere in the tree, we certainly * don't need to do anything here. */ - if (root->glob->nParamExec == 0) + if (root->glob->paramExecTypes == NIL) return; /* @@ -2184,6 +2202,13 @@ SS_charge_for_initplans(PlannerInfo *root, RelOptInfo *final_rel) path->parallel_safe = false; } + /* + * Forget about any partial paths and clear consider_parallel, too; + * they're not usable if we attached an initPlan. 
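/*
 * Editorial aside: the call sites above replace the old nParamExec counter
 * with the paramExecTypes list, so a new PARAM_EXEC slot's ID is simply the
 * current list length and its type OID is appended (InvalidOid for the
 * "special" params of SS_assign_special_param).  Below is a standalone
 * sketch of that allocation pattern using a growable array; the struct and
 * function names are illustrative, not the planner's own.
 */
#include <stdio.h>
#include <stdlib.h>

typedef unsigned int TypeId;    /* stand-in for a type OID */
#define INVALID_TYPE ((TypeId) 0)

typedef struct ParamTypeList
{
    TypeId     *types;
    int         nparams;
    int         capacity;
} ParamTypeList;

/* Allocate the next param ID: the ID is just the current list length. */
static int
assign_param(ParamTypeList *list, TypeId type)
{
    if (list->nparams == list->capacity)
    {
        list->capacity = list->capacity ? list->capacity * 2 : 8;
        list->types = realloc(list->types, list->capacity * sizeof(TypeId));
        if (list->types == NULL)
        {
            perror("realloc");
            exit(EXIT_FAILURE);
        }
    }
    list->types[list->nparams] = type;
    return list->nparams++;
}

int
main(void)
{
    ParamTypeList params = {NULL, 0, 0};
    int         a = assign_param(&params, 23);              /* a typed param */
    int         b = assign_param(&params, INVALID_TYPE);    /* a "special" one */

    printf("param %d type %u, param %d type %u, %d slots total\n",
           a, params.types[a], b, params.types[b], params.nparams);
    free(params.types);
    return 0;
}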
+ */ + final_rel->partial_pathlist = NIL; + final_rel->consider_parallel = false; + /* We needn't do set_cheapest() here, caller will do it */ } @@ -2217,12 +2242,15 @@ void SS_finalize_plan(PlannerInfo *root, Plan *plan) { /* No setup needed, just recurse through plan tree. */ - (void) finalize_plan(root, plan, root->outer_params, NULL); + (void) finalize_plan(root, plan, -1, root->outer_params, NULL); } /* * Recursive processing of all nodes in the plan tree * + * gather_param is the rescan_param of an ancestral Gather/GatherMerge, + * or -1 if there is none. + * * valid_params is the set of param IDs supplied by outer plan levels * that are valid to reference in this plan node or its children. * @@ -2249,7 +2277,9 @@ SS_finalize_plan(PlannerInfo *root, Plan *plan) * can be handled more cleanly. */ static Bitmapset * -finalize_plan(PlannerInfo *root, Plan *plan, Bitmapset *valid_params, +finalize_plan(PlannerInfo *root, Plan *plan, + int gather_param, + Bitmapset *valid_params, Bitmapset *scan_params) { finalize_primnode_context context; @@ -2302,6 +2332,18 @@ finalize_plan(PlannerInfo *root, Plan *plan, Bitmapset *valid_params, finalize_primnode((Node *) plan->targetlist, &context); finalize_primnode((Node *) plan->qual, &context); + /* + * If it's a parallel-aware scan node, mark it as dependent on the parent + * Gather/GatherMerge's rescan Param. + */ + if (plan->parallel_aware) + { + if (gather_param < 0) + elog(ERROR, "parallel-aware plan node is not below a Gather"); + context.paramids = + bms_add_member(context.paramids, gather_param); + } + /* Check additional node-type-specific fields */ switch (nodeTag(plan)) { @@ -2372,10 +2414,16 @@ finalize_plan(PlannerInfo *root, Plan *plan, Bitmapset *valid_params, { SubqueryScan *sscan = (SubqueryScan *) plan; RelOptInfo *rel; + Bitmapset *subquery_params; - /* We must run SS_finalize_plan on the subquery */ + /* We must run finalize_plan on the subquery */ rel = find_base_rel(root, sscan->scan.scanrelid); - SS_finalize_plan(rel->subroot, sscan->subplan); + subquery_params = rel->subroot->outer_params; + if (gather_param >= 0) + subquery_params = bms_add_member(bms_copy(subquery_params), + gather_param); + finalize_plan(rel->subroot, sscan->subplan, gather_param, + subquery_params, NULL); /* Now we can add its extParams to the parent's params */ context.paramids = bms_add_members(context.paramids, @@ -2512,6 +2560,7 @@ finalize_plan(PlannerInfo *root, Plan *plan, Bitmapset *valid_params, bms_add_members(context.paramids, finalize_plan(root, (Plan *) lfirst(lc), + gather_param, valid_params, scan_params)); } @@ -2542,6 +2591,7 @@ finalize_plan(PlannerInfo *root, Plan *plan, Bitmapset *valid_params, bms_add_members(context.paramids, finalize_plan(root, (Plan *) lfirst(l), + gather_param, valid_params, scan_params)); } @@ -2558,6 +2608,7 @@ finalize_plan(PlannerInfo *root, Plan *plan, Bitmapset *valid_params, bms_add_members(context.paramids, finalize_plan(root, (Plan *) lfirst(l), + gather_param, valid_params, scan_params)); } @@ -2574,6 +2625,7 @@ finalize_plan(PlannerInfo *root, Plan *plan, Bitmapset *valid_params, bms_add_members(context.paramids, finalize_plan(root, (Plan *) lfirst(l), + gather_param, valid_params, scan_params)); } @@ -2590,6 +2642,7 @@ finalize_plan(PlannerInfo *root, Plan *plan, Bitmapset *valid_params, bms_add_members(context.paramids, finalize_plan(root, (Plan *) lfirst(l), + gather_param, valid_params, scan_params)); } @@ -2606,6 +2659,7 @@ finalize_plan(PlannerInfo *root, Plan *plan, Bitmapset *valid_params, 
bms_add_members(context.paramids, finalize_plan(root, (Plan *) lfirst(l), + gather_param, valid_params, scan_params)); } @@ -2697,13 +2751,51 @@ finalize_plan(PlannerInfo *root, Plan *plan, Bitmapset *valid_params, &context); break; + case T_Gather: + /* child nodes are allowed to reference rescan_param, if any */ + locally_added_param = ((Gather *) plan)->rescan_param; + if (locally_added_param >= 0) + { + valid_params = bms_add_member(bms_copy(valid_params), + locally_added_param); + + /* + * We currently don't support nested Gathers. The issue so + * far as this function is concerned would be how to identify + * which child nodes depend on which Gather. + */ + Assert(gather_param < 0); + /* Pass down rescan_param to child parallel-aware nodes */ + gather_param = locally_added_param; + } + /* rescan_param does *not* get added to scan_params */ + break; + + case T_GatherMerge: + /* child nodes are allowed to reference rescan_param, if any */ + locally_added_param = ((GatherMerge *) plan)->rescan_param; + if (locally_added_param >= 0) + { + valid_params = bms_add_member(bms_copy(valid_params), + locally_added_param); + + /* + * We currently don't support nested Gathers. The issue so + * far as this function is concerned would be how to identify + * which child nodes depend on which Gather. + */ + Assert(gather_param < 0); + /* Pass down rescan_param to child parallel-aware nodes */ + gather_param = locally_added_param; + } + /* rescan_param does *not* get added to scan_params */ + break; + case T_ProjectSet: case T_Hash: case T_Material: case T_Sort: case T_Unique: - case T_Gather: - case T_GatherMerge: case T_SetOp: case T_Group: /* no node-type-specific fields need fixing */ @@ -2717,6 +2809,7 @@ finalize_plan(PlannerInfo *root, Plan *plan, Bitmapset *valid_params, /* Process left and right child plans, if any */ child_params = finalize_plan(root, plan->lefttree, + gather_param, valid_params, scan_params); context.paramids = bms_add_members(context.paramids, child_params); @@ -2726,6 +2819,7 @@ finalize_plan(PlannerInfo *root, Plan *plan, Bitmapset *valid_params, /* right child can reference nestloop_params as well as valid_params */ child_params = finalize_plan(root, plan->righttree, + gather_param, bms_union(nestloop_params, valid_params), scan_params); /* ... 
and they don't count as parameters used at my level */ @@ -2737,6 +2831,7 @@ finalize_plan(PlannerInfo *root, Plan *plan, Bitmapset *valid_params, /* easy case */ child_params = finalize_plan(root, plan->righttree, + gather_param, valid_params, scan_params); } diff --git a/src/backend/optimizer/prep/prepjointree.c b/src/backend/optimizer/prep/prepjointree.c index f3bb73a664..cd6e11904e 100644 --- a/src/backend/optimizer/prep/prepjointree.c +++ b/src/backend/optimizer/prep/prepjointree.c @@ -12,7 +12,7 @@ * reduce_outer_joins * * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * @@ -586,10 +586,13 @@ inline_set_returning_functions(PlannerInfo *root) funcquery = inline_set_returning_function(root, rte); if (funcquery) { - /* Successful expansion, replace the rtable entry */ + /* Successful expansion, convert the RTE to a subquery */ rte->rtekind = RTE_SUBQUERY; rte->subquery = funcquery; + rte->security_barrier = false; + /* Clear fields that should not be set in a subquery RTE */ rte->functions = NIL; + rte->funcordinality = false; } } } @@ -644,9 +647,9 @@ pull_up_subqueries(PlannerInfo *root) * This forces use of the PlaceHolderVar mechanism for all non-Var targetlist * items, and puts some additional restrictions on what can be pulled up. * - * deletion_ok is TRUE if the caller can cope with us returning NULL for a + * deletion_ok is true if the caller can cope with us returning NULL for a * deletable leaf node (for example, a VALUES RTE that could be pulled up). - * If it's FALSE, we'll avoid pullup in such cases. + * If it's false, we'll avoid pullup in such cases. * * A tricky aspect of this code is that if we pull up a subquery we have * to replace Vars that reference the subquery's outputs throughout the @@ -914,7 +917,7 @@ pull_up_simple_subquery(PlannerInfo *root, Node *jtnode, RangeTblEntry *rte, subroot->grouping_map = NULL; subroot->minmax_aggs = NIL; subroot->qual_security_level = 0; - subroot->hasInheritedTarget = false; + subroot->inhTargetKind = INHKIND_NONE; subroot->hasRecursion = false; subroot->wt_param_id = -1; subroot->non_recursive_path = NULL; @@ -1003,11 +1006,8 @@ pull_up_simple_subquery(PlannerInfo *root, Node *jtnode, RangeTblEntry *rte, /* * The subquery's targetlist items are now in the appropriate form to - * insert into the top query, but if we are under an outer join then - * non-nullable items and lateral references may have to be turned into - * PlaceHolderVars. If we are dealing with an appendrel member then - * anything that's not a simple Var has to be turned into a - * PlaceHolderVar. Set up required context data for pullup_replace_vars. + * insert into the top query, except that we may need to wrap them in + * PlaceHolderVars. Set up required context data for pullup_replace_vars. */ rvcontext.root = root; rvcontext.targetlist = subquery->targetList; @@ -1019,13 +1019,48 @@ pull_up_simple_subquery(PlannerInfo *root, Node *jtnode, RangeTblEntry *rte, rvcontext.relids = NULL; rvcontext.outer_hasSubLinks = &parse->hasSubLinks; rvcontext.varno = varno; - rvcontext.need_phvs = (lowest_nulling_outer_join != NULL || - containing_appendrel != NULL); - rvcontext.wrap_non_vars = (containing_appendrel != NULL); + /* these flags will be set below, if needed */ + rvcontext.need_phvs = false; + rvcontext.wrap_non_vars = false; /* initialize cache array with indexes 0 .. 
length(tlist) */ rvcontext.rv_cache = palloc0((list_length(subquery->targetList) + 1) * sizeof(Node *)); + /* + * If we are under an outer join then non-nullable items and lateral + * references may have to be turned into PlaceHolderVars. + */ + if (lowest_nulling_outer_join != NULL) + rvcontext.need_phvs = true; + + /* + * If we are dealing with an appendrel member then anything that's not a + * simple Var has to be turned into a PlaceHolderVar. We force this to + * ensure that what we pull up doesn't get merged into a surrounding + * expression during later processing and then fail to match the + * expression actually available from the appendrel. + */ + if (containing_appendrel != NULL) + { + rvcontext.need_phvs = true; + rvcontext.wrap_non_vars = true; + } + + /* + * If the parent query uses grouping sets, we need a PlaceHolderVar for + * anything that's not a simple Var. Again, this ensures that expressions + * retain their separate identity so that they will match grouping set + * columns when appropriate. (It'd be sufficient to wrap values used in + * grouping set columns, and do so only in non-aggregated portions of the + * tlist and havingQual, but that would require a lot of infrastructure + * that pullup_replace_vars hasn't currently got.) + */ + if (parse->groupingSets) + { + rvcontext.need_phvs = true; + rvcontext.wrap_non_vars = true; + } + /* * Replace all of the top query's references to the subquery's outputs * with copies of the adjusted subtlist items, being careful not to @@ -1401,7 +1436,7 @@ make_setop_translation_list(Query *query, Index newvarno, * (Note subquery is not necessarily equal to rte->subquery; it could be a * processed copy of that.) * lowest_outer_join is the lowest outer join above the subquery, or NULL. - * deletion_ok is TRUE if it'd be okay to delete the subquery entirely. + * deletion_ok is true if it'd be okay to delete the subquery entirely. */ static bool is_simple_subquery(Query *subquery, RangeTblEntry *rte, @@ -1457,7 +1492,7 @@ is_simple_subquery(Query *subquery, RangeTblEntry *rte, /* * Don't pull up a subquery with an empty jointree, unless it has no quals - * and deletion_ok is TRUE and we're not underneath an outer join. + * and deletion_ok is true and we're not underneath an outer join. * * query_planner() will correctly generate a Result plan for a jointree * that's totally empty, but we can't cope with an empty FromExpr @@ -1681,7 +1716,7 @@ pull_up_simple_values(PlannerInfo *root, Node *jtnode, RangeTblEntry *rte) * to pull up into the parent query. * * rte is the RTE_VALUES RangeTblEntry to check. - * deletion_ok is TRUE if it'd be okay to delete the VALUES RTE entirely. + * deletion_ok is true if it'd be okay to delete the VALUES RTE entirely. */ static bool is_simple_values(PlannerInfo *root, RangeTblEntry *rte, bool deletion_ok) @@ -1689,7 +1724,7 @@ is_simple_values(PlannerInfo *root, RangeTblEntry *rte, bool deletion_ok) Assert(rte->rtekind == RTE_VALUES); /* - * We can only pull up a VALUES RTE if deletion_ok is TRUE. It's + * We can only pull up a VALUES RTE if deletion_ok is true. It's * basically the same case as a sub-select with empty FROM list; see * comments in is_simple_subquery(). */ @@ -1844,7 +1879,7 @@ is_safe_append_member(Query *subquery) * * If restricted is false, all level-1 Vars are allowed (but we still must * search the jointree, since it might contain outer joins below which there - * will be restrictions). If restricted is true, return TRUE when any qual + * will be restrictions). 
If restricted is true, return true when any qual * in the jointree contains level-1 Vars coming from outside the rels listed * in safe_upper_varnos. */ @@ -2009,6 +2044,18 @@ replace_vars_in_jointree(Node *jtnode, } replace_vars_in_jointree(j->larg, context, lowest_nulling_outer_join); replace_vars_in_jointree(j->rarg, context, lowest_nulling_outer_join); + + /* + * Use PHVs within the join quals of a full join, even when it's the + * lowest nulling outer join. Otherwise, we cannot identify which + * side of the join a pulled-up var-free expression came from, which + * can lead to failure to make a plan at all because none of the quals + * appear to be mergeable or hashable conditions. For this purpose we + * don't care about the state of wrap_non_vars, so leave it alone. + */ + if (j->jointype == JOIN_FULL) + context->need_phvs = true; + j->quals = pullup_replace_vars(j->quals, context); /* diff --git a/src/backend/optimizer/prep/prepqual.c b/src/backend/optimizer/prep/prepqual.c index f75b3274ad..52f8893f4f 100644 --- a/src/backend/optimizer/prep/prepqual.c +++ b/src/backend/optimizer/prep/prepqual.c @@ -19,7 +19,7 @@ * tree after local transformations that might introduce nested AND/ORs. * * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * @@ -39,7 +39,7 @@ static List *pull_ands(List *andlist); static List *pull_ors(List *orlist); -static Expr *find_duplicate_ors(Expr *qual); +static Expr *find_duplicate_ors(Expr *qual, bool is_check); static Expr *process_duplicate_ors(List *orlist); @@ -269,6 +269,11 @@ negate_clause(Node *node) * canonicalize_qual * Convert a qualification expression to the most useful form. * + * This is primarily intended to be used on top-level WHERE (or JOIN/ON) + * clauses. It can also be used on top-level CHECK constraints, for which + * pass is_check = true. DO NOT call it on any expression that is not known + * to be one or the other, as it might apply inappropriate simplifications. + * * The name of this routine is a holdover from a time when it would try to * force the expression into canonical AND-of-ORs or OR-of-ANDs form. * Eventually, we recognized that that had more theoretical purity than @@ -283,7 +288,7 @@ negate_clause(Node *node) * Returns the modified qualification. */ Expr * -canonicalize_qual(Expr *qual) +canonicalize_qual(Expr *qual, bool is_check) { Expr *newqual; @@ -291,12 +296,15 @@ canonicalize_qual(Expr *qual) if (qual == NULL) return NULL; + /* This should not be invoked on quals in implicit-AND format */ + Assert(!IsA(qual, List)); + /* * Pull up redundant subclauses in OR-of-AND trees. We do this only * within the top-level AND/OR structure; there's no point in looking * deeper. Also remove any NULL constants in the top-level structure. */ - newqual = find_duplicate_ors(qual); + newqual = find_duplicate_ors(qual, is_check); return newqual; } @@ -395,16 +403,17 @@ pull_ors(List *orlist) * Only the top-level AND/OR structure is searched. * * While at it, we remove any NULL constants within the top-level AND/OR - * structure, eg "x OR NULL::boolean" is reduced to "x". In general that - * would change the result, so eval_const_expressions can't do it; but at - * top level of WHERE, we don't need to distinguish between FALSE and NULL - * results, so it's valid to treat NULL::boolean the same as FALSE and then - * simplify AND/OR accordingly. 
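/*
 * Editorial aside: the rewritten comment and code that follow make the NULL
 * handling depend on is_check -- at the top of WHERE a constant NULL behaves
 * like FALSE, while at the top of a CHECK constraint it behaves like TRUE.
 * Below is a compact standalone sketch of that folding rule for a constant
 * argument of a top-level OR; the enum and function names are illustrative
 * only.
 */
#include <stdbool.h>
#include <stdio.h>

typedef enum {TV_FALSE, TV_TRUE, TV_NULL} TriValue;
typedef enum {FOLD_DROP_ARG, FOLD_OR_IS_TRUE} OrFold;

static OrFold
fold_const_in_or(TriValue c, bool is_check)
{
    if (is_check)
    {
        /* CHECK passes on NULL, so NULL (or TRUE) makes the whole OR true. */
        return (c == TV_FALSE) ? FOLD_DROP_ARG : FOLD_OR_IS_TRUE;
    }
    /* WHERE rejects the row on NULL just as on FALSE, so drop either one. */
    return (c == TV_TRUE) ? FOLD_OR_IS_TRUE : FOLD_DROP_ARG;
}

int
main(void)
{
    printf("NULL in a WHERE-level OR: %s\n",
           fold_const_in_or(TV_NULL, false) == FOLD_DROP_ARG
           ? "dropped" : "whole OR becomes TRUE");
    printf("NULL in a CHECK-level OR: %s\n",
           fold_const_in_or(TV_NULL, true) == FOLD_DROP_ARG
           ? "dropped" : "whole OR becomes TRUE");
    return 0;
}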
+ * structure, eg in a WHERE clause, "x OR NULL::boolean" is reduced to "x". + * In general that would change the result, so eval_const_expressions can't + * do it; but at top level of WHERE, we don't need to distinguish between + * FALSE and NULL results, so it's valid to treat NULL::boolean the same + * as FALSE and then simplify AND/OR accordingly. Conversely, in a top-level + * CHECK constraint, we may treat a NULL the same as TRUE. * * Returns the modified qualification. AND/OR flatness is preserved. */ static Expr * -find_duplicate_ors(Expr *qual) +find_duplicate_ors(Expr *qual, bool is_check) { if (or_clause((Node *) qual)) { @@ -416,18 +425,29 @@ find_duplicate_ors(Expr *qual) { Expr *arg = (Expr *) lfirst(temp); - arg = find_duplicate_ors(arg); + arg = find_duplicate_ors(arg, is_check); /* Get rid of any constant inputs */ if (arg && IsA(arg, Const)) { Const *carg = (Const *) arg; - /* Drop constant FALSE or NULL */ - if (carg->constisnull || !DatumGetBool(carg->constvalue)) - continue; - /* constant TRUE, so OR reduces to TRUE */ - return arg; + if (is_check) + { + /* Within OR in CHECK, drop constant FALSE */ + if (!carg->constisnull && !DatumGetBool(carg->constvalue)) + continue; + /* Constant TRUE or NULL, so OR reduces to TRUE */ + return (Expr *) makeBoolConst(true, false); + } + else + { + /* Within OR in WHERE, drop constant FALSE or NULL */ + if (carg->constisnull || !DatumGetBool(carg->constvalue)) + continue; + /* Constant TRUE, so OR reduces to TRUE */ + return arg; + } } orlist = lappend(orlist, arg); @@ -449,18 +469,29 @@ find_duplicate_ors(Expr *qual) { Expr *arg = (Expr *) lfirst(temp); - arg = find_duplicate_ors(arg); + arg = find_duplicate_ors(arg, is_check); /* Get rid of any constant inputs */ if (arg && IsA(arg, Const)) { Const *carg = (Const *) arg; - /* Drop constant TRUE */ - if (!carg->constisnull && DatumGetBool(carg->constvalue)) - continue; - /* constant FALSE or NULL, so AND reduces to FALSE */ - return (Expr *) makeBoolConst(false, false); + if (is_check) + { + /* Within AND in CHECK, drop constant TRUE or NULL */ + if (carg->constisnull || DatumGetBool(carg->constvalue)) + continue; + /* Constant FALSE, so AND reduces to FALSE */ + return arg; + } + else + { + /* Within AND in WHERE, drop constant TRUE */ + if (!carg->constisnull && DatumGetBool(carg->constvalue)) + continue; + /* Constant FALSE or NULL, so AND reduces to FALSE */ + return (Expr *) makeBoolConst(false, false); + } } andlist = lappend(andlist, arg); diff --git a/src/backend/optimizer/prep/preptlist.c b/src/backend/optimizer/prep/preptlist.c index afc733f183..8603feef2b 100644 --- a/src/backend/optimizer/prep/preptlist.c +++ b/src/backend/optimizer/prep/preptlist.c @@ -4,20 +4,22 @@ * Routines to preprocess the parse tree target list * * For INSERT and UPDATE queries, the targetlist must contain an entry for - * each attribute of the target relation in the correct order. For all query + * each attribute of the target relation in the correct order. For UPDATE and + * DELETE queries, it must also contain junk tlist entries needed to allow the + * executor to identify the rows to be updated or deleted. For all query * types, we may need to add junk tlist entries for Vars used in the RETURNING * list and row ID information needed for SELECT FOR UPDATE locking and/or * EvalPlanQual checking. * - * The rewriter's rewriteTargetListIU and rewriteTargetListUD routines - * also do preprocessing of the targetlist. 
The division of labor between - * here and there is partially historical, but it's not entirely arbitrary. - * In particular, consider an UPDATE across an inheritance tree. What the - * rewriter does need be done only once (because it depends only on the - * properties of the parent relation). What's done here has to be done over - * again for each child relation, because it depends on the column list of - * the child, which might have more columns and/or a different column order - * than the parent. + * The query rewrite phase also does preprocessing of the targetlist (see + * rewriteTargetListIU). The division of labor between here and there is + * partially historical, but it's not entirely arbitrary. In particular, + * consider an UPDATE across an inheritance tree. What rewriteTargetListIU + * does need be done only once (because it depends only on the properties of + * the parent relation). What's done here has to be done over again for each + * child relation, because it depends on the properties of the child, which + * might be of a different relation type, or have more columns and/or a + * different column order than the parent. * * The fact that rewriteTargetListIU sorts non-resjunk tlist entries by column * position, which expand_targetlist depends on, violates the above comment @@ -27,7 +29,7 @@ * that because it's faster in typical non-inherited cases. * * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * IDENTIFICATION @@ -47,11 +49,12 @@ #include "optimizer/var.h" #include "parser/parsetree.h" #include "parser/parse_coerce.h" +#include "rewrite/rewriteHandler.h" #include "utils/rel.h" static List *expand_targetlist(List *tlist, int command_type, - Index result_relation, List *range_table); + Index result_relation, Relation rel); /* @@ -59,36 +62,61 @@ static List *expand_targetlist(List *tlist, int command_type, * Driver for preprocessing the parse tree targetlist. * * Returns the new targetlist. + * + * As a side effect, if there's an ON CONFLICT UPDATE clause, its targetlist + * is also preprocessed (and updated in-place). */ List * -preprocess_targetlist(PlannerInfo *root, List *tlist) +preprocess_targetlist(PlannerInfo *root) { Query *parse = root->parse; int result_relation = parse->resultRelation; List *range_table = parse->rtable; CmdType command_type = parse->commandType; + RangeTblEntry *target_rte = NULL; + Relation target_relation = NULL; + List *tlist; ListCell *lc; /* - * Sanity check: if there is a result relation, it'd better be a real - * relation not a subquery. Else parser or rewriter messed up. + * If there is a result relation, open it so we can look for missing + * columns and so on. We assume that previous code already acquired at + * least AccessShareLock on the relation, so we need no lock here. */ if (result_relation) { - RangeTblEntry *rte = rt_fetch(result_relation, range_table); + target_rte = rt_fetch(result_relation, range_table); + + /* + * Sanity check: it'd better be a real relation not, say, a subquery. + * Else parser or rewriter messed up. 
+ */ + if (target_rte->rtekind != RTE_RELATION) + elog(ERROR, "result relation must be a regular relation"); - if (rte->subquery != NULL || rte->relid == InvalidOid) - elog(ERROR, "subquery cannot be result relation"); + target_relation = heap_open(target_rte->relid, NoLock); } + else + Assert(command_type == CMD_SELECT); + + /* + * For UPDATE/DELETE, add any junk column(s) needed to allow the executor + * to identify the rows to be updated or deleted. Note that this step + * scribbles on parse->targetList, which is not very desirable, but we + * keep it that way to avoid changing APIs used by FDWs. + */ + if (command_type == CMD_UPDATE || command_type == CMD_DELETE) + rewriteTargetListUD(parse, target_rte, target_relation); /* * for heap_form_tuple to work, the targetlist must match the exact order * of the attributes. We also need to fill in any missing attributes. -ay * 10/94 */ + tlist = parse->targetList; if (command_type == CMD_INSERT || command_type == CMD_UPDATE) tlist = expand_targetlist(tlist, command_type, - result_relation, range_table); + result_relation, target_relation); /* * Add necessary junk columns for rowmarked rels. These values are needed @@ -193,19 +221,21 @@ preprocess_targetlist(PlannerInfo *root, List *tlist) list_free(vars); } - return tlist; -} + /* + * If there's an ON CONFLICT UPDATE clause, preprocess its targetlist too + * while we have the relation open. + */ + if (parse->onConflict) + parse->onConflict->onConflictSet = + expand_targetlist(parse->onConflict->onConflictSet, + CMD_UPDATE, + result_relation, + target_relation); -/* - * preprocess_onconflict_targetlist - * Process ON CONFLICT SET targetlist. - * - * Returns the new targetlist. - */ -List * -preprocess_onconflict_targetlist(List *tlist, int result_relation, List *range_table) -{ - return expand_targetlist(tlist, CMD_UPDATE, result_relation, range_table); + if (target_relation) + heap_close(target_relation, NoLock); + + return tlist; } @@ -223,11 +253,10 @@ preprocess_onconflict_targetlist(List *tlist, int result_relation, List *range_t */ static List * expand_targetlist(List *tlist, int command_type, - Index result_relation, List *range_table) + Index result_relation, Relation rel) { List *new_tlist = NIL; ListCell *tlist_item; - Relation rel; int attrno, numattrs; @@ -238,17 +267,13 @@ expand_targetlist(List *tlist, int command_type, * order; but we have to insert TLEs for any missing attributes. * * Scan the tuple description in the relation's relcache entry to make - * sure we have all the user attributes in the right order. We assume - * that the rewriter already acquired at least AccessShareLock on the - * relation, so we need no lock here. + * sure we have all the user attributes in the right order. 
*/ - rel = heap_open(getrelid(result_relation, range_table), NoLock); - numattrs = RelationGetNumberOfAttributes(rel); for (attrno = 1; attrno <= numattrs; attrno++) { - Form_pg_attribute att_tup = rel->rd_att->attrs[attrno - 1]; + Form_pg_attribute att_tup = TupleDescAttr(rel->rd_att, attrno - 1); TargetEntry *new_tle = NULL; if (tlist_item != NULL) @@ -306,9 +331,9 @@ expand_targetlist(List *tlist, int command_type, new_expr = coerce_to_domain(new_expr, InvalidOid, -1, atttype, + COERCION_IMPLICIT, COERCE_IMPLICIT_CAST, -1, - false, false); } else @@ -386,8 +411,6 @@ expand_targetlist(List *tlist, int command_type, tlist_item = lnext(tlist_item); } - heap_close(rel, NoLock); - return new_tlist; } diff --git a/src/backend/optimizer/prep/prepunion.c b/src/backend/optimizer/prep/prepunion.c index 6d8f8938b2..d5720518a8 100644 --- a/src/backend/optimizer/prep/prepunion.c +++ b/src/backend/optimizer/prep/prepunion.c @@ -17,7 +17,7 @@ * append relations, and thenceforth share code with the UNION ALL case. * * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * @@ -33,7 +33,8 @@ #include "access/heapam.h" #include "access/htup_details.h" #include "access/sysattr.h" -#include "catalog/pg_inherits_fn.h" +#include "catalog/partition.h" +#include "catalog/pg_inherits.h" #include "catalog/pg_type.h" #include "miscadmin.h" #include "nodes/makefuncs.h" @@ -50,6 +51,7 @@ #include "utils/lsyscache.h" #include "utils/rel.h" #include "utils/selfuncs.h" +#include "utils/syscache.h" typedef struct @@ -59,30 +61,29 @@ typedef struct AppendRelInfo **appinfos; } adjust_appendrel_attrs_context; -static Path *recurse_set_operations(Node *setOp, PlannerInfo *root, +static RelOptInfo *recurse_set_operations(Node *setOp, PlannerInfo *root, List *colTypes, List *colCollations, bool junkOK, int flag, List *refnames_tlist, List **pTargetList, double *pNumGroups); -static Path *generate_recursion_path(SetOperationStmt *setOp, +static RelOptInfo *generate_recursion_path(SetOperationStmt *setOp, PlannerInfo *root, List *refnames_tlist, List **pTargetList); -static Path *generate_union_path(SetOperationStmt *op, PlannerInfo *root, +static RelOptInfo *generate_union_paths(SetOperationStmt *op, PlannerInfo *root, + List *refnames_tlist, + List **pTargetList); +static RelOptInfo *generate_nonunion_paths(SetOperationStmt *op, PlannerInfo *root, + List *refnames_tlist, + List **pTargetList); +static List *plan_union_children(PlannerInfo *root, + SetOperationStmt *top_union, List *refnames_tlist, - List **pTargetList, - double *pNumGroups); -static Path *generate_nonunion_path(SetOperationStmt *op, PlannerInfo *root, - List *refnames_tlist, - List **pTargetList, - double *pNumGroups); -static List *recurse_union_children(Node *setOp, PlannerInfo *root, - SetOperationStmt *top_union, - List *refnames_tlist, - List **tlist_list); + List **tlist_list); static Path *make_union_unique(SetOperationStmt *op, Path *path, List *tlist, PlannerInfo *root); +static void postprocess_setop_rel(PlannerInfo *root, RelOptInfo *rel); static bool choose_hashed_setop(PlannerInfo *root, List *groupClauses, Path *input_path, double dNumGroups, double dNumOutputRows, @@ -100,6 +101,17 @@ static List *generate_append_tlist(List *colTypes, List *colCollations, static List *generate_setop_grouplist(SetOperationStmt *op, List *targetlist); static void 
expand_inherited_rtentry(PlannerInfo *root, RangeTblEntry *rte, Index rti); +static void expand_partitioned_rtentry(PlannerInfo *root, + RangeTblEntry *parentrte, + Index parentRTindex, Relation parentrel, + PlanRowMark *top_parentrc, LOCKMODE lockmode, + List **appinfos); +static void expand_single_inheritance_child(PlannerInfo *root, + RangeTblEntry *parentrte, + Index parentRTindex, Relation parentrel, + PlanRowMark *top_parentrc, Relation childrel, + List **appinfos, RangeTblEntry **childrte_p, + Index *childRTindex_p); static void make_inh_translation_list(Relation oldrelation, Relation newrelation, Index newvarno, @@ -136,7 +148,6 @@ plan_set_operations(PlannerInfo *root) RangeTblEntry *leftmostRTE; Query *leftmostQuery; RelOptInfo *setop_rel; - Path *path; List *top_tlist; Assert(topop); @@ -156,6 +167,12 @@ plan_set_operations(PlannerInfo *root) */ setup_simple_rel_arrays(root); + /* + * Populate append_rel_array with each AppendRelInfo to allow direct + * lookups by child relid. + */ + setup_append_rel_array(root); + /* * Find the leftmost component Query. We need to use its column names for * all generated tlists (else SELECT INTO won't work right). @@ -168,57 +185,34 @@ plan_set_operations(PlannerInfo *root) leftmostQuery = leftmostRTE->subquery; Assert(leftmostQuery != NULL); - /* - * We return our results in the (SETOP, NULL) upperrel. For the moment, - * this is also the parent rel of all Paths in the setop tree; we may well - * change that in future. - */ - setop_rel = fetch_upper_rel(root, UPPERREL_SETOP, NULL); - - /* - * We don't currently worry about setting setop_rel's consider_parallel - * flag, nor about allowing FDWs to contribute paths to it. - */ - /* * If the topmost node is a recursive union, it needs special processing. */ if (root->hasRecursion) { - path = generate_recursion_path(topop, root, - leftmostQuery->targetList, - &top_tlist); + setop_rel = generate_recursion_path(topop, root, + leftmostQuery->targetList, + &top_tlist); } else { /* * Recurse on setOperations tree to generate paths for set ops. The - * final output path should have just the column types shown as the + * final output paths should have just the column types shown as the * output from the top-level node, plus possibly resjunk working * columns (we can rely on upper-level nodes to deal with that). */ - path = recurse_set_operations((Node *) topop, root, - topop->colTypes, topop->colCollations, - true, -1, - leftmostQuery->targetList, - &top_tlist, - NULL); + setop_rel = recurse_set_operations((Node *) topop, root, + topop->colTypes, topop->colCollations, + true, -1, + leftmostQuery->targetList, + &top_tlist, + NULL); } /* Must return the built tlist into root->processed_tlist. */ root->processed_tlist = top_tlist; - /* Add only the final path to the SETOP upperrel. 
*/ - add_path(setop_rel, path); - - /* Let extensions possibly add some more paths */ - if (create_upper_paths_hook) - (*create_upper_paths_hook) (root, UPPERREL_SETOP, - NULL, setop_rel); - - /* Select cheapest path */ - set_cheapest(setop_rel); - return setop_rel; } @@ -232,21 +226,21 @@ plan_set_operations(PlannerInfo *root) * flag: if >= 0, add a resjunk output column indicating value of flag * refnames_tlist: targetlist to take column names from * - * Returns a path for the subtree, as well as these output parameters: + * Returns a RelOptInfo for the subtree, as well as these output parameters: * *pTargetList: receives the fully-fledged tlist for the subtree's top plan * *pNumGroups: if not NULL, we estimate the number of distinct groups * in the result, and store it there * * The pTargetList output parameter is mostly redundant with the pathtarget - * of the returned path, but for the moment we need it because much of the - * logic in this file depends on flag columns being marked resjunk. Pending - * a redesign of how that works, this is the easy way out. + * of the returned RelOptInfo, but for the moment we need it because much of + * the logic in this file depends on flag columns being marked resjunk. + * Pending a redesign of how that works, this is the easy way out. * * We don't have to care about typmods here: the only allowed difference * between set-op input and output typmods is input is a specific typmod * and output is -1, and that does not require a coercion. */ -static Path * +static RelOptInfo * recurse_set_operations(Node *setOp, PlannerInfo *root, List *colTypes, List *colCollations, bool junkOK, @@ -254,12 +248,16 @@ recurse_set_operations(Node *setOp, PlannerInfo *root, List **pTargetList, double *pNumGroups) { + RelOptInfo *rel = NULL; /* keep compiler quiet */ + + /* Guard against stack overflow due to overly complex setop nests */ + check_stack_depth(); + if (IsA(setOp, RangeTblRef)) { RangeTblRef *rtr = (RangeTblRef *) setOp; RangeTblEntry *rte = root->simple_rte_array[rtr->rtindex]; Query *subquery = rte->subquery; - RelOptInfo *rel; PlannerInfo *subroot; RelOptInfo *final_rel; Path *subpath; @@ -268,11 +266,7 @@ recurse_set_operations(Node *setOp, PlannerInfo *root, Assert(subquery != NULL); - /* - * We need to build a RelOptInfo for each leaf subquery. This isn't - * used for much here, but it carries the subroot data structures - * forward to setrefs.c processing. - */ + /* Build a RelOptInfo for this leaf subquery. */ rel = build_simple_rel(root, rtr->rtindex, NULL); /* plan_params should not be in use in current query level */ @@ -291,6 +285,18 @@ recurse_set_operations(Node *setOp, PlannerInfo *root, if (root->plan_params) elog(ERROR, "unexpected outer reference in set operation subquery"); + /* Figure out the appropriate target list for this subquery. */ + tlist = generate_setop_tlist(colTypes, colCollations, + flag, + rtr->rtindex, + true, + subroot->processed_tlist, + refnames_tlist); + rel->reltarget = create_pathtarget(root, tlist); + + /* Return the fully-fledged tlist to caller, too */ + *pTargetList = tlist; + /* * Mark rel with estimated output rows, width, etc. Note that we have * to do this before generating outer-query paths, else @@ -298,12 +304,18 @@ recurse_set_operations(Node *setOp, PlannerInfo *root, */ set_subquery_size_estimates(root, rel); + /* + * Since we may want to add a partial path to this relation, we must + * set its consider_parallel flag correctly. 
+ */ + final_rel = fetch_upper_rel(subroot, UPPERREL_FINAL, NULL); + rel->consider_parallel = final_rel->consider_parallel; + /* * For the moment, we consider only a single Path for the subquery. * This should change soon (make it look more like * set_subquery_pathlist). */ - final_rel = fetch_upper_rel(subroot, UPPERREL_FINAL, NULL); subpath = get_cheapest_fractional_path(final_rel, root->tuple_fraction); @@ -318,22 +330,25 @@ recurse_set_operations(Node *setOp, PlannerInfo *root, path = (Path *) create_subqueryscan_path(root, rel, subpath, NIL, NULL); + add_path(rel, path); + /* - * Figure out the appropriate target list, and update the - * SubqueryScanPath with the PathTarget form of that. + * If we have a partial path for the child relation, we can use that + * to build a partial path for this relation. But there's no point in + * considering any path but the cheapest. */ - tlist = generate_setop_tlist(colTypes, colCollations, - flag, - rtr->rtindex, - true, - subroot->processed_tlist, - refnames_tlist); - - path = apply_projection_to_path(root, rel, path, - create_pathtarget(root, tlist)); - - /* Return the fully-fledged tlist to caller, too */ - *pTargetList = tlist; + if (rel->consider_parallel && bms_is_empty(rel->lateral_relids) && + final_rel->partial_pathlist != NIL) + { + Path *partial_subpath; + Path *partial_path; + + partial_subpath = linitial(final_rel->partial_pathlist); + partial_path = (Path *) + create_subqueryscan_path(root, rel, partial_subpath, + NIL, NULL); + add_partial_path(rel, partial_path); + } /* * Estimate number of groups if caller wants it. If the subquery used @@ -362,25 +377,22 @@ recurse_set_operations(Node *setOp, PlannerInfo *root, subpath->rows, NULL); } - - return (Path *) path; } else if (IsA(setOp, SetOperationStmt)) { SetOperationStmt *op = (SetOperationStmt *) setOp; - Path *path; /* UNIONs are much different from INTERSECT/EXCEPT */ if (op->op == SETOP_UNION) - path = generate_union_path(op, root, + rel = generate_union_paths(op, root, refnames_tlist, - pTargetList, - pNumGroups); + pTargetList); else - path = generate_nonunion_path(op, root, + rel = generate_nonunion_paths(op, root, refnames_tlist, - pTargetList, - pNumGroups); + pTargetList); + if (pNumGroups) + *pNumGroups = rel->rows; /* * If necessary, add a Result node to project the caller-requested @@ -399,39 +411,70 @@ recurse_set_operations(Node *setOp, PlannerInfo *root, !tlist_same_datatypes(*pTargetList, colTypes, junkOK) || !tlist_same_collations(*pTargetList, colCollations, junkOK)) { + PathTarget *target; + ListCell *lc; + *pTargetList = generate_setop_tlist(colTypes, colCollations, flag, 0, false, *pTargetList, refnames_tlist); - path = apply_projection_to_path(root, - path->parent, - path, - create_pathtarget(root, - *pTargetList)); + target = create_pathtarget(root, *pTargetList); + + /* Apply projection to each path */ + foreach(lc, rel->pathlist) + { + Path *subpath = (Path *) lfirst(lc); + Path *path; + + Assert(subpath->param_info == NULL); + path = apply_projection_to_path(root, subpath->parent, + subpath, target); + /* If we had to add a Result, path is different from subpath */ + if (path != subpath) + lfirst(lc) = path; + } + + /* Apply projection to each partial path */ + foreach(lc, rel->partial_pathlist) + { + Path *subpath = (Path *) lfirst(lc); + Path *path; + + Assert(subpath->param_info == NULL); + + /* avoid apply_projection_to_path, in case of multiple refs */ + path = (Path *) create_projection_path(root, subpath->parent, + subpath, target); + lfirst(lc) = 
path; + } } - return path; } else { elog(ERROR, "unrecognized node type: %d", (int) nodeTag(setOp)); *pTargetList = NIL; - return NULL; /* keep compiler quiet */ } + + postprocess_setop_rel(root, rel); + + return rel; } /* - * Generate path for a recursive UNION node + * Generate paths for a recursive UNION node */ -static Path * +static RelOptInfo * generate_recursion_path(SetOperationStmt *setOp, PlannerInfo *root, List *refnames_tlist, List **pTargetList) { - RelOptInfo *result_rel = fetch_upper_rel(root, UPPERREL_SETOP, NULL); + RelOptInfo *result_rel; Path *path; + RelOptInfo *lrel, + *rrel; Path *lpath; Path *rpath; List *lpath_tlist; @@ -450,20 +493,22 @@ generate_recursion_path(SetOperationStmt *setOp, PlannerInfo *root, * Unlike a regular UNION node, process the left and right inputs * separately without any intention of combining them into one Append. */ - lpath = recurse_set_operations(setOp->larg, root, - setOp->colTypes, setOp->colCollations, - false, -1, - refnames_tlist, - &lpath_tlist, - NULL); + lrel = recurse_set_operations(setOp->larg, root, + setOp->colTypes, setOp->colCollations, + false, -1, + refnames_tlist, + &lpath_tlist, + NULL); + lpath = lrel->cheapest_total_path; /* The right path will want to look at the left one ... */ root->non_recursive_path = lpath; - rpath = recurse_set_operations(setOp->rarg, root, - setOp->colTypes, setOp->colCollations, - false, -1, - refnames_tlist, - &rpath_tlist, - NULL); + rrel = recurse_set_operations(setOp->rarg, root, + setOp->colTypes, setOp->colCollations, + false, -1, + refnames_tlist, + &rpath_tlist, + NULL); + rpath = rrel->cheapest_total_path; root->non_recursive_path = NULL; /* @@ -475,6 +520,11 @@ generate_recursion_path(SetOperationStmt *setOp, PlannerInfo *root, *pTargetList = tlist; + /* Build result relation. */ + result_rel = fetch_upper_rel(root, UPPERREL_SETOP, + bms_union(lrel->relids, rrel->relids)); + result_rel->reltarget = create_pathtarget(root, tlist); + /* * If UNION, identify the grouping operators */ @@ -509,28 +559,33 @@ generate_recursion_path(SetOperationStmt *setOp, PlannerInfo *root, result_rel, lpath, rpath, - create_pathtarget(root, tlist), + result_rel->reltarget, groupList, root->wt_param_id, dNumGroups); - return path; + add_path(result_rel, path); + postprocess_setop_rel(root, result_rel); + return result_rel; } /* - * Generate path for a UNION or UNION ALL node + * Generate paths for a UNION or UNION ALL node */ -static Path * -generate_union_path(SetOperationStmt *op, PlannerInfo *root, - List *refnames_tlist, - List **pTargetList, - double *pNumGroups) +static RelOptInfo * +generate_union_paths(SetOperationStmt *op, PlannerInfo *root, + List *refnames_tlist, + List **pTargetList) { - RelOptInfo *result_rel = fetch_upper_rel(root, UPPERREL_SETOP, NULL); + Relids relids = NULL; + RelOptInfo *result_rel; double save_fraction = root->tuple_fraction; - List *pathlist; - List *child_tlists1; - List *child_tlists2; + ListCell *lc; + List *pathlist = NIL; + List *partial_pathlist = NIL; + bool partial_paths_valid = true; + bool consider_parallel = true; + List *rellist; List *tlist_list; List *tlist; Path *path; @@ -555,13 +610,7 @@ generate_union_path(SetOperationStmt *op, PlannerInfo *root, * only one Append and unique-ification for the lot. Recurse to find such * nodes and compute their children's paths. 
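/*
 * Editorial aside: the loop that follows (like the partitionwise-aggregation
 * code earlier in this patch) lets the parent consider parallelism only if
 * every child does, and keeps partial paths only if every child supplied
 * one.  Below is a standalone sketch of that accumulation; the struct and
 * field names are illustrative, not planner data structures.
 */
#include <stdbool.h>
#include <stdio.h>

typedef struct ChildInfo
{
    bool        consider_parallel;  /* child rel is parallel-safe */
    bool        has_partial_path;   /* child produced a partial path */
} ChildInfo;

typedef struct ParentFlags
{
    bool        consider_parallel;  /* true only if all children allow it */
    bool        partial_paths_valid;    /* true only if every child has one */
} ParentFlags;

static ParentFlags
combine_children(const ChildInfo *children, int nchildren)
{
    ParentFlags flags = {true, true};

    for (int i = 0; i < nchildren; i++)
    {
        if (!children[i].consider_parallel)
        {
            /* One parallel-unsafe child disables parallelism entirely. */
            flags.consider_parallel = false;
            flags.partial_paths_valid = false;
        }
        else if (!children[i].has_partial_path)
            flags.partial_paths_valid = false;
    }
    return flags;
}

int
main(void)
{
    ChildInfo   kids[] = {{true, true}, {true, false}, {true, true}};
    ParentFlags f = combine_children(kids, 3);

    printf("consider_parallel=%d partial_paths_valid=%d\n",
           f.consider_parallel, f.partial_paths_valid);
    return 0;
}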
*/ - pathlist = list_concat(recurse_union_children(op->larg, root, - op, refnames_tlist, - &child_tlists1), - recurse_union_children(op->rarg, root, - op, refnames_tlist, - &child_tlists2)); - tlist_list = list_concat(child_tlists1, child_tlists2); + rellist = plan_union_children(root, op, refnames_tlist, &tlist_list); /* * Generate tlist for Append plan node. @@ -575,13 +624,40 @@ generate_union_path(SetOperationStmt *op, PlannerInfo *root, *pTargetList = tlist; + /* Build path lists and relid set. */ + foreach(lc, rellist) + { + RelOptInfo *rel = lfirst(lc); + + pathlist = lappend(pathlist, rel->cheapest_total_path); + + if (consider_parallel) + { + if (!rel->consider_parallel) + { + consider_parallel = false; + partial_paths_valid = false; + } + else if (rel->partial_pathlist == NIL) + partial_paths_valid = false; + else + partial_pathlist = lappend(partial_pathlist, + linitial(rel->partial_pathlist)); + } + + relids = bms_union(relids, rel->relids); + } + + /* Build result relation. */ + result_rel = fetch_upper_rel(root, UPPERREL_SETOP, relids); + result_rel->reltarget = create_pathtarget(root, tlist); + result_rel->consider_parallel = consider_parallel; + /* * Append the child results together. */ - path = (Path *) create_append_path(result_rel, pathlist, NULL, 0, NIL); - - /* We have to manually jam the right tlist into the path; ick */ - path->pathtarget = create_pathtarget(root, tlist); + path = (Path *) create_append_path(root, result_rel, pathlist, NIL, + NULL, 0, false, NIL, -1); /* * For UNION ALL, we just need the Append path. For UNION, need to add @@ -590,30 +666,79 @@ generate_union_path(SetOperationStmt *op, PlannerInfo *root, if (!op->all) path = make_union_unique(op, path, tlist, root); + add_path(result_rel, path); + /* - * Estimate number of groups if caller wants it. For now we just assume - * the output is unique --- this is certainly true for the UNION case, and - * we want worst-case estimates anyway. + * Estimate number of groups. For now we just assume the output is unique + * --- this is certainly true for the UNION case, and we want worst-case + * estimates anyway. */ - if (pNumGroups) - *pNumGroups = path->rows; + result_rel->rows = path->rows; + + /* + * Now consider doing the same thing using the partial paths plus Append + * plus Gather. + */ + if (partial_paths_valid) + { + Path *ppath; + ListCell *lc; + int parallel_workers = 0; + + /* Find the highest number of workers requested for any subpath. */ + foreach(lc, partial_pathlist) + { + Path *path = lfirst(lc); + + parallel_workers = Max(parallel_workers, path->parallel_workers); + } + Assert(parallel_workers > 0); + + /* + * If the use of parallel append is permitted, always request at least + * log2(# of children) paths. We assume it can be useful to have + * extra workers in this case because they will be spread out across + * the children. The precise formula is just a guess; see + * add_paths_to_append_rel. 
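/*
 * Editorial aside: the comment above describes how the Gather's worker count
 * is chosen for a parallel Append -- take the maximum requested by any child
 * partial path, raise it to at least fls(number of children) when parallel
 * append is permitted, then clamp to the per-Gather limit.  Below is a
 * standalone sketch of that arithmetic; the helper and parameter names are
 * illustrative, not GUCs or planner symbols.
 */
#include <stdio.h>

/* Position of the highest set bit, 1-based (0 for 0), like BSD fls(). */
static int
highest_set_bit(int x)
{
    int         pos = 0;

    while (x > 0)
    {
        pos++;
        x >>= 1;
    }
    return pos;
}

static int
choose_append_workers(const int *child_workers, int nchildren,
                      int per_gather_limit, int allow_parallel_append)
{
    int         workers = 0;

    /* Highest number of workers requested by any child partial path. */
    for (int i = 0; i < nchildren; i++)
    {
        if (child_workers[i] > workers)
            workers = child_workers[i];
    }

    if (allow_parallel_append)
    {
        /* Ask for roughly log2(nchildren) workers, but respect the cap. */
        if (highest_set_bit(nchildren) > workers)
            workers = highest_set_bit(nchildren);
        if (workers > per_gather_limit)
            workers = per_gather_limit;
    }
    return workers;
}

int
main(void)
{
    int         children[] = {1, 2, 1, 1, 1, 1, 1, 1, 1};   /* nine partial subpaths */

    printf("workers = %d\n", choose_append_workers(children, 9, 4, 1));
    return 0;
}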
+ */ + if (enable_parallel_append) + { + parallel_workers = Max(parallel_workers, + fls(list_length(partial_pathlist))); + parallel_workers = Min(parallel_workers, + max_parallel_workers_per_gather); + } + Assert(parallel_workers > 0); + + ppath = (Path *) + create_append_path(root, result_rel, NIL, partial_pathlist, + NULL, parallel_workers, enable_parallel_append, + NIL, -1); + ppath = (Path *) + create_gather_path(root, result_rel, ppath, + result_rel->reltarget, NULL, NULL); + if (!op->all) + ppath = make_union_unique(op, ppath, tlist, root); + add_path(result_rel, ppath); + } /* Undo effects of possibly forcing tuple_fraction to 0 */ root->tuple_fraction = save_fraction; - return path; + return result_rel; } /* - * Generate path for an INTERSECT, INTERSECT ALL, EXCEPT, or EXCEPT ALL node + * Generate paths for an INTERSECT, INTERSECT ALL, EXCEPT, or EXCEPT ALL node */ -static Path * -generate_nonunion_path(SetOperationStmt *op, PlannerInfo *root, - List *refnames_tlist, - List **pTargetList, - double *pNumGroups) +static RelOptInfo * +generate_nonunion_paths(SetOperationStmt *op, PlannerInfo *root, + List *refnames_tlist, + List **pTargetList) { - RelOptInfo *result_rel = fetch_upper_rel(root, UPPERREL_SETOP, NULL); + RelOptInfo *result_rel; + RelOptInfo *lrel, + *rrel; double save_fraction = root->tuple_fraction; Path *lpath, *rpath, @@ -638,18 +763,20 @@ generate_nonunion_path(SetOperationStmt *op, PlannerInfo *root, root->tuple_fraction = 0.0; /* Recurse on children, ensuring their outputs are marked */ - lpath = recurse_set_operations(op->larg, root, - op->colTypes, op->colCollations, - false, 0, - refnames_tlist, - &lpath_tlist, - &dLeftGroups); - rpath = recurse_set_operations(op->rarg, root, - op->colTypes, op->colCollations, - false, 1, - refnames_tlist, - &rpath_tlist, - &dRightGroups); + lrel = recurse_set_operations(op->larg, root, + op->colTypes, op->colCollations, + false, 0, + refnames_tlist, + &lpath_tlist, + &dLeftGroups); + lpath = lrel->cheapest_total_path; + rrel = recurse_set_operations(op->rarg, root, + op->colTypes, op->colCollations, + false, 1, + refnames_tlist, + &rpath_tlist, + &dRightGroups); + rpath = rrel->cheapest_total_path; /* Undo effects of forcing tuple_fraction to 0 */ root->tuple_fraction = save_fraction; @@ -687,21 +814,20 @@ generate_nonunion_path(SetOperationStmt *op, PlannerInfo *root, *pTargetList = tlist; + /* Build result relation. */ + result_rel = fetch_upper_rel(root, UPPERREL_SETOP, + bms_union(lrel->relids, rrel->relids)); + result_rel->reltarget = create_pathtarget(root, tlist);; + /* * Append the child results together. */ - path = (Path *) create_append_path(result_rel, pathlist, NULL, 0, NIL); - - /* We have to manually jam the right tlist into the path; ick */ - path->pathtarget = create_pathtarget(root, tlist); + path = (Path *) create_append_path(root, result_rel, pathlist, NIL, + NULL, 0, false, NIL, -1); /* Identify the grouping semantics */ groupList = generate_setop_grouplist(op, tlist); - /* punt if nothing to group on (can this happen?) */ - if (groupList == NIL) - return path; - /* * Estimate number of distinct groups that we'll need hashtable entries * for; this is the size of the left-hand input for EXCEPT, or the smaller @@ -728,7 +854,7 @@ generate_nonunion_path(SetOperationStmt *op, PlannerInfo *root, dNumGroups, dNumOutputRows, (op->op == SETOP_INTERSECT) ? 
"INTERSECT" : "EXCEPT"); - if (!use_hash) + if (groupList && !use_hash) path = (Path *) create_sort_path(root, result_rel, path, @@ -764,10 +890,9 @@ generate_nonunion_path(SetOperationStmt *op, PlannerInfo *root, dNumGroups, dNumOutputRows); - if (pNumGroups) - *pNumGroups = dNumGroups; - - return path; + result_rel->rows = path->rows; + add_path(result_rel, path); + return result_rel; } /* @@ -781,59 +906,61 @@ generate_nonunion_path(SetOperationStmt *op, PlannerInfo *root, * collations have the same notion of equality. It is valid from an * implementation standpoint because we don't care about the ordering of * a UNION child's result: UNION ALL results are always unordered, and - * generate_union_path will force a fresh sort if the top level is a UNION. + * generate_union_paths will force a fresh sort if the top level is a UNION. */ static List * -recurse_union_children(Node *setOp, PlannerInfo *root, - SetOperationStmt *top_union, - List *refnames_tlist, - List **tlist_list) +plan_union_children(PlannerInfo *root, + SetOperationStmt *top_union, + List *refnames_tlist, + List **tlist_list) { - List *result; + List *pending_rels = list_make1(top_union); + List *result = NIL; List *child_tlist; - if (IsA(setOp, SetOperationStmt)) + *tlist_list = NIL; + + while (pending_rels != NIL) { - SetOperationStmt *op = (SetOperationStmt *) setOp; + Node *setOp = linitial(pending_rels); + + pending_rels = list_delete_first(pending_rels); - if (op->op == top_union->op && - (op->all == top_union->all || op->all) && - equal(op->colTypes, top_union->colTypes)) + if (IsA(setOp, SetOperationStmt)) { - /* Same UNION, so fold children into parent's subpath list */ - List *child_tlists1; - List *child_tlists2; + SetOperationStmt *op = (SetOperationStmt *) setOp; - result = list_concat(recurse_union_children(op->larg, root, - top_union, - refnames_tlist, - &child_tlists1), - recurse_union_children(op->rarg, root, - top_union, - refnames_tlist, - &child_tlists2)); - *tlist_list = list_concat(child_tlists1, child_tlists2); - return result; + if (op->op == top_union->op && + (op->all == top_union->all || op->all) && + equal(op->colTypes, top_union->colTypes)) + { + /* Same UNION, so fold children into parent */ + pending_rels = lcons(op->rarg, pending_rels); + pending_rels = lcons(op->larg, pending_rels); + continue; + } } + + /* + * Not same, so plan this child separately. + * + * Note we disallow any resjunk columns in child results. This is + * necessary since the Append node that implements the union won't do + * any projection, and upper levels will get confused if some of our + * output tuples have junk and some don't. This case only arises when + * we have an EXCEPT or INTERSECT as child, else there won't be + * resjunk anyway. + */ + result = lappend(result, recurse_set_operations(setOp, root, + top_union->colTypes, + top_union->colCollations, + false, -1, + refnames_tlist, + &child_tlist, + NULL)); + *tlist_list = lappend(*tlist_list, child_tlist); } - /* - * Not same, so plan this child separately. - * - * Note we disallow any resjunk columns in child results. This is - * necessary since the Append node that implements the union won't do any - * projection, and upper levels will get confused if some of our output - * tuples have junk and some don't. This case only arises when we have an - * EXCEPT or INTERSECT as child, else there won't be resjunk anyway. 
- */ - result = list_make1(recurse_set_operations(setOp, root, - top_union->colTypes, - top_union->colCollations, - false, -1, - refnames_tlist, - &child_tlist, - NULL)); - *tlist_list = list_make1(child_tlist); return result; } @@ -851,10 +978,6 @@ make_union_unique(SetOperationStmt *op, Path *path, List *tlist, /* Identify the grouping semantics */ groupList = generate_setop_grouplist(op, tlist); - /* punt if nothing to group on (can this happen?) */ - if (groupList == NIL) - return path; - /* * XXX for the moment, take the number of distinct groups as equal to the * total input size, ie, the worst case. This is too conservative, but we @@ -885,15 +1008,15 @@ make_union_unique(SetOperationStmt *op, Path *path, List *tlist, else { /* Sort and Unique */ - path = (Path *) create_sort_path(root, - result_rel, - path, - make_pathkeys_for_sortclauses(root, - groupList, - tlist), - -1.0); - /* We have to manually jam the right tlist into the path; ick */ - path->pathtarget = create_pathtarget(root, tlist); + if (groupList) + path = (Path *) + create_sort_path(root, + result_rel, + path, + make_pathkeys_for_sortclauses(root, + groupList, + tlist), + -1.0); path = (Path *) create_upper_unique_path(root, result_rel, path, @@ -904,6 +1027,24 @@ make_union_unique(SetOperationStmt *op, Path *path, List *tlist, return path; } +/* + * postprocess_setop_rel - perform steps required after adding paths + */ +static void +postprocess_setop_rel(PlannerInfo *root, RelOptInfo *rel) +{ + /* + * We don't currently worry about allowing FDWs to contribute paths to + * this relation, but give extensions a chance. + */ + if (create_upper_paths_hook) + (*create_upper_paths_hook) (root, UPPERREL_SETOP, + NULL, rel, NULL); + + /* Select cheapest path */ + set_cheapest(rel); +} + /* * choose_hashed_setop - should we use hashing for a set operation? */ @@ -965,6 +1106,7 @@ choose_hashed_setop(PlannerInfo *root, List *groupClauses, */ cost_agg(&hashed_p, root, AGG_HASHED, NULL, numGroupCols, dNumGroups, + NIL, input_path->startup_cost, input_path->total_cost, input_path->rows); @@ -979,6 +1121,7 @@ choose_hashed_setop(PlannerInfo *root, List *groupClauses, input_path->rows, input_path->pathtarget->width, 0.0, work_mem, -1.0); cost_group(&sorted_p, root, numGroupCols, dNumGroups, + NIL, sorted_p.startup_cost, sorted_p.total_cost, input_path->rows); @@ -1334,9 +1477,9 @@ expand_inherited_tables(PlannerInfo *root) ListCell *rl; /* - * expand_inherited_rtentry may add RTEs to parse->rtable; there is no - * need to scan them since they can't have inh=true. So just scan as far - * as the original end of the rtable list. + * expand_inherited_rtentry may add RTEs to parse->rtable. The function is + * expected to recursively handle any RTEs that it creates with inh=true. + * So just scan as far as the original end of the rtable list. */ nrtes = list_length(root->parse->rtable); rl = list_head(root->parse->rtable); @@ -1372,17 +1515,12 @@ expand_inherited_tables(PlannerInfo *root) static void expand_inherited_rtentry(PlannerInfo *root, RangeTblEntry *rte, Index rti) { - Query *parse = root->parse; Oid parentOID; PlanRowMark *oldrc; Relation oldrelation; LOCKMODE lockmode; List *inhOIDs; - List *appinfos; ListCell *l; - bool has_child; - PartitionedChildRelInfo *pcinfo; - List *partitioned_child_rels = NIL; /* Does RT entry allow inheritance? */ if (!rte->inh) @@ -1407,21 +1545,9 @@ expand_inherited_rtentry(PlannerInfo *root, RangeTblEntry *rte, Index rti) * relation named in the query. 
However, for each child relation we add * to the query, we must obtain an appropriate lock, because this will be * the first use of those relations in the parse/rewrite/plan pipeline. - * - * If the parent relation is the query's result relation, then we need - * RowExclusiveLock. Otherwise, if it's accessed FOR UPDATE/SHARE, we - * need RowShareLock; otherwise AccessShareLock. We can't just grab - * AccessShareLock because then the executor would be trying to upgrade - * the lock, leading to possible deadlocks. (This code should match the - * parser and rewriter.) + * Child rels should use the same lockmode as their parent. */ - oldrc = get_plan_rowmark(root->rowMarks, rti); - if (rti == parse->resultRelation) - lockmode = RowExclusiveLock; - else if (oldrc && RowMarkRequiresRowShareLock(oldrc->markType)) - lockmode = RowShareLock; - else - lockmode = AccessShareLock; + lockmode = rte->rellockmode; /* Scan for all members of inheritance set, acquire needed locks */ inhOIDs = find_all_inheritors(parentOID, lockmode, NULL); @@ -1443,6 +1569,7 @@ expand_inherited_rtentry(PlannerInfo *root, RangeTblEntry *rte, Index rti) * PlanRowMark as isParent = true, and generate a new PlanRowMark for each * child. */ + oldrc = get_plan_rowmark(root->rowMarks, rti); if (oldrc) oldrc->isParent = true; @@ -1453,171 +1580,293 @@ expand_inherited_rtentry(PlannerInfo *root, RangeTblEntry *rte, Index rti) oldrelation = heap_open(parentOID, NoLock); /* Scan the inheritance set and expand it */ - appinfos = NIL; - has_child = false; - foreach(l, inhOIDs) + if (RelationGetPartitionDesc(oldrelation) != NULL) { - Oid childOID = lfirst_oid(l); - Relation newrelation; - RangeTblEntry *childrte; - Index childRTindex; - AppendRelInfo *appinfo; - - /* Open rel if needed; we already have required locks */ - if (childOID != parentOID) - newrelation = heap_open(childOID, NoLock); - else - newrelation = oldrelation; - - /* - * It is possible that the parent table has children that are temp - * tables of other backends. We cannot safely access such tables - * (because of buffering issues), and the best thing to do seems to be - * to silently ignore them. - */ - if (childOID != parentOID && RELATION_IS_OTHER_TEMP(newrelation)) - { - heap_close(newrelation, lockmode); - continue; - } + Assert(rte->relkind == RELKIND_PARTITIONED_TABLE); /* - * Build an RTE for the child, and attach to query's rangetable list. - * We copy most fields of the parent's RTE, but replace relation OID - * and relkind, and set inh = false. Also, set requiredPerms to zero - * since all required permissions checks are done on the original RTE. - * Likewise, set the child's securityQuals to empty, because we only - * want to apply the parent's RLS conditions regardless of what RLS - * properties individual children may have. (This is an intentional - * choice to make inherited RLS work like regular permissions checks.) - * The parent securityQuals will be propagated to children along with - * other base restriction clauses, so we don't need to do it here. + * If this table has partitions, recursively expand them in the order + * in which they appear in the PartitionDesc. While at it, also + * extract the partition key columns of all the partitioned tables. 
*/ - childrte = copyObject(rte); - childrte->relid = childOID; - childrte->relkind = newrelation->rd_rel->relkind; - childrte->inh = false; - childrte->requiredPerms = 0; - childrte->securityQuals = NIL; - parse->rtable = lappend(parse->rtable, childrte); - childRTindex = list_length(parse->rtable); + expand_partitioned_rtentry(root, rte, rti, oldrelation, oldrc, + lockmode, &root->append_rel_list); + } + else + { + List *appinfos = NIL; + RangeTblEntry *childrte; + Index childRTindex; /* - * Build an AppendRelInfo for this parent and child, unless the child - * is a partitioned table. + * This table has no partitions. Expand any plain inheritance + * children in the order the OIDs were returned by + * find_all_inheritors. */ - if (childrte->relkind != RELKIND_PARTITIONED_TABLE) + foreach(l, inhOIDs) { - /* Remember if we saw a real child. */ + Oid childOID = lfirst_oid(l); + Relation newrelation; + + /* Open rel if needed; we already have required locks */ if (childOID != parentOID) - has_child = true; - - appinfo = makeNode(AppendRelInfo); - appinfo->parent_relid = rti; - appinfo->child_relid = childRTindex; - appinfo->parent_reltype = oldrelation->rd_rel->reltype; - appinfo->child_reltype = newrelation->rd_rel->reltype; - make_inh_translation_list(oldrelation, newrelation, childRTindex, - &appinfo->translated_vars); - appinfo->parent_reloid = parentOID; - appinfos = lappend(appinfos, appinfo); + newrelation = heap_open(childOID, NoLock); + else + newrelation = oldrelation; /* - * Translate the column permissions bitmaps to the child's attnums - * (we have to build the translated_vars list before we can do - * this). But if this is the parent table, leave copyObject's - * result alone. - * - * Note: we need to do this even though the executor won't run any - * permissions checks on the child RTE. The - * insertedCols/updatedCols bitmaps may be examined for - * trigger-firing purposes. + * It is possible that the parent table has children that are temp + * tables of other backends. We cannot safely access such tables + * (because of buffering issues), and the best thing to do seems + * to be to silently ignore them. */ - if (childOID != parentOID) + if (childOID != parentOID && RELATION_IS_OTHER_TEMP(newrelation)) { - childrte->selectedCols = translate_col_privs(rte->selectedCols, - appinfo->translated_vars); - childrte->insertedCols = translate_col_privs(rte->insertedCols, - appinfo->translated_vars); - childrte->updatedCols = translate_col_privs(rte->updatedCols, - appinfo->translated_vars); + heap_close(newrelation, lockmode); + continue; } + + expand_single_inheritance_child(root, rte, rti, oldrelation, oldrc, + newrelation, + &appinfos, &childrte, + &childRTindex); + + /* Close child relations, but keep locks */ + if (childOID != parentOID) + heap_close(newrelation, NoLock); } - else - partitioned_child_rels = lappend_int(partitioned_child_rels, - childRTindex); /* - * Build a PlanRowMark if parent is marked FOR UPDATE/SHARE. + * If all the children were temp tables, pretend it's a + * non-inheritance situation; we don't need Append node in that case. + * The duplicate RTE we added for the parent table is harmless, so we + * don't bother to get rid of it; ditto for the useless PlanRowMark + * node. 
*/ - if (oldrc) - { - PlanRowMark *newrc = makeNode(PlanRowMark); + if (list_length(appinfos) < 2) + rte->inh = false; + else + root->append_rel_list = list_concat(root->append_rel_list, + appinfos); - newrc->rti = childRTindex; - newrc->prti = rti; - newrc->rowmarkId = oldrc->rowmarkId; - /* Reselect rowmark type, because relkind might not match parent */ - newrc->markType = select_rowmark_type(childrte, oldrc->strength); - newrc->allMarkTypes = (1 << newrc->markType); - newrc->strength = oldrc->strength; - newrc->waitPolicy = oldrc->waitPolicy; + } - /* - * We mark RowMarks for partitioned child tables as parent - * RowMarks so that the executor ignores them (except their - * existence means that the child tables be locked using - * appropriate mode). - */ - newrc->isParent = (childrte->relkind == RELKIND_PARTITIONED_TABLE); + heap_close(oldrelation, NoLock); +} - /* Include child's rowmark type in parent's allMarkTypes */ - oldrc->allMarkTypes |= newrc->allMarkTypes; +/* + * expand_partitioned_rtentry + * Recursively expand an RTE for a partitioned table. + * + * Note that RelationGetPartitionDispatchInfo will expand partitions in the + * same order as this code. + */ +static void +expand_partitioned_rtentry(PlannerInfo *root, RangeTblEntry *parentrte, + Index parentRTindex, Relation parentrel, + PlanRowMark *top_parentrc, LOCKMODE lockmode, + List **appinfos) +{ + int i; + RangeTblEntry *childrte; + Index childRTindex; + PartitionDesc partdesc = RelationGetPartitionDesc(parentrel); - root->rowMarks = lappend(root->rowMarks, newrc); - } + check_stack_depth(); - /* Close child relations, but keep locks */ - if (childOID != parentOID) - heap_close(newrelation, NoLock); - } + /* A partitioned table should always have a partition descriptor. */ + Assert(partdesc); - heap_close(oldrelation, NoLock); + Assert(parentrte->inh); + + /* + * Note down whether any partition key cols are being updated. Though it's + * the root partitioned table's updatedCols we are interested in, we + * instead use parentrte to get the updatedCols. This is convenient + * because parentrte already has the root partrel's updatedCols translated + * to match the attribute ordering of parentrel. + */ + if (!root->partColsUpdated) + root->partColsUpdated = + has_partition_attrs(parentrel, parentrte->updatedCols, NULL); + + /* First expand the partitioned table itself. */ + expand_single_inheritance_child(root, parentrte, parentRTindex, parentrel, + top_parentrc, parentrel, + appinfos, &childrte, &childRTindex); /* - * If all the children were temp tables or a partitioned parent did not - * have any leaf partitions, pretend it's a non-inheritance situation; we - * don't need Append node in that case. The duplicate RTE we added for - * the parent table is harmless, so we don't bother to get rid of it; - * ditto for the useless PlanRowMark node. + * If the partitioned table has no partitions, treat this as the + * non-inheritance case. */ - if (!has_child) + if (partdesc->nparts == 0) { - /* Clear flag before returning */ - rte->inh = false; + parentrte->inh = false; return; } + for (i = 0; i < partdesc->nparts; i++) + { + Oid childOID = partdesc->oids[i]; + Relation childrel; + + /* Open rel; we already have required locks */ + childrel = heap_open(childOID, NoLock); + + /* + * Temporary partitions belonging to other sessions should have been + * disallowed at definition, but for paranoia's sake, let's double + * check. 
+ */ + if (RELATION_IS_OTHER_TEMP(childrel)) + elog(ERROR, "temporary relation from another session found as partition"); + + expand_single_inheritance_child(root, parentrte, parentRTindex, + parentrel, top_parentrc, childrel, + appinfos, &childrte, &childRTindex); + + /* If this child is itself partitioned, recurse */ + if (childrel->rd_rel->relkind == RELKIND_PARTITIONED_TABLE) + expand_partitioned_rtentry(root, childrte, childRTindex, + childrel, top_parentrc, lockmode, + appinfos); + + /* Close child relation, but keep locks */ + heap_close(childrel, NoLock); + } +} + +/* + * expand_single_inheritance_child + * Build a RangeTblEntry and an AppendRelInfo, if appropriate, plus + * maybe a PlanRowMark. + * + * We now expand the partition hierarchy level by level, creating a + * corresponding hierarchy of AppendRelInfos and RelOptInfos, where each + * partitioned descendant acts as a parent of its immediate partitions. + * (This is a difference from what older versions of PostgreSQL did and what + * is still done in the case of table inheritance for unpartitioned tables, + * where the hierarchy is flattened during RTE expansion.) + * + * PlanRowMarks still carry the top-parent's RTI, and the top-parent's + * allMarkTypes field still accumulates values from all descendents. + * + * "parentrte" and "parentRTindex" are immediate parent's RTE and + * RTI. "top_parentrc" is top parent's PlanRowMark. + * + * The child RangeTblEntry and its RTI are returned in "childrte_p" and + * "childRTindex_p" resp. + */ +static void +expand_single_inheritance_child(PlannerInfo *root, RangeTblEntry *parentrte, + Index parentRTindex, Relation parentrel, + PlanRowMark *top_parentrc, Relation childrel, + List **appinfos, RangeTblEntry **childrte_p, + Index *childRTindex_p) +{ + Query *parse = root->parse; + Oid parentOID = RelationGetRelid(parentrel); + Oid childOID = RelationGetRelid(childrel); + RangeTblEntry *childrte; + Index childRTindex; + AppendRelInfo *appinfo; + + /* + * Build an RTE for the child, and attach to query's rangetable list. We + * copy most fields of the parent's RTE, but replace relation OID and + * relkind, and set inh = false. Also, set requiredPerms to zero since + * all required permissions checks are done on the original RTE. Likewise, + * set the child's securityQuals to empty, because we only want to apply + * the parent's RLS conditions regardless of what RLS properties + * individual children may have. (This is an intentional choice to make + * inherited RLS work like regular permissions checks.) The parent + * securityQuals will be propagated to children along with other base + * restriction clauses, so we don't need to do it here. + */ + childrte = copyObject(parentrte); + *childrte_p = childrte; + childrte->relid = childOID; + childrte->relkind = childrel->rd_rel->relkind; + /* A partitioned child will need to be expanded further. */ + if (childOID != parentOID && + childrte->relkind == RELKIND_PARTITIONED_TABLE) + childrte->inh = true; + else + childrte->inh = false; + childrte->requiredPerms = 0; + childrte->securityQuals = NIL; + parse->rtable = lappend(parse->rtable, childrte); + childRTindex = list_length(parse->rtable); + *childRTindex_p = childRTindex; + /* - * We keep a list of objects in root, each of which maps a partitioned - * parent RT index to the list of RT indexes of its partitioned child - * tables. 
When creating an Append or a ModifyTable path for the parent, - * we copy the child RT index list verbatim to the path so that it could - * be carried over to the executor so that the latter could identify the - * partitioned child tables. + * We need an AppendRelInfo if paths will be built for the child RTE. If + * childrte->inh is true, then we'll always need to generate append paths + * for it. If childrte->inh is false, we must scan it if it's not a + * partitioned table; but if it is a partitioned table, then it never has + * any data of its own and need not be scanned. */ - if (partitioned_child_rels != NIL) + if (childrte->relkind != RELKIND_PARTITIONED_TABLE || childrte->inh) { - pcinfo = makeNode(PartitionedChildRelInfo); + appinfo = makeNode(AppendRelInfo); + appinfo->parent_relid = parentRTindex; + appinfo->child_relid = childRTindex; + appinfo->parent_reltype = parentrel->rd_rel->reltype; + appinfo->child_reltype = childrel->rd_rel->reltype; + make_inh_translation_list(parentrel, childrel, childRTindex, + &appinfo->translated_vars); + appinfo->parent_reloid = parentOID; + *appinfos = lappend(*appinfos, appinfo); - Assert(rte->relkind == RELKIND_PARTITIONED_TABLE); - pcinfo->parent_relid = rti; - pcinfo->child_rels = partitioned_child_rels; - root->pcinfo_list = lappend(root->pcinfo_list, pcinfo); + /* + * Translate the column permissions bitmaps to the child's attnums (we + * have to build the translated_vars list before we can do this). But + * if this is the parent table, leave copyObject's result alone. + * + * Note: we need to do this even though the executor won't run any + * permissions checks on the child RTE. The insertedCols/updatedCols + * bitmaps may be examined for trigger-firing purposes. + */ + if (childOID != parentOID) + { + childrte->selectedCols = translate_col_privs(parentrte->selectedCols, + appinfo->translated_vars); + childrte->insertedCols = translate_col_privs(parentrte->insertedCols, + appinfo->translated_vars); + childrte->updatedCols = translate_col_privs(parentrte->updatedCols, + appinfo->translated_vars); + } } - /* Otherwise, OK to add to root->append_rel_list */ - root->append_rel_list = list_concat(root->append_rel_list, appinfos); + /* + * Build a PlanRowMark if parent is marked FOR UPDATE/SHARE. + */ + if (top_parentrc) + { + PlanRowMark *childrc = makeNode(PlanRowMark); + + childrc->rti = childRTindex; + childrc->prti = top_parentrc->rti; + childrc->rowmarkId = top_parentrc->rowmarkId; + /* Reselect rowmark type, because relkind might not match parent */ + childrc->markType = select_rowmark_type(childrte, + top_parentrc->strength); + childrc->allMarkTypes = (1 << childrc->markType); + childrc->strength = top_parentrc->strength; + childrc->waitPolicy = top_parentrc->waitPolicy; + + /* + * We mark RowMarks for partitioned child tables as parent RowMarks so + * that the executor ignores them (except their existence means that + * the child tables be locked using appropriate mode). 
+ */ + childrc->isParent = (childrte->relkind == RELKIND_PARTITIONED_TABLE); + + /* Include child's rowmark type in top parent's allMarkTypes */ + top_parentrc->allMarkTypes |= childrc->allMarkTypes; + + root->rowMarks = lappend(root->rowMarks, childrc); + } } /* @@ -1635,9 +1884,11 @@ make_inh_translation_list(Relation oldrelation, Relation newrelation, List *vars = NIL; TupleDesc old_tupdesc = RelationGetDescr(oldrelation); TupleDesc new_tupdesc = RelationGetDescr(newrelation); + Oid new_relid = RelationGetRelid(newrelation); int oldnatts = old_tupdesc->natts; int newnatts = new_tupdesc->natts; int old_attno; + int new_attno = 0; for (old_attno = 0; old_attno < oldnatts; old_attno++) { @@ -1646,9 +1897,8 @@ make_inh_translation_list(Relation oldrelation, Relation newrelation, Oid atttypid; int32 atttypmod; Oid attcollation; - int new_attno; - att = old_tupdesc->attrs[old_attno]; + att = TupleDescAttr(old_tupdesc, old_attno); if (att->attisdropped) { /* Just put NULL into this list entry */ @@ -1679,29 +1929,25 @@ make_inh_translation_list(Relation oldrelation, Relation newrelation, * Otherwise we have to search for the matching column by name. * There's no guarantee it'll have the same column position, because * of cases like ALTER TABLE ADD COLUMN and multiple inheritance. - * However, in simple cases it will be the same column number, so try - * that before we go groveling through all the columns. - * - * Note: the test for (att = ...) != NULL cannot fail, it's just a - * notational device to include the assignment into the if-clause. + * However, in simple cases, the relative order of columns is mostly + * the same in both relations, so try the column of newrelation that + * follows immediately after the one that we just found, and if that + * fails, let syscache handle it. */ - if (old_attno < newnatts && - (att = new_tupdesc->attrs[old_attno]) != NULL && - !att->attisdropped && att->attinhcount != 0 && - strcmp(attname, NameStr(att->attname)) == 0) - new_attno = old_attno; - else + if (new_attno >= newnatts || + (att = TupleDescAttr(new_tupdesc, new_attno))->attisdropped || + strcmp(attname, NameStr(att->attname)) != 0) { - for (new_attno = 0; new_attno < newnatts; new_attno++) - { - att = new_tupdesc->attrs[new_attno]; - if (!att->attisdropped && att->attinhcount != 0 && - strcmp(attname, NameStr(att->attname)) == 0) - break; - } - if (new_attno >= newnatts) + HeapTuple newtup; + + newtup = SearchSysCacheAttName(new_relid, attname); + if (!newtup) elog(ERROR, "could not find inherited attribute \"%s\" of relation \"%s\"", attname, RelationGetRelationName(newrelation)); + new_attno = ((Form_pg_attribute) GETSTRUCT(newtup))->attnum - 1; + ReleaseSysCache(newtup); + + att = TupleDescAttr(new_tupdesc, new_attno); } /* Found it, check type and collation match */ @@ -1718,6 +1964,7 @@ make_inh_translation_list(Relation oldrelation, Relation newrelation, atttypmod, attcollation, 0)); + new_attno++; } *translated_vars = vars; @@ -2088,7 +2335,7 @@ adjust_appendrel_attrs_mutator(Node *node, * Substitute child relids for parent relids in a Relid set. The array of * appinfos specifies the substitutions to be performed. */ -Relids +static Relids adjust_child_relids(Relids relids, int nappinfos, AppendRelInfo **appinfos) { Bitmapset *result = NULL; @@ -2118,6 +2365,59 @@ adjust_child_relids(Relids relids, int nappinfos, AppendRelInfo **appinfos) return relids; } +/* + * Replace any relid present in top_parent_relids with its child in + * child_relids. 
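/*
 * A minimal standalone sketch of the column-matching strategy in the
 * revised make_inh_translation_list() above: assume the child's columns
 * usually keep the parent's relative order, so first try the position
 * right after the previous match and only fall back to a by-name search
 * when that guess fails.  The parent/child column lists are hypothetical,
 * and the linear search stands in for the syscache lookup by name.
 */
#include <stdio.h>
#include <string.h>

static const char *parent_cols[] = {"id", "name", "price"};
static const char *child_cols[] = {"id", "name", "discount", "price"};
#define NPARENT 3
#define NCHILD	4

int
main(void)
{
	int			new_attno = 0;	/* next child position to try */
	int			old_attno;

	for (old_attno = 0; old_attno < NPARENT; old_attno++)
	{
		const char *attname = parent_cols[old_attno];

		/*
		 * Fast path: hope the column sits at the expected position.  If it
		 * does not, search the child by name instead.
		 */
		if (new_attno >= NCHILD ||
			strcmp(attname, child_cols[new_attno]) != 0)
		{
			int			i;

			for (i = 0; i < NCHILD; i++)
				if (strcmp(attname, child_cols[i]) == 0)
					break;
			if (i >= NCHILD)
			{
				fprintf(stderr, "column \"%s\" not found\n", attname);
				return 1;
			}
			new_attno = i;
		}
		printf("parent column %d (%s) -> child column %d\n",
			   old_attno + 1, attname, new_attno + 1);
		new_attno++;			/* the next parent column usually follows */
	}
	return 0;
}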
Members of child_relids can be multiple levels below top + * parent in the partition hierarchy. + */ +Relids +adjust_child_relids_multilevel(PlannerInfo *root, Relids relids, + Relids child_relids, Relids top_parent_relids) +{ + AppendRelInfo **appinfos; + int nappinfos; + Relids parent_relids = NULL; + Relids result; + Relids tmp_result = NULL; + int cnt; + + /* + * If the given relids set doesn't contain any of the top parent relids, + * it will remain unchanged. + */ + if (!bms_overlap(relids, top_parent_relids)) + return relids; + + appinfos = find_appinfos_by_relids(root, child_relids, &nappinfos); + + /* Construct relids set for the immediate parent of the given child. */ + for (cnt = 0; cnt < nappinfos; cnt++) + { + AppendRelInfo *appinfo = appinfos[cnt]; + + parent_relids = bms_add_member(parent_relids, appinfo->parent_relid); + } + + /* Recurse if immediate parent is not the top parent. */ + if (!bms_equal(parent_relids, top_parent_relids)) + { + tmp_result = adjust_child_relids_multilevel(root, relids, + parent_relids, + top_parent_relids); + relids = tmp_result; + } + + result = adjust_child_relids(relids, nappinfos, appinfos); + + /* Free memory consumed by any intermediate result. */ + if (tmp_result) + bms_free(tmp_result); + bms_free(parent_relids); + pfree(appinfos); + + return result; +} + /* * Adjust the targetlist entries of an inherited UPDATE operation * @@ -2257,39 +2557,74 @@ adjust_appendrel_attrs_multilevel(PlannerInfo *root, Node *node, return node; } +/* + * Construct the SpecialJoinInfo for a child-join by translating + * SpecialJoinInfo for the join between parents. left_relids and right_relids + * are the relids of left and right side of the join respectively. + */ +SpecialJoinInfo * +build_child_join_sjinfo(PlannerInfo *root, SpecialJoinInfo *parent_sjinfo, + Relids left_relids, Relids right_relids) +{ + SpecialJoinInfo *sjinfo = makeNode(SpecialJoinInfo); + AppendRelInfo **left_appinfos; + int left_nappinfos; + AppendRelInfo **right_appinfos; + int right_nappinfos; + + memcpy(sjinfo, parent_sjinfo, sizeof(SpecialJoinInfo)); + left_appinfos = find_appinfos_by_relids(root, left_relids, + &left_nappinfos); + right_appinfos = find_appinfos_by_relids(root, right_relids, + &right_nappinfos); + + sjinfo->min_lefthand = adjust_child_relids(sjinfo->min_lefthand, + left_nappinfos, left_appinfos); + sjinfo->min_righthand = adjust_child_relids(sjinfo->min_righthand, + right_nappinfos, + right_appinfos); + sjinfo->syn_lefthand = adjust_child_relids(sjinfo->syn_lefthand, + left_nappinfos, left_appinfos); + sjinfo->syn_righthand = adjust_child_relids(sjinfo->syn_righthand, + right_nappinfos, + right_appinfos); + sjinfo->semi_rhs_exprs = (List *) adjust_appendrel_attrs(root, + (Node *) sjinfo->semi_rhs_exprs, + right_nappinfos, + right_appinfos); + + pfree(left_appinfos); + pfree(right_appinfos); + + return sjinfo; +} + /* * find_appinfos_by_relids * Find AppendRelInfo structures for all relations specified by relids. * * The AppendRelInfos are returned in an array, which can be pfree'd by the - * caller. *nappinfos is set to the the number of entries in the array. + * caller. *nappinfos is set to the number of entries in the array. 
*/ AppendRelInfo ** find_appinfos_by_relids(PlannerInfo *root, Relids relids, int *nappinfos) { - ListCell *lc; AppendRelInfo **appinfos; int cnt = 0; + int i; *nappinfos = bms_num_members(relids); appinfos = (AppendRelInfo **) palloc(sizeof(AppendRelInfo *) * *nappinfos); - foreach(lc, root->append_rel_list) + i = -1; + while ((i = bms_next_member(relids, i)) >= 0) { - AppendRelInfo *appinfo = lfirst(lc); + AppendRelInfo *appinfo = root->append_rel_array[i]; - if (bms_is_member(appinfo->child_relid, relids)) - { - appinfos[cnt] = appinfo; - cnt++; + if (!appinfo) + elog(ERROR, "child rel %d not found in append_rel_array", i); - /* Stop when we have gathered all the AppendRelInfos. */ - if (cnt == *nappinfos) - return appinfos; - } + appinfos[cnt++] = appinfo; } - - /* Should have found the entries ... */ - elog(ERROR, "did not find all requested child rels in append_rel_list"); - return NULL; /* not reached */ + return appinfos; } diff --git a/src/backend/optimizer/util/clauses.c b/src/backend/optimizer/util/clauses.c index 602d17dfb4..8df369315b 100644 --- a/src/backend/optimizer/util/clauses.c +++ b/src/backend/optimizer/util/clauses.c @@ -3,7 +3,7 @@ * clauses.c * routines to manipulate qualification clauses * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * @@ -115,6 +115,9 @@ static List *find_nonnullable_vars_walker(Node *node, bool top_level); static bool is_strict_saop(ScalarArrayOpExpr *expr, bool falseOK); static Node *eval_const_expressions_mutator(Node *node, eval_const_expressions_context *context); +static bool contain_non_const_walker(Node *node, void *context); +static bool ece_function_is_safe(Oid funcid, + eval_const_expressions_context *context); static List *simplify_or_arguments(List *args, eval_const_expressions_context *context, bool *haveNull, bool *forceTrue); @@ -127,8 +130,6 @@ static Expr *simplify_function(Oid funcid, Oid result_collid, Oid input_collid, List **args_p, bool funcvariadic, bool process_args, bool allow_non_const, eval_const_expressions_context *context); -static List *expand_function_arguments(List *args, Oid result_type, - HeapTuple func_tuple); static List *reorder_function_arguments(List *args, HeapTuple func_tuple); static List *add_function_defaults(List *args, HeapTuple func_tuple); static List *fetch_function_defaults(HeapTuple func_tuple); @@ -832,7 +833,7 @@ expression_returns_set_rows(Node *clause) * contain_subplans * Recursively search for subplan nodes within a clause. * - * If we see a SubLink node, we will return TRUE. This is only possible if + * If we see a SubLink node, we will return true. This is only possible if * the expression tree hasn't yet been transformed by subselect.c. We do not * know whether the node will produce a true subplan or just an initplan, * but we make the conservative assumption that it will be a subplan. @@ -1087,6 +1088,8 @@ bool is_parallel_safe(PlannerInfo *root, Node *node) { max_parallel_hazard_context context; + PlannerInfo *proot; + ListCell *l; /* * Even if the original querytree contained nothing unsafe, we need to @@ -1095,12 +1098,31 @@ is_parallel_safe(PlannerInfo *root, Node *node) * in this expression. But otherwise we don't need to look. 
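/*
 * A minimal standalone sketch of the lookup strategy in the rewritten
 * find_appinfos_by_relids() above: instead of scanning a list for each
 * requested child, walk the members of the relid set and index a
 * per-relid array directly, complaining if a slot is empty.  The small
 * bitmask, MAXREL and the "appinfo" string placeholders are hypothetical
 * stand-ins for Bitmapset and the planner's append_rel_array.
 */
#include <stdio.h>

#define MAXREL 8

int
main(void)
{
	/* Hypothetical per-relid array; unset slots are NULL. */
	const char *append_rel_array[MAXREL] = {
		NULL, NULL, "appinfo for rel 2", "appinfo for rel 3",
		NULL, "appinfo for rel 5", NULL, NULL
	};
	unsigned int relids = (1u << 2) | (1u << 3) | (1u << 5);
	int			i;

	/* Visit each member of the set and look it up in O(1). */
	for (i = 0; i < MAXREL; i++)
	{
		if ((relids & (1u << i)) == 0)
			continue;
		if (append_rel_array[i] == NULL)
		{
			fprintf(stderr, "child rel %d not found\n", i);
			return 1;
		}
		printf("relid %d -> %s\n", i, append_rel_array[i]);
	}
	return 0;
}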
*/ if (root->glob->maxParallelHazard == PROPARALLEL_SAFE && - root->glob->nParamExec == 0) + root->glob->paramExecTypes == NIL) return true; /* Else use max_parallel_hazard's search logic, but stop on RESTRICTED */ context.max_hazard = PROPARALLEL_SAFE; context.max_interesting = PROPARALLEL_RESTRICTED; context.safe_param_ids = NIL; + + /* + * The params that refer to the same or parent query level are considered + * parallel-safe. The idea is that we compute such params at Gather or + * Gather Merge node and pass their value to workers. + */ + for (proot = root; proot != NULL; proot = proot->parent_root) + { + foreach(l, proot->init_plans) + { + SubPlan *initsubplan = (SubPlan *) lfirst(l); + ListCell *l2; + + foreach(l2, initsubplan->setParam) + context.safe_param_ids = lcons_int(lfirst_int(l2), + context.safe_param_ids); + } + } + return !max_parallel_hazard_walker(node, &context); } @@ -1168,12 +1190,26 @@ max_parallel_hazard_walker(Node *node, max_parallel_hazard_context *context) return true; } - if (IsA(node, NextValueExpr)) + else if (IsA(node, NextValueExpr)) { if (max_parallel_hazard_test(PROPARALLEL_UNSAFE, context)) return true; } + /* + * Treat window functions as parallel-restricted because we aren't sure + * whether the input row ordering is fully deterministic, and the output + * of window functions might vary across workers if not. (In some cases, + * like where the window frame orders by a primary key, we could relax + * this restriction. But it doesn't currently seem worth expending extra + * effort to do so.) + */ + else if (IsA(node, WindowFunc)) + { + if (max_parallel_hazard_test(PROPARALLEL_RESTRICTED, context)) + return true; + } + /* * As a notational convenience for callers, look through RestrictInfo. */ @@ -1223,13 +1259,18 @@ max_parallel_hazard_walker(Node *node, max_parallel_hazard_context *context) /* * We can't pass Params to workers at the moment either, so they are also - * parallel-restricted, unless they are PARAM_EXEC Params listed in - * safe_param_ids, meaning they could be generated within the worker. + * parallel-restricted, unless they are PARAM_EXTERN Params or are + * PARAM_EXEC Params listed in safe_param_ids, meaning they could be + * either generated within the worker or can be computed in master and + * then their value can be passed to the worker. */ else if (IsA(node, Param)) { Param *param = (Param *) node; + if (param->paramkind == PARAM_EXTERN) + return false; + if (param->paramkind != PARAM_EXEC || !list_member_int(context->safe_param_ids, param->paramid)) { @@ -1361,6 +1402,17 @@ contain_nonstrict_functions_walker(Node *node, void *context) return true; if (IsA(node, FieldStore)) return true; + if (IsA(node, ArrayCoerceExpr)) + { + /* + * ArrayCoerceExpr is strict at the array level, regardless of what + * the per-element expression is; so we should ignore elemexpr and + * recurse only into the arg. + */ + return expression_tree_walker((Node *) ((ArrayCoerceExpr *) node)->arg, + contain_nonstrict_functions_walker, + context); + } if (IsA(node, CaseExpr)) return true; if (IsA(node, ArrayExpr)) @@ -1380,14 +1432,11 @@ contain_nonstrict_functions_walker(Node *node, void *context) if (IsA(node, BooleanTest)) return true; - /* - * Check other function-containing nodes; but ArrayCoerceExpr is strict at - * the array level, regardless of elemfunc. 
- */ - if (!IsA(node, ArrayCoerceExpr) && - check_functions_in_node(node, contain_nonstrict_functions_checker, + /* Check other function-containing nodes */ + if (check_functions_in_node(node, contain_nonstrict_functions_checker, context)) return true; + return expression_tree_walker(node, contain_nonstrict_functions_walker, context); } @@ -1403,7 +1452,8 @@ contain_nonstrict_functions_walker(Node *node, void *context) * CaseTestExpr nodes must appear directly within the corresponding CaseExpr, * not nested within another one, or they'll see the wrong test value. If one * appears "bare" in the arguments of a SQL function, then we can't inline the - * SQL function for fear of creating such a situation. + * SQL function for fear of creating such a situation. The same applies for + * CaseTestExpr used within the elemexpr of an ArrayCoerceExpr. * * CoerceToDomainValue would have the same issue if domain CHECK expressions * could get inlined into larger expressions, but presently that's impossible. @@ -1419,7 +1469,7 @@ contain_context_dependent_node(Node *clause) return contain_context_dependent_node_walker(clause, &flags); } -#define CCDN_IN_CASEEXPR 0x0001 /* CaseTestExpr okay here? */ +#define CCDN_CASETESTEXPR_OK 0x0001 /* CaseTestExpr okay here? */ static bool contain_context_dependent_node_walker(Node *node, int *flags) @@ -1427,8 +1477,8 @@ contain_context_dependent_node_walker(Node *node, int *flags) if (node == NULL) return false; if (IsA(node, CaseTestExpr)) - return !(*flags & CCDN_IN_CASEEXPR); - if (IsA(node, CaseExpr)) + return !(*flags & CCDN_CASETESTEXPR_OK); + else if (IsA(node, CaseExpr)) { CaseExpr *caseexpr = (CaseExpr *) node; @@ -1450,7 +1500,7 @@ contain_context_dependent_node_walker(Node *node, int *flags) * seem worth any extra code. If there are any bare CaseTestExprs * elsewhere in the CASE, something's wrong already. */ - *flags |= CCDN_IN_CASEEXPR; + *flags |= CCDN_CASETESTEXPR_OK; res = expression_tree_walker(node, contain_context_dependent_node_walker, (void *) flags); @@ -1458,6 +1508,24 @@ contain_context_dependent_node_walker(Node *node, int *flags) return res; } } + else if (IsA(node, ArrayCoerceExpr)) + { + ArrayCoerceExpr *ac = (ArrayCoerceExpr *) node; + int save_flags; + bool res; + + /* Check the array expression */ + if (contain_context_dependent_node_walker((Node *) ac->arg, flags)) + return true; + + /* Check the elemexpr, which is allowed to contain CaseTestExpr */ + save_flags = *flags; + *flags |= CCDN_CASETESTEXPR_OK; + res = contain_context_dependent_node_walker((Node *) ac->elemexpr, + flags); + *flags = save_flags; + return res; + } return expression_tree_walker(node, contain_context_dependent_node_walker, (void *) flags); } @@ -1574,7 +1642,7 @@ contain_leaked_vars_walker(Node *node, void *context) * WHERE CURRENT OF doesn't contain leaky function calls. * Moreover, it is essential that this is considered non-leaky, * since the planner must always generate a TID scan when CURRENT - * OF is present -- c.f. cost_tidscan. + * OF is present -- cf. cost_tidscan. */ return false; @@ -1614,8 +1682,8 @@ contain_leaked_vars_walker(Node *node, void *context) * that either v1 or v2 can't be NULL, but it does prove that the t1 row * as a whole can't be all-NULL. * - * top_level is TRUE while scanning top-level AND/OR structure; here, showing - * the result is either FALSE or NULL is good enough. top_level is FALSE when + * top_level is true while scanning top-level AND/OR structure; here, showing + * the result is either FALSE or NULL is good enough. 
top_level is false when * we have descended below a NOT or a strict function: now we must be able to * prove that the subexpression goes to NULL. * @@ -1757,7 +1825,7 @@ find_nonnullable_rels_walker(Node *node, bool top_level) } else if (IsA(node, ArrayCoerceExpr)) { - /* ArrayCoerceExpr is strict at the array level */ + /* ArrayCoerceExpr is strict at the array level; ignore elemexpr */ ArrayCoerceExpr *expr = (ArrayCoerceExpr *) node; result = find_nonnullable_rels_walker((Node *) expr->arg, top_level); @@ -1822,8 +1890,8 @@ find_nonnullable_rels_walker(Node *node, bool top_level) * The result is a palloc'd List, but we have not copied the member Var nodes. * Also, we don't bother trying to eliminate duplicate entries. * - * top_level is TRUE while scanning top-level AND/OR structure; here, showing - * the result is either FALSE or NULL is good enough. top_level is FALSE when + * top_level is true while scanning top-level AND/OR structure; here, showing + * the result is either FALSE or NULL is good enough. top_level is false when * we have descended below a NOT or a strict function: now we must be able to * prove that the subexpression goes to NULL. * @@ -1965,7 +2033,7 @@ find_nonnullable_vars_walker(Node *node, bool top_level) } else if (IsA(node, ArrayCoerceExpr)) { - /* ArrayCoerceExpr is strict at the array level */ + /* ArrayCoerceExpr is strict at the array level; ignore elemexpr */ ArrayCoerceExpr *expr = (ArrayCoerceExpr *) node; result = find_nonnullable_vars_walker((Node *) expr->arg, top_level); @@ -2348,6 +2416,10 @@ CommuteRowCompareExpr(RowCompareExpr *clause) * is still what it was when the expression was parsed. This is needed to * guard against improper simplification after ALTER COLUMN TYPE. (XXX we * may well need to make similar checks elsewhere?) + * + * rowtypeid may come from a whole-row Var, and therefore it can be a domain + * over composite, but for this purpose we only care about checking the type + * of a contained field. */ static bool rowtype_field_matches(Oid rowtypeid, int fieldnum, @@ -2360,13 +2432,13 @@ rowtype_field_matches(Oid rowtypeid, int fieldnum, /* No issue for RECORD, since there is no way to ALTER such a type */ if (rowtypeid == RECORDOID) return true; - tupdesc = lookup_rowtype_tupdesc(rowtypeid, -1); + tupdesc = lookup_rowtype_tupdesc_domain(rowtypeid, -1, false); if (fieldnum <= 0 || fieldnum > tupdesc->natts) { ReleaseTupleDesc(tupdesc); return false; } - attr = tupdesc->attrs[fieldnum - 1]; + attr = TupleDescAttr(tupdesc, fieldnum - 1); if (attr->attisdropped || attr->atttypid != expectedtype || attr->atttypmod != expectedtypmod || @@ -2464,6 +2536,37 @@ estimate_expression_value(PlannerInfo *root, Node *node) return eval_const_expressions_mutator(node, &context); } +/* + * The generic case in eval_const_expressions_mutator is to recurse using + * expression_tree_mutator, which will copy the given node unchanged but + * const-simplify its arguments (if any) as far as possible. If the node + * itself does immutable processing, and each of its arguments were reduced + * to a Const, we can then reduce it to a Const using evaluate_expr. (Some + * node types need more complicated logic; for example, a CASE expression + * might be reducible to a constant even if not all its subtrees are.) + */ +#define ece_generic_processing(node) \ + expression_tree_mutator((Node *) (node), eval_const_expressions_mutator, \ + (void *) context) + +/* + * Check whether all arguments of the given node were reduced to Consts. 
+ * By going directly to expression_tree_walker, contain_non_const_walker + * is not applied to the node itself, only to its children. + */ +#define ece_all_arguments_const(node) \ + (!expression_tree_walker((Node *) (node), contain_non_const_walker, NULL)) + +/* Generic macro for applying evaluate_expr */ +#define ece_evaluate_expr(node) \ + ((Node *) evaluate_expr((Expr *) (node), \ + exprType((Node *) (node)), \ + exprTypmod((Node *) (node)), \ + exprCollation((Node *) (node)))) + +/* + * Recursive guts of eval_const_expressions/estimate_expression_value + */ static Node * eval_const_expressions_mutator(Node *node, eval_const_expressions_context *context) @@ -2475,16 +2578,36 @@ eval_const_expressions_mutator(Node *node, case T_Param: { Param *param = (Param *) node; + ParamListInfo paramLI = context->boundParams; /* Look to see if we've been given a value for this Param */ if (param->paramkind == PARAM_EXTERN && - context->boundParams != NULL && + paramLI != NULL && param->paramid > 0 && - param->paramid <= context->boundParams->numParams) + param->paramid <= paramLI->numParams) { - ParamExternData *prm = &context->boundParams->params[param->paramid - 1]; + ParamExternData *prm; + ParamExternData prmdata; - if (OidIsValid(prm->ptype)) + /* + * Give hook a chance in case parameter is dynamic. Tell + * it that this fetch is speculative, so it should avoid + * erroring out if parameter is unavailable. + */ + if (paramLI->paramFetch != NULL) + prm = paramLI->paramFetch(paramLI, param->paramid, + true, &prmdata); + else + prm = ¶mLI->params[param->paramid - 1]; + + /* + * We don't just check OidIsValid, but insist that the + * fetched type match the Param, just in case the hook did + * something unexpected. No need to throw an error here + * though; leave that for runtime. + */ + if (OidIsValid(prm->ptype) && + prm->ptype == param->paramtype) { /* OK to substitute parameter value? */ if (context->estimate || @@ -2500,7 +2623,6 @@ eval_const_expressions_mutator(Node *node, bool typByVal; Datum pval; - Assert(prm->ptype == param->paramtype); get_typlenbyval(param->paramtype, &typLen, &typByVal); if (prm->isnull || typByVal) @@ -2779,6 +2901,25 @@ eval_const_expressions_mutator(Node *node, newexpr->location = expr->location; return (Node *) newexpr; } + case T_ScalarArrayOpExpr: + { + ScalarArrayOpExpr *saop; + + /* Copy the node and const-simplify its arguments */ + saop = (ScalarArrayOpExpr *) ece_generic_processing(node); + + /* Make sure we know underlying function */ + set_sa_opfuncid(saop); + + /* + * If all arguments are Consts, and it's a safe function, we + * can fold to a constant + */ + if (ece_all_arguments_const(saop) && + ece_function_is_safe(saop->opfuncid, context)) + return ece_evaluate_expr(saop); + return (Node *) saop; + } case T_BoolExpr: { BoolExpr *expr = (BoolExpr *) node; @@ -3003,41 +3144,46 @@ eval_const_expressions_mutator(Node *node, } case T_ArrayCoerceExpr: { - ArrayCoerceExpr *expr = (ArrayCoerceExpr *) node; - Expr *arg; - ArrayCoerceExpr *newexpr; + ArrayCoerceExpr *ac = makeNode(ArrayCoerceExpr); + Node *save_case_val; /* - * Reduce constants in the ArrayCoerceExpr's argument, then - * build a new ArrayCoerceExpr. + * Copy the node and const-simplify its arguments. We can't + * use ece_generic_processing() here because we need to mess + * with case_val only while processing the elemexpr. 
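/*
 * A minimal standalone sketch of the folding rule described above: first
 * const-simplify a node's arguments, then, if every argument ended up a
 * constant and the node's own processing has no side effects, evaluate the
 * node to a constant immediately.  The toy Expr type, the T_PARAM
 * placeholder and fold() below are hypothetical stand-ins for the
 * planner's node machinery and the ece_* macros.
 */
#include <stdio.h>
#include <stdlib.h>

typedef enum ExprKind { T_CONST, T_ADD, T_PARAM } ExprKind;

typedef struct Expr
{
	ExprKind	kind;
	int			value;			/* used when kind == T_CONST */
	struct Expr *left;			/* used when kind == T_ADD */
	struct Expr *right;
} Expr;

static Expr *
make_const(int v)
{
	Expr	   *e = calloc(1, sizeof(Expr));

	e->kind = T_CONST;
	e->value = v;
	return e;
}

static Expr *
fold(Expr *e)
{
	if (e->kind != T_ADD)
		return e;				/* constants and parameters stay as-is */

	/* Const-simplify the arguments first. */
	e->left = fold(e->left);
	e->right = fold(e->right);

	/* All arguments constant and '+' is immutable: evaluate now. */
	if (e->left->kind == T_CONST && e->right->kind == T_CONST)
		return make_const(e->left->value + e->right->value);

	return e;					/* an unknown parameter blocks folding */
}

int
main(void)
{
	Expr	   *param = calloc(1, sizeof(Expr));
	Expr	   *a = calloc(1, sizeof(Expr));
	Expr	   *b = calloc(1, sizeof(Expr));

	param->kind = T_PARAM;

	/* (1 + 2) folds to the constant 3. */
	a->kind = T_ADD;
	a->left = make_const(1);
	a->right = make_const(2);

	/* (param + 4) cannot be folded at "plan" time. */
	b->kind = T_ADD;
	b->left = param;
	b->right = make_const(4);

	printf("a folded? %s\n", fold(a)->kind == T_CONST ? "yes" : "no");
	printf("b folded? %s\n", fold(b)->kind == T_CONST ? "yes" : "no");
	return 0;
}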
*/ - arg = (Expr *) eval_const_expressions_mutator((Node *) expr->arg, - context); + memcpy(ac, node, sizeof(ArrayCoerceExpr)); + ac->arg = (Expr *) + eval_const_expressions_mutator((Node *) ac->arg, + context); - newexpr = makeNode(ArrayCoerceExpr); - newexpr->arg = arg; - newexpr->elemfuncid = expr->elemfuncid; - newexpr->resulttype = expr->resulttype; - newexpr->resulttypmod = expr->resulttypmod; - newexpr->resultcollid = expr->resultcollid; - newexpr->isExplicit = expr->isExplicit; - newexpr->coerceformat = expr->coerceformat; - newexpr->location = expr->location; + /* + * Set up for the CaseTestExpr node contained in the elemexpr. + * We must prevent it from absorbing any outer CASE value. + */ + save_case_val = context->case_val; + context->case_val = NULL; + + ac->elemexpr = (Expr *) + eval_const_expressions_mutator((Node *) ac->elemexpr, + context); + + context->case_val = save_case_val; /* - * If constant argument and it's a binary-coercible or - * immutable conversion, we can simplify it to a constant. + * If constant argument and the per-element expression is + * immutable, we can simplify the whole thing to a constant. + * Exception: although contain_mutable_functions considers + * CoerceToDomain immutable for historical reasons, let's not + * do so here; this ensures coercion to an array-over-domain + * does not apply the domain's constraints until runtime. */ - if (arg && IsA(arg, Const) && - (!OidIsValid(newexpr->elemfuncid) || - func_volatile(newexpr->elemfuncid) == PROVOLATILE_IMMUTABLE)) - return (Node *) evaluate_expr((Expr *) newexpr, - newexpr->resulttype, - newexpr->resulttypmod, - newexpr->resultcollid); - - /* Else we must return the partially-simplified node */ - return (Node *) newexpr; + if (ac->arg && IsA(ac->arg, Const) && + ac->elemexpr && !IsA(ac->elemexpr, CoerceToDomain) && + !contain_mutable_functions((Node *) ac->elemexpr)) + return ece_evaluate_expr(ac); + + return (Node *) ac; } case T_CollateExpr: { @@ -3229,41 +3375,22 @@ eval_const_expressions_mutator(Node *node, else return copyObject(node); } + case T_ArrayRef: case T_ArrayExpr: + case T_RowExpr: { - ArrayExpr *arrayexpr = (ArrayExpr *) node; - ArrayExpr *newarray; - bool all_const = true; - List *newelems; - ListCell *element; - - newelems = NIL; - foreach(element, arrayexpr->elements) - { - Node *e; - - e = eval_const_expressions_mutator((Node *) lfirst(element), - context); - if (!IsA(e, Const)) - all_const = false; - newelems = lappend(newelems, e); - } + /* + * Generic handling for node types whose own processing is + * known to be immutable, and for which we need no smarts + * beyond "simplify if all inputs are constants". + */ - newarray = makeNode(ArrayExpr); - newarray->array_typeid = arrayexpr->array_typeid; - newarray->array_collid = arrayexpr->array_collid; - newarray->element_typeid = arrayexpr->element_typeid; - newarray->elements = newelems; - newarray->multidims = arrayexpr->multidims; - newarray->location = arrayexpr->location; - - if (all_const) - return (Node *) evaluate_expr((Expr *) newarray, - newarray->array_typeid, - exprTypmod(node), - newarray->array_collid); - - return (Node *) newarray; + /* Copy the node and const-simplify its arguments */ + node = ece_generic_processing(node); + /* If all arguments are Consts, we can fold to a constant */ + if (ece_all_arguments_const(node)) + return ece_evaluate_expr(node); + return node; } case T_CoalesceExpr: { @@ -3340,7 +3467,8 @@ eval_const_expressions_mutator(Node *node, * simple Var. 
(This case won't be generated directly by the * parser, because ParseComplexProjection short-circuits it. * But it can arise while simplifying functions.) Also, we - * can optimize field selection from a RowExpr construct. + * can optimize field selection from a RowExpr construct, or + * of course from a constant. * * However, replacing a whole-row Var in this way has a * pitfall: if we've already built the rel targetlist for the @@ -3355,6 +3483,8 @@ eval_const_expressions_mutator(Node *node, * We must also check that the declared type of the field is * still the same as when the FieldSelect was created --- this * can change if someone did ALTER COLUMN TYPE on the rowtype. + * If it isn't, we skip the optimization; the case will + * probably fail at runtime, but that's not our problem here. */ FieldSelect *fselect = (FieldSelect *) node; FieldSelect *newfselect; @@ -3405,6 +3535,17 @@ eval_const_expressions_mutator(Node *node, newfselect->resulttype = fselect->resulttype; newfselect->resulttypmod = fselect->resulttypmod; newfselect->resultcollid = fselect->resultcollid; + if (arg && IsA(arg, Const)) + { + Const *con = (Const *) arg; + + if (rowtype_field_matches(con->consttype, + newfselect->fieldnum, + newfselect->resulttype, + newfselect->resulttypmod, + newfselect->resultcollid)) + return ece_evaluate_expr(newfselect); + } return (Node *) newfselect; } case T_NullTest: @@ -3500,6 +3641,13 @@ eval_const_expressions_mutator(Node *node, } case T_BooleanTest: { + /* + * This case could be folded into the generic handling used + * for ArrayRef etc. But because the simplification logic is + * so trivial, applying evaluate_expr() to perform it would be + * a heavy overhead. BooleanTest is probably common enough to + * justify keeping this bespoke implementation. + */ BooleanTest *btest = (BooleanTest *) node; BooleanTest *newbtest; Node *arg; @@ -3568,19 +3716,108 @@ eval_const_expressions_mutator(Node *node, context); } break; + case T_ConvertRowtypeExpr: + { + ConvertRowtypeExpr *cre = castNode(ConvertRowtypeExpr, node); + Node *arg; + ConvertRowtypeExpr *newcre; + + arg = eval_const_expressions_mutator((Node *) cre->arg, + context); + + newcre = makeNode(ConvertRowtypeExpr); + newcre->resulttype = cre->resulttype; + newcre->convertformat = cre->convertformat; + newcre->location = cre->location; + + /* + * In case of a nested ConvertRowtypeExpr, we can convert the + * leaf row directly to the topmost row format without any + * intermediate conversions. (This works because + * ConvertRowtypeExpr is used only for child->parent + * conversion in inheritance trees, which works by exact match + * of column name, and a column absent in an intermediate + * result can't be present in the final result.) + * + * No need to check more than one level deep, because the + * above recursion will have flattened anything else. + */ + if (arg != NULL && IsA(arg, ConvertRowtypeExpr)) + { + ConvertRowtypeExpr *argcre = (ConvertRowtypeExpr *) arg; + + arg = (Node *) argcre->arg; + + /* + * Make sure an outer implicit conversion can't hide an + * inner explicit one. 
+ */ + if (newcre->convertformat == COERCE_IMPLICIT_CAST) + newcre->convertformat = argcre->convertformat; + } + + newcre->arg = (Expr *) arg; + + if (arg != NULL && IsA(arg, Const)) + return ece_evaluate_expr((Node *) newcre); + return (Node *) newcre; + } default: break; } /* - * For any node type not handled above, we recurse using - * expression_tree_mutator, which will copy the node unchanged but try to - * simplify its arguments (if any) using this routine. For example: we - * cannot eliminate an ArrayRef node, but we might be able to simplify - * constant expressions in its subscripts. + * For any node type not handled above, copy the node unchanged but + * const-simplify its subexpressions. This is the correct thing for node + * types whose behavior might change between planning and execution, such + * as CoerceToDomain. It's also a safe default for new node types not + * known to this routine. */ - return expression_tree_mutator(node, eval_const_expressions_mutator, - (void *) context); + return ece_generic_processing(node); +} + +/* + * Subroutine for eval_const_expressions: check for non-Const nodes. + * + * We can abort recursion immediately on finding a non-Const node. This is + * critical for performance, else eval_const_expressions_mutator would take + * O(N^2) time on non-simplifiable trees. However, we do need to descend + * into List nodes since expression_tree_walker sometimes invokes the walker + * function directly on List subtrees. + */ +static bool +contain_non_const_walker(Node *node, void *context) +{ + if (node == NULL) + return false; + if (IsA(node, Const)) + return false; + if (IsA(node, List)) + return expression_tree_walker(node, contain_non_const_walker, context); + /* Otherwise, abort the tree traversal and return true */ + return true; +} + +/* + * Subroutine for eval_const_expressions: check if a function is OK to evaluate + */ +static bool +ece_function_is_safe(Oid funcid, eval_const_expressions_context *context) +{ + char provolatile = func_volatile(funcid); + + /* + * Ordinarily we are only allowed to simplify immutable functions. But for + * purposes of estimation, we consider it okay to simplify functions that + * are merely stable; the risk that the result might change from planning + * time to execution time is worth taking in preference to not being able + * to estimate the value at all. + */ + if (provolatile == PROVOLATILE_IMMUTABLE) + return true; + if (context->estimate && provolatile == PROVOLATILE_STABLE) + return true; + return false; } /* @@ -3598,8 +3835,8 @@ eval_const_expressions_mutator(Node *node, * input is TRUE and at least one is NULL. We don't actually include the NULL * here, that's supposed to be done by the caller. * - * The output arguments *haveNull and *forceTrue must be initialized FALSE - * by the caller. They will be set TRUE if a null constant or true constant, + * The output arguments *haveNull and *forceTrue must be initialized false + * by the caller. They will be set true if a NULL constant or TRUE constant, * respectively, is detected anywhere in the argument list. */ static List * @@ -3710,8 +3947,8 @@ simplify_or_arguments(List *args, * no input is FALSE and at least one is NULL. We don't actually include the * NULL here, that's supposed to be done by the caller. * - * The output arguments *haveNull and *forceFalse must be initialized FALSE - * by the caller. They will be set TRUE if a null constant or false constant, + * The output arguments *haveNull and *forceFalse must be initialized false + * by the caller. 
They will be set true if a null constant or false constant, * respectively, is detected anywhere in the argument list. */ static List * @@ -3980,7 +4217,7 @@ simplify_function(Oid funcid, Oid result_type, int32 result_typmod, * cases it handles should never occur there. This should be OK since it * will fall through very quickly if there's nothing to do. */ -static List * +List * expand_function_arguments(List *args, Oid result_type, HeapTuple func_tuple) { Form_pg_proc funcform = (Form_pg_proc) GETSTRUCT(func_tuple); @@ -4352,13 +4589,14 @@ inline_function(Oid funcid, Oid result_type, Oid result_collid, /* * Forget it if the function is not SQL-language or has other showstopper - * properties. (The nargs check is just paranoia.) + * properties. (The prokind and nargs checks are just paranoia.) */ if (funcform->prolang != SQLlanguageId || + funcform->prokind != PROKIND_FUNCTION || funcform->prosecdef || funcform->proretset || funcform->prorettype == RECORDOID || - !heap_attisnull(func_tuple, Anum_pg_proc_proconfig) || + !heap_attisnull(func_tuple, Anum_pg_proc_proconfig, NULL) || funcform->pronargs != list_length(args)) return NULL; @@ -4490,9 +4728,18 @@ inline_function(Oid funcid, Oid result_type, Oid result_collid, /* Now we can grab the tlist expression */ newexpr = (Node *) ((TargetEntry *) linitial(querytree->targetList))->expr; - /* Assert that check_sql_fn_retval did the right thing */ - Assert(exprType(newexpr) == result_type); - /* It couldn't have made any dangerous tlist changes, either */ + /* + * If the SQL function returns VOID, we can only inline it if it is a + * SELECT of an expression returning VOID (ie, it's just a redirection to + * another VOID-returning function). In all non-VOID-returning cases, + * check_sql_fn_retval should ensure that newexpr returns the function's + * declared result type, so this test shouldn't fail otherwise; but we may + * as well cope gracefully if it does. + */ + if (exprType(newexpr) != result_type) + goto fail; + + /* check_sql_fn_retval couldn't have made any dangerous tlist changes */ Assert(!modifyTargetList); /* @@ -4877,15 +5124,19 @@ inline_set_returning_function(PlannerInfo *root, RangeTblEntry *rte) * properties. In particular it mustn't be declared STRICT, since we * couldn't enforce that. It also mustn't be VOLATILE, because that is * supposed to cause it to be executed with its own snapshot, rather than - * sharing the snapshot of the calling query. (Rechecking proretset is - * just paranoia.) + * sharing the snapshot of the calling query. We also disallow returning + * SETOF VOID, because inlining would result in exposing the actual result + * of the function's last SELECT, which should not happen in that case. + * (Rechecking prokind and proretset is just paranoia.) */ if (funcform->prolang != SQLlanguageId || + funcform->prokind != PROKIND_FUNCTION || funcform->proisstrict || funcform->provolatile == PROVOLATILE_VOLATILE || + funcform->prorettype == VOIDOID || funcform->prosecdef || !funcform->proretset || - !heap_attisnull(func_tuple, Anum_pg_proc_proconfig)) + !heap_attisnull(func_tuple, Anum_pg_proc_proconfig, NULL)) { ReleaseSysCache(func_tuple); return NULL; @@ -4991,7 +5242,9 @@ inline_set_returning_function(PlannerInfo *root, RangeTblEntry *rte) * * If the function returns a composite type, don't inline unless the check * shows it's returning a whole tuple result; otherwise what it's - * returning is a single composite column which is not what we need. 
+ * returning is a single composite column which is not what we need. (Like + * check_sql_fn_retval, we deliberately exclude domains over composite + * here.) */ if (!check_sql_fn_retval(func_oid, fexpr->funcresulttype, querytree_list, diff --git a/src/backend/optimizer/util/joininfo.c b/src/backend/optimizer/util/joininfo.c index 62629ee7d8..3aaa004275 100644 --- a/src/backend/optimizer/util/joininfo.c +++ b/src/backend/optimizer/util/joininfo.c @@ -3,7 +3,7 @@ * joininfo.c * joininfo list manipulation routines * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * diff --git a/src/backend/optimizer/util/orclauses.c b/src/backend/optimizer/util/orclauses.c index 9aa661c909..1e78028abe 100644 --- a/src/backend/optimizer/util/orclauses.c +++ b/src/backend/optimizer/util/orclauses.c @@ -3,7 +3,7 @@ * orclauses.c * Routines to extract restriction OR clauses from join OR clauses * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * diff --git a/src/backend/optimizer/util/pathnode.c b/src/backend/optimizer/util/pathnode.c index 26567cb7f6..d50d86b252 100644 --- a/src/backend/optimizer/util/pathnode.c +++ b/src/backend/optimizer/util/pathnode.c @@ -3,7 +3,7 @@ * pathnode.c * Routines to manipulate pathlists and create path nodes * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * @@ -17,16 +17,21 @@ #include <math.h> #include "miscadmin.h" +#include "foreign/fdwapi.h" +#include "nodes/extensible.h" #include "nodes/nodeFuncs.h" #include "optimizer/clauses.h" #include "optimizer/cost.h" #include "optimizer/pathnode.h" #include "optimizer/paths.h" #include "optimizer/planmain.h" +#include "optimizer/prep.h" #include "optimizer/restrictinfo.h" +#include "optimizer/tlist.h" #include "optimizer/var.h" #include "parser/parsetree.h" #include "utils/lsyscache.h" +#include "utils/memutils.h" #include "utils/selfuncs.h" @@ -46,6 +51,11 @@ typedef enum #define STD_FUZZ_FACTOR 1.01 static List *translate_sub_tlist(List *tlist, int relid); +static int append_total_cost_compare(const void *a, const void *b); +static int append_startup_cost_compare(const void *a, const void *b); +static List *reparameterize_pathlist_by_child(PlannerInfo *root, + List *pathlist, + RelOptInfo *child_rel); /***************************************************************************** @@ -760,6 +770,12 @@ add_partial_path(RelOptInfo *parent_rel, Path *new_path) /* Check for query cancel. */ CHECK_FOR_INTERRUPTS(); + /* Path to be added must be parallel safe. */ + Assert(new_path->parallel_safe); + + /* Relation should be OK for parallelism, too. */ + Assert(parent_rel->consider_parallel); + /* * As in add_path, throw out any paths which are dominated by the new * path, but throw out the new path if some existing path dominates it. @@ -1200,44 +1216,68 @@ create_tidscan_path(PlannerInfo *root, RelOptInfo *rel, List *tidquals, * Note that we must handle subpaths = NIL, representing a dummy access path. 
*/ AppendPath * -create_append_path(RelOptInfo *rel, List *subpaths, Relids required_outer, - int parallel_workers, List *partitioned_rels) +create_append_path(PlannerInfo *root, + RelOptInfo *rel, + List *subpaths, List *partial_subpaths, + Relids required_outer, + int parallel_workers, bool parallel_aware, + List *partitioned_rels, double rows) { AppendPath *pathnode = makeNode(AppendPath); ListCell *l; + Assert(!parallel_aware || parallel_workers > 0); + pathnode->path.pathtype = T_Append; pathnode->path.parent = rel; pathnode->path.pathtarget = rel->reltarget; - pathnode->path.param_info = get_appendrel_parampathinfo(rel, - required_outer); - pathnode->path.parallel_aware = false; + + /* + * When generating an Append path for a partitioned table, there may be + * parameters that are useful so we can eliminate certain partitions + * during execution. Here we'll go all the way and fully populate the + * parameter info data as we do for normal base relations. However, we + * need only bother doing this for RELOPT_BASEREL rels, as + * RELOPT_OTHER_MEMBER_REL's Append paths are merged into the base rel's + * Append subpaths. It would do no harm to do this, we just avoid it to + * save wasting effort. + */ + if (partitioned_rels != NIL && root && rel->reloptkind == RELOPT_BASEREL) + pathnode->path.param_info = get_baserel_parampathinfo(root, + rel, + required_outer); + else + pathnode->path.param_info = get_appendrel_parampathinfo(rel, + required_outer); + + pathnode->path.parallel_aware = parallel_aware; pathnode->path.parallel_safe = rel->consider_parallel; pathnode->path.parallel_workers = parallel_workers; pathnode->path.pathkeys = NIL; /* result is always considered unsorted */ pathnode->partitioned_rels = list_copy(partitioned_rels); - pathnode->subpaths = subpaths; /* - * We don't bother with inventing a cost_append(), but just do it here. - * - * Compute rows and costs as sums of subplan rows and costs. We charge - * nothing extra for the Append itself, which perhaps is too optimistic, - * but since it doesn't do any selection or projection, it is a pretty - * cheap node. + * For parallel append, non-partial paths are sorted by descending total + * costs. That way, the total time to finish all non-partial paths is + * minimized. Also, the partial paths are sorted by descending startup + * costs. There may be some paths that require to do startup work by a + * single worker. In such case, it's better for workers to choose the + * expensive ones first, whereas the leader should choose the cheapest + * startup plan. */ - pathnode->path.rows = 0; - pathnode->path.startup_cost = 0; - pathnode->path.total_cost = 0; - foreach(l, subpaths) + if (pathnode->path.parallel_aware) { - Path *subpath = (Path *) lfirst(l); + subpaths = list_qsort(subpaths, append_total_cost_compare); + partial_subpaths = list_qsort(partial_subpaths, + append_startup_cost_compare); + } + pathnode->first_partial_path = list_length(subpaths); + pathnode->subpaths = list_concat(subpaths, partial_subpaths); - pathnode->path.rows += subpath->rows; + foreach(l, pathnode->subpaths) + { + Path *subpath = (Path *) lfirst(l); - if (l == list_head(subpaths)) /* first node? 
*/ - pathnode->path.startup_cost = subpath->startup_cost; - pathnode->path.total_cost += subpath->total_cost; pathnode->path.parallel_safe = pathnode->path.parallel_safe && subpath->parallel_safe; @@ -1245,9 +1285,59 @@ create_append_path(RelOptInfo *rel, List *subpaths, Relids required_outer, Assert(bms_equal(PATH_REQ_OUTER(subpath), required_outer)); } + Assert(!parallel_aware || pathnode->path.parallel_safe); + + cost_append(pathnode); + + /* If the caller provided a row estimate, override the computed value. */ + if (rows >= 0) + pathnode->path.rows = rows; + return pathnode; } +/* + * append_total_cost_compare + * qsort comparator for sorting append child paths by total_cost descending + * + * For equal total costs, we fall back to comparing startup costs; if those + * are equal too, break ties using bms_compare on the paths' relids. + * (This is to avoid getting unpredictable results from qsort.) + */ +static int +append_total_cost_compare(const void *a, const void *b) +{ + Path *path1 = (Path *) lfirst(*(ListCell **) a); + Path *path2 = (Path *) lfirst(*(ListCell **) b); + int cmp; + + cmp = compare_path_costs(path1, path2, TOTAL_COST); + if (cmp != 0) + return -cmp; + return bms_compare(path1->parent->relids, path2->parent->relids); +} + +/* + * append_startup_cost_compare + * qsort comparator for sorting append child paths by startup_cost descending + * + * For equal startup costs, we fall back to comparing total costs; if those + * are equal too, break ties using bms_compare on the paths' relids. + * (This is to avoid getting unpredictable results from qsort.) + */ +static int +append_startup_cost_compare(const void *a, const void *b) +{ + Path *path1 = (Path *) lfirst(*(ListCell **) a); + Path *path2 = (Path *) lfirst(*(ListCell **) b); + int cmp; + + cmp = compare_path_costs(path1, path2, STARTUP_COST); + if (cmp != 0) + return -cmp; + return bms_compare(path1->parent->relids, path2->parent->relids); +} + /* * create_merge_append_path * Creates a path corresponding to a MergeAppend plan, returning the @@ -1366,6 +1456,11 @@ create_result_path(PlannerInfo *root, RelOptInfo *rel, pathnode->path.startup_cost = target->cost.startup; pathnode->path.total_cost = target->cost.startup + cpu_tuple_cost + target->cost.per_tuple; + + /* + * Add cost of qual, if any --- but we ignore its selectivity, since our + * rowcount estimate should be 1 no matter what the qual is. + */ if (resconstantqual) { QualCost qual_cost; @@ -1449,10 +1544,16 @@ create_unique_path(PlannerInfo *root, RelOptInfo *rel, Path *subpath, return NULL; /* - * We must ensure path struct and subsidiary data are allocated in main - * planning context; otherwise GEQO memory management causes trouble. + * When called during GEQO join planning, we are in a short-lived memory + * context. We must make sure that the path and any subsidiary data + * structures created for a baserel survive the GEQO cycle, else the + * baserel is trashed for future GEQO cycles. On the other hand, when we + * are creating those for a joinrel during GEQO, we don't want them to + * clutter the main planning context. Upshot is that the best solution is + * to explicitly allocate memory in the same context the given RelOptInfo + * is in. 
*/ - oldcontext = MemoryContextSwitchTo(root->planner_cxt); + oldcontext = MemoryContextSwitchTo(GetMemoryChunkContext(rel)); pathnode = makeNode(UniquePath); @@ -1588,6 +1689,7 @@ create_unique_path(PlannerInfo *root, RelOptInfo *rel, Path *subpath, cost_agg(&agg_path, root, AGG_HASHED, NULL, numCols, pathnode->path.rows, + NIL, subpath->startup_cost, subpath->total_cost, rel->rows); @@ -2206,6 +2308,7 @@ create_mergejoin_path(PlannerInfo *root, * 'extra' contains various information about the join * 'outer_path' is the cheapest outer path * 'inner_path' is the cheapest inner path + * 'parallel_hash' to select Parallel Hash of inner path (shared hash table) * 'restrict_clauses' are the RestrictInfo nodes to apply at the join * 'required_outer' is the set of required outer rels * 'hashclauses' are the RestrictInfo nodes to use as hash clauses @@ -2219,6 +2322,7 @@ create_hashjoin_path(PlannerInfo *root, JoinPathExtraData *extra, Path *outer_path, Path *inner_path, + bool parallel_hash, List *restrict_clauses, Relids required_outer, List *hashclauses) @@ -2236,7 +2340,8 @@ create_hashjoin_path(PlannerInfo *root, extra->sjinfo, required_outer, &restrict_clauses); - pathnode->jpath.path.parallel_aware = false; + pathnode->jpath.path.parallel_aware = + joinrel->consider_parallel && parallel_hash; pathnode->jpath.path.parallel_safe = joinrel->consider_parallel && outer_path->parallel_safe && inner_path->parallel_safe; /* This is a foolish way to estimate parallel_workers, but for now... */ @@ -2354,9 +2459,9 @@ create_projection_path(PlannerInfo *root, * knows that the given path isn't referenced elsewhere and so can be modified * in-place. * - * If the input path is a GatherPath, we try to push the new target down to - * its input as well; this is a yet more invasive modification of the input - * path, which create_projection_path() can't do. + * If the input path is a GatherPath or GatherMergePath, we try to push the + * new target down to its input as well; this is a yet more invasive + * modification of the input path, which create_projection_path() can't do. * * Note that we mustn't change the source path's parent link; so when it is * add_path'd to "rel" things will be a bit inconsistent. So far that has @@ -2393,31 +2498,44 @@ apply_projection_to_path(PlannerInfo *root, (target->cost.per_tuple - oldcost.per_tuple) * path->rows; /* - * If the path happens to be a Gather path, we'd like to arrange for the - * subpath to return the required target list so that workers can help - * project. But if there is something that is not parallel-safe in the - * target expressions, then we can't. + * If the path happens to be a Gather or GatherMerge path, we'd like to + * arrange for the subpath to return the required target list so that + * workers can help project. But if there is something that is not + * parallel-safe in the target expressions, then we can't. */ - if (IsA(path, GatherPath) && + if ((IsA(path, GatherPath) ||IsA(path, GatherMergePath)) && is_parallel_safe(root, (Node *) target->exprs)) { - GatherPath *gpath = (GatherPath *) path; - /* * We always use create_projection_path here, even if the subpath is * projection-capable, so as to avoid modifying the subpath in place. * It seems unlikely at present that there could be any other * references to the subpath, but better safe than sorry. * - * Note that we don't change the GatherPath's cost estimates; it might - * be appropriate to do so, to reflect the fact that the bulk of the - * target evaluation will happen in workers. 
+ * Note that we don't change the parallel path's cost estimates; it + * might be appropriate to do so, to reflect the fact that the bulk of + * the target evaluation will happen in workers. */ - gpath->subpath = (Path *) - create_projection_path(root, - gpath->subpath->parent, - gpath->subpath, - target); + if (IsA(path, GatherPath)) + { + GatherPath *gpath = (GatherPath *) path; + + gpath->subpath = (Path *) + create_projection_path(root, + gpath->subpath->parent, + gpath->subpath, + target); + } + else + { + GatherMergePath *gmpath = (GatherMergePath *) path; + + gmpath->subpath = (Path *) + create_projection_path(root, + gmpath->subpath->parent, + gmpath->subpath, + target); + } } else if (path->parallel_safe && !is_parallel_safe(root, (Node *) target->exprs)) @@ -2557,12 +2675,12 @@ GroupPath * create_group_path(PlannerInfo *root, RelOptInfo *rel, Path *subpath, - PathTarget *target, List *groupClause, List *qual, double numGroups) { GroupPath *pathnode = makeNode(GroupPath); + PathTarget *target = rel->reltarget; pathnode->path.pathtype = T_Group; pathnode->path.parent = rel; @@ -2584,6 +2702,7 @@ create_group_path(PlannerInfo *root, cost_group(&pathnode->path, root, list_length(groupClause), numGroups, + qual, subpath->startup_cost, subpath->total_cost, subpath->rows); @@ -2701,6 +2820,7 @@ create_agg_path(PlannerInfo *root, cost_agg(&pathnode->path, root, aggstrategy, aggcosts, list_length(groupClause), numGroups, + qual, subpath->startup_cost, subpath->total_cost, subpath->rows); @@ -2732,7 +2852,6 @@ GroupingSetsPath * create_groupingsets_path(PlannerInfo *root, RelOptInfo *rel, Path *subpath, - PathTarget *target, List *having_qual, AggStrategy aggstrategy, List *rollups, @@ -2740,6 +2859,7 @@ create_groupingsets_path(PlannerInfo *root, double numGroups) { GroupingSetsPath *pathnode = makeNode(GroupingSetsPath); + PathTarget *target = rel->reltarget; ListCell *lc; bool is_first = true; bool is_first_sort = true; @@ -2809,6 +2929,7 @@ create_groupingsets_path(PlannerInfo *root, agg_costs, numGroupCols, rollup->numGroups, + having_qual, subpath->startup_cost, subpath->total_cost, subpath->rows); @@ -2832,6 +2953,7 @@ create_groupingsets_path(PlannerInfo *root, agg_costs, numGroupCols, rollup->numGroups, + having_qual, 0.0, 0.0, subpath->rows); if (!rollup->is_hashed) @@ -2855,6 +2977,7 @@ create_groupingsets_path(PlannerInfo *root, agg_costs, numGroupCols, rollup->numGroups, + having_qual, sort_path.startup_cost, sort_path.total_cost, sort_path.rows); @@ -2924,6 +3047,19 @@ create_minmaxagg_path(PlannerInfo *root, pathnode->path.total_cost = initplan_cost + target->cost.startup + target->cost.per_tuple + cpu_tuple_cost; + /* + * Add cost of qual, if any --- but we ignore its selectivity, since our + * rowcount estimate should be 1 no matter what the qual is. + */ + if (quals) + { + QualCost qual_cost; + + cost_qual_eval(&qual_cost, quals, root); + pathnode->path.startup_cost += qual_cost.startup; + pathnode->path.total_cost += qual_cost.startup + qual_cost.per_tuple; + } + return pathnode; } @@ -2936,10 +3072,9 @@ create_minmaxagg_path(PlannerInfo *root, * 'target' is the PathTarget to be computed * 'windowFuncs' is a list of WindowFunc structs * 'winclause' is a WindowClause that is common to all the WindowFuncs - * 'winpathkeys' is the pathkeys for the PARTITION keys + ORDER keys * - * The actual sort order of the input must match winpathkeys, but might - * have additional keys after those. 
+ * The input must be sorted according to the WindowClause's PARTITION keys + * plus ORDER BY keys. */ WindowAggPath * create_windowagg_path(PlannerInfo *root, @@ -2947,8 +3082,7 @@ create_windowagg_path(PlannerInfo *root, Path *subpath, PathTarget *target, List *windowFuncs, - WindowClause *winclause, - List *winpathkeys) + WindowClause *winclause) { WindowAggPath *pathnode = makeNode(WindowAggPath); @@ -2966,7 +3100,6 @@ create_windowagg_path(PlannerInfo *root, pathnode->subpath = subpath; pathnode->winclause = winclause; - pathnode->winpathkeys = winpathkeys; /* * For costing purposes, assume that there are no redundant partitioning @@ -3159,9 +3292,9 @@ create_lockrows_path(PlannerInfo *root, RelOptInfo *rel, * 'operation' is the operation type * 'canSetTag' is true if we set the command tag/es_processed * 'nominalRelation' is the parent RT index for use of EXPLAIN - * 'partitioned_rels' is an integer list of RT indexes of non-leaf tables in - * the partition tree, if this is an UPDATE/DELETE to a partitioned table. - * Otherwise NIL. + * 'rootRelation' is the partitioned table root RT index, or 0 if none + * 'partColsUpdated' is true if any partitioning columns are being updated, + * either from the target relation or a descendent partitioned table. * 'resultRelations' is an integer list of actual RT indexes of target rel(s) * 'subpaths' is a list of Path(s) producing source data (one per rel) * 'subroots' is a list of PlannerInfo structs (one per rel) @@ -3174,7 +3307,8 @@ create_lockrows_path(PlannerInfo *root, RelOptInfo *rel, ModifyTablePath * create_modifytable_path(PlannerInfo *root, RelOptInfo *rel, CmdType operation, bool canSetTag, - Index nominalRelation, List *partitioned_rels, + Index nominalRelation, Index rootRelation, + bool partColsUpdated, List *resultRelations, List *subpaths, List *subroots, List *withCheckOptionLists, List *returningLists, @@ -3241,7 +3375,8 @@ create_modifytable_path(PlannerInfo *root, RelOptInfo *rel, pathnode->operation = operation; pathnode->canSetTag = canSetTag; pathnode->nominalRelation = nominalRelation; - pathnode->partitioned_rels = list_copy(partitioned_rels); + pathnode->rootRelation = rootRelation; + pathnode->partColsUpdated = partColsUpdated; pathnode->resultRelations = resultRelations; pathnode->subpaths = subpaths; pathnode->subroots = subroots; @@ -3424,8 +3559,398 @@ reparameterize_path(PlannerInfo *root, Path *path, spath->path.pathkeys, required_outer); } + case T_Append: + { + AppendPath *apath = (AppendPath *) path; + List *childpaths = NIL; + List *partialpaths = NIL; + int i; + ListCell *lc; + + /* Reparameterize the children */ + i = 0; + foreach(lc, apath->subpaths) + { + Path *spath = (Path *) lfirst(lc); + + spath = reparameterize_path(root, spath, + required_outer, + loop_count); + if (spath == NULL) + return NULL; + /* We have to re-split the regular and partial paths */ + if (i < apath->first_partial_path) + childpaths = lappend(childpaths, spath); + else + partialpaths = lappend(partialpaths, spath); + i++; + } + return (Path *) + create_append_path(root, rel, childpaths, partialpaths, + required_outer, + apath->path.parallel_workers, + apath->path.parallel_aware, + apath->partitioned_rels, + -1); + } default: break; } return NULL; } + +/* + * reparameterize_path_by_child + * Given a path parameterized by the parent of the given child relation, + * translate the path to be parameterized by the given child relation. 
+ * + * The function creates a new path of the same type as the given path, but + * parameterized by the given child relation. Most fields from the original + * path can simply be flat-copied, but any expressions must be adjusted to + * refer to the correct varnos, and any paths must be recursively + * reparameterized. Other fields that refer to specific relids also need + * adjustment. + * + * The cost, number of rows, width and parallel path properties depend upon + * path->parent, which does not change during the translation. Hence those + * members are copied as they are. + * + * If the given path can not be reparameterized, the function returns NULL. + */ +Path * +reparameterize_path_by_child(PlannerInfo *root, Path *path, + RelOptInfo *child_rel) +{ + +#define FLAT_COPY_PATH(newnode, node, nodetype) \ + ( (newnode) = makeNode(nodetype), \ + memcpy((newnode), (node), sizeof(nodetype)) ) + +#define ADJUST_CHILD_ATTRS(node) \ + ((node) = \ + (List *) adjust_appendrel_attrs_multilevel(root, (Node *) (node), \ + child_rel->relids, \ + child_rel->top_parent_relids)) + +#define REPARAMETERIZE_CHILD_PATH(path) \ +do { \ + (path) = reparameterize_path_by_child(root, (path), child_rel); \ + if ((path) == NULL) \ + return NULL; \ +} while(0); + +#define REPARAMETERIZE_CHILD_PATH_LIST(pathlist) \ +do { \ + if ((pathlist) != NIL) \ + { \ + (pathlist) = reparameterize_pathlist_by_child(root, (pathlist), \ + child_rel); \ + if ((pathlist) == NIL) \ + return NULL; \ + } \ +} while(0); + + Path *new_path; + ParamPathInfo *new_ppi; + ParamPathInfo *old_ppi; + Relids required_outer; + + /* + * If the path is not parameterized by parent of the given relation, it + * doesn't need reparameterization. + */ + if (!path->param_info || + !bms_overlap(PATH_REQ_OUTER(path), child_rel->top_parent_relids)) + return path; + + /* Reparameterize a copy of given path. */ + switch (nodeTag(path)) + { + case T_Path: + FLAT_COPY_PATH(new_path, path, Path); + break; + + case T_IndexPath: + { + IndexPath *ipath; + + FLAT_COPY_PATH(ipath, path, IndexPath); + ADJUST_CHILD_ATTRS(ipath->indexclauses); + ADJUST_CHILD_ATTRS(ipath->indexquals); + new_path = (Path *) ipath; + } + break; + + case T_BitmapHeapPath: + { + BitmapHeapPath *bhpath; + + FLAT_COPY_PATH(bhpath, path, BitmapHeapPath); + REPARAMETERIZE_CHILD_PATH(bhpath->bitmapqual); + new_path = (Path *) bhpath; + } + break; + + case T_BitmapAndPath: + { + BitmapAndPath *bapath; + + FLAT_COPY_PATH(bapath, path, BitmapAndPath); + REPARAMETERIZE_CHILD_PATH_LIST(bapath->bitmapquals); + new_path = (Path *) bapath; + } + break; + + case T_BitmapOrPath: + { + BitmapOrPath *bopath; + + FLAT_COPY_PATH(bopath, path, BitmapOrPath); + REPARAMETERIZE_CHILD_PATH_LIST(bopath->bitmapquals); + new_path = (Path *) bopath; + } + break; + + case T_TidPath: + { + TidPath *tpath; + + /* + * TidPath contains tidquals, which do not contain any + * external parameters per create_tidscan_path(). So don't + * bother to translate those. + */ + FLAT_COPY_PATH(tpath, path, TidPath); + new_path = (Path *) tpath; + } + break; + + case T_ForeignPath: + { + ForeignPath *fpath; + ReparameterizeForeignPathByChild_function rfpc_func; + + FLAT_COPY_PATH(fpath, path, ForeignPath); + if (fpath->fdw_outerpath) + REPARAMETERIZE_CHILD_PATH(fpath->fdw_outerpath); + + /* Hand over to FDW if needed. 
*/ + rfpc_func = + path->parent->fdwroutine->ReparameterizeForeignPathByChild; + if (rfpc_func) + fpath->fdw_private = rfpc_func(root, fpath->fdw_private, + child_rel); + new_path = (Path *) fpath; + } + break; + + case T_CustomPath: + { + CustomPath *cpath; + + FLAT_COPY_PATH(cpath, path, CustomPath); + REPARAMETERIZE_CHILD_PATH_LIST(cpath->custom_paths); + if (cpath->methods && + cpath->methods->ReparameterizeCustomPathByChild) + cpath->custom_private = + cpath->methods->ReparameterizeCustomPathByChild(root, + cpath->custom_private, + child_rel); + new_path = (Path *) cpath; + } + break; + + case T_NestPath: + { + JoinPath *jpath; + + FLAT_COPY_PATH(jpath, path, NestPath); + + REPARAMETERIZE_CHILD_PATH(jpath->outerjoinpath); + REPARAMETERIZE_CHILD_PATH(jpath->innerjoinpath); + ADJUST_CHILD_ATTRS(jpath->joinrestrictinfo); + new_path = (Path *) jpath; + } + break; + + case T_MergePath: + { + JoinPath *jpath; + MergePath *mpath; + + FLAT_COPY_PATH(mpath, path, MergePath); + + jpath = (JoinPath *) mpath; + REPARAMETERIZE_CHILD_PATH(jpath->outerjoinpath); + REPARAMETERIZE_CHILD_PATH(jpath->innerjoinpath); + ADJUST_CHILD_ATTRS(jpath->joinrestrictinfo); + ADJUST_CHILD_ATTRS(mpath->path_mergeclauses); + new_path = (Path *) mpath; + } + break; + + case T_HashPath: + { + JoinPath *jpath; + HashPath *hpath; + + FLAT_COPY_PATH(hpath, path, HashPath); + + jpath = (JoinPath *) hpath; + REPARAMETERIZE_CHILD_PATH(jpath->outerjoinpath); + REPARAMETERIZE_CHILD_PATH(jpath->innerjoinpath); + ADJUST_CHILD_ATTRS(jpath->joinrestrictinfo); + ADJUST_CHILD_ATTRS(hpath->path_hashclauses); + new_path = (Path *) hpath; + } + break; + + case T_AppendPath: + { + AppendPath *apath; + + FLAT_COPY_PATH(apath, path, AppendPath); + REPARAMETERIZE_CHILD_PATH_LIST(apath->subpaths); + new_path = (Path *) apath; + } + break; + + case T_MergeAppendPath: + { + MergeAppendPath *mapath; + + FLAT_COPY_PATH(mapath, path, MergeAppendPath); + REPARAMETERIZE_CHILD_PATH_LIST(mapath->subpaths); + new_path = (Path *) mapath; + } + break; + + case T_MaterialPath: + { + MaterialPath *mpath; + + FLAT_COPY_PATH(mpath, path, MaterialPath); + REPARAMETERIZE_CHILD_PATH(mpath->subpath); + new_path = (Path *) mpath; + } + break; + + case T_UniquePath: + { + UniquePath *upath; + + FLAT_COPY_PATH(upath, path, UniquePath); + REPARAMETERIZE_CHILD_PATH(upath->subpath); + ADJUST_CHILD_ATTRS(upath->uniq_exprs); + new_path = (Path *) upath; + } + break; + + case T_GatherPath: + { + GatherPath *gpath; + + FLAT_COPY_PATH(gpath, path, GatherPath); + REPARAMETERIZE_CHILD_PATH(gpath->subpath); + new_path = (Path *) gpath; + } + break; + + case T_GatherMergePath: + { + GatherMergePath *gmpath; + + FLAT_COPY_PATH(gmpath, path, GatherMergePath); + REPARAMETERIZE_CHILD_PATH(gmpath->subpath); + new_path = (Path *) gmpath; + } + break; + + default: + + /* We don't know how to reparameterize this path. */ + return NULL; + } + + /* + * Adjust the parameterization information, which refers to the topmost + * parent. The topmost parent can be multiple levels away from the given + * child, hence use multi-level expression adjustment routines. + */ + old_ppi = new_path->param_info; + required_outer = + adjust_child_relids_multilevel(root, old_ppi->ppi_req_outer, + child_rel->relids, + child_rel->top_parent_relids); + + /* If we already have a PPI for this parameterization, just return it */ + new_ppi = find_param_path_info(new_path->parent, required_outer); + + /* + * If not, build a new one and link it to the list of PPIs. 
For the same + * reason as explained in mark_dummy_rel(), allocate new PPI in the same + * context the given RelOptInfo is in. + */ + if (new_ppi == NULL) + { + MemoryContext oldcontext; + RelOptInfo *rel = path->parent; + + oldcontext = MemoryContextSwitchTo(GetMemoryChunkContext(rel)); + + new_ppi = makeNode(ParamPathInfo); + new_ppi->ppi_req_outer = bms_copy(required_outer); + new_ppi->ppi_rows = old_ppi->ppi_rows; + new_ppi->ppi_clauses = old_ppi->ppi_clauses; + ADJUST_CHILD_ATTRS(new_ppi->ppi_clauses); + rel->ppilist = lappend(rel->ppilist, new_ppi); + + MemoryContextSwitchTo(oldcontext); + } + bms_free(required_outer); + + new_path->param_info = new_ppi; + + /* + * Adjust the path target if the parent of the outer relation is + * referenced in the targetlist. This can happen when only the parent of + * outer relation is laterally referenced in this relation. + */ + if (bms_overlap(path->parent->lateral_relids, + child_rel->top_parent_relids)) + { + new_path->pathtarget = copy_pathtarget(new_path->pathtarget); + ADJUST_CHILD_ATTRS(new_path->pathtarget->exprs); + } + + return new_path; +} + +/* + * reparameterize_pathlist_by_child + * Helper function to reparameterize a list of paths by given child rel. + */ +static List * +reparameterize_pathlist_by_child(PlannerInfo *root, + List *pathlist, + RelOptInfo *child_rel) +{ + ListCell *lc; + List *result = NIL; + + foreach(lc, pathlist) + { + Path *path = reparameterize_path_by_child(root, lfirst(lc), + child_rel); + + if (path == NULL) + { + list_free(result); + return NIL; + } + + result = lappend(result, path); + } + + return result; +} diff --git a/src/backend/optimizer/util/placeholder.c b/src/backend/optimizer/util/placeholder.c index 970542dde5..4dc632dcb5 100644 --- a/src/backend/optimizer/util/placeholder.c +++ b/src/backend/optimizer/util/placeholder.c @@ -4,7 +4,7 @@ * PlaceHolderVar and PlaceHolderInfo manipulation routines * * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * @@ -62,7 +62,7 @@ make_placeholder_expr(PlannerInfo *root, Expr *expr, Relids phrels) * simplified query passed to query_planner(). * * Note: this should only be called after query_planner() has started. Also, - * create_new_ph must not be TRUE after deconstruct_jointree begins, because + * create_new_ph must not be true after deconstruct_jointree begins, because * make_outerjoininfo assumes that we already know about all placeholders. 
*/ PlaceHolderInfo * diff --git a/src/backend/optimizer/util/plancat.c b/src/backend/optimizer/util/plancat.c index dc0b0b0706..0c88c90de4 100644 --- a/src/backend/optimizer/util/plancat.c +++ b/src/backend/optimizer/util/plancat.c @@ -4,7 +4,7 @@ * routines for accessing the system catalogs * * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * @@ -38,6 +38,7 @@ #include "optimizer/plancat.h" #include "optimizer/predtest.h" #include "optimizer/prep.h" +#include "partitioning/partbounds.h" #include "parser/parse_relation.h" #include "parser/parsetree.h" #include "rewrite/rewriteManip.h" @@ -45,8 +46,9 @@ #include "storage/bufmgr.h" #include "utils/builtins.h" #include "utils/lsyscache.h" -#include "utils/syscache.h" +#include "utils/partcache.h" #include "utils/rel.h" +#include "utils/syscache.h" #include "utils/snapmgr.h" @@ -68,6 +70,11 @@ static List *get_relation_constraints(PlannerInfo *root, static List *build_index_tlist(PlannerInfo *root, IndexOptInfo *index, Relation heapRelation); static List *get_relation_statistics(RelOptInfo *rel, Relation relation); +static void set_relation_partition_info(PlannerInfo *root, RelOptInfo *rel, + Relation relation); +static PartitionScheme find_partition_scheme(PlannerInfo *root, Relation rel); +static void set_baserel_partition_key_exprs(Relation relation, + RelOptInfo *rel); /* * get_relation_info - @@ -131,9 +138,8 @@ get_relation_info(PlannerInfo *root, Oid relationObjectId, bool inhparent, /* * Estimate relation size --- unless it's an inheritance parent, in which - * case the size will be computed later in set_append_rel_pathlist, and we - * must leave it zero for now to avoid bollixing the total_table_pages - * calculation. + * case the size we want is not the rel's own size but the size of its + * inheritance tree. That will be computed in set_append_rel_size(). */ if (!inhparent) estimate_rel_size(relation, rel->attr_widths - rel->min_attr, @@ -180,7 +186,8 @@ get_relation_info(PlannerInfo *root, Oid relationObjectId, bool inhparent, Form_pg_index index; IndexAmRoutine *amroutine; IndexOptInfo *info; - int ncolumns; + int ncolumns, + nkeycolumns; int i; /* @@ -202,6 +209,16 @@ get_relation_info(PlannerInfo *root, Oid relationObjectId, bool inhparent, continue; } + /* + * Ignore partitioned indexes, since they are not usable for + * queries. + */ + if (indexRelation->rd_rel->relkind == RELKIND_PARTITIONED_INDEX) + { + index_close(indexRelation, NoLock); + continue; + } + /* * If the index is valid, but cannot yet be used, ignore it; but * mark the plan we are generating as transient. 
See @@ -223,19 +240,25 @@ get_relation_info(PlannerInfo *root, Oid relationObjectId, bool inhparent, RelationGetForm(indexRelation)->reltablespace; info->rel = rel; info->ncolumns = ncolumns = index->indnatts; + info->nkeycolumns = nkeycolumns = index->indnkeyatts; + info->indexkeys = (int *) palloc(sizeof(int) * ncolumns); - info->indexcollations = (Oid *) palloc(sizeof(Oid) * ncolumns); - info->opfamily = (Oid *) palloc(sizeof(Oid) * ncolumns); - info->opcintype = (Oid *) palloc(sizeof(Oid) * ncolumns); + info->indexcollations = (Oid *) palloc(sizeof(Oid) * nkeycolumns); + info->opfamily = (Oid *) palloc(sizeof(Oid) * nkeycolumns); + info->opcintype = (Oid *) palloc(sizeof(Oid) * nkeycolumns); info->canreturn = (bool *) palloc(sizeof(bool) * ncolumns); for (i = 0; i < ncolumns; i++) { info->indexkeys[i] = index->indkey.values[i]; - info->indexcollations[i] = indexRelation->rd_indcollation[i]; + info->canreturn[i] = index_can_return(indexRelation, i + 1); + } + + for (i = 0; i < nkeycolumns; i++) + { info->opfamily[i] = indexRelation->rd_opfamily[i]; info->opcintype[i] = indexRelation->rd_opcintype[i]; - info->canreturn[i] = index_can_return(indexRelation, i + 1); + info->indexcollations[i] = indexRelation->rd_indcollation[i]; } info->relam = indexRelation->rd_rel->relam; @@ -264,10 +287,10 @@ get_relation_info(PlannerInfo *root, Oid relationObjectId, bool inhparent, Assert(amroutine->amcanorder); info->sortopfamily = info->opfamily; - info->reverse_sort = (bool *) palloc(sizeof(bool) * ncolumns); - info->nulls_first = (bool *) palloc(sizeof(bool) * ncolumns); + info->reverse_sort = (bool *) palloc(sizeof(bool) * nkeycolumns); + info->nulls_first = (bool *) palloc(sizeof(bool) * nkeycolumns); - for (i = 0; i < ncolumns; i++) + for (i = 0; i < nkeycolumns; i++) { int16 opt = indexRelation->rd_indoption[i]; @@ -291,11 +314,11 @@ get_relation_info(PlannerInfo *root, Oid relationObjectId, bool inhparent, * of current or foreseeable amcanorder index types, it's not * worth expending more effort on now. */ - info->sortopfamily = (Oid *) palloc(sizeof(Oid) * ncolumns); - info->reverse_sort = (bool *) palloc(sizeof(bool) * ncolumns); - info->nulls_first = (bool *) palloc(sizeof(bool) * ncolumns); + info->sortopfamily = (Oid *) palloc(sizeof(Oid) * nkeycolumns); + info->reverse_sort = (bool *) palloc(sizeof(bool) * nkeycolumns); + info->nulls_first = (bool *) palloc(sizeof(bool) * nkeycolumns); - for (i = 0; i < ncolumns; i++) + for (i = 0; i < nkeycolumns; i++) { int16 opt = indexRelation->rd_indoption[i]; Oid ltopr; @@ -420,6 +443,13 @@ get_relation_info(PlannerInfo *root, Oid relationObjectId, bool inhparent, /* Collect info about relation's foreign keys, if relevant */ get_relation_foreign_keys(root, rel, relation, inhparent); + /* + * Collect info about relation's partitioning scheme, if any. Only + * inheritance parents may be partitioned. 
+ */ + if (inhparent && relation->rd_rel->relkind == RELKIND_PARTITIONED_TABLE) + set_relation_partition_info(root, rel, relation); + heap_close(relation, NoLock); /* @@ -709,7 +739,7 @@ infer_arbiter_indexes(PlannerInfo *root) /* Build BMS representation of plain (non expression) index attrs */ indexedAttrs = NULL; - for (natt = 0; natt < idxForm->indnatts; natt++) + for (natt = 0; natt < idxForm->indnkeyatts; natt++) { int attno = idxRel->rd_index->indkey.values[natt]; @@ -1072,7 +1102,7 @@ get_rel_data_width(Relation rel, int32 *attr_widths) for (i = 1; i <= RelationGetNumberOfAttributes(rel); i++) { - Form_pg_attribute att = rel->rd_att->attrs[i - 1]; + Form_pg_attribute att = TupleDescAttr(rel->rd_att, i - 1); int32 item_width; if (att->attisdropped) @@ -1149,7 +1179,6 @@ get_relation_constraints(PlannerInfo *root, Index varno = rel->relid; Relation relation; TupleConstr *constr; - List *pcqual; /* * We assume the relation has already been safely locked. @@ -1187,7 +1216,7 @@ get_relation_constraints(PlannerInfo *root, */ cexpr = eval_const_expressions(root, cexpr); - cexpr = (Node *) canonicalize_qual((Expr *) cexpr); + cexpr = (Node *) canonicalize_qual((Expr *) cexpr, true); /* Fix Vars to have the desired varno */ if (varno != 1) @@ -1208,7 +1237,7 @@ get_relation_constraints(PlannerInfo *root, for (i = 1; i <= natts; i++) { - Form_pg_attribute att = relation->rd_att->attrs[i - 1]; + Form_pg_attribute att = TupleDescAttr(relation->rd_att, i - 1); if (att->attnotnull && !att->attisdropped) { @@ -1235,22 +1264,34 @@ get_relation_constraints(PlannerInfo *root, } } - /* Append partition predicates, if any */ - pcqual = RelationGetPartitionQual(relation); - if (pcqual) + /* + * Append partition predicates, if any. + * + * For selects, partition pruning uses the parent table's partition bound + * descriptor, instead of constraint exclusion which is driven by the + * individual partition's partition constraint. + */ + if (enable_partition_pruning && root->parse->commandType != CMD_SELECT) { - /* - * Run each expression through const-simplification and - * canonicalization similar to check constraints. - */ - pcqual = (List *) eval_const_expressions(root, (Node *) pcqual); - pcqual = (List *) canonicalize_qual((Expr *) pcqual); + List *pcqual = RelationGetPartitionQual(relation); - /* Fix Vars to have the desired varno */ - if (varno != 1) - ChangeVarNodes((Node *) pcqual, 1, varno, 0); + if (pcqual) + { + /* + * Run the partition quals through const-simplification similar to + * check constraints. We skip canonicalize_qual, though, because + * partition quals should be in canonical form already; also, + * since the qual is in implicit-AND format, we'd have to + * explicitly convert it to explicit-AND format and back again. 
+ */ + pcqual = (List *) eval_const_expressions(root, (Node *) pcqual); + + /* Fix Vars to have the desired varno */ + if (varno != 1) + ChangeVarNodes((Node *) pcqual, 1, varno, 0); - result = list_concat(result, pcqual); + result = list_concat(result, pcqual); + } } heap_close(relation, NoLock); @@ -1373,14 +1414,43 @@ relation_excluded_by_constraints(PlannerInfo *root, return true; } - /* Skip further tests if constraint exclusion is disabled for the rel */ - if (constraint_exclusion == CONSTRAINT_EXCLUSION_OFF || - (constraint_exclusion == CONSTRAINT_EXCLUSION_PARTITION && - !(rel->reloptkind == RELOPT_OTHER_MEMBER_REL || - (root->hasInheritedTarget && - rel->reloptkind == RELOPT_BASEREL && - rel->relid == root->parse->resultRelation)))) - return false; + /* + * Skip further tests, depending on constraint_exclusion. + */ + switch (constraint_exclusion) + { + case CONSTRAINT_EXCLUSION_OFF: + + /* + * Don't prune if feature turned off -- except if the relation is + * a partition. While partprune.c-style partition pruning is not + * yet in use for all cases (update/delete is not handled), it + * would be a UI horror to use different user-visible controls + * depending on such a volatile implementation detail. Therefore, + * for partitioned tables we use enable_partition_pruning to + * control this behavior. + */ + if (root->inhTargetKind == INHKIND_PARTITIONED) + break; + return false; + + case CONSTRAINT_EXCLUSION_PARTITION: + + /* + * When constraint_exclusion is set to 'partition' we only handle + * OTHER_MEMBER_RELs, or BASERELs in cases where the result target + * is an inheritance parent or a partitioned table. + */ + if ((rel->reloptkind != RELOPT_OTHER_MEMBER_REL) && + !(rel->reloptkind == RELOPT_BASEREL && + root->inhTargetKind != INHKIND_NONE && + rel->relid == root->parse->resultRelation)) + return false; + break; + + case CONSTRAINT_EXCLUSION_ON: + break; /* always try to exclude */ + } /* * Check for self-contradictory restriction clauses. We dare not make @@ -1399,11 +1469,25 @@ relation_excluded_by_constraints(PlannerInfo *root, safe_restrictions = lappend(safe_restrictions, rinfo->clause); } - if (predicate_refuted_by(safe_restrictions, safe_restrictions, false)) + /* + * We can use weak refutation here, since we're comparing restriction + * clauses with restriction clauses. + */ + if (predicate_refuted_by(safe_restrictions, safe_restrictions, true)) return true; - /* Only plain relations have constraints */ - if (rte->rtekind != RTE_RELATION || rte->inh) + /* + * Only plain relations have constraints. In a partitioning hierarchy, + * but not with regular table inheritance, it's OK to assume that any + * constraints that hold for the parent also hold for every child; for + * instance, table inheritance allows the parent to have constraints + * marked NO INHERIT, but table partitioning does not. We choose to check + * whether the partitioning parents can be excluded here; doing so + * consumes some cycles, but potentially saves us the work of excluding + * each child individually. + */ + if (rte->rtekind != RTE_RELATION || + (rte->inh && rte->relkind != RELKIND_PARTITIONED_TABLE)) return false; /* @@ -1437,6 +1521,9 @@ relation_excluded_by_constraints(PlannerInfo *root, * an obvious optimization. Some of the clauses might be OR clauses that * have volatile and nonvolatile subclauses, and it's OK to make * deductions with the nonvolatile parts. + * + * We need strong refutation because we have to prove that the constraints + * would yield false, not just NULL. 
*/ if (predicate_refuted_by(safe_constraints, rel->baserestrictinfo, false)) return true; @@ -1452,8 +1539,8 @@ relation_excluded_by_constraints(PlannerInfo *root, * in order. The executor can special-case such tlists to avoid a projection * step at runtime, so we use such tlists preferentially for scan nodes. * - * Exception: if there are any dropped columns, we punt and return NIL. - * Ideally we would like to handle the dropped-column case too. However this + * Exception: if there are any dropped or missing columns, we punt and return + * NIL. Ideally we would like to handle these cases too. However this * creates problems for ExecTypeFromTL, which may be asked to build a tupdesc * for a tlist that includes vars of no-longer-existent types. In theory we * could dig out the required info from the pg_attribute entries of the @@ -1489,11 +1576,12 @@ build_physical_tlist(PlannerInfo *root, RelOptInfo *rel) numattrs = RelationGetNumberOfAttributes(relation); for (attrno = 1; attrno <= numattrs; attrno++) { - Form_pg_attribute att_tup = relation->rd_att->attrs[attrno - 1]; + Form_pg_attribute att_tup = TupleDescAttr(relation->rd_att, + attrno - 1); - if (att_tup->attisdropped) + if (att_tup->attisdropped || att_tup->atthasmissing) { - /* found a dropped col, so punt */ + /* found a dropped or missing col, so punt */ tlist = NIL; break; } @@ -1603,13 +1691,13 @@ build_index_tlist(PlannerInfo *root, IndexOptInfo *index, if (indexkey != 0) { /* simple column */ - Form_pg_attribute att_tup; + const FormData_pg_attribute *att_tup; if (indexkey < 0) att_tup = SystemAttributeDefinition(indexkey, heapRelation->rd_rel->relhasoids); else - att_tup = heapRelation->rd_att->attrs[indexkey - 1]; + att_tup = TupleDescAttr(heapRelation->rd_att, indexkey - 1); indexvar = (Expr *) makeVar(varno, indexkey, @@ -1747,7 +1835,7 @@ has_unique_index(RelOptInfo *rel, AttrNumber attno) * just the specified attr is unique. */ if (index->unique && - index->ncolumns == 1 && + index->nkeycolumns == 1 && index->indexkeys[0] == attno && (index->indpred == NIL || index->predOK)) return true; @@ -1801,3 +1889,200 @@ has_row_triggers(PlannerInfo *root, Index rti, CmdType event) heap_close(relation, NoLock); return result; } + +/* + * set_relation_partition_info + * + * Set partitioning scheme and related information for a partitioned table. + */ +static void +set_relation_partition_info(PlannerInfo *root, RelOptInfo *rel, + Relation relation) +{ + PartitionDesc partdesc; + PartitionKey partkey; + + Assert(relation->rd_rel->relkind == RELKIND_PARTITIONED_TABLE); + + partdesc = RelationGetPartitionDesc(relation); + partkey = RelationGetPartitionKey(relation); + rel->part_scheme = find_partition_scheme(root, relation); + Assert(partdesc != NULL && rel->part_scheme != NULL); + rel->boundinfo = partition_bounds_copy(partdesc->boundinfo, partkey); + rel->nparts = partdesc->nparts; + set_baserel_partition_key_exprs(relation, rel); + rel->partition_qual = RelationGetPartitionQual(relation); +} + +/* + * find_partition_scheme + * + * Find or create a PartitionScheme for this Relation. + */ +static PartitionScheme +find_partition_scheme(PlannerInfo *root, Relation relation) +{ + PartitionKey partkey = RelationGetPartitionKey(relation); + ListCell *lc; + int partnatts, + i; + PartitionScheme part_scheme; + + /* A partitioned table should have a partition key. */ + Assert(partkey != NULL); + + partnatts = partkey->partnatts; + + /* Search for a matching partition scheme and return if found one. 
*/ + foreach(lc, root->part_schemes) + { + part_scheme = lfirst(lc); + + /* Match partitioning strategy and number of keys. */ + if (partkey->strategy != part_scheme->strategy || + partnatts != part_scheme->partnatts) + continue; + + /* Match partition key type properties. */ + if (memcmp(partkey->partopfamily, part_scheme->partopfamily, + sizeof(Oid) * partnatts) != 0 || + memcmp(partkey->partopcintype, part_scheme->partopcintype, + sizeof(Oid) * partnatts) != 0 || + memcmp(partkey->partcollation, part_scheme->partcollation, + sizeof(Oid) * partnatts) != 0) + continue; + + /* + * Length and byval information should match when partopcintype + * matches. + */ + Assert(memcmp(partkey->parttyplen, part_scheme->parttyplen, + sizeof(int16) * partnatts) == 0); + Assert(memcmp(partkey->parttypbyval, part_scheme->parttypbyval, + sizeof(bool) * partnatts) == 0); + + /* + * If partopfamily and partopcintype matched, must have the same + * partition comparison functions. Note that we cannot reliably + * Assert the equality of function structs themselves for they might + * be different across PartitionKey's, so just Assert for the function + * OIDs. + */ +#ifdef USE_ASSERT_CHECKING + for (i = 0; i < partkey->partnatts; i++) + Assert(partkey->partsupfunc[i].fn_oid == + part_scheme->partsupfunc[i].fn_oid); +#endif + + /* Found matching partition scheme. */ + return part_scheme; + } + + /* + * Did not find matching partition scheme. Create one copying relevant + * information from the relcache. We need to copy the contents of the + * array since the relcache entry may not survive after we have closed the + * relation. + */ + part_scheme = (PartitionScheme) palloc0(sizeof(PartitionSchemeData)); + part_scheme->strategy = partkey->strategy; + part_scheme->partnatts = partkey->partnatts; + + part_scheme->partopfamily = (Oid *) palloc(sizeof(Oid) * partnatts); + memcpy(part_scheme->partopfamily, partkey->partopfamily, + sizeof(Oid) * partnatts); + + part_scheme->partopcintype = (Oid *) palloc(sizeof(Oid) * partnatts); + memcpy(part_scheme->partopcintype, partkey->partopcintype, + sizeof(Oid) * partnatts); + + part_scheme->partcollation = (Oid *) palloc(sizeof(Oid) * partnatts); + memcpy(part_scheme->partcollation, partkey->partcollation, + sizeof(Oid) * partnatts); + + part_scheme->parttyplen = (int16 *) palloc(sizeof(int16) * partnatts); + memcpy(part_scheme->parttyplen, partkey->parttyplen, + sizeof(int16) * partnatts); + + part_scheme->parttypbyval = (bool *) palloc(sizeof(bool) * partnatts); + memcpy(part_scheme->parttypbyval, partkey->parttypbyval, + sizeof(bool) * partnatts); + + part_scheme->partsupfunc = (FmgrInfo *) + palloc(sizeof(FmgrInfo) * partnatts); + for (i = 0; i < partnatts; i++) + fmgr_info_copy(&part_scheme->partsupfunc[i], &partkey->partsupfunc[i], + CurrentMemoryContext); + + /* Add the partitioning scheme to PlannerInfo. */ + root->part_schemes = lappend(root->part_schemes, part_scheme); + + return part_scheme; +} + +/* + * set_baserel_partition_key_exprs + * + * Builds partition key expressions for the given base relation and sets them + * in given RelOptInfo. Any single column partition keys are converted to Var + * nodes. All Var nodes are restamped with the relid of given relation. 
+ */ +static void +set_baserel_partition_key_exprs(Relation relation, + RelOptInfo *rel) +{ + PartitionKey partkey = RelationGetPartitionKey(relation); + int partnatts; + int cnt; + List **partexprs; + ListCell *lc; + Index varno = rel->relid; + + Assert(IS_SIMPLE_REL(rel) && rel->relid > 0); + + /* A partitioned table should have a partition key. */ + Assert(partkey != NULL); + + partnatts = partkey->partnatts; + partexprs = (List **) palloc(sizeof(List *) * partnatts); + lc = list_head(partkey->partexprs); + + for (cnt = 0; cnt < partnatts; cnt++) + { + Expr *partexpr; + AttrNumber attno = partkey->partattrs[cnt]; + + if (attno != InvalidAttrNumber) + { + /* Single column partition key is stored as a Var node. */ + Assert(attno > 0); + + partexpr = (Expr *) makeVar(varno, attno, + partkey->parttypid[cnt], + partkey->parttypmod[cnt], + partkey->parttypcoll[cnt], 0); + } + else + { + if (lc == NULL) + elog(ERROR, "wrong number of partition key expressions"); + + /* Re-stamp the expression with given varno. */ + partexpr = (Expr *) copyObject(lfirst(lc)); + ChangeVarNodes((Node *) partexpr, 1, varno, 0); + lc = lnext(lc); + } + + partexprs[cnt] = list_make1(partexpr); + } + + rel->partexprs = partexprs; + + /* + * A base relation can not have nullable partition key expressions. We + * still allocate array of empty expressions lists to keep partition key + * expression handling code simple. See build_joinrel_partition_info() and + * match_expr_to_partition_keys(). + */ + rel->nullable_partexprs = (List **) palloc0(sizeof(List *) * partnatts); +} diff --git a/src/backend/optimizer/util/predtest.c b/src/backend/optimizer/util/predtest.c index 536d24b698..446207de30 100644 --- a/src/backend/optimizer/util/predtest.c +++ b/src/backend/optimizer/util/predtest.c @@ -4,7 +4,7 @@ * Routines to attempt to prove logical implications between predicate * expressions. 
* - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * @@ -78,9 +78,9 @@ typedef struct PredIterInfoData static bool predicate_implied_by_recurse(Node *clause, Node *predicate, - bool clause_is_check); + bool weak); static bool predicate_refuted_by_recurse(Node *clause, Node *predicate, - bool clause_is_check); + bool weak); static PredClass predicate_classify(Node *clause, PredIterInfo info); static void list_startup_fn(Node *clause, PredIterInfo info); static Node *list_next_fn(PredIterInfo info); @@ -93,14 +93,14 @@ static void arrayexpr_startup_fn(Node *clause, PredIterInfo info); static Node *arrayexpr_next_fn(PredIterInfo info); static void arrayexpr_cleanup_fn(PredIterInfo info); static bool predicate_implied_by_simple_clause(Expr *predicate, Node *clause, - bool clause_is_check); + bool weak); static bool predicate_refuted_by_simple_clause(Expr *predicate, Node *clause, - bool clause_is_check); + bool weak); static Node *extract_not_arg(Node *clause); static Node *extract_strong_not_arg(Node *clause); -static bool list_member_strip(List *list, Expr *datum); +static bool clause_is_strict_for(Node *clause, Node *subexpr); static bool operator_predicate_proof(Expr *predicate, Node *clause, - bool refute_it); + bool refute_it, bool weak); static bool operator_same_subexprs_proof(Oid pred_op, Oid clause_op, bool refute_it); static bool operator_same_subexprs_lookup(Oid pred_op, Oid clause_op, @@ -112,10 +112,23 @@ static void InvalidateOprProofCacheCallBack(Datum arg, int cacheid, uint32 hashv /* * predicate_implied_by * Recursively checks whether the clauses in clause_list imply that the - * given predicate is true. If clause_is_check is true, assume that the - * clauses in clause_list are CHECK constraints (where null is - * effectively true) rather than WHERE clauses (where null is effectively - * false). + * given predicate is true. + * + * We support two definitions of implication: + * + * "Strong" implication: A implies B means that truth of A implies truth of B. + * We use this to prove that a row satisfying one WHERE clause or index + * predicate must satisfy another one. + * + * "Weak" implication: A implies B means that non-falsity of A implies + * non-falsity of B ("non-false" means "either true or NULL"). We use this to + * prove that a row satisfying one CHECK constraint must satisfy another one. + * + * Strong implication can also be used to prove that a WHERE clause implies a + * CHECK constraint, although it will fail to prove a few cases where we could + * safely conclude that the implication holds. There's no support for proving + * the converse case, since only a few kinds of CHECK constraint would allow + * deducing anything. * * The top-level List structure of each list corresponds to an AND list. * We assume that eval_const_expressions() has been applied and so there @@ -125,18 +138,19 @@ static void InvalidateOprProofCacheCallBack(Datum arg, int cacheid, uint32 hashv * valid, but no worse consequences will ensue. * * We assume the predicate has already been checked to contain only - * immutable functions and operators. (In most current uses this is true - * because the predicate is part of an index predicate that has passed - * CheckPredicate().) 
We dare not make deductions based on non-immutable - * functions, because they might change answers between the time we make - * the plan and the time we execute the plan. + * immutable functions and operators. (In many current uses this is known + * true because the predicate is part of an index predicate that has passed + * CheckPredicate(); otherwise, the caller must check it.) We dare not make + * deductions based on non-immutable functions, because they might change + * answers between the time we make the plan and the time we execute the plan. + * Immutability of functions in the clause_list is checked here, if necessary. */ bool predicate_implied_by(List *predicate_list, List *clause_list, - bool clause_is_check) + bool weak) { Node *p, - *r; + *c; if (predicate_list == NIL) return true; /* no predicate: implication is vacuous */ @@ -154,32 +168,39 @@ predicate_implied_by(List *predicate_list, List *clause_list, else p = (Node *) predicate_list; if (list_length(clause_list) == 1) - r = (Node *) linitial(clause_list); + c = (Node *) linitial(clause_list); else - r = (Node *) clause_list; + c = (Node *) clause_list; /* And away we go ... */ - return predicate_implied_by_recurse(r, p, clause_is_check); + return predicate_implied_by_recurse(c, p, weak); } /* * predicate_refuted_by * Recursively checks whether the clauses in clause_list refute the given - * predicate (that is, prove it false). If clause_is_check is true, assume - * that the clauses in clause_list are CHECK constraints (where null is - * effectively true) rather than WHERE clauses (where null is effectively - * false). + * predicate (that is, prove it false). * * This is NOT the same as !(predicate_implied_by), though it is similar * in the technique and structure of the code. * - * An important fine point is that truth of the clauses must imply that - * the predicate returns FALSE, not that it does not return TRUE. This - * is normally used to try to refute CHECK constraints, and the only - * thing we can assume about a CHECK constraint is that it didn't return - * FALSE --- a NULL result isn't a violation per the SQL spec. (Someday - * perhaps this code should be extended to support both "strong" and - * "weak" refutation, but for now we only need "strong".) + * We support two definitions of refutation: + * + * "Strong" refutation: A refutes B means truth of A implies falsity of B. + * We use this to disprove a CHECK constraint given a WHERE clause, i.e., + * prove that any row satisfying the WHERE clause would violate the CHECK + * constraint. (Observe we must prove B yields false, not just not-true.) + * + * "Weak" refutation: A refutes B means truth of A implies non-truth of B + * (i.e., B must yield false or NULL). We use this to detect mutually + * contradictory WHERE clauses. + * + * Weak refutation can be proven in some cases where strong refutation doesn't + * hold, so it's useful to use it when possible. We don't currently have + * support for disproving one CHECK constraint based on another one, nor for + * disproving WHERE based on CHECK. (As with implication, the last case + * doesn't seem very practical. CHECK-vs-CHECK might be useful, but isn't + * currently needed anywhere.) * * The top-level List structure of each list corresponds to an AND list. * We assume that eval_const_expressions() has been applied and so there @@ -192,13 +213,14 @@ predicate_implied_by(List *predicate_list, List *clause_list, * immutable functions and operators. 
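To make the strong/weak distinction above concrete, here is a small standalone C program that evaluates one clause/predicate pair over a few sample values under SQL's three-valued logic and checks all four proof forms. It is only an illustrative model, not PostgreSQL code: the clause "x > 5", the predicates "x > 3" and "x < 2", and the strict comparison semantics are assumptions chosen for the example, and the finite sample set demonstrates the definitions rather than proving anything.

    /*
     * Illustrative model only (not PostgreSQL code): check the four proof
     * forms for clause "x > 5" against predicates "x > 3" (implication) and
     * "x < 2" (refutation), over a few sample values of x including NULL.
     * Comparisons are assumed strict: a NULL input yields a NULL result.
     */
    #include <stdbool.h>
    #include <stdio.h>

    typedef enum { TV_FALSE, TV_NULL, TV_TRUE } TruthValue;
    typedef struct { bool isnull; int value; } NullableInt;

    static TruthValue
    cmp_gt(NullableInt x, int k)
    {
        if (x.isnull)
            return TV_NULL;         /* strict operator: NULL in, NULL out */
        return (x.value > k) ? TV_TRUE : TV_FALSE;
    }

    static TruthValue
    cmp_lt(NullableInt x, int k)
    {
        if (x.isnull)
            return TV_NULL;
        return (x.value < k) ? TV_TRUE : TV_FALSE;
    }

    int
    main(void)
    {
        NullableInt samples[] = {{false, 0}, {false, 4}, {false, 7}, {true, 0}};
        bool strong_imp = true, weak_imp = true;
        bool strong_ref = true, weak_ref = true;

        for (int i = 0; i < 4; i++)
        {
            TruthValue clause = cmp_gt(samples[i], 5);      /* x > 5 */
            TruthValue imp_pred = cmp_gt(samples[i], 3);    /* x > 3 */
            TruthValue ref_pred = cmp_lt(samples[i], 2);    /* x < 2 */

            /* strong implication: truth of clause implies truth of predicate */
            if (clause == TV_TRUE && imp_pred != TV_TRUE)
                strong_imp = false;
            /* weak implication: non-falsity of clause implies non-falsity */
            if (clause != TV_FALSE && imp_pred == TV_FALSE)
                weak_imp = false;
            /* strong refutation: truth of clause implies falsity of predicate */
            if (clause == TV_TRUE && ref_pred != TV_FALSE)
                strong_ref = false;
            /* weak refutation: truth of clause implies non-truth of predicate */
            if (clause == TV_TRUE && ref_pred == TV_TRUE)
                weak_ref = false;
        }

        printf("x>5 implies x>3: strong=%d weak=%d\n", strong_imp, weak_imp);
        printf("x>5 refutes x<2: strong=%d weak=%d\n", strong_ref, weak_ref);
        return 0;
    }

For this particular pair all four checks succeed; the examples further below show cases where the strong and weak forms diverge.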
We dare not make deductions based on * non-immutable functions, because they might change answers between the * time we make the plan and the time we execute the plan. + * Immutability of functions in the clause_list is checked here, if necessary. */ bool predicate_refuted_by(List *predicate_list, List *clause_list, - bool clause_is_check) + bool weak) { Node *p, - *r; + *c; if (predicate_list == NIL) return false; /* no predicate: no refutation is possible */ @@ -216,12 +238,12 @@ predicate_refuted_by(List *predicate_list, List *clause_list, else p = (Node *) predicate_list; if (list_length(clause_list) == 1) - r = (Node *) linitial(clause_list); + c = (Node *) linitial(clause_list); else - r = (Node *) clause_list; + c = (Node *) clause_list; /* And away we go ... */ - return predicate_refuted_by_recurse(r, p, clause_is_check); + return predicate_refuted_by_recurse(c, p, weak); } /*---------- @@ -243,7 +265,9 @@ predicate_refuted_by(List *predicate_list, List *clause_list, * * An "atom" is anything other than an AND or OR node. Notice that we don't * have any special logic to handle NOT nodes; these should have been pushed - * down or eliminated where feasible by prepqual.c. + * down or eliminated where feasible during eval_const_expressions(). + * + * All of these rules apply equally to strong or weak implication. * * We can't recursively expand either side first, but have to interleave * the expansions per the above rules, to be sure we handle all of these @@ -261,7 +285,7 @@ predicate_refuted_by(List *predicate_list, List *clause_list, */ static bool predicate_implied_by_recurse(Node *clause, Node *predicate, - bool clause_is_check) + bool weak) { PredIterInfoData clause_info; PredIterInfoData pred_info; @@ -289,7 +313,7 @@ predicate_implied_by_recurse(Node *clause, Node *predicate, iterate_begin(pitem, predicate, pred_info) { if (!predicate_implied_by_recurse(clause, pitem, - clause_is_check)) + weak)) { result = false; break; @@ -309,7 +333,7 @@ predicate_implied_by_recurse(Node *clause, Node *predicate, iterate_begin(pitem, predicate, pred_info) { if (predicate_implied_by_recurse(clause, pitem, - clause_is_check)) + weak)) { result = true; break; @@ -327,7 +351,7 @@ predicate_implied_by_recurse(Node *clause, Node *predicate, iterate_begin(citem, clause, clause_info) { if (predicate_implied_by_recurse(citem, predicate, - clause_is_check)) + weak)) { result = true; break; @@ -345,7 +369,7 @@ predicate_implied_by_recurse(Node *clause, Node *predicate, iterate_begin(citem, clause, clause_info) { if (predicate_implied_by_recurse(citem, predicate, - clause_is_check)) + weak)) { result = true; break; @@ -373,7 +397,7 @@ predicate_implied_by_recurse(Node *clause, Node *predicate, iterate_begin(pitem, predicate, pred_info) { if (predicate_implied_by_recurse(citem, pitem, - clause_is_check)) + weak)) { presult = true; break; @@ -401,7 +425,7 @@ predicate_implied_by_recurse(Node *clause, Node *predicate, iterate_begin(citem, clause, clause_info) { if (!predicate_implied_by_recurse(citem, predicate, - clause_is_check)) + weak)) { result = false; break; @@ -424,7 +448,7 @@ predicate_implied_by_recurse(Node *clause, Node *predicate, iterate_begin(pitem, predicate, pred_info) { if (!predicate_implied_by_recurse(clause, pitem, - clause_is_check)) + weak)) { result = false; break; @@ -442,7 +466,7 @@ predicate_implied_by_recurse(Node *clause, Node *predicate, iterate_begin(pitem, predicate, pred_info) { if (predicate_implied_by_recurse(clause, pitem, - clause_is_check)) + weak)) { result = true; 
break; @@ -459,7 +483,7 @@ predicate_implied_by_recurse(Node *clause, Node *predicate, return predicate_implied_by_simple_clause((Expr *) predicate, clause, - clause_is_check); + weak); } break; } @@ -486,22 +510,23 @@ predicate_implied_by_recurse(Node *clause, Node *predicate, * OR-expr A R=> AND-expr B iff: each of A's components R=> any of B's * OR-expr A R=> OR-expr B iff: A R=> each of B's components * + * All of the above rules apply equally to strong or weak refutation. + * * In addition, if the predicate is a NOT-clause then we can use * A R=> NOT B if: A => B * This works for several different SQL constructs that assert the non-truth - * of their argument, ie NOT, IS FALSE, IS NOT TRUE, IS UNKNOWN. - * Unfortunately we *cannot* use + * of their argument, ie NOT, IS FALSE, IS NOT TRUE, IS UNKNOWN, although some + * of them require that we prove strong implication. Likewise, we can use * NOT A R=> B if: B => A - * because this type of reasoning fails to prove that B doesn't yield NULL. - * We can however make the more limited deduction that - * NOT A R=> A + * but here we must be careful about strong vs. weak refutation and make + * the appropriate type of implication proof (weak or strong respectively). * * Other comments are as for predicate_implied_by_recurse(). *---------- */ static bool predicate_refuted_by_recurse(Node *clause, Node *predicate, - bool clause_is_check) + bool weak) { PredIterInfoData clause_info; PredIterInfoData pred_info; @@ -532,7 +557,7 @@ predicate_refuted_by_recurse(Node *clause, Node *predicate, iterate_begin(pitem, predicate, pred_info) { if (predicate_refuted_by_recurse(clause, pitem, - clause_is_check)) + weak)) { result = true; break; @@ -550,7 +575,7 @@ predicate_refuted_by_recurse(Node *clause, Node *predicate, iterate_begin(citem, clause, clause_info) { if (predicate_refuted_by_recurse(citem, predicate, - clause_is_check)) + weak)) { result = true; break; @@ -568,7 +593,7 @@ predicate_refuted_by_recurse(Node *clause, Node *predicate, iterate_begin(pitem, predicate, pred_info) { if (!predicate_refuted_by_recurse(clause, pitem, - clause_is_check)) + weak)) { result = false; break; @@ -580,12 +605,19 @@ predicate_refuted_by_recurse(Node *clause, Node *predicate, case CLASS_ATOM: /* - * If B is a NOT-clause, A R=> B if A => B's arg + * If B is a NOT-type clause, A R=> B if A => B's arg + * + * Since, for either type of refutation, we are starting + * with the premise that A is true, we can use a strong + * implication test in all cases. That proves B's arg is + * true, which is more than we need for weak refutation if + * B is a simple NOT, but it allows not worrying about + * exactly which kind of negation clause we have. 
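The NOT-clause rule used just above ("A refutes NOT B if A implies B") can also be spot-checked with a standalone model. The clause "x = 1" and the argument "x >= 0" are illustrative assumptions, not anything taken from the patch; proving the argument true makes the negation false, which suffices for both strong and weak refutation.

    /*
     * Illustrative model only: clause A is "x = 1", predicate B is
     * "NOT (x >= 0)".  Whenever A is true, A's implication of "x >= 0"
     * forces B to be false, so A refutes B (strongly, hence also weakly).
     */
    #include <stdbool.h>
    #include <stdio.h>

    typedef enum { TV_FALSE, TV_NULL, TV_TRUE } TruthValue;

    static TruthValue
    tv_not(TruthValue v)
    {
        if (v == TV_NULL)
            return TV_NULL;         /* SQL NOT: NOT NULL is NULL */
        return (v == TV_TRUE) ? TV_FALSE : TV_TRUE;
    }

    int
    main(void)
    {
        bool strongly_refuted = true;

        for (int x = -2; x <= 2; x++)
        {
            TruthValue clause = (x == 1) ? TV_TRUE : TV_FALSE;  /* x = 1 */
            TruthValue not_arg = (x >= 0) ? TV_TRUE : TV_FALSE; /* B's argument */
            TruthValue predicate = tv_not(not_arg);             /* NOT (x >= 0) */

            /* strong refutation: clause true must force predicate false */
            if (clause == TV_TRUE && predicate != TV_FALSE)
                strongly_refuted = false;
        }
        printf("\"x = 1\" strongly refutes \"NOT (x >= 0)\": %d\n",
               strongly_refuted);
        return 0;
    }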
*/ not_arg = extract_not_arg(predicate); if (not_arg && predicate_implied_by_recurse(clause, not_arg, - clause_is_check)) + false)) return true; /* @@ -595,7 +627,7 @@ predicate_refuted_by_recurse(Node *clause, Node *predicate, iterate_begin(citem, clause, clause_info) { if (predicate_refuted_by_recurse(citem, predicate, - clause_is_check)) + weak)) { result = true; break; @@ -618,7 +650,7 @@ predicate_refuted_by_recurse(Node *clause, Node *predicate, iterate_begin(pitem, predicate, pred_info) { if (!predicate_refuted_by_recurse(clause, pitem, - clause_is_check)) + weak)) { result = false; break; @@ -641,7 +673,7 @@ predicate_refuted_by_recurse(Node *clause, Node *predicate, iterate_begin(pitem, predicate, pred_info) { if (predicate_refuted_by_recurse(citem, pitem, - clause_is_check)) + weak)) { presult = true; break; @@ -660,12 +692,14 @@ predicate_refuted_by_recurse(Node *clause, Node *predicate, case CLASS_ATOM: /* - * If B is a NOT-clause, A R=> B if A => B's arg + * If B is a NOT-type clause, A R=> B if A => B's arg + * + * Same logic as for the AND-clause case above. */ not_arg = extract_not_arg(predicate); if (not_arg && predicate_implied_by_recurse(clause, not_arg, - clause_is_check)) + false)) return true; /* @@ -675,7 +709,7 @@ predicate_refuted_by_recurse(Node *clause, Node *predicate, iterate_begin(citem, clause, clause_info) { if (!predicate_refuted_by_recurse(citem, predicate, - clause_is_check)) + weak)) { result = false; break; @@ -689,16 +723,18 @@ predicate_refuted_by_recurse(Node *clause, Node *predicate, case CLASS_ATOM: /* - * If A is a strong NOT-clause, A R=> B if B equals A's arg + * If A is a strong NOT-clause, A R=> B if B => A's arg * - * We cannot make the stronger conclusion that B is refuted if B - * implies A's arg; that would only prove that B is not-TRUE, not - * that it's not NULL either. Hence use equal() rather than - * predicate_implied_by_recurse(). We could do the latter if we - * ever had a need for the weak form of refutation. + * Since A is strong, we may assume A's arg is false (not just + * not-true). If B weakly implies A's arg, then B can be neither + * true nor null, so that strong refutation is proven. If B + * strongly implies A's arg, then B cannot be true, so that weak + * refutation is proven. */ not_arg = extract_strong_not_arg(clause); - if (not_arg && equal(predicate, not_arg)) + if (not_arg && + predicate_implied_by_recurse(predicate, not_arg, + !weak)) return true; switch (pclass) @@ -712,7 +748,7 @@ predicate_refuted_by_recurse(Node *clause, Node *predicate, iterate_begin(pitem, predicate, pred_info) { if (predicate_refuted_by_recurse(clause, pitem, - clause_is_check)) + weak)) { result = true; break; @@ -730,7 +766,7 @@ predicate_refuted_by_recurse(Node *clause, Node *predicate, iterate_begin(pitem, predicate, pred_info) { if (!predicate_refuted_by_recurse(clause, pitem, - clause_is_check)) + weak)) { result = false; break; @@ -742,12 +778,14 @@ predicate_refuted_by_recurse(Node *clause, Node *predicate, case CLASS_ATOM: /* - * If B is a NOT-clause, A R=> B if A => B's arg + * If B is a NOT-type clause, A R=> B if A => B's arg + * + * Same logic as for the AND-clause case above. 
*/ not_arg = extract_not_arg(predicate); if (not_arg && predicate_implied_by_recurse(clause, not_arg, - clause_is_check)) + false)) return true; /* @@ -756,7 +794,7 @@ predicate_refuted_by_recurse(Node *clause, Node *predicate, return predicate_refuted_by_simple_clause((Expr *) predicate, clause, - clause_is_check); + weak); } break; } @@ -1048,25 +1086,22 @@ arrayexpr_cleanup_fn(PredIterInfo info) * Does the predicate implication test for a "simple clause" predicate * and a "simple clause" restriction. * - * We return TRUE if able to prove the implication, FALSE if not. + * We return true if able to prove the implication, false if not. * * We have three strategies for determining whether one simple clause * implies another: * * A simple and general way is to see if they are equal(); this works for any - * kind of expression. (Actually, there is an implied assumption that the - * functions in the expression are immutable, ie dependent only on their input - * arguments --- but this was checked for the predicate by the caller.) + * kind of expression, and for either implication definition. (Actually, + * there is an implied assumption that the functions in the expression are + * immutable --- but this was checked for the predicate by the caller.) * - * When clause_is_check is false, we know we are within an AND/OR - * subtree of a WHERE clause. So, if the predicate is of the form "foo IS - * NOT NULL", we can conclude that the predicate is implied if the clause is - * a strict operator or function that has "foo" as an input. In this case - * the clause must yield NULL when "foo" is NULL, which we can take as - * equivalent to FALSE given the context. (Again, "foo" is already known - * immutable, so the clause will certainly always fail.) Also, if the clause - * is just "foo" (meaning it's a boolean variable), the predicate is implied - * since the clause can't be true if "foo" is NULL. + * If the predicate is of the form "foo IS NOT NULL", and we are considering + * strong implication, we can conclude that the predicate is implied if the + * clause is strict for "foo", i.e., it must yield NULL when "foo" is NULL. + * In that case truth of the clause requires that "foo" isn't NULL. + * (Again, this is a safe conclusion because "foo" must be immutable.) + * This doesn't work for weak implication, though. 
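The point that strictness proves "foo IS NOT NULL" only under strong implication can be seen in a tiny model (illustrative assumptions only, not PostgreSQL code): with the strict clause "x > 5", setting x to NULL leaves the clause NULL, which is non-false, while the predicate "x IS NOT NULL" is plainly false, so the weak form fails even though the strong form holds.

    /*
     * Illustrative model only: strict clause "x > 5" versus predicate
     * "x IS NOT NULL".  The strong implication holds, but the weak one
     * fails exactly at x = NULL.
     */
    #include <stdbool.h>
    #include <stdio.h>

    typedef enum { TV_FALSE, TV_NULL, TV_TRUE } TruthValue;
    typedef struct { bool isnull; int value; } NullableInt;

    static TruthValue
    clause_x_gt_5(NullableInt x)
    {
        return x.isnull ? TV_NULL : (x.value > 5 ? TV_TRUE : TV_FALSE);
    }

    static TruthValue
    pred_x_is_not_null(NullableInt x)
    {
        return x.isnull ? TV_FALSE : TV_TRUE;   /* IS NOT NULL never yields NULL */
    }

    int
    main(void)
    {
        NullableInt samples[] = {{false, 3}, {false, 9}, {true, 0}};
        bool strong_ok = true, weak_ok = true;

        for (int i = 0; i < 3; i++)
        {
            TruthValue c = clause_x_gt_5(samples[i]);
            TruthValue p = pred_x_is_not_null(samples[i]);

            if (c == TV_TRUE && p != TV_TRUE)
                strong_ok = false;          /* strong: true must give true */
            if (c != TV_FALSE && p == TV_FALSE)
                weak_ok = false;            /* weak: non-false must give non-false */
        }
        printf("x>5 implies x IS NOT NULL: strong=%d weak=%d\n",
               strong_ok, weak_ok);         /* expected: strong=1 weak=0 */
        return 0;
    }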
* * Finally, if both clauses are binary operator expressions, we may be able * to prove something using the system's knowledge about operators; those @@ -1075,7 +1110,7 @@ arrayexpr_cleanup_fn(PredIterInfo info) */ static bool predicate_implied_by_simple_clause(Expr *predicate, Node *clause, - bool clause_is_check) + bool weak) { /* Allow interrupting long proof attempts */ CHECK_FOR_INTERRUPTS(); @@ -1085,30 +1120,24 @@ predicate_implied_by_simple_clause(Expr *predicate, Node *clause, return true; /* Next try the IS NOT NULL case */ - if (predicate && IsA(predicate, NullTest) && - ((NullTest *) predicate)->nulltesttype == IS_NOT_NULL) + if (!weak && + predicate && IsA(predicate, NullTest)) { - Expr *nonnullarg = ((NullTest *) predicate)->arg; + NullTest *ntest = (NullTest *) predicate; /* row IS NOT NULL does not act in the simple way we have in mind */ - if (!((NullTest *) predicate)->argisrow && !clause_is_check) + if (ntest->nulltesttype == IS_NOT_NULL && + !ntest->argisrow) { - if (is_opclause(clause) && - list_member_strip(((OpExpr *) clause)->args, nonnullarg) && - op_strict(((OpExpr *) clause)->opno)) - return true; - if (is_funcclause(clause) && - list_member_strip(((FuncExpr *) clause)->args, nonnullarg) && - func_strict(((FuncExpr *) clause)->funcid)) - return true; - if (equal(clause, nonnullarg)) + /* strictness of clause for foo implies foo IS NOT NULL */ + if (clause_is_strict_for(clause, (Node *) ntest->arg)) return true; } return false; /* we can't succeed below... */ } /* Else try operator-related knowledge */ - return operator_predicate_proof(predicate, clause, false); + return operator_predicate_proof(predicate, clause, false, weak); } /*---------- @@ -1116,19 +1145,25 @@ predicate_implied_by_simple_clause(Expr *predicate, Node *clause, * Does the predicate refutation test for a "simple clause" predicate * and a "simple clause" restriction. * - * We return TRUE if able to prove the refutation, FALSE if not. + * We return true if able to prove the refutation, false if not. * - * Unlike the implication case, checking for equal() clauses isn't - * helpful. + * Unlike the implication case, checking for equal() clauses isn't helpful. + * But relation_excluded_by_constraints() checks for self-contradictions in a + * list of clauses, so that we may get here with predicate and clause being + * actually pointer-equal, and that is worth eliminating quickly. * * When the predicate is of the form "foo IS NULL", we can conclude that - * the predicate is refuted if the clause is a strict operator or function - * that has "foo" as an input (see notes for implication case), or if the - * clause is "foo IS NOT NULL". A clause "foo IS NULL" refutes a predicate - * "foo IS NOT NULL", but unfortunately does not refute strict predicates, - * because we are looking for strong refutation. (The motivation for covering - * these cases is to support using IS NULL/IS NOT NULL as partition-defining - * constraints.) + * the predicate is refuted if the clause is strict for "foo" (see notes for + * implication case), or is "foo IS NOT NULL". That works for either strong + * or weak refutation. + * + * A clause "foo IS NULL" refutes a predicate "foo IS NOT NULL" in all cases. + * If we are considering weak refutation, it also refutes a predicate that + * is strict for "foo", since then the predicate must yield NULL (and since + * "foo" appears in the predicate, it's known immutable). 
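Conversely, the rule that "foo IS NULL" weakly (but not strongly) refutes a predicate that is strict for "foo" comes down to a single state. The following model, with an assumed strict predicate such as "x > 5", just spells that out.

    /*
     * Illustrative model only: in the one state where "x IS NULL" is true,
     * a strict predicate over x yields NULL, which is not true (weak
     * refutation holds) but also not false (so strong refutation fails).
     */
    #include <stdio.h>

    typedef enum { TV_FALSE, TV_NULL, TV_TRUE } TruthValue;

    int
    main(void)
    {
        /* Consider the only state in which the clause "x IS NULL" is true. */
        TruthValue pred = TV_NULL;  /* any strict predicate over x, e.g. "x > 5" */

        printf("predicate not true (weak refutation holds): %d\n",
               pred != TV_TRUE);    /* expected: 1 */
        printf("predicate false (strong refutation holds):  %d\n",
               pred == TV_FALSE);   /* expected: 0 */
        return 0;
    }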
+ * + * (The main motivation for covering these IS [NOT] NULL cases is to support + * using IS NULL/IS NOT NULL as partition-defining constraints.) * * Finally, if both clauses are binary operator expressions, we may be able * to prove something using the system's knowledge about operators; those @@ -1137,7 +1172,7 @@ predicate_implied_by_simple_clause(Expr *predicate, Node *clause, */ static bool predicate_refuted_by_simple_clause(Expr *predicate, Node *clause, - bool clause_is_check) + bool weak) { /* Allow interrupting long proof attempts */ CHECK_FOR_INTERRUPTS(); @@ -1153,21 +1188,12 @@ predicate_refuted_by_simple_clause(Expr *predicate, Node *clause, { Expr *isnullarg = ((NullTest *) predicate)->arg; - if (clause_is_check) - return false; - /* row IS NULL does not act in the simple way we have in mind */ if (((NullTest *) predicate)->argisrow) return false; - /* Any strict op/func on foo refutes foo IS NULL */ - if (is_opclause(clause) && - list_member_strip(((OpExpr *) clause)->args, isnullarg) && - op_strict(((OpExpr *) clause)->opno)) - return true; - if (is_funcclause(clause) && - list_member_strip(((FuncExpr *) clause)->args, isnullarg) && - func_strict(((FuncExpr *) clause)->funcid)) + /* strictness of clause for foo refutes foo IS NULL */ + if (clause_is_strict_for(clause, (Node *) isnullarg)) return true; /* foo IS NOT NULL refutes foo IS NULL */ @@ -1197,11 +1223,16 @@ predicate_refuted_by_simple_clause(Expr *predicate, Node *clause, equal(((NullTest *) predicate)->arg, isnullarg)) return true; + /* foo IS NULL weakly refutes any predicate that is strict for foo */ + if (weak && + clause_is_strict_for((Node *) predicate, (Node *) isnullarg)) + return true; + return false; /* we can't succeed below... */ } /* Else try operator-related knowledge */ - return operator_predicate_proof(predicate, clause, true); + return operator_predicate_proof(predicate, clause, true, weak); } @@ -1261,29 +1292,63 @@ extract_strong_not_arg(Node *clause) /* - * Check whether an Expr is equal() to any member of a list, ignoring - * any top-level RelabelType nodes. This is legitimate for the purposes - * we use it for (matching IS [NOT] NULL arguments to arguments of strict - * functions) because RelabelType doesn't change null-ness. It's helpful - * for cases such as a varchar argument of a strict function on text. + * Can we prove that "clause" returns NULL if "subexpr" does? + * + * The base case is that clause and subexpr are equal(). (We assume that + * the caller knows at least one of the input expressions is immutable, + * as this wouldn't hold for volatile expressions.) + * + * We can also report success if the subexpr appears as a subexpression + * of "clause" in a place where it'd force nullness of the overall result. */ static bool -list_member_strip(List *list, Expr *datum) +clause_is_strict_for(Node *clause, Node *subexpr) { - ListCell *cell; + ListCell *lc; - if (datum && IsA(datum, RelabelType)) - datum = ((RelabelType *) datum)->arg; + /* safety checks */ + if (clause == NULL || subexpr == NULL) + return false; - foreach(cell, list) - { - Expr *elem = (Expr *) lfirst(cell); + /* + * Look through any RelabelType nodes, so that we can match, say, + * varcharcol with lower(varcharcol::text). (In general we could recurse + * through any nullness-preserving, immutable operation.) We should not + * see stacked RelabelTypes here. 
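The recursion sketched in that comment can be mimicked with a toy expression representation (hypothetical struct and function names, not the backend's Node trees): strictness propagates through a call only when the called function is strict, which is why, under the assumption that LIKE is strict and concat() is not, lower(x) LIKE y is strict for x while concat(lower(x), y) is not.

    /*
     * Toy model of the recursion (hypothetical types, not PostgreSQL code):
     * an expression is a leaf variable or a call, and a call forces a NULL
     * result for a NULL variable only if the called function is strict and
     * the variable is forced NULL in one of its arguments.
     */
    #include <stdbool.h>
    #include <stdio.h>
    #include <string.h>

    typedef struct Expr
    {
        const char *name;       /* leaf: variable name; call: function name */
        bool        is_call;
        bool        strict;     /* meaningful only for calls */
        int         nargs;
        const struct Expr *args[4];
    } Expr;

    /* Does "clause" necessarily return NULL when variable "var" is NULL? */
    static bool
    strict_for(const Expr *clause, const char *var)
    {
        if (!clause->is_call)
            return strcmp(clause->name, var) == 0;  /* base case */
        if (!clause->strict)
            return false;                           /* non-strict call absorbs NULLs */
        for (int i = 0; i < clause->nargs; i++)
        {
            if (strict_for(clause->args[i], var))
                return true;                        /* one forced-NULL argument suffices */
        }
        return false;
    }

    int
    main(void)
    {
        Expr x = {"x", false, false, 0, {NULL}};
        Expr y = {"y", false, false, 0, {NULL}};
        Expr lower_x = {"lower", true, true, 1, {&x}};            /* strict */
        Expr like = {"like", true, true, 2, {&lower_x, &y}};      /* strict */
        Expr concat = {"concat", true, false, 2, {&lower_x, &y}}; /* not strict */

        printf("lower(x) LIKE y strict for x:     %d\n", strict_for(&like, "x"));
        printf("concat(lower(x), y) strict for x: %d\n", strict_for(&concat, "x"));
        return 0;
    }

The RelabelType peeling in the real function is just the additional observation that a binary-compatible cast preserves nullness.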
+ */ + if (IsA(clause, RelabelType)) + clause = (Node *) ((RelabelType *) clause)->arg; + if (IsA(subexpr, RelabelType)) + subexpr = (Node *) ((RelabelType *) subexpr)->arg; - if (elem && IsA(elem, RelabelType)) - elem = ((RelabelType *) elem)->arg; + /* Base case */ + if (equal(clause, subexpr)) + return true; - if (equal(elem, datum)) - return true; + /* + * If we have a strict operator or function, a NULL result is guaranteed + * if any input is forced NULL by subexpr. This is OK even if the op or + * func isn't immutable, since it won't even be called on NULL input. + */ + if (is_opclause(clause) && + op_strict(((OpExpr *) clause)->opno)) + { + foreach(lc, ((OpExpr *) clause)->args) + { + if (clause_is_strict_for((Node *) lfirst(lc), subexpr)) + return true; + } + return false; + } + if (is_funcclause(clause) && + func_strict(((FuncExpr *) clause)->funcid)) + { + foreach(lc, ((FuncExpr *) clause)->args) + { + if (clause_is_strict_for((Node *) lfirst(lc), subexpr)) + return true; + } + return false; } return false; @@ -1360,12 +1425,12 @@ static const bool BT_implies_table[6][6] = { * The predicate operator: * LT LE EQ GE GT NE */ - {TRUE, TRUE, none, none, none, TRUE}, /* LT */ - {none, TRUE, none, none, none, none}, /* LE */ - {none, TRUE, TRUE, TRUE, none, none}, /* EQ */ - {none, none, none, TRUE, none, none}, /* GE */ - {none, none, none, TRUE, TRUE, TRUE}, /* GT */ - {none, none, none, none, none, TRUE} /* NE */ + {true, true, none, none, none, true}, /* LT */ + {none, true, none, none, none, none}, /* LE */ + {none, true, true, true, none, none}, /* EQ */ + {none, none, none, true, none, none}, /* GE */ + {none, none, none, true, true, true}, /* GT */ + {none, none, none, none, none, true} /* NE */ }; static const bool BT_refutes_table[6][6] = { @@ -1373,12 +1438,12 @@ static const bool BT_refutes_table[6][6] = { * The predicate operator: * LT LE EQ GE GT NE */ - {none, none, TRUE, TRUE, TRUE, none}, /* LT */ - {none, none, none, none, TRUE, none}, /* LE */ - {TRUE, none, none, none, TRUE, TRUE}, /* EQ */ - {TRUE, none, none, none, none, none}, /* GE */ - {TRUE, TRUE, TRUE, none, none, none}, /* GT */ - {none, none, TRUE, none, none, none} /* NE */ + {none, none, true, true, true, none}, /* LT */ + {none, none, none, none, true, none}, /* LE */ + {true, none, none, none, true, true}, /* EQ */ + {true, none, none, none, none, none}, /* GE */ + {true, true, true, none, none, none}, /* GT */ + {none, none, true, none, none, none} /* NE */ }; static const StrategyNumber BT_implic_table[6][6] = { @@ -1417,9 +1482,25 @@ static const StrategyNumber BT_refute_table[6][6] = { * When refute_it == false, we want to prove the predicate true; * when refute_it == true, we want to prove the predicate false. * (There is enough common code to justify handling these two cases - * in one routine.) We return TRUE if able to make the proof, FALSE + * in one routine.) We return true if able to make the proof, false * if not able to prove it. * + * We mostly need not distinguish strong vs. weak implication/refutation here. + * This depends on the assumption that a pair of related operators (i.e., + * commutators, negators, or btree opfamily siblings) will not return one NULL + * and one non-NULL result for the same inputs. Then, for the proof types + * where we start with an assumption of truth of the clause, the predicate + * operator could not return NULL either, so it doesn't matter whether we are + * trying to make a strong or weak proof. 
For weak implication, it could be + * that the clause operator returned NULL, but then the predicate operator + * would as well, so that the weak implication still holds. This argument + * doesn't apply in the case where we are considering two different constant + * values, since then the operators aren't being given identical inputs. But + * we only support that for btree operators, for which we can assume that all + * non-null inputs result in non-null outputs, so that it doesn't matter which + * two non-null constants we consider. If either constant is NULL, we have + * to think harder, but sometimes the proof still works, as explained below. + * * We can make proofs involving several expression forms (here "foo" and "bar" * represent subexpressions that are identical according to equal()): * "foo op1 bar" refutes "foo op2 bar" if op1 is op2's negator @@ -1446,7 +1527,8 @@ static const StrategyNumber BT_refute_table[6][6] = { * and we dare not make deductions with those. */ static bool -operator_predicate_proof(Expr *predicate, Node *clause, bool refute_it) +operator_predicate_proof(Expr *predicate, Node *clause, + bool refute_it, bool weak) { OpExpr *pred_opexpr, *clause_opexpr; @@ -1593,12 +1675,46 @@ operator_predicate_proof(Expr *predicate, Node *clause, bool refute_it) * We have two identical subexpressions, and two other subexpressions that * are not identical but are both Consts; and we have commuted the * operators if necessary so that the Consts are on the right. We'll need - * to compare the Consts' values. If either is NULL, fail. + * to compare the Consts' values. If either is NULL, we can't do that, so + * usually the proof fails ... but in some cases we can claim success. */ - if (pred_const->constisnull) - return false; if (clause_const->constisnull) + { + /* If clause_op isn't strict, we can't prove anything */ + if (!op_strict(clause_op)) + return false; + + /* + * At this point we know that the clause returns NULL. For proof + * types that assume truth of the clause, this means the proof is + * vacuously true (a/k/a "false implies anything"). That's all proof + * types except weak implication. + */ + if (!(weak && !refute_it)) + return true; + + /* + * For weak implication, it's still possible for the proof to succeed, + * if the predicate can also be proven NULL. In that case we've got + * NULL => NULL which is valid for this proof type. + */ + if (pred_const->constisnull && op_strict(pred_op)) + return true; + /* Else the proof fails */ return false; + } + if (pred_const->constisnull) + { + /* + * If the pred_op is strict, we know the predicate yields NULL, which + * means the proof succeeds for either weak implication or weak + * refutation. + */ + if (weak && op_strict(pred_op)) + return true; + /* Else the proof fails */ + return false; + } /* * Lookup the constant-comparison operator using the system catalogs and @@ -1661,7 +1777,7 @@ operator_predicate_proof(Expr *predicate, Node *clause, bool refute_it) * Assuming that EXPR1 clause_op EXPR2 is true, try to prove or refute * EXPR1 pred_op EXPR2. * - * Return TRUE if able to make the proof, false if not able to prove it. + * Return true if able to make the proof, false if not able to prove it. 
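The NULL-constant branch added above can likewise be spot-checked with a model (illustrative only; the operator strictness and the sample predicates are assumptions): a strict clause compared against a NULL constant can never be true, so every proof form that assumes the clause's truth holds vacuously, and weak implication additionally goes through only when the predicate is forced NULL as well.

    /*
     * Illustrative model only: clause "x < NULL" with a strict operator is
     * always NULL, hence never true.  Proofs that assume the clause is true
     * are vacuous; weak implication still needs the predicate to be NULL too.
     */
    #include <stdbool.h>
    #include <stdio.h>

    typedef enum { TV_FALSE, TV_NULL, TV_TRUE } TruthValue;

    int
    main(void)
    {
        int samples[] = {1, 5, 9};          /* a few non-null values of x */
        bool clause_never_true = true;
        bool weak_imp_null_pred = true;     /* predicate "x = NULL", strict */
        bool weak_imp_plain_pred = true;    /* predicate "x = 3" */

        for (int i = 0; i < 3; i++)
        {
            TruthValue clause = TV_NULL;    /* strict "x < NULL" */
            TruthValue pred_null = TV_NULL; /* strict "x = NULL" */
            TruthValue pred_eq3 = (samples[i] == 3) ? TV_TRUE : TV_FALSE;

            if (clause == TV_TRUE)
                clause_never_true = false;  /* never happens */
            if (clause != TV_FALSE && pred_null == TV_FALSE)
                weak_imp_null_pred = false;
            if (clause != TV_FALSE && pred_eq3 == TV_FALSE)
                weak_imp_plain_pred = false;    /* fails, e.g. at x = 1 */
        }

        printf("clause never true (truth-assuming proofs vacuous): %d\n",
               clause_never_true);
        printf("weak implication of \"x = NULL\":                    %d\n",
               weak_imp_null_pred);
        printf("weak implication of \"x = 3\":                       %d\n",
               weak_imp_plain_pred);
        return 0;
    }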
*/ static bool operator_same_subexprs_proof(Oid pred_op, Oid clause_op, bool refute_it) diff --git a/src/backend/optimizer/util/relnode.c b/src/backend/optimizer/util/relnode.c index 8ad0b4a669..39f5729b91 100644 --- a/src/backend/optimizer/util/relnode.c +++ b/src/backend/optimizer/util/relnode.c @@ -3,7 +3,7 @@ * relnode.c * Relation-node lookup/construction routines * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * @@ -23,8 +23,10 @@ #include "optimizer/paths.h" #include "optimizer/placeholder.h" #include "optimizer/plancat.h" +#include "optimizer/prep.h" #include "optimizer/restrictinfo.h" #include "optimizer/tlist.h" +#include "partitioning/partbounds.h" #include "utils/hsearch.h" @@ -52,6 +54,14 @@ static List *subbuild_joinrel_joinlist(RelOptInfo *joinrel, static void set_foreign_rel_properties(RelOptInfo *joinrel, RelOptInfo *outer_rel, RelOptInfo *inner_rel); static void add_join_rel(PlannerInfo *root, RelOptInfo *joinrel); +static void build_joinrel_partition_info(RelOptInfo *joinrel, + RelOptInfo *outer_rel, RelOptInfo *inner_rel, + List *restrictlist, JoinType jointype); +static void build_child_join_reltarget(PlannerInfo *root, + RelOptInfo *parentrel, + RelOptInfo *childrel, + int nappinfos, + AppendRelInfo **appinfos); /* @@ -83,6 +93,43 @@ setup_simple_rel_arrays(PlannerInfo *root) } } +/* + * setup_append_rel_array + * Populate the append_rel_array to allow direct lookups of + * AppendRelInfos by child relid. + * + * The array remains unallocated if there are no AppendRelInfos. + */ +void +setup_append_rel_array(PlannerInfo *root) +{ + ListCell *lc; + int size = list_length(root->parse->rtable) + 1; + + if (root->append_rel_list == NIL) + { + root->append_rel_array = NULL; + return; + } + + root->append_rel_array = (AppendRelInfo **) + palloc0(size * sizeof(AppendRelInfo *)); + + foreach(lc, root->append_rel_list) + { + AppendRelInfo *appinfo = lfirst_node(AppendRelInfo, lc); + int child_relid = appinfo->child_relid; + + /* Sanity check */ + Assert(child_relid < size); + + if (root->append_rel_array[child_relid]) + elog(ERROR, "child relation already exists"); + + root->append_rel_array[child_relid] = appinfo; + } +} + /* * build_simple_rel * Construct a new RelOptInfo for a base relation or 'other' relation. @@ -146,6 +193,15 @@ build_simple_rel(PlannerInfo *root, int relid, RelOptInfo *parent) rel->baserestrict_min_security = UINT_MAX; rel->joininfo = NIL; rel->has_eclass_joins = false; + rel->consider_partitionwise_join = false; /* might get changed later */ + rel->part_scheme = NULL; + rel->nparts = 0; + rel->boundinfo = NULL; + rel->partition_qual = NIL; + rel->part_rels = NULL; + rel->partexprs = NULL; + rel->nullable_partexprs = NULL; + rel->partitioned_child_rels = NIL; /* * Pass top parent's relids down the inheritance hierarchy. 
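The new append_rel_array is just a relid-indexed side table, so a child's AppendRelInfo can be fetched in constant time instead of scanning append_rel_list (the patch drops the old linear-search helper further below). A toy sketch of the pattern, with hypothetical struct and variable names rather than the planner's real data structures:

    /*
     * Toy sketch of the lookup pattern (hypothetical types, not PostgreSQL
     * structures): build an array indexed by child relid once, then look up
     * the parent in O(1).
     */
    #include <stdio.h>
    #include <stdlib.h>

    typedef struct
    {
        int child_relid;
        int parent_relid;
    } AppendRelInfoModel;

    int
    main(void)
    {
        int rtable_size = 6;    /* pretend the range table has 6 entries */
        AppendRelInfoModel infos[] = {{3, 2}, {4, 2}, {5, 2}};
        int ninfos = 3;

        /* Build the array once, as setup_append_rel_array() does. */
        AppendRelInfoModel **array =
            calloc(rtable_size + 1, sizeof(AppendRelInfoModel *));
        for (int i = 0; i < ninfos; i++)
            array[infos[i].child_relid] = &infos[i];

        /* Direct indexing replaces the old scan of append_rel_list. */
        int child_relid = 4;
        AppendRelInfoModel *found = array[child_relid];
        printf("parent of child relid %d is relid %d\n",
               child_relid, found ? found->parent_relid : -1);

        free(array);
        return 0;
    }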
If the parent @@ -178,8 +234,8 @@ build_simple_rel(PlannerInfo *root, int relid, RelOptInfo *parent) case RTE_NAMEDTUPLESTORE: /* - * Subquery, function, tablefunc, or values list --- set up attr - * range and arrays + * Subquery, function, tablefunc, values list, CTE, or ENR --- set + * up attr range and arrays * * Note: 0 is included in range to support whole-row Vars */ @@ -218,18 +274,41 @@ build_simple_rel(PlannerInfo *root, int relid, RelOptInfo *parent) if (rte->inh) { ListCell *l; + int nparts = rel->nparts; + int cnt_parts = 0; + + if (nparts > 0) + rel->part_rels = (RelOptInfo **) + palloc(sizeof(RelOptInfo *) * nparts); foreach(l, root->append_rel_list) { AppendRelInfo *appinfo = (AppendRelInfo *) lfirst(l); + RelOptInfo *childrel; /* append_rel_list contains all append rels; ignore others */ if (appinfo->parent_relid != relid) continue; - (void) build_simple_rel(root, appinfo->child_relid, - rel); + childrel = build_simple_rel(root, appinfo->child_relid, + rel); + + /* Nothing more to do for an unpartitioned table. */ + if (!rel->part_scheme) + continue; + + /* + * The order of partition OIDs in append_rel_list is the same as + * the order in the PartitionDesc, so the order of part_rels will + * also match the PartitionDesc. See expand_partitioned_rtentry. + */ + Assert(cnt_parts < nparts); + rel->part_rels[cnt_parts] = childrel; + cnt_parts++; } + + /* We should have seen all the child partitions. */ + Assert(cnt_parts == nparts); } return rel; @@ -453,6 +532,9 @@ build_join_rel(PlannerInfo *root, RelOptInfo *joinrel; List *restrictlist; + /* This function should be used only for join between parents. */ + Assert(!IS_OTHER_REL(outer_rel) && !IS_OTHER_REL(inner_rel)); + /* * See if we already have a joinrel for this set of base rels. */ @@ -526,7 +608,16 @@ build_join_rel(PlannerInfo *root, joinrel->baserestrict_min_security = UINT_MAX; joinrel->joininfo = NIL; joinrel->has_eclass_joins = false; + joinrel->consider_partitionwise_join = false; /* might get changed later */ joinrel->top_parent_relids = NULL; + joinrel->part_scheme = NULL; + joinrel->nparts = 0; + joinrel->boundinfo = NULL; + joinrel->partition_qual = NIL; + joinrel->part_rels = NULL; + joinrel->partexprs = NULL; + joinrel->nullable_partexprs = NULL; + joinrel->partitioned_child_rels = NIL; /* Compute information relevant to the foreign relations. */ set_foreign_rel_properties(joinrel, outer_rel, inner_rel); @@ -572,6 +663,10 @@ build_join_rel(PlannerInfo *root, */ joinrel->has_eclass_joins = has_relevant_eclass_joinclause(root, joinrel); + /* Store the partition information. */ + build_joinrel_partition_info(joinrel, outer_rel, inner_rel, restrictlist, + sjinfo->jointype); + /* * Set estimates of the joinrel's size. */ @@ -617,6 +712,143 @@ build_join_rel(PlannerInfo *root, return joinrel; } +/* + * build_child_join_rel + * Builds RelOptInfo representing join between given two child relations. + * + * 'outer_rel' and 'inner_rel' are the RelOptInfos of child relations being + * joined + * 'parent_joinrel' is the RelOptInfo representing the join between parent + * relations. 
Some of the members of new RelOptInfo are produced by + * translating corresponding members of this RelOptInfo + * 'sjinfo': child-join context info + * 'restrictlist': list of RestrictInfo nodes that apply to this particular + * pair of joinable relations + * 'jointype' is the join type (inner, left, full, etc) + */ +RelOptInfo * +build_child_join_rel(PlannerInfo *root, RelOptInfo *outer_rel, + RelOptInfo *inner_rel, RelOptInfo *parent_joinrel, + List *restrictlist, SpecialJoinInfo *sjinfo, + JoinType jointype) +{ + RelOptInfo *joinrel = makeNode(RelOptInfo); + AppendRelInfo **appinfos; + int nappinfos; + + /* Only joins between "other" relations land here. */ + Assert(IS_OTHER_REL(outer_rel) && IS_OTHER_REL(inner_rel)); + + /* The parent joinrel should have consider_partitionwise_join set. */ + Assert(parent_joinrel->consider_partitionwise_join); + + joinrel->reloptkind = RELOPT_OTHER_JOINREL; + joinrel->relids = bms_union(outer_rel->relids, inner_rel->relids); + joinrel->rows = 0; + /* cheap startup cost is interesting iff not all tuples to be retrieved */ + joinrel->consider_startup = (root->tuple_fraction > 0); + joinrel->consider_param_startup = false; + joinrel->consider_parallel = false; + joinrel->reltarget = create_empty_pathtarget(); + joinrel->pathlist = NIL; + joinrel->ppilist = NIL; + joinrel->partial_pathlist = NIL; + joinrel->cheapest_startup_path = NULL; + joinrel->cheapest_total_path = NULL; + joinrel->cheapest_unique_path = NULL; + joinrel->cheapest_parameterized_paths = NIL; + joinrel->direct_lateral_relids = NULL; + joinrel->lateral_relids = NULL; + joinrel->relid = 0; /* indicates not a baserel */ + joinrel->rtekind = RTE_JOIN; + joinrel->min_attr = 0; + joinrel->max_attr = 0; + joinrel->attr_needed = NULL; + joinrel->attr_widths = NULL; + joinrel->lateral_vars = NIL; + joinrel->lateral_referencers = NULL; + joinrel->indexlist = NIL; + joinrel->pages = 0; + joinrel->tuples = 0; + joinrel->allvisfrac = 0; + joinrel->subroot = NULL; + joinrel->subplan_params = NIL; + joinrel->serverid = InvalidOid; + joinrel->userid = InvalidOid; + joinrel->useridiscurrent = false; + joinrel->fdwroutine = NULL; + joinrel->fdw_private = NULL; + joinrel->baserestrictinfo = NIL; + joinrel->baserestrictcost.startup = 0; + joinrel->baserestrictcost.per_tuple = 0; + joinrel->joininfo = NIL; + joinrel->has_eclass_joins = false; + joinrel->consider_partitionwise_join = false; /* might get changed later */ + joinrel->top_parent_relids = NULL; + joinrel->part_scheme = NULL; + joinrel->nparts = 0; + joinrel->boundinfo = NULL; + joinrel->partition_qual = NIL; + joinrel->part_rels = NULL; + joinrel->partexprs = NULL; + joinrel->nullable_partexprs = NULL; + joinrel->partitioned_child_rels = NIL; + + joinrel->top_parent_relids = bms_union(outer_rel->top_parent_relids, + inner_rel->top_parent_relids); + + /* Compute information relevant to foreign relations. */ + set_foreign_rel_properties(joinrel, outer_rel, inner_rel); + + appinfos = find_appinfos_by_relids(root, joinrel->relids, &nappinfos); + + /* Set up reltarget struct */ + build_child_join_reltarget(root, parent_joinrel, joinrel, + nappinfos, appinfos); + + /* Construct joininfo list. */ + joinrel->joininfo = (List *) adjust_appendrel_attrs(root, + (Node *) parent_joinrel->joininfo, + nappinfos, + appinfos); + pfree(appinfos); + + /* + * Lateral relids referred in child join will be same as that referred in + * the parent relation. Throw any partial result computed while building + * the targetlist. 
+ */ + bms_free(joinrel->direct_lateral_relids); + bms_free(joinrel->lateral_relids); + joinrel->direct_lateral_relids = (Relids) bms_copy(parent_joinrel->direct_lateral_relids); + joinrel->lateral_relids = (Relids) bms_copy(parent_joinrel->lateral_relids); + + /* + * If the parent joinrel has pending equivalence classes, so does the + * child. + */ + joinrel->has_eclass_joins = parent_joinrel->has_eclass_joins; + + /* Is the join between partitions itself partitioned? */ + build_joinrel_partition_info(joinrel, outer_rel, inner_rel, restrictlist, + jointype); + + /* Child joinrel is parallel safe if parent is parallel safe. */ + joinrel->consider_parallel = parent_joinrel->consider_parallel; + + /* Set estimates of the child-joinrel's size. */ + set_joinrel_size_estimates(root, joinrel, outer_rel, inner_rel, + sjinfo, restrictlist); + + /* We build the join only once. */ + Assert(!find_join_rel(root, joinrel->relids)); + + /* Add the relation to the PlannerInfo. */ + add_join_rel(root, joinrel); + + return joinrel; +} + /* * min_join_parameterization * @@ -843,6 +1075,9 @@ subbuild_joinrel_joinlist(RelOptInfo *joinrel, { ListCell *l; + /* Expected to be called only for join between parent relations. */ + Assert(joinrel->reloptkind == RELOPT_JOINREL); + foreach(l, joininfo_list) { RestrictInfo *rinfo = (RestrictInfo *) lfirst(l); @@ -958,36 +1193,6 @@ fetch_upper_rel(PlannerInfo *root, UpperRelationKind kind, Relids relids) } -/* - * find_childrel_appendrelinfo - * Get the AppendRelInfo associated with an appendrel child rel. - * - * This search could be eliminated by storing a link in child RelOptInfos, - * but for now it doesn't seem performance-critical. (Also, it might be - * difficult to maintain such a link during mutation of the append_rel_list.) - */ -AppendRelInfo * -find_childrel_appendrelinfo(PlannerInfo *root, RelOptInfo *rel) -{ - Index relid = rel->relid; - ListCell *lc; - - /* Should only be called on child rels */ - Assert(rel->reloptkind == RELOPT_OTHER_MEMBER_REL); - - foreach(lc, root->append_rel_list) - { - AppendRelInfo *appinfo = (AppendRelInfo *) lfirst(lc); - - if (appinfo->child_relid == relid) - return appinfo; - } - /* should have found the entry ... */ - elog(ERROR, "child rel %d not found in append_rel_list", relid); - return NULL; /* not reached */ -} - - /* * find_childrel_parents * Compute the set of parent relids of an appendrel child rel. @@ -1002,10 +1207,11 @@ find_childrel_parents(PlannerInfo *root, RelOptInfo *rel) Relids result = NULL; Assert(rel->reloptkind == RELOPT_OTHER_MEMBER_REL); + Assert(rel->relid > 0 && rel->relid < root->simple_rel_array_size); do { - AppendRelInfo *appinfo = find_childrel_appendrelinfo(root, rel); + AppendRelInfo *appinfo = root->append_rel_array[rel->relid]; Index prelid = appinfo->parent_relid; result = bms_add_member(result, prelid); @@ -1366,3 +1572,202 @@ find_param_path_info(RelOptInfo *rel, Relids required_outer) return NULL; } + +/* + * build_joinrel_partition_info + * If the two relations have same partitioning scheme, their join may be + * partitioned and will follow the same partitioning scheme as the joining + * relations. Set the partition scheme and partition key expressions in + * the join relation. + */ +static void +build_joinrel_partition_info(RelOptInfo *joinrel, RelOptInfo *outer_rel, + RelOptInfo *inner_rel, List *restrictlist, + JoinType jointype) +{ + int partnatts; + int cnt; + PartitionScheme part_scheme; + + /* Nothing to do if partitionwise join technique is disabled. 
*/ + if (!enable_partitionwise_join) + { + Assert(!IS_PARTITIONED_REL(joinrel)); + return; + } + + /* + * We can only consider this join as an input to further partitionwise + * joins if (a) the input relations are partitioned and have + * consider_partitionwise_join=true, (b) the partition schemes match, and + * (c) we can identify an equi-join between the partition keys. Note that + * if it were possible for have_partkey_equi_join to return different + * answers for the same joinrel depending on which join ordering we try + * first, this logic would break. That shouldn't happen, though, because + * of the way the query planner deduces implied equalities and reorders + * the joins. Please see optimizer/README for details. + */ + if (!IS_PARTITIONED_REL(outer_rel) || !IS_PARTITIONED_REL(inner_rel) || + !outer_rel->consider_partitionwise_join || + !inner_rel->consider_partitionwise_join || + outer_rel->part_scheme != inner_rel->part_scheme || + !have_partkey_equi_join(joinrel, outer_rel, inner_rel, + jointype, restrictlist)) + { + Assert(!IS_PARTITIONED_REL(joinrel)); + return; + } + + part_scheme = outer_rel->part_scheme; + + Assert(REL_HAS_ALL_PART_PROPS(outer_rel) && + REL_HAS_ALL_PART_PROPS(inner_rel)); + + /* + * For now, our partition matching algorithm can match partitions only + * when the partition bounds of the joining relations are exactly same. + * So, bail out otherwise. + */ + if (outer_rel->nparts != inner_rel->nparts || + !partition_bounds_equal(part_scheme->partnatts, + part_scheme->parttyplen, + part_scheme->parttypbyval, + outer_rel->boundinfo, inner_rel->boundinfo)) + { + Assert(!IS_PARTITIONED_REL(joinrel)); + return; + } + + /* + * This function will be called only once for each joinrel, hence it + * should not have partition scheme, partition bounds, partition key + * expressions and array for storing child relations set. + */ + Assert(!joinrel->part_scheme && !joinrel->partexprs && + !joinrel->nullable_partexprs && !joinrel->part_rels && + !joinrel->boundinfo); + + /* + * Join relation is partitioned using the same partitioning scheme as the + * joining relations and has same bounds. + */ + joinrel->part_scheme = part_scheme; + joinrel->boundinfo = outer_rel->boundinfo; + partnatts = joinrel->part_scheme->partnatts; + joinrel->partexprs = (List **) palloc0(sizeof(List *) * partnatts); + joinrel->nullable_partexprs = + (List **) palloc0(sizeof(List *) * partnatts); + joinrel->nparts = outer_rel->nparts; + joinrel->part_rels = + (RelOptInfo **) palloc0(sizeof(RelOptInfo *) * joinrel->nparts); + + /* + * Set the consider_partitionwise_join flag. + */ + Assert(outer_rel->consider_partitionwise_join); + Assert(inner_rel->consider_partitionwise_join); + joinrel->consider_partitionwise_join = true; + + /* + * Construct partition keys for the join. + * + * An INNER join between two partitioned relations can be regarded as + * partitioned by either key expression. For example, A INNER JOIN B ON + * A.a = B.b can be regarded as partitioned on A.a or on B.b; they are + * equivalent. + * + * For a SEMI or ANTI join, the result can only be regarded as being + * partitioned in the same manner as the outer side, since the inner + * columns are not retained. + * + * An OUTER join like (A LEFT JOIN B ON A.a = B.b) may produce rows with + * B.b NULL. These rows may not fit the partitioning conditions imposed on + * B.b. 
Hence, strictly speaking, the join is not partitioned by B.b and + * thus partition keys of an OUTER join should include partition key + * expressions from the OUTER side only. However, because all + * commonly-used comparison operators are strict, the presence of nulls on + * the outer side doesn't cause any problem; they can't match anything at + * future join levels anyway. Therefore, we track two sets of + * expressions: those that authentically partition the relation + * (partexprs) and those that partition the relation with the exception + * that extra nulls may be present (nullable_partexprs). When the + * comparison operator is strict, the latter is just as good as the + * former. + */ + for (cnt = 0; cnt < partnatts; cnt++) + { + List *outer_expr; + List *outer_null_expr; + List *inner_expr; + List *inner_null_expr; + List *partexpr = NIL; + List *nullable_partexpr = NIL; + + outer_expr = list_copy(outer_rel->partexprs[cnt]); + outer_null_expr = list_copy(outer_rel->nullable_partexprs[cnt]); + inner_expr = list_copy(inner_rel->partexprs[cnt]); + inner_null_expr = list_copy(inner_rel->nullable_partexprs[cnt]); + + switch (jointype) + { + case JOIN_INNER: + partexpr = list_concat(outer_expr, inner_expr); + nullable_partexpr = list_concat(outer_null_expr, + inner_null_expr); + break; + + case JOIN_SEMI: + case JOIN_ANTI: + partexpr = outer_expr; + nullable_partexpr = outer_null_expr; + break; + + case JOIN_LEFT: + partexpr = outer_expr; + nullable_partexpr = list_concat(inner_expr, + outer_null_expr); + nullable_partexpr = list_concat(nullable_partexpr, + inner_null_expr); + break; + + case JOIN_FULL: + nullable_partexpr = list_concat(outer_expr, + inner_expr); + nullable_partexpr = list_concat(nullable_partexpr, + outer_null_expr); + nullable_partexpr = list_concat(nullable_partexpr, + inner_null_expr); + break; + + default: + elog(ERROR, "unrecognized join type: %d", (int) jointype); + + } + + joinrel->partexprs[cnt] = partexpr; + joinrel->nullable_partexprs[cnt] = nullable_partexpr; + } +} + +/* + * build_child_join_reltarget + * Set up a child-join relation's reltarget from a parent-join relation. + */ +static void +build_child_join_reltarget(PlannerInfo *root, + RelOptInfo *parentrel, + RelOptInfo *childrel, + int nappinfos, + AppendRelInfo **appinfos) +{ + /* Build the targetlist */ + childrel->reltarget->exprs = (List *) + adjust_appendrel_attrs(root, + (Node *) parentrel->reltarget->exprs, + nappinfos, appinfos); + + /* Set the cost and width fields */ + childrel->reltarget->cost.startup = parentrel->reltarget->cost.startup; + childrel->reltarget->cost.per_tuple = parentrel->reltarget->cost.per_tuple; + childrel->reltarget->width = parentrel->reltarget->width; +} diff --git a/src/backend/optimizer/util/restrictinfo.c b/src/backend/optimizer/util/restrictinfo.c index 39b52aecc5..edf5a4807f 100644 --- a/src/backend/optimizer/util/restrictinfo.c +++ b/src/backend/optimizer/util/restrictinfo.c @@ -3,7 +3,7 @@ * restrictinfo.c * RestrictInfo node manipulation routines. * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * @@ -373,7 +373,7 @@ extract_actual_clauses(List *restrictinfo_list, * extract_actual_join_clauses * * Extract bare clauses from 'restrictinfo_list', separating those that - * syntactically match the join level from those that were pushed down. 
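As a reading aid for the switch above, the following trivial program prints how one partition key column of "A JOIN B ON A.a = B.b" ends up split between partexprs and nullable_partexprs for each join type, assuming neither input had nullable keys to begin with (the relation and column names are illustrative).

    /*
     * Illustrative only: how the join type splits the key column of
     * "A JOIN B ON A.a = B.b" between the authentic and nullable sets,
     * starting from empty nullable sets on both inputs.
     */
    #include <stdio.h>

    static void
    show(const char *jointype, const char *partexprs, const char *nullable)
    {
        printf("%-10s partexprs = {%s}  nullable_partexprs = {%s}\n",
               jointype, partexprs, nullable);
    }

    int
    main(void)
    {
        show("INNER", "A.a, B.b", "");       /* either side's key works */
        show("SEMI/ANTI", "A.a", "");        /* only the outer side survives */
        show("LEFT", "A.a", "B.b");          /* B.b may be NULL-extended */
        show("FULL", "", "A.a, B.b");        /* both sides may be NULL-extended */
        return 0;
    }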
+ * semantically match the join level from those that were pushed down. * Pseudoconstant clauses are excluded from the results. * * This is only used at outer joins, since for plain joins we don't care @@ -381,6 +381,7 @@ extract_actual_clauses(List *restrictinfo_list, */ void extract_actual_join_clauses(List *restrictinfo_list, + Relids joinrelids, List **joinquals, List **otherquals) { @@ -393,7 +394,7 @@ extract_actual_join_clauses(List *restrictinfo_list, { RestrictInfo *rinfo = lfirst_node(RestrictInfo, l); - if (rinfo->is_pushed_down) + if (RINFO_IS_PUSHED_DOWN(rinfo, joinrelids)) { if (!rinfo->pseudoconstant) *otherquals = lappend(*otherquals, rinfo->clause); diff --git a/src/backend/optimizer/util/tlist.c b/src/backend/optimizer/util/tlist.c index 9345891380..5500f33e63 100644 --- a/src/backend/optimizer/util/tlist.c +++ b/src/backend/optimizer/util/tlist.c @@ -3,7 +3,7 @@ * tlist.c * Target list manipulation routines * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * @@ -25,20 +25,38 @@ ((IsA(node, FuncExpr) && ((FuncExpr *) (node))->funcretset) || \ (IsA(node, OpExpr) && ((OpExpr *) (node))->opretset)) -/* Workspace for split_pathtarget_walker */ +/* + * Data structures for split_pathtarget_at_srfs(). To preserve the identity + * of sortgroupref items even if they are textually equal(), what we track is + * not just bare expressions but expressions plus their sortgroupref indexes. + */ +typedef struct +{ + Node *expr; /* some subexpression of a PathTarget */ + Index sortgroupref; /* its sortgroupref, or 0 if none */ +} split_pathtarget_item; + typedef struct { + /* This is a List of bare expressions: */ List *input_target_exprs; /* exprs available from input */ - List *level_srfs; /* list of lists of SRF exprs */ - List *level_input_vars; /* vars needed by SRFs of each level */ - List *level_input_srfs; /* SRFs needed by SRFs of each level */ + /* These are Lists of Lists of split_pathtarget_items: */ + List *level_srfs; /* SRF exprs to evaluate at each level */ + List *level_input_vars; /* input vars needed at each level */ + List *level_input_srfs; /* input SRFs needed at each level */ + /* These are Lists of split_pathtarget_items: */ List *current_input_vars; /* vars needed in current subexpr */ List *current_input_srfs; /* SRFs needed in current subexpr */ + /* Auxiliary data for current split_pathtarget_walker traversal: */ int current_depth; /* max SRF depth in current subexpr */ + Index current_sgref; /* current subexpr's sortgroupref, or 0 */ } split_pathtarget_context; static bool split_pathtarget_walker(Node *node, split_pathtarget_context *context); +static void add_sp_item_to_pathtarget(PathTarget *target, + split_pathtarget_item *item); +static void add_sp_items_to_pathtarget(PathTarget *target, List *items); /***************************************************************************** @@ -822,6 +840,9 @@ apply_pathtarget_labeling_to_tlist(List *tlist, PathTarget *target) * already meant as a reference to a lower subexpression). So, don't expand * any tlist expressions that appear in input_target, if that's not NULL. * + * It's also important that we preserve any sortgroupref annotation appearing + * in the given target, especially on expressions matching input_target items. 
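The reason for pairing each expression with its sortgroupref, and for the merge rule applied later by add_sp_item_to_pathtarget(), can be sketched with strings standing in for expression trees (a hypothetical model, not the real PathTarget API): textually equal expressions may share a column, a zero sortgroupref can be upgraded by a nonzero one, and two different nonzero sortgrouprefs stay distinct.

    /*
     * Toy model (strings instead of Node trees) of the sortgroupref-aware
     * merge rule: equal expressions can share one column, a zero
     * sortgroupref can be upgraded, but conflicting nonzero sortgrouprefs
     * are kept apart.
     */
    #include <stdio.h>
    #include <string.h>

    typedef struct
    {
        const char *expr;           /* stand-in for an expression tree */
        unsigned    sortgroupref;   /* 0 means "not a sort/group column" */
    } Item;

    #define MAXCOLS 8

    static Item target[MAXCOLS];
    static int  ncols = 0;

    static void
    add_item(Item item)
    {
        for (int i = 0; i < ncols; i++)
        {
            if (strcmp(target[i].expr, item.expr) == 0 &&
                (target[i].sortgroupref == item.sortgroupref ||
                 target[i].sortgroupref == 0 ||
                 item.sortgroupref == 0))
            {
                if (item.sortgroupref)
                    target[i].sortgroupref = item.sortgroupref; /* upgrade */
                return;
            }
        }
        target[ncols++] = item;                                 /* new column */
    }

    int
    main(void)
    {
        add_item((Item) {"f(x)", 0});   /* plain reference to the expression */
        add_item((Item) {"f(x)", 2});   /* same expr used as sort key 2: merges */
        add_item((Item) {"f(x)", 3});   /* different nonzero ref: separate column */

        for (int i = 0; i < ncols; i++)
            printf("col %d: %s (sortgroupref %u)\n",
                   i, target[i].expr, target[i].sortgroupref);
        return 0;
    }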
+ * * The outputs of this function are two parallel lists, one a list of * PathTargets and the other an integer list of bool flags indicating * whether the corresponding PathTarget contains any evaluatable SRFs. @@ -845,6 +866,7 @@ split_pathtarget_at_srfs(PlannerInfo *root, int max_depth; bool need_extra_projection; List *prev_level_tlist; + int lci; ListCell *lc, *lc1, *lc2, @@ -884,10 +906,15 @@ split_pathtarget_at_srfs(PlannerInfo *root, need_extra_projection = false; /* Scan each expression in the PathTarget looking for SRFs */ + lci = 0; foreach(lc, target->exprs) { Node *node = (Node *) lfirst(lc); + /* Tell split_pathtarget_walker about this expr's sortgroupref */ + context.current_sgref = get_pathtarget_sortgroupref(target, lci); + lci++; + /* * Find all SRFs and Vars (and Var-like nodes) in this expression, and * enter them into appropriate lists within the context struct. @@ -981,16 +1008,14 @@ split_pathtarget_at_srfs(PlannerInfo *root, * This target should actually evaluate any SRFs of the current * level, and it needs to propagate forward any Vars needed by * later levels, as well as SRFs computed earlier and needed by - * later levels. We rely on add_new_columns_to_pathtarget() to - * remove duplicate items. Also, for safety, make a separate copy - * of each item for each PathTarget. + * later levels. */ - add_new_columns_to_pathtarget(ntarget, copyObject(level_srfs)); + add_sp_items_to_pathtarget(ntarget, level_srfs); for_each_cell(lc, lnext(lc2)) { List *input_vars = (List *) lfirst(lc); - add_new_columns_to_pathtarget(ntarget, copyObject(input_vars)); + add_sp_items_to_pathtarget(ntarget, input_vars); } for_each_cell(lc, lnext(lc3)) { @@ -999,10 +1024,10 @@ split_pathtarget_at_srfs(PlannerInfo *root, foreach(lcx, input_srfs) { - Expr *srf = (Expr *) lfirst(lcx); + split_pathtarget_item *item = lfirst(lcx); - if (list_member(prev_level_tlist, srf)) - add_new_column_to_pathtarget(ntarget, copyObject(srf)); + if (list_member(prev_level_tlist, item->expr)) + add_sp_item_to_pathtarget(ntarget, item); } } set_pathtarget_cost_width(root, ntarget); @@ -1037,12 +1062,17 @@ split_pathtarget_walker(Node *node, split_pathtarget_context *context) * input_target can be treated like a Var (which indeed it will be after * setrefs.c gets done with it), even if it's actually a SRF. Record it * as being needed for the current expression, and ignore any - * substructure. + * substructure. (Note in particular that this preserves the identity of + * any expressions that appear as sortgrouprefs in input_target.) 
*/ if (list_member(context->input_target_exprs, node)) { + split_pathtarget_item *item = palloc(sizeof(split_pathtarget_item)); + + item->expr = node; + item->sortgroupref = context->current_sgref; context->current_input_vars = lappend(context->current_input_vars, - node); + item); return false; } @@ -1057,8 +1087,12 @@ split_pathtarget_walker(Node *node, split_pathtarget_context *context) IsA(node, GroupingFunc) || IsA(node, WindowFunc)) { + split_pathtarget_item *item = palloc(sizeof(split_pathtarget_item)); + + item->expr = node; + item->sortgroupref = context->current_sgref; context->current_input_vars = lappend(context->current_input_vars, - node); + item); return false; } @@ -1068,15 +1102,20 @@ split_pathtarget_walker(Node *node, split_pathtarget_context *context) */ if (IS_SRF_CALL(node)) { + split_pathtarget_item *item = palloc(sizeof(split_pathtarget_item)); List *save_input_vars = context->current_input_vars; List *save_input_srfs = context->current_input_srfs; int save_current_depth = context->current_depth; int srf_depth; ListCell *lc; + item->expr = node; + item->sortgroupref = context->current_sgref; + context->current_input_vars = NIL; context->current_input_srfs = NIL; context->current_depth = 0; + context->current_sgref = 0; /* subexpressions are not sortgroup items */ (void) expression_tree_walker(node, split_pathtarget_walker, (void *) context); @@ -1094,7 +1133,7 @@ split_pathtarget_walker(Node *node, split_pathtarget_context *context) /* Record this SRF as needing to be evaluated at appropriate level */ lc = list_nth_cell(context->level_srfs, srf_depth); - lfirst(lc) = lappend(lfirst(lc), node); + lfirst(lc) = lappend(lfirst(lc), item); /* Record its inputs as being needed at the same level */ lc = list_nth_cell(context->level_input_vars, srf_depth); @@ -1108,7 +1147,7 @@ split_pathtarget_walker(Node *node, split_pathtarget_context *context) * surrounding expression. */ context->current_input_vars = save_input_vars; - context->current_input_srfs = lappend(save_input_srfs, node); + context->current_input_srfs = lappend(save_input_srfs, item); context->current_depth = Max(save_current_depth, srf_depth); /* We're done here */ @@ -1119,6 +1158,79 @@ split_pathtarget_walker(Node *node, split_pathtarget_context *context) * Otherwise, the node is a scalar (non-set) expression, so recurse to * examine its inputs. */ + context->current_sgref = 0; /* subexpressions are not sortgroup items */ return expression_tree_walker(node, split_pathtarget_walker, (void *) context); } + +/* + * Add a split_pathtarget_item to the PathTarget, unless a matching item is + * already present. This is like add_new_column_to_pathtarget, but allows + * for sortgrouprefs to be handled. An item having zero sortgroupref can + * be merged with one that has a sortgroupref, acquiring the latter's + * sortgroupref. + * + * Note that we don't worry about possibly adding duplicate sortgrouprefs + * to the PathTarget. That would be bad, but it should be impossible unless + * the target passed to split_pathtarget_at_srfs already had duplicates. + * As long as it didn't, we can have at most one split_pathtarget_item with + * any particular nonzero sortgroupref. + */ +static void +add_sp_item_to_pathtarget(PathTarget *target, split_pathtarget_item *item) +{ + int lci; + ListCell *lc; + + /* + * Look for a pre-existing entry that is equal() and does not have a + * conflicting sortgroupref already. 
+ */ + lci = 0; + foreach(lc, target->exprs) + { + Node *node = (Node *) lfirst(lc); + Index sgref = get_pathtarget_sortgroupref(target, lci); + + if ((item->sortgroupref == sgref || + item->sortgroupref == 0 || + sgref == 0) && + equal(item->expr, node)) + { + /* Found a match. Assign item's sortgroupref if it has one. */ + if (item->sortgroupref) + { + if (target->sortgrouprefs == NULL) + { + target->sortgrouprefs = (Index *) + palloc0(list_length(target->exprs) * sizeof(Index)); + } + target->sortgrouprefs[lci] = item->sortgroupref; + } + return; + } + lci++; + } + + /* + * No match, so add item to PathTarget. Copy the expr for safety. + */ + add_column_to_pathtarget(target, (Expr *) copyObject(item->expr), + item->sortgroupref); +} + +/* + * Apply add_sp_item_to_pathtarget to each element of list. + */ +static void +add_sp_items_to_pathtarget(PathTarget *target, List *items) +{ + ListCell *lc; + + foreach(lc, items) + { + split_pathtarget_item *item = lfirst(lc); + + add_sp_item_to_pathtarget(target, item); + } +} diff --git a/src/backend/optimizer/util/var.c b/src/backend/optimizer/util/var.c index b8d7d3ffad..b16b1e4656 100644 --- a/src/backend/optimizer/util/var.c +++ b/src/backend/optimizer/util/var.c @@ -9,7 +9,7 @@ * contains variables. * * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * @@ -657,9 +657,9 @@ pull_var_clause_walker(Node *node, pull_var_clause_context *context) * entries might now be arbitrary expressions, not just Vars. This affects * this function in one important way: we might find ourselves inserting * SubLink expressions into subqueries, and we must make sure that their - * Query.hasSubLinks fields get set to TRUE if so. If there are any + * Query.hasSubLinks fields get set to true if so. If there are any * SubLinks in the join alias lists, the outer Query should already have - * hasSubLinks = TRUE, so this is only relevant to un-flattened subqueries. + * hasSubLinks = true, so this is only relevant to un-flattened subqueries. * * NOTE: this is used on not-yet-planned expressions. We do not expect it * to be applied directly to the whole Query, so if we see a Query to start diff --git a/src/backend/parser/Makefile b/src/backend/parser/Makefile index 4b97f83803..f14febdbda 100644 --- a/src/backend/parser/Makefile +++ b/src/backend/parser/Makefile @@ -23,12 +23,17 @@ include $(top_srcdir)/src/backend/common.mk # There is no correct way to write a rule that generates two files. # Rules with two targets don't have that meaning, they are merely -# shorthand for two otherwise separate rules. To be safe for parallel -# make, we must chain the dependencies like this. The semicolon is -# important, otherwise make will choose the built-in rule for -# gram.y=>gram.c. - -gram.h: gram.c ; +# shorthand for two otherwise separate rules. If we have an action +# that in fact generates two or more files, we must choose one of them +# as primary and show it as the action's output, then make all of the +# other output files dependent on the primary, like this. Furthermore, +# the "touch" action is essential, because it ensures that gram.h is +# marked as newer than (or at least no older than) gram.c. Without that, +# make is likely to try to rebuild gram.h in subsequent runs, which causes +# failures in VPATH builds from tarballs. 
+ +gram.h: gram.c + touch $@ gram.c: BISONFLAGS += -d gram.c: BISON_CHECK_CMD = $(PERL) $(srcdir)/check_keywords.pl $< $(top_srcdir)/src/include/parser/kwlist.h diff --git a/src/backend/parser/analyze.c b/src/backend/parser/analyze.c index 4fb793cfbf..226927b7ab 100644 --- a/src/backend/parser/analyze.c +++ b/src/backend/parser/analyze.c @@ -14,7 +14,7 @@ * contain optimizable statements, which we should transform. * * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/backend/parser/analyze.c @@ -36,6 +36,8 @@ #include "parser/parse_coerce.h" #include "parser/parse_collate.h" #include "parser/parse_cte.h" +#include "parser/parse_expr.h" +#include "parser/parse_func.h" #include "parser/parse_oper.h" #include "parser/parse_param.h" #include "parser/parse_relation.h" @@ -74,6 +76,8 @@ static Query *transformExplainStmt(ParseState *pstate, ExplainStmt *stmt); static Query *transformCreateTableAsStmt(ParseState *pstate, CreateTableAsStmt *stmt); +static Query *transformCallStmt(ParseState *pstate, + CallStmt *stmt); static void transformLockingClause(ParseState *pstate, Query *qry, LockingClause *lc, bool pushedDown); #ifdef RAW_EXPRESSION_COVERAGE_TEST @@ -318,6 +322,11 @@ transformStmt(ParseState *pstate, Node *parseTree) (CreateTableAsStmt *) parseTree); break; + case T_CallStmt: + result = transformCallStmt(pstate, + (CallStmt *) parseTree); + break; + default: /* @@ -847,16 +856,8 @@ transformInsertStmt(ParseState *pstate, InsertStmt *stmt) /* Process ON CONFLICT, if any. */ if (stmt->onConflictClause) - { - /* Bail out if target relation is partitioned table */ - if (pstate->p_target_rangetblentry->relkind == RELKIND_PARTITIONED_TABLE) - ereport(ERROR, - (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("ON CONFLICT clause is not supported with partitioned tables"))); - qry->onConflict = transformOnConflictClause(pstate, stmt->onConflictClause); - } /* * If we have a RETURNING clause, we need to add the target relation to @@ -1021,9 +1022,6 @@ transformOnConflictClause(ParseState *pstate, if (onConflictClause->action == ONCONFLICT_UPDATE) { Relation targetrel = pstate->p_target_relation; - Var *var; - TargetEntry *te; - int attno; /* * All INSERT expressions have been parsed, get ready for potentially @@ -1032,75 +1030,37 @@ transformOnConflictClause(ParseState *pstate, pstate->p_is_insert = false; /* - * Add range table entry for the EXCLUDED pseudo relation; relkind is + * Add range table entry for the EXCLUDED pseudo relation. relkind is * set to composite to signal that we're not dealing with an actual - * relation. + * relation, and no permission checks are required on it. (We'll + * check the actual target relation, instead.) */ exclRte = addRangeTableEntryForRelation(pstate, targetrel, + RowExclusiveLock, makeAlias("excluded", NIL), false, false); exclRte->relkind = RELKIND_COMPOSITE_TYPE; - exclRelIndex = list_length(pstate->p_rtable); - - /* - * Build a targetlist representing the columns of the EXCLUDED pseudo - * relation. Have to be careful to use resnos that correspond to - * attnos of the underlying relation. - */ - for (attno = 0; attno < targetrel->rd_rel->relnatts; attno++) - { - Form_pg_attribute attr = targetrel->rd_att->attrs[attno]; - char *name; - - if (attr->attisdropped) - { - /* - * can't use atttypid here, but it doesn't really matter what - * type the Const claims to be. 
- */ - var = (Var *) makeNullConst(INT4OID, -1, InvalidOid); - name = ""; - } - else - { - var = makeVar(exclRelIndex, attno + 1, - attr->atttypid, attr->atttypmod, - attr->attcollation, - 0); - name = pstrdup(NameStr(attr->attname)); - } + exclRte->requiredPerms = 0; + /* other permissions fields in exclRte are already empty */ - te = makeTargetEntry((Expr *) var, - attno + 1, - name, - false); - - /* don't require select access yet */ - exclRelTlist = lappend(exclRelTlist, te); - } + exclRelIndex = list_length(pstate->p_rtable); - /* - * Add a whole-row-Var entry to support references to "EXCLUDED.*". - * Like the other entries in exclRelTlist, its resno must match the - * Var's varattno, else the wrong things happen while resolving - * references in setrefs.c. This is against normal conventions for - * targetlists, but it's okay since we don't use this as a real tlist. - */ - var = makeVar(exclRelIndex, InvalidAttrNumber, - targetrel->rd_rel->reltype, - -1, InvalidOid, 0); - te = makeTargetEntry((Expr *) var, InvalidAttrNumber, NULL, true); - exclRelTlist = lappend(exclRelTlist, te); + /* Create EXCLUDED rel's targetlist for use by EXPLAIN */ + exclRelTlist = BuildOnConflictExcludedTargetlist(targetrel, + exclRelIndex); /* * Add EXCLUDED and the target RTE to the namespace, so that they can - * be used in the UPDATE statement. + * be used in the UPDATE subexpressions. */ addRTEtoQuery(pstate, exclRte, false, true, true); addRTEtoQuery(pstate, pstate->p_target_rangetblentry, false, true, true); + /* + * Now transform the UPDATE subexpressions. + */ onConflictSet = transformUpdateTargetList(pstate, onConflictClause->targetList); @@ -1125,6 +1085,74 @@ transformOnConflictClause(ParseState *pstate, } +/* + * BuildOnConflictExcludedTargetlist + * Create target list for the EXCLUDED pseudo-relation of ON CONFLICT, + * representing the columns of targetrel with varno exclRelIndex. + * + * Note: Exported for use in the rewriter. + */ +List * +BuildOnConflictExcludedTargetlist(Relation targetrel, + Index exclRelIndex) +{ + List *result = NIL; + int attno; + Var *var; + TargetEntry *te; + + /* + * Note that resnos of the tlist must correspond to attnos of the + * underlying relation, hence we need entries for dropped columns too. + */ + for (attno = 0; attno < RelationGetNumberOfAttributes(targetrel); attno++) + { + Form_pg_attribute attr = TupleDescAttr(targetrel->rd_att, attno); + char *name; + + if (attr->attisdropped) + { + /* + * can't use atttypid here, but it doesn't really matter what type + * the Const claims to be. + */ + var = (Var *) makeNullConst(INT4OID, -1, InvalidOid); + name = NULL; + } + else + { + var = makeVar(exclRelIndex, attno + 1, + attr->atttypid, attr->atttypmod, + attr->attcollation, + 0); + name = pstrdup(NameStr(attr->attname)); + } + + te = makeTargetEntry((Expr *) var, + attno + 1, + name, + false); + + result = lappend(result, te); + } + + /* + * Add a whole-row-Var entry to support references to "EXCLUDED.*". Like + * the other entries in the EXCLUDED tlist, its resno must match the Var's + * varattno, else the wrong things happen while resolving references in + * setrefs.c. This is against normal conventions for targetlists, but + * it's okay since we don't use this as a real tlist. 
+ */ + var = makeVar(exclRelIndex, InvalidAttrNumber, + targetrel->rd_rel->reltype, + -1, InvalidOid, 0); + te = makeTargetEntry((Expr *) var, InvalidAttrNumber, NULL, true); + result = lappend(result, te); + + return result; +} + + /* * count_rowexpr_columns - * get number of columns contained in a ROW() expression; @@ -2273,8 +2301,8 @@ transformUpdateTargetList(ParseState *pstate, List *origTlist) EXPR_KIND_UPDATE_SOURCE); /* Prepare to assign non-conflicting resnos to resjunk attributes */ - if (pstate->p_next_resno <= pstate->p_target_relation->rd_rel->relnatts) - pstate->p_next_resno = pstate->p_target_relation->rd_rel->relnatts + 1; + if (pstate->p_next_resno <= RelationGetNumberOfAttributes(pstate->p_target_relation)) + pstate->p_next_resno = RelationGetNumberOfAttributes(pstate->p_target_relation) + 1; /* Prepare non-junk columns for assignment to target table */ target_rte = pstate->p_target_rangetblentry; @@ -2572,6 +2600,43 @@ transformCreateTableAsStmt(ParseState *pstate, CreateTableAsStmt *stmt) return result; } +/* + * transform a CallStmt + * + * We need to do parse analysis on the procedure call and its arguments. + */ +static Query * +transformCallStmt(ParseState *pstate, CallStmt *stmt) +{ + List *targs; + ListCell *lc; + Node *node; + Query *result; + + targs = NIL; + foreach(lc, stmt->funccall->args) + { + targs = lappend(targs, transformExpr(pstate, + (Node *) lfirst(lc), + EXPR_KIND_CALL_ARGUMENT)); + } + + node = ParseFuncOrColumn(pstate, + stmt->funccall->funcname, + targs, + pstate->p_last_srf, + stmt->funccall, + true, + stmt->funccall->location); + + stmt->funcexpr = castNode(FuncExpr, node); + + result = makeNode(Query); + result->commandType = CMD_UTILITY; + result->utilityStmt = (Node *) stmt; + + return result; +} /* * Produce a string representation of a LockClauseStrength value. 
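The CALL statement handled by transformCallStmt() above pairs with the new CREATE PROCEDURE support added elsewhere in this patch. As a minimal usage sketch (the procedure, table, and argument names below are hypothetical, not taken from the patch):

    -- Hypothetical names, for illustration only.
    CREATE TABLE audit_log (note text);

    CREATE PROCEDURE log_note(msg text)
    LANGUAGE sql
    AS $$ INSERT INTO audit_log VALUES (msg); $$;

    -- CALL goes through the new T_CallStmt branch: transformCallStmt() runs
    -- parse analysis on the argument expressions (EXPR_KIND_CALL_ARGUMENT)
    -- and resolves the procedure via ParseFuncOrColumn().
    CALL log_note('checkpoint reached');

The resulting Query is a CMD_UTILITY statement wrapping the CallStmt, so execution stays on the utility path rather than the planner.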
diff --git a/src/backend/parser/check_keywords.pl b/src/backend/parser/check_keywords.pl index 6eb0aea96b..718441c215 100644 --- a/src/backend/parser/check_keywords.pl +++ b/src/backend/parser/check_keywords.pl @@ -4,7 +4,7 @@ # Usage: check_keywords.pl gram.y kwlist.h # src/backend/parser/check_keywords.pl -# Copyright (c) 2009-2017, PostgreSQL Global Development Group +# Copyright (c) 2009-2018, PostgreSQL Global Development Group use warnings; use strict; @@ -18,6 +18,7 @@ sub error { print STDERR @_; $errors = 1; + return; } $, = ' '; # set output field separator @@ -177,14 +178,14 @@ sub error if ($kwstring !~ /^[a-z_]+$/) { error -"'$kwstring' is not a valid keyword string, must be all lower-case ASCII chars"; + "'$kwstring' is not a valid keyword string, must be all lower-case ASCII chars"; } # Check that the keyword name is valid: all upper-case ASCII chars if ($kwname !~ /^[A-Z_]+$/) { error -"'$kwname' is not a valid keyword name, must be all upper-case ASCII chars"; + "'$kwname' is not a valid keyword name, must be all upper-case ASCII chars"; } # Check that the keyword string matches keyword name @@ -193,7 +194,7 @@ sub error if ($bare_kwname ne uc($kwstring)) { error -"keyword name '$kwname' doesn't match keyword string '$kwstring'"; + "keyword name '$kwname' doesn't match keyword string '$kwstring'"; } # Check that the keyword is present in the grammar diff --git a/src/backend/parser/gram.y b/src/backend/parser/gram.y index 7d0de99baf..2effd51135 100644 --- a/src/backend/parser/gram.y +++ b/src/backend/parser/gram.y @@ -6,7 +6,7 @@ * gram.y * POSTGRESQL BISON rules/actions * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * @@ -115,7 +115,7 @@ typedef struct PrivTarget { GrantTargetType targtype; - GrantObjectType objtype; + ObjectType objtype; List *objs; } PrivTarget; @@ -253,17 +253,17 @@ static Node *makeRecursiveViewSelect(char *relname, List *aliases, Node *query); AlterCompositeTypeStmt AlterUserMappingStmt AlterRoleStmt AlterRoleSetStmt AlterPolicyStmt AlterDefaultPrivilegesStmt DefACLAction - AnalyzeStmt ClosePortalStmt ClusterStmt CommentStmt + AnalyzeStmt CallStmt ClosePortalStmt ClusterStmt CommentStmt ConstraintsSetStmt CopyStmt CreateAsStmt CreateCastStmt CreateDomainStmt CreateExtensionStmt CreateGroupStmt CreateOpClassStmt CreateOpFamilyStmt AlterOpFamilyStmt CreatePLangStmt CreateSchemaStmt CreateSeqStmt CreateStmt CreateStatsStmt CreateTableSpaceStmt CreateFdwStmt CreateForeignServerStmt CreateForeignTableStmt - CreateAssertStmt CreateTransformStmt CreateTrigStmt CreateEventTrigStmt + CreateAssertionStmt CreateTransformStmt CreateTrigStmt CreateEventTrigStmt CreateUserStmt CreateUserMappingStmt CreateRoleStmt CreatePolicyStmt CreatedbStmt DeclareCursorStmt DefineStmt DeleteStmt DiscardStmt DoStmt DropOpClassStmt DropOpFamilyStmt DropPLangStmt DropStmt - DropAssertStmt DropCastStmt DropRoleStmt + DropCastStmt DropRoleStmt DropdbStmt DropTableSpaceStmt DropTransformStmt DropUserMappingStmt ExplainStmt FetchStmt @@ -290,7 +290,7 @@ static Node *makeRecursiveViewSelect(char *relname, List *aliases, Node *query); %type add_drop opt_asc_desc opt_nulls_order %type alter_table_cmd alter_type_cmd opt_collate_clause - replica_identity partition_cmd + replica_identity partition_cmd index_partition_cmd %type alter_table_cmds alter_type_cmds %type alter_identity_column_option_list %type 
alter_identity_column_option @@ -306,6 +306,7 @@ static Node *makeRecursiveViewSelect(char *relname, List *aliases, Node *query); %type opt_lock lock_type cast_context %type vacuum_option_list vacuum_option_elem + analyze_option_list analyze_option_elem %type opt_or_replace opt_grant_grant_option opt_grant_admin_option opt_nowait opt_if_exists opt_with_data @@ -365,6 +366,7 @@ static Node *makeRecursiveViewSelect(char *relname, List *aliases, Node *query); %type DefACLOptionList %type import_qualification_type %type import_qualification +%type vacuum_relation %type stmtblock stmtmulti OptTableElementList TableElementList OptInherit definition @@ -378,6 +380,7 @@ static Node *makeRecursiveViewSelect(char *relname, List *aliases, Node *query); oper_argtypes RuleActionList RuleActionMulti opt_column_list columnList opt_name_list sort_clause opt_sort_clause sortby_list index_params + opt_include opt_c_include index_including_params name_list role_list from_clause from_list opt_array_bounds qualified_name_list any_name any_name_list type_name_list any_operator expr_list attrs @@ -396,6 +399,7 @@ static Node *makeRecursiveViewSelect(char *relname, List *aliases, Node *query); transform_element_list transform_type_list TriggerTransitions TriggerReferencing publication_name_list + vacuum_relation_list opt_vacuum_relation_list %type group_by_list %type group_by_item empty_grouping_set rollup_clause cube_clause @@ -435,7 +439,7 @@ static Node *makeRecursiveViewSelect(char *relname, List *aliases, Node *query); %type opt_instead %type opt_unique opt_concurrently opt_verbose opt_full -%type opt_freeze opt_default opt_recheck +%type opt_freeze opt_analyze opt_default opt_recheck %type opt_binary opt_oids copy_delimiter %type copy_from opt_program @@ -447,7 +451,7 @@ static Node *makeRecursiveViewSelect(char *relname, List *aliases, Node *query); %type fetch_args limit_clause select_limit_value offset_clause select_offset_value - select_offset_value2 opt_select_fetch_first_value + select_fetch_first_value I_or_F_const %type row_or_rows first_or_next %type OptSeqOptList SeqOptList OptParenthesizedSeqOptList @@ -568,6 +572,7 @@ static Node *makeRecursiveViewSelect(char *relname, List *aliases, Node *query); %type window_clause window_definition_list opt_partition_clause %type window_definition over_clause window_specification opt_frame_clause frame_extent frame_bound +%type opt_window_exclusion_clause %type opt_existing_window_name %type opt_if_not_exists %type generated_when override_kind @@ -575,9 +580,10 @@ static Node *makeRecursiveViewSelect(char *relname, List *aliases, Node *query); %type part_strategy %type part_elem %type part_params -%type ForValues +%type PartitionBoundSpec %type partbound_datum PartitionRangeDatum -%type partbound_datum_list range_datum_list +%type hash_partbound partbound_datum_list range_datum_list +%type hash_partbound_elem /* * Non-keyword token types. These are hard-wired into the "flex" lexer. 
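Several of the nonterminals declared above (PartitionBoundSpec with hash bounds, opt_include/index_including_params, vacuum_relation_list) correspond to new SQL syntax added by this patch. A brief usage sketch, with all object names hypothetical:

    -- Hypothetical table/column names, for illustration only.
    CREATE TABLE orders (id int, total numeric) PARTITION BY HASH (id);

    -- hash_partbound: FOR VALUES WITH (MODULUS ..., REMAINDER ...)
    CREATE TABLE orders_p0 PARTITION OF orders
        FOR VALUES WITH (MODULUS 2, REMAINDER 0);
    CREATE TABLE orders_p1 PARTITION OF orders
        FOR VALUES WITH (MODULUS 2, REMAINDER 1);

    -- opt_include / index_including_params: covering (INCLUDE) columns
    CREATE INDEX ON orders_p0 (id) INCLUDE (total);

    -- vacuum_relation_list: VACUUM may now name several relations
    VACUUM (ANALYZE) orders_p0, orders_p1;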
@@ -608,7 +614,7 @@ static Node *makeRecursiveViewSelect(char *relname, List *aliases, Node *query); BACKWARD BEFORE BEGIN_P BETWEEN BIGINT BINARY BIT BOOLEAN_P BOTH BY - CACHE CALLED CASCADE CASCADED CASE CAST CATALOG_P CHAIN CHAR_P + CACHE CALL CALLED CASCADE CASCADED CASE CAST CATALOG_P CHAIN CHAR_P CHARACTER CHARACTERISTICS CHECK CHECKPOINT CLASS CLOSE CLUSTER COALESCE COLLATE COLLATION COLUMN COLUMNS COMMENT COMMENTS COMMIT COMMITTED CONCURRENTLY CONFIGURATION CONFLICT CONNECTION CONSTRAINT @@ -629,11 +635,11 @@ static Node *makeRecursiveViewSelect(char *relname, List *aliases, Node *query); FALSE_P FAMILY FETCH FILTER FIRST_P FLOAT_P FOLLOWING FOR FORCE FOREIGN FORWARD FREEZE FROM FULL FUNCTION FUNCTIONS - GENERATED GLOBAL GRANT GRANTED GREATEST GROUP_P GROUPING + GENERATED GLOBAL GRANT GRANTED GREATEST GROUP_P GROUPING GROUPS HANDLER HAVING HEADER_P HOLD HOUR_P - IDENTITY_P IF_P ILIKE IMMEDIATE IMMUTABLE IMPLICIT_P IMPORT_P IN_P + IDENTITY_P IF_P ILIKE IMMEDIATE IMMUTABLE IMPLICIT_P IMPORT_P IN_P INCLUDE INCLUDING INCREMENT INDEX INDEXES INHERIT INHERITS INITIALLY INLINE_P INNER_P INOUT INPUT_P INSENSITIVE INSERT INSTEAD INT_P INTEGER INTERSECT INTERVAL INTO INVOKER IS ISNULL ISOLATION @@ -653,18 +659,19 @@ static Node *makeRecursiveViewSelect(char *relname, List *aliases, Node *query); NULLS_P NUMERIC OBJECT_P OF OFF OFFSET OIDS OLD ON ONLY OPERATOR OPTION OPTIONS OR - ORDER ORDINALITY OUT_P OUTER_P OVER OVERLAPS OVERLAY OVERRIDING OWNED OWNER + ORDER ORDINALITY OTHERS OUT_P OUTER_P + OVER OVERLAPS OVERLAY OVERRIDING OWNED OWNER PARALLEL PARSER PARTIAL PARTITION PASSING PASSWORD PLACING PLANS POLICY POSITION PRECEDING PRECISION PRESERVE PREPARE PREPARED PRIMARY - PRIOR PRIVILEGES PROCEDURAL PROCEDURE PROGRAM PUBLICATION + PRIOR PRIVILEGES PROCEDURAL PROCEDURE PROCEDURES PROGRAM PUBLICATION QUOTE RANGE READ REAL REASSIGN RECHECK RECURSIVE REF REFERENCES REFERENCING REFRESH REINDEX RELATIVE_P RELEASE RENAME REPEATABLE REPLACE REPLICA RESET RESTART RESTRICT RETURNING RETURNS REVOKE RIGHT ROLE ROLLBACK ROLLUP - ROW ROWS RULE + ROUTINE ROUTINES ROW ROWS RULE SAVEPOINT SCHEMA SCHEMAS SCROLL SEARCH SECOND_P SECURITY SELECT SEQUENCE SEQUENCES SERIALIZABLE SERVER SESSION SESSION_USER SET SETS SETOF SHARE SHOW @@ -673,7 +680,8 @@ static Node *makeRecursiveViewSelect(char *relname, List *aliases, Node *query); SUBSCRIPTION SUBSTRING SYMMETRIC SYSID SYSTEM_P TABLE TABLES TABLESAMPLE TABLESPACE TEMP TEMPLATE TEMPORARY TEXT_P THEN - TIME TIMESTAMP TO TRAILING TRANSACTION TRANSFORM TREAT TRIGGER TRIM TRUE_P + TIES TIME TIMESTAMP TO TRAILING TRANSACTION TRANSFORM + TREAT TRIGGER TRIM TRUE_P TRUNCATE TRUSTED TYPE_P TYPES_P UNBOUNDED UNCOMMITTED UNENCRYPTED UNION UNIQUE UNKNOWN UNLISTEN UNLOGGED @@ -721,9 +729,10 @@ static Node *makeRecursiveViewSelect(char *relname, List *aliases, Node *query); * between POSTFIXOP and Op. We can safely assign the same priority to * various unreserved keywords as needed to resolve ambiguities (this can't * have any bad effects since obviously the keywords will still behave the - * same as if they weren't keywords). We need to do this for PARTITION, - * RANGE, ROWS to support opt_existing_window_name; and for RANGE, ROWS - * so that they can follow a_expr without creating postfix-operator problems; + * same as if they weren't keywords). 
We need to do this: + * for PARTITION, RANGE, ROWS, GROUPS to support opt_existing_window_name; + * for RANGE, ROWS, GROUPS so that they can follow a_expr without creating + * postfix-operator problems; * for GENERATED so that it can follow b_expr; * and for NULL so that it can follow b_expr in ColQualList without creating * postfix-operator problems. @@ -743,7 +752,7 @@ static Node *makeRecursiveViewSelect(char *relname, List *aliases, Node *query); * blame any funny behavior of UNBOUNDED on the SQL standard, though. */ %nonassoc UNBOUNDED /* ideally should have same precedence as IDENT */ -%nonassoc IDENT GENERATED NULL_P PARTITION RANGE ROWS PRECEDING FOLLOWING CUBE ROLLUP +%nonassoc IDENT GENERATED NULL_P PARTITION RANGE ROWS GROUPS PRECEDING FOLLOWING CUBE ROLLUP %left Op OPERATOR /* multi-character ops and user-defined operators */ %left '+' '-' %left '*' '/' '%' @@ -842,6 +851,7 @@ stmt : | AlterTSDictionaryStmt | AlterUserMappingStmt | AnalyzeStmt + | CallStmt | CheckPointStmt | ClosePortalStmt | ClusterStmt @@ -850,7 +860,7 @@ stmt : | CopyStmt | CreateAmStmt | CreateAsStmt - | CreateAssertStmt + | CreateAssertionStmt | CreateCastStmt | CreateConversionStmt | CreateDomainStmt @@ -886,7 +896,6 @@ stmt : | DeleteStmt | DiscardStmt | DoStmt - | DropAssertStmt | DropCastStmt | DropOpClassStmt | DropOpFamilyStmt @@ -937,6 +946,20 @@ stmt : { $$ = NULL; } ; +/***************************************************************************** + * + * CALL statement + * + *****************************************************************************/ + +CallStmt: CALL func_application + { + CallStmt *n = makeNode(CallStmt); + n->funccall = castNode(FuncCall, $2); + $$ = (Node *)n; + } + ; + /***************************************************************************** * * Create a new Postgres DBMS role @@ -1005,7 +1028,7 @@ AlterOptRoleElem: } | INHERIT { - $$ = makeDefElem("inherit", (Node *)makeInteger(TRUE), @1); + $$ = makeDefElem("inherit", (Node *)makeInteger(true), @1); } | CONNECTION LIMIT SignedIconst { @@ -1028,36 +1051,36 @@ AlterOptRoleElem: * size of the main parser. 
*/ if (strcmp($1, "superuser") == 0) - $$ = makeDefElem("superuser", (Node *)makeInteger(TRUE), @1); + $$ = makeDefElem("superuser", (Node *)makeInteger(true), @1); else if (strcmp($1, "nosuperuser") == 0) - $$ = makeDefElem("superuser", (Node *)makeInteger(FALSE), @1); + $$ = makeDefElem("superuser", (Node *)makeInteger(false), @1); else if (strcmp($1, "createrole") == 0) - $$ = makeDefElem("createrole", (Node *)makeInteger(TRUE), @1); + $$ = makeDefElem("createrole", (Node *)makeInteger(true), @1); else if (strcmp($1, "nocreaterole") == 0) - $$ = makeDefElem("createrole", (Node *)makeInteger(FALSE), @1); + $$ = makeDefElem("createrole", (Node *)makeInteger(false), @1); else if (strcmp($1, "replication") == 0) - $$ = makeDefElem("isreplication", (Node *)makeInteger(TRUE), @1); + $$ = makeDefElem("isreplication", (Node *)makeInteger(true), @1); else if (strcmp($1, "noreplication") == 0) - $$ = makeDefElem("isreplication", (Node *)makeInteger(FALSE), @1); + $$ = makeDefElem("isreplication", (Node *)makeInteger(false), @1); else if (strcmp($1, "createdb") == 0) - $$ = makeDefElem("createdb", (Node *)makeInteger(TRUE), @1); + $$ = makeDefElem("createdb", (Node *)makeInteger(true), @1); else if (strcmp($1, "nocreatedb") == 0) - $$ = makeDefElem("createdb", (Node *)makeInteger(FALSE), @1); + $$ = makeDefElem("createdb", (Node *)makeInteger(false), @1); else if (strcmp($1, "login") == 0) - $$ = makeDefElem("canlogin", (Node *)makeInteger(TRUE), @1); + $$ = makeDefElem("canlogin", (Node *)makeInteger(true), @1); else if (strcmp($1, "nologin") == 0) - $$ = makeDefElem("canlogin", (Node *)makeInteger(FALSE), @1); + $$ = makeDefElem("canlogin", (Node *)makeInteger(false), @1); else if (strcmp($1, "bypassrls") == 0) - $$ = makeDefElem("bypassrls", (Node *)makeInteger(TRUE), @1); + $$ = makeDefElem("bypassrls", (Node *)makeInteger(true), @1); else if (strcmp($1, "nobypassrls") == 0) - $$ = makeDefElem("bypassrls", (Node *)makeInteger(FALSE), @1); + $$ = makeDefElem("bypassrls", (Node *)makeInteger(false), @1); else if (strcmp($1, "noinherit") == 0) { /* * Note that INHERIT is a keyword, so it's handled by main parser, but * NOINHERIT is handled here. 
*/ - $$ = makeDefElem("inherit", (Node *)makeInteger(FALSE), @1); + $$ = makeDefElem("inherit", (Node *)makeInteger(false), @1); } else ereport(ERROR, @@ -1190,21 +1213,21 @@ DropRoleStmt: DROP ROLE role_list { DropRoleStmt *n = makeNode(DropRoleStmt); - n->missing_ok = FALSE; + n->missing_ok = false; n->roles = $3; $$ = (Node *)n; } | DROP ROLE IF_P EXISTS role_list { DropRoleStmt *n = makeNode(DropRoleStmt); - n->missing_ok = TRUE; + n->missing_ok = true; n->roles = $5; $$ = (Node *)n; } | DROP USER role_list { DropRoleStmt *n = makeNode(DropRoleStmt); - n->missing_ok = FALSE; + n->missing_ok = false; n->roles = $3; $$ = (Node *)n; } @@ -1212,20 +1235,20 @@ DropRoleStmt: { DropRoleStmt *n = makeNode(DropRoleStmt); n->roles = $5; - n->missing_ok = TRUE; + n->missing_ok = true; $$ = (Node *)n; } | DROP GROUP_P role_list { DropRoleStmt *n = makeNode(DropRoleStmt); - n->missing_ok = FALSE; + n->missing_ok = false; n->roles = $3; $$ = (Node *)n; } | DROP GROUP_P IF_P EXISTS role_list { DropRoleStmt *n = makeNode(DropRoleStmt); - n->missing_ok = TRUE; + n->missing_ok = true; n->roles = $5; $$ = (Node *)n; } @@ -1730,8 +1753,8 @@ constraints_set_list: ; constraints_set_mode: - DEFERRED { $$ = TRUE; } - | IMMEDIATE { $$ = FALSE; } + DEFERRED { $$ = true; } + | IMMEDIATE { $$ = false; } ; @@ -1873,6 +1896,15 @@ AlterTableStmt: n->missing_ok = true; $$ = (Node *)n; } + | ALTER INDEX qualified_name index_partition_cmd + { + AlterTableStmt *n = makeNode(AlterTableStmt); + n->relation = $3; + n->cmds = list_make1($4); + n->relkind = OBJECT_INDEX; + n->missing_ok = false; + $$ = (Node *)n; + } | ALTER INDEX ALL IN_P TABLESPACE name SET TABLESPACE name opt_nowait { AlterTableMoveAllStmt *n = @@ -1980,7 +2012,7 @@ alter_table_cmds: partition_cmd: /* ALTER TABLE ATTACH PARTITION FOR VALUES */ - ATTACH PARTITION qualified_name ForValues + ATTACH PARTITION qualified_name PartitionBoundSpec { AlterTableCmd *n = makeNode(AlterTableCmd); PartitionCmd *cmd = makeNode(PartitionCmd); @@ -2007,6 +2039,22 @@ partition_cmd: } ; +index_partition_cmd: + /* ALTER INDEX ATTACH PARTITION */ + ATTACH PARTITION qualified_name + { + AlterTableCmd *n = makeNode(AlterTableCmd); + PartitionCmd *cmd = makeNode(PartitionCmd); + + n->subtype = AT_AttachPartition; + cmd->name = $3; + cmd->bound = NULL; + n->def = (Node *) cmd; + + $$ = (Node *) n; + } + ; + alter_table_cmd: /* ALTER TABLE ADD */ ADD_P columnDef @@ -2078,6 +2126,22 @@ alter_table_cmd: n->def = (Node *) makeInteger($6); $$ = (Node *)n; } + /* ALTER TABLE ALTER [COLUMN] SET STATISTICS */ + | ALTER opt_column Iconst SET STATISTICS SignedIconst + { + AlterTableCmd *n = makeNode(AlterTableCmd); + + if ($3 <= 0 || $3 > PG_INT16_MAX) + ereport(ERROR, + (errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("column number must be in range from 1 to %d", PG_INT16_MAX), + parser_errposition(@3))); + + n->subtype = AT_SetStatistics; + n->num = (int16) $3; + n->def = (Node *) makeInteger($6); + $$ = (Node *)n; + } /* ALTER TABLE ALTER [COLUMN] SET ( column_parameter = value [, ... 
] ) */ | ALTER opt_column ColId SET reloptions { @@ -2156,7 +2220,7 @@ alter_table_cmd: n->subtype = AT_DropColumn; n->name = $5; n->behavior = $6; - n->missing_ok = TRUE; + n->missing_ok = true; $$ = (Node *)n; } /* ALTER TABLE DROP [COLUMN] [RESTRICT|CASCADE] */ @@ -2166,7 +2230,7 @@ alter_table_cmd: n->subtype = AT_DropColumn; n->name = $3; n->behavior = $4; - n->missing_ok = FALSE; + n->missing_ok = false; $$ = (Node *)n; } /* @@ -2234,7 +2298,7 @@ alter_table_cmd: n->subtype = AT_DropConstraint; n->name = $5; n->behavior = $6; - n->missing_ok = TRUE; + n->missing_ok = true; $$ = (Node *)n; } /* ALTER TABLE DROP CONSTRAINT [RESTRICT|CASCADE] */ @@ -2244,7 +2308,7 @@ alter_table_cmd: n->subtype = AT_DropConstraint; n->name = $3; n->behavior = $4; - n->missing_ok = FALSE; + n->missing_ok = false; $$ = (Node *)n; } /* ALTER TABLE SET WITH OIDS */ @@ -2619,13 +2683,67 @@ alter_identity_column_option: } ; -ForValues: +PartitionBoundSpec: + /* a HASH partition*/ + FOR VALUES WITH '(' hash_partbound ')' + { + ListCell *lc; + PartitionBoundSpec *n = makeNode(PartitionBoundSpec); + + n->strategy = PARTITION_STRATEGY_HASH; + n->modulus = n->remainder = -1; + + foreach (lc, $5) + { + DefElem *opt = lfirst_node(DefElem, lc); + + if (strcmp(opt->defname, "modulus") == 0) + { + if (n->modulus != -1) + ereport(ERROR, + (errcode(ERRCODE_DUPLICATE_OBJECT), + errmsg("modulus for hash partition provided more than once"), + parser_errposition(opt->location))); + n->modulus = defGetInt32(opt); + } + else if (strcmp(opt->defname, "remainder") == 0) + { + if (n->remainder != -1) + ereport(ERROR, + (errcode(ERRCODE_DUPLICATE_OBJECT), + errmsg("remainder for hash partition provided more than once"), + parser_errposition(opt->location))); + n->remainder = defGetInt32(opt); + } + else + ereport(ERROR, + (errcode(ERRCODE_SYNTAX_ERROR), + errmsg("unrecognized hash partition bound specification \"%s\"", + opt->defname), + parser_errposition(opt->location))); + } + + if (n->modulus == -1) + ereport(ERROR, + (errcode(ERRCODE_SYNTAX_ERROR), + errmsg("modulus for hash partition must be specified"))); + if (n->remainder == -1) + ereport(ERROR, + (errcode(ERRCODE_SYNTAX_ERROR), + errmsg("remainder for hash partition must be specified"))); + + n->location = @3; + + $$ = n; + } + /* a LIST partition */ - FOR VALUES IN_P '(' partbound_datum_list ')' + | FOR VALUES IN_P '(' partbound_datum_list ')' { PartitionBoundSpec *n = makeNode(PartitionBoundSpec); n->strategy = PARTITION_STRATEGY_LIST; + n->is_default = false; n->listdatums = $5; n->location = @3; @@ -2638,17 +2756,49 @@ ForValues: PartitionBoundSpec *n = makeNode(PartitionBoundSpec); n->strategy = PARTITION_STRATEGY_RANGE; + n->is_default = false; n->lowerdatums = $5; n->upperdatums = $9; n->location = @3; $$ = n; } + + /* a DEFAULT partition */ + | DEFAULT + { + PartitionBoundSpec *n = makeNode(PartitionBoundSpec); + + n->is_default = true; + n->location = @1; + + $$ = n; + } + ; + +hash_partbound_elem: + NonReservedWord Iconst + { + $$ = makeDefElem($1, (Node *)makeInteger($2), @1); + } + ; + +hash_partbound: + hash_partbound_elem + { + $$ = list_make1($1); + } + | hash_partbound ',' hash_partbound_elem + { + $$ = lappend($1, $3); + } ; partbound_datum: Sconst { $$ = makeStringConst($1, @1); } | NumericOnly { $$ = makeAConst($1, @1); } + | TRUE_P { $$ = makeStringConst(pstrdup("true"), @1); } + | FALSE_P { $$ = makeStringConst(pstrdup("false"), @1); } | NULL_P { $$ = makeNullAConst(@1); } ; @@ -2739,7 +2889,7 @@ alter_type_cmd: n->subtype = AT_DropColumn; n->name = 
$5; n->behavior = $6; - n->missing_ok = TRUE; + n->missing_ok = true; $$ = (Node *)n; } /* ALTER TYPE DROP ATTRIBUTE [RESTRICT|CASCADE] */ @@ -2749,7 +2899,7 @@ alter_type_cmd: n->subtype = AT_DropColumn; n->name = $3; n->behavior = $4; - n->missing_ok = FALSE; + n->missing_ok = false; $$ = (Node *)n; } /* ALTER TYPE ALTER ATTRIBUTE [SET DATA] TYPE [RESTRICT|CASCADE] */ @@ -2869,13 +3019,13 @@ CopyStmt: COPY opt_binary qualified_name opt_column_list opt_oids ; copy_from: - FROM { $$ = TRUE; } - | TO { $$ = FALSE; } + FROM { $$ = true; } + | TO { $$ = false; } ; opt_program: - PROGRAM { $$ = TRUE; } - | /* EMPTY */ { $$ = FALSE; } + PROGRAM { $$ = true; } + | /* EMPTY */ { $$ = false; } ; /* @@ -2906,11 +3056,11 @@ copy_opt_item: } | OIDS { - $$ = makeDefElem("oids", (Node *)makeInteger(TRUE), @1); + $$ = makeDefElem("oids", (Node *)makeInteger(true), @1); } | FREEZE { - $$ = makeDefElem("freeze", (Node *)makeInteger(TRUE), @1); + $$ = makeDefElem("freeze", (Node *)makeInteger(true), @1); } | DELIMITER opt_as Sconst { @@ -2926,7 +3076,7 @@ copy_opt_item: } | HEADER_P { - $$ = makeDefElem("header", (Node *)makeInteger(TRUE), @1); + $$ = makeDefElem("header", (Node *)makeInteger(true), @1); } | QUOTE opt_as Sconst { @@ -2971,7 +3121,7 @@ opt_binary: opt_oids: WITH OIDS { - $$ = makeDefElem("oids", (Node *)makeInteger(TRUE), @1); + $$ = makeDefElem("oids", (Node *)makeInteger(true), @1); } | /*EMPTY*/ { $$ = NULL; } ; @@ -3114,7 +3264,7 @@ CreateStmt: CREATE OptTemp TABLE qualified_name '(' OptTableElementList ')' $$ = (Node *)n; } | CREATE OptTemp TABLE qualified_name PARTITION OF qualified_name - OptTypedTableElementList ForValues OptPartitionSpec OptWith + OptTypedTableElementList PartitionBoundSpec OptPartitionSpec OptWith OnCommitOption OptTableSpace { CreateStmt *n = makeNode(CreateStmt); @@ -3133,7 +3283,7 @@ CreateStmt: CREATE OptTemp TABLE qualified_name '(' OptTableElementList ')' $$ = (Node *)n; } | CREATE OptTemp TABLE IF_P NOT EXISTS qualified_name PARTITION OF - qualified_name OptTypedTableElementList ForValues OptPartitionSpec + qualified_name OptTypedTableElementList PartitionBoundSpec OptPartitionSpec OptWith OnCommitOption OptTableSpace { CreateStmt *n = makeNode(CreateStmt); @@ -3238,7 +3388,6 @@ columnDef: ColId Typename create_generic_options ColQualList n->is_local = true; n->is_not_null = false; n->is_from_type = false; - n->is_from_parent = false; n->storage = 0; n->raw_default = NULL; n->cooked_default = NULL; @@ -3260,7 +3409,6 @@ columnOptions: ColId ColQualList n->is_local = true; n->is_not_null = false; n->is_from_type = false; - n->is_from_parent = false; n->storage = 0; n->raw_default = NULL; n->cooked_default = NULL; @@ -3279,7 +3427,6 @@ columnOptions: ColId ColQualList n->is_local = true; n->is_not_null = false; n->is_from_type = false; - n->is_from_parent = false; n->storage = 0; n->raw_default = NULL; n->cooked_default = NULL; @@ -3489,12 +3636,13 @@ TableLikeOptionList: ; TableLikeOption: - DEFAULTS { $$ = CREATE_TABLE_LIKE_DEFAULTS; } + COMMENTS { $$ = CREATE_TABLE_LIKE_COMMENTS; } | CONSTRAINTS { $$ = CREATE_TABLE_LIKE_CONSTRAINTS; } + | DEFAULTS { $$ = CREATE_TABLE_LIKE_DEFAULTS; } | IDENTITY_P { $$ = CREATE_TABLE_LIKE_IDENTITY; } | INDEXES { $$ = CREATE_TABLE_LIKE_INDEXES; } + | STATISTICS { $$ = CREATE_TABLE_LIKE_STATISTICS; } | STORAGE { $$ = CREATE_TABLE_LIKE_STORAGE; } - | COMMENTS { $$ = CREATE_TABLE_LIKE_COMMENTS; } | ALL { $$ = CREATE_TABLE_LIKE_ALL; } ; @@ -3528,17 +3676,18 @@ ConstraintElem: n->initially_valid = !n->skip_validation; $$ = (Node 
*)n; } - | UNIQUE '(' columnList ')' opt_definition OptConsTableSpace + | UNIQUE '(' columnList ')' opt_c_include opt_definition OptConsTableSpace ConstraintAttributeSpec { Constraint *n = makeNode(Constraint); n->contype = CONSTR_UNIQUE; n->location = @1; n->keys = $3; - n->options = $5; + n->including = $5; + n->options = $6; n->indexname = NULL; - n->indexspace = $6; - processCASbits($7, @7, "UNIQUE", + n->indexspace = $7; + processCASbits($8, @8, "UNIQUE", &n->deferrable, &n->initdeferred, NULL, NULL, yyscanner); $$ = (Node *)n; @@ -3549,6 +3698,7 @@ ConstraintElem: n->contype = CONSTR_UNIQUE; n->location = @1; n->keys = NIL; + n->including = NIL; n->options = NIL; n->indexname = $2; n->indexspace = NULL; @@ -3557,17 +3707,18 @@ ConstraintElem: NULL, yyscanner); $$ = (Node *)n; } - | PRIMARY KEY '(' columnList ')' opt_definition OptConsTableSpace + | PRIMARY KEY '(' columnList ')' opt_c_include opt_definition OptConsTableSpace ConstraintAttributeSpec { Constraint *n = makeNode(Constraint); n->contype = CONSTR_PRIMARY; n->location = @1; n->keys = $4; - n->options = $6; + n->including = $6; + n->options = $7; n->indexname = NULL; - n->indexspace = $7; - processCASbits($8, @8, "PRIMARY KEY", + n->indexspace = $8; + processCASbits($9, @9, "PRIMARY KEY", &n->deferrable, &n->initdeferred, NULL, NULL, yyscanner); $$ = (Node *)n; @@ -3578,6 +3729,7 @@ ConstraintElem: n->contype = CONSTR_PRIMARY; n->location = @1; n->keys = NIL; + n->including = NIL; n->options = NIL; n->indexname = $3; n->indexspace = NULL; @@ -3587,7 +3739,7 @@ ConstraintElem: $$ = (Node *)n; } | EXCLUDE access_method_clause '(' ExclusionConstraintList ')' - opt_definition OptConsTableSpace ExclusionWhereClause + opt_c_include opt_definition OptConsTableSpace ExclusionWhereClause ConstraintAttributeSpec { Constraint *n = makeNode(Constraint); @@ -3595,11 +3747,12 @@ ConstraintElem: n->location = @1; n->access_method = $2; n->exclusions = $4; - n->options = $6; + n->including = $6; + n->options = $7; n->indexname = NULL; - n->indexspace = $7; - n->where_clause = $8; - processCASbits($9, @9, "EXCLUDE", + n->indexspace = $8; + n->where_clause = $9; + processCASbits($10, @10, "EXCLUDE", &n->deferrable, &n->initdeferred, NULL, NULL, yyscanner); $$ = (Node *)n; @@ -3625,8 +3778,8 @@ ConstraintElem: } ; -opt_no_inherit: NO INHERIT { $$ = TRUE; } - | /* EMPTY */ { $$ = FALSE; } +opt_no_inherit: NO INHERIT { $$ = true; } + | /* EMPTY */ { $$ = false; } ; opt_column_list: @@ -3645,6 +3798,10 @@ columnElem: ColId } ; +opt_c_include: INCLUDE '(' columnList ')' { $$ = $3; } + | /* EMPTY */ { $$ = NIL; } + ; + key_match: MATCH FULL { $$ = FKCONSTR_MATCH_FULL; @@ -3833,6 +3990,7 @@ CreateStatsStmt: n->stat_types = $4; n->exprs = $6; n->relations = $8; + n->stxcomment = NULL; n->if_not_exists = false; $$ = (Node *)n; } @@ -3844,6 +4002,7 @@ CreateStatsStmt: n->stat_types = $7; n->exprs = $9; n->relations = $11; + n->stxcomment = NULL; n->if_not_exists = true; $$ = (Node *)n; } @@ -3903,9 +4062,9 @@ create_as_target: ; opt_with_data: - WITH DATA_P { $$ = TRUE; } - | WITH NO DATA_P { $$ = FALSE; } - | /*EMPTY*/ { $$ = TRUE; } + WITH DATA_P { $$ = true; } + | WITH NO DATA_P { $$ = false; } + | /*EMPTY*/ { $$ = true; } ; @@ -4056,11 +4215,11 @@ SeqOptElem: AS SimpleTypename } | CYCLE { - $$ = makeDefElem("cycle", (Node *)makeInteger(TRUE), @1); + $$ = makeDefElem("cycle", (Node *)makeInteger(true), @1); } | NO CYCLE { - $$ = makeDefElem("cycle", (Node *)makeInteger(FALSE), @1); + $$ = makeDefElem("cycle", (Node *)makeInteger(false), @1); } | 
INCREMENT opt_by NumericOnly { @@ -4160,8 +4319,8 @@ CreatePLangStmt: ; opt_trusted: - TRUSTED { $$ = TRUE; } - | /*EMPTY*/ { $$ = FALSE; } + TRUSTED { $$ = true; } + | /*EMPTY*/ { $$ = false; } ; /* This ought to be just func_name, but that causes reduce/reduce conflicts @@ -4312,7 +4471,7 @@ create_extension_opt_item: } | CASCADE { - $$ = makeDefElem("cascade", (Node *)makeInteger(TRUE), @1); + $$ = makeDefElem("cascade", (Node *)makeInteger(true), @1); } ; @@ -4451,6 +4610,24 @@ AlterExtensionContentsStmt: n->object = (Node *) lcons(makeString($9), $7); $$ = (Node *)n; } + | ALTER EXTENSION name add_drop PROCEDURE function_with_argtypes + { + AlterExtensionContentsStmt *n = makeNode(AlterExtensionContentsStmt); + n->extname = $3; + n->action = $4; + n->objtype = OBJECT_PROCEDURE; + n->object = (Node *) $6; + $$ = (Node *)n; + } + | ALTER EXTENSION name add_drop ROUTINE function_with_argtypes + { + AlterExtensionContentsStmt *n = makeNode(AlterExtensionContentsStmt); + n->extname = $3; + n->action = $4; + n->objtype = OBJECT_ROUTINE; + n->object = (Node *) $6; + $$ = (Node *)n; + } | ALTER EXTENSION name add_drop SCHEMA name { AlterExtensionContentsStmt *n = makeNode(AlterExtensionContentsStmt); @@ -4848,7 +5025,7 @@ CreateForeignTableStmt: $$ = (Node *) n; } | CREATE FOREIGN TABLE qualified_name - PARTITION OF qualified_name OptTypedTableElementList ForValues + PARTITION OF qualified_name OptTypedTableElementList PartitionBoundSpec SERVER name create_generic_options { CreateForeignTableStmt *n = makeNode(CreateForeignTableStmt); @@ -4869,7 +5046,7 @@ CreateForeignTableStmt: $$ = (Node *) n; } | CREATE FOREIGN TABLE IF_P NOT EXISTS qualified_name - PARTITION OF qualified_name OptTypedTableElementList ForValues + PARTITION OF qualified_name OptTypedTableElementList PartitionBoundSpec SERVER name create_generic_options { CreateForeignTableStmt *n = makeNode(CreateForeignTableStmt); @@ -5163,7 +5340,7 @@ CreateAmStmt: CREATE ACCESS METHOD name TYPE_P INDEX HANDLER handler_name CreateTrigStmt: CREATE TRIGGER name TriggerActionTime TriggerEvents ON qualified_name TriggerReferencing TriggerForSpec TriggerWhen - EXECUTE PROCEDURE func_name '(' TriggerFuncArgs ')' + EXECUTE FUNCTION_or_PROCEDURE func_name '(' TriggerFuncArgs ')' { CreateTrigStmt *n = makeNode(CreateTrigStmt); n->trigname = $3; @@ -5176,29 +5353,29 @@ CreateTrigStmt: n->columns = (List *) lsecond($5); n->whenClause = $10; n->transitionRels = $8; - n->isconstraint = FALSE; - n->deferrable = FALSE; - n->initdeferred = FALSE; + n->isconstraint = false; + n->deferrable = false; + n->initdeferred = false; n->constrrel = NULL; $$ = (Node *)n; } | CREATE CONSTRAINT TRIGGER name AFTER TriggerEvents ON qualified_name OptConstrFromTable ConstraintAttributeSpec FOR EACH ROW TriggerWhen - EXECUTE PROCEDURE func_name '(' TriggerFuncArgs ')' + EXECUTE FUNCTION_or_PROCEDURE func_name '(' TriggerFuncArgs ')' { CreateTrigStmt *n = makeNode(CreateTrigStmt); n->trigname = $4; n->relation = $8; n->funcname = $17; n->args = $19; - n->row = TRUE; + n->row = true; n->timing = TRIGGER_TYPE_AFTER; n->events = intVal(linitial($6)); n->columns = (List *) lsecond($6); n->whenClause = $14; n->transitionRels = NIL; - n->isconstraint = TRUE; + n->isconstraint = true; processCASbits($10, @10, "TRIGGER", &n->deferrable, &n->initdeferred, NULL, NULL, yyscanner); @@ -5272,12 +5449,12 @@ TriggerTransition: ; TransitionOldOrNew: - NEW { $$ = TRUE; } - | OLD { $$ = FALSE; } + NEW { $$ = true; } + | OLD { $$ = false; } ; TransitionRowOrTable: - TABLE { $$ = TRUE; } + 
TABLE { $$ = true; } /* * According to the standard, lack of a keyword here implies ROW. * Support for that would require prohibiting ROW entirely here, @@ -5286,7 +5463,7 @@ TransitionRowOrTable: * next token. Requiring ROW seems cleanest and easiest to * explain. */ - | ROW { $$ = FALSE; } + | ROW { $$ = false; } ; TransitionRelName: @@ -5304,7 +5481,7 @@ TriggerForSpec: * If ROW/STATEMENT not specified, default to * STATEMENT, per SQL */ - $$ = FALSE; + $$ = false; } ; @@ -5314,8 +5491,8 @@ TriggerForOptEach: ; TriggerForType: - ROW { $$ = TRUE; } - | STATEMENT { $$ = FALSE; } + ROW { $$ = true; } + | STATEMENT { $$ = false; } ; TriggerWhen: @@ -5323,6 +5500,11 @@ TriggerWhen: | /*EMPTY*/ { $$ = NULL; } ; +FUNCTION_or_PROCEDURE: + FUNCTION + | PROCEDURE + ; + TriggerFuncArgs: TriggerFuncArg { $$ = list_make1($1); } | TriggerFuncArgs ',' TriggerFuncArg { $$ = lappend($1, $3); } @@ -5393,7 +5575,7 @@ ConstraintAttributeElem: CreateEventTrigStmt: CREATE EVENT TRIGGER name ON ColLabel - EXECUTE PROCEDURE func_name '(' ')' + EXECUTE FUNCTION_or_PROCEDURE func_name '(' ')' { CreateEventTrigStmt *n = makeNode(CreateEventTrigStmt); n->trigname = $4; @@ -5404,7 +5586,7 @@ CreateEventTrigStmt: } | CREATE EVENT TRIGGER name ON ColLabel WHEN event_trigger_when_list - EXECUTE PROCEDURE func_name '(' ')' + EXECUTE FUNCTION_or_PROCEDURE func_name '(' ')' { CreateEventTrigStmt *n = makeNode(CreateEventTrigStmt); n->trigname = $4; @@ -5453,43 +5635,19 @@ enable_trigger: /***************************************************************************** * - * QUERIES : + * QUERY : * CREATE ASSERTION ... - * DROP ASSERTION ... * *****************************************************************************/ -CreateAssertStmt: - CREATE ASSERTION name CHECK '(' a_expr ')' - ConstraintAttributeSpec +CreateAssertionStmt: + CREATE ASSERTION any_name CHECK '(' a_expr ')' ConstraintAttributeSpec { - CreateTrigStmt *n = makeNode(CreateTrigStmt); - n->trigname = $3; - n->args = list_make1($6); - n->isconstraint = TRUE; - processCASbits($8, @8, "ASSERTION", - &n->deferrable, &n->initdeferred, NULL, - NULL, yyscanner); - ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), errmsg("CREATE ASSERTION is not yet implemented"))); - $$ = (Node *)n; - } - ; - -DropAssertStmt: - DROP ASSERTION name opt_drop_behavior - { - DropStmt *n = makeNode(DropStmt); - n->objects = NIL; - n->behavior = $4; - n->removeType = OBJECT_TRIGGER; /* XXX */ - ereport(ERROR, - (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("DROP ASSERTION is not yet implemented"))); - $$ = (Node *) n; + $$ = NULL; } ; @@ -5846,8 +6004,8 @@ opclass_item: } ; -opt_default: DEFAULT { $$ = TRUE; } - | /*EMPTY*/ { $$ = FALSE; } +opt_default: DEFAULT { $$ = true; } + | /*EMPTY*/ { $$ = false; } ; opt_opfamily: FAMILY any_name { $$ = $2; } @@ -5871,9 +6029,9 @@ opt_recheck: RECHECK errmsg("RECHECK is no longer required"), errhint("Update your data type."), parser_errposition(@1))); - $$ = TRUE; + $$ = true; } - | /*EMPTY*/ { $$ = FALSE; } + | /*EMPTY*/ { $$ = false; } ; @@ -6021,7 +6179,7 @@ DropStmt: DROP drop_type_any_name IF_P EXISTS any_name_list opt_drop_behavior { DropStmt *n = makeNode(DropStmt); n->removeType = $2; - n->missing_ok = TRUE; + n->missing_ok = true; n->objects = $5; n->behavior = $6; n->concurrent = false; @@ -6031,7 +6189,7 @@ DropStmt: DROP drop_type_any_name IF_P EXISTS any_name_list opt_drop_behavior { DropStmt *n = makeNode(DropStmt); n->removeType = $2; - n->missing_ok = FALSE; + n->missing_ok = false; n->objects = $3; n->behavior = $4; 
n->concurrent = false; @@ -6041,7 +6199,7 @@ DropStmt: DROP drop_type_any_name IF_P EXISTS any_name_list opt_drop_behavior { DropStmt *n = makeNode(DropStmt); n->removeType = $2; - n->missing_ok = TRUE; + n->missing_ok = true; n->objects = $5; n->behavior = $6; n->concurrent = false; @@ -6051,7 +6209,7 @@ DropStmt: DROP drop_type_any_name IF_P EXISTS any_name_list opt_drop_behavior { DropStmt *n = makeNode(DropStmt); n->removeType = $2; - n->missing_ok = FALSE; + n->missing_ok = false; n->objects = $3; n->behavior = $4; n->concurrent = false; @@ -6081,7 +6239,7 @@ DropStmt: DROP drop_type_any_name IF_P EXISTS any_name_list opt_drop_behavior { DropStmt *n = makeNode(DropStmt); n->removeType = OBJECT_TYPE; - n->missing_ok = FALSE; + n->missing_ok = false; n->objects = $3; n->behavior = $4; n->concurrent = false; @@ -6091,7 +6249,7 @@ DropStmt: DROP drop_type_any_name IF_P EXISTS any_name_list opt_drop_behavior { DropStmt *n = makeNode(DropStmt); n->removeType = OBJECT_TYPE; - n->missing_ok = TRUE; + n->missing_ok = true; n->objects = $5; n->behavior = $6; n->concurrent = false; @@ -6101,7 +6259,7 @@ DropStmt: DROP drop_type_any_name IF_P EXISTS any_name_list opt_drop_behavior { DropStmt *n = makeNode(DropStmt); n->removeType = OBJECT_DOMAIN; - n->missing_ok = FALSE; + n->missing_ok = false; n->objects = $3; n->behavior = $4; n->concurrent = false; @@ -6111,7 +6269,7 @@ DropStmt: DROP drop_type_any_name IF_P EXISTS any_name_list opt_drop_behavior { DropStmt *n = makeNode(DropStmt); n->removeType = OBJECT_DOMAIN; - n->missing_ok = TRUE; + n->missing_ok = true; n->objects = $5; n->behavior = $6; n->concurrent = false; @@ -6121,7 +6279,7 @@ DropStmt: DROP drop_type_any_name IF_P EXISTS any_name_list opt_drop_behavior { DropStmt *n = makeNode(DropStmt); n->removeType = OBJECT_INDEX; - n->missing_ok = FALSE; + n->missing_ok = false; n->objects = $4; n->behavior = $5; n->concurrent = true; @@ -6131,7 +6289,7 @@ DropStmt: DROP drop_type_any_name IF_P EXISTS any_name_list opt_drop_behavior { DropStmt *n = makeNode(DropStmt); n->removeType = OBJECT_INDEX; - n->missing_ok = TRUE; + n->missing_ok = true; n->objects = $6; n->behavior = $7; n->concurrent = true; @@ -6333,6 +6491,22 @@ CommentStmt: n->comment = $8; $$ = (Node *) n; } + | COMMENT ON PROCEDURE function_with_argtypes IS comment_text + { + CommentStmt *n = makeNode(CommentStmt); + n->objtype = OBJECT_PROCEDURE; + n->object = (Node *) $4; + n->comment = $6; + $$ = (Node *) n; + } + | COMMENT ON ROUTINE function_with_argtypes IS comment_text + { + CommentStmt *n = makeNode(CommentStmt); + n->objtype = OBJECT_ROUTINE; + n->object = (Node *) $4; + n->comment = $6; + $$ = (Node *) n; + } | COMMENT ON RULE name ON any_name IS comment_text { CommentStmt *n = makeNode(CommentStmt); @@ -6511,6 +6685,26 @@ SecLabelStmt: n->label = $9; $$ = (Node *) n; } + | SECURITY LABEL opt_provider ON PROCEDURE function_with_argtypes + IS security_label + { + SecLabelStmt *n = makeNode(SecLabelStmt); + n->provider = $3; + n->objtype = OBJECT_PROCEDURE; + n->object = (Node *) $6; + n->label = $8; + $$ = (Node *) n; + } + | SECURITY LABEL opt_provider ON ROUTINE function_with_argtypes + IS security_label + { + SecLabelStmt *n = makeNode(SecLabelStmt); + n->provider = $3; + n->objtype = OBJECT_ROUTINE; + n->object = (Node *) $6; + n->label = $8; + $$ = (Node *) n; + } ; opt_provider: FOR NonReservedWord_or_Sconst { $$ = $2; } @@ -6553,13 +6747,13 @@ security_label: Sconst { $$ = $1; } FetchStmt: FETCH fetch_args { FetchStmt *n = (FetchStmt *) $2; - n->ismove = FALSE; + 
n->ismove = false; $$ = (Node *)n; } | MOVE fetch_args { FetchStmt *n = (FetchStmt *) $2; - n->ismove = TRUE; + n->ismove = true; $$ = (Node *)n; } ; @@ -6830,7 +7024,7 @@ privilege_target: { PrivTarget *n = (PrivTarget *) palloc(sizeof(PrivTarget)); n->targtype = ACL_TARGET_OBJECT; - n->objtype = ACL_OBJECT_RELATION; + n->objtype = OBJECT_TABLE; n->objs = $1; $$ = n; } @@ -6838,7 +7032,7 @@ privilege_target: { PrivTarget *n = (PrivTarget *) palloc(sizeof(PrivTarget)); n->targtype = ACL_TARGET_OBJECT; - n->objtype = ACL_OBJECT_RELATION; + n->objtype = OBJECT_TABLE; n->objs = $2; $$ = n; } @@ -6846,7 +7040,7 @@ privilege_target: { PrivTarget *n = (PrivTarget *) palloc(sizeof(PrivTarget)); n->targtype = ACL_TARGET_OBJECT; - n->objtype = ACL_OBJECT_SEQUENCE; + n->objtype = OBJECT_SEQUENCE; n->objs = $2; $$ = n; } @@ -6854,7 +7048,7 @@ privilege_target: { PrivTarget *n = (PrivTarget *) palloc(sizeof(PrivTarget)); n->targtype = ACL_TARGET_OBJECT; - n->objtype = ACL_OBJECT_FDW; + n->objtype = OBJECT_FDW; n->objs = $4; $$ = n; } @@ -6862,7 +7056,7 @@ privilege_target: { PrivTarget *n = (PrivTarget *) palloc(sizeof(PrivTarget)); n->targtype = ACL_TARGET_OBJECT; - n->objtype = ACL_OBJECT_FOREIGN_SERVER; + n->objtype = OBJECT_FOREIGN_SERVER; n->objs = $3; $$ = n; } @@ -6870,7 +7064,23 @@ privilege_target: { PrivTarget *n = (PrivTarget *) palloc(sizeof(PrivTarget)); n->targtype = ACL_TARGET_OBJECT; - n->objtype = ACL_OBJECT_FUNCTION; + n->objtype = OBJECT_FUNCTION; + n->objs = $2; + $$ = n; + } + | PROCEDURE function_with_argtypes_list + { + PrivTarget *n = (PrivTarget *) palloc(sizeof(PrivTarget)); + n->targtype = ACL_TARGET_OBJECT; + n->objtype = OBJECT_PROCEDURE; + n->objs = $2; + $$ = n; + } + | ROUTINE function_with_argtypes_list + { + PrivTarget *n = (PrivTarget *) palloc(sizeof(PrivTarget)); + n->targtype = ACL_TARGET_OBJECT; + n->objtype = OBJECT_ROUTINE; n->objs = $2; $$ = n; } @@ -6878,7 +7088,7 @@ privilege_target: { PrivTarget *n = (PrivTarget *) palloc(sizeof(PrivTarget)); n->targtype = ACL_TARGET_OBJECT; - n->objtype = ACL_OBJECT_DATABASE; + n->objtype = OBJECT_DATABASE; n->objs = $2; $$ = n; } @@ -6886,7 +7096,7 @@ privilege_target: { PrivTarget *n = (PrivTarget *) palloc(sizeof(PrivTarget)); n->targtype = ACL_TARGET_OBJECT; - n->objtype = ACL_OBJECT_DOMAIN; + n->objtype = OBJECT_DOMAIN; n->objs = $2; $$ = n; } @@ -6894,7 +7104,7 @@ privilege_target: { PrivTarget *n = (PrivTarget *) palloc(sizeof(PrivTarget)); n->targtype = ACL_TARGET_OBJECT; - n->objtype = ACL_OBJECT_LANGUAGE; + n->objtype = OBJECT_LANGUAGE; n->objs = $2; $$ = n; } @@ -6902,7 +7112,7 @@ privilege_target: { PrivTarget *n = (PrivTarget *) palloc(sizeof(PrivTarget)); n->targtype = ACL_TARGET_OBJECT; - n->objtype = ACL_OBJECT_LARGEOBJECT; + n->objtype = OBJECT_LARGEOBJECT; n->objs = $3; $$ = n; } @@ -6910,7 +7120,7 @@ privilege_target: { PrivTarget *n = (PrivTarget *) palloc(sizeof(PrivTarget)); n->targtype = ACL_TARGET_OBJECT; - n->objtype = ACL_OBJECT_NAMESPACE; + n->objtype = OBJECT_SCHEMA; n->objs = $2; $$ = n; } @@ -6918,7 +7128,7 @@ privilege_target: { PrivTarget *n = (PrivTarget *) palloc(sizeof(PrivTarget)); n->targtype = ACL_TARGET_OBJECT; - n->objtype = ACL_OBJECT_TABLESPACE; + n->objtype = OBJECT_TABLESPACE; n->objs = $2; $$ = n; } @@ -6926,7 +7136,7 @@ privilege_target: { PrivTarget *n = (PrivTarget *) palloc(sizeof(PrivTarget)); n->targtype = ACL_TARGET_OBJECT; - n->objtype = ACL_OBJECT_TYPE; + n->objtype = OBJECT_TYPE; n->objs = $2; $$ = n; } @@ -6934,7 +7144,7 @@ privilege_target: { PrivTarget *n = 
(PrivTarget *) palloc(sizeof(PrivTarget)); n->targtype = ACL_TARGET_ALL_IN_SCHEMA; - n->objtype = ACL_OBJECT_RELATION; + n->objtype = OBJECT_TABLE; n->objs = $5; $$ = n; } @@ -6942,7 +7152,7 @@ privilege_target: { PrivTarget *n = (PrivTarget *) palloc(sizeof(PrivTarget)); n->targtype = ACL_TARGET_ALL_IN_SCHEMA; - n->objtype = ACL_OBJECT_SEQUENCE; + n->objtype = OBJECT_SEQUENCE; n->objs = $5; $$ = n; } @@ -6950,7 +7160,23 @@ privilege_target: { PrivTarget *n = (PrivTarget *) palloc(sizeof(PrivTarget)); n->targtype = ACL_TARGET_ALL_IN_SCHEMA; - n->objtype = ACL_OBJECT_FUNCTION; + n->objtype = OBJECT_FUNCTION; + n->objs = $5; + $$ = n; + } + | ALL PROCEDURES IN_P SCHEMA name_list + { + PrivTarget *n = (PrivTarget *) palloc(sizeof(PrivTarget)); + n->targtype = ACL_TARGET_ALL_IN_SCHEMA; + n->objtype = OBJECT_PROCEDURE; + n->objs = $5; + $$ = n; + } + | ALL ROUTINES IN_P SCHEMA name_list + { + PrivTarget *n = (PrivTarget *) palloc(sizeof(PrivTarget)); + n->targtype = ACL_TARGET_ALL_IN_SCHEMA; + n->objtype = OBJECT_ROUTINE; n->objs = $5; $$ = n; } @@ -6969,8 +7195,8 @@ grantee: opt_grant_grant_option: - WITH GRANT OPTION { $$ = TRUE; } - | /*EMPTY*/ { $$ = FALSE; } + WITH GRANT OPTION { $$ = true; } + | /*EMPTY*/ { $$ = false; } ; /***************************************************************************** @@ -7015,8 +7241,8 @@ RevokeRoleStmt: } ; -opt_grant_admin_option: WITH ADMIN OPTION { $$ = TRUE; } - | /*EMPTY*/ { $$ = FALSE; } +opt_grant_admin_option: WITH ADMIN OPTION { $$ = true; } + | /*EMPTY*/ { $$ = false; } ; opt_granted_by: GRANTED BY RoleSpec { $$ = $3; } @@ -7108,11 +7334,12 @@ DefACLAction: ; defacl_privilege_target: - TABLES { $$ = ACL_OBJECT_RELATION; } - | FUNCTIONS { $$ = ACL_OBJECT_FUNCTION; } - | SEQUENCES { $$ = ACL_OBJECT_SEQUENCE; } - | TYPES_P { $$ = ACL_OBJECT_TYPE; } - | SCHEMAS { $$ = ACL_OBJECT_NAMESPACE; } + TABLES { $$ = OBJECT_TABLE; } + | FUNCTIONS { $$ = OBJECT_FUNCTION; } + | ROUTINES { $$ = OBJECT_FUNCTION; } + | SEQUENCES { $$ = OBJECT_SEQUENCE; } + | TYPES_P { $$ = OBJECT_TYPE; } + | SCHEMAS { $$ = OBJECT_SCHEMA; } ; @@ -7125,19 +7352,21 @@ defacl_privilege_target: *****************************************************************************/ IndexStmt: CREATE opt_unique INDEX opt_concurrently opt_index_name - ON qualified_name access_method_clause '(' index_params ')' - opt_reloptions OptTableSpace where_clause + ON relation_expr access_method_clause '(' index_params ')' + opt_include opt_reloptions OptTableSpace where_clause { IndexStmt *n = makeNode(IndexStmt); n->unique = $2; n->concurrent = $4; n->idxname = $5; n->relation = $7; + n->relationId = InvalidOid; n->accessMethod = $8; n->indexParams = $10; - n->options = $12; - n->tableSpace = $13; - n->whereClause = $14; + n->indexIncludingParams = $12; + n->options = $13; + n->tableSpace = $14; + n->whereClause = $15; n->excludeOpNames = NIL; n->idxcomment = NULL; n->indexOid = InvalidOid; @@ -7151,19 +7380,21 @@ IndexStmt: CREATE opt_unique INDEX opt_concurrently opt_index_name $$ = (Node *)n; } | CREATE opt_unique INDEX opt_concurrently IF_P NOT EXISTS index_name - ON qualified_name access_method_clause '(' index_params ')' - opt_reloptions OptTableSpace where_clause + ON relation_expr access_method_clause '(' index_params ')' + opt_include opt_reloptions OptTableSpace where_clause { IndexStmt *n = makeNode(IndexStmt); n->unique = $2; n->concurrent = $4; n->idxname = $8; n->relation = $10; + n->relationId = InvalidOid; n->accessMethod = $11; n->indexParams = $13; - n->options = $15; - n->tableSpace = 
$16; - n->whereClause = $17; + n->indexIncludingParams = $15; + n->options = $16; + n->tableSpace = $17; + n->whereClause = $18; n->excludeOpNames = NIL; n->idxcomment = NULL; n->indexOid = InvalidOid; @@ -7179,13 +7410,13 @@ IndexStmt: CREATE opt_unique INDEX opt_concurrently opt_index_name ; opt_unique: - UNIQUE { $$ = TRUE; } - | /*EMPTY*/ { $$ = FALSE; } + UNIQUE { $$ = true; } + | /*EMPTY*/ { $$ = false; } ; opt_concurrently: - CONCURRENTLY { $$ = TRUE; } - | /*EMPTY*/ { $$ = FALSE; } + CONCURRENTLY { $$ = true; } + | /*EMPTY*/ { $$ = false; } ; opt_index_name: @@ -7242,6 +7473,14 @@ index_elem: ColId opt_collate opt_class opt_asc_desc opt_nulls_order } ; +opt_include: INCLUDE '(' index_including_params ')' { $$ = $3; } + | /* EMPTY */ { $$ = NIL; } + ; + +index_including_params: index_elem { $$ = list_make1($1); } + | index_including_params ',' index_elem { $$ = lappend($1, $3); } + ; + opt_collate: COLLATE any_name { $$ = $2; } | /*EMPTY*/ { $$ = NIL; } ; @@ -7274,47 +7513,59 @@ opt_nulls_order: NULLS_LA FIRST_P { $$ = SORTBY_NULLS_FIRST; } CreateFunctionStmt: CREATE opt_or_replace FUNCTION func_name func_args_with_defaults - RETURNS func_return createfunc_opt_list opt_definition + RETURNS func_return createfunc_opt_list { CreateFunctionStmt *n = makeNode(CreateFunctionStmt); + n->is_procedure = false; n->replace = $2; n->funcname = $4; n->parameters = $5; n->returnType = $7; n->options = $8; - n->withClause = $9; $$ = (Node *)n; } | CREATE opt_or_replace FUNCTION func_name func_args_with_defaults - RETURNS TABLE '(' table_func_column_list ')' createfunc_opt_list opt_definition + RETURNS TABLE '(' table_func_column_list ')' createfunc_opt_list { CreateFunctionStmt *n = makeNode(CreateFunctionStmt); + n->is_procedure = false; n->replace = $2; n->funcname = $4; n->parameters = mergeTableFuncParameters($5, $9); n->returnType = TableFuncTypeName($9); n->returnType->location = @7; n->options = $11; - n->withClause = $12; $$ = (Node *)n; } | CREATE opt_or_replace FUNCTION func_name func_args_with_defaults - createfunc_opt_list opt_definition + createfunc_opt_list + { + CreateFunctionStmt *n = makeNode(CreateFunctionStmt); + n->is_procedure = false; + n->replace = $2; + n->funcname = $4; + n->parameters = $5; + n->returnType = NULL; + n->options = $6; + $$ = (Node *)n; + } + | CREATE opt_or_replace PROCEDURE func_name func_args_with_defaults + createfunc_opt_list { CreateFunctionStmt *n = makeNode(CreateFunctionStmt); + n->is_procedure = true; n->replace = $2; n->funcname = $4; n->parameters = $5; n->returnType = NULL; n->options = $6; - n->withClause = $7; $$ = (Node *)n; } ; opt_or_replace: - OR REPLACE { $$ = TRUE; } - | /*EMPTY*/ { $$ = FALSE; } + OR REPLACE { $$ = true; } + | /*EMPTY*/ { $$ = false; } ; func_args: '(' func_args_list ')' { $$ = $2; } @@ -7483,7 +7734,7 @@ func_type: Typename { $$ = $1; } { $$ = makeTypeNameFromNameList(lcons(makeString($2), $3)); $$->pct_type = true; - $$->setof = TRUE; + $$->setof = true; $$->location = @2; } ; @@ -7599,15 +7850,15 @@ createfunc_opt_list: common_func_opt_item: CALLED ON NULL_P INPUT_P { - $$ = makeDefElem("strict", (Node *)makeInteger(FALSE), @1); + $$ = makeDefElem("strict", (Node *)makeInteger(false), @1); } | RETURNS NULL_P ON NULL_P INPUT_P { - $$ = makeDefElem("strict", (Node *)makeInteger(TRUE), @1); + $$ = makeDefElem("strict", (Node *)makeInteger(true), @1); } | STRICT_P { - $$ = makeDefElem("strict", (Node *)makeInteger(TRUE), @1); + $$ = makeDefElem("strict", (Node *)makeInteger(true), @1); } | IMMUTABLE { @@ -7623,27 
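The IndexStmt changes above add an opt_include slot after the key column list, so an index can carry extra non-key (covering) columns. A minimal sketch with hypothetical table and column names:

    CREATE UNIQUE INDEX orders_id_idx ON orders (order_id)
        INCLUDE (customer_id, total_amount);

The INCLUDE list is parsed into indexIncludingParams, kept separate from the ordinary indexParams.
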
+7874,27 @@ common_func_opt_item: } | EXTERNAL SECURITY DEFINER { - $$ = makeDefElem("security", (Node *)makeInteger(TRUE), @1); + $$ = makeDefElem("security", (Node *)makeInteger(true), @1); } | EXTERNAL SECURITY INVOKER { - $$ = makeDefElem("security", (Node *)makeInteger(FALSE), @1); + $$ = makeDefElem("security", (Node *)makeInteger(false), @1); } | SECURITY DEFINER { - $$ = makeDefElem("security", (Node *)makeInteger(TRUE), @1); + $$ = makeDefElem("security", (Node *)makeInteger(true), @1); } | SECURITY INVOKER { - $$ = makeDefElem("security", (Node *)makeInteger(FALSE), @1); + $$ = makeDefElem("security", (Node *)makeInteger(false), @1); } | LEAKPROOF { - $$ = makeDefElem("leakproof", (Node *)makeInteger(TRUE), @1); + $$ = makeDefElem("leakproof", (Node *)makeInteger(true), @1); } | NOT LEAKPROOF { - $$ = makeDefElem("leakproof", (Node *)makeInteger(FALSE), @1); + $$ = makeDefElem("leakproof", (Node *)makeInteger(false), @1); } | COST NumericOnly { @@ -7679,7 +7930,7 @@ createfunc_opt_item: } | WINDOW { - $$ = makeDefElem("window", (Node *)makeInteger(TRUE), @1); + $$ = makeDefElem("window", (Node *)makeInteger(true), @1); } | common_func_opt_item { @@ -7727,7 +7978,7 @@ table_func_column_list: ; /***************************************************************************** - * ALTER FUNCTION + * ALTER FUNCTION / ALTER PROCEDURE / ALTER ROUTINE * * RENAME and OWNER subcommands are already provided by the generic * ALTER infrastructure, here we just specify alterations that can @@ -7738,6 +7989,23 @@ AlterFunctionStmt: ALTER FUNCTION function_with_argtypes alterfunc_opt_list opt_restrict { AlterFunctionStmt *n = makeNode(AlterFunctionStmt); + n->objtype = OBJECT_FUNCTION; + n->func = $3; + n->actions = $4; + $$ = (Node *) n; + } + | ALTER PROCEDURE function_with_argtypes alterfunc_opt_list opt_restrict + { + AlterFunctionStmt *n = makeNode(AlterFunctionStmt); + n->objtype = OBJECT_PROCEDURE; + n->func = $3; + n->actions = $4; + $$ = (Node *) n; + } + | ALTER ROUTINE function_with_argtypes alterfunc_opt_list opt_restrict + { + AlterFunctionStmt *n = makeNode(AlterFunctionStmt); + n->objtype = OBJECT_ROUTINE; n->func = $3; n->actions = $4; $$ = (Node *) n; @@ -7762,6 +8030,8 @@ opt_restrict: * QUERY: * * DROP FUNCTION funcname (arg1, arg2, ...) [ RESTRICT | CASCADE ] + * DROP PROCEDURE procname (arg1, arg2, ...) [ RESTRICT | CASCADE ] + * DROP ROUTINE routname (arg1, arg2, ...) [ RESTRICT | CASCADE ] * DROP AGGREGATE aggname (arg1, ...) 
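Taken together, the new CREATE PROCEDURE production and the ALTER/DROP variants above give procedures the same lifecycle commands as functions, with CALL as the invocation syntax. A minimal sketch, assuming a hypothetical event_log(message text) table:

    CREATE PROCEDURE log_event(msg text)
    LANGUAGE SQL
    AS $$ INSERT INTO event_log(message) VALUES (msg); $$;

    CALL log_event('backfill started');

    ALTER PROCEDURE log_event(text) SECURITY DEFINER;

    DROP ROUTINE log_event(text);   -- DROP ROUTINE works for functions and procedures alike
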
[ RESTRICT | CASCADE ] * DROP OPERATOR opname (leftoperand_typ, rightoperand_typ) [ RESTRICT | CASCADE ] * @@ -7788,6 +8058,46 @@ RemoveFuncStmt: n->concurrent = false; $$ = (Node *)n; } + | DROP PROCEDURE function_with_argtypes_list opt_drop_behavior + { + DropStmt *n = makeNode(DropStmt); + n->removeType = OBJECT_PROCEDURE; + n->objects = $3; + n->behavior = $4; + n->missing_ok = false; + n->concurrent = false; + $$ = (Node *)n; + } + | DROP PROCEDURE IF_P EXISTS function_with_argtypes_list opt_drop_behavior + { + DropStmt *n = makeNode(DropStmt); + n->removeType = OBJECT_PROCEDURE; + n->objects = $5; + n->behavior = $6; + n->missing_ok = true; + n->concurrent = false; + $$ = (Node *)n; + } + | DROP ROUTINE function_with_argtypes_list opt_drop_behavior + { + DropStmt *n = makeNode(DropStmt); + n->removeType = OBJECT_ROUTINE; + n->objects = $3; + n->behavior = $4; + n->missing_ok = false; + n->concurrent = false; + $$ = (Node *)n; + } + | DROP ROUTINE IF_P EXISTS function_with_argtypes_list opt_drop_behavior + { + DropStmt *n = makeNode(DropStmt); + n->removeType = OBJECT_ROUTINE; + n->objects = $5; + n->behavior = $6; + n->missing_ok = true; + n->concurrent = false; + $$ = (Node *)n; + } ; RemoveAggrStmt: @@ -7968,8 +8278,8 @@ DropCastStmt: DROP CAST opt_if_exists '(' Typename AS Typename ')' opt_drop_beha } ; -opt_if_exists: IF_P EXISTS { $$ = TRUE; } - | /*EMPTY*/ { $$ = FALSE; } +opt_if_exists: IF_P EXISTS { $$ = true; } + | /*EMPTY*/ { $$ = false; } ; @@ -8097,7 +8407,7 @@ AlterTblSpcStmt: makeNode(AlterTableSpaceOptionsStmt); n->tablespacename = $3; n->options = $5; - n->isReset = FALSE; + n->isReset = false; $$ = (Node *)n; } | ALTER TABLESPACE name RESET reloptions @@ -8106,7 +8416,7 @@ AlterTblSpcStmt: makeNode(AlterTableSpaceOptionsStmt); n->tablespacename = $3; n->options = $5; - n->isReset = TRUE; + n->isReset = true; $$ = (Node *)n; } ; @@ -8245,6 +8555,15 @@ RenameStmt: ALTER AGGREGATE aggregate_with_argtypes RENAME TO name n->missing_ok = true; $$ = (Node *)n; } + | ALTER PROCEDURE function_with_argtypes RENAME TO name + { + RenameStmt *n = makeNode(RenameStmt); + n->renameType = OBJECT_PROCEDURE; + n->object = (Node *) $3; + n->newname = $6; + n->missing_ok = false; + $$ = (Node *)n; + } | ALTER PUBLICATION name RENAME TO name { RenameStmt *n = makeNode(RenameStmt); @@ -8254,6 +8573,15 @@ RenameStmt: ALTER AGGREGATE aggregate_with_argtypes RENAME TO name n->missing_ok = false; $$ = (Node *)n; } + | ALTER ROUTINE function_with_argtypes RENAME TO name + { + RenameStmt *n = makeNode(RenameStmt); + n->renameType = OBJECT_ROUTINE; + n->object = (Node *) $3; + n->newname = $6; + n->missing_ok = false; + $$ = (Node *)n; + } | ALTER SCHEMA name RENAME TO name { RenameStmt *n = makeNode(RenameStmt); @@ -8633,6 +8961,22 @@ AlterObjectDependsStmt: n->extname = makeString($7); $$ = (Node *)n; } + | ALTER PROCEDURE function_with_argtypes DEPENDS ON EXTENSION name + { + AlterObjectDependsStmt *n = makeNode(AlterObjectDependsStmt); + n->objectType = OBJECT_PROCEDURE; + n->object = (Node *) $3; + n->extname = makeString($7); + $$ = (Node *)n; + } + | ALTER ROUTINE function_with_argtypes DEPENDS ON EXTENSION name + { + AlterObjectDependsStmt *n = makeNode(AlterObjectDependsStmt); + n->objectType = OBJECT_ROUTINE; + n->object = (Node *) $3; + n->extname = makeString($7); + $$ = (Node *)n; + } | ALTER TRIGGER name ON qualified_name DEPENDS ON EXTENSION name { AlterObjectDependsStmt *n = makeNode(AlterObjectDependsStmt); @@ -8748,6 +9092,24 @@ AlterObjectSchemaStmt: n->missing_ok = false; $$ 
= (Node *)n; } + | ALTER PROCEDURE function_with_argtypes SET SCHEMA name + { + AlterObjectSchemaStmt *n = makeNode(AlterObjectSchemaStmt); + n->objectType = OBJECT_PROCEDURE; + n->object = (Node *) $3; + n->newschema = $6; + n->missing_ok = false; + $$ = (Node *)n; + } + | ALTER ROUTINE function_with_argtypes SET SCHEMA name + { + AlterObjectSchemaStmt *n = makeNode(AlterObjectSchemaStmt); + n->objectType = OBJECT_ROUTINE; + n->object = (Node *) $3; + n->newschema = $6; + n->missing_ok = false; + $$ = (Node *)n; + } | ALTER TABLE relation_expr SET SCHEMA name { AlterObjectSchemaStmt *n = makeNode(AlterObjectSchemaStmt); @@ -9023,6 +9385,22 @@ AlterOwnerStmt: ALTER AGGREGATE aggregate_with_argtypes OWNER TO RoleSpec n->newowner = $9; $$ = (Node *)n; } + | ALTER PROCEDURE function_with_argtypes OWNER TO RoleSpec + { + AlterOwnerStmt *n = makeNode(AlterOwnerStmt); + n->objectType = OBJECT_PROCEDURE; + n->object = (Node *) $3; + n->newowner = $6; + $$ = (Node *)n; + } + | ALTER ROUTINE function_with_argtypes OWNER TO RoleSpec + { + AlterOwnerStmt *n = makeNode(AlterOwnerStmt); + n->objectType = OBJECT_ROUTINE; + n->object = (Node *) $3; + n->newowner = $6; + $$ = (Node *)n; + } | ALTER SCHEMA name OWNER TO RoleSpec { AlterOwnerStmt *n = makeNode(AlterOwnerStmt); @@ -9133,7 +9511,7 @@ CreatePublicationStmt: n->tables = (List *)$4; /* FOR ALL TABLES */ else - n->for_all_tables = TRUE; + n->for_all_tables = true; } $$ = (Node *)n; } @@ -9151,7 +9529,7 @@ publication_for_tables: } | FOR ALL TABLES { - $$ = (Node *) makeInteger(TRUE); + $$ = (Node *) makeInteger(true); } ; @@ -9286,7 +9664,7 @@ AlterSubscriptionStmt: n->kind = ALTER_SUBSCRIPTION_ENABLED; n->subname = $3; n->options = list_make1(makeDefElem("enabled", - (Node *)makeInteger(TRUE), @1)); + (Node *)makeInteger(true), @1)); $$ = (Node *)n; } | ALTER SUBSCRIPTION name DISABLE_P @@ -9296,7 +9674,7 @@ AlterSubscriptionStmt: n->kind = ALTER_SUBSCRIPTION_ENABLED; n->subname = $3; n->options = list_make1(makeDefElem("enabled", - (Node *)makeInteger(FALSE), @1)); + (Node *)makeInteger(false), @1)); $$ = (Node *)n; } ; @@ -9389,9 +9767,9 @@ event: SELECT { $$ = CMD_SELECT; } ; opt_instead: - INSTEAD { $$ = TRUE; } - | ALSO { $$ = FALSE; } - | /*EMPTY*/ { $$ = FALSE; } + INSTEAD { $$ = true; } + | ALSO { $$ = false; } + | /*EMPTY*/ { $$ = false; } ; @@ -9497,40 +9875,35 @@ TransactionStmt: { TransactionStmt *n = makeNode(TransactionStmt); n->kind = TRANS_STMT_SAVEPOINT; - n->options = list_make1(makeDefElem("savepoint_name", - (Node *)makeString($2), @1)); + n->savepoint_name = $2; $$ = (Node *)n; } | RELEASE SAVEPOINT ColId { TransactionStmt *n = makeNode(TransactionStmt); n->kind = TRANS_STMT_RELEASE; - n->options = list_make1(makeDefElem("savepoint_name", - (Node *)makeString($3), @1)); + n->savepoint_name = $3; $$ = (Node *)n; } | RELEASE ColId { TransactionStmt *n = makeNode(TransactionStmt); n->kind = TRANS_STMT_RELEASE; - n->options = list_make1(makeDefElem("savepoint_name", - (Node *)makeString($2), @1)); + n->savepoint_name = $2; $$ = (Node *)n; } | ROLLBACK opt_transaction TO SAVEPOINT ColId { TransactionStmt *n = makeNode(TransactionStmt); n->kind = TRANS_STMT_ROLLBACK_TO; - n->options = list_make1(makeDefElem("savepoint_name", - (Node *)makeString($5), @1)); + n->savepoint_name = $5; $$ = (Node *)n; } | ROLLBACK opt_transaction TO ColId { TransactionStmt *n = makeNode(TransactionStmt); n->kind = TRANS_STMT_ROLLBACK_TO; - n->options = list_make1(makeDefElem("savepoint_name", - (Node *)makeString($4), @1)); + n->savepoint_name = $4; 
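With the simplification above, the savepoint name travels in TransactionStmt.savepoint_name instead of a single-element DefElem list; the accepted syntax is unchanged, and the SAVEPOINT keyword stays optional in ROLLBACK TO and RELEASE:

    BEGIN;
    SAVEPOINT sp1;
    -- ... work that may need to be undone ...
    ROLLBACK TO SAVEPOINT sp1;   -- equivalent: ROLLBACK TO sp1
    RELEASE SAVEPOINT sp1;       -- equivalent: RELEASE sp1
    COMMIT;
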
$$ = (Node *)n; } | PREPARE TRANSACTION Sconst @@ -9567,16 +9940,16 @@ transaction_mode_item: makeStringConst($3, @3), @1); } | READ ONLY { $$ = makeDefElem("transaction_read_only", - makeIntConst(TRUE, @1), @1); } + makeIntConst(true, @1), @1); } | READ WRITE { $$ = makeDefElem("transaction_read_only", - makeIntConst(FALSE, @1), @1); } + makeIntConst(false, @1), @1); } | DEFERRABLE { $$ = makeDefElem("transaction_deferrable", - makeIntConst(TRUE, @1), @1); } + makeIntConst(true, @1), @1); } | NOT DEFERRABLE { $$ = makeDefElem("transaction_deferrable", - makeIntConst(FALSE, @1), @1); } + makeIntConst(false, @1), @1); } ; /* Syntax with commas is SQL-spec, without commas is Postgres historical */ @@ -9815,14 +10188,14 @@ DropdbStmt: DROP DATABASE database_name { DropdbStmt *n = makeNode(DropdbStmt); n->dbname = $3; - n->missing_ok = FALSE; + n->missing_ok = false; $$ = (Node *)n; } | DROP DATABASE IF_P EXISTS database_name { DropdbStmt *n = makeNode(DropdbStmt); n->dbname = $5; - n->missing_ok = TRUE; + n->missing_ok = true; $$ = (Node *)n; } ; @@ -10082,7 +10455,9 @@ ClusterStmt: ClusterStmt *n = makeNode(ClusterStmt); n->relation = $3; n->indexname = $4; - n->verbose = $2; + n->options = 0; + if ($2) + n->options |= CLUOPT_VERBOSE; $$ = (Node*)n; } | CLUSTER opt_verbose @@ -10090,7 +10465,9 @@ ClusterStmt: ClusterStmt *n = makeNode(ClusterStmt); n->relation = NULL; n->indexname = NULL; - n->verbose = $2; + n->options = 0; + if ($2) + n->options |= CLUOPT_VERBOSE; $$ = (Node*)n; } /* kept for pre-8.3 compatibility */ @@ -10099,7 +10476,9 @@ ClusterStmt: ClusterStmt *n = makeNode(ClusterStmt); n->relation = $5; n->indexname = $3; - n->verbose = $2; + n->options = 0; + if ($2) + n->options |= CLUOPT_VERBOSE; $$ = (Node*)n; } ; @@ -10118,21 +10497,7 @@ cluster_index_specification: * *****************************************************************************/ -VacuumStmt: VACUUM opt_full opt_freeze opt_verbose - { - VacuumStmt *n = makeNode(VacuumStmt); - n->options = VACOPT_VACUUM; - if ($2) - n->options |= VACOPT_FULL; - if ($3) - n->options |= VACOPT_FREEZE; - if ($4) - n->options |= VACOPT_VERBOSE; - n->relation = NULL; - n->va_cols = NIL; - $$ = (Node *)n; - } - | VACUUM opt_full opt_freeze opt_verbose qualified_name +VacuumStmt: VACUUM opt_full opt_freeze opt_verbose opt_analyze opt_vacuum_relation_list { VacuumStmt *n = makeNode(VacuumStmt); n->options = VACOPT_VACUUM; @@ -10142,38 +10507,16 @@ VacuumStmt: VACUUM opt_full opt_freeze opt_verbose n->options |= VACOPT_FREEZE; if ($4) n->options |= VACOPT_VERBOSE; - n->relation = $5; - n->va_cols = NIL; - $$ = (Node *)n; - } - | VACUUM opt_full opt_freeze opt_verbose AnalyzeStmt - { - VacuumStmt *n = (VacuumStmt *) $5; - n->options |= VACOPT_VACUUM; - if ($2) - n->options |= VACOPT_FULL; - if ($3) - n->options |= VACOPT_FREEZE; - if ($4) - n->options |= VACOPT_VERBOSE; + if ($5) + n->options |= VACOPT_ANALYZE; + n->rels = $6; $$ = (Node *)n; } - | VACUUM '(' vacuum_option_list ')' + | VACUUM '(' vacuum_option_list ')' opt_vacuum_relation_list { VacuumStmt *n = makeNode(VacuumStmt); n->options = VACOPT_VACUUM | $3; - n->relation = NULL; - n->va_cols = NIL; - $$ = (Node *) n; - } - | VACUUM '(' vacuum_option_list ')' qualified_name opt_name_list - { - VacuumStmt *n = makeNode(VacuumStmt); - n->options = VACOPT_VACUUM | $3; - n->relation = $5; - n->va_cols = $6; - if (n->va_cols != NIL) /* implies analyze */ - n->options |= VACOPT_ANALYZE; + n->rels = $5; $$ = (Node *) n; } ; @@ -10192,6 +10535,8 @@ vacuum_option_elem: { if (strcmp($1, 
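The consolidated VacuumStmt rule above folds the old single-relation forms into one production with an optional ANALYZE keyword and an optional list of target relations, so one statement can now name several tables. A sketch with hypothetical table and column names:

    VACUUM VERBOSE ANALYZE orders, order_items;
    VACUUM (ANALYZE, VERBOSE) orders, order_items (order_id, sku);
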
"disable_page_skipping") == 0) $$ = VACOPT_DISABLE_PAGE_SKIPPING; + else if (strcmp($1, "skip_locked") == 0) + $$ = VACOPT_SKIP_LOCKED; else ereport(ERROR, (errcode(ERRCODE_SYNTAX_ERROR), @@ -10200,26 +10545,40 @@ vacuum_option_elem: } ; -AnalyzeStmt: - analyze_keyword opt_verbose +AnalyzeStmt: analyze_keyword opt_verbose opt_vacuum_relation_list { VacuumStmt *n = makeNode(VacuumStmt); n->options = VACOPT_ANALYZE; if ($2) n->options |= VACOPT_VERBOSE; - n->relation = NULL; - n->va_cols = NIL; + n->rels = $3; $$ = (Node *)n; } - | analyze_keyword opt_verbose qualified_name opt_name_list + | analyze_keyword '(' analyze_option_list ')' opt_vacuum_relation_list { VacuumStmt *n = makeNode(VacuumStmt); - n->options = VACOPT_ANALYZE; - if ($2) - n->options |= VACOPT_VERBOSE; - n->relation = $3; - n->va_cols = $4; - $$ = (Node *)n; + n->options = VACOPT_ANALYZE | $3; + n->rels = $5; + $$ = (Node *) n; + } + ; + +analyze_option_list: + analyze_option_elem { $$ = $1; } + | analyze_option_list ',' analyze_option_elem { $$ = $1 | $3; } + ; + +analyze_option_elem: + VERBOSE { $$ = VACOPT_VERBOSE; } + | IDENT + { + if (strcmp($1, "skip_locked") == 0) + $$ = VACOPT_SKIP_LOCKED; + else + ereport(ERROR, + (errcode(ERRCODE_SYNTAX_ERROR), + errmsg("unrecognized ANALYZE option \"%s\"", $1), + parser_errposition(@1))); } ; @@ -10228,17 +10587,22 @@ analyze_keyword: | ANALYSE /* British */ {} ; +opt_analyze: + analyze_keyword { $$ = true; } + | /*EMPTY*/ { $$ = false; } + ; + opt_verbose: - VERBOSE { $$ = TRUE; } - | /*EMPTY*/ { $$ = FALSE; } + VERBOSE { $$ = true; } + | /*EMPTY*/ { $$ = false; } ; -opt_full: FULL { $$ = TRUE; } - | /*EMPTY*/ { $$ = FALSE; } +opt_full: FULL { $$ = true; } + | /*EMPTY*/ { $$ = false; } ; -opt_freeze: FREEZE { $$ = TRUE; } - | /*EMPTY*/ { $$ = FALSE; } +opt_freeze: FREEZE { $$ = true; } + | /*EMPTY*/ { $$ = false; } ; opt_name_list: @@ -10246,6 +10610,25 @@ opt_name_list: | /*EMPTY*/ { $$ = NIL; } ; +vacuum_relation: + qualified_name opt_name_list + { + $$ = (Node *) makeVacuumRelation($1, InvalidOid, $2); + } + ; + +vacuum_relation_list: + vacuum_relation + { $$ = list_make1($1); } + | vacuum_relation_list ',' vacuum_relation + { $$ = lappend($1, $3); } + ; + +opt_vacuum_relation_list: + vacuum_relation_list { $$ = $1; } + | /*EMPTY*/ { $$ = NIL; } + ; + /***************************************************************************** * @@ -10636,8 +11019,8 @@ lock_type: ACCESS SHARE { $$ = AccessShareLock; } | ACCESS EXCLUSIVE { $$ = AccessExclusiveLock; } ; -opt_nowait: NOWAIT { $$ = TRUE; } - | /*EMPTY*/ { $$ = FALSE; } +opt_nowait: NOWAIT { $$ = true; } + | /*EMPTY*/ { $$ = false; } ; opt_nowait_or_skip: @@ -11104,9 +11487,9 @@ opt_table: TABLE {} ; all_or_distinct: - ALL { $$ = TRUE; } - | DISTINCT { $$ = FALSE; } - | /*EMPTY*/ { $$ = FALSE; } + ALL { $$ = true; } + | DISTINCT { $$ = false; } + | /*EMPTY*/ { $$ = false; } ; /* We use (NIL) as a placeholder to indicate that all target expressions @@ -11182,15 +11565,23 @@ limit_clause: parser_errposition(@1))); } /* SQL:2008 syntax */ - | FETCH first_or_next opt_select_fetch_first_value row_or_rows ONLY + /* to avoid shift/reduce conflicts, handle the optional value with + * a separate production rather than an opt_ expression. The fact + * that ONLY is fully reserved means that this way, we defer any + * decision about what rule reduces ROW or ROWS to the point where + * we can see the ONLY token in the lookahead slot. 
+ */ + | FETCH first_or_next select_fetch_first_value row_or_rows ONLY { $$ = $3; } + | FETCH first_or_next row_or_rows ONLY + { $$ = makeIntConst(1, -1); } ; offset_clause: OFFSET select_offset_value { $$ = $2; } /* SQL:2008 syntax */ - | OFFSET select_offset_value2 row_or_rows + | OFFSET select_fetch_first_value row_or_rows { $$ = $2; } ; @@ -11209,22 +11600,31 @@ select_offset_value: /* * Allowing full expressions without parentheses causes various parsing - * problems with the trailing ROW/ROWS key words. SQL only calls for - * constants, so we allow the rest only with parentheses. If omitted, - * default to 1. + * problems with the trailing ROW/ROWS key words. SQL spec only calls for + * , which is either a literal or a parameter (but + * an could be an identifier, bringing up conflicts + * with ROW/ROWS). We solve this by leveraging the presence of ONLY (see above) + * to determine whether the expression is missing rather than trying to make it + * optional in this rule. + * + * c_expr covers almost all the spec-required cases (and more), but it doesn't + * cover signed numeric literals, which are allowed by the spec. So we include + * those here explicitly. We need FCONST as well as ICONST because values that + * don't fit in the platform's "long", but do fit in bigint, should still be + * accepted here. (This is possible in 64-bit Windows as well as all 32-bit + * builds.) */ -opt_select_fetch_first_value: - SignedIconst { $$ = makeIntConst($1, @1); } - | '(' a_expr ')' { $$ = $2; } - | /*EMPTY*/ { $$ = makeIntConst(1, -1); } +select_fetch_first_value: + c_expr { $$ = $1; } + | '+' I_or_F_const + { $$ = (Node *) makeSimpleA_Expr(AEXPR_OP, "+", NULL, $2, @1); } + | '-' I_or_F_const + { $$ = doNegate($2, @1); } ; -/* - * Again, the trailing ROW/ROWS in this case prevent the full expression - * syntax. c_expr is the best we can do. - */ -select_offset_value2: - c_expr { $$ = $1; } +I_or_F_const: + Iconst { $$ = makeIntConst($1,@1); } + | FCONST { $$ = makeFloatConst($1,@1); } ; /* noise words */ @@ -11535,7 +11935,7 @@ joined_table: /* CROSS JOIN is same as unqualified inner join */ JoinExpr *n = makeNode(JoinExpr); n->jointype = JOIN_INNER; - n->isNatural = FALSE; + n->isNatural = false; n->larg = $1; n->rarg = $4; n->usingClause = NIL; @@ -11546,7 +11946,7 @@ joined_table: { JoinExpr *n = makeNode(JoinExpr); n->jointype = $2; - n->isNatural = FALSE; + n->isNatural = false; n->larg = $1; n->rarg = $4; if ($5 != NULL && IsA($5, List)) @@ -11560,7 +11960,7 @@ joined_table: /* letting join_type reduce to empty doesn't work */ JoinExpr *n = makeNode(JoinExpr); n->jointype = JOIN_INNER; - n->isNatural = FALSE; + n->isNatural = false; n->larg = $1; n->rarg = $3; if ($4 != NULL && IsA($4, List)) @@ -11573,7 +11973,7 @@ joined_table: { JoinExpr *n = makeNode(JoinExpr); n->jointype = $3; - n->isNatural = TRUE; + n->isNatural = true; n->larg = $1; n->rarg = $5; n->usingClause = NIL; /* figure out which columns later... */ @@ -11585,7 +11985,7 @@ joined_table: /* letting join_type reduce to empty doesn't work */ JoinExpr *n = makeNode(JoinExpr); n->jointype = JOIN_INNER; - n->isNatural = TRUE; + n->isNatural = true; n->larg = $1; n->rarg = $4; n->usingClause = NIL; /* figure out which columns later... 
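With select_fetch_first_value replacing the old opt_select_fetch_first_value, the SQL:2008 FETCH clause accepts a general c_expr or a signed numeric literal as the row count, and the count can be omitted entirely, defaulting to 1. A sketch against a hypothetical events table:

    SELECT * FROM events ORDER BY id FETCH FIRST ROW ONLY;     -- count defaults to 1
    SELECT * FROM events ORDER BY id
        OFFSET 5 ROWS FETCH NEXT (2 + 3) ROWS ONLY;            -- parenthesized expression as count
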
*/ @@ -11864,7 +12264,6 @@ TableFuncElement: ColId Typename opt_collate_clause n->is_local = true; n->is_not_null = false; n->is_from_type = false; - n->is_from_parent = false; n->storage = 0; n->raw_default = NULL; n->cooked_default = NULL; @@ -12055,7 +12454,7 @@ Typename: SimpleTypename opt_array_bounds { $$ = $2; $$->arrayBounds = $3; - $$->setof = TRUE; + $$->setof = true; } /* SQL standard syntax, currently only one-dimensional */ | SimpleTypename ARRAY '[' Iconst ']' @@ -12067,7 +12466,7 @@ Typename: SimpleTypename opt_array_bounds { $$ = $2; $$->arrayBounds = list_make1(makeInteger($5)); - $$->setof = TRUE; + $$->setof = true; } | SimpleTypename ARRAY { @@ -12078,7 +12477,7 @@ Typename: SimpleTypename opt_array_bounds { $$ = $2; $$->arrayBounds = list_make1(makeInteger(-1)); - $$->setof = TRUE; + $$->setof = true; } ; @@ -12365,8 +12764,8 @@ character: CHARACTER opt_varying ; opt_varying: - VARYING { $$ = TRUE; } - | /*EMPTY*/ { $$ = FALSE; } + VARYING { $$ = true; } + | /*EMPTY*/ { $$ = false; } ; /* @@ -12418,9 +12817,9 @@ ConstInterval: ; opt_timezone: - WITH_LA TIME ZONE { $$ = TRUE; } - | WITHOUT TIME ZONE { $$ = FALSE; } - | /*EMPTY*/ { $$ = FALSE; } + WITH_LA TIME ZONE { $$ = true; } + | WITHOUT TIME ZONE { $$ = false; } + | /*EMPTY*/ { $$ = false; } ; opt_interval: @@ -13181,14 +13580,14 @@ func_application: func_name '(' ')' | func_name '(' VARIADIC func_arg_expr opt_sort_clause ')' { FuncCall *n = makeFuncCall($1, list_make1($4), @1); - n->func_variadic = TRUE; + n->func_variadic = true; n->agg_order = $5; $$ = (Node *)n; } | func_name '(' func_arg_list ',' VARIADIC func_arg_expr opt_sort_clause ')' { FuncCall *n = makeFuncCall($1, lappend($3, $6), @1); - n->func_variadic = TRUE; + n->func_variadic = true; n->agg_order = $7; $$ = (Node *)n; } @@ -13206,7 +13605,7 @@ func_application: func_name '(' ')' { FuncCall *n = makeFuncCall($1, $4, @1); n->agg_order = $5; - n->agg_distinct = TRUE; + n->agg_distinct = true; $$ = (Node *)n; } | func_name '(' '*' ')' @@ -13222,7 +13621,7 @@ func_application: func_name '(' ')' * really was. */ FuncCall *n = makeFuncCall($1, NIL, @1); - n->agg_star = TRUE; + n->agg_star = true; $$ = (Node *)n; } ; @@ -13266,7 +13665,7 @@ func_expr: func_application within_group_clause filter_clause over_clause errmsg("cannot use VARIADIC with WITHIN GROUP"), parser_errposition(@2))); n->agg_order = $2; - n->agg_within_group = TRUE; + n->agg_within_group = true; } n->agg_filter = $3; n->over = $4; @@ -13556,9 +13955,9 @@ document_or_content: DOCUMENT_P { $$ = XMLOPTION_DOCUMENT; } | CONTENT_P { $$ = XMLOPTION_CONTENT; } ; -xml_whitespace_option: PRESERVE WHITESPACE_P { $$ = TRUE; } - | STRIP_P WHITESPACE_P { $$ = FALSE; } - | /*EMPTY*/ { $$ = FALSE; } +xml_whitespace_option: PRESERVE WHITESPACE_P { $$ = true; } + | STRIP_P WHITESPACE_P { $$ = false; } + | /*EMPTY*/ { $$ = false; } ; /* We allow several variants for SQL and other compatibility. */ @@ -13656,7 +14055,7 @@ window_specification: '(' opt_existing_window_name opt_partition_clause ; /* - * If we see PARTITION, RANGE, or ROWS as the first token after the '(' + * If we see PARTITION, RANGE, ROWS or GROUPS as the first token after the '(' * of a window_specification, we want the assumption to be that there is * no existing_window_name; but those keywords are unreserved and so could * be ColIds. 
We fix this by making them have the same precedence as IDENT @@ -13676,33 +14075,27 @@ opt_partition_clause: PARTITION BY expr_list { $$ = $3; } /* * For frame clauses, we return a WindowDef, but only some fields are used: * frameOptions, startOffset, and endOffset. - * - * This is only a subset of the full SQL:2008 frame_clause grammar. - * We don't support yet. */ opt_frame_clause: - RANGE frame_extent + RANGE frame_extent opt_window_exclusion_clause { WindowDef *n = $2; n->frameOptions |= FRAMEOPTION_NONDEFAULT | FRAMEOPTION_RANGE; - if (n->frameOptions & (FRAMEOPTION_START_VALUE_PRECEDING | - FRAMEOPTION_END_VALUE_PRECEDING)) - ereport(ERROR, - (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("RANGE PRECEDING is only supported with UNBOUNDED"), - parser_errposition(@1))); - if (n->frameOptions & (FRAMEOPTION_START_VALUE_FOLLOWING | - FRAMEOPTION_END_VALUE_FOLLOWING)) - ereport(ERROR, - (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("RANGE FOLLOWING is only supported with UNBOUNDED"), - parser_errposition(@1))); + n->frameOptions |= $3; $$ = n; } - | ROWS frame_extent + | ROWS frame_extent opt_window_exclusion_clause { WindowDef *n = $2; n->frameOptions |= FRAMEOPTION_NONDEFAULT | FRAMEOPTION_ROWS; + n->frameOptions |= $3; + $$ = n; + } + | GROUPS frame_extent opt_window_exclusion_clause + { + WindowDef *n = $2; + n->frameOptions |= FRAMEOPTION_NONDEFAULT | FRAMEOPTION_GROUPS; + n->frameOptions |= $3; $$ = n; } | /*EMPTY*/ @@ -13724,7 +14117,7 @@ frame_extent: frame_bound (errcode(ERRCODE_WINDOWING_ERROR), errmsg("frame start cannot be UNBOUNDED FOLLOWING"), parser_errposition(@1))); - if (n->frameOptions & FRAMEOPTION_START_VALUE_FOLLOWING) + if (n->frameOptions & FRAMEOPTION_START_OFFSET_FOLLOWING) ereport(ERROR, (errcode(ERRCODE_WINDOWING_ERROR), errmsg("frame starting from following row cannot end with current row"), @@ -13753,13 +14146,13 @@ frame_extent: frame_bound errmsg("frame end cannot be UNBOUNDED PRECEDING"), parser_errposition(@4))); if ((frameOptions & FRAMEOPTION_START_CURRENT_ROW) && - (frameOptions & FRAMEOPTION_END_VALUE_PRECEDING)) + (frameOptions & FRAMEOPTION_END_OFFSET_PRECEDING)) ereport(ERROR, (errcode(ERRCODE_WINDOWING_ERROR), errmsg("frame starting from current row cannot have preceding rows"), parser_errposition(@4))); - if ((frameOptions & FRAMEOPTION_START_VALUE_FOLLOWING) && - (frameOptions & (FRAMEOPTION_END_VALUE_PRECEDING | + if ((frameOptions & FRAMEOPTION_START_OFFSET_FOLLOWING) && + (frameOptions & (FRAMEOPTION_END_OFFSET_PRECEDING | FRAMEOPTION_END_CURRENT_ROW))) ereport(ERROR, (errcode(ERRCODE_WINDOWING_ERROR), @@ -13804,7 +14197,7 @@ frame_bound: | a_expr PRECEDING { WindowDef *n = makeNode(WindowDef); - n->frameOptions = FRAMEOPTION_START_VALUE_PRECEDING; + n->frameOptions = FRAMEOPTION_START_OFFSET_PRECEDING; n->startOffset = $1; n->endOffset = NULL; $$ = n; @@ -13812,13 +14205,21 @@ frame_bound: | a_expr FOLLOWING { WindowDef *n = makeNode(WindowDef); - n->frameOptions = FRAMEOPTION_START_VALUE_FOLLOWING; + n->frameOptions = FRAMEOPTION_START_OFFSET_FOLLOWING; n->startOffset = $1; n->endOffset = NULL; $$ = n; } ; +opt_window_exclusion_clause: + EXCLUDE CURRENT_P ROW { $$ = FRAMEOPTION_EXCLUDE_CURRENT_ROW; } + | EXCLUDE GROUP_P { $$ = FRAMEOPTION_EXCLUDE_GROUP; } + | EXCLUDE TIES { $$ = FRAMEOPTION_EXCLUDE_TIES; } + | EXCLUDE NO OTHERS { $$ = 0; } + | /*EMPTY*/ { $$ = 0; } + ; + /* * Supporting nonterminals for expressions. 
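The opt_frame_clause additions above wire in GROUPS frame mode and the SQL:2011 frame-exclusion clause. A minimal sketch:

    SELECT x,
           sum(x) OVER (ORDER BY x
                        GROUPS BETWEEN 1 PRECEDING AND 1 FOLLOWING
                        EXCLUDE CURRENT ROW) AS s
    FROM generate_series(1, 10) AS g(x);

GROUPS mode measures the frame in peer groups of the ORDER BY value rather than in physical rows, and (as enforced in parse_clause.c below) it requires an ORDER BY clause in the window definition.
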
@@ -14430,11 +14831,11 @@ AexprConst: Iconst } | TRUE_P { - $$ = makeBoolAConst(TRUE, @1); + $$ = makeBoolAConst(true, @1); } | FALSE_P { - $$ = makeBoolAConst(FALSE, @1); + $$ = makeBoolAConst(false, @1); } | NULL_P { @@ -14465,18 +14866,21 @@ RoleId: RoleSpec errmsg("role name \"%s\" is reserved", "public"), parser_errposition(@1))); + break; case ROLESPEC_SESSION_USER: ereport(ERROR, (errcode(ERRCODE_RESERVED_NAME), errmsg("%s cannot be used as a role name here", "SESSION_USER"), parser_errposition(@1))); + break; case ROLESPEC_CURRENT_USER: ereport(ERROR, (errcode(ERRCODE_RESERVED_NAME), errmsg("%s cannot be used as a role name here", "CURRENT_USER"), parser_errposition(@1))); + break; } } ; @@ -14605,6 +15009,7 @@ unreserved_keyword: | BEGIN_P | BY | CACHE + | CALL | CALLED | CASCADE | CASCADED @@ -14679,6 +15084,7 @@ unreserved_keyword: | GENERATED | GLOBAL | GRANTED + | GROUPS | HANDLER | HEADER_P | HOLD @@ -14689,6 +15095,7 @@ unreserved_keyword: | IMMUTABLE | IMPLICIT_P | IMPORT_P + | INCLUDE | INCLUDING | INCREMENT | INDEX @@ -14744,6 +15151,7 @@ unreserved_keyword: | OPTION | OPTIONS | ORDINALITY + | OTHERS | OVER | OVERRIDING | OWNED @@ -14764,6 +15172,7 @@ unreserved_keyword: | PRIVILEGES | PROCEDURAL | PROCEDURE + | PROCEDURES | PROGRAM | PUBLICATION | QUOTE @@ -14790,6 +15199,8 @@ unreserved_keyword: | ROLE | ROLLBACK | ROLLUP + | ROUTINE + | ROUTINES | ROWS | RULE | SAVEPOINT @@ -14831,6 +15242,7 @@ unreserved_keyword: | TEMPLATE | TEMPORARY | TEXT_P + | TIES | TRANSACTION | TRANSFORM | TRIGGER diff --git a/src/backend/parser/parse_agg.c b/src/backend/parser/parse_agg.c index 64111f315e..61727e1d71 100644 --- a/src/backend/parser/parse_agg.c +++ b/src/backend/parser/parse_agg.c @@ -3,7 +3,7 @@ * parse_agg.c * handle aggregates and window functions in parser * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * @@ -15,7 +15,7 @@ #include "postgres.h" #include "catalog/pg_aggregate.h" -#include "catalog/pg_constraint_fn.h" +#include "catalog/pg_constraint.h" #include "catalog/pg_type.h" #include "nodes/makefuncs.h" #include "nodes/nodeFuncs.h" @@ -419,6 +419,13 @@ check_agglevels_and_constraints(ParseState *pstate, Node *expr) else err = _("grouping operations are not allowed in window ROWS"); + break; + case EXPR_KIND_WINDOW_FRAME_GROUPS: + if (isAgg) + err = _("aggregate functions are not allowed in window GROUPS"); + else + err = _("grouping operations are not allowed in window GROUPS"); + break; case EXPR_KIND_SELECT_TARGET: /* okay */ @@ -502,9 +509,17 @@ check_agglevels_and_constraints(ParseState *pstate, Node *expr) break; case EXPR_KIND_PARTITION_EXPRESSION: if (isAgg) - err = _("aggregate functions are not allowed in partition key expression"); + err = _("aggregate functions are not allowed in partition key expressions"); + else + err = _("grouping operations are not allowed in partition key expressions"); + + break; + + case EXPR_KIND_CALL_ARGUMENT: + if (isAgg) + err = _("aggregate functions are not allowed in CALL arguments"); else - err = _("grouping operations are not allowed in partition key expression"); + err = _("grouping operations are not allowed in CALL arguments"); break; @@ -827,6 +842,7 @@ transformWindowFuncCall(ParseState *pstate, WindowFunc *wfunc, case EXPR_KIND_WINDOW_ORDER: case EXPR_KIND_WINDOW_FRAME_RANGE: case EXPR_KIND_WINDOW_FRAME_ROWS: + case EXPR_KIND_WINDOW_FRAME_GROUPS: 
err = _("window functions are not allowed in window definitions"); break; case EXPR_KIND_SELECT_TARGET: @@ -881,7 +897,10 @@ transformWindowFuncCall(ParseState *pstate, WindowFunc *wfunc, err = _("window functions are not allowed in trigger WHEN conditions"); break; case EXPR_KIND_PARTITION_EXPRESSION: - err = _("window functions are not allowed in partition key expression"); + err = _("window functions are not allowed in partition key expressions"); + break; + case EXPR_KIND_CALL_ARGUMENT: + err = _("window functions are not allowed in CALL arguments"); break; /* diff --git a/src/backend/parser/parse_clause.c b/src/backend/parser/parse_clause.c index 9ff80b8b40..660011a3ec 100644 --- a/src/backend/parser/parse_clause.c +++ b/src/backend/parser/parse_clause.c @@ -3,7 +3,7 @@ * parse_clause.c * handle clauses in parser * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * @@ -18,12 +18,15 @@ #include "miscadmin.h" #include "access/heapam.h" +#include "access/htup_details.h" +#include "access/nbtree.h" #include "access/tsmapi.h" #include "catalog/catalog.h" #include "catalog/heap.h" #include "catalog/pg_am.h" +#include "catalog/pg_amproc.h" #include "catalog/pg_collation.h" -#include "catalog/pg_constraint_fn.h" +#include "catalog/pg_constraint.h" #include "catalog/pg_type.h" #include "commands/defrem.h" #include "nodes/makefuncs.h" @@ -43,8 +46,11 @@ #include "parser/parse_target.h" #include "parser/parse_type.h" #include "rewrite/rewriteManip.h" +#include "utils/builtins.h" #include "utils/guc.h" +#include "utils/catcache.h" #include "utils/lsyscache.h" +#include "utils/syscache.h" #include "utils/rel.h" @@ -62,9 +68,6 @@ static Node *transformJoinOnClause(ParseState *pstate, JoinExpr *j, static RangeTblEntry *getRTEForSpecialRelationTypes(ParseState *pstate, RangeVar *rv); static RangeTblEntry *transformTableEntry(ParseState *pstate, RangeVar *r); -static RangeTblEntry *transformCTEReference(ParseState *pstate, RangeVar *r, - CommonTableExpr *cte, Index levelsup); -static RangeTblEntry *transformENRReference(ParseState *pstate, RangeVar *r); static RangeTblEntry *transformRangeSubselect(ParseState *pstate, RangeSubselect *r); static RangeTblEntry *transformRangeFunction(ParseState *pstate, @@ -98,6 +101,7 @@ static List *addTargetToGroupList(ParseState *pstate, TargetEntry *tle, List *grouplist, List *targetlist, int location); static WindowClause *findWindowClause(List *wclist, const char *name); static Node *transformFrameOffset(ParseState *pstate, int frameOptions, + Oid rangeopfamily, Oid rangeopcintype, Oid *inRangeFunc, Node *clause); @@ -184,9 +188,12 @@ setTargetTable(ParseState *pstate, RangeVar *relation, RangeTblEntry *rte; int rtindex; - /* So far special relations are immutable; so they cannot be targets. */ - rte = getRTEForSpecialRelationTypes(pstate, relation); - if (rte != NULL) + /* + * ENRs hide tables of the same name, so we need to check for them first. + * In contrast, CTEs don't hide tables (for this purpose). + */ + if (relation->schemaname == NULL && + scanNameSpaceForENR(pstate, relation->relname)) ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), errmsg("relation \"%s\" cannot be the target of a modifying statement", @@ -210,6 +217,7 @@ setTargetTable(ParseState *pstate, RangeVar *relation, * Now build an RTE. 
*/ rte = addRangeTableEntryForRelation(pstate, pstate->p_target_relation, + RowExclusiveLock, relation->alias, inh, false); pstate->p_target_rangetblentry = rte; @@ -262,7 +270,7 @@ interpretOidsOption(List *defList, bool allowOids) DefElem *def = (DefElem *) lfirst(cell); if (def->defnamespace == NULL && - pg_strcasecmp(def->defname, "oids") == 0) + strcmp(def->defname, "oids") == 0) { if (!allowOids) ereport(ERROR, @@ -430,35 +438,6 @@ transformTableEntry(ParseState *pstate, RangeVar *r) return rte; } -/* - * transformCTEReference --- transform a RangeVar that references a common - * table expression (ie, a sub-SELECT defined in a WITH clause) - */ -static RangeTblEntry * -transformCTEReference(ParseState *pstate, RangeVar *r, - CommonTableExpr *cte, Index levelsup) -{ - RangeTblEntry *rte; - - rte = addRangeTableEntryForCTE(pstate, cte, levelsup, r, true); - - return rte; -} - -/* - * transformENRReference --- transform a RangeVar that references an ephemeral - * named relation - */ -static RangeTblEntry * -transformENRReference(ParseState *pstate, RangeVar *r) -{ - RangeTblEntry *rte; - - rte = addRangeTableEntryForENR(pstate, r, true); - - return rte; -} - /* * transformRangeSubselect --- transform a sub-SELECT appearing in FROM */ @@ -801,7 +780,7 @@ transformRangeTableFunc(ParseState *pstate, RangeTableFunc *rtf) /* undef ordinality column number */ tf->ordinalitycol = -1; - + /* Process column specs */ names = palloc(sizeof(char *) * list_length(rtf->columns)); colno = 0; @@ -922,15 +901,15 @@ transformRangeTableFunc(ParseState *pstate, RangeTableFunc *rtf) { foreach(lc2, ns_names) { - char *name = strVal(lfirst(lc2)); + Value *ns_node = (Value *) lfirst(lc2); - if (name == NULL) + if (ns_node == NULL) continue; - if (strcmp(name, r->name) == 0) + if (strcmp(strVal(ns_node), r->name) == 0) ereport(ERROR, (errcode(ERRCODE_SYNTAX_ERROR), errmsg("namespace name \"%s\" is not unique", - name), + r->name), parser_errposition(pstate, r->location))); } } @@ -944,8 +923,9 @@ transformRangeTableFunc(ParseState *pstate, RangeTableFunc *rtf) default_ns_seen = true; } - /* Note the string may be NULL */ - ns_names = lappend(ns_names, makeString(r->name)); + /* We represent DEFAULT by a null pointer */ + ns_names = lappend(ns_names, + r->name ? 
makeString(r->name) : NULL); } tf->ns_uris = ns_uris; @@ -1071,19 +1051,32 @@ transformRangeTableSample(ParseState *pstate, RangeTableSample *rts) return tablesample; } - +/* + * getRTEForSpecialRelationTypes + * + * If given RangeVar refers to a CTE or an EphemeralNamedRelation, + * build and return an appropriate RTE, otherwise return NULL + */ static RangeTblEntry * getRTEForSpecialRelationTypes(ParseState *pstate, RangeVar *rv) { CommonTableExpr *cte; Index levelsup; - RangeTblEntry *rte = NULL; + RangeTblEntry *rte; + + /* + * if it is a qualified name, it can't be a CTE or tuplestore reference + */ + if (rv->schemaname) + return NULL; cte = scanNameSpaceForCTE(pstate, rv->relname, &levelsup); if (cte) - rte = transformCTEReference(pstate, rv, cte, levelsup); - if (!rte && scanNameSpaceForENR(pstate, rv->relname)) - rte = transformENRReference(pstate, rv); + rte = addRangeTableEntryForCTE(pstate, cte, levelsup, rv, true); + else if (scanNameSpaceForENR(pstate, rv->relname)) + rte = addRangeTableEntryForENR(pstate, rv, true); + else + rte = NULL; return rte; } @@ -1119,15 +1112,11 @@ transformFromClauseItem(ParseState *pstate, Node *n, /* Plain relation reference, or perhaps a CTE reference */ RangeVar *rv = (RangeVar *) n; RangeTblRef *rtr; - RangeTblEntry *rte = NULL; + RangeTblEntry *rte; int rtindex; - /* - * if it is an unqualified name, it might be a CTE or tuplestore - * reference - */ - if (!rv->schemaname) - rte = getRTEForSpecialRelationTypes(pstate, rv); + /* Check if it's a CTE or tuplestore reference */ + rte = getRTEForSpecialRelationTypes(pstate, rv); /* if not found above, must be a table reference */ if (!rte) @@ -2076,7 +2065,7 @@ findTargetlistEntrySQL99(ParseState *pstate, Node *node, List **tlist, /* * If no matches, construct a new target entry which is appended to the - * end of the target list. This target is given resjunk = TRUE so that it + * end of the target list. This target is given resjunk = true so that it * will not be projected into the final tuple. */ target_result = transformTargetEntry(pstate, node, expr, exprKind, @@ -2647,6 +2636,8 @@ transformWindowDefinitions(ParseState *pstate, WindowClause *refwc = NULL; List *partitionClause; List *orderClause; + Oid rangeopfamily = InvalidOid; + Oid rangeopcintype = InvalidOid; WindowClause *wc; winref++; @@ -2773,10 +2764,57 @@ transformWindowDefinitions(ParseState *pstate, parser_errposition(pstate, windef->location))); } wc->frameOptions = windef->frameOptions; + + /* + * RANGE offset PRECEDING/FOLLOWING requires exactly one ORDER BY + * column; check that and get its sort opfamily info. 
+ */ + if ((wc->frameOptions & FRAMEOPTION_RANGE) && + (wc->frameOptions & (FRAMEOPTION_START_OFFSET | + FRAMEOPTION_END_OFFSET))) + { + SortGroupClause *sortcl; + Node *sortkey; + int16 rangestrategy; + + if (list_length(wc->orderClause) != 1) + ereport(ERROR, + (errcode(ERRCODE_WINDOWING_ERROR), + errmsg("RANGE with offset PRECEDING/FOLLOWING requires exactly one ORDER BY column"), + parser_errposition(pstate, windef->location))); + sortcl = castNode(SortGroupClause, linitial(wc->orderClause)); + sortkey = get_sortgroupclause_expr(sortcl, *targetlist); + /* Find the sort operator in pg_amop */ + if (!get_ordering_op_properties(sortcl->sortop, + &rangeopfamily, + &rangeopcintype, + &rangestrategy)) + elog(ERROR, "operator %u is not a valid ordering operator", + sortcl->sortop); + /* Record properties of sort ordering */ + wc->inRangeColl = exprCollation(sortkey); + wc->inRangeAsc = (rangestrategy == BTLessStrategyNumber); + wc->inRangeNullsFirst = sortcl->nulls_first; + } + + /* Per spec, GROUPS mode requires an ORDER BY clause */ + if (wc->frameOptions & FRAMEOPTION_GROUPS) + { + if (wc->orderClause == NIL) + ereport(ERROR, + (errcode(ERRCODE_WINDOWING_ERROR), + errmsg("GROUPS mode requires an ORDER BY clause"), + parser_errposition(pstate, windef->location))); + } + /* Process frame offset expressions */ wc->startOffset = transformFrameOffset(pstate, wc->frameOptions, + rangeopfamily, rangeopcintype, + &wc->startInRangeFunc, windef->startOffset); wc->endOffset = transformFrameOffset(pstate, wc->frameOptions, + rangeopfamily, rangeopcintype, + &wc->endInRangeFunc, windef->endOffset); wc->winref = winref; @@ -3080,12 +3118,11 @@ resolve_unique_index_expr(ParseState *pstate, InferClause *infer, } /* - * transformExpr() should have already rejected subqueries, - * aggregates, and window functions, based on the EXPR_KIND_ for an - * index expression. Expressions returning sets won't have been - * rejected, but don't bother doing so here; there should be no - * available expression unique index to match any such expression - * against anyway. + * transformExpr() will reject subqueries, aggregates, window + * functions, and SRFs, based on being passed + * EXPR_KIND_INDEX_EXPRESSION. So we needn't worry about those + * further ... not that they would match any available index + * expression anyway. */ pInfer->expr = transformExpr(pstate, parse, EXPR_KIND_INDEX_EXPRESSION); @@ -3184,9 +3221,26 @@ transformOnConflictArbiter(ParseState *pstate, pstate->p_namespace = save_namespace; + /* + * If the arbiter is specified by constraint name, get the constraint + * OID and mark the constrained columns as requiring SELECT privilege, + * in the same way as would have happened if the arbiter had been + * specified by explicit reference to the constraint's index columns. 
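Given the checks just above, RANGE with an offset now works for any ORDER BY column type whose btree opfamily provides an in_range support function, with the offset coerced to a matching type. A minimal sketch, assuming a hypothetical readings(ts timestamptz, val numeric) table:

    SELECT ts,
           avg(val) OVER (ORDER BY ts
                          RANGE BETWEEN INTERVAL '1 hour' PRECEDING AND CURRENT ROW) AS avg_1h
    FROM readings;

Exactly one ORDER BY column is allowed in this mode, and the offset's type (interval here) must be one the in_range lookup below can match.
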
+ */ if (infer->conname) - *constraint = get_relation_constraint_oid(RelationGetRelid(pstate->p_target_relation), - infer->conname, false); + { + Oid relid = RelationGetRelid(pstate->p_target_relation); + RangeTblEntry *rte = pstate->p_target_rangetblentry; + Bitmapset *conattnos; + + conattnos = get_relation_constraint_attnos(relid, infer->conname, + false, constraint); + + /* Make sure the rel as a whole is marked for SELECT access */ + rte->requiredPerms |= ACL_SELECT; + /* Mark the constrained columns as requiring SELECT access */ + rte->selectedCols = bms_add_members(rte->selectedCols, conattnos); + } } /* @@ -3492,13 +3546,24 @@ findWindowClause(List *wclist, const char *name) /* * transformFrameOffset * Process a window frame offset expression + * + * In RANGE mode, rangeopfamily is the sort opfamily for the input ORDER BY + * column, and rangeopcintype is the input data type the sort operator is + * registered with. We expect the in_range function to be registered with + * that same type. (In binary-compatible cases, it might be different from + * the input column's actual type, so we can't use that for the lookups.) + * We'll return the OID of the in_range function to *inRangeFunc. */ static Node * -transformFrameOffset(ParseState *pstate, int frameOptions, Node *clause) +transformFrameOffset(ParseState *pstate, int frameOptions, + Oid rangeopfamily, Oid rangeopcintype, Oid *inRangeFunc, + Node *clause) { const char *constructName = NULL; Node *node; + *inRangeFunc = InvalidOid; /* default result */ + /* Quick exit if no offset expression */ if (clause == NULL) return NULL; @@ -3516,16 +3581,105 @@ transformFrameOffset(ParseState *pstate, int frameOptions, Node *clause) } else if (frameOptions & FRAMEOPTION_RANGE) { + /* + * We must look up the in_range support function that's to be used, + * possibly choosing one of several, and coerce the "offset" value to + * the appropriate input type. + */ + Oid nodeType; + Oid preferredType; + int nfuncs = 0; + int nmatches = 0; + Oid selectedType = InvalidOid; + Oid selectedFunc = InvalidOid; + CatCList *proclist; + int i; + /* Transform the raw expression tree */ node = transformExpr(pstate, clause, EXPR_KIND_WINDOW_FRAME_RANGE); + nodeType = exprType(node); /* - * this needs a lot of thought to decide how to support in the context - * of Postgres' extensible datatype framework + * If there are multiple candidates, we'll prefer the one that exactly + * matches nodeType; or if nodeType is as yet unknown, prefer the one + * that exactly matches the sort column type. (The second rule is + * like what we do for "known_type operator unknown".) */ + preferredType = (nodeType != UNKNOWNOID) ? 
nodeType : rangeopcintype; + + /* Find the in_range support functions applicable to this case */ + proclist = SearchSysCacheList2(AMPROCNUM, + ObjectIdGetDatum(rangeopfamily), + ObjectIdGetDatum(rangeopcintype)); + for (i = 0; i < proclist->n_members; i++) + { + HeapTuple proctup = &proclist->members[i]->tuple; + Form_pg_amproc procform = (Form_pg_amproc) GETSTRUCT(proctup); + + /* The search will find all support proc types; ignore others */ + if (procform->amprocnum != BTINRANGE_PROC) + continue; + nfuncs++; + + /* Ignore function if given value can't be coerced to that type */ + if (!can_coerce_type(1, &nodeType, &procform->amprocrighttype, + COERCION_IMPLICIT)) + continue; + nmatches++; + + /* Remember preferred match, or any match if didn't find that */ + if (selectedType != preferredType) + { + selectedType = procform->amprocrighttype; + selectedFunc = procform->amproc; + } + } + ReleaseCatCacheList(proclist); + + /* + * Throw error if needed. It seems worth taking the trouble to + * distinguish "no support at all" from "you didn't match any + * available offset type". + */ + if (nfuncs == 0) + ereport(ERROR, + (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), + errmsg("RANGE with offset PRECEDING/FOLLOWING is not supported for column type %s", + format_type_be(rangeopcintype)), + parser_errposition(pstate, exprLocation(node)))); + if (nmatches == 0) + ereport(ERROR, + (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), + errmsg("RANGE with offset PRECEDING/FOLLOWING is not supported for column type %s and offset type %s", + format_type_be(rangeopcintype), + format_type_be(nodeType)), + errhint("Cast the offset value to an appropriate type."), + parser_errposition(pstate, exprLocation(node)))); + if (nmatches != 1 && selectedType != preferredType) + ereport(ERROR, + (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), + errmsg("RANGE with offset PRECEDING/FOLLOWING has multiple interpretations for column type %s and offset type %s", + format_type_be(rangeopcintype), + format_type_be(nodeType)), + errhint("Cast the offset value to the exact intended type."), + parser_errposition(pstate, exprLocation(node)))); + + /* OK, coerce the offset to the right type */ constructName = "RANGE"; - /* error was already thrown by gram.y, this is just a backstop */ - elog(ERROR, "window frame with value offset is not implemented"); + node = coerce_to_specific_type(pstate, node, + selectedType, constructName); + *inRangeFunc = selectedFunc; + } + else if (frameOptions & FRAMEOPTION_GROUPS) + { + /* Transform the raw expression tree */ + node = transformExpr(pstate, clause, EXPR_KIND_WINDOW_FRAME_GROUPS); + + /* + * Like LIMIT clause, simply coerce to int8 + */ + constructName = "GROUPS"; + node = coerce_to_specific_type(pstate, node, INT8OID, constructName); } else { diff --git a/src/backend/parser/parse_coerce.c b/src/backend/parser/parse_coerce.c index 0bc7dba6a0..f4fc7b61e7 100644 --- a/src/backend/parser/parse_coerce.c +++ b/src/backend/parser/parse_coerce.c @@ -3,7 +3,7 @@ * parse_coerce.c * handle type coercions/conversions for parser * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * @@ -17,7 +17,7 @@ #include "access/htup_details.h" #include "catalog/pg_cast.h" #include "catalog/pg_class.h" -#include "catalog/pg_inherits_fn.h" +#include "catalog/pg_inherits.h" #include "catalog/pg_proc.h" #include "catalog/pg_type.h" #include "nodes/makefuncs.h" @@ 
-34,15 +34,16 @@ static Node *coerce_type_typmod(Node *node, Oid targetTypeId, int32 targetTypMod, - CoercionForm cformat, int location, - bool isExplicit, bool hideInputCoercion); + CoercionContext ccontext, CoercionForm cformat, + int location, + bool hideInputCoercion); static void hide_coercion_node(Node *node); static Node *build_coercion_expression(Node *node, CoercionPathType pathtype, Oid funcId, Oid targetTypeId, int32 targetTypMod, - CoercionForm cformat, int location, - bool isExplicit); + CoercionContext ccontext, CoercionForm cformat, + int location); static Node *coerce_record_to_complex(ParseState *pstate, Node *node, Oid targetTypeId, CoercionContext ccontext, @@ -110,8 +111,7 @@ coerce_to_target_type(ParseState *pstate, Node *expr, Oid exprtype, */ result = coerce_type_typmod(result, targettype, targettypmod, - cformat, location, - (cformat != COERCE_IMPLICIT_CAST), + ccontext, cformat, location, (result != expr && !IsA(result, Const))); if (expr != origexpr) @@ -355,7 +355,8 @@ coerce_type(ParseState *pstate, Node *node, result = coerce_to_domain(result, baseTypeId, baseTypeMod, targetTypeId, - cformat, location, false, false); + ccontext, cformat, location, + false); ReleaseSysCache(baseType); @@ -369,11 +370,11 @@ coerce_type(ParseState *pstate, Node *node, * transformed node (very possibly the same Param node), or return * NULL to indicate we should proceed with normal coercion. */ - result = (*pstate->p_coerce_param_hook) (pstate, - (Param *) node, - targetTypeId, - targetTypeMod, - location); + result = pstate->p_coerce_param_hook(pstate, + (Param *) node, + targetTypeId, + targetTypeMod, + location); if (result) return result; } @@ -417,20 +418,17 @@ coerce_type(ParseState *pstate, Node *node, result = build_coercion_expression(node, pathtype, funcId, baseTypeId, baseTypeMod, - cformat, location, - (cformat != COERCE_IMPLICIT_CAST)); + ccontext, cformat, location); /* * If domain, coerce to the domain type and relabel with domain - * type ID. We can skip the internal length-coercion step if the - * selected coercion function was a type-and-length coercion. + * type ID, hiding the previous coercion node. */ if (targetTypeId != baseTypeId) result = coerce_to_domain(result, baseTypeId, baseTypeMod, targetTypeId, - cformat, location, true, - exprIsLengthCoercion(result, - NULL)); + ccontext, cformat, location, + true); } else { @@ -444,7 +442,8 @@ coerce_type(ParseState *pstate, Node *node, * then we won't need a RelabelType node. */ result = coerce_to_domain(node, InvalidOid, -1, targetTypeId, - cformat, location, false, false); + ccontext, cformat, location, + false); if (result == node) { /* @@ -500,9 +499,26 @@ coerce_type(ParseState *pstate, Node *node, * Input class type is a subclass of target, so generate an * appropriate runtime conversion (removing unneeded columns and * possibly rearranging the ones that are wanted). + * + * We will also get here when the input is a domain over a subclass of + * the target type. To keep life simple for the executor, we define + * ConvertRowtypeExpr as only working between regular composite types; + * therefore, in such cases insert a RelabelType to smash the input + * expression down to its base type. 
*/ + Oid baseTypeId = getBaseType(inputTypeId); ConvertRowtypeExpr *r = makeNode(ConvertRowtypeExpr); + if (baseTypeId != inputTypeId) + { + RelabelType *rt = makeRelabelType((Expr *) node, + baseTypeId, -1, + InvalidOid, + COERCE_IMPLICIT_CAST); + + rt->location = location; + node = (Node *) rt; + } r->arg = (Expr *) node; r->resulttype = targetTypeId; r->convertformat = cformat; @@ -524,7 +540,7 @@ coerce_type(ParseState *pstate, Node *node, * as this determines the set of available casts. */ bool -can_coerce_type(int nargs, Oid *input_typeids, Oid *target_typeids, +can_coerce_type(int nargs, const Oid *input_typeids, const Oid *target_typeids, CoercionContext ccontext) { bool have_generics = false; @@ -636,19 +652,17 @@ can_coerce_type(int nargs, Oid *input_typeids, Oid *target_typeids, * 'baseTypeMod': base type typmod of domain, if known (pass -1 if caller * has not bothered to look this up) * 'typeId': target type to coerce to - * 'cformat': coercion format + * 'ccontext': context indicator to control coercions + * 'cformat': coercion display format * 'location': coercion request location * 'hideInputCoercion': if true, hide the input coercion under this one. - * 'lengthCoercionDone': if true, caller already accounted for length, - * ie the input is already of baseTypMod as well as baseTypeId. * * If the target type isn't a domain, the given 'arg' is returned as-is. */ Node * coerce_to_domain(Node *arg, Oid baseTypeId, int32 baseTypeMod, Oid typeId, - CoercionForm cformat, int location, - bool hideInputCoercion, - bool lengthCoercionDone) + CoercionContext ccontext, CoercionForm cformat, int location, + bool hideInputCoercion) { CoerceToDomain *result; @@ -677,14 +691,9 @@ coerce_to_domain(Node *arg, Oid baseTypeId, int32 baseTypeMod, Oid typeId, * would be safe to do anyway, without lots of knowledge about what the * base type thinks the typmod means. */ - if (!lengthCoercionDone) - { - if (baseTypeMod >= 0) - arg = coerce_type_typmod(arg, baseTypeId, baseTypeMod, - COERCE_IMPLICIT_CAST, location, - (cformat != COERCE_IMPLICIT_CAST), - false); - } + arg = coerce_type_typmod(arg, baseTypeId, baseTypeMod, + ccontext, COERCE_IMPLICIT_CAST, location, + false); /* * Now build the domain coercion node. This represents run-time checking @@ -714,11 +723,14 @@ coerce_to_domain(Node *arg, Oid baseTypeId, int32 baseTypeMod, Oid typeId, * The caller must have already ensured that the value is of the correct * type, typically by applying coerce_type. * - * cformat determines the display properties of the generated node (if any), - * while isExplicit may affect semantics. If hideInputCoercion is true - * *and* we generate a node, the input node is forced to IMPLICIT display - * form, so that only the typmod coercion node will be visible when - * displaying the expression. + * ccontext may affect semantics, depending on whether the length coercion + * function pays attention to the isExplicit flag it's passed. + * + * cformat determines the display properties of the generated node (if any). + * + * If hideInputCoercion is true *and* we generate a node, the input node is + * forced to IMPLICIT display form, so that only the typmod coercion node will + * be visible when displaying the expression. 
* * NOTE: this does not need to work on domain types, because any typmod * coercion for a domain is considered to be part of the type coercion @@ -726,8 +738,9 @@ coerce_to_domain(Node *arg, Oid baseTypeId, int32 baseTypeMod, Oid typeId, */ static Node * coerce_type_typmod(Node *node, Oid targetTypeId, int32 targetTypMod, - CoercionForm cformat, int location, - bool isExplicit, bool hideInputCoercion) + CoercionContext ccontext, CoercionForm cformat, + int location, + bool hideInputCoercion) { CoercionPathType pathtype; Oid funcId; @@ -749,8 +762,7 @@ coerce_type_typmod(Node *node, Oid targetTypeId, int32 targetTypMod, node = build_coercion_expression(node, pathtype, funcId, targetTypeId, targetTypMod, - cformat, location, - isExplicit); + ccontext, cformat, location); } return node; @@ -799,8 +811,8 @@ build_coercion_expression(Node *node, CoercionPathType pathtype, Oid funcId, Oid targetTypeId, int32 targetTypMod, - CoercionForm cformat, int location, - bool isExplicit) + CoercionContext ccontext, CoercionForm cformat, + int location) { int nargs = 0; @@ -822,8 +834,7 @@ build_coercion_expression(Node *node, */ /* Assert(targetTypeId == procstruct->prorettype); */ Assert(!procstruct->proretset); - Assert(!procstruct->proisagg); - Assert(!procstruct->proiswindow); + Assert(procstruct->prokind == PROKIND_FUNCTION); nargs = procstruct->pronargs; Assert(nargs >= 1 && nargs <= 3); /* Assert(procstruct->proargtypes.values[0] == exprType(node)); */ @@ -865,7 +876,7 @@ build_coercion_expression(Node *node, -1, InvalidOid, sizeof(bool), - BoolGetDatum(isExplicit), + BoolGetDatum(ccontext == COERCION_EXPLICIT), false, true); @@ -881,19 +892,57 @@ build_coercion_expression(Node *node, { /* We need to build an ArrayCoerceExpr */ ArrayCoerceExpr *acoerce = makeNode(ArrayCoerceExpr); + CaseTestExpr *ctest = makeNode(CaseTestExpr); + Oid sourceBaseTypeId; + int32 sourceBaseTypeMod; + Oid targetElementType; + Node *elemexpr; + + /* + * Look through any domain over the source array type. Note we don't + * expect that the target type is a domain; it must be a plain array. + * (To get to a domain target type, we'll do coerce_to_domain later.) + */ + sourceBaseTypeMod = exprTypmod(node); + sourceBaseTypeId = getBaseTypeAndTypmod(exprType(node), + &sourceBaseTypeMod); + + /* + * Set up a CaseTestExpr representing one element of the source array. + * This is an abuse of CaseTestExpr, but it's OK as long as there + * can't be any CaseExpr or ArrayCoerceExpr within the completed + * elemexpr. + */ + ctest->typeId = get_element_type(sourceBaseTypeId); + Assert(OidIsValid(ctest->typeId)); + ctest->typeMod = sourceBaseTypeMod; + ctest->collation = InvalidOid; /* Assume coercions don't care */ + + /* And coerce it to the target element type */ + targetElementType = get_element_type(targetTypeId); + Assert(OidIsValid(targetElementType)); + + elemexpr = coerce_to_target_type(NULL, + (Node *) ctest, + ctest->typeId, + targetElementType, + targetTypMod, + ccontext, + cformat, + location); + if (elemexpr == NULL) /* shouldn't happen */ + elog(ERROR, "failed to coerce array element type as expected"); acoerce->arg = (Expr *) node; - acoerce->elemfuncid = funcId; + acoerce->elemexpr = (Expr *) elemexpr; acoerce->resulttype = targetTypeId; /* - * Label the output as having a particular typmod only if we are - * really invoking a length-coercion function, ie one with more than - * one argument. 
+ * Label the output as having a particular element typmod only if we + * ended up with a per-element expression that is labeled that way. */ - acoerce->resulttypmod = (nargs >= 2) ? targetTypMod : -1; + acoerce->resulttypmod = exprTypmod(elemexpr); /* resultcollid will be set by parse_collate.c */ - acoerce->isExplicit = isExplicit; acoerce->coerceformat = cformat; acoerce->location = location; @@ -938,6 +987,8 @@ coerce_record_to_complex(ParseState *pstate, Node *node, int location) { RowExpr *rowexpr; + Oid baseTypeId; + int32 baseTypeMod = -1; TupleDesc tupdesc; List *args = NIL; List *newargs; @@ -973,7 +1024,14 @@ coerce_record_to_complex(ParseState *pstate, Node *node, format_type_be(targetTypeId)), parser_coercion_errposition(pstate, location, node))); - tupdesc = lookup_rowtype_tupdesc(targetTypeId, -1); + /* + * Look up the composite type, accounting for possibility that what we are + * given is a domain over composite. + */ + baseTypeId = getBaseTypeAndTypmod(targetTypeId, &baseTypeMod); + tupdesc = lookup_rowtype_tupdesc(baseTypeId, baseTypeMod); + + /* Process the fields */ newargs = NIL; ucolno = 1; arg = list_head(args); @@ -982,9 +1040,10 @@ coerce_record_to_complex(ParseState *pstate, Node *node, Node *expr; Node *cexpr; Oid exprtype; + Form_pg_attribute attr = TupleDescAttr(tupdesc, i); /* Fill in NULLs for dropped columns in rowtype */ - if (tupdesc->attrs[i]->attisdropped) + if (attr->attisdropped) { /* * can't use atttypid here, but it doesn't really matter what type @@ -1008,8 +1067,8 @@ coerce_record_to_complex(ParseState *pstate, Node *node, cexpr = coerce_to_target_type(pstate, expr, exprtype, - tupdesc->attrs[i]->atttypid, - tupdesc->attrs[i]->atttypmod, + attr->atttypid, + attr->atttypmod, ccontext, COERCE_IMPLICIT_CAST, -1); @@ -1021,7 +1080,7 @@ coerce_record_to_complex(ParseState *pstate, Node *node, format_type_be(targetTypeId)), errdetail("Cannot cast type %s to %s in column %d.", format_type_be(exprtype), - format_type_be(tupdesc->attrs[i]->atttypid), + format_type_be(attr->atttypid), ucolno), parser_coercion_errposition(pstate, location, expr))); newargs = lappend(newargs, cexpr); @@ -1041,10 +1100,22 @@ coerce_record_to_complex(ParseState *pstate, Node *node, rowexpr = makeNode(RowExpr); rowexpr->args = newargs; - rowexpr->row_typeid = targetTypeId; + rowexpr->row_typeid = baseTypeId; rowexpr->row_format = cformat; rowexpr->colnames = NIL; /* not needed for named target type */ rowexpr->location = location; + + /* If target is a domain, apply constraints */ + if (baseTypeId != targetTypeId) + { + rowexpr->row_format = COERCE_IMPLICIT_CAST; + return coerce_to_domain((Node *) rowexpr, + baseTypeId, baseTypeMod, + targetTypeId, + ccontext, cformat, location, + false); + } + return (Node *) rowexpr; } @@ -1398,11 +1469,11 @@ coerce_to_common_type(ParseState *pstate, Node *node, * that is, so long as there is no use of ANYELEMENT. This is mostly for * backwards compatibility with the pre-7.4 behavior of ANYARRAY. * - * We do not ereport here, but just return FALSE if a rule is violated. + * We do not ereport here, but just return false if a rule is violated. */ bool -check_generic_type_consistency(Oid *actual_arg_types, - Oid *declared_arg_types, +check_generic_type_consistency(const Oid *actual_arg_types, + const Oid *declared_arg_types, int nargs) { int j; @@ -1598,7 +1669,7 @@ check_generic_type_consistency(Oid *actual_arg_types, * assume that successive inputs are of the same actual element type. 
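The coerce_record_to_complex() changes above are what allow a row constructor to be coerced to a domain over a composite type: the RowExpr is labeled with the base composite type and then wrapped in a domain coercion so the constraints get checked. A minimal SQL sketch of that behavior (the type and domain names are illustrative, not taken from the patch):

    CREATE TYPE complexpair AS (r float8, i float8);
    CREATE DOMAIN nonneg_pair AS complexpair CHECK ((VALUE).r >= 0);

    SELECT ROW(1.5, 2.0)::nonneg_pair;   -- RowExpr built with the base type,
                                         -- then run through CoerceToDomain
    SELECT ROW(-1.0, 0)::nonneg_pair;    -- expected to fail the CHECK constraint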
*/ Oid -enforce_generic_type_consistency(Oid *actual_arg_types, +enforce_generic_type_consistency(const Oid *actual_arg_types, Oid *declared_arg_types, int nargs, Oid rettype, @@ -2021,7 +2092,7 @@ TypeCategory(Oid type) /* IsPreferredType() * Check if this type is a preferred type for the given category. * - * If category is TYPCATEGORY_INVALID, then we'll return TRUE for preferred + * If category is TYPCATEGORY_INVALID, then we'll return true for preferred * types of any category; otherwise, only for preferred types of that * category. */ @@ -2147,8 +2218,7 @@ IsBinaryCoercible(Oid srctype, Oid targettype) * COERCION_PATH_RELABELTYPE: binary-compatible cast, no function needed * *funcid is set to InvalidOid * COERCION_PATH_ARRAYCOERCE: need an ArrayCoerceExpr node - * *funcid is set to the element cast function, or InvalidOid - * if the array elements are binary-compatible + * *funcid is set to InvalidOid * COERCION_PATH_COERCEVIAIO: need a CoerceViaIO node * *funcid is set to InvalidOid * @@ -2234,11 +2304,8 @@ find_coercion_pathway(Oid targetTypeId, Oid sourceTypeId, { /* * If there's no pg_cast entry, perhaps we are dealing with a pair of - * array types. If so, and if the element types have a suitable cast, - * report that we can coerce with an ArrayCoerceExpr. - * - * Note that the source type can be a domain over array, but not the - * target, because ArrayCoerceExpr won't check domain constraints. + * array types. If so, and if their element types have a conversion + * pathway, report that we can coerce with an ArrayCoerceExpr. * * Hack: disallow coercions to oidvector and int2vector, which * otherwise tend to capture coercions that should go to "real" array @@ -2253,7 +2320,7 @@ find_coercion_pathway(Oid targetTypeId, Oid sourceTypeId, Oid sourceElem; if ((targetElem = get_element_type(targetTypeId)) != InvalidOid && - (sourceElem = get_base_element_type(sourceTypeId)) != InvalidOid) + (sourceElem = get_element_type(sourceTypeId)) != InvalidOid) { CoercionPathType elempathtype; Oid elemfuncid; @@ -2262,14 +2329,9 @@ find_coercion_pathway(Oid targetTypeId, Oid sourceTypeId, sourceElem, ccontext, &elemfuncid); - if (elempathtype != COERCION_PATH_NONE && - elempathtype != COERCION_PATH_ARRAYCOERCE) + if (elempathtype != COERCION_PATH_NONE) { - *funcid = elemfuncid; - if (elempathtype == COERCION_PATH_COERCEVIAIO) - result = COERCION_PATH_COERCEVIAIO; - else - result = COERCION_PATH_ARRAYCOERCE; + result = COERCION_PATH_ARRAYCOERCE; } } } @@ -2310,7 +2372,9 @@ find_coercion_pathway(Oid targetTypeId, Oid sourceTypeId, * If the given type is a varlena array type, we do not look for a coercion * function associated directly with the array type, but instead look for * one associated with the element type. An ArrayCoerceExpr node must be - * used to apply such a function. + * used to apply such a function. (Note: currently, it's pointless to + * return the funcid in this case, because it'll just get looked up again + * in the recursive construction of the ArrayCoerceExpr's elemexpr.) * * We use the same result enum as find_coercion_pathway, but the only possible * result codes are: @@ -2379,13 +2443,13 @@ is_complex_array(Oid typid) /* * Check whether reltypeId is the row type of a typed table of type - * reloftypeId. (This is conceptually similar to the subtype - * relationship checked by typeInheritsFrom().) + * reloftypeId, or is a domain over such a row type. (This is conceptually + * similar to the subtype relationship checked by typeInheritsFrom().) 
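For the ArrayCoerceExpr rework, the SQL users write is largely unchanged; what changes is that the per-element cast and length coercion are now carried by a recursively built element expression rather than a single elemfuncid, and find_coercion_pathway() only requires that the element types have some conversion pathway. An ordinary example that exercises the per-element path:

    SELECT ARRAY[1.234, 5.678]::numeric(5,2)[];  -- each element is coerced through
                                                 -- the elemexpr, giving {1.23,5.68}
    SELECT ARRAY['1','2','3']::text[]::int[];    -- element-level pathway (here via I/O)
                                                 -- drives the array coercion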
*/ static bool typeIsOfTypedTable(Oid reltypeId, Oid reloftypeId) { - Oid relid = typeidTypeRelid(reltypeId); + Oid relid = typeOrDomainTypeRelid(reltypeId); bool result = false; if (relid) diff --git a/src/backend/parser/parse_collate.c b/src/backend/parser/parse_collate.c index 0d106c4c19..6d34245083 100644 --- a/src/backend/parser/parse_collate.c +++ b/src/backend/parser/parse_collate.c @@ -29,7 +29,7 @@ * at runtime. If we knew exactly which functions require collation * information, we could throw those errors at parse time instead. * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * diff --git a/src/backend/parser/parse_cte.c b/src/backend/parser/parse_cte.c index 5160fdb0e0..d28c421b6f 100644 --- a/src/backend/parser/parse_cte.c +++ b/src/backend/parser/parse_cte.c @@ -3,7 +3,7 @@ * parse_cte.c * handle CTEs (common table expressions) in parser * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * diff --git a/src/backend/parser/parse_enr.c b/src/backend/parser/parse_enr.c index 1cfcf65a51..069249b732 100644 --- a/src/backend/parser/parse_enr.c +++ b/src/backend/parser/parse_enr.c @@ -3,7 +3,7 @@ * parse_enr.c * parser support routines dealing with ephemeral named relations * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * diff --git a/src/backend/parser/parse_expr.c b/src/backend/parser/parse_expr.c index 6d8cb07766..385e54a9b6 100644 --- a/src/backend/parser/parse_expr.c +++ b/src/backend/parser/parse_expr.c @@ -3,7 +3,7 @@ * parse_expr.c * handle expressions in parser * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * @@ -386,7 +386,7 @@ transformExprRecurse(ParseState *pstate, Node *expr) * selection from an arbitrary node needs it.) 
*/ static void -unknown_attribute(ParseState *pstate, Node *relref, char *attname, +unknown_attribute(ParseState *pstate, Node *relref, const char *attname, int location) { RangeTblEntry *rte; @@ -480,6 +480,7 @@ transformIndirection(ParseState *pstate, A_Indirection *ind) list_make1(result), last_srf, NULL, + false, location); if (newresult == NULL) unknown_attribute(pstate, result, strVal(n), location); @@ -527,7 +528,7 @@ transformColumnRef(ParseState *pstate, ColumnRef *cref) */ if (pstate->p_pre_columnref_hook != NULL) { - node = (*pstate->p_pre_columnref_hook) (pstate, cref); + node = pstate->p_pre_columnref_hook(pstate, cref); if (node != NULL) return node; } @@ -629,6 +630,7 @@ transformColumnRef(ParseState *pstate, ColumnRef *cref) list_make1(node), pstate->p_last_srf, NULL, + false, cref->location); } break; @@ -676,6 +678,7 @@ transformColumnRef(ParseState *pstate, ColumnRef *cref) list_make1(node), pstate->p_last_srf, NULL, + false, cref->location); } break; @@ -736,6 +739,7 @@ transformColumnRef(ParseState *pstate, ColumnRef *cref) list_make1(node), pstate->p_last_srf, NULL, + false, cref->location); } break; @@ -758,7 +762,7 @@ transformColumnRef(ParseState *pstate, ColumnRef *cref) { Node *hookresult; - hookresult = (*pstate->p_post_columnref_hook) (pstate, cref, node); + hookresult = pstate->p_post_columnref_hook(pstate, cref, node); if (node == NULL) node = hookresult; else if (hookresult != NULL) @@ -813,7 +817,7 @@ transformParamRef(ParseState *pstate, ParamRef *pref) * call it. If not, or if the hook returns NULL, throw a generic error. */ if (pstate->p_paramref_hook != NULL) - result = (*pstate->p_paramref_hook) (pstate, pref); + result = pstate->p_paramref_hook(pstate, pref); else result = NULL; @@ -1477,6 +1481,7 @@ transformFuncCall(ParseState *pstate, FuncCall *fn) targs, last_srf, fn, + false, fn->location); } @@ -1800,6 +1805,7 @@ transformSubLink(ParseState *pstate, SubLink *sublink) case EXPR_KIND_WINDOW_ORDER: case EXPR_KIND_WINDOW_FRAME_RANGE: case EXPR_KIND_WINDOW_FRAME_ROWS: + case EXPR_KIND_WINDOW_FRAME_GROUPS: case EXPR_KIND_SELECT_TARGET: case EXPR_KIND_INSERT_TARGET: case EXPR_KIND_UPDATE_SOURCE: @@ -1840,6 +1846,9 @@ transformSubLink(ParseState *pstate, SubLink *sublink) case EXPR_KIND_PARTITION_EXPRESSION: err = _("cannot use subquery in partition key expression"); break; + case EXPR_KIND_CALL_ARGUMENT: + err = _("cannot use subquery in CALL argument"); + break; /* * There is intentionally no default: case here, so that the @@ -2585,9 +2594,9 @@ transformCurrentOfExpr(ParseState *pstate, CurrentOfExpr *cexpr) /* See if there is a translation available from a parser hook */ if (pstate->p_pre_columnref_hook != NULL) - node = (*pstate->p_pre_columnref_hook) (pstate, cref); + node = pstate->p_pre_columnref_hook(pstate, cref); if (node == NULL && pstate->p_post_columnref_hook != NULL) - node = (*pstate->p_post_columnref_hook) (pstate, cref, NULL); + node = pstate->p_post_columnref_hook(pstate, cref, NULL); /* * XXX Should we throw an error if we get a translation that isn't a @@ -3422,6 +3431,8 @@ ParseExprKindName(ParseExprKind exprKind) return "window RANGE"; case EXPR_KIND_WINDOW_FRAME_ROWS: return "window ROWS"; + case EXPR_KIND_WINDOW_FRAME_GROUPS: + return "window GROUPS"; case EXPR_KIND_SELECT_TARGET: return "SELECT"; case EXPR_KIND_INSERT_TARGET: @@ -3462,6 +3473,8 @@ ParseExprKindName(ParseExprKind exprKind) return "WHEN"; case EXPR_KIND_PARTITION_EXPRESSION: return "PARTITION BY"; + case EXPR_KIND_CALL_ARGUMENT: + return "CALL"; /* * There is 
intentionally no default: case here, so that the diff --git a/src/backend/parser/parse_func.c b/src/backend/parser/parse_func.c index 8487edaa95..44257154b8 100644 --- a/src/backend/parser/parse_func.c +++ b/src/backend/parser/parse_func.c @@ -3,7 +3,7 @@ * parse_func.c * handle function calls in parser * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * @@ -39,7 +39,7 @@ static void unify_hypothetical_args(ParseState *pstate, List *fargs, int numAggregatedArgs, Oid *actual_arg_types, Oid *declared_arg_types); static Oid FuncNameAsType(List *funcname); -static Node *ParseComplexProjection(ParseState *pstate, char *funcname, +static Node *ParseComplexProjection(ParseState *pstate, const char *funcname, Node *first_arg, int location); @@ -49,15 +49,17 @@ static Node *ParseComplexProjection(ParseState *pstate, char *funcname, * For historical reasons, Postgres tries to treat the notations tab.col * and col(tab) as equivalent: if a single-argument function call has an * argument of complex type and the (unqualified) function name matches - * any attribute of the type, we take it as a column projection. Conversely - * a function of a single complex-type argument can be written like a - * column reference, allowing functions to act like computed columns. + * any attribute of the type, we can interpret it as a column projection. + * Conversely a function of a single complex-type argument can be written + * like a column reference, allowing functions to act like computed columns. + * + * If both interpretations are possible, we prefer the one matching the + * syntactic form, but otherwise the form does not matter. * * Hence, both cases come through here. If fn is null, we're dealing with - * column syntax not function syntax, but in principle that should not - * affect the lookup behavior, only which error messages we deliver. - * The FuncCall struct is needed however to carry various decoration that - * applies to aggregate and window functions. + * column syntax not function syntax. In the function-syntax case, + * the FuncCall struct is needed to carry various decoration that applies + * to aggregate and window functions. * * Also, when fn is null, we return NULL on failure rather than * reporting a no-such-function error. @@ -68,10 +70,13 @@ static Node *ParseComplexProjection(ParseState *pstate, char *funcname, * last_srf should be a copy of pstate->p_last_srf from just before we * started transforming fargs. If the caller knows that fargs couldn't * contain any SRF calls, last_srf can just be pstate->p_last_srf. + * + * proc_call is true if we are considering a CALL statement, so that the + * name must resolve to a procedure name, not anything else. */ Node * ParseFuncOrColumn(ParseState *pstate, List *funcname, List *fargs, - Node *last_srf, FuncCall *fn, int location) + Node *last_srf, FuncCall *fn, bool proc_call, int location) { bool is_column = (fn == NULL); List *agg_order = (fn ? fn->agg_order : NIL); @@ -81,6 +86,7 @@ ParseFuncOrColumn(ParseState *pstate, List *funcname, List *fargs, bool agg_distinct = (fn ? fn->agg_distinct : false); bool func_variadic = (fn ? fn->func_variadic : false); WindowDef *over = (fn ? 
fn->over : NULL); + bool could_be_projection; Oid rettype; Oid funcid; ListCell *l; @@ -199,35 +205,39 @@ ParseFuncOrColumn(ParseState *pstate, List *funcname, List *fargs, } /* - * Check for column projection: if function has one argument, and that - * argument is of complex type, and function name is not qualified, then - * the "function call" could be a projection. We also check that there - * wasn't any aggregate or variadic decoration, nor an argument name. + * Decide whether it's legitimate to consider the construct to be a column + * projection. For that, there has to be a single argument of complex + * type, the function name must not be qualified, and there cannot be any + * syntactic decoration that'd require it to be a function (such as + * aggregate or variadic decoration, or named arguments). */ - if (nargs == 1 && agg_order == NIL && agg_filter == NULL && !agg_star && - !agg_distinct && over == NULL && !func_variadic && argnames == NIL && - list_length(funcname) == 1) - { - Oid argtype = actual_arg_types[0]; + could_be_projection = (nargs == 1 && !proc_call && + agg_order == NIL && agg_filter == NULL && + !agg_star && !agg_distinct && over == NULL && + !func_variadic && argnames == NIL && + list_length(funcname) == 1 && + (actual_arg_types[0] == RECORDOID || + ISCOMPLEX(actual_arg_types[0]))); - if (argtype == RECORDOID || ISCOMPLEX(argtype)) - { - retval = ParseComplexProjection(pstate, - strVal(linitial(funcname)), - first_arg, - location); - if (retval) - return retval; + /* + * If it's column syntax, check for column projection case first. + */ + if (could_be_projection && is_column) + { + retval = ParseComplexProjection(pstate, + strVal(linitial(funcname)), + first_arg, + location); + if (retval) + return retval; - /* - * If ParseComplexProjection doesn't recognize it as a projection, - * just press on. - */ - } + /* + * If ParseComplexProjection doesn't recognize it as a projection, + * just press on. + */ } /* - * Okay, it's not a column projection, so it must really be a function. * func_get_detail looks up the function in the catalogs, does * disambiguation for polymorphic functions, handles inheritance, and * returns the funcid and type and set or singleton status of the @@ -253,21 +263,42 @@ ParseFuncOrColumn(ParseState *pstate, List *funcname, List *fargs, cancel_parser_errposition_callback(&pcbstate); - if (fdresult == FUNCDETAIL_COERCION) - { - /* - * We interpreted it as a type coercion. coerce_type can handle these - * cases, so why duplicate code... - */ - return coerce_type(pstate, linitial(fargs), - actual_arg_types[0], rettype, -1, - COERCION_EXPLICIT, COERCE_EXPLICIT_CALL, location); - } - else if (fdresult == FUNCDETAIL_NORMAL) + /* + * Check for various wrong-kind-of-routine cases. 
+ */ + + /* If this is a CALL, reject things that aren't procedures */ + if (proc_call && + (fdresult == FUNCDETAIL_NORMAL || + fdresult == FUNCDETAIL_AGGREGATE || + fdresult == FUNCDETAIL_WINDOWFUNC || + fdresult == FUNCDETAIL_COERCION)) + ereport(ERROR, + (errcode(ERRCODE_WRONG_OBJECT_TYPE), + errmsg("%s is not a procedure", + func_signature_string(funcname, nargs, + argnames, + actual_arg_types)), + errhint("To call a function, use SELECT."), + parser_errposition(pstate, location))); + /* Conversely, if not a CALL, reject procedures */ + if (fdresult == FUNCDETAIL_PROCEDURE && !proc_call) + ereport(ERROR, + (errcode(ERRCODE_WRONG_OBJECT_TYPE), + errmsg("%s is a procedure", + func_signature_string(funcname, nargs, + argnames, + actual_arg_types)), + errhint("To call a procedure, use CALL."), + parser_errposition(pstate, location))); + + if (fdresult == FUNCDETAIL_NORMAL || + fdresult == FUNCDETAIL_PROCEDURE || + fdresult == FUNCDETAIL_COERCION) { /* - * Normal function found; was there anything indicating it must be an - * aggregate? + * In these cases, complain if there was anything indicating it must + * be an aggregate or window function. */ if (agg_star) ereport(ERROR, @@ -307,6 +338,14 @@ ParseFuncOrColumn(ParseState *pstate, List *funcname, List *fargs, NameListToString(funcname)), parser_errposition(pstate, location))); } + + /* + * So far so good, so do some fdresult-type-specific processing. + */ + if (fdresult == FUNCDETAIL_NORMAL || fdresult == FUNCDETAIL_PROCEDURE) + { + /* Nothing special to do for these cases. */ + } else if (fdresult == FUNCDETAIL_AGGREGATE) { /* @@ -481,21 +520,38 @@ ParseFuncOrColumn(ParseState *pstate, List *funcname, List *fargs, NameListToString(funcname)), parser_errposition(pstate, location))); } - else + else if (fdresult == FUNCDETAIL_COERCION) { /* - * Oops. Time to die. - * - * If we are dealing with the attribute notation rel.function, let the - * caller handle failure. + * We interpreted it as a type coercion. coerce_type can handle these + * cases, so why duplicate code... + */ + return coerce_type(pstate, linitial(fargs), + actual_arg_types[0], rettype, -1, + COERCION_EXPLICIT, COERCE_EXPLICIT_CALL, location); + } + else if (fdresult == FUNCDETAIL_MULTIPLE) + { + /* + * We found multiple possible functional matches. If we are dealing + * with attribute notation, return failure, letting the caller report + * "no such column" (we already determined there wasn't one). If + * dealing with function notation, report "ambiguous function", + * regardless of whether there's also a column by this name. */ if (is_column) return NULL; - /* - * Else generate a detailed complaint for a function - */ - if (fdresult == FUNCDETAIL_MULTIPLE) + if (proc_call) + ereport(ERROR, + (errcode(ERRCODE_AMBIGUOUS_FUNCTION), + errmsg("procedure %s is not unique", + func_signature_string(funcname, nargs, argnames, + actual_arg_types)), + errhint("Could not choose a best candidate procedure. " + "You might need to add explicit type casts."), + parser_errposition(pstate, location))); + else ereport(ERROR, (errcode(ERRCODE_AMBIGUOUS_FUNCTION), errmsg("function %s is not unique", @@ -504,7 +560,35 @@ ParseFuncOrColumn(ParseState *pstate, List *funcname, List *fargs, errhint("Could not choose a best candidate function. " "You might need to add explicit type casts."), parser_errposition(pstate, location))); - else if (list_length(agg_order) > 1 && !agg_within_group) + } + else + { + /* + * Not found as a function. 
If we are dealing with attribute + * notation, return failure, letting the caller report "no such + * column" (we already determined there wasn't one). + */ + if (is_column) + return NULL; + + /* + * Check for column projection interpretation, since we didn't before. + */ + if (could_be_projection) + { + retval = ParseComplexProjection(pstate, + strVal(linitial(funcname)), + first_arg, + location); + if (retval) + return retval; + } + + /* + * No function, and no column either. Since we're dealing with + * function notation, report "function does not exist". + */ + if (list_length(agg_order) > 1 && !agg_within_group) { /* It's agg(x, ORDER BY y,z) ... perhaps misplaced ORDER BY */ ereport(ERROR, @@ -517,6 +601,15 @@ ParseFuncOrColumn(ParseState *pstate, List *funcname, List *fargs, "after all regular arguments of the aggregate."), parser_errposition(pstate, location))); } + else if (proc_call) + ereport(ERROR, + (errcode(ERRCODE_UNDEFINED_FUNCTION), + errmsg("procedure %s does not exist", + func_signature_string(funcname, nargs, argnames, + actual_arg_types)), + errhint("No procedure matches the given name and argument types. " + "You might need to add explicit type casts."), + parser_errposition(pstate, location))); else ereport(ERROR, (errcode(ERRCODE_UNDEFINED_FUNCTION), @@ -635,7 +728,7 @@ ParseFuncOrColumn(ParseState *pstate, List *funcname, List *fargs, check_srf_call_placement(pstate, last_srf, location); /* build the appropriate output structure */ - if (fdresult == FUNCDETAIL_NORMAL) + if (fdresult == FUNCDETAIL_NORMAL || fdresult == FUNCDETAIL_PROCEDURE) { FuncExpr *funcexpr = makeNode(FuncExpr); @@ -1585,12 +1678,27 @@ func_get_detail(List *funcname, *argdefaults = defaults; } } - if (pform->proisagg) - result = FUNCDETAIL_AGGREGATE; - else if (pform->proiswindow) - result = FUNCDETAIL_WINDOWFUNC; - else - result = FUNCDETAIL_NORMAL; + + switch (pform->prokind) + { + case PROKIND_AGGREGATE: + result = FUNCDETAIL_AGGREGATE; + break; + case PROKIND_FUNCTION: + result = FUNCDETAIL_NORMAL; + break; + case PROKIND_PROCEDURE: + result = FUNCDETAIL_PROCEDURE; + break; + case PROKIND_WINDOW: + result = FUNCDETAIL_WINDOWFUNC; + break; + default: + elog(ERROR, "unrecognized prokind: %c", pform->prokind); + result = FUNCDETAIL_NORMAL; /* keep compiler quiet */ + break; + } + ReleaseSysCache(ftup); return result; } @@ -1790,7 +1898,7 @@ FuncNameAsType(List *funcname) * transformed expression tree. If not, return NULL. */ static Node * -ParseComplexProjection(ParseState *pstate, char *funcname, Node *first_arg, +ParseComplexProjection(ParseState *pstate, const char *funcname, Node *first_arg, int location) { TupleDesc tupdesc; @@ -1819,22 +1927,23 @@ ParseComplexProjection(ParseState *pstate, char *funcname, Node *first_arg, } /* - * Else do it the hard way with get_expr_result_type(). + * Else do it the hard way with get_expr_result_tupdesc(). * * If it's a Var of type RECORD, we have to work even harder: we have to - * find what the Var refers to, and pass that to get_expr_result_type. + * find what the Var refers to, and pass that to get_expr_result_tupdesc. * That task is handled by expandRecordVariable(). 
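With pg_proc.prokind and the new FUNCDETAIL_PROCEDURE result in place, ParseFuncOrColumn() can tell procedure invocations apart from ordinary function calls and raise the targeted errors added above. Roughly, on a server built with this patch (the procedure name is illustrative):

    CREATE PROCEDURE note(msg text)
      LANGUAGE plpgsql
      AS $$ BEGIN RAISE NOTICE '%', msg; END $$;

    CALL note('hello');     -- resolves as FUNCDETAIL_PROCEDURE and builds a FuncExpr
    SELECT note('hello');   -- rejected: "note(text) is a procedure", with a hint to use CALL
    CALL upper('hello');    -- rejected: "upper(text) is not a procedure", with a hint to use SELECT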
*/ if (IsA(first_arg, Var) && ((Var *) first_arg)->vartype == RECORDOID) tupdesc = expandRecordVariable(pstate, (Var *) first_arg, 0); - else if (get_expr_result_type(first_arg, NULL, &tupdesc) != TYPEFUNC_COMPOSITE) + else + tupdesc = get_expr_result_tupdesc(first_arg, true); + if (!tupdesc) return NULL; /* unresolvable RECORD type */ - Assert(tupdesc); for (i = 0; i < tupdesc->natts; i++) { - Form_pg_attribute att = tupdesc->attrs[i]; + Form_pg_attribute att = TupleDescAttr(tupdesc, i); if (strcmp(funcname, NameStr(att->attname)) == 0 && !att->attisdropped) @@ -1983,16 +2092,28 @@ LookupFuncName(List *funcname, int nargs, const Oid *argtypes, bool noError) /* * LookupFuncWithArgs - * Like LookupFuncName, but the argument types are specified by a - * ObjectWithArgs node. + * + * Like LookupFuncName, but the argument types are specified by a + * ObjectWithArgs node. Also, this function can check whether the result is a + * function, procedure, or aggregate, based on the objtype argument. Pass + * OBJECT_ROUTINE to accept any of them. + * + * For historical reasons, we also accept aggregates when looking for a + * function. */ Oid -LookupFuncWithArgs(ObjectWithArgs *func, bool noError) +LookupFuncWithArgs(ObjectType objtype, ObjectWithArgs *func, bool noError) { Oid argoids[FUNC_MAX_ARGS]; int argcount; int i; ListCell *args_item; + Oid oid; + + Assert(objtype == OBJECT_AGGREGATE || + objtype == OBJECT_FUNCTION || + objtype == OBJECT_PROCEDURE || + objtype == OBJECT_ROUTINE); argcount = list_length(func->objargs); if (argcount > FUNC_MAX_ARGS) @@ -2012,90 +2133,100 @@ LookupFuncWithArgs(ObjectWithArgs *func, bool noError) args_item = lnext(args_item); } - return LookupFuncName(func->objname, func->args_unspecified ? -1 : argcount, argoids, noError); -} - -/* - * LookupAggWithArgs - * Find an aggregate function from a given ObjectWithArgs node. - * - * This is almost like LookupFuncWithArgs, but the error messages refer - * to aggregates rather than plain functions, and we verify that the found - * function really is an aggregate. - */ -Oid -LookupAggWithArgs(ObjectWithArgs *agg, bool noError) -{ - Oid argoids[FUNC_MAX_ARGS]; - int argcount; - int i; - ListCell *lc; - Oid oid; - HeapTuple ftup; - Form_pg_proc pform; - - argcount = list_length(agg->objargs); - if (argcount > FUNC_MAX_ARGS) - ereport(ERROR, - (errcode(ERRCODE_TOO_MANY_ARGUMENTS), - errmsg_plural("functions cannot have more than %d argument", - "functions cannot have more than %d arguments", - FUNC_MAX_ARGS, - FUNC_MAX_ARGS))); + /* + * When looking for a function or routine, we pass noError through to + * LookupFuncName and let it make any error messages. Otherwise, we make + * our own errors for the aggregate and procedure cases. + */ + oid = LookupFuncName(func->objname, func->args_unspecified ? -1 : argcount, argoids, + (objtype == OBJECT_FUNCTION || objtype == OBJECT_ROUTINE) ? 
noError : true); - i = 0; - foreach(lc, agg->objargs) + if (objtype == OBJECT_FUNCTION) { - TypeName *t = (TypeName *) lfirst(lc); - - argoids[i] = LookupTypeNameOid(NULL, t, noError); - i++; + /* Make sure it's a function, not a procedure */ + if (oid && get_func_prokind(oid) == PROKIND_PROCEDURE) + { + if (noError) + return InvalidOid; + ereport(ERROR, + (errcode(ERRCODE_WRONG_OBJECT_TYPE), + errmsg("%s is not a function", + func_signature_string(func->objname, argcount, + NIL, argoids)))); + } } - - oid = LookupFuncName(agg->objname, argcount, argoids, true); - - if (!OidIsValid(oid)) + else if (objtype == OBJECT_PROCEDURE) { - if (noError) - return InvalidOid; - if (argcount == 0) - ereport(ERROR, - (errcode(ERRCODE_UNDEFINED_FUNCTION), - errmsg("aggregate %s(*) does not exist", - NameListToString(agg->objname)))); - else + if (!OidIsValid(oid)) + { + if (noError) + return InvalidOid; + else if (func->args_unspecified) + ereport(ERROR, + (errcode(ERRCODE_UNDEFINED_FUNCTION), + errmsg("could not find a procedure named \"%s\"", + NameListToString(func->objname)))); + else + ereport(ERROR, + (errcode(ERRCODE_UNDEFINED_FUNCTION), + errmsg("procedure %s does not exist", + func_signature_string(func->objname, argcount, + NIL, argoids)))); + } + + /* Make sure it's a procedure */ + if (get_func_prokind(oid) != PROKIND_PROCEDURE) + { + if (noError) + return InvalidOid; ereport(ERROR, - (errcode(ERRCODE_UNDEFINED_FUNCTION), - errmsg("aggregate %s does not exist", - func_signature_string(agg->objname, argcount, + (errcode(ERRCODE_WRONG_OBJECT_TYPE), + errmsg("%s is not a procedure", + func_signature_string(func->objname, argcount, NIL, argoids)))); + } } - - /* Make sure it's an aggregate */ - ftup = SearchSysCache1(PROCOID, ObjectIdGetDatum(oid)); - if (!HeapTupleIsValid(ftup)) /* should not happen */ - elog(ERROR, "cache lookup failed for function %u", oid); - pform = (Form_pg_proc) GETSTRUCT(ftup); - - if (!pform->proisagg) + else if (objtype == OBJECT_AGGREGATE) { - ReleaseSysCache(ftup); - if (noError) - return InvalidOid; - /* we do not use the (*) notation for functions... */ - ereport(ERROR, - (errcode(ERRCODE_WRONG_OBJECT_TYPE), - errmsg("function %s is not an aggregate", - func_signature_string(agg->objname, argcount, - NIL, argoids)))); - } + if (!OidIsValid(oid)) + { + if (noError) + return InvalidOid; + else if (func->args_unspecified) + ereport(ERROR, + (errcode(ERRCODE_UNDEFINED_FUNCTION), + errmsg("could not find an aggregate named \"%s\"", + NameListToString(func->objname)))); + else if (argcount == 0) + ereport(ERROR, + (errcode(ERRCODE_UNDEFINED_FUNCTION), + errmsg("aggregate %s(*) does not exist", + NameListToString(func->objname)))); + else + ereport(ERROR, + (errcode(ERRCODE_UNDEFINED_FUNCTION), + errmsg("aggregate %s does not exist", + func_signature_string(func->objname, argcount, + NIL, argoids)))); + } - ReleaseSysCache(ftup); + /* Make sure it's an aggregate */ + if (get_func_prokind(oid) != PROKIND_AGGREGATE) + { + if (noError) + return InvalidOid; + /* we do not use the (*) notation for functions... 
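The reworked LookupFuncWithArgs() absorbs the old LookupAggWithArgs() and checks the resolved routine's prokind against the requested ObjectType, so DDL that names a routine complains consistently. Given the illustrative note(text) procedure from the earlier sketch, the behavior would be along these lines:

    DROP FUNCTION note(text);    -- OBJECT_FUNCTION: "note(text) is not a function"
    DROP AGGREGATE note(text);   -- OBJECT_AGGREGATE: "function note(text) is not an aggregate"
    DROP PROCEDURE note(text);   -- OBJECT_PROCEDURE: resolves and drops the procedure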
*/ + ereport(ERROR, + (errcode(ERRCODE_WRONG_OBJECT_TYPE), + errmsg("function %s is not an aggregate", + func_signature_string(func->objname, argcount, + NIL, argoids)))); + } + } return oid; } - /* * check_srf_call_placement * Verify that a set-returning function is called in a valid place, @@ -2173,6 +2304,7 @@ check_srf_call_placement(ParseState *pstate, Node *last_srf, int location) break; case EXPR_KIND_WINDOW_FRAME_RANGE: case EXPR_KIND_WINDOW_FRAME_ROWS: + case EXPR_KIND_WINDOW_FRAME_GROUPS: err = _("set-returning functions are not allowed in window definitions"); break; case EXPR_KIND_SELECT_TARGET: @@ -2235,6 +2367,9 @@ check_srf_call_placement(ParseState *pstate, Node *last_srf, int location) case EXPR_KIND_PARTITION_EXPRESSION: err = _("set-returning functions are not allowed in partition key expressions"); break; + case EXPR_KIND_CALL_ARGUMENT: + err = _("set-returning functions are not allowed in CALL arguments"); + break; /* * There is intentionally no default: case here, so that the diff --git a/src/backend/parser/parse_node.c b/src/backend/parser/parse_node.c index 6dbad53a41..d2672882d7 100644 --- a/src/backend/parser/parse_node.c +++ b/src/backend/parser/parse_node.c @@ -3,7 +3,7 @@ * parse_node.c * various routines that make nodes for querytrees * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * diff --git a/src/backend/parser/parse_oper.c b/src/backend/parser/parse_oper.c index e9bf50243f..b279e1236a 100644 --- a/src/backend/parser/parse_oper.c +++ b/src/backend/parser/parse_oper.c @@ -3,7 +3,7 @@ * parse_oper.c * handle operator things for parser * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * @@ -723,7 +723,10 @@ op_error(ParseState *pstate, List *op, char oprkind, (errcode(ERRCODE_UNDEFINED_FUNCTION), errmsg("operator does not exist: %s", op_signature_string(op, oprkind, arg1, arg2)), - errhint("No operator matches the given name and argument type(s). " + (!arg1 || !arg2) ? + errhint("No operator matches the given name and argument type. " + "You might need to add an explicit type cast.") : + errhint("No operator matches the given name and argument types. " "You might need to add explicit type casts."), parser_errposition(pstate, location))); } @@ -1020,7 +1023,7 @@ static HTAB *OprCacheHash = NULL; * make_oper_cache_key * Fill the lookup key struct given operator name and arg types. * - * Returns TRUE if successful, FALSE if the search_path overflowed + * Returns true if successful, false if the search_path overflowed * (hence no caching is possible). * * pstate/location are used only to report the error position; pass NULL/-1 diff --git a/src/backend/parser/parse_param.c b/src/backend/parser/parse_param.c index 3e04e8c4d1..454a3e07f7 100644 --- a/src/backend/parser/parse_param.c +++ b/src/backend/parser/parse_param.c @@ -12,7 +12,7 @@ * Note that other approaches to parameters are possible using the parser * hooks defined in ParseState. 
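The two new expression kinds slot into the same per-context checks as the existing ones: offsets in a GROUPS window frame are vetted as EXPR_KIND_WINDOW_FRAME_GROUPS, and CALL arguments as EXPR_KIND_CALL_ARGUMENT. A short sketch of both, reusing the illustrative note() procedure:

    SELECT x,
           sum(x) OVER (ORDER BY x
                        GROUPS BETWEEN 1 PRECEDING AND 1 FOLLOWING)
    FROM (VALUES (1), (2), (2), (3)) AS t(x);
    -- the frame offsets are checked under EXPR_KIND_WINDOW_FRAME_GROUPS

    CALL note((SELECT 'hi'));
    -- rejected: "cannot use subquery in CALL argument"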
* - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * diff --git a/src/backend/parser/parse_relation.c b/src/backend/parser/parse_relation.c index 684a50d3df..66a7105b09 100644 --- a/src/backend/parser/parse_relation.c +++ b/src/backend/parser/parse_relation.c @@ -3,7 +3,7 @@ * parse_relation.c * parser support routines dealing with relations * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * @@ -28,6 +28,7 @@ #include "parser/parse_enr.h" #include "parser/parse_relation.h" #include "parser/parse_type.h" +#include "storage/lmgr.h" #include "utils/builtins.h" #include "utils/lsyscache.h" #include "utils/rel.h" @@ -652,7 +653,7 @@ updateFuzzyAttrMatchState(int fuzzy_rte_penalty, * for an approximate match and update fuzzystate accordingly. */ Node * -scanRTEForColumn(ParseState *pstate, RangeTblEntry *rte, char *colname, +scanRTEForColumn(ParseState *pstate, RangeTblEntry *rte, const char *colname, int location, int fuzzy_rte_penalty, FuzzyAttrMatchState *fuzzystate) { @@ -754,7 +755,7 @@ scanRTEForColumn(ParseState *pstate, RangeTblEntry *rte, char *colname, * If localonly is true, only names in the innermost query are considered. */ Node * -colNameToVar(ParseState *pstate, char *colname, bool localonly, +colNameToVar(ParseState *pstate, const char *colname, bool localonly, int location) { Node *result = NULL; @@ -828,7 +829,7 @@ colNameToVar(ParseState *pstate, char *colname, bool localonly, * and 'second' will contain the attribute number for the second match. */ static FuzzyAttrMatchState * -searchRangeTableForCol(ParseState *pstate, const char *alias, char *colname, +searchRangeTableForCol(ParseState *pstate, const char *alias, const char *colname, int location) { ParseState *orig_pstate = pstate; @@ -1052,7 +1053,7 @@ buildRelationAliases(TupleDesc tupdesc, Alias *alias, Alias *eref) for (varattno = 0; varattno < maxattrs; varattno++) { - Form_pg_attribute attr = tupdesc->attrs[varattno]; + Form_pg_attribute attr = TupleDescAttr(tupdesc, varattno); Value *attrname; if (attr->attisdropped) @@ -1159,19 +1160,13 @@ parserOpenTable(ParseState *pstate, const RangeVar *relation, int lockmode) relation->schemaname, relation->relname))); else { - /* - * An unqualified name might be a named ephemeral relation. - */ - if (get_visible_ENR_metadata(pstate->p_queryEnv, relation->relname)) - rel = NULL; - /* * An unqualified name might have been meant as a reference to * some not-yet-in-scope CTE. The bare "does not exist" message * has proven remarkably unhelpful for figuring out such problems, * so we take pains to offer a specific hint. */ - else if (isFutureCTE(pstate, relation->relname)) + if (isFutureCTE(pstate, relation->relname)) ereport(ERROR, (errcode(ERRCODE_UNDEFINED_TABLE), errmsg("relation \"%s\" does not exist", @@ -1213,16 +1208,23 @@ addRangeTableEntry(ParseState *pstate, rte->rtekind = RTE_RELATION; rte->alias = alias; + /* + * Identify the type of lock we'll need on this relation. It's not the + * query's target table (that case is handled elsewhere), so we need + * either RowShareLock if it's locked by FOR UPDATE/SHARE, or plain + * AccessShareLock otherwise. + */ + lockmode = isLockedRefname(pstate, refname) ? 
RowShareLock : AccessShareLock; + /* * Get the rel's OID. This access also ensures that we have an up-to-date * relcache entry for the rel. Since this is typically the first access - * to a rel in a statement, be careful to get the right access level - * depending on whether we're doing SELECT FOR UPDATE/SHARE. + * to a rel in a statement, we must open the rel with the proper lockmode. */ - lockmode = isLockedRefname(pstate, refname) ? RowShareLock : AccessShareLock; rel = parserOpenTable(pstate, relation, lockmode); rte->relid = RelationGetRelid(rel); rte->relkind = rel->rd_rel->relkind; + rte->rellockmode = lockmode; /* * Build the list of effective column names using user-supplied aliases @@ -1268,10 +1270,20 @@ addRangeTableEntry(ParseState *pstate, * * This is just like addRangeTableEntry() except that it makes an RTE * given an already-open relation instead of a RangeVar reference. + * + * lockmode is the lock type required for query execution; it must be one + * of AccessShareLock, RowShareLock, or RowExclusiveLock depending on the + * RTE's role within the query. The caller must hold that lock mode + * or a stronger one. + * + * Note: properly, lockmode should be declared LOCKMODE not int, but that + * would require importing storage/lock.h into parse_relation.h. Since + * LOCKMODE is typedef'd as int anyway, that seems like overkill. */ RangeTblEntry * addRangeTableEntryForRelation(ParseState *pstate, Relation rel, + int lockmode, Alias *alias, bool inh, bool inFromCl) @@ -1281,10 +1293,16 @@ addRangeTableEntryForRelation(ParseState *pstate, Assert(pstate != NULL); + Assert(lockmode == AccessShareLock || + lockmode == RowShareLock || + lockmode == RowExclusiveLock); + Assert(CheckRelationLockedByMe(rel, lockmode, true)); + rte->rtekind = RTE_RELATION; rte->alias = alias; rte->relid = RelationGetRelid(rel); rte->relkind = rel->rd_rel->relkind; + rte->rellockmode = lockmode; /* * Build the list of effective column names using user-supplied aliases @@ -1341,7 +1359,6 @@ addRangeTableEntryForSubquery(ParseState *pstate, Assert(pstate != NULL); rte->rtekind = RTE_SUBQUERY; - rte->relid = InvalidOid; rte->subquery = subquery; rte->alias = alias; @@ -1502,7 +1519,8 @@ addRangeTableEntryForFunction(ParseState *pstate, parser_errposition(pstate, exprLocation(funcexpr)))); } - if (functypclass == TYPEFUNC_COMPOSITE) + if (functypclass == TYPEFUNC_COMPOSITE || + functypclass == TYPEFUNC_COMPOSITE_DOMAIN) { /* Composite data type, e.g. a table's row type */ Assert(tupdesc); @@ -2014,11 +2032,13 @@ addRangeTableEntryForENR(ParseState *pstate, /* * Build the list of effective column names using user-supplied aliases - * and/or actual column names. Also build the cannibalized fields. + * and/or actual column names. 
*/ tupdesc = ENRMetadataGetTupDesc(enrmd); rte->eref = makeAlias(refname, NIL); buildRelationAliases(tupdesc, alias, rte->eref); + + /* Record additional data for ENR, including column type info */ rte->enrname = enrmd->name; rte->enrtuples = enrmd->enrtuples; rte->coltypes = NIL; @@ -2026,19 +2046,26 @@ addRangeTableEntryForENR(ParseState *pstate, rte->colcollations = NIL; for (attno = 1; attno <= tupdesc->natts; ++attno) { - if (tupdesc->attrs[attno - 1]->atttypid == InvalidOid && - !(tupdesc->attrs[attno - 1]->attisdropped)) - elog(ERROR, "atttypid was invalid for column which has not been dropped from \"%s\"", - rv->relname); - rte->coltypes = - lappend_oid(rte->coltypes, - tupdesc->attrs[attno - 1]->atttypid); - rte->coltypmods = - lappend_int(rte->coltypmods, - tupdesc->attrs[attno - 1]->atttypmod); - rte->colcollations = - lappend_oid(rte->colcollations, - tupdesc->attrs[attno - 1]->attcollation); + Form_pg_attribute att = TupleDescAttr(tupdesc, attno - 1); + + if (att->attisdropped) + { + /* Record zeroes for a dropped column */ + rte->coltypes = lappend_oid(rte->coltypes, InvalidOid); + rte->coltypmods = lappend_int(rte->coltypmods, 0); + rte->colcollations = lappend_oid(rte->colcollations, InvalidOid); + } + else + { + /* Let's just make sure we can tell this isn't dropped */ + if (att->atttypid == InvalidOid) + elog(ERROR, "atttypid is invalid for non-dropped column in \"%s\"", + rv->relname); + rte->coltypes = lappend_oid(rte->coltypes, att->atttypid); + rte->coltypmods = lappend_int(rte->coltypmods, att->atttypmod); + rte->colcollations = lappend_oid(rte->colcollations, + att->attcollation); + } } /* @@ -2153,8 +2180,8 @@ addRTEtoQuery(ParseState *pstate, RangeTblEntry *rte, * * This creates lists of an RTE's column names (aliases if provided, else * real names) and Vars for each column. Only user columns are considered. - * If include_dropped is FALSE then dropped columns are omitted from the - * results. If include_dropped is TRUE then empty strings and NULL constants + * If include_dropped is false then dropped columns are omitted from the + * results. If include_dropped is true then empty strings and NULL constants * (not Vars!) are returned for dropped columns. * * rtindex, sublevels_up, and location are the varno, varlevelsup, and location @@ -2201,13 +2228,22 @@ expandRTE(RangeTblEntry *rte, int rtindex, int sublevels_up, varattno++; Assert(varattno == te->resno); + /* + * In scenarios where columns have been added to a view + * since the outer query was originally parsed, there can + * be more items in the subquery tlist than the outer + * query expects. We should ignore such extra column(s) + * --- compare the behavior for composite-returning + * functions, in the RTE_FUNCTION case below. + */ + if (!aliasp_item) + break; + if (colnames) { - /* Assume there is one alias per target item */ char *label = strVal(lfirst(aliasp_item)); *colnames = lappend(*colnames, makeString(pstrdup(label))); - aliasp_item = lnext(aliasp_item); } if (colvars) @@ -2223,6 +2259,8 @@ expandRTE(RangeTblEntry *rte, int rtindex, int sublevels_up, *colvars = lappend(*colvars, varnode); } + + aliasp_item = lnext(aliasp_item); } } break; @@ -2242,7 +2280,8 @@ expandRTE(RangeTblEntry *rte, int rtindex, int sublevels_up, functypclass = get_expr_result_type(rtfunc->funcexpr, &funcrettype, &tupdesc); - if (functypclass == TYPEFUNC_COMPOSITE) + if (functypclass == TYPEFUNC_COMPOSITE || + functypclass == TYPEFUNC_COMPOSITE_DOMAIN) { /* Composite data type, e.g. 
a table's row type */ Assert(tupdesc); @@ -2417,7 +2456,7 @@ expandRTE(RangeTblEntry *rte, int rtindex, int sublevels_up, case RTE_CTE: case RTE_NAMEDTUPLESTORE: { - /* Tablefunc, Values or CTE RTE */ + /* Tablefunc, Values, CTE, or ENR RTE */ ListCell *aliasp_item = list_head(rte->eref->colnames); ListCell *lct; ListCell *lcm; @@ -2437,23 +2476,43 @@ expandRTE(RangeTblEntry *rte, int rtindex, int sublevels_up, if (colnames) { /* Assume there is one alias per output column */ - char *label = strVal(lfirst(aliasp_item)); + if (OidIsValid(coltype)) + { + char *label = strVal(lfirst(aliasp_item)); + + *colnames = lappend(*colnames, + makeString(pstrdup(label))); + } + else if (include_dropped) + *colnames = lappend(*colnames, + makeString(pstrdup(""))); - *colnames = lappend(*colnames, - makeString(pstrdup(label))); aliasp_item = lnext(aliasp_item); } if (colvars) { - Var *varnode; + if (OidIsValid(coltype)) + { + Var *varnode; - varnode = makeVar(rtindex, varattno, - coltype, coltypmod, colcoll, - sublevels_up); - varnode->location = location; + varnode = makeVar(rtindex, varattno, + coltype, coltypmod, colcoll, + sublevels_up); + varnode->location = location; - *colvars = lappend(*colvars, varnode); + *colvars = lappend(*colvars, varnode); + } + else if (include_dropped) + { + /* + * It doesn't really matter what type the Const + * claims to be. + */ + *colvars = lappend(*colvars, + makeNullConst(INT4OID, -1, + InvalidOid)); + } } } } @@ -2514,7 +2573,7 @@ expandTupleDesc(TupleDesc tupdesc, Alias *eref, int count, int offset, Assert(count <= tupdesc->natts); for (varattno = 0; varattno < count; varattno++) { - Form_pg_attribute attr = tupdesc->attrs[varattno]; + Form_pg_attribute attr = TupleDescAttr(tupdesc, varattno); if (attr->attisdropped) { @@ -2651,7 +2710,7 @@ get_rte_attribute_name(RangeTblEntry *rte, AttrNumber attnum) * built (which can easily happen for rules). */ if (rte->rtekind == RTE_RELATION) - return get_relid_attribute_name(rte->relid, attnum); + return get_attname(rte->relid, attnum, false); /* * Otherwise use the column name from eref. There should always be one. @@ -2742,14 +2801,15 @@ get_rte_attribute_type(RangeTblEntry *rte, AttrNumber attnum, &funcrettype, &tupdesc); - if (functypclass == TYPEFUNC_COMPOSITE) + if (functypclass == TYPEFUNC_COMPOSITE || + functypclass == TYPEFUNC_COMPOSITE_DOMAIN) { /* Composite data type, e.g. a table's row type */ Form_pg_attribute att_tup; Assert(tupdesc); Assert(attnum <= tupdesc->natts); - att_tup = tupdesc->attrs[attnum - 1]; + att_tup = TupleDescAttr(tupdesc, attnum - 1); /* * If dropped column, pretend it ain't there. 
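TYPEFUNC_COMPOSITE_DOMAIN shows up in several of these RTE code paths so that a function whose declared result type is a domain over a composite type expands into columns just like one returning the composite type itself. A hedged sketch, reusing the illustrative nonneg_pair domain and assuming composite domains are accepted as function result types (which is what this handling caters for):

    CREATE FUNCTION make_pair(a float8, b float8) RETURNS nonneg_pair
      LANGUAGE sql AS $$ SELECT ROW(a, b)::nonneg_pair $$;

    SELECT * FROM make_pair(1.0, 2.0);   -- expands to columns r and i via the
                                         -- TYPEFUNC_COMPOSITE_DOMAIN branch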
See @@ -2832,13 +2892,21 @@ get_rte_attribute_type(RangeTblEntry *rte, AttrNumber attnum, case RTE_NAMEDTUPLESTORE: { /* - * tablefunc, VALUES or CTE RTE --- get type info from lists - * in the RTE + * tablefunc, VALUES, CTE, or ENR RTE --- get type info from + * lists in the RTE */ Assert(attnum > 0 && attnum <= list_length(rte->coltypes)); *vartype = list_nth_oid(rte->coltypes, attnum - 1); *vartypmod = list_nth_int(rte->coltypmods, attnum - 1); *varcollid = list_nth_oid(rte->colcollations, attnum - 1); + + /* For ENR, better check for dropped column */ + if (!OidIsValid(*vartype)) + ereport(ERROR, + (errcode(ERRCODE_UNDEFINED_COLUMN), + errmsg("column %d of relation \"%s\" does not exist", + attnum, + rte->eref->aliasname))); } break; default: @@ -2889,15 +2957,11 @@ get_rte_attribute_is_dropped(RangeTblEntry *rte, AttrNumber attnum) break; case RTE_NAMEDTUPLESTORE: { - Assert(rte->enrname); - - /* - * We checked when we loaded coltypes for the tuplestore that - * InvalidOid was only used for dropped columns, so it is safe - * to count on that here. - */ - result = - ((list_nth_oid(rte->coltypes, attnum - 1) == InvalidOid)); + /* Check dropped-ness by testing for valid coltype */ + if (attnum <= 0 || + attnum > list_length(rte->coltypes)) + elog(ERROR, "invalid varattno %d", attnum); + result = !OidIsValid((list_nth_oid(rte->coltypes, attnum - 1))); } break; case RTE_JOIN: @@ -2939,21 +3003,19 @@ get_rte_attribute_is_dropped(RangeTblEntry *rte, AttrNumber attnum) if (attnum > atts_done && attnum <= atts_done + rtfunc->funccolcount) { - TypeFuncClass functypclass; - Oid funcrettype; TupleDesc tupdesc; - functypclass = get_expr_result_type(rtfunc->funcexpr, - &funcrettype, - &tupdesc); - if (functypclass == TYPEFUNC_COMPOSITE) + tupdesc = get_expr_result_tupdesc(rtfunc->funcexpr, + true); + if (tupdesc) { /* Composite data type, e.g. a table's row type */ Form_pg_attribute att_tup; Assert(tupdesc); Assert(attnum - atts_done <= tupdesc->natts); - att_tup = tupdesc->attrs[attnum - atts_done - 1]; + att_tup = TupleDescAttr(tupdesc, + attnum - atts_done - 1); return att_tup->attisdropped; } /* Otherwise, it can't have any dropped columns */ @@ -3040,9 +3102,9 @@ attnameAttNum(Relation rd, const char *attname, bool sysColOK) { int i; - for (i = 0; i < rd->rd_rel->relnatts; i++) + for (i = 0; i < RelationGetNumberOfAttributes(rd); i++) { - Form_pg_attribute att = rd->rd_att->attrs[i]; + Form_pg_attribute att = TupleDescAttr(rd->rd_att, i); if (namestrcmp(&(att->attname), attname) == 0 && !att->attisdropped) return i + 1; @@ -3073,7 +3135,7 @@ attnameAttNum(Relation rd, const char *attname, bool sysColOK) static int specialAttNum(const char *attname) { - Form_pg_attribute sysatt; + const FormData_pg_attribute *sysatt; sysatt = SystemAttributeByName(attname, true /* "oid" will be accepted */ ); @@ -3090,19 +3152,19 @@ specialAttNum(const char *attname) * heap_open()'ed. Use the cache version get_atttype() * for access to non-opened relations. 
*/ -Name +const NameData * attnumAttName(Relation rd, int attid) { if (attid <= 0) { - Form_pg_attribute sysatt; + const FormData_pg_attribute *sysatt; sysatt = SystemAttributeDefinition(attid, rd->rd_rel->relhasoids); return &sysatt->attname; } if (attid > rd->rd_att->natts) elog(ERROR, "invalid attribute number %d", attid); - return &rd->rd_att->attrs[attid - 1]->attname; + return &TupleDescAttr(rd->rd_att, attid - 1)->attname; } /* @@ -3117,14 +3179,14 @@ attnumTypeId(Relation rd, int attid) { if (attid <= 0) { - Form_pg_attribute sysatt; + const FormData_pg_attribute *sysatt; sysatt = SystemAttributeDefinition(attid, rd->rd_rel->relhasoids); return sysatt->atttypid; } if (attid > rd->rd_att->natts) elog(ERROR, "invalid attribute number %d", attid); - return rd->rd_att->attrs[attid - 1]->atttypid; + return TupleDescAttr(rd->rd_att, attid - 1)->atttypid; } /* @@ -3142,7 +3204,7 @@ attnumCollationId(Relation rd, int attid) } if (attid > rd->rd_att->natts) elog(ERROR, "invalid attribute number %d", attid); - return rd->rd_att->attrs[attid - 1]->attcollation; + return TupleDescAttr(rd->rd_att, attid - 1)->attcollation; } /* @@ -3209,7 +3271,7 @@ errorMissingRTE(ParseState *pstate, RangeVar *relation) */ void errorMissingColumn(ParseState *pstate, - char *relname, char *colname, int location) + const char *relname, const char *colname, int location) { FuzzyAttrMatchState *state; char *closestfirst = NULL; @@ -3276,7 +3338,7 @@ errorMissingColumn(ParseState *pstate, /* - * Examine a fully-parsed query, and return TRUE iff any relation underlying + * Examine a fully-parsed query, and return true iff any relation underlying * the query is a temporary relation (table, view, or materialized view). */ bool diff --git a/src/backend/parser/parse_target.c b/src/backend/parser/parse_target.c index 0a70539fb1..3d31be38d5 100644 --- a/src/backend/parser/parse_target.c +++ b/src/backend/parser/parse_target.c @@ -3,7 +3,7 @@ * parse_target.c * handle target lists * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * @@ -455,7 +455,7 @@ Expr * transformAssignedExpr(ParseState *pstate, Expr *expr, ParseExprKind exprKind, - char *colname, + const char *colname, int attrno, List *indirection, int location) @@ -484,8 +484,8 @@ transformAssignedExpr(ParseState *pstate, colname), parser_errposition(pstate, location))); attrtype = attnumTypeId(rd, attrno); - attrtypmod = rd->rd_att->attrs[attrno - 1]->atttypmod; - attrcollation = rd->rd_att->attrs[attrno - 1]->attcollation; + attrtypmod = TupleDescAttr(rd->rd_att, attrno - 1)->atttypmod; + attrcollation = TupleDescAttr(rd->rd_att, attrno - 1)->attcollation; /* * If the expression is a DEFAULT placeholder, insert the attribute's @@ -691,7 +691,13 @@ transformAssignmentIndirection(ParseState *pstate, if (indirection && !basenode) { - /* Set up a substitution. We reuse CaseTestExpr for this. */ + /* + * Set up a substitution. We abuse CaseTestExpr for this. It's safe + * to do so because the only nodes that will be above the CaseTestExpr + * in the finished expression will be FieldStore and ArrayRef nodes. + * (There could be other stuff in the tree, but it will be within + * other child fields of those node types.) 
+ */ CaseTestExpr *ctest = makeNode(CaseTestExpr); ctest->typeId = targetTypeId; @@ -725,6 +731,8 @@ transformAssignmentIndirection(ParseState *pstate, else { FieldStore *fstore; + Oid baseTypeId; + int32 baseTypeMod; Oid typrelid; AttrNumber attnum; Oid fieldTypeId; @@ -752,7 +760,14 @@ transformAssignmentIndirection(ParseState *pstate, /* No subscripts, so can process field selection here */ - typrelid = typeidTypeRelid(targetTypeId); + /* + * Look up the composite type, accounting for possibility that + * what we are given is a domain over composite. + */ + baseTypeMod = targetTypMod; + baseTypeId = getBaseTypeAndTypmod(targetTypeId, &baseTypeMod); + + typrelid = typeidTypeRelid(baseTypeId); if (!typrelid) ereport(ERROR, (errcode(ERRCODE_DATATYPE_MISMATCH), @@ -796,7 +811,17 @@ transformAssignmentIndirection(ParseState *pstate, fstore->arg = (Expr *) basenode; fstore->newvals = list_make1(rhs); fstore->fieldnums = list_make1_int(attnum); - fstore->resulttype = targetTypeId; + fstore->resulttype = baseTypeId; + + /* If target is a domain, apply constraints */ + if (baseTypeId != targetTypeId) + return coerce_to_domain((Node *) fstore, + baseTypeId, baseTypeMod, + targetTypeId, + COERCION_IMPLICIT, + COERCE_IMPLICIT_CAST, + location, + false); return (Node *) fstore; } @@ -959,19 +984,22 @@ checkInsertTargets(ParseState *pstate, List *cols, List **attrnos) /* * Generate default column list for INSERT. */ - Form_pg_attribute *attr = pstate->p_target_relation->rd_att->attrs; - int numcol = pstate->p_target_relation->rd_rel->relnatts; + int numcol = RelationGetNumberOfAttributes(pstate->p_target_relation); + int i; for (i = 0; i < numcol; i++) { ResTarget *col; + Form_pg_attribute attr; + + attr = TupleDescAttr(pstate->p_target_relation->rd_att, i); - if (attr[i]->attisdropped) + if (attr->attisdropped) continue; col = makeNode(ResTarget); - col->name = pstrdup(NameStr(attr[i]->attname)); + col->name = pstrdup(NameStr(attr->attname)); col->indirection = NIL; col->val = NULL; col->location = -1; @@ -1106,7 +1134,7 @@ ExpandColumnRefStar(ParseState *pstate, ColumnRef *cref, { Node *node; - node = (*pstate->p_pre_columnref_hook) (pstate, cref); + node = pstate->p_pre_columnref_hook(pstate, cref); if (node != NULL) return ExpandRowReference(pstate, node, make_target_entry); } @@ -1161,8 +1189,8 @@ ExpandColumnRefStar(ParseState *pstate, ColumnRef *cref, { Node *node; - node = (*pstate->p_post_columnref_hook) (pstate, cref, - (Node *) rte); + node = pstate->p_post_columnref_hook(pstate, cref, + (Node *) rte); if (node != NULL) { if (rte != NULL) @@ -1385,29 +1413,25 @@ ExpandRowReference(ParseState *pstate, Node *expr, * (This can be pretty inefficient if the expression involves nontrivial * computation :-(.) * - * Verify it's a composite type, and get the tupdesc. We use - * get_expr_result_type() because that can handle references to functions - * returning anonymous record types. If that fails, use - * lookup_rowtype_tupdesc(), which will almost certainly fail as well, but - * it will give an appropriate error message. + * Verify it's a composite type, and get the tupdesc. + * get_expr_result_tupdesc() handles this conveniently. * * If it's a Var of type RECORD, we have to work even harder: we have to - * find what the Var refers to, and pass that to get_expr_result_type. + * find what the Var refers to, and pass that to get_expr_result_tupdesc. * That task is handled by expandRecordVariable(). 
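For orientation, the whole-row expansion that this get_expr_result_tupdesc() change simplifies is the one the parser performs when a composite value is followed by ".*"; a minimal sketch in SQL, with hypothetical type and table names:

    CREATE TYPE point3d AS (x float8, y float8, z float8);
    CREATE TABLE track (id int, pos point3d);
    -- (pos).* is expanded by the parser into one FieldSelect per
    -- non-dropped attribute of the composite's tuple descriptor:
    SELECT (pos).* FROM track;   -- same as SELECT (pos).x, (pos).y, (pos).z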
*/ if (IsA(expr, Var) && ((Var *) expr)->vartype == RECORDOID) tupleDesc = expandRecordVariable(pstate, (Var *) expr, 0); - else if (get_expr_result_type(expr, NULL, &tupleDesc) != TYPEFUNC_COMPOSITE) - tupleDesc = lookup_rowtype_tupdesc_copy(exprType(expr), - exprTypmod(expr)); + else + tupleDesc = get_expr_result_tupdesc(expr, false); Assert(tupleDesc); /* Generate a list of references to the individual fields */ numAttrs = tupleDesc->natts; for (i = 0; i < numAttrs; i++) { - Form_pg_attribute att = tupleDesc->attrs[i]; + Form_pg_attribute att = TupleDescAttr(tupleDesc, i); FieldSelect *fselect; if (att->attisdropped) @@ -1509,8 +1533,8 @@ expandRecordVariable(ParseState *pstate, Var *var, int levelsup) case RTE_NAMEDTUPLESTORE: /* - * This case should not occur: a column of a table or values list - * shouldn't have type RECORD. Fall through and fail (most + * This case should not occur: a column of a table, values list, + * or ENR shouldn't have type RECORD. Fall through and fail (most * likely) at the bottom. */ break; @@ -1608,15 +1632,9 @@ expandRecordVariable(ParseState *pstate, Var *var, int levelsup) /* * We now have an expression we can't expand any more, so see if - * get_expr_result_type() can do anything with it. If not, pass to - * lookup_rowtype_tupdesc() which will probably fail, but will give an - * appropriate error message while failing. + * get_expr_result_tupdesc() can do anything with it. */ - if (get_expr_result_type(expr, NULL, &tupleDesc) != TYPEFUNC_COMPOSITE) - tupleDesc = lookup_rowtype_tupdesc_copy(exprType(expr), - exprTypmod(expr)); - - return tupleDesc; + return get_expr_result_tupdesc(expr, false); } diff --git a/src/backend/parser/parse_type.c b/src/backend/parser/parse_type.c index d0b3fbeb57..d959b6122a 100644 --- a/src/backend/parser/parse_type.c +++ b/src/backend/parser/parse_type.c @@ -3,7 +3,7 @@ * parse_type.c * handle type operations for parser * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * @@ -641,7 +641,10 @@ stringTypeDatum(Type tp, char *string, int32 atttypmod) return OidInputFunctionCall(typinput, string, typioparam, atttypmod); } -/* given a typeid, return the type's typrelid (associated relation, if any) */ +/* + * Given a typeid, return the type's typrelid (associated relation), if any. + * Returns InvalidOid if type is not a composite type. + */ Oid typeidTypeRelid(Oid type_id) { @@ -652,13 +655,44 @@ typeidTypeRelid(Oid type_id) typeTuple = SearchSysCache1(TYPEOID, ObjectIdGetDatum(type_id)); if (!HeapTupleIsValid(typeTuple)) elog(ERROR, "cache lookup failed for type %u", type_id); - type = (Form_pg_type) GETSTRUCT(typeTuple); result = type->typrelid; ReleaseSysCache(typeTuple); return result; } +/* + * Given a typeid, return the type's typrelid (associated relation), if any. + * Returns InvalidOid if type is not a composite type or a domain over one. + * This is the same as typeidTypeRelid(getBaseType(type_id)), but faster. 
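The domain-over-composite case that typeOrDomainTypeRelid() and the FieldStore/coerce_to_domain changes above cater to can be sketched in SQL; the names below are hypothetical and not taken from the patch:

    CREATE TYPE complex AS (r float8, i float8);
    CREATE DOMAIN checked_complex AS complex CHECK ((VALUE).r >= 0);
    CREATE TABLE t (c checked_complex);
    INSERT INTO t VALUES (ROW(1, 0)::complex);
    -- Field assignment looks through the domain to find the row type, and
    -- the domain's CHECK constraint is re-applied to the stored result:
    UPDATE t SET c.i = 42;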
+ */ +Oid +typeOrDomainTypeRelid(Oid type_id) +{ + HeapTuple typeTuple; + Form_pg_type type; + Oid result; + + for (;;) + { + typeTuple = SearchSysCache1(TYPEOID, ObjectIdGetDatum(type_id)); + if (!HeapTupleIsValid(typeTuple)) + elog(ERROR, "cache lookup failed for type %u", type_id); + type = (Form_pg_type) GETSTRUCT(typeTuple); + if (type->typtype != TYPTYPE_DOMAIN) + { + /* Not a domain, so done looking through domains */ + break; + } + /* It is a domain, so examine the base type instead */ + type_id = type->typbasetype; + ReleaseSysCache(typeTuple); + } + result = type->typrelid; + ReleaseSysCache(typeTuple); + return result; +} + /* * error context callback for parse failure during parseTypeString() */ diff --git a/src/backend/parser/parse_utilcmd.c b/src/backend/parser/parse_utilcmd.c index 495ba3dffc..2e222d822b 100644 --- a/src/backend/parser/parse_utilcmd.c +++ b/src/backend/parser/parse_utilcmd.c @@ -16,7 +16,7 @@ * a quick copyObject() call before manipulating the query tree. * * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/backend/parser/parse_utilcmd.c @@ -36,9 +36,9 @@ #include "catalog/pg_am.h" #include "catalog/pg_collation.h" #include "catalog/pg_constraint.h" -#include "catalog/pg_constraint_fn.h" #include "catalog/pg_opclass.h" #include "catalog/pg_operator.h" +#include "catalog/pg_statistic_ext.h" #include "catalog/pg_type.h" #include "commands/comment.h" #include "commands/defrem.h" @@ -63,6 +63,7 @@ #include "utils/acl.h" #include "utils/builtins.h" #include "utils/lsyscache.h" +#include "utils/partcache.h" #include "utils/rel.h" #include "utils/ruleutils.h" #include "utils/syscache.h" @@ -85,6 +86,7 @@ typedef struct List *fkconstraints; /* FOREIGN KEY constraints */ List *ixconstraints; /* index-creating constraints */ List *inh_indexes; /* cloned indexes from INCLUDING INDEXES */ + List *extstats; /* cloned extended statistics */ List *blist; /* "before list" of things to do before * creating the table */ List *alist; /* "after list" of things to do after creating @@ -92,6 +94,7 @@ typedef struct IndexStmt *pkey; /* PRIMARY KEY index, if any */ bool ispartitioned; /* true if table is partitioned */ PartitionBoundSpec *partbound; /* transformed FOR VALUES */ + bool ofType; /* true if statement contains OF typename */ } CreateStmtContext; /* State shared by transformCreateSchemaStmt and its subroutines */ @@ -117,14 +120,14 @@ static void transformTableLikeClause(CreateStmtContext *cxt, TableLikeClause *table_like_clause); static void transformOfType(CreateStmtContext *cxt, TypeName *ofTypename); -static IndexStmt *generateClonedIndexStmt(CreateStmtContext *cxt, - Relation source_idx, - const AttrNumber *attmap, int attmap_length); +static CreateStatsStmt *generateClonedExtStatsStmt(RangeVar *heapRel, + Oid heapRelid, Oid source_statsid); static List *get_collation(Oid collation, Oid actual_datatype); static List *get_opclass(Oid opclass, Oid actual_datatype); static void transformIndexConstraints(CreateStmtContext *cxt); static IndexStmt *transformIndexConstraint(Constraint *constraint, CreateStmtContext *cxt); +static void transformExtendedStatistics(CreateStmtContext *cxt); static void transformFKConstraints(CreateStmtContext *cxt, bool skipValidation, bool isAddConstraint); @@ -135,6 +138,7 @@ static void transformConstraintAttrs(CreateStmtContext *cxt, static void 
transformColumnType(CreateStmtContext *cxt, ColumnDef *column); static void setSchemaName(char *context_schema, char **stmt_schema_name); static void transformPartitionCmd(CreateStmtContext *cxt, PartitionCmd *cmd); +static void validateInfiniteBounds(ParseState *pstate, List *blist); static Const *transformPartitionBoundValue(ParseState *pstate, A_Const *con, const char *colName, Oid colType, int32 colTypmod); @@ -235,10 +239,13 @@ transformCreateStmt(CreateStmt *stmt, const char *queryString) cxt.fkconstraints = NIL; cxt.ixconstraints = NIL; cxt.inh_indexes = NIL; + cxt.extstats = NIL; cxt.blist = NIL; cxt.alist = NIL; cxt.pkey = NULL; cxt.ispartitioned = stmt->partspec != NULL; + cxt.partbound = stmt->partbound; + cxt.ofType = (stmt->ofTypename != NULL); /* * Notice that we allow OIDs here only for plain tables, even though @@ -333,6 +340,11 @@ transformCreateStmt(CreateStmt *stmt, const char *queryString) */ transformCheckConstraints(&cxt, !is_foreign_table ? true : false); + /* + * Postprocess extended statistics. + */ + transformExtendedStatistics(&cxt); + /* * Output results. */ @@ -428,7 +440,8 @@ generateSerialExtraStmts(CreateStmtContext *cxt, ColumnDef *column, sname = ChooseRelationName(cxt->relation->relname, column->colname, "seq", - snamespaceid); + snamespaceid, + false); } ereport(DEBUG1, @@ -471,6 +484,14 @@ generateSerialExtraStmts(CreateStmtContext *cxt, ColumnDef *column, cxt->blist = lappend(cxt->blist, seqstmt); + /* + * Store the identity sequence name that we decided on. ALTER TABLE ... + * ADD COLUMN ... IDENTITY needs this so that it can fill the new column + * with values from the sequence, while the association of the sequence + * with the table is not set until after the ALTER TABLE. + */ + column->identitySequence = seqstmt->sequence; + /* * Build an ALTER SEQUENCE ... 
OWNED BY command to mark the sequence as * owned by this column, and add it to the list of things to be done after @@ -627,7 +648,7 @@ transformColumnDefinition(CreateStmtContext *cxt, ColumnDef *column) column->colname, cxt->relation->relname), parser_errposition(cxt->pstate, constraint->location))); - column->is_not_null = FALSE; + column->is_not_null = false; saw_nullable = true; break; @@ -639,7 +660,7 @@ transformColumnDefinition(CreateStmtContext *cxt, ColumnDef *column) column->colname, cxt->relation->relname), parser_errposition(cxt->pstate, constraint->location))); - column->is_not_null = TRUE; + column->is_not_null = true; saw_nullable = true; break; @@ -661,6 +682,15 @@ transformColumnDefinition(CreateStmtContext *cxt, ColumnDef *column) Type ctype; Oid typeOid; + if (cxt->ofType) + ereport(ERROR, + (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), + errmsg("identity columns are not supported on typed tables"))); + if (cxt->partbound) + ereport(ERROR, + (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), + errmsg("identity columns are not supported on partitions"))); + ctype = typenameType(cxt->pstate, column->typeName, NULL); typeOid = HeapTupleGetOid(ctype); ReleaseSysCache(ctype); @@ -679,7 +709,7 @@ transformColumnDefinition(CreateStmtContext *cxt, ColumnDef *column) column->identity = constraint->generated_when; saw_identity = true; - column->is_not_null = TRUE; + column->is_not_null = true; break; } @@ -694,12 +724,6 @@ transformColumnDefinition(CreateStmtContext *cxt, ColumnDef *column) errmsg("primary key constraints are not supported on foreign tables"), parser_errposition(cxt->pstate, constraint->location))); - if (cxt->ispartitioned) - ereport(ERROR, - (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("primary key constraints are not supported on partitioned tables"), - parser_errposition(cxt->pstate, - constraint->location))); /* FALL THRU */ case CONSTR_UNIQUE: @@ -709,12 +733,6 @@ transformColumnDefinition(CreateStmtContext *cxt, ColumnDef *column) errmsg("unique constraints are not supported on foreign tables"), parser_errposition(cxt->pstate, constraint->location))); - if (cxt->ispartitioned) - ereport(ERROR, - (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("unique constraints are not supported on partitioned tables"), - parser_errposition(cxt->pstate, - constraint->location))); if (constraint->keys == NIL) constraint->keys = list_make1(makeString(column->colname)); cxt->ixconstraints = lappend(cxt->ixconstraints, constraint); @@ -732,12 +750,6 @@ transformColumnDefinition(CreateStmtContext *cxt, ColumnDef *column) errmsg("foreign key constraints are not supported on foreign tables"), parser_errposition(cxt->pstate, constraint->location))); - if (cxt->ispartitioned) - ereport(ERROR, - (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("foreign key constraints are not supported on partitioned tables"), - parser_errposition(cxt->pstate, - constraint->location))); /* * Fill in the current attribute's name and throw it into the @@ -811,12 +823,6 @@ transformTableConstraint(CreateStmtContext *cxt, Constraint *constraint) errmsg("primary key constraints are not supported on foreign tables"), parser_errposition(cxt->pstate, constraint->location))); - if (cxt->ispartitioned) - ereport(ERROR, - (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("primary key constraints are not supported on partitioned tables"), - parser_errposition(cxt->pstate, - constraint->location))); cxt->ixconstraints = lappend(cxt->ixconstraints, constraint); break; @@ -827,12 +833,6 @@ 
transformTableConstraint(CreateStmtContext *cxt, Constraint *constraint) errmsg("unique constraints are not supported on foreign tables"), parser_errposition(cxt->pstate, constraint->location))); - if (cxt->ispartitioned) - ereport(ERROR, - (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("unique constraints are not supported on partitioned tables"), - parser_errposition(cxt->pstate, - constraint->location))); cxt->ixconstraints = lappend(cxt->ixconstraints, constraint); break; @@ -863,12 +863,6 @@ transformTableConstraint(CreateStmtContext *cxt, Constraint *constraint) errmsg("foreign key constraints are not supported on foreign tables"), parser_errposition(cxt->pstate, constraint->location))); - if (cxt->ispartitioned) - ereport(ERROR, - (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("foreign key constraints are not supported on partitioned tables"), - parser_errposition(cxt->pstate, - constraint->location))); cxt->fkconstraints = lappend(cxt->fkconstraints, constraint); break; @@ -941,7 +935,7 @@ transformTableLikeClause(CreateStmtContext *cxt, TableLikeClause *table_like_cla aclresult = pg_type_aclcheck(relation->rd_rel->reltype, GetUserId(), ACL_USAGE); if (aclresult != ACLCHECK_OK) - aclcheck_error(aclresult, ACL_KIND_TYPE, + aclcheck_error(aclresult, OBJECT_TYPE, RelationGetRelationName(relation)); } else @@ -949,7 +943,7 @@ transformTableLikeClause(CreateStmtContext *cxt, TableLikeClause *table_like_cla aclresult = pg_class_aclcheck(RelationGetRelid(relation), GetUserId(), ACL_SELECT); if (aclresult != ACLCHECK_OK) - aclcheck_error(aclresult, ACL_KIND_CLASS, + aclcheck_error(aclresult, get_relkind_objtype(relation->rd_rel->relkind), RelationGetRelationName(relation)); } @@ -969,7 +963,8 @@ transformTableLikeClause(CreateStmtContext *cxt, TableLikeClause *table_like_cla for (parent_attno = 1; parent_attno <= tupleDesc->natts; parent_attno++) { - Form_pg_attribute attribute = tupleDesc->attrs[parent_attno - 1]; + Form_pg_attribute attribute = TupleDescAttr(tupleDesc, + parent_attno - 1); char *attributeName = NameStr(attribute->attname); ColumnDef *def; @@ -993,7 +988,6 @@ transformTableLikeClause(CreateStmtContext *cxt, TableLikeClause *table_like_cla def->is_local = true; def->is_not_null = attribute->attnotnull; def->is_from_type = false; - def->is_from_parent = false; def->storage = 0; def->raw_default = NULL; def->cooked_default = NULL; @@ -1171,8 +1165,9 @@ transformTableLikeClause(CreateStmtContext *cxt, TableLikeClause *table_like_cla parent_index = index_open(parent_index_oid, AccessShareLock); /* Build CREATE INDEX statement to recreate the parent_index */ - index_stmt = generateClonedIndexStmt(cxt, parent_index, - attmap, tupleDesc->natts); + index_stmt = generateClonedIndexStmt(cxt->relation, InvalidOid, + parent_index, + attmap, tupleDesc->natts, NULL); /* Copy comment on index, if requested */ if (table_like_clause->options & CREATE_TABLE_LIKE_COMMENTS) @@ -1193,6 +1188,43 @@ transformTableLikeClause(CreateStmtContext *cxt, TableLikeClause *table_like_cla } } + /* + * Likewise, copy extended statistics if requested + */ + if (table_like_clause->options & CREATE_TABLE_LIKE_STATISTICS) + { + List *parent_extstats; + ListCell *l; + + parent_extstats = RelationGetStatExtList(relation); + + foreach(l, parent_extstats) + { + Oid parent_stat_oid = lfirst_oid(l); + CreateStatsStmt *stats_stmt; + + stats_stmt = generateClonedExtStatsStmt(cxt->relation, + RelationGetRelid(relation), + parent_stat_oid); + + /* Copy comment on statistics object, if requested */ + if 
(table_like_clause->options & CREATE_TABLE_LIKE_COMMENTS) + { + comment = GetComment(parent_stat_oid, StatisticExtRelationId, 0); + + /* + * We make use of CreateStatsStmt's stxcomment option, so as + * not to need to know now what name the statistics will have. + */ + stats_stmt->stxcomment = comment; + } + + cxt->extstats = lappend(cxt->extstats, stats_stmt); + } + + list_free(parent_extstats); + } + /* * Close the parent rel, but keep our AccessShareLock on it until xact * commit. That will prevent someone else from deleting or ALTERing the @@ -1219,7 +1251,7 @@ transformOfType(CreateStmtContext *cxt, TypeName *ofTypename) tupdesc = lookup_rowtype_tupdesc(ofTypeId, -1); for (i = 0; i < tupdesc->natts; i++) { - Form_pg_attribute attr = tupdesc->attrs[i]; + Form_pg_attribute attr = TupleDescAttr(tupdesc, i); ColumnDef *n; if (attr->attisdropped) @@ -1232,7 +1264,6 @@ transformOfType(CreateStmtContext *cxt, TypeName *ofTypename) n->is_local = true; n->is_not_null = false; n->is_from_type = true; - n->is_from_parent = false; n->storage = 0; n->raw_default = NULL; n->cooked_default = NULL; @@ -1249,14 +1280,15 @@ transformOfType(CreateStmtContext *cxt, TypeName *ofTypename) /* * Generate an IndexStmt node using information from an already existing index - * "source_idx". Attribute numbers should be adjusted according to attmap. + * "source_idx", for the rel identified either by heapRel or heapRelid. + * + * Attribute numbers should be adjusted according to attmap. */ -static IndexStmt * -generateClonedIndexStmt(CreateStmtContext *cxt, Relation source_idx, - const AttrNumber *attmap, int attmap_length) +IndexStmt * +generateClonedIndexStmt(RangeVar *heapRel, Oid heapRelid, Relation source_idx, + const AttrNumber *attmap, int attmap_length, Oid *constraintOid) { Oid source_relid = RelationGetRelid(source_idx); - Form_pg_attribute *attrs = RelationGetDescr(source_idx)->attrs; HeapTuple ht_idxrel; HeapTuple ht_idx; HeapTuple ht_am; @@ -1274,6 +1306,9 @@ generateClonedIndexStmt(CreateStmtContext *cxt, Relation source_idx, Datum datum; bool isnull; + Assert((heapRel == NULL && OidIsValid(heapRelid)) || + (heapRel != NULL && !OidIsValid(heapRelid))); + /* * Fetch pg_class tuple of source index. We can't use the copy in the * relcache entry because it doesn't include optional fields. 
@@ -1309,7 +1344,8 @@ generateClonedIndexStmt(CreateStmtContext *cxt, Relation source_idx, /* Begin building the IndexStmt */ index = makeNode(IndexStmt); - index->relation = cxt->relation; + index->relation = heapRel; + index->relationId = heapRelid; index->accessMethod = pstrdup(NameStr(amrec->amname)); if (OidIsValid(idxrelrec->reltablespace)) index->tableSpace = get_tablespace_name(idxrelrec->reltablespace); @@ -1348,6 +1384,9 @@ generateClonedIndexStmt(CreateStmtContext *cxt, Relation source_idx, HeapTuple ht_constr; Form_pg_constraint conrec; + if (constraintOid) + *constraintOid = constraintId; + ht_constr = SearchSysCache1(CONSTROID, ObjectIdGetDatum(constraintId)); if (!HeapTupleIsValid(ht_constr)) @@ -1428,12 +1467,15 @@ generateClonedIndexStmt(CreateStmtContext *cxt, Relation source_idx, /* Build the list of IndexElem */ index->indexParams = NIL; + index->indexIncludingParams = NIL; indexpr_item = list_head(indexprs); - for (keyno = 0; keyno < idxrec->indnatts; keyno++) + for (keyno = 0; keyno < idxrec->indnkeyatts; keyno++) { IndexElem *iparam; AttrNumber attnum = idxrec->indkey.values[keyno]; + Form_pg_attribute attr = TupleDescAttr(RelationGetDescr(source_idx), + keyno); int16 opt = source_idx->rd_indoption[keyno]; iparam = makeNode(IndexElem); @@ -1443,7 +1485,7 @@ generateClonedIndexStmt(CreateStmtContext *cxt, Relation source_idx, /* Simple index column */ char *attname; - attname = get_relid_attribute_name(indrelid, attnum); + attname = get_attname(indrelid, attnum, false); keycoltype = get_atttype(indrelid, attnum); iparam->name = attname; @@ -1481,7 +1523,7 @@ generateClonedIndexStmt(CreateStmtContext *cxt, Relation source_idx, } /* Copy the original index column name */ - iparam->indexcolname = pstrdup(NameStr(attrs[keyno]->attname)); + iparam->indexcolname = pstrdup(NameStr(attr->attname)); /* Add the collation name, if non-default */ iparam->collation = get_collation(indcollation->values[keyno], keycoltype); @@ -1517,6 +1559,37 @@ generateClonedIndexStmt(CreateStmtContext *cxt, Relation source_idx, index->indexParams = lappend(index->indexParams, iparam); } + /* Handle included columns separately */ + for (keyno = idxrec->indnkeyatts; keyno < idxrec->indnatts; keyno++) + { + IndexElem *iparam; + AttrNumber attnum = idxrec->indkey.values[keyno]; + Form_pg_attribute attr = TupleDescAttr(RelationGetDescr(source_idx), + keyno); + + iparam = makeNode(IndexElem); + + if (AttributeNumberIsValid(attnum)) + { + /* Simple index column */ + char *attname; + + attname = get_attname(indrelid, attnum, false); + keycoltype = get_atttype(indrelid, attnum); + + iparam->name = attname; + iparam->expr = NULL; + } + else + ereport(ERROR, + (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), + errmsg("expressions are not supported in included columns"))); + + /* Copy the original index column name */ + iparam->indexcolname = pstrdup(NameStr(attr->attname)); + + index->indexIncludingParams = lappend(index->indexIncludingParams, iparam); + } /* Copy reloptions if any */ datum = SysCacheGetAttr(RELOID, ht_idxrel, Anum_pg_class_reloptions, &isnull); @@ -1560,6 +1633,85 @@ generateClonedIndexStmt(CreateStmtContext *cxt, Relation source_idx, return index; } +/* + * Generate a CreateStatsStmt node using information from an already existing + * extended statistic "source_statsid", for the rel identified by heapRel and + * heapRelid. 
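The statement this function builds backs the INCLUDING STATISTICS option of CREATE TABLE ... (LIKE ...); roughly, with hypothetical names:

    CREATE TABLE orders (cust_id int, region int);
    CREATE STATISTICS orders_stats (ndistinct, dependencies)
        ON cust_id, region FROM orders;
    -- The cloned CreateStatsStmt recreates an equivalent statistics object
    -- (with a system-chosen name) on the new table:
    CREATE TABLE orders_copy (LIKE orders INCLUDING STATISTICS);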
+ */ +static CreateStatsStmt * +generateClonedExtStatsStmt(RangeVar *heapRel, Oid heapRelid, + Oid source_statsid) +{ + HeapTuple ht_stats; + Form_pg_statistic_ext statsrec; + CreateStatsStmt *stats; + List *stat_types = NIL; + List *def_names = NIL; + bool isnull; + Datum datum; + ArrayType *arr; + char *enabled; + int i; + + Assert(OidIsValid(heapRelid)); + Assert(heapRel != NULL); + + /* + * Fetch pg_statistic_ext tuple of source statistics object. + */ + ht_stats = SearchSysCache1(STATEXTOID, ObjectIdGetDatum(source_statsid)); + if (!HeapTupleIsValid(ht_stats)) + elog(ERROR, "cache lookup failed for statistics object %u", source_statsid); + statsrec = (Form_pg_statistic_ext) GETSTRUCT(ht_stats); + + /* Determine which statistics types exist */ + datum = SysCacheGetAttr(STATEXTOID, ht_stats, + Anum_pg_statistic_ext_stxkind, &isnull); + Assert(!isnull); + arr = DatumGetArrayTypeP(datum); + if (ARR_NDIM(arr) != 1 || + ARR_HASNULL(arr) || + ARR_ELEMTYPE(arr) != CHAROID) + elog(ERROR, "stxkind is not a 1-D char array"); + enabled = (char *) ARR_DATA_PTR(arr); + for (i = 0; i < ARR_DIMS(arr)[0]; i++) + { + if (enabled[i] == STATS_EXT_NDISTINCT) + stat_types = lappend(stat_types, makeString("ndistinct")); + else if (enabled[i] == STATS_EXT_DEPENDENCIES) + stat_types = lappend(stat_types, makeString("dependencies")); + else + elog(ERROR, "unrecognized statistics kind %c", enabled[i]); + } + + /* Determine which columns the statistics are on */ + for (i = 0; i < statsrec->stxkeys.dim1; i++) + { + ColumnRef *cref = makeNode(ColumnRef); + AttrNumber attnum = statsrec->stxkeys.values[i]; + + cref->fields = list_make1(makeString(get_attname(heapRelid, + attnum, false))); + cref->location = -1; + + def_names = lappend(def_names, cref); + } + + /* finally, build the output node */ + stats = makeNode(CreateStatsStmt); + stats->defnames = NULL; + stats->stat_types = stat_types; + stats->exprs = def_names; + stats->relations = list_make1(heapRel); + stats->stxcomment = NULL; + stats->if_not_exists = false; + + /* Clean up */ + ReleaseSysCache(ht_stats); + + return stats; +} + /* * get_collation - fetch qualified name of a collation * @@ -1708,6 +1860,7 @@ transformIndexConstraints(CreateStmtContext *cxt) IndexStmt *priorindex = lfirst(k); if (equal(index->indexParams, priorindex->indexParams) && + equal(index->indexIncludingParams, priorindex->indexIncludingParams) && equal(index->whereClause, priorindex->whereClause) && equal(index->excludeOpNames, priorindex->excludeOpNames) && strcmp(index->accessMethod, priorindex->accessMethod) == 0 && @@ -1779,6 +1932,7 @@ transformIndexConstraint(Constraint *constraint, CreateStmtContext *cxt) index->tableSpace = constraint->indexspace; index->whereClause = constraint->where_clause; index->indexParams = NIL; + index->indexIncludingParams = NIL; index->excludeOpNames = NIL; index->idxcomment = NULL; index->indexOid = InvalidOid; @@ -1909,7 +2063,7 @@ transformIndexConstraint(Constraint *constraint, CreateStmtContext *cxt) for (i = 0; i < index_form->indnatts; i++) { int16 attnum = index_form->indkey.values[i]; - Form_pg_attribute attform; + const FormData_pg_attribute *attform; char *attname; Oid defopclass; @@ -1921,31 +2075,36 @@ transformIndexConstraint(Constraint *constraint, CreateStmtContext *cxt) if (attnum > 0) { Assert(attnum <= heap_rel->rd_att->natts); - attform = heap_rel->rd_att->attrs[attnum - 1]; + attform = TupleDescAttr(heap_rel->rd_att, attnum - 1); } else attform = SystemAttributeDefinition(attnum, heap_rel->rd_rel->relhasoids); attname = 
pstrdup(NameStr(attform->attname)); - /* - * Insist on default opclass and sort options. While the index - * would still work as a constraint with non-default settings, it - * might not provide exactly the same uniqueness semantics as - * you'd get from a normally-created constraint; and there's also - * the dump/reload problem mentioned above. - */ - defopclass = GetDefaultOpClass(attform->atttypid, - index_rel->rd_rel->relam); - if (indclass->values[i] != defopclass || - index_rel->rd_indoption[i] != 0) - ereport(ERROR, - (errcode(ERRCODE_WRONG_OBJECT_TYPE), - errmsg("index \"%s\" does not have default sorting behavior", index_name), - errdetail("Cannot create a primary key or unique constraint using such an index."), - parser_errposition(cxt->pstate, constraint->location))); + if (i < index_form->indnkeyatts) + { + /* + * Insist on default opclass and sort options. While the + * index would still work as a constraint with non-default + * settings, it might not provide exactly the same uniqueness + * semantics as you'd get from a normally-created constraint; + * and there's also the dump/reload problem mentioned above. + */ + defopclass = GetDefaultOpClass(attform->atttypid, + index_rel->rd_rel->relam); + if (indclass->values[i] != defopclass || + index_rel->rd_indoption[i] != 0) + ereport(ERROR, + (errcode(ERRCODE_WRONG_OBJECT_TYPE), + errmsg("index \"%s\" column number %d does not have default sorting behavior", index_name, i + 1), + errdetail("Cannot create a primary key or unique constraint using such an index."), + parser_errposition(cxt->pstate, constraint->location))); - constraint->keys = lappend(constraint->keys, makeString(attname)); + constraint->keys = lappend(constraint->keys, makeString(attname)); + } + else + constraint->including = lappend(constraint->including, makeString(attname)); } /* Close the index relation but keep the lock */ @@ -1974,8 +2133,6 @@ transformIndexConstraint(Constraint *constraint, CreateStmtContext *cxt) index->indexParams = lappend(index->indexParams, elem); index->excludeOpNames = lappend(index->excludeOpNames, opname); } - - return index; } /* @@ -1986,7 +2143,136 @@ transformIndexConstraint(Constraint *constraint, CreateStmtContext *cxt) * it to DefineIndex to mark the columns NOT NULL, it's more efficient to * get it right the first time.) */ - foreach(lc, constraint->keys) + else + { + foreach(lc, constraint->keys) + { + char *key = strVal(lfirst(lc)); + bool found = false; + ColumnDef *column = NULL; + ListCell *columns; + IndexElem *iparam; + + /* Make sure referenced column exist. */ + foreach(columns, cxt->columns) + { + column = castNode(ColumnDef, lfirst(columns)); + if (strcmp(column->colname, key) == 0) + { + found = true; + break; + } + } + if (found) + { + /* found column in the new table; force it to be NOT NULL */ + if (constraint->contype == CONSTR_PRIMARY) + column->is_not_null = true; + } + else if (SystemAttributeByName(key, cxt->hasoids) != NULL) + { + /* + * column will be a system column in the new table, so accept + * it. System columns can't ever be null, so no need to worry + * about PRIMARY/NOT NULL constraint. 
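The force-to-NOT-NULL branch in this loop is what makes a declared key column non-nullable at table creation time, e.g. for a hypothetical table:

    CREATE TABLE items (id int, label text, PRIMARY KEY (id));
    -- "id" comes out NOT NULL directly from the parser, rather than being
    -- fixed up later by DefineIndex.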
+ */ + found = true; + } + else if (cxt->inhRelations) + { + /* try inherited tables */ + ListCell *inher; + + foreach(inher, cxt->inhRelations) + { + RangeVar *inh = castNode(RangeVar, lfirst(inher)); + Relation rel; + int count; + + rel = heap_openrv(inh, AccessShareLock); + /* check user requested inheritance from valid relkind */ + if (rel->rd_rel->relkind != RELKIND_RELATION && + rel->rd_rel->relkind != RELKIND_FOREIGN_TABLE && + rel->rd_rel->relkind != RELKIND_PARTITIONED_TABLE) + ereport(ERROR, + (errcode(ERRCODE_WRONG_OBJECT_TYPE), + errmsg("inherited relation \"%s\" is not a table or foreign table", + inh->relname))); + for (count = 0; count < rel->rd_att->natts; count++) + { + Form_pg_attribute inhattr = TupleDescAttr(rel->rd_att, + count); + char *inhname = NameStr(inhattr->attname); + + if (inhattr->attisdropped) + continue; + if (strcmp(key, inhname) == 0) + { + found = true; + + /* + * We currently have no easy way to force an + * inherited column to be NOT NULL at creation, if + * its parent wasn't so already. We leave it to + * DefineIndex to fix things up in this case. + */ + break; + } + } + heap_close(rel, NoLock); + if (found) + break; + } + } + + /* + * In the ALTER TABLE case, don't complain about index keys not + * created in the command; they may well exist already. + * DefineIndex will complain about them if not, and will also take + * care of marking them NOT NULL. + */ + if (!found && !cxt->isalter) + ereport(ERROR, + (errcode(ERRCODE_UNDEFINED_COLUMN), + errmsg("column \"%s\" named in key does not exist", key), + parser_errposition(cxt->pstate, constraint->location))); + + /* Check for PRIMARY KEY(foo, foo) */ + foreach(columns, index->indexParams) + { + iparam = (IndexElem *) lfirst(columns); + if (iparam->name && strcmp(key, iparam->name) == 0) + { + if (index->primary) + ereport(ERROR, + (errcode(ERRCODE_DUPLICATE_COLUMN), + errmsg("column \"%s\" appears twice in primary key constraint", + key), + parser_errposition(cxt->pstate, constraint->location))); + else + ereport(ERROR, + (errcode(ERRCODE_DUPLICATE_COLUMN), + errmsg("column \"%s\" appears twice in unique constraint", + key), + parser_errposition(cxt->pstate, constraint->location))); + } + } + + /* OK, add it to the index definition */ + iparam = makeNode(IndexElem); + iparam->name = pstrdup(key); + iparam->expr = NULL; + iparam->indexcolname = NULL; + iparam->collation = NIL; + iparam->opclass = NIL; + iparam->ordering = SORTBY_DEFAULT; + iparam->nulls_ordering = SORTBY_NULLS_DEFAULT; + index->indexParams = lappend(index->indexParams, iparam); + } + } + + /* Add included columns to index definition */ + foreach(lc, constraint->including) { char *key = strVal(lfirst(lc)); bool found = false; @@ -2003,64 +2289,63 @@ transformIndexConstraint(Constraint *constraint, CreateStmtContext *cxt) break; } } - if (found) - { - /* found column in the new table; force it to be NOT NULL */ - if (constraint->contype == CONSTR_PRIMARY) - column->is_not_null = TRUE; - } - else if (SystemAttributeByName(key, cxt->hasoids) != NULL) - { - /* - * column will be a system column in the new table, so accept it. - * System columns can't ever be null, so no need to worry about - * PRIMARY/NOT NULL constraint. 
- */ - found = true; - } - else if (cxt->inhRelations) - { - /* try inherited tables */ - ListCell *inher; - foreach(inher, cxt->inhRelations) + if (!found) + { + if (SystemAttributeByName(key, cxt->hasoids) != NULL) { - RangeVar *inh = lfirst_node(RangeVar, inher); - Relation rel; - int count; - - rel = heap_openrv(inh, AccessShareLock); - /* check user requested inheritance from valid relkind */ - if (rel->rd_rel->relkind != RELKIND_RELATION && - rel->rd_rel->relkind != RELKIND_FOREIGN_TABLE && - rel->rd_rel->relkind != RELKIND_PARTITIONED_TABLE) - ereport(ERROR, - (errcode(ERRCODE_WRONG_OBJECT_TYPE), - errmsg("inherited relation \"%s\" is not a table or foreign table", - inh->relname))); - for (count = 0; count < rel->rd_att->natts; count++) - { - Form_pg_attribute inhattr = rel->rd_att->attrs[count]; - char *inhname = NameStr(inhattr->attname); + /* + * column will be a system column in the new table, so accept + * it. System columns can't ever be null, so no need to worry + * about PRIMARY/NOT NULL constraint. + */ + found = true; + } + else if (cxt->inhRelations) + { + /* try inherited tables */ + ListCell *inher; - if (inhattr->attisdropped) - continue; - if (strcmp(key, inhname) == 0) + foreach(inher, cxt->inhRelations) + { + RangeVar *inh = lfirst_node(RangeVar, inher); + Relation rel; + int count; + + rel = heap_openrv(inh, AccessShareLock); + /* check user requested inheritance from valid relkind */ + if (rel->rd_rel->relkind != RELKIND_RELATION && + rel->rd_rel->relkind != RELKIND_FOREIGN_TABLE && + rel->rd_rel->relkind != RELKIND_PARTITIONED_TABLE) + ereport(ERROR, + (errcode(ERRCODE_WRONG_OBJECT_TYPE), + errmsg("inherited relation \"%s\" is not a table or foreign table", + inh->relname))); + for (count = 0; count < rel->rd_att->natts; count++) { - found = true; - - /* - * We currently have no easy way to force an inherited - * column to be NOT NULL at creation, if its parent - * wasn't so already. We leave it to DefineIndex to - * fix things up in this case. - */ - break; + Form_pg_attribute inhattr = TupleDescAttr(rel->rd_att, + count); + char *inhname = NameStr(inhattr->attname); + + if (inhattr->attisdropped) + continue; + if (strcmp(key, inhname) == 0) + { + found = true; + + /* + * We currently have no easy way to force an + * inherited column to be NOT NULL at creation, if + * its parent wasn't so already. We leave it to + * DefineIndex to fix things up in this case. 
+ */ + break; + } } + heap_close(rel, NoLock); + if (found) + break; } - heap_close(rel, NoLock); - if (found) - break; } } @@ -2076,27 +2361,6 @@ transformIndexConstraint(Constraint *constraint, CreateStmtContext *cxt) errmsg("column \"%s\" named in key does not exist", key), parser_errposition(cxt->pstate, constraint->location))); - /* Check for PRIMARY KEY(foo, foo) */ - foreach(columns, index->indexParams) - { - iparam = (IndexElem *) lfirst(columns); - if (iparam->name && strcmp(key, iparam->name) == 0) - { - if (index->primary) - ereport(ERROR, - (errcode(ERRCODE_DUPLICATE_COLUMN), - errmsg("column \"%s\" appears twice in primary key constraint", - key), - parser_errposition(cxt->pstate, constraint->location))); - else - ereport(ERROR, - (errcode(ERRCODE_DUPLICATE_COLUMN), - errmsg("column \"%s\" appears twice in unique constraint", - key), - parser_errposition(cxt->pstate, constraint->location))); - } - } - /* OK, add it to the index definition */ iparam = makeNode(IndexElem); iparam->name = pstrdup(key); @@ -2104,14 +2368,25 @@ transformIndexConstraint(Constraint *constraint, CreateStmtContext *cxt) iparam->indexcolname = NULL; iparam->collation = NIL; iparam->opclass = NIL; - iparam->ordering = SORTBY_DEFAULT; - iparam->nulls_ordering = SORTBY_NULLS_DEFAULT; - index->indexParams = lappend(index->indexParams, iparam); + index->indexIncludingParams = lappend(index->indexIncludingParams, iparam); } return index; } +/* + * transformExtendedStatistics + * Handle extended statistic objects + * + * Right now, there's nothing to do here, so we just append the list to + * the existing "after" list. + */ +static void +transformExtendedStatistics(CreateStmtContext *cxt) +{ + cxt->alist = list_concat(cxt->alist, cxt->extstats); +} + /* * transformCheckConstraints * handle CHECK constraints @@ -2249,7 +2524,9 @@ transformIndexStmt(Oid relid, IndexStmt *stmt, const char *queryString) * relation, but we still need to open it. */ rel = relation_open(relid, NoLock); - rte = addRangeTableEntryForRelation(pstate, rel, NULL, false, true); + rte = addRangeTableEntryForRelation(pstate, rel, + AccessShareLock, + NULL, false, true); /* no to join list, yes to namespaces */ addRTEtoQuery(pstate, rte, false, true, true); @@ -2358,9 +2635,11 @@ transformRuleStmt(RuleStmt *stmt, const char *queryString, * qualification. */ oldrte = addRangeTableEntryForRelation(pstate, rel, + AccessShareLock, makeAlias("old", NIL), false, false); newrte = addRangeTableEntryForRelation(pstate, rel, + AccessShareLock, makeAlias("new", NIL), false, false); /* Must override addRangeTableEntry's default access-check flags */ @@ -2456,9 +2735,11 @@ transformRuleStmt(RuleStmt *stmt, const char *queryString, * them in the joinlist. 
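constraint->including and index->indexIncludingParams carry the INCLUDE clause of unique and primary key constraints; a minimal sketch with hypothetical names:

    CREATE TABLE accounts (
        id    int,
        email text,
        note  text,
        UNIQUE (id) INCLUDE (email)
    );
    -- The included column is stored in the index (useful for index-only
    -- scans) but is not part of the key, so the IndexElem built for it
    -- carries no opclass, collation, or ordering options.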
*/ oldrte = addRangeTableEntryForRelation(sub_pstate, rel, + AccessShareLock, makeAlias("old", NIL), false, false); newrte = addRangeTableEntryForRelation(sub_pstate, rel, + AccessShareLock, makeAlias("new", NIL), false, false); oldrte->requiredPerms = 0; @@ -2636,6 +2917,7 @@ transformAlterTableStmt(Oid relid, AlterTableStmt *stmt, const char *queryString) { Relation rel; + TupleDesc tupdesc; ParseState *pstate; CreateStmtContext cxt; List *result; @@ -2655,12 +2937,14 @@ transformAlterTableStmt(Oid relid, AlterTableStmt *stmt, /* Caller is responsible for locking the relation */ rel = relation_open(relid, NoLock); + tupdesc = RelationGetDescr(rel); /* Set up pstate */ pstate = make_parsestate(NULL); pstate->p_sourcetext = queryString; rte = addRangeTableEntryForRelation(pstate, rel, + AccessShareLock, NULL, false, true); @@ -2688,11 +2972,13 @@ transformAlterTableStmt(Oid relid, AlterTableStmt *stmt, cxt.fkconstraints = NIL; cxt.ixconstraints = NIL; cxt.inh_indexes = NIL; + cxt.extstats = NIL; cxt.blist = NIL; cxt.alist = NIL; cxt.pkey = NULL; cxt.ispartitioned = (rel->rd_rel->relkind == RELKIND_PARTITIONED_TABLE); cxt.partbound = NULL; + cxt.ofType = false; /* * The only subtypes that currently require parse transformation handling @@ -2781,7 +3067,8 @@ transformAlterTableStmt(Oid relid, AlterTableStmt *stmt, * if attribute not found, something will error about it * later */ - if (attnum != InvalidAttrNumber && get_attidentity(relid, attnum)) + if (attnum != InvalidAttrNumber && + TupleDescAttr(tupdesc, attnum - 1)->attidentity) { Oid seq_relid = getOwnedSequence(relid, attnum); Oid typeOid = typenameTypeId(pstate, def->typeName); @@ -2949,6 +3236,9 @@ transformAlterTableStmt(Oid relid, AlterTableStmt *stmt, newcmds = lappend(newcmds, newcmd); } + /* Append extended statistic objects */ + transformExtendedStatistics(&cxt); + /* Close rel */ relation_close(rel, NoLock); @@ -3272,18 +3562,39 @@ transformPartitionCmd(CreateStmtContext *cxt, PartitionCmd *cmd) { Relation parentRel = cxt->rel; - /* the table must be partitioned */ - if (parentRel->rd_rel->relkind != RELKIND_PARTITIONED_TABLE) - ereport(ERROR, - (errcode(ERRCODE_INVALID_OBJECT_DEFINITION), - errmsg("\"%s\" is not partitioned", - RelationGetRelationName(parentRel)))); - - /* transform the partition bound, if any */ - Assert(RelationGetPartitionKey(parentRel) != NULL); - if (cmd->bound != NULL) - cxt->partbound = transformPartitionBound(cxt->pstate, parentRel, - cmd->bound); + switch (parentRel->rd_rel->relkind) + { + case RELKIND_PARTITIONED_TABLE: + /* transform the partition bound, if any */ + Assert(RelationGetPartitionKey(parentRel) != NULL); + if (cmd->bound != NULL) + cxt->partbound = transformPartitionBound(cxt->pstate, parentRel, + cmd->bound); + break; + case RELKIND_PARTITIONED_INDEX: + /* nothing to check */ + Assert(cmd->bound == NULL); + break; + case RELKIND_RELATION: + /* the table must be partitioned */ + ereport(ERROR, + (errcode(ERRCODE_INVALID_OBJECT_DEFINITION), + errmsg("table \"%s\" is not partitioned", + RelationGetRelationName(parentRel)))); + break; + case RELKIND_INDEX: + /* the index must be partitioned */ + ereport(ERROR, + (errcode(ERRCODE_INVALID_OBJECT_DEFINITION), + errmsg("index \"%s\" is not partitioned", + RelationGetRelationName(parentRel)))); + break; + default: + /* parser shouldn't let this case through */ + elog(ERROR, "\"%s\" is not a partitioned table or index", + RelationGetRelationName(parentRel)); + break; + } } /* @@ -3304,7 +3615,44 @@ transformPartitionBound(ParseState *pstate, 
Relation parent, /* Avoid scribbling on input */ result_spec = copyObject(spec); - if (strategy == PARTITION_STRATEGY_LIST) + if (spec->is_default) + { + if (strategy == PARTITION_STRATEGY_HASH) + ereport(ERROR, + (errcode(ERRCODE_INVALID_TABLE_DEFINITION), + errmsg("a hash-partitioned table may not have a default partition"))); + + /* + * In case of the default partition, parser had no way to identify the + * partition strategy. Assign the parent's strategy to the default + * partition bound spec. + */ + result_spec->strategy = strategy; + + return result_spec; + } + + if (strategy == PARTITION_STRATEGY_HASH) + { + if (spec->strategy != PARTITION_STRATEGY_HASH) + ereport(ERROR, + (errcode(ERRCODE_INVALID_TABLE_DEFINITION), + errmsg("invalid bound specification for a hash partition"), + parser_errposition(pstate, exprLocation((Node *) spec)))); + + if (spec->modulus <= 0) + ereport(ERROR, + (errcode(ERRCODE_INVALID_TABLE_DEFINITION), + errmsg("modulus for hash partition must be a positive integer"))); + + Assert(spec->remainder >= 0); + + if (spec->remainder >= spec->modulus) + ereport(ERROR, + (errcode(ERRCODE_INVALID_TABLE_DEFINITION), + errmsg("remainder for hash partition must be less than modulus"))); + } + else if (strategy == PARTITION_STRATEGY_LIST) { ListCell *cell; char *colname; @@ -3319,8 +3667,8 @@ transformPartitionBound(ParseState *pstate, Relation parent, /* Get the only column's name in case we need to output an error */ if (key->partattrs[0] != 0) - colname = get_relid_attribute_name(RelationGetRelid(parent), - key->partattrs[0]); + colname = get_attname(RelationGetRelid(parent), + key->partattrs[0], false); else colname = deparse_expression((Node *) linitial(partexprs), deparse_context_for(RelationGetRelationName(parent), @@ -3382,6 +3730,13 @@ transformPartitionBound(ParseState *pstate, Relation parent, (errcode(ERRCODE_INVALID_TABLE_DEFINITION), errmsg("TO must specify exactly one value per partitioning column"))); + /* + * Once we see MINVALUE or MAXVALUE for one column, the remaining + * columns must be the same. + */ + validateInfiniteBounds(pstate, spec->lowerdatums); + validateInfiniteBounds(pstate, spec->upperdatums); + /* Transform all the constants */ i = j = 0; result_spec->lowerdatums = result_spec->upperdatums = NIL; @@ -3397,8 +3752,8 @@ transformPartitionBound(ParseState *pstate, Relation parent, /* Get the column's name in case we need to output an error */ if (key->partattrs[i] != 0) - colname = get_relid_attribute_name(RelationGetRelid(parent), - key->partattrs[i]); + colname = get_attname(RelationGetRelid(parent), + key->partattrs[i], false); else { colname = deparse_expression((Node *) list_nth(partexprs, j), @@ -3453,6 +3808,48 @@ transformPartitionBound(ParseState *pstate, Relation parent, return result_spec; } +/* + * validateInfiniteBounds + * + * Check that a MAXVALUE or MINVALUE specification in a partition bound is + * followed only by more of the same. 
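These checks map onto the bound syntax roughly as follows (hypothetical names): a hash bound needs a positive modulus and a remainder below it, a hash-partitioned table may not have a default partition, and a finite range value may not follow MINVALUE or MAXVALUE:

    CREATE TABLE events (id int, ts date) PARTITION BY HASH (id);
    CREATE TABLE events_p0 PARTITION OF events
        FOR VALUES WITH (MODULUS 4, REMAINDER 0);            -- accepted
    -- CREATE TABLE events_def PARTITION OF events DEFAULT;  -- rejected

    CREATE TABLE logs (a int, b int) PARTITION BY RANGE (a, b);
    CREATE TABLE logs_p1 PARTITION OF logs
        FOR VALUES FROM (MINVALUE, MINVALUE) TO (10, MAXVALUE);  -- accepted
    -- FROM (MINVALUE, 0) ... would trip validateInfiniteBounds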
+ */ +static void +validateInfiniteBounds(ParseState *pstate, List *blist) +{ + ListCell *lc; + PartitionRangeDatumKind kind = PARTITION_RANGE_DATUM_VALUE; + + foreach(lc, blist) + { + PartitionRangeDatum *prd = castNode(PartitionRangeDatum, lfirst(lc)); + + if (kind == prd->kind) + continue; + + switch (kind) + { + case PARTITION_RANGE_DATUM_VALUE: + kind = prd->kind; + break; + + case PARTITION_RANGE_DATUM_MAXVALUE: + ereport(ERROR, + (errcode(ERRCODE_DATATYPE_MISMATCH), + errmsg("every bound following MAXVALUE must also be MAXVALUE"), + parser_errposition(pstate, exprLocation((Node *) prd)))); + break; + + case PARTITION_RANGE_DATUM_MINVALUE: + ereport(ERROR, + (errcode(ERRCODE_DATATYPE_MISMATCH), + errmsg("every bound following MINVALUE must also be MINVALUE"), + parser_errposition(pstate, exprLocation((Node *) prd)))); + break; + } + } +} + /* * Transform one constant in a partition bound spec */ diff --git a/src/backend/parser/parser.c b/src/backend/parser/parser.c index 245b4cda3b..db30483459 100644 --- a/src/backend/parser/parser.c +++ b/src/backend/parser/parser.c @@ -10,7 +10,7 @@ * analyze.c and related files. * * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * IDENTIFICATION diff --git a/src/backend/parser/scan.l b/src/backend/parser/scan.l index 634bfa512f..950b8b8591 100644 --- a/src/backend/parser/scan.l +++ b/src/backend/parser/scan.l @@ -21,7 +21,7 @@ * Postgres 9.2, this check is made automatically by the Makefile.) * * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * IDENTIFICATION @@ -34,6 +34,7 @@ #include #include +#include "common/string.h" #include "parser/gramparse.h" #include "parser/parser.h" /* only needed for GUC variables */ #include "parser/scansup.h" @@ -41,6 +42,9 @@ } %{ + +/* LCOV_EXCL_START */ + /* Avoid exit() on fatal scanner errors (a bit ugly -- see yy_fatal_error) */ #undef fprintf #define fprintf(file, fmt, msg) fprintf_to_ereport(fmt, msg) @@ -335,6 +339,15 @@ identifier {ident_start}{ident_cont}* typecast "::" dot_dot \.\. colon_equals ":=" + +/* + * These operator-like tokens (unlike the above ones) also match the {operator} + * rule, which means that they might be overridden by a longer match if they + * are followed by a comment start or a + or - character. Accordingly, if you + * add to this list, you must also add corresponding code to the {operator} + * block to return the correct token in such cases. (This is not needed in + * psqlscan.l since the token value is ignored there.) + */ equals_greater "=>" less_equals "<=" greater_equals ">=" @@ -881,20 +894,33 @@ other . * to forbid operator names like '?-' that could not be * sequences of SQL operators. */ - while (nchars > 1 && - (yytext[nchars - 1] == '+' || - yytext[nchars - 1] == '-')) + if (nchars > 1 && + (yytext[nchars - 1] == '+' || + yytext[nchars - 1] == '-')) { int ic; for (ic = nchars - 2; ic >= 0; ic--) { - if (strchr("~!@#^&|`?%", yytext[ic])) + char c = yytext[ic]; + if (c == '~' || c == '!' || c == '@' || + c == '#' || c == '^' || c == '&' || + c == '|' || c == '`' || c == '?' 
|| + c == '%') break; } - if (ic >= 0) - break; /* found a char that makes it OK */ - nchars--; /* else remove the +/-, and check again */ + if (ic < 0) + { + /* + * didn't find a qualifying character, so remove + * all trailing [+-] + */ + do { + nchars--; + } while (nchars > 1 && + (yytext[nchars - 1] == '+' || + yytext[nchars - 1] == '-')); + } } SET_YYLLOC(); @@ -912,6 +938,25 @@ other . if (nchars == 1 && strchr(",()[].;:+-*/%^<>=", yytext[0])) return yytext[0]; + /* + * Likewise, if what we have left is two chars, and + * those match the tokens ">=", "<=", "=>", "<>" or + * "!=", then we must return the appropriate token + * rather than the generic Op. + */ + if (nchars == 2) + { + if (yytext[0] == '=' && yytext[1] == '>') + return EQUALS_GREATER; + if (yytext[0] == '>' && yytext[1] == '=') + return GREATER_EQUALS; + if (yytext[0] == '<' && yytext[1] == '=') + return LESS_EQUALS; + if (yytext[0] == '<' && yytext[1] == '>') + return NOT_EQUALS; + if (yytext[0] == '!' && yytext[1] == '=') + return NOT_EQUALS; + } } /* @@ -1011,6 +1056,8 @@ other . %% +/* LCOV_EXCL_STOP */ + /* * Arrange access to yyextra for subroutines of the main yylex() function. * We expect each subroutine to have a yyscanner parameter. Rather than @@ -1206,17 +1253,12 @@ litbufdup(core_yyscan_t yyscanner) static int process_integer_literal(const char *token, YYSTYPE *lval) { - long val; + int val; char *endptr; errno = 0; - val = strtol(token, &endptr, 10); - if (*endptr != '\0' || errno == ERANGE -#ifdef HAVE_LONG_INT_64 - /* if long > 32 bits, check for overflow of int4 */ - || val != (long) ((int32) val) -#endif - ) + val = strtoint(token, &endptr, 10); + if (*endptr != '\0' || errno == ERANGE) { /* integer too large, treat it as a float */ lval->str = pstrdup(token); diff --git a/src/backend/parser/scansup.c b/src/backend/parser/scansup.c index c3d2805803..9256524b8d 100644 --- a/src/backend/parser/scansup.c +++ b/src/backend/parser/scansup.c @@ -4,7 +4,7 @@ * support routines for the lex/flex scanner, used by both the normal * backend as well as the bootstrap backend * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * @@ -209,7 +209,7 @@ truncate_identifier(char *ident, int len, bool warn) } /* - * scanner_isspace() --- return TRUE if flex scanner considers char whitespace + * scanner_isspace() --- return true if flex scanner considers char whitespace * * This should be used instead of the potentially locale-dependent isspace() * function when it's important to match the lexer's behavior. diff --git a/src/backend/partitioning/Makefile b/src/backend/partitioning/Makefile new file mode 100644 index 0000000000..278fac3afa --- /dev/null +++ b/src/backend/partitioning/Makefile @@ -0,0 +1,17 @@ +#------------------------------------------------------------------------- +# +# Makefile-- +# Makefile for backend/partitioning +# +# IDENTIFICATION +# src/backend/partitioning/Makefile +# +#------------------------------------------------------------------------- + +subdir = src/backend/partitioning +top_builddir = ../../.. 
+include $(top_builddir)/src/Makefile.global + +OBJS = partprune.o partbounds.o + +include $(top_srcdir)/src/backend/common.mk diff --git a/src/backend/partitioning/partbounds.c b/src/backend/partitioning/partbounds.c new file mode 100644 index 0000000000..c94f73aadc --- /dev/null +++ b/src/backend/partitioning/partbounds.c @@ -0,0 +1,2307 @@ +/*------------------------------------------------------------------------- + * + * partbounds.c + * Support routines for manipulating partition bounds + * + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group + * Portions Copyright (c) 1994, Regents of the University of California + * + * IDENTIFICATION + * src/backend/partitioning/partbounds.c + * + *------------------------------------------------------------------------- +*/ +#include "postgres.h" + +#include "catalog/partition.h" +#include "catalog/pg_inherits.h" +#include "catalog/pg_type.h" +#include "commands/tablecmds.h" +#include "executor/executor.h" +#include "miscadmin.h" +#include "nodes/makefuncs.h" +#include "nodes/nodeFuncs.h" +#include "optimizer/clauses.h" +#include "parser/parse_coerce.h" +#include "partitioning/partprune.h" +#include "partitioning/partbounds.h" +#include "utils/builtins.h" +#include "utils/datum.h" +#include "utils/fmgroids.h" +#include "utils/hashutils.h" +#include "utils/lsyscache.h" +#include "utils/partcache.h" +#include "utils/rel.h" +#include "utils/snapmgr.h" +#include "utils/ruleutils.h" +#include "utils/syscache.h" + +static int get_partition_bound_num_indexes(PartitionBoundInfo b); +static Expr *make_partition_op_expr(PartitionKey key, int keynum, + uint16 strategy, Expr *arg1, Expr *arg2); +static Oid get_partition_operator(PartitionKey key, int col, + StrategyNumber strategy, bool *need_relabel); +static List *get_qual_for_hash(Relation parent, PartitionBoundSpec *spec); +static List *get_qual_for_list(Relation parent, PartitionBoundSpec *spec); +static List *get_qual_for_range(Relation parent, PartitionBoundSpec *spec, + bool for_default); +static void get_range_key_properties(PartitionKey key, int keynum, + PartitionRangeDatum *ldatum, + PartitionRangeDatum *udatum, + ListCell **partexprs_item, + Expr **keyCol, + Const **lower_val, Const **upper_val); +static List *get_range_nulltest(PartitionKey key); + +/* + * get_qual_from_partbound + * Given a parser node for partition bound, return the list of executable + * expressions as partition constraint + */ +List * +get_qual_from_partbound(Relation rel, Relation parent, + PartitionBoundSpec *spec) +{ + PartitionKey key = RelationGetPartitionKey(parent); + List *my_qual = NIL; + + Assert(key != NULL); + + switch (key->strategy) + { + case PARTITION_STRATEGY_HASH: + Assert(spec->strategy == PARTITION_STRATEGY_HASH); + my_qual = get_qual_for_hash(parent, spec); + break; + + case PARTITION_STRATEGY_LIST: + Assert(spec->strategy == PARTITION_STRATEGY_LIST); + my_qual = get_qual_for_list(parent, spec); + break; + + case PARTITION_STRATEGY_RANGE: + Assert(spec->strategy == PARTITION_STRATEGY_RANGE); + my_qual = get_qual_for_range(parent, spec, false); + break; + + default: + elog(ERROR, "unexpected partition strategy: %d", + (int) key->strategy); + } + + return my_qual; +} + +/* + * Are two partition bound collections logically equal? + * + * Used in the keep logic of relcache.c (ie, in RelationClearRelation()). 
+ * This is also useful when b1 and b2 are bound collections of two separate + * relations, respectively, because PartitionBoundInfo is a canonical + * representation of partition bounds. + */ +bool +partition_bounds_equal(int partnatts, int16 *parttyplen, bool *parttypbyval, + PartitionBoundInfo b1, PartitionBoundInfo b2) +{ + int i; + + if (b1->strategy != b2->strategy) + return false; + + if (b1->ndatums != b2->ndatums) + return false; + + if (b1->null_index != b2->null_index) + return false; + + if (b1->default_index != b2->default_index) + return false; + + if (b1->strategy == PARTITION_STRATEGY_HASH) + { + int greatest_modulus = get_hash_partition_greatest_modulus(b1); + + /* + * If two hash partitioned tables have different greatest moduli, + * their partition schemes don't match. + */ + if (greatest_modulus != get_hash_partition_greatest_modulus(b2)) + return false; + + /* + * We arrange the partitions in the ascending order of their moduli + * and remainders. Also every modulus is factor of next larger + * modulus. Therefore we can safely store index of a given partition + * in indexes array at remainder of that partition. Also entries at + * (remainder + N * modulus) positions in indexes array are all same + * for (modulus, remainder) specification for any partition. Thus + * datums array from both the given bounds are same, if and only if + * their indexes array will be same. So, it suffices to compare + * indexes array. + */ + for (i = 0; i < greatest_modulus; i++) + if (b1->indexes[i] != b2->indexes[i]) + return false; + +#ifdef USE_ASSERT_CHECKING + + /* + * Nonetheless make sure that the bounds are indeed same when the + * indexes match. Hash partition bound stores modulus and remainder + * at b1->datums[i][0] and b1->datums[i][1] position respectively. + */ + for (i = 0; i < b1->ndatums; i++) + Assert((b1->datums[i][0] == b2->datums[i][0] && + b1->datums[i][1] == b2->datums[i][1])); +#endif + } + else + { + for (i = 0; i < b1->ndatums; i++) + { + int j; + + for (j = 0; j < partnatts; j++) + { + /* For range partitions, the bounds might not be finite. */ + if (b1->kind != NULL) + { + /* The different kinds of bound all differ from each other */ + if (b1->kind[i][j] != b2->kind[i][j]) + return false; + + /* + * Non-finite bounds are equal without further + * examination. + */ + if (b1->kind[i][j] != PARTITION_RANGE_DATUM_VALUE) + continue; + } + + /* + * Compare the actual values. Note that it would be both + * incorrect and unsafe to invoke the comparison operator + * derived from the partitioning specification here. It would + * be incorrect because we want the relcache entry to be + * updated for ANY change to the partition bounds, not just + * those that the partitioning operator thinks are + * significant. It would be unsafe because we might reach + * this code in the context of an aborted transaction, and an + * arbitrary partitioning operator might not be safe in that + * context. datumIsEqual() should be simple enough to be + * safe. + */ + if (!datumIsEqual(b1->datums[i][j], b2->datums[i][j], + parttypbyval[j], parttyplen[j])) + return false; + } + + if (b1->indexes[i] != b2->indexes[i]) + return false; + } + + /* There are ndatums+1 indexes in case of range partitions */ + if (b1->strategy == PARTITION_STRATEGY_RANGE && + b1->indexes[i] != b2->indexes[i]) + return false; + } + return true; +} + +/* + * Return a copy of given PartitionBoundInfo structure. The data types of bounds + * are described by given partition key specification. 
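The factor rule this comment relies on (every modulus is a factor of the next larger modulus) can be seen from SQL; hypothetical names:

    CREATE TABLE h (k int) PARTITION BY HASH (k);
    CREATE TABLE h_a PARTITION OF h FOR VALUES WITH (MODULUS 2, REMAINDER 0);
    CREATE TABLE h_b PARTITION OF h FOR VALUES WITH (MODULUS 4, REMAINDER 1);
    CREATE TABLE h_c PARTITION OF h FOR VALUES WITH (MODULUS 4, REMAINDER 3);
    -- A further partition with MODULUS 6 would be rejected: 4 is not a
    -- factor of 6, so the moduli could no longer satisfy the rule.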
+ */ +PartitionBoundInfo +partition_bounds_copy(PartitionBoundInfo src, + PartitionKey key) +{ + PartitionBoundInfo dest; + int i; + int ndatums; + int partnatts; + int num_indexes; + + dest = (PartitionBoundInfo) palloc(sizeof(PartitionBoundInfoData)); + + dest->strategy = src->strategy; + ndatums = dest->ndatums = src->ndatums; + partnatts = key->partnatts; + + num_indexes = get_partition_bound_num_indexes(src); + + /* List partitioned tables have only a single partition key. */ + Assert(key->strategy != PARTITION_STRATEGY_LIST || partnatts == 1); + + dest->datums = (Datum **) palloc(sizeof(Datum *) * ndatums); + + if (src->kind != NULL) + { + dest->kind = (PartitionRangeDatumKind **) palloc(ndatums * + sizeof(PartitionRangeDatumKind *)); + for (i = 0; i < ndatums; i++) + { + dest->kind[i] = (PartitionRangeDatumKind *) palloc(partnatts * + sizeof(PartitionRangeDatumKind)); + + memcpy(dest->kind[i], src->kind[i], + sizeof(PartitionRangeDatumKind) * key->partnatts); + } + } + else + dest->kind = NULL; + + for (i = 0; i < ndatums; i++) + { + int j; + + /* + * For a corresponding to hash partition, datums array will have two + * elements - modulus and remainder. + */ + bool hash_part = (key->strategy == PARTITION_STRATEGY_HASH); + int natts = hash_part ? 2 : partnatts; + + dest->datums[i] = (Datum *) palloc(sizeof(Datum) * natts); + + for (j = 0; j < natts; j++) + { + bool byval; + int typlen; + + if (hash_part) + { + typlen = sizeof(int32); /* Always int4 */ + byval = true; /* int4 is pass-by-value */ + } + else + { + byval = key->parttypbyval[j]; + typlen = key->parttyplen[j]; + } + + if (dest->kind == NULL || + dest->kind[i][j] == PARTITION_RANGE_DATUM_VALUE) + dest->datums[i][j] = datumCopy(src->datums[i][j], + byval, typlen); + } + } + + dest->indexes = (int *) palloc(sizeof(int) * num_indexes); + memcpy(dest->indexes, src->indexes, sizeof(int) * num_indexes); + + dest->null_index = src->null_index; + dest->default_index = src->default_index; + + return dest; +} + +/* + * check_new_partition_bound + * + * Checks if the new partition's bound overlaps any of the existing partitions + * of parent. Also performs additional checks as necessary per strategy. + */ +void +check_new_partition_bound(char *relname, Relation parent, + PartitionBoundSpec *spec) +{ + PartitionKey key = RelationGetPartitionKey(parent); + PartitionDesc partdesc = RelationGetPartitionDesc(parent); + PartitionBoundInfo boundinfo = partdesc->boundinfo; + ParseState *pstate = make_parsestate(NULL); + int with = -1; + bool overlap = false; + + if (spec->is_default) + { + /* + * The default partition bound never conflicts with any other + * partition's; if that's what we're attaching, the only possible + * problem is that one already exists, so check for that and we're + * done. + */ + if (boundinfo == NULL || !partition_bound_has_default(boundinfo)) + return; + + /* Default partition already exists, error out. 
*/ + ereport(ERROR, + (errcode(ERRCODE_INVALID_OBJECT_DEFINITION), + errmsg("partition \"%s\" conflicts with existing default partition \"%s\"", + relname, get_rel_name(partdesc->oids[boundinfo->default_index])), + parser_errposition(pstate, spec->location))); + } + + switch (key->strategy) + { + case PARTITION_STRATEGY_HASH: + { + Assert(spec->strategy == PARTITION_STRATEGY_HASH); + Assert(spec->remainder >= 0 && spec->remainder < spec->modulus); + + if (partdesc->nparts > 0) + { + Datum **datums = boundinfo->datums; + int ndatums = boundinfo->ndatums; + int greatest_modulus; + int remainder; + int offset; + bool valid_modulus = true; + int prev_modulus, /* Previous largest modulus */ + next_modulus; /* Next largest modulus */ + + /* + * Check rule that every modulus must be a factor of the + * next larger modulus. For example, if you have a bunch + * of partitions that all have modulus 5, you can add a + * new partition with modulus 10 or a new partition with + * modulus 15, but you cannot add both a partition with + * modulus 10 and a partition with modulus 15, because 10 + * is not a factor of 15. + * + * Get the greatest (modulus, remainder) pair contained in + * boundinfo->datums that is less than or equal to the + * (spec->modulus, spec->remainder) pair. + */ + offset = partition_hash_bsearch(boundinfo, + spec->modulus, + spec->remainder); + if (offset < 0) + { + next_modulus = DatumGetInt32(datums[0][0]); + valid_modulus = (next_modulus % spec->modulus) == 0; + } + else + { + prev_modulus = DatumGetInt32(datums[offset][0]); + valid_modulus = (spec->modulus % prev_modulus) == 0; + + if (valid_modulus && (offset + 1) < ndatums) + { + next_modulus = DatumGetInt32(datums[offset + 1][0]); + valid_modulus = (next_modulus % spec->modulus) == 0; + } + } + + if (!valid_modulus) + ereport(ERROR, + (errcode(ERRCODE_INVALID_OBJECT_DEFINITION), + errmsg("every hash partition modulus must be a factor of the next larger modulus"))); + + greatest_modulus = get_hash_partition_greatest_modulus(boundinfo); + remainder = spec->remainder; + + /* + * Normally, the lowest remainder that could conflict with + * the new partition is equal to the remainder specified + * for the new partition, but when the new partition has a + * modulus higher than any used so far, we need to adjust. + */ + if (remainder >= greatest_modulus) + remainder = remainder % greatest_modulus; + + /* Check every potentially-conflicting remainder. 
*/ + do + { + if (boundinfo->indexes[remainder] != -1) + { + overlap = true; + with = boundinfo->indexes[remainder]; + break; + } + remainder += spec->modulus; + } while (remainder < greatest_modulus); + } + + break; + } + + case PARTITION_STRATEGY_LIST: + { + Assert(spec->strategy == PARTITION_STRATEGY_LIST); + + if (partdesc->nparts > 0) + { + ListCell *cell; + + Assert(boundinfo && + boundinfo->strategy == PARTITION_STRATEGY_LIST && + (boundinfo->ndatums > 0 || + partition_bound_accepts_nulls(boundinfo) || + partition_bound_has_default(boundinfo))); + + foreach(cell, spec->listdatums) + { + Const *val = castNode(Const, lfirst(cell)); + + if (!val->constisnull) + { + int offset; + bool equal; + + offset = partition_list_bsearch(&key->partsupfunc[0], + key->partcollation, + boundinfo, + val->constvalue, + &equal); + if (offset >= 0 && equal) + { + overlap = true; + with = boundinfo->indexes[offset]; + break; + } + } + else if (partition_bound_accepts_nulls(boundinfo)) + { + overlap = true; + with = boundinfo->null_index; + break; + } + } + } + + break; + } + + case PARTITION_STRATEGY_RANGE: + { + PartitionRangeBound *lower, + *upper; + + Assert(spec->strategy == PARTITION_STRATEGY_RANGE); + lower = make_one_partition_rbound(key, -1, spec->lowerdatums, true); + upper = make_one_partition_rbound(key, -1, spec->upperdatums, false); + + /* + * First check if the resulting range would be empty with + * specified lower and upper bounds + */ + if (partition_rbound_cmp(key->partnatts, key->partsupfunc, + key->partcollation, lower->datums, + lower->kind, true, upper) >= 0) + { + ereport(ERROR, + (errcode(ERRCODE_INVALID_OBJECT_DEFINITION), + errmsg("empty range bound specified for partition \"%s\"", + relname), + errdetail("Specified lower bound %s is greater than or equal to upper bound %s.", + get_range_partbound_string(spec->lowerdatums), + get_range_partbound_string(spec->upperdatums)), + parser_errposition(pstate, spec->location))); + } + + if (partdesc->nparts > 0) + { + int offset; + bool equal; + + Assert(boundinfo && + boundinfo->strategy == PARTITION_STRATEGY_RANGE && + (boundinfo->ndatums > 0 || + partition_bound_has_default(boundinfo))); + + /* + * Test whether the new lower bound (which is treated + * inclusively as part of the new partition) lies inside + * an existing partition, or in a gap. + * + * If it's inside an existing partition, the bound at + * offset + 1 will be the upper bound of that partition, + * and its index will be >= 0. + * + * If it's in a gap, the bound at offset + 1 will be the + * lower bound of the next partition, and its index will + * be -1. This is also true if there is no next partition, + * since the index array is initialised with an extra -1 + * at the end. + */ + offset = partition_range_bsearch(key->partnatts, + key->partsupfunc, + key->partcollation, + boundinfo, lower, + &equal); + + if (boundinfo->indexes[offset + 1] < 0) + { + /* + * Check that the new partition will fit in the gap. + * For it to fit, the new upper bound must be less + * than or equal to the lower bound of the next + * partition, if there is one. 
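The hash branch above enforces two things: every modulus must divide the next larger one, and the new (modulus, remainder) pair must not land on a remainder slot an existing partition already covers. A standalone sketch of both checks follows; hypothetical names, not part of the patch.

#include <stdbool.h>
#include <stdio.h>

/*
 * Factor rule: the nearest smaller existing modulus must divide the new
 * one, and the new one must divide the nearest larger existing modulus
 * (0 means "no such neighbour").
 */
static bool
modulus_is_valid(int new_modulus, int prev_modulus, int next_modulus)
{
	if (prev_modulus > 0 && new_modulus % prev_modulus != 0)
		return false;
	if (next_modulus > 0 && next_modulus % new_modulus != 0)
		return false;
	return true;
}

/*
 * Conflict probe: the new pair collides with an existing partition iff
 * some slot remainder, remainder + modulus, ... below the greatest
 * modulus is already taken; return that partition's number, or -1.
 */
static int
find_conflict(const int *indexes, int greatest_modulus, int modulus, int remainder)
{
	if (remainder >= greatest_modulus)
		remainder %= greatest_modulus;
	do
	{
		if (indexes[remainder] != -1)
			return indexes[remainder];
		remainder += modulus;
	} while (remainder < greatest_modulus);
	return -1;
}

int
main(void)
{
	int indexes[4] = {0, 1, 0, 2};	/* partitions (2,0), (4,1), (4,3) */

	/* With modulus 5 in use, 10 and 15 are each fine alone, but they
	 * cannot coexist because 10 does not divide 15. */
	printf("%d %d %d\n",
		   modulus_is_valid(10, 5, 0),
		   modulus_is_valid(15, 5, 0),
		   modulus_is_valid(15, 10, 0));	/* 1 1 0 */

	/* (modulus 8, remainder 5) collides with partition 1 = (4, 1). */
	printf("%d\n", find_conflict(indexes, 4, 8, 5));	/* 1 */
	return 0;
}
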
+ */ + if (offset + 1 < boundinfo->ndatums) + { + int32 cmpval; + Datum *datums; + PartitionRangeDatumKind *kind; + bool is_lower; + + datums = boundinfo->datums[offset + 1]; + kind = boundinfo->kind[offset + 1]; + is_lower = (boundinfo->indexes[offset + 1] == -1); + + cmpval = partition_rbound_cmp(key->partnatts, + key->partsupfunc, + key->partcollation, + datums, kind, + is_lower, upper); + if (cmpval < 0) + { + /* + * The new partition overlaps with the + * existing partition between offset + 1 and + * offset + 2. + */ + overlap = true; + with = boundinfo->indexes[offset + 2]; + } + } + } + else + { + /* + * The new partition overlaps with the existing + * partition between offset and offset + 1. + */ + overlap = true; + with = boundinfo->indexes[offset + 1]; + } + } + + break; + } + + default: + elog(ERROR, "unexpected partition strategy: %d", + (int) key->strategy); + } + + if (overlap) + { + Assert(with >= 0); + ereport(ERROR, + (errcode(ERRCODE_INVALID_OBJECT_DEFINITION), + errmsg("partition \"%s\" would overlap partition \"%s\"", + relname, get_rel_name(partdesc->oids[with])), + parser_errposition(pstate, spec->location))); + } +} + +/* + * check_default_partition_contents + * + * This function checks if there exists a row in the default partition that + * would properly belong to the new partition being added. If it finds one, + * it throws an error. + */ +void +check_default_partition_contents(Relation parent, Relation default_rel, + PartitionBoundSpec *new_spec) +{ + List *new_part_constraints; + List *def_part_constraints; + List *all_parts; + ListCell *lc; + + new_part_constraints = (new_spec->strategy == PARTITION_STRATEGY_LIST) + ? get_qual_for_list(parent, new_spec) + : get_qual_for_range(parent, new_spec, false); + def_part_constraints = + get_proposed_default_constraint(new_part_constraints); + + /* + * If the existing constraints on the default partition imply that it will + * not contain any row that would belong to the new partition, we can + * avoid scanning the default partition. + */ + if (PartConstraintImpliedByRelConstraint(default_rel, def_part_constraints)) + { + ereport(INFO, + (errmsg("updated partition constraint for default partition \"%s\" is implied by existing constraints", + RelationGetRelationName(default_rel)))); + return; + } + + /* + * Scan the default partition and its subpartitions, and check for rows + * that do not satisfy the revised partition constraints. + */ + if (default_rel->rd_rel->relkind == RELKIND_PARTITIONED_TABLE) + all_parts = find_all_inheritors(RelationGetRelid(default_rel), + AccessExclusiveLock, NULL); + else + all_parts = list_make1_oid(RelationGetRelid(default_rel)); + + foreach(lc, all_parts) + { + Oid part_relid = lfirst_oid(lc); + Relation part_rel; + Expr *constr; + Expr *partition_constraint; + EState *estate; + HeapTuple tuple; + ExprState *partqualstate = NULL; + Snapshot snapshot; + TupleDesc tupdesc; + ExprContext *econtext; + HeapScanDesc scan; + MemoryContext oldCxt; + TupleTableSlot *tupslot; + + /* Lock already taken above. */ + if (part_relid != RelationGetRelid(default_rel)) + { + part_rel = heap_open(part_relid, NoLock); + + /* + * If the partition constraints on default partition child imply + * that it will not contain any row that would belong to the new + * partition, we can avoid scanning the child table. 
+ */ + if (PartConstraintImpliedByRelConstraint(part_rel, + def_part_constraints)) + { + ereport(INFO, + (errmsg("updated partition constraint for default partition \"%s\" is implied by existing constraints", + RelationGetRelationName(part_rel)))); + + heap_close(part_rel, NoLock); + continue; + } + } + else + part_rel = default_rel; + + /* + * Only RELKIND_RELATION relations (i.e. leaf partitions) need to be + * scanned. + */ + if (part_rel->rd_rel->relkind != RELKIND_RELATION) + { + if (part_rel->rd_rel->relkind == RELKIND_FOREIGN_TABLE) + ereport(WARNING, + (errcode(ERRCODE_CHECK_VIOLATION), + errmsg("skipped scanning foreign table \"%s\" which is a partition of default partition \"%s\"", + RelationGetRelationName(part_rel), + RelationGetRelationName(default_rel)))); + + if (RelationGetRelid(default_rel) != RelationGetRelid(part_rel)) + heap_close(part_rel, NoLock); + + continue; + } + + tupdesc = CreateTupleDescCopy(RelationGetDescr(part_rel)); + constr = linitial(def_part_constraints); + partition_constraint = (Expr *) + map_partition_varattnos((List *) constr, + 1, part_rel, parent, NULL); + estate = CreateExecutorState(); + + /* Build expression execution states for partition check quals */ + partqualstate = ExecPrepareExpr(partition_constraint, estate); + + econtext = GetPerTupleExprContext(estate); + snapshot = RegisterSnapshot(GetLatestSnapshot()); + scan = heap_beginscan(part_rel, snapshot, 0, NULL); + tupslot = MakeSingleTupleTableSlot(tupdesc); + + /* + * Switch to per-tuple memory context and reset it for each tuple + * produced, so we don't leak memory. + */ + oldCxt = MemoryContextSwitchTo(GetPerTupleMemoryContext(estate)); + + while ((tuple = heap_getnext(scan, ForwardScanDirection)) != NULL) + { + ExecStoreHeapTuple(tuple, tupslot, false); + econtext->ecxt_scantuple = tupslot; + + if (!ExecCheck(partqualstate, econtext)) + ereport(ERROR, + (errcode(ERRCODE_CHECK_VIOLATION), + errmsg("updated partition constraint for default partition \"%s\" would be violated by some row", + RelationGetRelationName(default_rel)))); + + ResetExprContext(econtext); + CHECK_FOR_INTERRUPTS(); + } + + MemoryContextSwitchTo(oldCxt); + heap_endscan(scan); + UnregisterSnapshot(snapshot); + ExecDropSingleTupleTableSlot(tupslot); + FreeExecutorState(estate); + + if (RelationGetRelid(default_rel) != RelationGetRelid(part_rel)) + heap_close(part_rel, NoLock); /* keep the lock until commit */ + } +} + +/* + * get_hash_partition_greatest_modulus + * + * Returns the greatest modulus of the hash partition bound. The greatest + * modulus will be at the end of the datums array because hash partitions are + * arranged in the ascending order of their moduli and remainders. + */ +int +get_hash_partition_greatest_modulus(PartitionBoundInfo bound) +{ + Assert(bound && bound->strategy == PARTITION_STRATEGY_HASH); + Assert(bound->datums && bound->ndatums > 0); + Assert(DatumGetInt32(bound->datums[bound->ndatums - 1][0]) > 0); + + return DatumGetInt32(bound->datums[bound->ndatums - 1][0]); +} + +/* + * make_one_partition_rbound + * + * Return a PartitionRangeBound given a list of PartitionRangeDatum elements + * and a flag telling whether the bound is lower or not. Made into a function + * because there are multiple sites that want to use this facility. 
+ */ +PartitionRangeBound * +make_one_partition_rbound(PartitionKey key, int index, List *datums, bool lower) +{ + PartitionRangeBound *bound; + ListCell *lc; + int i; + + Assert(datums != NIL); + + bound = (PartitionRangeBound *) palloc0(sizeof(PartitionRangeBound)); + bound->index = index; + bound->datums = (Datum *) palloc0(key->partnatts * sizeof(Datum)); + bound->kind = (PartitionRangeDatumKind *) palloc0(key->partnatts * + sizeof(PartitionRangeDatumKind)); + bound->lower = lower; + + i = 0; + foreach(lc, datums) + { + PartitionRangeDatum *datum = castNode(PartitionRangeDatum, lfirst(lc)); + + /* What's contained in this range datum? */ + bound->kind[i] = datum->kind; + + if (datum->kind == PARTITION_RANGE_DATUM_VALUE) + { + Const *val = castNode(Const, datum->value); + + if (val->constisnull) + elog(ERROR, "invalid range bound datum"); + bound->datums[i] = val->constvalue; + } + + i++; + } + + return bound; +} + +/* + * partition_rbound_cmp + * + * Return for two range bounds whether the 1st one (specified in datums1, + * kind1, and lower1) is <, =, or > the bound specified in *b2. + * + * partnatts, partsupfunc and partcollation give the number of attributes in the + * bounds to be compared, comparison function to be used and the collations of + * attributes, respectively. + * + * Note that if the values of the two range bounds compare equal, then we take + * into account whether they are upper or lower bounds, and an upper bound is + * considered to be smaller than a lower bound. This is important to the way + * that RelationBuildPartitionDesc() builds the PartitionBoundInfoData + * structure, which only stores the upper bound of a common boundary between + * two contiguous partitions. + */ +int32 +partition_rbound_cmp(int partnatts, FmgrInfo *partsupfunc, + Oid *partcollation, + Datum *datums1, PartitionRangeDatumKind *kind1, + bool lower1, PartitionRangeBound *b2) +{ + int32 cmpval = 0; /* placate compiler */ + int i; + Datum *datums2 = b2->datums; + PartitionRangeDatumKind *kind2 = b2->kind; + bool lower2 = b2->lower; + + for (i = 0; i < partnatts; i++) + { + /* + * First, handle cases where the column is unbounded, which should not + * invoke the comparison procedure, and should not consider any later + * columns. Note that the PartitionRangeDatumKind enum elements + * compare the same way as the values they represent. + */ + if (kind1[i] < kind2[i]) + return -1; + else if (kind1[i] > kind2[i]) + return 1; + else if (kind1[i] != PARTITION_RANGE_DATUM_VALUE) + + /* + * The column bounds are both MINVALUE or both MAXVALUE. No later + * columns should be considered, but we still need to compare + * whether they are upper or lower bounds. + */ + break; + + cmpval = DatumGetInt32(FunctionCall2Coll(&partsupfunc[i], + partcollation[i], + datums1[i], + datums2[i])); + if (cmpval != 0) + break; + } + + /* + * If the comparison is anything other than equal, we're done. If they + * compare equal though, we still have to consider whether the boundaries + * are inclusive or exclusive. Exclusive one is considered smaller of the + * two. + */ + if (cmpval == 0 && lower1 != lower2) + cmpval = lower1 ? 1 : -1; + + return cmpval; +} + +/* + * partition_rbound_datum_cmp + * + * Return whether range bound (specified in rb_datums, rb_kind, and rb_lower) + * is <, =, or > partition key of tuple (tuple_datums) + * + * n_tuple_datums, partsupfunc and partcollation give number of attributes in + * the bounds to be compared, comparison function to be used and the collations + * of attributes resp. 
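A single-column standalone model of the ordering partition_rbound_cmp() implements above: unbounded kinds compare by kind alone, and when two bounds are otherwise equal the upper bound sorts before the lower bound, which lets one stored datum serve as the shared boundary of two adjacent partitions. Hypothetical names, not part of the patch.

#include <stdio.h>

/* Kinds ordered the same way as PartitionRangeDatumKind: MINVALUE < VALUE < MAXVALUE. */
typedef enum { RB_MINVALUE = -1, RB_VALUE = 0, RB_MAXVALUE = 1 } BoundKind;

typedef struct
{
	BoundKind	kind;
	int			datum;		/* meaningful only when kind == RB_VALUE */
	int			is_lower;	/* 1 = lower bound, 0 = upper bound */
} IntBound;

static int
bound_cmp(const IntBound *a, const IntBound *b)
{
	int			cmp = 0;

	/* Unbounded columns compare by kind alone. */
	if (a->kind != b->kind)
		return a->kind < b->kind ? -1 : 1;
	if (a->kind == RB_VALUE)
		cmp = (a->datum > b->datum) - (a->datum < b->datum);

	/* Equal so far: an upper bound sorts before a lower bound. */
	if (cmp == 0 && a->is_lower != b->is_lower)
		cmp = a->is_lower ? 1 : -1;
	return cmp;
}

int
main(void)
{
	IntBound	upper_20 = {RB_VALUE, 20, 0};	/* TO (20) of one partition */
	IntBound	lower_20 = {RB_VALUE, 20, 1};	/* FROM (20) of the next */
	IntBound	lower_min = {RB_MINVALUE, 0, 1};

	printf("%d %d\n",
		   bound_cmp(&upper_20, &lower_20),		/* -1 */
		   bound_cmp(&lower_min, &upper_20));	/* -1: MINVALUE below any value */
	return 0;
}
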
+ * + */ +int32 +partition_rbound_datum_cmp(FmgrInfo *partsupfunc, Oid *partcollation, + Datum *rb_datums, PartitionRangeDatumKind *rb_kind, + Datum *tuple_datums, int n_tuple_datums) +{ + int i; + int32 cmpval = -1; + + for (i = 0; i < n_tuple_datums; i++) + { + if (rb_kind[i] == PARTITION_RANGE_DATUM_MINVALUE) + return -1; + else if (rb_kind[i] == PARTITION_RANGE_DATUM_MAXVALUE) + return 1; + + cmpval = DatumGetInt32(FunctionCall2Coll(&partsupfunc[i], + partcollation[i], + rb_datums[i], + tuple_datums[i])); + if (cmpval != 0) + break; + } + + return cmpval; +} + +/* + * partition_hbound_cmp + * + * Compares modulus first, then remainder if modulus is equal. + */ +int32 +partition_hbound_cmp(int modulus1, int remainder1, int modulus2, int remainder2) +{ + if (modulus1 < modulus2) + return -1; + if (modulus1 > modulus2) + return 1; + if (modulus1 == modulus2 && remainder1 != remainder2) + return (remainder1 > remainder2) ? 1 : -1; + return 0; +} + +/* + * partition_list_bsearch + * Returns the index of the greatest bound datum that is less than equal + * to the given value or -1 if all of the bound datums are greater + * + * *is_equal is set to true if the bound datum at the returned index is equal + * to the input value. + */ +int +partition_list_bsearch(FmgrInfo *partsupfunc, Oid *partcollation, + PartitionBoundInfo boundinfo, + Datum value, bool *is_equal) +{ + int lo, + hi, + mid; + + lo = -1; + hi = boundinfo->ndatums - 1; + while (lo < hi) + { + int32 cmpval; + + mid = (lo + hi + 1) / 2; + cmpval = DatumGetInt32(FunctionCall2Coll(&partsupfunc[0], + partcollation[0], + boundinfo->datums[mid][0], + value)); + if (cmpval <= 0) + { + lo = mid; + *is_equal = (cmpval == 0); + if (*is_equal) + break; + } + else + hi = mid - 1; + } + + return lo; +} + +/* + * partition_range_bsearch + * Returns the index of the greatest range bound that is less than or + * equal to the given range bound or -1 if all of the range bounds are + * greater + * + * *is_equal is set to true if the range bound at the returned index is equal + * to the input range bound + */ +int +partition_range_bsearch(int partnatts, FmgrInfo *partsupfunc, + Oid *partcollation, + PartitionBoundInfo boundinfo, + PartitionRangeBound *probe, bool *is_equal) +{ + int lo, + hi, + mid; + + lo = -1; + hi = boundinfo->ndatums - 1; + while (lo < hi) + { + int32 cmpval; + + mid = (lo + hi + 1) / 2; + cmpval = partition_rbound_cmp(partnatts, partsupfunc, + partcollation, + boundinfo->datums[mid], + boundinfo->kind[mid], + (boundinfo->indexes[mid] == -1), + probe); + if (cmpval <= 0) + { + lo = mid; + *is_equal = (cmpval == 0); + + if (*is_equal) + break; + } + else + hi = mid - 1; + } + + return lo; +} + +/* + * partition_range_bsearch + * Returns the index of the greatest range bound that is less than or + * equal to the given tuple or -1 if all of the range bounds are greater + * + * *is_equal is set to true if the range bound at the returned index is equal + * to the input tuple. 
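The partition_*_bsearch() routines in this file all share one convention: lo starts at -1, the midpoint is rounded up so lo always advances, and the result is the index of the greatest bound less than or equal to the probe, or -1 if every bound is greater. A standalone sketch over plain integers; hypothetical names, not part of the patch.

#include <stdbool.h>
#include <stdio.h>

/*
 * Return the index of the greatest element <= value, or -1 if every
 * element is greater; *is_equal reports an exact hit.
 */
static int
greatest_le_bsearch(const int *datums, int ndatums, int value, bool *is_equal)
{
	int			lo = -1;
	int			hi = ndatums - 1;

	*is_equal = false;
	while (lo < hi)
	{
		int			mid = (lo + hi + 1) / 2;	/* round up so lo always advances */

		if (datums[mid] <= value)
		{
			lo = mid;
			*is_equal = (datums[mid] == value);
			if (*is_equal)
				break;
		}
		else
			hi = mid - 1;
	}
	return lo;
}

int
main(void)
{
	int			bounds[] = {10, 20, 30, 40};
	bool		eq;

	printf("%d\n", greatest_le_bsearch(bounds, 4, 25, &eq));	/* 1 (bound 20) */
	printf("%d\n", greatest_le_bsearch(bounds, 4, 5, &eq));		/* -1 */
	return 0;
}
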
+ */ +int +partition_range_datum_bsearch(FmgrInfo *partsupfunc, Oid *partcollation, + PartitionBoundInfo boundinfo, + int nvalues, Datum *values, bool *is_equal) +{ + int lo, + hi, + mid; + + lo = -1; + hi = boundinfo->ndatums - 1; + while (lo < hi) + { + int32 cmpval; + + mid = (lo + hi + 1) / 2; + cmpval = partition_rbound_datum_cmp(partsupfunc, + partcollation, + boundinfo->datums[mid], + boundinfo->kind[mid], + values, + nvalues); + if (cmpval <= 0) + { + lo = mid; + *is_equal = (cmpval == 0); + + if (*is_equal) + break; + } + else + hi = mid - 1; + } + + return lo; +} + +/* + * partition_hash_bsearch + * Returns the index of the greatest (modulus, remainder) pair that is + * less than or equal to the given (modulus, remainder) pair or -1 if + * all of them are greater + */ +int +partition_hash_bsearch(PartitionBoundInfo boundinfo, + int modulus, int remainder) +{ + int lo, + hi, + mid; + + lo = -1; + hi = boundinfo->ndatums - 1; + while (lo < hi) + { + int32 cmpval, + bound_modulus, + bound_remainder; + + mid = (lo + hi + 1) / 2; + bound_modulus = DatumGetInt32(boundinfo->datums[mid][0]); + bound_remainder = DatumGetInt32(boundinfo->datums[mid][1]); + cmpval = partition_hbound_cmp(bound_modulus, bound_remainder, + modulus, remainder); + if (cmpval <= 0) + { + lo = mid; + + if (cmpval == 0) + break; + } + else + hi = mid - 1; + } + + return lo; +} + +/* + * get_partition_bound_num_indexes + * + * Returns the number of the entries in the partition bound indexes array. + */ +static int +get_partition_bound_num_indexes(PartitionBoundInfo bound) +{ + int num_indexes; + + Assert(bound); + + switch (bound->strategy) + { + case PARTITION_STRATEGY_HASH: + + /* + * The number of the entries in the indexes array is same as the + * greatest modulus. + */ + num_indexes = get_hash_partition_greatest_modulus(bound); + break; + + case PARTITION_STRATEGY_LIST: + num_indexes = bound->ndatums; + break; + + case PARTITION_STRATEGY_RANGE: + /* Range partitioned table has an extra index. */ + num_indexes = bound->ndatums + 1; + break; + + default: + elog(ERROR, "unexpected partition strategy: %d", + (int) bound->strategy); + } + + return num_indexes; +} + +/* + * get_partition_operator + * + * Return oid of the operator of the given strategy for the given partition + * key column. It is assumed that the partitioning key is of the same type as + * the chosen partitioning opclass, or at least binary-compatible. In the + * latter case, *need_relabel is set to true if the opclass is not of a + * polymorphic type (indicating a RelabelType node needed on top), otherwise + * false. + */ +static Oid +get_partition_operator(PartitionKey key, int col, StrategyNumber strategy, + bool *need_relabel) +{ + Oid operoid; + + /* + * Get the operator in the partitioning opfamily using the opclass' + * declared input type as both left- and righttype. + */ + operoid = get_opfamily_member(key->partopfamily[col], + key->partopcintype[col], + key->partopcintype[col], + strategy); + if (!OidIsValid(operoid)) + elog(ERROR, "missing operator %d(%u,%u) in partition opfamily %u", + strategy, key->partopcintype[col], key->partopcintype[col], + key->partopfamily[col]); + + /* + * If the partition key column is not of the same type as the operator + * class and not polymorphic, tell caller to wrap the non-Const expression + * in a RelabelType. This matches what parse_coerce.c does. 
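get_partition_bound_num_indexes() above sizes the indexes[] array differently per strategy; a tiny standalone restatement of that sizing, with hypothetical names, not part of the patch.

#include <stdio.h>

/*
 * Hash needs one slot per possible remainder, list one per datum, and
 * range one slot per interval delimited by the ndatums bounds, which is
 * ndatums + 1 including the interval after the last bound.
 */
static int
num_index_slots(char strategy, int ndatums, int greatest_modulus)
{
	switch (strategy)
	{
		case 'h':
			return greatest_modulus;
		case 'l':
			return ndatums;
		case 'r':
			return ndatums + 1;
		default:
			return -1;
	}
}

int
main(void)
{
	printf("%d %d %d\n",
		   num_index_slots('h', 3, 4),	/* 4 */
		   num_index_slots('l', 3, 0),	/* 3 */
		   num_index_slots('r', 3, 0));	/* 4 */
	return 0;
}
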
+ */ + *need_relabel = (key->parttypid[col] != key->partopcintype[col] && + key->partopcintype[col] != RECORDOID && + !IsPolymorphicType(key->partopcintype[col])); + + return operoid; +} + +/* + * make_partition_op_expr + * Returns an Expr for the given partition key column with arg1 and + * arg2 as its leftop and rightop, respectively + */ +static Expr * +make_partition_op_expr(PartitionKey key, int keynum, + uint16 strategy, Expr *arg1, Expr *arg2) +{ + Oid operoid; + bool need_relabel = false; + Expr *result = NULL; + + /* Get the correct btree operator for this partitioning column */ + operoid = get_partition_operator(key, keynum, strategy, &need_relabel); + + /* + * Chosen operator may be such that the non-Const operand needs to be + * coerced, so apply the same; see the comment in + * get_partition_operator(). + */ + if (!IsA(arg1, Const) && + (need_relabel || + key->partcollation[keynum] != key->parttypcoll[keynum])) + arg1 = (Expr *) makeRelabelType(arg1, + key->partopcintype[keynum], + -1, + key->partcollation[keynum], + COERCE_EXPLICIT_CAST); + + /* Generate the actual expression */ + switch (key->strategy) + { + case PARTITION_STRATEGY_LIST: + { + List *elems = (List *) arg2; + int nelems = list_length(elems); + + Assert(nelems >= 1); + Assert(keynum == 0); + + if (nelems > 1 && + !type_is_array(key->parttypid[keynum])) + { + ArrayExpr *arrexpr; + ScalarArrayOpExpr *saopexpr; + + /* Construct an ArrayExpr for the right-hand inputs */ + arrexpr = makeNode(ArrayExpr); + arrexpr->array_typeid = + get_array_type(key->parttypid[keynum]); + arrexpr->array_collid = key->parttypcoll[keynum]; + arrexpr->element_typeid = key->parttypid[keynum]; + arrexpr->elements = elems; + arrexpr->multidims = false; + arrexpr->location = -1; + + /* Build leftop = ANY (rightop) */ + saopexpr = makeNode(ScalarArrayOpExpr); + saopexpr->opno = operoid; + saopexpr->opfuncid = get_opcode(operoid); + saopexpr->useOr = true; + saopexpr->inputcollid = key->partcollation[keynum]; + saopexpr->args = list_make2(arg1, arrexpr); + saopexpr->location = -1; + + result = (Expr *) saopexpr; + } + else + { + List *elemops = NIL; + ListCell *lc; + + foreach(lc, elems) + { + Expr *elem = lfirst(lc), + *elemop; + + elemop = make_opclause(operoid, + BOOLOID, + false, + arg1, elem, + InvalidOid, + key->partcollation[keynum]); + elemops = lappend(elemops, elemop); + } + + result = nelems > 1 ? makeBoolExpr(OR_EXPR, elemops, -1) : linitial(elemops); + } + break; + } + + case PARTITION_STRATEGY_RANGE: + result = make_opclause(operoid, + BOOLOID, + false, + arg1, arg2, + InvalidOid, + key->partcollation[keynum]); + break; + + default: + elog(ERROR, "invalid partitioning strategy"); + break; + } + + return result; +} + +/* + * get_qual_for_hash + * + * Returns a CHECK constraint expression to use as a hash partition's + * constraint, given the parent relation and partition bound structure. + * + * The partition constraint for a hash partition is always a call to the + * built-in function satisfies_hash_partition(). + */ +static List * +get_qual_for_hash(Relation parent, PartitionBoundSpec *spec) +{ + PartitionKey key = RelationGetPartitionKey(parent); + FuncExpr *fexpr; + Node *relidConst; + Node *modulusConst; + Node *remainderConst; + List *args; + ListCell *partexprs_item; + int i; + + /* Fixed arguments. 
*/ + relidConst = (Node *) makeConst(OIDOID, + -1, + InvalidOid, + sizeof(Oid), + ObjectIdGetDatum(RelationGetRelid(parent)), + false, + true); + + modulusConst = (Node *) makeConst(INT4OID, + -1, + InvalidOid, + sizeof(int32), + Int32GetDatum(spec->modulus), + false, + true); + + remainderConst = (Node *) makeConst(INT4OID, + -1, + InvalidOid, + sizeof(int32), + Int32GetDatum(spec->remainder), + false, + true); + + args = list_make3(relidConst, modulusConst, remainderConst); + partexprs_item = list_head(key->partexprs); + + /* Add an argument for each key column. */ + for (i = 0; i < key->partnatts; i++) + { + Node *keyCol; + + /* Left operand */ + if (key->partattrs[i] != 0) + { + keyCol = (Node *) makeVar(1, + key->partattrs[i], + key->parttypid[i], + key->parttypmod[i], + key->parttypcoll[i], + 0); + } + else + { + keyCol = (Node *) copyObject(lfirst(partexprs_item)); + partexprs_item = lnext(partexprs_item); + } + + args = lappend(args, keyCol); + } + + fexpr = makeFuncExpr(F_SATISFIES_HASH_PARTITION, + BOOLOID, + args, + InvalidOid, + InvalidOid, + COERCE_EXPLICIT_CALL); + + return list_make1(fexpr); +} + +/* + * get_qual_for_list + * + * Returns an implicit-AND list of expressions to use as a list partition's + * constraint, given the parent relation and partition bound structure. + * + * The function returns NIL for a default partition when it's the only + * partition since in that case there is no constraint. + */ +static List * +get_qual_for_list(Relation parent, PartitionBoundSpec *spec) +{ + PartitionKey key = RelationGetPartitionKey(parent); + List *result; + Expr *keyCol; + Expr *opexpr; + NullTest *nulltest; + ListCell *cell; + List *elems = NIL; + bool list_has_null = false; + + /* + * Only single-column list partitioning is supported, so we are worried + * only about the partition key with index 0. + */ + Assert(key->partnatts == 1); + + /* Construct Var or expression representing the partition column */ + if (key->partattrs[0] != 0) + keyCol = (Expr *) makeVar(1, + key->partattrs[0], + key->parttypid[0], + key->parttypmod[0], + key->parttypcoll[0], + 0); + else + keyCol = (Expr *) copyObject(linitial(key->partexprs)); + + /* + * For default list partition, collect datums for all the partitions. The + * default partition constraint should check that the partition key is + * equal to none of those. + */ + if (spec->is_default) + { + int i; + int ndatums = 0; + PartitionDesc pdesc = RelationGetPartitionDesc(parent); + PartitionBoundInfo boundinfo = pdesc->boundinfo; + + if (boundinfo) + { + ndatums = boundinfo->ndatums; + + if (partition_bound_accepts_nulls(boundinfo)) + list_has_null = true; + } + + /* + * If default is the only partition, there need not be any partition + * constraint on it. + */ + if (ndatums == 0 && !list_has_null) + return NIL; + + for (i = 0; i < ndatums; i++) + { + Const *val; + + /* + * Construct Const from known-not-null datum. We must be careful + * to copy the value, because our result has to be able to outlive + * the relcache entry we're copying from. + */ + val = makeConst(key->parttypid[0], + key->parttypmod[0], + key->parttypcoll[0], + key->parttyplen[0], + datumCopy(*boundinfo->datums[i], + key->parttypbyval[0], + key->parttyplen[0]), + false, /* isnull */ + key->parttypbyval[0]); + + elems = lappend(elems, val); + } + } + else + { + /* + * Create list of Consts for the allowed values, excluding any nulls. 
+ */ + foreach(cell, spec->listdatums) + { + Const *val = castNode(Const, lfirst(cell)); + + if (val->constisnull) + list_has_null = true; + else + elems = lappend(elems, copyObject(val)); + } + } + + if (elems) + { + /* + * Generate the operator expression from the non-null partition + * values. + */ + opexpr = make_partition_op_expr(key, 0, BTEqualStrategyNumber, + keyCol, (Expr *) elems); + } + else + { + /* + * If there are no partition values, we don't need an operator + * expression. + */ + opexpr = NULL; + } + + if (!list_has_null) + { + /* + * Gin up a "col IS NOT NULL" test that will be AND'd with the main + * expression. This might seem redundant, but the partition routing + * machinery needs it. + */ + nulltest = makeNode(NullTest); + nulltest->arg = keyCol; + nulltest->nulltesttype = IS_NOT_NULL; + nulltest->argisrow = false; + nulltest->location = -1; + + result = opexpr ? list_make2(nulltest, opexpr) : list_make1(nulltest); + } + else + { + /* + * Gin up a "col IS NULL" test that will be OR'd with the main + * expression. + */ + nulltest = makeNode(NullTest); + nulltest->arg = keyCol; + nulltest->nulltesttype = IS_NULL; + nulltest->argisrow = false; + nulltest->location = -1; + + if (opexpr) + { + Expr *or; + + or = makeBoolExpr(OR_EXPR, list_make2(nulltest, opexpr), -1); + result = list_make1(or); + } + else + result = list_make1(nulltest); + } + + /* + * Note that, in general, applying NOT to a constraint expression doesn't + * necessarily invert the set of rows it accepts, because NOT (NULL) is + * NULL. However, the partition constraints we construct here never + * evaluate to NULL, so applying NOT works as intended. + */ + if (spec->is_default) + { + result = list_make1(make_ands_explicit(result)); + result = list_make1(makeBoolExpr(NOT_EXPR, result, -1)); + } + + return result; +} + +/* + * get_qual_for_range + * + * Returns an implicit-AND list of expressions to use as a range partition's + * constraint, given the parent relation and partition bound structure. + * + * For a multi-column range partition key, say (a, b, c), with (al, bl, cl) + * as the lower bound tuple and (au, bu, cu) as the upper bound tuple, we + * generate an expression tree of the following form: + * + * (a IS NOT NULL) and (b IS NOT NULL) and (c IS NOT NULL) + * AND + * (a > al OR (a = al AND b > bl) OR (a = al AND b = bl AND c >= cl)) + * AND + * (a < au OR (a = au AND b < bu) OR (a = au AND b = bu AND c < cu)) + * + * It is often the case that a prefix of lower and upper bound tuples contains + * the same values, for example, (al = au), in which case, we will emit an + * expression tree of the following form: + * + * (a IS NOT NULL) and (b IS NOT NULL) and (c IS NOT NULL) + * AND + * (a = al) + * AND + * (b > bl OR (b = bl AND c >= cl)) + * AND + * (b < bu) OR (b = bu AND c < cu)) + * + * If a bound datum is either MINVALUE or MAXVALUE, these expressions are + * simplified using the fact that any value is greater than MINVALUE and less + * than MAXVALUE. So, for example, if cu = MAXVALUE, c < cu is automatically + * true, and we need not emit any expression for it, and the last line becomes + * + * (b < bu) OR (b = bu), which is simplified to (b <= bu) + * + * In most common cases with only one partition column, say a, the following + * expression tree will be generated: a IS NOT NULL AND a >= al AND a < au + * + * For default partition, it returns the negation of the constraints of all + * the other partitions. 
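The OR-of-ANDs shape shown in the get_qual_for_range() header above is a spelled-out lexicographic comparison on the key columns. The standalone check below verifies that for a two-column key the expanded form and a plain lexicographic test agree on every tuple in a small grid; hypothetical names, not part of the patch.

#include <assert.h>
#include <stdbool.h>
#include <stdio.h>

static bool
expanded_form(int a, int b, int al, int bl, int au, int bu)
{
	return (a > al || (a == al && b >= bl)) &&	/* lower bound, inclusive */
		   (a < au || (a == au && b < bu));		/* upper bound, exclusive */
}

static int
lex_cmp(int a, int b, int x, int y)
{
	if (a != x)
		return a < x ? -1 : 1;
	if (b != y)
		return b < y ? -1 : 1;
	return 0;
}

int
main(void)
{
	/* Think of a partition defined FOR VALUES FROM (1, 10) TO (3, 20). */
	int			al = 1, bl = 10, au = 3, bu = 20;

	for (int a = 0; a <= 4; a++)
		for (int b = 0; b <= 30; b++)
		{
			bool		lex = lex_cmp(a, b, al, bl) >= 0 &&
							  lex_cmp(a, b, au, bu) < 0;

			assert(expanded_form(a, b, al, bl, au, bu) == lex);
		}
	printf("forms agree\n");
	return 0;
}
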
+ * + * External callers should pass for_default as false; we set it to true only + * when recursing. + */ +static List * +get_qual_for_range(Relation parent, PartitionBoundSpec *spec, + bool for_default) +{ + List *result = NIL; + ListCell *cell1, + *cell2, + *partexprs_item, + *partexprs_item_saved; + int i, + j; + PartitionRangeDatum *ldatum, + *udatum; + PartitionKey key = RelationGetPartitionKey(parent); + Expr *keyCol; + Const *lower_val, + *upper_val; + List *lower_or_arms, + *upper_or_arms; + int num_or_arms, + current_or_arm; + ListCell *lower_or_start_datum, + *upper_or_start_datum; + bool need_next_lower_arm, + need_next_upper_arm; + + if (spec->is_default) + { + List *or_expr_args = NIL; + PartitionDesc pdesc = RelationGetPartitionDesc(parent); + Oid *inhoids = pdesc->oids; + int nparts = pdesc->nparts, + i; + + for (i = 0; i < nparts; i++) + { + Oid inhrelid = inhoids[i]; + HeapTuple tuple; + Datum datum; + bool isnull; + PartitionBoundSpec *bspec; + + tuple = SearchSysCache1(RELOID, inhrelid); + if (!HeapTupleIsValid(tuple)) + elog(ERROR, "cache lookup failed for relation %u", inhrelid); + + datum = SysCacheGetAttr(RELOID, tuple, + Anum_pg_class_relpartbound, + &isnull); + if (isnull) + elog(ERROR, "null relpartbound for relation %u", inhrelid); + + bspec = (PartitionBoundSpec *) + stringToNode(TextDatumGetCString(datum)); + if (!IsA(bspec, PartitionBoundSpec)) + elog(ERROR, "expected PartitionBoundSpec"); + + if (!bspec->is_default) + { + List *part_qual; + + part_qual = get_qual_for_range(parent, bspec, true); + + /* + * AND the constraints of the partition and add to + * or_expr_args + */ + or_expr_args = lappend(or_expr_args, list_length(part_qual) > 1 + ? makeBoolExpr(AND_EXPR, part_qual, -1) + : linitial(part_qual)); + } + ReleaseSysCache(tuple); + } + + if (or_expr_args != NIL) + { + Expr *other_parts_constr; + + /* + * Combine the constraints obtained for non-default partitions + * using OR. As requested, each of the OR's args doesn't include + * the NOT NULL test for partition keys (which is to avoid its + * useless repetition). Add the same now. + */ + other_parts_constr = + makeBoolExpr(AND_EXPR, + lappend(get_range_nulltest(key), + list_length(or_expr_args) > 1 + ? makeBoolExpr(OR_EXPR, or_expr_args, + -1) + : linitial(or_expr_args)), + -1); + + /* + * Finally, the default partition contains everything *NOT* + * contained in the non-default partitions. + */ + result = list_make1(makeBoolExpr(NOT_EXPR, + list_make1(other_parts_constr), -1)); + } + + return result; + } + + lower_or_start_datum = list_head(spec->lowerdatums); + upper_or_start_datum = list_head(spec->upperdatums); + num_or_arms = key->partnatts; + + /* + * If it is the recursive call for default, we skip the get_range_nulltest + * to avoid accumulating the NullTest on the same keys for each partition. + */ + if (!for_default) + result = get_range_nulltest(key); + + /* + * Iterate over the key columns and check if the corresponding lower and + * upper datums are equal using the btree equality operator for the + * column's type. If equal, we emit single keyCol = common_value + * expression. Starting from the first column for which the corresponding + * lower and upper bound datums are not equal, we generate OR expressions + * as shown in the function's header comment. 
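Both get_qual_for_list() and the default-partition branch above wrap the other partitions' constraints in NOT_EXPR, which is a true complement only because those constraints are built so they never evaluate to NULL. A minimal three-valued-logic sketch of the hazard; hypothetical names, not part of the patch.

#include <stdio.h>

typedef enum { TVL_FALSE, TVL_TRUE, TVL_NULL } tvl;

static tvl
tvl_not(tvl x)
{
	if (x == TVL_NULL)
		return TVL_NULL;		/* NOT (NULL) is NULL, not TRUE */
	return x == TVL_TRUE ? TVL_FALSE : TVL_TRUE;
}

int
main(void)
{
	/* A CHECK-style constraint rejects only FALSE; TRUE and NULL both pass.
	 * So NOT (constraint) excludes exactly the rows the constraint accepted
	 * only when the constraint can never return NULL. */
	const char *name[] = {"FALSE", "TRUE", "NULL"};

	for (int x = TVL_FALSE; x <= TVL_NULL; x++)
		printf("NOT %s = %s\n", name[x], name[tvl_not((tvl) x)]);
	return 0;
}
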
+ */ + i = 0; + partexprs_item = list_head(key->partexprs); + partexprs_item_saved = partexprs_item; /* placate compiler */ + forboth(cell1, spec->lowerdatums, cell2, spec->upperdatums) + { + EState *estate; + MemoryContext oldcxt; + Expr *test_expr; + ExprState *test_exprstate; + Datum test_result; + bool isNull; + + ldatum = castNode(PartitionRangeDatum, lfirst(cell1)); + udatum = castNode(PartitionRangeDatum, lfirst(cell2)); + + /* + * Since get_range_key_properties() modifies partexprs_item, and we + * might need to start over from the previous expression in the later + * part of this function, save away the current value. + */ + partexprs_item_saved = partexprs_item; + + get_range_key_properties(key, i, ldatum, udatum, + &partexprs_item, + &keyCol, + &lower_val, &upper_val); + + /* + * If either value is NULL, the corresponding partition bound is + * either MINVALUE or MAXVALUE, and we treat them as unequal, because + * even if they're the same, there is no common value to equate the + * key column with. + */ + if (!lower_val || !upper_val) + break; + + /* Create the test expression */ + estate = CreateExecutorState(); + oldcxt = MemoryContextSwitchTo(estate->es_query_cxt); + test_expr = make_partition_op_expr(key, i, BTEqualStrategyNumber, + (Expr *) lower_val, + (Expr *) upper_val); + fix_opfuncids((Node *) test_expr); + test_exprstate = ExecInitExpr(test_expr, NULL); + test_result = ExecEvalExprSwitchContext(test_exprstate, + GetPerTupleExprContext(estate), + &isNull); + MemoryContextSwitchTo(oldcxt); + FreeExecutorState(estate); + + /* If not equal, go generate the OR expressions */ + if (!DatumGetBool(test_result)) + break; + + /* + * The bounds for the last key column can't be equal, because such a + * range partition would never be allowed to be defined (it would have + * an empty range otherwise). + */ + if (i == key->partnatts - 1) + elog(ERROR, "invalid range bound specification"); + + /* Equal, so generate keyCol = lower_val expression */ + result = lappend(result, + make_partition_op_expr(key, i, BTEqualStrategyNumber, + keyCol, (Expr *) lower_val)); + + i++; + } + + /* First pair of lower_val and upper_val that are not equal. */ + lower_or_start_datum = cell1; + upper_or_start_datum = cell2; + + /* OR will have as many arms as there are key columns left. */ + num_or_arms = key->partnatts - i; + current_or_arm = 0; + lower_or_arms = upper_or_arms = NIL; + need_next_lower_arm = need_next_upper_arm = true; + while (current_or_arm < num_or_arms) + { + List *lower_or_arm_args = NIL, + *upper_or_arm_args = NIL; + + /* Restart scan of columns from the i'th one */ + j = i; + partexprs_item = partexprs_item_saved; + + for_both_cell(cell1, lower_or_start_datum, cell2, upper_or_start_datum) + { + PartitionRangeDatum *ldatum_next = NULL, + *udatum_next = NULL; + + ldatum = castNode(PartitionRangeDatum, lfirst(cell1)); + if (lnext(cell1)) + ldatum_next = castNode(PartitionRangeDatum, + lfirst(lnext(cell1))); + udatum = castNode(PartitionRangeDatum, lfirst(cell2)); + if (lnext(cell2)) + udatum_next = castNode(PartitionRangeDatum, + lfirst(lnext(cell2))); + get_range_key_properties(key, j, ldatum, udatum, + &partexprs_item, + &keyCol, + &lower_val, &upper_val); + + if (need_next_lower_arm && lower_val) + { + uint16 strategy; + + /* + * For the non-last columns of this arm, use the EQ operator. + * For the last column of this arm, use GT, unless this is the + * last column of the whole bound check, or the next bound + * datum is MINVALUE, in which case use GE. 
+ */ + if (j - i < current_or_arm) + strategy = BTEqualStrategyNumber; + else if (j == key->partnatts - 1 || + (ldatum_next && + ldatum_next->kind == PARTITION_RANGE_DATUM_MINVALUE)) + strategy = BTGreaterEqualStrategyNumber; + else + strategy = BTGreaterStrategyNumber; + + lower_or_arm_args = lappend(lower_or_arm_args, + make_partition_op_expr(key, j, + strategy, + keyCol, + (Expr *) lower_val)); + } + + if (need_next_upper_arm && upper_val) + { + uint16 strategy; + + /* + * For the non-last columns of this arm, use the EQ operator. + * For the last column of this arm, use LT, unless the next + * bound datum is MAXVALUE, in which case use LE. + */ + if (j - i < current_or_arm) + strategy = BTEqualStrategyNumber; + else if (udatum_next && + udatum_next->kind == PARTITION_RANGE_DATUM_MAXVALUE) + strategy = BTLessEqualStrategyNumber; + else + strategy = BTLessStrategyNumber; + + upper_or_arm_args = lappend(upper_or_arm_args, + make_partition_op_expr(key, j, + strategy, + keyCol, + (Expr *) upper_val)); + } + + /* + * Did we generate enough of OR's arguments? First arm considers + * the first of the remaining columns, second arm considers first + * two of the remaining columns, and so on. + */ + ++j; + if (j - i > current_or_arm) + { + /* + * We must not emit any more arms if the new column that will + * be considered is unbounded, or this one was. + */ + if (!lower_val || !ldatum_next || + ldatum_next->kind != PARTITION_RANGE_DATUM_VALUE) + need_next_lower_arm = false; + if (!upper_val || !udatum_next || + udatum_next->kind != PARTITION_RANGE_DATUM_VALUE) + need_next_upper_arm = false; + break; + } + } + + if (lower_or_arm_args != NIL) + lower_or_arms = lappend(lower_or_arms, + list_length(lower_or_arm_args) > 1 + ? makeBoolExpr(AND_EXPR, lower_or_arm_args, -1) + : linitial(lower_or_arm_args)); + + if (upper_or_arm_args != NIL) + upper_or_arms = lappend(upper_or_arms, + list_length(upper_or_arm_args) > 1 + ? makeBoolExpr(AND_EXPR, upper_or_arm_args, -1) + : linitial(upper_or_arm_args)); + + /* If no work to do in the next iteration, break away. */ + if (!need_next_lower_arm && !need_next_upper_arm) + break; + + ++current_or_arm; + } + + /* + * Generate the OR expressions for each of lower and upper bounds (if + * required), and append to the list of implicitly ANDed list of + * expressions. + */ + if (lower_or_arms != NIL) + result = lappend(result, + list_length(lower_or_arms) > 1 + ? makeBoolExpr(OR_EXPR, lower_or_arms, -1) + : linitial(lower_or_arms)); + if (upper_or_arms != NIL) + result = lappend(result, + list_length(upper_or_arms) > 1 + ? makeBoolExpr(OR_EXPR, upper_or_arms, -1) + : linitial(upper_or_arms)); + + /* + * As noted above, for non-default, we return list with constant TRUE. If + * the result is NIL during the recursive call for default, it implies + * this is the only other partition which can hold every value of the key + * except NULL. Hence we return the NullTest result skipped earlier. + */ + if (result == NIL) + result = for_default + ? get_range_nulltest(key) + : list_make1(makeBoolConst(true, false)); + + return result; +} + +/* + * get_range_key_properties + * Returns range partition key information for a given column + * + * This is a subroutine for get_qual_for_range, and its API is pretty + * specialized to that caller. + * + * Constructs an Expr for the key column (returned in *keyCol) and Consts + * for the lower and upper range limits (returned in *lower_val and + * *upper_val). For MINVALUE/MAXVALUE limits, NULL is returned instead of + * a Const. 
All of these structures are freshly palloc'd. + * + * *partexprs_item points to the cell containing the next expression in + * the key->partexprs list, or NULL. It may be advanced upon return. + */ +static void +get_range_key_properties(PartitionKey key, int keynum, + PartitionRangeDatum *ldatum, + PartitionRangeDatum *udatum, + ListCell **partexprs_item, + Expr **keyCol, + Const **lower_val, Const **upper_val) +{ + /* Get partition key expression for this column */ + if (key->partattrs[keynum] != 0) + { + *keyCol = (Expr *) makeVar(1, + key->partattrs[keynum], + key->parttypid[keynum], + key->parttypmod[keynum], + key->parttypcoll[keynum], + 0); + } + else + { + if (*partexprs_item == NULL) + elog(ERROR, "wrong number of partition key expressions"); + *keyCol = copyObject(lfirst(*partexprs_item)); + *partexprs_item = lnext(*partexprs_item); + } + + /* Get appropriate Const nodes for the bounds */ + if (ldatum->kind == PARTITION_RANGE_DATUM_VALUE) + *lower_val = castNode(Const, copyObject(ldatum->value)); + else + *lower_val = NULL; + + if (udatum->kind == PARTITION_RANGE_DATUM_VALUE) + *upper_val = castNode(Const, copyObject(udatum->value)); + else + *upper_val = NULL; +} + +/* + * get_range_nulltest + * + * A non-default range partition table does not currently allow partition + * keys to be null, so emit an IS NOT NULL expression for each key column. + */ +static List * +get_range_nulltest(PartitionKey key) +{ + List *result = NIL; + NullTest *nulltest; + ListCell *partexprs_item; + int i; + + partexprs_item = list_head(key->partexprs); + for (i = 0; i < key->partnatts; i++) + { + Expr *keyCol; + + if (key->partattrs[i] != 0) + { + keyCol = (Expr *) makeVar(1, + key->partattrs[i], + key->parttypid[i], + key->parttypmod[i], + key->parttypcoll[i], + 0); + } + else + { + if (partexprs_item == NULL) + elog(ERROR, "wrong number of partition key expressions"); + keyCol = copyObject(lfirst(partexprs_item)); + partexprs_item = lnext(partexprs_item); + } + + nulltest = makeNode(NullTest); + nulltest->arg = keyCol; + nulltest->nulltesttype = IS_NOT_NULL; + nulltest->argisrow = false; + nulltest->location = -1; + result = lappend(result, nulltest); + } + + return result; +} + +/* + * compute_partition_hash_value + * + * Compute the hash value for given partition key values. + */ +uint64 +compute_partition_hash_value(int partnatts, FmgrInfo *partsupfunc, + Datum *values, bool *isnull) +{ + int i; + uint64 rowHash = 0; + Datum seed = UInt64GetDatum(HASH_PARTITION_SEED); + + for (i = 0; i < partnatts; i++) + { + /* Nulls are just ignored */ + if (!isnull[i]) + { + Datum hash; + + Assert(OidIsValid(partsupfunc[i].fn_oid)); + + /* + * Compute hash for each datum value by calling respective + * datatype-specific hash functions of each partition key + * attribute. + */ + hash = FunctionCall2(&partsupfunc[i], values[i], seed); + + /* Form a single 64-bit hash value */ + rowHash = hash_combine64(rowHash, DatumGetUInt64(hash)); + } + } + + return rowHash; +} + +/* + * satisfies_hash_partition + * + * This is an SQL-callable function for use in hash partition constraints. + * The first three arguments are the parent table OID, modulus, and remainder. + * The remaining arguments are the value of the partitioning columns (or + * expressions); these are hashed and the results are combined into a single + * hash value by calling hash_combine64. + * + * Returns true if remainder produced when this computed single hash value is + * divided by the given modulus is equal to given remainder, otherwise false. 
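compute_partition_hash_value() above folds the per-column extended hashes into a single 64-bit row hash and the remainder of that hash selects the partition. A standalone sketch of that folding follows; the mixing constant and shifts are illustrative stand-ins, not necessarily the exact ones hash_combine64() uses, and all names are hypothetical.

#include <stdint.h>
#include <stdio.h>

/* Boost-style combiner: mix the next column hash into the accumulator. */
static uint64_t
combine64(uint64_t a, uint64_t b)
{
	a ^= b + UINT64_C(0x9e3779b97f4a7c15) + (a << 6) + (a >> 2);
	return a;
}

int
main(void)
{
	/* Pretend these are the datatype-specific hashes of one row's partition
	 * key columns; NULL columns would simply be skipped. */
	uint64_t	col_hash[] = {0x1234u, 0xdeadbeefu, 0x42u};
	uint64_t	row_hash = 0;
	int			modulus = 8;

	for (int i = 0; i < 3; i++)
		row_hash = combine64(row_hash, col_hash[i]);

	/* The partition accepting this row is the one whose remainder matches. */
	printf("remainder = %d\n", (int) (row_hash % (uint64_t) modulus));
	return 0;
}
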
+ * + * See get_qual_for_hash() for usage. + */ +Datum +satisfies_hash_partition(PG_FUNCTION_ARGS) +{ + typedef struct ColumnsHashData + { + Oid relid; + int nkeys; + Oid variadic_type; + int16 variadic_typlen; + bool variadic_typbyval; + char variadic_typalign; + FmgrInfo partsupfunc[PARTITION_MAX_KEYS]; + } ColumnsHashData; + Oid parentId; + int modulus; + int remainder; + Datum seed = UInt64GetDatum(HASH_PARTITION_SEED); + ColumnsHashData *my_extra; + uint64 rowHash = 0; + + /* Return null if the parent OID, modulus, or remainder is NULL. */ + if (PG_ARGISNULL(0) || PG_ARGISNULL(1) || PG_ARGISNULL(2)) + PG_RETURN_NULL(); + parentId = PG_GETARG_OID(0); + modulus = PG_GETARG_INT32(1); + remainder = PG_GETARG_INT32(2); + + /* Sanity check modulus and remainder. */ + if (modulus <= 0) + ereport(ERROR, + (errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("modulus for hash partition must be a positive integer"))); + if (remainder < 0) + ereport(ERROR, + (errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("remainder for hash partition must be a non-negative integer"))); + if (remainder >= modulus) + ereport(ERROR, + (errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("remainder for hash partition must be less than modulus"))); + + /* + * Cache hash function information. + */ + my_extra = (ColumnsHashData *) fcinfo->flinfo->fn_extra; + if (my_extra == NULL || my_extra->relid != parentId) + { + Relation parent; + PartitionKey key; + int j; + + /* Open parent relation and fetch partition keyinfo */ + parent = try_relation_open(parentId, AccessShareLock); + if (parent == NULL) + PG_RETURN_NULL(); + key = RelationGetPartitionKey(parent); + + /* Reject parent table that is not hash-partitioned. */ + if (parent->rd_rel->relkind != RELKIND_PARTITIONED_TABLE || + key->strategy != PARTITION_STRATEGY_HASH) + ereport(ERROR, + (errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("\"%s\" is not a hash partitioned table", + get_rel_name(parentId)))); + + if (!get_fn_expr_variadic(fcinfo->flinfo)) + { + int nargs = PG_NARGS() - 3; + + /* complain if wrong number of column values */ + if (key->partnatts != nargs) + ereport(ERROR, + (errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("number of partitioning columns (%d) does not match number of partition keys provided (%d)", + key->partnatts, nargs))); + + /* allocate space for our cache */ + fcinfo->flinfo->fn_extra = + MemoryContextAllocZero(fcinfo->flinfo->fn_mcxt, + offsetof(ColumnsHashData, partsupfunc) + + sizeof(FmgrInfo) * nargs); + my_extra = (ColumnsHashData *) fcinfo->flinfo->fn_extra; + my_extra->relid = parentId; + my_extra->nkeys = key->partnatts; + + /* check argument types and save fmgr_infos */ + for (j = 0; j < key->partnatts; ++j) + { + Oid argtype = get_fn_expr_argtype(fcinfo->flinfo, j + 3); + + if (argtype != key->parttypid[j] && !IsBinaryCoercible(argtype, key->parttypid[j])) + ereport(ERROR, + (errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("column %d of the partition key has type \"%s\", but supplied value is of type \"%s\"", + j + 1, format_type_be(key->parttypid[j]), format_type_be(argtype)))); + + fmgr_info_copy(&my_extra->partsupfunc[j], + &key->partsupfunc[j], + fcinfo->flinfo->fn_mcxt); + } + + } + else + { + ArrayType *variadic_array = PG_GETARG_ARRAYTYPE_P(3); + + /* allocate space for our cache -- just one FmgrInfo in this case */ + fcinfo->flinfo->fn_extra = + MemoryContextAllocZero(fcinfo->flinfo->fn_mcxt, + offsetof(ColumnsHashData, partsupfunc) + + sizeof(FmgrInfo)); + my_extra = (ColumnsHashData *) 
fcinfo->flinfo->fn_extra; + my_extra->relid = parentId; + my_extra->nkeys = key->partnatts; + my_extra->variadic_type = ARR_ELEMTYPE(variadic_array); + get_typlenbyvalalign(my_extra->variadic_type, + &my_extra->variadic_typlen, + &my_extra->variadic_typbyval, + &my_extra->variadic_typalign); + + /* check argument types */ + for (j = 0; j < key->partnatts; ++j) + if (key->parttypid[j] != my_extra->variadic_type) + ereport(ERROR, + (errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("column %d of the partition key has type \"%s\", but supplied value is of type \"%s\"", + j + 1, + format_type_be(key->parttypid[j]), + format_type_be(my_extra->variadic_type)))); + + fmgr_info_copy(&my_extra->partsupfunc[0], + &key->partsupfunc[0], + fcinfo->flinfo->fn_mcxt); + } + + /* Hold lock until commit */ + relation_close(parent, NoLock); + } + + if (!OidIsValid(my_extra->variadic_type)) + { + int nkeys = my_extra->nkeys; + int i; + + /* + * For a non-variadic call, neither the number of arguments nor their + * types can change across calls, so avoid the expense of rechecking + * here. + */ + + for (i = 0; i < nkeys; i++) + { + Datum hash; + + /* keys start from fourth argument of function. */ + int argno = i + 3; + + if (PG_ARGISNULL(argno)) + continue; + + Assert(OidIsValid(my_extra->partsupfunc[i].fn_oid)); + + hash = FunctionCall2(&my_extra->partsupfunc[i], + PG_GETARG_DATUM(argno), + seed); + + /* Form a single 64-bit hash value */ + rowHash = hash_combine64(rowHash, DatumGetUInt64(hash)); + } + } + else + { + ArrayType *variadic_array = PG_GETARG_ARRAYTYPE_P(3); + int i; + int nelems; + Datum *datum; + bool *isnull; + + deconstruct_array(variadic_array, + my_extra->variadic_type, + my_extra->variadic_typlen, + my_extra->variadic_typbyval, + my_extra->variadic_typalign, + &datum, &isnull, &nelems); + + /* complain if wrong number of column values */ + if (nelems != my_extra->nkeys) + ereport(ERROR, + (errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("number of partitioning columns (%d) does not match number of partition keys provided (%d)", + my_extra->nkeys, nelems))); + + for (i = 0; i < nelems; i++) + { + Datum hash; + + if (isnull[i]) + continue; + + Assert(OidIsValid(my_extra->partsupfunc[0].fn_oid)); + + hash = FunctionCall2(&my_extra->partsupfunc[0], + datum[i], + seed); + + /* Form a single 64-bit hash value */ + rowHash = hash_combine64(rowHash, DatumGetUInt64(hash)); + } + } + + PG_RETURN_BOOL(rowHash % modulus == remainder); +} diff --git a/src/backend/partitioning/partprune.c b/src/backend/partitioning/partprune.c new file mode 100644 index 0000000000..35c87535d3 --- /dev/null +++ b/src/backend/partitioning/partprune.c @@ -0,0 +1,3338 @@ +/*------------------------------------------------------------------------- + * + * partprune.c + * Support for partition pruning during query planning and execution + * + * This module implements partition pruning using the information contained in + * a table's partition descriptor, query clauses, and run-time parameters. + * + * During planning, clauses that can be matched to the table's partition key + * are turned into a set of "pruning steps", which are then executed to + * identify a set of partitions (as indexes in the RelOptInfo->part_rels + * array) that satisfy the constraints in the step. Partitions not in the set + * are said to have been pruned. + * + * A base pruning step may involve expressions whose values are only known + * during execution, such as Params, in which case pruning cannot occur + * entirely during planning. 
In that case, such steps are included alongside + * the plan, so that they can be used by the executor for further pruning. + * + * There are two kinds of pruning steps. A "base" pruning step represents + * tests on partition key column(s), typically comparisons to expressions. + * A "combine" pruning step represents a Boolean connector (AND/OR), and + * combines the outputs of some previous steps using the appropriate + * combination method. + * + * See gen_partprune_steps_internal() for more details on step generation. + * + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group + * Portions Copyright (c) 1994, Regents of the University of California + * + * IDENTIFICATION + * src/backend/partitioning/partprune.c + * + *------------------------------------------------------------------------- +*/ +#include "postgres.h" + +#include "access/hash.h" +#include "access/nbtree.h" +#include "catalog/pg_operator.h" +#include "catalog/pg_opfamily.h" +#include "catalog/pg_type.h" +#include "executor/executor.h" +#include "miscadmin.h" +#include "nodes/makefuncs.h" +#include "nodes/nodeFuncs.h" +#include "optimizer/clauses.h" +#include "optimizer/pathnode.h" +#include "optimizer/planner.h" +#include "optimizer/predtest.h" +#include "optimizer/prep.h" +#include "optimizer/var.h" +#include "partitioning/partprune.h" +#include "partitioning/partbounds.h" +#include "rewrite/rewriteManip.h" +#include "utils/lsyscache.h" + + +/* + * Information about a clause matched with a partition key. + */ +typedef struct PartClauseInfo +{ + int keyno; /* Partition key number (0 to partnatts - 1) */ + Oid opno; /* operator used to compare partkey to expr */ + bool op_is_ne; /* is clause's original operator <> ? */ + Expr *expr; /* expr the partition key is compared to */ + Oid cmpfn; /* Oid of function to compare 'expr' to the + * partition key */ + int op_strategy; /* btree strategy identifying the operator */ +} PartClauseInfo; + +/* + * PartClauseMatchStatus + * Describes the result of match_clause_to_partition_key() + */ +typedef enum PartClauseMatchStatus +{ + PARTCLAUSE_NOMATCH, + PARTCLAUSE_MATCH_CLAUSE, + PARTCLAUSE_MATCH_NULLNESS, + PARTCLAUSE_MATCH_STEPS, + PARTCLAUSE_MATCH_CONTRADICT, + PARTCLAUSE_UNSUPPORTED +} PartClauseMatchStatus; + +/* + * GeneratePruningStepsContext + * Information about the current state of generation of "pruning steps" + * for a given set of clauses + * + * gen_partprune_steps() initializes an instance of this struct, which is used + * throughout the step generation process. + */ +typedef struct GeneratePruningStepsContext +{ + int next_step_id; + List *steps; +} GeneratePruningStepsContext; + +/* The result of performing one PartitionPruneStep */ +typedef struct PruneStepResult +{ + /* + * The offsets of bounds (in a table's boundinfo) whose partition is + * selected by the pruning step. + */ + Bitmapset *bound_offsets; + + bool scan_default; /* Scan the default partition? */ + bool scan_null; /* Scan the partition for NULL values? 
*/ +} PruneStepResult; + + +static List *make_partitionedrel_pruneinfo(PlannerInfo *root, + RelOptInfo *parentrel, + int *relid_subplan_map, + List *partitioned_rels, List *prunequal, + Bitmapset **matchedsubplans); +static List *gen_partprune_steps(RelOptInfo *rel, List *clauses, + bool *contradictory); +static List *gen_partprune_steps_internal(GeneratePruningStepsContext *context, + RelOptInfo *rel, List *clauses, + bool *contradictory); +static PartitionPruneStep *gen_prune_step_op(GeneratePruningStepsContext *context, + StrategyNumber opstrategy, bool op_is_ne, + List *exprs, List *cmpfns, Bitmapset *nullkeys); +static PartitionPruneStep *gen_prune_step_combine(GeneratePruningStepsContext *context, + List *source_stepids, + PartitionPruneCombineOp combineOp); +static PartitionPruneStep *gen_prune_steps_from_opexps(PartitionScheme part_scheme, + GeneratePruningStepsContext *context, + List **keyclauses, Bitmapset *nullkeys); +static PartClauseMatchStatus match_clause_to_partition_key(RelOptInfo *rel, + GeneratePruningStepsContext *context, + Expr *clause, Expr *partkey, int partkeyidx, + bool *clause_is_not_null, + PartClauseInfo **pc, List **clause_steps); +static List *get_steps_using_prefix(GeneratePruningStepsContext *context, + StrategyNumber step_opstrategy, + bool step_op_is_ne, + Expr *step_lastexpr, + Oid step_lastcmpfn, + int step_lastkeyno, + Bitmapset *step_nullkeys, + List *prefix); +static List *get_steps_using_prefix_recurse(GeneratePruningStepsContext *context, + StrategyNumber step_opstrategy, + bool step_op_is_ne, + Expr *step_lastexpr, + Oid step_lastcmpfn, + int step_lastkeyno, + Bitmapset *step_nullkeys, + ListCell *start, + List *step_exprs, + List *step_cmpfns); +static PruneStepResult *get_matching_hash_bounds(PartitionPruneContext *context, + StrategyNumber opstrategy, Datum *values, int nvalues, + FmgrInfo *partsupfunc, Bitmapset *nullkeys); +static PruneStepResult *get_matching_list_bounds(PartitionPruneContext *context, + StrategyNumber opstrategy, Datum value, int nvalues, + FmgrInfo *partsupfunc, Bitmapset *nullkeys); +static PruneStepResult *get_matching_range_bounds(PartitionPruneContext *context, + StrategyNumber opstrategy, Datum *values, int nvalues, + FmgrInfo *partsupfunc, Bitmapset *nullkeys); +static Bitmapset *pull_exec_paramids(Expr *expr); +static bool pull_exec_paramids_walker(Node *node, Bitmapset **context); +static bool analyze_partkey_exprs(PartitionedRelPruneInfo *pinfo, List *steps, + int partnatts); +static PruneStepResult *perform_pruning_base_step(PartitionPruneContext *context, + PartitionPruneStepOp *opstep); +static PruneStepResult *perform_pruning_combine_step(PartitionPruneContext *context, + PartitionPruneStepCombine *cstep, + PruneStepResult **step_results); +static bool match_boolean_partition_clause(Oid partopfamily, Expr *clause, + Expr *partkey, Expr **outconst); +static bool partkey_datum_from_expr(PartitionPruneContext *context, + Expr *expr, int stateidx, + Datum *value, bool *isnull); + + +/* + * make_partition_pruneinfo + * Builds a PartitionPruneInfo which can be used in the executor to allow + * additional partition pruning to take place. Returns NULL when + * partition pruning would be useless. + * + * 'parentrel' is the RelOptInfo for an appendrel, and 'subpaths' is the list + * of scan paths for its child rels. + * + * 'partitioned_rels' is a List containing Lists of relids of partitioned + * tables (a/k/a non-leaf partitions) that are parents of some of the child + * rels. 
Here we attempt to populate the PartitionPruneInfo by adding a + * 'prune_infos' item for each sublist in the 'partitioned_rels' list. + * However, some of the sets of partitioned relations may not require any + * run-time pruning. In these cases we'll simply not include a 'prune_infos' + * item for that set and instead we'll add all the subplans which belong to + * that set into the PartitionPruneInfo's 'other_subplans' field. Callers + * will likely never want to prune subplans which are mentioned in this field. + * + * 'prunequal' is a list of potential pruning quals. + */ +PartitionPruneInfo * +make_partition_pruneinfo(PlannerInfo *root, RelOptInfo *parentrel, + List *subpaths, List *partitioned_rels, + List *prunequal) +{ + PartitionPruneInfo *pruneinfo; + Bitmapset *allmatchedsubplans = NULL; + int *relid_subplan_map; + ListCell *lc; + List *prunerelinfos; + int i; + + /* + * Construct a temporary array to map from planner relids to subplan + * indexes. For convenience, we use 1-based indexes here, so that zero + * can represent an un-filled array entry. + */ + relid_subplan_map = palloc0(sizeof(int) * root->simple_rel_array_size); + + /* + * relid_subplan_map maps relid of a leaf partition to the index in + * 'subpaths' of the scan plan for that partition. + */ + i = 1; + foreach(lc, subpaths) + { + Path *path = (Path *) lfirst(lc); + RelOptInfo *pathrel = path->parent; + + Assert(IS_SIMPLE_REL(pathrel)); + Assert(pathrel->relid < root->simple_rel_array_size); + /* No duplicates please */ + Assert(relid_subplan_map[pathrel->relid] == 0); + + relid_subplan_map[pathrel->relid] = i++; + } + + /* We now build a PartitionedRelPruneInfo for each partitioned rel. */ + prunerelinfos = NIL; + foreach(lc, partitioned_rels) + { + List *rels = (List *) lfirst(lc); + List *pinfolist; + Bitmapset *matchedsubplans = NULL; + + pinfolist = make_partitionedrel_pruneinfo(root, parentrel, + relid_subplan_map, + rels, prunequal, + &matchedsubplans); + + /* When pruning is possible, record the matched subplans */ + if (pinfolist != NIL) + { + prunerelinfos = lappend(prunerelinfos, pinfolist); + allmatchedsubplans = bms_join(matchedsubplans, + allmatchedsubplans); + } + } + + pfree(relid_subplan_map); + + /* + * If none of the partition hierarchies had any useful run-time pruning + * quals, then we can just not bother with run-time pruning. + */ + if (prunerelinfos == NIL) + return NULL; + + /* Else build the result data structure */ + pruneinfo = makeNode(PartitionPruneInfo); + pruneinfo->prune_infos = prunerelinfos; + + /* + * Some subplans may not belong to any of the listed partitioned rels. + * This can happen for UNION ALL queries which include a non-partitioned + * table, or when some of the hierarchies aren't run-time prunable. Build + * a bitmapset of the indexes of all such subplans, so that the executor + * can identify which subplans should never be pruned. + */ + if (bms_num_members(allmatchedsubplans) < list_length(subpaths)) + { + Bitmapset *other_subplans; + + /* Create the complement of allmatchedsubplans */ + other_subplans = bms_add_range(NULL, 0, list_length(subpaths) - 1); + other_subplans = bms_del_members(other_subplans, allmatchedsubplans); + + pruneinfo->other_subplans = other_subplans; + } + else + pruneinfo->other_subplans = NULL; + + return pruneinfo; +} + +/* + * make_partitionedrel_pruneinfo + * Build a List of PartitionedRelPruneInfos, one for each partitioned + * rel. These can be used in the executor to allow additional partition + * pruning to take place. 
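A small standalone sketch of the indexing convention used above: the temporary relid-to-subplan map is zero-initialized so that 0 means "no subplan", entries store the subplan index plus one, and subplans not matched by any pruneinfo end up in the complement set ('other_subplans'). The plain word below stands in for a Bitmapset, and all names are illustrative only.

/* Sketch of the 1-based relid-to-subplan map convention; not server code. */
#include <stdio.h>
#include <stdlib.h>

int
main(void)
{
	int			nrelids = 8;		/* pretend simple_rel_array_size */
	int			subplan_relids[] = {3, 5, 6};	/* leaf partitions with scans */
	int			nsubplans = 3;
	int		   *relid_subplan_map = calloc(nrelids, sizeof(int));
	unsigned	matched = 0;		/* stand-in for a Bitmapset of subplans */
	unsigned	other_subplans;

	/* Store 1-based subplan indexes so that 0 can mean "unfilled". */
	for (int i = 0; i < nsubplans; i++)
		relid_subplan_map[subplan_relids[i]] = i + 1;

	/* Pretend pruning hierarchies matched only the subplans for relids 3 and 6. */
	matched |= 1u << (relid_subplan_map[3] - 1);
	matched |= 1u << (relid_subplan_map[6] - 1);

	/* Complement over [0, nsubplans): subplans the executor must never prune. */
	other_subplans = ((1u << nsubplans) - 1) & ~matched;

	printf("matched=0x%x other=0x%x\n", matched, other_subplans);
	free(relid_subplan_map);
	return 0;
}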
+ * + * Here we generate partition pruning steps for 'prunequal' and also build a + * data structure which allows mapping of partition indexes into 'subpaths' + * indexes. + * + * If no non-Const expressions are being compared to the partition key in any + * of the 'partitioned_rels', then we return NIL to indicate no run-time + * pruning should be performed. Run-time pruning would be useless since the + * pruning done during planning will have pruned everything that can be. + * + * On non-NIL return, 'matchedsubplans' is set to the subplan indexes which + * were matched to this partition hierarchy. + */ +static List * +make_partitionedrel_pruneinfo(PlannerInfo *root, RelOptInfo *parentrel, + int *relid_subplan_map, + List *partitioned_rels, List *prunequal, + Bitmapset **matchedsubplans) +{ + RelOptInfo *targetpart = NULL; + List *pinfolist = NIL; + bool doruntimeprune = false; + int *relid_subpart_map; + Bitmapset *subplansfound = NULL; + ListCell *lc; + int i; + + /* + * Construct a temporary array to map from planner relids to index of the + * partitioned_rel. For convenience, we use 1-based indexes here, so that + * zero can represent an un-filled array entry. + */ + relid_subpart_map = palloc0(sizeof(int) * root->simple_rel_array_size); + + /* + * relid_subpart_map maps relid of a non-leaf partition to the index in + * 'partitioned_rels' of that rel (which will also be the index in the + * returned PartitionedRelPruneInfo list of the info for that partition). + */ + i = 1; + foreach(lc, partitioned_rels) + { + Index rti = lfirst_int(lc); + + Assert(rti < root->simple_rel_array_size); + /* No duplicates please */ + Assert(relid_subpart_map[rti] == 0); + + relid_subpart_map[rti] = i++; + } + + /* We now build a PartitionedRelPruneInfo for each partitioned rel */ + foreach(lc, partitioned_rels) + { + Index rti = lfirst_int(lc); + RelOptInfo *subpart = find_base_rel(root, rti); + PartitionedRelPruneInfo *pinfo; + Bitmapset *present_parts; + int nparts = subpart->nparts; + int partnatts = subpart->part_scheme->partnatts; + int *subplan_map; + int *subpart_map; + List *partprunequal; + List *pruning_steps; + bool contradictory; + + /* + * The first item in the list is the target partitioned relation. + */ + if (!targetpart) + { + targetpart = subpart; + + /* + * The prunequal is presented to us as a qual for 'parentrel'. + * Frequently this rel is the same as targetpart, so we can skip + * an adjust_appendrel_attrs step. But it might not be, and then + * we have to translate. We update the prunequal parameter here, + * because in later iterations of the loop for child partitions, + * we want to translate from parent to child variables. + */ + if (!bms_equal(parentrel->relids, subpart->relids)) + { + int nappinfos; + AppendRelInfo **appinfos = find_appinfos_by_relids(root, + subpart->relids, + &nappinfos); + + prunequal = (List *) adjust_appendrel_attrs(root, (Node *) + prunequal, + nappinfos, + appinfos); + + pfree(appinfos); + } + + partprunequal = prunequal; + } + else + { + /* + * For sub-partitioned tables the columns may not be in the same + * order as the parent, so we must translate the prunequal to make + * it compatible with this relation. + */ + partprunequal = (List *) + adjust_appendrel_attrs_multilevel(root, + (Node *) prunequal, + subpart->relids, + targetpart->relids); + } + + pruning_steps = gen_partprune_steps(subpart, partprunequal, + &contradictory); + + if (contradictory) + { + /* + * This shouldn't happen as the planner should have detected this + * earlier. 
However, we do use additional quals from parameterized + * paths here. These do only compare Params to the partition key, + * so this shouldn't cause the discovery of any new qual + * contradictions that were not previously discovered as the Param + * values are unknown during planning. Anyway, we'd better do + * something sane here, so let's just disable run-time pruning. + */ + return NIL; + } + + /* + * Construct the subplan and subpart maps for this partitioning level. + * Here we convert to zero-based indexes, with -1 for empty entries. + * Also construct a Bitmapset of all partitions that are present (that + * is, not pruned already). + */ + subplan_map = (int *) palloc(nparts * sizeof(int)); + subpart_map = (int *) palloc(nparts * sizeof(int)); + present_parts = NULL; + + for (i = 0; i < nparts; i++) + { + RelOptInfo *partrel = subpart->part_rels[i]; + int subplanidx = relid_subplan_map[partrel->relid] - 1; + int subpartidx = relid_subpart_map[partrel->relid] - 1; + + subplan_map[i] = subplanidx; + subpart_map[i] = subpartidx; + if (subplanidx >= 0) + { + present_parts = bms_add_member(present_parts, i); + + /* Record finding this subplan */ + subplansfound = bms_add_member(subplansfound, subplanidx); + } + else if (subpartidx >= 0) + present_parts = bms_add_member(present_parts, i); + } + + pinfo = makeNode(PartitionedRelPruneInfo); + pinfo->rtindex = rti; + pinfo->pruning_steps = pruning_steps; + pinfo->present_parts = present_parts; + pinfo->nparts = nparts; + pinfo->subplan_map = subplan_map; + pinfo->subpart_map = subpart_map; + + /* Determine which pruning types should be enabled at this level */ + doruntimeprune |= analyze_partkey_exprs(pinfo, pruning_steps, + partnatts); + + pinfolist = lappend(pinfolist, pinfo); + } + + pfree(relid_subpart_map); + + if (!doruntimeprune) + { + /* No run-time pruning required. */ + return NIL; + } + + *matchedsubplans = subplansfound; + + return pinfolist; +} + +/* + * gen_partprune_steps + * Process 'clauses' (a rel's baserestrictinfo list of clauses) and return + * a list of "partition pruning steps" + * + * If the clauses in the input list are contradictory or there is a + * pseudo-constant "false", *contradictory is set to true upon return. + */ +static List * +gen_partprune_steps(RelOptInfo *rel, List *clauses, bool *contradictory) +{ + GeneratePruningStepsContext context; + + context.next_step_id = 0; + context.steps = NIL; + + /* The clauses list may be modified below, so better make a copy. */ + clauses = list_copy(clauses); + + /* + * For sub-partitioned tables there's a corner case where if the + * sub-partitioned table shares any partition keys with its parent, then + * it's possible that the partitioning hierarchy allows the parent + * partition to only contain a narrower range of values than the + * sub-partitioned table does. In this case it is possible that we'd + * include partitions that could not possibly have any tuples matching + * 'clauses'. The possibility of such a partition arrangement is perhaps + * unlikely for non-default partitions, but it may be more likely in the + * case of default partitions, so we'll add the parent partition table's + * partition qual to the clause list in this case only. This may result + * in the default partition being eliminated. 
+ */ + if (partition_bound_has_default(rel->boundinfo) && + rel->partition_qual != NIL) + { + List *partqual = rel->partition_qual; + + partqual = (List *) expression_planner((Expr *) partqual); + + /* Fix Vars to have the desired varno */ + if (rel->relid != 1) + ChangeVarNodes((Node *) partqual, 1, rel->relid, 0); + + clauses = list_concat(clauses, partqual); + } + + /* Down into the rabbit-hole. */ + gen_partprune_steps_internal(&context, rel, clauses, contradictory); + + return context.steps; +} + +/* + * prune_append_rel_partitions + * Returns RT indexes of the minimum set of child partitions which must + * be scanned to satisfy rel's baserestrictinfo quals. + * + * Callers must ensure that 'rel' is a partitioned table. + */ +Relids +prune_append_rel_partitions(RelOptInfo *rel) +{ + Relids result; + List *clauses = rel->baserestrictinfo; + List *pruning_steps; + bool contradictory; + PartitionPruneContext context; + Bitmapset *partindexes; + int i; + + Assert(clauses != NIL); + Assert(rel->part_scheme != NULL); + + /* If there are no partitions, return the empty set */ + if (rel->nparts == 0) + return NULL; + + /* + * Process clauses. If the clauses are found to be contradictory, we can + * return the empty set. + */ + pruning_steps = gen_partprune_steps(rel, clauses, &contradictory); + if (contradictory) + return NULL; + + /* Set up PartitionPruneContext */ + context.strategy = rel->part_scheme->strategy; + context.partnatts = rel->part_scheme->partnatts; + context.nparts = rel->nparts; + context.boundinfo = rel->boundinfo; + context.partcollation = rel->part_scheme->partcollation; + context.partsupfunc = rel->part_scheme->partsupfunc; + context.stepcmpfuncs = (FmgrInfo *) palloc0(sizeof(FmgrInfo) * + context.partnatts * + list_length(pruning_steps)); + context.ppccontext = CurrentMemoryContext; + + /* These are not valid when being called from the planner */ + context.planstate = NULL; + context.exprstates = NULL; + context.exprhasexecparam = NULL; + context.evalexecparams = false; + + /* Actual pruning happens here. */ + partindexes = get_matching_partitions(&context, pruning_steps); + + /* Add selected partitions' RT indexes to result. */ + i = -1; + result = NULL; + while ((i = bms_next_member(partindexes, i)) >= 0) + result = bms_add_member(result, rel->part_rels[i]->relid); + + return result; +} + +/* + * get_matching_partitions + * Determine partitions that survive partition pruning + * + * Returns a Bitmapset of the RelOptInfo->part_rels indexes of the surviving + * partitions. + */ +Bitmapset * +get_matching_partitions(PartitionPruneContext *context, List *pruning_steps) +{ + Bitmapset *result; + int num_steps = list_length(pruning_steps), + i; + PruneStepResult **results, + *final_result; + ListCell *lc; + + /* If there are no pruning steps then all partitions match. */ + if (num_steps == 0) + { + Assert(context->nparts > 0); + return bms_add_range(NULL, 0, context->nparts - 1); + } + + /* + * Allocate space for individual pruning steps to store its result. Each + * slot will hold a PruneStepResult after performing a given pruning step. + * Later steps may use the result of one or more earlier steps. The + * result of applying all pruning steps is the value contained in the slot + * of the last pruning step. 
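The execution scheme just described can be pictured with a tiny driver: every step writes into a slot indexed by its step id, combine steps read the slots of earlier steps, and the last slot is the overall answer. The step representation and the union/intersect operators below are simplified stand-ins, not the executor's data structures.

/* Sketch of running pruning steps into a per-step result array; not server code. */
#include <stdio.h>

typedef enum { STEP_BASE, STEP_UNION, STEP_INTERSECT } StepKind;

typedef struct
{
	StepKind	kind;
	unsigned	base_result;	/* for STEP_BASE: matching bound offsets */
	int			arg1, arg2;		/* for combine steps: earlier step ids */
} Step;

int
main(void)
{
	/* WHERE a = 1 OR a = 3: two base steps plus one union combine step. */
	Step		steps[] = {
		{STEP_BASE, 1u << 1, 0, 0},		/* step 0: bound offsets {1} */
		{STEP_BASE, 1u << 3, 0, 0},		/* step 1: bound offsets {3} */
		{STEP_UNION, 0, 0, 1},			/* step 2: union of steps 0 and 1 */
	};
	int			nsteps = 3;
	unsigned	results[3];

	for (int id = 0; id < nsteps; id++)
	{
		switch (steps[id].kind)
		{
			case STEP_BASE:
				results[id] = steps[id].base_result;
				break;
			case STEP_UNION:
				results[id] = results[steps[id].arg1] | results[steps[id].arg2];
				break;
			case STEP_INTERSECT:
				results[id] = results[steps[id].arg1] & results[steps[id].arg2];
				break;
		}
	}

	/* The slot of the last step holds the surviving bound offsets. */
	printf("final offsets bitmap: 0x%x\n", results[nsteps - 1]);
	return 0;
}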
+ */ + results = (PruneStepResult **) + palloc0(num_steps * sizeof(PruneStepResult *)); + foreach(lc, pruning_steps) + { + PartitionPruneStep *step = lfirst(lc); + + switch (nodeTag(step)) + { + case T_PartitionPruneStepOp: + results[step->step_id] = + perform_pruning_base_step(context, + (PartitionPruneStepOp *) step); + break; + + case T_PartitionPruneStepCombine: + results[step->step_id] = + perform_pruning_combine_step(context, + (PartitionPruneStepCombine *) step, + results); + break; + + default: + elog(ERROR, "invalid pruning step type: %d", + (int) nodeTag(step)); + } + } + + /* + * At this point we know the offsets of all the datums whose corresponding + * partitions need to be in the result, including special null-accepting + * and default partitions. Collect the actual partition indexes now. + */ + final_result = results[num_steps - 1]; + Assert(final_result != NULL); + i = -1; + result = NULL; + while ((i = bms_next_member(final_result->bound_offsets, i)) >= 0) + { + int partindex = context->boundinfo->indexes[i]; + + /* + * In range and hash partitioning cases, some slots may contain -1, + * indicating that no partition has been defined to accept a given + * range of data or for a given remainder, respectively. The default + * partition, if any, in case of range partitioning, will be added to + * the result, because the specified range still satisfies the query's + * conditions. + */ + if (partindex >= 0) + result = bms_add_member(result, partindex); + } + + /* Add the null and/or default partition if needed and if present. */ + if (final_result->scan_null) + { + Assert(context->strategy == PARTITION_STRATEGY_LIST); + Assert(partition_bound_accepts_nulls(context->boundinfo)); + result = bms_add_member(result, context->boundinfo->null_index); + } + if (final_result->scan_default) + { + Assert(context->strategy == PARTITION_STRATEGY_LIST || + context->strategy == PARTITION_STRATEGY_RANGE); + Assert(partition_bound_has_default(context->boundinfo)); + result = bms_add_member(result, context->boundinfo->default_index); + } + + return result; +} + +/* + * gen_partprune_steps_internal + * Processes 'clauses' to generate partition pruning steps. + * + * From OpExpr clauses that are mutually AND'd, we find combinations of those + * that match to the partition key columns and for every such combination, + * we emit a PartitionPruneStepOp containing a vector of expressions whose + * values are used as a look up key to search partitions by comparing the + * values with partition bounds. Relevant details of the operator and a + * vector of (possibly cross-type) comparison functions is also included with + * each step. + * + * For BoolExpr clauses, we recursively generate steps for each argument, and + * return a PartitionPruneStepCombine of their results. + * + * The return value is a list of the steps generated, which are also added to + * the context's steps list. Each step is assigned a step identifier, unique + * even across recursive calls. + * + * If we find clauses that are mutually contradictory, or a pseudoconstant + * clause that contains false, we set *contradictory to true and return NIL + * (that is, no pruning steps). Caller should consider all partitions as + * pruned in that case. Otherwise, *contradictory is set to false. + * + * Note: the 'clauses' List may be modified inside this function. Callers may + * like to make a copy of it before passing them to this function. 
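To make the offset-to-partition translation performed by get_matching_partitions() above concrete, here is a reduced sketch: the boundinfo indexes array is modelled as a plain int array in which -1 marks a bound with no partition, and the null-accepting and default partitions are appended only when the final step result asks for them. Everything here is illustrative, not the server's structures.

/* Sketch of mapping surviving bound offsets to partition indexes. */
#include <stdbool.h>
#include <stdio.h>

int
main(void)
{
	/* Pretend bound offsets 0..3 map to these partition indexes; -1 = none. */
	int			indexes[] = {0, 1, -1, 2};
	int			null_index = 3;		/* partition accepting NULLs, if any */
	int			default_index = 4;	/* default partition, if any */
	unsigned	bound_offsets = (1u << 1) | (1u << 2);	/* offsets {1, 2} */
	bool		scan_null = false;
	bool		scan_default = true;
	unsigned	parts = 0;

	for (int off = 0; off < 4; off++)
	{
		if (!(bound_offsets & (1u << off)))
			continue;
		/* Offsets whose slot is -1 have no partition to scan. */
		if (indexes[off] >= 0)
			parts |= 1u << indexes[off];
	}

	if (scan_null)
		parts |= 1u << null_index;
	if (scan_default)
		parts |= 1u << default_index;

	printf("surviving partitions bitmap: 0x%x\n", parts);
	return 0;
}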
+ */ +static List * +gen_partprune_steps_internal(GeneratePruningStepsContext *context, + RelOptInfo *rel, List *clauses, + bool *contradictory) +{ + PartitionScheme part_scheme = rel->part_scheme; + List *keyclauses[PARTITION_MAX_KEYS]; + Bitmapset *nullkeys = NULL, + *notnullkeys = NULL; + bool generate_opsteps = false; + List *result = NIL; + ListCell *lc; + + *contradictory = false; + + memset(keyclauses, 0, sizeof(keyclauses)); + foreach(lc, clauses) + { + Expr *clause = (Expr *) lfirst(lc); + int i; + + /* Look through RestrictInfo, if any */ + if (IsA(clause, RestrictInfo)) + clause = ((RestrictInfo *) clause)->clause; + + /* Constant-false-or-null is contradictory */ + if (IsA(clause, Const) && + (((Const *) clause)->constisnull || + !DatumGetBool(((Const *) clause)->constvalue))) + { + *contradictory = true; + return NIL; + } + + /* Get the BoolExpr's out of the way. */ + if (IsA(clause, BoolExpr)) + { + /* + * Generate steps for arguments. + * + * While steps generated for the arguments themselves will be + * added to context->steps during recursion and will be evaluated + * independently, collect their step IDs to be stored in the + * combine step we'll be creating. + */ + if (or_clause((Node *) clause)) + { + List *arg_stepids = NIL; + bool all_args_contradictory = true; + ListCell *lc1; + + /* + * Get pruning step for each arg. If we get contradictory for + * all args, it means the OR expression is false as a whole. + */ + foreach(lc1, ((BoolExpr *) clause)->args) + { + Expr *arg = lfirst(lc1); + bool arg_contradictory; + List *argsteps; + + argsteps = + gen_partprune_steps_internal(context, rel, + list_make1(arg), + &arg_contradictory); + if (!arg_contradictory) + all_args_contradictory = false; + + if (argsteps != NIL) + { + PartitionPruneStep *step; + + Assert(list_length(argsteps) == 1); + step = (PartitionPruneStep *) linitial(argsteps); + arg_stepids = lappend_int(arg_stepids, step->step_id); + } + else + { + /* + * No steps either means that arg_contradictory is + * true or the arg didn't contain a clause matching + * this partition key. + * + * In case of the latter, we cannot prune using such + * an arg. To indicate that to the pruning code, we + * must construct a dummy PartitionPruneStepCombine + * whose source_stepids is set to an empty List. + * However, if we can prove using constraint exclusion + * that the clause refutes the table's partition + * constraint (if it's sub-partitioned), we need not + * bother with that. That is, we effectively ignore + * this OR arm. + */ + List *partconstr = rel->partition_qual; + PartitionPruneStep *orstep; + + /* Just ignore this argument. 
*/ + if (arg_contradictory) + continue; + + if (partconstr) + { + partconstr = (List *) + expression_planner((Expr *) partconstr); + if (rel->relid != 1) + ChangeVarNodes((Node *) partconstr, 1, + rel->relid, 0); + if (predicate_refuted_by(partconstr, + list_make1(arg), + false)) + continue; + } + + orstep = gen_prune_step_combine(context, NIL, + PARTPRUNE_COMBINE_UNION); + arg_stepids = lappend_int(arg_stepids, orstep->step_id); + } + } + + *contradictory = all_args_contradictory; + + /* Check if any contradicting clauses were found */ + if (*contradictory) + return NIL; + + if (arg_stepids != NIL) + { + PartitionPruneStep *step; + + step = gen_prune_step_combine(context, arg_stepids, + PARTPRUNE_COMBINE_UNION); + result = lappend(result, step); + } + continue; + } + else if (and_clause((Node *) clause)) + { + List *args = ((BoolExpr *) clause)->args; + List *argsteps, + *arg_stepids = NIL; + ListCell *lc1; + + /* + * args may itself contain clauses of arbitrary type, so just + * recurse and later combine the component partitions sets + * using a combine step. + */ + argsteps = gen_partprune_steps_internal(context, rel, args, + contradictory); + if (*contradictory) + return NIL; + + foreach(lc1, argsteps) + { + PartitionPruneStep *step = lfirst(lc1); + + arg_stepids = lappend_int(arg_stepids, step->step_id); + } + + if (arg_stepids != NIL) + { + PartitionPruneStep *step; + + step = gen_prune_step_combine(context, arg_stepids, + PARTPRUNE_COMBINE_INTERSECT); + result = lappend(result, step); + } + continue; + } + + /* + * Fall-through for a NOT clause, which if it's a Boolean clause, + * will be handled in match_clause_to_partition_key(). We + * currently don't perform any pruning for more complex NOT + * clauses. + */ + } + + /* + * Must be a clause for which we can check if one of its args matches + * the partition key. + */ + for (i = 0; i < part_scheme->partnatts; i++) + { + Expr *partkey = linitial(rel->partexprs[i]); + bool clause_is_not_null = false; + PartClauseInfo *pc = NULL; + List *clause_steps = NIL; + + switch (match_clause_to_partition_key(rel, context, + clause, partkey, i, + &clause_is_not_null, + &pc, &clause_steps)) + { + case PARTCLAUSE_MATCH_CLAUSE: + Assert(pc != NULL); + + /* + * Since we only allow strict operators, check for any + * contradicting IS NULL. + */ + if (bms_is_member(i, nullkeys)) + { + *contradictory = true; + return NIL; + } + generate_opsteps = true; + keyclauses[i] = lappend(keyclauses[i], pc); + break; + + case PARTCLAUSE_MATCH_NULLNESS: + if (!clause_is_not_null) + { + /* check for conflicting IS NOT NULL */ + if (bms_is_member(i, notnullkeys)) + { + *contradictory = true; + return NIL; + } + nullkeys = bms_add_member(nullkeys, i); + } + else + { + /* check for conflicting IS NULL */ + if (bms_is_member(i, nullkeys)) + { + *contradictory = true; + return NIL; + } + notnullkeys = bms_add_member(notnullkeys, i); + } + break; + + case PARTCLAUSE_MATCH_STEPS: + Assert(clause_steps != NIL); + result = list_concat(result, clause_steps); + break; + + case PARTCLAUSE_MATCH_CONTRADICT: + /* We've nothing more to do if a contradiction was found. */ + *contradictory = true; + return NIL; + + case PARTCLAUSE_NOMATCH: + + /* + * Clause didn't match this key, but it might match the + * next one. + */ + continue; + + case PARTCLAUSE_UNSUPPORTED: + /* This clause cannot be used for pruning. */ + break; + } + + /* done; go check the next clause. */ + break; + } + } + + /*----------- + * Now generate some (more) pruning steps. 
We have three strategies: + * + * 1) Generate pruning steps based on IS NULL clauses: + * a) For list partitioning, null partition keys can only be found in + * the designated null-accepting partition, so if there are IS NULL + * clauses containing partition keys we should generate a pruning + * step that gets rid of all partitions but that one. We can + * disregard any OpExpr we may have found. + * b) For range partitioning, only the default partition can contain + * NULL values, so the same rationale applies. + * c) For hash partitioning, we only apply this strategy if we have + * IS NULL clauses for all the keys. Strategy 2 below will take + * care of the case where some keys have OpExprs and others have + * IS NULL clauses. + * + * 2) If not, generate steps based on OpExprs we have (if any). + * + * 3) If this doesn't work either, we may be able to generate steps to + * prune just the null-accepting partition (if one exists), if we have + * IS NOT NULL clauses for all partition keys. + */ + if (!bms_is_empty(nullkeys) && + (part_scheme->strategy == PARTITION_STRATEGY_LIST || + part_scheme->strategy == PARTITION_STRATEGY_RANGE || + (part_scheme->strategy == PARTITION_STRATEGY_HASH && + bms_num_members(nullkeys) == part_scheme->partnatts))) + { + PartitionPruneStep *step; + + /* Strategy 1 */ + step = gen_prune_step_op(context, InvalidStrategy, + false, NIL, NIL, nullkeys); + result = lappend(result, step); + } + else if (generate_opsteps) + { + PartitionPruneStep *step; + + /* Strategy 2 */ + step = gen_prune_steps_from_opexps(part_scheme, context, + keyclauses, nullkeys); + if (step != NULL) + result = lappend(result, step); + } + else if (bms_num_members(notnullkeys) == part_scheme->partnatts) + { + PartitionPruneStep *step; + + /* Strategy 3 */ + step = gen_prune_step_op(context, InvalidStrategy, + false, NIL, NIL, NULL); + result = lappend(result, step); + } + + /* + * Finally, results from all entries appearing in result should be + * combined using an INTERSECT combine step, if more than one. + */ + if (list_length(result) > 1) + { + List *step_ids = NIL; + + foreach(lc, result) + { + PartitionPruneStep *step = lfirst(lc); + + step_ids = lappend_int(step_ids, step->step_id); + } + + if (step_ids != NIL) + { + PartitionPruneStep *step; + + step = gen_prune_step_combine(context, step_ids, + PARTPRUNE_COMBINE_INTERSECT); + result = lappend(result, step); + } + } + + return result; +} + +/* + * gen_prune_step_op + * Generate a pruning step for a specific operator + * + * The step is assigned a unique step identifier and added to context's 'steps' + * list. + */ +static PartitionPruneStep * +gen_prune_step_op(GeneratePruningStepsContext *context, + StrategyNumber opstrategy, bool op_is_ne, + List *exprs, List *cmpfns, + Bitmapset *nullkeys) +{ + PartitionPruneStepOp *opstep = makeNode(PartitionPruneStepOp); + + opstep->step.step_id = context->next_step_id++; + + /* + * For clauses that contain an <> operator, set opstrategy to + * InvalidStrategy to signal get_matching_list_bounds to do the right + * thing. + */ + opstep->opstrategy = op_is_ne ? 
InvalidStrategy : opstrategy; + Assert(list_length(exprs) == list_length(cmpfns)); + opstep->exprs = exprs; + opstep->cmpfns = cmpfns; + opstep->nullkeys = nullkeys; + + context->steps = lappend(context->steps, opstep); + + return (PartitionPruneStep *) opstep; +} + +/* + * gen_prune_step_combine + * Generate a pruning step for a combination of several other steps + * + * The step is assigned a unique step identifier and added to context's + * 'steps' list. + */ +static PartitionPruneStep * +gen_prune_step_combine(GeneratePruningStepsContext *context, + List *source_stepids, + PartitionPruneCombineOp combineOp) +{ + PartitionPruneStepCombine *cstep = makeNode(PartitionPruneStepCombine); + + cstep->step.step_id = context->next_step_id++; + cstep->combineOp = combineOp; + cstep->source_stepids = source_stepids; + + context->steps = lappend(context->steps, cstep); + + return (PartitionPruneStep *) cstep; +} + +/* + * gen_prune_steps_from_opexps + * Generate pruning steps based on clauses for partition keys + * + * 'keyclauses' contains one list of clauses per partition key. We check here + * if we have found clauses for a valid subset of the partition key. In some + * cases, (depending on the type of partitioning being used) if we didn't + * find clauses for a given key, we discard clauses that may have been + * found for any subsequent keys; see specific notes below. + */ +static PartitionPruneStep * +gen_prune_steps_from_opexps(PartitionScheme part_scheme, + GeneratePruningStepsContext *context, + List **keyclauses, Bitmapset *nullkeys) +{ + ListCell *lc; + List *opsteps = NIL; + List *btree_clauses[BTMaxStrategyNumber + 1], + *hash_clauses[HTMaxStrategyNumber + 1]; + bool need_next_less, + need_next_eq, + need_next_greater; + int i; + + memset(btree_clauses, 0, sizeof(btree_clauses)); + memset(hash_clauses, 0, sizeof(hash_clauses)); + for (i = 0; i < part_scheme->partnatts; i++) + { + List *clauselist = keyclauses[i]; + bool consider_next_key = true; + + /* + * To be useful for pruning, we must have clauses for a prefix of + * partition keys in the case of range partitioning. So, ignore + * clauses for keys after this one. + */ + if (part_scheme->strategy == PARTITION_STRATEGY_RANGE && + clauselist == NIL) + break; + + /* + * For hash partitioning, if a column doesn't have the necessary + * equality clause, there should be an IS NULL clause, otherwise + * pruning is not possible. + */ + if (part_scheme->strategy == PARTITION_STRATEGY_HASH && + clauselist == NIL && !bms_is_member(i, nullkeys)) + return NULL; + + need_next_eq = need_next_less = need_next_greater = true; + foreach(lc, clauselist) + { + PartClauseInfo *pc = (PartClauseInfo *) lfirst(lc); + Oid lefttype, + righttype; + + /* Look up the operator's btree/hash strategy number. */ + if (pc->op_strategy == InvalidStrategy) + get_op_opfamily_properties(pc->opno, + part_scheme->partopfamily[i], + false, + &pc->op_strategy, + &lefttype, + &righttype); + + switch (part_scheme->strategy) + { + case PARTITION_STRATEGY_LIST: + case PARTITION_STRATEGY_RANGE: + { + PartClauseInfo *last = NULL; + bool inclusive = false; + + /* + * Add this clause to the list of clauses to be used + * for pruning if this is the first such key for this + * operator strategy or if it is consecutively next to + * the last column for which a clause with this + * operator strategy was matched. 
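As a compact restatement of the per-strategy coverage rules discussed above, the check below decides how many partition keys can drive OpExpr-based pruning: range partitioning uses the longest prefix of keys that have clauses, while hash partitioning needs every key covered by an equality clause or an IS NULL, otherwise no pruning is possible. The names and booleans are hypothetical simplifications of the clause-matching state.

/* Sketch of which keys can drive pruning per partitioning strategy. */
#include <stdbool.h>
#include <stdio.h>

typedef enum { STRAT_RANGE, STRAT_HASH } Strategy;

/*
 * Return how many partition keys can contribute to pruning:
 * - range: the longest prefix of keys that have clauses;
 * - hash: all keys, but only if each one has an equality clause or an
 *   IS NULL clause (otherwise 0, i.e. no pruning is possible).
 */
static int
usable_keys(Strategy strat, const bool *has_clause, const bool *key_is_null,
			int partnatts)
{
	if (strat == STRAT_RANGE)
	{
		int			i = 0;

		while (i < partnatts && has_clause[i])
			i++;
		return i;
	}

	for (int i = 0; i < partnatts; i++)
		if (!has_clause[i] && !key_is_null[i])
			return 0;
	return partnatts;
}

int
main(void)
{
	bool		has_clause[] = {true, false, true};
	bool		key_is_null[] = {false, true, false};

	printf("range: %d keys usable\n",
		   usable_keys(STRAT_RANGE, has_clause, key_is_null, 3));
	printf("hash: %d keys usable\n",
		   usable_keys(STRAT_HASH, has_clause, key_is_null, 3));
	return 0;
}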
+ */ + if (btree_clauses[pc->op_strategy] != NIL) + last = llast(btree_clauses[pc->op_strategy]); + + if (last == NULL || + i == last->keyno || i == last->keyno + 1) + btree_clauses[pc->op_strategy] = + lappend(btree_clauses[pc->op_strategy], pc); + + /* + * We may not need the next clause if they're of + * certain strategy. + */ + switch (pc->op_strategy) + { + case BTLessEqualStrategyNumber: + inclusive = true; + /* fall through */ + case BTLessStrategyNumber: + if (!inclusive) + need_next_eq = need_next_less = false; + break; + case BTEqualStrategyNumber: + /* always accept clauses for the next key. */ + break; + case BTGreaterEqualStrategyNumber: + inclusive = true; + /* fall through */ + case BTGreaterStrategyNumber: + if (!inclusive) + need_next_eq = need_next_greater = false; + break; + } + + /* We may want to change our mind. */ + if (consider_next_key) + consider_next_key = (need_next_eq || + need_next_less || + need_next_greater); + break; + } + + case PARTITION_STRATEGY_HASH: + if (pc->op_strategy != HTEqualStrategyNumber) + elog(ERROR, "invalid clause for hash partitioning"); + hash_clauses[pc->op_strategy] = + lappend(hash_clauses[pc->op_strategy], pc); + break; + + default: + elog(ERROR, "invalid partition strategy: %c", + part_scheme->strategy); + break; + } + } + + /* + * If we've decided that clauses for subsequent partition keys + * wouldn't be useful for pruning, don't search any further. + */ + if (!consider_next_key) + break; + } + + /* + * Now, we have divided clauses according to their operator strategies. + * Check for each strategy if we can generate pruning step(s) by + * collecting a list of expressions whose values will constitute a vector + * that can be used as a lookup key by a partition bound searching + * function. + */ + switch (part_scheme->strategy) + { + case PARTITION_STRATEGY_LIST: + case PARTITION_STRATEGY_RANGE: + { + List *eq_clauses = btree_clauses[BTEqualStrategyNumber]; + List *le_clauses = btree_clauses[BTLessEqualStrategyNumber]; + List *ge_clauses = btree_clauses[BTGreaterEqualStrategyNumber]; + int strat; + + /* + * For each clause under consideration for a given strategy, + * we collect expressions from clauses for earlier keys, whose + * operator strategy is inclusive, into a list called + * 'prefix'. By appending the clause's own expression to the + * 'prefix', we'll generate one step using the so generated + * vector and assign the current strategy to it. Actually, + * 'prefix' might contain multiple clauses for the same key, + * in which case, we must generate steps for various + * combinations of expressions of different keys, which + * get_steps_using_prefix takes care of for us. + */ + for (strat = 1; strat <= BTMaxStrategyNumber; strat++) + { + foreach(lc, btree_clauses[strat]) + { + PartClauseInfo *pc = lfirst(lc); + ListCell *lc1; + List *prefix = NIL; + List *pc_steps; + + /* + * Expressions from = clauses can always be in the + * prefix, provided they're from an earlier key. + */ + foreach(lc1, eq_clauses) + { + PartClauseInfo *eqpc = lfirst(lc1); + + if (eqpc->keyno == pc->keyno) + break; + if (eqpc->keyno < pc->keyno) + prefix = lappend(prefix, eqpc); + } + + /* + * If we're generating steps for </<= strategy, we can + * add other <=/< clauses to the prefix, provided + * they're from an earlier key. + */ + if (strat == BTLessStrategyNumber || + strat == BTLessEqualStrategyNumber) + { + foreach(lc1, le_clauses) + { + PartClauseInfo *lepc = lfirst(lc1); + + if (lepc->keyno == pc->keyno) + break; + if (lepc->keyno < pc->keyno) + prefix = lappend(prefix, lepc); + } + } + + /* + * If we're generating steps for >/>= strategy, we can + * add other >=/> clauses to the prefix, provided + * they're from an earlier key. 
+ */ + if (strat == BTGreaterStrategyNumber || + strat == BTGreaterEqualStrategyNumber) + { + foreach(lc1, ge_clauses) + { + PartClauseInfo *gepc = lfirst(lc1); + + if (gepc->keyno == pc->keyno) + break; + if (gepc->keyno < pc->keyno) + prefix = lappend(prefix, gepc); + } + } + + /* + * As mentioned above, if 'prefix' contains multiple + * expressions for the same key, the following will + * generate multiple steps, one for each combination + * of the expressions for different keys. + * + * Note that we pass NULL for step_nullkeys, because + * we don't search list/range partition bounds where + * some keys are NULL. + */ + Assert(pc->op_strategy == strat); + pc_steps = get_steps_using_prefix(context, strat, + pc->op_is_ne, + pc->expr, + pc->cmpfn, + pc->keyno, + NULL, + prefix); + opsteps = list_concat(opsteps, list_copy(pc_steps)); + } + } + break; + } + + case PARTITION_STRATEGY_HASH: + { + List *eq_clauses = hash_clauses[HTEqualStrategyNumber]; + + /* For hash partitioning, we have just the = strategy. */ + if (eq_clauses != NIL) + { + PartClauseInfo *pc; + List *pc_steps; + List *prefix = NIL; + int last_keyno; + ListCell *lc1; + + /* + * Locate the clause for the greatest column. This may + * not belong to the last partition key, but it is the + * clause belonging to the last partition key we found a + * clause for above. + */ + pc = llast(eq_clauses); + + /* + * There might be multiple clauses which matched to that + * partition key; find the first such clause. While at + * it, add all the clauses before that one to 'prefix'. + */ + last_keyno = pc->keyno; + foreach(lc, eq_clauses) + { + pc = lfirst(lc); + if (pc->keyno == last_keyno) + break; + prefix = lappend(prefix, pc); + } + + /* + * For each clause for the "last" column, after appending + * the clause's own expression to the 'prefix', we'll + * generate one step using the so generated vector and + * assign = as its strategy. Actually, 'prefix' might + * contain multiple clauses for the same key, in which + * case, we must generate steps for various combinations + * of expressions of different keys, which + * get_steps_using_prefix will take care of for us. + */ + for_each_cell(lc1, lc) + { + pc = lfirst(lc1); + + /* + * Note that we pass nullkeys for step_nullkeys, + * because we need to tell hash partition bound search + * function which of the keys we found IS NULL clauses + * for. + */ + Assert(pc->op_strategy == HTEqualStrategyNumber); + pc_steps = + get_steps_using_prefix(context, + HTEqualStrategyNumber, + false, + pc->expr, + pc->cmpfn, + pc->keyno, + nullkeys, + prefix); + opsteps = list_concat(opsteps, list_copy(pc_steps)); + } + } + break; + } + + default: + elog(ERROR, "invalid partition strategy: %c", + part_scheme->strategy); + break; + } + + /* Lastly, add a combine step to mutually AND these op steps, if needed */ + if (list_length(opsteps) > 1) + { + List *opstep_ids = NIL; + + foreach(lc, opsteps) + { + PartitionPruneStep *step = lfirst(lc); + + opstep_ids = lappend_int(opstep_ids, step->step_id); + } + + if (opstep_ids != NIL) + return gen_prune_step_combine(context, opstep_ids, + PARTPRUNE_COMBINE_INTERSECT); + return NULL; + } + else if (opsteps != NIL) + return linitial(opsteps); + + return NULL; +} + +/* + * If the partition key has a collation, then the clause must have the same + * input collation. 
If the partition key is non-collatable, we assume the + * collation doesn't matter, because while collation wasn't considered when + * performing partitioning, the clause still may have a collation assigned + * due to the other input being of a collatable type. + * + * See also IndexCollMatchesExprColl. + */ +#define PartCollMatchesExprColl(partcoll, exprcoll) \ + ((partcoll) == InvalidOid || (partcoll) == (exprcoll)) + +/* + * match_clause_to_partition_key + * Attempt to match the given 'clause' with the specified partition key. + * + * Return value is: + * * PARTCLAUSE_NOMATCH if the clause doesn't match this partition key (but + * caller should keep trying, because it might match a subsequent key). + * Output arguments: none set. + * + * * PARTCLAUSE_MATCH_CLAUSE if there is a match. + * Output arguments: *pc is set to a PartClauseInfo constructed for the + * matched clause. + * + * * PARTCLAUSE_MATCH_NULLNESS if there is a match, and the matched clause was + * either a "a IS NULL" or "a IS NOT NULL" clause. + * Output arguments: *clause_is_not_null is set to false in the former case + * true otherwise. + * + * * PARTCLAUSE_MATCH_STEPS if there is a match. + * Output arguments: *clause_steps is set to a list of PartitionPruneStep + * generated for the clause. + * + * * PARTCLAUSE_MATCH_CONTRADICT if the clause is self-contradictory, ie + * it provably returns FALSE or NULL. + * Output arguments: none set. + * + * * PARTCLAUSE_UNSUPPORTED if the clause doesn't match this partition key + * and couldn't possibly match any other one either, due to its form or + * properties (such as containing a volatile function). + * Output arguments: none set. + */ +static PartClauseMatchStatus +match_clause_to_partition_key(RelOptInfo *rel, + GeneratePruningStepsContext *context, + Expr *clause, Expr *partkey, int partkeyidx, + bool *clause_is_not_null, PartClauseInfo **pc, + List **clause_steps) +{ + PartitionScheme part_scheme = rel->part_scheme; + Oid partopfamily = part_scheme->partopfamily[partkeyidx], + partcoll = part_scheme->partcollation[partkeyidx]; + Expr *expr; + + /* + * Recognize specially shaped clauses that match with the Boolean + * partition key. + */ + if (match_boolean_partition_clause(partopfamily, clause, partkey, &expr)) + { + PartClauseInfo *partclause; + + partclause = (PartClauseInfo *) palloc(sizeof(PartClauseInfo)); + partclause->keyno = partkeyidx; + /* Do pruning with the Boolean equality operator. */ + partclause->opno = BooleanEqualOperator; + partclause->op_is_ne = false; + partclause->expr = expr; + /* We know that expr is of Boolean type. 
*/ + partclause->cmpfn = rel->part_scheme->partsupfunc[partkeyidx].fn_oid; + partclause->op_strategy = InvalidStrategy; + + *pc = partclause; + + return PARTCLAUSE_MATCH_CLAUSE; + } + else if (IsA(clause, OpExpr) && + list_length(((OpExpr *) clause)->args) == 2) + { + OpExpr *opclause = (OpExpr *) clause; + Expr *leftop, + *rightop; + Oid opno, + op_lefttype, + op_righttype, + negator = InvalidOid; + Oid cmpfn; + int op_strategy; + bool is_opne_listp = false; + PartClauseInfo *partclause; + + leftop = (Expr *) get_leftop(clause); + if (IsA(leftop, RelabelType)) + leftop = ((RelabelType *) leftop)->arg; + rightop = (Expr *) get_rightop(clause); + if (IsA(rightop, RelabelType)) + rightop = ((RelabelType *) rightop)->arg; + opno = opclause->opno; + + /* check if the clause matches this partition key */ + if (equal(leftop, partkey)) + expr = rightop; + else if (equal(rightop, partkey)) + { + /* + * It's only useful if we can commute the operator to put the + * partkey on the left. If we can't, the clause can be deemed + * UNSUPPORTED. Even if its leftop matches some later partkey, we + * now know it has Vars on the right, so it's no use. + */ + opno = get_commutator(opno); + if (!OidIsValid(opno)) + return PARTCLAUSE_UNSUPPORTED; + expr = leftop; + } + else + /* clause does not match this partition key, but perhaps next. */ + return PARTCLAUSE_NOMATCH; + + /* + * Partition key match also requires collation match. There may be + * multiple partkeys with the same expression but different + * collations, so failure is NOMATCH. + */ + if (!PartCollMatchesExprColl(partcoll, opclause->inputcollid)) + return PARTCLAUSE_NOMATCH; + + /* + * Matched with this key. Now check various properties of the clause + * to see if it's sane to use it for pruning. In most of these cases, + * we can return UNSUPPORTED because the same failure would occur no + * matter which partkey it's matched to. + */ + + /* + * We can't prune using an expression with Vars. (Report failure as + * UNSUPPORTED, not NOMATCH: as in the no-commutator case above, we + * now know there are Vars on both sides, so it's no good.) + */ + if (contain_var_clause((Node *) expr)) + return PARTCLAUSE_UNSUPPORTED; + + /* + * Only allow strict operators. This will guarantee nulls are + * filtered. + */ + if (!op_strict(opno)) + return PARTCLAUSE_UNSUPPORTED; + + /* We can't use any volatile expressions to prune partitions. */ + if (contain_volatile_functions((Node *) expr)) + return PARTCLAUSE_UNSUPPORTED; + + /* + * See if the operator is relevant to the partitioning opfamily. + * + * Normally we only care about operators that are listed as being part + * of the partitioning operator family. But there is one exception: + * the not-equals operators are not listed in any operator family + * whatsoever, but their negators (equality) are. We can use one of + * those if we find it, but only for list partitioning. + * + * Note: we report NOMATCH on failure, in case a later partkey has the + * same expression but different opfamily. That's unlikely, but not + * much more so than duplicate expressions with different collations. 
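The operator commutation step described above (rewriting "expr op partkey" so that the partition key ends up on the left) can be sketched with btree strategies alone: the commutator of < is >, of <= is >=, and = is its own commutator. The lookup below is a simplification of what get_commutator() plus get_op_opfamily_properties() provide in the server.

/* Sketch of commuting a comparison so the partition key is on the left. */
#include <stdio.h>

typedef enum { BT_LT = 1, BT_LE, BT_EQ, BT_GE, BT_GT } BtStrategy;

/* Commutator of each btree strategy: "5 < partkey" becomes "partkey > 5". */
static BtStrategy
commute(BtStrategy s)
{
	switch (s)
	{
		case BT_LT: return BT_GT;
		case BT_LE: return BT_GE;
		case BT_EQ: return BT_EQ;
		case BT_GE: return BT_LE;
		case BT_GT: return BT_LT;
	}
	return s;
}

int
main(void)
{
	/* Clause "5 < partkey": put the key on the left as "partkey > 5". */
	printf("commuted strategy: %d\n", commute(BT_LT));
	return 0;
}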
+ */ + if (op_in_opfamily(opno, partopfamily)) + { + get_op_opfamily_properties(opno, partopfamily, false, + &op_strategy, &op_lefttype, + &op_righttype); + } + else + { + if (part_scheme->strategy != PARTITION_STRATEGY_LIST) + return PARTCLAUSE_NOMATCH; + + /* See if the negator is equality */ + negator = get_negator(opno); + if (OidIsValid(negator) && op_in_opfamily(negator, partopfamily)) + { + get_op_opfamily_properties(negator, partopfamily, false, + &op_strategy, &op_lefttype, + &op_righttype); + if (op_strategy == BTEqualStrategyNumber) + is_opne_listp = true; /* bingo */ + } + + /* Nope, it's not <> either. */ + if (!is_opne_listp) + return PARTCLAUSE_NOMATCH; + } + + /* + * Now find the procedure to use, based on the types. If the clause's + * other argument is of the same type as the partitioning opclass's + * declared input type, we can use the procedure cached in + * PartitionKey. If not, search for a cross-type one in the same + * opfamily; if one doesn't exist, report no match. + */ + if (op_righttype == part_scheme->partopcintype[partkeyidx]) + cmpfn = part_scheme->partsupfunc[partkeyidx].fn_oid; + else + { + switch (part_scheme->strategy) + { + /* + * For range and list partitioning, we need the ordering + * procedure with lefttype being the partition key's type, + * and righttype the clause's operator's right type. + */ + case PARTITION_STRATEGY_LIST: + case PARTITION_STRATEGY_RANGE: + cmpfn = + get_opfamily_proc(part_scheme->partopfamily[partkeyidx], + part_scheme->partopcintype[partkeyidx], + op_righttype, BTORDER_PROC); + break; + + /* + * For hash partitioning, we need the hashing procedure + * for the clause's type. + */ + case PARTITION_STRATEGY_HASH: + cmpfn = + get_opfamily_proc(part_scheme->partopfamily[partkeyidx], + op_righttype, op_righttype, + HASHEXTENDED_PROC); + break; + + default: + elog(ERROR, "invalid partition strategy: %c", + part_scheme->strategy); + cmpfn = InvalidOid; /* keep compiler quiet */ + break; + } + + if (!OidIsValid(cmpfn)) + return PARTCLAUSE_NOMATCH; + } + + /* + * Build the clause, passing the negator if applicable. + */ + partclause = (PartClauseInfo *) palloc(sizeof(PartClauseInfo)); + partclause->keyno = partkeyidx; + if (is_opne_listp) + { + Assert(OidIsValid(negator)); + partclause->opno = negator; + partclause->op_is_ne = true; + partclause->op_strategy = InvalidStrategy; + } + else + { + partclause->opno = opno; + partclause->op_is_ne = false; + partclause->op_strategy = op_strategy; + } + partclause->expr = expr; + partclause->cmpfn = cmpfn; + + *pc = partclause; + + return PARTCLAUSE_MATCH_CLAUSE; + } + else if (IsA(clause, ScalarArrayOpExpr)) + { + ScalarArrayOpExpr *saop = (ScalarArrayOpExpr *) clause; + Oid saop_op = saop->opno; + Oid saop_coll = saop->inputcollid; + Expr *leftop = (Expr *) linitial(saop->args), + *rightop = (Expr *) lsecond(saop->args); + List *elem_exprs, + *elem_clauses; + ListCell *lc1; + bool contradictory; + + if (IsA(leftop, RelabelType)) + leftop = ((RelabelType *) leftop)->arg; + + /* Check it matches this partition key */ + if (!equal(leftop, partkey) || + !PartCollMatchesExprColl(partcoll, saop->inputcollid)) + return PARTCLAUSE_NOMATCH; + + /* + * Matched with this key. Check various properties of the clause to + * see if it can sanely be used for partition pruning (this is mostly + * the same as for a plain OpExpr). + */ + + /* We can't prune using an expression with Vars. */ + if (contain_var_clause((Node *) rightop)) + return PARTCLAUSE_UNSUPPORTED; + + /* + * Only allow strict operators. 
This will guarantee nulls are + * filtered. + */ + if (!op_strict(saop_op)) + return PARTCLAUSE_UNSUPPORTED; + + /* We can't use any volatile expressions to prune partitions. */ + if (contain_volatile_functions((Node *) rightop)) + return PARTCLAUSE_UNSUPPORTED; + + /* + * In case of NOT IN (..), we get a '<>', which we handle if list + * partitioning is in use and we're able to confirm that it's negator + * is a btree equality operator belonging to the partitioning operator + * family. As above, report NOMATCH for non-matching operator. + */ + if (!op_in_opfamily(saop_op, partopfamily)) + { + Oid negator; + + if (part_scheme->strategy != PARTITION_STRATEGY_LIST) + return PARTCLAUSE_NOMATCH; + + negator = get_negator(saop_op); + if (OidIsValid(negator) && op_in_opfamily(negator, partopfamily)) + { + int strategy; + Oid lefttype, + righttype; + + get_op_opfamily_properties(negator, partopfamily, + false, &strategy, + &lefttype, &righttype); + if (strategy != BTEqualStrategyNumber) + return PARTCLAUSE_NOMATCH; + } + else + return PARTCLAUSE_NOMATCH; /* no useful negator */ + } + + /* + * First generate a list of Const nodes, one for each array element + * (excepting nulls). + */ + elem_exprs = NIL; + if (IsA(rightop, Const)) + { + Const *arr = (Const *) rightop; + ArrayType *arrval = DatumGetArrayTypeP(arr->constvalue); + int16 elemlen; + bool elembyval; + char elemalign; + Datum *elem_values; + bool *elem_nulls; + int num_elems, + i; + + get_typlenbyvalalign(ARR_ELEMTYPE(arrval), + &elemlen, &elembyval, &elemalign); + deconstruct_array(arrval, + ARR_ELEMTYPE(arrval), + elemlen, elembyval, elemalign, + &elem_values, &elem_nulls, + &num_elems); + for (i = 0; i < num_elems; i++) + { + Const *elem_expr; + + /* + * A null array element must lead to a null comparison result, + * since saop_op is known strict. We can ignore it in the + * useOr case, but otherwise it implies self-contradiction. + */ + if (elem_nulls[i]) + { + if (saop->useOr) + continue; + return PARTCLAUSE_MATCH_CONTRADICT; + } + + elem_expr = makeConst(ARR_ELEMTYPE(arrval), -1, + arr->constcollid, elemlen, + elem_values[i], false, elembyval); + elem_exprs = lappend(elem_exprs, elem_expr); + } + } + else if (IsA(rightop, ArrayExpr)) + { + ArrayExpr *arrexpr = castNode(ArrayExpr, rightop); + + /* + * For a nested ArrayExpr, we don't know how to get the actual + * scalar values out into a flat list, so we give up doing + * anything with this ScalarArrayOpExpr. + */ + if (arrexpr->multidims) + return PARTCLAUSE_UNSUPPORTED; + + elem_exprs = arrexpr->elements; + } + else + { + /* Give up on any other clause types. */ + return PARTCLAUSE_UNSUPPORTED; + } + + /* + * Now generate a list of clauses, one for each array element, of the + * form saop_leftop saop_op elem_expr + */ + elem_clauses = NIL; + foreach(lc1, elem_exprs) + { + Expr *rightop = (Expr *) lfirst(lc1), + *elem_clause; + + elem_clause = make_opclause(saop_op, BOOLOID, false, + leftop, rightop, + InvalidOid, saop_coll); + elem_clauses = lappend(elem_clauses, elem_clause); + } + + /* + * If we have an ANY clause and multiple elements, now turn the list + * of clauses into an OR expression. 
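A reduced sketch of the array expansion just described: each non-null element yields one "key op element" clause, an ANY ORs them together, and a null element is simply dropped for ANY but makes an ALL self-contradictory because the operator is strict. Types and names are illustrative only, with plain integers standing in for array elements.

/* Sketch of expanding "key op ANY/ALL(array)" into per-element clauses. */
#include <stdbool.h>
#include <stdio.h>

typedef enum { EXPANDED, CONTRADICTORY } ExpandResult;

static ExpandResult
expand_array(const int *elems, const bool *nulls, int nelems, bool use_or,
			 int *out, int *nout)
{
	*nout = 0;
	for (int i = 0; i < nelems; i++)
	{
		if (nulls[i])
		{
			/* Strict operator: a NULL element can never yield TRUE. */
			if (use_or)
				continue;			/* ANY: just skip this element */
			return CONTRADICTORY;	/* ALL: the whole clause is contradictory */
		}
		out[(*nout)++] = elems[i];	/* becomes one "key op elem" clause */
	}
	return EXPANDED;
}

int
main(void)
{
	int			elems[] = {1, 2, 3};
	bool		nulls[] = {false, true, false};
	int			out[3];
	int			nout;

	if (expand_array(elems, nulls, 3, true, out, &nout) == EXPANDED)
		printf("ANY expands to %d clauses, ORed together\n", nout);
	if (expand_array(elems, nulls, 3, false, out, &nout) == CONTRADICTORY)
		printf("ALL with a NULL element prunes everything\n");
	return 0;
}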
+ */ + if (saop->useOr && list_length(elem_clauses) > 1) + elem_clauses = list_make1(makeBoolExpr(OR_EXPR, elem_clauses, -1)); + + /* Finally, generate steps */ + *clause_steps = + gen_partprune_steps_internal(context, rel, elem_clauses, + &contradictory); + if (contradictory) + return PARTCLAUSE_MATCH_CONTRADICT; + else if (*clause_steps == NIL) + return PARTCLAUSE_UNSUPPORTED; /* step generation failed */ + return PARTCLAUSE_MATCH_STEPS; + } + else if (IsA(clause, NullTest)) + { + NullTest *nulltest = (NullTest *) clause; + Expr *arg = nulltest->arg; + + if (IsA(arg, RelabelType)) + arg = ((RelabelType *) arg)->arg; + + /* Does arg match with this partition key column? */ + if (!equal(arg, partkey)) + return PARTCLAUSE_NOMATCH; + + *clause_is_not_null = (nulltest->nulltesttype == IS_NOT_NULL); + + return PARTCLAUSE_MATCH_NULLNESS; + } + + return PARTCLAUSE_UNSUPPORTED; +} + +/* + * get_steps_using_prefix + * Generate list of PartitionPruneStepOp steps each consisting of given + * opstrategy + * + * To generate steps, step_lastexpr and step_lastcmpfn are appended to + * expressions and cmpfns, respectively, extracted from the clauses in + * 'prefix'. Actually, since 'prefix' may contain multiple clauses for the + * same partition key column, we must generate steps for various combinations + * of the clauses of different keys. + */ +static List * +get_steps_using_prefix(GeneratePruningStepsContext *context, + StrategyNumber step_opstrategy, + bool step_op_is_ne, + Expr *step_lastexpr, + Oid step_lastcmpfn, + int step_lastkeyno, + Bitmapset *step_nullkeys, + List *prefix) +{ + /* Quick exit if there are no values to prefix with. */ + if (list_length(prefix) == 0) + { + PartitionPruneStep *step; + + step = gen_prune_step_op(context, + step_opstrategy, + step_op_is_ne, + list_make1(step_lastexpr), + list_make1_oid(step_lastcmpfn), + step_nullkeys); + return list_make1(step); + } + + /* Recurse to generate steps for various combinations. */ + return get_steps_using_prefix_recurse(context, + step_opstrategy, + step_op_is_ne, + step_lastexpr, + step_lastcmpfn, + step_lastkeyno, + step_nullkeys, + list_head(prefix), + NIL, NIL); +} + +/* + * get_steps_using_prefix_recurse + * Recursively generate combinations of clauses for different partition + * keys and start generating steps upon reaching clauses for the greatest + * column that is less than the one for which we're currently generating + * steps (that is, step_lastkeyno) + * + * 'start' is where we should start iterating for the current invocation. + * 'step_exprs' and 'step_cmpfns' each contains the expressions and cmpfns + * we've generated so far from the clauses for the previous part keys. + */ +static List * +get_steps_using_prefix_recurse(GeneratePruningStepsContext *context, + StrategyNumber step_opstrategy, + bool step_op_is_ne, + Expr *step_lastexpr, + Oid step_lastcmpfn, + int step_lastkeyno, + Bitmapset *step_nullkeys, + ListCell *start, + List *step_exprs, + List *step_cmpfns) +{ + List *result = NIL; + ListCell *lc; + int cur_keyno; + + /* Actually, recursion would be limited by PARTITION_MAX_KEYS. */ + check_stack_depth(); + + /* Check if we need to recurse. 
*/ + Assert(start != NULL); + cur_keyno = ((PartClauseInfo *) lfirst(start))->keyno; + if (cur_keyno < step_lastkeyno - 1) + { + PartClauseInfo *pc; + ListCell *next_start; + + /* + * For each clause with cur_keyno, adds its expr and cmpfn to + * step_exprs and step_cmpfns, respectively, and recurse after setting + * next_start to the ListCell of the first clause for the next + * partition key. + */ + for_each_cell(lc, start) + { + pc = lfirst(lc); + + if (pc->keyno > cur_keyno) + break; + } + next_start = lc; + + for_each_cell(lc, start) + { + List *moresteps; + + pc = lfirst(lc); + if (pc->keyno == cur_keyno) + { + /* clean up before starting a new recursion cycle. */ + if (cur_keyno == 0) + { + list_free(step_exprs); + list_free(step_cmpfns); + step_exprs = list_make1(pc->expr); + step_cmpfns = list_make1_oid(pc->cmpfn); + } + else + { + step_exprs = lappend(step_exprs, pc->expr); + step_cmpfns = lappend_oid(step_cmpfns, pc->cmpfn); + } + } + else + { + Assert(pc->keyno > cur_keyno); + break; + } + + moresteps = get_steps_using_prefix_recurse(context, + step_opstrategy, + step_op_is_ne, + step_lastexpr, + step_lastcmpfn, + step_lastkeyno, + step_nullkeys, + next_start, + step_exprs, + step_cmpfns); + result = list_concat(result, moresteps); + } + } + else + { + /* + * End the current recursion cycle and start generating steps, one for + * each clause with cur_keyno, which is all clauses from here onward + * till the end of the list. + */ + Assert(list_length(step_exprs) == cur_keyno); + for_each_cell(lc, start) + { + PartClauseInfo *pc = lfirst(lc); + PartitionPruneStep *step; + List *step_exprs1, + *step_cmpfns1; + + Assert(pc->keyno == cur_keyno); + + /* Leave the original step_exprs unmodified. */ + step_exprs1 = list_copy(step_exprs); + step_exprs1 = lappend(step_exprs1, pc->expr); + step_exprs1 = lappend(step_exprs1, step_lastexpr); + + /* Leave the original step_cmpfns unmodified. */ + step_cmpfns1 = list_copy(step_cmpfns); + step_cmpfns1 = lappend_oid(step_cmpfns1, pc->cmpfn); + step_cmpfns1 = lappend_oid(step_cmpfns1, step_lastcmpfn); + + step = gen_prune_step_op(context, + step_opstrategy, step_op_is_ne, + step_exprs1, step_cmpfns1, + step_nullkeys); + result = lappend(result, step); + } + } + + return result; +} + +/* + * get_matching_hash_bounds + * Determine offset of the hash bound matching the specified values, + * considering that all the non-null values come from clauses containing + * a compatible hash equality operator and any keys that are null come + * from an IS NULL clause. + * + * Generally this function will return a single matching bound offset, + * although if a partition has not been setup for a given modulus then we may + * return no matches. If the number of clauses found don't cover the entire + * partition key, then we'll need to return all offsets. + * + * 'opstrategy' if non-zero must be HTEqualStrategyNumber. + * + * 'values' contains Datums indexed by the partition key to use for pruning. + * + * 'nvalues', the number of Datums in the 'values' array. + * + * 'partsupfunc' contains partition hashing functions that can produce correct + * hash for the type of the values contained in 'values'. + * + * 'nullkeys' is the set of partition keys that are null. 
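The combination machinery described for get_steps_using_prefix() above boils down to a cartesian product over the clauses collected for each earlier key: when an earlier key has several usable clauses, one step is emitted per combination. The sketch below shows only the two-key case, with plain integers standing in for expressions, so the recursion over an arbitrary number of keys is omitted.

/* Sketch of per-combination step generation for a two-key prefix case. */
#include <stdio.h>

int
main(void)
{
	/* Two candidate expressions matched to key 0, e.g. a <= 10 and a <= 20. */
	int			key0_exprs[] = {10, 20};
	/* The clause currently being processed for key 1, e.g. b <= 5. */
	int			key1_expr = 5;

	/*
	 * One pruning step per combination: each step's lookup vector pairs one
	 * key-0 expression with the key-1 expression.
	 */
	for (int i = 0; i < 2; i++)
		printf("step %d: lookup vector = (%d, %d)\n",
			   i, key0_exprs[i], key1_expr);
	return 0;
}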
+ */ +static PruneStepResult * +get_matching_hash_bounds(PartitionPruneContext *context, + StrategyNumber opstrategy, Datum *values, int nvalues, + FmgrInfo *partsupfunc, Bitmapset *nullkeys) +{ + PruneStepResult *result = (PruneStepResult *) palloc0(sizeof(PruneStepResult)); + PartitionBoundInfo boundinfo = context->boundinfo; + int *partindices = boundinfo->indexes; + int partnatts = context->partnatts; + bool isnull[PARTITION_MAX_KEYS]; + int i; + uint64 rowHash; + int greatest_modulus; + + Assert(context->strategy == PARTITION_STRATEGY_HASH); + + /* + * For hash partitioning we can only perform pruning based on equality + * clauses to the partition key or IS NULL clauses. We also can only + * prune if we got values for all keys. + */ + if (nvalues + bms_num_members(nullkeys) == partnatts) + { + /* + * If there are any values, they must have come from clauses + * containing an equality operator compatible with hash partitioning. + */ + Assert(opstrategy == HTEqualStrategyNumber || nvalues == 0); + + for (i = 0; i < partnatts; i++) + isnull[i] = bms_is_member(i, nullkeys); + + greatest_modulus = get_hash_partition_greatest_modulus(boundinfo); + rowHash = compute_partition_hash_value(partnatts, partsupfunc, + values, isnull); + + if (partindices[rowHash % greatest_modulus] >= 0) + result->bound_offsets = + bms_make_singleton(rowHash % greatest_modulus); + } + else + { + /* Getting here means at least one hash partition exists. */ + Assert(boundinfo->ndatums > 0); + result->bound_offsets = bms_add_range(NULL, 0, + boundinfo->ndatums - 1); + } + + /* + * There is neither a special hash null partition or the default hash + * partition. + */ + result->scan_null = result->scan_default = false; + + return result; +} + +/* + * get_matching_list_bounds + * Determine the offsets of list bounds matching the specified value, + * according to the semantics of the given operator strategy + * 'opstrategy' if non-zero must be a btree strategy number. + * + * 'value' contains the value to use for pruning. + * + * 'nvalues', if non-zero, should be exactly 1, because of list partitioning. + * + * 'partsupfunc' contains the list partitioning comparison function to be used + * to perform partition_list_bsearch + * + * 'nullkeys' is the set of partition keys that are null. + */ +static PruneStepResult * +get_matching_list_bounds(PartitionPruneContext *context, + StrategyNumber opstrategy, Datum value, int nvalues, + FmgrInfo *partsupfunc, Bitmapset *nullkeys) +{ + PruneStepResult *result = (PruneStepResult *) palloc0(sizeof(PruneStepResult)); + PartitionBoundInfo boundinfo = context->boundinfo; + int off, + minoff, + maxoff; + bool is_equal; + bool inclusive = false; + Oid *partcollation = context->partcollation; + + Assert(context->strategy == PARTITION_STRATEGY_LIST); + Assert(context->partnatts == 1); + + result->scan_null = result->scan_default = false; + + if (!bms_is_empty(nullkeys)) + { + /* + * Nulls may exist in only one partition - the partition whose + * accepted set of values includes null or the default partition if + * the former doesn't exist. + */ + if (partition_bound_accepts_nulls(boundinfo)) + result->scan_null = true; + else + result->scan_default = partition_bound_has_default(boundinfo); + return result; + } + + /* + * If there are no datums to compare keys with, but there are partitions, + * just return the default partition if one exists. 
+ */ + if (boundinfo->ndatums == 0) + { + result->scan_default = partition_bound_has_default(boundinfo); + return result; + } + + minoff = 0; + maxoff = boundinfo->ndatums - 1; + + /* + * If there are no values to compare with the datums in boundinfo, it + * means the caller asked for partitions for all non-null datums. Add + * indexes of *all* partitions, including the default if any. + */ + if (nvalues == 0) + { + Assert(boundinfo->ndatums > 0); + result->bound_offsets = bms_add_range(NULL, 0, + boundinfo->ndatums - 1); + result->scan_default = partition_bound_has_default(boundinfo); + return result; + } + + /* Special case handling of values coming from a <> operator clause. */ + if (opstrategy == InvalidStrategy) + { + /* + * First match to all bounds. We'll remove any matching datums below. + */ + Assert(boundinfo->ndatums > 0); + result->bound_offsets = bms_add_range(NULL, 0, + boundinfo->ndatums - 1); + + off = partition_list_bsearch(partsupfunc, partcollation, boundinfo, + value, &is_equal); + if (off >= 0 && is_equal) + { + + /* We have a match. Remove from the result. */ + Assert(boundinfo->indexes[off] >= 0); + result->bound_offsets = bms_del_member(result->bound_offsets, + off); + } + + /* Always include the default partition if any. */ + result->scan_default = partition_bound_has_default(boundinfo); + + return result; + } + + /* + * With range queries, always include the default list partition, because + * list partitions divide the key space in a discontinuous manner, not all + * values in the given range will have a partition assigned. This may not + * technically be true for some data types (e.g. integer types), however, + * we currently lack any sort of infrastructure to provide us with proofs + * that would allow us to do anything smarter here. + */ + if (opstrategy != BTEqualStrategyNumber) + result->scan_default = partition_bound_has_default(boundinfo); + + switch (opstrategy) + { + case BTEqualStrategyNumber: + off = partition_list_bsearch(partsupfunc, + partcollation, + boundinfo, value, + &is_equal); + if (off >= 0 && is_equal) + { + Assert(boundinfo->indexes[off] >= 0); + result->bound_offsets = bms_make_singleton(off); + } + else + result->scan_default = partition_bound_has_default(boundinfo); + return result; + + case BTGreaterEqualStrategyNumber: + inclusive = true; + /* fall through */ + case BTGreaterStrategyNumber: + off = partition_list_bsearch(partsupfunc, + partcollation, + boundinfo, value, + &is_equal); + if (off >= 0) + { + /* We don't want the matched datum to be in the result. */ + if (!is_equal || !inclusive) + off++; + } + else + { + /* + * This case means all partition bounds are greater, which in + * turn means that all partitions satisfy this key. + */ + off = 0; + } + + /* + * off is greater than the numbers of datums we have partitions + * for. The only possible partition that could contain a match is + * the default partition, but we must've set context->scan_default + * above anyway if one exists. + */ + if (off > boundinfo->ndatums - 1) + return result; + + minoff = off; + break; + + case BTLessEqualStrategyNumber: + inclusive = true; + /* fall through */ + case BTLessStrategyNumber: + off = partition_list_bsearch(partsupfunc, + partcollation, + boundinfo, value, + &is_equal); + if (off >= 0 && is_equal && !inclusive) + off--; + + /* + * off is smaller than the datums of all non-default partitions. 
+ * The only possible partition that could contain a match is the + * default partition, but we must've set context->scan_default + * above anyway if one exists. + */ + if (off < 0) + return result; + + maxoff = off; + break; + + default: + elog(ERROR, "invalid strategy number %d", opstrategy); + break; + } + + Assert(minoff >= 0 && maxoff >= 0); + result->bound_offsets = bms_add_range(NULL, minoff, maxoff); + return result; +} + + +/* + * get_matching_range_bounds + * Determine the offsets of range bounds matching the specified values, + * according to the semantics of the given operator strategy + * + * Each datum whose offset is in result is to be treated as the upper bound of + * the partition that will contain the desired values. + * + * If default partition needs to be scanned for given values, set scan_default + * in result if present. + * + * 'opstrategy' if non-zero must be a btree strategy number. + * + * 'values' contains Datums indexed by the partition key to use for pruning. + * + * 'nvalues', number of Datums in 'values' array. Must be <= context->partnatts. + * + * 'partsupfunc' contains the range partitioning comparison functions to be + * used to perform partition_range_datum_bsearch or partition_rbound_datum_cmp + * using. + * + * 'nullkeys' is the set of partition keys that are null. + */ +static PruneStepResult * +get_matching_range_bounds(PartitionPruneContext *context, + StrategyNumber opstrategy, Datum *values, int nvalues, + FmgrInfo *partsupfunc, Bitmapset *nullkeys) +{ + PruneStepResult *result = (PruneStepResult *) palloc0(sizeof(PruneStepResult)); + PartitionBoundInfo boundinfo = context->boundinfo; + Oid *partcollation = context->partcollation; + int partnatts = context->partnatts; + int *partindices = boundinfo->indexes; + int off, + minoff, + maxoff, + i; + bool is_equal; + bool inclusive = false; + + Assert(context->strategy == PARTITION_STRATEGY_RANGE); + Assert(nvalues <= partnatts); + + result->scan_null = result->scan_default = false; + + /* + * If there are no datums to compare keys with, or if we got an IS NULL + * clause just return the default partition, if it exists. + */ + if (boundinfo->ndatums == 0 || !bms_is_empty(nullkeys)) + { + result->scan_default = partition_bound_has_default(boundinfo); + return result; + } + + minoff = 0; + maxoff = boundinfo->ndatums; + + /* + * If there are no values to compare with the datums in boundinfo, it + * means the caller asked for partitions for all non-null datums. Add + * indexes of *all* partitions, including the default partition if one + * exists. + */ + if (nvalues == 0) + { + if (partindices[minoff] < 0) + minoff++; + if (partindices[maxoff] < 0) + maxoff--; + + result->scan_default = partition_bound_has_default(boundinfo); + Assert(minoff >= 0 && maxoff >= 0); + result->bound_offsets = bms_add_range(NULL, minoff, maxoff); + + return result; + } + + /* + * If the query does not constrain all key columns, we'll need to scan the + * default partition, if any. + */ + if (nvalues < partnatts) + result->scan_default = partition_bound_has_default(boundinfo); + + switch (opstrategy) + { + case BTEqualStrategyNumber: + /* Look for the smallest bound that is = lookup value. */ + off = partition_range_datum_bsearch(partsupfunc, + partcollation, + boundinfo, + nvalues, values, + &is_equal); + + if (off >= 0 && is_equal) + { + if (nvalues == partnatts) + { + /* There can only be zero or one matching partition. 
*/ + if (partindices[off + 1] >= 0) + result->bound_offsets = bms_make_singleton(off + 1); + else + result->scan_default = + partition_bound_has_default(boundinfo); + return result; + } + else + { + int saved_off = off; + + /* + * Since the lookup value contains only a prefix of keys, + * we must find other bounds that may also match the + * prefix. partition_range_datum_bsearch() returns the + * offset of one of them, find others by checking adjacent + * bounds. + */ + + /* + * First find greatest bound that's smaller than the + * lookup value. + */ + while (off >= 1) + { + int32 cmpval; + + cmpval = + partition_rbound_datum_cmp(partsupfunc, + partcollation, + boundinfo->datums[off - 1], + boundinfo->kind[off - 1], + values, nvalues); + if (cmpval != 0) + break; + off--; + } + + Assert(0 == + partition_rbound_datum_cmp(partsupfunc, + partcollation, + boundinfo->datums[off], + boundinfo->kind[off], + values, nvalues)); + + /* + * We can treat 'off' as the offset of the smallest bound + * to be included in the result, if we know it is the + * upper bound of the partition in which the lookup value + * could possibly exist. One case it couldn't is if the + * bound, or precisely the matched portion of its prefix, + * is not inclusive. + */ + if (boundinfo->kind[off][nvalues] == + PARTITION_RANGE_DATUM_MINVALUE) + off++; + + minoff = off; + + /* + * Now find smallest bound that's greater than the lookup + * value. + */ + off = saved_off; + while (off < boundinfo->ndatums - 1) + { + int32 cmpval; + + cmpval = partition_rbound_datum_cmp(partsupfunc, + partcollation, + boundinfo->datums[off + 1], + boundinfo->kind[off + 1], + values, nvalues); + if (cmpval != 0) + break; + off++; + } + + Assert(0 == + partition_rbound_datum_cmp(partsupfunc, + partcollation, + boundinfo->datums[off], + boundinfo->kind[off], + values, nvalues)); + + /* + * off + 1, then would be the offset of the greatest bound + * to be included in the result. + */ + maxoff = off + 1; + } + + /* + * Skip if minoff/maxoff are actually the upper bound of a + * un-assigned portion of values. + */ + if (partindices[minoff] < 0 && minoff < boundinfo->ndatums) + minoff++; + if (partindices[maxoff] < 0 && maxoff >= 1) + maxoff--; + + /* + * There may exist a range of values unassigned to any + * non-default partition between the datums at minoff and + * maxoff. Add the default partition in that case. + */ + if (partition_bound_has_default(boundinfo)) + { + for (i = minoff; i <= maxoff; i++) + { + if (partindices[i] < 0) + { + result->scan_default = true; + break; + } + } + } + + Assert(minoff >= 0 && maxoff >= 0); + result->bound_offsets = bms_add_range(NULL, minoff, maxoff); + } + else if (off >= 0) /* !is_equal */ + { + /* + * The lookup value falls in the range between some bounds in + * boundinfo. 'off' would be the offset of the greatest bound + * that is <= lookup value, so add off + 1 to the result + * instead as the offset of the upper bound of the only + * partition that may contain the lookup value. + */ + if (partindices[off + 1] >= 0) + result->bound_offsets = bms_make_singleton(off + 1); + else + result->scan_default = + partition_bound_has_default(boundinfo); + } + else + { + /* + * off < 0: the lookup value is smaller than all bounds, so + * only the default partition qualifies, if there is one. 
+ */ + result->scan_default = partition_bound_has_default(boundinfo); + } + + return result; + + case BTGreaterEqualStrategyNumber: + inclusive = true; + /* fall through */ + case BTGreaterStrategyNumber: + + /* + * Look for the smallest bound that is > or >= lookup value and + * set minoff to its offset. + */ + off = partition_range_datum_bsearch(partsupfunc, + partcollation, + boundinfo, + nvalues, values, + &is_equal); + if (off < 0) + { + /* + * All bounds are greater than the lookup value, so include + * all of them in the result. + */ + minoff = 0; + } + else + { + if (is_equal && nvalues < partnatts) + { + /* + * Since the lookup value contains only a prefix of keys, + * we must find other bounds that may also match the + * prefix. partition_range_datum_bsearch() returns the + * offset of one of them, find others by checking adjacent + * bounds. + * + * Based on whether the lookup values are inclusive or + * not, we must either include the indexes of all such + * bounds in the result (that is, set minoff to the index + * of smallest such bound) or find the smallest one that's + * greater than the lookup values and set minoff to that. + */ + while (off >= 1 && off < boundinfo->ndatums - 1) + { + int32 cmpval; + int nextoff; + + nextoff = inclusive ? off - 1 : off + 1; + cmpval = + partition_rbound_datum_cmp(partsupfunc, + partcollation, + boundinfo->datums[nextoff], + boundinfo->kind[nextoff], + values, nvalues); + if (cmpval != 0) + break; + + off = nextoff; + } + + Assert(0 == + partition_rbound_datum_cmp(partsupfunc, + partcollation, + boundinfo->datums[off], + boundinfo->kind[off], + values, nvalues)); + + minoff = inclusive ? off : off + 1; + } + + /* + * lookup value falls in the range between some bounds in + * boundinfo. off would be the offset of the greatest bound + * that is <= lookup value, so add off + 1 to the result + * instead as the offset of the upper bound of the smallest + * partition that may contain the lookup value. + */ + else + minoff = off + 1; + } + break; + + case BTLessEqualStrategyNumber: + inclusive = true; + /* fall through */ + case BTLessStrategyNumber: + + /* + * Look for the greatest bound that is < or <= lookup value and + * set minoff to its offset. + */ + off = partition_range_datum_bsearch(partsupfunc, + partcollation, + boundinfo, + nvalues, values, + &is_equal); + if (off < 0) + { + /* + * All bounds are greater than the key, so we could only + * expect to find the lookup key in the default partition. + */ + result->scan_default = partition_bound_has_default(boundinfo); + return result; + } + else + { + /* + * See the comment above. + */ + if (is_equal && nvalues < partnatts) + { + while (off >= 1 && off < boundinfo->ndatums - 1) + { + int32 cmpval; + int nextoff; + + nextoff = inclusive ? off + 1 : off - 1; + cmpval = partition_rbound_datum_cmp(partsupfunc, + partcollation, + boundinfo->datums[nextoff], + boundinfo->kind[nextoff], + values, nvalues); + if (cmpval != 0) + break; + + off = nextoff; + } + + Assert(0 == + partition_rbound_datum_cmp(partsupfunc, + partcollation, + boundinfo->datums[off], + boundinfo->kind[off], + values, nvalues)); + + maxoff = inclusive ? off + 1 : off; + } + + /* + * The lookup value falls in the range between some bounds in + * boundinfo. 'off' would be the offset of the greatest bound + * that is <= lookup value, so add off + 1 to the result + * instead as the offset of the upper bound of the greatest + * partition that may contain lookup value. 
If the lookup + * value had exactly matched the bound, but it isn't + * inclusive, no need add the adjacent partition. + */ + else if (!is_equal || inclusive) + maxoff = off + 1; + else + maxoff = off; + } + break; + + default: + elog(ERROR, "invalid strategy number %d", opstrategy); + break; + } + + /* + * Skip a gap and when doing so, check if the bound contains a finite + * value to decide if we need to add the default partition. If it's an + * infinite bound, we need not add the default partition, as having an + * infinite bound means the partition in question catches any values that + * would otherwise be in the default partition. + */ + if (partindices[minoff] < 0) + { + int lastkey = nvalues - 1; + + if (minoff >= 0 && + minoff < boundinfo->ndatums && + boundinfo->kind[minoff][lastkey] == + PARTITION_RANGE_DATUM_VALUE) + result->scan_default = partition_bound_has_default(boundinfo); + + minoff++; + } + + /* + * Skip a gap. See the above comment about how we decide whether or not + * to scan the default partition based whether the datum that will become + * the maximum datum is finite or not. + */ + if (maxoff >= 1 && partindices[maxoff] < 0) + { + int lastkey = nvalues - 1; + + if (maxoff >= 0 && + maxoff <= boundinfo->ndatums && + boundinfo->kind[maxoff - 1][lastkey] == + PARTITION_RANGE_DATUM_VALUE) + result->scan_default = partition_bound_has_default(boundinfo); + + maxoff--; + } + + if (partition_bound_has_default(boundinfo)) + { + /* + * There may exist a range of values unassigned to any non-default + * partition between the datums at minoff and maxoff. Add the default + * partition in that case. + */ + for (i = minoff; i <= maxoff; i++) + { + if (partindices[i] < 0) + { + result->scan_default = true; + break; + } + } + } + + Assert(minoff >= 0 && maxoff >= 0); + if (minoff <= maxoff) + result->bound_offsets = bms_add_range(NULL, minoff, maxoff); + + return result; +} + +/* + * pull_exec_paramids + * Returns a Bitmapset containing the paramids of all Params with + * paramkind = PARAM_EXEC in 'expr'. + */ +static Bitmapset * +pull_exec_paramids(Expr *expr) +{ + Bitmapset *result = NULL; + + (void) pull_exec_paramids_walker((Node *) expr, &result); + + return result; +} + +static bool +pull_exec_paramids_walker(Node *node, Bitmapset **context) +{ + if (node == NULL) + return false; + if (IsA(node, Param)) + { + Param *param = (Param *) node; + + if (param->paramkind == PARAM_EXEC) + *context = bms_add_member(*context, param->paramid); + return false; + } + return expression_tree_walker(node, pull_exec_paramids_walker, + (void *) context); +} + +/* + * analyze_partkey_exprs + * Loop through all pruning steps and identify which ones require + * executor startup-time or executor run-time pruning. + * + * Returns true if any executor partition pruning should be attempted at this + * level. Also fills fields of *pinfo to record how to process each step. + */ +static bool +analyze_partkey_exprs(PartitionedRelPruneInfo *pinfo, List *steps, + int partnatts) +{ + bool doruntimeprune = false; + ListCell *lc; + + /* + * Steps require run-time pruning if they contain EXEC_PARAM Params. + * Otherwise, if their expressions aren't simple Consts, they require + * startup-time pruning. 
+ */ + pinfo->nexprs = list_length(steps) * partnatts; + pinfo->hasexecparam = (bool *) palloc0(sizeof(bool) * pinfo->nexprs); + pinfo->do_initial_prune = false; + pinfo->do_exec_prune = false; + pinfo->execparamids = NULL; + + foreach(lc, steps) + { + PartitionPruneStepOp *step = (PartitionPruneStepOp *) lfirst(lc); + ListCell *lc2; + int keyno; + + if (!IsA(step, PartitionPruneStepOp)) + continue; + + keyno = 0; + foreach(lc2, step->exprs) + { + Expr *expr = lfirst(lc2); + + if (!IsA(expr, Const)) + { + Bitmapset *execparamids = pull_exec_paramids(expr); + bool hasexecparams; + int stateidx = PruneCxtStateIdx(partnatts, + step->step.step_id, + keyno); + + Assert(stateidx < pinfo->nexprs); + hasexecparams = !bms_is_empty(execparamids); + pinfo->hasexecparam[stateidx] = hasexecparams; + pinfo->execparamids = bms_join(pinfo->execparamids, + execparamids); + + if (hasexecparams) + pinfo->do_exec_prune = true; + else + pinfo->do_initial_prune = true; + + doruntimeprune = true; + } + keyno++; + } + } + + return doruntimeprune; +} + +/* + * perform_pruning_base_step + * Determines the indexes of datums that satisfy conditions specified in + * 'opstep'. + * + * Result also contains whether special null-accepting and/or default + * partition need to be scanned. + */ +static PruneStepResult * +perform_pruning_base_step(PartitionPruneContext *context, + PartitionPruneStepOp *opstep) +{ + ListCell *lc1, + *lc2; + int keyno, + nvalues; + Datum values[PARTITION_MAX_KEYS]; + FmgrInfo *partsupfunc; + int stateidx; + + /* + * There better be the same number of expressions and compare functions. + */ + Assert(list_length(opstep->exprs) == list_length(opstep->cmpfns)); + + nvalues = 0; + lc1 = list_head(opstep->exprs); + lc2 = list_head(opstep->cmpfns); + + /* + * Generate the partition lookup key that will be used by one of the + * get_matching_*_bounds functions called below. + */ + for (keyno = 0; keyno < context->partnatts; keyno++) + { + /* + * For hash partitioning, it is possible that values of some keys are + * not provided in operator clauses, but instead the planner found + * that they appeared in a IS NULL clause. + */ + if (bms_is_member(keyno, opstep->nullkeys)) + continue; + + /* + * For range partitioning, we must only perform pruning with values + * for either all partition keys or a prefix thereof. + */ + if (keyno > nvalues && context->strategy == PARTITION_STRATEGY_RANGE) + break; + + if (lc1 != NULL) + { + Expr *expr; + Datum datum; + bool isnull; + + expr = lfirst(lc1); + stateidx = PruneCxtStateIdx(context->partnatts, + opstep->step.step_id, keyno); + if (partkey_datum_from_expr(context, expr, stateidx, + &datum, &isnull)) + { + Oid cmpfn; + + /* + * Since we only allow strict operators in pruning steps, any + * null-valued comparison value must cause the comparison to + * fail, so that no partitions could match. + */ + if (isnull) + { + PruneStepResult *result; + + result = (PruneStepResult *) palloc(sizeof(PruneStepResult)); + result->bound_offsets = NULL; + result->scan_default = false; + result->scan_null = false; + + return result; + } + + /* Set up the stepcmpfuncs entry, unless we already did */ + cmpfn = lfirst_oid(lc2); + Assert(OidIsValid(cmpfn)); + if (cmpfn != context->stepcmpfuncs[stateidx].fn_oid) + { + /* + * If the needed support function is the same one cached + * in the relation's partition key, copy the cached + * FmgrInfo. Otherwise (i.e., when we have a cross-type + * comparison), an actual lookup is required. 
+ */ + if (cmpfn == context->partsupfunc[keyno].fn_oid) + fmgr_info_copy(&context->stepcmpfuncs[stateidx], + &context->partsupfunc[keyno], + context->ppccontext); + else + fmgr_info_cxt(cmpfn, &context->stepcmpfuncs[stateidx], + context->ppccontext); + } + + values[keyno] = datum; + nvalues++; + } + + lc1 = lnext(lc1); + lc2 = lnext(lc2); + } + } + + /* + * Point partsupfunc to the entry for the 0th key of this step; the + * additional support functions, if any, follow consecutively. + */ + stateidx = PruneCxtStateIdx(context->partnatts, opstep->step.step_id, 0); + partsupfunc = &context->stepcmpfuncs[stateidx]; + + switch (context->strategy) + { + case PARTITION_STRATEGY_HASH: + return get_matching_hash_bounds(context, + opstep->opstrategy, + values, nvalues, + partsupfunc, + opstep->nullkeys); + + case PARTITION_STRATEGY_LIST: + return get_matching_list_bounds(context, + opstep->opstrategy, + values[0], nvalues, + &partsupfunc[0], + opstep->nullkeys); + + case PARTITION_STRATEGY_RANGE: + return get_matching_range_bounds(context, + opstep->opstrategy, + values, nvalues, + partsupfunc, + opstep->nullkeys); + + default: + elog(ERROR, "unexpected partition strategy: %d", + (int) context->strategy); + break; + } + + return NULL; +} + +/* + * perform_pruning_combine_step + * Determines the indexes of datums obtained by combining those given + * by the steps identified by cstep->source_stepids using the specified + * combination method + * + * Since cstep may refer to the result of earlier steps, we also receive + * step_results here. + */ +static PruneStepResult * +perform_pruning_combine_step(PartitionPruneContext *context, + PartitionPruneStepCombine *cstep, + PruneStepResult **step_results) +{ + ListCell *lc1; + PruneStepResult *result = NULL; + bool firststep; + + /* + * A combine step without any source steps is an indication to not perform + * any partition pruning, we just return all partitions. + */ + result = (PruneStepResult *) palloc0(sizeof(PruneStepResult)); + if (list_length(cstep->source_stepids) == 0) + { + PartitionBoundInfo boundinfo = context->boundinfo; + + result->bound_offsets = bms_add_range(NULL, 0, boundinfo->ndatums - 1); + result->scan_default = partition_bound_has_default(boundinfo); + result->scan_null = partition_bound_accepts_nulls(boundinfo); + return result; + } + + switch (cstep->combineOp) + { + case PARTPRUNE_COMBINE_UNION: + foreach(lc1, cstep->source_stepids) + { + int step_id = lfirst_int(lc1); + PruneStepResult *step_result; + + /* + * step_results[step_id] must contain a valid result, which is + * confirmed by the fact that cstep's step_id is greater than + * step_id and the fact that results of the individual steps + * are evaluated in sequence of their step_ids. + */ + if (step_id >= cstep->step.step_id) + elog(ERROR, "invalid pruning combine step argument"); + step_result = step_results[step_id]; + Assert(step_result != NULL); + + /* Record any additional datum indexes from this step */ + result->bound_offsets = bms_add_members(result->bound_offsets, + step_result->bound_offsets); + + /* Update whether to scan null and default partitions. 
*/ + if (!result->scan_null) + result->scan_null = step_result->scan_null; + if (!result->scan_default) + result->scan_default = step_result->scan_default; + } + break; + + case PARTPRUNE_COMBINE_INTERSECT: + firststep = true; + foreach(lc1, cstep->source_stepids) + { + int step_id = lfirst_int(lc1); + PruneStepResult *step_result; + + if (step_id >= cstep->step.step_id) + elog(ERROR, "invalid pruning combine step argument"); + step_result = step_results[step_id]; + Assert(step_result != NULL); + + if (firststep) + { + /* Copy step's result the first time. */ + result->bound_offsets = + bms_copy(step_result->bound_offsets); + result->scan_null = step_result->scan_null; + result->scan_default = step_result->scan_default; + firststep = false; + } + else + { + /* Record datum indexes common to both steps */ + result->bound_offsets = + bms_int_members(result->bound_offsets, + step_result->bound_offsets); + + /* Update whether to scan null and default partitions. */ + if (result->scan_null) + result->scan_null = step_result->scan_null; + if (result->scan_default) + result->scan_default = step_result->scan_default; + } + } + break; + } + + return result; +} + +/* + * match_boolean_partition_clause + * + * Sets *outconst to a Const containing true or false value and returns true if + * we're able to match the clause to the partition key as specially-shaped + * Boolean clause. Returns false otherwise with *outconst set to NULL. + */ +static bool +match_boolean_partition_clause(Oid partopfamily, Expr *clause, Expr *partkey, + Expr **outconst) +{ + Expr *leftop; + + *outconst = NULL; + + if (!IsBooleanOpfamily(partopfamily)) + return false; + + if (IsA(clause, BooleanTest)) + { + BooleanTest *btest = (BooleanTest *) clause; + + /* Only IS [NOT] TRUE/FALSE are any good to us */ + if (btest->booltesttype == IS_UNKNOWN || + btest->booltesttype == IS_NOT_UNKNOWN) + return false; + + leftop = btest->arg; + if (IsA(leftop, RelabelType)) + leftop = ((RelabelType *) leftop)->arg; + + if (equal(leftop, partkey)) + *outconst = (btest->booltesttype == IS_TRUE || + btest->booltesttype == IS_NOT_FALSE) + ? (Expr *) makeBoolConst(true, false) + : (Expr *) makeBoolConst(false, false); + + if (*outconst) + return true; + } + else + { + bool is_not_clause = not_clause((Node *) clause); + + leftop = is_not_clause ? get_notclausearg(clause) : clause; + + if (IsA(leftop, RelabelType)) + leftop = ((RelabelType *) leftop)->arg; + + /* Compare to the partition key, and make up a clause ... */ + if (equal(leftop, partkey)) + *outconst = is_not_clause ? + (Expr *) makeBoolConst(false, false) : + (Expr *) makeBoolConst(true, false); + else if (equal(negate_clause((Node *) leftop), partkey)) + *outconst = (Expr *) makeBoolConst(false, false); + + if (*outconst) + return true; + } + + return false; +} + +/* + * partkey_datum_from_expr + * Evaluate expression for potential partition pruning + * + * Evaluate 'expr', whose ExprState is stateidx of the context exprstate + * array; set *value and *isnull to the resulting Datum and nullflag. + * Return true if evaluation was possible, otherwise false. + * + * Note that the evaluated result may be in the per-tuple memory context of + * context->planstate->ps_ExprContext, and we may have leaked other memory + * there too. This memory must be recovered by resetting that ExprContext + * after we're done with the pruning operation (see execPartition.c). 
+ */ +static bool +partkey_datum_from_expr(PartitionPruneContext *context, + Expr *expr, int stateidx, + Datum *value, bool *isnull) +{ + if (IsA(expr, Const)) + { + /* We can always determine the value of a constant */ + Const *con = (Const *) expr; + + *value = con->constvalue; + *isnull = con->constisnull; + return true; + } + else + { + /* + * When called from the executor we'll have a valid planstate so we + * may be able to evaluate an expression which could not be folded to + * a Const during planning. Since run-time pruning can occur both + * during initialization of the executor or while it's running, we + * must be careful here to evaluate expressions containing PARAM_EXEC + * Params only when told it's OK. + */ + if (context->planstate && + (context->evalexecparams || + !context->exprhasexecparam[stateidx])) + { + ExprState *exprstate; + ExprContext *ectx; + + exprstate = context->exprstates[stateidx]; + ectx = context->planstate->ps_ExprContext; + *value = ExecEvalExprSwitchContext(exprstate, ectx, isnull); + return true; + } + } + + return false; +} diff --git a/src/backend/po/de.po b/src/backend/po/de.po index 09622423fb..ca52df6731 100644 --- a/src/backend/po/de.po +++ b/src/backend/po/de.po @@ -1,14 +1,14 @@ # German message translation file for PostgreSQL server -# Peter Eisentraut , 2001 - 2017. +# Peter Eisentraut , 2001 - 2018. # # Use these quotes: »%s« # msgid "" msgstr "" -"Project-Id-Version: PostgreSQL 10\n" +"Project-Id-Version: PostgreSQL 11\n" "Report-Msgid-Bugs-To: pgsql-bugs@postgresql.org\n" -"POT-Creation-Date: 2017-03-13 20:08+0000\n" -"PO-Revision-Date: 2017-03-13 17:04-0400\n" +"POT-Creation-Date: 2018-05-21 12:39+0000\n" +"PO-Revision-Date: 2018-05-21 11:21-0400\n" "Last-Translator: Peter Eisentraut \n" "Language-Team: German \n" "Language: de\n" @@ -17,48 +17,60 @@ msgstr "" "Content-Transfer-Encoding: 8bit\n" "Plural-Forms: nplurals=2; plural=(n != 1);\n" -#: ../common/config_info.c:131 ../common/config_info.c:139 -#: ../common/config_info.c:147 ../common/config_info.c:155 -#: ../common/config_info.c:163 ../common/config_info.c:171 -#: ../common/config_info.c:179 ../common/config_info.c:187 -#: ../common/config_info.c:195 +#: ../common/config_info.c:130 ../common/config_info.c:138 +#: ../common/config_info.c:146 ../common/config_info.c:154 +#: ../common/config_info.c:162 ../common/config_info.c:170 +#: ../common/config_info.c:178 ../common/config_info.c:186 +#: ../common/config_info.c:194 msgid "not recorded" msgstr "nicht aufgezeichnet" -#: ../common/controldata_utils.c:57 commands/copy.c:3028 -#: commands/extension.c:3323 utils/adt/genfile.c:134 +#: ../common/controldata_utils.c:58 commands/copy.c:3147 +#: commands/extension.c:3330 utils/adt/genfile.c:151 #, c-format msgid "could not open file \"%s\" for reading: %m" msgstr "konnte Datei »%s« nicht zum Lesen öffnen: %m" -#: ../common/controldata_utils.c:61 +#: ../common/controldata_utils.c:62 #, c-format msgid "%s: could not open file \"%s\" for reading: %s\n" msgstr "%s: konnte Datei »%s« nicht zum Lesen öffnen: %s\n" -#: ../common/controldata_utils.c:71 access/transam/timeline.c:345 -#: access/transam/xlog.c:3368 access/transam/xlog.c:10701 -#: access/transam/xlog.c:10714 access/transam/xlog.c:11106 -#: access/transam/xlog.c:11149 access/transam/xlog.c:11188 -#: access/transam/xlog.c:11231 access/transam/xlogfuncs.c:664 -#: access/transam/xlogfuncs.c:683 commands/extension.c:3333 libpq/hba.c:496 -#: replication/logical/origin.c:658 replication/logical/origin.c:688 -#: 
replication/logical/reorderbuffer.c:3055 replication/walsender.c:469 -#: storage/file/copydir.c:176 utils/adt/genfile.c:151 utils/adt/misc.c:924 +#: ../common/controldata_utils.c:75 access/transam/timeline.c:347 +#: access/transam/xlog.c:3407 access/transam/xlog.c:10857 +#: access/transam/xlog.c:10870 access/transam/xlog.c:11295 +#: access/transam/xlog.c:11375 access/transam/xlog.c:11414 +#: access/transam/xlog.c:11457 access/transam/xlogfuncs.c:658 +#: access/transam/xlogfuncs.c:677 commands/extension.c:3340 libpq/hba.c:499 +#: replication/logical/origin.c:701 replication/logical/origin.c:731 +#: replication/logical/reorderbuffer.c:3101 replication/walsender.c:507 +#: storage/file/copydir.c:195 utils/adt/genfile.c:168 utils/adt/misc.c:944 #, c-format msgid "could not read file \"%s\": %m" msgstr "konnte Datei »%s« nicht lesen: %m" -#: ../common/controldata_utils.c:74 +#: ../common/controldata_utils.c:78 #, c-format msgid "%s: could not read file \"%s\": %s\n" msgstr "%s: konnte Datei »%s« nicht lesen: %s\n" -#: ../common/controldata_utils.c:95 +#: ../common/controldata_utils.c:86 +#, fuzzy, c-format +#| msgid "could not read file \"%s\": read %d of %zu" +msgid "could not read file \"%s\": read %d bytes, expected %d" +msgstr "konnte Datei »%s« nicht lesen: %d von %zu gelesen" + +#: ../common/controldata_utils.c:90 +#, fuzzy, c-format +#| msgid "could not read file \"%s\": read %d of %zu" +msgid "%s: could not read file \"%s\": read %d bytes, expected %d\n" +msgstr "konnte Datei »%s« nicht lesen: %d von %zu gelesen" + +#: ../common/controldata_utils.c:112 msgid "byte ordering mismatch" msgstr "falsche Byte-Reihenfolge" -#: ../common/controldata_utils.c:97 +#: ../common/controldata_utils.c:114 #, c-format msgid "" "WARNING: possible byte ordering mismatch\n" @@ -121,33 +133,33 @@ msgstr "Speicher aufgebraucht\n" msgid "cannot duplicate null pointer (internal error)\n" msgstr "kann NULL-Zeiger nicht kopieren (interner Fehler)\n" -#: ../common/file_utils.c:82 ../common/file_utils.c:167 +#: ../common/file_utils.c:82 ../common/file_utils.c:186 #, c-format msgid "%s: could not stat file \"%s\": %s\n" msgstr "%s: konnte »stat« für Datei »%s« nicht ausführen: %s\n" -#: ../common/file_utils.c:143 +#: ../common/file_utils.c:162 #, c-format msgid "%s: could not open directory \"%s\": %s\n" msgstr "%s: konnte Verzeichnis »%s« nicht öffnen: %s\n" -#: ../common/file_utils.c:179 +#: ../common/file_utils.c:198 #, c-format msgid "%s: could not read directory \"%s\": %s\n" msgstr "%s: konnte Verzeichnis »%s« nicht lesen: %s\n" -#: ../common/file_utils.c:212 ../common/file_utils.c:272 -#: ../common/file_utils.c:348 +#: ../common/file_utils.c:231 ../common/file_utils.c:291 +#: ../common/file_utils.c:367 #, c-format msgid "%s: could not open file \"%s\": %s\n" msgstr "%s: konnte Datei »%s« nicht öffnen: %s\n" -#: ../common/file_utils.c:285 ../common/file_utils.c:357 +#: ../common/file_utils.c:304 ../common/file_utils.c:376 #, c-format msgid "%s: could not fsync file \"%s\": %s\n" msgstr "%s: konnte Datei »%s« nicht fsyncen: %s\n" -#: ../common/file_utils.c:368 +#: ../common/file_utils.c:387 #, c-format msgid "%s: could not rename file \"%s\" to \"%s\": %s\n" msgstr "%s: konnte Datei »%s« nicht in »%s« umbenennen: %s\n" @@ -168,42 +180,42 @@ msgid "could not close directory \"%s\": %s\n" msgstr "konnte Verzeichnis »%s« nicht schließen: %s\n" #: ../common/psprintf.c:179 ../port/path.c:630 ../port/path.c:668 -#: ../port/path.c:685 access/transam/twophase.c:1261 -#: access/transam/xlog.c:6317 lib/stringinfo.c:300 
libpq/auth.c:1011 -#: libpq/auth.c:1376 libpq/auth.c:1444 libpq/auth.c:1960 -#: postmaster/bgworker.c:318 postmaster/bgworker.c:874 -#: postmaster/postmaster.c:2363 postmaster/postmaster.c:2385 -#: postmaster/postmaster.c:3935 postmaster/postmaster.c:4635 -#: postmaster/postmaster.c:4710 postmaster/postmaster.c:5379 -#: postmaster/postmaster.c:5660 -#: replication/libpqwalreceiver/libpqwalreceiver.c:246 -#: replication/logical/logical.c:168 storage/buffer/localbuf.c:436 -#: storage/file/fd.c:736 storage/file/fd.c:1164 storage/file/fd.c:1282 -#: storage/file/fd.c:1993 storage/ipc/procarray.c:1054 -#: storage/ipc/procarray.c:1540 storage/ipc/procarray.c:1547 -#: storage/ipc/procarray.c:1961 storage/ipc/procarray.c:2564 -#: utils/adt/formatting.c:1509 utils/adt/formatting.c:1629 -#: utils/adt/formatting.c:1750 utils/adt/pg_locale.c:462 -#: utils/adt/pg_locale.c:646 utils/adt/regexp.c:219 utils/adt/varlena.c:4427 -#: utils/adt/varlena.c:4448 utils/fmgr/dfmgr.c:216 utils/hash/dynahash.c:429 -#: utils/hash/dynahash.c:535 utils/hash/dynahash.c:1047 utils/mb/mbutils.c:376 -#: utils/mb/mbutils.c:709 utils/misc/guc.c:3954 utils/misc/guc.c:3970 -#: utils/misc/guc.c:3983 utils/misc/guc.c:6929 utils/misc/tzparser.c:468 -#: utils/mmgr/aset.c:404 utils/mmgr/dsa.c:713 utils/mmgr/dsa.c:795 -#: utils/mmgr/mcxt.c:725 utils/mmgr/mcxt.c:760 utils/mmgr/mcxt.c:797 -#: utils/mmgr/mcxt.c:834 utils/mmgr/mcxt.c:868 utils/mmgr/mcxt.c:897 -#: utils/mmgr/mcxt.c:931 utils/mmgr/mcxt.c:982 utils/mmgr/mcxt.c:1016 -#: utils/mmgr/mcxt.c:1050 +#: ../port/path.c:685 access/transam/twophase.c:1372 access/transam/xlog.c:6443 +#: lib/dshash.c:246 lib/stringinfo.c:277 libpq/auth.c:1150 libpq/auth.c:1516 +#: libpq/auth.c:1584 libpq/auth.c:2102 postmaster/bgworker.c:337 +#: postmaster/bgworker.c:914 postmaster/postmaster.c:2390 +#: postmaster/postmaster.c:2412 postmaster/postmaster.c:3979 +#: postmaster/postmaster.c:4687 postmaster/postmaster.c:4762 +#: postmaster/postmaster.c:5454 postmaster/postmaster.c:5791 +#: replication/libpqwalreceiver/libpqwalreceiver.c:260 +#: replication/logical/logical.c:174 storage/buffer/localbuf.c:436 +#: storage/file/fd.c:781 storage/file/fd.c:1219 storage/file/fd.c:1380 +#: storage/file/fd.c:2286 storage/ipc/procarray.c:1058 +#: storage/ipc/procarray.c:1546 storage/ipc/procarray.c:1553 +#: storage/ipc/procarray.c:1970 storage/ipc/procarray.c:2581 +#: utils/adt/cryptohashes.c:45 utils/adt/cryptohashes.c:65 +#: utils/adt/formatting.c:1568 utils/adt/formatting.c:1690 +#: utils/adt/formatting.c:1813 utils/adt/pg_locale.c:468 +#: utils/adt/pg_locale.c:652 utils/adt/regexp.c:219 utils/fmgr/dfmgr.c:221 +#: utils/hash/dynahash.c:448 utils/hash/dynahash.c:557 +#: utils/hash/dynahash.c:1069 utils/mb/mbutils.c:365 utils/mb/mbutils.c:698 +#: utils/misc/guc.c:4220 utils/misc/guc.c:4236 utils/misc/guc.c:4249 +#: utils/misc/guc.c:7224 utils/misc/tzparser.c:468 utils/mmgr/aset.c:482 +#: utils/mmgr/dsa.c:713 utils/mmgr/dsa.c:795 utils/mmgr/generation.c:249 +#: utils/mmgr/mcxt.c:796 utils/mmgr/mcxt.c:832 utils/mmgr/mcxt.c:870 +#: utils/mmgr/mcxt.c:908 utils/mmgr/mcxt.c:944 utils/mmgr/mcxt.c:975 +#: utils/mmgr/mcxt.c:1011 utils/mmgr/mcxt.c:1063 utils/mmgr/mcxt.c:1098 +#: utils/mmgr/mcxt.c:1133 utils/mmgr/slab.c:239 #, c-format msgid "out of memory" msgstr "Speicher aufgebraucht" -#: ../common/relpath.c:59 +#: ../common/relpath.c:58 #, c-format msgid "invalid fork name" msgstr "ungültiger Fork-Name" -#: ../common/relpath.c:60 +#: ../common/relpath.c:59 #, c-format msgid "Valid fork names are \"main\", \"fsm\", \"vm\", 
and \"init\"." msgstr "Gültige Fork-Namen sind »main«, »fsm«, »vm« und »init«." @@ -253,12 +265,17 @@ msgstr "konnte »stat« für Datei oder Verzeichnis »%s« nicht ausführen: %s\ msgid "could not remove file or directory \"%s\": %s\n" msgstr "konnte Datei oder Verzeichnis »%s« nicht entfernen: %s\n" +#: ../common/saslprep.c:1090 +#, c-format +msgid "password too long" +msgstr "Passwort zu lang" + #: ../common/username.c:43 #, c-format msgid "could not look up effective user ID %ld: %s" msgstr "konnte effektive Benutzer-ID %ld nicht nachschlagen: %s" -#: ../common/username.c:45 libpq/auth.c:1907 +#: ../common/username.c:45 libpq/auth.c:2049 msgid "user does not exist" msgstr "Benutzer existiert nicht" @@ -365,159 +382,194 @@ msgstr "konnte aktuelles Arbeitsverzeichnis nicht ermitteln: %s\n" msgid "unrecognized error %d" msgstr "unbekannter Fehler %d" -#: ../port/win32security.c:68 -#, c-format -msgid "could not open process token: error code %lu\n" -msgstr "konnte Prozess-Token nicht öffnen: Fehlercode %lu\n" - -#: ../port/win32security.c:89 +#: ../port/win32security.c:62 #, c-format msgid "could not get SID for Administrators group: error code %lu\n" msgstr "konnte SID der Administrators-Gruppe nicht ermitteln: Fehlercode %lu\n" -#: ../port/win32security.c:99 +#: ../port/win32security.c:72 #, c-format msgid "could not get SID for PowerUsers group: error code %lu\n" msgstr "konnte SID der PowerUsers-Gruppe nicht ermitteln: Fehlercode %lu\n" -#: access/brin/brin.c:820 +#: ../port/win32security.c:80 +#, c-format +msgid "could not check access token membership: error code %lu\n" +msgstr "konnte Access-Token-Mitgliedschaft nicht prüfen: Fehlercode %lu\n" + +#: access/brin/brin.c:200 +#, c-format +msgid "request for BRIN range summarization for index \"%s\" page %u was not recorded" +msgstr "" + +#: access/brin/brin.c:880 access/brin/brin.c:951 +#, c-format +msgid "block number out of range: %s" +msgstr "Blocknummer ist außerhalb des gültigen Bereichs: %s" + +#: access/brin/brin.c:903 access/brin/brin.c:974 #, c-format msgid "\"%s\" is not a BRIN index" msgstr "»%s« ist kein BRIN-Index" -#: access/brin/brin.c:836 +#: access/brin/brin.c:919 access/brin/brin.c:990 #, c-format msgid "could not open parent table of index %s" msgstr "konnte Basistabelle von Index %s nicht öffnen" -#: access/brin/brin_pageops.c:76 access/brin/brin_pageops.c:360 -#: access/brin/brin_pageops.c:826 +#: access/brin/brin_pageops.c:77 access/brin/brin_pageops.c:363 +#: access/brin/brin_pageops.c:844 access/gin/ginentrypage.c:110 +#: access/gist/gist.c:1376 access/nbtree/nbtinsert.c:678 +#: access/nbtree/nbtsort.c:839 access/spgist/spgdoinsert.c:1957 +#, c-format +msgid "index row size %zu exceeds maximum %zu for index \"%s\"" +msgstr "Größe %zu der Indexzeile überschreitet Maximum %zu für Index »%s«" + +#: access/brin/brin_revmap.c:382 access/brin/brin_revmap.c:388 +#, c-format +msgid "corrupted BRIN index: inconsistent range map" +msgstr "verfälschter BRIN-Index: inkonsistente Range-Map" + +#: access/brin/brin_revmap.c:404 #, c-format -msgid "index row size %lu exceeds maximum %lu for index \"%s\"" -msgstr "Größe %lu der Indexzeile überschreitet Maximum %lu für Index »%s«" +msgid "leftover placeholder tuple detected in BRIN index \"%s\", deleting" +msgstr "übrig gebliebenes Platzhaltertupel in BRIN-Index »%s« entdeckt, wird gelöscht" -#: access/brin/brin_revmap.c:459 +#: access/brin/brin_revmap.c:601 #, c-format msgid "unexpected page type 0x%04X in BRIN index \"%s\" block %u" msgstr "unerwarteter Seitentyp 0x%04X in 
BRIN-Index »%s« Block %u" -#: access/brin/brin_validate.c:116 +#: access/brin/brin_validate.c:116 access/gin/ginvalidate.c:149 +#: access/gist/gistvalidate.c:146 access/hash/hashvalidate.c:132 +#: access/nbtree/nbtvalidate.c:110 access/spgist/spgvalidate.c:165 #, c-format -msgid "brin operator family \"%s\" contains function %s with invalid support number %d" -msgstr "BRIN-Operatorfamilie »%s« enthält Funktion %s mit ungültiger Support-Nummer %d" +msgid "operator family \"%s\" of access method %s contains function %s with invalid support number %d" +msgstr "Operatorfamilie »%s« für Zugriffsmethode %s enthält Funktion %s mit ungültiger Support-Nummer %d" -#: access/brin/brin_validate.c:132 +#: access/brin/brin_validate.c:132 access/gin/ginvalidate.c:161 +#: access/gist/gistvalidate.c:158 access/hash/hashvalidate.c:115 +#: access/nbtree/nbtvalidate.c:122 access/spgist/spgvalidate.c:177 #, c-format -msgid "brin operator family \"%s\" contains function %s with wrong signature for support number %d" -msgstr "BRIN-Operatorfamilie »%s« enthält Funktion %s mit falscher Signatur für Support-Nummer %d" +msgid "operator family \"%s\" of access method %s contains function %s with wrong signature for support number %d" +msgstr "Operatorfamilie »%s« für Zugriffsmethode %s enthält Funktion %s mit falscher Signatur für Support-Nummer %d" -#: access/brin/brin_validate.c:154 +#: access/brin/brin_validate.c:154 access/gin/ginvalidate.c:180 +#: access/gist/gistvalidate.c:178 access/hash/hashvalidate.c:153 +#: access/nbtree/nbtvalidate.c:142 access/spgist/spgvalidate.c:196 #, c-format -msgid "brin operator family \"%s\" contains operator %s with invalid strategy number %d" -msgstr "BRIN-Operatorfamilie »%s« enthält Operator %s mit ungültiger Strategienummer %d" +msgid "operator family \"%s\" of access method %s contains operator %s with invalid strategy number %d" +msgstr "Operatorfamilie »%s« für Zugriffsmethode %s enthält Operator %s mit ungültiger Strategienummer %d" -#: access/brin/brin_validate.c:183 +#: access/brin/brin_validate.c:183 access/gin/ginvalidate.c:193 +#: access/hash/hashvalidate.c:166 access/nbtree/nbtvalidate.c:155 +#: access/spgist/spgvalidate.c:209 #, c-format -msgid "brin operator family \"%s\" contains invalid ORDER BY specification for operator %s" -msgstr "BRIN-Operatorfamilie »%s« enthält ungültige ORDER-BY-Angabe für Operator %s" +msgid "operator family \"%s\" of access method %s contains invalid ORDER BY specification for operator %s" +msgstr "Operatorfamilie »%s« für Zugriffsmethode %s enthält ungültige ORDER-BY-Angabe für Operator %s" -#: access/brin/brin_validate.c:196 +#: access/brin/brin_validate.c:196 access/gin/ginvalidate.c:206 +#: access/gist/gistvalidate.c:226 access/hash/hashvalidate.c:179 +#: access/nbtree/nbtvalidate.c:168 access/spgist/spgvalidate.c:222 #, c-format -msgid "brin operator family \"%s\" contains operator %s with wrong signature" -msgstr "BRIN-Operatorfamilie »%s« enthält Operator %s mit falscher Signatur" +msgid "operator family \"%s\" of access method %s contains operator %s with wrong signature" +msgstr "Operatorfamilie »%s« für Zugriffsmethode %s enthält Operator %s mit falscher Signatur" -#: access/brin/brin_validate.c:234 +#: access/brin/brin_validate.c:234 access/hash/hashvalidate.c:219 +#: access/nbtree/nbtvalidate.c:226 access/spgist/spgvalidate.c:249 #, c-format -msgid "brin operator family \"%s\" is missing operator(s) for types %s and %s" -msgstr "in BRIN-Operatorfamilie »%s« fehlen Operatoren für Typen %s und %s" +msgid "operator family 
\"%s\" of access method %s is missing operator(s) for types %s and %s" +msgstr "in Operatorfamilie »%s« für Zugriffsmethode %s fehlen Operatoren für Typen %s und %s" #: access/brin/brin_validate.c:244 #, c-format -msgid "brin operator family \"%s\" is missing support function(s) for types %s and %s" -msgstr "in BRIN-Operatorfamilie »%s« fehlen Support-Funktionen für Typen %s und %s" +msgid "operator family \"%s\" of access method %s is missing support function(s) for types %s and %s" +msgstr "in Operatorfamilie »%s« für Zugriffsmethode %s fehlen Support-Funktionen für Typen %s und %s" -#: access/brin/brin_validate.c:257 +#: access/brin/brin_validate.c:257 access/hash/hashvalidate.c:233 +#: access/nbtree/nbtvalidate.c:250 access/spgist/spgvalidate.c:282 #, c-format -msgid "brin operator class \"%s\" is missing operator(s)" -msgstr "in BRIN-Operatorklasse »%s« fehlen Operatoren" +msgid "operator class \"%s\" of access method %s is missing operator(s)" +msgstr "in Operatorklasse »%s« für Zugriffsmethode %s fehlen Operatoren" -#: access/brin/brin_validate.c:268 +#: access/brin/brin_validate.c:268 access/gin/ginvalidate.c:247 +#: access/gist/gistvalidate.c:266 #, c-format -msgid "brin operator class \"%s\" is missing support function %d" -msgstr "in BRIN-Operatorklasse »%s« fehlt Support-Funktion %d" +msgid "operator class \"%s\" of access method %s is missing support function %d" +msgstr "in Operatorklasse »%s« für Zugriffsmethode %s fehlt Support-Funktion %d" -#: access/common/heaptuple.c:708 access/common/heaptuple.c:1407 +#: access/common/heaptuple.c:1090 access/common/heaptuple.c:1806 #, c-format msgid "number of columns (%d) exceeds limit (%d)" msgstr "Anzahl der Spalten (%d) überschreitet Maximum (%d)" -#: access/common/indextuple.c:60 +#: access/common/indextuple.c:63 #, c-format msgid "number of index columns (%d) exceeds limit (%d)" msgstr "Anzahl der Indexspalten (%d) überschreitet Maximum (%d)" -#: access/common/indextuple.c:176 access/spgist/spgutils.c:647 +#: access/common/indextuple.c:179 access/spgist/spgutils.c:685 #, c-format msgid "index row requires %zu bytes, maximum size is %zu" msgstr "Indexzeile benötigt %zu Bytes, Maximalgröße ist %zu" -#: access/common/printtup.c:290 tcop/fastpath.c:182 tcop/fastpath.c:544 -#: tcop/postgres.c:1727 +#: access/common/printtup.c:365 tcop/fastpath.c:180 tcop/fastpath.c:530 +#: tcop/postgres.c:1755 #, c-format msgid "unsupported format code: %d" msgstr "nicht unterstützter Formatcode: %d" -#: access/common/reloptions.c:531 +#: access/common/reloptions.c:568 #, c-format msgid "user-defined relation parameter types limit exceeded" msgstr "Wertebereich des Typs für benutzerdefinierte Relationsparameter überschritten" -#: access/common/reloptions.c:812 +#: access/common/reloptions.c:849 #, c-format msgid "RESET must not include values for parameters" msgstr "RESET darf keinen Parameterwert enthalten" -#: access/common/reloptions.c:845 +#: access/common/reloptions.c:881 #, c-format msgid "unrecognized parameter namespace \"%s\"" msgstr "unbekannter Parameter-Namensraum »%s«" -#: access/common/reloptions.c:1087 parser/parse_clause.c:259 +#: access/common/reloptions.c:1121 parser/parse_clause.c:277 #, c-format msgid "unrecognized parameter \"%s\"" msgstr "unbekannter Parameter »%s«" -#: access/common/reloptions.c:1117 +#: access/common/reloptions.c:1151 #, c-format msgid "parameter \"%s\" specified more than once" msgstr "Parameter »%s« mehrmals angegeben" -#: access/common/reloptions.c:1133 +#: access/common/reloptions.c:1167 #, c-format msgid 
"invalid value for boolean option \"%s\": %s" msgstr "ungültiger Wert für Boole’sche Option »%s«: »%s«" -#: access/common/reloptions.c:1145 +#: access/common/reloptions.c:1179 #, c-format msgid "invalid value for integer option \"%s\": %s" msgstr "ungültiger Wert für ganzzahlige Option »%s«: »%s«" -#: access/common/reloptions.c:1151 access/common/reloptions.c:1171 +#: access/common/reloptions.c:1185 access/common/reloptions.c:1205 #, c-format msgid "value %s out of bounds for option \"%s\"" msgstr "Wert %s ist außerhalb des gültigen Bereichs für Option »%s«" -#: access/common/reloptions.c:1153 +#: access/common/reloptions.c:1187 #, c-format msgid "Valid values are between \"%d\" and \"%d\"." msgstr "Gültige Werte sind zwischen »%d« und »%d«." -#: access/common/reloptions.c:1165 +#: access/common/reloptions.c:1199 #, c-format msgid "invalid value for floating point option \"%s\": %s" msgstr "ungültiger Wert für Gleitkommaoption »%s«: »%s«" -#: access/common/reloptions.c:1173 +#: access/common/reloptions.c:1207 #, c-format msgid "Valid values are between \"%f\" and \"%f\"." msgstr "Gültige Werte sind zwischen »%f« und »%f«." @@ -532,18 +584,18 @@ msgstr "Zurückgegebener Typ %1$s stimmt in Spalte %3$d nicht mit erwartetem Typ msgid "Number of returned columns (%d) does not match expected column count (%d)." msgstr "Anzahl der zurückgegebenen Spalten (%d) entspricht nicht der erwarteten Spaltenanzahl (%d)." -#: access/common/tupconvert.c:316 +#: access/common/tupconvert.c:329 #, c-format msgid "Attribute \"%s\" of type %s does not match corresponding attribute of type %s." msgstr "Attribut »%s« von Typ %s stimmt nicht mit dem entsprechenden Attribut von Typ %s überein." -#: access/common/tupconvert.c:328 +#: access/common/tupconvert.c:341 #, c-format msgid "Attribute \"%s\" of type %s does not exist in type %s." msgstr "Attribut »%s« von Typ %s existiert nicht in Typ %s." -#: access/common/tupdesc.c:722 parser/parse_clause.c:791 -#: parser/parse_relation.c:1517 +#: access/common/tupdesc.c:834 parser/parse_clause.c:819 +#: parser/parse_relation.c:1539 #, c-format msgid "column \"%s\" cannot be declared SETOF" msgstr "Spalte »%s« kann nicht als SETOF deklariert werden" @@ -558,109 +610,73 @@ msgstr "Posting-Liste ist zu lang" msgid "Reduce maintenance_work_mem." msgstr "Reduzieren Sie maintenance_work_mem." -#: access/gin/ginentrypage.c:110 access/gist/gist.c:1363 -#: access/nbtree/nbtinsert.c:577 access/nbtree/nbtsort.c:488 -#: access/spgist/spgdoinsert.c:1933 -#, c-format -msgid "index row size %zu exceeds maximum %zu for index \"%s\"" -msgstr "Größe %zu der Indexzeile überschreitet Maximum %zu für Index »%s«" - -#: access/gin/ginfast.c:991 access/transam/xlog.c:10136 -#: access/transam/xlog.c:10640 access/transam/xlogfuncs.c:292 -#: access/transam/xlogfuncs.c:319 access/transam/xlogfuncs.c:358 -#: access/transam/xlogfuncs.c:379 access/transam/xlogfuncs.c:400 -#: access/transam/xlogfuncs.c:470 access/transam/xlogfuncs.c:526 +#: access/gin/ginfast.c:1023 access/transam/xlog.c:10269 +#: access/transam/xlog.c:10796 access/transam/xlogfuncs.c:286 +#: access/transam/xlogfuncs.c:313 access/transam/xlogfuncs.c:352 +#: access/transam/xlogfuncs.c:373 access/transam/xlogfuncs.c:394 +#: access/transam/xlogfuncs.c:464 access/transam/xlogfuncs.c:520 #, c-format msgid "recovery is in progress" msgstr "Wiederherstellung läuft" -#: access/gin/ginfast.c:992 +#: access/gin/ginfast.c:1024 #, c-format msgid "GIN pending list cannot be cleaned up during recovery." 
msgstr "GIN-Pending-Liste kann nicht während der Wiederherstellung aufgeräumt werden." -#: access/gin/ginfast.c:999 +#: access/gin/ginfast.c:1031 #, c-format msgid "\"%s\" is not a GIN index" msgstr "»%s« ist kein GIN-Index" -#: access/gin/ginfast.c:1010 +#: access/gin/ginfast.c:1042 #, c-format msgid "cannot access temporary indexes of other sessions" msgstr "auf temporäre Indexe anderer Sitzungen kann nicht zugegriffen werden" -#: access/gin/ginscan.c:405 +#: access/gin/ginscan.c:402 #, c-format msgid "old GIN indexes do not support whole-index scans nor searches for nulls" msgstr "alte GIN-Indexe unterstützen keine Scans des ganzen Index oder Suchen nach NULL-Werten" -#: access/gin/ginscan.c:406 +#: access/gin/ginscan.c:403 #, c-format msgid "To fix this, do REINDEX INDEX \"%s\"." msgstr "Um das zu reparieren, führen Sie REINDEX INDEX \"%s\" aus." -#: access/gin/ginutil.c:134 executor/execQual.c:4865 -#: utils/adt/arrayfuncs.c:3803 utils/adt/arrayfuncs.c:6325 -#: utils/adt/rowtypes.c:927 +#: access/gin/ginutil.c:138 executor/execExpr.c:1867 +#: utils/adt/arrayfuncs.c:3777 utils/adt/arrayfuncs.c:6375 +#: utils/adt/rowtypes.c:935 #, c-format msgid "could not identify a comparison function for type %s" msgstr "konnte keine Vergleichsfunktion für Typ %s ermitteln" -#: access/gin/ginvalidate.c:93 -#, c-format -msgid "gin operator family \"%s\" contains support procedure %s with cross-type registration" -msgstr "GIN-Operatorfamilie »%s« enthält Support-Prozedur %s mit typübergreifender Registrierung" - -#: access/gin/ginvalidate.c:149 -#, c-format -msgid "gin operator family \"%s\" contains function %s with invalid support number %d" -msgstr "GIN-Operatorfamilie »%s« enthält Funktion %s mit ungültiger Support-Nummer %d" - -#: access/gin/ginvalidate.c:161 -#, c-format -msgid "gin operator family \"%s\" contains function %s with wrong signature for support number %d" -msgstr "GIN-Operatorfamilie »%s« enthält Funktion %s mit falscher Signatur für Support-Nummer %d" - -#: access/gin/ginvalidate.c:180 -#, c-format -msgid "gin operator family \"%s\" contains operator %s with invalid strategy number %d" -msgstr "GIN-Operatorfamilie »%s« enthält Operator %s mit ungültiger Strategienummer %d" - -#: access/gin/ginvalidate.c:193 -#, c-format -msgid "gin operator family \"%s\" contains invalid ORDER BY specification for operator %s" -msgstr "GIN-Operatorfamilie »%s« enthält ungültige ORDER-BY-Angabe für Operator %s" - -#: access/gin/ginvalidate.c:206 -#, c-format -msgid "gin operator family \"%s\" contains operator %s with wrong signature" -msgstr "GIN-Operatorfamilie »%s« enthält Operator %s mit falscher Signatur" - -#: access/gin/ginvalidate.c:247 +#: access/gin/ginvalidate.c:93 access/gist/gistvalidate.c:93 +#: access/hash/hashvalidate.c:99 access/spgist/spgvalidate.c:99 #, c-format -msgid "gin operator class \"%s\" is missing support function %d" -msgstr "in GIN-Operatorklasse »%s« fehlt Support-Funktion %d" +msgid "operator family \"%s\" of access method %s contains support procedure %s with different left and right input types" +msgstr "Operatorfamilie »%s« für Zugriffsmethode %s enthält Support-Prozedur %s mit unterschiedlichen linken und rechten Eingabetypen" #: access/gin/ginvalidate.c:257 #, c-format -msgid "gin operator class \"%s\" is missing support function %d or %d" -msgstr "in GIN-Operatorklasse »%s« fehlt Support-Funktion %d oder %d" +msgid "operator class \"%s\" of access method %s is missing support function %d or %d" +msgstr "in Operatorklasse »%s« für Zugriffsmethode %s fehlt 
Support-Funktion %d oder %d" -#: access/gist/gist.c:706 access/gist/gistvacuum.c:258 +#: access/gist/gist.c:713 access/gist/gistvacuum.c:257 #, c-format msgid "index \"%s\" contains an inner tuple marked as invalid" msgstr "Index »%s« enthält ein inneres Tupel, das als ungültig markiert ist" -#: access/gist/gist.c:708 access/gist/gistvacuum.c:260 +#: access/gist/gist.c:715 access/gist/gistvacuum.c:259 #, c-format msgid "This is caused by an incomplete page split at crash recovery before upgrading to PostgreSQL 9.1." msgstr "Das kommt von einem unvollständigen Page-Split bei der Crash-Recovery vor dem Upgrade auf PostgreSQL 9.1." -#: access/gist/gist.c:709 access/gist/gistutil.c:739 -#: access/gist/gistutil.c:750 access/gist/gistvacuum.c:261 -#: access/hash/hashutil.c:174 access/hash/hashutil.c:185 -#: access/hash/hashutil.c:197 access/hash/hashutil.c:218 -#: access/nbtree/nbtpage.c:519 access/nbtree/nbtpage.c:530 +#: access/gist/gist.c:716 access/gist/gistutil.c:759 access/gist/gistutil.c:770 +#: access/gist/gistvacuum.c:260 access/hash/hashutil.c:241 +#: access/hash/hashutil.c:252 access/hash/hashutil.c:264 +#: access/hash/hashutil.c:285 access/nbtree/nbtpage.c:678 +#: access/nbtree/nbtpage.c:689 #, c-format msgid "Please REINDEX it." msgstr "Bitte führen Sie REINDEX für den Index aus." @@ -675,7 +691,7 @@ msgstr "ungültiger Wert für Option »buffering«" msgid "Valid values are \"on\", \"off\", and \"auto\"." msgstr "Gültige Werte sind »on«, »off« und »auto«." -#: access/gist/gistbuildbuffers.c:778 utils/sort/logtape.c:231 +#: access/gist/gistbuildbuffers.c:778 utils/sort/logtape.c:255 #, c-format msgid "could not write block %ld of temporary file: %m" msgstr "konnte Block %ld von temporärer Datei nicht schreiben: %m" @@ -690,276 +706,206 @@ msgstr "Picksplit-Methode für Spalte %d von Index »%s« fehlgeschlagen" msgid "The index is not optimal. To optimize it, contact a developer, or try to use the column as the second one in the CREATE INDEX command." msgstr "Der Index ist nicht optimal. Um ihn zu optimieren, kontaktieren Sie einen Entwickler oder versuchen Sie, die Spalte als die zweite im CREATE-INDEX-Befehl zu verwenden." 
-#: access/gist/gistutil.c:736 access/hash/hashutil.c:171 -#: access/nbtree/nbtpage.c:516 +#: access/gist/gistutil.c:756 access/hash/hashutil.c:238 +#: access/nbtree/nbtpage.c:675 #, c-format msgid "index \"%s\" contains unexpected zero page at block %u" msgstr "Index »%s« enthält unerwartete Nullseite bei Block %u" -#: access/gist/gistutil.c:747 access/hash/hashutil.c:182 -#: access/hash/hashutil.c:194 access/nbtree/nbtpage.c:527 +#: access/gist/gistutil.c:767 access/hash/hashutil.c:249 +#: access/hash/hashutil.c:261 access/nbtree/nbtpage.c:686 #, c-format msgid "index \"%s\" contains corrupted page at block %u" msgstr "Index »%s« enthält korrupte Seite bei Block %u" -#: access/gist/gistvalidate.c:93 -#, c-format -msgid "gist operator family \"%s\" contains support procedure %s with cross-type registration" -msgstr "GiST-Operatorfamilie »%s« enthält Support-Prozedur %s mit typübergreifender Registrierung" - -#: access/gist/gistvalidate.c:146 -#, c-format -msgid "gist operator family \"%s\" contains function %s with invalid support number %d" -msgstr "GiST-Operatorfamilie »%s« enthält Funktion %s mit ungültiger Support-Nummer %d" - -#: access/gist/gistvalidate.c:158 -#, c-format -msgid "gist operator family \"%s\" contains function %s with wrong signature for support number %d" -msgstr "GiST-Operatorfamilie »%s« enthält Funktion %s mit falscher Signatur für Support-Nummer %d" - -#: access/gist/gistvalidate.c:178 -#, c-format -msgid "gist operator family \"%s\" contains operator %s with invalid strategy number %d" -msgstr "GiST-Operatorfamilie »%s« enthält Operator %s mit ungültiger Strategienummer %d" - #: access/gist/gistvalidate.c:196 #, c-format -msgid "gist operator family \"%s\" contains unsupported ORDER BY specification for operator %s" -msgstr "GiST-Operatorfamilie »%s« enthält nicht unterstützte ORDER-BY-Angabe für Operator %s" +msgid "operator family \"%s\" of access method %s contains unsupported ORDER BY specification for operator %s" +msgstr "Operatorfamilie »%s« für Zugriffsmethode %s enthält nicht unterstützte ORDER-BY-Angabe für Operator %s" #: access/gist/gistvalidate.c:207 #, c-format -msgid "gist operator family \"%s\" contains incorrect ORDER BY opfamily specification for operator %s" -msgstr "GiST-Operatorfamilie »%s« enthält ungültige ORDER-BY-Operatorfamilienangabe für Operator %s" - -#: access/gist/gistvalidate.c:226 -#, c-format -msgid "gist operator family \"%s\" contains operator %s with wrong signature" -msgstr "GiST-Operatorfamilie »%s« enthält Operator %s mit falscher Signatur" - -#: access/gist/gistvalidate.c:265 -#, c-format -msgid "gist operator class \"%s\" is missing support function %d" -msgstr "in GiST-Operatorklasse »%s« fehlt Support-Funktion %d" +msgid "operator family \"%s\" of access method %s contains incorrect ORDER BY opfamily specification for operator %s" +msgstr "Operatorfamilie »%s« für Zugriffsmethode %s enthält ungültige ORDER-BY-Operatorfamilienangabe für Operator %s" -#: access/hash/hashinsert.c:74 +#: access/hash/hashinsert.c:83 #, c-format msgid "index row size %zu exceeds hash maximum %zu" msgstr "Größe der Indexzeile %zu überschreitet Maximum für Hash-Index %zu" -#: access/hash/hashinsert.c:76 access/spgist/spgdoinsert.c:1937 -#: access/spgist/spgutils.c:708 +#: access/hash/hashinsert.c:85 access/spgist/spgdoinsert.c:1961 +#: access/spgist/spgutils.c:746 #, c-format msgid "Values larger than a buffer page cannot be indexed." msgstr "Werte, die größer sind als eine Pufferseite, können nicht indiziert werden." 
-#: access/hash/hashovfl.c:84 -#, fuzzy, c-format -#| msgid "invalid port number: \"%s\"\n" +#: access/hash/hashovfl.c:87 +#, c-format msgid "invalid overflow block number %u" -msgstr "ungültige Portnummer: »%s«\n" +msgstr "ungültige Überlaufblocknummer %u" -#: access/hash/hashovfl.c:273 access/hash/hashpage.c:426 +#: access/hash/hashovfl.c:283 access/hash/hashpage.c:463 #, c-format msgid "out of overflow pages in hash index \"%s\"" msgstr "keine Überlaufseiten in Hash-Index »%s« mehr" -#: access/hash/hashsearch.c:248 +#: access/hash/hashsearch.c:315 #, c-format msgid "hash indexes do not support whole-index scans" msgstr "Hash-Indexe unterstützen keine Scans des ganzen Index" -#: access/hash/hashutil.c:210 +#: access/hash/hashutil.c:277 #, c-format msgid "index \"%s\" is not a hash index" msgstr "Index »%s« ist kein Hash-Index" -#: access/hash/hashutil.c:216 +#: access/hash/hashutil.c:283 #, c-format msgid "index \"%s\" has wrong hash version" msgstr "Index »%s« hat falsche Hash-Version" -#: access/hash/hashvalidate.c:99 -#, c-format -msgid "hash operator family \"%s\" contains support procedure %s with cross-type registration" -msgstr "Hash-Operatorfamilie »%s« enthält Support-Prozedur %s mit typübergreifender Registrierung" - -#: access/hash/hashvalidate.c:114 -#, c-format -msgid "hash operator family \"%s\" contains function %s with wrong signature for support number %d" -msgstr "Hash-Operatorfamilie »%s« enthält Funktion %s mit falscher Signatur für Support-Nummer %d" - -#: access/hash/hashvalidate.c:131 -#, c-format -msgid "hash operator family \"%s\" contains function %s with invalid support number %d" -msgstr "Hash-Operatorfamilie »%s« enthält Funktion %s mit ungültiger Support-Nummer %d" - -#: access/hash/hashvalidate.c:152 -#, c-format -msgid "hash operator family \"%s\" contains operator %s with invalid strategy number %d" -msgstr "Hash-Operatorfamilie »%s« enthält Operator %s mit ungültiger Strategienummer %d" - -#: access/hash/hashvalidate.c:165 -#, c-format -msgid "hash operator family \"%s\" contains invalid ORDER BY specification for operator %s" -msgstr "Hash-Operatorfamilie »%s« enthält ungültige ORDER-BY-Angabe für Operator %s" - -#: access/hash/hashvalidate.c:178 -#, c-format -msgid "hash operator family \"%s\" contains operator %s with wrong signature" -msgstr "Hash-Operatorfamilie »%s« enthält Operator %s mit falscher Signatur" - -#: access/hash/hashvalidate.c:190 -#, c-format -msgid "hash operator family \"%s\" lacks support function for operator %s" -msgstr "in Hash-Operatorfamilie »%s« fehlt Support-Funktion für Operator %s" - -#: access/hash/hashvalidate.c:218 -#, c-format -msgid "hash operator family \"%s\" is missing operator(s) for types %s and %s" -msgstr "in Hash-Operatorfamilie »%s« fehlen Operatoren für Typen %s und %s" - -#: access/hash/hashvalidate.c:232 +#: access/hash/hashvalidate.c:191 #, c-format -msgid "hash operator class \"%s\" is missing operator(s)" -msgstr "in Hash-Operatorklasse »%s« fehlen Operatoren" +msgid "operator family \"%s\" of access method %s lacks support function for operator %s" +msgstr "in Operatorfamilie »%s« für Zugriffsmethode %s fehlt Support-Funktion für Operator %s" -#: access/hash/hashvalidate.c:248 +#: access/hash/hashvalidate.c:249 access/nbtree/nbtvalidate.c:266 #, c-format -msgid "hash operator family \"%s\" is missing cross-type operator(s)" -msgstr "in Hash-Operatorfamilie »%s« fehlen typübergreifende Operatoren" +msgid "operator family \"%s\" of access method %s is missing cross-type operator(s)" +msgstr "in 
Operatorfamilie »%s« für Zugriffsmethode %s fehlen typübergreifende Operatoren" -#: access/heap/heapam.c:1296 access/heap/heapam.c:1324 -#: access/heap/heapam.c:1356 catalog/aclchk.c:1754 +#: access/heap/heapam.c:1304 access/heap/heapam.c:1333 +#: access/heap/heapam.c:1366 catalog/aclchk.c:1828 #, c-format msgid "\"%s\" is an index" msgstr "»%s« ist ein Index" -#: access/heap/heapam.c:1301 access/heap/heapam.c:1329 -#: access/heap/heapam.c:1361 catalog/aclchk.c:1761 commands/tablecmds.c:9558 -#: commands/tablecmds.c:12769 +#: access/heap/heapam.c:1309 access/heap/heapam.c:1338 +#: access/heap/heapam.c:1371 catalog/aclchk.c:1835 commands/tablecmds.c:10326 +#: commands/tablecmds.c:13538 #, c-format msgid "\"%s\" is a composite type" msgstr "»%s« ist ein zusammengesetzter Typ" -#: access/heap/heapam.c:2595 +#: access/heap/heapam.c:2639 #, c-format -msgid "cannot insert tuples during a parallel operation" -msgstr "während einer parallelen Operation können keine Tupel eingefügt werden" +msgid "cannot insert tuples in a parallel worker" +msgstr "in einem parallelen Arbeitsprozess können keine Tupel eingefügt werden" -#: access/heap/heapam.c:3045 +#: access/heap/heapam.c:3091 #, c-format msgid "cannot delete tuples during a parallel operation" msgstr "während einer parallelen Operation können keine Tupel gelöscht werden" -#: access/heap/heapam.c:3091 +#: access/heap/heapam.c:3137 #, c-format msgid "attempted to delete invisible tuple" msgstr "Versuch ein unsichtbares Tupel zu löschen" -#: access/heap/heapam.c:3517 access/heap/heapam.c:6268 +#: access/heap/heapam.c:3572 access/heap/heapam.c:6409 #, c-format msgid "cannot update tuples during a parallel operation" msgstr "während einer parallelen Operation können keine Tupel aktualisiert werden" -#: access/heap/heapam.c:3639 +#: access/heap/heapam.c:3720 #, c-format msgid "attempted to update invisible tuple" msgstr "Versuch ein unsichtbares Tupel zu aktualisieren" -#: access/heap/heapam.c:4991 access/heap/heapam.c:5029 -#: access/heap/heapam.c:5281 executor/execMain.c:2461 +#: access/heap/heapam.c:5085 access/heap/heapam.c:5123 +#: access/heap/heapam.c:5375 executor/execMain.c:2654 #, c-format msgid "could not obtain lock on row in relation \"%s\"" msgstr "konnte Sperre für Zeile in Relation »%s« nicht setzen" -#: access/heap/hio.c:322 access/heap/rewriteheap.c:664 +#: access/heap/hio.c:338 access/heap/rewriteheap.c:670 #, c-format msgid "row is too big: size %zu, maximum size %zu" msgstr "Zeile ist zu groß: Größe ist %zu, Maximalgröße ist %zu" -#: access/heap/rewriteheap.c:923 +#: access/heap/rewriteheap.c:930 #, c-format msgid "could not write to file \"%s\", wrote %d of %d: %m" msgstr "konnte nicht in Datei »%s« schreiben, %d von %d geschrieben: %m" -#: access/heap/rewriteheap.c:963 access/heap/rewriteheap.c:1175 -#: access/heap/rewriteheap.c:1272 access/transam/timeline.c:406 -#: access/transam/timeline.c:482 access/transam/xlog.c:3235 -#: access/transam/xlog.c:3397 replication/logical/snapbuild.c:1604 -#: replication/slot.c:1125 replication/slot.c:1210 storage/file/fd.c:631 -#: storage/file/fd.c:3129 storage/smgr/md.c:1043 storage/smgr/md.c:1276 -#: storage/smgr/md.c:1449 utils/misc/guc.c:6951 +#: access/heap/rewriteheap.c:970 access/heap/rewriteheap.c:1185 +#: access/heap/rewriteheap.c:1284 access/transam/timeline.c:411 +#: access/transam/timeline.c:490 access/transam/xlog.c:3274 +#: access/transam/xlog.c:3440 replication/logical/snapbuild.c:1629 +#: replication/slot.c:1290 replication/slot.c:1377 storage/file/fd.c:639 +#: 
storage/file/fd.c:3515 storage/smgr/md.c:1043 storage/smgr/md.c:1276 +#: storage/smgr/md.c:1449 utils/misc/guc.c:7246 #, c-format msgid "could not fsync file \"%s\": %m" msgstr "konnte Datei »%s« nicht fsyncen: %m" -#: access/heap/rewriteheap.c:1018 access/heap/rewriteheap.c:1138 -#: access/transam/timeline.c:314 access/transam/timeline.c:460 -#: access/transam/xlog.c:3191 access/transam/xlog.c:3340 -#: access/transam/xlog.c:10470 access/transam/xlog.c:10508 -#: access/transam/xlog.c:10881 postmaster/postmaster.c:4410 -#: replication/logical/origin.c:535 replication/slot.c:1082 -#: storage/file/copydir.c:162 storage/smgr/md.c:326 utils/time/snapmgr.c:1275 +#: access/heap/rewriteheap.c:1024 access/heap/rewriteheap.c:1143 +#: access/transam/timeline.c:314 access/transam/timeline.c:465 +#: access/transam/xlog.c:3227 access/transam/xlog.c:3378 +#: access/transam/xlog.c:10607 access/transam/xlog.c:10645 +#: access/transam/xlog.c:11048 postmaster/postmaster.c:4454 +#: replication/logical/origin.c:575 replication/slot.c:1242 +#: storage/file/copydir.c:167 storage/smgr/md.c:326 utils/time/snapmgr.c:1297 #, c-format msgid "could not create file \"%s\": %m" msgstr "konnte Datei »%s« nicht erstellen: %m" -#: access/heap/rewriteheap.c:1147 +#: access/heap/rewriteheap.c:1153 #, c-format msgid "could not truncate file \"%s\" to %u: %m" msgstr "konnte Datei »%s« nicht auf %u kürzen: %m" -#: access/heap/rewriteheap.c:1154 replication/walsender.c:451 +#: access/heap/rewriteheap.c:1161 replication/walsender.c:487 #: storage/smgr/md.c:1948 #, c-format msgid "could not seek to end of file \"%s\": %m" msgstr "konnte Positionszeiger nicht ans Ende der Datei »%s« setzen: %m" -#: access/heap/rewriteheap.c:1165 access/transam/timeline.c:366 -#: access/transam/timeline.c:400 access/transam/timeline.c:476 -#: access/transam/xlog.c:3226 access/transam/xlog.c:3390 -#: postmaster/postmaster.c:4420 postmaster/postmaster.c:4430 -#: replication/logical/origin.c:544 replication/logical/origin.c:580 -#: replication/logical/origin.c:596 replication/logical/snapbuild.c:1588 -#: replication/slot.c:1111 storage/file/copydir.c:187 -#: utils/init/miscinit.c:1228 utils/init/miscinit.c:1237 -#: utils/init/miscinit.c:1244 utils/misc/guc.c:6912 utils/misc/guc.c:6943 -#: utils/misc/guc.c:8792 utils/misc/guc.c:8806 utils/time/snapmgr.c:1280 -#: utils/time/snapmgr.c:1287 +#: access/heap/rewriteheap.c:1173 access/transam/timeline.c:369 +#: access/transam/timeline.c:404 access/transam/timeline.c:482 +#: access/transam/xlog.c:3263 access/transam/xlog.c:3431 +#: postmaster/postmaster.c:4464 postmaster/postmaster.c:4474 +#: replication/logical/origin.c:584 replication/logical/origin.c:623 +#: replication/logical/origin.c:639 replication/logical/snapbuild.c:1611 +#: replication/slot.c:1273 storage/file/copydir.c:208 +#: utils/init/miscinit.c:1341 utils/init/miscinit.c:1352 +#: utils/init/miscinit.c:1360 utils/misc/guc.c:7207 utils/misc/guc.c:7238 +#: utils/misc/guc.c:9099 utils/misc/guc.c:9113 utils/time/snapmgr.c:1302 +#: utils/time/snapmgr.c:1309 #, c-format msgid "could not write to file \"%s\": %m" msgstr "konnte nicht in Datei »%s« schreiben: %m" -#: access/heap/rewriteheap.c:1248 access/transam/xlog.c:10719 -#: access/transam/xlogarchive.c:113 access/transam/xlogarchive.c:467 -#: postmaster/postmaster.c:1239 postmaster/syslogger.c:1371 -#: replication/logical/origin.c:522 replication/logical/reorderbuffer.c:2588 -#: replication/logical/reorderbuffer.c:2645 -#: replication/logical/snapbuild.c:1532 replication/logical/snapbuild.c:1907 -#: 
replication/slot.c:1184 storage/ipc/dsm.c:327 storage/smgr/md.c:425 -#: storage/smgr/md.c:474 storage/smgr/md.c:1396 +#: access/heap/rewriteheap.c:1259 access/transam/xlogarchive.c:113 +#: access/transam/xlogarchive.c:469 postmaster/postmaster.c:1275 +#: postmaster/syslogger.c:1372 replication/logical/origin.c:563 +#: replication/logical/reorderbuffer.c:2607 +#: replication/logical/snapbuild.c:1560 replication/logical/snapbuild.c:1935 +#: replication/slot.c:1350 storage/file/fd.c:690 storage/file/fd.c:3118 +#: storage/file/fd.c:3180 storage/file/reinit.c:255 storage/ipc/dsm.c:315 +#: storage/smgr/md.c:425 storage/smgr/md.c:474 storage/smgr/md.c:1396 +#: utils/time/snapmgr.c:1640 #, c-format msgid "could not remove file \"%s\": %m" msgstr "konnte Datei »%s« nicht löschen: %m" -#: access/heap/rewriteheap.c:1262 access/transam/timeline.c:110 -#: access/transam/timeline.c:235 access/transam/timeline.c:333 -#: access/transam/xlog.c:3167 access/transam/xlog.c:3284 -#: access/transam/xlog.c:3325 access/transam/xlog.c:3598 -#: access/transam/xlog.c:3676 access/transam/xlogutils.c:702 -#: postmaster/syslogger.c:1380 replication/basebackup.c:474 -#: replication/basebackup.c:1218 replication/logical/origin.c:651 -#: replication/logical/reorderbuffer.c:2112 -#: replication/logical/reorderbuffer.c:2358 -#: replication/logical/reorderbuffer.c:3037 -#: replication/logical/snapbuild.c:1581 replication/logical/snapbuild.c:1665 -#: replication/slot.c:1199 replication/walsender.c:444 -#: replication/walsender.c:2055 storage/file/copydir.c:155 -#: storage/file/fd.c:614 storage/file/fd.c:3041 storage/file/fd.c:3108 -#: storage/smgr/md.c:607 utils/error/elog.c:1879 utils/init/miscinit.c:1163 -#: utils/init/miscinit.c:1284 utils/init/miscinit.c:1359 utils/misc/guc.c:7171 -#: utils/misc/guc.c:7204 +#: access/heap/rewriteheap.c:1273 access/transam/timeline.c:111 +#: access/transam/timeline.c:236 access/transam/timeline.c:333 +#: access/transam/xlog.c:3204 access/transam/xlog.c:3323 +#: access/transam/xlog.c:3364 access/transam/xlog.c:3641 +#: access/transam/xlog.c:3719 access/transam/xlogutils.c:708 +#: postmaster/syslogger.c:1381 replication/basebackup.c:507 +#: replication/basebackup.c:1381 replication/logical/origin.c:694 +#: replication/logical/reorderbuffer.c:2134 +#: replication/logical/reorderbuffer.c:2378 +#: replication/logical/reorderbuffer.c:3081 +#: replication/logical/snapbuild.c:1603 replication/logical/snapbuild.c:1691 +#: replication/slot.c:1365 replication/walsender.c:480 +#: replication/walsender.c:2401 storage/file/copydir.c:161 +#: storage/file/fd.c:622 storage/file/fd.c:3410 storage/file/fd.c:3494 +#: storage/smgr/md.c:607 utils/error/elog.c:1879 utils/init/miscinit.c:1265 +#: utils/init/miscinit.c:1400 utils/init/miscinit.c:1477 utils/misc/guc.c:7466 +#: utils/misc/guc.c:7498 #, c-format msgid "could not open file \"%s\": %m" msgstr "konnte Datei »%s« nicht öffnen: %m" @@ -974,34 +920,34 @@ msgstr "Zugriffsmethode »%s« ist nicht vom Typ %s" msgid "index access method \"%s\" does not have a handler" msgstr "Indexzugriffsmethode »%s« hat keinen Handler" -#: access/index/indexam.c:160 catalog/objectaddress.c:1200 -#: commands/indexcmds.c:1811 commands/tablecmds.c:247 -#: commands/tablecmds.c:12760 +#: access/index/indexam.c:160 catalog/objectaddress.c:1223 +#: commands/indexcmds.c:2236 commands/tablecmds.c:249 commands/tablecmds.c:273 +#: commands/tablecmds.c:13529 commands/tablecmds.c:14772 #, c-format msgid "\"%s\" is not an index" msgstr "»%s« ist kein Index" -#: access/nbtree/nbtinsert.c:429 
+#: access/nbtree/nbtinsert.c:530 #, c-format msgid "duplicate key value violates unique constraint \"%s\"" msgstr "doppelter Schlüsselwert verletzt Unique-Constraint »%s«" -#: access/nbtree/nbtinsert.c:431 +#: access/nbtree/nbtinsert.c:532 #, c-format msgid "Key %s already exists." msgstr "Schlüssel »%s« existiert bereits." -#: access/nbtree/nbtinsert.c:498 +#: access/nbtree/nbtinsert.c:599 #, c-format msgid "failed to re-find tuple within index \"%s\"" msgstr "konnte Tupel mit Index »%s« nicht erneut finden" -#: access/nbtree/nbtinsert.c:500 +#: access/nbtree/nbtinsert.c:601 #, c-format msgid "This may be because of a non-immutable index expression." msgstr "Das kann daran liegen, dass der Indexausdruck nicht »immutable« ist." -#: access/nbtree/nbtinsert.c:580 access/nbtree/nbtsort.c:491 +#: access/nbtree/nbtinsert.c:681 access/nbtree/nbtsort.c:842 #, c-format msgid "" "Values larger than 1/3 of a buffer page cannot be indexed.\n" @@ -1010,122 +956,47 @@ msgstr "" "Werte, die größer sind als 1/3 einer Pufferseite, können nicht indiziert werden.\n" "Erstellen Sie eventuell einen Funktionsindex auf einen MD5-Hash oder verwenden Sie Volltextindizierung." -#: access/nbtree/nbtpage.c:169 access/nbtree/nbtpage.c:372 -#: access/nbtree/nbtpage.c:459 parser/parse_utilcmd.c:1770 +#: access/nbtree/nbtpage.c:318 access/nbtree/nbtpage.c:529 +#: access/nbtree/nbtpage.c:618 parser/parse_utilcmd.c:2055 #, c-format msgid "index \"%s\" is not a btree" msgstr "Index »%s« ist kein B-Tree" -#: access/nbtree/nbtpage.c:175 access/nbtree/nbtpage.c:378 -#: access/nbtree/nbtpage.c:465 +#: access/nbtree/nbtpage.c:325 access/nbtree/nbtpage.c:536 +#: access/nbtree/nbtpage.c:625 #, c-format -msgid "version mismatch in index \"%s\": file version %d, code version %d" -msgstr "keine Versionsübereinstimmung in Index »%s«: Dateiversion %d, Code-Version %d" +msgid "version mismatch in index \"%s\": file version %d, current version %d, minimal supported version %d" +msgstr "keine Versionsübereinstimmung in Index »%s«: Dateiversion %d, aktuelle Version %d, kleinste unterstützte Version %d" -#: access/nbtree/nbtpage.c:1153 +#: access/nbtree/nbtpage.c:1312 #, c-format msgid "index \"%s\" contains a half-dead internal page" msgstr "Index »%s« enthält eine halbtote interne Seite" -#: access/nbtree/nbtpage.c:1155 +#: access/nbtree/nbtpage.c:1314 #, c-format msgid "This can be caused by an interrupted VACUUM in version 9.3 or older, before upgrade. Please REINDEX it." msgstr "Die Ursache kann ein unterbrochenes VACUUM in Version 9.3 oder älter vor dem Upgrade sein. Bitte REINDEX durchführen." 
-#: access/nbtree/nbtvalidate.c:101 -#, c-format -msgid "btree operator family \"%s\" contains function %s with invalid support number %d" -msgstr "B-Tree-Operatorfamilie »%s« enthält Funktion %s mit ungültiger Support-Nummer %d" - -#: access/nbtree/nbtvalidate.c:113 -#, c-format -msgid "btree operator family \"%s\" contains function %s with wrong signature for support number %d" -msgstr "B-Tree-Operatorfamilie »%s« enthält Funktion %s mit falscher Signatur für Support-Nummer %d" - -#: access/nbtree/nbtvalidate.c:133 -#, c-format -msgid "btree operator family \"%s\" contains operator %s with invalid strategy number %d" -msgstr "B-Tree-Operatorfamilie »%s« enthält Operator %s mit ungültiger Strategienummer %d" - -#: access/nbtree/nbtvalidate.c:146 -#, c-format -msgid "btree operator family \"%s\" contains invalid ORDER BY specification for operator %s" -msgstr "B-Tree-Operatorfamilie »%s« enthält ungültige ORDER-BY-Angabe für Operator %s" - -#: access/nbtree/nbtvalidate.c:159 -#, c-format -msgid "btree operator family \"%s\" contains operator %s with wrong signature" -msgstr "B-Tree-Operatorfamilie »%s« enthält Operator %s mit falscher Signatur" - -#: access/nbtree/nbtvalidate.c:201 -#, c-format -msgid "btree operator family \"%s\" is missing operator(s) for types %s and %s" -msgstr "in B-Tree-Operatorfamilie »%s« fehlen Operatoren für Typen %s und %s" - -#: access/nbtree/nbtvalidate.c:211 -#, c-format -msgid "btree operator family \"%s\" is missing support function for types %s and %s" -msgstr "in B-Tree-Operatorfamilie »%s« fehlen Support-Funktionen für Typen %s und %s" - -#: access/nbtree/nbtvalidate.c:225 +#: access/nbtree/nbtvalidate.c:236 #, c-format -msgid "btree operator class \"%s\" is missing operator(s)" -msgstr "in B-Tree-Operatorklasse »%s« fehlen Operatoren" +msgid "operator family \"%s\" of access method %s is missing support function for types %s and %s" +msgstr "in Operatorfamilie »%s« für Zugriffsmethode %s fehlt Support-Funktion für Typen %s und %s" -#: access/nbtree/nbtvalidate.c:242 +#: access/spgist/spgutils.c:136 #, c-format -msgid "btree operator family \"%s\" is missing cross-type operator(s)" -msgstr "in B-Tree-Operatorfamilie »%s« fehlen typübergreifende Operatoren" +msgid "compress method must not defined when leaf type is different from input type" +msgstr "" -#: access/spgist/spgutils.c:705 +#: access/spgist/spgutils.c:743 #, c-format msgid "SP-GiST inner tuple size %zu exceeds maximum %zu" msgstr "innere Tupelgröße %zu überschreitet SP-GiST-Maximum %zu" -#: access/spgist/spgvalidate.c:93 -#, c-format -msgid "spgist operator family \"%s\" contains support procedure %s with cross-type registration" -msgstr "SPGiST-Operatorfamilie »%s« enthält Support-Prozedur %s mit typübergreifender Registrierung" - -#: access/spgist/spgvalidate.c:116 -#, c-format -msgid "spgist operator family \"%s\" contains function %s with invalid support number %d" -msgstr "SPGiST-Operatorfamilie »%s« enthält Funktion %s mit ungültiger Support-Nummer %d" - -#: access/spgist/spgvalidate.c:128 -#, c-format -msgid "spgist operator family \"%s\" contains function %s with wrong signature for support number %d" -msgstr "SPGiST-Operatorfamilie »%s« enthält Funktion %s mit falscher Signatur für Support-Nummer %d" - -#: access/spgist/spgvalidate.c:147 -#, c-format -msgid "spgist operator family \"%s\" contains operator %s with invalid strategy number %d" -msgstr "SPGiST-Operatorfamilie »%s« enthält Operator %s mit ungültiger Strategienummer %d" - -#: access/spgist/spgvalidate.c:160 +#: 
access/spgist/spgvalidate.c:269 #, c-format -msgid "spgist operator family \"%s\" contains invalid ORDER BY specification for operator %s" -msgstr "SPGiST-Operatorfamilie »%s« enthält ungültige ORDER-BY-Angabe für Operator %s" - -#: access/spgist/spgvalidate.c:173 -#, c-format -msgid "spgist operator family \"%s\" contains operator %s with wrong signature" -msgstr "SPGiST-Operatorfamilie »%s« enthält Operator %s mit falscher Signatur" - -#: access/spgist/spgvalidate.c:201 -#, c-format -msgid "spgist operator family \"%s\" is missing operator(s) for types %s and %s" -msgstr "in SPGiST-Operatorfamilie »%s« fehlen Operatoren für Typen %s und %s" - -#: access/spgist/spgvalidate.c:221 -#, c-format -msgid "spgist operator family \"%s\" is missing support function %d for type %s" -msgstr "in SPGiST-Operatorfamilie »%s« fehlt Support-Funktion %d für Typ %s" - -#: access/spgist/spgvalidate.c:234 -#, c-format -msgid "spgist operator class \"%s\" is missing operator(s)" -msgstr "in SPGiST-Operatorklasse »%s« fehlen Operatoren" +msgid "operator family \"%s\" of access method %s is missing support function %d for type %s" +msgstr "in Operatorfamilie »%s« für Zugriffsmethode %s fehlt Support-Funktion %d für Typ %s" #: access/tablesample/bernoulli.c:152 access/tablesample/system.c:156 #, c-format @@ -1162,24 +1033,24 @@ msgstr "Datenbank nimmt keine Befehle an, die neue MultiXactIds erzeugen, um Dat #, c-format msgid "" "Execute a database-wide VACUUM in that database.\n" -"You might also need to commit or roll back old prepared transactions." +"You might also need to commit or roll back old prepared transactions, or drop stale replication slots." msgstr "" "Führen Sie ein datenbankweites VACUUM in dieser Datenbank aus.\n" -"Eventuell müssen Sie auch alte vorbereitete Transaktionen committen oder zurückrollen." +"Eventuell müssen Sie auch alte vorbereitete Transaktionen committen oder zurückrollen oder unbenutzte Replikations-Slots löschen." 
#: access/transam/multixact.c:1007 #, c-format msgid "database is not accepting commands that generate new MultiXactIds to avoid wraparound data loss in database with OID %u" msgstr "Datenbank nimmt keine Befehle an, die neue MultiXactIds erzeugen, um Datenverlust wegen Transaktionsnummernüberlauf in Datenbank mit OID %u zu vermeiden" -#: access/transam/multixact.c:1028 access/transam/multixact.c:2314 +#: access/transam/multixact.c:1028 access/transam/multixact.c:2318 #, c-format msgid "database \"%s\" must be vacuumed before %u more MultiXactId is used" msgid_plural "database \"%s\" must be vacuumed before %u more MultiXactIds are used" msgstr[0] "Datenbank »%s« muss gevacuumt werden, bevor %u weitere MultiXactId aufgebraucht ist" msgstr[1] "Datenbank »%s« muss gevacuumt werden, bevor %u weitere MultiXactIds aufgebraucht sind" -#: access/transam/multixact.c:1037 access/transam/multixact.c:2323 +#: access/transam/multixact.c:1037 access/transam/multixact.c:2327 #, c-format msgid "database with OID %u must be vacuumed before %u more MultiXactId is used" msgid_plural "database with OID %u must be vacuumed before %u more MultiXactIds are used" @@ -1225,331 +1096,352 @@ msgstr "MultiXactId %u existiert nicht mehr -- anscheinender Überlauf" msgid "MultiXactId %u has not been created yet -- apparent wraparound" msgstr "MultiXactId %u wurde noch nicht erzeugt -- anscheinender Überlauf" -#: access/transam/multixact.c:2264 +#: access/transam/multixact.c:2268 #, c-format msgid "MultiXactId wrap limit is %u, limited by database with OID %u" msgstr "Grenze für MultiXactId-Überlauf ist %u, begrenzt durch Datenbank mit OID %u" -#: access/transam/multixact.c:2319 access/transam/multixact.c:2328 +#: access/transam/multixact.c:2323 access/transam/multixact.c:2332 #: access/transam/varsup.c:146 access/transam/varsup.c:153 -#: access/transam/varsup.c:384 access/transam/varsup.c:391 +#: access/transam/varsup.c:405 access/transam/varsup.c:412 #, c-format msgid "" "To avoid a database shutdown, execute a database-wide VACUUM in that database.\n" -"You might also need to commit or roll back old prepared transactions." +"You might also need to commit or roll back old prepared transactions, or drop stale replication slots." msgstr "" "Um ein Abschalten der Datenbank zu vermeiden, führen Sie ein komplettes VACUUM über diese Datenbank aus.\n" -"Eventuell müssen Sie auch alte vorbereitete Transaktionen committen oder zurückrollen." +"Eventuell müssen Sie auch alte vorbereitete Transaktionen committen oder zurückrollen oder unbenutzte Replikations-Slots löschen." 
-#: access/transam/multixact.c:2598 +#: access/transam/multixact.c:2602 #, c-format msgid "oldest MultiXactId member is at offset %u" msgstr "ältestes MultiXactId-Mitglied ist bei Offset %u" -#: access/transam/multixact.c:2602 +#: access/transam/multixact.c:2606 #, c-format msgid "MultiXact member wraparound protections are disabled because oldest checkpointed MultiXact %u does not exist on disk" msgstr "MultiXact-Member-Wraparound-Schutz ist deaktiviert, weil die älteste gecheckpointete MultiXact %u nicht auf der Festplatte existiert" -#: access/transam/multixact.c:2624 +#: access/transam/multixact.c:2628 #, c-format msgid "MultiXact member wraparound protections are now enabled" msgstr "MultiXact-Member-Wraparound-Schutz ist jetzt aktiviert" -#: access/transam/multixact.c:2626 +#: access/transam/multixact.c:2631 #, c-format msgid "MultiXact member stop limit is now %u based on MultiXact %u" msgstr "MultiXact-Member-Stopp-Limit ist jetzt %u, basierend auf MultiXact %u" -#: access/transam/multixact.c:3006 +#: access/transam/multixact.c:3011 #, c-format msgid "oldest MultiXact %u not found, earliest MultiXact %u, skipping truncation" msgstr "älteste MultiXact %u nicht gefunden, älteste ist MultiXact %u, Truncate wird ausgelassen" -#: access/transam/multixact.c:3024 +#: access/transam/multixact.c:3029 #, c-format msgid "cannot truncate up to MultiXact %u because it does not exist on disk, skipping truncation" msgstr "kann nicht bis MultiXact %u trunkieren, weil sie nicht auf der Festplatte existiert, Trunkierung wird ausgelassen" -#: access/transam/multixact.c:3350 +#: access/transam/multixact.c:3355 #, c-format msgid "invalid MultiXactId: %u" msgstr "ungültige MultiXactId: %u" -#: access/transam/parallel.c:592 +#: access/transam/parallel.c:660 access/transam/parallel.c:783 +#, c-format +msgid "parallel worker failed to initialize" +msgstr "Initialisierung von parallelem Arbeitsprozess fehlgeschlagen" + +#: access/transam/parallel.c:661 access/transam/parallel.c:784 +#, c-format +msgid "More details may be available in the server log." +msgstr "Weitere Einzelheiten sind möglicherweise im Serverlog zu finden." 
+ +#: access/transam/parallel.c:845 #, c-format msgid "postmaster exited during a parallel transaction" msgstr "Postmaster beendete während einer parallelen Transaktion" -#: access/transam/parallel.c:777 +#: access/transam/parallel.c:1032 #, c-format msgid "lost connection to parallel worker" msgstr "Verbindung mit parallelem Arbeitsprozess verloren" -#: access/transam/parallel.c:836 access/transam/parallel.c:838 +#: access/transam/parallel.c:1098 access/transam/parallel.c:1100 msgid "parallel worker" msgstr "paralleler Arbeitsprozess" -#: access/transam/parallel.c:977 +#: access/transam/parallel.c:1245 #, c-format msgid "could not map dynamic shared memory segment" msgstr "konnte dynamisches Shared-Memory-Segment nicht mappen" -#: access/transam/parallel.c:982 +#: access/transam/parallel.c:1250 #, c-format msgid "invalid magic number in dynamic shared memory segment" msgstr "ungültige magische Zahl in dynamischem Shared-Memory-Segment" -#: access/transam/slru.c:663 +#: access/transam/slru.c:668 #, c-format msgid "file \"%s\" doesn't exist, reading as zeroes" msgstr "Datei »%s« existiert nicht, wird als Nullen eingelesen" -#: access/transam/slru.c:893 access/transam/slru.c:899 -#: access/transam/slru.c:906 access/transam/slru.c:913 -#: access/transam/slru.c:920 access/transam/slru.c:927 +#: access/transam/slru.c:906 access/transam/slru.c:912 +#: access/transam/slru.c:919 access/transam/slru.c:926 +#: access/transam/slru.c:933 access/transam/slru.c:940 #, c-format msgid "could not access status of transaction %u" msgstr "konnte auf den Status von Transaktion %u nicht zugreifen" -#: access/transam/slru.c:894 +#: access/transam/slru.c:907 #, c-format msgid "Could not open file \"%s\": %m." msgstr "Konnte Datei »%s« nicht öffnen: %m." -#: access/transam/slru.c:900 +#: access/transam/slru.c:913 #, c-format msgid "Could not seek in file \"%s\" to offset %u: %m." msgstr "Konnte Positionszeiger in Datei »%s« nicht auf %u setzen: %m." -#: access/transam/slru.c:907 +#: access/transam/slru.c:920 #, c-format msgid "Could not read from file \"%s\" at offset %u: %m." msgstr "Konnte nicht aus Datei »%s« bei Position %u lesen: %m." -#: access/transam/slru.c:914 +#: access/transam/slru.c:927 #, c-format msgid "Could not write to file \"%s\" at offset %u: %m." msgstr "Konnte nicht in Datei »%s« bei Position %u schreiben: %m." -#: access/transam/slru.c:921 +#: access/transam/slru.c:934 #, c-format msgid "Could not fsync file \"%s\": %m." msgstr "Konnte Datei »%s« nicht fsyncen: %m." -#: access/transam/slru.c:928 +#: access/transam/slru.c:941 #, c-format msgid "Could not close file \"%s\": %m." msgstr "Konnte Datei »%s« nicht schließen: %m." -#: access/transam/slru.c:1183 +#: access/transam/slru.c:1198 #, c-format msgid "could not truncate directory \"%s\": apparent wraparound" msgstr "konnte Verzeichnis »%s« nicht leeren: anscheinender Überlauf" -#: access/transam/slru.c:1238 access/transam/slru.c:1294 +#: access/transam/slru.c:1253 access/transam/slru.c:1309 #, c-format msgid "removing file \"%s\"" msgstr "entferne Datei »%s«" -#: access/transam/timeline.c:147 access/transam/timeline.c:152 +#: access/transam/timeline.c:148 access/transam/timeline.c:153 #, c-format msgid "syntax error in history file: %s" msgstr "Syntaxfehler in History-Datei: %s" -#: access/transam/timeline.c:148 +#: access/transam/timeline.c:149 #, c-format msgid "Expected a numeric timeline ID." msgstr "Eine numerische Zeitleisten-ID wurde erwartet." 
-#: access/transam/timeline.c:153 +#: access/transam/timeline.c:154 #, c-format -msgid "Expected a transaction log switchpoint location." -msgstr "Eine Transaktionslog-Switchpoint-Position wurde erwartet." +msgid "Expected a write-ahead log switchpoint location." +msgstr "Eine Write-Ahead-Log-Switchpoint-Position wurde erwartet." -#: access/transam/timeline.c:157 +#: access/transam/timeline.c:158 #, c-format msgid "invalid data in history file: %s" msgstr "ungültige Daten in History-Datei: %s" -#: access/transam/timeline.c:158 +#: access/transam/timeline.c:159 #, c-format msgid "Timeline IDs must be in increasing sequence." msgstr "Zeitleisten-IDs müssen in aufsteigender Folge sein." -#: access/transam/timeline.c:178 +#: access/transam/timeline.c:179 #, c-format msgid "invalid data in history file \"%s\"" msgstr "ungültige Daten in History-Datei »%s«" -#: access/transam/timeline.c:179 +#: access/transam/timeline.c:180 #, c-format msgid "Timeline IDs must be less than child timeline's ID." msgstr "Zeitleisten-IDs müssen kleiner als die Zeitleisten-ID des Kindes sein." -#: access/transam/timeline.c:411 access/transam/timeline.c:487 -#: access/transam/xlog.c:3241 access/transam/xlog.c:3402 -#: access/transam/xlogfuncs.c:689 commands/copy.c:1743 -#: storage/file/copydir.c:201 +#: access/transam/timeline.c:417 access/transam/timeline.c:496 +#: access/transam/xlog.c:3281 access/transam/xlog.c:3446 +#: access/transam/xlogfuncs.c:683 commands/copy.c:1742 +#: storage/file/copydir.c:219 #, c-format msgid "could not close file \"%s\": %m" msgstr "konnte Datei »%s« nicht schließen: %m" -#: access/transam/timeline.c:569 +#: access/transam/timeline.c:578 #, c-format msgid "requested timeline %u is not in this server's history" msgstr "angeforderte Zeitleiste %u ist nicht in der History dieses Servers" -#: access/transam/twophase.c:362 +#: access/transam/twophase.c:381 #, c-format msgid "transaction identifier \"%s\" is too long" msgstr "Transaktionsbezeichner »%s« ist zu lang" -#: access/transam/twophase.c:369 +#: access/transam/twophase.c:388 #, c-format msgid "prepared transactions are disabled" msgstr "vorbereitete Transaktionen sind abgeschaltet" -#: access/transam/twophase.c:370 +#: access/transam/twophase.c:389 #, c-format msgid "Set max_prepared_transactions to a nonzero value." msgstr "Setzen Sie max_prepared_transactions auf einen Wert höher als null." -#: access/transam/twophase.c:389 +#: access/transam/twophase.c:408 #, c-format msgid "transaction identifier \"%s\" is already in use" msgstr "Transaktionsbezeichner »%s« wird bereits verwendet" -#: access/transam/twophase.c:398 +#: access/transam/twophase.c:417 access/transam/twophase.c:2415 #, c-format msgid "maximum number of prepared transactions reached" msgstr "maximale Anzahl vorbereiteter Transaktionen erreicht" -#: access/transam/twophase.c:399 +#: access/transam/twophase.c:418 access/transam/twophase.c:2416 #, c-format msgid "Increase max_prepared_transactions (currently %d)." msgstr "Erhöhen Sie max_prepared_transactions (aktuell %d)." 
-#: access/transam/twophase.c:539 +#: access/transam/twophase.c:585 #, c-format msgid "prepared transaction with identifier \"%s\" is busy" msgstr "vorbereitete Transaktion mit Bezeichner »%s« ist beschäftigt" -#: access/transam/twophase.c:545 +#: access/transam/twophase.c:591 #, c-format msgid "permission denied to finish prepared transaction" msgstr "keine Berechtigung, um vorbereitete Transaktion abzuschließen" -#: access/transam/twophase.c:546 +#: access/transam/twophase.c:592 #, c-format msgid "Must be superuser or the user that prepared the transaction." msgstr "Sie müssen Superuser oder der Benutzer sein, der die Transaktion vorbereitet hat." -#: access/transam/twophase.c:557 +#: access/transam/twophase.c:603 #, c-format msgid "prepared transaction belongs to another database" msgstr "vorbereitete Transaktion gehört zu einer anderen Datenbank" -#: access/transam/twophase.c:558 +#: access/transam/twophase.c:604 #, c-format msgid "Connect to the database where the transaction was prepared to finish it." msgstr "Verbinden Sie sich mit der Datenbank, wo die Transaktion vorbereitet wurde, um sie zu beenden." -#: access/transam/twophase.c:573 +#: access/transam/twophase.c:619 #, c-format msgid "prepared transaction with identifier \"%s\" does not exist" msgstr "vorbereitete Transaktion mit Bezeichner »%s« existiert nicht" -#: access/transam/twophase.c:1042 +#: access/transam/twophase.c:1102 #, c-format msgid "two-phase state file maximum length exceeded" msgstr "maximale Länge der Zweiphasen-Statusdatei überschritten" -#: access/transam/twophase.c:1160 +#: access/transam/twophase.c:1231 #, c-format msgid "could not open two-phase state file \"%s\": %m" msgstr "konnte Zweiphasen-Statusdatei »%s« nicht öffnen: %m" -#: access/transam/twophase.c:1177 +#: access/transam/twophase.c:1248 #, c-format msgid "could not stat two-phase state file \"%s\": %m" msgstr "konnte »stat« für Zweiphasen-Statusdatei »%s« nicht ausführen: %m" -#: access/transam/twophase.c:1209 +#: access/transam/twophase.c:1282 #, c-format msgid "could not read two-phase state file \"%s\": %m" msgstr "konnte Zweiphasen-Statusdatei »%s« nicht lesen: %m" -#: access/transam/twophase.c:1262 access/transam/xlog.c:6318 +#: access/transam/twophase.c:1373 access/transam/xlog.c:6444 #, c-format msgid "Failed while allocating a WAL reading processor." msgstr "Fehlgeschlagen beim Anlegen eines WAL-Leseprozessors." 
-#: access/transam/twophase.c:1268 +#: access/transam/twophase.c:1379 #, c-format msgid "could not read two-phase state from WAL at %X/%X" msgstr "konnte Zweiphasen-Status nicht aus dem WAL bei %X/%X lesen" -#: access/transam/twophase.c:1276 +#: access/transam/twophase.c:1387 #, c-format msgid "expected two-phase state data is not present in WAL at %X/%X" msgstr "erwartete Zweiphasen-Status-Daten sind nicht im WAL bei %X/%X vorhanden" -#: access/transam/twophase.c:1511 +#: access/transam/twophase.c:1630 #, c-format msgid "could not remove two-phase state file \"%s\": %m" msgstr "konnte Zweiphasen-Statusdatei »%s« nicht löschen: %m" -#: access/transam/twophase.c:1541 +#: access/transam/twophase.c:1659 #, c-format msgid "could not recreate two-phase state file \"%s\": %m" msgstr "konnte Zweiphasen-Statusdatei »%s« nicht wieder erstellen: %m" -#: access/transam/twophase.c:1550 access/transam/twophase.c:1557 +#: access/transam/twophase.c:1670 access/transam/twophase.c:1678 #, c-format msgid "could not write two-phase state file: %m" msgstr "konnte Zweiphasen-Statusdatei nicht schreiben: %m" -#: access/transam/twophase.c:1569 +#: access/transam/twophase.c:1692 #, c-format msgid "could not fsync two-phase state file: %m" msgstr "konnte Zweiphasen-Statusdatei nicht fsyncen: %m" -#: access/transam/twophase.c:1575 +#: access/transam/twophase.c:1699 #, c-format msgid "could not close two-phase state file: %m" msgstr "konnte Zweiphasen-Statusdatei nicht schließen: %m" -#: access/transam/twophase.c:1648 -#, fuzzy, c-format -#| msgid "%u two-phase state file was written for long-running prepared transactions" -#| msgid_plural "%u two-phase state files were written for long-running prepared transactions" +#: access/transam/twophase.c:1787 +#, c-format msgid "%u two-phase state file was written for a long-running prepared transaction" msgid_plural "%u two-phase state files were written for long-running prepared transactions" -msgstr[0] "%u Zweiphasen-Statusdatei wurde für lange laufende vorbereitete Transaktionen geschrieben" +msgstr[0] "%u Zweiphasen-Statusdatei wurde für eine lange laufende vorbereitete Transaktion geschrieben" msgstr[1] "%u Zweiphasen-Statusdateien wurden für lange laufende vorbereitete Transaktionen geschrieben" -#: access/transam/twophase.c:1712 +#: access/transam/twophase.c:2016 #, c-format -msgid "removing future two-phase state file \"%s\"" -msgstr "entferne Zweiphasen-Statusdatei aus der Zukunft »%s«" +msgid "recovering prepared transaction %u from shared memory" +msgstr "Wiederherstellung der vorbereiteten Transaktion %u aus dem Shared Memory" -#: access/transam/twophase.c:1728 access/transam/twophase.c:1739 -#: access/transam/twophase.c:1859 access/transam/twophase.c:1870 -#: access/transam/twophase.c:1947 +#: access/transam/twophase.c:2106 #, c-format -msgid "removing corrupt two-phase state file \"%s\"" -msgstr "entferne verfälschte Zweiphasen-Statusdatei »%s«" +msgid "removing stale two-phase state file for transaction %u" +msgstr "entferne abgelaufene Zweiphasen-Statusdatei für Transaktion %u" -#: access/transam/twophase.c:1848 access/transam/twophase.c:1936 +#: access/transam/twophase.c:2113 #, c-format -msgid "removing stale two-phase state file \"%s\"" -msgstr "entferne abgelaufene Zweiphasen-Statusdatei »%s«" +msgid "removing stale two-phase state from memory for transaction %u" +msgstr "entferne abgelaufenen Zweiphasen-Status aus dem Speicher für Transaktion %u" -#: access/transam/twophase.c:1954 +#: access/transam/twophase.c:2126 #, c-format -msgid "recovering 
prepared transaction %u" -msgstr "Wiederherstellung der vorbereiteten Transaktion %u" +msgid "removing future two-phase state file for transaction %u" +msgstr "entferne zukünftige Zweiphasen-Statusdatei für Transaktion %u" + +#: access/transam/twophase.c:2133 +#, c-format +msgid "removing future two-phase state from memory for transaction %u" +msgstr "entferne zukünftigen Zweiphasen-Status aus dem Speicher für Transaktion %u" + +#: access/transam/twophase.c:2147 access/transam/twophase.c:2166 +#, c-format +msgid "removing corrupt two-phase state file for transaction %u" +msgstr "entferne verfälschte Zweiphasen-Statusdatei für Transaktion %u" + +#: access/transam/twophase.c:2173 +#, c-format +msgid "removing corrupt two-phase state from memory for transaction %u" +msgstr "entferne verfälschten Zweiphasen-Status aus dem Speicher für Transaktion %u" #: access/transam/varsup.c:124 #, c-format @@ -1560,1189 +1452,1205 @@ msgstr "Datenbank nimmt keine Befehle an, um Datenverlust wegen Transaktionsnumm #, c-format msgid "" "Stop the postmaster and vacuum that database in single-user mode.\n" -"You might also need to commit or roll back old prepared transactions." +"You might also need to commit or roll back old prepared transactions, or drop stale replication slots." msgstr "" "Halten Sie den Postmaster an und führen Sie in dieser Datenbank VACUUM im Einzelbenutzermodus aus.\n" -"Eventuell müssen Sie auch alte vorbereitete Transaktionen committen oder zurückrollen." +"Eventuell müssen Sie auch alte vorbereitete Transaktionen committen oder zurückrollen oder unbenutzte Replikations-Slots löschen." #: access/transam/varsup.c:131 #, c-format msgid "database is not accepting commands to avoid wraparound data loss in database with OID %u" msgstr "Datenbank nimmt keine Befehle an, um Datenverlust wegen Transaktionsnummernüberlauf in Datenbank mit OID %u zu vermeiden" -#: access/transam/varsup.c:143 access/transam/varsup.c:381 +#: access/transam/varsup.c:143 access/transam/varsup.c:402 #, c-format msgid "database \"%s\" must be vacuumed within %u transactions" msgstr "Datenbank »%s« muss innerhalb von %u Transaktionen gevacuumt werden" -#: access/transam/varsup.c:150 access/transam/varsup.c:388 +#: access/transam/varsup.c:150 access/transam/varsup.c:409 #, c-format msgid "database with OID %u must be vacuumed within %u transactions" msgstr "Datenbank mit OID %u muss innerhalb von %u Transaktionen gevacuumt werden" -#: access/transam/varsup.c:346 +#: access/transam/varsup.c:367 #, c-format msgid "transaction ID wrap limit is %u, limited by database with OID %u" msgstr "Grenze für Transaktionsnummernüberlauf ist %u, begrenzt durch Datenbank mit OID %u" -#: access/transam/xact.c:945 +#: access/transam/xact.c:938 #, c-format msgid "cannot have more than 2^32-2 commands in a transaction" msgstr "kann nicht mehr als 2^32-2 Befehle in einer Transaktion ausführen" -#: access/transam/xact.c:1469 +#: access/transam/xact.c:1463 #, c-format msgid "maximum number of committed subtransactions (%d) exceeded" msgstr "maximale Anzahl committeter Subtransaktionen (%d) überschritten" -#: access/transam/xact.c:2266 +#: access/transam/xact.c:2258 #, c-format msgid "cannot PREPARE a transaction that has operated on temporary tables" msgstr "PREPARE kann nicht für eine Transaktion ausgeführt werden, die temporäre Tabellen bearbeitet hat" -#: access/transam/xact.c:2276 +#: access/transam/xact.c:2268 #, c-format msgid "cannot PREPARE a transaction that has exported snapshots" msgstr "PREPARE kann nicht für eine Transaktion 
ausgeführt werden, die Snapshots exportiert hat" +#: access/transam/xact.c:2277 +#, c-format +msgid "cannot PREPARE a transaction that has manipulated logical replication workers" +msgstr "PREPARE kann nicht für eine Transaktion ausgeführt werden, die Arbeitsprozesse für logische Replikation manipuliert hat" + #. translator: %s represents an SQL statement name -#: access/transam/xact.c:3161 +#: access/transam/xact.c:3162 #, c-format msgid "%s cannot run inside a transaction block" msgstr "%s kann nicht in einem Transaktionsblock laufen" #. translator: %s represents an SQL statement name -#: access/transam/xact.c:3171 +#: access/transam/xact.c:3172 #, c-format msgid "%s cannot run inside a subtransaction" msgstr "%s kann nicht in einer Subtransaktion laufen" #. translator: %s represents an SQL statement name -#: access/transam/xact.c:3181 +#: access/transam/xact.c:3182 #, c-format -msgid "%s cannot be executed from a function or multi-command string" -msgstr "%s kann nicht aus einer Funktion oder einer mehrbefehligen Zeichenkette heraus ausgeführt werden" +msgid "%s cannot be executed from a function" +msgstr "%s kann nicht aus einer Funktion heraus ausgeführt werden" #. translator: %s represents an SQL statement name -#: access/transam/xact.c:3252 +#: access/transam/xact.c:3251 access/transam/xact.c:3875 +#: access/transam/xact.c:3944 access/transam/xact.c:4055 #, c-format msgid "%s can only be used in transaction blocks" msgstr "%s kann nur in Transaktionsblöcken verwendet werden" -#: access/transam/xact.c:3436 +#: access/transam/xact.c:3444 #, c-format msgid "there is already a transaction in progress" msgstr "eine Transaktion ist bereits begonnen" -#: access/transam/xact.c:3604 access/transam/xact.c:3707 +#: access/transam/xact.c:3555 access/transam/xact.c:3625 +#: access/transam/xact.c:3734 #, c-format msgid "there is no transaction in progress" msgstr "keine Transaktion offen" -#: access/transam/xact.c:3615 +#: access/transam/xact.c:3636 #, c-format msgid "cannot commit during a parallel operation" msgstr "während einer parallelen Operation kann nicht committet werden" -#: access/transam/xact.c:3718 +#: access/transam/xact.c:3745 #, c-format msgid "cannot abort during a parallel operation" msgstr "während einer parallelen Operation kann nicht abgebrochen werden" -#: access/transam/xact.c:3760 +#: access/transam/xact.c:3839 #, c-format msgid "cannot define savepoints during a parallel operation" msgstr "während einer parallelen Operation können keine Sicherungspunkte definiert werden" -#: access/transam/xact.c:3827 +#: access/transam/xact.c:3926 #, c-format msgid "cannot release savepoints during a parallel operation" msgstr "während einer parallelen Operation können keine Sicherungspunkte freigegeben werden" -#: access/transam/xact.c:3838 access/transam/xact.c:3890 -#: access/transam/xact.c:3896 access/transam/xact.c:3952 -#: access/transam/xact.c:4002 access/transam/xact.c:4008 +#: access/transam/xact.c:3936 access/transam/xact.c:3987 +#: access/transam/xact.c:4047 access/transam/xact.c:4096 #, c-format -msgid "no such savepoint" -msgstr "Sicherungspunkt existiert nicht" +msgid "savepoint \"%s\" does not exist" +msgstr "Sicherungspunkt »%s« existiert nicht" -#: access/transam/xact.c:3940 +#: access/transam/xact.c:3993 access/transam/xact.c:4102 +#, c-format +msgid "savepoint \"%s\" does not exist within current savepoint level" +msgstr "Sicherungspunkt »%s« existiert nicht innerhalb der aktuellen Sicherungspunktebene" +#:
access/transam/xact.c:4035 #, c-format msgid "cannot rollback to savepoints during a parallel operation" msgstr "während einer parallelen Operation kann nicht auf einen Sicherungspunkt zurückgerollt werden" -#: access/transam/xact.c:4068 +#: access/transam/xact.c:4163 #, c-format msgid "cannot start subtransactions during a parallel operation" msgstr "während einer parallelen Operation können keine Subtransaktionen gestartet werden" -#: access/transam/xact.c:4135 +#: access/transam/xact.c:4231 #, c-format msgid "cannot commit subtransactions during a parallel operation" msgstr "während einer parallelen Operation können keine Subtransaktionen committet werden" -#: access/transam/xact.c:4743 +#: access/transam/xact.c:4867 #, c-format msgid "cannot have more than 2^32-1 subtransactions in a transaction" msgstr "kann nicht mehr als 2^32-1 Subtransaktionen in einer Transaktion haben" -#: access/transam/xlog.c:2446 +#: access/transam/xlog.c:2479 #, c-format msgid "could not seek in log file %s to offset %u: %m" msgstr "konnte Positionszeiger in Logdatei %s nicht auf %u setzen: %m" -#: access/transam/xlog.c:2466 +#: access/transam/xlog.c:2501 #, c-format msgid "could not write to log file %s at offset %u, length %zu: %m" msgstr "konnte nicht in Logdatei %s bei Position %u, Länge %zu schreiben: %m" -#: access/transam/xlog.c:2730 +#: access/transam/xlog.c:2767 #, c-format msgid "updated min recovery point to %X/%X on timeline %u" msgstr "minimaler Recovery-Punkt auf %X/%X auf Zeitleiste %u aktualisiert" -#: access/transam/xlog.c:3372 +#: access/transam/xlog.c:3411 #, c-format msgid "not enough data in file \"%s\"" msgstr "nicht genug Daten in Datei »%s«" -#: access/transam/xlog.c:3513 +#: access/transam/xlog.c:3556 #, c-format -msgid "could not open transaction log file \"%s\": %m" -msgstr "konnte Transaktionslogdatei »%s« nicht öffnen: %m" +msgid "could not open write-ahead log file \"%s\": %m" +msgstr "konnte Write-Ahead-Log-Datei »%s« nicht öffnen: %m" -#: access/transam/xlog.c:3702 access/transam/xlog.c:5503 +#: access/transam/xlog.c:3745 access/transam/xlog.c:5634 #, c-format msgid "could not close log file %s: %m" msgstr "konnte Logdatei %s nicht schließen: %m" -#: access/transam/xlog.c:3759 access/transam/xlogutils.c:697 -#: replication/walsender.c:2050 +#: access/transam/xlog.c:3811 access/transam/xlogutils.c:703 +#: replication/walsender.c:2396 #, c-format msgid "requested WAL segment %s has already been removed" msgstr "das angeforderte WAL-Segment %s wurde schon entfernt" -#: access/transam/xlog.c:3819 access/transam/xlog.c:3894 -#: access/transam/xlog.c:4092 -#, c-format -msgid "could not open transaction log directory \"%s\": %m" -msgstr "konnte Transaktionslog-Verzeichnis »%s« nicht öffnen: %m" - -#: access/transam/xlog.c:3975 +#: access/transam/xlog.c:4018 #, c-format -msgid "recycled transaction log file \"%s\"" -msgstr "Transaktionslogdatei »%s« wird wiederverwendet" +msgid "recycled write-ahead log file \"%s\"" +msgstr "Write-Ahead-Log-Datei »%s« wird wiederverwendet" -#: access/transam/xlog.c:3987 +#: access/transam/xlog.c:4030 #, c-format -msgid "removing transaction log file \"%s\"" -msgstr "entferne Transaktionslogdatei »%s«" +msgid "removing write-ahead log file \"%s\"" +msgstr "entferne Write-Ahead-Log-Datei »%s«" -#: access/transam/xlog.c:4007 +#: access/transam/xlog.c:4050 #, c-format -msgid "could not rename old transaction log file \"%s\": %m" -msgstr "konnte alte Transaktionslogdatei »%s« nicht umbenennen: %m" +msgid "could not rename old write-ahead log file \"%s\": 
%m" +msgstr "konnte alte Write-Ahead-Log-Datei »%s« nicht umbenennen: %m" -#: access/transam/xlog.c:4019 -#, c-format -msgid "could not remove old transaction log file \"%s\": %m" -msgstr "konnte alte Transaktionslogdatei »%s« nicht löschen: %m" - -#: access/transam/xlog.c:4052 access/transam/xlog.c:4062 +#: access/transam/xlog.c:4092 access/transam/xlog.c:4102 #, c-format msgid "required WAL directory \"%s\" does not exist" msgstr "benötigtes WAL-Verzeichnis »%s« existiert nicht" -#: access/transam/xlog.c:4068 +#: access/transam/xlog.c:4108 #, c-format msgid "creating missing WAL directory \"%s\"" msgstr "erzeuge fehlendes WAL-Verzeichnis »%s«" -#: access/transam/xlog.c:4071 +#: access/transam/xlog.c:4111 #, c-format msgid "could not create missing directory \"%s\": %m" msgstr "konnte fehlendes Verzeichnis »%s« nicht erzeugen: %m" -#: access/transam/xlog.c:4102 -#, c-format -msgid "removing transaction log backup history file \"%s\"" -msgstr "entferne Transaktionslog-Backup-History-Datei »%s«" - -#: access/transam/xlog.c:4183 +#: access/transam/xlog.c:4219 #, c-format msgid "unexpected timeline ID %u in log segment %s, offset %u" msgstr "unerwartete Zeitleisten-ID %u in Logsegment %s, Offset %u" -#: access/transam/xlog.c:4305 +#: access/transam/xlog.c:4341 #, c-format msgid "new timeline %u is not a child of database system timeline %u" msgstr "neue Zeitleiste %u ist kein Kind der Datenbanksystemzeitleiste %u" -#: access/transam/xlog.c:4319 +#: access/transam/xlog.c:4355 #, c-format msgid "new timeline %u forked off current database system timeline %u before current recovery point %X/%X" msgstr "neue Zeitleiste %u zweigte von der aktuellen Datenbanksystemzeitleiste %u vor dem aktuellen Wiederherstellungspunkt %X/%X ab" -#: access/transam/xlog.c:4338 +#: access/transam/xlog.c:4374 #, c-format msgid "new target timeline is %u" msgstr "neue Zielzeitleiste ist %u" -#: access/transam/xlog.c:4413 +#: access/transam/xlog.c:4454 #, c-format msgid "could not create control file \"%s\": %m" msgstr "konnte Kontrolldatei »%s« nicht erzeugen: %m" -#: access/transam/xlog.c:4424 access/transam/xlog.c:4644 +#: access/transam/xlog.c:4466 access/transam/xlog.c:4720 #, c-format msgid "could not write to control file: %m" msgstr "konnte nicht in Kontrolldatei schreiben: %m" -#: access/transam/xlog.c:4430 access/transam/xlog.c:4650 +#: access/transam/xlog.c:4474 access/transam/xlog.c:4728 #, c-format msgid "could not fsync control file: %m" msgstr "konnte Kontrolldatei nicht fsyncen: %m" -#: access/transam/xlog.c:4435 access/transam/xlog.c:4655 +#: access/transam/xlog.c:4480 access/transam/xlog.c:4734 #, c-format msgid "could not close control file: %m" msgstr "konnte Kontrolldatei nicht schließen: %m" -#: access/transam/xlog.c:4453 access/transam/xlog.c:4633 +#: access/transam/xlog.c:4499 access/transam/xlog.c:4708 #, c-format msgid "could not open control file \"%s\": %m" msgstr "konnte Kontrolldatei »%s« nicht öffnen: %m" -#: access/transam/xlog.c:4459 +#: access/transam/xlog.c:4509 #, c-format msgid "could not read from control file: %m" msgstr "konnte nicht aus Kontrolldatei lesen: %m" -#: access/transam/xlog.c:4472 access/transam/xlog.c:4481 -#: access/transam/xlog.c:4505 access/transam/xlog.c:4512 -#: access/transam/xlog.c:4519 access/transam/xlog.c:4524 -#: access/transam/xlog.c:4531 access/transam/xlog.c:4538 -#: access/transam/xlog.c:4545 access/transam/xlog.c:4552 -#: access/transam/xlog.c:4559 access/transam/xlog.c:4566 -#: access/transam/xlog.c:4573 access/transam/xlog.c:4582 -#: 
access/transam/xlog.c:4589 access/transam/xlog.c:4598 -#: access/transam/xlog.c:4605 utils/init/miscinit.c:1380 +#: access/transam/xlog.c:4512 +#, fuzzy, c-format +#| msgid "could not read from control file: %m" +msgid "could not read from control file: read %d bytes, expected %d" +msgstr "konnte nicht aus Kontrolldatei lesen: %m" + +#: access/transam/xlog.c:4527 access/transam/xlog.c:4536 +#: access/transam/xlog.c:4560 access/transam/xlog.c:4567 +#: access/transam/xlog.c:4574 access/transam/xlog.c:4579 +#: access/transam/xlog.c:4586 access/transam/xlog.c:4593 +#: access/transam/xlog.c:4600 access/transam/xlog.c:4607 +#: access/transam/xlog.c:4614 access/transam/xlog.c:4621 +#: access/transam/xlog.c:4630 access/transam/xlog.c:4637 +#: access/transam/xlog.c:4646 access/transam/xlog.c:4653 +#: utils/init/miscinit.c:1498 #, c-format msgid "database files are incompatible with server" msgstr "Datenbankdateien sind inkompatibel mit Server" -#: access/transam/xlog.c:4473 +#: access/transam/xlog.c:4528 #, c-format msgid "The database cluster was initialized with PG_CONTROL_VERSION %d (0x%08x), but the server was compiled with PG_CONTROL_VERSION %d (0x%08x)." msgstr "Der Datenbank-Cluster wurde mit PG_CONTROL_VERSION %d (0x%08x) initialisiert, aber der Server wurde mit PG_CONTROL_VERSION %d (0x%08x) kompiliert." -#: access/transam/xlog.c:4477 +#: access/transam/xlog.c:4532 #, c-format msgid "This could be a problem of mismatched byte ordering. It looks like you need to initdb." msgstr "Das Problem könnte eine falsche Byte-Reihenfolge sein. Es sieht so aus, dass Sie initdb ausführen müssen." -#: access/transam/xlog.c:4482 +#: access/transam/xlog.c:4537 #, c-format msgid "The database cluster was initialized with PG_CONTROL_VERSION %d, but the server was compiled with PG_CONTROL_VERSION %d." msgstr "Der Datenbank-Cluster wurde mit PG_CONTROL_VERSION %d initialisiert, aber der Server wurde mit PG_CONTROL_VERSION %d kompiliert." -#: access/transam/xlog.c:4485 access/transam/xlog.c:4509 -#: access/transam/xlog.c:4516 access/transam/xlog.c:4521 +#: access/transam/xlog.c:4540 access/transam/xlog.c:4564 +#: access/transam/xlog.c:4571 access/transam/xlog.c:4576 #, c-format msgid "It looks like you need to initdb." msgstr "Es sieht so aus, dass Sie initdb ausführen müssen." -#: access/transam/xlog.c:4496 +#: access/transam/xlog.c:4551 #, c-format msgid "incorrect checksum in control file" msgstr "falsche Prüfsumme in Kontrolldatei" -#: access/transam/xlog.c:4506 +#: access/transam/xlog.c:4561 #, c-format msgid "The database cluster was initialized with CATALOG_VERSION_NO %d, but the server was compiled with CATALOG_VERSION_NO %d." msgstr "Der Datenbank-Cluster wurde mit CATALOG_VERSION_NO %d initialisiert, aber der Server wurde mit CATALOG_VERSION_NO %d kompiliert." -#: access/transam/xlog.c:4513 +#: access/transam/xlog.c:4568 #, c-format msgid "The database cluster was initialized with MAXALIGN %d, but the server was compiled with MAXALIGN %d." msgstr "Der Datenbank-Cluster wurde mit MAXALIGN %d initialisiert, aber der Server wurde mit MAXALIGN %d kompiliert." -#: access/transam/xlog.c:4520 +#: access/transam/xlog.c:4575 #, c-format msgid "The database cluster appears to use a different floating-point number format than the server executable." msgstr "Der Datenbank-Cluster verwendet anscheinend ein anderes Fließkommazahlenformat als das Serverprogramm." 
-#: access/transam/xlog.c:4525 +#: access/transam/xlog.c:4580 #, c-format msgid "The database cluster was initialized with BLCKSZ %d, but the server was compiled with BLCKSZ %d." msgstr "Der Datenbank-Cluster wurde mit BLCKSZ %d initialisiert, aber der Server wurde mit BLCKSZ %d kompiliert." -#: access/transam/xlog.c:4528 access/transam/xlog.c:4535 -#: access/transam/xlog.c:4542 access/transam/xlog.c:4549 -#: access/transam/xlog.c:4556 access/transam/xlog.c:4563 -#: access/transam/xlog.c:4570 access/transam/xlog.c:4577 -#: access/transam/xlog.c:4585 access/transam/xlog.c:4592 -#: access/transam/xlog.c:4601 access/transam/xlog.c:4608 +#: access/transam/xlog.c:4583 access/transam/xlog.c:4590 +#: access/transam/xlog.c:4597 access/transam/xlog.c:4604 +#: access/transam/xlog.c:4611 access/transam/xlog.c:4618 +#: access/transam/xlog.c:4625 access/transam/xlog.c:4633 +#: access/transam/xlog.c:4640 access/transam/xlog.c:4649 +#: access/transam/xlog.c:4656 #, c-format msgid "It looks like you need to recompile or initdb." msgstr "Es sieht so aus, dass Sie neu kompilieren oder initdb ausführen müssen." -#: access/transam/xlog.c:4532 +#: access/transam/xlog.c:4587 #, c-format msgid "The database cluster was initialized with RELSEG_SIZE %d, but the server was compiled with RELSEG_SIZE %d." msgstr "Der Datenbank-Cluster wurde mit RELSEG_SIZE %d initialisiert, aber der Server wurde mit RELSEG_SIZE %d kompiliert." -#: access/transam/xlog.c:4539 +#: access/transam/xlog.c:4594 #, c-format msgid "The database cluster was initialized with XLOG_BLCKSZ %d, but the server was compiled with XLOG_BLCKSZ %d." msgstr "Der Datenbank-Cluster wurde mit XLOG_BLCKSZ %d initialisiert, aber der Server wurde mit XLOG_BLCKSZ %d kompiliert." -#: access/transam/xlog.c:4546 -#, c-format -msgid "The database cluster was initialized with XLOG_SEG_SIZE %d, but the server was compiled with XLOG_SEG_SIZE %d." -msgstr "Der Datenbank-Cluster wurde mit XLOG_SEG_SIZE %d initialisiert, aber der Server wurde mit XLOG_SEG_SIZE %d kompiliert." - -#: access/transam/xlog.c:4553 +#: access/transam/xlog.c:4601 #, c-format msgid "The database cluster was initialized with NAMEDATALEN %d, but the server was compiled with NAMEDATALEN %d." msgstr "Der Datenbank-Cluster wurde mit NAMEDATALEN %d initialisiert, aber der Server wurde mit NAMEDATALEN %d kompiliert." -#: access/transam/xlog.c:4560 +#: access/transam/xlog.c:4608 #, c-format msgid "The database cluster was initialized with INDEX_MAX_KEYS %d, but the server was compiled with INDEX_MAX_KEYS %d." msgstr "Der Datenbank-Cluster wurde mit INDEX_MAX_KEYS %d initialisiert, aber der Server wurde mit INDEX_MAX_KEYS %d kompiliert." -#: access/transam/xlog.c:4567 +#: access/transam/xlog.c:4615 #, c-format msgid "The database cluster was initialized with TOAST_MAX_CHUNK_SIZE %d, but the server was compiled with TOAST_MAX_CHUNK_SIZE %d." msgstr "Der Datenbank-Cluster wurde mit TOAST_MAX_CHUNK_SIZE %d initialisiert, aber der Server wurde mit TOAST_MAX_CHUNK_SIZE %d kompiliert." -#: access/transam/xlog.c:4574 +#: access/transam/xlog.c:4622 #, c-format msgid "The database cluster was initialized with LOBLKSIZE %d, but the server was compiled with LOBLKSIZE %d." msgstr "Der Datenbank-Cluster wurde mit LOBLKSIZE %d initialisiert, aber der Server wurde mit LOBLKSIZE %d kompiliert." -#: access/transam/xlog.c:4583 +#: access/transam/xlog.c:4631 #, c-format msgid "The database cluster was initialized without USE_FLOAT4_BYVAL but the server was compiled with USE_FLOAT4_BYVAL."
msgstr "Der Datenbank-Cluster wurde ohne USE_FLOAT4_BYVAL initialisiert, aber der Server wurde mit USE_FLOAT4_BYVAL kompiliert." -#: access/transam/xlog.c:4590 +#: access/transam/xlog.c:4638 #, c-format msgid "The database cluster was initialized with USE_FLOAT4_BYVAL but the server was compiled without USE_FLOAT4_BYVAL." msgstr "Der Datenbank-Cluster wurde mit USE_FLOAT4_BYVAL initialisiert, aber der Server wurde ohne USE_FLOAT4_BYVAL kompiliert." -#: access/transam/xlog.c:4599 +#: access/transam/xlog.c:4647 #, c-format msgid "The database cluster was initialized without USE_FLOAT8_BYVAL but the server was compiled with USE_FLOAT8_BYVAL." msgstr "Der Datenbank-Cluster wurde ohne USE_FLOAT8_BYVAL initialisiert, aber der Server wurde mit USE_FLOAT8_BYVAL kompiliert." -#: access/transam/xlog.c:4606 +#: access/transam/xlog.c:4654 #, c-format msgid "The database cluster was initialized with USE_FLOAT8_BYVAL but the server was compiled without USE_FLOAT8_BYVAL." msgstr "Der Datenbank-Cluster wurde mit USE_FLOAT8_BYVAL initialisiert, aber der Server wurde ohne USE_FLOAT8_BYVAL kompiliert." -#: access/transam/xlog.c:4958 -#, fuzzy, c-format -#| msgid "could not generate random encryption vector" -msgid "could not generation secret authorization token" -msgstr "konnte zufälligen Verschlüsselungsvektor nicht erzeugen" +#: access/transam/xlog.c:4663 +#, c-format +msgid "WAL segment size must be a power of two between 1 MB and 1 GB, but the control file specifies %d byte" +msgid_plural "WAL segment size must be a power of two between 1 MB and 1 GB, but the control file specifies %d bytes" +msgstr[0] "WAL-Segmentgröße muss eine Zweierpotenz zwischen 1 MB und 1 GB sein, aber die Kontrolldatei gibt %d Byte an" +msgstr[1] "WAL-Segmentgröße muss eine Zweierpotenz zwischen 1 MB und 1 GB sein, aber die Kontrolldatei gibt %d Bytes an" + +#: access/transam/xlog.c:4675 +#, c-format +msgid "\"min_wal_size\" must be at least twice \"wal_segment_size\"." +msgstr "»min_wal_size« muss mindestens zweimal so groß wie »wal_segment_size« sein." + +#: access/transam/xlog.c:4679 +#, c-format +msgid "\"max_wal_size\" must be at least twice \"wal_segment_size\"." +msgstr "»max_wal_size« muss mindestens zweimal so groß wie »wal_segment_size« sein." 
+ +#: access/transam/xlog.c:5066 +#, c-format +msgid "could not generate secret authorization token" +msgstr "konnte geheimes Autorisierungstoken nicht erzeugen" -#: access/transam/xlog.c:5046 +#: access/transam/xlog.c:5156 #, c-format -msgid "could not write bootstrap transaction log file: %m" -msgstr "konnte Bootstrap-Transaktionslogdatei nicht schreiben: %m" +msgid "could not write bootstrap write-ahead log file: %m" +msgstr "konnte Bootstrap-Write-Ahead-Log-Datei nicht schreiben: %m" -#: access/transam/xlog.c:5052 +#: access/transam/xlog.c:5164 #, c-format -msgid "could not fsync bootstrap transaction log file: %m" -msgstr "konnte Bootstrap-Transaktionslogdatei nicht fsyncen: %m" +msgid "could not fsync bootstrap write-ahead log file: %m" +msgstr "konnte Bootstrap-Write-Ahead-Log-Datei nicht fsyncen: %m" -#: access/transam/xlog.c:5057 +#: access/transam/xlog.c:5170 #, c-format -msgid "could not close bootstrap transaction log file: %m" -msgstr "konnte Bootstrap-Transaktionslogdatei nicht schließen: %m" +msgid "could not close bootstrap write-ahead log file: %m" +msgstr "konnte Bootstrap-Write-Ahead-Log-Datei nicht schließen: %m" -#: access/transam/xlog.c:5133 +#: access/transam/xlog.c:5252 #, c-format msgid "could not open recovery command file \"%s\": %m" msgstr "konnte Recovery-Kommandodatei »%s« nicht öffnen: %m" -#: access/transam/xlog.c:5179 access/transam/xlog.c:5281 +#: access/transam/xlog.c:5298 access/transam/xlog.c:5412 #, c-format msgid "invalid value for recovery parameter \"%s\": \"%s\"" msgstr "ungültiger Wert für Recovery-Parameter »%s«: »%s«" -#: access/transam/xlog.c:5182 +#: access/transam/xlog.c:5301 #, c-format msgid "Valid values are \"pause\", \"promote\", and \"shutdown\"." msgstr "Gültige Werte sind »pause«, »promote« und »shutdown«." -#: access/transam/xlog.c:5202 +#: access/transam/xlog.c:5321 #, c-format msgid "recovery_target_timeline is not a valid number: \"%s\"" msgstr "recovery_target_timeline ist keine gültige Zahl: »%s«" -#: access/transam/xlog.c:5219 +#: access/transam/xlog.c:5338 #, c-format msgid "recovery_target_xid is not a valid number: \"%s\"" msgstr "recovery_target_xid ist keine gültige Zahl: »%s«" -#: access/transam/xlog.c:5250 +#: access/transam/xlog.c:5358 +#, c-format +msgid "recovery_target_time is not a valid timestamp: \"%s\"" +msgstr "recovery_target_time ist keine gültige Zeitangabe: »%s«" + +#: access/transam/xlog.c:5381 #, c-format msgid "recovery_target_name is too long (maximum %d characters)" msgstr "recovery_target_name ist zu lang (maximal %d Zeichen)" -#: access/transam/xlog.c:5284 +#: access/transam/xlog.c:5415 #, c-format msgid "The only allowed value is \"immediate\"." msgstr "Der einzige erlaubte Wert ist »immediate«." 
-#: access/transam/xlog.c:5297 access/transam/xlog.c:5308 -#: commands/extension.c:546 commands/extension.c:554 utils/misc/guc.c:5706 +#: access/transam/xlog.c:5428 access/transam/xlog.c:5439 +#: commands/extension.c:547 commands/extension.c:555 utils/misc/guc.c:5973 #, c-format msgid "parameter \"%s\" requires a Boolean value" msgstr "Parameter »%s« erfordert einen Boole’schen Wert" -#: access/transam/xlog.c:5343 +#: access/transam/xlog.c:5474 #, c-format msgid "parameter \"%s\" requires a temporal value" msgstr "Parameter »%s« erfordert einen Zeitwert" -#: access/transam/xlog.c:5345 catalog/dependency.c:959 -#: catalog/dependency.c:960 catalog/dependency.c:966 catalog/dependency.c:967 -#: catalog/dependency.c:978 catalog/dependency.c:979 commands/tablecmds.c:946 -#: commands/tablecmds.c:10018 commands/user.c:1052 commands/view.c:505 -#: libpq/auth.c:334 replication/syncrep.c:1118 storage/lmgr/deadlock.c:1139 -#: storage/lmgr/proc.c:1286 utils/adt/acl.c:5248 utils/misc/guc.c:5728 -#: utils/misc/guc.c:5821 utils/misc/guc.c:9773 utils/misc/guc.c:9807 -#: utils/misc/guc.c:9841 utils/misc/guc.c:9875 utils/misc/guc.c:9910 +#: access/transam/xlog.c:5476 catalog/dependency.c:969 catalog/dependency.c:970 +#: catalog/dependency.c:976 catalog/dependency.c:977 catalog/dependency.c:988 +#: catalog/dependency.c:989 commands/tablecmds.c:1069 +#: commands/tablecmds.c:10790 commands/user.c:1064 commands/view.c:505 +#: libpq/auth.c:336 replication/syncrep.c:1158 storage/lmgr/deadlock.c:1139 +#: storage/lmgr/proc.c:1322 utils/adt/acl.c:5269 utils/misc/guc.c:5995 +#: utils/misc/guc.c:6088 utils/misc/guc.c:10075 utils/misc/guc.c:10109 +#: utils/misc/guc.c:10143 utils/misc/guc.c:10177 utils/misc/guc.c:10212 #, c-format msgid "%s" msgstr "%s" -#: access/transam/xlog.c:5352 +#: access/transam/xlog.c:5483 #, c-format msgid "unrecognized recovery parameter \"%s\"" msgstr "unbekannter Recovery-Parameter »%s«" -#: access/transam/xlog.c:5363 +#: access/transam/xlog.c:5494 #, c-format msgid "recovery command file \"%s\" specified neither primary_conninfo nor restore_command" msgstr "Recovery-Kommandodatei »%s« hat weder primary_conninfo noch restore_command angegeben" -#: access/transam/xlog.c:5365 +#: access/transam/xlog.c:5496 #, c-format msgid "The database server will regularly poll the pg_wal subdirectory to check for files placed there." msgstr "Der Datenbankserver prüft das Unterverzeichnis pg_wal regelmäßig auf dort abgelegte Dateien." 
-#: access/transam/xlog.c:5372 +#: access/transam/xlog.c:5503 #, c-format msgid "recovery command file \"%s\" must specify restore_command when standby mode is not enabled" msgstr "Recovery-Kommandodatei »%s« muss restore_command angeben, wenn der Standby-Modus nicht eingeschaltet ist" -#: access/transam/xlog.c:5393 +#: access/transam/xlog.c:5524 #, c-format msgid "standby mode is not supported by single-user servers" msgstr "Standby-Modus wird von Servern im Einzelbenutzermodus nicht unterstützt" -#: access/transam/xlog.c:5412 +#: access/transam/xlog.c:5543 #, c-format msgid "recovery target timeline %u does not exist" msgstr "recovery_target_timeline %u existiert nicht" -#: access/transam/xlog.c:5533 +#: access/transam/xlog.c:5664 #, c-format msgid "archive recovery complete" msgstr "Wiederherstellung aus Archiv abgeschlossen" -#: access/transam/xlog.c:5592 access/transam/xlog.c:5858 +#: access/transam/xlog.c:5723 access/transam/xlog.c:5989 #, c-format msgid "recovery stopping after reaching consistency" msgstr "Wiederherstellung beendet, nachdem Konsistenz erreicht wurde" -#: access/transam/xlog.c:5613 +#: access/transam/xlog.c:5744 #, c-format -msgid "recovery stopping before WAL position (LSN) \"%X/%X\"" +msgid "recovery stopping before WAL location (LSN) \"%X/%X\"" msgstr "Wiederherstellung beendet vor WAL-Position (LSN) »%X/%X«" -#: access/transam/xlog.c:5699 +#: access/transam/xlog.c:5830 #, c-format msgid "recovery stopping before commit of transaction %u, time %s" msgstr "Wiederherstellung beendet vor Commit der Transaktion %u, Zeit %s" -#: access/transam/xlog.c:5706 +#: access/transam/xlog.c:5837 #, c-format msgid "recovery stopping before abort of transaction %u, time %s" msgstr "Wiederherstellung beendet vor Abbruch der Transaktion %u, Zeit %s" -#: access/transam/xlog.c:5752 +#: access/transam/xlog.c:5883 #, c-format msgid "recovery stopping at restore point \"%s\", time %s" msgstr "Wiederherstellung beendet bei Restore-Punkt »%s«, Zeit %s" -#: access/transam/xlog.c:5770 +#: access/transam/xlog.c:5901 #, c-format -msgid "recovery stopping after WAL position (LSN) \"%X/%X\"" +msgid "recovery stopping after WAL location (LSN) \"%X/%X\"" msgstr "Wiederherstellung beendet nach WAL-Position (LSN) »%X/%X«" -#: access/transam/xlog.c:5838 +#: access/transam/xlog.c:5969 #, c-format msgid "recovery stopping after commit of transaction %u, time %s" msgstr "Wiederherstellung beendet nach Commit der Transaktion %u, Zeit %s" -#: access/transam/xlog.c:5846 +#: access/transam/xlog.c:5977 #, c-format msgid "recovery stopping after abort of transaction %u, time %s" msgstr "Wiederherstellung beendet nach Abbruch der Transaktion %u, Zeit %s" -#: access/transam/xlog.c:5886 +#: access/transam/xlog.c:6017 #, c-format msgid "recovery has paused" msgstr "Wiederherstellung wurde pausiert" -#: access/transam/xlog.c:5887 +#: access/transam/xlog.c:6018 #, c-format msgid "Execute pg_wal_replay_resume() to continue." msgstr "Führen Sie pg_wal_replay_resume() aus, um fortzusetzen."
-#: access/transam/xlog.c:6095 +#: access/transam/xlog.c:6226 #, c-format msgid "hot standby is not possible because %s = %d is a lower setting than on the master server (its value was %d)" msgstr "Hot Standby ist nicht möglich, weil %s = %d eine niedrigere Einstellung als auf dem Masterserver ist (Wert dort war %d)" -#: access/transam/xlog.c:6121 +#: access/transam/xlog.c:6252 #, c-format msgid "WAL was generated with wal_level=minimal, data may be missing" msgstr "WAL wurde mit wal_level=minimal erzeugt, eventuell fehlen Daten" -#: access/transam/xlog.c:6122 +#: access/transam/xlog.c:6253 #, c-format msgid "This happens if you temporarily set wal_level=minimal without taking a new base backup." msgstr "Das passiert, wenn vorübergehend wal_level=minimal gesetzt wurde, ohne ein neues Base-Backup zu erzeugen." -#: access/transam/xlog.c:6133 +#: access/transam/xlog.c:6264 #, c-format msgid "hot standby is not possible because wal_level was not set to \"replica\" or higher on the master server" msgstr "Hot Standby ist nicht möglich, weil wal_level auf dem Masterserver nicht auf »replica« oder höher gesetzt wurde" -#: access/transam/xlog.c:6134 +#: access/transam/xlog.c:6265 #, c-format msgid "Either set wal_level to \"replica\" on the master, or turn off hot_standby here." msgstr "Setzen Sie entweder wal_level auf »replica« auf dem Master oder schalten Sie hot_standby hier aus." -#: access/transam/xlog.c:6191 +#: access/transam/xlog.c:6317 #, c-format msgid "control file contains invalid data" msgstr "Kontrolldatei enthält ungültige Daten" -#: access/transam/xlog.c:6197 +#: access/transam/xlog.c:6323 #, c-format msgid "database system was shut down at %s" msgstr "Datenbanksystem wurde am %s heruntergefahren" -#: access/transam/xlog.c:6202 +#: access/transam/xlog.c:6328 #, c-format msgid "database system was shut down in recovery at %s" msgstr "Datenbanksystem wurde während der Wiederherstellung am %s heruntergefahren" -#: access/transam/xlog.c:6206 +#: access/transam/xlog.c:6332 #, c-format msgid "database system shutdown was interrupted; last known up at %s" msgstr "Datenbanksystem wurde beim Herunterfahren unterbrochen; letzte bekannte Aktion am %s" -#: access/transam/xlog.c:6210 +#: access/transam/xlog.c:6336 #, c-format msgid "database system was interrupted while in recovery at %s" msgstr "Datenbanksystem wurde während der Wiederherstellung am %s unterbrochen" -#: access/transam/xlog.c:6212 +#: access/transam/xlog.c:6338 #, c-format msgid "This probably means that some data is corrupted and you will have to use the last backup for recovery." msgstr "Das bedeutet wahrscheinlich, dass einige Daten verfälscht sind und Sie die letzte Datensicherung zur Wiederherstellung verwenden müssen." -#: access/transam/xlog.c:6216 +#: access/transam/xlog.c:6342 #, c-format msgid "database system was interrupted while in recovery at log time %s" msgstr "Datenbanksystem wurde während der Wiederherstellung bei Logzeit %s unterbrochen" -#: access/transam/xlog.c:6218 +#: access/transam/xlog.c:6344 #, c-format msgid "If this has occurred more than once some data might be corrupted and you might need to choose an earlier recovery target." msgstr "Wenn dies mehr als einmal vorgekommen ist, dann sind einige Daten möglicherweise verfälscht und Sie müssen ein früheres Wiederherstellungsziel wählen." 
-#: access/transam/xlog.c:6222 +#: access/transam/xlog.c:6348 #, c-format msgid "database system was interrupted; last known up at %s" msgstr "Datenbanksystem wurde unterbrochen; letzte bekannte Aktion am %s" -#: access/transam/xlog.c:6278 +#: access/transam/xlog.c:6404 #, c-format msgid "entering standby mode" msgstr "Standby-Modus eingeschaltet" -#: access/transam/xlog.c:6281 +#: access/transam/xlog.c:6407 #, c-format msgid "starting point-in-time recovery to XID %u" msgstr "starte Point-in-Time-Recovery bis XID %u" -#: access/transam/xlog.c:6285 +#: access/transam/xlog.c:6411 #, c-format msgid "starting point-in-time recovery to %s" msgstr "starte Point-in-Time-Recovery bis %s" -#: access/transam/xlog.c:6289 +#: access/transam/xlog.c:6415 #, c-format msgid "starting point-in-time recovery to \"%s\"" msgstr "starte Point-in-Time-Recovery bis »%s«" -#: access/transam/xlog.c:6293 +#: access/transam/xlog.c:6419 #, c-format -msgid "starting point-in-time recovery to WAL position (LSN) \"%X/%X\"" +msgid "starting point-in-time recovery to WAL location (LSN) \"%X/%X\"" msgstr "starte Point-in-Time-Recovery bis WAL-Position (LSN) »%X/%X«" -#: access/transam/xlog.c:6298 +#: access/transam/xlog.c:6424 #, c-format msgid "starting point-in-time recovery to earliest consistent point" msgstr "starte Point-in-Time-Recovery bis zum frühesten konsistenten Punkt" -#: access/transam/xlog.c:6301 +#: access/transam/xlog.c:6427 #, c-format msgid "starting archive recovery" msgstr "starte Wiederherstellung aus Archiv" -#: access/transam/xlog.c:6352 access/transam/xlog.c:6480 +#: access/transam/xlog.c:6478 access/transam/xlog.c:6603 #, c-format msgid "checkpoint record is at %X/%X" msgstr "Checkpoint-Eintrag ist bei %X/%X" -#: access/transam/xlog.c:6366 +#: access/transam/xlog.c:6492 #, c-format msgid "could not find redo location referenced by checkpoint record" msgstr "konnte die vom Checkpoint-Datensatz referenzierte Redo-Position nicht finden" -#: access/transam/xlog.c:6367 access/transam/xlog.c:6374 +#: access/transam/xlog.c:6493 access/transam/xlog.c:6500 #, c-format msgid "If you are not restoring from a backup, try removing the file \"%s/backup_label\"." msgstr "Wenn Sie gerade keine Sicherung wiederherstellen, versuchen Sie, die Datei »%s/backup_label« zu löschen." -#: access/transam/xlog.c:6373 +#: access/transam/xlog.c:6499 #, c-format msgid "could not locate required checkpoint record" msgstr "konnte den nötigen Checkpoint-Datensatz nicht finden" -#: access/transam/xlog.c:6399 commands/tablespace.c:639 +#: access/transam/xlog.c:6525 commands/tablespace.c:641 #, c-format msgid "could not create symbolic link \"%s\": %m" msgstr "konnte symbolische Verknüpfung »%s« nicht erstellen: %m" -#: access/transam/xlog.c:6431 access/transam/xlog.c:6437 +#: access/transam/xlog.c:6557 access/transam/xlog.c:6563 #, c-format msgid "ignoring file \"%s\" because no file \"%s\" exists" msgstr "ignoriere Datei »%s«, weil keine Datei »%s« existiert" -#: access/transam/xlog.c:6433 access/transam/xlog.c:11310 +#: access/transam/xlog.c:6559 access/transam/xlog.c:11536 #, c-format msgid "File \"%s\" was renamed to \"%s\"." msgstr "Datei »%s« wurde in »%s« umbenannt." -#: access/transam/xlog.c:6439 +#: access/transam/xlog.c:6565 #, c-format msgid "Could not rename file \"%s\" to \"%s\": %m." msgstr "Konnte Datei »%s« nicht in »%s« umbenennen: %m." 
-#: access/transam/xlog.c:6490 access/transam/xlog.c:6505 +#: access/transam/xlog.c:6615 #, c-format msgid "could not locate a valid checkpoint record" msgstr "konnte keinen gültigen Checkpoint-Datensatz finden" -#: access/transam/xlog.c:6499 -#, c-format -msgid "using previous checkpoint record at %X/%X" -msgstr "verwende vorherigen Checkpoint-Eintrag bei %X/%X" - -#: access/transam/xlog.c:6543 +#: access/transam/xlog.c:6653 #, c-format msgid "requested timeline %u is not a child of this server's history" msgstr "angeforderte Zeitleiste %u ist kein Kind der History dieses Servers" -#: access/transam/xlog.c:6545 +#: access/transam/xlog.c:6655 #, c-format msgid "Latest checkpoint is at %X/%X on timeline %u, but in the history of the requested timeline, the server forked off from that timeline at %X/%X." msgstr "Neuester Checkpoint ist bei %X/%X auf Zeitleiste %u, aber in der History der angeforderten Zeitleiste zweigte der Server von dieser Zeitleiste bei %X/%X ab." -#: access/transam/xlog.c:6561 +#: access/transam/xlog.c:6671 #, c-format msgid "requested timeline %u does not contain minimum recovery point %X/%X on timeline %u" msgstr "angeforderte Zeitleiste %u enthält nicht den minimalen Wiederherstellungspunkt %X/%X auf Zeitleiste %u" -#: access/transam/xlog.c:6592 +#: access/transam/xlog.c:6702 #, c-format msgid "invalid next transaction ID" msgstr "ungültige nächste Transaktions-ID" -#: access/transam/xlog.c:6675 +#: access/transam/xlog.c:6796 #, c-format msgid "invalid redo in checkpoint record" msgstr "ungültiges Redo im Checkpoint-Datensatz" -#: access/transam/xlog.c:6686 +#: access/transam/xlog.c:6807 #, c-format msgid "invalid redo record in shutdown checkpoint" msgstr "ungültiger Redo-Datensatz im Shutdown-Checkpoint" -#: access/transam/xlog.c:6714 +#: access/transam/xlog.c:6835 #, c-format msgid "database system was not properly shut down; automatic recovery in progress" msgstr "Datenbanksystem wurde nicht richtig heruntergefahren; automatische Wiederherstellung läuft" -#: access/transam/xlog.c:6718 +#: access/transam/xlog.c:6839 #, c-format msgid "crash recovery starts in timeline %u and has target timeline %u" msgstr "Wiederherstellung nach Absturz beginnt in Zeitleiste %u und hat Zielzeitleiste %u" -#: access/transam/xlog.c:6762 +#: access/transam/xlog.c:6882 #, c-format msgid "backup_label contains data inconsistent with control file" msgstr "Daten in backup_label stimmen nicht mit Kontrolldatei überein" -#: access/transam/xlog.c:6763 +#: access/transam/xlog.c:6883 #, c-format msgid "This means that the backup is corrupted and you will have to use another backup for recovery." msgstr "Das bedeutet, dass die Datensicherung verfälscht ist und Sie eine andere Datensicherung zur Wiederherstellung verwenden werden müssen." 
-#: access/transam/xlog.c:6837 +#: access/transam/xlog.c:6957 #, c-format msgid "initializing for hot standby" msgstr "initialisiere für Hot Standby" -#: access/transam/xlog.c:6969 +#: access/transam/xlog.c:7089 #, c-format msgid "redo starts at %X/%X" msgstr "Redo beginnt bei %X/%X" -#: access/transam/xlog.c:7203 +#: access/transam/xlog.c:7323 #, c-format msgid "requested recovery stop point is before consistent recovery point" msgstr "angeforderter Recovery-Endpunkt ist vor konsistentem Recovery-Punkt" -#: access/transam/xlog.c:7241 +#: access/transam/xlog.c:7361 #, c-format msgid "redo done at %X/%X" msgstr "Redo fertig bei %X/%X" -#: access/transam/xlog.c:7246 access/transam/xlog.c:9244 +#: access/transam/xlog.c:7366 #, c-format msgid "last completed transaction was at log time %s" msgstr "letzte vollständige Transaktion war bei Logzeit %s" -#: access/transam/xlog.c:7255 +#: access/transam/xlog.c:7375 #, c-format msgid "redo is not required" msgstr "Redo nicht nötig" -#: access/transam/xlog.c:7330 access/transam/xlog.c:7334 +#: access/transam/xlog.c:7450 access/transam/xlog.c:7454 #, c-format msgid "WAL ends before end of online backup" msgstr "WAL endet vor dem Ende der Online-Sicherung" -#: access/transam/xlog.c:7331 +#: access/transam/xlog.c:7451 #, c-format msgid "All WAL generated while online backup was taken must be available at recovery." msgstr "Der komplette WAL, der während der Online-Sicherung erzeugt wurde, muss bei der Wiederherstellung verfügbar sein." -#: access/transam/xlog.c:7335 +#: access/transam/xlog.c:7455 #, c-format msgid "Online backup started with pg_start_backup() must be ended with pg_stop_backup(), and all WAL up to that point must be available at recovery." msgstr "Die mit pg_start_backup() begonnene Online-Sicherung muss mit pg_stop_backup() beendet werden und der ganze WAL bis zu diesem Punkt muss bei der Wiederherstellung verfügbar sein." 
-#: access/transam/xlog.c:7338 +#: access/transam/xlog.c:7458 #, c-format msgid "WAL ends before consistent recovery point" msgstr "WAL endet vor einem konsistenten Wiederherstellungspunkt" -#: access/transam/xlog.c:7365 +#: access/transam/xlog.c:7485 #, c-format msgid "selected new timeline ID: %u" msgstr "gewählte neue Zeitleisten-ID: %u" -#: access/transam/xlog.c:7794 +#: access/transam/xlog.c:7914 #, c-format msgid "consistent recovery state reached at %X/%X" msgstr "konsistenter Wiederherstellungszustand erreicht bei %X/%X" -#: access/transam/xlog.c:7986 +#: access/transam/xlog.c:8106 #, c-format msgid "invalid primary checkpoint link in control file" msgstr "ungültige primäre Checkpoint-Verknüpfung in Kontrolldatei" -#: access/transam/xlog.c:7990 -#, c-format -msgid "invalid secondary checkpoint link in control file" -msgstr "ungültige sekundäre Checkpoint-Verknüpfung in Kontrolldatei" - -#: access/transam/xlog.c:7994 +#: access/transam/xlog.c:8110 #, c-format msgid "invalid checkpoint link in backup_label file" msgstr "ungültige Checkpoint-Verknüpfung in backup_label-Datei" -#: access/transam/xlog.c:8011 +#: access/transam/xlog.c:8127 #, c-format msgid "invalid primary checkpoint record" msgstr "ungültiger primärer Checkpoint-Datensatz" -#: access/transam/xlog.c:8015 -#, c-format -msgid "invalid secondary checkpoint record" -msgstr "ungültiger sekundärer Checkpoint-Datensatz" - -#: access/transam/xlog.c:8019 +#: access/transam/xlog.c:8131 #, c-format msgid "invalid checkpoint record" msgstr "ungültiger Checkpoint-Datensatz" -#: access/transam/xlog.c:8030 +#: access/transam/xlog.c:8142 #, c-format msgid "invalid resource manager ID in primary checkpoint record" msgstr "ungültige Resource-Manager-ID im primären Checkpoint-Datensatz" -#: access/transam/xlog.c:8034 -#, c-format -msgid "invalid resource manager ID in secondary checkpoint record" -msgstr "ungültige Resource-Manager-ID im sekundären Checkpoint-Datensatz" - -#: access/transam/xlog.c:8038 +#: access/transam/xlog.c:8146 #, c-format msgid "invalid resource manager ID in checkpoint record" msgstr "ungültige Resource-Manager-ID im Checkpoint-Datensatz" -#: access/transam/xlog.c:8051 +#: access/transam/xlog.c:8159 #, c-format msgid "invalid xl_info in primary checkpoint record" msgstr "ungültige xl_info im primären Checkpoint-Datensatz" -#: access/transam/xlog.c:8055 -#, c-format -msgid "invalid xl_info in secondary checkpoint record" -msgstr "ungültige xl_info im sekundären Checkpoint-Datensatz" - -#: access/transam/xlog.c:8059 +#: access/transam/xlog.c:8163 #, c-format msgid "invalid xl_info in checkpoint record" msgstr "ungültige xl_info im Checkpoint-Datensatz" -#: access/transam/xlog.c:8070 +#: access/transam/xlog.c:8174 #, c-format msgid "invalid length of primary checkpoint record" msgstr "ungültige Länge des primären Checkpoint-Datensatzes" -#: access/transam/xlog.c:8074 -#, c-format -msgid "invalid length of secondary checkpoint record" -msgstr "ungültige Länge des sekundären Checkpoint-Datensatzes" - -#: access/transam/xlog.c:8078 +#: access/transam/xlog.c:8178 #, c-format msgid "invalid length of checkpoint record" msgstr "ungültige Länge des Checkpoint-Datensatzes" -#: access/transam/xlog.c:8281 +#: access/transam/xlog.c:8384 #, c-format msgid "shutting down" msgstr "fahre herunter" -#: access/transam/xlog.c:8589 -#, fuzzy, c-format -#| msgid "checkpoint request failed" -msgid "checkpoint skipped due to an idle system" -msgstr "Checkpoint-Anforderung fehlgeschlagen" +#: access/transam/xlog.c:8703 +#, c-format +msgid 
"checkpoint skipped because system is idle" +msgstr "Checkpoint übersprungen weil das System inaktiv ist" -#: access/transam/xlog.c:8789 +#: access/transam/xlog.c:8908 #, c-format -msgid "concurrent transaction log activity while database system is shutting down" -msgstr "gleichzeitige Transaktionslog-Aktivität während das Datenbanksystem herunterfährt" +msgid "concurrent write-ahead log activity while database system is shutting down" +msgstr "gleichzeitige Write-Ahead-Log-Aktivität während das Datenbanksystem herunterfährt" -#: access/transam/xlog.c:9043 +#: access/transam/xlog.c:9161 #, c-format msgid "skipping restartpoint, recovery has already ended" msgstr "Restart-Punkt übersprungen, Wiederherstellung ist bereits beendet" -#: access/transam/xlog.c:9066 +#: access/transam/xlog.c:9184 #, c-format msgid "skipping restartpoint, already performed at %X/%X" msgstr "Restart-Punkt wird übersprungen, schon bei %X/%X erledigt" -#: access/transam/xlog.c:9242 +#: access/transam/xlog.c:9359 #, c-format msgid "recovery restart point at %X/%X" msgstr "Recovery-Restart-Punkt bei %X/%X" -#: access/transam/xlog.c:9378 +#: access/transam/xlog.c:9361 +#, c-format +msgid "Last completed transaction was at log time %s." +msgstr "Die letzte vollständige Transaktion war bei Logzeit %s." + +#: access/transam/xlog.c:9495 #, c-format msgid "restore point \"%s\" created at %X/%X" msgstr "Restore-Punkt »%s« erzeugt bei %X/%X" -#: access/transam/xlog.c:9508 +#: access/transam/xlog.c:9625 #, c-format msgid "unexpected previous timeline ID %u (current timeline ID %u) in checkpoint record" msgstr "unerwartete vorherige Zeitleisten-ID %u (aktuelle Zeitleisten-ID %u) im Checkpoint-Datensatz" -#: access/transam/xlog.c:9517 +#: access/transam/xlog.c:9634 #, c-format msgid "unexpected timeline ID %u (after %u) in checkpoint record" msgstr "unerwartete Zeitleisten-ID %u (nach %u) im Checkpoint-Datensatz" -#: access/transam/xlog.c:9533 +#: access/transam/xlog.c:9650 #, c-format msgid "unexpected timeline ID %u in checkpoint record, before reaching minimum recovery point %X/%X on timeline %u" msgstr "unerwartete Zeitleisten-ID %u in Checkpoint-Datensatz, bevor der minimale Wiederherstellungspunkt %X/%X auf Zeitleiste %u erreicht wurde" -#: access/transam/xlog.c:9604 +#: access/transam/xlog.c:9726 #, c-format msgid "online backup was canceled, recovery cannot continue" msgstr "Online-Sicherung wurde storniert, Wiederherstellung kann nicht fortgesetzt werden" -#: access/transam/xlog.c:9660 access/transam/xlog.c:9707 -#: access/transam/xlog.c:9730 +#: access/transam/xlog.c:9782 access/transam/xlog.c:9838 +#: access/transam/xlog.c:9861 #, c-format msgid "unexpected timeline ID %u (should be %u) in checkpoint record" msgstr "unerwartete Zeitleisten-ID %u (sollte %u sein) im Checkpoint-Datensatz" -#: access/transam/xlog.c:10005 +#: access/transam/xlog.c:10137 #, c-format msgid "could not fsync log segment %s: %m" msgstr "konnte Logsegment %s nicht fsyncen: %m" -#: access/transam/xlog.c:10029 +#: access/transam/xlog.c:10162 #, c-format msgid "could not fsync log file %s: %m" msgstr "konnte Logdatei %s nicht fsyncen: %m" -#: access/transam/xlog.c:10037 +#: access/transam/xlog.c:10170 #, c-format msgid "could not fsync write-through log file %s: %m" msgstr "konnte Write-Through-Logdatei %s nicht fsyncen: %m" -#: access/transam/xlog.c:10046 +#: access/transam/xlog.c:10179 #, c-format msgid "could not fdatasync log file %s: %m" msgstr "konnte Logdatei %s nicht fdatasyncen: %m" -#: access/transam/xlog.c:10137 
access/transam/xlog.c:10641 -#: access/transam/xlogfuncs.c:293 access/transam/xlogfuncs.c:320 -#: access/transam/xlogfuncs.c:359 access/transam/xlogfuncs.c:380 -#: access/transam/xlogfuncs.c:401 +#: access/transam/xlog.c:10270 access/transam/xlog.c:10797 +#: access/transam/xlogfuncs.c:287 access/transam/xlogfuncs.c:314 +#: access/transam/xlogfuncs.c:353 access/transam/xlogfuncs.c:374 +#: access/transam/xlogfuncs.c:395 #, c-format msgid "WAL control functions cannot be executed during recovery." msgstr "Während der Wiederherstellung können keine WAL-Kontrollfunktionen ausgeführt werden." -#: access/transam/xlog.c:10146 access/transam/xlog.c:10650 +#: access/transam/xlog.c:10279 access/transam/xlog.c:10806 #, c-format msgid "WAL level not sufficient for making an online backup" msgstr "WAL-Level nicht ausreichend, um Online-Sicherung durchzuführen" -#: access/transam/xlog.c:10147 access/transam/xlog.c:10651 -#: access/transam/xlogfuncs.c:326 +#: access/transam/xlog.c:10280 access/transam/xlog.c:10807 +#: access/transam/xlogfuncs.c:320 #, c-format msgid "wal_level must be set to \"replica\" or \"logical\" at server start." msgstr "wal_level muss beim Serverstart auf »replica« oder »logical« gesetzt werden." -#: access/transam/xlog.c:10152 +#: access/transam/xlog.c:10285 #, c-format msgid "backup label too long (max %d bytes)" msgstr "Backup-Label zu lang (maximal %d Bytes)" -#: access/transam/xlog.c:10189 access/transam/xlog.c:10461 -#: access/transam/xlog.c:10499 +#: access/transam/xlog.c:10322 access/transam/xlog.c:10598 +#: access/transam/xlog.c:10636 #, c-format msgid "a backup is already in progress" msgstr "ein Backup läuft bereits" -#: access/transam/xlog.c:10190 +#: access/transam/xlog.c:10323 #, c-format msgid "Run pg_stop_backup() and try again." msgstr "Führen Sie pg_stop_backup() aus und versuchen Sie es nochmal." -#: access/transam/xlog.c:10285 +#: access/transam/xlog.c:10419 #, c-format msgid "WAL generated with full_page_writes=off was replayed since last restartpoint" msgstr "mit full_page_writes=off erzeugtes WAL wurde seit dem letzten Restart-Punkt zurückgespielt" -#: access/transam/xlog.c:10287 access/transam/xlog.c:10832 +#: access/transam/xlog.c:10421 access/transam/xlog.c:11002 #, c-format msgid "This means that the backup being taken on the standby is corrupt and should not be used. Enable full_page_writes and run CHECKPOINT on the master, and then try an online backup again." msgstr "Das bedeutet, dass die aktuelle Datensicherung auf dem Standby-Server verfälscht ist und nicht verwendet werden sollte. Schalten Sie full_page_writes ein, führen Sie CHECKPOINT aus und versuchen Sie dann die Online-Sicherung erneut." 
-#: access/transam/xlog.c:10354 replication/basebackup.c:1096 -#: utils/adt/misc.c:497 +#: access/transam/xlog.c:10489 replication/basebackup.c:1222 +#: utils/adt/misc.c:517 #, c-format msgid "could not read symbolic link \"%s\": %m" msgstr "konnte symbolische Verknüpfung »%s« nicht lesen: %m" -#: access/transam/xlog.c:10361 replication/basebackup.c:1101 -#: utils/adt/misc.c:502 +#: access/transam/xlog.c:10496 replication/basebackup.c:1227 +#: utils/adt/misc.c:522 #, c-format msgid "symbolic link \"%s\" target is too long" msgstr "Ziel für symbolische Verknüpfung »%s« ist zu lang" -#: access/transam/xlog.c:10414 commands/tablespace.c:389 -#: commands/tablespace.c:551 replication/basebackup.c:1116 -#: utils/adt/misc.c:510 +#: access/transam/xlog.c:10548 commands/tablespace.c:391 +#: commands/tablespace.c:553 replication/basebackup.c:1242 utils/adt/misc.c:530 #, c-format msgid "tablespaces are not supported on this platform" msgstr "Tablespaces werden auf dieser Plattform nicht unterstützt" -#: access/transam/xlog.c:10455 access/transam/xlog.c:10493 -#: access/transam/xlog.c:10689 access/transam/xlogarchive.c:105 -#: access/transam/xlogarchive.c:264 commands/copy.c:1864 commands/copy.c:3038 -#: commands/extension.c:3312 commands/tablespace.c:780 -#: commands/tablespace.c:871 guc-file.l:1001 replication/basebackup.c:480 -#: replication/basebackup.c:548 replication/logical/snapbuild.c:1490 -#: storage/file/copydir.c:72 storage/file/copydir.c:115 storage/file/fd.c:2903 -#: storage/file/fd.c:2995 utils/adt/dbsize.c:69 utils/adt/dbsize.c:219 -#: utils/adt/dbsize.c:299 utils/adt/genfile.c:114 utils/adt/genfile.c:333 +#: access/transam/xlog.c:10592 access/transam/xlog.c:10630 +#: access/transam/xlog.c:10845 access/transam/xlogarchive.c:105 +#: access/transam/xlogarchive.c:265 commands/copy.c:1872 commands/copy.c:3157 +#: commands/extension.c:3319 commands/tablespace.c:782 +#: commands/tablespace.c:873 guc-file.l:1004 replication/basebackup.c:513 +#: replication/basebackup.c:583 replication/logical/snapbuild.c:1518 +#: storage/file/copydir.c:68 storage/file/copydir.c:107 storage/file/fd.c:1732 +#: storage/file/fd.c:3098 storage/file/fd.c:3277 storage/file/fd.c:3362 +#: utils/adt/dbsize.c:70 utils/adt/dbsize.c:222 utils/adt/dbsize.c:302 +#: utils/adt/genfile.c:131 utils/adt/genfile.c:382 #, c-format msgid "could not stat file \"%s\": %m" msgstr "konnte »stat« für Datei »%s« nicht ausführen: %m" -#: access/transam/xlog.c:10462 access/transam/xlog.c:10500 +#: access/transam/xlog.c:10599 access/transam/xlog.c:10637 #, c-format msgid "If you're sure there is no backup in progress, remove file \"%s\" and try again." msgstr "Wenn Sie sicher sind, dass noch kein Backup läuft, entfernen Sie die Datei »%s« und versuchen Sie es noch einmal." 
-#: access/transam/xlog.c:10479 access/transam/xlog.c:10517 -#: access/transam/xlog.c:10893 postmaster/syslogger.c:1391 -#: postmaster/syslogger.c:1404 +#: access/transam/xlog.c:10616 access/transam/xlog.c:10654 +#: access/transam/xlog.c:11065 postmaster/syslogger.c:1392 +#: postmaster/syslogger.c:1405 #, c-format msgid "could not write file \"%s\": %m" msgstr "konnte Datei »%s« nicht schreiben: %m" -#: access/transam/xlog.c:10666 +#: access/transam/xlog.c:10822 #, c-format msgid "exclusive backup not in progress" msgstr "es läuft kein exklusives Backup" -#: access/transam/xlog.c:10693 +#: access/transam/xlog.c:10849 #, c-format msgid "a backup is not in progress" msgstr "es läuft kein Backup" -#: access/transam/xlog.c:10767 access/transam/xlog.c:10780 -#: access/transam/xlog.c:11120 access/transam/xlog.c:11126 -#: access/transam/xlog.c:11210 access/transam/xlogfuncs.c:694 +#: access/transam/xlog.c:10935 access/transam/xlog.c:10948 +#: access/transam/xlog.c:11309 access/transam/xlog.c:11315 +#: access/transam/xlog.c:11363 access/transam/xlog.c:11436 +#: access/transam/xlogfuncs.c:688 #, c-format msgid "invalid data in file \"%s\"" msgstr "ungültige Daten in Datei »%s«" -#: access/transam/xlog.c:10784 replication/basebackup.c:994 +#: access/transam/xlog.c:10952 replication/basebackup.c:1079 #, c-format msgid "the standby was promoted during online backup" msgstr "der Standby-Server wurde während der Online-Sicherung zum Primärserver befördert" -#: access/transam/xlog.c:10785 replication/basebackup.c:995 +#: access/transam/xlog.c:10953 replication/basebackup.c:1080 #, c-format msgid "This means that the backup being taken is corrupt and should not be used. Try taking another online backup." msgstr "Das bedeutet, dass die aktuelle Online-Sicherung verfälscht ist und nicht verwendet werden sollte. Versuchen Sie, eine neue Online-Sicherung durchzuführen." -#: access/transam/xlog.c:10830 +#: access/transam/xlog.c:11000 #, c-format msgid "WAL generated with full_page_writes=off was replayed during online backup" msgstr "mit full_page_writes=off erzeugtes WAL wurde während der Online-Sicherung zurückgespielt" -#: access/transam/xlog.c:10942 +#: access/transam/xlog.c:11120 #, c-format msgid "pg_stop_backup cleanup done, waiting for required WAL segments to be archived" msgstr "Aufräumen nach pg_stop_backup beendet, warte bis die benötigten WAL-Segmente archiviert sind" -#: access/transam/xlog.c:10952 +#: access/transam/xlog.c:11130 #, c-format msgid "pg_stop_backup still waiting for all required WAL segments to be archived (%d seconds elapsed)" msgstr "pg_stop_backup wartet immer noch, bis alle benötigten WAL-Segmente archiviert sind (%d Sekunden abgelaufen)" -#: access/transam/xlog.c:10954 +#: access/transam/xlog.c:11132 #, c-format msgid "Check that your archive_command is executing properly. pg_stop_backup can be canceled safely, but the database backup will not be usable without all the WAL segments." msgstr "Prüfen Sie, ob das archive_command korrekt ausgeführt wird. pg_stop_backup kann gefahrlos abgebrochen werden, aber die Datenbanksicherung wird ohne die fehlenden WAL-Segmente nicht benutzbar sein." 
-#: access/transam/xlog.c:10961 +#: access/transam/xlog.c:11139 #, c-format msgid "pg_stop_backup complete, all required WAL segments have been archived" msgstr "pg_stop_backup abgeschlossen, alle benötigten WAL-Segmente wurden archiviert" -#: access/transam/xlog.c:10965 +#: access/transam/xlog.c:11143 #, c-format msgid "WAL archiving is not enabled; you must ensure that all required WAL segments are copied through other means to complete the backup" msgstr "WAL-Archivierung ist nicht eingeschaltet; Sie müssen dafür sorgen, dass alle benötigten WAL-Segmente auf andere Art kopiert werden, um die Sicherung abzuschließen" +#: access/transam/xlog.c:11346 +#, fuzzy, c-format +#| msgid "could not seek in file \"%s\": %m" +msgid "backup time %s in file \"%s\"" +msgstr "konnte Positionszeiger in Datei »%s« nicht setzen: %m" + +#: access/transam/xlog.c:11351 +#, fuzzy, c-format +#| msgid "could not read block %u in file \"%s\": %m" +msgid "backup label %s in file \"%s\"" +msgstr "konnte Block %u in Datei »%s« nicht lesen: %m" + +#: access/transam/xlog.c:11364 +#, c-format +msgid "Timeline ID parsed is %u, but expected %u" +msgstr "" + +#: access/transam/xlog.c:11368 +#, fuzzy, c-format +#| msgid "could not write block %u in file \"%s\": %m" +msgid "backup timeline %u in file \"%s\"" +msgstr "konnte Block %u in Datei »%s« nicht schreiben: %m" + #. translator: %s is a WAL record description -#: access/transam/xlog.c:11250 +#: access/transam/xlog.c:11476 #, c-format msgid "WAL redo at %X/%X for %s" msgstr "WAL-Redo bei %X/%X für %s" -#: access/transam/xlog.c:11299 +#: access/transam/xlog.c:11525 #, c-format msgid "online backup mode was not canceled" msgstr "Online-Sicherungsmodus wurde nicht storniert" -#: access/transam/xlog.c:11300 +#: access/transam/xlog.c:11526 #, c-format msgid "File \"%s\" could not be renamed to \"%s\": %m." msgstr "Konnte Datei »%s« nicht in »%s« umbenennen: %m." -#: access/transam/xlog.c:11309 access/transam/xlog.c:11321 -#: access/transam/xlog.c:11331 +#: access/transam/xlog.c:11535 access/transam/xlog.c:11547 +#: access/transam/xlog.c:11557 #, c-format msgid "online backup mode canceled" msgstr "Online-Sicherungsmodus storniert" -#: access/transam/xlog.c:11322 +#: access/transam/xlog.c:11548 #, c-format msgid "Files \"%s\" and \"%s\" were renamed to \"%s\" and \"%s\", respectively." msgstr "Dateien »%s« und »%s« wurden in »%s« und »%s« umbenannt." -#: access/transam/xlog.c:11332 +#: access/transam/xlog.c:11558 #, c-format msgid "File \"%s\" was renamed to \"%s\", but file \"%s\" could not be renamed to \"%s\": %m." msgstr "Datei »%s« wurde in »%s« umbenannt, aber Datei »%s« konnte nicht in »%s« umbenannt werden: %m." 
-#: access/transam/xlog.c:11454 access/transam/xlogutils.c:719 -#: replication/walreceiver.c:1005 replication/walsender.c:2067 +#: access/transam/xlog.c:11682 access/transam/xlogutils.c:726 +#: replication/walreceiver.c:1025 replication/walsender.c:2413 #, c-format msgid "could not seek in log segment %s to offset %u: %m" msgstr "konnte Positionszeiger von Logsegment %s nicht auf %u setzen: %m" -#: access/transam/xlog.c:11466 +#: access/transam/xlog.c:11696 #, c-format msgid "could not read from log segment %s, offset %u: %m" msgstr "konnte nicht aus Logsegment %s, Position %u lesen: %m" -#: access/transam/xlog.c:11940 +#: access/transam/xlog.c:12225 #, c-format msgid "received promote request" msgstr "Anforderung zum Befördern empfangen" -#: access/transam/xlog.c:11953 +#: access/transam/xlog.c:12238 #, c-format msgid "trigger file found: %s" msgstr "Triggerdatei gefunden: %s" -#: access/transam/xlog.c:11962 +#: access/transam/xlog.c:12247 #, c-format msgid "could not stat trigger file \"%s\": %m" msgstr "konnte »stat« für Trigger-Datei »%s« nicht ausführen: %m" -#: access/transam/xlogarchive.c:243 +#: access/transam/xlogarchive.c:244 #, c-format msgid "archive file \"%s\" has wrong size: %lu instead of %lu" msgstr "Archivdatei »%s« hat falsche Größe: %lu statt %lu" -#: access/transam/xlogarchive.c:252 +#: access/transam/xlogarchive.c:253 #, c-format msgid "restored log file \"%s\" from archive" msgstr "Logdatei »%s« aus Archiv wiederhergestellt" -#: access/transam/xlogarchive.c:302 +#: access/transam/xlogarchive.c:303 #, c-format msgid "could not restore file \"%s\" from archive: %s" msgstr "konnte Datei »%s« nicht aus Archiv wiederherstellen: %s" @@ -2750,125 +2658,115 @@ msgstr "konnte Datei »%s« nicht aus Archiv wiederherstellen: %s" #. translator: First %s represents a recovery.conf parameter name like #. "recovery_end_command", the 2nd is the value of that parameter, the #. third an already translated error message. 
-#: access/transam/xlogarchive.c:414 +#: access/transam/xlogarchive.c:416 #, c-format msgid "%s \"%s\": %s" msgstr "%s »%s«: %s" -#: access/transam/xlogarchive.c:457 postmaster/syslogger.c:1415 -#: replication/logical/snapbuild.c:1618 replication/slot.c:517 -#: replication/slot.c:1029 replication/slot.c:1137 storage/file/fd.c:642 -#: storage/file/fd.c:700 utils/time/snapmgr.c:1298 +#: access/transam/xlogarchive.c:459 postmaster/syslogger.c:1416 +#: replication/logical/snapbuild.c:1644 replication/slot.c:590 +#: replication/slot.c:1191 replication/slot.c:1303 storage/file/fd.c:650 +#: storage/file/fd.c:745 utils/time/snapmgr.c:1318 #, c-format msgid "could not rename file \"%s\" to \"%s\": %m" msgstr "konnte Datei »%s« nicht in »%s« umbenennen: %m" -#: access/transam/xlogarchive.c:524 access/transam/xlogarchive.c:588 +#: access/transam/xlogarchive.c:526 access/transam/xlogarchive.c:590 #, c-format msgid "could not create archive status file \"%s\": %m" msgstr "konnte Archivstatusdatei »%s« nicht erstellen: %m" -#: access/transam/xlogarchive.c:532 access/transam/xlogarchive.c:596 +#: access/transam/xlogarchive.c:534 access/transam/xlogarchive.c:598 #, c-format msgid "could not write archive status file \"%s\": %m" msgstr "konnte Archivstatusdatei »%s« nicht schreiben: %m" -#: access/transam/xlogfuncs.c:57 +#: access/transam/xlogfuncs.c:54 #, c-format msgid "aborting backup due to backend exiting before pg_stop_backup was called" msgstr "Backup wird abgebrochen, weil Backend-Prozess beendete, bevor pg_stop_backup aufgerufen wurde" -#: access/transam/xlogfuncs.c:87 +#: access/transam/xlogfuncs.c:84 #, c-format msgid "a backup is already in progress in this session" msgstr "ein Backup läuft bereits in dieser Sitzung" -#: access/transam/xlogfuncs.c:93 commands/tablespace.c:703 -#: commands/tablespace.c:713 postmaster/postmaster.c:1434 -#: replication/basebackup.c:368 replication/basebackup.c:708 -#: storage/file/copydir.c:53 storage/file/copydir.c:96 storage/file/fd.c:2369 -#: storage/file/fd.c:2968 storage/ipc/dsm.c:301 utils/adt/genfile.c:439 -#: utils/adt/misc.c:410 utils/misc/tzparser.c:339 -#, c-format -msgid "could not open directory \"%s\": %m" -msgstr "konnte Verzeichnis »%s« nicht öffnen: %m" - -#: access/transam/xlogfuncs.c:154 access/transam/xlogfuncs.c:228 +#: access/transam/xlogfuncs.c:142 access/transam/xlogfuncs.c:224 #, c-format msgid "non-exclusive backup in progress" msgstr "es läuft ein nicht-exklusives Backup" -#: access/transam/xlogfuncs.c:155 access/transam/xlogfuncs.c:229 +#: access/transam/xlogfuncs.c:143 access/transam/xlogfuncs.c:225 #, c-format msgid "Did you mean to use pg_stop_backup('f')?" msgstr "Meinten Sie pg_stop_backup('f')?" 
-#: access/transam/xlogfuncs.c:199 commands/event_trigger.c:1450 -#: commands/event_trigger.c:2001 commands/extension.c:1889 -#: commands/extension.c:1998 commands/extension.c:2222 commands/prepare.c:718 -#: executor/execQual.c:1312 executor/functions.c:1024 foreign/foreign.c:488 -#: libpq/hba.c:2414 replication/logical/launcher.c:680 -#: replication/logical/logicalfuncs.c:176 replication/logical/origin.c:1384 -#: replication/slotfuncs.c:196 replication/walsender.c:2716 -#: utils/adt/jsonfuncs.c:1484 utils/adt/jsonfuncs.c:1614 -#: utils/adt/jsonfuncs.c:1802 utils/adt/jsonfuncs.c:1929 -#: utils/adt/jsonfuncs.c:2695 utils/adt/pgstatfuncs.c:454 -#: utils/adt/pgstatfuncs.c:555 utils/fmgr/funcapi.c:62 utils/misc/guc.c:8501 -#: utils/mmgr/portalmem.c:1053 +#: access/transam/xlogfuncs.c:195 commands/event_trigger.c:1464 +#: commands/event_trigger.c:2015 commands/extension.c:1895 +#: commands/extension.c:2004 commands/extension.c:2228 commands/prepare.c:722 +#: executor/execExpr.c:2208 executor/execSRF.c:715 executor/functions.c:1034 +#: foreign/foreign.c:488 libpq/hba.c:2600 replication/logical/launcher.c:1027 +#: replication/logical/logicalfuncs.c:176 replication/logical/origin.c:1442 +#: replication/slotfuncs.c:200 replication/walsender.c:3182 +#: utils/adt/jsonfuncs.c:1701 utils/adt/jsonfuncs.c:1832 +#: utils/adt/jsonfuncs.c:2020 utils/adt/jsonfuncs.c:2147 +#: utils/adt/jsonfuncs.c:3567 utils/adt/pgstatfuncs.c:457 +#: utils/adt/pgstatfuncs.c:558 utils/fmgr/funcapi.c:62 utils/misc/guc.c:8808 +#: utils/mmgr/portalmem.c:1124 #, c-format msgid "set-valued function called in context that cannot accept a set" msgstr "Funktion mit Mengenergebnis in einem Zusammenhang aufgerufen, der keine Mengenergebnisse verarbeiten kann" -#: access/transam/xlogfuncs.c:203 commands/event_trigger.c:1454 -#: commands/event_trigger.c:2005 commands/extension.c:1893 -#: commands/extension.c:2002 commands/extension.c:2226 commands/prepare.c:722 -#: foreign/foreign.c:493 libpq/hba.c:2418 replication/logical/launcher.c:684 -#: replication/logical/logicalfuncs.c:180 replication/logical/origin.c:1388 -#: replication/slotfuncs.c:200 replication/walsender.c:2720 -#: utils/adt/pgstatfuncs.c:458 utils/adt/pgstatfuncs.c:559 -#: utils/misc/guc.c:8505 utils/misc/pg_config.c:44 utils/mmgr/portalmem.c:1057 +#: access/transam/xlogfuncs.c:199 commands/event_trigger.c:1468 +#: commands/event_trigger.c:2019 commands/extension.c:1899 +#: commands/extension.c:2008 commands/extension.c:2232 commands/prepare.c:726 +#: foreign/foreign.c:493 libpq/hba.c:2604 replication/logical/launcher.c:1031 +#: replication/logical/logicalfuncs.c:180 replication/logical/origin.c:1446 +#: replication/slotfuncs.c:204 replication/walsender.c:3186 +#: utils/adt/pgstatfuncs.c:461 utils/adt/pgstatfuncs.c:562 +#: utils/misc/guc.c:8812 utils/misc/pg_config.c:43 utils/mmgr/portalmem.c:1128 #, c-format msgid "materialize mode required, but it is not allowed in this context" msgstr "Materialisierungsmodus wird benötigt, ist aber in diesem Zusammenhang nicht erlaubt" -#: access/transam/xlogfuncs.c:246 +#: access/transam/xlogfuncs.c:241 #, c-format msgid "non-exclusive backup is not in progress" msgstr "es läuft kein nicht-exklusives Backup" -#: access/transam/xlogfuncs.c:247 +#: access/transam/xlogfuncs.c:242 #, c-format msgid "Did you mean to use pg_stop_backup('t')?" msgstr "Meinten Sie pg_stop_backup('t')?" 
-#: access/transam/xlogfuncs.c:325 +#: access/transam/xlogfuncs.c:319 #, c-format msgid "WAL level not sufficient for creating a restore point" msgstr "WAL-Level nicht ausreichend, um Restore-Punkt anzulegen" -#: access/transam/xlogfuncs.c:333 +#: access/transam/xlogfuncs.c:327 #, c-format msgid "value too long for restore point (maximum %d characters)" msgstr "Wert zu lang für Restore-Punkt (maximal %d Zeichen)" -#: access/transam/xlogfuncs.c:471 +#: access/transam/xlogfuncs.c:465 #, c-format msgid "pg_walfile_name_offset() cannot be executed during recovery." msgstr "pg_walfile_name_offset() kann nicht während der Wiederherstellung ausgeführt werden." -#: access/transam/xlogfuncs.c:527 +#: access/transam/xlogfuncs.c:521 #, c-format msgid "pg_walfile_name() cannot be executed during recovery." msgstr "pg_walfile_name() kann nicht während der Wiederherstellung ausgeführt werden." -#: access/transam/xlogfuncs.c:547 access/transam/xlogfuncs.c:567 -#: access/transam/xlogfuncs.c:584 +#: access/transam/xlogfuncs.c:541 access/transam/xlogfuncs.c:561 +#: access/transam/xlogfuncs.c:578 #, c-format msgid "recovery is not in progress" msgstr "Wiederherstellung läuft nicht" -#: access/transam/xlogfuncs.c:548 access/transam/xlogfuncs.c:568 -#: access/transam/xlogfuncs.c:585 +#: access/transam/xlogfuncs.c:542 access/transam/xlogfuncs.c:562 +#: access/transam/xlogfuncs.c:579 #, c-format msgid "Recovery control functions can only be executed during recovery." msgstr "Wiederherstellungskontrollfunktionen können nur während der Wiederherstellung ausgeführt werden." @@ -2883,7 +2781,7 @@ msgstr "ungültiger Datensatz-Offset bei %X/%X" msgid "contrecord is requested by %X/%X" msgstr "Contrecord angefordert von %X/%X" -#: access/transam/xlogreader.c:325 access/transam/xlogreader.c:625 +#: access/transam/xlogreader.c:325 access/transam/xlogreader.c:623 #, c-format msgid "invalid record length at %X/%X: wanted %u, got %u" msgstr "ungültige Datensatzlänge bei %X/%X: %u erwartet, %u erhalten" @@ -2903,713 +2801,838 @@ msgstr "keine Contrecord-Flag bei %X/%X" msgid "invalid contrecord length %u at %X/%X" msgstr "ungültige Contrecord-Länge %u bei %X/%X" -#: access/transam/xlogreader.c:633 +#: access/transam/xlogreader.c:631 #, c-format msgid "invalid resource manager ID %u at %X/%X" msgstr "ungültige Resource-Manager-ID %u bei %X/%X" -#: access/transam/xlogreader.c:647 access/transam/xlogreader.c:664 +#: access/transam/xlogreader.c:645 access/transam/xlogreader.c:662 #, c-format msgid "record with incorrect prev-link %X/%X at %X/%X" msgstr "Datensatz mit falschem Prev-Link %X/%X bei %X/%X" -#: access/transam/xlogreader.c:701 +#: access/transam/xlogreader.c:699 #, c-format msgid "incorrect resource manager data checksum in record at %X/%X" msgstr "ungültige Resource-Manager-Datenprüfsumme in Datensatz bei %X/%X" -#: access/transam/xlogreader.c:734 +#: access/transam/xlogreader.c:736 #, c-format msgid "invalid magic number %04X in log segment %s, offset %u" msgstr "ungültige magische Zahl %04X in Logsegment %s, Offset %u" -#: access/transam/xlogreader.c:748 access/transam/xlogreader.c:799 +#: access/transam/xlogreader.c:750 access/transam/xlogreader.c:801 #, c-format msgid "invalid info bits %04X in log segment %s, offset %u" msgstr "ungültige Info-Bits %04X in Logsegment %s, Offset %u" -#: access/transam/xlogreader.c:774 +#: access/transam/xlogreader.c:776 #, c-format msgid "WAL file is from different database system: WAL file database system identifier is %s, pg_control database system identifier is %s" msgstr 
"WAL-Datei ist von einem anderen Datenbanksystem: Datenbanksystemidentifikator in WAL-Datei ist %s, Datenbanksystemidentifikator in pg_control ist %s" -#: access/transam/xlogreader.c:781 +#: access/transam/xlogreader.c:783 #, c-format -msgid "WAL file is from different database system: incorrect XLOG_SEG_SIZE in page header" -msgstr "WAL-Datei ist von einem anderen Datenbanksystem: Falsche XLOG_SEG_SIZE im Seitenkopf" +msgid "WAL file is from different database system: incorrect segment size in page header" +msgstr "WAL-Datei ist von einem anderen Datenbanksystem: falsche Segmentgröße im Seitenkopf" -#: access/transam/xlogreader.c:787 +#: access/transam/xlogreader.c:789 #, c-format msgid "WAL file is from different database system: incorrect XLOG_BLCKSZ in page header" -msgstr "WAL-Datei ist von einem anderen Datenbanksystem: Falsche XLOG_BLCKSZ im Seitenkopf" +msgstr "WAL-Datei ist von einem anderen Datenbanksystem: falsche XLOG_BLCKSZ im Seitenkopf" -#: access/transam/xlogreader.c:813 +#: access/transam/xlogreader.c:820 #, c-format msgid "unexpected pageaddr %X/%X in log segment %s, offset %u" msgstr "unerwartete Pageaddr %X/%X in Logsegment %s, Offset %u" -#: access/transam/xlogreader.c:838 +#: access/transam/xlogreader.c:845 #, c-format msgid "out-of-sequence timeline ID %u (after %u) in log segment %s, offset %u" msgstr "Zeitleisten-ID %u außer der Reihe (nach %u) in Logsegment %s, Offset %u" -#: access/transam/xlogreader.c:1083 +#: access/transam/xlogreader.c:1090 #, c-format msgid "out-of-order block_id %u at %X/%X" msgstr "block_id %u außer der Reihe bei %X/%X" -#: access/transam/xlogreader.c:1106 +#: access/transam/xlogreader.c:1113 #, c-format msgid "BKPBLOCK_HAS_DATA set, but no data included at %X/%X" msgstr "BKPBLOCK_HAS_DATA gesetzt, aber keine Daten enthalten bei %X/%X" -#: access/transam/xlogreader.c:1113 +#: access/transam/xlogreader.c:1120 #, c-format msgid "BKPBLOCK_HAS_DATA not set, but data length is %u at %X/%X" msgstr "BKPBLOCK_HAS_DATA nicht gesetzt, aber Datenlänge ist %u bei %X/%X" -#: access/transam/xlogreader.c:1149 +#: access/transam/xlogreader.c:1156 #, c-format msgid "BKPIMAGE_HAS_HOLE set, but hole offset %u length %u block image length %u at %X/%X" msgstr "BKPIMAGE_HAS_HOLE gesetzt, aber Loch Offset %u Länge %u Block-Abbild-Länge %u bei %X/%X" -#: access/transam/xlogreader.c:1165 +#: access/transam/xlogreader.c:1172 #, c-format msgid "BKPIMAGE_HAS_HOLE not set, but hole offset %u length %u at %X/%X" msgstr "BKPIMAGE_HAS_HOLE nicht gesetzt, aber Loch Offset %u Länge %u bei %X/%X" -#: access/transam/xlogreader.c:1180 +#: access/transam/xlogreader.c:1187 #, c-format msgid "BKPIMAGE_IS_COMPRESSED set, but block image length %u at %X/%X" msgstr "BKPIMAGE_IS_COMPRESSED gesetzt, aber Block-Abbild-Länge %u bei %X/%X" -#: access/transam/xlogreader.c:1195 +#: access/transam/xlogreader.c:1202 #, c-format msgid "neither BKPIMAGE_HAS_HOLE nor BKPIMAGE_IS_COMPRESSED set, but block image length is %u at %X/%X" msgstr "weder BKPIMAGE_HAS_HOLE noch BKPIMAGE_IS_COMPRESSED gesetzt, aber Block-Abbild-Länge ist %u bei %X/%X" -#: access/transam/xlogreader.c:1211 +#: access/transam/xlogreader.c:1218 #, c-format msgid "BKPBLOCK_SAME_REL set but no previous rel at %X/%X" msgstr "BKPBLOCK_SAME_REL gesetzt, aber keine vorangehende Relation bei %X/%X" -#: access/transam/xlogreader.c:1223 +#: access/transam/xlogreader.c:1230 #, c-format msgid "invalid block_id %u at %X/%X" msgstr "ungültige block_id %u bei %X/%X" -#: access/transam/xlogreader.c:1291 +#: access/transam/xlogreader.c:1319 
#, c-format msgid "record with invalid length at %X/%X" msgstr "Datensatz mit ungültiger Länge bei %X/%X" -#: access/transam/xlogreader.c:1380 +#: access/transam/xlogreader.c:1408 #, c-format msgid "invalid compressed image at %X/%X, block %d" msgstr "ungültiges komprimiertes Abbild bei %X/%X, Block %d" -#: access/transam/xlogutils.c:740 replication/walsender.c:2084 +#: access/transam/xlogutils.c:749 replication/walsender.c:2432 #, c-format msgid "could not read from log segment %s, offset %u, length %lu: %m" msgstr "konnte nicht aus Logsegment %s bei Position %u, Länge %lu lesen: %m" -#: bootstrap/bootstrap.c:271 postmaster/postmaster.c:801 tcop/postgres.c:3489 +#: bootstrap/bootstrap.c:268 +#, c-format +msgid "-X requires a power of two value between 1 MB and 1 GB" +msgstr "-X benötigt eine Zweierpotenz zwischen 1 MB und 1 GB" + +#: bootstrap/bootstrap.c:285 postmaster/postmaster.c:826 tcop/postgres.c:3552 #, c-format msgid "--%s requires a value" msgstr "--%s benötigt einen Wert" -#: bootstrap/bootstrap.c:276 postmaster/postmaster.c:806 tcop/postgres.c:3494 +#: bootstrap/bootstrap.c:290 postmaster/postmaster.c:831 tcop/postgres.c:3557 #, c-format msgid "-c %s requires a value" msgstr "-c %s benötigt einen Wert" -#: bootstrap/bootstrap.c:287 postmaster/postmaster.c:818 -#: postmaster/postmaster.c:831 +#: bootstrap/bootstrap.c:301 postmaster/postmaster.c:843 +#: postmaster/postmaster.c:856 #, c-format msgid "Try \"%s --help\" for more information.\n" msgstr "Versuchen Sie »%s --help« für weitere Informationen.\n" -#: bootstrap/bootstrap.c:296 +#: bootstrap/bootstrap.c:310 #, c-format msgid "%s: invalid command-line arguments\n" msgstr "%s: ungültige Kommandozeilenargumente\n" -#: catalog/aclchk.c:202 +#: catalog/aclchk.c:203 #, c-format msgid "grant options can only be granted to roles" msgstr "Grant-Optionen können nur Rollen gewährt werden" -#: catalog/aclchk.c:325 +#: catalog/aclchk.c:326 #, c-format msgid "no privileges were granted for column \"%s\" of relation \"%s\"" msgstr "es wurden keine Privilegien für Spalte »%s« von Relation »%s« gewährt" -#: catalog/aclchk.c:330 +#: catalog/aclchk.c:331 #, c-format msgid "no privileges were granted for \"%s\"" msgstr "es wurden keine Privilegien für »%s« gewährt" -#: catalog/aclchk.c:338 +#: catalog/aclchk.c:339 #, c-format msgid "not all privileges were granted for column \"%s\" of relation \"%s\"" msgstr "es wurden nicht alle Priviligien für Spalte »%s« von Relation »%s« gewährt" -#: catalog/aclchk.c:343 +#: catalog/aclchk.c:344 #, c-format msgid "not all privileges were granted for \"%s\"" msgstr "es wurden nicht alle Priviligien für »%s« gewährt" -#: catalog/aclchk.c:354 +#: catalog/aclchk.c:355 #, c-format msgid "no privileges could be revoked for column \"%s\" of relation \"%s\"" msgstr "es konnten keine Privilegien für Spalte »%s« von Relation »%s« entzogen werden" -#: catalog/aclchk.c:359 +#: catalog/aclchk.c:360 #, c-format msgid "no privileges could be revoked for \"%s\"" msgstr "es konnten keine Privilegien für »%s« entzogen werden" -#: catalog/aclchk.c:367 +#: catalog/aclchk.c:368 #, c-format msgid "not all privileges could be revoked for column \"%s\" of relation \"%s\"" msgstr "es konnten nicht alle Privilegien für Spalte »%s« von Relation »%s« entzogen werden" -#: catalog/aclchk.c:372 +#: catalog/aclchk.c:373 #, c-format msgid "not all privileges could be revoked for \"%s\"" msgstr "es konnten nicht alle Privilegien für »%s« entzogen werden" -#: catalog/aclchk.c:454 catalog/aclchk.c:947 +#: catalog/aclchk.c:456 
catalog/aclchk.c:995 #, c-format msgid "invalid privilege type %s for relation" msgstr "ungültiger Privilegtyp %s für Relation" -#: catalog/aclchk.c:458 catalog/aclchk.c:951 +#: catalog/aclchk.c:460 catalog/aclchk.c:999 #, c-format msgid "invalid privilege type %s for sequence" msgstr "ungültiger Privilegtyp %s für Sequenz" -#: catalog/aclchk.c:462 +#: catalog/aclchk.c:464 #, c-format msgid "invalid privilege type %s for database" msgstr "ungültiger Privilegtyp %s für Datenbank" -#: catalog/aclchk.c:466 +#: catalog/aclchk.c:468 #, c-format msgid "invalid privilege type %s for domain" msgstr "ungültiger Privilegtyp %s für Domäne" -#: catalog/aclchk.c:470 catalog/aclchk.c:955 +#: catalog/aclchk.c:472 catalog/aclchk.c:1003 #, c-format msgid "invalid privilege type %s for function" msgstr "ungültiger Privilegtyp %s für Funktion" -#: catalog/aclchk.c:474 +#: catalog/aclchk.c:476 #, c-format msgid "invalid privilege type %s for language" msgstr "ungültiger Privilegtyp %s für Sprache" -#: catalog/aclchk.c:478 +#: catalog/aclchk.c:480 #, c-format msgid "invalid privilege type %s for large object" msgstr "ungültiger Privilegtyp %s für Large Object" -#: catalog/aclchk.c:482 +#: catalog/aclchk.c:484 catalog/aclchk.c:1019 #, c-format msgid "invalid privilege type %s for schema" msgstr "ungültiger Privilegtyp %s für Schema" -#: catalog/aclchk.c:486 +#: catalog/aclchk.c:488 catalog/aclchk.c:1007 +#, c-format +msgid "invalid privilege type %s for procedure" +msgstr "ungültiger Privilegtyp %s für Prozedur" + +#: catalog/aclchk.c:492 catalog/aclchk.c:1011 +#, c-format +msgid "invalid privilege type %s for routine" +msgstr "ungültiger Privilegtyp %s für Routine" + +#: catalog/aclchk.c:496 #, c-format msgid "invalid privilege type %s for tablespace" msgstr "ungültiger Privilegtyp %s für Tablespace" -#: catalog/aclchk.c:490 catalog/aclchk.c:959 +#: catalog/aclchk.c:500 catalog/aclchk.c:1015 #, c-format msgid "invalid privilege type %s for type" msgstr "ungültiger Privilegtyp %s für Typ" -#: catalog/aclchk.c:494 +#: catalog/aclchk.c:504 #, c-format msgid "invalid privilege type %s for foreign-data wrapper" msgstr "ungültiger Privilegtyp %s für Fremddaten-Wrapper" -#: catalog/aclchk.c:498 +#: catalog/aclchk.c:508 #, c-format msgid "invalid privilege type %s for foreign server" msgstr "ungültiger Privilegtyp %s für Fremdserver" -#: catalog/aclchk.c:537 +#: catalog/aclchk.c:547 #, c-format msgid "column privileges are only valid for relations" msgstr "Spaltenprivilegien sind nur für Relation gültig" -#: catalog/aclchk.c:695 catalog/aclchk.c:3900 catalog/aclchk.c:4682 -#: catalog/objectaddress.c:912 catalog/pg_largeobject.c:111 -#: storage/large_object/inv_api.c:291 +#: catalog/aclchk.c:707 catalog/aclchk.c:4131 catalog/aclchk.c:4913 +#: catalog/objectaddress.c:928 catalog/pg_largeobject.c:111 +#: storage/large_object/inv_api.c:284 #, c-format msgid "large object %u does not exist" msgstr "Large Object %u existiert nicht" -#: catalog/aclchk.c:884 catalog/aclchk.c:893 commands/collationcmds.c:92 -#: commands/copy.c:1039 commands/copy.c:1059 commands/copy.c:1068 -#: commands/copy.c:1077 commands/copy.c:1086 commands/copy.c:1095 -#: commands/copy.c:1104 commands/copy.c:1113 commands/copy.c:1122 -#: commands/copy.c:1140 commands/copy.c:1156 commands/copy.c:1176 -#: commands/copy.c:1193 commands/dbcommands.c:155 commands/dbcommands.c:164 +#: catalog/aclchk.c:932 catalog/aclchk.c:941 commands/collationcmds.c:113 +#: commands/copy.c:1057 commands/copy.c:1077 commands/copy.c:1086 +#: commands/copy.c:1095 
commands/copy.c:1104 commands/copy.c:1113 +#: commands/copy.c:1122 commands/copy.c:1131 commands/copy.c:1140 +#: commands/copy.c:1158 commands/copy.c:1174 commands/copy.c:1194 +#: commands/copy.c:1211 commands/dbcommands.c:155 commands/dbcommands.c:164 #: commands/dbcommands.c:173 commands/dbcommands.c:182 #: commands/dbcommands.c:191 commands/dbcommands.c:200 #: commands/dbcommands.c:209 commands/dbcommands.c:218 -#: commands/dbcommands.c:227 commands/dbcommands.c:1419 -#: commands/dbcommands.c:1428 commands/dbcommands.c:1437 -#: commands/dbcommands.c:1446 commands/extension.c:1672 -#: commands/extension.c:1682 commands/extension.c:1692 -#: commands/extension.c:1702 commands/extension.c:2942 +#: commands/dbcommands.c:227 commands/dbcommands.c:1427 +#: commands/dbcommands.c:1436 commands/dbcommands.c:1445 +#: commands/dbcommands.c:1454 commands/extension.c:1678 +#: commands/extension.c:1688 commands/extension.c:1698 +#: commands/extension.c:1708 commands/extension.c:2949 #: commands/foreigncmds.c:537 commands/foreigncmds.c:546 -#: commands/functioncmds.c:526 commands/functioncmds.c:643 -#: commands/functioncmds.c:652 commands/functioncmds.c:661 -#: commands/functioncmds.c:670 commands/functioncmds.c:2076 -#: commands/functioncmds.c:2084 commands/publicationcmds.c:89 -#: commands/publicationcmds.c:99 commands/publicationcmds.c:109 -#: commands/publicationcmds.c:119 commands/publicationcmds.c:129 -#: commands/publicationcmds.c:139 commands/sequence.c:1247 -#: commands/sequence.c:1256 commands/sequence.c:1265 commands/sequence.c:1274 -#: commands/sequence.c:1283 commands/sequence.c:1292 commands/sequence.c:1301 -#: commands/sequence.c:1310 commands/sequence.c:1319 -#: commands/subscriptioncmds.c:83 commands/subscriptioncmds.c:92 -#: commands/subscriptioncmds.c:101 commands/subscriptioncmds.c:111 -#: commands/subscriptioncmds.c:121 commands/subscriptioncmds.c:131 -#: commands/subscriptioncmds.c:141 commands/typecmds.c:298 -#: commands/typecmds.c:1375 commands/typecmds.c:1384 commands/typecmds.c:1392 -#: commands/typecmds.c:1400 commands/typecmds.c:1408 commands/user.c:138 -#: commands/user.c:161 commands/user.c:170 commands/user.c:179 -#: commands/user.c:188 commands/user.c:197 commands/user.c:206 -#: commands/user.c:215 commands/user.c:224 commands/user.c:233 -#: commands/user.c:242 commands/user.c:251 commands/user.c:260 -#: commands/user.c:547 commands/user.c:564 commands/user.c:572 -#: commands/user.c:580 commands/user.c:588 commands/user.c:596 -#: commands/user.c:604 commands/user.c:612 commands/user.c:621 -#: commands/user.c:629 commands/user.c:637 replication/pgoutput/pgoutput.c:107 -#: replication/pgoutput/pgoutput.c:128 +#: commands/functioncmds.c:557 commands/functioncmds.c:682 +#: commands/functioncmds.c:691 commands/functioncmds.c:700 +#: commands/functioncmds.c:709 commands/functioncmds.c:2103 +#: commands/functioncmds.c:2111 commands/publicationcmds.c:92 +#: commands/sequence.c:1256 commands/sequence.c:1266 commands/sequence.c:1276 +#: commands/sequence.c:1286 commands/sequence.c:1296 commands/sequence.c:1306 +#: commands/sequence.c:1316 commands/sequence.c:1326 commands/sequence.c:1336 +#: commands/subscriptioncmds.c:110 commands/subscriptioncmds.c:120 +#: commands/subscriptioncmds.c:130 commands/subscriptioncmds.c:140 +#: commands/subscriptioncmds.c:154 commands/subscriptioncmds.c:165 +#: commands/subscriptioncmds.c:179 commands/tablecmds.c:6254 +#: commands/typecmds.c:295 commands/typecmds.c:1444 commands/typecmds.c:1453 +#: commands/typecmds.c:1461 commands/typecmds.c:1469 
commands/typecmds.c:1477 +#: commands/user.c:134 commands/user.c:148 commands/user.c:157 +#: commands/user.c:166 commands/user.c:175 commands/user.c:184 +#: commands/user.c:193 commands/user.c:202 commands/user.c:211 +#: commands/user.c:220 commands/user.c:229 commands/user.c:238 +#: commands/user.c:247 commands/user.c:555 commands/user.c:563 +#: commands/user.c:571 commands/user.c:579 commands/user.c:587 +#: commands/user.c:595 commands/user.c:603 commands/user.c:611 +#: commands/user.c:620 commands/user.c:628 commands/user.c:636 +#: parser/parse_utilcmd.c:407 replication/pgoutput/pgoutput.c:111 +#: replication/pgoutput/pgoutput.c:132 replication/walsender.c:801 +#: replication/walsender.c:812 replication/walsender.c:822 #, c-format msgid "conflicting or redundant options" msgstr "widersprüchliche oder überflüssige Optionen" -#: catalog/aclchk.c:992 +#: catalog/aclchk.c:1052 #, c-format msgid "default privileges cannot be set for columns" msgstr "Vorgabeprivilegien können nicht für Spalten gesetzt werden" -#: catalog/aclchk.c:1503 catalog/objectaddress.c:1367 commands/analyze.c:384 -#: commands/copy.c:4657 commands/sequence.c:1625 commands/tablecmds.c:5533 -#: commands/tablecmds.c:5694 commands/tablecmds.c:5751 -#: commands/tablecmds.c:5865 commands/tablecmds.c:5919 -#: commands/tablecmds.c:6011 commands/tablecmds.c:6167 -#: commands/tablecmds.c:8386 commands/tablecmds.c:8662 -#: commands/tablecmds.c:9079 commands/trigger.c:732 parser/analyze.c:2305 -#: parser/parse_relation.c:2575 parser/parse_relation.c:2637 -#: parser/parse_target.c:1001 parser/parse_type.c:127 utils/adt/acl.c:2823 -#: utils/adt/ruleutils.c:2188 +#: catalog/aclchk.c:1212 +#, c-format +msgid "cannot use IN SCHEMA clause when using GRANT/REVOKE ON SCHEMAS" +msgstr "Klausel IN SCHEMA kann nicht verwendet werden, wenn GRANT/REVOKE ON SCHEMAS verwendet wird" + +#: catalog/aclchk.c:1576 catalog/objectaddress.c:1390 commands/analyze.c:433 +#: commands/copy.c:4777 commands/sequence.c:1691 commands/tablecmds.c:5900 +#: commands/tablecmds.c:6048 commands/tablecmds.c:6105 +#: commands/tablecmds.c:6179 commands/tablecmds.c:6273 +#: commands/tablecmds.c:6332 commands/tablecmds.c:6471 +#: commands/tablecmds.c:6546 commands/tablecmds.c:6638 +#: commands/tablecmds.c:6732 commands/tablecmds.c:9066 +#: commands/tablecmds.c:9345 commands/tablecmds.c:9782 commands/trigger.c:904 +#: parser/analyze.c:2311 parser/parse_relation.c:2735 +#: parser/parse_relation.c:2798 parser/parse_target.c:1024 +#: parser/parse_type.c:127 utils/adt/acl.c:2843 utils/adt/ruleutils.c:2422 #, c-format msgid "column \"%s\" of relation \"%s\" does not exist" msgstr "Spalte »%s« von Relation »%s« existiert nicht" -#: catalog/aclchk.c:1769 catalog/objectaddress.c:1207 commands/sequence.c:1137 -#: commands/tablecmds.c:229 commands/tablecmds.c:12734 utils/adt/acl.c:2059 -#: utils/adt/acl.c:2089 utils/adt/acl.c:2121 utils/adt/acl.c:2153 -#: utils/adt/acl.c:2181 utils/adt/acl.c:2211 +#: catalog/aclchk.c:1843 catalog/objectaddress.c:1230 commands/sequence.c:1129 +#: commands/tablecmds.c:231 commands/tablecmds.c:13502 utils/adt/acl.c:2076 +#: utils/adt/acl.c:2106 utils/adt/acl.c:2138 utils/adt/acl.c:2170 +#: utils/adt/acl.c:2198 utils/adt/acl.c:2228 #, c-format msgid "\"%s\" is not a sequence" msgstr "»%s« ist keine Sequenz" -#: catalog/aclchk.c:1807 +#: catalog/aclchk.c:1881 #, c-format msgid "sequence \"%s\" only supports USAGE, SELECT, and UPDATE privileges" msgstr "Sequenz »%s« unterstützt nur die Privilegien USAGE, SELECT und UPDATE" -#: catalog/aclchk.c:1824 +#: 
catalog/aclchk.c:1898 #, c-format msgid "invalid privilege type %s for table" msgstr "ungültiger Privilegtyp %s für Tabelle" -#: catalog/aclchk.c:1990 +#: catalog/aclchk.c:2064 #, c-format msgid "invalid privilege type %s for column" msgstr "ungültiger Privilegtyp %s für Spalte" -#: catalog/aclchk.c:2003 +#: catalog/aclchk.c:2077 #, c-format msgid "sequence \"%s\" only supports SELECT column privileges" msgstr "Sequenz »%s« unterstützt nur den Spaltenprivilegientyp SELECT" -#: catalog/aclchk.c:2585 +#: catalog/aclchk.c:2659 #, c-format msgid "language \"%s\" is not trusted" msgstr "Sprache »%s« ist nicht »trusted«" -#: catalog/aclchk.c:2587 +#: catalog/aclchk.c:2661 #, c-format msgid "GRANT and REVOKE are not allowed on untrusted languages, because only superusers can use untrusted languages." msgstr "GRANT und REVOKE sind für nicht vertrauenswürdige Sprachen nicht erlaubt, weil nur Superuser nicht vertrauenswürdige Sprachen verwenden können." -#: catalog/aclchk.c:3101 +#: catalog/aclchk.c:3175 #, c-format msgid "cannot set privileges of array types" msgstr "für Array-Typen können keine Privilegien gesetzt werden" -#: catalog/aclchk.c:3102 +#: catalog/aclchk.c:3176 #, c-format msgid "Set the privileges of the element type instead." msgstr "Setzen Sie stattdessen die Privilegien des Elementtyps." -#: catalog/aclchk.c:3109 catalog/objectaddress.c:1497 commands/typecmds.c:3165 +#: catalog/aclchk.c:3183 catalog/objectaddress.c:1520 #, c-format msgid "\"%s\" is not a domain" msgstr "»%s« ist keine Domäne" -#: catalog/aclchk.c:3229 +#: catalog/aclchk.c:3303 #, c-format msgid "unrecognized privilege type \"%s\"" msgstr "unbekannter Privilegtyp »%s«" -#: catalog/aclchk.c:3278 +#: catalog/aclchk.c:3364 #, c-format -msgid "permission denied for column %s" -msgstr "keine Berechtigung für Spalte %s" +msgid "permission denied for aggregate %s" +msgstr "keine Berechtigung für Aggregatfunktion %s" + +#: catalog/aclchk.c:3367 +#, c-format +msgid "permission denied for collation %s" +msgstr "keine Berechtigung für Sortierfolge %s" -#: catalog/aclchk.c:3280 +#: catalog/aclchk.c:3370 #, c-format -msgid "permission denied for relation %s" -msgstr "keine Berechtigung für Relation %s" +msgid "permission denied for column %s" +msgstr "keine Berechtigung für Spalte %s" -#: catalog/aclchk.c:3282 commands/sequence.c:599 commands/sequence.c:833 -#: commands/sequence.c:875 commands/sequence.c:916 commands/sequence.c:1671 -#: commands/sequence.c:1735 +#: catalog/aclchk.c:3373 #, c-format -msgid "permission denied for sequence %s" -msgstr "keine Berechtigung für Sequenz %s" +msgid "permission denied for conversion %s" +msgstr "keine Berechtigung für Konversion %s" -#: catalog/aclchk.c:3284 +#: catalog/aclchk.c:3376 #, c-format msgid "permission denied for database %s" msgstr "keine Berechtigung für Datenbank %s" -#: catalog/aclchk.c:3286 +#: catalog/aclchk.c:3379 #, c-format -msgid "permission denied for function %s" -msgstr "keine Berechtigung für Funktion %s" +msgid "permission denied for domain %s" +msgstr "keine Berechtigung für Domäne %s" -#: catalog/aclchk.c:3288 +#: catalog/aclchk.c:3382 #, c-format -msgid "permission denied for operator %s" -msgstr "keine Berechtigung für Operator %s" +msgid "permission denied for event trigger %s" +msgstr "keine Berechtigung für Ereignistrigger %s" -#: catalog/aclchk.c:3290 +#: catalog/aclchk.c:3385 #, c-format -msgid "permission denied for type %s" -msgstr "keine Berechtigung für Typ %s" +msgid "permission denied for extension %s" +msgstr "keine Berechtigung für Erweiterung 
%s" + +#: catalog/aclchk.c:3388 +#, c-format +msgid "permission denied for foreign-data wrapper %s" +msgstr "keine Berechtigung für Fremddaten-Wrapper %s" + +#: catalog/aclchk.c:3391 +#, c-format +msgid "permission denied for foreign server %s" +msgstr "keine Berechtigung für Fremdserver %s" + +#: catalog/aclchk.c:3394 +#, c-format +msgid "permission denied for foreign table %s" +msgstr "keine Berechtigung für Fremdtabelle %s" + +#: catalog/aclchk.c:3397 +#, c-format +msgid "permission denied for function %s" +msgstr "keine Berechtigung für Funktion %s" -#: catalog/aclchk.c:3292 +#: catalog/aclchk.c:3400 +#, c-format +msgid "permission denied for index %s" +msgstr "keine Berechtigung für Index %s" + +#: catalog/aclchk.c:3403 #, c-format msgid "permission denied for language %s" msgstr "keine Berechtigung für Sprache %s" -#: catalog/aclchk.c:3294 +#: catalog/aclchk.c:3406 #, c-format msgid "permission denied for large object %s" msgstr "keine Berechtigung für Large Object %s" -#: catalog/aclchk.c:3296 +#: catalog/aclchk.c:3409 #, c-format -msgid "permission denied for schema %s" -msgstr "keine Berechtigung für Schema %s" +msgid "permission denied for materialized view %s" +msgstr "keine Berechtigung für materialisierte Sicht %s" -#: catalog/aclchk.c:3298 +#: catalog/aclchk.c:3412 #, c-format msgid "permission denied for operator class %s" msgstr "keine Berechtigung für Operatorklasse %s" -#: catalog/aclchk.c:3300 +#: catalog/aclchk.c:3415 +#, c-format +msgid "permission denied for operator %s" +msgstr "keine Berechtigung für Operator %s" + +#: catalog/aclchk.c:3418 #, c-format msgid "permission denied for operator family %s" msgstr "keine Berechtigung für Operatorfamilie %s" -#: catalog/aclchk.c:3302 +#: catalog/aclchk.c:3421 #, c-format -msgid "permission denied for collation %s" -msgstr "keine Berechtigung für Sortierfolge %s" +msgid "permission denied for policy %s" +msgstr "keine Berechtigung für Policy %s" -#: catalog/aclchk.c:3304 +#: catalog/aclchk.c:3424 #, c-format -msgid "permission denied for conversion %s" -msgstr "keine Berechtigung für Konversion %s" +msgid "permission denied for procedure %s" +msgstr "keine Berechtigung für Prozedur %s" -#: catalog/aclchk.c:3306 +#: catalog/aclchk.c:3427 #, c-format -msgid "permission denied for tablespace %s" -msgstr "keine Berechtigung für Tablespace %s" +msgid "permission denied for publication %s" +msgstr "keine Berechtigung für Publikation %s" -#: catalog/aclchk.c:3308 +#: catalog/aclchk.c:3430 #, c-format -msgid "permission denied for text search dictionary %s" -msgstr "keine Berechtigung für Textsuchewörterbuch %s" +msgid "permission denied for routine %s" +msgstr "keine Berechtigung für Routine %s" -#: catalog/aclchk.c:3310 +#: catalog/aclchk.c:3433 #, c-format -msgid "permission denied for text search configuration %s" -msgstr "keine Berechtigung für Textsuchekonfiguration %s" +msgid "permission denied for schema %s" +msgstr "keine Berechtigung für Schema %s" -#: catalog/aclchk.c:3312 +#: catalog/aclchk.c:3436 commands/sequence.c:599 commands/sequence.c:833 +#: commands/sequence.c:875 commands/sequence.c:916 commands/sequence.c:1789 +#: commands/sequence.c:1853 #, c-format -msgid "permission denied for foreign-data wrapper %s" -msgstr "keine Berechtigung für Fremddaten-Wrapper %s" +msgid "permission denied for sequence %s" +msgstr "keine Berechtigung für Sequenz %s" -#: catalog/aclchk.c:3314 +#: catalog/aclchk.c:3439 #, c-format -msgid "permission denied for foreign server %s" -msgstr "keine Berechtigung für Fremdserver %s" +msgid 
"permission denied for statistics object %s" +msgstr "keine Berechtigung für Statistikobjekt %s" -#: catalog/aclchk.c:3316 +#: catalog/aclchk.c:3442 #, c-format -msgid "permission denied for event trigger %s" -msgstr "keine Berechtigung für Ereignistrigger %s" +msgid "permission denied for subscription %s" +msgstr "keine Berechtigung für Subskription %s" -#: catalog/aclchk.c:3318 +#: catalog/aclchk.c:3445 #, c-format -msgid "permission denied for extension %s" -msgstr "keine Berechtigung für Erweiterung %s" +msgid "permission denied for table %s" +msgstr "keine Berechtigung für Tabelle %s" -#: catalog/aclchk.c:3320 +#: catalog/aclchk.c:3448 #, c-format -msgid "permission denied for publication %s" -msgstr "keine Berechtigung für Publikation %s" +msgid "permission denied for tablespace %s" +msgstr "keine Berechtigung für Tablespace %s" -#: catalog/aclchk.c:3322 +#: catalog/aclchk.c:3451 #, c-format -msgid "permission denied for subscription %s" -msgstr "keine Berechtigung für Subskription %s" +msgid "permission denied for text search configuration %s" +msgstr "keine Berechtigung für Textsuchekonfiguration %s" -#: catalog/aclchk.c:3328 catalog/aclchk.c:3330 +#: catalog/aclchk.c:3454 #, c-format -msgid "must be owner of relation %s" -msgstr "Berechtigung nur für Eigentümer der Relation %s" +msgid "permission denied for text search dictionary %s" +msgstr "keine Berechtigung für Textsuchewörterbuch %s" -#: catalog/aclchk.c:3332 +#: catalog/aclchk.c:3457 #, c-format -msgid "must be owner of sequence %s" -msgstr "Berechtigung nur für Eigentümer der Sequenz %s" +msgid "permission denied for type %s" +msgstr "keine Berechtigung für Typ %s" + +#: catalog/aclchk.c:3460 +#, c-format +msgid "permission denied for view %s" +msgstr "keine Berechtigung für Sicht %s" + +#: catalog/aclchk.c:3495 +#, c-format +msgid "must be owner of aggregate %s" +msgstr "Berechtigung nur für Eigentümer der Aggregatfunktion %s" + +#: catalog/aclchk.c:3498 +#, c-format +msgid "must be owner of collation %s" +msgstr "Berechtigung nur für Eigentümer der Sortierfolge %s" + +#: catalog/aclchk.c:3501 +#, c-format +msgid "must be owner of conversion %s" +msgstr "Berechtigung nur für Eigentümer der Konversion %s" -#: catalog/aclchk.c:3334 +#: catalog/aclchk.c:3504 #, c-format msgid "must be owner of database %s" msgstr "Berechtigung nur für Eigentümer der Datenbank %s" -#: catalog/aclchk.c:3336 +#: catalog/aclchk.c:3507 #, c-format -msgid "must be owner of function %s" -msgstr "Berechtigung nur für Eigentümer der Funktion %s" +msgid "must be owner of domain %s" +msgstr "Berechtigung nur für Eigentümer der Domäne %s" -#: catalog/aclchk.c:3338 +#: catalog/aclchk.c:3510 #, c-format -msgid "must be owner of operator %s" -msgstr "Berechtigung nur für Eigentümer des Operators %s" +msgid "must be owner of event trigger %s" +msgstr "Berechtigung nur für Eigentümer des Ereignistriggers %s" -#: catalog/aclchk.c:3340 +#: catalog/aclchk.c:3513 #, c-format -msgid "must be owner of type %s" -msgstr "Berechtigung nur für Eigentümer des Typs %s" +msgid "must be owner of extension %s" +msgstr "Berechtigung nur für Eigentümer der Erweiterung %s" + +#: catalog/aclchk.c:3516 +#, c-format +msgid "must be owner of foreign-data wrapper %s" +msgstr "Berechtigung nur für Eigentümer des Fremddaten-Wrappers %s" + +#: catalog/aclchk.c:3519 +#, c-format +msgid "must be owner of foreign server %s" +msgstr "Berechtigung nur für Eigentümer des Fremdservers %s" -#: catalog/aclchk.c:3342 +#: catalog/aclchk.c:3522 +#, c-format +msgid "must be owner of foreign table 
%s" +msgstr "Berechtigung nur für Eigentümer der Fremdtabelle %s" + +#: catalog/aclchk.c:3525 +#, c-format +msgid "must be owner of function %s" +msgstr "Berechtigung nur für Eigentümer der Funktion %s" + +#: catalog/aclchk.c:3528 +#, c-format +msgid "must be owner of index %s" +msgstr "Berechtigung nur für Eigentümer des Index %s" + +#: catalog/aclchk.c:3531 #, c-format msgid "must be owner of language %s" msgstr "Berechtigung nur für Eigentümer der Sprache %s" -#: catalog/aclchk.c:3344 +#: catalog/aclchk.c:3534 #, c-format msgid "must be owner of large object %s" msgstr "Berechtigung nur für Eigentümer des Large Object %s" -#: catalog/aclchk.c:3346 +#: catalog/aclchk.c:3537 #, c-format -msgid "must be owner of schema %s" -msgstr "Berechtigung nur für Eigentümer des Schemas %s" +msgid "must be owner of materialized view %s" +msgstr "Berechtigung nur für Eigentümer der materialisierten Sicht %s" -#: catalog/aclchk.c:3348 +#: catalog/aclchk.c:3540 #, c-format msgid "must be owner of operator class %s" msgstr "Berechtigung nur für Eigentümer der Operatorklasse %s" -#: catalog/aclchk.c:3350 +#: catalog/aclchk.c:3543 +#, c-format +msgid "must be owner of operator %s" +msgstr "Berechtigung nur für Eigentümer des Operators %s" + +#: catalog/aclchk.c:3546 #, c-format msgid "must be owner of operator family %s" msgstr "Berechtigung nur für Eigentümer der Operatorfamilie %s" -#: catalog/aclchk.c:3352 +#: catalog/aclchk.c:3549 #, c-format -msgid "must be owner of collation %s" -msgstr "Berechtigung nur für Eigentümer der Sortierfolge %s" +msgid "must be owner of procedure %s" +msgstr "Berechtigung nur für Eigentümer der Prozedur %s" -#: catalog/aclchk.c:3354 +#: catalog/aclchk.c:3552 #, c-format -msgid "must be owner of conversion %s" -msgstr "Berechtigung nur für Eigentümer der Konversion %s" +msgid "must be owner of publication %s" +msgstr "Berechtigung nur für Eigentümer der Publikation %s" -#: catalog/aclchk.c:3356 +#: catalog/aclchk.c:3555 #, c-format -msgid "must be owner of tablespace %s" -msgstr "Berechtigung nur für Eigentümer des Tablespace %s" +msgid "must be owner of routine %s" +msgstr "Berechtigung nur für Eigentümer der Routine %s" -#: catalog/aclchk.c:3358 +#: catalog/aclchk.c:3558 #, c-format -msgid "must be owner of text search dictionary %s" -msgstr "Berechtigung nur für Eigentümer des Textsuchewörterbuches %s" +msgid "must be owner of sequence %s" +msgstr "Berechtigung nur für Eigentümer der Sequenz %s" -#: catalog/aclchk.c:3360 +#: catalog/aclchk.c:3561 #, c-format -msgid "must be owner of text search configuration %s" -msgstr "Berechtigung nur für Eigentümer der Textsuchekonfiguration %s" +msgid "must be owner of subscription %s" +msgstr "Berechtigung nur für Eigentümer der Subskription %s" -#: catalog/aclchk.c:3362 +#: catalog/aclchk.c:3564 #, c-format -msgid "must be owner of foreign-data wrapper %s" -msgstr "Berechtigung nur für Eigentümer des Fremddaten-Wrappers %s" +msgid "must be owner of table %s" +msgstr "Berechtigung nur für Eigentümer der Tabelle %s" -#: catalog/aclchk.c:3364 +#: catalog/aclchk.c:3567 #, c-format -msgid "must be owner of foreign server %s" -msgstr "Berechtigung nur für Eigentümer des Fremdservers %s" +msgid "must be owner of type %s" +msgstr "Berechtigung nur für Eigentümer des Typs %s" -#: catalog/aclchk.c:3366 +#: catalog/aclchk.c:3570 #, c-format -msgid "must be owner of event trigger %s" -msgstr "Berechtigung nur für Eigentümer des Ereignistriggers %s" +msgid "must be owner of view %s" +msgstr "Berechtigung nur für Eigentümer der Sicht %s" -#: 
catalog/aclchk.c:3368 +#: catalog/aclchk.c:3573 #, c-format -msgid "must be owner of extension %s" -msgstr "Berechtigung nur für Eigentümer der Erweiterung %s" +msgid "must be owner of schema %s" +msgstr "Berechtigung nur für Eigentümer des Schemas %s" -#: catalog/aclchk.c:3370 +#: catalog/aclchk.c:3576 #, c-format -msgid "must be owner of publication %s" -msgstr "Berechtigung nur für Eigentümer der Publikation %s" +msgid "must be owner of statistics object %s" +msgstr "Berechtigung nur für Eigentümer des Statistikobjekts %s" -#: catalog/aclchk.c:3372 +#: catalog/aclchk.c:3579 #, c-format -msgid "must be owner of subscription %s" -msgstr "Berechtigung nur für Eigentümer der Subskription %s" +msgid "must be owner of tablespace %s" +msgstr "Berechtigung nur für Eigentümer des Tablespace %s" + +#: catalog/aclchk.c:3582 +#, c-format +msgid "must be owner of text search configuration %s" +msgstr "Berechtigung nur für Eigentümer der Textsuchekonfiguration %s" + +#: catalog/aclchk.c:3585 +#, c-format +msgid "must be owner of text search dictionary %s" +msgstr "Berechtigung nur für Eigentümer des Textsuchewörterbuches %s" + +#: catalog/aclchk.c:3599 +#, c-format +msgid "must be owner of relation %s" +msgstr "Berechtigung nur für Eigentümer der Relation %s" -#: catalog/aclchk.c:3414 +#: catalog/aclchk.c:3643 #, c-format msgid "permission denied for column \"%s\" of relation \"%s\"" msgstr "keine Berechtigung für Spalte »%s« von Relation »%s«" -#: catalog/aclchk.c:3533 catalog/aclchk.c:3541 +#: catalog/aclchk.c:3764 catalog/aclchk.c:3772 #, c-format msgid "attribute %d of relation with OID %u does not exist" msgstr "Attribut %d der Relation mit OID %u existiert nicht" -#: catalog/aclchk.c:3614 catalog/aclchk.c:4533 +#: catalog/aclchk.c:3845 catalog/aclchk.c:4764 #, c-format msgid "relation with OID %u does not exist" msgstr "Relation mit OID %u existiert nicht" -#: catalog/aclchk.c:3713 catalog/aclchk.c:4951 +#: catalog/aclchk.c:3944 catalog/aclchk.c:5182 #, c-format msgid "database with OID %u does not exist" msgstr "Datenbank mit OID %u existiert nicht" -#: catalog/aclchk.c:3767 catalog/aclchk.c:4611 tcop/fastpath.c:223 -#: utils/fmgr/fmgr.c:2428 +#: catalog/aclchk.c:3998 catalog/aclchk.c:4842 tcop/fastpath.c:221 +#: utils/fmgr/fmgr.c:2195 #, c-format msgid "function with OID %u does not exist" msgstr "Funktion mit OID %u existiert nicht" -#: catalog/aclchk.c:3821 catalog/aclchk.c:4637 +#: catalog/aclchk.c:4052 catalog/aclchk.c:4868 #, c-format msgid "language with OID %u does not exist" msgstr "Sprache mit OID %u existiert nicht" -#: catalog/aclchk.c:3985 catalog/aclchk.c:4709 +#: catalog/aclchk.c:4216 catalog/aclchk.c:4940 #, c-format msgid "schema with OID %u does not exist" msgstr "Schema mit OID %u existiert nicht" -#: catalog/aclchk.c:4039 catalog/aclchk.c:4736 +#: catalog/aclchk.c:4270 catalog/aclchk.c:4967 #, c-format msgid "tablespace with OID %u does not exist" msgstr "Tablespace mit OID %u existiert nicht" -#: catalog/aclchk.c:4098 catalog/aclchk.c:4870 commands/foreigncmds.c:324 +#: catalog/aclchk.c:4329 catalog/aclchk.c:5101 commands/foreigncmds.c:324 #, c-format msgid "foreign-data wrapper with OID %u does not exist" msgstr "Fremddaten-Wrapper mit OID %u existiert nicht" -#: catalog/aclchk.c:4160 catalog/aclchk.c:4897 commands/foreigncmds.c:459 +#: catalog/aclchk.c:4391 catalog/aclchk.c:5128 commands/foreigncmds.c:459 #, c-format msgid "foreign server with OID %u does not exist" msgstr "Fremdserver mit OID %u existiert nicht" -#: catalog/aclchk.c:4220 catalog/aclchk.c:4559 
utils/cache/typcache.c:238 +#: catalog/aclchk.c:4451 catalog/aclchk.c:4790 utils/cache/typcache.c:368 #, c-format msgid "type with OID %u does not exist" msgstr "Typ mit OID %u existiert nicht" -#: catalog/aclchk.c:4585 +#: catalog/aclchk.c:4816 #, c-format msgid "operator with OID %u does not exist" msgstr "Operator mit OID %u existiert nicht" -#: catalog/aclchk.c:4762 +#: catalog/aclchk.c:4993 #, c-format msgid "operator class with OID %u does not exist" msgstr "Operatorklasse mit OID %u existiert nicht" -#: catalog/aclchk.c:4789 +#: catalog/aclchk.c:5020 #, c-format msgid "operator family with OID %u does not exist" msgstr "Operatorfamilie mit OID %u existiert nicht" -#: catalog/aclchk.c:4816 +#: catalog/aclchk.c:5047 #, c-format msgid "text search dictionary with OID %u does not exist" msgstr "Textsuchewörterbuch mit OID %u existiert nicht" -#: catalog/aclchk.c:4843 +#: catalog/aclchk.c:5074 #, c-format msgid "text search configuration with OID %u does not exist" msgstr "Textsuchekonfiguration mit OID %u existiert nicht" -#: catalog/aclchk.c:4924 commands/event_trigger.c:587 +#: catalog/aclchk.c:5155 commands/event_trigger.c:590 #, c-format msgid "event trigger with OID %u does not exist" msgstr "Ereignistrigger mit OID %u existiert nicht" -#: catalog/aclchk.c:4977 +#: catalog/aclchk.c:5208 commands/collationcmds.c:347 #, c-format msgid "collation with OID %u does not exist" msgstr "Sortierfolge mit OID %u existiert nicht" -#: catalog/aclchk.c:5003 +#: catalog/aclchk.c:5234 #, c-format msgid "conversion with OID %u does not exist" msgstr "Konversion mit OID %u existiert nicht" -#: catalog/aclchk.c:5044 +#: catalog/aclchk.c:5275 #, c-format msgid "extension with OID %u does not exist" msgstr "Erweiterung mit OID %u existiert nicht" -#: catalog/aclchk.c:5071 commands/publicationcmds.c:746 +#: catalog/aclchk.c:5302 commands/publicationcmds.c:747 #, c-format msgid "publication with OID %u does not exist" msgstr "Publikation mit OID %u existiert nicht" -#: catalog/aclchk.c:5097 commands/subscriptioncmds.c:682 +#: catalog/aclchk.c:5328 commands/subscriptioncmds.c:1098 #, c-format msgid "subscription with OID %u does not exist" msgstr "Subskription mit OID %u existiert nicht" +#: catalog/aclchk.c:5354 +#, c-format +msgid "statistics object with OID %u does not exist" +msgstr "Statistikobjekt mit OID %u existiert nicht" + #: catalog/dependency.c:611 #, c-format msgid "cannot drop %s because %s requires it" @@ -3620,27 +3643,27 @@ msgstr "kann %s nicht löschen, wird von %s benötigt" msgid "You can drop %s instead." msgstr "Sie können stattdessen %s löschen." 
-#: catalog/dependency.c:777 catalog/pg_shdepend.c:573 +#: catalog/dependency.c:787 catalog/pg_shdepend.c:574 #, c-format msgid "cannot drop %s because it is required by the database system" msgstr "kann %s nicht löschen, wird vom Datenbanksystem benötigt" -#: catalog/dependency.c:895 +#: catalog/dependency.c:905 #, c-format msgid "drop auto-cascades to %s" msgstr "Löschvorgang löscht automatisch %s" -#: catalog/dependency.c:907 catalog/dependency.c:916 +#: catalog/dependency.c:917 catalog/dependency.c:926 #, c-format msgid "%s depends on %s" msgstr "%s hängt von %s ab" -#: catalog/dependency.c:928 catalog/dependency.c:937 +#: catalog/dependency.c:938 catalog/dependency.c:947 #, c-format msgid "drop cascades to %s" msgstr "Löschvorgang löscht ebenfalls %s" -#: catalog/dependency.c:945 catalog/pg_shdepend.c:684 +#: catalog/dependency.c:955 catalog/pg_shdepend.c:685 #, c-format msgid "" "\n" @@ -3655,617 +3678,628 @@ msgstr[1] "" "\n" "und %d weitere Objekte (Liste im Serverlog)" -#: catalog/dependency.c:957 +#: catalog/dependency.c:967 #, c-format msgid "cannot drop %s because other objects depend on it" msgstr "kann %s nicht löschen, weil andere Objekte davon abhängen" -#: catalog/dependency.c:961 catalog/dependency.c:968 +#: catalog/dependency.c:971 catalog/dependency.c:978 #, c-format msgid "Use DROP ... CASCADE to drop the dependent objects too." msgstr "Verwenden Sie DROP ... CASCADE, um die abhängigen Objekte ebenfalls zu löschen." -#: catalog/dependency.c:965 +#: catalog/dependency.c:975 #, c-format msgid "cannot drop desired object(s) because other objects depend on them" msgstr "kann gewünschte Objekte nicht löschen, weil andere Objekte davon abhängen" #. translator: %d always has a value larger than 1 -#: catalog/dependency.c:974 +#: catalog/dependency.c:984 #, c-format msgid "drop cascades to %d other object" msgid_plural "drop cascades to %d other objects" msgstr[0] "Löschvorgang löscht ebenfalls %d weiteres Objekt" msgstr[1] "Löschvorgang löscht ebenfalls %d weitere Objekte" -#: catalog/dependency.c:1616 -#, fuzzy, c-format -#| msgid "constant of the type \"regrole\" cannot be used here" +#: catalog/dependency.c:1644 +#, c-format msgid "constant of the type %s cannot be used here" -msgstr "Konstante vom Typ »regrole« kann hier nicht verwendet werden" +msgstr "Konstante vom Typ %s kann hier nicht verwendet werden" -#: catalog/heap.c:281 +#: catalog/heap.c:286 #, c-format msgid "permission denied to create \"%s.%s\"" msgstr "keine Berechtigung, um »%s.%s« zu erzeugen" -#: catalog/heap.c:283 +#: catalog/heap.c:288 #, c-format msgid "System catalog modifications are currently disallowed." msgstr "Änderungen an Systemkatalogen sind gegenwärtig nicht erlaubt." 
-#: catalog/heap.c:418 commands/tablecmds.c:1619 commands/tablecmds.c:2122 -#: commands/tablecmds.c:5144 +#: catalog/heap.c:425 commands/tablecmds.c:1861 commands/tablecmds.c:2372 +#: commands/tablecmds.c:5467 #, c-format msgid "tables can have at most %d columns" msgstr "Tabellen können höchstens %d Spalten haben" -#: catalog/heap.c:435 commands/tablecmds.c:5402 +#: catalog/heap.c:444 commands/tablecmds.c:5763 #, c-format msgid "column name \"%s\" conflicts with a system column name" msgstr "Spaltenname »%s« steht im Konflikt mit dem Namen einer Systemspalte" -#: catalog/heap.c:451 +#: catalog/heap.c:460 #, c-format msgid "column name \"%s\" specified more than once" msgstr "Spaltenname »%s« mehrmals angegeben" -#: catalog/heap.c:504 +#: catalog/heap.c:513 #, c-format msgid "column \"%s\" has pseudo-type %s" msgstr "Spalte »%s« hat Pseudotyp %s" -#: catalog/heap.c:534 +#: catalog/heap.c:543 #, c-format msgid "composite type %s cannot be made a member of itself" msgstr "zusammengesetzter Typ %s kann nicht Teil von sich selbst werden" -#: catalog/heap.c:576 commands/createas.c:201 commands/createas.c:497 +#: catalog/heap.c:585 commands/createas.c:201 commands/createas.c:498 #, c-format msgid "no collation was derived for column \"%s\" with collatable type %s" msgstr "für Spalte »%s« mit sortierbarem Typ %s wurde keine Sortierfolge abgeleitet" -#: catalog/heap.c:578 commands/createas.c:204 commands/createas.c:500 -#: commands/indexcmds.c:1141 commands/tablecmds.c:13000 commands/view.c:103 -#: regex/regc_pg_locale.c:262 utils/adt/formatting.c:1500 -#: utils/adt/formatting.c:1552 utils/adt/formatting.c:1620 -#: utils/adt/formatting.c:1672 utils/adt/formatting.c:1741 -#: utils/adt/formatting.c:1805 utils/adt/like.c:213 utils/adt/selfuncs.c:5332 -#: utils/adt/varlena.c:1422 utils/adt/varlena.c:1827 +#: catalog/heap.c:587 commands/createas.c:204 commands/createas.c:501 +#: commands/indexcmds.c:1557 commands/tablecmds.c:13803 commands/view.c:103 +#: regex/regc_pg_locale.c:263 utils/adt/formatting.c:1536 +#: utils/adt/formatting.c:1658 utils/adt/formatting.c:1781 utils/adt/like.c:184 +#: utils/adt/selfuncs.c:5807 utils/adt/varlena.c:1416 utils/adt/varlena.c:1881 #, c-format msgid "Use the COLLATE clause to set the collation explicitly." msgstr "Verwenden Sie die COLLATE-Klausel, um die Sortierfolge explizit zu setzen." -#: catalog/heap.c:1063 catalog/index.c:806 commands/tablecmds.c:2903 +#: catalog/heap.c:1076 catalog/index.c:864 commands/tablecmds.c:3141 #, c-format msgid "relation \"%s\" already exists" msgstr "Relation »%s« existiert bereits" -#: catalog/heap.c:1079 catalog/pg_type.c:410 catalog/pg_type.c:717 -#: commands/typecmds.c:239 commands/typecmds.c:788 commands/typecmds.c:1139 -#: commands/typecmds.c:1350 commands/typecmds.c:2106 +#: catalog/heap.c:1092 catalog/pg_type.c:409 catalog/pg_type.c:731 +#: commands/typecmds.c:236 commands/typecmds.c:787 commands/typecmds.c:1186 +#: commands/typecmds.c:1419 commands/typecmds.c:2174 #, c-format msgid "type \"%s\" already exists" msgstr "Typ »%s« existiert bereits" -#: catalog/heap.c:1080 +#: catalog/heap.c:1093 #, c-format msgid "A relation has an associated type of the same name, so you must use a name that doesn't conflict with any existing type." msgstr "Eine Relation hat einen zugehörigen Typ mit dem selben Namen, daher müssen Sie einen Namen wählen, der nicht mit einem bestehenden Typ kollidiert." 
-#: catalog/heap.c:1109 +#: catalog/heap.c:1122 #, c-format msgid "pg_class heap OID value not set when in binary upgrade mode" msgstr "Heap-OID-Wert für pg_class ist im Binary-Upgrade-Modus nicht gesetzt" -#: catalog/heap.c:2063 -#, fuzzy, c-format -#| msgid "cannot use a deferrable unique constraint for referenced table \"%s\"" +#: catalog/heap.c:2254 +#, c-format msgid "cannot add NO INHERIT constraint to partitioned table \"%s\"" -msgstr "aufschiebbarer Unique-Constraint kann nicht für Tabelle »%s«, auf die verwiesen wird, verwendet werden" +msgstr "zur partitionierten Tabelle »%s« kann kein NO-INHERIT-Constraint hinzugefügt werden" -#: catalog/heap.c:2321 +#: catalog/heap.c:2519 #, c-format msgid "check constraint \"%s\" already exists" msgstr "Check-Constraint »%s« existiert bereits" -#: catalog/heap.c:2489 catalog/pg_constraint.c:649 commands/tablecmds.c:6525 +#: catalog/heap.c:2688 catalog/pg_constraint.c:912 commands/tablecmds.c:7109 #, c-format msgid "constraint \"%s\" for relation \"%s\" already exists" msgstr "Constraint »%s« existiert bereits für Relation »%s«" -#: catalog/heap.c:2496 +#: catalog/heap.c:2695 #, c-format msgid "constraint \"%s\" conflicts with non-inherited constraint on relation \"%s\"" msgstr "Constraint »%s« kollidiert mit nicht vererbtem Constraint für Relation »%s«" -#: catalog/heap.c:2507 +#: catalog/heap.c:2706 #, c-format msgid "constraint \"%s\" conflicts with inherited constraint on relation \"%s\"" msgstr "Constraint »%s« kollidiert mit vererbtem Constraint für Relation »%s«" -#: catalog/heap.c:2517 +#: catalog/heap.c:2716 #, c-format msgid "constraint \"%s\" conflicts with NOT VALID constraint on relation \"%s\"" msgstr "Constraint »%s« kollidiert mit NOT-VALID-Constraint für Relation »%s«" -#: catalog/heap.c:2522 +#: catalog/heap.c:2721 #, c-format msgid "merging constraint \"%s\" with inherited definition" msgstr "Constraint »%s« wird mit geerbter Definition zusammengeführt" -#: catalog/heap.c:2638 +#: catalog/heap.c:2837 #, c-format msgid "cannot use column references in default expression" msgstr "Spaltenverweise können nicht in Vorgabeausdrücken verwendet werden" -#: catalog/heap.c:2663 rewrite/rewriteHandler.c:1097 +#: catalog/heap.c:2862 rewrite/rewriteHandler.c:1176 #, c-format msgid "column \"%s\" is of type %s but default expression is of type %s" msgstr "Spalte »%s« hat Typ %s, aber der Vorgabeausdruck hat Typ %s" -#: catalog/heap.c:2668 commands/prepare.c:384 parser/parse_node.c:428 -#: parser/parse_target.c:589 parser/parse_target.c:839 -#: parser/parse_target.c:849 rewrite/rewriteHandler.c:1102 +#: catalog/heap.c:2867 commands/prepare.c:384 parser/parse_node.c:430 +#: parser/parse_target.c:590 parser/parse_target.c:859 +#: parser/parse_target.c:869 rewrite/rewriteHandler.c:1181 #, c-format msgid "You will need to rewrite or cast the expression." msgstr "Sie müssen den Ausdruck umschreiben oder eine Typumwandlung vornehmen." -#: catalog/heap.c:2715 +#: catalog/heap.c:2914 #, c-format msgid "only table \"%s\" can be referenced in check constraint" msgstr "nur Verweise auf Tabelle »%s« sind im Check-Constraint zugelassen" -#: catalog/heap.c:2955 +#: catalog/heap.c:3154 #, c-format msgid "unsupported ON COMMIT and foreign key combination" msgstr "nicht unterstützte Kombination aus ON COMMIT und Fremdschlüssel" -#: catalog/heap.c:2956 +#: catalog/heap.c:3155 #, c-format msgid "Table \"%s\" references \"%s\", but they do not have the same ON COMMIT setting." 
msgstr "Tabelle »%s« verweist auf »%s«, aber sie haben nicht die gleiche ON-COMMIT-Einstellung." -#: catalog/heap.c:2961 +#: catalog/heap.c:3160 #, c-format msgid "cannot truncate a table referenced in a foreign key constraint" msgstr "kann eine Tabelle, die in einen Fremdschlüssel-Constraint eingebunden ist, nicht leeren" -#: catalog/heap.c:2962 +#: catalog/heap.c:3161 #, c-format msgid "Table \"%s\" references \"%s\"." msgstr "Tabelle »%s« verweist auf »%s«." -#: catalog/heap.c:2964 +#: catalog/heap.c:3163 #, c-format msgid "Truncate table \"%s\" at the same time, or use TRUNCATE ... CASCADE." msgstr "Leeren Sie die Tabelle »%s« gleichzeitig oder verwenden Sie TRUNCATE ... CASCADE." -#: catalog/index.c:210 parser/parse_utilcmd.c:1541 parser/parse_utilcmd.c:1627 +#: catalog/index.c:231 parser/parse_utilcmd.c:1824 parser/parse_utilcmd.c:1911 #, c-format msgid "multiple primary keys for table \"%s\" are not allowed" msgstr "mehrere Primärschlüssel für Tabelle »%s« nicht erlaubt" -#: catalog/index.c:228 +#: catalog/index.c:249 #, c-format msgid "primary keys cannot be expressions" msgstr "Primärschlüssel können keine Ausdrücke sein" -#: catalog/index.c:756 catalog/index.c:1174 +#: catalog/index.c:814 catalog/index.c:1259 #, c-format msgid "user-defined indexes on system catalog tables are not supported" msgstr "benutzerdefinierte Indexe für Systemkatalogtabellen werden nicht unterstützt" -#: catalog/index.c:766 +#: catalog/index.c:824 #, c-format msgid "concurrent index creation on system catalog tables is not supported" msgstr "nebenläufige Indexerzeugung für Systemkatalogtabellen wird nicht unterstützt" -#: catalog/index.c:784 +#: catalog/index.c:842 #, c-format msgid "shared indexes cannot be created after initdb" msgstr "Cluster-globale Indexe können nicht nach initdb erzeugt werden" -#: catalog/index.c:798 commands/createas.c:249 commands/sequence.c:149 -#: parser/parse_utilcmd.c:197 +#: catalog/index.c:856 commands/createas.c:250 commands/sequence.c:152 +#: parser/parse_utilcmd.c:205 #, c-format msgid "relation \"%s\" already exists, skipping" msgstr "Relation »%s« existiert bereits, wird übersprungen" -#: catalog/index.c:834 +#: catalog/index.c:892 #, c-format msgid "pg_class index OID value not set when in binary upgrade mode" msgstr "Index-OID-Wert für pg_class ist im Binary-Upgrade-Modus nicht gesetzt" -#: catalog/index.c:1435 +#: catalog/index.c:1534 #, c-format msgid "DROP INDEX CONCURRENTLY must be first action in transaction" msgstr "DROP INDEX CONCURRENTLY muss die erste Aktion in einer Transaktion sein" -#: catalog/index.c:2020 -#, c-format -msgid "building index \"%s\" on table \"%s\"" +#: catalog/index.c:2263 +#, fuzzy, c-format +#| msgid "building index \"%s\" on table \"%s\"" +msgid "building index \"%s\" on table \"%s\" serially" msgstr "baue Index »%s« von Tabelle »%s«" -#: catalog/index.c:3338 +#: catalog/index.c:2268 +#, fuzzy, c-format +#| msgid "building index \"%s\" on table \"%s\"" +msgid "building index \"%s\" on table \"%s\" with request for %d parallel worker" +msgid_plural "building index \"%s\" on table \"%s\" with request for %d parallel workers" +msgstr[0] "baue Index »%s« von Tabelle »%s«" +msgstr[1] "baue Index »%s« von Tabelle »%s«" + +#: catalog/index.c:3657 #, c-format msgid "cannot reindex temporary tables of other sessions" msgstr "kann temporäre Tabellen anderer Sitzungen nicht reindizieren" -#: catalog/index.c:3469 +#: catalog/index.c:3788 #, c-format msgid "index \"%s\" was reindexed" msgstr "Index »%s« wurde neu indiziert" -#: 
catalog/index.c:3471 commands/vacuumlazy.c:1345 commands/vacuumlazy.c:1421 -#: commands/vacuumlazy.c:1610 commands/vacuumlazy.c:1820 +#: catalog/index.c:3859 #, c-format -msgid "%s." -msgstr "%s." +msgid "REINDEX of partitioned tables is not yet implemented, skipping \"%s\"" +msgstr "REINDEX von partitionierten Tabellen ist noch nicht implementiert, »%s« wird übersprungen" -#: catalog/namespace.c:234 catalog/namespace.c:432 catalog/namespace.c:526 -#: commands/trigger.c:4782 +#: catalog/namespace.c:248 catalog/namespace.c:452 catalog/namespace.c:546 +#: commands/trigger.c:5377 #, c-format msgid "cross-database references are not implemented: \"%s.%s.%s\"" msgstr "Verweise auf andere Datenbanken sind nicht implementiert: »%s.%s.%s«" -#: catalog/namespace.c:291 +#: catalog/namespace.c:305 #, c-format msgid "temporary tables cannot specify a schema name" msgstr "temporäre Tabellen können keinen Schemanamen angeben" -#: catalog/namespace.c:370 +#: catalog/namespace.c:386 #, c-format msgid "could not obtain lock on relation \"%s.%s\"" msgstr "konnte Sperre für Relation »%s.%s« nicht setzen" -#: catalog/namespace.c:375 commands/lockcmds.c:145 +#: catalog/namespace.c:391 commands/lockcmds.c:152 commands/lockcmds.c:238 #, c-format msgid "could not obtain lock on relation \"%s\"" msgstr "konnte Sperre für Relation »%s« nicht setzen" -#: catalog/namespace.c:399 parser/parse_relation.c:1137 +#: catalog/namespace.c:419 parser/parse_relation.c:1158 #, c-format msgid "relation \"%s.%s\" does not exist" msgstr "Relation »%s.%s« existiert nicht" -#: catalog/namespace.c:404 parser/parse_relation.c:1150 -#: parser/parse_relation.c:1158 utils/adt/regproc.c:1036 +#: catalog/namespace.c:424 parser/parse_relation.c:1171 +#: parser/parse_relation.c:1179 #, c-format msgid "relation \"%s\" does not exist" msgstr "Relation »%s« existiert nicht" -#: catalog/namespace.c:472 catalog/namespace.c:2826 commands/extension.c:1460 -#: commands/extension.c:1466 +#: catalog/namespace.c:492 catalog/namespace.c:3011 commands/extension.c:1466 +#: commands/extension.c:1472 #, c-format msgid "no schema has been selected to create in" msgstr "kein Schema für die Objekterzeugung ausgewählt" -#: catalog/namespace.c:624 catalog/namespace.c:637 +#: catalog/namespace.c:644 catalog/namespace.c:657 #, c-format msgid "cannot create relations in temporary schemas of other sessions" msgstr "kann keine Relationen in temporären Schemas anderer Sitzungen erzeugen" -#: catalog/namespace.c:628 +#: catalog/namespace.c:648 #, c-format msgid "cannot create temporary relation in non-temporary schema" msgstr "kann keine temporäre Relation in einem nicht-temporären Schema erzeugen" -#: catalog/namespace.c:643 +#: catalog/namespace.c:663 #, c-format msgid "only temporary relations may be created in temporary schemas" msgstr "nur temporäre Relationen können in temporären Schemas erzeugt werden" -#: catalog/namespace.c:2139 +#: catalog/namespace.c:2201 +#, c-format +msgid "statistics object \"%s\" does not exist" +msgstr "Statistikobjekt »%s« existiert nicht" + +#: catalog/namespace.c:2324 #, c-format msgid "text search parser \"%s\" does not exist" msgstr "Textsucheparser »%s« existiert nicht" -#: catalog/namespace.c:2265 +#: catalog/namespace.c:2450 #, c-format msgid "text search dictionary \"%s\" does not exist" msgstr "Textsuchewörterbuch »%s« existiert nicht" -#: catalog/namespace.c:2392 +#: catalog/namespace.c:2577 #, c-format msgid "text search template \"%s\" does not exist" msgstr "Textsuchevorlage »%s« existiert nicht" -#: 
catalog/namespace.c:2518 commands/tsearchcmds.c:1185 -#: utils/cache/ts_cache.c:612 +#: catalog/namespace.c:2703 commands/tsearchcmds.c:1185 +#: utils/cache/ts_cache.c:616 #, c-format msgid "text search configuration \"%s\" does not exist" msgstr "Textsuchekonfiguration »%s« existiert nicht" -#: catalog/namespace.c:2631 parser/parse_expr.c:791 parser/parse_target.c:1191 +#: catalog/namespace.c:2816 parser/parse_expr.c:793 parser/parse_target.c:1214 #, c-format msgid "cross-database references are not implemented: %s" msgstr "Verweise auf andere Datenbanken sind nicht implementiert: %s" -#: catalog/namespace.c:2637 gram.y:14068 gram.y:15487 parser/parse_expr.c:798 -#: parser/parse_target.c:1198 +#: catalog/namespace.c:2822 gram.y:14690 gram.y:16122 parser/parse_expr.c:800 +#: parser/parse_target.c:1221 #, c-format msgid "improper qualified name (too many dotted names): %s" msgstr "falscher qualifizierter Name (zu viele Namensteile): %s" -#: catalog/namespace.c:2768 +#: catalog/namespace.c:2953 #, c-format msgid "cannot move objects into or out of temporary schemas" msgstr "Objekte können nicht in oder aus temporären Schemas verschoben werden" -#: catalog/namespace.c:2774 +#: catalog/namespace.c:2959 #, c-format msgid "cannot move objects into or out of TOAST schema" msgstr "Objekte können nicht in oder aus TOAST-Schemas verschoben werden" -#: catalog/namespace.c:2847 commands/schemacmds.c:255 -#: commands/schemacmds.c:333 commands/tablecmds.c:891 +#: catalog/namespace.c:3032 commands/schemacmds.c:256 commands/schemacmds.c:334 +#: commands/tablecmds.c:1014 #, c-format msgid "schema \"%s\" does not exist" msgstr "Schema »%s« existiert nicht" -#: catalog/namespace.c:2878 +#: catalog/namespace.c:3063 #, c-format msgid "improper relation name (too many dotted names): %s" msgstr "falscher Relationsname (zu viele Namensteile): %s" -#: catalog/namespace.c:3388 +#: catalog/namespace.c:3557 #, c-format msgid "collation \"%s\" for encoding \"%s\" does not exist" msgstr "Sortierfolge »%s« für Kodierung »%s« existiert nicht" -#: catalog/namespace.c:3443 +#: catalog/namespace.c:3612 #, c-format msgid "conversion \"%s\" does not exist" msgstr "Konversion »%s« existiert nicht" -#: catalog/namespace.c:3651 +#: catalog/namespace.c:3820 #, c-format msgid "permission denied to create temporary tables in database \"%s\"" msgstr "keine Berechtigung, um temporäre Tabellen in Datenbank »%s« zu erzeugen" -#: catalog/namespace.c:3667 +#: catalog/namespace.c:3836 #, c-format msgid "cannot create temporary tables during recovery" msgstr "während der Wiederherstellung können keine temporären Tabellen erzeugt werden" -#: catalog/namespace.c:3673 +#: catalog/namespace.c:3842 #, c-format -msgid "cannot create temporary tables in parallel mode" -msgstr "im Parallelmodus können keine temporären Tabellen erzeugt werden" +msgid "cannot create temporary tables during a parallel operation" +msgstr "während einer parallelen Operation können keine temporären Tabellen erzeugt werden" -#: catalog/namespace.c:3922 commands/tablespace.c:1169 commands/variable.c:64 -#: utils/misc/guc.c:9942 utils/misc/guc.c:10020 +#: catalog/namespace.c:4091 commands/tablespace.c:1171 commands/variable.c:64 +#: utils/misc/guc.c:10244 utils/misc/guc.c:10322 #, c-format msgid "List syntax is invalid." msgstr "Die Listensyntax ist ungültig." 
-#: catalog/objectaddress.c:1215 catalog/pg_publication.c:57 -#: commands/lockcmds.c:93 commands/policy.c:94 commands/policy.c:391 -#: commands/policy.c:480 commands/tablecmds.c:223 commands/tablecmds.c:265 -#: commands/tablecmds.c:1477 commands/tablecmds.c:4665 -#: commands/tablecmds.c:8502 +#: catalog/objectaddress.c:1238 catalog/pg_publication.c:66 +#: commands/policy.c:94 commands/policy.c:394 commands/policy.c:484 +#: commands/tablecmds.c:225 commands/tablecmds.c:267 commands/tablecmds.c:1719 +#: commands/tablecmds.c:4962 commands/tablecmds.c:9184 #, c-format msgid "\"%s\" is not a table" msgstr "»%s« ist keine Tabelle" -#: catalog/objectaddress.c:1222 commands/tablecmds.c:235 -#: commands/tablecmds.c:4695 commands/tablecmds.c:12739 commands/view.c:141 +#: catalog/objectaddress.c:1245 commands/tablecmds.c:237 +#: commands/tablecmds.c:4992 commands/tablecmds.c:13507 commands/view.c:141 #, c-format msgid "\"%s\" is not a view" msgstr "»%s« ist keine Sicht" -#: catalog/objectaddress.c:1229 commands/matview.c:172 -#: commands/tablecmds.c:241 commands/tablecmds.c:12744 +#: catalog/objectaddress.c:1252 commands/matview.c:172 commands/tablecmds.c:243 +#: commands/tablecmds.c:13512 #, c-format msgid "\"%s\" is not a materialized view" msgstr "»%s« ist keine materialisierte Sicht" -#: catalog/objectaddress.c:1236 commands/tablecmds.c:259 -#: commands/tablecmds.c:4698 commands/tablecmds.c:12749 +#: catalog/objectaddress.c:1259 commands/tablecmds.c:261 +#: commands/tablecmds.c:4995 commands/tablecmds.c:13517 #, c-format msgid "\"%s\" is not a foreign table" msgstr "»%s« ist keine Fremdtabelle" -#: catalog/objectaddress.c:1277 -#, fuzzy, c-format -#| msgid "%s must specify unqualified relation names" +#: catalog/objectaddress.c:1300 +#, c-format msgid "must specify relation and object name" -msgstr "%s muss unqualifizierte Relationsnamen angeben" +msgstr "Relations- und Objektname müssen angegeben werden" -#: catalog/objectaddress.c:1353 catalog/objectaddress.c:1406 +#: catalog/objectaddress.c:1376 catalog/objectaddress.c:1429 #, c-format msgid "column name must be qualified" msgstr "Spaltenname muss qualifiziert werden" -#: catalog/objectaddress.c:1449 +#: catalog/objectaddress.c:1472 #, c-format msgid "default value for column \"%s\" of relation \"%s\" does not exist" msgstr "Vorgabewert für Spalte »%s« von Relation »%s« existiert nicht" -#: catalog/objectaddress.c:1486 commands/functioncmds.c:128 -#: commands/tablecmds.c:251 commands/typecmds.c:3233 parser/parse_type.c:226 -#: parser/parse_type.c:255 parser/parse_type.c:794 utils/adt/acl.c:4357 -#: utils/adt/regproc.c:1227 +#: catalog/objectaddress.c:1509 commands/functioncmds.c:131 +#: commands/tablecmds.c:253 commands/typecmds.c:3320 parser/parse_type.c:226 +#: parser/parse_type.c:255 parser/parse_type.c:828 utils/adt/acl.c:4377 #, c-format msgid "type \"%s\" does not exist" msgstr "Typ »%s« existiert nicht" -#: catalog/objectaddress.c:1603 +#: catalog/objectaddress.c:1628 #, c-format msgid "operator %d (%s, %s) of %s does not exist" msgstr "Operator %d (%s, %s) von %s existiert nicht" -#: catalog/objectaddress.c:1632 +#: catalog/objectaddress.c:1659 #, c-format msgid "function %d (%s, %s) of %s does not exist" msgstr "Funktion %d (%s, %s) von %s existiert nicht" -#: catalog/objectaddress.c:1681 catalog/objectaddress.c:1707 +#: catalog/objectaddress.c:1710 catalog/objectaddress.c:1736 #, c-format msgid "user mapping for user \"%s\" on server \"%s\" does not exist" msgstr "Benutzerabbildung für Benutzer »%s« auf Server »%s« existiert nicht" 
-#: catalog/objectaddress.c:1696 commands/foreigncmds.c:428 -#: commands/foreigncmds.c:991 commands/foreigncmds.c:1349 +#: catalog/objectaddress.c:1725 commands/foreigncmds.c:428 +#: commands/foreigncmds.c:1004 commands/foreigncmds.c:1377 #: foreign/foreign.c:688 #, c-format msgid "server \"%s\" does not exist" msgstr "Server »%s« existiert nicht" -#: catalog/objectaddress.c:1763 +#: catalog/objectaddress.c:1792 #, c-format msgid "publication relation \"%s\" in publication \"%s\" does not exist" msgstr "Publikationsrelation »%s« in Publikation »%s« existiert nicht" -#: catalog/objectaddress.c:1822 +#: catalog/objectaddress.c:1854 #, c-format -msgid "unrecognized default ACL object type %c" -msgstr "unbekannter Standard-ACL-Objekttyp %c" +msgid "unrecognized default ACL object type \"%c\"" +msgstr "unbekannter Standard-ACL-Objekttyp »%c«" -#: catalog/objectaddress.c:1823 +#: catalog/objectaddress.c:1855 #, c-format -msgid "Valid object types are \"r\", \"S\", \"f\", and \"T\"." -msgstr "Gültige Objekttypen sind »r«, »S«, »f« und »T«." +msgid "Valid object types are \"%c\", \"%c\", \"%c\", \"%c\", \"%c\"." +msgstr "Gültige Objekttypen sind »%c«, »%c«, »%c«, »%c«, »%c«." -#: catalog/objectaddress.c:1869 +#: catalog/objectaddress.c:1906 #, c-format msgid "default ACL for user \"%s\" in schema \"%s\" on %s does not exist" msgstr "Standard-ACL für Benutzer »%s« in Schema »%s« für %s existiert nicht" -#: catalog/objectaddress.c:1874 +#: catalog/objectaddress.c:1911 #, c-format msgid "default ACL for user \"%s\" on %s does not exist" msgstr "Standard-ACL für Benutzer »%s« für %s existiert nicht" -#: catalog/objectaddress.c:1901 catalog/objectaddress.c:1959 -#: catalog/objectaddress.c:2014 +#: catalog/objectaddress.c:1938 catalog/objectaddress.c:1996 +#: catalog/objectaddress.c:2053 #, c-format msgid "name or argument lists may not contain nulls" msgstr "Namens- oder Argumentlisten dürfen keine NULL-Werte enthalten" -#: catalog/objectaddress.c:1935 +#: catalog/objectaddress.c:1972 #, c-format msgid "unsupported object type \"%s\"" msgstr "nicht unterstützter Objekttyp »%s«" -#: catalog/objectaddress.c:1955 catalog/objectaddress.c:1973 -#: catalog/objectaddress.c:2110 +#: catalog/objectaddress.c:1992 catalog/objectaddress.c:2010 +#: catalog/objectaddress.c:2151 #, c-format msgid "name list length must be exactly %d" msgstr "Länge der Namensliste muss genau %d sein" -#: catalog/objectaddress.c:1977 +#: catalog/objectaddress.c:2014 #, c-format msgid "large object OID may not be null" msgstr "Large-Object-OID darf nicht NULL sein" -#: catalog/objectaddress.c:1986 catalog/objectaddress.c:2047 -#: catalog/objectaddress.c:2054 +#: catalog/objectaddress.c:2023 catalog/objectaddress.c:2086 +#: catalog/objectaddress.c:2093 #, c-format msgid "name list length must be at least %d" msgstr "Länge der Namensliste muss mindestens %d sein" -#: catalog/objectaddress.c:2040 catalog/objectaddress.c:2060 +#: catalog/objectaddress.c:2079 catalog/objectaddress.c:2100 #, c-format msgid "argument list length must be exactly %d" msgstr "Länge der Argumentliste muss genau %d sein" -#: catalog/objectaddress.c:2285 libpq/be-fsstubs.c:350 +#: catalog/objectaddress.c:2330 libpq/be-fsstubs.c:321 #, c-format msgid "must be owner of large object %u" msgstr "Berechtigung nur für Eigentümer des Large Object %u" -#: catalog/objectaddress.c:2300 commands/functioncmds.c:1419 +#: catalog/objectaddress.c:2345 commands/functioncmds.c:1452 #, c-format msgid "must be owner of type %s or type %s" msgstr "Berechtigung nur für Eigentümer des 
Typs %s oder des Typs %s" -#: catalog/objectaddress.c:2350 catalog/objectaddress.c:2367 +#: catalog/objectaddress.c:2395 catalog/objectaddress.c:2412 #, c-format msgid "must be superuser" msgstr "Berechtigung nur für Superuser" -#: catalog/objectaddress.c:2357 +#: catalog/objectaddress.c:2402 #, c-format msgid "must have CREATEROLE privilege" msgstr "Berechtigung nur mit CREATEROLE-Privileg" -#: catalog/objectaddress.c:2432 +#: catalog/objectaddress.c:2481 #, c-format msgid "unrecognized object type \"%s\"" msgstr "unbekannter Objekttyp »%s«" -#: catalog/objectaddress.c:2627 +#: catalog/objectaddress.c:2686 #, c-format msgid " column %s" msgstr " Spalte %s" -#: catalog/objectaddress.c:2633 +#: catalog/objectaddress.c:2693 #, c-format msgid "function %s" msgstr "Funktion %s" -#: catalog/objectaddress.c:2638 +#: catalog/objectaddress.c:2698 #, c-format msgid "type %s" msgstr "Typ %s" -#: catalog/objectaddress.c:2668 +#: catalog/objectaddress.c:2728 #, c-format msgid "cast from %s to %s" msgstr "Typumwandlung von %s in %s" -#: catalog/objectaddress.c:2688 +#: catalog/objectaddress.c:2748 #, c-format msgid "collation %s" msgstr "Sortierfolge %s" -#: catalog/objectaddress.c:2712 +#: catalog/objectaddress.c:2772 #, c-format msgid "constraint %s on %s" msgstr "Constraint %s für %s" -#: catalog/objectaddress.c:2718 +#: catalog/objectaddress.c:2778 #, c-format msgid "constraint %s" msgstr "Constraint %s" -#: catalog/objectaddress.c:2735 +#: catalog/objectaddress.c:2795 #, c-format msgid "conversion %s" msgstr "Konversion %s" -#: catalog/objectaddress.c:2772 +#: catalog/objectaddress.c:2832 #, c-format msgid "default for %s" msgstr "Vorgabewert für %s" -#: catalog/objectaddress.c:2781 +#: catalog/objectaddress.c:2841 #, c-format msgid "language %s" msgstr "Sprache %s" -#: catalog/objectaddress.c:2786 +#: catalog/objectaddress.c:2846 #, c-format msgid "large object %u" msgstr "Large Object %u" -#: catalog/objectaddress.c:2791 +#: catalog/objectaddress.c:2851 #, c-format msgid "operator %s" msgstr "Operator %s" -#: catalog/objectaddress.c:2823 +#: catalog/objectaddress.c:2883 #, c-format msgid "operator class %s for access method %s" msgstr "Operatorklasse %s für Zugriffsmethode %s" +#: catalog/objectaddress.c:2906 +#, c-format +msgid "access method %s" +msgstr "Zugriffsmethode %s" + #. translator: %d is the operator strategy (a number), the #. first two %s's are data type names, the third %s is the #. description of the operator family, and the last %s is the #. textual form of the operator with arguments. -#: catalog/objectaddress.c:2873 +#: catalog/objectaddress.c:2948 #, c-format msgid "operator %d (%s, %s) of %s: %s" msgstr "Operator %d (%s, %s) von %s: %s" @@ -4274,406 +4308,401 @@ msgstr "Operator %d (%s, %s) von %s: %s" #. are data type names, the third %s is the description of the #. operator family, and the last %s is the textual form of the #. function with arguments. 
-#: catalog/objectaddress.c:2923 +#: catalog/objectaddress.c:2998 #, c-format msgid "function %d (%s, %s) of %s: %s" msgstr "Funktion %d (%s, %s) von %s: %s" -#: catalog/objectaddress.c:2963 +#: catalog/objectaddress.c:3038 #, c-format msgid "rule %s on " msgstr "Regel %s für " -#: catalog/objectaddress.c:2985 -#, c-format -msgid "transform for %s language %s" -msgstr "Transformation %s für Sprache %s" - -#: catalog/objectaddress.c:3019 +#: catalog/objectaddress.c:3073 #, c-format msgid "trigger %s on " msgstr "Trigger %s für " -#: catalog/objectaddress.c:3036 +#: catalog/objectaddress.c:3090 #, c-format msgid "schema %s" msgstr "Schema %s" -#: catalog/objectaddress.c:3049 +#: catalog/objectaddress.c:3107 +#, c-format +msgid "statistics object %s" +msgstr "Statistikobjekt %s" + +#: catalog/objectaddress.c:3123 #, c-format msgid "text search parser %s" msgstr "Textsucheparser %s" -#: catalog/objectaddress.c:3064 +#: catalog/objectaddress.c:3138 #, c-format msgid "text search dictionary %s" msgstr "Textsuchewörterbuch %s" -#: catalog/objectaddress.c:3079 +#: catalog/objectaddress.c:3153 #, c-format msgid "text search template %s" msgstr "Textsuchevorlage %s" -#: catalog/objectaddress.c:3094 +#: catalog/objectaddress.c:3168 #, c-format msgid "text search configuration %s" msgstr "Textsuchekonfiguration %s" -#: catalog/objectaddress.c:3102 +#: catalog/objectaddress.c:3176 #, c-format msgid "role %s" msgstr "Rolle %s" -#: catalog/objectaddress.c:3115 +#: catalog/objectaddress.c:3189 #, c-format msgid "database %s" msgstr "Datenbank %s" -#: catalog/objectaddress.c:3127 +#: catalog/objectaddress.c:3201 #, c-format msgid "tablespace %s" msgstr "Tablespace %s" -#: catalog/objectaddress.c:3136 +#: catalog/objectaddress.c:3210 #, c-format msgid "foreign-data wrapper %s" msgstr "Fremddaten-Wrapper %s" -#: catalog/objectaddress.c:3145 +#: catalog/objectaddress.c:3219 #, c-format msgid "server %s" msgstr "Server %s" -#: catalog/objectaddress.c:3173 +#: catalog/objectaddress.c:3247 #, c-format msgid "user mapping for %s on server %s" msgstr "Benutzerabbildung für %s auf Server %s" -#: catalog/objectaddress.c:3208 +#: catalog/objectaddress.c:3282 #, c-format msgid "default privileges on new relations belonging to role %s" msgstr "Vorgabeprivilegien für neue Relationen von Rolle %s" -#: catalog/objectaddress.c:3213 +#: catalog/objectaddress.c:3287 #, c-format msgid "default privileges on new sequences belonging to role %s" msgstr "Vorgabeprivilegien für neue Sequenzen von Rolle %s" -#: catalog/objectaddress.c:3218 +#: catalog/objectaddress.c:3292 #, c-format msgid "default privileges on new functions belonging to role %s" msgstr "Vorgabeprivilegien für neue Funktionen von Rolle %s" -#: catalog/objectaddress.c:3223 +#: catalog/objectaddress.c:3297 #, c-format msgid "default privileges on new types belonging to role %s" msgstr "Vorgabeprivilegien für neue Typen von Rolle %s" -#: catalog/objectaddress.c:3229 +#: catalog/objectaddress.c:3302 +#, c-format +msgid "default privileges on new schemas belonging to role %s" +msgstr "Vorgabeprivilegien für neue Schemas von Rolle %s" + +#: catalog/objectaddress.c:3308 #, c-format msgid "default privileges belonging to role %s" msgstr "Vorgabeprivilegien von Rolle %s" -#: catalog/objectaddress.c:3237 +#: catalog/objectaddress.c:3316 #, c-format msgid " in schema %s" msgstr " in Schema %s" -#: catalog/objectaddress.c:3254 +#: catalog/objectaddress.c:3333 #, c-format msgid "extension %s" msgstr "Erweiterung %s" -#: catalog/objectaddress.c:3267 +#: 
catalog/objectaddress.c:3346 #, c-format msgid "event trigger %s" msgstr "Ereignistrigger %s" -#: catalog/objectaddress.c:3299 +#: catalog/objectaddress.c:3378 #, c-format msgid "policy %s on " msgstr "Policy %s für " -#: catalog/objectaddress.c:3317 -#, c-format -msgid "access method %s" -msgstr "Zugriffsmethode %s" - -#: catalog/objectaddress.c:3325 +#: catalog/objectaddress.c:3389 #, c-format msgid "publication %s" msgstr "Publikation %s" -#: catalog/objectaddress.c:3345 +#: catalog/objectaddress.c:3409 #, c-format msgid "publication table %s in publication %s" msgstr "Publikationstabelle %s in Publikation %s" -#: catalog/objectaddress.c:3353 +#: catalog/objectaddress.c:3417 #, c-format msgid "subscription %s" msgstr "Subskription %s" -#: catalog/objectaddress.c:3413 +#: catalog/objectaddress.c:3435 +#, c-format +msgid "transform for %s language %s" +msgstr "Transformation %s für Sprache %s" + +#: catalog/objectaddress.c:3496 #, c-format msgid "table %s" msgstr "Tabelle %s" -#: catalog/objectaddress.c:3417 +#: catalog/objectaddress.c:3501 #, c-format msgid "index %s" msgstr "Index %s" -#: catalog/objectaddress.c:3421 +#: catalog/objectaddress.c:3505 #, c-format msgid "sequence %s" msgstr "Sequenz %s" -#: catalog/objectaddress.c:3425 +#: catalog/objectaddress.c:3509 #, c-format msgid "toast table %s" msgstr "TOAST-Tabelle %s" -#: catalog/objectaddress.c:3429 +#: catalog/objectaddress.c:3513 #, c-format msgid "view %s" msgstr "Sicht %s" -#: catalog/objectaddress.c:3433 +#: catalog/objectaddress.c:3517 #, c-format msgid "materialized view %s" msgstr "materialisierte Sicht %s" -#: catalog/objectaddress.c:3437 +#: catalog/objectaddress.c:3521 #, c-format msgid "composite type %s" msgstr "zusammengesetzter Typ %s" -#: catalog/objectaddress.c:3441 +#: catalog/objectaddress.c:3525 #, c-format msgid "foreign table %s" msgstr "Fremdtabelle %s" -#: catalog/objectaddress.c:3446 +#: catalog/objectaddress.c:3530 #, c-format msgid "relation %s" msgstr "Relation %s" -#: catalog/objectaddress.c:3483 +#: catalog/objectaddress.c:3567 #, c-format msgid "operator family %s for access method %s" msgstr "Operatorfamilie %s für Zugriffsmethode %s" -#: catalog/objectaddress.c:4854 +#: catalog/objectaddress.c:4939 #, c-format msgid "%s in publication %s" msgstr "%s in Publikation %s" -#: catalog/partition.c:741 -#, fuzzy, c-format -#| msgid "cannot determine transition data type" -msgid "cannot create range partition with empty range" -msgstr "kann Übergangsdatentyp nicht bestimmen" - -#: catalog/partition.c:835 -#, fuzzy, c-format -#| msgid "relation \"%s\" is not a parent of relation \"%s\"" -msgid "partition \"%s\" would overlap partition \"%s\"" -msgstr "Relation »%s« ist keine Basisrelation von Relation »%s«" - -#: catalog/partition.c:939 catalog/partition.c:1088 commands/analyze.c:1438 -#: commands/tablecmds.c:8564 executor/execMain.c:3159 executor/execQual.c:2691 +#: catalog/partition.c:180 catalog/pg_constraint.c:441 commands/analyze.c:1499 +#: commands/indexcmds.c:922 commands/tablecmds.c:941 commands/tablecmds.c:9246 +#: commands/tablecmds.c:14403 commands/tablecmds.c:14880 +#: executor/execExprInterp.c:3302 executor/execMain.c:1927 +#: executor/execMain.c:2006 executor/execMain.c:2054 executor/execMain.c:2165 +#: executor/execPartition.c:409 executor/execPartition.c:469 +#: executor/execPartition.c:585 executor/execPartition.c:688 +#: executor/execPartition.c:759 executor/execPartition.c:957 +#: executor/nodeModifyTable.c:1835 msgid "could not convert row type" msgstr "konnte Zeilentyp nicht 
umwandeln" -#: catalog/partition.c:1726 -#, c-format -msgid "range partition key of row contains null" -msgstr "" - -#: catalog/pg_aggregate.c:125 +#: catalog/pg_aggregate.c:126 #, c-format msgid "aggregates cannot have more than %d argument" msgid_plural "aggregates cannot have more than %d arguments" msgstr[0] "Aggregatfunktionen können nicht mehr als %d Argument haben" msgstr[1] "Aggregatfunktionen können nicht mehr als %d Argumente haben" -#: catalog/pg_aggregate.c:148 catalog/pg_aggregate.c:158 +#: catalog/pg_aggregate.c:149 catalog/pg_aggregate.c:159 #, c-format msgid "cannot determine transition data type" msgstr "kann Übergangsdatentyp nicht bestimmen" -#: catalog/pg_aggregate.c:149 catalog/pg_aggregate.c:159 +#: catalog/pg_aggregate.c:150 catalog/pg_aggregate.c:160 #, c-format msgid "An aggregate using a polymorphic transition type must have at least one polymorphic argument." msgstr "Eine Aggregatfunktion mit polymorphischem Übergangstyp muss mindestens ein polymorphisches Argument haben." -#: catalog/pg_aggregate.c:172 +#: catalog/pg_aggregate.c:173 #, c-format msgid "a variadic ordered-set aggregate must use VARIADIC type ANY" msgstr "eine variadische Ordered-Set-Aggregatfunktion muss VARIADIC-Typ ANY verwenden" -#: catalog/pg_aggregate.c:198 +#: catalog/pg_aggregate.c:199 #, c-format msgid "a hypothetical-set aggregate must have direct arguments matching its aggregated arguments" msgstr "eine Hypothetical-Set-Aggregatfunktion muss direkte Argumente haben, die mit ihren aggregierten Argumenten übereinstimmen" -#: catalog/pg_aggregate.c:245 catalog/pg_aggregate.c:289 +#: catalog/pg_aggregate.c:246 catalog/pg_aggregate.c:290 #, c-format msgid "return type of transition function %s is not %s" msgstr "Rückgabetyp der Übergangsfunktion %s ist nicht %s" -#: catalog/pg_aggregate.c:265 catalog/pg_aggregate.c:308 +#: catalog/pg_aggregate.c:266 catalog/pg_aggregate.c:309 #, c-format msgid "must not omit initial value when transition function is strict and transition type is not compatible with input type" msgstr "Anfangswert darf nicht ausgelassen werden, wenn Übergangsfunktion strikt ist und Übergangstyp nicht mit Eingabetyp kompatibel ist" -#: catalog/pg_aggregate.c:334 +#: catalog/pg_aggregate.c:335 #, c-format msgid "return type of inverse transition function %s is not %s" msgstr "Rückgabetyp der inversen Übergangsfunktion %s ist nicht %s" -#: catalog/pg_aggregate.c:351 executor/nodeWindowAgg.c:2298 +#: catalog/pg_aggregate.c:352 executor/nodeWindowAgg.c:2823 #, c-format msgid "strictness of aggregate's forward and inverse transition functions must match" msgstr "Striktheit der vorwärtigen und inversen Übergangsfunktionen einer Aggregatfunktion müssen übereinstimmen" -#: catalog/pg_aggregate.c:395 catalog/pg_aggregate.c:545 +#: catalog/pg_aggregate.c:396 catalog/pg_aggregate.c:549 #, c-format msgid "final function with extra arguments must not be declared STRICT" msgstr "Abschlussfunktion mit zusätzlichen Argumenten darf nicht als STRICT deklariert sein" -#: catalog/pg_aggregate.c:425 +#: catalog/pg_aggregate.c:427 #, c-format msgid "return type of combine function %s is not %s" msgstr "Rückgabetyp der Kombinierfunktion %s ist nicht %s" -#: catalog/pg_aggregate.c:436 +#: catalog/pg_aggregate.c:439 executor/nodeAgg.c:2943 #, c-format -msgid "combine function with \"%s\" transition type must not be declared STRICT" -msgstr "Kombinierfunktion mit Übergangstyp »%s« darf nicht als STRICT deklariert sein" +msgid "combine function with transition type %s must not be declared STRICT" +msgstr 
"Kombinierfunktion mit Übergangstyp %s darf nicht als STRICT deklariert sein" -#: catalog/pg_aggregate.c:455 +#: catalog/pg_aggregate.c:458 #, c-format msgid "return type of serialization function %s is not %s" msgstr "Rückgabetyp der Serialisierungsfunktion %s ist nicht %s" -#: catalog/pg_aggregate.c:475 +#: catalog/pg_aggregate.c:479 #, c-format msgid "return type of deserialization function %s is not %s" msgstr "Rückgabetyp der Deserialisierungsfunktion %s ist nicht %s" -#: catalog/pg_aggregate.c:491 catalog/pg_proc.c:243 catalog/pg_proc.c:250 +#: catalog/pg_aggregate.c:495 catalog/pg_proc.c:241 catalog/pg_proc.c:248 #, c-format msgid "cannot determine result data type" msgstr "kann Ergebnisdatentyp nicht bestimmen" -#: catalog/pg_aggregate.c:492 +#: catalog/pg_aggregate.c:496 #, c-format msgid "An aggregate returning a polymorphic type must have at least one polymorphic argument." msgstr "Eine Aggregatfunktion, die einen polymorphischen Typ zurückgibt, muss mindestens ein polymorphisches Argument haben." -#: catalog/pg_aggregate.c:504 catalog/pg_proc.c:256 +#: catalog/pg_aggregate.c:508 catalog/pg_proc.c:254 #, c-format msgid "unsafe use of pseudo-type \"internal\"" msgstr "unsichere Verwendung des Pseudotyps »internal«" -#: catalog/pg_aggregate.c:505 catalog/pg_proc.c:257 +#: catalog/pg_aggregate.c:509 catalog/pg_proc.c:255 #, c-format msgid "A function returning \"internal\" must have at least one \"internal\" argument." msgstr "Eine Funktion, die »internal« zurückgibt, muss mindestens ein Argument vom Typ »internal« haben." -#: catalog/pg_aggregate.c:558 +#: catalog/pg_aggregate.c:562 #, c-format msgid "moving-aggregate implementation returns type %s, but plain implementation returns type %s" msgstr "Moving-Aggregat-Implementierung gibt Typ %s zurück, aber die normale Implementierung gibt Typ %s zurück" -#: catalog/pg_aggregate.c:569 +#: catalog/pg_aggregate.c:573 #, c-format msgid "sort operator can only be specified for single-argument aggregates" msgstr "Sortieroperator kann nur für Aggregatfunktionen mit einem Argument angegeben werden" -#: catalog/pg_aggregate.c:810 commands/typecmds.c:1698 -#: commands/typecmds.c:1749 commands/typecmds.c:1780 commands/typecmds.c:1803 -#: commands/typecmds.c:1824 commands/typecmds.c:1851 commands/typecmds.c:1878 -#: commands/typecmds.c:1955 commands/typecmds.c:1997 parser/parse_func.c:365 -#: parser/parse_func.c:394 parser/parse_func.c:419 parser/parse_func.c:433 -#: parser/parse_func.c:508 parser/parse_func.c:519 parser/parse_func.c:1927 +#: catalog/pg_aggregate.c:819 commands/typecmds.c:1766 commands/typecmds.c:1817 +#: commands/typecmds.c:1848 commands/typecmds.c:1871 commands/typecmds.c:1892 +#: commands/typecmds.c:1919 commands/typecmds.c:1946 commands/typecmds.c:2023 +#: commands/typecmds.c:2065 parser/parse_func.c:398 parser/parse_func.c:427 +#: parser/parse_func.c:452 parser/parse_func.c:466 parser/parse_func.c:541 +#: parser/parse_func.c:552 parser/parse_func.c:2022 #, c-format msgid "function %s does not exist" msgstr "Funktion %s existiert nicht" -#: catalog/pg_aggregate.c:816 +#: catalog/pg_aggregate.c:825 #, c-format msgid "function %s returns a set" msgstr "Funktion %s gibt eine Ergebnismenge zurück" -#: catalog/pg_aggregate.c:831 +#: catalog/pg_aggregate.c:840 #, c-format msgid "function %s must accept VARIADIC ANY to be used in this aggregate" msgstr "Funktion %s muss VARIADIC ANY akzeptieren, um in dieser Aggregatfunktion verwendet zu werden" -#: catalog/pg_aggregate.c:855 +#: catalog/pg_aggregate.c:864 #, c-format msgid 
"function %s requires run-time type coercion" msgstr "Funktion %s erfordert Typumwandlung zur Laufzeit" -#: catalog/pg_collation.c:81 -#, c-format -msgid "collation \"%s\" for encoding \"%s\" already exists, skipping" -msgstr "Sortierfolge »%s« für Kodierung »%s« existiert bereits, wird übersprungen" - -#: catalog/pg_collation.c:88 -#, c-format -msgid "collation \"%s\" for encoding \"%s\" already exists" -msgstr "Sortierfolge »%s« für Kodierung »%s« existiert bereits" - -#: catalog/pg_collation.c:106 +#: catalog/pg_collation.c:92 catalog/pg_collation.c:139 #, c-format msgid "collation \"%s\" already exists, skipping" msgstr "Sortierfolge »%s« existiert bereits, wird übersprungen" -#: catalog/pg_collation.c:113 +#: catalog/pg_collation.c:94 +#, c-format +msgid "collation \"%s\" for encoding \"%s\" already exists, skipping" +msgstr "Sortierfolge »%s« für Kodierung »%s« existiert bereits, wird übersprungen" + +#: catalog/pg_collation.c:102 catalog/pg_collation.c:146 #, c-format msgid "collation \"%s\" already exists" msgstr "Sortierfolge »%s« existiert bereits" -#: catalog/pg_constraint.c:658 +#: catalog/pg_collation.c:104 +#, c-format +msgid "collation \"%s\" for encoding \"%s\" already exists" +msgstr "Sortierfolge »%s« für Kodierung »%s« existiert bereits" + +#: catalog/pg_constraint.c:921 #, c-format msgid "constraint \"%s\" for domain %s already exists" msgstr "Constraint »%s« für Domäne %s existiert bereits" -#: catalog/pg_constraint.c:788 +#: catalog/pg_constraint.c:1089 catalog/pg_constraint.c:1165 #, c-format msgid "table \"%s\" has multiple constraints named \"%s\"" msgstr "Tabelle »%s« hat mehrere Constraints namens »%s«" -#: catalog/pg_constraint.c:800 +#: catalog/pg_constraint.c:1101 catalog/pg_constraint.c:1199 #, c-format msgid "constraint \"%s\" for table \"%s\" does not exist" msgstr "Constraint »%s« für Tabelle »%s« existiert nicht" -#: catalog/pg_constraint.c:846 +#: catalog/pg_constraint.c:1284 #, c-format -msgid "domain \"%s\" has multiple constraints named \"%s\"" -msgstr "Domäne »%s« hat mehrere Constraints namens »%s«" +msgid "domain %s has multiple constraints named \"%s\"" +msgstr "Domäne %s hat mehrere Constraints namens »%s«" -#: catalog/pg_constraint.c:858 +#: catalog/pg_constraint.c:1296 #, c-format -msgid "constraint \"%s\" for domain \"%s\" does not exist" -msgstr "Constraint »%s« für Domäne »%s« existiert nicht" +msgid "constraint \"%s\" for domain %s does not exist" +msgstr "Constraint »%s« für Domäne %s existiert nicht" -#: catalog/pg_conversion.c:66 +#: catalog/pg_conversion.c:65 #, c-format msgid "conversion \"%s\" already exists" msgstr "Konversion »%s« existiert bereits" -#: catalog/pg_conversion.c:79 +#: catalog/pg_conversion.c:78 #, c-format msgid "default conversion for %s to %s already exists" msgstr "Standardumwandlung von %s nach %s existiert bereits" -#: catalog/pg_depend.c:163 commands/extension.c:3211 +#: catalog/pg_depend.c:163 commands/extension.c:3218 #, c-format msgid "%s is already a member of extension \"%s\"" msgstr "%s ist schon Mitglied der Erweiterung »%s«" @@ -4718,206 +4747,226 @@ msgstr "OID-Wert für pg_enum ist im Binary-Upgrade-Modus nicht gesetzt" msgid "ALTER TYPE ADD BEFORE/AFTER is incompatible with binary upgrade" msgstr "ALTER TYPE ADD BEFORE/AFTER ist mit Binary Upgrade inkompatibel" -#: catalog/pg_namespace.c:61 commands/schemacmds.c:263 +#: catalog/pg_namespace.c:63 commands/schemacmds.c:264 #, c-format msgid "schema \"%s\" already exists" msgstr "Schema »%s« existiert bereits" -#: catalog/pg_operator.c:219 
catalog/pg_operator.c:358 +#: catalog/pg_operator.c:218 catalog/pg_operator.c:357 #, c-format msgid "\"%s\" is not a valid operator name" msgstr "»%s« ist kein gültiger Operatorname" -#: catalog/pg_operator.c:367 +#: catalog/pg_operator.c:366 #, c-format msgid "only binary operators can have commutators" msgstr "nur binäre Operatoren können Kommutatoren haben" -#: catalog/pg_operator.c:371 commands/operatorcmds.c:482 +#: catalog/pg_operator.c:370 commands/operatorcmds.c:481 #, c-format msgid "only binary operators can have join selectivity" msgstr "nur binäre Operatoren können Join-Selectivity haben" -#: catalog/pg_operator.c:375 +#: catalog/pg_operator.c:374 #, c-format msgid "only binary operators can merge join" msgstr "nur binäre Operatoren können an einem Merge-Verbund teilnehmen" -#: catalog/pg_operator.c:379 +#: catalog/pg_operator.c:378 #, c-format msgid "only binary operators can hash" msgstr "nur binäre Operatoren können eine Hash-Funktion haben" -#: catalog/pg_operator.c:390 +#: catalog/pg_operator.c:389 #, c-format msgid "only boolean operators can have negators" msgstr "nur Boole’sche Operatoren können Negatoren haben" -#: catalog/pg_operator.c:394 commands/operatorcmds.c:490 +#: catalog/pg_operator.c:393 commands/operatorcmds.c:489 #, c-format msgid "only boolean operators can have restriction selectivity" msgstr "nur Boole’sche Operatoren können Restriction-Selectivity haben" -#: catalog/pg_operator.c:398 commands/operatorcmds.c:494 +#: catalog/pg_operator.c:397 commands/operatorcmds.c:493 #, c-format msgid "only boolean operators can have join selectivity" msgstr "nur Boole’sche Operatoren können Join-Selectivity haben" -#: catalog/pg_operator.c:402 +#: catalog/pg_operator.c:401 #, c-format msgid "only boolean operators can merge join" msgstr "nur Boole’sche Operatoren können an einem Merge-Verbund teilnehmen" -#: catalog/pg_operator.c:406 +#: catalog/pg_operator.c:405 #, c-format msgid "only boolean operators can hash" msgstr "nur Boole’sche Operatoren können eine Hash-Funktion haben" -#: catalog/pg_operator.c:418 +#: catalog/pg_operator.c:417 #, c-format msgid "operator %s already exists" msgstr "Operator %s existiert bereits" -#: catalog/pg_operator.c:612 +#: catalog/pg_operator.c:611 #, c-format msgid "operator cannot be its own negator or sort operator" msgstr "Operator kann nicht sein eigener Negator oder Sortierungsoperator sein" -#: catalog/pg_proc.c:131 parser/parse_func.c:1951 parser/parse_func.c:1991 +#: catalog/pg_proc.c:129 parser/parse_func.c:2058 #, c-format msgid "functions cannot have more than %d argument" msgid_plural "functions cannot have more than %d arguments" msgstr[0] "Funktionen können nicht mehr als %d Argument haben" msgstr[1] "Funktionen können nicht mehr als %d Argumente haben" -#: catalog/pg_proc.c:244 +#: catalog/pg_proc.c:242 #, c-format msgid "A function returning a polymorphic type must have at least one polymorphic argument." msgstr "Eine Funktion, die einen polymorphischen Typ zurückgibt, muss mindestens ein polymorphisches Argument haben." -#: catalog/pg_proc.c:251 +#: catalog/pg_proc.c:249 #, c-format msgid "A function returning \"anyrange\" must have at least one \"anyrange\" argument." msgstr "Eine Funktion, die »anyrange« zurückgibt, muss mindestens ein Argument vom Typ »anyrange« haben." 
-#: catalog/pg_proc.c:269 +#: catalog/pg_proc.c:267 #, c-format msgid "\"%s\" is already an attribute of type %s" msgstr "»%s« ist schon ein Attribut von Typ %s" -#: catalog/pg_proc.c:400 +#: catalog/pg_proc.c:397 #, c-format msgid "function \"%s\" already exists with same argument types" msgstr "Funktion »%s« existiert bereits mit den selben Argumenttypen" -#: catalog/pg_proc.c:414 catalog/pg_proc.c:437 +#: catalog/pg_proc.c:407 +#, c-format +msgid "cannot change routine kind" +msgstr "kann Routinenart nicht ändern" + +#: catalog/pg_proc.c:409 +#, c-format +msgid "\"%s\" is an aggregate function." +msgstr "»%s« ist eine Aggregatfunktion." + +#: catalog/pg_proc.c:411 +#, c-format +msgid "\"%s\" is a function." +msgstr "»%s« ist eine Funktion." + +#: catalog/pg_proc.c:413 +#, c-format +msgid "\"%s\" is a procedure." +msgstr "»%s« ist eine Prozedur." + +#: catalog/pg_proc.c:415 +#, c-format +msgid "\"%s\" is a window function." +msgstr "»%s« ist eine Fensterfunktion." + +#: catalog/pg_proc.c:426 catalog/pg_proc.c:450 #, c-format msgid "cannot change return type of existing function" msgstr "kann Rückgabetyp einer bestehenden Funktion nicht ändern" -#: catalog/pg_proc.c:415 catalog/pg_proc.c:439 catalog/pg_proc.c:482 -#: catalog/pg_proc.c:506 catalog/pg_proc.c:532 +#: catalog/pg_proc.c:427 catalog/pg_proc.c:452 catalog/pg_proc.c:495 +#: catalog/pg_proc.c:519 catalog/pg_proc.c:545 #, c-format msgid "Use DROP FUNCTION %s first." msgstr "Verwenden Sie zuerst DROP FUNCTION %s." -#: catalog/pg_proc.c:438 +#: catalog/pg_proc.c:451 #, c-format msgid "Row type defined by OUT parameters is different." msgstr "Der von OUT-Parametern bestimmte Zeilentyp ist verschieden." -#: catalog/pg_proc.c:480 +#: catalog/pg_proc.c:493 #, c-format msgid "cannot change name of input parameter \"%s\"" msgstr "kann Name des Eingabeparameters »%s« nicht ändern" -#: catalog/pg_proc.c:505 +#: catalog/pg_proc.c:518 #, c-format msgid "cannot remove parameter defaults from existing function" msgstr "kann Parametervorgabewerte einer bestehenden Funktion nicht entfernen" -#: catalog/pg_proc.c:531 +#: catalog/pg_proc.c:544 #, c-format msgid "cannot change data type of existing parameter default value" msgstr "kann Datentyp eines bestehenden Parametervorgabewerts nicht ändern" -#: catalog/pg_proc.c:544 -#, c-format -msgid "function \"%s\" is an aggregate function" -msgstr "Funktion »%s« ist eine Aggregatfunktion" - -#: catalog/pg_proc.c:549 -#, c-format -msgid "function \"%s\" is not an aggregate function" -msgstr "Funktion »%s« ist keine Aggregatfunktion" - -#: catalog/pg_proc.c:557 -#, c-format -msgid "function \"%s\" is a window function" -msgstr "Funktion %s ist eine Fensterfunktion" - -#: catalog/pg_proc.c:562 -#, c-format -msgid "function \"%s\" is not a window function" -msgstr "Funktion »%s« ist keine Fensterfunktion" - -#: catalog/pg_proc.c:768 +#: catalog/pg_proc.c:753 #, c-format msgid "there is no built-in function named \"%s\"" msgstr "es gibt keine eingebaute Funktion namens %s" -#: catalog/pg_proc.c:866 +#: catalog/pg_proc.c:851 #, c-format msgid "SQL functions cannot return type %s" msgstr "SQL-Funktionen können keinen Rückgabetyp »%s« haben" -#: catalog/pg_proc.c:881 +#: catalog/pg_proc.c:866 #, c-format msgid "SQL functions cannot have arguments of type %s" msgstr "SQL-Funktionen können keine Argumente vom Typ »%s« haben" -#: catalog/pg_proc.c:967 executor/functions.c:1424 +#: catalog/pg_proc.c:954 executor/functions.c:1434 #, c-format msgid "SQL function \"%s\"" msgstr "SQL-Funktion »%s«" +#: 
catalog/pg_publication.c:57 commands/trigger.c:235 commands/trigger.c:253
+#, c-format
+msgid "\"%s\" is a partitioned table"
+msgstr "»%s« ist eine partitionierte Tabelle"
+
#: catalog/pg_publication.c:59
#, c-format
+msgid "Adding partitioned tables to publications is not supported."
+msgstr "Partitionierte Tabellen in Publikationen werden nicht unterstützt."
+
+#: catalog/pg_publication.c:60
+#, c-format
+msgid "You can add the table partitions individually."
+msgstr "Sie können die Tabellenpartitionen einzeln hinzufügen."
+
+#: catalog/pg_publication.c:68
+#, c-format
msgid "Only tables can be added to publications."
msgstr "Nur Tabellen können Teil einer Publikation sein."

-#: catalog/pg_publication.c:65
+#: catalog/pg_publication.c:74
#, c-format
msgid "\"%s\" is a system table"
msgstr "»%s« ist eine Systemtabelle"

-#: catalog/pg_publication.c:67
+#: catalog/pg_publication.c:76
#, c-format
msgid "System tables cannot be added to publications."
msgstr "Systemtabellen können nicht Teil einer Publikation sein."

-#: catalog/pg_publication.c:73
+#: catalog/pg_publication.c:82
#, c-format
msgid "table \"%s\" cannot be replicated"
msgstr "Tabelle »%s« kann nicht repliziert werden"

-#: catalog/pg_publication.c:75
+#: catalog/pg_publication.c:84
#, c-format
msgid "Temporary and unlogged relations cannot be replicated."
msgstr "Temporäre und ungeloggte Tabellen können nicht repliziert werden."

-#: catalog/pg_publication.c:134
+#: catalog/pg_publication.c:175
#, c-format
msgid "relation \"%s\" is already member of publication \"%s\""
msgstr "Relation »%s« ist schon Mitglied der Publikation »%s«"

-#: catalog/pg_publication.c:361 catalog/pg_publication.c:382
-#: commands/publicationcmds.c:430 commands/publicationcmds.c:715
+#: catalog/pg_publication.c:403 catalog/pg_publication.c:424
+#: commands/publicationcmds.c:415 commands/publicationcmds.c:716
#, c-format
msgid "publication \"%s\" does not exist"
msgstr "Publikation »%s« existiert nicht"

-#: catalog/pg_shdepend.c:691
+#: catalog/pg_shdepend.c:692
#, c-format
msgid ""
"\n"
@@ -4932,98 +4981,98 @@ msgstr[1] ""
"\n"
"und Objekte in %d anderen Datenbanken (Liste im Serverlog)"

-#: catalog/pg_shdepend.c:997
+#: catalog/pg_shdepend.c:998
#, c-format
msgid "role %u was concurrently dropped"
msgstr "Rolle %u wurde gleichzeitig gelöscht"

-#: catalog/pg_shdepend.c:1016
+#: catalog/pg_shdepend.c:1017
#, c-format
msgid "tablespace %u was concurrently dropped"
msgstr "Tablespace %u wurde gleichzeitig gelöscht"

-#: catalog/pg_shdepend.c:1031
+#: catalog/pg_shdepend.c:1032
#, c-format
msgid "database %u was concurrently dropped"
msgstr "Datenbank %u wurde gleichzeitig gelöscht"

-#: catalog/pg_shdepend.c:1076
+#: catalog/pg_shdepend.c:1077
#, c-format
msgid "owner of %s"
msgstr "Eigentümer von %s"

-#: catalog/pg_shdepend.c:1078
+#: catalog/pg_shdepend.c:1079
#, c-format
msgid "privileges for %s"
msgstr "Privilegien für %s"

-#: catalog/pg_shdepend.c:1080
+#: catalog/pg_shdepend.c:1081
#, c-format
msgid "target of %s"
msgstr "Ziel von %s"

#.
translator: %s will always be "database %s" -#: catalog/pg_shdepend.c:1088 +#: catalog/pg_shdepend.c:1089 #, c-format msgid "%d object in %s" msgid_plural "%d objects in %s" msgstr[0] "%d Objekt in %s" msgstr[1] "%d Objekte in %s" -#: catalog/pg_shdepend.c:1199 +#: catalog/pg_shdepend.c:1200 #, c-format msgid "cannot drop objects owned by %s because they are required by the database system" msgstr "kann Objekte, die %s gehören, nicht löschen, weil sie vom Datenbanksystem benötigt werden" -#: catalog/pg_shdepend.c:1314 +#: catalog/pg_shdepend.c:1315 #, c-format msgid "cannot reassign ownership of objects owned by %s because they are required by the database system" msgstr "kann den Eigentümer von den Objekten, die %s gehören, nicht ändern, weil die Objekte vom Datenbanksystem benötigt werden" -#: catalog/pg_subscription.c:158 commands/subscriptioncmds.c:373 -#: commands/subscriptioncmds.c:481 commands/subscriptioncmds.c:651 +#: catalog/pg_subscription.c:176 commands/subscriptioncmds.c:633 +#: commands/subscriptioncmds.c:843 commands/subscriptioncmds.c:1067 #, c-format msgid "subscription \"%s\" does not exist" msgstr "Subskription »%s« existiert nicht" -#: catalog/pg_type.c:136 catalog/pg_type.c:452 +#: catalog/pg_type.c:135 catalog/pg_type.c:451 #, c-format msgid "pg_type OID value not set when in binary upgrade mode" msgstr "OID-Wert für pg_type ist im Binary-Upgrade-Modus nicht gesetzt" -#: catalog/pg_type.c:251 +#: catalog/pg_type.c:250 #, c-format msgid "invalid type internal size %d" msgstr "ungültige interne Typgröße %d" -#: catalog/pg_type.c:267 catalog/pg_type.c:275 catalog/pg_type.c:283 -#: catalog/pg_type.c:292 +#: catalog/pg_type.c:266 catalog/pg_type.c:274 catalog/pg_type.c:282 +#: catalog/pg_type.c:291 #, c-format msgid "alignment \"%c\" is invalid for passed-by-value type of size %d" msgstr "Ausrichtung »%c« ist ungültig für Typen mit Wertübergabe mit Größe %d" -#: catalog/pg_type.c:299 +#: catalog/pg_type.c:298 #, c-format msgid "internal size %d is invalid for passed-by-value type" msgstr "interne Größe %d ist ungültig für Typen mit Wertübergabe" -#: catalog/pg_type.c:308 catalog/pg_type.c:314 +#: catalog/pg_type.c:307 catalog/pg_type.c:313 #, c-format msgid "alignment \"%c\" is invalid for variable-length type" msgstr "Ausrichtung »%c« ist ungültig für Typen variabler Länge" -#: catalog/pg_type.c:322 +#: catalog/pg_type.c:321 #, c-format msgid "fixed-size types must have storage PLAIN" msgstr "Typen mit fester Größe müssen Storage-Typ PLAIN haben" -#: catalog/pg_type.c:781 +#: catalog/pg_type.c:800 #, c-format msgid "could not form array type name for type \"%s\"" msgstr "konnte keinen Arraytypnamen für Datentyp »%s« erzeugen" -#: catalog/toasting.c:105 commands/indexcmds.c:395 commands/tablecmds.c:4677 -#: commands/tablecmds.c:12627 +#: catalog/toasting.c:105 commands/indexcmds.c:447 commands/tablecmds.c:4974 +#: commands/tablecmds.c:13395 #, c-format msgid "\"%s\" is not a table or materialized view" msgstr "»%s« ist keine Tabelle oder materialisierte Sicht" @@ -5033,152 +5082,163 @@ msgstr "»%s« ist keine Tabelle oder materialisierte Sicht" msgid "shared tables cannot be toasted after initdb" msgstr "Cluster-globale Tabellen können nach initdb nicht mehr getoastet werden" -#: commands/aggregatecmds.c:157 +#: commands/aggregatecmds.c:166 #, c-format msgid "only ordered-set aggregates can be hypothetical" msgstr "nur Ordered-Set-Aggregatfunktionen können Hypothetical-Set-Aggregatfunktionen sein" -#: commands/aggregatecmds.c:182 +#: commands/aggregatecmds.c:191 #, c-format 
msgid "aggregate attribute \"%s\" not recognized" msgstr "Attribut »%s« für Aggregatfunktion unbekannt" -#: commands/aggregatecmds.c:192 +#: commands/aggregatecmds.c:201 #, c-format msgid "aggregate stype must be specified" msgstr "»stype« für Aggregatfunktion muss angegeben werden" -#: commands/aggregatecmds.c:196 +#: commands/aggregatecmds.c:205 #, c-format msgid "aggregate sfunc must be specified" msgstr "»sfunc« für Aggregatfunktion muss angegeben werden" -#: commands/aggregatecmds.c:208 +#: commands/aggregatecmds.c:217 #, c-format msgid "aggregate msfunc must be specified when mstype is specified" msgstr "»msfunc« für Aggregatfunktion muss angegeben werden, wenn »mstype« angegeben ist" -#: commands/aggregatecmds.c:212 +#: commands/aggregatecmds.c:221 #, c-format msgid "aggregate minvfunc must be specified when mstype is specified" msgstr "»minvfunc« für Aggregatfunktion muss angegeben werden, wenn »mstype« angegeben ist" -#: commands/aggregatecmds.c:219 +#: commands/aggregatecmds.c:228 #, c-format msgid "aggregate msfunc must not be specified without mstype" msgstr "»msfunc« für Aggregatfunktion darf nicht angegeben werden, wenn »mstype« nicht angegeben ist" -#: commands/aggregatecmds.c:223 +#: commands/aggregatecmds.c:232 #, c-format msgid "aggregate minvfunc must not be specified without mstype" msgstr "»minvfunc« für Aggregatfunktion darf nicht angegeben werden, wenn »mstype« nicht angegeben ist" -#: commands/aggregatecmds.c:227 +#: commands/aggregatecmds.c:236 #, c-format msgid "aggregate mfinalfunc must not be specified without mstype" msgstr "»mfinalfunc« für Aggregatfunktion darf nicht angegeben werden, wenn »mstype« nicht angegeben ist" -#: commands/aggregatecmds.c:231 +#: commands/aggregatecmds.c:240 #, c-format msgid "aggregate msspace must not be specified without mstype" msgstr "»msspace« für Aggregatfunktion darf nicht angegeben werden, wenn »mstype« nicht angegeben ist" -#: commands/aggregatecmds.c:235 +#: commands/aggregatecmds.c:244 #, c-format msgid "aggregate minitcond must not be specified without mstype" msgstr "»minitcond« für Aggregatfunktion darf nicht angegeben werden, wenn »mstype« nicht angegeben ist" -#: commands/aggregatecmds.c:255 +#: commands/aggregatecmds.c:273 #, c-format msgid "aggregate input type must be specified" msgstr "Eingabetyp für Aggregatfunktion muss angegeben werden" -#: commands/aggregatecmds.c:285 +#: commands/aggregatecmds.c:303 #, c-format msgid "basetype is redundant with aggregate input type specification" msgstr "Angabe »basetype« ist überflüssig bei Angabe des Eingabetyps der Aggregatfunktion" -#: commands/aggregatecmds.c:326 commands/aggregatecmds.c:367 +#: commands/aggregatecmds.c:344 commands/aggregatecmds.c:385 #, c-format msgid "aggregate transition data type cannot be %s" msgstr "Übergangsdatentyp von Aggregatfunktion kann nicht %s sein" -#: commands/aggregatecmds.c:338 +#: commands/aggregatecmds.c:356 #, c-format msgid "serialization functions may be specified only when the aggregate transition data type is %s" msgstr "Serialisierungsfunktionen dürfen nur angegeben werden, wenn der Übergangsdatentyp der Aggregatfunktion %s ist" -#: commands/aggregatecmds.c:348 +#: commands/aggregatecmds.c:366 #, c-format msgid "must specify both or neither of serialization and deserialization functions" msgstr "Serialisierungs- und Deserialisierungsfunktionen müssen zusammen angegeben werden" -#: commands/aggregatecmds.c:413 commands/functioncmds.c:564 +#: commands/aggregatecmds.c:431 commands/functioncmds.c:602 #, c-format msgid "parameter 
\"parallel\" must be SAFE, RESTRICTED, or UNSAFE"
msgstr "Parameter »parallel« muss SAFE, RESTRICTED oder UNSAFE sein"

-#: commands/alter.c:83 commands/event_trigger.c:233
+#: commands/aggregatecmds.c:486
+#, c-format
+msgid "parameter \"%s\" must be READ_ONLY, SHARABLE, or READ_WRITE"
+msgstr "Parameter »%s« muss READ_ONLY, SHARABLE oder READ_WRITE sein"
+
+#: commands/alter.c:84 commands/event_trigger.c:236
#, c-format
msgid "event trigger \"%s\" already exists"
msgstr "Ereignistrigger »%s« existiert bereits"

-#: commands/alter.c:86 commands/foreigncmds.c:595
+#: commands/alter.c:87 commands/foreigncmds.c:595
#, c-format
msgid "foreign-data wrapper \"%s\" already exists"
msgstr "Fremddaten-Wrapper »%s« existiert bereits"

-#: commands/alter.c:89 commands/foreigncmds.c:886
+#: commands/alter.c:90 commands/foreigncmds.c:898
#, c-format
msgid "server \"%s\" already exists"
msgstr "Server »%s« existiert bereits"

-#: commands/alter.c:92 commands/proclang.c:367
+#: commands/alter.c:93 commands/proclang.c:363
#, c-format
msgid "language \"%s\" already exists"
msgstr "Sprache »%s« existiert bereits"

-#: commands/alter.c:95 commands/publicationcmds.c:189
+#: commands/alter.c:96 commands/publicationcmds.c:176
#, c-format
msgid "publication \"%s\" already exists"
msgstr "Publikation »%s« existiert bereits"

-#: commands/alter.c:98 commands/subscriptioncmds.c:256
+#: commands/alter.c:99 commands/subscriptioncmds.c:358
#, c-format
msgid "subscription \"%s\" already exists"
msgstr "Subskription »%s« existiert bereits"

-#: commands/alter.c:121
+#: commands/alter.c:122
#, c-format
msgid "conversion \"%s\" already exists in schema \"%s\""
msgstr "Konversion »%s« existiert bereits in Schema »%s«"

-#: commands/alter.c:125
+#: commands/alter.c:126
+#, c-format
+msgid "statistics object \"%s\" already exists in schema \"%s\""
+msgstr "Statistikobjekt »%s« existiert bereits in Schema »%s«"
+
+#: commands/alter.c:130
#, c-format
msgid "text search parser \"%s\" already exists in schema \"%s\""
msgstr "Textsucheparser »%s« existiert bereits in Schema »%s«"

-#: commands/alter.c:129
+#: commands/alter.c:134
#, c-format
msgid "text search dictionary \"%s\" already exists in schema \"%s\""
msgstr "Textsuchewörterbuch »%s« existiert bereits in Schema »%s«"

-#: commands/alter.c:133
+#: commands/alter.c:138
#, c-format
msgid "text search template \"%s\" already exists in schema \"%s\""
msgstr "Textsuchevorlage »%s« existiert bereits in Schema »%s«"

-#: commands/alter.c:137
+#: commands/alter.c:142
#, c-format
msgid "text search configuration \"%s\" already exists in schema \"%s\""
msgstr "Textsuchekonfiguration »%s« existiert bereits in Schema »%s«"

-#: commands/alter.c:211
+#: commands/alter.c:216
#, c-format
msgid "must be superuser to rename %s"
msgstr "nur Superuser können %s umbenennen"

-#: commands/alter.c:670
+#: commands/alter.c:713
#, c-format
msgid "must be superuser to set schema of %s"
msgstr "nur Superuser können Schema von %s setzen"
@@ -5203,8 +5263,8 @@ msgstr "Zugriffsmethode »%s« existiert bereits"
msgid "must be superuser to drop access methods"
msgstr "nur Superuser können Zugriffsmethoden löschen"

-#: commands/amcmds.c:174 commands/indexcmds.c:163 commands/indexcmds.c:502
-#: commands/opclasscmds.c:363 commands/opclasscmds.c:777
+#: commands/amcmds.c:174 commands/indexcmds.c:172 commands/indexcmds.c:587
+#: commands/opclasscmds.c:364 commands/opclasscmds.c:778
#, c-format
msgid "access method \"%s\" does not exist"
msgstr
"Zugriffsmethode »%s« existiert nicht" @@ -5214,179 +5274,200 @@ msgstr "Zugriffsmethode »%s« existiert nicht" msgid "handler function is not specified" msgstr "keine Handler-Funktion angegeben" -#: commands/amcmds.c:262 commands/event_trigger.c:242 -#: commands/foreigncmds.c:487 commands/proclang.c:117 commands/proclang.c:289 -#: commands/trigger.c:531 parser/parse_clause.c:961 +#: commands/amcmds.c:262 commands/event_trigger.c:245 +#: commands/foreigncmds.c:487 commands/proclang.c:116 commands/proclang.c:285 +#: commands/trigger.c:696 parser/parse_clause.c:989 #, c-format msgid "function %s must return type %s" msgstr "Function %s muss Rückgabetyp %s haben" -#: commands/analyze.c:145 +#: commands/analyze.c:187 #, c-format msgid "skipping analyze of \"%s\" --- lock not available" msgstr "überspringe Analyze von »%s« --- Sperre nicht verfügbar" -#: commands/analyze.c:162 +#: commands/analyze.c:192 +#, c-format +msgid "skipping analyze of \"%s\" --- relation no longer exists" +msgstr "überspringe Analyze von »%s« --- Relation existiert nicht mehr" + +#: commands/analyze.c:209 #, c-format msgid "skipping \"%s\" --- only superuser can analyze it" msgstr "überspringe »%s« --- nur Superuser kann sie analysieren" -#: commands/analyze.c:166 +#: commands/analyze.c:213 #, c-format msgid "skipping \"%s\" --- only superuser or database owner can analyze it" msgstr "überspringe »%s« --- nur Superuser oder Eigentümer der Datenbank kann sie analysieren" -#: commands/analyze.c:170 +#: commands/analyze.c:217 #, c-format msgid "skipping \"%s\" --- only table or database owner can analyze it" msgstr "überspringe »%s« --- nur Eigentümer der Tabelle oder der Datenbank kann sie analysieren" -#: commands/analyze.c:230 +#: commands/analyze.c:275 #, c-format msgid "skipping \"%s\" --- cannot analyze this foreign table" msgstr "überspringe »%s« --- kann diese Fremdtabelle nicht analysieren" -#: commands/analyze.c:247 +#: commands/analyze.c:292 #, c-format msgid "skipping \"%s\" --- cannot analyze non-tables or special system tables" msgstr "überspringe »%s« --- kann Nicht-Tabellen oder besondere Systemtabellen nicht analysieren" -#: commands/analyze.c:328 +#: commands/analyze.c:373 #, c-format msgid "analyzing \"%s.%s\" inheritance tree" msgstr "analysiere Vererbungsbaum von »%s.%s«" -#: commands/analyze.c:333 +#: commands/analyze.c:378 #, c-format msgid "analyzing \"%s.%s\"" msgstr "analysiere »%s.%s«" -#: commands/analyze.c:658 +#: commands/analyze.c:438 +#, c-format +msgid "column \"%s\" of relation \"%s\" appears more than once" +msgstr "Spalte »%s« von Relation »%s« erscheint mehrmals" + +#: commands/analyze.c:718 #, c-format msgid "automatic analyze of table \"%s.%s.%s\" system usage: %s" msgstr "automatisches Analysieren von Tabelle »%s.%s.%s« Systembenutzung: %s" -#: commands/analyze.c:1212 +#: commands/analyze.c:1273 #, c-format msgid "\"%s\": scanned %d of %u pages, containing %.0f live rows and %.0f dead rows; %d rows in sample, %.0f estimated total rows" msgstr "»%s«: %d von %u Seiten gelesen, enthalten %.0f lebende Zeilen und %.0f tote Zeilen; %d Zeilen in Stichprobe, schätzungsweise %.0f Zeilen insgesamt" -#: commands/analyze.c:1292 +#: commands/analyze.c:1353 #, c-format msgid "skipping analyze of \"%s.%s\" inheritance tree --- this inheritance tree contains no child tables" msgstr "überspringe Analysieren des Vererbungsbaums »%s.%s« --- dieser Vererbungsbaum enthält keine abgeleiteten Tabellen" -#: commands/analyze.c:1390 +#: commands/analyze.c:1451 #, c-format msgid "skipping analyze of \"%s.%s\" 
inheritance tree --- this inheritance tree contains no analyzable child tables" msgstr "überspringe Analysieren des Vererbungsbaums »%s.%s« --- dieser Vererbungsbaum enthält keine analysierbaren abgeleiteten Tabellen" -#: commands/async.c:555 +#: commands/async.c:558 #, c-format msgid "channel name cannot be empty" msgstr "Kanalname kann nicht leer sein" -#: commands/async.c:560 +#: commands/async.c:563 #, c-format msgid "channel name too long" msgstr "Kanalname zu lang" -#: commands/async.c:567 +#: commands/async.c:570 #, c-format msgid "payload string too long" msgstr "Payload-Zeichenkette zu lang" -#: commands/async.c:753 +#: commands/async.c:756 #, c-format msgid "cannot PREPARE a transaction that has executed LISTEN, UNLISTEN, or NOTIFY" msgstr "PREPARE kann nicht in einer Transaktion ausgeführt werden, die LISTEN, UNLISTEN oder NOTIFY ausgeführt hat" -#: commands/async.c:856 +#: commands/async.c:859 #, c-format msgid "too many notifications in the NOTIFY queue" msgstr "zu viele Benachrichtigungen in NOTIFY-Schlange" -#: commands/async.c:1486 +#: commands/async.c:1491 #, c-format msgid "NOTIFY queue is %.0f%% full" msgstr "NOTIFY-Schlange ist %.0f%% voll" -#: commands/async.c:1488 +#: commands/async.c:1493 #, c-format msgid "The server process with PID %d is among those with the oldest transactions." msgstr "Der Serverprozess mit PID %d gehört zu denen mit den ältesten Transaktionen." -#: commands/async.c:1491 +#: commands/async.c:1496 #, c-format msgid "The NOTIFY queue cannot be emptied until that process ends its current transaction." msgstr "Die NOTIFY-Schlange kann erst geleert werden, wenn dieser Prozess seine aktuelle Transaktion beendet." -#: commands/cluster.c:129 commands/cluster.c:364 +#: commands/cluster.c:129 commands/cluster.c:372 #, c-format msgid "cannot cluster temporary tables of other sessions" msgstr "kann temporäre Tabellen anderer Sitzungen nicht clustern" -#: commands/cluster.c:159 +#: commands/cluster.c:137 +#, c-format +msgid "cannot cluster a partitioned table" +msgstr "eine partitionierte Tabelle kann nicht geclustert werden" + +#: commands/cluster.c:167 #, c-format msgid "there is no previously clustered index for table \"%s\"" msgstr "es gibt keinen bereits geclusterten Index für Tabelle »%s«" -#: commands/cluster.c:173 commands/tablecmds.c:9858 commands/tablecmds.c:11721 +#: commands/cluster.c:181 commands/tablecmds.c:10629 commands/tablecmds.c:12488 #, c-format msgid "index \"%s\" for table \"%s\" does not exist" msgstr "Index »%s« für Tabelle »%s« existiert nicht" -#: commands/cluster.c:353 +#: commands/cluster.c:361 #, c-format msgid "cannot cluster a shared catalog" msgstr "globaler Katalog kann nicht geclustert werden" -#: commands/cluster.c:368 +#: commands/cluster.c:376 #, c-format msgid "cannot vacuum temporary tables of other sessions" msgstr "temporäre Tabellen anderer Sitzungen können nicht gevacuumt werden" -#: commands/cluster.c:431 commands/tablecmds.c:11731 +#: commands/cluster.c:439 commands/tablecmds.c:12498 #, c-format msgid "\"%s\" is not an index for table \"%s\"" msgstr "»%s« ist kein Index für Tabelle »%s«" -#: commands/cluster.c:439 +#: commands/cluster.c:447 #, c-format msgid "cannot cluster on index \"%s\" because access method does not support clustering" msgstr "kann nicht anhand des Index »%s« clustern, weil die Indexmethode Clustern nicht unterstützt" -#: commands/cluster.c:451 +#: commands/cluster.c:459 #, c-format msgid "cannot cluster on partial index \"%s\"" msgstr "kann nicht anhand des partiellen Index »%s« clustern" -#: 
commands/cluster.c:465
+#: commands/cluster.c:473
#, c-format
msgid "cannot cluster on invalid index \"%s\""
msgstr "kann nicht anhand des ungültigen Index »%s« clustern"

-#: commands/cluster.c:918
+#: commands/cluster.c:497
+#, c-format
+msgid "cannot mark index clustered in partitioned table"
+msgstr "kann Index in partitionierter Tabelle nicht als geclustert markieren"
+
+#: commands/cluster.c:938
#, c-format
msgid "clustering \"%s.%s\" using index scan on \"%s\""
msgstr "clustere »%s.%s« durch Index-Scan von »%s«"

-#: commands/cluster.c:924
+#: commands/cluster.c:944
#, c-format
msgid "clustering \"%s.%s\" using sequential scan and sort"
msgstr "clustere »%s.%s« durch sequenziellen Scan und Sortieren"

-#: commands/cluster.c:929 commands/vacuumlazy.c:486
+#: commands/cluster.c:949 commands/vacuumlazy.c:505
#, c-format
msgid "vacuuming \"%s.%s\""
msgstr "vacuume »%s.%s«"

-#: commands/cluster.c:1084
+#: commands/cluster.c:1106
#, c-format
msgid "\"%s\": found %.0f removable, %.0f nonremovable row versions in %u pages"
msgstr "»%s«: %.0f entfernbare, %.0f nicht entfernbare Zeilenversionen in %u Seiten gefunden"

-#: commands/cluster.c:1088
+#: commands/cluster.c:1110
#, c-format
msgid ""
"%.0f dead row versions cannot be removed yet.\n"
@@ -5395,66 +5476,92 @@ msgstr ""
"%.0f tote Zeilenversionen können noch nicht entfernt werden.\n"
"%s."

-#: commands/collationcmds.c:79
+#: commands/collationcmds.c:100
#, c-format
msgid "collation attribute \"%s\" not recognized"
msgstr "Attribut »%s« für Sortierfolge unbekannt"

-#: commands/collationcmds.c:125
+#: commands/collationcmds.c:142
+#, c-format
+msgid "collation \"default\" cannot be copied"
+msgstr "Sortierfolge »default« kann nicht kopiert werden"
+
+#: commands/collationcmds.c:172
+#, c-format
+msgid "unrecognized collation provider: %s"
+msgstr "unbekannter Sortierfolgen-Provider: %s"
+
+#: commands/collationcmds.c:181
#, c-format
msgid "parameter \"lc_collate\" must be specified"
msgstr "Parameter »lc_collate« muss angegeben werden"

-#: commands/collationcmds.c:130
+#: commands/collationcmds.c:186
#, c-format
msgid "parameter \"lc_ctype\" must be specified"
msgstr "Parameter »lc_ctype« muss angegeben werden"

-#: commands/collationcmds.c:170
+#: commands/collationcmds.c:245
#, c-format
msgid "collation \"%s\" for encoding \"%s\" already exists in schema \"%s\""
msgstr "Sortierfolge »%s« für Kodierung »%s« existiert bereits in Schema »%s«"

-#: commands/collationcmds.c:181
+#: commands/collationcmds.c:256
#, c-format
msgid "collation \"%s\" already exists in schema \"%s\""
msgstr "Sortierfolge »%s« existiert bereits in Schema »%s«"

-#: commands/collationcmds.c:243
+#: commands/collationcmds.c:304
#, c-format
-msgid "must be superuser to import system collations"
-msgstr "nur Superuser können Systemsortierfolgen importieren"
+msgid "changing version from %s to %s"
+msgstr "Version wird von %s in %s geändert"

-#: commands/collationcmds.c:250 commands/copy.c:1827 commands/copy.c:3013
+#: commands/collationcmds.c:319
#, c-format
-msgid "could not execute command \"%s\": %m"
-msgstr "konnte Befehl »%s« nicht ausführen: %m"
+msgid "version has not changed"
+msgstr "Version hat sich nicht geändert"

-#: commands/collationcmds.c:343
+#: commands/collationcmds.c:450
#, c-format
-msgid "no usable system locales were found"
+msgid "could not convert locale name \"%s\" to language tag: %s"
+msgstr "konnte Locale-Namen »%s« nicht in Sprach-Tag umwandeln: %s"
+
+#: commands/collationcmds.c:511
+#,
c-format +msgid "must be superuser to import system collations" +msgstr "nur Superuser können Systemsortierfolgen importieren" + +#: commands/collationcmds.c:534 commands/copy.c:1826 commands/copy.c:3132 +#: libpq/be-secure-common.c:80 +#, c-format +msgid "could not execute command \"%s\": %m" +msgstr "konnte Befehl »%s« nicht ausführen: %m" + +#: commands/collationcmds.c:665 +#, c-format +msgid "no usable system locales were found" msgstr "keine brauchbaren System-Locales gefunden" -#: commands/comment.c:61 commands/dbcommands.c:808 commands/dbcommands.c:988 -#: commands/dbcommands.c:1092 commands/dbcommands.c:1282 -#: commands/dbcommands.c:1505 commands/dbcommands.c:1619 -#: commands/dbcommands.c:2035 utils/init/postinit.c:841 -#: utils/init/postinit.c:943 utils/init/postinit.c:960 +#: commands/comment.c:61 commands/dbcommands.c:808 commands/dbcommands.c:996 +#: commands/dbcommands.c:1100 commands/dbcommands.c:1290 +#: commands/dbcommands.c:1513 commands/dbcommands.c:1627 +#: commands/dbcommands.c:2043 utils/init/postinit.c:853 +#: utils/init/postinit.c:958 utils/init/postinit.c:975 #, c-format msgid "database \"%s\" does not exist" msgstr "Datenbank »%s« existiert nicht" -#: commands/comment.c:100 commands/seclabel.c:117 parser/parse_utilcmd.c:824 +#: commands/comment.c:101 commands/seclabel.c:117 parser/parse_utilcmd.c:924 #, c-format msgid "\"%s\" is not a table, view, materialized view, composite type, or foreign table" msgstr "»%s« ist weder Tabelle, Sicht, materialisierte Sicht, zusammengesetzter Typ noch Fremdtabelle" -#: commands/constraint.c:60 utils/adt/ri_triggers.c:2715 +#: commands/constraint.c:60 utils/adt/ri_triggers.c:2256 #, c-format msgid "function \"%s\" was not called by trigger manager" msgstr "Funktion »%s« wurde nicht von Triggermanager aufgerufen" -#: commands/constraint.c:67 utils/adt/ri_triggers.c:2724 +#: commands/constraint.c:67 utils/adt/ri_triggers.c:2265 #, c-format msgid "function \"%s\" must be fired AFTER ROW" msgstr "Funktion »%s« muss AFTER ROW ausgelöst werden" @@ -5464,561 +5571,554 @@ msgstr "Funktion »%s« muss AFTER ROW ausgelöst werden" msgid "function \"%s\" must be fired for INSERT or UPDATE" msgstr "Funktion »%s« muss von INSERT oder UPDATE ausgelöst werden" -#: commands/conversioncmds.c:66 +#: commands/conversioncmds.c:65 #, c-format msgid "source encoding \"%s\" does not exist" msgstr "Quellkodierung »%s« existiert nicht" -#: commands/conversioncmds.c:73 +#: commands/conversioncmds.c:72 #, c-format msgid "destination encoding \"%s\" does not exist" msgstr "Zielkodierung »%s« existiert nicht" -#: commands/conversioncmds.c:87 +#: commands/conversioncmds.c:86 #, c-format msgid "encoding conversion function %s must return type %s" msgstr "Kodierungskonversionsfunktion %s muss Typ %s zurückgeben" -#: commands/copy.c:369 commands/copy.c:403 +#: commands/copy.c:372 commands/copy.c:406 #, c-format msgid "COPY BINARY is not supported to stdout or from stdin" msgstr "COPY BINARY mit STDOUT oder STDIN wird nicht unterstützt" -#: commands/copy.c:503 +#: commands/copy.c:506 #, c-format msgid "could not write to COPY program: %m" msgstr "konnte nicht zum COPY-Programm schreiben: %m" -#: commands/copy.c:508 +#: commands/copy.c:511 #, c-format msgid "could not write to COPY file: %m" msgstr "konnte nicht in COPY-Datei schreiben: %m" -#: commands/copy.c:521 +#: commands/copy.c:524 #, c-format msgid "connection lost during COPY to stdout" msgstr "Verbindung während COPY nach STDOUT verloren" -#: commands/copy.c:562 +#: commands/copy.c:568 #, c-format msgid 
"could not read from COPY file: %m" msgstr "konnte nicht aus COPY-Datei lesen: %m" -#: commands/copy.c:578 commands/copy.c:599 commands/copy.c:603 -#: tcop/postgres.c:341 tcop/postgres.c:377 tcop/postgres.c:404 +#: commands/copy.c:584 commands/copy.c:605 commands/copy.c:609 +#: tcop/postgres.c:348 tcop/postgres.c:384 tcop/postgres.c:411 #, c-format msgid "unexpected EOF on client connection with an open transaction" msgstr "unerwartetes EOF auf Client-Verbindung mit einer offenen Transaktion" -#: commands/copy.c:616 +#: commands/copy.c:622 #, c-format msgid "COPY from stdin failed: %s" msgstr "COPY FROM STDIN fehlgeschlagen: %s" -#: commands/copy.c:632 +#: commands/copy.c:638 #, c-format msgid "unexpected message type 0x%02X during COPY from stdin" msgstr "unerwarteter Messagetyp 0x%02X während COPY FROM STDIN" -#: commands/copy.c:791 +#: commands/copy.c:804 #, c-format -msgid "must be superuser to COPY to or from an external program" -msgstr "nur Superuser können COPY mit externen Programmen verwenden" +msgid "must be superuser or a member of the pg_execute_server_program role to COPY to or from an external program" +msgstr "nur Superuser oder Mitglieder von pg_execute_server_program können COPY mit externen Programmen verwenden" -#: commands/copy.c:792 commands/copy.c:798 +#: commands/copy.c:805 commands/copy.c:814 commands/copy.c:821 #, c-format msgid "Anyone can COPY to stdout or from stdin. psql's \\copy command also works for anyone." msgstr "Jeder kann COPY mit STDOUT oder STDIN verwenden. Der Befehl \\copy in psql funktioniert auch für jeden." -#: commands/copy.c:797 +#: commands/copy.c:813 +#, c-format +msgid "must be superuser or a member of the pg_read_server_files role to COPY from a file" +msgstr "nur Superuser oder Mitglieder von pg_read_server_files können mit COPY aus einer Datei lesen" + +#: commands/copy.c:820 #, c-format -msgid "must be superuser to COPY to or from a file" -msgstr "nur Superuser können COPY mit Dateien verwenden" +msgid "must be superuser or a member of the pg_write_server_files role to COPY to a file" +msgstr "nur Superuser oder Mitglieder von pg_write_server_files können mit COPY in eine Datei schreiben" -#: commands/copy.c:864 +#: commands/copy.c:883 #, c-format msgid "COPY FROM not supported with row-level security" msgstr "COPY FROM wird nicht unterstützt mit Sicherheit auf Zeilenebene" -#: commands/copy.c:865 +#: commands/copy.c:884 #, c-format msgid "Use INSERT statements instead." msgstr "Verwenden Sie stattdessen INSERT-Anweisungen." 
-#: commands/copy.c:1051 +#: commands/copy.c:1069 #, c-format msgid "COPY format \"%s\" not recognized" msgstr "COPY-Format »%s« nicht erkannt" -#: commands/copy.c:1131 commands/copy.c:1147 commands/copy.c:1162 -#: commands/copy.c:1184 +#: commands/copy.c:1149 commands/copy.c:1165 commands/copy.c:1180 +#: commands/copy.c:1202 #, c-format msgid "argument to option \"%s\" must be a list of column names" msgstr "Argument von Option »%s« muss eine Liste aus Spaltennamen sein" -#: commands/copy.c:1199 +#: commands/copy.c:1217 #, c-format msgid "argument to option \"%s\" must be a valid encoding name" msgstr "Argument von Option »%s« muss ein gültiger Kodierungsname sein" -#: commands/copy.c:1206 commands/dbcommands.c:242 commands/dbcommands.c:1453 +#: commands/copy.c:1224 commands/dbcommands.c:242 commands/dbcommands.c:1461 #, c-format msgid "option \"%s\" not recognized" msgstr "Option »%s« nicht erkannt" -#: commands/copy.c:1218 +#: commands/copy.c:1236 #, c-format msgid "cannot specify DELIMITER in BINARY mode" msgstr "DELIMITER kann nicht im BINARY-Modus angegeben werden" -#: commands/copy.c:1223 +#: commands/copy.c:1241 #, c-format msgid "cannot specify NULL in BINARY mode" msgstr "NULL kann nicht im BINARY-Modus angegeben werden" -#: commands/copy.c:1245 +#: commands/copy.c:1263 #, c-format msgid "COPY delimiter must be a single one-byte character" msgstr "DELIMITER für COPY muss ein einzelnes Ein-Byte-Zeichen sein" -#: commands/copy.c:1252 +#: commands/copy.c:1270 #, c-format msgid "COPY delimiter cannot be newline or carriage return" msgstr "COPY-Trennzeichen kann nicht Newline oder Carriage Return sein" -#: commands/copy.c:1258 +#: commands/copy.c:1276 #, c-format msgid "COPY null representation cannot use newline or carriage return" msgstr "COPY NULL-Darstellung kann nicht Newline oder Carriage Return enthalten" -#: commands/copy.c:1275 +#: commands/copy.c:1293 #, c-format msgid "COPY delimiter cannot be \"%s\"" msgstr "DELIMITER für COPY darf nicht »%s« sein" -#: commands/copy.c:1281 +#: commands/copy.c:1299 #, c-format msgid "COPY HEADER available only in CSV mode" msgstr "COPY HEADER ist nur im CSV-Modus verfügbar" -#: commands/copy.c:1287 +#: commands/copy.c:1305 #, c-format msgid "COPY quote available only in CSV mode" msgstr "Quote-Zeichen für COPY ist nur im CSV-Modus verfügbar" -#: commands/copy.c:1292 +#: commands/copy.c:1310 #, c-format msgid "COPY quote must be a single one-byte character" msgstr "Quote-Zeichen für COPY muss ein einzelnes Ein-Byte-Zeichen sein" -#: commands/copy.c:1297 +#: commands/copy.c:1315 #, c-format msgid "COPY delimiter and quote must be different" msgstr "DELIMITER und QUOTE für COPY müssen verschieden sein" -#: commands/copy.c:1303 +#: commands/copy.c:1321 #, c-format msgid "COPY escape available only in CSV mode" msgstr "Escape-Zeichen für COPY ist nur im CSV-Modus verfügbar" -#: commands/copy.c:1308 +#: commands/copy.c:1326 #, c-format msgid "COPY escape must be a single one-byte character" msgstr "Escape-Zeichen für COPY muss ein einzelnes Ein-Byte-Zeichen sein" -#: commands/copy.c:1314 +#: commands/copy.c:1332 #, c-format msgid "COPY force quote available only in CSV mode" msgstr "FORCE_QUOTE für COPY ist nur im CSV-Modus verfügbar" -#: commands/copy.c:1318 +#: commands/copy.c:1336 #, c-format msgid "COPY force quote only available using COPY TO" msgstr "FORCE_QUOTE ist nur bei COPY TO verfügbar" -#: commands/copy.c:1324 +#: commands/copy.c:1342 #, c-format msgid "COPY force not null available only in CSV mode" msgstr "FORCE_NOT_NULL für COPY 
ist nur im CSV-Modus verfügbar" -#: commands/copy.c:1328 +#: commands/copy.c:1346 #, c-format msgid "COPY force not null only available using COPY FROM" msgstr "FORCE_NOT_NULL ist nur bei COPY FROM verfügbar" -#: commands/copy.c:1334 +#: commands/copy.c:1352 #, c-format msgid "COPY force null available only in CSV mode" msgstr "FORCE_NULL für COPY ist nur im CSV-Modus verfügbar" -#: commands/copy.c:1339 +#: commands/copy.c:1357 #, c-format msgid "COPY force null only available using COPY FROM" msgstr "FORCE_NULL ist nur bei COPY FROM verfügbar" -#: commands/copy.c:1345 +#: commands/copy.c:1363 #, c-format msgid "COPY delimiter must not appear in the NULL specification" msgstr "Trennzeichen für COPY darf nicht in der NULL-Darstellung erscheinen" -#: commands/copy.c:1352 +#: commands/copy.c:1370 #, c-format msgid "CSV quote character must not appear in the NULL specification" msgstr "CSV-Quote-Zeichen darf nicht in der NULL-Darstellung erscheinen" -#: commands/copy.c:1413 +#: commands/copy.c:1431 #, c-format msgid "table \"%s\" does not have OIDs" msgstr "Tabelle »%s« hat keine OIDs" -#: commands/copy.c:1454 +#: commands/copy.c:1448 #, c-format msgid "COPY (query) WITH OIDS is not supported" msgstr "COPY (Anfrage) WITH OIDS wird nicht unterstützt" -#: commands/copy.c:1474 +#: commands/copy.c:1469 #, c-format msgid "DO INSTEAD NOTHING rules are not supported for COPY" msgstr "DO INSTEAD NOTHING-Regeln werden für COPY nicht unterstützt" -#: commands/copy.c:1488 +#: commands/copy.c:1483 #, c-format msgid "conditional DO INSTEAD rules are not supported for COPY" msgstr "Do INSTEAD-Regeln mit Bedingung werden für COPY nicht unterstützt" -#: commands/copy.c:1492 +#: commands/copy.c:1487 #, c-format msgid "DO ALSO rules are not supported for the COPY" msgstr "DO ALSO-Regeln werden für COPY nicht unterstützt" -#: commands/copy.c:1497 +#: commands/copy.c:1492 #, c-format msgid "multi-statement DO INSTEAD rules are not supported for COPY" msgstr "DO INSTEAD-Regeln mit mehreren Anweisungen werden für COPY nicht unterstützt" -#: commands/copy.c:1507 +#: commands/copy.c:1502 #, c-format msgid "COPY (SELECT INTO) is not supported" msgstr "COPY (SELECT INTO) wird nicht unterstützt" -#: commands/copy.c:1524 +#: commands/copy.c:1519 #, c-format msgid "COPY query must have a RETURNING clause" msgstr "COPY-Anfrage muss eine RETURNING-Klausel haben" -#: commands/copy.c:1552 +#: commands/copy.c:1547 #, c-format msgid "relation referenced by COPY statement has changed" msgstr "die von der COPY-Anweisung verwendete Relation hat sich geändert" -#: commands/copy.c:1610 +#: commands/copy.c:1606 #, c-format msgid "FORCE_QUOTE column \"%s\" not referenced by COPY" msgstr "FORCE_QUOTE-Spalte »%s« wird von COPY nicht verwendet" -#: commands/copy.c:1632 +#: commands/copy.c:1629 #, c-format msgid "FORCE_NOT_NULL column \"%s\" not referenced by COPY" msgstr "Spalte »%s« mit FORCE_NOT_NULL wird von COPY nicht verwendet" -#: commands/copy.c:1654 +#: commands/copy.c:1652 #, c-format msgid "FORCE_NULL column \"%s\" not referenced by COPY" msgstr "Spalte »%s« mit FORCE_NULL wird von COPY nicht verwendet" -#: commands/copy.c:1719 +#: commands/copy.c:1718 libpq/be-secure-common.c:102 #, c-format msgid "could not close pipe to external command: %m" msgstr "konnte Pipe zu externem Programm nicht schließen: %m" -#: commands/copy.c:1723 +#: commands/copy.c:1722 #, c-format msgid "program \"%s\" failed" msgstr "Programm »%s« fehlgeschlagen" -#: commands/copy.c:1773 +#: commands/copy.c:1772 #, c-format msgid "cannot copy from view 
\"%s\"" msgstr "kann nicht aus Sicht »%s« kopieren" -#: commands/copy.c:1775 commands/copy.c:1781 commands/copy.c:1787 -#: commands/copy.c:1798 +#: commands/copy.c:1774 commands/copy.c:1780 commands/copy.c:1786 +#: commands/copy.c:1797 #, c-format msgid "Try the COPY (SELECT ...) TO variant." msgstr "Versuchen Sie die Variante COPY (SELECT ...) TO." -#: commands/copy.c:1779 +#: commands/copy.c:1778 #, c-format msgid "cannot copy from materialized view \"%s\"" msgstr "kann nicht aus materialisierter Sicht »%s« kopieren" -#: commands/copy.c:1785 +#: commands/copy.c:1784 #, c-format msgid "cannot copy from foreign table \"%s\"" msgstr "kann nicht aus Fremdtabelle »%s« kopieren" -#: commands/copy.c:1791 +#: commands/copy.c:1790 #, c-format msgid "cannot copy from sequence \"%s\"" msgstr "kann nicht aus Sequenz »%s« kopieren" -#: commands/copy.c:1796 +#: commands/copy.c:1795 #, c-format msgid "cannot copy from partitioned table \"%s\"" msgstr "kann nicht aus partitionierter Tabelle »%s« kopieren" -#: commands/copy.c:1802 +#: commands/copy.c:1801 #, c-format msgid "cannot copy from non-table relation \"%s\"" msgstr "kann nicht aus Relation »%s«, die keine Tabelle ist, kopieren" -#: commands/copy.c:1842 +#: commands/copy.c:1841 #, c-format msgid "relative path not allowed for COPY to file" msgstr "relativer Pfad bei COPY in Datei nicht erlaubt" -#: commands/copy.c:1854 +#: commands/copy.c:1862 #, c-format msgid "could not open file \"%s\" for writing: %m" msgstr "konnte Datei »%s« nicht zum Schreiben öffnen: %m" -#: commands/copy.c:1857 +#: commands/copy.c:1865 #, c-format msgid "COPY TO instructs the PostgreSQL server process to write a file. You may want a client-side facility such as psql's \\copy." -msgstr "" +msgstr "Mit COPY TO schreibt der PostgreSQL-Serverprozess eine Datei. Möglicherweise möchten Sie Funktionalität auf Client-Seite verwenden, wie zum Beispiel \\copy in psql." -#: commands/copy.c:1870 commands/copy.c:3044 +#: commands/copy.c:1878 commands/copy.c:3163 #, c-format msgid "\"%s\" is a directory" msgstr "»%s« ist ein Verzeichnis" -#: commands/copy.c:2193 +#: commands/copy.c:2200 #, c-format msgid "COPY %s, line %d, column %s" msgstr "COPY %s, Zeile %d, Spalte %s" -#: commands/copy.c:2197 commands/copy.c:2244 +#: commands/copy.c:2204 commands/copy.c:2251 #, c-format msgid "COPY %s, line %d" msgstr "COPY %s, Zeile %d" -#: commands/copy.c:2208 +#: commands/copy.c:2215 #, c-format msgid "COPY %s, line %d, column %s: \"%s\"" msgstr "COPY %s, Zeile %d, Spalte %s: »%s«" -#: commands/copy.c:2216 +#: commands/copy.c:2223 #, c-format msgid "COPY %s, line %d, column %s: null input" msgstr "COPY %s, Zeile %d, Spalte %s: NULL Eingabe" -#: commands/copy.c:2238 +#: commands/copy.c:2245 #, c-format msgid "COPY %s, line %d: \"%s\"" msgstr "COPY %s, Zeile %d: »%s«" -#: commands/copy.c:2332 +#: commands/copy.c:2341 #, c-format msgid "cannot copy to view \"%s\"" msgstr "kann nicht in Sicht »%s« kopieren" -#: commands/copy.c:2334 -#, fuzzy, c-format -#| msgid "To enable inserting into the view, provide an INSTEAD OF INSERT trigger or an unconditional ON INSERT DO INSTEAD rule." +#: commands/copy.c:2343 +#, c-format msgid "To enable copying to a view, provide an INSTEAD OF INSERT trigger." -msgstr "Um Einfügen in die Sicht zu ermöglichen, richten Sie einen INSTEAD OF INSERT Trigger oder eine ON INSERT DO INSTEAD Regel ohne Bedingung ein." +msgstr "Um Kopieren in eine Sicht zu ermöglichen, richten Sie einen INSTEAD OF INSERT Trigger ein." 
-#: commands/copy.c:2338 +#: commands/copy.c:2347 #, c-format msgid "cannot copy to materialized view \"%s\"" msgstr "kann nicht in materialisierte Sicht »%s« kopieren" -#: commands/copy.c:2343 -#, c-format -msgid "cannot copy to foreign table \"%s\"" -msgstr "kann nicht in Fremdtabelle »%s« kopieren" - -#: commands/copy.c:2348 +#: commands/copy.c:2352 #, c-format msgid "cannot copy to sequence \"%s\"" msgstr "kann nicht in Sequenz »%s« kopieren" -#: commands/copy.c:2353 +#: commands/copy.c:2357 #, c-format msgid "cannot copy to non-table relation \"%s\"" msgstr "kann nicht in Relation »%s« kopieren, die keine Tabelle ist" -#: commands/copy.c:2416 +#: commands/copy.c:2432 #, c-format msgid "cannot perform FREEZE because of prior transaction activity" msgstr "FREEZE kann nicht durchgeführt werden wegen vorheriger Aktivität in dieser Transaktion" -#: commands/copy.c:2422 +#: commands/copy.c:2438 #, c-format msgid "cannot perform FREEZE because the table was not created or truncated in the current subtransaction" msgstr "FREEZE kann nicht durchgeführt werden, weil die Tabelle nicht in der aktuellen Transaktion erzeugt oder geleert wurde" -#: commands/copy.c:2587 executor/nodeModifyTable.c:311 -#, fuzzy, c-format -#| msgid "cannot insert into foreign table \"%s\"" -msgid "cannot route inserted tuples to a foreign table" -msgstr "kann nicht in Fremdtabelle »%s« einfügen" - -#: commands/copy.c:3031 +#: commands/copy.c:3150 #, c-format msgid "COPY FROM instructs the PostgreSQL server process to read a file. You may want a client-side facility such as psql's \\copy." -msgstr "" +msgstr "Mit COPY FROM liest der PostgreSQL-Serverprozess eine Datei. Möglicherweise möchten Sie Funktionalität auf Client-Seite verwenden, wie zum Beispiel \\copy in psql." -#: commands/copy.c:3064 +#: commands/copy.c:3183 #, c-format msgid "COPY file signature not recognized" msgstr "COPY-Datei-Signatur nicht erkannt" -#: commands/copy.c:3069 +#: commands/copy.c:3188 #, c-format msgid "invalid COPY file header (missing flags)" msgstr "ungültiger COPY-Dateikopf (Flags fehlen)" -#: commands/copy.c:3075 +#: commands/copy.c:3194 #, c-format msgid "unrecognized critical flags in COPY file header" msgstr "unbekannte kritische Flags im COPY-Dateikopf" -#: commands/copy.c:3081 +#: commands/copy.c:3200 #, c-format msgid "invalid COPY file header (missing length)" msgstr "ungültiger COPY-Dateikopf (Länge fehlt)" -#: commands/copy.c:3088 +#: commands/copy.c:3207 #, c-format msgid "invalid COPY file header (wrong length)" msgstr "ungültiger COPY-Dateikopf (falsche Länge)" -#: commands/copy.c:3221 commands/copy.c:3928 commands/copy.c:4158 +#: commands/copy.c:3338 commands/copy.c:4047 commands/copy.c:4277 #, c-format msgid "extra data after last expected column" msgstr "zusätzliche Daten nach letzter erwarteter Spalte" -#: commands/copy.c:3231 +#: commands/copy.c:3348 #, c-format msgid "missing data for OID column" msgstr "fehlende Daten für OID-Spalte" -#: commands/copy.c:3237 +#: commands/copy.c:3354 #, c-format msgid "null OID in COPY data" msgstr "OID ist NULL in COPY-Daten" -#: commands/copy.c:3247 commands/copy.c:3370 +#: commands/copy.c:3364 commands/copy.c:3488 #, c-format msgid "invalid OID in COPY data" msgstr "ungültige OID in COPY-Daten" -#: commands/copy.c:3262 +#: commands/copy.c:3380 #, c-format msgid "missing data for column \"%s\"" msgstr "fehlende Daten für Spalte »%s«" -#: commands/copy.c:3345 +#: commands/copy.c:3463 #, c-format msgid "received copy data after EOF marker" msgstr "COPY-Daten nach EOF-Markierung 
empfangen" -#: commands/copy.c:3352 +#: commands/copy.c:3470 #, c-format msgid "row field count is %d, expected %d" msgstr "Feldanzahl in Zeile ist %d, erwartet wurden %d" -#: commands/copy.c:3692 commands/copy.c:3709 +#: commands/copy.c:3811 commands/copy.c:3828 #, c-format msgid "literal carriage return found in data" msgstr "Carriage-Return-Zeichen in Daten gefunden" -#: commands/copy.c:3693 commands/copy.c:3710 +#: commands/copy.c:3812 commands/copy.c:3829 #, c-format msgid "unquoted carriage return found in data" msgstr "ungequotetes Carriage-Return-Zeichen in Daten gefunden" -#: commands/copy.c:3695 commands/copy.c:3712 +#: commands/copy.c:3814 commands/copy.c:3831 #, c-format msgid "Use \"\\r\" to represent carriage return." msgstr "Verwenden Sie »\\r«, um ein Carriage-Return-Zeichen darzustellen." -#: commands/copy.c:3696 commands/copy.c:3713 +#: commands/copy.c:3815 commands/copy.c:3832 #, c-format msgid "Use quoted CSV field to represent carriage return." msgstr "Verwenden Sie ein gequotetes CSV-Feld, um ein Carriage-Return-Zeichen darzustellen." -#: commands/copy.c:3725 +#: commands/copy.c:3844 #, c-format msgid "literal newline found in data" msgstr "Newline-Zeichen in Daten gefunden" -#: commands/copy.c:3726 +#: commands/copy.c:3845 #, c-format msgid "unquoted newline found in data" msgstr "ungequotetes Newline-Zeichen in Daten gefunden" -#: commands/copy.c:3728 +#: commands/copy.c:3847 #, c-format msgid "Use \"\\n\" to represent newline." msgstr "Verwenden Sie »\\n«, um ein Newline-Zeichen darzustellen." -#: commands/copy.c:3729 +#: commands/copy.c:3848 #, c-format msgid "Use quoted CSV field to represent newline." msgstr "Verwenden Sie ein gequotetes CSV-Feld, um ein Newline-Zeichen darzustellen." -#: commands/copy.c:3775 commands/copy.c:3811 +#: commands/copy.c:3894 commands/copy.c:3930 #, c-format msgid "end-of-copy marker does not match previous newline style" msgstr "COPY-Ende-Markierung stimmt nicht mit vorherigem Newline-Stil überein" -#: commands/copy.c:3784 commands/copy.c:3800 +#: commands/copy.c:3903 commands/copy.c:3919 #, c-format msgid "end-of-copy marker corrupt" msgstr "COPY-Ende-Markierung verfälscht" -#: commands/copy.c:4242 +#: commands/copy.c:4361 #, c-format msgid "unterminated CSV quoted field" msgstr "Quotes in CSV-Feld nicht abgeschlossen" -#: commands/copy.c:4319 commands/copy.c:4338 +#: commands/copy.c:4438 commands/copy.c:4457 #, c-format msgid "unexpected EOF in COPY data" msgstr "unerwartetes EOF in COPY-Daten" -#: commands/copy.c:4328 +#: commands/copy.c:4447 #, c-format msgid "invalid field size" msgstr "ungültige Feldgröße" -#: commands/copy.c:4351 +#: commands/copy.c:4470 #, c-format msgid "incorrect binary data format" msgstr "falsches Binärdatenformat" -#: commands/copy.c:4662 commands/indexcmds.c:1062 commands/tablecmds.c:1655 -#: commands/tablecmds.c:2150 commands/tablecmds.c:2573 -#: parser/parse_relation.c:3111 parser/parse_relation.c:3131 -#: utils/adt/tsvector_op.c:2561 +#: commands/copy.c:4782 commands/indexcmds.c:1442 commands/statscmds.c:206 +#: commands/tablecmds.c:1897 commands/tablecmds.c:2400 +#: commands/tablecmds.c:2811 parser/parse_relation.c:3288 +#: parser/parse_relation.c:3308 utils/adt/tsvector_op.c:2561 #, c-format msgid "column \"%s\" does not exist" msgstr "Spalte »%s« existiert nicht" -#: commands/copy.c:4669 commands/tablecmds.c:1681 commands/trigger.c:741 -#: parser/parse_target.c:1017 parser/parse_target.c:1028 +#: commands/copy.c:4789 commands/tablecmds.c:1923 commands/tablecmds.c:2426 +#: commands/trigger.c:913 
parser/parse_target.c:1040 parser/parse_target.c:1051 #, c-format msgid "column \"%s\" specified more than once" msgstr "Spalte »%s« mehrmals angegeben" -#: commands/createas.c:213 commands/createas.c:508 +#: commands/createas.c:213 commands/createas.c:509 #, c-format msgid "too many column names were specified" msgstr "zu viele Spaltennamen wurden angegeben" -#: commands/createas.c:549 +#: commands/createas.c:550 #, c-format msgid "policies not yet implemented for this command" msgstr "Policys sind für diesen Befehl noch nicht implementiert" @@ -6043,8 +6143,8 @@ msgstr "%d ist kein gültiger Kodierungscode" msgid "%s is not a valid encoding name" msgstr "%s ist kein gültiger Kodierungsname" -#: commands/dbcommands.c:292 commands/dbcommands.c:1486 commands/user.c:289 -#: commands/user.c:665 +#: commands/dbcommands.c:292 commands/dbcommands.c:1494 commands/user.c:276 +#: commands/user.c:664 #, c-format msgid "invalid connection limit: %d" msgstr "ungültige Verbindungshöchstgrenze: %d" @@ -6104,7 +6204,7 @@ msgstr "neues LC_CTYPE (%s) ist inkompatibel mit dem LC_CTYPE der Template-Daten msgid "Use the same LC_CTYPE as in the template database, or use template0 as template." msgstr "Verwenden Sie das gleiche LC_CTYPE wie die Template-Datenbank oder verwenden Sie template0 als Template." -#: commands/dbcommands.c:432 commands/dbcommands.c:1138 +#: commands/dbcommands.c:432 commands/dbcommands.c:1146 #, c-format msgid "pg_global cannot be used as default tablespace" msgstr "pg_global kann nicht als Standard-Tablespace verwendet werden" @@ -6119,7 +6219,7 @@ msgstr "kann neuen Standard-Tablespace »%s« nicht setzen" msgid "There is a conflict because database \"%s\" already has some tables in this tablespace." msgstr "Es gibt einen Konflikt, weil Datenbank »%s« schon einige Tabellen in diesem Tablespace hat." -#: commands/dbcommands.c:480 commands/dbcommands.c:1008 +#: commands/dbcommands.c:480 commands/dbcommands.c:1016 #, c-format msgid "database \"%s\" already exists" msgstr "Datenbank »%s« existiert bereits" @@ -6159,96 +6259,96 @@ msgstr "Template-Datenbank kann nicht gelöscht werden" msgid "cannot drop the currently open database" msgstr "kann aktuell geöffnete Datenbank nicht löschen" -#: commands/dbcommands.c:855 +#: commands/dbcommands.c:858 #, c-format -msgid "database \"%s\" is used by a logical replication slot" -msgstr "Datenbank »%s« wird von einem logischen Replikations-Slot verwendet" +msgid "database \"%s\" is used by an active logical replication slot" +msgstr "Datenbank »%s« wird von einem aktiven logischen Replikations-Slot verwendet" -#: commands/dbcommands.c:857 +#: commands/dbcommands.c:860 #, c-format -msgid "There is %d slot, %d of them active." -msgid_plural "There are %d slots, %d of them active." -msgstr[0] "%d Slot ist vorhanden, %d davon aktiv." -msgstr[1] "%d Slots sind vorhanden, %d davon aktiv." +msgid "There is %d active slot." +msgid_plural "There are %d active slots." +msgstr[0] "%d aktiver Slot ist vorhanden." +msgstr[1] "%d aktive Slots sind vorhanden." 
-#: commands/dbcommands.c:871 commands/dbcommands.c:1030 -#: commands/dbcommands.c:1160 +#: commands/dbcommands.c:874 commands/dbcommands.c:1038 +#: commands/dbcommands.c:1168 #, c-format msgid "database \"%s\" is being accessed by other users" msgstr "auf Datenbank »%s« wird von anderen Benutzern zugegriffen" -#: commands/dbcommands.c:884 +#: commands/dbcommands.c:887 #, c-format msgid "database \"%s\" is being used by logical replication subscription" msgstr "Datenbank »%s« wird von einer Subskription für logische Replikation verwendet" -#: commands/dbcommands.c:886 +#: commands/dbcommands.c:889 #, c-format msgid "There is %d subscription." msgid_plural "There are %d subscriptions." -msgstr[0] "" -msgstr[1] "" +msgstr[0] "%d Subskription ist vorhanden." +msgstr[1] "%d Subskriptionen sind vorhanden." -#: commands/dbcommands.c:999 +#: commands/dbcommands.c:1007 #, c-format msgid "permission denied to rename database" msgstr "keine Berechtigung, um Datenbank umzubenennen" -#: commands/dbcommands.c:1019 +#: commands/dbcommands.c:1027 #, c-format msgid "current database cannot be renamed" msgstr "aktuelle Datenbank kann nicht umbenannt werden" -#: commands/dbcommands.c:1116 +#: commands/dbcommands.c:1124 #, c-format msgid "cannot change the tablespace of the currently open database" msgstr "kann den Tablespace der aktuell geöffneten Datenbank nicht ändern" -#: commands/dbcommands.c:1219 +#: commands/dbcommands.c:1227 #, c-format msgid "some relations of database \"%s\" are already in tablespace \"%s\"" msgstr "einige Relationen von Datenbank »%s« ist bereits in Tablespace »%s«" -#: commands/dbcommands.c:1221 +#: commands/dbcommands.c:1229 #, c-format msgid "You must move them back to the database's default tablespace before using this command." msgstr "Sie müssen sie zurück in den Standard-Tablespace der Datenbank verschieben, bevor Sie diesen Befehl verwenden können." -#: commands/dbcommands.c:1347 commands/dbcommands.c:1892 -#: commands/dbcommands.c:2096 commands/dbcommands.c:2144 -#: commands/tablespace.c:604 +#: commands/dbcommands.c:1355 commands/dbcommands.c:1900 +#: commands/dbcommands.c:2104 commands/dbcommands.c:2159 +#: commands/tablespace.c:606 #, c-format msgid "some useless files may be left behind in old database directory \"%s\"" msgstr "einige nutzlose Dateien wurde möglicherweise im alten Datenbankverzeichnis »%s« zurückgelassen" -#: commands/dbcommands.c:1467 +#: commands/dbcommands.c:1475 #, c-format msgid "option \"%s\" cannot be specified with other options" msgstr "Option »%s« kann nicht mit anderen Optionen angegeben werden" -#: commands/dbcommands.c:1522 +#: commands/dbcommands.c:1530 #, c-format msgid "cannot disallow connections for current database" msgstr "Verbindungen mit der aktuellen Datenbank können nicht verboten werden" -#: commands/dbcommands.c:1659 +#: commands/dbcommands.c:1667 #, c-format msgid "permission denied to change owner of database" msgstr "keine Berechtigung, um Eigentümer der Datenbank zu ändern" -#: commands/dbcommands.c:1979 +#: commands/dbcommands.c:1987 #, c-format msgid "There are %d other session(s) and %d prepared transaction(s) using the database." msgstr "%d andere Sitzung(en) und %d vorbereitete Transaktion(en) verwenden die Datenbank." -#: commands/dbcommands.c:1982 +#: commands/dbcommands.c:1990 #, c-format msgid "There is %d other session using the database." msgid_plural "There are %d other sessions using the database." msgstr[0] "%d andere Sitzung verwendet die Datenbank." msgstr[1] "%d andere Sitzungen verwenden die Datenbank." 
-#: commands/dbcommands.c:1987 +#: commands/dbcommands.c:1995 #, c-format msgid "There is %d prepared transaction using the database." msgid_plural "There are %d prepared transactions using the database." @@ -6292,70 +6392,75 @@ msgstr "Argument von %s muss ein Typname sein" msgid "invalid argument for %s: \"%s\"" msgstr "ungültiges Argument für %s: »%s«" -#: commands/dropcmds.c:104 commands/functioncmds.c:1200 -#: utils/adt/ruleutils.c:2284 +#: commands/dropcmds.c:98 commands/functioncmds.c:1210 +#: utils/adt/ruleutils.c:2520 #, c-format msgid "\"%s\" is an aggregate function" msgstr "»%s« ist eine Aggregatfunktion" -#: commands/dropcmds.c:106 +#: commands/dropcmds.c:100 #, c-format msgid "Use DROP AGGREGATE to drop aggregate functions." msgstr "Verwenden Sie DROP AGGREGATE, um Aggregatfunktionen zu löschen." -#: commands/dropcmds.c:157 commands/sequence.c:430 commands/tablecmds.c:2657 -#: commands/tablecmds.c:2808 commands/tablecmds.c:2851 -#: commands/tablecmds.c:12104 tcop/utility.c:1159 +#: commands/dropcmds.c:149 commands/sequence.c:441 commands/tablecmds.c:2895 +#: commands/tablecmds.c:3046 commands/tablecmds.c:3089 +#: commands/tablecmds.c:12871 tcop/utility.c:1160 #, c-format msgid "relation \"%s\" does not exist, skipping" msgstr "Relation »%s« existiert nicht, wird übersprungen" -#: commands/dropcmds.c:187 commands/dropcmds.c:286 commands/tablecmds.c:896 +#: commands/dropcmds.c:179 commands/dropcmds.c:278 commands/tablecmds.c:1019 #, c-format msgid "schema \"%s\" does not exist, skipping" msgstr "Schema »%s« existiert nicht, wird übersprungen" -#: commands/dropcmds.c:227 commands/dropcmds.c:266 commands/tablecmds.c:252 +#: commands/dropcmds.c:219 commands/dropcmds.c:258 commands/tablecmds.c:254 #, c-format msgid "type \"%s\" does not exist, skipping" msgstr "Typ »%s« existiert nicht, wird übersprungen" -#: commands/dropcmds.c:256 +#: commands/dropcmds.c:248 #, c-format msgid "access method \"%s\" does not exist, skipping" msgstr "Zugriffsmethode »%s« existiert nicht, wird übersprungen" -#: commands/dropcmds.c:274 +#: commands/dropcmds.c:266 #, c-format msgid "collation \"%s\" does not exist, skipping" msgstr "Sortierfolge »%s« existiert nicht, wird übersprungen" -#: commands/dropcmds.c:281 +#: commands/dropcmds.c:273 #, c-format msgid "conversion \"%s\" does not exist, skipping" msgstr "Konversion »%s« existiert nicht, wird übersprungen" -#: commands/dropcmds.c:292 +#: commands/dropcmds.c:284 +#, c-format +msgid "statistics object \"%s\" does not exist, skipping" +msgstr "Statistikobjekt »%s« existiert nicht, wird übersprungen" + +#: commands/dropcmds.c:291 #, c-format msgid "text search parser \"%s\" does not exist, skipping" msgstr "Textsucheparser »%s« existiert nicht, wird übersprungen" -#: commands/dropcmds.c:299 +#: commands/dropcmds.c:298 #, c-format msgid "text search dictionary \"%s\" does not exist, skipping" msgstr "Textsuchewörterbuch »%s« existiert nicht, wird übersprungen" -#: commands/dropcmds.c:306 +#: commands/dropcmds.c:305 #, c-format msgid "text search template \"%s\" does not exist, skipping" msgstr "Textsuchevorlage »%s« existiert nicht, wird übersprungen" -#: commands/dropcmds.c:313 +#: commands/dropcmds.c:312 #, c-format msgid "text search configuration \"%s\" does not exist, skipping" msgstr "Textsuchekonfiguration »%s« existiert nicht, wird übersprungen" -#: commands/dropcmds.c:318 +#: commands/dropcmds.c:317 #, c-format msgid "extension \"%s\" does not exist, skipping" msgstr "Erweiterung »%s« existiert nicht, wird übersprungen" @@ -6365,139 +6470,149 
@@ msgstr "Erweiterung »%s« existiert nicht, wird übersprungen" msgid "function %s(%s) does not exist, skipping" msgstr "Funktion %s(%s) existiert nicht, wird übersprungen" -#: commands/dropcmds.c:339 +#: commands/dropcmds.c:340 +#, c-format +msgid "procedure %s(%s) does not exist, skipping" +msgstr "Prozedur %s(%s) existiert nicht, wird übersprungen" + +#: commands/dropcmds.c:353 +#, c-format +msgid "routine %s(%s) does not exist, skipping" +msgstr "Routine %s(%s) existiert nicht, wird übersprungen" + +#: commands/dropcmds.c:366 #, c-format msgid "aggregate %s(%s) does not exist, skipping" msgstr "Aggregatfunktion %s(%s) existiert nicht, wird übersprungen" -#: commands/dropcmds.c:351 +#: commands/dropcmds.c:379 #, c-format msgid "operator %s does not exist, skipping" msgstr "Operator %s existiert nicht, wird übersprungen" -#: commands/dropcmds.c:357 +#: commands/dropcmds.c:385 #, c-format msgid "language \"%s\" does not exist, skipping" msgstr "Sprache »%s« existiert nicht, wird übersprungen" -#: commands/dropcmds.c:366 +#: commands/dropcmds.c:394 #, c-format msgid "cast from type %s to type %s does not exist, skipping" msgstr "Typumwandlung von Typ %s in Typ %s existiert nicht, wird übersprungen" -#: commands/dropcmds.c:375 +#: commands/dropcmds.c:403 #, c-format msgid "transform for type %s language \"%s\" does not exist, skipping" msgstr "Transformation für Typ %s Sprache »%s« existiert nicht, wird übersprungen" -#: commands/dropcmds.c:383 +#: commands/dropcmds.c:411 #, c-format msgid "trigger \"%s\" for relation \"%s\" does not exist, skipping" msgstr "Trigger »%s« für Relation »%s« existiert nicht, wird übersprungen" -#: commands/dropcmds.c:392 +#: commands/dropcmds.c:420 #, c-format msgid "policy \"%s\" for relation \"%s\" does not exist, skipping" msgstr "Policy »%s« für Relation »%s« existiert nicht, wird übersprungen" -#: commands/dropcmds.c:399 +#: commands/dropcmds.c:427 #, c-format msgid "event trigger \"%s\" does not exist, skipping" msgstr "Ereignistrigger »%s« existiert nicht, wird übersprungen" -#: commands/dropcmds.c:405 +#: commands/dropcmds.c:433 #, c-format msgid "rule \"%s\" for relation \"%s\" does not exist, skipping" msgstr "Regel »%s« für Relation »%s« existiert nicht, wird übersprungen" -#: commands/dropcmds.c:412 +#: commands/dropcmds.c:440 #, c-format msgid "foreign-data wrapper \"%s\" does not exist, skipping" msgstr "Fremddaten-Wrapper »%s« existiert nicht, wird übersprungen" -#: commands/dropcmds.c:416 +#: commands/dropcmds.c:444 #, c-format msgid "server \"%s\" does not exist, skipping" msgstr "Server »%s« existiert nicht, wird übersprungen" -#: commands/dropcmds.c:425 +#: commands/dropcmds.c:453 #, c-format msgid "operator class \"%s\" does not exist for access method \"%s\", skipping" msgstr "Operatorklasse »%s« existiert nicht für Zugriffsmethode »%s«, wird übersprungen" -#: commands/dropcmds.c:437 +#: commands/dropcmds.c:465 #, c-format msgid "operator family \"%s\" does not exist for access method \"%s\", skipping" msgstr "Operatorfamilie »%s« existiert nicht für Zugriffsmethode »%s«, wird übersprungen" -#: commands/dropcmds.c:444 +#: commands/dropcmds.c:472 #, c-format msgid "publication \"%s\" does not exist, skipping" msgstr "Publikation »%s« existiert nicht, wird übersprungen" -#: commands/event_trigger.c:184 +#: commands/event_trigger.c:187 #, c-format msgid "permission denied to create event trigger \"%s\"" msgstr "keine Berechtigung, um Ereignistrigger »%s« zu erzeugen" -#: commands/event_trigger.c:186 +#: commands/event_trigger.c:189 #, 
c-format msgid "Must be superuser to create an event trigger." msgstr "Nur Superuser können Ereignistrigger anlegen." -#: commands/event_trigger.c:195 +#: commands/event_trigger.c:198 #, c-format msgid "unrecognized event name \"%s\"" msgstr "unbekannter Ereignisname »%s«" -#: commands/event_trigger.c:212 +#: commands/event_trigger.c:215 #, c-format msgid "unrecognized filter variable \"%s\"" msgstr "unbekannte Filtervariable »%s«" -#: commands/event_trigger.c:267 +#: commands/event_trigger.c:270 #, c-format msgid "filter value \"%s\" not recognized for filter variable \"%s\"" msgstr "Filterwert »%s« nicht erkannt für Filtervariable »%s«" #. translator: %s represents an SQL statement name -#: commands/event_trigger.c:273 commands/event_trigger.c:343 +#: commands/event_trigger.c:276 commands/event_trigger.c:346 #, c-format msgid "event triggers are not supported for %s" msgstr "Ereignistrigger für %s werden nicht unterstützt" -#: commands/event_trigger.c:366 +#: commands/event_trigger.c:369 #, c-format msgid "filter variable \"%s\" specified more than once" msgstr "Filtervariable »%s« mehrmals angegeben" -#: commands/event_trigger.c:513 commands/event_trigger.c:556 -#: commands/event_trigger.c:648 +#: commands/event_trigger.c:516 commands/event_trigger.c:559 +#: commands/event_trigger.c:651 #, c-format msgid "event trigger \"%s\" does not exist" msgstr "Ereignistrigger »%s« existiert nicht" -#: commands/event_trigger.c:617 +#: commands/event_trigger.c:620 #, c-format msgid "permission denied to change owner of event trigger \"%s\"" msgstr "keine Berechtigung, um Eigentümer des Ereignistriggers »%s« zu ändern" -#: commands/event_trigger.c:619 +#: commands/event_trigger.c:622 #, c-format msgid "The owner of an event trigger must be a superuser." msgstr "Der Eigentümer eines Ereignistriggers muss ein Superuser sein." -#: commands/event_trigger.c:1443 +#: commands/event_trigger.c:1457 #, c-format msgid "%s can only be called in a sql_drop event trigger function" msgstr "%s kann nur in einer sql_drop-Ereignistriggerfunktion aufgerufen werden" -#: commands/event_trigger.c:1563 commands/event_trigger.c:1584 +#: commands/event_trigger.c:1577 commands/event_trigger.c:1598 #, c-format msgid "%s can only be called in a table_rewrite event trigger function" msgstr "%s kann nur in einer table_rewrite-Ereignistriggerfunktion aufgerufen werden" -#: commands/event_trigger.c:1994 +#: commands/event_trigger.c:2008 #, c-format msgid "%s can only be called in an event trigger function" msgstr "%s kann nur in einer Ereignistriggerfunktion aufgerufen werden" @@ -6522,235 +6637,234 @@ msgstr "EXPLAIN-Option BUFFERS erfordert ANALYZE" msgid "EXPLAIN option TIMING requires ANALYZE" msgstr "EXPLAIN-Option TIMING erfordert ANALYZE" -#: commands/extension.c:167 commands/extension.c:2900 +#: commands/extension.c:168 commands/extension.c:2907 #, c-format msgid "extension \"%s\" does not exist" msgstr "Erweiterung »%s« existiert nicht" -#: commands/extension.c:266 commands/extension.c:275 commands/extension.c:287 -#: commands/extension.c:297 +#: commands/extension.c:267 commands/extension.c:276 commands/extension.c:288 +#: commands/extension.c:298 #, c-format msgid "invalid extension name: \"%s\"" msgstr "ungültiger Erweiterungsname: »%s«" -#: commands/extension.c:267 +#: commands/extension.c:268 #, c-format msgid "Extension names must not be empty." msgstr "Erweiterungsnamen dürfen nicht leer sein." -#: commands/extension.c:276 +#: commands/extension.c:277 #, c-format msgid "Extension names must not contain \"--\"." 
msgstr "Erweiterungsnamen dürfen nicht »--« enthalten." -#: commands/extension.c:288 +#: commands/extension.c:289 #, c-format msgid "Extension names must not begin or end with \"-\"." msgstr "Erweiterungsnamen dürfen nicht mit »-« anfangen oder aufhören." -#: commands/extension.c:298 +#: commands/extension.c:299 #, c-format msgid "Extension names must not contain directory separator characters." msgstr "Erweiterungsnamen dürfen keine Verzeichnistrennzeichen enthalten." -#: commands/extension.c:313 commands/extension.c:322 commands/extension.c:331 -#: commands/extension.c:341 +#: commands/extension.c:314 commands/extension.c:323 commands/extension.c:332 +#: commands/extension.c:342 #, c-format msgid "invalid extension version name: \"%s\"" msgstr "ungültiger Erweiterungsversionsname: »%s«" -#: commands/extension.c:314 +#: commands/extension.c:315 #, c-format msgid "Version names must not be empty." msgstr "Versionsnamen dürfen nicht leer sein." -#: commands/extension.c:323 +#: commands/extension.c:324 #, c-format msgid "Version names must not contain \"--\"." msgstr "Versionsnamen dürfen nicht »--« enthalten." -#: commands/extension.c:332 +#: commands/extension.c:333 #, c-format msgid "Version names must not begin or end with \"-\"." msgstr "Versionsnamen dürfen nicht mit »-« anfangen oder aufhören." -#: commands/extension.c:342 +#: commands/extension.c:343 #, c-format msgid "Version names must not contain directory separator characters." msgstr "Versionsnamen dürfen keine Verzeichnistrennzeichen enthalten." -#: commands/extension.c:492 +#: commands/extension.c:493 #, c-format msgid "could not open extension control file \"%s\": %m" msgstr "konnte Erweiterungskontrolldatei »%s« nicht öffnen: %m" -#: commands/extension.c:514 commands/extension.c:524 +#: commands/extension.c:515 commands/extension.c:525 #, c-format msgid "parameter \"%s\" cannot be set in a secondary extension control file" msgstr "Parameter »%s« kann nicht in einer sekundären Erweitungskontrolldatei gesetzt werden" -#: commands/extension.c:563 +#: commands/extension.c:564 #, c-format msgid "\"%s\" is not a valid encoding name" msgstr "»%s« ist kein gültiger Kodierungsname" -#: commands/extension.c:577 +#: commands/extension.c:578 #, c-format msgid "parameter \"%s\" must be a list of extension names" msgstr "Parameter »%s« muss eine Liste von Erweiterungsnamen sein" -#: commands/extension.c:584 +#: commands/extension.c:585 #, c-format msgid "unrecognized parameter \"%s\" in file \"%s\"" msgstr "unbekannter Parameter »%s« in Datei »%s«" -#: commands/extension.c:593 +#: commands/extension.c:594 #, c-format msgid "parameter \"schema\" cannot be specified when \"relocatable\" is true" msgstr "Parameter »schema« kann nicht angegeben werden, wenn »relocatable« an ist" -#: commands/extension.c:756 +#: commands/extension.c:761 #, c-format msgid "transaction control statements are not allowed within an extension script" msgstr "Transaktionskontrollanweisungen sind nicht in einem Erweiterungsskript erlaubt" -#: commands/extension.c:801 +#: commands/extension.c:807 #, c-format msgid "permission denied to create extension \"%s\"" msgstr "keine Berechtigung, um Erweiterung »%s« zu erzeugen" -#: commands/extension.c:803 +#: commands/extension.c:809 #, c-format msgid "Must be superuser to create this extension." msgstr "Nur Superuser können diese Erweiterung anlegen." 
-#: commands/extension.c:807 +#: commands/extension.c:813 #, c-format msgid "permission denied to update extension \"%s\"" msgstr "keine Berechtigung, um Erweiterung »%s« zu aktualisieren" -#: commands/extension.c:809 +#: commands/extension.c:815 #, c-format msgid "Must be superuser to update this extension." msgstr "Nur Superuser können diese Erweiterung aktualisieren." -#: commands/extension.c:1091 +#: commands/extension.c:1097 #, c-format msgid "extension \"%s\" has no update path from version \"%s\" to version \"%s\"" msgstr "Erweiterung »%s« hat keinen Aktualisierungspfad von Version »%s« auf Version »%s«" -#: commands/extension.c:1298 commands/extension.c:2961 +#: commands/extension.c:1304 commands/extension.c:2968 #, c-format msgid "version to install must be specified" msgstr "die zu installierende Version muss angegeben werden" -#: commands/extension.c:1320 +#: commands/extension.c:1326 #, c-format msgid "FROM version must be different from installation target version \"%s\"" msgstr "FROM-Version muss verschieden von der zu installierenden Version »%s« sein" -#: commands/extension.c:1385 -#, fuzzy, c-format -#| msgid "extension \"%s\" has no update path from version \"%s\" to version \"%s\"" +#: commands/extension.c:1391 +#, c-format msgid "extension \"%s\" has no installation script nor update path for version \"%s\"" -msgstr "Erweiterung »%s« hat keinen Aktualisierungspfad von Version »%s« auf Version »%s«" +msgstr "Erweiterung »%s« hat kein Installationsskript und keinen Aktualisierungspfad für Version »%s«" -#: commands/extension.c:1420 +#: commands/extension.c:1426 #, c-format msgid "extension \"%s\" must be installed in schema \"%s\"" msgstr "Erweiterung »%s« muss in Schema »%s« installiert werden" -#: commands/extension.c:1573 +#: commands/extension.c:1579 #, c-format msgid "cyclic dependency detected between extensions \"%s\" and \"%s\"" msgstr "zyklische Abhängigkeit zwischen Erweiterungen »%s« und »%s« entdeckt" -#: commands/extension.c:1578 +#: commands/extension.c:1584 #, c-format msgid "installing required extension \"%s\"" msgstr "installiere benötigte Erweiterung »%s«" -#: commands/extension.c:1602 +#: commands/extension.c:1608 #, c-format msgid "required extension \"%s\" is not installed" msgstr "benötigte Erweiterung »%s« ist nicht installiert" -#: commands/extension.c:1605 +#: commands/extension.c:1611 #, c-format msgid "Use CREATE EXTENSION ... CASCADE to install required extensions too." msgstr "Verwenden Sie CREATE EXTENSION ... CASCADE, um die benötigten Erweiterungen ebenfalls zu installieren." 
-#: commands/extension.c:1642 +#: commands/extension.c:1648 #, c-format msgid "extension \"%s\" already exists, skipping" msgstr "Erweiterung »%s« existiert bereits, wird übersprungen" -#: commands/extension.c:1649 +#: commands/extension.c:1655 #, c-format msgid "extension \"%s\" already exists" msgstr "Erweiterung »%s« existiert bereits" -#: commands/extension.c:1660 +#: commands/extension.c:1666 #, c-format msgid "nested CREATE EXTENSION is not supported" msgstr "geschachteltes CREATE EXTENSION wird nicht unterstützt" -#: commands/extension.c:1841 +#: commands/extension.c:1847 #, c-format msgid "cannot drop extension \"%s\" because it is being modified" msgstr "Erweiterung »%s« kann nicht gelöscht werden, weil sie gerade geändert wird" -#: commands/extension.c:2343 +#: commands/extension.c:2349 #, c-format msgid "pg_extension_config_dump() can only be called from an SQL script executed by CREATE EXTENSION" msgstr "pg_extension_config_dump() kann nur von einem SQL-Skript aufgerufen werden, das von CREATE EXTENSION ausgeführt wird" -#: commands/extension.c:2355 +#: commands/extension.c:2361 #, c-format msgid "OID %u does not refer to a table" msgstr "OID %u bezieht sich nicht auf eine Tabelle" -#: commands/extension.c:2360 +#: commands/extension.c:2366 #, c-format msgid "table \"%s\" is not a member of the extension being created" msgstr "Tabelle »%s« ist kein Mitglied der anzulegenden Erweiterung" -#: commands/extension.c:2716 +#: commands/extension.c:2722 #, c-format msgid "cannot move extension \"%s\" into schema \"%s\" because the extension contains the schema" msgstr "kann Erweiterung »%s« nicht in Schema »%s« verschieben, weil die Erweiterung das Schema enthält" -#: commands/extension.c:2756 commands/extension.c:2819 +#: commands/extension.c:2763 commands/extension.c:2826 #, c-format msgid "extension \"%s\" does not support SET SCHEMA" msgstr "Erweiterung »%s« unterstützt SET SCHEMA nicht" -#: commands/extension.c:2821 +#: commands/extension.c:2828 #, c-format msgid "%s is not in the extension's schema \"%s\"" msgstr "%s ist nicht im Schema der Erweiterung (»%s«)" -#: commands/extension.c:2880 +#: commands/extension.c:2887 #, c-format msgid "nested ALTER EXTENSION is not supported" msgstr "geschachteltes ALTER EXTENSION wird nicht unterstützt" -#: commands/extension.c:2972 +#: commands/extension.c:2979 #, c-format msgid "version \"%s\" of extension \"%s\" is already installed" msgstr "Version »%s« von Erweiterung »%s« ist bereits installiert" -#: commands/extension.c:3223 +#: commands/extension.c:3230 #, c-format msgid "cannot add schema \"%s\" to extension \"%s\" because the schema contains the extension" msgstr "kann Schema »%s« nicht zu Erweiterung »%s« hinzufügen, weil das Schema die Erweiterung enthält" -#: commands/extension.c:3251 +#: commands/extension.c:3258 #, c-format msgid "%s is not a member of extension \"%s\"" msgstr "%s ist kein Mitglied der Erweiterung »%s«" -#: commands/extension.c:3317 +#: commands/extension.c:3324 #, c-format msgid "file \"%s\" is too large" msgstr "Datei »%s« ist zu groß" @@ -6815,536 +6929,641 @@ msgstr "das Ändern des Handlers des Fremddaten-Wrappers kann das Verhalten von msgid "changing the foreign-data wrapper validator can cause the options for dependent objects to become invalid" msgstr "durch Ändern des Validators des Fremddaten-Wrappers können die Optionen von abhängigen Objekten ungültig werden" -#: commands/foreigncmds.c:1158 +#: commands/foreigncmds.c:890 +#, c-format +msgid "server \"%s\" already exists, skipping" +msgstr "Server 
»%s« existiert bereits, wird übersprungen" + +#: commands/foreigncmds.c:1175 #, c-format -msgid "user mapping \"%s\" already exists for server %s" -msgstr "Benutzerabbildung »%s« existiert bereits für Server »%s«" +msgid "user mapping for \"%s\" already exists for server %s, skipping" +msgstr "Benutzerabbildung für »%s« existiert bereits für Server %s, wird übersprungen" -#: commands/foreigncmds.c:1250 commands/foreigncmds.c:1365 +#: commands/foreigncmds.c:1185 #, c-format -msgid "user mapping \"%s\" does not exist for the server" -msgstr "Benutzerabbildung »%s« existiert für den Server nicht" +msgid "user mapping for \"%s\" already exists for server %s" +msgstr "Benutzerabbildung für »%s« existiert bereits für Server %s" -#: commands/foreigncmds.c:1352 +#: commands/foreigncmds.c:1278 commands/foreigncmds.c:1393 +#, c-format +msgid "user mapping for \"%s\" does not exist for the server" +msgstr "Benutzerabbildung für »%s« existiert für den Server nicht" + +#: commands/foreigncmds.c:1380 #, c-format msgid "server does not exist, skipping" msgstr "Server existiert nicht, wird übersprungen" -#: commands/foreigncmds.c:1370 +#: commands/foreigncmds.c:1398 #, c-format -msgid "user mapping \"%s\" does not exist for the server, skipping" -msgstr "Benutzerabbildung »%s« existiert nicht für den Server, wird übersprungen" +msgid "user mapping for \"%s\" does not exist for the server, skipping" +msgstr "Benutzerabbildung für »%s« existiert für den Server nicht, wird übersprungen" -#: commands/foreigncmds.c:1521 foreign/foreign.c:357 +#: commands/foreigncmds.c:1549 foreign/foreign.c:357 #, c-format msgid "foreign-data wrapper \"%s\" has no handler" msgstr "Fremddaten-Wrapper »%s« hat keinen Handler" -#: commands/foreigncmds.c:1527 +#: commands/foreigncmds.c:1555 #, c-format msgid "foreign-data wrapper \"%s\" does not support IMPORT FOREIGN SCHEMA" msgstr "Fremddaten-Wrapper »%s« unterstützt IMPORT FOREIGN SCHEMA nicht" -#: commands/foreigncmds.c:1630 +#: commands/foreigncmds.c:1658 #, c-format msgid "importing foreign table \"%s\"" msgstr "importiere Fremdtabelle »%s«" -#: commands/functioncmds.c:99 +#: commands/functioncmds.c:102 #, c-format msgid "SQL function cannot return shell type %s" msgstr "SQL-Funktion kann keinen Hüllen-Rückgabetyp %s haben" -#: commands/functioncmds.c:104 +#: commands/functioncmds.c:107 #, c-format msgid "return type %s is only a shell" msgstr "Rückgabetyp %s ist nur eine Hülle" -#: commands/functioncmds.c:134 parser/parse_type.c:337 +#: commands/functioncmds.c:137 parser/parse_type.c:337 #, c-format msgid "type modifier cannot be specified for shell type \"%s\"" msgstr "Typmodifikator kann für Hüllentyp »%s« nicht angegeben werden" -#: commands/functioncmds.c:140 +#: commands/functioncmds.c:143 #, c-format msgid "type \"%s\" is not yet defined" msgstr "Typ »%s« ist noch nicht definiert" -#: commands/functioncmds.c:141 +#: commands/functioncmds.c:144 #, c-format msgid "Creating a shell type definition." msgstr "Hüllentypdefinition wird erzeugt." 
-#: commands/functioncmds.c:233 +#: commands/functioncmds.c:236 #, c-format msgid "SQL function cannot accept shell type %s" msgstr "SQL-Funktion kann keinen Hüllentyp %s annehmen" -#: commands/functioncmds.c:239 +#: commands/functioncmds.c:242 #, c-format msgid "aggregate cannot accept shell type %s" msgstr "Aggregatfunktion kann keinen Hüllentyp %s annehmen" -#: commands/functioncmds.c:244 +#: commands/functioncmds.c:247 #, c-format msgid "argument type %s is only a shell" msgstr "Argumenttyp %s ist nur eine Hülle" -#: commands/functioncmds.c:254 +#: commands/functioncmds.c:257 #, c-format msgid "type %s does not exist" msgstr "Typ %s existiert nicht" -#: commands/functioncmds.c:268 +#: commands/functioncmds.c:271 #, c-format msgid "aggregates cannot accept set arguments" msgstr "Aggregatfunktionen können keine SETOF-Argumente haben" -#: commands/functioncmds.c:272 +#: commands/functioncmds.c:275 +#, c-format +msgid "procedures cannot accept set arguments" +msgstr "Prozeduren können keine SETOF-Argumente haben" + +#: commands/functioncmds.c:279 #, c-format msgid "functions cannot accept set arguments" msgstr "Funktionen können keine SETOF-Argumente haben" -#: commands/functioncmds.c:282 +#: commands/functioncmds.c:287 +#, c-format +msgid "procedures cannot have OUT arguments" +msgstr "Prozeduren können keine OUT-Argumente haben" + +#: commands/functioncmds.c:288 +#, c-format +msgid "INOUT arguments are permitted." +msgstr "INOUT-Argumente sind erlaubt." + +#: commands/functioncmds.c:298 #, c-format msgid "VARIADIC parameter must be the last input parameter" msgstr "VARIADIC-Parameter muss der letzte Eingabeparameter sein" -#: commands/functioncmds.c:310 +#: commands/functioncmds.c:328 #, c-format msgid "VARIADIC parameter must be an array" msgstr "VARIADIC-Parameter muss ein Array sein" -#: commands/functioncmds.c:350 +#: commands/functioncmds.c:368 #, c-format msgid "parameter name \"%s\" used more than once" msgstr "Parametername »%s« mehrmals angegeben" -#: commands/functioncmds.c:365 +#: commands/functioncmds.c:383 #, c-format msgid "only input parameters can have default values" msgstr "nur Eingabeparameter können Vorgabewerte haben" -#: commands/functioncmds.c:380 +#: commands/functioncmds.c:398 #, c-format msgid "cannot use table references in parameter default value" msgstr "Tabellenverweise können nicht in Parametervorgabewerten verwendet werden" -#: commands/functioncmds.c:404 +#: commands/functioncmds.c:422 #, c-format msgid "input parameters after one with a default value must also have defaults" msgstr "Eingabeparameter hinter einem mit Vorgabewert müssen auch einen Vorgabewert haben" -#: commands/functioncmds.c:700 +#: commands/functioncmds.c:564 commands/functioncmds.c:714 +#, c-format +msgid "invalid attribute in procedure definition" +msgstr "ungültiges Attribut in Prozedurdefinition" + +#: commands/functioncmds.c:745 #, c-format msgid "no function body specified" msgstr "kein Funktionskörper angegeben" -#: commands/functioncmds.c:710 +#: commands/functioncmds.c:755 #, c-format msgid "no language specified" msgstr "keine Sprache angegeben" -#: commands/functioncmds.c:735 commands/functioncmds.c:1241 +#: commands/functioncmds.c:780 commands/functioncmds.c:1254 #, c-format msgid "COST must be positive" msgstr "COST muss positiv sein" -#: commands/functioncmds.c:743 commands/functioncmds.c:1249 +#: commands/functioncmds.c:788 commands/functioncmds.c:1262 #, c-format msgid "ROWS must be positive" msgstr "ROWS muss positiv sein" -#: commands/functioncmds.c:784 -#, c-format 
-msgid "unrecognized function attribute \"%s\" ignored" -msgstr "unbekanntes Funktionsattribut »%s« ignoriert" - -#: commands/functioncmds.c:836 +#: commands/functioncmds.c:840 #, c-format msgid "only one AS item needed for language \"%s\"" msgstr "nur ein AS-Element benötigt für Sprache »%s«" -#: commands/functioncmds.c:930 commands/functioncmds.c:2110 -#: commands/proclang.c:561 +#: commands/functioncmds.c:935 commands/functioncmds.c:2137 +#: commands/proclang.c:557 #, c-format msgid "language \"%s\" does not exist" msgstr "Sprache »%s« existiert nicht" -#: commands/functioncmds.c:932 commands/functioncmds.c:2112 +#: commands/functioncmds.c:937 commands/functioncmds.c:2139 #, c-format -msgid "Use CREATE LANGUAGE to load the language into the database." -msgstr "Sie müssen CREATE LANGUAGE verwenden, um die Sprache in die Datenbank zu laden." +msgid "Use CREATE EXTENSION to load the language into the database." +msgstr "Verwenden Sie CREATE EXTENSION, um die Sprache in die Datenbank zu laden." -#: commands/functioncmds.c:967 commands/functioncmds.c:1233 +#: commands/functioncmds.c:972 commands/functioncmds.c:1246 #, c-format msgid "only superuser can define a leakproof function" msgstr "nur Superuser können eine »leakproof«-Funktion definieren" -#: commands/functioncmds.c:1009 +#: commands/functioncmds.c:1021 #, c-format msgid "function result type must be %s because of OUT parameters" msgstr "Ergebnistyp der Funktion muss %s sein wegen OUT-Parametern" -#: commands/functioncmds.c:1022 +#: commands/functioncmds.c:1034 #, c-format msgid "function result type must be specified" msgstr "Ergebnistyp der Funktion muss angegeben werden" -#: commands/functioncmds.c:1076 commands/functioncmds.c:1253 +#: commands/functioncmds.c:1086 commands/functioncmds.c:1266 #, c-format msgid "ROWS is not applicable when function does not return a set" msgstr "ROWS ist nicht anwendbar, wenn die Funktion keine Ergebnismenge zurückgibt" -#: commands/functioncmds.c:1405 +#: commands/functioncmds.c:1438 #, c-format msgid "source data type %s is a pseudo-type" msgstr "Quelldatentyp %s ist ein Pseudotyp" -#: commands/functioncmds.c:1411 +#: commands/functioncmds.c:1444 #, c-format msgid "target data type %s is a pseudo-type" msgstr "Zieldatentyp %s ist ein Pseudotyp" -#: commands/functioncmds.c:1435 +#: commands/functioncmds.c:1468 #, c-format msgid "cast will be ignored because the source data type is a domain" msgstr "Typumwandlung wird ignoriert werden, weil der Quelldatentyp eine Domäne ist" -#: commands/functioncmds.c:1440 +#: commands/functioncmds.c:1473 #, c-format msgid "cast will be ignored because the target data type is a domain" msgstr "Typumwandlung wird ignoriert werden, weil der Zieldatentyp eine Domäne ist" -#: commands/functioncmds.c:1465 +#: commands/functioncmds.c:1498 #, c-format msgid "cast function must take one to three arguments" msgstr "Typumwandlungsfunktion muss ein bis drei Argumente haben" -#: commands/functioncmds.c:1469 +#: commands/functioncmds.c:1502 #, c-format msgid "argument of cast function must match or be binary-coercible from source data type" msgstr "Argument der Typumwandlungsfunktion muss mit Quelldatentyp übereinstimmen oder in ihn binär-umwandelbar sein" -#: commands/functioncmds.c:1473 +#: commands/functioncmds.c:1506 #, c-format msgid "second argument of cast function must be type %s" msgstr "zweites Argument der Typumwandlungsfunktion muss Typ %s haben" -#: commands/functioncmds.c:1478 +#: commands/functioncmds.c:1511 #, c-format msgid "third argument of cast function 
must be type %s" msgstr "drittes Argument der Typumwandlungsfunktion muss Typ %s haben" -#: commands/functioncmds.c:1483 +#: commands/functioncmds.c:1516 #, c-format msgid "return data type of cast function must match or be binary-coercible to target data type" msgstr "Rückgabetyp der Typumwandlungsfunktion muss mit Zieldatentyp übereinstimmen oder in ihn binär-umwandelbar sein" -#: commands/functioncmds.c:1494 +#: commands/functioncmds.c:1527 #, c-format msgid "cast function must not be volatile" msgstr "Typumwandlungsfunktion darf nicht VOLATILE sein" -#: commands/functioncmds.c:1499 -#, c-format -msgid "cast function must not be an aggregate function" -msgstr "Typumwandlungsfunktion darf keine Aggregatfunktion sein" - -#: commands/functioncmds.c:1503 +#: commands/functioncmds.c:1532 #, c-format -msgid "cast function must not be a window function" -msgstr "Typumwandlungsfunktion darf keine Fensterfunktion sein" +msgid "cast function must be a normal function" +msgstr "Typumwandlungsfunktion muss eine normale Funktion sein" -#: commands/functioncmds.c:1507 +#: commands/functioncmds.c:1536 #, c-format msgid "cast function must not return a set" msgstr "Typumwandlungsfunktion darf keine Ergebnismenge zurückgeben" -#: commands/functioncmds.c:1533 +#: commands/functioncmds.c:1562 #, c-format msgid "must be superuser to create a cast WITHOUT FUNCTION" msgstr "nur Superuser können Typumwandlungen mit WITHOUT FUNCTION erzeugen" -#: commands/functioncmds.c:1548 +#: commands/functioncmds.c:1577 #, c-format msgid "source and target data types are not physically compatible" msgstr "Quelldatentyp und Zieldatentyp sind nicht physikalisch kompatibel" -#: commands/functioncmds.c:1563 +#: commands/functioncmds.c:1592 #, c-format msgid "composite data types are not binary-compatible" msgstr "zusammengesetzte Datentypen sind nicht binärkompatibel" -#: commands/functioncmds.c:1569 +#: commands/functioncmds.c:1598 #, c-format msgid "enum data types are not binary-compatible" msgstr "Enum-Datentypen sind nicht binärkompatibel" -#: commands/functioncmds.c:1575 +#: commands/functioncmds.c:1604 #, c-format msgid "array data types are not binary-compatible" msgstr "Array-Datentypen sind nicht binärkompatibel" -#: commands/functioncmds.c:1592 +#: commands/functioncmds.c:1621 #, c-format msgid "domain data types must not be marked binary-compatible" msgstr "Domänendatentypen dürfen nicht als binärkompatibel markiert werden" -#: commands/functioncmds.c:1602 +#: commands/functioncmds.c:1631 #, c-format msgid "source data type and target data type are the same" msgstr "Quelldatentyp und Zieldatentyp sind der selbe" -#: commands/functioncmds.c:1635 +#: commands/functioncmds.c:1664 #, c-format msgid "cast from type %s to type %s already exists" msgstr "Typumwandlung von Typ %s in Typ %s existiert bereits" -#: commands/functioncmds.c:1708 +#: commands/functioncmds.c:1737 #, c-format msgid "cast from type %s to type %s does not exist" msgstr "Typumwandlung von Typ %s in Typ %s existiert nicht" -#: commands/functioncmds.c:1747 +#: commands/functioncmds.c:1776 #, c-format msgid "transform function must not be volatile" msgstr "Transformationsfunktion darf nicht VOLATILE sein" -#: commands/functioncmds.c:1751 -#, c-format -msgid "transform function must not be an aggregate function" -msgstr "Transformationsfunktion darf keine Aggregatfunktion sein" - -#: commands/functioncmds.c:1755 +#: commands/functioncmds.c:1780 #, c-format -msgid "transform function must not be a window function" -msgstr "Transformationsfunktion darf 
keine Fensterfunktion sein" +msgid "transform function must be a normal function" +msgstr "Transformationsfunktion muss eine normale Funktion sein" -#: commands/functioncmds.c:1759 +#: commands/functioncmds.c:1784 #, c-format msgid "transform function must not return a set" msgstr "Transformationsfunktion darf keine Ergebnismenge zurückgeben" -#: commands/functioncmds.c:1763 +#: commands/functioncmds.c:1788 #, c-format msgid "transform function must take one argument" msgstr "Transformationsfunktion muss ein Argument haben" -#: commands/functioncmds.c:1767 +#: commands/functioncmds.c:1792 #, c-format msgid "first argument of transform function must be type %s" msgstr "erstes Argument der Transformationsfunktion muss Typ %s haben" -#: commands/functioncmds.c:1805 +#: commands/functioncmds.c:1830 #, c-format msgid "data type %s is a pseudo-type" msgstr "Datentyp %s ist ein Pseudotyp" -#: commands/functioncmds.c:1811 +#: commands/functioncmds.c:1836 #, c-format msgid "data type %s is a domain" msgstr "Datentyp %s ist eine Domäne" -#: commands/functioncmds.c:1851 +#: commands/functioncmds.c:1876 #, c-format msgid "return data type of FROM SQL function must be %s" msgstr "Rückgabetyp der FROM-SQL-Funktion muss %s sein" -#: commands/functioncmds.c:1877 +#: commands/functioncmds.c:1902 #, c-format msgid "return data type of TO SQL function must be the transform data type" msgstr "Rückgabetyp der TO-SQL-Funktion muss der zu transformierende Datentyp sein" -#: commands/functioncmds.c:1904 +#: commands/functioncmds.c:1929 #, c-format msgid "transform for type %s language \"%s\" already exists" msgstr "Transformation für Typ %s Sprache »%s« existiert bereits" -#: commands/functioncmds.c:1993 +#: commands/functioncmds.c:2018 #, c-format msgid "transform for type %s language \"%s\" does not exist" msgstr "Transformation für Typ %s Sprache »%s« existiert nicht" -#: commands/functioncmds.c:2044 +#: commands/functioncmds.c:2069 #, c-format msgid "function %s already exists in schema \"%s\"" msgstr "Funktion %s existiert bereits in Schema »%s«" -#: commands/functioncmds.c:2097 +#: commands/functioncmds.c:2124 #, c-format msgid "no inline code specified" msgstr "kein Inline-Code angegeben" -#: commands/functioncmds.c:2142 +#: commands/functioncmds.c:2170 #, c-format msgid "language \"%s\" does not support inline code execution" msgstr "Sprache »%s« unterstützt das Ausführen von Inline-Code nicht" -#: commands/indexcmds.c:350 +#: commands/functioncmds.c:2259 +#, c-format +msgid "cannot pass more than %d argument to a procedure" +msgid_plural "cannot pass more than %d arguments to a procedure" +msgstr[0] "kann nicht mehr als %d Argument an eine Prozedur übergeben" +msgstr[1] "kann nicht mehr als %d Argumente an eine Prozedur übergeben" + +#: commands/indexcmds.c:375 +#, c-format +msgid "included columns must not intersect with key columns" +msgstr "" + +#: commands/indexcmds.c:397 #, c-format msgid "must specify at least one column" msgstr "mindestens eine Spalte muss angegeben werden" -#: commands/indexcmds.c:354 +#: commands/indexcmds.c:401 #, c-format msgid "cannot use more than %d columns in an index" msgstr "Index kann nicht mehr als %d Spalten enthalten" -#: commands/indexcmds.c:385 +#: commands/indexcmds.c:441 #, c-format msgid "cannot create index on foreign table \"%s\"" msgstr "kann keinen Index für Fremdtabelle »%s« erzeugen" -#: commands/indexcmds.c:390 -#, c-format -msgid "cannot create index on partitioned table \"%s\"" +#: commands/indexcmds.c:466 +#, fuzzy, c-format +#| msgid "cannot create 
index on partitioned table \"%s\"" +msgid "cannot create index on partitioned table \"%s\" concurrently" msgstr "kann keinen Index für partitionierte Tabelle »%s« erzeugen" -#: commands/indexcmds.c:405 +#: commands/indexcmds.c:471 +#, fuzzy, c-format +#| msgid "cannot create index on partitioned table \"%s\"" +msgid "cannot create exclusion constraints on partitioned table \"%s\"" +msgstr "kann keinen Index für partitionierte Tabelle »%s« erzeugen" + +#: commands/indexcmds.c:481 #, c-format msgid "cannot create indexes on temporary tables of other sessions" msgstr "kann keine Indexe für temporäre Tabellen anderer Sitzungen erzeugen" -#: commands/indexcmds.c:461 commands/tablecmds.c:579 -#: commands/tablecmds.c:10166 +#: commands/indexcmds.c:546 commands/tablecmds.c:614 commands/tablecmds.c:10938 #, c-format msgid "only shared relations can be placed in pg_global tablespace" msgstr "nur geteilte Relationen können in den Tablespace »pg_global« gelegt werden" -#: commands/indexcmds.c:494 +#: commands/indexcmds.c:579 #, c-format msgid "substituting access method \"gist\" for obsolete method \"rtree\"" msgstr "ersetze Zugriffsmethode »gist« für obsolete Methode »rtree«" -#: commands/indexcmds.c:512 -#, c-format -msgid "hash indexes are not WAL-logged and their use is discouraged" -msgstr "Hash-Indexe werden nicht im WAL geloggt und von ihrer Verwendung wird abgeraten." - -#: commands/indexcmds.c:517 +#: commands/indexcmds.c:597 #, c-format msgid "access method \"%s\" does not support unique indexes" msgstr "Zugriffsmethode »%s« unterstützt keine Unique Indexe" -#: commands/indexcmds.c:522 +#: commands/indexcmds.c:602 +#, fuzzy, c-format +#| msgid "access method \"%s\" does not support unique indexes" +msgid "access method \"%s\" does not support included columns" +msgstr "Zugriffsmethode »%s« unterstützt keine Unique Indexe" + +#: commands/indexcmds.c:607 #, c-format msgid "access method \"%s\" does not support multicolumn indexes" msgstr "Zugriffsmethode »%s« unterstützt keine mehrspaltigen Indexe" -#: commands/indexcmds.c:527 +#: commands/indexcmds.c:612 #, c-format msgid "access method \"%s\" does not support exclusion constraints" msgstr "Zugriffsmethode »%s« unterstützt keine Exclusion-Constraints" -#: commands/indexcmds.c:599 commands/indexcmds.c:619 +#: commands/indexcmds.c:724 +#, fuzzy, c-format +#| msgid "merging constraint \"%s\" with inherited definition" +msgid "unsupported %s constraint with partition key definition" +msgstr "Constraint »%s« wird mit geerbter Definition zusammengeführt" + +#: commands/indexcmds.c:726 +#, fuzzy, c-format +#| msgid "cannot use subquery in partition key expression" +msgid "%s constraints cannot be used when partition keys include expressions." +msgstr "Unteranfragen können nicht in Partitionierungsschlüsselausdrücken verwendet werden" + +#: commands/indexcmds.c:744 +#, fuzzy, c-format +#| msgid "duplicate column name in statistics definition" +msgid "insufficient columns in %s constraint definition" +msgstr "doppelter Spaltenname in Statistikdefinition" + +#: commands/indexcmds.c:746 +#, c-format +msgid "%s constraint on table \"%s\" lacks column \"%s\" which is part of the partition key." 
+msgstr "" + +#: commands/indexcmds.c:765 commands/indexcmds.c:785 #, c-format msgid "index creation on system columns is not supported" msgstr "Indexerzeugung für Systemspalten wird nicht unterstützt" -#: commands/indexcmds.c:644 +#: commands/indexcmds.c:810 #, c-format msgid "%s %s will create implicit index \"%s\" for table \"%s\"" msgstr "%s %s erstellt implizit einen Index »%s« für Tabelle »%s«" -#: commands/indexcmds.c:991 +#: commands/indexcmds.c:1371 #, c-format msgid "functions in index predicate must be marked IMMUTABLE" msgstr "Funktionen im Indexprädikat müssen als IMMUTABLE markiert sein" -#: commands/indexcmds.c:1057 parser/parse_utilcmd.c:1946 +#: commands/indexcmds.c:1437 parser/parse_utilcmd.c:2238 +#: parser/parse_utilcmd.c:2362 #, c-format msgid "column \"%s\" named in key does not exist" msgstr "Spalte »%s«, die im Schlüssel verwendet wird, existiert nicht" -#: commands/indexcmds.c:1117 +#: commands/indexcmds.c:1461 parser/parse_utilcmd.c:1587 +#, fuzzy, c-format +#| msgid "lossy distance functions are not supported in index-only scans" +msgid "expressions are not supported in included columns" +msgstr "verlustbehaftete Abstandsfunktionen werden in Index-Only-Scans nicht unterstützt" + +#: commands/indexcmds.c:1502 #, c-format msgid "functions in index expression must be marked IMMUTABLE" msgstr "Funktionen im Indexausdruck müssen als IMMUTABLE markiert sein" -#: commands/indexcmds.c:1140 +#: commands/indexcmds.c:1517 +#, fuzzy, c-format +#| msgid "identity columns are not supported on partitions" +msgid "including column does not support a collation" +msgstr "Identitätsspalten in partitionierten Tabellen werden nicht unterstützt" + +#: commands/indexcmds.c:1521 +#, fuzzy, c-format +#| msgid "identity columns are not supported on partitions" +msgid "including column does not support an operator class" +msgstr "Identitätsspalten in partitionierten Tabellen werden nicht unterstützt" + +#: commands/indexcmds.c:1525 +#, fuzzy, c-format +#| msgid "access method \"%s\" does not support ASC/DESC options" +msgid "including column does not support ASC/DESC options" +msgstr "Zugriffsmethode »%s« unterstützt die Optionen ASC/DESC nicht" + +#: commands/indexcmds.c:1529 +#, fuzzy, c-format +#| msgid "access method \"%s\" does not support NULLS FIRST/LAST options" +msgid "including column does not support NULLS FIRST/LAST options" +msgstr "Zugriffsmethode »%s« unterstützt die Optionen NULLS FIRST/LAST nicht" + +#: commands/indexcmds.c:1556 #, c-format msgid "could not determine which collation to use for index expression" msgstr "konnte die für den Indexausdruck zu verwendende Sortierfolge nicht bestimmen" -#: commands/indexcmds.c:1148 commands/tablecmds.c:13007 -#: commands/typecmds.c:831 parser/parse_expr.c:2730 parser/parse_type.c:549 -#: parser/parse_utilcmd.c:2873 utils/adt/misc.c:661 +#: commands/indexcmds.c:1564 commands/tablecmds.c:13810 commands/typecmds.c:833 +#: parser/parse_expr.c:2772 parser/parse_type.c:549 parser/parse_utilcmd.c:3393 +#: utils/adt/misc.c:681 #, c-format msgid "collations are not supported by type %s" msgstr "Sortierfolgen werden von Typ %s nicht unterstützt" -#: commands/indexcmds.c:1186 +#: commands/indexcmds.c:1602 #, c-format msgid "operator %s is not commutative" msgstr "Operator %s ist nicht kommutativ" -#: commands/indexcmds.c:1188 +#: commands/indexcmds.c:1604 #, c-format msgid "Only commutative operators can be used in exclusion constraints." msgstr "In Exclusion-Constraints können nur kommutative Operatoren verwendet werden." 
-#: commands/indexcmds.c:1214 +#: commands/indexcmds.c:1630 #, c-format msgid "operator %s is not a member of operator family \"%s\"" msgstr "Operator %s ist kein Mitglied der Operatorfamilie »%s«" -#: commands/indexcmds.c:1217 +#: commands/indexcmds.c:1633 #, c-format msgid "The exclusion operator must be related to the index operator class for the constraint." msgstr "Der Exklusionsoperator muss in Beziehung zur Indexoperatorklasse des Constraints stehen." -#: commands/indexcmds.c:1252 +#: commands/indexcmds.c:1668 #, c-format msgid "access method \"%s\" does not support ASC/DESC options" msgstr "Zugriffsmethode »%s« unterstützt die Optionen ASC/DESC nicht" -#: commands/indexcmds.c:1257 +#: commands/indexcmds.c:1673 #, c-format msgid "access method \"%s\" does not support NULLS FIRST/LAST options" msgstr "Zugriffsmethode »%s« unterstützt die Optionen NULLS FIRST/LAST nicht" -#: commands/indexcmds.c:1316 commands/typecmds.c:1928 +#: commands/indexcmds.c:1732 commands/typecmds.c:1996 #, c-format msgid "data type %s has no default operator class for access method \"%s\"" msgstr "Datentyp %s hat keine Standardoperatorklasse für Zugriffsmethode »%s«" -#: commands/indexcmds.c:1318 +#: commands/indexcmds.c:1734 #, c-format msgid "You must specify an operator class for the index or define a default operator class for the data type." msgstr "Sie müssen für den Index eine Operatorklasse angeben oder eine Standardoperatorklasse für den Datentyp definieren." -#: commands/indexcmds.c:1347 commands/indexcmds.c:1355 -#: commands/opclasscmds.c:205 +#: commands/indexcmds.c:1763 commands/indexcmds.c:1771 +#: commands/opclasscmds.c:206 #, c-format msgid "operator class \"%s\" does not exist for access method \"%s\"" msgstr "Operatorklasse »%s« existiert nicht für Zugriffsmethode »%s«" -#: commands/indexcmds.c:1368 commands/typecmds.c:1916 +#: commands/indexcmds.c:1784 commands/typecmds.c:1984 #, c-format msgid "operator class \"%s\" does not accept data type %s" msgstr "Operatorklasse »%s« akzeptiert Datentyp %s nicht" -#: commands/indexcmds.c:1458 +#: commands/indexcmds.c:1874 #, c-format msgid "there are multiple default operator classes for data type %s" msgstr "es gibt mehrere Standardoperatorklassen für Datentyp %s" -#: commands/indexcmds.c:1849 +#: commands/indexcmds.c:2274 #, c-format msgid "table \"%s\" has no indexes" msgstr "Tabelle »%s« hat keine Indexe" -#: commands/indexcmds.c:1904 +#: commands/indexcmds.c:2329 #, c-format msgid "can only reindex the currently open database" msgstr "aktuell geöffnete Datenbank kann nicht reindiziert werden" -#: commands/indexcmds.c:2004 +#: commands/indexcmds.c:2435 #, c-format msgid "table \"%s.%s\" was reindexed" msgstr "Tabelle »%s.%s« wurde neu indiziert" +#: commands/indexcmds.c:2457 +#, c-format +msgid "REINDEX is not yet implemented for partitioned indexes" +msgstr "REINDEX ist für partitionierte Indexe noch nicht implementiert" + +#: commands/lockcmds.c:100 +#, fuzzy, c-format +#| msgid "\"%s\" is not a table or view" +msgid "\"%s\" is not a table or a view" +msgstr "»%s« ist keine Tabelle oder Sicht" + +#: commands/lockcmds.c:224 rewrite/rewriteHandler.c:1824 +#: rewrite/rewriteHandler.c:3424 +#, c-format +msgid "infinite recursion detected in rules for relation \"%s\"" +msgstr "unendliche Rekursion entdeckt in Regeln für Relation »%s«" + #: commands/matview.c:179 #, c-format msgid "CONCURRENTLY cannot be used when the materialized view is not populated" @@ -7355,235 +7574,261 @@ msgstr "CONCURRENTLY kann nicht verwendet werden, wenn die 
materialisierte Sicht msgid "CONCURRENTLY and WITH NO DATA options cannot be used together" msgstr "Optionen CONCURRENTLY und WITH NO DATA können nicht zusammen verwendet werden" -#: commands/matview.c:255 +#: commands/matview.c:244 #, c-format msgid "cannot refresh materialized view \"%s\" concurrently" msgstr "kann materialisierte Sicht »%s« nicht nebenläufig auffrischen" -#: commands/matview.c:258 +#: commands/matview.c:247 #, c-format msgid "Create a unique index with no WHERE clause on one or more columns of the materialized view." msgstr "Erzeugen Sie einen Unique Index ohne WHERE-Klausel für eine oder mehrere Spalten der materialisierten Sicht." -#: commands/matview.c:654 +#: commands/matview.c:645 #, c-format msgid "new data for materialized view \"%s\" contains duplicate rows without any null columns" msgstr "neue Daten für materialisierte Sicht »%s« enthalten doppelte Zeilen ohne Spalten mit NULL-Werten" -#: commands/matview.c:656 +#: commands/matview.c:647 #, c-format msgid "Row: %s" msgstr "Zeile: %s" -#: commands/opclasscmds.c:126 +#: commands/opclasscmds.c:127 #, c-format msgid "operator family \"%s\" does not exist for access method \"%s\"" msgstr "Operatorfamilie »%s« existiert nicht für Zugriffsmethode »%s«" -#: commands/opclasscmds.c:264 +#: commands/opclasscmds.c:265 #, c-format msgid "operator family \"%s\" for access method \"%s\" already exists" msgstr "Operatorfamilie »%s« für Zugriffsmethode »%s« existiert bereits" -#: commands/opclasscmds.c:402 +#: commands/opclasscmds.c:403 #, c-format msgid "must be superuser to create an operator class" msgstr "nur Superuser können Operatorklassen erzeugen" -#: commands/opclasscmds.c:475 commands/opclasscmds.c:849 -#: commands/opclasscmds.c:973 +#: commands/opclasscmds.c:476 commands/opclasscmds.c:850 +#: commands/opclasscmds.c:974 #, c-format msgid "invalid operator number %d, must be between 1 and %d" msgstr "ungültige Operatornummer %d, muss zwischen 1 und %d sein" -#: commands/opclasscmds.c:519 commands/opclasscmds.c:893 -#: commands/opclasscmds.c:988 +#: commands/opclasscmds.c:520 commands/opclasscmds.c:894 +#: commands/opclasscmds.c:989 #, c-format msgid "invalid procedure number %d, must be between 1 and %d" msgstr "ungültige Prozedurnummer %d, muss zwischen 1 und %d sein" -#: commands/opclasscmds.c:548 +#: commands/opclasscmds.c:549 #, c-format msgid "storage type specified more than once" msgstr "Storage-Typ mehrmals angegeben" -#: commands/opclasscmds.c:575 +#: commands/opclasscmds.c:576 #, c-format msgid "storage type cannot be different from data type for access method \"%s\"" msgstr "Storage-Typ kann nicht vom Datentyp der Zugriffsmethode »%s« verschieden sein" -#: commands/opclasscmds.c:591 +#: commands/opclasscmds.c:592 #, c-format msgid "operator class \"%s\" for access method \"%s\" already exists" msgstr "Operatorklasse »%s« für Zugriffsmethode »%s« existiert bereits" -#: commands/opclasscmds.c:619 +#: commands/opclasscmds.c:620 #, c-format msgid "could not make operator class \"%s\" be default for type %s" msgstr "konnte Operatorklasse »%s« nicht zum Standard für Typ %s machen" -#: commands/opclasscmds.c:622 +#: commands/opclasscmds.c:623 #, c-format msgid "Operator class \"%s\" already is the default." msgstr "Operatorklasse »%s« ist bereits der Standard." 
-#: commands/opclasscmds.c:747 +#: commands/opclasscmds.c:748 #, c-format msgid "must be superuser to create an operator family" msgstr "nur Superuser können Operatorfamilien erzeugen" -#: commands/opclasscmds.c:803 +#: commands/opclasscmds.c:804 #, c-format msgid "must be superuser to alter an operator family" msgstr "nur Superuser können Operatorfamilien ändern" -#: commands/opclasscmds.c:858 +#: commands/opclasscmds.c:859 #, c-format msgid "operator argument types must be specified in ALTER OPERATOR FAMILY" msgstr "Operatorargumenttypen müssen in ALTER OPERATOR FAMILY angegeben werden" -#: commands/opclasscmds.c:921 +#: commands/opclasscmds.c:922 #, c-format msgid "STORAGE cannot be specified in ALTER OPERATOR FAMILY" msgstr "STORAGE kann in ALTER OPERATOR FAMILY nicht angegeben werden" -#: commands/opclasscmds.c:1043 +#: commands/opclasscmds.c:1044 #, c-format msgid "one or two argument types must be specified" msgstr "ein oder zwei Argumenttypen müssen angegeben werden" -#: commands/opclasscmds.c:1069 +#: commands/opclasscmds.c:1070 #, c-format msgid "index operators must be binary" msgstr "Indexoperatoren müssen binär sein" -#: commands/opclasscmds.c:1088 +#: commands/opclasscmds.c:1089 #, c-format msgid "access method \"%s\" does not support ordering operators" msgstr "Zugriffsmethode »%s« unterstützt keine Sortieroperatoren" -#: commands/opclasscmds.c:1099 +#: commands/opclasscmds.c:1100 #, c-format msgid "index search operators must return boolean" msgstr "Indexsuchoperatoren müssen Typ boolean zurückgeben" -#: commands/opclasscmds.c:1141 +#: commands/opclasscmds.c:1144 #, c-format msgid "btree comparison procedures must have two arguments" msgstr "btree-Vergleichsprozeduren müssen zwei Argumente haben" -#: commands/opclasscmds.c:1145 +#: commands/opclasscmds.c:1148 #, c-format msgid "btree comparison procedures must return integer" msgstr "btree-Vergleichsprozeduren müssen Typ integer zurückgeben" -#: commands/opclasscmds.c:1162 +#: commands/opclasscmds.c:1165 #, c-format msgid "btree sort support procedures must accept type \"internal\"" msgstr "btree-Sortierunterstützungsprozeduren müssen Typ »internal« akzeptieren" -#: commands/opclasscmds.c:1166 +#: commands/opclasscmds.c:1169 #, c-format msgid "btree sort support procedures must return void" msgstr "btree-Sortierunterstützungsprozeduren müssen Typ void zurückgeben" -#: commands/opclasscmds.c:1178 -#, c-format -msgid "hash procedures must have one argument" +#: commands/opclasscmds.c:1180 +#, fuzzy, c-format +#| msgid "btree comparison procedures must have two arguments" +msgid "btree in_range procedures must have five arguments" +msgstr "btree-Vergleichsprozeduren müssen zwei Argumente haben" + +#: commands/opclasscmds.c:1184 +#, fuzzy, c-format +#| msgid "btree comparison procedures must return integer" +msgid "btree in_range procedures must return boolean" +msgstr "btree-Vergleichsprozeduren müssen Typ integer zurückgeben" + +#: commands/opclasscmds.c:1203 +#, fuzzy, c-format +#| msgid "hash procedures must have one argument" +msgid "hash procedure 1 must have one argument" msgstr "Hash-Prozeduren müssen ein Argument haben" -#: commands/opclasscmds.c:1182 -#, c-format -msgid "hash procedures must return integer" +#: commands/opclasscmds.c:1207 +#, fuzzy, c-format +#| msgid "hash procedures must return integer" +msgid "hash procedure 1 must return integer" +msgstr "Hash-Prozeduren müssen Typ integer zurückgeben" + +#: commands/opclasscmds.c:1214 +#, fuzzy, c-format +#| msgid "hash procedures must have one argument" +msgid 
"hash procedure 2 must have two arguments" +msgstr "Hash-Prozeduren müssen ein Argument haben" + +#: commands/opclasscmds.c:1218 +#, fuzzy, c-format +#| msgid "hash procedures must return integer" +msgid "hash procedure 2 must return bigint" msgstr "Hash-Prozeduren müssen Typ integer zurückgeben" -#: commands/opclasscmds.c:1206 +#: commands/opclasscmds.c:1243 #, c-format msgid "associated data types must be specified for index support procedure" msgstr "zugehörige Datentypen müssen für Indexunterstützungsprozedur angegeben werden" -#: commands/opclasscmds.c:1231 +#: commands/opclasscmds.c:1268 #, c-format msgid "procedure number %d for (%s,%s) appears more than once" msgstr "Prozedurnummer %d für (%s,%s) einscheint mehrmals" -#: commands/opclasscmds.c:1238 +#: commands/opclasscmds.c:1275 #, c-format msgid "operator number %d for (%s,%s) appears more than once" msgstr "Operatornummer %d für (%s,%s) einscheint mehrmals" -#: commands/opclasscmds.c:1287 +#: commands/opclasscmds.c:1324 #, c-format msgid "operator %d(%s,%s) already exists in operator family \"%s\"" msgstr "Operator %d(%s,%s) existiert bereits in Operatorfamilie »%s«" -#: commands/opclasscmds.c:1401 +#: commands/opclasscmds.c:1438 #, c-format msgid "function %d(%s,%s) already exists in operator family \"%s\"" msgstr "Funktion %d(%s,%s) existiert bereits in Operatorfamilie »%s«" -#: commands/opclasscmds.c:1489 +#: commands/opclasscmds.c:1526 #, c-format msgid "operator %d(%s,%s) does not exist in operator family \"%s\"" msgstr "Operator %d(%s,%s) existiert nicht in Operatorfamilie »%s«" -#: commands/opclasscmds.c:1529 +#: commands/opclasscmds.c:1566 #, c-format msgid "function %d(%s,%s) does not exist in operator family \"%s\"" msgstr "Funktion %d(%s,%s) existiert nicht in Operatorfamilie »%s«" -#: commands/opclasscmds.c:1659 +#: commands/opclasscmds.c:1696 #, c-format msgid "operator class \"%s\" for access method \"%s\" already exists in schema \"%s\"" msgstr "Operatorklasse »%s« für Zugriffsmethode »%s« existiert bereits in Schema »%s«" -#: commands/opclasscmds.c:1682 +#: commands/opclasscmds.c:1719 #, c-format msgid "operator family \"%s\" for access method \"%s\" already exists in schema \"%s\"" msgstr "Operatorfamilie »%s« für Zugriffsmethode »%s« existiert bereits in Schema »%s«" -#: commands/operatorcmds.c:114 commands/operatorcmds.c:122 +#: commands/operatorcmds.c:113 commands/operatorcmds.c:121 #, c-format msgid "SETOF type not allowed for operator argument" msgstr "SETOF-Typ nicht als Operatorargument erlaubt" -#: commands/operatorcmds.c:152 commands/operatorcmds.c:454 +#: commands/operatorcmds.c:151 commands/operatorcmds.c:453 #, c-format msgid "operator attribute \"%s\" not recognized" msgstr "Operator-Attribut »%s« unbekannt" -#: commands/operatorcmds.c:163 +#: commands/operatorcmds.c:162 #, c-format msgid "operator procedure must be specified" msgstr "Operatorprozedur muss angegeben werden" -#: commands/operatorcmds.c:174 +#: commands/operatorcmds.c:173 #, c-format msgid "at least one of leftarg or rightarg must be specified" msgstr "entweder leftarg oder rightarg (oder beides) muss angegeben werden" -#: commands/operatorcmds.c:278 +#: commands/operatorcmds.c:277 #, c-format msgid "restriction estimator function %s must return type %s" msgstr "Restriktionsschätzfunktion %s muss Typ %s zurückgeben" -#: commands/operatorcmds.c:324 +#: commands/operatorcmds.c:323 #, c-format msgid "join estimator function %s must return type %s" msgstr "Join-Schätzfunktion %s muss Typ %s zurückgeben" -#: commands/operatorcmds.c:448 +#: 
commands/operatorcmds.c:447 #, c-format msgid "operator attribute \"%s\" cannot be changed" msgstr "Operator-Attribut »%s« kann nicht geändert werden" -#: commands/policy.c:87 commands/policy.c:397 commands/policy.c:486 -#: commands/tablecmds.c:1134 commands/tablecmds.c:1490 -#: commands/tablecmds.c:2467 commands/tablecmds.c:4647 -#: commands/tablecmds.c:6742 commands/tablecmds.c:12660 -#: commands/tablecmds.c:12695 commands/trigger.c:251 commands/trigger.c:1235 -#: commands/trigger.c:1344 rewrite/rewriteDefine.c:272 -#: rewrite/rewriteDefine.c:911 +#: commands/policy.c:87 commands/policy.c:400 commands/policy.c:490 +#: commands/tablecmds.c:1275 commands/tablecmds.c:1732 +#: commands/tablecmds.c:2705 commands/tablecmds.c:4944 +#: commands/tablecmds.c:7343 commands/tablecmds.c:13428 +#: commands/tablecmds.c:13463 commands/trigger.c:316 commands/trigger.c:1526 +#: commands/trigger.c:1635 rewrite/rewriteDefine.c:272 +#: rewrite/rewriteDefine.c:924 #, c-format msgid "permission denied: \"%s\" is a system catalog" msgstr "keine Berechtigung: »%s« ist ein Systemkatalog" @@ -7598,44 +7843,43 @@ msgstr "angegebene Rollen außer PUBLIC werden ignoriert" msgid "All roles are members of the PUBLIC role." msgstr "Alle Rollen sind Mitglieder der Rolle PUBLIC." -#: commands/policy.c:510 +#: commands/policy.c:514 #, c-format msgid "role \"%s\" could not be removed from policy \"%s\" on \"%s\"" msgstr "Rolle »%s« konnte nicht aus Policy »%s« für »%s« entfernt werden" -#: commands/policy.c:716 +#: commands/policy.c:720 #, c-format msgid "WITH CHECK cannot be applied to SELECT or DELETE" msgstr "WITH CHECK kann nicht auf SELECT oder DELETE angewendet werden" -#: commands/policy.c:725 commands/policy.c:1023 +#: commands/policy.c:729 commands/policy.c:1027 #, c-format msgid "only WITH CHECK expression allowed for INSERT" msgstr "für INSERT sind nur WITH-CHECK-Ausdrücke erlaubt" -#: commands/policy.c:798 commands/policy.c:1243 +#: commands/policy.c:802 commands/policy.c:1247 #, c-format msgid "policy \"%s\" for table \"%s\" already exists" msgstr "Policy »%s« für Tabelle »%s« existiert bereits" -#: commands/policy.c:995 commands/policy.c:1271 commands/policy.c:1343 +#: commands/policy.c:999 commands/policy.c:1275 commands/policy.c:1347 #, c-format msgid "policy \"%s\" for table \"%s\" does not exist" msgstr "Policy »%s« für Tabelle »%s« existiert nicht" -#: commands/policy.c:1013 +#: commands/policy.c:1017 #, c-format msgid "only USING expression allowed for SELECT, DELETE" msgstr "für SELECT und DELETE sind nur USING-Ausdrücke erlaubt" -#: commands/portalcmds.c:58 commands/portalcmds.c:182 -#: commands/portalcmds.c:234 +#: commands/portalcmds.c:58 commands/portalcmds.c:182 commands/portalcmds.c:234 #, c-format msgid "invalid cursor name: must not be empty" msgstr "ungültiger Cursorname: darf nicht leer sein" #: commands/portalcmds.c:190 commands/portalcmds.c:244 -#: executor/execCurrent.c:67 utils/adt/xml.c:2460 utils/adt/xml.c:2627 +#: executor/execCurrent.c:68 utils/adt/xml.c:2469 utils/adt/xml.c:2639 #, c-format msgid "cursor \"%s\" does not exist" msgstr "Cursor »%s« existiert nicht" @@ -7645,7 +7889,7 @@ msgstr "Cursor »%s« existiert nicht" msgid "invalid statement name: must not be empty" msgstr "ungültiger Anweisungsname: darf nicht leer sein" -#: commands/prepare.c:141 parser/parse_param.c:304 tcop/postgres.c:1350 +#: commands/prepare.c:141 parser/parse_param.c:304 tcop/postgres.c:1376 #, c-format msgid "could not determine data type of parameter $%d" msgstr "konnte Datentyp von Parameter $%d nicht 
ermitteln" @@ -7675,67 +7919,81 @@ msgstr "%d Parameter erwartet aber %d erhalten." msgid "parameter $%d of type %s cannot be coerced to the expected type %s" msgstr "Parameter $%d mit Typ %s kann nicht in erwarteten Typ %s umgewandelt werden" -#: commands/prepare.c:474 +#: commands/prepare.c:475 #, c-format msgid "prepared statement \"%s\" already exists" msgstr "vorbereitete Anweisung »%s« existiert bereits" -#: commands/prepare.c:513 +#: commands/prepare.c:514 #, c-format msgid "prepared statement \"%s\" does not exist" msgstr "vorbereitete Anweisung »%s« existiert nicht" -#: commands/proclang.c:87 +#: commands/proclang.c:86 #, c-format msgid "using pg_pltemplate information instead of CREATE LANGUAGE parameters" msgstr "verwende Informationen aus pg_pltemplate statt der CREATE-LANGUAGE-Parameter" -#: commands/proclang.c:97 +#: commands/proclang.c:96 #, c-format msgid "must be superuser to create procedural language \"%s\"" msgstr "nur Superuser können prozedurale Sprache »%s« erzeugen" -#: commands/proclang.c:252 +#: commands/proclang.c:248 #, c-format msgid "unsupported language \"%s\"" msgstr "nicht unterstützte Sprache »%s«" -#: commands/proclang.c:254 +#: commands/proclang.c:250 #, c-format msgid "The supported languages are listed in the pg_pltemplate system catalog." msgstr "Die unterstützten Sprachen stehen im Systemkatalog pg_pltemplate." -#: commands/proclang.c:262 +#: commands/proclang.c:258 #, c-format msgid "must be superuser to create custom procedural language" msgstr "nur Superuser können maßgeschneiderte prozedurale Sprachen erzeugen" -#: commands/proclang.c:281 commands/trigger.c:523 commands/typecmds.c:457 -#: commands/typecmds.c:474 +#: commands/proclang.c:277 commands/trigger.c:688 commands/typecmds.c:454 +#: commands/typecmds.c:471 #, c-format msgid "changing return type of function %s from %s to %s" msgstr "ändere Rückgabetyp von Funktion %s von %s in %s" -#: commands/publicationcmds.c:179 +#: commands/publicationcmds.c:109 +#, c-format +msgid "invalid list syntax for \"publish\" option" +msgstr "ungültige Listensyntax für »publish«-Option" + +#: commands/publicationcmds.c:127 +#, c-format +msgid "unrecognized \"publish\" value: \"%s\"" +msgstr "unbekannter »publish«-Wert: »%s«" + +#: commands/publicationcmds.c:133 +#, c-format +msgid "unrecognized publication parameter: %s" +msgstr "unbekannter Publikationsparameter: %s" + +#: commands/publicationcmds.c:166 #, c-format msgid "must be superuser to create FOR ALL TABLES publication" msgstr "nur Superuser können eine Publikation FOR ALL TABLES erzeugen" -#: commands/publicationcmds.c:351 +#: commands/publicationcmds.c:335 #, c-format msgid "publication \"%s\" is defined as FOR ALL TABLES" msgstr "Publikation »%s« ist als FOR ALL TABLES definiert" -#: commands/publicationcmds.c:353 +#: commands/publicationcmds.c:337 #, c-format msgid "Tables cannot be added to or dropped from FOR ALL TABLES publications." -msgstr "" +msgstr "In einer FOR-ALL-TABLES-Publikation können keine Tabellen hinzugefügt oder entfernt werden." 
-#: commands/publicationcmds.c:651 -#, fuzzy, c-format -#| msgid "relation \"%s\" is not a parent of relation \"%s\"" +#: commands/publicationcmds.c:638 +#, c-format msgid "relation \"%s\" is not part of the publication" -msgstr "Relation »%s« ist keine Basisrelation von Relation »%s«" +msgstr "Relation »%s« ist nicht Teil der Publikation" #: commands/publicationcmds.c:681 #, c-format @@ -7744,15 +8002,15 @@ msgstr "keine Berechtigung, um Eigentümer der Publikation »%s« zu ändern" #: commands/publicationcmds.c:683 #, c-format -msgid "The owner of a publication must be a superuser." -msgstr "Der Eigentümer einer Publikation muss ein Superuser sein." +msgid "The owner of a FOR ALL TABLES publication must be a superuser." +msgstr "Der Eigentümer einer FOR-ALL-TABLES-Publikation muss ein Superuser sein." -#: commands/schemacmds.c:106 commands/schemacmds.c:279 +#: commands/schemacmds.c:106 commands/schemacmds.c:280 #, c-format msgid "unacceptable schema name \"%s\"" msgstr "inakzeptabler Schemaname »%s«" -#: commands/schemacmds.c:107 commands/schemacmds.c:280 +#: commands/schemacmds.c:107 commands/schemacmds.c:281 #, c-format msgid "The prefix \"pg_\" is reserved for system schemas." msgstr "Der Präfix »pg_« ist für Systemschemas reserviert." @@ -7777,7 +8035,7 @@ msgstr "Provider muss angegeben werden, wenn mehrere Security-Label-Provider gel msgid "security label provider \"%s\" is not loaded" msgstr "Security-Label-Provider »%s« ist nicht geladen" -#: commands/sequence.c:135 +#: commands/sequence.c:138 #, c-format msgid "unlogged sequences are not supported" msgstr "ungeloggte Sequenzen werden nicht unterstützt" @@ -7807,1664 +8065,2023 @@ msgstr "lastval ist in dieser Sitzung noch nicht definiert" msgid "setval: value %s is out of bounds for sequence \"%s\" (%s..%s)" msgstr "setval: Wert %s ist außerhalb des gültigen Bereichs von Sequenz »%s« (%s..%s)" -#: commands/sequence.c:1344 +#: commands/sequence.c:1349 +#, c-format +msgid "invalid sequence option SEQUENCE NAME" +msgstr "ungültige Sequenzoption SEQUENCE NAME" + +#: commands/sequence.c:1375 +#, c-format +msgid "identity column type must be smallint, integer, or bigint" +msgstr "Typ von Identitätsspalte muss smallint, integer oder bigint sein" + +#: commands/sequence.c:1376 #, c-format msgid "sequence type must be smallint, integer, or bigint" -msgstr "" +msgstr "Sequenztyp muss smallint, integer oder bigint sein" -#: commands/sequence.c:1356 +#: commands/sequence.c:1410 #, c-format msgid "INCREMENT must not be zero" msgstr "INCREMENT darf nicht null sein" -#: commands/sequence.c:1405 -#, fuzzy, c-format -#| msgid "\"%s\" is out of range for type real" +#: commands/sequence.c:1463 +#, c-format msgid "MAXVALUE (%s) is out of range for sequence data type %s" -msgstr "»%s« ist außerhalb des gültigen Bereichs für Typ real" +msgstr "MAXVALUE (%s) ist außerhalb des gültigen Bereichs für Sequenzdatentyp %s" -#: commands/sequence.c:1442 -#, fuzzy, c-format -#| msgid "\"%s\" is out of range for type real" +#: commands/sequence.c:1500 +#, c-format msgid "MINVALUE (%s) is out of range for sequence data type %s" -msgstr "»%s« ist außerhalb des gültigen Bereichs für Typ real" +msgstr "MINVALUE (%s) ist außerhalb des gültigen Bereichs für Sequenzdatentyp %s" -#: commands/sequence.c:1456 +#: commands/sequence.c:1514 #, c-format msgid "MINVALUE (%s) must be less than MAXVALUE (%s)" msgstr "MINVALUE (%s) muss kleiner als MAXVALUE (%s) sein" -#: commands/sequence.c:1481 +#: commands/sequence.c:1541 #, c-format msgid "START value (%s) cannot be less than 
MINVALUE (%s)" msgstr "START-Wert (%s) kann nicht kleiner als MINVALUE (%s) sein" -#: commands/sequence.c:1493 +#: commands/sequence.c:1553 #, c-format msgid "START value (%s) cannot be greater than MAXVALUE (%s)" msgstr "START-Wert (%s) kann nicht größer als MAXVALUE (%s) sein" -#: commands/sequence.c:1523 +#: commands/sequence.c:1583 #, c-format msgid "RESTART value (%s) cannot be less than MINVALUE (%s)" msgstr "RESTART-Wert (%s) kann nicht kleiner als MINVALUE (%s) sein" -#: commands/sequence.c:1535 +#: commands/sequence.c:1595 #, c-format msgid "RESTART value (%s) cannot be greater than MAXVALUE (%s)" msgstr "RESTART-Wert (%s) kann nicht größer als MAXVALUE (%s) sein" -#: commands/sequence.c:1550 +#: commands/sequence.c:1610 #, c-format msgid "CACHE (%s) must be greater than zero" msgstr "CACHE (%s) muss größer als null sein" -#: commands/sequence.c:1582 +#: commands/sequence.c:1647 #, c-format msgid "invalid OWNED BY option" msgstr "ungültige OWNED BY Option" -#: commands/sequence.c:1583 +#: commands/sequence.c:1648 #, c-format msgid "Specify OWNED BY table.column or OWNED BY NONE." msgstr "Geben Sie OWNED BY tabelle.spalte oder OWNED BY NONE an." -#: commands/sequence.c:1607 +#: commands/sequence.c:1673 #, c-format msgid "referenced relation \"%s\" is not a table or foreign table" msgstr "Relation »%s«, auf die verwiesen wird, ist keine Tabelle oder Fremdtabelle" -#: commands/sequence.c:1614 +#: commands/sequence.c:1680 #, c-format msgid "sequence must have same owner as table it is linked to" msgstr "Sequenz muss selben Eigentümer wie die verknüpfte Tabelle haben" -#: commands/sequence.c:1618 +#: commands/sequence.c:1684 #, c-format msgid "sequence must be in same schema as table it is linked to" msgstr "Sequenz muss im selben Schema wie die verknüpfte Tabelle sein" -#: commands/subscriptioncmds.c:188 +#: commands/sequence.c:1706 +#, c-format +msgid "cannot change ownership of identity sequence" +msgstr "kann Eigentümer einer Identitätssequenz nicht ändern" + +#: commands/sequence.c:1707 commands/tablecmds.c:10316 +#: commands/tablecmds.c:12891 +#, c-format +msgid "Sequence \"%s\" is linked to table \"%s\"." +msgstr "Sequenz »%s« ist mit Tabelle »%s« verknüpft." 
+ +#: commands/statscmds.c:93 commands/statscmds.c:102 +#, c-format +msgid "only a single relation is allowed in CREATE STATISTICS" +msgstr "in CREATE STATISTICS ist nur eine einzelne Relation erlaubt" + +#: commands/statscmds.c:120 +#, c-format +msgid "relation \"%s\" is not a table, foreign table, or materialized view" +msgstr "Relation »%s« ist keine Tabelle, Fremdtabelle oder materialisierte Sicht" + +#: commands/statscmds.c:163 +#, c-format +msgid "statistics object \"%s\" already exists, skipping" +msgstr "Statistikobjekt »%s« existiert bereits, wird übersprungen" + +#: commands/statscmds.c:171 +#, c-format +msgid "statistics object \"%s\" already exists" +msgstr "Statistikobjekt »%s« existiert bereits" + +#: commands/statscmds.c:193 commands/statscmds.c:199 +#, c-format +msgid "only simple column references are allowed in CREATE STATISTICS" +msgstr "in CREATE STATISTICS sind nur einfache Spaltenverweise erlaubt" + +#: commands/statscmds.c:214 +#, c-format +msgid "statistics creation on system columns is not supported" +msgstr "Statistikerzeugung für Systemspalten wird nicht unterstützt" + +#: commands/statscmds.c:221 +#, c-format +msgid "column \"%s\" cannot be used in statistics because its type %s has no default btree operator class" +msgstr "Spalte »%s« kann nicht in Statistiken verwendet werden, weil ihr Typ %s keine Standardoperatorklasse für btree hat" + +#: commands/statscmds.c:228 +#, c-format +msgid "cannot have more than %d columns in statistics" +msgstr "Statistiken können nicht mehr als %d Spalten enthalten" + +#: commands/statscmds.c:243 +#, c-format +msgid "extended statistics require at least 2 columns" +msgstr "erweiterte Statistiken benötigen mindestens 2 Spalten" + +#: commands/statscmds.c:261 +#, c-format +msgid "duplicate column name in statistics definition" +msgstr "doppelter Spaltenname in Statistikdefinition" + +#: commands/statscmds.c:289 +#, c-format +msgid "unrecognized statistics kind \"%s\"" +msgstr "unbekannte Statistikart »%s«" + +#: commands/subscriptioncmds.c:187 +#, c-format +msgid "unrecognized subscription parameter: %s" +msgstr "unbekannter Subskriptionsparameter: %s" + +#: commands/subscriptioncmds.c:200 +#, c-format +msgid "connect = false and enabled = true are mutually exclusive options" +msgstr "die Optionen connect = false und enabled = true schließen einander aus" + +#: commands/subscriptioncmds.c:205 +#, c-format +msgid "connect = false and create_slot = true are mutually exclusive options" +msgstr "die Optionen connect = false und create_slot = true schließen einander aus" + +#: commands/subscriptioncmds.c:210 +#, c-format +msgid "connect = false and copy_data = true are mutually exclusive options" +msgstr "die Optionen connect = false und copy_data = true schließen einander aus" + +#: commands/subscriptioncmds.c:227 +#, c-format +msgid "slot_name = NONE and enabled = true are mutually exclusive options" +msgstr "die Optionen slot_name = NONE und enabled = true schließen einander aus" + +#: commands/subscriptioncmds.c:232 +#, c-format +msgid "slot_name = NONE and create_slot = true are mutually exclusive options" +msgstr "die Optionen slot_name = NONE und create_slot = true schließen einander aus" + +#: commands/subscriptioncmds.c:237 +#, c-format +msgid "subscription with slot_name = NONE must also set enabled = false" +msgstr "Subskription mit slot_name = NONE muss auch enabled = false setzen" + +#: commands/subscriptioncmds.c:242 +#, c-format +msgid "subscription with slot_name = NONE must also set create_slot = false" +msgstr 
"Subskription mit slot_name = NONE muss auch create_slot = false setzen" + +#: commands/subscriptioncmds.c:283 #, c-format msgid "publication name \"%s\" used more than once" msgstr "Publikationsname »%s« mehrmals angegeben" -#: commands/subscriptioncmds.c:245 +#: commands/subscriptioncmds.c:347 #, c-format msgid "must be superuser to create subscriptions" msgstr "nur Superuser können Subskriptionen erzeugen" -#: commands/subscriptioncmds.c:313 replication/logical/worker.c:1427 -#, fuzzy, c-format -#| msgid "could not connect to the primary server: %s" +#: commands/subscriptioncmds.c:427 commands/subscriptioncmds.c:520 +#: replication/logical/tablesync.c:856 replication/logical/worker.c:1720 +#, c-format msgid "could not connect to the publisher: %s" -msgstr "konnte nicht mit dem Primärserver verbinden: %s" +msgstr "konnte nicht mit dem Publikationsserver verbinden: %s" -#: commands/subscriptioncmds.c:319 -#, fuzzy, c-format -#| msgid "%s: creating replication slot \"%s\"\n" +#: commands/subscriptioncmds.c:469 +#, c-format msgid "created replication slot \"%s\" on publisher" -msgstr "%s: erzeuge Replikations-Slot »%s«\n" +msgstr "Replikations-Slot »%s« wurde auf dem Publikationsserver erzeugt" + +#: commands/subscriptioncmds.c:486 +#, c-format +msgid "tables were not subscribed, you will have to run ALTER SUBSCRIPTION ... REFRESH PUBLICATION to subscribe the tables" +msgstr "keine Tabellen wurden zur Subskription hinzugefügt; Sie müssen ALTER SUBSCRIPTION ... REFRESH PUBLICATION ausführen, um Tabellen zur Subskription hinzuzufügen" + +#: commands/subscriptioncmds.c:576 +#, c-format +msgid "table \"%s.%s\" added to subscription \"%s\"" +msgstr "Tabelle »%s.%s« wurde zur Subskription »%s« hinzugefügt" + +#: commands/subscriptioncmds.c:600 +#, c-format +msgid "table \"%s.%s\" removed from subscription \"%s\"" +msgstr "Tabelle »%s.%s« wurde aus Subskription »%s« entfernt" + +#: commands/subscriptioncmds.c:669 +#, c-format +msgid "cannot set slot_name = NONE for enabled subscription" +msgstr "für eine aktivierte Subskription kann nicht slot_name = NONE gesetzt werden" + +#: commands/subscriptioncmds.c:703 +#, c-format +msgid "cannot enable subscription that does not have a slot name" +msgstr "eine Subskription ohne Slot-Name kann nicht aktiviert werden" + +#: commands/subscriptioncmds.c:749 +#, c-format +msgid "ALTER SUBSCRIPTION with refresh is not allowed for disabled subscriptions" +msgstr "ALTER SUBSCRIPTION mit Refresh ist für deaktivierte Subskriptionen nicht erlaubt" -#: commands/subscriptioncmds.c:485 +#: commands/subscriptioncmds.c:750 +#, c-format +msgid "Use ALTER SUBSCRIPTION ... SET PUBLICATION ... WITH (refresh = false)." +msgstr "Verwenden Sie ALTER SUBSCRIPTION ... SET PUBLICATION ... WITH (refresh = false)." + +#: commands/subscriptioncmds.c:768 +#, c-format +msgid "ALTER SUBSCRIPTION ... REFRESH is not allowed for disabled subscriptions" +msgstr "ALTER SUBSCRIPTION ... 
REFRESH ist für eine deaktivierte Subskription nicht erlaubt" + +#: commands/subscriptioncmds.c:847 #, c-format msgid "subscription \"%s\" does not exist, skipping" msgstr "Subskription »%s« existiert nicht, wird übersprungen" -#: commands/subscriptioncmds.c:564 +#: commands/subscriptioncmds.c:972 #, c-format msgid "could not connect to publisher when attempting to drop the replication slot \"%s\"" -msgstr "" +msgstr "konnte beim Versuch den Replikations-Slot »%s« zu löschen nicht mit dem Publikationsserver verbinden" -#: commands/subscriptioncmds.c:566 commands/subscriptioncmds.c:574 +#: commands/subscriptioncmds.c:974 commands/subscriptioncmds.c:988 +#: replication/logical/tablesync.c:905 replication/logical/tablesync.c:927 #, c-format msgid "The error was: %s" msgstr "Der Fehler war: %s" -#: commands/subscriptioncmds.c:572 -#, fuzzy, c-format -#| msgid "%s: could not send replication command \"%s\": %s" +#: commands/subscriptioncmds.c:975 +#, c-format +msgid "Use ALTER SUBSCRIPTION ... SET (slot_name = NONE) to disassociate the subscription from the slot." +msgstr "Verwenden Sie ALTER SUBSCRIPTION ... SET (slot_name = NONE), um die Subskription vom Slot zu trennen." + +#: commands/subscriptioncmds.c:986 +#, c-format msgid "could not drop the replication slot \"%s\" on publisher" -msgstr "%s: konnte Replikationsbefehl »%s« nicht senden: %s" +msgstr "konnte Replikations-Slot »%s« auf dem Publikationsserver nicht löschen" -#: commands/subscriptioncmds.c:577 -#, fuzzy, c-format -#| msgid "%s: dropping replication slot \"%s\"\n" +#: commands/subscriptioncmds.c:991 +#, c-format msgid "dropped replication slot \"%s\" on publisher" -msgstr "%s: lösche Replikations-Slot »%s«\n" +msgstr "Replikations-Slot »%s« auf dem Publikationsserver wurde gelöscht" -#: commands/subscriptioncmds.c:616 +#: commands/subscriptioncmds.c:1032 #, c-format msgid "permission denied to change owner of subscription \"%s\"" msgstr "keine Berechtigung, um Eigentümer der Subskription »%s« zu ändern" -#: commands/subscriptioncmds.c:618 +#: commands/subscriptioncmds.c:1034 #, c-format -msgid "The owner of an subscription must be a superuser." +msgid "The owner of a subscription must be a superuser." msgstr "Der Eigentümer einer Subskription muss ein Superuser sein." -#: commands/tablecmds.c:221 commands/tablecmds.c:263 +#: commands/subscriptioncmds.c:1147 +#, c-format +msgid "could not receive list of replicated tables from the publisher: %s" +msgstr "konnte Liste der replizierten Tabellen nicht vom Publikationsserver empfangen: %s" + +#: commands/tablecmds.c:223 commands/tablecmds.c:265 #, c-format msgid "table \"%s\" does not exist" msgstr "Tabelle »%s« existiert nicht" -#: commands/tablecmds.c:222 commands/tablecmds.c:264 +#: commands/tablecmds.c:224 commands/tablecmds.c:266 #, c-format msgid "table \"%s\" does not exist, skipping" msgstr "Tabelle »%s« existiert nicht, wird übersprungen" -#: commands/tablecmds.c:224 commands/tablecmds.c:266 +#: commands/tablecmds.c:226 commands/tablecmds.c:268 msgid "Use DROP TABLE to remove a table." msgstr "Verwenden Sie DROP TABLE, um eine Tabelle zu löschen." -#: commands/tablecmds.c:227 +#: commands/tablecmds.c:229 #, c-format msgid "sequence \"%s\" does not exist" msgstr "Sequenz »%s« existiert nicht" -#: commands/tablecmds.c:228 +#: commands/tablecmds.c:230 #, c-format msgid "sequence \"%s\" does not exist, skipping" msgstr "Sequenz »%s« existiert nicht, wird übersprungen" -#: commands/tablecmds.c:230 +#: commands/tablecmds.c:232 msgid "Use DROP SEQUENCE to remove a sequence." 
msgstr "Verwenden Sie DROP SEQUENCE, um eine Sequenz zu löschen." -#: commands/tablecmds.c:233 +#: commands/tablecmds.c:235 #, c-format msgid "view \"%s\" does not exist" msgstr "Sicht »%s« existiert nicht" -#: commands/tablecmds.c:234 +#: commands/tablecmds.c:236 #, c-format msgid "view \"%s\" does not exist, skipping" msgstr "Sicht »%s« existiert nicht, wird übersprungen" -#: commands/tablecmds.c:236 +#: commands/tablecmds.c:238 msgid "Use DROP VIEW to remove a view." msgstr "Verwenden Sie DROP VIEW, um eine Sicht zu löschen." -#: commands/tablecmds.c:239 +#: commands/tablecmds.c:241 #, c-format msgid "materialized view \"%s\" does not exist" msgstr "materialisierte Sicht »%s« existiert nicht" -#: commands/tablecmds.c:240 +#: commands/tablecmds.c:242 #, c-format msgid "materialized view \"%s\" does not exist, skipping" msgstr "materialisierte Sicht »%s« existiert nicht, wird übersprungen" -#: commands/tablecmds.c:242 +#: commands/tablecmds.c:244 msgid "Use DROP MATERIALIZED VIEW to remove a materialized view." msgstr "Verwenden Sie DROP MATERIALIZED VIEW, um eine materialisierte Sicht zu löschen." -#: commands/tablecmds.c:245 parser/parse_utilcmd.c:1698 +#: commands/tablecmds.c:247 commands/tablecmds.c:271 commands/tablecmds.c:14815 +#: parser/parse_utilcmd.c:1983 #, c-format msgid "index \"%s\" does not exist" msgstr "Index »%s« existiert nicht" -#: commands/tablecmds.c:246 +#: commands/tablecmds.c:248 commands/tablecmds.c:272 #, c-format msgid "index \"%s\" does not exist, skipping" msgstr "Index »%s« existiert nicht, wird übersprungen" -#: commands/tablecmds.c:248 +#: commands/tablecmds.c:250 commands/tablecmds.c:274 msgid "Use DROP INDEX to remove an index." msgstr "Verwenden Sie DROP INDEX, um einen Index zu löschen." -#: commands/tablecmds.c:253 +#: commands/tablecmds.c:255 #, c-format msgid "\"%s\" is not a type" msgstr "»%s« ist kein Typ" -#: commands/tablecmds.c:254 +#: commands/tablecmds.c:256 msgid "Use DROP TYPE to remove a type." msgstr "Verwenden Sie DROP TYPE, um einen Typen zu löschen." -#: commands/tablecmds.c:257 commands/tablecmds.c:9065 -#: commands/tablecmds.c:11904 +#: commands/tablecmds.c:259 commands/tablecmds.c:9768 +#: commands/tablecmds.c:12671 #, c-format msgid "foreign table \"%s\" does not exist" msgstr "Fremdtabelle »%s« existiert nicht" -#: commands/tablecmds.c:258 +#: commands/tablecmds.c:260 #, c-format msgid "foreign table \"%s\" does not exist, skipping" msgstr "Fremdtabelle »%s« existiert nicht, wird übersprungen" -#: commands/tablecmds.c:260 +#: commands/tablecmds.c:262 msgid "Use DROP FOREIGN TABLE to remove a foreign table." msgstr "Verwenden Sie DROP FOREIGN TABLE, um eine Fremdtabelle zu löschen." 
-#: commands/tablecmds.c:519 +#: commands/tablecmds.c:554 #, c-format msgid "ON COMMIT can only be used on temporary tables" msgstr "ON COMMIT kann nur mit temporären Tabellen verwendet werden" -#: commands/tablecmds.c:547 +#: commands/tablecmds.c:582 #, c-format msgid "cannot create temporary table within security-restricted operation" msgstr "kann temporäre Tabelle nicht in einer sicherheitsbeschränkten Operation erzeugen" -#: commands/tablecmds.c:646 -#, fuzzy, c-format -#| msgid "table \"%s\" without OIDs cannot inherit from table \"%s\" with OIDs" +#: commands/tablecmds.c:683 +#, c-format msgid "cannot create table with OIDs as partition of table without OIDs" -msgstr "Tabelle »%s« ohne OIDs kann nicht von Tabelle »%s« mit OIDs erben" +msgstr "kann Tabelle mit OIDs nicht als Partition einer Tabelle ohne OIDs erzeugen" -#: commands/tablecmds.c:764 parser/parse_utilcmd.c:3040 -#, fuzzy, c-format -#| msgid "\"%s\" is not an index" +#: commands/tablecmds.c:810 +#, c-format msgid "\"%s\" is not partitioned" -msgstr "»%s« ist kein Index" +msgstr "»%s« ist nicht partitioniert" + +#: commands/tablecmds.c:888 +#, c-format +msgid "cannot partition using more than %d columns" +msgstr "Partitionierung kann nicht mehr als %d Spalten verwenden" -#: commands/tablecmds.c:972 +#: commands/tablecmds.c:1095 #, c-format msgid "DROP INDEX CONCURRENTLY does not support dropping multiple objects" msgstr "DROP INDEX CONCURRENTLY unterstützt das Löschen von mehreren Objekten nicht" -#: commands/tablecmds.c:976 +#: commands/tablecmds.c:1099 #, c-format msgid "DROP INDEX CONCURRENTLY does not support CASCADE" msgstr "DROP INDEX CONCURRENTLY unterstützt kein CASCADE" -#: commands/tablecmds.c:1224 -#, fuzzy, c-format -#| msgid "column must be added to child tables too" -msgid "must truncate child tables too" -msgstr "Spalte muss ebenso in den abgeleiteten Tabellen hinzugefügt werden" +#: commands/tablecmds.c:1381 +#, c-format +msgid "cannot truncate only a partitioned table" +msgstr "kann nicht nur eine partitionierte Tabelle leeren" + +#: commands/tablecmds.c:1382 +#, c-format +msgid "Do not specify the ONLY keyword, or use TRUNCATE ONLY on the partitions directly." +msgstr "Lassen Sie das Schlüsselwort ONLY weg oder wenden Sie TRUNCATE ONLY direkt auf die Partitionen an." 
-#: commands/tablecmds.c:1252 +#: commands/tablecmds.c:1451 #, c-format msgid "truncate cascades to table \"%s\"" msgstr "Truncate-Vorgang leert ebenfalls Tabelle »%s«" -#: commands/tablecmds.c:1500 +#: commands/tablecmds.c:1742 #, c-format msgid "cannot truncate temporary tables of other sessions" msgstr "kann temporäre Tabellen anderer Sitzungen nicht leeren" -#: commands/tablecmds.c:1731 commands/tablecmds.c:10648 -#, fuzzy, c-format -#| msgid "cannot insert into foreign table \"%s\"" +#: commands/tablecmds.c:1973 commands/tablecmds.c:11422 +#, c-format msgid "cannot inherit from partitioned table \"%s\"" -msgstr "kann nicht in Fremdtabelle »%s« einfügen" +msgstr "von partitionierter Tabelle »%s« kann nicht geerbt werden" -#: commands/tablecmds.c:1736 -#, fuzzy, c-format -#| msgid "cannot inherit from temporary relation \"%s\"" +#: commands/tablecmds.c:1978 +#, c-format msgid "cannot inherit from partition \"%s\"" -msgstr "von temporärer Relation »%s« kann nicht geerbt werden" +msgstr "von Partition »%s« kann nicht geerbt werden" -#: commands/tablecmds.c:1744 parser/parse_utilcmd.c:1909 +#: commands/tablecmds.c:1986 parser/parse_utilcmd.c:2200 +#: parser/parse_utilcmd.c:2323 #, c-format msgid "inherited relation \"%s\" is not a table or foreign table" msgstr "geerbte Relation »%s« ist keine Tabelle oder Fremdtabelle" -#: commands/tablecmds.c:1752 commands/tablecmds.c:10627 +#: commands/tablecmds.c:1994 commands/tablecmds.c:11401 #, c-format msgid "cannot inherit from temporary relation \"%s\"" msgstr "von temporärer Relation »%s« kann nicht geerbt werden" -#: commands/tablecmds.c:1762 commands/tablecmds.c:10635 +#: commands/tablecmds.c:2004 commands/tablecmds.c:11409 #, c-format msgid "cannot inherit from temporary relation of another session" msgstr "von temporärer Relation einer anderen Sitzung kann nicht geerbt werden" -#: commands/tablecmds.c:1779 commands/tablecmds.c:10746 +#: commands/tablecmds.c:2021 commands/tablecmds.c:11533 #, c-format msgid "relation \"%s\" would be inherited from more than once" msgstr "von der Relation »%s« würde mehrmals geerbt werden" -#: commands/tablecmds.c:1827 +#: commands/tablecmds.c:2070 #, c-format msgid "merging multiple inherited definitions of column \"%s\"" msgstr "geerbte Definitionen von Spalte »%s« werden zusammengeführt" -#: commands/tablecmds.c:1835 +#: commands/tablecmds.c:2078 #, c-format msgid "inherited column \"%s\" has a type conflict" msgstr "geerbte Spalte »%s« hat Typkonflikt" -#: commands/tablecmds.c:1837 commands/tablecmds.c:1860 -#: commands/tablecmds.c:2065 commands/tablecmds.c:2089 -#: parser/parse_coerce.c:1650 parser/parse_coerce.c:1670 -#: parser/parse_coerce.c:1690 parser/parse_coerce.c:1736 -#: parser/parse_coerce.c:1775 parser/parse_param.c:218 +#: commands/tablecmds.c:2080 commands/tablecmds.c:2103 +#: commands/tablecmds.c:2309 commands/tablecmds.c:2339 +#: parser/parse_coerce.c:1716 parser/parse_coerce.c:1736 +#: parser/parse_coerce.c:1756 parser/parse_coerce.c:1802 +#: parser/parse_coerce.c:1841 parser/parse_param.c:218 #, c-format msgid "%s versus %s" msgstr "%s gegen %s" -#: commands/tablecmds.c:1846 +#: commands/tablecmds.c:2089 #, c-format msgid "inherited column \"%s\" has a collation conflict" msgstr "geerbte Spalte »%s« hat Sortierfolgenkonflikt" -#: commands/tablecmds.c:1848 commands/tablecmds.c:2077 -#: commands/tablecmds.c:5092 +#: commands/tablecmds.c:2091 commands/tablecmds.c:2321 +#: commands/tablecmds.c:5404 #, c-format msgid "\"%s\" versus \"%s\"" msgstr "»%s« gegen »%s«" -#: commands/tablecmds.c:1858 
+#: commands/tablecmds.c:2101 #, c-format msgid "inherited column \"%s\" has a storage parameter conflict" msgstr "geerbte Spalte »%s« hat einen Konflikt bei einem Storage-Parameter" -#: commands/tablecmds.c:1971 commands/tablecmds.c:8573 -#: parser/parse_utilcmd.c:993 parser/parse_utilcmd.c:1343 -#: parser/parse_utilcmd.c:1419 +#: commands/tablecmds.c:2215 commands/tablecmds.c:9255 +#: parser/parse_utilcmd.c:1116 parser/parse_utilcmd.c:1516 +#: parser/parse_utilcmd.c:1623 #, c-format msgid "cannot convert whole-row table reference" msgstr "kann Verweis auf ganze Zeile der Tabelle nicht umwandeln" -#: commands/tablecmds.c:1972 parser/parse_utilcmd.c:994 +#: commands/tablecmds.c:2216 parser/parse_utilcmd.c:1117 #, c-format msgid "Constraint \"%s\" contains a whole-row reference to table \"%s\"." msgstr "Constraint »%s« enthält einen Verweis auf die ganze Zeile der Tabelle »%s«." -#: commands/tablecmds.c:2051 +#: commands/tablecmds.c:2295 #, c-format msgid "merging column \"%s\" with inherited definition" msgstr "Spalte »%s« wird mit geerbter Definition zusammengeführt" -#: commands/tablecmds.c:2055 +#: commands/tablecmds.c:2299 #, c-format msgid "moving and merging column \"%s\" with inherited definition" msgstr "Spalte »%s« wird verschoben und mit geerbter Definition zusammengeführt" -#: commands/tablecmds.c:2056 +#: commands/tablecmds.c:2300 #, c-format msgid "User-specified column moved to the position of the inherited column." msgstr "Benutzerdefinierte Spalte wurde auf die Position der geerbten Spalte verschoben." -#: commands/tablecmds.c:2063 +#: commands/tablecmds.c:2307 #, c-format msgid "column \"%s\" has a type conflict" msgstr "für Spalte »%s« besteht ein Typkonflikt" -#: commands/tablecmds.c:2075 +#: commands/tablecmds.c:2319 #, c-format msgid "column \"%s\" has a collation conflict" msgstr "für Spalte »%s« besteht ein Sortierfolgenkonflikt" -#: commands/tablecmds.c:2087 +#: commands/tablecmds.c:2337 #, c-format msgid "column \"%s\" has a storage parameter conflict" msgstr "für Spalte »%s« besteht ein Konflikt bei einem Storage-Parameter" -#: commands/tablecmds.c:2189 +#: commands/tablecmds.c:2448 #, c-format msgid "column \"%s\" inherits conflicting default values" msgstr "Spalte »%s« erbt widersprüchliche Vorgabewerte" -#: commands/tablecmds.c:2191 +#: commands/tablecmds.c:2450 #, c-format msgid "To resolve the conflict, specify a default explicitly." msgstr "Um den Konflikt zu lösen, geben Sie einen Vorgabewert ausdrücklich an." 
-#: commands/tablecmds.c:2238 +#: commands/tablecmds.c:2497 #, c-format msgid "check constraint name \"%s\" appears multiple times but with different expressions" msgstr "Check-Constraint-Name »%s« erscheint mehrmals, aber mit unterschiedlichen Ausdrücken" -#: commands/tablecmds.c:2437 +#: commands/tablecmds.c:2674 #, c-format msgid "cannot rename column of typed table" msgstr "Spalte einer getypten Tabelle kann nicht umbenannt werden" -#: commands/tablecmds.c:2455 +#: commands/tablecmds.c:2693 #, c-format msgid "\"%s\" is not a table, view, materialized view, composite type, index, or foreign table" msgstr "»%s« ist weder Tabelle, Sicht, materialisierte Sicht, zusammengesetzter Typ, Index noch Fremdtabelle" -#: commands/tablecmds.c:2549 +#: commands/tablecmds.c:2787 #, c-format msgid "inherited column \"%s\" must be renamed in child tables too" msgstr "vererbte Spalte »%s« muss ebenso in den abgeleiteten Tabellen umbenannt werden" -#: commands/tablecmds.c:2581 +#: commands/tablecmds.c:2819 #, c-format msgid "cannot rename system column \"%s\"" msgstr "Systemspalte »%s« kann nicht umbenannt werden" -#: commands/tablecmds.c:2596 +#: commands/tablecmds.c:2834 #, c-format msgid "cannot rename inherited column \"%s\"" msgstr "kann vererbte Spalte »%s« nicht umbenennen" -#: commands/tablecmds.c:2748 +#: commands/tablecmds.c:2986 #, c-format msgid "inherited constraint \"%s\" must be renamed in child tables too" msgstr "vererbter Constraint »%s« muss ebenso in den abgeleiteten Tabellen umbenannt werden" -#: commands/tablecmds.c:2755 +#: commands/tablecmds.c:2993 #, c-format msgid "cannot rename inherited constraint \"%s\"" msgstr "kann vererbten Constraint »%s« nicht umbenennen" #. translator: first %s is a SQL command, eg ALTER TABLE -#: commands/tablecmds.c:2979 +#: commands/tablecmds.c:3218 #, c-format msgid "cannot %s \"%s\" because it is being used by active queries in this session" msgstr "%s mit Relation »%s« nicht möglich, weil sie von aktiven Anfragen in dieser Sitzung verwendet wird" #. 
translator: first %s is a SQL command, eg ALTER TABLE -#: commands/tablecmds.c:2988 +#: commands/tablecmds.c:3228 #, c-format msgid "cannot %s \"%s\" because it has pending trigger events" msgstr "%s mit Relation »%s« nicht möglich, weil es anstehende Trigger-Ereignisse dafür gibt" -#: commands/tablecmds.c:4087 +#: commands/tablecmds.c:4372 #, c-format msgid "cannot rewrite system relation \"%s\"" msgstr "Systemrelation »%s« kann nicht neu geschrieben werden" -#: commands/tablecmds.c:4093 +#: commands/tablecmds.c:4378 #, c-format msgid "cannot rewrite table \"%s\" used as a catalog table" msgstr "Tabelle »%s«, die als Katalogtabelle verwendet wird, kann nicht neu geschrieben werden" -#: commands/tablecmds.c:4103 +#: commands/tablecmds.c:4388 #, c-format msgid "cannot rewrite temporary tables of other sessions" msgstr "kann temporäre Tabellen anderer Sitzungen nicht neu schreiben" -#: commands/tablecmds.c:4382 +#: commands/tablecmds.c:4665 #, c-format msgid "rewriting table \"%s\"" msgstr "schreibe Tabelle »%s« neu" -#: commands/tablecmds.c:4386 +#: commands/tablecmds.c:4669 #, c-format msgid "verifying table \"%s\"" msgstr "überprüfe Tabelle »%s«" -#: commands/tablecmds.c:4499 +#: commands/tablecmds.c:4785 #, c-format msgid "column \"%s\" contains null values" msgstr "Spalte »%s« enthält NULL-Werte" -#: commands/tablecmds.c:4514 commands/tablecmds.c:7844 +#: commands/tablecmds.c:4801 commands/tablecmds.c:8492 #, c-format msgid "check constraint \"%s\" is violated by some row" msgstr "Check-Constraint »%s« wird von irgendeiner Zeile verletzt" -#: commands/tablecmds.c:4530 +#: commands/tablecmds.c:4819 #, fuzzy, c-format -#| msgid "check constraint \"%s\" is violated by some row" +#| msgid "partition constraint is violated by some row" +msgid "updated partition constraint for default partition would be violated by some row" +msgstr "Partitions-Constraint wird von irgendeiner Zeile verletzt" + +#: commands/tablecmds.c:4823 +#, c-format msgid "partition constraint is violated by some row" -msgstr "Check-Constraint »%s« wird von irgendeiner Zeile verletzt" +msgstr "Partitions-Constraint wird von irgendeiner Zeile verletzt" -#: commands/tablecmds.c:4668 commands/trigger.c:245 -#: rewrite/rewriteDefine.c:266 rewrite/rewriteDefine.c:906 +#: commands/tablecmds.c:4965 commands/trigger.c:310 rewrite/rewriteDefine.c:266 +#: rewrite/rewriteDefine.c:919 #, c-format msgid "\"%s\" is not a table or view" msgstr "»%s« ist keine Tabelle oder Sicht" -#: commands/tablecmds.c:4671 commands/trigger.c:1229 commands/trigger.c:1335 +#: commands/tablecmds.c:4968 commands/trigger.c:1520 commands/trigger.c:1626 #, c-format msgid "\"%s\" is not a table, view, or foreign table" msgstr "»%s« ist keine Tabelle, Sicht oder Fremdtabelle" -#: commands/tablecmds.c:4674 +#: commands/tablecmds.c:4971 #, c-format msgid "\"%s\" is not a table, view, materialized view, or index" msgstr "»%s« ist weder Tabelle, Sicht, materialisierte Sicht noch Index" -#: commands/tablecmds.c:4680 +#: commands/tablecmds.c:4977 #, c-format msgid "\"%s\" is not a table, materialized view, or index" msgstr "»%s« ist weder Tabelle, materialisierte Sicht noch Index" -#: commands/tablecmds.c:4683 +#: commands/tablecmds.c:4980 #, c-format msgid "\"%s\" is not a table, materialized view, or foreign table" msgstr "»%s« ist weder Tabelle, materialisierte Sicht noch Fremdtabelle" -#: commands/tablecmds.c:4686 +#: commands/tablecmds.c:4983 #, c-format msgid "\"%s\" is not a table or foreign table" msgstr "»%s« ist keine Tabelle oder Fremdtabelle" -#: 
commands/tablecmds.c:4689 +#: commands/tablecmds.c:4986 #, c-format msgid "\"%s\" is not a table, composite type, or foreign table" msgstr "»%s« ist weder Tabelle, zusammengesetzter Typ noch Fremdtabelle" -#: commands/tablecmds.c:4692 commands/tablecmds.c:5814 +#: commands/tablecmds.c:4989 commands/tablecmds.c:6407 #, c-format msgid "\"%s\" is not a table, materialized view, index, or foreign table" msgstr "»%s« ist weder Tabelle, materialisierte Sicht, Index noch Fremdtabelle" -#: commands/tablecmds.c:4702 +#: commands/tablecmds.c:4999 #, c-format msgid "\"%s\" is of the wrong type" msgstr "»%s« hat den falschen Typ" -#: commands/tablecmds.c:4856 commands/tablecmds.c:4863 +#: commands/tablecmds.c:5174 commands/tablecmds.c:5181 #, c-format msgid "cannot alter type \"%s\" because column \"%s.%s\" uses it" msgstr "kann Typ »%s« nicht ändern, weil Spalte »%s.%s« ihn verwendet" -#: commands/tablecmds.c:4870 +#: commands/tablecmds.c:5188 #, c-format msgid "cannot alter foreign table \"%s\" because column \"%s.%s\" uses its row type" msgstr "kann Fremdtabelle »%s« nicht ändern, weil Spalte »%s.%s« ihren Zeilentyp verwendet" -#: commands/tablecmds.c:4877 +#: commands/tablecmds.c:5195 #, c-format msgid "cannot alter table \"%s\" because column \"%s.%s\" uses its row type" msgstr "kann Tabelle »%s« nicht ändern, weil Spalte »%s.%s« ihren Zeilentyp verwendet" -#: commands/tablecmds.c:4939 +#: commands/tablecmds.c:5249 #, c-format msgid "cannot alter type \"%s\" because it is the type of a typed table" msgstr "kann Typ »%s« nicht ändern, weil er der Typ einer getypten Tabelle ist" -#: commands/tablecmds.c:4941 +#: commands/tablecmds.c:5251 #, c-format msgid "Use ALTER ... CASCADE to alter the typed tables too." msgstr "Verwenden Sie ALTER ... CASCADE, um die getypten Tabellen ebenfalls zu ändern." 
-#: commands/tablecmds.c:4985 +#: commands/tablecmds.c:5297 #, c-format msgid "type %s is not a composite type" msgstr "Typ %s ist kein zusammengesetzter Typ" -#: commands/tablecmds.c:5011 +#: commands/tablecmds.c:5323 #, c-format msgid "cannot add column to typed table" msgstr "zu einer getypten Tabelle kann keine Spalte hinzugefügt werden" -#: commands/tablecmds.c:5055 -#, fuzzy, c-format -#| msgid "cannot add column to typed table" +#: commands/tablecmds.c:5367 +#, c-format msgid "cannot add column to a partition" -msgstr "zu einer getypten Tabelle kann keine Spalte hinzugefügt werden" +msgstr "zu einer Partition kann keine Spalte hinzugefügt werden" -#: commands/tablecmds.c:5084 commands/tablecmds.c:10872 +#: commands/tablecmds.c:5396 commands/tablecmds.c:11660 #, c-format msgid "child table \"%s\" has different type for column \"%s\"" msgstr "abgeleitete Tabelle »%s« hat unterschiedlichen Typ für Spalte »%s«" -#: commands/tablecmds.c:5090 commands/tablecmds.c:10879 +#: commands/tablecmds.c:5402 commands/tablecmds.c:11667 #, c-format msgid "child table \"%s\" has different collation for column \"%s\"" msgstr "abgeleitete Tabelle »%s« hat unterschiedliche Sortierfolge für Spalte »%s«" -#: commands/tablecmds.c:5100 +#: commands/tablecmds.c:5412 #, c-format msgid "child table \"%s\" has a conflicting \"%s\" column" msgstr "abgeleitete Tabelle »%s« hat eine widersprüchliche Spalte »%s«" -#: commands/tablecmds.c:5111 +#: commands/tablecmds.c:5423 #, c-format msgid "merging definition of column \"%s\" for child \"%s\"" msgstr "Definition von Spalte »%s« für abgeleitete Tabelle »%s« wird zusammengeführt" -#: commands/tablecmds.c:5335 +#: commands/tablecmds.c:5447 +#, c-format +msgid "cannot recursively add identity column to table that has child tables" +msgstr "eine Identitätsspalte kann nicht rekursiv zu einer Tabelle hinzugefügt werden, die abgeleitete Tabellen hat" + +#: commands/tablecmds.c:5696 #, c-format msgid "column must be added to child tables too" msgstr "Spalte muss ebenso in den abgeleiteten Tabellen hinzugefügt werden" -#: commands/tablecmds.c:5410 +#: commands/tablecmds.c:5771 #, c-format msgid "column \"%s\" of relation \"%s\" already exists, skipping" msgstr "Spalte »%s« von Relation »%s« existiert bereits, wird übersprungen" -#: commands/tablecmds.c:5417 +#: commands/tablecmds.c:5778 #, c-format msgid "column \"%s\" of relation \"%s\" already exists" msgstr "Spalte »%s« von Relation »%s« existiert bereits" -#: commands/tablecmds.c:5511 commands/tablecmds.c:8246 -#, fuzzy, c-format -#| msgid "constraint must be added to child tables too" -msgid "constraint must be dropped from child tables too" -msgstr "Constraint muss ebenso in den abgeleiteten Tabellen hinzugefügt werden" +#: commands/tablecmds.c:5876 commands/tablecmds.c:8935 +#, c-format +msgid "cannot remove constraint from only the partitioned table when partitions exist" +msgstr "Constraint kann nicht nur von der partitionierten Tabelle entfernt werden, wenn Partitionen existieren" + +#: commands/tablecmds.c:5877 commands/tablecmds.c:6021 +#: commands/tablecmds.c:6798 commands/tablecmds.c:8936 +#, c-format +msgid "Do not specify the ONLY keyword." +msgstr "Lassen Sie das Schlüsselwort ONLY weg." 
-#: commands/tablecmds.c:5542 commands/tablecmds.c:5703 -#: commands/tablecmds.c:5758 commands/tablecmds.c:5873 -#: commands/tablecmds.c:5927 commands/tablecmds.c:6019 -#: commands/tablecmds.c:8395 commands/tablecmds.c:9088 +#: commands/tablecmds.c:5909 commands/tablecmds.c:6057 +#: commands/tablecmds.c:6112 commands/tablecmds.c:6188 +#: commands/tablecmds.c:6282 commands/tablecmds.c:6341 +#: commands/tablecmds.c:6491 commands/tablecmds.c:6554 +#: commands/tablecmds.c:6646 commands/tablecmds.c:9075 +#: commands/tablecmds.c:9791 #, c-format msgid "cannot alter system column \"%s\"" msgstr "Systemspalte »%s« kann nicht geändert werden" -#: commands/tablecmds.c:5578 +#: commands/tablecmds.c:5915 commands/tablecmds.c:6118 +#, c-format +msgid "column \"%s\" of relation \"%s\" is an identity column" +msgstr "Spalte »%s« von Relation »%s« ist eine Identitätsspalte" + +#: commands/tablecmds.c:5951 #, c-format msgid "column \"%s\" is in a primary key" msgstr "Spalte »%s« ist in einem Primärschlüssel" -#: commands/tablecmds.c:5600 -#, fuzzy, c-format -#| msgid "column \"%s\" in child table must be marked NOT NULL" +#: commands/tablecmds.c:5973 +#, c-format msgid "column \"%s\" is marked NOT NULL in parent table" -msgstr "Spalte »%s« in abgeleiteter Tabelle muss als NOT NULL markiert sein" +msgstr "Spalte »%s« ist in Elterntabelle als NOT NULL markiert" -#: commands/tablecmds.c:5625 -#, fuzzy, c-format -#| msgid "column \"%s\" is in a primary key" -msgid "column \"%s\" is in range partition key" -msgstr "Spalte »%s« ist in einem Primärschlüssel" +#: commands/tablecmds.c:6020 +#, c-format +msgid "cannot add constraint to only the partitioned table when partitions exist" +msgstr "Constraint kann nicht nur zu der partitionierten Tabelle hinzugefügt werden, wenn Partitionen existieren" -#: commands/tablecmds.c:5672 commands/tablecmds.c:6659 +#: commands/tablecmds.c:6120 #, c-format -msgid "constraint must be added to child tables too" -msgstr "Constraint muss ebenso in den abgeleiteten Tabellen hinzugefügt werden" +msgid "Use ALTER TABLE ... ALTER COLUMN ... DROP IDENTITY instead." +msgstr "Verwenden Sie stattdessen ALTER TABLE ... ALTER COLUMN ... DROP IDENTITY." 
+ +#: commands/tablecmds.c:6199 +#, c-format +msgid "column \"%s\" of relation \"%s\" must be declared NOT NULL before identity can be added" +msgstr "Spalte »%s« von Relation »%s« muss als NOT NULL deklariert werden, bevor sie Identitätsspalte werden kann" + +#: commands/tablecmds.c:6205 +#, c-format +msgid "column \"%s\" of relation \"%s\" is already an identity column" +msgstr "Spalte »%s« von Relation »%s« ist bereits eine Identitätsspalte" + +#: commands/tablecmds.c:6211 +#, c-format +msgid "column \"%s\" of relation \"%s\" already has a default value" +msgstr "Spalte »%s« von Relation »%s« hat bereits einen Vorgabewert" -#: commands/tablecmds.c:5846 +#: commands/tablecmds.c:6288 commands/tablecmds.c:6349 +#, c-format +msgid "column \"%s\" of relation \"%s\" is not an identity column" +msgstr "Spalte »%s« von Relation »%s« ist keine Identitätsspalte" + +#: commands/tablecmds.c:6354 +#, c-format +msgid "column \"%s\" of relation \"%s\" is not an identity column, skipping" +msgstr "Spalte »%s« von Relation »%s« ist keine Identitätsspalte, wird übersprungen" + +#: commands/tablecmds.c:6419 +#, c-format +msgid "cannot refer to non-index column by number" +msgstr "" + +#: commands/tablecmds.c:6450 #, c-format msgid "statistics target %d is too low" msgstr "Statistikziel %d ist zu niedrig" -#: commands/tablecmds.c:5854 +#: commands/tablecmds.c:6458 #, c-format msgid "lowering statistics target to %d" msgstr "setze Statistikziel auf %d herab" -#: commands/tablecmds.c:5999 +#: commands/tablecmds.c:6481 +#, c-format +msgid "column number %d of relation \"%s\" does not exist" +msgstr "Spalte Nummer %d von Relation »%s« existiert nicht" + +#: commands/tablecmds.c:6499 +#, fuzzy, c-format +#| msgid "cannot insert into column \"%s\" of view \"%s\"" +msgid "cannot alter statistics on non-expression column \"%s\" of index \"%s\"" +msgstr "kann nicht in Spalte »%s« von Sicht »%s« einfügen" + +#: commands/tablecmds.c:6501 +#, fuzzy, c-format +#| msgid "Collects statistics on database activity." +msgid "Alter statistics on table column instead." +msgstr "Sammelt Statistiken über Datenbankaktivität." 
+ +#: commands/tablecmds.c:6626 #, c-format msgid "invalid storage type \"%s\"" msgstr "ungültiger Storage-Typ »%s«" -#: commands/tablecmds.c:6031 +#: commands/tablecmds.c:6658 #, c-format msgid "column data type %s can only have storage PLAIN" msgstr "Spaltendatentyp %s kann nur Storage-Typ PLAIN haben" -#: commands/tablecmds.c:6066 +#: commands/tablecmds.c:6693 #, c-format msgid "cannot drop column from typed table" msgstr "aus einer getypten Tabelle können keine Spalten gelöscht werden" -#: commands/tablecmds.c:6173 +#: commands/tablecmds.c:6738 #, c-format msgid "column \"%s\" of relation \"%s\" does not exist, skipping" msgstr "Spalte »%s« von Relation »%s« existiert nicht, wird übersprungen" -#: commands/tablecmds.c:6186 +#: commands/tablecmds.c:6751 #, c-format msgid "cannot drop system column \"%s\"" msgstr "Systemspalte »%s« kann nicht gelöscht werden" -#: commands/tablecmds.c:6193 +#: commands/tablecmds.c:6758 #, c-format msgid "cannot drop inherited column \"%s\"" msgstr "geerbte Spalte »%s« kann nicht gelöscht werden" -#: commands/tablecmds.c:6202 -#, fuzzy, c-format -#| msgid "cannot drop column from typed table" +#: commands/tablecmds.c:6769 +#, c-format msgid "cannot drop column named in partition key" -msgstr "aus einer getypten Tabelle können keine Spalten gelöscht werden" +msgstr "eine im Partitionierungsschlüssel verwendete Spalte kann nicht gelöscht werden" -#: commands/tablecmds.c:6206 -#, fuzzy, c-format -#| msgid "cannot use column references in default expression" +#: commands/tablecmds.c:6773 +#, c-format msgid "cannot drop column referenced in partition key expression" -msgstr "Spaltenverweise können nicht in Vorgabeausdrücken verwendet werden" +msgstr "eine im Partitionierungsschlüsselausdruck verwendete Spalte kann nicht gelöscht werden" + +#: commands/tablecmds.c:6797 +#, c-format +msgid "cannot drop column from only the partitioned table when partitions exist" +msgstr "Spalte kann nicht nur aus der partitionierten Tabelle gelöscht werden, wenn Partitionen existieren" -#: commands/tablecmds.c:6230 +#: commands/tablecmds.c:7002 #, fuzzy, c-format -#| msgid "column must be added to child tables too" -msgid "column must be dropped from child tables too" -msgstr "Spalte muss ebenso in den abgeleiteten Tabellen hinzugefügt werden" +#| msgid "ALTER TABLE / ADD CONSTRAINT USING INDEX will rename index \"%s\" to \"%s\"" +msgid "ALTER TABLE / ADD CONSTRAINT USING INDEX is not supported on partitioned tables" +msgstr "ALTER TABLE / ADD CONSTRAINT USING INDEX benennt Index »%s« um in »%s«" -#: commands/tablecmds.c:6446 +#: commands/tablecmds.c:7027 #, c-format msgid "ALTER TABLE / ADD CONSTRAINT USING INDEX will rename index \"%s\" to \"%s\"" msgstr "ALTER TABLE / ADD CONSTRAINT USING INDEX benennt Index »%s« um in »%s«" -#: commands/tablecmds.c:6730 -#, fuzzy, c-format -#| msgid "cannot insert into foreign table \"%s\"" +#: commands/tablecmds.c:7244 +#, c-format +msgid "constraint must be added to child tables too" +msgstr "Constraint muss ebenso in den abgeleiteten Tabellen hinzugefügt werden" + +#: commands/tablecmds.c:7316 +#, c-format msgid "cannot reference partitioned table \"%s\"" -msgstr "kann nicht in Fremdtabelle »%s« einfügen" +msgstr "Fremdschlüssel kann nicht auf partitionierte Tabelle »%s« verweisen" -#: commands/tablecmds.c:6736 +#: commands/tablecmds.c:7324 +#, fuzzy, c-format +#| msgid "cannot reference partitioned table \"%s\"" +msgid "foreign key referencing partitioned table \"%s\" must not be ONLY" +msgstr "Fremdschlüssel kann nicht auf partitionierte 
Tabelle »%s« verweisen" + +#: commands/tablecmds.c:7329 +#, fuzzy, c-format +#| msgid "cannot rewrite system relation \"%s\"" +msgid "cannot add NOT VALID foreign key to relation \"%s\"" +msgstr "Systemrelation »%s« kann nicht neu geschrieben werden" + +#: commands/tablecmds.c:7331 +#, fuzzy, c-format +#| msgid "unique constraints are not supported on partitioned tables" +msgid "This feature is not yet supported on partitioned tables." +msgstr "Unique-Constraints auf partitionierten Tabellen werden nicht unterstützt" + +#: commands/tablecmds.c:7337 #, c-format msgid "referenced relation \"%s\" is not a table" msgstr "Relation »%s«, auf die verwiesen wird, ist keine Tabelle" -#: commands/tablecmds.c:6759 +#: commands/tablecmds.c:7360 #, c-format msgid "constraints on permanent tables may reference only permanent tables" msgstr "Constraints für permanente Tabellen dürfen nur auf permanente Tabellen verweisen" -#: commands/tablecmds.c:6766 +#: commands/tablecmds.c:7367 #, c-format msgid "constraints on unlogged tables may reference only permanent or unlogged tables" msgstr "Constraints für ungeloggte Tabellen dürfen nur auf permanente oder ungeloggte Tabellen verweisen" -#: commands/tablecmds.c:6772 +#: commands/tablecmds.c:7373 #, c-format msgid "constraints on temporary tables may reference only temporary tables" msgstr "Constraints für temporäre Tabellen dürfen nur auf temporäre Tabellen verweisen" -#: commands/tablecmds.c:6776 +#: commands/tablecmds.c:7377 #, c-format msgid "constraints on temporary tables must involve temporary tables of this session" msgstr "Constraints für temporäre Tabellen müssen temporäre Tabellen dieser Sitzung beinhalten" -#: commands/tablecmds.c:6837 +#: commands/tablecmds.c:7437 #, c-format msgid "number of referencing and referenced columns for foreign key disagree" msgstr "Anzahl der Quell- und Zielspalten im Fremdschlüssel stimmt nicht überein" -#: commands/tablecmds.c:6944 +#: commands/tablecmds.c:7544 #, c-format msgid "foreign key constraint \"%s\" cannot be implemented" msgstr "Fremdschlüssel-Constraint »%s« kann nicht implementiert werden" -#: commands/tablecmds.c:6947 +#: commands/tablecmds.c:7547 #, c-format msgid "Key columns \"%s\" and \"%s\" are of incompatible types: %s and %s." msgstr "Schlüsselspalten »%s« und »%s« haben inkompatible Typen: %s und %s." 
-#: commands/tablecmds.c:7153 commands/tablecmds.c:7319 -#: commands/tablecmds.c:8225 commands/tablecmds.c:8291 +#: commands/tablecmds.c:7792 commands/tablecmds.c:7958 +#: commands/tablecmds.c:8903 commands/tablecmds.c:8971 #, c-format msgid "constraint \"%s\" of relation \"%s\" does not exist" msgstr "Constraint »%s« von Relation »%s« existiert nicht" -#: commands/tablecmds.c:7159 +#: commands/tablecmds.c:7798 #, c-format msgid "constraint \"%s\" of relation \"%s\" is not a foreign key constraint" msgstr "Constraint »%s« von Relation »%s« ist kein Fremdschlüssel-Constraint" -#: commands/tablecmds.c:7326 +#: commands/tablecmds.c:7965 #, c-format msgid "constraint \"%s\" of relation \"%s\" is not a foreign key or check constraint" msgstr "Constraint »%s« von Relation »%s« ist kein Fremdschlüssel- oder Check-Constraint" -#: commands/tablecmds.c:7395 +#: commands/tablecmds.c:8035 #, c-format msgid "constraint must be validated on child tables too" msgstr "Constraint muss ebenso in den abgeleiteten Tabellen validiert werden" -#: commands/tablecmds.c:7463 +#: commands/tablecmds.c:8103 #, c-format msgid "column \"%s\" referenced in foreign key constraint does not exist" msgstr "Spalte »%s«, die im Fremdschlüssel verwendet wird, existiert nicht" -#: commands/tablecmds.c:7468 +#: commands/tablecmds.c:8108 #, c-format msgid "cannot have more than %d keys in a foreign key" msgstr "Fremdschlüssel kann nicht mehr als %d Schlüssel haben" -#: commands/tablecmds.c:7533 +#: commands/tablecmds.c:8173 #, c-format msgid "cannot use a deferrable primary key for referenced table \"%s\"" msgstr "aufschiebbarer Primärschlüssel kann nicht für Tabelle »%s«, auf die verwiesen wird, verwendet werden" -#: commands/tablecmds.c:7550 +#: commands/tablecmds.c:8190 #, c-format msgid "there is no primary key for referenced table \"%s\"" msgstr "in Tabelle »%s«, auf die verwiesen wird, gibt es keinen Primärschlüssel" -#: commands/tablecmds.c:7615 +#: commands/tablecmds.c:8255 #, c-format msgid "foreign key referenced-columns list must not contain duplicates" msgstr "die Liste der Spalten, auf die ein Fremdschlüssel verweist, darf keine doppelten Einträge enthalten" -#: commands/tablecmds.c:7709 +#: commands/tablecmds.c:8349 #, c-format msgid "cannot use a deferrable unique constraint for referenced table \"%s\"" msgstr "aufschiebbarer Unique-Constraint kann nicht für Tabelle »%s«, auf die verwiesen wird, verwendet werden" -#: commands/tablecmds.c:7714 +#: commands/tablecmds.c:8354 #, c-format msgid "there is no unique constraint matching given keys for referenced table \"%s\"" msgstr "in Tabelle »%s«, auf die verwiesen wird, gibt es keinen Unique-Constraint, der auf die angegebenen Schlüssel passt" -#: commands/tablecmds.c:7877 +#: commands/tablecmds.c:8525 #, c-format msgid "validating foreign key constraint \"%s\"" msgstr "validiere Fremdschlüssel-Constraint »%s«" -#: commands/tablecmds.c:8179 +#: commands/tablecmds.c:8857 #, c-format msgid "cannot drop inherited constraint \"%s\" of relation \"%s\"" msgstr "geerbter Constraint »%s« von Relation »%s« kann nicht gelöscht werden" -#: commands/tablecmds.c:8231 +#: commands/tablecmds.c:8909 #, c-format msgid "constraint \"%s\" of relation \"%s\" does not exist, skipping" msgstr "Constraint »%s« von Relation »%s« existiert nicht, wird übersprungen" -#: commands/tablecmds.c:8379 +#: commands/tablecmds.c:9059 #, c-format msgid "cannot alter column type of typed table" msgstr "Spaltentyp einer getypten Tabelle kann nicht geändert werden" -#: commands/tablecmds.c:8402 +#: 
commands/tablecmds.c:9082 #, c-format msgid "cannot alter inherited column \"%s\"" msgstr "kann vererbte Spalte »%s« nicht ändern" -#: commands/tablecmds.c:8411 -#, fuzzy, c-format -#| msgid "cannot alter type of a column used in a trigger definition" +#: commands/tablecmds.c:9093 +#, c-format msgid "cannot alter type of column named in partition key" -msgstr "Typ einer Spalte, die in einer Trigger-Definition verwendet wird, kann nicht geändert werden" +msgstr "Typ einer Spalte, die im Partitionierungsschlüssel verwendet wird, kann nicht geändert werden" -#: commands/tablecmds.c:8415 -#, fuzzy, c-format -#| msgid "cannot use column references in default expression" +#: commands/tablecmds.c:9097 +#, c-format msgid "cannot alter type of column referenced in partition key expression" -msgstr "Spaltenverweise können nicht in Vorgabeausdrücken verwendet werden" +msgstr "Typ einer Spalte, die im Partitionierungsschlüsselausdruck verwendet wird, kann nicht geändert werden" -#: commands/tablecmds.c:8465 +#: commands/tablecmds.c:9147 #, c-format msgid "result of USING clause for column \"%s\" cannot be cast automatically to type %s" msgstr "Ergebnis der USING-Klausel für Spalte »%s« kann nicht automatisch in Typ %s umgewandelt werden" -#: commands/tablecmds.c:8468 +#: commands/tablecmds.c:9150 #, c-format msgid "You might need to add an explicit cast." msgstr "Sie müssen möglicherweise eine ausdrückliche Typumwandlung hinzufügen." -#: commands/tablecmds.c:8472 +#: commands/tablecmds.c:9154 #, c-format msgid "column \"%s\" cannot be cast automatically to type %s" msgstr "Spalte »%s« kann nicht automatisch in Typ %s umgewandelt werden" #. translator: USING is SQL, don't translate it -#: commands/tablecmds.c:8475 +#: commands/tablecmds.c:9157 #, c-format msgid "You might need to specify \"USING %s::%s\"." msgstr "Sie müssen möglicherweise »USING %s::%s« angeben." -#: commands/tablecmds.c:8574 +#: commands/tablecmds.c:9256 #, c-format msgid "USING expression contains a whole-row table reference." msgstr "USING-Ausdruck enthält einen Verweis auf die ganze Zeile der Tabelle." 
-#: commands/tablecmds.c:8585 +#: commands/tablecmds.c:9267 #, c-format msgid "type of inherited column \"%s\" must be changed in child tables too" msgstr "Typ der vererbten Spalte »%s« muss ebenso in den abgeleiteten Tabellen geändert werden" -#: commands/tablecmds.c:8672 +#: commands/tablecmds.c:9356 #, c-format msgid "cannot alter type of column \"%s\" twice" msgstr "Typ der Spalte »%s« kann nicht zweimal geändert werden" -#: commands/tablecmds.c:8708 +#: commands/tablecmds.c:9392 #, c-format msgid "default for column \"%s\" cannot be cast automatically to type %s" msgstr "Vorgabewert der Spalte »%s« kann nicht automatisch in Typ %s umgewandelt werden" -#: commands/tablecmds.c:8834 +#: commands/tablecmds.c:9519 #, c-format msgid "cannot alter type of a column used by a view or rule" msgstr "Typ einer Spalte, die von einer Sicht oder Regel verwendet wird, kann nicht geändert werden" -#: commands/tablecmds.c:8835 commands/tablecmds.c:8854 -#: commands/tablecmds.c:8872 +#: commands/tablecmds.c:9520 commands/tablecmds.c:9539 +#: commands/tablecmds.c:9557 #, c-format msgid "%s depends on column \"%s\"" msgstr "%s hängt von Spalte »%s« ab" -#: commands/tablecmds.c:8853 +#: commands/tablecmds.c:9538 #, c-format msgid "cannot alter type of a column used in a trigger definition" msgstr "Typ einer Spalte, die in einer Trigger-Definition verwendet wird, kann nicht geändert werden" -#: commands/tablecmds.c:8871 +#: commands/tablecmds.c:9556 #, c-format msgid "cannot alter type of a column used in a policy definition" msgstr "Typ einer Spalte, die in einer Policy-Definition verwendet wird, kann nicht geändert werden" -#: commands/tablecmds.c:9528 +#: commands/tablecmds.c:10286 commands/tablecmds.c:10298 #, c-format msgid "cannot change owner of index \"%s\"" msgstr "kann Eigentümer des Index »%s« nicht ändern" -#: commands/tablecmds.c:9530 +#: commands/tablecmds.c:10288 commands/tablecmds.c:10300 #, c-format msgid "Change the ownership of the index's table, instead." msgstr "Ändern Sie stattdessen den Eigentümer der Tabelle des Index." -#: commands/tablecmds.c:9546 +#: commands/tablecmds.c:10314 #, c-format msgid "cannot change owner of sequence \"%s\"" msgstr "kann Eigentümer der Sequenz »%s« nicht ändern" -#: commands/tablecmds.c:9548 commands/tablecmds.c:12123 -#, c-format -msgid "Sequence \"%s\" is linked to table \"%s\"." -msgstr "Sequenz »%s« ist mit Tabelle »%s« verknüpft." - -#: commands/tablecmds.c:9560 commands/tablecmds.c:12770 +#: commands/tablecmds.c:10328 commands/tablecmds.c:13539 #, c-format msgid "Use ALTER TYPE instead." msgstr "Verwenden Sie stattdessen ALTER TYPE." 
-#: commands/tablecmds.c:9569 +#: commands/tablecmds.c:10337 #, c-format msgid "\"%s\" is not a table, view, sequence, or foreign table" msgstr "»%s« ist keine Tabelle, Sicht, Sequenz oder Fremdtabelle" -#: commands/tablecmds.c:9910 +#: commands/tablecmds.c:10681 #, c-format msgid "cannot have multiple SET TABLESPACE subcommands" msgstr "mehrere SET TABLESPACE Unterbefehle sind ungültig" -#: commands/tablecmds.c:9984 +#: commands/tablecmds.c:10756 #, c-format msgid "\"%s\" is not a table, view, materialized view, index, or TOAST table" msgstr "»%s« ist weder Tabelle, Sicht, materialisierte Sicht, Index noch TOAST-Tabelle" -#: commands/tablecmds.c:10017 commands/view.c:504 +#: commands/tablecmds.c:10789 commands/view.c:504 #, c-format msgid "WITH CHECK OPTION is supported only on automatically updatable views" msgstr "WITH CHECK OPTION wird nur für automatisch aktualisierbare Sichten unterstützt" -#: commands/tablecmds.c:10159 +#: commands/tablecmds.c:10931 #, c-format msgid "cannot move system relation \"%s\"" msgstr "Systemrelation »%s« kann nicht verschoben werden" -#: commands/tablecmds.c:10175 +#: commands/tablecmds.c:10947 #, c-format msgid "cannot move temporary tables of other sessions" msgstr "temporäre Tabellen anderer Sitzungen können nicht verschoben werden" -#: commands/tablecmds.c:10311 +#: commands/tablecmds.c:11083 #, c-format msgid "only tables, indexes, and materialized views exist in tablespaces" msgstr "nur Tabellen, Indexe und materialisierte Sichten existieren in Tablespaces" -#: commands/tablecmds.c:10323 +#: commands/tablecmds.c:11095 #, c-format msgid "cannot move relations in to or out of pg_global tablespace" msgstr "Relationen können nicht in den oder aus dem Tablespace »pg_global« verschoben werden" -#: commands/tablecmds.c:10415 +#: commands/tablecmds.c:11188 #, c-format msgid "aborting because lock on relation \"%s.%s\" is not available" msgstr "Abbruch weil Sperre für Relation »%s.%s« nicht verfügbar ist" -#: commands/tablecmds.c:10431 +#: commands/tablecmds.c:11204 #, c-format msgid "no matching relations in tablespace \"%s\" found" msgstr "keine passenden Relationen in Tablespace »%s« gefunden" -#: commands/tablecmds.c:10505 storage/buffer/bufmgr.c:915 +#: commands/tablecmds.c:11278 storage/buffer/bufmgr.c:915 #, c-format msgid "invalid page in block %u of relation %s" msgstr "ungültige Seite in Block %u von Relation %s" -#: commands/tablecmds.c:10587 +#: commands/tablecmds.c:11360 #, c-format msgid "cannot change inheritance of typed table" msgstr "Vererbung einer getypten Tabelle kann nicht geändert werden" -#: commands/tablecmds.c:10592 commands/tablecmds.c:11120 -#, fuzzy, c-format -#| msgid "cannot change inheritance of typed table" +#: commands/tablecmds.c:11365 commands/tablecmds.c:11908 +#, c-format msgid "cannot change inheritance of a partition" -msgstr "Vererbung einer getypten Tabelle kann nicht geändert werden" +msgstr "Vererbung einer Partition kann nicht geändert werden" -#: commands/tablecmds.c:10597 -#, fuzzy, c-format -#| msgid "cannot change inheritance of typed table" +#: commands/tablecmds.c:11370 +#, c-format msgid "cannot change inheritance of partitioned table" -msgstr "Vererbung einer getypten Tabelle kann nicht geändert werden" +msgstr "Vererbung einer partitionierten Tabelle kann nicht geändert werden" -#: commands/tablecmds.c:10642 +#: commands/tablecmds.c:11416 #, c-format msgid "cannot inherit to temporary relation of another session" msgstr "an temporäre Relation einer anderen Sitzung kann nicht vererbt werden" -#: 
commands/tablecmds.c:10655 -#, fuzzy, c-format -#| msgid "cannot inherit from temporary relation \"%s\"" +#: commands/tablecmds.c:11429 +#, c-format msgid "cannot inherit from a partition" -msgstr "von temporärer Relation »%s« kann nicht geerbt werden" +msgstr "von einer Partition kann nicht geerbt werden" -#: commands/tablecmds.c:10677 commands/tablecmds.c:13122 +#: commands/tablecmds.c:11451 commands/tablecmds.c:14133 #, c-format msgid "circular inheritance not allowed" msgstr "zirkuläre Vererbung ist nicht erlaubt" -#: commands/tablecmds.c:10678 commands/tablecmds.c:13123 +#: commands/tablecmds.c:11452 commands/tablecmds.c:14134 #, c-format msgid "\"%s\" is already a child of \"%s\"." msgstr "»%s« ist schon von »%s« abgeleitet." -#: commands/tablecmds.c:10686 +#: commands/tablecmds.c:11460 #, c-format msgid "table \"%s\" without OIDs cannot inherit from table \"%s\" with OIDs" msgstr "Tabelle »%s« ohne OIDs kann nicht von Tabelle »%s« mit OIDs erben" -#: commands/tablecmds.c:10890 +#: commands/tablecmds.c:11473 +#, c-format +msgid "trigger \"%s\" prevents table \"%s\" from becoming an inheritance child" +msgstr "Trigger »%s« verhindert, dass Tabelle »%s« ein Vererbungskind werden kann" + +#: commands/tablecmds.c:11475 +#, c-format +msgid "ROW triggers with transition tables are not supported in inheritance hierarchies" +msgstr "ROW-Trigger mit Übergangstabellen werden in Vererbungshierarchien nicht unterstützt" + +#: commands/tablecmds.c:11678 #, c-format msgid "column \"%s\" in child table must be marked NOT NULL" msgstr "Spalte »%s« in abgeleiteter Tabelle muss als NOT NULL markiert sein" -#: commands/tablecmds.c:10917 commands/tablecmds.c:10956 +#: commands/tablecmds.c:11705 commands/tablecmds.c:11744 #, c-format msgid "child table is missing column \"%s\"" msgstr "Spalte »%s« fehlt in abgeleiteter Tabelle" -#: commands/tablecmds.c:11044 +#: commands/tablecmds.c:11832 #, c-format msgid "child table \"%s\" has different definition for check constraint \"%s\"" msgstr "abgeleitete Tabelle »%s« hat unterschiedliche Definition für Check-Constraint »%s«" -#: commands/tablecmds.c:11052 +#: commands/tablecmds.c:11840 #, c-format msgid "constraint \"%s\" conflicts with non-inherited constraint on child table \"%s\"" msgstr "Constraint »%s« kollidiert mit nicht vererbtem Constraint für abgeleitete Tabelle »%s«" -#: commands/tablecmds.c:11063 +#: commands/tablecmds.c:11851 #, c-format msgid "constraint \"%s\" conflicts with NOT VALID constraint on child table \"%s\"" msgstr "Constraint »%s« kollidiert mit NOT-VALID-Constraint für abgeleitete Tabelle »%s«" -#: commands/tablecmds.c:11098 +#: commands/tablecmds.c:11886 #, c-format msgid "child table is missing constraint \"%s\"" msgstr "Constraint »%s« fehlt in abgeleiteter Tabelle" -#: commands/tablecmds.c:11214 -#, fuzzy, c-format -#| msgid "relation \"%s\" is not a parent of relation \"%s\"" +#: commands/tablecmds.c:11975 +#, c-format msgid "relation \"%s\" is not a partition of relation \"%s\"" -msgstr "Relation »%s« ist keine Basisrelation von Relation »%s«" +msgstr "Relation »%s« ist keine Partition von Relation »%s«" -#: commands/tablecmds.c:11220 +#: commands/tablecmds.c:11981 #, c-format msgid "relation \"%s\" is not a parent of relation \"%s\"" msgstr "Relation »%s« ist keine Basisrelation von Relation »%s«" -#: commands/tablecmds.c:11444 +#: commands/tablecmds.c:12207 #, c-format msgid "typed tables cannot inherit" msgstr "getypte Tabellen können nicht erben" -#: commands/tablecmds.c:11475 +#: commands/tablecmds.c:12238 #, c-format 
msgid "table is missing column \"%s\"" msgstr "Spalte »%s« fehlt in Tabelle" -#: commands/tablecmds.c:11485 +#: commands/tablecmds.c:12249 #, c-format msgid "table has column \"%s\" where type requires \"%s\"" msgstr "Tabelle hat Spalte »%s«, aber Typ benötigt »%s«" -#: commands/tablecmds.c:11494 +#: commands/tablecmds.c:12258 #, c-format msgid "table \"%s\" has different type for column \"%s\"" msgstr "Tabelle »%s« hat unterschiedlichen Typ für Spalte »%s«" -#: commands/tablecmds.c:11507 +#: commands/tablecmds.c:12272 #, c-format msgid "table has extra column \"%s\"" msgstr "Tabelle hat zusätzliche Spalte »%s«" -#: commands/tablecmds.c:11558 +#: commands/tablecmds.c:12324 #, c-format msgid "\"%s\" is not a typed table" msgstr "»%s« ist keine getypte Tabelle" -#: commands/tablecmds.c:11739 +#: commands/tablecmds.c:12506 #, c-format msgid "cannot use non-unique index \"%s\" as replica identity" msgstr "nicht eindeutiger Index »%s« kann nicht als Replik-Identität verwendet werden" -#: commands/tablecmds.c:11745 +#: commands/tablecmds.c:12512 #, c-format msgid "cannot use non-immediate index \"%s\" as replica identity" msgstr "Index »%s« kann nicht als Replik-Identität verwendet werden, weil er nicht IMMEDIATE ist" -#: commands/tablecmds.c:11751 +#: commands/tablecmds.c:12518 #, c-format msgid "cannot use expression index \"%s\" as replica identity" msgstr "Ausdrucksindex »%s« kann nicht als Replik-Identität verwendet werden" -#: commands/tablecmds.c:11757 +#: commands/tablecmds.c:12524 #, c-format msgid "cannot use partial index \"%s\" as replica identity" msgstr "partieller Index »%s« kann nicht als Replik-Identität verwendet werden" -#: commands/tablecmds.c:11763 +#: commands/tablecmds.c:12530 #, c-format msgid "cannot use invalid index \"%s\" as replica identity" msgstr "ungültiger Index »%s« kann nicht als Replik-Identität verwendet werden" -#: commands/tablecmds.c:11784 +#: commands/tablecmds.c:12551 #, c-format msgid "index \"%s\" cannot be used as replica identity because column %d is a system column" msgstr "Index »%s« kann nicht als Replik-Identität verwendet werden, weil Spalte %d eine Systemspalte ist" -#: commands/tablecmds.c:11791 +#: commands/tablecmds.c:12558 #, c-format msgid "index \"%s\" cannot be used as replica identity because column \"%s\" is nullable" msgstr "Index »%s« kann nicht als Replik-Identität verwendet werden, weil Spalte »%s« NULL-Werte akzeptiert" -#: commands/tablecmds.c:11984 +#: commands/tablecmds.c:12751 #, c-format msgid "cannot change logged status of table \"%s\" because it is temporary" msgstr "kann den geloggten Status der Tabelle »%s« nicht ändern, weil sie temporär ist" -#: commands/tablecmds.c:12008 -#, fuzzy, c-format +#: commands/tablecmds.c:12775 +#, c-format msgid "cannot change table \"%s\" to unlogged because it is part of a publication" msgstr "kann Tabelle »%s« nicht in ungeloggt ändern, weil sie Teil einer Publikation ist" -#: commands/tablecmds.c:12010 +#: commands/tablecmds.c:12777 #, c-format msgid "Unlogged relations cannot be replicated." msgstr "Ungeloggte Relationen können nicht repliziert werden." 
-#: commands/tablecmds.c:12055 +#: commands/tablecmds.c:12822 #, c-format msgid "could not change table \"%s\" to logged because it references unlogged table \"%s\"" msgstr "konnte Tabelle »%s« nicht in geloggt ändern, weil sie auf die ungeloggte Tabelle »%s« verweist" -#: commands/tablecmds.c:12065 +#: commands/tablecmds.c:12832 #, c-format msgid "could not change table \"%s\" to unlogged because it references logged table \"%s\"" msgstr "konnte Tabelle »%s« nicht in ungeloggt ändern, weil sie auf die geloggte Tabelle »%s« verweist" -#: commands/tablecmds.c:12122 +#: commands/tablecmds.c:12890 #, c-format msgid "cannot move an owned sequence into another schema" msgstr "einer Tabelle zugeordnete Sequenz kann nicht in ein anderes Schema verschoben werden" -#: commands/tablecmds.c:12228 +#: commands/tablecmds.c:12996 #, c-format msgid "relation \"%s\" already exists in schema \"%s\"" msgstr "Relation »%s« existiert bereits in Schema »%s«" -#: commands/tablecmds.c:12754 +#: commands/tablecmds.c:13522 #, c-format msgid "\"%s\" is not a composite type" msgstr "»%s« ist kein zusammengesetzter Typ" -#: commands/tablecmds.c:12785 +#: commands/tablecmds.c:13554 #, c-format msgid "\"%s\" is not a table, view, materialized view, sequence, or foreign table" msgstr "»%s« ist weder Tabelle, Sicht, materialisierte Sicht, Sequenz noch Fremdtabelle" -#: commands/tablecmds.c:12816 -#, fuzzy, c-format -#| msgid "unrecognized privilege type \"%s\"" +#: commands/tablecmds.c:13589 +#, c-format msgid "unrecognized partitioning strategy \"%s\"" -msgstr "unbekannter Privilegtyp »%s«" +msgstr "unbekannte Partitionierungsstrategie »%s«" -#: commands/tablecmds.c:12842 -#, fuzzy, c-format -#| msgid "common column name \"%s\" appears more than once in right table" +#: commands/tablecmds.c:13597 +#, c-format +msgid "cannot use \"list\" partition strategy with more than one column" +msgstr "Partitionierungsstrategie »list« kann nicht mit mehr als einer Spalte verwendet werden" + +#: commands/tablecmds.c:13622 +#, c-format msgid "column \"%s\" appears more than once in partition key" -msgstr "gemeinsamer Spaltenname »%s« erscheint mehrmals in der rechten Tabelle" +msgstr "Spalte »%s« erscheint mehrmals im Partitionierungsschlüssel" -#: commands/tablecmds.c:12890 -#, fuzzy, c-format -#| msgid "column \"%s\" named in key does not exist" +#: commands/tablecmds.c:13677 +#, c-format msgid "column \"%s\" named in partition key does not exist" -msgstr "Spalte »%s«, die im Schlüssel verwendet wird, existiert nicht" +msgstr "Spalte »%s«, die im Partitionierungsschlüssel verwendet wird, existiert nicht" -#: commands/tablecmds.c:12897 -#, fuzzy, c-format -#| msgid "cannot alter system column \"%s\"" +#: commands/tablecmds.c:13684 +#, c-format msgid "cannot use system column \"%s\" in partition key" -msgstr "Systemspalte »%s« kann nicht geändert werden" +msgstr "Systemspalte »%s« kann nicht im Partitionierungsschlüssel verwendet werden" -#: commands/tablecmds.c:12955 -#, fuzzy, c-format -#| msgid "functions in index expression must be marked IMMUTABLE" +#: commands/tablecmds.c:13747 +#, c-format msgid "functions in partition key expression must be marked IMMUTABLE" -msgstr "Funktionen im Indexausdruck müssen als IMMUTABLE markiert sein" +msgstr "Funktionen im Partitionierungsschlüsselausdruck müssen als IMMUTABLE markiert sein" -#: commands/tablecmds.c:12964 -#, fuzzy, c-format -#| msgid "cannot use expression index \"%s\" as replica identity" +#: commands/tablecmds.c:13764 +#, c-format +msgid "partition key expressions cannot 
contain whole-row references" +msgstr "Partitionierungsschlüsselausdruck kann nicht Verweis auf die ganze Zeile der Tabelle enthalten" + +#: commands/tablecmds.c:13771 +#, c-format +msgid "partition key expressions cannot contain system column references" +msgstr "Partitionierungsschlüsselausdruck kann nicht auf Systemspalten verweisen" + +#: commands/tablecmds.c:13781 +#, c-format msgid "cannot use constant expression as partition key" -msgstr "Ausdrucksindex »%s« kann nicht als Replik-Identität verwendet werden" +msgstr "Partitionierungsschlüssel kann kein konstanter Ausdruck sein" -#: commands/tablecmds.c:12978 -#, fuzzy, c-format -#| msgid "USING expression contains a whole-row table reference." -msgid "partition key expressions cannot contain whole-row references" -msgstr "USING-Ausdruck enthält einen Verweis auf die ganze Zeile der Tabelle." +#: commands/tablecmds.c:13802 +#, c-format +msgid "could not determine which collation to use for partition expression" +msgstr "konnte die für den Partitionierungsausdruck zu verwendende Sortierfolge nicht bestimmen" -#: commands/tablecmds.c:12999 +#: commands/tablecmds.c:13835 #, fuzzy, c-format -#| msgid "could not determine which collation to use for index expression" -msgid "could not determine which collation to use for partition expression" -msgstr "konnte die für den Indexausdruck zu verwendende Sortierfolge nicht bestimmen" +#| msgid "data type %s has no default btree operator class" +msgid "data type %s has no default hash operator class" +msgstr "Datentyp %s hat keine Standardoperatorklasse für btree" -#: commands/tablecmds.c:13024 +#: commands/tablecmds.c:13837 #, fuzzy, c-format -#| msgid "data type %s has no default operator class for access method \"%s\"" +#| msgid "You must specify a btree operator class or define a default btree operator class for the data type." +msgid "You must specify a hash operator class or define a default hash operator class for the data type." +msgstr "Sie müssen eine btree-Operatorklasse angeben oder eine btree-Standardoperatorklasse für den Datentyp definieren." + +#: commands/tablecmds.c:13841 +#, c-format msgid "data type %s has no default btree operator class" -msgstr "Datentyp %s hat keine Standardoperatorklasse für Zugriffsmethode »%s«" +msgstr "Datentyp %s hat keine Standardoperatorklasse für btree" -#: commands/tablecmds.c:13026 -#, fuzzy, c-format -#| msgid "You must specify an operator class for the index or define a default operator class for the data type." +#: commands/tablecmds.c:13843 +#, c-format msgid "You must specify a btree operator class or define a default btree operator class for the data type." -msgstr "Sie müssen für den Index eine Operatorklasse angeben oder eine Standardoperatorklasse für den Datentyp definieren." +msgstr "Sie müssen eine btree-Operatorklasse angeben oder eine btree-Standardoperatorklasse für den Datentyp definieren." 
+ +#: commands/tablecmds.c:13968 +#, c-format +msgid "partition constraint for table \"%s\" is implied by existing constraints" +msgstr "Partitions-Constraint für Tabelle »%s« ist schon in bestehenden Constraints inbegriffen" -#: commands/tablecmds.c:13073 +#: commands/tablecmds.c:13972 partitioning/partbounds.c:621 +#: partitioning/partbounds.c:666 #, fuzzy, c-format -#| msgid "\"%s\" is already a view" +#| msgid "partition constraint for table \"%s\" is implied by existing constraints" +msgid "updated partition constraint for default partition \"%s\" is implied by existing constraints" +msgstr "Partitions-Constraint für Tabelle »%s« ist schon in bestehenden Constraints inbegriffen" + +#: commands/tablecmds.c:14073 +#, c-format msgid "\"%s\" is already a partition" -msgstr "»%s« ist bereits eine Sicht" +msgstr "»%s« ist bereits eine Partition" -#: commands/tablecmds.c:13079 -#, fuzzy, c-format -#| msgid "cannot add column to typed table" +#: commands/tablecmds.c:14079 +#, c-format msgid "cannot attach a typed table as partition" -msgstr "zu einer getypten Tabelle kann keine Spalte hinzugefügt werden" +msgstr "eine getypte Tabelle kann nicht als Partition angefügt werden" -#: commands/tablecmds.c:13095 +#: commands/tablecmds.c:14095 #, c-format msgid "cannot attach inheritance child as partition" -msgstr "" +msgstr "ein Vererbungskind kann nicht als Partition angefügt werden" -#: commands/tablecmds.c:13109 -#, fuzzy, c-format -#| msgid "cannot change inheritance of typed table" +#: commands/tablecmds.c:14109 +#, c-format msgid "cannot attach inheritance parent as partition" -msgstr "Vererbung einer getypten Tabelle kann nicht geändert werden" +msgstr "eine Tabelle mit abgeleiteten Tabellen kann nicht als Partition angefügt werden" -#: commands/tablecmds.c:13132 -#, fuzzy, c-format -#| msgid "cannot inherit from temporary relation \"%s\"" +#: commands/tablecmds.c:14143 +#, c-format msgid "cannot attach a permanent relation as partition of temporary relation \"%s\"" -msgstr "von temporärer Relation »%s« kann nicht geerbt werden" +msgstr "eine permanente Relation kann nicht als Partition an temporäre Relation »%s« angefügt werden" -#: commands/tablecmds.c:13140 -#, fuzzy, c-format -#| msgid "cannot inherit to temporary relation of another session" +#: commands/tablecmds.c:14151 +#, c-format msgid "cannot attach as partition of temporary relation of another session" -msgstr "an temporäre Relation einer anderen Sitzung kann nicht vererbt werden" +msgstr "kann nicht als Partition an temporäre Relation einer anderen Sitzung anfügen" -#: commands/tablecmds.c:13147 -#, fuzzy, c-format -#| msgid "cannot inherit to temporary relation of another session" +#: commands/tablecmds.c:14158 +#, c-format msgid "cannot attach temporary relation of another session as partition" -msgstr "an temporäre Relation einer anderen Sitzung kann nicht vererbt werden" +msgstr "temporäre Relation einer anderen Sitzung kann nicht als Partition angefügt werden" -#: commands/tablecmds.c:13153 -#, fuzzy, c-format -#| msgid "table \"%s\" without OIDs cannot inherit from table \"%s\" with OIDs" +#: commands/tablecmds.c:14164 +#, c-format msgid "cannot attach table \"%s\" without OIDs as partition of table \"%s\" with OIDs" -msgstr "Tabelle »%s« ohne OIDs kann nicht von Tabelle »%s« mit OIDs erben" +msgstr "kann Tabelle »%s« ohne OIDs nicht als Partition an Tabelle »%s« mit OIDs anfügen" -#: commands/tablecmds.c:13161 -#, fuzzy, c-format -#| msgid "table \"%s\" without OIDs cannot inherit from table \"%s\" with OIDs" +#: 
commands/tablecmds.c:14172 +#, c-format msgid "cannot attach table \"%s\" with OIDs as partition of table \"%s\" without OIDs" -msgstr "Tabelle »%s« ohne OIDs kann nicht von Tabelle »%s« mit OIDs erben" +msgstr "kann Tabelle »%s« mit OIDs nicht als Partition an Tabelle »%s« ohne OIDs anfügen" -#: commands/tablecmds.c:13183 -#, fuzzy, c-format -#| msgid "column \"%s\" not found in data type %s" +#: commands/tablecmds.c:14194 +#, c-format msgid "table \"%s\" contains column \"%s\" not found in parent \"%s\"" -msgstr "Spalte »%s« nicht gefunden im Datentyp %s" +msgstr "Tabelle »%s« enthält Spalte »%s«, die nicht in der Elterntabelle »%s« gefunden wurde" -#: commands/tablecmds.c:13186 +#: commands/tablecmds.c:14197 #, c-format -msgid "New partition should contain only the columns present in parent." -msgstr "" +msgid "The new partition may contain only the columns present in parent." +msgstr "Die neue Partition darf nur Spalten enthalten, die auch die Elterntabelle hat." -#: commands/tablecmds.c:13358 +#: commands/tablecmds.c:14209 #, c-format -msgid "partition constraint for table \"%s\" is implied by existing constraints" +msgid "trigger \"%s\" prevents table \"%s\" from becoming a partition" +msgstr "Trigger »%s« verhindert, dass Tabelle »%s« eine Partition werden kann" + +#: commands/tablecmds.c:14211 commands/trigger.c:462 +#, c-format +msgid "ROW triggers with transition tables are not supported on partitions" +msgstr "ROW-Trigger mit Übergangstabellen werden für Partitionen nicht unterstützt" + +#: commands/tablecmds.c:14849 commands/tablecmds.c:14868 +#: commands/tablecmds.c:14890 commands/tablecmds.c:14909 +#: commands/tablecmds.c:14965 +#, fuzzy, c-format +#| msgid "cannot attach table \"%s\" without OIDs as partition of table \"%s\" with OIDs" +msgid "cannot attach index \"%s\" as a partition of index \"%s\"" +msgstr "kann Tabelle »%s« ohne OIDs nicht als Partition an Tabelle »%s« mit OIDs anfügen" + +#: commands/tablecmds.c:14852 +#, fuzzy, c-format +#| msgid "index \"%s\" is already associated with a constraint" +msgid "Index \"%s\" is already attached to another index." +msgstr "Index »%s« gehört bereits zu einem Constraint" + +#: commands/tablecmds.c:14871 +#, fuzzy, c-format +#| msgid "\"%s\" is not an index for table \"%s\"" +msgid "Index \"%s\" is not an index on any partition of table \"%s\"." +msgstr "»%s« ist kein Index für Tabelle »%s«" + +#: commands/tablecmds.c:14893 +#, fuzzy, c-format +#| msgid "Nonce does not match." +msgid "The index definitions do not match." +msgstr "Nonce stimmt nicht überein." + +#: commands/tablecmds.c:14912 +#, c-format +msgid "The index \"%s\" belongs to a constraint in table \"%s\" but no constraint exists for index \"%s\"." msgstr "" -#: commands/tablespace.c:162 commands/tablespace.c:179 -#: commands/tablespace.c:190 commands/tablespace.c:198 -#: commands/tablespace.c:623 replication/slot.c:1017 storage/file/copydir.c:47 +#: commands/tablecmds.c:14968 +#, fuzzy, c-format +#| msgid "cannot inherit from partition \"%s\"" +msgid "Another index is already attached for partition \"%s\"." 
+msgstr "von Partition »%s« kann nicht geerbt werden" + +#: commands/tablespace.c:163 commands/tablespace.c:180 +#: commands/tablespace.c:191 commands/tablespace.c:199 +#: commands/tablespace.c:625 replication/slot.c:1179 storage/file/copydir.c:47 #, c-format msgid "could not create directory \"%s\": %m" msgstr "konnte Verzeichnis »%s« nicht erzeugen: %m" -#: commands/tablespace.c:209 +#: commands/tablespace.c:210 utils/adt/genfile.c:581 #, c-format msgid "could not stat directory \"%s\": %m" msgstr "konnte »stat« für Verzeichnis »%s« nicht ausführen: %m" -#: commands/tablespace.c:218 +#: commands/tablespace.c:219 #, c-format msgid "\"%s\" exists but is not a directory" msgstr "»%s« existiert, ist aber kein Verzeichnis" -#: commands/tablespace.c:249 +#: commands/tablespace.c:250 #, c-format msgid "permission denied to create tablespace \"%s\"" msgstr "keine Berechtigung, um Tablespace »%s« zu erzeugen" -#: commands/tablespace.c:251 +#: commands/tablespace.c:252 #, c-format msgid "Must be superuser to create a tablespace." msgstr "Nur Superuser können Tablespaces anlegen." -#: commands/tablespace.c:267 +#: commands/tablespace.c:268 #, c-format msgid "tablespace location cannot contain single quotes" msgstr "Tablespace-Pfad darf keine Apostrophe enthalten" -#: commands/tablespace.c:277 +#: commands/tablespace.c:278 #, c-format msgid "tablespace location must be an absolute path" msgstr "Tablespace-Pfad muss ein absoluter Pfad sein" -#: commands/tablespace.c:288 +#: commands/tablespace.c:290 #, c-format msgid "tablespace location \"%s\" is too long" msgstr "Tablespace-Pfad »%s« ist zu lang" -#: commands/tablespace.c:295 +#: commands/tablespace.c:297 #, c-format msgid "tablespace location should not be inside the data directory" msgstr "Tablespace-Pfad sollte nicht innerhalb des Datenverzeichnisses sein" -#: commands/tablespace.c:304 commands/tablespace.c:950 +#: commands/tablespace.c:306 commands/tablespace.c:952 #, c-format msgid "unacceptable tablespace name \"%s\"" msgstr "inakzeptabler Tablespace-Name »%s«" -#: commands/tablespace.c:306 commands/tablespace.c:951 +#: commands/tablespace.c:308 commands/tablespace.c:953 #, c-format msgid "The prefix \"pg_\" is reserved for system tablespaces." msgstr "Der Präfix »pg_« ist für System-Tablespaces reserviert." 
-#: commands/tablespace.c:316 commands/tablespace.c:963 +#: commands/tablespace.c:318 commands/tablespace.c:965 #, c-format msgid "tablespace \"%s\" already exists" msgstr "Tablespace »%s« existiert bereits" -#: commands/tablespace.c:428 commands/tablespace.c:933 -#: commands/tablespace.c:1013 commands/tablespace.c:1081 -#: commands/tablespace.c:1214 commands/tablespace.c:1414 +#: commands/tablespace.c:430 commands/tablespace.c:935 +#: commands/tablespace.c:1015 commands/tablespace.c:1083 +#: commands/tablespace.c:1216 commands/tablespace.c:1416 #, c-format msgid "tablespace \"%s\" does not exist" msgstr "Tablespace »%s« existiert nicht" -#: commands/tablespace.c:434 +#: commands/tablespace.c:436 #, c-format msgid "tablespace \"%s\" does not exist, skipping" msgstr "Tablespace »%s« existiert nicht, wird übersprungen" -#: commands/tablespace.c:510 +#: commands/tablespace.c:512 #, c-format msgid "tablespace \"%s\" is not empty" msgstr "Tablespace »%s« ist nicht leer" -#: commands/tablespace.c:582 +#: commands/tablespace.c:584 #, c-format msgid "directory \"%s\" does not exist" msgstr "Verzeichnis »%s« existiert nicht" -#: commands/tablespace.c:583 +#: commands/tablespace.c:585 #, c-format msgid "Create this directory for the tablespace before restarting the server." msgstr "Erzeugen Sie dieses Verzeichnis für den Tablespace bevor Sie den Server neu starten." -#: commands/tablespace.c:588 +#: commands/tablespace.c:590 #, c-format msgid "could not set permissions on directory \"%s\": %m" msgstr "konnte Zugriffsrechte für Verzeichnis »%s« nicht setzen: %m" -#: commands/tablespace.c:618 +#: commands/tablespace.c:620 #, c-format msgid "directory \"%s\" already in use as a tablespace" msgstr "Verzeichnis »%s« ist bereits als Tablespace in Verwendung" -#: commands/tablespace.c:742 commands/tablespace.c:755 -#: commands/tablespace.c:791 commands/tablespace.c:883 +#: commands/tablespace.c:705 commands/tablespace.c:715 +#: postmaster/postmaster.c:1476 storage/file/fd.c:2680 +#: storage/file/reinit.c:122 utils/adt/genfile.c:483 utils/adt/genfile.c:554 +#: utils/adt/misc.c:436 utils/misc/tzparser.c:339 +#, c-format +msgid "could not open directory \"%s\": %m" +msgstr "konnte Verzeichnis »%s« nicht öffnen: %m" + +#: commands/tablespace.c:744 commands/tablespace.c:757 +#: commands/tablespace.c:793 commands/tablespace.c:885 storage/file/fd.c:3110 #, c-format msgid "could not remove directory \"%s\": %m" msgstr "konnte Verzeichnis »%s« nicht löschen: %m" -#: commands/tablespace.c:804 commands/tablespace.c:892 +#: commands/tablespace.c:806 commands/tablespace.c:894 #, c-format msgid "could not remove symbolic link \"%s\": %m" msgstr "konnte symbolische Verknüpfung »%s« nicht löschen: %m" -#: commands/tablespace.c:814 commands/tablespace.c:901 +#: commands/tablespace.c:816 commands/tablespace.c:903 #, c-format msgid "\"%s\" is not a directory or symbolic link" msgstr "»%s« ist kein Verzeichnis oder symbolische Verknüpfung" -#: commands/tablespace.c:1086 +#: commands/tablespace.c:1088 #, c-format msgid "Tablespace \"%s\" does not exist." msgstr "Tablespace »%s« existiert nicht." -#: commands/tablespace.c:1513 +#: commands/tablespace.c:1515 #, c-format msgid "directories for tablespace %u could not be removed" msgstr "Verzeichnisse für Tablespace %u konnten nicht entfernt werden" -#: commands/tablespace.c:1515 +#: commands/tablespace.c:1517 #, c-format msgid "You can remove the directories manually if necessary." msgstr "Sie können die Verzeichnisse falls nötig manuell entfernen." 
-#: commands/trigger.c:187 +#: commands/trigger.c:207 commands/trigger.c:218 #, c-format msgid "\"%s\" is a table" msgstr "»%s« ist eine Tabelle" -#: commands/trigger.c:189 +#: commands/trigger.c:209 commands/trigger.c:220 #, c-format msgid "Tables cannot have INSTEAD OF triggers." msgstr "Tabellen können keine INSTEAD OF-Trigger haben." -#: commands/trigger.c:194 +#: commands/trigger.c:237 #, fuzzy, c-format -#| msgid "\"%s\" is a partial index" -msgid "\"%s\" is a partitioned table" -msgstr "»%s« ist ein partieller Index" +#| msgid "Partitioned tables cannot have ROW triggers." +msgid "Partitioned tables cannot have BEFORE / FOR EACH ROW triggers." +msgstr "Partitionierte Tabellen können keine ROW-Trigger haben." -#: commands/trigger.c:196 +#: commands/trigger.c:255 #, fuzzy, c-format -#| msgid "Foreign tables cannot have TRUNCATE triggers." -msgid "Partitioned tables cannot have ROW triggers." -msgstr "Fremdtabellen können keine TRUNCATE-Trigger haben." +#| msgid "Triggers on foreign tables cannot have transition tables." +msgid "Triggers on partitioned tables cannot have transition tables." +msgstr "Trigger für Fremdtabellen können keine Übergangstabellen haben." -#: commands/trigger.c:207 commands/trigger.c:214 +#: commands/trigger.c:267 commands/trigger.c:274 commands/trigger.c:444 #, c-format msgid "\"%s\" is a view" msgstr "»%s« ist eine Sicht" -#: commands/trigger.c:209 +#: commands/trigger.c:269 #, c-format msgid "Views cannot have row-level BEFORE or AFTER triggers." msgstr "Sichten können keine BEFORE- oder AFTER-Trigger auf Zeilenebene haben." -#: commands/trigger.c:216 +#: commands/trigger.c:276 #, c-format msgid "Views cannot have TRUNCATE triggers." msgstr "Sichten können keine TRUNCATE-Trigger haben." -#: commands/trigger.c:224 commands/trigger.c:231 commands/trigger.c:238 +#: commands/trigger.c:284 commands/trigger.c:291 commands/trigger.c:303 +#: commands/trigger.c:437 #, c-format msgid "\"%s\" is a foreign table" msgstr "»%s« ist eine Fremdtabelle" -#: commands/trigger.c:226 +#: commands/trigger.c:286 #, c-format msgid "Foreign tables cannot have INSTEAD OF triggers." msgstr "Fremdtabellen können keine INSTEAD OF-Trigger haben." -#: commands/trigger.c:233 +#: commands/trigger.c:293 #, c-format msgid "Foreign tables cannot have TRUNCATE triggers." msgstr "Fremdtabellen können keine TRUNCATE-Trigger haben." -#: commands/trigger.c:240 +#: commands/trigger.c:305 #, c-format msgid "Foreign tables cannot have constraint triggers." msgstr "Fremdtabellen können keine Constraint-Trigger haben." 
-#: commands/trigger.c:303 +#: commands/trigger.c:380 #, c-format msgid "TRUNCATE FOR EACH ROW triggers are not supported" msgstr "TRUNCATE FOR EACH ROW-Trigger werden nicht unterstützt" -#: commands/trigger.c:311 +#: commands/trigger.c:388 #, c-format msgid "INSTEAD OF triggers must be FOR EACH ROW" msgstr "INSTEAD OF-Trigger müssen FOR EACH ROW sein" -#: commands/trigger.c:315 +#: commands/trigger.c:392 #, c-format msgid "INSTEAD OF triggers cannot have WHEN conditions" msgstr "INSTEAD OF-Trigger können keine WHEN-Bedingungen haben" -#: commands/trigger.c:319 +#: commands/trigger.c:396 #, c-format msgid "INSTEAD OF triggers cannot have column lists" msgstr "INSTEAD OF-Trigger können keine Spaltenlisten haben" -#: commands/trigger.c:348 -#, fuzzy, c-format -#| msgid "using variable \"%s\" in different declare statements is not supported" +#: commands/trigger.c:425 +#, c-format msgid "ROW variable naming in the REFERENCING clause is not supported" -msgstr "Verwendung der Variable »%s« in verschiedenen DECLARE-Anweisungen wird nicht unterstützt" +msgstr "Benennung von ROW-Variablen in der REFERENCING-Klausel wird nicht unterstützt" -#: commands/trigger.c:349 +#: commands/trigger.c:426 #, c-format msgid "Use OLD TABLE or NEW TABLE for naming transition tables." -msgstr "" +msgstr "Verwenden Sie OLD TABLE oder NEW TABLE, um Übergangstabellen zu benennen." -#: commands/trigger.c:360 -#, fuzzy, c-format -#| msgid "%s: transaction log directory location can only be specified in plain mode\n" +#: commands/trigger.c:439 +#, c-format +msgid "Triggers on foreign tables cannot have transition tables." +msgstr "Trigger für Fremdtabellen können keine Übergangstabellen haben." + +#: commands/trigger.c:446 +#, c-format +msgid "Triggers on views cannot have transition tables." +msgstr "Trigger für Sichten können keine Übergangstabellen haben."
+ +#: commands/trigger.c:466 +#, c-format +msgid "ROW triggers with transition tables are not supported on inheritance children" +msgstr "ROW-Trigger mit Übergangstabellen werden für Vererbungskinder nicht unterstützt" + +#: commands/trigger.c:472 +#, c-format msgid "transition table name can only be specified for an AFTER trigger" -msgstr "%s: Transaktionslogverzeichnis kann nur im »plain«-Modus angegeben werden\n" +msgstr "Übergangstabellenname kann nur für einen AFTER-Trigger angegeben werden" + +#: commands/trigger.c:477 +#, c-format +msgid "TRUNCATE triggers with transition tables are not supported" +msgstr "TRUNCATE-Trigger mit Übergangstabellen werden nicht unterstützt" + +#: commands/trigger.c:494 +#, c-format +msgid "transition tables cannot be specified for triggers with more than one event" +msgstr "Übergangstabellen können nicht für Trigger mit mehr als einem Ereignis angegeben werden" + +#: commands/trigger.c:505 +#, c-format +msgid "transition tables cannot be specified for triggers with column lists" +msgstr "Übergangstabellen können nicht für Trigger mit Spaltenlisten angegeben werden" -#: commands/trigger.c:368 +#: commands/trigger.c:522 #, c-format msgid "NEW TABLE can only be specified for an INSERT or UPDATE trigger" -msgstr "" +msgstr "NEW TABLE kann nur für INSERT- oder UPDATE-Trigger angegeben werden" -#: commands/trigger.c:373 +#: commands/trigger.c:527 #, c-format msgid "NEW TABLE cannot be specified multiple times" -msgstr "" +msgstr "NEW TABLE kann nicht mehrmals angegeben werden" -#: commands/trigger.c:383 +#: commands/trigger.c:537 #, c-format msgid "OLD TABLE can only be specified for a DELETE or UPDATE trigger" -msgstr "" +msgstr "OLD TABLE kann nur für DELETE- oder UPDATE-Trigger angegeben werden" -#: commands/trigger.c:388 +#: commands/trigger.c:542 #, c-format msgid "OLD TABLE cannot be specified multiple times" -msgstr "" +msgstr "OLD TABLE kann nicht mehrmals angegeben werden" -#: commands/trigger.c:398 +#: commands/trigger.c:552 #, c-format msgid "OLD TABLE name and NEW TABLE name cannot be the same" -msgstr "" +msgstr "Name für OLD TABLE und NEW TABLE kann nicht gleich sein" -#: commands/trigger.c:455 commands/trigger.c:468 +#: commands/trigger.c:614 commands/trigger.c:627 #, c-format msgid "statement trigger's WHEN condition cannot reference column values" msgstr "WHEN-Bedingung eines Statement-Triggers kann keine Verweise auf Spaltenwerte enthalten" -#: commands/trigger.c:460 +#: commands/trigger.c:619 #, c-format msgid "INSERT trigger's WHEN condition cannot reference OLD values" msgstr "WHEN-Bedingung eines INSERT-Triggers kann keine Verweise auf OLD-Werte enthalten" -#: commands/trigger.c:473 +#: commands/trigger.c:632 #, c-format msgid "DELETE trigger's WHEN condition cannot reference NEW values" msgstr "WHEN-Bedingung eines DELETE-Triggers kann keine Verweise auf NEW-Werte enthalten" -#: commands/trigger.c:478 +#: commands/trigger.c:637 #, c-format msgid "BEFORE trigger's WHEN condition cannot reference NEW system columns" msgstr "WHEN-Bedingung eines BEFORE-Triggers kann keine Verweise auf Systemspalten in NEW enthalten" -#: commands/trigger.c:643 commands/trigger.c:1414 +#: commands/trigger.c:810 commands/trigger.c:1705 #, c-format msgid "trigger \"%s\" for relation \"%s\" already exists" msgstr "Trigger »%s« für Relation »%s« existiert bereits" -#: commands/trigger.c:939 +#: commands/trigger.c:1230 msgid "Found referenced table's UPDATE trigger." msgstr "UPDATE-Trigger der Zieltabelle wurde gefunden." 
-#: commands/trigger.c:940 +#: commands/trigger.c:1231 msgid "Found referenced table's DELETE trigger." msgstr "DELETE-Trigger der Zieltabelle wurde gefunden." -#: commands/trigger.c:941 +#: commands/trigger.c:1232 msgid "Found referencing table's trigger." msgstr "Trigger der Quelltabelle wurde gefunden." -#: commands/trigger.c:1050 commands/trigger.c:1066 +#: commands/trigger.c:1341 commands/trigger.c:1357 #, c-format msgid "ignoring incomplete trigger group for constraint \"%s\" %s" msgstr "unvollständige Triggergruppe für Constraint \"%s\" %s ignoriert" -#: commands/trigger.c:1079 +#: commands/trigger.c:1370 #, c-format msgid "converting trigger group into constraint \"%s\" %s" msgstr "Triggergruppe wird in Constraint \"%s\" %s umgewandelt" -#: commands/trigger.c:1300 commands/trigger.c:1459 commands/trigger.c:1574 +#: commands/trigger.c:1591 commands/trigger.c:1750 commands/trigger.c:1886 #, c-format msgid "trigger \"%s\" for table \"%s\" does not exist" msgstr "Trigger »%s« für Tabelle »%s« existiert nicht" -#: commands/trigger.c:1542 +#: commands/trigger.c:1833 #, c-format msgid "permission denied: \"%s\" is a system trigger" msgstr "keine Berechtigung: »%s« ist ein Systemtrigger" -#: commands/trigger.c:2097 +#: commands/trigger.c:2433 #, c-format msgid "trigger function %u returned null value" msgstr "Triggerfunktion %u gab NULL-Wert zurück" -#: commands/trigger.c:2158 commands/trigger.c:2364 commands/trigger.c:2575 -#: commands/trigger.c:2854 +#: commands/trigger.c:2499 commands/trigger.c:2714 commands/trigger.c:2933 +#: commands/trigger.c:3223 #, c-format msgid "BEFORE STATEMENT trigger cannot return a value" msgstr "Trigger für BEFORE STATEMENT kann keinen Wert zurückgeben" -#: commands/trigger.c:2916 executor/nodeModifyTable.c:746 -#: executor/nodeModifyTable.c:1041 +#: commands/trigger.c:3285 executor/nodeModifyTable.c:757 +#: executor/nodeModifyTable.c:1220 #, c-format msgid "tuple to be updated was already modified by an operation triggered by the current command" msgstr "das zu aktualisierende Tupel wurde schon durch eine vom aktuellen Befehl ausgelöste Operation verändert" -#: commands/trigger.c:2917 executor/nodeModifyTable.c:747 -#: executor/nodeModifyTable.c:1042 +#: commands/trigger.c:3286 executor/nodeModifyTable.c:758 +#: executor/nodeModifyTable.c:1221 #, c-format msgid "Consider using an AFTER trigger instead of a BEFORE trigger to propagate changes to other rows." msgstr "Verwenden Sie einen AFTER-Trigger anstelle eines BEFORE-Triggers, um Änderungen an andere Zeilen zu propagieren." 
-#: commands/trigger.c:2931 executor/execMain.c:2526 -#: executor/nodeLockRows.c:216 executor/nodeModifyTable.c:213 -#: executor/nodeModifyTable.c:759 executor/nodeModifyTable.c:1054 -#: executor/nodeModifyTable.c:1220 +#: commands/trigger.c:3300 executor/execMain.c:2719 executor/nodeLockRows.c:220 +#: executor/nodeModifyTable.c:225 executor/nodeModifyTable.c:770 +#: executor/nodeModifyTable.c:1233 executor/nodeModifyTable.c:1409 #, c-format msgid "could not serialize access due to concurrent update" msgstr "kann Zugriff nicht serialisieren wegen gleichzeitiger Aktualisierung" -#: commands/trigger.c:4834 +#: commands/trigger.c:3304 executor/execMain.c:2723 executor/execMain.c:2798 +#: executor/nodeLockRows.c:224 +#, fuzzy, c-format +#| msgid "tuple to be updated was already modified by an operation triggered by the current command" +msgid "tuple to be locked was already moved to another partition due to concurrent update" +msgstr "das zu aktualisierende Tupel wurde schon durch eine vom aktuellen Befehl ausgelöste Operation verändert" + +#: commands/trigger.c:5429 #, c-format msgid "constraint \"%s\" is not deferrable" msgstr "Constraint »%s« ist nicht aufschiebbar" -#: commands/trigger.c:4857 +#: commands/trigger.c:5452 #, c-format msgid "constraint \"%s\" does not exist" msgstr "Constraint »%s« existiert nicht" @@ -9544,632 +10161,675 @@ msgstr "Optionen PARSER und COPY können nicht beide angegeben werden" msgid "text search parser is required" msgstr "Textsucheparser muss angegeben werden" -#: commands/tsearchcmds.c:1266 +#: commands/tsearchcmds.c:1265 #, c-format msgid "token type \"%s\" does not exist" msgstr "Tokentyp »%s« existiert nicht" -#: commands/tsearchcmds.c:1487 +#: commands/tsearchcmds.c:1486 #, c-format msgid "mapping for token type \"%s\" does not exist" msgstr "Mapping für Tokentyp »%s« existiert nicht" -#: commands/tsearchcmds.c:1493 +#: commands/tsearchcmds.c:1492 #, c-format msgid "mapping for token type \"%s\" does not exist, skipping" msgstr "Mapping für Tokentyp »%s« existiert nicht, wird übersprungen" -#: commands/tsearchcmds.c:1648 commands/tsearchcmds.c:1759 +#: commands/tsearchcmds.c:1647 commands/tsearchcmds.c:1758 #, c-format msgid "invalid parameter list format: \"%s\"" msgstr "ungültiges Parameterlistenformat: »%s«" -#: commands/typecmds.c:183 +#: commands/typecmds.c:180 #, c-format msgid "must be superuser to create a base type" msgstr "nur Superuser können Basistypen anlegen" -#: commands/typecmds.c:290 commands/typecmds.c:1414 +#: commands/typecmds.c:287 commands/typecmds.c:1483 #, c-format msgid "type attribute \"%s\" not recognized" msgstr "Typ-Attribut »%s« nicht erkannt" -#: commands/typecmds.c:346 +#: commands/typecmds.c:343 #, c-format msgid "invalid type category \"%s\": must be simple ASCII" msgstr "ungültige Typenkategorie »%s«: muss einfacher ASCII-Wert sein" -#: commands/typecmds.c:365 +#: commands/typecmds.c:362 #, c-format msgid "array element type cannot be %s" msgstr "Arrayelementtyp kann nicht %s sein" -#: commands/typecmds.c:397 +#: commands/typecmds.c:394 #, c-format msgid "alignment \"%s\" not recognized" msgstr "Ausrichtung »%s« nicht erkannt" -#: commands/typecmds.c:414 +#: commands/typecmds.c:411 #, c-format msgid "storage \"%s\" not recognized" msgstr "Storage-Typ »%s« nicht erkannt" -#: commands/typecmds.c:425 +#: commands/typecmds.c:422 #, c-format msgid "type input function must be specified" msgstr "Typeingabefunktion muss angegeben werden" -#: commands/typecmds.c:429 +#: commands/typecmds.c:426 #, c-format msgid "type output 
function must be specified" msgstr "Typausgabefunktion muss angegeben werden" -#: commands/typecmds.c:434 +#: commands/typecmds.c:431 #, c-format msgid "type modifier output function is useless without a type modifier input function" msgstr "Typmodifikatorausgabefunktion ist nutzlos ohne Typmodifikatoreingabefunktion" -#: commands/typecmds.c:464 +#: commands/typecmds.c:461 #, c-format msgid "type input function %s must return type %s" msgstr "Typeingabefunktion %s muss Typ %s zurückgeben" -#: commands/typecmds.c:481 +#: commands/typecmds.c:478 #, c-format msgid "type output function %s must return type %s" msgstr "Typausgabefunktion %s muss Typ %s zurückgeben" -#: commands/typecmds.c:490 +#: commands/typecmds.c:487 #, c-format msgid "type receive function %s must return type %s" msgstr "Typempfangsfunktion %s muss Typ %s zurückgeben" -#: commands/typecmds.c:499 +#: commands/typecmds.c:496 #, c-format msgid "type send function %s must return type %s" msgstr "Typsendefunktion %s muss Typ %s zurückgeben" -#: commands/typecmds.c:564 +#: commands/typecmds.c:561 #, c-format msgid "type input function %s should not be volatile" msgstr "Typeingabefunktion %s sollte nicht VOLATILE sein" -#: commands/typecmds.c:569 +#: commands/typecmds.c:566 #, c-format msgid "type output function %s should not be volatile" msgstr "Typausgabefunktion %s sollte nicht VOLATILE sein" -#: commands/typecmds.c:574 +#: commands/typecmds.c:571 #, c-format msgid "type receive function %s should not be volatile" msgstr "Typempfangsfunktion %s sollte nicht VOLATILE sein" -#: commands/typecmds.c:579 +#: commands/typecmds.c:576 #, c-format msgid "type send function %s should not be volatile" msgstr "Typsendefunktion %s sollte nicht VOLATILE sein" -#: commands/typecmds.c:584 +#: commands/typecmds.c:581 #, c-format msgid "type modifier input function %s should not be volatile" msgstr "Typmodifikatoreingabefunktion %s sollte nicht VOLATILE sein" -#: commands/typecmds.c:589 +#: commands/typecmds.c:586 #, c-format msgid "type modifier output function %s should not be volatile" msgstr "Typmodifikatorausgabefunktion %s sollte nicht VOLATILE sein" -#: commands/typecmds.c:811 +#: commands/typecmds.c:813 #, c-format msgid "\"%s\" is not a valid base type for a domain" msgstr "»%s« ist kein gültiger Basistyp für eine Domäne" -#: commands/typecmds.c:897 +#: commands/typecmds.c:899 #, c-format msgid "multiple default expressions" msgstr "mehrere Vorgabeausdrücke" -#: commands/typecmds.c:959 commands/typecmds.c:968 +#: commands/typecmds.c:961 commands/typecmds.c:970 #, c-format msgid "conflicting NULL/NOT NULL constraints" msgstr "wiedersprüchliche NULL/NOT NULL-Constraints" -#: commands/typecmds.c:984 +#: commands/typecmds.c:986 #, c-format msgid "check constraints for domains cannot be marked NO INHERIT" msgstr "Check-Constraints für Domänen können nicht als NO INHERIT markiert werden" -#: commands/typecmds.c:993 commands/typecmds.c:2512 +#: commands/typecmds.c:995 commands/typecmds.c:2581 #, c-format msgid "unique constraints not possible for domains" msgstr "Unique-Constraints sind nicht für Domänen möglich" -#: commands/typecmds.c:999 commands/typecmds.c:2518 +#: commands/typecmds.c:1001 commands/typecmds.c:2587 #, c-format msgid "primary key constraints not possible for domains" msgstr "Primärschlüssel-Constraints sind nicht fürDomänen möglich" -#: commands/typecmds.c:1005 commands/typecmds.c:2524 +#: commands/typecmds.c:1007 commands/typecmds.c:2593 #, c-format msgid "exclusion constraints not possible for domains" msgstr 
"Exclusion-Constraints sind nicht für Domänen möglich" -#: commands/typecmds.c:1011 commands/typecmds.c:2530 +#: commands/typecmds.c:1013 commands/typecmds.c:2599 #, c-format msgid "foreign key constraints not possible for domains" msgstr "Fremdschlüssel-Constraints sind nicht für Domänen möglich" -#: commands/typecmds.c:1020 commands/typecmds.c:2539 +#: commands/typecmds.c:1022 commands/typecmds.c:2608 #, c-format msgid "specifying constraint deferrability not supported for domains" msgstr "Setzen des Constraint-Modus wird für Domänen nicht unterstützt" -#: commands/typecmds.c:1284 utils/cache/typcache.c:1636 +#: commands/typecmds.c:1353 utils/cache/typcache.c:2319 #, c-format msgid "%s is not an enum" msgstr "»%s« ist kein Enum" -#: commands/typecmds.c:1422 +#: commands/typecmds.c:1491 #, c-format msgid "type attribute \"subtype\" is required" msgstr "Typ-Attribut »subtype« muss angegeben werden" -#: commands/typecmds.c:1427 +#: commands/typecmds.c:1496 #, c-format msgid "range subtype cannot be %s" msgstr "Bereichtsuntertyp kann nicht %s sein" -#: commands/typecmds.c:1446 +#: commands/typecmds.c:1515 #, c-format msgid "range collation specified but subtype does not support collation" msgstr "Sortierfolge für Bereichstyp angegeben, aber Untertyp unterstützt keine Sortierfolgen" -#: commands/typecmds.c:1680 +#: commands/typecmds.c:1748 #, c-format msgid "changing argument type of function %s from \"opaque\" to \"cstring\"" msgstr "ändere Argumenttyp von Funktion %s von »opaque« in »cstring«" -#: commands/typecmds.c:1731 +#: commands/typecmds.c:1799 #, c-format msgid "changing argument type of function %s from \"opaque\" to %s" msgstr "ändere Argumenttyp von Funktion %s von »opaque« in %s" -#: commands/typecmds.c:1830 +#: commands/typecmds.c:1898 #, c-format msgid "typmod_in function %s must return type %s" msgstr "typmod_in-Funktion %s muss Typ %s zurückgeben" -#: commands/typecmds.c:1857 +#: commands/typecmds.c:1925 #, c-format msgid "typmod_out function %s must return type %s" msgstr "typmod_out-Funktion %s muss Typ %s zurückgeben" -#: commands/typecmds.c:1884 +#: commands/typecmds.c:1952 #, c-format msgid "type analyze function %s must return type %s" msgstr "Typanalysefunktion %s muss Typ %s zurückgeben" -#: commands/typecmds.c:1930 +#: commands/typecmds.c:1998 #, c-format msgid "You must specify an operator class for the range type or define a default operator class for the subtype." msgstr "Sie müssen für den Bereichstyp eine Operatorklasse angeben oder eine Standardoperatorklasse für den Untertyp definieren." 
-#: commands/typecmds.c:1961 +#: commands/typecmds.c:2029 #, c-format msgid "range canonical function %s must return range type" msgstr "Bereichstyp-Canonical-Funktion %s muss Bereichstyp zurückgeben" -#: commands/typecmds.c:1967 +#: commands/typecmds.c:2035 #, c-format msgid "range canonical function %s must be immutable" msgstr "Bereichstyp-Canonical-Funktion %s muss »immutable« sein" -#: commands/typecmds.c:2003 +#: commands/typecmds.c:2071 #, c-format msgid "range subtype diff function %s must return type %s" msgstr "Bereichstyp-Untertyp-Diff-Funktion %s muss Typ %s zurückgeben" -#: commands/typecmds.c:2010 +#: commands/typecmds.c:2078 #, c-format msgid "range subtype diff function %s must be immutable" msgstr "Bereichstyp-Untertyp-Diff-Funktion %s muss »immutable« sein" -#: commands/typecmds.c:2037 +#: commands/typecmds.c:2105 #, c-format msgid "pg_type array OID value not set when in binary upgrade mode" msgstr "Array-OID-Wert für pg_type ist im Binary-Upgrade-Modus nicht gesetzt" -#: commands/typecmds.c:2340 +#: commands/typecmds.c:2409 #, c-format msgid "column \"%s\" of table \"%s\" contains null values" msgstr "Spalte »%s« von Tabelle »%s« enthält NULL-Werte" -#: commands/typecmds.c:2453 commands/typecmds.c:2636 +#: commands/typecmds.c:2522 commands/typecmds.c:2705 #, c-format msgid "constraint \"%s\" of domain \"%s\" does not exist" msgstr "Constraint »%s« von Domäne »%s« existiert nicht" -#: commands/typecmds.c:2457 +#: commands/typecmds.c:2526 #, c-format msgid "constraint \"%s\" of domain \"%s\" does not exist, skipping" msgstr "Constraint »%s« von Domäne »%s« existiert nicht, wird übersprungen" -#: commands/typecmds.c:2642 +#: commands/typecmds.c:2711 #, c-format msgid "constraint \"%s\" of domain \"%s\" is not a check constraint" msgstr "Constraint »%s« von Domäne »%s« ist kein Check-Constraint" -#: commands/typecmds.c:2747 +#: commands/typecmds.c:2817 #, c-format msgid "column \"%s\" of table \"%s\" contains values that violate the new constraint" msgstr "Spalte »%s« von Tabelle »%s« enthält Werte, die den neuen Constraint verletzen" -#: commands/typecmds.c:2960 commands/typecmds.c:3247 commands/typecmds.c:3434 +#: commands/typecmds.c:3045 commands/typecmds.c:3252 commands/typecmds.c:3334 +#: commands/typecmds.c:3521 #, c-format msgid "%s is not a domain" msgstr "%s ist keine Domäne" -#: commands/typecmds.c:2994 +#: commands/typecmds.c:3079 #, c-format msgid "constraint \"%s\" for domain \"%s\" already exists" msgstr "Constraint »%s« für Domäne »%s« existiert bereits" -#: commands/typecmds.c:3045 +#: commands/typecmds.c:3130 #, c-format msgid "cannot use table references in domain check constraint" msgstr "Tabellenverweise können in Domänen-Check-Constraints nicht verwendet werden" -#: commands/typecmds.c:3177 commands/typecmds.c:3259 commands/typecmds.c:3551 +#: commands/typecmds.c:3264 commands/typecmds.c:3346 commands/typecmds.c:3638 #, c-format msgid "%s is a table's row type" msgstr "%s ist der Zeilentyp einer Tabelle" -#: commands/typecmds.c:3179 commands/typecmds.c:3261 commands/typecmds.c:3553 +#: commands/typecmds.c:3266 commands/typecmds.c:3348 commands/typecmds.c:3640 #, c-format msgid "Use ALTER TABLE instead." msgstr "Verwenden Sie stattdessen ALTER TABLE." 
-#: commands/typecmds.c:3186 commands/typecmds.c:3268 commands/typecmds.c:3466 +#: commands/typecmds.c:3273 commands/typecmds.c:3355 commands/typecmds.c:3553 #, c-format msgid "cannot alter array type %s" msgstr "Array-Typ %s kann nicht verändert werden" -#: commands/typecmds.c:3188 commands/typecmds.c:3270 commands/typecmds.c:3468 +#: commands/typecmds.c:3275 commands/typecmds.c:3357 commands/typecmds.c:3555 #, c-format msgid "You can alter type %s, which will alter the array type as well." msgstr "Sie können den Typ %s ändern, wodurch der Array-Typ ebenfalls geändert wird." -#: commands/typecmds.c:3536 +#: commands/typecmds.c:3623 #, c-format msgid "type \"%s\" already exists in schema \"%s\"" msgstr "Typ %s existiert bereits in Schema »%s«" -#: commands/user.c:154 +#: commands/user.c:141 #, c-format msgid "SYSID can no longer be specified" msgstr "SYSID kann nicht mehr angegeben werden" -#: commands/user.c:308 +#: commands/user.c:295 #, c-format msgid "must be superuser to create superusers" msgstr "nur Superuser können Superuser anlegen" -#: commands/user.c:315 +#: commands/user.c:302 #, c-format msgid "must be superuser to create replication users" msgstr "nur Superuser können Replikationsbenutzer anlegen" -#: commands/user.c:322 commands/user.c:708 +#: commands/user.c:309 commands/user.c:707 #, c-format msgid "must be superuser to change bypassrls attribute" msgstr "nur Superuser können das Attribut »bypassrls« ändern" -#: commands/user.c:329 +#: commands/user.c:316 #, c-format msgid "permission denied to create role" msgstr "keine Berechtigung, um Rolle zu erzeugen" -#: commands/user.c:339 commands/user.c:1183 commands/user.c:1190 gram.y:14233 -#: gram.y:14268 utils/adt/acl.c:5246 utils/adt/acl.c:5252 +#: commands/user.c:326 commands/user.c:1195 commands/user.c:1202 gram.y:14855 +#: gram.y:14893 utils/adt/acl.c:5267 utils/adt/acl.c:5273 #, c-format msgid "role name \"%s\" is reserved" msgstr "Rollenname »%s« ist reserviert" -#: commands/user.c:341 commands/user.c:1185 commands/user.c:1192 +#: commands/user.c:328 commands/user.c:1197 commands/user.c:1204 #, c-format msgid "Role names starting with \"pg_\" are reserved." msgstr "Rollennamen, die mit »pg_« anfangen, sind reserviert." 
-#: commands/user.c:353 commands/user.c:1198 +#: commands/user.c:340 commands/user.c:1210 #, c-format msgid "role \"%s\" already exists" msgstr "Rolle »%s« existiert bereits" -#: commands/user.c:426 +#: commands/user.c:406 commands/user.c:816 +#, c-format +msgid "empty string is not a valid password, clearing password" +msgstr "leere Zeichenkette ist kein gültiges Passwort, Passwort wird entfernt" + +#: commands/user.c:437 #, c-format msgid "pg_authid OID value not set when in binary upgrade mode" msgstr "OID-Wert für pg_auth ist im Binary-Upgrade-Modus nicht gesetzt" -#: commands/user.c:694 commands/user.c:903 commands/user.c:1437 -#: commands/user.c:1581 +#: commands/user.c:693 commands/user.c:915 commands/user.c:1449 +#: commands/user.c:1593 #, c-format msgid "must be superuser to alter superusers" msgstr "nur Superuser können Superuser ändern" -#: commands/user.c:701 +#: commands/user.c:700 #, c-format msgid "must be superuser to alter replication users" msgstr "nur Superuser können Replikationsbenutzer ändern" -#: commands/user.c:724 commands/user.c:911 +#: commands/user.c:723 commands/user.c:923 #, c-format msgid "permission denied" msgstr "keine Berechtigung" -#: commands/user.c:941 +#: commands/user.c:953 #, c-format msgid "must be superuser to alter settings globally" msgstr "nur Superuser können globale Einstellungen ändern" -#: commands/user.c:963 +#: commands/user.c:975 #, c-format msgid "permission denied to drop role" msgstr "keine Berechtigung, um Rolle zu entfernen" -#: commands/user.c:987 +#: commands/user.c:999 #, c-format msgid "cannot use special role specifier in DROP ROLE" msgstr "in DROP ROLE kann kein Rollenplatzhalter verwendet werden" -#: commands/user.c:997 commands/user.c:1154 commands/variable.c:822 -#: commands/variable.c:894 utils/adt/acl.c:5104 utils/adt/acl.c:5151 -#: utils/adt/acl.c:5179 utils/adt/acl.c:5197 utils/init/miscinit.c:502 +#: commands/user.c:1009 commands/user.c:1166 commands/variable.c:822 +#: commands/variable.c:894 utils/adt/acl.c:5124 utils/adt/acl.c:5171 +#: utils/adt/acl.c:5199 utils/adt/acl.c:5217 utils/init/miscinit.c:599 #, c-format msgid "role \"%s\" does not exist" msgstr "Rolle »%s« existiert nicht" -#: commands/user.c:1002 +#: commands/user.c:1014 #, c-format msgid "role \"%s\" does not exist, skipping" msgstr "Rolle »%s« existiert nicht, wird übersprungen" -#: commands/user.c:1014 commands/user.c:1018 +#: commands/user.c:1026 commands/user.c:1030 #, c-format msgid "current user cannot be dropped" msgstr "aktueller Benutzer kann nicht entfernt werden" -#: commands/user.c:1022 +#: commands/user.c:1034 #, c-format msgid "session user cannot be dropped" msgstr "aktueller Sitzungsbenutzer kann nicht entfernt werden" -#: commands/user.c:1033 +#: commands/user.c:1045 #, c-format msgid "must be superuser to drop superusers" msgstr "nur Superuser können Superuser löschen" -#: commands/user.c:1049 +#: commands/user.c:1061 #, c-format msgid "role \"%s\" cannot be dropped because some objects depend on it" msgstr "kann Rolle »%s« nicht löschen, weil andere Objekte davon abhängen" -#: commands/user.c:1170 +#: commands/user.c:1182 #, c-format msgid "session user cannot be renamed" msgstr "aktueller Sitzungsbenutzer kann nicht umbenannt werden" -#: commands/user.c:1174 +#: commands/user.c:1186 #, c-format msgid "current user cannot be renamed" msgstr "aktueller Benutzer kann nicht umbenannt werden" -#: commands/user.c:1208 +#: commands/user.c:1220 #, c-format msgid "must be superuser to rename superusers" msgstr "nur Superuser können Superuser 
umbenennen" -#: commands/user.c:1215 +#: commands/user.c:1227 #, c-format msgid "permission denied to rename role" msgstr "keine Berechtigung, um Rolle umzubenennen" -#: commands/user.c:1236 +#: commands/user.c:1248 #, c-format msgid "MD5 password cleared because of role rename" msgstr "MD5-Passwort wegen Rollenumbenennung gelöscht" -#: commands/user.c:1296 +#: commands/user.c:1308 #, c-format msgid "column names cannot be included in GRANT/REVOKE ROLE" msgstr "bei GRANT/REVOKE ROLE können keine Spaltennamen angegeben werden" -#: commands/user.c:1334 +#: commands/user.c:1346 #, c-format msgid "permission denied to drop objects" msgstr "keine Berechtigung, um Objekte zu löschen" -#: commands/user.c:1361 commands/user.c:1370 +#: commands/user.c:1373 commands/user.c:1382 #, c-format msgid "permission denied to reassign objects" msgstr "keine Berechtigung, um Objekte neu zuzuordnen" -#: commands/user.c:1445 commands/user.c:1589 +#: commands/user.c:1457 commands/user.c:1601 #, c-format msgid "must have admin option on role \"%s\"" msgstr "Admin-Option für Rolle »%s« wird benötigt" -#: commands/user.c:1462 +#: commands/user.c:1474 #, c-format msgid "must be superuser to set grantor" msgstr "nur Superuser können Grantor setzen" -#: commands/user.c:1487 +#: commands/user.c:1499 #, c-format msgid "role \"%s\" is a member of role \"%s\"" msgstr "Rolle »%s« ist ein Mitglied der Rolle »%s«" -#: commands/user.c:1502 +#: commands/user.c:1514 #, c-format msgid "role \"%s\" is already a member of role \"%s\"" msgstr "Rolle »%s« ist schon Mitglied der Rolle »%s«" -#: commands/user.c:1611 +#: commands/user.c:1623 #, c-format msgid "role \"%s\" is not a member of role \"%s\"" msgstr "Rolle »%s« ist kein Mitglied der Rolle »%s«" -#: commands/vacuum.c:186 +#: commands/vacuum.c:111 +#, fuzzy, c-format +#| msgid "aggregate minvfunc must be specified when mstype is specified" +msgid "ANALYZE option must be specified when a column list is provided" +msgstr "»minvfunc« für Aggregatfunktion muss angegeben werden, wenn »mstype« angegeben ist" + +#: commands/vacuum.c:203 #, c-format msgid "%s cannot be executed from VACUUM or ANALYZE" msgstr "%s kann nicht aus VACUUM oder ANALYZE ausgeführt werden" -#: commands/vacuum.c:196 +#: commands/vacuum.c:213 #, c-format msgid "VACUUM option DISABLE_PAGE_SKIPPING cannot be used with FULL" msgstr "VACUUM-Option DISABLE_PAGE_SKIPPING kann nicht zusammen mit FULL verwendet werden" -#: commands/vacuum.c:565 +#: commands/vacuum.c:657 #, c-format msgid "oldest xmin is far in the past" msgstr "älteste xmin ist weit in der Vergangenheit" -#: commands/vacuum.c:566 +#: commands/vacuum.c:658 #, c-format -msgid "Close open transactions soon to avoid wraparound problems." -msgstr "Schließen Sie bald alle offenen Transaktionen, um Überlaufprobleme zu vermeiden." +msgid "" +"Close open transactions soon to avoid wraparound problems.\n" +"You might also need to commit or roll back old prepared transactions, or drop stale replication slots." +msgstr "" +"Schließen Sie bald alle offenen Transaktionen, um Überlaufprobleme zu vermeiden.\n" +"Eventuell müssen Sie auch alte vorbereitete Transaktionen committen oder zurückrollen oder unbenutzte Replikations-Slots löschen." -#: commands/vacuum.c:605 +#: commands/vacuum.c:698 #, c-format msgid "oldest multixact is far in the past" msgstr "älteste Multixact ist weit in der Vergangenheit" -#: commands/vacuum.c:606 +#: commands/vacuum.c:699 #, c-format msgid "Close open transactions with multixacts soon to avoid wraparound problems." 
msgstr "Schließen Sie bald alle offenen Transaktionen mit Multixacts, um Überlaufprobleme zu vermeiden." -#: commands/vacuum.c:1176 +#: commands/vacuum.c:1245 #, c-format msgid "some databases have not been vacuumed in over 2 billion transactions" msgstr "einige Datenbanken sind seit über 2 Milliarden Transaktionen nicht gevacuumt worden" -#: commands/vacuum.c:1177 +#: commands/vacuum.c:1246 #, c-format msgid "You might have already suffered transaction-wraparound data loss." msgstr "Sie haben möglicherweise bereits Daten wegen Transaktionsnummernüberlauf verloren." -#: commands/vacuum.c:1306 +#: commands/vacuum.c:1418 #, c-format msgid "skipping vacuum of \"%s\" --- lock not available" msgstr "überspringe Vacuum von »%s« --- Sperre nicht verfügbar" -#: commands/vacuum.c:1332 +#: commands/vacuum.c:1423 +#, c-format +msgid "skipping vacuum of \"%s\" --- relation no longer exists" +msgstr "überspringe Vacuum von »%s« --- Relation existiert nicht mehr" + +#: commands/vacuum.c:1447 #, c-format msgid "skipping \"%s\" --- only superuser can vacuum it" msgstr "überspringe »%s« --- nur Superuser kann sie vacuumen" -#: commands/vacuum.c:1336 +#: commands/vacuum.c:1451 #, c-format msgid "skipping \"%s\" --- only superuser or database owner can vacuum it" msgstr "überspringe »%s« --- nur Superuser oder Eigentümer der Datenbank kann sie vacuumen" -#: commands/vacuum.c:1340 +#: commands/vacuum.c:1455 #, c-format msgid "skipping \"%s\" --- only table or database owner can vacuum it" msgstr "überspringe »%s« --- nur Eigentümer der Tabelle oder der Datenbank kann sie vacuumen" -#: commands/vacuum.c:1359 +#: commands/vacuum.c:1472 #, c-format msgid "skipping \"%s\" --- cannot vacuum non-tables or special system tables" msgstr "überspringe »%s« --- kann Nicht-Tabellen oder besondere Systemtabellen nicht vacuumen" -#: commands/vacuumlazy.c:372 +#: commands/vacuumlazy.c:378 +#, fuzzy, c-format +#| msgid "automatic vacuum of table \"%s.%s.%s\": index scans: %d\n" +msgid "automatic aggressive vacuum of table \"%s.%s.%s\": index scans: %d\n" +msgstr "automatisches Vacuum der Tabelle »%s.%s.%s«: Index-Scans: %d\n" + +#: commands/vacuumlazy.c:380 #, c-format msgid "automatic vacuum of table \"%s.%s.%s\": index scans: %d\n" msgstr "automatisches Vacuum der Tabelle »%s.%s.%s«: Index-Scans: %d\n" -#: commands/vacuumlazy.c:377 +#: commands/vacuumlazy.c:386 #, c-format msgid "pages: %u removed, %u remain, %u skipped due to pins, %u skipped frozen\n" msgstr "Seiten: %u entfernt, %u verbleiben, %u übersprungen wegen Pins, %u übersprungen weil eingefroren\n" -#: commands/vacuumlazy.c:383 -#, fuzzy, c-format -#| msgid "tuples: %.0f removed, %.0f remain, %.0f are dead but not yet removable\n" +#: commands/vacuumlazy.c:392 +#, c-format msgid "tuples: %.0f removed, %.0f remain, %.0f are dead but not yet removable, oldest xmin: %u\n" -msgstr "Tupel: %.0f entfernt, %.0f verbleiben, %.0f sind tot aber noch nicht entfernbar\n" +msgstr "Tupel: %.0f entfernt, %.0f verbleiben, %.0f sind tot aber noch nicht entfernbar, ältestes xmin: %u\n" -#: commands/vacuumlazy.c:389 +#: commands/vacuumlazy.c:398 #, c-format msgid "buffer usage: %d hits, %d misses, %d dirtied\n" msgstr "Puffer-Verwendung: %d Treffer, %d Verfehlen, %d geändert\n" -#: commands/vacuumlazy.c:393 +#: commands/vacuumlazy.c:402 #, c-format msgid "avg read rate: %.3f MB/s, avg write rate: %.3f MB/s\n" msgstr "durchschn. Leserate: %.3f MB/s, durchschn. 
Schreibrate: %.3f MB/s\n" -#: commands/vacuumlazy.c:395 +#: commands/vacuumlazy.c:404 #, c-format msgid "system usage: %s" msgstr "Systembenutzung: %s" -#: commands/vacuumlazy.c:853 +#: commands/vacuumlazy.c:500 +#, fuzzy, c-format +#| msgid "vacuuming \"%s.%s\"" +msgid "aggressively vacuuming \"%s.%s\"" +msgstr "vacuume »%s.%s«" + +#: commands/vacuumlazy.c:881 #, c-format msgid "relation \"%s\" page %u is uninitialized --- fixing" msgstr "Seite %2$u in Relation »%1$s« ist nicht initialisiert --- wird repariert" -#: commands/vacuumlazy.c:1323 +#: commands/vacuumlazy.c:1417 #, c-format msgid "\"%s\": removed %.0f row versions in %u pages" msgstr "»%s«: %.0f Zeilenversionen in %u Seiten entfernt" -#: commands/vacuumlazy.c:1333 -#, fuzzy, c-format -#| msgid "%.0f dead row versions cannot be removed yet.\n" +#: commands/vacuumlazy.c:1427 +#, c-format msgid "%.0f dead row versions cannot be removed yet, oldest xmin: %u\n" -msgstr "%.0f tote Zeilenversionen können noch nicht entfernt werden.\n" +msgstr "%.0f tote Zeilenversionen können noch nicht entfernt werden, ältestes xmin: %u\n" -#: commands/vacuumlazy.c:1335 +#: commands/vacuumlazy.c:1429 #, c-format msgid "There were %.0f unused item pointers.\n" msgstr "Es gab %.0f unbenutzte Itemzeiger.\n" -#: commands/vacuumlazy.c:1337 +#: commands/vacuumlazy.c:1431 #, c-format -msgid "Skipped %u page due to buffer pins.\n" -msgid_plural "Skipped %u pages due to buffer pins.\n" -msgstr[0] "%u Seite wegen Buffer-Pins übersprungen.\n" -msgstr[1] "%u Seiten wegen Buffer-Pins übersprungen.\n" +msgid "Skipped %u page due to buffer pins, " +msgid_plural "Skipped %u pages due to buffer pins, " +msgstr[0] "%u Seite wegen Buffer-Pins übersprungen, " +msgstr[1] "%u Seiten wegen Buffer-Pins übersprungen, " -#: commands/vacuumlazy.c:1341 +#: commands/vacuumlazy.c:1435 #, c-format -msgid "%u page is entirely empty.\n" +msgid "%u frozen page.\n" +msgid_plural "%u frozen pages.\n" +msgstr[0] "%u eingefrorene Seite.\n" +msgstr[1] "%u eingefrorene Seiten.\n" + +#: commands/vacuumlazy.c:1439 +#, c-format +msgid "%u page is entirely empty.\n" msgid_plural "%u pages are entirely empty.\n" msgstr[0] "%u Seite ist vollkommen leer.\n" msgstr[1] "%u Seiten sind vollkommen leer.\n" -#: commands/vacuumlazy.c:1349 +#: commands/vacuumlazy.c:1443 +#, c-format +msgid "%s." +msgstr "%s." + +#: commands/vacuumlazy.c:1446 #, c-format msgid "\"%s\": found %.0f removable, %.0f nonremovable row versions in %u out of %u pages" msgstr "»%s«: %.0f entfernbare, %.0f nicht entfernbare Zeilenversionen in %u von %u Seiten gefunden" -#: commands/vacuumlazy.c:1418 +#: commands/vacuumlazy.c:1515 #, c-format msgid "\"%s\": removed %d row versions in %d pages" msgstr "»%s«: %d Zeilenversionen in %d Seiten entfernt" -#: commands/vacuumlazy.c:1607 +#: commands/vacuumlazy.c:1704 #, c-format msgid "scanned index \"%s\" to remove %d row versions" msgstr "Index »%s« gelesen und %d Zeilenversionen entfernt" -#: commands/vacuumlazy.c:1653 +#: commands/vacuumlazy.c:1756 #, c-format msgid "index \"%s\" now contains %.0f row versions in %u pages" msgstr "Index »%s« enthält %.0f Zeilenversionen in %u Seiten" -#: commands/vacuumlazy.c:1657 +#: commands/vacuumlazy.c:1760 #, c-format msgid "" "%.0f index row versions were removed.\n" @@ -10180,22 +10840,22 @@ msgstr "" "%u Indexseiten wurden gelöscht, %u sind gegenwärtig wiederverwendbar.\n" "%s." 
-#: commands/vacuumlazy.c:1752 +#: commands/vacuumlazy.c:1855 #, c-format msgid "\"%s\": stopping truncate due to conflicting lock request" msgstr "»%s«: Truncate wird gestoppt wegen Sperrkonflikt" -#: commands/vacuumlazy.c:1817 +#: commands/vacuumlazy.c:1920 #, c-format msgid "\"%s\": truncated %u to %u pages" msgstr "»%s«: von %u auf %u Seiten verkürzt" -#: commands/vacuumlazy.c:1882 +#: commands/vacuumlazy.c:1985 #, c-format msgid "\"%s\": suspending truncate due to conflicting lock request" msgstr "»%s«: Truncate wird ausgesetzt wegen Sperrkonflikt" -#: commands/variable.c:165 utils/misc/guc.c:9982 utils/misc/guc.c:10044 +#: commands/variable.c:165 utils/misc/guc.c:10284 utils/misc/guc.c:10346 #, c-format msgid "Unrecognized key word: \"%s\"." msgstr "Unbekanntes Schlüsselwort: »%s«." @@ -10255,7 +10915,7 @@ msgstr "SET TRANSACTION ISOLATION LEVEL muss vor allen Anfragen aufgerufen werde msgid "SET TRANSACTION ISOLATION LEVEL must not be called in a subtransaction" msgstr "SET TRANSACTION ISOLATION LEVEL kann nicht in einer Subtransaktion aufgerufen werden" -#: commands/variable.c:571 storage/lmgr/predicate.c:1576 +#: commands/variable.c:571 storage/lmgr/predicate.c:1603 #, c-format msgid "cannot use serializable mode in a hot standby" msgstr "kann serialisierbaren Modus nicht in einem Hot Standby verwenden" @@ -10287,8 +10947,8 @@ msgstr "»client_encoding« kann jetzt nicht geändert werden." #: commands/variable.c:776 #, c-format -msgid "cannot change client_encoding in a parallel worker" -msgstr "client_encoding kann nicht in einem parallelen Arbeitsprozess geändert werden" +msgid "cannot change client_encoding during a parallel operation" +msgstr "client_encoding kann nicht während einer parallelen Operation geändert werden" #: commands/variable.c:912 #, c-format @@ -10355,422 +11015,449 @@ msgstr "Sichten können nicht ungeloggt sein, weil sie keinen Speicherplatz verw msgid "view \"%s\" will be a temporary view" msgstr "Sicht »%s« wird eine temporäre Sicht" -#: executor/execCurrent.c:76 +#: executor/execCurrent.c:77 #, c-format msgid "cursor \"%s\" is not a SELECT query" msgstr "Cursor »%s« ist keine SELECT-Anfrage" -#: executor/execCurrent.c:82 +#: executor/execCurrent.c:83 #, c-format msgid "cursor \"%s\" is held from a previous transaction" msgstr "Cursor »%s« wurde aus einer vorherigen Transaktion beibehalten" -#: executor/execCurrent.c:114 +#: executor/execCurrent.c:115 #, c-format msgid "cursor \"%s\" has multiple FOR UPDATE/SHARE references to table \"%s\"" msgstr "Cursor »%s« hat mehrere FOR UPDATE/SHARE-Verweise auf Tabelle »%s«" -#: executor/execCurrent.c:123 +#: executor/execCurrent.c:124 #, c-format msgid "cursor \"%s\" does not have a FOR UPDATE/SHARE reference to table \"%s\"" msgstr "Cursor »%s« hat keinen FOR UPDATE/SHARE-Verweis auf Tabelle »%s«" -#: executor/execCurrent.c:133 executor/execCurrent.c:179 +#: executor/execCurrent.c:134 executor/execCurrent.c:177 #, c-format msgid "cursor \"%s\" is not positioned on a row" msgstr "Cursor »%s« ist nicht auf eine Zeile positioniert" -#: executor/execCurrent.c:166 +#: executor/execCurrent.c:164 executor/execCurrent.c:219 +#: executor/execCurrent.c:231 #, c-format msgid "cursor \"%s\" is not a simply updatable scan of table \"%s\"" msgstr "Cursor »%s« ist kein einfach aktualisierbarer Scan der Tabelle »%s«" -#: executor/execCurrent.c:231 executor/execQual.c:1128 +#: executor/execCurrent.c:273 executor/execExprInterp.c:2311 #, c-format msgid "type of parameter %d (%s) does not match that when preparing the plan (%s)" msgstr 
"Typ von Parameter %d (%s) stimmt nicht mit dem überein, als der Plan vorbereitet worden ist (%s)" -#: executor/execCurrent.c:243 executor/execQual.c:1140 +#: executor/execCurrent.c:285 executor/execExprInterp.c:2323 #, c-format msgid "no value found for parameter %d" msgstr "kein Wert für Parameter %d gefunden" -#: executor/execIndexing.c:545 +#: executor/execExpr.c:856 parser/parse_agg.c:794 +#, c-format +msgid "window function calls cannot be nested" +msgstr "Aufrufe von Fensterfunktionen können nicht geschachtelt werden" + +#: executor/execExpr.c:1314 +#, c-format +msgid "target type is not an array" +msgstr "Zieltyp ist kein Array" + +#: executor/execExpr.c:1646 +#, c-format +msgid "ROW() column has type %s instead of type %s" +msgstr "ROW()-Spalte hat Typ %s statt Typ %s" + +#: executor/execExpr.c:2181 executor/execSRF.c:697 parser/parse_func.c:120 +#: parser/parse_func.c:576 parser/parse_func.c:950 +#, c-format +msgid "cannot pass more than %d argument to a function" +msgid_plural "cannot pass more than %d arguments to a function" +msgstr[0] "kann nicht mehr als %d Argument an eine Funktion übergeben" +msgstr[1] "kann nicht mehr als %d Argumente an eine Funktion übergeben" + +#: executor/execExpr.c:2479 executor/execExpr.c:2485 +#: executor/execExprInterp.c:2640 utils/adt/arrayfuncs.c:261 +#: utils/adt/arrayfuncs.c:559 utils/adt/arrayfuncs.c:1289 +#: utils/adt/arrayfuncs.c:3335 utils/adt/arrayfuncs.c:5291 +#: utils/adt/arrayfuncs.c:5808 +#, c-format +msgid "number of array dimensions (%d) exceeds the maximum allowed (%d)" +msgstr "Anzahl der Arraydimensionen (%d) überschreitet erlaubtes Maximum (%d)" + +#: executor/execExprInterp.c:1879 +#, c-format +msgid "attribute %d of type %s has been dropped" +msgstr "Attribut %d von Typ %s wurde gelöscht" + +#: executor/execExprInterp.c:1885 +#, c-format +msgid "attribute %d of type %s has wrong type" +msgstr "Attribut %d von Typ %s hat falschen Typ" + +#: executor/execExprInterp.c:1887 executor/execExprInterp.c:2913 +#: executor/execExprInterp.c:2960 +#, c-format +msgid "Table has type %s, but query expects %s." +msgstr "Tabelle hat Typ %s, aber Anfrage erwartet %s." + +#: executor/execExprInterp.c:2401 +#, c-format +msgid "WHERE CURRENT OF is not supported for this table type" +msgstr "WHERE CURRENT OF wird für diesen Tabellentyp nicht unterstützt" + +#: executor/execExprInterp.c:2618 +#, c-format +msgid "cannot merge incompatible arrays" +msgstr "kann inkompatible Arrays nicht verschmelzen" + +#: executor/execExprInterp.c:2619 +#, c-format +msgid "Array with element type %s cannot be included in ARRAY construct with element type %s." +msgstr "Arrayelement mit Typ %s kann nicht in ARRAY-Konstrukt mit Elementtyp %s verwendet werden." 
+ +#: executor/execExprInterp.c:2660 executor/execExprInterp.c:2690 +#, c-format +msgid "multidimensional arrays must have array expressions with matching dimensions" +msgstr "mehrdimensionale Arrays müssen Arraysausdrücke mit gleicher Anzahl Dimensionen haben" + +#: executor/execExprInterp.c:2912 executor/execExprInterp.c:2959 +#, c-format +msgid "attribute %d has wrong type" +msgstr "Attribut %d hat falschen Typ" + +#: executor/execExprInterp.c:3069 +#, c-format +msgid "array subscript in assignment must not be null" +msgstr "Arrayindex in Zuweisung darf nicht NULL sein" + +#: executor/execExprInterp.c:3502 utils/adt/domains.c:149 +#, c-format +msgid "domain %s does not allow null values" +msgstr "Domäne %s erlaubt keine NULL-Werte" + +#: executor/execExprInterp.c:3517 utils/adt/domains.c:184 +#, c-format +msgid "value for domain %s violates check constraint \"%s\"" +msgstr "Wert für Domäne %s verletzt Check-Constraint »%s«" + +#: executor/execExprInterp.c:3888 executor/execExprInterp.c:3905 +#: executor/execExprInterp.c:4007 executor/nodeModifyTable.c:106 +#: executor/nodeModifyTable.c:117 executor/nodeModifyTable.c:134 +#: executor/nodeModifyTable.c:142 +#, c-format +msgid "table row type and query-specified row type do not match" +msgstr "Zeilentyp der Tabelle und der von der Anfrage angegebene Zeilentyp stimmen nicht überein" + +#: executor/execExprInterp.c:3889 +#, c-format +msgid "Table row contains %d attribute, but query expects %d." +msgid_plural "Table row contains %d attributes, but query expects %d." +msgstr[0] "Tabellenzeile enthält %d Attribut, aber Anfrage erwartet %d." +msgstr[1] "Tabellenzeile enthält %d Attribute, aber Anfrage erwartet %d." + +#: executor/execExprInterp.c:3906 executor/nodeModifyTable.c:118 +#, c-format +msgid "Table has type %s at ordinal position %d, but query expects %s." +msgstr "Tabelle hat Typ %s auf Position %d, aber Anfrage erwartet %s." + +#: executor/execExprInterp.c:4008 executor/execSRF.c:953 +#, c-format +msgid "Physical storage mismatch on dropped attribute at ordinal position %d." +msgstr "Physischer Speicher stimmt nicht überein mit gelöschtem Attribut auf Position %d." + +#: executor/execIndexing.c:543 #, c-format msgid "ON CONFLICT does not support deferrable unique constraints/exclusion constraints as arbiters" msgstr "ON CONFLICT unterstützt keine aufschiebbaren Unique-Constraints/Exclusion-Constraints als Arbiter" -#: executor/execIndexing.c:822 +#: executor/execIndexing.c:818 #, c-format msgid "could not create exclusion constraint \"%s\"" msgstr "konnte Exclusion-Constraint »%s« nicht erzeugen" -#: executor/execIndexing.c:825 +#: executor/execIndexing.c:821 #, c-format msgid "Key %s conflicts with key %s." msgstr "Schlüssel %s kollidiert mit Schlüssel %s." -#: executor/execIndexing.c:827 +#: executor/execIndexing.c:823 #, c-format msgid "Key conflicts exist." msgstr "Es bestehen Schlüsselkonflikte." -#: executor/execIndexing.c:833 +#: executor/execIndexing.c:829 #, c-format msgid "conflicting key value violates exclusion constraint \"%s\"" msgstr "kollidierender Schlüsselwert verletzt Exclusion-Constraint »%s«" -#: executor/execIndexing.c:836 +#: executor/execIndexing.c:832 #, c-format msgid "Key %s conflicts with existing key %s." msgstr "Schlüssel %s kollidiert mit vorhandenem Schlüssel %s." -#: executor/execIndexing.c:838 +#: executor/execIndexing.c:834 #, c-format msgid "Key conflicts with existing key." msgstr "Der Schlüssel kollidiert mit einem vorhandenen Schlüssel." 
-#: executor/execMain.c:1040 +#: executor/execMain.c:1119 #, c-format msgid "cannot change sequence \"%s\"" msgstr "kann Sequenz »%s« nicht ändern" -#: executor/execMain.c:1046 +#: executor/execMain.c:1125 #, c-format msgid "cannot change TOAST relation \"%s\"" msgstr "kann TOAST-Relation »%s« nicht ändern" -#: executor/execMain.c:1064 rewrite/rewriteHandler.c:2661 +#: executor/execMain.c:1143 rewrite/rewriteHandler.c:2752 #, c-format msgid "cannot insert into view \"%s\"" msgstr "kann nicht in Sicht »%s« einfügen" -#: executor/execMain.c:1066 rewrite/rewriteHandler.c:2664 +#: executor/execMain.c:1145 rewrite/rewriteHandler.c:2755 #, c-format msgid "To enable inserting into the view, provide an INSTEAD OF INSERT trigger or an unconditional ON INSERT DO INSTEAD rule." msgstr "Um Einfügen in die Sicht zu ermöglichen, richten Sie einen INSTEAD OF INSERT Trigger oder eine ON INSERT DO INSTEAD Regel ohne Bedingung ein." -#: executor/execMain.c:1072 rewrite/rewriteHandler.c:2669 +#: executor/execMain.c:1151 rewrite/rewriteHandler.c:2760 #, c-format msgid "cannot update view \"%s\"" msgstr "kann Sicht »%s« nicht aktualisieren" -#: executor/execMain.c:1074 rewrite/rewriteHandler.c:2672 +#: executor/execMain.c:1153 rewrite/rewriteHandler.c:2763 #, c-format msgid "To enable updating the view, provide an INSTEAD OF UPDATE trigger or an unconditional ON UPDATE DO INSTEAD rule." msgstr "Um Aktualisieren der Sicht zu ermöglichen, richten Sie einen INSTEAD OF UPDATE Trigger oder eine ON UPDATE DO INSTEAD Regel ohne Bedingung ein." -#: executor/execMain.c:1080 rewrite/rewriteHandler.c:2677 +#: executor/execMain.c:1159 rewrite/rewriteHandler.c:2768 #, c-format msgid "cannot delete from view \"%s\"" msgstr "kann nicht aus Sicht »%s« löschen" -#: executor/execMain.c:1082 rewrite/rewriteHandler.c:2680 +#: executor/execMain.c:1161 rewrite/rewriteHandler.c:2771 #, c-format msgid "To enable deleting from the view, provide an INSTEAD OF DELETE trigger or an unconditional ON DELETE DO INSTEAD rule." msgstr "Um Löschen aus der Sicht zu ermöglichen, richten Sie einen INSTEAD OF DELETE Trigger oder eine ON DELETE DO INSTEAD Regel ohne Bedingung ein." 
-#: executor/execMain.c:1093 +#: executor/execMain.c:1172 #, c-format msgid "cannot change materialized view \"%s\"" msgstr "kann materialisierte Sicht »%s« nicht ändern" -#: executor/execMain.c:1105 +#: executor/execMain.c:1184 #, c-format msgid "cannot insert into foreign table \"%s\"" msgstr "kann nicht in Fremdtabelle »%s« einfügen" -#: executor/execMain.c:1111 +#: executor/execMain.c:1190 #, c-format msgid "foreign table \"%s\" does not allow inserts" msgstr "Fremdtabelle »%s« erlaubt kein Einfügen" -#: executor/execMain.c:1118 +#: executor/execMain.c:1197 #, c-format msgid "cannot update foreign table \"%s\"" msgstr "kann Fremdtabelle »%s« nicht aktualisieren" -#: executor/execMain.c:1124 +#: executor/execMain.c:1203 #, c-format msgid "foreign table \"%s\" does not allow updates" msgstr "Fremdtabelle »%s« erlaubt kein Aktualisieren" -#: executor/execMain.c:1131 +#: executor/execMain.c:1210 #, c-format msgid "cannot delete from foreign table \"%s\"" msgstr "kann nicht aus Fremdtabelle »%s« löschen" -#: executor/execMain.c:1137 +#: executor/execMain.c:1216 #, c-format msgid "foreign table \"%s\" does not allow deletes" msgstr "Fremdtabelle »%s« erlaubt kein Löschen" -#: executor/execMain.c:1148 +#: executor/execMain.c:1227 #, c-format msgid "cannot change relation \"%s\"" msgstr "kann Relation »%s« nicht ändern" -#: executor/execMain.c:1175 +#: executor/execMain.c:1254 #, c-format msgid "cannot lock rows in sequence \"%s\"" msgstr "kann Zeilen in Sequenz »%s« nicht sperren" -#: executor/execMain.c:1182 +#: executor/execMain.c:1261 #, c-format msgid "cannot lock rows in TOAST relation \"%s\"" msgstr "kann Zeilen in TOAST-Relation »%s« nicht sperren" -#: executor/execMain.c:1189 +#: executor/execMain.c:1268 #, c-format msgid "cannot lock rows in view \"%s\"" msgstr "kann Zeilen in Sicht »%s« nicht sperren" -#: executor/execMain.c:1197 +#: executor/execMain.c:1276 #, c-format msgid "cannot lock rows in materialized view \"%s\"" msgstr "kann Zeilen in materialisierter Sicht »%s« nicht sperren" -#: executor/execMain.c:1206 executor/execMain.c:2760 -#: executor/nodeLockRows.c:132 +#: executor/execMain.c:1285 executor/execMain.c:2966 +#: executor/nodeLockRows.c:136 #, c-format msgid "cannot lock rows in foreign table \"%s\"" msgstr "kann Zeilen in Fremdtabelle »%s« nicht sperren" -#: executor/execMain.c:1212 +#: executor/execMain.c:1291 #, c-format msgid "cannot lock rows in relation \"%s\"" msgstr "kann Zeilen in Relation »%s« nicht sperren" -#: executor/execMain.c:1842 +#: executor/execMain.c:1946 #, c-format -msgid "null value in column \"%s\" violates not-null constraint" -msgstr "NULL-Wert in Spalte »%s« verletzt Not-Null-Constraint" +msgid "new row for relation \"%s\" violates partition constraint" +msgstr "neue Zeile für Relation »%s« verletzt Partitions-Constraint" -#: executor/execMain.c:1844 executor/execMain.c:1878 executor/execMain.c:1908 -#: executor/execMain.c:1995 +#: executor/execMain.c:1948 executor/execMain.c:2028 executor/execMain.c:2075 +#: executor/execMain.c:2187 #, c-format msgid "Failing row contains %s." msgstr "Fehlgeschlagene Zeile enthält %s." 
-#: executor/execMain.c:1876 +#: executor/execMain.c:2026 #, c-format -msgid "new row for relation \"%s\" violates check constraint \"%s\"" -msgstr "neue Zeile für Relation »%s« verletzt Check-Constraint »%s«" +msgid "null value in column \"%s\" violates not-null constraint" +msgstr "NULL-Wert in Spalte »%s« verletzt Not-Null-Constraint" -#: executor/execMain.c:1906 -#, fuzzy, c-format -#| msgid "new row for relation \"%s\" violates check constraint \"%s\"" -msgid "new row for relation \"%s\" violates partition constraint" +#: executor/execMain.c:2073 +#, c-format +msgid "new row for relation \"%s\" violates check constraint \"%s\"" msgstr "neue Zeile für Relation »%s« verletzt Check-Constraint »%s«" -#: executor/execMain.c:1993 +#: executor/execMain.c:2185 #, c-format msgid "new row violates check option for view \"%s\"" msgstr "neue Zeile verletzt Check-Option für Sicht »%s«" -#: executor/execMain.c:2003 +#: executor/execMain.c:2195 #, c-format msgid "new row violates row-level security policy \"%s\" for table \"%s\"" msgstr "neue Zeile verletzt Policy für Sicherheit auf Zeilenebene »%s« für Tabelle »%s«" -#: executor/execMain.c:2008 +#: executor/execMain.c:2200 #, c-format msgid "new row violates row-level security policy for table \"%s\"" msgstr "neue Zeile verletzt Policy für Sicherheit auf Zeilenebene für Tabelle »%s«" -#: executor/execMain.c:2015 +#: executor/execMain.c:2207 #, c-format msgid "new row violates row-level security policy \"%s\" (USING expression) for table \"%s\"" msgstr "neue Zeile verletzt Policy für Sicherheit auf Zeilenebene »%s« (USING-Ausdruck) für Tabelle »%s«" -#: executor/execMain.c:2020 +#: executor/execMain.c:2212 #, c-format msgid "new row violates row-level security policy (USING expression) for table \"%s\"" msgstr "neue Zeile verletzt Policy für Sicherheit auf Zeilenebene (USING-Ausdruck) für Tabelle »%s«" -#: executor/execMain.c:3221 -#, fuzzy, c-format -#| msgid "no matching relations in tablespace \"%s\" found" +#: executor/execPartition.c:286 +#, c-format msgid "no partition of relation \"%s\" found for row" -msgstr "keine passenden Relationen in Tablespace »%s« gefunden" +msgstr "keine Partition von Relation »%s« für die Zeile gefunden" -#: executor/execMain.c:3223 -#, fuzzy, c-format -#| msgid "Failing row contains %s." +#: executor/execPartition.c:288 +#, c-format msgid "Partition key of the failing row contains %s." -msgstr "Fehlgeschlagene Zeile enthält %s." +msgstr "Partitionierungsschlüssel der fehlgeschlagenen Zeile enthält %s." 
+ +#: executor/execReplication.c:197 executor/execReplication.c:361 +#, fuzzy, c-format +#| msgid "tuple to be updated was already modified by an operation triggered by the current command" +msgid "tuple to be locked was already moved to another partition due to concurrent update, retrying" +msgstr "das zu aktualisierende Tupel wurde schon durch eine vom aktuellen Befehl ausgelöste Operation verändert" -#: executor/execQual.c:282 executor/execQual.c:318 executor/execQual.c:2985 -#: utils/adt/array_userfuncs.c:484 utils/adt/arrayfuncs.c:260 -#: utils/adt/arrayfuncs.c:558 utils/adt/arrayfuncs.c:1288 -#: utils/adt/arrayfuncs.c:3361 utils/adt/arrayfuncs.c:5241 -#: utils/adt/arrayfuncs.c:5758 +#: executor/execReplication.c:201 executor/execReplication.c:365 #, c-format -msgid "number of array dimensions (%d) exceeds the maximum allowed (%d)" -msgstr "Anzahl der Arraydimensionen (%d) überschreitet erlaubtes Maximum (%d)" +msgid "concurrent update, retrying" +msgstr "gleichzeitige Aktualisierung, versuche erneut" -#: executor/execQual.c:303 executor/execQual.c:338 +#: executor/execReplication.c:262 parser/parse_oper.c:228 +#: utils/adt/array_userfuncs.c:719 utils/adt/array_userfuncs.c:858 +#: utils/adt/arrayfuncs.c:3613 utils/adt/arrayfuncs.c:4129 +#: utils/adt/arrayfuncs.c:6089 utils/adt/rowtypes.c:1179 #, c-format -msgid "array subscript in assignment must not be null" -msgstr "Arrayindex in Zuweisung darf nicht NULL sein" +msgid "could not identify an equality operator for type %s" +msgstr "konnte keinen Ist-Gleich-Operator für Typ %s ermitteln" -#: executor/execQual.c:626 executor/execQual.c:3964 +#: executor/execReplication.c:574 #, c-format -msgid "attribute %d has wrong type" -msgstr "Attribut %d hat falschen Typ" +msgid "cannot update table \"%s\" because it does not have a replica identity and publishes updates" +msgstr "Tabelle »%s« kann nicht aktualisiert werden, weil sie keine Replik-Identität hat und Updates publiziert" -#: executor/execQual.c:627 executor/execQual.c:3965 +#: executor/execReplication.c:576 #, c-format -msgid "Table has type %s, but query expects %s." -msgstr "Tabelle hat Typ %s, aber Anfrage erwartet %s." +msgid "To enable updating the table, set REPLICA IDENTITY using ALTER TABLE." +msgstr "Um Aktualisieren der Tabelle zu ermöglichen, setzen Sie REPLICA IDENTITY mit ALTER TABLE." -#: executor/execQual.c:814 executor/execQual.c:831 executor/execQual.c:1027 -#: executor/nodeModifyTable.c:95 executor/nodeModifyTable.c:105 -#: executor/nodeModifyTable.c:122 executor/nodeModifyTable.c:130 +#: executor/execReplication.c:580 #, c-format -msgid "table row type and query-specified row type do not match" -msgstr "Zeilentyp der Tabelle und der von der Anfrage angegebene Zeilentyp stimmen nicht überein" +msgid "cannot delete from table \"%s\" because it does not have a replica identity and publishes deletes" +msgstr "aus Tabelle »%s« kann nicht gelöscht werden, weil sie keine Replik-Identität hat und Deletes publiziert" -#: executor/execQual.c:815 +#: executor/execReplication.c:582 #, c-format -msgid "Table row contains %d attribute, but query expects %d." -msgid_plural "Table row contains %d attributes, but query expects %d." -msgstr[0] "Tabellenzeile enthält %d Attribut, aber Anfrage erwartet %d." -msgstr[1] "Tabellenzeile enthält %d Attribute, aber Anfrage erwartet %d." +msgid "To enable deleting from the table, set REPLICA IDENTITY using ALTER TABLE." +msgstr "Um Löschen in der Tabelle zu ermöglichen, setzen Sie REPLICA IDENTITY mit ALTER TABLE." 
-#: executor/execQual.c:832 executor/nodeModifyTable.c:106 +#: executor/execReplication.c:601 #, c-format -msgid "Table has type %s at ordinal position %d, but query expects %s." -msgstr "Tabelle hat Typ %s auf Position %d, aber Anfrage erwartet %s." +msgid "logical replication target relation \"%s.%s\" is not a table" +msgstr "Zielrelation für logische Replikation »%s.%s« ist keine Tabelle" -#: executor/execQual.c:1028 executor/execQual.c:1602 +#: executor/execSRF.c:308 #, c-format -msgid "Physical storage mismatch on dropped attribute at ordinal position %d." -msgstr "Physischer Speicher stimmt nicht überein mit gelöschtem Attribut auf Position %d." +msgid "rows returned by function are not all of the same row type" +msgstr "von Funktion zurückgegebene Zeilen haben nicht alle den selben Zeilentyp" -#: executor/execQual.c:1294 parser/parse_func.c:116 parser/parse_func.c:543 -#: parser/parse_func.c:902 +#: executor/execSRF.c:356 executor/execSRF.c:647 #, c-format -msgid "cannot pass more than %d argument to a function" -msgid_plural "cannot pass more than %d arguments to a function" -msgstr[0] "kann nicht mehr als %d Argument an Funktion übergeben" -msgstr[1] "kann nicht mehr als %d Argumente an Funktion übergeben" +msgid "table-function protocol for materialize mode was not followed" +msgstr "Tabellenfunktionsprotokoll für Materialisierungsmodus wurde nicht befolgt" + +#: executor/execSRF.c:363 executor/execSRF.c:665 +#, c-format +msgid "unrecognized table-function returnMode: %d" +msgstr "unbekannter returnMode von Tabellenfunktion: %d" -#: executor/execQual.c:1520 +#: executor/execSRF.c:871 #, c-format msgid "function returning setof record called in context that cannot accept type record" msgstr "Funktion mit Ergebnis SETOF RECORD in einem Zusammenhang aufgerufen, der den Typ RECORD nicht verarbeiten kann" -#: executor/execQual.c:1575 executor/execQual.c:1591 executor/execQual.c:1601 +#: executor/execSRF.c:926 executor/execSRF.c:942 executor/execSRF.c:952 #, c-format msgid "function return row and query-specified return row do not match" msgstr "von Funktion zurückgegebene Zeile und von der Anfrage angegebene zurückzugebende Zeile stimmen nicht überein" -#: executor/execQual.c:1576 +#: executor/execSRF.c:927 #, c-format msgid "Returned row contains %d attribute, but query expects %d." msgid_plural "Returned row contains %d attributes, but query expects %d." msgstr[0] "Zurückgegebene Zeile enthält %d Attribut, aber Anfrage erwartet %d." msgstr[1] "Zurückgegebene Zeile enthält %d Attribute, aber Anfrage erwartet %d." -#: executor/execQual.c:1592 +#: executor/execSRF.c:943 #, c-format msgid "Returned type %s at ordinal position %d, but query expects %s." msgstr "Rückgabetyp war %s auf Position %d, aber Anfrage erwartet %s." 
-#: executor/execQual.c:1790 executor/execQual.c:2153 -#, c-format -msgid "table-function protocol for materialize mode was not followed" -msgstr "Tabellenfunktionsprotokoll für Materialisierungsmodus wurde nicht befolgt" - -#: executor/execQual.c:1808 executor/execQual.c:2160 -#, c-format -msgid "unrecognized table-function returnMode: %d" -msgstr "unbekannter returnMode von Tabellenfunktion: %d" - -#: executor/execQual.c:2105 -#, c-format -msgid "rows returned by function are not all of the same row type" -msgstr "von Funktion zurückgegebene Zeilen haben nicht alle den selben Zeilentyp" - -#: executor/execQual.c:2963 -#, c-format -msgid "cannot merge incompatible arrays" -msgstr "kann inkompatible Arrays nicht verschmelzen" - -#: executor/execQual.c:2964 -#, c-format -msgid "Array with element type %s cannot be included in ARRAY construct with element type %s." -msgstr "Arrayelement mit Typ %s kann nicht in ARRAY-Konstrukt mit Elementtyp %s verwendet werden." - -#: executor/execQual.c:3005 executor/execQual.c:3032 -#, c-format -msgid "multidimensional arrays must have array expressions with matching dimensions" -msgstr "mehrdimensionale Arrays müssen Arraysausdrücke mit gleicher Anzahl Dimensionen haben" - -#: executor/execQual.c:3831 utils/adt/domains.c:146 -#, c-format -msgid "domain %s does not allow null values" -msgstr "Domäne %s erlaubt keine NULL-Werte" - -#: executor/execQual.c:3868 utils/adt/domains.c:188 -#, c-format -msgid "value for domain %s violates check constraint \"%s\"" -msgstr "Wert für Domäne %s verletzt Check-Constraint »%s«" - -#: executor/execQual.c:4209 -#, c-format -msgid "WHERE CURRENT OF is not supported for this table type" -msgstr "WHERE CURRENT OF wird für diesen Tabellentyp nicht unterstützt" - -#: executor/execQual.c:4397 parser/parse_agg.c:764 -#, c-format -msgid "window function calls cannot be nested" -msgstr "Aufrufe von Fensterfunktionen können nicht geschachtelt werden" - -#: executor/execQual.c:4614 -#, c-format -msgid "target type is not an array" -msgstr "Zieltyp ist kein Array" - -#: executor/execQual.c:4730 -#, c-format -msgid "ROW() column has type %s instead of type %s" -msgstr "ROW()-Spalte hat Typ %s statt Typ %s" - -#: executor/execReplication.c:195 executor/execReplication.c:342 -#, c-format -msgid "concurrent update, retrying" -msgstr "" - -#: executor/execReplication.c:544 -#, fuzzy, c-format -#| msgid "cannot alter type \"%s\" because it is the type of a typed table" -msgid "cannot update table \"%s\" because it does not have replica identity and publishes updates" -msgstr "kann Typ »%s« nicht ändern, weil er der Typ einer getypten Tabelle ist" - -#: executor/execReplication.c:546 -#, c-format -msgid "To enable updating the table, set REPLICA IDENTITY using ALTER TABLE." -msgstr "" - -#: executor/execReplication.c:550 -#, fuzzy, c-format -#| msgid "cannot alter type \"%s\" because it is the type of a typed table" -msgid "cannot delete from table \"%s\" because it does not have replica identity and publishes deletes" -msgstr "kann Typ »%s« nicht ändern, weil er der Typ einer getypten Tabelle ist" - -#: executor/execReplication.c:552 -#, c-format -msgid "To enable deleting from the table, set REPLICA IDENTITY using ALTER TABLE." 
-msgstr "" - -#: executor/execUtils.c:808 +#: executor/execUtils.c:679 #, c-format msgid "materialized view \"%s\" has not been populated" msgstr "materialisierte Sicht »%s« wurde noch nicht befüllt" -#: executor/execUtils.c:810 +#: executor/execUtils.c:681 #, c-format msgid "Use the REFRESH MATERIALIZED VIEW command." msgstr "Verwenden Sie den Befehl REFRESH MATERIALIZED VIEW." @@ -10780,278 +11467,321 @@ msgstr "Verwenden Sie den Befehl REFRESH MATERIALIZED VIEW." msgid "could not determine actual type of argument declared %s" msgstr "konnte tatsächlichen Typ von Argument mit deklarierten Typ %s nicht bestimmen" -#: executor/functions.c:519 +#: executor/functions.c:521 #, c-format msgid "cannot COPY to/from client in a SQL function" msgstr "COPY vom/zum Client funktioniert in einer SQL-Funktion nicht" #. translator: %s is a SQL statement name -#: executor/functions.c:525 +#: executor/functions.c:527 #, c-format msgid "%s is not allowed in a SQL function" msgstr "%s ist in SQL-Funktionen nicht erlaubt" #. translator: %s is a SQL statement name -#: executor/functions.c:533 executor/spi.c:1281 executor/spi.c:2052 +#: executor/functions.c:535 executor/spi.c:1380 executor/spi.c:2170 #, c-format msgid "%s is not allowed in a non-volatile function" msgstr "%s ist in als nicht »volatile« markierten Funktionen nicht erlaubt" -#: executor/functions.c:653 +#: executor/functions.c:656 #, c-format msgid "could not determine actual result type for function declared to return type %s" msgstr "konnte tatsächlichen Ergebnistyp von Funktion mit deklarierten Rückgabetyp %s nicht bestimmen" -#: executor/functions.c:1408 +#: executor/functions.c:1418 #, c-format msgid "SQL function \"%s\" statement %d" msgstr "SQL-Funktion »%s« Anweisung %d" -#: executor/functions.c:1434 +#: executor/functions.c:1444 #, c-format msgid "SQL function \"%s\" during startup" msgstr "SQL-Funktion »%s« beim Start" -#: executor/functions.c:1592 executor/functions.c:1629 -#: executor/functions.c:1641 executor/functions.c:1754 -#: executor/functions.c:1787 executor/functions.c:1817 +#: executor/functions.c:1537 +#, fuzzy, c-format +#| msgid "return type %s is not supported for SQL functions" +msgid "calling procedures with output arguments is not supported in SQL functions" +msgstr "Rückgabetyp %s wird von SQL-Funktionen nicht unterstützt" + +#: executor/functions.c:1657 executor/functions.c:1690 +#: executor/functions.c:1702 executor/functions.c:1826 +#: executor/functions.c:1859 executor/functions.c:1889 #, c-format msgid "return type mismatch in function declared to return %s" msgstr "Rückgabetyp von Funktion stimmt nicht überein; deklariert als %s" -#: executor/functions.c:1594 +#: executor/functions.c:1659 #, c-format msgid "Function's final statement must be SELECT or INSERT/UPDATE/DELETE RETURNING." msgstr "Die letzte Anweisung der Funktion muss ein SELECT oder INSERT/UPDATE/DELETE RETURNING sein." -#: executor/functions.c:1631 +#: executor/functions.c:1692 #, c-format msgid "Final statement must return exactly one column." msgstr "Die letzte Anweisung muss genau eine Spalte zurückgeben." -#: executor/functions.c:1643 +#: executor/functions.c:1704 #, c-format msgid "Actual return type is %s." msgstr "Eigentlicher Rückgabetyp ist %s." -#: executor/functions.c:1756 +#: executor/functions.c:1828 #, c-format msgid "Final statement returns too many columns." msgstr "Die letzte Anweisung gibt zu viele Spalten zurück." 
-#: executor/functions.c:1789 +#: executor/functions.c:1861 #, c-format msgid "Final statement returns %s instead of %s at column %d." msgstr "Die letzte Anweisung ergibt %s statt %s in Spalte %d." -#: executor/functions.c:1819 +#: executor/functions.c:1891 #, c-format msgid "Final statement returns too few columns." msgstr "Die letzte Anweisung gibt zu wenige Spalten zurück." -#: executor/functions.c:1868 +#: executor/functions.c:1940 #, c-format msgid "return type %s is not supported for SQL functions" msgstr "Rückgabetyp %s wird von SQL-Funktionen nicht unterstützt" -#: executor/nodeAgg.c:3121 +#: executor/nodeAgg.c:2802 parser/parse_agg.c:633 parser/parse_agg.c:663 #, c-format -msgid "combine function for aggregate %u must be declared as STRICT" -msgstr "Kombinierfunktion für Aggregatfunktion %u muss als STRICT deklariert sein" +msgid "aggregate function calls cannot be nested" +msgstr "Aufrufe von Aggregatfunktionen können nicht geschachtelt werden" -#: executor/nodeAgg.c:3166 executor/nodeWindowAgg.c:2282 +#: executor/nodeAgg.c:2988 executor/nodeWindowAgg.c:2807 #, c-format msgid "aggregate %u needs to have compatible input type and transition type" msgstr "Aggregatfunktion %u muss kompatiblen Eingabe- und Übergangstyp haben" -#: executor/nodeAgg.c:3220 parser/parse_agg.c:618 parser/parse_agg.c:648 -#, c-format -msgid "aggregate function calls cannot be nested" -msgstr "Aufrufe von Aggregatfunktionen können nicht geschachtelt werden" - -#: executor/nodeCustom.c:146 executor/nodeCustom.c:157 +#: executor/nodeCustom.c:148 executor/nodeCustom.c:159 #, c-format msgid "custom scan \"%s\" does not support MarkPos" msgstr "Custom-Scan »%s« unterstützt MarkPos nicht" -#: executor/nodeHashjoin.c:768 executor/nodeHashjoin.c:798 +#: executor/nodeHashjoin.c:1040 executor/nodeHashjoin.c:1070 #, c-format msgid "could not rewind hash-join temporary file: %m" msgstr "konnte Position in temporärer Datei für Hash-Verbund nicht auf Anfang setzen: %m" -#: executor/nodeHashjoin.c:833 executor/nodeHashjoin.c:839 +#: executor/nodeHashjoin.c:1228 executor/nodeHashjoin.c:1234 #, c-format msgid "could not write to hash-join temporary file: %m" msgstr "konnte nicht in temporäre Datei für Hash-Verbund schreiben: %m" -#: executor/nodeHashjoin.c:880 executor/nodeHashjoin.c:890 +#: executor/nodeHashjoin.c:1275 executor/nodeHashjoin.c:1285 #, c-format msgid "could not read from hash-join temporary file: %m" msgstr "konnte nicht aus temporärer Datei für Hash-Verbund lesen: %m" -#: executor/nodeIndexonlyscan.c:233 +#: executor/nodeIndexonlyscan.c:236 #, c-format msgid "lossy distance functions are not supported in index-only scans" msgstr "verlustbehaftete Abstandsfunktionen werden in Index-Only-Scans nicht unterstützt" -#: executor/nodeLimit.c:252 +#: executor/nodeLimit.c:256 #, c-format msgid "OFFSET must not be negative" msgstr "OFFSET darf nicht negativ sein" -#: executor/nodeLimit.c:278 +#: executor/nodeLimit.c:282 #, c-format msgid "LIMIT must not be negative" msgstr "LIMIT darf nicht negativ sein" -#: executor/nodeMergejoin.c:1535 +#: executor/nodeMergejoin.c:1567 #, c-format msgid "RIGHT JOIN is only supported with merge-joinable join conditions" msgstr "RIGHT JOIN wird nur für Merge-Verbund-fähige Verbundbedingungen unterstützt" -#: executor/nodeMergejoin.c:1555 +#: executor/nodeMergejoin.c:1585 #, c-format msgid "FULL JOIN is only supported with merge-joinable join conditions" msgstr "FULL JOIN wird nur für Merge-Verbund-fähige Verbundbedingungen unterstützt" -#: executor/nodeModifyTable.c:96 +#: 
executor/nodeModifyTable.c:107
 #, c-format
 msgid "Query has too many columns."
 msgstr "Anfrage hat zu viele Spalten."
 
-#: executor/nodeModifyTable.c:123
+#: executor/nodeModifyTable.c:135
 #, c-format
 msgid "Query provides a value for a dropped column at ordinal position %d."
 msgstr "Anfrage liefert einen Wert für eine gelöschte Spalte auf Position %d."
 
-#: executor/nodeModifyTable.c:131
+#: executor/nodeModifyTable.c:143
 #, c-format
 msgid "Query has too few columns."
 msgstr "Anfrage hat zu wenige Spalten."
 
-#: executor/nodeModifyTable.c:1201
+#: executor/nodeModifyTable.c:774
+#, c-format
+msgid "tuple to be deleted was already moved to another partition due to concurrent update"
+msgstr "das zu löschende Tupel wurde schon aufgrund einer gleichzeitigen Aktualisierung in eine andere Partition verschoben"
+
+#: executor/nodeModifyTable.c:1074
+#, c-format
+msgid "invalid ON UPDATE specification"
+msgstr "ungültige ON-UPDATE-Angabe"
+
+#: executor/nodeModifyTable.c:1075
+#, c-format
+msgid "The result tuple would appear in a different partition than the original tuple."
+msgstr "Das Ergebnistupel würde in einer anderen Partition erscheinen als das ursprüngliche Tupel."
+
+#: executor/nodeModifyTable.c:1237
+#, c-format
+msgid "tuple to be updated was already moved to another partition due to concurrent update"
+msgstr "das zu aktualisierende Tupel wurde schon aufgrund einer gleichzeitigen Aktualisierung in eine andere Partition verschoben"
+
+#: executor/nodeModifyTable.c:1388
 #, c-format
 msgid "ON CONFLICT DO UPDATE command cannot affect row a second time"
 msgstr "Befehl in ON CONFLICT DO UPDATE kann eine Zeile nicht ein zweites Mal ändern"
 
-#: executor/nodeModifyTable.c:1202
+#: executor/nodeModifyTable.c:1389
 #, c-format
 msgid "Ensure that no rows proposed for insertion within the same command have duplicate constrained values."
 msgstr "Stellen Sie sicher, dass keine im selben Befehl fürs Einfügen vorgesehene Zeilen doppelte Werte haben, die einen Constraint verletzen würden."
-#: executor/nodeSamplescan.c:305 +#: executor/nodeSamplescan.c:279 #, c-format msgid "TABLESAMPLE parameter cannot be null" msgstr "Parameter von TABLESAMPLE darf nicht NULL sein" -#: executor/nodeSamplescan.c:317 +#: executor/nodeSamplescan.c:291 #, c-format msgid "TABLESAMPLE REPEATABLE parameter cannot be null" msgstr "Parameter von TABLESAMPLE REPEATABLE darf nicht NULL sein" -#: executor/nodeSubplan.c:339 executor/nodeSubplan.c:378 -#: executor/nodeSubplan.c:1028 +#: executor/nodeSubplan.c:336 executor/nodeSubplan.c:375 +#: executor/nodeSubplan.c:1097 #, c-format msgid "more than one row returned by a subquery used as an expression" msgstr "als Ausdruck verwendete Unteranfrage ergab mehr als eine Zeile" -#: executor/nodeTableFuncscan.c:369 -#, fuzzy, c-format -#| msgid "slot name must not be null" +#: executor/nodeTableFuncscan.c:362 +#, c-format msgid "namespace URI must not be null" -msgstr "Slot-Name darf nicht NULL sein" +msgstr "Namensraum-URI darf nicht NULL sein" -#: executor/nodeTableFuncscan.c:380 -#, fuzzy, c-format -#| msgid "FOREACH expression must not be null" +#: executor/nodeTableFuncscan.c:373 +#, c-format msgid "row filter expression must not be null" -msgstr "FOREACH-Ausdruck darf nicht NULL sein" +msgstr "Zeilenfilterausdruck darf nicht NULL sein" -#: executor/nodeTableFuncscan.c:405 -#, fuzzy, c-format -#| msgid "FOREACH expression must not be null" +#: executor/nodeTableFuncscan.c:399 +#, c-format msgid "column filter expression must not be null" -msgstr "FOREACH-Ausdruck darf nicht NULL sein" +msgstr "Spaltenfilterausdruck darf nicht NULL sein" -#: executor/nodeTableFuncscan.c:406 -#, fuzzy, c-format -#| msgid "missing data for column \"%s\"" +#: executor/nodeTableFuncscan.c:400 +#, c-format msgid "Filter for column \"%s\" is null." -msgstr "fehlende Daten für Spalte »%s«" +msgstr "Filter für Spalte »%s« ist NULL." 
-#: executor/nodeTableFuncscan.c:485
-#, fuzzy, c-format
-#| msgid "cannot alter inherited column \"%s\""
+#: executor/nodeTableFuncscan.c:483
+#, c-format
 msgid "null is not allowed in column \"%s\""
-msgstr "kann vererbte Spalte »%s« nicht ändern"
+msgstr "NULL ist in Spalte »%s« nicht erlaubt"
 
-#: executor/nodeWindowAgg.c:353
+#: executor/nodeWindowAgg.c:355
 #, c-format
 msgid "moving-aggregate transition function must not return null"
 msgstr "Moving-Aggregat-Übergangsfunktion darf nicht NULL zurückgeben"
 
-#: executor/nodeWindowAgg.c:1621
+#: executor/nodeWindowAgg.c:2047
 #, c-format
 msgid "frame starting offset must not be null"
 msgstr "Frame-Start-Offset darf nicht NULL sein"
 
-#: executor/nodeWindowAgg.c:1634
+#: executor/nodeWindowAgg.c:2060
 #, c-format
 msgid "frame starting offset must not be negative"
 msgstr "Frame-Start-Offset darf nicht negativ sein"
 
-#: executor/nodeWindowAgg.c:1646
+#: executor/nodeWindowAgg.c:2072
 #, c-format
 msgid "frame ending offset must not be null"
 msgstr "Frame-Ende-Offset darf nicht NULL sein"
 
-#: executor/nodeWindowAgg.c:1659
+#: executor/nodeWindowAgg.c:2085
 #, c-format
 msgid "frame ending offset must not be negative"
 msgstr "Frame-Ende-Offset darf nicht negativ sein"
 
-#: executor/spi.c:196
+#: executor/nodeWindowAgg.c:2723
+#, c-format
+msgid "aggregate function %s does not support use as a window function"
+msgstr "Aggregatfunktion %s unterstützt die Verwendung als Fensterfunktion nicht"
+
+#: executor/spi.c:213 executor/spi.c:247
+#, c-format
+msgid "invalid transaction termination"
+msgstr "ungültige Transaktionsbeendigung"
+
+#: executor/spi.c:227
+#, c-format
+msgid "cannot commit while a subtransaction is active"
+msgstr "Commit ist nicht möglich, während eine Subtransaktion aktiv ist"
+
+#: executor/spi.c:253
+#, c-format
+msgid "cannot roll back while a subtransaction is active"
+msgstr "Rollback ist nicht möglich, während eine Subtransaktion aktiv ist"
+
+#: executor/spi.c:290
 #, c-format
 msgid "transaction left non-empty SPI stack"
 msgstr "Transaktion ließ nicht-leeren SPI-Stack zurück"
 
-#: executor/spi.c:197 executor/spi.c:260
+#: executor/spi.c:291 executor/spi.c:352
 #, c-format
 msgid "Check for missing \"SPI_finish\" calls."
 msgstr "Prüfen Sie, ob Aufrufe von »SPI_finish« fehlen."
 
-#: executor/spi.c:259
+#: executor/spi.c:351
 #, c-format
 msgid "subtransaction left non-empty SPI stack"
 msgstr "Subtransaktion ließ nicht-leeren SPI-Stack zurück"
 
-#: executor/spi.c:1142
+#: executor/spi.c:1241
 #, c-format
 msgid "cannot open multi-query plan as cursor"
 msgstr "Plan mit mehreren Anfragen kann nicht als Cursor geöffnet werden"
 
 #. translator: %s is name of a SQL command, eg INSERT
-#: executor/spi.c:1147
+#: executor/spi.c:1246
 #, c-format
 msgid "cannot open %s query as cursor"
 msgstr "%s kann nicht als Cursor geöffnet werden"
 
-#: executor/spi.c:1255
+#: executor/spi.c:1351
 #, c-format
 msgid "DECLARE SCROLL CURSOR ... FOR UPDATE/SHARE is not supported"
 msgstr "DECLARE SCROLL CURSOR ... FOR UPDATE/SHARE wird nicht unterstützt"
 
-#: executor/spi.c:1256 parser/analyze.c:2442
+#: executor/spi.c:1352 parser/analyze.c:2448
 #, c-format
 msgid "Scrollable cursors must be READ ONLY."
 msgstr "Scrollbare Cursor müssen READ ONLY sein."
-#: executor/spi.c:2355
+#: executor/spi.c:2492
 #, c-format
 msgid "SQL statement \"%s\""
 msgstr "SQL-Anweisung »%s«"
 
-#: executor/tqueue.c:317
+#: executor/tqueue.c:70
 #, c-format
 msgid "could not send tuple to shared-memory queue"
 msgstr "konnte Tupel nicht an Shared-Memory-Queue senden"
@@ -11071,652 +11801,733 @@ msgstr "ungültige Option »%s«"
 msgid "Valid options in this context are: %s"
 msgstr "Gültige Optionen in diesem Zusammenhang sind: %s"
 
-#: gram.y:1060
+#: gram.y:1026
+#, c-format
+msgid "UNENCRYPTED PASSWORD is no longer supported"
+msgstr "UNENCRYPTED PASSWORD wird nicht mehr unterstützt"
+
+#: gram.y:1027
+#, c-format
+msgid "Remove UNENCRYPTED to store the password in encrypted form instead."
+msgstr "Lassen Sie UNENCRYPTED weg, um das Passwort stattdessen in verschlüsselter Form zu speichern."
+
+#: gram.y:1089
 #, c-format
 msgid "unrecognized role option \"%s\""
 msgstr "unbekannte Rollenoption »%s«"
 
-#: gram.y:1334 gram.y:1349
+#: gram.y:1336 gram.y:1351
 #, c-format
 msgid "CREATE SCHEMA IF NOT EXISTS cannot include schema elements"
 msgstr "CREATE SCHEMA IF NOT EXISTS kann keine Schemaelemente enthalten"
 
-#: gram.y:1494
+#: gram.y:1496
 #, c-format
 msgid "current database cannot be changed"
 msgstr "aktuelle Datenbank kann nicht geändert werden"
 
-#: gram.y:1618
+#: gram.y:1620
 #, c-format
 msgid "time zone interval must be HOUR or HOUR TO MINUTE"
 msgstr "Zeitzonenintervall muss HOUR oder HOUR TO MINUTE sein"
 
-#: gram.y:2769 gram.y:2798
+#: gram.y:2138
+#, c-format
+msgid "column number must be in range from 1 to %d"
+msgstr "Spaltennummer muss im Bereich von 1 bis %d sein"
+
+#: gram.y:2677
+#, c-format
+msgid "sequence option \"%s\" not supported here"
+msgstr "Sequenzoption »%s« wird hier nicht unterstützt"
+
+#: gram.y:2706
+#, c-format
+msgid "modulus for hash partition provided more than once"
+msgstr "Modulus für Hash-Partition mehrmals angegeben"
+
+#: gram.y:2715
+#, c-format
+msgid "remainder for hash partition provided more than once"
+msgstr "Rest für Hash-Partition mehrmals angegeben"
+
+#: gram.y:2722
+#, c-format
+msgid "unrecognized hash partition bound specification \"%s\""
+msgstr "unbekannte Begrenzungsangabe »%s« für Hash-Partition"
+
+#: gram.y:2730
+#, c-format
+msgid "modulus for hash partition must be specified"
+msgstr "Modulus für Hash-Partition muss angegeben werden"
+
+#: gram.y:2734
+#, c-format
+msgid "remainder for hash partition must be specified"
+msgstr "Rest für Hash-Partition muss angegeben werden"
+
+#: gram.y:2986 gram.y:3015
 #, c-format
 msgid "STDIN/STDOUT not allowed with PROGRAM"
 msgstr "STDIN/STDOUT sind nicht mit PROGRAM erlaubt"
 
-#: gram.y:3108 gram.y:3115 gram.y:10841 gram.y:10849
+#: gram.y:3325 gram.y:3332 gram.y:11460 gram.y:11468
 #, c-format
 msgid "GLOBAL is deprecated in temporary table creation"
 msgstr "die Verwendung von GLOBAL beim Erzeugen einer temporären Tabelle ist veraltet"
 
-#: gram.y:3592 utils/adt/ri_triggers.c:314 utils/adt/ri_triggers.c:371
-#: utils/adt/ri_triggers.c:790 utils/adt/ri_triggers.c:1013
-#: utils/adt/ri_triggers.c:1169 utils/adt/ri_triggers.c:1350
-#: utils/adt/ri_triggers.c:1515 utils/adt/ri_triggers.c:1691
-#: utils/adt/ri_triggers.c:1871 utils/adt/ri_triggers.c:2062
-#: utils/adt/ri_triggers.c:2120
utils/adt/ri_triggers.c:2225 -#: utils/adt/ri_triggers.c:2402 +#: gram.y:3817 utils/adt/ri_triggers.c:308 utils/adt/ri_triggers.c:365 +#: utils/adt/ri_triggers.c:853 utils/adt/ri_triggers.c:1013 +#: utils/adt/ri_triggers.c:1198 utils/adt/ri_triggers.c:1419 +#: utils/adt/ri_triggers.c:1654 utils/adt/ri_triggers.c:1712 +#: utils/adt/ri_triggers.c:1817 utils/adt/ri_triggers.c:1997 #, c-format msgid "MATCH PARTIAL not yet implemented" msgstr "MATCH PARTIAL ist noch nicht implementiert" -#: gram.y:4983 -#, fuzzy, c-format -#| msgid "unrecognized role option \"%s\"" +#: gram.y:5299 +#, c-format msgid "unrecognized row security option \"%s\"" -msgstr "unbekannte Rollenoption »%s«" +msgstr "unbekannte Zeilensicherheitsoption »%s«" -#: gram.y:4984 +#: gram.y:5300 #, c-format msgid "Only PERMISSIVE or RESTRICTIVE policies are supported currently." -msgstr "" +msgstr "Aktuell werden nur PERMISSIVE und RESTRICTIVE unterstützt." -#: gram.y:5092 +#: gram.y:5408 msgid "duplicate trigger events specified" msgstr "mehrere Trigger-Ereignisse angegeben" -#: gram.y:5228 parser/parse_utilcmd.c:2794 parser/parse_utilcmd.c:2820 +#: gram.y:5544 parser/parse_utilcmd.c:3314 parser/parse_utilcmd.c:3340 #, c-format msgid "constraint declared INITIALLY DEFERRED must be DEFERRABLE" msgstr "Constraint, der als INITIALLY DEFERRED deklariert wurde, muss DEFERRABLE sein" -#: gram.y:5235 +#: gram.y:5551 #, c-format msgid "conflicting constraint properties" msgstr "widersprüchliche Constraint-Eigentschaften" -#: gram.y:5341 +#: gram.y:5657 #, c-format msgid "CREATE ASSERTION is not yet implemented" msgstr "CREATE ASSERTION ist noch nicht implementiert" -#: gram.y:5356 +#: gram.y:5672 #, c-format msgid "DROP ASSERTION is not yet implemented" msgstr "DROP ASSERTION ist noch nicht implementiert" -#: gram.y:5740 +#: gram.y:6052 #, c-format msgid "RECHECK is no longer required" msgstr "RECHECK wird nicht mehr benötigt" -#: gram.y:5741 +#: gram.y:6053 #, c-format msgid "Update your data type." msgstr "Aktualisieren Sie Ihren Datentyp." -#: gram.y:7350 +#: gram.y:7789 #, c-format msgid "aggregates cannot have output arguments" msgstr "Aggregatfunktionen können keine OUT-Argumente haben" -#: gram.y:7679 utils/adt/regproc.c:776 utils/adt/regproc.c:817 +#: gram.y:8177 utils/adt/regproc.c:691 utils/adt/regproc.c:732 #, c-format msgid "missing argument" msgstr "Argument fehlt" -#: gram.y:7680 utils/adt/regproc.c:777 utils/adt/regproc.c:818 +#: gram.y:8178 utils/adt/regproc.c:692 utils/adt/regproc.c:733 #, c-format msgid "Use NONE to denote the missing argument of a unary operator." msgstr "Verwenden Sie NONE, um das fehlende Argument eines unären Operators anzugeben." -#: gram.y:9125 -#, fuzzy, c-format -#| msgid "unrecognized role option \"%s\"" -msgid "unrecognized option \"%s\"" -msgstr "unbekannte Rollenoption »%s«" - -#: gram.y:9450 gram.y:9468 +#: gram.y:10043 gram.y:10061 #, c-format msgid "WITH CHECK OPTION not supported on recursive views" msgstr "WITH CHECK OPTION wird für rekursive Sichten nicht unterstützt" -#: gram.y:9986 +#: gram.y:10558 #, c-format msgid "unrecognized VACUUM option \"%s\"" msgstr "unbekannte VACUUM-Option »%s«" -#: gram.y:10949 +#: gram.y:11568 #, c-format msgid "LIMIT #,# syntax is not supported" msgstr "Syntax LIMIT x,y wird nicht unterstützt" -#: gram.y:10950 +#: gram.y:11569 #, c-format msgid "Use separate LIMIT and OFFSET clauses." msgstr "Verwenden Sie die getrennten Klauseln LIMIT und OFFSET." 
-#: gram.y:11231 gram.y:11256 +#: gram.y:11850 gram.y:11875 #, c-format msgid "VALUES in FROM must have an alias" msgstr "VALUES in FROM muss Aliasnamen erhalten" -#: gram.y:11232 gram.y:11257 +#: gram.y:11851 gram.y:11876 #, c-format msgid "For example, FROM (VALUES ...) [AS] foo." msgstr "Zum Beispiel FROM (VALUES ...) [AS] xyz." -#: gram.y:11237 gram.y:11262 +#: gram.y:11856 gram.y:11881 #, c-format msgid "subquery in FROM must have an alias" msgstr "Unteranfrage in FROM muss Aliasnamen erhalten" -#: gram.y:11238 gram.y:11263 +#: gram.y:11857 gram.y:11882 #, c-format msgid "For example, FROM (SELECT ...) [AS] foo." msgstr "Zum Beispiel FROM (SELECT ...) [AS] xyz." -#: gram.y:11716 +#: gram.y:12336 #, c-format msgid "only one DEFAULT value is allowed" -msgstr "" +msgstr "nur ein DEFAULT-Wert ist erlaubt" -#: gram.y:11725 +#: gram.y:12345 #, c-format msgid "only one PATH value per column is allowed" -msgstr "" +msgstr "nur ein PATH-Wert pro Spalte ist erlaubt" -#: gram.y:11734 -#, fuzzy, c-format -#| msgid "conflicting NULL/NOT NULL declarations for column \"%s\" of table \"%s\"" +#: gram.y:12354 +#, c-format msgid "conflicting or redundant NULL / NOT NULL declarations for column \"%s\"" -msgstr "widersprüchliche NULL/NOT NULL-Deklarationen für Spalte »%s« von Tabelle »%s«" +msgstr "widersprüchliche oder überflüssige NULL/NOT NULL-Deklarationen für Spalte »%s«" -#: gram.y:11743 -#, fuzzy, c-format -#| msgid "unrecognized role option \"%s\"" +#: gram.y:12363 +#, c-format msgid "unrecognized column option \"%s\"" -msgstr "unbekannte Rollenoption »%s«" +msgstr "unbekannte Spaltenoption »%s«" -#: gram.y:11997 +#: gram.y:12617 #, c-format msgid "precision for type float must be at least 1 bit" msgstr "Präzision von Typ float muss mindestens 1 Bit sein" -#: gram.y:12006 +#: gram.y:12626 #, c-format msgid "precision for type float must be less than 54 bits" msgstr "Präzision von Typ float muss weniger als 54 Bits sein" -#: gram.y:12497 +#: gram.y:13117 #, c-format msgid "wrong number of parameters on left side of OVERLAPS expression" msgstr "falsche Anzahl Parameter auf linker Seite von OVERLAPS-Ausdruck" -#: gram.y:12502 +#: gram.y:13122 #, c-format msgid "wrong number of parameters on right side of OVERLAPS expression" msgstr "falsche Anzahl Parameter auf rechter Seite von OVERLAPS-Ausdruck" -#: gram.y:12677 +#: gram.y:13297 #, c-format msgid "UNIQUE predicate is not yet implemented" msgstr "UNIQUE-Prädikat ist noch nicht implementiert" -#: gram.y:13024 +#: gram.y:13644 #, c-format msgid "cannot use multiple ORDER BY clauses with WITHIN GROUP" msgstr "in WITHIN GROUP können nicht mehrere ORDER-BY-Klauseln verwendet werden" -#: gram.y:13029 +#: gram.y:13649 #, c-format msgid "cannot use DISTINCT with WITHIN GROUP" msgstr "DISTINCT kann nicht mit WITHIN GROUP verwendet werden" -#: gram.y:13034 +#: gram.y:13654 #, c-format msgid "cannot use VARIADIC with WITHIN GROUP" msgstr "VARIADIC kann nicht mit WITHIN GROUP verwendet werden" -#: gram.y:13460 -#, c-format -msgid "RANGE PRECEDING is only supported with UNBOUNDED" -msgstr "RANGE PRECEDING wird nur mit UNBOUNDED unterstützt" - -#: gram.y:13466 -#, c-format -msgid "RANGE FOLLOWING is only supported with UNBOUNDED" -msgstr "RANGE FOLLOWING wird nur mit UNBOUNDED unterstützt" - -#: gram.y:13493 gram.y:13516 +#: gram.y:14107 gram.y:14130 #, c-format msgid "frame start cannot be UNBOUNDED FOLLOWING" msgstr "Frame-Beginn kann nicht UNBOUNDED FOLLOWING sein" -#: gram.y:13498 +#: gram.y:14112 #, c-format msgid "frame starting from following row cannot 
end with current row" msgstr "Frame der in der folgenden Zeile beginnt kann nicht in der aktuellen Zeile enden" -#: gram.y:13521 +#: gram.y:14135 #, c-format msgid "frame end cannot be UNBOUNDED PRECEDING" msgstr "Frame-Ende kann nicht UNBOUNDED PRECEDING sein" -#: gram.y:13527 +#: gram.y:14141 #, c-format msgid "frame starting from current row cannot have preceding rows" msgstr "Frame der in der aktuellen Zeile beginnt kann keine vorhergehenden Zeilen haben" -#: gram.y:13534 +#: gram.y:14148 #, c-format msgid "frame starting from following row cannot have preceding rows" msgstr "Frame der in der folgenden Zeile beginnt kann keine vorhergehenden Zeilen haben" -#: gram.y:14169 +#: gram.y:14791 #, c-format msgid "type modifier cannot have parameter name" msgstr "Typmodifikator kann keinen Parameternamen haben" -#: gram.y:14175 +#: gram.y:14797 #, c-format msgid "type modifier cannot have ORDER BY" msgstr "Typmodifikator kann kein ORDER BY haben" -#: gram.y:14239 gram.y:14245 +#: gram.y:14862 gram.y:14869 #, c-format msgid "%s cannot be used as a role name here" msgstr "%s kann hier nicht als Rollenname verwendet werden" -#: gram.y:14905 gram.y:15094 +#: gram.y:15540 gram.y:15729 msgid "improper use of \"*\"" msgstr "unzulässige Verwendung von »*«" -#: gram.y:15057 gram.y:15074 tsearch/spell.c:954 tsearch/spell.c:971 +#: gram.y:15692 gram.y:15709 tsearch/spell.c:954 tsearch/spell.c:971 #: tsearch/spell.c:988 tsearch/spell.c:1005 tsearch/spell.c:1070 #, c-format msgid "syntax error" msgstr "Syntaxfehler" -#: gram.y:15158 +#: gram.y:15793 #, c-format msgid "an ordered-set aggregate with a VARIADIC direct argument must have one VARIADIC aggregated argument of the same data type" msgstr "eine Ordered-Set-Aggregatfunktion mit einem direkten VARIADIC-Argument muss ein aggregiertes VARIADIC-Argument des selben Datentyps haben" -#: gram.y:15195 +#: gram.y:15830 #, c-format msgid "multiple ORDER BY clauses not allowed" msgstr "mehrere ORDER-BY-Klauseln sind nicht erlaubt" -#: gram.y:15206 +#: gram.y:15841 #, c-format msgid "multiple OFFSET clauses not allowed" msgstr "mehrere OFFSET-Klauseln sind nicht erlaubt" -#: gram.y:15215 +#: gram.y:15850 #, c-format msgid "multiple LIMIT clauses not allowed" msgstr "mehrere LIMIT-Klauseln sind nicht erlaubt" -#: gram.y:15224 +#: gram.y:15859 #, c-format msgid "multiple WITH clauses not allowed" msgstr "mehrere WITH-Klauseln sind nicht erlaubt" -#: gram.y:15428 +#: gram.y:16063 #, c-format msgid "OUT and INOUT arguments aren't allowed in TABLE functions" msgstr "OUT- und INOUT-Argumente sind in TABLE-Funktionen nicht erlaubt" -#: gram.y:15529 +#: gram.y:16164 #, c-format msgid "multiple COLLATE clauses not allowed" msgstr "mehrere COLLATE-Klauseln sind nicht erlaubt" #. translator: %s is CHECK, UNIQUE, or similar -#: gram.y:15567 gram.y:15580 +#: gram.y:16202 gram.y:16215 #, c-format msgid "%s constraints cannot be marked DEFERRABLE" msgstr "%s-Constraints können nicht als DEFERRABLE markiert werden" #. translator: %s is CHECK, UNIQUE, or similar -#: gram.y:15593 +#: gram.y:16228 #, c-format msgid "%s constraints cannot be marked NOT VALID" msgstr "%s-Constraints können nicht als NOT VALID markiert werden" #. 
translator: %s is CHECK, UNIQUE, or similar -#: gram.y:15606 +#: gram.y:16241 #, c-format msgid "%s constraints cannot be marked NO INHERIT" msgstr "%s-Constraints können nicht als NO INHERIT markiert werden" -#: guc-file.l:313 +#: guc-file.l:316 #, c-format msgid "unrecognized configuration parameter \"%s\" in file \"%s\" line %u" msgstr "unbekannter Konfigurationsparameter »%s« in Datei »%s« Zeile %u" -#: guc-file.l:350 utils/misc/guc.c:5962 utils/misc/guc.c:6155 -#: utils/misc/guc.c:6245 utils/misc/guc.c:6335 utils/misc/guc.c:6443 -#: utils/misc/guc.c:6538 +#: guc-file.l:353 utils/misc/guc.c:6229 utils/misc/guc.c:6423 +#: utils/misc/guc.c:6513 utils/misc/guc.c:6603 utils/misc/guc.c:6711 +#: utils/misc/guc.c:6806 #, c-format msgid "parameter \"%s\" cannot be changed without restarting the server" msgstr "Parameter »%s« kann nicht geändert werden, ohne den Server neu zu starten" -#: guc-file.l:386 +#: guc-file.l:389 #, c-format msgid "parameter \"%s\" removed from configuration file, reset to default" msgstr "Parameter »%s« wurde aus Konfigurationsdatei entfernt, wird auf Standardwert zurückgesetzt" -#: guc-file.l:452 +#: guc-file.l:455 #, c-format msgid "parameter \"%s\" changed to \"%s\"" msgstr "Parameter »%s« auf »%s« gesetzt" -#: guc-file.l:494 +#: guc-file.l:497 #, c-format msgid "configuration file \"%s\" contains errors" msgstr "Konfigurationsdatei »%s« enthält Fehler" -#: guc-file.l:499 +#: guc-file.l:502 #, c-format msgid "configuration file \"%s\" contains errors; unaffected changes were applied" msgstr "Konfigurationsdatei »%s« enthält Fehler; nicht betroffene Änderungen wurden durchgeführt" -#: guc-file.l:504 +#: guc-file.l:507 #, c-format msgid "configuration file \"%s\" contains errors; no changes were applied" msgstr "Konfigurationsdatei »%s« enthält Fehler; keine Änderungen wurden durchgeführt" -#: guc-file.l:577 +#: guc-file.l:580 #, c-format msgid "could not open configuration file \"%s\": maximum nesting depth exceeded" msgstr "konnte Konfigurationsdatei »%s« nicht öffnen: maximale Verschachtelungstiefe überschritten" -#: guc-file.l:593 libpq/hba.c:1961 libpq/hba.c:2361 +#: guc-file.l:596 libpq/hba.c:2142 libpq/hba.c:2547 #, c-format msgid "could not open configuration file \"%s\": %m" msgstr "konnte Konfigurationsdatei »%s« nicht öffnen: %m" -#: guc-file.l:604 +#: guc-file.l:607 #, c-format msgid "skipping missing configuration file \"%s\"" msgstr "fehlende Konfigurationsdatei »%s« wird übersprungen" -#: guc-file.l:858 +#: guc-file.l:861 #, c-format msgid "syntax error in file \"%s\" line %u, near end of line" msgstr "Syntaxfehler in Datei »%s«, Zeile %u, am Ende der Zeile" -#: guc-file.l:868 +#: guc-file.l:871 #, c-format msgid "syntax error in file \"%s\" line %u, near token \"%s\"" msgstr "Syntaxfehler in Datei »%s«, Zeile %u, bei »%s«" -#: guc-file.l:888 +#: guc-file.l:891 #, c-format msgid "too many syntax errors found, abandoning file \"%s\"" msgstr "zu viele Syntaxfehler gefunden, Datei »%s« wird aufgegeben" -#: guc-file.l:940 +#: guc-file.l:943 #, c-format msgid "could not open configuration directory \"%s\": %m" msgstr "konnte Konfigurationsverzeichnis »%s« nicht öffnen: %m" -#: lib/stringinfo.c:301 +#: jit/jit.c:197 utils/fmgr/dfmgr.c:201 utils/fmgr/dfmgr.c:418 +#: utils/fmgr/dfmgr.c:466 +#, c-format +msgid "could not access file \"%s\": %m" +msgstr "konnte nicht auf Datei »%s« zugreifen: %m" + +#: jit/llvm/llvmjit.c:595 +#, c-format +msgid "time to inline: %.3fs, opt: %.3fs, emit: %.3fs" +msgstr "" + +#: lib/dshash.c:247 utils/mmgr/dsa.c:714 
utils/mmgr/dsa.c:796 +#, c-format +msgid "Failed on DSA request of size %zu." +msgstr "Fehler bei DSA-Anfrage mit Größe %zu." + +#: lib/stringinfo.c:278 #, c-format msgid "Cannot enlarge string buffer containing %d bytes by %d more bytes." msgstr "Kann Zeichenkettenpuffer mit %d Bytes nicht um %d Bytes vergrößern." -#: libpq/auth-scram.c:200 -#, fuzzy, c-format -#| msgid "User does not have CONNECT privilege." +#: libpq/auth-scram.c:203 libpq/auth-scram.c:443 libpq/auth-scram.c:452 +#, c-format +msgid "invalid SCRAM verifier for user \"%s\"" +msgstr "ungültiger SCRAM-Verifier für Benutzer »%s«" + +#: libpq/auth-scram.c:214 +#, c-format msgid "User \"%s\" does not have a valid SCRAM verifier." -msgstr "Benutzer hat das CONNECT-Privileg nicht." +msgstr "Benutzer »%s« hat keinen gültigen SCRAM-Verifier." -#: libpq/auth-scram.c:234 +#: libpq/auth-scram.c:292 libpq/auth-scram.c:297 libpq/auth-scram.c:591 +#: libpq/auth-scram.c:599 libpq/auth-scram.c:680 libpq/auth-scram.c:690 +#: libpq/auth-scram.c:797 libpq/auth-scram.c:820 libpq/auth-scram.c:871 +#: libpq/auth-scram.c:886 libpq/auth-scram.c:1188 libpq/auth-scram.c:1196 #, c-format -msgid "malformed SCRAM message (empty message)" -msgstr "" +msgid "malformed SCRAM message" +msgstr "fehlerhafte SCRAM-Nachricht" -#: libpq/auth-scram.c:238 +#: libpq/auth-scram.c:293 #, c-format -msgid "malformed SCRAM message (length mismatch)" -msgstr "" +msgid "The message is empty." +msgstr "Die Nachricht ist leer." -#: libpq/auth-scram.c:270 +#: libpq/auth-scram.c:298 #, c-format -msgid "invalid SCRAM response (nonce mismatch)" -msgstr "" +msgid "Message length does not match input length." +msgstr "Länge der Nachricht stimmt nicht mit Länge der Eingabe überein." -#: libpq/auth-scram.c:343 -#, fuzzy, c-format -#| msgid "could not generate random encryption vector" +#: libpq/auth-scram.c:330 +#, c-format +msgid "invalid SCRAM response" +msgstr "ungültige SCRAM-Antwort" + +#: libpq/auth-scram.c:331 +#, c-format +msgid "Nonce does not match." +msgstr "Nonce stimmt nicht überein." + +#: libpq/auth-scram.c:405 +#, c-format msgid "could not generate random salt" -msgstr "konnte zufälligen Verschlüsselungsvektor nicht erzeugen" +msgstr "konnte zufälliges Salt nicht erzeugen" -#: libpq/auth-scram.c:483 +#: libpq/auth-scram.c:592 #, c-format -msgid "malformed SCRAM message (attribute '%c' expected, %s found)" -msgstr "" +msgid "Expected attribute \"%c\" but found \"%s\"." +msgstr "Attribut »%c« wurde erwartet, aber »%s« wurde gefunden." + +#: libpq/auth-scram.c:600 libpq/auth-scram.c:691 +#, c-format +msgid "Expected character \"=\" for attribute \"%c\"." +msgstr "Zeichen »=« für Attribut »%c« wurde erwartet." + +#: libpq/auth-scram.c:681 +#, c-format +msgid "Attribute expected, but found invalid character \"%s\"." +msgstr "Attribut wurde erwartet, aber ungültiges Zeichen »%s« wurde gefunden." + +#: libpq/auth-scram.c:798 libpq/auth-scram.c:821 +#, c-format +msgid "Comma expected, but found character \"%s\"." +msgstr "Komma wurde erwartet, aber Zeichen »%s« wurde gefunden." -#: libpq/auth-scram.c:490 libpq/auth-scram.c:579 +#: libpq/auth-scram.c:813 #, c-format -msgid "malformed SCRAM message (expected = in attr %c)" +msgid "SCRAM channel binding negotiation error" msgstr "" -#: libpq/auth-scram.c:570 +#: libpq/auth-scram.c:814 #, c-format -msgid "malformed SCRAM message (attribute expected, invalid char %s found)" +msgid "The client supports SCRAM channel binding but thinks the server does not. However, this server does support channel binding." 
msgstr "" -#: libpq/auth-scram.c:692 +#: libpq/auth-scram.c:848 #, c-format msgid "client requires SCRAM channel binding, but it is not supported" -msgstr "" +msgstr "Client verlangt SCRAM-Channel-Binding, was nicht unterstützt wird" -#: libpq/auth-scram.c:696 +#: libpq/auth-scram.c:862 #, c-format -msgid "malformed SCRAM message (unexpected channel-binding flag %s)" +msgid "unsupported SCRAM channel-binding type" msgstr "" -#: libpq/auth-scram.c:702 +#: libpq/auth-scram.c:872 #, c-format -msgid "malformed SCRAM message (comma expected, got %s)" -msgstr "" +msgid "Unexpected channel-binding flag \"%s\"." +msgstr "Unerwartetes Channel-Binding-Flag »%s«." -#: libpq/auth-scram.c:712 +#: libpq/auth-scram.c:882 #, c-format msgid "client uses authorization identity, but it is not supported" -msgstr "" +msgstr "Client verwendet Autorisierungsidentität, was nicht unterstützt wird" -#: libpq/auth-scram.c:716 +#: libpq/auth-scram.c:887 #, c-format -msgid "malformed SCRAM message (unexpected attribute %s in client-first-message)" -msgstr "" +msgid "Unexpected attribute \"%s\" in client-first-message." +msgstr "Unerwartetes Attribut »%s« in »client-first-message«." -#: libpq/auth-scram.c:732 +#: libpq/auth-scram.c:903 #, c-format -msgid "client requires mandatory SCRAM extension" -msgstr "" +msgid "client requires an unsupported SCRAM extension" +msgstr "Client verlangt eine nicht unterstützte SCRAM-Erweiterung" -#: libpq/auth-scram.c:746 +#: libpq/auth-scram.c:917 #, c-format msgid "non-printable characters in SCRAM nonce" -msgstr "" +msgstr "nicht druckbare Zeichen in SCRAM-Nonce" -#: libpq/auth-scram.c:863 -#, fuzzy, c-format -#| msgid "could not generate random encryption vector" +#: libpq/auth-scram.c:1034 +#, c-format msgid "could not generate random nonce" -msgstr "konnte zufälligen Verschlüsselungsvektor nicht erzeugen" +msgstr "konnte zufällige Nonce nicht erzeugen" -#: libpq/auth-scram.c:931 +#: libpq/auth-scram.c:1158 #, c-format -msgid "unexpected SCRAM channel-binding attribute in client-final-message" +msgid "SCRAM channel binding check failed" msgstr "" -#: libpq/auth-scram.c:945 +#: libpq/auth-scram.c:1172 #, c-format -msgid "malformed SCRAM message (malformed proof in client-final-message" -msgstr "" +msgid "unexpected SCRAM channel-binding attribute in client-final-message" +msgstr "unerwartetes SCRAM-Channel-Binding-Attribut in »client-final-message«" -#: libpq/auth-scram.c:952 +#: libpq/auth-scram.c:1189 #, c-format -msgid "malformed SCRAM message (garbage at end of client-final-message)" -msgstr "" +msgid "Malformed proof in client-final-message." +msgstr "Fehlerhafter Proof in »client-final-message«." -#: libpq/auth.c:280 +#: libpq/auth-scram.c:1197 +#, c-format +msgid "Garbage found at the end of client-final-message." +msgstr "Müll am Ende der »client-final-message« gefunden." 
+ +#: libpq/auth.c:282 #, c-format msgid "authentication failed for user \"%s\": host rejected" msgstr "Authentifizierung für Benutzer »%s« fehlgeschlagen: Host abgelehnt" -#: libpq/auth.c:283 +#: libpq/auth.c:285 #, c-format msgid "\"trust\" authentication failed for user \"%s\"" msgstr "»trust«-Authentifizierung für Benutzer »%s« fehlgeschlagen" -#: libpq/auth.c:286 +#: libpq/auth.c:288 #, c-format msgid "Ident authentication failed for user \"%s\"" msgstr "Ident-Authentifizierung für Benutzer »%s« fehlgeschlagen" -#: libpq/auth.c:289 +#: libpq/auth.c:291 #, c-format msgid "Peer authentication failed for user \"%s\"" msgstr "Peer-Authentifizierung für Benutzer »%s« fehlgeschlagen" -#: libpq/auth.c:294 +#: libpq/auth.c:296 #, c-format msgid "password authentication failed for user \"%s\"" msgstr "Passwort-Authentifizierung für Benutzer »%s« fehlgeschlagen" -#: libpq/auth.c:299 +#: libpq/auth.c:301 #, c-format msgid "GSSAPI authentication failed for user \"%s\"" msgstr "GSSAPI-Authentifizierung für Benutzer »%s« fehlgeschlagen" -#: libpq/auth.c:302 +#: libpq/auth.c:304 #, c-format msgid "SSPI authentication failed for user \"%s\"" msgstr "SSPI-Authentifizierung für Benutzer »%s« fehlgeschlagen" -#: libpq/auth.c:305 +#: libpq/auth.c:307 #, c-format msgid "PAM authentication failed for user \"%s\"" msgstr "PAM-Authentifizierung für Benutzer »%s« fehlgeschlagen" -#: libpq/auth.c:308 +#: libpq/auth.c:310 #, c-format msgid "BSD authentication failed for user \"%s\"" msgstr "BSD-Authentifizierung für Benutzer »%s« fehlgeschlagen" -#: libpq/auth.c:311 +#: libpq/auth.c:313 #, c-format msgid "LDAP authentication failed for user \"%s\"" msgstr "LDAP-Authentifizierung für Benutzer »%s« fehlgeschlagen" -#: libpq/auth.c:314 +#: libpq/auth.c:316 #, c-format msgid "certificate authentication failed for user \"%s\"" msgstr "Zertifikatauthentifizierung für Benutzer »%s« fehlgeschlagen" -#: libpq/auth.c:317 +#: libpq/auth.c:319 #, c-format msgid "RADIUS authentication failed for user \"%s\"" msgstr "RADIUS-Authentifizierung für Benutzer »%s« fehlgeschlagen" -#: libpq/auth.c:320 +#: libpq/auth.c:322 #, c-format msgid "authentication failed for user \"%s\": invalid authentication method" msgstr "Authentifizierung für Benutzer »%s« fehlgeschlagen: ungültige Authentifizierungsmethode" -#: libpq/auth.c:324 +#: libpq/auth.c:326 #, c-format msgid "Connection matched pg_hba.conf line %d: \"%s\"" msgstr "Verbindung stimmte mit pg_hba.conf-Zeile %d überein: »%s«" -#: libpq/auth.c:371 +#: libpq/auth.c:373 #, c-format msgid "client certificates can only be checked if a root certificate store is available" msgstr "Client-Zertifikate können nur überprüft werden, wenn Wurzelzertifikat verfügbar ist" -#: libpq/auth.c:382 +#: libpq/auth.c:384 #, c-format msgid "connection requires a valid client certificate" msgstr "Verbindung erfordert ein gültiges Client-Zertifikat" -#: libpq/auth.c:415 +#: libpq/auth.c:417 #, c-format msgid "pg_hba.conf rejects replication connection for host \"%s\", user \"%s\", %s" msgstr "pg_hba.conf lehnt Replikationsverbindung ab für Host »%s«, Benutzer »%s«, %s" -#: libpq/auth.c:417 libpq/auth.c:433 libpq/auth.c:491 libpq/auth.c:509 +#: libpq/auth.c:419 libpq/auth.c:435 libpq/auth.c:493 libpq/auth.c:511 msgid "SSL off" msgstr "SSL aus" -#: libpq/auth.c:417 libpq/auth.c:433 libpq/auth.c:491 libpq/auth.c:509 +#: libpq/auth.c:419 libpq/auth.c:435 libpq/auth.c:493 libpq/auth.c:511 msgid "SSL on" msgstr "SSL an" -#: libpq/auth.c:421 +#: libpq/auth.c:423 #, c-format msgid "pg_hba.conf rejects replication 
connection for host \"%s\", user \"%s\"" msgstr "pg_hba.conf lehnt Replikationsverbindung ab für Host »%s«, Benutzer »%s«" -#: libpq/auth.c:430 +#: libpq/auth.c:432 #, c-format msgid "pg_hba.conf rejects connection for host \"%s\", user \"%s\", database \"%s\", %s" msgstr "pg_hba.conf lehnt Verbindung ab für Host »%s«, Benutzer »%s«, Datenbank »%s«, %s" -#: libpq/auth.c:437 +#: libpq/auth.c:439 #, c-format msgid "pg_hba.conf rejects connection for host \"%s\", user \"%s\", database \"%s\"" msgstr "pg_hba.conf lehnt Verbindung ab für Host »%s«, Benutzer »%s«, Datenbank »%s«" -#: libpq/auth.c:466 +#: libpq/auth.c:468 #, c-format msgid "Client IP address resolved to \"%s\", forward lookup matches." msgstr "Auflösung der Client-IP-Adresse ergab »%s«, Vorwärtsauflösung stimmt überein." -#: libpq/auth.c:469 +#: libpq/auth.c:471 #, c-format msgid "Client IP address resolved to \"%s\", forward lookup not checked." msgstr "Auflösung der Client-IP-Adresse ergab »%s«, Vorwärtsauflösung nicht geprüft." -#: libpq/auth.c:472 +#: libpq/auth.c:474 #, c-format msgid "Client IP address resolved to \"%s\", forward lookup does not match." msgstr "Auflösung der Client-IP-Adresse ergab »%s«, Vorwärtsauflösung stimmt nicht überein." -#: libpq/auth.c:475 +#: libpq/auth.c:477 #, c-format msgid "Could not translate client host name \"%s\" to IP address: %s." msgstr "Konnte Client-Hostnamen »%s« nicht in IP-Adresse übersetzen: %s." -#: libpq/auth.c:480 +#: libpq/auth.c:482 #, c-format msgid "Could not resolve client IP address to a host name: %s." msgstr "Konnte Client-IP-Adresse nicht in einen Hostnamen auflösen: %s." -#: libpq/auth.c:489 +#: libpq/auth.c:491 #, c-format msgid "no pg_hba.conf entry for replication connection from host \"%s\", user \"%s\", %s" msgstr "kein pg_hba.conf-Eintrag für Replikationsverbindung von Host »%s«, Benutzer »%s«, %s" -#: libpq/auth.c:496 +#: libpq/auth.c:498 #, c-format msgid "no pg_hba.conf entry for replication connection from host \"%s\", user \"%s\"" msgstr "kein pg_hba.conf-Eintrag für Replikationsverbindung von Host »%s«, Benutzer »%s«" -#: libpq/auth.c:506 +#: libpq/auth.c:508 #, c-format msgid "no pg_hba.conf entry for host \"%s\", user \"%s\", database \"%s\", %s" msgstr "kein pg_hba.conf-Eintrag für Host »%s«, Benutzer »%s«, Datenbank »%s«, %s" -#: libpq/auth.c:514 +#: libpq/auth.c:516 #, c-format msgid "no pg_hba.conf entry for host \"%s\", user \"%s\", database \"%s\"" msgstr "kein pg_hba.conf-Eintrag für Host »%s«, Benutzer »%s«, Datenbank »%s«" @@ -11731,1100 +12542,1192 @@ msgstr "Passwort-Antwort erwartet, Message-Typ %d empfangen" msgid "invalid password packet size" msgstr "ungültige Größe des Passwortpakets" -#: libpq/auth.c:727 libpq/hba.c:1319 +#: libpq/auth.c:715 +#, c-format +msgid "empty password returned by client" +msgstr "Client gab leeres Passwort zurück" + +#: libpq/auth.c:835 libpq/hba.c:1325 #, c-format msgid "MD5 authentication is not supported when \"db_user_namespace\" is enabled" msgstr "MD5-Authentifizierung wird nicht unterstützt, wenn »db_user_namespace« angeschaltet ist" -#: libpq/auth.c:733 -#, fuzzy, c-format -#| msgid "could not generate random encryption vector" +#: libpq/auth.c:841 +#, c-format msgid "could not generate random MD5 salt" -msgstr "konnte zufälligen Verschlüsselungsvektor nicht erzeugen" +msgstr "konnte zufälliges MD5-Salt nicht erzeugen" -#: libpq/auth.c:812 -#, fuzzy, c-format -#| msgid "SSPI is not supported in protocol version 2" +#: libpq/auth.c:888 +#, c-format msgid "SASL authentication is not supported in protocol 
version 2" -msgstr "SSL wird in Protokollversion 2 nicht unterstützt" +msgstr "SASL-Authentifizierung wird in Protokollversion 2 nicht unterstützt" -#: libpq/auth.c:852 -#, fuzzy, c-format -#| msgid "expected GSS response, got message type %d" +#: libpq/auth.c:947 +#, c-format msgid "expected SASL response, got message type %d" -msgstr "GSS-Antwort erwartet, Message-Typ %d empfangen" +msgstr "SASL-Antwort erwartet, Message-Typ %d empfangen" + +#: libpq/auth.c:981 +#, c-format +msgid "client selected an invalid SASL authentication mechanism" +msgstr "Client hat einen ungültigen SASL-Authentifizierungsmechanismums gewählt" -#: libpq/auth.c:989 +#: libpq/auth.c:1128 #, c-format msgid "GSSAPI is not supported in protocol version 2" msgstr "GSSAPI wird in Protokollversion 2 nicht unterstützt" -#: libpq/auth.c:1049 +#: libpq/auth.c:1188 #, c-format msgid "expected GSS response, got message type %d" msgstr "GSS-Antwort erwartet, Message-Typ %d empfangen" -#: libpq/auth.c:1111 +#: libpq/auth.c:1250 msgid "accepting GSS security context failed" msgstr "Annahme des GSS-Sicherheitskontexts fehlgeschlagen" -#: libpq/auth.c:1137 +#: libpq/auth.c:1276 msgid "retrieving GSS user name failed" msgstr "Abfrage des GSS-Benutzernamens fehlgeschlagen" -#: libpq/auth.c:1256 +#: libpq/auth.c:1396 #, c-format msgid "SSPI is not supported in protocol version 2" msgstr "SSL wird in Protokollversion 2 nicht unterstützt" -#: libpq/auth.c:1271 +#: libpq/auth.c:1411 msgid "could not acquire SSPI credentials" msgstr "konnte SSPI-Credentials nicht erhalten" -#: libpq/auth.c:1289 +#: libpq/auth.c:1429 #, c-format msgid "expected SSPI response, got message type %d" msgstr "SSPI-Antwort erwartet, Message-Typ %d empfangen" -#: libpq/auth.c:1362 +#: libpq/auth.c:1502 msgid "could not accept SSPI security context" msgstr "konnte SSPI-Sicherheitskontext nicht akzeptieren" -#: libpq/auth.c:1424 +#: libpq/auth.c:1564 msgid "could not get token from SSPI security context" msgstr "konnte kein Token vom SSPI-Sicherheitskontext erhalten" -#: libpq/auth.c:1543 libpq/auth.c:1562 +#: libpq/auth.c:1683 libpq/auth.c:1702 #, c-format msgid "could not translate name" msgstr "konnte Namen nicht umwandeln" -#: libpq/auth.c:1575 +#: libpq/auth.c:1715 #, c-format msgid "realm name too long" msgstr "Realm-Name zu lang" -#: libpq/auth.c:1590 +#: libpq/auth.c:1730 #, c-format msgid "translated account name too long" msgstr "umgewandelter Account-Name zu lang" -#: libpq/auth.c:1776 +#: libpq/auth.c:1916 #, c-format msgid "could not create socket for Ident connection: %m" msgstr "konnte Socket für Ident-Verbindung nicht erzeugen: %m" -#: libpq/auth.c:1791 +#: libpq/auth.c:1931 #, c-format msgid "could not bind to local address \"%s\": %m" msgstr "konnte nicht mit lokaler Adresse »%s« verbinden: %m" -#: libpq/auth.c:1803 +#: libpq/auth.c:1943 #, c-format msgid "could not connect to Ident server at address \"%s\", port %s: %m" msgstr "konnte nicht mit Ident-Server auf Adresse »%s«, Port %s verbinden: %m" -#: libpq/auth.c:1825 +#: libpq/auth.c:1965 #, c-format msgid "could not send query to Ident server at address \"%s\", port %s: %m" msgstr "konnte Anfrage an Ident-Server auf Adresse »%s«, Port %s nicht senden: %m" -#: libpq/auth.c:1842 +#: libpq/auth.c:1982 #, c-format msgid "could not receive response from Ident server at address \"%s\", port %s: %m" msgstr "konnte Antwort von Ident-Server auf Adresse »%s«, Port %s nicht empfangen: %m" -#: libpq/auth.c:1852 +#: libpq/auth.c:1992 #, c-format msgid "invalidly formatted response from Ident server: 
\"%s\"" msgstr "ungültig formatierte Antwort vom Ident-Server: »%s«" -#: libpq/auth.c:1892 +#: libpq/auth.c:2032 #, c-format msgid "peer authentication is not supported on this platform" msgstr "Peer-Authentifizierung wird auf dieser Plattform nicht unterstützt" -#: libpq/auth.c:1896 +#: libpq/auth.c:2036 #, c-format msgid "could not get peer credentials: %m" msgstr "konnte Credentials von Gegenstelle nicht ermitteln: %m" -#: libpq/auth.c:1905 +#: libpq/auth.c:2047 #, c-format msgid "could not look up local user ID %ld: %s" msgstr "konnte lokale Benutzer-ID %ld nicht nachschlagen: %s" -#: libpq/auth.c:1989 libpq/auth.c:2315 libpq/auth.c:2675 -#, c-format -msgid "empty password returned by client" -msgstr "Client gab leeres Passwort zurück" - -#: libpq/auth.c:1999 +#: libpq/auth.c:2135 #, c-format msgid "error from underlying PAM layer: %s" msgstr "Fehler von der unteren PAM-Ebene: %s" -#: libpq/auth.c:2080 +#: libpq/auth.c:2216 #, c-format msgid "could not create PAM authenticator: %s" msgstr "konnte PAM-Authenticator nicht erzeugen: %s" -#: libpq/auth.c:2091 +#: libpq/auth.c:2227 #, c-format msgid "pam_set_item(PAM_USER) failed: %s" msgstr "pam_set_item(PAM_USER) fehlgeschlagen: %s" -#: libpq/auth.c:2102 +#: libpq/auth.c:2238 #, c-format msgid "pam_set_item(PAM_RHOST) failed: %s" msgstr "pam_set_item(PAM_RHOST) fehlgeschlagen: %s" -#: libpq/auth.c:2113 +#: libpq/auth.c:2249 #, c-format msgid "pam_set_item(PAM_CONV) failed: %s" msgstr "pam_set_item(PAM_CONV) fehlgeschlagen: %s" -#: libpq/auth.c:2124 +#: libpq/auth.c:2260 #, c-format msgid "pam_authenticate failed: %s" msgstr "pam_authenticate fehlgeschlagen: %s" -#: libpq/auth.c:2135 +#: libpq/auth.c:2271 #, c-format msgid "pam_acct_mgmt failed: %s" msgstr "pam_acct_mgmt fehlgeschlagen: %s" -#: libpq/auth.c:2146 +#: libpq/auth.c:2282 #, c-format msgid "could not release PAM authenticator: %s" msgstr "konnte PAM-Authenticator nicht freigeben: %s" -#: libpq/auth.c:2211 -#, c-format -msgid "could not initialize LDAP: %m" -msgstr "konnte LDAP nicht initialisieren: %m" - -#: libpq/auth.c:2214 +#: libpq/auth.c:2358 #, c-format msgid "could not initialize LDAP: error code %d" msgstr "konnte LDAP nicht initialisieren: Fehlercode %d" -#: libpq/auth.c:2224 +#: libpq/auth.c:2375 +#, c-format +msgid "could not initialize LDAP: %s" +msgstr "konnte LDAP nicht initialisieren: %s" + +#: libpq/auth.c:2385 +#, fuzzy, c-format +#| msgid "filters not supported in LDAP URLs" +msgid "ldaps not supported with this LDAP library" +msgstr "Filter in LDAP-URLs werden nicht unterstützt" + +#: libpq/auth.c:2393 +#, c-format +msgid "could not initialize LDAP: %m" +msgstr "konnte LDAP nicht initialisieren: %m" + +#: libpq/auth.c:2403 #, c-format msgid "could not set LDAP protocol version: %s" msgstr "konnte LDAP-Protokollversion nicht setzen: %s" -#: libpq/auth.c:2253 +#: libpq/auth.c:2434 #, c-format msgid "could not load wldap32.dll" msgstr "konnte wldap32.dll nicht laden" -#: libpq/auth.c:2261 +#: libpq/auth.c:2442 #, c-format msgid "could not load function _ldap_start_tls_sA in wldap32.dll" msgstr "konnte Funktion _ldap_start_tls_sA in wldap32.dll nicht laden" -#: libpq/auth.c:2262 +#: libpq/auth.c:2443 #, c-format msgid "LDAP over SSL is not supported on this platform." msgstr "LDAP über SSL wird auf dieser Plattform nicht unterstützt." 
-#: libpq/auth.c:2277 +#: libpq/auth.c:2458 #, c-format msgid "could not start LDAP TLS session: %s" msgstr "konnte LDAP-TLS-Sitzung nicht starten: %s" -#: libpq/auth.c:2299 +#: libpq/auth.c:2521 #, c-format msgid "LDAP server not specified" msgstr "LDAP-Server nicht angegeben" -#: libpq/auth.c:2352 +#: libpq/auth.c:2576 #, c-format msgid "invalid character in user name for LDAP authentication" msgstr "ungültiges Zeichen im Benutzernamen für LDAP-Authentifizierung" -#: libpq/auth.c:2367 +#: libpq/auth.c:2593 #, c-format msgid "could not perform initial LDAP bind for ldapbinddn \"%s\" on server \"%s\": %s" msgstr "erstes LDAP-Binden für ldapbinddn »%s« auf Server »%s« fehlgeschlagen: %s" -#: libpq/auth.c:2391 +#: libpq/auth.c:2622 #, c-format msgid "could not search LDAP for filter \"%s\" on server \"%s\": %s" msgstr "konnte LDAP nicht mit Filter »%s« auf Server »%s« durchsuchen: %s" -#: libpq/auth.c:2402 +#: libpq/auth.c:2636 #, c-format msgid "LDAP user \"%s\" does not exist" msgstr "LDAP-Benutzer »%s« existiert nicht" -#: libpq/auth.c:2403 +#: libpq/auth.c:2637 #, c-format msgid "LDAP search for filter \"%s\" on server \"%s\" returned no entries." msgstr "LDAP-Suche nach Filter »%s« auf Server »%s« gab keine Einträge zurück." -#: libpq/auth.c:2407 +#: libpq/auth.c:2641 #, c-format msgid "LDAP user \"%s\" is not unique" msgstr "LDAP-Benutzer »%s« ist nicht eindeutig" -#: libpq/auth.c:2408 +#: libpq/auth.c:2642 #, c-format msgid "LDAP search for filter \"%s\" on server \"%s\" returned %d entry." msgid_plural "LDAP search for filter \"%s\" on server \"%s\" returned %d entries." msgstr[0] "LDAP-Suche nach Filter »%s« auf Server »%s« gab %d Eintrag zurück." msgstr[1] "LDAP-Suche nach Filter »%s« auf Server »%s« gab %d Einträge zurück." -#: libpq/auth.c:2426 +#: libpq/auth.c:2662 #, c-format msgid "could not get dn for the first entry matching \"%s\" on server \"%s\": %s" msgstr "konnte DN für den ersten Treffer für »%s« auf Server »%s« nicht lesen: %s" -#: libpq/auth.c:2446 -#, c-format -msgid "could not unbind after searching for user \"%s\" on server \"%s\": %s" +#: libpq/auth.c:2683 +#, fuzzy, c-format +#| msgid "could not unbind after searching for user \"%s\" on server \"%s\": %s" +msgid "could not unbind after searching for user \"%s\" on server \"%s\"" msgstr "Losbinden fehlgeschlagen nach Suche nach Benutzer »%s« auf Server »%s«: %s" -#: libpq/auth.c:2476 +#: libpq/auth.c:2714 #, c-format msgid "LDAP login failed for user \"%s\" on server \"%s\": %s" msgstr "LDAP-Login fehlgeschlagen für Benutzer »%s« auf Server »%s«: %s" -#: libpq/auth.c:2504 +#: libpq/auth.c:2743 +#, c-format +msgid "LDAP diagnostics: %s" +msgstr "LDAP-Diagnostik: %s" + +#: libpq/auth.c:2768 #, c-format msgid "certificate authentication failed for user \"%s\": client certificate contains no user name" msgstr "Zertifikatauthentifizierung für Benutzer »%s« fehlgeschlagen: Client-Zertifikat enthält keinen Benutzernamen" -#: libpq/auth.c:2631 +#: libpq/auth.c:2871 #, c-format msgid "RADIUS server not specified" msgstr "RADIUS-Server nicht angegeben" -#: libpq/auth.c:2638 +#: libpq/auth.c:2878 #, c-format msgid "RADIUS secret not specified" msgstr "RADIUS-Geheimnis nicht angegeben" -#: libpq/auth.c:2654 libpq/hba.c:1786 -#, c-format -msgid "could not translate RADIUS server name \"%s\" to address: %s" -msgstr "konnte RADIUS-Servername »%s« nicht in Adresse übersetzen: %s" - -#: libpq/auth.c:2682 +#: libpq/auth.c:2892 #, c-format msgid "RADIUS authentication does not support passwords longer than %d characters" msgstr 
"RADIUS-Authentifizierung unterstützt keine Passwörter länger als %d Zeichen" -#: libpq/auth.c:2693 +#: libpq/auth.c:2997 libpq/hba.c:1908 +#, c-format +msgid "could not translate RADIUS server name \"%s\" to address: %s" +msgstr "konnte RADIUS-Servername »%s« nicht in Adresse übersetzen: %s" + +#: libpq/auth.c:3011 #, c-format msgid "could not generate random encryption vector" msgstr "konnte zufälligen Verschlüsselungsvektor nicht erzeugen" -#: libpq/auth.c:2726 +#: libpq/auth.c:3045 #, c-format msgid "could not perform MD5 encryption of password" msgstr "konnte MD5-Verschlüsselung des Passworts nicht durchführen" -#: libpq/auth.c:2751 +#: libpq/auth.c:3071 #, c-format msgid "could not create RADIUS socket: %m" msgstr "konnte RADIUS-Socket nicht erstellen: %m" -#: libpq/auth.c:2772 +#: libpq/auth.c:3093 #, c-format msgid "could not bind local RADIUS socket: %m" msgstr "konnte lokales RADIUS-Socket nicht binden: %m" -#: libpq/auth.c:2782 +#: libpq/auth.c:3103 #, c-format msgid "could not send RADIUS packet: %m" msgstr "konnte RADIUS-Paket nicht senden: %m" -#: libpq/auth.c:2815 libpq/auth.c:2840 +#: libpq/auth.c:3136 libpq/auth.c:3162 #, c-format -msgid "timeout waiting for RADIUS response" -msgstr "Zeitüberschreitung beim Warten auf RADIUS-Antwort" +msgid "timeout waiting for RADIUS response from %s" +msgstr "Zeitüberschreitung beim Warten auf RADIUS-Antwort von %s" -#: libpq/auth.c:2833 +#: libpq/auth.c:3155 #, c-format msgid "could not check status on RADIUS socket: %m" msgstr "konnte Status des RADIUS-Sockets nicht prüfen: %m" -#: libpq/auth.c:2862 +#: libpq/auth.c:3185 #, c-format msgid "could not read RADIUS response: %m" msgstr "konnte RADIUS-Antwort nicht lesen: %m" -#: libpq/auth.c:2874 libpq/auth.c:2878 +#: libpq/auth.c:3198 libpq/auth.c:3202 #, c-format -msgid "RADIUS response was sent from incorrect port: %d" -msgstr "RADIUS-Antwort wurde von falschem Port gesendet: %d" +msgid "RADIUS response from %s was sent from incorrect port: %d" +msgstr "RADIUS-Antwort von %s wurde von falschem Port gesendet: %d" -#: libpq/auth.c:2887 +#: libpq/auth.c:3211 #, c-format -msgid "RADIUS response too short: %d" -msgstr "RADIUS-Antwort zu kurz: %d" +msgid "RADIUS response from %s too short: %d" +msgstr "RADIUS-Antwort von %s zu kurz: %d" -#: libpq/auth.c:2894 +#: libpq/auth.c:3218 #, c-format -msgid "RADIUS response has corrupt length: %d (actual length %d)" -msgstr "RADIUS-Antwort hat verfälschte Länge: %d (tatsächliche Länge %d)" +msgid "RADIUS response from %s has corrupt length: %d (actual length %d)" +msgstr "RADIUS-Antwort von %s hat verfälschte Länge: %d (tatsächliche Länge %d)" -#: libpq/auth.c:2902 +#: libpq/auth.c:3226 #, c-format -msgid "RADIUS response is to a different request: %d (should be %d)" -msgstr "RADIUS-Antwort unterscheidet sich von Anfrage: %d (sollte %d sein)" +msgid "RADIUS response from %s is to a different request: %d (should be %d)" +msgstr "RADIUS-Antwort von %s unterscheidet sich von Anfrage: %d (sollte %d sein)" -#: libpq/auth.c:2927 +#: libpq/auth.c:3251 #, c-format msgid "could not perform MD5 encryption of received packet" msgstr "konnte MD5-Verschlüsselung des empfangenen Pakets nicht durchführen" -#: libpq/auth.c:2936 +#: libpq/auth.c:3260 #, c-format -msgid "RADIUS response has incorrect MD5 signature" -msgstr "RADIUS-Antwort hat falsche MD5-Signatur" +msgid "RADIUS response from %s has incorrect MD5 signature" +msgstr "RADIUS-Antwort von %s hat falsche MD5-Signatur" -#: libpq/auth.c:2953 +#: libpq/auth.c:3278 #, c-format -msgid "RADIUS response has 
invalid code (%d) for user \"%s\"" -msgstr "RADIUS-Antwort hat ungültigen Code (%d) für Benutzer »%s«" +msgid "RADIUS response from %s has invalid code (%d) for user \"%s\"" +msgstr "RADIUS-Antwort von %s hat ungültigen Code (%d) für Benutzer »%s«" -#: libpq/be-fsstubs.c:132 libpq/be-fsstubs.c:163 libpq/be-fsstubs.c:197 -#: libpq/be-fsstubs.c:237 libpq/be-fsstubs.c:262 libpq/be-fsstubs.c:310 -#: libpq/be-fsstubs.c:333 libpq/be-fsstubs.c:581 +#: libpq/be-fsstubs.c:119 libpq/be-fsstubs.c:150 libpq/be-fsstubs.c:178 +#: libpq/be-fsstubs.c:204 libpq/be-fsstubs.c:229 libpq/be-fsstubs.c:277 +#: libpq/be-fsstubs.c:300 libpq/be-fsstubs.c:545 #, c-format msgid "invalid large-object descriptor: %d" msgstr "ungültiger Large-Object-Deskriptor: %d" -#: libpq/be-fsstubs.c:178 libpq/be-fsstubs.c:216 libpq/be-fsstubs.c:600 -#: libpq/be-fsstubs.c:788 +#: libpq/be-fsstubs.c:161 #, c-format -msgid "permission denied for large object %u" -msgstr "keine Berechtigung für Large Object %u" +msgid "large object descriptor %d was not opened for reading" +msgstr "Large-Objekt-Deskriptor %d wurde nicht zum Lesen geöffnet" -#: libpq/be-fsstubs.c:203 libpq/be-fsstubs.c:587 +#: libpq/be-fsstubs.c:185 libpq/be-fsstubs.c:552 #, c-format msgid "large object descriptor %d was not opened for writing" msgstr "Large-Objekt-Deskriptor %d wurde nicht zum Schreiben geöffnet" -#: libpq/be-fsstubs.c:245 +#: libpq/be-fsstubs.c:212 #, c-format msgid "lo_lseek result out of range for large-object descriptor %d" msgstr "Ergebnis von lo_lseek ist außerhalb des gültigen Bereichs für Large-Object-Deskriptor %d" -#: libpq/be-fsstubs.c:318 +#: libpq/be-fsstubs.c:285 #, c-format msgid "lo_tell result out of range for large-object descriptor %d" msgstr "Ergebnis von lo_tell ist außerhalb des gültigen Bereichs für Large-Object-Deskriptor: %d" -#: libpq/be-fsstubs.c:455 -#, c-format -msgid "must be superuser to use server-side lo_import()" -msgstr "nur Superuser können das serverseitige lo_import() verwenden" - -#: libpq/be-fsstubs.c:456 -#, c-format -msgid "Anyone can use the client-side lo_import() provided by libpq." -msgstr "Jeder kann das clientseitige lo_import() von libpq verwenden." - -#: libpq/be-fsstubs.c:469 +#: libpq/be-fsstubs.c:432 #, c-format msgid "could not open server file \"%s\": %m" msgstr "konnte Serverdatei »%s« nicht öffnen: %m" -#: libpq/be-fsstubs.c:491 +#: libpq/be-fsstubs.c:454 #, c-format msgid "could not read server file \"%s\": %m" msgstr "konnte Serverdatei »%s« nicht lesen: %m" -#: libpq/be-fsstubs.c:521 -#, c-format -msgid "must be superuser to use server-side lo_export()" -msgstr "nur Superuser können das serverseitige lo_export() verwenden" - -#: libpq/be-fsstubs.c:522 -#, c-format -msgid "Anyone can use the client-side lo_export() provided by libpq." -msgstr "Jeder kann das clientseitige lo_export() von libpq verwenden." 
- -#: libpq/be-fsstubs.c:547 +#: libpq/be-fsstubs.c:511 #, c-format msgid "could not create server file \"%s\": %m" msgstr "konnte Serverdatei »%s« nicht erstellen: %m" -#: libpq/be-fsstubs.c:559 +#: libpq/be-fsstubs.c:523 #, c-format msgid "could not write server file \"%s\": %m" msgstr "konnte Serverdatei »%s« nicht schreiben: %m" -#: libpq/be-fsstubs.c:813 +#: libpq/be-fsstubs.c:752 #, c-format msgid "large object read request is too large" msgstr "Large-Object-Leseaufforderung ist zu groß" -#: libpq/be-fsstubs.c:855 utils/adt/genfile.c:211 utils/adt/genfile.c:252 +#: libpq/be-fsstubs.c:794 utils/adt/genfile.c:231 utils/adt/genfile.c:270 +#: utils/adt/genfile.c:306 #, c-format msgid "requested length cannot be negative" msgstr "verlangte Länge darf nicht negativ sein" -#: libpq/be-secure-openssl.c:197 +#: libpq/be-fsstubs.c:847 storage/large_object/inv_api.c:296 +#: storage/large_object/inv_api.c:308 storage/large_object/inv_api.c:512 +#: storage/large_object/inv_api.c:623 storage/large_object/inv_api.c:813 #, c-format -msgid "could not create SSL context: %s" -msgstr "konnte SSL-Kontext nicht erzeugen: %s" +msgid "permission denied for large object %u" +msgstr "keine Berechtigung für Large Object %u" + +#: libpq/be-secure-common.c:91 +#, fuzzy, c-format +#| msgid "could not read from file \"%s\": %m" +msgid "could not read from command \"%s\": %m" +msgstr "konnte nicht aus Datei »%s« lesen: %m" -#: libpq/be-secure-openssl.c:225 +#: libpq/be-secure-common.c:109 #, c-format -msgid "could not load server certificate file \"%s\": %s" -msgstr "konnte Serverzertifikatsdatei »%s« nicht laden: %s" +msgid "command \"%s\" failed" +msgstr "Befehl »%s« fehlgeschlagen" -#: libpq/be-secure-openssl.c:234 +#: libpq/be-secure-common.c:139 #, c-format msgid "could not access private key file \"%s\": %m" msgstr "konnte auf private Schlüsseldatei »%s« nicht zugreifen: %m" -#: libpq/be-secure-openssl.c:243 +#: libpq/be-secure-common.c:148 #, c-format msgid "private key file \"%s\" is not a regular file" msgstr "private Schlüsseldatei »%s« ist keine normale Datei" -#: libpq/be-secure-openssl.c:258 +#: libpq/be-secure-common.c:163 #, c-format msgid "private key file \"%s\" must be owned by the database user or root" msgstr "private Schlüsseldatei »%s« muss als Eigentümer den Datenbankbenutzer oder »root« haben" -#: libpq/be-secure-openssl.c:281 +#: libpq/be-secure-common.c:186 #, c-format msgid "private key file \"%s\" has group or world access" msgstr "private Schlüsseldatei »%s« erlaubt Zugriff von Gruppe oder Welt" -#: libpq/be-secure-openssl.c:283 +#: libpq/be-secure-common.c:188 #, c-format msgid "File must have permissions u=rw (0600) or less if owned by the database user, or permissions u=rw,g=r (0640) or less if owned by root." msgstr "Dateirechte müssen u=rw (0600) oder weniger sein, wenn der Eigentümer der Datenbankbenutzer ist, oder u=rw,g=r (0640) oder weniger, wenn der Eigentümer »root« ist." 
-#: libpq/be-secure-openssl.c:300 -#, fuzzy, c-format -#| msgid "private key file \"%s\" must be owned by the database user or root" +#: libpq/be-secure-openssl.c:104 +#, c-format +msgid "could not create SSL context: %s" +msgstr "konnte SSL-Kontext nicht erzeugen: %s" + +#: libpq/be-secure-openssl.c:147 +#, c-format +msgid "could not load server certificate file \"%s\": %s" +msgstr "konnte Serverzertifikatsdatei »%s« nicht laden: %s" + +#: libpq/be-secure-openssl.c:167 +#, c-format msgid "private key file \"%s\" cannot be reloaded because it requires a passphrase" -msgstr "private Schlüsseldatei »%s« muss als Eigentümer den Datenbankbenutzer oder »root« haben" +msgstr "private Schlüsseldatei »%s« kann nicht neu geladen werden, weil sie eine Passphrase benötigt" -#: libpq/be-secure-openssl.c:305 +#: libpq/be-secure-openssl.c:172 #, c-format msgid "could not load private key file \"%s\": %s" msgstr "konnte private Schlüsseldatei »%s« nicht laden: %s" -#: libpq/be-secure-openssl.c:314 +#: libpq/be-secure-openssl.c:181 #, c-format msgid "check of private key failed: %s" msgstr "Überprüfung des privaten Schlüssels fehlgeschlagen: %s" -#: libpq/be-secure-openssl.c:334 +#: libpq/be-secure-openssl.c:208 #, c-format msgid "could not set the cipher list (no valid ciphers available)" -msgstr "" +msgstr "konnte Cipher-Liste nicht setzen (keine gültigen Ciphers verfügbar)" -#: libpq/be-secure-openssl.c:352 +#: libpq/be-secure-openssl.c:226 #, c-format msgid "could not load root certificate file \"%s\": %s" msgstr "konnte Root-Zertifikat-Datei »%s« nicht laden: %s" -#: libpq/be-secure-openssl.c:379 +#: libpq/be-secure-openssl.c:253 #, c-format msgid "SSL certificate revocation list file \"%s\" ignored" msgstr "SSL-Certificate-Revocation-List-Datei »%s« ignoriert" -#: libpq/be-secure-openssl.c:381 +#: libpq/be-secure-openssl.c:255 #, c-format msgid "SSL library does not support certificate revocation lists." msgstr "SSL-Bibliothek unterstützt keine Certificate-Revocation-Lists." 
-#: libpq/be-secure-openssl.c:388 +#: libpq/be-secure-openssl.c:262 #, c-format msgid "could not load SSL certificate revocation list file \"%s\": %s" msgstr "konnte SSL-Certificate-Revocation-List-Datei »%s« nicht laden: %s" -#: libpq/be-secure-openssl.c:469 -#, fuzzy, c-format -#| msgid "could not initialize SSL connection: %s" +#: libpq/be-secure-openssl.c:337 +#, c-format msgid "could not initialize SSL connection: SSL context not set up" -msgstr "konnte SSL-Verbindung nicht initialisieren: %s" +msgstr "konnte SSL-Verbindung nicht initialisieren: SSL-Kontext nicht eingerichtet" -#: libpq/be-secure-openssl.c:477 +#: libpq/be-secure-openssl.c:345 #, c-format msgid "could not initialize SSL connection: %s" msgstr "konnte SSL-Verbindung nicht initialisieren: %s" -#: libpq/be-secure-openssl.c:485 +#: libpq/be-secure-openssl.c:353 #, c-format msgid "could not set SSL socket: %s" msgstr "konnte SSL-Socket nicht setzen: %s" -#: libpq/be-secure-openssl.c:540 +#: libpq/be-secure-openssl.c:408 #, c-format msgid "could not accept SSL connection: %m" msgstr "konnte SSL-Verbindung nicht annehmen: %m" -#: libpq/be-secure-openssl.c:544 libpq/be-secure-openssl.c:555 +#: libpq/be-secure-openssl.c:412 libpq/be-secure-openssl.c:423 #, c-format msgid "could not accept SSL connection: EOF detected" msgstr "konnte SSL-Verbindung nicht annehmen: EOF entdeckt" -#: libpq/be-secure-openssl.c:549 +#: libpq/be-secure-openssl.c:417 #, c-format msgid "could not accept SSL connection: %s" msgstr "konnte SSL-Verbindung nicht annehmen: %s" -#: libpq/be-secure-openssl.c:560 libpq/be-secure-openssl.c:699 -#: libpq/be-secure-openssl.c:759 +#: libpq/be-secure-openssl.c:428 libpq/be-secure-openssl.c:559 +#: libpq/be-secure-openssl.c:623 #, c-format msgid "unrecognized SSL error code: %d" msgstr "unbekannter SSL-Fehlercode: %d" -#: libpq/be-secure-openssl.c:602 +#: libpq/be-secure-openssl.c:470 #, c-format msgid "SSL certificate's common name contains embedded null" msgstr "Common-Name im SSL-Zertifikat enthält Null-Byte" -#: libpq/be-secure-openssl.c:613 -#, c-format -msgid "SSL connection from \"%s\"" -msgstr "SSL-Verbindung von »%s«" - -#: libpq/be-secure-openssl.c:690 libpq/be-secure-openssl.c:750 +#: libpq/be-secure-openssl.c:548 libpq/be-secure-openssl.c:607 #, c-format msgid "SSL error: %s" msgstr "SSL-Fehler: %s" -#: libpq/be-secure-openssl.c:1179 +#: libpq/be-secure-openssl.c:788 +#, c-format +msgid "could not open DH parameters file \"%s\": %m" +msgstr "konnte DH-Parameterdatei »%s« nicht öffnen: %m" + +#: libpq/be-secure-openssl.c:800 +#, c-format +msgid "could not load DH parameters file: %s" +msgstr "konnte DH-Parameterdatei nicht laden: %s" + +#: libpq/be-secure-openssl.c:810 +#, c-format +msgid "invalid DH parameters: %s" +msgstr "ungültige DH-Parameter: %s" + +#: libpq/be-secure-openssl.c:818 +#, c-format +msgid "invalid DH parameters: p is not prime" +msgstr "ungültige DH-Parameter: p ist keine Primzahl" + +#: libpq/be-secure-openssl.c:826 +#, c-format +msgid "invalid DH parameters: neither suitable generator or safe prime" +msgstr "ungültige DH-Parameter: weder geeigneter Generator noch sichere Primzahl" + +#: libpq/be-secure-openssl.c:981 +#, c-format +msgid "DH: could not load DH parameters" +msgstr "DH: konnte DH-Parameter nicht laden" + +#: libpq/be-secure-openssl.c:989 +#, c-format +msgid "DH: could not set DH parameters: %s" +msgstr "DH: konnte DH-Parameter nicht setzen: %s" + +#: libpq/be-secure-openssl.c:1013 #, c-format msgid "ECDH: unrecognized curve name: %s" msgstr "ECDH: unbekannter Kurvenname: 
%s" -#: libpq/be-secure-openssl.c:1188 +#: libpq/be-secure-openssl.c:1022 #, c-format msgid "ECDH: could not create key" msgstr "ECDH: konnte Schlüssel nicht erzeugen" -#: libpq/be-secure-openssl.c:1216 +#: libpq/be-secure-openssl.c:1050 msgid "no SSL error reported" msgstr "kein SSL-Fehler berichtet" -#: libpq/be-secure-openssl.c:1220 +#: libpq/be-secure-openssl.c:1054 #, c-format msgid "SSL error code %lu" msgstr "SSL-Fehlercode %lu" -#: libpq/be-secure.c:188 libpq/be-secure.c:274 +#: libpq/be-secure-openssl.c:1182 +#, fuzzy, c-format +#| msgid "local connections are not supported by this build" +msgid "channel binding type \"tls-server-end-point\" is not supported by this build" +msgstr "lokale Verbindungen werden von dieser Installation nicht unterstützt" + +#: libpq/be-secure.c:119 +#, c-format +msgid "SSL connection from \"%s\"" +msgstr "SSL-Verbindung von »%s«" + +#: libpq/be-secure.c:193 libpq/be-secure.c:279 #, c-format msgid "terminating connection due to unexpected postmaster exit" msgstr "Verbindung wird abgebrochen wegen unerwartetem Ende des Postmasters" -#: libpq/crypt.c:58 +#: libpq/crypt.c:51 #, c-format msgid "Role \"%s\" does not exist." msgstr "Rolle »%s« existiert nicht." -#: libpq/crypt.c:68 +#: libpq/crypt.c:61 #, c-format msgid "User \"%s\" has no password assigned." msgstr "Benutzer »%s« hat kein Passwort zugewiesen." -#: libpq/crypt.c:83 -#, c-format -msgid "User \"%s\" has an empty password." -msgstr "Benutzer »%s« hat ein leeres Passwort." - -#: libpq/crypt.c:97 +#: libpq/crypt.c:79 #, c-format msgid "User \"%s\" has an expired password." msgstr "Benutzer »%s« hat ein abgelaufenes Passwort." -#: libpq/crypt.c:254 +#: libpq/crypt.c:173 #, c-format msgid "User \"%s\" has a password that cannot be used with MD5 authentication." -msgstr "" +msgstr "Benutzer »%s« hat ein Passwort, das nicht mit MD5-Authentifizierung verwendet werden kann." -#: libpq/crypt.c:263 libpq/crypt.c:330 +#: libpq/crypt.c:197 libpq/crypt.c:238 libpq/crypt.c:262 #, c-format msgid "Password does not match for user \"%s\"." msgstr "Passwort stimmt nicht überein für Benutzer »%s«." -#: libpq/crypt.c:321 +#: libpq/crypt.c:281 #, c-format msgid "Password of user \"%s\" is in unrecognized format." -msgstr "" +msgstr "Passwort von Benutzer »%s« hat unbekanntes Format." 
-#: libpq/hba.c:232 +#: libpq/hba.c:235 #, c-format msgid "authentication file token too long, skipping: \"%s\"" msgstr "Token in Authentifizierungsdatei zu lang, wird übersprungen: »%s«" -#: libpq/hba.c:404 +#: libpq/hba.c:407 #, c-format msgid "could not open secondary authentication file \"@%s\" as \"%s\": %m" msgstr "konnte sekundäre Authentifizierungsdatei »@%s« nicht als »%s« öffnen: %m" -#: libpq/hba.c:506 +#: libpq/hba.c:509 #, c-format msgid "authentication file line too long" msgstr "Zeile in Authentifizierungsdatei zu lang" -#: libpq/hba.c:507 libpq/hba.c:861 libpq/hba.c:881 libpq/hba.c:919 -#: libpq/hba.c:969 libpq/hba.c:983 libpq/hba.c:1005 libpq/hba.c:1014 -#: libpq/hba.c:1035 libpq/hba.c:1048 libpq/hba.c:1068 libpq/hba.c:1090 -#: libpq/hba.c:1102 libpq/hba.c:1158 libpq/hba.c:1178 libpq/hba.c:1192 -#: libpq/hba.c:1211 libpq/hba.c:1222 libpq/hba.c:1237 libpq/hba.c:1255 -#: libpq/hba.c:1271 libpq/hba.c:1283 libpq/hba.c:1320 libpq/hba.c:1361 -#: libpq/hba.c:1374 libpq/hba.c:1396 libpq/hba.c:1408 libpq/hba.c:1426 -#: libpq/hba.c:1476 libpq/hba.c:1515 libpq/hba.c:1526 libpq/hba.c:1583 -#: libpq/hba.c:1599 libpq/hba.c:1698 libpq/hba.c:1788 libpq/hba.c:1808 -#: libpq/hba.c:1830 tsearch/ts_locale.c:182 +#: libpq/hba.c:510 libpq/hba.c:867 libpq/hba.c:887 libpq/hba.c:925 +#: libpq/hba.c:975 libpq/hba.c:989 libpq/hba.c:1011 libpq/hba.c:1020 +#: libpq/hba.c:1041 libpq/hba.c:1054 libpq/hba.c:1074 libpq/hba.c:1096 +#: libpq/hba.c:1108 libpq/hba.c:1164 libpq/hba.c:1184 libpq/hba.c:1198 +#: libpq/hba.c:1217 libpq/hba.c:1228 libpq/hba.c:1243 libpq/hba.c:1261 +#: libpq/hba.c:1277 libpq/hba.c:1289 libpq/hba.c:1326 libpq/hba.c:1367 +#: libpq/hba.c:1380 libpq/hba.c:1402 libpq/hba.c:1414 libpq/hba.c:1432 +#: libpq/hba.c:1482 libpq/hba.c:1523 libpq/hba.c:1534 libpq/hba.c:1550 +#: libpq/hba.c:1567 libpq/hba.c:1577 libpq/hba.c:1635 libpq/hba.c:1673 +#: libpq/hba.c:1689 libpq/hba.c:1779 libpq/hba.c:1797 libpq/hba.c:1891 +#: libpq/hba.c:1910 libpq/hba.c:1939 libpq/hba.c:1952 libpq/hba.c:1975 +#: libpq/hba.c:1997 libpq/hba.c:2011 tsearch/ts_locale.c:179 #, c-format msgid "line %d of configuration file \"%s\"" msgstr "Zeile %d in Konfigurationsdatei »%s«" #. translator: the second %s is a list of auth methods -#: libpq/hba.c:859 +#: libpq/hba.c:865 #, c-format msgid "authentication option \"%s\" is only valid for authentication methods %s" msgstr "Authentifizierungsoption »%s« ist nur gültig für Authentifizierungsmethoden %s" -#: libpq/hba.c:879 +#: libpq/hba.c:885 #, c-format msgid "authentication method \"%s\" requires argument \"%s\" to be set" msgstr "Authentifizierungsmethode »%s« benötigt Argument »%s«" -#: libpq/hba.c:907 +#: libpq/hba.c:913 #, c-format msgid "missing entry in file \"%s\" at end of line %d" msgstr "fehlender Eintrag in Datei »%s« am Ende von Zeile %d" -#: libpq/hba.c:918 +#: libpq/hba.c:924 #, c-format msgid "multiple values in ident field" msgstr "mehrere Werte in Ident-Feld" -#: libpq/hba.c:967 +#: libpq/hba.c:973 #, c-format msgid "multiple values specified for connection type" msgstr "mehrere Werte angegeben für Verbindungstyp" -#: libpq/hba.c:968 +#: libpq/hba.c:974 #, c-format msgid "Specify exactly one connection type per line." msgstr "Geben Sie genau einen Verbindungstyp pro Zeile an." 
-#: libpq/hba.c:982 +#: libpq/hba.c:988 #, c-format msgid "local connections are not supported by this build" msgstr "lokale Verbindungen werden von dieser Installation nicht unterstützt" -#: libpq/hba.c:1003 +#: libpq/hba.c:1009 #, c-format msgid "hostssl record cannot match because SSL is disabled" -msgstr "" +msgstr "hostssl-Eintrag kann nicht angewendet werden, weil SSL deaktiviert ist" -#: libpq/hba.c:1004 +#: libpq/hba.c:1010 #, c-format msgid "Set ssl = on in postgresql.conf." msgstr "Setzen Sie ssl = on in postgresql.conf." -#: libpq/hba.c:1012 -#, fuzzy, c-format -#| msgid "hostssl is not supported by this build" +#: libpq/hba.c:1018 +#, c-format msgid "hostssl record cannot match because SSL is not supported by this build" -msgstr "hostssl wird von dieser Installation nicht unterstützt" +msgstr "hostssl-Eintrag kann nicht angewendet werden, weil SSL von dieser Installation nicht unterstützt wird" -#: libpq/hba.c:1013 +#: libpq/hba.c:1019 #, c-format msgid "Compile with --with-openssl to use SSL connections." msgstr "Kompilieren Sie mit --with-openssl, um SSL-Verbindungen zu verwenden." -#: libpq/hba.c:1033 +#: libpq/hba.c:1039 #, c-format msgid "invalid connection type \"%s\"" msgstr "ungültiger Verbindungstyp »%s«" -#: libpq/hba.c:1047 +#: libpq/hba.c:1053 #, c-format msgid "end-of-line before database specification" msgstr "Zeilenende vor Datenbankangabe" -#: libpq/hba.c:1067 +#: libpq/hba.c:1073 #, c-format msgid "end-of-line before role specification" msgstr "Zeilenende vor Rollenangabe" -#: libpq/hba.c:1089 +#: libpq/hba.c:1095 #, c-format msgid "end-of-line before IP address specification" msgstr "Zeilenende vor IP-Adressangabe" -#: libpq/hba.c:1100 +#: libpq/hba.c:1106 #, c-format msgid "multiple values specified for host address" msgstr "mehrere Werte für Hostadresse angegeben" -#: libpq/hba.c:1101 +#: libpq/hba.c:1107 #, c-format msgid "Specify one address range per line." msgstr "Geben Sie einen Adressbereich pro Zeile an." -#: libpq/hba.c:1156 +#: libpq/hba.c:1162 #, c-format msgid "invalid IP address \"%s\": %s" msgstr "ungültige IP-Adresse »%s«: %s" -#: libpq/hba.c:1176 +#: libpq/hba.c:1182 #, c-format msgid "specifying both host name and CIDR mask is invalid: \"%s\"" msgstr "Angabe von sowohl Hostname als auch CIDR-Maske ist ungültig: »%s«" -#: libpq/hba.c:1190 +#: libpq/hba.c:1196 #, c-format msgid "invalid CIDR mask in address \"%s\"" msgstr "ungültige CIDR-Maske in Adresse »%s«" -#: libpq/hba.c:1209 +#: libpq/hba.c:1215 #, c-format msgid "end-of-line before netmask specification" msgstr "Zeilenende vor Netzmaskenangabe" -#: libpq/hba.c:1210 +#: libpq/hba.c:1216 #, c-format msgid "Specify an address range in CIDR notation, or provide a separate netmask." msgstr "Geben Sie einen Adressbereich in CIDR-Schreibweise oder eine separate Netzmaske an." 
-#: libpq/hba.c:1221 +#: libpq/hba.c:1227 #, c-format msgid "multiple values specified for netmask" msgstr "mehrere Werte für Netzmaske angegeben" -#: libpq/hba.c:1235 +#: libpq/hba.c:1241 #, c-format msgid "invalid IP mask \"%s\": %s" msgstr "ungültige IP-Maske »%s«: %s" -#: libpq/hba.c:1254 +#: libpq/hba.c:1260 #, c-format msgid "IP address and mask do not match" msgstr "IP-Adresse und -Maske passen nicht zusammen" -#: libpq/hba.c:1270 +#: libpq/hba.c:1276 #, c-format msgid "end-of-line before authentication method" msgstr "Zeilenende vor Authentifizierungsmethode" -#: libpq/hba.c:1281 +#: libpq/hba.c:1287 #, c-format msgid "multiple values specified for authentication type" msgstr "mehrere Werte für Authentifizierungstyp angegeben" -#: libpq/hba.c:1282 +#: libpq/hba.c:1288 #, c-format msgid "Specify exactly one authentication type per line." msgstr "Geben Sie genau einen Authentifizierungstyp pro Zeile an." -#: libpq/hba.c:1359 +#: libpq/hba.c:1365 #, c-format msgid "invalid authentication method \"%s\"" msgstr "ungültige Authentifizierungsmethode »%s«" -#: libpq/hba.c:1372 +#: libpq/hba.c:1378 #, c-format msgid "invalid authentication method \"%s\": not supported by this build" msgstr "ungültige Authentifizierungsmethode »%s«: von dieser Installation nicht unterstützt" -#: libpq/hba.c:1395 +#: libpq/hba.c:1401 #, c-format msgid "gssapi authentication is not supported on local sockets" msgstr "gssapi-Authentifizierung wird auf lokalen Sockets nicht unterstützt" -#: libpq/hba.c:1407 +#: libpq/hba.c:1413 #, c-format msgid "peer authentication is only supported on local sockets" msgstr "peer-Authentifizierung wird nur auf lokalen Sockets unterstützt" -#: libpq/hba.c:1425 +#: libpq/hba.c:1431 #, c-format msgid "cert authentication is only supported on hostssl connections" msgstr "cert-Authentifizierung wird nur auf »hostssl«-Verbindungen unterstützt" -#: libpq/hba.c:1475 +#: libpq/hba.c:1481 #, c-format msgid "authentication option not in name=value format: %s" msgstr "Authentifizierungsoption nicht im Format name=wert: %s" -#: libpq/hba.c:1514 +#: libpq/hba.c:1522 #, c-format -msgid "cannot use ldapbasedn, ldapbinddn, ldapbindpasswd, ldapsearchattribute, or ldapurl together with ldapprefix" -msgstr "ldapbasedn, ldapbinddn, ldapbindpasswd, ldapsearchattribute oder ldapurl kann nicht zusammen mit ldapprefix verwendet werden" +msgid "cannot use ldapbasedn, ldapbinddn, ldapbindpasswd, ldapsearchattribute, ldapsearchfilter or ldapurl together with ldapprefix" +msgstr "ldapbasedn, ldapbinddn, ldapbindpasswd, ldapsearchattribute, ldapsearchfilter oder ldapurl kann nicht zusammen mit ldapprefix verwendet werden" -#: libpq/hba.c:1525 +#: libpq/hba.c:1533 #, c-format msgid "authentication method \"ldap\" requires argument \"ldapbasedn\", \"ldapprefix\", or \"ldapsuffix\" to be set" msgstr "Authentifizierungsmethode »ldap« benötigt Argument »ldapbasedn«, »ldapprefix« oder »ldapsuffix«" -#: libpq/hba.c:1573 +#: libpq/hba.c:1549 +#, fuzzy, c-format +#| msgid "cannot use ldapbasedn, ldapbinddn, ldapbindpasswd, ldapsearchattribute, or ldapurl together with ldapprefix" +msgid "cannot use ldapsearchattribute together with ldapsearchfilter" +msgstr "ldapbasedn, ldapbinddn, ldapbindpasswd, ldapsearchattribute oder ldapurl kann nicht zusammen mit ldapprefix verwendet werden" + +#: libpq/hba.c:1566 +#, c-format +msgid "list of RADIUS servers cannot be empty" +msgstr "Liste der RADIUS-Server darf nicht leer sein" + +#: libpq/hba.c:1576 +#, c-format +msgid "list of RADIUS secrets cannot be empty" +msgstr "Liste 
der RADIUS-Geheimnisse darf nicht leer sein" + +#: libpq/hba.c:1629 +#, c-format +msgid "the number of %s (%d) must be 1 or the same as the number of %s (%d)" +msgstr "die Anzahl %s (%d) muss 1 oder gleich der Anzahl %s (%d) sein" + +#: libpq/hba.c:1663 msgid "ident, peer, gssapi, sspi, and cert" msgstr "ident, peer, gssapi, sspi und cert" -#: libpq/hba.c:1582 +#: libpq/hba.c:1672 #, c-format msgid "clientcert can only be configured for \"hostssl\" rows" msgstr "clientcert kann nur für »hostssl«-Zeilen konfiguriert werden" -#: libpq/hba.c:1598 +#: libpq/hba.c:1688 #, c-format msgid "clientcert can not be set to 0 when using \"cert\" authentication" msgstr "clientcert kann nicht auf 0 gesetzt sein, wenn »cert«-Authentifizierung verwendet wird" -#: libpq/hba.c:1635 +#: libpq/hba.c:1725 #, c-format msgid "could not parse LDAP URL \"%s\": %s" msgstr "konnte LDAP-URL »%s« nicht interpretieren: %s" -#: libpq/hba.c:1645 +#: libpq/hba.c:1736 #, c-format msgid "unsupported LDAP URL scheme: %s" msgstr "nicht unterstütztes LDAP-URL-Schema: %s" -#: libpq/hba.c:1663 -#, c-format -msgid "filters not supported in LDAP URLs" -msgstr "Filter in LDAP-URLs werden nicht unterstützt" - -#: libpq/hba.c:1672 +#: libpq/hba.c:1760 #, c-format msgid "LDAP URLs not supported on this platform" msgstr "LDAP-URLs werden auf dieser Plattform nicht unterstützt" -#: libpq/hba.c:1697 +#: libpq/hba.c:1778 +#, c-format +msgid "invalid ldapscheme value: \"%s\"" +msgstr "ungültiger ldapscheme-Wert: »%s«" + +#: libpq/hba.c:1796 #, c-format msgid "invalid LDAP port number: \"%s\"" msgstr "ungültige LDAP-Portnummer: »%s«" -#: libpq/hba.c:1738 libpq/hba.c:1745 +#: libpq/hba.c:1842 libpq/hba.c:1849 msgid "gssapi and sspi" msgstr "gssapi und sspi" -#: libpq/hba.c:1754 libpq/hba.c:1763 +#: libpq/hba.c:1858 libpq/hba.c:1867 msgid "sspi" msgstr "sspi" -#: libpq/hba.c:1807 +#: libpq/hba.c:1889 +#, c-format +msgid "could not parse RADIUS server list \"%s\"" +msgstr "konnte RADIUS-Serverliste »%s« nicht parsen" + +#: libpq/hba.c:1937 +#, c-format +msgid "could not parse RADIUS port list \"%s\"" +msgstr "konnte RADIUS-Portliste »%s« nicht parsen" + +#: libpq/hba.c:1951 #, c-format msgid "invalid RADIUS port number: \"%s\"" msgstr "ungültige RADIUS-Portnummer: »%s«" -#: libpq/hba.c:1828 +#: libpq/hba.c:1973 +#, c-format +msgid "could not parse RADIUS secret list \"%s\"" +msgstr "konnte RADIUS-Geheimnisliste »%s« nicht parsen" + +#: libpq/hba.c:1995 +#, c-format +msgid "could not parse RADIUS identifiers list \"%s\"" +msgstr "konnte RADIUS-Bezeichnerliste »%s« nicht parsen" + +#: libpq/hba.c:2009 #, c-format msgid "unrecognized authentication option name: \"%s\"" msgstr "unbekannter Authentifizierungsoptionsname: »%s«" -#: libpq/hba.c:2012 +#: libpq/hba.c:2193 #, c-format msgid "configuration file \"%s\" contains no entries" msgstr "Konfigurationsdatei »%s« enthält keine Einträge" -#: libpq/hba.c:2517 +#: libpq/hba.c:2703 #, c-format msgid "invalid regular expression \"%s\": %s" msgstr "ungültiger regulärer Ausdruck »%s«: %s" -#: libpq/hba.c:2577 +#: libpq/hba.c:2763 #, c-format msgid "regular expression match for \"%s\" failed: %s" msgstr "Suche nach regulärem Ausdruck für »%s« fehlgeschlagen: %s" -#: libpq/hba.c:2596 +#: libpq/hba.c:2782 #, c-format msgid "regular expression \"%s\" has no subexpressions as requested by backreference in \"%s\"" msgstr "regulärer Ausdruck »%s« hat keine Teilausdrücke wie von der Backreference in »%s« verlangt" -#: libpq/hba.c:2693 +#: libpq/hba.c:2879 #, c-format msgid "provided user name (%s) and 
authenticated user name (%s) do not match" msgstr "angegebener Benutzername (%s) und authentifizierter Benutzername (%s) stimmen nicht überein" -#: libpq/hba.c:2713 +#: libpq/hba.c:2899 #, c-format msgid "no match in usermap \"%s\" for user \"%s\" authenticated as \"%s\"" msgstr "kein passender Eintrag in Usermap »%s« für Benutzer »%s«, authentifiziert als »%s«" -#: libpq/hba.c:2746 +#: libpq/hba.c:2932 #, c-format msgid "could not open usermap file \"%s\": %m" msgstr "konnte Usermap-Datei »%s« nicht öffnen: %m" -#: libpq/pqcomm.c:201 +#: libpq/pqcomm.c:220 #, c-format msgid "could not set socket to nonblocking mode: %m" msgstr "konnte Socket nicht auf nicht-blockierenden Modus umstellen: %m" -#: libpq/pqcomm.c:355 +#: libpq/pqcomm.c:374 #, c-format msgid "Unix-domain socket path \"%s\" is too long (maximum %d bytes)" msgstr "Unix-Domain-Socket-Pfad »%s« ist zu lang (maximal %d Bytes)" -#: libpq/pqcomm.c:376 +#: libpq/pqcomm.c:395 #, c-format msgid "could not translate host name \"%s\", service \"%s\" to address: %s" msgstr "konnte Hostname »%s«, Dienst »%s« nicht in Adresse übersetzen: %s" -#: libpq/pqcomm.c:380 +#: libpq/pqcomm.c:399 #, c-format msgid "could not translate service \"%s\" to address: %s" msgstr "konnte Dienst »%s« nicht in Adresse übersetzen: %s" -#: libpq/pqcomm.c:407 +#: libpq/pqcomm.c:426 #, c-format msgid "could not bind to all requested addresses: MAXLISTEN (%d) exceeded" msgstr "konnte nicht an alle verlangten Adressen binden: MAXLISTEN (%d) überschritten" -#: libpq/pqcomm.c:416 +#: libpq/pqcomm.c:435 msgid "IPv4" msgstr "IPv4" -#: libpq/pqcomm.c:420 +#: libpq/pqcomm.c:439 msgid "IPv6" msgstr "IPv6" -#: libpq/pqcomm.c:425 +#: libpq/pqcomm.c:444 msgid "Unix" msgstr "Unix" -#: libpq/pqcomm.c:430 +#: libpq/pqcomm.c:449 #, c-format msgid "unrecognized address family %d" msgstr "unbekannte Adressfamilie %d" #. translator: first %s is IPv4, IPv6, or Unix -#: libpq/pqcomm.c:456 -#, fuzzy, c-format -#| msgid "could not create listen socket for \"%s\"" +#: libpq/pqcomm.c:475 +#, c-format msgid "could not create %s socket for address \"%s\": %m" -msgstr "konnte Listen-Socket für »%s« nicht erzeugen" +msgstr "konnte %s-Socket für Adresse »%s« nicht erzeugen: %m" #. translator: first %s is IPv4, IPv6, or Unix -#: libpq/pqcomm.c:482 -#, fuzzy, c-format -#| msgid "setsockopt(SO_REUSEADDR) failed: %m" +#: libpq/pqcomm.c:501 +#, c-format msgid "setsockopt(SO_REUSEADDR) failed for %s address \"%s\": %m" -msgstr "setsockopt(SO_REUSEADDR) fehlgeschlagen: %m" +msgstr "setsockopt(SO_REUSEADDR) für %s-Adresse »%s« fehlgeschlagen: %m" #. translator: first %s is IPv4, IPv6, or Unix -#: libpq/pqcomm.c:499 -#, fuzzy, c-format -#| msgid "setsockopt(IPV6_V6ONLY) failed: %m" +#: libpq/pqcomm.c:518 +#, c-format msgid "setsockopt(IPV6_V6ONLY) failed for %s address \"%s\": %m" -msgstr "setsockopt(IPV6_V6ONLY) fehlgeschlagen: %m" +msgstr "setsockopt(IPV6_V6ONLY) für %s-Adresse »%s« fehlgeschlagen: %m" #. translator: first %s is IPv4, IPv6, or Unix -#: libpq/pqcomm.c:519 -#, fuzzy, c-format -#| msgid "could not bind to local address \"%s\": %m" +#: libpq/pqcomm.c:538 +#, c-format msgid "could not bind %s address \"%s\": %m" -msgstr "konnte nicht mit lokaler Adresse »%s« verbinden: %m" +msgstr "konnte %s-Adresse »%s« nicht binden: %m" -#: libpq/pqcomm.c:522 +#: libpq/pqcomm.c:541 #, c-format msgid "Is another postmaster already running on port %d? If not, remove socket file \"%s\" and retry." msgstr "Läuft bereits ein anderer Postmaster auf Port %d? 
Wenn nicht, entfernen Sie die Socketdatei »%s« und versuchen Sie erneut." -#: libpq/pqcomm.c:525 +#: libpq/pqcomm.c:544 #, c-format msgid "Is another postmaster already running on port %d? If not, wait a few seconds and retry." msgstr "Läuft bereits ein anderer Postmaster auf Port %d? Wenn nicht, warten Sie einige Sekunden und versuchen Sie erneut." #. translator: first %s is IPv4, IPv6, or Unix -#: libpq/pqcomm.c:558 -#, fuzzy, c-format -#| msgid "could not bind to local address \"%s\": %m" +#: libpq/pqcomm.c:577 +#, c-format msgid "could not listen on %s address \"%s\": %m" -msgstr "konnte nicht mit lokaler Adresse »%s« verbinden: %m" +msgstr "konnte nicht auf %s-Adresse »%s« hören: %m" -#. translator: first %s is IPv4, IPv6, or Unix -#: libpq/pqcomm.c:566 -#, fuzzy, c-format -#| msgid "invalid CIDR mask in address \"%s\"" -msgid "listening on %s address \"%s\"" -msgstr "ungültige CIDR-Maske in Adresse »%s«" +#: libpq/pqcomm.c:586 +#, c-format +msgid "listening on Unix socket \"%s\"" +msgstr "erwarte Verbindungen auf Unix-Socket »%s«" + +#. translator: first %s is IPv4 or IPv6 +#: libpq/pqcomm.c:592 +#, c-format +msgid "listening on %s address \"%s\", port %d" +msgstr "erwarte Verbindungen auf %s-Adresse »%s«, Port %d" -#: libpq/pqcomm.c:649 +#: libpq/pqcomm.c:675 #, c-format msgid "group \"%s\" does not exist" msgstr "Gruppe »%s« existiert nicht" -#: libpq/pqcomm.c:659 +#: libpq/pqcomm.c:685 #, c-format msgid "could not set group of file \"%s\": %m" msgstr "konnte Gruppe von Datei »%s« nicht setzen: %m" -#: libpq/pqcomm.c:670 +#: libpq/pqcomm.c:696 #, c-format msgid "could not set permissions of file \"%s\": %m" msgstr "konnte Zugriffsrechte von Datei »%s« nicht setzen: %m" -#: libpq/pqcomm.c:700 +#: libpq/pqcomm.c:726 #, c-format msgid "could not accept new connection: %m" msgstr "konnte neue Verbindung nicht akzeptieren: %m" -#: libpq/pqcomm.c:901 +#: libpq/pqcomm.c:927 #, c-format msgid "there is no client connection" msgstr "es besteht keine Client-Verbindung" -#: libpq/pqcomm.c:952 libpq/pqcomm.c:1048 +#: libpq/pqcomm.c:978 libpq/pqcomm.c:1074 #, c-format msgid "could not receive data from client: %m" msgstr "konnte Daten vom Client nicht empfangen: %m" -#: libpq/pqcomm.c:1193 tcop/postgres.c:3907 +#: libpq/pqcomm.c:1219 tcop/postgres.c:3991 #, c-format msgid "terminating connection because protocol synchronization was lost" msgstr "Verbindung wird abgebrochen, weil Protokollsynchronisierung verloren wurde" -#: libpq/pqcomm.c:1259 +#: libpq/pqcomm.c:1285 #, c-format msgid "unexpected EOF within message length word" msgstr "unerwartetes EOF im Message-Längenwort" -#: libpq/pqcomm.c:1270 +#: libpq/pqcomm.c:1296 #, c-format msgid "invalid message length" msgstr "ungültige Message-Länge" -#: libpq/pqcomm.c:1292 libpq/pqcomm.c:1305 +#: libpq/pqcomm.c:1318 libpq/pqcomm.c:1331 #, c-format msgid "incomplete message from client" msgstr "unvollständige Message vom Client" -#: libpq/pqcomm.c:1438 +#: libpq/pqcomm.c:1464 #, c-format msgid "could not send data to client: %m" msgstr "konnte Daten nicht an den Client senden: %m" -#: libpq/pqformat.c:437 +#: libpq/pqformat.c:406 #, c-format msgid "no data left in message" msgstr "keine Daten in Message übrig" -#: libpq/pqformat.c:557 libpq/pqformat.c:575 libpq/pqformat.c:596 -#: utils/adt/arrayfuncs.c:1457 utils/adt/rowtypes.c:563 +#: libpq/pqformat.c:517 libpq/pqformat.c:535 libpq/pqformat.c:556 +#: utils/adt/arrayfuncs.c:1458 utils/adt/rowtypes.c:566 #, c-format msgid "insufficient data left in message" msgstr "nicht genug Daten in Message 
übrig" -#: libpq/pqformat.c:637 libpq/pqformat.c:666 +#: libpq/pqformat.c:597 libpq/pqformat.c:626 #, c-format msgid "invalid string in message" msgstr "ungültige Zeichenkette in Message" -#: libpq/pqformat.c:682 +#: libpq/pqformat.c:642 #, c-format msgid "invalid message format" msgstr "ungültiges Message-Format" @@ -13129,317 +14032,316 @@ msgstr "erweiterbarer Knotentyp »%s« existiert bereits" msgid "ExtensibleNodeMethods \"%s\" was not registered" msgstr "ExtensibleNodeMethods »%s« wurde nicht registriert" -#: nodes/nodeFuncs.c:123 nodes/nodeFuncs.c:154 parser/parse_coerce.c:1844 -#: parser/parse_coerce.c:1872 parser/parse_coerce.c:1948 -#: parser/parse_expr.c:2089 parser/parse_func.c:598 parser/parse_oper.c:958 +#: nodes/nodeFuncs.c:123 nodes/nodeFuncs.c:154 parser/parse_coerce.c:1910 +#: parser/parse_coerce.c:1938 parser/parse_coerce.c:2014 +#: parser/parse_expr.c:2119 parser/parse_func.c:631 parser/parse_oper.c:967 #, c-format msgid "could not find array type for data type %s" msgstr "konnte Arraytyp für Datentyp %s nicht finden" -#: optimizer/path/joinrels.c:802 +#: optimizer/path/joinrels.c:837 #, c-format msgid "FULL JOIN is only supported with merge-joinable or hash-joinable join conditions" msgstr "FULL JOIN wird nur für Merge- oder Hash-Verbund-fähige Verbundbedingungen unterstützt" #. translator: %s is a SQL row locking clause such as FOR UPDATE -#: optimizer/plan/initsplan.c:1200 +#: optimizer/plan/initsplan.c:1221 #, c-format msgid "%s cannot be applied to the nullable side of an outer join" msgstr "%s kann nicht auf die nullbare Seite eines äußeren Verbundes angewendet werden" #. translator: %s is a SQL row locking clause such as FOR UPDATE -#: optimizer/plan/planner.c:1480 parser/analyze.c:1619 parser/analyze.c:1816 -#: parser/analyze.c:2610 +#: optimizer/plan/planner.c:1757 parser/analyze.c:1625 parser/analyze.c:1822 +#: parser/analyze.c:2653 #, c-format msgid "%s is not allowed with UNION/INTERSECT/EXCEPT" msgstr "%s ist nicht in UNION/INTERSECT/EXCEPT erlaubt" -#: optimizer/plan/planner.c:3854 +#: optimizer/plan/planner.c:2329 optimizer/plan/planner.c:4050 #, c-format msgid "could not implement GROUP BY" msgstr "konnte GROUP BY nicht implementieren" -#: optimizer/plan/planner.c:3855 optimizer/plan/planner.c:4257 -#: optimizer/prep/prepunion.c:928 +#: optimizer/plan/planner.c:2330 optimizer/plan/planner.c:4051 +#: optimizer/plan/planner.c:4795 optimizer/prep/prepunion.c:1074 #, c-format msgid "Some of the datatypes only support hashing, while others only support sorting." msgstr "Einige Datentypen unterstützen nur Hashing, während andere nur Sortieren unterstützen." -#: optimizer/plan/planner.c:4256 +#: optimizer/plan/planner.c:4794 #, c-format msgid "could not implement DISTINCT" msgstr "konnte DISTINCT nicht implementieren" -#: optimizer/plan/planner.c:4936 +#: optimizer/plan/planner.c:5479 #, c-format msgid "could not implement window PARTITION BY" msgstr "konnte PARTITION BY für Fenster nicht implementieren" -#: optimizer/plan/planner.c:4937 +#: optimizer/plan/planner.c:5480 #, c-format msgid "Window partitioning columns must be of sortable datatypes." msgstr "Fensterpartitionierungsspalten müssen sortierbare Datentypen haben." -#: optimizer/plan/planner.c:4941 +#: optimizer/plan/planner.c:5484 #, c-format msgid "could not implement window ORDER BY" msgstr "konnte ORDER BY für Fenster nicht implementieren" -#: optimizer/plan/planner.c:4942 +#: optimizer/plan/planner.c:5485 #, c-format msgid "Window ordering columns must be of sortable datatypes." 
msgstr "Fenstersortierspalten müssen sortierbare Datentypen haben." -#: optimizer/plan/setrefs.c:413 +#: optimizer/plan/setrefs.c:418 #, c-format msgid "too many range table entries" msgstr "zu viele Range-Table-Einträge" -#: optimizer/prep/prepunion.c:483 +#: optimizer/prep/prepunion.c:538 #, c-format msgid "could not implement recursive UNION" msgstr "konnte rekursive UNION nicht implementieren" -#: optimizer/prep/prepunion.c:484 +#: optimizer/prep/prepunion.c:539 #, c-format msgid "All column datatypes must be hashable." msgstr "Alle Spaltendatentypen müssen hashbar sein." #. translator: %s is UNION, INTERSECT, or EXCEPT -#: optimizer/prep/prepunion.c:927 +#: optimizer/prep/prepunion.c:1073 #, c-format msgid "could not implement %s" msgstr "konnte %s nicht implementieren" -#: optimizer/util/clauses.c:4634 +#: optimizer/util/clauses.c:4834 #, c-format msgid "SQL function \"%s\" during inlining" msgstr "SQL-Funktion »%s« beim Inlining" -#: optimizer/util/plancat.c:115 +#: optimizer/util/plancat.c:127 #, c-format msgid "cannot access temporary or unlogged relations during recovery" msgstr "während der Wiederherstellung kann nicht auf temporäre oder ungeloggte Tabellen zugegriffen werden" -#: optimizer/util/plancat.c:613 +#: optimizer/util/plancat.c:651 #, c-format msgid "whole row unique index inference specifications are not supported" msgstr "Inferenzangaben mit Unique-Index über die gesamte Zeile werden nicht unterstützt" -#: optimizer/util/plancat.c:630 +#: optimizer/util/plancat.c:668 #, c-format msgid "constraint in ON CONFLICT clause has no associated index" msgstr "Constraint in der ON-CONFLICT-Klausel hat keinen zugehörigen Index" -#: optimizer/util/plancat.c:681 +#: optimizer/util/plancat.c:719 #, c-format msgid "ON CONFLICT DO UPDATE not supported with exclusion constraints" msgstr "ON CONFLICT DO UDPATE nicht unterstützt mit Exclusion-Constraints" -#: optimizer/util/plancat.c:786 +#: optimizer/util/plancat.c:824 #, c-format msgid "there is no unique or exclusion constraint matching the ON CONFLICT specification" msgstr "es gibt keinen Unique-Constraint oder Exclusion-Constraint, der auf die ON-CONFLICT-Angabe passt" -#: parser/analyze.c:695 parser/analyze.c:1382 +#: parser/analyze.c:709 parser/analyze.c:1388 #, c-format msgid "VALUES lists must all be the same length" msgstr "VALUES-Listen müssen alle die gleiche Länge haben" -#: parser/analyze.c:850 -#, fuzzy, c-format -#| msgid "ON CONFLICT is not supported with system catalog tables" -msgid "ON CONFLICT clause is not supported with partitioned tables" -msgstr "ON CONFLICT wird nicht mit Systemkatalogtabellen unterstützt" - -#: parser/analyze.c:913 +#: parser/analyze.c:919 #, c-format msgid "INSERT has more expressions than target columns" msgstr "INSERT hat mehr Ausdrücke als Zielspalten" -#: parser/analyze.c:931 +#: parser/analyze.c:937 #, c-format msgid "INSERT has more target columns than expressions" msgstr "INSERT hat mehr Zielspalten als Ausdrücke" -#: parser/analyze.c:935 +#: parser/analyze.c:941 #, c-format msgid "The insertion source is a row expression containing the same number of columns expected by the INSERT. Did you accidentally use extra parentheses?" msgstr "Der einzufügende Wert ist ein Zeilenausdruck mit der gleichen Anzahl Spalten wie von INSERT erwartet. Haben Sie versehentlich zu viele Klammern gesetzt?" -#: parser/analyze.c:1195 parser/analyze.c:1592 +#: parser/analyze.c:1201 parser/analyze.c:1598 #, c-format msgid "SELECT ... INTO is not allowed here" msgstr "SELECT ... INTO ist hier nicht erlaubt" #. 
translator: %s is a SQL row locking clause such as FOR UPDATE -#: parser/analyze.c:1524 parser/analyze.c:2789 +#: parser/analyze.c:1530 parser/analyze.c:2832 #, c-format msgid "%s cannot be applied to VALUES" msgstr "%s kann nicht auf VALUES angewendet werden" -#: parser/analyze.c:1743 +#: parser/analyze.c:1749 #, c-format msgid "invalid UNION/INTERSECT/EXCEPT ORDER BY clause" msgstr "ungültige ORDER-BY-Klausel mit UNION/INTERSECT/EXCEPT" -#: parser/analyze.c:1744 +#: parser/analyze.c:1750 #, c-format msgid "Only result column names can be used, not expressions or functions." msgstr "Es können nur Ergebnisspaltennamen verwendet werden, keine Ausdrücke oder Funktionen." -#: parser/analyze.c:1745 +#: parser/analyze.c:1751 #, c-format msgid "Add the expression/function to every SELECT, or move the UNION into a FROM clause." msgstr "Fügen Sie den Ausdrück/die Funktion jedem SELECT hinzu oder verlegen Sie die UNION in eine FROM-Klausel." -#: parser/analyze.c:1806 +#: parser/analyze.c:1812 #, c-format msgid "INTO is only allowed on first SELECT of UNION/INTERSECT/EXCEPT" msgstr "INTO ist nur im ersten SELECT von UNION/INTERSECT/EXCEPT erlaubt" -#: parser/analyze.c:1878 +#: parser/analyze.c:1884 #, c-format msgid "UNION/INTERSECT/EXCEPT member statement cannot refer to other relations of same query level" msgstr "Teilanweisung von UNION/INTERSECT/EXCEPT kann nicht auf andere Relationen auf der selben Anfrageebene verweisen" -#: parser/analyze.c:1967 +#: parser/analyze.c:1973 #, c-format msgid "each %s query must have the same number of columns" msgstr "jede %s-Anfrage muss die gleiche Anzahl Spalten haben" -#: parser/analyze.c:2360 +#: parser/analyze.c:2366 #, c-format msgid "RETURNING must have at least one column" msgstr "RETURNING muss mindestens eine Spalte haben" -#: parser/analyze.c:2401 +#: parser/analyze.c:2407 #, c-format msgid "cannot specify both SCROLL and NO SCROLL" msgstr "SCROLL und NO SCROLL können nicht beide angegeben werden" -#: parser/analyze.c:2420 +#: parser/analyze.c:2426 #, c-format msgid "DECLARE CURSOR must not contain data-modifying statements in WITH" msgstr "DECLARE CURSOR darf keine datenmodifizierenden Anweisungen in WITH enthalten" #. translator: %s is a SQL row locking clause such as FOR UPDATE -#: parser/analyze.c:2428 +#: parser/analyze.c:2434 #, c-format msgid "DECLARE CURSOR WITH HOLD ... %s is not supported" msgstr "DECLARE CURSOR WITH HOLD ... %s wird nicht unterstützt" -#: parser/analyze.c:2431 +#: parser/analyze.c:2437 #, c-format msgid "Holdable cursors must be READ ONLY." msgstr "Haltbare Cursor müssen READ ONLY sein." #. translator: %s is a SQL row locking clause such as FOR UPDATE -#: parser/analyze.c:2439 +#: parser/analyze.c:2445 #, c-format msgid "DECLARE SCROLL CURSOR ... %s is not supported" msgstr "DECLARE SCROLL CURSOR ... %s wird nicht unterstützt" #. translator: %s is a SQL row locking clause such as FOR UPDATE -#: parser/analyze.c:2450 +#: parser/analyze.c:2456 #, c-format msgid "DECLARE INSENSITIVE CURSOR ... %s is not supported" msgstr "DECLARE INSENSITIVE CURSOR ... %s wird nicht unterstützt" -#: parser/analyze.c:2453 +#: parser/analyze.c:2459 #, c-format msgid "Insensitive cursors must be READ ONLY." msgstr "Insensitive Cursor müssen READ ONLY sein." 
-#: parser/analyze.c:2519 +#: parser/analyze.c:2525 #, c-format msgid "materialized views must not use data-modifying statements in WITH" msgstr "materialisierte Sichten dürfen keine datenmodifizierenden Anweisungen in WITH verwenden" -#: parser/analyze.c:2529 +#: parser/analyze.c:2535 #, c-format msgid "materialized views must not use temporary tables or views" msgstr "materialisierte Sichten dürfen keine temporären Tabellen oder Sichten verwenden" -#: parser/analyze.c:2539 +#: parser/analyze.c:2545 #, c-format msgid "materialized views may not be defined using bound parameters" msgstr "materialisierte Sichten können nicht unter Verwendung von gebundenen Parametern definiert werden" -#: parser/analyze.c:2551 +#: parser/analyze.c:2557 #, c-format msgid "materialized views cannot be UNLOGGED" msgstr "materialisierte Sichten können nicht UNLOGGED sein" #. translator: %s is a SQL row locking clause such as FOR UPDATE -#: parser/analyze.c:2617 +#: parser/analyze.c:2660 #, c-format msgid "%s is not allowed with DISTINCT clause" msgstr "%s ist nicht mit DISTINCT-Klausel erlaubt" #. translator: %s is a SQL row locking clause such as FOR UPDATE -#: parser/analyze.c:2624 +#: parser/analyze.c:2667 #, c-format msgid "%s is not allowed with GROUP BY clause" msgstr "%s ist nicht mit GROUP-BY-Klausel erlaubt" #. translator: %s is a SQL row locking clause such as FOR UPDATE -#: parser/analyze.c:2631 +#: parser/analyze.c:2674 #, c-format msgid "%s is not allowed with HAVING clause" msgstr "%s ist nicht mit HAVING-Klausel erlaubt" #. translator: %s is a SQL row locking clause such as FOR UPDATE -#: parser/analyze.c:2638 +#: parser/analyze.c:2681 #, c-format msgid "%s is not allowed with aggregate functions" msgstr "%s ist nicht mit Aggregatfunktionen erlaubt" #. translator: %s is a SQL row locking clause such as FOR UPDATE -#: parser/analyze.c:2645 +#: parser/analyze.c:2688 #, c-format msgid "%s is not allowed with window functions" msgstr "%s ist nicht mit Fensterfunktionen erlaubt" #. translator: %s is a SQL row locking clause such as FOR UPDATE -#: parser/analyze.c:2652 +#: parser/analyze.c:2695 #, c-format msgid "%s is not allowed with set-returning functions in the target list" msgstr "%s ist nicht mit Funktionen mit Ergebnismenge in der Targetliste erlaubt" #. translator: %s is a SQL row locking clause such as FOR UPDATE -#: parser/analyze.c:2731 +#: parser/analyze.c:2774 #, c-format msgid "%s must specify unqualified relation names" msgstr "%s muss unqualifizierte Relationsnamen angeben" #. translator: %s is a SQL row locking clause such as FOR UPDATE -#: parser/analyze.c:2762 +#: parser/analyze.c:2805 #, c-format msgid "%s cannot be applied to a join" msgstr "%s kann nicht auf einen Verbund angewendet werden" #. translator: %s is a SQL row locking clause such as FOR UPDATE -#: parser/analyze.c:2771 +#: parser/analyze.c:2814 #, c-format msgid "%s cannot be applied to a function" msgstr "%s kann nicht auf eine Funktion angewendet werden" #. translator: %s is a SQL row locking clause such as FOR UPDATE -#: parser/analyze.c:2780 -#, fuzzy, c-format -#| msgid "%s cannot be applied to a function" +#: parser/analyze.c:2823 +#, c-format msgid "%s cannot be applied to a table function" -msgstr "%s kann nicht auf eine Funktion angewendet werden" +msgstr "%s kann nicht auf eine Tabellenfunktion angewendet werden" #. 
translator: %s is a SQL row locking clause such as FOR UPDATE -#: parser/analyze.c:2798 +#: parser/analyze.c:2841 #, c-format msgid "%s cannot be applied to a WITH query" msgstr "%s kann nicht auf eine WITH-Anfrage angewendet werden" #. translator: %s is a SQL row locking clause such as FOR UPDATE -#: parser/analyze.c:2815 +#: parser/analyze.c:2850 +#, c-format +msgid "%s cannot be applied to a named tuplestore" +msgstr "%s kann nicht auf einen benannten Tupelstore angewendet werden" + +#. translator: %s is a SQL row locking clause such as FOR UPDATE +#: parser/analyze.c:2867 #, c-format msgid "relation \"%s\" in %s clause not found in FROM clause" msgstr "Relation »%s« in %s nicht in der FROM-Klausel gefunden" @@ -13507,513 +14409,577 @@ msgstr "Aggregatfunktionen sind in der Fenster-ROWS-Klausel nicht erlaubt" msgid "grouping operations are not allowed in window ROWS" msgstr "Gruppieroperationen sind in der Fenster-ROWS-Klausel nicht erlaubt" -#: parser/parse_agg.c:454 +#: parser/parse_agg.c:425 +#, fuzzy +#| msgid "aggregate functions are not allowed in window ROWS" +msgid "aggregate functions are not allowed in window GROUPS" +msgstr "Aggregatfunktionen sind in der Fenster-ROWS-Klausel nicht erlaubt" + +#: parser/parse_agg.c:427 +#, fuzzy +#| msgid "grouping operations are not allowed in window ROWS" +msgid "grouping operations are not allowed in window GROUPS" +msgstr "Gruppieroperationen sind in der Fenster-ROWS-Klausel nicht erlaubt" + +#: parser/parse_agg.c:461 msgid "aggregate functions are not allowed in check constraints" msgstr "Aggregatfunktionen sind in Check-Constraints nicht erlaubt" -#: parser/parse_agg.c:456 +#: parser/parse_agg.c:463 msgid "grouping operations are not allowed in check constraints" msgstr "Gruppieroperationen sind in Check-Constraints nicht erlaubt" -#: parser/parse_agg.c:463 +#: parser/parse_agg.c:470 msgid "aggregate functions are not allowed in DEFAULT expressions" msgstr "Aggregatfunktionen sind in DEFAULT-Ausdrücken nicht erlaubt" -#: parser/parse_agg.c:465 +#: parser/parse_agg.c:472 msgid "grouping operations are not allowed in DEFAULT expressions" msgstr "Gruppieroperationen sind in DEFAULT-Ausdrücken nicht erlaubt" -#: parser/parse_agg.c:470 +#: parser/parse_agg.c:477 msgid "aggregate functions are not allowed in index expressions" msgstr "Aggregatfunktionen sind in Indexausdrücken nicht erlaubt" -#: parser/parse_agg.c:472 +#: parser/parse_agg.c:479 msgid "grouping operations are not allowed in index expressions" msgstr "Gruppieroperationen sind in Indexausdrücken nicht erlaubt" -#: parser/parse_agg.c:477 +#: parser/parse_agg.c:484 msgid "aggregate functions are not allowed in index predicates" msgstr "Aggregatfunktionen sind in Indexprädikaten nicht erlaubt" -#: parser/parse_agg.c:479 +#: parser/parse_agg.c:486 msgid "grouping operations are not allowed in index predicates" msgstr "Gruppieroperationen sind in Indexprädikaten nicht erlaubt" -#: parser/parse_agg.c:484 +#: parser/parse_agg.c:491 msgid "aggregate functions are not allowed in transform expressions" msgstr "Aggregatfunktionen sind in Umwandlungsausdrücken nicht erlaubt" -#: parser/parse_agg.c:486 +#: parser/parse_agg.c:493 msgid "grouping operations are not allowed in transform expressions" msgstr "Gruppieroperationen sind in Umwandlungsausdrücken nicht erlaubt" -#: parser/parse_agg.c:491 +#: parser/parse_agg.c:498 msgid "aggregate functions are not allowed in EXECUTE parameters" msgstr "Aggregatfunktionen sind in EXECUTE-Parametern nicht erlaubt" -#: parser/parse_agg.c:493 +#: 
parser/parse_agg.c:500 msgid "grouping operations are not allowed in EXECUTE parameters" msgstr "Gruppieroperationen sind in EXECUTE-Parametern nicht erlaubt" -#: parser/parse_agg.c:498 +#: parser/parse_agg.c:505 msgid "aggregate functions are not allowed in trigger WHEN conditions" msgstr "Aggregatfunktionen sind in der WHEN-Bedingung eines Triggers nicht erlaubt" -#: parser/parse_agg.c:500 +#: parser/parse_agg.c:507 msgid "grouping operations are not allowed in trigger WHEN conditions" msgstr "Gruppieroperationen sind in der WHEN-Bedingung eines Triggers nicht erlaubt" -#: parser/parse_agg.c:505 -#, fuzzy -#| msgid "aggregate functions are not allowed in index expressions" -msgid "aggregate functions are not allowed in partition key expression" -msgstr "Aggregatfunktionen sind in Indexausdrücken nicht erlaubt" +#: parser/parse_agg.c:512 +msgid "aggregate functions are not allowed in partition key expressions" +msgstr "Aggregatfunktionen sind in Partitionierungsschlüsselausdrücken nicht erlaubt" -#: parser/parse_agg.c:507 -#, fuzzy -#| msgid "grouping operations are not allowed in index expressions" -msgid "grouping operations are not allowed in partition key expression" -msgstr "Gruppieroperationen sind in Indexausdrücken nicht erlaubt" +#: parser/parse_agg.c:514 +msgid "grouping operations are not allowed in partition key expressions" +msgstr "Gruppieroperationen sind in Partitionierungsschlüsselausdrücken nicht erlaubt" + +#: parser/parse_agg.c:520 +msgid "aggregate functions are not allowed in CALL arguments" +msgstr "Aggregatfunktionen sind in CALL-Argumenten nicht erlaubt" + +#: parser/parse_agg.c:522 +msgid "grouping operations are not allowed in CALL arguments" +msgstr "Gruppieroperationen sind in CALL-Argumenten nicht erlaubt" #. translator: %s is name of a SQL construct, eg GROUP BY -#: parser/parse_agg.c:530 parser/parse_clause.c:1767 +#: parser/parse_agg.c:545 parser/parse_clause.c:1817 #, c-format msgid "aggregate functions are not allowed in %s" msgstr "Aggregatfunktionen sind in %s nicht erlaubt" #. translator: %s is name of a SQL construct, eg GROUP BY -#: parser/parse_agg.c:533 +#: parser/parse_agg.c:548 #, c-format msgid "grouping operations are not allowed in %s" msgstr "Gruppieroperationen sind in %s nicht erlaubt" -#: parser/parse_agg.c:641 +#: parser/parse_agg.c:656 #, c-format msgid "outer-level aggregate cannot contain a lower-level variable in its direct arguments" msgstr "Aggregatfunktion auf äußerer Ebene kann keine Variable einer unteren Ebene in ihren direkten Argumenten haben" -#: parser/parse_agg.c:712 +#: parser/parse_agg.c:735 +#, c-format +msgid "aggregate function calls cannot contain set-returning function calls" +msgstr "Aufrufe von Aggregatfunktionen können keine Aufrufe von Funktionen mit Ergebnismenge enthalten" + +#: parser/parse_agg.c:736 parser/parse_expr.c:1766 parser/parse_expr.c:2246 +#: parser/parse_func.c:802 +#, c-format +msgid "You might be able to move the set-returning function into a LATERAL FROM item." +msgstr "Sie können möglicherweise die Funktion mit Ergebnismenge in ein LATERAL-FROM-Element verschieben." 
+ +#: parser/parse_agg.c:741 #, c-format msgid "aggregate function calls cannot contain window function calls" msgstr "Aufrufe von Aggregatfunktionen können keine Aufrufe von Fensterfunktionen enthalten" -#: parser/parse_agg.c:790 +#: parser/parse_agg.c:820 msgid "window functions are not allowed in JOIN conditions" msgstr "Fensterfunktionen sind in JOIN-Bedingungen nicht erlaubt" -#: parser/parse_agg.c:797 +#: parser/parse_agg.c:827 msgid "window functions are not allowed in functions in FROM" msgstr "Fensterfunktionen sind in Funktionen in FROM nicht erlaubt" -#: parser/parse_agg.c:803 +#: parser/parse_agg.c:833 msgid "window functions are not allowed in policy expressions" msgstr "Fensterfunktionen sind in Policy-Ausdrücken nicht erlaubt" -#: parser/parse_agg.c:815 +#: parser/parse_agg.c:846 msgid "window functions are not allowed in window definitions" msgstr "Fensterfunktionen sind in Fensterdefinitionen nicht erlaubt" -#: parser/parse_agg.c:847 +#: parser/parse_agg.c:878 msgid "window functions are not allowed in check constraints" msgstr "Fensterfunktionen sind in Check-Constraints nicht erlaubt" -#: parser/parse_agg.c:851 +#: parser/parse_agg.c:882 msgid "window functions are not allowed in DEFAULT expressions" msgstr "Fensterfunktionen sind in DEFAULT-Ausdrücken nicht erlaubt" -#: parser/parse_agg.c:854 +#: parser/parse_agg.c:885 msgid "window functions are not allowed in index expressions" msgstr "Fensterfunktionen sind in Indexausdrücken nicht erlaubt" -#: parser/parse_agg.c:857 +#: parser/parse_agg.c:888 msgid "window functions are not allowed in index predicates" msgstr "Fensterfunktionen sind in Indexprädikaten nicht erlaubt" -#: parser/parse_agg.c:860 +#: parser/parse_agg.c:891 msgid "window functions are not allowed in transform expressions" msgstr "Fensterfunktionen sind in Umwandlungsausdrücken nicht erlaubt" -#: parser/parse_agg.c:863 +#: parser/parse_agg.c:894 msgid "window functions are not allowed in EXECUTE parameters" msgstr "Fensterfunktionen sind in EXECUTE-Parametern nicht erlaubt" -#: parser/parse_agg.c:866 +#: parser/parse_agg.c:897 msgid "window functions are not allowed in trigger WHEN conditions" msgstr "Fensterfunktionen sind in der WHEN-Bedingung eines Triggers nicht erlaubt" -#: parser/parse_agg.c:869 -#, fuzzy -#| msgid "window functions are not allowed in index expressions" -msgid "window functions are not allowed in partition key expression" -msgstr "Fensterfunktionen sind in Indexausdrücken nicht erlaubt" +#: parser/parse_agg.c:900 +msgid "window functions are not allowed in partition key expressions" +msgstr "Fensterfunktionen sind in Partitionierungsschlüsselausdrücken nicht erlaubt" + +#: parser/parse_agg.c:903 +msgid "window functions are not allowed in CALL arguments" +msgstr "Fensterfunktionen sind in CALL-Argumenten nicht erlaubt" #. 
translator: %s is name of a SQL construct, eg GROUP BY -#: parser/parse_agg.c:889 parser/parse_clause.c:1776 +#: parser/parse_agg.c:923 parser/parse_clause.c:1826 #, c-format msgid "window functions are not allowed in %s" msgstr "Fensterfunktionen sind in %s nicht erlaubt" -#: parser/parse_agg.c:923 parser/parse_clause.c:2610 +#: parser/parse_agg.c:957 parser/parse_clause.c:2662 #, c-format msgid "window \"%s\" does not exist" msgstr "Fenster »%s« existiert nicht" -#: parser/parse_agg.c:1008 +#: parser/parse_agg.c:1042 #, c-format msgid "too many grouping sets present (maximum 4096)" msgstr "zu viele Grouping-Sets vorhanden (maximal 4096)" -#: parser/parse_agg.c:1157 +#: parser/parse_agg.c:1191 #, c-format msgid "aggregate functions are not allowed in a recursive query's recursive term" msgstr "Aggregatfunktionen sind nicht im rekursiven Ausdruck einer rekursiven Anfrage erlaubt" -#: parser/parse_agg.c:1350 +#: parser/parse_agg.c:1384 #, c-format msgid "column \"%s.%s\" must appear in the GROUP BY clause or be used in an aggregate function" msgstr "Spalte »%s.%s« muss in der GROUP-BY-Klausel erscheinen oder in einer Aggregatfunktion verwendet werden" -#: parser/parse_agg.c:1353 +#: parser/parse_agg.c:1387 #, c-format msgid "Direct arguments of an ordered-set aggregate must use only grouped columns." msgstr "Direkte Argumente einer Ordered-Set-Aggregatfunktion dürfen nur gruppierte Spalten verwenden." -#: parser/parse_agg.c:1358 +#: parser/parse_agg.c:1392 #, c-format msgid "subquery uses ungrouped column \"%s.%s\" from outer query" msgstr "Unteranfrage verwendet nicht gruppierte Spalte »%s.%s« aus äußerer Anfrage" -#: parser/parse_agg.c:1522 +#: parser/parse_agg.c:1556 #, c-format msgid "arguments to GROUPING must be grouping expressions of the associated query level" msgstr "Argumente von GROUPING müssen Gruppierausdrücke der zugehörigen Anfrageebene sein" -#: parser/parse_clause.c:626 +#: parser/parse_clause.c:199 +#, c-format +msgid "relation \"%s\" cannot be the target of a modifying statement" +msgstr "Relation »%s« kann nicht das Ziel einer datenverändernden Anweisung sein" + +#: parser/parse_clause.c:615 parser/parse_clause.c:643 parser/parse_func.c:2220 +#, c-format +msgid "set-returning functions must appear at top level of FROM" +msgstr "Funktionen mit Ergebnismenge müssen auf oberster Ebene von FROM erscheinen" + +#: parser/parse_clause.c:655 #, c-format msgid "multiple column definition lists are not allowed for the same function" msgstr "mehrere Spaltendefinitionslisten für die selbe Funktion sind nicht erlaubt" -#: parser/parse_clause.c:659 +#: parser/parse_clause.c:688 #, c-format msgid "ROWS FROM() with multiple functions cannot have a column definition list" msgstr "ROWS FROM() mit mehreren Funktionen kann keine Spaltendefinitionsliste haben" -#: parser/parse_clause.c:660 +#: parser/parse_clause.c:689 #, c-format msgid "Put a separate column definition list for each function inside ROWS FROM()." msgstr "Geben Sie innerhalb von ROWS FROM() jeder Funktion eine eigene Spaltendefinitionsliste." -#: parser/parse_clause.c:666 +#: parser/parse_clause.c:695 #, c-format msgid "UNNEST() with multiple arguments cannot have a column definition list" msgstr "UNNEST() mit mehreren Argumenten kann keine Spaltendefinitionsliste haben" -#: parser/parse_clause.c:667 +#: parser/parse_clause.c:696 #, c-format msgid "Use separate UNNEST() calls inside ROWS FROM(), and attach a column definition list to each one." 
msgstr "Verwenden Sie getrennte UNNEST()-Aufrufe innerhalb von ROWS FROM() und geben Sie jeder eine eigene Spaltendefinitionsliste." -#: parser/parse_clause.c:674 +#: parser/parse_clause.c:703 #, c-format msgid "WITH ORDINALITY cannot be used with a column definition list" msgstr "WITH ORDINALITY kann nicht mit einer Spaltendefinitionsliste verwendet werden" -#: parser/parse_clause.c:675 +#: parser/parse_clause.c:704 #, c-format msgid "Put the column definition list inside ROWS FROM()." msgstr "Geben Sie die Spaltendefinitionsliste innerhalb von ROWS FROM() an." -#: parser/parse_clause.c:779 +#: parser/parse_clause.c:807 #, c-format msgid "only one FOR ORDINALITY column is allowed" -msgstr "" +msgstr "nur eine FOR-ORDINALITY-Spalte ist erlaubt" -#: parser/parse_clause.c:840 +#: parser/parse_clause.c:868 #, c-format msgid "column name \"%s\" is not unique" msgstr "Spaltenname »%s« ist nicht eindeutig" -#: parser/parse_clause.c:882 -#, fuzzy, c-format -#| msgid "function %s is not unique" +#: parser/parse_clause.c:910 +#, c-format msgid "namespace name \"%s\" is not unique" -msgstr "Funktion %s ist nicht eindeutig" +msgstr "Namensraumname »%s« ist nicht eindeutig" -#: parser/parse_clause.c:892 +#: parser/parse_clause.c:920 #, c-format msgid "only one default namespace is allowed" -msgstr "" +msgstr "nur ein Standardnamensraum ist erlaubt" -#: parser/parse_clause.c:953 +#: parser/parse_clause.c:981 #, c-format msgid "tablesample method %s does not exist" msgstr "Tablesample-Methode %s existiert nicht" -#: parser/parse_clause.c:975 +#: parser/parse_clause.c:1003 #, c-format msgid "tablesample method %s requires %d argument, not %d" msgid_plural "tablesample method %s requires %d arguments, not %d" msgstr[0] "Tablesample-Methode %s benötigt %d Argument, nicht %d" msgstr[1] "Tablesample-Methode %s benötigt %d Argumente, nicht %d" -#: parser/parse_clause.c:1009 +#: parser/parse_clause.c:1037 #, c-format msgid "tablesample method %s does not support REPEATABLE" msgstr "Tablesample-Methode %s unterstützt REPEATABLE nicht" -#: parser/parse_clause.c:1157 +#: parser/parse_clause.c:1207 #, c-format msgid "TABLESAMPLE clause can only be applied to tables and materialized views" msgstr "TABLESAMPLE-Klausel kann nur auf Tabellen und materialisierte Sichten angewendet werden" -#: parser/parse_clause.c:1327 +#: parser/parse_clause.c:1377 #, c-format msgid "column name \"%s\" appears more than once in USING clause" msgstr "Spaltenname »%s« erscheint mehrmals in der USING-Klausel" -#: parser/parse_clause.c:1342 +#: parser/parse_clause.c:1392 #, c-format msgid "common column name \"%s\" appears more than once in left table" msgstr "gemeinsamer Spaltenname »%s« erscheint mehrmals in der linken Tabelle" -#: parser/parse_clause.c:1351 +#: parser/parse_clause.c:1401 #, c-format msgid "column \"%s\" specified in USING clause does not exist in left table" msgstr "Spalte »%s« aus der USING-Klausel existiert nicht in der linken Tabelle" -#: parser/parse_clause.c:1365 +#: parser/parse_clause.c:1415 #, c-format msgid "common column name \"%s\" appears more than once in right table" msgstr "gemeinsamer Spaltenname »%s« erscheint mehrmals in der rechten Tabelle" -#: parser/parse_clause.c:1374 +#: parser/parse_clause.c:1424 #, c-format msgid "column \"%s\" specified in USING clause does not exist in right table" msgstr "Spalte »%s« aus der USING-Klausel existiert nicht in der rechten Tabelle" -#: parser/parse_clause.c:1428 +#: parser/parse_clause.c:1478 #, c-format msgid "column alias list for \"%s\" has too many entries" 
msgstr "Spaltenaliasliste für »%s« hat zu viele Einträge" #. translator: %s is name of a SQL construct, eg LIMIT -#: parser/parse_clause.c:1737 +#: parser/parse_clause.c:1787 #, c-format msgid "argument of %s must not contain variables" msgstr "Argument von %s darf keine Variablen enthalten" #. translator: first %s is name of a SQL construct, eg ORDER BY -#: parser/parse_clause.c:1902 +#: parser/parse_clause.c:1952 #, c-format msgid "%s \"%s\" is ambiguous" msgstr "%s »%s« ist nicht eindeutig" #. translator: %s is name of a SQL construct, eg ORDER BY -#: parser/parse_clause.c:1931 +#: parser/parse_clause.c:1981 #, c-format msgid "non-integer constant in %s" msgstr "Konstante in %s ist keine ganze Zahl" #. translator: %s is name of a SQL construct, eg ORDER BY -#: parser/parse_clause.c:1953 +#: parser/parse_clause.c:2003 #, c-format msgid "%s position %d is not in select list" msgstr "%s Position %d ist nicht in der Select-Liste" -#: parser/parse_clause.c:2394 +#: parser/parse_clause.c:2444 #, c-format msgid "CUBE is limited to 12 elements" msgstr "CUBE ist auf 12 Elemente begrenzt" -#: parser/parse_clause.c:2598 +#: parser/parse_clause.c:2650 #, c-format msgid "window \"%s\" is already defined" msgstr "Fenster »%s« ist bereits definiert" -#: parser/parse_clause.c:2659 +#: parser/parse_clause.c:2711 #, c-format msgid "cannot override PARTITION BY clause of window \"%s\"" msgstr "PARTITION-BY-Klausel von Fenster »%s« kann nicht aufgehoben werden" -#: parser/parse_clause.c:2671 +#: parser/parse_clause.c:2723 #, c-format msgid "cannot override ORDER BY clause of window \"%s\"" msgstr "ORDER-BY-Klausel von Fenster »%s« kann nicht aufgehoben werden" -#: parser/parse_clause.c:2701 parser/parse_clause.c:2707 +#: parser/parse_clause.c:2753 parser/parse_clause.c:2759 #, c-format msgid "cannot copy window \"%s\" because it has a frame clause" msgstr "kann Fenster »%s« nicht kopieren, weil es eine Frame-Klausel hat" -#: parser/parse_clause.c:2709 +#: parser/parse_clause.c:2761 #, c-format msgid "Omit the parentheses in this OVER clause." msgstr "Lassen Sie die Klammern in dieser OVER-Klausel weg." 
-#: parser/parse_clause.c:2775 +#: parser/parse_clause.c:2781 +#, c-format +msgid "RANGE with offset PRECEDING/FOLLOWING requires exactly one ORDER BY column" +msgstr "" + +#: parser/parse_clause.c:2864 #, c-format msgid "in an aggregate with DISTINCT, ORDER BY expressions must appear in argument list" msgstr "in einer Aggregatfunktion mit DISTINCT müssen ORDER-BY-Ausdrücke in der Argumentliste erscheinen" -#: parser/parse_clause.c:2776 +#: parser/parse_clause.c:2865 #, c-format msgid "for SELECT DISTINCT, ORDER BY expressions must appear in select list" msgstr "bei SELECT DISTINCT müssen ORDER-BY-Ausdrücke in der Select-Liste erscheinen" -#: parser/parse_clause.c:2808 +#: parser/parse_clause.c:2897 #, c-format msgid "an aggregate with DISTINCT must have at least one argument" msgstr "eine Aggregatfunktion mit DISTINCT muss mindestens ein Argument haben" -#: parser/parse_clause.c:2809 +#: parser/parse_clause.c:2898 #, c-format msgid "SELECT DISTINCT must have at least one column" msgstr "SELECT DISTINCT muss mindestens eine Spalte haben" -#: parser/parse_clause.c:2875 parser/parse_clause.c:2907 +#: parser/parse_clause.c:2964 parser/parse_clause.c:2996 #, c-format msgid "SELECT DISTINCT ON expressions must match initial ORDER BY expressions" msgstr "Ausdrücke in SELECT DISTINCT ON müssen mit den ersten Ausdrücken in ORDER BY übereinstimmen" -#: parser/parse_clause.c:2985 +#: parser/parse_clause.c:3074 #, c-format msgid "ASC/DESC is not allowed in ON CONFLICT clause" msgstr "ASC/DESC ist in der ON-CONFLICT-Klausel nicht erlaubt" -#: parser/parse_clause.c:2991 +#: parser/parse_clause.c:3080 #, c-format msgid "NULLS FIRST/LAST is not allowed in ON CONFLICT clause" msgstr "NULLS FIRST/LAST ist in der ON-CONFLICT-Klausel nicht erlaubt" -#: parser/parse_clause.c:3071 +#: parser/parse_clause.c:3159 #, c-format msgid "ON CONFLICT DO UPDATE requires inference specification or constraint name" msgstr "ON CONFLICT DO UPDATE benötigt Inferenzangabe oder Constraint-Namen" -#: parser/parse_clause.c:3072 +#: parser/parse_clause.c:3160 #, c-format msgid "For example, ON CONFLICT (column_name)." msgstr "Zum Bespiel ON CONFLICT (Spaltenname)." -#: parser/parse_clause.c:3083 +#: parser/parse_clause.c:3171 #, c-format msgid "ON CONFLICT is not supported with system catalog tables" msgstr "ON CONFLICT wird nicht mit Systemkatalogtabellen unterstützt" -#: parser/parse_clause.c:3091 +#: parser/parse_clause.c:3179 #, c-format msgid "ON CONFLICT is not supported on table \"%s\" used as a catalog table" msgstr "ON CONFLICT wird nicht unterstützt mit Tabelle »%s«, die als Katalogtabelle verwendet wird" -#: parser/parse_clause.c:3217 +#: parser/parse_clause.c:3322 #, c-format msgid "operator %s is not a valid ordering operator" msgstr "Operator %s ist kein gültiger Sortieroperator" -#: parser/parse_clause.c:3219 +#: parser/parse_clause.c:3324 #, c-format msgid "Ordering operators must be \"<\" or \">\" members of btree operator families." msgstr "Sortieroperatoren müssen die Mitglieder »<« oder »>« einer »btree«-Operatorfamilie sein." 
-#: parser/parse_coerce.c:971 parser/parse_coerce.c:1001 -#: parser/parse_coerce.c:1019 parser/parse_coerce.c:1034 -#: parser/parse_expr.c:2123 parser/parse_expr.c:2699 parser/parse_target.c:935 +#: parser/parse_clause.c:3635 +#, c-format +msgid "RANGE with offset PRECEDING/FOLLOWING is not supported for column type %s" +msgstr "" + +#: parser/parse_clause.c:3641 +#, c-format +msgid "RANGE with offset PRECEDING/FOLLOWING is not supported for column type %s and offset type %s" +msgstr "" + +#: parser/parse_clause.c:3644 +#, c-format +msgid "Cast the offset value to an appropriate type." +msgstr "" + +#: parser/parse_clause.c:3649 +#, c-format +msgid "RANGE with offset PRECEDING/FOLLOWING has multiple interpretations for column type %s and offset type %s" +msgstr "" + +#: parser/parse_clause.c:3652 +#, c-format +msgid "Cast the offset value to the exact intended type." +msgstr "" + +#: parser/parse_coerce.c:1017 parser/parse_coerce.c:1055 +#: parser/parse_coerce.c:1073 parser/parse_coerce.c:1088 +#: parser/parse_expr.c:2153 parser/parse_expr.c:2741 parser/parse_target.c:955 #, c-format msgid "cannot cast type %s to %s" msgstr "kann Typ %s nicht in Typ %s umwandeln" -#: parser/parse_coerce.c:1004 +#: parser/parse_coerce.c:1058 #, c-format msgid "Input has too few columns." msgstr "Eingabe hat zu wenige Spalten." -#: parser/parse_coerce.c:1022 +#: parser/parse_coerce.c:1076 #, c-format msgid "Cannot cast type %s to %s in column %d." msgstr "Kann in Spalte %3$d Typ %1$s nicht in Typ %2$s umwandeln." -#: parser/parse_coerce.c:1037 +#: parser/parse_coerce.c:1091 #, c-format msgid "Input has too many columns." msgstr "Eingabe hat zu viele Spalten." #. translator: first %s is name of a SQL construct, eg WHERE #. translator: first %s is name of a SQL construct, eg LIMIT -#: parser/parse_coerce.c:1080 parser/parse_coerce.c:1128 +#: parser/parse_coerce.c:1146 parser/parse_coerce.c:1194 #, c-format msgid "argument of %s must be type %s, not type %s" msgstr "Argument von %s muss Typ %s haben, nicht Typ %s" #. translator: %s is name of a SQL construct, eg WHERE #. translator: %s is name of a SQL construct, eg LIMIT -#: parser/parse_coerce.c:1091 parser/parse_coerce.c:1140 +#: parser/parse_coerce.c:1157 parser/parse_coerce.c:1206 #, c-format msgid "argument of %s must not return a set" msgstr "Argument von %s darf keine Ergebnismenge zurückgeben" #. translator: first %s is name of a SQL construct, eg CASE -#: parser/parse_coerce.c:1280 +#: parser/parse_coerce.c:1346 #, c-format msgid "%s types %s and %s cannot be matched" msgstr "%s-Typen %s und %s passen nicht zusammen" #. 
translator: first %s is name of a SQL construct, eg CASE -#: parser/parse_coerce.c:1347 +#: parser/parse_coerce.c:1413 #, c-format msgid "%s could not convert type %s to %s" msgstr "%s konnte Typ %s nicht in %s umwandeln" -#: parser/parse_coerce.c:1649 +#: parser/parse_coerce.c:1715 #, c-format msgid "arguments declared \"anyelement\" are not all alike" msgstr "als »anyelement« deklariert Argumente sind nicht alle gleich" -#: parser/parse_coerce.c:1669 +#: parser/parse_coerce.c:1735 #, c-format msgid "arguments declared \"anyarray\" are not all alike" msgstr "als »anyarray« deklarierte Argumente sind nicht alle gleich" -#: parser/parse_coerce.c:1689 +#: parser/parse_coerce.c:1755 #, c-format msgid "arguments declared \"anyrange\" are not all alike" msgstr "als »anyrange« deklarierte Argumente sind nicht alle gleich" -#: parser/parse_coerce.c:1718 parser/parse_coerce.c:1933 -#: parser/parse_coerce.c:1967 -#, fuzzy, c-format -#| msgid "argument declared \"anyarray\" is not an array but type %s" +#: parser/parse_coerce.c:1784 parser/parse_coerce.c:1999 +#: parser/parse_coerce.c:2033 +#, c-format msgid "argument declared %s is not an array but type %s" -msgstr "als »anyarray« deklariertes Argument ist kein Array sondern Typ %s" +msgstr "als %s deklariertes Argument ist kein Array sondern Typ %s" -#: parser/parse_coerce.c:1734 parser/parse_coerce.c:1773 -#, fuzzy, c-format -#| msgid "argument declared \"anyarray\" is not consistent with argument declared \"anyelement\"" +#: parser/parse_coerce.c:1800 parser/parse_coerce.c:1839 +#, c-format msgid "argument declared %s is not consistent with argument declared %s" -msgstr "als »anyarray« deklariertes Argument ist nicht mit als »anyelement« deklariertem Argument konsistent" +msgstr "als %s deklariertes Argument ist nicht mit als %s deklariertem Argument konsistent" -#: parser/parse_coerce.c:1756 parser/parse_coerce.c:1980 -#, fuzzy, c-format -#| msgid "argument declared \"anyrange\" is not a range type but type %s" +#: parser/parse_coerce.c:1822 parser/parse_coerce.c:2046 +#, c-format msgid "argument declared %s is not a range type but type %s" -msgstr "als »anyrange« deklariertes Argument ist kein Bereichstyp sondern Typ %s" +msgstr "als %s deklariertes Argument ist kein Bereichstyp sondern Typ %s" -#: parser/parse_coerce.c:1794 -#, fuzzy, c-format -#| msgid "could not determine polymorphic type because input has type \"unknown\"" +#: parser/parse_coerce.c:1860 +#, c-format msgid "could not determine polymorphic type because input has type %s" -msgstr "konnte polymorphischen Typ nicht bestimmen, weil Eingabe Typ »unknown« hat" +msgstr "konnte polymorphischen Typ nicht bestimmen, weil Eingabe Typ %s hat" -#: parser/parse_coerce.c:1805 +#: parser/parse_coerce.c:1871 #, c-format msgid "type matched to anynonarray is an array type: %s" msgstr "mit »anynonarray« gepaarter Typ ist ein Array-Typ: %s" -#: parser/parse_coerce.c:1815 +#: parser/parse_coerce.c:1881 #, c-format msgid "type matched to anyenum is not an enum type: %s" msgstr "mit »anyenum« gepaarter Typ ist kein Enum-Typ: %s" -#: parser/parse_coerce.c:1855 parser/parse_coerce.c:1885 +#: parser/parse_coerce.c:1921 parser/parse_coerce.c:1951 #, c-format msgid "could not find range type for data type %s" msgstr "konnte Bereichstyp für Datentyp %s nicht finden" @@ -14135,444 +15101,493 @@ msgstr "FOR UPDATE/SHARE in einer rekursiven Anfrage ist nicht implementiert" msgid "recursive reference to query \"%s\" must not appear more than once" msgstr "rekursiver Verweis auf Anfrage »%s« darf nicht 
mehrmals erscheinen" -#: parser/parse_expr.c:357 -#, fuzzy, c-format -#| msgid "null array element not allowed in this context" +#: parser/parse_expr.c:350 +#, c-format msgid "DEFAULT is not allowed in this context" -msgstr "NULL-Werte im Array sind in diesem Zusammenhang nicht erlaubt" +msgstr "DEFAULT ist in diesem Zusammenhang nicht erlaubt" -#: parser/parse_expr.c:410 parser/parse_relation.c:3110 -#: parser/parse_relation.c:3130 +#: parser/parse_expr.c:403 parser/parse_relation.c:3287 +#: parser/parse_relation.c:3307 #, c-format msgid "column %s.%s does not exist" msgstr "Spalte %s.%s existiert nicht" -#: parser/parse_expr.c:422 +#: parser/parse_expr.c:415 #, c-format msgid "column \"%s\" not found in data type %s" msgstr "Spalte »%s« nicht gefunden im Datentyp %s" -#: parser/parse_expr.c:428 +#: parser/parse_expr.c:421 #, c-format msgid "could not identify column \"%s\" in record data type" msgstr "konnte Spalte »%s« im Record-Datentyp nicht identifizieren" -#: parser/parse_expr.c:434 +#: parser/parse_expr.c:427 #, c-format msgid "column notation .%s applied to type %s, which is not a composite type" msgstr "Spaltenschreibweise .%s mit Typ %s verwendet, der kein zusammengesetzter Typ ist" -#: parser/parse_expr.c:464 parser/parse_target.c:721 +#: parser/parse_expr.c:458 parser/parse_target.c:722 #, c-format msgid "row expansion via \"*\" is not supported here" msgstr "Zeilenexpansion mit »*« wird hier nicht unterstützt" -#: parser/parse_expr.c:769 parser/parse_relation.c:668 -#: parser/parse_relation.c:768 parser/parse_target.c:1170 +#: parser/parse_expr.c:771 parser/parse_relation.c:689 +#: parser/parse_relation.c:789 parser/parse_target.c:1193 #, c-format msgid "column reference \"%s\" is ambiguous" msgstr "Spaltenverweis »%s« ist nicht eindeutig" -#: parser/parse_expr.c:825 parser/parse_param.c:110 parser/parse_param.c:142 +#: parser/parse_expr.c:827 parser/parse_param.c:110 parser/parse_param.c:142 #: parser/parse_param.c:199 parser/parse_param.c:298 #, c-format msgid "there is no parameter $%d" msgstr "es gibt keinen Parameter $%d" -#: parser/parse_expr.c:1064 +#: parser/parse_expr.c:1070 #, c-format msgid "NULLIF requires = operator to yield boolean" msgstr "NULLIF erfordert, dass Operator = boolean ergibt" -#: parser/parse_expr.c:1508 parser/parse_expr.c:1540 +#. translator: %s is name of a SQL construct, eg NULLIF +#: parser/parse_expr.c:1076 parser/parse_expr.c:3057 +#, c-format +msgid "%s must not return a set" +msgstr "%s darf keine Ergebnismenge zurückgeben" + +#: parser/parse_expr.c:1524 parser/parse_expr.c:1556 #, c-format msgid "number of columns does not match number of values" msgstr "Anzahl der Spalten stimmt nicht mit der Anzahl der Werte überein" -#: parser/parse_expr.c:1554 +#: parser/parse_expr.c:1570 #, c-format msgid "source for a multiple-column UPDATE item must be a sub-SELECT or ROW() expression" -msgstr "" +msgstr "die Quelle für ein UPDATE-Element mit mehreren Spalten muss ein Sub-SELECT oder ein ROW()-Ausdruck sein" -#: parser/parse_expr.c:1798 +#. 
translator: %s is name of a SQL construct, eg GROUP BY +#: parser/parse_expr.c:1764 parser/parse_expr.c:2244 parser/parse_func.c:2327 +#, c-format +msgid "set-returning functions are not allowed in %s" +msgstr "Funktionen mit Ergebnismenge sind in %s nicht erlaubt" + +#: parser/parse_expr.c:1825 msgid "cannot use subquery in check constraint" msgstr "Unteranfragen können nicht in Check-Constraints verwendet werden" -#: parser/parse_expr.c:1802 +#: parser/parse_expr.c:1829 msgid "cannot use subquery in DEFAULT expression" msgstr "Unteranfragen können nicht in DEFAULT-Ausdrücken verwendet werden" -#: parser/parse_expr.c:1805 +#: parser/parse_expr.c:1832 msgid "cannot use subquery in index expression" msgstr "Unteranfragen können nicht in Indexausdrücken verwendet werden" -#: parser/parse_expr.c:1808 +#: parser/parse_expr.c:1835 msgid "cannot use subquery in index predicate" msgstr "Unteranfragen können nicht im Indexprädikat verwendet werden" -#: parser/parse_expr.c:1811 +#: parser/parse_expr.c:1838 msgid "cannot use subquery in transform expression" msgstr "Unteranfragen können in Umwandlungsausdrücken nicht verwendet werden" -#: parser/parse_expr.c:1814 +#: parser/parse_expr.c:1841 msgid "cannot use subquery in EXECUTE parameter" msgstr "Unteranfragen können nicht in EXECUTE-Parameter verwendet werden" -#: parser/parse_expr.c:1817 +#: parser/parse_expr.c:1844 msgid "cannot use subquery in trigger WHEN condition" msgstr "Unteranfragen können nicht in der WHEN-Bedingung eines Triggers verwendet werden" -#: parser/parse_expr.c:1820 -#, fuzzy -#| msgid "cannot use subquery in index expression" +#: parser/parse_expr.c:1847 msgid "cannot use subquery in partition key expression" -msgstr "Unteranfragen können nicht in Indexausdrücken verwendet werden" +msgstr "Unteranfragen können nicht in Partitionierungsschlüsselausdrücken verwendet werden" -#: parser/parse_expr.c:1873 +#: parser/parse_expr.c:1850 +msgid "cannot use subquery in CALL argument" +msgstr "Unteranfragen können nicht in CALL-Argument verwendet werden" + +#: parser/parse_expr.c:1903 #, c-format msgid "subquery must return only one column" msgstr "Unteranfrage darf nur eine Spalte zurückgeben" -#: parser/parse_expr.c:1957 +#: parser/parse_expr.c:1987 #, c-format msgid "subquery has too many columns" msgstr "Unteranfrage hat zu viele Spalten" -#: parser/parse_expr.c:1962 +#: parser/parse_expr.c:1992 #, c-format msgid "subquery has too few columns" msgstr "Unteranfrage hat zu wenige Spalten" -#: parser/parse_expr.c:2063 +#: parser/parse_expr.c:2093 #, c-format msgid "cannot determine type of empty array" msgstr "kann Typ eines leeren Arrays nicht bestimmen" -#: parser/parse_expr.c:2064 +#: parser/parse_expr.c:2094 #, c-format msgid "Explicitly cast to the desired type, for example ARRAY[]::integer[]." msgstr "Wandeln Sie ausdrücklich in den gewünschten Typ um, zum Beispiel ARRAY[]::integer[]." 
-#: parser/parse_expr.c:2078 +#: parser/parse_expr.c:2108 #, c-format msgid "could not find element type for data type %s" msgstr "konnte Elementtyp für Datentyp %s nicht finden" -#: parser/parse_expr.c:2353 +#: parser/parse_expr.c:2395 #, c-format msgid "unnamed XML attribute value must be a column reference" msgstr "unbenannter XML-Attributwert muss ein Spaltenverweis sein" -#: parser/parse_expr.c:2354 +#: parser/parse_expr.c:2396 #, c-format msgid "unnamed XML element value must be a column reference" msgstr "unbenannter XML-Elementwert muss ein Spaltenverweis sein" -#: parser/parse_expr.c:2369 +#: parser/parse_expr.c:2411 #, c-format msgid "XML attribute name \"%s\" appears more than once" msgstr "XML-Attributname »%s« einscheint mehrmals" -#: parser/parse_expr.c:2476 +#: parser/parse_expr.c:2518 #, c-format msgid "cannot cast XMLSERIALIZE result to %s" msgstr "kann das Ergebnis von XMLSERIALIZE nicht in Typ %s umwandeln" -#: parser/parse_expr.c:2772 parser/parse_expr.c:2967 +#: parser/parse_expr.c:2814 parser/parse_expr.c:3010 #, c-format msgid "unequal number of entries in row expressions" msgstr "ungleiche Anzahl Einträge in Zeilenausdrücken" -#: parser/parse_expr.c:2782 +#: parser/parse_expr.c:2824 #, c-format msgid "cannot compare rows of zero length" msgstr "kann Zeilen mit Länge null nicht vergleichen" -#: parser/parse_expr.c:2806 +#: parser/parse_expr.c:2849 #, c-format msgid "row comparison operator must yield type boolean, not type %s" msgstr "Zeilenvergleichsoperator muss Typ boolean zurückgeben, nicht Typ %s" -#: parser/parse_expr.c:2813 +#: parser/parse_expr.c:2856 #, c-format msgid "row comparison operator must not return a set" msgstr "Zeilenvergleichsoperator darf keine Ergebnismenge zurückgeben" -#: parser/parse_expr.c:2872 parser/parse_expr.c:2913 +#: parser/parse_expr.c:2915 parser/parse_expr.c:2956 #, c-format msgid "could not determine interpretation of row comparison operator %s" msgstr "konnte Interpretation des Zeilenvergleichsoperators %s nicht bestimmen" -#: parser/parse_expr.c:2874 +#: parser/parse_expr.c:2917 #, c-format msgid "Row comparison operators must be associated with btree operator families." msgstr "Zeilenvergleichsoperatoren müssen einer »btree«-Operatorfamilie zugeordnet sein." -#: parser/parse_expr.c:2915 +#: parser/parse_expr.c:2958 #, c-format msgid "There are multiple equally-plausible candidates." msgstr "Es gibt mehrere gleichermaßen plausible Kandidaten." 
-#: parser/parse_expr.c:3007 +#: parser/parse_expr.c:3051 #, c-format msgid "IS DISTINCT FROM requires = operator to yield boolean" msgstr "IS DISTINCT FROM erfordert, dass Operator = boolean ergibt" -#: parser/parse_expr.c:3320 parser/parse_expr.c:3338 +#: parser/parse_expr.c:3370 parser/parse_expr.c:3388 #, c-format msgid "operator precedence change: %s is now lower precedence than %s" msgstr "Änderung der Operatorrangfolge: %s hat jetzt niedrigere Priorität als %s" -#: parser/parse_func.c:175 +#: parser/parse_func.c:179 #, c-format msgid "argument name \"%s\" used more than once" msgstr "Argumentname »%s« mehrmals angegeben" -#: parser/parse_func.c:186 +#: parser/parse_func.c:190 #, c-format msgid "positional argument cannot follow named argument" msgstr "Positionsargument kann nicht hinter benanntem Argument stehen" -#: parser/parse_func.c:271 +#: parser/parse_func.c:275 #, c-format msgid "%s(*) specified, but %s is not an aggregate function" msgstr "%s(*) angegeben, aber %s ist keine Aggregatfunktion" -#: parser/parse_func.c:278 +#: parser/parse_func.c:282 #, c-format msgid "DISTINCT specified, but %s is not an aggregate function" msgstr "DISTINCT wurde angegeben, aber %s ist keine Aggregatfunktion" -#: parser/parse_func.c:284 +#: parser/parse_func.c:288 #, c-format msgid "WITHIN GROUP specified, but %s is not an aggregate function" msgstr "WITHIN GROUP wurde angegeben, aber %s ist keine Aggregatfunktion" -#: parser/parse_func.c:290 +#: parser/parse_func.c:294 #, c-format msgid "ORDER BY specified, but %s is not an aggregate function" msgstr "ORDER BY angegeben, aber %s ist keine Aggregatfunktion" -#: parser/parse_func.c:296 +#: parser/parse_func.c:300 #, c-format msgid "FILTER specified, but %s is not an aggregate function" msgstr "FILTER wurde angegeben, aber %s ist keine Aggregatfunktion" -#: parser/parse_func.c:302 +#: parser/parse_func.c:306 +#, c-format +msgid "OVER specified, but %s is not a window function nor an aggregate function" +msgstr "OVER angegeben, aber %s ist keine Fensterfunktion oder Aggregatfunktion" + +#: parser/parse_func.c:313 parser/parse_func.c:342 parser/parse_func.c:2120 +#, c-format +msgid "%s is not a procedure" +msgstr "%s ist keine Prozedur" + +#: parser/parse_func.c:317 +#, c-format +msgid "To call a function, use SELECT." +msgstr "Um eine Funktion aufzurufen, verwenden Sie SELECT." + +#: parser/parse_func.c:323 +#, c-format +msgid "%s is a procedure" +msgstr "%s ist eine Prozedur" + +#: parser/parse_func.c:327 #, c-format -msgid "OVER specified, but %s is not a window function nor an aggregate function" -msgstr "OVER angegeben, aber %s ist keine Fensterfunktion oder Aggregatfunktion" +msgid "To call a procedure, use CALL." +msgstr "Um eine Prozedur aufzurufen, verwenden Sie CALL." -#: parser/parse_func.c:332 +#: parser/parse_func.c:365 #, c-format msgid "WITHIN GROUP is required for ordered-set aggregate %s" msgstr "WITHIN GROUP muss angegeben werden für Ordered-Set-Aggregatfunktion %s" -#: parser/parse_func.c:338 +#: parser/parse_func.c:371 #, c-format msgid "OVER is not supported for ordered-set aggregate %s" msgstr "OVER wird für Ordered-Set-Aggregatfunktion %s nicht unterstützt" -#: parser/parse_func.c:369 parser/parse_func.c:398 +#: parser/parse_func.c:402 parser/parse_func.c:431 #, c-format msgid "There is an ordered-set aggregate %s, but it requires %d direct arguments, not %d." msgstr "Es gibt eine Ordered-Set-Aggregatfunktion %s, aber sie benötigt %d direkte Argumente, nicht %d." 
-#: parser/parse_func.c:423 +#: parser/parse_func.c:456 #, c-format msgid "To use the hypothetical-set aggregate %s, the number of hypothetical direct arguments (here %d) must match the number of ordering columns (here %d)." msgstr "Um die Hypothetical-Set-Aggregatfunktion %s zu verwenden, muss die Anzahl der hypothetischen direkten Argumente (hier %d) mit der Anzahl der Sortierspalten (hier %d) übereinstimmen." -#: parser/parse_func.c:437 +#: parser/parse_func.c:470 #, c-format msgid "There is an ordered-set aggregate %s, but it requires at least %d direct arguments." msgstr "Es gibt eine Ordered-Set-Aggregatfunktion %s, aber sie benötigt mindestens %d direkte Argumente." -#: parser/parse_func.c:456 +#: parser/parse_func.c:489 #, c-format msgid "%s is not an ordered-set aggregate, so it cannot have WITHIN GROUP" msgstr "%s ist keine Ordered-Set-Aggregatfunktion und kann deshalb kein WITHIN GROUP haben" -#: parser/parse_func.c:469 +#: parser/parse_func.c:502 #, c-format msgid "window function %s requires an OVER clause" msgstr "Fensterfunktion %s erfordert eine OVER-Klausel" -#: parser/parse_func.c:476 +#: parser/parse_func.c:509 #, c-format msgid "window function %s cannot have WITHIN GROUP" msgstr "Fensterfunktion %s kann kein WITHIN GROUP haben" -#: parser/parse_func.c:497 +#: parser/parse_func.c:530 #, c-format msgid "function %s is not unique" msgstr "Funktion %s ist nicht eindeutig" -#: parser/parse_func.c:500 +#: parser/parse_func.c:533 #, c-format msgid "Could not choose a best candidate function. You might need to add explicit type casts." msgstr "Konnte keine beste Kandidatfunktion auswählen. Sie müssen möglicherweise ausdrückliche Typumwandlungen hinzufügen." -#: parser/parse_func.c:511 +#: parser/parse_func.c:544 #, c-format msgid "No aggregate function matches the given name and argument types. Perhaps you misplaced ORDER BY; ORDER BY must appear after all regular arguments of the aggregate." msgstr "Keine Aggregatfunktion stimmt mit dem angegebenen Namen und den Argumenttypen überein. Mõglicherweise steht ORDER BY an der falschen Stelle; ORDER BY muss hinter allen normalen Argumenten der Aggregatfunktion stehen." -#: parser/parse_func.c:522 +#: parser/parse_func.c:555 #, c-format msgid "No function matches the given name and argument types. You might need to add explicit type casts." msgstr "Keine Funktion stimmt mit dem angegebenen Namen und den Argumenttypen überein. Sie müssen möglicherweise ausdrückliche Typumwandlungen hinzufügen." 
-#: parser/parse_func.c:624 +#: parser/parse_func.c:657 #, c-format msgid "VARIADIC argument must be an array" msgstr "VARIADIC-Argument muss ein Array sein" -#: parser/parse_func.c:676 parser/parse_func.c:740 +#: parser/parse_func.c:709 parser/parse_func.c:773 #, c-format msgid "%s(*) must be used to call a parameterless aggregate function" msgstr "beim Aufruf einer parameterlosen Aggregatfunktion muss %s(*) angegeben werden" -#: parser/parse_func.c:683 +#: parser/parse_func.c:716 #, c-format msgid "aggregates cannot return sets" msgstr "Aggregatfunktionen können keine Ergebnismengen zurückgeben" -#: parser/parse_func.c:698 +#: parser/parse_func.c:731 #, c-format msgid "aggregates cannot use named arguments" msgstr "Aggregatfunktionen können keine benannten Argumente verwenden" -#: parser/parse_func.c:730 +#: parser/parse_func.c:763 #, c-format msgid "DISTINCT is not implemented for window functions" msgstr "DISTINCT ist für Fensterfunktionen nicht implementiert" -#: parser/parse_func.c:750 +#: parser/parse_func.c:783 #, c-format msgid "aggregate ORDER BY is not implemented for window functions" msgstr "ORDER BY in Aggregatfunktion ist für Fensterfunktionen nicht implementiert" -#: parser/parse_func.c:759 +#: parser/parse_func.c:792 #, c-format msgid "FILTER is not implemented for non-aggregate window functions" msgstr "FILTER ist für Fensterfunktionen, die keine Aggregatfunktionen sind, nicht implementiert" -#: parser/parse_func.c:765 +#: parser/parse_func.c:801 +#, c-format +msgid "window function calls cannot contain set-returning function calls" +msgstr "Aufrufe von Fensterfunktionen können keine Aufrufe von Funktionen mit Ergebnismenge enthalten" + +#: parser/parse_func.c:809 #, c-format msgid "window functions cannot return sets" msgstr "Fensterfunktionen können keine Ergebnismengen zurückgeben" -#: parser/parse_func.c:2014 +#: parser/parse_func.c:1995 +#, c-format +msgid "function name \"%s\" is not unique" +msgstr "Funktionsname »%s« ist nicht eindeutig" + +#: parser/parse_func.c:1997 +#, c-format +msgid "Specify the argument list to select the function unambiguously." +msgstr "Geben Sie eine Argumentliste an, um die Funktion eindeutig auszuwählen." 
+ +#: parser/parse_func.c:2007 +#, c-format +msgid "could not find a function named \"%s\"" +msgstr "konnte keine Funktion namens »%s« finden" + +#: parser/parse_func.c:2089 +#, c-format +msgid "%s is not a function" +msgstr "%s ist keine Funktion" + +#: parser/parse_func.c:2103 +#, c-format +msgid "could not find a procedure named \"%s\"" +msgstr "konnte keine Prozedur namens »%s« finden" + +#: parser/parse_func.c:2108 +#, c-format +msgid "procedure %s does not exist" +msgstr "Prozedur %s existiert nicht" + +#: parser/parse_func.c:2134 +#, fuzzy, c-format +#| msgid "could not find a function named \"%s\"" +msgid "could not find a aggregate named \"%s\"" +msgstr "konnte keine Funktion namens »%s« finden" + +#: parser/parse_func.c:2139 #, c-format msgid "aggregate %s(*) does not exist" msgstr "Aggregatfunktion %s(*) existiert nicht" -#: parser/parse_func.c:2019 +#: parser/parse_func.c:2144 #, c-format msgid "aggregate %s does not exist" msgstr "Aggregatfunktion %s existiert nicht" -#: parser/parse_func.c:2038 +#: parser/parse_func.c:2157 #, c-format msgid "function %s is not an aggregate" msgstr "Funktion %s ist keine Aggregatfunktion" -#: parser/parse_func.c:2086 -#, fuzzy -#| msgid "window functions are not allowed in JOIN conditions" +#: parser/parse_func.c:2207 msgid "set-returning functions are not allowed in JOIN conditions" -msgstr "Fensterfunktionen sind in JOIN-Bedingungen nicht erlaubt" +msgstr "Funktionen mit Ergebnismenge sind in JOIN-Bedingungen nicht erlaubt" -#: parser/parse_func.c:2099 -#, fuzzy -#| msgid "window functions are not allowed in policy expressions" +#: parser/parse_func.c:2228 msgid "set-returning functions are not allowed in policy expressions" -msgstr "Fensterfunktionen sind in Policy-Ausdrücken nicht erlaubt" +msgstr "Funktionen mit Ergebnismenge sind in Policy-Ausdrücken nicht erlaubt" -#: parser/parse_func.c:2114 -#, fuzzy -#| msgid "window functions are not allowed in window definitions" +#: parser/parse_func.c:2244 msgid "set-returning functions are not allowed in window definitions" -msgstr "Fensterfunktionen sind in Fensterdefinitionen nicht erlaubt" +msgstr "Funktionen mit Ergebnismenge sind in Fensterdefinitionen nicht erlaubt" -#: parser/parse_func.c:2152 -#, fuzzy -#| msgid "window functions are not allowed in check constraints" +#: parser/parse_func.c:2282 msgid "set-returning functions are not allowed in check constraints" -msgstr "Fensterfunktionen sind in Check-Constraints nicht erlaubt" +msgstr "Funktionen mit Ergebnismenge sind in Check-Constraints nicht erlaubt" -#: parser/parse_func.c:2156 -#, fuzzy -#| msgid "window functions are not allowed in DEFAULT expressions" +#: parser/parse_func.c:2286 msgid "set-returning functions are not allowed in DEFAULT expressions" -msgstr "Fensterfunktionen sind in DEFAULT-Ausdrücken nicht erlaubt" +msgstr "Funktionen mit Ergebnismenge sind in DEFAULT-Ausdrücken nicht erlaubt" -#: parser/parse_func.c:2159 -#, fuzzy -#| msgid "window functions are not allowed in index expressions" +#: parser/parse_func.c:2289 msgid "set-returning functions are not allowed in index expressions" -msgstr "Fensterfunktionen sind in Indexausdrücken nicht erlaubt" +msgstr "Funktionen mit Ergebnismenge sind in Indexausdrücken nicht erlaubt" -#: parser/parse_func.c:2162 -#, fuzzy -#| msgid "window functions are not allowed in index predicates" +#: parser/parse_func.c:2292 msgid "set-returning functions are not allowed in index predicates" -msgstr "Fensterfunktionen sind in Indexprädikaten nicht erlaubt" +msgstr "Funktionen mit 
Ergebnismenge sind in Indexprädikaten nicht erlaubt" -#: parser/parse_func.c:2165 -#, fuzzy -#| msgid "window functions are not allowed in transform expressions" +#: parser/parse_func.c:2295 msgid "set-returning functions are not allowed in transform expressions" -msgstr "Fensterfunktionen sind in Umwandlungsausdrücken nicht erlaubt" +msgstr "Funktionen mit Ergebnismenge sind in Umwandlungsausdrücken nicht erlaubt" -#: parser/parse_func.c:2168 -#, fuzzy -#| msgid "window functions are not allowed in EXECUTE parameters" +#: parser/parse_func.c:2298 msgid "set-returning functions are not allowed in EXECUTE parameters" -msgstr "Fensterfunktionen sind in EXECUTE-Parametern nicht erlaubt" +msgstr "Funktionen mit Ergebnismenge sind in EXECUTE-Parametern nicht erlaubt" -#: parser/parse_func.c:2171 -#, fuzzy -#| msgid "window functions are not allowed in trigger WHEN conditions" +#: parser/parse_func.c:2301 msgid "set-returning functions are not allowed in trigger WHEN conditions" -msgstr "Fensterfunktionen sind in der WHEN-Bedingung eines Triggers nicht erlaubt" +msgstr "Funktionen mit Ergebnismenge sind in der WHEN-Bedingung eines Triggers nicht erlaubt" -#: parser/parse_func.c:2174 -#, fuzzy -#| msgid "window functions are not allowed in index expressions" -msgid "set-returning functions are not allowed in partition key expression" -msgstr "Fensterfunktionen sind in Indexausdrücken nicht erlaubt" +#: parser/parse_func.c:2304 +msgid "set-returning functions are not allowed in partition key expressions" +msgstr "Funktionen mit Ergebnismenge sind in Partitionierungsschlüsselausdrücken nicht erlaubt" -#. translator: %s is name of a SQL construct, eg GROUP BY -#: parser/parse_func.c:2194 -#, fuzzy, c-format -#| msgid "window functions are not allowed in %s" -msgid "set-returning functions are not allowed in %s" -msgstr "Fensterfunktionen sind in %s nicht erlaubt" +#: parser/parse_func.c:2307 +msgid "set-returning functions are not allowed in CALL arguments" +msgstr "Funktionen mit Ergebnismenge sind in CALL-Argumenten nicht erlaubt" -#: parser/parse_node.c:85 +#: parser/parse_node.c:87 #, c-format msgid "target lists can have at most %d entries" msgstr "Targetlisten können höchstens %d Einträge haben" -#: parser/parse_node.c:254 +#: parser/parse_node.c:256 #, c-format msgid "cannot subscript type %s because it is not an array" msgstr "kann aus Typ %s kein Element auswählen, weil er kein Array ist" -#: parser/parse_node.c:356 parser/parse_node.c:393 +#: parser/parse_node.c:358 parser/parse_node.c:395 #, c-format msgid "array subscript must have type integer" msgstr "Arrayindex muss Typ integer haben" -#: parser/parse_node.c:424 +#: parser/parse_node.c:426 #, c-format msgid "array assignment requires type %s but expression is of type %s" msgstr "Arrayzuweisung erfordert Typ %s, aber Ausdruck hat Typ %s" -#: parser/parse_oper.c:125 parser/parse_oper.c:724 utils/adt/regproc.c:585 -#: utils/adt/regproc.c:605 utils/adt/regproc.c:789 +#: parser/parse_oper.c:125 parser/parse_oper.c:724 utils/adt/regproc.c:520 +#: utils/adt/regproc.c:704 #, c-format msgid "operator does not exist: %s" msgstr "Operator existiert nicht: %s" @@ -14582,14 +15597,6 @@ msgstr "Operator existiert nicht: %s" msgid "Use an explicit ordering operator or modify the query." msgstr "Verwenden Sie einen ausdrücklichen Sortieroperator oder ändern Sie die Anfrage." 
-#: parser/parse_oper.c:228 utils/adt/array_userfuncs.c:794 -#: utils/adt/array_userfuncs.c:933 utils/adt/arrayfuncs.c:3639 -#: utils/adt/arrayfuncs.c:4077 utils/adt/arrayfuncs.c:6039 -#: utils/adt/rowtypes.c:1167 -#, c-format -msgid "could not identify an equality operator for type %s" -msgstr "konnte keinen Ist-Gleich-Operator für Typ %s ermitteln" - #: parser/parse_oper.c:480 #, c-format msgid "operator requires run-time type coercion: %s" @@ -14605,27 +15612,32 @@ msgstr "Operator ist nicht eindeutig: %s" msgid "Could not choose a best candidate operator. You might need to add explicit type casts." msgstr "Konnte keinen besten Kandidatoperator auswählen. Sie müssen möglicherweise ausdrückliche Typumwandlungen hinzufügen." -#: parser/parse_oper.c:726 +#: parser/parse_oper.c:727 #, c-format -msgid "No operator matches the given name and argument type(s). You might need to add explicit type casts." +msgid "No operator matches the given name and argument type. You might need to add an explicit type cast." +msgstr "Kein Operator stimmt mit dem angegebenen Namen und Argumenttyp überein. Sie müssen möglicherweise eine ausdrückliche Typumwandlung hinzufügen." + +#: parser/parse_oper.c:729 +#, c-format +msgid "No operator matches the given name and argument types. You might need to add explicit type casts." msgstr "Kein Operator stimmt mit dem angegebenen Namen und den Argumenttypen überein. Sie müssen möglicherweise ausdrückliche Typumwandlungen hinzufügen." -#: parser/parse_oper.c:785 parser/parse_oper.c:903 +#: parser/parse_oper.c:790 parser/parse_oper.c:912 #, c-format msgid "operator is only a shell: %s" msgstr "Operator ist nur eine Hülle: %s" -#: parser/parse_oper.c:891 +#: parser/parse_oper.c:900 #, c-format msgid "op ANY/ALL (array) requires array on right side" msgstr "op ANY/ALL (array) erfordert Array auf der rechten Seite" -#: parser/parse_oper.c:933 +#: parser/parse_oper.c:942 #, c-format msgid "op ANY/ALL (array) requires operator to yield boolean" msgstr "op ANY/ALL (array) erfordert, dass Operator boolean ergibt" -#: parser/parse_oper.c:938 +#: parser/parse_oper.c:947 #, c-format msgid "op ANY/ALL (array) requires operator not to return a set" msgstr "op ANY/ALL (array) erfordert, dass Operator keine Ergebnismenge zurückgibt" @@ -14635,158 +15647,159 @@ msgstr "op ANY/ALL (array) erfordert, dass Operator keine Ergebnismenge zurückg msgid "inconsistent types deduced for parameter $%d" msgstr "inkonsistente Typen für Parameter $%d ermittelt" -#: parser/parse_relation.c:175 +#: parser/parse_relation.c:176 #, c-format msgid "table reference \"%s\" is ambiguous" msgstr "Tabellenbezug »%s« ist nicht eindeutig" -#: parser/parse_relation.c:219 +#: parser/parse_relation.c:220 #, c-format msgid "table reference %u is ambiguous" msgstr "Tabellenbezug %u ist nicht eindeutig" -#: parser/parse_relation.c:398 +#: parser/parse_relation.c:419 #, c-format msgid "table name \"%s\" specified more than once" msgstr "Tabellenname »%s« mehrmals angegeben" -#: parser/parse_relation.c:425 parser/parse_relation.c:3050 +#: parser/parse_relation.c:446 parser/parse_relation.c:3227 #, c-format msgid "invalid reference to FROM-clause entry for table \"%s\"" msgstr "ungültiger Verweis auf FROM-Klausel-Eintrag für Tabelle »%s«" -#: parser/parse_relation.c:428 parser/parse_relation.c:3055 +#: parser/parse_relation.c:449 parser/parse_relation.c:3232 #, c-format msgid "There is an entry for table \"%s\", but it cannot be referenced from this part of the query."
msgstr "Es gibt einen Eintrag für Tabelle »%s«, aber auf ihn kann aus diesem Teil der Anfrage nicht verwiesen werden." -#: parser/parse_relation.c:430 +#: parser/parse_relation.c:451 #, c-format msgid "The combining JOIN type must be INNER or LEFT for a LATERAL reference." msgstr "Der JOIN-Typ für LATERAL muss INNER oder LEFT sein." -#: parser/parse_relation.c:706 +#: parser/parse_relation.c:727 #, c-format msgid "system column \"%s\" reference in check constraint is invalid" msgstr "Verweis auf Systemspalte »%s« im Check-Constraint ist ungültig" -#: parser/parse_relation.c:1065 parser/parse_relation.c:1345 -#: parser/parse_relation.c:1914 +#: parser/parse_relation.c:1086 parser/parse_relation.c:1366 +#: parser/parse_relation.c:1936 #, c-format msgid "table \"%s\" has %d columns available but %d columns specified" msgstr "Tabelle »%s« hat %d Spalten, aber %d Spalten wurden angegeben" -#: parser/parse_relation.c:1152 +#: parser/parse_relation.c:1173 #, c-format msgid "There is a WITH item named \"%s\", but it cannot be referenced from this part of the query." msgstr "Es gibt ein WITH-Element namens »%s«, aber darauf kann aus diesem Teil der Anfrage kein Bezug genommen werden." -#: parser/parse_relation.c:1154 +#: parser/parse_relation.c:1175 #, c-format msgid "Use WITH RECURSIVE, or re-order the WITH items to remove forward references." msgstr "Verwenden Sie WITH RECURSIVE oder sortieren Sie die WITH-Ausdrücke um, um Vorwärtsreferenzen zu entfernen." -#: parser/parse_relation.c:1465 +#: parser/parse_relation.c:1486 #, c-format msgid "a column definition list is only allowed for functions returning \"record\"" msgstr "eine Spaltendefinitionsliste ist nur erlaubt bei Funktionen, die »record« zurückgeben" -#: parser/parse_relation.c:1474 +#: parser/parse_relation.c:1495 #, c-format msgid "a column definition list is required for functions returning \"record\"" msgstr "eine Spaltendefinitionsliste ist erforderlich bei Funktionen, die »record« zurückgeben" -#: parser/parse_relation.c:1553 +#: parser/parse_relation.c:1575 #, c-format msgid "function \"%s\" in FROM has unsupported return type %s" msgstr "Funktion »%s« in FROM hat nicht unterstützten Rückgabetyp %s" -#: parser/parse_relation.c:1742 +#: parser/parse_relation.c:1764 #, c-format msgid "VALUES lists \"%s\" have %d columns available but %d columns specified" msgstr "VALUES-Liste »%s« hat %d Spalten verfügbar, aber %d Spalten wurden angegeben" -#: parser/parse_relation.c:1797 +#: parser/parse_relation.c:1819 #, c-format msgid "joins can have at most %d columns" msgstr "Verbunde können höchstens %d Spalten haben" -#: parser/parse_relation.c:1887 +#: parser/parse_relation.c:1909 #, c-format msgid "WITH query \"%s\" does not have a RETURNING clause" msgstr "WITH-Anfrage »%s« hat keine RETURNING-Klausel" -#: parser/parse_relation.c:2685 parser/parse_relation.c:2834 +#: parser/parse_relation.c:2846 parser/parse_relation.c:2884 +#: parser/parse_relation.c:3011 #, c-format msgid "column %d of relation \"%s\" does not exist" msgstr "Spalte %d von Relation »%s« existiert nicht" -#: parser/parse_relation.c:3053 +#: parser/parse_relation.c:3230 #, c-format msgid "Perhaps you meant to reference the table alias \"%s\"." msgstr "Vielleicht wurde beabsichtigt, auf den Tabellenalias »%s« zu verweisen." 
-#: parser/parse_relation.c:3061 +#: parser/parse_relation.c:3238 #, c-format msgid "missing FROM-clause entry for table \"%s\"" msgstr "fehlender Eintrag in FROM-Klausel für Tabelle »%s«" -#: parser/parse_relation.c:3113 +#: parser/parse_relation.c:3290 #, c-format msgid "Perhaps you meant to reference the column \"%s.%s\"." msgstr "Vielleicht wurde beabsichtigt, auf die Spalte »%s.%s« zu verweisen." -#: parser/parse_relation.c:3115 +#: parser/parse_relation.c:3292 #, c-format msgid "There is a column named \"%s\" in table \"%s\", but it cannot be referenced from this part of the query." msgstr "Es gibt eine Spalte namens »%s« in Tabelle »%s«, aber auf sie kann aus diesem Teil der Anfrage nicht verwiesen werden." -#: parser/parse_relation.c:3132 +#: parser/parse_relation.c:3309 #, c-format msgid "Perhaps you meant to reference the column \"%s.%s\" or the column \"%s.%s\"." msgstr "Vielleicht wurde beabsichtigt, auf die Spalte »%s.%s« oder die Spalte »%s.%s« zu verweisen." -#: parser/parse_target.c:482 parser/parse_target.c:774 +#: parser/parse_target.c:483 parser/parse_target.c:784 #, c-format msgid "cannot assign to system column \"%s\"" msgstr "kann Systemspalte »%s« keinen Wert zuweisen" -#: parser/parse_target.c:510 +#: parser/parse_target.c:511 #, c-format msgid "cannot set an array element to DEFAULT" msgstr "kann Arrayelement nicht auf DEFAULT setzen" -#: parser/parse_target.c:515 +#: parser/parse_target.c:516 #, c-format msgid "cannot set a subfield to DEFAULT" msgstr "kann Subfeld nicht auf DEFAULT setzen" -#: parser/parse_target.c:584 +#: parser/parse_target.c:585 #, c-format msgid "column \"%s\" is of type %s but expression is of type %s" msgstr "Spalte »%s« hat Typ %s, aber der Ausdruck hat Typ %s" -#: parser/parse_target.c:758 +#: parser/parse_target.c:768 #, c-format msgid "cannot assign to field \"%s\" of column \"%s\" because its type %s is not a composite type" msgstr "kann Feld »%s« in Spalte »%s« nicht setzen, weil ihr Typ %s kein zusammengesetzter Typ ist" -#: parser/parse_target.c:767 +#: parser/parse_target.c:777 #, c-format msgid "cannot assign to field \"%s\" of column \"%s\" because there is no such column in data type %s" msgstr "kann Feld »%s« in Spalte »%s« nicht setzen, weil es keine solche Spalte in Datentyp %s gibt" -#: parser/parse_target.c:834 +#: parser/parse_target.c:854 #, c-format msgid "array assignment to \"%s\" requires type %s but expression is of type %s" msgstr "Wertzuweisung für »%s« erfordert Typ %s, aber Ausdruck hat Typ %s" -#: parser/parse_target.c:844 +#: parser/parse_target.c:864 #, c-format msgid "subfield \"%s\" is of type %s but expression is of type %s" msgstr "Subfeld »%s« hat Typ %s, aber der Ausdruck hat Typ %s" -#: parser/parse_target.c:1260 +#: parser/parse_target.c:1283 #, c-format msgid "SELECT * with no tables specified is not valid" msgstr "SELECT * ist nicht gültig, wenn keine Tabellen angegeben sind" @@ -14806,7 +15819,7 @@ msgstr "falscher %%TYPE-Verweis (zu viele Namensteile): %s" msgid "type reference %s converted to %s" msgstr "Typverweis %s in %s umgewandelt" -#: parser/parse_type.c:261 parser/parse_type.c:804 utils/cache/typcache.c:243 +#: parser/parse_type.c:261 parser/parse_type.c:838 utils/cache/typcache.c:373 #, c-format msgid "type \"%s\" is only a shell" msgstr "Typ »%s« ist nur eine Hülle" @@ -14821,312 +15834,408 @@ msgstr "Typmodifikator ist für Typ »%s« nicht erlaubt" msgid "type modifiers must be simple constants or identifiers" msgstr "Typmodifikatoren müssen einfache Konstanten oder Bezeichner sein" -#: 
parser/parse_type.c:670 parser/parse_type.c:769 +#: parser/parse_type.c:704 parser/parse_type.c:803 #, c-format msgid "invalid type name \"%s\"" msgstr "ungültiger Typname: »%s«" -#: parser/parse_utilcmd.c:263 -#, fuzzy, c-format -#| msgid "cannot create temporary tables in parallel mode" +#: parser/parse_utilcmd.c:272 +#, c-format msgid "cannot create partitioned table as inheritance child" -msgstr "im Parallelmodus können keine temporären Tabellen erzeugt werden" +msgstr "partitionierte Tabelle kann nicht als Vererbungskind erzeugt werden" -#: parser/parse_utilcmd.c:268 -#, fuzzy, c-format -#| msgid "cannot use more than %d columns in an index" -msgid "cannot partition using more than %d columns" -msgstr "Index kann nicht mehr als %d Spalten enthalten" - -#: parser/parse_utilcmd.c:275 +#: parser/parse_utilcmd.c:447 #, c-format -msgid "cannot list partition using more than one column" -msgstr "" +msgid "%s will create implicit sequence \"%s\" for serial column \"%s.%s\"" +msgstr "%s erstellt implizit eine Sequenz »%s« für die »serial«-Spalte »%s.%s«" -#: parser/parse_utilcmd.c:413 +#: parser/parse_utilcmd.c:570 #, c-format msgid "array of serial is not implemented" msgstr "Array aus Typ serial ist nicht implementiert" -#: parser/parse_utilcmd.c:461 -#, c-format -msgid "%s will create implicit sequence \"%s\" for serial column \"%s.%s\"" -msgstr "%s erstellt implizit eine Sequenz »%s« für die »serial«-Spalte »%s.%s«" - -#: parser/parse_utilcmd.c:554 parser/parse_utilcmd.c:566 +#: parser/parse_utilcmd.c:646 parser/parse_utilcmd.c:658 #, c-format msgid "conflicting NULL/NOT NULL declarations for column \"%s\" of table \"%s\"" msgstr "widersprüchliche NULL/NOT NULL-Deklarationen für Spalte »%s« von Tabelle »%s«" -#: parser/parse_utilcmd.c:578 +#: parser/parse_utilcmd.c:670 #, c-format msgid "multiple default values specified for column \"%s\" of table \"%s\"" msgstr "mehrere Vorgabewerte angegeben für Spalte »%s« von Tabelle »%s«" -#: parser/parse_utilcmd.c:595 parser/parse_utilcmd.c:704 +#: parser/parse_utilcmd.c:687 #, c-format -msgid "primary key constraints are not supported on foreign tables" -msgstr "Primärschlüssel für Fremdtabellen werden nicht unterstützt" +msgid "identity columns are not supported on typed tables" +msgstr "Identitätsspalten in getypten Tabellen werden nicht unterstützt" -#: parser/parse_utilcmd.c:601 parser/parse_utilcmd.c:710 -#, fuzzy, c-format -#| msgid "primary key constraints are not supported on foreign tables" -msgid "primary key constraints are not supported on partitioned tables" +#: parser/parse_utilcmd.c:691 +#, c-format +msgid "identity columns are not supported on partitions" +msgstr "Identitätsspalten in Partitionen werden nicht unterstützt" + +#: parser/parse_utilcmd.c:700 +#, c-format +msgid "multiple identity specifications for column \"%s\" of table \"%s\"" +msgstr "mehrere Identitätsangaben für Spalte »%s« von Tabelle »%s«" + +#: parser/parse_utilcmd.c:723 parser/parse_utilcmd.c:822 +#, c-format +msgid "primary key constraints are not supported on foreign tables" msgstr "Primärschlüssel für Fremdtabellen werden nicht unterstützt" -#: parser/parse_utilcmd.c:610 parser/parse_utilcmd.c:720 +#: parser/parse_utilcmd.c:732 parser/parse_utilcmd.c:832 #, c-format msgid "unique constraints are not supported on foreign tables" msgstr "Unique-Constraints auf Fremdtabellen werden nicht unterstützt" -#: parser/parse_utilcmd.c:616 parser/parse_utilcmd.c:726 -#, fuzzy, c-format -#| msgid "unique constraints are not supported on foreign tables"
-msgid "unique constraints are not supported on partitioned tables" -msgstr "Unique-Constraints auf Fremdtabellen werden nicht unterstützt" - -#: parser/parse_utilcmd.c:633 parser/parse_utilcmd.c:756 +#: parser/parse_utilcmd.c:749 parser/parse_utilcmd.c:862 #, c-format msgid "foreign key constraints are not supported on foreign tables" msgstr "Fremdschlüssel-Constraints auf Fremdtabellen werden nicht unterstützt" -#: parser/parse_utilcmd.c:639 parser/parse_utilcmd.c:762 -#, fuzzy, c-format -#| msgid "foreign key constraints are not supported on foreign tables" -msgid "foreign key constraints are not supported on partitioned tables" -msgstr "Fremdschlüssel-Constraints auf Fremdtabellen werden nicht unterstützt" +#: parser/parse_utilcmd.c:777 +#, c-format +msgid "both default and identity specified for column \"%s\" of table \"%s\"" +msgstr "sowohl Vorgabewert als auch Identität angegeben für Spalte »%s« von Tabelle »%s«" -#: parser/parse_utilcmd.c:736 +#: parser/parse_utilcmd.c:842 #, c-format msgid "exclusion constraints are not supported on foreign tables" msgstr "Exclusion-Constraints auf Fremdtabellen werden nicht unterstützt" -#: parser/parse_utilcmd.c:742 -#, fuzzy, c-format -#| msgid "exclusion constraints are not supported on foreign tables" +#: parser/parse_utilcmd.c:848 +#, c-format msgid "exclusion constraints are not supported on partitioned tables" -msgstr "Exclusion-Constraints auf Fremdtabellen werden nicht unterstützt" +msgstr "Exclusion-Constraints auf partitionierten Tabellen werden nicht unterstützt" -#: parser/parse_utilcmd.c:812 +#: parser/parse_utilcmd.c:912 #, c-format msgid "LIKE is not supported for creating foreign tables" msgstr "LIKE wird für das Erzeugen von Fremdtabellen nicht unterstützt" -#: parser/parse_utilcmd.c:1344 parser/parse_utilcmd.c:1420 +#: parser/parse_utilcmd.c:1517 parser/parse_utilcmd.c:1624 #, c-format msgid "Index \"%s\" contains a whole-row table reference." msgstr "Index »%s« enthält einen Verweis auf die ganze Zeile der Tabelle." -#: parser/parse_utilcmd.c:1689 +#: parser/parse_utilcmd.c:1974 #, c-format msgid "cannot use an existing index in CREATE TABLE" msgstr "bestehender Index kann nicht in CREATE TABLE verwendet werden" -#: parser/parse_utilcmd.c:1709 +#: parser/parse_utilcmd.c:1994 #, c-format msgid "index \"%s\" is already associated with a constraint" msgstr "Index »%s« gehört bereits zu einem Constraint" -#: parser/parse_utilcmd.c:1717 +#: parser/parse_utilcmd.c:2002 #, c-format msgid "index \"%s\" does not belong to table \"%s\"" msgstr "Index »%s« gehört nicht zu Tabelle »%s«" -#: parser/parse_utilcmd.c:1724 +#: parser/parse_utilcmd.c:2009 #, c-format msgid "index \"%s\" is not valid" msgstr "Index »%s« ist nicht gültig" -#: parser/parse_utilcmd.c:1730 +#: parser/parse_utilcmd.c:2015 #, c-format msgid "\"%s\" is not a unique index" msgstr "»%s« ist kein Unique Index" -#: parser/parse_utilcmd.c:1731 parser/parse_utilcmd.c:1738 -#: parser/parse_utilcmd.c:1745 parser/parse_utilcmd.c:1815 +#: parser/parse_utilcmd.c:2016 parser/parse_utilcmd.c:2023 +#: parser/parse_utilcmd.c:2030 parser/parse_utilcmd.c:2102 #, c-format msgid "Cannot create a primary key or unique constraint using such an index." msgstr "Ein Primärschlüssel oder Unique-Constraint kann nicht mit einem solchen Index erzeugt werden." 
-#: parser/parse_utilcmd.c:1737 +#: parser/parse_utilcmd.c:2022 #, c-format msgid "index \"%s\" contains expressions" msgstr "Index »%s« enthält Ausdrücke" -#: parser/parse_utilcmd.c:1744 +#: parser/parse_utilcmd.c:2029 #, c-format msgid "\"%s\" is a partial index" msgstr "»%s« ist ein partieller Index" -#: parser/parse_utilcmd.c:1756 +#: parser/parse_utilcmd.c:2041 #, c-format msgid "\"%s\" is a deferrable index" msgstr "»%s« ist ein aufschiebbarer Index" -#: parser/parse_utilcmd.c:1757 +#: parser/parse_utilcmd.c:2042 #, c-format msgid "Cannot create a non-deferrable constraint using a deferrable index." msgstr "Ein nicht aufschiebbarer Constraint kann nicht mit einem aufschiebbaren Index erzeugt werden." -#: parser/parse_utilcmd.c:1814 +#: parser/parse_utilcmd.c:2101 #, c-format msgid "index \"%s\" does not have default sorting behavior" msgstr "Index »%s« hat nicht das Standardsortierverhalten" -#: parser/parse_utilcmd.c:1958 +#: parser/parse_utilcmd.c:2250 #, c-format msgid "column \"%s\" appears twice in primary key constraint" msgstr "Spalte »%s« erscheint zweimal im Primärschlüssel-Constraint" -#: parser/parse_utilcmd.c:1964 +#: parser/parse_utilcmd.c:2256 #, c-format msgid "column \"%s\" appears twice in unique constraint" msgstr "Spalte »%s« erscheint zweimal im Unique-Constraint" -#: parser/parse_utilcmd.c:2173 +#: parser/parse_utilcmd.c:2579 #, c-format msgid "index expressions and predicates can refer only to the table being indexed" msgstr "Indexausdrücke und -prädikate können nur auf die zu indizierende Tabelle verweisen" -#: parser/parse_utilcmd.c:2219 +#: parser/parse_utilcmd.c:2625 #, c-format msgid "rules on materialized views are not supported" msgstr "Regeln für materialisierte Sichten werden nicht unterstützt" -#: parser/parse_utilcmd.c:2280 +#: parser/parse_utilcmd.c:2686 #, c-format msgid "rule WHERE condition cannot contain references to other relations" msgstr "WHERE-Bedingung einer Regel kann keine Verweise auf andere Relationen enthalten" -#: parser/parse_utilcmd.c:2352 +#: parser/parse_utilcmd.c:2758 #, c-format msgid "rules with WHERE conditions can only have SELECT, INSERT, UPDATE, or DELETE actions" msgstr "Regeln mit WHERE-Bedingungen können als Aktion nur SELECT, INSERT, UPDATE oder DELETE haben" -#: parser/parse_utilcmd.c:2370 parser/parse_utilcmd.c:2469 -#: rewrite/rewriteHandler.c:498 rewrite/rewriteManip.c:1015 +#: parser/parse_utilcmd.c:2776 parser/parse_utilcmd.c:2875 +#: rewrite/rewriteHandler.c:497 rewrite/rewriteManip.c:1015 #, c-format msgid "conditional UNION/INTERSECT/EXCEPT statements are not implemented" msgstr "UNION/INTERSECTION/EXCEPT mit Bedingung sind nicht implementiert" -#: parser/parse_utilcmd.c:2388 +#: parser/parse_utilcmd.c:2794 #, c-format msgid "ON SELECT rule cannot use OLD" msgstr "ON-SELECT-Regel kann nicht OLD verwenden" -#: parser/parse_utilcmd.c:2392 +#: parser/parse_utilcmd.c:2798 #, c-format msgid "ON SELECT rule cannot use NEW" msgstr "ON-SELECT-Regel kann nicht NEW verwenden" -#: parser/parse_utilcmd.c:2401 +#: parser/parse_utilcmd.c:2807 #, c-format msgid "ON INSERT rule cannot use OLD" msgstr "ON-INSERT-Regel kann nicht OLD verwenden" -#: parser/parse_utilcmd.c:2407 +#: parser/parse_utilcmd.c:2813 #, c-format msgid "ON DELETE rule cannot use NEW" msgstr "ON-DELETE-Regel kann nicht NEW verwenden" -#: parser/parse_utilcmd.c:2435 +#: parser/parse_utilcmd.c:2841 #, c-format msgid "cannot refer to OLD within WITH query" msgstr "in WITH-Anfrage kann nicht auf OLD verwiesen werden" -#: parser/parse_utilcmd.c:2442 +#:
parser/parse_utilcmd.c:2848 #, c-format msgid "cannot refer to NEW within WITH query" msgstr "in WITH-Anfrage kann nicht auf NEW verwiesen werden" -#: parser/parse_utilcmd.c:2766 +#: parser/parse_utilcmd.c:3286 #, c-format msgid "misplaced DEFERRABLE clause" msgstr "falsch platzierte DEFERRABLE-Klausel" -#: parser/parse_utilcmd.c:2771 parser/parse_utilcmd.c:2786 +#: parser/parse_utilcmd.c:3291 parser/parse_utilcmd.c:3306 #, c-format msgid "multiple DEFERRABLE/NOT DEFERRABLE clauses not allowed" msgstr "mehrere DEFERRABLE/NOT DEFERRABLE-Klauseln sind nicht erlaubt" -#: parser/parse_utilcmd.c:2781 +#: parser/parse_utilcmd.c:3301 #, c-format msgid "misplaced NOT DEFERRABLE clause" msgstr "falsch platzierte NOT DEFERRABLE-Klausel" -#: parser/parse_utilcmd.c:2802 +#: parser/parse_utilcmd.c:3322 #, c-format msgid "misplaced INITIALLY DEFERRED clause" msgstr "falsch platzierte INITIALLY DEFERRED-Klausel" -#: parser/parse_utilcmd.c:2807 parser/parse_utilcmd.c:2833 +#: parser/parse_utilcmd.c:3327 parser/parse_utilcmd.c:3353 #, c-format msgid "multiple INITIALLY IMMEDIATE/DEFERRED clauses not allowed" msgstr "mehrere INITIALLY IMMEDIATE/DEFERRED-Klauseln sind nicht erlaubt" -#: parser/parse_utilcmd.c:2828 +#: parser/parse_utilcmd.c:3348 #, c-format msgid "misplaced INITIALLY IMMEDIATE clause" msgstr "falsch platzierte INITIALLY IMMEDIATE-Klausel" -#: parser/parse_utilcmd.c:3019 +#: parser/parse_utilcmd.c:3539 #, c-format msgid "CREATE specifies a schema (%s) different from the one being created (%s)" msgstr "CREATE gibt ein Schema an (%s) welches nicht gleich dem zu erzeugenden Schema ist (%s)" -#: parser/parse_utilcmd.c:3085 +#: parser/parse_utilcmd.c:3573 +#, c-format +msgid "table \"%s\" is not partitioned" +msgstr "Tabelle »%s« ist nicht partitioniert" + +#: parser/parse_utilcmd.c:3580 +#, c-format +msgid "index \"%s\" is not partitioned" +msgstr "Index »%s« ist nicht partitioniert" + +#: parser/parse_utilcmd.c:3614 #, fuzzy, c-format -#| msgid "invalid format specification for an interval value" -msgid "invalid bound specification for a list partition" -msgstr "ungültige Formatangabe für Intervall-Wert" +#| msgid "Partitioned tables cannot have ROW triggers." +msgid "a hash-partitioned table may not have a default partition" +msgstr "Partitionierte Tabellen können keine ROW-Trigger haben." 
-#: parser/parse_utilcmd.c:3108 parser/parse_utilcmd.c:3208 -#: parser/parse_utilcmd.c:3235 +#: parser/parse_utilcmd.c:3631 #, fuzzy, c-format -#| msgid "cannot alter type of column \"%s\" twice" -msgid "specified value cannot be cast to type \"%s\" of column \"%s\"" -msgstr "Typ der Spalte »%s« kann nicht zweimal geändert werden" +#| msgid "invalid bound specification for a list partition" +msgid "invalid bound specification for a hash partition" +msgstr "ungültige Begrenzungsangabe für eine Listenpartition" -#: parser/parse_utilcmd.c:3147 +#: parser/parse_utilcmd.c:3637 partitioning/partbounds.c:2135 #, fuzzy, c-format -#| msgid "invalid format specification for an interval value" -msgid "invalid bound specification for a range partition" -msgstr "ungültige Formatangabe für Intervall-Wert" +#| msgid "%s: duration must be a positive integer (duration is \"%d\")\n" +msgid "modulus for hash partition must be a positive integer" +msgstr "%s: Dauer muss eine positive ganze Zahl sein (Dauer ist »%d«)\n" -#: parser/parse_utilcmd.c:3155 +#: parser/parse_utilcmd.c:3644 partitioning/partbounds.c:2143 #, fuzzy, c-format -#| msgid "must specify at least one column" +#| msgid "precision for type float must be less than 54 bits" +msgid "remainder for hash partition must be less than modulus" +msgstr "Präzision von Typ float muss weniger als 54 Bits sein" + +#: parser/parse_utilcmd.c:3656 +#, c-format +msgid "invalid bound specification for a list partition" +msgstr "ungültige Begrenzungsangabe für eine Listenpartition" + +#: parser/parse_utilcmd.c:3712 +#, c-format +msgid "invalid bound specification for a range partition" +msgstr "ungültige Begrenzungsangabe für eine Bereichspartition" + +#: parser/parse_utilcmd.c:3718 +#, c-format msgid "FROM must specify exactly one value per partitioning column" -msgstr "mindestens eine Spalte muss angegeben werden" +msgstr "FROM muss genau einen Wert pro Partitionierungsspalte angeben" -#: parser/parse_utilcmd.c:3159 -#, fuzzy, c-format -#| msgid "must specify at least one column" +#: parser/parse_utilcmd.c:3722 +#, c-format msgid "TO must specify exactly one value per partitioning column" -msgstr "mindestens eine Spalte muss angegeben werden" +msgstr "TO muss genau einen Wert pro Partitionierungsspalte angeben" -#: parser/parse_utilcmd.c:3197 parser/parse_utilcmd.c:3224 -#, fuzzy, c-format -#| msgid "cannot specify NULL in BINARY mode" +#: parser/parse_utilcmd.c:3769 parser/parse_utilcmd.c:3783 +#, c-format msgid "cannot specify NULL in range bound" -msgstr "NULL kann nicht im BINARY-Modus angegeben werden" +msgstr "NULL kann nicht in der Bereichsgrenze angegeben werden" + +#: parser/parse_utilcmd.c:3830 +#, c-format +msgid "every bound following MAXVALUE must also be MAXVALUE" +msgstr "jede Begrenzung, die auf MAXVALUE folgt, muss auch MAXVALUE sein" + +#: parser/parse_utilcmd.c:3837 +#, c-format +msgid "every bound following MINVALUE must also be MINVALUE" +msgstr "jede Begrenzung, die auf MINVALUE folgt, muss auch MINVALUE sein" + +#: parser/parse_utilcmd.c:3868 parser/parse_utilcmd.c:3880 +#, c-format +msgid "specified value cannot be cast to type %s for column \"%s\"" +msgstr "angegebener Wert kann nicht in Typ %s für Spalte »%s« umgewandelt werden" + +#: parser/parse_utilcmd.c:3882 +#, c-format +msgid "The cast requires a non-immutable conversion." +msgstr "Die Typumwandlung ist nicht »immutable«." + +#: parser/parse_utilcmd.c:3883 +#, c-format +msgid "Try putting the literal value in single quotes." 
+msgstr "Versuchen Sie, den Wert in einfachen Ausführungszeichen zu schreiben." #: parser/scansup.c:204 #, c-format msgid "identifier \"%s\" will be truncated to \"%s\"" msgstr "Bezeichner »%s« wird auf »%s« gekürzt" -#: port/pg_shmem.c:175 port/sysv_shmem.c:175 +#: partitioning/partbounds.c:331 +#, fuzzy, c-format +#| msgid "partition \"%s\" would overlap partition \"%s\"" +msgid "partition \"%s\" conflicts with existing default partition \"%s\"" +msgstr "Partition »%s« würde sich mit Partition »%s« überlappen" + +#: partitioning/partbounds.c:390 +#, c-format +msgid "every hash partition modulus must be a factor of the next larger modulus" +msgstr "" + +#: partitioning/partbounds.c:486 +#, c-format +msgid "empty range bound specified for partition \"%s\"" +msgstr "leere Bereichsgrenze angegeben für Partition »%s«" + +#: partitioning/partbounds.c:488 +#, c-format +msgid "Specified lower bound %s is greater than or equal to upper bound %s." +msgstr "Angegebene Untergrenze %s ist größer als oder gleich der Obergrenze %s." + +#: partitioning/partbounds.c:585 +#, c-format +msgid "partition \"%s\" would overlap partition \"%s\"" +msgstr "Partition »%s« würde sich mit Partition »%s« überlappen" + +#: partitioning/partbounds.c:685 +#, fuzzy, c-format +#| msgid "relation \"%s\" is not a partition of relation \"%s\"" +msgid "skipped scanning foreign table \"%s\" which is a partition of default partition \"%s\"" +msgstr "Relation »%s« ist keine Partition von Relation »%s«" + +#: partitioning/partbounds.c:724 +#, fuzzy, c-format +#| msgid "partition constraint is violated by some row" +msgid "updated partition constraint for default partition \"%s\" would be violated by some row" +msgstr "Partitions-Constraint wird von irgendeiner Zeile verletzt" + +#: partitioning/partbounds.c:2139 +#, c-format +msgid "remainder for hash partition must be a non-negative integer" +msgstr "" + +#: partitioning/partbounds.c:2166 +#, fuzzy, c-format +#| msgid "\"%s\" is a partitioned table" +msgid "\"%s\" is not a hash partitioned table" +msgstr "»%s« ist eine partitionierte Tabelle" + +#: partitioning/partbounds.c:2177 partitioning/partbounds.c:2293 +#, fuzzy, c-format +#| msgid "number of columns does not match number of values" +msgid "number of partitioning columns (%d) does not match number of partition keys provided (%d)" +msgstr "Anzahl der Spalten stimmt nicht mit der Anzahl der Werte überein" + +#: partitioning/partbounds.c:2197 partitioning/partbounds.c:2229 +#, c-format +msgid "column %d of the partition key has type \"%s\", but supplied value is of type \"%s\"" +msgstr "" + +#: port/pg_shmem.c:196 port/sysv_shmem.c:196 #, c-format msgid "could not create shared memory segment: %m" msgstr "konnte Shared-Memory-Segment nicht erzeugen: %m" -#: port/pg_shmem.c:176 port/sysv_shmem.c:176 +#: port/pg_shmem.c:197 port/sysv_shmem.c:197 #, c-format msgid "Failed system call was shmget(key=%lu, size=%zu, 0%o)." msgstr "Fehlgeschlagener Systemaufruf war shmget(Key=%lu, Größe=%zu, 0%o)." 
-#: port/pg_shmem.c:180 port/sysv_shmem.c:180 +#: port/pg_shmem.c:201 port/sysv_shmem.c:201 #, c-format msgid "" "This error usually means that PostgreSQL's request for a shared memory segment exceeded your kernel's SHMMAX parameter, or possibly that it is less than your kernel's SHMMIN parameter.\n" @@ -15135,7 +16244,7 @@ msgstr "" "Dieser Fehler bedeutet gewöhnlich, dass das von PostgreSQL angeforderte Shared-Memory-Segment den Kernel-Parameter SHMMAX überschreitet, oder eventuell, dass es kleiner als der Kernel-Parameter SHMMIN ist.\n" "Die PostgreSQL-Dokumentation enthält weitere Informationen über die Konfiguration von Shared Memory." -#: port/pg_shmem.c:187 port/sysv_shmem.c:187 +#: port/pg_shmem.c:208 port/sysv_shmem.c:208 #, c-format msgid "" "This error usually means that PostgreSQL's request for a shared memory segment exceeded your kernel's SHMALL parameter. You might need to reconfigure the kernel with larger SHMALL.\n" @@ -15144,7 +16253,7 @@ msgstr "" "Dieser Fehler bedeutet gewöhnlich, dass das von PostgreSQL angeforderte Shared-Memory-Segment den Kernel-Parameter SHMALL überschreitet. Sie müssen eventuell den Kernel mit einem größeren SHMALL neu konfigurieren.\n" "Die PostgreSQL-Dokumentation enthält weitere Informationen über die Konfiguration von Shared Memory." -#: port/pg_shmem.c:193 port/sysv_shmem.c:193 +#: port/pg_shmem.c:214 port/sysv_shmem.c:214 #, c-format msgid "" "This error does *not* mean that you have run out of disk space. It occurs either if all available shared memory IDs have been taken, in which case you need to raise the SHMMNI parameter in your kernel, or because the system's overall limit for shared memory has been reached.\n" @@ -15153,24 +16262,24 @@ msgstr "" "Dieser Fehler bedeutet *nicht*, dass kein Platz mehr auf der Festplatte ist. Er tritt auf, wenn entweder alle verfügbaren Shared-Memory-IDs aufgebraucht sind, dann müssen Sie den Kernelparameter SHMMNI erhöhen, oder weil die Systemhöchstgrenze für Shared Memory insgesamt erreicht wurde.\n" "Die PostgreSQL-Dokumentation enthält weitere Informationen über die Konfiguration von Shared Memory." -#: port/pg_shmem.c:483 port/sysv_shmem.c:483 +#: port/pg_shmem.c:505 port/sysv_shmem.c:505 #, c-format msgid "could not map anonymous shared memory: %m" msgstr "konnte anonymes Shared Memory nicht mappen: %m" -#: port/pg_shmem.c:485 port/sysv_shmem.c:485 +#: port/pg_shmem.c:507 port/sysv_shmem.c:507 #, c-format msgid "This error usually means that PostgreSQL's request for a shared memory segment exceeded available memory, swap space, or huge pages. To reduce the request size (currently %zu bytes), reduce PostgreSQL's shared memory usage, perhaps by reducing shared_buffers or max_connections." msgstr "Dieser Fehler bedeutet gewöhnlich, dass das von PostgreSQL angeforderte Shared-Memory-Segment den verfügbaren Speicher, Swap-Space oder Huge Pages überschreitet. Um die benötigte Shared-Memory-Größe zu reduzieren (aktuell %zu Bytes), reduzieren Sie den Shared-Memory-Verbrauch von PostgreSQL, beispielsweise indem Sie »shared_buffers« oder »max_connections« reduzieren."
-#: port/pg_shmem.c:551 port/sysv_shmem.c:551 port/win32_shmem.c:134 +#: port/pg_shmem.c:573 port/sysv_shmem.c:573 #, c-format msgid "huge pages not supported on this platform" msgstr "Huge Pages werden auf dieser Plattform nicht unterstützt" -#: port/pg_shmem.c:646 port/sysv_shmem.c:646 +#: port/pg_shmem.c:668 port/sysv_shmem.c:668 #, c-format msgid "could not stat data directory \"%s\": %m" msgstr "konnte »stat« für Datenverzeichnis »%s« nicht ausführen: %m" @@ -15259,161 +16368,202 @@ msgstr "konnte Semaphore nicht entsperren: Fehlercode %lu" msgid "could not try-lock semaphore: error code %lu" msgstr "konnte Semaphore nicht versuchsweise sperren: Fehlercode %lu" -#: port/win32_shmem.c:173 port/win32_shmem.c:208 port/win32_shmem.c:226 +#: port/win32_shmem.c:122 port/win32_shmem.c:130 port/win32_shmem.c:142 +#: port/win32_shmem.c:157 +#, fuzzy, c-format +#| msgid "could not create shared memory segment: error code %lu" +msgid "could not enable Lock Pages in Memory user right: error code %lu" +msgstr "konnte Shared-Memory-Segment nicht erzeugen: Fehlercode %lu" + +#: port/win32_shmem.c:123 port/win32_shmem.c:131 port/win32_shmem.c:143 +#: port/win32_shmem.c:158 +#, fuzzy, c-format +#| msgid "Failed system call was DuplicateHandle." +msgid "Failed system call was %s." +msgstr "Fehlgeschlagener Systemaufruf war DuplicateHandle." + +#: port/win32_shmem.c:153 +#, fuzzy, c-format +#| msgid "could not create shared memory segment: %m" +msgid "could not enable Lock Pages in Memory user right" +msgstr "konnte Shared-Memory-Segment nicht erzeugen: %m" + +#: port/win32_shmem.c:154 +#, c-format +msgid "Assign Lock Pages in Memory user right to the Windows user account which runs PostgreSQL." +msgstr "" + +#: port/win32_shmem.c:210 +#, fuzzy, c-format +#| msgid "The server (version %s) does not support tablespaces.\n" +msgid "the processor does not support large pages" +msgstr "Der Server (Version %s) unterstützt keine Tablespaces.\n" + +#: port/win32_shmem.c:212 port/win32_shmem.c:217 +#, fuzzy, c-format +#| msgid "disabling triggers for %s\n" +msgid "disabling huge pages" +msgstr "schalte Trigger für %s aus\n" + +#: port/win32_shmem.c:279 port/win32_shmem.c:315 port/win32_shmem.c:333 #, c-format msgid "could not create shared memory segment: error code %lu" msgstr "konnte Shared-Memory-Segment nicht erzeugen: Fehlercode %lu" -#: port/win32_shmem.c:174 +#: port/win32_shmem.c:280 #, c-format msgid "Failed system call was CreateFileMapping(size=%zu, name=%s)." msgstr "Fehlgeschlagener Systemaufruf war CreateFileMapping(Größe=%zu, Name=%s)." -#: port/win32_shmem.c:198 +#: port/win32_shmem.c:305 #, c-format msgid "pre-existing shared memory block is still in use" msgstr "bereits bestehender Shared-Memory-Block wird noch benutzt" -#: port/win32_shmem.c:199 +#: port/win32_shmem.c:306 #, c-format msgid "Check if there are any old server processes still running, and terminate them." msgstr "Prüfen Sie, ob irgendwelche alten Serverprozesse noch laufen und beenden Sie diese." -#: port/win32_shmem.c:209 +#: port/win32_shmem.c:316 #, c-format msgid "Failed system call was DuplicateHandle." msgstr "Fehlgeschlagener Systemaufruf war DuplicateHandle." -#: port/win32_shmem.c:227 +#: port/win32_shmem.c:334 #, c-format msgid "Failed system call was MapViewOfFileEx." msgstr "Fehlgeschlagener Systemaufruf war MapViewOfFileEx." 
-#: postmaster/autovacuum.c:380 +#: postmaster/autovacuum.c:406 #, c-format msgid "could not fork autovacuum launcher process: %m" msgstr "konnte Autovacuum-Launcher-Prozess nicht starten (fork-Fehler): %m" -#: postmaster/autovacuum.c:416 +#: postmaster/autovacuum.c:442 #, c-format msgid "autovacuum launcher started" msgstr "Autovacuum-Launcher startet" -#: postmaster/autovacuum.c:780 +#: postmaster/autovacuum.c:832 #, c-format msgid "autovacuum launcher shutting down" msgstr "Autovacuum-Launcher fährt herunter" -#: postmaster/autovacuum.c:1442 +#: postmaster/autovacuum.c:1494 #, c-format msgid "could not fork autovacuum worker process: %m" msgstr "konnte Autovacuum-Worker-Prozess nicht starten (fork-Fehler): %m" -#: postmaster/autovacuum.c:1640 +#: postmaster/autovacuum.c:1700 #, c-format msgid "autovacuum: processing database \"%s\"" msgstr "Autovacuum: bearbeite Datenbank »%s«" -#: postmaster/autovacuum.c:2214 -#, fuzzy, c-format -#| msgid "autovacuum: dropping orphan temp table \"%s\".\"%s\" in database \"%s\"" +#: postmaster/autovacuum.c:2275 +#, c-format msgid "autovacuum: dropping orphan temp table \"%s.%s.%s\"" -msgstr "Autovacuum: lösche verwaiste temporäre Tabelle »%s.%s« in Datenbank »%s«" +msgstr "Autovacuum: lösche verwaiste temporäre Tabelle »%s.%s.%s«" -#: postmaster/autovacuum.c:2420 +#: postmaster/autovacuum.c:2504 #, c-format msgid "automatic vacuum of table \"%s.%s.%s\"" msgstr "automatisches Vacuum der Tabelle »%s.%s.%s«" -#: postmaster/autovacuum.c:2423 +#: postmaster/autovacuum.c:2507 #, c-format msgid "automatic analyze of table \"%s.%s.%s\"" msgstr "automatisches Analysieren der Tabelle »%s.%s.%s«" -#: postmaster/autovacuum.c:2972 +#: postmaster/autovacuum.c:2700 +#, c-format +msgid "processing work entry for relation \"%s.%s.%s\"" +msgstr "verarbeite Arbeitseintrag für Relation »%s.%s.%s«" + +#: postmaster/autovacuum.c:3279 #, c-format msgid "autovacuum not started because of misconfiguration" msgstr "Autovacuum wegen Fehlkonfiguration nicht gestartet" -#: postmaster/autovacuum.c:2973 +#: postmaster/autovacuum.c:3280 #, c-format msgid "Enable the \"track_counts\" option." msgstr "Schalten Sie die Option »track_counts« ein." 
-#: postmaster/bgworker.c:375 postmaster/bgworker.c:814 +#: postmaster/bgworker.c:395 postmaster/bgworker.c:862 #, c-format msgid "registering background worker \"%s\"" msgstr "registriere Background-Worker »%s«" -#: postmaster/bgworker.c:407 +#: postmaster/bgworker.c:427 #, c-format msgid "unregistering background worker \"%s\"" msgstr "deregistriere Background-Worker »%s«" -#: postmaster/bgworker.c:551 +#: postmaster/bgworker.c:592 #, c-format msgid "background worker \"%s\": must attach to shared memory in order to request a database connection" msgstr "Background-Worker »%s«: muss mit Shared Memory verbinden, um eine Datenbankverbindung anzufordern" -#: postmaster/bgworker.c:560 +#: postmaster/bgworker.c:601 #, c-format msgid "background worker \"%s\": cannot request database access if starting at postmaster start" msgstr "Background-Worker »%s«: kann keinen Datenbankzugriff anfordern, wenn er bei Postmaster-Start gestartet wird" -#: postmaster/bgworker.c:574 +#: postmaster/bgworker.c:615 #, c-format msgid "background worker \"%s\": invalid restart interval" msgstr "Background-Worker »%s«: ungültiges Neustart-Intervall" -#: postmaster/bgworker.c:619 +#: postmaster/bgworker.c:630 +#, c-format +msgid "background worker \"%s\": parallel workers may not be configured for restart" +msgstr "Background-Worker »%s«: parallele Arbeitsprozesse dürfen nicht für Neustart konfiguriert sein" + +#: postmaster/bgworker.c:681 #, c-format msgid "terminating background worker \"%s\" due to administrator command" msgstr "Background-Worker »%s« wird abgebrochen aufgrund von Anweisung des Administrators" -#: postmaster/bgworker.c:830 +#: postmaster/bgworker.c:870 #, c-format msgid "background worker \"%s\": must be registered in shared_preload_libraries" msgstr "Background-Worker »%s«: muss in shared_preload_libraries registriert sein" -#: postmaster/bgworker.c:842 +#: postmaster/bgworker.c:882 #, c-format msgid "background worker \"%s\": only dynamic background workers can request notification" msgstr "Background-Worker »%s«: nur dynamische Background-Worker können Benachrichtigung verlangen" -#: postmaster/bgworker.c:857 +#: postmaster/bgworker.c:897 #, c-format msgid "too many background workers" msgstr "zu viele Background-Worker" -#: postmaster/bgworker.c:858 +#: postmaster/bgworker.c:898 #, c-format msgid "Up to %d background worker can be registered with the current settings." msgid_plural "Up to %d background workers can be registered with the current settings." msgstr[0] "Mit den aktuellen Einstellungen können bis zu %d Background-Worker registriert werden." msgstr[1] "Mit den aktuellen Einstellungen können bis zu %d Background-Worker registriert werden." -#: postmaster/bgworker.c:862 +#: postmaster/bgworker.c:902 #, c-format msgid "Consider increasing the configuration parameter \"max_worker_processes\"." msgstr "Erhöhen Sie eventuell den Konfigurationsparameter »max_worker_processes«." -#: postmaster/checkpointer.c:465 +#: postmaster/checkpointer.c:464 #, c-format msgid "checkpoints are occurring too frequently (%d second apart)" msgid_plural "checkpoints are occurring too frequently (%d seconds apart)" msgstr[0] "Checkpoints passieren zu oft (alle %d Sekunde)" msgstr[1] "Checkpoints passieren zu oft (alle %d Sekunden)" -#: postmaster/checkpointer.c:469 +#: postmaster/checkpointer.c:468 #, c-format msgid "Consider increasing the configuration parameter \"max_wal_size\"." msgstr "Erhöhen Sie eventuell den Konfigurationsparameter »max_wal_size«."
-#: postmaster/checkpointer.c:629 -#, c-format -msgid "transaction log switch forced (archive_timeout=%d)" -msgstr "Umschalten des Transaktionslogs erzwungen (archive_timeout=%d)" - #: postmaster/checkpointer.c:1088 #, c-format msgid "checkpoint request failed" @@ -15429,354 +16579,305 @@ msgstr "Einzelheiten finden Sie in den letzten Meldungen im Serverlog." msgid "compacted fsync request queue from %d entries to %d entries" msgstr "fsync-Anfrageschlange von %d Einträgen auf %d Einträge zusammengefasst" -#: postmaster/pgarch.c:149 +#: postmaster/pgarch.c:148 #, c-format msgid "could not fork archiver: %m" msgstr "konnte Archivierer nicht starten (fork-Fehler): %m" -#: postmaster/pgarch.c:457 +#: postmaster/pgarch.c:456 #, c-format msgid "archive_mode enabled, yet archive_command is not set" msgstr "archive_mode ist an, aber archive_command ist nicht gesetzt" -#: postmaster/pgarch.c:485 +#: postmaster/pgarch.c:484 #, c-format -msgid "archiving transaction log file \"%s\" failed too many times, will try again later" -msgstr "Archivieren der Transaktionslogdatei »%s« schlug zu oft fehl, wird später erneut versucht" +msgid "archiving write-ahead log file \"%s\" failed too many times, will try again later" +msgstr "Archivieren der Write-Ahead-Log-Datei »%s« schlug zu oft fehl, wird später erneut versucht" -#: postmaster/pgarch.c:588 +#: postmaster/pgarch.c:587 #, c-format msgid "archive command failed with exit code %d" msgstr "Archivbefehl ist fehlgeschlagen mit Statuscode %d" -#: postmaster/pgarch.c:590 postmaster/pgarch.c:600 postmaster/pgarch.c:607 -#: postmaster/pgarch.c:613 postmaster/pgarch.c:622 +#: postmaster/pgarch.c:589 postmaster/pgarch.c:599 postmaster/pgarch.c:606 +#: postmaster/pgarch.c:612 postmaster/pgarch.c:621 #, c-format msgid "The failed archive command was: %s" msgstr "Der fehlgeschlagene Archivbefehl war: %s" -#: postmaster/pgarch.c:597 +#: postmaster/pgarch.c:596 #, c-format msgid "archive command was terminated by exception 0x%X" msgstr "Archivbefehl wurde durch Ausnahme 0x%X beendet" -#: postmaster/pgarch.c:599 postmaster/postmaster.c:3527 +#: postmaster/pgarch.c:598 postmaster/postmaster.c:3567 #, c-format msgid "See C include file \"ntstatus.h\" for a description of the hexadecimal value." msgstr "Sehen Sie die Beschreibung des Hexadezimalwerts in der C-Include-Datei »ntstatus.h« nach." 
-#: postmaster/pgarch.c:604 +#: postmaster/pgarch.c:603 #, c-format msgid "archive command was terminated by signal %d: %s" msgstr "Archivbefehl wurde von Signal %d beendet: %s" -#: postmaster/pgarch.c:611 +#: postmaster/pgarch.c:610 #, c-format msgid "archive command was terminated by signal %d" msgstr "Archivbefehl wurde von Signal %d beendet" -#: postmaster/pgarch.c:620 +#: postmaster/pgarch.c:619 #, c-format msgid "archive command exited with unrecognized status %d" msgstr "Archivbefehl hat mit unbekanntem Status %d beendet" -#: postmaster/pgarch.c:632 -#, c-format -msgid "archived transaction log file \"%s\"" -msgstr "archivierte Transaktionslogdatei »%s«" - -#: postmaster/pgarch.c:681 -#, c-format -msgid "could not open archive status directory \"%s\": %m" -msgstr "konnte Archivstatusverzeichnis »%s« nicht öffnen: %m" - -#: postmaster/pgstat.c:360 +#: postmaster/pgstat.c:395 #, c-format msgid "could not resolve \"localhost\": %s" msgstr "konnte »localhost« nicht auflösen: %s" -#: postmaster/pgstat.c:383 +#: postmaster/pgstat.c:418 #, c-format msgid "trying another address for the statistics collector" msgstr "andere Adresse für Statistiksammelprozess wird versucht" -#: postmaster/pgstat.c:392 +#: postmaster/pgstat.c:427 #, c-format msgid "could not create socket for statistics collector: %m" msgstr "konnte Socket für Statistiksammelprozess nicht erzeugen: %m" -#: postmaster/pgstat.c:404 +#: postmaster/pgstat.c:439 #, c-format msgid "could not bind socket for statistics collector: %m" msgstr "konnte Socket für Statistiksammelprozess nicht binden: %m" -#: postmaster/pgstat.c:415 +#: postmaster/pgstat.c:450 #, c-format msgid "could not get address of socket for statistics collector: %m" msgstr "konnte Adresse für Socket für Statistiksammelprozess nicht ermitteln: %m" -#: postmaster/pgstat.c:431 +#: postmaster/pgstat.c:466 #, c-format msgid "could not connect socket for statistics collector: %m" msgstr "konnte nicht mit Socket für Statistiksammelprozess verbinden: %m" -#: postmaster/pgstat.c:452 +#: postmaster/pgstat.c:487 #, c-format msgid "could not send test message on socket for statistics collector: %m" msgstr "konnte Testnachricht auf Socket für Statistiksammelprozess nicht senden: %m" -#: postmaster/pgstat.c:478 +#: postmaster/pgstat.c:513 #, c-format msgid "select() failed in statistics collector: %m" msgstr "select() im Statistiksammelprozess fehlgeschlagen: %m" -#: postmaster/pgstat.c:493 +#: postmaster/pgstat.c:528 #, c-format msgid "test message did not get through on socket for statistics collector" msgstr "Testnachricht auf Socket für Statistiksammelprozess kam nicht durch" -#: postmaster/pgstat.c:508 +#: postmaster/pgstat.c:543 #, c-format msgid "could not receive test message on socket for statistics collector: %m" msgstr "konnte Testnachricht auf Socket für Statistiksammelprozess nicht empfangen: %m" -#: postmaster/pgstat.c:518 +#: postmaster/pgstat.c:553 #, c-format msgid "incorrect test message transmission on socket for statistics collector" msgstr "fehlerhafte Übertragung der Testnachricht auf Socket für Statistiksammelprozess" -#: postmaster/pgstat.c:541 +#: postmaster/pgstat.c:576 #, c-format msgid "could not set statistics collector socket to nonblocking mode: %m" msgstr "konnte Socket von Statistiksammelprozess nicht auf nicht blockierenden Modus setzen: %m" -#: postmaster/pgstat.c:551 +#: postmaster/pgstat.c:615 #, c-format msgid "disabling statistics collector for lack of working socket" msgstr "Statistiksammelprozess abgeschaltet wegen nicht funktionierender
Socket" -#: postmaster/pgstat.c:698 +#: postmaster/pgstat.c:762 #, c-format msgid "could not fork statistics collector: %m" msgstr "konnte Statistiksammelprozess nicht starten (fork-Fehler): %m" -#: postmaster/pgstat.c:1266 +#: postmaster/pgstat.c:1342 #, c-format msgid "unrecognized reset target: \"%s\"" msgstr "unbekanntes Reset-Ziel: »%s«" -#: postmaster/pgstat.c:1267 +#: postmaster/pgstat.c:1343 #, c-format msgid "Target must be \"archiver\" or \"bgwriter\"." msgstr "Das Reset-Ziel muss »archiver« oder »bgwriter« sein." -#: postmaster/pgstat.c:3816 +#: postmaster/pgstat.c:4362 #, c-format msgid "could not read statistics message: %m" msgstr "konnte Statistiknachricht nicht lesen: %m" -#: postmaster/pgstat.c:4148 postmaster/pgstat.c:4305 +#: postmaster/pgstat.c:4694 postmaster/pgstat.c:4851 #, c-format msgid "could not open temporary statistics file \"%s\": %m" msgstr "konnte temporäre Statistikdatei »%s« nicht öffnen: %m" -#: postmaster/pgstat.c:4215 postmaster/pgstat.c:4350 +#: postmaster/pgstat.c:4761 postmaster/pgstat.c:4896 #, c-format msgid "could not write temporary statistics file \"%s\": %m" msgstr "konnte temporäre Statistikdatei »%s« nicht schreiben: %m" -#: postmaster/pgstat.c:4224 postmaster/pgstat.c:4359 +#: postmaster/pgstat.c:4770 postmaster/pgstat.c:4905 #, c-format msgid "could not close temporary statistics file \"%s\": %m" msgstr "konnte temporäre Statistikdatei »%s« nicht schließen: %m" -#: postmaster/pgstat.c:4232 postmaster/pgstat.c:4367 +#: postmaster/pgstat.c:4778 postmaster/pgstat.c:4913 #, c-format msgid "could not rename temporary statistics file \"%s\" to \"%s\": %m" msgstr "konnte temporäre Statistikdatei »%s« nicht in »%s« umbenennen: %m" -#: postmaster/pgstat.c:4456 postmaster/pgstat.c:4641 postmaster/pgstat.c:4794 +#: postmaster/pgstat.c:5002 postmaster/pgstat.c:5208 postmaster/pgstat.c:5361 #, c-format msgid "could not open statistics file \"%s\": %m" msgstr "konnte Statistikdatei »%s« nicht öffnen: %m" -#: postmaster/pgstat.c:4468 postmaster/pgstat.c:4478 postmaster/pgstat.c:4488 -#: postmaster/pgstat.c:4509 postmaster/pgstat.c:4524 postmaster/pgstat.c:4578 -#: postmaster/pgstat.c:4653 postmaster/pgstat.c:4673 postmaster/pgstat.c:4691 -#: postmaster/pgstat.c:4707 postmaster/pgstat.c:4725 postmaster/pgstat.c:4741 -#: postmaster/pgstat.c:4806 postmaster/pgstat.c:4818 postmaster/pgstat.c:4830 -#: postmaster/pgstat.c:4855 postmaster/pgstat.c:4877 +#: postmaster/pgstat.c:5014 postmaster/pgstat.c:5024 postmaster/pgstat.c:5045 +#: postmaster/pgstat.c:5067 postmaster/pgstat.c:5082 postmaster/pgstat.c:5145 +#: postmaster/pgstat.c:5220 postmaster/pgstat.c:5240 postmaster/pgstat.c:5258 +#: postmaster/pgstat.c:5274 postmaster/pgstat.c:5292 postmaster/pgstat.c:5308 +#: postmaster/pgstat.c:5373 postmaster/pgstat.c:5385 postmaster/pgstat.c:5397 +#: postmaster/pgstat.c:5422 postmaster/pgstat.c:5444 #, c-format msgid "corrupted statistics file \"%s\"" msgstr "verfälschte Statistikdatei »%s«" -#: postmaster/pgstat.c:5006 +#: postmaster/pgstat.c:5573 #, c-format msgid "using stale statistics instead of current ones because stats collector is not responding" msgstr "verwende veraltete Statistiken anstatt aktueller, weil der Statistiksammelprozess nicht antwortet" -#: postmaster/pgstat.c:5333 +#: postmaster/pgstat.c:5900 #, c-format msgid "database hash table corrupted during cleanup --- abort" msgstr "Datenbank-Hash-Tabelle beim Aufräumen verfälscht --- Abbruch" -#: postmaster/postmaster.c:692 +#: postmaster/postmaster.c:717 #, c-format msgid "%s: invalid argument for 
option -f: \"%s\"\n" msgstr "%s: ungültiges Argument für Option -f: »%s«\n" -#: postmaster/postmaster.c:778 +#: postmaster/postmaster.c:803 #, c-format msgid "%s: invalid argument for option -t: \"%s\"\n" msgstr "%s: ungültiges Argument für Option -t: »%s«\n" -#: postmaster/postmaster.c:829 +#: postmaster/postmaster.c:854 #, c-format msgid "%s: invalid argument: \"%s\"\n" msgstr "%s: ungültiges Argument: »%s«\n" -#: postmaster/postmaster.c:868 -#, c-format -msgid "%s: superuser_reserved_connections must be less than max_connections\n" +#: postmaster/postmaster.c:896 +#, fuzzy, c-format +#| msgid "%s: superuser_reserved_connections must be less than max_connections\n" +msgid "%s: superuser_reserved_connections (%d) plus max_wal_senders (%d) must be less than max_connections (%d)\n" msgstr "%s: superuser_reserved_connections muss kleiner als max_connections sein\n" -#: postmaster/postmaster.c:873 -#, c-format -msgid "%s: max_wal_senders must be less than max_connections\n" -msgstr "%s: max_wal_senders muss kleiner als max_connections sein\n" - -#: postmaster/postmaster.c:878 +#: postmaster/postmaster.c:903 #, c-format msgid "WAL archival cannot be enabled when wal_level is \"minimal\"" msgstr "WAL-Archivierung kann nicht eingeschaltet werden, wenn wal_level »minimal« ist" -#: postmaster/postmaster.c:881 +#: postmaster/postmaster.c:906 #, c-format msgid "WAL streaming (max_wal_senders > 0) requires wal_level \"replica\" or \"logical\"" msgstr "WAL-Streaming (max_wal_senders > 0) benötigt wal_level »replica« oder »logical«" -#: postmaster/postmaster.c:889 +#: postmaster/postmaster.c:914 #, c-format msgid "%s: invalid datetoken tables, please fix\n" msgstr "%s: ungültige datetoken-Tabellen, bitte reparieren\n" -#: postmaster/postmaster.c:992 postmaster/postmaster.c:1090 -#: utils/init/miscinit.c:1429 +#: postmaster/postmaster.c:1028 postmaster/postmaster.c:1126 +#: utils/init/miscinit.c:1547 #, c-format msgid "invalid list syntax in parameter \"%s\"" msgstr "ungültige Listensyntax für Parameter »%s«" -#: postmaster/postmaster.c:1023 +#: postmaster/postmaster.c:1059 #, c-format msgid "could not create listen socket for \"%s\"" msgstr "konnte Listen-Socket für »%s« nicht erzeugen" -#: postmaster/postmaster.c:1029 +#: postmaster/postmaster.c:1065 #, c-format msgid "could not create any TCP/IP sockets" msgstr "konnte keine TCP/IP-Sockets erstellen" -#: postmaster/postmaster.c:1112 +#: postmaster/postmaster.c:1148 #, c-format msgid "could not create Unix-domain socket in directory \"%s\"" msgstr "konnte Unix-Domain-Socket in Verzeichnis »%s« nicht erzeugen" -#: postmaster/postmaster.c:1118 +#: postmaster/postmaster.c:1154 #, c-format msgid "could not create any Unix-domain sockets" msgstr "konnte keine Unix-Domain-Sockets erzeugen" -#: postmaster/postmaster.c:1130 +#: postmaster/postmaster.c:1166 #, c-format msgid "no socket created for listening" msgstr "keine Listen-Socket erzeugt" -#: postmaster/postmaster.c:1170 +#: postmaster/postmaster.c:1206 #, c-format msgid "could not create I/O completion port for child queue" msgstr "konnte Ein-/Ausgabe-Completion-Port für Child-Queue nicht erzeugen" -#: postmaster/postmaster.c:1199 +#: postmaster/postmaster.c:1235 #, c-format msgid "%s: could not change permissions of external PID file \"%s\": %s\n" msgstr "%s: konnte Rechte der externen PID-Datei »%s« nicht ändern: %s\n" -#: postmaster/postmaster.c:1203 +#: postmaster/postmaster.c:1239 #, c-format msgid "%s: could not write external PID file \"%s\": %s\n" msgstr "%s: konnte externe PID-Datei »%s« nicht 
schreiben: %s\n" -#: postmaster/postmaster.c:1260 +#: postmaster/postmaster.c:1296 #, c-format msgid "ending log output to stderr" msgstr "Logausgabe nach stderr endet" -#: postmaster/postmaster.c:1261 +#: postmaster/postmaster.c:1297 #, c-format msgid "Future log output will go to log destination \"%s\"." msgstr "Die weitere Logausgabe geht an Logziel »%s«." -#: postmaster/postmaster.c:1287 utils/init/postinit.c:213 +#: postmaster/postmaster.c:1323 utils/init/postinit.c:214 #, c-format msgid "could not load pg_hba.conf" msgstr "konnte pg_hba.conf nicht laden" -#: postmaster/postmaster.c:1313 +#: postmaster/postmaster.c:1349 #, c-format msgid "postmaster became multithreaded during startup" msgstr "Postmaster ist während des Starts multithreaded geworden" -#: postmaster/postmaster.c:1314 +#: postmaster/postmaster.c:1350 #, c-format msgid "Set the LC_ALL environment variable to a valid locale." msgstr "Setzen Sie die Umgebungsvariable LC_ALL auf eine gültige Locale." -#: postmaster/postmaster.c:1413 +#: postmaster/postmaster.c:1455 #, c-format msgid "%s: could not locate matching postgres executable" msgstr "%s: konnte kein passendes Programm »postgres« finden" -#: postmaster/postmaster.c:1436 utils/misc/tzparser.c:341 +#: postmaster/postmaster.c:1478 utils/misc/tzparser.c:341 #, c-format msgid "This may indicate an incomplete PostgreSQL installation, or that the file \"%s\" has been moved away from its proper location." msgstr "Dies kann auf eine unvollständige PostgreSQL-Installation hindeuten, oder darauf, dass die Datei »%s« von ihrer richtigen Stelle verschoben worden ist." -#: postmaster/postmaster.c:1464 -#, c-format -msgid "data directory \"%s\" does not exist" -msgstr "Datenverzeichnis »%s« existiert nicht" - -#: postmaster/postmaster.c:1469 -#, c-format -msgid "could not read permissions of directory \"%s\": %m" -msgstr "konnte Zugriffsrechte von Verzeichnis »%s« nicht lesen: %m" - -#: postmaster/postmaster.c:1477 -#, c-format -msgid "specified data directory \"%s\" is not a directory" -msgstr "angegebenes Datenverzeichnis »%s« ist kein Verzeichnis" - -#: postmaster/postmaster.c:1493 -#, c-format -msgid "data directory \"%s\" has wrong ownership" -msgstr "Datenverzeichnis »%s« hat falschen Eigentümer" - -#: postmaster/postmaster.c:1495 -#, c-format -msgid "The server must be started by the user that owns the data directory." -msgstr "Der Server muss von dem Benutzer gestartet werden, dem das Datenverzeichnis gehört." - -#: postmaster/postmaster.c:1515 -#, c-format -msgid "data directory \"%s\" has group or world access" -msgstr "Datenverzeichnis »%s« erlaubt Zugriff von Gruppe oder Welt" - -#: postmaster/postmaster.c:1517 -#, c-format -msgid "Permissions should be u=rwx (0700)." -msgstr "Rechte sollten u=rwx (0700) sein." 
- -#: postmaster/postmaster.c:1528 +#: postmaster/postmaster.c:1505 #, c-format msgid "" "%s: could not find the database system\n" @@ -15787,716 +16888,784 @@ msgstr "" "Es wurde im Verzeichnis »%s« erwartet,\n" "aber die Datei »%s« konnte nicht geöffnet werden: %s\n" -#: postmaster/postmaster.c:1705 +#: postmaster/postmaster.c:1682 #, c-format msgid "select() failed in postmaster: %m" msgstr "select() fehlgeschlagen im Postmaster: %m" -#: postmaster/postmaster.c:1856 +#: postmaster/postmaster.c:1837 #, c-format msgid "performing immediate shutdown because data directory lock file is invalid" msgstr "führe sofortiges Herunterfahren durch, weil Sperrdatei im Datenverzeichnis ungültig ist" -#: postmaster/postmaster.c:1934 postmaster/postmaster.c:1965 +#: postmaster/postmaster.c:1915 postmaster/postmaster.c:1946 #, c-format msgid "incomplete startup packet" msgstr "unvollständiges Startpaket" -#: postmaster/postmaster.c:1946 +#: postmaster/postmaster.c:1927 #, c-format msgid "invalid length of startup packet" msgstr "ungültige Länge des Startpakets" -#: postmaster/postmaster.c:2004 +#: postmaster/postmaster.c:1985 #, c-format msgid "failed to send SSL negotiation response: %m" msgstr "konnte SSL-Verhandlungsantwort nicht senden: %m" -#: postmaster/postmaster.c:2033 +#: postmaster/postmaster.c:2011 #, c-format msgid "unsupported frontend protocol %u.%u: server supports %u.0 to %u.%u" msgstr "nicht unterstütztes Frontend-Protokoll %u.%u: Server unterstützt %u.0 bis %u.%u" -#: postmaster/postmaster.c:2096 utils/misc/guc.c:5726 utils/misc/guc.c:5819 -#: utils/misc/guc.c:7117 utils/misc/guc.c:9870 utils/misc/guc.c:9904 +#: postmaster/postmaster.c:2075 utils/misc/guc.c:5993 utils/misc/guc.c:6086 +#: utils/misc/guc.c:7412 utils/misc/guc.c:10172 utils/misc/guc.c:10206 #, c-format msgid "invalid value for parameter \"%s\": \"%s\"" msgstr "ungültiger Wert für Parameter »%s«: »%s«" -#: postmaster/postmaster.c:2099 +#: postmaster/postmaster.c:2078 #, c-format msgid "Valid values are: \"false\", 0, \"true\", 1, \"database\"." msgstr "Gültige Werte sind: »false«, 0, »true«, 1, »database«." 
-#: postmaster/postmaster.c:2119 +#: postmaster/postmaster.c:2108 #, c-format msgid "invalid startup packet layout: expected terminator as last byte" msgstr "ungültiges Layout des Startpakets: Abschluss als letztes Byte erwartet" -#: postmaster/postmaster.c:2147 +#: postmaster/postmaster.c:2146 #, c-format msgid "no PostgreSQL user name specified in startup packet" msgstr "kein PostgreSQL-Benutzername im Startpaket angegeben" -#: postmaster/postmaster.c:2206 +#: postmaster/postmaster.c:2205 #, c-format msgid "the database system is starting up" msgstr "das Datenbanksystem startet" -#: postmaster/postmaster.c:2211 +#: postmaster/postmaster.c:2210 #, c-format msgid "the database system is shutting down" msgstr "das Datenbanksystem fährt herunter" -#: postmaster/postmaster.c:2216 +#: postmaster/postmaster.c:2215 #, c-format msgid "the database system is in recovery mode" msgstr "das Datenbanksystem ist im Wiederherstellungsmodus" -#: postmaster/postmaster.c:2221 storage/ipc/procarray.c:290 -#: storage/ipc/sinvaladt.c:298 storage/lmgr/proc.c:338 +#: postmaster/postmaster.c:2220 storage/ipc/procarray.c:292 +#: storage/ipc/sinvaladt.c:298 storage/lmgr/proc.c:339 #, c-format msgid "sorry, too many clients already" msgstr "tut mir leid, schon zu viele Verbindungen" -#: postmaster/postmaster.c:2283 +#: postmaster/postmaster.c:2310 #, c-format msgid "wrong key in cancel request for process %d" msgstr "falscher Schlüssel in Stornierungsanfrage für Prozess %d" -#: postmaster/postmaster.c:2291 +#: postmaster/postmaster.c:2318 #, c-format msgid "PID %d in cancel request did not match any process" msgstr "PID %d in Stornierungsanfrage stimmte mit keinem Prozess überein" -#: postmaster/postmaster.c:2502 +#: postmaster/postmaster.c:2529 #, c-format msgid "received SIGHUP, reloading configuration files" msgstr "SIGHUP empfangen, Konfigurationsdateien werden neu geladen" -#: postmaster/postmaster.c:2527 -#, fuzzy, c-format -#| msgid "pg_hba.conf not reloaded" +#: postmaster/postmaster.c:2554 +#, c-format msgid "pg_hba.conf was not reloaded" -msgstr "pg_hba.conf nicht neu geladen" +msgstr "pg_hba.conf wurde nicht neu geladen" -#: postmaster/postmaster.c:2531 -#, fuzzy, c-format -#| msgid "pg_ident.conf not reloaded" +#: postmaster/postmaster.c:2558 +#, c-format msgid "pg_ident.conf was not reloaded" -msgstr "pg_ident.conf nicht neu geladen" +msgstr "pg_ident.conf wurde nicht neu geladen" -#: postmaster/postmaster.c:2541 -#, fuzzy, c-format -#| msgid "text search configuration parameter \"%s\" not recognized" +#: postmaster/postmaster.c:2568 +#, c-format msgid "SSL configuration was not reloaded" -msgstr "Textsuchekonfigurationsparameter »%s« nicht erkannt" +msgstr "SSL-Konfiguration wurde nicht neu geladen" -#: postmaster/postmaster.c:2589 +#: postmaster/postmaster.c:2616 #, c-format msgid "received smart shutdown request" msgstr "intelligentes Herunterfahren verlangt" -#: postmaster/postmaster.c:2644 +#: postmaster/postmaster.c:2674 #, c-format msgid "received fast shutdown request" msgstr "schnelles Herunterfahren verlangt" -#: postmaster/postmaster.c:2674 +#: postmaster/postmaster.c:2707 #, c-format msgid "aborting any active transactions" msgstr "etwaige aktive Transaktionen werden abgebrochen" -#: postmaster/postmaster.c:2708 +#: postmaster/postmaster.c:2741 #, c-format msgid "received immediate shutdown request" msgstr "sofortiges Herunterfahren verlangt" -#: postmaster/postmaster.c:2772 +#: postmaster/postmaster.c:2808 #, c-format msgid "shutdown at recovery target" msgstr "Herunterfahren beim 
Wiederherstellungsziel" -#: postmaster/postmaster.c:2788 postmaster/postmaster.c:2811 +#: postmaster/postmaster.c:2824 postmaster/postmaster.c:2847 msgid "startup process" msgstr "Startprozess" -#: postmaster/postmaster.c:2791 +#: postmaster/postmaster.c:2827 #, c-format msgid "aborting startup due to startup process failure" msgstr "Serverstart abgebrochen wegen Startprozessfehler" -#: postmaster/postmaster.c:2852 +#: postmaster/postmaster.c:2888 #, c-format msgid "database system is ready to accept connections" msgstr "Datenbanksystem ist bereit, um Verbindungen anzunehmen" -#: postmaster/postmaster.c:2871 +#: postmaster/postmaster.c:2909 msgid "background writer process" msgstr "Background-Writer-Prozess" -#: postmaster/postmaster.c:2925 +#: postmaster/postmaster.c:2963 msgid "checkpointer process" msgstr "Checkpointer-Prozess" -#: postmaster/postmaster.c:2941 +#: postmaster/postmaster.c:2979 msgid "WAL writer process" msgstr "WAL-Schreibprozess" -#: postmaster/postmaster.c:2955 +#: postmaster/postmaster.c:2994 msgid "WAL receiver process" msgstr "WAL-Receiver-Prozess" -#: postmaster/postmaster.c:2970 +#: postmaster/postmaster.c:3009 msgid "autovacuum launcher process" msgstr "Autovacuum-Launcher-Prozess" -#: postmaster/postmaster.c:2985 +#: postmaster/postmaster.c:3024 msgid "archiver process" msgstr "Archivierprozess" -#: postmaster/postmaster.c:3001 +#: postmaster/postmaster.c:3040 msgid "statistics collector process" msgstr "Statistiksammelprozess" -#: postmaster/postmaster.c:3015 +#: postmaster/postmaster.c:3054 msgid "system logger process" msgstr "Systemlogger-Prozess" -#: postmaster/postmaster.c:3077 -msgid "worker process" -msgstr "Worker-Prozess" +#: postmaster/postmaster.c:3116 +#, c-format +msgid "background worker \"%s\"" +msgstr "Background-Worker »%s«" -#: postmaster/postmaster.c:3160 postmaster/postmaster.c:3180 -#: postmaster/postmaster.c:3187 postmaster/postmaster.c:3205 +#: postmaster/postmaster.c:3200 postmaster/postmaster.c:3220 +#: postmaster/postmaster.c:3227 postmaster/postmaster.c:3245 msgid "server process" msgstr "Serverprozess" -#: postmaster/postmaster.c:3259 +#: postmaster/postmaster.c:3299 #, c-format msgid "terminating any other active server processes" msgstr "aktive Serverprozesse werden abgebrochen" #. translator: %s is a noun phrase describing a child process, such as #. "server process" -#: postmaster/postmaster.c:3515 +#: postmaster/postmaster.c:3555 #, c-format msgid "%s (PID %d) exited with exit code %d" msgstr "%s (PID %d) beendete mit Status %d" -#: postmaster/postmaster.c:3517 postmaster/postmaster.c:3528 -#: postmaster/postmaster.c:3539 postmaster/postmaster.c:3548 -#: postmaster/postmaster.c:3558 +#: postmaster/postmaster.c:3557 postmaster/postmaster.c:3568 +#: postmaster/postmaster.c:3579 postmaster/postmaster.c:3588 +#: postmaster/postmaster.c:3598 #, c-format msgid "Failed process was running: %s" msgstr "Der fehlgeschlagene Prozess führte aus: %s" #. translator: %s is a noun phrase describing a child process, such as #. "server process" -#: postmaster/postmaster.c:3525 +#: postmaster/postmaster.c:3565 #, c-format msgid "%s (PID %d) was terminated by exception 0x%X" msgstr "%s (PID %d) wurde durch Ausnahme 0x%X beendet" #. translator: %s is a noun phrase describing a child process, such as #. "server process" -#: postmaster/postmaster.c:3535 +#: postmaster/postmaster.c:3575 #, c-format msgid "%s (PID %d) was terminated by signal %d: %s" msgstr "%s (PID %d) wurde von Signal %d beendet: %s" #. 
translator: %s is a noun phrase describing a child process, such as #. "server process" -#: postmaster/postmaster.c:3546 +#: postmaster/postmaster.c:3586 #, c-format msgid "%s (PID %d) was terminated by signal %d" msgstr "%s (PID %d) wurde von Signal %d beendet" #. translator: %s is a noun phrase describing a child process, such as #. "server process" -#: postmaster/postmaster.c:3556 +#: postmaster/postmaster.c:3596 #, c-format msgid "%s (PID %d) exited with unrecognized status %d" msgstr "%s (PID %d) beendete mit unbekanntem Status %d" -#: postmaster/postmaster.c:3743 +#: postmaster/postmaster.c:3783 #, c-format msgid "abnormal database system shutdown" msgstr "abnormales Herunterfahren des Datenbanksystems" -#: postmaster/postmaster.c:3783 +#: postmaster/postmaster.c:3823 #, c-format msgid "all server processes terminated; reinitializing" msgstr "alle Serverprozesse beendet; initialisiere neu" -#: postmaster/postmaster.c:3949 postmaster/postmaster.c:5343 -#: postmaster/postmaster.c:5649 -#, fuzzy, c-format -#| msgid "could not generate random encryption vector" +#: postmaster/postmaster.c:3993 postmaster/postmaster.c:5418 +#: postmaster/postmaster.c:5782 +#, c-format msgid "could not generate random cancel key" -msgstr "konnte zufälligen Verschlüsselungsvektor nicht erzeugen" +msgstr "konnte zufälligen Stornierungsschlüssel nicht erzeugen" -#: postmaster/postmaster.c:4003 +#: postmaster/postmaster.c:4047 #, c-format msgid "could not fork new process for connection: %m" msgstr "konnte neuen Prozess für Verbindung nicht starten (fork-Fehler): %m" -#: postmaster/postmaster.c:4045 +#: postmaster/postmaster.c:4089 msgid "could not fork new process for connection: " msgstr "konnte neuen Prozess für Verbindung nicht starten (fork-Fehler): " -#: postmaster/postmaster.c:4159 +#: postmaster/postmaster.c:4203 #, c-format msgid "connection received: host=%s port=%s" msgstr "Verbindung empfangen: Host=%s Port=%s" -#: postmaster/postmaster.c:4164 +#: postmaster/postmaster.c:4208 #, c-format msgid "connection received: host=%s" msgstr "Verbindung empfangen: Host=%s" -#: postmaster/postmaster.c:4449 +#: postmaster/postmaster.c:4493 #, c-format msgid "could not execute server process \"%s\": %m" msgstr "konnte Serverprozess »%s« nicht ausführen: %m" -#: postmaster/postmaster.c:4792 +#: postmaster/postmaster.c:4646 +#, c-format +msgid "giving up after too many tries to reserve shared memory" +msgstr "Aufgabe nach zu vielen Versuchen, Shared Memory zu reservieren" + +#: postmaster/postmaster.c:4647 +#, c-format +msgid "This might be caused by ASLR or antivirus software." +msgstr "Dies kann durch ASLR oder Antivirus-Software verursacht werden." + +#: postmaster/postmaster.c:4858 #, c-format msgid "SSL configuration could not be loaded in child process" -msgstr "" +msgstr "SSL-Konfiguration konnte im Kindprozess nicht geladen werden" -#: postmaster/postmaster.c:4924 +#: postmaster/postmaster.c:4990 #, c-format msgid "Please report this to <pgsql-bugs@postgresql.org>." msgstr "Bitte berichten Sie das an <pgsql-bugs@postgresql.org>."
-#: postmaster/postmaster.c:5003 +#: postmaster/postmaster.c:5077 #, c-format msgid "database system is ready to accept read only connections" msgstr "Datenbanksystem ist bereit, um lesende Verbindungen anzunehmen" -#: postmaster/postmaster.c:5271 +#: postmaster/postmaster.c:5346 #, c-format msgid "could not fork startup process: %m" msgstr "konnte Startprozess nicht starten (fork-Fehler): %m" -#: postmaster/postmaster.c:5275 +#: postmaster/postmaster.c:5350 #, c-format msgid "could not fork background writer process: %m" msgstr "konnte Background-Writer-Prozess nicht starten (fork-Fehler): %m" -#: postmaster/postmaster.c:5279 +#: postmaster/postmaster.c:5354 #, c-format msgid "could not fork checkpointer process: %m" msgstr "konnte Checkpointer-Prozess nicht starten (fork-Fehler): %m" -#: postmaster/postmaster.c:5283 +#: postmaster/postmaster.c:5358 #, c-format msgid "could not fork WAL writer process: %m" msgstr "konnte WAL-Writer-Prozess nicht starten (fork-Fehler): %m" -#: postmaster/postmaster.c:5287 +#: postmaster/postmaster.c:5362 #, c-format msgid "could not fork WAL receiver process: %m" msgstr "konnte WAL-Receiver-Prozess nicht starten (fork-Fehler): %m" -#: postmaster/postmaster.c:5291 +#: postmaster/postmaster.c:5366 #, c-format msgid "could not fork process: %m" msgstr "konnte Prozess nicht starten (fork-Fehler): %m" -#: postmaster/postmaster.c:5460 postmaster/postmaster.c:5483 +#: postmaster/postmaster.c:5553 postmaster/postmaster.c:5576 #, c-format msgid "database connection requirement not indicated during registration" msgstr "die Notwendigkeit, Datenbankverbindungen zu erzeugen, wurde bei der Registrierung nicht angezeigt" -#: postmaster/postmaster.c:5467 postmaster/postmaster.c:5490 +#: postmaster/postmaster.c:5560 postmaster/postmaster.c:5583 #, c-format msgid "invalid processing mode in background worker" msgstr "ungültiger Verarbeitungsmodus in Background-Worker" -#: postmaster/postmaster.c:5542 +#: postmaster/postmaster.c:5655 #, c-format msgid "starting background worker process \"%s\"" msgstr "starte Background-Worker-Prozess »%s«" -#: postmaster/postmaster.c:5553 +#: postmaster/postmaster.c:5667 #, c-format msgid "could not fork worker process: %m" msgstr "konnte Worker-Prozess nicht starten (fork-Fehler): %m" -#: postmaster/postmaster.c:5950 +#: postmaster/postmaster.c:6100 #, c-format msgid "could not duplicate socket %d for use in backend: error code %d" msgstr "konnte Socket %d nicht für Verwendung in Backend duplizieren: Fehlercode %d" -#: postmaster/postmaster.c:5982 +#: postmaster/postmaster.c:6132 #, c-format msgid "could not create inherited socket: error code %d\n" msgstr "konnte geerbtes Socket nicht erzeugen: Fehlercode %d\n" -#: postmaster/postmaster.c:6011 +#: postmaster/postmaster.c:6161 #, c-format msgid "could not open backend variables file \"%s\": %s\n" msgstr "konnte Servervariablendatei »%s« nicht öffnen: %s\n" -#: postmaster/postmaster.c:6018 +#: postmaster/postmaster.c:6168 #, c-format msgid "could not read from backend variables file \"%s\": %s\n" msgstr "konnte nicht aus Servervariablendatei »%s« lesen: %s\n" -#: postmaster/postmaster.c:6027 +#: postmaster/postmaster.c:6177 #, c-format msgid "could not remove file \"%s\": %s\n" msgstr "konnte Datei »%s« nicht löschen: %s\n" -#: postmaster/postmaster.c:6044 +#: postmaster/postmaster.c:6194 #, c-format msgid "could not map view of backend variables: error code %lu\n" msgstr "konnte Sicht der Backend-Variablen nicht mappen: Fehlercode %lu\n" -#: postmaster/postmaster.c:6053 +#: 
postmaster/postmaster.c:6203 #, c-format msgid "could not unmap view of backend variables: error code %lu\n" msgstr "konnte Sicht der Backend-Variablen nicht unmappen: Fehlercode %lu\n" -#: postmaster/postmaster.c:6060 +#: postmaster/postmaster.c:6210 #, c-format msgid "could not close handle to backend parameter variables: error code %lu\n" msgstr "konnte Handle für Backend-Parametervariablen nicht schließen: Fehlercode %lu\n" -#: postmaster/postmaster.c:6221 +#: postmaster/postmaster.c:6371 #, c-format msgid "could not read exit code for process\n" msgstr "konnte Exitcode des Prozesses nicht lesen\n" -#: postmaster/postmaster.c:6226 +#: postmaster/postmaster.c:6376 #, c-format msgid "could not post child completion status\n" msgstr "konnte Child-Completion-Status nicht versenden\n" -#: postmaster/syslogger.c:452 postmaster/syslogger.c:1053 +#: postmaster/syslogger.c:453 postmaster/syslogger.c:1054 #, c-format msgid "could not read from logger pipe: %m" msgstr "konnte nicht aus Logger-Pipe lesen: %m" -#: postmaster/syslogger.c:502 +#: postmaster/syslogger.c:503 #, c-format msgid "logger shutting down" msgstr "Logger fährt herunter" -#: postmaster/syslogger.c:546 postmaster/syslogger.c:560 +#: postmaster/syslogger.c:547 postmaster/syslogger.c:561 #, c-format msgid "could not create pipe for syslog: %m" msgstr "konnte Pipe für Syslog nicht erzeugen: %m" -#: postmaster/syslogger.c:596 +#: postmaster/syslogger.c:597 #, c-format msgid "could not fork system logger: %m" msgstr "konnte Systemlogger nicht starten (fork-Fehler): %m" -#: postmaster/syslogger.c:632 +#: postmaster/syslogger.c:633 #, c-format msgid "redirecting log output to logging collector process" msgstr "Logausgabe wird an Logsammelprozess umgeleitet" -#: postmaster/syslogger.c:633 +#: postmaster/syslogger.c:634 #, c-format msgid "Future log output will appear in directory \"%s\"." msgstr "Die weitere Logausgabe wird im Verzeichnis »%s« erscheinen." 
-#: postmaster/syslogger.c:641 +#: postmaster/syslogger.c:642 #, c-format msgid "could not redirect stdout: %m" msgstr "konnte Standardausgabe nicht umleiten: %m" -#: postmaster/syslogger.c:646 postmaster/syslogger.c:663 +#: postmaster/syslogger.c:647 postmaster/syslogger.c:664 #, c-format msgid "could not redirect stderr: %m" msgstr "konnte Standardfehlerausgabe nicht umleiten: %m" -#: postmaster/syslogger.c:1008 +#: postmaster/syslogger.c:1009 #, c-format msgid "could not write to log file: %s\n" msgstr "konnte nicht in Logdatei schreiben: %s\n" -#: postmaster/syslogger.c:1150 +#: postmaster/syslogger.c:1151 #, c-format msgid "could not open log file \"%s\": %m" msgstr "konnte Logdatei »%s« nicht öffnen: %m" -#: postmaster/syslogger.c:1212 postmaster/syslogger.c:1256 +#: postmaster/syslogger.c:1213 postmaster/syslogger.c:1257 #, c-format msgid "disabling automatic rotation (use SIGHUP to re-enable)" msgstr "automatische Rotation abgeschaltet (SIGHUP zum Wiederanschalten verwenden)" -#: regex/regc_pg_locale.c:261 +#: regex/regc_pg_locale.c:262 #, c-format msgid "could not determine which collation to use for regular expression" msgstr "konnte die für den regulären Ausdruck zu verwendende Sortierfolge nicht bestimmen" -#: repl_gram.y:280 repl_gram.y:317 +#: repl_gram.y:336 repl_gram.y:368 #, c-format msgid "invalid timeline %u" msgstr "ungültige Zeitleiste %u" -#: repl_scanner.l:122 +#: repl_scanner.l:129 msgid "invalid streaming start location" msgstr "ungültige Streaming-Startposition" -#: repl_scanner.l:173 scan.l:670 +#: repl_scanner.l:180 scan.l:674 msgid "unterminated quoted string" msgstr "Zeichenkette in Anführungszeichen nicht abgeschlossen" -#: repl_scanner.l:183 -#, c-format -msgid "syntax error: unexpected character \"%s\"" -msgstr "Syntaxfehler: unerwartetes Zeichen »%s«" - -#: replication/basebackup.c:303 +#: replication/basebackup.c:336 #, c-format msgid "could not stat control file \"%s\": %m" msgstr "konnte »stat« für Kontrolldatei »%s« nicht ausführen: %m" -#: replication/basebackup.c:412 +#: replication/basebackup.c:443 #, c-format msgid "could not find any WAL files" msgstr "konnte keine WAL-Dateien finden" -#: replication/basebackup.c:425 replication/basebackup.c:439 -#: replication/basebackup.c:448 +#: replication/basebackup.c:457 replication/basebackup.c:472 +#: replication/basebackup.c:481 #, c-format msgid "could not find WAL file \"%s\"" msgstr "konnte WAL-Datei »%s« nicht finden" -#: replication/basebackup.c:487 replication/basebackup.c:513 +#: replication/basebackup.c:520 replication/basebackup.c:548 #, c-format msgid "unexpected WAL file size \"%s\"" msgstr "unerwartete WAL-Dateigröße »%s«" -#: replication/basebackup.c:499 replication/basebackup.c:1228 +#: replication/basebackup.c:534 replication/basebackup.c:1526 #, c-format msgid "base backup could not send data, aborting backup" msgstr "Basissicherung konnte keine Daten senden, Sicherung abgebrochen" -#: replication/basebackup.c:601 replication/basebackup.c:610 -#: replication/basebackup.c:619 replication/basebackup.c:628 -#: replication/basebackup.c:637 replication/basebackup.c:648 -#: replication/basebackup.c:665 +#: replication/basebackup.c:606 +#, fuzzy, c-format +#| msgid " data checksum version\n" +msgid "%s total checksum verification failures" +msgstr " Datenprüfsummenversion\n" + +#: replication/basebackup.c:610 +#, c-format +msgid "checksum verification failure during base backup" +msgstr "" + +#: replication/basebackup.c:654 replication/basebackup.c:663 +#: replication/basebackup.c:672 
replication/basebackup.c:681 +#: replication/basebackup.c:690 replication/basebackup.c:701 +#: replication/basebackup.c:718 replication/basebackup.c:727 #, c-format msgid "duplicate option \"%s\"" msgstr "doppelte Option »%s«" -#: replication/basebackup.c:654 utils/misc/guc.c:5736 -#, c-format -msgid "%d is outside the valid range for parameter \"%s\" (%d .. %d)" -msgstr "%d ist außerhalb des gültigen Bereichs für Parameter »%s« (%d ... %d)" +#: replication/basebackup.c:707 utils/misc/guc.c:6003 +#, c-format +msgid "%d is outside the valid range for parameter \"%s\" (%d .. %d)" +msgstr "%d ist außerhalb des gültigen Bereichs für Parameter »%s« (%d ... %d)" + +#: replication/basebackup.c:981 replication/basebackup.c:1151 +#, c-format +msgid "could not stat file or directory \"%s\": %m" +msgstr "konnte »stat« für Datei oder Verzeichnis »%s« nicht ausführen: %m" + +#: replication/basebackup.c:1306 +#, c-format +msgid "skipping special file \"%s\"" +msgstr "überspringe besondere Datei »%s«" + +#: replication/basebackup.c:1411 +#, fuzzy, c-format +#| msgid "invalid column number %d for table \"%s\"\n" +msgid "invalid segment number %d in file \"%s\"" +msgstr "ungültige Spaltennummer %d in Tabelle »%s«\n" + +#: replication/basebackup.c:1430 +#, c-format +msgid "cannot verify checksum in file \"%s\", block %d: read buffer size %d and page size %d differ" +msgstr "" + +#: replication/basebackup.c:1474 replication/basebackup.c:1490 +#, fuzzy, c-format +#| msgid "could not seek in file \"%s\": %m" +msgid "could not fseek in file \"%s\": %m" +msgstr "konnte Positionszeiger in Datei »%s« nicht setzen: %m" + +#: replication/basebackup.c:1482 +#, fuzzy, c-format +#| msgid "could not read block %u in file \"%s\": %m" +msgid "could not reread block %d of file \"%s\": %m" +msgstr "konnte Block %u in Datei »%s« nicht lesen: %m" + +#: replication/basebackup.c:1506 +#, fuzzy, c-format +#| msgid "page verification failed, calculated checksum %u but expected %u" +msgid "checksum verification failed in file \"%s\", block %d: calculated %X but expected %X" +msgstr "Seitenüberprüfung fehlgeschlagen, berechnete Prüfsumme %u, aber erwartet %u" -#: replication/basebackup.c:928 replication/basebackup.c:1025 +#: replication/basebackup.c:1513 #, c-format -msgid "could not stat file or directory \"%s\": %m" -msgstr "konnte »stat« für Datei oder Verzeichnis »%s« nicht ausführen: %m" +msgid "further checksum verification failures in file \"%s\" will not be reported" +msgstr "" -#: replication/basebackup.c:1180 +#: replication/basebackup.c:1571 #, c-format -msgid "skipping special file \"%s\"" -msgstr "überspringe besondere Datei »%s«" +msgid "file \"%s\" has a total of %d checksum verification failures" +msgstr "" -#: replication/basebackup.c:1293 +#: replication/basebackup.c:1599 #, c-format msgid "file name too long for tar format: \"%s\"" msgstr "Dateiname zu lang für Tar-Format: »%s«" -#: replication/basebackup.c:1298 +#: replication/basebackup.c:1604 #, c-format msgid "symbolic link target too long for tar format: file name \"%s\", target \"%s\"" msgstr "Ziel der symbolischen Verknüpfung zu lang für Tar-Format: Dateiname »%s«, Ziel »%s«" -#: replication/libpqwalreceiver/libpqwalreceiver.c:221 -#, fuzzy, c-format -#| msgid "invalid connection type: %s" +#: replication/libpqwalreceiver/libpqwalreceiver.c:235 +#, c-format msgid "invalid connection string syntax: %s" -msgstr "ungültiger Verbindungstyp: %s" +msgstr "ungültige Syntax für Verbindungszeichenkette: %s" -#: replication/libpqwalreceiver/libpqwalreceiver.c:245 
+#: replication/libpqwalreceiver/libpqwalreceiver.c:259 #, c-format msgid "could not parse connection string: %s" msgstr "konnte Verbindungsparameter nicht interpretieren: %s" -#: replication/libpqwalreceiver/libpqwalreceiver.c:295 +#: replication/libpqwalreceiver/libpqwalreceiver.c:332 #, c-format msgid "could not receive database system identifier and timeline ID from the primary server: %s" msgstr "konnte Datenbanksystemidentifikator und Zeitleisten-ID nicht vom Primärserver empfangen: %s" -#: replication/libpqwalreceiver/libpqwalreceiver.c:306 -#: replication/libpqwalreceiver/libpqwalreceiver.c:512 +#: replication/libpqwalreceiver/libpqwalreceiver.c:343 +#: replication/libpqwalreceiver/libpqwalreceiver.c:550 #, c-format msgid "invalid response from primary server" msgstr "ungültige Antwort vom Primärserver" -#: replication/libpqwalreceiver/libpqwalreceiver.c:307 +#: replication/libpqwalreceiver/libpqwalreceiver.c:344 #, c-format msgid "Could not identify system: got %d rows and %d fields, expected %d rows and %d or more fields." msgstr "Konnte System nicht identifizieren: %d Zeilen und %d Felder erhalten, %d Zeilen und %d oder mehr Felder erwartet." -#: replication/libpqwalreceiver/libpqwalreceiver.c:373 -#: replication/libpqwalreceiver/libpqwalreceiver.c:379 -#: replication/libpqwalreceiver/libpqwalreceiver.c:404 +#: replication/libpqwalreceiver/libpqwalreceiver.c:410 +#: replication/libpqwalreceiver/libpqwalreceiver.c:416 +#: replication/libpqwalreceiver/libpqwalreceiver.c:441 #, c-format msgid "could not start WAL streaming: %s" msgstr "konnte WAL-Streaming nicht starten: %s" -#: replication/libpqwalreceiver/libpqwalreceiver.c:423 +#: replication/libpqwalreceiver/libpqwalreceiver.c:460 #, c-format msgid "could not send end-of-streaming message to primary: %s" msgstr "konnte End-of-Streaming-Nachricht nicht an Primärserver senden: %s" -#: replication/libpqwalreceiver/libpqwalreceiver.c:447 +#: replication/libpqwalreceiver/libpqwalreceiver.c:482 #, c-format msgid "unexpected result set after end-of-streaming" msgstr "unerwartete Ergebnismenge nach End-of-Streaming" -#: replication/libpqwalreceiver/libpqwalreceiver.c:467 +#: replication/libpqwalreceiver/libpqwalreceiver.c:496 +#, c-format +msgid "error while shutting down streaming COPY: %s" +msgstr "Fehler beim Beenden des COPY-Datenstroms: %s" + +#: replication/libpqwalreceiver/libpqwalreceiver.c:505 #, c-format msgid "error reading result of streaming command: %s" msgstr "Fehler beim Lesen des Ergebnisses von Streaming-Befehl: %s" -#: replication/libpqwalreceiver/libpqwalreceiver.c:475 +#: replication/libpqwalreceiver/libpqwalreceiver.c:513 +#: replication/libpqwalreceiver/libpqwalreceiver.c:741 #, c-format msgid "unexpected result after CommandComplete: %s" msgstr "unerwartetes Ergebnis nach CommandComplete: %s" -#: replication/libpqwalreceiver/libpqwalreceiver.c:501 +#: replication/libpqwalreceiver/libpqwalreceiver.c:539 #, c-format msgid "could not receive timeline history file from the primary server: %s" msgstr "konnte Zeitleisten-History-Datei nicht vom Primärserver empfangen: %s" -#: replication/libpqwalreceiver/libpqwalreceiver.c:513 +#: replication/libpqwalreceiver/libpqwalreceiver.c:551 #, c-format msgid "Expected 1 tuple with 2 fields, got %d tuples with %d fields." msgstr "1 Tupel mit 2 Feldern erwartet, %d Tupel mit %d Feldern erhalten." 
-#: replication/libpqwalreceiver/libpqwalreceiver.c:660 -#: replication/libpqwalreceiver/libpqwalreceiver.c:687 -#: replication/libpqwalreceiver/libpqwalreceiver.c:693 +#: replication/libpqwalreceiver/libpqwalreceiver.c:705 +#: replication/libpqwalreceiver/libpqwalreceiver.c:756 +#: replication/libpqwalreceiver/libpqwalreceiver.c:762 #, c-format msgid "could not receive data from WAL stream: %s" msgstr "konnte keine Daten vom WAL-Stream empfangen: %s" -#: replication/libpqwalreceiver/libpqwalreceiver.c:712 +#: replication/libpqwalreceiver/libpqwalreceiver.c:781 #, c-format msgid "could not send data to WAL stream: %s" msgstr "konnte keine Daten an den WAL-Stream senden: %s" -#: replication/libpqwalreceiver/libpqwalreceiver.c:746 -#, fuzzy, c-format -#| msgid "could not create directory \"%s\": %s\n" +#: replication/libpqwalreceiver/libpqwalreceiver.c:830 +#, c-format msgid "could not create replication slot \"%s\": %s" -msgstr "konnte Verzeichnis »%s« nicht erzeugen: %s\n" +msgstr "konnte Replikations-Slot »%s« nicht erzeugen: %s" -#: replication/logical/launcher.c:236 -#, fuzzy, c-format -#| msgid "starting logical decoding for slot \"%s\"" +#: replication/libpqwalreceiver/libpqwalreceiver.c:864 +#, c-format +msgid "invalid query response" +msgstr "ungültige Antwort auf Anfrage" + +#: replication/libpqwalreceiver/libpqwalreceiver.c:865 +#, c-format +msgid "Expected %d fields, got %d fields." +msgstr "%d Felder erwartet, %d Felder erhalten." + +#: replication/libpqwalreceiver/libpqwalreceiver.c:934 +#, c-format +msgid "the query interface requires a database connection" +msgstr "Ausführen von Anfragen benötigt eine Datenbankverbindung" + +#: replication/libpqwalreceiver/libpqwalreceiver.c:965 +msgid "empty query" +msgstr "leere Anfrage" + +#: replication/logical/launcher.c:298 +#, c-format msgid "starting logical replication worker for subscription \"%s\"" -msgstr "starte logisches Dekodieren für Slot »%s«" +msgstr "starte Arbeitsprozess für logische Replikation für Subskription »%s«" -#: replication/logical/launcher.c:243 -#, fuzzy, c-format -#| msgid "cannot query or manipulate replication origin when max_replication_slots = 0" +#: replication/logical/launcher.c:305 +#, c-format msgid "cannot start logical replication workers when max_replication_slots = 0" -msgstr "Replication-Origin kann nicht abgefragt oder geändert werden, wenn max_replication_slots = 0" +msgstr "Arbeitsprozesse für logische Replikation können nicht gestartet werden, wenn max_replication_slots = 0" -#: replication/logical/launcher.c:267 +#: replication/logical/launcher.c:385 #, c-format -msgid "out of logical replication workers slots" -msgstr "" +msgid "out of logical replication worker slots" +msgstr "alle Slots für Arbeitsprozesse für logische Replikation belegt" -#: replication/logical/launcher.c:268 -#, fuzzy, c-format -#| msgid "You might need to increase max_locks_per_transaction." +#: replication/logical/launcher.c:386 +#, c-format msgid "You might need to increase max_logical_replication_workers." -msgstr "Sie müssen möglicherweise max_locks_per_transaction erhöhen." +msgstr "Sie müssen möglicherweise max_logical_replication_workers erhöhen."
-#: replication/logical/launcher.c:296 -#, fuzzy, c-format -#| msgid "too many background workers" -msgid "out of background workers slots" -msgstr "zu viele Background-Worker" +#: replication/logical/launcher.c:441 +#, c-format +msgid "out of background worker slots" +msgstr "alle Slots für Background-Worker belegt" -#: replication/logical/launcher.c:297 -#, fuzzy, c-format -#| msgid "You might need to increase max_locks_per_transaction." +#: replication/logical/launcher.c:442 +#, c-format msgid "You might need to increase max_worker_processes." -msgstr "Sie müssen möglicherweise max_locks_per_transaction erhöhen." +msgstr "Sie müssen möglicherweise max_worker_processes erhöhen." -#: replication/logical/launcher.c:413 +#: replication/logical/launcher.c:625 #, c-format -msgid "logical replication worker slot %d already used by another worker" -msgstr "" +msgid "logical replication worker slot %d is empty, cannot attach" +msgstr "Arbeitsprozess-Slot %d für logische Replikation ist leer, kann nicht zugeteilt werden" -#: replication/logical/launcher.c:556 -#, fuzzy, c-format -#| msgid "autovacuum launcher started" -msgid "logical replication launcher started" -msgstr "Autovacuum-Launcher startet" +#: replication/logical/launcher.c:634 +#, c-format +msgid "logical replication worker slot %d is already used by another worker, cannot attach" +msgstr "Arbeitsprozess-Slot %d für logische Replikation wird schon von einem anderen Arbeitsprozess verwendet, kann nicht zugeteilt werden" -#: replication/logical/launcher.c:656 -#, fuzzy, c-format -#| msgid "autovacuum launcher shutting down" -msgid "logical replication launcher shutting down" -msgstr "Autovacuum-Launcher fährt herunter" +#: replication/logical/launcher.c:888 +#, c-format +msgid "logical replication launcher started" +msgstr "Logical-Replication-Launcher startet" -#: replication/logical/logical.c:83 +#: replication/logical/logical.c:85 #, c-format msgid "logical decoding requires wal_level >= logical" msgstr "logische Dekodierung erfordert wal_level >= logical" -#: replication/logical/logical.c:88 +#: replication/logical/logical.c:90 #, c-format msgid "logical decoding requires a database connection" msgstr "logische Dekodierung benötigt eine Datenbankverbindung" -#: replication/logical/logical.c:106 +#: replication/logical/logical.c:108 #, c-format msgid "logical decoding cannot be used while in recovery" msgstr "logische Dekodierung kann nicht während der Wiederherstellung verwendet werden" -#: replication/logical/logical.c:236 replication/logical/logical.c:348 +#: replication/logical/logical.c:250 replication/logical/logical.c:376 #, c-format msgid "cannot use physical replication slot for logical decoding" msgstr "physischer Replikations-Slot kann nicht für logisches Dekodieren verwendet werden" -#: replication/logical/logical.c:241 replication/logical/logical.c:353 +#: replication/logical/logical.c:255 replication/logical/logical.c:381 #, c-format msgid "replication slot \"%s\" was not created in this database" msgstr "Replikations-Slot »%s« wurde nicht in dieser Datenbank erzeugt" -#: replication/logical/logical.c:248 +#: replication/logical/logical.c:262 #, c-format msgid "cannot create logical replication slot in transaction that has performed writes" msgstr "logischer Replikations-Slot kann nicht in einer Transaktion erzeugt werden, die Schreibvorgänge ausgeführt hat" -#: replication/logical/logical.c:390 +#: replication/logical/logical.c:421 #, c-format msgid "starting logical decoding for slot \"%s\"" msgstr "starte logisches 
Dekodieren für Slot »%s«" -#: replication/logical/logical.c:392 +#: replication/logical/logical.c:423 #, c-format -msgid "streaming transactions committing after %X/%X, reading WAL from %X/%X" -msgstr "Streaming beginnt bei Transaktionen, die nach %X/%X committen; lese WAL ab %X/%X" +msgid "Streaming transactions committing after %X/%X, reading WAL from %X/%X." +msgstr "Streaming beginnt bei Transaktionen, die nach %X/%X committen; lese WAL ab %X/%X." -#: replication/logical/logical.c:527 +#: replication/logical/logical.c:570 #, c-format msgid "slot \"%s\", output plugin \"%s\", in the %s callback, associated LSN %X/%X" msgstr "Slot »%s«, Ausgabe-Plugin »%s«, im Callback %s, zugehörige LSN %X/%X" -#: replication/logical/logical.c:534 +#: replication/logical/logical.c:577 #, c-format msgid "slot \"%s\", output plugin \"%s\", in the %s callback" msgstr "Slot »%s«, Ausgabe-Plugin »%s«, im Callback %s" -#: replication/logical/logicalfuncs.c:114 replication/slotfuncs.c:32 +#: replication/logical/logicalfuncs.c:114 replication/slotfuncs.c:35 #, c-format msgid "must be superuser or replication role to use replication slots" msgstr "nur Superuser und Replikationsrollen können Replikations-Slots verwenden" @@ -16521,616 +17690,716 @@ msgstr "Array muss eindimensional sein" msgid "array must not contain nulls" msgstr "Array darf keine NULL-Werte enthalten" -#: replication/logical/logicalfuncs.c:222 utils/adt/json.c:2282 -#: utils/adt/jsonb.c:1357 +#: replication/logical/logicalfuncs.c:222 utils/adt/json.c:2310 +#: utils/adt/jsonb.c:1269 #, c-format msgid "array must have even number of elements" msgstr "Array muss eine gerade Anzahl Elemente haben" -#: replication/logical/logicalfuncs.c:265 +#: replication/logical/logicalfuncs.c:269 #, c-format msgid "logical decoding output plugin \"%s\" produces binary output, but function \"%s\" expects textual data" msgstr "Ausgabe-Plugin »%s« erzeugt binäre Ausgabe, aber Funktion »%s« erwartet Textdaten" -#: replication/logical/origin.c:180 +#: replication/logical/origin.c:185 #, c-format msgid "only superusers can query or manipulate replication origins" msgstr "nur Superuser können Replication-Origins abfragen oder ändern" -#: replication/logical/origin.c:185 +#: replication/logical/origin.c:190 #, c-format msgid "cannot query or manipulate replication origin when max_replication_slots = 0" msgstr "Replication-Origin kann nicht abgefragt oder geändert werden, wenn max_replication_slots = 0" -#: replication/logical/origin.c:190 +#: replication/logical/origin.c:195 #, c-format msgid "cannot manipulate replication origins during recovery" msgstr "Replication-Origins können nicht während der Wiederherstellung geändert werden" -#: replication/logical/origin.c:314 +#: replication/logical/origin.c:230 +#, c-format +msgid "replication origin \"%s\" does not exist" +msgstr "Replication-Origin »%s« existiert nicht" + +#: replication/logical/origin.c:321 #, c-format msgid "could not find free replication origin OID" msgstr "konnte keine freie Replication-Origin-OID finden" -#: replication/logical/origin.c:351 +#: replication/logical/origin.c:369 #, c-format msgid "could not drop replication origin with OID %d, in use by PID %d" msgstr "konnte Replication-Origin mit OID %d nicht löschen, wird von PID %d verwendet" -#: replication/logical/origin.c:664 +#: replication/logical/origin.c:461 +#, c-format +msgid "replication origin with OID %u does not exist" +msgstr "Replication-Origin mit OID %u existiert nicht" + +#: replication/logical/origin.c:707 #, c-format msgid 
"replication checkpoint has wrong magic %u instead of %u" msgstr "Replikations-Checkpoint hat falsche magische Zahl %u statt %u" -#: replication/logical/origin.c:696 +#: replication/logical/origin.c:739 #, c-format msgid "could not read file \"%s\": read %d of %zu" msgstr "konnte Datei »%s« nicht lesen: %d von %zu gelesen" -#: replication/logical/origin.c:705 +#: replication/logical/origin.c:748 #, c-format msgid "could not find free replication state, increase max_replication_slots" msgstr "konnte keinen freien Replication-State finden, erhöhen Sie max_replication_slots" -#: replication/logical/origin.c:723 +#: replication/logical/origin.c:766 #, c-format msgid "replication slot checkpoint has wrong checksum %u, expected %u" msgstr "Replikations-Slot-Checkpoint hat falsche Prüfsumme %u, erwartet wurde %u" -#: replication/logical/origin.c:847 +#: replication/logical/origin.c:890 #, c-format msgid "replication origin with OID %d is already active for PID %d" msgstr "Replication-Origin mit OID %d ist bereits aktiv für PID %d" -#: replication/logical/origin.c:858 replication/logical/origin.c:1038 +#: replication/logical/origin.c:901 replication/logical/origin.c:1088 #, c-format msgid "could not find free replication state slot for replication origin with OID %u" msgstr "konnte keinen freien Replication-State-Slot für Replication-Origin mit OID %u finden" -#: replication/logical/origin.c:860 replication/logical/origin.c:1040 -#: replication/slot.c:1336 +#: replication/logical/origin.c:903 replication/logical/origin.c:1090 +#: replication/slot.c:1508 #, c-format msgid "Increase max_replication_slots and try again." msgstr "Erhöhen Sie max_replication_slots und versuchen Sie es erneut." -#: replication/logical/origin.c:997 +#: replication/logical/origin.c:1047 #, c-format msgid "cannot setup replication origin when one is already setup" msgstr "kann Replication-Origin nicht einrichten, wenn schon einer eingerichtet ist" -#: replication/logical/origin.c:1026 +#: replication/logical/origin.c:1076 #, c-format msgid "replication identifier %d is already active for PID %d" msgstr "Replikationsidentifikator %d ist bereits aktiv für PID %d" -#: replication/logical/origin.c:1072 replication/logical/origin.c:1267 -#: replication/logical/origin.c:1287 +#: replication/logical/origin.c:1127 replication/logical/origin.c:1325 +#: replication/logical/origin.c:1345 #, c-format msgid "no replication origin is configured" msgstr "kein Replication-Origin konfiguriert" -#: replication/logical/relation.c:265 -#, fuzzy, c-format -#| msgid "replication slot \"%s\" does not exist" +#: replication/logical/relation.c:255 +#, c-format msgid "logical replication target relation \"%s.%s\" does not exist" -msgstr "Replikations-Slot »%s« existiert nicht" - -#: replication/logical/relation.c:276 -#, fuzzy, c-format -#| msgid "referenced relation \"%s\" is not a table" -msgid "logical replication target relation \"%s.%s\" is not a table" -msgstr "Relation »%s«, auf die verwiesen wird, ist keine Tabelle" +msgstr "Zielrelation für logische Replikation »%s.%s« existiert nicht" -#: replication/logical/relation.c:303 +#: replication/logical/relation.c:297 #, c-format msgid "logical replication target relation \"%s.%s\" is missing some replicated columns" -msgstr "" +msgstr "in Zielrelation für logische Replikation »%s.%s« fehlen replizierte Spalten" -#: replication/logical/relation.c:342 +#: replication/logical/relation.c:337 #, c-format msgid "logical replication target relation \"%s.%s\" uses system columns in REPLICA IDENTITY 
index" -msgstr "" - -#: replication/logical/relation.c:452 -#, fuzzy, c-format -#| msgid "function \"%s\" not found\n" -msgid "builtin type %u not found" -msgstr "Funktion »%s« nicht gefunden\n" - -#: replication/logical/relation.c:453 -#, c-format -msgid "This can be caused by having publisher with higher major version than subscriber" -msgstr "" - -#: replication/logical/relation.c:485 -#, fuzzy, c-format -#| msgid "database \"%s\" is used by a logical replication slot" -msgid "data type \"%s.%s\" required for logical replication does not exist" -msgstr "Datenbank »%s« wird von einem logischen Replikations-Slot verwendet" +msgstr "Zielrelation für logische Replikation »%s.%s« verwendet Systemspalten in REPLICA-IDENTITY-Index" -#: replication/logical/reorderbuffer.c:2286 +#: replication/logical/reorderbuffer.c:2310 #, c-format msgid "could not write to data file for XID %u: %m" msgstr "konnte nicht in Datendatei für XID %u schreiben: %m" -#: replication/logical/reorderbuffer.c:2382 -#: replication/logical/reorderbuffer.c:2402 +#: replication/logical/reorderbuffer.c:2403 +#: replication/logical/reorderbuffer.c:2425 #, c-format msgid "could not read from reorderbuffer spill file: %m" msgstr "konnte nicht aus Reorder-Buffer-Spill-Datei lesen: %m" -#: replication/logical/reorderbuffer.c:2386 -#: replication/logical/reorderbuffer.c:2406 +#: replication/logical/reorderbuffer.c:2407 +#: replication/logical/reorderbuffer.c:2429 #, c-format msgid "could not read from reorderbuffer spill file: read %d instead of %u bytes" msgstr "konnte nicht aus Reorder-Buffer-Spill-Datei lesen: %d statt %u Bytes gelesen" -#: replication/logical/reorderbuffer.c:3062 +#: replication/logical/reorderbuffer.c:2642 +#, fuzzy, c-format +#| msgid "could not read file \"%s\", read %d of %d: %m" +msgid "could not remove file \"%s\" during removal of pg_replslot/%s/*.xid: %m" +msgstr "konnte Datei »%s« nicht lesen, %d von %d gelesen: %m" + +#: replication/logical/reorderbuffer.c:3108 #, c-format msgid "could not read from file \"%s\": read %d instead of %d bytes" msgstr "konnte nicht aus Datei »%s« lesen: %d statt %d Bytes gelesen" -#: replication/logical/snapbuild.c:597 +#: replication/logical/snapbuild.c:612 +#, c-format +msgid "initial slot snapshot too large" +msgstr "initialer Slot-Snapshot ist zu groß" + +#: replication/logical/snapbuild.c:664 #, c-format msgid "exported logical decoding snapshot: \"%s\" with %u transaction ID" msgid_plural "exported logical decoding snapshot: \"%s\" with %u transaction IDs" msgstr[0] "logischer Dekodierungs-Snapshot exportiert: »%s« mit %u Transaktions-ID" msgstr[1] "logischer Dekodierungs-Snapshot exportiert: »%s« mit %u Transaktions-IDs" -#: replication/logical/snapbuild.c:916 replication/logical/snapbuild.c:1281 -#: replication/logical/snapbuild.c:1812 +#: replication/logical/snapbuild.c:1262 replication/logical/snapbuild.c:1355 +#: replication/logical/snapbuild.c:1841 #, c-format msgid "logical decoding found consistent point at %X/%X" msgstr "logisches Dekodieren fand konsistenten Punkt bei %X/%X" -#: replication/logical/snapbuild.c:918 -#, c-format -msgid "Transaction ID %u finished; no more running transactions." -msgstr "Transaktions-ID %u beendet; keine laufenden Transaktionen mehr." - -#: replication/logical/snapbuild.c:1283 +#: replication/logical/snapbuild.c:1264 #, c-format msgid "There are no running transactions." msgstr "Keine laufenden Transaktionen." 
-#: replication/logical/snapbuild.c:1345 +#: replication/logical/snapbuild.c:1306 #, c-format msgid "logical decoding found initial starting point at %X/%X" msgstr "logisches Dekodieren fand initialen Startpunkt bei %X/%X" -#: replication/logical/snapbuild.c:1347 +#: replication/logical/snapbuild.c:1308 replication/logical/snapbuild.c:1332 +#, c-format +msgid "Waiting for transactions (approximately %d) older than %u to end." +msgstr "Warten auf Abschluss der Transaktionen (ungefähr %d), die älter als %u sind." + +#: replication/logical/snapbuild.c:1330 +#, c-format +msgid "logical decoding found initial consistent point at %X/%X" +msgstr "logisches Dekodieren fand initialen konsistenten Punkt bei %X/%X" + +#: replication/logical/snapbuild.c:1357 #, c-format -msgid "%u transaction needs to finish." -msgid_plural "%u transactions need to finish." -msgstr[0] "%u Transaktion muss noch abschließen." -msgstr[1] "%u Transaktionen müssen noch abschließen." +msgid "There are no old transactions anymore." +msgstr "Es laufen keine alten Transaktionen mehr." -#: replication/logical/snapbuild.c:1686 replication/logical/snapbuild.c:1712 -#: replication/logical/snapbuild.c:1726 replication/logical/snapbuild.c:1740 +#: replication/logical/snapbuild.c:1714 replication/logical/snapbuild.c:1742 +#: replication/logical/snapbuild.c:1759 replication/logical/snapbuild.c:1775 #, c-format msgid "could not read file \"%s\", read %d of %d: %m" msgstr "konnte Datei »%s« nicht lesen, %d von %d gelesen: %m" -#: replication/logical/snapbuild.c:1692 +#: replication/logical/snapbuild.c:1720 #, c-format msgid "snapbuild state file \"%s\" has wrong magic number: %u instead of %u" msgstr "Scanbuild-State-Datei »%s« hat falsche magische Zahl %u statt %u" -#: replication/logical/snapbuild.c:1697 +#: replication/logical/snapbuild.c:1725 #, c-format msgid "snapbuild state file \"%s\" has unsupported version: %u instead of %u" msgstr "Snapbuild-State-Datei »%s« hat nicht unterstützte Version: %u statt %u" -#: replication/logical/snapbuild.c:1753 +#: replication/logical/snapbuild.c:1788 #, c-format msgid "checksum mismatch for snapbuild state file \"%s\": is %u, should be %u" msgstr "Prüfsummenfehler bei Snapbuild-State-Datei »%s«: ist %u, sollte %u sein" -#: replication/logical/snapbuild.c:1814 +#: replication/logical/snapbuild.c:1843 #, c-format msgid "Logical decoding will begin using saved snapshot." msgstr "Logische Dekodierung beginnt mit gespeichertem Snapshot." 
-#: replication/logical/snapbuild.c:1887 +#: replication/logical/snapbuild.c:1915 #, c-format msgid "could not parse file name \"%s\"" msgstr "konnte Dateinamen »%s« nicht parsen" -#: replication/logical/worker.c:256 +#: replication/logical/tablesync.c:138 +#, c-format +msgid "logical replication table synchronization worker for subscription \"%s\", table \"%s\" has finished" +msgstr "Arbeitsprozess für logische Replikation für Tabellensynchronisation für Subskription »%s«, Tabelle »%s« hat abgeschlossen" + +#: replication/logical/tablesync.c:685 +#, c-format +msgid "could not fetch table info for table \"%s.%s\" from publisher: %s" +msgstr "konnte Tabelleninformationen für Tabelle »%s.%s« nicht vom Publikationsserver holen: %s" + +#: replication/logical/tablesync.c:691 +#, c-format +msgid "table \"%s.%s\" not found on publisher" +msgstr "Tabelle »%s.%s« nicht auf dem Publikationsserver gefunden" + +#: replication/logical/tablesync.c:721 +#, c-format +msgid "could not fetch table info for table \"%s.%s\": %s" +msgstr "konnte Tabelleninformationen für Tabelle »%s.%s« nicht holen: %s" + +#: replication/logical/tablesync.c:791 +#, c-format +msgid "could not start initial contents copy for table \"%s.%s\": %s" +msgstr "konnte Kopieren des Anfangsinhalts für Tabelle »%s.%s« nicht starten: %s" + +#: replication/logical/tablesync.c:904 +#, c-format +msgid "table copy could not start transaction on publisher" +msgstr "beim Kopieren der Tabelle konnte die Transaktion auf dem Publikationsserver nicht gestartet werden" + +#: replication/logical/tablesync.c:926 +#, c-format +msgid "table copy could not finish transaction on publisher" +msgstr "beim Kopieren der Tabelle konnte die Transaktion auf dem Publikationsserver nicht beendet werden" + +#: replication/logical/worker.c:307 #, c-format msgid "processing remote data for replication target relation \"%s.%s\" column \"%s\", remote type %s, local type %s" -msgstr "" +msgstr "Verarbeiten empfangener Daten für Replikationszielrelation »%s.%s« Spalte »%s«, entfernter Typ %s, lokaler Typ %s" -#: replication/logical/worker.c:448 +#: replication/logical/worker.c:528 #, c-format msgid "ORIGIN message sent out of order" -msgstr "" +msgstr "ORIGIN-Nachricht in falscher Reihenfolge gesendet" -#: replication/logical/worker.c:570 +#: replication/logical/worker.c:659 #, c-format -msgid "publisher does not send replica identity column expected by the logical replication target relation \"%s.%s\"" -msgstr "" +msgid "publisher did not send replica identity column expected by the logical replication target relation \"%s.%s\"" +msgstr "Publikationsserver hat nicht die Replikidentitätsspalten gesendet, die von Replikationszielrelation »%s.%s« erwartet wurden" -#: replication/logical/worker.c:577 +#: replication/logical/worker.c:666 #, c-format -msgid "logical replication target relation \"%s.%s\" has neither REPLICA IDENTIY index nor PRIMARY KEY and published relation does not have REPLICA IDENTITY FULL" -msgstr "" +msgid "logical replication target relation \"%s.%s\" has neither REPLICA IDENTITY index nor PRIMARY KEY and published relation does not have REPLICA IDENTITY FULL" +msgstr "Zielrelation für logische Replikation »%s.%s« hat weder REPLICA-IDENTITY-Index noch Primärschlüssel und die publizierte Relation hat kein REPLICA IDENTITY FULL" -#: replication/logical/worker.c:766 -#, fuzzy, c-format -#| msgid "could not find free replication origin OID" -msgid "logical replication could not find row for delete in replication target %s" -msgstr "konnte keine freie
Replication-Origin-OID finden" +#: replication/logical/worker.c:873 +#, c-format +msgid "logical replication could not find row for delete in replication target relation \"%s\"" +msgstr "logische Replikation konnte zu löschende Zeile in Zielrelation »%s« nicht finden" -#: replication/logical/worker.c:833 -#, fuzzy, c-format -#| msgid "invalid frontend message type %d" -msgid "invalid logical replication message type %c" -msgstr "ungültiger Frontend-Message-Typ %d" +#: replication/logical/worker.c:1005 +#, c-format +msgid "invalid logical replication message type \"%c\"" +msgstr "ungültiger Nachrichtentyp für logische Replikation »%c«" -#: replication/logical/worker.c:972 +#: replication/logical/worker.c:1146 #, c-format msgid "data stream from publisher has ended" -msgstr "" +msgstr "Datenstrom vom Publikationsserver endete" -#: replication/logical/worker.c:1100 -#, fuzzy, c-format -#| msgid "terminating walreceiver due to timeout" +#: replication/logical/worker.c:1305 +#, c-format msgid "terminating logical replication worker due to timeout" -msgstr "WAL-Receiver-Prozess wird abgebrochen wegen Zeitüberschreitung" +msgstr "Arbeitsprozess für logische Replikation wird abgebrochen wegen Zeitüberschreitung" -#: replication/logical/worker.c:1239 +#: replication/logical/worker.c:1453 #, c-format -msgid "logical replication worker for subscription \"%s\" will stop because the subscription was removed" -msgstr "" +msgid "logical replication apply worker for subscription \"%s\" will stop because the subscription was removed" +msgstr "Apply-Worker für logische Replikation für Subskription »%s« wird anhalten, weil die Subskription entfernt wurde" -#: replication/logical/worker.c:1254 +#: replication/logical/worker.c:1467 #, c-format -msgid "logical replication worker for subscription \"%s\" will restart because the connection information was changed" -msgstr "" +msgid "logical replication apply worker for subscription \"%s\" will stop because the subscription was disabled" +msgstr "Apply-Worker für logische Replikation für Subskription »%s« wird anhalten, weil die Subskription deaktiviert wurde" -#: replication/logical/worker.c:1269 +#: replication/logical/worker.c:1481 #, c-format -msgid "logical replication worker for subscription \"%s\" will restart because subscription was renamed" -msgstr "" +msgid "logical replication apply worker for subscription \"%s\" will restart because the connection information was changed" +msgstr "Apply-Worker für logische Replikation für Subskription »%s« wird neu starten, weil die Verbindungsinformationen geändert wurden" -#: replication/logical/worker.c:1284 +#: replication/logical/worker.c:1495 #, c-format -msgid "logical replication worker for subscription \"%s\" will restart because subscription's publications were changed" -msgstr "" +msgid "logical replication apply worker for subscription \"%s\" will restart because subscription was renamed" +msgstr "Apply-Worker für logische Replikation für Subskription »%s« wird neu starten, weil die Subskription umbenannt wurde" -#: replication/logical/worker.c:1300 +#: replication/logical/worker.c:1512 #, c-format -msgid "logical replication worker for subscription \"%s\" will stop because the subscription was disabled" -msgstr "" +msgid "logical replication apply worker for subscription \"%s\" will restart because the replication slot name was changed" +msgstr "Apply-Worker für logische Replikation für Subskription »%s« wird neu starten, weil der Replikations-Slot-Name geändert wurde" -#: 
replication/logical/worker.c:1393 +#: replication/logical/worker.c:1526 #, c-format -msgid "logical replication worker for subscription \"%s\" will not start because the subscription was disabled during startup" -msgstr "" +msgid "logical replication apply worker for subscription \"%s\" will restart because subscription's publications were changed" +msgstr "Apply-Worker für logische Replikation für Subskription »%s« wird neu starten, weil die Publikationen der Subskription geändert wurden" -#: replication/logical/worker.c:1406 +#: replication/logical/worker.c:1629 #, fuzzy, c-format -#| msgid "authentication failed for user \"%s\": host rejected" -msgid "logical replication apply for subscription \"%s\" has started" -msgstr "Authentifizierung für Benutzer »%s« fehlgeschlagen: Host abgelehnt" +#| msgid "logical replication apply worker for subscription \"%s\" will not start because the subscription was disabled during startup" +msgid "logical replication apply worker for subscription %u will not start because the subscription was removed during startup" +msgstr "Apply-Worker für logische Replikation für Subskription »%s« wird nicht starten, weil die Subskription während des Starts deaktiviert wurde" -#: replication/pgoutput/pgoutput.c:113 -#, fuzzy, c-format -#| msgid "invalid option \"%s\"" +#: replication/logical/worker.c:1641 +#, c-format +msgid "logical replication apply worker for subscription \"%s\" will not start because the subscription was disabled during startup" +msgstr "Apply-Worker für logische Replikation für Subskription »%s« wird nicht starten, weil die Subskription während des Starts deaktiviert wurde" + +#: replication/logical/worker.c:1659 +#, c-format +msgid "logical replication table synchronization worker for subscription \"%s\", table \"%s\" has started" +msgstr "Arbeitsprozess für logische Replikation für Tabellensynchronisation für Subskription »%s«, Tabelle »%s« hat gestartet" + +#: replication/logical/worker.c:1663 +#, c-format +msgid "logical replication apply worker for subscription \"%s\" has started" +msgstr "Apply-Worker für logische Replikation für Subskription »%s« hat gestartet" + +#: replication/logical/worker.c:1703 +#, c-format +msgid "subscription has no replication slot set" +msgstr "für die Subskription ist kein Replikations-Slot gesetzt" + +#: replication/pgoutput/pgoutput.c:117 +#, c-format msgid "invalid proto_version" -msgstr "ungültige Option »%s«" +msgstr "ungültige proto_version" -#: replication/pgoutput/pgoutput.c:118 -#, fuzzy, c-format -#| msgid "numeric time zone \"%s\" out of range" -msgid "proto_verson \"%s\" out of range" -msgstr "numerische Zeitzone »%s« ist außerhalb des gültigen Bereichs" +#: replication/pgoutput/pgoutput.c:122 +#, c-format +msgid "proto_version \"%s\" out of range" +msgstr "proto_version »%s« ist außerhalb des gültigen Bereichs" -#: replication/pgoutput/pgoutput.c:135 -#, fuzzy, c-format -#| msgid "invalid name syntax" +#: replication/pgoutput/pgoutput.c:139 +#, c-format msgid "invalid publication_names syntax" -msgstr "ungültige Namenssyntax" +msgstr "ungültige Syntax für publication_names" -#: replication/pgoutput/pgoutput.c:179 +#: replication/pgoutput/pgoutput.c:181 #, c-format msgid "client sent proto_version=%d but we only support protocol %d or lower" -msgstr "" +msgstr "Client sendete proto_version=%d, aber wir unterstützen nur Protokoll %d oder niedriger" -#: replication/pgoutput/pgoutput.c:185 +#: replication/pgoutput/pgoutput.c:187 #, c-format msgid "client sent proto_version=%d but we only support
protocol %d or higher" -msgstr "" +msgstr "Client sendete proto_version=%d, aber wir unterstützen nur Protokoll %d oder höher" -#: replication/pgoutput/pgoutput.c:191 -#, fuzzy, c-format -#| msgid "multiple Dictionary parameters" +#: replication/pgoutput/pgoutput.c:193 +#, c-format msgid "publication_names parameter missing" -msgstr "mehrere »Dictionary«-Parameter" +msgstr "Parameter »publication_names« fehlt" -#: replication/slot.c:180 +#: replication/slot.c:182 #, c-format msgid "replication slot name \"%s\" is too short" msgstr "Replikations-Slot-Name »%s« ist zu kurz" -#: replication/slot.c:189 +#: replication/slot.c:191 #, c-format msgid "replication slot name \"%s\" is too long" msgstr "Replikations-Slot-Name »%s« ist zu lang" -#: replication/slot.c:202 +#: replication/slot.c:204 #, c-format msgid "replication slot name \"%s\" contains invalid character" msgstr "Replikations-Slot-Name »%s« enthält ungültiges Zeichen" -#: replication/slot.c:204 +#: replication/slot.c:206 #, c-format msgid "Replication slot names may only contain lower case letters, numbers, and the underscore character." msgstr "Replikations-Slot-Namen dürfen nur Kleinbuchstaben, Zahlen und Unterstriche enthalten." -#: replication/slot.c:251 +#: replication/slot.c:253 #, c-format msgid "replication slot \"%s\" already exists" msgstr "Replikations-Slot »%s« existiert bereits" -#: replication/slot.c:261 +#: replication/slot.c:263 #, c-format msgid "all replication slots are in use" msgstr "alle Replikations-Slots sind in Benutzung" -#: replication/slot.c:262 +#: replication/slot.c:264 #, c-format msgid "Free one or increase max_replication_slots." msgstr "Geben Sie einen frei oder erhöhen Sie max_replication_slots." -#: replication/slot.c:358 +#: replication/slot.c:379 #, c-format msgid "replication slot \"%s\" does not exist" msgstr "Replikations-Slot »%s« existiert nicht" -#: replication/slot.c:362 +#: replication/slot.c:390 replication/slot.c:940 #, c-format msgid "replication slot \"%s\" is active for PID %d" msgstr "Replikations-Slot »%s« ist aktiv für PID %d" -#: replication/slot.c:548 replication/slot.c:960 replication/slot.c:1297 +#: replication/slot.c:624 replication/slot.c:1121 replication/slot.c:1469 #, c-format msgid "could not remove directory \"%s\"" msgstr "konnte Verzeichnis »%s« nicht löschen" -#: replication/slot.c:809 +#: replication/slot.c:970 #, c-format msgid "replication slots can only be used if max_replication_slots > 0" msgstr "Replikations-Slots können nur verwendet werden, wenn max_replication_slots > 0" -#: replication/slot.c:814 +#: replication/slot.c:975 #, c-format msgid "replication slots can only be used if wal_level >= replica" msgstr "Replikations-Slots können nur verwendet werden, wenn wal_level >= replica" -#: replication/slot.c:1229 replication/slot.c:1267 +#: replication/slot.c:1399 replication/slot.c:1439 #, c-format msgid "could not read file \"%s\", read %d of %u: %m" msgstr "konnte Datei »%s« nicht lesen, %d von %u gelesen: %m" -#: replication/slot.c:1238 +#: replication/slot.c:1408 #, c-format msgid "replication slot file \"%s\" has wrong magic number: %u instead of %u" msgstr "Replikations-Slot-Datei »%s« hat falsche magische Zahl: %u statt %u" -#: replication/slot.c:1245 +#: replication/slot.c:1415 #, c-format msgid "replication slot file \"%s\" has unsupported version %u" msgstr "Replikations-Slot-Datei »%s« hat nicht unterstützte Version %u" -#: replication/slot.c:1252 +#: replication/slot.c:1422 #, c-format msgid "replication slot file \"%s\" has corrupted length %u" 
msgstr "Replikations-Slot-Datei »%s« hat falsche Länge %u" -#: replication/slot.c:1282 +#: replication/slot.c:1454 #, c-format msgid "checksum mismatch for replication slot file \"%s\": is %u, should be %u" msgstr "Prüfsummenfehler bei Replikations-Slot-Datei »%s«: ist %u, sollte %u sein" -#: replication/slot.c:1335 +#: replication/slot.c:1507 #, c-format msgid "too many replication slots active before shutdown" msgstr "zu viele aktive Replikations-Slots vor dem Herunterfahren" -#: replication/syncrep.c:244 +#: replication/slotfuncs.c:457 +#, fuzzy, c-format +#| msgid "invalid array flags" +msgid "invalid target wal lsn" +msgstr "ungültige Array-Flags" + +#: replication/slotfuncs.c:481 +#, c-format +msgid "cannot move slot to %X/%X, minimum is %X/%X" +msgstr "" + +#: replication/syncrep.c:246 #, c-format msgid "canceling the wait for synchronous replication and terminating connection due to administrator command" msgstr "Warten auf synchrone Replikation wird storniert und Verbindung wird abgebrochen, aufgrund von Anweisung des Administrators" -#: replication/syncrep.c:245 replication/syncrep.c:262 +#: replication/syncrep.c:247 replication/syncrep.c:264 #, c-format msgid "The transaction has already committed locally, but might not have been replicated to the standby." msgstr "Die Transaktion wurde lokal bereits committet, aber möglicherweise noch nicht zum Standby repliziert." -#: replication/syncrep.c:261 +#: replication/syncrep.c:263 #, c-format msgid "canceling wait for synchronous replication due to user request" msgstr "storniere Warten auf synchrone Replikation wegen Benutzeraufforderung" -#: replication/syncrep.c:392 +#: replication/syncrep.c:397 #, c-format msgid "standby \"%s\" now has synchronous standby priority %u" msgstr "Standby »%s« hat jetzt synchrone Standby-Priorität %u" -#: replication/syncrep.c:453 +#: replication/syncrep.c:458 #, c-format msgid "standby \"%s\" is now a synchronous standby with priority %u" msgstr "Standby »%s« ist jetzt ein synchroner Standby mit Priorität %u" -#: replication/syncrep.c:457 -#, fuzzy, c-format -#| msgid "standby \"%s\" is now a synchronous standby with priority %u" +#: replication/syncrep.c:462 +#, c-format msgid "standby \"%s\" is now a candidate for quorum synchronous standby" -msgstr "Standby »%s« ist jetzt ein synchroner Standby mit Priorität %u" +msgstr "Standby »%s« ist jetzt ein Kandidat für synchronen Standby mit Quorum" -#: replication/syncrep.c:1120 +#: replication/syncrep.c:1160 #, c-format msgid "synchronous_standby_names parser failed" msgstr "Parser für synchronous_standby_names fehlgeschlagen" -#: replication/syncrep.c:1126 +#: replication/syncrep.c:1166 #, c-format msgid "number of synchronous standbys (%d) must be greater than zero" msgstr "Anzahl synchroner Standbys (%d) muss größer als null sein" -#: replication/walreceiver.c:167 +#: replication/walreceiver.c:169 #, c-format msgid "terminating walreceiver process due to administrator command" msgstr "WAL-Receiver-Prozess wird abgebrochen aufgrund von Anweisung des Administrators" -#: replication/walreceiver.c:300 +#: replication/walreceiver.c:309 #, c-format msgid "could not connect to the primary server: %s" msgstr "konnte nicht mit dem Primärserver verbinden: %s" -#: replication/walreceiver.c:339 +#: replication/walreceiver.c:359 #, c-format msgid "database system identifier differs between the primary and standby" msgstr "Datenbanksystemidentifikator unterscheidet sich zwischen Primär- und Standby-Server" -#: replication/walreceiver.c:340 +#: 
replication/walreceiver.c:360 #, c-format msgid "The primary's identifier is %s, the standby's identifier is %s." msgstr "Identifikator des Primärservers ist %s, Identifikator des Standby ist %s." -#: replication/walreceiver.c:351 +#: replication/walreceiver.c:371 #, c-format msgid "highest timeline %u of the primary is behind recovery timeline %u" msgstr "höchste Zeitleiste %u des primären Servers liegt hinter Wiederherstellungszeitleiste %u zurück" -#: replication/walreceiver.c:387 +#: replication/walreceiver.c:407 #, c-format msgid "started streaming WAL from primary at %X/%X on timeline %u" msgstr "WAL-Streaming vom Primärserver gestartet bei %X/%X auf Zeitleiste %u" -#: replication/walreceiver.c:392 +#: replication/walreceiver.c:412 #, c-format msgid "restarted WAL streaming at %X/%X on timeline %u" msgstr "WAL-Streaming neu gestartet bei %X/%X auf Zeitleiste %u" -#: replication/walreceiver.c:421 +#: replication/walreceiver.c:441 #, c-format msgid "cannot continue WAL streaming, recovery has already ended" msgstr "kann WAL-Streaming nicht fortsetzen, Wiederherstellung ist bereits beendet" -#: replication/walreceiver.c:458 +#: replication/walreceiver.c:478 #, c-format msgid "replication terminated by primary server" msgstr "Replikation wurde durch Primärserver beendet" -#: replication/walreceiver.c:459 +#: replication/walreceiver.c:479 #, c-format msgid "End of WAL reached on timeline %u at %X/%X." msgstr "WAL-Ende erreicht auf Zeitleiste %u bei %X/%X." -#: replication/walreceiver.c:554 +#: replication/walreceiver.c:574 #, c-format msgid "terminating walreceiver due to timeout" msgstr "WAL-Receiver-Prozess wird abgebrochen wegen Zeitüberschreitung" -#: replication/walreceiver.c:594 +#: replication/walreceiver.c:614 #, c-format msgid "primary server contains no more WAL on requested timeline %u" msgstr "Primärserver enthält kein WAL mehr auf angeforderter Zeitleiste %u" -#: replication/walreceiver.c:609 replication/walreceiver.c:968 +#: replication/walreceiver.c:629 replication/walreceiver.c:988 #, c-format msgid "could not close log segment %s: %m" msgstr "konnte Logsegment %s nicht schließen: %m" -#: replication/walreceiver.c:734 +#: replication/walreceiver.c:754 #, c-format msgid "fetching timeline history file for timeline %u from primary server" msgstr "hole Zeitleisten-History-Datei für Zeitleiste %u vom Primärserver" -#: replication/walreceiver.c:1022 +#: replication/walreceiver.c:1042 #, c-format msgid "could not write to log segment %s at offset %u, length %lu: %m" msgstr "konnte nicht in Logsegment %s bei Position %u, Länge %lu schreiben: %m" -#: replication/walsender.c:455 +#: replication/walsender.c:491 #, c-format msgid "could not seek to beginning of file \"%s\": %m" msgstr "konnte Positionszeiger nicht an den Anfang der Datei »%s« setzen: %m" -#: replication/walsender.c:494 +#: replication/walsender.c:532 #, c-format msgid "IDENTIFY_SYSTEM has not been run before START_REPLICATION" -msgstr "" +msgstr "IDENTIFY_SYSTEM wurde nicht vor START_REPLICATION ausgeführt" -#: replication/walsender.c:511 +#: replication/walsender.c:549 #, c-format msgid "cannot use a logical replication slot for physical replication" msgstr "logischer Replikations-Slot kann nicht für physische Replikation verwendet werden" -#: replication/walsender.c:574 +#: replication/walsender.c:612 #, c-format msgid "requested starting point %X/%X on timeline %u is not in this server's history" msgstr "angeforderter Startpunkt %X/%X auf Zeitleiste %u ist nicht in der History dieses Servers" -#: 
replication/walsender.c:578 +#: replication/walsender.c:616 #, c-format msgid "This server's history forked from timeline %u at %X/%X." msgstr "Die History dieses Servers zweigte von Zeitleiste %u bei %X/%X ab." -#: replication/walsender.c:623 +#: replication/walsender.c:661 #, c-format msgid "requested starting point %X/%X is ahead of the WAL flush position of this server %X/%X" msgstr "angeforderter Startpunkt %X/%X ist vor der WAL-Flush-Position dieses Servers %X/%X" -#: replication/walsender.c:912 +#: replication/walsender.c:890 +#, c-format +msgid "CREATE_REPLICATION_SLOT ... EXPORT_SNAPSHOT must not be called inside a transaction" +msgstr "CREATE_REPLICATION_SLOT ... EXPORT_SNAPSHOT kann nicht in einer Transaktion aufgerufen werden" + +#: replication/walsender.c:899 +#, c-format +msgid "CREATE_REPLICATION_SLOT ... USE_SNAPSHOT must be called inside a transaction" +msgstr "CREATE_REPLICATION_SLOT ... USE_SNAPSHOT muss in einer Transaktion aufgerufen werden" + +#: replication/walsender.c:904 +#, c-format +msgid "CREATE_REPLICATION_SLOT ... USE_SNAPSHOT must be called in REPEATABLE READ isolation mode transaction" +msgstr "CREATE_REPLICATION_SLOT ... USE_SNAPSHOT muss in einer Transaktion im Isolationsmodus REPEATABLE READ aufgerufen werden" + +#: replication/walsender.c:909 +#, c-format +msgid "CREATE_REPLICATION_SLOT ... USE_SNAPSHOT must be called before any query" +msgstr "CREATE_REPLICATION_SLOT ... USE_SNAPSHOT muss vor allen Anfragen aufgerufen werden" + +#: replication/walsender.c:914 +#, c-format +msgid "CREATE_REPLICATION_SLOT ... USE_SNAPSHOT must not be called in a subtransaction" +msgstr "CREATE_REPLICATION_SLOT ... USE_SNAPSHOT kann nicht in einer Subtransaktion aufgerufen werden" + +#: replication/walsender.c:1060 #, c-format msgid "terminating walsender process after promotion" msgstr "WAL-Sender-Prozess wird nach Beförderung abgebrochen" -#: replication/walsender.c:1240 +#: replication/walsender.c:1447 +#, c-format +msgid "cannot execute new commands while WAL sender is in stopping mode" +msgstr "während der WAL-Sender im Stoppmodus ist können keine neuen Befehle ausgeführt werden" + +#: replication/walsender.c:1480 #, c-format msgid "received replication command: %s" msgstr "Replikationsbefehl empfangen: %s" -#: replication/walsender.c:1348 replication/walsender.c:1364 +#: replication/walsender.c:1496 tcop/fastpath.c:279 tcop/postgres.c:1010 +#: tcop/postgres.c:1334 tcop/postgres.c:1594 tcop/postgres.c:2000 +#: tcop/postgres.c:2373 tcop/postgres.c:2452 +#, c-format +msgid "current transaction is aborted, commands ignored until end of transaction block" +msgstr "aktuelle Transaktion wurde abgebrochen, Befehle werden bis zum Ende der Transaktion ignoriert" + +#: replication/walsender.c:1561 +#, c-format +msgid "cannot execute SQL commands in WAL sender for physical replication" +msgstr "im WAL-Sender für physische Replikation können keine SQL-Befehle ausgeführt werden" + +#: replication/walsender.c:1607 replication/walsender.c:1623 #, c-format msgid "unexpected EOF on standby connection" msgstr "unerwartetes EOF auf Standby-Verbindung" -#: replication/walsender.c:1378 +#: replication/walsender.c:1637 #, c-format msgid "unexpected standby message type \"%c\", after receiving CopyDone" msgstr "unerwarteter Standby-Message-Typ »%c«, nach Empfang von CopyDone" -#: replication/walsender.c:1416 +#: replication/walsender.c:1675 #, c-format msgid "invalid standby message type \"%c\"" msgstr "ungültiger Standby-Message-Typ »%c«" -#: replication/walsender.c:1457 +#: 
replication/walsender.c:1716 #, c-format msgid "unexpected message type \"%c\"" msgstr "unerwarteter Message-Typ »%c«" -#: replication/walsender.c:1741 +#: replication/walsender.c:2086 #, c-format msgid "terminating walsender process due to replication timeout" msgstr "WAL-Sender-Prozess wird abgebrochen wegen Zeitüberschreitung bei der Replikation" -#: replication/walsender.c:1829 +#: replication/walsender.c:2172 #, c-format msgid "standby \"%s\" has now caught up with primary" msgstr "Standby-Server »%s« hat jetzt den Primärserver eingeholt" -#: replication/walsender.c:1933 +#: replication/walsender.c:2279 #, c-format msgid "number of requested standby connections exceeds max_wal_senders (currently %d)" msgstr "Anzahl angeforderter Standby-Verbindungen überschreitet max_wal_senders (aktuell %d)" -#: rewrite/rewriteDefine.c:112 rewrite/rewriteDefine.c:967 +#: rewrite/rewriteDefine.c:112 rewrite/rewriteDefine.c:980 #, c-format msgid "rule \"%s\" for relation \"%s\" already exists" msgstr "Regel »%s« für Relation »%s« existiert bereits" @@ -17195,293 +18464,318 @@ msgstr "»%s« ist bereits eine Sicht" msgid "view rule for \"%s\" must be named \"%s\"" msgstr "Sicht-Regel für »%s« muss »%s« heißen" -#: rewrite/rewriteDefine.c:430 +#: rewrite/rewriteDefine.c:428 +#, c-format +msgid "cannot convert partitioned table \"%s\" to a view" +msgstr "kann partitionierte Tabelle »%s« nicht in eine Sicht umwandeln" + +#: rewrite/rewriteDefine.c:434 +#, c-format +msgid "cannot convert partition \"%s\" to a view" +msgstr "kann Partition »%s« nicht in eine Sicht umwandeln" + +#: rewrite/rewriteDefine.c:442 #, c-format msgid "could not convert table \"%s\" to a view because it is not empty" msgstr "konnte Tabelle »%s« nicht in Sicht umwandeln, weil sie nicht leer ist" -#: rewrite/rewriteDefine.c:438 +#: rewrite/rewriteDefine.c:450 #, c-format msgid "could not convert table \"%s\" to a view because it has triggers" msgstr "konnte Tabelle »%s« nicht in Sicht umwandeln, weil sie Trigger hat" -#: rewrite/rewriteDefine.c:440 +#: rewrite/rewriteDefine.c:452 #, c-format msgid "In particular, the table cannot be involved in any foreign key relationships." msgstr "Insbesondere darf die Tabelle nicht in Fremdschlüsselverhältnisse eingebunden sein." 
-#: rewrite/rewriteDefine.c:445 +#: rewrite/rewriteDefine.c:457 #, c-format msgid "could not convert table \"%s\" to a view because it has indexes" msgstr "konnte Tabelle »%s« nicht in Sicht umwandeln, weil sie Indexe hat" -#: rewrite/rewriteDefine.c:451 +#: rewrite/rewriteDefine.c:463 #, c-format msgid "could not convert table \"%s\" to a view because it has child tables" msgstr "konnte Tabelle »%s« nicht in Sicht umwandeln, weil sie abgeleitete Tabellen hat" -#: rewrite/rewriteDefine.c:457 +#: rewrite/rewriteDefine.c:469 #, c-format msgid "could not convert table \"%s\" to a view because it has row security enabled" msgstr "konnte Tabelle »%s« nicht in Sicht umwandeln, weil sie Sicherheit auf Zeilenebene eingeschaltet hat" -#: rewrite/rewriteDefine.c:463 +#: rewrite/rewriteDefine.c:475 #, c-format msgid "could not convert table \"%s\" to a view because it has row security policies" msgstr "konnte Tabelle »%s« nicht in Sicht umwandeln, weil sie Policys für Sicherheit auf Zeilenebene hat" -#: rewrite/rewriteDefine.c:490 +#: rewrite/rewriteDefine.c:502 #, c-format msgid "cannot have multiple RETURNING lists in a rule" msgstr "Regel kann nicht mehrere RETURNING-Listen enthalten" -#: rewrite/rewriteDefine.c:495 +#: rewrite/rewriteDefine.c:507 #, c-format msgid "RETURNING lists are not supported in conditional rules" msgstr "RETURNING-Listen werden in Regeln mit Bedingung nicht unterstützt" -#: rewrite/rewriteDefine.c:499 +#: rewrite/rewriteDefine.c:511 #, c-format msgid "RETURNING lists are not supported in non-INSTEAD rules" msgstr "RETURNING-Listen werden nur in INSTEAD-Regeln unterstützt" -#: rewrite/rewriteDefine.c:664 +#: rewrite/rewriteDefine.c:675 #, c-format msgid "SELECT rule's target list has too many entries" msgstr "Targetliste von SELECT-Regel hat zu viele Einträge" -#: rewrite/rewriteDefine.c:665 +#: rewrite/rewriteDefine.c:676 #, c-format msgid "RETURNING list has too many entries" msgstr "RETURNING-Liste hat zu viele Einträge" -#: rewrite/rewriteDefine.c:692 +#: rewrite/rewriteDefine.c:703 #, c-format msgid "cannot convert relation containing dropped columns to view" msgstr "kann Relation mit gelöschten Spalten nicht in Sicht umwandeln" -#: rewrite/rewriteDefine.c:693 +#: rewrite/rewriteDefine.c:704 #, c-format msgid "cannot create a RETURNING list for a relation containing dropped columns" msgstr "für eine Relation mit gelöschten Spalten kann keine RETURNING-Liste erzeugt werden" -#: rewrite/rewriteDefine.c:699 +#: rewrite/rewriteDefine.c:710 #, c-format msgid "SELECT rule's target entry %d has different column name from column \"%s\"" msgstr "Spaltenname in Targeteintrag %d von SELECT-Regel unterscheidet sich von Spalte »%s«" -#: rewrite/rewriteDefine.c:701 +#: rewrite/rewriteDefine.c:712 #, c-format msgid "SELECT target entry is named \"%s\"." msgstr "SELECT-Targeteintrag heißt »%s«." -#: rewrite/rewriteDefine.c:710 +#: rewrite/rewriteDefine.c:721 #, c-format msgid "SELECT rule's target entry %d has different type from column \"%s\"" msgstr "Typ von Targeteintrag %d von SELECT-Regel unterscheidet sich von Spalte »%s«" -#: rewrite/rewriteDefine.c:712 +#: rewrite/rewriteDefine.c:723 #, c-format msgid "RETURNING list's entry %d has different type from column \"%s\"" msgstr "Eintrag %d in RETURNING-Liste hat anderen Typ als Spalte »%s«" -#: rewrite/rewriteDefine.c:715 rewrite/rewriteDefine.c:739 +#: rewrite/rewriteDefine.c:726 rewrite/rewriteDefine.c:750 #, c-format msgid "SELECT target entry has type %s, but column has type %s." 
msgstr "SELECT-Targeteintrag hat Typ %s, aber Spalte hat Typ %s." -#: rewrite/rewriteDefine.c:718 rewrite/rewriteDefine.c:743 +#: rewrite/rewriteDefine.c:729 rewrite/rewriteDefine.c:754 #, c-format msgid "RETURNING list entry has type %s, but column has type %s." msgstr "Eintrag in RETURNING-Liste hat Typ %s, aber Spalte hat Typ %s." -#: rewrite/rewriteDefine.c:734 +#: rewrite/rewriteDefine.c:745 #, c-format msgid "SELECT rule's target entry %d has different size from column \"%s\"" msgstr "Größe von Targeteintrag %d von SELECT-Regel unterscheidet sich von Spalte »%s«" -#: rewrite/rewriteDefine.c:736 +#: rewrite/rewriteDefine.c:747 #, c-format msgid "RETURNING list's entry %d has different size from column \"%s\"" msgstr "Eintrag %d in RETURNING-Liste hat andere Größe als Spalte »%s«" -#: rewrite/rewriteDefine.c:753 +#: rewrite/rewriteDefine.c:764 #, c-format msgid "SELECT rule's target list has too few entries" msgstr "Targetliste von SELECT-Regeln hat zu wenige Einträge" -#: rewrite/rewriteDefine.c:754 +#: rewrite/rewriteDefine.c:765 #, c-format msgid "RETURNING list has too few entries" msgstr "RETURNING-Liste hat zu wenige Einträge" -#: rewrite/rewriteDefine.c:846 rewrite/rewriteDefine.c:958 +#: rewrite/rewriteDefine.c:857 rewrite/rewriteDefine.c:971 #: rewrite/rewriteSupport.c:109 #, c-format msgid "rule \"%s\" for relation \"%s\" does not exist" msgstr "Regel »%s« für Relation »%s« existiert nicht" -#: rewrite/rewriteDefine.c:977 +#: rewrite/rewriteDefine.c:990 #, c-format msgid "renaming an ON SELECT rule is not allowed" msgstr "Umbenennen einer ON-SELECT-Regel ist nicht erlaubt" -#: rewrite/rewriteHandler.c:541 +#: rewrite/rewriteHandler.c:540 #, c-format msgid "WITH query name \"%s\" appears in both a rule action and the query being rewritten" msgstr "WITH-Anfragename »%s« erscheint sowohl in der Regelaktion als auch in der umzuschreibenden Anfrage" -#: rewrite/rewriteHandler.c:601 +#: rewrite/rewriteHandler.c:600 #, c-format msgid "cannot have RETURNING lists in multiple rules" msgstr "RETURNING-Listen können nicht in mehreren Regeln auftreten" -#: rewrite/rewriteHandler.c:941 rewrite/rewriteHandler.c:959 +#: rewrite/rewriteHandler.c:822 #, c-format -msgid "multiple assignments to same column \"%s\"" -msgstr "mehrere Zuweisungen zur selben Spalte »%s«" +msgid "cannot insert into column \"%s\"" +msgstr "kann nicht in Spalte »%s« einfügen" -#: rewrite/rewriteHandler.c:1735 rewrite/rewriteHandler.c:3349 +#: rewrite/rewriteHandler.c:823 rewrite/rewriteHandler.c:838 #, c-format -msgid "infinite recursion detected in rules for relation \"%s\"" -msgstr "unendliche Rekursion entdeckt in Regeln für Relation »%s«" +msgid "Column \"%s\" is an identity column defined as GENERATED ALWAYS." +msgstr "Spalte »%s« ist eine Identitätsspalte, die als GENERATED ALWAYS definiert ist." + +#: rewrite/rewriteHandler.c:825 +#, c-format +msgid "Use OVERRIDING SYSTEM VALUE to override." +msgstr "Verwenden Sie OVERRIDING SYSTEM VALUE, um diese Einschränkung außer Kraft zu setzen." 
-#: rewrite/rewriteHandler.c:1820 +#: rewrite/rewriteHandler.c:837 +#, c-format +msgid "column \"%s\" can only be updated to DEFAULT" +msgstr "Spalte »%s« kann nur auf DEFAULT aktualisiert werden" + +#: rewrite/rewriteHandler.c:999 rewrite/rewriteHandler.c:1017 +#, c-format +msgid "multiple assignments to same column \"%s\"" +msgstr "mehrere Zuweisungen zur selben Spalte »%s«" + +#: rewrite/rewriteHandler.c:1909 #, c-format msgid "infinite recursion detected in policy for relation \"%s\"" msgstr "unendliche Rekursion entdeckt in Policys für Relation »%s«" -#: rewrite/rewriteHandler.c:2137 +#: rewrite/rewriteHandler.c:2226 msgid "Junk view columns are not updatable." msgstr "Junk-Sichtspalten sind nicht aktualisierbar." -#: rewrite/rewriteHandler.c:2142 +#: rewrite/rewriteHandler.c:2231 msgid "View columns that are not columns of their base relation are not updatable." msgstr "Sichtspalten, die nicht Spalten ihrer Basisrelation sind, sind nicht aktualisierbar." -#: rewrite/rewriteHandler.c:2145 +#: rewrite/rewriteHandler.c:2234 msgid "View columns that refer to system columns are not updatable." msgstr "Sichtspalten, die auf Systemspalten verweisen, sind nicht aktualisierbar." -#: rewrite/rewriteHandler.c:2148 +#: rewrite/rewriteHandler.c:2237 msgid "View columns that return whole-row references are not updatable." msgstr "Sichtspalten, die Verweise auf ganze Zeilen zurückgeben, sind nicht aktualisierbar." -#: rewrite/rewriteHandler.c:2206 +#: rewrite/rewriteHandler.c:2295 msgid "Views containing DISTINCT are not automatically updatable." msgstr "Sichten, die DISTINCT enthalten, sind nicht automatisch aktualisierbar." -#: rewrite/rewriteHandler.c:2209 +#: rewrite/rewriteHandler.c:2298 msgid "Views containing GROUP BY are not automatically updatable." msgstr "Sichten, die GROUP BY enthalten, sind nicht automatisch aktualisierbar." -#: rewrite/rewriteHandler.c:2212 +#: rewrite/rewriteHandler.c:2301 msgid "Views containing HAVING are not automatically updatable." msgstr "Sichten, die HAVING enthalten, sind nicht automatisch aktualisierbar." -#: rewrite/rewriteHandler.c:2215 +#: rewrite/rewriteHandler.c:2304 msgid "Views containing UNION, INTERSECT, or EXCEPT are not automatically updatable." msgstr "Sichten, die UNION, INTERSECT oder EXCEPT enthalten, sind nicht automatisch aktualisierbar." -#: rewrite/rewriteHandler.c:2218 +#: rewrite/rewriteHandler.c:2307 msgid "Views containing WITH are not automatically updatable." msgstr "Sichten, die WITH enthalten, sind nicht automatisch aktualisierbar." -#: rewrite/rewriteHandler.c:2221 +#: rewrite/rewriteHandler.c:2310 msgid "Views containing LIMIT or OFFSET are not automatically updatable." msgstr "Sichten, die LIMIT oder OFFSET enthalten, sind nicht automatisch aktualisierbar." -#: rewrite/rewriteHandler.c:2233 +#: rewrite/rewriteHandler.c:2322 msgid "Views that return aggregate functions are not automatically updatable." msgstr "Sichten, die Aggregatfunktionen zurückgeben, sind nicht automatisch aktualisierbar." -#: rewrite/rewriteHandler.c:2236 +#: rewrite/rewriteHandler.c:2325 msgid "Views that return window functions are not automatically updatable." msgstr "Sichten, die Fensterfunktionen zurückgeben, sind nicht automatisch aktualisierbar." -#: rewrite/rewriteHandler.c:2239 +#: rewrite/rewriteHandler.c:2328 msgid "Views that return set-returning functions are not automatically updatable." msgstr "Sichten, die Funktionen mit Ergebnismenge zurückgeben, sind nicht automatisch aktualisierbar." 
-#: rewrite/rewriteHandler.c:2246 rewrite/rewriteHandler.c:2250 -#: rewrite/rewriteHandler.c:2258 +#: rewrite/rewriteHandler.c:2335 rewrite/rewriteHandler.c:2339 +#: rewrite/rewriteHandler.c:2347 msgid "Views that do not select from a single table or view are not automatically updatable." msgstr "Sichten, die nicht aus einer einzigen Tabelle oder Sicht lesen, sind nicht automatisch aktualisierbar." -#: rewrite/rewriteHandler.c:2261 +#: rewrite/rewriteHandler.c:2350 msgid "Views containing TABLESAMPLE are not automatically updatable." msgstr "Sichten, die TABLESAMPLE enthalten, sind nicht automatisch aktualisierbar." -#: rewrite/rewriteHandler.c:2285 +#: rewrite/rewriteHandler.c:2374 msgid "Views that have no updatable columns are not automatically updatable." msgstr "Sichten, die keine aktualisierbaren Spalten haben, sind nicht automatisch aktualisierbar." -#: rewrite/rewriteHandler.c:2737 +#: rewrite/rewriteHandler.c:2828 #, c-format msgid "cannot insert into column \"%s\" of view \"%s\"" msgstr "kann nicht in Spalte »%s« von Sicht »%s« einfügen" -#: rewrite/rewriteHandler.c:2745 +#: rewrite/rewriteHandler.c:2836 #, c-format msgid "cannot update column \"%s\" of view \"%s\"" msgstr "kann Spalte »%s« von Sicht »%s« nicht aktualisieren" -#: rewrite/rewriteHandler.c:3148 +#: rewrite/rewriteHandler.c:3219 #, c-format msgid "DO INSTEAD NOTHING rules are not supported for data-modifying statements in WITH" msgstr "DO INSTEAD NOTHING-Regeln werden für datenmodifizierende Anweisungen in WITH nicht unterstützt" -#: rewrite/rewriteHandler.c:3162 +#: rewrite/rewriteHandler.c:3233 #, c-format msgid "conditional DO INSTEAD rules are not supported for data-modifying statements in WITH" msgstr "Do INSTEAD-Regeln mit Bedingung werden für datenmodifizierende Anweisungen in WITH nicht unterstützt" -#: rewrite/rewriteHandler.c:3166 +#: rewrite/rewriteHandler.c:3237 #, c-format msgid "DO ALSO rules are not supported for data-modifying statements in WITH" msgstr "DO ALSO-Regeln werden für datenmodifizierende Anweisungen in WITH nicht unterstützt" -#: rewrite/rewriteHandler.c:3171 +#: rewrite/rewriteHandler.c:3242 #, c-format msgid "multi-statement DO INSTEAD rules are not supported for data-modifying statements in WITH" msgstr "DO INSTEAD-Regeln mit mehreren Anweisungen werden für datenmodifizierende Anweisungen in WITH nicht unterstützt" -#: rewrite/rewriteHandler.c:3386 +#: rewrite/rewriteHandler.c:3461 #, c-format msgid "cannot perform INSERT RETURNING on relation \"%s\"" msgstr "INSERT RETURNING kann in Relation »%s« nicht ausgeführt werden" -#: rewrite/rewriteHandler.c:3388 +#: rewrite/rewriteHandler.c:3463 #, c-format msgid "You need an unconditional ON INSERT DO INSTEAD rule with a RETURNING clause." msgstr "Sie benötigen eine ON INSERT DO INSTEAD Regel ohne Bedingung, mit RETURNING-Klausel." -#: rewrite/rewriteHandler.c:3393 +#: rewrite/rewriteHandler.c:3468 #, c-format msgid "cannot perform UPDATE RETURNING on relation \"%s\"" msgstr "UPDATE RETURNING kann in Relation »%s« nicht ausgeführt werden" -#: rewrite/rewriteHandler.c:3395 +#: rewrite/rewriteHandler.c:3470 #, c-format msgid "You need an unconditional ON UPDATE DO INSTEAD rule with a RETURNING clause." msgstr "Sie benötigen eine ON UPDATE DO INSTEAD Regel ohne Bedingung, mit RETURNING-Klausel." 
-#: rewrite/rewriteHandler.c:3400 +#: rewrite/rewriteHandler.c:3475 #, c-format msgid "cannot perform DELETE RETURNING on relation \"%s\"" msgstr "DELETE RETURNING kann in Relation »%s« nicht ausgeführt werden" -#: rewrite/rewriteHandler.c:3402 +#: rewrite/rewriteHandler.c:3477 #, c-format msgid "You need an unconditional ON DELETE DO INSTEAD rule with a RETURNING clause." msgstr "Sie benötigen eine ON DELETE DO INSTEAD Regel ohne Bedingung, mit RETURNING-Klausel." -#: rewrite/rewriteHandler.c:3420 +#: rewrite/rewriteHandler.c:3495 #, c-format msgid "INSERT with ON CONFLICT clause cannot be used with table that has INSERT or UPDATE rules" msgstr "INSERT mit ON-CONFLICT-Klausel kann nicht mit Tabelle verwendet werden, die INSERT- oder UPDATE-Regeln hat" -#: rewrite/rewriteHandler.c:3477 +#: rewrite/rewriteHandler.c:3552 #, c-format msgid "WITH cannot be used in a query that is rewritten by rules into multiple queries" msgstr "WITH kann nicht in einer Anfrage verwendet werden, die durch Regeln in mehrere Anfragen umgeschrieben wird" @@ -17496,124 +18790,124 @@ msgstr "Utility-Anweisungen mit Bedingung sind nicht implementiert" msgid "WHERE CURRENT OF on a view is not implemented" msgstr "WHERE CURRENT OF mit einer Sicht ist nicht implementiert" -#: rewrite/rewriteManip.c:1434 +#: rewrite/rewriteManip.c:1503 #, c-format msgid "NEW variables in ON UPDATE rules cannot reference columns that are part of a multiple assignment in the subject UPDATE command" msgstr "NEW-Variablen in ON UPDATE-Regeln können nicht auf Spalten verweisen, die Teil einer Mehrfachzuweisung in dem UPDATE-Befehl sind" -#: scan.l:432 +#: scan.l:436 msgid "unterminated /* comment" msgstr "/*-Kommentar nicht abgeschlossen" -#: scan.l:461 +#: scan.l:465 msgid "unterminated bit string literal" msgstr "Bitkettenkonstante nicht abgeschlossen" -#: scan.l:482 +#: scan.l:486 msgid "unterminated hexadecimal string literal" msgstr "hexadezimale Zeichenkette nicht abgeschlossen" -#: scan.l:532 +#: scan.l:536 #, c-format msgid "unsafe use of string constant with Unicode escapes" msgstr "unsichere Verwendung von Zeichenkette mit Unicode-Escapes" -#: scan.l:533 +#: scan.l:537 #, c-format msgid "String constants with Unicode escapes cannot be used when standard_conforming_strings is off." msgstr "Zeichenketten mit Unicode-Escapes können nicht verwendet werden, wenn standard_conforming_strings aus ist." -#: scan.l:579 scan.l:778 +#: scan.l:583 scan.l:782 msgid "invalid Unicode escape character" msgstr "ungültiges Unicode-Escape-Zeichen" -#: scan.l:605 scan.l:613 scan.l:621 scan.l:622 scan.l:623 scan.l:1337 -#: scan.l:1364 scan.l:1368 scan.l:1406 scan.l:1410 scan.l:1432 scan.l:1442 +#: scan.l:609 scan.l:617 scan.l:625 scan.l:626 scan.l:627 scan.l:1339 +#: scan.l:1366 scan.l:1370 scan.l:1408 scan.l:1412 scan.l:1434 scan.l:1444 msgid "invalid Unicode surrogate pair" msgstr "ungültiges Unicode-Surrogatpaar" -#: scan.l:627 +#: scan.l:631 #, c-format msgid "invalid Unicode escape" msgstr "ungültiges Unicode-Escape" -#: scan.l:628 +#: scan.l:632 #, c-format msgid "Unicode escapes must be \\uXXXX or \\UXXXXXXXX." msgstr "Unicode-Escapes müssen \\uXXXX oder \\UXXXXXXXX sein." -#: scan.l:639 +#: scan.l:643 #, c-format msgid "unsafe use of \\' in a string literal" msgstr "unsichere Verwendung von \\' in Zeichenkettenkonstante" -#: scan.l:640 +#: scan.l:644 #, c-format msgid "Use '' to write quotes in strings. \\' is insecure in client-only encodings." msgstr "Verwenden Sie '', um Quotes in Zeichenketten zu schreiben. 
\\' ist in bestimmten Client-seitigen Kodierungen unsicher." -#: scan.l:715 +#: scan.l:719 msgid "unterminated dollar-quoted string" msgstr "Dollar-Quotes nicht abgeschlossen" -#: scan.l:732 scan.l:758 scan.l:773 +#: scan.l:736 scan.l:762 scan.l:777 msgid "zero-length delimited identifier" msgstr "Bezeichner in Anführungszeichen hat Länge null" -#: scan.l:793 syncrep_scanner.l:87 +#: scan.l:797 syncrep_scanner.l:91 msgid "unterminated quoted identifier" msgstr "Bezeichner in Anführungszeichen nicht abgeschlossen" -#: scan.l:924 +#: scan.l:928 msgid "operator too long" msgstr "Operator zu lang" #. translator: %s is typically the translation of "syntax error" -#: scan.l:1077 +#: scan.l:1084 #, c-format msgid "%s at end of input" msgstr "%s am Ende der Eingabe" #. translator: first %s is typically the translation of "syntax error" -#: scan.l:1085 +#: scan.l:1092 #, c-format msgid "%s at or near \"%s\"" msgstr "%s bei »%s«" -#: scan.l:1251 scan.l:1283 +#: scan.l:1253 scan.l:1285 msgid "Unicode escape values cannot be used for code point values above 007F when the server encoding is not UTF8" msgstr "Unicode-Escape-Werte können nicht für Code-Punkt-Werte über 007F verwendet werden, wenn die Serverkodierung nicht UTF8 ist" -#: scan.l:1279 scan.l:1424 +#: scan.l:1281 scan.l:1426 msgid "invalid Unicode escape value" msgstr "ungültiger Unicode-Escape-Wert" -#: scan.l:1488 +#: scan.l:1490 #, c-format msgid "nonstandard use of \\' in a string literal" msgstr "nicht standardkonforme Verwendung von \\' in Zeichenkettenkonstante" -#: scan.l:1489 +#: scan.l:1491 #, c-format msgid "Use '' to write quotes in strings, or use the escape string syntax (E'...')." msgstr "Verwenden Sie '', um Quotes in Zeichenketten zu schreiben, oder verwenden Sie die Syntax für Escape-Zeichenketten (E'...')." -#: scan.l:1498 +#: scan.l:1500 #, c-format msgid "nonstandard use of \\\\ in a string literal" msgstr "nicht standardkonforme Verwendung von \\\\ in Zeichenkettenkonstante" -#: scan.l:1499 +#: scan.l:1501 #, c-format msgid "Use the escape string syntax for backslashes, e.g., E'\\\\'." msgstr "Verwenden Sie die Syntax für Escape-Zeichenketten für Backslashes, z.B. E'\\\\'." -#: scan.l:1513 +#: scan.l:1515 #, c-format msgid "nonstandard use of escape in a string literal" msgstr "nicht standardkonforme Verwendung von Escape in Zeichenkettenkonstante" -#: scan.l:1514 +#: scan.l:1516 #, c-format msgid "Use the escape string syntax for escapes, e.g., E'\\r\\n'." msgstr "Verwenden Sie die Syntax für Escape-Zeichenketten, z.B. E'\\r\\n'." 
@@ -17644,6 +18938,47 @@ msgstr "unbekannter Snowball-Parameter: »%s«" msgid "missing Language parameter" msgstr "Parameter »Language« fehlt" +#: statistics/dependencies.c:534 +#, c-format +msgid "invalid zero-length item array in MVDependencies" +msgstr "ungültiges Array mit Länge null in MVDependencies" + +#: statistics/dependencies.c:672 statistics/dependencies.c:725 +#: statistics/mvdistinct.c:341 statistics/mvdistinct.c:394 +#: utils/adt/pseudotypes.c:94 utils/adt/pseudotypes.c:122 +#: utils/adt/pseudotypes.c:147 utils/adt/pseudotypes.c:171 +#: utils/adt/pseudotypes.c:282 utils/adt/pseudotypes.c:307 +#: utils/adt/pseudotypes.c:335 utils/adt/pseudotypes.c:363 +#: utils/adt/pseudotypes.c:393 +#, c-format +msgid "cannot accept a value of type %s" +msgstr "kann keinen Wert vom Typ %s annehmen" + +#: statistics/extended_stats.c:104 +#, c-format +msgid "statistics object \"%s.%s\" could not be computed for relation \"%s.%s\"" +msgstr "Statistikobjekt »%s.%s« konnte für Relation »%s.%s« nicht berechnet werden" + +#: statistics/mvdistinct.c:262 +#, c-format +msgid "invalid ndistinct magic %08x (expected %08x)" +msgstr "ungültige ndistinct-Magic %08x (erwartet wurde %08x)" + +#: statistics/mvdistinct.c:267 +#, c-format +msgid "invalid ndistinct type %d (expected %d)" +msgstr "ungültiger ndistinct-Typ %d (erwartet wurde %d)" + +#: statistics/mvdistinct.c:272 +#, c-format +msgid "invalid zero-length item array in MVNDistinct" +msgstr "ungültiges Array mit Länge null in MVNDistinct" + +#: statistics/mvdistinct.c:281 +#, c-format +msgid "invalid MVNDistinct size %zd (expected at least %zd)" +msgstr "ungültige MVNDistinct-Größe %zd (erwartet wurde mindestens %zd)" + #: storage/buffer/bufmgr.c:544 storage/buffer/bufmgr.c:657 #, c-format msgid "cannot access temporary tables of other sessions" @@ -17664,22 +18999,22 @@ msgstr "Das scheint mit fehlerhaften Kernels vorzukommen; Sie sollten eine Syste msgid "invalid page in block %u of relation %s; zeroing out page" msgstr "ungültige Seite in Block %u von Relation %s; fülle Seite mit Nullen" -#: storage/buffer/bufmgr.c:3996 +#: storage/buffer/bufmgr.c:4013 #, c-format msgid "could not write block %u of %s" msgstr "konnte Block %u von %s nicht schreiben" -#: storage/buffer/bufmgr.c:3998 +#: storage/buffer/bufmgr.c:4015 #, c-format msgid "Multiple failures --- write error might be permanent." msgstr "Mehrere Fehlschläge --- Schreibfehler ist möglicherweise dauerhaft." 
-#: storage/buffer/bufmgr.c:4019 storage/buffer/bufmgr.c:4038 +#: storage/buffer/bufmgr.c:4036 storage/buffer/bufmgr.c:4055 #, c-format msgid "writing block %u of relation %s" msgstr "schreibe Block %u von Relation %s" -#: storage/buffer/bufmgr.c:4339 +#: storage/buffer/bufmgr.c:4358 #, c-format msgid "snapshot too old" msgstr "Snapshot zu alt" @@ -17694,189 +19029,234 @@ msgstr "kein leerer lokaler Puffer verfügbar" msgid "cannot access temporary tables during a parallel operation" msgstr "während einer parallelen Operation kann nicht auf temporäre Tabellen zugegriffen werden" -#: storage/file/fd.c:443 storage/file/fd.c:515 storage/file/fd.c:551 +#: storage/file/buffile.c:318 +#, fuzzy, c-format +#| msgid "could not open file \"%s\"" +msgid "could not open BufFile \"%s\"" +msgstr "konnte Datei »%s« nicht öffnen" + +#: storage/file/fd.c:451 storage/file/fd.c:523 storage/file/fd.c:559 #, c-format msgid "could not flush dirty data: %m" msgstr "konnte schmutzige Daten nicht flushen: %m" -#: storage/file/fd.c:473 +#: storage/file/fd.c:481 #, c-format msgid "could not determine dirty data size: %m" msgstr "konnte Größe der schmutzigen Daten nicht bestimmen: %m" -#: storage/file/fd.c:525 +#: storage/file/fd.c:533 #, c-format msgid "could not munmap() while flushing data: %m" msgstr "munmap() fehlgeschlagen beim Flushen von Daten: %m" -#: storage/file/fd.c:689 +#: storage/file/fd.c:734 #, c-format msgid "could not link file \"%s\" to \"%s\": %m" msgstr "konnte Datei »%s« nicht nach »%s« linken: %m" -#: storage/file/fd.c:783 +#: storage/file/fd.c:828 #, c-format msgid "getrlimit failed: %m" msgstr "getrlimit fehlgeschlagen: %m" -#: storage/file/fd.c:873 +#: storage/file/fd.c:918 #, c-format msgid "insufficient file descriptors available to start server process" msgstr "nicht genug Dateideskriptoren verfügbar, um Serverprozess zu starten" -#: storage/file/fd.c:874 +#: storage/file/fd.c:919 #, c-format msgid "System allows %d, we need at least %d." msgstr "System erlaubt %d, wir benötigen mindestens %d." 
-#: storage/file/fd.c:915 storage/file/fd.c:2078 storage/file/fd.c:2171 -#: storage/file/fd.c:2319 +#: storage/file/fd.c:970 storage/file/fd.c:2371 storage/file/fd.c:2473 +#: storage/file/fd.c:2625 #, c-format msgid "out of file descriptors: %m; release and retry" msgstr "keine Dateideskriptoren mehr: %m; freigeben und nochmal versuchen" -#: storage/file/fd.c:1520 +#: storage/file/fd.c:1312 #, c-format msgid "temporary file: path \"%s\", size %lu" msgstr "temporäre Datei: Pfad »%s«, Größe %lu" -#: storage/file/fd.c:1717 +#: storage/file/fd.c:1444 +#, c-format +msgid "cannot create temporary directory \"%s\": %m" +msgstr "konnte temporäres Verzeichnis »%s« nicht erzeugen: %m" + +#: storage/file/fd.c:1451 +#, c-format +msgid "cannot create temporary subdirectory \"%s\": %m" +msgstr "konnte temporäres Unterverzeichnis »%s« nicht erzeugen: %m" + +#: storage/file/fd.c:1644 +#, c-format +msgid "could not create temporary file \"%s\": %m" +msgstr "konnte temporäre Datei »%s« nicht erzeugen: %m" + +#: storage/file/fd.c:1679 +#, fuzzy, c-format +#| msgid "could not open temporary file \"%s\": %s\n" +msgid "could not open temporary file \"%s\": %m" +msgstr "konnte temporäre Datei »%s« nicht öffnen: %s\n" + +#: storage/file/fd.c:1720 +#, fuzzy, c-format +#| msgid "could not open temporary file \"%s\": %s\n" +msgid "cannot unlink temporary file \"%s\": %m" +msgstr "konnte temporäre Datei »%s« nicht öffnen: %s\n" + +#: storage/file/fd.c:2002 #, c-format msgid "temporary file size exceeds temp_file_limit (%dkB)" msgstr "Größe der temporären Datei überschreitet temp_file_limit (%dkB)" -#: storage/file/fd.c:2054 storage/file/fd.c:2104 +#: storage/file/fd.c:2347 storage/file/fd.c:2406 #, c-format msgid "exceeded maxAllocatedDescs (%d) while trying to open file \"%s\"" msgstr "maxAllocatedDescs (%d) überschritten beim Versuch, die Datei »%s« zu öffnen" -#: storage/file/fd.c:2144 +#: storage/file/fd.c:2446 #, c-format msgid "exceeded maxAllocatedDescs (%d) while trying to execute command \"%s\"" msgstr "maxAllocatedDescs (%d) überschritten beim Versuch, den Befehl »%s« auszuführen" -#: storage/file/fd.c:2295 +#: storage/file/fd.c:2601 #, c-format msgid "exceeded maxAllocatedDescs (%d) while trying to open directory \"%s\"" msgstr "maxAllocatedDescs (%d) überschritten beim Versuch, das Verzeichnis »%s« zu öffnen" -#: storage/file/fd.c:2381 +#: storage/file/fd.c:2692 #, c-format msgid "could not read directory \"%s\": %m" msgstr "konnte Verzeichnis »%s« nicht lesen: %m" -#: storage/ipc/dsm.c:364 +#: storage/file/fd.c:3124 +#, fuzzy, c-format +#| msgid "could not locate temporary directory: %s\n" +msgid "unexpected file found in temporary-files directory: \"%s\"" +msgstr "konnte temporäres Verzeichnis nicht finden: %s\n" + +#: storage/file/fd.c:3443 +#, fuzzy, c-format +#| msgid "could not read directory \"%s\": %m" +msgid "could not rmdir directory \"%s\": %m" +msgstr "konnte Verzeichnis »%s« nicht lesen: %m" + +#: storage/file/sharedfileset.c:93 +#, fuzzy, c-format +#| msgid "could not attach to dynamic shared area" +msgid "could not attach to a SharedFileSet that is already destroyed" +msgstr "konnte nicht an dynamische Shared Area anbinden" + +#: storage/ipc/dsm.c:351 #, c-format msgid "dynamic shared memory control segment is corrupt" msgstr "Kontrollsegment von dynamischem Shared Memory ist verfälscht" -#: storage/ipc/dsm.c:411 +#: storage/ipc/dsm.c:398 #, c-format msgid "dynamic shared memory is disabled" msgstr "dynamisches Shared-Memory ist abgeschaltet" -#: storage/ipc/dsm.c:412 +#: 
storage/ipc/dsm.c:399 #, c-format msgid "Set dynamic_shared_memory_type to a value other than \"none\"." msgstr "Setzen Sie dynamic_shared_memory_type auf einen anderen Wert als »none«." -#: storage/ipc/dsm.c:432 +#: storage/ipc/dsm.c:419 #, c-format msgid "dynamic shared memory control segment is not valid" msgstr "Kontrollsegment von dynamischem Shared Memory ist ungültig" -#: storage/ipc/dsm.c:521 +#: storage/ipc/dsm.c:515 #, c-format msgid "too many dynamic shared memory segments" msgstr "zu viele dynamische Shared-Memory-Segmente" -#: storage/ipc/dsm_impl.c:260 storage/ipc/dsm_impl.c:360 -#: storage/ipc/dsm_impl.c:532 storage/ipc/dsm_impl.c:647 -#: storage/ipc/dsm_impl.c:818 storage/ipc/dsm_impl.c:960 +#: storage/ipc/dsm_impl.c:263 storage/ipc/dsm_impl.c:364 +#: storage/ipc/dsm_impl.c:581 storage/ipc/dsm_impl.c:696 +#: storage/ipc/dsm_impl.c:867 storage/ipc/dsm_impl.c:1011 #, c-format msgid "could not unmap shared memory segment \"%s\": %m" msgstr "konnte Shared-Memory-Segment »%s« nicht unmappen: %m" -#: storage/ipc/dsm_impl.c:270 storage/ipc/dsm_impl.c:542 -#: storage/ipc/dsm_impl.c:657 storage/ipc/dsm_impl.c:828 +#: storage/ipc/dsm_impl.c:273 storage/ipc/dsm_impl.c:591 +#: storage/ipc/dsm_impl.c:706 storage/ipc/dsm_impl.c:877 #, c-format msgid "could not remove shared memory segment \"%s\": %m" msgstr "konnte Shared-Memory-Segment »%s« nicht entfernen: %m" -#: storage/ipc/dsm_impl.c:291 storage/ipc/dsm_impl.c:728 -#: storage/ipc/dsm_impl.c:842 +#: storage/ipc/dsm_impl.c:294 storage/ipc/dsm_impl.c:777 +#: storage/ipc/dsm_impl.c:891 #, c-format msgid "could not open shared memory segment \"%s\": %m" msgstr "konnte Shared-Memory-Segment »%s« nicht öffnen: %m" -#: storage/ipc/dsm_impl.c:315 storage/ipc/dsm_impl.c:558 -#: storage/ipc/dsm_impl.c:773 storage/ipc/dsm_impl.c:866 +#: storage/ipc/dsm_impl.c:318 storage/ipc/dsm_impl.c:607 +#: storage/ipc/dsm_impl.c:822 storage/ipc/dsm_impl.c:915 #, c-format msgid "could not stat shared memory segment \"%s\": %m" msgstr "konnte »stat« für Shared-Memory-Segment »%s« nicht ausführen: %m" -#: storage/ipc/dsm_impl.c:334 storage/ipc/dsm_impl.c:885 -#: storage/ipc/dsm_impl.c:933 +#: storage/ipc/dsm_impl.c:338 storage/ipc/dsm_impl.c:934 +#: storage/ipc/dsm_impl.c:984 #, c-format msgid "could not resize shared memory segment \"%s\" to %zu bytes: %m" msgstr "konnte Größe des Shared-Memory-Segments »%s« nicht auf %zu Bytes ändern: %m" -#: storage/ipc/dsm_impl.c:384 storage/ipc/dsm_impl.c:579 -#: storage/ipc/dsm_impl.c:749 storage/ipc/dsm_impl.c:984 +#: storage/ipc/dsm_impl.c:388 storage/ipc/dsm_impl.c:628 +#: storage/ipc/dsm_impl.c:798 storage/ipc/dsm_impl.c:1035 #, c-format msgid "could not map shared memory segment \"%s\": %m" msgstr "konnte Shared-Memory-Segment »%s« nicht mappen: %m" -#: storage/ipc/dsm_impl.c:514 +#: storage/ipc/dsm_impl.c:563 #, c-format msgid "could not get shared memory segment: %m" msgstr "konnte Shared-Memory-Segment nicht finden: %m" -#: storage/ipc/dsm_impl.c:713 +#: storage/ipc/dsm_impl.c:762 #, c-format msgid "could not create shared memory segment \"%s\": %m" msgstr "konnte Shared-Memory-Segment »%s« nicht erzeugen: %m" -#: storage/ipc/dsm_impl.c:1026 storage/ipc/dsm_impl.c:1074 +#: storage/ipc/dsm_impl.c:1077 storage/ipc/dsm_impl.c:1125 #, c-format msgid "could not duplicate handle for \"%s\": %m" msgstr "konnte Handle für »%s« nicht duplizieren: %m" -#: storage/ipc/latch.c:780 +#: storage/ipc/latch.c:829 #, c-format msgid "epoll_ctl() failed: %m" msgstr "epoll_ctl() fehlgeschlagen: %m" -#: storage/ipc/latch.c:1009 +#: 
storage/ipc/latch.c:1060 #, c-format msgid "epoll_wait() failed: %m" msgstr "epoll_wait() fehlgeschlagen: %m" -#: storage/ipc/latch.c:1129 +#: storage/ipc/latch.c:1182 #, c-format msgid "poll() failed: %m" msgstr "poll() fehlgeschlagen: %m" -#: storage/ipc/latch.c:1287 -#, c-format -msgid "select() failed: %m" -msgstr "select() fehlgeschlagen: %m" - -#: storage/ipc/shm_toc.c:108 storage/ipc/shm_toc.c:189 storage/lmgr/lock.c:883 +#: storage/ipc/shm_toc.c:118 storage/ipc/shm_toc.c:200 storage/lmgr/lock.c:883 #: storage/lmgr/lock.c:917 storage/lmgr/lock.c:2679 storage/lmgr/lock.c:4004 #: storage/lmgr/lock.c:4069 storage/lmgr/lock.c:4361 -#: storage/lmgr/predicate.c:2318 storage/lmgr/predicate.c:2333 -#: storage/lmgr/predicate.c:3725 storage/lmgr/predicate.c:4868 -#: utils/hash/dynahash.c:1043 +#: storage/lmgr/predicate.c:2355 storage/lmgr/predicate.c:2370 +#: storage/lmgr/predicate.c:3762 storage/lmgr/predicate.c:4905 +#: utils/hash/dynahash.c:1065 #, c-format msgid "out of shared memory" msgstr "Shared Memory aufgebraucht" #: storage/ipc/shmem.c:165 storage/ipc/shmem.c:246 -#, fuzzy, c-format -#| msgid "not enough shared memory for data structure \"%s\" (%zu bytes requested)" +#, c-format msgid "out of shared memory (%zu bytes requested)" -msgstr "nicht genug Shared-Memory für Datenstruktur »%s« (%zu Bytes angefordert)" +msgstr "Shared Memory aufgebraucht (%zu Bytes angefordert)" #: storage/ipc/shmem.c:421 #, c-format @@ -17898,32 +19278,32 @@ msgstr "nicht genug Shared-Memory für Datenstruktur »%s« (%zu Bytes angeforde msgid "requested shared memory size overflows size_t" msgstr "angeforderte Shared-Memory-Größe übersteigt Kapazität von size_t" -#: storage/ipc/standby.c:531 tcop/postgres.c:2964 +#: storage/ipc/standby.c:531 tcop/postgres.c:3027 #, c-format msgid "canceling statement due to conflict with recovery" msgstr "storniere Anfrage wegen Konflikt mit der Wiederherstellung" -#: storage/ipc/standby.c:532 tcop/postgres.c:2271 +#: storage/ipc/standby.c:532 tcop/postgres.c:2306 #, c-format msgid "User transaction caused buffer deadlock with recovery." msgstr "Benutzertransaktion hat Verklemmung (Deadlock) mit Wiederherstellung verursacht." -#: storage/large_object/inv_api.c:203 +#: storage/large_object/inv_api.c:190 #, c-format msgid "pg_largeobject entry for OID %u, page %d has invalid data field size %d" msgstr "pg_largeobject-Eintrag für OID %u, Seite %d hat ungültige Datenfeldgröße %d" -#: storage/large_object/inv_api.c:284 +#: storage/large_object/inv_api.c:271 #, c-format msgid "invalid flags for opening a large object: %d" msgstr "ungültige Flags zum Öffnen eines Large Objects: %d" -#: storage/large_object/inv_api.c:436 +#: storage/large_object/inv_api.c:461 #, c-format msgid "invalid whence setting: %d" msgstr "ungültige »whence«-Angabe: %d" -#: storage/large_object/inv_api.c:593 +#: storage/large_object/inv_api.c:633 #, c-format msgid "invalid large object write request size: %d" msgstr "ungültige Größe der Large-Object-Schreibaufforderung: %d" @@ -17948,97 +19328,97 @@ msgstr "Verklemmung (Deadlock) entdeckt" msgid "See server log for query details." msgstr "Einzelheiten zur Anfrage finden Sie im Serverlog." 
-#: storage/lmgr/lmgr.c:719 +#: storage/lmgr/lmgr.c:745 #, c-format msgid "while updating tuple (%u,%u) in relation \"%s\"" msgstr "beim Aktualisieren von Tupel (%u,%u) in Relation »%s«" -#: storage/lmgr/lmgr.c:722 +#: storage/lmgr/lmgr.c:748 #, c-format msgid "while deleting tuple (%u,%u) in relation \"%s\"" msgstr "beim Löschen von Tupel (%u,%u) in Relation »%s«" -#: storage/lmgr/lmgr.c:725 +#: storage/lmgr/lmgr.c:751 #, c-format msgid "while locking tuple (%u,%u) in relation \"%s\"" msgstr "beim Sperren von Tupel (%u,%u) in Relation »%s«" -#: storage/lmgr/lmgr.c:728 +#: storage/lmgr/lmgr.c:754 #, c-format msgid "while locking updated version (%u,%u) of tuple in relation \"%s\"" msgstr "beim Sperren von aktualisierter Version (%u,%u) von Tupel in Relation »%s«" -#: storage/lmgr/lmgr.c:731 +#: storage/lmgr/lmgr.c:757 #, c-format msgid "while inserting index tuple (%u,%u) in relation \"%s\"" msgstr "beim Einfügen von Indextupel (%u,%u) in Relation »%s«" -#: storage/lmgr/lmgr.c:734 +#: storage/lmgr/lmgr.c:760 #, c-format msgid "while checking uniqueness of tuple (%u,%u) in relation \"%s\"" msgstr "beim Prüfen der Eindeutigkeit von Tupel (%u,%u) in Relation »%s«" -#: storage/lmgr/lmgr.c:737 +#: storage/lmgr/lmgr.c:763 #, c-format msgid "while rechecking updated tuple (%u,%u) in relation \"%s\"" msgstr "beim erneuten Prüfen des aktualisierten Tupels (%u,%u) in Relation »%s«" -#: storage/lmgr/lmgr.c:740 +#: storage/lmgr/lmgr.c:766 #, c-format msgid "while checking exclusion constraint on tuple (%u,%u) in relation \"%s\"" msgstr "beim Prüfen eines Exclusion-Constraints für Tupel (%u,%u) in Relation »%s«" -#: storage/lmgr/lmgr.c:960 +#: storage/lmgr/lmgr.c:986 #, c-format msgid "relation %u of database %u" msgstr "Relation %u der Datenbank %u" -#: storage/lmgr/lmgr.c:966 +#: storage/lmgr/lmgr.c:992 #, c-format msgid "extension of relation %u of database %u" msgstr "Erweiterung von Relation %u in Datenbank %u" -#: storage/lmgr/lmgr.c:972 +#: storage/lmgr/lmgr.c:998 #, c-format msgid "page %u of relation %u of database %u" msgstr "Seite %u von Relation %u von Datenbank %u" -#: storage/lmgr/lmgr.c:979 +#: storage/lmgr/lmgr.c:1005 #, c-format msgid "tuple (%u,%u) of relation %u of database %u" msgstr "Tupel (%u, %u) von Relation %u von Datenbank %u" -#: storage/lmgr/lmgr.c:987 +#: storage/lmgr/lmgr.c:1013 #, c-format msgid "transaction %u" msgstr "Transaktion %u" -#: storage/lmgr/lmgr.c:992 +#: storage/lmgr/lmgr.c:1018 #, c-format msgid "virtual transaction %d/%u" msgstr "virtuelle Transaktion %d/%u" -#: storage/lmgr/lmgr.c:998 +#: storage/lmgr/lmgr.c:1024 #, c-format msgid "speculative token %u of transaction %u" msgstr "spekulatives Token %u von Transaktion %u" -#: storage/lmgr/lmgr.c:1004 +#: storage/lmgr/lmgr.c:1030 #, c-format msgid "object %u of class %u of database %u" msgstr "Objekt %u von Klasse %u von Datenbank %u" -#: storage/lmgr/lmgr.c:1012 +#: storage/lmgr/lmgr.c:1038 #, c-format msgid "user lock [%u,%u,%u]" msgstr "Benutzersperre [%u,%u,%u]" -#: storage/lmgr/lmgr.c:1019 +#: storage/lmgr/lmgr.c:1045 #, c-format msgid "advisory lock [%u,%u,%u,%u]" msgstr "Benutzersperre [%u,%u,%u,%u]" -#: storage/lmgr/lmgr.c:1027 +#: storage/lmgr/lmgr.c:1053 #, c-format msgid "unrecognized locktag type %d" msgstr "unbekannter Locktag-Typ %d" @@ -18064,122 +19444,112 @@ msgstr "Sie müssen möglicherweise max_locks_per_transaction erhöhen." 
msgid "cannot PREPARE while holding both session-level and transaction-level locks on the same object" msgstr "PREPARE kann nicht ausgeführt werden, wenn für das selbe Objekt Sperren auf Sitzungsebene und auf Transaktionsebene gehalten werden" -#: storage/lmgr/predicate.c:676 +#: storage/lmgr/predicate.c:682 #, c-format msgid "not enough elements in RWConflictPool to record a read/write conflict" msgstr "nicht genügend Elemente in RWConflictPool, um einen Lese-/Schreibkonflikt aufzuzeichnen" -#: storage/lmgr/predicate.c:677 storage/lmgr/predicate.c:705 +#: storage/lmgr/predicate.c:683 storage/lmgr/predicate.c:711 #, c-format msgid "You might need to run fewer transactions at a time or increase max_connections." msgstr "Sie müssten entweder weniger Transaktionen auf einmal ausführen oder max_connections erhöhen." -#: storage/lmgr/predicate.c:704 +#: storage/lmgr/predicate.c:710 #, c-format msgid "not enough elements in RWConflictPool to record a potential read/write conflict" msgstr "nicht genügend Elemente in RWConflictPool, um einen möglichen Lese-/Schreibkonflikt aufzuzeichnen" -#: storage/lmgr/predicate.c:910 -#, c-format -msgid "memory for serializable conflict tracking is nearly exhausted" -msgstr "Speicher für die Verfolgung von Serialisierungskonflikten ist fast aufgebraucht" - -#: storage/lmgr/predicate.c:911 -#, c-format -msgid "There might be an idle transaction or a forgotten prepared transaction causing this." -msgstr "Möglicherweise gibt es eine stillliegende Transaktion oder eine vergessene vorbereitete Transaktion, die der Grund dafür ist." - -#: storage/lmgr/predicate.c:1538 +#: storage/lmgr/predicate.c:1515 #, c-format msgid "deferrable snapshot was unsafe; trying a new one" msgstr "aufschiebbarer Snapshot war unsicher; versuche einen neuen" -#: storage/lmgr/predicate.c:1577 +#: storage/lmgr/predicate.c:1604 #, c-format msgid "\"default_transaction_isolation\" is set to \"serializable\"." msgstr "»default_transaction_isolation« ist auf »serializable« gesetzt." -#: storage/lmgr/predicate.c:1578 +#: storage/lmgr/predicate.c:1605 #, c-format msgid "You can use \"SET default_transaction_isolation = 'repeatable read'\" to change the default." msgstr "Mit »SET default_transaction_isolation = 'repeatable read'« können Sie die Voreinstellung ändern." -#: storage/lmgr/predicate.c:1617 +#: storage/lmgr/predicate.c:1645 #, c-format msgid "a snapshot-importing transaction must not be READ ONLY DEFERRABLE" msgstr "eine Transaktion, die einen Snapshot importiert, darf nicht READ ONLY DEFERRABLE sein" -#: storage/lmgr/predicate.c:1695 utils/time/snapmgr.c:617 -#: utils/time/snapmgr.c:623 +#: storage/lmgr/predicate.c:1725 utils/time/snapmgr.c:621 +#: utils/time/snapmgr.c:627 #, c-format msgid "could not import the requested snapshot" msgstr "konnte den angeforderten Snapshot nicht importieren" -#: storage/lmgr/predicate.c:1696 utils/time/snapmgr.c:624 +#: storage/lmgr/predicate.c:1726 utils/time/snapmgr.c:628 #, c-format -msgid "The source transaction %u is not running anymore." -msgstr "Die Quelltransaktion %u läuft nicht mehr." +msgid "The source process with PID %d is not running anymore." +msgstr "Der Ausgangsprozess mit PID %d läuft nicht mehr." -#: storage/lmgr/predicate.c:2319 storage/lmgr/predicate.c:2334 -#: storage/lmgr/predicate.c:3726 +#: storage/lmgr/predicate.c:2356 storage/lmgr/predicate.c:2371 +#: storage/lmgr/predicate.c:3763 #, c-format msgid "You might need to increase max_pred_locks_per_transaction." msgstr "Sie müssen möglicherweise max_pred_locks_per_transaction erhöhen." 
-#: storage/lmgr/predicate.c:3880 storage/lmgr/predicate.c:3969 -#: storage/lmgr/predicate.c:3977 storage/lmgr/predicate.c:4016 -#: storage/lmgr/predicate.c:4255 storage/lmgr/predicate.c:4592 -#: storage/lmgr/predicate.c:4604 storage/lmgr/predicate.c:4646 -#: storage/lmgr/predicate.c:4684 +#: storage/lmgr/predicate.c:3917 storage/lmgr/predicate.c:4006 +#: storage/lmgr/predicate.c:4014 storage/lmgr/predicate.c:4053 +#: storage/lmgr/predicate.c:4292 storage/lmgr/predicate.c:4629 +#: storage/lmgr/predicate.c:4641 storage/lmgr/predicate.c:4683 +#: storage/lmgr/predicate.c:4721 #, c-format msgid "could not serialize access due to read/write dependencies among transactions" msgstr "konnte Zugriff nicht serialisieren wegen Lese-/Schreib-Abhängigkeiten zwischen Transaktionen" -#: storage/lmgr/predicate.c:3882 storage/lmgr/predicate.c:3971 -#: storage/lmgr/predicate.c:3979 storage/lmgr/predicate.c:4018 -#: storage/lmgr/predicate.c:4257 storage/lmgr/predicate.c:4594 -#: storage/lmgr/predicate.c:4606 storage/lmgr/predicate.c:4648 -#: storage/lmgr/predicate.c:4686 +#: storage/lmgr/predicate.c:3919 storage/lmgr/predicate.c:4008 +#: storage/lmgr/predicate.c:4016 storage/lmgr/predicate.c:4055 +#: storage/lmgr/predicate.c:4294 storage/lmgr/predicate.c:4631 +#: storage/lmgr/predicate.c:4643 storage/lmgr/predicate.c:4685 +#: storage/lmgr/predicate.c:4723 #, c-format msgid "The transaction might succeed if retried." msgstr "Die Transaktion könnte erfolgreich sein, wenn sie erneut versucht würde." -#: storage/lmgr/proc.c:1273 +#: storage/lmgr/proc.c:1309 #, c-format msgid "Process %d waits for %s on %s." msgstr "Prozess %d wartet auf %s-Sperre auf %s." -#: storage/lmgr/proc.c:1284 +#: storage/lmgr/proc.c:1320 #, c-format msgid "sending cancel to blocking autovacuum PID %d" msgstr "sende Stornierung an blockierende Autovacuum-PID %d" -#: storage/lmgr/proc.c:1302 utils/adt/misc.c:269 +#: storage/lmgr/proc.c:1338 utils/adt/misc.c:270 #, c-format msgid "could not send signal to process %d: %m" msgstr "konnte Signal nicht an Prozess %d senden: %m" -#: storage/lmgr/proc.c:1404 +#: storage/lmgr/proc.c:1440 #, c-format msgid "process %d avoided deadlock for %s on %s by rearranging queue order after %ld.%03d ms" msgstr "Prozess %d vermied Verklemmung wegen %s-Sperre auf %s durch Umordnen der Queue nach %ld,%03d ms" -#: storage/lmgr/proc.c:1419 +#: storage/lmgr/proc.c:1455 #, c-format msgid "process %d detected deadlock while waiting for %s on %s after %ld.%03d ms" msgstr "Prozess %d hat Verklemmung festgestellt beim Warten auf %s-Sperre auf %s nach %ld,%03d ms" -#: storage/lmgr/proc.c:1428 +#: storage/lmgr/proc.c:1464 #, c-format msgid "process %d still waiting for %s on %s after %ld.%03d ms" msgstr "Prozess %d wartet immer noch auf %s-Sperre auf %s nach %ld,%03d ms" -#: storage/lmgr/proc.c:1435 +#: storage/lmgr/proc.c:1471 #, c-format msgid "process %d acquired %s on %s after %ld.%03d ms" msgstr "Prozess %d erlangte %s-Sperre auf %s nach %ld,%03d ms" -#: storage/lmgr/proc.c:1451 +#: storage/lmgr/proc.c:1487 #, c-format msgid "process %d failed to acquire %s on %s after %ld.%03d ms" msgstr "Prozess %d konnte %s-Sperre auf %s nach %ld,%03d ms nicht erlangen" @@ -18189,29 +19559,34 @@ msgstr "Prozess %d konnte %s-Sperre auf %s nach %ld,%03d ms nicht erlangen" msgid "page verification failed, calculated checksum %u but expected %u" msgstr "Seitenüberprüfung fehlgeschlagen, berechnete Prüfsumme %u, aber erwartet %u" -#: storage/page/bufpage.c:213 storage/page/bufpage.c:505 -#: storage/page/bufpage.c:748 
storage/page/bufpage.c:881 -#: storage/page/bufpage.c:977 storage/page/bufpage.c:1087 +#: storage/page/bufpage.c:213 storage/page/bufpage.c:507 +#: storage/page/bufpage.c:744 storage/page/bufpage.c:877 +#: storage/page/bufpage.c:973 storage/page/bufpage.c:1083 #, c-format msgid "corrupted page pointers: lower = %u, upper = %u, special = %u" msgstr "verfälschte Seitenzeiger: lower = %u, upper = %u, special = %u" -#: storage/page/bufpage.c:549 +#: storage/page/bufpage.c:529 #, c-format msgid "corrupted item pointer: %u" msgstr "verfälschter Item-Zeiger: %u" -#: storage/page/bufpage.c:560 storage/page/bufpage.c:932 +#: storage/page/bufpage.c:556 storage/page/bufpage.c:928 #, c-format msgid "corrupted item lengths: total %u, available space %u" msgstr "verfälschte Item-Längen: gesamt %u, verfügbarer Platz %u" -#: storage/page/bufpage.c:767 storage/page/bufpage.c:905 -#: storage/page/bufpage.c:993 storage/page/bufpage.c:1103 +#: storage/page/bufpage.c:763 storage/page/bufpage.c:989 +#: storage/page/bufpage.c:1099 #, c-format msgid "corrupted item pointer: offset = %u, size = %u" msgstr "verfälschter Item-Zeiger: offset = %u, size = %u" +#: storage/page/bufpage.c:901 +#, c-format +msgid "corrupted item pointer: offset = %u, length = %u" +msgstr "verfälschter Item-Zeiger: offset = %u, length = %u" + #: storage/smgr/md.c:447 storage/smgr/md.c:973 #, c-format msgid "could not truncate file \"%s\": %m" @@ -18293,362 +19668,377 @@ msgstr "konnte Datei »%s« nicht öffnen (Zielblock %u): vorhergehendes Segment msgid "could not open file \"%s\" (target block %u): %m" msgstr "konnte Datei »%s« nicht öffnen (Zielblock %u): %m" -#: tcop/fastpath.c:111 tcop/fastpath.c:475 tcop/fastpath.c:605 +#: tcop/fastpath.c:109 tcop/fastpath.c:461 tcop/fastpath.c:591 #, c-format msgid "invalid argument size %d in function call message" msgstr "ungültige Argumentgröße %d in Funktionsaufruf-Message" -#: tcop/fastpath.c:291 tcop/postgres.c:999 tcop/postgres.c:1308 -#: tcop/postgres.c:1567 tcop/postgres.c:1972 tcop/postgres.c:2339 -#: tcop/postgres.c:2414 -#, c-format -msgid "current transaction is aborted, commands ignored until end of transaction block" -msgstr "aktuelle Transaktion wurde abgebrochen, Befehle werden bis zum Ende der Transaktion ignoriert" - -#: tcop/fastpath.c:319 +#: tcop/fastpath.c:307 #, c-format msgid "fastpath function call: \"%s\" (OID %u)" msgstr "Fastpath-Funktionsaufruf: »%s« (OID %u)" -#: tcop/fastpath.c:401 tcop/postgres.c:1170 tcop/postgres.c:1433 -#: tcop/postgres.c:1813 tcop/postgres.c:2030 +#: tcop/fastpath.c:389 tcop/postgres.c:1195 tcop/postgres.c:1459 +#: tcop/postgres.c:1841 tcop/postgres.c:2062 #, c-format msgid "duration: %s ms" msgstr "Dauer: %s ms" -#: tcop/fastpath.c:405 +#: tcop/fastpath.c:393 #, c-format msgid "duration: %s ms fastpath function call: \"%s\" (OID %u)" msgstr "Dauer: %s ms Fastpath-Funktionsaufruf: »%s« (OID %u)" -#: tcop/fastpath.c:443 tcop/fastpath.c:570 +#: tcop/fastpath.c:429 tcop/fastpath.c:556 #, c-format msgid "function call message contains %d arguments but function requires %d" msgstr "Funktionsaufruf-Message enthält %d Argumente, aber Funktion benötigt %d" -#: tcop/fastpath.c:451 +#: tcop/fastpath.c:437 #, c-format msgid "function call message contains %d argument formats but %d arguments" msgstr "Funktionsaufruf-Message enthält %d Argumentformate aber %d Argumente" -#: tcop/fastpath.c:538 tcop/fastpath.c:621 +#: tcop/fastpath.c:524 tcop/fastpath.c:607 #, c-format msgid "incorrect binary data format in function argument %d" msgstr "falsches 
Binärdatenformat in Funktionsargument %d" -#: tcop/postgres.c:352 tcop/postgres.c:388 tcop/postgres.c:415 +#: tcop/postgres.c:359 tcop/postgres.c:395 tcop/postgres.c:422 #, c-format msgid "unexpected EOF on client connection" msgstr "unerwartetes EOF auf Client-Verbindung" -#: tcop/postgres.c:438 tcop/postgres.c:450 tcop/postgres.c:461 -#: tcop/postgres.c:473 tcop/postgres.c:4304 +#: tcop/postgres.c:445 tcop/postgres.c:457 tcop/postgres.c:468 +#: tcop/postgres.c:480 tcop/postgres.c:4379 #, c-format msgid "invalid frontend message type %d" msgstr "ungültiger Frontend-Message-Typ %d" -#: tcop/postgres.c:940 +#: tcop/postgres.c:950 #, c-format msgid "statement: %s" msgstr "Anweisung: %s" -#: tcop/postgres.c:1175 +#: tcop/postgres.c:1200 #, c-format msgid "duration: %s ms statement: %s" msgstr "Dauer: %s ms Anweisung: %s" -#: tcop/postgres.c:1225 +#: tcop/postgres.c:1250 #, c-format msgid "parse %s: %s" msgstr "Parsen %s: %s" -#: tcop/postgres.c:1281 +#: tcop/postgres.c:1307 #, c-format msgid "cannot insert multiple commands into a prepared statement" msgstr "kann nicht mehrere Befehle in vorbereitete Anweisung einfügen" -#: tcop/postgres.c:1438 +#: tcop/postgres.c:1464 #, c-format msgid "duration: %s ms parse %s: %s" msgstr "Dauer: %s ms Parsen %s: %s" -#: tcop/postgres.c:1483 +#: tcop/postgres.c:1509 #, c-format msgid "bind %s to %s" msgstr "Binden %s an %s" -#: tcop/postgres.c:1502 tcop/postgres.c:2320 +#: tcop/postgres.c:1528 tcop/postgres.c:2354 #, c-format msgid "unnamed prepared statement does not exist" msgstr "unbenannte vorbereitete Anweisung existiert nicht" -#: tcop/postgres.c:1544 +#: tcop/postgres.c:1571 #, c-format msgid "bind message has %d parameter formats but %d parameters" msgstr "Binden-Nachricht hat %d Parameterformate aber %d Parameter" -#: tcop/postgres.c:1550 +#: tcop/postgres.c:1577 #, c-format msgid "bind message supplies %d parameters, but prepared statement \"%s\" requires %d" msgstr "Binden-Nachricht enthält %d Parameter, aber vorbereitete Anweisung »%s« erfordert %d" -#: tcop/postgres.c:1720 +#: tcop/postgres.c:1748 #, c-format msgid "incorrect binary data format in bind parameter %d" msgstr "falsches Binärdatenformat in Binden-Parameter %d" -#: tcop/postgres.c:1818 +#: tcop/postgres.c:1846 #, c-format msgid "duration: %s ms bind %s%s%s: %s" msgstr "Dauer: %s ms Binden %s%s%s: %s" -#: tcop/postgres.c:1866 tcop/postgres.c:2400 +#: tcop/postgres.c:1894 tcop/postgres.c:2438 #, c-format msgid "portal \"%s\" does not exist" msgstr "Portal »%s« existiert nicht" -#: tcop/postgres.c:1951 +#: tcop/postgres.c:1979 #, c-format msgid "%s %s%s%s: %s" msgstr "%s %s%s%s: %s" -#: tcop/postgres.c:1953 tcop/postgres.c:2038 +#: tcop/postgres.c:1981 tcop/postgres.c:2070 msgid "execute fetch from" msgstr "Ausführen Fetch von" -#: tcop/postgres.c:1954 tcop/postgres.c:2039 +#: tcop/postgres.c:1982 tcop/postgres.c:2071 msgid "execute" msgstr "Ausführen" -#: tcop/postgres.c:2035 +#: tcop/postgres.c:2067 #, c-format msgid "duration: %s ms %s %s%s%s: %s" msgstr "Dauer: %s ms %s %s%s%s: %s" -#: tcop/postgres.c:2161 +#: tcop/postgres.c:2193 #, c-format msgid "prepare: %s" msgstr "Vorbereiten: %s" -#: tcop/postgres.c:2224 +#: tcop/postgres.c:2259 #, c-format msgid "parameters: %s" msgstr "Parameter: %s" -#: tcop/postgres.c:2243 +#: tcop/postgres.c:2278 #, c-format msgid "abort reason: recovery conflict" msgstr "Abbruchgrund: Konflikt bei Wiederherstellung" -#: tcop/postgres.c:2259 +#: tcop/postgres.c:2294 #, c-format msgid "User was holding shared buffer pin for too long." 
msgstr "Benutzer hat Shared-Buffer-Pin zu lange gehalten." -#: tcop/postgres.c:2262 +#: tcop/postgres.c:2297 #, c-format msgid "User was holding a relation lock for too long." msgstr "Benutzer hat Relationssperre zu lange gehalten." -#: tcop/postgres.c:2265 +#: tcop/postgres.c:2300 #, c-format msgid "User was or might have been using tablespace that must be dropped." msgstr "Benutzer hat (möglicherweise) einen Tablespace verwendet, der gelöscht werden muss." -#: tcop/postgres.c:2268 +#: tcop/postgres.c:2303 #, c-format msgid "User query might have needed to see row versions that must be removed." msgstr "Benutzeranfrage hat möglicherweise Zeilenversionen sehen müssen, die entfernt werden müssen." -#: tcop/postgres.c:2274 +#: tcop/postgres.c:2309 #, c-format msgid "User was connected to a database that must be dropped." msgstr "Benutzer war mit einer Datenbank verbunden, die gelöscht werden muss." -#: tcop/postgres.c:2583 +#: tcop/postgres.c:2624 #, c-format msgid "terminating connection because of crash of another server process" msgstr "Verbindung wird abgebrochen wegen Absturz eines anderen Serverprozesses" -#: tcop/postgres.c:2584 +#: tcop/postgres.c:2625 #, c-format msgid "The postmaster has commanded this server process to roll back the current transaction and exit, because another server process exited abnormally and possibly corrupted shared memory." msgstr "Der Postmaster hat diesen Serverprozess angewiesen, die aktuelle Transaktion zurückzurollen und die Sitzung zu beenden, weil ein anderer Serverprozess abnormal beendet wurde und möglicherweise das Shared Memory verfälscht hat." -#: tcop/postgres.c:2588 tcop/postgres.c:2892 +#: tcop/postgres.c:2629 tcop/postgres.c:2957 #, c-format msgid "In a moment you should be able to reconnect to the database and repeat your command." msgstr "In einem Moment sollten Sie wieder mit der Datenbank verbinden und Ihren Befehl wiederholen können." -#: tcop/postgres.c:2674 +#: tcop/postgres.c:2715 #, c-format msgid "floating-point exception" msgstr "Fließkommafehler" -#: tcop/postgres.c:2675 +#: tcop/postgres.c:2716 #, c-format msgid "An invalid floating-point operation was signaled. This probably means an out-of-range result or an invalid operation, such as division by zero." msgstr "Eine ungültige Fließkommaoperation wurde signalisiert. Das bedeutet wahrscheinlich ein Ergebnis außerhalb des gültigen Bereichs oder eine ungültige Operation, zum Beispiel Division durch null." 
-#: tcop/postgres.c:2837 +#: tcop/postgres.c:2887 #, c-format msgid "canceling authentication due to timeout" msgstr "storniere Authentifizierung wegen Zeitüberschreitung" -#: tcop/postgres.c:2841 +#: tcop/postgres.c:2891 #, c-format msgid "terminating autovacuum process due to administrator command" msgstr "Autovacuum-Prozess wird abgebrochen aufgrund von Anweisung des Administrators" -#: tcop/postgres.c:2847 tcop/postgres.c:2857 tcop/postgres.c:2890 +#: tcop/postgres.c:2895 +#, c-format +msgid "terminating logical replication worker due to administrator command" +msgstr "Arbeitsprozess für logische Replikation wird abgebrochen aufgrund von Anweisung des Administrators" + +#: tcop/postgres.c:2899 +#, c-format +msgid "logical replication launcher shutting down" +msgstr "Logical-Replication-Launcher fährt herunter" + +#: tcop/postgres.c:2912 tcop/postgres.c:2922 tcop/postgres.c:2955 #, c-format msgid "terminating connection due to conflict with recovery" msgstr "Verbindung wird abgebrochen wegen Konflikt mit der Wiederherstellung" -#: tcop/postgres.c:2863 +#: tcop/postgres.c:2928 #, c-format msgid "terminating connection due to administrator command" msgstr "Verbindung wird abgebrochen aufgrund von Anweisung des Administrators" -#: tcop/postgres.c:2873 +#: tcop/postgres.c:2938 #, c-format msgid "connection to client lost" msgstr "Verbindung zum Client wurde verloren" -#: tcop/postgres.c:2941 +#: tcop/postgres.c:3004 #, c-format msgid "canceling statement due to lock timeout" msgstr "storniere Anfrage wegen Zeitüberschreitung einer Sperre" -#: tcop/postgres.c:2948 +#: tcop/postgres.c:3011 #, c-format msgid "canceling statement due to statement timeout" msgstr "storniere Anfrage wegen Zeitüberschreitung der Anfrage" -#: tcop/postgres.c:2955 +#: tcop/postgres.c:3018 #, c-format msgid "canceling autovacuum task" msgstr "storniere Autovacuum-Aufgabe" -#: tcop/postgres.c:2978 +#: tcop/postgres.c:3041 #, c-format msgid "canceling statement due to user request" msgstr "storniere Anfrage wegen Benutzeraufforderung" -#: tcop/postgres.c:2988 +#: tcop/postgres.c:3051 #, c-format msgid "terminating connection due to idle-in-transaction timeout" msgstr "Verbindung wird abgebrochen wegen Zeitüberschreitung in inaktiver Transaktion" -#: tcop/postgres.c:3102 +#: tcop/postgres.c:3165 #, c-format msgid "stack depth limit exceeded" msgstr "Grenze für Stacktiefe überschritten" -#: tcop/postgres.c:3103 +#: tcop/postgres.c:3166 #, c-format msgid "Increase the configuration parameter \"max_stack_depth\" (currently %dkB), after ensuring the platform's stack depth limit is adequate." msgstr "Erhöhen Sie den Konfigurationsparameter »max_stack_depth« (aktuell %dkB), nachdem Sie sichergestellt haben, dass die Stacktiefenbegrenzung Ihrer Plattform ausreichend ist." -#: tcop/postgres.c:3166 +#: tcop/postgres.c:3229 #, c-format msgid "\"max_stack_depth\" must not exceed %ldkB." msgstr "»max_stack_depth« darf %ldkB nicht überschreiten." -#: tcop/postgres.c:3168 +#: tcop/postgres.c:3231 #, c-format msgid "Increase the platform's stack depth limit via \"ulimit -s\" or local equivalent." msgstr "Erhöhen Sie die Stacktiefenbegrenzung Ihrer Plattform mit »ulimit -s« oder der lokalen Entsprechung." 
-#: tcop/postgres.c:3528 +#: tcop/postgres.c:3591 #, c-format msgid "invalid command-line argument for server process: %s" msgstr "ungültiges Kommandozeilenargument für Serverprozess: %s" -#: tcop/postgres.c:3529 tcop/postgres.c:3535 +#: tcop/postgres.c:3592 tcop/postgres.c:3598 #, c-format msgid "Try \"%s --help\" for more information." msgstr "Versuchen Sie »%s --help« für weitere Informationen." -#: tcop/postgres.c:3533 +#: tcop/postgres.c:3596 #, c-format msgid "%s: invalid command-line argument: %s" msgstr "%s: ungültiges Kommandozeilenargument: %s" -#: tcop/postgres.c:3595 +#: tcop/postgres.c:3658 #, c-format msgid "%s: no database nor user name specified" msgstr "%s: weder Datenbankname noch Benutzername angegeben" -#: tcop/postgres.c:4212 +#: tcop/postgres.c:4287 #, c-format msgid "invalid CLOSE message subtype %d" msgstr "ungültiger Subtyp %d von CLOSE-Message" -#: tcop/postgres.c:4247 +#: tcop/postgres.c:4322 #, c-format msgid "invalid DESCRIBE message subtype %d" msgstr "ungültiger Subtyp %d von DESCRIBE-Message" -#: tcop/postgres.c:4325 +#: tcop/postgres.c:4400 #, c-format msgid "fastpath function calls not supported in a replication connection" msgstr "Fastpath-Funktionsaufrufe werden auf einer Replikationsverbindung nicht unterstützt" -#: tcop/postgres.c:4329 +#: tcop/postgres.c:4404 #, c-format msgid "extended query protocol not supported in a replication connection" msgstr "erweitertes Anfrageprotokoll wird nicht auf einer Replikationsverbindung unterstützt" -#: tcop/postgres.c:4499 +#: tcop/postgres.c:4581 #, c-format msgid "disconnection: session time: %d:%02d:%02d.%03d user=%s database=%s host=%s%s%s" msgstr "Verbindungsende: Sitzungszeit: %d:%02d:%02d.%03d Benutzer=%s Datenbank=%s Host=%s%s%s" -#: tcop/pquery.c:638 +#: tcop/pquery.c:645 #, c-format msgid "bind message has %d result formats but query has %d columns" msgstr "Bind-Message hat %d Ergebnisformate, aber Anfrage hat %d Spalten" -#: tcop/pquery.c:940 +#: tcop/pquery.c:952 #, c-format msgid "cursor can only scan forward" msgstr "Cursor kann nur vorwärts scannen" -#: tcop/pquery.c:941 +#: tcop/pquery.c:953 #, c-format msgid "Declare it with SCROLL option to enable backward scan." msgstr "Deklarieren Sie ihn mit der Option SCROLL, um rückwärts scannen zu können." #. translator: %s is name of a SQL command, eg CREATE -#: tcop/utility.c:241 +#: tcop/utility.c:245 #, c-format msgid "cannot execute %s in a read-only transaction" msgstr "%s kann nicht in einer Read-Only-Transaktion ausgeführt werden" #. translator: %s is name of a SQL command, eg CREATE -#: tcop/utility.c:259 +#: tcop/utility.c:263 #, c-format msgid "cannot execute %s during a parallel operation" msgstr "%s kann nicht während einer parallelen Operation ausgeführt werden" #. translator: %s is name of a SQL command, eg CREATE -#: tcop/utility.c:278 +#: tcop/utility.c:282 #, c-format msgid "cannot execute %s during recovery" msgstr "%s kann nicht während der Wiederherstellung ausgeführt werden" #. 
translator: %s is name of a SQL command, eg PREPARE -#: tcop/utility.c:296 +#: tcop/utility.c:300 #, c-format msgid "cannot execute %s within security-restricted operation" msgstr "kann %s nicht in einer sicherheitsbeschränkten Operation ausführen" -#: tcop/utility.c:759 +#: tcop/utility.c:757 #, c-format msgid "must be superuser to do CHECKPOINT" msgstr "nur Superuser können CHECKPOINT ausführen" +#: tcop/utility.c:1338 +#, fuzzy, c-format +#| msgid "cannot create index on partitioned table \"%s\"" +msgid "cannot create index on partitioned table \"%s\"" +msgstr "kann keinen Index für partitionierte Tabelle »%s« erzeugen" + +#: tcop/utility.c:1340 +#, fuzzy, c-format +#| msgid "\"%s\" is not a foreign table" +msgid "Table \"%s\" contains partitions that are foreign tables." +msgstr "»%s« ist keine Fremdtabelle" + #: tsearch/dict_ispell.c:52 tsearch/dict_thesaurus.c:624 #, c-format msgid "multiple DictFile parameters" @@ -18800,12 +20190,12 @@ msgstr "konnte Wörterbuchdatei »%s« nicht öffnen: %m" msgid "invalid regular expression: %s" msgstr "ungültiger regulärer Ausdruck: %s" -#: tsearch/spell.c:1161 tsearch/spell.c:1721 +#: tsearch/spell.c:1161 tsearch/spell.c:1726 #, c-format msgid "invalid affix alias \"%s\"" msgstr "ungültiges Affixalias »%s«" -#: tsearch/spell.c:1211 tsearch/spell.c:1282 tsearch/spell.c:1426 +#: tsearch/spell.c:1211 tsearch/spell.c:1282 tsearch/spell.c:1431 #, c-format msgid "could not open affix file \"%s\": %m" msgstr "konnte Affixdatei »%s« nicht öffnen: %m" @@ -18820,23 +20210,28 @@ msgstr "Ispell-Wörterbuch unterstützt nur die Flag-Werte »default«, »long« msgid "invalid number of flag vector aliases" msgstr "ungültige Anzahl Flag-Vektor-Aliasse" -#: tsearch/spell.c:1542 +#: tsearch/spell.c:1332 +#, fuzzy, c-format +#| msgid "number of aliases does not match number of columns" +msgid "number of aliases exceeds specified number %d" +msgstr "Anzahl der Aliasnamen stimmt nicht mit der Anzahl der Spalten überein" + +#: tsearch/spell.c:1547 #, c-format msgid "affix file contains both old-style and new-style commands" msgstr "Affixdatei enthält Befehle im alten und im neuen Stil" -#: tsearch/to_tsany.c:170 utils/adt/tsvector.c:271 -#: utils/adt/tsvector_op.c:1134 +#: tsearch/to_tsany.c:185 utils/adt/tsvector.c:271 utils/adt/tsvector_op.c:1134 #, c-format msgid "string is too long for tsvector (%d bytes, max %d bytes)" msgstr "Zeichenkette ist zu lang für tsvector (%d Bytes, maximal %d Bytes)" -#: tsearch/ts_locale.c:177 +#: tsearch/ts_locale.c:174 #, c-format msgid "line %d of configuration file \"%s\": \"%s\"" msgstr "Zeile %d in Konfigurationsdatei »%s«: »%s«" -#: tsearch/ts_locale.c:299 +#: tsearch/ts_locale.c:291 #, c-format msgid "conversion from wchar_t to server encoding failed: %m" msgstr "Umwandlung von wchar_t in Serverkodierung fehlgeschlagen: %m" @@ -18863,458 +20258,463 @@ msgstr "ungültiger Textsuchekonfigurationsdateiname »%s«" msgid "could not open stop-word file \"%s\": %m" msgstr "konnte Stoppwortdatei »%s« nicht öffnen: %m" -#: tsearch/wparser.c:307 +#: tsearch/wparser.c:322 tsearch/wparser.c:410 tsearch/wparser.c:487 #, c-format msgid "text search parser does not support headline creation" msgstr "Textsucheparser unterstützt das Erzeugen von Headlines nicht" -#: tsearch/wparser_def.c:2583 +#: tsearch/wparser_def.c:2486 #, c-format msgid "unrecognized headline parameter: \"%s\"" msgstr "unbekannter Headline-Parameter: »%s«" -#: tsearch/wparser_def.c:2592 +#: tsearch/wparser_def.c:2495 #, c-format msgid "MinWords should be less than MaxWords" msgstr 
"»MinWords« sollte kleiner als »MaxWords« sein" -#: tsearch/wparser_def.c:2596 +#: tsearch/wparser_def.c:2499 #, c-format msgid "MinWords should be positive" msgstr "»MinWords« sollte positiv sein" -#: tsearch/wparser_def.c:2600 +#: tsearch/wparser_def.c:2503 #, c-format msgid "ShortWord should be >= 0" msgstr "»ShortWord« sollte >= 0 sein" -#: tsearch/wparser_def.c:2604 +#: tsearch/wparser_def.c:2507 #, c-format msgid "MaxFragments should be >= 0" msgstr "»MaxFragments« sollte >= 0 sein" -#: utils/adt/acl.c:170 utils/adt/name.c:91 +#: utils/adt/acl.c:171 utils/adt/name.c:91 #, c-format msgid "identifier too long" msgstr "Bezeichner zu lang" -#: utils/adt/acl.c:171 utils/adt/name.c:92 +#: utils/adt/acl.c:172 utils/adt/name.c:92 #, c-format msgid "Identifier must be less than %d characters." msgstr "Bezeichner muss weniger als %d Zeichen haben." -#: utils/adt/acl.c:257 +#: utils/adt/acl.c:258 #, c-format msgid "unrecognized key word: \"%s\"" msgstr "unbekanntes Schlüsselwort: »%s«" -#: utils/adt/acl.c:258 +#: utils/adt/acl.c:259 #, c-format msgid "ACL key word must be \"group\" or \"user\"." msgstr "ACL-Schlüsselwort muss »group« oder »user« sein." -#: utils/adt/acl.c:263 +#: utils/adt/acl.c:264 #, c-format msgid "missing name" msgstr "Name fehlt" -#: utils/adt/acl.c:264 +#: utils/adt/acl.c:265 #, c-format msgid "A name must follow the \"group\" or \"user\" key word." msgstr "Auf das Schlüsselwort »group« oder »user« muss ein Name folgen." -#: utils/adt/acl.c:270 +#: utils/adt/acl.c:271 #, c-format msgid "missing \"=\" sign" msgstr "»=«-Zeichen fehlt" -#: utils/adt/acl.c:323 +#: utils/adt/acl.c:324 #, c-format msgid "invalid mode character: must be one of \"%s\"" msgstr "ungültiges Moduszeichen: muss eines aus »%s« sein" -#: utils/adt/acl.c:345 +#: utils/adt/acl.c:346 #, c-format msgid "a name must follow the \"/\" sign" msgstr "auf das »/«-Zeichen muss ein Name folgen" -#: utils/adt/acl.c:353 +#: utils/adt/acl.c:354 #, c-format msgid "defaulting grantor to user ID %u" msgstr "nicht angegebener Grantor wird auf user ID %u gesetzt" -#: utils/adt/acl.c:544 +#: utils/adt/acl.c:545 #, c-format msgid "ACL array contains wrong data type" msgstr "ACL-Array enthält falschen Datentyp" -#: utils/adt/acl.c:548 +#: utils/adt/acl.c:549 #, c-format msgid "ACL arrays must be one-dimensional" msgstr "ACL-Arrays müssen eindimensional sein" -#: utils/adt/acl.c:552 +#: utils/adt/acl.c:553 #, c-format msgid "ACL arrays must not contain null values" msgstr "ACL-Array darf keine NULL-Werte enthalten" -#: utils/adt/acl.c:576 +#: utils/adt/acl.c:577 #, c-format msgid "extra garbage at the end of the ACL specification" msgstr "überflüssiger Müll am Ende der ACL-Angabe" -#: utils/adt/acl.c:1196 +#: utils/adt/acl.c:1213 #, c-format msgid "grant options cannot be granted back to your own grantor" msgstr "Grant-Optionen können nicht an den eigenen Grantor gegeben werden" -#: utils/adt/acl.c:1257 +#: utils/adt/acl.c:1274 #, c-format msgid "dependent privileges exist" msgstr "abhängige Privilegien existieren" -#: utils/adt/acl.c:1258 +#: utils/adt/acl.c:1275 #, c-format msgid "Use CASCADE to revoke them too." msgstr "Verwenden Sie CASCADE, um diese auch zu entziehen." 
-#: utils/adt/acl.c:1520 +#: utils/adt/acl.c:1537 #, c-format msgid "aclinsert is no longer supported" msgstr "aclinsert wird nicht mehr unterstützt" -#: utils/adt/acl.c:1530 +#: utils/adt/acl.c:1547 #, c-format msgid "aclremove is no longer supported" msgstr "aclremove wird nicht mehr unterstützt" -#: utils/adt/acl.c:1616 utils/adt/acl.c:1670 +#: utils/adt/acl.c:1633 utils/adt/acl.c:1687 #, c-format msgid "unrecognized privilege type: \"%s\"" msgstr "unbekannter Privilegtyp: »%s«" -#: utils/adt/acl.c:3410 utils/adt/regproc.c:125 utils/adt/regproc.c:146 -#: utils/adt/regproc.c:321 +#: utils/adt/acl.c:3430 utils/adt/regproc.c:102 utils/adt/regproc.c:277 #, c-format msgid "function \"%s\" does not exist" msgstr "Funktion »%s« existiert nicht" -#: utils/adt/acl.c:4864 +#: utils/adt/acl.c:4884 #, c-format msgid "must be member of role \"%s\"" msgstr "Berechtigung nur für Mitglied von Rolle »%s«" -#: utils/adt/array_expanded.c:274 utils/adt/arrayfuncs.c:931 -#: utils/adt/arrayfuncs.c:1519 utils/adt/arrayfuncs.c:3251 -#: utils/adt/arrayfuncs.c:3389 utils/adt/arrayfuncs.c:5848 -#: utils/adt/arrayfuncs.c:6159 utils/adt/arrayutils.c:93 +#: utils/adt/array_expanded.c:274 utils/adt/arrayfuncs.c:932 +#: utils/adt/arrayfuncs.c:1520 utils/adt/arrayfuncs.c:3223 +#: utils/adt/arrayfuncs.c:3363 utils/adt/arrayfuncs.c:5898 +#: utils/adt/arrayfuncs.c:6209 utils/adt/arrayutils.c:93 #: utils/adt/arrayutils.c:102 utils/adt/arrayutils.c:109 #, c-format msgid "array size exceeds the maximum allowed (%d)" msgstr "Arraygröße überschreitet erlaubtes Maximum (%d)" -#: utils/adt/array_userfuncs.c:79 utils/adt/array_userfuncs.c:541 -#: utils/adt/array_userfuncs.c:621 utils/adt/json.c:1764 utils/adt/json.c:1859 -#: utils/adt/json.c:1897 utils/adt/jsonb.c:1127 utils/adt/jsonb.c:1156 -#: utils/adt/jsonb.c:1592 utils/adt/jsonb.c:1756 utils/adt/jsonb.c:1766 +#: utils/adt/array_userfuncs.c:80 utils/adt/array_userfuncs.c:466 +#: utils/adt/array_userfuncs.c:546 utils/adt/json.c:1829 utils/adt/json.c:1924 +#: utils/adt/json.c:1962 utils/adt/jsonb.c:1083 utils/adt/jsonb.c:1112 +#: utils/adt/jsonb.c:1504 utils/adt/jsonb.c:1668 utils/adt/jsonb.c:1678 #, c-format msgid "could not determine input data type" msgstr "konnte Eingabedatentypen nicht bestimmen" -#: utils/adt/array_userfuncs.c:84 +#: utils/adt/array_userfuncs.c:85 #, c-format msgid "input data type is not an array" msgstr "Eingabedatentyp ist kein Array" -#: utils/adt/array_userfuncs.c:132 utils/adt/array_userfuncs.c:186 -#: utils/adt/arrayfuncs.c:1322 utils/adt/float.c:1228 utils/adt/float.c:1287 -#: utils/adt/float.c:3556 utils/adt/float.c:3572 utils/adt/int.c:608 -#: utils/adt/int.c:637 utils/adt/int.c:658 utils/adt/int.c:689 -#: utils/adt/int.c:722 utils/adt/int.c:744 utils/adt/int.c:892 -#: utils/adt/int.c:913 utils/adt/int.c:940 utils/adt/int.c:980 -#: utils/adt/int.c:1001 utils/adt/int.c:1028 utils/adt/int.c:1061 -#: utils/adt/int.c:1144 utils/adt/int8.c:1298 utils/adt/numeric.c:2953 -#: utils/adt/numeric.c:2962 utils/adt/varbit.c:1173 utils/adt/varbit.c:1575 -#: utils/adt/varlena.c:1056 utils/adt/varlena.c:2808 +#: utils/adt/array_userfuncs.c:129 utils/adt/array_userfuncs.c:181 +#: utils/adt/arrayfuncs.c:1323 utils/adt/float.c:1363 utils/adt/float.c:1422 +#: utils/adt/float.c:3708 utils/adt/float.c:3722 utils/adt/int.c:755 +#: utils/adt/int.c:777 utils/adt/int.c:791 utils/adt/int.c:805 +#: utils/adt/int.c:836 utils/adt/int.c:857 utils/adt/int.c:974 +#: utils/adt/int.c:988 utils/adt/int.c:1002 utils/adt/int.c:1035 +#: utils/adt/int.c:1049 utils/adt/int.c:1063 
utils/adt/int.c:1094 +#: utils/adt/int.c:1176 utils/adt/int8.c:1164 utils/adt/numeric.c:3117 +#: utils/adt/numeric.c:3126 utils/adt/varbit.c:1173 utils/adt/varbit.c:1575 +#: utils/adt/varlena.c:1053 utils/adt/varlena.c:2983 #, c-format msgid "integer out of range" msgstr "integer ist außerhalb des gültigen Bereichs" -#: utils/adt/array_userfuncs.c:139 utils/adt/array_userfuncs.c:196 +#: utils/adt/array_userfuncs.c:136 utils/adt/array_userfuncs.c:191 #, c-format msgid "argument must be empty or one-dimensional array" msgstr "Argument muss entweder leer oder ein eindimensionales Array sein" -#: utils/adt/array_userfuncs.c:278 utils/adt/array_userfuncs.c:317 -#: utils/adt/array_userfuncs.c:354 utils/adt/array_userfuncs.c:383 -#: utils/adt/array_userfuncs.c:411 +#: utils/adt/array_userfuncs.c:273 utils/adt/array_userfuncs.c:312 +#: utils/adt/array_userfuncs.c:349 utils/adt/array_userfuncs.c:378 +#: utils/adt/array_userfuncs.c:406 #, c-format msgid "cannot concatenate incompatible arrays" msgstr "inkompatible Arrays können nicht aneinandergehängt werden" -#: utils/adt/array_userfuncs.c:279 +#: utils/adt/array_userfuncs.c:274 #, c-format msgid "Arrays with element types %s and %s are not compatible for concatenation." msgstr "Arrays mit Elementtypen %s und %s sind nicht kompatibel für Aneinanderhängen." -#: utils/adt/array_userfuncs.c:318 +#: utils/adt/array_userfuncs.c:313 #, c-format msgid "Arrays of %d and %d dimensions are not compatible for concatenation." msgstr "Arrays mit %d und %d Dimensionen sind nicht kompatibel für Aneinanderhängen." -#: utils/adt/array_userfuncs.c:355 +#: utils/adt/array_userfuncs.c:350 #, c-format msgid "Arrays with differing element dimensions are not compatible for concatenation." msgstr "Arrays mit unterschiedlichen Elementdimensionen sind nicht kompatibel für Aneinanderhängen." -#: utils/adt/array_userfuncs.c:384 utils/adt/array_userfuncs.c:412 +#: utils/adt/array_userfuncs.c:379 utils/adt/array_userfuncs.c:407 #, c-format msgid "Arrays with differing dimensions are not compatible for concatenation." msgstr "Arrays mit unterschiedlichen Dimensionen sind nicht kompatibel für Aneinanderhängen." 
-#: utils/adt/array_userfuncs.c:480 utils/adt/arrayfuncs.c:1284 -#: utils/adt/arrayfuncs.c:3357 utils/adt/arrayfuncs.c:5754 -#, c-format -msgid "invalid number of dimensions: %d" -msgstr "ungültige Anzahl Dimensionen: %d" - -#: utils/adt/array_userfuncs.c:737 utils/adt/array_userfuncs.c:889 +#: utils/adt/array_userfuncs.c:662 utils/adt/array_userfuncs.c:814 #, c-format msgid "searching for elements in multidimensional arrays is not supported" msgstr "Suche nach Elementen in mehrdimensionalen Arrays wird nicht unterstützt" -#: utils/adt/array_userfuncs.c:761 +#: utils/adt/array_userfuncs.c:686 #, c-format msgid "initial position must not be null" msgstr "Startposition darf nicht NULL sein" -#: utils/adt/arrayfuncs.c:268 utils/adt/arrayfuncs.c:282 -#: utils/adt/arrayfuncs.c:293 utils/adt/arrayfuncs.c:315 -#: utils/adt/arrayfuncs.c:330 utils/adt/arrayfuncs.c:344 -#: utils/adt/arrayfuncs.c:350 utils/adt/arrayfuncs.c:357 -#: utils/adt/arrayfuncs.c:488 utils/adt/arrayfuncs.c:504 -#: utils/adt/arrayfuncs.c:515 utils/adt/arrayfuncs.c:530 -#: utils/adt/arrayfuncs.c:551 utils/adt/arrayfuncs.c:581 -#: utils/adt/arrayfuncs.c:588 utils/adt/arrayfuncs.c:596 -#: utils/adt/arrayfuncs.c:630 utils/adt/arrayfuncs.c:653 -#: utils/adt/arrayfuncs.c:673 utils/adt/arrayfuncs.c:785 -#: utils/adt/arrayfuncs.c:794 utils/adt/arrayfuncs.c:824 -#: utils/adt/arrayfuncs.c:839 utils/adt/arrayfuncs.c:892 +#: utils/adt/arrayfuncs.c:269 utils/adt/arrayfuncs.c:283 +#: utils/adt/arrayfuncs.c:294 utils/adt/arrayfuncs.c:316 +#: utils/adt/arrayfuncs.c:331 utils/adt/arrayfuncs.c:345 +#: utils/adt/arrayfuncs.c:351 utils/adt/arrayfuncs.c:358 +#: utils/adt/arrayfuncs.c:489 utils/adt/arrayfuncs.c:505 +#: utils/adt/arrayfuncs.c:516 utils/adt/arrayfuncs.c:531 +#: utils/adt/arrayfuncs.c:552 utils/adt/arrayfuncs.c:582 +#: utils/adt/arrayfuncs.c:589 utils/adt/arrayfuncs.c:597 +#: utils/adt/arrayfuncs.c:631 utils/adt/arrayfuncs.c:654 +#: utils/adt/arrayfuncs.c:674 utils/adt/arrayfuncs.c:786 +#: utils/adt/arrayfuncs.c:795 utils/adt/arrayfuncs.c:825 +#: utils/adt/arrayfuncs.c:840 utils/adt/arrayfuncs.c:893 #, c-format msgid "malformed array literal: \"%s\"" msgstr "fehlerhafte Arraykonstante: »%s«" -#: utils/adt/arrayfuncs.c:269 +#: utils/adt/arrayfuncs.c:270 #, c-format msgid "\"[\" must introduce explicitly-specified array dimensions." msgstr "Auf »[« müssen explizit angegebene Array-Dimensionen folgen." -#: utils/adt/arrayfuncs.c:283 +#: utils/adt/arrayfuncs.c:284 #, c-format msgid "Missing array dimension value." msgstr "Dimensionswert fehlt." -#: utils/adt/arrayfuncs.c:294 utils/adt/arrayfuncs.c:331 +#: utils/adt/arrayfuncs.c:295 utils/adt/arrayfuncs.c:332 #, c-format msgid "Missing \"%s\" after array dimensions." msgstr "»%s« fehlt nach Arraydimensionen." -#: utils/adt/arrayfuncs.c:303 utils/adt/arrayfuncs.c:2870 -#: utils/adt/arrayfuncs.c:2902 utils/adt/arrayfuncs.c:2917 +#: utils/adt/arrayfuncs.c:304 utils/adt/arrayfuncs.c:2871 +#: utils/adt/arrayfuncs.c:2903 utils/adt/arrayfuncs.c:2918 #, c-format msgid "upper bound cannot be less than lower bound" msgstr "Obergrenze kann nicht kleiner als Untergrenze sein" -#: utils/adt/arrayfuncs.c:316 +#: utils/adt/arrayfuncs.c:317 #, c-format msgid "Array value must start with \"{\" or dimension information." msgstr "Arraywert muss mit »{« oder Dimensionsinformationen anfangen." -#: utils/adt/arrayfuncs.c:345 +#: utils/adt/arrayfuncs.c:346 #, c-format msgid "Array contents must start with \"{\"." msgstr "Array-Inhalt muss mit »{« anfangen." 
-#: utils/adt/arrayfuncs.c:351 utils/adt/arrayfuncs.c:358 +#: utils/adt/arrayfuncs.c:352 utils/adt/arrayfuncs.c:359 #, c-format msgid "Specified array dimensions do not match array contents." msgstr "Angegebene Array-Dimensionen stimmen nicht mit dem Array-Inhalt überein." -#: utils/adt/arrayfuncs.c:489 utils/adt/arrayfuncs.c:516 -#: utils/adt/rangetypes.c:2114 utils/adt/rangetypes.c:2122 -#: utils/adt/rowtypes.c:208 utils/adt/rowtypes.c:216 +#: utils/adt/arrayfuncs.c:490 utils/adt/arrayfuncs.c:517 +#: utils/adt/rangetypes.c:2178 utils/adt/rangetypes.c:2186 +#: utils/adt/rowtypes.c:209 utils/adt/rowtypes.c:217 #, c-format msgid "Unexpected end of input." msgstr "Unerwartetes Ende der Eingabe." -#: utils/adt/arrayfuncs.c:505 utils/adt/arrayfuncs.c:552 -#: utils/adt/arrayfuncs.c:582 utils/adt/arrayfuncs.c:631 +#: utils/adt/arrayfuncs.c:506 utils/adt/arrayfuncs.c:553 +#: utils/adt/arrayfuncs.c:583 utils/adt/arrayfuncs.c:632 #, c-format msgid "Unexpected \"%c\" character." msgstr "Unerwartetes Zeichen »%c«." -#: utils/adt/arrayfuncs.c:531 utils/adt/arrayfuncs.c:654 +#: utils/adt/arrayfuncs.c:532 utils/adt/arrayfuncs.c:655 #, c-format msgid "Unexpected array element." msgstr "Unerwartetes Arrayelement." -#: utils/adt/arrayfuncs.c:589 +#: utils/adt/arrayfuncs.c:590 #, c-format msgid "Unmatched \"%c\" character." msgstr "Zeichen »%c« ohne Gegenstück." -#: utils/adt/arrayfuncs.c:597 +#: utils/adt/arrayfuncs.c:598 utils/adt/jsonfuncs.c:2394 #, c-format msgid "Multidimensional arrays must have sub-arrays with matching dimensions." msgstr "Mehrdimensionale Arrays müssen Arrayausdrücke mit gleicher Anzahl Dimensionen haben." -#: utils/adt/arrayfuncs.c:674 +#: utils/adt/arrayfuncs.c:675 #, c-format msgid "Junk after closing right brace." msgstr "Müll nach schließender rechter geschweifter Klammer." 
-#: utils/adt/arrayfuncs.c:1295 +#: utils/adt/arrayfuncs.c:1285 utils/adt/arrayfuncs.c:3331 +#: utils/adt/arrayfuncs.c:5804 +#, c-format +msgid "invalid number of dimensions: %d" +msgstr "ungültige Anzahl Dimensionen: %d" + +#: utils/adt/arrayfuncs.c:1296 #, c-format msgid "invalid array flags" msgstr "ungültige Array-Flags" -#: utils/adt/arrayfuncs.c:1303 +#: utils/adt/arrayfuncs.c:1304 #, c-format msgid "wrong element type" msgstr "falscher Elementtyp" -#: utils/adt/arrayfuncs.c:1353 utils/adt/rangetypes.c:334 -#: utils/cache/lsyscache.c:2651 +#: utils/adt/arrayfuncs.c:1354 utils/adt/rangetypes.c:334 +#: utils/cache/lsyscache.c:2701 #, c-format msgid "no binary input function available for type %s" msgstr "keine binäre Eingabefunktion verfügbar für Typ %s" -#: utils/adt/arrayfuncs.c:1493 +#: utils/adt/arrayfuncs.c:1494 #, c-format msgid "improper binary format in array element %d" msgstr "falsches Binärformat in Arrayelement %d" -#: utils/adt/arrayfuncs.c:1574 utils/adt/rangetypes.c:339 -#: utils/cache/lsyscache.c:2684 +#: utils/adt/arrayfuncs.c:1575 utils/adt/rangetypes.c:339 +#: utils/cache/lsyscache.c:2734 #, c-format msgid "no binary output function available for type %s" msgstr "keine binäre Ausgabefunktion verfügbar für Typ %s" -#: utils/adt/arrayfuncs.c:2052 +#: utils/adt/arrayfuncs.c:2053 #, c-format msgid "slices of fixed-length arrays not implemented" msgstr "Auswählen von Stücken aus Arrays mit fester Länge ist nicht implementiert" -#: utils/adt/arrayfuncs.c:2230 utils/adt/arrayfuncs.c:2252 -#: utils/adt/arrayfuncs.c:2301 utils/adt/arrayfuncs.c:2537 -#: utils/adt/arrayfuncs.c:2848 utils/adt/arrayfuncs.c:5740 -#: utils/adt/arrayfuncs.c:5766 utils/adt/arrayfuncs.c:5777 -#: utils/adt/json.c:2295 utils/adt/json.c:2370 utils/adt/jsonb.c:1370 -#: utils/adt/jsonb.c:1456 utils/adt/jsonfuncs.c:3465 -#: utils/adt/jsonfuncs.c:3616 utils/adt/jsonfuncs.c:3661 -#: utils/adt/jsonfuncs.c:3708 +#: utils/adt/arrayfuncs.c:2231 utils/adt/arrayfuncs.c:2253 +#: utils/adt/arrayfuncs.c:2302 utils/adt/arrayfuncs.c:2538 +#: utils/adt/arrayfuncs.c:2849 utils/adt/arrayfuncs.c:5790 +#: utils/adt/arrayfuncs.c:5816 utils/adt/arrayfuncs.c:5827 +#: utils/adt/json.c:2323 utils/adt/json.c:2398 utils/adt/jsonb.c:1282 +#: utils/adt/jsonb.c:1368 utils/adt/jsonfuncs.c:4277 utils/adt/jsonfuncs.c:4428 +#: utils/adt/jsonfuncs.c:4473 utils/adt/jsonfuncs.c:4520 #, c-format msgid "wrong number of array subscripts" msgstr "falsche Anzahl Arrayindizes" -#: utils/adt/arrayfuncs.c:2235 utils/adt/arrayfuncs.c:2343 -#: utils/adt/arrayfuncs.c:2601 utils/adt/arrayfuncs.c:2907 +#: utils/adt/arrayfuncs.c:2236 utils/adt/arrayfuncs.c:2344 +#: utils/adt/arrayfuncs.c:2602 utils/adt/arrayfuncs.c:2908 #, c-format msgid "array subscript out of range" msgstr "Arrayindex außerhalb des gültigen Bereichs" -#: utils/adt/arrayfuncs.c:2240 +#: utils/adt/arrayfuncs.c:2241 #, c-format msgid "cannot assign null value to an element of a fixed-length array" msgstr "Array mit fester Länge kann keinen NULL-Wert enthalten" -#: utils/adt/arrayfuncs.c:2795 +#: utils/adt/arrayfuncs.c:2796 #, c-format msgid "updates on slices of fixed-length arrays not implemented" msgstr "Aktualisieren von Stücken aus Arrays mit fester Länge ist nicht implementiert" -#: utils/adt/arrayfuncs.c:2826 +#: utils/adt/arrayfuncs.c:2827 #, c-format msgid "array slice subscript must provide both boundaries" msgstr "Array-Slice-Index muss beide Begrenzungen angeben" -#: utils/adt/arrayfuncs.c:2827 +#: utils/adt/arrayfuncs.c:2828 #, c-format msgid "When assigning to a slice of an 
empty array value, slice boundaries must be fully specified." msgstr "Wenn ein Slice eines leeren Array-Wertes zugewiesen wird, dann müssen die Slice-Begrenzungen vollständig angegeben werden." -#: utils/adt/arrayfuncs.c:2838 utils/adt/arrayfuncs.c:2933 +#: utils/adt/arrayfuncs.c:2839 utils/adt/arrayfuncs.c:2934 #, c-format msgid "source array too small" msgstr "Quellarray ist zu klein" -#: utils/adt/arrayfuncs.c:3513 +#: utils/adt/arrayfuncs.c:3487 #, c-format msgid "null array element not allowed in this context" msgstr "NULL-Werte im Array sind in diesem Zusammenhang nicht erlaubt" -#: utils/adt/arrayfuncs.c:3615 utils/adt/arrayfuncs.c:3786 -#: utils/adt/arrayfuncs.c:4060 +#: utils/adt/arrayfuncs.c:3589 utils/adt/arrayfuncs.c:3760 +#: utils/adt/arrayfuncs.c:4112 #, c-format msgid "cannot compare arrays of different element types" msgstr "kann Arrays mit verschiedenen Elementtypen nicht vergleichen" -#: utils/adt/arrayfuncs.c:3962 utils/adt/rangetypes.c:1253 +#: utils/adt/arrayfuncs.c:3936 utils/adt/rangetypes.c:1253 +#: utils/adt/rangetypes.c:1317 #, c-format msgid "could not identify a hash function for type %s" msgstr "konnte keine Hash-Funktion für Typ %s ermitteln" -#: utils/adt/arrayfuncs.c:5154 +#: utils/adt/arrayfuncs.c:4028 +#, fuzzy, c-format +#| msgid "could not identify a hash function for type %s" +msgid "could not identify an extended hash function for type %s" +msgstr "konnte keine Hash-Funktion für Typ %s ermitteln" + +#: utils/adt/arrayfuncs.c:5204 #, c-format msgid "data type %s is not an array type" msgstr "Datentyp %s ist kein Array-Typ" -#: utils/adt/arrayfuncs.c:5209 +#: utils/adt/arrayfuncs.c:5259 #, c-format msgid "cannot accumulate null arrays" msgstr "Arrays, die NULL sind, können nicht akkumuliert werden" -#: utils/adt/arrayfuncs.c:5237 +#: utils/adt/arrayfuncs.c:5287 #, c-format msgid "cannot accumulate empty arrays" msgstr "leere Arrays können nicht akkumuliert werden" -#: utils/adt/arrayfuncs.c:5266 utils/adt/arrayfuncs.c:5272 +#: utils/adt/arrayfuncs.c:5316 utils/adt/arrayfuncs.c:5322 #, c-format msgid "cannot accumulate arrays of different dimensionality" msgstr "Arrays unterschiedlicher Dimensionalität können nicht akkumuliert werden" -#: utils/adt/arrayfuncs.c:5638 utils/adt/arrayfuncs.c:5678 +#: utils/adt/arrayfuncs.c:5688 utils/adt/arrayfuncs.c:5728 #, c-format msgid "dimension array or low bound array cannot be null" msgstr "Dimensions-Array oder Untergrenzen-Array darf nicht NULL sein" -#: utils/adt/arrayfuncs.c:5741 utils/adt/arrayfuncs.c:5767 +#: utils/adt/arrayfuncs.c:5791 utils/adt/arrayfuncs.c:5817 #, c-format msgid "Dimension array must be one dimensional." msgstr "Dimensions-Array muss eindimensional sein." -#: utils/adt/arrayfuncs.c:5746 utils/adt/arrayfuncs.c:5772 +#: utils/adt/arrayfuncs.c:5796 utils/adt/arrayfuncs.c:5822 #, c-format msgid "dimension values cannot be null" msgstr "Dimensionswerte dürfen nicht NULL sein" -#: utils/adt/arrayfuncs.c:5778 +#: utils/adt/arrayfuncs.c:5828 #, c-format msgid "Low bound array has different size than dimensions array." msgstr "Untergrenzen-Array hat andere Größe als Dimensions-Array." 
-#: utils/adt/arrayfuncs.c:6024 +#: utils/adt/arrayfuncs.c:6074 #, c-format msgid "removing elements from multidimensional arrays is not supported" msgstr "Entfernen von Elementen aus mehrdimensionalen Arrays wird nicht unterstützt" -#: utils/adt/arrayfuncs.c:6301 +#: utils/adt/arrayfuncs.c:6351 #, c-format msgid "thresholds must be one-dimensional array" msgstr "Parameter »thresholds« muss ein eindimensionales Array sein" -#: utils/adt/arrayfuncs.c:6306 +#: utils/adt/arrayfuncs.c:6356 #, c-format msgid "thresholds array must not contain NULLs" msgstr "»thresholds«-Array darf keine NULL-Werte enthalten" @@ -19340,42 +20740,43 @@ msgid "encoding conversion from %s to ASCII not supported" msgstr "Kodierungsumwandlung zwischen %s und ASCII wird nicht unterstützt" #. translator: first %s is inet or cidr -#: utils/adt/bool.c:153 utils/adt/cash.c:278 utils/adt/datetime.c:3799 -#: utils/adt/float.c:244 utils/adt/float.c:318 utils/adt/float.c:342 -#: utils/adt/float.c:461 utils/adt/float.c:544 utils/adt/float.c:570 -#: utils/adt/geo_ops.c:156 utils/adt/geo_ops.c:166 utils/adt/geo_ops.c:178 -#: utils/adt/geo_ops.c:210 utils/adt/geo_ops.c:255 utils/adt/geo_ops.c:265 -#: utils/adt/geo_ops.c:935 utils/adt/geo_ops.c:1321 utils/adt/geo_ops.c:1356 -#: utils/adt/geo_ops.c:1364 utils/adt/geo_ops.c:3430 utils/adt/geo_ops.c:4563 -#: utils/adt/geo_ops.c:4579 utils/adt/geo_ops.c:4586 utils/adt/mac.c:68 -#: utils/adt/nabstime.c:1539 utils/adt/network.c:58 utils/adt/numeric.c:593 -#: utils/adt/numeric.c:620 utils/adt/numeric.c:5488 utils/adt/numeric.c:5512 -#: utils/adt/numeric.c:5536 utils/adt/numeric.c:6338 utils/adt/numeric.c:6364 -#: utils/adt/oid.c:44 utils/adt/oid.c:58 utils/adt/oid.c:64 utils/adt/oid.c:86 +#: utils/adt/bool.c:153 utils/adt/cash.c:277 utils/adt/datetime.c:3788 +#: utils/adt/float.c:241 utils/adt/float.c:315 utils/adt/float.c:339 +#: utils/adt/float.c:458 utils/adt/float.c:541 utils/adt/float.c:567 +#: utils/adt/geo_ops.c:155 utils/adt/geo_ops.c:165 utils/adt/geo_ops.c:177 +#: utils/adt/geo_ops.c:209 utils/adt/geo_ops.c:254 utils/adt/geo_ops.c:264 +#: utils/adt/geo_ops.c:934 utils/adt/geo_ops.c:1320 utils/adt/geo_ops.c:1355 +#: utils/adt/geo_ops.c:1363 utils/adt/geo_ops.c:3429 utils/adt/geo_ops.c:4562 +#: utils/adt/geo_ops.c:4578 utils/adt/geo_ops.c:4585 utils/adt/mac.c:94 +#: utils/adt/mac8.c:93 utils/adt/mac8.c:166 utils/adt/mac8.c:184 +#: utils/adt/mac8.c:202 utils/adt/mac8.c:221 utils/adt/nabstime.c:1539 +#: utils/adt/network.c:58 utils/adt/numeric.c:604 utils/adt/numeric.c:631 +#: utils/adt/numeric.c:5662 utils/adt/numeric.c:5686 utils/adt/numeric.c:5710 +#: utils/adt/numeric.c:6516 utils/adt/numeric.c:6542 utils/adt/oid.c:44 +#: utils/adt/oid.c:58 utils/adt/oid.c:64 utils/adt/oid.c:86 #: utils/adt/pg_lsn.c:44 utils/adt/pg_lsn.c:50 utils/adt/tid.c:72 -#: utils/adt/tid.c:80 utils/adt/tid.c:88 utils/adt/txid.c:339 +#: utils/adt/tid.c:80 utils/adt/tid.c:88 utils/adt/txid.c:405 #: utils/adt/uuid.c:136 #, c-format msgid "invalid input syntax for type %s: \"%s\"" msgstr "ungültige Eingabesyntax für Typ %s: »%s«" -#: utils/adt/cash.c:211 utils/adt/cash.c:238 utils/adt/cash.c:249 -#: utils/adt/cash.c:290 utils/adt/int8.c:114 utils/adt/numutils.c:75 +#: utils/adt/cash.c:215 utils/adt/cash.c:240 utils/adt/cash.c:250 +#: utils/adt/cash.c:290 utils/adt/int8.c:117 utils/adt/numutils.c:75 #: utils/adt/numutils.c:82 utils/adt/oid.c:70 utils/adt/oid.c:109 #, c-format msgid "value \"%s\" is out of range for type %s" msgstr "Wert »%s« ist außerhalb des gültigen Bereichs für Typ %s" -#: 
utils/adt/cash.c:651 utils/adt/cash.c:701 utils/adt/cash.c:752 -#: utils/adt/cash.c:801 utils/adt/cash.c:853 utils/adt/cash.c:903 -#: utils/adt/float.c:855 utils/adt/float.c:919 utils/adt/float.c:3315 -#: utils/adt/float.c:3378 utils/adt/geo_ops.c:4093 utils/adt/int.c:704 -#: utils/adt/int.c:846 utils/adt/int.c:954 utils/adt/int.c:1043 -#: utils/adt/int.c:1082 utils/adt/int.c:1110 utils/adt/int8.c:597 -#: utils/adt/int8.c:657 utils/adt/int8.c:897 utils/adt/int8.c:1005 -#: utils/adt/int8.c:1094 utils/adt/int8.c:1202 utils/adt/numeric.c:6902 -#: utils/adt/numeric.c:7191 utils/adt/numeric.c:8204 -#: utils/adt/timestamp.c:3186 +#: utils/adt/cash.c:652 utils/adt/cash.c:702 utils/adt/cash.c:753 +#: utils/adt/cash.c:802 utils/adt/cash.c:854 utils/adt/cash.c:904 +#: utils/adt/float.c:852 utils/adt/float.c:916 utils/adt/float.c:3469 +#: utils/adt/float.c:3532 utils/adt/geo_ops.c:4092 utils/adt/int.c:820 +#: utils/adt/int.c:936 utils/adt/int.c:1016 utils/adt/int.c:1078 +#: utils/adt/int.c:1116 utils/adt/int.c:1144 utils/adt/int8.c:592 +#: utils/adt/int8.c:650 utils/adt/int8.c:850 utils/adt/int8.c:930 +#: utils/adt/int8.c:992 utils/adt/int8.c:1072 utils/adt/numeric.c:7080 +#: utils/adt/numeric.c:7369 utils/adt/numeric.c:8381 utils/adt/timestamp.c:3235 #, c-format msgid "division by zero" msgstr "Division durch Null" @@ -19385,162 +20786,172 @@ msgstr "Division durch Null" msgid "\"char\" out of range" msgstr "\"char\" ist außerhalb des gültigen Bereichs" -#: utils/adt/date.c:67 utils/adt/timestamp.c:94 utils/adt/varbit.c:53 +#: utils/adt/date.c:65 utils/adt/timestamp.c:95 utils/adt/varbit.c:54 #: utils/adt/varchar.c:46 #, c-format msgid "invalid type modifier" msgstr "ungültige Typmodifikation" -#: utils/adt/date.c:79 +#: utils/adt/date.c:77 #, c-format msgid "TIME(%d)%s precision must not be negative" msgstr "Präzision von TIME(%d)%s darf nicht negativ sein" -#: utils/adt/date.c:85 +#: utils/adt/date.c:83 #, c-format msgid "TIME(%d)%s precision reduced to maximum allowed, %d" msgstr "Präzision von TIME(%d)%s auf erlaubten Höchstwert %d reduziert" -#: utils/adt/date.c:146 utils/adt/datetime.c:1209 utils/adt/datetime.c:2117 +#: utils/adt/date.c:144 utils/adt/datetime.c:1193 utils/adt/datetime.c:2104 #, c-format msgid "date/time value \"current\" is no longer supported" msgstr "Datum/Zeitwert »current« wird nicht mehr unterstützt" -#: utils/adt/date.c:172 utils/adt/date.c:180 utils/adt/formatting.c:3523 -#: utils/adt/formatting.c:3532 +#: utils/adt/date.c:170 utils/adt/date.c:178 utils/adt/formatting.c:3606 +#: utils/adt/formatting.c:3615 #, c-format msgid "date out of range: \"%s\"" msgstr "date ist außerhalb des gültigen Bereichs: »%s«" -#: utils/adt/date.c:227 utils/adt/date.c:539 utils/adt/date.c:563 -#: utils/adt/xml.c:2086 +#: utils/adt/date.c:225 utils/adt/date.c:537 utils/adt/date.c:561 +#: utils/adt/xml.c:2089 #, c-format msgid "date out of range" msgstr "date ist außerhalb des gültigen Bereichs" -#: utils/adt/date.c:273 utils/adt/timestamp.c:563 +#: utils/adt/date.c:271 utils/adt/timestamp.c:564 #, c-format msgid "date field value out of range: %d-%02d-%02d" msgstr "Datum-Feldwert ist außerhalb des gültigen Bereichs: %d-%02d-%02d" -#: utils/adt/date.c:280 utils/adt/date.c:289 utils/adt/timestamp.c:569 +#: utils/adt/date.c:278 utils/adt/date.c:287 utils/adt/timestamp.c:570 #, c-format msgid "date out of range: %d-%02d-%02d" msgstr "date ist außerhalb des gültigen Bereichs: %d-%02d-%02d" -#: utils/adt/date.c:327 utils/adt/date.c:350 utils/adt/date.c:376 -#: utils/adt/date.c:1092 
utils/adt/date.c:1138 utils/adt/date.c:1672 -#: utils/adt/date.c:1703 utils/adt/date.c:1732 utils/adt/date.c:2469 -#: utils/adt/datetime.c:1690 utils/adt/formatting.c:3398 -#: utils/adt/formatting.c:3430 utils/adt/formatting.c:3498 -#: utils/adt/json.c:1539 utils/adt/json.c:1561 utils/adt/jsonb.c:824 -#: utils/adt/jsonb.c:848 utils/adt/nabstime.c:456 utils/adt/nabstime.c:499 -#: utils/adt/nabstime.c:529 utils/adt/nabstime.c:572 utils/adt/timestamp.c:229 -#: utils/adt/timestamp.c:261 utils/adt/timestamp.c:691 -#: utils/adt/timestamp.c:700 utils/adt/timestamp.c:778 -#: utils/adt/timestamp.c:811 utils/adt/timestamp.c:2765 -#: utils/adt/timestamp.c:2786 utils/adt/timestamp.c:2799 -#: utils/adt/timestamp.c:2808 utils/adt/timestamp.c:2816 -#: utils/adt/timestamp.c:2871 utils/adt/timestamp.c:2894 -#: utils/adt/timestamp.c:2907 utils/adt/timestamp.c:2918 -#: utils/adt/timestamp.c:2926 utils/adt/timestamp.c:3482 -#: utils/adt/timestamp.c:3607 utils/adt/timestamp.c:3648 -#: utils/adt/timestamp.c:3729 utils/adt/timestamp.c:3775 -#: utils/adt/timestamp.c:3878 utils/adt/timestamp.c:4277 -#: utils/adt/timestamp.c:4376 utils/adt/timestamp.c:4386 -#: utils/adt/timestamp.c:4478 utils/adt/timestamp.c:4580 -#: utils/adt/timestamp.c:4590 utils/adt/timestamp.c:4822 -#: utils/adt/timestamp.c:4836 utils/adt/timestamp.c:4841 -#: utils/adt/timestamp.c:4855 utils/adt/timestamp.c:4900 -#: utils/adt/timestamp.c:4932 utils/adt/timestamp.c:4939 -#: utils/adt/timestamp.c:4972 utils/adt/timestamp.c:4976 -#: utils/adt/timestamp.c:5045 utils/adt/timestamp.c:5049 -#: utils/adt/timestamp.c:5063 utils/adt/timestamp.c:5097 utils/adt/xml.c:2108 -#: utils/adt/xml.c:2115 utils/adt/xml.c:2135 utils/adt/xml.c:2142 +#: utils/adt/date.c:325 utils/adt/date.c:348 utils/adt/date.c:374 +#: utils/adt/date.c:1118 utils/adt/date.c:1164 utils/adt/date.c:1704 +#: utils/adt/date.c:1735 utils/adt/date.c:1764 utils/adt/date.c:2596 +#: utils/adt/datetime.c:1677 utils/adt/formatting.c:3472 +#: utils/adt/formatting.c:3504 utils/adt/formatting.c:3581 +#: utils/adt/json.c:1621 utils/adt/json.c:1641 utils/adt/nabstime.c:456 +#: utils/adt/nabstime.c:499 utils/adt/nabstime.c:529 utils/adt/nabstime.c:572 +#: utils/adt/timestamp.c:230 utils/adt/timestamp.c:262 +#: utils/adt/timestamp.c:692 utils/adt/timestamp.c:701 +#: utils/adt/timestamp.c:779 utils/adt/timestamp.c:812 +#: utils/adt/timestamp.c:2814 utils/adt/timestamp.c:2835 +#: utils/adt/timestamp.c:2848 utils/adt/timestamp.c:2857 +#: utils/adt/timestamp.c:2865 utils/adt/timestamp.c:2920 +#: utils/adt/timestamp.c:2943 utils/adt/timestamp.c:2956 +#: utils/adt/timestamp.c:2967 utils/adt/timestamp.c:2975 +#: utils/adt/timestamp.c:3635 utils/adt/timestamp.c:3760 +#: utils/adt/timestamp.c:3801 utils/adt/timestamp.c:3891 +#: utils/adt/timestamp.c:3937 utils/adt/timestamp.c:4040 +#: utils/adt/timestamp.c:4447 utils/adt/timestamp.c:4546 +#: utils/adt/timestamp.c:4556 utils/adt/timestamp.c:4648 +#: utils/adt/timestamp.c:4750 utils/adt/timestamp.c:4760 +#: utils/adt/timestamp.c:4992 utils/adt/timestamp.c:5006 +#: utils/adt/timestamp.c:5011 utils/adt/timestamp.c:5025 +#: utils/adt/timestamp.c:5070 utils/adt/timestamp.c:5102 +#: utils/adt/timestamp.c:5109 utils/adt/timestamp.c:5142 +#: utils/adt/timestamp.c:5146 utils/adt/timestamp.c:5215 +#: utils/adt/timestamp.c:5219 utils/adt/timestamp.c:5233 +#: utils/adt/timestamp.c:5267 utils/adt/xml.c:2111 utils/adt/xml.c:2118 +#: utils/adt/xml.c:2138 utils/adt/xml.c:2145 #, c-format msgid "timestamp out of range" msgstr "timestamp ist außerhalb des gültigen Bereichs" -#: 
utils/adt/date.c:514 +#: utils/adt/date.c:512 #, c-format msgid "cannot subtract infinite dates" msgstr "kann unendliche date-Werte nicht subtrahieren" -#: utils/adt/date.c:592 utils/adt/date.c:623 utils/adt/date.c:641 -#: utils/adt/date.c:2506 utils/adt/date.c:2516 +#: utils/adt/date.c:590 utils/adt/date.c:621 utils/adt/date.c:639 +#: utils/adt/date.c:2633 utils/adt/date.c:2643 #, c-format msgid "date out of range for timestamp" msgstr "Datum ist außerhalb des gültigen Bereichs für Typ »timestamp«" -#: utils/adt/date.c:1164 +#: utils/adt/date.c:1190 #, c-format msgid "cannot convert reserved abstime value to date" msgstr "kann reservierten »abstime«-Wert nicht in »date« umwandeln" -#: utils/adt/date.c:1182 utils/adt/date.c:1188 +#: utils/adt/date.c:1208 utils/adt/date.c:1214 #, c-format msgid "abstime out of range for date" msgstr "abstime ist außerhalb des gültigen Bereichs für Typ »date«" -#: utils/adt/date.c:1301 utils/adt/date.c:2020 +#: utils/adt/date.c:1327 utils/adt/date.c:2091 #, c-format msgid "time out of range" msgstr "time ist außerhalb des gültigen Bereichs" -#: utils/adt/date.c:1357 utils/adt/timestamp.c:588 +#: utils/adt/date.c:1383 utils/adt/timestamp.c:589 #, c-format msgid "time field value out of range: %d:%02d:%02g" msgstr "Zeit-Feldwert ist außerhalb des gültigen Bereichs: %d:%02d:%02g" -#: utils/adt/date.c:1907 utils/adt/date.c:1920 +#: utils/adt/date.c:1893 utils/adt/date.c:2395 utils/adt/float.c:1202 +#: utils/adt/float.c:1271 utils/adt/int.c:612 utils/adt/int.c:659 +#: utils/adt/int.c:694 utils/adt/int8.c:491 utils/adt/numeric.c:2189 +#: utils/adt/timestamp.c:3284 utils/adt/timestamp.c:3315 +#: utils/adt/timestamp.c:3346 +#, fuzzy, c-format +#| msgid "window functions are not allowed in window definitions" +msgid "invalid preceding or following size in window function" +msgstr "Fensterfunktionen sind in Fensterdefinitionen nicht erlaubt" + +#: utils/adt/date.c:1978 utils/adt/date.c:1991 #, c-format msgid "\"time\" units \"%s\" not recognized" msgstr "»time«-Einheit »%s« nicht erkannt" -#: utils/adt/date.c:2028 +#: utils/adt/date.c:2099 #, c-format msgid "time zone displacement out of range" msgstr "Zeitzonenunterschied ist außerhalb des gültigen Bereichs" -#: utils/adt/date.c:2601 utils/adt/date.c:2614 +#: utils/adt/date.c:2728 utils/adt/date.c:2741 #, c-format msgid "\"time with time zone\" units \"%s\" not recognized" msgstr "»time with time zone«-Einheit »%s« nicht erkannt" -#: utils/adt/date.c:2687 utils/adt/datetime.c:931 utils/adt/datetime.c:1848 -#: utils/adt/datetime.c:4636 utils/adt/timestamp.c:502 -#: utils/adt/timestamp.c:529 utils/adt/timestamp.c:4847 -#: utils/adt/timestamp.c:5055 +#: utils/adt/date.c:2814 utils/adt/datetime.c:915 utils/adt/datetime.c:1835 +#: utils/adt/datetime.c:4625 utils/adt/timestamp.c:503 +#: utils/adt/timestamp.c:530 utils/adt/timestamp.c:5017 +#: utils/adt/timestamp.c:5225 #, c-format msgid "time zone \"%s\" not recognized" msgstr "Zeitzone »%s« nicht erkannt" -#: utils/adt/date.c:2719 utils/adt/timestamp.c:4889 utils/adt/timestamp.c:5086 +#: utils/adt/date.c:2846 utils/adt/timestamp.c:5059 utils/adt/timestamp.c:5256 #, c-format msgid "interval time zone \"%s\" must not include months or days" msgstr "Intervall-Zeitzone »%s« darf keine Monate oder Tage enthalten" -#: utils/adt/datetime.c:3772 utils/adt/datetime.c:3779 +#: utils/adt/datetime.c:3761 utils/adt/datetime.c:3768 #, c-format msgid "date/time field value out of range: \"%s\"" msgstr "Datum/Zeit-Feldwert ist außerhalb des gültigen Bereichs: »%s«" -#: 
utils/adt/datetime.c:3781 +#: utils/adt/datetime.c:3770 #, c-format msgid "Perhaps you need a different \"datestyle\" setting." msgstr "Möglicherweise benötigen Sie eine andere »datestyle«-Einstellung." -#: utils/adt/datetime.c:3786 +#: utils/adt/datetime.c:3775 #, c-format msgid "interval field value out of range: \"%s\"" msgstr "»interval«-Feldwert ist außerhalb des gültigen Bereichs: »%s«" -#: utils/adt/datetime.c:3792 +#: utils/adt/datetime.c:3781 #, c-format msgid "time zone displacement out of range: \"%s\"" msgstr "Zeitzonenunterschied ist außerhalb des gültigen Bereichs: »%s«" -#: utils/adt/datetime.c:4638 +#: utils/adt/datetime.c:4627 #, c-format msgid "This time zone name appears in the configuration file for time zone abbreviation \"%s\"." msgstr "Dieser Zeitzonenname erscheint in der Konfigurationsdatei für Zeitzonenabkürzung »%s«." @@ -19550,27 +20961,22 @@ msgstr "Dieser Zeitzonenname erscheint in der Konfigurationsdatei für Zeitzonen msgid "invalid Datum pointer" msgstr "ungültiger »Datum«-Zeiger" -#: utils/adt/dbsize.c:109 -#, c-format -msgid "could not open tablespace directory \"%s\": %m" -msgstr "konnte Tablespace-Verzeichnis »%s« nicht öffnen: %m" - -#: utils/adt/dbsize.c:756 utils/adt/dbsize.c:824 +#: utils/adt/dbsize.c:759 utils/adt/dbsize.c:827 #, c-format msgid "invalid size: \"%s\"" msgstr "ungültige Größe: »%s«" -#: utils/adt/dbsize.c:825 +#: utils/adt/dbsize.c:828 #, c-format msgid "Invalid size unit: \"%s\"." msgstr "Ungültige Größeneinheit: »%s«." -#: utils/adt/dbsize.c:826 +#: utils/adt/dbsize.c:829 #, c-format msgid "Valid units are \"bytes\", \"kB\", \"MB\", \"GB\", and \"TB\"." msgstr "Gültige Einheiten sind »kB«, »MB«, »GB« und »TB«." -#: utils/adt/domains.c:91 +#: utils/adt/domains.c:92 #, c-format msgid "type %s is not a domain" msgstr "Typ %s ist keine Domäne" @@ -19610,701 +21016,725 @@ msgstr "ungültige Base64-Endsequenz" msgid "Input data is missing padding, is truncated, or is otherwise corrupted." msgstr "Die Eingabedaten haben fehlendes Padding, sind zu kurz oder sind anderweitig verfälscht." -#: utils/adt/encode.c:442 utils/adt/encode.c:507 utils/adt/json.c:785 -#: utils/adt/json.c:825 utils/adt/json.c:841 utils/adt/json.c:853 -#: utils/adt/json.c:863 utils/adt/json.c:914 utils/adt/json.c:946 -#: utils/adt/json.c:965 utils/adt/json.c:977 utils/adt/json.c:989 -#: utils/adt/json.c:1134 utils/adt/json.c:1148 utils/adt/json.c:1159 -#: utils/adt/json.c:1167 utils/adt/json.c:1175 utils/adt/json.c:1183 -#: utils/adt/json.c:1191 utils/adt/json.c:1199 utils/adt/json.c:1207 -#: utils/adt/json.c:1215 utils/adt/json.c:1245 utils/adt/varlena.c:298 -#: utils/adt/varlena.c:339 +#: utils/adt/encode.c:442 utils/adt/encode.c:507 utils/adt/json.c:786 +#: utils/adt/json.c:826 utils/adt/json.c:842 utils/adt/json.c:854 +#: utils/adt/json.c:864 utils/adt/json.c:915 utils/adt/json.c:947 +#: utils/adt/json.c:966 utils/adt/json.c:978 utils/adt/json.c:990 +#: utils/adt/json.c:1135 utils/adt/json.c:1149 utils/adt/json.c:1160 +#: utils/adt/json.c:1168 utils/adt/json.c:1176 utils/adt/json.c:1184 +#: utils/adt/json.c:1192 utils/adt/json.c:1200 utils/adt/json.c:1208 +#: utils/adt/json.c:1216 utils/adt/json.c:1246 utils/adt/varlena.c:296 +#: utils/adt/varlena.c:337 #, c-format msgid "invalid input syntax for type %s" msgstr "ungültige Eingabesyntax für Typ %s" -#: utils/adt/enum.c:115 -#, c-format -msgid "unsafe use of new value \"%s\" of enum type %s" -msgstr "" - -#: utils/adt/enum.c:118 -#, c-format -msgid "New enum values must be committed before they can be used." 
-msgstr "" - -#: utils/adt/enum.c:136 utils/adt/enum.c:146 utils/adt/enum.c:204 -#: utils/adt/enum.c:214 +#: utils/adt/enum.c:48 utils/adt/enum.c:58 utils/adt/enum.c:113 +#: utils/adt/enum.c:123 #, c-format msgid "invalid input value for enum %s: \"%s\"" msgstr "ungültiger Eingabewert für Enum %s: »%s«" -#: utils/adt/enum.c:176 utils/adt/enum.c:242 utils/adt/enum.c:301 +#: utils/adt/enum.c:85 utils/adt/enum.c:148 utils/adt/enum.c:207 #, c-format msgid "invalid internal value for enum: %u" msgstr "ungültiger interner Wert für Enum: %u" -#: utils/adt/enum.c:461 utils/adt/enum.c:490 utils/adt/enum.c:530 -#: utils/adt/enum.c:550 +#: utils/adt/enum.c:360 utils/adt/enum.c:389 utils/adt/enum.c:429 +#: utils/adt/enum.c:449 #, c-format msgid "could not determine actual enum type" msgstr "konnte tatsächlichen Enum-Typen nicht bestimmen" -#: utils/adt/enum.c:469 utils/adt/enum.c:498 +#: utils/adt/enum.c:368 utils/adt/enum.c:397 #, c-format msgid "enum %s contains no values" msgstr "Enum %s enthält keine Werte" -#: utils/adt/float.c:58 +#: utils/adt/expandedrecord.c:98 utils/adt/expandedrecord.c:230 +#: utils/cache/typcache.c:1563 utils/cache/typcache.c:1719 +#: utils/cache/typcache.c:1849 utils/fmgr/funcapi.c:430 +#, c-format +msgid "type %s is not composite" +msgstr "Typ %s ist kein zusammengesetzter Typ" + +#: utils/adt/float.c:55 #, c-format msgid "value out of range: overflow" msgstr "Wert ist außerhalb des gültigen Bereichs: Überlauf" -#: utils/adt/float.c:63 +#: utils/adt/float.c:60 #, c-format msgid "value out of range: underflow" msgstr "Wert ist außerhalb des gültigen Bereichs: Unterlauf" -#: utils/adt/float.c:312 +#: utils/adt/float.c:309 #, c-format msgid "\"%s\" is out of range for type real" msgstr "»%s« ist außerhalb des gültigen Bereichs für Typ real" -#: utils/adt/float.c:537 +#: utils/adt/float.c:534 #, c-format msgid "\"%s\" is out of range for type double precision" msgstr "»%s« ist außerhalb des gültigen Bereichs für Typ double precision" -#: utils/adt/float.c:1246 utils/adt/float.c:1304 utils/adt/int.c:334 -#: utils/adt/int.c:760 utils/adt/int.c:789 utils/adt/int.c:810 -#: utils/adt/int.c:830 utils/adt/int.c:864 utils/adt/int.c:1159 -#: utils/adt/int8.c:1323 utils/adt/numeric.c:3050 utils/adt/numeric.c:3059 +#: utils/adt/float.c:1381 utils/adt/float.c:1439 utils/adt/int.c:332 +#: utils/adt/int.c:870 utils/adt/int.c:892 utils/adt/int.c:906 +#: utils/adt/int.c:920 utils/adt/int.c:952 utils/adt/int.c:1190 +#: utils/adt/int8.c:1185 utils/adt/numeric.c:3214 utils/adt/numeric.c:3223 #, c-format msgid "smallint out of range" msgstr "smallint ist außerhalb des gültigen Bereichs" -#: utils/adt/float.c:1430 utils/adt/numeric.c:7624 +#: utils/adt/float.c:1565 utils/adt/numeric.c:7802 #, c-format msgid "cannot take square root of a negative number" msgstr "Quadratwurzel von negativer Zahl kann nicht ermittelt werden" -#: utils/adt/float.c:1472 utils/adt/numeric.c:2853 +#: utils/adt/float.c:1626 utils/adt/numeric.c:3017 #, c-format msgid "zero raised to a negative power is undefined" msgstr "null hoch eine negative Zahl ist undefiniert" -#: utils/adt/float.c:1476 utils/adt/numeric.c:2859 +#: utils/adt/float.c:1630 utils/adt/numeric.c:3023 #, c-format msgid "a negative number raised to a non-integer power yields a complex result" msgstr "eine negative Zahl hoch eine nicht ganze Zahl ergibt ein komplexes Ergebnis" -#: utils/adt/float.c:1542 utils/adt/float.c:1572 utils/adt/numeric.c:7890 +#: utils/adt/float.c:1696 utils/adt/float.c:1726 utils/adt/numeric.c:8068 #, c-format msgid "cannot take 
logarithm of zero" msgstr "Logarithmus von null kann nicht ermittelt werden" -#: utils/adt/float.c:1546 utils/adt/float.c:1576 utils/adt/numeric.c:7894 +#: utils/adt/float.c:1700 utils/adt/float.c:1730 utils/adt/numeric.c:8072 #, c-format msgid "cannot take logarithm of a negative number" msgstr "Logarithmus negativer Zahlen kann nicht ermittelt werden" -#: utils/adt/float.c:1606 utils/adt/float.c:1636 utils/adt/float.c:1728 -#: utils/adt/float.c:1754 utils/adt/float.c:1781 utils/adt/float.c:1807 -#: utils/adt/float.c:1954 utils/adt/float.c:1989 utils/adt/float.c:2153 -#: utils/adt/float.c:2207 utils/adt/float.c:2271 utils/adt/float.c:2326 +#: utils/adt/float.c:1760 utils/adt/float.c:1790 utils/adt/float.c:1882 +#: utils/adt/float.c:1908 utils/adt/float.c:1935 utils/adt/float.c:1961 +#: utils/adt/float.c:2108 utils/adt/float.c:2143 utils/adt/float.c:2307 +#: utils/adt/float.c:2361 utils/adt/float.c:2425 utils/adt/float.c:2480 #, c-format msgid "input is out of range" msgstr "Eingabe ist außerhalb des gültigen Bereichs" -#: utils/adt/float.c:3532 utils/adt/numeric.c:1493 +#: utils/adt/float.c:3686 utils/adt/numeric.c:1504 #, c-format msgid "count must be greater than zero" msgstr "Anzahl muss größer als null sein" -#: utils/adt/float.c:3537 utils/adt/numeric.c:1500 +#: utils/adt/float.c:3691 utils/adt/numeric.c:1511 #, c-format msgid "operand, lower bound, and upper bound cannot be NaN" msgstr "Operand, Untergrenze und Obergrenze dürfen nicht NaN sein" -#: utils/adt/float.c:3543 +#: utils/adt/float.c:3697 #, c-format msgid "lower and upper bounds must be finite" msgstr "Untergrenze und Obergrenze müssen endlich sein" -#: utils/adt/float.c:3581 utils/adt/numeric.c:1513 +#: utils/adt/float.c:3731 utils/adt/numeric.c:1524 #, c-format msgid "lower bound cannot equal upper bound" msgstr "Untergrenze kann nicht gleich der Obergrenze sein" -#: utils/adt/formatting.c:489 +#: utils/adt/formatting.c:488 #, c-format msgid "invalid format specification for an interval value" msgstr "ungültige Formatangabe für Intervall-Wert" -#: utils/adt/formatting.c:490 +#: utils/adt/formatting.c:489 #, c-format msgid "Intervals are not tied to specific calendar dates." msgstr "Intervalle beziehen sich nicht auf bestimmte Kalenderdaten." 
-#: utils/adt/formatting.c:1056 +#: utils/adt/formatting.c:1059 #, c-format msgid "\"EEEE\" must be the last pattern used" msgstr "»EEEE« muss das letzte Muster sein" -#: utils/adt/formatting.c:1064 +#: utils/adt/formatting.c:1067 #, c-format msgid "\"9\" must be ahead of \"PR\"" msgstr "»9« muss vor »PR« stehen" -#: utils/adt/formatting.c:1080 +#: utils/adt/formatting.c:1083 #, c-format msgid "\"0\" must be ahead of \"PR\"" msgstr "»0« muss vor »PR« stehen" -#: utils/adt/formatting.c:1107 +#: utils/adt/formatting.c:1110 #, c-format msgid "multiple decimal points" msgstr "mehrere Dezimalpunkte" -#: utils/adt/formatting.c:1111 utils/adt/formatting.c:1194 +#: utils/adt/formatting.c:1114 utils/adt/formatting.c:1197 #, c-format msgid "cannot use \"V\" and decimal point together" msgstr "»V« und Dezimalpunkt können nicht zusammen verwendet werden" -#: utils/adt/formatting.c:1123 +#: utils/adt/formatting.c:1126 #, c-format msgid "cannot use \"S\" twice" msgstr "»S« kann nicht zweimal verwendet werden" -#: utils/adt/formatting.c:1127 +#: utils/adt/formatting.c:1130 #, c-format msgid "cannot use \"S\" and \"PL\"/\"MI\"/\"SG\"/\"PR\" together" msgstr "»S« und »PL«/»MI«/»SG«/»PR« können nicht zusammen verwendet werden" -#: utils/adt/formatting.c:1147 +#: utils/adt/formatting.c:1150 #, c-format msgid "cannot use \"S\" and \"MI\" together" msgstr "»S« und »MI« können nicht zusammen verwendet werden" -#: utils/adt/formatting.c:1157 +#: utils/adt/formatting.c:1160 #, c-format msgid "cannot use \"S\" and \"PL\" together" msgstr "»S« und »PL« können nicht zusammen verwendet werden" -#: utils/adt/formatting.c:1167 +#: utils/adt/formatting.c:1170 #, c-format msgid "cannot use \"S\" and \"SG\" together" msgstr "»S« und »SG« können nicht zusammen verwendet werden" -#: utils/adt/formatting.c:1176 +#: utils/adt/formatting.c:1179 #, c-format msgid "cannot use \"PR\" and \"S\"/\"PL\"/\"MI\"/\"SG\" together" msgstr "»PR« und »S«/»PL«/»MI«/»SG« können nicht zusammen verwendet werden" -#: utils/adt/formatting.c:1202 +#: utils/adt/formatting.c:1205 #, c-format msgid "cannot use \"EEEE\" twice" msgstr "»EEEE« kann nicht zweimal verwendet werden" -#: utils/adt/formatting.c:1208 +#: utils/adt/formatting.c:1211 #, c-format msgid "\"EEEE\" is incompatible with other formats" msgstr "»EEEE« ist mit anderen Formaten inkompatibel" -#: utils/adt/formatting.c:1209 +#: utils/adt/formatting.c:1212 #, c-format msgid "\"EEEE\" may only be used together with digit and decimal point patterns." msgstr "»EEEE« kann nur zusammen mit Platzhaltern für Ziffern oder Dezimalpunkt verwendet werden." 
-#: utils/adt/formatting.c:1398 +#: utils/adt/formatting.c:1392 #, c-format msgid "\"%s\" is not a number" msgstr "»%s« ist keine Zahl" -#: utils/adt/formatting.c:1499 utils/adt/formatting.c:1551 +#: utils/adt/formatting.c:1470 +#, c-format +msgid "case conversion failed: %s" +msgstr "Groß/Klein-Umwandlung fehlgeschlagen: %s" + +#: utils/adt/formatting.c:1535 #, c-format msgid "could not determine which collation to use for lower() function" msgstr "konnte die für die Funktion lower() zu verwendende Sortierfolge nicht bestimmen" -#: utils/adt/formatting.c:1619 utils/adt/formatting.c:1671 +#: utils/adt/formatting.c:1657 #, c-format msgid "could not determine which collation to use for upper() function" msgstr "konnte die für die Funktion upper() zu verwendende Sortierfolge nicht bestimmen" -#: utils/adt/formatting.c:1740 utils/adt/formatting.c:1804 +#: utils/adt/formatting.c:1780 #, c-format msgid "could not determine which collation to use for initcap() function" msgstr "konnte die für die Funktion initcap() zu verwendende Sortierfolge nicht bestimmen" -#: utils/adt/formatting.c:2101 +#: utils/adt/formatting.c:2148 #, c-format msgid "invalid combination of date conventions" msgstr "ungültige Kombination von Datumskonventionen" -#: utils/adt/formatting.c:2102 +#: utils/adt/formatting.c:2149 #, c-format msgid "Do not mix Gregorian and ISO week date conventions in a formatting template." msgstr "Die Gregorianische und die ISO-Konvention für Wochendaten können nicht einer Formatvorlage gemischt werden." -#: utils/adt/formatting.c:2119 +#: utils/adt/formatting.c:2166 #, c-format msgid "conflicting values for \"%s\" field in formatting string" msgstr "widersprüchliche Werte für das Feld »%s« in Formatzeichenkette" -#: utils/adt/formatting.c:2121 +#: utils/adt/formatting.c:2168 #, c-format msgid "This value contradicts a previous setting for the same field type." msgstr "Der Wert widerspricht einer vorherigen Einstellung für den selben Feldtyp." -#: utils/adt/formatting.c:2182 +#: utils/adt/formatting.c:2229 #, c-format msgid "source string too short for \"%s\" formatting field" msgstr "Quellzeichenkette zu kurz für Formatfeld »%s»" -#: utils/adt/formatting.c:2184 +#: utils/adt/formatting.c:2231 #, c-format msgid "Field requires %d characters, but only %d remain." msgstr "Feld benötigt %d Zeichen, aber nur %d verbleiben." -#: utils/adt/formatting.c:2187 utils/adt/formatting.c:2201 +#: utils/adt/formatting.c:2234 utils/adt/formatting.c:2248 #, c-format msgid "If your source string is not fixed-width, try using the \"FM\" modifier." msgstr "Wenn die Quellzeichenkette keine feste Breite hat, versuchen Sie den Modifikator »FM«." -#: utils/adt/formatting.c:2197 utils/adt/formatting.c:2210 -#: utils/adt/formatting.c:2340 +#: utils/adt/formatting.c:2244 utils/adt/formatting.c:2257 +#: utils/adt/formatting.c:2387 #, c-format msgid "invalid value \"%s\" for \"%s\"" msgstr "ungültiger Wert »%s« für »%s«" -#: utils/adt/formatting.c:2199 +#: utils/adt/formatting.c:2246 #, c-format msgid "Field requires %d characters, but only %d could be parsed." msgstr "Feld benötigt %d Zeichen, aber nur %d konnten geparst werden." -#: utils/adt/formatting.c:2212 +#: utils/adt/formatting.c:2259 #, c-format msgid "Value must be an integer." msgstr "Der Wert muss eine ganze Zahl sein." 
-#: utils/adt/formatting.c:2217 +#: utils/adt/formatting.c:2264 #, c-format msgid "value for \"%s\" in source string is out of range" msgstr "Wert für »%s« in der Eingabezeichenkette ist außerhalb des gültigen Bereichs" -#: utils/adt/formatting.c:2219 +#: utils/adt/formatting.c:2266 #, c-format msgid "Value must be in the range %d to %d." msgstr "Der Wert muss im Bereich %d bis %d sein." -#: utils/adt/formatting.c:2342 +#: utils/adt/formatting.c:2389 #, c-format msgid "The given value did not match any of the allowed values for this field." msgstr "Der angegebene Wert stimmte mit keinem der für dieses Feld zulässigen Werte überein." -#: utils/adt/formatting.c:2527 utils/adt/formatting.c:2547 -#: utils/adt/formatting.c:2567 utils/adt/formatting.c:2587 -#: utils/adt/formatting.c:2606 utils/adt/formatting.c:2625 -#: utils/adt/formatting.c:2649 utils/adt/formatting.c:2667 -#: utils/adt/formatting.c:2685 utils/adt/formatting.c:2703 -#: utils/adt/formatting.c:2720 utils/adt/formatting.c:2737 +#: utils/adt/formatting.c:2587 utils/adt/formatting.c:2607 +#: utils/adt/formatting.c:2627 utils/adt/formatting.c:2647 +#: utils/adt/formatting.c:2666 utils/adt/formatting.c:2685 +#: utils/adt/formatting.c:2709 utils/adt/formatting.c:2727 +#: utils/adt/formatting.c:2745 utils/adt/formatting.c:2763 +#: utils/adt/formatting.c:2780 utils/adt/formatting.c:2797 #, c-format msgid "localized string format value too long" msgstr "lokalisierter Formatwert ist zu lang" -#: utils/adt/formatting.c:3024 -#, fuzzy, c-format -#| msgid "replication slot file \"%s\" has unsupported version %u" +#: utils/adt/formatting.c:3084 +#, c-format msgid "formatting field \"%s\" is only supported in to_char" -msgstr "Replikations-Slot-Datei »%s« hat nicht unterstützte Version %u" +msgstr "Formatfeld »%s« wird nur in to_char unterstützt" -#: utils/adt/formatting.c:3135 +#: utils/adt/formatting.c:3209 #, c-format msgid "invalid input string for \"Y,YYY\"" msgstr "ungültige Eingabe für »Y,YYY«" -#: utils/adt/formatting.c:3641 +#: utils/adt/formatting.c:3724 #, c-format msgid "hour \"%d\" is invalid for the 12-hour clock" msgstr "Stunde »%d« ist bei einer 12-Stunden-Uhr ungültig" -#: utils/adt/formatting.c:3643 +#: utils/adt/formatting.c:3726 #, c-format msgid "Use the 24-hour clock, or give an hour between 1 and 12." msgstr "Verwenden Sie die 24-Stunden-Uhr oder geben Sie eine Stunde zwischen 1 und 12 an." 
-#: utils/adt/formatting.c:3749 +#: utils/adt/formatting.c:3832 #, c-format msgid "cannot calculate day of year without year information" msgstr "kann Tag des Jahres nicht berechnen ohne Jahrinformationen" -#: utils/adt/formatting.c:4616 +#: utils/adt/formatting.c:4737 #, c-format msgid "\"EEEE\" not supported for input" msgstr "»E« wird nicht bei der Eingabe unterstützt" -#: utils/adt/formatting.c:4628 +#: utils/adt/formatting.c:4749 #, c-format msgid "\"RN\" not supported for input" msgstr "»RN« wird nicht bei der Eingabe unterstützt" -#: utils/adt/genfile.c:62 +#: utils/adt/genfile.c:79 #, c-format msgid "reference to parent directory (\"..\") not allowed" msgstr "Verweis auf übergeordnetes Verzeichnis (»..«) nicht erlaubt" -#: utils/adt/genfile.c:73 +#: utils/adt/genfile.c:90 #, c-format msgid "absolute path not allowed" msgstr "absoluter Pfad nicht erlaubt" -#: utils/adt/genfile.c:78 +#: utils/adt/genfile.c:95 #, c-format msgid "path must be in or below the current directory" msgstr "Pfad muss in oder unter aktuellem Verzeichnis sein" -#: utils/adt/genfile.c:125 utils/adt/oracle_compat.c:184 -#: utils/adt/oracle_compat.c:282 utils/adt/oracle_compat.c:758 -#: utils/adt/oracle_compat.c:1059 +#: utils/adt/genfile.c:142 utils/adt/oracle_compat.c:185 +#: utils/adt/oracle_compat.c:283 utils/adt/oracle_compat.c:759 +#: utils/adt/oracle_compat.c:1054 #, c-format msgid "requested length too large" msgstr "verlangte Länge zu groß" -#: utils/adt/genfile.c:142 +#: utils/adt/genfile.c:159 #, c-format msgid "could not seek in file \"%s\": %m" msgstr "konnte Positionszeiger in Datei »%s« nicht setzen: %m" -#: utils/adt/genfile.c:200 utils/adt/genfile.c:241 -#, c-format -msgid "must be superuser to read files" +#: utils/adt/genfile.c:219 +#, fuzzy, c-format +#| msgid "must be superuser to read files" +msgid "must be superuser to read files with adminpack 1.0" msgstr "nur Superuser können Dateien lesen" -#: utils/adt/genfile.c:318 -#, c-format -msgid "must be superuser to get file information" -msgstr "nur Superuser können Dateiinformationen lesen" - -#: utils/adt/genfile.c:404 -#, c-format -msgid "must be superuser to get directory listings" -msgstr "nur Superuser können Verzeichnislisten lesen" +#: utils/adt/genfile.c:220 +#, fuzzy, c-format +#| msgid "Consider using tablespaces instead." +msgid "Consider using pg_file_read(), which is part of core, instead." +msgstr "Verwenden Sie stattdessen Tablespaces." 
-#: utils/adt/geo_ops.c:940 +#: utils/adt/geo_ops.c:939 #, c-format msgid "invalid line specification: A and B cannot both be zero" msgstr "ungültige »line«-Angabe: A und B können nicht beide null sein" -#: utils/adt/geo_ops.c:948 +#: utils/adt/geo_ops.c:947 #, c-format msgid "invalid line specification: must be two distinct points" msgstr "ungültige »line«-Angabe: es müssen zwei verschiedene Punkte angegeben werden" -#: utils/adt/geo_ops.c:1342 utils/adt/geo_ops.c:3440 utils/adt/geo_ops.c:4253 -#: utils/adt/geo_ops.c:5181 +#: utils/adt/geo_ops.c:1341 utils/adt/geo_ops.c:3439 utils/adt/geo_ops.c:4252 +#: utils/adt/geo_ops.c:5180 #, c-format msgid "too many points requested" msgstr "zu viele Punkte verlangt" -#: utils/adt/geo_ops.c:1404 +#: utils/adt/geo_ops.c:1403 #, c-format msgid "invalid number of points in external \"path\" value" msgstr "ungültige Anzahl Punkte in externem »path«-Wert" -#: utils/adt/geo_ops.c:2555 +#: utils/adt/geo_ops.c:2554 #, c-format msgid "function \"dist_lb\" not implemented" msgstr "Funktion »dist_lb« ist nicht implementiert" -#: utils/adt/geo_ops.c:3015 +#: utils/adt/geo_ops.c:3014 #, c-format msgid "function \"close_sl\" not implemented" msgstr "Funktion »close_sl« ist nicht implementiert" -#: utils/adt/geo_ops.c:3117 +#: utils/adt/geo_ops.c:3116 #, c-format msgid "function \"close_lb\" not implemented" msgstr "Funktion »close_lb« ist nicht implementiert" -#: utils/adt/geo_ops.c:3406 +#: utils/adt/geo_ops.c:3405 #, c-format msgid "cannot create bounding box for empty polygon" msgstr "kann kein umschließendes Rechteck für leeres Polygon berechnen" -#: utils/adt/geo_ops.c:3487 +#: utils/adt/geo_ops.c:3486 #, c-format msgid "invalid number of points in external \"polygon\" value" msgstr "ungültige Anzahl Punkte in externem »polygon«-Wert" -#: utils/adt/geo_ops.c:4012 +#: utils/adt/geo_ops.c:4011 #, c-format msgid "function \"poly_distance\" not implemented" msgstr "Funktion »poly_distance« ist nicht implementiert" -#: utils/adt/geo_ops.c:4365 +#: utils/adt/geo_ops.c:4364 #, c-format msgid "function \"path_center\" not implemented" msgstr "Funktion »path_center« ist nicht implementiert" -#: utils/adt/geo_ops.c:4382 +#: utils/adt/geo_ops.c:4381 #, c-format msgid "open path cannot be converted to polygon" msgstr "offener Pfad kann nicht in Polygon umgewandelt werden" -#: utils/adt/geo_ops.c:4631 +#: utils/adt/geo_ops.c:4630 #, c-format msgid "invalid radius in external \"circle\" value" msgstr "ungültiger Radius in externem »circle«-Wert" -#: utils/adt/geo_ops.c:5167 +#: utils/adt/geo_ops.c:5166 #, c-format msgid "cannot convert circle with radius zero to polygon" msgstr "kann Kreis mit Radius null nicht in Polygon umwandeln" -#: utils/adt/geo_ops.c:5172 +#: utils/adt/geo_ops.c:5171 #, c-format msgid "must request at least 2 points" msgstr "mindestens 2 Punkte müssen angefordert werden" -#: utils/adt/geo_ops.c:5216 +#: utils/adt/geo_ops.c:5215 #, c-format msgid "cannot convert empty polygon to circle" msgstr "kann leeres Polygon nicht in Kreis umwandeln" -#: utils/adt/int.c:162 +#: utils/adt/int.c:160 #, c-format msgid "int2vector has too many elements" msgstr "int2vector-Wert hat zu viele Elemente" -#: utils/adt/int.c:237 +#: utils/adt/int.c:235 #, c-format msgid "invalid int2vector data" msgstr "ungültige int2vector-Daten" -#: utils/adt/int.c:243 utils/adt/oid.c:215 utils/adt/oid.c:296 +#: utils/adt/int.c:241 utils/adt/oid.c:215 utils/adt/oid.c:296 #, c-format msgid "oidvector has too many elements" msgstr "oidvector-Wert hat zu viele Elemente" -#: 
utils/adt/int.c:1347 utils/adt/int8.c:1460 utils/adt/numeric.c:1401 -#: utils/adt/timestamp.c:5148 utils/adt/timestamp.c:5229 +#: utils/adt/int.c:1379 utils/adt/int8.c:1309 utils/adt/numeric.c:1412 +#: utils/adt/timestamp.c:5318 utils/adt/timestamp.c:5399 #, c-format msgid "step size cannot equal zero" msgstr "Schrittgröße kann nicht gleich null sein" -#: utils/adt/int8.c:98 utils/adt/int8.c:133 utils/adt/numutils.c:51 -#: utils/adt/numutils.c:61 utils/adt/numutils.c:105 -#, fuzzy, c-format -#| msgid "invalid input syntax for type %s: \"%s\"" -msgid "invalid input syntax for %s: \"%s\"" -msgstr "ungültige Eingabesyntax für Typ %s: »%s«" - -#: utils/adt/int8.c:500 utils/adt/int8.c:529 utils/adt/int8.c:550 -#: utils/adt/int8.c:581 utils/adt/int8.c:615 utils/adt/int8.c:640 -#: utils/adt/int8.c:697 utils/adt/int8.c:714 utils/adt/int8.c:741 -#: utils/adt/int8.c:758 utils/adt/int8.c:834 utils/adt/int8.c:855 -#: utils/adt/int8.c:882 utils/adt/int8.c:915 utils/adt/int8.c:943 -#: utils/adt/int8.c:964 utils/adt/int8.c:991 utils/adt/int8.c:1031 -#: utils/adt/int8.c:1052 utils/adt/int8.c:1079 utils/adt/int8.c:1112 -#: utils/adt/int8.c:1140 utils/adt/int8.c:1161 utils/adt/int8.c:1188 -#: utils/adt/int8.c:1361 utils/adt/int8.c:1400 utils/adt/numeric.c:3005 +#: utils/adt/int8.c:125 utils/adt/numutils.c:51 utils/adt/numutils.c:61 +#: utils/adt/numutils.c:105 +#, c-format +msgid "invalid input syntax for integer: \"%s\"" +msgstr "ungültige Eingabesyntax für ganze Zahl: »%s«" + +#: utils/adt/int8.c:526 utils/adt/int8.c:549 utils/adt/int8.c:563 +#: utils/adt/int8.c:577 utils/adt/int8.c:608 utils/adt/int8.c:632 +#: utils/adt/int8.c:687 utils/adt/int8.c:701 utils/adt/int8.c:725 +#: utils/adt/int8.c:738 utils/adt/int8.c:807 utils/adt/int8.c:821 +#: utils/adt/int8.c:835 utils/adt/int8.c:866 utils/adt/int8.c:888 +#: utils/adt/int8.c:902 utils/adt/int8.c:916 utils/adt/int8.c:949 +#: utils/adt/int8.c:963 utils/adt/int8.c:977 utils/adt/int8.c:1008 +#: utils/adt/int8.c:1030 utils/adt/int8.c:1044 utils/adt/int8.c:1058 +#: utils/adt/int8.c:1218 utils/adt/int8.c:1253 utils/adt/numeric.c:3169 #: utils/adt/varbit.c:1655 #, c-format msgid "bigint out of range" msgstr "bigint ist außerhalb des gültigen Bereichs" -#: utils/adt/int8.c:1417 +#: utils/adt/int8.c:1266 #, c-format msgid "OID out of range" msgstr "OID ist außerhalb des gültigen Bereichs" -#: utils/adt/json.c:786 +#: utils/adt/json.c:787 #, c-format msgid "Character with value 0x%02x must be escaped." msgstr "Zeichen mit Wert 0x%02x muss escapt werden." -#: utils/adt/json.c:827 +#: utils/adt/json.c:828 #, c-format msgid "\"\\u\" must be followed by four hexadecimal digits." msgstr "Nach »\\u« müssen vier Hexadezimalziffern folgen." -#: utils/adt/json.c:843 +#: utils/adt/json.c:844 #, c-format msgid "Unicode high surrogate must not follow a high surrogate." msgstr "Unicode-High-Surrogate darf nicht auf ein High-Surrogate folgen." -#: utils/adt/json.c:854 utils/adt/json.c:864 utils/adt/json.c:916 -#: utils/adt/json.c:978 utils/adt/json.c:990 +#: utils/adt/json.c:855 utils/adt/json.c:865 utils/adt/json.c:917 +#: utils/adt/json.c:979 utils/adt/json.c:991 #, c-format msgid "Unicode low surrogate must follow a high surrogate." msgstr "Unicode-Low-Surrogate muss auf ein High-Surrogate folgen." 
-#: utils/adt/json.c:879 utils/adt/json.c:902 +#: utils/adt/json.c:880 utils/adt/json.c:903 #, c-format msgid "unsupported Unicode escape sequence" msgstr "nicht unterstützte Unicode-Escape-Sequenz" -#: utils/adt/json.c:880 +#: utils/adt/json.c:881 #, c-format msgid "\\u0000 cannot be converted to text." msgstr "\\u0000 kann nicht in »text« umgewandelt werden." -#: utils/adt/json.c:903 +#: utils/adt/json.c:904 #, c-format msgid "Unicode escape values cannot be used for code point values above 007F when the server encoding is not UTF8." msgstr "Unicode-Escape-Werte können nicht für Code-Punkt-Werte über 007F verwendet werden, wenn die Serverkodierung nicht UTF8 ist." -#: utils/adt/json.c:948 utils/adt/json.c:966 +#: utils/adt/json.c:949 utils/adt/json.c:967 #, c-format msgid "Escape sequence \"\\%s\" is invalid." msgstr "Escape-Sequenz »\\%s« ist nicht gültig." -#: utils/adt/json.c:1135 +#: utils/adt/json.c:1136 #, c-format msgid "The input string ended unexpectedly." msgstr "Die Eingabezeichenkette endete unerwartet." -#: utils/adt/json.c:1149 +#: utils/adt/json.c:1150 #, c-format msgid "Expected end of input, but found \"%s\"." msgstr "Ende der Eingabe erwartet, aber »%s« gefunden." -#: utils/adt/json.c:1160 +#: utils/adt/json.c:1161 #, c-format msgid "Expected JSON value, but found \"%s\"." msgstr "JSON-Wert erwartet, aber »%s« gefunden." -#: utils/adt/json.c:1168 utils/adt/json.c:1216 +#: utils/adt/json.c:1169 utils/adt/json.c:1217 #, c-format msgid "Expected string, but found \"%s\"." msgstr "Zeichenkette erwartet, aber »%s« gefunden." -#: utils/adt/json.c:1176 +#: utils/adt/json.c:1177 #, c-format msgid "Expected array element or \"]\", but found \"%s\"." msgstr "Array-Element oder »]« erwartet, aber »%s« gefunden." -#: utils/adt/json.c:1184 +#: utils/adt/json.c:1185 #, c-format msgid "Expected \",\" or \"]\", but found \"%s\"." msgstr "»,« oder »]« erwartet, aber »%s« gefunden." -#: utils/adt/json.c:1192 +#: utils/adt/json.c:1193 #, c-format msgid "Expected string or \"}\", but found \"%s\"." msgstr "Zeichenkette oder »}« erwartet, aber »%s« gefunden." -#: utils/adt/json.c:1200 +#: utils/adt/json.c:1201 #, c-format msgid "Expected \":\", but found \"%s\"." msgstr "»:« erwartet, aber »%s« gefunden." -#: utils/adt/json.c:1208 +#: utils/adt/json.c:1209 #, c-format msgid "Expected \",\" or \"}\", but found \"%s\"." msgstr "»,« oder »}« erwartet, aber »%s« gefunden." -#: utils/adt/json.c:1246 +#: utils/adt/json.c:1247 #, c-format msgid "Token \"%s\" is invalid." msgstr "Token »%s« ist ungültig." 
-#: utils/adt/json.c:1318 +#: utils/adt/json.c:1319 #, c-format msgid "JSON data, line %d: %s%s%s" msgstr "JSON-Daten, Zeile %d: %s%s%s" -#: utils/adt/json.c:1474 utils/adt/jsonb.c:725 +#: utils/adt/json.c:1475 utils/adt/jsonb.c:728 #, c-format msgid "key value must be scalar, not array, composite, or json" msgstr "Schlüsselwert muss skalar sein, nicht Array, zusammengesetzt oder json" -#: utils/adt/json.c:2011 -#, c-format -msgid "could not determine data type for argument 1" -msgstr "konnte Datentyp von Argument 1 nicht ermitteln" - -#: utils/adt/json.c:2021 +#: utils/adt/json.c:2076 utils/adt/json.c:2086 utils/fmgr/funcapi.c:1564 #, c-format -msgid "could not determine data type for argument 2" -msgstr "konnte Datentyp von Argument 2 nicht ermitteln" +msgid "could not determine data type for argument %d" +msgstr "konnte Datentyp von Argument %d nicht ermitteln" -#: utils/adt/json.c:2045 utils/adt/jsonb.c:1782 +#: utils/adt/json.c:2110 utils/adt/jsonb.c:1694 #, c-format msgid "field name must not be null" msgstr "Feldname darf nicht NULL sein" -#: utils/adt/json.c:2122 +#: utils/adt/json.c:2194 utils/adt/jsonb.c:1146 #, c-format msgid "argument list must have even number of elements" msgstr "Argumentliste muss gerade Anzahl Elemente haben" -#: utils/adt/json.c:2123 +#: utils/adt/json.c:2195 #, c-format msgid "The arguments of json_build_object() must consist of alternating keys and values." msgstr "Die Argumente von json_build_object() müssen abwechselnd Schlüssel und Werte sein." -#: utils/adt/json.c:2147 utils/adt/json.c:2168 utils/adt/json.c:2227 -#, c-format -msgid "could not determine data type for argument %d" -msgstr "konnte Datentyp von Argument %d nicht ermitteln" - -#: utils/adt/json.c:2153 +#: utils/adt/json.c:2210 #, c-format msgid "argument %d cannot be null" msgstr "Argument %d darf nicht NULL sein" -#: utils/adt/json.c:2154 +#: utils/adt/json.c:2211 #, c-format msgid "Object keys should be text." msgstr "Objektschlüssel sollten Text sein." -#: utils/adt/json.c:2289 utils/adt/jsonb.c:1364 +#: utils/adt/json.c:2317 utils/adt/jsonb.c:1276 #, c-format msgid "array must have two columns" msgstr "Array muss zwei Spalten haben" -#: utils/adt/json.c:2313 utils/adt/json.c:2397 utils/adt/jsonb.c:1388 -#: utils/adt/jsonb.c:1483 +#: utils/adt/json.c:2341 utils/adt/json.c:2425 utils/adt/jsonb.c:1300 +#: utils/adt/jsonb.c:1395 #, c-format msgid "null value not allowed for object key" msgstr "NULL-Werte sind nicht als Objektschlüssel erlaubt" -#: utils/adt/json.c:2386 utils/adt/jsonb.c:1472 +#: utils/adt/json.c:2414 utils/adt/jsonb.c:1384 #, c-format msgid "mismatched array dimensions" msgstr "Array-Dimensionen passen nicht" -#: utils/adt/jsonb.c:257 +#: utils/adt/jsonb.c:258 #, c-format msgid "string too long to represent as jsonb string" msgstr "Zeichenkette ist zu lang für jsonb" -#: utils/adt/jsonb.c:258 +#: utils/adt/jsonb.c:259 #, c-format msgid "Due to an implementation restriction, jsonb strings cannot exceed %d bytes." msgstr "Aufgrund einer Einschränkung der Implementierung können jsonb-Zeichenketten nicht länger als %d Bytes sein." -#: utils/adt/jsonb.c:1183 +#: utils/adt/jsonb.c:1147 #, c-format -msgid "invalid number of arguments: object must be matched key value pairs" -msgstr "ungültige Anzahl Argumente: Objekt muss aus Schlüssel-Wert-Paaren bestehen" +msgid "The arguments of jsonb_build_object() must consist of alternating keys and values." +msgstr "Die Argumente von jsonb_build_object() müssen abwechselnd Schlüssel und Werte sein." 
-#: utils/adt/jsonb.c:1196 +#: utils/adt/jsonb.c:1159 #, c-format msgid "argument %d: key must not be null" msgstr "Argument %d: Schlüssel darf nicht NULL sein" -#: utils/adt/jsonb.c:1215 utils/adt/jsonb.c:1238 utils/adt/jsonb.c:1298 -#, c-format -msgid "argument %d: could not determine data type" -msgstr "Argument %d: konnte Datentypen nicht bestimmen" - -#: utils/adt/jsonb.c:1835 +#: utils/adt/jsonb.c:1747 #, c-format msgid "object keys must be strings" msgstr "Objektschlüssel müssen Zeichenketten sein" +#: utils/adt/jsonb.c:1910 +#, fuzzy, c-format +#| msgid "cannot accept a value of type %s" +msgid "cannot cast jsonb null to type %s" +msgstr "kann keinen Wert vom Typ %s annehmen" + +#: utils/adt/jsonb.c:1911 +#, fuzzy, c-format +#| msgid "cannot cast type %s to %s" +msgid "cannot cast jsonb string to type %s" +msgstr "kann Typ %s nicht in Typ %s umwandeln" + +#: utils/adt/jsonb.c:1912 +#, fuzzy, c-format +#| msgid "cannot accept a value of type %s" +msgid "cannot cast jsonb numeric to type %s" +msgstr "kann keinen Wert vom Typ %s annehmen" + +#: utils/adt/jsonb.c:1913 +#, fuzzy, c-format +#| msgid "cannot accept a value of type %s" +msgid "cannot cast jsonb boolean to type %s" +msgstr "kann keinen Wert vom Typ %s annehmen" + +#: utils/adt/jsonb.c:1914 +#, fuzzy, c-format +#| msgid "cannot alter array type %s" +msgid "cannot cast jsonb array to type %s" +msgstr "Array-Typ %s kann nicht verändert werden" + +#: utils/adt/jsonb.c:1915 +#, fuzzy, c-format +#| msgid "cannot accept a value of type %s" +msgid "cannot cast jsonb object to type %s" +msgstr "kann keinen Wert vom Typ %s annehmen" + +#: utils/adt/jsonb.c:1916 +#, fuzzy, c-format +#| msgid "cannot alter array type %s" +msgid "cannot cast jsonb array or object to type %s" +msgstr "Array-Typ %s kann nicht verändert werden" + #: utils/adt/jsonb_util.c:657 #, c-format msgid "number of jsonb object pairs exceeds the maximum allowed (%zu)" @@ -20315,148 +21745,195 @@ msgstr "Anzahl der jsonb-Objekte-Paare überschreitet erlaubtes Maximum (%zu)" msgid "number of jsonb array elements exceeds the maximum allowed (%zu)" msgstr "Anzahl der jsonb-Arrayelemente überschreitet erlaubtes Maximum (%zu)" -#: utils/adt/jsonb_util.c:1526 utils/adt/jsonb_util.c:1546 +#: utils/adt/jsonb_util.c:1569 utils/adt/jsonb_util.c:1589 #, c-format msgid "total size of jsonb array elements exceeds the maximum of %u bytes" msgstr "Gesamtgröße der jsonb-Array-Elemente überschreitet die maximale Größe von %u Bytes" -#: utils/adt/jsonb_util.c:1607 utils/adt/jsonb_util.c:1642 -#: utils/adt/jsonb_util.c:1662 +#: utils/adt/jsonb_util.c:1650 utils/adt/jsonb_util.c:1685 +#: utils/adt/jsonb_util.c:1705 #, c-format msgid "total size of jsonb object elements exceeds the maximum of %u bytes" msgstr "Gesamtgröße der jsonb-Objektelemente überschreitet die maximale Größe von %u Bytes" -#: utils/adt/jsonfuncs.c:306 utils/adt/jsonfuncs.c:471 -#: utils/adt/jsonfuncs.c:2058 utils/adt/jsonfuncs.c:2499 -#: utils/adt/jsonfuncs.c:3005 +#: utils/adt/jsonfuncs.c:523 utils/adt/jsonfuncs.c:688 +#: utils/adt/jsonfuncs.c:2276 utils/adt/jsonfuncs.c:2712 +#: utils/adt/jsonfuncs.c:3468 utils/adt/jsonfuncs.c:3812 #, c-format msgid "cannot call %s on a scalar" msgstr "%s kann nicht mit einem skalaren Wert aufgerufen werden" -#: utils/adt/jsonfuncs.c:311 utils/adt/jsonfuncs.c:458 -#: utils/adt/jsonfuncs.c:2488 +#: utils/adt/jsonfuncs.c:528 utils/adt/jsonfuncs.c:675 +#: utils/adt/jsonfuncs.c:2714 utils/adt/jsonfuncs.c:3457 #, c-format msgid "cannot call %s on an array" msgstr "%s kann nicht mit einem 
Array aufgerufen werden" -#: utils/adt/jsonfuncs.c:1374 utils/adt/jsonfuncs.c:1409 +#: utils/adt/jsonfuncs.c:1591 utils/adt/jsonfuncs.c:1626 #, c-format msgid "cannot get array length of a scalar" msgstr "kann nicht die Arraylänge eines skalaren Wertes ermitteln" -#: utils/adt/jsonfuncs.c:1378 utils/adt/jsonfuncs.c:1397 +#: utils/adt/jsonfuncs.c:1595 utils/adt/jsonfuncs.c:1614 #, c-format msgid "cannot get array length of a non-array" msgstr "kann nicht die Arraylänge eines Nicht-Arrays ermitteln" -#: utils/adt/jsonfuncs.c:1474 +#: utils/adt/jsonfuncs.c:1691 #, c-format msgid "cannot call %s on a non-object" msgstr "%s kann nicht mit etwas aufgerufen werden, das kein Objekt ist" -#: utils/adt/jsonfuncs.c:1492 utils/adt/jsonfuncs.c:2171 -#: utils/adt/jsonfuncs.c:2708 +#: utils/adt/jsonfuncs.c:1709 utils/adt/jsonfuncs.c:3261 +#: utils/adt/jsonfuncs.c:3612 #, c-format msgid "function returning record called in context that cannot accept type record" msgstr "Funktion, die einen Record zurückgibt, in einem Zusammenhang aufgerufen, der Typ record nicht verarbeiten kann" -#: utils/adt/jsonfuncs.c:1731 +#: utils/adt/jsonfuncs.c:1949 #, c-format msgid "cannot deconstruct an array as an object" msgstr "kann Array nicht in ein Objekt zerlegen" -#: utils/adt/jsonfuncs.c:1743 +#: utils/adt/jsonfuncs.c:1961 #, c-format msgid "cannot deconstruct a scalar" msgstr "kann skalaren Wert nicht zerlegen" -#: utils/adt/jsonfuncs.c:1789 +#: utils/adt/jsonfuncs.c:2007 #, c-format msgid "cannot extract elements from a scalar" msgstr "kann keine Elemente aus einem skalaren Wert auswählen" -#: utils/adt/jsonfuncs.c:1793 +#: utils/adt/jsonfuncs.c:2011 #, c-format msgid "cannot extract elements from an object" msgstr "kann keine Elemente aus einem Objekt auswählen" -#: utils/adt/jsonfuncs.c:2045 utils/adt/jsonfuncs.c:2804 +#: utils/adt/jsonfuncs.c:2263 utils/adt/jsonfuncs.c:3701 #, c-format msgid "cannot call %s on a non-array" msgstr "%s kann nicht mit etwas aufgerufen werden, das kein Array ist" -#: utils/adt/jsonfuncs.c:2132 utils/adt/jsonfuncs.c:2684 +#: utils/adt/jsonfuncs.c:2329 utils/adt/jsonfuncs.c:2334 +#: utils/adt/jsonfuncs.c:2351 utils/adt/jsonfuncs.c:2357 +#, c-format +msgid "expected JSON array" +msgstr "JSON-Array wurde erwartet" + +#: utils/adt/jsonfuncs.c:2330 +#, c-format +msgid "See the value of key \"%s\"." +msgstr "Prüfen Sie den Wert des Schlüssels »%s«." + +#: utils/adt/jsonfuncs.c:2352 +#, c-format +msgid "See the array element %s of key \"%s\"." +msgstr "Prüfen Sie das Arrayelement %s des Schlüssels »%s«." + +#: utils/adt/jsonfuncs.c:2358 +#, c-format +msgid "See the array element %s." +msgstr "Prüfen Sie das Arrayelement %s." + +#: utils/adt/jsonfuncs.c:2393 +#, c-format +msgid "malformed JSON array" +msgstr "fehlerhaftes JSON-Array" + +#: utils/adt/jsonfuncs.c:3245 utils/adt/jsonfuncs.c:3597 #, c-format msgid "first argument of %s must be a row type" msgstr "erstes Argument von %s muss ein Zeilentyp sein" -#: utils/adt/jsonfuncs.c:2173 +#: utils/adt/jsonfuncs.c:3263 utils/adt/jsonfuncs.c:3614 #, c-format msgid "Try calling the function in the FROM clause using a column definition list." msgstr "Versuchen Sie, die Funktion in der FROM-Klausel mit einer Spaltendefinitionsliste aufzurufen." 
-#: utils/adt/jsonfuncs.c:2820 utils/adt/jsonfuncs.c:2987 +#: utils/adt/jsonfuncs.c:3718 utils/adt/jsonfuncs.c:3794 #, c-format msgid "argument of %s must be an array of objects" msgstr "Argument von %s muss ein Array von Objekten sein" -#: utils/adt/jsonfuncs.c:2844 +#: utils/adt/jsonfuncs.c:3746 #, c-format msgid "cannot call %s on an object" msgstr "%s kann nicht mit einem Objekt aufgerufen werden" -#: utils/adt/jsonfuncs.c:3411 utils/adt/jsonfuncs.c:3470 -#: utils/adt/jsonfuncs.c:3550 +#: utils/adt/jsonfuncs.c:4223 utils/adt/jsonfuncs.c:4282 +#: utils/adt/jsonfuncs.c:4362 #, c-format msgid "cannot delete from scalar" msgstr "kann nicht aus skalarem Wert löschen" -#: utils/adt/jsonfuncs.c:3555 +#: utils/adt/jsonfuncs.c:4367 #, c-format msgid "cannot delete from object using integer index" msgstr "aus einem Objekt kann nicht per numerischem Index gelöscht werden" -#: utils/adt/jsonfuncs.c:3621 utils/adt/jsonfuncs.c:3713 +#: utils/adt/jsonfuncs.c:4433 utils/adt/jsonfuncs.c:4525 #, c-format msgid "cannot set path in scalar" msgstr "in einem skalaren Wert kann kein Pfad gesetzt werden" -#: utils/adt/jsonfuncs.c:3666 +#: utils/adt/jsonfuncs.c:4478 #, c-format msgid "cannot delete path in scalar" msgstr "in einem skalaren Wert kann kein Pfad gelöscht werden" -#: utils/adt/jsonfuncs.c:3836 +#: utils/adt/jsonfuncs.c:4648 #, c-format msgid "invalid concatenation of jsonb objects" msgstr "ungültiges Aneinanderhängen von jsonb-Objekten" -#: utils/adt/jsonfuncs.c:3870 +#: utils/adt/jsonfuncs.c:4682 #, c-format msgid "path element at position %d is null" msgstr "Pfadelement auf Position %d ist NULL" -#: utils/adt/jsonfuncs.c:3956 +#: utils/adt/jsonfuncs.c:4768 #, c-format msgid "cannot replace existing key" msgstr "existierender Schlüssel kann nicht ersetzt werden" -#: utils/adt/jsonfuncs.c:3957 +#: utils/adt/jsonfuncs.c:4769 #, c-format msgid "Try using the function jsonb_set to replace key value." msgstr "Verwenden Sie die Funktion jsonb_set, um den Schlüsselwert zu ersetzen." -#: utils/adt/jsonfuncs.c:4039 +#: utils/adt/jsonfuncs.c:4851 #, c-format msgid "path element at position %d is not an integer: \"%s\"" msgstr "Pfadelement auf Position %d ist keine ganze Zahl: »%s«" +#: utils/adt/jsonfuncs.c:4970 +#, c-format +msgid "wrong flag type, only arrays and scalars are allowed" +msgstr "" + +#: utils/adt/jsonfuncs.c:4977 +#, fuzzy, c-format +#| msgid "array element type cannot be %s" +msgid "flag array element is not a string" +msgstr "Arrayelementtyp kann nicht %s sein" + +#: utils/adt/jsonfuncs.c:4978 utils/adt/jsonfuncs.c:5000 +#, c-format +msgid "Possible values are: \"string\", \"numeric\", \"boolean\", \"key\" and \"all\"" +msgstr "" + +#: utils/adt/jsonfuncs.c:4998 +#, c-format +msgid "wrong flag in flag array: \"%s\"" +msgstr "" + #: utils/adt/levenshtein.c:133 #, c-format msgid "levenshtein argument exceeds maximum length of %d characters" msgstr "Levenshtein-Argument überschreitet die maximale Länge von %d Zeichen" -#: utils/adt/like.c:212 utils/adt/selfuncs.c:5331 +#: utils/adt/like.c:183 utils/adt/selfuncs.c:5806 #, c-format msgid "could not determine which collation to use for ILIKE" msgstr "konnte die für ILIKE zu verwendende Sortierfolge nicht bestimmen" @@ -20476,113 +21953,134 @@ msgstr "ungültige ESCAPE-Zeichenkette" msgid "Escape string must be empty or one character." msgstr "ESCAPE-Zeichenkette muss null oder ein Zeichen lang sein." 
-#: utils/adt/lockfuncs.c:545 +#: utils/adt/lockfuncs.c:664 #, c-format msgid "cannot use advisory locks during a parallel operation" msgstr "während einer parallelen Operation können keine Benutzersperren verwendet werden" -#: utils/adt/mac.c:76 +#: utils/adt/mac.c:102 #, c-format msgid "invalid octet value in \"macaddr\" value: \"%s\"" msgstr "ungültiger Oktettwert in »macaddr«-Wert: »%s«" -#: utils/adt/misc.c:238 +#: utils/adt/mac8.c:563 +#, c-format +msgid "macaddr8 data out of range to convert to macaddr" +msgstr "macaddr8-Daten außerhalb des gültigen Bereichs für Umwandlung in macaddr" + +#: utils/adt/mac8.c:564 +#, c-format +msgid "Only addresses that have FF and FE as values in the 4th and 5th bytes from the left, for example xx:xx:xx:ff:fe:xx:xx:xx, are eligible to be converted from macaddr8 to macaddr." +msgstr "Nur Adressen, die FF und FE als Werte im 4. und 5. Byte von links haben, zum Beispiel xx:xx:xx:ff:fe:xx:xx:xx, kommen für eine Umwandlung von macaddr8 nach macaddr in Frage." + +#: utils/adt/misc.c:239 #, c-format msgid "PID %d is not a PostgreSQL server process" msgstr "PID %d ist kein PostgreSQL-Serverprozess" -#: utils/adt/misc.c:289 +#: utils/adt/misc.c:290 #, c-format msgid "must be a superuser to cancel superuser query" msgstr "nur Superuser können Anfragen eines Superusers stornieren" -#: utils/adt/misc.c:294 +#: utils/adt/misc.c:295 #, c-format msgid "must be a member of the role whose query is being canceled or member of pg_signal_backend" msgstr "muss Mitglied der Rolle sein, deren Anfrage storniert wird, oder Mitglied von pg_signal_backend" -#: utils/adt/misc.c:313 +#: utils/adt/misc.c:314 #, c-format msgid "must be a superuser to terminate superuser process" msgstr "nur Superuser können Prozesse eines Superusers beenden" -#: utils/adt/misc.c:318 +#: utils/adt/misc.c:319 #, c-format msgid "must be a member of the role whose process is being terminated or member of pg_signal_backend" msgstr "muss Mitglied der Rolle sein, deren Prozess beendet wird, oder Mitglied von pg_signal_backend" -#: utils/adt/misc.c:335 +#: utils/adt/misc.c:336 #, c-format msgid "failed to send signal to postmaster: %m" msgstr "konnte Signal nicht an Postmaster senden: %m" #: utils/adt/misc.c:355 +#, fuzzy, c-format +#| msgid "Must be superuser to create a tablespace." +msgid "must be superuser to rotate log files with adminpack 1.0" +msgstr "Nur Superuser können Tablespaces anlegen." + +#: utils/adt/misc.c:356 +#, fuzzy, c-format +#| msgid "Consider using tablespaces instead." +msgid "Consider using pg_logfile_rotate(), which is part of core, instead." +msgstr "Verwenden Sie stattdessen Tablespaces." 
+ +#: utils/adt/misc.c:361 utils/adt/misc.c:381 #, c-format msgid "rotation not possible because log collection not active" msgstr "Rotierung nicht möglich, weil Logsammlung nicht aktiv ist" -#: utils/adt/misc.c:392 +#: utils/adt/misc.c:418 #, c-format msgid "global tablespace never has databases" msgstr "globaler Tablespace hat niemals Datenbanken" -#: utils/adt/misc.c:413 +#: utils/adt/misc.c:439 #, c-format msgid "%u is not a tablespace OID" msgstr "%u ist keine Tablespace-OID" -#: utils/adt/misc.c:606 +#: utils/adt/misc.c:626 msgid "unreserved" msgstr "unreserviert" -#: utils/adt/misc.c:610 +#: utils/adt/misc.c:630 msgid "unreserved (cannot be function or type name)" msgstr "unreserviert (kann nicht Funktions- oder Typname sein)" -#: utils/adt/misc.c:614 +#: utils/adt/misc.c:634 msgid "reserved (can be function or type name)" msgstr "reserviert (kann Funktions- oder Typname sein)" -#: utils/adt/misc.c:618 +#: utils/adt/misc.c:638 msgid "reserved" msgstr "reserviert" -#: utils/adt/misc.c:792 utils/adt/misc.c:806 utils/adt/misc.c:845 -#: utils/adt/misc.c:851 utils/adt/misc.c:857 utils/adt/misc.c:880 +#: utils/adt/misc.c:812 utils/adt/misc.c:826 utils/adt/misc.c:865 +#: utils/adt/misc.c:871 utils/adt/misc.c:877 utils/adt/misc.c:900 #, c-format msgid "string is not a valid identifier: \"%s\"" msgstr "Zeichenkette ist kein gültiger Bezeichner: »%s«" -#: utils/adt/misc.c:794 +#: utils/adt/misc.c:814 #, c-format msgid "String has unclosed double quotes." msgstr "Zeichenkette hat nicht geschlossene doppelte Anführungszeichen." -#: utils/adt/misc.c:808 +#: utils/adt/misc.c:828 #, c-format msgid "Quoted identifier must not be empty." msgstr "Bezeichner in Anführungszeichen darf nicht leer sein." -#: utils/adt/misc.c:847 +#: utils/adt/misc.c:867 #, c-format msgid "No valid identifier before \".\"." msgstr "Kein gültiger Bezeichner vor ».«." -#: utils/adt/misc.c:853 +#: utils/adt/misc.c:873 #, c-format msgid "No valid identifier after \".\"." msgstr "Kein gültiger Bezeichner nach ».«." -#: utils/adt/misc.c:914 -#, fuzzy, c-format -#| msgid "interval units \"%s\" not supported" +#: utils/adt/misc.c:934 +#, c-format msgid "log format \"%s\" is not supported" -msgstr "»interval«-Einheit »%s« nicht unterstützt" +msgstr "Logformat »%s« wird nicht unterstützt" -#: utils/adt/misc.c:915 +#: utils/adt/misc.c:935 #, c-format msgid "The supported log formats are \"stderr\" and \"csvlog\"." -msgstr "" +msgstr "Die unterstützten Logformate sind »stderr« und »csvlog«." #: utils/adt/nabstime.c:137 #, c-format @@ -20614,8 +22112,8 @@ msgstr "ungültiger cidr-Wert: »%s«" msgid "Value has bits set to right of mask." msgstr "Wert hat gesetzte Bits rechts von der Maske." 
-#: utils/adt/network.c:111 utils/adt/network.c:582 utils/adt/network.c:607 -#: utils/adt/network.c:632 +#: utils/adt/network.c:111 utils/adt/network.c:592 utils/adt/network.c:617 +#: utils/adt/network.c:642 #, c-format msgid "could not format inet value: %m" msgstr "konnte inet-Wert nicht formatieren: %m" @@ -20648,109 +22146,114 @@ msgstr "ungültiger externer »cidr«-Wert" msgid "invalid mask length: %d" msgstr "ungültige Maskenlänge: %d" -#: utils/adt/network.c:650 +#: utils/adt/network.c:660 #, c-format msgid "could not format cidr value: %m" msgstr "konnte cidr-Wert nicht formatieren: %m" -#: utils/adt/network.c:883 +#: utils/adt/network.c:893 #, c-format msgid "cannot merge addresses from different families" msgstr "Adressen verschiedener Familien können nicht zusammengeführt werden" -#: utils/adt/network.c:1292 +#: utils/adt/network.c:1309 #, c-format msgid "cannot AND inet values of different sizes" msgstr "binäres »Und« nicht mit »inet«-Werten unterschiedlicher Größe möglich" -#: utils/adt/network.c:1324 +#: utils/adt/network.c:1341 #, c-format msgid "cannot OR inet values of different sizes" msgstr "binäres »Oder« nicht mit »inet«-Werten unterschiedlicher Größe möglich" -#: utils/adt/network.c:1385 utils/adt/network.c:1461 +#: utils/adt/network.c:1402 utils/adt/network.c:1478 #, c-format msgid "result is out of range" msgstr "Ergebnis ist außerhalb des gültigen Bereichs" -#: utils/adt/network.c:1426 +#: utils/adt/network.c:1443 #, c-format msgid "cannot subtract inet values of different sizes" msgstr "Subtraktion von »inet«-Werten unterschiedlicher Größe nicht möglich" -#: utils/adt/numeric.c:819 +#: utils/adt/numeric.c:830 #, c-format msgid "invalid sign in external \"numeric\" value" msgstr "ungültiges Vorzeichen in externem »numeric«-Wert" -#: utils/adt/numeric.c:825 +#: utils/adt/numeric.c:836 #, c-format msgid "invalid scale in external \"numeric\" value" msgstr "ungültige Skala in externem »numeric«-Wert" -#: utils/adt/numeric.c:834 +#: utils/adt/numeric.c:845 #, c-format msgid "invalid digit in external \"numeric\" value" msgstr "ungültige Ziffer in externem »numeric«-Wert" -#: utils/adt/numeric.c:1024 utils/adt/numeric.c:1038 +#: utils/adt/numeric.c:1035 utils/adt/numeric.c:1049 #, c-format msgid "NUMERIC precision %d must be between 1 and %d" msgstr "Präzision von NUMERIC (%d) muss zwischen 1 und %d liegen" -#: utils/adt/numeric.c:1029 +#: utils/adt/numeric.c:1040 #, c-format msgid "NUMERIC scale %d must be between 0 and precision %d" msgstr "Skala von NUMERIC (%d) muss zwischen 0 und %d liegen" -#: utils/adt/numeric.c:1047 +#: utils/adt/numeric.c:1058 #, c-format msgid "invalid NUMERIC type modifier" msgstr "ungültiker Modifikator für Typ NUMERIC" -#: utils/adt/numeric.c:1379 +#: utils/adt/numeric.c:1390 #, c-format msgid "start value cannot be NaN" msgstr "Startwert kann nicht NaN sein" -#: utils/adt/numeric.c:1384 +#: utils/adt/numeric.c:1395 #, c-format msgid "stop value cannot be NaN" msgstr "Stoppwert kann nicht NaN sein" -#: utils/adt/numeric.c:1394 +#: utils/adt/numeric.c:1405 #, c-format msgid "step size cannot be NaN" msgstr "Schrittgröße kann nicht NaN sein" -#: utils/adt/numeric.c:2589 utils/adt/numeric.c:5551 utils/adt/numeric.c:5996 -#: utils/adt/numeric.c:7700 utils/adt/numeric.c:8125 utils/adt/numeric.c:8240 -#: utils/adt/numeric.c:8313 +#: utils/adt/numeric.c:2736 utils/adt/numeric.c:5725 utils/adt/numeric.c:6170 +#: utils/adt/numeric.c:7878 utils/adt/numeric.c:8303 utils/adt/numeric.c:8417 +#: utils/adt/numeric.c:8490 #, c-format msgid "value overflows 
numeric format" msgstr "Wert verursacht Überlauf im »numeric«-Format" -#: utils/adt/numeric.c:2931 +#: utils/adt/numeric.c:3095 #, c-format msgid "cannot convert NaN to integer" msgstr "kann NaN nicht in integer umwandeln" -#: utils/adt/numeric.c:2997 +#: utils/adt/numeric.c:3161 #, c-format msgid "cannot convert NaN to bigint" msgstr "kann NaN nicht in bigint umwandeln" -#: utils/adt/numeric.c:3042 +#: utils/adt/numeric.c:3206 #, c-format msgid "cannot convert NaN to smallint" msgstr "kann NaN nicht in smallint umwandeln" -#: utils/adt/numeric.c:6066 +#: utils/adt/numeric.c:3243 utils/adt/numeric.c:3314 +#, c-format +msgid "cannot convert infinity to numeric" +msgstr "kann Unendlich nicht in numeric umwandeln" + +#: utils/adt/numeric.c:6240 #, c-format msgid "numeric field overflow" msgstr "Feldüberlauf bei Typ »numeric«" -#: utils/adt/numeric.c:6067 +#: utils/adt/numeric.c:6241 #, c-format msgid "A field with precision %d, scale %d must round to an absolute value less than %s%d." msgstr "Ein Feld mit Präzision %d, Skala %d muss beim Runden einen Betrag von weniger als %s%d ergeben." @@ -20765,63 +22268,118 @@ msgstr "Wert »%s« ist außerhalb des gültigen Bereichs für 8-Bit-Ganzzahl" msgid "invalid oidvector data" msgstr "ungültige oidvector-Daten" -#: utils/adt/oracle_compat.c:895 +#: utils/adt/oracle_compat.c:896 #, c-format msgid "requested character too large" msgstr "verlangtes Zeichen zu groß" -#: utils/adt/oracle_compat.c:945 utils/adt/oracle_compat.c:1007 +#: utils/adt/oracle_compat.c:946 utils/adt/oracle_compat.c:1008 #, c-format msgid "requested character too large for encoding: %d" msgstr "gewünschtes Zeichen ist zu groß für die Kodierung: %d" -#: utils/adt/oracle_compat.c:986 +#: utils/adt/oracle_compat.c:987 #, c-format msgid "requested character not valid for encoding: %d" msgstr "gewünschtes Zeichen ist nicht gültig für die Kodierung: %d" -#: utils/adt/oracle_compat.c:1000 +#: utils/adt/oracle_compat.c:1001 #, c-format msgid "null character not permitted" msgstr "Null-Zeichen ist nicht erlaubt" -#: utils/adt/orderedsetaggs.c:426 utils/adt/orderedsetaggs.c:531 -#: utils/adt/orderedsetaggs.c:670 +#: utils/adt/orderedsetaggs.c:442 utils/adt/orderedsetaggs.c:546 +#: utils/adt/orderedsetaggs.c:684 #, c-format msgid "percentile value %g is not between 0 and 1" msgstr "Perzentilwert %g ist nicht zwischen 0 und 1" -#: utils/adt/pg_locale.c:1028 +#: utils/adt/pg_locale.c:1034 #, c-format msgid "Apply system library package updates." msgstr "Aktualisieren Sie die Systembibliotheken." -#: utils/adt/pg_locale.c:1233 +#: utils/adt/pg_locale.c:1249 #, c-format msgid "could not create locale \"%s\": %m" msgstr "konnte Locale »%s« nicht erzeugen: %m" -#: utils/adt/pg_locale.c:1236 +#: utils/adt/pg_locale.c:1252 #, c-format msgid "The operating system could not find any locale data for the locale name \"%s\"." msgstr "Das Betriebssystem konnte keine Locale-Daten für den Locale-Namen »%s« finden." 
-#: utils/adt/pg_locale.c:1323 +#: utils/adt/pg_locale.c:1353 #, c-format msgid "collations with different collate and ctype values are not supported on this platform" msgstr "Sortierfolgen mit unterschiedlichen »collate«- und »ctype«-Werten werden auf dieser Plattform nicht unterstützt" -#: utils/adt/pg_locale.c:1338 +#: utils/adt/pg_locale.c:1362 +#, c-format +msgid "collation provider LIBC is not supported on this platform" +msgstr "Sortierfolgen-Provider LIBC wird auf dieser Plattform nicht unterstützt" + +#: utils/adt/pg_locale.c:1374 +#, c-format +msgid "collations with different collate and ctype values are not supported by ICU" +msgstr "Sortierfolgen mit unterschiedlichen »collate«- und »ctype«-Werten werden von ICU nicht unterstützt" + +#: utils/adt/pg_locale.c:1380 utils/adt/pg_locale.c:1468 +#, c-format +msgid "could not open collator for locale \"%s\": %s" +msgstr "konnte Collator für Locale »%s« nicht öffnen: %s" + +#: utils/adt/pg_locale.c:1391 +#, c-format +msgid "ICU is not supported in this build" +msgstr "ICU wird in dieser Installation nicht unterstützt" + +#: utils/adt/pg_locale.c:1392 +#, c-format +msgid "You need to rebuild PostgreSQL using --with-icu." +msgstr "Sie müssen PostgreSQL mit --with-icu neu bauen." + +#: utils/adt/pg_locale.c:1412 +#, c-format +msgid "collation \"%s\" has no actual version, but a version was specified" +msgstr "Sortierfolge »%s« hat keine tatsächliche Version, aber eine Version wurde angegeben" + +#: utils/adt/pg_locale.c:1419 #, c-format -msgid "nondefault collations are not supported on this platform" -msgstr "Sortierfolgen außer der Standardsortierfolge werden auf dieser Plattform nicht unterstützt" +msgid "collation \"%s\" has version mismatch" +msgstr "Version von Sortierfolge »%s« stimmt nicht überein" -#: utils/adt/pg_locale.c:1509 +#: utils/adt/pg_locale.c:1421 +#, c-format +msgid "The collation in the database was created using version %s, but the operating system provides version %s." +msgstr "Die Sortierfolge in der Datenbank wurde mit Version %s erzeugt, aber das Betriebssystem hat Version %s." + +#: utils/adt/pg_locale.c:1424 +#, c-format +msgid "Rebuild all objects affected by this collation and run ALTER COLLATION %s REFRESH VERSION, or build PostgreSQL with the right library version." +msgstr "Bauen Sie alle von dieser Sortierfolge beinflussten Objekte neu und führen Sie ALTER COLLATION %s REFRESH VERSION aus, oder bauen Sie PostgreSQL mit der richtigen Bibliotheksversion." + +#: utils/adt/pg_locale.c:1508 +#, c-format +msgid "could not open ICU converter for encoding \"%s\": %s" +msgstr "konnte ICU-Konverter für Kodierung »%s« nicht öffnen: %s" + +#: utils/adt/pg_locale.c:1539 utils/adt/pg_locale.c:1548 +#, c-format +msgid "ucnv_toUChars failed: %s" +msgstr "ucnv_toUChars fehlgeschlagen: %s" + +#: utils/adt/pg_locale.c:1577 utils/adt/pg_locale.c:1586 +#, c-format +msgid "ucnv_fromUChars failed: %s" +msgstr "ucnv_fromUChars fehlgeschlagen: %s" + +#: utils/adt/pg_locale.c:1758 #, c-format msgid "invalid multibyte character for locale" msgstr "ungültiges Mehrbytezeichen für Locale" -#: utils/adt/pg_locale.c:1510 +#: utils/adt/pg_locale.c:1759 #, c-format msgid "The server's LC_CTYPE locale is probably incompatible with the database encoding." msgstr "Die LC_CTYPE-Locale des Servers ist wahrscheinlich mit der Kodierung der Datenbank inkompatibel." 
@@ -20831,20 +22389,11 @@ msgstr "Die LC_CTYPE-Locale des Servers ist wahrscheinlich mit der Kodierung der msgid "function can only be called when server is in binary upgrade mode" msgstr "Funktion kann nur aufgerufen werden, wenn der Server im Binary-Upgrade-Modus ist" -#: utils/adt/pgstatfuncs.c:471 +#: utils/adt/pgstatfuncs.c:474 #, c-format msgid "invalid command name: \"%s\"" msgstr "ungültiger Befehlsname: »%s«" -#: utils/adt/pseudotypes.c:94 utils/adt/pseudotypes.c:122 -#: utils/adt/pseudotypes.c:147 utils/adt/pseudotypes.c:171 -#: utils/adt/pseudotypes.c:282 utils/adt/pseudotypes.c:307 -#: utils/adt/pseudotypes.c:335 utils/adt/pseudotypes.c:363 -#: utils/adt/pseudotypes.c:393 -#, c-format -msgid "cannot accept a value of type %s" -msgstr "kann keinen Wert vom Typ %s annehmen" - #: utils/adt/pseudotypes.c:247 #, c-format msgid "cannot accept a value of a shell type" @@ -20861,10 +22410,9 @@ msgid "cannot output a value of type %s" msgstr "kann keinen Wert vom Typ %s anzeigen" #: utils/adt/pseudotypes.c:403 -#, fuzzy, c-format -#| msgid "cannot display a value of type any" +#, c-format msgid "cannot display a value of type %s" -msgstr "kann keinen Wert vom Typ any anzeigen" +msgstr "kann keinen Wert vom Typ %s anzeigen" #: utils/adt/rangetypes.c:405 #, c-format @@ -20881,57 +22429,57 @@ msgstr "Ergebnis von Bereichsdifferenz würde nicht zusammenhängend sein" msgid "result of range union would not be contiguous" msgstr "Ergebnis von Bereichsvereinigung würde nicht zusammenhängend sein" -#: utils/adt/rangetypes.c:1533 +#: utils/adt/rangetypes.c:1597 #, c-format msgid "range lower bound must be less than or equal to range upper bound" msgstr "Bereichsuntergrenze muss kleiner als oder gleich der Bereichsobergrenze sein" -#: utils/adt/rangetypes.c:1916 utils/adt/rangetypes.c:1929 -#: utils/adt/rangetypes.c:1943 +#: utils/adt/rangetypes.c:1980 utils/adt/rangetypes.c:1993 +#: utils/adt/rangetypes.c:2007 #, c-format msgid "invalid range bound flags" msgstr "ungültige Markierungen für Bereichsgrenzen" -#: utils/adt/rangetypes.c:1917 utils/adt/rangetypes.c:1930 -#: utils/adt/rangetypes.c:1944 +#: utils/adt/rangetypes.c:1981 utils/adt/rangetypes.c:1994 +#: utils/adt/rangetypes.c:2008 #, c-format msgid "Valid values are \"[]\", \"[)\", \"(]\", and \"()\"." msgstr "Gültige Werte sind »[]«, »[)«, »(]« und »()«." -#: utils/adt/rangetypes.c:2009 utils/adt/rangetypes.c:2026 -#: utils/adt/rangetypes.c:2039 utils/adt/rangetypes.c:2057 -#: utils/adt/rangetypes.c:2068 utils/adt/rangetypes.c:2112 -#: utils/adt/rangetypes.c:2120 +#: utils/adt/rangetypes.c:2073 utils/adt/rangetypes.c:2090 +#: utils/adt/rangetypes.c:2103 utils/adt/rangetypes.c:2121 +#: utils/adt/rangetypes.c:2132 utils/adt/rangetypes.c:2176 +#: utils/adt/rangetypes.c:2184 #, c-format msgid "malformed range literal: \"%s\"" msgstr "fehlerhafte Bereichskonstante: »%s«" -#: utils/adt/rangetypes.c:2011 +#: utils/adt/rangetypes.c:2075 #, c-format msgid "Junk after \"empty\" key word." msgstr "Müll nach Schlüsselwort »empty«." -#: utils/adt/rangetypes.c:2028 +#: utils/adt/rangetypes.c:2092 #, c-format msgid "Missing left parenthesis or bracket." msgstr "Linke runde oder eckige Klammer fehlt." -#: utils/adt/rangetypes.c:2041 +#: utils/adt/rangetypes.c:2105 #, c-format msgid "Missing comma after lower bound." msgstr "Komma fehlt nach Untergrenze." -#: utils/adt/rangetypes.c:2059 +#: utils/adt/rangetypes.c:2123 #, c-format msgid "Too many commas." msgstr "Zu viele Kommas." 
-#: utils/adt/rangetypes.c:2070 +#: utils/adt/rangetypes.c:2134 #, c-format msgid "Junk after right parenthesis or bracket." msgstr "Müll nach rechter runder oder eckiger Klammer." -#: utils/adt/regexp.c:285 utils/adt/regexp.c:1344 utils/adt/varlena.c:3816 +#: utils/adt/regexp.c:285 utils/adt/regexp.c:1344 utils/adt/varlena.c:3993 #, c-format msgid "regular expression failed: %s" msgstr "regulärer Ausdruck fehlgeschlagen: %s" @@ -20942,154 +22490,151 @@ msgid "invalid regexp option: \"%c\"" msgstr "ungültige Option für regulären Ausdruck: »%c«" #: utils/adt/regexp.c:862 -#, fuzzy, c-format -#| msgid "regexp_split does not support the global option" +#, c-format msgid "regexp_match does not support the global option" -msgstr "regexp_split unterstützt die »Global«-Option nicht" +msgstr "regexp_match unterstützt die »Global«-Option nicht" #: utils/adt/regexp.c:863 #, c-format msgid "Use the regexp_matches function instead." -msgstr "" +msgstr "Verwenden Sie stattdessen die Funktion regexp_matches." #: utils/adt/regexp.c:1163 -#, fuzzy, c-format -#| msgid "regexp_split does not support the global option" +#, c-format msgid "regexp_split_to_table does not support the global option" -msgstr "regexp_split unterstützt die »Global«-Option nicht" +msgstr "regexp_split_to_table unterstützt die »Global«-Option nicht" #: utils/adt/regexp.c:1219 -#, fuzzy, c-format -#| msgid "regexp_split does not support the global option" +#, c-format msgid "regexp_split_to_array does not support the global option" -msgstr "regexp_split unterstützt die »Global«-Option nicht" +msgstr "regexp_split_to_array unterstützt die »Global«-Option nicht" -#: utils/adt/regproc.c:130 utils/adt/regproc.c:150 +#: utils/adt/regproc.c:106 #, c-format msgid "more than one function named \"%s\"" msgstr "es gibt mehrere Funktionen namens »%s«" -#: utils/adt/regproc.c:589 utils/adt/regproc.c:609 +#: utils/adt/regproc.c:524 #, c-format msgid "more than one operator named %s" msgstr "es gibt mehrere Operatoren namens %s" -#: utils/adt/regproc.c:781 utils/adt/regproc.c:822 utils/adt/regproc.c:2008 -#: utils/adt/ruleutils.c:8718 utils/adt/ruleutils.c:8886 +#: utils/adt/regproc.c:696 utils/adt/regproc.c:737 utils/adt/regproc.c:1865 +#: utils/adt/ruleutils.c:9065 utils/adt/ruleutils.c:9233 #, c-format msgid "too many arguments" msgstr "zu viele Argumente" -#: utils/adt/regproc.c:782 utils/adt/regproc.c:823 +#: utils/adt/regproc.c:697 utils/adt/regproc.c:738 #, c-format msgid "Provide two argument types for operator." msgstr "Geben Sie zwei Argumente für den Operator an." 
-#: utils/adt/regproc.c:1596 utils/adt/regproc.c:1620 utils/adt/regproc.c:1717 -#: utils/adt/regproc.c:1741 utils/adt/regproc.c:1843 utils/adt/regproc.c:1848 -#: utils/adt/varlena.c:3071 utils/adt/varlena.c:3076 +#: utils/adt/regproc.c:1449 utils/adt/regproc.c:1473 utils/adt/regproc.c:1574 +#: utils/adt/regproc.c:1598 utils/adt/regproc.c:1700 utils/adt/regproc.c:1705 +#: utils/adt/varlena.c:3246 utils/adt/varlena.c:3251 #, c-format msgid "invalid name syntax" msgstr "ungültige Namenssyntax" -#: utils/adt/regproc.c:1906 +#: utils/adt/regproc.c:1763 #, c-format msgid "expected a left parenthesis" msgstr "linke Klammer erwartet" -#: utils/adt/regproc.c:1922 +#: utils/adt/regproc.c:1779 #, c-format msgid "expected a right parenthesis" msgstr "rechte Klammer erwartet" -#: utils/adt/regproc.c:1941 +#: utils/adt/regproc.c:1798 #, c-format msgid "expected a type name" msgstr "Typname erwartet" -#: utils/adt/regproc.c:1973 +#: utils/adt/regproc.c:1830 #, c-format msgid "improper type name" msgstr "falscher Typname" -#: utils/adt/ri_triggers.c:343 utils/adt/ri_triggers.c:2490 -#: utils/adt/ri_triggers.c:3315 +#: utils/adt/ri_triggers.c:337 utils/adt/ri_triggers.c:2085 +#: utils/adt/ri_triggers.c:2842 #, c-format msgid "insert or update on table \"%s\" violates foreign key constraint \"%s\"" msgstr "Einfügen oder Aktualisieren in Tabelle »%s« verletzt Fremdschlüssel-Constraint »%s«" -#: utils/adt/ri_triggers.c:346 utils/adt/ri_triggers.c:2493 +#: utils/adt/ri_triggers.c:340 utils/adt/ri_triggers.c:2088 #, c-format msgid "MATCH FULL does not allow mixing of null and nonnull key values." msgstr "MATCH FULL erlaubt das Mischen von Schlüsseln, die NULL und nicht NULL sind, nicht." -#: utils/adt/ri_triggers.c:2732 +#: utils/adt/ri_triggers.c:2273 #, c-format msgid "function \"%s\" must be fired for INSERT" msgstr "Funktion »%s« muss von INSERT ausgelöst werden" -#: utils/adt/ri_triggers.c:2738 +#: utils/adt/ri_triggers.c:2279 #, c-format msgid "function \"%s\" must be fired for UPDATE" msgstr "Funktion »%s« muss von UPDATE ausgelöst werden" -#: utils/adt/ri_triggers.c:2744 +#: utils/adt/ri_triggers.c:2285 #, c-format msgid "function \"%s\" must be fired for DELETE" msgstr "Funktion »%s« muss von DELETE ausgelöst werden" -#: utils/adt/ri_triggers.c:2767 +#: utils/adt/ri_triggers.c:2308 #, c-format msgid "no pg_constraint entry for trigger \"%s\" on table \"%s\"" msgstr "kein »pg_constraint«-Eintrag für Trigger »%s« für Tabelle »%s«" -#: utils/adt/ri_triggers.c:2769 +#: utils/adt/ri_triggers.c:2310 #, c-format msgid "Remove this referential integrity trigger and its mates, then do ALTER TABLE ADD CONSTRAINT." msgstr "Entfernen Sie diesen Referentielle-Integritäts-Trigger und seine Partner und führen Sie dann ALTER TABLE ADD CONSTRAINT aus." -#: utils/adt/ri_triggers.c:3225 +#: utils/adt/ri_triggers.c:2689 #, c-format msgid "referential integrity query on \"%s\" from constraint \"%s\" on \"%s\" gave unexpected result" msgstr "RI-Anfrage in Tabelle »%s« für Constraint »%s« von Tabelle »%s« ergab unerwartetes Ergebnis" -#: utils/adt/ri_triggers.c:3229 +#: utils/adt/ri_triggers.c:2693 #, c-format msgid "This is most likely due to a rule having rewritten the query." msgstr "Das liegt höchstwahrscheinlich daran, dass eine Regel die Anfrage umgeschrieben hat." -#: utils/adt/ri_triggers.c:3319 +#: utils/adt/ri_triggers.c:2846 #, c-format msgid "Key (%s)=(%s) is not present in table \"%s\"." msgstr "Schlüssel (%s)=(%s) ist nicht in Tabelle »%s« vorhanden." 
-#: utils/adt/ri_triggers.c:3322 +#: utils/adt/ri_triggers.c:2849 #, c-format msgid "Key is not present in table \"%s\"." msgstr "Der Schlüssel ist nicht in Tabelle »%s« vorhanden." -#: utils/adt/ri_triggers.c:3328 +#: utils/adt/ri_triggers.c:2855 #, c-format msgid "update or delete on table \"%s\" violates foreign key constraint \"%s\" on table \"%s\"" msgstr "Aktualisieren oder Löschen in Tabelle »%s« verletzt Fremdschlüssel-Constraint »%s« von Tabelle »%s«" -#: utils/adt/ri_triggers.c:3333 +#: utils/adt/ri_triggers.c:2860 #, c-format msgid "Key (%s)=(%s) is still referenced from table \"%s\"." msgstr "Auf Schlüssel (%s)=(%s) wird noch aus Tabelle »%s« verwiesen." -#: utils/adt/ri_triggers.c:3336 +#: utils/adt/ri_triggers.c:2863 #, c-format msgid "Key is still referenced from table \"%s\"." msgstr "Auf den Schlüssel wird noch aus Tabelle »%s« verwiesen." -#: utils/adt/rowtypes.c:103 utils/adt/rowtypes.c:479 +#: utils/adt/rowtypes.c:103 utils/adt/rowtypes.c:481 #, c-format msgid "input of anonymous composite types is not implemented" msgstr "Eingabe anonymer zusammengesetzter Typen ist nicht implementiert" -#: utils/adt/rowtypes.c:155 utils/adt/rowtypes.c:183 utils/adt/rowtypes.c:206 -#: utils/adt/rowtypes.c:214 utils/adt/rowtypes.c:266 utils/adt/rowtypes.c:274 +#: utils/adt/rowtypes.c:155 utils/adt/rowtypes.c:184 utils/adt/rowtypes.c:207 +#: utils/adt/rowtypes.c:215 utils/adt/rowtypes.c:267 utils/adt/rowtypes.c:275 #, c-format msgid "malformed record literal: \"%s\"" msgstr "fehlerhafte Record-Konstante: »%s«" @@ -21099,193 +22644,193 @@ msgstr "fehlerhafte Record-Konstante: »%s«" msgid "Missing left parenthesis." msgstr "Linke Klammer fehlt." -#: utils/adt/rowtypes.c:184 +#: utils/adt/rowtypes.c:185 #, c-format msgid "Too few columns." msgstr "Zu wenige Spalten." -#: utils/adt/rowtypes.c:267 +#: utils/adt/rowtypes.c:268 #, c-format msgid "Too many columns." msgstr "Zu viele Spalten." -#: utils/adt/rowtypes.c:275 +#: utils/adt/rowtypes.c:276 #, c-format msgid "Junk after right parenthesis." msgstr "Müll nach rechter Klammer." 
-#: utils/adt/rowtypes.c:528 +#: utils/adt/rowtypes.c:530 #, c-format msgid "wrong number of columns: %d, expected %d" msgstr "falsche Anzahl der Spalten: %d, erwartet wurden %d" -#: utils/adt/rowtypes.c:555 +#: utils/adt/rowtypes.c:558 #, c-format msgid "wrong data type: %u, expected %u" msgstr "falscher Datentyp: %u, erwartet wurde %u" -#: utils/adt/rowtypes.c:616 +#: utils/adt/rowtypes.c:619 #, c-format msgid "improper binary format in record column %d" msgstr "falsches Binärformat in Record-Spalte %d" -#: utils/adt/rowtypes.c:902 utils/adt/rowtypes.c:1142 -#: utils/adt/rowtypes.c:1396 utils/adt/rowtypes.c:1673 +#: utils/adt/rowtypes.c:910 utils/adt/rowtypes.c:1154 utils/adt/rowtypes.c:1413 +#: utils/adt/rowtypes.c:1657 #, c-format msgid "cannot compare dissimilar column types %s and %s at record column %d" msgstr "kann unterschiedliche Spaltentypen %s und %s in Record-Spalte %d nicht vergleichen" -#: utils/adt/rowtypes.c:991 utils/adt/rowtypes.c:1213 -#: utils/adt/rowtypes.c:1529 utils/adt/rowtypes.c:1769 +#: utils/adt/rowtypes.c:999 utils/adt/rowtypes.c:1225 utils/adt/rowtypes.c:1508 +#: utils/adt/rowtypes.c:1731 #, c-format msgid "cannot compare record types with different numbers of columns" msgstr "kann Record-Typen mit unterschiedlicher Anzahl Spalten nicht vergleichen" -#: utils/adt/ruleutils.c:4494 +#: utils/adt/ruleutils.c:4756 #, c-format msgid "rule \"%s\" has unsupported event type %d" msgstr "Regel »%s« hat nicht unterstützten Ereignistyp %d" -#: utils/adt/selfuncs.c:5316 +#: utils/adt/selfuncs.c:5791 #, c-format msgid "case insensitive matching not supported on type bytea" msgstr "Mustersuche ohne Rücksicht auf Groß-/Kleinschreibung wird für Typ bytea nicht unterstützt" -#: utils/adt/selfuncs.c:5418 +#: utils/adt/selfuncs.c:5893 #, c-format msgid "regular-expression matching not supported on type bytea" msgstr "Mustersuche mit regulären Ausdrücken wird für Typ bytea nicht unterstützt" -#: utils/adt/timestamp.c:106 +#: utils/adt/timestamp.c:107 #, c-format msgid "TIMESTAMP(%d)%s precision must not be negative" msgstr "Präzision von TIMESTAMP(%d)%s darf nicht negativ sein" -#: utils/adt/timestamp.c:112 +#: utils/adt/timestamp.c:113 #, c-format msgid "TIMESTAMP(%d)%s precision reduced to maximum allowed, %d" msgstr "Präzision von TIMESTAMP(%d)%s auf erlaubten Höchstwert %d reduziert" -#: utils/adt/timestamp.c:175 utils/adt/timestamp.c:415 +#: utils/adt/timestamp.c:176 utils/adt/timestamp.c:416 #, c-format msgid "timestamp out of range: \"%s\"" msgstr "timestamp ist außerhalb des gültigen Bereichs: »%s«" -#: utils/adt/timestamp.c:193 utils/adt/timestamp.c:433 -#: utils/adt/timestamp.c:940 +#: utils/adt/timestamp.c:194 utils/adt/timestamp.c:434 +#: utils/adt/timestamp.c:941 #, c-format msgid "date/time value \"%s\" is no longer supported" msgstr "Datum/Zeit-Wert »%s« wird nicht mehr unterstützt" -#: utils/adt/timestamp.c:361 +#: utils/adt/timestamp.c:362 #, c-format msgid "timestamp(%d) precision must be between %d and %d" msgstr "Präzision von timestamp(%d) muss zwischen %d und %d sein" -#: utils/adt/timestamp.c:483 +#: utils/adt/timestamp.c:484 #, c-format msgid "invalid input syntax for numeric time zone: \"%s\"" msgstr "ungültige Eingabesyntax für numerische Zeitzone: »%s«" -#: utils/adt/timestamp.c:485 +#: utils/adt/timestamp.c:486 #, c-format msgid "Numeric time zones must have \"-\" or \"+\" as first character." msgstr "Numerische Zeitzonen müssen »-« oder »+« als erstes Zeichen haben." 
-#: utils/adt/timestamp.c:498 +#: utils/adt/timestamp.c:499 #, c-format msgid "numeric time zone \"%s\" out of range" msgstr "numerische Zeitzone »%s« ist außerhalb des gültigen Bereichs" -#: utils/adt/timestamp.c:600 utils/adt/timestamp.c:610 -#: utils/adt/timestamp.c:618 +#: utils/adt/timestamp.c:601 utils/adt/timestamp.c:611 +#: utils/adt/timestamp.c:619 #, c-format msgid "timestamp out of range: %d-%02d-%02d %d:%02d:%02g" msgstr "timestamp ist außerhalb des gültigen Bereichs: %d-%02d-%02d %d:%02d:%02g" -#: utils/adt/timestamp.c:719 +#: utils/adt/timestamp.c:720 #, c-format msgid "timestamp cannot be NaN" msgstr "timestamp kann nicht NaN sein" -#: utils/adt/timestamp.c:737 utils/adt/timestamp.c:749 +#: utils/adt/timestamp.c:738 utils/adt/timestamp.c:750 #, c-format msgid "timestamp out of range: \"%g\"" msgstr "timestamp ist außerhalb des gültigen Bereichs: »%g«" -#: utils/adt/timestamp.c:934 utils/adt/timestamp.c:1504 -#: utils/adt/timestamp.c:1917 utils/adt/timestamp.c:2964 -#: utils/adt/timestamp.c:2969 utils/adt/timestamp.c:2974 -#: utils/adt/timestamp.c:3024 utils/adt/timestamp.c:3031 -#: utils/adt/timestamp.c:3038 utils/adt/timestamp.c:3058 -#: utils/adt/timestamp.c:3065 utils/adt/timestamp.c:3072 -#: utils/adt/timestamp.c:3102 utils/adt/timestamp.c:3110 -#: utils/adt/timestamp.c:3154 utils/adt/timestamp.c:3477 -#: utils/adt/timestamp.c:3602 utils/adt/timestamp.c:3970 +#: utils/adt/timestamp.c:935 utils/adt/timestamp.c:1505 +#: utils/adt/timestamp.c:1918 utils/adt/timestamp.c:3013 +#: utils/adt/timestamp.c:3018 utils/adt/timestamp.c:3023 +#: utils/adt/timestamp.c:3073 utils/adt/timestamp.c:3080 +#: utils/adt/timestamp.c:3087 utils/adt/timestamp.c:3107 +#: utils/adt/timestamp.c:3114 utils/adt/timestamp.c:3121 +#: utils/adt/timestamp.c:3151 utils/adt/timestamp.c:3159 +#: utils/adt/timestamp.c:3203 utils/adt/timestamp.c:3630 +#: utils/adt/timestamp.c:3755 utils/adt/timestamp.c:4140 #, c-format msgid "interval out of range" msgstr "interval-Wert ist außerhalb des gültigen Bereichs" -#: utils/adt/timestamp.c:1067 utils/adt/timestamp.c:1100 +#: utils/adt/timestamp.c:1068 utils/adt/timestamp.c:1101 #, c-format msgid "invalid INTERVAL type modifier" msgstr "ungültiger Modifikator für Typ INTERVAL" -#: utils/adt/timestamp.c:1083 +#: utils/adt/timestamp.c:1084 #, c-format msgid "INTERVAL(%d) precision must not be negative" msgstr "INTERVAL(%d)-Präzision darf nicht negativ sein" -#: utils/adt/timestamp.c:1089 +#: utils/adt/timestamp.c:1090 #, c-format msgid "INTERVAL(%d) precision reduced to maximum allowed, %d" msgstr "INTERVAL(%d)-Präzision auf erlaubtes Maximum %d reduziert" -#: utils/adt/timestamp.c:1461 +#: utils/adt/timestamp.c:1462 #, c-format msgid "interval(%d) precision must be between %d and %d" msgstr "Präzision von interval(%d) muss zwischen %d und %d sein" -#: utils/adt/timestamp.c:2565 +#: utils/adt/timestamp.c:2614 #, c-format msgid "cannot subtract infinite timestamps" msgstr "kann unendliche timestamp-Werte nicht subtrahieren" -#: utils/adt/timestamp.c:3721 utils/adt/timestamp.c:4230 -#: utils/adt/timestamp.c:4397 utils/adt/timestamp.c:4418 +#: utils/adt/timestamp.c:3883 utils/adt/timestamp.c:4400 +#: utils/adt/timestamp.c:4567 utils/adt/timestamp.c:4588 #, c-format msgid "timestamp units \"%s\" not supported" msgstr "»timestamp«-Einheit »%s« nicht unterstützt" -#: utils/adt/timestamp.c:3735 utils/adt/timestamp.c:4184 -#: utils/adt/timestamp.c:4428 +#: utils/adt/timestamp.c:3897 utils/adt/timestamp.c:4354 +#: utils/adt/timestamp.c:4598 #, c-format msgid "timestamp units 
\"%s\" not recognized" msgstr "»timestamp«-Einheit »%s« nicht erkannt" -#: utils/adt/timestamp.c:3867 utils/adt/timestamp.c:4225 -#: utils/adt/timestamp.c:4598 utils/adt/timestamp.c:4620 +#: utils/adt/timestamp.c:4029 utils/adt/timestamp.c:4395 +#: utils/adt/timestamp.c:4768 utils/adt/timestamp.c:4790 #, c-format msgid "timestamp with time zone units \"%s\" not supported" msgstr "»timestamp with time zone«-Einheit »%s« nicht unterstützt" -#: utils/adt/timestamp.c:3884 utils/adt/timestamp.c:4179 -#: utils/adt/timestamp.c:4629 +#: utils/adt/timestamp.c:4046 utils/adt/timestamp.c:4349 +#: utils/adt/timestamp.c:4799 #, c-format msgid "timestamp with time zone units \"%s\" not recognized" msgstr "»timestamp with time zone«-Einheit »%s« nicht erkannt" -#: utils/adt/timestamp.c:3957 +#: utils/adt/timestamp.c:4127 #, c-format msgid "interval units \"%s\" not supported because months usually have fractional weeks" msgstr "»interval«-Einheit »%s« wird nicht unterstützt, weil Monate gewöhnlich partielle Wochen haben" -#: utils/adt/timestamp.c:3963 utils/adt/timestamp.c:4723 +#: utils/adt/timestamp.c:4133 utils/adt/timestamp.c:4893 #, c-format msgid "interval units \"%s\" not supported" msgstr "»interval«-Einheit »%s« nicht unterstützt" -#: utils/adt/timestamp.c:3979 utils/adt/timestamp.c:4746 +#: utils/adt/timestamp.c:4149 utils/adt/timestamp.c:4916 #, c-format msgid "interval units \"%s\" not recognized" msgstr "»interval«-Einheit »%s« nicht erkannt" @@ -21315,43 +22860,43 @@ msgstr "suppress_redundant_updates_trigger: muss für jede Zeile aufgerufen werd msgid "gtsvector_in not implemented" msgstr "gtsvector_in ist nicht implementiert" -#: utils/adt/tsquery.c:166 +#: utils/adt/tsquery.c:200 #, c-format msgid "distance in phrase operator should not be greater than %d" msgstr "Abstand im Phrasenoperator sollte nicht größer als %d sein" -#: utils/adt/tsquery.c:254 utils/adt/tsquery.c:513 -#: utils/adt/tsvector_parser.c:141 +#: utils/adt/tsquery.c:310 utils/adt/tsquery.c:725 +#: utils/adt/tsvector_parser.c:133 #, c-format msgid "syntax error in tsquery: \"%s\"" msgstr "Syntaxfehler in tsquery: »%s«" -#: utils/adt/tsquery.c:275 +#: utils/adt/tsquery.c:334 #, c-format msgid "no operand in tsquery: \"%s\"" msgstr "kein Operand in tsquery: »%s«" -#: utils/adt/tsquery.c:358 +#: utils/adt/tsquery.c:568 #, c-format msgid "value is too big in tsquery: \"%s\"" msgstr "Wert ist zu groß in tsquery: »%s«" -#: utils/adt/tsquery.c:363 +#: utils/adt/tsquery.c:573 #, c-format msgid "operand is too long in tsquery: \"%s\"" msgstr "Operator ist zu lang in tsquery: »%s«" -#: utils/adt/tsquery.c:391 +#: utils/adt/tsquery.c:601 #, c-format msgid "word is too long in tsquery: \"%s\"" msgstr "Wort ist zu lang in tsquery: »%s«" -#: utils/adt/tsquery.c:642 +#: utils/adt/tsquery.c:870 #, c-format msgid "text-search query doesn't contain lexemes: \"%s\"" msgstr "Textsucheanfrage enthält keine Lexeme: »%s«" -#: utils/adt/tsquery.c:653 utils/adt/tsquery_util.c:375 +#: utils/adt/tsquery.c:881 utils/adt/tsquery_util.c:375 #, c-format msgid "tsquery is too large" msgstr "tsquery ist zu groß" @@ -21457,69 +23002,74 @@ msgstr "Textsuchekonfigurationsname »%s« muss Schemaqualifikation haben" msgid "column \"%s\" is not of a character type" msgstr "Spalte »%s« hat keinen Zeichentyp" -#: utils/adt/tsvector_parser.c:142 +#: utils/adt/tsvector_parser.c:134 #, c-format msgid "syntax error in tsvector: \"%s\"" msgstr "Syntaxfehler in tsvector: »%s«" -#: utils/adt/tsvector_parser.c:207 +#: utils/adt/tsvector_parser.c:200 #, c-format msgid 
"there is no escaped character: \"%s\"" msgstr "es gibt kein escaptes Zeichen: »%s«" -#: utils/adt/tsvector_parser.c:324 +#: utils/adt/tsvector_parser.c:318 #, c-format msgid "wrong position info in tsvector: \"%s\"" msgstr "falsche Positionsinformationen in tsvector: »%s«" -#: utils/adt/txid.c:555 +#: utils/adt/txid.c:135 +#, c-format +msgid "transaction ID %s is in the future" +msgstr "Transaktions-ID %s ist in der Zukunft" + +#: utils/adt/txid.c:624 #, c-format msgid "invalid external txid_snapshot data" msgstr "ungültige externe txid_snapshot-Daten" -#: utils/adt/varbit.c:58 utils/adt/varchar.c:51 +#: utils/adt/varbit.c:59 utils/adt/varchar.c:51 #, c-format msgid "length for type %s must be at least 1" msgstr "Länge von Typ %s muss mindestens 1 sein" -#: utils/adt/varbit.c:63 utils/adt/varchar.c:55 +#: utils/adt/varbit.c:64 utils/adt/varchar.c:55 #, c-format msgid "length for type %s cannot exceed %d" msgstr "Länge von Typ %s kann %d nicht überschreiten" -#: utils/adt/varbit.c:164 utils/adt/varbit.c:476 utils/adt/varbit.c:973 +#: utils/adt/varbit.c:165 utils/adt/varbit.c:477 utils/adt/varbit.c:974 #, c-format msgid "bit string length exceeds the maximum allowed (%d)" msgstr "Länge der Bitkette überschreitet erlaubtes Maximum (%d)" -#: utils/adt/varbit.c:178 utils/adt/varbit.c:321 utils/adt/varbit.c:378 +#: utils/adt/varbit.c:179 utils/adt/varbit.c:322 utils/adt/varbit.c:379 #, c-format msgid "bit string length %d does not match type bit(%d)" msgstr "Länge der Bitkette %d stimmt nicht mit Typ bit(%d) überein" -#: utils/adt/varbit.c:200 utils/adt/varbit.c:512 +#: utils/adt/varbit.c:201 utils/adt/varbit.c:513 #, c-format msgid "\"%c\" is not a valid binary digit" msgstr "»%c« ist keine gültige Binärziffer" -#: utils/adt/varbit.c:225 utils/adt/varbit.c:537 +#: utils/adt/varbit.c:226 utils/adt/varbit.c:538 #, c-format msgid "\"%c\" is not a valid hexadecimal digit" msgstr "»%c« ist keine gültige Hexadezimalziffer" -#: utils/adt/varbit.c:312 utils/adt/varbit.c:628 +#: utils/adt/varbit.c:313 utils/adt/varbit.c:629 #, c-format msgid "invalid length in external bit string" msgstr "ungültige Länge in externer Bitkette" -#: utils/adt/varbit.c:490 utils/adt/varbit.c:637 utils/adt/varbit.c:731 +#: utils/adt/varbit.c:491 utils/adt/varbit.c:638 utils/adt/varbit.c:732 #, c-format msgid "bit string too long for type bit varying(%d)" msgstr "Bitkette ist zu lang für Typ bit varying(%d)" -#: utils/adt/varbit.c:1066 utils/adt/varbit.c:1168 utils/adt/varlena.c:843 -#: utils/adt/varlena.c:907 utils/adt/varlena.c:1051 utils/adt/varlena.c:2736 -#: utils/adt/varlena.c:2803 +#: utils/adt/varbit.c:1067 utils/adt/varbit.c:1169 utils/adt/varlena.c:841 +#: utils/adt/varlena.c:905 utils/adt/varlena.c:1049 utils/adt/varlena.c:2912 +#: utils/adt/varlena.c:2979 #, c-format msgid "negative substring length not allowed" msgstr "negative Teilzeichenkettenlänge nicht erlaubt" @@ -21544,7 +23094,7 @@ msgstr "binäres »Exklusiv-Oder« nicht mit Bitketten unterschiedlicher Länge msgid "bit index %d out of valid range (0..%d)" msgstr "Bitindex %d ist außerhalb des gültigen Bereichs (0..%d)" -#: utils/adt/varbit.c:1812 utils/adt/varlena.c:2995 +#: utils/adt/varbit.c:1812 utils/adt/varlena.c:3170 #, c-format msgid "new bit must be 0 or 1" msgstr "neues Bit muss 0 oder 1 sein" @@ -21559,68 +23109,78 @@ msgstr "Wert zu lang für Typ character(%d)" msgid "value too long for type character varying(%d)" msgstr "Wert zu lang für Typ character varying(%d)" -#: utils/adt/varlena.c:1421 utils/adt/varlena.c:1826 +#: utils/adt/varlena.c:1415 
utils/adt/varlena.c:1880 #, c-format msgid "could not determine which collation to use for string comparison" msgstr "konnte die für den Zeichenkettenvergleich zu verwendende Sortierfolge nicht bestimmen" -#: utils/adt/varlena.c:1479 utils/adt/varlena.c:1492 +#: utils/adt/varlena.c:1472 utils/adt/varlena.c:1485 #, c-format msgid "could not convert string to UTF-16: error code %lu" msgstr "konnte Zeichenkette nicht in UTF-16 umwandeln: Fehlercode %lu" -#: utils/adt/varlena.c:1507 +#: utils/adt/varlena.c:1500 #, c-format msgid "could not compare Unicode strings: %m" msgstr "konnte Unicode-Zeichenketten nicht vergleichen: %m" -#: utils/adt/varlena.c:2881 utils/adt/varlena.c:2912 utils/adt/varlena.c:2947 -#: utils/adt/varlena.c:2983 +#: utils/adt/varlena.c:1555 utils/adt/varlena.c:2176 +#, c-format +msgid "collation failed: %s" +msgstr "Vergleichung fehlgeschlagen: %s" + +#: utils/adt/varlena.c:2394 +#, c-format +msgid "sort key generation failed: %s" +msgstr "Sortierschlüsselerzeugung fehlgeschlagen: %s" + +#: utils/adt/varlena.c:3056 utils/adt/varlena.c:3087 utils/adt/varlena.c:3122 +#: utils/adt/varlena.c:3158 #, c-format msgid "index %d out of valid range, 0..%d" msgstr "Index %d ist außerhalb des gültigen Bereichs, 0..%d" -#: utils/adt/varlena.c:3912 +#: utils/adt/varlena.c:4089 #, c-format msgid "field position must be greater than zero" msgstr "Feldposition muss größer als null sein" -#: utils/adt/varlena.c:4791 +#: utils/adt/varlena.c:4968 #, c-format msgid "unterminated format() type specifier" msgstr "Typspezifikation in format() nicht abgeschlossen" -#: utils/adt/varlena.c:4792 utils/adt/varlena.c:4926 utils/adt/varlena.c:5047 +#: utils/adt/varlena.c:4969 utils/adt/varlena.c:5103 utils/adt/varlena.c:5224 #, c-format msgid "For a single \"%%\" use \"%%%%\"." msgstr "Für ein einzelnes »%%« geben Sie »%%%%« an." 
-#: utils/adt/varlena.c:4924 utils/adt/varlena.c:5045 +#: utils/adt/varlena.c:5101 utils/adt/varlena.c:5222 #, c-format msgid "unrecognized format() type specifier \"%c\"" msgstr "unbekannte Typspezifikation in format(): »%c«" -#: utils/adt/varlena.c:4937 utils/adt/varlena.c:4994 +#: utils/adt/varlena.c:5114 utils/adt/varlena.c:5171 #, c-format msgid "too few arguments for format()" msgstr "zu wenige Argumente für format()" -#: utils/adt/varlena.c:5089 utils/adt/varlena.c:5272 +#: utils/adt/varlena.c:5267 utils/adt/varlena.c:5449 #, c-format msgid "number is out of range" msgstr "Zahl ist außerhalb des gültigen Bereichs" -#: utils/adt/varlena.c:5153 utils/adt/varlena.c:5181 +#: utils/adt/varlena.c:5330 utils/adt/varlena.c:5358 #, c-format msgid "format specifies argument 0, but arguments are numbered from 1" msgstr "Format gibt Argument 0 an, aber die Argumente sind von 1 an nummeriert" -#: utils/adt/varlena.c:5174 +#: utils/adt/varlena.c:5351 #, c-format msgid "width argument position must be ended by \"$\"" msgstr "Argumentposition der Breitenangabe muss mit »$« enden" -#: utils/adt/varlena.c:5219 +#: utils/adt/varlena.c:5396 #, c-format msgid "null values cannot be formatted as an SQL identifier" msgstr "NULL-Werte können nicht als SQL-Bezeichner formatiert werden" @@ -21635,206 +23195,207 @@ msgstr "Argument von ntile muss größer als null sein" msgid "argument of nth_value must be greater than zero" msgstr "Argument von nth_value muss größer als null sein" -#: utils/adt/xml.c:217 +#: utils/adt/xml.c:220 #, c-format msgid "unsupported XML feature" msgstr "nicht unterstützte XML-Funktionalität" -#: utils/adt/xml.c:218 +#: utils/adt/xml.c:221 #, c-format msgid "This functionality requires the server to be built with libxml support." msgstr "Diese Funktionalität verlangt, dass der Server mit Libxml-Unterstützung gebaut wird." -#: utils/adt/xml.c:219 +#: utils/adt/xml.c:222 #, c-format msgid "You need to rebuild PostgreSQL using --with-libxml." msgstr "Sie müssen PostgreSQL mit --with-libxml neu bauen." -#: utils/adt/xml.c:238 utils/mb/mbutils.c:523 +#: utils/adt/xml.c:241 utils/mb/mbutils.c:512 #, c-format msgid "invalid encoding name \"%s\"" msgstr "ungültiger Kodierungsname »%s«" -#: utils/adt/xml.c:481 utils/adt/xml.c:486 +#: utils/adt/xml.c:484 utils/adt/xml.c:489 #, c-format msgid "invalid XML comment" msgstr "ungültiger XML-Kommentar" -#: utils/adt/xml.c:615 +#: utils/adt/xml.c:618 #, c-format msgid "not an XML document" msgstr "kein XML-Dokument" -#: utils/adt/xml.c:774 utils/adt/xml.c:797 +#: utils/adt/xml.c:777 utils/adt/xml.c:800 #, c-format msgid "invalid XML processing instruction" msgstr "ungültige XML-Verarbeitungsanweisung" -#: utils/adt/xml.c:775 +#: utils/adt/xml.c:778 #, c-format msgid "XML processing instruction target name cannot be \"%s\"." msgstr "Die Zielangabe der XML-Verarbeitungsanweisung darf nicht »%s« sein." -#: utils/adt/xml.c:798 +#: utils/adt/xml.c:801 #, c-format msgid "XML processing instruction cannot contain \"?>\"." msgstr "XML-Verarbeitungsanweisung darf nicht »?>« enthalten." -#: utils/adt/xml.c:877 +#: utils/adt/xml.c:880 #, c-format msgid "xmlvalidate is not implemented" msgstr "xmlvalidate ist nicht implementiert" -#: utils/adt/xml.c:956 +#: utils/adt/xml.c:959 #, c-format msgid "could not initialize XML library" msgstr "konnte XML-Bibliothek nicht initialisieren" -#: utils/adt/xml.c:957 +#: utils/adt/xml.c:960 #, c-format msgid "libxml2 has incompatible char type: sizeof(char)=%u, sizeof(xmlChar)=%u." 
msgstr "libxml2 hat inkompatiblen char-Typ: sizeof(char)=%u, sizeof(xmlChar)=%u." -#: utils/adt/xml.c:1043 +#: utils/adt/xml.c:1046 #, c-format msgid "could not set up XML error handler" msgstr "konnte XML-Fehlerbehandlung nicht einrichten" -#: utils/adt/xml.c:1044 +#: utils/adt/xml.c:1047 #, c-format msgid "This probably indicates that the version of libxml2 being used is not compatible with the libxml2 header files that PostgreSQL was built with." msgstr "Das deutet wahrscheinlich darauf hin, dass die verwendete Version von libxml2 nicht mit den Header-Dateien der Version, mit der PostgreSQL gebaut wurde, kompatibel ist." -#: utils/adt/xml.c:1794 +#: utils/adt/xml.c:1797 msgid "Invalid character value." msgstr "Ungültiger Zeichenwert." -#: utils/adt/xml.c:1797 +#: utils/adt/xml.c:1800 msgid "Space required." msgstr "Leerzeichen benötigt." -#: utils/adt/xml.c:1800 +#: utils/adt/xml.c:1803 msgid "standalone accepts only 'yes' or 'no'." msgstr "standalone akzeptiert nur »yes« oder »no«." -#: utils/adt/xml.c:1803 +#: utils/adt/xml.c:1806 msgid "Malformed declaration: missing version." msgstr "Fehlerhafte Deklaration: Version fehlt." -#: utils/adt/xml.c:1806 +#: utils/adt/xml.c:1809 msgid "Missing encoding in text declaration." msgstr "Fehlende Kodierung in Textdeklaration." -#: utils/adt/xml.c:1809 +#: utils/adt/xml.c:1812 msgid "Parsing XML declaration: '?>' expected." msgstr "Beim Parsen der XML-Deklaration: »?>« erwartet." -#: utils/adt/xml.c:1812 +#: utils/adt/xml.c:1815 #, c-format msgid "Unrecognized libxml error code: %d." msgstr "Unbekannter Libxml-Fehlercode: %d." -#: utils/adt/xml.c:2087 +#: utils/adt/xml.c:2090 #, c-format msgid "XML does not support infinite date values." msgstr "XML unterstützt keine unendlichen Datumswerte." -#: utils/adt/xml.c:2109 utils/adt/xml.c:2136 +#: utils/adt/xml.c:2112 utils/adt/xml.c:2139 #, c-format msgid "XML does not support infinite timestamp values." msgstr "XML unterstützt keine unendlichen timestamp-Werte." -#: utils/adt/xml.c:2539 +#: utils/adt/xml.c:2551 #, c-format msgid "invalid query" msgstr "ungültige Anfrage" -#: utils/adt/xml.c:3858 +#: utils/adt/xml.c:3874 #, c-format msgid "invalid array for XML namespace mapping" msgstr "ungültiges Array for XML-Namensraumabbildung" -#: utils/adt/xml.c:3859 +#: utils/adt/xml.c:3875 #, c-format msgid "The array must be two-dimensional with length of the second axis equal to 2." msgstr "Das Array muss zweidimensional sein und die Länge der zweiten Achse muss gleich 2 sein." -#: utils/adt/xml.c:3883 +#: utils/adt/xml.c:3899 #, c-format msgid "empty XPath expression" msgstr "leerer XPath-Ausdruck" -#: utils/adt/xml.c:3927 +#: utils/adt/xml.c:3954 #, c-format msgid "neither namespace name nor URI may be null" msgstr "weder Namensraumname noch URI dürfen NULL sein" -#: utils/adt/xml.c:3934 +#: utils/adt/xml.c:3961 #, c-format msgid "could not register XML namespace with name \"%s\" and URI \"%s\"" msgstr "konnte XML-Namensraum mit Namen »%s« und URI »%s« nicht registrieren" -#: utils/adt/xml.c:4288 -#, fuzzy, c-format -#| msgid "LIMIT #,# syntax is not supported" +#: utils/adt/xml.c:4315 +#, c-format msgid "DEFAULT namespace is not supported" -msgstr "Syntax LIMIT x,y wird nicht unterstützt" +msgstr "DEFAULT-Namensraum wird nicht unterstützt" -#: utils/adt/xml.c:4317 -#, fuzzy, c-format -#| msgid "Quoted identifier must not be empty." +#: utils/adt/xml.c:4344 +#, c-format msgid "row path filter must not be empty string" -msgstr "Bezeichner in Anführungszeichen darf nicht leer sein." 
+msgstr "Zeilenpfadfilter darf nicht leer sein" -#: utils/adt/xml.c:4348 -#, fuzzy, c-format -#| msgid "Quoted identifier must not be empty." +#: utils/adt/xml.c:4375 +#, c-format msgid "column path filter must not be empty string" -msgstr "Bezeichner in Anführungszeichen darf nicht leer sein." +msgstr "Spaltenpfadfilter darf nicht leer sein" -#: utils/adt/xml.c:4531 -#, fuzzy, c-format -#| msgid "more than one row returned by a subquery used as an expression" +#: utils/adt/xml.c:4557 +#, c-format msgid "more than one value returned by column XPath expression" -msgstr "als Ausdruck verwendete Unteranfrage ergab mehr als eine Zeile" +msgstr "XPath-Ausdruck für Spalte gab mehr als einen Wert zurück" -#: utils/cache/lsyscache.c:2580 utils/cache/lsyscache.c:2613 -#: utils/cache/lsyscache.c:2646 utils/cache/lsyscache.c:2679 +#: utils/cache/lsyscache.c:2630 utils/cache/lsyscache.c:2663 +#: utils/cache/lsyscache.c:2696 utils/cache/lsyscache.c:2729 #, c-format msgid "type %s is only a shell" msgstr "Typ %s ist nur eine Hülle" -#: utils/cache/lsyscache.c:2585 +#: utils/cache/lsyscache.c:2635 #, c-format msgid "no input function available for type %s" msgstr "keine Eingabefunktion verfügbar für Typ %s" -#: utils/cache/lsyscache.c:2618 +#: utils/cache/lsyscache.c:2668 #, c-format msgid "no output function available for type %s" msgstr "keine Ausgabefunktion verfügbar für Typ %s" -#: utils/cache/plancache.c:718 +#: utils/cache/partcache.c:202 +#, c-format +msgid "operator class \"%s\" of access method %s is missing support function %d for type %s" +msgstr "in Operatorklasse »%s« für Zugriffsmethode %s fehlt Support-Funktion %d für Typ %s" + +#: utils/cache/plancache.c:723 #, c-format msgid "cached plan must not change result type" msgstr "gecachter Plan darf den Ergebnistyp nicht ändern" -#: utils/cache/relcache.c:5700 +#: utils/cache/relcache.c:5750 #, c-format msgid "could not create relation-cache initialization file \"%s\": %m" msgstr "konnte Initialisierungsdatei für Relationscache »%s« nicht erzeugen: %m" -#: utils/cache/relcache.c:5702 +#: utils/cache/relcache.c:5752 #, c-format msgid "Continuing anyway, but there's something wrong." msgstr "Setze trotzdem fort, aber irgendwas stimmt nicht." 
-#: utils/cache/relcache.c:5976 +#: utils/cache/relcache.c:6026 #, c-format msgid "could not remove cache file \"%s\": %m" msgstr "konnte Cache-Datei »%s« nicht löschen: %m" -#: utils/cache/relmapper.c:508 +#: utils/cache/relmapper.c:509 #, c-format msgid "cannot PREPARE a transaction that modified relation mapping" msgstr "PREPARE kann nicht in einer Transaktion ausgeführt werden, die das Relation-Mapping geändert hat" @@ -21844,42 +23405,37 @@ msgstr "PREPARE kann nicht in einer Transaktion ausgeführt werden, die das Rela msgid "could not open relation mapping file \"%s\": %m" msgstr "konnte Relation-Mapping-Datei »%s« nicht öffnen: %m" -#: utils/cache/relmapper.c:664 +#: utils/cache/relmapper.c:665 #, c-format msgid "could not read relation mapping file \"%s\": %m" msgstr "konnte nicht aus Relation-Mapping-Datei »%s« lesen: %m" -#: utils/cache/relmapper.c:674 +#: utils/cache/relmapper.c:676 #, c-format msgid "relation mapping file \"%s\" contains invalid data" msgstr "Relation-Mapping-Datei »%s« enthält ungültige Daten" -#: utils/cache/relmapper.c:684 +#: utils/cache/relmapper.c:686 #, c-format msgid "relation mapping file \"%s\" contains incorrect checksum" msgstr "Relation-Mapping-Datei »%s« enthält falsche Prüfsumme" -#: utils/cache/relmapper.c:784 +#: utils/cache/relmapper.c:785 #, c-format msgid "could not write to relation mapping file \"%s\": %m" msgstr "konnte nicht in Relation-Mapping-Datei »%s« schreiben: %m" -#: utils/cache/relmapper.c:797 +#: utils/cache/relmapper.c:800 #, c-format msgid "could not fsync relation mapping file \"%s\": %m" msgstr "konnte Relation-Mapping-Datei »%s« nicht fsyncen: %m" -#: utils/cache/relmapper.c:803 +#: utils/cache/relmapper.c:807 #, c-format msgid "could not close relation mapping file \"%s\": %m" msgstr "konnte Relation-Mapping-Datei »%s« nicht schließen: %m" -#: utils/cache/typcache.c:1211 -#, c-format -msgid "type %s is not composite" -msgstr "Typ %s ist kein zusammengesetzter Typ" - -#: utils/cache/typcache.c:1225 +#: utils/cache/typcache.c:1623 utils/fmgr/funcapi.c:435 #, c-format msgid "record type has not been registered" msgstr "Record-Typ wurde nicht registriert" @@ -21909,513 +23465,550 @@ msgstr "konnte Datei »%s« nicht als stderr neu öffnen: %m" msgid "could not reopen file \"%s\" as stdout: %m" msgstr "konnte Datei »%s« nicht als stdout neu öffnen: %m" -#: utils/error/elog.c:2389 utils/error/elog.c:2406 utils/error/elog.c:2422 +#: utils/error/elog.c:2394 utils/error/elog.c:2411 utils/error/elog.c:2427 msgid "[unknown]" msgstr "[unbekannt]" -#: utils/error/elog.c:2882 utils/error/elog.c:3185 utils/error/elog.c:3293 +#: utils/error/elog.c:2887 utils/error/elog.c:3190 utils/error/elog.c:3298 msgid "missing error text" msgstr "fehlender Fehlertext" -#: utils/error/elog.c:2885 utils/error/elog.c:2888 utils/error/elog.c:3296 -#: utils/error/elog.c:3299 +#: utils/error/elog.c:2890 utils/error/elog.c:2893 utils/error/elog.c:3301 +#: utils/error/elog.c:3304 #, c-format msgid " at character %d" msgstr " bei Zeichen %d" -#: utils/error/elog.c:2898 utils/error/elog.c:2905 +#: utils/error/elog.c:2903 utils/error/elog.c:2910 msgid "DETAIL: " msgstr "DETAIL: " -#: utils/error/elog.c:2912 +#: utils/error/elog.c:2917 msgid "HINT: " msgstr "TIPP: " -#: utils/error/elog.c:2919 +#: utils/error/elog.c:2924 msgid "QUERY: " msgstr "ANFRAGE: " -#: utils/error/elog.c:2926 +#: utils/error/elog.c:2931 msgid "CONTEXT: " msgstr "ZUSAMMENHANG: " -#: utils/error/elog.c:2936 +#: utils/error/elog.c:2941 #, c-format msgid "LOCATION: %s, %s:%d\n" msgstr "ORT: %s, 
%s:%d\n" -#: utils/error/elog.c:2943 +#: utils/error/elog.c:2948 #, c-format msgid "LOCATION: %s:%d\n" msgstr "ORT: %s:%d\n" -#: utils/error/elog.c:2957 +#: utils/error/elog.c:2962 msgid "STATEMENT: " msgstr "ANWEISUNG: " #. translator: This string will be truncated at 47 #. characters expanded. -#: utils/error/elog.c:3414 +#: utils/error/elog.c:3419 #, c-format msgid "operating system error %d" msgstr "Betriebssystemfehler %d" -#: utils/error/elog.c:3612 +#: utils/error/elog.c:3617 msgid "DEBUG" msgstr "DEBUG" -#: utils/error/elog.c:3616 +#: utils/error/elog.c:3621 msgid "LOG" msgstr "LOG" -#: utils/error/elog.c:3619 +#: utils/error/elog.c:3624 msgid "INFO" msgstr "INFO" -#: utils/error/elog.c:3622 +#: utils/error/elog.c:3627 msgid "NOTICE" msgstr "HINWEIS" -#: utils/error/elog.c:3625 +#: utils/error/elog.c:3630 msgid "WARNING" msgstr "WARNUNG" -#: utils/error/elog.c:3628 +#: utils/error/elog.c:3633 msgid "ERROR" msgstr "FEHLER" -#: utils/error/elog.c:3631 +#: utils/error/elog.c:3636 msgid "FATAL" msgstr "FATAL" -#: utils/error/elog.c:3634 +#: utils/error/elog.c:3639 msgid "PANIC" msgstr "PANIK" -#: utils/fmgr/dfmgr.c:117 +#: utils/fmgr/dfmgr.c:121 #, c-format msgid "could not find function \"%s\" in file \"%s\"" msgstr "konnte Funktion »%s« nicht in Datei »%s« finden" -#: utils/fmgr/dfmgr.c:196 utils/fmgr/dfmgr.c:413 utils/fmgr/dfmgr.c:461 -#, c-format -msgid "could not access file \"%s\": %m" -msgstr "konnte nicht auf Datei »%s« zugreifen: %m" - -#: utils/fmgr/dfmgr.c:234 +#: utils/fmgr/dfmgr.c:239 #, c-format msgid "could not load library \"%s\": %s" msgstr "konnte Bibliothek »%s« nicht laden: %s" -#: utils/fmgr/dfmgr.c:266 +#: utils/fmgr/dfmgr.c:271 #, c-format msgid "incompatible library \"%s\": missing magic block" msgstr "inkompatible Bibliothek »%s«: magischer Block fehlt" -#: utils/fmgr/dfmgr.c:268 +#: utils/fmgr/dfmgr.c:273 #, c-format msgid "Extension libraries are required to use the PG_MODULE_MAGIC macro." msgstr "Erweiterungsbibliotheken müssen das Makro PG_MODULE_MAGIC verwenden." -#: utils/fmgr/dfmgr.c:314 +#: utils/fmgr/dfmgr.c:319 #, c-format msgid "incompatible library \"%s\": version mismatch" msgstr "inkompatible Bibliothek »%s«: Version stimmt nicht überein" -#: utils/fmgr/dfmgr.c:316 +#: utils/fmgr/dfmgr.c:321 #, c-format msgid "Server is version %d, library is version %s." msgstr "Serverversion ist %d, Bibliotheksversion ist %s." -#: utils/fmgr/dfmgr.c:333 +#: utils/fmgr/dfmgr.c:338 #, c-format msgid "Server has FUNC_MAX_ARGS = %d, library has %d." msgstr "Server hat FUNC_MAX_ARGS = %d, Bibliothek hat %d." -#: utils/fmgr/dfmgr.c:342 +#: utils/fmgr/dfmgr.c:347 #, c-format msgid "Server has INDEX_MAX_KEYS = %d, library has %d." msgstr "Server hat INDEX_MAX_KEYS = %d, Bibliothek hat %d." -#: utils/fmgr/dfmgr.c:351 +#: utils/fmgr/dfmgr.c:356 #, c-format msgid "Server has NAMEDATALEN = %d, library has %d." msgstr "Server hat NAMEDATALEN = %d, Bibliothek hat %d." -#: utils/fmgr/dfmgr.c:360 +#: utils/fmgr/dfmgr.c:365 #, c-format msgid "Server has FLOAT4PASSBYVAL = %s, library has %s." msgstr "Server hat FLOAT4PASSBYVAL = %s, Bibliothek hat %s." -#: utils/fmgr/dfmgr.c:369 +#: utils/fmgr/dfmgr.c:374 #, c-format msgid "Server has FLOAT8PASSBYVAL = %s, library has %s." msgstr "Server hat FLOAT8PASSBYVAL = %s, Bibliothek hat %s." -#: utils/fmgr/dfmgr.c:376 +#: utils/fmgr/dfmgr.c:381 msgid "Magic block has unexpected length or padding difference." msgstr "Magischer Block hat unerwartete Länge oder unterschiedliches Padding." 
-#: utils/fmgr/dfmgr.c:379 +#: utils/fmgr/dfmgr.c:384 #, c-format msgid "incompatible library \"%s\": magic block mismatch" msgstr "inkompatible Bibliothek »%s«: magischer Block stimmt nicht überein" -#: utils/fmgr/dfmgr.c:543 +#: utils/fmgr/dfmgr.c:548 #, c-format msgid "access to library \"%s\" is not allowed" msgstr "Zugriff auf Bibliothek »%s« ist nicht erlaubt" -#: utils/fmgr/dfmgr.c:569 +#: utils/fmgr/dfmgr.c:574 #, c-format msgid "invalid macro name in dynamic library path: %s" msgstr "ungültiger Makroname in Parameter »dynamic_library_path«: %s" -#: utils/fmgr/dfmgr.c:609 +#: utils/fmgr/dfmgr.c:614 #, c-format msgid "zero-length component in parameter \"dynamic_library_path\"" msgstr "eine Komponente im Parameter »dynamic_library_path« hat Länge null" -#: utils/fmgr/dfmgr.c:628 +#: utils/fmgr/dfmgr.c:633 #, c-format msgid "component in parameter \"dynamic_library_path\" is not an absolute path" msgstr "eine Komponente im Parameter »dynamic_library_path« ist kein absoluter Pfad" -#: utils/fmgr/fmgr.c:271 +#: utils/fmgr/fmgr.c:236 #, c-format msgid "internal function \"%s\" is not in internal lookup table" msgstr "interne Funktion »%s« ist nicht in der internen Suchtabelle" -#: utils/fmgr/fmgr.c:478 +#: utils/fmgr/fmgr.c:485 #, c-format -msgid "unrecognized API version %d reported by info function \"%s\"" -msgstr "Info-Funktion »%2$s« berichtete unbekannte API-Version %1$d" +msgid "could not find function information for function \"%s\"" +msgstr "konnte Funktionsinformationen für Funktion »%s« nicht finden" + +#: utils/fmgr/fmgr.c:487 +#, c-format +msgid "SQL-callable functions need an accompanying PG_FUNCTION_INFO_V1(funcname)." +msgstr "Von SQL aufrufbare Funktionen benötigen ein begleitendes PG_FUNCTION_INFO_V1(funkname)." -#: utils/fmgr/fmgr.c:848 utils/fmgr/fmgr.c:2068 +#: utils/fmgr/fmgr.c:505 #, c-format -msgid "function %u has too many arguments (%d, maximum is %d)" -msgstr "Funktion %u hat zu viele Argumente (%d, Maximum ist %d)" +msgid "unrecognized API version %d reported by info function \"%s\"" +msgstr "Info-Funktion »%2$s« berichtete unbekannte API-Version %1$d" -#: utils/fmgr/fmgr.c:2443 +#: utils/fmgr/fmgr.c:2210 #, c-format msgid "language validation function %u called for language %u instead of %u" msgstr "Sprachvalidierungsfunktion %u wurde für Sprache %u statt %u aufgerufen" -#: utils/fmgr/funcapi.c:354 +#: utils/fmgr/funcapi.c:358 #, c-format msgid "could not determine actual result type for function \"%s\" declared to return type %s" msgstr "konnte tatsächlichen Ergebnistyp von Funktion »%s« mit deklarierten Rückgabetyp %s nicht bestimmen" -#: utils/fmgr/funcapi.c:1341 utils/fmgr/funcapi.c:1372 +#: utils/fmgr/funcapi.c:1403 utils/fmgr/funcapi.c:1435 #, c-format msgid "number of aliases does not match number of columns" msgstr "Anzahl der Aliasnamen stimmt nicht mit der Anzahl der Spalten überein" -#: utils/fmgr/funcapi.c:1366 +#: utils/fmgr/funcapi.c:1429 #, c-format msgid "no column alias was provided" msgstr "Spaltenalias fehlt" -#: utils/fmgr/funcapi.c:1390 +#: utils/fmgr/funcapi.c:1453 +#, c-format +msgid "could not determine row description for function returning record" +msgstr "konnte Zeilenbeschreibung für Funktion, die »record« zurückgibt, nicht ermitteln" + +#: utils/init/miscinit.c:108 +#, c-format +msgid "data directory \"%s\" does not exist" +msgstr "Datenverzeichnis »%s« existiert nicht" + +#: utils/init/miscinit.c:113 +#, c-format +msgid "could not read permissions of directory \"%s\": %m" +msgstr "konnte Zugriffsrechte von Verzeichnis »%s« nicht lesen: 
%m" + +#: utils/init/miscinit.c:121 +#, c-format +msgid "specified data directory \"%s\" is not a directory" +msgstr "angegebenes Datenverzeichnis »%s« ist kein Verzeichnis" + +#: utils/init/miscinit.c:137 +#, c-format +msgid "data directory \"%s\" has wrong ownership" +msgstr "Datenverzeichnis »%s« hat falschen Eigentümer" + +#: utils/init/miscinit.c:139 #, c-format -msgid "could not determine row description for function returning record" -msgstr "konnte Zeilenbeschreibung für Funktion, die »record« zurückgibt, nicht ermitteln" +msgid "The server must be started by the user that owns the data directory." +msgstr "Der Server muss von dem Benutzer gestartet werden, dem das Datenverzeichnis gehört." -#: utils/init/miscinit.c:121 +#: utils/init/miscinit.c:157 +#, fuzzy, c-format +#| msgid "data directory \"%s\" has wrong ownership" +msgid "data directory \"%s\" has invalid permissions" +msgstr "Datenverzeichnis »%s« hat falschen Eigentümer" + +#: utils/init/miscinit.c:159 +#, fuzzy, c-format +#| msgid "Permissions should be u=rwx (0700)." +msgid "Permissions should be u=rwx (0700) or u=rwx,g=rx (0750)." +msgstr "Rechte sollten u=rwx (0700) sein." + +#: utils/init/miscinit.c:218 #, c-format msgid "could not change directory to \"%s\": %m" msgstr "konnte nicht in Verzeichnis »%s« wechseln: %m" -#: utils/init/miscinit.c:449 utils/misc/guc.c:6082 +#: utils/init/miscinit.c:546 utils/misc/guc.c:6350 #, c-format msgid "cannot set parameter \"%s\" within security-restricted operation" msgstr "kann Parameter »%s« nicht in einer sicherheitsbeschränkten Operation setzen" -#: utils/init/miscinit.c:510 +#: utils/init/miscinit.c:607 #, c-format msgid "role with OID %u does not exist" msgstr "Rolle mit OID %u existiert nicht" -#: utils/init/miscinit.c:540 +#: utils/init/miscinit.c:637 #, c-format msgid "role \"%s\" is not permitted to log in" msgstr "Rolle »%s« hat keine Berechtigung zum Einloggen" -#: utils/init/miscinit.c:558 +#: utils/init/miscinit.c:655 #, c-format msgid "too many connections for role \"%s\"" msgstr "zu viele Verbindungen von Rolle »%s«" -#: utils/init/miscinit.c:618 +#: utils/init/miscinit.c:715 #, c-format msgid "permission denied to set session authorization" msgstr "keine Berechtigung, um Sitzungsautorisierung zu setzen" -#: utils/init/miscinit.c:701 +#: utils/init/miscinit.c:798 #, c-format msgid "invalid role OID: %u" msgstr "ungültige Rollen-OID: %u" -#: utils/init/miscinit.c:755 +#: utils/init/miscinit.c:852 #, c-format msgid "database system is shut down" msgstr "Datenbanksystem ist heruntergefahren" -#: utils/init/miscinit.c:842 +#: utils/init/miscinit.c:939 #, c-format msgid "could not create lock file \"%s\": %m" msgstr "konnte Sperrdatei »%s« nicht erstellen: %m" -#: utils/init/miscinit.c:856 +#: utils/init/miscinit.c:953 #, c-format msgid "could not open lock file \"%s\": %m" msgstr "konnte Sperrdatei »%s« nicht öffnen: %m" -#: utils/init/miscinit.c:862 +#: utils/init/miscinit.c:960 #, c-format msgid "could not read lock file \"%s\": %m" msgstr "konnte Sperrdatei »%s« nicht lesen: %m" -#: utils/init/miscinit.c:870 +#: utils/init/miscinit.c:969 #, c-format msgid "lock file \"%s\" is empty" msgstr "Sperrdatei »%s« ist leer" -#: utils/init/miscinit.c:871 +#: utils/init/miscinit.c:970 #, c-format msgid "Either another server is starting, or the lock file is the remnant of a previous server startup crash." msgstr "Entweder startet gerade ein anderer Server oder die Sperrdatei ist von einen Absturz übrig geblieben." 
-#: utils/init/miscinit.c:918 +#: utils/init/miscinit.c:1014 #, c-format msgid "lock file \"%s\" already exists" msgstr "Sperrdatei »%s« existiert bereits" -#: utils/init/miscinit.c:922 +#: utils/init/miscinit.c:1018 #, c-format msgid "Is another postgres (PID %d) running in data directory \"%s\"?" msgstr "Läuft bereits ein anderer postgres-Prozess (PID %d) im Datenverzeichnis »%s«?" -#: utils/init/miscinit.c:924 +#: utils/init/miscinit.c:1020 #, c-format msgid "Is another postmaster (PID %d) running in data directory \"%s\"?" msgstr "Läuft bereits ein anderer postmaster-Prozess (PID %d) im Datenverzeichnis »%s«?" -#: utils/init/miscinit.c:927 +#: utils/init/miscinit.c:1023 #, c-format msgid "Is another postgres (PID %d) using socket file \"%s\"?" msgstr "Verwendet bereits ein anderer postgres-Prozess (PID %d) die Socketdatei »%s«?" -#: utils/init/miscinit.c:929 +#: utils/init/miscinit.c:1025 #, c-format msgid "Is another postmaster (PID %d) using socket file \"%s\"?" msgstr "Verwendet bereits ein anderer postmaster-Prozess (PID %d) die Socketdatei »%s«?" -#: utils/init/miscinit.c:965 +#: utils/init/miscinit.c:1061 #, c-format msgid "pre-existing shared memory block (key %lu, ID %lu) is still in use" msgstr "bereits bestehender Shared-Memory-Block (Schlüssel %lu, ID %lu) wird noch benutzt" -#: utils/init/miscinit.c:968 +#: utils/init/miscinit.c:1064 #, c-format msgid "If you're sure there are no old server processes still running, remove the shared memory block or just delete the file \"%s\"." msgstr "Wenn Sie sich sicher sind, dass kein alter Serverprozess mehr läuft, entfernen Sie den Shared-Memory-Block oder löschen Sie einfach die Datei »%s«." -#: utils/init/miscinit.c:984 +#: utils/init/miscinit.c:1080 #, c-format msgid "could not remove old lock file \"%s\": %m" msgstr "konnte alte Sperrdatei »%s« nicht löschen: %m" -#: utils/init/miscinit.c:986 +#: utils/init/miscinit.c:1082 #, c-format msgid "The file seems accidentally left over, but it could not be removed. Please remove the file by hand and try again." msgstr "Die Datei ist anscheinend aus Versehen übrig geblieben, konnte aber nicht gelöscht werden. Bitte entfernen Sie die Datei von Hand und versuchen Sie es erneut." -#: utils/init/miscinit.c:1022 utils/init/miscinit.c:1033 -#: utils/init/miscinit.c:1043 +#: utils/init/miscinit.c:1119 utils/init/miscinit.c:1133 +#: utils/init/miscinit.c:1144 #, c-format msgid "could not write lock file \"%s\": %m" msgstr "konnte Sperrdatei »%s« nicht schreiben: %m" -#: utils/init/miscinit.c:1172 utils/init/miscinit.c:1301 utils/misc/guc.c:8883 +#: utils/init/miscinit.c:1276 utils/init/miscinit.c:1419 utils/misc/guc.c:9190 #, c-format msgid "could not read from file \"%s\": %m" msgstr "konnte nicht aus Datei »%s« lesen: %m" -#: utils/init/miscinit.c:1291 +#: utils/init/miscinit.c:1407 #, c-format msgid "could not open file \"%s\": %m; continuing anyway" msgstr "konnte Datei »%s« nicht öffnen: %m; setze trotzdem fort" -#: utils/init/miscinit.c:1314 +#: utils/init/miscinit.c:1432 #, c-format msgid "lock file \"%s\" contains wrong PID: %ld instead of %ld" msgstr "Sperrdatei »%s« enthält falsche PID: %ld statt %ld" -#: utils/init/miscinit.c:1353 utils/init/miscinit.c:1369 +#: utils/init/miscinit.c:1471 utils/init/miscinit.c:1487 #, c-format msgid "\"%s\" is not a valid data directory" msgstr "»%s« ist kein gültiges Datenverzeichnis" -#: utils/init/miscinit.c:1355 +#: utils/init/miscinit.c:1473 #, c-format msgid "File \"%s\" is missing." msgstr "Die Datei »%s« fehlt." 
-#: utils/init/miscinit.c:1371 +#: utils/init/miscinit.c:1489 #, c-format msgid "File \"%s\" does not contain valid data." msgstr "Die Datei »%s« enthält keine gültigen Daten." -#: utils/init/miscinit.c:1373 +#: utils/init/miscinit.c:1491 #, c-format msgid "You might need to initdb." msgstr "Sie müssen möglicherweise initdb ausführen." -#: utils/init/miscinit.c:1381 +#: utils/init/miscinit.c:1499 #, c-format msgid "The data directory was initialized by PostgreSQL version %s, which is not compatible with this version %s." msgstr "Das Datenverzeichnis wurde von PostgreSQL Version %s initialisiert, welche nicht mit dieser Version %s kompatibel ist." -#: utils/init/miscinit.c:1452 +#: utils/init/miscinit.c:1566 #, c-format msgid "loaded library \"%s\"" msgstr "Bibliothek »%s« geladen" -#: utils/init/postinit.c:251 +#: utils/init/postinit.c:252 #, c-format -msgid "replication connection authorized: user=%s SSL enabled (protocol=%s, cipher=%s, compression=%s)" -msgstr "Replikationsverbindung autorisiert: Benutzer=%s SSL an (Protokoll=%s, Verschlüsselungsmethode=%s, Komprimierung=%s)" +msgid "replication connection authorized: user=%s SSL enabled (protocol=%s, cipher=%s, bits=%d, compression=%s)" +msgstr "Replikationsverbindung autorisiert: Benutzer=%s SSL an (Protokoll=%s, Verschlüsselungsmethode=%s, Bits=%d, Komprimierung=%s)" -#: utils/init/postinit.c:253 utils/init/postinit.c:267 +#: utils/init/postinit.c:257 utils/init/postinit.c:274 msgid "off" msgstr "aus" -#: utils/init/postinit.c:253 utils/init/postinit.c:267 +#: utils/init/postinit.c:257 utils/init/postinit.c:274 msgid "on" msgstr "an" -#: utils/init/postinit.c:257 +#: utils/init/postinit.c:261 #, c-format msgid "replication connection authorized: user=%s" msgstr "Replikationsverbindung autorisiert: Benutzer=%s" -#: utils/init/postinit.c:265 +#: utils/init/postinit.c:269 #, c-format -msgid "connection authorized: user=%s database=%s SSL enabled (protocol=%s, cipher=%s, compression=%s)" -msgstr "Verbindung autorisiert: Benutzer=%s Datenbank=%s SSL an (Protokoll=%s, Verschlüsselungsmethode=%s, Komprimierung=%s)" +msgid "connection authorized: user=%s database=%s SSL enabled (protocol=%s, cipher=%s, bits=%d, compression=%s)" +msgstr "Verbindung autorisiert: Benutzer=%s Datenbank=%s SSL an (Protokoll=%s, Verschlüsselungsmethode=%s, Bits=%d, Komprimierung=%s)" -#: utils/init/postinit.c:271 +#: utils/init/postinit.c:278 #, c-format msgid "connection authorized: user=%s database=%s" msgstr "Verbindung autorisiert: Benutzer=%s Datenbank=%s" -#: utils/init/postinit.c:303 +#: utils/init/postinit.c:310 #, c-format msgid "database \"%s\" has disappeared from pg_database" msgstr "Datenbank »%s« ist aus pg_database verschwunden" -#: utils/init/postinit.c:305 +#: utils/init/postinit.c:312 #, c-format msgid "Database OID %u now seems to belong to \"%s\"." msgstr "Datenbank-OID %u gehört jetzt anscheinend zu »%s«." -#: utils/init/postinit.c:325 +#: utils/init/postinit.c:332 #, c-format msgid "database \"%s\" is not currently accepting connections" msgstr "Datenbank »%s« akzeptiert gegenwärtig keine Verbindungen" -#: utils/init/postinit.c:338 +#: utils/init/postinit.c:345 #, c-format msgid "permission denied for database \"%s\"" msgstr "keine Berechtigung für Datenbank »%s«" -#: utils/init/postinit.c:339 +#: utils/init/postinit.c:346 #, c-format msgid "User does not have CONNECT privilege." msgstr "Benutzer hat das CONNECT-Privileg nicht." 
-#: utils/init/postinit.c:356 +#: utils/init/postinit.c:363 #, c-format msgid "too many connections for database \"%s\"" msgstr "zu viele Verbindungen für Datenbank »%s«" -#: utils/init/postinit.c:378 utils/init/postinit.c:385 +#: utils/init/postinit.c:385 utils/init/postinit.c:392 #, c-format msgid "database locale is incompatible with operating system" msgstr "Datenbank-Locale ist inkompatibel mit Betriebssystem" -#: utils/init/postinit.c:379 +#: utils/init/postinit.c:386 #, c-format msgid "The database was initialized with LC_COLLATE \"%s\", which is not recognized by setlocale()." msgstr "Die Datenbank wurde mit LC_COLLATE »%s« initialisiert, was von setlocale() nicht erkannt wird." -#: utils/init/postinit.c:381 utils/init/postinit.c:388 +#: utils/init/postinit.c:388 utils/init/postinit.c:395 #, c-format msgid "Recreate the database with another locale or install the missing locale." msgstr "Erzeugen Sie die Datenbank neu mit einer anderen Locale oder installieren Sie die fehlende Locale." -#: utils/init/postinit.c:386 +#: utils/init/postinit.c:393 #, c-format msgid "The database was initialized with LC_CTYPE \"%s\", which is not recognized by setlocale()." msgstr "Die Datenbank wurde mit LC_CTYPE »%s« initialisiert, was von setlocale() nicht erkannt wird." -#: utils/init/postinit.c:714 +#: utils/init/postinit.c:726 #, c-format msgid "no roles are defined in this database system" msgstr "in diesem Datenbanksystem sind keine Rollen definiert" -#: utils/init/postinit.c:715 +#: utils/init/postinit.c:727 #, c-format msgid "You should immediately run CREATE USER \"%s\" SUPERUSER;." msgstr "Sie sollten sofort CREATE USER \"%s\" SUPERUSER; ausführen." -#: utils/init/postinit.c:751 +#: utils/init/postinit.c:763 #, c-format msgid "new replication connections are not allowed during database shutdown" msgstr "während des Herunterfahrens der Datenbank sind keine neuen Replikationsverbindungen erlaubt" -#: utils/init/postinit.c:755 +#: utils/init/postinit.c:767 #, c-format msgid "must be superuser to connect during database shutdown" msgstr "nur Superuser können während des Herunterfahrens der Datenbank verbinden" -#: utils/init/postinit.c:765 +#: utils/init/postinit.c:777 #, c-format msgid "must be superuser to connect in binary upgrade mode" msgstr "nur Superuser können im Binary-Upgrade-Modus verbinden" -#: utils/init/postinit.c:779 +#: utils/init/postinit.c:791 #, c-format msgid "remaining connection slots are reserved for non-replication superuser connections" msgstr "die verbleibenden Verbindungen sind für Superuser auf Nicht-Replikationsverbindungen reserviert" -#: utils/init/postinit.c:789 +#: utils/init/postinit.c:801 #, c-format msgid "must be superuser or replication role to start walsender" msgstr "nur Superuser und Replikationsrollen können WAL-Sender starten" -#: utils/init/postinit.c:858 +#: utils/init/postinit.c:870 #, c-format msgid "database %u does not exist" msgstr "Datenbank %u existiert nicht" -#: utils/init/postinit.c:944 +#: utils/init/postinit.c:959 #, c-format msgid "It seems to have just been dropped or renamed." msgstr "Sie wurde anscheinend gerade gelöscht oder umbenannt." -#: utils/init/postinit.c:962 +#: utils/init/postinit.c:977 #, c-format msgid "The database subdirectory \"%s\" is missing." msgstr "Das Datenbankunterverzeichnis »%s« fehlt." 
-#: utils/init/postinit.c:967 +#: utils/init/postinit.c:982 #, c-format msgid "could not access directory \"%s\": %m" msgstr "konnte nicht auf Verzeichnis »%s« zugreifen: %m" -#: utils/mb/conv.c:488 utils/mb/conv.c:679 +#: utils/mb/conv.c:488 utils/mb/conv.c:680 #, c-format msgid "invalid encoding number: %d" msgstr "ungültige Kodierungsnummer: %d" @@ -22432,42 +24025,47 @@ msgstr "unerwartete Kodierungs-ID %d für ISO-8859-Zeichensatz" msgid "unexpected encoding ID %d for WIN character sets" msgstr "unerwartete Kodierungs-ID %d für WIN-Zeichensatz" -#: utils/mb/encnames.c:496 +#: utils/mb/encnames.c:473 +#, c-format +msgid "encoding \"%s\" not supported by ICU" +msgstr "Kodierung »%s« wird von ICU nicht unterstützt" + +#: utils/mb/encnames.c:572 #, c-format msgid "encoding name too long" msgstr "Kodierungsname zu lang" -#: utils/mb/mbutils.c:307 +#: utils/mb/mbutils.c:296 #, c-format msgid "conversion between %s and %s is not supported" msgstr "Umwandlung zwischen %s und %s wird nicht unterstützt" -#: utils/mb/mbutils.c:366 +#: utils/mb/mbutils.c:355 #, c-format msgid "default conversion function for encoding \"%s\" to \"%s\" does not exist" msgstr "Standardumwandlung von Kodierung »%s« nach »%s« existiert nicht" -#: utils/mb/mbutils.c:377 utils/mb/mbutils.c:710 +#: utils/mb/mbutils.c:366 utils/mb/mbutils.c:699 #, c-format msgid "String of %d bytes is too long for encoding conversion." msgstr "Zeichenkette mit %d Bytes ist zu lang für Kodierungsumwandlung." -#: utils/mb/mbutils.c:464 +#: utils/mb/mbutils.c:453 #, c-format msgid "invalid source encoding name \"%s\"" msgstr "ungültiger Quellkodierungsname »%s«" -#: utils/mb/mbutils.c:469 +#: utils/mb/mbutils.c:458 #, c-format msgid "invalid destination encoding name \"%s\"" msgstr "ungültiger Zielkodierungsname »%s«" -#: utils/mb/mbutils.c:609 +#: utils/mb/mbutils.c:598 #, c-format msgid "invalid byte value for encoding \"%s\": 0x%02x" msgstr "ungültiger Byte-Wert für Kodierung »%s«: 0x%02x" -#: utils/mb/mbutils.c:951 +#: utils/mb/mbutils.c:940 #, c-format msgid "bind_textdomain_codeset failed" msgstr "bind_textdomain_codeset fehlgeschlagen" @@ -22482,25 +24080,29 @@ msgstr "ungültige Byte-Sequenz für Kodierung »%s«: %s" msgid "character with byte sequence %s in encoding \"%s\" has no equivalent in encoding \"%s\"" msgstr "Zeichen mit Byte-Folge %s in Kodierung »%s« hat keine Entsprechung in Kodierung »%s«" -#: utils/misc/guc.c:573 +#: utils/misc/guc.c:571 msgid "Ungrouped" msgstr "Ungruppiert" -#: utils/misc/guc.c:575 +#: utils/misc/guc.c:573 msgid "File Locations" msgstr "Dateipfade" -#: utils/misc/guc.c:577 +#: utils/misc/guc.c:575 msgid "Connections and Authentication" msgstr "Verbindungen und Authentifizierung" -#: utils/misc/guc.c:579 +#: utils/misc/guc.c:577 msgid "Connections and Authentication / Connection Settings" msgstr "Verbindungen und Authentifizierung / Verbindungseinstellungen" +#: utils/misc/guc.c:579 +msgid "Connections and Authentication / Authentication" +msgstr "Verbindungen und Authentifizierung / Authentifizierung" + #: utils/misc/guc.c:581 -msgid "Connections and Authentication / Security and Authentication" -msgstr "Verbindungen und Authentifizierung / Sicherheit und Authentifizierung" +msgid "Connections and Authentication / SSL" +msgstr "Verbindungen und Authentifizierung / SSL" #: utils/misc/guc.c:583 msgid "Resource Usage" @@ -22563,1463 +24165,1599 @@ msgid "Replication / Standby Servers" msgstr "Replikation / Standby-Server" #: utils/misc/guc.c:613 +msgid "Replication / Subscribers" +msgstr "Replikation / 
Subskriptionsserver" + +#: utils/misc/guc.c:615 msgid "Query Tuning" msgstr "Anfragetuning" -#: utils/misc/guc.c:615 +#: utils/misc/guc.c:617 msgid "Query Tuning / Planner Method Configuration" msgstr "Anfragetuning / Planermethoden" -#: utils/misc/guc.c:617 +#: utils/misc/guc.c:619 msgid "Query Tuning / Planner Cost Constants" msgstr "Anfragetuning / Planerkosten" -#: utils/misc/guc.c:619 +#: utils/misc/guc.c:621 msgid "Query Tuning / Genetic Query Optimizer" msgstr "Anfragetuning / Genetischer Anfrageoptimierer" -#: utils/misc/guc.c:621 +#: utils/misc/guc.c:623 msgid "Query Tuning / Other Planner Options" msgstr "Anfragetuning / Andere Planeroptionen" -#: utils/misc/guc.c:623 +#: utils/misc/guc.c:625 msgid "Reporting and Logging" msgstr "Berichte und Logging" -#: utils/misc/guc.c:625 +#: utils/misc/guc.c:627 msgid "Reporting and Logging / Where to Log" msgstr "Berichte und Logging / Wohin geloggt wird" -#: utils/misc/guc.c:627 +#: utils/misc/guc.c:629 msgid "Reporting and Logging / When to Log" msgstr "Berichte und Logging / Wann geloggt wird" -#: utils/misc/guc.c:629 +#: utils/misc/guc.c:631 msgid "Reporting and Logging / What to Log" msgstr "Berichte und Logging / Was geloggt wird" -#: utils/misc/guc.c:631 +#: utils/misc/guc.c:633 msgid "Process Title" msgstr "Prozesstitel" -#: utils/misc/guc.c:633 +#: utils/misc/guc.c:635 msgid "Statistics" msgstr "Statistiken" -#: utils/misc/guc.c:635 +#: utils/misc/guc.c:637 msgid "Statistics / Monitoring" msgstr "Statistiken / Überwachung" -#: utils/misc/guc.c:637 +#: utils/misc/guc.c:639 msgid "Statistics / Query and Index Statistics Collector" msgstr "Statistiken / Statistiksammler für Anfragen und Indexe" -#: utils/misc/guc.c:639 +#: utils/misc/guc.c:641 msgid "Autovacuum" msgstr "Autovacuum" -#: utils/misc/guc.c:641 +#: utils/misc/guc.c:643 msgid "Client Connection Defaults" msgstr "Standardeinstellungen für Clientverbindungen" -#: utils/misc/guc.c:643 +#: utils/misc/guc.c:645 msgid "Client Connection Defaults / Statement Behavior" msgstr "Standardeinstellungen für Clientverbindungen / Anweisungsverhalten" -#: utils/misc/guc.c:645 +#: utils/misc/guc.c:647 msgid "Client Connection Defaults / Locale and Formatting" msgstr "Standardeinstellungen für Clientverbindungen / Locale und Formatierung" -#: utils/misc/guc.c:647 +#: utils/misc/guc.c:649 msgid "Client Connection Defaults / Shared Library Preloading" msgstr "Standardeinstellungen für Clientverbindungen / Shared Library Preloading" -#: utils/misc/guc.c:649 +#: utils/misc/guc.c:651 msgid "Client Connection Defaults / Other Defaults" msgstr "Standardeinstellungen für Clientverbindungen / Andere" -#: utils/misc/guc.c:651 +#: utils/misc/guc.c:653 msgid "Lock Management" msgstr "Sperrenverwaltung" -#: utils/misc/guc.c:653 +#: utils/misc/guc.c:655 msgid "Version and Platform Compatibility" msgstr "Versions- und Plattformkompatibilität" -#: utils/misc/guc.c:655 +#: utils/misc/guc.c:657 msgid "Version and Platform Compatibility / Previous PostgreSQL Versions" msgstr "Versions- und Plattformkompatibilität / Frühere PostgreSQL-Versionen" -#: utils/misc/guc.c:657 +#: utils/misc/guc.c:659 msgid "Version and Platform Compatibility / Other Platforms and Clients" msgstr "Versions- und Plattformkompatibilität / Andere Plattformen und Clients" -#: utils/misc/guc.c:659 +#: utils/misc/guc.c:661 msgid "Error Handling" msgstr "Fehlerbehandlung" -#: utils/misc/guc.c:661 +#: utils/misc/guc.c:663 msgid "Preset Options" msgstr "Voreingestellte Optionen" -#: utils/misc/guc.c:663 +#: utils/misc/guc.c:665 msgid 
"Customized Options" msgstr "Angepasste Optionen" -#: utils/misc/guc.c:665 +#: utils/misc/guc.c:667 msgid "Developer Options" msgstr "Entwickleroptionen" -#: utils/misc/guc.c:722 +#: utils/misc/guc.c:721 msgid "Valid units for this parameter are \"kB\", \"MB\", \"GB\", and \"TB\"." msgstr "Gültige Einheiten für diesen Parameter sind »kB«, »MB«, »GB« und »TB«." -#: utils/misc/guc.c:749 +#: utils/misc/guc.c:753 msgid "Valid units for this parameter are \"ms\", \"s\", \"min\", \"h\", and \"d\"." msgstr "Gültige Einheiten für diesen Parameter sind »ms«, »s«, »min«, »h« und »d«." -#: utils/misc/guc.c:808 +#: utils/misc/guc.c:812 msgid "Enables the planner's use of sequential-scan plans." msgstr "Ermöglicht sequenzielle Scans in Planer." -#: utils/misc/guc.c:817 +#: utils/misc/guc.c:821 msgid "Enables the planner's use of index-scan plans." msgstr "Ermöglicht Index-Scans im Planer." -#: utils/misc/guc.c:826 +#: utils/misc/guc.c:830 msgid "Enables the planner's use of index-only-scan plans." msgstr "Ermöglicht Index-Only-Scans im Planer." -#: utils/misc/guc.c:835 +#: utils/misc/guc.c:839 msgid "Enables the planner's use of bitmap-scan plans." msgstr "Ermöglicht Bitmap-Scans im Planer." -#: utils/misc/guc.c:844 +#: utils/misc/guc.c:848 msgid "Enables the planner's use of TID scan plans." msgstr "Ermöglicht TID-Scans im Planer." -#: utils/misc/guc.c:853 +#: utils/misc/guc.c:857 msgid "Enables the planner's use of explicit sort steps." msgstr "Ermöglicht Sortierschritte im Planer." -#: utils/misc/guc.c:862 +#: utils/misc/guc.c:866 msgid "Enables the planner's use of hashed aggregation plans." msgstr "Ermöglicht Hash-Aggregierung im Planer." -#: utils/misc/guc.c:871 +#: utils/misc/guc.c:875 msgid "Enables the planner's use of materialization." msgstr "Ermöglicht Materialisierung im Planer." -#: utils/misc/guc.c:880 +#: utils/misc/guc.c:884 msgid "Enables the planner's use of nested-loop join plans." msgstr "Ermöglicht Nested-Loop-Verbunde im Planer." -#: utils/misc/guc.c:889 +#: utils/misc/guc.c:893 msgid "Enables the planner's use of merge join plans." msgstr "Ermöglicht Merge-Verbunde im Planer." -#: utils/misc/guc.c:898 +#: utils/misc/guc.c:902 msgid "Enables the planner's use of hash join plans." msgstr "Ermöglicht Hash-Verbunde im Planer." -#: utils/misc/guc.c:907 +#: utils/misc/guc.c:911 +msgid "Enables the planner's use of gather merge plans." +msgstr "Ermöglicht Gather-Merge-Pläne im Planer." + +#: utils/misc/guc.c:920 +msgid "Enables partitionwise join." +msgstr "" + +#: utils/misc/guc.c:929 +msgid "Enables partitionwise aggregation and grouping." +msgstr "" + +#: utils/misc/guc.c:938 #, fuzzy #| msgid "Enables the planner's use of merge join plans." -msgid "Enables the planner's use of gather merge plans." +msgid "Enables the planner's use of parallel append plans." msgstr "Ermöglicht Merge-Verbunde im Planer." -#: utils/misc/guc.c:917 +#: utils/misc/guc.c:947 +#, fuzzy +#| msgid "Enables the planner's use of hash join plans." +msgid "Enables the planner's user of parallel hash plans." +msgstr "Ermöglicht Hash-Verbunde im Planer." + +#: utils/misc/guc.c:956 +msgid "Enable plan-time and run-time partition pruning." +msgstr "" + +#: utils/misc/guc.c:957 +msgid "Allows the query planner and executor to compare partition bounds to conditions in the query to determine which partitions must be scanned." +msgstr "" + +#: utils/misc/guc.c:967 msgid "Enables genetic query optimization." msgstr "Ermöglicht genetische Anfrageoptimierung." 
-#: utils/misc/guc.c:918 +#: utils/misc/guc.c:968 msgid "This algorithm attempts to do planning without exhaustive searching." msgstr "Dieser Algorithmus versucht das Planen ohne erschöpfende Suche durchzuführen." -#: utils/misc/guc.c:928 +#: utils/misc/guc.c:978 msgid "Shows whether the current user is a superuser." msgstr "Zeigt, ob der aktuelle Benutzer ein Superuser ist." -#: utils/misc/guc.c:938 +#: utils/misc/guc.c:988 msgid "Enables advertising the server via Bonjour." msgstr "Ermöglicht die Bekanntgabe des Servers mit Bonjour." -#: utils/misc/guc.c:947 +#: utils/misc/guc.c:997 msgid "Collects transaction commit time." msgstr "Sammelt Commit-Timestamps von Transaktionen." -#: utils/misc/guc.c:956 +#: utils/misc/guc.c:1006 msgid "Enables SSL connections." msgstr "Ermöglicht SSL-Verbindungen." -#: utils/misc/guc.c:965 +#: utils/misc/guc.c:1015 +msgid "Also use ssl_passphrase_command during server reload." +msgstr "" + +#: utils/misc/guc.c:1024 msgid "Give priority to server ciphersuite order." msgstr "Der Ciphersuite-Reihenfolge des Servers Vorrang geben." -#: utils/misc/guc.c:974 +#: utils/misc/guc.c:1033 msgid "Forces synchronization of updates to disk." msgstr "Erzwingt die Synchronisierung von Aktualisierungen auf Festplatte." -#: utils/misc/guc.c:975 +#: utils/misc/guc.c:1034 msgid "The server will use the fsync() system call in several places to make sure that updates are physically written to disk. This insures that a database cluster will recover to a consistent state after an operating system or hardware crash." msgstr "Der Server verwendet den Systemaufruf fsync() an mehreren Stellen, um sicherzustellen, dass Datenänderungen physikalisch auf die Festplatte geschrieben werden. Das stellt sicher, dass der Datenbankcluster nach einem Betriebssystemabsturz oder Hardwarefehler in einem korrekten Zustand wiederhergestellt werden kann." -#: utils/misc/guc.c:986 +#: utils/misc/guc.c:1045 msgid "Continues processing after a checksum failure." msgstr "Setzt die Verarbeitung trotz Prüfsummenfehler fort." -#: utils/misc/guc.c:987 +#: utils/misc/guc.c:1046 msgid "Detection of a checksum failure normally causes PostgreSQL to report an error, aborting the current transaction. Setting ignore_checksum_failure to true causes the system to ignore the failure (but still report a warning), and continue processing. This behavior could cause crashes or other serious problems. Only has an effect if checksums are enabled." msgstr "Wenn eine fehlerhafte Prüfsumme entdeckt wird, gibt PostgreSQL normalerweise einen Fehler aus und bricht die aktuelle Transaktion ab. Wenn »ignore_checksum_failure« an ist, dann wird der Fehler ignoriert (aber trotzdem eine Warnung ausgegeben) und die Verarbeitung geht weiter. Dieses Verhalten kann Abstürze und andere ernsthafte Probleme verursachen. Es hat keine Auswirkungen, wenn Prüfsummen nicht eingeschaltet sind." -#: utils/misc/guc.c:1001 +#: utils/misc/guc.c:1060 msgid "Continues processing past damaged page headers." msgstr "Setzt die Verarbeitung trotz kaputter Seitenköpfe fort." -#: utils/misc/guc.c:1002 +#: utils/misc/guc.c:1061 msgid "Detection of a damaged page header normally causes PostgreSQL to report an error, aborting the current transaction. Setting zero_damaged_pages to true causes the system to instead report a warning, zero out the damaged page, and continue processing. This behavior will destroy data, namely all the rows on the damaged page."
msgstr "Wenn ein kaputter Seitenkopf entdeckt wird, gibt PostgreSQL normalerweise einen Fehler aus und bricht die aktuelle Transaktion ab. Wenn »zero_damaged_pages« an ist, dann wird eine Warnung ausgegeben, die kaputte Seite mit Nullen gefüllt und die Verarbeitung geht weiter. Dieses Verhalten zerstört Daten, nämlich alle Zeilen in der kaputten Seite." -#: utils/misc/guc.c:1015 +#: utils/misc/guc.c:1074 msgid "Writes full pages to WAL when first modified after a checkpoint." msgstr "Schreibt volle Seiten in den WAL, sobald sie nach einem Checkpoint geändert werden." -#: utils/misc/guc.c:1016 +#: utils/misc/guc.c:1075 msgid "A page write in process during an operating system crash might be only partially written to disk. During recovery, the row changes stored in WAL are not enough to recover. This option writes pages when first modified after a checkpoint to WAL so full recovery is possible." msgstr "Ein Seitenschreibvorgang während eines Betriebssystemabsturzes könnte eventuell nur teilweise geschrieben worden sein. Bei der Wiederherstellung sind die im WAL gespeicherten Zeilenänderungen nicht ausreichend. Diese Option schreibt Seiten, sobald sie nach einem Checkpoint geändert worden sind, damit eine volle Wiederherstellung möglich ist." -#: utils/misc/guc.c:1029 +#: utils/misc/guc.c:1088 msgid "Writes full pages to WAL when first modified after a checkpoint, even for a non-critical modifications." msgstr "Schreibt volle Seiten in den WAL, sobald sie nach einem Checkpoint geändert werden, auch für nicht kritische Änderungen." -#: utils/misc/guc.c:1039 +#: utils/misc/guc.c:1098 msgid "Compresses full-page writes written in WAL file." msgstr "Komprimiert in WAL-Dateien geschriebene volle Seiten." -#: utils/misc/guc.c:1049 +#: utils/misc/guc.c:1108 msgid "Logs each checkpoint." msgstr "Schreibt jeden Checkpoint in den Log." -#: utils/misc/guc.c:1058 +#: utils/misc/guc.c:1117 msgid "Logs each successful connection." msgstr "Schreibt jede erfolgreiche Verbindung in den Log." -#: utils/misc/guc.c:1067 +#: utils/misc/guc.c:1126 msgid "Logs end of a session, including duration." msgstr "Schreibt jedes Verbindungsende mit Sitzungszeit in den Log." -#: utils/misc/guc.c:1076 +#: utils/misc/guc.c:1135 msgid "Logs each replication command." msgstr "Schreibt jeden Replikationsbefehl in den Log." -#: utils/misc/guc.c:1085 +#: utils/misc/guc.c:1144 msgid "Shows whether the running server has assertion checks enabled." msgstr "Zeigt, ob der laufende Server Assertion-Prüfungen aktiviert hat." -#: utils/misc/guc.c:1100 +#: utils/misc/guc.c:1159 msgid "Terminate session on any error." msgstr "Sitzung bei jedem Fehler abbrechen." -#: utils/misc/guc.c:1109 +#: utils/misc/guc.c:1168 msgid "Reinitialize server after backend crash." msgstr "Server nach Absturz eines Serverprozesses reinitialisieren." -#: utils/misc/guc.c:1119 +#: utils/misc/guc.c:1178 msgid "Logs the duration of each completed SQL statement." msgstr "Loggt die Dauer jeder abgeschlossenen SQL-Anweisung." -#: utils/misc/guc.c:1128 +#: utils/misc/guc.c:1187 msgid "Logs each query's parse tree." msgstr "Scheibt den Parsebaum jeder Anfrage in den Log." -#: utils/misc/guc.c:1137 +#: utils/misc/guc.c:1196 msgid "Logs each query's rewritten parse tree." msgstr "Schreibt den umgeschriebenen Parsebaum jeder Anfrage in den Log." -#: utils/misc/guc.c:1146 +#: utils/misc/guc.c:1205 msgid "Logs each query's execution plan." msgstr "Schreibt den Ausführungsplan jeder Anfrage in den Log." 
-#: utils/misc/guc.c:1155 +#: utils/misc/guc.c:1214 msgid "Indents parse and plan tree displays." msgstr "Rückt die Anzeige von Parse- und Planbäumen ein." -#: utils/misc/guc.c:1164 +#: utils/misc/guc.c:1223 msgid "Writes parser performance statistics to the server log." msgstr "Schreibt Parser-Leistungsstatistiken in den Serverlog." -#: utils/misc/guc.c:1173 +#: utils/misc/guc.c:1232 msgid "Writes planner performance statistics to the server log." msgstr "Schreibt Planer-Leistungsstatistiken in den Serverlog." -#: utils/misc/guc.c:1182 +#: utils/misc/guc.c:1241 msgid "Writes executor performance statistics to the server log." msgstr "Schreibt Executor-Leistungsstatistiken in den Serverlog." -#: utils/misc/guc.c:1191 +#: utils/misc/guc.c:1250 msgid "Writes cumulative performance statistics to the server log." msgstr "Schreibt Gesamtleistungsstatistiken in den Serverlog." -#: utils/misc/guc.c:1201 +#: utils/misc/guc.c:1260 msgid "Logs system resource usage statistics (memory and CPU) on various B-tree operations." msgstr "Loggt Statistiken über Systemressourcen (Speicher und CPU) während diverser B-Baum-Operationen." -#: utils/misc/guc.c:1213 +#: utils/misc/guc.c:1272 msgid "Collects information about executing commands." msgstr "Sammelt Informationen über ausgeführte Befehle." -#: utils/misc/guc.c:1214 +#: utils/misc/guc.c:1273 msgid "Enables the collection of information on the currently executing command of each session, along with the time at which that command began execution." msgstr "Schaltet die Sammlung von Informationen über den aktuell ausgeführten Befehl jeder Sitzung ein, einschließlich der Zeit, an dem die Befehlsausführung begann." -#: utils/misc/guc.c:1224 +#: utils/misc/guc.c:1283 msgid "Collects statistics on database activity." msgstr "Sammelt Statistiken über Datenbankaktivität." -#: utils/misc/guc.c:1233 +#: utils/misc/guc.c:1292 msgid "Collects timing statistics for database I/O activity." msgstr "Sammelt Zeitmessungsstatistiken über Datenbank-I/O-Aktivität." -#: utils/misc/guc.c:1243 +#: utils/misc/guc.c:1302 msgid "Updates the process title to show the active SQL command." msgstr "Der Prozesstitel wird aktualisiert, um den aktuellen SQL-Befehl anzuzeigen." -#: utils/misc/guc.c:1244 +#: utils/misc/guc.c:1303 msgid "Enables updating of the process title every time a new SQL command is received by the server." msgstr "Ermöglicht das Aktualisieren des Prozesstitels bei jedem vom Server empfangenen neuen SQL-Befehl." -#: utils/misc/guc.c:1257 +#: utils/misc/guc.c:1316 msgid "Starts the autovacuum subprocess." msgstr "Startet den Autovacuum-Prozess." -#: utils/misc/guc.c:1267 +#: utils/misc/guc.c:1326 msgid "Generates debugging output for LISTEN and NOTIFY." msgstr "Erzeugt Debug-Ausgabe für LISTEN und NOTIFY." -#: utils/misc/guc.c:1279 +#: utils/misc/guc.c:1338 msgid "Emits information about lock usage." msgstr "Gibt Informationen über Sperrenverwendung aus." -#: utils/misc/guc.c:1289 +#: utils/misc/guc.c:1348 msgid "Emits information about user lock usage." msgstr "Gibt Informationen über Benutzersperrenverwendung aus." -#: utils/misc/guc.c:1299 +#: utils/misc/guc.c:1358 msgid "Emits information about lightweight lock usage." msgstr "Gibt Informationen über die Verwendung von Lightweight Locks aus." -#: utils/misc/guc.c:1309 +#: utils/misc/guc.c:1368 msgid "Dumps information about all current locks when a deadlock timeout occurs." msgstr "Gibt Informationen über alle aktuellen Sperren aus, wenn eine Verklemmung auftritt."
-#: utils/misc/guc.c:1321 +#: utils/misc/guc.c:1380 msgid "Logs long lock waits." msgstr "Schreibt Meldungen über langes Warten auf Sperren in den Log." -#: utils/misc/guc.c:1331 +#: utils/misc/guc.c:1390 msgid "Logs the host name in the connection logs." msgstr "Schreibt den Hostnamen jeder Verbindung in den Log." -#: utils/misc/guc.c:1332 +#: utils/misc/guc.c:1391 msgid "By default, connection logs only show the IP address of the connecting host. If you want them to show the host name you can turn this on, but depending on your host name resolution setup it might impose a non-negligible performance penalty." msgstr "In der Standardeinstellung zeigen die Verbindungslogs nur die IP-Adresse der Clienthosts. Wenn Sie den Hostnamen auch anzeigen wollen, dann können Sie diese Option anschalten, aber je nachdem, wie Ihr DNS eingerichtet ist, kann das die Leistung nicht unerheblich beeinträchtigen." -#: utils/misc/guc.c:1343 +#: utils/misc/guc.c:1402 msgid "Treats \"expr=NULL\" as \"expr IS NULL\"." msgstr "Behandelt »ausdruck=NULL« als »ausdruck IS NULL«." -#: utils/misc/guc.c:1344 +#: utils/misc/guc.c:1403 msgid "When turned on, expressions of the form expr = NULL (or NULL = expr) are treated as expr IS NULL, that is, they return true if expr evaluates to the null value, and false otherwise. The correct behavior of expr = NULL is to always return null (unknown)." msgstr "Wenn an, dann werden Ausdrücke der Form ausdruck = NULL (oder NULL = ausdruck) wie ausdruck IS NULL behandelt, das heißt, sie ergeben wahr, wenn das Ergebnis von ausdruck der NULL-Wert ist, und ansonsten falsch. Das korrekte Verhalten von ausdruck = NULL ist immer den NULL-Wert (für unbekannt) zurückzugeben." -#: utils/misc/guc.c:1356 +#: utils/misc/guc.c:1415 msgid "Enables per-database user names." msgstr "Ermöglicht Datenbank-lokale Benutzernamen." -#: utils/misc/guc.c:1365 +#: utils/misc/guc.c:1424 msgid "Sets the default read-only status of new transactions." msgstr "Setzt den Standardwert für die Read-Only-Einstellung einer neuen Transaktion." -#: utils/misc/guc.c:1374 +#: utils/misc/guc.c:1433 msgid "Sets the current transaction's read-only status." msgstr "Setzt die Read-Only-Einstellung der aktuellen Transaktion." -#: utils/misc/guc.c:1384 +#: utils/misc/guc.c:1443 msgid "Sets the default deferrable status of new transactions." msgstr "Setzt den Standardwert für die Deferrable-Einstellung einer neuen Transaktion." -#: utils/misc/guc.c:1393 +#: utils/misc/guc.c:1452 msgid "Whether to defer a read-only serializable transaction until it can be executed with no possible serialization failures." msgstr "Ob eine serialisierbare Read-Only-Transaktion aufgeschoben werden soll, bis sie ohne mögliche Serialisierungsfehler ausgeführt werden kann." -#: utils/misc/guc.c:1403 +#: utils/misc/guc.c:1462 msgid "Enable row security." msgstr "Schaltet Sicherheit auf Zeilenebene ein." -#: utils/misc/guc.c:1404 +#: utils/misc/guc.c:1463 msgid "When enabled, row security will be applied to all users." msgstr "Wenn eingeschaltet, wird Sicherheit auf Zeilenebene auf alle Benutzer angewendet." -#: utils/misc/guc.c:1412 +#: utils/misc/guc.c:1471 msgid "Check function bodies during CREATE FUNCTION." msgstr "Prüft Funktionskörper bei der Ausführung von CREATE FUNCTION." -#: utils/misc/guc.c:1421 +#: utils/misc/guc.c:1480 msgid "Enable input of NULL elements in arrays." msgstr "Ermöglicht die Eingabe von NULL-Elementen in Arrays." 
-#: utils/misc/guc.c:1422 +#: utils/misc/guc.c:1481 msgid "When turned on, unquoted NULL in an array input value means a null value; otherwise it is taken literally." msgstr "Wenn dies eingeschaltet ist, wird ein nicht gequotetes NULL in einem Array-Eingabewert als NULL-Wert interpretiert, ansonsten als Zeichenkette." -#: utils/misc/guc.c:1432 +#: utils/misc/guc.c:1491 msgid "Create new tables with OIDs by default." msgstr "Erzeugt neue Tabellen standardmäßig mit OIDs." -#: utils/misc/guc.c:1441 +#: utils/misc/guc.c:1500 msgid "Start a subprocess to capture stderr output and/or csvlogs into log files." msgstr "Startet einen Subprozess, um die Stderr-Ausgabe und/oder CSV-Logs in Logdateien auszugeben." -#: utils/misc/guc.c:1450 +#: utils/misc/guc.c:1509 msgid "Truncate existing log files of same name during log rotation." msgstr "Kürzt existierende Logdateien mit dem selben Namen beim Rotieren." -#: utils/misc/guc.c:1461 +#: utils/misc/guc.c:1520 msgid "Emit information about resource usage in sorting." msgstr "Gibt Informationen über die Ressourcenverwendung beim Sortieren aus." -#: utils/misc/guc.c:1475 +#: utils/misc/guc.c:1534 msgid "Generate debugging output for synchronized scanning." msgstr "Erzeugt Debug-Ausgabe für synchronisiertes Scannen." -#: utils/misc/guc.c:1490 +#: utils/misc/guc.c:1549 msgid "Enable bounded sorting using heap sort." msgstr "Ermöglicht Bounded Sorting mittels Heap-Sort." -#: utils/misc/guc.c:1503 +#: utils/misc/guc.c:1562 msgid "Emit WAL-related debugging output." msgstr "Gibt diverse Debug-Meldungen über WAL aus." -#: utils/misc/guc.c:1515 +#: utils/misc/guc.c:1574 msgid "Datetimes are integer based." msgstr "Datum/Zeit verwendet intern ganze Zahlen." -#: utils/misc/guc.c:1526 +#: utils/misc/guc.c:1585 msgid "Sets whether Kerberos and GSSAPI user names should be treated as case-insensitive." msgstr "Bestimmt, ob Groß-/Kleinschreibung bei Kerberos- und GSSAPI-Benutzernamen ignoriert werden soll." -#: utils/misc/guc.c:1536 +#: utils/misc/guc.c:1595 msgid "Warn about backslash escapes in ordinary string literals." msgstr "Warnt bei Backslash-Escapes in normalen Zeichenkettenkonstanten." -#: utils/misc/guc.c:1546 +#: utils/misc/guc.c:1605 msgid "Causes '...' strings to treat backslashes literally." msgstr "Bewirkt, dass Zeichenketten der Art '...' Backslashes als normales Zeichen behandeln." -#: utils/misc/guc.c:1557 +#: utils/misc/guc.c:1616 msgid "Enable synchronized sequential scans." msgstr "Ermöglicht synchronisierte sequenzielle Scans." -#: utils/misc/guc.c:1567 +#: utils/misc/guc.c:1626 msgid "Allows connections and queries during recovery." msgstr "Erlaubt Verbindungen und Anfragen während der Wiederherstellung." -#: utils/misc/guc.c:1577 +#: utils/misc/guc.c:1636 msgid "Allows feedback from a hot standby to the primary that will avoid query conflicts." msgstr "Erlaubt Rückmeldungen von einem Hot Standby an den Primärserver, um Anfragekonflikte zu vermeiden." -#: utils/misc/guc.c:1587 +#: utils/misc/guc.c:1646 msgid "Allows modifications of the structure of system tables." msgstr "Erlaubt Änderungen an der Struktur von Systemtabellen." -#: utils/misc/guc.c:1598 +#: utils/misc/guc.c:1657 msgid "Disables reading from system indexes." msgstr "Schaltet das Lesen aus Systemindexen ab." -#: utils/misc/guc.c:1599 +#: utils/misc/guc.c:1658 msgid "It does not prevent updating the indexes, so it is safe to use. The worst consequence is slowness." msgstr "Das Aktualisieren der Indexe wird nicht verhindert, also ist die Verwendung unbedenklich. 
Schlimmstenfalls wird alles langsamer." -#: utils/misc/guc.c:1610 +#: utils/misc/guc.c:1669 msgid "Enables backward compatibility mode for privilege checks on large objects." msgstr "Schaltet den rückwärtskompatiblen Modus für Privilegienprüfungen bei Large Objects ein." -#: utils/misc/guc.c:1611 +#: utils/misc/guc.c:1670 msgid "Skips privilege checks when reading or modifying large objects, for compatibility with PostgreSQL releases prior to 9.0." msgstr "Überspringt Privilegienprüfungen beim Lesen oder Ändern von Large Objects, zur Kompatibilität mit PostgreSQL-Versionen vor 9.0." -#: utils/misc/guc.c:1621 +#: utils/misc/guc.c:1680 msgid "Emit a warning for constructs that changed meaning since PostgreSQL 9.4." msgstr "Warnung ausgeben für Konstrukte, deren Bedeutung sich seit PostgreSQL 9.4 geändert hat." -#: utils/misc/guc.c:1631 +#: utils/misc/guc.c:1690 msgid "When generating SQL fragments, quote all identifiers." msgstr "Wenn SQL-Fragmente erzeugt werden, alle Bezeichner quoten." -#: utils/misc/guc.c:1641 +#: utils/misc/guc.c:1700 msgid "Shows whether data checksums are turned on for this cluster." msgstr "Zeigt, ob Datenprüfsummen in diesem Cluster angeschaltet sind." -#: utils/misc/guc.c:1652 +#: utils/misc/guc.c:1711 msgid "Add sequence number to syslog messages to avoid duplicate suppression." msgstr "Syslog-Nachrichten mit Sequenznummern versehen, um Unterdrückung doppelter Nachrichten zu unterbinden." -#: utils/misc/guc.c:1662 +#: utils/misc/guc.c:1721 msgid "Split messages sent to syslog by lines and to fit into 1024 bytes." msgstr "An Syslog gesendete Nachrichten nach Zeilen und in maximal 1024 Bytes aufteilen." -#: utils/misc/guc.c:1681 +#: utils/misc/guc.c:1731 +msgid "Controls whether Gather and Gather Merge also run subplans." +msgstr "" + +#: utils/misc/guc.c:1732 +msgid "Should gather nodes also run subplans, or just gather tuples?" +msgstr "" + +#: utils/misc/guc.c:1741 +msgid "Allow JIT compilation." +msgstr "Erlaubt JIT-Kompilierung." + +#: utils/misc/guc.c:1751 +msgid "Register JIT compiled function with debugger." +msgstr "" + +#: utils/misc/guc.c:1768 +msgid "Write out LLVM bitcode to facilitate JIT debugging." +msgstr "" + +#: utils/misc/guc.c:1779 +msgid "Allow JIT compilation of expressions." +msgstr "Erlaubt JIT-Kompilierung von Ausdrücken." + +#: utils/misc/guc.c:1790 +msgid "Register JIT compiled function with perf profiler." +msgstr "" + +#: utils/misc/guc.c:1807 +msgid "Allow JIT compilation of tuple deforming." +msgstr "" + +#: utils/misc/guc.c:1827 msgid "Forces a switch to the next WAL file if a new file has not been started within N seconds." msgstr "Erzwingt das Umschalten zur nächsten WAL-Datei, wenn seit N Sekunden keine neue Datei begonnen worden ist." -#: utils/misc/guc.c:1692 +#: utils/misc/guc.c:1838 msgid "Waits N seconds on connection startup after authentication." msgstr "Wartet beim Starten einer Verbindung N Sekunden nach der Authentifizierung." -#: utils/misc/guc.c:1693 utils/misc/guc.c:2216 +#: utils/misc/guc.c:1839 utils/misc/guc.c:2390 msgid "This allows attaching a debugger to the process." msgstr "Das ermöglicht es, einen Debugger in den Prozess einzuhängen." -#: utils/misc/guc.c:1702 +#: utils/misc/guc.c:1848 msgid "Sets the default statistics target." msgstr "Setzt das voreingestellte Statistikziel." -#: utils/misc/guc.c:1703 +#: utils/misc/guc.c:1849 msgid "This applies to table columns that have not had a column-specific target set via ALTER TABLE SET STATISTICS." 
msgstr "Diese Einstellung gilt für Tabellenspalten, für die kein spaltenspezifisches Ziel mit ALTER TABLE SET STATISTICS gesetzt worden ist." -#: utils/misc/guc.c:1712 +#: utils/misc/guc.c:1858 msgid "Sets the FROM-list size beyond which subqueries are not collapsed." msgstr "Setzt die Größe der FROM-Liste, ab der Unteranfragen nicht kollabiert werden." -#: utils/misc/guc.c:1714 +#: utils/misc/guc.c:1860 msgid "The planner will merge subqueries into upper queries if the resulting FROM list would have no more than this many items." msgstr "Der Planer bindet Unteranfragen in die übergeordneten Anfragen ein, wenn die daraus resultierende FROM-Liste nicht mehr als so viele Elemente haben würde." -#: utils/misc/guc.c:1724 +#: utils/misc/guc.c:1870 msgid "Sets the FROM-list size beyond which JOIN constructs are not flattened." msgstr "Setzt die Größe der FROM-Liste, ab der JOIN-Konstrukte nicht aufgelöst werden." -#: utils/misc/guc.c:1726 +#: utils/misc/guc.c:1872 msgid "The planner will flatten explicit JOIN constructs into lists of FROM items whenever a list of no more than this many items would result." msgstr "Der Planer löst ausdrückliche JOIN-Konstrukte in FROM-Listen auf, wenn die daraus resultierende FROM-Liste nicht mehr als so viele Elemente haben würde." -#: utils/misc/guc.c:1736 +#: utils/misc/guc.c:1882 msgid "Sets the threshold of FROM items beyond which GEQO is used." msgstr "Setzt die Anzahl der Elemente in der FROM-Liste, ab der GEQO verwendet wird." -#: utils/misc/guc.c:1745 +#: utils/misc/guc.c:1891 msgid "GEQO: effort is used to set the default for other GEQO parameters." msgstr "GEQO: wird für die Berechnung der Vorgabewerte anderer GEQO-Parameter verwendet." -#: utils/misc/guc.c:1754 +#: utils/misc/guc.c:1900 msgid "GEQO: number of individuals in the population." msgstr "GEQO: Anzahl der Individien in der Bevölkerung." -#: utils/misc/guc.c:1755 utils/misc/guc.c:1764 +#: utils/misc/guc.c:1901 utils/misc/guc.c:1910 msgid "Zero selects a suitable default value." msgstr "Null wählt einen passenden Vorgabewert." -#: utils/misc/guc.c:1763 +#: utils/misc/guc.c:1909 msgid "GEQO: number of iterations of the algorithm." msgstr "GEQO: Anzahl der Iterationen im Algorithmus." -#: utils/misc/guc.c:1774 +#: utils/misc/guc.c:1920 msgid "Sets the time to wait on a lock before checking for deadlock." msgstr "Setzt die Zeit, die gewartet wird, bis auf Verklemmung geprüft wird." -#: utils/misc/guc.c:1785 +#: utils/misc/guc.c:1931 msgid "Sets the maximum delay before canceling queries when a hot standby server is processing archived WAL data." msgstr "Setzt die maximale Verzögerung bevor Anfragen storniert werden, wenn ein Hot-Standby-Server archivierte WAL-Daten verarbeitet." -#: utils/misc/guc.c:1796 +#: utils/misc/guc.c:1942 msgid "Sets the maximum delay before canceling queries when a hot standby server is processing streamed WAL data." msgstr "Setzt die maximale Verzögerung bevor Anfragen storniert werden, wenn ein Hot-Standby-Server gestreamte WAL-Daten verarbeitet." -#: utils/misc/guc.c:1807 +#: utils/misc/guc.c:1953 msgid "Sets the maximum interval between WAL receiver status reports to the primary." msgstr "Setzt das maximale Intervall zwischen Statusberichten des WAL-Receivers an den Primärserver." -#: utils/misc/guc.c:1818 +#: utils/misc/guc.c:1964 msgid "Sets the maximum wait time to receive data from the primary." msgstr "Setzt die maximale Zeit, um auf den Empfang von Daten vom Primärserver zu warten." 
-#: utils/misc/guc.c:1829 +#: utils/misc/guc.c:1975 msgid "Sets the maximum number of concurrent connections." msgstr "Setzt die maximale Anzahl gleichzeitiger Verbindungen." -#: utils/misc/guc.c:1839 +#: utils/misc/guc.c:1986 msgid "Sets the number of connection slots reserved for superusers." msgstr "Setzt die Anzahl der für Superuser reservierten Verbindungen." -#: utils/misc/guc.c:1853 +#: utils/misc/guc.c:2000 msgid "Sets the number of shared memory buffers used by the server." msgstr "Setzt die Anzahl der vom Server verwendeten Shared-Memory-Puffer." -#: utils/misc/guc.c:1864 +#: utils/misc/guc.c:2011 msgid "Sets the maximum number of temporary buffers used by each session." msgstr "Setzt die maximale Anzahl der von jeder Sitzung verwendeten temporären Puffer." -#: utils/misc/guc.c:1875 +#: utils/misc/guc.c:2022 msgid "Sets the TCP port the server listens on." msgstr "Setzt den TCP-Port, auf dem der Server auf Verbindungen wartet." -#: utils/misc/guc.c:1885 +#: utils/misc/guc.c:2032 msgid "Sets the access permissions of the Unix-domain socket." msgstr "Setzt die Zugriffsrechte für die Unix-Domain-Socket." -#: utils/misc/guc.c:1886 +#: utils/misc/guc.c:2033 msgid "Unix-domain sockets use the usual Unix file system permission set. The parameter value is expected to be a numeric mode specification in the form accepted by the chmod and umask system calls. (To use the customary octal format the number must start with a 0 (zero).)" msgstr "Unix-Domain-Sockets verwenden die üblichen Zugriffsrechte für Unix-Dateisysteme. Der Wert dieser Option muss ein numerischer Wert in der von den Systemaufrufen chmod und umask verwendeten Form sein. (Um das gebräuchliche Oktalformat zu verwenden, muss die Zahl mit 0 (einer Null) anfangen.)" -#: utils/misc/guc.c:1900 +#: utils/misc/guc.c:2047 msgid "Sets the file permissions for log files." msgstr "Setzt die Dateizugriffsrechte für Logdateien." -#: utils/misc/guc.c:1901 +#: utils/misc/guc.c:2048 msgid "The parameter value is expected to be a numeric mode specification in the form accepted by the chmod and umask system calls. (To use the customary octal format the number must start with a 0 (zero).)" msgstr "Der Wert dieser Option muss ein numerischer Wert in der von den Systemaufrufen chmod und umask verwendeten Form sein. (Um das gebräuchliche Oktalformat zu verwenden, muss die Zahl mit 0 (einer Null) anfangen.)" -#: utils/misc/guc.c:1914 +#: utils/misc/guc.c:2062 +#, fuzzy +#| msgid "Sets the server's data directory." +msgid "Mode of the data directory." +msgstr "Setzt das Datenverzeichnis des Servers." + +#: utils/misc/guc.c:2063 +#, fuzzy +#| msgid "The parameter value is expected to be a numeric mode specification in the form accepted by the chmod and umask system calls. (To use the customary octal format the number must start with a 0 (zero).)" +msgid "The parameter value is a numeric mode specification in the form accepted by the chmod and umask system calls. (To use the customary octal format the number must start with a 0 (zero).)" +msgstr "Der Wert dieser Option muss ein numerischer Wert in der von den Systemaufrufen chmod und umask verwendeten Form sein. (Um das gebräuchliche Oktalformat zu verwenden, muss die Zahl mit 0 (einer Null) anfangen.)" + +#: utils/misc/guc.c:2076 msgid "Sets the maximum memory to be used for query workspaces." msgstr "Setzt die maximale Speichergröße für Anfrage-Arbeitsbereiche." 
-#: utils/misc/guc.c:1915 +#: utils/misc/guc.c:2077 msgid "This much memory can be used by each internal sort operation and hash table before switching to temporary disk files." msgstr "Gibt die Speichermenge an, die für interne Sortiervorgänge und Hashtabellen verwendet werden kann, bevor auf temporäre Dateien umgeschaltet wird." -#: utils/misc/guc.c:1927 +#: utils/misc/guc.c:2089 msgid "Sets the maximum memory to be used for maintenance operations." msgstr "Setzt die maximale Speichergröße für Wartungsoperationen." -#: utils/misc/guc.c:1928 +#: utils/misc/guc.c:2090 msgid "This includes operations such as VACUUM and CREATE INDEX." msgstr "Das schließt Operationen wie VACUUM und CREATE INDEX ein." -#: utils/misc/guc.c:1938 -msgid "Sets the maximum number of tuples to be sorted using replacement selection." -msgstr "Setzt die maximale Anzahl Tupel, die mit Replacement-Selection sortiert werden." - -#: utils/misc/guc.c:1939 -msgid "When more tuples than this are present, quicksort will be used." -msgstr "Wenn mehr Tupel als dieser Wert vorhanden sind, dann wird Quicksort verwendet." - -#: utils/misc/guc.c:1953 +#: utils/misc/guc.c:2105 msgid "Sets the maximum stack depth, in kilobytes." msgstr "Setzt die maximale Stackgröße, in Kilobytes." -#: utils/misc/guc.c:1964 +#: utils/misc/guc.c:2116 msgid "Limits the total size of all temporary files used by each process." msgstr "Beschränkt die Gesamtgröße aller temporären Dateien, die von einem Prozess verwendet werden." -#: utils/misc/guc.c:1965 +#: utils/misc/guc.c:2117 msgid "-1 means no limit." msgstr "-1 bedeutet keine Grenze." -#: utils/misc/guc.c:1975 +#: utils/misc/guc.c:2127 msgid "Vacuum cost for a page found in the buffer cache." msgstr "Vacuum-Kosten für eine im Puffer-Cache gefundene Seite." -#: utils/misc/guc.c:1985 +#: utils/misc/guc.c:2137 msgid "Vacuum cost for a page not found in the buffer cache." msgstr "Vacuum-Kosten für eine nicht im Puffer-Cache gefundene Seite." -#: utils/misc/guc.c:1995 +#: utils/misc/guc.c:2147 msgid "Vacuum cost for a page dirtied by vacuum." msgstr "Vacuum-Kosten für eine durch Vacuum schmutzig gemachte Seite." -#: utils/misc/guc.c:2005 +#: utils/misc/guc.c:2157 msgid "Vacuum cost amount available before napping." msgstr "Verfügbare Vacuum-Kosten vor Nickerchen." -#: utils/misc/guc.c:2015 +#: utils/misc/guc.c:2167 msgid "Vacuum cost delay in milliseconds." msgstr "Vacuum-Kosten-Verzögerung in Millisekunden." -#: utils/misc/guc.c:2026 +#: utils/misc/guc.c:2178 msgid "Vacuum cost delay in milliseconds, for autovacuum." msgstr "Vacuum-Kosten-Verzögerung in Millisekunden, für Autovacuum." -#: utils/misc/guc.c:2037 +#: utils/misc/guc.c:2189 msgid "Vacuum cost amount available before napping, for autovacuum." msgstr "Verfügbare Vacuum-Kosten vor Nickerchen, für Autovacuum." -#: utils/misc/guc.c:2047 +#: utils/misc/guc.c:2199 msgid "Sets the maximum number of simultaneously open files for each server process." msgstr "Setzt die maximale Zahl gleichzeitig geöffneter Dateien für jeden Serverprozess." -#: utils/misc/guc.c:2060 +#: utils/misc/guc.c:2212 msgid "Sets the maximum number of simultaneously prepared transactions." msgstr "Setzt die maximale Anzahl von gleichzeitig vorbereiteten Transaktionen." -#: utils/misc/guc.c:2071 +#: utils/misc/guc.c:2223 msgid "Sets the minimum OID of tables for tracking locks." msgstr "Setzt die minimale Tabellen-OID für das Verfolgen von Sperren." -#: utils/misc/guc.c:2072 +#: utils/misc/guc.c:2224 msgid "Is used to avoid output on system tables." 
msgstr "Wird verwendet, um Ausgabe für Systemtabellen zu vermeiden." -#: utils/misc/guc.c:2081 +#: utils/misc/guc.c:2233 msgid "Sets the OID of the table with unconditionally lock tracing." msgstr "Setzt die OID der Tabelle mit bedingungsloser Sperrenverfolgung." -#: utils/misc/guc.c:2093 +#: utils/misc/guc.c:2245 msgid "Sets the maximum allowed duration of any statement." msgstr "Setzt die maximal erlaubte Dauer jeder Anweisung." -#: utils/misc/guc.c:2094 utils/misc/guc.c:2105 utils/misc/guc.c:2116 +#: utils/misc/guc.c:2246 utils/misc/guc.c:2257 utils/misc/guc.c:2268 msgid "A value of 0 turns off the timeout." msgstr "Der Wert 0 schaltet die Zeitprüfung aus." -#: utils/misc/guc.c:2104 +#: utils/misc/guc.c:2256 msgid "Sets the maximum allowed duration of any wait for a lock." msgstr "Setzt die maximal erlaubte Dauer, um auf eine Sperre zu warten." -#: utils/misc/guc.c:2115 +#: utils/misc/guc.c:2267 msgid "Sets the maximum allowed duration of any idling transaction." msgstr "Setzt die maximal erlaubte Dauer einer inaktiven Transaktion." -#: utils/misc/guc.c:2126 +#: utils/misc/guc.c:2278 msgid "Minimum age at which VACUUM should freeze a table row." msgstr "Mindestalter, bei dem VACUUM eine Tabellenzeile einfrieren soll." -#: utils/misc/guc.c:2136 +#: utils/misc/guc.c:2288 msgid "Age at which VACUUM should scan whole table to freeze tuples." msgstr "Alter, bei dem VACUUM die ganze Tabelle durchsuchen soll, um Zeilen einzufrieren." -#: utils/misc/guc.c:2146 +#: utils/misc/guc.c:2298 msgid "Minimum age at which VACUUM should freeze a MultiXactId in a table row." msgstr "Mindestalter, bei dem VACUUM eine MultiXactId in einer Tabellenzeile einfrieren soll." -#: utils/misc/guc.c:2156 +#: utils/misc/guc.c:2308 msgid "Multixact age at which VACUUM should scan whole table to freeze tuples." msgstr "Multixact-Alter, bei dem VACUUM die ganze Tabelle durchsuchen soll, um Zeilen einzufrieren." -#: utils/misc/guc.c:2166 +#: utils/misc/guc.c:2318 msgid "Number of transactions by which VACUUM and HOT cleanup should be deferred, if any." msgstr "Anzahl Transaktionen, um die VACUUM- und HOT-Aufräumen aufgeschoben werden soll." -#: utils/misc/guc.c:2179 +#: utils/misc/guc.c:2331 msgid "Sets the maximum number of locks per transaction." msgstr "Setzt die maximale Anzahl Sperren pro Transaktion." -#: utils/misc/guc.c:2180 +#: utils/misc/guc.c:2332 msgid "The shared lock table is sized on the assumption that at most max_locks_per_transaction * max_connections distinct objects will need to be locked at any one time." msgstr "Die globale Sperrentabelle wird mit der Annahme angelegt, das höchstens max_locks_per_transaction * max_connections verschiedene Objekte gleichzeitig gesperrt werden müssen." -#: utils/misc/guc.c:2191 +#: utils/misc/guc.c:2343 msgid "Sets the maximum number of predicate locks per transaction." msgstr "Setzt die maximale Anzahl Prädikatsperren pro Transaktion." -#: utils/misc/guc.c:2192 +#: utils/misc/guc.c:2344 msgid "The shared predicate lock table is sized on the assumption that at most max_pred_locks_per_transaction * max_connections distinct objects will need to be locked at any one time." msgstr "Die globale Prädikatsperrentabelle wird mit der Annahme angelegt, das höchstens max_pred_locks_per_transaction * max_connections verschiedene Objekte gleichzeitig gesperrt werden müssen." -#: utils/misc/guc.c:2203 +#: utils/misc/guc.c:2355 +msgid "Sets the maximum number of predicate-locked pages and tuples per relation." 
+msgstr "Setzt die maximale Anzahl Prädikatsperren für Seiten und Tupel pro Relation." + +#: utils/misc/guc.c:2356 +msgid "If more than this total of pages and tuples in the same relation are locked by a connection, those locks are replaced by a relation-level lock." +msgstr "Wenn mehr als diese Gesamtzahl Seiten und Tupel in der selben Relation von einer Verbindung gesperrt sind, werden diese Sperren durch eine Sperre auf Relationsebene ersetzt." + +#: utils/misc/guc.c:2366 +msgid "Sets the maximum number of predicate-locked tuples per page." +msgstr "Setzt die maximale Anzahl Prädikatsperren für Tupel pro Seite." + +#: utils/misc/guc.c:2367 +msgid "If more than this number of tuples on the same page are locked by a connection, those locks are replaced by a page-level lock." +msgstr "Wenn mehr als diese Anzahl Tupel auf der selben Seite von einer Verbindung gesperrt sind, werden diese Sperren durch eine Sperre auf Seitenebene ersetzt." + +#: utils/misc/guc.c:2377 msgid "Sets the maximum allowed time to complete client authentication." msgstr "Setzt die maximale Zeit, um die Client-Authentifizierung zu beenden." -#: utils/misc/guc.c:2215 +#: utils/misc/guc.c:2389 msgid "Waits N seconds on connection startup before authentication." msgstr "Wartet beim Starten einer Verbindung N Sekunden vor der Authentifizierung." -#: utils/misc/guc.c:2226 +#: utils/misc/guc.c:2400 msgid "Sets the number of WAL files held for standby servers." msgstr "Setzt die maximale Anzahl der für Standby-Server vorgehaltenen WAL-Dateien." -#: utils/misc/guc.c:2236 +#: utils/misc/guc.c:2410 msgid "Sets the minimum size to shrink the WAL to." msgstr "Setzt die minimale Größe, auf die der WAL geschrumpft wird." -#: utils/misc/guc.c:2247 +#: utils/misc/guc.c:2422 msgid "Sets the WAL size that triggers a checkpoint." msgstr "Setzt die WAL-Größe, die einen Checkpoint auslöst." -#: utils/misc/guc.c:2258 +#: utils/misc/guc.c:2434 msgid "Sets the maximum time between automatic WAL checkpoints." msgstr "Setzt die maximale Zeit zwischen automatischen WAL-Checkpoints." -#: utils/misc/guc.c:2269 +#: utils/misc/guc.c:2445 msgid "Enables warnings if checkpoint segments are filled more frequently than this." msgstr "Schreibt eine Logmeldung, wenn Checkpoint-Segmente häufiger als dieser Wert gefüllt werden." -#: utils/misc/guc.c:2271 +#: utils/misc/guc.c:2447 msgid "Write a message to the server log if checkpoints caused by the filling of checkpoint segment files happens more frequently than this number of seconds. Zero turns off the warning." msgstr "Schreibe Meldung in den Serverlog, wenn Checkpoints, die durch Füllen der Checkpoint-Segmente ausgelöst werden, häufiger als dieser Wert in Sekunden passieren. Null schaltet die Warnung ab." -#: utils/misc/guc.c:2283 utils/misc/guc.c:2440 utils/misc/guc.c:2467 +#: utils/misc/guc.c:2459 utils/misc/guc.c:2616 utils/misc/guc.c:2643 msgid "Number of pages after which previously performed writes are flushed to disk." msgstr "Anzahl der Seiten, nach denen getätigte Schreibvorgänge auf die Festplatte zurückgeschrieben werden." -#: utils/misc/guc.c:2294 +#: utils/misc/guc.c:2470 msgid "Sets the number of disk-page buffers in shared memory for WAL." msgstr "Setzt die Anzahl Diskseitenpuffer für WAL im Shared Memory." -#: utils/misc/guc.c:2305 +#: utils/misc/guc.c:2481 msgid "Time between WAL flushes performed in the WAL writer." msgstr "Zeit zwischen WAL-Flush-Operationen im WAL-Writer." 
-#: utils/misc/guc.c:2316 +#: utils/misc/guc.c:2492 msgid "Amount of WAL written out by WAL writer that triggers a flush." msgstr "Ein Flush wird ausgelöst, wenn diese Menge WAL vom WAL-Writer geschrieben worden ist." -#: utils/misc/guc.c:2328 +#: utils/misc/guc.c:2504 msgid "Sets the maximum number of simultaneously running WAL sender processes." msgstr "Setzt die maximale Anzahl gleichzeitig laufender WAL-Sender-Prozesse." -#: utils/misc/guc.c:2339 +#: utils/misc/guc.c:2515 msgid "Sets the maximum number of simultaneously defined replication slots." msgstr "Setzt die maximale Anzahl von gleichzeitig definierten Replikations-Slots." -#: utils/misc/guc.c:2349 +#: utils/misc/guc.c:2525 msgid "Sets the maximum time to wait for WAL replication." msgstr "Setzt die maximale Zeit, um auf WAL-Replikation zu warten." -#: utils/misc/guc.c:2360 +#: utils/misc/guc.c:2536 msgid "Sets the delay in microseconds between transaction commit and flushing WAL to disk." msgstr "Setzt die Verzögerung in Mikrosekunden zwischen Transaktionsabschluss und dem Schreiben von WAL auf die Festplatte." -#: utils/misc/guc.c:2372 +#: utils/misc/guc.c:2548 msgid "Sets the minimum concurrent open transactions before performing commit_delay." msgstr "Setzt die minimale Anzahl gleichzeitig offener Transaktionen bevor »commit_delay« angewendet wird." -#: utils/misc/guc.c:2383 +#: utils/misc/guc.c:2559 msgid "Sets the number of digits displayed for floating-point values." msgstr "Setzt die Anzahl ausgegebener Ziffern für Fließkommawerte." -#: utils/misc/guc.c:2384 +#: utils/misc/guc.c:2560 msgid "This affects real, double precision, and geometric data types. The parameter value is added to the standard number of digits (FLT_DIG or DBL_DIG as appropriate)." msgstr "Diese Einstellung betrifft real, double precision und geometrische Datentypen. Der Parameterwert wird zur Standardziffernanzahl (FLT_DIG bzw. DBL_DIG) hinzuaddiert." -#: utils/misc/guc.c:2395 +#: utils/misc/guc.c:2571 msgid "Sets the minimum execution time above which statements will be logged." msgstr "Setzt die minimale Ausführungszeit, über der Anweisungen geloggt werden." -#: utils/misc/guc.c:2397 +#: utils/misc/guc.c:2573 msgid "Zero prints all queries. -1 turns this feature off." msgstr "Null zeigt alle Anfragen. -1 schaltet dieses Feature aus." -#: utils/misc/guc.c:2407 +#: utils/misc/guc.c:2583 msgid "Sets the minimum execution time above which autovacuum actions will be logged." msgstr "Setzt die minimale Ausführungszeit, über der Autovacuum-Aktionen geloggt werden." -#: utils/misc/guc.c:2409 +#: utils/misc/guc.c:2585 msgid "Zero prints all actions. -1 turns autovacuum logging off." msgstr "Null gibt alle Aktionen aus. -1 schaltet die Log-Aufzeichnung über Autovacuum aus." -#: utils/misc/guc.c:2419 +#: utils/misc/guc.c:2595 msgid "Background writer sleep time between rounds." msgstr "Schlafzeit zwischen Durchläufen des Background-Writers." -#: utils/misc/guc.c:2430 +#: utils/misc/guc.c:2606 msgid "Background writer maximum number of LRU pages to flush per round." msgstr "Maximale Anzahl der vom Background-Writer pro Durchlauf zu flushenden LRU-Seiten." -#: utils/misc/guc.c:2453 +#: utils/misc/guc.c:2629 msgid "Number of simultaneous requests that can be handled efficiently by the disk subsystem." msgstr "Anzahl simultaner Anfragen, die das Festplattensubsystem effizient bearbeiten kann." -#: utils/misc/guc.c:2454 +#: utils/misc/guc.c:2630 msgid "For RAID arrays, this should be approximately the number of drive spindles in the array."
msgstr "Für RAID-Arrays sollte dies ungefähr die Anzahl Spindeln im Array sein." -#: utils/misc/guc.c:2480 +#: utils/misc/guc.c:2656 msgid "Maximum number of concurrent worker processes." msgstr "Maximale Anzahl gleichzeitiger Worker-Prozesse." -#: utils/misc/guc.c:2492 -#, fuzzy -#| msgid "Maximum number of concurrent worker processes." +#: utils/misc/guc.c:2668 msgid "Maximum number of logical replication worker processes." -msgstr "Maximale Anzahl gleichzeitiger Worker-Prozesse." +msgstr "Maximale Anzahl Arbeitsprozesse für logische Replikation." + +#: utils/misc/guc.c:2680 +msgid "Maximum number of table synchronization workers per subscription." +msgstr "Maximale Anzahl Arbeitsprozesse für Tabellensynchronisation pro Subskription." -#: utils/misc/guc.c:2502 +#: utils/misc/guc.c:2690 msgid "Automatic log file rotation will occur after N minutes." msgstr "Automatische Rotation der Logdateien geschieht nach N Minuten." -#: utils/misc/guc.c:2513 +#: utils/misc/guc.c:2701 msgid "Automatic log file rotation will occur after N kilobytes." msgstr "Automatische Rotation der Logdateien geschieht nach N Kilobytes." -#: utils/misc/guc.c:2524 +#: utils/misc/guc.c:2712 msgid "Shows the maximum number of function arguments." msgstr "Setzt die maximale Anzahl von Funktionsargumenten." -#: utils/misc/guc.c:2535 +#: utils/misc/guc.c:2723 msgid "Shows the maximum number of index keys." msgstr "Zeigt die maximale Anzahl von Indexschlüsseln." -#: utils/misc/guc.c:2546 +#: utils/misc/guc.c:2734 msgid "Shows the maximum identifier length." msgstr "Zeigt die maximale Länge von Bezeichnern." -#: utils/misc/guc.c:2557 +#: utils/misc/guc.c:2745 msgid "Shows the size of a disk block." msgstr "Zeigt die Größe eines Diskblocks." -#: utils/misc/guc.c:2568 +#: utils/misc/guc.c:2756 msgid "Shows the number of pages per disk file." msgstr "Zeigt die Anzahl Seiten pro Diskdatei." -#: utils/misc/guc.c:2579 +#: utils/misc/guc.c:2767 msgid "Shows the block size in the write ahead log." msgstr "Zeigt die Blockgröße im Write-Ahead-Log." -#: utils/misc/guc.c:2590 +#: utils/misc/guc.c:2778 msgid "Sets the time to wait before retrying to retrieve WAL after a failed attempt." msgstr "Setzt die Zeit, die gewartet wird, bevor nach einem fehlgeschlagenen Versuch neue WAL-Daten angefordert werden." -#: utils/misc/guc.c:2602 -msgid "Shows the number of pages per write ahead log segment." +#: utils/misc/guc.c:2790 +#, fuzzy +#| msgid "Shows the number of pages per write ahead log segment." +msgid "Shows the size of write ahead log segments." msgstr "Zeit die Anzahl Seiten pro Write-Ahead-Log-Segment." -#: utils/misc/guc.c:2615 +#: utils/misc/guc.c:2803 msgid "Time to sleep between autovacuum runs." msgstr "Wartezeit zwischen Autovacuum-Durchläufen." -#: utils/misc/guc.c:2625 +#: utils/misc/guc.c:2813 msgid "Minimum number of tuple updates or deletes prior to vacuum." msgstr "Mindestanzahl an geänderten oder gelöschten Tupeln vor einem Vacuum." -#: utils/misc/guc.c:2634 +#: utils/misc/guc.c:2822 msgid "Minimum number of tuple inserts, updates, or deletes prior to analyze." msgstr "Mindestanzahl an Einfüge-, Änderungs- oder Löschoperationen von einem Analyze." -#: utils/misc/guc.c:2644 +#: utils/misc/guc.c:2832 msgid "Age at which to autovacuum a table to prevent transaction ID wraparound." msgstr "Alter, nach dem eine Tabelle automatisch gevacuumt wird, um Transaktionsnummernüberlauf zu verhindern." -#: utils/misc/guc.c:2655 +#: utils/misc/guc.c:2843 msgid "Multixact age at which to autovacuum a table to prevent multixact wraparound." 
msgstr "Multixact-Alter, nach dem eine Tabelle automatisch gevacuumt wird, um Transaktionsnummernüberlauf zu verhindern." -#: utils/misc/guc.c:2665 +#: utils/misc/guc.c:2853 msgid "Sets the maximum number of simultaneously running autovacuum worker processes." msgstr "Setzt die maximale Anzahl gleichzeitig laufender Autovacuum-Worker-Prozesse." -#: utils/misc/guc.c:2675 +#: utils/misc/guc.c:2863 +#, fuzzy +#| msgid "Sets the maximum number of parallel processes per executor node." +msgid "Sets the maximum number of parallel processes per maintenance operation." +msgstr "Setzt die maximale Anzahl paralleler Prozesse pro Executor-Knoten." + +#: utils/misc/guc.c:2873 msgid "Sets the maximum number of parallel processes per executor node." msgstr "Setzt die maximale Anzahl paralleler Prozesse pro Executor-Knoten." -#: utils/misc/guc.c:2685 +#: utils/misc/guc.c:2883 #, fuzzy -#| msgid "Sets the maximum number of predicate locks per transaction." -msgid "Sets the maximum number of parallel workers than can be active at one time." -msgstr "Setzt die maximale Anzahl Prädikatsperren pro Transaktion." +#| msgid "Sets the maximum number of parallel workers than can be active at one time." +msgid "Sets the maximum number of parallel workers that can be active at one time." +msgstr "Setzt die maximale Anzahl paralleler Arbeitsprozesse, die gleichzeitig aktiv sein können." -#: utils/misc/guc.c:2695 +#: utils/misc/guc.c:2893 msgid "Sets the maximum memory to be used by each autovacuum worker process." msgstr "Setzt die maximale Speichergröße für jeden Autovacuum-Worker-Prozess." -#: utils/misc/guc.c:2706 +#: utils/misc/guc.c:2904 msgid "Time before a snapshot is too old to read pages changed after the snapshot was taken." msgstr "Zeit bevor ein Snapshot zu alt ist, um Seiten zu lesen, die geändert wurden, nachdem der Snapshot gemacht wurde." -#: utils/misc/guc.c:2707 +#: utils/misc/guc.c:2905 msgid "A value of -1 disables this feature." msgstr "Der Wert -1 schaltet dieses Feature aus." -#: utils/misc/guc.c:2717 +#: utils/misc/guc.c:2915 msgid "Time between issuing TCP keepalives." msgstr "Zeit zwischen TCP-Keepalive-Sendungen." -#: utils/misc/guc.c:2718 utils/misc/guc.c:2729 +#: utils/misc/guc.c:2916 utils/misc/guc.c:2927 msgid "A value of 0 uses the system default." msgstr "Der Wert 0 verwendet die Systemvoreinstellung." -#: utils/misc/guc.c:2728 +#: utils/misc/guc.c:2926 msgid "Time between TCP keepalive retransmits." msgstr "Zeit zwischen TCP-Keepalive-Neuübertragungen." -#: utils/misc/guc.c:2739 +#: utils/misc/guc.c:2937 msgid "SSL renegotiation is no longer supported; this can only be 0." msgstr "SSL-Renegotiation wird nicht mehr unterstützt; kann nur auf 0 gesetzt werden." -#: utils/misc/guc.c:2750 +#: utils/misc/guc.c:2948 msgid "Maximum number of TCP keepalive retransmits." msgstr "Maximale Anzahl an TCP-Keepalive-Neuübertragungen." -#: utils/misc/guc.c:2751 +#: utils/misc/guc.c:2949 msgid "This controls the number of consecutive keepalive retransmits that can be lost before a connection is considered dead. A value of 0 uses the system default." msgstr "Dies bestimmt die Anzahl von aufeinanderfolgenden Keepalive-Neuübertragungen, die verloren gehen dürfen, bis die Verbindung als tot betrachtet wird. Der Wert 0 verwendet die Betriebssystemvoreinstellung." -#: utils/misc/guc.c:2762 +#: utils/misc/guc.c:2960 msgid "Sets the maximum allowed result for exact search by GIN." msgstr "Setzt die maximal erlaubte Anzahl Ergebnisse für eine genaue Suche mit GIN." 
-#: utils/misc/guc.c:2773 +#: utils/misc/guc.c:2971 msgid "Sets the planner's assumption about the size of the disk cache." msgstr "Setzt die Annahme des Planers über die Größe des Festplatten-Caches." -#: utils/misc/guc.c:2774 +#: utils/misc/guc.c:2972 msgid "That is, the portion of the kernel's disk cache that will be used for PostgreSQL data files. This is measured in disk pages, which are normally 8 kB each." msgstr "Setzt die Annahme des Planers über die effektive Größe des Diskcaches (das heißt des Teils des Diskcaches vom Kernel, der für die Datendateien von PostgreSQL verwendet wird). Das wird in Diskseiten gemessen, welche normalerweise 8 kB groß sind." -#: utils/misc/guc.c:2786 -#, fuzzy -#| msgid "Sets the minimum size of relations to be considered for parallel scan." +#: utils/misc/guc.c:2984 msgid "Sets the minimum amount of table data for a parallel scan." -msgstr "Setzt die minimale Größe einer Relation, um für einen parallelen Scan in Betracht gezogen werden zu können." +msgstr "Setzt die Mindestmenge an Tabellendaten für einen parallelen Scan." -#: utils/misc/guc.c:2787 +#: utils/misc/guc.c:2985 msgid "If the planner estimates that it will read a number of table pages too small to reach this limit, a parallel scan will not be considered." -msgstr "" +msgstr "Wenn der Planer schätzt, dass zu wenige Tabellenseiten gelesen werden werden, um diesen Wert zu erreichen, dann wird kein paralleler Scan in Erwägung gezogen werden." -#: utils/misc/guc.c:2797 -#, fuzzy -#| msgid "Sets the minimum size of relations to be considered for parallel scan." +#: utils/misc/guc.c:2995 msgid "Sets the minimum amount of index data for a parallel scan." -msgstr "Setzt die minimale Größe einer Relation, um für einen parallelen Scan in Betracht gezogen werden zu können." +msgstr "Setzt die Mindestmenge an Indexdaten für einen parallelen Scan." -#: utils/misc/guc.c:2798 +#: utils/misc/guc.c:2996 msgid "If the planner estimates that it will read a number of index pages too small to reach this limit, a parallel scan will not be considered." -msgstr "" +msgstr "Wenn der Planer schätzt, dass zu wenige Indexseiten gelesen werden werden, um diesen Wert zu erreichen, dann wird kein paralleler Scan in Erwägung gezogen werden." -#: utils/misc/guc.c:2809 +#: utils/misc/guc.c:3007 msgid "Shows the server version as an integer." msgstr "Zeigt die Serverversion als Zahl." -#: utils/misc/guc.c:2820 +#: utils/misc/guc.c:3018 msgid "Log the use of temporary files larger than this number of kilobytes." msgstr "Schreibt Meldungen über die Verwendung von temporären Dateien in den Log, wenn sie größer als diese Anzahl an Kilobytes sind." -#: utils/misc/guc.c:2821 +#: utils/misc/guc.c:3019 msgid "Zero logs all files. The default is -1 (turning this feature off)." msgstr "Null loggt alle Dateien. Die Standardeinstellung ist -1 (wodurch dieses Feature ausgeschaltet wird)." -#: utils/misc/guc.c:2831 +#: utils/misc/guc.c:3029 msgid "Sets the size reserved for pg_stat_activity.query, in bytes." msgstr "Setzt die für pg_stat_activity.query reservierte Größe, in Bytes." -#: utils/misc/guc.c:2846 +#: utils/misc/guc.c:3040 msgid "Sets the maximum size of the pending list for GIN index." msgstr "Setzt die maximale Größe der Pending-Liste eines GIN-Index." -#: utils/misc/guc.c:2866 +#: utils/misc/guc.c:3060 msgid "Sets the planner's estimate of the cost of a sequentially fetched disk page." msgstr "Setzt den vom Planer geschätzten Aufwand, um eine sequenzielle Diskseite zu lesen."
-#: utils/misc/guc.c:2876 +#: utils/misc/guc.c:3070 msgid "Sets the planner's estimate of the cost of a nonsequentially fetched disk page." msgstr "Setzt den vom Planer geschätzten Aufwand, um eine nichtsequenzielle Diskseite zu lesen." -#: utils/misc/guc.c:2886 +#: utils/misc/guc.c:3080 msgid "Sets the planner's estimate of the cost of processing each tuple (row)." msgstr "Setzt den vom Planer geschätzten Aufwand für die Verarbeitung einer Zeile." -#: utils/misc/guc.c:2896 +#: utils/misc/guc.c:3090 msgid "Sets the planner's estimate of the cost of processing each index entry during an index scan." msgstr "Setzt den vom Planer geschätzten Aufwand für die Verarbeitung eines Indexeintrags während eines Index-Scans." -#: utils/misc/guc.c:2906 +#: utils/misc/guc.c:3100 msgid "Sets the planner's estimate of the cost of processing each operator or function call." msgstr "Setzt den vom Planer geschätzten Aufwand für die Verarbeitung eines Operators oder Funktionsaufrufs." -#: utils/misc/guc.c:2916 +#: utils/misc/guc.c:3110 msgid "Sets the planner's estimate of the cost of passing each tuple (row) from worker to master backend." msgstr "Setzt den vom Planer geschätzten Aufwand, um eine Zeile vom Arbeitsprozess and das Master-Backend zu senden." -#: utils/misc/guc.c:2926 +#: utils/misc/guc.c:3120 msgid "Sets the planner's estimate of the cost of starting up worker processes for parallel query." msgstr "Setzt den vom Planer geschätzten Aufwand für das Starten von Arbeitsprozessen für parallele Anfragen." -#: utils/misc/guc.c:2937 +#: utils/misc/guc.c:3131 +msgid "Perform JIT compilation if query is more expensive." +msgstr "" + +#: utils/misc/guc.c:3132 +msgid "-1 disables JIT compilation." +msgstr "" + +#: utils/misc/guc.c:3141 +msgid "Optimize JITed functions if query is more expensive." +msgstr "" + +#: utils/misc/guc.c:3142 +#, fuzzy +#| msgid "Enables genetic query optimization." +msgid "-1 disables optimization." +msgstr "Ermöglicht genetische Anfrageoptimierung." + +#: utils/misc/guc.c:3151 +msgid "Perform JIT inlining if query is more expensive." +msgstr "" + +#: utils/misc/guc.c:3152 +msgid "-1 disables inlining." +msgstr "" + +#: utils/misc/guc.c:3161 msgid "Sets the planner's estimate of the fraction of a cursor's rows that will be retrieved." msgstr "Setzt den vom Planer geschätzten Anteil der Cursor-Zeilen, die ausgelesen werden werden." -#: utils/misc/guc.c:2948 +#: utils/misc/guc.c:3172 msgid "GEQO: selective pressure within the population." msgstr "GEQO: selektiver Auswahldruck in der Bevölkerung." -#: utils/misc/guc.c:2958 +#: utils/misc/guc.c:3182 msgid "GEQO: seed for random path selection." msgstr "GEQO: Ausgangswert für die zufällige Pfadauswahl." -#: utils/misc/guc.c:2968 +#: utils/misc/guc.c:3192 msgid "Multiple of the average buffer usage to free per round." msgstr "Vielfaches der durchschnittlichen freizugebenden Pufferverwendung pro Runde." -#: utils/misc/guc.c:2978 +#: utils/misc/guc.c:3202 msgid "Sets the seed for random-number generation." msgstr "Setzt den Ausgangswert für die Zufallszahlenerzeugung." -#: utils/misc/guc.c:2989 +#: utils/misc/guc.c:3213 msgid "Number of tuple updates or deletes prior to vacuum as a fraction of reltuples." msgstr "Anzahl geänderter oder gelöschter Tupel vor einem Vacuum, relativ zu reltuples." -#: utils/misc/guc.c:2998 +#: utils/misc/guc.c:3222 msgid "Number of tuple inserts, updates, or deletes prior to analyze as a fraction of reltuples." 
msgstr "Anzahl eingefügter, geänderter oder gelöschter Tupel vor einem Analyze, relativ zu reltuples." -#: utils/misc/guc.c:3008 +#: utils/misc/guc.c:3232 msgid "Time spent flushing dirty buffers during checkpoint, as fraction of checkpoint interval." msgstr "Zeit, die damit verbracht wird, modifizierte Puffer während eines Checkpoints zurückzuschreiben, als Bruchteil des Checkpoint-Intervalls." -#: utils/misc/guc.c:3027 +#: utils/misc/guc.c:3242 +#, fuzzy +#| msgid "Number of tuple updates or deletes prior to vacuum as a fraction of reltuples." +msgid "Number of tuple inserts prior to index cleanup as a fraction of reltuples." +msgstr "Anzahl geänderter oder gelöschter Tupel vor einem Vacuum, relativ zu reltuples." + +#: utils/misc/guc.c:3261 msgid "Sets the shell command that will be called to archive a WAL file." msgstr "Setzt den Shell-Befehl, der aufgerufen wird, um eine WAL-Datei zu archivieren." -#: utils/misc/guc.c:3037 +#: utils/misc/guc.c:3271 msgid "Sets the client's character set encoding." msgstr "Setzt die Zeichensatzkodierung des Clients." -#: utils/misc/guc.c:3048 +#: utils/misc/guc.c:3282 msgid "Controls information prefixed to each log line." msgstr "Bestimmt die Informationen, die vor jede Logzeile geschrieben werden." -#: utils/misc/guc.c:3049 +#: utils/misc/guc.c:3283 msgid "If blank, no prefix is used." msgstr "Wenn leer, dann wird kein Präfix verwendet." -#: utils/misc/guc.c:3058 +#: utils/misc/guc.c:3292 msgid "Sets the time zone to use in log messages." msgstr "Setzt die in Logmeldungen verwendete Zeitzone." -#: utils/misc/guc.c:3068 +#: utils/misc/guc.c:3302 msgid "Sets the display format for date and time values." msgstr "Setzt das Ausgabeformat für Datums- und Zeitwerte." -#: utils/misc/guc.c:3069 +#: utils/misc/guc.c:3303 msgid "Also controls interpretation of ambiguous date inputs." msgstr "Kontrolliert auch die Interpretation von zweideutigen Datumseingaben." -#: utils/misc/guc.c:3080 +#: utils/misc/guc.c:3314 msgid "Sets the default tablespace to create tables and indexes in." msgstr "Setzt den Standard-Tablespace für Tabellen und Indexe." -#: utils/misc/guc.c:3081 +#: utils/misc/guc.c:3315 msgid "An empty string selects the database's default tablespace." msgstr "Eine leere Zeichenkette wählt den Standard-Tablespace der Datenbank." -#: utils/misc/guc.c:3091 +#: utils/misc/guc.c:3325 msgid "Sets the tablespace(s) to use for temporary tables and sort files." msgstr "Setzt den oder die Tablespaces für temporäre Tabellen und Sortierdateien." -#: utils/misc/guc.c:3102 +#: utils/misc/guc.c:3336 msgid "Sets the path for dynamically loadable modules." msgstr "Setzt den Pfad für ladbare dynamische Bibliotheken." -#: utils/misc/guc.c:3103 +#: utils/misc/guc.c:3337 msgid "If a dynamically loadable module needs to be opened and the specified name does not have a directory component (i.e., the name does not contain a slash), the system will search this path for the specified file." msgstr "Wenn ein dynamisch ladbares Modul geöffnet werden muss und der angegebene Name keine Verzeichniskomponente hat (das heißt er enthält keinen Schrägstrich), dann sucht das System in diesem Pfad nach der angegebenen Datei." -#: utils/misc/guc.c:3116 +#: utils/misc/guc.c:3350 msgid "Sets the location of the Kerberos server key file." msgstr "Setzt den Ort der Kerberos-Server-Schlüsseldatei." -#: utils/misc/guc.c:3127 +#: utils/misc/guc.c:3361 msgid "Sets the Bonjour service name." msgstr "Setzt den Bonjour-Servicenamen." 
-#: utils/misc/guc.c:3139 +#: utils/misc/guc.c:3373 msgid "Shows the collation order locale." msgstr "Zeigt die Locale für die Sortierreihenfolge." -#: utils/misc/guc.c:3150 +#: utils/misc/guc.c:3384 msgid "Shows the character classification and case conversion locale." msgstr "Zeigt die Locale für Zeichenklassifizierung und Groß-/Kleinschreibung." -#: utils/misc/guc.c:3161 +#: utils/misc/guc.c:3395 msgid "Sets the language in which messages are displayed." msgstr "Setzt die Sprache, in der Mitteilungen ausgegeben werden." -#: utils/misc/guc.c:3171 +#: utils/misc/guc.c:3405 msgid "Sets the locale for formatting monetary amounts." msgstr "Setzt die Locale für die Formatierung von Geldbeträgen." -#: utils/misc/guc.c:3181 +#: utils/misc/guc.c:3415 msgid "Sets the locale for formatting numbers." msgstr "Setzt die Locale für die Formatierung von Zahlen." -#: utils/misc/guc.c:3191 +#: utils/misc/guc.c:3425 msgid "Sets the locale for formatting date and time values." msgstr "Setzt die Locale für die Formatierung von Datums- und Zeitwerten." -#: utils/misc/guc.c:3201 +#: utils/misc/guc.c:3435 msgid "Lists shared libraries to preload into each backend." msgstr "Listet dynamische Bibliotheken, die vorab in jeden Serverprozess geladen werden." -#: utils/misc/guc.c:3212 +#: utils/misc/guc.c:3446 msgid "Lists shared libraries to preload into server." msgstr "Listet dynamische Bibliotheken, die vorab in den Server geladen werden." -#: utils/misc/guc.c:3223 +#: utils/misc/guc.c:3457 msgid "Lists unprivileged shared libraries to preload into each backend." msgstr "Listet unprivilegierte dynamische Bibliotheken, die vorab in jeden Serverprozess geladen werden." -#: utils/misc/guc.c:3234 +#: utils/misc/guc.c:3468 msgid "Sets the schema search order for names that are not schema-qualified." msgstr "Setzt die Schemasuchreihenfolge für Namen ohne Schemaqualifikation." -#: utils/misc/guc.c:3246 +#: utils/misc/guc.c:3480 msgid "Sets the server (database) character set encoding." msgstr "Setzt die Zeichensatzkodierung des Servers (der Datenbank)." -#: utils/misc/guc.c:3258 +#: utils/misc/guc.c:3492 msgid "Shows the server version." msgstr "Zeigt die Serverversion." -#: utils/misc/guc.c:3270 +#: utils/misc/guc.c:3504 msgid "Sets the current role." msgstr "Setzt die aktuelle Rolle." -#: utils/misc/guc.c:3282 +#: utils/misc/guc.c:3516 msgid "Sets the session user name." msgstr "Setzt den Sitzungsbenutzernamen." -#: utils/misc/guc.c:3293 +#: utils/misc/guc.c:3527 msgid "Sets the destination for server log output." msgstr "Setzt das Ziel für die Serverlogausgabe." -#: utils/misc/guc.c:3294 +#: utils/misc/guc.c:3528 msgid "Valid values are combinations of \"stderr\", \"syslog\", \"csvlog\", and \"eventlog\", depending on the platform." msgstr "Gültige Werte sind Kombinationen von »stderr«, »syslog«, »csvlog« und »eventlog«, je nach Plattform." -#: utils/misc/guc.c:3305 +#: utils/misc/guc.c:3539 msgid "Sets the destination directory for log files." msgstr "Bestimmt das Zielverzeichnis für Logdateien." -#: utils/misc/guc.c:3306 +#: utils/misc/guc.c:3540 msgid "Can be specified as relative to the data directory or as absolute path." msgstr "Kann relativ zum Datenverzeichnis oder als absoluter Pfad angegeben werden." -#: utils/misc/guc.c:3316 +#: utils/misc/guc.c:3550 msgid "Sets the file name pattern for log files." msgstr "Bestimmt das Dateinamenmuster für Logdateien." -#: utils/misc/guc.c:3327 +#: utils/misc/guc.c:3561 msgid "Sets the program name used to identify PostgreSQL messages in syslog." 
msgstr "Setzt den Programmnamen, mit dem PostgreSQL-Meldungen im Syslog identifiziert werden." -#: utils/misc/guc.c:3338 +#: utils/misc/guc.c:3572 msgid "Sets the application name used to identify PostgreSQL messages in the event log." msgstr "Setzt den Programmnamen, mit dem PostgreSQL-Meldungen im Ereignisprotokoll identifiziert werden." -#: utils/misc/guc.c:3349 +#: utils/misc/guc.c:3583 msgid "Sets the time zone for displaying and interpreting time stamps." msgstr "Setzt die Zeitzone, in der Zeitangaben interpretiert und ausgegeben werden." -#: utils/misc/guc.c:3359 +#: utils/misc/guc.c:3593 msgid "Selects a file of time zone abbreviations." msgstr "Wählt eine Datei mit Zeitzonenabkürzungen." -#: utils/misc/guc.c:3369 +#: utils/misc/guc.c:3603 msgid "Sets the current transaction's isolation level." msgstr "Zeigt den Isolationsgrad der aktuellen Transaktion." -#: utils/misc/guc.c:3380 +#: utils/misc/guc.c:3614 msgid "Sets the owning group of the Unix-domain socket." msgstr "Setzt die Eigentümergruppe der Unix-Domain-Socket." -#: utils/misc/guc.c:3381 +#: utils/misc/guc.c:3615 msgid "The owning user of the socket is always the user that starts the server." msgstr "Der Eigentümer ist immer der Benutzer, der den Server startet." -#: utils/misc/guc.c:3391 +#: utils/misc/guc.c:3625 msgid "Sets the directories where Unix-domain sockets will be created." msgstr "Setzt die Verzeichnisse, in denen Unix-Domain-Sockets erzeugt werden sollen." -#: utils/misc/guc.c:3406 +#: utils/misc/guc.c:3640 msgid "Sets the host name or IP address(es) to listen to." msgstr "Setzt den Hostnamen oder die IP-Adresse(n), auf der auf Verbindungen gewartet wird." -#: utils/misc/guc.c:3421 +#: utils/misc/guc.c:3655 msgid "Sets the server's data directory." msgstr "Setzt das Datenverzeichnis des Servers." -#: utils/misc/guc.c:3432 +#: utils/misc/guc.c:3666 msgid "Sets the server's main configuration file." msgstr "Setzt die Hauptkonfigurationsdatei des Servers." -#: utils/misc/guc.c:3443 +#: utils/misc/guc.c:3677 msgid "Sets the server's \"hba\" configuration file." msgstr "Setzt die »hba«-Konfigurationsdatei des Servers." -#: utils/misc/guc.c:3454 +#: utils/misc/guc.c:3688 msgid "Sets the server's \"ident\" configuration file." msgstr "Setzt die »ident«-Konfigurationsdatei des Servers." -#: utils/misc/guc.c:3465 +#: utils/misc/guc.c:3699 msgid "Writes the postmaster PID to the specified file." msgstr "Schreibt die Postmaster-PID in die angegebene Datei." -#: utils/misc/guc.c:3476 +#: utils/misc/guc.c:3710 msgid "Location of the SSL server certificate file." msgstr "Ort der SSL-Serverzertifikatsdatei." -#: utils/misc/guc.c:3486 +#: utils/misc/guc.c:3720 msgid "Location of the SSL server private key file." msgstr "Setzt den Ort der Datei mit dem privaten SSL-Server-Schlüssel." -#: utils/misc/guc.c:3496 +#: utils/misc/guc.c:3730 msgid "Location of the SSL certificate authority file." msgstr "Ort der SSL-Certificate-Authority-Datei." -#: utils/misc/guc.c:3506 +#: utils/misc/guc.c:3740 msgid "Location of the SSL certificate revocation list file." msgstr "Ort der SSL-Certificate-Revocation-List-Datei." -#: utils/misc/guc.c:3516 +#: utils/misc/guc.c:3750 msgid "Writes temporary statistics files to the specified directory." msgstr "Schreibt temporäre Statistikdateien in das angegebene Verzeichnis." -#: utils/misc/guc.c:3527 +#: utils/misc/guc.c:3761 msgid "Number of synchronous standbys and list of names of potential synchronous ones." msgstr "Anzahl synchroner Standbys und Liste der Namen der möglichen synchronen Standbys." 
-#: utils/misc/guc.c:3538 +#: utils/misc/guc.c:3772 msgid "Sets default text search configuration." msgstr "Setzt die vorgegebene Textsuchekonfiguration." -#: utils/misc/guc.c:3548 +#: utils/misc/guc.c:3782 msgid "Sets the list of allowed SSL ciphers." msgstr "Setzt die Liste der erlaubten SSL-Verschlüsselungsalgorithmen." -#: utils/misc/guc.c:3563 +#: utils/misc/guc.c:3797 msgid "Sets the curve to use for ECDH." msgstr "Setzt die für ECDH zu verwendende Kurve." -#: utils/misc/guc.c:3578 +#: utils/misc/guc.c:3812 +msgid "Location of the SSL DH parameters file." +msgstr "Setzt den Ort der SSL-DH-Parameter-Datei." + +#: utils/misc/guc.c:3823 +msgid "Command to obtain passphrases for SSL." +msgstr "Befehl zum Einlesen von Passphrasen für SSL." + +#: utils/misc/guc.c:3833 msgid "Sets the application name to be reported in statistics and logs." msgstr "Setzt den Anwendungsnamen, der in Statistiken und Logs verzeichnet wird." -#: utils/misc/guc.c:3589 +#: utils/misc/guc.c:3844 msgid "Sets the name of the cluster, which is included in the process title." msgstr "Setzt den Namen des Clusters, welcher im Prozesstitel angezeigt wird." -#: utils/misc/guc.c:3600 +#: utils/misc/guc.c:3855 msgid "Sets the WAL resource managers for which WAL consistency checks are done." -msgstr "" +msgstr "Setzt die WAL-Resource-Manager, für die WAL-Konsistenzprüfungen durchgeführt werden." -#: utils/misc/guc.c:3601 +#: utils/misc/guc.c:3856 msgid "Full-page images will be logged for all data blocks and cross-checked against the results of WAL replay." +msgstr "Volle Seitenabbilder werden für alle Datenblöcke geloggt und gegen die Resultate der WAL-Wiederherstellung geprüft." + +#: utils/misc/guc.c:3866 +msgid "JIT provider to use." msgstr "" -#: utils/misc/guc.c:3620 +#: utils/misc/guc.c:3886 msgid "Sets whether \"\\'\" is allowed in string literals." msgstr "Bestimmt, ob »\\'« in Zeichenkettenkonstanten erlaubt ist." -#: utils/misc/guc.c:3630 +#: utils/misc/guc.c:3896 msgid "Sets the output format for bytea." msgstr "Setzt das Ausgabeformat für bytea." -#: utils/misc/guc.c:3640 +#: utils/misc/guc.c:3906 msgid "Sets the message levels that are sent to the client." msgstr "Setzt die Meldungstypen, die an den Client gesendet werden." -#: utils/misc/guc.c:3641 utils/misc/guc.c:3694 utils/misc/guc.c:3705 -#: utils/misc/guc.c:3771 +#: utils/misc/guc.c:3907 utils/misc/guc.c:3960 utils/misc/guc.c:3971 +#: utils/misc/guc.c:4037 msgid "Each level includes all the levels that follow it. The later the level, the fewer messages are sent." msgstr "Jeder Wert schließt alle ihm folgenden Werte mit ein. Je weiter hinten der Wert steht, desto weniger Meldungen werden gesendet werden." -#: utils/misc/guc.c:3651 +#: utils/misc/guc.c:3917 msgid "Enables the planner to use constraints to optimize queries." msgstr "Ermöglicht dem Planer die Verwendung von Constraints, um Anfragen zu optimieren." -#: utils/misc/guc.c:3652 +#: utils/misc/guc.c:3918 msgid "Table scans will be skipped if their constraints guarantee that no rows match the query." msgstr "Tabellen-Scans werden übersprungen, wenn deren Constraints garantieren, dass keine Zeile mit der Abfrage übereinstimmt." -#: utils/misc/guc.c:3662 +#: utils/misc/guc.c:3928 msgid "Sets the transaction isolation level of each new transaction." msgstr "Setzt den Transaktionsisolationsgrad neuer Transaktionen." -#: utils/misc/guc.c:3672 +#: utils/misc/guc.c:3938 msgid "Sets the display format for interval values." msgstr "Setzt das Ausgabeformat für Intervallwerte." 
-#: utils/misc/guc.c:3683 +#: utils/misc/guc.c:3949 msgid "Sets the verbosity of logged messages." msgstr "Setzt den Detailgrad von geloggten Meldungen." -#: utils/misc/guc.c:3693 +#: utils/misc/guc.c:3959 msgid "Sets the message levels that are logged." msgstr "Setzt die Meldungstypen, die geloggt werden." -#: utils/misc/guc.c:3704 +#: utils/misc/guc.c:3970 msgid "Causes all statements generating error at or above this level to be logged." msgstr "Schreibt alle Anweisungen, die einen Fehler auf dieser Stufe oder höher verursachen, in den Log." -#: utils/misc/guc.c:3715 +#: utils/misc/guc.c:3981 msgid "Sets the type of statements logged." msgstr "Setzt die Anweisungsarten, die geloggt werden." -#: utils/misc/guc.c:3725 +#: utils/misc/guc.c:3991 msgid "Sets the syslog \"facility\" to be used when syslog enabled." msgstr "Setzt die zu verwendende Syslog-»Facility«, wenn Syslog angeschaltet ist." -#: utils/misc/guc.c:3740 +#: utils/misc/guc.c:4006 msgid "Sets the session's behavior for triggers and rewrite rules." msgstr "Setzt das Sitzungsverhalten für Trigger und Regeln." -#: utils/misc/guc.c:3750 +#: utils/misc/guc.c:4016 msgid "Sets the current transaction's synchronization level." msgstr "Setzt den Synchronisationsgrad der aktuellen Transaktion." -#: utils/misc/guc.c:3760 +#: utils/misc/guc.c:4026 msgid "Allows archiving of WAL files using archive_command." msgstr "Erlaubt die Archivierung von WAL-Dateien mittels archive_command." -#: utils/misc/guc.c:3770 +#: utils/misc/guc.c:4036 msgid "Enables logging of recovery-related debugging information." msgstr "Ermöglicht das Loggen von Debug-Informationen über die Wiederherstellung." -#: utils/misc/guc.c:3786 +#: utils/misc/guc.c:4052 msgid "Collects function-level statistics on database activity." msgstr "Sammelt Statistiken auf Funktionsebene über Datenbankaktivität." -#: utils/misc/guc.c:3796 +#: utils/misc/guc.c:4062 msgid "Set the level of information written to the WAL." msgstr "Setzt den Umfang der in den WAL geschriebenen Informationen." -#: utils/misc/guc.c:3806 +#: utils/misc/guc.c:4072 msgid "Selects the dynamic shared memory implementation used." msgstr "Wählt die zu verwendende Implementierung von dynamischem Shared Memory." -#: utils/misc/guc.c:3816 +#: utils/misc/guc.c:4082 msgid "Selects the method used for forcing WAL updates to disk." msgstr "Wählt die Methode, um das Schreiben von WAL-Änderungen auf die Festplatte zu erzwingen." -#: utils/misc/guc.c:3826 +#: utils/misc/guc.c:4092 msgid "Sets how binary values are to be encoded in XML." msgstr "Setzt, wie binäre Werte in XML kodiert werden." -#: utils/misc/guc.c:3836 +#: utils/misc/guc.c:4102 msgid "Sets whether XML data in implicit parsing and serialization operations is to be considered as documents or content fragments." msgstr "Setzt, ob XML-Daten in impliziten Parse- und Serialisierungsoperationen als Dokument oder Fragment betrachtet werden sollen." -#: utils/misc/guc.c:3847 -msgid "Use of huge pages on Linux." -msgstr "Huge Pages auf Linux verwenden." +#: utils/misc/guc.c:4113 +msgid "Use of huge pages on Linux or Windows." +msgstr "Huge Pages auf Linux oder Windows verwenden." -#: utils/misc/guc.c:3857 +#: utils/misc/guc.c:4123 msgid "Forces use of parallel query facilities." msgstr "Verwendung der Einrichtungen für parallele Anfragen erzwingen." -#: utils/misc/guc.c:3858 +#: utils/misc/guc.c:4124 msgid "If possible, run query using a parallel worker and with parallel restrictions." 
msgstr "Wenn möglich werden Anfragen in einem parallelen Arbeitsprozess und mit parallelen Beschränkungen ausgeführt." -#: utils/misc/guc.c:3867 +#: utils/misc/guc.c:4133 msgid "Encrypt passwords." msgstr "Verschlüsselt Passwörter." -#: utils/misc/guc.c:3868 +#: utils/misc/guc.c:4134 msgid "When a password is specified in CREATE USER or ALTER USER without writing either ENCRYPTED or UNENCRYPTED, this parameter determines whether the password is to be encrypted." msgstr "Wenn in CREATE USER oder ALTER USER ein Passwort ohne ENCRYPTED oder UNENCRYPTED angegeben ist, bestimmt dieser Parameter, ob das Passwort verschlüsselt wird." -#: utils/misc/guc.c:4670 +#: utils/misc/guc.c:4936 #, c-format msgid "%s: could not access directory \"%s\": %s\n" msgstr "%s: konnte nicht auf Verzeichnis »%s« zugreifen: %s\n" -#: utils/misc/guc.c:4675 +#: utils/misc/guc.c:4941 #, c-format msgid "Run initdb or pg_basebackup to initialize a PostgreSQL data directory.\n" msgstr "Führen Sie initdb oder pg_basebackup aus, um ein PostgreSQL-Datenverzeichnis zu initialisieren.\n" -#: utils/misc/guc.c:4695 +#: utils/misc/guc.c:4961 #, c-format msgid "" "%s does not know where to find the server configuration file.\n" @@ -24029,12 +25767,12 @@ msgstr "" "Sie müssen die Kommandozeilenoption --config-file oder -D angegeben oder\n" "die Umgebungsvariable PGDATA setzen.\n" -#: utils/misc/guc.c:4714 +#: utils/misc/guc.c:4980 #, c-format msgid "%s: could not access the server configuration file \"%s\": %s\n" msgstr "%s: konnte nicht auf die Serverkonfigurationsdatei »%s« zugreifen: %s\n" -#: utils/misc/guc.c:4740 +#: utils/misc/guc.c:5006 #, c-format msgid "" "%s does not know where to find the database system data.\n" @@ -24044,7 +25782,7 @@ msgstr "" "zu finden sind. Sie können dies mit »data_directory« in »%s«, mit der\n" "Kommandozeilenoption -D oder der Umgebungsvariable PGDATA angeben.\n" -#: utils/misc/guc.c:4788 +#: utils/misc/guc.c:5054 #, c-format msgid "" "%s does not know where to find the \"hba\" configuration file.\n" @@ -24054,7 +25792,7 @@ msgstr "" "Sie können dies mit »hba_file« in »%s«, mit der\n" "Kommandozeilenoption -D oder der Umgebungsvariable PGDATA angeben.\n" -#: utils/misc/guc.c:4811 +#: utils/misc/guc.c:5077 #, c-format msgid "" "%s does not know where to find the \"ident\" configuration file.\n" @@ -24064,138 +25802,138 @@ msgstr "" "Sie können dies mit »ident_file« in »%s«, mit der\n" "Kommandozeilenoption -D oder der Umgebungsvariable PGDATA angeben.\n" -#: utils/misc/guc.c:5485 utils/misc/guc.c:5532 +#: utils/misc/guc.c:5752 utils/misc/guc.c:5799 msgid "Value exceeds integer range." msgstr "Wert überschreitet Bereich für ganze Zahlen." -#: utils/misc/guc.c:5755 +#: utils/misc/guc.c:6022 #, c-format msgid "parameter \"%s\" requires a numeric value" msgstr "Parameter »%s« erfordert einen numerischen Wert" -#: utils/misc/guc.c:5764 +#: utils/misc/guc.c:6031 #, c-format msgid "%g is outside the valid range for parameter \"%s\" (%g .. %g)" msgstr "%g ist außerhalb des gültigen Bereichs für Parameter »%s« (%g ... 
%g)" -#: utils/misc/guc.c:5917 utils/misc/guc.c:7260 +#: utils/misc/guc.c:6184 utils/misc/guc.c:7554 #, c-format msgid "cannot set parameters during a parallel operation" msgstr "während einer parallelen Operation können keine Parameter gesetzt werden" -#: utils/misc/guc.c:5924 utils/misc/guc.c:6675 utils/misc/guc.c:6727 -#: utils/misc/guc.c:7088 utils/misc/guc.c:7847 utils/misc/guc.c:8015 -#: utils/misc/guc.c:9690 +#: utils/misc/guc.c:6191 utils/misc/guc.c:6943 utils/misc/guc.c:6996 +#: utils/misc/guc.c:7047 utils/misc/guc.c:7383 utils/misc/guc.c:8150 +#: utils/misc/guc.c:8318 utils/misc/guc.c:9992 #, c-format msgid "unrecognized configuration parameter \"%s\"" msgstr "unbekannter Konfigurationsparameter »%s«" -#: utils/misc/guc.c:5939 utils/misc/guc.c:7100 +#: utils/misc/guc.c:6206 utils/misc/guc.c:7395 #, c-format msgid "parameter \"%s\" cannot be changed" msgstr "Parameter »%s« kann nicht geändert werden" -#: utils/misc/guc.c:5972 +#: utils/misc/guc.c:6239 #, c-format msgid "parameter \"%s\" cannot be changed now" msgstr "Parameter »%s« kann jetzt nicht geändert werden" -#: utils/misc/guc.c:5990 utils/misc/guc.c:6036 utils/misc/guc.c:9706 +#: utils/misc/guc.c:6257 utils/misc/guc.c:6304 utils/misc/guc.c:10008 #, c-format msgid "permission denied to set parameter \"%s\"" msgstr "keine Berechtigung, um Parameter »%s« zu setzen" -#: utils/misc/guc.c:6026 +#: utils/misc/guc.c:6294 #, c-format msgid "parameter \"%s\" cannot be set after connection start" msgstr "Parameter »%s« kann nach Start der Verbindung nicht geändert werden" -#: utils/misc/guc.c:6074 +#: utils/misc/guc.c:6342 #, c-format msgid "cannot set parameter \"%s\" within security-definer function" msgstr "Parameter »%s« kann nicht in einer Security-Definer-Funktion gesetzt werden" -#: utils/misc/guc.c:6683 utils/misc/guc.c:6731 utils/misc/guc.c:8021 +#: utils/misc/guc.c:6951 utils/misc/guc.c:7001 utils/misc/guc.c:8325 #, c-format -msgid "must be superuser to examine \"%s\"" -msgstr "nur Superuser können »%s« ansehen" +msgid "must be superuser or a member of pg_read_all_settings to examine \"%s\"" +msgstr "nur Superuser oder Mitglieder von pg_read_all_settings können »%s« ansehen" -#: utils/misc/guc.c:6797 +#: utils/misc/guc.c:7092 #, c-format msgid "SET %s takes only one argument" msgstr "SET %s darf nur ein Argument haben" -#: utils/misc/guc.c:7048 +#: utils/misc/guc.c:7343 #, c-format msgid "must be superuser to execute ALTER SYSTEM command" msgstr "nur Superuser können den Befehl ALTER SYSTEM ausführen" -#: utils/misc/guc.c:7133 +#: utils/misc/guc.c:7428 #, c-format msgid "parameter value for ALTER SYSTEM must not contain a newline" msgstr "Parameterwert für ALTER SYSTEM darf keine Newline enthalten" -#: utils/misc/guc.c:7178 +#: utils/misc/guc.c:7473 #, c-format msgid "could not parse contents of file \"%s\"" msgstr "konnte Inhalt der Datei »%s« nicht parsen" -#: utils/misc/guc.c:7336 +#: utils/misc/guc.c:7630 #, c-format msgid "SET LOCAL TRANSACTION SNAPSHOT is not implemented" msgstr "SET LOCAL TRANSACTION SNAPSHOT ist nicht implementiert" -#: utils/misc/guc.c:7420 +#: utils/misc/guc.c:7714 #, c-format msgid "SET requires parameter name" msgstr "SET benötigt Parameternamen" -#: utils/misc/guc.c:7544 +#: utils/misc/guc.c:7847 #, c-format msgid "attempt to redefine parameter \"%s\"" msgstr "Versuch, den Parameter »%s« zu redefinieren" -#: utils/misc/guc.c:9323 +#: utils/misc/guc.c:9625 #, c-format msgid "parameter \"%s\" could not be set" msgstr "Parameter »%s« kann nicht gesetzt werden" -#: utils/misc/guc.c:9410 +#: 
utils/misc/guc.c:9712 #, c-format msgid "could not parse setting for parameter \"%s\"" msgstr "konnte Wert von Parameter »%s« nicht lesen" -#: utils/misc/guc.c:9768 utils/misc/guc.c:9802 +#: utils/misc/guc.c:10070 utils/misc/guc.c:10104 #, c-format msgid "invalid value for parameter \"%s\": %d" msgstr "ungültiger Wert für Parameter »%s«: %d" -#: utils/misc/guc.c:9836 +#: utils/misc/guc.c:10138 #, c-format msgid "invalid value for parameter \"%s\": %g" msgstr "ungültiger Wert für Parameter »%s«: %g" -#: utils/misc/guc.c:10106 +#: utils/misc/guc.c:10408 #, c-format msgid "\"temp_buffers\" cannot be changed after any temporary tables have been accessed in the session." msgstr "»temp_buffers« kann nicht geändert werden, nachdem in der Sitzung auf temporäre Tabellen zugriffen wurde." -#: utils/misc/guc.c:10118 +#: utils/misc/guc.c:10420 #, c-format msgid "Bonjour is not supported by this build" msgstr "Bonjour wird von dieser Installation nicht unterstützt" -#: utils/misc/guc.c:10131 +#: utils/misc/guc.c:10433 #, c-format msgid "SSL is not supported by this build" msgstr "SSL wird von dieser Installation nicht unterstützt" -#: utils/misc/guc.c:10143 +#: utils/misc/guc.c:10445 #, c-format msgid "Cannot enable parameter when \"log_statement_stats\" is true." msgstr "Kann Parameter nicht einschalten, wenn »log_statement_stats« an ist." -#: utils/misc/guc.c:10155 +#: utils/misc/guc.c:10457 #, c-format msgid "Cannot enable \"log_statement_stats\" when \"log_parser_stats\", \"log_planner_stats\", or \"log_executor_stats\" is true." msgstr "Kann »log_statement_stats« nicht einschalten, wenn »log_parser_stats«, »log_planner_stats« oder »log_executor_stats« an ist." @@ -24205,23 +25943,28 @@ msgstr "Kann »log_statement_stats« nicht einschalten, wenn »log_parser_stats msgid "internal error: unrecognized run-time parameter type\n" msgstr "interner Fehler: unbekannter Parametertyp\n" -#: utils/misc/pg_config.c:61 +#: utils/misc/pg_config.c:60 #, c-format msgid "query-specified return tuple and function return type are not compatible" msgstr "in der Anfrage angegebenes Rückgabetupel und Rückgabetyp der Funktion sind nicht kompatibel" -#: utils/misc/pg_controldata.c:58 utils/misc/pg_controldata.c:138 -#: utils/misc/pg_controldata.c:244 utils/misc/pg_controldata.c:311 +#: utils/misc/pg_controldata.c:59 utils/misc/pg_controldata.c:137 +#: utils/misc/pg_controldata.c:241 utils/misc/pg_controldata.c:308 #, c-format msgid "calculated CRC checksum does not match value stored in file" msgstr "berechnete CRC-Prüfsumme stimmt nicht mit dem Wert in der Datei überein" -#: utils/misc/rls.c:128 +#: utils/misc/pg_rusage.c:64 +#, c-format +msgid "CPU: user: %d.%02d s, system: %d.%02d s, elapsed: %d.%02d s" +msgstr "CPU: Benutzer: %d,%02d s, System: %d,%02d s, verstrichen: %d,%02d s" + +#: utils/misc/rls.c:127 #, c-format msgid "query would be affected by row-level security policy for table \"%s\"" msgstr "Policy für Sicherheit auf Zeilenebene für Tabelle »%s« würde Auswirkung auf die Anfrage haben" -#: utils/misc/rls.c:130 +#: utils/misc/rls.c:129 #, c-format msgid "To disable the policy for the table's owner, use ALTER TABLE NO FORCE ROW LEVEL SECURITY." msgstr "Um die Policy für den Tabelleneigentümer zu deaktivieren, verwenden Sie ALTER TABLE NO FORCE ROW LEVEL SECURITY." 
@@ -24296,249 +26039,180 @@ msgstr "Zeile ist zu lang in Zeitzonendatei »%s«, Zeile %d" msgid "@INCLUDE without file name in time zone file \"%s\", line %d" msgstr "@INCLUDE ohne Dateiname in Zeitzonendatei »%s«, Zeile %d" -#: utils/mmgr/aset.c:405 +#: utils/mmgr/aset.c:483 utils/mmgr/generation.c:250 utils/mmgr/slab.c:240 #, c-format msgid "Failed while creating memory context \"%s\"." msgstr "Fehler während der Erzeugung des Speicherkontexts »%s«." -#: utils/mmgr/dsa.c:518 -#, fuzzy, c-format -#| msgid "could not set up XML error handler" -msgid "could not attach to dsa_handle" -msgstr "konnte XML-Fehlerbehandlung nicht einrichten" +#: utils/mmgr/dsa.c:518 utils/mmgr/dsa.c:1323 +#, c-format +msgid "could not attach to dynamic shared area" +msgstr "konnte nicht an dynamische Shared Area anbinden" -#: utils/mmgr/dsa.c:714 utils/mmgr/dsa.c:796 +#: utils/mmgr/mcxt.c:797 utils/mmgr/mcxt.c:833 utils/mmgr/mcxt.c:871 +#: utils/mmgr/mcxt.c:909 utils/mmgr/mcxt.c:945 utils/mmgr/mcxt.c:976 +#: utils/mmgr/mcxt.c:1012 utils/mmgr/mcxt.c:1064 utils/mmgr/mcxt.c:1099 +#: utils/mmgr/mcxt.c:1134 #, fuzzy, c-format #| msgid "Failed on request of size %zu." -msgid "Failed on DSA request of size %zu." -msgstr "Fehler bei Anfrage mit Größe %zu." - -#: utils/mmgr/mcxt.c:726 utils/mmgr/mcxt.c:761 utils/mmgr/mcxt.c:798 -#: utils/mmgr/mcxt.c:835 utils/mmgr/mcxt.c:869 utils/mmgr/mcxt.c:898 -#: utils/mmgr/mcxt.c:932 utils/mmgr/mcxt.c:983 utils/mmgr/mcxt.c:1017 -#: utils/mmgr/mcxt.c:1051 -#, c-format -msgid "Failed on request of size %zu." +msgid "Failed on request of size %zu in memory context \"%s\"." msgstr "Fehler bei Anfrage mit Größe %zu." -#: utils/mmgr/portalmem.c:186 +#: utils/mmgr/portalmem.c:187 #, c-format msgid "cursor \"%s\" already exists" msgstr "Cursor »%s« existiert bereits" -#: utils/mmgr/portalmem.c:190 +#: utils/mmgr/portalmem.c:191 #, c-format msgid "closing existing cursor \"%s\"" msgstr "bestehender Cursor »%s« wird geschlossen" -#: utils/mmgr/portalmem.c:394 +#: utils/mmgr/portalmem.c:398 #, c-format msgid "portal \"%s\" cannot be run" msgstr "Portal »%s« kann nicht ausgeführt werden" -#: utils/mmgr/portalmem.c:474 +#: utils/mmgr/portalmem.c:476 +#, fuzzy, c-format +#| msgid "cannot drop active portal \"%s\"" +msgid "cannot drop pinned portal \"%s\"" +msgstr "aktives Portal »%s« kann nicht gelöscht werden" + +#: utils/mmgr/portalmem.c:484 #, c-format msgid "cannot drop active portal \"%s\"" msgstr "aktives Portal »%s« kann nicht gelöscht werden" -#: utils/mmgr/portalmem.c:678 +#: utils/mmgr/portalmem.c:719 #, c-format msgid "cannot PREPARE a transaction that has created a cursor WITH HOLD" msgstr "PREPARE kann nicht in einer Transaktion ausgeführt werden, die einen Cursor mit WITH HOLD erzeugt hat" -#: utils/sort/logtape.c:252 +#: utils/mmgr/portalmem.c:1253 +#, c-format +msgid "cannot perform transaction commands inside a cursor loop that is not read-only" +msgstr "" + +#: utils/sort/logtape.c:276 #, c-format msgid "could not read block %ld of temporary file: %m" msgstr "konnte Block %ld von temporärer Datei nicht lesen: %m" -#: utils/sort/tuplesort.c:3056 +#: utils/sort/logtape.c:439 +#, fuzzy, c-format +#| msgid "could not open temporary file \"%s\": %s\n" +msgid "could not determine size of temporary file \"%s\"" +msgstr "konnte temporäre Datei »%s« nicht öffnen: %s\n" + +#: utils/sort/sharedtuplestore.c:208 +#, fuzzy, c-format +#| msgid "could not write to hash-join temporary file: %m" +msgid "could not write to temporary file: %m" +msgstr "konnte nicht in temporäre Datei für Hash-Verbund 
schreiben: %m" + +#: utils/sort/sharedtuplestore.c:437 utils/sort/sharedtuplestore.c:446 +#: utils/sort/sharedtuplestore.c:469 utils/sort/sharedtuplestore.c:486 +#: utils/sort/sharedtuplestore.c:503 utils/sort/sharedtuplestore.c:575 +#: utils/sort/sharedtuplestore.c:581 +#, fuzzy, c-format +#| msgid "could not read from tuplestore temporary file: %m" +msgid "could not read from shared tuplestore temporary file" +msgstr "konnte nicht aus temporärer Datei für Tuplestore lesen: %m" + +#: utils/sort/sharedtuplestore.c:492 +#, fuzzy, c-format +#| msgid "could not seek in tuplestore temporary file: %m" +msgid "unexpected chunk in shared tuplestore temporary file" +msgstr "konnte Positionszeiger in temporärer Datei für Tuplestore nicht setzen: %m" + +#: utils/sort/tuplesort.c:2967 #, c-format msgid "cannot have more than %d runs for an external sort" msgstr "ein externer Sortiervorgang kann nicht mehr als %d Durchgänge haben" -#: utils/sort/tuplesort.c:4125 +#: utils/sort/tuplesort.c:4051 #, c-format msgid "could not create unique index \"%s\"" msgstr "konnte Unique Index »%s« nicht erstellen" -#: utils/sort/tuplesort.c:4127 +#: utils/sort/tuplesort.c:4053 #, c-format msgid "Key %s is duplicated." msgstr "Schlüssel %s ist doppelt vorhanden." -#: utils/sort/tuplesort.c:4128 +#: utils/sort/tuplesort.c:4054 #, c-format msgid "Duplicate keys exist." msgstr "Es existieren doppelte Schlüssel." -#: utils/sort/tuplestore.c:515 utils/sort/tuplestore.c:525 -#: utils/sort/tuplestore.c:852 utils/sort/tuplestore.c:956 -#: utils/sort/tuplestore.c:1020 utils/sort/tuplestore.c:1037 -#: utils/sort/tuplestore.c:1239 utils/sort/tuplestore.c:1304 -#: utils/sort/tuplestore.c:1313 +#: utils/sort/tuplestore.c:518 utils/sort/tuplestore.c:528 +#: utils/sort/tuplestore.c:869 utils/sort/tuplestore.c:973 +#: utils/sort/tuplestore.c:1037 utils/sort/tuplestore.c:1054 +#: utils/sort/tuplestore.c:1256 utils/sort/tuplestore.c:1321 +#: utils/sort/tuplestore.c:1330 #, c-format msgid "could not seek in tuplestore temporary file: %m" msgstr "konnte Positionszeiger in temporärer Datei für Tuplestore nicht setzen: %m" -#: utils/sort/tuplestore.c:1460 utils/sort/tuplestore.c:1533 -#: utils/sort/tuplestore.c:1539 +#: utils/sort/tuplestore.c:1477 utils/sort/tuplestore.c:1550 +#: utils/sort/tuplestore.c:1556 #, c-format msgid "could not read from tuplestore temporary file: %m" msgstr "konnte nicht aus temporärer Datei für Tuplestore lesen: %m" -#: utils/sort/tuplestore.c:1501 utils/sort/tuplestore.c:1506 -#: utils/sort/tuplestore.c:1512 +#: utils/sort/tuplestore.c:1518 utils/sort/tuplestore.c:1523 +#: utils/sort/tuplestore.c:1529 #, c-format msgid "could not write to tuplestore temporary file: %m" msgstr "konnte nicht in temporäre Datei für Tuplestore schreiben: %m" -#: utils/time/snapmgr.c:618 +#: utils/time/snapmgr.c:622 #, c-format msgid "The source transaction is not running anymore." msgstr "Die Quelltransaktion läuft nicht mehr." 
-#: utils/time/snapmgr.c:1190 +#: utils/time/snapmgr.c:1200 #, c-format msgid "cannot export a snapshot from a subtransaction" msgstr "aus einer Subtransaktion kann kein Snapshot exportiert werden" -#: utils/time/snapmgr.c:1339 utils/time/snapmgr.c:1344 -#: utils/time/snapmgr.c:1349 utils/time/snapmgr.c:1364 -#: utils/time/snapmgr.c:1369 utils/time/snapmgr.c:1374 -#: utils/time/snapmgr.c:1473 utils/time/snapmgr.c:1489 -#: utils/time/snapmgr.c:1514 +#: utils/time/snapmgr.c:1359 utils/time/snapmgr.c:1364 +#: utils/time/snapmgr.c:1369 utils/time/snapmgr.c:1384 +#: utils/time/snapmgr.c:1389 utils/time/snapmgr.c:1394 +#: utils/time/snapmgr.c:1409 utils/time/snapmgr.c:1414 +#: utils/time/snapmgr.c:1419 utils/time/snapmgr.c:1519 +#: utils/time/snapmgr.c:1535 utils/time/snapmgr.c:1560 #, c-format msgid "invalid snapshot data in file \"%s\"" msgstr "ungültige Snapshot-Daten in Datei »%s«" -#: utils/time/snapmgr.c:1411 +#: utils/time/snapmgr.c:1456 #, c-format msgid "SET TRANSACTION SNAPSHOT must be called before any query" msgstr "SET TRANSACTION SNAPSHOT muss vor allen Anfragen aufgerufen werden" -#: utils/time/snapmgr.c:1420 +#: utils/time/snapmgr.c:1465 #, c-format msgid "a snapshot-importing transaction must have isolation level SERIALIZABLE or REPEATABLE READ" msgstr "eine Snapshot-importierende Transaktion muss Isolationsgrad SERIALIZABLE oder REPEATABLE READ haben" -#: utils/time/snapmgr.c:1429 utils/time/snapmgr.c:1438 +#: utils/time/snapmgr.c:1474 utils/time/snapmgr.c:1483 #, c-format msgid "invalid snapshot identifier: \"%s\"" msgstr "ungültiger Snapshot-Bezeichner: »%s«" -#: utils/time/snapmgr.c:1527 +#: utils/time/snapmgr.c:1573 #, c-format msgid "a serializable transaction cannot import a snapshot from a non-serializable transaction" msgstr "eine serialisierbare Transaktion kann keinen Snapshot aus einer nicht-serialisierbaren Transaktion importieren" -#: utils/time/snapmgr.c:1531 +#: utils/time/snapmgr.c:1577 #, c-format msgid "a non-read-only serializable transaction cannot import a snapshot from a read-only transaction" msgstr "eine serialisierbare Transaktion, die nicht im Read-Only-Modus ist, kann keinen Snapshot aus einer Read-Only-Transaktion importieren" -#: utils/time/snapmgr.c:1546 +#: utils/time/snapmgr.c:1592 #, c-format msgid "cannot import a snapshot from a different database" msgstr "kann keinen Snapshot aus einer anderen Datenbank importieren" - -#~ msgid "" -#~ "WARNING: Calculated CRC checksum does not match value stored in file.\n" -#~ "Either the file is corrupt, or it has a different layout than this program\n" -#~ "is expecting. The results below are untrustworthy.\n" -#~ "\n" -#~ msgstr "" -#~ "WARNUNG: Berechnete CRC-Prüfsumme stimmt nicht mit dem Wert in der Datei\n" -#~ "überein. Entweder ist die Datei kaputt oder sie hat ein anderes Layout\n" -#~ "als von diesem Programm erwartet. Die Ergebnisse unten sind nicht\n" -#~ "verlässlich.\n" -#~ "\n" - -#~ msgid "Proceeding with relation creation anyway." -#~ msgstr "Relation wird trotzdem erzeugt." 
- -#~ msgid "default expression must not return a set" -#~ msgstr "Vorgabeausdruck kann keine Ergebnismenge zurückgeben" - -#~ msgid "changing return type of function %s from \"opaque\" to \"language_handler\"" -#~ msgstr "ändere Rückgabetyp von Funktion %s von »opaque« in »language_handler«" - -#~ msgid "changing return type of function %s from \"opaque\" to \"trigger\"" -#~ msgstr "ändere Rückgabetyp von Funktion %s von »opaque« in »trigger«" - -#~ msgid "functions and operators can take at most one set argument" -#~ msgstr "Funktionen und Operatoren können höchstens ein Mengenargument haben" - -#~ msgid "IS DISTINCT FROM does not support set arguments" -#~ msgstr "IS DISTINCT FROM unterstützt keine Mengenargumente" - -#~ msgid "op ANY/ALL (array) does not support set arguments" -#~ msgstr "op ANY/ALL (array) unterstützt keine Mengenargumente" - -#~ msgid "NULLIF does not support set arguments" -#~ msgstr "NULLIF unterstützt keine Mengenargumente" - -#~ msgid "hostssl requires SSL to be turned on" -#~ msgstr "für hostssl muss SSL angeschaltet sein" - -#~ msgid "could not create %s socket: %m" -#~ msgstr "konnte %s-Socket nicht erstellen: %m" - -#~ msgid "could not bind %s socket: %m" -#~ msgstr "konnte %s-Socket nicht binden: %m" - -#~ msgid "could not listen on %s socket: %m" -#~ msgstr "konnte nicht auf %s-Socket hören: %m" - -#~ msgid "WHERE CURRENT OF is not supported on a view with no underlying relation" -#~ msgstr "WHERE CURRENT OF wird nicht unterstützt für Sichten ohne zugrundeliegende Relation" - -#~ msgid "WHERE CURRENT OF is not supported on a view with more than one underlying relation" -#~ msgstr "WHERE CURRENT OF wird nicht unterstützt für Sichten mit mehr als einer zugrundeliegenden Relation" - -#~ msgid "WHERE CURRENT OF is not supported on a view with grouping or aggregation" -#~ msgstr "WHERE CURRENT OF wird nicht unterstützt für Sichten mit Gruppierung oder Aggregierung" - -#~ msgid "DEFAULT can only appear in a VALUES list within INSERT" -#~ msgstr "DEFAULT kann nur in VALUES-Liste innerhalb von INSERT auftreten" - -#~ msgid "argument of %s must be type boolean, not type %s" -#~ msgstr "Argument von %s muss Typ boolean haben, nicht Typ %s" - -#~ msgid "argument declared \"anyrange\" is not consistent with argument declared \"anyelement\"" -#~ msgstr "als »anyrange« deklariertes Argument ist nicht mit als »anyelement« deklariertem Argument konsistent" - -#~ msgid "index expression cannot return a set" -#~ msgstr "Indexausdruck kann keine Ergebnismenge zurückgeben" - -#~ msgid "transform expression must not return a set" -#~ msgstr "Umwandlungsausdruck kann keine Ergebnismenge zurückgeben" - -#~ msgid "autovacuum: found orphan temp table \"%s\".\"%s\" in database \"%s\"" -#~ msgstr "Autovacuum: verwaiste temporäre Tabelle »%s.%s« in Datenbank »%s« gefunden" - -#~ msgid "invalid socket: %s" -#~ msgstr "ungültiges Socket: %s" - -#~ msgid "rule \"%s\" does not exist" -#~ msgstr "Regel »%s« existiert nicht" - -#~ msgid "there are multiple rules named \"%s\"" -#~ msgstr "es gibt mehrere Regeln namens »%s«" - -#~ msgid "Specify a relation name as well as a rule name." -#~ msgstr "Geben Sie einen Relationsnamen und einen Regelnamen an." 
- -#~ msgid "not enough shared memory for elements of data structure \"%s\" (%zu bytes requested)" -#~ msgstr "nicht genug Shared-Memory für Elemente der Datenstruktur »%s« (%zu Bytes angefordert)" - -#~ msgid "corrupted item pointer: offset = %u, length = %u" -#~ msgstr "verfälschter Item-Zeiger: offset = %u, length = %u" - -#~ msgid "\"TZ\"/\"tz\"/\"OF\" format patterns are not supported in to_date" -#~ msgstr "Formatmuster »TZ«/»tz«/»OF« werden in to_date nicht unterstützt" - -#~ msgid "invalid input syntax for integer: \"%s\"" -#~ msgstr "ungültige Eingabesyntax für ganze Zahl: »%s«" - -#~ msgid "Causes subtables to be included by default in various commands." -#~ msgstr "Schließt abgeleitete Tabellen in diverse Befehle automatisch ein." diff --git a/src/backend/po/fr.po b/src/backend/po/fr.po index a375bc57cb..d77cd32c34 100644 --- a/src/backend/po/fr.po +++ b/src/backend/po/fr.po @@ -8,8 +8,8 @@ msgid "" msgstr "" "Project-Id-Version: PostgreSQL 9.6\n" "Report-Msgid-Bugs-To: pgsql-bugs@postgresql.org\n" -"POT-Creation-Date: 2017-08-04 02:39+0000\n" -"PO-Revision-Date: 2017-08-04 20:59+0200\n" +"POT-Creation-Date: 2018-02-23 04:09+0000\n" +"PO-Revision-Date: 2018-02-23 18:27+0100\n" "Last-Translator: Guillaume Lelarge \n" "Language-Team: French \n" "Language: fr\n" @@ -17,13 +17,13 @@ msgstr "" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" "Plural-Forms: nplurals=2; plural=(n > 1);\n" -"X-Generator: Poedit 2.0.2\n" +"X-Generator: Poedit 2.0.3\n" #: ../common/config_info.c:130 ../common/config_info.c:138 ../common/config_info.c:146 ../common/config_info.c:154 ../common/config_info.c:162 ../common/config_info.c:170 ../common/config_info.c:178 ../common/config_info.c:186 ../common/config_info.c:194 msgid "not recorded" msgstr "non enregistré" -#: ../common/controldata_utils.c:57 commands/copy.c:3117 commands/extension.c:3330 utils/adt/genfile.c:135 +#: ../common/controldata_utils.c:57 commands/copy.c:3145 commands/extension.c:3330 utils/adt/genfile.c:135 #, c-format msgid "could not open file \"%s\" for reading: %m" msgstr "n'a pas pu ouvrir le fichier « %s » pour une lecture : %m" @@ -33,8 +33,8 @@ msgstr "n'a pas pu ouvrir le fichier « %s » pour une lecture : %m" msgid "%s: could not open file \"%s\" for reading: %s\n" msgstr "%s : n'a pas pu ouvrir le fichier « %s » en lecture : %s\n" -#: ../common/controldata_utils.c:71 access/transam/timeline.c:348 access/transam/xlog.c:3384 access/transam/xlog.c:10787 access/transam/xlog.c:10800 access/transam/xlog.c:11192 access/transam/xlog.c:11235 access/transam/xlog.c:11274 access/transam/xlog.c:11317 access/transam/xlogfuncs.c:668 access/transam/xlogfuncs.c:687 commands/extension.c:3340 libpq/hba.c:499 replication/logical/origin.c:661 replication/logical/origin.c:691 -#: replication/logical/reorderbuffer.c:3064 replication/walsender.c:506 storage/file/copydir.c:178 utils/adt/genfile.c:152 utils/adt/misc.c:924 +#: ../common/controldata_utils.c:71 access/transam/timeline.c:348 access/transam/xlog.c:3384 access/transam/xlog.c:10802 access/transam/xlog.c:10815 access/transam/xlog.c:11232 access/transam/xlog.c:11275 access/transam/xlog.c:11314 access/transam/xlog.c:11357 access/transam/xlogfuncs.c:668 access/transam/xlogfuncs.c:687 commands/extension.c:3340 libpq/hba.c:499 replication/logical/origin.c:702 replication/logical/origin.c:732 +#: replication/logical/reorderbuffer.c:3079 replication/walsender.c:507 storage/file/copydir.c:204 utils/adt/genfile.c:152 utils/adt/misc.c:924 #, c-format msgid "could not read 
file \"%s\": %m" msgstr "n'a pas pu lire le fichier « %s » : %m" @@ -152,9 +152,9 @@ msgstr "n'a pas pu lire le répertoire « %s » : %s\n" msgid "could not close directory \"%s\": %s\n" msgstr "n'a pas pu fermer le répertoire « %s » : %s\n" -#: ../common/psprintf.c:179 ../port/path.c:630 ../port/path.c:668 ../port/path.c:685 access/transam/twophase.c:1306 access/transam/xlog.c:6355 lib/stringinfo.c:258 libpq/auth.c:1108 libpq/auth.c:1474 libpq/auth.c:1542 libpq/auth.c:2058 postmaster/bgworker.c:337 postmaster/bgworker.c:908 postmaster/postmaster.c:2391 postmaster/postmaster.c:2413 postmaster/postmaster.c:3975 postmaster/postmaster.c:4683 postmaster/postmaster.c:4758 -#: postmaster/postmaster.c:5436 postmaster/postmaster.c:5773 replication/libpqwalreceiver/libpqwalreceiver.c:251 replication/logical/logical.c:170 storage/buffer/localbuf.c:436 storage/file/fd.c:773 storage/file/fd.c:1201 storage/file/fd.c:1319 storage/file/fd.c:2044 storage/ipc/procarray.c:1057 storage/ipc/procarray.c:1545 storage/ipc/procarray.c:1552 storage/ipc/procarray.c:1969 storage/ipc/procarray.c:2580 utils/adt/formatting.c:1579 -#: utils/adt/formatting.c:1703 utils/adt/formatting.c:1828 utils/adt/pg_locale.c:468 utils/adt/pg_locale.c:652 utils/adt/regexp.c:219 utils/adt/varlena.c:4585 utils/adt/varlena.c:4606 utils/fmgr/dfmgr.c:221 utils/hash/dynahash.c:444 utils/hash/dynahash.c:553 utils/hash/dynahash.c:1065 utils/mb/mbutils.c:376 utils/mb/mbutils.c:709 utils/misc/guc.c:3998 utils/misc/guc.c:4014 utils/misc/guc.c:4027 utils/misc/guc.c:6976 utils/misc/tzparser.c:468 +#: ../common/psprintf.c:179 ../port/path.c:630 ../port/path.c:668 ../port/path.c:685 access/transam/twophase.c:1306 access/transam/xlog.c:6363 lib/stringinfo.c:258 libpq/auth.c:1126 libpq/auth.c:1492 libpq/auth.c:1560 libpq/auth.c:2076 postmaster/bgworker.c:337 postmaster/bgworker.c:908 postmaster/postmaster.c:2439 postmaster/postmaster.c:2461 postmaster/postmaster.c:4023 postmaster/postmaster.c:4731 postmaster/postmaster.c:4806 +#: postmaster/postmaster.c:5484 postmaster/postmaster.c:5821 replication/libpqwalreceiver/libpqwalreceiver.c:256 replication/logical/logical.c:170 storage/buffer/localbuf.c:436 storage/file/fd.c:772 storage/file/fd.c:1200 storage/file/fd.c:1318 storage/file/fd.c:2049 storage/ipc/procarray.c:1058 storage/ipc/procarray.c:1546 storage/ipc/procarray.c:1553 storage/ipc/procarray.c:1970 storage/ipc/procarray.c:2581 utils/adt/formatting.c:1579 +#: utils/adt/formatting.c:1703 utils/adt/formatting.c:1828 utils/adt/pg_locale.c:468 utils/adt/pg_locale.c:652 utils/adt/regexp.c:219 utils/adt/varlena.c:4589 utils/adt/varlena.c:4610 utils/fmgr/dfmgr.c:221 utils/hash/dynahash.c:444 utils/hash/dynahash.c:553 utils/hash/dynahash.c:1065 utils/mb/mbutils.c:376 utils/mb/mbutils.c:709 utils/misc/guc.c:3998 utils/misc/guc.c:4014 utils/misc/guc.c:4027 utils/misc/guc.c:6976 utils/misc/tzparser.c:468 #: utils/mmgr/aset.c:404 utils/mmgr/dsa.c:713 utils/mmgr/dsa.c:795 utils/mmgr/mcxt.c:725 utils/mmgr/mcxt.c:760 utils/mmgr/mcxt.c:797 utils/mmgr/mcxt.c:834 utils/mmgr/mcxt.c:868 utils/mmgr/mcxt.c:897 utils/mmgr/mcxt.c:931 utils/mmgr/mcxt.c:982 utils/mmgr/mcxt.c:1016 utils/mmgr/mcxt.c:1050 #, c-format msgid "out of memory" @@ -173,7 +173,7 @@ msgstr "Les noms de fork valides sont « main », « fsm », « vm » et « init #: ../common/restricted_token.c:68 #, c-format msgid "%s: WARNING: cannot create restricted tokens on this platform\n" -msgstr "%s : ATTENTION : ne peut pas crér les jetons restreints sur cette plateforme\n" +msgstr "%s : ATTENTION : ne peut pas créer 
les jetons restreints sur cette plateforme\n" #: ../common/restricted_token.c:77 #, c-format @@ -227,7 +227,7 @@ msgstr "mot de passe trop long" msgid "could not look up effective user ID %ld: %s" msgstr "n'a pas pu trouver l'identifiant réel %ld de l'utilisateur : %s" -#: ../common/username.c:45 libpq/auth.c:2005 +#: ../common/username.c:45 libpq/auth.c:2023 msgid "user does not exist" msgstr "l'utilisateur n'existe pas" @@ -368,7 +368,7 @@ msgstr "« %s » n'est pas un index BRIN" msgid "could not open parent table of index %s" msgstr "n'a pas pu ouvrir la table parent de l'index %s" -#: access/brin/brin_pageops.c:76 access/brin/brin_pageops.c:358 access/brin/brin_pageops.c:824 access/gin/ginentrypage.c:110 access/gist/gist.c:1363 access/nbtree/nbtinsert.c:577 access/nbtree/nbtsort.c:488 access/spgist/spgdoinsert.c:1933 +#: access/brin/brin_pageops.c:76 access/brin/brin_pageops.c:364 access/brin/brin_pageops.c:830 access/gin/ginentrypage.c:110 access/gist/gist.c:1364 access/nbtree/nbtinsert.c:577 access/nbtree/nbtsort.c:488 access/spgist/spgdoinsert.c:1933 #, c-format msgid "index row size %zu exceeds maximum %zu for index \"%s\"" msgstr "la taille de la ligne index, %zu, dépasse le maximum, %zu, pour l'index « %s »" @@ -542,7 +542,7 @@ msgstr "L'attribut « %s » du type %s ne correspond pas à l'attribut correspon msgid "Attribute \"%s\" of type %s does not exist in type %s." msgstr "L'attribut « %s » du type %s n'existe pas dans le type %s." -#: access/common/tupdesc.c:728 parser/parse_clause.c:841 parser/parse_relation.c:1544 +#: access/common/tupdesc.c:728 parser/parse_clause.c:812 parser/parse_relation.c:1538 #, c-format msgid "column \"%s\" cannot be declared SETOF" msgstr "la colonne « %s » ne peut pas être déclarée SETOF" @@ -557,22 +557,22 @@ msgstr "la posting list est trop longue" msgid "Reduce maintenance_work_mem." msgstr "Réduisez le maintenance_work_mem." -#: access/gin/ginfast.c:991 access/transam/xlog.c:10208 access/transam/xlog.c:10726 access/transam/xlogfuncs.c:296 access/transam/xlogfuncs.c:323 access/transam/xlogfuncs.c:362 access/transam/xlogfuncs.c:383 access/transam/xlogfuncs.c:404 access/transam/xlogfuncs.c:474 access/transam/xlogfuncs.c:530 +#: access/gin/ginfast.c:995 access/transam/xlog.c:10216 access/transam/xlog.c:10741 access/transam/xlogfuncs.c:296 access/transam/xlogfuncs.c:323 access/transam/xlogfuncs.c:362 access/transam/xlogfuncs.c:383 access/transam/xlogfuncs.c:404 access/transam/xlogfuncs.c:474 access/transam/xlogfuncs.c:530 #, c-format msgid "recovery is in progress" msgstr "restauration en cours" -#: access/gin/ginfast.c:992 +#: access/gin/ginfast.c:996 #, c-format msgid "GIN pending list cannot be cleaned up during recovery." 
msgstr "la pending list GIN ne peut pas être nettoyée lors de la restauration" -#: access/gin/ginfast.c:999 +#: access/gin/ginfast.c:1003 #, c-format msgid "\"%s\" is not a GIN index" msgstr "« %s » n'est pas un index GIN" -#: access/gin/ginfast.c:1010 +#: access/gin/ginfast.c:1014 #, c-format msgid "cannot access temporary indexes of other sessions" msgstr "ne peut pas accéder aux index temporaires d'autres sessions" @@ -606,19 +606,19 @@ msgstr "" msgid "operator class \"%s\" of access method %s is missing support function %d or %d" msgstr "la classe d'opérateur « %s » de la méthode d'accès %s nécessite la fonction de support manquante %d ou %d" -#: access/gist/gist.c:706 access/gist/gistvacuum.c:258 +#: access/gist/gist.c:707 access/gist/gistvacuum.c:258 #, c-format msgid "index \"%s\" contains an inner tuple marked as invalid" msgstr "l'index « %s » contient une ligne interne marquée comme invalide" -#: access/gist/gist.c:708 access/gist/gistvacuum.c:260 +#: access/gist/gist.c:709 access/gist/gistvacuum.c:260 #, c-format msgid "This is caused by an incomplete page split at crash recovery before upgrading to PostgreSQL 9.1." msgstr "" "Ceci est dû à la division d'une page incomplète à la restauration suite à un\n" "crash avant la mise à jour en 9.1." -#: access/gist/gist.c:709 access/gist/gistutil.c:739 access/gist/gistutil.c:750 access/gist/gistvacuum.c:261 access/hash/hashutil.c:241 access/hash/hashutil.c:252 access/hash/hashutil.c:264 access/hash/hashutil.c:285 access/nbtree/nbtpage.c:519 access/nbtree/nbtpage.c:530 +#: access/gist/gist.c:710 access/gist/gistutil.c:739 access/gist/gistutil.c:750 access/gist/gistvacuum.c:261 access/hash/hashutil.c:241 access/hash/hashutil.c:252 access/hash/hashutil.c:264 access/hash/hashutil.c:285 access/nbtree/nbtpage.c:519 access/nbtree/nbtpage.c:530 #, c-format msgid "Please REINDEX it." msgstr "Merci d'exécuter REINDEX sur cet objet." 
@@ -727,7 +727,7 @@ msgstr "il manque un opérateur inter-type pour la famille d'opérateur « %s » msgid "\"%s\" is an index" msgstr "« %s » est un index" -#: access/heap/heapam.c:1298 access/heap/heapam.c:1326 access/heap/heapam.c:1358 catalog/aclchk.c:1779 commands/tablecmds.c:9885 commands/tablecmds.c:13115 +#: access/heap/heapam.c:1298 access/heap/heapam.c:1326 access/heap/heapam.c:1358 catalog/aclchk.c:1779 commands/tablecmds.c:9912 commands/tablecmds.c:13142 #, c-format msgid "\"%s\" is a composite type" msgstr "« %s » est un type composite" @@ -747,7 +747,7 @@ msgstr "ne peut pas supprimer les lignes lors d'une opération parallèle" msgid "attempted to delete invisible tuple" msgstr "a tenté de supprimer la ligne invisible" -#: access/heap/heapam.c:3514 access/heap/heapam.c:6247 +#: access/heap/heapam.c:3514 access/heap/heapam.c:6248 #, c-format msgid "cannot update tuples during a parallel operation" msgstr "ne peut pas mettre à jour les lignes lors d'une opération parallèle" @@ -757,55 +757,55 @@ msgstr "ne peut pas mettre à jour les lignes lors d'une opération parallèle" msgid "attempted to update invisible tuple" msgstr "a tenté de mettre à jour la ligne invisible" -#: access/heap/heapam.c:4937 access/heap/heapam.c:4975 access/heap/heapam.c:5227 executor/execMain.c:2602 +#: access/heap/heapam.c:4938 access/heap/heapam.c:4976 access/heap/heapam.c:5228 executor/execMain.c:2631 #, c-format msgid "could not obtain lock on row in relation \"%s\"" msgstr "n'a pas pu obtenir un verrou sur la relation « %s »" -#: access/heap/hio.c:322 access/heap/rewriteheap.c:666 +#: access/heap/hio.c:322 access/heap/rewriteheap.c:669 #, c-format msgid "row is too big: size %zu, maximum size %zu" msgstr "la ligne est trop grande : taille %zu, taille maximale %zu" -#: access/heap/rewriteheap.c:926 +#: access/heap/rewriteheap.c:929 #, c-format msgid "could not write to file \"%s\", wrote %d of %d: %m" msgstr "n'a pas pu écrire le fichier « %s », a écrit %d de %d : %m" -#: access/heap/rewriteheap.c:966 access/heap/rewriteheap.c:1183 access/heap/rewriteheap.c:1282 access/transam/timeline.c:412 access/transam/timeline.c:492 access/transam/xlog.c:3249 access/transam/xlog.c:3417 replication/logical/snapbuild.c:1630 replication/slot.c:1290 replication/slot.c:1377 storage/file/fd.c:631 storage/file/fd.c:3180 storage/smgr/md.c:1044 storage/smgr/md.c:1277 storage/smgr/md.c:1450 utils/misc/guc.c:6998 +#: access/heap/rewriteheap.c:969 access/heap/rewriteheap.c:1186 access/heap/rewriteheap.c:1285 access/transam/timeline.c:412 access/transam/timeline.c:492 access/transam/xlog.c:3249 access/transam/xlog.c:3417 replication/logical/snapbuild.c:1630 replication/slot.c:1291 replication/slot.c:1378 storage/file/fd.c:630 storage/file/fd.c:3202 storage/smgr/md.c:1044 storage/smgr/md.c:1277 storage/smgr/md.c:1450 utils/misc/guc.c:6998 #, c-format msgid "could not fsync file \"%s\": %m" msgstr "n'a pas pu synchroniser sur disque (fsync) le fichier « %s » : %m" -#: access/heap/rewriteheap.c:1021 access/heap/rewriteheap.c:1141 access/transam/timeline.c:315 access/transam/timeline.c:467 access/transam/xlog.c:3202 access/transam/xlog.c:3355 access/transam/xlog.c:10543 access/transam/xlog.c:10581 access/transam/xlog.c:10966 postmaster/postmaster.c:4450 replication/logical/origin.c:535 replication/slot.c:1242 storage/file/copydir.c:162 storage/smgr/md.c:327 utils/time/snapmgr.c:1297 +#: access/heap/rewriteheap.c:1024 access/heap/rewriteheap.c:1144 access/transam/timeline.c:315 access/transam/timeline.c:467 access/transam/xlog.c:3202 
access/transam/xlog.c:3355 access/transam/xlog.c:10551 access/transam/xlog.c:10589 access/transam/xlog.c:10993 postmaster/postmaster.c:4498 replication/logical/origin.c:576 replication/slot.c:1243 storage/file/copydir.c:176 storage/smgr/md.c:327 utils/time/snapmgr.c:1297 #, c-format msgid "could not create file \"%s\": %m" msgstr "n'a pas pu créer le fichier « %s » : %m" -#: access/heap/rewriteheap.c:1151 +#: access/heap/rewriteheap.c:1154 #, c-format msgid "could not truncate file \"%s\" to %u: %m" msgstr "n'a pas pu tronquer le fichier « %s » en %u : %m" -#: access/heap/rewriteheap.c:1159 replication/walsender.c:486 storage/smgr/md.c:1949 +#: access/heap/rewriteheap.c:1162 replication/walsender.c:487 storage/smgr/md.c:1949 #, c-format msgid "could not seek to end of file \"%s\": %m" msgstr "n'a pas pu trouver la fin du fichier « %s » : %m" -#: access/heap/rewriteheap.c:1171 access/transam/timeline.c:370 access/transam/timeline.c:405 access/transam/timeline.c:484 access/transam/xlog.c:3238 access/transam/xlog.c:3408 postmaster/postmaster.c:4460 postmaster/postmaster.c:4470 replication/logical/origin.c:544 replication/logical/origin.c:583 replication/logical/origin.c:599 replication/logical/snapbuild.c:1612 replication/slot.c:1273 storage/file/copydir.c:191 +#: access/heap/rewriteheap.c:1174 access/transam/timeline.c:370 access/transam/timeline.c:405 access/transam/timeline.c:484 access/transam/xlog.c:3238 access/transam/xlog.c:3408 postmaster/postmaster.c:4508 postmaster/postmaster.c:4518 replication/logical/origin.c:585 replication/logical/origin.c:624 replication/logical/origin.c:640 replication/logical/snapbuild.c:1612 replication/slot.c:1274 storage/file/copydir.c:217 #: utils/init/miscinit.c:1249 utils/init/miscinit.c:1260 utils/init/miscinit.c:1268 utils/misc/guc.c:6959 utils/misc/guc.c:6990 utils/misc/guc.c:8840 utils/misc/guc.c:8854 utils/time/snapmgr.c:1302 utils/time/snapmgr.c:1309 #, c-format msgid "could not write to file \"%s\": %m" msgstr "n'a pas pu écrire dans le fichier « %s » : %m" -#: access/heap/rewriteheap.c:1257 access/transam/xlogarchive.c:113 access/transam/xlogarchive.c:467 postmaster/postmaster.c:1257 postmaster/syslogger.c:1371 replication/logical/origin.c:522 replication/logical/reorderbuffer.c:2595 replication/logical/reorderbuffer.c:2652 replication/logical/snapbuild.c:1560 replication/logical/snapbuild.c:1936 replication/slot.c:1350 storage/file/fd.c:682 storage/ipc/dsm.c:327 storage/smgr/md.c:426 +#: access/heap/rewriteheap.c:1260 access/transam/xlogarchive.c:113 access/transam/xlogarchive.c:467 postmaster/postmaster.c:1259 postmaster/syslogger.c:1371 replication/logical/origin.c:563 replication/logical/reorderbuffer.c:2610 replication/logical/reorderbuffer.c:2667 replication/logical/snapbuild.c:1560 replication/logical/snapbuild.c:1936 replication/slot.c:1351 storage/file/fd.c:681 storage/ipc/dsm.c:327 storage/smgr/md.c:426 #: storage/smgr/md.c:475 storage/smgr/md.c:1397 #, c-format msgid "could not remove file \"%s\": %m" msgstr "n'a pas pu supprimer le fichier « %s » : %m" -#: access/heap/rewriteheap.c:1271 access/transam/timeline.c:111 access/transam/timeline.c:236 access/transam/timeline.c:334 access/transam/xlog.c:3178 access/transam/xlog.c:3299 access/transam/xlog.c:3340 access/transam/xlog.c:3619 access/transam/xlog.c:3697 access/transam/xlogutils.c:706 postmaster/syslogger.c:1380 replication/basebackup.c:474 replication/basebackup.c:1218 replication/logical/origin.c:654 -#: replication/logical/reorderbuffer.c:2112 
replication/logical/reorderbuffer.c:2361 replication/logical/reorderbuffer.c:3044 replication/logical/snapbuild.c:1604 replication/logical/snapbuild.c:1692 replication/slot.c:1365 replication/walsender.c:479 replication/walsender.c:2385 storage/file/copydir.c:155 storage/file/fd.c:614 storage/file/fd.c:3092 storage/file/fd.c:3159 storage/smgr/md.c:608 utils/error/elog.c:1879 +#: access/heap/rewriteheap.c:1274 access/transam/timeline.c:111 access/transam/timeline.c:236 access/transam/timeline.c:334 access/transam/xlog.c:3178 access/transam/xlog.c:3299 access/transam/xlog.c:3340 access/transam/xlog.c:3619 access/transam/xlog.c:3697 access/transam/xlogutils.c:706 postmaster/syslogger.c:1380 replication/basebackup.c:475 replication/basebackup.c:1219 replication/logical/origin.c:695 +#: replication/logical/reorderbuffer.c:2127 replication/logical/reorderbuffer.c:2376 replication/logical/reorderbuffer.c:3059 replication/logical/snapbuild.c:1604 replication/logical/snapbuild.c:1692 replication/slot.c:1366 replication/walsender.c:480 replication/walsender.c:2400 storage/file/copydir.c:169 storage/file/fd.c:613 storage/file/fd.c:3114 storage/file/fd.c:3181 storage/smgr/md.c:608 utils/error/elog.c:1879 #: utils/init/miscinit.c:1173 utils/init/miscinit.c:1308 utils/init/miscinit.c:1385 utils/misc/guc.c:7218 utils/misc/guc.c:7251 #, c-format msgid "could not open file \"%s\": %m" @@ -821,7 +821,7 @@ msgstr "la méthode d'accès « %s » n'est pas de type %s" msgid "index access method \"%s\" does not have a handler" msgstr "la méthode d'accès « %s » n'a pas de handler" -#: access/index/indexam.c:160 catalog/objectaddress.c:1222 commands/indexcmds.c:1819 commands/tablecmds.c:247 commands/tablecmds.c:13106 +#: access/index/indexam.c:160 catalog/objectaddress.c:1222 commands/indexcmds.c:1822 commands/tablecmds.c:247 commands/tablecmds.c:13133 #, c-format msgid "\"%s\" is not an index" msgstr "« %s » n'est pas un index" @@ -857,7 +857,7 @@ msgstr "" "Utilisez un index sur le hachage MD5 de la valeur ou passez à l'indexation\n" "de la recherche plein texte." 
-#: access/nbtree/nbtpage.c:169 access/nbtree/nbtpage.c:372 access/nbtree/nbtpage.c:459 parser/parse_utilcmd.c:1900 +#: access/nbtree/nbtpage.c:169 access/nbtree/nbtpage.c:372 access/nbtree/nbtpage.c:459 parser/parse_utilcmd.c:1921 #, c-format msgid "index \"%s\" is not a btree" msgstr "l'index « %s » n'est pas un btree" @@ -993,7 +993,7 @@ msgstr "le MultiXactId %u n'existe plus - wraparound apparent" #: access/transam/multixact.c:1285 #, c-format msgid "MultiXactId %u has not been created yet -- apparent wraparound" -msgstr "le MultiXactId %u n'a pas encore été créer : wraparound apparent" +msgstr "le MultiXactId %u n'a pas encore été créé : wraparound apparent" #: access/transam/multixact.c:2268 #, c-format @@ -1028,7 +1028,7 @@ msgstr "Les protections sur la réutilisation d'un membre MultiXact sont mainten #: access/transam/multixact.c:2631 #, c-format msgid "MultiXact member stop limit is now %u based on MultiXact %u" -msgstr "La limite d'arrêt d'un membre MultiXact est maintenant %x, base sur le MultiXact %u" +msgstr "La limite d'arrêt d'un membre MultiXact est maintenant %u, basée sur le MultiXact %u" #: access/transam/multixact.c:3011 #, c-format @@ -1045,26 +1045,36 @@ msgstr "ne peut pas tronquer jusqu'au MutiXact %u car il n'existe pas sur disque msgid "invalid MultiXactId: %u" msgstr "MultiXactId invalide : %u" -#: access/transam/parallel.c:577 +#: access/transam/parallel.c:604 +#, c-format +msgid "parallel worker failed to initialize" +msgstr "échec de l'initialisation du worker parallèle" + +#: access/transam/parallel.c:605 +#, c-format +msgid "More details may be available in the server log." +msgstr "Plus de détails sont disponibles dans les traces du serveur." + +#: access/transam/parallel.c:666 #, c-format msgid "postmaster exited during a parallel transaction" msgstr "postmaster a quitté pendant une transaction parallèle" -#: access/transam/parallel.c:764 +#: access/transam/parallel.c:853 #, c-format msgid "lost connection to parallel worker" msgstr "perte de la connexion au processus parallèle" -#: access/transam/parallel.c:823 access/transam/parallel.c:825 +#: access/transam/parallel.c:915 access/transam/parallel.c:917 msgid "parallel worker" msgstr "processus parallèle" -#: access/transam/parallel.c:968 +#: access/transam/parallel.c:1060 #, c-format msgid "could not map dynamic shared memory segment" msgstr "n'a pas pu mapper le segment de mémoire partagée dynamique" -#: access/transam/parallel.c:973 +#: access/transam/parallel.c:1065 #, c-format msgid "invalid magic number in dynamic shared memory segment" msgstr "numéro magique invalide dans le segment de mémoire partagée dynamique" @@ -1156,7 +1166,7 @@ msgstr "" "Les identifiants timeline doivent être plus petits que les enfants des\n" "identifiants timeline." 
-#: access/transam/timeline.c:418 access/transam/timeline.c:498 access/transam/xlog.c:3256 access/transam/xlog.c:3423 access/transam/xlogfuncs.c:693 commands/copy.c:1776 storage/file/copydir.c:206 +#: access/transam/timeline.c:418 access/transam/timeline.c:498 access/transam/xlog.c:3256 access/transam/xlog.c:3423 access/transam/xlogfuncs.c:693 commands/copy.c:1723 storage/file/copydir.c:228 #, c-format msgid "could not close file \"%s\": %m" msgstr "n'a pas pu fermer le fichier « %s » : %m" @@ -1256,7 +1266,7 @@ msgstr "" "n'a pas pu lire le fichier d'état de la validation en deux phases nommé\n" "« %s » : %m" -#: access/transam/twophase.c:1307 access/transam/xlog.c:6356 +#: access/transam/twophase.c:1307 access/transam/xlog.c:6364 #, c-format msgid "Failed while allocating a WAL reading processor." msgstr "Échec lors de l'allocation d'un processeur de lecture de journaux de transactions." @@ -1318,32 +1328,36 @@ msgstr "récupération de la transaction préparée %u à partir de la mémoire #: access/transam/twophase.c:2034 #, c-format -msgid "removing stale two-phase state file for \"%u\"" -msgstr "suppression du vieux fichier d'état de la validation en deux phases pour %u" +msgid "removing stale two-phase state file for transaction %u" +msgstr "suppression du vieux fichier d'état de la validation en deux phases pour la transaction %u" #: access/transam/twophase.c:2041 #, c-format -msgid "removing stale two-phase state from shared memory for \"%u\"" -msgstr "suppression du vieux fichier d'état de la validation en deux phases nommé à partir de la mémoire partagée pour « %u »" +msgid "removing stale two-phase state from memory for transaction %u" +msgstr "suppression du vieux fichier d'état de la validation en deux phases de la mémoire pour la transaction %u" #: access/transam/twophase.c:2054 -#, c-format -msgid "removing future two-phase state file for \"%u\"" +#, fuzzy, c-format +#| msgid "removing future two-phase state file for \"%u\"" +msgid "removing future two-phase state file for transaction %u" msgstr "suppression du futur fichier d'état de la validation en deux phases pour « %u »" #: access/transam/twophase.c:2061 -#, c-format -msgid "removing future two-phase state from memory for \"%u\"" +#, fuzzy, c-format +#| msgid "removing future two-phase state from memory for \"%u\"" +msgid "removing future two-phase state from memory for transaction %u" msgstr "suppression du futur fichier d'état de la validation en deux phases en mémoire pour « %u »" #: access/transam/twophase.c:2075 access/transam/twophase.c:2094 -#, c-format -msgid "removing corrupt two-phase state file for \"%u\"" +#, fuzzy, c-format +#| msgid "removing corrupt two-phase state file for \"%u\"" +msgid "removing corrupt two-phase state file for transaction %u" msgstr "suppression du fichier d'état corrompu de la validation en deux phases pour « %u »" #: access/transam/twophase.c:2101 -#, c-format -msgid "removing corrupt two-phase state from memory for \"%u\"" +#, fuzzy, c-format +#| msgid "removing corrupt two-phase state from memory for \"%u\"" +msgid "removing corrupt two-phase state from memory for transaction %u" msgstr "" "suppression du fichier d'état corrompu de la validation en deux phases en mémoire\n" "pour « %u »" @@ -1405,32 +1419,40 @@ msgstr "ne peux pas avoir plus de 2^32-2 commandes dans une transaction" msgid "maximum number of committed subtransactions (%d) exceeded" msgstr "nombre maximum de sous-transactions validées (%d) dépassé" -#: access/transam/xact.c:2268 +#: access/transam/xact.c:2265 #, c-format 
msgid "cannot PREPARE a transaction that has operated on temporary tables" msgstr "" "ne peut pas préparer (PREPARE) une transaction qui a travaillé sur des\n" "tables temporaires" -#: access/transam/xact.c:2278 +#: access/transam/xact.c:2275 #, c-format msgid "cannot PREPARE a transaction that has exported snapshots" msgstr "ne peut pas préparer (PREPARE) une transaction qui a exporté des snapshots" +#: access/transam/xact.c:2284 +#, fuzzy, c-format +#| msgid "cannot PREPARE a transaction that has operated on temporary tables" +msgid "cannot PREPARE a transaction that has manipulated logical replication workers" +msgstr "" +"ne peut pas préparer (PREPARE) une transaction qui a travaillé sur des\n" +"tables temporaires" + #. translator: %s represents an SQL statement name -#: access/transam/xact.c:3164 +#: access/transam/xact.c:3166 #, c-format msgid "%s cannot run inside a transaction block" msgstr "%s ne peut pas être exécuté dans un bloc de transaction" #. translator: %s represents an SQL statement name -#: access/transam/xact.c:3174 +#: access/transam/xact.c:3176 #, c-format msgid "%s cannot run inside a subtransaction" msgstr "%s ne peut pas être exécuté dans un sous-bloc de transaction" #. translator: %s represents an SQL statement name -#: access/transam/xact.c:3184 +#: access/transam/xact.c:3186 #, c-format msgid "%s cannot be executed from a function or multi-command string" msgstr "" @@ -1438,62 +1460,62 @@ msgstr "" "contenant plusieurs commandes" #. translator: %s represents an SQL statement name -#: access/transam/xact.c:3255 +#: access/transam/xact.c:3257 #, c-format msgid "%s can only be used in transaction blocks" msgstr "%s peut seulement être utilisé dans des blocs de transaction" -#: access/transam/xact.c:3439 +#: access/transam/xact.c:3441 #, c-format msgid "there is already a transaction in progress" msgstr "une transaction est déjà en cours" -#: access/transam/xact.c:3607 access/transam/xact.c:3710 +#: access/transam/xact.c:3609 access/transam/xact.c:3712 #, c-format msgid "there is no transaction in progress" msgstr "aucune transaction en cours" -#: access/transam/xact.c:3618 +#: access/transam/xact.c:3620 #, c-format msgid "cannot commit during a parallel operation" msgstr "ne peut pas valider pendant une opération parallèle" -#: access/transam/xact.c:3721 +#: access/transam/xact.c:3723 #, c-format msgid "cannot abort during a parallel operation" msgstr "ne peut pas annuler pendant une opération en parallèle" -#: access/transam/xact.c:3763 +#: access/transam/xact.c:3765 #, c-format msgid "cannot define savepoints during a parallel operation" msgstr "ne peut pas définir de points de sauvegarde lors d'une opération parallèle" -#: access/transam/xact.c:3830 +#: access/transam/xact.c:3832 #, c-format msgid "cannot release savepoints during a parallel operation" msgstr "ne peut pas relâcher de points de sauvegarde pendant une opération parallèle" -#: access/transam/xact.c:3841 access/transam/xact.c:3893 access/transam/xact.c:3899 access/transam/xact.c:3955 access/transam/xact.c:4005 access/transam/xact.c:4011 +#: access/transam/xact.c:3843 access/transam/xact.c:3895 access/transam/xact.c:3901 access/transam/xact.c:3957 access/transam/xact.c:4007 access/transam/xact.c:4013 #, c-format msgid "no such savepoint" msgstr "aucun point de sauvegarde" -#: access/transam/xact.c:3943 +#: access/transam/xact.c:3945 #, c-format msgid "cannot rollback to savepoints during a parallel operation" msgstr "ne peut pas retourner à un point de sauvegarde pendant un opération parallèle" -#: 
access/transam/xact.c:4071 +#: access/transam/xact.c:4073 #, c-format msgid "cannot start subtransactions during a parallel operation" msgstr "ne peut pas lancer de sous-transactions pendant une opération parallèle" -#: access/transam/xact.c:4138 +#: access/transam/xact.c:4140 #, c-format msgid "cannot commit subtransactions during a parallel operation" msgstr "ne peut pas valider de sous-transactions pendant une opération parallèle" -#: access/transam/xact.c:4746 +#: access/transam/xact.c:4772 #, c-format msgid "cannot have more than 2^32-1 subtransactions in a transaction" msgstr "ne peut pas avoir plus de 2^32-1 sous-transactions dans une transaction" @@ -1523,112 +1545,112 @@ msgstr "données insuffisantes dans le fichier « %s »" msgid "could not open write-ahead log file \"%s\": %m" msgstr "n'a pas pu écrire dans le journal de transactions « %s » : %m" -#: access/transam/xlog.c:3723 access/transam/xlog.c:5541 +#: access/transam/xlog.c:3723 access/transam/xlog.c:5549 #, c-format msgid "could not close log file %s: %m" msgstr "n'a pas pu fermer le fichier de transactions « %s » : %m" -#: access/transam/xlog.c:3780 access/transam/xlogutils.c:701 replication/walsender.c:2380 +#: access/transam/xlog.c:3787 access/transam/xlogutils.c:701 replication/walsender.c:2395 #, c-format msgid "requested WAL segment %s has already been removed" msgstr "le segment demandé du journal de transaction, %s, a déjà été supprimé" -#: access/transam/xlog.c:3840 access/transam/xlog.c:3915 access/transam/xlog.c:4110 +#: access/transam/xlog.c:3848 access/transam/xlog.c:3923 access/transam/xlog.c:4118 #, c-format msgid "could not open write-ahead log directory \"%s\": %m" msgstr "n'a pas pu ouvrir le répertoire des journaux de transactions « %s » : %m" -#: access/transam/xlog.c:3996 +#: access/transam/xlog.c:4004 #, c-format msgid "recycled write-ahead log file \"%s\"" msgstr "recyclage du journal de transactions « %s »" -#: access/transam/xlog.c:4008 +#: access/transam/xlog.c:4016 #, c-format msgid "removing write-ahead log file \"%s\"" msgstr "suppression du journal de transactions « %s »" -#: access/transam/xlog.c:4028 +#: access/transam/xlog.c:4036 #, c-format msgid "could not rename old write-ahead log file \"%s\": %m" msgstr "n'a pas pu renommer l'ancien journal de transactions « %s » : %m" -#: access/transam/xlog.c:4070 access/transam/xlog.c:4080 +#: access/transam/xlog.c:4078 access/transam/xlog.c:4088 #, c-format msgid "required WAL directory \"%s\" does not exist" msgstr "le répertoire « %s » requis pour les journaux de transactions n'existe pas" -#: access/transam/xlog.c:4086 +#: access/transam/xlog.c:4094 #, c-format msgid "creating missing WAL directory \"%s\"" msgstr "création du répertoire manquant « %s » pour les journaux de transactions" -#: access/transam/xlog.c:4089 +#: access/transam/xlog.c:4097 #, c-format msgid "could not create missing directory \"%s\": %m" msgstr "n'a pas pu créer le répertoire « %s » manquant : %m" -#: access/transam/xlog.c:4200 +#: access/transam/xlog.c:4208 #, c-format msgid "unexpected timeline ID %u in log segment %s, offset %u" msgstr "identifiant timeline %u inattendu dans le journal de transactions %s, décalage %u" -#: access/transam/xlog.c:4322 +#: access/transam/xlog.c:4330 #, c-format msgid "new timeline %u is not a child of database system timeline %u" msgstr "" "le nouveau timeline %u n'est pas un fils du timeline %u du système de bases\n" "de données" -#: access/transam/xlog.c:4336 +#: access/transam/xlog.c:4344 #, c-format msgid "new timeline %u forked off 
current database system timeline %u before current recovery point %X/%X" msgstr "" "la nouvelle timeline %u a été créée à partir de la timeline de la base de données système %u\n" "avant le point de restauration courant %X/%X" -#: access/transam/xlog.c:4355 +#: access/transam/xlog.c:4363 #, c-format msgid "new target timeline is %u" msgstr "la nouvelle timeline cible est %u" -#: access/transam/xlog.c:4436 +#: access/transam/xlog.c:4444 #, c-format msgid "could not create control file \"%s\": %m" msgstr "n'a pas pu créer le fichier de contrôle « %s » : %m" -#: access/transam/xlog.c:4448 access/transam/xlog.c:4674 +#: access/transam/xlog.c:4456 access/transam/xlog.c:4682 #, c-format msgid "could not write to control file: %m" msgstr "n'a pas pu écrire le fichier de contrôle : %m" -#: access/transam/xlog.c:4456 access/transam/xlog.c:4682 +#: access/transam/xlog.c:4464 access/transam/xlog.c:4690 #, c-format msgid "could not fsync control file: %m" msgstr "n'a pas pu synchroniser sur disque (fsync) le fichier de contrôle : %m" -#: access/transam/xlog.c:4462 access/transam/xlog.c:4688 +#: access/transam/xlog.c:4470 access/transam/xlog.c:4696 #, c-format msgid "could not close control file: %m" msgstr "n'a pas pu fermer le fichier de contrôle : %m" -#: access/transam/xlog.c:4480 access/transam/xlog.c:4662 +#: access/transam/xlog.c:4488 access/transam/xlog.c:4670 #, c-format msgid "could not open control file \"%s\": %m" msgstr "n'a pas pu ouvrir le fichier de contrôle « %s » : %m" -#: access/transam/xlog.c:4487 +#: access/transam/xlog.c:4495 #, c-format msgid "could not read from control file: %m" msgstr "n'a pas pu lire le fichier de contrôle : %m" -#: access/transam/xlog.c:4501 access/transam/xlog.c:4510 access/transam/xlog.c:4534 access/transam/xlog.c:4541 access/transam/xlog.c:4548 access/transam/xlog.c:4553 access/transam/xlog.c:4560 access/transam/xlog.c:4567 access/transam/xlog.c:4574 access/transam/xlog.c:4581 access/transam/xlog.c:4588 access/transam/xlog.c:4595 access/transam/xlog.c:4602 access/transam/xlog.c:4611 access/transam/xlog.c:4618 access/transam/xlog.c:4627 -#: access/transam/xlog.c:4634 utils/init/miscinit.c:1406 +#: access/transam/xlog.c:4509 access/transam/xlog.c:4518 access/transam/xlog.c:4542 access/transam/xlog.c:4549 access/transam/xlog.c:4556 access/transam/xlog.c:4561 access/transam/xlog.c:4568 access/transam/xlog.c:4575 access/transam/xlog.c:4582 access/transam/xlog.c:4589 access/transam/xlog.c:4596 access/transam/xlog.c:4603 access/transam/xlog.c:4610 access/transam/xlog.c:4619 access/transam/xlog.c:4626 access/transam/xlog.c:4635 +#: access/transam/xlog.c:4642 utils/init/miscinit.c:1406 #, c-format msgid "database files are incompatible with server" msgstr "les fichiers de la base de données sont incompatibles avec le serveur" -#: access/transam/xlog.c:4502 +#: access/transam/xlog.c:4510 #, c-format msgid "The database cluster was initialized with PG_CONTROL_VERSION %d (0x%08x), but the server was compiled with PG_CONTROL_VERSION %d (0x%08x)." msgstr "" @@ -1636,303 +1658,303 @@ msgstr "" "%d (0x%08x) alors que le serveur a été compilé avec un PG_CONTROL_VERSION à\n" "%d (0x%08x)." -#: access/transam/xlog.c:4506 +#: access/transam/xlog.c:4514 #, c-format msgid "This could be a problem of mismatched byte ordering. It looks like you need to initdb." msgstr "" "Ceci peut être un problème d'incohérence dans l'ordre des octets.\n" "Il se peut que vous ayez besoin d'initdb." 
-#: access/transam/xlog.c:4511 +#: access/transam/xlog.c:4519 #, c-format msgid "The database cluster was initialized with PG_CONTROL_VERSION %d, but the server was compiled with PG_CONTROL_VERSION %d." msgstr "" "Le cluster de base de données a été initialisé avec un PG_CONTROL_VERSION à\n" "%d alors que le serveur a été compilé avec un PG_CONTROL_VERSION à %d." -#: access/transam/xlog.c:4514 access/transam/xlog.c:4538 access/transam/xlog.c:4545 access/transam/xlog.c:4550 +#: access/transam/xlog.c:4522 access/transam/xlog.c:4546 access/transam/xlog.c:4553 access/transam/xlog.c:4558 #, c-format msgid "It looks like you need to initdb." msgstr "Il semble que vous avez besoin d'initdb." -#: access/transam/xlog.c:4525 +#: access/transam/xlog.c:4533 #, c-format msgid "incorrect checksum in control file" msgstr "somme de contrôle incorrecte dans le fichier de contrôle" -#: access/transam/xlog.c:4535 +#: access/transam/xlog.c:4543 #, c-format msgid "The database cluster was initialized with CATALOG_VERSION_NO %d, but the server was compiled with CATALOG_VERSION_NO %d." msgstr "" "Le cluster de base de données a été initialisé avec un CATALOG_VERSION_NO à\n" "%d alors que le serveur a été compilé avec un CATALOG_VERSION_NO à %d." -#: access/transam/xlog.c:4542 +#: access/transam/xlog.c:4550 #, c-format msgid "The database cluster was initialized with MAXALIGN %d, but the server was compiled with MAXALIGN %d." msgstr "" "Le cluster de bases de données a été initialisé avec un MAXALIGN à %d alors\n" "que le serveur a été compilé avec un MAXALIGN à %d." -#: access/transam/xlog.c:4549 +#: access/transam/xlog.c:4557 #, c-format msgid "The database cluster appears to use a different floating-point number format than the server executable." msgstr "" "Le cluster de bases de données semble utiliser un format différent pour les\n" "nombres à virgule flottante de celui de l'exécutable serveur." -#: access/transam/xlog.c:4554 +#: access/transam/xlog.c:4562 #, c-format msgid "The database cluster was initialized with BLCKSZ %d, but the server was compiled with BLCKSZ %d." msgstr "" "Le cluster de base de données a été initialisé avec un BLCKSZ à %d alors que\n" "le serveur a été compilé avec un BLCKSZ à %d." -#: access/transam/xlog.c:4557 access/transam/xlog.c:4564 access/transam/xlog.c:4571 access/transam/xlog.c:4578 access/transam/xlog.c:4585 access/transam/xlog.c:4592 access/transam/xlog.c:4599 access/transam/xlog.c:4606 access/transam/xlog.c:4614 access/transam/xlog.c:4621 access/transam/xlog.c:4630 access/transam/xlog.c:4637 +#: access/transam/xlog.c:4565 access/transam/xlog.c:4572 access/transam/xlog.c:4579 access/transam/xlog.c:4586 access/transam/xlog.c:4593 access/transam/xlog.c:4600 access/transam/xlog.c:4607 access/transam/xlog.c:4614 access/transam/xlog.c:4622 access/transam/xlog.c:4629 access/transam/xlog.c:4638 access/transam/xlog.c:4645 #, c-format msgid "It looks like you need to recompile or initdb." msgstr "Il semble que vous avez besoin de recompiler ou de relancer initdb." -#: access/transam/xlog.c:4561 +#: access/transam/xlog.c:4569 #, c-format msgid "The database cluster was initialized with RELSEG_SIZE %d, but the server was compiled with RELSEG_SIZE %d." msgstr "" "Le cluster de bases de données a été initialisé avec un RELSEG_SIZE à %d\n" "alors que le serveur a été compilé avec un RELSEG_SIZE à %d." 
-#: access/transam/xlog.c:4568 +#: access/transam/xlog.c:4576 #, c-format msgid "The database cluster was initialized with XLOG_BLCKSZ %d, but the server was compiled with XLOG_BLCKSZ %d." msgstr "" "Le cluster de base de données a été initialisé avec un XLOG_BLCKSZ à %d\n" "alors que le serveur a été compilé avec un XLOG_BLCKSZ à %d." -#: access/transam/xlog.c:4575 +#: access/transam/xlog.c:4583 #, c-format msgid "The database cluster was initialized with XLOG_SEG_SIZE %d, but the server was compiled with XLOG_SEG_SIZE %d." msgstr "" "Le cluster de bases de données a été initialisé avec un XLOG_SEG_SIZE à %d\n" "alors que le serveur a été compilé avec un XLOG_SEG_SIZE à %d." -#: access/transam/xlog.c:4582 +#: access/transam/xlog.c:4590 #, c-format msgid "The database cluster was initialized with NAMEDATALEN %d, but the server was compiled with NAMEDATALEN %d." msgstr "" "Le cluster de bases de données a été initialisé avec un NAMEDATALEN à %d\n" "alors que le serveur a été compilé avec un NAMEDATALEN à %d." -#: access/transam/xlog.c:4589 +#: access/transam/xlog.c:4597 #, c-format msgid "The database cluster was initialized with INDEX_MAX_KEYS %d, but the server was compiled with INDEX_MAX_KEYS %d." msgstr "" "Le groupe de bases de données a été initialisé avec un INDEX_MAX_KEYS à %d\n" "alors que le serveur a été compilé avec un INDEX_MAX_KEYS à %d." -#: access/transam/xlog.c:4596 +#: access/transam/xlog.c:4604 #, c-format msgid "The database cluster was initialized with TOAST_MAX_CHUNK_SIZE %d, but the server was compiled with TOAST_MAX_CHUNK_SIZE %d." msgstr "" "Le cluster de bases de données a été initialisé avec un TOAST_MAX_CHUNK_SIZE\n" "à %d alors que le serveur a été compilé avec un TOAST_MAX_CHUNK_SIZE à %d." -#: access/transam/xlog.c:4603 +#: access/transam/xlog.c:4611 #, c-format msgid "The database cluster was initialized with LOBLKSIZE %d, but the server was compiled with LOBLKSIZE %d." msgstr "" "Le cluster de base de données a été initialisé avec un LOBLKSIZE à %d alors que\n" "le serveur a été compilé avec un LOBLKSIZE à %d." -#: access/transam/xlog.c:4612 +#: access/transam/xlog.c:4620 #, c-format msgid "The database cluster was initialized without USE_FLOAT4_BYVAL but the server was compiled with USE_FLOAT4_BYVAL." msgstr "" "Le cluster de base de données a été initialisé sans USE_FLOAT4_BYVAL\n" "alors que le serveur a été compilé avec USE_FLOAT4_BYVAL." -#: access/transam/xlog.c:4619 +#: access/transam/xlog.c:4627 #, c-format msgid "The database cluster was initialized with USE_FLOAT4_BYVAL but the server was compiled without USE_FLOAT4_BYVAL." msgstr "" "Le cluster de base de données a été initialisé avec USE_FLOAT4_BYVAL\n" "alors que le serveur a été compilé sans USE_FLOAT4_BYVAL." -#: access/transam/xlog.c:4628 +#: access/transam/xlog.c:4636 #, c-format msgid "The database cluster was initialized without USE_FLOAT8_BYVAL but the server was compiled with USE_FLOAT8_BYVAL." msgstr "" "Le cluster de base de données a été initialisé sans USE_FLOAT8_BYVAL\n" "alors que le serveur a été compilé avec USE_FLOAT8_BYVAL." -#: access/transam/xlog.c:4635 +#: access/transam/xlog.c:4643 #, c-format msgid "The database cluster was initialized with USE_FLOAT8_BYVAL but the server was compiled without USE_FLOAT8_BYVAL." msgstr "" "Le cluster de base de données a été initialisé avec USE_FLOAT8_BYVAL\n" "alors que le serveur a été compilé sans USE_FLOAT8_BYVAL." 
-#: access/transam/xlog.c:4991 +#: access/transam/xlog.c:4999 #, c-format msgid "could not generate secret authorization token" msgstr "n'a pas pu générer le jeton secret d'autorisation" -#: access/transam/xlog.c:5081 +#: access/transam/xlog.c:5089 #, c-format msgid "could not write bootstrap write-ahead log file: %m" msgstr "n'a pas pu écrire le « bootstrap » du journal des transactions : %m" -#: access/transam/xlog.c:5089 +#: access/transam/xlog.c:5097 #, c-format msgid "could not fsync bootstrap write-ahead log file: %m" msgstr "" "n'a pas pu synchroniser sur disque (fsync) le « bootstrap » du journal des\n" "transactions : %m" -#: access/transam/xlog.c:5095 +#: access/transam/xlog.c:5103 #, c-format msgid "could not close bootstrap write-ahead log file: %m" msgstr "n'a pas pu fermer le « bootstrap » du journal des transactions : %m" -#: access/transam/xlog.c:5171 +#: access/transam/xlog.c:5179 #, c-format msgid "could not open recovery command file \"%s\": %m" msgstr "n'a pas pu ouvrir le fichier de restauration « %s » : %m" -#: access/transam/xlog.c:5217 access/transam/xlog.c:5319 +#: access/transam/xlog.c:5225 access/transam/xlog.c:5327 #, c-format msgid "invalid value for recovery parameter \"%s\": \"%s\"" msgstr "valeur invalide pour le paramètre de restauration « %s » : « %s »" -#: access/transam/xlog.c:5220 +#: access/transam/xlog.c:5228 #, c-format msgid "Valid values are \"pause\", \"promote\", and \"shutdown\"." msgstr "Les valeurs valides sont « pause », « promote » et « shutdown »." -#: access/transam/xlog.c:5240 +#: access/transam/xlog.c:5248 #, c-format msgid "recovery_target_timeline is not a valid number: \"%s\"" msgstr "recovery_target_timeline n'est pas un nombre valide : « %s »" -#: access/transam/xlog.c:5257 +#: access/transam/xlog.c:5265 #, c-format msgid "recovery_target_xid is not a valid number: \"%s\"" msgstr "recovery_target_xid n'est pas un nombre valide : « %s »" -#: access/transam/xlog.c:5288 +#: access/transam/xlog.c:5296 #, c-format msgid "recovery_target_name is too long (maximum %d characters)" msgstr "recovery_target_name est trop long (%d caractères maximum)" -#: access/transam/xlog.c:5322 +#: access/transam/xlog.c:5330 #, c-format msgid "The only allowed value is \"immediate\"." msgstr "La seule valeur autorisée est « immediate »." 
-#: access/transam/xlog.c:5335 access/transam/xlog.c:5346 commands/extension.c:547 commands/extension.c:555 utils/misc/guc.c:5750 +#: access/transam/xlog.c:5343 access/transam/xlog.c:5354 commands/extension.c:547 commands/extension.c:555 utils/misc/guc.c:5750 #, c-format msgid "parameter \"%s\" requires a Boolean value" msgstr "le paramètre « %s » requiert une valeur booléenne" -#: access/transam/xlog.c:5381 +#: access/transam/xlog.c:5389 #, c-format msgid "parameter \"%s\" requires a temporal value" msgstr "le paramètre « %s » requiert une valeur temporelle" -#: access/transam/xlog.c:5383 catalog/dependency.c:961 catalog/dependency.c:962 catalog/dependency.c:968 catalog/dependency.c:969 catalog/dependency.c:980 catalog/dependency.c:981 commands/tablecmds.c:946 commands/tablecmds.c:10345 commands/user.c:1029 commands/view.c:505 libpq/auth.c:328 replication/syncrep.c:1160 storage/lmgr/deadlock.c:1139 storage/lmgr/proc.c:1313 utils/adt/acl.c:5248 utils/misc/guc.c:5772 utils/misc/guc.c:5865 -#: utils/misc/guc.c:9821 utils/misc/guc.c:9855 utils/misc/guc.c:9889 utils/misc/guc.c:9923 utils/misc/guc.c:9958 +#: access/transam/xlog.c:5391 catalog/dependency.c:961 catalog/dependency.c:962 catalog/dependency.c:968 catalog/dependency.c:969 catalog/dependency.c:980 catalog/dependency.c:981 commands/tablecmds.c:946 commands/tablecmds.c:10372 commands/user.c:1064 commands/view.c:505 libpq/auth.c:328 replication/syncrep.c:1160 storage/lmgr/deadlock.c:1139 storage/lmgr/proc.c:1313 utils/adt/acl.c:5253 utils/misc/guc.c:5772 utils/misc/guc.c:5865 +#: utils/misc/guc.c:9814 utils/misc/guc.c:9848 utils/misc/guc.c:9882 utils/misc/guc.c:9916 utils/misc/guc.c:9951 #, c-format msgid "%s" msgstr "%s" -#: access/transam/xlog.c:5390 +#: access/transam/xlog.c:5398 #, c-format msgid "unrecognized recovery parameter \"%s\"" msgstr "paramètre de restauration « %s » non reconnu" -#: access/transam/xlog.c:5401 +#: access/transam/xlog.c:5409 #, c-format msgid "recovery command file \"%s\" specified neither primary_conninfo nor restore_command" msgstr "le fichier de restauration « %s » n'a spécifié ni primary_conninfo ni restore_command" -#: access/transam/xlog.c:5403 +#: access/transam/xlog.c:5411 #, c-format msgid "The database server will regularly poll the pg_wal subdirectory to check for files placed there." msgstr "" "Le serveur de la base de données va régulièrement interroger le sous-répertoire\n" "pg_wal pour vérifier les fichiers placés ici." 
-#: access/transam/xlog.c:5410 +#: access/transam/xlog.c:5418 #, c-format msgid "recovery command file \"%s\" must specify restore_command when standby mode is not enabled" msgstr "" "le fichier de restauration « %s » doit spécifier restore_command quand le mode\n" "de restauration n'est pas activé" -#: access/transam/xlog.c:5431 +#: access/transam/xlog.c:5439 #, c-format msgid "standby mode is not supported by single-user servers" msgstr "le mode de restauration n'est pas supporté pour les serveurs mono-utilisateur" -#: access/transam/xlog.c:5450 +#: access/transam/xlog.c:5458 #, c-format msgid "recovery target timeline %u does not exist" msgstr "le timeline cible, %u, de la restauration n'existe pas" -#: access/transam/xlog.c:5571 +#: access/transam/xlog.c:5579 #, c-format msgid "archive recovery complete" msgstr "restauration terminée de l'archive" -#: access/transam/xlog.c:5630 access/transam/xlog.c:5896 +#: access/transam/xlog.c:5638 access/transam/xlog.c:5904 #, c-format msgid "recovery stopping after reaching consistency" msgstr "arrêt de la restauration après avoir atteint le point de cohérence" -#: access/transam/xlog.c:5651 +#: access/transam/xlog.c:5659 #, c-format msgid "recovery stopping before WAL location (LSN) \"%X/%X\"" msgstr "arrêt de la restauration avant l'emplacement WAL (LSN) « %X/%X »" -#: access/transam/xlog.c:5737 +#: access/transam/xlog.c:5745 #, c-format msgid "recovery stopping before commit of transaction %u, time %s" msgstr "arrêt de la restauration avant validation de la transaction %u, %s" -#: access/transam/xlog.c:5744 +#: access/transam/xlog.c:5752 #, c-format msgid "recovery stopping before abort of transaction %u, time %s" msgstr "arrêt de la restauration avant annulation de la transaction %u, %s" -#: access/transam/xlog.c:5790 +#: access/transam/xlog.c:5798 #, c-format msgid "recovery stopping at restore point \"%s\", time %s" msgstr "restauration en arrêt au point de restauration « %s », heure %s" -#: access/transam/xlog.c:5808 +#: access/transam/xlog.c:5816 #, c-format msgid "recovery stopping after WAL location (LSN) \"%X/%X\"" msgstr "arrêt de la restauration après l'emplacement WAL (LSN) « %X/%X »" -#: access/transam/xlog.c:5876 +#: access/transam/xlog.c:5884 #, c-format msgid "recovery stopping after commit of transaction %u, time %s" msgstr "arrêt de la restauration après validation de la transaction %u, %s" -#: access/transam/xlog.c:5884 +#: access/transam/xlog.c:5892 #, c-format msgid "recovery stopping after abort of transaction %u, time %s" msgstr "arrêt de la restauration après annulation de la transaction %u, %s" -#: access/transam/xlog.c:5924 +#: access/transam/xlog.c:5932 #, c-format msgid "recovery has paused" msgstr "restauration en pause" -#: access/transam/xlog.c:5925 +#: access/transam/xlog.c:5933 #, c-format msgid "Execute pg_wal_replay_resume() to continue." msgstr "Exécuter pg_wal_replay_resume() pour continuer." 
-#: access/transam/xlog.c:6133 +#: access/transam/xlog.c:6141 #, c-format msgid "hot standby is not possible because %s = %d is a lower setting than on the master server (its value was %d)" msgstr "" @@ -1940,271 +1962,271 @@ msgstr "" "paramètrage plus bas que celui du serveur maître des journaux de transactions\n" "(la valeur était %d)" -#: access/transam/xlog.c:6159 +#: access/transam/xlog.c:6167 #, c-format msgid "WAL was generated with wal_level=minimal, data may be missing" msgstr "" "le journal de transactions a été généré avec le paramètre wal_level configuré\n" "à « minimal », des données pourraient manquer" -#: access/transam/xlog.c:6160 +#: access/transam/xlog.c:6168 #, c-format msgid "This happens if you temporarily set wal_level=minimal without taking a new base backup." msgstr "" "Ceci peut arriver si vous configurez temporairement wal_level à minimal sans avoir\n" "pris une nouvelle sauvegarde de base." -#: access/transam/xlog.c:6171 +#: access/transam/xlog.c:6179 #, c-format msgid "hot standby is not possible because wal_level was not set to \"replica\" or higher on the master server" msgstr "" "les connexions en lecture seules ne sont pas possibles parce que le paramètre wal_level\n" "n'a pas été positionné à « replica » ou plus sur le serveur maître" -#: access/transam/xlog.c:6172 +#: access/transam/xlog.c:6180 #, c-format msgid "Either set wal_level to \"replica\" on the master, or turn off hot_standby here." msgstr "" "Vous devez soit positionner le paramètre wal_level à « replica » sur le maître,\n" "soit désactiver le hot_standby ici." -#: access/transam/xlog.c:6229 +#: access/transam/xlog.c:6237 #, c-format msgid "control file contains invalid data" msgstr "le fichier de contrôle contient des données invalides" -#: access/transam/xlog.c:6235 +#: access/transam/xlog.c:6243 #, c-format msgid "database system was shut down at %s" msgstr "le système de bases de données a été arrêté à %s" -#: access/transam/xlog.c:6240 +#: access/transam/xlog.c:6248 #, c-format msgid "database system was shut down in recovery at %s" msgstr "le système de bases de données a été arrêté pendant la restauration à %s" -#: access/transam/xlog.c:6244 +#: access/transam/xlog.c:6252 #, c-format msgid "database system shutdown was interrupted; last known up at %s" msgstr "le système de bases de données a été interrompu ; dernier lancement connu à %s" -#: access/transam/xlog.c:6248 +#: access/transam/xlog.c:6256 #, c-format msgid "database system was interrupted while in recovery at %s" msgstr "le système de bases de données a été interrompu lors d'une restauration à %s" -#: access/transam/xlog.c:6250 +#: access/transam/xlog.c:6258 #, c-format msgid "This probably means that some data is corrupted and you will have to use the last backup for recovery." msgstr "" "Ceci signifie probablement que des données ont été corrompues et que vous\n" "devrez utiliser la dernière sauvegarde pour la restauration." -#: access/transam/xlog.c:6254 +#: access/transam/xlog.c:6262 #, c-format msgid "database system was interrupted while in recovery at log time %s" msgstr "" "le système de bases de données a été interrompu lors d'une récupération à %s\n" "(moment de la journalisation)" -#: access/transam/xlog.c:6256 +#: access/transam/xlog.c:6264 #, c-format msgid "If this has occurred more than once some data might be corrupted and you might need to choose an earlier recovery target." 
msgstr "" "Si c'est arrivé plus d'une fois, des données ont pu être corrompues et vous\n" "pourriez avoir besoin de choisir une cible de récupération antérieure." -#: access/transam/xlog.c:6260 +#: access/transam/xlog.c:6268 #, c-format msgid "database system was interrupted; last known up at %s" msgstr "le système de bases de données a été interrompu ; dernier lancement connu à %s" -#: access/transam/xlog.c:6316 +#: access/transam/xlog.c:6324 #, c-format msgid "entering standby mode" msgstr "entre en mode standby" -#: access/transam/xlog.c:6319 +#: access/transam/xlog.c:6327 #, c-format msgid "starting point-in-time recovery to XID %u" msgstr "début de la restauration de l'archive au XID %u" -#: access/transam/xlog.c:6323 +#: access/transam/xlog.c:6331 #, c-format msgid "starting point-in-time recovery to %s" msgstr "début de la restauration de l'archive à %s" -#: access/transam/xlog.c:6327 +#: access/transam/xlog.c:6335 #, c-format msgid "starting point-in-time recovery to \"%s\"" msgstr "début de la restauration PITR à « %s »" -#: access/transam/xlog.c:6331 +#: access/transam/xlog.c:6339 #, c-format msgid "starting point-in-time recovery to WAL location (LSN) \"%X/%X\"" msgstr "début de la restauration PITR à l'emplacement WAL (LSN) « %X/%X »" -#: access/transam/xlog.c:6336 +#: access/transam/xlog.c:6344 #, c-format msgid "starting point-in-time recovery to earliest consistent point" msgstr "début de la restauration de l'archive jusqu'au point de cohérence le plus proche" -#: access/transam/xlog.c:6339 +#: access/transam/xlog.c:6347 #, c-format msgid "starting archive recovery" msgstr "début de la restauration de l'archive" -#: access/transam/xlog.c:6390 access/transam/xlog.c:6518 +#: access/transam/xlog.c:6398 access/transam/xlog.c:6526 #, c-format msgid "checkpoint record is at %X/%X" msgstr "l'enregistrement du point de vérification est à %X/%X" -#: access/transam/xlog.c:6404 +#: access/transam/xlog.c:6412 #, c-format msgid "could not find redo location referenced by checkpoint record" msgstr "n'a pas pu localiser l'enregistrement redo référencé par le point de vérification" -#: access/transam/xlog.c:6405 access/transam/xlog.c:6412 +#: access/transam/xlog.c:6413 access/transam/xlog.c:6420 #, c-format msgid "If you are not restoring from a backup, try removing the file \"%s/backup_label\"." msgstr "" "Si vous n'avez pas pu restaurer une sauvegarde, essayez de supprimer le\n" "fichier « %s/backup_label »." -#: access/transam/xlog.c:6411 +#: access/transam/xlog.c:6419 #, c-format msgid "could not locate required checkpoint record" msgstr "n'a pas pu localiser l'enregistrement d'un point de vérification requis" -#: access/transam/xlog.c:6437 commands/tablespace.c:639 +#: access/transam/xlog.c:6445 commands/tablespace.c:639 #, c-format msgid "could not create symbolic link \"%s\": %m" msgstr "n'a pas pu créer le lien symbolique « %s » : %m" -#: access/transam/xlog.c:6469 access/transam/xlog.c:6475 +#: access/transam/xlog.c:6477 access/transam/xlog.c:6483 #, c-format msgid "ignoring file \"%s\" because no file \"%s\" exists" msgstr "ignore le fichier « %s » car le fichier « %s » n'existe pas" -#: access/transam/xlog.c:6471 access/transam/xlog.c:11396 +#: access/transam/xlog.c:6479 access/transam/xlog.c:11436 #, c-format msgid "File \"%s\" was renamed to \"%s\"." msgstr "Le fichier « %s » a été renommé en « %s »." -#: access/transam/xlog.c:6477 +#: access/transam/xlog.c:6485 #, c-format msgid "Could not rename file \"%s\" to \"%s\": %m." 
msgstr "N'a pas pu renommer le fichier « %s » en « %s » : %m" -#: access/transam/xlog.c:6528 access/transam/xlog.c:6543 +#: access/transam/xlog.c:6536 access/transam/xlog.c:6551 #, c-format msgid "could not locate a valid checkpoint record" msgstr "n'a pas pu localiser un enregistrement d'un point de vérification valide" -#: access/transam/xlog.c:6537 +#: access/transam/xlog.c:6545 #, c-format msgid "using previous checkpoint record at %X/%X" msgstr "utilisation du précédent enregistrement d'un point de vérification à %X/%X" -#: access/transam/xlog.c:6581 +#: access/transam/xlog.c:6589 #, c-format msgid "requested timeline %u is not a child of this server's history" msgstr "la timeline requise %u n'est pas un fils de l'historique de ce serveur" -#: access/transam/xlog.c:6583 +#: access/transam/xlog.c:6591 #, c-format msgid "Latest checkpoint is at %X/%X on timeline %u, but in the history of the requested timeline, the server forked off from that timeline at %X/%X." msgstr "Le dernier checkpoint est à %X/%X sur la timeline %u, mais dans l'historique de la timeline demandée, le serveur est sorti de cette timeline à %X/%X." -#: access/transam/xlog.c:6599 +#: access/transam/xlog.c:6607 #, c-format msgid "requested timeline %u does not contain minimum recovery point %X/%X on timeline %u" msgstr "la timeline requise, %u, ne contient pas le point de restauration minimum (%X/%X) sur la timeline %u" -#: access/transam/xlog.c:6630 +#: access/transam/xlog.c:6638 #, c-format msgid "invalid next transaction ID" msgstr "prochain ID de transaction invalide" -#: access/transam/xlog.c:6724 +#: access/transam/xlog.c:6732 #, c-format msgid "invalid redo in checkpoint record" msgstr "ré-exécution invalide dans l'enregistrement du point de vérification" -#: access/transam/xlog.c:6735 +#: access/transam/xlog.c:6743 #, c-format msgid "invalid redo record in shutdown checkpoint" msgstr "enregistrement de ré-exécution invalide dans le point de vérification d'arrêt" -#: access/transam/xlog.c:6763 +#: access/transam/xlog.c:6771 #, c-format msgid "database system was not properly shut down; automatic recovery in progress" msgstr "" "le système de bases de données n'a pas été arrêté proprement ; restauration\n" "automatique en cours" -#: access/transam/xlog.c:6767 +#: access/transam/xlog.c:6775 #, c-format msgid "crash recovery starts in timeline %u and has target timeline %u" msgstr "la restauration après crash commence avec la timeline %u et a la timeline %u en cible" -#: access/transam/xlog.c:6811 +#: access/transam/xlog.c:6819 #, c-format msgid "backup_label contains data inconsistent with control file" msgstr "backup_label contient des données incohérentes avec le fichier de contrôle" -#: access/transam/xlog.c:6812 +#: access/transam/xlog.c:6820 #, c-format msgid "This means that the backup is corrupted and you will have to use another backup for recovery." msgstr "" "Ceci signifie que la sauvegarde a été corrompue et que vous devrez utiliser\n" "la dernière sauvegarde pour la restauration." 
-#: access/transam/xlog.c:6886 +#: access/transam/xlog.c:6894 #, c-format msgid "initializing for hot standby" msgstr "initialisation pour « Hot Standby »" -#: access/transam/xlog.c:7018 +#: access/transam/xlog.c:7026 #, c-format msgid "redo starts at %X/%X" msgstr "la ré-exécution commence à %X/%X" -#: access/transam/xlog.c:7252 +#: access/transam/xlog.c:7260 #, c-format msgid "requested recovery stop point is before consistent recovery point" msgstr "" "le point d'arrêt de la restauration demandée se trouve avant le point\n" "cohérent de restauration" -#: access/transam/xlog.c:7290 +#: access/transam/xlog.c:7298 #, c-format msgid "redo done at %X/%X" msgstr "ré-exécution faite à %X/%X" -#: access/transam/xlog.c:7295 access/transam/xlog.c:9309 +#: access/transam/xlog.c:7303 access/transam/xlog.c:9317 #, c-format msgid "last completed transaction was at log time %s" msgstr "la dernière transaction a eu lieu à %s (moment de la journalisation)" -#: access/transam/xlog.c:7304 +#: access/transam/xlog.c:7312 #, c-format msgid "redo is not required" msgstr "la ré-exécution n'est pas nécessaire" -#: access/transam/xlog.c:7379 access/transam/xlog.c:7383 +#: access/transam/xlog.c:7387 access/transam/xlog.c:7391 #, c-format msgid "WAL ends before end of online backup" msgstr "le journal de transactions se termine avant la fin de la sauvegarde de base" -#: access/transam/xlog.c:7380 +#: access/transam/xlog.c:7388 #, c-format msgid "All WAL generated while online backup was taken must be available at recovery." msgstr "" "Tous les journaux de transactions générés pendant la sauvegarde en ligne\n" "doivent être disponibles pour la restauration." -#: access/transam/xlog.c:7384 +#: access/transam/xlog.c:7392 #, c-format msgid "Online backup started with pg_start_backup() must be ended with pg_stop_backup(), and all WAL up to that point must be available at recovery." msgstr "" @@ -2212,107 +2234,108 @@ msgstr "" "pg_stop_backup() et tous les journaux de transactions générés entre les deux\n" "doivent être disponibles pour la restauration." 
-#: access/transam/xlog.c:7387 +#: access/transam/xlog.c:7395 #, c-format msgid "WAL ends before consistent recovery point" msgstr "Le journal de transaction se termine avant un point de restauration cohérent" -#: access/transam/xlog.c:7414 +#: access/transam/xlog.c:7422 #, c-format msgid "selected new timeline ID: %u" msgstr "identifiant d'un timeline nouvellement sélectionné : %u" -#: access/transam/xlog.c:7843 +#: access/transam/xlog.c:7851 #, c-format msgid "consistent recovery state reached at %X/%X" msgstr "état de restauration cohérent atteint à %X/%X" -#: access/transam/xlog.c:8035 +#: access/transam/xlog.c:8043 #, c-format msgid "invalid primary checkpoint link in control file" msgstr "lien du point de vérification primaire invalide dans le fichier de contrôle" -#: access/transam/xlog.c:8039 +#: access/transam/xlog.c:8047 #, c-format msgid "invalid secondary checkpoint link in control file" msgstr "lien du point de vérification secondaire invalide dans le fichier de contrôle" -#: access/transam/xlog.c:8043 +#: access/transam/xlog.c:8051 #, c-format msgid "invalid checkpoint link in backup_label file" msgstr "lien du point de vérification invalide dans le fichier backup_label" -#: access/transam/xlog.c:8060 +#: access/transam/xlog.c:8068 #, c-format msgid "invalid primary checkpoint record" msgstr "enregistrement du point de vérification primaire invalide" -#: access/transam/xlog.c:8064 +#: access/transam/xlog.c:8072 #, c-format msgid "invalid secondary checkpoint record" msgstr "enregistrement du point de vérification secondaire invalide" -#: access/transam/xlog.c:8068 +#: access/transam/xlog.c:8076 #, c-format msgid "invalid checkpoint record" msgstr "enregistrement du point de vérification invalide" -#: access/transam/xlog.c:8079 +#: access/transam/xlog.c:8087 #, c-format msgid "invalid resource manager ID in primary checkpoint record" msgstr "identifiant du gestionnaire de ressource invalide dans l'enregistrement primaire du point de vérification" -#: access/transam/xlog.c:8083 +#: access/transam/xlog.c:8091 #, c-format msgid "invalid resource manager ID in secondary checkpoint record" msgstr "identifiant du gestionnaire de ressource invalide dans l'enregistrement secondaire du point de vérification" -#: access/transam/xlog.c:8087 +#: access/transam/xlog.c:8095 #, c-format msgid "invalid resource manager ID in checkpoint record" msgstr "identifiant du gestionnaire de ressource invalide dans l'enregistrement du point de vérification" -#: access/transam/xlog.c:8100 +#: access/transam/xlog.c:8108 #, c-format msgid "invalid xl_info in primary checkpoint record" msgstr "xl_info invalide dans l'enregistrement du point de vérification primaire" -#: access/transam/xlog.c:8104 +#: access/transam/xlog.c:8112 #, c-format msgid "invalid xl_info in secondary checkpoint record" msgstr "xl_info invalide dans l'enregistrement du point de vérification secondaire" -#: access/transam/xlog.c:8108 +#: access/transam/xlog.c:8116 #, c-format msgid "invalid xl_info in checkpoint record" msgstr "xl_info invalide dans l'enregistrement du point de vérification" -#: access/transam/xlog.c:8119 +#: access/transam/xlog.c:8127 #, c-format msgid "invalid length of primary checkpoint record" msgstr "longueur invalide de l'enregistrement primaire du point de vérification" -#: access/transam/xlog.c:8123 +#: access/transam/xlog.c:8131 #, c-format msgid "invalid length of secondary checkpoint record" msgstr "longueur invalide de l'enregistrement secondaire du point de vérification" -#: access/transam/xlog.c:8127 
+#: access/transam/xlog.c:8135 #, c-format msgid "invalid length of checkpoint record" msgstr "longueur invalide de l'enregistrement du point de vérification" -#: access/transam/xlog.c:8330 +#: access/transam/xlog.c:8338 #, c-format msgid "shutting down" msgstr "arrêt en cours" -#: access/transam/xlog.c:8649 -#, c-format -msgid "checkpoint skipped due to an idle system" +#: access/transam/xlog.c:8657 +#, fuzzy, c-format +#| msgid "checkpoint skipped due to an idle system" +msgid "checkpoint skipped because system is idle" msgstr "checkpoint ignoré, le système étant en attente" -#: access/transam/xlog.c:8854 +#: access/transam/xlog.c:8862 #, fuzzy, c-format #| msgid "concurrent transaction log activity while database system is shutting down" msgid "concurrent write-ahead log activity while database system is shutting down" @@ -2320,117 +2343,117 @@ msgstr "" "activité en cours du journal de transactions alors que le système de bases\n" "de données est en cours d'arrêt" -#: access/transam/xlog.c:9108 +#: access/transam/xlog.c:9116 #, c-format msgid "skipping restartpoint, recovery has already ended" msgstr "restartpoint ignoré, la récupération est déjà terminée" -#: access/transam/xlog.c:9131 +#: access/transam/xlog.c:9139 #, c-format msgid "skipping restartpoint, already performed at %X/%X" msgstr "ignore le point de redémarrage, déjà réalisé à %X/%X" -#: access/transam/xlog.c:9307 +#: access/transam/xlog.c:9315 #, c-format msgid "recovery restart point at %X/%X" msgstr "la ré-exécution en restauration commence à %X/%X" -#: access/transam/xlog.c:9443 +#: access/transam/xlog.c:9451 #, c-format msgid "restore point \"%s\" created at %X/%X" msgstr "point de restauration « %s » créé à %X/%X" -#: access/transam/xlog.c:9573 +#: access/transam/xlog.c:9581 #, c-format msgid "unexpected previous timeline ID %u (current timeline ID %u) in checkpoint record" msgstr "identifiant de timeline précédent %u inattendu (identifiant de la timeline courante %u) dans l'enregistrement du point de vérification" -#: access/transam/xlog.c:9582 +#: access/transam/xlog.c:9590 #, c-format msgid "unexpected timeline ID %u (after %u) in checkpoint record" msgstr "" "identifiant timeline %u inattendu (après %u) dans l'enregistrement du point\n" "de vérification" -#: access/transam/xlog.c:9598 +#: access/transam/xlog.c:9606 #, c-format msgid "unexpected timeline ID %u in checkpoint record, before reaching minimum recovery point %X/%X on timeline %u" msgstr "identifiant timeline %u inattendu dans l'enregistrement du checkpoint, avant d'atteindre le point de restauration minimum %X/%X sur la timeline %u" -#: access/transam/xlog.c:9674 +#: access/transam/xlog.c:9682 #, c-format msgid "online backup was canceled, recovery cannot continue" msgstr "la sauvegarde en ligne a été annulée, la restauration ne peut pas continuer" -#: access/transam/xlog.c:9730 access/transam/xlog.c:9777 access/transam/xlog.c:9800 +#: access/transam/xlog.c:9738 access/transam/xlog.c:9785 access/transam/xlog.c:9808 #, c-format msgid "unexpected timeline ID %u (should be %u) in checkpoint record" msgstr "" "identifiant timeline %u inattendu (devrait être %u) dans l'enregistrement du\n" "point de vérification" -#: access/transam/xlog.c:10076 +#: access/transam/xlog.c:10084 #, c-format msgid "could not fsync log segment %s: %m" msgstr "n'a pas pu synchroniser sur disque (fsync) le segment du journal des transactions %s : %m" -#: access/transam/xlog.c:10101 +#: access/transam/xlog.c:10109 #, c-format msgid "could not fsync log file %s: %m" msgstr "n'a pas 
pu synchroniser sur disque (fsync) le fichier de transactions « %s » : %m" -#: access/transam/xlog.c:10109 +#: access/transam/xlog.c:10117 #, c-format msgid "could not fsync write-through log file %s: %m" msgstr "n'a pas pu synchroniser sur disque (fsync) le journal des transactions %s : %m" -#: access/transam/xlog.c:10118 +#: access/transam/xlog.c:10126 #, c-format msgid "could not fdatasync log file %s: %m" msgstr "n'a pas pu synchroniser sur disque (fdatasync) le journal de transactions %s : %m" -#: access/transam/xlog.c:10209 access/transam/xlog.c:10727 access/transam/xlogfuncs.c:297 access/transam/xlogfuncs.c:324 access/transam/xlogfuncs.c:363 access/transam/xlogfuncs.c:384 access/transam/xlogfuncs.c:405 +#: access/transam/xlog.c:10217 access/transam/xlog.c:10742 access/transam/xlogfuncs.c:297 access/transam/xlogfuncs.c:324 access/transam/xlogfuncs.c:363 access/transam/xlogfuncs.c:384 access/transam/xlogfuncs.c:405 #, c-format msgid "WAL control functions cannot be executed during recovery." msgstr "" "les fonctions de contrôle des journaux de transactions ne peuvent pas\n" "être exécutées lors de la restauration." -#: access/transam/xlog.c:10218 access/transam/xlog.c:10736 +#: access/transam/xlog.c:10226 access/transam/xlog.c:10751 #, c-format msgid "WAL level not sufficient for making an online backup" msgstr "" "Le niveau de journalisation (configuré par wal_level) n'est pas suffisant pour\n" "faire une sauvegarde en ligne." -#: access/transam/xlog.c:10219 access/transam/xlog.c:10737 access/transam/xlogfuncs.c:330 +#: access/transam/xlog.c:10227 access/transam/xlog.c:10752 access/transam/xlogfuncs.c:330 #, c-format msgid "wal_level must be set to \"replica\" or \"logical\" at server start." msgstr "" "wal_level doit être configuré à « replica » ou « logical »\n" "au démarrage du serveur." -#: access/transam/xlog.c:10224 +#: access/transam/xlog.c:10232 #, c-format msgid "backup label too long (max %d bytes)" msgstr "label de sauvegarde trop long (%d octets maximum)" -#: access/transam/xlog.c:10261 access/transam/xlog.c:10534 access/transam/xlog.c:10572 +#: access/transam/xlog.c:10269 access/transam/xlog.c:10542 access/transam/xlog.c:10580 #, c-format msgid "a backup is already in progress" msgstr "une sauvegarde est déjà en cours" -#: access/transam/xlog.c:10262 +#: access/transam/xlog.c:10270 #, c-format msgid "Run pg_stop_backup() and try again." msgstr "Exécutez pg_stop_backup() et tentez de nouveau." -#: access/transam/xlog.c:10357 +#: access/transam/xlog.c:10365 #, c-format msgid "WAL generated with full_page_writes=off was replayed since last restartpoint" msgstr "Les journaux générés avec full_page_writes=off ont été rejoués depuis le dernier restartpoint." -#: access/transam/xlog.c:10359 access/transam/xlog.c:10917 +#: access/transam/xlog.c:10367 access/transam/xlog.c:10947 #, c-format msgid "This means that the backup being taken on the standby is corrupt and should not be used. Enable full_page_writes and run CHECKPOINT on the master, and then try an online backup again." msgstr "" @@ -2438,86 +2461,86 @@ msgstr "" "corrompue et ne doit pas être utilisée. Activez full_page_writes et lancez\n" "CHECKPOINT sur le maître, puis recommencez la sauvegarde." 
-#: access/transam/xlog.c:10426 replication/basebackup.c:1096 utils/adt/misc.c:497 +#: access/transam/xlog.c:10434 replication/basebackup.c:1097 utils/adt/misc.c:497 #, c-format msgid "could not read symbolic link \"%s\": %m" msgstr "n'a pas pu lire le lien symbolique « %s » : %m" -#: access/transam/xlog.c:10433 replication/basebackup.c:1101 utils/adt/misc.c:502 +#: access/transam/xlog.c:10441 replication/basebackup.c:1102 utils/adt/misc.c:502 #, c-format msgid "symbolic link \"%s\" target is too long" msgstr "la cible du lien symbolique « %s » est trop long" -#: access/transam/xlog.c:10486 commands/tablespace.c:389 commands/tablespace.c:551 replication/basebackup.c:1116 utils/adt/misc.c:510 +#: access/transam/xlog.c:10494 commands/tablespace.c:389 commands/tablespace.c:551 replication/basebackup.c:1117 utils/adt/misc.c:510 #, c-format msgid "tablespaces are not supported on this platform" msgstr "les tablespaces ne sont pas supportés sur cette plateforme" -#: access/transam/xlog.c:10528 access/transam/xlog.c:10566 access/transam/xlog.c:10775 access/transam/xlogarchive.c:105 access/transam/xlogarchive.c:264 commands/copy.c:1897 commands/copy.c:3127 commands/extension.c:3319 commands/tablespace.c:780 commands/tablespace.c:871 guc-file.l:1001 replication/basebackup.c:480 replication/basebackup.c:548 replication/logical/snapbuild.c:1518 storage/file/copydir.c:72 storage/file/copydir.c:115 -#: storage/file/fd.c:2954 storage/file/fd.c:3046 utils/adt/dbsize.c:70 utils/adt/dbsize.c:227 utils/adt/dbsize.c:307 utils/adt/genfile.c:115 utils/adt/genfile.c:334 +#: access/transam/xlog.c:10536 access/transam/xlog.c:10574 access/transam/xlog.c:10790 access/transam/xlogarchive.c:105 access/transam/xlogarchive.c:264 commands/copy.c:1853 commands/copy.c:3155 commands/extension.c:3319 commands/tablespace.c:780 commands/tablespace.c:871 guc-file.l:1002 replication/basebackup.c:481 replication/basebackup.c:549 replication/logical/snapbuild.c:1518 storage/file/copydir.c:72 storage/file/copydir.c:115 +#: storage/file/fd.c:2976 storage/file/fd.c:3068 utils/adt/dbsize.c:70 utils/adt/dbsize.c:227 utils/adt/dbsize.c:307 utils/adt/genfile.c:115 utils/adt/genfile.c:334 #, c-format msgid "could not stat file \"%s\": %m" msgstr "n'a pas pu tester le fichier « %s » : %m" -#: access/transam/xlog.c:10535 access/transam/xlog.c:10573 +#: access/transam/xlog.c:10543 access/transam/xlog.c:10581 #, c-format msgid "If you're sure there is no backup in progress, remove file \"%s\" and try again." msgstr "" "Si vous êtes certain qu'aucune sauvegarde n'est en cours, supprimez le\n" "fichier « %s » et recommencez de nouveau." 
-#: access/transam/xlog.c:10552 access/transam/xlog.c:10590 access/transam/xlog.c:10978 postmaster/syslogger.c:1391 postmaster/syslogger.c:1404 +#: access/transam/xlog.c:10560 access/transam/xlog.c:10598 access/transam/xlog.c:11005 postmaster/syslogger.c:1391 postmaster/syslogger.c:1404 #, c-format msgid "could not write file \"%s\": %m" msgstr "impossible d'écrire le fichier « %s » : %m" -#: access/transam/xlog.c:10752 +#: access/transam/xlog.c:10767 #, c-format msgid "exclusive backup not in progress" msgstr "une sauvegarde exclusive n'est pas en cours" -#: access/transam/xlog.c:10779 +#: access/transam/xlog.c:10794 #, c-format msgid "a backup is not in progress" msgstr "une sauvegarde n'est pas en cours" -#: access/transam/xlog.c:10852 access/transam/xlog.c:10865 access/transam/xlog.c:11206 access/transam/xlog.c:11212 access/transam/xlog.c:11296 access/transam/xlogfuncs.c:698 +#: access/transam/xlog.c:10880 access/transam/xlog.c:10893 access/transam/xlog.c:11246 access/transam/xlog.c:11252 access/transam/xlog.c:11336 access/transam/xlogfuncs.c:698 #, c-format msgid "invalid data in file \"%s\"" msgstr "données invalides dans le fichier « %s »" -#: access/transam/xlog.c:10869 replication/basebackup.c:994 +#: access/transam/xlog.c:10897 replication/basebackup.c:995 #, c-format msgid "the standby was promoted during online backup" msgstr "le standby a été promu lors de la sauvegarde en ligne" -#: access/transam/xlog.c:10870 replication/basebackup.c:995 +#: access/transam/xlog.c:10898 replication/basebackup.c:996 #, c-format msgid "This means that the backup being taken is corrupt and should not be used. Try taking another online backup." msgstr "" "Cela signifie que la sauvegarde en cours de réalisation est corrompue et ne\n" "doit pas être utilisée. Recommencez la sauvegarde." -#: access/transam/xlog.c:10915 +#: access/transam/xlog.c:10945 #, c-format msgid "WAL generated with full_page_writes=off was replayed during online backup" msgstr "" "le journal de transactions généré avec full_page_writes=off a été rejoué lors\n" "de la sauvegarde en ligne" -#: access/transam/xlog.c:11028 +#: access/transam/xlog.c:11060 #, c-format msgid "pg_stop_backup cleanup done, waiting for required WAL segments to be archived" msgstr "nettoyage de pg_stop_backup terminé, en attente des journaux de transactions requis à archiver" -#: access/transam/xlog.c:11038 +#: access/transam/xlog.c:11070 #, c-format msgid "pg_stop_backup still waiting for all required WAL segments to be archived (%d seconds elapsed)" msgstr "" "pg_stop_backup toujours en attente de la fin de l'archivage des segments de\n" "journaux de transactions requis (%d secondes passées)" -#: access/transam/xlog.c:11040 +#: access/transam/xlog.c:11072 #, c-format msgid "Check that your archive_command is executing properly. pg_stop_backup can be canceled safely, but the database backup will not be usable without all the WAL segments." msgstr "" @@ -2525,12 +2548,12 @@ msgstr "" "peut être annulé avec sûreté mais la sauvegarde de la base ne sera pas\n" "utilisable sans tous les segments WAL." 
-#: access/transam/xlog.c:11047 +#: access/transam/xlog.c:11079 #, c-format msgid "pg_stop_backup complete, all required WAL segments have been archived" msgstr "pg_stop_backup terminé, tous les journaux de transactions requis ont été archivés" -#: access/transam/xlog.c:11051 +#: access/transam/xlog.c:11083 #, c-format msgid "WAL archiving is not enabled; you must ensure that all required WAL segments are copied through other means to complete the backup" msgstr "" @@ -2539,57 +2562,57 @@ msgstr "" "transactions sont copiés par d'autre moyens pour terminer la sauvegarde." #. translator: %s is a WAL record description -#: access/transam/xlog.c:11336 +#: access/transam/xlog.c:11376 #, c-format msgid "WAL redo at %X/%X for %s" msgstr "rejeu des WAL à %X/%X pour %s" -#: access/transam/xlog.c:11385 +#: access/transam/xlog.c:11425 #, c-format msgid "online backup mode was not canceled" msgstr "le mode de sauvegarde en ligne n'a pas été annulé" -#: access/transam/xlog.c:11386 +#: access/transam/xlog.c:11426 #, c-format msgid "File \"%s\" could not be renamed to \"%s\": %m." msgstr "Le fichier « %s » n'a pas pu être renommé en « %s » : %m" -#: access/transam/xlog.c:11395 access/transam/xlog.c:11407 access/transam/xlog.c:11417 +#: access/transam/xlog.c:11435 access/transam/xlog.c:11447 access/transam/xlog.c:11457 #, c-format msgid "online backup mode canceled" msgstr "mode de sauvegarde en ligne annulé" -#: access/transam/xlog.c:11408 +#: access/transam/xlog.c:11448 #, c-format msgid "Files \"%s\" and \"%s\" were renamed to \"%s\" and \"%s\", respectively." msgstr "Les fichiers « %s » et « %s » sont renommés respectivement « %s » et « %s »." -#: access/transam/xlog.c:11418 +#: access/transam/xlog.c:11458 #, c-format msgid "File \"%s\" was renamed to \"%s\", but file \"%s\" could not be renamed to \"%s\": %m." 
msgstr "Le fichier « %s » a été renommé en « %s », mais le fichier « %s » n'a pas pu être renommé en « %s » : %m" -#: access/transam/xlog.c:11540 access/transam/xlogutils.c:724 replication/walreceiver.c:1005 replication/walsender.c:2397 +#: access/transam/xlog.c:11580 access/transam/xlogutils.c:724 replication/walreceiver.c:1011 replication/walsender.c:2412 #, c-format msgid "could not seek in log segment %s to offset %u: %m" msgstr "n'a pas pu se déplacer dans le journal de transactions %s au décalage %u : %m" -#: access/transam/xlog.c:11554 +#: access/transam/xlog.c:11594 #, c-format msgid "could not read from log segment %s, offset %u: %m" msgstr "n'a pas pu lire le journal de transactions %s, décalage %u : %m" -#: access/transam/xlog.c:12043 +#: access/transam/xlog.c:12083 #, c-format msgid "received promote request" msgstr "a reçu une demande de promotion" -#: access/transam/xlog.c:12056 +#: access/transam/xlog.c:12096 #, c-format msgid "trigger file found: %s" msgstr "fichier trigger trouvé : %s" -#: access/transam/xlog.c:12065 +#: access/transam/xlog.c:12105 #, c-format msgid "could not stat trigger file \"%s\": %m" msgstr "n'a pas pu tester le fichier trigger « %s » : %m" @@ -2617,7 +2640,7 @@ msgstr "n'a pas pu restaurer le fichier « %s » à partir de l'archive : %s" msgid "%s \"%s\": %s" msgstr "%s « %s »: %s" -#: access/transam/xlogarchive.c:457 postmaster/syslogger.c:1415 replication/logical/snapbuild.c:1645 replication/slot.c:589 replication/slot.c:1189 replication/slot.c:1303 storage/file/fd.c:642 storage/file/fd.c:737 utils/time/snapmgr.c:1318 +#: access/transam/xlogarchive.c:457 postmaster/syslogger.c:1415 replication/logical/snapbuild.c:1645 replication/slot.c:590 replication/slot.c:1190 replication/slot.c:1304 storage/file/fd.c:641 storage/file/fd.c:736 utils/time/snapmgr.c:1318 #, c-format msgid "could not rename file \"%s\" to \"%s\": %m" msgstr "n'a pas pu renommer le fichier « %s » en « %s » : %m" @@ -2642,7 +2665,7 @@ msgstr "annulation de la sauvegarde due à la déconnexion du processus serveur msgid "a backup is already in progress in this session" msgstr "une sauvegarde est déjà en cours dans cette session" -#: access/transam/xlogfuncs.c:92 commands/tablespace.c:703 commands/tablespace.c:713 postmaster/postmaster.c:1458 replication/basebackup.c:368 replication/basebackup.c:708 storage/file/copydir.c:53 storage/file/copydir.c:96 storage/file/fd.c:2420 storage/file/fd.c:3019 storage/ipc/dsm.c:301 utils/adt/genfile.c:440 utils/adt/misc.c:410 utils/misc/tzparser.c:339 +#: access/transam/xlogfuncs.c:92 commands/tablespace.c:703 commands/tablespace.c:713 postmaster/postmaster.c:1460 replication/basebackup.c:369 replication/basebackup.c:709 storage/file/copydir.c:53 storage/file/copydir.c:96 storage/file/fd.c:2434 storage/file/fd.c:3041 storage/ipc/dsm.c:301 utils/adt/genfile.c:440 utils/adt/misc.c:410 utils/misc/tzparser.c:339 #, c-format msgid "could not open directory \"%s\": %m" msgstr "n'a pas pu ouvrir le répertoire « %s » : %m" @@ -2657,16 +2680,16 @@ msgstr "une sauvegarde non exclusive est en cours" msgid "Did you mean to use pg_stop_backup('f')?" msgstr "Souhaitiez-vous utiliser pg_stop_backup('f') ?" 
-#: access/transam/xlogfuncs.c:205 commands/event_trigger.c:1471 commands/event_trigger.c:2022 commands/extension.c:1895 commands/extension.c:2004 commands/extension.c:2228 commands/prepare.c:721 executor/execExpr.c:2121 executor/execSRF.c:688 executor/functions.c:1029 foreign/foreign.c:488 libpq/hba.c:2563 replication/logical/launcher.c:936 replication/logical/logicalfuncs.c:176 replication/logical/origin.c:1387 replication/slotfuncs.c:197 -#: replication/walsender.c:3166 utils/adt/jsonfuncs.c:1689 utils/adt/jsonfuncs.c:1819 utils/adt/jsonfuncs.c:2007 utils/adt/jsonfuncs.c:2134 utils/adt/jsonfuncs.c:3489 utils/adt/pgstatfuncs.c:456 utils/adt/pgstatfuncs.c:557 utils/fmgr/funcapi.c:62 utils/misc/guc.c:8549 utils/mmgr/portalmem.c:1053 +#: access/transam/xlogfuncs.c:205 commands/event_trigger.c:1471 commands/event_trigger.c:2022 commands/extension.c:1895 commands/extension.c:2004 commands/extension.c:2228 commands/prepare.c:721 executor/execExpr.c:2121 executor/execSRF.c:690 executor/functions.c:1029 foreign/foreign.c:488 libpq/hba.c:2565 replication/logical/launcher.c:1026 replication/logical/logicalfuncs.c:176 replication/logical/origin.c:1443 replication/slotfuncs.c:197 +#: replication/walsender.c:3181 utils/adt/jsonfuncs.c:1689 utils/adt/jsonfuncs.c:1819 utils/adt/jsonfuncs.c:2007 utils/adt/jsonfuncs.c:2134 utils/adt/jsonfuncs.c:3489 utils/adt/pgstatfuncs.c:456 utils/adt/pgstatfuncs.c:557 utils/fmgr/funcapi.c:62 utils/misc/guc.c:8549 utils/mmgr/portalmem.c:1067 #, c-format msgid "set-valued function called in context that cannot accept a set" msgstr "" "la fonction avec set-value a été appelé dans un contexte qui n'accepte pas\n" "un ensemble" -#: access/transam/xlogfuncs.c:209 commands/event_trigger.c:1475 commands/event_trigger.c:2026 commands/extension.c:1899 commands/extension.c:2008 commands/extension.c:2232 commands/prepare.c:725 foreign/foreign.c:493 libpq/hba.c:2567 replication/logical/launcher.c:940 replication/logical/logicalfuncs.c:180 replication/logical/origin.c:1391 replication/slotfuncs.c:201 replication/walsender.c:3170 utils/adt/pgstatfuncs.c:460 -#: utils/adt/pgstatfuncs.c:561 utils/misc/guc.c:8553 utils/misc/pg_config.c:44 utils/mmgr/portalmem.c:1057 +#: access/transam/xlogfuncs.c:209 commands/event_trigger.c:1475 commands/event_trigger.c:2026 commands/extension.c:1899 commands/extension.c:2008 commands/extension.c:2232 commands/prepare.c:725 foreign/foreign.c:493 libpq/hba.c:2569 replication/logical/launcher.c:1030 replication/logical/logicalfuncs.c:180 replication/logical/origin.c:1447 replication/slotfuncs.c:201 replication/walsender.c:3185 utils/adt/pgstatfuncs.c:460 +#: utils/adt/pgstatfuncs.c:561 utils/misc/guc.c:8553 utils/misc/pg_config.c:44 utils/mmgr/portalmem.c:1071 #, c-format msgid "materialize mode required, but it is not allowed in this context" msgstr "mode matérialisé requis mais interdit dans ce contexte" @@ -2842,32 +2865,32 @@ msgstr "BKPBLOCK_SAME_REL configuré, mais pas de relation précédente à %X/%X msgid "invalid block_id %u at %X/%X" msgstr "block_id %u invalide à %X/%X" -#: access/transam/xlogreader.c:1291 +#: access/transam/xlogreader.c:1306 #, c-format msgid "record with invalid length at %X/%X" msgstr "enregistrement de longueur invalide à %X/%X" -#: access/transam/xlogreader.c:1380 +#: access/transam/xlogreader.c:1395 #, c-format msgid "invalid compressed image at %X/%X, block %d" msgstr "image compressée invalide à %X/%X, bloc %d" -#: access/transam/xlogutils.c:747 replication/walsender.c:2416 +#: access/transam/xlogutils.c:747 
replication/walsender.c:2431 #, c-format msgid "could not read from log segment %s, offset %u, length %lu: %m" msgstr "n'a pas pu lire le journal de transactions %s, décalage %u, longueur %lu : %m" -#: bootstrap/bootstrap.c:272 postmaster/postmaster.c:819 tcop/postgres.c:3510 +#: bootstrap/bootstrap.c:272 postmaster/postmaster.c:821 tcop/postgres.c:3508 #, c-format msgid "--%s requires a value" msgstr "--%s requiert une valeur" -#: bootstrap/bootstrap.c:277 postmaster/postmaster.c:824 tcop/postgres.c:3515 +#: bootstrap/bootstrap.c:277 postmaster/postmaster.c:826 tcop/postgres.c:3513 #, c-format msgid "-c %s requires a value" msgstr "-c %s requiert une valeur" -#: bootstrap/bootstrap.c:288 postmaster/postmaster.c:836 postmaster/postmaster.c:849 +#: bootstrap/bootstrap.c:288 postmaster/postmaster.c:838 postmaster/postmaster.c:851 #, c-format msgid "Try \"%s --help\" for more information.\n" msgstr "Essayez « %s --help » pour plus d'informations.\n" @@ -2995,9 +3018,9 @@ msgstr "le « Large Object » %u n'existe pas" #: catalog/aclchk.c:885 catalog/aclchk.c:894 commands/collationcmds.c:114 commands/copy.c:1042 commands/copy.c:1062 commands/copy.c:1071 commands/copy.c:1080 commands/copy.c:1089 commands/copy.c:1098 commands/copy.c:1107 commands/copy.c:1116 commands/copy.c:1125 commands/copy.c:1143 commands/copy.c:1159 commands/copy.c:1179 commands/copy.c:1196 commands/dbcommands.c:155 commands/dbcommands.c:164 commands/dbcommands.c:173 #: commands/dbcommands.c:182 commands/dbcommands.c:191 commands/dbcommands.c:200 commands/dbcommands.c:209 commands/dbcommands.c:218 commands/dbcommands.c:227 commands/dbcommands.c:1427 commands/dbcommands.c:1436 commands/dbcommands.c:1445 commands/dbcommands.c:1454 commands/extension.c:1678 commands/extension.c:1688 commands/extension.c:1698 commands/extension.c:1708 commands/extension.c:2949 commands/foreigncmds.c:537 #: commands/foreigncmds.c:546 commands/functioncmds.c:526 commands/functioncmds.c:643 commands/functioncmds.c:652 commands/functioncmds.c:661 commands/functioncmds.c:670 commands/functioncmds.c:2097 commands/functioncmds.c:2105 commands/publicationcmds.c:90 commands/sequence.c:1265 commands/sequence.c:1275 commands/sequence.c:1285 commands/sequence.c:1295 commands/sequence.c:1305 commands/sequence.c:1315 commands/sequence.c:1325 -#: commands/sequence.c:1335 commands/sequence.c:1345 commands/subscriptioncmds.c:110 commands/subscriptioncmds.c:120 commands/subscriptioncmds.c:130 commands/subscriptioncmds.c:140 commands/subscriptioncmds.c:154 commands/subscriptioncmds.c:165 commands/subscriptioncmds.c:179 commands/tablecmds.c:5960 commands/typecmds.c:298 commands/typecmds.c:1375 commands/typecmds.c:1384 commands/typecmds.c:1392 commands/typecmds.c:1400 -#: commands/typecmds.c:1408 commands/user.c:134 commands/user.c:148 commands/user.c:157 commands/user.c:166 commands/user.c:175 commands/user.c:184 commands/user.c:193 commands/user.c:202 commands/user.c:211 commands/user.c:220 commands/user.c:229 commands/user.c:238 commands/user.c:247 commands/user.c:532 commands/user.c:540 commands/user.c:548 commands/user.c:556 commands/user.c:564 commands/user.c:572 commands/user.c:580 -#: commands/user.c:588 commands/user.c:597 commands/user.c:605 commands/user.c:613 parser/parse_utilcmd.c:395 replication/pgoutput/pgoutput.c:107 replication/pgoutput/pgoutput.c:128 replication/walsender.c:800 replication/walsender.c:811 replication/walsender.c:821 +#: commands/sequence.c:1335 commands/sequence.c:1345 commands/subscriptioncmds.c:110 commands/subscriptioncmds.c:120 
commands/subscriptioncmds.c:130 commands/subscriptioncmds.c:140 commands/subscriptioncmds.c:154 commands/subscriptioncmds.c:165 commands/subscriptioncmds.c:179 commands/tablecmds.c:5987 commands/typecmds.c:298 commands/typecmds.c:1396 commands/typecmds.c:1405 commands/typecmds.c:1413 commands/typecmds.c:1421 +#: commands/typecmds.c:1429 commands/user.c:134 commands/user.c:148 commands/user.c:157 commands/user.c:166 commands/user.c:175 commands/user.c:184 commands/user.c:193 commands/user.c:202 commands/user.c:211 commands/user.c:220 commands/user.c:229 commands/user.c:238 commands/user.c:247 commands/user.c:555 commands/user.c:563 commands/user.c:571 commands/user.c:579 commands/user.c:587 commands/user.c:595 commands/user.c:603 +#: commands/user.c:611 commands/user.c:620 commands/user.c:628 commands/user.c:636 parser/parse_utilcmd.c:399 replication/pgoutput/pgoutput.c:108 replication/pgoutput/pgoutput.c:129 replication/walsender.c:801 replication/walsender.c:812 replication/walsender.c:822 #, c-format msgid "conflicting or redundant options" msgstr "options en conflit ou redondantes" @@ -3012,13 +3035,13 @@ msgstr "les droits par défaut ne peuvent pas être configurés pour les colonne msgid "cannot use IN SCHEMA clause when using GRANT/REVOKE ON SCHEMAS" msgstr "ne peut pas utiliser la clause IN SCHEMA lors de l'utilisation de GRANT/REVOKE ON SCHEMAS" -#: catalog/aclchk.c:1521 catalog/objectaddress.c:1389 commands/analyze.c:390 commands/copy.c:4746 commands/sequence.c:1700 commands/tablecmds.c:5608 commands/tablecmds.c:5755 commands/tablecmds.c:5812 commands/tablecmds.c:5885 commands/tablecmds.c:5979 commands/tablecmds.c:6038 commands/tablecmds.c:6163 commands/tablecmds.c:6217 commands/tablecmds.c:6309 commands/tablecmds.c:6465 commands/tablecmds.c:8694 commands/tablecmds.c:8970 -#: commands/tablecmds.c:9405 commands/trigger.c:791 parser/analyze.c:2310 parser/parse_relation.c:2699 parser/parse_relation.c:2761 parser/parse_target.c:1002 parser/parse_type.c:127 utils/adt/acl.c:2823 utils/adt/ruleutils.c:2356 +#: catalog/aclchk.c:1521 catalog/objectaddress.c:1389 commands/analyze.c:399 commands/copy.c:4774 commands/sequence.c:1700 commands/tablecmds.c:5635 commands/tablecmds.c:5782 commands/tablecmds.c:5839 commands/tablecmds.c:5912 commands/tablecmds.c:6006 commands/tablecmds.c:6065 commands/tablecmds.c:6190 commands/tablecmds.c:6244 commands/tablecmds.c:6336 commands/tablecmds.c:6492 commands/tablecmds.c:8721 commands/tablecmds.c:8997 +#: commands/tablecmds.c:9432 commands/trigger.c:817 parser/analyze.c:2310 parser/parse_relation.c:2733 parser/parse_relation.c:2795 parser/parse_target.c:1002 parser/parse_type.c:127 utils/adt/acl.c:2828 utils/adt/ruleutils.c:2356 #, c-format msgid "column \"%s\" of relation \"%s\" does not exist" msgstr "la colonne « %s » de la relation « %s » n'existe pas" -#: catalog/aclchk.c:1787 catalog/objectaddress.c:1229 commands/sequence.c:1138 commands/tablecmds.c:229 commands/tablecmds.c:13080 utils/adt/acl.c:2059 utils/adt/acl.c:2089 utils/adt/acl.c:2121 utils/adt/acl.c:2153 utils/adt/acl.c:2181 utils/adt/acl.c:2211 +#: catalog/aclchk.c:1787 catalog/objectaddress.c:1229 commands/sequence.c:1138 commands/tablecmds.c:229 commands/tablecmds.c:13107 utils/adt/acl.c:2061 utils/adt/acl.c:2091 utils/adt/acl.c:2123 utils/adt/acl.c:2155 utils/adt/acl.c:2183 utils/adt/acl.c:2213 #, c-format msgid "\"%s\" is not a sequence" msgstr "« %s » n'est pas une séquence" @@ -3358,7 +3381,7 @@ msgstr "le wrapper de données distantes d'OID %u n'existe pas" msgid "foreign server with 
OID %u does not exist" msgstr "le serveur distant d'OID %u n'existe pas" -#: catalog/aclchk.c:4246 catalog/aclchk.c:4585 utils/cache/typcache.c:238 +#: catalog/aclchk.c:4246 catalog/aclchk.c:4585 utils/cache/typcache.c:240 #, c-format msgid "type with OID %u does not exist" msgstr "le type d'OID %u n'existe pas" @@ -3413,7 +3436,7 @@ msgstr "l'extension d'OID %u n'existe pas" msgid "publication with OID %u does not exist" msgstr "la publication d'OID %u n'existe pas" -#: catalog/aclchk.c:5123 commands/subscriptioncmds.c:1075 +#: catalog/aclchk.c:5123 commands/subscriptioncmds.c:1098 #, c-format msgid "subscription with OID %u does not exist" msgstr "la souscription d'OID %u n'existe pas" @@ -3506,12 +3529,12 @@ msgstr "droit refusé pour créer « %s.%s »" msgid "System catalog modifications are currently disallowed." msgstr "Les modifications du catalogue système sont actuellement interdites." -#: catalog/heap.c:421 commands/tablecmds.c:1649 commands/tablecmds.c:2159 commands/tablecmds.c:5212 +#: catalog/heap.c:421 commands/tablecmds.c:1649 commands/tablecmds.c:2159 commands/tablecmds.c:5225 #, c-format msgid "tables can have at most %d columns" msgstr "les tables peuvent avoir au plus %d colonnes" -#: catalog/heap.c:438 commands/tablecmds.c:5471 +#: catalog/heap.c:438 commands/tablecmds.c:5498 #, c-format msgid "column name \"%s\" conflicts with a system column name" msgstr "le nom de la colonne « %s » entre en conflit avec le nom d'une colonne système" @@ -3536,17 +3559,17 @@ msgstr "le type composite %s ne peut pas être membre de lui-même" msgid "no collation was derived for column \"%s\" with collatable type %s" msgstr "aucun collationnement n'a été dérivé pour la colonne « %s » de type collationnable %s" -#: catalog/heap.c:581 commands/createas.c:204 commands/createas.c:501 commands/indexcmds.c:1149 commands/tablecmds.c:13376 commands/view.c:103 regex/regc_pg_locale.c:263 utils/adt/formatting.c:1547 utils/adt/formatting.c:1671 utils/adt/formatting.c:1796 utils/adt/like.c:184 utils/adt/selfuncs.c:5563 utils/adt/varlena.c:1417 utils/adt/varlena.c:1866 +#: catalog/heap.c:581 commands/createas.c:204 commands/createas.c:501 commands/indexcmds.c:1152 commands/tablecmds.c:13403 commands/view.c:103 regex/regc_pg_locale.c:263 utils/adt/formatting.c:1546 utils/adt/formatting.c:1670 utils/adt/formatting.c:1795 utils/adt/like.c:184 utils/adt/selfuncs.c:5590 utils/adt/varlena.c:1417 utils/adt/varlena.c:1854 #, c-format msgid "Use the COLLATE clause to set the collation explicitly." msgstr "Utilisez la clause COLLARE pour configurer explicitement le collationnement." 
-#: catalog/heap.c:1067 catalog/index.c:807 commands/tablecmds.c:2943 +#: catalog/heap.c:1067 catalog/index.c:806 commands/tablecmds.c:2943 #, c-format msgid "relation \"%s\" already exists" msgstr "la relation « %s » existe déjà" -#: catalog/heap.c:1083 catalog/pg_type.c:410 catalog/pg_type.c:732 commands/typecmds.c:239 commands/typecmds.c:788 commands/typecmds.c:1139 commands/typecmds.c:1350 commands/typecmds.c:2106 +#: catalog/heap.c:1083 catalog/pg_type.c:410 catalog/pg_type.c:732 commands/typecmds.c:239 commands/typecmds.c:788 commands/typecmds.c:1139 commands/typecmds.c:1371 commands/typecmds.c:2127 #, c-format msgid "type \"%s\" already exists" msgstr "le type « %s » existe déjà" @@ -3563,146 +3586,146 @@ msgstr "" msgid "pg_class heap OID value not set when in binary upgrade mode" msgstr "OID du heap de pg_class non configuré en mode de mise à jour binaire" -#: catalog/heap.c:2078 +#: catalog/heap.c:2080 #, c-format msgid "cannot add NO INHERIT constraint to partitioned table \"%s\"" msgstr "ne peut pas ajouter une contrainte NO INHERIT pour la table partitionnée « %s »" -#: catalog/heap.c:2336 +#: catalog/heap.c:2338 #, c-format msgid "check constraint \"%s\" already exists" msgstr "la contrainte de vérification « %s » existe déjà" -#: catalog/heap.c:2504 catalog/pg_constraint.c:649 commands/tablecmds.c:6825 +#: catalog/heap.c:2506 catalog/pg_constraint.c:649 commands/tablecmds.c:6852 #, c-format msgid "constraint \"%s\" for relation \"%s\" already exists" msgstr "la contrainte « %s » de la relation « %s » existe déjà" -#: catalog/heap.c:2511 +#: catalog/heap.c:2513 #, c-format msgid "constraint \"%s\" conflicts with non-inherited constraint on relation \"%s\"" msgstr "la contrainte « %s » entre en conflit avec la constrainte non héritée sur la relation « %s »" -#: catalog/heap.c:2522 +#: catalog/heap.c:2524 #, c-format msgid "constraint \"%s\" conflicts with inherited constraint on relation \"%s\"" msgstr "la contrainte « %s » entre en conflit avec une contrainte héritée sur la relation « %s »" -#: catalog/heap.c:2532 +#: catalog/heap.c:2534 #, c-format msgid "constraint \"%s\" conflicts with NOT VALID constraint on relation \"%s\"" msgstr "la contrainte « %s » entre en conflit avec une contrainte NOT VALID sur la relation « %s »" -#: catalog/heap.c:2537 +#: catalog/heap.c:2539 #, c-format msgid "merging constraint \"%s\" with inherited definition" msgstr "assemblage de la contrainte « %s » avec une définition héritée" -#: catalog/heap.c:2653 +#: catalog/heap.c:2655 #, c-format msgid "cannot use column references in default expression" msgstr "ne peut pas utiliser les références de colonnes dans l'expression par défaut" -#: catalog/heap.c:2678 rewrite/rewriteHandler.c:1171 +#: catalog/heap.c:2680 rewrite/rewriteHandler.c:1176 #, c-format msgid "column \"%s\" is of type %s but default expression is of type %s" msgstr "la colonne « %s » est de type %s alors que l'expression par défaut est de type %s" -#: catalog/heap.c:2683 commands/prepare.c:384 parser/parse_node.c:430 parser/parse_target.c:590 parser/parse_target.c:840 parser/parse_target.c:850 rewrite/rewriteHandler.c:1176 +#: catalog/heap.c:2685 commands/prepare.c:384 parser/parse_node.c:430 parser/parse_target.c:590 parser/parse_target.c:840 parser/parse_target.c:850 rewrite/rewriteHandler.c:1181 #, c-format msgid "You will need to rewrite or cast the expression." msgstr "Vous devez réécrire l'expression ou lui appliquer une transformation de type." 
-#: catalog/heap.c:2730 +#: catalog/heap.c:2732 #, c-format msgid "only table \"%s\" can be referenced in check constraint" msgstr "seule la table « %s » peut être référencée dans la contrainte de vérification" -#: catalog/heap.c:2970 +#: catalog/heap.c:2972 #, c-format msgid "unsupported ON COMMIT and foreign key combination" msgstr "combinaison ON COMMIT et clé étrangère non supportée" -#: catalog/heap.c:2971 +#: catalog/heap.c:2973 #, c-format msgid "Table \"%s\" references \"%s\", but they do not have the same ON COMMIT setting." msgstr "" "La table « %s » référence « %s » mais elles n'ont pas la même valeur pour le\n" "paramètre ON COMMIT." -#: catalog/heap.c:2976 +#: catalog/heap.c:2978 #, c-format msgid "cannot truncate a table referenced in a foreign key constraint" msgstr "ne peut pas tronquer une table référencée dans une contrainte de clé étrangère" -#: catalog/heap.c:2977 +#: catalog/heap.c:2979 #, c-format msgid "Table \"%s\" references \"%s\"." msgstr "La table « %s » référence « %s »." -#: catalog/heap.c:2979 +#: catalog/heap.c:2981 #, c-format msgid "Truncate table \"%s\" at the same time, or use TRUNCATE ... CASCADE." msgstr "Tronquez la table « %s » en même temps, ou utilisez TRUNCATE ... CASCADE." -#: catalog/index.c:210 parser/parse_utilcmd.c:1671 parser/parse_utilcmd.c:1757 +#: catalog/index.c:213 parser/parse_utilcmd.c:1692 parser/parse_utilcmd.c:1778 #, c-format msgid "multiple primary keys for table \"%s\" are not allowed" msgstr "les clés primaires multiples ne sont pas autorisées pour la table « %s »" -#: catalog/index.c:228 +#: catalog/index.c:231 #, c-format msgid "primary keys cannot be expressions" msgstr "les clés primaires ne peuvent pas être des expressions" -#: catalog/index.c:757 catalog/index.c:1175 +#: catalog/index.c:756 catalog/index.c:1174 #, c-format msgid "user-defined indexes on system catalog tables are not supported" msgstr "les index définis par l'utilisateur sur les tables du catalogue système ne sont pas supportés" -#: catalog/index.c:767 +#: catalog/index.c:766 #, c-format msgid "concurrent index creation on system catalog tables is not supported" msgstr "" "la création en parallèle d'un index sur les tables du catalogue système\n" "n'est pas supportée" -#: catalog/index.c:785 +#: catalog/index.c:784 #, c-format msgid "shared indexes cannot be created after initdb" msgstr "les index partagés ne peuvent pas être créés après initdb" -#: catalog/index.c:799 commands/createas.c:250 commands/sequence.c:152 parser/parse_utilcmd.c:201 +#: catalog/index.c:798 commands/createas.c:250 commands/sequence.c:152 parser/parse_utilcmd.c:203 #, c-format msgid "relation \"%s\" already exists, skipping" msgstr "la relation « %s » existe déjà, poursuite du traitement" -#: catalog/index.c:835 +#: catalog/index.c:834 #, c-format msgid "pg_class index OID value not set when in binary upgrade mode" msgstr "OID de l'index de pg_class non configuré en mode de mise à jour binaire" -#: catalog/index.c:1436 +#: catalog/index.c:1435 #, c-format msgid "DROP INDEX CONCURRENTLY must be first action in transaction" msgstr "DROP INDEX CONCURRENTLY doit être la première action dans une transaction" -#: catalog/index.c:2024 +#: catalog/index.c:2023 #, c-format msgid "building index \"%s\" on table \"%s\"" msgstr "construction de l'index « %s » sur la table « %s »" -#: catalog/index.c:3336 +#: catalog/index.c:3335 #, c-format msgid "cannot reindex temporary tables of other sessions" msgstr "ne peut pas ré-indexer les tables temporaires des autres sessions" -#: 
catalog/index.c:3467 +#: catalog/index.c:3466 #, c-format msgid "index \"%s\" was reindexed" msgstr "l'index « %s » a été réindexée" -#: catalog/namespace.c:235 catalog/namespace.c:433 catalog/namespace.c:527 commands/trigger.c:4931 +#: catalog/namespace.c:235 catalog/namespace.c:433 catalog/namespace.c:527 commands/trigger.c:5148 #, c-format msgid "cross-database references are not implemented: \"%s.%s.%s\"" msgstr "les références entre bases de données ne sont pas implémentées : « %s.%s.%s »" @@ -3727,7 +3750,7 @@ msgstr "n'a pas pu obtenir un verrou sur la relation « %s »" msgid "relation \"%s.%s\" does not exist" msgstr "la relation « %s.%s » n'existe pas" -#: catalog/namespace.c:405 parser/parse_relation.c:1177 parser/parse_relation.c:1185 +#: catalog/namespace.c:405 parser/parse_relation.c:1171 parser/parse_relation.c:1179 #, c-format msgid "relation \"%s\" does not exist" msgstr "la relation « %s » n'existe pas" @@ -3832,27 +3855,27 @@ msgstr "ne peut pas créer des tables temporaires lors de la restauration" msgid "cannot create temporary tables during a parallel operation" msgstr "ne peut pas créer de tables temporaires pendant une opération parallèle" -#: catalog/namespace.c:4072 commands/tablespace.c:1169 commands/variable.c:64 utils/misc/guc.c:9990 utils/misc/guc.c:10068 +#: catalog/namespace.c:4072 commands/tablespace.c:1169 commands/variable.c:64 utils/misc/guc.c:9983 utils/misc/guc.c:10061 #, c-format msgid "List syntax is invalid." msgstr "La syntaxe de la liste est invalide." -#: catalog/objectaddress.c:1237 catalog/pg_publication.c:66 commands/lockcmds.c:93 commands/policy.c:94 commands/policy.c:391 commands/policy.c:481 commands/tablecmds.c:223 commands/tablecmds.c:265 commands/tablecmds.c:1507 commands/tablecmds.c:4722 commands/tablecmds.c:8810 +#: catalog/objectaddress.c:1237 catalog/pg_publication.c:66 commands/lockcmds.c:93 commands/policy.c:94 commands/policy.c:391 commands/policy.c:481 commands/tablecmds.c:223 commands/tablecmds.c:265 commands/tablecmds.c:1507 commands/tablecmds.c:4722 commands/tablecmds.c:8837 #, c-format msgid "\"%s\" is not a table" msgstr "« %s » n'est pas une table" -#: catalog/objectaddress.c:1244 commands/tablecmds.c:235 commands/tablecmds.c:4752 commands/tablecmds.c:13085 commands/view.c:141 +#: catalog/objectaddress.c:1244 commands/tablecmds.c:235 commands/tablecmds.c:4752 commands/tablecmds.c:13112 commands/view.c:141 #, c-format msgid "\"%s\" is not a view" msgstr "« %s » n'est pas une vue" -#: catalog/objectaddress.c:1251 commands/matview.c:174 commands/tablecmds.c:241 commands/tablecmds.c:13090 +#: catalog/objectaddress.c:1251 commands/matview.c:174 commands/tablecmds.c:241 commands/tablecmds.c:13117 #, c-format msgid "\"%s\" is not a materialized view" msgstr "« %s » n'est pas une vue matérialisée" -#: catalog/objectaddress.c:1258 commands/tablecmds.c:259 commands/tablecmds.c:4755 commands/tablecmds.c:13095 +#: catalog/objectaddress.c:1258 commands/tablecmds.c:259 commands/tablecmds.c:4755 commands/tablecmds.c:13122 #, c-format msgid "\"%s\" is not a foreign table" msgstr "« %s » n'est pas une table distante" @@ -3872,7 +3895,7 @@ msgstr "le nom de la colonne doit être qualifié" msgid "default value for column \"%s\" of relation \"%s\" does not exist" msgstr "la valeur par défaut de la colonne « %s » de la relation « %s » n'existe pas" -#: catalog/objectaddress.c:1508 commands/functioncmds.c:128 commands/tablecmds.c:251 commands/typecmds.c:3233 parser/parse_type.c:226 parser/parse_type.c:255 parser/parse_type.c:794 utils/adt/acl.c:4357 
+#: catalog/objectaddress.c:1508 commands/functioncmds.c:128 commands/tablecmds.c:251 commands/typecmds.c:3269 parser/parse_type.c:226 parser/parse_type.c:255 parser/parse_type.c:794 utils/adt/acl.c:4362 #, c-format msgid "type \"%s\" does not exist" msgstr "le type « %s » n'existe pas" @@ -4260,17 +4283,25 @@ msgstr "famille d'opérateur %s pour la méthode d'accès %s" msgid "%s in publication %s" msgstr "%s dans la publication %s" -#: catalog/partition.c:727 +#: catalog/partition.c:728 #, c-format -msgid "cannot create range partition with empty range" -msgstr "ne peut pas créer une partition par intervalle avec un intervalle vide" +msgid "empty range bound specified for partition \"%s\"" +msgstr "limite d'intervalle vide indiquée pour la parttion « %s »" + +#: catalog/partition.c:730 +#, fuzzy, c-format +#| msgid "range lower bound must be less than or equal to range upper bound" +msgid "Specified lower bound %s is greater than or equal to upper bound %s." +msgstr "" +"la limite inférieure de l'intervalle de valeurs doit être inférieure ou égale\n" +"à la limite supérieure de l'intervalle de valeurs" -#: catalog/partition.c:808 +#: catalog/partition.c:814 #, c-format msgid "partition \"%s\" would overlap partition \"%s\"" msgstr "la partition « %s » surchargerait la partition « %s »" -#: catalog/partition.c:921 catalog/partition.c:1099 commands/analyze.c:1446 commands/copy.c:1467 commands/tablecmds.c:8872 executor/execExprInterp.c:2853 executor/execMain.c:1878 executor/execMain.c:1956 executor/execMain.c:2004 executor/execMain.c:2114 executor/execMain.c:3294 executor/nodeModifyTable.c:1518 +#: catalog/partition.c:927 catalog/partition.c:1110 commands/analyze.c:1462 commands/copy.c:2510 commands/tablecmds.c:8899 executor/execExprInterp.c:2853 executor/execMain.c:1907 executor/execMain.c:1985 executor/execMain.c:2033 executor/execMain.c:2143 executor/execMain.c:3322 executor/nodeModifyTable.c:1533 msgid "could not convert row type" msgstr "n'a pas pu convertir le type de ligne" @@ -4386,7 +4417,7 @@ msgstr "l'impémentation d'aggrégat glissant retourne le type %s, mais l'implé msgid "sort operator can only be specified for single-argument aggregates" msgstr "l'opérateur de tri peut seulement être indiqué pour des agrégats à un seul argument" -#: catalog/pg_aggregate.c:810 commands/typecmds.c:1698 commands/typecmds.c:1749 commands/typecmds.c:1780 commands/typecmds.c:1803 commands/typecmds.c:1824 commands/typecmds.c:1851 commands/typecmds.c:1878 commands/typecmds.c:1955 commands/typecmds.c:1997 parser/parse_func.c:369 parser/parse_func.c:398 parser/parse_func.c:423 parser/parse_func.c:437 parser/parse_func.c:512 parser/parse_func.c:523 parser/parse_func.c:1977 +#: catalog/pg_aggregate.c:810 commands/typecmds.c:1719 commands/typecmds.c:1770 commands/typecmds.c:1801 commands/typecmds.c:1824 commands/typecmds.c:1845 commands/typecmds.c:1872 commands/typecmds.c:1899 commands/typecmds.c:1976 commands/typecmds.c:2018 parser/parse_func.c:369 parser/parse_func.c:398 parser/parse_func.c:423 parser/parse_func.c:437 parser/parse_func.c:512 parser/parse_func.c:523 parser/parse_func.c:1977 #, c-format msgid "function %s does not exist" msgstr "la fonction %s n'existe pas" @@ -4431,22 +4462,22 @@ msgstr "le collationnement « %s » pour l'encodage « %s » existe déjà" msgid "constraint \"%s\" for domain %s already exists" msgstr "la contrainte « %s » du domaine %s existe déjà" -#: catalog/pg_constraint.c:788 +#: catalog/pg_constraint.c:788 catalog/pg_constraint.c:864 #, c-format msgid "table \"%s\" has 
multiple constraints named \"%s\"" msgstr "la table « %s » a de nombreuses contraintes nommées « %s »" -#: catalog/pg_constraint.c:800 +#: catalog/pg_constraint.c:800 catalog/pg_constraint.c:898 #, c-format msgid "constraint \"%s\" for table \"%s\" does not exist" msgstr "la contrainte « %s » de la table « %s » n'existe pas" -#: catalog/pg_constraint.c:846 +#: catalog/pg_constraint.c:944 #, c-format msgid "domain %s has multiple constraints named \"%s\"" msgstr "le domaine %s a plusieurs contraintes nommées « %s »" -#: catalog/pg_constraint.c:858 +#: catalog/pg_constraint.c:956 #, c-format msgid "constraint \"%s\" for domain %s does not exist" msgstr "la contrainte « %s » du domaine %s n'existe pas" @@ -4674,7 +4705,7 @@ msgstr "les fonctions SQL ne peuvent avoir d'arguments du type %s" msgid "SQL function \"%s\"" msgstr "Fonction SQL « %s »" -#: catalog/pg_publication.c:57 commands/trigger.c:196 +#: catalog/pg_publication.c:57 commands/trigger.c:197 #, c-format msgid "\"%s\" is a partitioned table" msgstr "« %s » est une table partitionnée" @@ -4793,7 +4824,7 @@ msgstr "" "ne peut pas réaffecter les objets appartenant à %s car ils sont nécessaires au\n" "système de bases de données" -#: catalog/pg_subscription.c:176 commands/subscriptioncmds.c:636 commands/subscriptioncmds.c:844 commands/subscriptioncmds.c:1044 +#: catalog/pg_subscription.c:176 commands/subscriptioncmds.c:633 commands/subscriptioncmds.c:843 commands/subscriptioncmds.c:1067 #, c-format msgid "subscription \"%s\" does not exist" msgstr "la souscription « %s » n'existe pas" @@ -4833,7 +4864,7 @@ msgstr "les types de taille fixe doivent avoir un stockage de base" msgid "could not form array type name for type \"%s\"" msgstr "n'a pas pu former le nom du type array pour le type de données %s" -#: catalog/toasting.c:105 commands/indexcmds.c:399 commands/tablecmds.c:4734 commands/tablecmds.c:12973 +#: catalog/toasting.c:105 commands/indexcmds.c:399 commands/tablecmds.c:4734 commands/tablecmds.c:13000 #, c-format msgid "\"%s\" is not a table or materialized view" msgstr "« %s » n'est pas une table ou une vue matérialisée" @@ -5030,61 +5061,67 @@ msgstr "la méthode d'accès « %s » n'existe pas" msgid "handler function is not specified" msgstr "la fonction handler n'est pas spécifiée" -#: commands/amcmds.c:262 commands/event_trigger.c:243 commands/foreigncmds.c:487 commands/proclang.c:117 commands/proclang.c:289 commands/trigger.c:590 parser/parse_clause.c:1011 +#: commands/amcmds.c:262 commands/event_trigger.c:243 commands/foreigncmds.c:487 commands/proclang.c:117 commands/proclang.c:289 commands/trigger.c:616 parser/parse_clause.c:982 #, c-format msgid "function %s must return type %s" msgstr "la fonction %s doit renvoyer le type %s" -#: commands/analyze.c:151 +#: commands/analyze.c:156 #, c-format msgid "skipping analyze of \"%s\" --- lock not available" msgstr "ignore l'analyse de « %s » --- verrou non disponible" -#: commands/analyze.c:168 +#: commands/analyze.c:173 #, c-format msgid "skipping \"%s\" --- only superuser can analyze it" msgstr "ignore « %s » --- seul le super-utilisateur peut l'analyser" -#: commands/analyze.c:172 +#: commands/analyze.c:177 #, c-format msgid "skipping \"%s\" --- only superuser or database owner can analyze it" msgstr "" "ignore « %s » --- seul le super-utilisateur ou le propriétaire de la base de\n" "données peut l'analyser" -#: commands/analyze.c:176 +#: commands/analyze.c:181 #, c-format msgid "skipping \"%s\" --- only table or database owner can analyze it" msgstr "" "ignore « %s » --- seul le 
propriétaire de la table ou de la base de données\n" "peut l'analyser" -#: commands/analyze.c:236 +#: commands/analyze.c:241 #, c-format msgid "skipping \"%s\" --- cannot analyze this foreign table" msgstr "ignore « %s » --- ne peut pas analyser cette table distante" -#: commands/analyze.c:253 +#: commands/analyze.c:258 #, c-format msgid "skipping \"%s\" --- cannot analyze non-tables or special system tables" msgstr "ignore « %s » --- ne peut pas analyser les objets autres que les tables et les tables système" -#: commands/analyze.c:334 +#: commands/analyze.c:339 #, c-format msgid "analyzing \"%s.%s\" inheritance tree" msgstr "analyse l'arbre d'héritage « %s.%s »" -#: commands/analyze.c:339 +#: commands/analyze.c:344 #, c-format msgid "analyzing \"%s.%s\"" msgstr "analyse « %s.%s »" -#: commands/analyze.c:668 +#: commands/analyze.c:404 +#, fuzzy, c-format +#| msgid "column \"%s\" of relation \"%s\" does not exist" +msgid "column \"%s\" of relation \"%s\" appears more than once" +msgstr "la colonne « %s » de la relation « %s » n'existe pas" + +#: commands/analyze.c:684 #, c-format msgid "automatic analyze of table \"%s.%s.%s\" system usage: %s" msgstr "ANALYZE automatique de la table « %s.%s.%s » ; utilisation système : %s" -#: commands/analyze.c:1220 +#: commands/analyze.c:1236 #, c-format msgid "\"%s\": scanned %d of %u pages, containing %.0f live rows and %.0f dead rows; %d rows in sample, %.0f estimated total rows" msgstr "" @@ -5093,54 +5130,54 @@ msgstr "" " %d lignes dans l'échantillon,\n" " %.0f lignes totales estimées" -#: commands/analyze.c:1300 +#: commands/analyze.c:1316 #, c-format msgid "skipping analyze of \"%s.%s\" inheritance tree --- this inheritance tree contains no child tables" msgstr "ignore l'analyse de l'arbre d'héritage « %s.%s » --- cet arbre d'héritage ne contient pas de tables enfants" -#: commands/analyze.c:1398 +#: commands/analyze.c:1414 #, c-format msgid "skipping analyze of \"%s.%s\" inheritance tree --- this inheritance tree contains no analyzable child tables" msgstr "ignore l'analyse de l'arbre d'héritage « %s.%s » --- cet arbre d'héritage ne contient pas de tables enfants analysables" -#: commands/async.c:555 +#: commands/async.c:558 #, c-format msgid "channel name cannot be empty" msgstr "le nom du canal ne peut pas être vide" -#: commands/async.c:560 +#: commands/async.c:563 #, c-format msgid "channel name too long" msgstr "nom du canal trop long" -#: commands/async.c:567 +#: commands/async.c:570 #, c-format msgid "payload string too long" msgstr "chaîne de charge trop longue" -#: commands/async.c:753 +#: commands/async.c:756 #, c-format msgid "cannot PREPARE a transaction that has executed LISTEN, UNLISTEN, or NOTIFY" msgstr "" "ne peut pas exécuter PREPARE sur une transaction qui a exécuté LISTEN,\n" "UNLISTEN ou NOTIFY" -#: commands/async.c:856 +#: commands/async.c:859 #, c-format msgid "too many notifications in the NOTIFY queue" msgstr "trop de notifications dans la queue NOTIFY" -#: commands/async.c:1486 +#: commands/async.c:1491 #, c-format msgid "NOTIFY queue is %.0f%% full" msgstr "la queue NOTIFY est pleine à %.0f%%" -#: commands/async.c:1488 +#: commands/async.c:1493 #, c-format msgid "The server process with PID %d is among those with the oldest transactions." msgstr "Le processus serveur de PID %d est parmi ceux qui ont les transactions les plus anciennes." -#: commands/async.c:1491 +#: commands/async.c:1496 #, c-format msgid "The NOTIFY queue cannot be emptied until that process ends its current transaction." 
msgstr "" @@ -5157,7 +5194,7 @@ msgstr "ne peut pas exécuter CLUSTER sur les tables temporaires des autres sess msgid "there is no previously clustered index for table \"%s\"" msgstr "Il n'existe pas d'index CLUSTER pour la table « %s »" -#: commands/cluster.c:173 commands/tablecmds.c:10185 commands/tablecmds.c:12066 +#: commands/cluster.c:173 commands/tablecmds.c:10212 commands/tablecmds.c:12093 #, c-format msgid "index \"%s\" for table \"%s\" does not exist" msgstr "l'index « %s » pour la table « %s » n'existe pas" @@ -5172,7 +5209,7 @@ msgstr "ne peut pas exécuter CLUSTER sur un catalogue partagé" msgid "cannot vacuum temporary tables of other sessions" msgstr "ne peut pas exécuter VACUUM sur les tables temporaires des autres sessions" -#: commands/cluster.c:431 commands/tablecmds.c:12076 +#: commands/cluster.c:431 commands/tablecmds.c:12103 #, c-format msgid "\"%s\" is not an index for table \"%s\"" msgstr "« %s » n'est pas un index de la table « %s »" @@ -5194,29 +5231,29 @@ msgstr "ne peut pas exécuter CLUSTER sur l'index partiel « %s »" msgid "cannot cluster on invalid index \"%s\"" msgstr "ne peut pas exécuter la commande CLUSTER sur l'index invalide « %s »" -#: commands/cluster.c:918 +#: commands/cluster.c:922 #, c-format msgid "clustering \"%s.%s\" using index scan on \"%s\"" msgstr "cluster sur « %s.%s » en utilisant un parcours d'index sur « %s »" -#: commands/cluster.c:924 +#: commands/cluster.c:928 #, c-format msgid "clustering \"%s.%s\" using sequential scan and sort" msgstr "cluster sur « %s.%s » en utilisant un parcours séquentiel puis un tri" -#: commands/cluster.c:929 commands/vacuumlazy.c:490 +#: commands/cluster.c:933 commands/vacuumlazy.c:492 #, c-format msgid "vacuuming \"%s.%s\"" msgstr "exécution du VACUUM sur « %s.%s »" -#: commands/cluster.c:1084 +#: commands/cluster.c:1090 #, c-format msgid "\"%s\": found %.0f removable, %.0f nonremovable row versions in %u pages" msgstr "" "« %s » : %.0f versions de ligne supprimables, %.0f non supprimables\n" "parmi %u pages" -#: commands/cluster.c:1088 +#: commands/cluster.c:1094 #, c-format msgid "" "%.0f dead row versions cannot be removed yet.\n" @@ -5280,7 +5317,7 @@ msgstr "n'a pas pu convertir le nom de locale « %s » en balise de langage : %s msgid "must be superuser to import system collations" msgstr "doit être super-utilisateur pour importer les collationnements systèmes" -#: commands/collationcmds.c:535 commands/copy.c:1860 commands/copy.c:3102 +#: commands/collationcmds.c:535 commands/copy.c:1807 commands/copy.c:3130 #, c-format msgid "could not execute command \"%s\": %m" msgstr "n'a pas pu exécuter la commande « %s » : %m" @@ -5290,17 +5327,12 @@ msgstr "n'a pas pu exécuter la commande « %s » : %m" msgid "no usable system locales were found" msgstr "aucune locale système utilisable n'a été trouvée" -#: commands/collationcmds.c:730 commands/collationcmds.c:769 -#, c-format -msgid "could not get keyword values for locale \"%s\": %s" -msgstr "n'a pas pu obtenir les valeurs des mots clés pour la locale « %s » : %s" - #: commands/comment.c:61 commands/dbcommands.c:808 commands/dbcommands.c:996 commands/dbcommands.c:1100 commands/dbcommands.c:1290 commands/dbcommands.c:1513 commands/dbcommands.c:1627 commands/dbcommands.c:2043 utils/init/postinit.c:846 utils/init/postinit.c:951 utils/init/postinit.c:968 #, c-format msgid "database \"%s\" does not exist" msgstr "la base de données « %s » n'existe pas" -#: commands/comment.c:101 commands/seclabel.c:117 parser/parse_utilcmd.c:931 +#: commands/comment.c:101 
commands/seclabel.c:117 parser/parse_utilcmd.c:952 #, c-format msgid "\"%s\" is not a table, view, materialized view, composite type, or foreign table" msgstr "« %s » n'est ni une table, ni une vue, ni une vue matérialisée, ni un type composite, ni une table distante" @@ -5531,337 +5563,337 @@ msgstr "le caractère guillemet de CSV ne doit pas apparaître dans la spécific msgid "table \"%s\" does not have OIDs" msgstr "la table « %s » n'a pas d'OID" -#: commands/copy.c:1486 +#: commands/copy.c:1433 #, c-format msgid "COPY (query) WITH OIDS is not supported" msgstr "COPY (requête) WITH OIDS n'est pas supporté" -#: commands/copy.c:1507 +#: commands/copy.c:1454 #, c-format msgid "DO INSTEAD NOTHING rules are not supported for COPY" msgstr "les règles DO INSTEAD NOTHING ne sont pas supportées par l'instruction COPY" -#: commands/copy.c:1521 +#: commands/copy.c:1468 #, c-format msgid "conditional DO INSTEAD rules are not supported for COPY" msgstr "les règles DO INSTEAD conditionnelles ne sont pas supportées par l'instruction COPY" -#: commands/copy.c:1525 +#: commands/copy.c:1472 #, c-format msgid "DO ALSO rules are not supported for the COPY" msgstr "les règles DO ALSO ne sont pas supportées par l'instruction COPY" -#: commands/copy.c:1530 +#: commands/copy.c:1477 #, c-format msgid "multi-statement DO INSTEAD rules are not supported for COPY" msgstr "les règles DO INSTEAD multi-instructions ne sont pas supportées par l'instruction COPY" -#: commands/copy.c:1540 +#: commands/copy.c:1487 #, c-format msgid "COPY (SELECT INTO) is not supported" msgstr "COPY (SELECT INTO) n'est pas supporté" -#: commands/copy.c:1557 +#: commands/copy.c:1504 #, c-format msgid "COPY query must have a RETURNING clause" msgstr "La requête COPY doit avoir une clause RETURNING" -#: commands/copy.c:1585 +#: commands/copy.c:1532 #, c-format msgid "relation referenced by COPY statement has changed" msgstr "la relation référencée par l'instruction COPY a changé" -#: commands/copy.c:1643 +#: commands/copy.c:1590 #, c-format msgid "FORCE_QUOTE column \"%s\" not referenced by COPY" msgstr "la colonne « %s » FORCE_QUOTE n'est pas référencée par COPY" -#: commands/copy.c:1665 +#: commands/copy.c:1612 #, c-format msgid "FORCE_NOT_NULL column \"%s\" not referenced by COPY" msgstr "la colonne « %s » FORCE_NOT_NULL n'est pas référencée par COPY" -#: commands/copy.c:1687 +#: commands/copy.c:1634 #, c-format msgid "FORCE_NULL column \"%s\" not referenced by COPY" msgstr "colonne « %s » FORCE_NULL non référencée par COPY" -#: commands/copy.c:1752 +#: commands/copy.c:1699 #, c-format msgid "could not close pipe to external command: %m" msgstr "n'a pas pu fermer le fichier pipe vers la commande externe : %m" -#: commands/copy.c:1756 +#: commands/copy.c:1703 #, c-format msgid "program \"%s\" failed" msgstr "le programme « %s » a échoué" -#: commands/copy.c:1806 +#: commands/copy.c:1753 #, c-format msgid "cannot copy from view \"%s\"" msgstr "ne peut pas copier à partir de la vue « %s »" -#: commands/copy.c:1808 commands/copy.c:1814 commands/copy.c:1820 commands/copy.c:1831 +#: commands/copy.c:1755 commands/copy.c:1761 commands/copy.c:1767 commands/copy.c:1778 #, c-format msgid "Try the COPY (SELECT ...) TO variant." msgstr "Tentez la variante COPY (SELECT ...) TO." 
-#: commands/copy.c:1812 +#: commands/copy.c:1759 #, c-format msgid "cannot copy from materialized view \"%s\"" msgstr "ne peut pas copier à partir de la vue matérialisée « %s »" -#: commands/copy.c:1818 +#: commands/copy.c:1765 #, c-format msgid "cannot copy from foreign table \"%s\"" msgstr "ne peut pas copier à partir de la table distante « %s »" -#: commands/copy.c:1824 +#: commands/copy.c:1771 #, c-format msgid "cannot copy from sequence \"%s\"" msgstr "ne peut pas copier à partir de la séquence « %s »" -#: commands/copy.c:1829 +#: commands/copy.c:1776 #, c-format msgid "cannot copy from partitioned table \"%s\"" msgstr "ne peut pas copier à partir de la table partitionnée « %s »" -#: commands/copy.c:1835 +#: commands/copy.c:1782 #, c-format msgid "cannot copy from non-table relation \"%s\"" msgstr "ne peut pas copier à partir de la relation « %s », qui n'est pas une table" -#: commands/copy.c:1875 +#: commands/copy.c:1822 #, c-format msgid "relative path not allowed for COPY to file" msgstr "un chemin relatif n'est pas autorisé à utiliser COPY vers un fichier" -#: commands/copy.c:1887 +#: commands/copy.c:1843 #, c-format msgid "could not open file \"%s\" for writing: %m" msgstr "n'a pas pu ouvrir le fichier « %s » en écriture : %m" -#: commands/copy.c:1890 +#: commands/copy.c:1846 #, c-format msgid "COPY TO instructs the PostgreSQL server process to write a file. You may want a client-side facility such as psql's \\copy." msgstr "COPY TO indique au serveur PostgreSQL d'écrire un fichier. Vous pourriez vouloir utiliser la fonctionnalité \\copy de psql pour écrire en local." -#: commands/copy.c:1903 commands/copy.c:3133 +#: commands/copy.c:1859 commands/copy.c:3161 #, c-format msgid "\"%s\" is a directory" msgstr "« %s » est un répertoire" -#: commands/copy.c:2226 +#: commands/copy.c:2182 #, c-format msgid "COPY %s, line %d, column %s" msgstr "COPY %s, ligne %d, colonne %s" -#: commands/copy.c:2230 commands/copy.c:2277 +#: commands/copy.c:2186 commands/copy.c:2233 #, c-format msgid "COPY %s, line %d" msgstr "COPY %s, ligne %d" -#: commands/copy.c:2241 +#: commands/copy.c:2197 #, c-format msgid "COPY %s, line %d, column %s: \"%s\"" msgstr "COPY %s, ligne %d, colonne %s : « %s »" -#: commands/copy.c:2249 +#: commands/copy.c:2205 #, c-format msgid "COPY %s, line %d, column %s: null input" msgstr "COPY %s, ligne %d, colonne %s : NULL en entrée" -#: commands/copy.c:2271 +#: commands/copy.c:2227 #, c-format msgid "COPY %s, line %d: \"%s\"" msgstr "COPY %s, ligne %d : « %s »" -#: commands/copy.c:2365 +#: commands/copy.c:2321 #, c-format msgid "cannot copy to view \"%s\"" msgstr "ne peut pas copier vers la vue « %s »" -#: commands/copy.c:2367 +#: commands/copy.c:2323 #, c-format msgid "To enable copying to a view, provide an INSTEAD OF INSERT trigger." msgstr "Pour activer la copie d'une vue, fournissez un trigger INSTEAD OF INSERT." 
-#: commands/copy.c:2371 +#: commands/copy.c:2327 #, c-format msgid "cannot copy to materialized view \"%s\"" msgstr "ne peut pas copier vers la vue matérialisée « %s »" -#: commands/copy.c:2376 +#: commands/copy.c:2332 #, c-format msgid "cannot copy to foreign table \"%s\"" msgstr "ne peut pas copier vers la table distante « %s »" -#: commands/copy.c:2381 +#: commands/copy.c:2337 #, c-format msgid "cannot copy to sequence \"%s\"" msgstr "ne peut pas copier vers la séquence « %s »" -#: commands/copy.c:2386 +#: commands/copy.c:2342 #, c-format msgid "cannot copy to non-table relation \"%s\"" msgstr "ne peut pas copier vers une relation « %s » qui n'est pas une table" -#: commands/copy.c:2449 +#: commands/copy.c:2417 #, c-format msgid "cannot perform FREEZE because of prior transaction activity" msgstr "n'a pas pu exécuter un FREEZE à cause d'une activité transactionnelle précédente" -#: commands/copy.c:2455 +#: commands/copy.c:2423 #, c-format msgid "cannot perform FREEZE because the table was not created or truncated in the current subtransaction" msgstr "n'a pas pu exécuter un FREEZE parce que la table n'était pas créée ou tronquée dans la sous-transaction en cours" -#: commands/copy.c:2618 executor/nodeModifyTable.c:311 +#: commands/copy.c:2645 executor/nodeModifyTable.c:311 #, c-format msgid "cannot route inserted tuples to a foreign table" msgstr "ne peut pas envoyer les lignes insérées dans une table distante" -#: commands/copy.c:3120 +#: commands/copy.c:3148 #, c-format msgid "COPY FROM instructs the PostgreSQL server process to read a file. You may want a client-side facility such as psql's \\copy." msgstr "COPY FROM indique au serveur PostgreSQL de lire un fichier. Vous pourriez vouloir utiliser la fonctionnalité \\copy de psql pour lire en local."
-#: commands/copy.c:3153 +#: commands/copy.c:3181 #, c-format msgid "COPY file signature not recognized" msgstr "la signature du fichier COPY n'est pas reconnue" -#: commands/copy.c:3158 +#: commands/copy.c:3186 #, c-format msgid "invalid COPY file header (missing flags)" msgstr "en-tête du fichier COPY invalide (options manquantes)" -#: commands/copy.c:3164 +#: commands/copy.c:3192 #, c-format msgid "unrecognized critical flags in COPY file header" msgstr "options critiques non reconnues dans l'en-tête du fichier COPY" -#: commands/copy.c:3170 +#: commands/copy.c:3198 #, c-format msgid "invalid COPY file header (missing length)" msgstr "en-tête du fichier COPY invalide (longueur manquante)" -#: commands/copy.c:3177 +#: commands/copy.c:3205 #, c-format msgid "invalid COPY file header (wrong length)" msgstr "en-tête du fichier COPY invalide (mauvaise longueur)" -#: commands/copy.c:3310 commands/copy.c:4017 commands/copy.c:4247 +#: commands/copy.c:3338 commands/copy.c:4045 commands/copy.c:4275 #, c-format msgid "extra data after last expected column" msgstr "données supplémentaires après la dernière colonne attendue" -#: commands/copy.c:3320 +#: commands/copy.c:3348 #, c-format msgid "missing data for OID column" msgstr "données manquantes pour la colonne OID" -#: commands/copy.c:3326 +#: commands/copy.c:3354 #, c-format msgid "null OID in COPY data" msgstr "OID NULL dans les données du COPY" -#: commands/copy.c:3336 commands/copy.c:3459 +#: commands/copy.c:3364 commands/copy.c:3487 #, c-format msgid "invalid OID in COPY data" msgstr "OID invalide dans les données du COPY" -#: commands/copy.c:3351 +#: commands/copy.c:3379 #, c-format msgid "missing data for column \"%s\"" msgstr "données manquantes pour la colonne « %s »" -#: commands/copy.c:3434 +#: commands/copy.c:3462 #, c-format msgid "received copy data after EOF marker" msgstr "a reçu des données de COPY après le marqueur de fin" -#: commands/copy.c:3441 +#: commands/copy.c:3469 #, c-format msgid "row field count is %d, expected %d" msgstr "le nombre de champs de la ligne est %d, %d attendus" -#: commands/copy.c:3781 commands/copy.c:3798 +#: commands/copy.c:3809 commands/copy.c:3826 #, c-format msgid "literal carriage return found in data" msgstr "retour chariot trouvé dans les données" -#: commands/copy.c:3782 commands/copy.c:3799 +#: commands/copy.c:3810 commands/copy.c:3827 #, c-format msgid "unquoted carriage return found in data" msgstr "retour chariot sans guillemet trouvé dans les données" -#: commands/copy.c:3784 commands/copy.c:3801 +#: commands/copy.c:3812 commands/copy.c:3829 #, c-format msgid "Use \"\\r\" to represent carriage return." msgstr "Utilisez « \\r » pour représenter un retour chariot." -#: commands/copy.c:3785 commands/copy.c:3802 +#: commands/copy.c:3813 commands/copy.c:3830 #, c-format msgid "Use quoted CSV field to represent carriage return." msgstr "Utiliser le champ CSV entre guillemets pour représenter un retour chariot." -#: commands/copy.c:3814 +#: commands/copy.c:3842 #, c-format msgid "literal newline found in data" msgstr "retour à la ligne trouvé dans les données" -#: commands/copy.c:3815 +#: commands/copy.c:3843 #, c-format msgid "unquoted newline found in data" msgstr "retour à la ligne trouvé dans les données" -#: commands/copy.c:3817 +#: commands/copy.c:3845 #, c-format msgid "Use \"\\n\" to represent newline." msgstr "Utilisez « \\n » pour représenter un retour à la ligne." -#: commands/copy.c:3818 +#: commands/copy.c:3846 #, c-format msgid "Use quoted CSV field to represent newline." 
msgstr "Utiliser un champ CSV entre guillemets pour représenter un retour à la ligne." -#: commands/copy.c:3864 commands/copy.c:3900 +#: commands/copy.c:3892 commands/copy.c:3928 #, c-format msgid "end-of-copy marker does not match previous newline style" msgstr "le marqueur fin-de-copie ne correspond pas à un précédent style de fin de ligne" -#: commands/copy.c:3873 commands/copy.c:3889 +#: commands/copy.c:3901 commands/copy.c:3917 #, c-format msgid "end-of-copy marker corrupt" msgstr "marqueur fin-de-copie corrompu" -#: commands/copy.c:4331 +#: commands/copy.c:4359 #, c-format msgid "unterminated CSV quoted field" msgstr "champ CSV entre guillemets non terminé" -#: commands/copy.c:4408 commands/copy.c:4427 +#: commands/copy.c:4436 commands/copy.c:4455 #, c-format msgid "unexpected EOF in COPY data" msgstr "fin de fichier (EOF) inattendu dans les données du COPY" -#: commands/copy.c:4417 +#: commands/copy.c:4445 #, c-format msgid "invalid field size" msgstr "taille du champ invalide" -#: commands/copy.c:4440 +#: commands/copy.c:4468 #, c-format msgid "incorrect binary data format" msgstr "format de données binaires incorrect" -#: commands/copy.c:4751 commands/indexcmds.c:1070 commands/tablecmds.c:1685 commands/tablecmds.c:2187 commands/tablecmds.c:2613 parser/parse_relation.c:3249 parser/parse_relation.c:3269 utils/adt/tsvector_op.c:2561 +#: commands/copy.c:4779 commands/indexcmds.c:1073 commands/statscmds.c:183 commands/tablecmds.c:1685 commands/tablecmds.c:2187 commands/tablecmds.c:2613 parser/parse_relation.c:3287 parser/parse_relation.c:3307 utils/adt/tsvector_op.c:2561 #, c-format msgid "column \"%s\" does not exist" msgstr "la colonne « %s » n'existe pas" -#: commands/copy.c:4758 commands/tablecmds.c:1711 commands/tablecmds.c:2213 commands/trigger.c:800 parser/parse_target.c:1018 parser/parse_target.c:1029 +#: commands/copy.c:4786 commands/tablecmds.c:1711 commands/tablecmds.c:2213 commands/trigger.c:826 parser/parse_target.c:1018 parser/parse_target.c:1029 #, c-format msgid "column \"%s\" specified more than once" msgstr "la colonne « %s » est spécifiée plus d'une fois" @@ -5896,7 +5928,7 @@ msgstr "%d n'est pas un code d'encodage valide" msgid "%s is not a valid encoding name" msgstr "%s n'est pas un nom d'encodage valide" -#: commands/dbcommands.c:292 commands/dbcommands.c:1494 commands/user.c:276 commands/user.c:641 +#: commands/dbcommands.c:292 commands/dbcommands.c:1494 commands/user.c:276 commands/user.c:664 #, c-format msgid "invalid connection limit: %d" msgstr "limite de connexion invalide : %d" @@ -6159,7 +6191,7 @@ msgstr "l'argument de %s doit être un nom de type" msgid "invalid argument for %s: \"%s\"" msgstr "argument invalide pour %s : « %s »" -#: commands/dropcmds.c:104 commands/functioncmds.c:1201 utils/adt/ruleutils.c:2452 +#: commands/dropcmds.c:104 commands/functioncmds.c:1201 utils/adt/ruleutils.c:2453 #, c-format msgid "\"%s\" is an aggregate function" msgstr "« %s » est une fonction d'agrégat" @@ -6169,7 +6201,7 @@ msgstr "« %s » est une fonction d'agrégat" msgid "Use DROP AGGREGATE to drop aggregate functions." msgstr "Utiliser DROP AGGREGATE pour supprimer les fonctions d'agrégat." 
-#: commands/dropcmds.c:157 commands/sequence.c:442 commands/tablecmds.c:2697 commands/tablecmds.c:2848 commands/tablecmds.c:2891 commands/tablecmds.c:12449 tcop/utility.c:1168 +#: commands/dropcmds.c:157 commands/sequence.c:442 commands/tablecmds.c:2697 commands/tablecmds.c:2848 commands/tablecmds.c:2891 commands/tablecmds.c:12476 tcop/utility.c:1168 #, c-format msgid "relation \"%s\" does not exist, skipping" msgstr "la relation « %s » n'existe pas, poursuite du traitement" @@ -7112,7 +7144,7 @@ msgstr "ne peut pas créer un index sur la table partitionnée « %s »" msgid "cannot create indexes on temporary tables of other sessions" msgstr "ne peut pas créer les index sur les tables temporaires des autres sessions" -#: commands/indexcmds.c:474 commands/tablecmds.c:593 commands/tablecmds.c:10493 +#: commands/indexcmds.c:474 commands/tablecmds.c:593 commands/tablecmds.c:10520 #, c-format msgid "only shared relations can be placed in pg_global tablespace" msgstr "seules les relations partagées peuvent être placées dans le tablespace pg_global" @@ -7147,107 +7179,107 @@ msgstr "la création d'un index sur les tables du catalogue système n'est pas s msgid "%s %s will create implicit index \"%s\" for table \"%s\"" msgstr "%s %s créera un index implicite « %s » pour la table « %s »" -#: commands/indexcmds.c:999 +#: commands/indexcmds.c:1002 #, c-format msgid "functions in index predicate must be marked IMMUTABLE" msgstr "les fonctions dans un prédicat d'index doivent être marquées comme IMMUTABLE" -#: commands/indexcmds.c:1065 parser/parse_utilcmd.c:2076 +#: commands/indexcmds.c:1068 parser/parse_utilcmd.c:2097 #, c-format msgid "column \"%s\" named in key does not exist" msgstr "la colonne « %s » nommée dans la clé n'existe pas" -#: commands/indexcmds.c:1125 +#: commands/indexcmds.c:1128 #, c-format msgid "functions in index expression must be marked IMMUTABLE" msgstr "" "les fonctions dans l'expression de l'index doivent être marquées comme\n" "IMMUTABLE" -#: commands/indexcmds.c:1148 +#: commands/indexcmds.c:1151 #, c-format msgid "could not determine which collation to use for index expression" msgstr "n'a pas pu déterminer le collationnement à utiliser pour l'expression d'index" -#: commands/indexcmds.c:1156 commands/tablecmds.c:13383 commands/typecmds.c:831 parser/parse_expr.c:2763 parser/parse_type.c:549 parser/parse_utilcmd.c:3112 utils/adt/misc.c:661 +#: commands/indexcmds.c:1159 commands/tablecmds.c:13410 commands/typecmds.c:831 parser/parse_expr.c:2763 parser/parse_type.c:549 parser/parse_utilcmd.c:3134 utils/adt/misc.c:661 #, c-format msgid "collations are not supported by type %s" msgstr "les collationnements ne sont pas supportés par le type %s" -#: commands/indexcmds.c:1194 +#: commands/indexcmds.c:1197 #, c-format msgid "operator %s is not commutative" msgstr "l'opérateur %s n'est pas commutatif" -#: commands/indexcmds.c:1196 +#: commands/indexcmds.c:1199 #, c-format msgid "Only commutative operators can be used in exclusion constraints." msgstr "Seuls les opérateurs commutatifs peuvent être utilisés dans les contraintes d'exclusion." -#: commands/indexcmds.c:1222 +#: commands/indexcmds.c:1225 #, c-format msgid "operator %s is not a member of operator family \"%s\"" msgstr "l'opérateur %s n'est pas un membre de la famille d'opérateur « %s »" -#: commands/indexcmds.c:1225 +#: commands/indexcmds.c:1228 #, c-format msgid "The exclusion operator must be related to the index operator class for the constraint." 
msgstr "" "L'opérateur d'exclusion doit être en relation avec la classe d'opérateur de\n" "l'index pour la contrainte." -#: commands/indexcmds.c:1260 +#: commands/indexcmds.c:1263 #, c-format msgid "access method \"%s\" does not support ASC/DESC options" msgstr "la méthode d'accès « %s » ne supporte pas les options ASC/DESC" -#: commands/indexcmds.c:1265 +#: commands/indexcmds.c:1268 #, c-format msgid "access method \"%s\" does not support NULLS FIRST/LAST options" msgstr "la méthode d'accès « %s » ne supporte pas les options NULLS FIRST/LAST" -#: commands/indexcmds.c:1324 commands/typecmds.c:1928 +#: commands/indexcmds.c:1327 commands/typecmds.c:1949 #, c-format msgid "data type %s has no default operator class for access method \"%s\"" msgstr "" "le type de données %s n'a pas de classe d'opérateurs par défaut pour la\n" "méthode d'accès « %s »" -#: commands/indexcmds.c:1326 +#: commands/indexcmds.c:1329 #, c-format msgid "You must specify an operator class for the index or define a default operator class for the data type." msgstr "" "Vous devez spécifier une classe d'opérateur pour l'index ou définir une\n" "classe d'opérateur par défaut pour le type de données." -#: commands/indexcmds.c:1355 commands/indexcmds.c:1363 commands/opclasscmds.c:205 +#: commands/indexcmds.c:1358 commands/indexcmds.c:1366 commands/opclasscmds.c:205 #, c-format msgid "operator class \"%s\" does not exist for access method \"%s\"" msgstr "la classe d'opérateur « %s » n'existe pas pour la méthode d'accès « %s »" -#: commands/indexcmds.c:1376 commands/typecmds.c:1916 +#: commands/indexcmds.c:1379 commands/typecmds.c:1937 #, c-format msgid "operator class \"%s\" does not accept data type %s" msgstr "la classe d'opérateur « %s » n'accepte pas le type de données %s" -#: commands/indexcmds.c:1466 +#: commands/indexcmds.c:1469 #, c-format msgid "there are multiple default operator classes for data type %s" msgstr "" "il existe de nombreuses classes d'opérateur par défaut pour le type de\n" "données %s" -#: commands/indexcmds.c:1857 +#: commands/indexcmds.c:1860 #, c-format msgid "table \"%s\" has no indexes" msgstr "la table « %s » n'a pas d'index" -#: commands/indexcmds.c:1912 +#: commands/indexcmds.c:1915 #, c-format msgid "can only reindex the currently open database" msgstr "peut seulement réindexer la base de données en cours" -#: commands/indexcmds.c:2012 +#: commands/indexcmds.c:2015 #, c-format msgid "table \"%s.%s\" was reindexed" msgstr "la table « %s.%s » a été réindexée" @@ -7496,7 +7528,7 @@ msgstr "" msgid "operator attribute \"%s\" cannot be changed" msgstr "l'attribut « %s » de l'opérateur ne peut pas être changé" -#: commands/policy.c:87 commands/policy.c:397 commands/policy.c:487 commands/tablecmds.c:1150 commands/tablecmds.c:1520 commands/tablecmds.c:2507 commands/tablecmds.c:4704 commands/tablecmds.c:7041 commands/tablecmds.c:13006 commands/tablecmds.c:13041 commands/trigger.c:253 commands/trigger.c:1294 commands/trigger.c:1403 rewrite/rewriteDefine.c:272 rewrite/rewriteDefine.c:925 +#: commands/policy.c:87 commands/policy.c:397 commands/policy.c:487 commands/tablecmds.c:1150 commands/tablecmds.c:1520 commands/tablecmds.c:2507 commands/tablecmds.c:4704 commands/tablecmds.c:7068 commands/tablecmds.c:13033 commands/tablecmds.c:13068 commands/trigger.c:259 commands/trigger.c:1320 commands/trigger.c:1429 rewrite/rewriteDefine.c:272 rewrite/rewriteDefine.c:925 #, c-format msgid "permission denied: \"%s\" is a system catalog" msgstr "droit refusé : « %s » est un catalogue système" @@ -7625,15 +7657,15 @@ 
msgstr "Les langages supportés sont listés dans le catalogue système pg_pltem msgid "must be superuser to create custom procedural language" msgstr "doit être super-utilisateur pour créer un langage de procédures personnalisé" -#: commands/proclang.c:281 commands/trigger.c:582 commands/typecmds.c:457 commands/typecmds.c:474 +#: commands/proclang.c:281 commands/trigger.c:608 commands/typecmds.c:457 commands/typecmds.c:474 #, c-format msgid "changing return type of function %s from %s to %s" msgstr "changement du type de retour de la fonction %s de %s vers %s" #: commands/publicationcmds.c:106 #, c-format -msgid "invalid publish list" -msgstr "liste de publication invalide" +msgid "invalid list syntax for \"publish\" option" +msgstr "syntaxe de liste invalide pour l'option « publish »" #: commands/publicationcmds.c:122 #, c-format @@ -7827,7 +7859,7 @@ msgstr "la séquence doit être dans le même schéma que la table avec laquelle msgid "cannot change ownership of identity sequence" msgstr "ne peut pas modifier le propriétaire de la séquence d'identité" -#: commands/sequence.c:1716 commands/tablecmds.c:9875 commands/tablecmds.c:12469 +#: commands/sequence.c:1716 commands/tablecmds.c:9902 commands/tablecmds.c:12496 #, c-format msgid "Sequence \"%s\" is linked to table \"%s\"." msgstr "La séquence « %s » est liée à la table « %s »." @@ -7857,19 +7889,15 @@ msgstr "la relation « %s » n'est pas une table, une table distante ou une vue msgid "only simple column references are allowed in CREATE STATISTICS" msgstr "seules des références à une seule colonne sont acceptées dans CREATE STATISTICS" -#: commands/statscmds.c:183 -#, c-format -msgid "column \"%s\" referenced in statistics does not exist" -msgstr "la colonne « %s » référencée dans les statistiques n'existe pas" - #: commands/statscmds.c:191 #, c-format msgid "statistics creation on system columns is not supported" msgstr "la création de statistiques sur les colonnes systèmes n'est pas supportée" #: commands/statscmds.c:198 -#, c-format -msgid "column \"%s\" cannot be used in statistics because its type has no default btree operator class" +#, fuzzy, c-format +#| msgid "column \"%s\" cannot be used in statistics because its type has no default btree operator class" +msgid "column \"%s\" cannot be used in statistics because its type %s has no default btree operator class" msgstr "la colonne « %s » ne peut pas être utilisé dans des statistiques parce que son type n'a pas de classe d'opérateur btree par défaut" #: commands/statscmds.c:205 @@ -7889,7 +7917,7 @@ msgstr "nom de colonne dupliqué dans la définition des statistiques" #: commands/statscmds.c:266 #, c-format -msgid "unrecognized statistic type \"%s\"" +msgid "unrecognized statistics kind \"%s\"" msgstr "type de statistique « %s » non reconnu" #: commands/subscriptioncmds.c:187 @@ -7942,7 +7970,7 @@ msgstr "nom de publication « %s » utilisé plus d'une fois" msgid "must be superuser to create subscriptions" msgstr "doit être super-utilisateur pour créer des souscriptions" -#: commands/subscriptioncmds.c:427 commands/subscriptioncmds.c:520 replication/logical/tablesync.c:856 replication/logical/worker.c:1616 +#: commands/subscriptioncmds.c:427 commands/subscriptioncmds.c:520 replication/logical/tablesync.c:856 replication/logical/worker.c:1622 #, c-format msgid "could not connect to the publisher: %s" msgstr "n'a pas pu se connecter au publieur : %s" @@ -7959,80 +7987,80 @@ msgstr "les tables n'étaient pas souscrites, vous devrez exécuter ALTER SUBSCR #: commands/subscriptioncmds.c:576 #, 
c-format -msgid "added subscription for table %s.%s" -msgstr "souscription ajoutée pour la table %s.%s" +msgid "table \"%s.%s\" added to subscription \"%s\"" +msgstr "table « %s.%s » ajoutée à la souscription « %s »" -#: commands/subscriptioncmds.c:604 +#: commands/subscriptioncmds.c:600 #, c-format -msgid "removed subscription for table %s.%s" -msgstr "a supprimé une souscription pour la table %s.%s" +msgid "table \"%s.%s\" removed from subscription \"%s\"" +msgstr "table « %s.%s » supprimée de la souscription « %s »" -#: commands/subscriptioncmds.c:672 +#: commands/subscriptioncmds.c:669 #, c-format msgid "cannot set slot_name = NONE for enabled subscription" msgstr "ne peut pas configurer slot_name = NONE pour la souscription activée" -#: commands/subscriptioncmds.c:706 +#: commands/subscriptioncmds.c:703 #, c-format msgid "cannot enable subscription that does not have a slot name" msgstr "ne peut pas activer une souscription qui n'a pas de nom de slot" -#: commands/subscriptioncmds.c:752 +#: commands/subscriptioncmds.c:749 #, c-format msgid "ALTER SUBSCRIPTION with refresh is not allowed for disabled subscriptions" msgstr "ALTER SUBSCRIPTION avec rafraichissement n'est pas autorisé pour les souscriptions désactivées" -#: commands/subscriptioncmds.c:753 +#: commands/subscriptioncmds.c:750 #, c-format msgid "Use ALTER SUBSCRIPTION ... SET PUBLICATION ... WITH (refresh = false)." msgstr "Utilisez ALTER SUBSCRIPTION ... SET PUBLICATION ... WITH (refresh = false)." -#: commands/subscriptioncmds.c:771 +#: commands/subscriptioncmds.c:768 #, c-format msgid "ALTER SUBSCRIPTION ... REFRESH is not allowed for disabled subscriptions" msgstr "ALTER SUBSCRIPTION ... REFRESH n'est pas autorisé pour les souscriptions désactivées" -#: commands/subscriptioncmds.c:848 +#: commands/subscriptioncmds.c:847 #, c-format msgid "subscription \"%s\" does not exist, skipping" msgstr "la souscription « %s » n'existe pas, poursuite du traitement" -#: commands/subscriptioncmds.c:949 +#: commands/subscriptioncmds.c:972 #, c-format msgid "could not connect to publisher when attempting to drop the replication slot \"%s\"" msgstr "n'a pas pu se connecter au publieur pour supprimer le slot de réplication « %s »" -#: commands/subscriptioncmds.c:951 commands/subscriptioncmds.c:965 replication/logical/tablesync.c:906 replication/logical/tablesync.c:928 +#: commands/subscriptioncmds.c:974 commands/subscriptioncmds.c:988 replication/logical/tablesync.c:906 replication/logical/tablesync.c:928 #, c-format msgid "The error was: %s" msgstr "L'erreur était : %s" -#: commands/subscriptioncmds.c:952 +#: commands/subscriptioncmds.c:975 #, c-format msgid "Use ALTER SUBSCRIPTION ... SET (slot_name = NONE) to disassociate the subscription from the slot." msgstr "Utilisez ALTER SUBSCRIPTION ... SET (slot_name = NONE) pour dissocier la souscription du slot." 
-#: commands/subscriptioncmds.c:963 +#: commands/subscriptioncmds.c:986 #, c-format msgid "could not drop the replication slot \"%s\" on publisher" msgstr "n'a pas pu supprimer le slot de réplication « %s » sur le publieur" -#: commands/subscriptioncmds.c:968 +#: commands/subscriptioncmds.c:991 #, c-format msgid "dropped replication slot \"%s\" on publisher" msgstr "slot de réplication « %s » supprimé sur le publieur" -#: commands/subscriptioncmds.c:1009 +#: commands/subscriptioncmds.c:1032 #, c-format msgid "permission denied to change owner of subscription \"%s\"" msgstr "droit refusé pour modifier le propriétaire de la souscription « %s »" -#: commands/subscriptioncmds.c:1011 +#: commands/subscriptioncmds.c:1034 #, c-format msgid "The owner of a subscription must be a superuser." msgstr "Le propriétaire d'une souscription doit être un super-utilisateur." -#: commands/subscriptioncmds.c:1124 +#: commands/subscriptioncmds.c:1147 #, c-format msgid "could not receive list of replicated tables from the publisher: %s" msgstr "n'a pas pu recevoir la liste des tables répliquées à partir du publieur : %s" @@ -8093,7 +8121,7 @@ msgstr "la vue matérialisée « %s » n'existe pas, poursuite du traitement" msgid "Use DROP MATERIALIZED VIEW to remove a materialized view." msgstr "Utilisez DROP MATERIALIZED VIEW pour supprimer une vue matérialisée." -#: commands/tablecmds.c:245 parser/parse_utilcmd.c:1828 +#: commands/tablecmds.c:245 parser/parse_utilcmd.c:1849 #, c-format msgid "index \"%s\" does not exist" msgstr "l'index « %s » n'existe pas" @@ -8116,7 +8144,7 @@ msgstr "« %s » n'est pas un type" msgid "Use DROP TYPE to remove a type." msgstr "Utilisez DROP TYPE pour supprimer un type." -#: commands/tablecmds.c:257 commands/tablecmds.c:9391 commands/tablecmds.c:12249 +#: commands/tablecmds.c:257 commands/tablecmds.c:9418 commands/tablecmds.c:12276 #, c-format msgid "foreign table \"%s\" does not exist" msgstr "la table distante « %s » n'existe pas" @@ -8147,7 +8175,7 @@ msgstr "" msgid "cannot create table with OIDs as partition of table without OIDs" msgstr "ne peut pas créer une table avec OID comme partition d'une table sans OID" -#: commands/tablecmds.c:783 parser/parse_utilcmd.c:3279 +#: commands/tablecmds.c:783 parser/parse_utilcmd.c:3301 #, c-format msgid "\"%s\" is not partitioned" msgstr "« %s » n'est pas partitionné" @@ -8187,7 +8215,7 @@ msgstr "TRUNCATE cascade sur la table « %s »" msgid "cannot truncate temporary tables of other sessions" msgstr "ne peut pas tronquer les tables temporaires des autres sessions" -#: commands/tablecmds.c:1761 commands/tablecmds.c:10976 +#: commands/tablecmds.c:1761 commands/tablecmds.c:11003 #, c-format msgid "cannot inherit from partitioned table \"%s\"" msgstr "ne peut pas hériter de la table partitionnée « %s »" @@ -8197,22 +8225,22 @@ msgstr "ne peut pas hériter de la table partitionnée « %s »" msgid "cannot inherit from partition \"%s\"" msgstr "ne peut pas hériter de la partition « %s »" -#: commands/tablecmds.c:1774 parser/parse_utilcmd.c:2039 +#: commands/tablecmds.c:1774 parser/parse_utilcmd.c:2060 #, c-format msgid "inherited relation \"%s\" is not a table or foreign table" msgstr "la relation héritée « %s » n'est ni une table ni une table distante" -#: commands/tablecmds.c:1782 commands/tablecmds.c:10955 +#: commands/tablecmds.c:1782 commands/tablecmds.c:10982 #, c-format msgid "cannot inherit from temporary relation \"%s\"" msgstr "ne peut pas hériter à partir d'une relation temporaire « %s »" -#: commands/tablecmds.c:1792
commands/tablecmds.c:10963 +#: commands/tablecmds.c:1792 commands/tablecmds.c:10990 #, c-format msgid "cannot inherit from temporary relation of another session" msgstr "ne peut pas hériter de la table temporaire d'une autre session" -#: commands/tablecmds.c:1809 commands/tablecmds.c:11087 +#: commands/tablecmds.c:1809 commands/tablecmds.c:11114 #, c-format msgid "relation \"%s\" would be inherited from more than once" msgstr "la relation « %s » serait héritée plus d'une fois" @@ -8237,7 +8265,7 @@ msgstr "%s versus %s" msgid "inherited column \"%s\" has a collation conflict" msgstr "la colonne héritée « %s » a un conflit sur le collationnement" -#: commands/tablecmds.c:1878 commands/tablecmds.c:2108 commands/tablecmds.c:5149 +#: commands/tablecmds.c:1878 commands/tablecmds.c:2108 commands/tablecmds.c:5162 #, c-format msgid "\"%s\" versus \"%s\"" msgstr "« %s » versus « %s »" @@ -8247,12 +8275,12 @@ msgstr "« %s » versus « %s »" msgid "inherited column \"%s\" has a storage parameter conflict" msgstr "la colonne héritée « %s » a un conflit de paramètre de stockage" -#: commands/tablecmds.c:2002 commands/tablecmds.c:8881 parser/parse_utilcmd.c:1122 parser/parse_utilcmd.c:1473 parser/parse_utilcmd.c:1549 +#: commands/tablecmds.c:2002 commands/tablecmds.c:8908 parser/parse_utilcmd.c:1143 parser/parse_utilcmd.c:1494 parser/parse_utilcmd.c:1570 #, c-format msgid "cannot convert whole-row table reference" msgstr "ne peut pas convertir une référence de ligne complète de table" -#: commands/tablecmds.c:2003 parser/parse_utilcmd.c:1123 +#: commands/tablecmds.c:2003 parser/parse_utilcmd.c:1144 #, c-format msgid "Constraint \"%s\" contains a whole-row reference to table \"%s\"." msgstr "La contrainte « %s » contient une référence de ligne complète vers la table « %s »."
@@ -8383,7 +8411,7 @@ msgstr "vérification de la table « %s »" msgid "column \"%s\" contains null values" msgstr "la colonne « %s » contient des valeurs NULL" -#: commands/tablecmds.c:4571 commands/tablecmds.c:8150 +#: commands/tablecmds.c:4571 commands/tablecmds.c:8177 #, c-format msgid "check constraint \"%s\" is violated by some row" msgstr "la contrainte de vérification « %s » est rompue par une ligne" @@ -8393,12 +8421,12 @@ msgstr "la contrainte de vérification « %s » est rompue par une ligne" msgid "partition constraint is violated by some row" msgstr "la contrainte de partition est violée par une ligne" -#: commands/tablecmds.c:4725 commands/trigger.c:247 rewrite/rewriteDefine.c:266 rewrite/rewriteDefine.c:920 +#: commands/tablecmds.c:4725 commands/trigger.c:253 rewrite/rewriteDefine.c:266 rewrite/rewriteDefine.c:920 #, c-format msgid "\"%s\" is not a table or view" msgstr "« %s » n'est pas une table ou une vue" -#: commands/tablecmds.c:4728 commands/trigger.c:1288 commands/trigger.c:1394 +#: commands/tablecmds.c:4728 commands/trigger.c:1314 commands/trigger.c:1420 #, c-format msgid "\"%s\" is not a table, view, or foreign table" msgstr "« %s » n'est pas une table, une vue ou une table distante" @@ -8428,7 +8456,7 @@ msgstr "« %s » n'est pas une table ou une table distante" msgid "\"%s\" is not a table, composite type, or foreign table" msgstr "« %s » n'est ni une table, ni un type composite, ni une table distante" -#: commands/tablecmds.c:4749 commands/tablecmds.c:6112 +#: commands/tablecmds.c:4749 commands/tablecmds.c:6139 #, c-format msgid "\"%s\" is not a table, materialized view, index, or foreign table" msgstr "« %s » n'est pas une table, une vue matérialisée, un index ou une table distante" @@ -8438,836 +8466,837 @@ msgstr "« %s » n'est pas une table, une vue matérialisée, un index ou une ta msgid "\"%s\" is of the wrong type" msgstr "« %s » est du mauvais type" -#: commands/tablecmds.c:4913 commands/tablecmds.c:4920 +#: commands/tablecmds.c:4934 commands/tablecmds.c:4941 #, c-format msgid "cannot alter type \"%s\" because column \"%s.%s\" uses it" msgstr "ne peut pas modifier le type « %s » car la colonne « %s.%s » l'utilise" -#: commands/tablecmds.c:4927 +#: commands/tablecmds.c:4948 #, c-format msgid "cannot alter foreign table \"%s\" because column \"%s.%s\" uses its row type" msgstr "" "ne peut pas modifier la table distante « %s » car la colonne « %s.%s » utilise\n" "son type de ligne" -#: commands/tablecmds.c:4934 +#: commands/tablecmds.c:4955 #, c-format msgid "cannot alter table \"%s\" because column \"%s.%s\" uses its row type" msgstr "" "ne peut pas modifier la table « %s » car la colonne « %s.%s » utilise\n" "son type de ligne" -#: commands/tablecmds.c:4996 +#: commands/tablecmds.c:5009 #, c-format msgid "cannot alter type \"%s\" because it is the type of a typed table" msgstr "ne peut pas modifier le type « %s » car il s'agit du type d'une table de type" -#: commands/tablecmds.c:4998 +#: commands/tablecmds.c:5011 #, c-format msgid "Use ALTER ... CASCADE to alter the typed tables too." msgstr "Utilisez ALTER ... CASCADE pour modifier aussi les tables de type."
-#: commands/tablecmds.c:5042 +#: commands/tablecmds.c:5055 #, c-format msgid "type %s is not a composite type" msgstr "le type %s n'est pas un type composite" -#: commands/tablecmds.c:5068 +#: commands/tablecmds.c:5081 #, c-format msgid "cannot add column to typed table" msgstr "ne peut pas ajouter une colonne à une table typée" -#: commands/tablecmds.c:5112 +#: commands/tablecmds.c:5125 #, c-format msgid "cannot add column to a partition" msgstr "ne peut pas ajouter une colonne à une partition" -#: commands/tablecmds.c:5141 commands/tablecmds.c:11213 +#: commands/tablecmds.c:5154 commands/tablecmds.c:11240 #, c-format msgid "child table \"%s\" has different type for column \"%s\"" msgstr "la table fille « %s » a un type différent pour la colonne « %s »" -#: commands/tablecmds.c:5147 commands/tablecmds.c:11220 +#: commands/tablecmds.c:5160 commands/tablecmds.c:11247 #, c-format msgid "child table \"%s\" has different collation for column \"%s\"" msgstr "la table fille « %s » a un collationnement différent pour la colonne « %s »" -#: commands/tablecmds.c:5157 +#: commands/tablecmds.c:5170 #, c-format msgid "child table \"%s\" has a conflicting \"%s\" column" msgstr "la table fille « %s » a une colonne conflictuelle, « %s »" -#: commands/tablecmds.c:5168 +#: commands/tablecmds.c:5181 #, c-format msgid "merging definition of column \"%s\" for child \"%s\"" msgstr "assemblage de la définition de la colonne « %s » pour le fils « %s »" -#: commands/tablecmds.c:5192 +#: commands/tablecmds.c:5205 #, c-format msgid "cannot recursively add identity column to table that has child tables" msgstr "ne peut pas ajouter récursivement la colonne identité à une table qui a des tables filles" -#: commands/tablecmds.c:5404 +#: commands/tablecmds.c:5431 #, c-format msgid "column must be added to child tables too" msgstr "la colonne doit aussi être ajoutée aux tables filles" -#: commands/tablecmds.c:5479 +#: commands/tablecmds.c:5506 #, c-format msgid "column \"%s\" of relation \"%s\" already exists, skipping" msgstr "la colonne « %s » de la relation « %s » existe déjà, poursuite du traitement" -#: commands/tablecmds.c:5486 +#: commands/tablecmds.c:5513 #, c-format msgid "column \"%s\" of relation \"%s\" already exists" msgstr "la colonne « %s » de la relation « %s » existe déjà" -#: commands/tablecmds.c:5584 commands/tablecmds.c:8563 +#: commands/tablecmds.c:5611 commands/tablecmds.c:8590 #, c-format msgid "cannot remove constraint from only the partitioned table when partitions exist" msgstr "ne peut pas supprimer une contrainte uniquement d'une table partitionnée quand des partitions existent" -#: commands/tablecmds.c:5585 commands/tablecmds.c:5732 commands/tablecmds.c:6529 commands/tablecmds.c:8564 +#: commands/tablecmds.c:5612 commands/tablecmds.c:5759 commands/tablecmds.c:6556 commands/tablecmds.c:8591 #, c-format msgid "Do not specify the ONLY keyword." msgstr "Ne spécifiez pas le mot clé ONLY." 
-#: commands/tablecmds.c:5617 commands/tablecmds.c:5764 commands/tablecmds.c:5819 commands/tablecmds.c:5894 commands/tablecmds.c:5988 commands/tablecmds.c:6047 commands/tablecmds.c:6171 commands/tablecmds.c:6225 commands/tablecmds.c:6317 commands/tablecmds.c:8703 commands/tablecmds.c:9414 +#: commands/tablecmds.c:5644 commands/tablecmds.c:5791 commands/tablecmds.c:5846 commands/tablecmds.c:5921 commands/tablecmds.c:6015 commands/tablecmds.c:6074 commands/tablecmds.c:6198 commands/tablecmds.c:6252 commands/tablecmds.c:6344 commands/tablecmds.c:8730 commands/tablecmds.c:9441 #, c-format msgid "cannot alter system column \"%s\"" msgstr "n'a pas pu modifier la colonne système « %s »" -#: commands/tablecmds.c:5623 commands/tablecmds.c:5825 +#: commands/tablecmds.c:5650 commands/tablecmds.c:5852 #, c-format msgid "column \"%s\" of relation \"%s\" is an identity column" msgstr "la colonne « %s » de la relation « %s » est une colonne d'identité" -#: commands/tablecmds.c:5659 +#: commands/tablecmds.c:5686 #, c-format msgid "column \"%s\" is in a primary key" msgstr "la colonne « %s » est dans une clé primaire" -#: commands/tablecmds.c:5681 +#: commands/tablecmds.c:5708 #, c-format msgid "column \"%s\" is marked NOT NULL in parent table" msgstr "la colonne « %s » est marquée NOT NULL dans la table parent" -#: commands/tablecmds.c:5731 +#: commands/tablecmds.c:5758 #, c-format msgid "cannot add constraint to only the partitioned table when partitions exist" msgstr "ne peut pas ajouter la contrainte à la seule table partitionnée quand plusieurs partitions existent" -#: commands/tablecmds.c:5827 +#: commands/tablecmds.c:5854 #, c-format msgid "Use ALTER TABLE ... ALTER COLUMN ... DROP IDENTITY instead." msgstr "Utilisez à la place ALTER TABLE ... ALTER COLUMN ... DROP IDENTITY."
-#: commands/tablecmds.c:5905 +#: commands/tablecmds.c:5932 #, c-format msgid "column \"%s\" of relation \"%s\" must be declared NOT NULL before identity can be added" msgstr "la colonne « %s » de la relation « %s » doit être déclarée NOT NULL avant que la colonne identité puisse être ajoutée" -#: commands/tablecmds.c:5911 +#: commands/tablecmds.c:5938 #, c-format msgid "column \"%s\" of relation \"%s\" is already an identity column" msgstr "la colonne « %s » de la relation « %s » est déjà une colonne d'identité" -#: commands/tablecmds.c:5917 +#: commands/tablecmds.c:5944 #, c-format msgid "column \"%s\" of relation \"%s\" already has a default value" msgstr "la colonne « %s » de la relation « %s » a déjà une valeur par défaut" -#: commands/tablecmds.c:5994 commands/tablecmds.c:6055 +#: commands/tablecmds.c:6021 commands/tablecmds.c:6082 #, c-format msgid "column \"%s\" of relation \"%s\" is not an identity column" msgstr "la colonne « %s » de la relation « %s » n'est pas une colonne d'identité" -#: commands/tablecmds.c:6060 +#: commands/tablecmds.c:6087 #, c-format msgid "column \"%s\" of relation \"%s\" is not an identity column, skipping" msgstr "la colonne « %s » de la relation « %s » n'est pas une colonne d'identité, poursuite du traitement" -#: commands/tablecmds.c:6144 +#: commands/tablecmds.c:6171 #, c-format msgid "statistics target %d is too low" msgstr "la cible statistique %d est trop basse" -#: commands/tablecmds.c:6152 +#: commands/tablecmds.c:6179 #, c-format msgid "lowering statistics target to %d" msgstr "abaissement de la cible statistique à %d" -#: commands/tablecmds.c:6297 +#: commands/tablecmds.c:6324 #, c-format msgid "invalid storage type \"%s\"" msgstr "type « %s » de stockage invalide" -#: commands/tablecmds.c:6329 +#: commands/tablecmds.c:6356 #, c-format msgid "column data type %s can only have storage PLAIN" msgstr "le type de données %s de la colonne peut seulement avoir un stockage PLAIN" -#: commands/tablecmds.c:6364 +#: commands/tablecmds.c:6391 #, c-format msgid "cannot drop column from typed table" msgstr "ne peut pas supprimer une colonne à une table typée" -#: commands/tablecmds.c:6471 +#: commands/tablecmds.c:6498 #, c-format msgid "column \"%s\" of relation \"%s\" does not exist, skipping" msgstr "la colonne « %s » de la relation « %s » n'existe pas, ignore" -#: commands/tablecmds.c:6484 +#: commands/tablecmds.c:6511 #, c-format msgid "cannot drop system column \"%s\"" msgstr "ne peut pas supprimer la colonne système « %s »" -#: commands/tablecmds.c:6491 +#: commands/tablecmds.c:6518 #, c-format msgid "cannot drop inherited column \"%s\"" msgstr "ne peut pas supprimer la colonne héritée « %s »" -#: commands/tablecmds.c:6500 +#: commands/tablecmds.c:6527 #, c-format msgid "cannot drop column named in partition key" msgstr "ne peut pas supprimer une colonne nommée dans une clé de partitionnement" -#: commands/tablecmds.c:6504 +#: commands/tablecmds.c:6531 #, c-format msgid "cannot drop column referenced in partition key expression" msgstr "ne peut pas supprimer une colonne référencée dans l'expression d'une clé de partitionnement" -#: commands/tablecmds.c:6528 +#: commands/tablecmds.c:6555 #, c-format msgid "cannot drop column from only the partitioned table when partitions exist" msgstr "ne peut pas supprimer une colonne sur une seule partition quand plusieurs partitions existent" -#: commands/tablecmds.c:6746 +#: commands/tablecmds.c:6773 #, c-format msgid "ALTER TABLE / ADD CONSTRAINT USING INDEX will rename index \"%s\" to \"%s\"" msgstr "ALTER TABLE 
/ ADD CONSTRAINT USING INDEX renommera l'index « %s » en « %s »" -#: commands/tablecmds.c:6958 +#: commands/tablecmds.c:6985 #, c-format msgid "constraint must be added to child tables too" msgstr "la contrainte doit aussi être ajoutée aux tables filles" -#: commands/tablecmds.c:7029 +#: commands/tablecmds.c:7056 #, c-format msgid "cannot reference partitioned table \"%s\"" msgstr "ne peut pas référencer la table partitionnée « %s »" -#: commands/tablecmds.c:7035 +#: commands/tablecmds.c:7062 #, c-format msgid "referenced relation \"%s\" is not a table" msgstr "la relation référencée « %s » n'est pas une table" -#: commands/tablecmds.c:7058 +#: commands/tablecmds.c:7085 #, c-format msgid "constraints on permanent tables may reference only permanent tables" msgstr "les contraintes sur les tables permanentes peuvent seulement référencer des tables permanentes" -#: commands/tablecmds.c:7065 +#: commands/tablecmds.c:7092 #, c-format msgid "constraints on unlogged tables may reference only permanent or unlogged tables" msgstr "les contraintes sur les tables non tracées peuvent seulement référencer des tables permanentes ou non tracées" -#: commands/tablecmds.c:7071 +#: commands/tablecmds.c:7098 #, c-format msgid "constraints on temporary tables may reference only temporary tables" msgstr "" "les contraintes sur des tables temporaires ne peuvent référencer que des\n" "tables temporaires" -#: commands/tablecmds.c:7075 +#: commands/tablecmds.c:7102 #, c-format msgid "constraints on temporary tables must involve temporary tables of this session" msgstr "" "les contraintes sur des tables temporaires doivent référencer les tables\n" "temporaires de cette session" -#: commands/tablecmds.c:7135 +#: commands/tablecmds.c:7162 #, c-format msgid "number of referencing and referenced columns for foreign key disagree" msgstr "nombre de colonnes de référence et référencées pour la clé étrangère en désaccord" -#: commands/tablecmds.c:7242 +#: commands/tablecmds.c:7269 #, c-format msgid "foreign key constraint \"%s\" cannot be implemented" msgstr "la contrainte de clé étrangère « %s » ne peut pas être implémentée" -#: commands/tablecmds.c:7245 +#: commands/tablecmds.c:7272 #, c-format msgid "Key columns \"%s\" and \"%s\" are of incompatible types: %s and %s." msgstr "Les colonnes clés « %s » et « %s » sont de types incompatibles : %s et %s."
-#: commands/tablecmds.c:7450 commands/tablecmds.c:7616 commands/tablecmds.c:8531 commands/tablecmds.c:8599 +#: commands/tablecmds.c:7477 commands/tablecmds.c:7643 commands/tablecmds.c:8558 commands/tablecmds.c:8626 #, c-format msgid "constraint \"%s\" of relation \"%s\" does not exist" msgstr "la contrainte « %s » de la relation « %s » n'existe pas" -#: commands/tablecmds.c:7456 +#: commands/tablecmds.c:7483 #, c-format msgid "constraint \"%s\" of relation \"%s\" is not a foreign key constraint" msgstr "la contrainte « %s » de la relation « %s » n'est pas une clé étrangère" -#: commands/tablecmds.c:7623 +#: commands/tablecmds.c:7650 #, c-format msgid "constraint \"%s\" of relation \"%s\" is not a foreign key or check constraint" msgstr "la contrainte « %s » de la relation « %s » n'est pas une clé étrangère ou une contrainte de vérification" -#: commands/tablecmds.c:7693 +#: commands/tablecmds.c:7720 #, c-format msgid "constraint must be validated on child tables too" msgstr "la contrainte doit aussi être validée sur les tables enfants" -#: commands/tablecmds.c:7761 +#: commands/tablecmds.c:7788 #, c-format msgid "column \"%s\" referenced in foreign key constraint does not exist" msgstr "la colonne « %s » référencée dans la contrainte de clé étrangère n'existe pas" -#: commands/tablecmds.c:7766 +#: commands/tablecmds.c:7793 #, c-format msgid "cannot have more than %d keys in a foreign key" msgstr "ne peut pas avoir plus de %d clés dans une clé étrangère" -#: commands/tablecmds.c:7831 +#: commands/tablecmds.c:7858 #, c-format msgid "cannot use a deferrable primary key for referenced table \"%s\"" msgstr "ne peut pas utiliser une clé primaire déferrable pour la table « %s » référencée" -#: commands/tablecmds.c:7848 +#: commands/tablecmds.c:7875 #, c-format msgid "there is no primary key for referenced table \"%s\"" msgstr "il n'existe pas de clé primaire pour la table « %s » référencée" -#: commands/tablecmds.c:7913 +#: commands/tablecmds.c:7940 #, c-format msgid "foreign key referenced-columns list must not contain duplicates" msgstr "la liste de colonnes référencées dans la clé étrangère ne doit pas contenir de duplicats" -#: commands/tablecmds.c:8007 +#: commands/tablecmds.c:8034 #, c-format msgid "cannot use a deferrable unique constraint for referenced table \"%s\"" msgstr "" "ne peut pas utiliser une contrainte unique déferrable pour la table\n" "référencée « %s »" -#: commands/tablecmds.c:8012 +#: commands/tablecmds.c:8039 #, c-format msgid "there is no unique constraint matching given keys for referenced table \"%s\"" msgstr "" "il n'existe aucune contrainte unique correspondant aux clés données pour la\n" "table « %s » référencée" -#: commands/tablecmds.c:8183 +#: commands/tablecmds.c:8210 #, c-format msgid "validating foreign key constraint \"%s\"" msgstr "validation de la contrainte de clé étrangère « %s »" -#: commands/tablecmds.c:8485 +#: commands/tablecmds.c:8512 #, c-format msgid "cannot drop inherited constraint \"%s\" of relation \"%s\"" msgstr "ne peut pas supprimer la contrainte héritée « %s » de la relation « %s »" -#: commands/tablecmds.c:8537 +#: commands/tablecmds.c:8564 #, c-format msgid "constraint \"%s\" of relation \"%s\" does not exist, skipping" msgstr "la contrainte « %s » de la relation « %s » n'existe pas, ignore" -#: commands/tablecmds.c:8687 +#: commands/tablecmds.c:8714 #, c-format msgid "cannot alter column type of typed table" msgstr "ne peut pas modifier le type d'une colonne appartenant à une table typée" -#: commands/tablecmds.c:8710 +#:
commands/tablecmds.c:8737 #, c-format msgid "cannot alter inherited column \"%s\"" msgstr "ne peut pas modifier la colonne héritée « %s »" -#: commands/tablecmds.c:8719 +#: commands/tablecmds.c:8746 #, c-format msgid "cannot alter type of column named in partition key" msgstr "ne peut pas modifier le type d'une colonne nommée dans une clé de partitionnement" -#: commands/tablecmds.c:8723 +#: commands/tablecmds.c:8750 #, c-format msgid "cannot alter type of column referenced in partition key expression" msgstr "ne peut pas modifier le type d'une colonne référencée dans l'expression d'une clé de partitionnement" -#: commands/tablecmds.c:8773 +#: commands/tablecmds.c:8800 #, c-format msgid "result of USING clause for column \"%s\" cannot be cast automatically to type %s" msgstr "le résultat de la clause USING pour la colonne « %s » ne peut pas être converti automatiquement vers le type %s" -#: commands/tablecmds.c:8776 +#: commands/tablecmds.c:8803 #, c-format msgid "You might need to add an explicit cast." msgstr "Vous pouvez avoir besoin d'ajouter une conversion explicite." -#: commands/tablecmds.c:8780 +#: commands/tablecmds.c:8807 #, c-format msgid "column \"%s\" cannot be cast automatically to type %s" msgstr "la colonne « %s » ne peut pas être convertie vers le type %s" #. translator: USING is SQL, don't translate it -#: commands/tablecmds.c:8783 +#: commands/tablecmds.c:8810 #, c-format msgid "You might need to specify \"USING %s::%s\"." msgstr "Vous pouvez avoir besoin de spécifier \"USING %s::%s\"." -#: commands/tablecmds.c:8882 +#: commands/tablecmds.c:8909 #, c-format msgid "USING expression contains a whole-row table reference." msgstr "l'expression USING contient une référence de table de ligne complète" -#: commands/tablecmds.c:8893 +#: commands/tablecmds.c:8920 #, c-format msgid "type of inherited column \"%s\" must be changed in child tables too" msgstr "le type de colonne héritée « %s » doit aussi être modifié dans les tables filles" -#: commands/tablecmds.c:8980 +#: commands/tablecmds.c:9007 #, c-format msgid "cannot alter type of column \"%s\" twice" msgstr "ne peut pas modifier le type de la colonne « %s » deux fois" -#: commands/tablecmds.c:9016 +#: commands/tablecmds.c:9043 #, c-format msgid "default for column \"%s\" cannot be cast automatically to type %s" msgstr "" "la valeur par défaut de la colonne « %s » ne peut pas être convertie vers le\n" "type %s automatiquement" -#: commands/tablecmds.c:9142 +#: commands/tablecmds.c:9169 #, c-format msgid "cannot alter type of a column used by a view or rule" msgstr "ne peut pas modifier le type d'une colonne utilisée dans une vue ou une règle" -#: commands/tablecmds.c:9143 commands/tablecmds.c:9162 commands/tablecmds.c:9180 +#: commands/tablecmds.c:9170 commands/tablecmds.c:9189 commands/tablecmds.c:9207 #, c-format msgid "%s depends on column \"%s\"" msgstr "%s dépend de la colonne « %s »" -#: commands/tablecmds.c:9161 +#: commands/tablecmds.c:9188 #, c-format msgid "cannot alter type of a column used in a trigger definition" msgstr "ne peut pas modifier le type d'une colonne utilisée dans la définition d'un trigger" -#: commands/tablecmds.c:9179 +#: commands/tablecmds.c:9206 #, c-format msgid "cannot alter type of a column used in a policy definition" msgstr "ne peut pas modifier le type d'une colonne utilisée dans la définition d'une politique" -#: commands/tablecmds.c:9854 +#: commands/tablecmds.c:9881 #, c-format msgid "cannot change owner of index \"%s\"" msgstr "ne peut pas modifier le propriétaire de l'index « %s »" -#:
commands/tablecmds.c:9856 +#: commands/tablecmds.c:9883 #, c-format msgid "Change the ownership of the index's table, instead." msgstr "Modifier à la place le propriétaire de la table concernée par l'index." -#: commands/tablecmds.c:9873 +#: commands/tablecmds.c:9900 #, c-format msgid "cannot change owner of sequence \"%s\"" msgstr "ne peut pas modifier le propriétaire de la séquence « %s »" -#: commands/tablecmds.c:9887 commands/tablecmds.c:13116 +#: commands/tablecmds.c:9914 commands/tablecmds.c:13143 #, c-format msgid "Use ALTER TYPE instead." msgstr "Utilisez ALTER TYPE à la place." -#: commands/tablecmds.c:9896 +#: commands/tablecmds.c:9923 #, c-format msgid "\"%s\" is not a table, view, sequence, or foreign table" msgstr "« %s » n'est pas une table, une vue, une séquence ou une table distante" -#: commands/tablecmds.c:10237 +#: commands/tablecmds.c:10264 #, c-format msgid "cannot have multiple SET TABLESPACE subcommands" msgstr "ne peut pas avoir de nombreuses sous-commandes SET TABLESPACE" -#: commands/tablecmds.c:10311 +#: commands/tablecmds.c:10338 #, c-format msgid "\"%s\" is not a table, view, materialized view, index, or TOAST table" msgstr "« %s » n'est pas une table, une vue, une vue matérialisée, un index ou une table TOAST" -#: commands/tablecmds.c:10344 commands/view.c:504 +#: commands/tablecmds.c:10371 commands/view.c:504 #, c-format msgid "WITH CHECK OPTION is supported only on automatically updatable views" msgstr "WITH CHECK OPTION est uniquement accepté pour les vues dont la mise à jour est automatique" -#: commands/tablecmds.c:10486 +#: commands/tablecmds.c:10513 #, c-format msgid "cannot move system relation \"%s\"" msgstr "ne peut pas déplacer la relation système « %s »" -#: commands/tablecmds.c:10502 +#: commands/tablecmds.c:10529 #, c-format msgid "cannot move temporary tables of other sessions" msgstr "ne peut pas déplacer les tables temporaires d'autres sessions" -#: commands/tablecmds.c:10638 +#: commands/tablecmds.c:10665 #, c-format msgid "only tables, indexes, and materialized views exist in tablespaces" msgstr "seuls les tables, index et vues matérialisées existent dans les tablespaces" -#: commands/tablecmds.c:10650 +#: commands/tablecmds.c:10677 #, c-format msgid "cannot move relations in to or out of pg_global tablespace" msgstr "ne peut pas déplacer les relations dans ou à partir du tablespace pg_global" -#: commands/tablecmds.c:10742 +#: commands/tablecmds.c:10769 #, c-format msgid "aborting because lock on relation \"%s.%s\" is not available" msgstr "annulation car le verrou sur la relation « %s.%s » n'est pas disponible" -#: commands/tablecmds.c:10758 +#: commands/tablecmds.c:10785 #, c-format msgid "no matching relations in tablespace \"%s\" found" msgstr "aucune relation correspondante trouvée dans le tablespace « %s »" -#: commands/tablecmds.c:10832 storage/buffer/bufmgr.c:915 +#: commands/tablecmds.c:10859 storage/buffer/bufmgr.c:915 #, c-format msgid "invalid page in block %u of relation %s" msgstr "page invalide dans le bloc %u de la relation %s" -#: commands/tablecmds.c:10914 +#: commands/tablecmds.c:10941 #, c-format msgid "cannot change inheritance of typed table" msgstr "ne peut pas modifier l'héritage d'une table typée" -#: commands/tablecmds.c:10919 commands/tablecmds.c:11461 +#: commands/tablecmds.c:10946 commands/tablecmds.c:11488 #, c-format msgid "cannot change inheritance of a partition" msgstr "ne peut pas modifier l'héritage d'une partition" -#: commands/tablecmds.c:10924 +#: commands/tablecmds.c:10951 #, c-format msgid "cannot
change inheritance of partitioned table" msgstr "ne peut pas modifier l'héritage d'une table partitionnée" -#: commands/tablecmds.c:10970 +#: commands/tablecmds.c:10997 #, c-format msgid "cannot inherit to temporary relation of another session" msgstr "ne peut pas hériter à partir d'une relation temporaire d'une autre session" -#: commands/tablecmds.c:10983 +#: commands/tablecmds.c:11010 #, c-format msgid "cannot inherit from a partition" msgstr "ne peut pas hériter d'une partition" -#: commands/tablecmds.c:11005 commands/tablecmds.c:13509 +#: commands/tablecmds.c:11032 commands/tablecmds.c:13537 #, c-format msgid "circular inheritance not allowed" msgstr "héritage circulaire interdit" -#: commands/tablecmds.c:11006 commands/tablecmds.c:13510 +#: commands/tablecmds.c:11033 commands/tablecmds.c:13538 #, c-format msgid "\"%s\" is already a child of \"%s\"." msgstr "« %s » est déjà un enfant de « %s »." -#: commands/tablecmds.c:11014 +#: commands/tablecmds.c:11041 #, c-format msgid "table \"%s\" without OIDs cannot inherit from table \"%s\" with OIDs" msgstr "la table « %s » qui n'a pas d'OID ne peut pas hériter de la table « %s » qui en a" -#: commands/tablecmds.c:11027 +#: commands/tablecmds.c:11054 #, c-format msgid "trigger \"%s\" prevents table \"%s\" from becoming an inheritance child" msgstr "le trigger « %s » empêche la table « %s » de devenir une fille dans l'héritage" -#: commands/tablecmds.c:11029 +#: commands/tablecmds.c:11056 #, c-format msgid "ROW triggers with transition tables are not supported in inheritance hierarchies" msgstr "les triggers ROW avec des tables de transition ne sont pas supportés dans les hiérarchies d'héritage" -#: commands/tablecmds.c:11231 +#: commands/tablecmds.c:11258 #, c-format msgid "column \"%s\" in child table must be marked NOT NULL" msgstr "la colonne « %s » de la table enfant doit être marquée comme NOT NULL" -#: commands/tablecmds.c:11258 commands/tablecmds.c:11297 +#: commands/tablecmds.c:11285 commands/tablecmds.c:11324 #, c-format msgid "child table is missing column \"%s\"" msgstr "la colonne « %s » manque à la table enfant" -#: commands/tablecmds.c:11385 +#: commands/tablecmds.c:11412 #, c-format msgid "child table \"%s\" has different definition for check constraint \"%s\"" msgstr "la table fille « %s » a un type différent pour la contrainte de vérification « %s »" -#: commands/tablecmds.c:11393 +#: commands/tablecmds.c:11420 #, c-format msgid "constraint \"%s\" conflicts with non-inherited constraint on child table \"%s\"" msgstr "la contrainte « %s » entre en conflit avec une contrainte non héritée sur la table fille « %s »" -#: commands/tablecmds.c:11404 +#: commands/tablecmds.c:11431 #, c-format msgid "constraint \"%s\" conflicts with NOT VALID constraint on child table \"%s\"" msgstr "la contrainte « %s » entre en conflit avec une contrainte NOT VALID sur la table fille « %s »" -#: commands/tablecmds.c:11439 +#: commands/tablecmds.c:11466 #, c-format msgid "child table is missing constraint \"%s\"" msgstr "la contrainte « %s » manque à la table enfant" -#: commands/tablecmds.c:11555 +#: commands/tablecmds.c:11582 #, c-format msgid "relation \"%s\" is not a partition of relation \"%s\"" msgstr "la relation « %s » n'est pas une partition de la relation « %s »" -#: commands/tablecmds.c:11561 +#: commands/tablecmds.c:11588 #, c-format msgid "relation \"%s\" is not a parent of relation \"%s\"" msgstr "la relation « %s » n'est pas un parent de la relation « %s »" -#: commands/tablecmds.c:11787 +#: commands/tablecmds.c:11814 #, c-format 
msgid "typed tables cannot inherit" msgstr "les tables avec type ne peuvent pas hériter d'autres tables" -#: commands/tablecmds.c:11818 +#: commands/tablecmds.c:11845 #, c-format msgid "table is missing column \"%s\"" msgstr "la colonne « %s » manque à la table" -#: commands/tablecmds.c:11828 +#: commands/tablecmds.c:11855 #, c-format msgid "table has column \"%s\" where type requires \"%s\"" msgstr "la table a une colonne « %s » alors que le type impose « %s »." -#: commands/tablecmds.c:11837 +#: commands/tablecmds.c:11864 #, c-format msgid "table \"%s\" has different type for column \"%s\"" msgstr "la table « %s » a un type différent pour la colonne « %s »" -#: commands/tablecmds.c:11850 +#: commands/tablecmds.c:11877 #, c-format msgid "table has extra column \"%s\"" msgstr "la table a une colonne supplémentaire « %s »" -#: commands/tablecmds.c:11902 +#: commands/tablecmds.c:11929 #, c-format msgid "\"%s\" is not a typed table" msgstr "« %s » n'est pas une table typée" -#: commands/tablecmds.c:12084 +#: commands/tablecmds.c:12111 #, c-format msgid "cannot use non-unique index \"%s\" as replica identity" msgstr "ne peut pas utiliser l'index non unique « %s » comme identité de réplicat" -#: commands/tablecmds.c:12090 +#: commands/tablecmds.c:12117 #, c-format msgid "cannot use non-immediate index \"%s\" as replica identity" msgstr "ne peut pas utiliser l'index « %s » immédiat comme identité de réplicat" -#: commands/tablecmds.c:12096 +#: commands/tablecmds.c:12123 #, c-format msgid "cannot use expression index \"%s\" as replica identity" msgstr "ne peut pas utiliser un index par expression « %s » comme identité de réplicat" -#: commands/tablecmds.c:12102 +#: commands/tablecmds.c:12129 #, c-format msgid "cannot use partial index \"%s\" as replica identity" msgstr "ne peut pas utiliser l'index partiel « %s » comme identité de réplicat" -#: commands/tablecmds.c:12108 +#: commands/tablecmds.c:12135 #, c-format msgid "cannot use invalid index \"%s\" as replica identity" msgstr "ne peut pas utiliser l'index invalide « %s » comme identité de réplicat" -#: commands/tablecmds.c:12129 +#: commands/tablecmds.c:12156 #, c-format msgid "index \"%s\" cannot be used as replica identity because column %d is a system column" msgstr "l'index « %s » ne peut pas être utilisé comme identité de réplicat car la colonne %d est une colonne système" -#: commands/tablecmds.c:12136 +#: commands/tablecmds.c:12163 #, c-format msgid "index \"%s\" cannot be used as replica identity because column \"%s\" is nullable" msgstr "l'index « %s » ne peut pas être utilisé comme identité de réplicat car la colonne « %s » peut être NULL" -#: commands/tablecmds.c:12329 +#: commands/tablecmds.c:12356 #, c-format msgid "cannot change logged status of table \"%s\" because it is temporary" msgstr "ne peut pas modifier le statut de journalisation de la table « %s » parce qu'elle est temporaire" -#: commands/tablecmds.c:12353 +#: commands/tablecmds.c:12380 #, c-format msgid "cannot change table \"%s\" to unlogged because it is part of a publication" msgstr "ne peut pas modifier la table « %s » en non journalisée car elle fait partie d'une publication" -#: commands/tablecmds.c:12355 +#: commands/tablecmds.c:12382 #, c-format msgid "Unlogged relations cannot be replicated." msgstr "Les relations non journalisées ne peuvent pas être répliquées." 
-#: commands/tablecmds.c:12400 +#: commands/tablecmds.c:12427 #, c-format msgid "could not change table \"%s\" to logged because it references unlogged table \"%s\"" msgstr "n'a pas pu passer la table « %s » en journalisé car elle référence la table non journalisée « %s »" -#: commands/tablecmds.c:12410 +#: commands/tablecmds.c:12437 #, c-format msgid "could not change table \"%s\" to unlogged because it references logged table \"%s\"" msgstr "n'a pas pu passer la table « %s » en non journalisé car elle référence la table journalisée « %s »" -#: commands/tablecmds.c:12468 +#: commands/tablecmds.c:12495 #, c-format msgid "cannot move an owned sequence into another schema" msgstr "ne peut pas déplacer une séquence OWNED BY dans un autre schéma" -#: commands/tablecmds.c:12574 +#: commands/tablecmds.c:12601 #, c-format msgid "relation \"%s\" already exists in schema \"%s\"" msgstr "la relation « %s » existe déjà dans le schéma « %s »" -#: commands/tablecmds.c:13100 +#: commands/tablecmds.c:13127 #, c-format msgid "\"%s\" is not a composite type" msgstr "« %s » n'est pas un type composite" -#: commands/tablecmds.c:13131 +#: commands/tablecmds.c:13158 #, c-format msgid "\"%s\" is not a table, view, materialized view, sequence, or foreign table" msgstr "« %s » n'est pas une table, une vue, une vue matérialisée, une séquence ou une table distante" -#: commands/tablecmds.c:13164 +#: commands/tablecmds.c:13191 #, c-format msgid "unrecognized partitioning strategy \"%s\"" msgstr "stratégie de partitionnement « %s » non reconnue" -#: commands/tablecmds.c:13172 +#: commands/tablecmds.c:13199 #, c-format msgid "cannot use \"list\" partition strategy with more than one column" msgstr "ne peut pas utiliser la stratégie de partitionnement « list » avec plus d'une colonne" -#: commands/tablecmds.c:13197 +#: commands/tablecmds.c:13224 #, c-format msgid "column \"%s\" appears more than once in partition key" msgstr "la colonne « %s » apparaît plus d'une fois dans la clé de partitionnement" -#: commands/tablecmds.c:13250 +#: commands/tablecmds.c:13277 #, c-format msgid "column \"%s\" named in partition key does not exist" msgstr "la colonne « %s » nommée dans la clé de partitionnement n'existe pas" -#: commands/tablecmds.c:13257 +#: commands/tablecmds.c:13284 #, c-format msgid "cannot use system column \"%s\" in partition key" msgstr "ne peut pas utiliser la colonne système « %s » comme clé de partitionnement" -#: commands/tablecmds.c:13320 +#: commands/tablecmds.c:13347 #, c-format msgid "functions in partition key expression must be marked IMMUTABLE" msgstr "" "les fonctions dans une expression de clé de partitionnement doivent être marquées comme\n" "IMMUTABLE" -#: commands/tablecmds.c:13337 +#: commands/tablecmds.c:13364 #, c-format msgid "partition key expressions cannot contain whole-row references" msgstr "les expressions de clé de partitionnement ne peuvent pas contenir des références à des lignes complètes" -#: commands/tablecmds.c:13344 +#: commands/tablecmds.c:13371 #, c-format msgid "partition key expressions cannot contain system column references" msgstr "les expressions de la clé de partitionnement ne peuvent pas contenir des références aux colonnes systèmes" -#: commands/tablecmds.c:13354 +#: commands/tablecmds.c:13381 #, c-format msgid "cannot use constant expression as partition key" msgstr "ne peut pas utiliser une expression constante comme clé de partitionnement" -#: commands/tablecmds.c:13375 +#: commands/tablecmds.c:13402 #, c-format msgid "could not determine which collation to use for 
partition expression" msgstr "n'a pas pu déterminer le collationnement à utiliser pour l'expression de partitionnement" -#: commands/tablecmds.c:13400 +#: commands/tablecmds.c:13427 #, c-format msgid "data type %s has no default btree operator class" msgstr "le type de données %s n'a pas de classe d'opérateurs btree par défaut" -#: commands/tablecmds.c:13402 +#: commands/tablecmds.c:13429 #, c-format msgid "You must specify a btree operator class or define a default btree operator class for the data type." msgstr "" "Vous devez spécifier une classe d'opérateur btree ou définir une\n" "classe d'opérateur btree par défaut pour le type de données." -#: commands/tablecmds.c:13449 +#: commands/tablecmds.c:13477 #, c-format msgid "\"%s\" is already a partition" msgstr "« %s » est déjà une partition" -#: commands/tablecmds.c:13455 +#: commands/tablecmds.c:13483 #, c-format msgid "cannot attach a typed table as partition" msgstr "ne peut pas attacher une table typée à une partition" -#: commands/tablecmds.c:13471 +#: commands/tablecmds.c:13499 #, c-format msgid "cannot attach inheritance child as partition" msgstr "ne peut pas ajouter la table en héritage comme une partition" -#: commands/tablecmds.c:13485 +#: commands/tablecmds.c:13513 #, c-format msgid "cannot attach inheritance parent as partition" msgstr "ne peut pas attacher le parent d'héritage comme partition" -#: commands/tablecmds.c:13519 +#: commands/tablecmds.c:13547 #, c-format msgid "cannot attach a permanent relation as partition of temporary relation \"%s\"" msgstr "ne peut pas attacher une relation permanente comme partition de la relation temporaire « %s »" -#: commands/tablecmds.c:13527 +#: commands/tablecmds.c:13555 #, c-format msgid "cannot attach as partition of temporary relation of another session" msgstr "ne peut pas attacher comme partition d'une relation temporaire d'une autre session" -#: commands/tablecmds.c:13534 +#: commands/tablecmds.c:13562 #, c-format msgid "cannot attach temporary relation of another session as partition" msgstr "ne peut pas attacher une relation temporaire d'une autre session comme partition" -#: commands/tablecmds.c:13540 +#: commands/tablecmds.c:13568 #, c-format msgid "cannot attach table \"%s\" without OIDs as partition of table \"%s\" with OIDs" msgstr "ne peut pas attacher la table « %s » sans OID comme partition de la table « %s » avec OID" -#: commands/tablecmds.c:13548 +#: commands/tablecmds.c:13576 #, c-format msgid "cannot attach table \"%s\" with OIDs as partition of table \"%s\" without OIDs" msgstr "ne peut pas attacher la table « %s » avec OID comme partition de la table « %s » sans OID" -#: commands/tablecmds.c:13570 +#: commands/tablecmds.c:13598 #, c-format msgid "table \"%s\" contains column \"%s\" not found in parent \"%s\"" msgstr "la table « %s » contient la colonne « %s » introuvable dans le parent « %s »" -#: commands/tablecmds.c:13573 -#, c-format -msgid "New partition should contain only the columns present in parent." +#: commands/tablecmds.c:13601 +#, fuzzy, c-format +#| msgid "New partition should contain only the columns present in parent." +msgid "The new partition may contain only the columns present in parent." msgstr "La nouvelle partition devrait seulement contenir les colonnes présentes dans le parent." 
-#: commands/tablecmds.c:13585 +#: commands/tablecmds.c:13613 #, c-format msgid "trigger \"%s\" prevents table \"%s\" from becoming a partition" msgstr "le trigger « %s » empêche la table « %s » de devenir une partition" -#: commands/tablecmds.c:13587 commands/trigger.c:387 +#: commands/tablecmds.c:13615 commands/trigger.c:393 #, c-format msgid "ROW triggers with transition tables are not supported on partitions" msgstr "les triggers ROW avec des tables de transition ne sont pas supportés sur les partitions" -#: commands/tablecmds.c:13702 +#: commands/tablecmds.c:13740 #, c-format msgid "partition constraint for table \"%s\" is implied by existing constraints" msgstr "la contrainte de partitionnement pour la table « %s » provient des contraintes existantes" -#: commands/tablespace.c:162 commands/tablespace.c:179 commands/tablespace.c:190 commands/tablespace.c:198 commands/tablespace.c:623 replication/slot.c:1177 storage/file/copydir.c:47 +#: commands/tablespace.c:162 commands/tablespace.c:179 commands/tablespace.c:190 commands/tablespace.c:198 commands/tablespace.c:623 replication/slot.c:1178 storage/file/copydir.c:47 #, c-format msgid "could not create directory \"%s\": %m" msgstr "n'a pas pu créer le répertoire « %s » : %m" @@ -9392,233 +9421,238 @@ msgstr "les répertoires du tablespace %u n'ont pas pu être supprimés" msgid "You can remove the directories manually if necessary." msgstr "Vous pouvez supprimer les répertoires manuellement si nécessaire." -#: commands/trigger.c:189 +#: commands/trigger.c:190 #, c-format msgid "\"%s\" is a table" msgstr "« %s » est une table" -#: commands/trigger.c:191 +#: commands/trigger.c:192 #, c-format msgid "Tables cannot have INSTEAD OF triggers." msgstr "Les tables ne peuvent pas avoir de triggers INSTEAD OF." -#: commands/trigger.c:198 +#: commands/trigger.c:199 #, c-format msgid "Partitioned tables cannot have ROW triggers." msgstr "Les tables partitionnées ne peuvent pas avoir de triggers ROW." -#: commands/trigger.c:209 commands/trigger.c:216 commands/trigger.c:369 +#: commands/trigger.c:210 commands/trigger.c:217 commands/trigger.c:375 #, c-format msgid "\"%s\" is a view" msgstr "« %s » est une vue" -#: commands/trigger.c:211 +#: commands/trigger.c:212 #, c-format msgid "Views cannot have row-level BEFORE or AFTER triggers." msgstr "Les vues ne peuvent pas avoir de trigger BEFORE ou AFTER au niveau ligne." -#: commands/trigger.c:218 +#: commands/trigger.c:219 #, c-format msgid "Views cannot have TRUNCATE triggers." msgstr "Les vues ne peuvent pas avoir de triggers TRUNCATE." -#: commands/trigger.c:226 commands/trigger.c:233 commands/trigger.c:240 commands/trigger.c:362 +#: commands/trigger.c:227 commands/trigger.c:234 commands/trigger.c:246 commands/trigger.c:368 #, c-format msgid "\"%s\" is a foreign table" msgstr "« %s » est une table distante" -#: commands/trigger.c:228 +#: commands/trigger.c:229 #, c-format msgid "Foreign tables cannot have INSTEAD OF triggers." msgstr "Les tables distantes ne peuvent pas avoir de triggers INSTEAD OF." -#: commands/trigger.c:235 +#: commands/trigger.c:236 #, c-format msgid "Foreign tables cannot have TRUNCATE triggers." msgstr "Les tables distantes ne peuvent pas avoir de triggers TRUNCATE." -#: commands/trigger.c:242 +#: commands/trigger.c:248 #, c-format msgid "Foreign tables cannot have constraint triggers." msgstr "Les tables distantes ne peuvent pas avoir de triggers de contrainte." 
-#: commands/trigger.c:305 +#: commands/trigger.c:311 #, c-format msgid "TRUNCATE FOR EACH ROW triggers are not supported" msgstr "les triggers TRUNCATE FOR EACH ROW ne sont pas supportés" -#: commands/trigger.c:313 +#: commands/trigger.c:319 #, c-format msgid "INSTEAD OF triggers must be FOR EACH ROW" msgstr "les triggers INSTEAD OF doivent être FOR EACH ROW" -#: commands/trigger.c:317 +#: commands/trigger.c:323 #, c-format msgid "INSTEAD OF triggers cannot have WHEN conditions" msgstr "les triggers INSTEAD OF ne peuvent pas avoir de conditions WHEN" -#: commands/trigger.c:321 +#: commands/trigger.c:327 #, c-format msgid "INSTEAD OF triggers cannot have column lists" msgstr "les triggers INSTEAD OF ne peuvent pas avoir de liste de colonnes" -#: commands/trigger.c:350 +#: commands/trigger.c:356 #, c-format msgid "ROW variable naming in the REFERENCING clause is not supported" msgstr "le nommage de variable ROW dans la clause REFERENCING n'est pas supporté" -#: commands/trigger.c:351 +#: commands/trigger.c:357 #, c-format msgid "Use OLD TABLE or NEW TABLE for naming transition tables." msgstr "Utilisez OLD TABLE ou NEW TABLE pour nommer les tables de transition." -#: commands/trigger.c:364 +#: commands/trigger.c:370 #, c-format msgid "Triggers on foreign tables cannot have transition tables." msgstr "Les triggers sur les tables distantes ne peuvent pas avoir de tables de transition." -#: commands/trigger.c:371 +#: commands/trigger.c:377 #, c-format msgid "Triggers on views cannot have transition tables." msgstr "Les triggers sur les vues ne peuvent pas avoir de tables de transition." -#: commands/trigger.c:391 +#: commands/trigger.c:397 #, c-format msgid "ROW triggers with transition tables are not supported on inheritance children" msgstr "les triggers ROW avec des tables de transition ne sont pas supportés sur les filles en héritage" -#: commands/trigger.c:397 +#: commands/trigger.c:403 #, c-format msgid "transition table name can only be specified for an AFTER trigger" msgstr "le nom de la table de transition peut seulement être spécifié pour un trigger AFTER" -#: commands/trigger.c:402 +#: commands/trigger.c:408 #, c-format msgid "TRUNCATE triggers with transition tables are not supported" msgstr "les triggers TRUNCATE avec des tables de transition ne sont pas supportés" -#: commands/trigger.c:419 +#: commands/trigger.c:425 +#, c-format +msgid "transition tables cannot be specified for triggers with more than one event" +msgstr "les tables de transition ne peuvent pas être spécifiées pour les triggers avec plus d'un événement" + +#: commands/trigger.c:436 #, c-format -msgid "Transition tables cannot be specified for triggers with more than one event" -msgstr "Les tables de transition ne peuvent pas être spécifiées pour les triggers avec plus d'un événement" +msgid "transition tables cannot be specified for triggers with column lists" +msgstr "les tables de transition ne peuvent pas être spécifiées pour les triggers avec des listes de colonnes" -#: commands/trigger.c:427 +#: commands/trigger.c:453 #, c-format msgid "NEW TABLE can only be specified for an INSERT or UPDATE trigger" msgstr "NEW TABLE peut seulement être spécifié pour un trigger INSERT ou UPDATE" -#: commands/trigger.c:432 +#: commands/trigger.c:458 #, c-format msgid "NEW TABLE cannot be specified multiple times" msgstr "NEW TABLE ne peut pas être spécifié plusieurs fois" -#: commands/trigger.c:442 +#: commands/trigger.c:468 #, c-format msgid "OLD TABLE can only be specified for a DELETE or UPDATE trigger" msgstr "OLD TABLE 
peut seulement être spécifié pour un trigger DELETE ou UPDATE" -#: commands/trigger.c:447 +#: commands/trigger.c:473 #, c-format msgid "OLD TABLE cannot be specified multiple times" msgstr "OLD TABLE ne peut pas être spécifié plusieurs fois" -#: commands/trigger.c:457 +#: commands/trigger.c:483 #, c-format msgid "OLD TABLE name and NEW TABLE name cannot be the same" msgstr "les noms de OLD TABLE et NEW TABLE ne peuvent pas être identiques" -#: commands/trigger.c:514 commands/trigger.c:527 +#: commands/trigger.c:540 commands/trigger.c:553 #, c-format msgid "statement trigger's WHEN condition cannot reference column values" msgstr "" "la condition WHEN d'un trigger au niveau instruction ne peut pas référencer les valeurs\n" "des colonnes" -#: commands/trigger.c:519 +#: commands/trigger.c:545 #, c-format msgid "INSERT trigger's WHEN condition cannot reference OLD values" msgstr "la condition WHEN du trigger INSERT ne peut pas référencer les valeurs OLD" -#: commands/trigger.c:532 +#: commands/trigger.c:558 #, c-format msgid "DELETE trigger's WHEN condition cannot reference NEW values" msgstr "la condition WHEN du trigger DELETE ne peut pas référencer les valeurs NEW" -#: commands/trigger.c:537 +#: commands/trigger.c:563 #, c-format msgid "BEFORE trigger's WHEN condition cannot reference NEW system columns" msgstr "" "la condition WHEN d'un trigger BEFORE ne doit pas référencer les colonnes\n" "système avec NEW" -#: commands/trigger.c:702 commands/trigger.c:1473 +#: commands/trigger.c:728 commands/trigger.c:1499 #, c-format msgid "trigger \"%s\" for relation \"%s\" already exists" msgstr "le trigger « %s » de la relation « %s » existe déjà" -#: commands/trigger.c:998 +#: commands/trigger.c:1024 msgid "Found referenced table's UPDATE trigger." msgstr "Trigger UPDATE de la table référencée trouvé." -#: commands/trigger.c:999 +#: commands/trigger.c:1025 msgid "Found referenced table's DELETE trigger." msgstr "Trigger DELETE de la table référencée trouvé." -#: commands/trigger.c:1000 +#: commands/trigger.c:1026 msgid "Found referencing table's trigger." msgstr "Trigger de la table référençante trouvé." 
-#: commands/trigger.c:1109 commands/trigger.c:1125 +#: commands/trigger.c:1135 commands/trigger.c:1151 #, c-format msgid "ignoring incomplete trigger group for constraint \"%s\" %s" msgstr "ignore le groupe de trigger incomplet pour la contrainte « %s » %s" -#: commands/trigger.c:1138 +#: commands/trigger.c:1164 #, c-format msgid "converting trigger group into constraint \"%s\" %s" msgstr "conversion du groupe de trigger en une contrainte « %s » %s" -#: commands/trigger.c:1359 commands/trigger.c:1518 commands/trigger.c:1633 +#: commands/trigger.c:1385 commands/trigger.c:1544 commands/trigger.c:1659 #, c-format msgid "trigger \"%s\" for table \"%s\" does not exist" msgstr "le trigger « %s » de la table « %s » n'existe pas" -#: commands/trigger.c:1601 +#: commands/trigger.c:1627 #, c-format msgid "permission denied: \"%s\" is a system trigger" msgstr "droit refusé : « %s » est un trigger système" -#: commands/trigger.c:2270 +#: commands/trigger.c:2206 #, c-format msgid "trigger function %u returned null value" msgstr "la fonction trigger %u a renvoyé la valeur NULL" -#: commands/trigger.c:2331 commands/trigger.c:2541 commands/trigger.c:2755 commands/trigger.c:3040 +#: commands/trigger.c:2272 commands/trigger.c:2487 commands/trigger.c:2706 commands/trigger.c:2991 #, c-format msgid "BEFORE STATEMENT trigger cannot return a value" msgstr "le trigger BEFORE STATEMENT ne peut pas renvoyer une valeur" -#: commands/trigger.c:3102 executor/nodeModifyTable.c:795 executor/nodeModifyTable.c:1092 +#: commands/trigger.c:3053 executor/nodeModifyTable.c:798 executor/nodeModifyTable.c:1095 #, c-format msgid "tuple to be updated was already modified by an operation triggered by the current command" msgstr "la ligne à mettre à jour était déjà modifiée par une opération déclenchée par la commande courante" -#: commands/trigger.c:3103 executor/nodeModifyTable.c:796 executor/nodeModifyTable.c:1093 +#: commands/trigger.c:3054 executor/nodeModifyTable.c:799 executor/nodeModifyTable.c:1096 #, c-format msgid "Consider using an AFTER trigger instead of a BEFORE trigger to propagate changes to other rows." msgstr "Considérez l'utilisation d'un trigger AFTER au lieu d'un trigger BEFORE pour propager les changements sur les autres lignes." 
-#: commands/trigger.c:3117 executor/execMain.c:2667 executor/nodeLockRows.c:220 executor/nodeModifyTable.c:214 executor/nodeModifyTable.c:808 executor/nodeModifyTable.c:1105 executor/nodeModifyTable.c:1272 +#: commands/trigger.c:3068 executor/execMain.c:2696 executor/nodeLockRows.c:220 executor/nodeModifyTable.c:214 executor/nodeModifyTable.c:811 executor/nodeModifyTable.c:1108 executor/nodeModifyTable.c:1277 #, c-format msgid "could not serialize access due to concurrent update" msgstr "n'a pas pu sérialiser un accès à cause d'une mise à jour en parallèle" -#: commands/trigger.c:4983 +#: commands/trigger.c:5200 #, c-format msgid "constraint \"%s\" is not deferrable" msgstr "la contrainte « %s » n'est pas DEFERRABLE" -#: commands/trigger.c:5006 +#: commands/trigger.c:5223 #, c-format msgid "constraint \"%s\" does not exist" msgstr "la contrainte « %s » n'existe pas" @@ -9725,7 +9759,7 @@ msgstr "format de liste de paramètres invalide : « %s »" msgid "must be superuser to create a base type" msgstr "doit être super-utilisateur pour créer un type de base" -#: commands/typecmds.c:290 commands/typecmds.c:1414 +#: commands/typecmds.c:290 commands/typecmds.c:1435 #, c-format msgid "type attribute \"%s\" not recognized" msgstr "attribut du type « %s » non reconnu" @@ -9837,177 +9871,177 @@ msgstr "contraintes NULL/NOT NULL en conflit" msgid "check constraints for domains cannot be marked NO INHERIT" msgstr "les contraintes CHECK pour les domaines ne peuvent pas être marquées NO INHERIT" -#: commands/typecmds.c:993 commands/typecmds.c:2512 +#: commands/typecmds.c:993 commands/typecmds.c:2533 #, c-format msgid "unique constraints not possible for domains" msgstr "contraintes uniques impossible pour les domaines" -#: commands/typecmds.c:999 commands/typecmds.c:2518 +#: commands/typecmds.c:999 commands/typecmds.c:2539 #, c-format msgid "primary key constraints not possible for domains" msgstr "contraintes de clé primaire impossible pour les domaines" -#: commands/typecmds.c:1005 commands/typecmds.c:2524 +#: commands/typecmds.c:1005 commands/typecmds.c:2545 #, c-format msgid "exclusion constraints not possible for domains" msgstr "contraintes d'exclusion impossible pour les domaines" -#: commands/typecmds.c:1011 commands/typecmds.c:2530 +#: commands/typecmds.c:1011 commands/typecmds.c:2551 #, c-format msgid "foreign key constraints not possible for domains" msgstr "contraintes de clé étrangère impossible pour les domaines" -#: commands/typecmds.c:1020 commands/typecmds.c:2539 +#: commands/typecmds.c:1020 commands/typecmds.c:2560 #, c-format msgid "specifying constraint deferrability not supported for domains" msgstr "spécifier des contraintes déferrantes n'est pas supporté par les domaines" -#: commands/typecmds.c:1284 utils/cache/typcache.c:1648 +#: commands/typecmds.c:1305 utils/cache/typcache.c:1698 #, c-format msgid "%s is not an enum" msgstr "%s n'est pas un enum" -#: commands/typecmds.c:1422 +#: commands/typecmds.c:1443 #, c-format msgid "type attribute \"subtype\" is required" msgstr "l'attribut du sous-type est requis" -#: commands/typecmds.c:1427 +#: commands/typecmds.c:1448 #, c-format msgid "range subtype cannot be %s" msgstr "le sous-type de l'intervalle ne peut pas être %s" -#: commands/typecmds.c:1446 +#: commands/typecmds.c:1467 #, c-format msgid "range collation specified but subtype does not support collation" msgstr "collationnement spécifié pour l'intervalle mais le sous-type ne supporte pas les collationnements" -#: commands/typecmds.c:1680 +#: commands/typecmds.c:1701 #, c-format 
msgid "changing argument type of function %s from \"opaque\" to \"cstring\"" msgstr "changement du type d'argument de la fonction %s d'« opaque » à « cstring »" -#: commands/typecmds.c:1731 +#: commands/typecmds.c:1752 #, c-format msgid "changing argument type of function %s from \"opaque\" to %s" msgstr "changement du type d'argument de la fonction %s d'« opaque » à %s" -#: commands/typecmds.c:1830 +#: commands/typecmds.c:1851 #, c-format msgid "typmod_in function %s must return type %s" msgstr "le type de sortie de la fonction typmod_in %s doit être %s" -#: commands/typecmds.c:1857 +#: commands/typecmds.c:1878 #, c-format msgid "typmod_out function %s must return type %s" msgstr "le type de sortie de la fonction typmod_out %s doit être %s" -#: commands/typecmds.c:1884 +#: commands/typecmds.c:1905 #, c-format msgid "type analyze function %s must return type %s" msgstr "la fonction analyze du type %s doit renvoyer le type %s" -#: commands/typecmds.c:1930 +#: commands/typecmds.c:1951 #, c-format msgid "You must specify an operator class for the range type or define a default operator class for the subtype." msgstr "" "Vous devez spécifier une classe d'opérateur pour le type range ou définir une\n" "classe d'opérateur par défaut pour le sous-type." -#: commands/typecmds.c:1961 +#: commands/typecmds.c:1982 #, c-format msgid "range canonical function %s must return range type" msgstr "la fonction canonical %s du range doit renvoyer le type range" -#: commands/typecmds.c:1967 +#: commands/typecmds.c:1988 #, c-format msgid "range canonical function %s must be immutable" msgstr "la fonction canonical %s du range doit être immutable" -#: commands/typecmds.c:2003 +#: commands/typecmds.c:2024 #, c-format msgid "range subtype diff function %s must return type %s" msgstr "" "la fonction %s de calcul de différence pour le sous-type d'un intervalle de\n" "valeur doit renvoyer le type %s" -#: commands/typecmds.c:2010 +#: commands/typecmds.c:2031 #, c-format msgid "range subtype diff function %s must be immutable" msgstr "" "la fonction %s de calcul de différence pour le sous-type d'un intervalle de\n" "valeur doit être immutable" -#: commands/typecmds.c:2037 +#: commands/typecmds.c:2058 #, c-format msgid "pg_type array OID value not set when in binary upgrade mode" msgstr "les valeurs d'OID du tableau pgtype ne sont pas positionnées en mode de mise à jour binaire" -#: commands/typecmds.c:2340 +#: commands/typecmds.c:2361 #, c-format msgid "column \"%s\" of table \"%s\" contains null values" msgstr "la colonne « %s » de la table « %s » contient des valeurs NULL" -#: commands/typecmds.c:2453 commands/typecmds.c:2636 +#: commands/typecmds.c:2474 commands/typecmds.c:2657 #, c-format msgid "constraint \"%s\" of domain \"%s\" does not exist" msgstr "la contrainte « %s » du domaine « %s » n'existe pas" -#: commands/typecmds.c:2457 +#: commands/typecmds.c:2478 #, c-format msgid "constraint \"%s\" of domain \"%s\" does not exist, skipping" msgstr "la contrainte « %s » du domaine « %s » n'existe pas, ignore" -#: commands/typecmds.c:2642 +#: commands/typecmds.c:2663 #, c-format msgid "constraint \"%s\" of domain \"%s\" is not a check constraint" msgstr "la contrainte « %s » du domaine « %s » n'est pas une contrainte de vérification" -#: commands/typecmds.c:2747 +#: commands/typecmds.c:2768 #, c-format msgid "column \"%s\" of table \"%s\" contains values that violate the new constraint" msgstr "" "la colonne « %s » de la table « %s » contient des valeurs violant la\n" "nouvelle contrainte" -#: 
commands/typecmds.c:2960 commands/typecmds.c:3165 commands/typecmds.c:3247 commands/typecmds.c:3434 +#: commands/typecmds.c:2996 commands/typecmds.c:3201 commands/typecmds.c:3283 commands/typecmds.c:3470 #, c-format msgid "%s is not a domain" msgstr "%s n'est pas un domaine" -#: commands/typecmds.c:2994 +#: commands/typecmds.c:3030 #, c-format msgid "constraint \"%s\" for domain \"%s\" already exists" msgstr "la contrainte « %s » du domaine « %s » existe déjà" -#: commands/typecmds.c:3045 +#: commands/typecmds.c:3081 #, c-format msgid "cannot use table references in domain check constraint" msgstr "" "ne peut pas utiliser les références de table dans la contrainte de\n" "vérification du domaine" -#: commands/typecmds.c:3177 commands/typecmds.c:3259 commands/typecmds.c:3551 +#: commands/typecmds.c:3213 commands/typecmds.c:3295 commands/typecmds.c:3587 #, c-format msgid "%s is a table's row type" msgstr "« %s » est du type ligne de table" -#: commands/typecmds.c:3179 commands/typecmds.c:3261 commands/typecmds.c:3553 +#: commands/typecmds.c:3215 commands/typecmds.c:3297 commands/typecmds.c:3589 #, c-format msgid "Use ALTER TABLE instead." msgstr "Utilisez ALTER TABLE à la place." -#: commands/typecmds.c:3186 commands/typecmds.c:3268 commands/typecmds.c:3466 +#: commands/typecmds.c:3222 commands/typecmds.c:3304 commands/typecmds.c:3502 #, c-format msgid "cannot alter array type %s" msgstr "ne peut pas modifier le type array %s" -#: commands/typecmds.c:3188 commands/typecmds.c:3270 commands/typecmds.c:3468 +#: commands/typecmds.c:3224 commands/typecmds.c:3306 commands/typecmds.c:3504 #, c-format msgid "You can alter type %s, which will alter the array type as well." msgstr "Vous pouvez modifier le type %s, ce qui va modifier aussi le type tableau." -#: commands/typecmds.c:3536 +#: commands/typecmds.c:3572 #, c-format msgid "type \"%s\" already exists in schema \"%s\"" msgstr "le type « %s » existe déjà dans le schéma « %s »" @@ -10027,7 +10061,7 @@ msgstr "doit être super-utilisateur pour créer des super-utilisateurs" msgid "must be superuser to create replication users" msgstr "doit être super-utilisateur pour créer des utilisateurs avec l'attribut réplication" -#: commands/user.c:309 commands/user.c:684 +#: commands/user.c:309 commands/user.c:707 #, c-format msgid "must be superuser to change bypassrls attribute" msgstr "doit être super-utilisateur pour modifier l'attribut bypassrls" @@ -10037,226 +10071,231 @@ msgstr "doit être super-utilisateur pour modifier l'attribut bypassrls" msgid "permission denied to create role" msgstr "droit refusé pour créer un rôle" -#: commands/user.c:326 commands/user.c:1160 commands/user.c:1167 gram.y:14465 gram.y:14500 utils/adt/acl.c:5246 utils/adt/acl.c:5252 +#: commands/user.c:326 commands/user.c:1195 commands/user.c:1202 gram.y:14465 gram.y:14500 utils/adt/acl.c:5251 utils/adt/acl.c:5257 #, c-format msgid "role name \"%s\" is reserved" msgstr "le nom du rôle « %s » est réservé" -#: commands/user.c:328 commands/user.c:1162 commands/user.c:1169 +#: commands/user.c:328 commands/user.c:1197 commands/user.c:1204 #, c-format msgid "Role names starting with \"pg_\" are reserved." msgstr "Les noms de rôle commençant par « pg_ » sont réservés." 
-#: commands/user.c:340 commands/user.c:1175 +#: commands/user.c:340 commands/user.c:1210 #, c-format msgid "role \"%s\" already exists" msgstr "le rôle « %s » existe déjà" -#: commands/user.c:414 +#: commands/user.c:406 commands/user.c:816 +#, c-format +msgid "empty string is not a valid password, clearing password" +msgstr "une chaîne vide n'est pas un mot de passe valide, effacement du mot de passe" + +#: commands/user.c:437 #, c-format msgid "pg_authid OID value not set when in binary upgrade mode" msgstr "la valeur d'OID de pg_authid n'est pas positionnée en mode de mise à jour binaire" -#: commands/user.c:670 commands/user.c:880 commands/user.c:1414 commands/user.c:1558 +#: commands/user.c:693 commands/user.c:915 commands/user.c:1449 commands/user.c:1593 #, c-format msgid "must be superuser to alter superusers" msgstr "doit être super-utilisateur pour modifier des super-utilisateurs" -#: commands/user.c:677 +#: commands/user.c:700 #, c-format msgid "must be superuser to alter replication users" msgstr "doit être super-utilisateur pour modifier des utilisateurs ayant l'attribut réplication" -#: commands/user.c:700 commands/user.c:888 +#: commands/user.c:723 commands/user.c:923 #, c-format msgid "permission denied" msgstr "droit refusé" -#: commands/user.c:918 +#: commands/user.c:953 #, c-format msgid "must be superuser to alter settings globally" msgstr "doit être super-utilisateur pour modifier globalement les configurations" -#: commands/user.c:940 +#: commands/user.c:975 #, c-format msgid "permission denied to drop role" msgstr "droit refusé pour supprimer le rôle" -#: commands/user.c:964 +#: commands/user.c:999 #, c-format msgid "cannot use special role specifier in DROP ROLE" msgstr "ne peut pas être le spécificateur de rôle spécial dans DROP ROLE" -#: commands/user.c:974 commands/user.c:1131 commands/variable.c:822 commands/variable.c:894 utils/adt/acl.c:5104 utils/adt/acl.c:5151 utils/adt/acl.c:5179 utils/adt/acl.c:5197 utils/init/miscinit.c:504 +#: commands/user.c:1009 commands/user.c:1166 commands/variable.c:822 commands/variable.c:894 utils/adt/acl.c:5109 utils/adt/acl.c:5156 utils/adt/acl.c:5184 utils/adt/acl.c:5202 utils/init/miscinit.c:504 #, c-format msgid "role \"%s\" does not exist" msgstr "le rôle « %s » n'existe pas" -#: commands/user.c:979 +#: commands/user.c:1014 #, c-format msgid "role \"%s\" does not exist, skipping" msgstr "le rôle « %s » n'existe pas, poursuite du traitement" -#: commands/user.c:991 commands/user.c:995 +#: commands/user.c:1026 commands/user.c:1030 #, c-format msgid "current user cannot be dropped" msgstr "l'utilisateur actuel ne peut pas être supprimé" -#: commands/user.c:999 +#: commands/user.c:1034 #, c-format msgid "session user cannot be dropped" msgstr "l'utilisateur de la session ne peut pas être supprimé" -#: commands/user.c:1010 +#: commands/user.c:1045 #, c-format msgid "must be superuser to drop superusers" msgstr "doit être super-utilisateur pour supprimer des super-utilisateurs" -#: commands/user.c:1026 +#: commands/user.c:1061 #, c-format msgid "role \"%s\" cannot be dropped because some objects depend on it" msgstr "le rôle « %s » ne peut pas être supprimé car d'autres objets en dépendent" -#: commands/user.c:1147 +#: commands/user.c:1182 #, c-format msgid "session user cannot be renamed" msgstr "l'utilisateur de la session ne peut pas être renommé" -#: commands/user.c:1151 +#: commands/user.c:1186 #, c-format msgid "current user cannot be renamed" msgstr "l'utilisateur courant ne peut pas être renommé" -#: commands/user.c:1185 
+#: commands/user.c:1220 #, c-format msgid "must be superuser to rename superusers" msgstr "doit être super-utilisateur pour renommer les super-utilisateurs" -#: commands/user.c:1192 +#: commands/user.c:1227 #, c-format msgid "permission denied to rename role" msgstr "droit refusé pour renommer le rôle" -#: commands/user.c:1213 +#: commands/user.c:1248 #, c-format msgid "MD5 password cleared because of role rename" msgstr "mot de passe MD5 effacé à cause du renommage du rôle" -#: commands/user.c:1273 +#: commands/user.c:1308 #, c-format msgid "column names cannot be included in GRANT/REVOKE ROLE" msgstr "les noms de colonne ne peuvent pas être inclus dans GRANT/REVOKE ROLE" -#: commands/user.c:1311 +#: commands/user.c:1346 #, c-format msgid "permission denied to drop objects" msgstr "droit refusé pour supprimer les objets" -#: commands/user.c:1338 commands/user.c:1347 +#: commands/user.c:1373 commands/user.c:1382 #, c-format msgid "permission denied to reassign objects" msgstr "droit refusé pour ré-affecter les objets" -#: commands/user.c:1422 commands/user.c:1566 +#: commands/user.c:1457 commands/user.c:1601 #, c-format msgid "must have admin option on role \"%s\"" msgstr "doit avoir l'option admin sur le rôle « %s »" -#: commands/user.c:1439 +#: commands/user.c:1474 #, c-format msgid "must be superuser to set grantor" msgstr "doit être super-utilisateur pour configurer le « donneur de droits »" -#: commands/user.c:1464 +#: commands/user.c:1499 #, c-format msgid "role \"%s\" is a member of role \"%s\"" msgstr "le rôle « %s » est un membre du rôle « %s »" -#: commands/user.c:1479 +#: commands/user.c:1514 #, c-format msgid "role \"%s\" is already a member of role \"%s\"" msgstr "le rôle « %s » est déjà un membre du rôle « %s »" -#: commands/user.c:1588 +#: commands/user.c:1623 #, c-format msgid "role \"%s\" is not a member of role \"%s\"" msgstr "le rôle « %s » n'est pas un membre du rôle « %s »" -#: commands/vacuum.c:186 +#: commands/vacuum.c:188 #, c-format msgid "%s cannot be executed from VACUUM or ANALYZE" msgstr "%s ne peut pas être exécuté dans un VACUUM ou un ANALYZE" -#: commands/vacuum.c:196 +#: commands/vacuum.c:198 #, c-format msgid "VACUUM option DISABLE_PAGE_SKIPPING cannot be used with FULL" msgstr "" "l'option DISABLE_PAGE_SKIPPING de la commande VACUUM ne peut pas être utilisée\n" "en même temps que l'option FULL" -#: commands/vacuum.c:565 +#: commands/vacuum.c:577 #, c-format msgid "oldest xmin is far in the past" msgstr "le plus ancien xmin est loin dans le passé" -#: commands/vacuum.c:566 +#: commands/vacuum.c:578 #, c-format msgid "Close open transactions soon to avoid wraparound problems." msgstr "" "Fermez les transactions ouvertes rapidement pour éviter des problèmes de\n" "réinitialisation." -#: commands/vacuum.c:605 +#: commands/vacuum.c:617 #, c-format msgid "oldest multixact is far in the past" msgstr "le plus ancien multixact est loin dans le passé" -#: commands/vacuum.c:606 +#: commands/vacuum.c:618 #, c-format msgid "Close open transactions with multixacts soon to avoid wraparound problems." msgstr "" "Fermez les transactions ouvertes avec multixacts rapidement pour éviter des problèmes de\n" "réinitialisation." 
-#: commands/vacuum.c:1176 +#: commands/vacuum.c:1188 #, c-format msgid "some databases have not been vacuumed in over 2 billion transactions" msgstr "" "certaines bases de données n'ont pas eu droit à l'opération de maintenance\n" "VACUUM depuis plus de 2 milliards de transactions" -#: commands/vacuum.c:1177 +#: commands/vacuum.c:1189 #, c-format msgid "You might have already suffered transaction-wraparound data loss." msgstr "" "Vous pouvez avoir déjà souffert de pertes de données suite à une\n" "réinitialisation de l'identifiant des transactions." -#: commands/vacuum.c:1306 +#: commands/vacuum.c:1324 #, c-format msgid "skipping vacuum of \"%s\" --- lock not available" msgstr "ignore le vacuum de « %s » --- verrou non disponible" -#: commands/vacuum.c:1332 +#: commands/vacuum.c:1350 #, c-format msgid "skipping \"%s\" --- only superuser can vacuum it" msgstr "ignore « %s » --- seul le super-utilisateur peut exécuter un VACUUM" -#: commands/vacuum.c:1336 +#: commands/vacuum.c:1354 #, c-format msgid "skipping \"%s\" --- only superuser or database owner can vacuum it" msgstr "" "ignore « %s » --- seul le super-utilisateur ou le propriétaire de la base de données\n" "peuvent exécuter un VACUUM" -#: commands/vacuum.c:1340 +#: commands/vacuum.c:1358 #, c-format msgid "skipping \"%s\" --- only table or database owner can vacuum it" msgstr "" "ignore « %s » --- seul le propriétaire de la table ou de la base de données\n" "peut exécuter un VACUUM" -#: commands/vacuum.c:1359 +#: commands/vacuum.c:1377 #, c-format msgid "skipping \"%s\" --- cannot vacuum non-tables or special system tables" msgstr "" @@ -10293,70 +10332,75 @@ msgstr "taux moyen de lecture : %.3f Mo/s, taux moyen d'écriture : %.3f Mo/s\n" msgid "system usage: %s" msgstr "utilisation du système : %s" -#: commands/vacuumlazy.c:858 +#: commands/vacuumlazy.c:860 #, c-format msgid "relation \"%s\" page %u is uninitialized --- fixing" msgstr "relation « %s » : la page %u n'est pas initialisée --- correction en cours" -#: commands/vacuumlazy.c:1328 +#: commands/vacuumlazy.c:1339 #, c-format msgid "\"%s\": removed %.0f row versions in %u pages" msgstr "« %s » : %.0f versions de ligne supprimées parmi %u pages" -#: commands/vacuumlazy.c:1338 +#: commands/vacuumlazy.c:1349 #, c-format msgid "%.0f dead row versions cannot be removed yet, oldest xmin: %u\n" msgstr "%.0f versions de lignes mortes ne peuvent pas encore être supprimées, plus ancien xmin : %u\n" -#: commands/vacuumlazy.c:1340 +#: commands/vacuumlazy.c:1351 #, c-format msgid "There were %.0f unused item pointers.\n" msgstr "Il y avait %.0f pointeurs d'éléments inutilisés.\n" -#: commands/vacuumlazy.c:1342 +#: commands/vacuumlazy.c:1353 #, c-format msgid "Skipped %u page due to buffer pins, " msgid_plural "Skipped %u pages due to buffer pins, " msgstr[0] "Ignore %u page à cause des verrous de blocs, " msgstr[1] "Ignore %u pages à cause des verrous de blocs, " -#: commands/vacuumlazy.c:1346 +#: commands/vacuumlazy.c:1357 #, c-format msgid "%u frozen page.\n" msgid_plural "%u frozen pages.\n" msgstr[0] "%u page gelée.\n" msgstr[1] "%u pages gelées.\n" -#: commands/vacuumlazy.c:1350 +#: commands/vacuumlazy.c:1361 #, c-format msgid "%u page is entirely empty.\n" msgid_plural "%u pages are entirely empty.\n" msgstr[0] "%u page est entièrement vide.\n" msgstr[1] "%u pages sont entièrement vides.\n" -#: commands/vacuumlazy.c:1357 +#: commands/vacuumlazy.c:1365 +#, c-format +msgid "%s." +msgstr "%s." 
+ +#: commands/vacuumlazy.c:1368 #, c-format msgid "\"%s\": found %.0f removable, %.0f nonremovable row versions in %u out of %u pages" msgstr "" "« %s » : %.0f versions de ligne supprimables, %.0f non supprimables\n" "parmi %u pages sur %u" -#: commands/vacuumlazy.c:1426 +#: commands/vacuumlazy.c:1437 #, c-format msgid "\"%s\": removed %d row versions in %d pages" msgstr "« %s »: %d versions de ligne supprimée parmi %d pages" -#: commands/vacuumlazy.c:1614 +#: commands/vacuumlazy.c:1625 #, c-format msgid "scanned index \"%s\" to remove %d row versions" msgstr "a parcouru l'index « %s » pour supprimer %d versions de lignes" -#: commands/vacuumlazy.c:1660 +#: commands/vacuumlazy.c:1671 #, c-format msgid "index \"%s\" now contains %.0f row versions in %u pages" msgstr "l'index « %s » contient maintenant %.0f versions de ligne dans %u pages" -#: commands/vacuumlazy.c:1664 +#: commands/vacuumlazy.c:1675 #, c-format msgid "" "%.0f index row versions were removed.\n" @@ -10367,22 +10411,22 @@ msgstr "" "%u pages d'index ont été supprimées, %u sont actuellement réutilisables.\n" "%s." -#: commands/vacuumlazy.c:1759 +#: commands/vacuumlazy.c:1770 #, c-format msgid "\"%s\": stopping truncate due to conflicting lock request" msgstr "« %s » : mis en suspens du tronquage à cause d'un conflit dans la demande de verrou" -#: commands/vacuumlazy.c:1824 +#: commands/vacuumlazy.c:1835 #, c-format msgid "\"%s\": truncated %u to %u pages" msgstr "« %s » : %u pages tronqués en %u" -#: commands/vacuumlazy.c:1889 +#: commands/vacuumlazy.c:1900 #, c-format msgid "\"%s\": suspending truncate due to conflicting lock request" msgstr "« %s » : mis en suspens du tronquage à cause d'un conflit dans la demande de verrou" -#: commands/variable.c:165 utils/misc/guc.c:10030 utils/misc/guc.c:10092 +#: commands/variable.c:165 utils/misc/guc.c:10023 utils/misc/guc.c:10085 #, c-format msgid "Unrecognized key word: \"%s\"." msgstr "Mot clé non reconnu : « %s »" @@ -10450,7 +10494,7 @@ msgstr "" "SET TRANSACTION ISOLATION LEVEL ne doit pas être appelé dans une\n" "sous-transaction" -#: commands/variable.c:571 storage/lmgr/predicate.c:1647 +#: commands/variable.c:571 storage/lmgr/predicate.c:1649 #, c-format msgid "cannot use serializable mode in a hot standby" msgstr "ne peut pas utiliser le mode sérialisable sur un serveur en « Hot Standby »" @@ -10609,7 +10653,7 @@ msgstr "le type cible n'est pas un tableau" msgid "ROW() column has type %s instead of type %s" msgstr "une colonne ROW() a le type %s au lieu du type %s" -#: executor/execExpr.c:2094 executor/execSRF.c:670 parser/parse_func.c:120 parser/parse_func.c:547 parser/parse_func.c:921 +#: executor/execExpr.c:2094 executor/execSRF.c:672 parser/parse_func.c:120 parser/parse_func.c:547 parser/parse_func.c:921 #, c-format msgid "cannot pass more than %d argument to a function" msgid_plural "cannot pass more than %d arguments to a function" @@ -10695,7 +10739,7 @@ msgstr[1] "La ligne de la table contient %d attributs alors que la requête en a msgid "Table has type %s at ordinal position %d, but query expects %s." msgstr "La table a le type %s à la position ordinale %d alors que la requête attend %s." -#: executor/execExprInterp.c:3555 executor/execSRF.c:925 +#: executor/execExprInterp.c:3555 executor/execSRF.c:927 #, c-format msgid "Physical storage mismatch on dropped attribute at ordinal position %d." msgstr "" @@ -10737,167 +10781,167 @@ msgstr "La clé %s est en conflit avec la clé existante %s." msgid "Key conflicts with existing key." 
msgstr "La clé est en conflit avec une clé existante." -#: executor/execMain.c:1113 +#: executor/execMain.c:1115 #, c-format msgid "cannot change sequence \"%s\"" msgstr "ne peut pas modifier la séquence « %s »" -#: executor/execMain.c:1119 +#: executor/execMain.c:1121 #, c-format msgid "cannot change TOAST relation \"%s\"" msgstr "ne peut pas modifier la relation TOAST « %s »" -#: executor/execMain.c:1137 rewrite/rewriteHandler.c:2738 +#: executor/execMain.c:1139 rewrite/rewriteHandler.c:2747 #, c-format msgid "cannot insert into view \"%s\"" msgstr "ne peut pas insérer dans la vue « %s »" -#: executor/execMain.c:1139 rewrite/rewriteHandler.c:2741 +#: executor/execMain.c:1141 rewrite/rewriteHandler.c:2750 #, c-format msgid "To enable inserting into the view, provide an INSTEAD OF INSERT trigger or an unconditional ON INSERT DO INSTEAD rule." msgstr "Pour activer l'insertion dans la vue, fournissez un trigger INSTEAD OF INSERT ou une règle ON INSERT DO INSTEAD sans condition." -#: executor/execMain.c:1145 rewrite/rewriteHandler.c:2746 +#: executor/execMain.c:1147 rewrite/rewriteHandler.c:2755 #, c-format msgid "cannot update view \"%s\"" msgstr "ne peut pas mettre à jour la vue « %s »" -#: executor/execMain.c:1147 rewrite/rewriteHandler.c:2749 +#: executor/execMain.c:1149 rewrite/rewriteHandler.c:2758 #, c-format msgid "To enable updating the view, provide an INSTEAD OF UPDATE trigger or an unconditional ON UPDATE DO INSTEAD rule." msgstr "Pour activer la mise à jour dans la vue, fournissez un trigger INSTEAD OF UPDATE ou une règle ON UPDATE DO INSTEAD sans condition." -#: executor/execMain.c:1153 rewrite/rewriteHandler.c:2754 +#: executor/execMain.c:1155 rewrite/rewriteHandler.c:2763 #, c-format msgid "cannot delete from view \"%s\"" msgstr "ne peut pas supprimer à partir de la vue « %s »" -#: executor/execMain.c:1155 rewrite/rewriteHandler.c:2757 +#: executor/execMain.c:1157 rewrite/rewriteHandler.c:2766 #, c-format msgid "To enable deleting from the view, provide an INSTEAD OF DELETE trigger or an unconditional ON DELETE DO INSTEAD rule." msgstr "Pour activer la suppression dans la vue, fournissez un trigger INSTEAD OF DELETE ou une règle ON DELETE DO INSTEAD sans condition." 
-#: executor/execMain.c:1166 +#: executor/execMain.c:1168 #, c-format msgid "cannot change materialized view \"%s\"" msgstr "ne peut pas modifier la vue matérialisée « %s »" -#: executor/execMain.c:1178 +#: executor/execMain.c:1187 #, c-format msgid "cannot insert into foreign table \"%s\"" msgstr "ne peut pas insérer dans la table distante « %s »" -#: executor/execMain.c:1184 +#: executor/execMain.c:1193 #, c-format msgid "foreign table \"%s\" does not allow inserts" msgstr "la table distante « %s » n'autorise pas les insertions" -#: executor/execMain.c:1191 +#: executor/execMain.c:1200 #, c-format msgid "cannot update foreign table \"%s\"" msgstr "ne peut pas modifier la table distante « %s »" -#: executor/execMain.c:1197 +#: executor/execMain.c:1206 #, c-format msgid "foreign table \"%s\" does not allow updates" msgstr "la table distante « %s » n'autorise pas les modifications" -#: executor/execMain.c:1204 +#: executor/execMain.c:1213 #, c-format msgid "cannot delete from foreign table \"%s\"" msgstr "ne peut pas supprimer à partir de la table distante « %s »" -#: executor/execMain.c:1210 +#: executor/execMain.c:1219 #, c-format msgid "foreign table \"%s\" does not allow deletes" msgstr "la table distante « %s » n'autorise pas les suppressions" -#: executor/execMain.c:1221 +#: executor/execMain.c:1230 #, c-format msgid "cannot change relation \"%s\"" msgstr "ne peut pas modifier la relation « %s »" -#: executor/execMain.c:1248 +#: executor/execMain.c:1257 #, c-format msgid "cannot lock rows in sequence \"%s\"" msgstr "ne peut pas verrouiller les lignes dans la séquence « %s »" -#: executor/execMain.c:1255 +#: executor/execMain.c:1264 #, c-format msgid "cannot lock rows in TOAST relation \"%s\"" msgstr "ne peut pas verrouiller les lignes dans la relation TOAST « %s »" -#: executor/execMain.c:1262 +#: executor/execMain.c:1271 #, c-format msgid "cannot lock rows in view \"%s\"" msgstr "ne peut pas verrouiller les lignes dans la vue « %s »" -#: executor/execMain.c:1270 +#: executor/execMain.c:1279 #, c-format msgid "cannot lock rows in materialized view \"%s\"" msgstr "ne peut pas verrouiller les lignes dans la vue matérialisée « %s »" -#: executor/execMain.c:1279 executor/execMain.c:2901 executor/nodeLockRows.c:136 +#: executor/execMain.c:1288 executor/execMain.c:2930 executor/nodeLockRows.c:136 #, c-format msgid "cannot lock rows in foreign table \"%s\"" msgstr "ne peut pas verrouiller la table distante « %s »" -#: executor/execMain.c:1285 +#: executor/execMain.c:1294 #, c-format msgid "cannot lock rows in relation \"%s\"" msgstr "n'a pas pu verrouiller les lignes dans la relation « %s »" -#: executor/execMain.c:1897 +#: executor/execMain.c:1926 #, c-format msgid "new row for relation \"%s\" violates partition constraint" msgstr "la nouvelle ligne de la relation « %s » viole la contrainte de partitionnement" -#: executor/execMain.c:1899 executor/execMain.c:1978 executor/execMain.c:2025 executor/execMain.c:2136 +#: executor/execMain.c:1928 executor/execMain.c:2007 executor/execMain.c:2054 executor/execMain.c:2165 #, c-format msgid "Failing row contains %s." 
msgstr "La ligne en échec contient %s" -#: executor/execMain.c:1976 +#: executor/execMain.c:2005 #, c-format msgid "null value in column \"%s\" violates not-null constraint" msgstr "une valeur NULL viole la contrainte NOT NULL de la colonne « %s »" -#: executor/execMain.c:2023 +#: executor/execMain.c:2052 #, c-format msgid "new row for relation \"%s\" violates check constraint \"%s\"" -msgstr "la nouvelle ligne viole la contrainte de vérification « %s » de la relation « %s »" +msgstr "la nouvelle ligne de la relation « %s » viole la contrainte de vérification « %s »" -#: executor/execMain.c:2134 +#: executor/execMain.c:2163 #, c-format msgid "new row violates check option for view \"%s\"" msgstr "la nouvelle ligne viole la contrainte de vérification pour la vue « %s »" -#: executor/execMain.c:2144 +#: executor/execMain.c:2173 #, c-format msgid "new row violates row-level security policy \"%s\" for table \"%s\"" msgstr "la nouvelle ligne viole la politique de sécurité au niveau ligne « %s » pour la table « %s »" -#: executor/execMain.c:2149 +#: executor/execMain.c:2178 #, c-format msgid "new row violates row-level security policy for table \"%s\"" msgstr "la nouvelle ligne viole la politique de sécurité au niveau ligne pour la table « %s »" -#: executor/execMain.c:2156 +#: executor/execMain.c:2185 #, c-format msgid "new row violates row-level security policy \"%s\" (USING expression) for table \"%s\"" msgstr "la nouvelle ligne viole la politique de sécurité au niveau ligne « %s » (expression USING) pour la table « %s »" -#: executor/execMain.c:2161 +#: executor/execMain.c:2190 #, c-format msgid "new row violates row-level security policy (USING expression) for table \"%s\"" msgstr "la nouvelle ligne viole la politique de sécurité au niveau ligne (expression USING) pour la table « %s »" -#: executor/execMain.c:3363 +#: executor/execMain.c:3399 #, c-format msgid "no partition of relation \"%s\" found for row" msgstr "aucune partition de la relation « %s » trouvée pour la ligne" -#: executor/execMain.c:3365 +#: executor/execMain.c:3401 #, c-format msgid "Partition key of the failing row contains %s." msgstr "La clé de partitionnement de la ligne en échec contient %s." @@ -10913,8 +10957,9 @@ msgid "could not identify an equality operator for type %s" msgstr "n'a pas pu identifier un opérateur d'égalité pour le type %s" #: executor/execReplication.c:562 -#, c-format -msgid "cannot update table \"%s\" because it does not have replica identity and publishes updates" +#, fuzzy, c-format +#| msgid "cannot update table \"%s\" because it does not have replica identity and publishes updates" +msgid "cannot update table \"%s\" because it does not have a replica identity and publishes updates" msgstr "ne peut pas mettre à jour la table « %s » car elle n'a pas d'identité de réplicat et publie des mises à jour" #: executor/execReplication.c:564 @@ -10923,8 +10968,9 @@ msgid "To enable updating the table, set REPLICA IDENTITY using ALTER TABLE." 
msgstr "Pour activer les mises à jour sur la table, configurez REPLICA IDENTITY en utilisant ALTER TABLE" #: executor/execReplication.c:568 -#, c-format -msgid "cannot delete from table \"%s\" because it does not have replica identity and publishes deletes" +#, fuzzy, c-format +#| msgid "cannot delete from table \"%s\" because it does not have replica identity and publishes deletes" +msgid "cannot delete from table \"%s\" because it does not have a replica identity and publishes deletes" msgstr "ne peut pas supprimer à partir de la table « %s » car elle n'a pas d'identité de réplicat et publie des mises à jour" #: executor/execReplication.c:570 @@ -10942,46 +10988,46 @@ msgstr "la relation cible de la réplication logique « %s.%s » n'est pas une t msgid "rows returned by function are not all of the same row type" msgstr "les lignes renvoyées par la fonction ne sont pas toutes du même type ligne" -#: executor/execSRF.c:356 executor/execSRF.c:620 +#: executor/execSRF.c:356 executor/execSRF.c:622 #, c-format msgid "table-function protocol for materialize mode was not followed" msgstr "le protocole de la fonction table pour le mode matérialisé n'a pas été respecté" -#: executor/execSRF.c:363 executor/execSRF.c:638 +#: executor/execSRF.c:363 executor/execSRF.c:640 #, c-format msgid "unrecognized table-function returnMode: %d" msgstr "returnMode de la fonction table non reconnu : %d" -#: executor/execSRF.c:843 +#: executor/execSRF.c:845 #, c-format msgid "function returning setof record called in context that cannot accept type record" msgstr "" "la fonction renvoyant des lignes a été appelée dans un contexte qui\n" "n'accepte pas un ensemble" -#: executor/execSRF.c:898 executor/execSRF.c:914 executor/execSRF.c:924 +#: executor/execSRF.c:900 executor/execSRF.c:916 executor/execSRF.c:926 #, c-format msgid "function return row and query-specified return row do not match" msgstr "la ligne de retour spécifiée par la requête et la ligne de retour de la fonction ne correspondent pas" -#: executor/execSRF.c:899 +#: executor/execSRF.c:901 #, c-format msgid "Returned row contains %d attribute, but query expects %d." msgid_plural "Returned row contains %d attributes, but query expects %d." msgstr[0] "La ligne renvoyée contient %d attribut mais la requête en attend %d." msgstr[1] "La ligne renvoyée contient %d attributs mais la requête en attend %d." -#: executor/execSRF.c:915 +#: executor/execSRF.c:917 #, c-format msgid "Returned type %s at ordinal position %d, but query expects %s." msgstr "A renvoyé le type %s à la position ordinale %d, mais la requête attend %s." -#: executor/execUtils.c:639 +#: executor/execUtils.c:646 #, c-format msgid "materialized view \"%s\" has not been populated" msgstr "la vue matérialisée « %s » n'a pas été peuplée" -#: executor/execUtils.c:641 +#: executor/execUtils.c:648 #, c-format msgid "Use the REFRESH MATERIALIZED VIEW command." msgstr "Utilisez la commande REFRESH MATERIALIZED VIEW." @@ -11003,7 +11049,7 @@ msgid "%s is not allowed in a SQL function" msgstr "%s n'est pas autorisé dans une fonction SQL" #. translator: %s is a SQL statement name -#: executor/functions.c:534 executor/spi.c:1282 executor/spi.c:2069 +#: executor/functions.c:534 executor/spi.c:1288 executor/spi.c:2075 #, c-format msgid "%s is not allowed in a non-volatile function" msgstr "%s n'est pas autorisé dans une fonction non volatile" @@ -11067,23 +11113,23 @@ msgstr "L'instruction finale renvoie trop peu de colonnes." 
msgid "return type %s is not supported for SQL functions" msgstr "le type de retour %s n'est pas supporté pour les fonctions SQL" -#: executor/nodeAgg.c:3480 +#: executor/nodeAgg.c:3470 parser/parse_agg.c:618 parser/parse_agg.c:648 +#, c-format +msgid "aggregate function calls cannot be nested" +msgstr "les appels à la fonction d'agrégat ne peuvent pas être imbriqués" + +#: executor/nodeAgg.c:3559 #, c-format msgid "combine function for aggregate %u must be declared as STRICT" msgstr "la fonction d'unification pour l'aggrégat %u doit être déclarée comme STRICT" -#: executor/nodeAgg.c:3525 executor/nodeWindowAgg.c:2282 +#: executor/nodeAgg.c:3604 executor/nodeWindowAgg.c:2282 #, c-format msgid "aggregate %u needs to have compatible input type and transition type" msgstr "" "L'agrégat %u a besoin d'avoir un type en entrée compatible avec le type en\n" "transition" -#: executor/nodeAgg.c:3579 parser/parse_agg.c:618 parser/parse_agg.c:648 -#, c-format -msgid "aggregate function calls cannot be nested" -msgstr "les appels à la fonction d'agrégat ne peuvent pas être imbriqués" - #: executor/nodeCustom.c:152 executor/nodeCustom.c:163 #, c-format msgid "custom scan \"%s\" does not support MarkPos" @@ -11104,7 +11150,7 @@ msgstr "n'a pas pu écrire le fichier temporaire de la jointure hâchée : %m" msgid "could not read from hash-join temporary file: %m" msgstr "n'a pas pu lire le fichier temporaire contenant la jointure hâchée : %m" -#: executor/nodeIndexonlyscan.c:236 +#: executor/nodeIndexonlyscan.c:237 #, c-format msgid "lossy distance functions are not supported in index-only scans" msgstr "les fonctions de distance à perte ne sont pas supportés dans les parcours d'index seul" @@ -11119,12 +11165,12 @@ msgstr "OFFSET ne doit pas être négatif" msgid "LIMIT must not be negative" msgstr "LIMIT ne doit pas être négative" -#: executor/nodeMergejoin.c:1559 +#: executor/nodeMergejoin.c:1563 #, c-format msgid "RIGHT JOIN is only supported with merge-joinable join conditions" msgstr "RIGHT JOIN est supporté seulement avec les conditions de jointures MERGE" -#: executor/nodeMergejoin.c:1579 +#: executor/nodeMergejoin.c:1583 #, c-format msgid "FULL JOIN is only supported with merge-joinable join conditions" msgstr "FULL JOIN est supporté seulement avec les conditions de jointures MERGE" @@ -11146,12 +11192,12 @@ msgstr "" msgid "Query has too few columns." msgstr "La requête n'a pas assez de colonnes." -#: executor/nodeModifyTable.c:1253 +#: executor/nodeModifyTable.c:1258 #, c-format msgid "ON CONFLICT DO UPDATE command cannot affect row a second time" msgstr "la commande ON CONFLICT DO UPDATE ne peut pas affecter une ligne la deuxième fois" -#: executor/nodeModifyTable.c:1254 +#: executor/nodeModifyTable.c:1259 #, c-format msgid "Ensure that no rows proposed for insertion within the same command have duplicate constrained values." msgstr "S'assure qu'aucune ligne proposée à l'insertion dans la même commande n'a de valeurs contraintes dupliquées." @@ -11221,43 +11267,43 @@ msgstr "l'offset de fin de frame ne doit pas être NULL" msgid "frame ending offset must not be negative" msgstr "l'offset de fin de frame ne doit pas être négatif" -#: executor/spi.c:197 +#: executor/spi.c:198 #, c-format msgid "transaction left non-empty SPI stack" msgstr "transaction gauche non vide dans la pile SPI" -#: executor/spi.c:198 executor/spi.c:261 +#: executor/spi.c:199 executor/spi.c:262 #, c-format msgid "Check for missing \"SPI_finish\" calls." msgstr "Vérifiez les appels manquants à « SPI_finish »." 
-#: executor/spi.c:260 +#: executor/spi.c:261 #, c-format msgid "subtransaction left non-empty SPI stack" msgstr "sous-transaction gauche non vide dans la pile SPI" -#: executor/spi.c:1143 +#: executor/spi.c:1149 #, c-format msgid "cannot open multi-query plan as cursor" msgstr "ne peut pas ouvrir le plan à plusieurs requêtes comme curseur" #. translator: %s is name of a SQL command, eg INSERT -#: executor/spi.c:1148 +#: executor/spi.c:1154 #, c-format msgid "cannot open %s query as cursor" msgstr "ne peut pas ouvrir la requête %s comme curseur" -#: executor/spi.c:1253 +#: executor/spi.c:1259 #, c-format msgid "DECLARE SCROLL CURSOR ... FOR UPDATE/SHARE is not supported" msgstr "DECLARE SCROLL CURSOR ... FOR UPDATE/SHARE n'est pas supporté" -#: executor/spi.c:1254 parser/analyze.c:2447 +#: executor/spi.c:1260 parser/analyze.c:2447 #, c-format msgid "Scrollable cursors must be READ ONLY." msgstr "Les curseurs déplaçables doivent être en lecture seule (READ ONLY)." -#: executor/spi.c:2374 +#: executor/spi.c:2383 #, c-format msgid "SQL statement \"%s\"" msgstr "instruction SQL « %s »" @@ -11346,7 +11392,7 @@ msgstr "Seules les politiques PERMISSIVE et RESTRICTIVE sont supportées actuell msgid "duplicate trigger events specified" msgstr "événements de trigger dupliqués spécifiés" -#: gram.y:5363 parser/parse_utilcmd.c:3033 parser/parse_utilcmd.c:3059 +#: gram.y:5363 parser/parse_utilcmd.c:3055 parser/parse_utilcmd.c:3081 #, c-format msgid "constraint declared INITIALLY DEFERRED must be DEFERRABLE" msgstr "la contrainte déclarée INITIALLY DEFERRED doit être DEFERRABLE" @@ -11604,76 +11650,76 @@ msgstr "les contraintes %s ne peuvent pas être marquées comme NOT VALID" msgid "%s constraints cannot be marked NO INHERIT" msgstr "les contraintes %s ne peuvent pas être marquées NO INHERIT" -#: guc-file.l:313 +#: guc-file.l:314 #, c-format msgid "unrecognized configuration parameter \"%s\" in file \"%s\" line %u" msgstr "paramètre de configuration « %s » non reconnu dans le fichier « %s », ligne %u" -#: guc-file.l:350 utils/misc/guc.c:6006 utils/misc/guc.c:6199 utils/misc/guc.c:6289 utils/misc/guc.c:6379 utils/misc/guc.c:6487 utils/misc/guc.c:6582 +#: guc-file.l:351 utils/misc/guc.c:6006 utils/misc/guc.c:6199 utils/misc/guc.c:6289 utils/misc/guc.c:6379 utils/misc/guc.c:6487 utils/misc/guc.c:6582 #, c-format msgid "parameter \"%s\" cannot be changed without restarting the server" msgstr "le paramètre « %s » ne peut pas être modifié sans redémarrer le serveur" -#: guc-file.l:386 +#: guc-file.l:387 #, c-format msgid "parameter \"%s\" removed from configuration file, reset to default" msgstr "" "paramètre « %s » supprimé du fichier de configuration ;\n" "réinitialisation à la valeur par défaut" -#: guc-file.l:452 +#: guc-file.l:453 #, c-format msgid "parameter \"%s\" changed to \"%s\"" msgstr "paramètre « %s » modifié par « %s »" -#: guc-file.l:494 +#: guc-file.l:495 #, c-format msgid "configuration file \"%s\" contains errors" msgstr "le fichier de configuration « %s » contient des erreurs" -#: guc-file.l:499 +#: guc-file.l:500 #, c-format msgid "configuration file \"%s\" contains errors; unaffected changes were applied" msgstr "le fichier de configuration « %s » contient des erreurs ; les modifications non affectées ont été appliquées" -#: guc-file.l:504 +#: guc-file.l:505 #, c-format msgid "configuration file \"%s\" contains errors; no changes were applied" msgstr "le fichier de configuration « %s » contient des erreurs ; aucune modification n'a été appliquée" -#: guc-file.l:577 +#: guc-file.l:578 #, 
c-format msgid "could not open configuration file \"%s\": maximum nesting depth exceeded" msgstr "" "n'a pas pu ouvrir le fichier de configuration « %s » : profondeur\n" "d'imbrication dépassé" -#: guc-file.l:593 libpq/hba.c:2110 libpq/hba.c:2510 +#: guc-file.l:594 libpq/hba.c:2112 libpq/hba.c:2512 #, c-format msgid "could not open configuration file \"%s\": %m" msgstr "n'a pas pu ouvrir le fichier de configuration « %s » : %m" -#: guc-file.l:604 +#: guc-file.l:605 #, c-format msgid "skipping missing configuration file \"%s\"" msgstr "ignore le fichier de configuration « %s » manquant" -#: guc-file.l:858 +#: guc-file.l:859 #, c-format msgid "syntax error in file \"%s\" line %u, near end of line" msgstr "erreur de syntaxe dans le fichier « %s », ligne %u, près de la fin de ligne" -#: guc-file.l:868 +#: guc-file.l:869 #, c-format msgid "syntax error in file \"%s\" line %u, near token \"%s\"" msgstr "erreur de syntaxe dans le fichier « %s », ligne %u, près du mot clé « %s »" -#: guc-file.l:888 +#: guc-file.l:889 #, c-format msgid "too many syntax errors found, abandoning file \"%s\"" msgstr "trop d'erreurs de syntaxe trouvées, abandon du fichier « %s »" -#: guc-file.l:940 +#: guc-file.l:941 #, c-format msgid "could not open configuration directory \"%s\": %m" msgstr "n'a pas pu ouvrir le répertoire de configuration « %s » : %m" @@ -11683,113 +11729,112 @@ msgstr "n'a pas pu ouvrir le répertoire de configuration « %s » : %m" msgid "Cannot enlarge string buffer containing %d bytes by %d more bytes." msgstr "Ne peut pas agrandir de %d octets le tampon de chaîne contenant déjà %d octets" -#: libpq/auth-scram.c:199 libpq/auth-scram.c:439 libpq/auth-scram.c:448 +#: libpq/auth-scram.c:201 libpq/auth-scram.c:441 libpq/auth-scram.c:450 #, c-format msgid "invalid SCRAM verifier for user \"%s\"" msgstr "vérificateur SCRAM invalide pour l'utilisateur « %s »" -#: libpq/auth-scram.c:210 +#: libpq/auth-scram.c:212 #, c-format msgid "User \"%s\" does not have a valid SCRAM verifier." msgstr "L'utilisateur « %s » n'a pas de vérificateur SCRAM valide." -#: libpq/auth-scram.c:288 libpq/auth-scram.c:293 libpq/auth-scram.c:587 libpq/auth-scram.c:595 libpq/auth-scram.c:676 libpq/auth-scram.c:686 libpq/auth-scram.c:804 libpq/auth-scram.c:811 libpq/auth-scram.c:826 libpq/auth-scram.c:1056 libpq/auth-scram.c:1064 +#: libpq/auth-scram.c:290 libpq/auth-scram.c:295 libpq/auth-scram.c:589 libpq/auth-scram.c:597 libpq/auth-scram.c:678 libpq/auth-scram.c:688 libpq/auth-scram.c:807 libpq/auth-scram.c:814 libpq/auth-scram.c:829 libpq/auth-scram.c:1062 libpq/auth-scram.c:1070 #, c-format msgid "malformed SCRAM message" msgstr "message SCRAM malformé" -#: libpq/auth-scram.c:289 +#: libpq/auth-scram.c:291 #, c-format msgid "The message is empty." msgstr "Le message est vide." -#: libpq/auth-scram.c:294 +#: libpq/auth-scram.c:296 #, c-format msgid "Message length does not match input length." msgstr "La longueur du message ne correspond pas à la longueur en entrée." -#: libpq/auth-scram.c:326 +#: libpq/auth-scram.c:328 #, c-format msgid "invalid SCRAM response" msgstr "réponse SCRAM invalide" -#: libpq/auth-scram.c:327 +#: libpq/auth-scram.c:329 #, c-format msgid "Nonce does not match." msgstr "Le nonce ne correspond pas." -#: libpq/auth-scram.c:401 +#: libpq/auth-scram.c:403 #, c-format msgid "could not generate random salt" msgstr "n'a pas pu générer le sel aléatoire" -#: libpq/auth-scram.c:588 +#: libpq/auth-scram.c:590 #, c-format -msgid "Expected attribute '%c' but found %s." 
-msgstr "Attribut attendu '%c', mais « %s » trouvé." +msgid "Expected attribute \"%c\" but found \"%s\"." +msgstr "Attribut attendu « %c », mais « %s » trouvé." -#: libpq/auth-scram.c:596 libpq/auth-scram.c:687 +#: libpq/auth-scram.c:598 libpq/auth-scram.c:689 #, c-format -msgid "Expected character = for attribute %c." -msgstr "Caractère = attendu pour l'attribut %c." +msgid "Expected character \"=\" for attribute \"%c\"." +msgstr "Caractère « = » attendu pour l'attribut « %c »." -#: libpq/auth-scram.c:677 +#: libpq/auth-scram.c:679 #, c-format -msgid "Attribute expected, but found invalid character %s." -msgstr "Attribut attendu, mais a trouvé le caractère invalide %s." +msgid "Attribute expected, but found invalid character \"%s\"." +msgstr "Attribut attendu, mais a trouvé le caractère invalide « %s »." -#: libpq/auth-scram.c:800 +#: libpq/auth-scram.c:803 #, c-format msgid "client requires SCRAM channel binding, but it is not supported" -msgstr "" +msgstr "le client requiert le lien de canal SCRAM mais ceci n'est pas supporté" -#: libpq/auth-scram.c:805 -#, fuzzy, c-format -#| msgid "Unexpected end of input." -msgid "Unexpected channel-binding flag %s." -msgstr "Fin de l'entrée inattendue." +#: libpq/auth-scram.c:808 +#, c-format +msgid "Unexpected channel-binding flag \"%s\"." +msgstr "Drapeau du channel-binding inattendu « %s »." -#: libpq/auth-scram.c:812 +#: libpq/auth-scram.c:815 #, c-format -msgid "Comma expected, but found character %s." -msgstr "Virgule attendue, mais « %s » trouvé." +msgid "Comma expected, but found character \"%s\"." +msgstr "Virgule attendue, mais caractère « %s » trouvé." -#: libpq/auth-scram.c:822 +#: libpq/auth-scram.c:825 #, c-format msgid "client uses authorization identity, but it is not supported" msgstr "le client utilise une identité d'autorisation, mais elle n'est pas supportée" -#: libpq/auth-scram.c:827 +#: libpq/auth-scram.c:830 #, c-format -msgid "Unexpected attribute %s in client-first-message." -msgstr "Attribut %s inattendu dans client-first-message." +msgid "Unexpected attribute \"%s\" in client-first-message." +msgstr "Attribut « %s » inattendu dans client-first-message." -#: libpq/auth-scram.c:843 +#: libpq/auth-scram.c:846 #, c-format msgid "client requires an unsupported SCRAM extension" msgstr "le client requiert une extension SCRAM non supportée" -#: libpq/auth-scram.c:857 +#: libpq/auth-scram.c:860 #, c-format msgid "non-printable characters in SCRAM nonce" -msgstr "" +msgstr "caractères non affichables dans le nonce SCRAM" -#: libpq/auth-scram.c:974 +#: libpq/auth-scram.c:977 #, c-format msgid "could not generate random nonce" msgstr "n'a pas pu générer le nonce aléatoire" -#: libpq/auth-scram.c:1042 +#: libpq/auth-scram.c:1048 #, c-format msgid "unexpected SCRAM channel-binding attribute in client-final-message" -msgstr "" +msgstr "attribut du lien de canal SCRAM inattendu dans client-final-message" -#: libpq/auth-scram.c:1057 +#: libpq/auth-scram.c:1063 #, c-format msgid "Malformed proof in client-final-message." -msgstr "" +msgstr "Preuve malformée dans le client-final-message." -#: libpq/auth-scram.c:1065 +#: libpq/auth-scram.c:1071 #, c-format msgid "Garbage found at the end of client-final-message." msgstr "Problème trouvé à la fin de client-final-message." 
@@ -11977,389 +12022,389 @@ msgstr "en attente du mot de passe, a reçu un type de message %d" msgid "invalid password packet size" msgstr "taille du paquet du mot de passe invalide" -#: libpq/auth.c:809 libpq/hba.c:1325 +#: libpq/auth.c:707 +#, c-format +msgid "empty password returned by client" +msgstr "mot de passe vide renvoyé par le client" + +#: libpq/auth.c:827 libpq/hba.c:1325 #, c-format msgid "MD5 authentication is not supported when \"db_user_namespace\" is enabled" msgstr "l'authentification MD5 n'est pas supportée quand « db_user_namespace » est activé" -#: libpq/auth.c:815 +#: libpq/auth.c:833 #, c-format msgid "could not generate random MD5 salt" msgstr "n'a pas pu générer le sel MD5 aléatoire" -#: libpq/auth.c:860 +#: libpq/auth.c:878 #, c-format msgid "SASL authentication is not supported in protocol version 2" msgstr "l'authentification SASL n'est pas supportée dans le protocole de version 2" -#: libpq/auth.c:902 +#: libpq/auth.c:920 #, c-format msgid "expected SASL response, got message type %d" msgstr "attendait une réponse SASL, a reçu le type de message %d" -#: libpq/auth.c:939 +#: libpq/auth.c:957 #, c-format msgid "client selected an invalid SASL authentication mechanism" msgstr "le client a sélectionné un mécanisme d'authentification SASL invalide" -#: libpq/auth.c:1086 +#: libpq/auth.c:1104 #, c-format msgid "GSSAPI is not supported in protocol version 2" msgstr "GSSAPI n'est pas supporté dans le protocole de version 2" -#: libpq/auth.c:1146 +#: libpq/auth.c:1164 #, c-format msgid "expected GSS response, got message type %d" msgstr "en attente d'une réponse GSS, a reçu un message de type %d" -#: libpq/auth.c:1208 +#: libpq/auth.c:1226 msgid "accepting GSS security context failed" msgstr "échec de l'acceptation du contexte de sécurité GSS" -#: libpq/auth.c:1234 +#: libpq/auth.c:1252 msgid "retrieving GSS user name failed" msgstr "échec lors de la récupération du nom de l'utilisateur avec GSS" -#: libpq/auth.c:1354 +#: libpq/auth.c:1372 #, c-format msgid "SSPI is not supported in protocol version 2" msgstr "SSPI n'est pas supporté dans le protocole de version 2" -#: libpq/auth.c:1369 +#: libpq/auth.c:1387 msgid "could not acquire SSPI credentials" msgstr "n'a pas pu obtenir les pièces d'identité SSPI" -#: libpq/auth.c:1387 +#: libpq/auth.c:1405 #, c-format msgid "expected SSPI response, got message type %d" msgstr "en attente d'une réponse SSPI, a reçu un message de type %d" -#: libpq/auth.c:1460 +#: libpq/auth.c:1478 msgid "could not accept SSPI security context" msgstr "n'a pas pu accepter le contexte de sécurité SSPI" -#: libpq/auth.c:1522 +#: libpq/auth.c:1540 msgid "could not get token from SSPI security context" msgstr "n'a pas pu obtenir le jeton du contexte de sécurité SSPI" -#: libpq/auth.c:1641 libpq/auth.c:1660 +#: libpq/auth.c:1659 libpq/auth.c:1678 #, c-format msgid "could not translate name" msgstr "n'a pas pu traduit le nom" -#: libpq/auth.c:1673 +#: libpq/auth.c:1691 #, c-format msgid "realm name too long" msgstr "nom du royaume trop long" -#: libpq/auth.c:1688 +#: libpq/auth.c:1706 #, c-format msgid "translated account name too long" msgstr "traduction du nom de compte trop longue" -#: libpq/auth.c:1874 +#: libpq/auth.c:1892 #, c-format msgid "could not create socket for Ident connection: %m" msgstr "n'a pas pu créer le socket pour la connexion Ident : %m" -#: libpq/auth.c:1889 +#: libpq/auth.c:1907 #, c-format msgid "could not bind to local address \"%s\": %m" msgstr "n'a pas pu se lier à l'adresse locale « %s » : %m" -#: libpq/auth.c:1901 +#: 
libpq/auth.c:1919 #, c-format msgid "could not connect to Ident server at address \"%s\", port %s: %m" msgstr "n'a pas pu se connecter au serveur Ident à l'adresse « %s », port %s : %m" -#: libpq/auth.c:1923 +#: libpq/auth.c:1941 #, c-format msgid "could not send query to Ident server at address \"%s\", port %s: %m" msgstr "n'a pas pu envoyer la requête au serveur Ident à l'adresse « %s », port %s : %m" -#: libpq/auth.c:1940 +#: libpq/auth.c:1958 #, c-format msgid "could not receive response from Ident server at address \"%s\", port %s: %m" msgstr "" "n'a pas pu recevoir la réponse du serveur Ident à l'adresse « %s », port %s :\n" "%m" -#: libpq/auth.c:1950 +#: libpq/auth.c:1968 #, c-format msgid "invalidly formatted response from Ident server: \"%s\"" msgstr "réponse mal formatée du serveur Ident : « %s »" -#: libpq/auth.c:1990 +#: libpq/auth.c:2008 #, c-format msgid "peer authentication is not supported on this platform" msgstr "la méthode d'authentification «peer n'est pas supportée sur cette plateforme" -#: libpq/auth.c:1994 +#: libpq/auth.c:2012 #, c-format msgid "could not get peer credentials: %m" msgstr "n'a pas pu obtenir l'authentification de l'autre : %m" -#: libpq/auth.c:2003 +#: libpq/auth.c:2021 #, c-format msgid "could not look up local user ID %ld: %s" msgstr "n'a pas pu rechercher l'identifiant %ld de l'utilisateur local : %s" -#: libpq/auth.c:2087 libpq/auth.c:2413 libpq/auth.c:2726 -#, c-format -msgid "empty password returned by client" -msgstr "mot de passe vide renvoyé par le client" - -#: libpq/auth.c:2097 +#: libpq/auth.c:2109 #, c-format msgid "error from underlying PAM layer: %s" msgstr "erreur provenant de la couche PAM : %s" -#: libpq/auth.c:2178 +#: libpq/auth.c:2190 #, c-format msgid "could not create PAM authenticator: %s" msgstr "n'a pas pu créer l'authenticateur PAM : %s" -#: libpq/auth.c:2189 +#: libpq/auth.c:2201 #, c-format msgid "pam_set_item(PAM_USER) failed: %s" msgstr "pam_set_item(PAM_USER) a échoué : %s" -#: libpq/auth.c:2200 +#: libpq/auth.c:2212 #, c-format msgid "pam_set_item(PAM_RHOST) failed: %s" msgstr "pam_set_item(PAM_RHOST) a échoué : %s" -#: libpq/auth.c:2211 +#: libpq/auth.c:2223 #, c-format msgid "pam_set_item(PAM_CONV) failed: %s" msgstr "pam_set_item(PAM_CONV) a échoué : %s" -#: libpq/auth.c:2222 +#: libpq/auth.c:2234 #, c-format msgid "pam_authenticate failed: %s" msgstr "pam_authenticate a échoué : %s" -#: libpq/auth.c:2233 +#: libpq/auth.c:2245 #, c-format msgid "pam_acct_mgmt failed: %s" msgstr "pam_acct_mgmt a échoué : %s" -#: libpq/auth.c:2244 +#: libpq/auth.c:2256 #, c-format msgid "could not release PAM authenticator: %s" msgstr "n'a pas pu fermer l'authenticateur PAM : %s" -#: libpq/auth.c:2309 +#: libpq/auth.c:2323 #, c-format msgid "could not initialize LDAP: %m" msgstr "n'a pas pu initialiser LDAP : %m" -#: libpq/auth.c:2312 +#: libpq/auth.c:2326 #, c-format msgid "could not initialize LDAP: error code %d" msgstr "n'a pas pu initialiser LDAP : code d'erreur %d" -#: libpq/auth.c:2322 +#: libpq/auth.c:2336 #, c-format msgid "could not set LDAP protocol version: %s" msgstr "n'a pas pu initialiser la version du protocole LDAP : %s" -#: libpq/auth.c:2351 +#: libpq/auth.c:2365 #, c-format msgid "could not load wldap32.dll" msgstr "n'a pas pu charger wldap32.dll" -#: libpq/auth.c:2359 +#: libpq/auth.c:2373 #, c-format msgid "could not load function _ldap_start_tls_sA in wldap32.dll" msgstr "n'a pas pu charger la fonction _ldap_start_tls_sA de wldap32.dll" -#: libpq/auth.c:2360 +#: libpq/auth.c:2374 #, c-format msgid "LDAP over SSL 
is not supported on this platform." msgstr "LDAP via SSL n'est pas supporté sur cette plateforme." -#: libpq/auth.c:2375 +#: libpq/auth.c:2389 #, c-format msgid "could not start LDAP TLS session: %s" msgstr "n'a pas pu démarrer la session TLS LDAP : %s" -#: libpq/auth.c:2397 +#: libpq/auth.c:2411 #, c-format msgid "LDAP server not specified" msgstr "serveur LDAP non précisé" -#: libpq/auth.c:2450 +#: libpq/auth.c:2460 #, c-format msgid "invalid character in user name for LDAP authentication" msgstr "caractère invalide dans le nom de l'utilisateur pour l'authentification LDAP" -#: libpq/auth.c:2465 +#: libpq/auth.c:2476 #, c-format msgid "could not perform initial LDAP bind for ldapbinddn \"%s\" on server \"%s\": %s" msgstr "n'a pas pu réaliser le lien LDAP initiale pour ldapbinddn « %s » sur le serveur « %s » : %s" -#: libpq/auth.c:2489 +#: libpq/auth.c:2502 #, c-format msgid "could not search LDAP for filter \"%s\" on server \"%s\": %s" msgstr "n'a pas pu rechercher dans LDAP pour filtrer « %s » sur le serveur « %s » : %s" -#: libpq/auth.c:2500 +#: libpq/auth.c:2514 #, c-format msgid "LDAP user \"%s\" does not exist" msgstr "l'utilisateur LDAP « %s » n'existe pas" -#: libpq/auth.c:2501 +#: libpq/auth.c:2515 #, c-format msgid "LDAP search for filter \"%s\" on server \"%s\" returned no entries." msgstr "la recherche LDAP pour le filtre « %s » sur le serveur « %s » n'a renvoyé aucun enregistrement." -#: libpq/auth.c:2505 +#: libpq/auth.c:2519 #, c-format msgid "LDAP user \"%s\" is not unique" msgstr "l'utilisateur LDAP « %s » n'est pas unique" -#: libpq/auth.c:2506 +#: libpq/auth.c:2520 #, c-format msgid "LDAP search for filter \"%s\" on server \"%s\" returned %d entry." msgid_plural "LDAP search for filter \"%s\" on server \"%s\" returned %d entries." msgstr[0] "la recherche LDAP pour le filtre « %s » sur le serveur « %s » a renvoyé %d enregistrement." msgstr[1] "la recherche LDAP pour le filtre « %s » sur le serveur « %s » a renvoyé %d enregistrements." 
-#: libpq/auth.c:2524 +#: libpq/auth.c:2539 #, c-format msgid "could not get dn for the first entry matching \"%s\" on server \"%s\": %s" msgstr "" "n'a pas pu obtenir le dn pour la première entrée correspondante « %s » sur\n" "le serveur « %s » : %s" -#: libpq/auth.c:2544 +#: libpq/auth.c:2560 #, c-format msgid "could not unbind after searching for user \"%s\" on server \"%s\": %s" msgstr "" "n'a pas pu exécuter le unbind après la recherche de l'utilisateur « %s »\n" "sur le serveur « %s » : %s" -#: libpq/auth.c:2574 +#: libpq/auth.c:2592 #, c-format msgid "LDAP login failed for user \"%s\" on server \"%s\": %s" msgstr "échec de connexion LDAP pour l'utilisateur « %s » sur le serveur « %s » : %s" -#: libpq/auth.c:2602 +#: libpq/auth.c:2622 #, c-format msgid "certificate authentication failed for user \"%s\": client certificate contains no user name" msgstr "" "l'authentification par le certificat a échoué pour l'utilisateur « %s » :\n" "le certificat du client ne contient aucun nom d'utilisateur" -#: libpq/auth.c:2705 +#: libpq/auth.c:2725 #, c-format msgid "RADIUS server not specified" msgstr "serveur RADIUS non précisé" -#: libpq/auth.c:2712 +#: libpq/auth.c:2732 #, c-format msgid "RADIUS secret not specified" msgstr "secret RADIUS non précisé" -#: libpq/auth.c:2733 +#: libpq/auth.c:2746 #, c-format msgid "RADIUS authentication does not support passwords longer than %d characters" msgstr "" "l'authentification RADIUS ne supporte pas les mots de passe de plus de %d\n" "caractères" -#: libpq/auth.c:2830 libpq/hba.c:1876 +#: libpq/auth.c:2851 libpq/hba.c:1878 #, c-format msgid "could not translate RADIUS server name \"%s\" to address: %s" msgstr "n'a pas pu traduire le nom du serveur RADIUS « %s » en une adresse : %s" -#: libpq/auth.c:2844 +#: libpq/auth.c:2865 #, c-format msgid "could not generate random encryption vector" msgstr "n'a pas pu générer le vecteur de chiffrement aléatoire" -#: libpq/auth.c:2878 +#: libpq/auth.c:2899 #, c-format msgid "could not perform MD5 encryption of password" msgstr "n'a pas pu réaliser le chiffrement MD5 du mot de passe" -#: libpq/auth.c:2904 +#: libpq/auth.c:2925 #, c-format msgid "could not create RADIUS socket: %m" msgstr "n'a pas pu créer le socket RADIUS : %m" -#: libpq/auth.c:2926 +#: libpq/auth.c:2947 #, c-format msgid "could not bind local RADIUS socket: %m" msgstr "n'a pas pu se lier à la socket RADIUS : %m" -#: libpq/auth.c:2936 +#: libpq/auth.c:2957 #, c-format msgid "could not send RADIUS packet: %m" msgstr "n'a pas pu transmettre le paquet RADIUS : %m" -#: libpq/auth.c:2969 libpq/auth.c:2995 +#: libpq/auth.c:2990 libpq/auth.c:3016 #, c-format msgid "timeout waiting for RADIUS response from %s" msgstr "dépassement du délai pour la réponse du RADIUS à partir de %s" -#: libpq/auth.c:2988 +#: libpq/auth.c:3009 #, c-format msgid "could not check status on RADIUS socket: %m" msgstr "n'a pas pu vérifier le statut sur la socket RADIUS : %m" -#: libpq/auth.c:3018 +#: libpq/auth.c:3039 #, c-format msgid "could not read RADIUS response: %m" msgstr "n'a pas pu lire la réponse RADIUS : %m" -#: libpq/auth.c:3031 libpq/auth.c:3035 +#: libpq/auth.c:3052 libpq/auth.c:3056 #, c-format msgid "RADIUS response from %s was sent from incorrect port: %d" msgstr "la réponse RADIUS de %s a été envoyée à partir d'un mauvais port : %d" -#: libpq/auth.c:3044 +#: libpq/auth.c:3065 #, c-format msgid "RADIUS response from %s too short: %d" msgstr "réponse RADIUS de %s trop courte : %d" -#: libpq/auth.c:3051 +#: libpq/auth.c:3072 #, c-format msgid "RADIUS response from %s 
has corrupt length: %d (actual length %d)" msgstr "la réponse RADIUS de %s a une longueur corrompue : %d (longueur actuelle %d)" -#: libpq/auth.c:3059 +#: libpq/auth.c:3080 #, c-format msgid "RADIUS response from %s is to a different request: %d (should be %d)" msgstr "la réponse RADIUS à partir de %s correspond à une demande différente : %d (devrait être %d)" -#: libpq/auth.c:3084 +#: libpq/auth.c:3105 #, c-format msgid "could not perform MD5 encryption of received packet" msgstr "n'a pas pu réaliser le chiffrement MD5 du paquet reçu" -#: libpq/auth.c:3093 +#: libpq/auth.c:3114 #, c-format msgid "RADIUS response from %s has incorrect MD5 signature" msgstr "la réponse RADIUS à partir de %s a une signature MD5 invalide" -#: libpq/auth.c:3111 +#: libpq/auth.c:3132 #, c-format msgid "RADIUS response from %s has invalid code (%d) for user \"%s\"" msgstr "la réponse RADIUS à partir de %s a un code invalide (%d) pour l'utilisateur « %s »" -#: libpq/be-fsstubs.c:132 libpq/be-fsstubs.c:163 libpq/be-fsstubs.c:197 libpq/be-fsstubs.c:237 libpq/be-fsstubs.c:262 libpq/be-fsstubs.c:310 libpq/be-fsstubs.c:333 libpq/be-fsstubs.c:581 +#: libpq/be-fsstubs.c:132 libpq/be-fsstubs.c:163 libpq/be-fsstubs.c:197 libpq/be-fsstubs.c:237 libpq/be-fsstubs.c:262 libpq/be-fsstubs.c:310 libpq/be-fsstubs.c:333 libpq/be-fsstubs.c:590 #, c-format msgid "invalid large-object descriptor: %d" msgstr "descripteur invalide de « Large Object » : %d" -#: libpq/be-fsstubs.c:178 libpq/be-fsstubs.c:216 libpq/be-fsstubs.c:600 libpq/be-fsstubs.c:788 +#: libpq/be-fsstubs.c:178 libpq/be-fsstubs.c:216 libpq/be-fsstubs.c:609 libpq/be-fsstubs.c:797 libpq/be-fsstubs.c:917 #, c-format msgid "permission denied for large object %u" msgstr "droit refusé pour le Large Object %u" -#: libpq/be-fsstubs.c:203 libpq/be-fsstubs.c:587 +#: libpq/be-fsstubs.c:203 libpq/be-fsstubs.c:596 #, c-format msgid "large object descriptor %d was not opened for writing" msgstr "le descripteur %d du « Large Object » n'a pas été ouvert pour l'écriture" @@ -12404,22 +12449,22 @@ msgstr "doit être super-utilisateur pour utiliser lo_export() du côté serveur msgid "Anyone can use the client-side lo_export() provided by libpq." msgstr "Tout le monde peut utiliser lo_export(), fournie par libpq, du côté client." 
-#: libpq/be-fsstubs.c:547 +#: libpq/be-fsstubs.c:556 #, c-format msgid "could not create server file \"%s\": %m" msgstr "n'a pas pu créer le fichier serveur « %s » : %m" -#: libpq/be-fsstubs.c:559 +#: libpq/be-fsstubs.c:568 #, c-format msgid "could not write server file \"%s\": %m" msgstr "n'a pas pu écrire le fichier serveur « %s » : %m" -#: libpq/be-fsstubs.c:813 +#: libpq/be-fsstubs.c:822 #, c-format msgid "large object read request is too large" msgstr "la demande de lecture du Large Object est trop grande" -#: libpq/be-fsstubs.c:855 utils/adt/genfile.c:212 utils/adt/genfile.c:253 +#: libpq/be-fsstubs.c:864 utils/adt/genfile.c:212 utils/adt/genfile.c:253 #, c-format msgid "requested length cannot be negative" msgstr "la longueur demandée ne peut pas être négative" @@ -12478,131 +12523,131 @@ msgstr "n'a pas pu charger le fichier de clé privée « %s » : %s" msgid "check of private key failed: %s" msgstr "échec de la vérification de la clé privée : %s" -#: libpq/be-secure-openssl.c:302 +#: libpq/be-secure-openssl.c:310 #, c-format msgid "could not set the cipher list (no valid ciphers available)" msgstr "n'a pas pu configurer la liste des algorithmes de chiffrement (pas d'algorithmes valides disponibles)" -#: libpq/be-secure-openssl.c:320 +#: libpq/be-secure-openssl.c:328 #, c-format msgid "could not load root certificate file \"%s\": %s" msgstr "n'a pas pu charger le fichier du certificat racine « %s » : %s" -#: libpq/be-secure-openssl.c:347 +#: libpq/be-secure-openssl.c:355 #, c-format msgid "SSL certificate revocation list file \"%s\" ignored" msgstr "liste de révocation des certificats SSL « %s » ignorée" -#: libpq/be-secure-openssl.c:349 +#: libpq/be-secure-openssl.c:357 #, c-format msgid "SSL library does not support certificate revocation lists." msgstr "La bibliothèque SSL ne supporte pas les listes de révocation des certificats." 
-#: libpq/be-secure-openssl.c:356 +#: libpq/be-secure-openssl.c:364 #, c-format msgid "could not load SSL certificate revocation list file \"%s\": %s" msgstr "n'a pas pu charger le fichier de liste de révocation des certificats SSL (« %s ») : %s" -#: libpq/be-secure-openssl.c:437 +#: libpq/be-secure-openssl.c:445 #, c-format msgid "could not initialize SSL connection: SSL context not set up" msgstr "n'a pas pu initialiser la connexion SSL : contexte SSL non configuré" -#: libpq/be-secure-openssl.c:445 +#: libpq/be-secure-openssl.c:453 #, c-format msgid "could not initialize SSL connection: %s" msgstr "n'a pas pu initialiser la connexion SSL : %s" -#: libpq/be-secure-openssl.c:453 +#: libpq/be-secure-openssl.c:461 #, c-format msgid "could not set SSL socket: %s" msgstr "n'a pas pu créer le socket SSL : %s" -#: libpq/be-secure-openssl.c:508 +#: libpq/be-secure-openssl.c:516 #, c-format msgid "could not accept SSL connection: %m" msgstr "n'a pas pu accepter la connexion SSL : %m" -#: libpq/be-secure-openssl.c:512 libpq/be-secure-openssl.c:523 +#: libpq/be-secure-openssl.c:520 libpq/be-secure-openssl.c:531 #, c-format msgid "could not accept SSL connection: EOF detected" msgstr "n'a pas pu accepter la connexion SSL : fin de fichier détecté" -#: libpq/be-secure-openssl.c:517 +#: libpq/be-secure-openssl.c:525 #, c-format msgid "could not accept SSL connection: %s" msgstr "n'a pas pu accepter la connexion SSL : %s" -#: libpq/be-secure-openssl.c:528 libpq/be-secure-openssl.c:669 libpq/be-secure-openssl.c:735 +#: libpq/be-secure-openssl.c:536 libpq/be-secure-openssl.c:677 libpq/be-secure-openssl.c:744 #, c-format msgid "unrecognized SSL error code: %d" msgstr "code d'erreur SSL inconnu : %d" -#: libpq/be-secure-openssl.c:570 +#: libpq/be-secure-openssl.c:578 #, c-format msgid "SSL certificate's common name contains embedded null" msgstr "le nom commun du certificat SSL contient des NULL" -#: libpq/be-secure-openssl.c:581 +#: libpq/be-secure-openssl.c:589 #, c-format msgid "SSL connection from \"%s\"" msgstr "connexion SSL de « %s »" -#: libpq/be-secure-openssl.c:658 libpq/be-secure-openssl.c:720 +#: libpq/be-secure-openssl.c:666 libpq/be-secure-openssl.c:728 #, c-format msgid "SSL error: %s" msgstr "erreur SSL : %s" -#: libpq/be-secure-openssl.c:900 +#: libpq/be-secure-openssl.c:909 #, c-format msgid "could not open DH parameters file \"%s\": %m" msgstr "n'a pas pu ouvrir le fichier de paramètres DH « %s » : %m" -#: libpq/be-secure-openssl.c:912 +#: libpq/be-secure-openssl.c:921 #, c-format msgid "could not load DH parameters file: %s" msgstr "n'a pas pu charger le fichier de paramètres DH : %s" -#: libpq/be-secure-openssl.c:922 +#: libpq/be-secure-openssl.c:931 #, c-format msgid "invalid DH parameters: %s" msgstr "paramètres DH invalides : %s" -#: libpq/be-secure-openssl.c:930 +#: libpq/be-secure-openssl.c:939 #, c-format msgid "invalid DH parameters: p is not prime" msgstr "paramètres DH invalides : p n'est pas premier" -#: libpq/be-secure-openssl.c:938 +#: libpq/be-secure-openssl.c:947 #, c-format msgid "invalid DH parameters: neither suitable generator or safe prime" -msgstr "" +msgstr "paramètres DH invalides : pas de générateur convenable ou de premier sûr" -#: libpq/be-secure-openssl.c:1079 +#: libpq/be-secure-openssl.c:1088 #, c-format msgid "DH: could not load DH parameters" msgstr "DH : n'a pas pu charger les paramètres DH" -#: libpq/be-secure-openssl.c:1087 +#: libpq/be-secure-openssl.c:1096 #, c-format msgid "DH: could not set DH parameters: %s" msgstr "DH : n'a pas pu configurer les 
paramètres DH : %s" -#: libpq/be-secure-openssl.c:1111 +#: libpq/be-secure-openssl.c:1120 #, c-format msgid "ECDH: unrecognized curve name: %s" msgstr "ECDH : nome de courbe non reconnu : %s" -#: libpq/be-secure-openssl.c:1120 +#: libpq/be-secure-openssl.c:1129 #, c-format msgid "ECDH: could not create key" msgstr "ECDH : n'a pas pu créer la clé" -#: libpq/be-secure-openssl.c:1148 +#: libpq/be-secure-openssl.c:1157 msgid "no SSL error reported" msgstr "aucune erreur SSL reportée" -#: libpq/be-secure-openssl.c:1152 +#: libpq/be-secure-openssl.c:1161 #, c-format msgid "SSL error code %lu" msgstr "erreur SSL %lu" @@ -12622,27 +12667,22 @@ msgstr "Le rôle « %s » n'existe pas" msgid "User \"%s\" has no password assigned." msgstr "L'utilisateur « %s » n'a pas de mot de passe affecté." -#: libpq/crypt.c:76 -#, c-format -msgid "User \"%s\" has an empty password." -msgstr "L'utilisateur « %s » a un mot de passe vide." - -#: libpq/crypt.c:87 +#: libpq/crypt.c:79 #, c-format msgid "User \"%s\" has an expired password." msgstr "L'utilisateur « %s » a un mot de passe expiré." -#: libpq/crypt.c:181 +#: libpq/crypt.c:173 #, c-format msgid "User \"%s\" has a password that cannot be used with MD5 authentication." msgstr "L'utilisateur « %s » a un mot de passe qui ne peut pas être utilisé avec une authentification MD5." -#: libpq/crypt.c:205 libpq/crypt.c:246 libpq/crypt.c:270 +#: libpq/crypt.c:197 libpq/crypt.c:238 libpq/crypt.c:262 #, c-format msgid "Password does not match for user \"%s\"." msgstr "Mot de passe ne correspond pas pour l'utilisateur %s : " -#: libpq/crypt.c:289 +#: libpq/crypt.c:281 #, c-format msgid "Password of user \"%s\" is in unrecognized format." msgstr "Le mot de passe de l'utilisateur « %s » est dans un format non reconnu." @@ -12665,7 +12705,7 @@ msgid "authentication file line too long" msgstr "ligne du fichier d'authentification trop longue" #: libpq/hba.c:510 libpq/hba.c:867 libpq/hba.c:887 libpq/hba.c:925 libpq/hba.c:975 libpq/hba.c:989 libpq/hba.c:1011 libpq/hba.c:1020 libpq/hba.c:1041 libpq/hba.c:1054 libpq/hba.c:1074 libpq/hba.c:1096 libpq/hba.c:1108 libpq/hba.c:1164 libpq/hba.c:1184 libpq/hba.c:1198 libpq/hba.c:1217 libpq/hba.c:1228 libpq/hba.c:1243 libpq/hba.c:1261 libpq/hba.c:1277 libpq/hba.c:1289 libpq/hba.c:1326 libpq/hba.c:1367 libpq/hba.c:1380 libpq/hba.c:1402 -#: libpq/hba.c:1414 libpq/hba.c:1432 libpq/hba.c:1482 libpq/hba.c:1521 libpq/hba.c:1532 libpq/hba.c:1549 libpq/hba.c:1559 libpq/hba.c:1617 libpq/hba.c:1655 libpq/hba.c:1671 libpq/hba.c:1770 libpq/hba.c:1859 libpq/hba.c:1878 libpq/hba.c:1907 libpq/hba.c:1920 libpq/hba.c:1943 libpq/hba.c:1965 libpq/hba.c:1979 tsearch/ts_locale.c:182 +#: libpq/hba.c:1414 libpq/hba.c:1432 libpq/hba.c:1482 libpq/hba.c:1521 libpq/hba.c:1532 libpq/hba.c:1549 libpq/hba.c:1559 libpq/hba.c:1617 libpq/hba.c:1655 libpq/hba.c:1671 libpq/hba.c:1772 libpq/hba.c:1861 libpq/hba.c:1880 libpq/hba.c:1909 libpq/hba.c:1922 libpq/hba.c:1945 libpq/hba.c:1967 libpq/hba.c:1981 tsearch/ts_locale.c:182 #, c-format msgid "line %d of configuration file \"%s\"" msgstr "ligne %d du fichier de configuration « %s »" @@ -12873,8 +12913,8 @@ msgstr "la liste des secrets RADIUS ne peut pas être vide" #: libpq/hba.c:1611 #, c-format -msgid "the number of %s (%i) must be 1 or the same as the number of %s (%i)" -msgstr "le nombre de %s (%i) doit valoir 1 ou être identique au nombre de %s (%i)" +msgid "the number of %s (%d) must be 1 or the same as the number of %s (%d)" +msgstr "le nombre de %s (%d) doit valoir 1 ou être identique au nombre de %s (%d)" #: 
libpq/hba.c:1645 msgid "ident, peer, gssapi, sspi, and cert" @@ -12900,96 +12940,96 @@ msgstr "n'a pas pu analyser l'URL LDAP « %s » : %s" msgid "unsupported LDAP URL scheme: %s" msgstr "méthode URL LDAP non supporté : %s" -#: libpq/hba.c:1735 +#: libpq/hba.c:1737 #, c-format msgid "filters not supported in LDAP URLs" msgstr "filtres non supportés dans les URL LDAP" -#: libpq/hba.c:1744 +#: libpq/hba.c:1746 #, c-format msgid "LDAP URLs not supported on this platform" msgstr "URL LDAP non supportés sur cette plateforme." -#: libpq/hba.c:1769 +#: libpq/hba.c:1771 #, c-format msgid "invalid LDAP port number: \"%s\"" msgstr "numéro de port LDAP invalide : « %s »" -#: libpq/hba.c:1810 libpq/hba.c:1817 +#: libpq/hba.c:1812 libpq/hba.c:1819 msgid "gssapi and sspi" msgstr "gssapi et sspi" -#: libpq/hba.c:1826 libpq/hba.c:1835 +#: libpq/hba.c:1828 libpq/hba.c:1837 msgid "sspi" msgstr "sspi" -#: libpq/hba.c:1857 +#: libpq/hba.c:1859 #, c-format msgid "could not parse RADIUS server list \"%s\"" msgstr "n'a pas pu analyser la liste de serveurs RADIUS « %s »" -#: libpq/hba.c:1905 +#: libpq/hba.c:1907 #, c-format msgid "could not parse RADIUS port list \"%s\"" msgstr "n'a pas pu analyser la liste de ports RADIUS « %s »" -#: libpq/hba.c:1919 +#: libpq/hba.c:1921 #, c-format msgid "invalid RADIUS port number: \"%s\"" msgstr "numéro de port RADIUS invalide : « %s »" -#: libpq/hba.c:1941 +#: libpq/hba.c:1943 #, c-format msgid "could not parse RADIUS secret list \"%s\"" msgstr "n'a pas pu analyser la liste de secrets RADIUS « %s »" -#: libpq/hba.c:1963 +#: libpq/hba.c:1965 #, c-format msgid "could not parse RADIUS identifiers list \"%s\"" msgstr "n'a pas pu analyser la liste des identifieurs RADIUS « %s »" -#: libpq/hba.c:1977 +#: libpq/hba.c:1979 #, c-format msgid "unrecognized authentication option name: \"%s\"" msgstr "nom d'option de l'authentification inconnu : « %s »" -#: libpq/hba.c:2161 +#: libpq/hba.c:2163 #, c-format msgid "configuration file \"%s\" contains no entries" msgstr "le fichier de configuration « %s » ne contient aucun enregistrement" -#: libpq/hba.c:2666 +#: libpq/hba.c:2668 #, c-format msgid "invalid regular expression \"%s\": %s" msgstr "expression rationnelle invalide « %s » : %s" -#: libpq/hba.c:2726 +#: libpq/hba.c:2728 #, c-format msgid "regular expression match for \"%s\" failed: %s" msgstr "la correspondance de l'expression rationnelle pour « %s » a échoué : %s" -#: libpq/hba.c:2745 +#: libpq/hba.c:2747 #, c-format msgid "regular expression \"%s\" has no subexpressions as requested by backreference in \"%s\"" msgstr "" "l'expression rationnelle « %s » n'a pas de sous-expressions comme celle\n" "demandée par la référence dans « %s »" -#: libpq/hba.c:2842 +#: libpq/hba.c:2844 #, c-format msgid "provided user name (%s) and authenticated user name (%s) do not match" msgstr "" "le nom d'utilisateur (%s) et le nom d'utilisateur authentifié (%s) fournis ne\n" "correspondent pas" -#: libpq/hba.c:2862 +#: libpq/hba.c:2864 #, c-format msgid "no match in usermap \"%s\" for user \"%s\" authenticated as \"%s\"" msgstr "" "pas de correspondance dans la usermap « %s » pour l'utilisateur « %s »\n" "authentifié en tant que « %s »" -#: libpq/hba.c:2895 +#: libpq/hba.c:2897 #, c-format msgid "could not open usermap file \"%s\": %m" msgstr "n'a pas pu ouvrir le fichier usermap « %s » : %m" @@ -13119,7 +13159,7 @@ msgstr "il n'y a pas de connexion client" msgid "could not receive data from client: %m" msgstr "n'a pas pu recevoir les données du client : %m" -#: libpq/pqcomm.c:1219 
tcop/postgres.c:3928 +#: libpq/pqcomm.c:1219 tcop/postgres.c:3926 #, c-format msgid "terminating connection because protocol synchronization was lost" msgstr "arrêt de la connexion à cause d'une perte de synchronisation du protocole" @@ -13498,46 +13538,46 @@ msgid "%s cannot be applied to the nullable side of an outer join" msgstr "%s ne peut être appliqué sur le côté possiblement NULL d'une jointure externe" #. translator: %s is a SQL row locking clause such as FOR UPDATE -#: optimizer/plan/planner.c:1544 parser/analyze.c:1624 parser/analyze.c:1821 parser/analyze.c:2615 +#: optimizer/plan/planner.c:1572 parser/analyze.c:1624 parser/analyze.c:1821 parser/analyze.c:2615 #, c-format msgid "%s is not allowed with UNION/INTERSECT/EXCEPT" msgstr "%s n'est pas autorisé avec UNION/INTERSECT/EXCEPT" -#: optimizer/plan/planner.c:2144 optimizer/plan/planner.c:4102 +#: optimizer/plan/planner.c:2166 optimizer/plan/planner.c:4124 #, c-format msgid "could not implement GROUP BY" -msgstr "n'a pas pu implanté GROUP BY" +msgstr "n'a pas pu implanter GROUP BY" -#: optimizer/plan/planner.c:2145 optimizer/plan/planner.c:4103 optimizer/plan/planner.c:4843 optimizer/prep/prepunion.c:938 +#: optimizer/plan/planner.c:2167 optimizer/plan/planner.c:4125 optimizer/plan/planner.c:4865 optimizer/prep/prepunion.c:935 #, c-format msgid "Some of the datatypes only support hashing, while others only support sorting." msgstr "" "Certains des types de données supportent seulement le hachage,\n" "alors que les autres supportent seulement le tri." -#: optimizer/plan/planner.c:4842 +#: optimizer/plan/planner.c:4864 #, c-format msgid "could not implement DISTINCT" -msgstr "n'a pas pu implanté DISTINCT" +msgstr "n'a pas pu implanter DISTINCT" -#: optimizer/plan/planner.c:5522 +#: optimizer/plan/planner.c:5544 #, c-format msgid "could not implement window PARTITION BY" msgstr "n'a pas pu implanter PARTITION BY de window" -#: optimizer/plan/planner.c:5523 +#: optimizer/plan/planner.c:5545 #, c-format msgid "Window partitioning columns must be of sortable datatypes." msgstr "" "Les colonnes de partitionnement de window doivent être d'un type de données\n" "triables." -#: optimizer/plan/planner.c:5527 +#: optimizer/plan/planner.c:5549 #, c-format msgid "could not implement window ORDER BY" msgstr "n'a pas pu implanter ORDER BY dans le window" -#: optimizer/plan/planner.c:5528 +#: optimizer/plan/planner.c:5550 #, c-format msgid "Window ordering columns must be of sortable datatypes." msgstr "Les colonnes de tri de la window doivent être d'un type de données triable." @@ -13547,23 +13587,23 @@ msgstr "Les colonnes de tri de la window doivent être d'un type de données tri msgid "too many range table entries" msgstr "trop d'enregistrements dans la table range" -#: optimizer/prep/prepunion.c:493 +#: optimizer/prep/prepunion.c:496 #, c-format msgid "could not implement recursive UNION" -msgstr "n'a pas pu implanté le UNION récursif" +msgstr "n'a pas pu implanter le UNION récursif" -#: optimizer/prep/prepunion.c:494 +#: optimizer/prep/prepunion.c:497 #, c-format msgid "All column datatypes must be hashable." msgstr "Tous les types de données colonnes doivent être hachables." #. 
translator: %s is UNION, INTERSECT, or EXCEPT -#: optimizer/prep/prepunion.c:937 +#: optimizer/prep/prepunion.c:934 #, c-format msgid "could not implement %s" -msgstr "n'a pas pu implanté %s" +msgstr "n'a pas pu implanter %s" -#: optimizer/util/clauses.c:4689 +#: optimizer/util/clauses.c:4693 #, c-format msgid "SQL function \"%s\" during inlining" msgstr "fonction SQL « %s » durant « inlining »" @@ -13935,7 +13975,7 @@ msgid "grouping operations are not allowed in partition key expression" msgstr "les opérations de regroupement ne sont pas autorisées dans les expressions de clé de partitionnement" #. translator: %s is name of a SQL construct, eg GROUP BY -#: parser/parse_agg.c:530 parser/parse_clause.c:1830 +#: parser/parse_agg.c:530 parser/parse_clause.c:1810 #, c-format msgid "aggregate functions are not allowed in %s" msgstr "les fonctions d'agrégats ne sont pas autorisés dans %s" @@ -14019,12 +14059,12 @@ msgid "window functions are not allowed in partition key expression" msgstr "les fonctions de fenêtrage ne sont pas autorisés dans les expressions de clé de partitionnement" #. translator: %s is name of a SQL construct, eg GROUP BY -#: parser/parse_agg.c:904 parser/parse_clause.c:1839 +#: parser/parse_agg.c:904 parser/parse_clause.c:1819 #, c-format msgid "window functions are not allowed in %s" msgstr "les fonctions de fenêtrage ne sont pas autorisés dans %s" -#: parser/parse_agg.c:938 parser/parse_clause.c:2673 +#: parser/parse_agg.c:938 parser/parse_clause.c:2653 #, c-format msgid "window \"%s\" does not exist" msgstr "le window « %s » n'existe pas" @@ -14066,247 +14106,247 @@ msgstr "les arguments de la clause GROUPING doivent être des expressions de reg msgid "relation \"%s\" cannot be the target of a modifying statement" msgstr "la relation « %s » ne peut pas être la cible d'une instruction modifiée" -#: parser/parse_clause.c:637 parser/parse_clause.c:665 parser/parse_func.c:2153 +#: parser/parse_clause.c:608 parser/parse_clause.c:636 parser/parse_func.c:2153 #, c-format msgid "set-returning functions must appear at top level of FROM" msgstr "les fonctions renvoyant des ensembles doivent apparaître au niveau haut d'un FROM" -#: parser/parse_clause.c:677 +#: parser/parse_clause.c:648 #, c-format msgid "multiple column definition lists are not allowed for the same function" msgstr "plusieurs listes de définition de colonnes ne sont pas autorisées pour la même fonction" -#: parser/parse_clause.c:710 +#: parser/parse_clause.c:681 #, c-format msgid "ROWS FROM() with multiple functions cannot have a column definition list" msgstr "ROWS FROM() avec plusieurs fonctions ne peut pas avoir une liste de définitions de colonnes" -#: parser/parse_clause.c:711 +#: parser/parse_clause.c:682 #, c-format msgid "Put a separate column definition list for each function inside ROWS FROM()." msgstr "Placer une liste de définitions de colonnes séparée pour chaque fonction à l'intérieur de ROWS FROM()." -#: parser/parse_clause.c:717 +#: parser/parse_clause.c:688 #, c-format msgid "UNNEST() with multiple arguments cannot have a column definition list" msgstr "UNNEST() avec plusieurs arguments ne peut pas avoir de liste de définition de colonnes" -#: parser/parse_clause.c:718 +#: parser/parse_clause.c:689 #, c-format msgid "Use separate UNNEST() calls inside ROWS FROM(), and attach a column definition list to each one." msgstr "Utiliser des appels séparés UNNEST() à l'intérieur de ROWS FROM(), et attacher une liste de définition des colonnes pour chaque." 
-#: parser/parse_clause.c:725 +#: parser/parse_clause.c:696 #, c-format msgid "WITH ORDINALITY cannot be used with a column definition list" msgstr "WITH ORDINALITY ne peut pas être utilisé avec une liste de définitions de colonnes" -#: parser/parse_clause.c:726 +#: parser/parse_clause.c:697 #, c-format msgid "Put the column definition list inside ROWS FROM()." msgstr "Placez la liste de définitions des colonnes dans ROWS FROM()." -#: parser/parse_clause.c:829 +#: parser/parse_clause.c:800 #, c-format msgid "only one FOR ORDINALITY column is allowed" msgstr "seule une colonne FOR ORDINALITY est autorisée" -#: parser/parse_clause.c:890 +#: parser/parse_clause.c:861 #, c-format msgid "column name \"%s\" is not unique" msgstr "le nom de colonne « %s » n'est pas unique" -#: parser/parse_clause.c:932 +#: parser/parse_clause.c:903 #, c-format msgid "namespace name \"%s\" is not unique" msgstr "l'espace de nom « %s » n'est pas unique" -#: parser/parse_clause.c:942 +#: parser/parse_clause.c:913 #, c-format msgid "only one default namespace is allowed" msgstr "seul un espace de nom par défaut est autorisé" -#: parser/parse_clause.c:1003 +#: parser/parse_clause.c:974 #, c-format msgid "tablesample method %s does not exist" msgstr "la méthode d'échantillonage %s n'existe pas" -#: parser/parse_clause.c:1025 +#: parser/parse_clause.c:996 #, c-format msgid "tablesample method %s requires %d argument, not %d" msgid_plural "tablesample method %s requires %d arguments, not %d" msgstr[0] "la méthode d'échantillonage %s requiert %d argument, et non pas %d" msgstr[1] "la méthode d'échantillonage %s requiert %d arguments, et non pas %d" -#: parser/parse_clause.c:1059 +#: parser/parse_clause.c:1030 #, c-format msgid "tablesample method %s does not support REPEATABLE" msgstr "la méthode d'échantillonage %s ne supporte pas REPEATABLE" -#: parser/parse_clause.c:1220 +#: parser/parse_clause.c:1200 #, c-format msgid "TABLESAMPLE clause can only be applied to tables and materialized views" msgstr "la clause TABLESAMPLE est uniquement applicable pour les tables et les vues matérialisées" -#: parser/parse_clause.c:1390 +#: parser/parse_clause.c:1370 #, c-format msgid "column name \"%s\" appears more than once in USING clause" msgstr "le nom de la colonne « %s » apparaît plus d'une fois dans la clause USING" -#: parser/parse_clause.c:1405 +#: parser/parse_clause.c:1385 #, c-format msgid "common column name \"%s\" appears more than once in left table" msgstr "" "le nom commun de la colonne « %s » apparaît plus d'une fois dans la table de\n" "gauche" -#: parser/parse_clause.c:1414 +#: parser/parse_clause.c:1394 #, c-format msgid "column \"%s\" specified in USING clause does not exist in left table" msgstr "" "la colonne « %s » spécifiée dans la clause USING n'existe pas dans la table\n" "de gauche" -#: parser/parse_clause.c:1428 +#: parser/parse_clause.c:1408 #, c-format msgid "common column name \"%s\" appears more than once in right table" msgstr "" "le nom commun de la colonne « %s » apparaît plus d'une fois dans la table de\n" " droite" -#: parser/parse_clause.c:1437 +#: parser/parse_clause.c:1417 #, c-format msgid "column \"%s\" specified in USING clause does not exist in right table" msgstr "" "la colonne « %s » spécifiée dans la clause USING n'existe pas dans la table\n" "de droite" -#: parser/parse_clause.c:1491 +#: parser/parse_clause.c:1471 #, c-format msgid "column alias list for \"%s\" has too many entries" msgstr "la liste d'alias de colonnes pour « %s » a beaucoup trop d'entrées" #. 
translator: %s is name of a SQL construct, eg LIMIT -#: parser/parse_clause.c:1800 +#: parser/parse_clause.c:1780 #, c-format msgid "argument of %s must not contain variables" msgstr "l'argument de « %s » ne doit pas contenir de variables" #. translator: first %s is name of a SQL construct, eg ORDER BY -#: parser/parse_clause.c:1965 +#: parser/parse_clause.c:1945 #, c-format msgid "%s \"%s\" is ambiguous" msgstr "%s « %s » est ambigu" #. translator: %s is name of a SQL construct, eg ORDER BY -#: parser/parse_clause.c:1994 +#: parser/parse_clause.c:1974 #, c-format msgid "non-integer constant in %s" msgstr "constante non entière dans %s" #. translator: %s is name of a SQL construct, eg ORDER BY -#: parser/parse_clause.c:2016 +#: parser/parse_clause.c:1996 #, c-format msgid "%s position %d is not in select list" msgstr "%s, à la position %d, n'est pas dans la liste SELECT" -#: parser/parse_clause.c:2457 +#: parser/parse_clause.c:2437 #, c-format msgid "CUBE is limited to 12 elements" msgstr "CUBE est limité à 12 éléments" -#: parser/parse_clause.c:2661 +#: parser/parse_clause.c:2641 #, c-format msgid "window \"%s\" is already defined" msgstr "le window « %s » est déjà définie" -#: parser/parse_clause.c:2722 +#: parser/parse_clause.c:2702 #, c-format msgid "cannot override PARTITION BY clause of window \"%s\"" msgstr "n'a pas pu surcharger la clause PARTITION BY de window « %s »" -#: parser/parse_clause.c:2734 +#: parser/parse_clause.c:2714 #, c-format msgid "cannot override ORDER BY clause of window \"%s\"" msgstr "n'a pas pu surcharger la clause ORDER BY de window « %s »" -#: parser/parse_clause.c:2764 parser/parse_clause.c:2770 +#: parser/parse_clause.c:2744 parser/parse_clause.c:2750 #, c-format msgid "cannot copy window \"%s\" because it has a frame clause" msgstr "ne peut pas copier la fenêtre « %s » car il dispose d'une clause de portée" -#: parser/parse_clause.c:2772 +#: parser/parse_clause.c:2752 #, c-format msgid "Omit the parentheses in this OVER clause." msgstr "Omettre les parenthèses dans cette clause OVER." 
-#: parser/parse_clause.c:2838 +#: parser/parse_clause.c:2818 #, c-format msgid "in an aggregate with DISTINCT, ORDER BY expressions must appear in argument list" msgstr "" "dans un agrégat avec DISTINCT, les expressions ORDER BY doivent apparaître\n" "dans la liste d'arguments" -#: parser/parse_clause.c:2839 +#: parser/parse_clause.c:2819 #, c-format msgid "for SELECT DISTINCT, ORDER BY expressions must appear in select list" msgstr "" "pour SELECT DISTINCT, les expressions ORDER BY doivent apparaître dans la\n" "liste SELECT" -#: parser/parse_clause.c:2871 +#: parser/parse_clause.c:2851 #, c-format msgid "an aggregate with DISTINCT must have at least one argument" msgstr "un agrégat avec DISTINCT doit avoir au moins un argument" -#: parser/parse_clause.c:2872 +#: parser/parse_clause.c:2852 #, c-format msgid "SELECT DISTINCT must have at least one column" msgstr "SELECT DISTINCT doit avoir au moins une colonne" -#: parser/parse_clause.c:2938 parser/parse_clause.c:2970 +#: parser/parse_clause.c:2918 parser/parse_clause.c:2950 #, c-format msgid "SELECT DISTINCT ON expressions must match initial ORDER BY expressions" msgstr "" "les expressions SELECT DISTINCT ON doivent correspondre aux expressions\n" "ORDER BY initiales" -#: parser/parse_clause.c:3048 +#: parser/parse_clause.c:3028 #, c-format msgid "ASC/DESC is not allowed in ON CONFLICT clause" msgstr "ASC/DESC n'est pas autorisé avec la clause ON CONFLICT" -#: parser/parse_clause.c:3054 +#: parser/parse_clause.c:3034 #, c-format msgid "NULLS FIRST/LAST is not allowed in ON CONFLICT clause" msgstr "NULLS FIRST/LAST n'est pas autorisé avec la clause ON CONFLICT" -#: parser/parse_clause.c:3134 +#: parser/parse_clause.c:3114 #, c-format msgid "ON CONFLICT DO UPDATE requires inference specification or constraint name" msgstr "ON CONFLICT DO UPDATE requiert une spécification d'inférence ou un nom de contrainte" -#: parser/parse_clause.c:3135 +#: parser/parse_clause.c:3115 #, c-format msgid "For example, ON CONFLICT (column_name)." msgstr "Par exemple, ON CONFLICT (nom_colonne)." -#: parser/parse_clause.c:3146 +#: parser/parse_clause.c:3126 #, c-format msgid "ON CONFLICT is not supported with system catalog tables" msgstr "ON CONFLICT n'est pas supporté avec les catalogues systèmes" -#: parser/parse_clause.c:3154 +#: parser/parse_clause.c:3134 #, c-format msgid "ON CONFLICT is not supported on table \"%s\" used as a catalog table" msgstr "ON CONFLICT n'est pas supporté sur la table « %s » utilisée comme une table catalogue" -#: parser/parse_clause.c:3280 +#: parser/parse_clause.c:3277 #, c-format msgid "operator %s is not a valid ordering operator" msgstr "l'opérateur %s n'est pas un opérateur de tri valide" -#: parser/parse_clause.c:3282 +#: parser/parse_clause.c:3279 #, c-format msgid "Ordering operators must be \"<\" or \">\" members of btree operator families."
msgstr "" @@ -14547,7 +14587,7 @@ msgstr "la référence récursive à la requête « %s » ne doit pas apparaîtr msgid "DEFAULT is not allowed in this context" msgstr "DEFAULT interdit dans ce contexte" -#: parser/parse_expr.c:403 parser/parse_relation.c:3248 parser/parse_relation.c:3268 +#: parser/parse_expr.c:403 parser/parse_relation.c:3286 parser/parse_relation.c:3306 #, c-format msgid "column %s.%s does not exist" msgstr "la colonne %s.%s n'existe pas" @@ -15062,12 +15102,12 @@ msgstr "la référence à la table %u est ambigu" msgid "table name \"%s\" specified more than once" msgstr "le nom de la table « %s » est spécifié plus d'une fois" -#: parser/parse_relation.c:446 parser/parse_relation.c:3188 +#: parser/parse_relation.c:446 parser/parse_relation.c:3226 #, c-format msgid "invalid reference to FROM-clause entry for table \"%s\"" msgstr "référence invalide d'une entrée de la clause FROM pour la table « %s »" -#: parser/parse_relation.c:449 parser/parse_relation.c:3193 +#: parser/parse_relation.c:449 parser/parse_relation.c:3231 #, c-format msgid "There is an entry for table \"%s\", but it cannot be referenced from this part of the query." msgstr "" @@ -15084,87 +15124,87 @@ msgstr "Le type JOIN combiné doit être INNER ou LEFT pour une référence LATE msgid "system column \"%s\" reference in check constraint is invalid" msgstr "la référence de la colonne système « %s » dans la contrainte CHECK est invalide" -#: parser/parse_relation.c:1086 parser/parse_relation.c:1372 parser/parse_relation.c:1941 +#: parser/parse_relation.c:1086 parser/parse_relation.c:1366 parser/parse_relation.c:1935 #, c-format msgid "table \"%s\" has %d columns available but %d columns specified" msgstr "la table « %s » a %d colonnes disponibles mais %d colonnes spécifiées" -#: parser/parse_relation.c:1179 +#: parser/parse_relation.c:1173 #, c-format msgid "There is a WITH item named \"%s\", but it cannot be referenced from this part of the query." msgstr "" "Il existe un élément WITH nommé « %s » mais il ne peut pas être\n" "référencée de cette partie de la requête." -#: parser/parse_relation.c:1181 +#: parser/parse_relation.c:1175 #, c-format msgid "Use WITH RECURSIVE, or re-order the WITH items to remove forward references." msgstr "" "Utilisez WITH RECURSIVE ou ré-ordonnez les éléments WITH pour supprimer\n" "les références en avant." 
-#: parser/parse_relation.c:1492 +#: parser/parse_relation.c:1486 #, c-format msgid "a column definition list is only allowed for functions returning \"record\"" msgstr "" "une liste de définition de colonnes est uniquement autorisée pour les fonctions\n" "renvoyant un « record »" -#: parser/parse_relation.c:1501 +#: parser/parse_relation.c:1495 #, c-format msgid "a column definition list is required for functions returning \"record\"" msgstr "" "une liste de définition de colonnes est requise pour les fonctions renvoyant\n" "un « record »" -#: parser/parse_relation.c:1580 +#: parser/parse_relation.c:1574 #, c-format msgid "function \"%s\" in FROM has unsupported return type %s" msgstr "la fonction « %s » dans la clause FROM a un type de retour %s non supporté" -#: parser/parse_relation.c:1769 +#: parser/parse_relation.c:1763 #, c-format msgid "VALUES lists \"%s\" have %d columns available but %d columns specified" msgstr "" "les listes « %s » de VALUES ont %d colonnes disponibles mais %d colonnes\n" "spécifiées" -#: parser/parse_relation.c:1824 +#: parser/parse_relation.c:1818 #, c-format msgid "joins can have at most %d columns" msgstr "les jointures peuvent avoir au plus %d colonnes" -#: parser/parse_relation.c:1914 +#: parser/parse_relation.c:1908 #, c-format msgid "WITH query \"%s\" does not have a RETURNING clause" msgstr "La requête WITH « %s » n'a pas de clause RETURNING" -#: parser/parse_relation.c:2809 parser/parse_relation.c:2972 +#: parser/parse_relation.c:2843 parser/parse_relation.c:2881 parser/parse_relation.c:3010 #, c-format msgid "column %d of relation \"%s\" does not exist" msgstr "la colonne %d de la relation « %s » n'existe pas" -#: parser/parse_relation.c:3191 +#: parser/parse_relation.c:3229 #, c-format msgid "Perhaps you meant to reference the table alias \"%s\"." msgstr "Peut-être que vous souhaitiez référencer l'alias de la table « %s »." -#: parser/parse_relation.c:3199 +#: parser/parse_relation.c:3237 #, c-format msgid "missing FROM-clause entry for table \"%s\"" msgstr "entrée manquante de la clause FROM pour la table « %s »" -#: parser/parse_relation.c:3251 +#: parser/parse_relation.c:3289 #, c-format msgid "Perhaps you meant to reference the column \"%s.%s\"." msgstr "Peut-être que vous souhaitiez référencer la colonne « %s.%s »." -#: parser/parse_relation.c:3253 +#: parser/parse_relation.c:3291 #, c-format msgid "There is a column named \"%s\" in table \"%s\", but it cannot be referenced from this part of the query." msgstr "Il existe une colonne nommée « %s » pour la table « %s » mais elle ne peut pas être référencée dans cette partie de la requête." -#: parser/parse_relation.c:3270 +#: parser/parse_relation.c:3308 #, c-format msgid "Perhaps you meant to reference the column \"%s.%s\" or the column \"%s.%s\"." msgstr "Peut-être que vous souhaitiez référencer la colonne « %s.%s » ou la colonne « %s.%s »." 
@@ -15235,7 +15275,7 @@ msgstr "référence %%TYPE invalide (trop de points entre les noms) : %s" msgid "type reference %s converted to %s" msgstr "référence de type %s convertie en %s" -#: parser/parse_type.c:261 parser/parse_type.c:804 utils/cache/typcache.c:243 +#: parser/parse_type.c:261 parser/parse_type.c:804 utils/cache/typcache.c:245 #, c-format msgid "type \"%s\" is only a shell" msgstr "le type « %s » est seulement un shell" @@ -15255,290 +15295,310 @@ msgstr "les modificateurs de type doivent être des constantes ou des identifian msgid "invalid type name \"%s\"" msgstr "nom de type « %s » invalide" -#: parser/parse_utilcmd.c:265 +#: parser/parse_utilcmd.c:269 #, c-format msgid "cannot create partitioned table as inheritance child" msgstr "ne peut pas créer une table partitionnée comme la fille d'un héritage" -#: parser/parse_utilcmd.c:435 +#: parser/parse_utilcmd.c:439 #, c-format msgid "%s will create implicit sequence \"%s\" for serial column \"%s.%s\"" msgstr "%s créera une séquence implicite « %s » pour la colonne serial « %s.%s »" -#: parser/parse_utilcmd.c:550 +#: parser/parse_utilcmd.c:562 #, c-format msgid "array of serial is not implemented" msgstr "le tableau de type serial n'est pas implanté" -#: parser/parse_utilcmd.c:626 parser/parse_utilcmd.c:638 +#: parser/parse_utilcmd.c:638 parser/parse_utilcmd.c:650 #, c-format msgid "conflicting NULL/NOT NULL declarations for column \"%s\" of table \"%s\"" msgstr "déclarations NULL/NOT NULL en conflit pour la colonne « %s » de la table « %s »" -#: parser/parse_utilcmd.c:650 +#: parser/parse_utilcmd.c:662 #, c-format msgid "multiple default values specified for column \"%s\" of table \"%s\"" msgstr "" "plusieurs valeurs par défaut sont spécifiées pour la colonne « %s » de la table\n" "« %s »" -#: parser/parse_utilcmd.c:671 +#: parser/parse_utilcmd.c:679 +#, c-format +msgid "identity columns are not supported on typed tables" +msgstr "les colonnes d'identité ne sont pas supportées sur les tables typées" + +#: parser/parse_utilcmd.c:683 +#, c-format +msgid "identity columns are not supported on partitions" +msgstr "les colonnes d'identité ne sont pas supportées sur les partitions" + +#: parser/parse_utilcmd.c:692 #, c-format msgid "multiple identity specifications for column \"%s\" of table \"%s\"" msgstr "plusieurs spécifications d'identité pour la colonne « %s » de la table « %s »" -#: parser/parse_utilcmd.c:694 parser/parse_utilcmd.c:811 +#: parser/parse_utilcmd.c:715 parser/parse_utilcmd.c:832 #, c-format msgid "primary key constraints are not supported on foreign tables" msgstr "les clés primaires ne sont pas supportées par les tables distantes" -#: parser/parse_utilcmd.c:700 parser/parse_utilcmd.c:817 +#: parser/parse_utilcmd.c:721 parser/parse_utilcmd.c:838 #, c-format msgid "primary key constraints are not supported on partitioned tables" msgstr "les clés primaires ne sont pas supportées sur les tables partitionnées" -#: parser/parse_utilcmd.c:709 parser/parse_utilcmd.c:827 +#: parser/parse_utilcmd.c:730 parser/parse_utilcmd.c:848 #, c-format msgid "unique constraints are not supported on foreign tables" msgstr "les contraintes uniques ne sont pas supportées par les tables distantes" -#: parser/parse_utilcmd.c:715 parser/parse_utilcmd.c:833 +#: parser/parse_utilcmd.c:736 parser/parse_utilcmd.c:854 #, c-format msgid "unique constraints are not supported on partitioned tables" msgstr "les contraintes uniques ne sont pas supportées sur les tables partitionnées" -#: parser/parse_utilcmd.c:732
parser/parse_utilcmd.c:863 +#: parser/parse_utilcmd.c:753 parser/parse_utilcmd.c:884 #, c-format msgid "foreign key constraints are not supported on foreign tables" msgstr "les clés étrangères ne sont pas supportées par les tables distantes" -#: parser/parse_utilcmd.c:738 parser/parse_utilcmd.c:869 +#: parser/parse_utilcmd.c:759 parser/parse_utilcmd.c:890 #, c-format msgid "foreign key constraints are not supported on partitioned tables" msgstr "les clés étrangères ne sont pas supportées sur les tables partitionnées" -#: parser/parse_utilcmd.c:766 +#: parser/parse_utilcmd.c:787 #, c-format msgid "both default and identity specified for column \"%s\" of table \"%s\"" msgstr "une valeur par défaut et une identité ont été spécifiées pour la colonne « %s » de la table « %s »" -#: parser/parse_utilcmd.c:843 +#: parser/parse_utilcmd.c:864 #, c-format msgid "exclusion constraints are not supported on foreign tables" msgstr "les contraintes d'exclusion ne sont pas supportées par les tables distantes" -#: parser/parse_utilcmd.c:849 +#: parser/parse_utilcmd.c:870 #, c-format msgid "exclusion constraints are not supported on partitioned tables" msgstr "les contraintes d'exclusion ne sont pas supportées sur les tables partitionnées" -#: parser/parse_utilcmd.c:919 +#: parser/parse_utilcmd.c:940 #, c-format msgid "LIKE is not supported for creating foreign tables" msgstr "LIKE n'est pas supporté pour la création de tables distantes" -#: parser/parse_utilcmd.c:1474 parser/parse_utilcmd.c:1550 +#: parser/parse_utilcmd.c:1495 parser/parse_utilcmd.c:1571 #, c-format msgid "Index \"%s\" contains a whole-row table reference." msgstr "l'index « %s » contient une référence de table de ligne complète" -#: parser/parse_utilcmd.c:1819 +#: parser/parse_utilcmd.c:1840 #, c-format msgid "cannot use an existing index in CREATE TABLE" msgstr "ne peut pas utiliser un index existant dans CREATE TABLE" -#: parser/parse_utilcmd.c:1839 +#: parser/parse_utilcmd.c:1860 #, c-format msgid "index \"%s\" is already associated with a constraint" msgstr "l'index « %s » est déjà associé à une contrainte" -#: parser/parse_utilcmd.c:1847 +#: parser/parse_utilcmd.c:1868 #, c-format msgid "index \"%s\" does not belong to table \"%s\"" msgstr "l'index « %s » n'appartient pas à la table « %s »" -#: parser/parse_utilcmd.c:1854 +#: parser/parse_utilcmd.c:1875 #, c-format msgid "index \"%s\" is not valid" msgstr "l'index « %s » n'est pas valide" -#: parser/parse_utilcmd.c:1860 +#: parser/parse_utilcmd.c:1881 #, c-format msgid "\"%s\" is not a unique index" msgstr "« %s » n'est pas un index unique" -#: parser/parse_utilcmd.c:1861 parser/parse_utilcmd.c:1868 parser/parse_utilcmd.c:1875 parser/parse_utilcmd.c:1945 +#: parser/parse_utilcmd.c:1882 parser/parse_utilcmd.c:1889 parser/parse_utilcmd.c:1896 parser/parse_utilcmd.c:1966 #, c-format msgid "Cannot create a primary key or unique constraint using such an index." msgstr "Ne peut pas créer une clé primaire ou une contrainte unique avec cet index." 
-#: parser/parse_utilcmd.c:1867 +#: parser/parse_utilcmd.c:1888 #, c-format msgid "index \"%s\" contains expressions" msgstr "l'index « %s » contient des expressions" -#: parser/parse_utilcmd.c:1874 +#: parser/parse_utilcmd.c:1895 #, c-format msgid "\"%s\" is a partial index" msgstr "« %s » est un index partiel" -#: parser/parse_utilcmd.c:1886 +#: parser/parse_utilcmd.c:1907 #, c-format msgid "\"%s\" is a deferrable index" msgstr "« %s » est un index déferrable" -#: parser/parse_utilcmd.c:1887 +#: parser/parse_utilcmd.c:1908 #, c-format msgid "Cannot create a non-deferrable constraint using a deferrable index." msgstr "Ne peut pas créer une contrainte non-déferrable utilisant un index déferrable." -#: parser/parse_utilcmd.c:1944 +#: parser/parse_utilcmd.c:1965 #, c-format msgid "index \"%s\" does not have default sorting behavior" msgstr "l'index « %s » n'a pas de comportement de tri par défaut" -#: parser/parse_utilcmd.c:2088 +#: parser/parse_utilcmd.c:2109 #, c-format msgid "column \"%s\" appears twice in primary key constraint" msgstr "la colonne « %s » apparaît deux fois dans la contrainte de la clé primaire" -#: parser/parse_utilcmd.c:2094 +#: parser/parse_utilcmd.c:2115 #, c-format msgid "column \"%s\" appears twice in unique constraint" msgstr "la colonne « %s » apparaît deux fois dans une contrainte unique" -#: parser/parse_utilcmd.c:2303 +#: parser/parse_utilcmd.c:2324 #, c-format msgid "index expressions and predicates can refer only to the table being indexed" msgstr "les expressions et prédicats d'index peuvent seulement faire référence à la table en cours d'indexage" -#: parser/parse_utilcmd.c:2349 +#: parser/parse_utilcmd.c:2370 #, c-format msgid "rules on materialized views are not supported" msgstr "les règles ne sont pas supportées sur les vues matérialisées" -#: parser/parse_utilcmd.c:2410 +#: parser/parse_utilcmd.c:2431 #, c-format msgid "rule WHERE condition cannot contain references to other relations" msgstr "" "la condition WHERE d'une règle ne peut pas contenir de références à d'autres\n" "relations" -#: parser/parse_utilcmd.c:2482 +#: parser/parse_utilcmd.c:2503 #, c-format msgid "rules with WHERE conditions can only have SELECT, INSERT, UPDATE, or DELETE actions" msgstr "" "les règles avec des conditions WHERE ne peuvent contenir que des actions\n" "SELECT, INSERT, UPDATE ou DELETE" -#: parser/parse_utilcmd.c:2500 parser/parse_utilcmd.c:2599 rewrite/rewriteHandler.c:500 rewrite/rewriteManip.c:1015 +#: parser/parse_utilcmd.c:2521 parser/parse_utilcmd.c:2620 rewrite/rewriteHandler.c:498 rewrite/rewriteManip.c:1015 #, c-format msgid "conditional UNION/INTERSECT/EXCEPT statements are not implemented" msgstr "" "les instructions conditionnelles UNION/INTERSECT/EXCEPT ne sont pas\n" "implémentées" -#: parser/parse_utilcmd.c:2518 +#: parser/parse_utilcmd.c:2539 #, c-format msgid "ON SELECT rule cannot use OLD" msgstr "la règle ON SELECT ne peut pas utiliser OLD" -#: parser/parse_utilcmd.c:2522 +#: parser/parse_utilcmd.c:2543 #, c-format msgid "ON SELECT rule cannot use NEW" msgstr "la règle ON SELECT ne peut pas utiliser NEW" -#: parser/parse_utilcmd.c:2531 +#: parser/parse_utilcmd.c:2552 #, c-format msgid "ON INSERT rule cannot use OLD" msgstr "la règle ON INSERT ne peut pas utiliser OLD" -#: parser/parse_utilcmd.c:2537 +#: parser/parse_utilcmd.c:2558 #, c-format msgid "ON DELETE rule cannot use NEW" msgstr "la règle ON DELETE ne peut pas utiliser NEW" -#: parser/parse_utilcmd.c:2565 +#: parser/parse_utilcmd.c:2586 #, c-format msgid "cannot refer to OLD within WITH
query" msgstr "ne peut référencer OLD dans une requête WITH" -#: parser/parse_utilcmd.c:2572 +#: parser/parse_utilcmd.c:2593 #, c-format msgid "cannot refer to NEW within WITH query" msgstr "ne peut référencer NEW dans une requête WITH" -#: parser/parse_utilcmd.c:3005 +#: parser/parse_utilcmd.c:3027 #, c-format msgid "misplaced DEFERRABLE clause" msgstr "clause DEFERRABLE mal placée" -#: parser/parse_utilcmd.c:3010 parser/parse_utilcmd.c:3025 +#: parser/parse_utilcmd.c:3032 parser/parse_utilcmd.c:3047 #, c-format msgid "multiple DEFERRABLE/NOT DEFERRABLE clauses not allowed" msgstr "clauses DEFERRABLE/NOT DEFERRABLE multiples non autorisées" -#: parser/parse_utilcmd.c:3020 +#: parser/parse_utilcmd.c:3042 #, c-format msgid "misplaced NOT DEFERRABLE clause" msgstr "clause NOT DEFERRABLE mal placée" -#: parser/parse_utilcmd.c:3041 +#: parser/parse_utilcmd.c:3063 #, c-format msgid "misplaced INITIALLY DEFERRED clause" msgstr "clause INITIALLY DEFERRED mal placée" -#: parser/parse_utilcmd.c:3046 parser/parse_utilcmd.c:3072 +#: parser/parse_utilcmd.c:3068 parser/parse_utilcmd.c:3094 #, c-format msgid "multiple INITIALLY IMMEDIATE/DEFERRED clauses not allowed" msgstr "clauses INITIALLY IMMEDIATE/DEFERRED multiples non autorisées" -#: parser/parse_utilcmd.c:3067 +#: parser/parse_utilcmd.c:3089 #, c-format msgid "misplaced INITIALLY IMMEDIATE clause" msgstr "clause INITIALLY IMMEDIATE mal placée" -#: parser/parse_utilcmd.c:3258 +#: parser/parse_utilcmd.c:3280 #, c-format msgid "CREATE specifies a schema (%s) different from the one being created (%s)" msgstr "CREATE spécifie un schéma (%s) différent de celui tout juste créé (%s)" -#: parser/parse_utilcmd.c:3317 +#: parser/parse_utilcmd.c:3339 #, c-format msgid "invalid bound specification for a list partition" msgstr "spécification de limite invalide pour une partition par liste" -#: parser/parse_utilcmd.c:3373 +#: parser/parse_utilcmd.c:3395 #, c-format msgid "invalid bound specification for a range partition" msgstr "spécification de limite invalide pour une partition par intervalle" -#: parser/parse_utilcmd.c:3379 +#: parser/parse_utilcmd.c:3401 #, c-format msgid "FROM must specify exactly one value per partitioning column" msgstr "FROM doit spécifier exactement une valeur par colonne de partitionnement" -#: parser/parse_utilcmd.c:3383 +#: parser/parse_utilcmd.c:3405 #, c-format msgid "TO must specify exactly one value per partitioning column" msgstr "TO doit spécifier exactement une valeur par colonne de partitionnement" -#: parser/parse_utilcmd.c:3423 parser/parse_utilcmd.c:3437 +#: parser/parse_utilcmd.c:3452 parser/parse_utilcmd.c:3466 #, c-format msgid "cannot specify NULL in range bound" msgstr "ne peut pas spécifier NULL dans la limite de l'intervalle" -#: parser/parse_utilcmd.c:3480 parser/parse_utilcmd.c:3492 +#: parser/parse_utilcmd.c:3513 +#, c-format +msgid "every bound following MAXVALUE must also be MAXVALUE" +msgstr "chaque limite suivant MAXVALUE doit aussi être MAXVALUE" + +#: parser/parse_utilcmd.c:3519 +#, c-format +msgid "every bound following MINVALUE must also be MINVALUE" +msgstr "chaque limite suivant MINVALUE doit aussi être MINVALUE" + +#: parser/parse_utilcmd.c:3549 parser/parse_utilcmd.c:3561 #, c-format msgid "specified value cannot be cast to type %s for column \"%s\"" msgstr "la valeur spécifiée ne peut pas être convertie vers le type %s pour la colonne « %s »" -#: parser/parse_utilcmd.c:3494 +#: parser/parse_utilcmd.c:3563 #, c-format msgid "The cast requires a non-immutable conversion." 
msgstr "Cette conversion requiert une conversion non immutable." -#: parser/parse_utilcmd.c:3495 +#: parser/parse_utilcmd.c:3564 #, c-format msgid "Try putting the literal value in single quotes." msgstr "Placer la valeur littérale en guillemets simples." @@ -15737,57 +15797,57 @@ msgstr "L'appel système qui a échoué était DuplicateHandle." msgid "Failed system call was MapViewOfFileEx." msgstr "L'appel système qui a échoué était MapViewOfFileEx." -#: postmaster/autovacuum.c:416 +#: postmaster/autovacuum.c:405 #, c-format msgid "could not fork autovacuum launcher process: %m" msgstr "n'a pas pu exécuter le processus autovacuum maître : %m" -#: postmaster/autovacuum.c:452 +#: postmaster/autovacuum.c:441 #, c-format msgid "autovacuum launcher started" msgstr "lancement du processus autovacuum" -#: postmaster/autovacuum.c:839 +#: postmaster/autovacuum.c:825 #, c-format msgid "autovacuum launcher shutting down" msgstr "arrêt du processus autovacuum" -#: postmaster/autovacuum.c:1501 +#: postmaster/autovacuum.c:1487 #, c-format msgid "could not fork autovacuum worker process: %m" msgstr "n'a pas pu exécuter le processus autovacuum worker : %m" -#: postmaster/autovacuum.c:1707 +#: postmaster/autovacuum.c:1685 #, c-format msgid "autovacuum: processing database \"%s\"" msgstr "autovacuum : traitement de la base de données « %s »" -#: postmaster/autovacuum.c:2281 +#: postmaster/autovacuum.c:2260 #, c-format msgid "autovacuum: dropping orphan temp table \"%s.%s.%s\"" msgstr "autovacuum : suppression de la table temporaire orpheline « %s.%s.%s »" -#: postmaster/autovacuum.c:2487 +#: postmaster/autovacuum.c:2468 #, c-format msgid "automatic vacuum of table \"%s.%s.%s\"" msgstr "VACUUM automatique de la table « %s.%s.%s »" -#: postmaster/autovacuum.c:2490 +#: postmaster/autovacuum.c:2471 #, c-format msgid "automatic analyze of table \"%s.%s.%s\"" msgstr "ANALYZE automatique de la table « %s.%s.%s »" -#: postmaster/autovacuum.c:2701 +#: postmaster/autovacuum.c:2664 #, c-format msgid "processing work entry for relation \"%s.%s.%s\"" msgstr "traitement de l'enregistrement de travail pour la relation « %s.%s.%s »" -#: postmaster/autovacuum.c:3345 +#: postmaster/autovacuum.c:3239 #, c-format msgid "autovacuum not started because of misconfiguration" msgstr "autovacuum non exécuté à cause d'une mauvaise configuration" -#: postmaster/autovacuum.c:3346 +#: postmaster/autovacuum.c:3240 #, c-format msgid "Enable the \"track_counts\" option." msgstr "Activez l'option « track_counts »." @@ -15917,7 +15977,7 @@ msgstr "La commande d'archivage qui a échoué était : %s" msgid "archive command was terminated by exception 0x%X" msgstr "la commande d'archivage a été terminée par l'exception 0x%X" -#: postmaster/pgarch.c:598 postmaster/postmaster.c:3567 +#: postmaster/pgarch.c:598 postmaster/postmaster.c:3615 #, c-format msgid "See C include file \"ntstatus.h\" for a description of the hexadecimal value." msgstr "" @@ -16038,219 +16098,219 @@ msgstr "cible reset non reconnu : « %s »" msgid "Target must be \"archiver\" or \"bgwriter\"." msgstr "La cible doit être « archiver » ou « bgwriter »." 
-#: postmaster/pgstat.c:4287 +#: postmaster/pgstat.c:4296 #, c-format msgid "could not read statistics message: %m" msgstr "n'a pas pu lire le message des statistiques : %m" -#: postmaster/pgstat.c:4619 postmaster/pgstat.c:4776 +#: postmaster/pgstat.c:4628 postmaster/pgstat.c:4785 #, c-format msgid "could not open temporary statistics file \"%s\": %m" msgstr "n'a pas pu ouvrir le fichier temporaire des statistiques « %s » : %m" -#: postmaster/pgstat.c:4686 postmaster/pgstat.c:4821 +#: postmaster/pgstat.c:4695 postmaster/pgstat.c:4830 #, c-format msgid "could not write temporary statistics file \"%s\": %m" msgstr "n'a pas pu écrire le fichier temporaire des statistiques « %s » : %m" -#: postmaster/pgstat.c:4695 postmaster/pgstat.c:4830 +#: postmaster/pgstat.c:4704 postmaster/pgstat.c:4839 #, c-format msgid "could not close temporary statistics file \"%s\": %m" msgstr "n'a pas pu fermer le fichier temporaire des statistiques « %s » : %m" -#: postmaster/pgstat.c:4703 postmaster/pgstat.c:4838 +#: postmaster/pgstat.c:4712 postmaster/pgstat.c:4847 #, c-format msgid "could not rename temporary statistics file \"%s\" to \"%s\": %m" msgstr "" "n'a pas pu renommer le fichier temporaire des statistiques « %s » en\n" "« %s » : %m" -#: postmaster/pgstat.c:4927 postmaster/pgstat.c:5133 postmaster/pgstat.c:5286 +#: postmaster/pgstat.c:4936 postmaster/pgstat.c:5142 postmaster/pgstat.c:5295 #, c-format msgid "could not open statistics file \"%s\": %m" msgstr "n'a pas pu ouvrir le fichier de statistiques « %s » : %m" -#: postmaster/pgstat.c:4939 postmaster/pgstat.c:4949 postmaster/pgstat.c:4970 postmaster/pgstat.c:4992 postmaster/pgstat.c:5007 postmaster/pgstat.c:5070 postmaster/pgstat.c:5145 postmaster/pgstat.c:5165 postmaster/pgstat.c:5183 postmaster/pgstat.c:5199 postmaster/pgstat.c:5217 postmaster/pgstat.c:5233 postmaster/pgstat.c:5298 postmaster/pgstat.c:5310 postmaster/pgstat.c:5322 postmaster/pgstat.c:5347 postmaster/pgstat.c:5369 +#: postmaster/pgstat.c:4948 postmaster/pgstat.c:4958 postmaster/pgstat.c:4979 postmaster/pgstat.c:5001 postmaster/pgstat.c:5016 postmaster/pgstat.c:5079 postmaster/pgstat.c:5154 postmaster/pgstat.c:5174 postmaster/pgstat.c:5192 postmaster/pgstat.c:5208 postmaster/pgstat.c:5226 postmaster/pgstat.c:5242 postmaster/pgstat.c:5307 postmaster/pgstat.c:5319 postmaster/pgstat.c:5331 postmaster/pgstat.c:5356 postmaster/pgstat.c:5378 #, c-format msgid "corrupted statistics file \"%s\"" msgstr "fichier de statistiques « %s » corrompu" -#: postmaster/pgstat.c:5498 +#: postmaster/pgstat.c:5507 #, c-format msgid "using stale statistics instead of current ones because stats collector is not responding" msgstr "" "utilise de vieilles statistiques à la place des actuelles car le collecteur de\n" "statistiques ne répond pas" -#: postmaster/pgstat.c:5825 +#: postmaster/pgstat.c:5834 #, c-format msgid "database hash table corrupted during cleanup --- abort" msgstr "" "corruption de la table hachée de la base de données lors du lancement\n" "--- annulation" -#: postmaster/postmaster.c:710 +#: postmaster/postmaster.c:712 #, c-format msgid "%s: invalid argument for option -f: \"%s\"\n" msgstr "%s : argument invalide pour l'option -f : « %s »\n" -#: postmaster/postmaster.c:796 +#: postmaster/postmaster.c:798 #, c-format msgid "%s: invalid argument for option -t: \"%s\"\n" msgstr "%s : argument invalide pour l'option -t : « %s »\n" -#: postmaster/postmaster.c:847 +#: postmaster/postmaster.c:849 #, c-format msgid "%s: invalid argument: \"%s\"\n" msgstr "%s : argument invalide : « %s »\n" -#: 
postmaster/postmaster.c:886 +#: postmaster/postmaster.c:888 #, c-format msgid "%s: superuser_reserved_connections must be less than max_connections\n" msgstr "%s : superuser_reserved_connections doit être inférieur à max_connections\n" -#: postmaster/postmaster.c:891 +#: postmaster/postmaster.c:893 #, c-format msgid "%s: max_wal_senders must be less than max_connections\n" msgstr "%s : max_wal_senders doit être inférieur à max_connections\n" -#: postmaster/postmaster.c:896 +#: postmaster/postmaster.c:898 #, c-format msgid "WAL archival cannot be enabled when wal_level is \"minimal\"" msgstr "L'archivage des journaux de transactions ne peut pas être activé quand wal_level vaut « minimal »" -#: postmaster/postmaster.c:899 +#: postmaster/postmaster.c:901 #, c-format msgid "WAL streaming (max_wal_senders > 0) requires wal_level \"replica\" or \"logical\"" msgstr "" "l'envoi d'un flux de transactions (max_wal_senders > 0) nécessite que\n" "le paramètre wal_level soit initialisé avec « replica » ou « logical »" -#: postmaster/postmaster.c:907 +#: postmaster/postmaster.c:909 #, c-format msgid "%s: invalid datetoken tables, please fix\n" msgstr "%s : tables datetoken invalide, merci de corriger\n" -#: postmaster/postmaster.c:1010 postmaster/postmaster.c:1108 utils/init/miscinit.c:1455 +#: postmaster/postmaster.c:1012 postmaster/postmaster.c:1110 utils/init/miscinit.c:1455 #, c-format msgid "invalid list syntax in parameter \"%s\"" msgstr "syntaxe de liste invalide pour le paramètre « %s »" -#: postmaster/postmaster.c:1041 +#: postmaster/postmaster.c:1043 #, c-format msgid "could not create listen socket for \"%s\"" msgstr "n'a pas pu créer le socket d'écoute pour « %s »" -#: postmaster/postmaster.c:1047 +#: postmaster/postmaster.c:1049 #, c-format msgid "could not create any TCP/IP sockets" msgstr "n'a pas pu créer de socket TCP/IP" -#: postmaster/postmaster.c:1130 +#: postmaster/postmaster.c:1132 #, c-format msgid "could not create Unix-domain socket in directory \"%s\"" msgstr "n'a pas pu créer la socket de domaine Unix dans le répertoire « %s »" -#: postmaster/postmaster.c:1136 +#: postmaster/postmaster.c:1138 #, c-format msgid "could not create any Unix-domain sockets" msgstr "n'a pas pu créer les sockets de domaine Unix" -#: postmaster/postmaster.c:1148 +#: postmaster/postmaster.c:1150 #, c-format msgid "no socket created for listening" msgstr "pas de socket créé pour l'écoute" -#: postmaster/postmaster.c:1188 +#: postmaster/postmaster.c:1190 #, c-format msgid "could not create I/O completion port for child queue" msgstr "n'a pas pu créer un port de terminaison I/O pour la queue" -#: postmaster/postmaster.c:1217 +#: postmaster/postmaster.c:1219 #, c-format msgid "%s: could not change permissions of external PID file \"%s\": %s\n" msgstr "%s : n'a pas pu modifier les droits du fichier PID externe « %s » : %s\n" -#: postmaster/postmaster.c:1221 +#: postmaster/postmaster.c:1223 #, c-format msgid "%s: could not write external PID file \"%s\": %s\n" msgstr "%s : n'a pas pu écrire le fichier PID externe « %s » : %s\n" -#: postmaster/postmaster.c:1278 +#: postmaster/postmaster.c:1280 #, c-format msgid "ending log output to stderr" msgstr "arrêt des traces sur stderr" -#: postmaster/postmaster.c:1279 +#: postmaster/postmaster.c:1281 #, c-format msgid "Future log output will go to log destination \"%s\"." msgstr "Les traces suivantes iront sur « %s »." 
-#: postmaster/postmaster.c:1305 utils/init/postinit.c:213 +#: postmaster/postmaster.c:1307 utils/init/postinit.c:213 #, c-format msgid "could not load pg_hba.conf" msgstr "n'a pas pu charger pg_hba.conf" -#: postmaster/postmaster.c:1331 +#: postmaster/postmaster.c:1333 #, c-format msgid "postmaster became multithreaded during startup" msgstr "le postmaster est devenu multithreadé lors du démarrage" -#: postmaster/postmaster.c:1332 +#: postmaster/postmaster.c:1334 #, c-format msgid "Set the LC_ALL environment variable to a valid locale." msgstr "Configurez la variable d'environnement LC_ALL avec une locale valide." -#: postmaster/postmaster.c:1437 +#: postmaster/postmaster.c:1439 #, c-format msgid "%s: could not locate matching postgres executable" msgstr "%s : n'a pas pu localiser l'exécutable postgres correspondant" -#: postmaster/postmaster.c:1460 utils/misc/tzparser.c:341 +#: postmaster/postmaster.c:1462 utils/misc/tzparser.c:341 #, c-format msgid "This may indicate an incomplete PostgreSQL installation, or that the file \"%s\" has been moved away from its proper location." msgstr "Ceci peut indiquer une installation PostgreSQL incomplète, ou que le fichier « %s » a été déplacé." -#: postmaster/postmaster.c:1488 +#: postmaster/postmaster.c:1490 #, c-format msgid "data directory \"%s\" does not exist" msgstr "le répertoire des données « %s » n'existe pas" -#: postmaster/postmaster.c:1493 +#: postmaster/postmaster.c:1495 #, c-format msgid "could not read permissions of directory \"%s\": %m" msgstr "n'a pas pu lire les droits du répertoire « %s » : %m" -#: postmaster/postmaster.c:1501 +#: postmaster/postmaster.c:1503 #, c-format msgid "specified data directory \"%s\" is not a directory" msgstr "le répertoire des données « %s » n'est pas un répertoire" -#: postmaster/postmaster.c:1517 +#: postmaster/postmaster.c:1519 #, c-format msgid "data directory \"%s\" has wrong ownership" msgstr "le répertoire des données « %s » a un mauvais propriétaire" -#: postmaster/postmaster.c:1519 +#: postmaster/postmaster.c:1521 #, c-format msgid "The server must be started by the user that owns the data directory." msgstr "" "Le serveur doit être en cours d'exécution par l'utilisateur qui possède le\n" "répertoire des données." -#: postmaster/postmaster.c:1539 +#: postmaster/postmaster.c:1541 #, c-format msgid "data directory \"%s\" has group or world access" msgstr "" "le répertoire des données « %s » est accessible par le groupe et/ou par les\n" "autres" -#: postmaster/postmaster.c:1541 +#: postmaster/postmaster.c:1543 #, c-format msgid "Permissions should be u=rwx (0700)." msgstr "Les droits devraient être u=rwx (0700)." 
-#: postmaster/postmaster.c:1552 +#: postmaster/postmaster.c:1554 #, c-format msgid "" "%s: could not find the database system\n" @@ -16261,39 +16321,39 @@ msgstr "" "S'attendait à le trouver dans le répertoire « %s »,\n" "mais n'a pas réussi à ouvrir le fichier « %s »: %s\n" -#: postmaster/postmaster.c:1729 +#: postmaster/postmaster.c:1731 #, c-format msgid "select() failed in postmaster: %m" msgstr "échec de select() dans postmaster : %m" -#: postmaster/postmaster.c:1884 +#: postmaster/postmaster.c:1886 #, c-format msgid "performing immediate shutdown because data directory lock file is invalid" msgstr "forçage d'un arrêt immédiat car le fichier de verrou du répertoire de données est invalide" -#: postmaster/postmaster.c:1962 postmaster/postmaster.c:1993 +#: postmaster/postmaster.c:1964 postmaster/postmaster.c:1995 #, c-format msgid "incomplete startup packet" msgstr "paquet de démarrage incomplet" -#: postmaster/postmaster.c:1974 +#: postmaster/postmaster.c:1976 #, c-format msgid "invalid length of startup packet" msgstr "longueur invalide du paquet de démarrage" -#: postmaster/postmaster.c:2032 +#: postmaster/postmaster.c:2034 #, c-format msgid "failed to send SSL negotiation response: %m" msgstr "échec lors de l'envoi de la réponse de négotiation SSL : %m" -#: postmaster/postmaster.c:2061 +#: postmaster/postmaster.c:2060 #, c-format msgid "unsupported frontend protocol %u.%u: server supports %u.0 to %u.%u" msgstr "" "Protocole non supportée de l'interface %u.%u : le serveur supporte de %u.0 à\n" "%u.%u" -#: postmaster/postmaster.c:2124 utils/misc/guc.c:5770 utils/misc/guc.c:5863 utils/misc/guc.c:7164 utils/misc/guc.c:9918 utils/misc/guc.c:9952 +#: postmaster/postmaster.c:2124 utils/misc/guc.c:5770 utils/misc/guc.c:5863 utils/misc/guc.c:7164 utils/misc/guc.c:9911 utils/misc/guc.c:9945 #, c-format msgid "invalid value for parameter \"%s\": \"%s\"" msgstr "valeur invalide pour le paramètre « %s » : « %s »" @@ -16303,364 +16363,364 @@ msgstr "valeur invalide pour le paramètre « %s » : « %s »" msgid "Valid values are: \"false\", 0, \"true\", 1, \"database\"." msgstr "Les valeurs valides sont : « false », « 0 », « true », « 1 », « database »." 
-#: postmaster/postmaster.c:2147 +#: postmaster/postmaster.c:2157 #, c-format msgid "invalid startup packet layout: expected terminator as last byte" msgstr "" "configuration invalide du paquet de démarrage : terminaison attendue comme\n" "dernier octet" -#: postmaster/postmaster.c:2175 +#: postmaster/postmaster.c:2195 #, c-format msgid "no PostgreSQL user name specified in startup packet" msgstr "aucun nom d'utilisateur PostgreSQL n'a été spécifié dans le paquet de démarrage" -#: postmaster/postmaster.c:2234 +#: postmaster/postmaster.c:2254 #, c-format msgid "the database system is starting up" msgstr "le système de bases de données se lance" -#: postmaster/postmaster.c:2239 +#: postmaster/postmaster.c:2259 #, c-format msgid "the database system is shutting down" msgstr "le système de base de données s'arrête" -#: postmaster/postmaster.c:2244 +#: postmaster/postmaster.c:2264 #, c-format msgid "the database system is in recovery mode" msgstr "le système de bases de données est en cours de restauration" -#: postmaster/postmaster.c:2249 storage/ipc/procarray.c:291 storage/ipc/sinvaladt.c:298 storage/lmgr/proc.c:338 +#: postmaster/postmaster.c:2269 storage/ipc/procarray.c:292 storage/ipc/sinvaladt.c:298 storage/lmgr/proc.c:338 #, c-format msgid "sorry, too many clients already" msgstr "désolé, trop de clients sont déjà connectés" -#: postmaster/postmaster.c:2311 +#: postmaster/postmaster.c:2359 #, c-format msgid "wrong key in cancel request for process %d" msgstr "mauvaise clé dans la demande d'annulation pour le processus %d" -#: postmaster/postmaster.c:2319 +#: postmaster/postmaster.c:2367 #, c-format msgid "PID %d in cancel request did not match any process" msgstr "le PID %d dans la demande d'annulation ne correspond à aucun processus" -#: postmaster/postmaster.c:2530 +#: postmaster/postmaster.c:2578 #, c-format msgid "received SIGHUP, reloading configuration files" msgstr "a reçu SIGHUP, rechargement des fichiers de configuration" -#: postmaster/postmaster.c:2555 +#: postmaster/postmaster.c:2603 #, c-format msgid "pg_hba.conf was not reloaded" msgstr "pg_hba.conf n'a pas été rechargé" -#: postmaster/postmaster.c:2559 +#: postmaster/postmaster.c:2607 #, c-format msgid "pg_ident.conf was not reloaded" msgstr "pg_ident.conf n'a pas été rechargé" -#: postmaster/postmaster.c:2569 +#: postmaster/postmaster.c:2617 #, c-format msgid "SSL configuration was not reloaded" msgstr "la configuration SSL n'a pas été rechargée" -#: postmaster/postmaster.c:2617 +#: postmaster/postmaster.c:2665 #, c-format msgid "received smart shutdown request" msgstr "a reçu une demande d'arrêt intelligent" -#: postmaster/postmaster.c:2675 +#: postmaster/postmaster.c:2723 #, c-format msgid "received fast shutdown request" msgstr "a reçu une demande d'arrêt rapide" -#: postmaster/postmaster.c:2708 +#: postmaster/postmaster.c:2756 #, c-format msgid "aborting any active transactions" msgstr "annulation des transactions actives" -#: postmaster/postmaster.c:2742 +#: postmaster/postmaster.c:2790 #, c-format msgid "received immediate shutdown request" msgstr "a reçu une demande d'arrêt immédiat" -#: postmaster/postmaster.c:2809 +#: postmaster/postmaster.c:2857 #, c-format msgid "shutdown at recovery target" msgstr "arrêt sur la cible de restauration" -#: postmaster/postmaster.c:2825 postmaster/postmaster.c:2848 +#: postmaster/postmaster.c:2873 postmaster/postmaster.c:2896 msgid "startup process" msgstr "processus de lancement" -#: postmaster/postmaster.c:2828 +#: postmaster/postmaster.c:2876 #, c-format msgid "aborting startup 
due to startup process failure" msgstr "annulation du démarrage à cause d'un échec dans le processus de lancement" -#: postmaster/postmaster.c:2889 +#: postmaster/postmaster.c:2937 #, c-format msgid "database system is ready to accept connections" msgstr "le système de bases de données est prêt pour accepter les connexions" -#: postmaster/postmaster.c:2910 +#: postmaster/postmaster.c:2958 msgid "background writer process" msgstr "processus d'écriture en tâche de fond" -#: postmaster/postmaster.c:2964 +#: postmaster/postmaster.c:3012 msgid "checkpointer process" msgstr "processus checkpointer" -#: postmaster/postmaster.c:2980 +#: postmaster/postmaster.c:3028 msgid "WAL writer process" msgstr "processus d'écriture des journaux de transaction" -#: postmaster/postmaster.c:2995 +#: postmaster/postmaster.c:3043 msgid "WAL receiver process" msgstr "processus de réception des journaux de transaction" -#: postmaster/postmaster.c:3010 +#: postmaster/postmaster.c:3058 msgid "autovacuum launcher process" msgstr "processus de l'autovacuum" -#: postmaster/postmaster.c:3025 +#: postmaster/postmaster.c:3073 msgid "archiver process" msgstr "processus d'archivage" -#: postmaster/postmaster.c:3041 +#: postmaster/postmaster.c:3089 msgid "statistics collector process" msgstr "processus de récupération des statistiques" -#: postmaster/postmaster.c:3055 +#: postmaster/postmaster.c:3103 msgid "system logger process" msgstr "processus des journaux applicatifs" -#: postmaster/postmaster.c:3117 +#: postmaster/postmaster.c:3165 msgid "worker process" msgstr "processus de travail" -#: postmaster/postmaster.c:3200 postmaster/postmaster.c:3220 postmaster/postmaster.c:3227 postmaster/postmaster.c:3245 +#: postmaster/postmaster.c:3248 postmaster/postmaster.c:3268 postmaster/postmaster.c:3275 postmaster/postmaster.c:3293 msgid "server process" msgstr "processus serveur" -#: postmaster/postmaster.c:3299 +#: postmaster/postmaster.c:3347 #, c-format msgid "terminating any other active server processes" msgstr "arrêt des autres processus serveur actifs" #. translator: %s is a noun phrase describing a child process, such as #. "server process" -#: postmaster/postmaster.c:3555 +#: postmaster/postmaster.c:3603 #, c-format msgid "%s (PID %d) exited with exit code %d" msgstr "%s (PID %d) quitte avec le code de sortie %d" -#: postmaster/postmaster.c:3557 postmaster/postmaster.c:3568 postmaster/postmaster.c:3579 postmaster/postmaster.c:3588 postmaster/postmaster.c:3598 +#: postmaster/postmaster.c:3605 postmaster/postmaster.c:3616 postmaster/postmaster.c:3627 postmaster/postmaster.c:3636 postmaster/postmaster.c:3646 #, c-format msgid "Failed process was running: %s" msgstr "Le processus qui a échoué exécutait : %s" #. translator: %s is a noun phrase describing a child process, such as #. "server process" -#: postmaster/postmaster.c:3565 +#: postmaster/postmaster.c:3613 #, c-format msgid "%s (PID %d) was terminated by exception 0x%X" msgstr "%s (PID %d) a été arrêté par l'exception 0x%X" #. translator: %s is a noun phrase describing a child process, such as #. "server process" -#: postmaster/postmaster.c:3575 +#: postmaster/postmaster.c:3623 #, c-format msgid "%s (PID %d) was terminated by signal %d: %s" msgstr "%s (PID %d) a été arrêté par le signal %d : %s" #. translator: %s is a noun phrase describing a child process, such as #. "server process" -#: postmaster/postmaster.c:3586 +#: postmaster/postmaster.c:3634 #, c-format msgid "%s (PID %d) was terminated by signal %d" msgstr "%s (PID %d) a été arrêté par le signal %d" #. 
translator: %s is a noun phrase describing a child process, such as #. "server process" -#: postmaster/postmaster.c:3596 +#: postmaster/postmaster.c:3644 #, c-format msgid "%s (PID %d) exited with unrecognized status %d" msgstr "%s (PID %d) a quitté avec le statut inattendu %d" -#: postmaster/postmaster.c:3783 +#: postmaster/postmaster.c:3831 #, c-format msgid "abnormal database system shutdown" msgstr "le système de base de données a été arrêté anormalement" -#: postmaster/postmaster.c:3823 +#: postmaster/postmaster.c:3871 #, c-format msgid "all server processes terminated; reinitializing" msgstr "tous les processus serveur se sont arrêtés, réinitialisation" -#: postmaster/postmaster.c:3989 postmaster/postmaster.c:5400 postmaster/postmaster.c:5764 +#: postmaster/postmaster.c:4037 postmaster/postmaster.c:5448 postmaster/postmaster.c:5812 #, c-format msgid "could not generate random cancel key" msgstr "n'a pas pu générer la clé d'annulation aléatoire" -#: postmaster/postmaster.c:4043 +#: postmaster/postmaster.c:4091 #, c-format msgid "could not fork new process for connection: %m" msgstr "n'a pas pu lancer le nouveau processus fils pour la connexion : %m" -#: postmaster/postmaster.c:4085 +#: postmaster/postmaster.c:4133 msgid "could not fork new process for connection: " msgstr "n'a pas pu lancer le nouveau processus fils pour la connexion : " -#: postmaster/postmaster.c:4199 +#: postmaster/postmaster.c:4247 #, c-format msgid "connection received: host=%s port=%s" msgstr "connexion reçue : hôte=%s port=%s" -#: postmaster/postmaster.c:4204 +#: postmaster/postmaster.c:4252 #, c-format msgid "connection received: host=%s" msgstr "connexion reçue : hôte=%s" -#: postmaster/postmaster.c:4489 +#: postmaster/postmaster.c:4537 #, c-format msgid "could not execute server process \"%s\": %m" msgstr "n'a pas pu exécuter le processus serveur « %s » : %m" -#: postmaster/postmaster.c:4642 +#: postmaster/postmaster.c:4690 #, c-format msgid "giving up after too many tries to reserve shared memory" msgstr "abandon après trop de tentatives pour réserver la mémoire partagée" -#: postmaster/postmaster.c:4643 +#: postmaster/postmaster.c:4691 #, c-format msgid "This might be caused by ASLR or antivirus software." msgstr "Ceci pourrait être causé par un logiciel ASLR ou un antivirus." -#: postmaster/postmaster.c:4840 +#: postmaster/postmaster.c:4888 #, c-format msgid "SSL configuration could not be loaded in child process" msgstr "la configuration SSL n'a pas pu être chargé sur le processus fils" -#: postmaster/postmaster.c:4972 +#: postmaster/postmaster.c:5020 #, c-format msgid "Please report this to ." msgstr "Veuillez rapporter ceci à ." 
-#: postmaster/postmaster.c:5059 +#: postmaster/postmaster.c:5107 #, c-format msgid "database system is ready to accept read only connections" msgstr "le système de bases de données est prêt pour accepter les connexions en lecture seule" -#: postmaster/postmaster.c:5328 +#: postmaster/postmaster.c:5376 #, c-format msgid "could not fork startup process: %m" msgstr "n'a pas pu lancer le processus fils de démarrage : %m" -#: postmaster/postmaster.c:5332 +#: postmaster/postmaster.c:5380 #, c-format msgid "could not fork background writer process: %m" msgstr "" "n'a pas pu créer un processus fils du processus d'écriture en tâche de\n" "fond : %m" -#: postmaster/postmaster.c:5336 +#: postmaster/postmaster.c:5384 #, c-format msgid "could not fork checkpointer process: %m" msgstr "n'a pas pu créer le processus checkpointer : %m" -#: postmaster/postmaster.c:5340 +#: postmaster/postmaster.c:5388 #, c-format msgid "could not fork WAL writer process: %m" msgstr "" "n'a pas pu créer un processus fils du processus d'écriture des journaux de\n" "transaction : %m" -#: postmaster/postmaster.c:5344 +#: postmaster/postmaster.c:5392 #, c-format msgid "could not fork WAL receiver process: %m" msgstr "" "n'a pas pu créer un processus fils de réception des journaux de\n" "transactions : %m" -#: postmaster/postmaster.c:5348 +#: postmaster/postmaster.c:5396 #, c-format msgid "could not fork process: %m" msgstr "n'a pas pu lancer le processus fils : %m" -#: postmaster/postmaster.c:5535 postmaster/postmaster.c:5558 +#: postmaster/postmaster.c:5583 postmaster/postmaster.c:5606 #, c-format msgid "database connection requirement not indicated during registration" msgstr "pré-requis de la connexion à la base non indiqué lors de l'enregistrement" -#: postmaster/postmaster.c:5542 postmaster/postmaster.c:5565 +#: postmaster/postmaster.c:5590 postmaster/postmaster.c:5613 #, c-format msgid "invalid processing mode in background worker" msgstr "mode de traitement invalide dans le processus en tâche de fond" -#: postmaster/postmaster.c:5637 +#: postmaster/postmaster.c:5685 #, c-format msgid "starting background worker process \"%s\"" msgstr "démarrage du processus d'écriture en tâche de fond « %s »" -#: postmaster/postmaster.c:5649 +#: postmaster/postmaster.c:5697 #, c-format msgid "could not fork worker process: %m" msgstr "n'a pas pu créer un processus fils du processus en tâche de fond : %m" -#: postmaster/postmaster.c:6073 +#: postmaster/postmaster.c:6130 #, c-format msgid "could not duplicate socket %d for use in backend: error code %d" msgstr "n'a pas pu dupliquer la socket %d pour le serveur : code d'erreur %d" -#: postmaster/postmaster.c:6105 +#: postmaster/postmaster.c:6162 #, c-format msgid "could not create inherited socket: error code %d\n" msgstr "n'a pas pu créer la socket héritée : code d'erreur %d\n" -#: postmaster/postmaster.c:6134 +#: postmaster/postmaster.c:6191 #, c-format msgid "could not open backend variables file \"%s\": %s\n" msgstr "n'a pas pu ouvrir le fichier des variables moteurs « %s » : %s\n" -#: postmaster/postmaster.c:6141 +#: postmaster/postmaster.c:6198 #, c-format msgid "could not read from backend variables file \"%s\": %s\n" msgstr "n'a pas pu lire le fichier de configuration serveur « %s » : %s\n" -#: postmaster/postmaster.c:6150 +#: postmaster/postmaster.c:6207 #, c-format msgid "could not remove file \"%s\": %s\n" msgstr "n'a pas pu supprimer le fichier « %s » : %s\n" -#: postmaster/postmaster.c:6167 +#: postmaster/postmaster.c:6224 #, c-format msgid "could not map view of backend 
variables: error code %lu\n" msgstr "" "n'a pas pu exécuter \"map\" la vue des variables serveurs : code\n" "d'erreur %lu\n" -#: postmaster/postmaster.c:6176 +#: postmaster/postmaster.c:6233 #, c-format msgid "could not unmap view of backend variables: error code %lu\n" msgstr "" "n'a pas pu exécuter \"unmap\" sur la vue des variables serveurs : code\n" "d'erreur %lu\n" -#: postmaster/postmaster.c:6183 +#: postmaster/postmaster.c:6240 #, c-format msgid "could not close handle to backend parameter variables: error code %lu\n" msgstr "" "n'a pas pu fermer le lien vers les variables des paramètres du serveur :\n" "code d'erreur %lu\n" -#: postmaster/postmaster.c:6344 +#: postmaster/postmaster.c:6401 #, c-format msgid "could not read exit code for process\n" msgstr "n'a pas pu lire le code de sortie du processus\n" -#: postmaster/postmaster.c:6349 +#: postmaster/postmaster.c:6406 #, c-format msgid "could not post child completion status\n" msgstr "n'a pas pu poster le statut de fin de l'enfant\n" @@ -16725,16 +16785,16 @@ msgstr "désactivation de la rotation automatique (utilisez SIGHUP pour la réac msgid "could not determine which collation to use for regular expression" msgstr "n'a pas pu déterminer le collationnement à utiliser pour une expression rationnelle" -#: repl_gram.y:320 repl_gram.y:352 +#: repl_gram.y:330 repl_gram.y:362 #, c-format msgid "invalid timeline %u" msgstr "timeline %u invalide" -#: repl_scanner.l:125 +#: repl_scanner.l:126 msgid "invalid streaming start location" msgstr "emplacement de démarrage du flux de réplication invalide" -#: repl_scanner.l:176 scan.l:670 +#: repl_scanner.l:177 scan.l:670 msgid "unterminated quoted string" msgstr "chaîne entre guillemets non terminée" @@ -16743,203 +16803,202 @@ msgstr "chaîne entre guillemets non terminée" msgid "could not stat control file \"%s\": %m" msgstr "n'a pas pu récupérer des informations sur le fichier de contrôle « %s » : %m" -#: replication/basebackup.c:412 +#: replication/basebackup.c:413 #, c-format msgid "could not find any WAL files" msgstr "n'a pas pu trouver un seul fichier WAL" -#: replication/basebackup.c:425 replication/basebackup.c:439 replication/basebackup.c:448 +#: replication/basebackup.c:426 replication/basebackup.c:440 replication/basebackup.c:449 #, c-format msgid "could not find WAL file \"%s\"" msgstr "n'a pas pu trouver le fichier WAL « %s »" -#: replication/basebackup.c:487 replication/basebackup.c:513 +#: replication/basebackup.c:488 replication/basebackup.c:514 #, c-format msgid "unexpected WAL file size \"%s\"" msgstr "taille du fichier WAL « %s » inattendue" -#: replication/basebackup.c:499 replication/basebackup.c:1228 +#: replication/basebackup.c:500 replication/basebackup.c:1229 #, c-format msgid "base backup could not send data, aborting backup" msgstr "la sauvegarde de base n'a pas pu envoyer les données, annulation de la sauvegarde" -#: replication/basebackup.c:601 replication/basebackup.c:610 replication/basebackup.c:619 replication/basebackup.c:628 replication/basebackup.c:637 replication/basebackup.c:648 replication/basebackup.c:665 +#: replication/basebackup.c:602 replication/basebackup.c:611 replication/basebackup.c:620 replication/basebackup.c:629 replication/basebackup.c:638 replication/basebackup.c:649 replication/basebackup.c:666 #, c-format msgid "duplicate option \"%s\"" msgstr "option « %s » dupliquée" -#: replication/basebackup.c:654 utils/misc/guc.c:5780 +#: replication/basebackup.c:655 utils/misc/guc.c:5780 #, c-format msgid "%d is outside the valid range for parameter 
\"%s\" (%d .. %d)" msgstr "%d est en dehors des limites valides pour le paramètre « %s » (%d .. %d)" -#: replication/basebackup.c:928 replication/basebackup.c:1025 +#: replication/basebackup.c:929 replication/basebackup.c:1026 #, c-format msgid "could not stat file or directory \"%s\": %m" msgstr "" "n'a pas pu récupérer les informations sur le fichier ou répertoire\n" "« %s » : %m" -#: replication/basebackup.c:1180 +#: replication/basebackup.c:1181 #, c-format msgid "skipping special file \"%s\"" msgstr "ignore le fichier spécial « %s »" -#: replication/basebackup.c:1293 +#: replication/basebackup.c:1294 #, c-format msgid "file name too long for tar format: \"%s\"" msgstr "nom du fichier trop long pour le format tar : « %s »" -#: replication/basebackup.c:1298 +#: replication/basebackup.c:1299 #, c-format msgid "symbolic link target too long for tar format: file name \"%s\", target \"%s\"" msgstr "cible du lien symbolique trop long pour le format tar : nom de fichier « %s », cible « %s »" -#: replication/libpqwalreceiver/libpqwalreceiver.c:226 +#: replication/libpqwalreceiver/libpqwalreceiver.c:231 #, c-format msgid "invalid connection string syntax: %s" msgstr "syntaxe de la chaîne de connexion invalide : %s" -#: replication/libpqwalreceiver/libpqwalreceiver.c:250 +#: replication/libpqwalreceiver/libpqwalreceiver.c:255 #, c-format msgid "could not parse connection string: %s" msgstr "n'a pas pu analyser la chaîne de connexion « %s »" -#: replication/libpqwalreceiver/libpqwalreceiver.c:300 +#: replication/libpqwalreceiver/libpqwalreceiver.c:305 #, c-format msgid "could not receive database system identifier and timeline ID from the primary server: %s" msgstr "" "n'a pas pu recevoir l'identifiant du système de bases de données et\n" "l'identifiant de la timeline à partir du serveur principal : %s" -#: replication/libpqwalreceiver/libpqwalreceiver.c:311 replication/libpqwalreceiver/libpqwalreceiver.c:518 +#: replication/libpqwalreceiver/libpqwalreceiver.c:316 replication/libpqwalreceiver/libpqwalreceiver.c:523 #, c-format msgid "invalid response from primary server" msgstr "réponse invalide du serveur principal" -#: replication/libpqwalreceiver/libpqwalreceiver.c:312 +#: replication/libpqwalreceiver/libpqwalreceiver.c:317 #, c-format msgid "Could not identify system: got %d rows and %d fields, expected %d rows and %d or more fields." msgstr "" "N'a pas pu identifier le système : a récupéré %d lignes et %d champs,\n" "attendait %d lignes et %d champs (ou plus)." 
-#: replication/libpqwalreceiver/libpqwalreceiver.c:378 replication/libpqwalreceiver/libpqwalreceiver.c:384 replication/libpqwalreceiver/libpqwalreceiver.c:409 +#: replication/libpqwalreceiver/libpqwalreceiver.c:383 replication/libpqwalreceiver/libpqwalreceiver.c:389 replication/libpqwalreceiver/libpqwalreceiver.c:414 #, c-format msgid "could not start WAL streaming: %s" msgstr "n'a pas pu démarrer l'envoi des WAL : %s" -#: replication/libpqwalreceiver/libpqwalreceiver.c:428 +#: replication/libpqwalreceiver/libpqwalreceiver.c:433 #, c-format msgid "could not send end-of-streaming message to primary: %s" msgstr "n'a pas pu transmettre le message de fin d'envoi de flux au primaire : %s" -#: replication/libpqwalreceiver/libpqwalreceiver.c:450 +#: replication/libpqwalreceiver/libpqwalreceiver.c:455 #, c-format msgid "unexpected result set after end-of-streaming" msgstr "ensemble de résultats inattendu après la fin du flux de réplication" -#: replication/libpqwalreceiver/libpqwalreceiver.c:464 -#, fuzzy, c-format -#| msgid "error reading result of streaming command: %s" +#: replication/libpqwalreceiver/libpqwalreceiver.c:469 +#, c-format msgid "error while shutting down streaming COPY: %s" -msgstr "erreur lors de la lecture de la commande de flux : %s" +msgstr "erreur lors de l'arrêt de la copie en flux : %s" -#: replication/libpqwalreceiver/libpqwalreceiver.c:473 +#: replication/libpqwalreceiver/libpqwalreceiver.c:478 #, c-format msgid "error reading result of streaming command: %s" msgstr "erreur lors de la lecture de la commande de flux : %s" -#: replication/libpqwalreceiver/libpqwalreceiver.c:481 replication/libpqwalreceiver/libpqwalreceiver.c:709 +#: replication/libpqwalreceiver/libpqwalreceiver.c:486 replication/libpqwalreceiver/libpqwalreceiver.c:714 #, c-format msgid "unexpected result after CommandComplete: %s" msgstr "résultat inattendu après CommandComplete : %s" -#: replication/libpqwalreceiver/libpqwalreceiver.c:507 +#: replication/libpqwalreceiver/libpqwalreceiver.c:512 #, c-format msgid "could not receive timeline history file from the primary server: %s" msgstr "n'a pas pu recevoir le fichier historique à partir du serveur principal : %s" -#: replication/libpqwalreceiver/libpqwalreceiver.c:519 +#: replication/libpqwalreceiver/libpqwalreceiver.c:524 #, c-format msgid "Expected 1 tuple with 2 fields, got %d tuples with %d fields." msgstr "Attendait 1 ligne avec 2 champs, a obtenu %d lignes avec %d champs." 
-#: replication/libpqwalreceiver/libpqwalreceiver.c:673 replication/libpqwalreceiver/libpqwalreceiver.c:724 replication/libpqwalreceiver/libpqwalreceiver.c:730 +#: replication/libpqwalreceiver/libpqwalreceiver.c:678 replication/libpqwalreceiver/libpqwalreceiver.c:729 replication/libpqwalreceiver/libpqwalreceiver.c:735 #, c-format msgid "could not receive data from WAL stream: %s" msgstr "n'a pas pu recevoir des données du flux de WAL : %s" -#: replication/libpqwalreceiver/libpqwalreceiver.c:749 +#: replication/libpqwalreceiver/libpqwalreceiver.c:754 #, c-format msgid "could not send data to WAL stream: %s" msgstr "n'a pas pu transmettre les données au flux WAL : %s" -#: replication/libpqwalreceiver/libpqwalreceiver.c:798 +#: replication/libpqwalreceiver/libpqwalreceiver.c:803 #, c-format msgid "could not create replication slot \"%s\": %s" msgstr "n'a pas pu créer le slot de réplication « %s » : %s" -#: replication/libpqwalreceiver/libpqwalreceiver.c:832 +#: replication/libpqwalreceiver/libpqwalreceiver.c:837 #, c-format msgid "invalid query response" msgstr "réponse à la requête invalide" -#: replication/libpqwalreceiver/libpqwalreceiver.c:833 +#: replication/libpqwalreceiver/libpqwalreceiver.c:838 #, c-format msgid "Expected %d fields, got %d fields." msgstr "Attendait %d champs, a obtenu %d champs." -#: replication/libpqwalreceiver/libpqwalreceiver.c:902 +#: replication/libpqwalreceiver/libpqwalreceiver.c:907 #, c-format msgid "the query interface requires a database connection" msgstr "l'interface de la requête requiert une connexion à une base" -#: replication/libpqwalreceiver/libpqwalreceiver.c:933 +#: replication/libpqwalreceiver/libpqwalreceiver.c:938 msgid "empty query" msgstr "requête vide" -#: replication/logical/launcher.c:268 +#: replication/logical/launcher.c:298 #, c-format msgid "starting logical replication worker for subscription \"%s\"" msgstr "lancement du processus worker de réplication logique pour la souscription « %s »" -#: replication/logical/launcher.c:275 +#: replication/logical/launcher.c:305 #, c-format msgid "cannot start logical replication workers when max_replication_slots = 0" msgstr "ne peut pas démarrer les processus worker de la réplication logique quand max_replication_slots = 0" -#: replication/logical/launcher.c:355 +#: replication/logical/launcher.c:385 #, c-format msgid "out of logical replication worker slots" msgstr "plus de slots de processus worker pour la réplication logique" -#: replication/logical/launcher.c:356 +#: replication/logical/launcher.c:386 #, c-format msgid "You might need to increase max_logical_replication_workers." msgstr "Vous pourriez avoir besoin d'augmenter max_logical_replication_workers." -#: replication/logical/launcher.c:401 +#: replication/logical/launcher.c:440 #, c-format msgid "out of background worker slots" msgstr "plus de slots de processus en tâche de fond" -#: replication/logical/launcher.c:402 +#: replication/logical/launcher.c:441 #, c-format msgid "You might need to increase max_worker_processes." msgstr "Vous pourriez avoir besoin d'augmenter max_worker_processes." 
-#: replication/logical/launcher.c:564 +#: replication/logical/launcher.c:624 #, c-format msgid "logical replication worker slot %d is empty, cannot attach" msgstr "le slot %d du processus de réplication logique est vide, ne peut pas s'y attacher" -#: replication/logical/launcher.c:573 +#: replication/logical/launcher.c:633 #, c-format msgid "logical replication worker slot %d is already used by another worker, cannot attach" msgstr "le slot %d du processus de réplication logique est déjà utilisé par un autre processus, ne peut pas s'attacher" -#: replication/logical/launcher.c:798 +#: replication/logical/launcher.c:885 #, c-format msgid "logical replication launcher started" msgstr "le processus de lancement de la réplication logique a démarré" @@ -17021,7 +17080,7 @@ msgstr "le tableau doit avoir une dimension" msgid "array must not contain nulls" msgstr "le tableau ne doit pas contenir de valeurs NULL" -#: replication/logical/logicalfuncs.c:222 utils/adt/json.c:2282 utils/adt/jsonb.c:1357 +#: replication/logical/logicalfuncs.c:222 utils/adt/json.c:2246 utils/adt/jsonb.c:1314 #, c-format msgid "array must have even number of elements" msgstr "le tableau doit avoir un nombre pair d'éléments" @@ -17031,77 +17090,87 @@ msgstr "le tableau doit avoir un nombre pair d'éléments" msgid "logical decoding output plugin \"%s\" produces binary output, but function \"%s\" expects textual data" msgstr "le plugin de sortie « %s » pour le décodage logique produit une sortie binaire, mais la fonction « %s » attend des données texte" -#: replication/logical/origin.c:180 +#: replication/logical/origin.c:185 #, c-format msgid "only superusers can query or manipulate replication origins" msgstr "seuls les super-utilisateurs peuvent lire ou manipuler les origines de réplication" -#: replication/logical/origin.c:185 +#: replication/logical/origin.c:190 #, c-format msgid "cannot query or manipulate replication origin when max_replication_slots = 0" msgstr "ne peut pas lire ou manipuler une origine de réplication quand max_replication_slots = 0" -#: replication/logical/origin.c:190 +#: replication/logical/origin.c:195 #, c-format msgid "cannot manipulate replication origins during recovery" msgstr "ne peut pas manipuler les origines de réplication lors d'une restauration" -#: replication/logical/origin.c:314 +#: replication/logical/origin.c:230 +#, c-format +msgid "replication origin \"%s\" does not exist" +msgstr "l'origine de réplication « %s » n'existe pas" + +#: replication/logical/origin.c:321 #, c-format msgid "could not find free replication origin OID" msgstr "n'a pas pu trouver d'OID d'origine de réplication libre" -#: replication/logical/origin.c:351 +#: replication/logical/origin.c:369 #, c-format msgid "could not drop replication origin with OID %d, in use by PID %d" msgstr "ne peut pas supprimer l'origine de réplication d'OID %d, utilisée par le PID %d" -#: replication/logical/origin.c:667 +#: replication/logical/origin.c:461 +#, c-format +msgid "replication origin with OID %u does not exist" +msgstr "l'origine de réplication d'OID %u n'existe pas" + +#: replication/logical/origin.c:708 #, c-format msgid "replication checkpoint has wrong magic %u instead of %u" msgstr "le checkpoint de réplication a le mauvais nombre magique (%u au lieu de %u)" -#: replication/logical/origin.c:699 +#: replication/logical/origin.c:740 #, c-format msgid "could not read file \"%s\": read %d of %zu" msgstr "n'a pas pu lire le fichier « %s » : a lu %d sur %zu" -#: replication/logical/origin.c:708 +#:
replication/logical/origin.c:749 #, c-format msgid "could not find free replication state, increase max_replication_slots" msgstr "n'a pas pu trouver d'état de réplication libre, augmentez max_replication_slots" -#: replication/logical/origin.c:726 +#: replication/logical/origin.c:767 #, c-format msgid "replication slot checkpoint has wrong checksum %u, expected %u" msgstr "le point de contrôle du slot de réplication a la mauvaise somme de contrôle %u, %u attendu" -#: replication/logical/origin.c:850 +#: replication/logical/origin.c:891 #, c-format msgid "replication origin with OID %d is already active for PID %d" msgstr "l'origine de réplication d'OID %d est déjà active pour le PID %d" -#: replication/logical/origin.c:861 replication/logical/origin.c:1041 +#: replication/logical/origin.c:902 replication/logical/origin.c:1089 #, c-format msgid "could not find free replication state slot for replication origin with OID %u" msgstr "n'a pas pu trouver de slot d'état de réplication libre pour l'origine de réplication d'OID %u" -#: replication/logical/origin.c:863 replication/logical/origin.c:1043 replication/slot.c:1508 +#: replication/logical/origin.c:904 replication/logical/origin.c:1091 replication/slot.c:1509 #, c-format msgid "Increase max_replication_slots and try again." msgstr "Augmentez max_replication_slots et recommencez." -#: replication/logical/origin.c:1000 +#: replication/logical/origin.c:1048 #, c-format msgid "cannot setup replication origin when one is already setup" msgstr "ne peut pas configurer l'origine de réplication si une origine existe déjà" -#: replication/logical/origin.c:1029 +#: replication/logical/origin.c:1077 #, c-format msgid "replication identifier %d is already active for PID %d" msgstr "l'identificateur de réplication %d est déjà actif pour le PID %d" -#: replication/logical/origin.c:1075 replication/logical/origin.c:1270 replication/logical/origin.c:1290 +#: replication/logical/origin.c:1128 replication/logical/origin.c:1326 replication/logical/origin.c:1346 #, c-format msgid "no replication origin is configured" msgstr "aucune origine de réplication n'est configurée" @@ -17111,49 +17180,49 @@ msgstr "aucune origine de réplication n'est configurée" msgid "logical replication target relation \"%s.%s\" does not exist" msgstr "la relation cible de la réplication logique « %s.%s » n'existe pas" -#: replication/logical/relation.c:297 +#: replication/logical/relation.c:300 #, c-format msgid "logical replication target relation \"%s.%s\" is missing some replicated columns" msgstr "il manque des colonnes répliquées dans la relation cible « %s.%s » de réplication logique" -#: replication/logical/relation.c:337 +#: replication/logical/relation.c:340 #, c-format msgid "logical replication target relation \"%s.%s\" uses system columns in REPLICA IDENTITY index" msgstr "la relation cible « %s.%s » de réplication logique utilise des colonnes systèmes dans l'index REPLICA IDENTITY" -#: replication/logical/relation.c:453 +#: replication/logical/relation.c:456 #, c-format -msgid "builtin type %u not found" +msgid "built-in type %u not found" msgstr "type interne %u non trouvé" -#: replication/logical/relation.c:454 +#: replication/logical/relation.c:457 #, c-format -msgid "This can be caused by having publisher with higher major version than subscriber" -msgstr "Ceci peut avoir pour cause un publieur ayant une version majeure supérieur à l'abonné" +msgid "This can be caused by having a publisher with a higher PostgreSQL major version than the subscriber."
+msgstr "Ceci peut avoir pour cause un publieur ayant une version majeure de PostgreSQL supérieure à l'abonné" -#: replication/logical/relation.c:486 +#: replication/logical/relation.c:488 #, c-format msgid "data type \"%s.%s\" required for logical replication does not exist" msgstr "le type de données « %s/%s » requis par la réplication logique n'existe pas" -#: replication/logical/reorderbuffer.c:2288 +#: replication/logical/reorderbuffer.c:2303 #, c-format msgid "could not write to data file for XID %u: %m" msgstr "n'a pas pu écrire dans le fichier pour le XID %u : %m" -#: replication/logical/reorderbuffer.c:2387 replication/logical/reorderbuffer.c:2409 +#: replication/logical/reorderbuffer.c:2402 replication/logical/reorderbuffer.c:2424 #, c-format msgid "could not read from reorderbuffer spill file: %m" msgstr "n'a pas pu lire le fichier « reorderbuffer spill » : %m" -#: replication/logical/reorderbuffer.c:2391 replication/logical/reorderbuffer.c:2413 +#: replication/logical/reorderbuffer.c:2406 replication/logical/reorderbuffer.c:2428 #, c-format msgid "could not read from reorderbuffer spill file: read %d instead of %u bytes" msgstr "" "n'a pas pu lire à partir du fichier « reorderbuffer spill » : a lu seulement %d octets\n" "sur %u" -#: replication/logical/reorderbuffer.c:3071 +#: replication/logical/reorderbuffer.c:3086 #, c-format msgid "could not read from file \"%s\": read %d instead of %d bytes" msgstr "n'a pas pu lire à partir du fichier « %s » : lu %d octets au lieu de %d octets" @@ -17235,7 +17304,7 @@ msgstr "n'a pas pu analyser le mode du fichier « %s »" #: replication/logical/tablesync.c:138 #, c-format msgid "logical replication table synchronization worker for subscription \"%s\", table \"%s\" has finished" -msgstr "" +msgstr "le worker de synchronisation de table en réplication logique pour la souscription « %s », table « %s », a terminé" #: replication/logical/tablesync.c:685 #, c-format @@ -17267,122 +17336,123 @@ msgstr "la copie de table n'a pas pu démarrer la transaction sur le publieur" msgid "table copy could not finish transaction on publisher" msgstr "la copie de table n'a pas pu finir la transaction sur le publieur" -#: replication/logical/worker.c:291 +#: replication/logical/worker.c:293 #, c-format msgid "processing remote data for replication target relation \"%s.%s\" column \"%s\", remote type %s, local type %s" msgstr "traitement des données distantes pour la relation cible « %s.%s » de réplication logique, colonne « %s », type distant %s, type local %s" -#: replication/logical/worker.c:500 +#: replication/logical/worker.c:506 #, c-format msgid "ORIGIN message sent out of order" msgstr "message ORIGIN en désordre" -#: replication/logical/worker.c:631 -#, c-format -msgid "publisher does not send replica identity column expected by the logical replication target relation \"%s.%s\"" +#: replication/logical/worker.c:637 +#, fuzzy, c-format +#| msgid "publisher does not send replica identity column expected by the logical replication target relation \"%s.%s\"" +msgid "publisher did not send replica identity column expected by the logical replication target relation \"%s.%s\"" msgstr "le publieur n'envoie pas la colonne d'identité du réplicat attendue par la relation cible « %s.%s » de réplication logique" -#: replication/logical/worker.c:638 +#: replication/logical/worker.c:644 #, c-format msgid "logical replication target relation \"%s.%s\" has neither REPLICA IDENTITY index nor PRIMARY KEY and published relation does not have REPLICA IDENTITY FULL" msgstr 
"la relation cible « %s.%s » de réplication logique n'a ni un index REPLICA IDENTITY ni une clé primaire, et la relation publiée n'a pas REPLICA IDENTITY FULL" -#: replication/logical/worker.c:845 +#: replication/logical/worker.c:851 #, c-format -msgid "logical replication could not find row for delete in replication target %s" -msgstr "la réplication logique n'a pas pu trouver la ligne à supprimer dans la cible de réplication %s" +msgid "logical replication could not find row for delete in replication target relation \"%s\"" +msgstr "la réplication logique n'a pas pu trouver la ligne à supprimer dans la relation cible de réplication %s" -#: replication/logical/worker.c:912 +#: replication/logical/worker.c:918 #, c-format -msgid "invalid logical replication message type %c" -msgstr "type %c du message de la réplication logique invalide" +msgid "invalid logical replication message type \"%c\"" +msgstr "type « %c » du message de la réplication logique invalide" -#: replication/logical/worker.c:1053 +#: replication/logical/worker.c:1059 #, c-format msgid "data stream from publisher has ended" msgstr "le flux de données provenant du publieur s'est terminé" -#: replication/logical/worker.c:1212 +#: replication/logical/worker.c:1218 #, c-format msgid "terminating logical replication worker due to timeout" msgstr "arrêt du processus worker de la réplication logique suite à l'expiration du délai de réplication" -#: replication/logical/worker.c:1360 +#: replication/logical/worker.c:1366 #, c-format msgid "logical replication apply worker for subscription \"%s\" will stop because the subscription was removed" msgstr "le processus apply de réplication logique pour la souscription « %s » s'arrêtera car la souscription a été supprimée" -#: replication/logical/worker.c:1374 +#: replication/logical/worker.c:1380 #, c-format msgid "logical replication apply worker for subscription \"%s\" will stop because the subscription was disabled" msgstr "le processus apply de réplication logique pour la souscription « %s » s'arrêtera car la souscription a été désactivée" -#: replication/logical/worker.c:1388 +#: replication/logical/worker.c:1394 #, c-format msgid "logical replication apply worker for subscription \"%s\" will restart because the connection information was changed" msgstr "le processus apply de réplication logique pour la souscription « %s » redémarrera car la souscription a été modifiée" -#: replication/logical/worker.c:1402 +#: replication/logical/worker.c:1408 #, c-format msgid "logical replication apply worker for subscription \"%s\" will restart because subscription was renamed" msgstr "le processus apply de réplication logique pour la souscription « %s » redémarrera car la souscription a été renommée" -#: replication/logical/worker.c:1419 +#: replication/logical/worker.c:1425 #, c-format msgid "logical replication apply worker for subscription \"%s\" will restart because the replication slot name was changed" msgstr "le processus apply de réplication logique pour la souscription « %s » redémarrera car le nom du slot de réplication a été modifiée" -#: replication/logical/worker.c:1433 +#: replication/logical/worker.c:1439 #, c-format msgid "logical replication apply worker for subscription \"%s\" will restart because subscription's publications were changed" msgstr "le processus apply de réplication logique pour la souscription « %s » redémarrera car les publications ont été modifiées" -#: replication/logical/worker.c:1541 +#: replication/logical/worker.c:1547 #, c-format msgid "logical 
replication apply worker for subscription \"%s\" will not start because the subscription was disabled during startup" msgstr "le processus apply de réplication logique pour la souscription « %s » ne démarrera pas car la souscription a été désactivée au démarrage" -#: replication/logical/worker.c:1555 +#: replication/logical/worker.c:1561 #, c-format msgid "logical replication table synchronization worker for subscription \"%s\", table \"%s\" has started" msgstr "le processus de synchronisation des tables en réplication logique pour la souscription « %s », table « %s » a démarré" -#: replication/logical/worker.c:1559 +#: replication/logical/worker.c:1565 #, c-format msgid "logical replication apply worker for subscription \"%s\" has started" msgstr "le processus apply de réplication logique pour la souscription « %s » a démarré" -#: replication/logical/worker.c:1599 +#: replication/logical/worker.c:1605 #, c-format msgid "subscription has no replication slot set" msgstr "la souscription n'a aucun ensemble de slot de réplication" -#: replication/pgoutput/pgoutput.c:113 +#: replication/pgoutput/pgoutput.c:114 #, c-format msgid "invalid proto_version" msgstr "proto_version invalide" -#: replication/pgoutput/pgoutput.c:118 +#: replication/pgoutput/pgoutput.c:119 #, c-format -msgid "proto_verson \"%s\" out of range" -msgstr "proto_version « %s » est en dehors des limites" +msgid "proto_version \"%s\" out of range" +msgstr "proto_version « %s » en dehors des limites" -#: replication/pgoutput/pgoutput.c:135 +#: replication/pgoutput/pgoutput.c:136 #, c-format msgid "invalid publication_names syntax" msgstr "syntaxe publication_names invalide" -#: replication/pgoutput/pgoutput.c:179 +#: replication/pgoutput/pgoutput.c:180 #, c-format msgid "client sent proto_version=%d but we only support protocol %d or lower" msgstr "le client a envoyé proto_version=%d mais nous supportons seulement le protocole %d et les protocoles antérieurs" -#: replication/pgoutput/pgoutput.c:185 +#: replication/pgoutput/pgoutput.c:186 #, c-format msgid "client sent proto_version=%d but we only support protocol %d or higher" msgstr "le client a envoyé proto_version=%d mais nous supportons seulement le protocole %d et les protocoles supérieurs" -#: replication/pgoutput/pgoutput.c:191 +#: replication/pgoutput/pgoutput.c:192 #, c-format msgid "publication_names parameter missing" msgstr "paramètre publication_names manquant" @@ -17427,52 +17497,52 @@ msgstr "Libérez un slot ou augmentez max_replication_slots." 
msgid "replication slot \"%s\" does not exist" msgstr "le slot de réplication « %s » n'existe pas" -#: replication/slot.c:390 replication/slot.c:939 +#: replication/slot.c:390 replication/slot.c:940 #, c-format msgid "replication slot \"%s\" is active for PID %d" msgstr "le slot de réplication « %s » est actif pour le PID %d" -#: replication/slot.c:623 replication/slot.c:1120 replication/slot.c:1469 +#: replication/slot.c:624 replication/slot.c:1121 replication/slot.c:1470 #, c-format msgid "could not remove directory \"%s\"" msgstr "n'a pas pu supprimer le répertoire « %s »" -#: replication/slot.c:969 +#: replication/slot.c:970 #, c-format msgid "replication slots can only be used if max_replication_slots > 0" msgstr "les slots de réplications peuvent seulement être utilisés si max_replication_slots > 0" -#: replication/slot.c:974 +#: replication/slot.c:975 #, c-format msgid "replication slots can only be used if wal_level >= replica" msgstr "les slots de réplication peuvent seulement être utilisés si wal_level >= replica" -#: replication/slot.c:1399 replication/slot.c:1439 +#: replication/slot.c:1400 replication/slot.c:1440 #, c-format msgid "could not read file \"%s\", read %d of %u: %m" msgstr "n'a pas pu lire le fichier « %s », a lu %d sur %u : %m" -#: replication/slot.c:1408 +#: replication/slot.c:1409 #, c-format msgid "replication slot file \"%s\" has wrong magic number: %u instead of %u" msgstr "le fichier « %s » du slot de réplication a le nombre magique %u au lieu de %u" -#: replication/slot.c:1415 +#: replication/slot.c:1416 #, c-format msgid "replication slot file \"%s\" has unsupported version %u" msgstr "le fichier « %s » du slot de réplication a une version %u non supportée" -#: replication/slot.c:1422 +#: replication/slot.c:1423 #, c-format msgid "replication slot file \"%s\" has corrupted length %u" msgstr "le slot de réplication « %s » a une taille %u corrompue" -#: replication/slot.c:1454 +#: replication/slot.c:1455 #, c-format msgid "checksum mismatch for replication slot file \"%s\": is %u, should be %u" msgstr "différence de somme de contrôle pour le fichier de slot de réplication « %s » : est %u, devrait être %u" -#: replication/slot.c:1507 +#: replication/slot.c:1508 #, c-format msgid "too many replication slots active before shutdown" msgstr "trop de slots de réplication actifs avant l'arrêt" @@ -17528,193 +17598,193 @@ msgstr "le nombre de standbys synchrones (%d) doit être supérieur à zéro" msgid "terminating walreceiver process due to administrator command" msgstr "arrêt du processus walreceiver suite à la demande de l'administrateur" -#: replication/walreceiver.c:300 +#: replication/walreceiver.c:306 #, c-format msgid "could not connect to the primary server: %s" msgstr "n'a pas pu se connecter au serveur principal : %s" -#: replication/walreceiver.c:339 +#: replication/walreceiver.c:345 #, c-format msgid "database system identifier differs between the primary and standby" msgstr "" "l'identifiant du système de bases de données diffère entre le serveur principal\n" "et le serveur en attente" -#: replication/walreceiver.c:340 +#: replication/walreceiver.c:346 #, c-format msgid "The primary's identifier is %s, the standby's identifier is %s." msgstr "" "L'identifiant du serveur principal est %s, l'identifiant du serveur en attente\n" "est %s." 
-#: replication/walreceiver.c:351 +#: replication/walreceiver.c:357 #, c-format msgid "highest timeline %u of the primary is behind recovery timeline %u" msgstr "la plus grande timeline %u du serveur principal est derrière la timeline de restauration %u" -#: replication/walreceiver.c:387 +#: replication/walreceiver.c:393 #, c-format msgid "started streaming WAL from primary at %X/%X on timeline %u" msgstr "Commence le flux des journaux depuis le principal à %X/%X sur la timeline %u" -#: replication/walreceiver.c:392 +#: replication/walreceiver.c:398 #, c-format msgid "restarted WAL streaming at %X/%X on timeline %u" msgstr "recommence le flux WAL à %X/%X sur la timeline %u" -#: replication/walreceiver.c:421 +#: replication/walreceiver.c:427 #, c-format msgid "cannot continue WAL streaming, recovery has already ended" msgstr "ne peut pas continuer le flux de journaux de transactions, la récupération est déjà terminée" -#: replication/walreceiver.c:458 +#: replication/walreceiver.c:464 #, c-format msgid "replication terminated by primary server" msgstr "réplication terminée par le serveur primaire" -#: replication/walreceiver.c:459 +#: replication/walreceiver.c:465 #, c-format msgid "End of WAL reached on timeline %u at %X/%X." msgstr "Fin du WAL atteint sur la timeline %u à %X/%X" -#: replication/walreceiver.c:554 +#: replication/walreceiver.c:560 #, c-format msgid "terminating walreceiver due to timeout" msgstr "arrêt du processus walreceiver suite à l'expiration du délai de réplication" -#: replication/walreceiver.c:594 +#: replication/walreceiver.c:600 #, c-format msgid "primary server contains no more WAL on requested timeline %u" msgstr "le serveur principal ne contient plus de WAL sur la timeline %u demandée" -#: replication/walreceiver.c:609 replication/walreceiver.c:968 +#: replication/walreceiver.c:615 replication/walreceiver.c:974 #, c-format msgid "could not close log segment %s: %m" msgstr "n'a pas pu fermer le journal de transactions %s : %m" -#: replication/walreceiver.c:734 +#: replication/walreceiver.c:740 #, c-format msgid "fetching timeline history file for timeline %u from primary server" msgstr "récupération du fichier historique pour la timeline %u à partir du serveur principal" -#: replication/walreceiver.c:1022 +#: replication/walreceiver.c:1028 #, c-format msgid "could not write to log segment %s at offset %u, length %lu: %m" msgstr "n'a pas pu écrire le journal de transactions %s au décalage %u, longueur %lu : %m" -#: replication/walsender.c:490 +#: replication/walsender.c:491 #, c-format msgid "could not seek to beginning of file \"%s\": %m" msgstr "n'a pas pu se déplacer au début du fichier « %s » : %m" -#: replication/walsender.c:531 +#: replication/walsender.c:532 #, c-format msgid "IDENTIFY_SYSTEM has not been run before START_REPLICATION" msgstr "IDENTIFY_SYSTEM n'a pas été exécuté avant START_REPLICATION" -#: replication/walsender.c:548 +#: replication/walsender.c:549 #, c-format msgid "cannot use a logical replication slot for physical replication" msgstr "ne peut pas utiliser un slot de réplication logique pour une réplication physique" -#: replication/walsender.c:611 +#: replication/walsender.c:612 #, c-format msgid "requested starting point %X/%X on timeline %u is not in this server's history" msgstr "le point de reprise %X/%X de la timeline %u n'est pas dans l'historique du serveur" -#: replication/walsender.c:615 +#: replication/walsender.c:616 #, c-format msgid "This server's history forked from timeline %u at %X/%X." 
msgstr "L'historique du serveur a changé à partir de la timeline %u à %X/%X." -#: replication/walsender.c:660 +#: replication/walsender.c:661 #, c-format msgid "requested starting point %X/%X is ahead of the WAL flush position of this server %X/%X" msgstr "le point de reprise requis %X/%X est devant la position de vidage des WAL de ce serveur %X/%X" -#: replication/walsender.c:889 +#: replication/walsender.c:890 #, c-format msgid "CREATE_REPLICATION_SLOT ... EXPORT_SNAPSHOT must not be called inside a transaction" msgstr "CREATE_REPLICATION_SLOT ... EXPORT_SNAPSHOT ne doit pas être appelé dans une sous-transaction" -#: replication/walsender.c:898 +#: replication/walsender.c:899 #, c-format msgid "CREATE_REPLICATION_SLOT ... USE_SNAPSHOT must be called inside a transaction" msgstr "CREATE_REPLICATION_SLOT ... USE_SNAPSHOT doit être appelé dans une transaction" -#: replication/walsender.c:903 +#: replication/walsender.c:904 #, c-format msgid "CREATE_REPLICATION_SLOT ... USE_SNAPSHOT must be called in REPEATABLE READ isolation mode transaction" msgstr "CREATE_REPLICATION_SLOT ... USE_SNAPSHOT doit être appelé dans le niveau d'isolation REPEATABLE READ" -#: replication/walsender.c:908 +#: replication/walsender.c:909 #, c-format msgid "CREATE_REPLICATION_SLOT ... USE_SNAPSHOT must be called before any query" msgstr "CREATE_REPLICATION_SLOT ... USE_SNAPSHOT doit être appelé avant toute requête" -#: replication/walsender.c:913 +#: replication/walsender.c:914 #, c-format msgid "CREATE_REPLICATION_SLOT ... USE_SNAPSHOT must not be called in a subtransaction" msgstr "CREATE_REPLICATION_SLOT ... USE_SNAPSHOT ne doit pas être appelé dans une sous-transaction" -#: replication/walsender.c:1059 +#: replication/walsender.c:1060 #, c-format msgid "terminating walsender process after promotion" msgstr "arrêt du processus walreceiver suite promotion" -#: replication/walsender.c:1437 +#: replication/walsender.c:1446 #, c-format msgid "cannot execute new commands while WAL sender is in stopping mode" msgstr "ne peut pas exécuter de nouvelles commandes alors que le walsender est en mode d'arrêt" -#: replication/walsender.c:1470 +#: replication/walsender.c:1479 #, c-format msgid "received replication command: %s" msgstr "commande de réplication reçu : %s" -#: replication/walsender.c:1486 tcop/fastpath.c:281 tcop/postgres.c:997 tcop/postgres.c:1307 tcop/postgres.c:1566 tcop/postgres.c:1971 tcop/postgres.c:2339 tcop/postgres.c:2414 +#: replication/walsender.c:1495 tcop/fastpath.c:281 tcop/postgres.c:997 tcop/postgres.c:1307 tcop/postgres.c:1566 tcop/postgres.c:1971 tcop/postgres.c:2339 tcop/postgres.c:2414 #, c-format msgid "current transaction is aborted, commands ignored until end of transaction block" msgstr "" "la transaction est annulée, les commandes sont ignorées jusqu'à la fin du bloc\n" "de la transaction" -#: replication/walsender.c:1548 +#: replication/walsender.c:1560 #, c-format -msgid "not connected to database" -msgstr "non connecté à une base de données" +msgid "cannot execute SQL commands in WAL sender for physical replication" +msgstr "ne peut pas exécuter des commandes SQL dans le walsender pour la réplication physique" -#: replication/walsender.c:1588 replication/walsender.c:1604 +#: replication/walsender.c:1606 replication/walsender.c:1622 #, c-format msgid "unexpected EOF on standby connection" msgstr "fin de fichier (EOF) inattendue de la connexion du serveur en attente" -#: replication/walsender.c:1618 +#: replication/walsender.c:1636 #, c-format msgid "unexpected standby message type 
\"%c\", after receiving CopyDone" msgstr "type de message standby « %c » inattendu, après avoir reçu CopyDone" -#: replication/walsender.c:1656 +#: replication/walsender.c:1674 #, c-format msgid "invalid standby message type \"%c\"" msgstr "type de message « %c » invalide pour le serveur en standby" -#: replication/walsender.c:1697 +#: replication/walsender.c:1715 #, c-format msgid "unexpected message type \"%c\"" msgstr "type de message « %c » inattendu" -#: replication/walsender.c:2067 +#: replication/walsender.c:2085 #, c-format msgid "terminating walsender process due to replication timeout" msgstr "arrêt du processus walreceiver suite à l'expiration du délai de réplication" -#: replication/walsender.c:2156 +#: replication/walsender.c:2171 #, c-format msgid "standby \"%s\" has now caught up with primary" msgstr "le serveur standby « %s » a maintenant rattrapé le serveur primaire" -#: replication/walsender.c:2263 +#: replication/walsender.c:2278 #, c-format msgid "number of requested standby connections exceeds max_wal_senders (currently %d)" msgstr "" @@ -17792,13 +17862,13 @@ msgstr "la règle de la vue pour « %s » doit être nommée « %s »" #: rewrite/rewriteDefine.c:428 #, c-format -msgid "could not convert partitioned table \"%s\" to a view" -msgstr "n'a pas pu convertir la table partitionnée « %s » en une vue" +msgid "cannot convert partitioned table \"%s\" to a view" +msgstr "ne peut pas convertir la table partitionnée « %s » en une vue" #: rewrite/rewriteDefine.c:434 #, c-format -msgid "could not convert partition \"%s\" to a view" -msgstr "n'a pas pu convertir la partition « %s » en une vue" +msgid "cannot convert partition \"%s\" to a view" +msgstr "ne peut pas convertir la partition « %s » en une vue" #: rewrite/rewriteDefine.c:442 #, c-format @@ -17932,197 +18002,197 @@ msgstr "la règle « %s » de la relation « %s » n'existe pas" msgid "renaming an ON SELECT rule is not allowed" msgstr "le renommage d'une règle ON SELECT n'est pas autorisé" -#: rewrite/rewriteHandler.c:543 +#: rewrite/rewriteHandler.c:541 #, c-format msgid "WITH query name \"%s\" appears in both a rule action and the query being rewritten" msgstr "" "Le nom de la requête WITH «%s » apparaît à la fois dans l'action d'une règle\n" "et la requête en cours de ré-écriture." -#: rewrite/rewriteHandler.c:603 +#: rewrite/rewriteHandler.c:601 #, c-format msgid "cannot have RETURNING lists in multiple rules" msgstr "ne peut pas avoir des listes RETURNING dans plusieurs règles" -#: rewrite/rewriteHandler.c:818 +#: rewrite/rewriteHandler.c:823 #, c-format msgid "cannot insert into column \"%s\"" msgstr "ne peut pas insérer dans la colonne « %s »" -#: rewrite/rewriteHandler.c:819 rewrite/rewriteHandler.c:834 +#: rewrite/rewriteHandler.c:824 rewrite/rewriteHandler.c:839 #, c-format msgid "Column \"%s\" is an identity column defined as GENERATED ALWAYS." msgstr "La colonne « %s » est une colonne d'identité définie comme GENERATED ALWAYS." -#: rewrite/rewriteHandler.c:821 +#: rewrite/rewriteHandler.c:826 #, c-format msgid "Use OVERRIDING SYSTEM VALUE to override." msgstr "Utilisez OVERRIDING SYSTEM VALUE pour surcharger." 
-#: rewrite/rewriteHandler.c:833 +#: rewrite/rewriteHandler.c:838 #, c-format msgid "column \"%s\" can only be updated to DEFAULT" msgstr "la colonne « %s » peut seulement être mise à jour en DEFAULT" -#: rewrite/rewriteHandler.c:1005 rewrite/rewriteHandler.c:1023 +#: rewrite/rewriteHandler.c:1000 rewrite/rewriteHandler.c:1018 #, c-format msgid "multiple assignments to same column \"%s\"" msgstr "affectations multiples pour la même colonne « %s »" -#: rewrite/rewriteHandler.c:1809 rewrite/rewriteHandler.c:3431 +#: rewrite/rewriteHandler.c:1818 rewrite/rewriteHandler.c:3419 #, c-format msgid "infinite recursion detected in rules for relation \"%s\"" msgstr "récursion infinie détectée dans les règles de la relation « %s »" -#: rewrite/rewriteHandler.c:1895 +#: rewrite/rewriteHandler.c:1904 #, c-format msgid "infinite recursion detected in policy for relation \"%s\"" msgstr "récursion infinie détectée dans la politique pour la relation « %s »" -#: rewrite/rewriteHandler.c:2212 +#: rewrite/rewriteHandler.c:2221 msgid "Junk view columns are not updatable." msgstr "Les colonnes « junk » des vues ne sont pas automatiquement disponibles en écriture." -#: rewrite/rewriteHandler.c:2217 +#: rewrite/rewriteHandler.c:2226 msgid "View columns that are not columns of their base relation are not updatable." msgstr "Les colonnes des vues qui ne font pas référence à des colonnes de la relation de base ne sont pas automatiquement modifiables." -#: rewrite/rewriteHandler.c:2220 +#: rewrite/rewriteHandler.c:2229 msgid "View columns that refer to system columns are not updatable." msgstr "Les colonnes des vues qui font référence à des colonnes systèmes ne sont pas automatiquement modifiables." -#: rewrite/rewriteHandler.c:2223 +#: rewrite/rewriteHandler.c:2232 msgid "View columns that return whole-row references are not updatable." msgstr "Les colonnes de vue qui font références à des lignes complètes ne sont pas automatiquement modifiables." -#: rewrite/rewriteHandler.c:2281 +#: rewrite/rewriteHandler.c:2290 msgid "Views containing DISTINCT are not automatically updatable." msgstr "Les vues contenant DISTINCT ne sont pas automatiquement disponibles en écriture." -#: rewrite/rewriteHandler.c:2284 +#: rewrite/rewriteHandler.c:2293 msgid "Views containing GROUP BY are not automatically updatable." msgstr "Les vues contenant GROUP BY ne sont pas automatiquement disponibles en écriture." -#: rewrite/rewriteHandler.c:2287 +#: rewrite/rewriteHandler.c:2296 msgid "Views containing HAVING are not automatically updatable." msgstr "Les vues contenant HAVING ne sont pas automatiquement disponibles en écriture." -#: rewrite/rewriteHandler.c:2290 +#: rewrite/rewriteHandler.c:2299 msgid "Views containing UNION, INTERSECT, or EXCEPT are not automatically updatable." msgstr "Les vues contenant UNION, INTERSECT ou EXCEPT ne sont pas automatiquement disponibles en écriture." -#: rewrite/rewriteHandler.c:2293 +#: rewrite/rewriteHandler.c:2302 msgid "Views containing WITH are not automatically updatable." msgstr "Les vues contenant WITH ne sont pas automatiquement disponibles en écriture." -#: rewrite/rewriteHandler.c:2296 +#: rewrite/rewriteHandler.c:2305 msgid "Views containing LIMIT or OFFSET are not automatically updatable." msgstr "Les vues contenant LIMIT ou OFFSET ne sont pas automatiquement disponibles en écriture." -#: rewrite/rewriteHandler.c:2308 +#: rewrite/rewriteHandler.c:2317 msgid "Views that return aggregate functions are not automatically updatable." 
msgstr "Les vues qui renvoient des fonctions d'agrégat ne sont pas automatiquement disponibles en écriture." -#: rewrite/rewriteHandler.c:2311 +#: rewrite/rewriteHandler.c:2320 msgid "Views that return window functions are not automatically updatable." msgstr "Les vues qui renvoient des fonctions de fenêtrage ne sont pas automatiquement disponibles en écriture." -#: rewrite/rewriteHandler.c:2314 +#: rewrite/rewriteHandler.c:2323 msgid "Views that return set-returning functions are not automatically updatable." msgstr "Les vues qui renvoient des fonctions à plusieurs lignes ne sont pas automatiquement disponibles en écriture." -#: rewrite/rewriteHandler.c:2321 rewrite/rewriteHandler.c:2325 rewrite/rewriteHandler.c:2333 +#: rewrite/rewriteHandler.c:2330 rewrite/rewriteHandler.c:2334 rewrite/rewriteHandler.c:2342 msgid "Views that do not select from a single table or view are not automatically updatable." msgstr "Les vues qui lisent plusieurs tables ou vues ne sont pas automatiquement disponibles en écriture." -#: rewrite/rewriteHandler.c:2336 +#: rewrite/rewriteHandler.c:2345 msgid "Views containing TABLESAMPLE are not automatically updatable." msgstr "Les vues contenant TABLESAMPLE ne sont pas automatiquement disponibles en écriture." -#: rewrite/rewriteHandler.c:2360 +#: rewrite/rewriteHandler.c:2369 msgid "Views that have no updatable columns are not automatically updatable." msgstr "Les vues qui possèdent des colonnes non modifiables ne sont pas automatiquement disponibles en écriture." -#: rewrite/rewriteHandler.c:2814 +#: rewrite/rewriteHandler.c:2823 #, c-format msgid "cannot insert into column \"%s\" of view \"%s\"" msgstr "ne peut pas insérer dans la colonne « %s » de la vue « %s »" -#: rewrite/rewriteHandler.c:2822 +#: rewrite/rewriteHandler.c:2831 #, c-format msgid "cannot update column \"%s\" of view \"%s\"" msgstr "ne peut pas mettre à jour la colonne « %s » de la vue « %s »" -#: rewrite/rewriteHandler.c:3225 +#: rewrite/rewriteHandler.c:3214 #, c-format msgid "DO INSTEAD NOTHING rules are not supported for data-modifying statements in WITH" msgstr "" "les règles DO INSTEAD NOTHING ne sont pas supportées par les instructions\n" "de modification de données dans WITH" -#: rewrite/rewriteHandler.c:3239 +#: rewrite/rewriteHandler.c:3228 #, c-format msgid "conditional DO INSTEAD rules are not supported for data-modifying statements in WITH" msgstr "" "les règles DO INSTEAD conditionnelles ne sont pas supportées par les\n" "instructions de modification de données dans WITH" -#: rewrite/rewriteHandler.c:3243 +#: rewrite/rewriteHandler.c:3232 #, c-format msgid "DO ALSO rules are not supported for data-modifying statements in WITH" msgstr "" "les règles DO ALSO ne sont pas supportées par les instructions de modification\n" "de données dans WITH" -#: rewrite/rewriteHandler.c:3248 +#: rewrite/rewriteHandler.c:3237 #, c-format msgid "multi-statement DO INSTEAD rules are not supported for data-modifying statements in WITH" msgstr "" "les règles DO INSTEAD multi-instructions ne sont pas supportées pour les\n" "instructions de modification de données dans WITH" -#: rewrite/rewriteHandler.c:3468 +#: rewrite/rewriteHandler.c:3456 #, c-format msgid "cannot perform INSERT RETURNING on relation \"%s\"" msgstr "ne peut pas exécuter INSERT RETURNING sur la relation « %s »" -#: rewrite/rewriteHandler.c:3470 +#: rewrite/rewriteHandler.c:3458 #, c-format msgid "You need an unconditional ON INSERT DO INSTEAD rule with a RETURNING clause." 
msgstr "" "Vous avez besoin d'une règle ON INSERT DO INSTEAD sans condition avec une\n" "clause RETURNING." -#: rewrite/rewriteHandler.c:3475 +#: rewrite/rewriteHandler.c:3463 #, c-format msgid "cannot perform UPDATE RETURNING on relation \"%s\"" msgstr "ne peut pas exécuter UPDATE RETURNING sur la relation « %s »" -#: rewrite/rewriteHandler.c:3477 +#: rewrite/rewriteHandler.c:3465 #, c-format msgid "You need an unconditional ON UPDATE DO INSTEAD rule with a RETURNING clause." msgstr "" "Vous avez besoin d'une règle ON UPDATE DO INSTEAD sans condition avec une\n" "clause RETURNING." -#: rewrite/rewriteHandler.c:3482 +#: rewrite/rewriteHandler.c:3470 #, c-format msgid "cannot perform DELETE RETURNING on relation \"%s\"" msgstr "ne peut pas exécuter DELETE RETURNING sur la relation « %s »" -#: rewrite/rewriteHandler.c:3484 +#: rewrite/rewriteHandler.c:3472 #, c-format msgid "You need an unconditional ON DELETE DO INSTEAD rule with a RETURNING clause." msgstr "" "Vous avez besoin d'une règle ON DELETE DO INSTEAD sans condition avec une\n" "clause RETURNING." -#: rewrite/rewriteHandler.c:3502 +#: rewrite/rewriteHandler.c:3490 #, c-format msgid "INSERT with ON CONFLICT clause cannot be used with table that has INSERT or UPDATE rules" msgstr "INSERT avec une clause ON CONFLICT ne peut pas être utilisée avec une table qui a des règles pour INSERT ou UPDATE" -#: rewrite/rewriteHandler.c:3559 +#: rewrite/rewriteHandler.c:3547 #, c-format msgid "WITH cannot be used in a query that is rewritten by rules into multiple queries" msgstr "WITH ne peut pas être utilisé dans une requête réécrite par des règles en plusieurs requêtes" @@ -18170,7 +18240,7 @@ msgstr "" msgid "invalid Unicode escape character" msgstr "chaîne d'échappement Unicode invalide" -#: scan.l:605 scan.l:613 scan.l:621 scan.l:622 scan.l:623 scan.l:1337 scan.l:1364 scan.l:1368 scan.l:1406 scan.l:1410 scan.l:1432 scan.l:1442 +#: scan.l:605 scan.l:613 scan.l:621 scan.l:622 scan.l:623 scan.l:1338 scan.l:1365 scan.l:1369 scan.l:1407 scan.l:1411 scan.l:1433 scan.l:1443 msgid "invalid Unicode surrogate pair" msgstr "paire surrogate Unicode invalide" @@ -18213,56 +18283,56 @@ msgid "operator too long" msgstr "opérateur trop long" #. translator: %s is typically the translation of "syntax error" -#: scan.l:1077 +#: scan.l:1078 #, c-format msgid "%s at end of input" msgstr "%s à la fin de l'entrée" #. translator: first %s is typically the translation of "syntax error" -#: scan.l:1085 +#: scan.l:1086 #, c-format msgid "%s at or near \"%s\"" msgstr "%s sur ou près de « %s »" -#: scan.l:1251 scan.l:1283 +#: scan.l:1252 scan.l:1284 msgid "Unicode escape values cannot be used for code point values above 007F when the server encoding is not UTF8" msgstr "" "Les valeurs d'échappement unicode ne peuvent pas être utilisées pour les\n" "valeurs de point de code au-dessus de 007F quand l'encodage serveur n'est\n" "pas UTF8" -#: scan.l:1279 scan.l:1424 +#: scan.l:1280 scan.l:1425 msgid "invalid Unicode escape value" msgstr "valeur d'échappement Unicode invalide" -#: scan.l:1488 +#: scan.l:1489 #, c-format msgid "nonstandard use of \\' in a string literal" msgstr "utilisation non standard de \\' dans une chaîne littérale" -#: scan.l:1489 +#: scan.l:1490 #, c-format msgid "Use '' to write quotes in strings, or use the escape string syntax (E'...')." msgstr "" "Utilisez '' pour écrire des guillemets dans une chaîne ou utilisez la syntaxe de\n" "chaîne d'échappement (E'...')." 
-#: scan.l:1498 +#: scan.l:1499 #, c-format msgid "nonstandard use of \\\\ in a string literal" msgstr "utilisation non standard de \\\\ dans une chaîne littérale" -#: scan.l:1499 +#: scan.l:1500 #, c-format msgid "Use the escape string syntax for backslashes, e.g., E'\\\\'." msgstr "Utilisez la syntaxe de chaîne d'échappement pour les antislashs, c'est-à-dire E'\\\\'." -#: scan.l:1513 +#: scan.l:1514 #, c-format msgid "nonstandard use of escape in a string literal" msgstr "utilisation non standard d'un échappement dans une chaîne littérale" -#: scan.l:1514 +#: scan.l:1515 #, c-format msgid "Use the escape string syntax for escapes, e.g., E'\\r\\n'." msgstr "" @@ -18304,7 +18374,7 @@ msgstr "tableau d'éléments de longueur zéro invalide dans MVDependencies" msgid "cannot accept a value of type %s" msgstr "ne peut pas accepter une valeur de type %s" -#: statistics/extended_stats.c:102 +#: statistics/extended_stats.c:103 #, c-format msgid "statistics object \"%s.%s\" could not be computed for relation \"%s.%s\"" msgstr "l'objet de statistiques « %s.%s » n'a pas pu être calculé pour la relation « %s.%s »" @@ -18312,7 +18382,7 @@ msgstr "l'objet de statistiques « %s.%s » n'a pas pu être calculé pour la re #: statistics/mvdistinct.c:259 #, c-format msgid "invalid ndistinct magic %08x (expected %08x)" -msgstr "" +msgstr "nombre magique ndistinct invalide %08x (attendu %08x)" #: statistics/mvdistinct.c:264 #, c-format @@ -18327,7 +18397,7 @@ msgstr "tableau d'élément de longueur zéro invalide dans MVNDistinct" #: statistics/mvdistinct.c:278 #, c-format msgid "invalid MVNDistinct size %zd (expected at least %zd)" -msgstr "" +msgstr "taille MVNDistinct %zd invalide (attendue au moins %zd)" #: storage/buffer/bufmgr.c:544 storage/buffer/bufmgr.c:657 #, c-format @@ -18368,7 +18438,7 @@ msgstr "Échecs multiples --- l'erreur d'écriture pourrait être permanent." 
msgid "writing block %u of relation %s" msgstr "écriture du bloc %u de la relation %s" -#: storage/buffer/bufmgr.c:4356 +#: storage/buffer/bufmgr.c:4358 #, c-format msgid "snapshot too old" msgstr "snapshot trop ancien" @@ -18383,72 +18453,72 @@ msgstr "aucun tampon local vide disponible" msgid "cannot access temporary tables during a parallel operation" msgstr "ne peut pas accéder à des tables temporaires pendant une opération parallèle" -#: storage/file/fd.c:443 storage/file/fd.c:515 storage/file/fd.c:551 +#: storage/file/fd.c:442 storage/file/fd.c:514 storage/file/fd.c:550 #, c-format msgid "could not flush dirty data: %m" msgstr "n'a pas pu vider les données modifiées : %m" -#: storage/file/fd.c:473 +#: storage/file/fd.c:472 #, c-format msgid "could not determine dirty data size: %m" msgstr "n'a pas pu déterminer la taille des données modifiées : %m" -#: storage/file/fd.c:525 +#: storage/file/fd.c:524 #, c-format msgid "could not munmap() while flushing data: %m" msgstr "n'a pas exécuter munmap() durant la synchronisation des données : %m" -#: storage/file/fd.c:726 +#: storage/file/fd.c:725 #, c-format msgid "could not link file \"%s\" to \"%s\": %m" msgstr "n'a pas pu lier le fichier « %s » à « %s » : %m" -#: storage/file/fd.c:820 +#: storage/file/fd.c:819 #, c-format msgid "getrlimit failed: %m" msgstr "échec de getrlimit : %m" -#: storage/file/fd.c:910 +#: storage/file/fd.c:909 #, c-format msgid "insufficient file descriptors available to start server process" msgstr "nombre de descripteurs de fichier insuffisants pour lancer le processus serveur" -#: storage/file/fd.c:911 +#: storage/file/fd.c:910 #, c-format msgid "System allows %d, we need at least %d." msgstr "Le système autorise %d, nous avons besoin d'au moins %d." -#: storage/file/fd.c:952 storage/file/fd.c:2129 storage/file/fd.c:2222 storage/file/fd.c:2370 +#: storage/file/fd.c:951 storage/file/fd.c:2134 storage/file/fd.c:2227 storage/file/fd.c:2379 #, c-format msgid "out of file descriptors: %m; release and retry" msgstr "plus de descripteurs de fichiers : %m; quittez et ré-essayez" -#: storage/file/fd.c:1557 +#: storage/file/fd.c:1562 #, c-format msgid "temporary file: path \"%s\", size %lu" msgstr "fichier temporaire : chemin « %s », taille %lu" -#: storage/file/fd.c:1760 +#: storage/file/fd.c:1765 #, c-format msgid "temporary file size exceeds temp_file_limit (%dkB)" msgstr "la taille du fichier temporaire dépasse temp_file_limit (%d Ko)" -#: storage/file/fd.c:2105 storage/file/fd.c:2155 +#: storage/file/fd.c:2110 storage/file/fd.c:2160 #, c-format msgid "exceeded maxAllocatedDescs (%d) while trying to open file \"%s\"" msgstr "dépassement de maxAllocatedDescs (%d) lors de la tentative d'ouverture du fichier « %s »" -#: storage/file/fd.c:2195 +#: storage/file/fd.c:2200 #, c-format msgid "exceeded maxAllocatedDescs (%d) while trying to execute command \"%s\"" msgstr "dépassement de maxAllocatedDescs (%d) lors de la tentative d'exécution de la commande « %s »" -#: storage/file/fd.c:2346 +#: storage/file/fd.c:2355 #, c-format msgid "exceeded maxAllocatedDescs (%d) while trying to open directory \"%s\"" msgstr "dépassement de maxAllocatedDescs (%d) lors de la tentative d'ouverture du répertoire « %s »" -#: storage/file/fd.c:2432 utils/adt/genfile.c:511 +#: storage/file/fd.c:2446 utils/adt/genfile.c:511 #, c-format msgid "could not read directory \"%s\": %m" msgstr "n'a pas pu lire le répertoire « %s » : %m" @@ -18478,67 +18548,67 @@ msgstr "le segment contrôle de mémoire partagée dynamique n'est pas valide" msgid "too many 
dynamic shared memory segments" msgstr "trop de segments de mémoire partagée dynamique" -#: storage/ipc/dsm_impl.c:261 storage/ipc/dsm_impl.c:361 storage/ipc/dsm_impl.c:533 storage/ipc/dsm_impl.c:648 storage/ipc/dsm_impl.c:819 storage/ipc/dsm_impl.c:963 +#: storage/ipc/dsm_impl.c:262 storage/ipc/dsm_impl.c:363 storage/ipc/dsm_impl.c:580 storage/ipc/dsm_impl.c:695 storage/ipc/dsm_impl.c:866 storage/ipc/dsm_impl.c:1010 #, c-format msgid "could not unmap shared memory segment \"%s\": %m" msgstr "n'a pas pu annuler le mappage du segment de mémoire partagée « %s » : %m" -#: storage/ipc/dsm_impl.c:271 storage/ipc/dsm_impl.c:543 storage/ipc/dsm_impl.c:658 storage/ipc/dsm_impl.c:829 +#: storage/ipc/dsm_impl.c:272 storage/ipc/dsm_impl.c:590 storage/ipc/dsm_impl.c:705 storage/ipc/dsm_impl.c:876 #, c-format msgid "could not remove shared memory segment \"%s\": %m" msgstr "n'a pas pu supprimer le segment de mémoire partagée « %s » : %m" -#: storage/ipc/dsm_impl.c:292 storage/ipc/dsm_impl.c:729 storage/ipc/dsm_impl.c:843 +#: storage/ipc/dsm_impl.c:293 storage/ipc/dsm_impl.c:776 storage/ipc/dsm_impl.c:890 #, c-format msgid "could not open shared memory segment \"%s\": %m" msgstr "n'a pas pu ouvrir le segment de mémoire partagée « %s » : %m" -#: storage/ipc/dsm_impl.c:316 storage/ipc/dsm_impl.c:559 storage/ipc/dsm_impl.c:774 storage/ipc/dsm_impl.c:867 +#: storage/ipc/dsm_impl.c:317 storage/ipc/dsm_impl.c:606 storage/ipc/dsm_impl.c:821 storage/ipc/dsm_impl.c:914 #, c-format msgid "could not stat shared memory segment \"%s\": %m" msgstr "n'a pas pu obtenir des informations sur le segment de mémoire partagée « %s » : %m" -#: storage/ipc/dsm_impl.c:335 storage/ipc/dsm_impl.c:886 storage/ipc/dsm_impl.c:936 +#: storage/ipc/dsm_impl.c:337 storage/ipc/dsm_impl.c:933 storage/ipc/dsm_impl.c:983 #, c-format msgid "could not resize shared memory segment \"%s\" to %zu bytes: %m" msgstr "n'a pas pu retailler le segment de mémoire partagée « %s » en %zu octets : %m" -#: storage/ipc/dsm_impl.c:385 storage/ipc/dsm_impl.c:580 storage/ipc/dsm_impl.c:750 storage/ipc/dsm_impl.c:987 +#: storage/ipc/dsm_impl.c:387 storage/ipc/dsm_impl.c:627 storage/ipc/dsm_impl.c:797 storage/ipc/dsm_impl.c:1034 #, c-format msgid "could not map shared memory segment \"%s\": %m" msgstr "n'a pas pu mapper le segment de mémoire partagée « %s » : %m" -#: storage/ipc/dsm_impl.c:515 +#: storage/ipc/dsm_impl.c:562 #, c-format msgid "could not get shared memory segment: %m" msgstr "n'a pas pu obtenir le segment de mémoire partagée : %m" -#: storage/ipc/dsm_impl.c:714 +#: storage/ipc/dsm_impl.c:761 #, c-format msgid "could not create shared memory segment \"%s\": %m" msgstr "n'a pas pu créer le segment de mémoire partagée « %s » : %m" -#: storage/ipc/dsm_impl.c:1029 storage/ipc/dsm_impl.c:1077 +#: storage/ipc/dsm_impl.c:1076 storage/ipc/dsm_impl.c:1124 #, c-format msgid "could not duplicate handle for \"%s\": %m" msgstr "n'a pas pu dupliquer le lien pour « %s » : %m" -#: storage/ipc/latch.c:828 +#: storage/ipc/latch.c:829 #, c-format msgid "epoll_ctl() failed: %m" msgstr "échec de epoll_ctl() : %m" -#: storage/ipc/latch.c:1057 +#: storage/ipc/latch.c:1060 #, c-format msgid "epoll_wait() failed: %m" msgstr "échec de epoll_wait() : %m" -#: storage/ipc/latch.c:1179 +#: storage/ipc/latch.c:1182 #, c-format msgid "poll() failed: %m" msgstr "échec de poll() : %m" -#: storage/ipc/shm_toc.c:108 storage/ipc/shm_toc.c:190 storage/lmgr/lock.c:883 storage/lmgr/lock.c:917 storage/lmgr/lock.c:2679 storage/lmgr/lock.c:4004 storage/lmgr/lock.c:4069 
storage/lmgr/lock.c:4361 storage/lmgr/predicate.c:2399 storage/lmgr/predicate.c:2414 storage/lmgr/predicate.c:3806 storage/lmgr/predicate.c:4949 utils/hash/dynahash.c:1061 +#: storage/ipc/shm_toc.c:108 storage/ipc/shm_toc.c:190 storage/lmgr/lock.c:883 storage/lmgr/lock.c:917 storage/lmgr/lock.c:2679 storage/lmgr/lock.c:4004 storage/lmgr/lock.c:4069 storage/lmgr/lock.c:4361 storage/lmgr/predicate.c:2401 storage/lmgr/predicate.c:2416 storage/lmgr/predicate.c:3808 storage/lmgr/predicate.c:4951 utils/hash/dynahash.c:1061 #, c-format msgid "out of shared memory" msgstr "mémoire partagée épuisée" @@ -18568,7 +18638,7 @@ msgstr "pas assez de mémoire partagée pour la structure de données « %s » ( msgid "requested shared memory size overflows size_t" msgstr "la taille de la mémoire partagée demandée dépasse size_t" -#: storage/ipc/standby.c:531 tcop/postgres.c:2985 +#: storage/ipc/standby.c:531 tcop/postgres.c:2983 #, c-format msgid "canceling statement due to conflict with recovery" msgstr "annulation de la requête à cause d'un conflit avec la restauration" @@ -18618,97 +18688,97 @@ msgstr "Bloquage mortel détecté" msgid "See server log for query details." msgstr "Voir les journaux applicatifs du serveur pour les détails sur la requête." -#: storage/lmgr/lmgr.c:719 +#: storage/lmgr/lmgr.c:745 #, c-format msgid "while updating tuple (%u,%u) in relation \"%s\"" msgstr "lors de la mise à jour de la ligne (%u,%u) dans la relation « %s »" -#: storage/lmgr/lmgr.c:722 +#: storage/lmgr/lmgr.c:748 #, c-format msgid "while deleting tuple (%u,%u) in relation \"%s\"" msgstr "lors de la suppression de la ligne (%u,%u) dans la relation « %s »" -#: storage/lmgr/lmgr.c:725 +#: storage/lmgr/lmgr.c:751 #, c-format msgid "while locking tuple (%u,%u) in relation \"%s\"" msgstr "lors du verrouillage de la ligne (%u,%u) dans la relation « %s »" -#: storage/lmgr/lmgr.c:728 +#: storage/lmgr/lmgr.c:754 #, c-format msgid "while locking updated version (%u,%u) of tuple in relation \"%s\"" msgstr "lors du verrou de la version mise à jour (%u, %u) de la ligne de la relation « %s »" -#: storage/lmgr/lmgr.c:731 +#: storage/lmgr/lmgr.c:757 #, c-format msgid "while inserting index tuple (%u,%u) in relation \"%s\"" msgstr "lors de l'insertion de l'enregistrement (%u, %u) de l'index dans la relation « %s »" -#: storage/lmgr/lmgr.c:734 +#: storage/lmgr/lmgr.c:760 #, c-format msgid "while checking uniqueness of tuple (%u,%u) in relation \"%s\"" msgstr "lors de la vérification de l'unicité de l'enregistrement (%u,%u) dans la relation « %s »" -#: storage/lmgr/lmgr.c:737 +#: storage/lmgr/lmgr.c:763 #, c-format msgid "while rechecking updated tuple (%u,%u) in relation \"%s\"" msgstr "lors de la re-vérification de l'enregistrement mis à jour (%u,%u) dans la relation « %s »" -#: storage/lmgr/lmgr.c:740 +#: storage/lmgr/lmgr.c:766 #, c-format msgid "while checking exclusion constraint on tuple (%u,%u) in relation \"%s\"" msgstr "lors de la vérification de la contrainte d'exclusion sur l'enregistrement (%u,%u) dans la relation « %s »" -#: storage/lmgr/lmgr.c:960 +#: storage/lmgr/lmgr.c:986 #, c-format msgid "relation %u of database %u" msgstr "relation %u de la base de données %u" -#: storage/lmgr/lmgr.c:966 +#: storage/lmgr/lmgr.c:992 #, c-format msgid "extension of relation %u of database %u" msgstr "extension de la relation %u de la base de données %u" -#: storage/lmgr/lmgr.c:972 +#: storage/lmgr/lmgr.c:998 #, c-format msgid "page %u of relation %u of database %u" msgstr "page %u de la relation %u de la base de données %u" -#: 
storage/lmgr/lmgr.c:979 +#: storage/lmgr/lmgr.c:1005 #, c-format msgid "tuple (%u,%u) of relation %u of database %u" msgstr "ligne (%u,%u) de la relation %u de la base de données %u" -#: storage/lmgr/lmgr.c:987 +#: storage/lmgr/lmgr.c:1013 #, c-format msgid "transaction %u" msgstr "transaction %u" -#: storage/lmgr/lmgr.c:992 +#: storage/lmgr/lmgr.c:1018 #, c-format msgid "virtual transaction %d/%u" msgstr "transaction virtuelle %d/%u" -#: storage/lmgr/lmgr.c:998 +#: storage/lmgr/lmgr.c:1024 #, c-format msgid "speculative token %u of transaction %u" msgstr "jeton spéculatif %u de la transaction %u" -#: storage/lmgr/lmgr.c:1004 +#: storage/lmgr/lmgr.c:1030 #, c-format msgid "object %u of class %u of database %u" msgstr "objet %u de la classe %u de la base de données %u" -#: storage/lmgr/lmgr.c:1012 +#: storage/lmgr/lmgr.c:1038 #, c-format msgid "user lock [%u,%u,%u]" msgstr "verrou utilisateur [%u,%u,%u]" -#: storage/lmgr/lmgr.c:1019 +#: storage/lmgr/lmgr.c:1045 #, c-format msgid "advisory lock [%u,%u,%u,%u]" msgstr "verrou informatif [%u,%u,%u,%u]" -#: storage/lmgr/lmgr.c:1027 +#: storage/lmgr/lmgr.c:1053 #, c-format msgid "unrecognized locktag type %d" msgstr "type locktag non reconnu %d" @@ -18737,80 +18807,80 @@ msgstr "Vous pourriez avoir besoin d'augmenter max_locks_per_transaction." msgid "cannot PREPARE while holding both session-level and transaction-level locks on the same object" msgstr "ne peut pas utiliser PREPARE lorsque des verrous de niveau session et de niveau transaction sont détenus sur le même objet" -#: storage/lmgr/predicate.c:684 +#: storage/lmgr/predicate.c:686 #, c-format msgid "not enough elements in RWConflictPool to record a read/write conflict" msgstr "pas assez d'éléments dans RWConflictPool pour enregistrer un conflit en lecture/écriture" -#: storage/lmgr/predicate.c:685 storage/lmgr/predicate.c:713 +#: storage/lmgr/predicate.c:687 storage/lmgr/predicate.c:715 #, c-format msgid "You might need to run fewer transactions at a time or increase max_connections." msgstr "" "Il est possible que vous ayez à exécuter moins de transactions à la fois\n" "ou d'augmenter max_connections." -#: storage/lmgr/predicate.c:712 +#: storage/lmgr/predicate.c:714 #, c-format msgid "not enough elements in RWConflictPool to record a potential read/write conflict" msgstr "pas assez d'éléments dans RWConflictPool pour enregistrer un conflit en lecture/écriture potentiel" -#: storage/lmgr/predicate.c:919 +#: storage/lmgr/predicate.c:921 #, c-format msgid "memory for serializable conflict tracking is nearly exhausted" msgstr "la mémoire pour tracer les conflits sérialisables est pratiquement pleine" -#: storage/lmgr/predicate.c:920 +#: storage/lmgr/predicate.c:922 #, c-format msgid "There might be an idle transaction or a forgotten prepared transaction causing this." msgstr "" "Il pourrait y avoir une transaction en attente ou une transaction préparée\n" "oubliée causant cela." -#: storage/lmgr/predicate.c:1559 +#: storage/lmgr/predicate.c:1561 #, c-format msgid "deferrable snapshot was unsafe; trying a new one" msgstr "l'image déferrable est non sûre ; tentative avec une nouvelle image" -#: storage/lmgr/predicate.c:1648 +#: storage/lmgr/predicate.c:1650 #, c-format msgid "\"default_transaction_isolation\" is set to \"serializable\"." msgstr "« default_transaction_isolation » est configuré à « serializable »." -#: storage/lmgr/predicate.c:1649 +#: storage/lmgr/predicate.c:1651 #, c-format msgid "You can use \"SET default_transaction_isolation = 'repeatable read'\" to change the default."
msgstr "" "Vous pouvez utiliser « SET default_transaction_isolation = 'repeatable read' »\n" "pour modifier la valeur par défaut." -#: storage/lmgr/predicate.c:1689 +#: storage/lmgr/predicate.c:1691 #, c-format msgid "a snapshot-importing transaction must not be READ ONLY DEFERRABLE" msgstr "une transaction important un snapshot ne doit pas être READ ONLY DEFERRABLE" -#: storage/lmgr/predicate.c:1769 utils/time/snapmgr.c:621 utils/time/snapmgr.c:627 +#: storage/lmgr/predicate.c:1771 utils/time/snapmgr.c:621 utils/time/snapmgr.c:627 #, c-format msgid "could not import the requested snapshot" msgstr "n'a pas pu importer le snapshot demandé" -#: storage/lmgr/predicate.c:1770 utils/time/snapmgr.c:628 +#: storage/lmgr/predicate.c:1772 utils/time/snapmgr.c:628 #, c-format -msgid "The source process with pid %d is not running anymore." +msgid "The source process with PID %d is not running anymore." msgstr "Le processus source de PID %d n'est plus en cours d'exécution." -#: storage/lmgr/predicate.c:2400 storage/lmgr/predicate.c:2415 storage/lmgr/predicate.c:3807 +#: storage/lmgr/predicate.c:2402 storage/lmgr/predicate.c:2417 storage/lmgr/predicate.c:3809 #, c-format msgid "You might need to increase max_pred_locks_per_transaction." msgstr "Vous pourriez avoir besoin d'augmenter max_pred_locks_per_transaction." -#: storage/lmgr/predicate.c:3961 storage/lmgr/predicate.c:4050 storage/lmgr/predicate.c:4058 storage/lmgr/predicate.c:4097 storage/lmgr/predicate.c:4336 storage/lmgr/predicate.c:4673 storage/lmgr/predicate.c:4685 storage/lmgr/predicate.c:4727 storage/lmgr/predicate.c:4765 +#: storage/lmgr/predicate.c:3963 storage/lmgr/predicate.c:4052 storage/lmgr/predicate.c:4060 storage/lmgr/predicate.c:4099 storage/lmgr/predicate.c:4338 storage/lmgr/predicate.c:4675 storage/lmgr/predicate.c:4687 storage/lmgr/predicate.c:4729 storage/lmgr/predicate.c:4767 #, c-format msgid "could not serialize access due to read/write dependencies among transactions" msgstr "" "n'a pas pu sérialiser un accès à cause des dépendances de lecture/écriture\n" "parmi les transactions" -#: storage/lmgr/predicate.c:3963 storage/lmgr/predicate.c:4052 storage/lmgr/predicate.c:4060 storage/lmgr/predicate.c:4099 storage/lmgr/predicate.c:4338 storage/lmgr/predicate.c:4675 storage/lmgr/predicate.c:4687 storage/lmgr/predicate.c:4729 storage/lmgr/predicate.c:4767 +#: storage/lmgr/predicate.c:3965 storage/lmgr/predicate.c:4054 storage/lmgr/predicate.c:4062 storage/lmgr/predicate.c:4101 storage/lmgr/predicate.c:4340 storage/lmgr/predicate.c:4677 storage/lmgr/predicate.c:4689 storage/lmgr/predicate.c:4731 storage/lmgr/predicate.c:4769 #, c-format msgid "The transaction might succeed if retried." msgstr "La transaction pourrait réussir après une nouvelle tentative." 
@@ -19021,7 +19091,7 @@ msgstr "format des données binaires incorrect dans l'argument de la fonction %d msgid "unexpected EOF on client connection" msgstr "fin de fichier (EOF) inattendue de la connexion du client" -#: tcop/postgres.c:432 tcop/postgres.c:444 tcop/postgres.c:455 tcop/postgres.c:467 tcop/postgres.c:4316 +#: tcop/postgres.c:432 tcop/postgres.c:444 tcop/postgres.c:455 tcop/postgres.c:467 tcop/postgres.c:4314 #, c-format msgid "invalid frontend message type %d" msgstr "type %d du message de l'interface invalide" @@ -19192,10 +19262,9 @@ msgid "terminating autovacuum process due to administrator command" msgstr "arrêt du processus autovacuum suite à la demande de l'administrateur" #: tcop/postgres.c:2851 -#, fuzzy, c-format -#| msgid "terminating connection due to administrator command" +#, c-format msgid "terminating logical replication worker due to administrator command" -msgstr "arrêt des connexions suite à la demande de l'administrateur" +msgstr "arrêt des processus workers de réplication logique suite à la demande de l'administrateur" #: tcop/postgres.c:2855 #, c-format @@ -19217,37 +19286,37 @@ msgstr "arrêt des connexions suite à la demande de l'administrateur" msgid "connection to client lost" msgstr "connexion au client perdue" -#: tcop/postgres.c:2962 +#: tcop/postgres.c:2960 #, c-format msgid "canceling statement due to lock timeout" msgstr "annulation de la requête à cause du délai écoulé pour l'obtention des verrous" -#: tcop/postgres.c:2969 +#: tcop/postgres.c:2967 #, c-format msgid "canceling statement due to statement timeout" msgstr "annulation de la requête à cause du délai écoulé pour l'exécution de l'instruction" -#: tcop/postgres.c:2976 +#: tcop/postgres.c:2974 #, c-format msgid "canceling autovacuum task" msgstr "annulation de la tâche d'autovacuum" -#: tcop/postgres.c:2999 +#: tcop/postgres.c:2997 #, c-format msgid "canceling statement due to user request" msgstr "annulation de la requête à la demande de l'utilisateur" -#: tcop/postgres.c:3009 +#: tcop/postgres.c:3007 #, c-format msgid "terminating connection due to idle-in-transaction timeout" msgstr "arrêt des connexions suite à l'expiration du délai d'inactivité en transaction" -#: tcop/postgres.c:3123 +#: tcop/postgres.c:3121 #, c-format msgid "stack depth limit exceeded" msgstr "dépassement de limite (en profondeur) de la pile" -#: tcop/postgres.c:3124 +#: tcop/postgres.c:3122 #, c-format msgid "Increase the configuration parameter \"max_stack_depth\" (currently %dkB), after ensuring the platform's stack depth limit is adequate." msgstr "" @@ -19255,59 +19324,59 @@ msgstr "" "être assuré que la limite de profondeur de la pile de la plateforme est\n" "adéquate." -#: tcop/postgres.c:3187 +#: tcop/postgres.c:3185 #, c-format msgid "\"max_stack_depth\" must not exceed %ldkB." msgstr "« max_stack_depth » ne doit pas dépasser %ld Ko." -#: tcop/postgres.c:3189 +#: tcop/postgres.c:3187 #, c-format msgid "Increase the platform's stack depth limit via \"ulimit -s\" or local equivalent." msgstr "" "Augmenter la limite de profondeur de la pile sur votre plateforme via\n" "« ulimit -s » ou l'équivalent local." -#: tcop/postgres.c:3549 +#: tcop/postgres.c:3547 #, c-format msgid "invalid command-line argument for server process: %s" msgstr "argument invalide en ligne de commande pour le processus serveur : %s" -#: tcop/postgres.c:3550 tcop/postgres.c:3556 +#: tcop/postgres.c:3548 tcop/postgres.c:3554 #, c-format msgid "Try \"%s --help\" for more information." msgstr "Essayez « %s --help » pour plus d'informations." 
-#: tcop/postgres.c:3554 +#: tcop/postgres.c:3552 #, c-format msgid "%s: invalid command-line argument: %s" msgstr "%s : argument invalide en ligne de commande : %s" -#: tcop/postgres.c:3616 +#: tcop/postgres.c:3614 #, c-format msgid "%s: no database nor user name specified" msgstr "%s : aucune base de données et aucun utilisateur spécifiés" -#: tcop/postgres.c:4224 +#: tcop/postgres.c:4222 #, c-format msgid "invalid CLOSE message subtype %d" msgstr "sous-type %d du message CLOSE invalide" -#: tcop/postgres.c:4259 +#: tcop/postgres.c:4257 #, c-format msgid "invalid DESCRIBE message subtype %d" msgstr "sous-type %d du message DESCRIBE invalide" -#: tcop/postgres.c:4337 +#: tcop/postgres.c:4335 #, c-format msgid "fastpath function calls not supported in a replication connection" msgstr "appels à la fonction fastpath non supportés dans une connexion de réplication" -#: tcop/postgres.c:4341 +#: tcop/postgres.c:4339 #, c-format msgid "extended query protocol not supported in a replication connection" msgstr "protocole étendu de requêtes non supporté dans une connexion de réplication" -#: tcop/postgres.c:4511 +#: tcop/postgres.c:4509 #, c-format msgid "disconnection: session time: %d:%02d:%02d.%03d user=%s database=%s host=%s%s%s" msgstr "" @@ -19674,42 +19743,42 @@ msgstr "les tableaux d'ACL ne doivent pas contenir de valeurs NULL" msgid "extra garbage at the end of the ACL specification" msgstr "données superflues à la fin de la spécification de l'ACL" -#: utils/adt/acl.c:1196 +#: utils/adt/acl.c:1198 #, c-format msgid "grant options cannot be granted back to your own grantor" msgstr "les options grant ne peuvent pas être rendues à votre propre donateur" -#: utils/adt/acl.c:1257 +#: utils/adt/acl.c:1259 #, c-format msgid "dependent privileges exist" msgstr "des privilèges dépendants existent" -#: utils/adt/acl.c:1258 +#: utils/adt/acl.c:1260 #, c-format msgid "Use CASCADE to revoke them too." msgstr "Utilisez CASCADE pour les révoquer aussi." 
-#: utils/adt/acl.c:1520 +#: utils/adt/acl.c:1522 #, c-format msgid "aclinsert is no longer supported" msgstr "aclinsert n'est plus supporté" -#: utils/adt/acl.c:1530 +#: utils/adt/acl.c:1532 #, c-format msgid "aclremove is no longer supported" msgstr "aclremove n'est plus supporté" -#: utils/adt/acl.c:1616 utils/adt/acl.c:1670 +#: utils/adt/acl.c:1618 utils/adt/acl.c:1672 #, c-format msgid "unrecognized privilege type: \"%s\"" msgstr "type de droit non reconnu : « %s »" -#: utils/adt/acl.c:3410 utils/adt/regproc.c:102 utils/adt/regproc.c:277 +#: utils/adt/acl.c:3415 utils/adt/regproc.c:102 utils/adt/regproc.c:277 #, c-format msgid "function \"%s\" does not exist" msgstr "la fonction « %s » n'existe pas" -#: utils/adt/acl.c:4864 +#: utils/adt/acl.c:4869 #, c-format msgid "must be member of role \"%s\"" msgstr "doit être un membre du rôle « %s »" @@ -19719,7 +19788,7 @@ msgstr "doit être un membre du rôle « %s »" msgid "array size exceeds the maximum allowed (%d)" msgstr "la taille du tableau dépasse le maximum permis (%d)" -#: utils/adt/array_userfuncs.c:79 utils/adt/array_userfuncs.c:471 utils/adt/array_userfuncs.c:551 utils/adt/json.c:1764 utils/adt/json.c:1859 utils/adt/json.c:1897 utils/adt/jsonb.c:1127 utils/adt/jsonb.c:1156 utils/adt/jsonb.c:1592 utils/adt/jsonb.c:1756 utils/adt/jsonb.c:1766 +#: utils/adt/array_userfuncs.c:79 utils/adt/array_userfuncs.c:471 utils/adt/array_userfuncs.c:551 utils/adt/json.c:1765 utils/adt/json.c:1860 utils/adt/json.c:1898 utils/adt/jsonb.c:1128 utils/adt/jsonb.c:1157 utils/adt/jsonb.c:1549 utils/adt/jsonb.c:1713 utils/adt/jsonb.c:1723 #, c-format msgid "could not determine input data type" msgstr "n'a pas pu déterminer le type de données date en entrée" @@ -19730,7 +19799,7 @@ msgid "input data type is not an array" msgstr "le type de données en entrée n'est pas un tableau" #: utils/adt/array_userfuncs.c:132 utils/adt/array_userfuncs.c:186 utils/adt/arrayfuncs.c:1322 utils/adt/float.c:1228 utils/adt/float.c:1287 utils/adt/float.c:3556 utils/adt/float.c:3572 utils/adt/int.c:608 utils/adt/int.c:637 utils/adt/int.c:658 utils/adt/int.c:689 utils/adt/int.c:722 utils/adt/int.c:744 utils/adt/int.c:892 utils/adt/int.c:913 utils/adt/int.c:940 utils/adt/int.c:980 utils/adt/int.c:1001 utils/adt/int.c:1028 -#: utils/adt/int.c:1061 utils/adt/int.c:1144 utils/adt/int8.c:1298 utils/adt/numeric.c:2953 utils/adt/numeric.c:2962 utils/adt/varbit.c:1173 utils/adt/varbit.c:1575 utils/adt/varlena.c:1054 utils/adt/varlena.c:2953 +#: utils/adt/int.c:1061 utils/adt/int.c:1144 utils/adt/int8.c:1298 utils/adt/numeric.c:2953 utils/adt/numeric.c:2962 utils/adt/varbit.c:1173 utils/adt/varbit.c:1575 utils/adt/varlena.c:1054 utils/adt/varlena.c:2957 #, c-format msgid "integer out of range" msgstr "entier en dehors des limites" @@ -19893,7 +19962,7 @@ msgstr "aucune fonction de sortie binaire disponible pour le type %s" msgid "slices of fixed-length arrays not implemented" msgstr "les morceaux des tableaux à longueur fixe ne sont pas implémentés" -#: utils/adt/arrayfuncs.c:2230 utils/adt/arrayfuncs.c:2252 utils/adt/arrayfuncs.c:2301 utils/adt/arrayfuncs.c:2537 utils/adt/arrayfuncs.c:2848 utils/adt/arrayfuncs.c:5738 utils/adt/arrayfuncs.c:5764 utils/adt/arrayfuncs.c:5775 utils/adt/json.c:2295 utils/adt/json.c:2370 utils/adt/jsonb.c:1370 utils/adt/jsonb.c:1456 utils/adt/jsonfuncs.c:4141 utils/adt/jsonfuncs.c:4292 utils/adt/jsonfuncs.c:4337 utils/adt/jsonfuncs.c:4384 +#: utils/adt/arrayfuncs.c:2230 utils/adt/arrayfuncs.c:2252 utils/adt/arrayfuncs.c:2301 utils/adt/arrayfuncs.c:2537 
utils/adt/arrayfuncs.c:2848 utils/adt/arrayfuncs.c:5738 utils/adt/arrayfuncs.c:5764 utils/adt/arrayfuncs.c:5775 utils/adt/json.c:2259 utils/adt/json.c:2334 utils/adt/jsonb.c:1327 utils/adt/jsonb.c:1413 utils/adt/jsonfuncs.c:4158 utils/adt/jsonfuncs.c:4309 utils/adt/jsonfuncs.c:4354 utils/adt/jsonfuncs.c:4401 #, c-format msgid "wrong number of array subscripts" msgstr "mauvais nombre d'indices du tableau" @@ -20022,8 +20091,8 @@ msgstr "la conversion de l'encodage de %s vers l'ASCII n'est pas supportée" #. translator: first %s is inet or cidr #: utils/adt/bool.c:153 utils/adt/cash.c:278 utils/adt/datetime.c:3799 utils/adt/float.c:244 utils/adt/float.c:318 utils/adt/float.c:342 utils/adt/float.c:461 utils/adt/float.c:544 utils/adt/float.c:570 utils/adt/geo_ops.c:156 utils/adt/geo_ops.c:166 utils/adt/geo_ops.c:178 utils/adt/geo_ops.c:210 utils/adt/geo_ops.c:255 utils/adt/geo_ops.c:265 utils/adt/geo_ops.c:935 utils/adt/geo_ops.c:1321 utils/adt/geo_ops.c:1356 utils/adt/geo_ops.c:1364 -#: utils/adt/geo_ops.c:3430 utils/adt/geo_ops.c:4563 utils/adt/geo_ops.c:4579 utils/adt/geo_ops.c:4586 utils/adt/mac.c:94 utils/adt/mac8.c:93 utils/adt/mac8.c:166 utils/adt/mac8.c:184 utils/adt/mac8.c:202 utils/adt/mac8.c:221 utils/adt/nabstime.c:1539 utils/adt/network.c:58 utils/adt/numeric.c:593 utils/adt/numeric.c:620 utils/adt/numeric.c:5488 utils/adt/numeric.c:5512 utils/adt/numeric.c:5536 utils/adt/numeric.c:6338 -#: utils/adt/numeric.c:6364 utils/adt/oid.c:44 utils/adt/oid.c:58 utils/adt/oid.c:64 utils/adt/oid.c:86 utils/adt/pg_lsn.c:44 utils/adt/pg_lsn.c:50 utils/adt/tid.c:72 utils/adt/tid.c:80 utils/adt/tid.c:88 utils/adt/txid.c:405 utils/adt/uuid.c:136 +#: utils/adt/geo_ops.c:3430 utils/adt/geo_ops.c:4563 utils/adt/geo_ops.c:4579 utils/adt/geo_ops.c:4586 utils/adt/mac.c:94 utils/adt/mac8.c:93 utils/adt/mac8.c:166 utils/adt/mac8.c:184 utils/adt/mac8.c:202 utils/adt/mac8.c:221 utils/adt/nabstime.c:1539 utils/adt/network.c:58 utils/adt/numeric.c:593 utils/adt/numeric.c:620 utils/adt/numeric.c:5498 utils/adt/numeric.c:5522 utils/adt/numeric.c:5546 utils/adt/numeric.c:6348 +#: utils/adt/numeric.c:6374 utils/adt/oid.c:44 utils/adt/oid.c:58 utils/adt/oid.c:64 utils/adt/oid.c:86 utils/adt/pg_lsn.c:44 utils/adt/pg_lsn.c:50 utils/adt/tid.c:72 utils/adt/tid.c:80 utils/adt/tid.c:88 utils/adt/txid.c:405 utils/adt/uuid.c:136 #, c-format msgid "invalid input syntax for type %s: \"%s\"" msgstr "syntaxe en entrée invalide pour le type %s : « %s »" @@ -20034,7 +20103,7 @@ msgid "value \"%s\" is out of range for type %s" msgstr "la valeur « %s » est en dehors des limites pour le type %s" #: utils/adt/cash.c:653 utils/adt/cash.c:703 utils/adt/cash.c:754 utils/adt/cash.c:803 utils/adt/cash.c:855 utils/adt/cash.c:905 utils/adt/float.c:855 utils/adt/float.c:919 utils/adt/float.c:3315 utils/adt/float.c:3378 utils/adt/geo_ops.c:4093 utils/adt/int.c:704 utils/adt/int.c:846 utils/adt/int.c:954 utils/adt/int.c:1043 utils/adt/int.c:1082 utils/adt/int.c:1110 utils/adt/int8.c:597 utils/adt/int8.c:657 utils/adt/int8.c:897 -#: utils/adt/int8.c:1005 utils/adt/int8.c:1094 utils/adt/int8.c:1202 utils/adt/numeric.c:6902 utils/adt/numeric.c:7191 utils/adt/numeric.c:8203 utils/adt/timestamp.c:3216 +#: utils/adt/int8.c:1005 utils/adt/int8.c:1094 utils/adt/int8.c:1202 utils/adt/numeric.c:6912 utils/adt/numeric.c:7201 utils/adt/numeric.c:8213 utils/adt/timestamp.c:3216 #, c-format msgid "division by zero" msgstr "division par zéro" @@ -20084,7 +20153,7 @@ msgstr "valeur du champ date en dehors des limites : %d-%02d-%02d" msgid "date out of 
range: %d-%02d-%02d" msgstr "date en dehors des limites : %d-%02d-%02d" -#: utils/adt/date.c:327 utils/adt/date.c:350 utils/adt/date.c:376 utils/adt/date.c:1092 utils/adt/date.c:1138 utils/adt/date.c:1672 utils/adt/date.c:1703 utils/adt/date.c:1732 utils/adt/date.c:2469 utils/adt/datetime.c:1690 utils/adt/formatting.c:3460 utils/adt/formatting.c:3492 utils/adt/formatting.c:3560 utils/adt/json.c:1539 utils/adt/json.c:1561 utils/adt/jsonb.c:824 utils/adt/jsonb.c:848 utils/adt/nabstime.c:456 utils/adt/nabstime.c:499 +#: utils/adt/date.c:327 utils/adt/date.c:350 utils/adt/date.c:376 utils/adt/date.c:1092 utils/adt/date.c:1138 utils/adt/date.c:1672 utils/adt/date.c:1703 utils/adt/date.c:1732 utils/adt/date.c:2469 utils/adt/datetime.c:1690 utils/adt/formatting.c:3460 utils/adt/formatting.c:3492 utils/adt/formatting.c:3560 utils/adt/json.c:1540 utils/adt/json.c:1562 utils/adt/jsonb.c:825 utils/adt/jsonb.c:849 utils/adt/nabstime.c:456 utils/adt/nabstime.c:499 #: utils/adt/nabstime.c:529 utils/adt/nabstime.c:572 utils/adt/timestamp.c:230 utils/adt/timestamp.c:262 utils/adt/timestamp.c:692 utils/adt/timestamp.c:701 utils/adt/timestamp.c:779 utils/adt/timestamp.c:812 utils/adt/timestamp.c:2795 utils/adt/timestamp.c:2816 utils/adt/timestamp.c:2829 utils/adt/timestamp.c:2838 utils/adt/timestamp.c:2846 utils/adt/timestamp.c:2901 utils/adt/timestamp.c:2924 utils/adt/timestamp.c:2937 #: utils/adt/timestamp.c:2948 utils/adt/timestamp.c:2956 utils/adt/timestamp.c:3512 utils/adt/timestamp.c:3637 utils/adt/timestamp.c:3678 utils/adt/timestamp.c:3759 utils/adt/timestamp.c:3805 utils/adt/timestamp.c:3908 utils/adt/timestamp.c:4307 utils/adt/timestamp.c:4406 utils/adt/timestamp.c:4416 utils/adt/timestamp.c:4508 utils/adt/timestamp.c:4610 utils/adt/timestamp.c:4620 utils/adt/timestamp.c:4852 utils/adt/timestamp.c:4866 #: utils/adt/timestamp.c:4871 utils/adt/timestamp.c:4885 utils/adt/timestamp.c:4930 utils/adt/timestamp.c:4962 utils/adt/timestamp.c:4969 utils/adt/timestamp.c:5002 utils/adt/timestamp.c:5006 utils/adt/timestamp.c:5075 utils/adt/timestamp.c:5079 utils/adt/timestamp.c:5093 utils/adt/timestamp.c:5127 utils/adt/xml.c:2111 utils/adt/xml.c:2118 utils/adt/xml.c:2138 utils/adt/xml.c:2145 @@ -20237,38 +20306,28 @@ msgstr "séquence base64 de fin invalide" msgid "Input data is missing padding, is truncated, or is otherwise corrupted." msgstr "Les données en entrée manquent un alignement, sont tronquées ou ont une corruption autre." 
-#: utils/adt/encode.c:442 utils/adt/encode.c:507 utils/adt/json.c:785 utils/adt/json.c:825 utils/adt/json.c:841 utils/adt/json.c:853 utils/adt/json.c:863 utils/adt/json.c:914 utils/adt/json.c:946 utils/adt/json.c:965 utils/adt/json.c:977 utils/adt/json.c:989 utils/adt/json.c:1134 utils/adt/json.c:1148 utils/adt/json.c:1159 utils/adt/json.c:1167 utils/adt/json.c:1175 utils/adt/json.c:1183 utils/adt/json.c:1191 utils/adt/json.c:1199 -#: utils/adt/json.c:1207 utils/adt/json.c:1215 utils/adt/json.c:1245 utils/adt/varlena.c:296 utils/adt/varlena.c:337 +#: utils/adt/encode.c:442 utils/adt/encode.c:507 utils/adt/json.c:786 utils/adt/json.c:826 utils/adt/json.c:842 utils/adt/json.c:854 utils/adt/json.c:864 utils/adt/json.c:915 utils/adt/json.c:947 utils/adt/json.c:966 utils/adt/json.c:978 utils/adt/json.c:990 utils/adt/json.c:1135 utils/adt/json.c:1149 utils/adt/json.c:1160 utils/adt/json.c:1168 utils/adt/json.c:1176 utils/adt/json.c:1184 utils/adt/json.c:1192 utils/adt/json.c:1200 +#: utils/adt/json.c:1208 utils/adt/json.c:1216 utils/adt/json.c:1246 utils/adt/varlena.c:296 utils/adt/varlena.c:337 #, c-format msgid "invalid input syntax for type %s" msgstr "syntaxe en entrée invalide pour le type %s" -#: utils/adt/enum.c:115 -#, c-format -msgid "unsafe use of new value \"%s\" of enum type %s" -msgstr "" - -#: utils/adt/enum.c:118 -#, c-format -msgid "New enum values must be committed before they can be used." -msgstr "Les nouvelles valeurs enum doivent être validées (COMMIT) avant de pouvoir être utilisées." - -#: utils/adt/enum.c:136 utils/adt/enum.c:146 utils/adt/enum.c:204 utils/adt/enum.c:214 +#: utils/adt/enum.c:48 utils/adt/enum.c:58 utils/adt/enum.c:113 utils/adt/enum.c:123 #, c-format msgid "invalid input value for enum %s: \"%s\"" msgstr "valeur en entrée invalide pour le enum %s : « %s »" -#: utils/adt/enum.c:176 utils/adt/enum.c:242 utils/adt/enum.c:301 +#: utils/adt/enum.c:85 utils/adt/enum.c:148 utils/adt/enum.c:207 #, c-format msgid "invalid internal value for enum: %u" msgstr "valeur interne invalide pour le enum : %u" -#: utils/adt/enum.c:461 utils/adt/enum.c:490 utils/adt/enum.c:530 utils/adt/enum.c:550 +#: utils/adt/enum.c:360 utils/adt/enum.c:389 utils/adt/enum.c:429 utils/adt/enum.c:449 #, c-format msgid "could not determine actual enum type" msgstr "n'a pas pu déterminer le type enum actuel" -#: utils/adt/enum.c:469 utils/adt/enum.c:498 +#: utils/adt/enum.c:368 utils/adt/enum.c:397 #, c-format msgid "enum %s contains no values" msgstr "l'énumération « %s » ne contient aucune valeur" @@ -20298,7 +20357,7 @@ msgstr "« %s » est en dehors des limites du type double precision" msgid "smallint out of range" msgstr "smallint en dehors des limites" -#: utils/adt/float.c:1430 utils/adt/numeric.c:7624 +#: utils/adt/float.c:1430 utils/adt/numeric.c:7634 #, c-format msgid "cannot take square root of a negative number" msgstr "ne peut pas calculer la racine carré d'un nombre négatif" @@ -20313,12 +20372,12 @@ msgstr "zéro à une puissance négative est indéfini" msgid "a negative number raised to a non-integer power yields a complex result" msgstr "un nombre négatif élevé à une puissance non entière donne un résultat complexe" -#: utils/adt/float.c:1542 utils/adt/float.c:1572 utils/adt/numeric.c:7890 +#: utils/adt/float.c:1542 utils/adt/float.c:1572 utils/adt/numeric.c:7900 #, c-format msgid "cannot take logarithm of zero" msgstr "ne peut pas calculer le logarithme de zéro" -#: utils/adt/float.c:1546 utils/adt/float.c:1576 utils/adt/numeric.c:7894 +#: utils/adt/float.c:1546 
utils/adt/float.c:1576 utils/adt/numeric.c:7904 #, c-format msgid "cannot take logarithm of a negative number" msgstr "ne peut pas calculer le logarithme sur un nombre négatif" @@ -20438,17 +20497,17 @@ msgstr "« %s » n'est pas un nombre" msgid "case conversion failed: %s" msgstr "échec de la conversion de casse : %s" -#: utils/adt/formatting.c:1546 +#: utils/adt/formatting.c:1545 #, c-format msgid "could not determine which collation to use for lower() function" msgstr "n'a pas pu déterminer le collationnement à utiliser pour la fonction lower()" -#: utils/adt/formatting.c:1670 +#: utils/adt/formatting.c:1669 #, c-format msgid "could not determine which collation to use for upper() function" msgstr "n'a pas pu déterminer le collationnement à utiliser pour la fonction upper()" -#: utils/adt/formatting.c:1795 +#: utils/adt/formatting.c:1794 #, c-format msgid "could not determine which collation to use for initcap() function" msgstr "n'a pas pu déterminer le collationnement à utiliser pour la fonction initcap()" @@ -20704,8 +20763,8 @@ msgstr "la taille du pas ne peut pas valoir zéro" #: utils/adt/int8.c:98 utils/adt/int8.c:133 utils/adt/numutils.c:51 utils/adt/numutils.c:61 utils/adt/numutils.c:105 #, c-format -msgid "invalid input syntax for %s: \"%s\"" -msgstr "syntaxe en entrée invalide pour le type %s : « %s »" +msgid "invalid input syntax for integer: \"%s\"" +msgstr "syntaxe en entrée invalide pour l'entier : « %s »" #: utils/adt/int8.c:500 utils/adt/int8.c:529 utils/adt/int8.c:550 utils/adt/int8.c:581 utils/adt/int8.c:615 utils/adt/int8.c:640 utils/adt/int8.c:697 utils/adt/int8.c:714 utils/adt/int8.c:741 utils/adt/int8.c:758 utils/adt/int8.c:834 utils/adt/int8.c:855 utils/adt/int8.c:882 utils/adt/int8.c:915 utils/adt/int8.c:943 utils/adt/int8.c:964 utils/adt/int8.c:991 utils/adt/int8.c:1031 utils/adt/int8.c:1052 utils/adt/int8.c:1079 #: utils/adt/int8.c:1112 utils/adt/int8.c:1140 utils/adt/int8.c:1161 utils/adt/int8.c:1188 utils/adt/int8.c:1361 utils/adt/int8.c:1400 utils/adt/numeric.c:3005 utils/adt/varbit.c:1655 @@ -20718,174 +20777,174 @@ msgstr "bigint en dehors des limites" msgid "OID out of range" msgstr "OID en dehors des limites" -#: utils/adt/json.c:786 +#: utils/adt/json.c:787 #, c-format msgid "Character with value 0x%02x must be escaped." msgstr "Le caractère de valeur 0x%02x doit être échappé." -#: utils/adt/json.c:827 +#: utils/adt/json.c:828 #, c-format msgid "\"\\u\" must be followed by four hexadecimal digits." msgstr "« \\u » doit être suivi par quatre chiffres hexadécimaux." -#: utils/adt/json.c:843 +#: utils/adt/json.c:844 #, c-format msgid "Unicode high surrogate must not follow a high surrogate." msgstr "Une substitution unicode haute ne doit pas suivre une substitution haute." -#: utils/adt/json.c:854 utils/adt/json.c:864 utils/adt/json.c:916 utils/adt/json.c:978 utils/adt/json.c:990 +#: utils/adt/json.c:855 utils/adt/json.c:865 utils/adt/json.c:917 utils/adt/json.c:979 utils/adt/json.c:991 #, c-format msgid "Unicode low surrogate must follow a high surrogate." msgstr "Une substitution unicode basse ne doit pas suivre une substitution haute." -#: utils/adt/json.c:879 utils/adt/json.c:902 +#: utils/adt/json.c:880 utils/adt/json.c:903 #, c-format msgid "unsupported Unicode escape sequence" msgstr "séquence d'échappement Unicode non supportée" -#: utils/adt/json.c:880 +#: utils/adt/json.c:881 #, c-format msgid "\\u0000 cannot be converted to text." msgstr "\\u0000 ne peut pas être converti en texte." 
-#: utils/adt/json.c:903 +#: utils/adt/json.c:904 #, c-format msgid "Unicode escape values cannot be used for code point values above 007F when the server encoding is not UTF8." msgstr "" "Les valeurs d'échappement unicode ne peuvent pas être utilisées pour les valeurs de point de code\n" "au-dessus de 007F quand l'encodage serveur n'est pas UTF8." -#: utils/adt/json.c:948 utils/adt/json.c:966 +#: utils/adt/json.c:949 utils/adt/json.c:967 #, c-format msgid "Escape sequence \"\\%s\" is invalid." msgstr "La séquence d'échappement « \\%s » est invalide." -#: utils/adt/json.c:1135 +#: utils/adt/json.c:1136 #, c-format msgid "The input string ended unexpectedly." msgstr "La chaîne en entrée se ferme de manière inattendue." -#: utils/adt/json.c:1149 +#: utils/adt/json.c:1150 #, c-format msgid "Expected end of input, but found \"%s\"." -msgstr "Attendait une fin de l'entrée, mais ait trouvé « %s »." +msgstr "Attendait une fin de l'entrée, mais a trouvé « %s »." -#: utils/adt/json.c:1160 +#: utils/adt/json.c:1161 #, c-format msgid "Expected JSON value, but found \"%s\"." msgstr "Valeur JSON attendue, mais « %s » trouvé." -#: utils/adt/json.c:1168 utils/adt/json.c:1216 +#: utils/adt/json.c:1169 utils/adt/json.c:1217 #, c-format msgid "Expected string, but found \"%s\"." msgstr "Chaîne attendue, mais « %s » trouvé." -#: utils/adt/json.c:1176 +#: utils/adt/json.c:1177 #, c-format msgid "Expected array element or \"]\", but found \"%s\"." msgstr "Élément de tableau ou « ] » attendu, mais « %s » trouvé" -#: utils/adt/json.c:1184 +#: utils/adt/json.c:1185 #, c-format msgid "Expected \",\" or \"]\", but found \"%s\"." msgstr "« , » ou « ] » attendu, mais « %s » trouvé" -#: utils/adt/json.c:1192 +#: utils/adt/json.c:1193 #, c-format msgid "Expected string or \"}\", but found \"%s\"." msgstr "Chaîne ou « } » attendu, mais « %s » trouvé" -#: utils/adt/json.c:1200 +#: utils/adt/json.c:1201 #, c-format msgid "Expected \":\", but found \"%s\"." msgstr "« : » attendu, mais « %s » trouvé" -#: utils/adt/json.c:1208 +#: utils/adt/json.c:1209 #, c-format msgid "Expected \",\" or \"}\", but found \"%s\"." msgstr "« , » ou « } » attendu, mais « %s » trouvé" -#: utils/adt/json.c:1246 +#: utils/adt/json.c:1247 #, c-format msgid "Token \"%s\" is invalid." 
msgstr "le jeton « %s » n'est pas valide" -#: utils/adt/json.c:1318 +#: utils/adt/json.c:1319 #, c-format msgid "JSON data, line %d: %s%s%s" msgstr "données JSON, ligne %d : %s%s%s" -#: utils/adt/json.c:1474 utils/adt/jsonb.c:725 +#: utils/adt/json.c:1475 utils/adt/jsonb.c:726 #, c-format msgid "key value must be scalar, not array, composite, or json" msgstr "la valeur clé doit être scalaire, et non pas un tableau ou une valeur composite ou un json" -#: utils/adt/json.c:2011 utils/adt/json.c:2021 utils/adt/json.c:2147 utils/adt/json.c:2168 utils/adt/json.c:2227 utils/adt/jsonb.c:1215 utils/adt/jsonb.c:1238 utils/adt/jsonb.c:1298 +#: utils/adt/json.c:2012 utils/adt/json.c:2022 utils/fmgr/funcapi.c:1501 #, c-format msgid "could not determine data type for argument %d" msgstr "n'a pas pu déterminer le type de données pour l'argument %d" -#: utils/adt/json.c:2045 utils/adt/jsonb.c:1782 +#: utils/adt/json.c:2046 utils/adt/jsonb.c:1739 #, c-format msgid "field name must not be null" msgstr "le nom du champ ne doit pas être NULL" -#: utils/adt/json.c:2122 +#: utils/adt/json.c:2130 utils/adt/jsonb.c:1191 #, c-format msgid "argument list must have even number of elements" msgstr "la liste d'arguments doit avoir un nombre pair d'éléments" -#: utils/adt/json.c:2123 +#: utils/adt/json.c:2131 #, c-format msgid "The arguments of json_build_object() must consist of alternating keys and values." msgstr "Les arguments de json_build_object() doivent consister en des clés et valeurs alternées" -#: utils/adt/json.c:2153 +#: utils/adt/json.c:2146 #, c-format msgid "argument %d cannot be null" msgstr "l'argument %d ne peut pas être NULL" -#: utils/adt/json.c:2154 +#: utils/adt/json.c:2147 #, c-format msgid "Object keys should be text." msgstr "Les clés de l'objet doivent être du texte." -#: utils/adt/json.c:2289 utils/adt/jsonb.c:1364 +#: utils/adt/json.c:2253 utils/adt/jsonb.c:1321 #, c-format msgid "array must have two columns" msgstr "le tableau doit avoir deux colonnes" -#: utils/adt/json.c:2313 utils/adt/json.c:2397 utils/adt/jsonb.c:1388 utils/adt/jsonb.c:1483 +#: utils/adt/json.c:2277 utils/adt/json.c:2361 utils/adt/jsonb.c:1345 utils/adt/jsonb.c:1440 #, c-format msgid "null value not allowed for object key" msgstr "valeur NULL non autorisée pour une clé d'objet" -#: utils/adt/json.c:2386 utils/adt/jsonb.c:1472 +#: utils/adt/json.c:2350 utils/adt/jsonb.c:1429 #, c-format msgid "mismatched array dimensions" msgstr "dimensions du tableau non correspondantes" -#: utils/adt/jsonb.c:257 +#: utils/adt/jsonb.c:258 #, c-format msgid "string too long to represent as jsonb string" msgstr "chaîne trop longue pour être représentée en tant que chaîne jsonb" -#: utils/adt/jsonb.c:258 +#: utils/adt/jsonb.c:259 #, c-format msgid "Due to an implementation restriction, jsonb strings cannot exceed %d bytes." msgstr "Dû à l'implémentation, les chaînes jsonb ne peuvent excéder %d octets." -#: utils/adt/jsonb.c:1183 +#: utils/adt/jsonb.c:1192 #, c-format -msgid "invalid number of arguments: object must be matched key value pairs" -msgstr "nombre d'arguments invalide : l'objet doit correspond aux paires clé/valeur" +msgid "The arguments of jsonb_build_object() must consist of alternating keys and values." 
+msgstr "Les arguments de jsonb_build_object() doivent consister en des clés et valeurs alternées" -#: utils/adt/jsonb.c:1196 +#: utils/adt/jsonb.c:1204 #, c-format msgid "argument %d: key must not be null" msgstr "argument %d : la clé ne doit pas être NULL" -#: utils/adt/jsonb.c:1835 +#: utils/adt/jsonb.c:1792 #, c-format msgid "object keys must be strings" msgstr "les clés de l'objet doivent être du texte" @@ -20910,7 +20969,7 @@ msgstr "la taille totale des éléments du tableau jsonb dépasse le maximum de msgid "total size of jsonb object elements exceeds the maximum of %u bytes" msgstr "la taille totale des éléments de l'objet JSON dépasse le maximum de %u octets" -#: utils/adt/jsonfuncs.c:511 utils/adt/jsonfuncs.c:676 utils/adt/jsonfuncs.c:2263 utils/adt/jsonfuncs.c:2699 utils/adt/jsonfuncs.c:3393 utils/adt/jsonfuncs.c:3677 +#: utils/adt/jsonfuncs.c:511 utils/adt/jsonfuncs.c:676 utils/adt/jsonfuncs.c:2263 utils/adt/jsonfuncs.c:2699 utils/adt/jsonfuncs.c:3393 utils/adt/jsonfuncs.c:3694 #, c-format msgid "cannot call %s on a scalar" msgstr "ne peut pas appeler %s sur un scalaire" @@ -20935,7 +20994,7 @@ msgstr "ne peut pas obtenir la longueur du tableau d'un objet qui n'est pas un t msgid "cannot call %s on a non-object" msgstr "ne peut pas appeler %s sur un non objet" -#: utils/adt/jsonfuncs.c:1697 utils/adt/jsonfuncs.c:3208 utils/adt/jsonfuncs.c:3502 +#: utils/adt/jsonfuncs.c:1697 utils/adt/jsonfuncs.c:3208 utils/adt/jsonfuncs.c:3510 #, c-format msgid "function returning record called in context that cannot accept type record" msgstr "" @@ -20962,7 +21021,7 @@ msgstr "ne peut pas extraire des éléments d'un scalaire" msgid "cannot extract elements from an object" msgstr "ne peut pas extraire des éléments d'un objet" -#: utils/adt/jsonfuncs.c:2250 utils/adt/jsonfuncs.c:3566 +#: utils/adt/jsonfuncs.c:2250 utils/adt/jsonfuncs.c:3583 #, c-format msgid "cannot call %s on a non-array" msgstr "ne peut pas appeler %s sur un type non tableau" @@ -20974,18 +21033,18 @@ msgstr "attendait un tableau json" #: utils/adt/jsonfuncs.c:2317 #, c-format -msgid "see the value of key \"%s\"" -msgstr "voir la valeur de la clé « %s »" +msgid "See the value of key \"%s\"." +msgstr "Voir la valeur de la clé « %s »." #: utils/adt/jsonfuncs.c:2339 #, c-format -msgid "see the array element %s of key \"%s\"" -msgstr "voir l'élément de tableau %s de la clé « %s »" +msgid "See the array element %s of key \"%s\"." +msgstr "Voir l'élément de tableau %s de la clé « %s »." #: utils/adt/jsonfuncs.c:2345 #, c-format -msgid "see the array element %s" -msgstr "voir l'élément de tableau %s" +msgid "See the array element %s." +msgstr "voir l'élément de tableau %s." #: utils/adt/jsonfuncs.c:2380 #, c-format @@ -21002,57 +21061,57 @@ msgstr "le premier argument de %s doit être un type row" msgid "Try calling the function in the FROM clause using a column definition list." msgstr "Essayez d'appeler la fonction dans la clause FROM en utilisant une liste de définition de colonnes." 
-#: utils/adt/jsonfuncs.c:3583 utils/adt/jsonfuncs.c:3659 +#: utils/adt/jsonfuncs.c:3600 utils/adt/jsonfuncs.c:3676 #, c-format msgid "argument of %s must be an array of objects" msgstr "l'argument de %s doit être un tableau d'objets" -#: utils/adt/jsonfuncs.c:3611 +#: utils/adt/jsonfuncs.c:3628 #, c-format msgid "cannot call %s on an object" msgstr "ne peut pas appeler %s sur un objet" -#: utils/adt/jsonfuncs.c:4087 utils/adt/jsonfuncs.c:4146 utils/adt/jsonfuncs.c:4226 +#: utils/adt/jsonfuncs.c:4104 utils/adt/jsonfuncs.c:4163 utils/adt/jsonfuncs.c:4243 #, c-format msgid "cannot delete from scalar" msgstr "ne peut pas supprimer à partir du scalaire" -#: utils/adt/jsonfuncs.c:4231 +#: utils/adt/jsonfuncs.c:4248 #, c-format msgid "cannot delete from object using integer index" msgstr "ne peut pas supprimer à partir de l'objet en utilisant l'index de l'entier" -#: utils/adt/jsonfuncs.c:4297 utils/adt/jsonfuncs.c:4389 +#: utils/adt/jsonfuncs.c:4314 utils/adt/jsonfuncs.c:4406 #, c-format msgid "cannot set path in scalar" msgstr "ne peut pas initialiser le chemin dans le scalaire" -#: utils/adt/jsonfuncs.c:4342 +#: utils/adt/jsonfuncs.c:4359 #, c-format msgid "cannot delete path in scalar" msgstr "ne peut pas supprimer un chemin dans le scalaire" -#: utils/adt/jsonfuncs.c:4512 +#: utils/adt/jsonfuncs.c:4529 #, c-format msgid "invalid concatenation of jsonb objects" msgstr "concaténation invalide d'objets jsonb" -#: utils/adt/jsonfuncs.c:4546 +#: utils/adt/jsonfuncs.c:4563 #, c-format msgid "path element at position %d is null" msgstr "l'élément de chemin à la position %d est nul" -#: utils/adt/jsonfuncs.c:4632 +#: utils/adt/jsonfuncs.c:4649 #, c-format msgid "cannot replace existing key" msgstr "ne peut pas remplacer une clé existante" -#: utils/adt/jsonfuncs.c:4633 +#: utils/adt/jsonfuncs.c:4650 #, c-format msgid "Try using the function jsonb_set to replace key value." msgstr "Essayez d'utiliser la fonction jsonb_set pour remplacer la valeur de la clé." -#: utils/adt/jsonfuncs.c:4715 +#: utils/adt/jsonfuncs.c:4732 #, c-format msgid "path element at position %d is not an integer: \"%s\"" msgstr "l'élément du chemin à la position %d n'est pas un entier : « %s »" @@ -21062,7 +21121,7 @@ msgstr "l'élément du chemin à la position %d n'est pas un entier : « %s »" msgid "levenshtein argument exceeds maximum length of %d characters" msgstr "l'argument levenshtein dépasse la longueur maximale de %d caractères" -#: utils/adt/like.c:183 utils/adt/selfuncs.c:5562 +#: utils/adt/like.c:183 utils/adt/selfuncs.c:5589 #, c-format msgid "could not determine which collation to use for ILIKE" msgstr "n'a pas pu déterminer le collationnement à utiliser pour ILIKE" @@ -21095,12 +21154,12 @@ msgstr "valeur d'un octet invalide dans la valeur de « macaddr » : « %s »" #: utils/adt/mac8.c:554 #, c-format msgid "macaddr8 data out of range to convert to macaddr" -msgstr "" +msgstr "donnée macaddr8 hors de l'échelle pour être convertie en macaddr" #: utils/adt/mac8.c:555 #, c-format -msgid "Only addresses that have FF and FE as values in the 4th and 5th bytes, from the left, for example: XX-XX-XX-FF-FE-XX-XX-XX, are eligible to be converted from macaddr8 to macaddr." -msgstr "" +msgid "Only addresses that have FF and FE as values in the 4th and 5th bytes from the left, for example xx:xx:xx:ff:fe:xx:xx:xx, are eligible to be converted from macaddr8 to macaddr." 
+msgstr "Seules les adresses qui ont FF ou FE comme valeurs dans les 4è et 5è octets à partir de la gauche, par exemple xx:xx:xx:ff:fe:xx:xx:xx, , sont éligibles à être converties de macaddr8 à macaddr." #: utils/adt/misc.c:238 #, c-format @@ -21340,7 +21399,7 @@ msgstr "la valeur d'arrêt ne peut pas être NaN" msgid "step size cannot be NaN" msgstr "la taille du pas ne peut pas être NaN" -#: utils/adt/numeric.c:2589 utils/adt/numeric.c:5551 utils/adt/numeric.c:5996 utils/adt/numeric.c:7700 utils/adt/numeric.c:8125 utils/adt/numeric.c:8239 utils/adt/numeric.c:8312 +#: utils/adt/numeric.c:2589 utils/adt/numeric.c:5561 utils/adt/numeric.c:6006 utils/adt/numeric.c:7710 utils/adt/numeric.c:8135 utils/adt/numeric.c:8249 utils/adt/numeric.c:8322 #, c-format msgid "value overflows numeric format" msgstr "la valeur dépasse le format numeric" @@ -21360,12 +21419,17 @@ msgstr "ne peut pas convertir NaN en un entier de type bigint" msgid "cannot convert NaN to smallint" msgstr "ne peut pas convertir NaN en un entier de type smallint" -#: utils/adt/numeric.c:6066 +#: utils/adt/numeric.c:3079 utils/adt/numeric.c:3150 +#, c-format +msgid "cannot convert infinity to numeric" +msgstr "ne peut pas convertir infinity en un type numeric" + +#: utils/adt/numeric.c:6076 #, c-format msgid "numeric field overflow" msgstr "champ numérique en dehors des limites" -#: utils/adt/numeric.c:6067 +#: utils/adt/numeric.c:6077 #, c-format msgid "A field with precision %d, scale %d must round to an absolute value less than %s%d." msgstr "" @@ -21422,83 +21486,79 @@ msgstr "n'a pas pu créer la locale « %s » : %m" msgid "The operating system could not find any locale data for the locale name \"%s\"." msgstr "Le système d'exploitation n'a pas pu trouver des données de locale pour la locale « %s »." 
-#: utils/adt/pg_locale.c:1352 +#: utils/adt/pg_locale.c:1353 #, c-format msgid "collations with different collate and ctype values are not supported on this platform" msgstr "" "les collationnements avec des valeurs différents pour le tri et le jeu de\n" "caractères ne sont pas supportés sur cette plateforme" -#: utils/adt/pg_locale.c:1361 +#: utils/adt/pg_locale.c:1362 #, c-format msgid "collation provider LIBC is not supported on this platform" msgstr "le fournisseur du collationnement, LIBC, n'est pas supporté sur cette plateforme" -#: utils/adt/pg_locale.c:1373 -#, fuzzy, c-format -#| msgid "collations with different collate and ctype values are not supported on this platform" +#: utils/adt/pg_locale.c:1374 +#, c-format msgid "collations with different collate and ctype values are not supported by ICU" -msgstr "" -"les collationnements avec des valeurs différents pour le tri et le jeu de\n" -"caractères ne sont pas supportés sur cette plateforme" +msgstr "les collationnements avec des valeurs différentes pour le tri (collate) et le jeu de caractères (ctype) ne sont pas supportés par ICU" -#: utils/adt/pg_locale.c:1379 utils/adt/pg_locale.c:1461 -#, fuzzy, c-format -#| msgid "could not open control file \"%s\": %m" +#: utils/adt/pg_locale.c:1380 utils/adt/pg_locale.c:1468 +#, c-format msgid "could not open collator for locale \"%s\": %s" -msgstr "n'a pas pu ouvrir le fichier de contrôle « %s » : %m" +msgstr "n'a pas pu ouvrir le collationneur pour la locale « %s » : %s" -#: utils/adt/pg_locale.c:1388 +#: utils/adt/pg_locale.c:1391 #, c-format msgid "ICU is not supported in this build" msgstr "ICU n'est pas supporté dans cette installation" -#: utils/adt/pg_locale.c:1389 +#: utils/adt/pg_locale.c:1392 #, c-format msgid "You need to rebuild PostgreSQL using --with-icu." msgstr "Vous devez recompiler PostgreSQL en utilisant --with-icu." -#: utils/adt/pg_locale.c:1409 +#: utils/adt/pg_locale.c:1412 #, c-format msgid "collation \"%s\" has no actual version, but a version was specified" msgstr "le collationnement « %s » n'a pas de version réelle mais une version était indiquée" -#: utils/adt/pg_locale.c:1416 +#: utils/adt/pg_locale.c:1419 #, c-format msgid "collation \"%s\" has version mismatch" msgstr "le collationnement « %s » a des versions différentes" -#: utils/adt/pg_locale.c:1418 +#: utils/adt/pg_locale.c:1421 #, c-format msgid "The collation in the database was created using version %s, but the operating system provides version %s." msgstr "Le collationnement dans la base de données a été créé en utilisant la version %s mais le système d'exploitation fournit la version %s." -#: utils/adt/pg_locale.c:1421 +#: utils/adt/pg_locale.c:1424 #, c-format msgid "Rebuild all objects affected by this collation and run ALTER COLLATION %s REFRESH VERSION, or build PostgreSQL with the right library version." msgstr "Reconstruisez tous les objets affectés par ce collationnement, et lancez ALTER COLLATION %s REFRESH VERSION, ou construisez PostgreSQL avec la bonne version de bibliothèque." 
-#: utils/adt/pg_locale.c:1501 +#: utils/adt/pg_locale.c:1508 #, c-format msgid "could not open ICU converter for encoding \"%s\": %s" msgstr "n'a pas pu ouvrir le convertisseur ICU pour l'encodage « %s » : %s" -#: utils/adt/pg_locale.c:1532 utils/adt/pg_locale.c:1541 +#: utils/adt/pg_locale.c:1539 utils/adt/pg_locale.c:1548 #, c-format msgid "ucnv_toUChars failed: %s" msgstr "échec de ucnv_toUChars : %s" -#: utils/adt/pg_locale.c:1570 utils/adt/pg_locale.c:1579 +#: utils/adt/pg_locale.c:1577 utils/adt/pg_locale.c:1586 #, c-format msgid "ucnv_fromUChars failed: %s" msgstr "échec de ucnv_fromUChars : %s" -#: utils/adt/pg_locale.c:1752 +#: utils/adt/pg_locale.c:1759 #, c-format msgid "invalid multibyte character for locale" msgstr "caractère multi-octets invalide pour la locale" -#: utils/adt/pg_locale.c:1753 +#: utils/adt/pg_locale.c:1760 #, c-format msgid "The server's LC_CTYPE locale is probably incompatible with the database encoding." msgstr "" @@ -21597,7 +21657,7 @@ msgstr "Trop de virgules." msgid "Junk after right parenthesis or bracket." msgstr "Problème après la parenthèse droite ou le crochet droit." -#: utils/adt/regexp.c:285 utils/adt/regexp.c:1344 utils/adt/varlena.c:3963 +#: utils/adt/regexp.c:285 utils/adt/regexp.c:1344 utils/adt/varlena.c:3967 #, c-format msgid "regular expression failed: %s" msgstr "l'expression rationnelle a échoué : %s" @@ -21637,7 +21697,7 @@ msgstr "il existe plus d'une fonction nommée « %s »" msgid "more than one operator named %s" msgstr "il existe plus d'un opérateur nommé%s" -#: utils/adt/regproc.c:696 utils/adt/regproc.c:737 utils/adt/regproc.c:1865 utils/adt/ruleutils.c:8996 utils/adt/ruleutils.c:9164 +#: utils/adt/regproc.c:696 utils/adt/regproc.c:737 utils/adt/regproc.c:1865 utils/adt/ruleutils.c:8959 utils/adt/ruleutils.c:9127 #, c-format msgid "too many arguments" msgstr "trop d'arguments" @@ -21647,7 +21707,7 @@ msgstr "trop d'arguments" msgid "Provide two argument types for operator." msgstr "Fournit deux types d'argument pour l'opérateur." 
-#: utils/adt/regproc.c:1449 utils/adt/regproc.c:1473 utils/adt/regproc.c:1574 utils/adt/regproc.c:1598 utils/adt/regproc.c:1700 utils/adt/regproc.c:1705 utils/adt/varlena.c:3216 utils/adt/varlena.c:3221 +#: utils/adt/regproc.c:1449 utils/adt/regproc.c:1473 utils/adt/regproc.c:1574 utils/adt/regproc.c:1598 utils/adt/regproc.c:1700 utils/adt/regproc.c:1705 utils/adt/varlena.c:3220 utils/adt/varlena.c:3225 #, c-format msgid "invalid name syntax" msgstr "syntaxe du nom invalide" @@ -21809,17 +21869,17 @@ msgstr "" "ne peut pas comparer les types d'enregistrement avec des numéros différents\n" "des colonnes" -#: utils/adt/ruleutils.c:4667 +#: utils/adt/ruleutils.c:4668 #, c-format msgid "rule \"%s\" has unsupported event type %d" msgstr "la règle « %s » a un type d'événement %d non supporté" -#: utils/adt/selfuncs.c:5547 +#: utils/adt/selfuncs.c:5574 #, c-format msgid "case insensitive matching not supported on type bytea" msgstr "la recherche insensible à la casse n'est pas supportée avec le type bytea" -#: utils/adt/selfuncs.c:5649 +#: utils/adt/selfuncs.c:5676 #, c-format msgid "regular-expression matching not supported on type bytea" msgstr "la recherche par expression rationnelle n'est pas supportée sur le type bytea" @@ -22135,26 +22195,14 @@ msgstr "mauvaise information de position dans tsvector : « %s »" #: utils/adt/txid.c:135 #, c-format -msgid "transaction ID " -msgstr "ID de transaction " +msgid "transaction ID %s is in the future" +msgstr "l'identifiant de transaction %s est dans le futur" #: utils/adt/txid.c:624 #, c-format msgid "invalid external txid_snapshot data" msgstr "valeur externe « txid_snapshot » invalide" -#: utils/adt/txid.c:758 utils/adt/txid.c:779 -msgid "in progress" -msgstr "en cours" - -#: utils/adt/txid.c:760 -msgid "committed" -msgstr "validé" - -#: utils/adt/txid.c:762 utils/adt/txid.c:777 -msgid "aborted" -msgstr "annulé" - #: utils/adt/varbit.c:58 utils/adt/varchar.c:51 #, c-format msgid "length for type %s must be at least 1" @@ -22195,7 +22243,7 @@ msgstr "longueur invalide dans la chaîne bit externe" msgid "bit string too long for type bit varying(%d)" msgstr "la chaîne bit est trop longue pour le type bit varying(%d)" -#: utils/adt/varbit.c:1066 utils/adt/varbit.c:1168 utils/adt/varlena.c:841 utils/adt/varlena.c:905 utils/adt/varlena.c:1049 utils/adt/varlena.c:2881 utils/adt/varlena.c:2948 +#: utils/adt/varbit.c:1066 utils/adt/varbit.c:1168 utils/adt/varlena.c:841 utils/adt/varlena.c:905 utils/adt/varlena.c:1049 utils/adt/varlena.c:2885 utils/adt/varlena.c:2952 #, c-format msgid "negative substring length not allowed" msgstr "longueur de sous-chaîne négative non autorisée" @@ -22220,7 +22268,7 @@ msgstr "ne peut pas utiliser l'opérateur XOR sur des chaînes bit de tailles di msgid "bit index %d out of valid range (0..%d)" msgstr "index de bit %d en dehors des limites valides (0..%d)" -#: utils/adt/varbit.c:1812 utils/adt/varlena.c:3140 +#: utils/adt/varbit.c:1812 utils/adt/varlena.c:3144 #, c-format msgid "new bit must be 0 or 1" msgstr "le nouveau bit doit valoir soit 0 soit 1" @@ -22235,7 +22283,7 @@ msgstr "valeur trop longue pour le type character(%d)" msgid "value too long for type character varying(%d)" msgstr "valeur trop longue pour le type character varying(%d)" -#: utils/adt/varlena.c:1416 utils/adt/varlena.c:1865 +#: utils/adt/varlena.c:1416 utils/adt/varlena.c:1853 #, c-format msgid "could not determine which collation to use for string comparison" msgstr "n'a pas pu déterminer le collationnement à utiliser pour la comparaison de chaîne" @@ 
-22250,62 +22298,62 @@ msgstr "n'a pas pu convertir la chaîne en UTF-16 : erreur %lu" msgid "could not compare Unicode strings: %m" msgstr "n'a pas pu comparer les chaînes unicode : %m" -#: utils/adt/varlena.c:1556 utils/adt/varlena.c:2145 +#: utils/adt/varlena.c:1556 utils/adt/varlena.c:2149 #, c-format msgid "collation failed: %s" msgstr "échec du collationnement : %s" -#: utils/adt/varlena.c:2363 +#: utils/adt/varlena.c:2367 #, c-format msgid "sort key generation failed: %s" msgstr "échec de génération de la clé de tri : %s" -#: utils/adt/varlena.c:3026 utils/adt/varlena.c:3057 utils/adt/varlena.c:3092 utils/adt/varlena.c:3128 +#: utils/adt/varlena.c:3030 utils/adt/varlena.c:3061 utils/adt/varlena.c:3096 utils/adt/varlena.c:3132 #, c-format msgid "index %d out of valid range, 0..%d" msgstr "index %d en dehors des limites valides, 0..%d" -#: utils/adt/varlena.c:4059 +#: utils/adt/varlena.c:4063 #, c-format msgid "field position must be greater than zero" msgstr "la position du champ doit être plus grand que zéro" -#: utils/adt/varlena.c:4949 +#: utils/adt/varlena.c:4953 #, c-format msgid "unterminated format() type specifier" msgstr "spécificateur de type pour format() non terminé" -#: utils/adt/varlena.c:4950 utils/adt/varlena.c:5084 utils/adt/varlena.c:5205 +#: utils/adt/varlena.c:4954 utils/adt/varlena.c:5088 utils/adt/varlena.c:5209 #, c-format msgid "For a single \"%%\" use \"%%%%\"." msgstr "Pour un unique \"%%\" utilisez \"%%%%\"." -#: utils/adt/varlena.c:5082 utils/adt/varlena.c:5203 +#: utils/adt/varlena.c:5086 utils/adt/varlena.c:5207 #, c-format msgid "unrecognized format() type specifier \"%c\"" msgstr "spécificateur de type « %c » pour format() non reconnu" -#: utils/adt/varlena.c:5095 utils/adt/varlena.c:5152 +#: utils/adt/varlena.c:5099 utils/adt/varlena.c:5156 #, c-format msgid "too few arguments for format()" msgstr "trop peu d'arguments pour format()" -#: utils/adt/varlena.c:5247 utils/adt/varlena.c:5430 +#: utils/adt/varlena.c:5251 utils/adt/varlena.c:5434 #, c-format msgid "number is out of range" msgstr "le nombre est en dehors des limites" -#: utils/adt/varlena.c:5311 utils/adt/varlena.c:5339 +#: utils/adt/varlena.c:5315 utils/adt/varlena.c:5343 #, c-format msgid "format specifies argument 0, but arguments are numbered from 1" msgstr "le format indique l'argument 0 mais les arguments sont numérotés à partir de 1" -#: utils/adt/varlena.c:5332 +#: utils/adt/varlena.c:5336 #, c-format msgid "width argument position must be ended by \"$\"" msgstr "la position de l'argument width doit se terminer par « $ »" -#: utils/adt/varlena.c:5377 +#: utils/adt/varlena.c:5381 #, c-format msgid "null values cannot be formatted as an SQL identifier" msgstr "les valeurs NULL ne peuvent pas être formatés comme un identifiant SQL" @@ -22439,49 +22487,49 @@ msgstr "XML ne supporte pas les valeurs infinies de timestamp." msgid "invalid query" msgstr "requête invalide" -#: utils/adt/xml.c:3870 +#: utils/adt/xml.c:3871 #, c-format msgid "invalid array for XML namespace mapping" msgstr "tableau invalide pour la correspondance de l'espace de nom XML" -#: utils/adt/xml.c:3871 +#: utils/adt/xml.c:3872 #, c-format msgid "The array must be two-dimensional with length of the second axis equal to 2." msgstr "" "Le tableau doit avoir deux dimensions avec une longueur de 2 pour le\n" "deuxième axe." 
-#: utils/adt/xml.c:3895 +#: utils/adt/xml.c:3896 #, c-format msgid "empty XPath expression" msgstr "expression XPath vide" -#: utils/adt/xml.c:3939 +#: utils/adt/xml.c:3951 #, c-format msgid "neither namespace name nor URI may be null" msgstr "ni le nom de l'espace de noms ni l'URI ne peuvent être NULL" -#: utils/adt/xml.c:3946 +#: utils/adt/xml.c:3958 #, c-format msgid "could not register XML namespace with name \"%s\" and URI \"%s\"" msgstr "n'a pas pu enregistrer l'espace de noms XML de nom « %s » et d'URI « %s »" -#: utils/adt/xml.c:4300 +#: utils/adt/xml.c:4312 #, c-format msgid "DEFAULT namespace is not supported" msgstr "l'espace de nom DEFAULT n'est pas supporté" -#: utils/adt/xml.c:4329 +#: utils/adt/xml.c:4341 #, c-format msgid "row path filter must not be empty string" msgstr "le filtre du chemin de ligne ne doit pas être une chaîne vide" -#: utils/adt/xml.c:4360 +#: utils/adt/xml.c:4372 #, c-format msgid "column path filter must not be empty string" msgstr "le filtre du chemin de colonne ne doit pas être une chaîne vide" -#: utils/adt/xml.c:4542 +#: utils/adt/xml.c:4554 #, c-format msgid "more than one value returned by column XPath expression" msgstr "plus d'une valeur renvoyée par l'expression XPath de colonne" @@ -22506,17 +22554,17 @@ msgstr "aucune fonction en sortie disponible pour le type %s" msgid "cached plan must not change result type" msgstr "le plan en cache ne doit pas modifier le type en résultat" -#: utils/cache/relcache.c:5795 +#: utils/cache/relcache.c:5800 #, c-format msgid "could not create relation-cache initialization file \"%s\": %m" msgstr "n'a pas pu créer le fichier d'initialisation relation-cache « %s » : %m" -#: utils/cache/relcache.c:5797 +#: utils/cache/relcache.c:5802 #, c-format msgid "Continuing anyway, but there's something wrong." msgstr "Continue malgré tout, mais quelque chose s'est mal passé." 
-#: utils/cache/relcache.c:6067 +#: utils/cache/relcache.c:6072 #, c-format msgid "could not remove cache file \"%s\": %m" msgstr "n'a pas pu supprimer le fichier cache « %s » : %m" @@ -22565,12 +22613,12 @@ msgstr "n'a pas pu synchroniser (fsync) le fichier de correspondance des relatio msgid "could not close relation mapping file \"%s\": %m" msgstr "n'a pas pu fermer le fichier de correspondance des relations « %s » : %m" -#: utils/cache/typcache.c:1223 +#: utils/cache/typcache.c:1273 #, c-format msgid "type %s is not composite" msgstr "le type %s n'est pas un type composite" -#: utils/cache/typcache.c:1237 +#: utils/cache/typcache.c:1287 #, c-format msgid "record type has not been registered" msgstr "le type d'enregistrement n'a pas été enregistré" @@ -22602,85 +22650,85 @@ msgstr "n'a pas pu ré-ouvrir le fichier « %s » comme stderr : %m" msgid "could not reopen file \"%s\" as stdout: %m" msgstr "n'a pas pu ré-ouvrir le fichier « %s » comme stdout : %m" -#: utils/error/elog.c:2389 utils/error/elog.c:2406 utils/error/elog.c:2422 +#: utils/error/elog.c:2394 utils/error/elog.c:2411 utils/error/elog.c:2427 msgid "[unknown]" msgstr "[inconnu]" -#: utils/error/elog.c:2882 utils/error/elog.c:3185 utils/error/elog.c:3293 +#: utils/error/elog.c:2887 utils/error/elog.c:3190 utils/error/elog.c:3298 msgid "missing error text" msgstr "texte d'erreur manquant" -#: utils/error/elog.c:2885 utils/error/elog.c:2888 utils/error/elog.c:3296 utils/error/elog.c:3299 +#: utils/error/elog.c:2890 utils/error/elog.c:2893 utils/error/elog.c:3301 utils/error/elog.c:3304 #, c-format msgid " at character %d" msgstr " au caractère %d" -#: utils/error/elog.c:2898 utils/error/elog.c:2905 +#: utils/error/elog.c:2903 utils/error/elog.c:2910 msgid "DETAIL: " msgstr "DÉTAIL: " -#: utils/error/elog.c:2912 +#: utils/error/elog.c:2917 msgid "HINT: " msgstr "ASTUCE : " -#: utils/error/elog.c:2919 +#: utils/error/elog.c:2924 msgid "QUERY: " msgstr "REQUÊTE : " -#: utils/error/elog.c:2926 +#: utils/error/elog.c:2931 msgid "CONTEXT: " msgstr "CONTEXTE : " -#: utils/error/elog.c:2936 +#: utils/error/elog.c:2941 #, c-format msgid "LOCATION: %s, %s:%d\n" msgstr "EMPLACEMENT : %s, %s:%d\n" -#: utils/error/elog.c:2943 +#: utils/error/elog.c:2948 #, c-format msgid "LOCATION: %s:%d\n" msgstr "EMPLACEMENT : %s:%d\n" -#: utils/error/elog.c:2957 +#: utils/error/elog.c:2962 msgid "STATEMENT: " msgstr "INSTRUCTION : " #. translator: This string will be truncated at 47 #. characters expanded. -#: utils/error/elog.c:3414 +#: utils/error/elog.c:3419 #, c-format msgid "operating system error %d" msgstr "erreur %d du système d'exploitation" -#: utils/error/elog.c:3612 +#: utils/error/elog.c:3617 msgid "DEBUG" msgstr "DEBUG" -#: utils/error/elog.c:3616 +#: utils/error/elog.c:3621 msgid "LOG" msgstr "LOG" -#: utils/error/elog.c:3619 +#: utils/error/elog.c:3624 msgid "INFO" msgstr "INFO" -#: utils/error/elog.c:3622 +#: utils/error/elog.c:3627 msgid "NOTICE" msgstr "NOTICE" -#: utils/error/elog.c:3625 +#: utils/error/elog.c:3630 msgid "WARNING" msgstr "ATTENTION" -#: utils/error/elog.c:3628 +#: utils/error/elog.c:3633 msgid "ERROR" msgstr "ERREUR" -#: utils/error/elog.c:3631 +#: utils/error/elog.c:3636 msgid "FATAL" msgstr "FATAL" -#: utils/error/elog.c:3634 +#: utils/error/elog.c:3639 msgid "PANIC" msgstr "PANIC" @@ -24161,23 +24209,19 @@ msgstr "" "d'être verrouillés à tout moment." #: utils/misc/guc.c:2202 -#, fuzzy -#| msgid "Sets the maximum number of predicate locks per transaction." 
msgid "Sets the maximum number of predicate-locked pages and tuples per relation." -msgstr "Initialise le nombre maximum de verrous prédicats par transaction." +msgstr "Initialise le nombre maximum de pages et lignes verrouillées avec prédicats par transaction." #: utils/misc/guc.c:2203 -msgid "If more than this total of pages and tuples in the same relation are locked by a connection, those locks are replaced by a relation level lock." -msgstr "" +msgid "If more than this total of pages and tuples in the same relation are locked by a connection, those locks are replaced by a relation-level lock." +msgstr "Si plus que ce nombre de pages et lignes dans la même relation sont verrouillées par une connexion, ces verrous sont remplacés par un verrou de niveau relation." #: utils/misc/guc.c:2213 -#, fuzzy -#| msgid "Sets the maximum number of predicate locks per transaction." msgid "Sets the maximum number of predicate-locked tuples per page." -msgstr "Initialise le nombre maximum de verrous prédicats par transaction." +msgstr "Initialise le nombre maximum de lignes verrouillées avec prédicat par transaction." #: utils/misc/guc.c:2214 -msgid "If more than this number of tuples on the same page are locked by a connection, those locks are replaced by a page level lock." +msgid "If more than this number of tuples on the same page are locked by a connection, those locks are replaced by a page-level lock." msgstr "Si plus que ce nombre de lignes sur la même page sont verrouillées par une connexion, ces verrous sont remplacés par un verrou de niveau de page." #: utils/misc/guc.c:2224 @@ -24817,7 +24861,7 @@ msgid "Sets the curve to use for ECDH." msgstr "Initialise la courbe à utiliser pour ECDH." #: utils/misc/guc.c:3611 -msgid "Location of the SSL DH params file." +msgid "Location of the SSL DH parameters file." msgstr "Emplacement du fichier des paramètres DH SSL." #: utils/misc/guc.c:3622 @@ -25040,7 +25084,7 @@ msgstr "%g est en dehors des limites valides pour le paramètre « %s » (%g .. 
msgid "cannot set parameters during a parallel operation" msgstr "ne peut pas configurer les paramètres lors d'une opération parallèle" -#: utils/misc/guc.c:5968 utils/misc/guc.c:6719 utils/misc/guc.c:6772 utils/misc/guc.c:7135 utils/misc/guc.c:7894 utils/misc/guc.c:8062 utils/misc/guc.c:9738 +#: utils/misc/guc.c:5968 utils/misc/guc.c:6719 utils/misc/guc.c:6772 utils/misc/guc.c:7135 utils/misc/guc.c:7894 utils/misc/guc.c:8062 utils/misc/guc.c:9731 #, c-format msgid "unrecognized configuration parameter \"%s\"" msgstr "paramètre de configuration « %s » non reconnu" @@ -25055,7 +25099,7 @@ msgstr "le paramètre « %s » ne peut pas être changé" msgid "parameter \"%s\" cannot be changed now" msgstr "le paramètre « %s » ne peut pas être modifié maintenant" -#: utils/misc/guc.c:6034 utils/misc/guc.c:6080 utils/misc/guc.c:9754 +#: utils/misc/guc.c:6034 utils/misc/guc.c:6080 utils/misc/guc.c:9747 #, c-format msgid "permission denied to set parameter \"%s\"" msgstr "droit refusé pour initialiser le paramètre « %s »" @@ -25112,47 +25156,47 @@ msgstr "SET requiert le nom du paramètre" msgid "attempt to redefine parameter \"%s\"" msgstr "tentative de redéfinition du paramètre « %s »" -#: utils/misc/guc.c:9371 +#: utils/misc/guc.c:9364 #, c-format msgid "parameter \"%s\" could not be set" msgstr "le paramètre « %s » n'a pas pu être configuré" -#: utils/misc/guc.c:9458 +#: utils/misc/guc.c:9451 #, c-format msgid "could not parse setting for parameter \"%s\"" msgstr "n'a pas pu analyser la configuration du paramètre « %s »" -#: utils/misc/guc.c:9816 utils/misc/guc.c:9850 +#: utils/misc/guc.c:9809 utils/misc/guc.c:9843 #, c-format msgid "invalid value for parameter \"%s\": %d" msgstr "valeur invalide pour le paramètre « %s » : %d" -#: utils/misc/guc.c:9884 +#: utils/misc/guc.c:9877 #, c-format msgid "invalid value for parameter \"%s\": %g" msgstr "valeur invalide pour le paramètre « %s » : %g" -#: utils/misc/guc.c:10154 +#: utils/misc/guc.c:10147 #, c-format msgid "\"temp_buffers\" cannot be changed after any temporary tables have been accessed in the session." msgstr "« temp_buffers » ne peut pas être modifié après que des tables temporaires aient été utilisées dans la session." -#: utils/misc/guc.c:10166 +#: utils/misc/guc.c:10159 #, c-format msgid "Bonjour is not supported by this build" msgstr "Bonjour n'est pas supporté dans cette installation" -#: utils/misc/guc.c:10179 +#: utils/misc/guc.c:10172 #, c-format msgid "SSL is not supported by this build" msgstr "SSL n'est pas supporté dans cette installation" -#: utils/misc/guc.c:10191 +#: utils/misc/guc.c:10184 #, c-format msgid "Cannot enable parameter when \"log_statement_stats\" is true." msgstr "Ne peut pas activer le paramètre avec « log_statement_stats » à true." -#: utils/misc/guc.c:10203 +#: utils/misc/guc.c:10196 #, c-format msgid "Cannot enable \"log_statement_stats\" when \"log_parser_stats\", \"log_planner_stats\", or \"log_executor_stats\" is true." 
msgstr "" @@ -25405,6 +25449,45 @@ msgstr "" msgid "cannot import a snapshot from a different database" msgstr "ne peut pas importer un snapshot à partir d'une base de données différente" +#~ msgid "cannot create range partition with empty range" +#~ msgstr "ne peut pas créer une partition par intervalle avec un intervalle vide" + +#~ msgid "could not get keyword values for locale \"%s\": %s" +#~ msgstr "n'a pas pu obtenir les valeurs des mots clés pour la locale « %s » : %s" + +#~ msgid "invalid publish list" +#~ msgstr "liste de publication invalide" + +#~ msgid "column \"%s\" referenced in statistics does not exist" +#~ msgstr "la colonne « %s » référencée dans les statistiques n'existe pas" + +#~ msgid "added subscription for table %s.%s" +#~ msgstr "souscription ajoutée pour la table %s.%s" + +#~ msgid "removed subscription for table %s.%s" +#~ msgstr "a supprimé une souscription pour la table %s.%s" + +#~ msgid "User \"%s\" has an empty password." +#~ msgstr "L'utilisateur « %s » a un mot de passe vide." + +#~ msgid "not connected to database" +#~ msgstr "non connecté à une base de données" + +#~ msgid "invalid input syntax for %s: \"%s\"" +#~ msgstr "syntaxe en entrée invalide pour le type %s : « %s »" + +#~ msgid "transaction ID " +#~ msgstr "ID de transaction " + +#~ msgid "in progress" +#~ msgstr "en cours" + +#~ msgid "committed" +#~ msgstr "validé" + +#~ msgid "aborted" +#~ msgstr "annulé" + #~ msgid "wrong range of array subscripts" #~ msgstr "mauvais échelle des indices du tableau" @@ -25678,9 +25761,6 @@ msgstr "ne peut pas importer un snapshot à partir d'une base de données diffé #~ msgid "%s: could not determine user name (GetUserName failed)\n" #~ msgstr "%s : n'a pas pu déterminer le nom de l'utilisateur (GetUserName a échoué)\n" -#~ msgid "too many column aliases specified for function %s" -#~ msgstr "trop d'alias de colonnes spécifiées pour la fonction %s" - #~ msgid "Expected 1 tuple with 3 fields, got %d tuples with %d fields." #~ msgstr "Attendait 1 ligne avec 3 champs, a obtenu %d lignes avec %d champs." @@ -26963,9 +27043,6 @@ msgstr "ne peut pas importer un snapshot à partir d'une base de données diffé #~ msgid "invalid list syntax for \"unix_socket_directories\"" #~ msgstr "syntaxe de liste invalide pour le paramètre « unix_socket_directories »" -#~ msgid "invalid list syntax for \"listen_addresses\"" -#~ msgstr "syntaxe de liste invalide pour le paramètre « listen_addresses »" - #~ msgid "window functions cannot use named arguments" #~ msgstr "les fonctions window ne peuvent pas renvoyer des arguments nommés" @@ -27241,9 +27318,6 @@ msgstr "ne peut pas importer un snapshot à partir d'une base de données diffé #~ msgid "value \"%s\" is out of range for type bigint" #~ msgstr "la valeur « %s » est en dehors des limites du type bigint" -#~ msgid "invalid input syntax for integer: \"%s\"" -#~ msgstr "syntaxe en entrée invalide pour l'entier : « %s »" - #~ msgid "\"TZ\"/\"tz\"/\"OF\" format patterns are not supported in to_date" #~ msgstr "les motifs de format « TZ »/« tz »/« OF » ne sont pas supportés dans to_date" @@ -27391,9 +27465,6 @@ msgstr "ne peut pas importer un snapshot à partir d'une base de données diffé #~ msgid "access method name cannot be qualified" #~ msgstr "le nom de la méthode d'accès ne peut pas être qualifiée" -#~ msgid "%s." -#~ msgstr "%s." 
- #~ msgid "default expression must not return a set" #~ msgstr "l'expression par défaut ne doit pas renvoyer un ensemble" @@ -27599,3 +27670,9 @@ msgstr "ne peut pas importer un snapshot à partir d'une base de données diffé #~ "attendue par le programme.\n" #~ "Les résultats ci-dessous ne sont pas dignes de confiance.\n" #~ "\n" + +#~ msgid "invalid number of arguments: object must be matched key value pairs" +#~ msgstr "nombre d'arguments invalide : l'objet doit correspond aux paires clé/valeur" + +#~ msgid "New enum values must be committed before they can be used." +#~ msgstr "Les nouvelles valeurs enum doivent être validées (COMMIT) avant de pouvoir être utilisées." diff --git a/src/backend/po/it.po b/src/backend/po/it.po index 5f42bb4940..b63ad1331b 100644 --- a/src/backend/po/it.po +++ b/src/backend/po/it.po @@ -1,43 +1,44 @@ # -# Translation of postgres to Italian -# PostgreSQL Project +# postgres.po +# Italian message translation file for postgres # -# Associazione Culturale ITPUG - Italian PostgreSQL Users Group -# http://www.itpug.org/ - info@itpug.org +# For development and bug report please use: +# https://github.com/dvarrazzo/postgresql-it # -# Traduttori: -# * Daniele Varrazzo -# * Vincenzo Romano +# Copyright (C) 2012-2017 PostgreSQL Global Development Group +# Copyright (C) 2010, Associazione Culturale ITPUG # -# Copyright (c) 2010, Associazione Culturale ITPUG -# Distributed under the same license of the PostgreSQL project +# Daniele Varrazzo , 2012-2017. +# Vincenzo Romano +# +# This file is distributed under the same license as the PostgreSQL package. # msgid "" msgstr "" "Project-Id-Version: postgres (PostgreSQL) 10\n" "Report-Msgid-Bugs-To: pgsql-bugs@postgresql.org\n" -"POT-Creation-Date: 2017-05-22 07:38+0000\n" -"PO-Revision-Date: 2017-06-03 01:29+0100\n" +"POT-Creation-Date: 2017-11-07 19:08+0000\n" +"PO-Revision-Date: 2018-06-25 08:51+0200\n" "Last-Translator: Daniele Varrazzo \n" -"Language-Team: Gruppo traduzioni ITPUG \n" +"Language-Team: https://github.com/dvarrazzo/postgresql-it\n" "Language: it\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" "Plural-Forms: nplurals=2; plural=n != 1;\n" "X-Poedit-SourceCharset: utf-8\n" -"X-Generator: Poedit 1.8.7.1\n" +"X-Generator: Poedit 1.5.4\n" -#: ../common/config_info.c:131 ../common/config_info.c:139 -#: ../common/config_info.c:147 ../common/config_info.c:155 -#: ../common/config_info.c:163 ../common/config_info.c:171 -#: ../common/config_info.c:179 ../common/config_info.c:187 -#: ../common/config_info.c:195 +#: ../common/config_info.c:130 ../common/config_info.c:138 +#: ../common/config_info.c:146 ../common/config_info.c:154 +#: ../common/config_info.c:162 ../common/config_info.c:170 +#: ../common/config_info.c:178 ../common/config_info.c:186 +#: ../common/config_info.c:194 msgid "not recorded" msgstr "non registrato" -#: ../common/controldata_utils.c:57 commands/copy.c:3041 -#: commands/extension.c:3328 utils/adt/genfile.c:135 +#: ../common/controldata_utils.c:57 commands/copy.c:3145 +#: commands/extension.c:3330 utils/adt/genfile.c:135 #, c-format msgid "could not open file \"%s\" for reading: %m" msgstr "apertura del file \"%s\" in lettura fallita: %m" @@ -48,14 +49,14 @@ msgid "%s: could not open file \"%s\" for reading: %s\n" msgstr "%s: apertura del file \"%s\" in lettura fallita: %s\n" #: ../common/controldata_utils.c:71 access/transam/timeline.c:348 -#: access/transam/xlog.c:3385 access/transam/xlog.c:10777 -#: access/transam/xlog.c:10790 
access/transam/xlog.c:11182 -#: access/transam/xlog.c:11225 access/transam/xlog.c:11264 -#: access/transam/xlog.c:11307 access/transam/xlogfuncs.c:668 -#: access/transam/xlogfuncs.c:687 commands/extension.c:3338 libpq/hba.c:499 -#: replication/logical/origin.c:661 replication/logical/origin.c:691 -#: replication/logical/reorderbuffer.c:3064 replication/walsender.c:508 -#: storage/file/copydir.c:178 utils/adt/genfile.c:152 utils/adt/misc.c:924 +#: access/transam/xlog.c:3384 access/transam/xlog.c:10787 +#: access/transam/xlog.c:10800 access/transam/xlog.c:11196 +#: access/transam/xlog.c:11239 access/transam/xlog.c:11278 +#: access/transam/xlog.c:11321 access/transam/xlogfuncs.c:668 +#: access/transam/xlogfuncs.c:687 commands/extension.c:3340 libpq/hba.c:499 +#: replication/logical/origin.c:685 replication/logical/origin.c:715 +#: replication/logical/reorderbuffer.c:3064 replication/walsender.c:506 +#: storage/file/copydir.c:204 utils/adt/genfile.c:152 utils/adt/misc.c:924 #, c-format msgid "could not read file \"%s\": %m" msgstr "lettura de file \"%s\" fallita: %m" @@ -179,26 +180,27 @@ msgid "could not close directory \"%s\": %s\n" msgstr "chiusura della directory \"%s\" fallita: %s\n" #: ../common/psprintf.c:179 ../port/path.c:630 ../port/path.c:668 -#: ../port/path.c:685 access/transam/twophase.c:1306 access/transam/xlog.c:6350 -#: lib/stringinfo.c:258 libpq/auth.c:1107 libpq/auth.c:1472 libpq/auth.c:1540 -#: libpq/auth.c:2056 postmaster/bgworker.c:337 postmaster/bgworker.c:908 -#: postmaster/postmaster.c:2377 postmaster/postmaster.c:2399 -#: postmaster/postmaster.c:3952 postmaster/postmaster.c:4652 -#: postmaster/postmaster.c:4727 postmaster/postmaster.c:5397 -#: postmaster/postmaster.c:5716 -#: replication/libpqwalreceiver/libpqwalreceiver.c:251 +#: ../port/path.c:685 access/transam/twophase.c:1306 +#: access/transam/xlog.c:6355 lib/stringinfo.c:258 libpq/auth.c:1126 +#: libpq/auth.c:1492 libpq/auth.c:1560 libpq/auth.c:2076 +#: postmaster/bgworker.c:337 postmaster/bgworker.c:908 +#: postmaster/postmaster.c:2391 postmaster/postmaster.c:2413 +#: postmaster/postmaster.c:3975 postmaster/postmaster.c:4683 +#: postmaster/postmaster.c:4758 postmaster/postmaster.c:5436 +#: postmaster/postmaster.c:5773 +#: replication/libpqwalreceiver/libpqwalreceiver.c:256 #: replication/logical/logical.c:170 storage/buffer/localbuf.c:436 #: storage/file/fd.c:773 storage/file/fd.c:1201 storage/file/fd.c:1319 -#: storage/file/fd.c:2044 storage/ipc/procarray.c:1057 -#: storage/ipc/procarray.c:1545 storage/ipc/procarray.c:1552 -#: storage/ipc/procarray.c:1966 storage/ipc/procarray.c:2577 -#: utils/adt/formatting.c:1578 utils/adt/formatting.c:1701 -#: utils/adt/formatting.c:1825 utils/adt/pg_locale.c:468 -#: utils/adt/pg_locale.c:652 utils/adt/regexp.c:219 utils/adt/varlena.c:4570 -#: utils/adt/varlena.c:4591 utils/fmgr/dfmgr.c:221 utils/hash/dynahash.c:429 -#: utils/hash/dynahash.c:535 utils/hash/dynahash.c:1047 utils/mb/mbutils.c:376 -#: utils/mb/mbutils.c:709 utils/misc/guc.c:3987 utils/misc/guc.c:4003 -#: utils/misc/guc.c:4016 utils/misc/guc.c:6965 utils/misc/tzparser.c:468 +#: storage/file/fd.c:2044 storage/ipc/procarray.c:1058 +#: storage/ipc/procarray.c:1546 storage/ipc/procarray.c:1553 +#: storage/ipc/procarray.c:1970 storage/ipc/procarray.c:2581 +#: utils/adt/formatting.c:1579 utils/adt/formatting.c:1703 +#: utils/adt/formatting.c:1828 utils/adt/pg_locale.c:468 +#: utils/adt/pg_locale.c:652 utils/adt/regexp.c:219 utils/adt/varlena.c:4589 +#: utils/adt/varlena.c:4610 utils/fmgr/dfmgr.c:221 
utils/hash/dynahash.c:444 +#: utils/hash/dynahash.c:553 utils/hash/dynahash.c:1065 utils/mb/mbutils.c:376 +#: utils/mb/mbutils.c:709 utils/misc/guc.c:3998 utils/misc/guc.c:4014 +#: utils/misc/guc.c:4027 utils/misc/guc.c:6976 utils/misc/tzparser.c:468 #: utils/mmgr/aset.c:404 utils/mmgr/dsa.c:713 utils/mmgr/dsa.c:795 #: utils/mmgr/mcxt.c:725 utils/mmgr/mcxt.c:760 utils/mmgr/mcxt.c:797 #: utils/mmgr/mcxt.c:834 utils/mmgr/mcxt.c:868 utils/mmgr/mcxt.c:897 @@ -273,7 +275,7 @@ msgstr "password troppo lunga" msgid "could not look up effective user ID %ld: %s" msgstr "ID utente effettivo %ld non trovato: %s" -#: ../common/username.c:45 libpq/auth.c:2003 +#: ../common/username.c:45 libpq/auth.c:2023 msgid "user does not exist" msgstr "l'utente non esiste" @@ -395,86 +397,101 @@ msgstr "lettura del SID del gruppo PowerUsers fallita: codice di errore %lu\n" msgid "could not check access token membership: error code %lu\n" msgstr "errore nel controllo del token di appartenenza: codice di errore %lu\n" -#: access/brin/brin.c:866 access/brin/brin.c:937 +#: access/brin/brin.c:867 access/brin/brin.c:938 #, c-format msgid "block number out of range: %s" msgstr "numero di blocco fuori dall'intervallo consentito: %s" -#: access/brin/brin.c:889 access/brin/brin.c:960 +#: access/brin/brin.c:890 access/brin/brin.c:961 #, c-format msgid "\"%s\" is not a BRIN index" msgstr "\"%s\" non è un indice BRIN" -#: access/brin/brin.c:905 access/brin/brin.c:976 +#: access/brin/brin.c:906 access/brin/brin.c:977 #, c-format msgid "could not open parent table of index %s" msgstr "apertura della tabella dell'indice %s non riuscita" -#: access/brin/brin_pageops.c:76 access/brin/brin_pageops.c:360 -#: access/brin/brin_pageops.c:828 +#: access/brin/brin_pageops.c:76 access/brin/brin_pageops.c:364 +#: access/brin/brin_pageops.c:830 access/gin/ginentrypage.c:110 +#: access/gist/gist.c:1363 access/nbtree/nbtinsert.c:577 +#: access/nbtree/nbtsort.c:488 access/spgist/spgdoinsert.c:1933 #, c-format -msgid "index row size %lu exceeds maximum %lu for index \"%s\"" -msgstr "la dimensione %lu della riga dell'indice supera il massimo %lu per l'indice \"%s\"" +msgid "index row size %zu exceeds maximum %zu for index \"%s\"" +msgstr "la dimensione %zu della riga dell'indice supera il massimo %zu per l'indice \"%s\"" -#: access/brin/brin_revmap.c:379 access/brin/brin_revmap.c:385 +#: access/brin/brin_revmap.c:382 access/brin/brin_revmap.c:388 #, c-format msgid "corrupted BRIN index: inconsistent range map" msgstr "indice BRIN corrotto: mappa di dominio inconsistente" -#: access/brin/brin_revmap.c:401 +#: access/brin/brin_revmap.c:404 #, c-format msgid "leftover placeholder tuple detected in BRIN index \"%s\", deleting" msgstr "trovata tupla segnaposto avanzata nell'indice BRIN \"%s\", verrà cancellata" -#: access/brin/brin_revmap.c:598 +#: access/brin/brin_revmap.c:601 #, c-format msgid "unexpected page type 0x%04X in BRIN index \"%s\" block %u" msgstr "tipo di pagina inaspettato 0x%04X nell'indice BRIN \"%s\" blocco %u" -#: access/brin/brin_validate.c:116 +#: access/brin/brin_validate.c:116 access/gin/ginvalidate.c:149 +#: access/gist/gistvalidate.c:146 access/hash/hashvalidate.c:131 +#: access/nbtree/nbtvalidate.c:101 access/spgist/spgvalidate.c:116 #, c-format -msgid "brin operator family \"%s\" contains function %s with invalid support number %d" -msgstr "la famiglia di operatori brin \"%s\" contiene la funzione %s con numero di supporto non valido %d" +msgid "operator family \"%s\" of access method %s contains function %s with invalid 
support number %d" +msgstr "la famiglia di operatori \"%s\" del metodo di accesso %s contiene la funzione %s con numero di supporto non valido %d" -#: access/brin/brin_validate.c:132 +#: access/brin/brin_validate.c:132 access/gin/ginvalidate.c:161 +#: access/gist/gistvalidate.c:158 access/hash/hashvalidate.c:114 +#: access/nbtree/nbtvalidate.c:113 access/spgist/spgvalidate.c:128 #, c-format -msgid "brin operator family \"%s\" contains function %s with wrong signature for support number %d" -msgstr "la famiglia di operatori brin \"%s\" contiene la funzione %s con signature non valida per il numero di supporto %d" +msgid "operator family \"%s\" of access method %s contains function %s with wrong signature for support number %d" +msgstr "la famiglia di operatori \"%s\" del metodo di accesso %s contiene la funzione %s con signature errata per il numero di supporto %d" -#: access/brin/brin_validate.c:154 +#: access/brin/brin_validate.c:154 access/gin/ginvalidate.c:180 +#: access/gist/gistvalidate.c:178 access/hash/hashvalidate.c:152 +#: access/nbtree/nbtvalidate.c:133 access/spgist/spgvalidate.c:147 #, c-format -msgid "brin operator family \"%s\" contains operator %s with invalid strategy number %d" -msgstr "la famiglia di operatori brin \"%s\" contiene la funzione %s con numero di strategia non valido %d" +msgid "operator family \"%s\" of access method %s contains operator %s with invalid strategy number %d" +msgstr "la famiglia di operatori \"%s\" del metodo di accesso %s contiene l'operatore %s con numero di strategia %d non valido" -#: access/brin/brin_validate.c:183 +#: access/brin/brin_validate.c:183 access/gin/ginvalidate.c:193 +#: access/hash/hashvalidate.c:165 access/nbtree/nbtvalidate.c:146 +#: access/spgist/spgvalidate.c:160 #, c-format -msgid "brin operator family \"%s\" contains invalid ORDER BY specification for operator %s" -msgstr "la famiglia di operatori brin \"%s\" contiene una specifica ORDER BY non valida per l'operatore %s" +msgid "operator family \"%s\" of access method %s contains invalid ORDER BY specification for operator %s" +msgstr "la famiglia di operatori \"%s\" del metodo di accesso %s contiene una specifica ORDER BY non valida per l'operatore %s" -#: access/brin/brin_validate.c:196 +#: access/brin/brin_validate.c:196 access/gin/ginvalidate.c:206 +#: access/gist/gistvalidate.c:226 access/hash/hashvalidate.c:178 +#: access/nbtree/nbtvalidate.c:159 access/spgist/spgvalidate.c:173 #, c-format -msgid "brin operator family \"%s\" contains operator %s with wrong signature" -msgstr "la famiglia di operatori brin \"%s\" contiene l'operatore %s con signature non valida" +msgid "operator family \"%s\" of access method %s contains operator %s with wrong signature" +msgstr "la famiglia di operatori \"%s\" del metodo di accesso %s contiene l'operatore %s con signature non valida" -#: access/brin/brin_validate.c:234 +#: access/brin/brin_validate.c:234 access/hash/hashvalidate.c:218 +#: access/nbtree/nbtvalidate.c:201 access/spgist/spgvalidate.c:201 #, c-format -msgid "brin operator family \"%s\" is missing operator(s) for types %s and %s" -msgstr "alla famiglia di operatori brin \"%s\" mancano operatori per i tipi %s e %s" +msgid "operator family \"%s\" of access method %s is missing operator(s) for types %s and %s" +msgstr "alla famiglia di operatori \"%s\" del metodo di accesso %s mancano operatori per i tipi %s e %s" #: access/brin/brin_validate.c:244 #, c-format -msgid "brin operator family \"%s\" is missing support function(s) for types %s and %s" -msgstr "alla famiglia di 
operatori brin \"%s\" mancano funzioni di supporto per i tipi %s e %s" +msgid "operator family \"%s\" of access method %s is missing support function(s) for types %s and %s" +msgstr "alla famiglia di operatori \"%s\" del metodo di accesso %s mancano funzioni di supporto per i tipi %s e %s" -#: access/brin/brin_validate.c:257 +#: access/brin/brin_validate.c:257 access/hash/hashvalidate.c:232 +#: access/nbtree/nbtvalidate.c:225 access/spgist/spgvalidate.c:234 #, c-format -msgid "brin operator class \"%s\" is missing operator(s)" -msgstr "alla classe di operatori brin \"%s\" mancano operatori" +msgid "operator class \"%s\" of access method %s is missing operator(s)" +msgstr "alla famiglia di operatori \"%s\" del metodo di accesso %s mancano operatori" -#: access/brin/brin_validate.c:268 +#: access/brin/brin_validate.c:268 access/gin/ginvalidate.c:247 +#: access/gist/gistvalidate.c:265 #, c-format -msgid "brin operator class \"%s\" is missing support function %d" -msgstr "alla classe di operatori brin \"%s\" manca la funzione di supporto %d" +msgid "operator class \"%s\" of access method %s is missing support function %d" +msgstr "alla famiglia di operatori \"%s\" del metodo di accesso %s manca la funzione di supporto %d" #: access/common/heaptuple.c:708 access/common/heaptuple.c:1405 #, c-format @@ -492,7 +509,7 @@ msgid "index row requires %zu bytes, maximum size is %zu" msgstr "la riga dell'indice richiede %zu byte, la dimensione massima è %zu" #: access/common/printtup.c:290 tcop/fastpath.c:182 tcop/fastpath.c:532 -#: tcop/postgres.c:1732 +#: tcop/postgres.c:1726 #, c-format msgid "unsupported format code: %d" msgstr "codice di formato non supportato: %d" @@ -572,8 +589,8 @@ msgstr "L'attributo \"%s\" di tipo %s non combacia con l'attributo corrispondent msgid "Attribute \"%s\" of type %s does not exist in type %s." msgstr "L'attributo \"%s\" di tipo %s non esiste nel tipo %s." -#: access/common/tupdesc.c:728 parser/parse_clause.c:815 -#: parser/parse_relation.c:1544 +#: access/common/tupdesc.c:728 parser/parse_clause.c:812 +#: parser/parse_relation.c:1538 #, c-format msgid "column \"%s\" cannot be declared SETOF" msgstr "la colonna \"%s\" non può essere dichiarata SETOF" @@ -588,15 +605,8 @@ msgstr "la lista di posting è troppo lunga" msgid "Reduce maintenance_work_mem." msgstr "Riduci maintenance_work_mem." -#: access/gin/ginentrypage.c:110 access/gist/gist.c:1363 -#: access/nbtree/nbtinsert.c:577 access/nbtree/nbtsort.c:488 -#: access/spgist/spgdoinsert.c:1933 -#, c-format -msgid "index row size %zu exceeds maximum %zu for index \"%s\"" -msgstr "la dimensione %zu della riga dell'indice supera il massimo %zu per l'indice \"%s\"" - -#: access/gin/ginfast.c:991 access/transam/xlog.c:10198 -#: access/transam/xlog.c:10716 access/transam/xlogfuncs.c:296 +#: access/gin/ginfast.c:991 access/transam/xlog.c:10208 +#: access/transam/xlog.c:10726 access/transam/xlogfuncs.c:296 #: access/transam/xlogfuncs.c:323 access/transam/xlogfuncs.c:362 #: access/transam/xlogfuncs.c:383 access/transam/xlogfuncs.c:404 #: access/transam/xlogfuncs.c:474 access/transam/xlogfuncs.c:530 @@ -629,52 +639,23 @@ msgstr "i vecchi indici GIN non supportano le scansioni sull'intero indice né l msgid "To fix this, do REINDEX INDEX \"%s\"." msgstr "Per correggere questo problema esegui REINDEX INDEX \"%s\"." 
-#: access/gin/ginutil.c:134 executor/execExpr.c:1765 -#: utils/adt/arrayfuncs.c:3803 utils/adt/arrayfuncs.c:6325 +#: access/gin/ginutil.c:134 executor/execExpr.c:1780 +#: utils/adt/arrayfuncs.c:3803 utils/adt/arrayfuncs.c:6323 #: utils/adt/rowtypes.c:927 #, c-format msgid "could not identify a comparison function for type %s" msgstr "non è stato possibile trovare un operatore di confronto per il tipo %s" -#: access/gin/ginvalidate.c:93 -#, c-format -msgid "gin operator family \"%s\" contains support procedure %s with cross-type registration" -msgstr "la famiglia di operatori gin \"%s\" contiene la procedura di supporto %s con tipi misti" - -#: access/gin/ginvalidate.c:149 -#, c-format -msgid "gin operator family \"%s\" contains function %s with invalid support number %d" -msgstr "la famiglia di operatori gin \"%s\" contiene la funzione %s con numero di supporto non valido %d" - -#: access/gin/ginvalidate.c:161 -#, c-format -msgid "gin operator family \"%s\" contains function %s with wrong signature for support number %d" -msgstr "la famiglia di operatori gin \"%s\" contiene la funzione %s con signature non valida per il numero di supporto %d" - -#: access/gin/ginvalidate.c:180 +#: access/gin/ginvalidate.c:93 access/gist/gistvalidate.c:93 +#: access/hash/hashvalidate.c:99 access/spgist/spgvalidate.c:93 #, c-format -msgid "gin operator family \"%s\" contains operator %s with invalid strategy number %d" -msgstr "la famiglia di operatori gin \"%s\" contiene l'operatore %s con numero di strategia non valido %d" - -#: access/gin/ginvalidate.c:193 -#, c-format -msgid "gin operator family \"%s\" contains invalid ORDER BY specification for operator %s" -msgstr "la famiglia di operatori gin \"%s\" contiene una specifica ORDER BY non valida per l'operatore %s" - -#: access/gin/ginvalidate.c:206 -#, c-format -msgid "gin operator family \"%s\" contains operator %s with wrong signature" -msgstr "la famiglia di operatori gin \"%s\" contiene l'operatore %s con signature non valida" - -#: access/gin/ginvalidate.c:247 -#, c-format -msgid "gin operator class \"%s\" is missing support function %d" -msgstr "alla classe di operatori gin \"%s\" manca la funzione di supporto %d" +msgid "operator family \"%s\" of access method %s contains support procedure %s with different left and right input types" +msgstr "la famiglia di operatori \"%s\" del metodo di accesso %s contiene la procedura di supporto %s con tipi di input sinistro e destro diversi" #: access/gin/ginvalidate.c:257 #, c-format -msgid "gin operator class \"%s\" is missing support function %d or %d" -msgstr "alla classe di operatori gin \"%s\" mancano le funzioni di supporto %d o %d" +msgid "operator class \"%s\" of access method %s is missing support function %d or %d" +msgstr "alla classe di operatori \"%s\" del metodo di accesso %s manca la funzione di supporto %d o %d" #: access/gist/gist.c:706 access/gist/gistvacuum.c:258 #, c-format @@ -686,11 +667,11 @@ msgstr "l'indice \"%s\" contiene una tupla interna marcata come invalida" msgid "This is caused by an incomplete page split at crash recovery before upgrading to PostgreSQL 9.1." msgstr "Ciò è causato da una separazione di pagina incompleta al ripristino del crash prima dell'aggiornamento a PostgreSQL 9.1." 
-#: access/gist/gist.c:709 access/gist/gistutil.c:739 access/gist/gistutil.c:750 -#: access/gist/gistvacuum.c:261 access/hash/hashutil.c:241 -#: access/hash/hashutil.c:252 access/hash/hashutil.c:264 -#: access/hash/hashutil.c:285 access/nbtree/nbtpage.c:519 -#: access/nbtree/nbtpage.c:530 +#: access/gist/gist.c:709 access/gist/gistutil.c:739 +#: access/gist/gistutil.c:750 access/gist/gistvacuum.c:261 +#: access/hash/hashutil.c:241 access/hash/hashutil.c:252 +#: access/hash/hashutil.c:264 access/hash/hashutil.c:285 +#: access/nbtree/nbtpage.c:519 access/nbtree/nbtpage.c:530 #, c-format msgid "Please REINDEX it." msgstr "Si richiede l'esecuzione di REINDEX." @@ -732,45 +713,15 @@ msgstr "l'indice \"%s\" contiene una pagina inaspettata completamente a zero al msgid "index \"%s\" contains corrupted page at block %u" msgstr "l'indice \"%s\" contiene una pagina corrotta al blocco %u" -#: access/gist/gistvalidate.c:93 -#, c-format -msgid "gist operator family \"%s\" contains support procedure %s with cross-type registration" -msgstr "la famiglia di operatori gist \"%s\" contiene la procedura di supporto %s con tipi misti" - -#: access/gist/gistvalidate.c:146 -#, c-format -msgid "gist operator family \"%s\" contains function %s with invalid support number %d" -msgstr "la famiglia di operatori gist \"%s\" contiene la funzione %s con numero di supporto non valido %d" - -#: access/gist/gistvalidate.c:158 -#, c-format -msgid "gist operator family \"%s\" contains function %s with wrong signature for support number %d" -msgstr "la famiglia di operatori gist \"%s\" contiene la funzione %s con signature non valida per il numero di supporto %d" - -#: access/gist/gistvalidate.c:178 -#, c-format -msgid "gist operator family \"%s\" contains operator %s with invalid strategy number %d" -msgstr "la famiglia di operatori gist \"%s\" contiene l'operatore %s con numero di strategia non valido %d" - #: access/gist/gistvalidate.c:196 #, c-format -msgid "gist operator family \"%s\" contains unsupported ORDER BY specification for operator %s" -msgstr "la famiglia di operatori gist \"%s\" contiene una specifica ORDER BY non supportata per l'operatore %s" +msgid "operator family \"%s\" of access method %s contains unsupported ORDER BY specification for operator %s" +msgstr "la famiglia di operatori \"%s\" del metodo di accesso %s contiene una specifica ORDER BY non supportata per l'operatore %s" #: access/gist/gistvalidate.c:207 #, c-format -msgid "gist operator family \"%s\" contains incorrect ORDER BY opfamily specification for operator %s" -msgstr "la famiglia di operatori gist \"%s\" contiene una specifica ORDER BY non corretta per l'operatore %s" - -#: access/gist/gistvalidate.c:226 -#, c-format -msgid "gist operator family \"%s\" contains operator %s with wrong signature" -msgstr "la famiglia di operatori gist \"%s\" contiene l'operatore %s con signature non valida" - -#: access/gist/gistvalidate.c:265 -#, c-format -msgid "gist operator class \"%s\" is missing support function %d" -msgstr "-alla classe di operatori gist \"%s\" manca la funzione di supporto %d" +msgid "operator family \"%s\" of access method %s contains incorrect ORDER BY opfamily specification for operator %s" +msgstr "la famiglia di operatori \"%s\" del metodo di accesso %s contiene una specifica opfamily ORDER BY non corretta per l'operatore %s" #: access/hash/hashinsert.c:82 #, c-format @@ -788,7 +739,7 @@ msgstr "Non si possono indicizzare valori più grandi di una pagina di buffer." 
msgid "invalid overflow block number %u" msgstr "numero di blocco di overflow %u non valido" -#: access/hash/hashovfl.c:283 access/hash/hashpage.c:453 +#: access/hash/hashovfl.c:283 access/hash/hashpage.c:462 #, c-format msgid "out of overflow pages in hash index \"%s\"" msgstr "pagine di overflow esaurite per l'indice hash \"%s\"" @@ -808,55 +759,15 @@ msgstr "l'indice \"%s\" non è un indice hash" msgid "index \"%s\" has wrong hash version" msgstr "l'indice \"%s\" ha una versione errata dell'hash" -#: access/hash/hashvalidate.c:99 -#, c-format -msgid "hash operator family \"%s\" contains support procedure %s with cross-type registration" -msgstr "la famiglia di operatori hash \"%s\" contiene la procedura di supporto %s con tipi misti" - -#: access/hash/hashvalidate.c:114 -#, c-format -msgid "hash operator family \"%s\" contains function %s with wrong signature for support number %d" -msgstr "la famiglia di operatori hash \"%s\" contiene la funzione %s con signature non valida per il numero di supporto %d" - -#: access/hash/hashvalidate.c:131 -#, c-format -msgid "hash operator family \"%s\" contains function %s with invalid support number %d" -msgstr "la famiglia di operatori hash \"%s\" contiene la funzione %s con numero di supporto non valido %d" - -#: access/hash/hashvalidate.c:152 -#, c-format -msgid "hash operator family \"%s\" contains operator %s with invalid strategy number %d" -msgstr "la famiglia di operatori hash \"%s\" contiene l'operatore %s con numero di strategia non valido %d" - -#: access/hash/hashvalidate.c:165 -#, c-format -msgid "hash operator family \"%s\" contains invalid ORDER BY specification for operator %s" -msgstr "la famiglia di operatori hash \"%s\" contiene una specifica ORDER BY non valida per l'operatore %s" - -#: access/hash/hashvalidate.c:178 -#, c-format -msgid "hash operator family \"%s\" contains operator %s with wrong signature" -msgstr "la famiglia di operatori hash \"%s\" contiene l'operatore %s con signature non valida" - #: access/hash/hashvalidate.c:190 #, c-format -msgid "hash operator family \"%s\" lacks support function for operator %s" -msgstr "alla famiglia di operatori hash \"%s\" manca la funzione di supporto per l'operatore %s" - -#: access/hash/hashvalidate.c:218 -#, c-format -msgid "hash operator family \"%s\" is missing operator(s) for types %s and %s" -msgstr "alla famiglia di operatori hash \"%s\" mancano operatori per i tipi %s e %s" - -#: access/hash/hashvalidate.c:232 -#, c-format -msgid "hash operator class \"%s\" is missing operator(s)" -msgstr "alla classe di operatori hash \"%s\" mancano operatori" +msgid "operator family \"%s\" of access method %s lacks support function for operator %s" +msgstr "alla famiglia di operatori \"%s\" del metodo di accesso %s manca la funzione di supporto per l'operatore %s" -#: access/hash/hashvalidate.c:248 +#: access/hash/hashvalidate.c:248 access/nbtree/nbtvalidate.c:242 #, c-format -msgid "hash operator family \"%s\" is missing cross-type operator(s)" -msgstr "alla famiglia di operatori hash \"%s\" mancano operatori tra tipi diversi" +msgid "operator family \"%s\" of access method %s is missing cross-type operator(s)" +msgstr "alla famiglia di operatori \"%s\" del metodo di accesso %s mancano operatori tra tipi diversi" #: access/heap/heapam.c:1293 access/heap/heapam.c:1321 #: access/heap/heapam.c:1353 catalog/aclchk.c:1772 @@ -865,8 +776,8 @@ msgid "\"%s\" is an index" msgstr "\"%s\" è un indice" #: access/heap/heapam.c:1298 access/heap/heapam.c:1326 -#: access/heap/heapam.c:1358 
catalog/aclchk.c:1779 commands/tablecmds.c:9876 -#: commands/tablecmds.c:13088 +#: access/heap/heapam.c:1358 catalog/aclchk.c:1779 commands/tablecmds.c:9898 +#: commands/tablecmds.c:13128 #, c-format msgid "\"%s\" is a composite type" msgstr "\"%s\" è un tipo composito" @@ -886,7 +797,7 @@ msgstr "non è possibile eliminare tuple durante un'operazione parallela" msgid "attempted to delete invisible tuple" msgstr "tentativo di eliminare tuple invisibili" -#: access/heap/heapam.c:3514 access/heap/heapam.c:6214 +#: access/heap/heapam.c:3514 access/heap/heapam.c:6248 #, c-format msgid "cannot update tuples during a parallel operation" msgstr "non è possibile aggiornare tuple durante un'operazione parallela" @@ -896,8 +807,8 @@ msgstr "non è possibile aggiornare tuple durante un'operazione parallela" msgid "attempted to update invisible tuple" msgstr "tentativo di aggiornare tuple invisibili" -#: access/heap/heapam.c:4937 access/heap/heapam.c:4975 -#: access/heap/heapam.c:5227 executor/execMain.c:2588 +#: access/heap/heapam.c:4938 access/heap/heapam.c:4976 +#: access/heap/heapam.c:5228 executor/execMain.c:2631 #, c-format msgid "could not obtain lock on row in relation \"%s\"" msgstr "lock di riga nella relazione \"%s\" fallito" @@ -914,22 +825,22 @@ msgstr "scrittura nel file \"%s\" fallita, scritti %d di %d: %m" #: access/heap/rewriteheap.c:966 access/heap/rewriteheap.c:1183 #: access/heap/rewriteheap.c:1282 access/transam/timeline.c:412 -#: access/transam/timeline.c:492 access/transam/xlog.c:3250 -#: access/transam/xlog.c:3418 replication/logical/snapbuild.c:1630 -#: replication/slot.c:1232 replication/slot.c:1319 storage/file/fd.c:631 +#: access/transam/timeline.c:492 access/transam/xlog.c:3249 +#: access/transam/xlog.c:3417 replication/logical/snapbuild.c:1630 +#: replication/slot.c:1291 replication/slot.c:1378 storage/file/fd.c:631 #: storage/file/fd.c:3180 storage/smgr/md.c:1044 storage/smgr/md.c:1277 -#: storage/smgr/md.c:1450 utils/misc/guc.c:6987 +#: storage/smgr/md.c:1450 utils/misc/guc.c:6998 #, c-format msgid "could not fsync file \"%s\": %m" msgstr "fsync del file \"%s\" fallito: %m" #: access/heap/rewriteheap.c:1021 access/heap/rewriteheap.c:1141 #: access/transam/timeline.c:315 access/transam/timeline.c:467 -#: access/transam/xlog.c:3203 access/transam/xlog.c:3356 -#: access/transam/xlog.c:10533 access/transam/xlog.c:10571 -#: access/transam/xlog.c:10956 postmaster/postmaster.c:4427 -#: replication/logical/origin.c:535 replication/slot.c:1184 -#: storage/file/copydir.c:162 storage/smgr/md.c:327 utils/time/snapmgr.c:1283 +#: access/transam/xlog.c:3202 access/transam/xlog.c:3355 +#: access/transam/xlog.c:10543 access/transam/xlog.c:10581 +#: access/transam/xlog.c:10965 postmaster/postmaster.c:4450 +#: replication/logical/origin.c:559 replication/slot.c:1243 +#: storage/file/copydir.c:176 storage/smgr/md.c:327 utils/time/snapmgr.c:1297 #, c-format msgid "could not create file \"%s\": %m" msgstr "creazione del file \"%s\" fallita: %m" @@ -939,7 +850,7 @@ msgstr "creazione del file \"%s\" fallita: %m" msgid "could not truncate file \"%s\" to %u: %m" msgstr "troncamento del file \"%s\" a %u fallito: %m" -#: access/heap/rewriteheap.c:1159 replication/walsender.c:488 +#: access/heap/rewriteheap.c:1159 replication/walsender.c:486 #: storage/smgr/md.c:1949 #, c-format msgid "could not seek to end of file \"%s\": %m" @@ -947,26 +858,26 @@ msgstr "non è stato possibile spostarsi alla fine del file \"%s\": %m" #: access/heap/rewriteheap.c:1171 access/transam/timeline.c:370 #: 
access/transam/timeline.c:405 access/transam/timeline.c:484 -#: access/transam/xlog.c:3239 access/transam/xlog.c:3409 -#: postmaster/postmaster.c:4437 postmaster/postmaster.c:4447 -#: replication/logical/origin.c:544 replication/logical/origin.c:583 -#: replication/logical/origin.c:599 replication/logical/snapbuild.c:1612 -#: replication/slot.c:1215 storage/file/copydir.c:191 -#: utils/init/miscinit.c:1240 utils/init/miscinit.c:1251 -#: utils/init/miscinit.c:1259 utils/misc/guc.c:6948 utils/misc/guc.c:6979 -#: utils/misc/guc.c:8829 utils/misc/guc.c:8843 utils/time/snapmgr.c:1288 -#: utils/time/snapmgr.c:1295 +#: access/transam/xlog.c:3238 access/transam/xlog.c:3408 +#: postmaster/postmaster.c:4460 postmaster/postmaster.c:4470 +#: replication/logical/origin.c:568 replication/logical/origin.c:607 +#: replication/logical/origin.c:623 replication/logical/snapbuild.c:1612 +#: replication/slot.c:1274 storage/file/copydir.c:217 +#: utils/init/miscinit.c:1249 utils/init/miscinit.c:1260 +#: utils/init/miscinit.c:1268 utils/misc/guc.c:6959 utils/misc/guc.c:6990 +#: utils/misc/guc.c:8840 utils/misc/guc.c:8854 utils/time/snapmgr.c:1302 +#: utils/time/snapmgr.c:1309 #, c-format msgid "could not write to file \"%s\": %m" msgstr "scrittura nel file \"%s\" fallita: %m" #: access/heap/rewriteheap.c:1257 access/transam/xlogarchive.c:113 -#: access/transam/xlogarchive.c:467 postmaster/postmaster.c:1253 -#: postmaster/syslogger.c:1371 replication/logical/origin.c:522 +#: access/transam/xlogarchive.c:467 postmaster/postmaster.c:1257 +#: postmaster/syslogger.c:1371 replication/logical/origin.c:546 #: replication/logical/reorderbuffer.c:2595 #: replication/logical/reorderbuffer.c:2652 #: replication/logical/snapbuild.c:1560 replication/logical/snapbuild.c:1936 -#: replication/slot.c:1292 storage/file/fd.c:682 storage/ipc/dsm.c:327 +#: replication/slot.c:1351 storage/file/fd.c:682 storage/ipc/dsm.c:327 #: storage/smgr/md.c:426 storage/smgr/md.c:475 storage/smgr/md.c:1397 #, c-format msgid "could not remove file \"%s\": %m" @@ -974,21 +885,21 @@ msgstr "rimozione del file \"%s\" fallita: %m" #: access/heap/rewriteheap.c:1271 access/transam/timeline.c:111 #: access/transam/timeline.c:236 access/transam/timeline.c:334 -#: access/transam/xlog.c:3179 access/transam/xlog.c:3300 -#: access/transam/xlog.c:3341 access/transam/xlog.c:3620 -#: access/transam/xlog.c:3698 access/transam/xlogutils.c:706 +#: access/transam/xlog.c:3178 access/transam/xlog.c:3299 +#: access/transam/xlog.c:3340 access/transam/xlog.c:3619 +#: access/transam/xlog.c:3697 access/transam/xlogutils.c:706 #: postmaster/syslogger.c:1380 replication/basebackup.c:474 -#: replication/basebackup.c:1218 replication/logical/origin.c:654 -#: replication/logical/reorderbuffer.c:2113 +#: replication/basebackup.c:1218 replication/logical/origin.c:678 +#: replication/logical/reorderbuffer.c:2112 #: replication/logical/reorderbuffer.c:2361 #: replication/logical/reorderbuffer.c:3044 #: replication/logical/snapbuild.c:1604 replication/logical/snapbuild.c:1692 -#: replication/slot.c:1307 replication/walsender.c:481 -#: replication/walsender.c:2388 storage/file/copydir.c:155 +#: replication/slot.c:1366 replication/walsender.c:479 +#: replication/walsender.c:2385 storage/file/copydir.c:169 #: storage/file/fd.c:614 storage/file/fd.c:3092 storage/file/fd.c:3159 -#: storage/smgr/md.c:608 utils/error/elog.c:1879 utils/init/miscinit.c:1171 -#: utils/init/miscinit.c:1299 utils/init/miscinit.c:1376 utils/misc/guc.c:7207 -#: utils/misc/guc.c:7240 +#: storage/smgr/md.c:608 
utils/error/elog.c:1879 utils/init/miscinit.c:1173 +#: utils/init/miscinit.c:1308 utils/init/miscinit.c:1385 utils/misc/guc.c:7218 +#: utils/misc/guc.c:7251 #, c-format msgid "could not open file \"%s\": %m" msgstr "apertura del file \"%s\" fallita: %m" @@ -1003,9 +914,9 @@ msgstr "il metodo di accesso \"%s\" non è del tipo %s" msgid "index access method \"%s\" does not have a handler" msgstr "il metodo di accesso dell'indice \"%s\" non ha un handler" -#: access/index/indexam.c:160 catalog/objectaddress.c:1223 -#: commands/indexcmds.c:1806 commands/tablecmds.c:247 -#: commands/tablecmds.c:13079 +#: access/index/indexam.c:160 catalog/objectaddress.c:1222 +#: commands/indexcmds.c:1819 commands/tablecmds.c:247 +#: commands/tablecmds.c:13119 #, c-format msgid "\"%s\" is not an index" msgstr "\"%s\" non è un indice" @@ -1040,7 +951,7 @@ msgstr "" "Si consiglia un indice funzionale su un hash MD5 del valore o l'uso del full text indexing." #: access/nbtree/nbtpage.c:169 access/nbtree/nbtpage.c:372 -#: access/nbtree/nbtpage.c:459 parser/parse_utilcmd.c:1891 +#: access/nbtree/nbtpage.c:459 parser/parse_utilcmd.c:1901 #, c-format msgid "index \"%s\" is not a btree" msgstr "l'indice \"%s\" non è un btree" @@ -1061,100 +972,20 @@ msgstr "l'indice \"%s\" contiene una pagina interna mezza morta" msgid "This can be caused by an interrupted VACUUM in version 9.3 or older, before upgrade. Please REINDEX it." msgstr "Ciò può essere causato da un VACUUM interrotto in una versione 9.3 o precedente, prima dell'aggiornamento. Si consiglia un REINDEX." -#: access/nbtree/nbtvalidate.c:101 -#, c-format -msgid "btree operator family \"%s\" contains function %s with invalid support number %d" -msgstr "la famiglia di operatori btree \"%s\" contiene la funzione %s con numero di supporto non valido %d" - -#: access/nbtree/nbtvalidate.c:113 -#, c-format -msgid "btree operator family \"%s\" contains function %s with wrong signature for support number %d" -msgstr "la famiglia di operatori btree \"%s\" contiene la funzione %s con signature non valida per il numero di supporto %d" - -#: access/nbtree/nbtvalidate.c:133 -#, c-format -msgid "btree operator family \"%s\" contains operator %s with invalid strategy number %d" -msgstr "la famiglia di operatori btree \"%s\" contiene l'operatore %s con numero di strategia non valido %d" - -#: access/nbtree/nbtvalidate.c:146 -#, c-format -msgid "btree operator family \"%s\" contains invalid ORDER BY specification for operator %s" -msgstr "la famiglia di operatori btree \"%s\" contiene una specifica ORDER BY non valida per l'operatore %s" - -#: access/nbtree/nbtvalidate.c:159 -#, c-format -msgid "btree operator family \"%s\" contains operator %s with wrong signature" -msgstr "la famiglia di operatori btree \"%s\" contiene l'operatore %s con signature non valida" - -#: access/nbtree/nbtvalidate.c:201 -#, c-format -msgid "btree operator family \"%s\" is missing operator(s) for types %s and %s" -msgstr "alla famiglia di operatori btree \"%s\" mancano operatori per i tipi %s e %s" - #: access/nbtree/nbtvalidate.c:211 #, c-format -msgid "btree operator family \"%s\" is missing support function for types %s and %s" -msgstr "alla famiglia di operatori btree \"%s\" mancano funzioni di supporto per i tipi %s e %s" - -#: access/nbtree/nbtvalidate.c:225 -#, c-format -msgid "btree operator class \"%s\" is missing operator(s)" -msgstr "alla classe di operatori btree \"%s\" mancano operatori" - -#: access/nbtree/nbtvalidate.c:242 -#, c-format -msgid "btree operator family \"%s\" is missing 
cross-type operator(s)" -msgstr "alla famiglia di operatori btree \"%s\" mancano operatori tra tipi diversi" +msgid "operator family \"%s\" of access method %s is missing support function for types %s and %s" +msgstr "alla famiglia di operatori \"%s\" del metodo di accesso %s manca la funzione di supporto per i tipi %s e %s" #: access/spgist/spgutils.c:705 #, c-format msgid "SP-GiST inner tuple size %zu exceeds maximum %zu" msgstr "La dimensione %zu della tupla interna SP-GiST supera il massimo %zu" -#: access/spgist/spgvalidate.c:93 -#, c-format -msgid "spgist operator family \"%s\" contains support procedure %s with cross-type registration" -msgstr "la famiglia di operatori spgist \"%s\" contiene la procedura di supporto %s con tipi misti" - -#: access/spgist/spgvalidate.c:116 -#, c-format -msgid "spgist operator family \"%s\" contains function %s with invalid support number %d" -msgstr "la famiglia di operatori spgist \"%s\" contiene la funzione %s con numero di supporto non valido %d" - -#: access/spgist/spgvalidate.c:128 -#, c-format -msgid "spgist operator family \"%s\" contains function %s with wrong signature for support number %d" -msgstr "la famiglia di operatori spgist \"%s\" contiene la funzione %s con signature non valida per il numero di supporto %d" - -#: access/spgist/spgvalidate.c:147 -#, c-format -msgid "spgist operator family \"%s\" contains operator %s with invalid strategy number %d" -msgstr "la famiglia di operatori spgist \"%s\" contiene l'operatore %s con numero di strategia non valido %d" - -#: access/spgist/spgvalidate.c:160 -#, c-format -msgid "spgist operator family \"%s\" contains invalid ORDER BY specification for operator %s" -msgstr "la famiglia di operatori spgist \"%s\" contiene una specifica ORDER BY non valida per l'operatore %s" - -#: access/spgist/spgvalidate.c:173 -#, c-format -msgid "spgist operator family \"%s\" contains operator %s with wrong signature" -msgstr "la famiglia di operatori spgist \"%s\" contiene l'operatore %s con signature non valida" - -#: access/spgist/spgvalidate.c:201 -#, c-format -msgid "spgist operator family \"%s\" is missing operator(s) for types %s and %s" -msgstr "alla famiglia di operatori spgist \"%s\" mancano operatori per i tipi %s e %s" - #: access/spgist/spgvalidate.c:221 #, c-format -msgid "spgist operator family \"%s\" is missing support function %d for type %s" -msgstr "alla famiglia di operatori spgist \"%s\" manca la funzione di supporto %d per il tipo %s" - -#: access/spgist/spgvalidate.c:234 -#, c-format -msgid "spgist operator class \"%s\" is missing operator(s)" -msgstr "alla classe di operatori spgist \"%s\" mancano operatori" +msgid "operator family \"%s\" of access method %s is missing support function %d for type %s" +msgstr "alla famiglia di operatori \"%s\" del metodo di accesso %s manca la funzione di supporto %d per il tipo %s" #: access/tablesample/bernoulli.c:152 access/tablesample/system.c:156 #, c-format @@ -1305,78 +1136,78 @@ msgstr "impossibile troncare fino al MultiXact %u perché non esiste su disco, t msgid "invalid MultiXactId: %u" msgstr "MultiXactId non valido: %u" -#: access/transam/parallel.c:577 +#: access/transam/parallel.c:581 #, c-format msgid "postmaster exited during a parallel transaction" msgstr "il postmaster è terminato durante una transazione parallela" -#: access/transam/parallel.c:764 +#: access/transam/parallel.c:768 #, c-format msgid "lost connection to parallel worker" msgstr "connessione al worker parallelo perduta" -#: access/transam/parallel.c:823 
access/transam/parallel.c:825 +#: access/transam/parallel.c:827 access/transam/parallel.c:829 msgid "parallel worker" msgstr "worker parallelo" -#: access/transam/parallel.c:968 +#: access/transam/parallel.c:972 #, c-format msgid "could not map dynamic shared memory segment" msgstr "mappatura del segmento di memoria dinamica condivisa non riuscito" -#: access/transam/parallel.c:973 +#: access/transam/parallel.c:977 #, c-format msgid "invalid magic number in dynamic shared memory segment" msgstr "numero magico non valido nel segmento di memoria dinamica condivisa" -#: access/transam/slru.c:664 +#: access/transam/slru.c:668 #, c-format msgid "file \"%s\" doesn't exist, reading as zeroes" msgstr "il file \"%s\" non esiste, interpretato come zeri" -#: access/transam/slru.c:903 access/transam/slru.c:909 -#: access/transam/slru.c:916 access/transam/slru.c:923 -#: access/transam/slru.c:930 access/transam/slru.c:937 +#: access/transam/slru.c:907 access/transam/slru.c:913 +#: access/transam/slru.c:920 access/transam/slru.c:927 +#: access/transam/slru.c:934 access/transam/slru.c:941 #, c-format msgid "could not access status of transaction %u" msgstr "non è stato possibile accedere allo stato della transazione %u" -#: access/transam/slru.c:904 +#: access/transam/slru.c:908 #, c-format msgid "Could not open file \"%s\": %m." msgstr "Apertura del file \"%s\" fallita: %m." -#: access/transam/slru.c:910 +#: access/transam/slru.c:914 #, c-format msgid "Could not seek in file \"%s\" to offset %u: %m." msgstr "Spostamento nel file \"%s\" all'offset %u fallito: %m." -#: access/transam/slru.c:917 +#: access/transam/slru.c:921 #, c-format msgid "Could not read from file \"%s\" at offset %u: %m." msgstr "Lettura dal file \"%s\" all'offset %u fallita: %m." -#: access/transam/slru.c:924 +#: access/transam/slru.c:928 #, c-format msgid "Could not write to file \"%s\" at offset %u: %m." msgstr "Scrittura nel file \"%s\" all'offset %u fallita: %m." -#: access/transam/slru.c:931 +#: access/transam/slru.c:935 #, c-format msgid "Could not fsync file \"%s\": %m." msgstr "fsync del file \"%s\" fallito: %m." -#: access/transam/slru.c:938 +#: access/transam/slru.c:942 #, c-format msgid "Could not close file \"%s\": %m." msgstr "Chiusura del file \"%s\" fallita: %m." -#: access/transam/slru.c:1195 +#: access/transam/slru.c:1199 #, c-format msgid "could not truncate directory \"%s\": apparent wraparound" msgstr "troncamento della directory \"%s\" fallito: probabile wraparound" -#: access/transam/slru.c:1250 access/transam/slru.c:1306 +#: access/transam/slru.c:1254 access/transam/slru.c:1310 #, c-format msgid "removing file \"%s\"" msgstr "cancellazione del file \"%s\"" @@ -1417,9 +1248,9 @@ msgid "Timeline IDs must be less than child timeline's ID." 
msgstr "Gli ID della timeline devono avere valori inferiori degli ID della timeline figlia" #: access/transam/timeline.c:418 access/transam/timeline.c:498 -#: access/transam/xlog.c:3257 access/transam/xlog.c:3424 -#: access/transam/xlogfuncs.c:693 commands/copy.c:1745 -#: storage/file/copydir.c:206 +#: access/transam/xlog.c:3256 access/transam/xlog.c:3423 +#: access/transam/xlogfuncs.c:693 commands/copy.c:1723 +#: storage/file/copydir.c:228 #, c-format msgid "could not close file \"%s\": %m" msgstr "chiusura del file \"%s\" fallita: %m" @@ -1429,62 +1260,62 @@ msgstr "chiusura del file \"%s\" fallita: %m" msgid "requested timeline %u is not in this server's history" msgstr "la timeline richiesta %u non è nella storia di questo server" -#: access/transam/twophase.c:385 +#: access/transam/twophase.c:383 #, c-format msgid "transaction identifier \"%s\" is too long" msgstr "l'identificativo di transazione \"%s\" è troppo lungo" -#: access/transam/twophase.c:392 +#: access/transam/twophase.c:390 #, c-format msgid "prepared transactions are disabled" msgstr "le transazione preparate sono disabilitate" -#: access/transam/twophase.c:393 +#: access/transam/twophase.c:391 #, c-format msgid "Set max_prepared_transactions to a nonzero value." msgstr "Imposta max_prepared_transactions ad un valore non nullo." -#: access/transam/twophase.c:412 +#: access/transam/twophase.c:410 #, c-format msgid "transaction identifier \"%s\" is already in use" msgstr "l'identificativo di transazione \"%s\" è già in uso" -#: access/transam/twophase.c:421 access/transam/twophase.c:2332 +#: access/transam/twophase.c:419 access/transam/twophase.c:2340 #, c-format msgid "maximum number of prepared transactions reached" msgstr "è stato raggiunto il numero massimo di transazioni preparate" -#: access/transam/twophase.c:422 access/transam/twophase.c:2333 +#: access/transam/twophase.c:420 access/transam/twophase.c:2341 #, c-format msgid "Increase max_prepared_transactions (currently %d)." msgstr "Incrementa il valore di max_prepared_transactions (il valore attuale è %d)" -#: access/transam/twophase.c:583 +#: access/transam/twophase.c:587 #, c-format msgid "prepared transaction with identifier \"%s\" is busy" msgstr "la transazione preparata con identificativo \"%s\" è in uso" -#: access/transam/twophase.c:589 +#: access/transam/twophase.c:593 #, c-format msgid "permission denied to finish prepared transaction" msgstr "non è consentito portare a termine la transazione preparata" -#: access/transam/twophase.c:590 +#: access/transam/twophase.c:594 #, c-format msgid "Must be superuser or the user that prepared the transaction." msgstr "È consentito solo a un superutente o all'utente che ha preparato la transazione." -#: access/transam/twophase.c:601 +#: access/transam/twophase.c:605 #, c-format msgid "prepared transaction belongs to another database" msgstr "la transazione preparata appartiene ad un altro database" -#: access/transam/twophase.c:602 +#: access/transam/twophase.c:606 #, c-format msgid "Connect to the database where the transaction was prepared to finish it." msgstr "Connettersi al database in cui la transazione è stata preparata per portarla a termine." 
-#: access/transam/twophase.c:617 +#: access/transam/twophase.c:621 #, c-format msgid "prepared transaction with identifier \"%s\" does not exist" msgstr "la transazione preparata con identificativo \"%s\" non esiste" @@ -1509,7 +1340,7 @@ msgstr "non è stato possibile ottenere informazioni sul file dello stato a due msgid "could not read two-phase state file \"%s\": %m" msgstr "lettura del file dello stato a due fasi \"%s\" fallita: %m" -#: access/transam/twophase.c:1307 access/transam/xlog.c:6351 +#: access/transam/twophase.c:1307 access/transam/xlog.c:6356 #, c-format msgid "Failed while allocating a WAL reading processor." msgstr "Errore nell'allocazione di un processore di lettura del WAL." @@ -1524,72 +1355,72 @@ msgstr "lettura dello stato a due fasi dal WAL a %X/%X fallita" msgid "expected two-phase state data is not present in WAL at %X/%X" msgstr "i dati attesi sullo stato a due fasi non sono presenti nel WAL a %X/%X" -#: access/transam/twophase.c:1556 +#: access/transam/twophase.c:1558 #, c-format msgid "could not remove two-phase state file \"%s\": %m" msgstr "rimozione del file dello stato a due fasi \"%s\" fallita: %m" -#: access/transam/twophase.c:1586 +#: access/transam/twophase.c:1588 #, c-format msgid "could not recreate two-phase state file \"%s\": %m" msgstr "ricreazione del file dello stato a due fasi \"%s\" fallita: %m" -#: access/transam/twophase.c:1597 access/transam/twophase.c:1605 +#: access/transam/twophase.c:1599 access/transam/twophase.c:1607 #, c-format msgid "could not write two-phase state file: %m" msgstr "scrittura nel file dello stato a due fasi fallito: %m" -#: access/transam/twophase.c:1619 +#: access/transam/twophase.c:1621 #, c-format msgid "could not fsync two-phase state file: %m" msgstr "fsync del file dello stato a due fasi: %m" -#: access/transam/twophase.c:1626 +#: access/transam/twophase.c:1628 #, c-format msgid "could not close two-phase state file: %m" msgstr "chiusura del file dello stato a due fasi fallita: %m" -#: access/transam/twophase.c:1714 +#: access/transam/twophase.c:1716 #, c-format msgid "%u two-phase state file was written for a long-running prepared transaction" msgid_plural "%u two-phase state files were written for long-running prepared transactions" msgstr[0] "%u file di stato a due fasi scritto per una transazione preparata di lunga durata" msgstr[1] "%u file di stato a due fasi scritti per transazioni preparate di lunga durata" -#: access/transam/twophase.c:1941 +#: access/transam/twophase.c:1944 #, c-format msgid "recovering prepared transaction %u from shared memory" msgstr "recupero di %u transazioni preparate dalla memoria condivisa" -#: access/transam/twophase.c:2026 +#: access/transam/twophase.c:2034 #, c-format -msgid "removing stale two-phase state file for \"%u\"" -msgstr "vecchio file di stato a due fasi per \"%u\" rimosso" +msgid "removing stale two-phase state file for transaction %u" +msgstr "vecchio file di stato a due fasi per la transazione %u rimosso" -#: access/transam/twophase.c:2033 +#: access/transam/twophase.c:2041 #, c-format -msgid "removing stale two-phase state from shared memory for \"%u\"" -msgstr "rimozione del vecchio stato a due fasi dalla memoria condivisa per \"%u\"" +msgid "removing stale two-phase state from memory for transaction %u" +msgstr "rimozione del vecchio stato a due fasi dalla memoria condivisa per la transazione %u" -#: access/transam/twophase.c:2046 +#: access/transam/twophase.c:2054 #, c-format -msgid "removing future two-phase state file for \"%u\"" -msgstr "rimozione del 
file di stato a due fasi future per \"%u\"" +msgid "removing future two-phase state file for transaction %u" +msgstr "rimozione del file di stato a due fasi future per la transazione %u" -#: access/transam/twophase.c:2053 +#: access/transam/twophase.c:2061 #, c-format -msgid "removing future two-phase state from memory for \"%u\"" -msgstr "rimozione dello stato a due fasi dalla memoria per \"%u\"" +msgid "removing future two-phase state from memory for transaction %u" +msgstr "rimozione dello stato a due fasi dalla memoria per la transazione %u" -#: access/transam/twophase.c:2067 access/transam/twophase.c:2086 +#: access/transam/twophase.c:2075 access/transam/twophase.c:2094 #, c-format -msgid "removing corrupt two-phase state file for \"%u\"" -msgstr "rimozione del file di stato a due fasi corrotto per \"%u\"" +msgid "removing corrupt two-phase state file for transaction %u" +msgstr "rimozione del file di stato a due fasi corrotto per la transazione %u" -#: access/transam/twophase.c:2093 +#: access/transam/twophase.c:2101 #, c-format -msgid "removing corrupt two-phase state from memory for \"%u\"" -msgstr "rimozione dello stato a due fasi corrotto dalla memoria per \"%u\"" +msgid "removing corrupt two-phase state from memory for transaction %u" +msgstr "rimozione dello stato a due fasi corrotto dalla memoria per la transazione %u" #: access/transam/varsup.c:124 #, c-format @@ -1635,932 +1466,937 @@ msgstr "non è possibile effettuare più di 2^32-2 comandi in una transazione" msgid "maximum number of committed subtransactions (%d) exceeded" msgstr "il numero massimo di sottotransazioni committed (%d) è stato superato" -#: access/transam/xact.c:2268 +#: access/transam/xact.c:2265 #, c-format msgid "cannot PREPARE a transaction that has operated on temporary tables" msgstr "non è possibile eseguire PREPARE in una transazione che ha operato su tabelle temporanee" -#: access/transam/xact.c:2278 +#: access/transam/xact.c:2275 #, c-format msgid "cannot PREPARE a transaction that has exported snapshots" msgstr "non è possibile eseguire PREPARE in una transazione che ha esportato snapshot" +#: access/transam/xact.c:2284 +#, c-format +msgid "cannot PREPARE a transaction that has manipulated logical replication workers" +msgstr "non è possibile eseguire PREPARE in una transazione che ha manipolato i worker di replica logica" + #. translator: %s represents an SQL statement name -#: access/transam/xact.c:3165 +#: access/transam/xact.c:3166 #, c-format msgid "%s cannot run inside a transaction block" msgstr "non è possibile eseguire %s all'interno di un blocco di transazione" # translator: %s represents an SQL statement name #. translator: %s represents an SQL statement name -#: access/transam/xact.c:3175 +#: access/transam/xact.c:3176 #, c-format msgid "%s cannot run inside a subtransaction" msgstr "non è possibile eseguire %s all'interno di una sottotransazione" #. translator: %s represents an SQL statement name -#: access/transam/xact.c:3185 +#: access/transam/xact.c:3186 #, c-format msgid "%s cannot be executed from a function or multi-command string" msgstr "una funzione o una stringa multi-comando non può eseguire %s" # translator: %s represents an SQL statement name #. 
translator: %s represents an SQL statement name -#: access/transam/xact.c:3256 +#: access/transam/xact.c:3257 #, c-format msgid "%s can only be used in transaction blocks" msgstr "si può usare %s solo entro blocchi di transazione" -#: access/transam/xact.c:3440 +#: access/transam/xact.c:3441 #, c-format msgid "there is already a transaction in progress" msgstr "c'è già una transazione in corso" -#: access/transam/xact.c:3608 access/transam/xact.c:3711 +#: access/transam/xact.c:3609 access/transam/xact.c:3712 #, c-format msgid "there is no transaction in progress" msgstr "non c'è alcuna transazione in corso" -#: access/transam/xact.c:3619 +#: access/transam/xact.c:3620 #, c-format msgid "cannot commit during a parallel operation" msgstr "non è possibile effettuare un commit durante un'operazione parallela" -#: access/transam/xact.c:3722 +#: access/transam/xact.c:3723 #, c-format msgid "cannot abort during a parallel operation" msgstr "non è possibile interrompere durante un'operazione parallela" -#: access/transam/xact.c:3764 +#: access/transam/xact.c:3765 #, c-format msgid "cannot define savepoints during a parallel operation" msgstr "non è possibile definire un punto di salvataggio durante un'operazione parallela" -#: access/transam/xact.c:3831 +#: access/transam/xact.c:3832 #, c-format msgid "cannot release savepoints during a parallel operation" msgstr "non è possibile rilasciare un punto di salvataggio durante un'operazione parallela" -#: access/transam/xact.c:3842 access/transam/xact.c:3894 -#: access/transam/xact.c:3900 access/transam/xact.c:3956 -#: access/transam/xact.c:4006 access/transam/xact.c:4012 +#: access/transam/xact.c:3843 access/transam/xact.c:3895 +#: access/transam/xact.c:3901 access/transam/xact.c:3957 +#: access/transam/xact.c:4007 access/transam/xact.c:4013 #, c-format msgid "no such savepoint" msgstr "punto di salvataggio inesistente" -#: access/transam/xact.c:3944 +#: access/transam/xact.c:3945 #, c-format msgid "cannot rollback to savepoints during a parallel operation" msgstr "non è possibile effettuare un rollback durante un'operazione parallela" -#: access/transam/xact.c:4072 +#: access/transam/xact.c:4073 #, c-format msgid "cannot start subtransactions during a parallel operation" msgstr "non è possibile iniziare una sub-transazione durante un'operazione parallela" -#: access/transam/xact.c:4139 +#: access/transam/xact.c:4140 #, c-format msgid "cannot commit subtransactions during a parallel operation" msgstr "non è possibile effettuare il commit di una sub-transazione durante un'operazione parallela" -#: access/transam/xact.c:4747 +#: access/transam/xact.c:4769 #, c-format msgid "cannot have more than 2^32-1 subtransactions in a transaction" msgstr "non è possibile avere più di 2^32-1 comandi in una sottotransazione" -#: access/transam/xlog.c:2456 +#: access/transam/xlog.c:2455 #, c-format msgid "could not seek in log file %s to offset %u: %m" msgstr "spostamento nel file di log %s alla posizione %u fallito: %m" -#: access/transam/xlog.c:2478 +#: access/transam/xlog.c:2477 #, c-format msgid "could not write to log file %s at offset %u, length %zu: %m" msgstr "scrittura nel file di log %s in posizione %u, lunghezza %zu fallita: %m" -#: access/transam/xlog.c:2742 +#: access/transam/xlog.c:2741 #, c-format msgid "updated min recovery point to %X/%X on timeline %u" msgstr "punto di recupero minimo aggiornato a %X/%X sulla timeline %u" -#: access/transam/xlog.c:3389 +#: access/transam/xlog.c:3388 #, c-format msgid "not enough data in file \"%s\"" msgstr "il file 
\"%s\" non contiene abbastanza dati" -#: access/transam/xlog.c:3535 +#: access/transam/xlog.c:3534 #, c-format msgid "could not open write-ahead log file \"%s\": %m" msgstr "apertura del file di log write-ahead \"%s\" fallita: %m" -#: access/transam/xlog.c:3724 access/transam/xlog.c:5536 +#: access/transam/xlog.c:3723 access/transam/xlog.c:5541 #, c-format msgid "could not close log file %s: %m" msgstr "chiusura del file di log %s fallita: %m" -#: access/transam/xlog.c:3781 access/transam/xlogutils.c:701 -#: replication/walsender.c:2383 +#: access/transam/xlog.c:3780 access/transam/xlogutils.c:701 +#: replication/walsender.c:2380 #, c-format msgid "requested WAL segment %s has already been removed" msgstr "il segmento WAL richiesto %s è stato già rimosso" -#: access/transam/xlog.c:3841 access/transam/xlog.c:3916 -#: access/transam/xlog.c:4111 +#: access/transam/xlog.c:3840 access/transam/xlog.c:3915 +#: access/transam/xlog.c:4110 #, c-format msgid "could not open write-ahead log directory \"%s\": %m" msgstr "apertura della directory del log write-ahead \"%s\" fallita: %m" -#: access/transam/xlog.c:3997 +#: access/transam/xlog.c:3996 #, c-format msgid "recycled write-ahead log file \"%s\"" msgstr "riciclaggio del file di log write-ahead \"%s\"" -#: access/transam/xlog.c:4009 +#: access/transam/xlog.c:4008 #, c-format msgid "removing write-ahead log file \"%s\"" msgstr "rimozione del file di log write-ahead \"%s\"" -#: access/transam/xlog.c:4029 +#: access/transam/xlog.c:4028 #, c-format msgid "could not rename old write-ahead log file \"%s\": %m" msgstr "rinominazione del vecchio file di log write-ahead \"%s\" fallita: %m" -#: access/transam/xlog.c:4071 access/transam/xlog.c:4081 +#: access/transam/xlog.c:4070 access/transam/xlog.c:4080 #, c-format msgid "required WAL directory \"%s\" does not exist" msgstr "la directory dei file WAL \"%s\" necessaria non esiste" -#: access/transam/xlog.c:4087 +#: access/transam/xlog.c:4086 #, c-format msgid "creating missing WAL directory \"%s\"" msgstr "creazione della directory dei file WAL mancante \"%s\"" -#: access/transam/xlog.c:4090 +#: access/transam/xlog.c:4089 #, c-format msgid "could not create missing directory \"%s\": %m" msgstr "creazione della directory mancante \"%s\" fallita: %m" -#: access/transam/xlog.c:4201 +#: access/transam/xlog.c:4200 #, c-format msgid "unexpected timeline ID %u in log segment %s, offset %u" msgstr "ID di timeline %u inatteso nel segmento di log %s, offset %u" -#: access/transam/xlog.c:4323 +#: access/transam/xlog.c:4322 #, c-format msgid "new timeline %u is not a child of database system timeline %u" msgstr "la nuova timeline %u non è figlia della timeline %u del database" -#: access/transam/xlog.c:4337 +#: access/transam/xlog.c:4336 #, c-format msgid "new timeline %u forked off current database system timeline %u before current recovery point %X/%X" msgstr "la nuova timeline %u si è staccata dalla timeline attuale %u prima del punto di recupero corrente %X/%X" -#: access/transam/xlog.c:4356 +#: access/transam/xlog.c:4355 #, c-format msgid "new target timeline is %u" msgstr "la nuova timeline di destinazione %u" -#: access/transam/xlog.c:4431 +#: access/transam/xlog.c:4436 #, c-format msgid "could not create control file \"%s\": %m" msgstr "creazione del file di controllo \"%s\" fallita: %m" -#: access/transam/xlog.c:4443 access/transam/xlog.c:4669 +#: access/transam/xlog.c:4448 access/transam/xlog.c:4674 #, c-format msgid "could not write to control file: %m" msgstr "scrittura nel file di controllo fallita: %m" -#: 
access/transam/xlog.c:4451 access/transam/xlog.c:4677 +#: access/transam/xlog.c:4456 access/transam/xlog.c:4682 #, c-format msgid "could not fsync control file: %m" msgstr "fsync del file di controllo fallito: %m" -#: access/transam/xlog.c:4457 access/transam/xlog.c:4683 +#: access/transam/xlog.c:4462 access/transam/xlog.c:4688 #, c-format msgid "could not close control file: %m" msgstr "chiusura del file di controllo fallita: %m" -#: access/transam/xlog.c:4475 access/transam/xlog.c:4657 +#: access/transam/xlog.c:4480 access/transam/xlog.c:4662 #, c-format msgid "could not open control file \"%s\": %m" msgstr "apertura del file di controllo \"%s\" fallita: %m" -#: access/transam/xlog.c:4482 +#: access/transam/xlog.c:4487 #, c-format msgid "could not read from control file: %m" msgstr "lettura dal file di controllo fallita: %m" -#: access/transam/xlog.c:4496 access/transam/xlog.c:4505 -#: access/transam/xlog.c:4529 access/transam/xlog.c:4536 -#: access/transam/xlog.c:4543 access/transam/xlog.c:4548 -#: access/transam/xlog.c:4555 access/transam/xlog.c:4562 -#: access/transam/xlog.c:4569 access/transam/xlog.c:4576 -#: access/transam/xlog.c:4583 access/transam/xlog.c:4590 -#: access/transam/xlog.c:4597 access/transam/xlog.c:4606 -#: access/transam/xlog.c:4613 access/transam/xlog.c:4622 -#: access/transam/xlog.c:4629 utils/init/miscinit.c:1397 +#: access/transam/xlog.c:4501 access/transam/xlog.c:4510 +#: access/transam/xlog.c:4534 access/transam/xlog.c:4541 +#: access/transam/xlog.c:4548 access/transam/xlog.c:4553 +#: access/transam/xlog.c:4560 access/transam/xlog.c:4567 +#: access/transam/xlog.c:4574 access/transam/xlog.c:4581 +#: access/transam/xlog.c:4588 access/transam/xlog.c:4595 +#: access/transam/xlog.c:4602 access/transam/xlog.c:4611 +#: access/transam/xlog.c:4618 access/transam/xlog.c:4627 +#: access/transam/xlog.c:4634 utils/init/miscinit.c:1406 #, c-format msgid "database files are incompatible with server" msgstr "i file del database sono incompatibili col server" -#: access/transam/xlog.c:4497 +#: access/transam/xlog.c:4502 #, c-format msgid "The database cluster was initialized with PG_CONTROL_VERSION %d (0x%08x), but the server was compiled with PG_CONTROL_VERSION %d (0x%08x)." msgstr "Il cluster di database è stato inizializzato con PG_CONTROL_VERSION %d (0x%08x), ma il server è stato compilato con PG_CONTROL_VERSION %d (0x%08x)." -#: access/transam/xlog.c:4501 +#: access/transam/xlog.c:4506 #, c-format msgid "This could be a problem of mismatched byte ordering. It looks like you need to initdb." msgstr "Questo potrebbe essere un problema di ordinamento di byte che non combacia. Sembra sia necessario eseguire initdb." -#: access/transam/xlog.c:4506 +#: access/transam/xlog.c:4511 #, c-format msgid "The database cluster was initialized with PG_CONTROL_VERSION %d, but the server was compiled with PG_CONTROL_VERSION %d." msgstr "Il cluster di database è stato inizializzato con PG_CONTROL_VERSION %d, ma il server è stato compilato con PG_CONTROL_VERSION %d." -#: access/transam/xlog.c:4509 access/transam/xlog.c:4533 -#: access/transam/xlog.c:4540 access/transam/xlog.c:4545 +#: access/transam/xlog.c:4514 access/transam/xlog.c:4538 +#: access/transam/xlog.c:4545 access/transam/xlog.c:4550 #, c-format msgid "It looks like you need to initdb." msgstr "Sembra sia necessario eseguire initdb." 
-#: access/transam/xlog.c:4520 +#: access/transam/xlog.c:4525 #, c-format msgid "incorrect checksum in control file" msgstr "il checksum nel file di controllo non è corretto" -#: access/transam/xlog.c:4530 +#: access/transam/xlog.c:4535 #, c-format msgid "The database cluster was initialized with CATALOG_VERSION_NO %d, but the server was compiled with CATALOG_VERSION_NO %d." msgstr "Il cluster di database è stato inizializzato con CATALOG_VERSION_NO %d, ma il server è stato compilato con CATALOG_VERSION_NO %d." -#: access/transam/xlog.c:4537 +#: access/transam/xlog.c:4542 #, c-format msgid "The database cluster was initialized with MAXALIGN %d, but the server was compiled with MAXALIGN %d." msgstr "Il cluster di database è stato inizializzato con MAXALIGN %d, ma il server è stato compilato con MAXALIGN %d." -#: access/transam/xlog.c:4544 +#: access/transam/xlog.c:4549 #, c-format msgid "The database cluster appears to use a different floating-point number format than the server executable." msgstr "Il cluster di database sta usando un formato per i numeri in virgola mobile diverso da quello usato dall'eseguibile del server." -#: access/transam/xlog.c:4549 +#: access/transam/xlog.c:4554 #, c-format msgid "The database cluster was initialized with BLCKSZ %d, but the server was compiled with BLCKSZ %d." msgstr "Il cluster di database è stato inizializzato con BLCKSZ %d, ma il server è stato compilato con BLCKSZ %d." -#: access/transam/xlog.c:4552 access/transam/xlog.c:4559 -#: access/transam/xlog.c:4566 access/transam/xlog.c:4573 -#: access/transam/xlog.c:4580 access/transam/xlog.c:4587 -#: access/transam/xlog.c:4594 access/transam/xlog.c:4601 -#: access/transam/xlog.c:4609 access/transam/xlog.c:4616 -#: access/transam/xlog.c:4625 access/transam/xlog.c:4632 +#: access/transam/xlog.c:4557 access/transam/xlog.c:4564 +#: access/transam/xlog.c:4571 access/transam/xlog.c:4578 +#: access/transam/xlog.c:4585 access/transam/xlog.c:4592 +#: access/transam/xlog.c:4599 access/transam/xlog.c:4606 +#: access/transam/xlog.c:4614 access/transam/xlog.c:4621 +#: access/transam/xlog.c:4630 access/transam/xlog.c:4637 #, c-format msgid "It looks like you need to recompile or initdb." msgstr "Si consiglia di ricompilare il sistema o di eseguire initdb." -#: access/transam/xlog.c:4556 +#: access/transam/xlog.c:4561 #, c-format msgid "The database cluster was initialized with RELSEG_SIZE %d, but the server was compiled with RELSEG_SIZE %d." msgstr "Il cluster di database è stato inizializzato con RELSEG_SIZE %d, ma il server è stato compilato con RELSEG_SIZE %d." -#: access/transam/xlog.c:4563 +#: access/transam/xlog.c:4568 #, c-format msgid "The database cluster was initialized with XLOG_BLCKSZ %d, but the server was compiled with XLOG_BLCKSZ %d." msgstr "Il cluster di database è stato inizializzato con XLOG_BLOCKSZ %d, ma il server è stato compilato con XLOG_BLOCKSZ %d." -#: access/transam/xlog.c:4570 +#: access/transam/xlog.c:4575 #, c-format msgid "The database cluster was initialized with XLOG_SEG_SIZE %d, but the server was compiled with XLOG_SEG_SIZE %d." msgstr "Il cluster di database è stato inizializzato con XLOG_SEG_SIZE %d, ma il server è stato compilato con XLOG_SEG_SIZE %d." -#: access/transam/xlog.c:4577 +#: access/transam/xlog.c:4582 #, c-format msgid "The database cluster was initialized with NAMEDATALEN %d, but the server was compiled with NAMEDATALEN %d." msgstr "Il cluster di database è stato inizializzato con NAMEDATALEN %d, ma il server è stato compilato con NAMEDATALEN %d." 
-#: access/transam/xlog.c:4584 +#: access/transam/xlog.c:4589 #, c-format msgid "The database cluster was initialized with INDEX_MAX_KEYS %d, but the server was compiled with INDEX_MAX_KEYS %d." msgstr "Il cluster di database è stato inizializzato con INDEX_MAX_KEYS %d, ma il server è stato compilato con INDEX_MAX_KEYS %d." -#: access/transam/xlog.c:4591 +#: access/transam/xlog.c:4596 #, c-format msgid "The database cluster was initialized with TOAST_MAX_CHUNK_SIZE %d, but the server was compiled with TOAST_MAX_CHUNK_SIZE %d." msgstr "Il cluster di database è stato inizializzato con TOAST_MAX_CHUNK_SIZE %d, ma il server è stato compilato con TOAST_MAX_CHUNK_SIZE %d." -#: access/transam/xlog.c:4598 +#: access/transam/xlog.c:4603 #, c-format msgid "The database cluster was initialized with LOBLKSIZE %d, but the server was compiled with LOBLKSIZE %d." msgstr "Il cluster di database è stato inizializzato con LOBLKSIZE %d, ma il server è stato compilato con LOBLKSIZE %d." -#: access/transam/xlog.c:4607 +#: access/transam/xlog.c:4612 #, c-format msgid "The database cluster was initialized without USE_FLOAT4_BYVAL but the server was compiled with USE_FLOAT4_BYVAL." msgstr "Il cluster di database è stato inizializzato senza USE_FLOAT4_BYVAL, ma il server è stato compilato con USE_FLOAT4_BYVAL." -#: access/transam/xlog.c:4614 +#: access/transam/xlog.c:4619 #, c-format msgid "The database cluster was initialized with USE_FLOAT4_BYVAL but the server was compiled without USE_FLOAT4_BYVAL." msgstr "Il cluster di database è stato inizializzato con USE_FLOAT4_BYVAL, ma il server è stato compilato senza USE_FLOAT4_BYVAL." -#: access/transam/xlog.c:4623 +#: access/transam/xlog.c:4628 #, c-format msgid "The database cluster was initialized without USE_FLOAT8_BYVAL but the server was compiled with USE_FLOAT8_BYVAL." msgstr "Il cluster di database è stato inizializzato senza USE_FLOAT8_BYVAL, ma il server è stato compilato con USE_FLOAT8_BYVAL." -#: access/transam/xlog.c:4630 +#: access/transam/xlog.c:4635 #, c-format msgid "The database cluster was initialized with USE_FLOAT8_BYVAL but the server was compiled without USE_FLOAT8_BYVAL." msgstr "Il cluster di database è stato inizializzato con USE_FLOAT8_BYVAL, ma il server è stato compilato senza USE_FLOAT8_BYVAL." 
-#: access/transam/xlog.c:4986 +#: access/transam/xlog.c:4991 #, c-format msgid "could not generate secret authorization token" msgstr "generazione del token segreto di autenticazione fallita" -#: access/transam/xlog.c:5076 +#: access/transam/xlog.c:5081 #, c-format msgid "could not write bootstrap write-ahead log file: %m" msgstr "scrittura del file di bootstrap del log write-ahead fallita: %m" -#: access/transam/xlog.c:5084 +#: access/transam/xlog.c:5089 #, c-format msgid "could not fsync bootstrap write-ahead log file: %m" msgstr "sincronizzazione del file di bootstrap del log write-ahead fallita: %m" -#: access/transam/xlog.c:5090 +#: access/transam/xlog.c:5095 #, c-format msgid "could not close bootstrap write-ahead log file: %m" msgstr "chiusura del file di bootstrap del log write-ahead fallita: %m" -#: access/transam/xlog.c:5166 +#: access/transam/xlog.c:5171 #, c-format msgid "could not open recovery command file \"%s\": %m" msgstr "apertura del file di ripristino \"%s\" fallita: %m" -#: access/transam/xlog.c:5212 access/transam/xlog.c:5314 +#: access/transam/xlog.c:5217 access/transam/xlog.c:5319 #, c-format msgid "invalid value for recovery parameter \"%s\": \"%s\"" msgstr "valore non valido per il parametro di ripristino \"%s\": \"%s\"" -#: access/transam/xlog.c:5215 +#: access/transam/xlog.c:5220 #, c-format msgid "Valid values are \"pause\", \"promote\", and \"shutdown\"." msgstr "I valori validi sono \"pause\", \"promote\" e \"shutdown\"." # da non tradurre # DV: perché (già tradotto peraltro) -#: access/transam/xlog.c:5235 +#: access/transam/xlog.c:5240 #, c-format msgid "recovery_target_timeline is not a valid number: \"%s\"" msgstr "recovery_target_timeline non ha un valore numerico valido: \"%s\"" -#: access/transam/xlog.c:5252 +#: access/transam/xlog.c:5257 #, c-format msgid "recovery_target_xid is not a valid number: \"%s\"" msgstr "recovery_target_xid non ha un valore numerico valido: \"%s\"" -#: access/transam/xlog.c:5283 +#: access/transam/xlog.c:5288 #, c-format msgid "recovery_target_name is too long (maximum %d characters)" msgstr "il recovery_target_name è troppo lungo (massimo %d caratteri)" -#: access/transam/xlog.c:5317 +#: access/transam/xlog.c:5322 #, c-format msgid "The only allowed value is \"immediate\"." msgstr "Il solo valore permesso è \"immediate\"." 
-#: access/transam/xlog.c:5330 access/transam/xlog.c:5341 -#: commands/extension.c:546 commands/extension.c:554 utils/misc/guc.c:5739 +#: access/transam/xlog.c:5335 access/transam/xlog.c:5346 +#: commands/extension.c:547 commands/extension.c:555 utils/misc/guc.c:5750 #, c-format msgid "parameter \"%s\" requires a Boolean value" msgstr "il parametro \"%s\" richiede un valore booleano" -#: access/transam/xlog.c:5376 +#: access/transam/xlog.c:5381 #, c-format msgid "parameter \"%s\" requires a temporal value" msgstr "il parametro \"%s\" richiede un valore temporale" -#: access/transam/xlog.c:5378 catalog/dependency.c:961 catalog/dependency.c:962 -#: catalog/dependency.c:968 catalog/dependency.c:969 catalog/dependency.c:980 -#: catalog/dependency.c:981 commands/tablecmds.c:927 commands/tablecmds.c:10336 -#: commands/user.c:1030 commands/view.c:505 libpq/auth.c:328 -#: replication/syncrep.c:1130 storage/lmgr/deadlock.c:1139 -#: storage/lmgr/proc.c:1313 utils/adt/acl.c:5248 utils/misc/guc.c:5761 -#: utils/misc/guc.c:5854 utils/misc/guc.c:9810 utils/misc/guc.c:9844 -#: utils/misc/guc.c:9878 utils/misc/guc.c:9912 utils/misc/guc.c:9947 +#: access/transam/xlog.c:5383 catalog/dependency.c:961 +#: catalog/dependency.c:962 catalog/dependency.c:968 catalog/dependency.c:969 +#: catalog/dependency.c:980 catalog/dependency.c:981 commands/tablecmds.c:946 +#: commands/tablecmds.c:10358 commands/user.c:1064 commands/view.c:505 +#: libpq/auth.c:328 replication/syncrep.c:1160 storage/lmgr/deadlock.c:1139 +#: storage/lmgr/proc.c:1313 utils/adt/acl.c:5250 utils/misc/guc.c:5772 +#: utils/misc/guc.c:5865 utils/misc/guc.c:9814 utils/misc/guc.c:9848 +#: utils/misc/guc.c:9882 utils/misc/guc.c:9916 utils/misc/guc.c:9951 #, c-format msgid "%s" msgstr "%s" -#: access/transam/xlog.c:5385 +#: access/transam/xlog.c:5390 #, c-format msgid "unrecognized recovery parameter \"%s\"" msgstr "parametro di ripristino \"%s\" sconosciuto" -#: access/transam/xlog.c:5396 +#: access/transam/xlog.c:5401 #, c-format msgid "recovery command file \"%s\" specified neither primary_conninfo nor restore_command" msgstr "il file dei comandi di ripristino \"%s\" non specifica né primary_conninfo né restore_command" -#: access/transam/xlog.c:5398 +#: access/transam/xlog.c:5403 #, c-format msgid "The database server will regularly poll the pg_wal subdirectory to check for files placed there." 
msgstr "Il server database ispezionerà regolarmente la sottodirectory pg_wal per controllare se vi vengono aggiunti dei file.\"" -#: access/transam/xlog.c:5405 +#: access/transam/xlog.c:5410 #, c-format msgid "recovery command file \"%s\" must specify restore_command when standby mode is not enabled" msgstr "il file dei comandi di ripristino \"%s\" deve specificare restore_command quando la modalità standby non è abilitata" -#: access/transam/xlog.c:5426 +#: access/transam/xlog.c:5431 #, c-format msgid "standby mode is not supported by single-user servers" msgstr "la modalità di standby non è supportata per i server a utente singolo" -#: access/transam/xlog.c:5445 +#: access/transam/xlog.c:5450 #, c-format msgid "recovery target timeline %u does not exist" msgstr "la timeline destinazione di recupero %u non esiste" -#: access/transam/xlog.c:5566 +#: access/transam/xlog.c:5571 #, c-format msgid "archive recovery complete" msgstr "il ripristino dell'archivio è stato completato" -#: access/transam/xlog.c:5625 access/transam/xlog.c:5891 +#: access/transam/xlog.c:5630 access/transam/xlog.c:5896 #, c-format msgid "recovery stopping after reaching consistency" msgstr "il ripristino è stato interrotto dopo aver raggiunto la consistenza" -#: access/transam/xlog.c:5646 +#: access/transam/xlog.c:5651 #, c-format msgid "recovery stopping before WAL location (LSN) \"%X/%X\"" msgstr "il ripristino è stato interrotto prima della locazione WAL (LSN) \"%X/%X\"" -#: access/transam/xlog.c:5732 +#: access/transam/xlog.c:5737 #, c-format msgid "recovery stopping before commit of transaction %u, time %s" msgstr "il ripristino è stato interrotto prima del commit della transazione %u, orario %s" -#: access/transam/xlog.c:5739 +#: access/transam/xlog.c:5744 #, c-format msgid "recovery stopping before abort of transaction %u, time %s" msgstr "il ripristino è stato interrotto prima dell'abort della transazione %u alle %s" -#: access/transam/xlog.c:5785 +#: access/transam/xlog.c:5790 #, c-format msgid "recovery stopping at restore point \"%s\", time %s" msgstr "il ripristino è stato interrotto al punto di ripristino \"%s\" alle %s" -#: access/transam/xlog.c:5803 +#: access/transam/xlog.c:5808 #, c-format msgid "recovery stopping after WAL location (LSN) \"%X/%X\"" msgstr "il ripristino è stato interrotto dopo la locazione WAL (LSN) \"%X/%X\"" -#: access/transam/xlog.c:5871 +#: access/transam/xlog.c:5876 #, c-format msgid "recovery stopping after commit of transaction %u, time %s" msgstr "il ripristino è stato interrotto dopo il commit della transazione %u alle %s" -#: access/transam/xlog.c:5879 +#: access/transam/xlog.c:5884 #, c-format msgid "recovery stopping after abort of transaction %u, time %s" msgstr "il ripristino è stato interrotto dopo l'abort della transazione %u alle %s" -#: access/transam/xlog.c:5919 +#: access/transam/xlog.c:5924 #, c-format msgid "recovery has paused" msgstr "ripristino in pausa" -#: access/transam/xlog.c:5920 +#: access/transam/xlog.c:5925 #, c-format msgid "Execute pg_wal_replay_resume() to continue." msgstr "Esegui pg_wal_replay_resume() per continuare." 
-#: access/transam/xlog.c:6128 +#: access/transam/xlog.c:6133 #, c-format msgid "hot standby is not possible because %s = %d is a lower setting than on the master server (its value was %d)" msgstr "l'hot standby non è possibile perché %s = %d è un'impostazione inferiore a quella del server master (il cui valore era %d)" -#: access/transam/xlog.c:6154 +#: access/transam/xlog.c:6159 #, c-format msgid "WAL was generated with wal_level=minimal, data may be missing" msgstr "il WAL è stato generato con wal_level=minimal, alcuni dati potrebbero mancare" -#: access/transam/xlog.c:6155 +#: access/transam/xlog.c:6160 #, c-format msgid "This happens if you temporarily set wal_level=minimal without taking a new base backup." msgstr "Questo avviene se imposti temporaneamente wal_level=minimal senza effettuare un nuovo backup di base." -#: access/transam/xlog.c:6166 +#: access/transam/xlog.c:6171 #, c-format msgid "hot standby is not possible because wal_level was not set to \"replica\" or higher on the master server" msgstr "l'hot standby non è possibile perché il wal_level non è impostato a \"replica\" o superiore nel server master" -#: access/transam/xlog.c:6167 +#: access/transam/xlog.c:6172 #, c-format msgid "Either set wal_level to \"replica\" on the master, or turn off hot_standby here." msgstr "Imposta il wal_level a \"replica\" sul master oppure disattiva hot_standby qui." -#: access/transam/xlog.c:6224 +#: access/transam/xlog.c:6229 #, c-format msgid "control file contains invalid data" msgstr "il file di controllo contiene dati non validi" -#: access/transam/xlog.c:6230 +#: access/transam/xlog.c:6235 #, c-format msgid "database system was shut down at %s" msgstr "il database è stato arrestato alle %s" -#: access/transam/xlog.c:6235 +#: access/transam/xlog.c:6240 #, c-format msgid "database system was shut down in recovery at %s" msgstr "il database è stato arrestato durante il ripristino alle %s" -#: access/transam/xlog.c:6239 +#: access/transam/xlog.c:6244 #, c-format msgid "database system shutdown was interrupted; last known up at %s" msgstr "l'arresto del database è stato interrotto; l'ultimo segno di vita risale alle %s" -#: access/transam/xlog.c:6243 +#: access/transam/xlog.c:6248 #, c-format msgid "database system was interrupted while in recovery at %s" msgstr "il database è stato interrotto alle %s mentre era in fase di ripristino" -#: access/transam/xlog.c:6245 +#: access/transam/xlog.c:6250 #, c-format msgid "This probably means that some data is corrupted and you will have to use the last backup for recovery." msgstr "Questo probabilmente significa che alcuni dati sono corrotti e dovrai usare il backup più recente per il ripristino." -#: access/transam/xlog.c:6249 +#: access/transam/xlog.c:6254 #, c-format msgid "database system was interrupted while in recovery at log time %s" msgstr "il database è stato interrotto all'orario di log %s mentre era in fase di ripristino" -#: access/transam/xlog.c:6251 +#: access/transam/xlog.c:6256 #, c-format msgid "If this has occurred more than once some data might be corrupted and you might need to choose an earlier recovery target." msgstr "Se ciò è avvenuto più di una volta, alcuni dati potrebbero essere corrotti e potresti dover scegliere una destinazione di ripristino precedente." 
-#: access/transam/xlog.c:6255 +#: access/transam/xlog.c:6260 #, c-format msgid "database system was interrupted; last known up at %s" msgstr "il database è stato interrotto; l'ultimo segno di vita risale alle %s" -#: access/transam/xlog.c:6311 +#: access/transam/xlog.c:6316 #, c-format msgid "entering standby mode" msgstr "inizio modalità standby" -#: access/transam/xlog.c:6314 +#: access/transam/xlog.c:6319 #, c-format msgid "starting point-in-time recovery to XID %u" msgstr "avvio del ripristino point-in-time allo XID %u" -#: access/transam/xlog.c:6318 +#: access/transam/xlog.c:6323 #, c-format msgid "starting point-in-time recovery to %s" msgstr "avvio del ripristino point-in-time alle %s" -#: access/transam/xlog.c:6322 +#: access/transam/xlog.c:6327 #, c-format msgid "starting point-in-time recovery to \"%s\"" msgstr "avvio del ripristino point-in-time a \"%s\"" -#: access/transam/xlog.c:6326 +#: access/transam/xlog.c:6331 #, c-format msgid "starting point-in-time recovery to WAL location (LSN) \"%X/%X\"" msgstr "avvio del ripristino point-in-time alla locazione WAL (LSN) \"%X/%X\"" -#: access/transam/xlog.c:6331 +#: access/transam/xlog.c:6336 #, c-format msgid "starting point-in-time recovery to earliest consistent point" msgstr "avvio del ripristino point-in-time al precedente punto consistente" -#: access/transam/xlog.c:6334 +#: access/transam/xlog.c:6339 #, c-format msgid "starting archive recovery" msgstr "avvio del ripristino dell'archivio" -#: access/transam/xlog.c:6385 access/transam/xlog.c:6513 +#: access/transam/xlog.c:6390 access/transam/xlog.c:6518 #, c-format msgid "checkpoint record is at %X/%X" msgstr "il record di checkpoint si trova in %X/%X" -#: access/transam/xlog.c:6399 +#: access/transam/xlog.c:6404 #, c-format msgid "could not find redo location referenced by checkpoint record" msgstr "localizzazione della posizione di redo referenziata dal record di checkpoint fallita" -#: access/transam/xlog.c:6400 access/transam/xlog.c:6407 +#: access/transam/xlog.c:6405 access/transam/xlog.c:6412 #, c-format msgid "If you are not restoring from a backup, try removing the file \"%s/backup_label\"." msgstr "Se non si sta effettuando il ripristino da backup, prova a rimuovere il file \"%s/backup_label\"." -#: access/transam/xlog.c:6406 +#: access/transam/xlog.c:6411 #, c-format msgid "could not locate required checkpoint record" msgstr "localizzazione del record di checkpoint richiesto fallita" -#: access/transam/xlog.c:6432 commands/tablespace.c:639 +#: access/transam/xlog.c:6437 commands/tablespace.c:639 #, c-format msgid "could not create symbolic link \"%s\": %m" msgstr "creazione del link simbolico \"%s\" fallita: %m" -#: access/transam/xlog.c:6464 access/transam/xlog.c:6470 +#: access/transam/xlog.c:6469 access/transam/xlog.c:6475 #, c-format msgid "ignoring file \"%s\" because no file \"%s\" exists" msgstr "il file \"%s\" verrà ignorato perché il file \"%s\" non esiste" -#: access/transam/xlog.c:6466 access/transam/xlog.c:11386 +#: access/transam/xlog.c:6471 access/transam/xlog.c:11400 #, c-format msgid "File \"%s\" was renamed to \"%s\"." msgstr "Il file \"%s\" è stato rinominato in \"%s\"." -#: access/transam/xlog.c:6472 +#: access/transam/xlog.c:6477 #, c-format msgid "Could not rename file \"%s\" to \"%s\": %m." msgstr "Cambio del nome del file da \"%s\" a \"%s\" fallito: %m." 
-#: access/transam/xlog.c:6523 access/transam/xlog.c:6538 +#: access/transam/xlog.c:6528 access/transam/xlog.c:6543 #, c-format msgid "could not locate a valid checkpoint record" msgstr "localizzazione di un record di checkpoint valido fallita" -#: access/transam/xlog.c:6532 +#: access/transam/xlog.c:6537 #, c-format msgid "using previous checkpoint record at %X/%X" msgstr "si sta usando il precedente record di checkpoint in %X/%X" -#: access/transam/xlog.c:6576 +#: access/transam/xlog.c:6581 #, c-format msgid "requested timeline %u is not a child of this server's history" msgstr "la timeline richiesta %u non è figlia della storia di questo server" -#: access/transam/xlog.c:6578 +#: access/transam/xlog.c:6583 #, c-format msgid "Latest checkpoint is at %X/%X on timeline %u, but in the history of the requested timeline, the server forked off from that timeline at %X/%X." msgstr "L'ultimo checkpoint è a %X/%X sulla timeline %u, ma nella storia della timeline richiesta, il server si è separato da quella timeline a %X/%X." -#: access/transam/xlog.c:6594 +#: access/transam/xlog.c:6599 #, c-format msgid "requested timeline %u does not contain minimum recovery point %X/%X on timeline %u" msgstr "la timeline richiesta %u non contiene il punto di recupero minimo %X/%X sulla timeline %u" -#: access/transam/xlog.c:6625 +#: access/transam/xlog.c:6630 #, c-format msgid "invalid next transaction ID" msgstr "l'ID della prossima transazione non è valido" -#: access/transam/xlog.c:6719 +#: access/transam/xlog.c:6724 #, c-format msgid "invalid redo in checkpoint record" msgstr "il redo nel record di checkpoint non è valido" -#: access/transam/xlog.c:6730 +#: access/transam/xlog.c:6735 #, c-format msgid "invalid redo record in shutdown checkpoint" msgstr "record di redo non valido nel checkpoint di arresto" -#: access/transam/xlog.c:6758 +#: access/transam/xlog.c:6763 #, c-format msgid "database system was not properly shut down; automatic recovery in progress" msgstr "il database non è stato arrestato correttamente; ripristino automatico in corso" -#: access/transam/xlog.c:6762 +#: access/transam/xlog.c:6767 #, c-format msgid "crash recovery starts in timeline %u and has target timeline %u" msgstr "il recupero dal crash comincia nella timeline %u e si conclude nella timeline %u" -#: access/transam/xlog.c:6806 +#: access/transam/xlog.c:6811 #, c-format msgid "backup_label contains data inconsistent with control file" msgstr "backup_label contiene dati non consistenti col file di controllo" -#: access/transam/xlog.c:6807 +#: access/transam/xlog.c:6812 #, c-format msgid "This means that the backup is corrupted and you will have to use another backup for recovery." msgstr "Questo vuol dire che il backup è corrotto e sarà necessario usare un altro backup per il ripristino." 
-#: access/transam/xlog.c:6881 +#: access/transam/xlog.c:6886 #, c-format msgid "initializing for hot standby" msgstr "inizializzazione per l'hot standby" -#: access/transam/xlog.c:7013 +#: access/transam/xlog.c:7018 #, c-format msgid "redo starts at %X/%X" msgstr "il redo inizia in %X/%X" -#: access/transam/xlog.c:7247 +#: access/transam/xlog.c:7252 #, c-format msgid "requested recovery stop point is before consistent recovery point" msgstr "lo stop point di ripristino è posto prima di un punto di ripristino consistente" -#: access/transam/xlog.c:7285 +#: access/transam/xlog.c:7290 #, c-format msgid "redo done at %X/%X" msgstr "redo concluso in %X/%X" -#: access/transam/xlog.c:7290 access/transam/xlog.c:9299 +#: access/transam/xlog.c:7295 access/transam/xlog.c:9309 #, c-format msgid "last completed transaction was at log time %s" msgstr "l'ultima transazione è stata completata all'orario di log %s" -#: access/transam/xlog.c:7299 +#: access/transam/xlog.c:7304 #, c-format msgid "redo is not required" msgstr "redo non richiesto" -#: access/transam/xlog.c:7374 access/transam/xlog.c:7378 +#: access/transam/xlog.c:7379 access/transam/xlog.c:7383 #, c-format msgid "WAL ends before end of online backup" msgstr "il WAL termina prima della fine del backup online" -#: access/transam/xlog.c:7375 +#: access/transam/xlog.c:7380 #, c-format msgid "All WAL generated while online backup was taken must be available at recovery." msgstr "Tutti i file WAL generati mentre il backup online veniva effettuato devono essere disponibili al momento del ripristino." -#: access/transam/xlog.c:7379 +#: access/transam/xlog.c:7384 #, c-format msgid "Online backup started with pg_start_backup() must be ended with pg_stop_backup(), and all WAL up to that point must be available at recovery." msgstr "Un backup online iniziato con pg_start_backup() deve essere terminato con pg_stop_backup(), e tutti i file WAL fino a quel punto devono essere disponibili per il ripristino." 
-#: access/transam/xlog.c:7382 +#: access/transam/xlog.c:7387 #, c-format msgid "WAL ends before consistent recovery point" msgstr "il WAL termina prima di un punto di ripristino consistente" -#: access/transam/xlog.c:7409 +#: access/transam/xlog.c:7414 #, c-format msgid "selected new timeline ID: %u" msgstr "l'ID della nuova timeline selezionata è %u" -#: access/transam/xlog.c:7838 +#: access/transam/xlog.c:7843 #, c-format msgid "consistent recovery state reached at %X/%X" msgstr "è stato raggiunto uno stato di ripristino consistente a %X/%X" -#: access/transam/xlog.c:8030 +#: access/transam/xlog.c:8035 #, c-format msgid "invalid primary checkpoint link in control file" msgstr "il link nel file di controllo al checkpoint primario non è valido" -#: access/transam/xlog.c:8034 +#: access/transam/xlog.c:8039 #, c-format msgid "invalid secondary checkpoint link in control file" msgstr "il link nel file di controllo al checkpoint secondario non è valido" -#: access/transam/xlog.c:8038 +#: access/transam/xlog.c:8043 #, c-format msgid "invalid checkpoint link in backup_label file" msgstr "il link al checkpoint nel file backup_label non è valido" -#: access/transam/xlog.c:8055 +#: access/transam/xlog.c:8060 #, c-format msgid "invalid primary checkpoint record" msgstr "il record del checkpoint primario non è valido" -#: access/transam/xlog.c:8059 +#: access/transam/xlog.c:8064 #, c-format msgid "invalid secondary checkpoint record" msgstr "il record del checkpoint secondario non è valido" -#: access/transam/xlog.c:8063 +#: access/transam/xlog.c:8068 #, c-format msgid "invalid checkpoint record" msgstr "il record del checkpoint non è valido" -#: access/transam/xlog.c:8074 +#: access/transam/xlog.c:8079 #, c-format msgid "invalid resource manager ID in primary checkpoint record" msgstr "l'ID del resource manager nel record del checkpoint primario non è valido" -#: access/transam/xlog.c:8078 +#: access/transam/xlog.c:8083 #, c-format msgid "invalid resource manager ID in secondary checkpoint record" msgstr "l'ID del resource manager nel record del checkpoint secondario non è valido" -#: access/transam/xlog.c:8082 +#: access/transam/xlog.c:8087 #, c-format msgid "invalid resource manager ID in checkpoint record" msgstr "l'ID del resource manager nel record del checkpoint non è valido" -#: access/transam/xlog.c:8095 +#: access/transam/xlog.c:8100 #, c-format msgid "invalid xl_info in primary checkpoint record" msgstr "l'xl_info nel record del checkpoint primario non è valido" -#: access/transam/xlog.c:8099 +#: access/transam/xlog.c:8104 #, c-format msgid "invalid xl_info in secondary checkpoint record" msgstr "l'xl_info nel record del checkpoint secondario non è valido" -#: access/transam/xlog.c:8103 +#: access/transam/xlog.c:8108 #, c-format msgid "invalid xl_info in checkpoint record" msgstr "l'xl_info nel record del checkpoint non è valido" -#: access/transam/xlog.c:8114 +#: access/transam/xlog.c:8119 #, c-format msgid "invalid length of primary checkpoint record" msgstr "la lunghezza del record del checkpoint primario non è valida" -#: access/transam/xlog.c:8118 +#: access/transam/xlog.c:8123 #, c-format msgid "invalid length of secondary checkpoint record" msgstr "la lunghezza del record del checkpoint secondario non è valida" -#: access/transam/xlog.c:8122 +#: access/transam/xlog.c:8127 #, c-format msgid "invalid length of checkpoint record" msgstr "la lunghezza del record del checkpoint non è valida" -#: access/transam/xlog.c:8325 +#: access/transam/xlog.c:8330 #, c-format msgid "shutting down" 
msgstr "arresto in corso" -#: access/transam/xlog.c:8639 +#: access/transam/xlog.c:8649 #, c-format -msgid "checkpoint skipped due to an idle system" -msgstr "checkpoint saltato per un sistema inattivo" +msgid "checkpoint skipped because system is idle" +msgstr "checkpoint saltato perché il sistema è inattivo" -#: access/transam/xlog.c:8844 +#: access/transam/xlog.c:8854 #, c-format msgid "concurrent write-ahead log activity while database system is shutting down" msgstr "attività concorrente del log write-ahead mentre il database è in fase di arresto" -#: access/transam/xlog.c:9098 +#: access/transam/xlog.c:9108 #, c-format msgid "skipping restartpoint, recovery has already ended" msgstr "si tralascia il restartpoint, il ripristino è ormai terminato" -#: access/transam/xlog.c:9121 +#: access/transam/xlog.c:9131 #, c-format msgid "skipping restartpoint, already performed at %X/%X" msgstr "si tralascia il restartpoint, già eseguito in %X/%X" -#: access/transam/xlog.c:9297 +#: access/transam/xlog.c:9307 #, c-format msgid "recovery restart point at %X/%X" msgstr "punto di avvio del ripristino in %X/%X" -#: access/transam/xlog.c:9433 +#: access/transam/xlog.c:9443 #, c-format msgid "restore point \"%s\" created at %X/%X" msgstr "punto di ripristino \"%s\" creato in %X/%X" -#: access/transam/xlog.c:9563 +#: access/transam/xlog.c:9573 #, c-format msgid "unexpected previous timeline ID %u (current timeline ID %u) in checkpoint record" msgstr "timeline precedente con ID %u non prevista (l'ID della timeline corrente è %u) nel record di checkpoint" -#: access/transam/xlog.c:9572 +#: access/transam/xlog.c:9582 #, c-format msgid "unexpected timeline ID %u (after %u) in checkpoint record" msgstr "timeline ID %u imprevista (dopo %u) nel record di checkpoint" -#: access/transam/xlog.c:9588 +#: access/transam/xlog.c:9598 #, c-format msgid "unexpected timeline ID %u in checkpoint record, before reaching minimum recovery point %X/%X on timeline %u" msgstr "timeline ID %u imprevista nel record di checkpoint, prima di raggiungere il punto di recupero minimo %X/%X sulla timeline %u" -#: access/transam/xlog.c:9664 +#: access/transam/xlog.c:9674 #, c-format msgid "online backup was canceled, recovery cannot continue" msgstr "il backup online è stato annullato, il ripristino non può continuare" -#: access/transam/xlog.c:9720 access/transam/xlog.c:9767 -#: access/transam/xlog.c:9790 +#: access/transam/xlog.c:9730 access/transam/xlog.c:9777 +#: access/transam/xlog.c:9800 #, c-format msgid "unexpected timeline ID %u (should be %u) in checkpoint record" msgstr "l'ID della timeline %u (che dovrebbe essere %u) non era prevista nel record di checkpoint" -#: access/transam/xlog.c:10066 +#: access/transam/xlog.c:10076 #, c-format msgid "could not fsync log segment %s: %m" msgstr "fsync del segmento di log %s fallito: %m" -#: access/transam/xlog.c:10091 +#: access/transam/xlog.c:10101 #, c-format msgid "could not fsync log file %s: %m" msgstr "fsync del file di log %s fallito: %m" -#: access/transam/xlog.c:10099 +#: access/transam/xlog.c:10109 #, c-format msgid "could not fsync write-through log file %s: %m" msgstr "fsync write-through del file di log %s fallito: %m" -#: access/transam/xlog.c:10108 +#: access/transam/xlog.c:10118 #, c-format msgid "could not fdatasync log file %s: %m" msgstr "fdatasync del file di log %s fallito: %m" -#: access/transam/xlog.c:10199 access/transam/xlog.c:10717 +#: access/transam/xlog.c:10209 access/transam/xlog.c:10727 #: access/transam/xlogfuncs.c:297 access/transam/xlogfuncs.c:324 #: 
access/transam/xlogfuncs.c:363 access/transam/xlogfuncs.c:384 #: access/transam/xlogfuncs.c:405 @@ -2568,65 +2404,66 @@ msgstr "fdatasync del file di log %s fallito: %m" msgid "WAL control functions cannot be executed during recovery." msgstr "le funzioni di controllo WAL non possono essere eseguite durante il ripristino." -#: access/transam/xlog.c:10208 access/transam/xlog.c:10726 +#: access/transam/xlog.c:10218 access/transam/xlog.c:10736 #, c-format msgid "WAL level not sufficient for making an online backup" msgstr "livello WAL non sufficiente per creare un backup online" -#: access/transam/xlog.c:10209 access/transam/xlog.c:10727 +#: access/transam/xlog.c:10219 access/transam/xlog.c:10737 #: access/transam/xlogfuncs.c:330 #, c-format msgid "wal_level must be set to \"replica\" or \"logical\" at server start." msgstr "Il wal_level deve essere impostato a \"replica\" o \"logical\" all'avvio del server." -#: access/transam/xlog.c:10214 +#: access/transam/xlog.c:10224 #, c-format msgid "backup label too long (max %d bytes)" msgstr "etichetta di backup troppo lunga (massimo %d byte)" -#: access/transam/xlog.c:10251 access/transam/xlog.c:10524 -#: access/transam/xlog.c:10562 +#: access/transam/xlog.c:10261 access/transam/xlog.c:10534 +#: access/transam/xlog.c:10572 #, c-format msgid "a backup is already in progress" msgstr "c'è già un backup in corso" -#: access/transam/xlog.c:10252 +#: access/transam/xlog.c:10262 #, c-format msgid "Run pg_stop_backup() and try again." msgstr "Esegui pg_stop_backup() e prova di nuovo." -#: access/transam/xlog.c:10347 +#: access/transam/xlog.c:10357 #, c-format msgid "WAL generated with full_page_writes=off was replayed since last restartpoint" msgstr "un WAL generato con full_page_writes=off è stato riprodotto dopo l'ultimo restartpoint" -#: access/transam/xlog.c:10349 access/transam/xlog.c:10907 +#: access/transam/xlog.c:10359 access/transam/xlog.c:10919 #, c-format msgid "This means that the backup being taken on the standby is corrupt and should not be used. Enable full_page_writes and run CHECKPOINT on the master, and then try an online backup again." msgstr "Ciò vuol dire che il backup che sta venendo preso sullo standby è corrotto e non dovrebbe essere usato. 
Abilita full_page_writes ed esegui CHECKPOINT sul master, poi prova ad effettuare nuovamente un backup online.\"" -#: access/transam/xlog.c:10416 replication/basebackup.c:1096 +#: access/transam/xlog.c:10426 replication/basebackup.c:1096 #: utils/adt/misc.c:497 #, c-format msgid "could not read symbolic link \"%s\": %m" msgstr "lettura del link simbolico \"%s\" fallita: %m" -#: access/transam/xlog.c:10423 replication/basebackup.c:1101 +#: access/transam/xlog.c:10433 replication/basebackup.c:1101 #: utils/adt/misc.c:502 #, c-format msgid "symbolic link \"%s\" target is too long" msgstr "la destinazione del link simbolico \"%s\" è troppo lunga" -#: access/transam/xlog.c:10476 commands/tablespace.c:389 -#: commands/tablespace.c:551 replication/basebackup.c:1116 utils/adt/misc.c:510 +#: access/transam/xlog.c:10486 commands/tablespace.c:389 +#: commands/tablespace.c:551 replication/basebackup.c:1116 +#: utils/adt/misc.c:510 #, c-format msgid "tablespaces are not supported on this platform" msgstr "i tablespace non sono supportati su questa piattaforma" -#: access/transam/xlog.c:10518 access/transam/xlog.c:10556 -#: access/transam/xlog.c:10765 access/transam/xlogarchive.c:105 -#: access/transam/xlogarchive.c:264 commands/copy.c:1866 commands/copy.c:3051 -#: commands/extension.c:3317 commands/tablespace.c:780 +#: access/transam/xlog.c:10528 access/transam/xlog.c:10566 +#: access/transam/xlog.c:10775 access/transam/xlogarchive.c:105 +#: access/transam/xlogarchive.c:264 commands/copy.c:1853 commands/copy.c:3155 +#: commands/extension.c:3319 commands/tablespace.c:780 #: commands/tablespace.c:871 replication/basebackup.c:480 #: replication/basebackup.c:548 replication/logical/snapbuild.c:1518 #: storage/file/copydir.c:72 storage/file/copydir.c:115 storage/file/fd.c:2954 @@ -2637,129 +2474,129 @@ msgstr "i tablespace non sono supportati su questa piattaforma" msgid "could not stat file \"%s\": %m" msgstr "non è stato possibile ottenere informazioni sul file \"%s\": %m" -#: access/transam/xlog.c:10525 access/transam/xlog.c:10563 +#: access/transam/xlog.c:10535 access/transam/xlog.c:10573 #, c-format msgid "If you're sure there is no backup in progress, remove file \"%s\" and try again." msgstr "Se si è certi che non ci sono backup in corso, rimuovi il file \"%s\" e prova di nuovo." 
-#: access/transam/xlog.c:10542 access/transam/xlog.c:10580 -#: access/transam/xlog.c:10968 postmaster/syslogger.c:1391 +#: access/transam/xlog.c:10552 access/transam/xlog.c:10590 +#: access/transam/xlog.c:10977 postmaster/syslogger.c:1391 #: postmaster/syslogger.c:1404 #, c-format msgid "could not write file \"%s\": %m" msgstr "scrittura nel file \"%s\" fallita: %m" -#: access/transam/xlog.c:10742 +#: access/transam/xlog.c:10752 #, c-format msgid "exclusive backup not in progress" msgstr "non c'è un backup esclusivo in corso" -#: access/transam/xlog.c:10769 +#: access/transam/xlog.c:10779 #, c-format msgid "a backup is not in progress" msgstr "non c'è un backup in esecuzione" -#: access/transam/xlog.c:10842 access/transam/xlog.c:10855 -#: access/transam/xlog.c:11196 access/transam/xlog.c:11202 -#: access/transam/xlog.c:11286 access/transam/xlogfuncs.c:698 +#: access/transam/xlog.c:10852 access/transam/xlog.c:10865 +#: access/transam/xlog.c:11210 access/transam/xlog.c:11216 +#: access/transam/xlog.c:11300 access/transam/xlogfuncs.c:698 #, c-format msgid "invalid data in file \"%s\"" msgstr "i dati nel file \"%s\" non sono validi" -#: access/transam/xlog.c:10859 replication/basebackup.c:994 +#: access/transam/xlog.c:10869 replication/basebackup.c:994 #, c-format msgid "the standby was promoted during online backup" msgstr "lo standby è stato promosso durante il backup online" -#: access/transam/xlog.c:10860 replication/basebackup.c:995 +#: access/transam/xlog.c:10870 replication/basebackup.c:995 #, c-format msgid "This means that the backup being taken is corrupt and should not be used. Try taking another online backup." msgstr "Ciò vuol dire che il backup che stava venendo salvato è corrotto e non dovrebbe essere usato. Prova ad effettuare un altro backup online." -#: access/transam/xlog.c:10905 +#: access/transam/xlog.c:10917 #, c-format msgid "WAL generated with full_page_writes=off was replayed during online backup" msgstr "un WAL generato con full_page_writes=off è stato riprodotto durante il backup online" -#: access/transam/xlog.c:11018 +#: access/transam/xlog.c:11032 #, c-format msgid "pg_stop_backup cleanup done, waiting for required WAL segments to be archived" msgstr "pulizia di pg_stop_backup effettuata, in attesa che i segmenti WAL richiesti vengano archiviati" -#: access/transam/xlog.c:11028 +#: access/transam/xlog.c:11042 #, c-format msgid "pg_stop_backup still waiting for all required WAL segments to be archived (%d seconds elapsed)" msgstr "pg_stop_backup è ancora in attesa che tutti i segmenti WAL richiesti siano stati archiviati (sono passati %d secondi)" -#: access/transam/xlog.c:11030 +#: access/transam/xlog.c:11044 #, c-format msgid "Check that your archive_command is executing properly. pg_stop_backup can be canceled safely, but the database backup will not be usable without all the WAL segments." msgstr "Controlla che il tuo archive_command venga eseguito correttamente. pg_stop_backup può essere interrotto in sicurezza ma il backup del database non sarà utilizzabile senza tutti i segmenti WAL." 
-#: access/transam/xlog.c:11037 +#: access/transam/xlog.c:11051 #, c-format msgid "pg_stop_backup complete, all required WAL segments have been archived" msgstr "pg_stop_backup completo, tutti i segmenti WAL richiesti sono stati archiviati" -#: access/transam/xlog.c:11041 +#: access/transam/xlog.c:11055 #, c-format msgid "WAL archiving is not enabled; you must ensure that all required WAL segments are copied through other means to complete the backup" msgstr "l'archiviazione WAL non è abilitata; devi verificare che tutti i segmenti WAL richiesti vengano copiati in qualche altro modo per completare il backup" #. translator: %s is a WAL record description -#: access/transam/xlog.c:11326 +#: access/transam/xlog.c:11340 #, c-format msgid "WAL redo at %X/%X for %s" msgstr "Ripristino WAL a %X/%X per %s" -#: access/transam/xlog.c:11375 +#: access/transam/xlog.c:11389 #, c-format msgid "online backup mode was not canceled" msgstr "la modalità di backup online non è stata annullata" -#: access/transam/xlog.c:11376 +#: access/transam/xlog.c:11390 #, c-format msgid "File \"%s\" could not be renamed to \"%s\": %m." msgstr "Non è stato possibile rinominare il file \"%s\" in \"%s\": %m." -#: access/transam/xlog.c:11385 access/transam/xlog.c:11397 -#: access/transam/xlog.c:11407 +#: access/transam/xlog.c:11399 access/transam/xlog.c:11411 +#: access/transam/xlog.c:11421 #, c-format msgid "online backup mode canceled" msgstr "modalità backup online annullata" -#: access/transam/xlog.c:11398 +#: access/transam/xlog.c:11412 #, c-format msgid "Files \"%s\" and \"%s\" were renamed to \"%s\" and \"%s\", respectively." msgstr "File \"%s\" e \"%s\" rinominati rispettivamente in \"%s\" e \"%s\"." -#: access/transam/xlog.c:11408 +#: access/transam/xlog.c:11422 #, c-format msgid "File \"%s\" was renamed to \"%s\", but file \"%s\" could not be renamed to \"%s\": %m." msgstr "File \"%s\" rinominato in \"%s\", ma non è stato possibile rinominare il file \"%s\" in \"%s\": %m." 
-#: access/transam/xlog.c:11530 access/transam/xlogutils.c:724 -#: replication/walreceiver.c:1006 replication/walsender.c:2400 +#: access/transam/xlog.c:11544 access/transam/xlogutils.c:724 +#: replication/walreceiver.c:1011 replication/walsender.c:2397 #, c-format msgid "could not seek in log segment %s to offset %u: %m" msgstr "spostamento nel segmento di log %s alla posizione %u fallito: %m" -#: access/transam/xlog.c:11544 +#: access/transam/xlog.c:11558 #, c-format msgid "could not read from log segment %s, offset %u: %m" msgstr "lettura del segmento di log %s, posizione %u fallita: %m" -#: access/transam/xlog.c:12033 +#: access/transam/xlog.c:12047 #, c-format msgid "received promote request" msgstr "richiesta di promozione ricevuta" -#: access/transam/xlog.c:12046 +#: access/transam/xlog.c:12060 #, c-format msgid "trigger file found: %s" msgstr "trovato il file trigger: %s" -#: access/transam/xlog.c:12055 +#: access/transam/xlog.c:12069 #, c-format msgid "could not stat trigger file \"%s\": %m" msgstr "non è stato possibile ottenere informazioni sul file trigger \"%s\": %m" @@ -2788,9 +2625,9 @@ msgid "%s \"%s\": %s" msgstr "%s \"%s\": %s" #: access/transam/xlogarchive.c:457 postmaster/syslogger.c:1415 -#: replication/logical/snapbuild.c:1645 replication/slot.c:534 -#: replication/slot.c:1131 replication/slot.c:1245 storage/file/fd.c:642 -#: storage/file/fd.c:737 utils/time/snapmgr.c:1306 +#: replication/logical/snapbuild.c:1645 replication/slot.c:590 +#: replication/slot.c:1190 replication/slot.c:1304 storage/file/fd.c:642 +#: storage/file/fd.c:737 utils/time/snapmgr.c:1318 #, c-format msgid "could not rename file \"%s\" to \"%s\": %m" msgstr "non è stato possibile rinominare il file \"%s\" in \"%s\": %m" @@ -2816,7 +2653,7 @@ msgid "a backup is already in progress in this session" msgstr "c'è già un backup in corso in questa sessione" #: access/transam/xlogfuncs.c:92 commands/tablespace.c:703 -#: commands/tablespace.c:713 postmaster/postmaster.c:1448 +#: commands/tablespace.c:713 postmaster/postmaster.c:1458 #: replication/basebackup.c:368 replication/basebackup.c:708 #: storage/file/copydir.c:53 storage/file/copydir.c:96 storage/file/fd.c:2420 #: storage/file/fd.c:3019 storage/ipc/dsm.c:301 utils/adt/genfile.c:440 @@ -2836,29 +2673,29 @@ msgid "Did you mean to use pg_stop_backup('f')?" msgstr "Forse intendevi usare pg_stop_backup('f')?" 
#: access/transam/xlogfuncs.c:205 commands/event_trigger.c:1471 -#: commands/event_trigger.c:2022 commands/extension.c:1894 -#: commands/extension.c:2003 commands/extension.c:2227 commands/prepare.c:721 -#: executor/execExpr.c:2106 executor/execSRF.c:688 executor/functions.c:1028 -#: foreign/foreign.c:488 libpq/hba.c:2563 replication/logical/launcher.c:923 -#: replication/logical/logicalfuncs.c:176 replication/logical/origin.c:1387 -#: replication/slotfuncs.c:197 replication/walsender.c:3154 -#: utils/adt/jsonfuncs.c:1688 utils/adt/jsonfuncs.c:1818 -#: utils/adt/jsonfuncs.c:2006 utils/adt/jsonfuncs.c:2133 -#: utils/adt/jsonfuncs.c:3473 utils/adt/pgstatfuncs.c:456 -#: utils/adt/pgstatfuncs.c:557 utils/fmgr/funcapi.c:62 utils/misc/guc.c:8538 -#: utils/mmgr/portalmem.c:1053 +#: commands/event_trigger.c:2022 commands/extension.c:1895 +#: commands/extension.c:2004 commands/extension.c:2228 commands/prepare.c:721 +#: executor/execExpr.c:2121 executor/execSRF.c:690 executor/functions.c:1029 +#: foreign/foreign.c:488 libpq/hba.c:2563 replication/logical/launcher.c:1026 +#: replication/logical/logicalfuncs.c:176 replication/logical/origin.c:1426 +#: replication/slotfuncs.c:197 replication/walsender.c:3166 +#: utils/adt/jsonfuncs.c:1689 utils/adt/jsonfuncs.c:1819 +#: utils/adt/jsonfuncs.c:2007 utils/adt/jsonfuncs.c:2134 +#: utils/adt/jsonfuncs.c:3489 utils/adt/pgstatfuncs.c:456 +#: utils/adt/pgstatfuncs.c:557 utils/fmgr/funcapi.c:62 utils/misc/guc.c:8549 +#: utils/mmgr/portalmem.c:1067 #, c-format msgid "set-valued function called in context that cannot accept a set" msgstr "la funzione che restituisce insiemi è chiamata in un contesto che non può accettare un insieme" #: access/transam/xlogfuncs.c:209 commands/event_trigger.c:1475 -#: commands/event_trigger.c:2026 commands/extension.c:1898 -#: commands/extension.c:2007 commands/extension.c:2231 commands/prepare.c:725 -#: foreign/foreign.c:493 libpq/hba.c:2567 replication/logical/launcher.c:927 -#: replication/logical/logicalfuncs.c:180 replication/logical/origin.c:1391 -#: replication/slotfuncs.c:201 replication/walsender.c:3158 +#: commands/event_trigger.c:2026 commands/extension.c:1899 +#: commands/extension.c:2008 commands/extension.c:2232 commands/prepare.c:725 +#: foreign/foreign.c:493 libpq/hba.c:2567 replication/logical/launcher.c:1030 +#: replication/logical/logicalfuncs.c:180 replication/logical/origin.c:1430 +#: replication/slotfuncs.c:201 replication/walsender.c:3170 #: utils/adt/pgstatfuncs.c:460 utils/adt/pgstatfuncs.c:561 -#: utils/misc/guc.c:8542 utils/misc/pg_config.c:44 utils/mmgr/portalmem.c:1057 +#: utils/misc/guc.c:8553 utils/misc/pg_config.c:44 utils/mmgr/portalmem.c:1071 #, c-format msgid "materialize mode required, but it is not allowed in this context" msgstr "necessaria modalità materializzata, ma non ammessa in questo contesto" @@ -3040,23 +2877,23 @@ msgstr "record con lunghezza non valida a %X/%X" msgid "invalid compressed image at %X/%X, block %d" msgstr "immagine compressa non valida a %X/%X, blocco %d" -#: access/transam/xlogutils.c:747 replication/walsender.c:2419 +#: access/transam/xlogutils.c:747 replication/walsender.c:2416 #, c-format msgid "could not read from log segment %s, offset %u, length %lu: %m" msgstr "lettura del segmento di log %s, posizione %u, lunghezza %lu fallita: %m" -#: bootstrap/bootstrap.c:272 postmaster/postmaster.c:815 tcop/postgres.c:3495 +#: bootstrap/bootstrap.c:272 postmaster/postmaster.c:819 tcop/postgres.c:3508 #, c-format msgid "--%s requires a value" msgstr "--%s richiede un valore" 
-#: bootstrap/bootstrap.c:277 postmaster/postmaster.c:820 tcop/postgres.c:3500 +#: bootstrap/bootstrap.c:277 postmaster/postmaster.c:824 tcop/postgres.c:3513 #, c-format msgid "-c %s requires a value" msgstr "-c %s richiede un valore" -#: bootstrap/bootstrap.c:288 postmaster/postmaster.c:832 -#: postmaster/postmaster.c:845 +#: bootstrap/bootstrap.c:288 postmaster/postmaster.c:836 +#: postmaster/postmaster.c:849 #, c-format msgid "Try \"%s --help\" for more information.\n" msgstr "Prova \"%s --help\" per maggiori informazioni.\n" @@ -3177,51 +3014,51 @@ msgid "column privileges are only valid for relations" msgstr "i privilegi della colonna sono validi solo per le relazioni" #: catalog/aclchk.c:696 catalog/aclchk.c:3926 catalog/aclchk.c:4708 -#: catalog/objectaddress.c:929 catalog/pg_largeobject.c:111 +#: catalog/objectaddress.c:928 catalog/pg_largeobject.c:111 #: storage/large_object/inv_api.c:291 #, c-format msgid "large object %u does not exist" msgstr "il large object %u non esiste" -#: catalog/aclchk.c:885 catalog/aclchk.c:894 commands/collationcmds.c:106 -#: commands/copy.c:1040 commands/copy.c:1060 commands/copy.c:1069 -#: commands/copy.c:1078 commands/copy.c:1087 commands/copy.c:1096 -#: commands/copy.c:1105 commands/copy.c:1114 commands/copy.c:1123 -#: commands/copy.c:1141 commands/copy.c:1157 commands/copy.c:1177 -#: commands/copy.c:1194 commands/dbcommands.c:155 commands/dbcommands.c:164 +#: catalog/aclchk.c:885 catalog/aclchk.c:894 commands/collationcmds.c:114 +#: commands/copy.c:1042 commands/copy.c:1062 commands/copy.c:1071 +#: commands/copy.c:1080 commands/copy.c:1089 commands/copy.c:1098 +#: commands/copy.c:1107 commands/copy.c:1116 commands/copy.c:1125 +#: commands/copy.c:1143 commands/copy.c:1159 commands/copy.c:1179 +#: commands/copy.c:1196 commands/dbcommands.c:155 commands/dbcommands.c:164 #: commands/dbcommands.c:173 commands/dbcommands.c:182 #: commands/dbcommands.c:191 commands/dbcommands.c:200 #: commands/dbcommands.c:209 commands/dbcommands.c:218 #: commands/dbcommands.c:227 commands/dbcommands.c:1427 #: commands/dbcommands.c:1436 commands/dbcommands.c:1445 -#: commands/dbcommands.c:1454 commands/extension.c:1677 -#: commands/extension.c:1687 commands/extension.c:1697 -#: commands/extension.c:1707 commands/extension.c:2947 +#: commands/dbcommands.c:1454 commands/extension.c:1678 +#: commands/extension.c:1688 commands/extension.c:1698 +#: commands/extension.c:1708 commands/extension.c:2949 #: commands/foreigncmds.c:537 commands/foreigncmds.c:546 #: commands/functioncmds.c:526 commands/functioncmds.c:643 #: commands/functioncmds.c:652 commands/functioncmds.c:661 -#: commands/functioncmds.c:670 commands/functioncmds.c:2077 -#: commands/functioncmds.c:2085 commands/publicationcmds.c:90 -#: commands/sequence.c:1288 commands/sequence.c:1297 commands/sequence.c:1306 -#: commands/sequence.c:1315 commands/sequence.c:1324 commands/sequence.c:1333 -#: commands/sequence.c:1342 commands/sequence.c:1351 commands/sequence.c:1360 -#: commands/subscriptioncmds.c:106 commands/subscriptioncmds.c:116 -#: commands/subscriptioncmds.c:126 commands/subscriptioncmds.c:136 -#: commands/subscriptioncmds.c:150 commands/subscriptioncmds.c:161 -#: commands/tablecmds.c:5951 commands/typecmds.c:298 commands/typecmds.c:1375 -#: commands/typecmds.c:1384 commands/typecmds.c:1392 commands/typecmds.c:1400 -#: commands/typecmds.c:1408 commands/user.c:135 commands/user.c:149 -#: commands/user.c:158 commands/user.c:167 commands/user.c:176 -#: commands/user.c:185 commands/user.c:194 commands/user.c:203 
-#: commands/user.c:212 commands/user.c:221 commands/user.c:230 -#: commands/user.c:239 commands/user.c:248 commands/user.c:533 -#: commands/user.c:541 commands/user.c:549 commands/user.c:557 -#: commands/user.c:565 commands/user.c:573 commands/user.c:581 -#: commands/user.c:589 commands/user.c:598 commands/user.c:606 -#: commands/user.c:614 parser/parse_utilcmd.c:398 -#: replication/pgoutput/pgoutput.c:107 replication/pgoutput/pgoutput.c:128 -#: replication/walsender.c:807 replication/walsender.c:818 -#: replication/walsender.c:828 +#: commands/functioncmds.c:670 commands/functioncmds.c:2097 +#: commands/functioncmds.c:2105 commands/publicationcmds.c:90 +#: commands/sequence.c:1265 commands/sequence.c:1275 commands/sequence.c:1285 +#: commands/sequence.c:1295 commands/sequence.c:1305 commands/sequence.c:1315 +#: commands/sequence.c:1325 commands/sequence.c:1335 commands/sequence.c:1345 +#: commands/subscriptioncmds.c:110 commands/subscriptioncmds.c:120 +#: commands/subscriptioncmds.c:130 commands/subscriptioncmds.c:140 +#: commands/subscriptioncmds.c:154 commands/subscriptioncmds.c:165 +#: commands/subscriptioncmds.c:179 commands/tablecmds.c:5973 +#: commands/typecmds.c:298 commands/typecmds.c:1396 commands/typecmds.c:1405 +#: commands/typecmds.c:1413 commands/typecmds.c:1421 commands/typecmds.c:1429 +#: commands/user.c:134 commands/user.c:148 commands/user.c:157 +#: commands/user.c:166 commands/user.c:175 commands/user.c:184 +#: commands/user.c:193 commands/user.c:202 commands/user.c:211 +#: commands/user.c:220 commands/user.c:229 commands/user.c:238 +#: commands/user.c:247 commands/user.c:555 commands/user.c:563 +#: commands/user.c:571 commands/user.c:579 commands/user.c:587 +#: commands/user.c:595 commands/user.c:603 commands/user.c:611 +#: commands/user.c:620 commands/user.c:628 commands/user.c:636 +#: parser/parse_utilcmd.c:396 replication/pgoutput/pgoutput.c:108 +#: replication/pgoutput/pgoutput.c:129 replication/walsender.c:800 +#: replication/walsender.c:811 replication/walsender.c:821 #, c-format msgid "conflicting or redundant options" msgstr "opzioni contraddittorie o ridondanti" @@ -3236,25 +3073,25 @@ msgstr "i privilegi predefiniti non possono essere impostati sulle colonne" msgid "cannot use IN SCHEMA clause when using GRANT/REVOKE ON SCHEMAS" msgstr "non è possibile usare la clausola IN SCHEMA usando GRANT/REVOKE ON SCHEMAS" -#: catalog/aclchk.c:1521 catalog/objectaddress.c:1390 commands/analyze.c:390 -#: commands/copy.c:4670 commands/sequence.c:1731 commands/tablecmds.c:5599 -#: commands/tablecmds.c:5746 commands/tablecmds.c:5803 -#: commands/tablecmds.c:5876 commands/tablecmds.c:5970 -#: commands/tablecmds.c:6029 commands/tablecmds.c:6154 -#: commands/tablecmds.c:6208 commands/tablecmds.c:6300 -#: commands/tablecmds.c:6456 commands/tablecmds.c:8685 -#: commands/tablecmds.c:8961 commands/tablecmds.c:9396 commands/trigger.c:758 -#: parser/analyze.c:2310 parser/parse_relation.c:2699 -#: parser/parse_relation.c:2761 parser/parse_target.c:1002 -#: parser/parse_type.c:127 utils/adt/acl.c:2823 utils/adt/ruleutils.c:2349 +#: catalog/aclchk.c:1521 catalog/objectaddress.c:1389 commands/analyze.c:399 +#: commands/copy.c:4774 commands/sequence.c:1700 commands/tablecmds.c:5621 +#: commands/tablecmds.c:5768 commands/tablecmds.c:5825 +#: commands/tablecmds.c:5898 commands/tablecmds.c:5992 +#: commands/tablecmds.c:6051 commands/tablecmds.c:6176 +#: commands/tablecmds.c:6230 commands/tablecmds.c:6322 +#: commands/tablecmds.c:6478 commands/tablecmds.c:8707 +#: commands/tablecmds.c:8983 
commands/tablecmds.c:9418 commands/trigger.c:817 +#: parser/analyze.c:2310 parser/parse_relation.c:2733 +#: parser/parse_relation.c:2795 parser/parse_target.c:1002 +#: parser/parse_type.c:127 utils/adt/acl.c:2825 utils/adt/ruleutils.c:2356 #, c-format msgid "column \"%s\" of relation \"%s\" does not exist" msgstr "la colonna \"%s\" della relazione \"%s\" non esiste" -#: catalog/aclchk.c:1787 catalog/objectaddress.c:1230 commands/sequence.c:1146 -#: commands/tablecmds.c:229 commands/tablecmds.c:13053 utils/adt/acl.c:2059 -#: utils/adt/acl.c:2089 utils/adt/acl.c:2121 utils/adt/acl.c:2153 -#: utils/adt/acl.c:2181 utils/adt/acl.c:2211 +#: catalog/aclchk.c:1787 catalog/objectaddress.c:1229 commands/sequence.c:1138 +#: commands/tablecmds.c:229 commands/tablecmds.c:13093 utils/adt/acl.c:2061 +#: utils/adt/acl.c:2091 utils/adt/acl.c:2123 utils/adt/acl.c:2155 +#: utils/adt/acl.c:2183 utils/adt/acl.c:2213 #, c-format msgid "\"%s\" is not a sequence" msgstr "\"%s\" non è una sequenza" @@ -3299,7 +3136,7 @@ msgstr "non è possibile impostare privilegi su tipi array" msgid "Set the privileges of the element type instead." msgstr "Puoi impostare i privilegi del tipo dell'elemento." -#: catalog/aclchk.c:3127 catalog/objectaddress.c:1520 commands/typecmds.c:3165 +#: catalog/aclchk.c:3127 catalog/objectaddress.c:1519 #, c-format msgid "\"%s\" is not a domain" msgstr "\"%s\" non è un dominio" @@ -3319,9 +3156,9 @@ msgstr "permesso negato per la colonna %s" msgid "permission denied for relation %s" msgstr "permesso negato per la relazione %s" -#: catalog/aclchk.c:3300 commands/sequence.c:608 commands/sequence.c:842 -#: commands/sequence.c:884 commands/sequence.c:925 commands/sequence.c:1822 -#: commands/sequence.c:1886 +#: catalog/aclchk.c:3300 commands/sequence.c:600 commands/sequence.c:834 +#: commands/sequence.c:876 commands/sequence.c:917 commands/sequence.c:1791 +#: commands/sequence.c:1855 #, c-format msgid "permission denied for sequence %s" msgstr "permesso negato per la sequenza %s" @@ -3597,7 +3434,7 @@ msgstr "il wrapper di dati esterni con OID %u non esiste" msgid "foreign server with OID %u does not exist" msgstr "il server esterno con OID %u non esiste" -#: catalog/aclchk.c:4246 catalog/aclchk.c:4585 utils/cache/typcache.c:238 +#: catalog/aclchk.c:4246 catalog/aclchk.c:4585 utils/cache/typcache.c:240 #, c-format msgid "type with OID %u does not exist" msgstr "il tipo con OID %u non esiste" @@ -3632,7 +3469,7 @@ msgstr "la configurazione di ricerca di testo con OID %u non esiste" msgid "event trigger with OID %u does not exist" msgstr "il trigger di evento con OID %u non esiste" -#: catalog/aclchk.c:5003 commands/collationcmds.c:319 +#: catalog/aclchk.c:5003 commands/collationcmds.c:348 #, c-format msgid "collation with OID %u does not exist" msgstr "l'ordinamento con OID %u non esiste" @@ -3652,7 +3489,7 @@ msgstr "l'estensione con OID %u non esiste" msgid "publication with OID %u does not exist" msgstr "la pubblicazione con OID %u non esiste" -#: catalog/aclchk.c:5123 commands/subscriptioncmds.c:1057 +#: catalog/aclchk.c:5123 commands/subscriptioncmds.c:1098 #, c-format msgid "subscription with OID %u does not exist" msgstr "la sottoscrizione con OID %u non esiste" @@ -3745,13 +3582,13 @@ msgstr "permesso di creare \"%s.%s\" negato" msgid "System catalog modifications are currently disallowed." msgstr "Le modifiche al catalogo di sistema non sono attualmente consentite." 
-#: catalog/heap.c:421 commands/tablecmds.c:1630 commands/tablecmds.c:2140 -#: commands/tablecmds.c:5203 +#: catalog/heap.c:421 commands/tablecmds.c:1649 commands/tablecmds.c:2159 +#: commands/tablecmds.c:5225 #, c-format msgid "tables can have at most %d columns" msgstr "le tabelle possono avere al massimo %d colonne" -#: catalog/heap.c:438 commands/tablecmds.c:5462 +#: catalog/heap.c:438 commands/tablecmds.c:5484 #, c-format msgid "column name \"%s\" conflicts with a system column name" msgstr "il nome della colonna \"%s\" è in conflitto con il nome di una colonna di sistema" @@ -3777,22 +3614,23 @@ msgid "no collation was derived for column \"%s\" with collatable type %s" msgstr "nessun ordinamento è stato derivato per la colonna \"%s\" con tipo ordinabile %s" #: catalog/heap.c:581 commands/createas.c:204 commands/createas.c:501 -#: commands/indexcmds.c:1136 commands/tablecmds.c:13319 commands/view.c:103 -#: regex/regc_pg_locale.c:263 utils/adt/formatting.c:1547 -#: utils/adt/formatting.c:1670 utils/adt/formatting.c:1794 utils/adt/like.c:184 -#: utils/adt/selfuncs.c:5526 utils/adt/varlena.c:1417 utils/adt/varlena.c:1862 +#: commands/indexcmds.c:1149 commands/tablecmds.c:13389 commands/view.c:103 +#: regex/regc_pg_locale.c:263 utils/adt/formatting.c:1546 +#: utils/adt/formatting.c:1670 utils/adt/formatting.c:1795 +#: utils/adt/like.c:184 utils/adt/selfuncs.c:5563 utils/adt/varlena.c:1417 +#: utils/adt/varlena.c:1854 #, c-format msgid "Use the COLLATE clause to set the collation explicitly." msgstr "Usa la clausola COLLATE per impostare esplicitamente l'ordinamento." -#: catalog/heap.c:1067 catalog/index.c:807 commands/tablecmds.c:2930 +#: catalog/heap.c:1067 catalog/index.c:806 commands/tablecmds.c:2943 #, c-format msgid "relation \"%s\" already exists" msgstr "la relazione \"%s\" esiste già" -#: catalog/heap.c:1083 catalog/pg_type.c:410 catalog/pg_type.c:717 +#: catalog/heap.c:1083 catalog/pg_type.c:410 catalog/pg_type.c:732 #: commands/typecmds.c:239 commands/typecmds.c:788 commands/typecmds.c:1139 -#: commands/typecmds.c:1350 commands/typecmds.c:2106 +#: commands/typecmds.c:1371 commands/typecmds.c:2127 #, c-format msgid "type \"%s\" already exists" msgstr "il tipo \"%s\" esiste già" @@ -3817,7 +3655,7 @@ msgstr "non si può aggiungere un vincolo NO INHERIT alla tabella partizionata \ msgid "check constraint \"%s\" already exists" msgstr "il vincolo di controllo \"%s\" esiste già" -#: catalog/heap.c:2504 catalog/pg_constraint.c:649 commands/tablecmds.c:6815 +#: catalog/heap.c:2504 catalog/pg_constraint.c:649 commands/tablecmds.c:6838 #, c-format msgid "constraint \"%s\" for relation \"%s\" already exists" msgstr "il vincolo \"%s\" per la relazione \"%s\" esiste già" @@ -3847,14 +3685,14 @@ msgstr "unione del vincolo \"%s\" con una definizione ereditata" msgid "cannot use column references in default expression" msgstr "non si possono usare riferimenti a colonne nell'espressione predefinita" -#: catalog/heap.c:2678 rewrite/rewriteHandler.c:1140 +#: catalog/heap.c:2678 rewrite/rewriteHandler.c:1171 #, c-format msgid "column \"%s\" is of type %s but default expression is of type %s" msgstr "la colonna \"%s\" è di tipo %s ma l'espressione predefinita è di tipo %s" #: catalog/heap.c:2683 commands/prepare.c:384 parser/parse_node.c:430 #: parser/parse_target.c:590 parser/parse_target.c:840 -#: parser/parse_target.c:850 rewrite/rewriteHandler.c:1145 +#: parser/parse_target.c:850 rewrite/rewriteHandler.c:1176 #, c-format msgid "You will need to rewrite or cast the expression." 
msgstr "Devi riscrivere o convertire il tipo dell'espressione" @@ -3889,70 +3727,64 @@ msgstr "La tabella \"%s\" referenzia \"%s\"." msgid "Truncate table \"%s\" at the same time, or use TRUNCATE ... CASCADE." msgstr "Troncare la tabella \"%s\" nello stesso tempo o usare TRUNCATE ... CASCADE." -#: catalog/index.c:210 parser/parse_utilcmd.c:1662 parser/parse_utilcmd.c:1748 +#: catalog/index.c:213 parser/parse_utilcmd.c:1672 parser/parse_utilcmd.c:1758 #, c-format msgid "multiple primary keys for table \"%s\" are not allowed" msgstr "non è possibile avere più di una chiave primaria per la tabella \"%s\"" -#: catalog/index.c:228 +#: catalog/index.c:231 #, c-format msgid "primary keys cannot be expressions" msgstr "le chiavi primarie non possono essere delle espressioni" -#: catalog/index.c:757 catalog/index.c:1175 +#: catalog/index.c:756 catalog/index.c:1174 #, c-format msgid "user-defined indexes on system catalog tables are not supported" msgstr "non sono supportati indici definiti dall'utente sulle tabelle del catalogo di sistema" -#: catalog/index.c:767 +#: catalog/index.c:766 #, c-format msgid "concurrent index creation on system catalog tables is not supported" msgstr "la creazione concorrente di indici sulle tabelle del catalogo di sistema non è supportata" -#: catalog/index.c:785 +#: catalog/index.c:784 #, c-format msgid "shared indexes cannot be created after initdb" msgstr "indici condivisi non possono essere creati dopo initdb" -#: catalog/index.c:799 commands/createas.c:250 commands/sequence.c:152 -#: parser/parse_utilcmd.c:198 +#: catalog/index.c:798 commands/createas.c:250 commands/sequence.c:152 +#: parser/parse_utilcmd.c:202 #, c-format msgid "relation \"%s\" already exists, skipping" msgstr "la relazione \"%s\" esiste già, saltata" -#: catalog/index.c:835 +#: catalog/index.c:834 #, c-format msgid "pg_class index OID value not set when in binary upgrade mode" msgstr "valore OID indice pg_class non impostato in modalità di aggiornamento binaria" -#: catalog/index.c:1436 +#: catalog/index.c:1435 #, c-format msgid "DROP INDEX CONCURRENTLY must be first action in transaction" msgstr "DROP INDEX CONCURRENTLY deve essere la prima azione della transazione" -#: catalog/index.c:2020 +#: catalog/index.c:2023 #, c-format msgid "building index \"%s\" on table \"%s\"" msgstr "creazione dell'indice \"%s\" sulla tabella \"%s\"" -#: catalog/index.c:3332 +#: catalog/index.c:3335 #, c-format msgid "cannot reindex temporary tables of other sessions" msgstr "non è possibile reindicizzare le tabelle temporanee di altre sessioni" -#: catalog/index.c:3463 +#: catalog/index.c:3466 #, c-format msgid "index \"%s\" was reindexed" msgstr "l'indice \"%s\" è stato reindicizzato" -#: catalog/index.c:3465 commands/vacuumlazy.c:1356 commands/vacuumlazy.c:1432 -#: commands/vacuumlazy.c:1621 commands/vacuumlazy.c:1831 -#, c-format -msgid "%s." -msgstr "%s." 
- #: catalog/namespace.c:235 catalog/namespace.c:433 catalog/namespace.c:527 -#: commands/trigger.c:4800 +#: commands/trigger.c:5148 #, c-format msgid "cross-database references are not implemented: \"%s.%s.%s\"" msgstr "i riferimenti tra database diversi non sono implementati: \"%s.%s.%s\"" @@ -3977,14 +3809,14 @@ msgstr "lock della relazione \"%s\" fallito" msgid "relation \"%s.%s\" does not exist" msgstr "la relazione \"%s.%s\" non esiste" -#: catalog/namespace.c:405 parser/parse_relation.c:1177 -#: parser/parse_relation.c:1185 +#: catalog/namespace.c:405 parser/parse_relation.c:1171 +#: parser/parse_relation.c:1179 #, c-format msgid "relation \"%s\" does not exist" msgstr "la relazione \"%s\" non esiste" -#: catalog/namespace.c:473 catalog/namespace.c:2949 commands/extension.c:1465 -#: commands/extension.c:1471 +#: catalog/namespace.c:473 catalog/namespace.c:2992 commands/extension.c:1466 +#: commands/extension.c:1472 #, c-format msgid "no schema has been selected to create in" msgstr "nessuna schema selezionato per crearci dentro" @@ -4004,315 +3836,315 @@ msgstr "non si possono creare relazioni temporanee in schemi non temporanei" msgid "only temporary relations may be created in temporary schemas" msgstr "solo relazioni temporanee possono essere create in schemi temporanei" -#: catalog/namespace.c:2139 +#: catalog/namespace.c:2182 #, c-format msgid "statistics object \"%s\" does not exist" msgstr "la statistica \"%s\" non esiste" -#: catalog/namespace.c:2262 +#: catalog/namespace.c:2305 #, c-format msgid "text search parser \"%s\" does not exist" msgstr "l'analizzatore di ricerca di testo \"%s\" non esiste" -#: catalog/namespace.c:2388 +#: catalog/namespace.c:2431 #, c-format msgid "text search dictionary \"%s\" does not exist" msgstr "il dizionario di ricerca di testo \"%s\" non esiste" -#: catalog/namespace.c:2515 +#: catalog/namespace.c:2558 #, c-format msgid "text search template \"%s\" does not exist" msgstr "il modello di ricerca di testo \"%s\" non esiste" -#: catalog/namespace.c:2641 commands/tsearchcmds.c:1185 +#: catalog/namespace.c:2684 commands/tsearchcmds.c:1185 #: utils/cache/ts_cache.c:612 #, c-format msgid "text search configuration \"%s\" does not exist" msgstr "la configurazione di ricerca di testo \"%s\" non esiste" -#: catalog/namespace.c:2754 parser/parse_expr.c:791 parser/parse_target.c:1192 +#: catalog/namespace.c:2797 parser/parse_expr.c:789 parser/parse_target.c:1192 #, c-format msgid "cross-database references are not implemented: %s" msgstr "i riferimenti tra database diversi non sono implementati: %s" -#: catalog/namespace.c:2760 parser/parse_expr.c:798 parser/parse_target.c:1199 -#: gram.y:14320 gram.y:15741 +#: catalog/namespace.c:2803 parser/parse_expr.c:796 parser/parse_target.c:1199 +#: gram.y:14300 gram.y:15721 #, c-format msgid "improper qualified name (too many dotted names): %s" msgstr "nome qualificato improprio (troppi nomi puntati): %s" -#: catalog/namespace.c:2891 +#: catalog/namespace.c:2934 #, c-format msgid "cannot move objects into or out of temporary schemas" msgstr "non posso spostare oggetti dentro o fuori gli schemi temporanei" -#: catalog/namespace.c:2897 +#: catalog/namespace.c:2940 #, c-format msgid "cannot move objects into or out of TOAST schema" msgstr "non posso spostare oggetti dentro o fuori lo schema TOAST" -#: catalog/namespace.c:2970 commands/schemacmds.c:256 commands/schemacmds.c:334 -#: commands/tablecmds.c:872 +#: catalog/namespace.c:3013 commands/schemacmds.c:256 +#: commands/schemacmds.c:334 commands/tablecmds.c:891 
#, c-format msgid "schema \"%s\" does not exist" msgstr "lo schema \"%s\" non esiste" -#: catalog/namespace.c:3001 +#: catalog/namespace.c:3044 #, c-format msgid "improper relation name (too many dotted names): %s" msgstr "nome di relazione improprio (troppi nomi puntati): %s" -#: catalog/namespace.c:3511 +#: catalog/namespace.c:3538 #, c-format msgid "collation \"%s\" for encoding \"%s\" does not exist" msgstr "l'ordinamento \"%s\" per la codifica \"%s\" non esiste" -#: catalog/namespace.c:3566 +#: catalog/namespace.c:3593 #, c-format msgid "conversion \"%s\" does not exist" msgstr "la conversione \"%s\" non esiste" -#: catalog/namespace.c:3774 +#: catalog/namespace.c:3801 #, c-format msgid "permission denied to create temporary tables in database \"%s\"" msgstr "permesso di creare tabelle temporanee nel database \"%s\" negato" -#: catalog/namespace.c:3790 +#: catalog/namespace.c:3817 #, c-format msgid "cannot create temporary tables during recovery" msgstr "non è possibile creare tabelle temporanee durante il recupero" -#: catalog/namespace.c:3796 +#: catalog/namespace.c:3823 #, c-format -msgid "cannot create temporary tables in parallel mode" -msgstr "non è possibile creare tabelle temporanee in modalità parallela" +msgid "cannot create temporary tables during a parallel operation" +msgstr "non è possibile creare tabelle temporanee durante un'operazione parallela" -#: catalog/namespace.c:4045 commands/tablespace.c:1169 commands/variable.c:64 -#: utils/misc/guc.c:9979 utils/misc/guc.c:10057 +#: catalog/namespace.c:4072 commands/tablespace.c:1169 commands/variable.c:64 +#: utils/misc/guc.c:9983 utils/misc/guc.c:10061 #, c-format msgid "List syntax is invalid." msgstr "La sintassi della lista non è valida." -#: catalog/objectaddress.c:1238 catalog/pg_publication.c:66 +#: catalog/objectaddress.c:1237 catalog/pg_publication.c:66 #: commands/lockcmds.c:93 commands/policy.c:94 commands/policy.c:391 -#: commands/policy.c:480 commands/tablecmds.c:223 commands/tablecmds.c:265 -#: commands/tablecmds.c:1488 commands/tablecmds.c:4713 -#: commands/tablecmds.c:8801 +#: commands/policy.c:481 commands/tablecmds.c:223 commands/tablecmds.c:265 +#: commands/tablecmds.c:1507 commands/tablecmds.c:4722 +#: commands/tablecmds.c:8823 #, c-format msgid "\"%s\" is not a table" msgstr "\"%s\" non è una tabella" -#: catalog/objectaddress.c:1245 commands/tablecmds.c:235 -#: commands/tablecmds.c:4743 commands/tablecmds.c:13058 commands/view.c:141 +#: catalog/objectaddress.c:1244 commands/tablecmds.c:235 +#: commands/tablecmds.c:4752 commands/tablecmds.c:13098 commands/view.c:141 #, c-format msgid "\"%s\" is not a view" msgstr "\"%s\" non è una vista" -#: catalog/objectaddress.c:1252 commands/matview.c:174 commands/tablecmds.c:241 -#: commands/tablecmds.c:13063 +#: catalog/objectaddress.c:1251 commands/matview.c:174 +#: commands/tablecmds.c:241 commands/tablecmds.c:13103 #, c-format msgid "\"%s\" is not a materialized view" msgstr "\"%s\" non è una vista materializzata" -#: catalog/objectaddress.c:1259 commands/tablecmds.c:259 -#: commands/tablecmds.c:4746 commands/tablecmds.c:13068 +#: catalog/objectaddress.c:1258 commands/tablecmds.c:259 +#: commands/tablecmds.c:4755 commands/tablecmds.c:13108 #, c-format msgid "\"%s\" is not a foreign table" msgstr "\"%s\" non è una tabella esterna" -#: catalog/objectaddress.c:1300 +#: catalog/objectaddress.c:1299 #, c-format msgid "must specify relation and object name" msgstr "occorre specificare tabella e nome dell'oggetto" -#: catalog/objectaddress.c:1376 
catalog/objectaddress.c:1429 +#: catalog/objectaddress.c:1375 catalog/objectaddress.c:1428 #, c-format msgid "column name must be qualified" msgstr "il nome della colonna deve essere qualificato" -#: catalog/objectaddress.c:1472 +#: catalog/objectaddress.c:1471 #, c-format msgid "default value for column \"%s\" of relation \"%s\" does not exist" msgstr "il valore di default per la colonna \"%s\" della relazione \"%s\" non esiste" -#: catalog/objectaddress.c:1509 commands/functioncmds.c:128 -#: commands/tablecmds.c:251 commands/typecmds.c:3233 parser/parse_type.c:226 -#: parser/parse_type.c:255 parser/parse_type.c:794 utils/adt/acl.c:4357 +#: catalog/objectaddress.c:1508 commands/functioncmds.c:128 +#: commands/tablecmds.c:251 commands/typecmds.c:3269 parser/parse_type.c:226 +#: parser/parse_type.c:255 parser/parse_type.c:794 utils/adt/acl.c:4359 #, c-format msgid "type \"%s\" does not exist" msgstr "il tipo \"%s\" non esiste" -#: catalog/objectaddress.c:1626 +#: catalog/objectaddress.c:1625 #, c-format msgid "operator %d (%s, %s) of %s does not exist" msgstr "l'operatore %d (%s, %s) di %s non esiste" -#: catalog/objectaddress.c:1657 +#: catalog/objectaddress.c:1656 #, c-format msgid "function %d (%s, %s) of %s does not exist" msgstr "la funzione %d (%s, %s) di %s non esiste" -#: catalog/objectaddress.c:1708 catalog/objectaddress.c:1734 +#: catalog/objectaddress.c:1707 catalog/objectaddress.c:1733 #, c-format msgid "user mapping for user \"%s\" on server \"%s\" does not exist" msgstr "la mappatura per l'utente \"%s\" sul server \"%s\" non esiste" -#: catalog/objectaddress.c:1723 commands/foreigncmds.c:428 +#: catalog/objectaddress.c:1722 commands/foreigncmds.c:428 #: commands/foreigncmds.c:1004 commands/foreigncmds.c:1377 #: foreign/foreign.c:688 #, c-format msgid "server \"%s\" does not exist" msgstr "il server \"%s\" non esiste" -#: catalog/objectaddress.c:1790 +#: catalog/objectaddress.c:1789 #, c-format msgid "publication relation \"%s\" in publication \"%s\" does not exist" msgstr "la tabella \"%s\" nella pubblicazione \"%s\" non esiste" -#: catalog/objectaddress.c:1852 +#: catalog/objectaddress.c:1851 #, c-format -msgid "unrecognized default ACL object type %c" -msgstr "tipo di oggetto ACL di default %c non riconosciuto" +msgid "unrecognized default ACL object type \"%c\"" +msgstr "tipo di oggetto ACL di default \"%c\" non riconosciuto" -#: catalog/objectaddress.c:1853 +#: catalog/objectaddress.c:1852 #, c-format -msgid "Valid object types are \"r\", \"S\", \"f\", \"T\" and \"s\"." -msgstr "I tipi di oggetti validi sono \"r\", \"S\", \"f\", \"T\" e \"s\"." +msgid "Valid object types are \"%c\", \"%c\", \"%c\", \"%c\", \"%c\"." +msgstr "Gli oggetti validi sono \"%c\", \"%c\", \"%c\", \"%c\", \"%c\"." 
-#: catalog/objectaddress.c:1899 +#: catalog/objectaddress.c:1903 #, c-format msgid "default ACL for user \"%s\" in schema \"%s\" on %s does not exist" msgstr "l'ACL di default per l'utente \"%s\" nello schema \"%s\" su %s non esiste" -#: catalog/objectaddress.c:1904 +#: catalog/objectaddress.c:1908 #, c-format msgid "default ACL for user \"%s\" on %s does not exist" msgstr "l'ACL di default per l'utente \"%s\" su %s non esiste" -#: catalog/objectaddress.c:1931 catalog/objectaddress.c:1989 -#: catalog/objectaddress.c:2044 +#: catalog/objectaddress.c:1935 catalog/objectaddress.c:1993 +#: catalog/objectaddress.c:2048 #, c-format msgid "name or argument lists may not contain nulls" msgstr "il nome o la lista di argomenti non può contenere valori nulli" -#: catalog/objectaddress.c:1965 +#: catalog/objectaddress.c:1969 #, c-format msgid "unsupported object type \"%s\"" msgstr "tipo di oggetto \"%s\" non supportato" -#: catalog/objectaddress.c:1985 catalog/objectaddress.c:2003 -#: catalog/objectaddress.c:2141 +#: catalog/objectaddress.c:1989 catalog/objectaddress.c:2007 +#: catalog/objectaddress.c:2145 #, c-format msgid "name list length must be exactly %d" msgstr "la lunghezza della lista dei nomi dev'essere %d" -#: catalog/objectaddress.c:2007 +#: catalog/objectaddress.c:2011 #, c-format msgid "large object OID may not be null" msgstr "l'OID di large object non può essere nullo" -#: catalog/objectaddress.c:2016 catalog/objectaddress.c:2077 -#: catalog/objectaddress.c:2084 +#: catalog/objectaddress.c:2020 catalog/objectaddress.c:2081 +#: catalog/objectaddress.c:2088 #, c-format msgid "name list length must be at least %d" msgstr "la lunghezza della lista dei nomi deve essere almeno %d" -#: catalog/objectaddress.c:2070 catalog/objectaddress.c:2090 +#: catalog/objectaddress.c:2074 catalog/objectaddress.c:2094 #, c-format msgid "argument list length must be exactly %d" msgstr "la lunghezza della lista degli argomenti deve essere %d" -#: catalog/objectaddress.c:2316 libpq/be-fsstubs.c:350 +#: catalog/objectaddress.c:2320 libpq/be-fsstubs.c:350 #, c-format msgid "must be owner of large object %u" msgstr "occorre essere proprietari del large object %u" -#: catalog/objectaddress.c:2331 commands/functioncmds.c:1420 +#: catalog/objectaddress.c:2335 commands/functioncmds.c:1440 #, c-format msgid "must be owner of type %s or type %s" msgstr "occorre essere proprietari del tipo %s o del tipo %s" -#: catalog/objectaddress.c:2381 catalog/objectaddress.c:2398 +#: catalog/objectaddress.c:2385 catalog/objectaddress.c:2402 #, c-format msgid "must be superuser" msgstr "occorre essere superutenti" -#: catalog/objectaddress.c:2388 +#: catalog/objectaddress.c:2392 #, c-format msgid "must have CREATEROLE privilege" msgstr "occorre avere privilegio CREATEROLE" -#: catalog/objectaddress.c:2467 +#: catalog/objectaddress.c:2471 #, c-format msgid "unrecognized object type \"%s\"" msgstr "tipo di oggetto \"%s\" non riconosciuto" -#: catalog/objectaddress.c:2662 +#: catalog/objectaddress.c:2666 #, c-format msgid " column %s" msgstr " colonna %s" -#: catalog/objectaddress.c:2668 +#: catalog/objectaddress.c:2672 #, c-format msgid "function %s" msgstr "funzione %s" -#: catalog/objectaddress.c:2673 +#: catalog/objectaddress.c:2677 #, c-format msgid "type %s" msgstr "tipo %s" -#: catalog/objectaddress.c:2703 +#: catalog/objectaddress.c:2707 #, c-format msgid "cast from %s to %s" msgstr "conversione da %s a %s" -#: catalog/objectaddress.c:2723 +#: catalog/objectaddress.c:2727 #, c-format msgid "collation %s" msgstr "ordinamento 
%s" -#: catalog/objectaddress.c:2747 +#: catalog/objectaddress.c:2751 #, c-format msgid "constraint %s on %s" msgstr "vincolo %s su %s" -#: catalog/objectaddress.c:2753 +#: catalog/objectaddress.c:2757 #, c-format msgid "constraint %s" msgstr "vincolo %s" -#: catalog/objectaddress.c:2770 +#: catalog/objectaddress.c:2774 #, c-format msgid "conversion %s" msgstr "conversione %s" -#: catalog/objectaddress.c:2807 +#: catalog/objectaddress.c:2811 #, c-format msgid "default for %s" msgstr "predefinito per %s" -#: catalog/objectaddress.c:2816 +#: catalog/objectaddress.c:2820 #, c-format msgid "language %s" msgstr "linguaggio %s" -#: catalog/objectaddress.c:2821 +#: catalog/objectaddress.c:2825 #, c-format msgid "large object %u" msgstr "large object %u" -#: catalog/objectaddress.c:2826 +#: catalog/objectaddress.c:2830 #, c-format msgid "operator %s" msgstr "operatore %s" -#: catalog/objectaddress.c:2858 +#: catalog/objectaddress.c:2862 #, c-format msgid "operator class %s for access method %s" msgstr "classe di operatori %s per il metodo di accesso %s" -#: catalog/objectaddress.c:2881 +#: catalog/objectaddress.c:2885 #, c-format msgid "access method %s" msgstr "metodo di accesso %s" @@ -4321,7 +4153,7 @@ msgstr "metodo di accesso %s" #. first two %s's are data type names, the third %s is the #. description of the operator family, and the last %s is the #. textual form of the operator with arguments. -#: catalog/objectaddress.c:2923 +#: catalog/objectaddress.c:2927 #, c-format msgid "operator %d (%s, %s) of %s: %s" msgstr "operatore %d (%s, %s) della %s: %s" @@ -4330,220 +4162,226 @@ msgstr "operatore %d (%s, %s) della %s: %s" #. are data type names, the third %s is the description of the #. operator family, and the last %s is the textual form of the #. function with arguments. 
-#: catalog/objectaddress.c:2973 +#: catalog/objectaddress.c:2977 #, c-format msgid "function %d (%s, %s) of %s: %s" msgstr "funzione %d (%s, %s) della %s: %s" -#: catalog/objectaddress.c:3013 +#: catalog/objectaddress.c:3017 #, c-format msgid "rule %s on " msgstr "regola %s on " -#: catalog/objectaddress.c:3048 +#: catalog/objectaddress.c:3052 #, c-format msgid "trigger %s on " msgstr "trigger %s su " -#: catalog/objectaddress.c:3065 +#: catalog/objectaddress.c:3069 #, c-format msgid "schema %s" msgstr "schema %s" -#: catalog/objectaddress.c:3082 +#: catalog/objectaddress.c:3086 #, c-format msgid "statistics object %s" msgstr "statistiche %s" -#: catalog/objectaddress.c:3098 +#: catalog/objectaddress.c:3102 #, c-format msgid "text search parser %s" msgstr "analizzatore di ricerca di testo %s" -#: catalog/objectaddress.c:3113 +#: catalog/objectaddress.c:3117 #, c-format msgid "text search dictionary %s" msgstr "dizionario di ricerca di testo %s" -#: catalog/objectaddress.c:3128 +#: catalog/objectaddress.c:3132 #, c-format msgid "text search template %s" msgstr "modello di ricerca di testo %s" -#: catalog/objectaddress.c:3143 +#: catalog/objectaddress.c:3147 #, c-format msgid "text search configuration %s" msgstr "configurazione di ricerca di testo %s" -#: catalog/objectaddress.c:3151 +#: catalog/objectaddress.c:3155 #, c-format msgid "role %s" msgstr "regola %s" -#: catalog/objectaddress.c:3164 +#: catalog/objectaddress.c:3168 #, c-format msgid "database %s" msgstr "database %s" -#: catalog/objectaddress.c:3176 +#: catalog/objectaddress.c:3180 #, c-format msgid "tablespace %s" msgstr "tablespace %s" -#: catalog/objectaddress.c:3185 +#: catalog/objectaddress.c:3189 #, c-format msgid "foreign-data wrapper %s" msgstr "wrapper di dati esterni %s" -#: catalog/objectaddress.c:3194 +#: catalog/objectaddress.c:3198 #, c-format msgid "server %s" msgstr "server %s" -#: catalog/objectaddress.c:3222 +#: catalog/objectaddress.c:3226 #, c-format msgid "user mapping for %s on server %s" msgstr "mappatura utenti per %s sul server %s" -#: catalog/objectaddress.c:3257 +#: catalog/objectaddress.c:3261 #, c-format msgid "default privileges on new relations belonging to role %s" msgstr "privilegi predefiniti sulle nuove relazioni appartenenti al ruolo %s" -#: catalog/objectaddress.c:3262 +#: catalog/objectaddress.c:3266 #, c-format msgid "default privileges on new sequences belonging to role %s" msgstr "privilegi predefiniti sulle nuove sequenze appartenenti al ruolo %s" -#: catalog/objectaddress.c:3267 +#: catalog/objectaddress.c:3271 #, c-format msgid "default privileges on new functions belonging to role %s" msgstr "privilegi predefiniti sulle nuove funzioni appartenenti al ruolo %s" -#: catalog/objectaddress.c:3272 +#: catalog/objectaddress.c:3276 #, c-format msgid "default privileges on new types belonging to role %s" msgstr "privilegi predefiniti sui nuovi tipi appartenenti al ruolo %s" -#: catalog/objectaddress.c:3277 +#: catalog/objectaddress.c:3281 #, c-format msgid "default privileges on new schemas belonging to role %s" msgstr "privilegi predefiniti sui nuovi schemi appartenenti al ruolo %s" -#: catalog/objectaddress.c:3283 +#: catalog/objectaddress.c:3287 #, c-format msgid "default privileges belonging to role %s" msgstr "privilegi predefiniti appartenenti al ruolo %s" -#: catalog/objectaddress.c:3291 +#: catalog/objectaddress.c:3295 #, c-format msgid " in schema %s" msgstr " nello schema %s" -#: catalog/objectaddress.c:3308 +#: catalog/objectaddress.c:3312 #, c-format msgid "extension %s" msgstr 
"estensione %s" -#: catalog/objectaddress.c:3321 +#: catalog/objectaddress.c:3325 #, c-format msgid "event trigger %s" msgstr "trigger di evento %s" -#: catalog/objectaddress.c:3353 +#: catalog/objectaddress.c:3357 #, c-format msgid "policy %s on " msgstr "regola di sicurezza %s su " -#: catalog/objectaddress.c:3364 +#: catalog/objectaddress.c:3368 #, c-format msgid "publication %s" msgstr "pubblicazione %s" -#: catalog/objectaddress.c:3384 +#: catalog/objectaddress.c:3388 #, c-format msgid "publication table %s in publication %s" msgstr "tabella %s nella pubblicazione %s" -#: catalog/objectaddress.c:3392 +#: catalog/objectaddress.c:3396 #, c-format msgid "subscription %s" msgstr "sottoscrizione %s" -#: catalog/objectaddress.c:3410 +#: catalog/objectaddress.c:3414 #, c-format msgid "transform for %s language %s" msgstr "trasformazione per %s linguaggio %s" -#: catalog/objectaddress.c:3471 +#: catalog/objectaddress.c:3475 #, c-format msgid "table %s" msgstr "tabella %s" -#: catalog/objectaddress.c:3475 +#: catalog/objectaddress.c:3479 #, c-format msgid "index %s" msgstr "indice %s" -#: catalog/objectaddress.c:3479 +#: catalog/objectaddress.c:3483 #, c-format msgid "sequence %s" msgstr "sequenza %s" -#: catalog/objectaddress.c:3483 +#: catalog/objectaddress.c:3487 #, c-format msgid "toast table %s" msgstr "tabella toast %s" -#: catalog/objectaddress.c:3487 +#: catalog/objectaddress.c:3491 #, c-format msgid "view %s" msgstr "vista %s" -#: catalog/objectaddress.c:3491 +#: catalog/objectaddress.c:3495 #, c-format msgid "materialized view %s" msgstr "vista materializzata %s" -#: catalog/objectaddress.c:3495 +#: catalog/objectaddress.c:3499 #, c-format msgid "composite type %s" msgstr "tipo composito %s" -#: catalog/objectaddress.c:3499 +#: catalog/objectaddress.c:3503 #, c-format msgid "foreign table %s" msgstr "tabella esterna %s" -#: catalog/objectaddress.c:3504 +#: catalog/objectaddress.c:3508 #, c-format msgid "relation %s" msgstr "relazione %s" -#: catalog/objectaddress.c:3541 +#: catalog/objectaddress.c:3545 #, c-format msgid "operator family %s for access method %s" msgstr "famiglia di operatori %s per il metodo d'accesso %s" -#: catalog/objectaddress.c:4910 +#: catalog/objectaddress.c:4914 #, c-format msgid "%s in publication %s" msgstr "%s nella pubblicazione %s" -#: catalog/partition.c:741 +#: catalog/partition.c:728 #, c-format -msgid "cannot create range partition with empty range" -msgstr "non è possibile creare una partizione su un intervallo vuoto" +msgid "empty range bound specified for partition \"%s\"" +msgstr "intervallo vuoto specificato come limite per la partizione \"%s\"" -#: catalog/partition.c:835 +#: catalog/partition.c:730 +#, c-format +msgid "Specified lower bound %s is greater than or equal to upper bound %s." +msgstr "Il limite inferiore specificato %s è maggiore o uguale al limite superiore %s." 
+ +#: catalog/partition.c:814 #, c-format msgid "partition \"%s\" would overlap partition \"%s\"" msgstr "la partizione \"%s\" si sovrapporrebbe a \"%s\"" -#: catalog/partition.c:943 catalog/partition.c:1121 commands/analyze.c:1446 -#: commands/tablecmds.c:8863 executor/execExprInterp.c:2837 -#: executor/execMain.c:1928 executor/execMain.c:1975 executor/execMain.c:2017 -#: executor/execMain.c:3279 +#: catalog/partition.c:927 catalog/partition.c:1110 commands/analyze.c:1462 +#: commands/copy.c:2510 commands/tablecmds.c:8885 +#: executor/execExprInterp.c:2853 executor/execMain.c:1907 +#: executor/execMain.c:1985 executor/execMain.c:2033 executor/execMain.c:2143 +#: executor/execMain.c:3322 executor/nodeModifyTable.c:1533 msgid "could not convert row type" msgstr "conversione del tipo riga fallita" @@ -4589,7 +4427,7 @@ msgstr "non si può omettere initval quando la funzione di transizione è strict msgid "return type of inverse transition function %s is not %s" msgstr "il tipo restituito dalla funzione di transizione inversa %s non è %s" -#: catalog/pg_aggregate.c:351 executor/nodeWindowAgg.c:2294 +#: catalog/pg_aggregate.c:351 executor/nodeWindowAgg.c:2298 #, c-format msgid "strictness of aggregate's forward and inverse transition functions must match" msgstr "le ristrettezze della trasformazione diretta ed inversa di un aggregato devono combaciare" @@ -4606,8 +4444,8 @@ msgstr "il tipo restituito dalla funzione di combinazione %s non è %s" #: catalog/pg_aggregate.c:436 #, c-format -msgid "combine function with \"%s\" transition type must not be declared STRICT" -msgstr "una funzione di combinazione con \"%s\" tipi di transizione non può essere dichiarata STRICT" +msgid "combine function with transition type %s must not be declared STRICT" +msgstr "la funzione di combinazione con il tipo di transizione %s non deve essere dichiarata STRICT" #: catalog/pg_aggregate.c:455 #, c-format @@ -4649,12 +4487,12 @@ msgstr "l'implementazione dell'aggregazione mobile restituisce il tipo %s ma l'i msgid "sort operator can only be specified for single-argument aggregates" msgstr "l'operatore di ordinamento può essere specificato sono per aggregati con un solo argomento" -#: catalog/pg_aggregate.c:810 commands/typecmds.c:1698 commands/typecmds.c:1749 -#: commands/typecmds.c:1780 commands/typecmds.c:1803 commands/typecmds.c:1824 -#: commands/typecmds.c:1851 commands/typecmds.c:1878 commands/typecmds.c:1955 -#: commands/typecmds.c:1997 parser/parse_func.c:365 parser/parse_func.c:394 -#: parser/parse_func.c:419 parser/parse_func.c:433 parser/parse_func.c:508 -#: parser/parse_func.c:519 parser/parse_func.c:1958 +#: catalog/pg_aggregate.c:810 commands/typecmds.c:1719 +#: commands/typecmds.c:1770 commands/typecmds.c:1801 commands/typecmds.c:1824 +#: commands/typecmds.c:1845 commands/typecmds.c:1872 commands/typecmds.c:1899 +#: commands/typecmds.c:1976 commands/typecmds.c:2018 parser/parse_func.c:369 +#: parser/parse_func.c:398 parser/parse_func.c:423 parser/parse_func.c:437 +#: parser/parse_func.c:512 parser/parse_func.c:523 parser/parse_func.c:1977 #, c-format msgid "function %s does not exist" msgstr "la funzione %s non esiste" @@ -4674,22 +4512,22 @@ msgstr "la funzione %s deve accettare VARIADIC ANY per essere usata in questo ag msgid "function %s requires run-time type coercion" msgstr "la funzione %s richiede una coercizione di tipo a run-time" -#: catalog/pg_collation.c:85 catalog/pg_collation.c:127 +#: catalog/pg_collation.c:93 catalog/pg_collation.c:140 #, c-format msgid "collation \"%s\" already exists, 
skipping" msgstr "l'ordinamento \"%s\" esiste già, saltato" -#: catalog/pg_collation.c:87 +#: catalog/pg_collation.c:95 #, c-format msgid "collation \"%s\" for encoding \"%s\" already exists, skipping" msgstr "l'ordinamento \"%s\" per l'encoding \"%s\" esiste già, saltato" -#: catalog/pg_collation.c:95 catalog/pg_collation.c:134 +#: catalog/pg_collation.c:103 catalog/pg_collation.c:147 #, c-format msgid "collation \"%s\" already exists" msgstr "l'ordinamento \"%s\" esiste già" -#: catalog/pg_collation.c:97 +#: catalog/pg_collation.c:105 #, c-format msgid "collation \"%s\" for encoding \"%s\" already exists" msgstr "l'ordinamento \"%s\" per la codifica \"%s\" esiste già" @@ -4699,25 +4537,25 @@ msgstr "l'ordinamento \"%s\" per la codifica \"%s\" esiste già" msgid "constraint \"%s\" for domain %s already exists" msgstr "il vincolo \"%s\" per il dominio %s esiste già" -#: catalog/pg_constraint.c:788 +#: catalog/pg_constraint.c:788 catalog/pg_constraint.c:864 #, c-format msgid "table \"%s\" has multiple constraints named \"%s\"" msgstr "la tabella \"%s\" ha più di un vincolo di nome \"%s\"" -#: catalog/pg_constraint.c:800 +#: catalog/pg_constraint.c:800 catalog/pg_constraint.c:898 #, c-format msgid "constraint \"%s\" for table \"%s\" does not exist" msgstr "il vincolo \"%s\" per la tabella \"%s\" non esiste" -#: catalog/pg_constraint.c:846 +#: catalog/pg_constraint.c:944 #, c-format -msgid "domain \"%s\" has multiple constraints named \"%s\"" +msgid "domain %s has multiple constraints named \"%s\"" msgstr "il dominio \"%s\" ha più di un vincolo di nome \"%s\"" -#: catalog/pg_constraint.c:858 +#: catalog/pg_constraint.c:956 #, c-format -msgid "constraint \"%s\" for domain \"%s\" does not exist" -msgstr "il vincolo \"%s\" per la il dominio \"%s\" non esiste" +msgid "constraint \"%s\" for domain %s does not exist" +msgstr "il vincolo \"%s\" per il dominio %s non esiste" #: catalog/pg_conversion.c:66 #, c-format @@ -4729,7 +4567,7 @@ msgstr "la conversione \"%s\" esiste già" msgid "default conversion for %s to %s already exists" msgstr "la conversione predefinita da %s a %s esiste già" -#: catalog/pg_depend.c:163 commands/extension.c:3216 +#: catalog/pg_depend.c:163 commands/extension.c:3218 #, c-format msgid "%s is already a member of extension \"%s\"" msgstr "%s fa già parte dell'estensione \"%s\"" @@ -4839,7 +4677,7 @@ msgstr "l'operatore %s esiste già " msgid "operator cannot be its own negator or sort operator" msgstr "l'operatore non può negare o ordinare se stesso" -#: catalog/pg_proc.c:131 parser/parse_func.c:1982 parser/parse_func.c:2022 +#: catalog/pg_proc.c:131 parser/parse_func.c:2001 parser/parse_func.c:2041 #, c-format msgid "functions cannot have more than %d argument" msgid_plural "functions cannot have more than %d arguments" @@ -4932,12 +4770,12 @@ msgstr "Le funzioni SQL non possono restituire il tipo %s" msgid "SQL functions cannot have arguments of type %s" msgstr "le funzioni SQL non possono avere argomenti di tipo %s" -#: catalog/pg_proc.c:968 executor/functions.c:1428 +#: catalog/pg_proc.c:968 executor/functions.c:1429 #, c-format msgid "SQL function \"%s\"" msgstr "funzione SQL \"%s\"" -#: catalog/pg_publication.c:57 commands/trigger.c:194 commands/trigger.c:360 +#: catalog/pg_publication.c:57 commands/trigger.c:197 #, c-format msgid "\"%s\" is a partitioned table" msgstr "\"%s\" è una tabella partizionata" @@ -4977,12 +4815,12 @@ msgstr "la tabella \"%s\" non può essere replicata" msgid "Temporary and unlogged relations cannot be replicated." 
msgstr "Le relazioni temporanee e non loggate non possono essere replicate." -#: catalog/pg_publication.c:142 +#: catalog/pg_publication.c:166 #, c-format msgid "relation \"%s\" is already member of publication \"%s\"" msgstr "la relazione \"%s\" è già membra della pubblicazione \"%s\"" -#: catalog/pg_publication.c:369 catalog/pg_publication.c:390 +#: catalog/pg_publication.c:393 catalog/pg_publication.c:414 #: commands/publicationcmds.c:401 commands/publicationcmds.c:702 #, c-format msgid "publication \"%s\" does not exist" @@ -5051,8 +4889,8 @@ msgstr "non è possibile eliminare oggetti di proprietà di %s perché richiesti msgid "cannot reassign ownership of objects owned by %s because they are required by the database system" msgstr "non è possibile modificare il proprietario degli oggetti di proprietà di %s perché richiesti dal database" -#: catalog/pg_subscription.c:174 commands/subscriptioncmds.c:622 -#: commands/subscriptioncmds.c:826 commands/subscriptioncmds.c:1026 +#: catalog/pg_subscription.c:176 commands/subscriptioncmds.c:633 +#: commands/subscriptioncmds.c:843 commands/subscriptioncmds.c:1067 #, c-format msgid "subscription \"%s\" does not exist" msgstr "la sottoscrizione \"%s\" non esiste" @@ -5088,13 +4926,13 @@ msgstr "l'allineamento \"%c\" non è valido per il tipi a lunghezza variabile" msgid "fixed-size types must have storage PLAIN" msgstr "i tipi a dimensione fissa devono avere immagazzinamento PLAIN" -#: catalog/pg_type.c:781 +#: catalog/pg_type.c:801 #, c-format msgid "could not form array type name for type \"%s\"" msgstr "creazione del nome per il tipo array del tipo \"%s\" fallita" -#: catalog/toasting.c:105 commands/indexcmds.c:395 commands/tablecmds.c:4725 -#: commands/tablecmds.c:12946 +#: catalog/toasting.c:105 commands/indexcmds.c:399 commands/tablecmds.c:4734 +#: commands/tablecmds.c:12986 #, c-format msgid "\"%s\" is not a table or materialized view" msgstr "\"%s\" non è una tabella né una vista materializzata" @@ -5214,7 +5052,7 @@ msgstr "il linguaggio \"%s\" esiste già" msgid "publication \"%s\" already exists" msgstr "la pubblicazione \"%s\" esiste già" -#: commands/alter.c:99 commands/subscriptioncmds.c:343 +#: commands/alter.c:99 commands/subscriptioncmds.c:358 #, c-format msgid "subscription \"%s\" already exists" msgstr "la sottoscrizione \"%s\" esiste già" @@ -5279,7 +5117,7 @@ msgstr "il metodo di accesso \"%s\" esiste già" msgid "must be superuser to drop access methods" msgstr "occorre essere un superutente per eliminare un metodo di accesso" -#: commands/amcmds.c:174 commands/indexcmds.c:163 commands/indexcmds.c:502 +#: commands/amcmds.c:174 commands/indexcmds.c:163 commands/indexcmds.c:515 #: commands/opclasscmds.c:363 commands/opclasscmds.c:777 #, c-format msgid "access method \"%s\" does not exist" @@ -5292,107 +5130,112 @@ msgstr "funzione handler non specificata" #: commands/amcmds.c:262 commands/event_trigger.c:243 #: commands/foreigncmds.c:487 commands/proclang.c:117 commands/proclang.c:289 -#: commands/trigger.c:557 parser/parse_clause.c:985 +#: commands/trigger.c:616 parser/parse_clause.c:982 #, c-format msgid "function %s must return type %s" msgstr "la funzione %s deve restituire il tipo %s" -#: commands/analyze.c:151 +#: commands/analyze.c:156 #, c-format msgid "skipping analyze of \"%s\" --- lock not available" msgstr "analisi di \"%s\" saltata --- lock non disponibile" -#: commands/analyze.c:168 +#: commands/analyze.c:173 #, c-format msgid "skipping \"%s\" --- only superuser can analyze it" msgstr "\"%s\" saltato --- solo un 
superutente può analizzarlo" -#: commands/analyze.c:172 +#: commands/analyze.c:177 #, c-format msgid "skipping \"%s\" --- only superuser or database owner can analyze it" msgstr "\"%s\" saltato --- solo un superutente o il proprietario del database possono analizzarlo." -#: commands/analyze.c:176 +#: commands/analyze.c:181 #, c-format msgid "skipping \"%s\" --- only table or database owner can analyze it" msgstr "\"%s\" saltato --- solo il proprietario del database o della tabella possono analizzarlo" -#: commands/analyze.c:236 +#: commands/analyze.c:241 #, c-format msgid "skipping \"%s\" --- cannot analyze this foreign table" msgstr "\"%s\" saltato --- non è possibile analizzare questa tabella esterna" -#: commands/analyze.c:253 +#: commands/analyze.c:258 #, c-format msgid "skipping \"%s\" --- cannot analyze non-tables or special system tables" msgstr "\"%s\" saltato --- non è possibile analizzare non-tabelle o le tabelle speciali di sistema" -#: commands/analyze.c:334 +#: commands/analyze.c:339 #, c-format msgid "analyzing \"%s.%s\" inheritance tree" msgstr "analisi dell'albero di ereditarietà di \"%s.%s\"" -#: commands/analyze.c:339 +#: commands/analyze.c:344 #, c-format msgid "analyzing \"%s.%s\"" msgstr "analisi di \"%s.%s\"" -#: commands/analyze.c:668 +#: commands/analyze.c:404 +#, c-format +msgid "column \"%s\" of relation \"%s\" appears more than once" +msgstr "la colonna \"%s\" della relazione \"%s\" è specificata più di una volta" + +#: commands/analyze.c:684 #, c-format msgid "automatic analyze of table \"%s.%s.%s\" system usage: %s" msgstr "analisi automatica della tabella \"%s.%s.%s\" uso del sistema: %s" -#: commands/analyze.c:1220 +#: commands/analyze.c:1236 #, c-format msgid "\"%s\": scanned %d of %u pages, containing %.0f live rows and %.0f dead rows; %d rows in sample, %.0f estimated total rows" msgstr "\"%s\": esaminate %d pagine su %u, contenenti %.0f righe vive e %.0f righe morte; %d righe nel campione, %.0f righe totali stimate" -#: commands/analyze.c:1300 +#: commands/analyze.c:1316 #, c-format msgid "skipping analyze of \"%s.%s\" inheritance tree --- this inheritance tree contains no child tables" msgstr "analyze dell'albero di ereditarietà \"%s.%s\" saltato --- questo albero non ha tabelle figlie" -#: commands/analyze.c:1398 +#: commands/analyze.c:1414 #, c-format msgid "skipping analyze of \"%s.%s\" inheritance tree --- this inheritance tree contains no analyzable child tables" msgstr "analyze dell'albero di ereditarietà \"%s.%s\" saltato --- questo albero non ha tabelle figlie analizzabili" -#: commands/async.c:555 +#: commands/async.c:558 #, c-format msgid "channel name cannot be empty" msgstr "Il nome del canale non può essere vuoto" -#: commands/async.c:560 +#: commands/async.c:563 #, c-format msgid "channel name too long" msgstr "il nome del canale è troppo lungo" -#: commands/async.c:567 +#: commands/async.c:570 #, c-format msgid "payload string too long" msgstr "la stringa del carico è troppo lunga" -#: commands/async.c:753 +#: commands/async.c:756 #, c-format msgid "cannot PREPARE a transaction that has executed LISTEN, UNLISTEN, or NOTIFY" msgstr "non è possibile eseguire PREPARE in una transazione che ha eseguito LISTEN, UNLISTEN o NOTIFY" -#: commands/async.c:856 +#: commands/async.c:859 #, c-format msgid "too many notifications in the NOTIFY queue" msgstr "troppe notifiche nella coda di NOTIFY" -#: commands/async.c:1486 +#: commands/async.c:1491 #, c-format msgid "NOTIFY queue is %.0f%% full" msgstr "la coda di NOTIFY è piena al %.0f%%" -#: 
commands/async.c:1488 +#: commands/async.c:1493 #, c-format msgid "The server process with PID %d is among those with the oldest transactions." msgstr "Il processo server con PID %d è tra quelli con le transazioni più vecchie." -#: commands/async.c:1491 +#: commands/async.c:1496 #, c-format msgid "The NOTIFY queue cannot be emptied until that process ends its current transaction." msgstr "La coda di NOTIFY non può essere svuotata finché quel processo non avrà terminato la sua transazione corrente." @@ -5407,7 +5250,8 @@ msgstr "non è possibile raggruppare tabelle temporanee di altre sessioni" msgid "there is no previously clustered index for table \"%s\"" msgstr "non esiste un indice già raggruppato per la tabella \"%s\"" -#: commands/cluster.c:173 commands/tablecmds.c:10176 commands/tablecmds.c:12039 +#: commands/cluster.c:173 commands/tablecmds.c:10198 +#: commands/tablecmds.c:12079 #, c-format msgid "index \"%s\" for table \"%s\" does not exist" msgstr "l'indice \"%s\" per la tabella \"%s\" non esiste" @@ -5422,7 +5266,7 @@ msgstr "non è possibile raggruppare un catalogo condiviso" msgid "cannot vacuum temporary tables of other sessions" msgstr "non è possibile ripulire tabelle temporanee di altre sessioni" -#: commands/cluster.c:431 commands/tablecmds.c:12049 +#: commands/cluster.c:431 commands/tablecmds.c:12089 #, c-format msgid "\"%s\" is not an index for table \"%s\"" msgstr "\"%s\" non è un indice per la tabella \"%s\"" @@ -5452,7 +5296,7 @@ msgstr "raggruppamento di \"%s.%s\" usando una scansione sull'indice \"%s\"" msgid "clustering \"%s.%s\" using sequential scan and sort" msgstr "raggruppamento di \"%s.%s\" usando una scansione sequenziale e ordinamento" -#: commands/cluster.c:929 commands/vacuumlazy.c:491 +#: commands/cluster.c:929 commands/vacuumlazy.c:490 #, c-format msgid "vacuuming \"%s.%s\"" msgstr "pulizia di \"%s.%s\"" @@ -5471,81 +5315,71 @@ msgstr "" "%.0f versioni di riga morte non possono ancora essere rimosse.\n" "%s." 
-#: commands/collationcmds.c:93 +#: commands/collationcmds.c:101 #, c-format msgid "collation attribute \"%s\" not recognized" msgstr "attributo dell'ordinamento \"%s\" non riconosciuto" -#: commands/collationcmds.c:152 +#: commands/collationcmds.c:143 +#, c-format +msgid "collation \"default\" cannot be copied" +msgstr "l'ordinamento \"default\" non può essere copiato" + +#: commands/collationcmds.c:173 #, c-format msgid "unrecognized collation provider: %s" msgstr "fornitore di ordinamenti non riconosciuto: %s" -#: commands/collationcmds.c:161 +#: commands/collationcmds.c:182 #, c-format msgid "parameter \"lc_collate\" must be specified" msgstr "il parametro \"lc_collate\" deve essere specificato" -#: commands/collationcmds.c:166 +#: commands/collationcmds.c:187 #, c-format msgid "parameter \"lc_ctype\" must be specified" msgstr "il parametro \"lc_ctype\" deve essere specificato" -#: commands/collationcmds.c:217 +#: commands/collationcmds.c:246 #, c-format msgid "collation \"%s\" for encoding \"%s\" already exists in schema \"%s\"" msgstr "l'ordinamento \"%s\" per la codifica \"%s\" già esiste nello schema \"%s\"" -#: commands/collationcmds.c:228 +#: commands/collationcmds.c:257 #, c-format msgid "collation \"%s\" already exists in schema \"%s\"" msgstr "l'ordinamento \"%s\" già esiste nello schema \"%s\"" -#: commands/collationcmds.c:276 +#: commands/collationcmds.c:305 #, c-format msgid "changing version from %s to %s" msgstr "cambio della versione da %s a %s" -#: commands/collationcmds.c:291 +#: commands/collationcmds.c:320 #, c-format msgid "version has not changed" msgstr "la versione non è cambiata" -#: commands/collationcmds.c:382 +#: commands/collationcmds.c:451 #, c-format msgid "could not convert locale name \"%s\" to language tag: %s" msgstr "conversione del nome di locale \"%s\" in tag di linguaggio fallita: %s" -#: commands/collationcmds.c:401 -#, c-format -msgid "could get display name for locale \"%s\": %s" -msgstr "lettura del nome da mostrare per il locale \"%s\" fallita: %s" - -#: commands/collationcmds.c:432 +#: commands/collationcmds.c:512 #, c-format msgid "must be superuser to import system collations" msgstr "solo un superutente può importare gli ordinamenti di sistema" -#: commands/collationcmds.c:439 commands/copy.c:1829 commands/copy.c:3026 +#: commands/collationcmds.c:535 commands/copy.c:1807 commands/copy.c:3130 #, c-format msgid "could not execute command \"%s\": %m" msgstr "esecuzione del comando \"%s\" fallita: %m" -#: commands/collationcmds.c:536 +#: commands/collationcmds.c:666 #, c-format msgid "no usable system locales were found" msgstr "non è stato trovato nessun locale di sistema utilizzabile" -#: commands/collationcmds.c:544 utils/mb/encnames.c:473 -#, c-format -msgid "encoding \"%s\" not supported by ICU" -msgstr "codifica \"%s\" non supportata da ICU" - -#: commands/collationcmds.c:588 commands/collationcmds.c:609 -#, c-format -msgid "could not get keyword values for locale \"%s\": %s" -msgstr "errore nella lettura dei valori chiave per il locale \"%s\": %s" - #: commands/comment.c:61 commands/dbcommands.c:808 commands/dbcommands.c:996 #: commands/dbcommands.c:1100 commands/dbcommands.c:1290 #: commands/dbcommands.c:1513 commands/dbcommands.c:1627 @@ -5555,17 +5389,17 @@ msgstr "errore nella lettura dei valori chiave per il locale \"%s\": %s" msgid "database \"%s\" does not exist" msgstr "il database \"%s\" non esiste" -#: commands/comment.c:101 commands/seclabel.c:117 parser/parse_utilcmd.c:922 +#: commands/comment.c:101 commands/seclabel.c:117 
parser/parse_utilcmd.c:932 #, c-format msgid "\"%s\" is not a table, view, materialized view, composite type, or foreign table" msgstr "\"%s\" non è una tabella, vista, vista materializzata, tipo composito né una tabella esterna" -#: commands/constraint.c:60 utils/adt/ri_triggers.c:2715 +#: commands/constraint.c:60 utils/adt/ri_triggers.c:2712 #, c-format msgid "function \"%s\" was not called by trigger manager" msgstr "la funzione \"%s\" non è stata invocata dal trigger manager" -#: commands/constraint.c:67 utils/adt/ri_triggers.c:2724 +#: commands/constraint.c:67 utils/adt/ri_triggers.c:2721 #, c-format msgid "function \"%s\" must be fired AFTER ROW" msgstr "la funzione \"%s\" deve essere eseguita AFTER ROW" @@ -5590,534 +5424,535 @@ msgstr "la codifica di destinazione \"%s\" non esiste" msgid "encoding conversion function %s must return type %s" msgstr "la funzione di conversione della codifica %s deve restituire il tipo %s" -#: commands/copy.c:371 commands/copy.c:405 +#: commands/copy.c:373 commands/copy.c:407 #, c-format msgid "COPY BINARY is not supported to stdout or from stdin" msgstr "COPY BINARY non è supportato verso stdout o da stdin" -#: commands/copy.c:505 +#: commands/copy.c:507 #, c-format msgid "could not write to COPY program: %m" msgstr "scrittura nel programma COPY fallita: %m" -#: commands/copy.c:510 +#: commands/copy.c:512 #, c-format msgid "could not write to COPY file: %m" msgstr "scrittura nel file COPY fallita: %m" -#: commands/copy.c:523 +#: commands/copy.c:525 #, c-format msgid "connection lost during COPY to stdout" msgstr "connessione persa durante COPY verso stdout" -#: commands/copy.c:567 +#: commands/copy.c:569 #, c-format msgid "could not read from COPY file: %m" msgstr "lettura dal file COPY fallita: %m" -#: commands/copy.c:583 commands/copy.c:604 commands/copy.c:608 -#: tcop/postgres.c:341 tcop/postgres.c:377 tcop/postgres.c:404 +#: commands/copy.c:585 commands/copy.c:606 commands/copy.c:610 +#: tcop/postgres.c:335 tcop/postgres.c:371 tcop/postgres.c:398 #, c-format msgid "unexpected EOF on client connection with an open transaction" msgstr "fine-file inaspettato sulla connessione del client con una transazione aperta" -#: commands/copy.c:621 +#: commands/copy.c:623 #, c-format msgid "COPY from stdin failed: %s" msgstr "COPY da stdin fallita: %s" -#: commands/copy.c:637 +#: commands/copy.c:639 #, c-format msgid "unexpected message type 0x%02X during COPY from stdin" msgstr "messaggio del tipo inaspettato 0x%02X durante COPY da stdin" -#: commands/copy.c:798 +#: commands/copy.c:800 #, c-format msgid "must be superuser to COPY to or from an external program" msgstr "occorre essere un superutente per effettuare COPY da o verso un programma esterno" -#: commands/copy.c:799 commands/copy.c:805 +#: commands/copy.c:801 commands/copy.c:807 #, c-format msgid "Anyone can COPY to stdout or from stdin. psql's \\copy command also works for anyone." msgstr "Chiunque può eseguire COPY verso stdout e da stdin. Anche il comando \\copy di psql funziona per chiunque." -#: commands/copy.c:804 +#: commands/copy.c:806 #, c-format msgid "must be superuser to COPY to or from a file" msgstr "bisogna essere un superutente per eseguire un COPY da o verso un file" -#: commands/copy.c:866 +#: commands/copy.c:868 #, c-format msgid "COPY FROM not supported with row-level security" msgstr "COPY FROM non supportato con il livello di sicurezza di righe" -#: commands/copy.c:867 +#: commands/copy.c:869 #, c-format msgid "Use INSERT statements instead." 
msgstr "Usa istruzioni INSERT invece." -#: commands/copy.c:1052 +#: commands/copy.c:1054 #, c-format msgid "COPY format \"%s\" not recognized" msgstr "Formato di COPY \"%s\" non riconosciuto" -#: commands/copy.c:1132 commands/copy.c:1148 commands/copy.c:1163 -#: commands/copy.c:1185 +#: commands/copy.c:1134 commands/copy.c:1150 commands/copy.c:1165 +#: commands/copy.c:1187 #, c-format msgid "argument to option \"%s\" must be a list of column names" msgstr "l'argomento dell'opzione \"%s\" dev'essere una lista di nomi di colonne" -#: commands/copy.c:1200 +#: commands/copy.c:1202 #, c-format msgid "argument to option \"%s\" must be a valid encoding name" msgstr "l'argomento dell'opzione \"%s\" dev'essere un nome di codifica valido" -#: commands/copy.c:1207 commands/dbcommands.c:242 commands/dbcommands.c:1461 +#: commands/copy.c:1209 commands/dbcommands.c:242 commands/dbcommands.c:1461 #, c-format msgid "option \"%s\" not recognized" msgstr "opzione \"%s\" non riconosciuta" -#: commands/copy.c:1219 +#: commands/copy.c:1221 #, c-format msgid "cannot specify DELIMITER in BINARY mode" msgstr "non è possibile specificare DELIMITER in BINARY mode" -#: commands/copy.c:1224 +#: commands/copy.c:1226 #, c-format msgid "cannot specify NULL in BINARY mode" msgstr "non è possibile specificare NULL in BINARY mode" -#: commands/copy.c:1246 +#: commands/copy.c:1248 #, c-format msgid "COPY delimiter must be a single one-byte character" msgstr "il delimitatore di COPY deve essere un solo carattere di un solo byte" -#: commands/copy.c:1253 +#: commands/copy.c:1255 #, c-format msgid "COPY delimiter cannot be newline or carriage return" msgstr "Il delimitatore di COPY non può essere una \"nuova riga\" o un \"ritorno carrello\"" -#: commands/copy.c:1259 +#: commands/copy.c:1261 #, c-format msgid "COPY null representation cannot use newline or carriage return" msgstr "la rappresentazione dei null in COPY non può usare \"nuova riga\" o \"ritorno carrello\"" -#: commands/copy.c:1276 +#: commands/copy.c:1278 #, c-format msgid "COPY delimiter cannot be \"%s\"" msgstr "il delimitatore di COPY non può essere \"%s\"" -#: commands/copy.c:1282 +#: commands/copy.c:1284 #, c-format msgid "COPY HEADER available only in CSV mode" msgstr "l'HEADER di COPY è disponibile solo in modalità CSV" -#: commands/copy.c:1288 +#: commands/copy.c:1290 #, c-format msgid "COPY quote available only in CSV mode" msgstr "il quoting di COPY è disponibile solo in modalità CSV" -#: commands/copy.c:1293 +#: commands/copy.c:1295 #, c-format msgid "COPY quote must be a single one-byte character" msgstr "il quote di COPY dev'essere un solo carattere di un byte" -#: commands/copy.c:1298 +#: commands/copy.c:1300 #, c-format msgid "COPY delimiter and quote must be different" msgstr "il delimitatore e il quote di COPY devono essere diversi" -#: commands/copy.c:1304 +#: commands/copy.c:1306 #, c-format msgid "COPY escape available only in CSV mode" msgstr "l'escape di COPY è disponibile solo in modalità CSV" -#: commands/copy.c:1309 +#: commands/copy.c:1311 #, c-format msgid "COPY escape must be a single one-byte character" msgstr "l'escape di COPY deve essere un solo carattere di un byte" -#: commands/copy.c:1315 +#: commands/copy.c:1317 #, c-format msgid "COPY force quote available only in CSV mode" msgstr "il \"force quote\" di COPY è disponibile solo in modalità CSV" -#: commands/copy.c:1319 +#: commands/copy.c:1321 #, c-format msgid "COPY force quote only available using COPY TO" msgstr "il \"force quote\" di COPY è disponibile solo in modalità CSV" 
-#: commands/copy.c:1325 +#: commands/copy.c:1327 #, c-format msgid "COPY force not null available only in CSV mode" msgstr "il \"force not null\" di COPY è disponibile solo in modalità CSV" -#: commands/copy.c:1329 +#: commands/copy.c:1331 #, c-format msgid "COPY force not null only available using COPY FROM" msgstr "il \"force not null\" di COPY è disponibile solo usando COPY FROM" -#: commands/copy.c:1335 +#: commands/copy.c:1337 #, c-format msgid "COPY force null available only in CSV mode" msgstr "il \"force null\" di COPY è disponibile solo in modalità CSV" -#: commands/copy.c:1340 +#: commands/copy.c:1342 #, c-format msgid "COPY force null only available using COPY FROM" msgstr "il \"force null\" di COPY è disponibile solo usando COPY FROM" -#: commands/copy.c:1346 +#: commands/copy.c:1348 #, c-format msgid "COPY delimiter must not appear in the NULL specification" msgstr "il delimitatore di COPY non deve apparire nella specificazione di NULL" -#: commands/copy.c:1353 +#: commands/copy.c:1355 #, c-format msgid "CSV quote character must not appear in the NULL specification" msgstr "il carattere quote del CSV non deve apparire nella specificazione di NULL" -#: commands/copy.c:1414 +#: commands/copy.c:1416 #, c-format msgid "table \"%s\" does not have OIDs" msgstr "la tabella \"%s\" non ha OID" -#: commands/copy.c:1455 +#: commands/copy.c:1433 #, c-format msgid "COPY (query) WITH OIDS is not supported" msgstr "COPY (query) WITH OIDS non è supportata" -#: commands/copy.c:1476 +#: commands/copy.c:1454 #, c-format msgid "DO INSTEAD NOTHING rules are not supported for COPY" msgstr "le regole DO INSTEAD NOTHING non sono supportate per COPY" -#: commands/copy.c:1490 +#: commands/copy.c:1468 #, c-format msgid "conditional DO INSTEAD rules are not supported for COPY" msgstr "le regole DO INSTEAD condizionali non sono supportate per COPY" -#: commands/copy.c:1494 +#: commands/copy.c:1472 #, c-format msgid "DO ALSO rules are not supported for the COPY" msgstr "le regole DO ALSO non sono supportate per COPY" -#: commands/copy.c:1499 +#: commands/copy.c:1477 #, c-format msgid "multi-statement DO INSTEAD rules are not supported for COPY" msgstr "le regole DO INSTEAD con più istruzioni non sono supportate per COPY" -#: commands/copy.c:1509 +#: commands/copy.c:1487 #, c-format msgid "COPY (SELECT INTO) is not supported" msgstr "COPY (SELECT INTO) non è supportata" -#: commands/copy.c:1526 +#: commands/copy.c:1504 #, c-format msgid "COPY query must have a RETURNING clause" msgstr "la query COPY deve avere una clausola RETURNING" -#: commands/copy.c:1554 +#: commands/copy.c:1532 #, c-format msgid "relation referenced by COPY statement has changed" msgstr "la relazione referenziata dall'istruzione COPY è cambiata" -#: commands/copy.c:1612 +#: commands/copy.c:1590 #, c-format msgid "FORCE_QUOTE column \"%s\" not referenced by COPY" msgstr "la colonna FORCE_QUOTE \"%s\" non è referenziata da COPY" -#: commands/copy.c:1634 +#: commands/copy.c:1612 #, c-format msgid "FORCE_NOT_NULL column \"%s\" not referenced by COPY" msgstr "la colonna FORCE_NOT_NULL \"%s\" non è referenziata da COPY" -#: commands/copy.c:1656 +#: commands/copy.c:1634 #, c-format msgid "FORCE_NULL column \"%s\" not referenced by COPY" msgstr "la colonna FORCE_NULL \"%s\" non è referenziata da COPY" -#: commands/copy.c:1721 +#: commands/copy.c:1699 #, c-format msgid "could not close pipe to external command: %m" msgstr "chiusura della pipe verso il comando esterno fallita: %m" -#: commands/copy.c:1725 +#: commands/copy.c:1703 #, c-format 
msgid "program \"%s\" failed" msgstr "programma \"%s\" fallito" -#: commands/copy.c:1775 +#: commands/copy.c:1753 #, c-format msgid "cannot copy from view \"%s\"" msgstr "non è possibile copiare dalla vista \"%s\"" -#: commands/copy.c:1777 commands/copy.c:1783 commands/copy.c:1789 -#: commands/copy.c:1800 +#: commands/copy.c:1755 commands/copy.c:1761 commands/copy.c:1767 +#: commands/copy.c:1778 #, c-format msgid "Try the COPY (SELECT ...) TO variant." msgstr "Prova la variante COPY (SELECT ...) TO." -#: commands/copy.c:1781 +#: commands/copy.c:1759 #, c-format msgid "cannot copy from materialized view \"%s\"" msgstr "non è possibile copiare dalla vista materializzata \"%s\"" -#: commands/copy.c:1787 +#: commands/copy.c:1765 #, c-format msgid "cannot copy from foreign table \"%s\"" msgstr "non è possibile copiare dalla tabella esterna \"%s\"" -#: commands/copy.c:1793 +#: commands/copy.c:1771 #, c-format msgid "cannot copy from sequence \"%s\"" msgstr "non è possibile copiare dalla sequenza \"%s\"" -#: commands/copy.c:1798 +#: commands/copy.c:1776 #, c-format msgid "cannot copy from partitioned table \"%s\"" msgstr "non è possibile copiare dalla tabella partizionata \"%s\"" -#: commands/copy.c:1804 +#: commands/copy.c:1782 #, c-format msgid "cannot copy from non-table relation \"%s\"" msgstr "non è possibile copiare dalla relazione \"%s\" perché non è una tabella" -#: commands/copy.c:1844 +#: commands/copy.c:1822 #, c-format msgid "relative path not allowed for COPY to file" msgstr "i percorsi relativi non sono consentiti per il COPY verso un file" -#: commands/copy.c:1856 +#: commands/copy.c:1843 #, c-format msgid "could not open file \"%s\" for writing: %m" msgstr "apertura del file \"%s\" in scrittura fallita: %m" -#: commands/copy.c:1859 +#: commands/copy.c:1846 #, c-format msgid "COPY TO instructs the PostgreSQL server process to write a file. You may want a client-side facility such as psql's \\copy." msgstr "COPY TO fa scrivere un file al processo server PostgreSQL. Probabilmente ti serve un sistema lato client, per esempio il comando \\copy di psql." -#: commands/copy.c:1872 commands/copy.c:3057 +#: commands/copy.c:1859 commands/copy.c:3161 #, c-format msgid "\"%s\" is a directory" msgstr "\"%s\" è una directory" -#: commands/copy.c:2195 +#: commands/copy.c:2182 #, c-format msgid "COPY %s, line %d, column %s" msgstr "COPY %s, riga %d, colonna %s" -#: commands/copy.c:2199 commands/copy.c:2246 +#: commands/copy.c:2186 commands/copy.c:2233 #, c-format msgid "COPY %s, line %d" msgstr "COPY %s, riga %d" -#: commands/copy.c:2210 +#: commands/copy.c:2197 #, c-format msgid "COPY %s, line %d, column %s: \"%s\"" msgstr "COPY %s, riga %d, colonna %s: \"%s\"" -#: commands/copy.c:2218 +#: commands/copy.c:2205 #, c-format msgid "COPY %s, line %d, column %s: null input" msgstr "COPY %s, riga %d, colonna %s: input nullo" -#: commands/copy.c:2240 +#: commands/copy.c:2227 #, c-format msgid "COPY %s, line %d: \"%s\"" msgstr "COPY %s, riga %d: \"%s\"" -#: commands/copy.c:2334 +#: commands/copy.c:2321 #, c-format msgid "cannot copy to view \"%s\"" msgstr "non è possibile copiare verso la vista \"%s\"" -#: commands/copy.c:2336 +#: commands/copy.c:2323 #, c-format msgid "To enable copying to a view, provide an INSTEAD OF INSERT trigger." msgstr "Per consentire la copia in una vista occorre fornire un trigger INSTEAD OF INSERT." 
-#: commands/copy.c:2340 +#: commands/copy.c:2327 #, c-format msgid "cannot copy to materialized view \"%s\"" msgstr "non è possibile copiare verso la vista materializzata \"%s\"" -#: commands/copy.c:2345 +#: commands/copy.c:2332 #, c-format msgid "cannot copy to foreign table \"%s\"" msgstr "non è possibile copiare verso la tabella esterna \"%s\"" -#: commands/copy.c:2350 +#: commands/copy.c:2337 #, c-format msgid "cannot copy to sequence \"%s\"" msgstr "non è possibile copiare verso la sequenza \"%s\"" -#: commands/copy.c:2355 +#: commands/copy.c:2342 #, c-format msgid "cannot copy to non-table relation \"%s\"" msgstr "non è possibile copiare verso la relazione \"%s\" perché non è una tabella" -#: commands/copy.c:2418 +#: commands/copy.c:2417 #, c-format msgid "cannot perform FREEZE because of prior transaction activity" msgstr "non è possibile eseguire FREEZE a causa di precedente attività della transazione" -#: commands/copy.c:2424 +#: commands/copy.c:2423 #, c-format msgid "cannot perform FREEZE because the table was not created or truncated in the current subtransaction" msgstr "non è possibile eseguire FREEZE perché la tabella non è stata creata o troncata nella sottotransazione corrente" -#: commands/copy.c:2587 executor/nodeModifyTable.c:311 +#: commands/copy.c:2645 executor/nodeModifyTable.c:311 #, c-format msgid "cannot route inserted tuples to a foreign table" msgstr "non è possibile instradare le tuple inserite in una tabella esterna" -#: commands/copy.c:3044 +#: commands/copy.c:3148 #, c-format msgid "COPY FROM instructs the PostgreSQL server process to read a file. You may want a client-side facility such as psql's \\copy." msgstr "COPY FROM fa leggere un file dal processo server PostgreSQL. Probabilmente ti serve un sistema lato client, per esempio il comando \\copy di psql." 
-#: commands/copy.c:3077 +#: commands/copy.c:3181 #, c-format msgid "COPY file signature not recognized" msgstr "formato del file COPY non riconosciuto" -#: commands/copy.c:3082 +#: commands/copy.c:3186 #, c-format msgid "invalid COPY file header (missing flags)" msgstr "intestazione del file COPY non valida (flag mancanti)" -#: commands/copy.c:3088 +#: commands/copy.c:3192 #, c-format msgid "unrecognized critical flags in COPY file header" msgstr "alcuni flag critici non sono stati riconosciuti nell'intestazione del file COPY" -#: commands/copy.c:3094 +#: commands/copy.c:3198 #, c-format msgid "invalid COPY file header (missing length)" msgstr "intestazione del file COPY non valida (manca la lunghezza)" -#: commands/copy.c:3101 +#: commands/copy.c:3205 #, c-format msgid "invalid COPY file header (wrong length)" msgstr "intestazione del file COPY non valida (lunghezza errata)" -#: commands/copy.c:3234 commands/copy.c:3941 commands/copy.c:4171 +#: commands/copy.c:3338 commands/copy.c:4045 commands/copy.c:4275 #, c-format msgid "extra data after last expected column" msgstr "ci sono dati in eccesso dopo l'ultima colonna attesa" -#: commands/copy.c:3244 +#: commands/copy.c:3348 #, c-format msgid "missing data for OID column" msgstr "dati per la colonna OID mancanti" -#: commands/copy.c:3250 +#: commands/copy.c:3354 #, c-format msgid "null OID in COPY data" msgstr "OID nullo nei dati da COPY" -#: commands/copy.c:3260 commands/copy.c:3383 +#: commands/copy.c:3364 commands/copy.c:3487 #, c-format msgid "invalid OID in COPY data" msgstr "OID non valido nei dati da COPY" -#: commands/copy.c:3275 +#: commands/copy.c:3379 #, c-format msgid "missing data for column \"%s\"" msgstr "dati mancanti per la colonna \"%s\"" -#: commands/copy.c:3358 +#: commands/copy.c:3462 #, c-format msgid "received copy data after EOF marker" msgstr "dati da copiare ricevuti dopo il segnalatore di fine file" -#: commands/copy.c:3365 +#: commands/copy.c:3469 #, c-format msgid "row field count is %d, expected %d" msgstr "il numero di campi è %d, ne erano attesi %d" -#: commands/copy.c:3705 commands/copy.c:3722 +#: commands/copy.c:3809 commands/copy.c:3826 #, c-format msgid "literal carriage return found in data" msgstr "\"ritorno carrello\" letterale trovato nei dati" -#: commands/copy.c:3706 commands/copy.c:3723 +#: commands/copy.c:3810 commands/copy.c:3827 #, c-format msgid "unquoted carriage return found in data" msgstr "\"ritorno carrello\" non quotato trovato nei dati" -#: commands/copy.c:3708 commands/copy.c:3725 +#: commands/copy.c:3812 commands/copy.c:3829 #, c-format msgid "Use \"\\r\" to represent carriage return." msgstr "Usa \"\\r\" per rappresentare i caratteri \"ritorno carrello\"." -#: commands/copy.c:3709 commands/copy.c:3726 +#: commands/copy.c:3813 commands/copy.c:3830 #, c-format msgid "Use quoted CSV field to represent carriage return." msgstr "Usa un campo CSV quotato per rappresentare i caratteri \"ritorno carrello\"." -#: commands/copy.c:3738 +#: commands/copy.c:3842 #, c-format msgid "literal newline found in data" msgstr "\"nuova riga\" letterale trovato nei dati" -#: commands/copy.c:3739 +#: commands/copy.c:3843 #, c-format msgid "unquoted newline found in data" msgstr "\"nuova riga\" non quotato trovato nei dati" -#: commands/copy.c:3741 +#: commands/copy.c:3845 #, c-format msgid "Use \"\\n\" to represent newline." msgstr "Usa \"\\n\" per rappresentare i caratteri \"nuova riga\"." -#: commands/copy.c:3742 +#: commands/copy.c:3846 #, c-format msgid "Use quoted CSV field to represent newline." 
msgstr "Usa un campo CSV quotato per rappresentare i caratteri \"nuova riga\"." -#: commands/copy.c:3788 commands/copy.c:3824 +#: commands/copy.c:3892 commands/copy.c:3928 #, c-format msgid "end-of-copy marker does not match previous newline style" msgstr "il marcatore di fine copia non combacia con il precedente stile \"nuova riga\"" -#: commands/copy.c:3797 commands/copy.c:3813 +#: commands/copy.c:3901 commands/copy.c:3917 #, c-format msgid "end-of-copy marker corrupt" msgstr "il marcatore di fine copia è corrotto" -#: commands/copy.c:4255 +#: commands/copy.c:4359 #, c-format msgid "unterminated CSV quoted field" msgstr "campo CSV tra virgolette non terminato" -#: commands/copy.c:4332 commands/copy.c:4351 +#: commands/copy.c:4436 commands/copy.c:4455 #, c-format msgid "unexpected EOF in COPY data" msgstr "fine file inattesa dei dati da COPY" -#: commands/copy.c:4341 +#: commands/copy.c:4445 #, c-format msgid "invalid field size" msgstr "dimensione del campo non valida" -#: commands/copy.c:4364 +#: commands/copy.c:4468 #, c-format msgid "incorrect binary data format" msgstr "formato di dati binari non corretto" -#: commands/copy.c:4675 commands/indexcmds.c:1057 commands/tablecmds.c:1666 -#: commands/tablecmds.c:2168 commands/tablecmds.c:2600 -#: parser/parse_relation.c:3249 parser/parse_relation.c:3269 -#: utils/adt/tsvector_op.c:2561 +#: commands/copy.c:4779 commands/indexcmds.c:1070 commands/statscmds.c:183 +#: commands/tablecmds.c:1685 commands/tablecmds.c:2187 +#: commands/tablecmds.c:2613 parser/parse_relation.c:3287 +#: parser/parse_relation.c:3307 utils/adt/tsvector_op.c:2561 #, c-format msgid "column \"%s\" does not exist" msgstr "la colonna \"%s\" non esiste" -#: commands/copy.c:4682 commands/tablecmds.c:1692 commands/tablecmds.c:2194 -#: commands/trigger.c:767 parser/parse_target.c:1018 parser/parse_target.c:1029 +#: commands/copy.c:4786 commands/tablecmds.c:1711 commands/tablecmds.c:2213 +#: commands/trigger.c:826 parser/parse_target.c:1018 +#: parser/parse_target.c:1029 #, c-format msgid "column \"%s\" specified more than once" msgstr "la colonna \"%s\" è stata specificata più di una volta" @@ -6152,8 +5987,8 @@ msgstr "%d non è un codice di codifica valido" msgid "%s is not a valid encoding name" msgstr "%s non è un nome di codifica valido" -#: commands/dbcommands.c:292 commands/dbcommands.c:1494 commands/user.c:277 -#: commands/user.c:642 +#: commands/dbcommands.c:292 commands/dbcommands.c:1494 commands/user.c:276 +#: commands/user.c:664 #, c-format msgid "invalid connection limit: %d" msgstr "limite di connessioni non valido: %d" @@ -6402,7 +6237,7 @@ msgid "invalid argument for %s: \"%s\"" msgstr "argomento non valido per %s: \"%s\"" #: commands/dropcmds.c:104 commands/functioncmds.c:1201 -#: utils/adt/ruleutils.c:2445 +#: utils/adt/ruleutils.c:2453 #, c-format msgid "\"%s\" is an aggregate function" msgstr "\"%s\" è una funzione di aggregazione" @@ -6412,14 +6247,14 @@ msgstr "\"%s\" è una funzione di aggregazione" msgid "Use DROP AGGREGATE to drop aggregate functions." msgstr "Usa DROP AGGREGATE per rimuovere le funzioni di aggregazione." 
-#: commands/dropcmds.c:157 commands/sequence.c:437 commands/tablecmds.c:2684 -#: commands/tablecmds.c:2835 commands/tablecmds.c:2878 -#: commands/tablecmds.c:12422 tcop/utility.c:1168 +#: commands/dropcmds.c:157 commands/sequence.c:442 commands/tablecmds.c:2697 +#: commands/tablecmds.c:2848 commands/tablecmds.c:2891 +#: commands/tablecmds.c:12462 tcop/utility.c:1168 #, c-format msgid "relation \"%s\" does not exist, skipping" msgstr "la relazione \"%s\" non esiste, saltata" -#: commands/dropcmds.c:187 commands/dropcmds.c:286 commands/tablecmds.c:877 +#: commands/dropcmds.c:187 commands/dropcmds.c:286 commands/tablecmds.c:896 #, c-format msgid "schema \"%s\" does not exist, skipping" msgstr "lo schema \"%s\" non esiste, saltato" @@ -6636,234 +6471,234 @@ msgstr "l'opzione BUFFERS di EXPLAIN richiede ANALYZE" msgid "EXPLAIN option TIMING requires ANALYZE" msgstr "l'opzione TIMING di EXPLAIN richiede ANALYZE" -#: commands/extension.c:167 commands/extension.c:2905 +#: commands/extension.c:168 commands/extension.c:2907 #, c-format msgid "extension \"%s\" does not exist" msgstr "l'estensione \"%s\" non esiste" -#: commands/extension.c:266 commands/extension.c:275 commands/extension.c:287 -#: commands/extension.c:297 +#: commands/extension.c:267 commands/extension.c:276 commands/extension.c:288 +#: commands/extension.c:298 #, c-format msgid "invalid extension name: \"%s\"" msgstr "nome di estensione non valido: \"%s\"" -#: commands/extension.c:267 +#: commands/extension.c:268 #, c-format msgid "Extension names must not be empty." msgstr "I nomi delle estensioni non possono essere vuoti." -#: commands/extension.c:276 +#: commands/extension.c:277 #, c-format msgid "Extension names must not contain \"--\"." msgstr "I nomi delle estensioni non possono contenere \"--\"." -#: commands/extension.c:288 +#: commands/extension.c:289 #, c-format msgid "Extension names must not begin or end with \"-\"." msgstr "I nomi delle estensioni non possono iniziare o finire con \"-\"." -#: commands/extension.c:298 +#: commands/extension.c:299 #, c-format msgid "Extension names must not contain directory separator characters." msgstr "I nomi delle estensioni non possono contenere caratteri separatore directory." -#: commands/extension.c:313 commands/extension.c:322 commands/extension.c:331 -#: commands/extension.c:341 +#: commands/extension.c:314 commands/extension.c:323 commands/extension.c:332 +#: commands/extension.c:342 #, c-format msgid "invalid extension version name: \"%s\"" msgstr "nome di versione dell'estensione non valido: \"%s\"" -#: commands/extension.c:314 +#: commands/extension.c:315 #, c-format msgid "Version names must not be empty." msgstr "I nomi di versione non possono essere vuoti." -#: commands/extension.c:323 +#: commands/extension.c:324 #, c-format msgid "Version names must not contain \"--\"." msgstr "I nomi di versione non possono contenere \"--\"." -#: commands/extension.c:332 +#: commands/extension.c:333 #, c-format msgid "Version names must not begin or end with \"-\"." msgstr "I nomi di versione non possono iniziare o finire con \"-\"." -#: commands/extension.c:342 +#: commands/extension.c:343 #, c-format msgid "Version names must not contain directory separator characters." msgstr "I nomi di versione non possono contenere caratteri separatore directory." 
-#: commands/extension.c:492 +#: commands/extension.c:493 #, c-format msgid "could not open extension control file \"%s\": %m" msgstr "apertura del file di controllo dell'estensione \"%s\" fallita: %m" -#: commands/extension.c:514 commands/extension.c:524 +#: commands/extension.c:515 commands/extension.c:525 #, c-format msgid "parameter \"%s\" cannot be set in a secondary extension control file" msgstr "il parametro \"%s\" non può essere impostato in un file di controllo secondario di estensione" -#: commands/extension.c:563 +#: commands/extension.c:564 #, c-format msgid "\"%s\" is not a valid encoding name" msgstr "\"%s\" non è un nome di codifica valido" -#: commands/extension.c:577 +#: commands/extension.c:578 #, c-format msgid "parameter \"%s\" must be a list of extension names" msgstr "il parametro \"%s\" dev'essere una lista di nomi di estensioni" -#: commands/extension.c:584 +#: commands/extension.c:585 #, c-format msgid "unrecognized parameter \"%s\" in file \"%s\"" msgstr "parametro sconosciuto \"%s\" nel file \"%s\"" -#: commands/extension.c:593 +#: commands/extension.c:594 #, c-format msgid "parameter \"schema\" cannot be specified when \"relocatable\" is true" msgstr "il parametro \"schema\" non può essere specificato quando \"relocatable\" è abilitato" -#: commands/extension.c:760 +#: commands/extension.c:761 #, c-format msgid "transaction control statements are not allowed within an extension script" msgstr "le istruzioni di controllo di transazione non sono valide in uno script di estensione" -#: commands/extension.c:806 +#: commands/extension.c:807 #, c-format msgid "permission denied to create extension \"%s\"" msgstr "permesso di creare l'estensione \"%s\" negato" -#: commands/extension.c:808 +#: commands/extension.c:809 #, c-format msgid "Must be superuser to create this extension." msgstr "Solo un superutente può creare questa estensione." -#: commands/extension.c:812 +#: commands/extension.c:813 #, c-format msgid "permission denied to update extension \"%s\"" msgstr "permesso di modificare l'estensione \"%s\" negato" -#: commands/extension.c:814 +#: commands/extension.c:815 #, c-format msgid "Must be superuser to update this extension." msgstr "Solo un superutente può modificare questa estensione." 
-#: commands/extension.c:1096 +#: commands/extension.c:1097 #, c-format msgid "extension \"%s\" has no update path from version \"%s\" to version \"%s\"" msgstr "l'estensione \"%s\" non ha un percorso di aggiornamento dalla versione \"%s\" alla versione \"%s\"" -#: commands/extension.c:1303 commands/extension.c:2966 +#: commands/extension.c:1304 commands/extension.c:2968 #, c-format msgid "version to install must be specified" msgstr "il nome di versione da installare deve essere specificato" -#: commands/extension.c:1325 +#: commands/extension.c:1326 #, c-format msgid "FROM version must be different from installation target version \"%s\"" msgstr "la versione FROM dev'essere diversa dalla versione \"%s\" oggetto dell'installazione" -#: commands/extension.c:1390 +#: commands/extension.c:1391 #, c-format msgid "extension \"%s\" has no installation script nor update path for version \"%s\"" msgstr "l'estensione \"%s\" non ha uno script di installazione o un percorso di update per la versione \"%s\"" -#: commands/extension.c:1425 +#: commands/extension.c:1426 #, c-format msgid "extension \"%s\" must be installed in schema \"%s\"" msgstr "l'estensione \"%s\" dev'essere installata nello schema \"%s\"" -#: commands/extension.c:1578 +#: commands/extension.c:1579 #, c-format msgid "cyclic dependency detected between extensions \"%s\" and \"%s\"" msgstr "individuata una dipendenza ciclica tra le estensioni \"%s\" e \"%s\"" -#: commands/extension.c:1583 +#: commands/extension.c:1584 #, c-format msgid "installing required extension \"%s\"" msgstr "installazione dell'estensione richiesta \"%s\"" -#: commands/extension.c:1607 +#: commands/extension.c:1608 #, c-format msgid "required extension \"%s\" is not installed" msgstr "l'estensione richiesta \"%s\" non è installata" -#: commands/extension.c:1610 +#: commands/extension.c:1611 #, c-format msgid "Use CREATE EXTENSION ... CASCADE to install required extensions too." msgstr "Usa CREATE EXTENSION ... CASCADE per installare anche le estensioni richieste." 
-#: commands/extension.c:1647 +#: commands/extension.c:1648 #, c-format msgid "extension \"%s\" already exists, skipping" msgstr "l'estensione \"%s\" esiste già, saltata" -#: commands/extension.c:1654 +#: commands/extension.c:1655 #, c-format msgid "extension \"%s\" already exists" msgstr "l'estensione \"%s\" esiste già" -#: commands/extension.c:1665 +#: commands/extension.c:1666 #, c-format msgid "nested CREATE EXTENSION is not supported" msgstr "CREATE EXTENSION annidati non sono supportati" -#: commands/extension.c:1846 +#: commands/extension.c:1847 #, c-format msgid "cannot drop extension \"%s\" because it is being modified" msgstr "non è possibile eliminare l'estensione \"%s\" perché sta venendo modificata" -#: commands/extension.c:2348 +#: commands/extension.c:2349 #, c-format msgid "pg_extension_config_dump() can only be called from an SQL script executed by CREATE EXTENSION" msgstr "pg_extension_config_dump() può essere richiamata solo da uno script SQL eseguito da CREATE EXTENSION" -#: commands/extension.c:2360 +#: commands/extension.c:2361 #, c-format msgid "OID %u does not refer to a table" msgstr "l'OID %u non si riferisce ad una tabella" -#: commands/extension.c:2365 +#: commands/extension.c:2366 #, c-format msgid "table \"%s\" is not a member of the extension being created" msgstr "la tabella \"%s\" non è membra dell'estensione in fase di creazione" -#: commands/extension.c:2721 +#: commands/extension.c:2722 #, c-format msgid "cannot move extension \"%s\" into schema \"%s\" because the extension contains the schema" msgstr "non è possibile spostare l'estensione \"%s\" nello schema \"%s\" perché l'estensione contiene lo schema" -#: commands/extension.c:2761 commands/extension.c:2824 +#: commands/extension.c:2763 commands/extension.c:2826 #, c-format msgid "extension \"%s\" does not support SET SCHEMA" msgstr "l'estensione \"%s\" non supporta SET SCHEMA" -#: commands/extension.c:2826 +#: commands/extension.c:2828 #, c-format msgid "%s is not in the extension's schema \"%s\"" msgstr "%s non è nello schema dell'estensione \"%s\"" -#: commands/extension.c:2885 +#: commands/extension.c:2887 #, c-format msgid "nested ALTER EXTENSION is not supported" msgstr "ALTER EXTENSION annidati non sono supportati" -#: commands/extension.c:2977 +#: commands/extension.c:2979 #, c-format msgid "version \"%s\" of extension \"%s\" is already installed" msgstr "la versione \"%s\" dell'estensione \"%s\" è già installata" -#: commands/extension.c:3228 +#: commands/extension.c:3230 #, c-format msgid "cannot add schema \"%s\" to extension \"%s\" because the schema contains the extension" msgstr "non è possibile aggiungere lo schema \"%s\" all'estensione \"%s\" perché lo schema contiene l'estensione" -#: commands/extension.c:3256 +#: commands/extension.c:3258 #, c-format msgid "%s is not a member of extension \"%s\"" msgstr "%s non fa parte dell'estensione \"%s\"" -#: commands/extension.c:3322 +#: commands/extension.c:3324 #, c-format msgid "file \"%s\" is too large" msgstr "il file \"%s\" è troppo grande" @@ -7088,13 +6923,13 @@ msgstr "attributo di funzione sconosciuto \"%s\" ignorato" msgid "only one AS item needed for language \"%s\"" msgstr "solo un elemento AS è necessario per il linguaggio \"%s\"" -#: commands/functioncmds.c:930 commands/functioncmds.c:2111 +#: commands/functioncmds.c:930 commands/functioncmds.c:2131 #: commands/proclang.c:561 #, c-format msgid "language \"%s\" does not exist" msgstr "il linguaggio \"%s\" non esiste" -#: commands/functioncmds.c:932 commands/functioncmds.c:2113 +#: 
commands/functioncmds.c:932 commands/functioncmds.c:2133 #, c-format msgid "Use CREATE LANGUAGE to load the language into the database." msgstr "Usa CREATE LANGUAGE per caricare il linguaggio nel database." @@ -7119,345 +6954,346 @@ msgstr "il tipo di risultato della funzione dev'essere specificato" msgid "ROWS is not applicable when function does not return a set" msgstr "ROWS è non applicabile quando la funzione non restituisce un insieme" -#: commands/functioncmds.c:1406 +#: commands/functioncmds.c:1426 #, c-format msgid "source data type %s is a pseudo-type" msgstr "il tipo di dati di origine %s è uno pseudo-tipo" -#: commands/functioncmds.c:1412 +#: commands/functioncmds.c:1432 #, c-format msgid "target data type %s is a pseudo-type" msgstr "il tipo di dati di destinazione %s è uno pseudo-tipo" -#: commands/functioncmds.c:1436 +#: commands/functioncmds.c:1456 #, c-format msgid "cast will be ignored because the source data type is a domain" msgstr "la conversione verrà ignorata perché il tipo di dato di origine è un dominio" -#: commands/functioncmds.c:1441 +#: commands/functioncmds.c:1461 #, c-format msgid "cast will be ignored because the target data type is a domain" msgstr "la conversione verrà ignorata perché il tipo di dato di destinazione è un dominio" -#: commands/functioncmds.c:1466 +#: commands/functioncmds.c:1486 #, c-format msgid "cast function must take one to three arguments" msgstr "la funzione di conversione deve prendere da uno a tre argomenti" -#: commands/functioncmds.c:1470 +#: commands/functioncmds.c:1490 #, c-format msgid "argument of cast function must match or be binary-coercible from source data type" msgstr "l'argomento della funzione di conversione deve combaciare o essere convertibile a livello binario dal tipo di dato di origine" -#: commands/functioncmds.c:1474 +#: commands/functioncmds.c:1494 #, c-format msgid "second argument of cast function must be type %s" msgstr "il secondo argomento della funzione di conversione deve essere di tipo %s" -#: commands/functioncmds.c:1479 +#: commands/functioncmds.c:1499 #, c-format msgid "third argument of cast function must be type %s" msgstr "il terzo argomento della funzione di conversione deve essere di tipo %s" -#: commands/functioncmds.c:1484 +#: commands/functioncmds.c:1504 #, c-format msgid "return data type of cast function must match or be binary-coercible to target data type" msgstr "il tipo di dato restituito dalla funzione di conversione deve combaciare o essere convertibile a livello binario nel tipo di dato di destinazione" -#: commands/functioncmds.c:1495 +#: commands/functioncmds.c:1515 #, c-format msgid "cast function must not be volatile" msgstr "la funzione di conversione non può essere volatile" -#: commands/functioncmds.c:1500 +#: commands/functioncmds.c:1520 #, c-format msgid "cast function must not be an aggregate function" msgstr "la funzione di conversione non può essere una funzione di aggregazione" -#: commands/functioncmds.c:1504 +#: commands/functioncmds.c:1524 #, c-format msgid "cast function must not be a window function" msgstr "la funzione di conversione non può essere una funzione finestra" -#: commands/functioncmds.c:1508 +#: commands/functioncmds.c:1528 #, c-format msgid "cast function must not return a set" msgstr "la funzione di conversione non può restituire un insieme" -#: commands/functioncmds.c:1534 +#: commands/functioncmds.c:1554 #, c-format msgid "must be superuser to create a cast WITHOUT FUNCTION" msgstr "occorre essere un superutente per creare un cast WITHOUT FUNCTION" 
-#: commands/functioncmds.c:1549 +#: commands/functioncmds.c:1569 #, c-format msgid "source and target data types are not physically compatible" msgstr "i tipi di dati di origine e di destinazione non sono fisicamente compatibili" -#: commands/functioncmds.c:1564 +#: commands/functioncmds.c:1584 #, c-format msgid "composite data types are not binary-compatible" msgstr "i tipi di dati compositi non sono compatibili a livello binario" -#: commands/functioncmds.c:1570 +#: commands/functioncmds.c:1590 #, c-format msgid "enum data types are not binary-compatible" msgstr "le enumerazioni non sono compatibili a livello binario" -#: commands/functioncmds.c:1576 +#: commands/functioncmds.c:1596 #, c-format msgid "array data types are not binary-compatible" msgstr "i tipi di dati array non sono compatibili a livello binario" -#: commands/functioncmds.c:1593 +#: commands/functioncmds.c:1613 #, c-format msgid "domain data types must not be marked binary-compatible" msgstr "i tipi di dominio non devono essere marcati come compatibili a livello binario" -#: commands/functioncmds.c:1603 +#: commands/functioncmds.c:1623 #, c-format msgid "source data type and target data type are the same" msgstr "i tipi di dati di origine e di destinazione sono gli stessi" -#: commands/functioncmds.c:1636 +#: commands/functioncmds.c:1656 #, c-format msgid "cast from type %s to type %s already exists" msgstr "la conversione dal tipo %s al tipo %s esiste già" -#: commands/functioncmds.c:1709 +#: commands/functioncmds.c:1729 #, c-format msgid "cast from type %s to type %s does not exist" msgstr "la conversione dal tipo %s al tipo %s non esiste" -#: commands/functioncmds.c:1748 +#: commands/functioncmds.c:1768 #, c-format msgid "transform function must not be volatile" msgstr "la funzione di trasformazione non può essere volatile" -#: commands/functioncmds.c:1752 +#: commands/functioncmds.c:1772 #, c-format msgid "transform function must not be an aggregate function" msgstr "la funzione di trasformazione non può essere una funzione aggregata" -#: commands/functioncmds.c:1756 +#: commands/functioncmds.c:1776 #, c-format msgid "transform function must not be a window function" msgstr "la funzione di trasformazione non può essere una funzione finestra" -#: commands/functioncmds.c:1760 +#: commands/functioncmds.c:1780 #, c-format msgid "transform function must not return a set" msgstr "la funzione di trasformazione non può restituire un insieme" -#: commands/functioncmds.c:1764 +#: commands/functioncmds.c:1784 #, c-format msgid "transform function must take one argument" msgstr "la funzione di trasformazione deve poter ricevere un solo argomento" -#: commands/functioncmds.c:1768 +#: commands/functioncmds.c:1788 #, c-format msgid "first argument of transform function must be type %s" msgstr "il primo argomento della funzione di trasformazione deve essere di tipo %s" -#: commands/functioncmds.c:1806 +#: commands/functioncmds.c:1826 #, c-format msgid "data type %s is a pseudo-type" msgstr "il tipo di dato %s è uno pseudo-tipo" -#: commands/functioncmds.c:1812 +#: commands/functioncmds.c:1832 #, c-format msgid "data type %s is a domain" msgstr "il tipo di dato %s è un dominio" -#: commands/functioncmds.c:1852 +#: commands/functioncmds.c:1872 #, c-format msgid "return data type of FROM SQL function must be %s" msgstr "il tipo di dati restituito dalla funzione FROM SQL deve essere %s" -#: commands/functioncmds.c:1878 +#: commands/functioncmds.c:1898 #, c-format msgid "return data type of TO SQL function must be the transform data 
type" msgstr "il tipo di dati restituito da una funzione TO SQL dev'essere il tipo di dato della trasformazione" -#: commands/functioncmds.c:1905 +#: commands/functioncmds.c:1925 #, c-format msgid "transform for type %s language \"%s\" already exists" msgstr "la trasformazione per il tipo %s linguaggio \"%s\" esiste già" -#: commands/functioncmds.c:1994 +#: commands/functioncmds.c:2014 #, c-format msgid "transform for type %s language \"%s\" does not exist" msgstr "la trasformazione per il tipo %s linguaggio \"%s\" non esiste" -#: commands/functioncmds.c:2045 +#: commands/functioncmds.c:2065 #, c-format msgid "function %s already exists in schema \"%s\"" msgstr "la funzione %s esiste già nello schema \"%s\"" -#: commands/functioncmds.c:2098 +#: commands/functioncmds.c:2118 #, c-format msgid "no inline code specified" msgstr "nessun codice inline specificato" -#: commands/functioncmds.c:2143 +#: commands/functioncmds.c:2163 #, c-format msgid "language \"%s\" does not support inline code execution" msgstr "il linguaggio \"%s\" non supporta l'esecuzione di codice inline" -#: commands/indexcmds.c:350 +#: commands/indexcmds.c:354 #, c-format msgid "must specify at least one column" msgstr "occorre specificare almeno una colonna" -#: commands/indexcmds.c:354 +#: commands/indexcmds.c:358 #, c-format msgid "cannot use more than %d columns in an index" msgstr "non è possibile usare più di %d colonne in un indice" -#: commands/indexcmds.c:385 +#: commands/indexcmds.c:389 #, c-format msgid "cannot create index on foreign table \"%s\"" msgstr "non è possibile creare indici sulla tabella esterna \"%s\"" -#: commands/indexcmds.c:390 +#: commands/indexcmds.c:394 #, c-format msgid "cannot create index on partitioned table \"%s\"" msgstr "non è possibile creare indici sulla tabella partizionata \"%s\"" -#: commands/indexcmds.c:405 +#: commands/indexcmds.c:409 #, c-format msgid "cannot create indexes on temporary tables of other sessions" msgstr "non è possibile creare indici su tabelle temporanee di altre sessioni" -#: commands/indexcmds.c:461 commands/tablecmds.c:584 commands/tablecmds.c:10484 +#: commands/indexcmds.c:474 commands/tablecmds.c:593 +#: commands/tablecmds.c:10506 #, c-format msgid "only shared relations can be placed in pg_global tablespace" msgstr "solo le relazioni condivise possono essere poste nel tablespace pg_global" -#: commands/indexcmds.c:494 +#: commands/indexcmds.c:507 #, c-format msgid "substituting access method \"gist\" for obsolete method \"rtree\"" msgstr "sostituzione del metodo di accesso \"gist\" per il metodo obsoleto \"rtree\"" -#: commands/indexcmds.c:512 +#: commands/indexcmds.c:525 #, c-format msgid "access method \"%s\" does not support unique indexes" msgstr "il metodo di accesso \"%s\" non supporta gli indici univoci" -#: commands/indexcmds.c:517 +#: commands/indexcmds.c:530 #, c-format msgid "access method \"%s\" does not support multicolumn indexes" msgstr "il metodo di accesso \"%s\" non supporta gli indici multicolonna" -#: commands/indexcmds.c:522 +#: commands/indexcmds.c:535 #, c-format msgid "access method \"%s\" does not support exclusion constraints" msgstr "il metodo di accesso \"%s\" non supporta i vincoli di esclusione" -#: commands/indexcmds.c:594 commands/indexcmds.c:614 +#: commands/indexcmds.c:607 commands/indexcmds.c:627 #, c-format msgid "index creation on system columns is not supported" msgstr "la creazione di indici su colonne di sistema non è supportata" -#: commands/indexcmds.c:639 +#: commands/indexcmds.c:652 #, c-format msgid "%s %s will 
create implicit index \"%s\" for table \"%s\"" msgstr "%s %s creerà un indice implicito \"%s\" per la tabella \"%s\"" -#: commands/indexcmds.c:986 +#: commands/indexcmds.c:999 #, c-format msgid "functions in index predicate must be marked IMMUTABLE" msgstr "le funzioni nel predicato dell'indice devono essere marcate IMMUTABLE" -#: commands/indexcmds.c:1052 parser/parse_utilcmd.c:2067 +#: commands/indexcmds.c:1065 parser/parse_utilcmd.c:2077 #, c-format msgid "column \"%s\" named in key does not exist" msgstr "la colonna \"%s\" nominata nella chiave non esiste" -#: commands/indexcmds.c:1112 +#: commands/indexcmds.c:1125 #, c-format msgid "functions in index expression must be marked IMMUTABLE" msgstr "le funzioni nell'espressione dell'indice devono essere marcate IMMUTABLE" -#: commands/indexcmds.c:1135 +#: commands/indexcmds.c:1148 #, c-format msgid "could not determine which collation to use for index expression" msgstr "non è stato possibile determinare quale ordinamento usare per l'espressione dell'indice" -#: commands/indexcmds.c:1143 commands/tablecmds.c:13326 commands/typecmds.c:831 -#: parser/parse_expr.c:2730 parser/parse_type.c:549 parser/parse_utilcmd.c:3103 -#: utils/adt/misc.c:661 +#: commands/indexcmds.c:1156 commands/tablecmds.c:13396 +#: commands/typecmds.c:831 parser/parse_expr.c:2763 parser/parse_type.c:549 +#: parser/parse_utilcmd.c:3113 utils/adt/misc.c:661 #, c-format msgid "collations are not supported by type %s" msgstr "gli ordinamenti non sono supportati dal tipo %s" -#: commands/indexcmds.c:1181 +#: commands/indexcmds.c:1194 #, c-format msgid "operator %s is not commutative" msgstr "l'operatore %s non è commutativo" -#: commands/indexcmds.c:1183 +#: commands/indexcmds.c:1196 #, c-format msgid "Only commutative operators can be used in exclusion constraints." msgstr "Solo operatori commutativi possono essere usati nei vincoli di esclusione." -#: commands/indexcmds.c:1209 +#: commands/indexcmds.c:1222 #, c-format msgid "operator %s is not a member of operator family \"%s\"" msgstr "l'operatore %s non è membro della famiglia di operatori \"%s\"" -#: commands/indexcmds.c:1212 +#: commands/indexcmds.c:1225 #, c-format msgid "The exclusion operator must be related to the index operator class for the constraint." msgstr "L'operatore di esclusione dev'essere correlato alla classe di operatori dell'indice per il vincolo." -#: commands/indexcmds.c:1247 +#: commands/indexcmds.c:1260 #, c-format msgid "access method \"%s\" does not support ASC/DESC options" msgstr "il metodo di accesso \"%s\" non supporta le opzioni ASC/DESC" -#: commands/indexcmds.c:1252 +#: commands/indexcmds.c:1265 #, c-format msgid "access method \"%s\" does not support NULLS FIRST/LAST options" msgstr "il metodo di accesso \"%s\" non supporta le opzioni NULLS FIRST/LAST" -#: commands/indexcmds.c:1311 commands/typecmds.c:1928 +#: commands/indexcmds.c:1324 commands/typecmds.c:1949 #, c-format msgid "data type %s has no default operator class for access method \"%s\"" msgstr "il tipo di dati %s non ha una classe di operatori predefinita per il metodo di accesso \"%s\"" -#: commands/indexcmds.c:1313 +#: commands/indexcmds.c:1326 #, c-format msgid "You must specify an operator class for the index or define a default operator class for the data type." 
msgstr "Devi specificare una classe di operatori per l'indice o definire una classe di operatori predefinita per il tipo di dati" -#: commands/indexcmds.c:1342 commands/indexcmds.c:1350 +#: commands/indexcmds.c:1355 commands/indexcmds.c:1363 #: commands/opclasscmds.c:205 #, c-format msgid "operator class \"%s\" does not exist for access method \"%s\"" msgstr "la classe di operatori \"%s\" non esiste per il metodo di accesso \"%s\"" -#: commands/indexcmds.c:1363 commands/typecmds.c:1916 +#: commands/indexcmds.c:1376 commands/typecmds.c:1937 #, c-format msgid "operator class \"%s\" does not accept data type %s" msgstr "la classe di operatori \"%s\" non accetta il tipo di dati %s" -#: commands/indexcmds.c:1453 +#: commands/indexcmds.c:1466 #, c-format msgid "there are multiple default operator classes for data type %s" msgstr "il tipo di dati %s ha più di una classe di operatori predefinita" -#: commands/indexcmds.c:1844 +#: commands/indexcmds.c:1857 #, c-format msgid "table \"%s\" has no indexes" msgstr "la tabella \"%s\" non ha indici" -#: commands/indexcmds.c:1899 +#: commands/indexcmds.c:1912 #, c-format msgid "can only reindex the currently open database" msgstr "è possibile reindicizzare solo il database corrente" -#: commands/indexcmds.c:1999 +#: commands/indexcmds.c:2012 #, c-format msgid "table \"%s.%s\" was reindexed" msgstr "la tabella \"%s.%s\" è stata reindicizzata" @@ -7694,13 +7530,13 @@ msgstr "la funzione di stima del join %s deve restituire il tipo %s" msgid "operator attribute \"%s\" cannot be changed" msgstr "l'attributo dell'operatore \"%s\" non può essere cambiato" -#: commands/policy.c:87 commands/policy.c:397 commands/policy.c:486 -#: commands/tablecmds.c:1131 commands/tablecmds.c:1501 -#: commands/tablecmds.c:2494 commands/tablecmds.c:4695 -#: commands/tablecmds.c:7031 commands/tablecmds.c:12979 -#: commands/tablecmds.c:13014 commands/trigger.c:251 commands/trigger.c:1261 -#: commands/trigger.c:1370 rewrite/rewriteDefine.c:272 -#: rewrite/rewriteDefine.c:919 +#: commands/policy.c:87 commands/policy.c:397 commands/policy.c:487 +#: commands/tablecmds.c:1150 commands/tablecmds.c:1520 +#: commands/tablecmds.c:2507 commands/tablecmds.c:4704 +#: commands/tablecmds.c:7054 commands/tablecmds.c:13019 +#: commands/tablecmds.c:13054 commands/trigger.c:259 commands/trigger.c:1320 +#: commands/trigger.c:1429 rewrite/rewriteDefine.c:272 +#: rewrite/rewriteDefine.c:925 #, c-format msgid "permission denied: \"%s\" is a system catalog" msgstr "permesso negato: \"%s\" è un catalogo di sistema" @@ -7715,37 +7551,38 @@ msgstr "i ruoli specificati a parte PUBLIC verranno ignorati" msgid "All roles are members of the PUBLIC role." msgstr "TuttiTutti i ruoli sono membri del ruolo PUBLIC." 
-#: commands/policy.c:510 +#: commands/policy.c:511 #, c-format msgid "role \"%s\" could not be removed from policy \"%s\" on \"%s\"" msgstr "non è stato possibile rimuovere il ruolo \"%s\" dalla regola di sicurezza \"%s\" su \"%s\"" -#: commands/policy.c:716 +#: commands/policy.c:717 #, c-format msgid "WITH CHECK cannot be applied to SELECT or DELETE" msgstr "WITH CHECK non può essere applicato a SELECT o a DELETE" -#: commands/policy.c:725 commands/policy.c:1023 +#: commands/policy.c:726 commands/policy.c:1024 #, c-format msgid "only WITH CHECK expression allowed for INSERT" msgstr "solo le espressioni WITH CHECK sono consentite per INSERT" -#: commands/policy.c:798 commands/policy.c:1243 +#: commands/policy.c:799 commands/policy.c:1244 #, c-format msgid "policy \"%s\" for table \"%s\" already exists" msgstr "la regola di sicurezza \"%s\" per la tabella \"%s\" esiste già" -#: commands/policy.c:995 commands/policy.c:1271 commands/policy.c:1343 +#: commands/policy.c:996 commands/policy.c:1272 commands/policy.c:1344 #, c-format msgid "policy \"%s\" for table \"%s\" does not exist" msgstr "la regola di sicurezza \"%s\" per la tabella \"%s\" non esiste" -#: commands/policy.c:1013 +#: commands/policy.c:1014 #, c-format msgid "only USING expression allowed for SELECT, DELETE" msgstr "solo le espressioni USING sono permesse per SELECT e DELETE" -#: commands/portalcmds.c:58 commands/portalcmds.c:182 commands/portalcmds.c:234 +#: commands/portalcmds.c:58 commands/portalcmds.c:182 +#: commands/portalcmds.c:234 #, c-format msgid "invalid cursor name: must not be empty" msgstr "nome di cursore non valido: non deve essere vuoto" @@ -7761,7 +7598,7 @@ msgstr "il cursore \"%s\" non esiste" msgid "invalid statement name: must not be empty" msgstr "nome di istruzione non valido: non deve essere vuoto" -#: commands/prepare.c:141 parser/parse_param.c:304 tcop/postgres.c:1355 +#: commands/prepare.c:141 parser/parse_param.c:304 tcop/postgres.c:1349 #, c-format msgid "could not determine data type of parameter $%d" msgstr "non è stato possibile determinare il tipo di dato del parametro $%d" @@ -7826,7 +7663,7 @@ msgstr "I linguaggi supportati sono elencate nel catalogo di sistema pg_pltempla msgid "must be superuser to create custom procedural language" msgstr "solo i superutenti possono creare un linguaggio procedurale personalizzato" -#: commands/proclang.c:281 commands/trigger.c:549 commands/typecmds.c:457 +#: commands/proclang.c:281 commands/trigger.c:608 commands/typecmds.c:457 #: commands/typecmds.c:474 #, c-format msgid "changing return type of function %s from %s to %s" @@ -7834,8 +7671,8 @@ msgstr "modifica del tipo restituito dalla funzione %s da %s a %s" #: commands/publicationcmds.c:106 #, c-format -msgid "invalid publish list" -msgstr "lista di pubblicazione non valida" +msgid "invalid list syntax for \"publish\" option" +msgstr "sintassi di lista errata per l'opzione \"publish\"" #: commands/publicationcmds.c:122 #, c-format @@ -7912,123 +7749,123 @@ msgstr "il fornitore di etichette di sicurezza \"%s\" non è stato caricato" msgid "unlogged sequences are not supported" msgstr "le sequenze non loggate non sono supportate" -#: commands/sequence.c:707 +#: commands/sequence.c:699 #, c-format msgid "nextval: reached maximum value of sequence \"%s\" (%s)" msgstr "nextval: è stato raggiunto il valore massimo della sequenza \"%s\" (%s)" -#: commands/sequence.c:730 +#: commands/sequence.c:722 #, c-format msgid "nextval: reached minimum value of sequence \"%s\" (%s)" msgstr "nextval: è stato raggiunto il 
valore minimo della sequenza \"%s\" (%s)" -#: commands/sequence.c:848 +#: commands/sequence.c:840 #, c-format msgid "currval of sequence \"%s\" is not yet defined in this session" msgstr "il valore corrente della sequenza \"%s\" non è stato ancora definito in questa sessione" -#: commands/sequence.c:867 commands/sequence.c:873 +#: commands/sequence.c:859 commands/sequence.c:865 #, c-format msgid "lastval is not yet defined in this session" msgstr "lastval non è stato ancora definito in questa sessione" -#: commands/sequence.c:961 +#: commands/sequence.c:953 #, c-format msgid "setval: value %s is out of bounds for sequence \"%s\" (%s..%s)" msgstr "setval: il valore %s non rientra nei margini della sequenza \"%s\" (%s..%s)" -#: commands/sequence.c:1373 +#: commands/sequence.c:1358 #, c-format msgid "invalid sequence option SEQUENCE NAME" msgstr "opzione di sequenza SEQUENCE NAME non valida" -#: commands/sequence.c:1401 +#: commands/sequence.c:1384 #, c-format msgid "identity column type must be smallint, integer, or bigint" msgstr "il tipo della colonna identità deve essere smallint, integer o bigint" -#: commands/sequence.c:1402 +#: commands/sequence.c:1385 #, c-format msgid "sequence type must be smallint, integer, or bigint" msgstr "il tipo della sequenza deve essere smallint, integer o bigint" -#: commands/sequence.c:1439 +#: commands/sequence.c:1419 #, c-format msgid "INCREMENT must not be zero" msgstr "INCREMENT non può essere zero" -#: commands/sequence.c:1497 +#: commands/sequence.c:1472 #, c-format msgid "MAXVALUE (%s) is out of range for sequence data type %s" msgstr "MAXVALUE (%s) è al di fuori dell'intervallo consentito per il tipo di dati della sequenza %s" -#: commands/sequence.c:1536 +#: commands/sequence.c:1509 #, c-format msgid "MINVALUE (%s) is out of range for sequence data type %s" msgstr "MINVALUE (%s) è al di fuori dell'intervallo consentito per il tipo di dati della sequenza %s" -#: commands/sequence.c:1550 +#: commands/sequence.c:1523 #, c-format msgid "MINVALUE (%s) must be less than MAXVALUE (%s)" msgstr "MINVALUE (%s) deve essere minore del MAXVALUE (%s)" -#: commands/sequence.c:1579 +#: commands/sequence.c:1550 #, c-format msgid "START value (%s) cannot be less than MINVALUE (%s)" msgstr "il valore di START (%s) non può essere inferiore a quello di MINVALUE (%s)" -#: commands/sequence.c:1591 +#: commands/sequence.c:1562 #, c-format msgid "START value (%s) cannot be greater than MAXVALUE (%s)" msgstr "il valore di START (%s) non può essere superiore a quello di MAXVALUE (%s)" -#: commands/sequence.c:1621 +#: commands/sequence.c:1592 #, c-format msgid "RESTART value (%s) cannot be less than MINVALUE (%s)" msgstr "il valore di RESTART (%s) non può essere inferiore a quello di MINVALUE (%s)" -#: commands/sequence.c:1633 +#: commands/sequence.c:1604 #, c-format msgid "RESTART value (%s) cannot be greater than MAXVALUE (%s)" msgstr "il valore di RESTART (%s) non può essere superiore a quello di MAXVALUE (%s)" -#: commands/sequence.c:1649 +#: commands/sequence.c:1619 #, c-format msgid "CACHE (%s) must be greater than zero" msgstr "CACHE (%s) dev'essere maggiore di zero" -#: commands/sequence.c:1687 +#: commands/sequence.c:1656 #, c-format msgid "invalid OWNED BY option" msgstr "opzione OWNED BY non valida" -#: commands/sequence.c:1688 +#: commands/sequence.c:1657 #, c-format msgid "Specify OWNED BY table.column or OWNED BY NONE." msgstr "Specifica OWNED BY tabella.colonna oppure OWNED BY NONE." 
-#: commands/sequence.c:1713 +#: commands/sequence.c:1682 #, c-format msgid "referenced relation \"%s\" is not a table or foreign table" msgstr "la relazione referenziata \"%s\" non è una tabella né una tabella esterna" -#: commands/sequence.c:1720 +#: commands/sequence.c:1689 #, c-format msgid "sequence must have same owner as table it is linked to" msgstr "la sequenza deve avere lo stesso proprietario della tabella a cui è collegata" -#: commands/sequence.c:1724 +#: commands/sequence.c:1693 #, c-format msgid "sequence must be in same schema as table it is linked to" msgstr "la sequenza deve essere nello stesso schema della tabella a cui è collegata" -#: commands/sequence.c:1746 +#: commands/sequence.c:1715 #, c-format msgid "cannot change ownership of identity sequence" msgstr "non è possibile cambiare proprietario di una sequenza identità" -#: commands/sequence.c:1747 commands/tablecmds.c:9866 -#: commands/tablecmds.c:12442 +#: commands/sequence.c:1716 commands/tablecmds.c:9888 +#: commands/tablecmds.c:12482 #, c-format msgid "Sequence \"%s\" is linked to table \"%s\"." msgstr "La sequenza \"%s\" è collegata alla tabella \"%s\"." @@ -8058,11 +7895,6 @@ msgstr "la relazione \"%s\" non è una tabella, una tabella esterna o una vista msgid "only simple column references are allowed in CREATE STATISTICS" msgstr "solo riferimenti a colonne semplici sono consentiti in CREATE STATISTICS" -#: commands/statscmds.c:183 -#, c-format -msgid "column \"%s\" referenced in statistics does not exist" -msgstr "la colonna \"%s\" nominata nella statistica non esiste" - #: commands/statscmds.c:191 #, c-format msgid "statistics creation on system columns is not supported" @@ -8070,8 +7902,8 @@ msgstr "la creazione di statistiche su colonne di sistema non è supportata" #: commands/statscmds.c:198 #, c-format -msgid "column \"%s\" cannot be used in statistics because its type has no default btree operator class" -msgstr "la colonna \"%s\" non può essere usata in una statistica perché il suo tipo non ha una classe di operatori btree definita" +msgid "column \"%s\" cannot be used in statistics because its type %s has no default btree operator class" +msgstr "la colonna \"%s\" non può essere usata in una statistica perché il suo tipo %s non ha una classe di operatori btree definita" #: commands/statscmds.c:205 #, c-format @@ -8090,147 +7922,152 @@ msgstr "nome di colonna duplicato nella definizione della statistica" #: commands/statscmds.c:266 #, c-format -msgid "unrecognized statistic type \"%s\"" +msgid "unrecognized statistics kind \"%s\"" msgstr "tipo di statistica \"%s\" sconosciuto" -#: commands/subscriptioncmds.c:173 +#: commands/subscriptioncmds.c:187 #, c-format msgid "unrecognized subscription parameter: %s" msgstr "parametro di sottoscrizione sconosciuto: %s" -#: commands/subscriptioncmds.c:186 +#: commands/subscriptioncmds.c:200 #, c-format msgid "connect = false and enabled = true are mutually exclusive options" msgstr "connect = false ed enabled = true sono opzioni mutuamente esclusive" -#: commands/subscriptioncmds.c:191 +#: commands/subscriptioncmds.c:205 #, c-format msgid "connect = false and create_slot = true are mutually exclusive options" msgstr "connect = false e create_slot = true sono opzioni mutuamente esclusive" -#: commands/subscriptioncmds.c:196 +#: commands/subscriptioncmds.c:210 #, c-format msgid "connect = false and copy_data = true are mutually exclusive options" msgstr "connect = false e copy_data = true sono opzioni mutuamente esclusive" -#: commands/subscriptioncmds.c:213 +#: 
commands/subscriptioncmds.c:227 #, c-format msgid "slot_name = NONE and enabled = true are mutually exclusive options" msgstr "slot_name = NONE ed enabled = true sono opzioni mutuamente esclusive" -#: commands/subscriptioncmds.c:218 +#: commands/subscriptioncmds.c:232 #, c-format msgid "slot_name = NONE and create_slot = true are mutually exclusive options" msgstr "slot_name = NONE e create_slot = true sono opzioni mutuamente esclusive" -#: commands/subscriptioncmds.c:223 +#: commands/subscriptioncmds.c:237 #, c-format msgid "subscription with slot_name = NONE must also set enabled = false" msgstr "una sottoscrizione con slot_name = NONE deve avere anche enabled = false" -#: commands/subscriptioncmds.c:228 +#: commands/subscriptioncmds.c:242 #, c-format msgid "subscription with slot_name = NONE must also set create_slot = false" msgstr "una sottoscrizione con slot_name = NONE deve avere anche create_slot = false" -#: commands/subscriptioncmds.c:270 +#: commands/subscriptioncmds.c:284 #, c-format msgid "publication name \"%s\" used more than once" msgstr "nome di pubblicazione \"%s\" usato più di una volta" -#: commands/subscriptioncmds.c:332 +#: commands/subscriptioncmds.c:347 #, c-format msgid "must be superuser to create subscriptions" msgstr "occorre essere un superutente per creare sottoscrizioni" -#: commands/subscriptioncmds.c:412 commands/subscriptioncmds.c:508 -#: replication/logical/tablesync.c:798 replication/logical/worker.c:1579 +#: commands/subscriptioncmds.c:427 commands/subscriptioncmds.c:520 +#: replication/logical/tablesync.c:856 replication/logical/worker.c:1620 #, c-format msgid "could not connect to the publisher: %s" msgstr "connessione alla pubblicazione fallita: %s" -#: commands/subscriptioncmds.c:443 -#, c-format -msgid "synchronized table states" -msgstr "sincronizzazione degli stati della tabella" - -#: commands/subscriptioncmds.c:457 +#: commands/subscriptioncmds.c:469 #, c-format msgid "created replication slot \"%s\" on publisher" msgstr "creazione dello slot di replica \"%s\" sulla pubblicazione" -#: commands/subscriptioncmds.c:474 +#: commands/subscriptioncmds.c:486 #, c-format msgid "tables were not subscribed, you will have to run ALTER SUBSCRIPTION ... REFRESH PUBLICATION to subscribe the tables" msgstr "le tabelle non sono state sottoscritte, è necessario eseguire ALTER SUBSCRIPTION ... 
REFRESH PUBLICATION per sottoscrivere le tabelle" -#: commands/subscriptioncmds.c:564 +#: commands/subscriptioncmds.c:576 #, c-format -msgid "added subscription for table %s.%s" -msgstr "aggiunta sottoscrizione per la tabella %s.%s" +msgid "table \"%s.%s\" added to subscription \"%s\"" +msgstr "tabella \"%s.%s\" aggiunta alla sottoscrizione \"%s\"" -#: commands/subscriptioncmds.c:590 +#: commands/subscriptioncmds.c:600 #, c-format -msgid "removed subscription for table %s.%s" -msgstr "rimossa sottoscrizione per la tabella %s.%s" +msgid "table \"%s.%s\" removed from subscription \"%s\"" +msgstr "tabella \"%s.%s\" rimossa dalla sottoscrizione \"%s\"" -#: commands/subscriptioncmds.c:655 +#: commands/subscriptioncmds.c:669 #, c-format msgid "cannot set slot_name = NONE for enabled subscription" msgstr "non è possibile impostare slot_name = NONE per le sottoscrizioni attive" -#: commands/subscriptioncmds.c:689 +#: commands/subscriptioncmds.c:703 #, c-format msgid "cannot enable subscription that does not have a slot name" msgstr "non è possibile abilitare una sottoscrizione che non ha un nome di slot" -#: commands/subscriptioncmds.c:735 commands/subscriptioncmds.c:753 +#: commands/subscriptioncmds.c:749 +#, c-format +msgid "ALTER SUBSCRIPTION with refresh is not allowed for disabled subscriptions" +msgstr "ALTER SUBSCRIPTION con refresh non consentito per sottoscrizioni disabilitate" + +#: commands/subscriptioncmds.c:750 +#, c-format +msgid "Use ALTER SUBSCRIPTION ... SET PUBLICATION ... WITH (refresh = false)." +msgstr "Usa ALTER SUBSCRIPTION ... SET PUBLICATION ... WITH (refresh = false)." + +#: commands/subscriptioncmds.c:768 #, c-format msgid "ALTER SUBSCRIPTION ... REFRESH is not allowed for disabled subscriptions" msgstr "ALTER SUBSCRIPTION ... REFRESH non è consentito per sottoscrizioni disabilitate" -#: commands/subscriptioncmds.c:830 +#: commands/subscriptioncmds.c:847 #, c-format msgid "subscription \"%s\" does not exist, skipping" msgstr "la sottoscrizione \"%s\" non esiste, saltata" -#: commands/subscriptioncmds.c:931 +#: commands/subscriptioncmds.c:972 #, c-format msgid "could not connect to publisher when attempting to drop the replication slot \"%s\"" msgstr "non è possibile connettersi alla pubblicazione mentre si sta eliminando lo slot di replica \"%s\"" -#: commands/subscriptioncmds.c:933 commands/subscriptioncmds.c:947 -#: replication/logical/tablesync.c:847 replication/logical/tablesync.c:867 +#: commands/subscriptioncmds.c:974 commands/subscriptioncmds.c:988 +#: replication/logical/tablesync.c:906 replication/logical/tablesync.c:928 #, c-format msgid "The error was: %s" msgstr "L'errore è stato: %s" -#: commands/subscriptioncmds.c:934 +#: commands/subscriptioncmds.c:975 #, c-format msgid "Use ALTER SUBSCRIPTION ... SET (slot_name = NONE) to disassociate the subscription from the slot." msgstr "Usa ALTER SUBSCRIPTION ... SET (slot_name = NONE) per disassociare la sottoscrizione dallo slot." 
-#: commands/subscriptioncmds.c:945 +#: commands/subscriptioncmds.c:986 #, c-format msgid "could not drop the replication slot \"%s\" on publisher" msgstr "eliminazione dello slot di replica \"%s\" sulla pubblicazione fallita" -#: commands/subscriptioncmds.c:950 +#: commands/subscriptioncmds.c:991 #, c-format msgid "dropped replication slot \"%s\" on publisher" msgstr "eliminazione dello slot di replica \"%s\" sulla pubblicazione" -#: commands/subscriptioncmds.c:991 +#: commands/subscriptioncmds.c:1032 #, c-format msgid "permission denied to change owner of subscription \"%s\"" msgstr "permesso negato nel cambiare il proprietario della sottoscrizione \"%s\"" -#: commands/subscriptioncmds.c:993 +#: commands/subscriptioncmds.c:1034 #, c-format msgid "The owner of a subscription must be a superuser." msgstr "Il proprietario della sottoscrizione deve essere un superutente." -#: commands/subscriptioncmds.c:1106 +#: commands/subscriptioncmds.c:1147 #, c-format msgid "could not receive list of replicated tables from the publisher: %s" msgstr "errore nell'ottenere la lista delle tabelle replicate dalla pubblicazione: %s" @@ -8291,7 +8128,7 @@ msgstr "la vista materializzata \"%s\" non esiste, saltata" msgid "Use DROP MATERIALIZED VIEW to remove a materialized view." msgstr "Usa DROP MATERIALIZED VIEW per rimuovere una vista materializzata." -#: commands/tablecmds.c:245 parser/parse_utilcmd.c:1819 +#: commands/tablecmds.c:245 parser/parse_utilcmd.c:1829 #, c-format msgid "index \"%s\" does not exist" msgstr "l'indice \"%s\" non esiste" @@ -8314,8 +8151,8 @@ msgstr "\"%s\" non è un tipo" msgid "Use DROP TYPE to remove a type." msgstr "Usa DROP TYPE per eliminare un tipo." -#: commands/tablecmds.c:257 commands/tablecmds.c:9382 -#: commands/tablecmds.c:12222 +#: commands/tablecmds.c:257 commands/tablecmds.c:9404 +#: commands/tablecmds.c:12262 #, c-format msgid "foreign table \"%s\" does not exist" msgstr "la tabella esterna \"%s\" non esiste" @@ -8329,98 +8166,103 @@ msgstr "la tabella esterna \"%s\" non esiste, saltata" msgid "Use DROP FOREIGN TABLE to remove a foreign table." msgstr "Usa DROP FOREIGN TABLE per eliminare una tabella esterna." 
-#: commands/tablecmds.c:524 +#: commands/tablecmds.c:533 #, c-format msgid "ON COMMIT can only be used on temporary tables" msgstr "ON COMMIT può essere usato solo con le tabelle temporanee" -#: commands/tablecmds.c:552 +#: commands/tablecmds.c:561 #, c-format msgid "cannot create temporary table within security-restricted operation" msgstr "non è possibile creare la tabella temporanea nell'ambito di operazioni a sicurezza ristretta" -#: commands/tablecmds.c:653 +#: commands/tablecmds.c:662 #, c-format msgid "cannot create table with OIDs as partition of table without OIDs" msgstr "non è possibile creare una tabella con OID come partizione di una tabella senza OID" -#: commands/tablecmds.c:774 parser/parse_utilcmd.c:3270 +#: commands/tablecmds.c:783 parser/parse_utilcmd.c:3280 #, c-format msgid "\"%s\" is not partitioned" msgstr "\"%s\" non è partizionata" -#: commands/tablecmds.c:953 +#: commands/tablecmds.c:831 +#, c-format +msgid "cannot partition using more than %d columns" +msgstr "non è possibile partizionare usando più di %d colonne" + +#: commands/tablecmds.c:972 #, c-format msgid "DROP INDEX CONCURRENTLY does not support dropping multiple objects" msgstr "DROP INDEX CONCURRENTLY non supporta l'eliminazione di più di un oggetto" -#: commands/tablecmds.c:957 +#: commands/tablecmds.c:976 #, c-format msgid "DROP INDEX CONCURRENTLY does not support CASCADE" msgstr "DROP INDEX CONCURRENTLY non supporta CASCADE" -#: commands/tablecmds.c:1234 +#: commands/tablecmds.c:1253 #, c-format msgid "cannot truncate only a partitioned table" msgstr "non è possibile troncare solo una tabella partizionata" -#: commands/tablecmds.c:1235 +#: commands/tablecmds.c:1254 #, c-format msgid "Do not specify the ONLY keyword, or use truncate only on the partitions directly." msgstr "Non specificare la parola chiave ONLY, oppure usa TRUNCATE ONLY sulle partizioni direttamente." 
-#: commands/tablecmds.c:1263 +#: commands/tablecmds.c:1282 #, c-format msgid "truncate cascades to table \"%s\"" msgstr "truncate si propaga in cascata alla tabella \"%s\"" -#: commands/tablecmds.c:1511 +#: commands/tablecmds.c:1530 #, c-format msgid "cannot truncate temporary tables of other sessions" msgstr "non è possibile troncare tabelle temporanee di altre sessioni" -#: commands/tablecmds.c:1742 commands/tablecmds.c:10966 +#: commands/tablecmds.c:1761 commands/tablecmds.c:10989 #, c-format msgid "cannot inherit from partitioned table \"%s\"" msgstr "non è possibile ereditare dalla tabella partizionata \"%s\"" -#: commands/tablecmds.c:1747 +#: commands/tablecmds.c:1766 #, c-format msgid "cannot inherit from partition \"%s\"" msgstr "non è possibile ereditare dalla partizione \"%s\"" -#: commands/tablecmds.c:1755 parser/parse_utilcmd.c:2030 +#: commands/tablecmds.c:1774 parser/parse_utilcmd.c:2040 #, c-format msgid "inherited relation \"%s\" is not a table or foreign table" msgstr "la relazione ereditata \"%s\" non è una tabella o tabella esterna" -#: commands/tablecmds.c:1763 commands/tablecmds.c:10945 +#: commands/tablecmds.c:1782 commands/tablecmds.c:10968 #, c-format msgid "cannot inherit from temporary relation \"%s\"" msgstr "non è possibile ereditare dalla relazione temporanea \"%s\"" -#: commands/tablecmds.c:1773 commands/tablecmds.c:10953 +#: commands/tablecmds.c:1792 commands/tablecmds.c:10976 #, c-format msgid "cannot inherit from temporary relation of another session" msgstr "non è possibile ereditare da una relazione temporanea di un'altra sessione" -#: commands/tablecmds.c:1790 commands/tablecmds.c:11064 +#: commands/tablecmds.c:1809 commands/tablecmds.c:11100 #, c-format msgid "relation \"%s\" would be inherited from more than once" msgstr "la relazione \"%s\" sarebbe ereditata più di una volta" -#: commands/tablecmds.c:1838 +#: commands/tablecmds.c:1857 #, c-format msgid "merging multiple inherited definitions of column \"%s\"" msgstr "unione delle definizioni multiple ereditate della colonna \"%s\"" -#: commands/tablecmds.c:1846 +#: commands/tablecmds.c:1865 #, c-format msgid "inherited column \"%s\" has a type conflict" msgstr "la colonna ereditata \"%s\" ha un conflitto di tipo" -#: commands/tablecmds.c:1848 commands/tablecmds.c:1871 -#: commands/tablecmds.c:2077 commands/tablecmds.c:2107 +#: commands/tablecmds.c:1867 commands/tablecmds.c:1890 +#: commands/tablecmds.c:2096 commands/tablecmds.c:2126 #: parser/parse_coerce.c:1650 parser/parse_coerce.c:1670 #: parser/parse_coerce.c:1690 parser/parse_coerce.c:1736 #: parser/parse_coerce.c:1775 parser/parse_param.c:218 @@ -8428,1004 +8270,1034 @@ msgstr "la colonna ereditata \"%s\" ha un conflitto di tipo" msgid "%s versus %s" msgstr "tra %s e %s" -#: commands/tablecmds.c:1857 +#: commands/tablecmds.c:1876 #, c-format msgid "inherited column \"%s\" has a collation conflict" msgstr "la colonna ereditata \"%s\" ha un conflitto di ordinamento" -#: commands/tablecmds.c:1859 commands/tablecmds.c:2089 -#: commands/tablecmds.c:5140 +#: commands/tablecmds.c:1878 commands/tablecmds.c:2108 +#: commands/tablecmds.c:5162 #, c-format msgid "\"%s\" versus \"%s\"" msgstr "tra \"%s\" e \"%s\"" -#: commands/tablecmds.c:1869 +#: commands/tablecmds.c:1888 #, c-format msgid "inherited column \"%s\" has a storage parameter conflict" msgstr "la colonna ereditata \"%s\" ha un conflitto di parametro di memorizzazione" -#: commands/tablecmds.c:1983 commands/tablecmds.c:8872 -#: parser/parse_utilcmd.c:1113 parser/parse_utilcmd.c:1464 -#: 
parser/parse_utilcmd.c:1540 +#: commands/tablecmds.c:2002 commands/tablecmds.c:8894 +#: parser/parse_utilcmd.c:1123 parser/parse_utilcmd.c:1474 +#: parser/parse_utilcmd.c:1550 #, c-format msgid "cannot convert whole-row table reference" msgstr "non è possibile convertire riferimenti ad una riga intera di tabella" -#: commands/tablecmds.c:1984 parser/parse_utilcmd.c:1114 +#: commands/tablecmds.c:2003 parser/parse_utilcmd.c:1124 #, c-format msgid "Constraint \"%s\" contains a whole-row reference to table \"%s\"." msgstr "Il vincolo \"%s\" contiene un riferimento alla riga intera alla tabella \"%s\"." -#: commands/tablecmds.c:2063 +#: commands/tablecmds.c:2082 #, c-format msgid "merging column \"%s\" with inherited definition" msgstr "unione della colonna \"%s\" con la definizione ereditata" -#: commands/tablecmds.c:2067 +#: commands/tablecmds.c:2086 #, c-format msgid "moving and merging column \"%s\" with inherited definition" msgstr "spostamento e unione della colonna \"%s\" con la definizione ereditata" -#: commands/tablecmds.c:2068 +#: commands/tablecmds.c:2087 #, c-format msgid "User-specified column moved to the position of the inherited column." msgstr "Colonna specificata dall'utente spostata nella posizione della colonna ereditata." -#: commands/tablecmds.c:2075 +#: commands/tablecmds.c:2094 #, c-format msgid "column \"%s\" has a type conflict" msgstr "la colonna \"%s\" ha un conflitto di tipi" -#: commands/tablecmds.c:2087 +#: commands/tablecmds.c:2106 #, c-format msgid "column \"%s\" has a collation conflict" msgstr "la colonna \"%s\" ha un conflitto di ordinamento" -#: commands/tablecmds.c:2105 +#: commands/tablecmds.c:2124 #, c-format msgid "column \"%s\" has a storage parameter conflict" msgstr "la colonna \"%s\" ha un conflitto di parametri di memorizzazione" -#: commands/tablecmds.c:2216 +#: commands/tablecmds.c:2235 #, c-format msgid "column \"%s\" inherits conflicting default values" msgstr "la colonna \"%s\" eredita valori predefiniti in conflitto tra loro" -#: commands/tablecmds.c:2218 +#: commands/tablecmds.c:2237 #, c-format msgid "To resolve the conflict, specify a default explicitly." msgstr "Per risolvere il conflitto, specificare esplicitamente un valore predefinito." 
-#: commands/tablecmds.c:2265 +#: commands/tablecmds.c:2284 #, c-format msgid "check constraint name \"%s\" appears multiple times but with different expressions" msgstr "il nome del vincolo di controllo \"%s\" compare più di una volta ma con espressioni diverse" -#: commands/tablecmds.c:2464 +#: commands/tablecmds.c:2477 #, c-format msgid "cannot rename column of typed table" msgstr "non è possibile rinominare la colonna di una tabella con tipo" -#: commands/tablecmds.c:2482 +#: commands/tablecmds.c:2495 #, c-format msgid "\"%s\" is not a table, view, materialized view, composite type, index, or foreign table" msgstr "\"%s\" non è una tabella, vista, vista materializzata, tipo composito, indice né una tabella esterna" -#: commands/tablecmds.c:2576 +#: commands/tablecmds.c:2589 #, c-format msgid "inherited column \"%s\" must be renamed in child tables too" msgstr "la colonna ereditata \"%s\" dev'essere rinominata anche nelle tabelle figlie" -#: commands/tablecmds.c:2608 +#: commands/tablecmds.c:2621 #, c-format msgid "cannot rename system column \"%s\"" msgstr "non è possibile rinominare la colonna di sistema \"%s\"" -#: commands/tablecmds.c:2623 +#: commands/tablecmds.c:2636 #, c-format msgid "cannot rename inherited column \"%s\"" msgstr "non è possibile rinominare la colonna ereditata \"%s\"" -#: commands/tablecmds.c:2775 +#: commands/tablecmds.c:2788 #, c-format msgid "inherited constraint \"%s\" must be renamed in child tables too" msgstr "i vincoli ereditati \"%s\" devono essere rinominati anche nelle tabelle figlie" -#: commands/tablecmds.c:2782 +#: commands/tablecmds.c:2795 #, c-format msgid "cannot rename inherited constraint \"%s\"" msgstr "non è possibile rinominare il vincolo ereditato \"%s\"" #. translator: first %s is a SQL command, eg ALTER TABLE -#: commands/tablecmds.c:3006 +#: commands/tablecmds.c:3019 #, c-format msgid "cannot %s \"%s\" because it is being used by active queries in this session" msgstr "non è possibile effettuare %s \"%s\" perché è in uso da query attive in questa sessione" #. 
translator: first %s is a SQL command, eg ALTER TABLE -#: commands/tablecmds.c:3015 +#: commands/tablecmds.c:3028 #, c-format msgid "cannot %s \"%s\" because it has pending trigger events" msgstr "non è possibile effettuare %s \"%s\" perché ha eventi trigger in sospeso" -#: commands/tablecmds.c:4138 +#: commands/tablecmds.c:4147 #, c-format msgid "cannot rewrite system relation \"%s\"" msgstr "non è possibile riscrivere la relazione di sistema \"%s\"" -#: commands/tablecmds.c:4144 +#: commands/tablecmds.c:4153 #, c-format msgid "cannot rewrite table \"%s\" used as a catalog table" msgstr "non è possibile riscrivere la tabella \"%s\" usata come tabella di catalogo" -#: commands/tablecmds.c:4154 +#: commands/tablecmds.c:4163 #, c-format msgid "cannot rewrite temporary tables of other sessions" msgstr "non è possibile riscrivere tabelle temporanee di altre sessioni" -#: commands/tablecmds.c:4430 +#: commands/tablecmds.c:4439 #, c-format msgid "rewriting table \"%s\"" msgstr "riscrittura della tabella \"%s\"" -#: commands/tablecmds.c:4434 +#: commands/tablecmds.c:4443 #, c-format msgid "verifying table \"%s\"" msgstr "verifica della tabella \"%s\"" -#: commands/tablecmds.c:4547 +#: commands/tablecmds.c:4556 #, c-format msgid "column \"%s\" contains null values" msgstr "la colonna \"%s\" contiene valori null" -#: commands/tablecmds.c:4562 commands/tablecmds.c:8141 +#: commands/tablecmds.c:4571 commands/tablecmds.c:8163 #, c-format msgid "check constraint \"%s\" is violated by some row" msgstr "il vincolo di controllo \"%s\" è violato da alcune righe" -#: commands/tablecmds.c:4578 +#: commands/tablecmds.c:4587 #, c-format msgid "partition constraint is violated by some row" msgstr "il vincolo di partizione è violato da qualche riga" -#: commands/tablecmds.c:4716 commands/trigger.c:245 rewrite/rewriteDefine.c:266 -#: rewrite/rewriteDefine.c:914 +#: commands/tablecmds.c:4725 commands/trigger.c:253 +#: rewrite/rewriteDefine.c:266 rewrite/rewriteDefine.c:920 #, c-format msgid "\"%s\" is not a table or view" msgstr "\"%s\" non è una tabella né una vista" -#: commands/tablecmds.c:4719 commands/trigger.c:1255 commands/trigger.c:1361 +#: commands/tablecmds.c:4728 commands/trigger.c:1314 commands/trigger.c:1420 #, c-format msgid "\"%s\" is not a table, view, or foreign table" msgstr "\"%s\" non è una tabella, una vista né una tabella esterna" -#: commands/tablecmds.c:4722 +#: commands/tablecmds.c:4731 #, c-format msgid "\"%s\" is not a table, view, materialized view, or index" msgstr "\"%s\" non è una tabella, una vista, una vista materializzata né un indice" -#: commands/tablecmds.c:4728 +#: commands/tablecmds.c:4737 #, c-format msgid "\"%s\" is not a table, materialized view, or index" msgstr "\"%s\" non è una tabella, una vista materializzata né un indice" -#: commands/tablecmds.c:4731 +#: commands/tablecmds.c:4740 #, c-format msgid "\"%s\" is not a table, materialized view, or foreign table" msgstr "\"%s\" non è una tabella, una vista materializzata né una tabella esterna" -#: commands/tablecmds.c:4734 +#: commands/tablecmds.c:4743 #, c-format msgid "\"%s\" is not a table or foreign table" msgstr "\"%s\" non è una tabella né una tabella esterna" -#: commands/tablecmds.c:4737 +#: commands/tablecmds.c:4746 #, c-format msgid "\"%s\" is not a table, composite type, or foreign table" msgstr "\"%s\" non è una tabella, un tipo composito né una tabella esterna" -#: commands/tablecmds.c:4740 commands/tablecmds.c:6103 +#: commands/tablecmds.c:4749 commands/tablecmds.c:6125 #, c-format msgid "\"%s\" is not a 
table, materialized view, index, or foreign table" msgstr "\"%s\" non è una tabella, una vista materializzata, un indice né una tabella esterna" -#: commands/tablecmds.c:4750 +#: commands/tablecmds.c:4759 #, c-format msgid "\"%s\" is of the wrong type" msgstr "\"%s\" è del tipo sbagliato" -#: commands/tablecmds.c:4904 commands/tablecmds.c:4911 +#: commands/tablecmds.c:4934 commands/tablecmds.c:4941 #, c-format msgid "cannot alter type \"%s\" because column \"%s.%s\" uses it" msgstr "non è possibile modificare il tipo \"%s\" perché la colonna \"%s.%s\" lo usa" -#: commands/tablecmds.c:4918 +#: commands/tablecmds.c:4948 #, c-format msgid "cannot alter foreign table \"%s\" because column \"%s.%s\" uses its row type" msgstr "non è possibile modificare la tabella esterna \"%s\" perché la colonna \"%s.%s\" usa il suo tipo di riga" -#: commands/tablecmds.c:4925 +#: commands/tablecmds.c:4955 #, c-format msgid "cannot alter table \"%s\" because column \"%s.%s\" uses its row type" msgstr "non è possibile modificare la tabella \"%s\" perché la colonna \"%s.%s\" usa il suo tipo di riga" -#: commands/tablecmds.c:4987 +#: commands/tablecmds.c:5009 #, c-format msgid "cannot alter type \"%s\" because it is the type of a typed table" msgstr "non è possibile modificare il tipo \"%s\" perché è il tipo di una tabella con tipo" -#: commands/tablecmds.c:4989 +#: commands/tablecmds.c:5011 #, c-format msgid "Use ALTER ... CASCADE to alter the typed tables too." msgstr "Usa ALTER ... CASCADE per modificare anche le tabelle con tipo." -#: commands/tablecmds.c:5033 +#: commands/tablecmds.c:5055 #, c-format msgid "type %s is not a composite type" msgstr "il tipo %s non è un tipo composito" -#: commands/tablecmds.c:5059 +#: commands/tablecmds.c:5081 #, c-format msgid "cannot add column to typed table" msgstr "non è possibile aggiungere una colonna ad una tabella con tipo" -#: commands/tablecmds.c:5103 +#: commands/tablecmds.c:5125 #, c-format msgid "cannot add column to a partition" msgstr "non è possibile aggiungere una colonna ad una partizione" -#: commands/tablecmds.c:5132 commands/tablecmds.c:11190 +#: commands/tablecmds.c:5154 commands/tablecmds.c:11226 #, c-format msgid "child table \"%s\" has different type for column \"%s\"" msgstr "la tabella figlia \"%s\" ha tipo diverso per la colonna \"%s\"" -#: commands/tablecmds.c:5138 commands/tablecmds.c:11197 +#: commands/tablecmds.c:5160 commands/tablecmds.c:11233 #, c-format msgid "child table \"%s\" has different collation for column \"%s\"" msgstr "la tabella figlia \"%s\" ha ordinamento diverso per la colonna \"%s\"" -#: commands/tablecmds.c:5148 +#: commands/tablecmds.c:5170 #, c-format msgid "child table \"%s\" has a conflicting \"%s\" column" msgstr "la tabella figlia \"%s\" ha la colonna \"%s\" in conflitto" -#: commands/tablecmds.c:5159 +#: commands/tablecmds.c:5181 #, c-format msgid "merging definition of column \"%s\" for child \"%s\"" msgstr "unione delle definizioni della colonna \"%s\" per la tabella figlia \"%s\"" -#: commands/tablecmds.c:5183 +#: commands/tablecmds.c:5205 #, c-format msgid "cannot recursively add identity column to table that has child tables" msgstr "non è possibile aggiungere ricorsivamente una colonna identità ad una tabella che ha tabelle figlie" -#: commands/tablecmds.c:5395 +#: commands/tablecmds.c:5417 #, c-format msgid "column must be added to child tables too" msgstr "la colonna deve essere aggiunta anche alle tabelle figlie" -#: commands/tablecmds.c:5470 +#: commands/tablecmds.c:5492 #, c-format msgid "column \"%s\" of 
relation \"%s\" already exists, skipping" msgstr "la colonna \"%s\" della relazione \"%s\" esiste già, saltata" -#: commands/tablecmds.c:5477 +#: commands/tablecmds.c:5499 #, c-format msgid "column \"%s\" of relation \"%s\" already exists" msgstr "la colonna \"%s\" della relazione \"%s\" esiste già" -#: commands/tablecmds.c:5575 commands/tablecmds.c:8554 +#: commands/tablecmds.c:5597 commands/tablecmds.c:8576 #, c-format msgid "cannot remove constraint from only the partitioned table when partitions exist" msgstr "non è possibile rimuovere un vincolo solo da una tabella partizionata se ci sono partizioni esistenti" -#: commands/tablecmds.c:5576 commands/tablecmds.c:5723 -#: commands/tablecmds.c:6520 commands/tablecmds.c:8555 +#: commands/tablecmds.c:5598 commands/tablecmds.c:5745 +#: commands/tablecmds.c:6542 commands/tablecmds.c:8577 #, c-format msgid "Do not specify the ONLY keyword." msgstr "Non specificare la parola chiave ONLY." -#: commands/tablecmds.c:5608 commands/tablecmds.c:5755 -#: commands/tablecmds.c:5810 commands/tablecmds.c:5885 -#: commands/tablecmds.c:5979 commands/tablecmds.c:6038 -#: commands/tablecmds.c:6162 commands/tablecmds.c:6216 -#: commands/tablecmds.c:6308 commands/tablecmds.c:8694 -#: commands/tablecmds.c:9405 +#: commands/tablecmds.c:5630 commands/tablecmds.c:5777 +#: commands/tablecmds.c:5832 commands/tablecmds.c:5907 +#: commands/tablecmds.c:6001 commands/tablecmds.c:6060 +#: commands/tablecmds.c:6184 commands/tablecmds.c:6238 +#: commands/tablecmds.c:6330 commands/tablecmds.c:8716 +#: commands/tablecmds.c:9427 #, c-format msgid "cannot alter system column \"%s\"" msgstr "non è possibile modificare la colonna di sistema \"%s\"" -#: commands/tablecmds.c:5614 commands/tablecmds.c:5816 +#: commands/tablecmds.c:5636 commands/tablecmds.c:5838 #, c-format msgid "column \"%s\" of relation \"%s\" is an identity column" msgstr "la colonna \"%s\" della relazione \"%s\" è una colonna identità" -#: commands/tablecmds.c:5650 +#: commands/tablecmds.c:5672 #, c-format msgid "column \"%s\" is in a primary key" msgstr "la colonna \"%s\" è in una chiave primaria" -#: commands/tablecmds.c:5672 +#: commands/tablecmds.c:5694 #, c-format msgid "column \"%s\" is marked NOT NULL in parent table" msgstr "la colonna \"%s\" è specificata NOT NULL nella tabella padre" -#: commands/tablecmds.c:5722 +#: commands/tablecmds.c:5744 #, c-format msgid "cannot add constraint to only the partitioned table when partitions exist" msgstr "non è possibile aggiungere un vincolo solo alla tabella partizionata se esistono partizioni" -#: commands/tablecmds.c:5818 +#: commands/tablecmds.c:5840 #, c-format msgid "Use ALTER TABLE ... ALTER COLUMN ... DROP IDENTITY instead." msgstr "Usa invece ALTER TABLE ... ALTER COLUMN ... DROP IDENTITY." 
-#: commands/tablecmds.c:5896 +#: commands/tablecmds.c:5918 #, c-format msgid "column \"%s\" of relation \"%s\" must be declared NOT NULL before identity can be added" msgstr "la colonna \"%s\" della relazione \"%s\" deve essere dichiarata NOT NULL prima che possa essere aggiunta l'identità" -#: commands/tablecmds.c:5902 +#: commands/tablecmds.c:5924 #, c-format msgid "column \"%s\" of relation \"%s\" is already an identity column" msgstr "la colonna \"%s\" della relazione \"%s\" è già una colonna identità" -#: commands/tablecmds.c:5908 +#: commands/tablecmds.c:5930 #, c-format msgid "column \"%s\" of relation \"%s\" already has a default value" msgstr "la colonna \"%s\" della relazione \"%s\" ha già un valore predefinito" -#: commands/tablecmds.c:5985 commands/tablecmds.c:6046 +#: commands/tablecmds.c:6007 commands/tablecmds.c:6068 #, c-format msgid "column \"%s\" of relation \"%s\" is not an identity column" msgstr "la colonna \"%s\" della relazione \"%s\" non è una colonna identità" -#: commands/tablecmds.c:6051 +#: commands/tablecmds.c:6073 #, c-format msgid "column \"%s\" of relation \"%s\" is not an identity column, skipping" msgstr "la colonna \"%s\" della relazione \"%s\" non è una colonna identità, saltata" -#: commands/tablecmds.c:6135 +#: commands/tablecmds.c:6157 #, c-format msgid "statistics target %d is too low" msgstr "il target delle statistiche %d è troppo basso" -#: commands/tablecmds.c:6143 +#: commands/tablecmds.c:6165 #, c-format msgid "lowering statistics target to %d" msgstr "target delle statistiche abbassato a %d" -#: commands/tablecmds.c:6288 +#: commands/tablecmds.c:6310 #, c-format msgid "invalid storage type \"%s\"" msgstr "tipo di immagazzinamento non valido \"%s\"" -#: commands/tablecmds.c:6320 +#: commands/tablecmds.c:6342 #, c-format msgid "column data type %s can only have storage PLAIN" msgstr "il tipo di dato della colonna %s può avere solo immagazzinamento PLAIN" -#: commands/tablecmds.c:6355 +#: commands/tablecmds.c:6377 #, c-format msgid "cannot drop column from typed table" msgstr "non è possibile eliminare la colonna da una tabella con tipo" -#: commands/tablecmds.c:6462 +#: commands/tablecmds.c:6484 #, c-format msgid "column \"%s\" of relation \"%s\" does not exist, skipping" msgstr "la colonna \"%s\" della relazione \"%s\" non esiste, saltato" -#: commands/tablecmds.c:6475 +#: commands/tablecmds.c:6497 #, c-format msgid "cannot drop system column \"%s\"" msgstr "non è possibile eliminare la colonna di sistema \"%s\"" -#: commands/tablecmds.c:6482 +#: commands/tablecmds.c:6504 #, c-format msgid "cannot drop inherited column \"%s\"" msgstr "non è possibile eliminare la colonna ereditata \"%s\"" -#: commands/tablecmds.c:6491 +#: commands/tablecmds.c:6513 #, c-format msgid "cannot drop column named in partition key" msgstr "non è possibile eliminare una colonna nominata come chiave di partizione" -#: commands/tablecmds.c:6495 +#: commands/tablecmds.c:6517 #, c-format msgid "cannot drop column referenced in partition key expression" msgstr "non è possibile eliminare una colonna referenziata in un'espressione di partizione" -#: commands/tablecmds.c:6519 +#: commands/tablecmds.c:6541 #, c-format msgid "cannot drop column from only the partitioned table when partitions exist" msgstr "non è possibile eliminare una colonna solo dalla tabella partizionata se esistono delle partizioni" -#: commands/tablecmds.c:6736 +#: commands/tablecmds.c:6759 #, c-format msgid "ALTER TABLE / ADD CONSTRAINT USING INDEX will rename index \"%s\" to \"%s\"" msgstr "ALTER TABLE 
/ ADD CONSTRAINT USING INDEX rinominerà l'indice \"%s\" in \"%s\"" -#: commands/tablecmds.c:6948 +#: commands/tablecmds.c:6971 #, c-format msgid "constraint must be added to child tables too" msgstr "il vincolo deve essere aggiunto anche alle tabelle figlie" -#: commands/tablecmds.c:7019 +#: commands/tablecmds.c:7042 #, c-format msgid "cannot reference partitioned table \"%s\"" msgstr "non è possibile referenziare la tabella partizionata \"%s\"" -#: commands/tablecmds.c:7025 +#: commands/tablecmds.c:7048 #, c-format msgid "referenced relation \"%s\" is not a table" msgstr "la relazione referenziata \"%s\" non è una tabella" -#: commands/tablecmds.c:7048 +#: commands/tablecmds.c:7071 #, c-format msgid "constraints on permanent tables may reference only permanent tables" msgstr "i vincoli su tabelle permanenti possono referenziare solo tabelle permanenti" -#: commands/tablecmds.c:7055 +#: commands/tablecmds.c:7078 #, c-format msgid "constraints on unlogged tables may reference only permanent or unlogged tables" msgstr "i vincoli su tabelle non loggate possono referenziare solo tabelle permanenti o non loggate" -#: commands/tablecmds.c:7061 +#: commands/tablecmds.c:7084 #, c-format msgid "constraints on temporary tables may reference only temporary tables" msgstr "i vincoli su tabelle temporanee possono referenziare solo tabelle temporanee" -#: commands/tablecmds.c:7065 +#: commands/tablecmds.c:7088 #, c-format msgid "constraints on temporary tables must involve temporary tables of this session" msgstr "i vincoli su tabelle temporanee devono riferirsi a tabelle temporanee di questa sessione" -#: commands/tablecmds.c:7125 +#: commands/tablecmds.c:7148 #, c-format msgid "number of referencing and referenced columns for foreign key disagree" msgstr "i numeri di colonne referenzianti e referenziate per la chiave esterna non combaciano" -#: commands/tablecmds.c:7232 +#: commands/tablecmds.c:7255 #, c-format msgid "foreign key constraint \"%s\" cannot be implemented" msgstr "non è possibile implementare il vincolo di chiave esterna \"%s\"" -#: commands/tablecmds.c:7235 +#: commands/tablecmds.c:7258 #, c-format msgid "Key columns \"%s\" and \"%s\" are of incompatible types: %s and %s." msgstr "Le colonne chiave \"%s\" e \"%s\" hanno tipi incompatibili: %s e %s." 
-#: commands/tablecmds.c:7441 commands/tablecmds.c:7607 -#: commands/tablecmds.c:8522 commands/tablecmds.c:8590 +#: commands/tablecmds.c:7463 commands/tablecmds.c:7629 +#: commands/tablecmds.c:8544 commands/tablecmds.c:8612 #, c-format msgid "constraint \"%s\" of relation \"%s\" does not exist" msgstr "il vincolo \"%s\" della relazione \"%s\" non esiste" -#: commands/tablecmds.c:7447 +#: commands/tablecmds.c:7469 #, c-format msgid "constraint \"%s\" of relation \"%s\" is not a foreign key constraint" msgstr "il vincolo \"%s\" della relazione \"%s\" non è una chiave esterna" -#: commands/tablecmds.c:7614 +#: commands/tablecmds.c:7636 #, c-format msgid "constraint \"%s\" of relation \"%s\" is not a foreign key or check constraint" msgstr "il vincolo \"%s\" della relazione \"%s\" non è una chiave esterna o un vincolo di controllo" -#: commands/tablecmds.c:7684 +#: commands/tablecmds.c:7706 #, c-format msgid "constraint must be validated on child tables too" msgstr "i vincoli devono essere validati anche sulle tabelle figlie" -#: commands/tablecmds.c:7752 +#: commands/tablecmds.c:7774 #, c-format msgid "column \"%s\" referenced in foreign key constraint does not exist" msgstr "la colonna \"%s\" referenziata dal vincolo di chiave esterna non esiste" -#: commands/tablecmds.c:7757 +#: commands/tablecmds.c:7779 #, c-format msgid "cannot have more than %d keys in a foreign key" msgstr "non possono esserci più di %d chiavi in una chiave esterna" -#: commands/tablecmds.c:7822 +#: commands/tablecmds.c:7844 #, c-format msgid "cannot use a deferrable primary key for referenced table \"%s\"" msgstr "non è possibile usare una chiave primaria deferita per la tabella referenziata \"%s\"" -#: commands/tablecmds.c:7839 +#: commands/tablecmds.c:7861 #, c-format msgid "there is no primary key for referenced table \"%s\"" msgstr "la tabella referenziata \"%s\" non ha una chiave primaria" -#: commands/tablecmds.c:7904 +#: commands/tablecmds.c:7926 #, c-format msgid "foreign key referenced-columns list must not contain duplicates" msgstr "la lista di colonne referenziate dalla chiave esterna non deve contenere duplicati" -#: commands/tablecmds.c:7998 +#: commands/tablecmds.c:8020 #, c-format msgid "cannot use a deferrable unique constraint for referenced table \"%s\"" msgstr "non è possibile usare un vincolo univoco deferito per la tabella referenziata \"%s\"" -#: commands/tablecmds.c:8003 +#: commands/tablecmds.c:8025 #, c-format msgid "there is no unique constraint matching given keys for referenced table \"%s\"" msgstr "non c'è alcun vincolo univoco che corrisponda alle chiavi indicate per la tabella referenziata \"%s\"" -#: commands/tablecmds.c:8174 +#: commands/tablecmds.c:8196 #, c-format msgid "validating foreign key constraint \"%s\"" msgstr "validazione del vincolo di chiave esterna \"%s\"" -#: commands/tablecmds.c:8476 +#: commands/tablecmds.c:8498 #, c-format msgid "cannot drop inherited constraint \"%s\" of relation \"%s\"" msgstr "non è possibile eliminare il vincolo ereditato \"%s\" della relazione \"%s\"" -#: commands/tablecmds.c:8528 +#: commands/tablecmds.c:8550 #, c-format msgid "constraint \"%s\" of relation \"%s\" does not exist, skipping" msgstr "il vincolo \"%s\" della relazione \"%s\" non esiste, saltato" -#: commands/tablecmds.c:8678 +#: commands/tablecmds.c:8700 #, c-format msgid "cannot alter column type of typed table" msgstr "non è possibile modificare il tipo di colonna di una tabella con tipo" -#: commands/tablecmds.c:8701 +#: commands/tablecmds.c:8723 #, c-format msgid "cannot alter 
inherited column \"%s\"" msgstr "non è possibile modificare la colonna ereditata \"%s\"" -#: commands/tablecmds.c:8710 +#: commands/tablecmds.c:8732 #, c-format msgid "cannot alter type of column named in partition key" msgstr "non è possibile cambiare il tipo di una colonna in una chiave di partizione" -#: commands/tablecmds.c:8714 +#: commands/tablecmds.c:8736 #, c-format msgid "cannot alter type of column referenced in partition key expression" msgstr "non è possibile cambiare il tipo di una colonna referenziata in una espressione di partizione" -#: commands/tablecmds.c:8764 +#: commands/tablecmds.c:8786 #, c-format msgid "result of USING clause for column \"%s\" cannot be cast automatically to type %s" msgstr "il risultato della clausola USING per la colonna \"%s\" non può essere convertito automaticamente al tipo %s" -#: commands/tablecmds.c:8767 +#: commands/tablecmds.c:8789 #, c-format msgid "You might need to add an explicit cast." msgstr "Potresti dover aggiungere una conversione esplicita." -#: commands/tablecmds.c:8771 +#: commands/tablecmds.c:8793 #, c-format msgid "column \"%s\" cannot be cast automatically to type %s" msgstr "la colonna \"%s\" non può essere convertita automaticamente al tipo %s" #. translator: USING is SQL, don't translate it -#: commands/tablecmds.c:8774 +#: commands/tablecmds.c:8796 #, c-format msgid "You might need to specify \"USING %s::%s\"." msgstr "Potresti dover specificare \"USING %s::%s\"." -#: commands/tablecmds.c:8873 +#: commands/tablecmds.c:8895 #, c-format msgid "USING expression contains a whole-row table reference." msgstr "L'espressione USING contiene un riferimento alla riga completa della tabella." -#: commands/tablecmds.c:8884 +#: commands/tablecmds.c:8906 #, c-format msgid "type of inherited column \"%s\" must be changed in child tables too" msgstr "il tipo della colonna ereditata \"%s\" deve essere cambiato anche nelle tabelle figlie" -#: commands/tablecmds.c:8971 +#: commands/tablecmds.c:8993 #, c-format msgid "cannot alter type of column \"%s\" twice" msgstr "non è possibile cambiare il tipo della colonna \"%s\" due volte" -#: commands/tablecmds.c:9007 +#: commands/tablecmds.c:9029 #, c-format msgid "default for column \"%s\" cannot be cast automatically to type %s" msgstr "il valore predefinito della colonna \"%s\" non può essere convertito automaticamente al tipo %s" -#: commands/tablecmds.c:9133 +#: commands/tablecmds.c:9155 #, c-format msgid "cannot alter type of a column used by a view or rule" msgstr "non è possibile cambiare il tipo di una colonna usata in una vista o una regola" -#: commands/tablecmds.c:9134 commands/tablecmds.c:9153 -#: commands/tablecmds.c:9171 +#: commands/tablecmds.c:9156 commands/tablecmds.c:9175 +#: commands/tablecmds.c:9193 #, c-format msgid "%s depends on column \"%s\"" msgstr "%s dipende dalla colonna \"%s\"" -#: commands/tablecmds.c:9152 +#: commands/tablecmds.c:9174 #, c-format msgid "cannot alter type of a column used in a trigger definition" msgstr "non è possibile cambiare il tipo di una colonna usata nella definizione di un trigger" -#: commands/tablecmds.c:9170 +#: commands/tablecmds.c:9192 #, c-format msgid "cannot alter type of a column used in a policy definition" msgstr "non è possibile cambiare il tipo di una colonna usata nella definizione di una regola di sicurezza" -#: commands/tablecmds.c:9845 +#: commands/tablecmds.c:9867 #, c-format msgid "cannot change owner of index \"%s\"" msgstr "non è possibile cambiare il proprietario dell'indice \"%s\"" -#: commands/tablecmds.c:9847 +#: 
commands/tablecmds.c:9869 #, c-format msgid "Change the ownership of the index's table, instead." msgstr "Cambia il proprietario della tabella dell'indice invece." -#: commands/tablecmds.c:9864 +#: commands/tablecmds.c:9886 #, c-format msgid "cannot change owner of sequence \"%s\"" msgstr "non è possibile cambiare il proprietario della sequenza \"%s\"" -#: commands/tablecmds.c:9878 commands/tablecmds.c:13089 +#: commands/tablecmds.c:9900 commands/tablecmds.c:13129 #, c-format msgid "Use ALTER TYPE instead." msgstr "È possibile usare ALTER TYPE invece." -#: commands/tablecmds.c:9887 +#: commands/tablecmds.c:9909 #, c-format msgid "\"%s\" is not a table, view, sequence, or foreign table" msgstr "\"%s\" non è una tabella, una vista, una sequenza né una tabella esterna" -#: commands/tablecmds.c:10228 +#: commands/tablecmds.c:10250 #, c-format msgid "cannot have multiple SET TABLESPACE subcommands" msgstr "non è possibile avere più di un sottocomando SET TABLESPACE" -#: commands/tablecmds.c:10302 +#: commands/tablecmds.c:10324 #, c-format msgid "\"%s\" is not a table, view, materialized view, index, or TOAST table" msgstr "\"%s\" non è una tabella, una vista, una vista materializzata, un indice né una tabella TOAST" -#: commands/tablecmds.c:10335 commands/view.c:504 +#: commands/tablecmds.c:10357 commands/view.c:504 #, c-format msgid "WITH CHECK OPTION is supported only on automatically updatable views" msgstr "WITH CHECK OPTION è supportato solo su viste aggiornabili automaticamente" -#: commands/tablecmds.c:10477 +#: commands/tablecmds.c:10499 #, c-format msgid "cannot move system relation \"%s\"" msgstr "non è possibile spostare la relazione di sistema \"%s\"" -#: commands/tablecmds.c:10493 +#: commands/tablecmds.c:10515 #, c-format msgid "cannot move temporary tables of other sessions" msgstr "non è possibile spostare tabelle temporanee di altre sessioni" -#: commands/tablecmds.c:10629 +#: commands/tablecmds.c:10651 #, c-format msgid "only tables, indexes, and materialized views exist in tablespaces" msgstr "solo tabelle, indici e viste materializzate esistono nei tablespace" -#: commands/tablecmds.c:10641 +#: commands/tablecmds.c:10663 #, c-format msgid "cannot move relations in to or out of pg_global tablespace" msgstr "non è possibile spostare relazioni dentro o fuori il tablespace pg_global" -#: commands/tablecmds.c:10733 +#: commands/tablecmds.c:10755 #, c-format msgid "aborting because lock on relation \"%s.%s\" is not available" msgstr "interruzione perché non c'è un lock disponibile sulla relazione \"%s.%s\"" -#: commands/tablecmds.c:10749 +#: commands/tablecmds.c:10771 #, c-format msgid "no matching relations in tablespace \"%s\" found" msgstr "nessuna relazione corrispondente trovata nel tablespace \"%s\"" -#: commands/tablecmds.c:10823 storage/buffer/bufmgr.c:915 +#: commands/tablecmds.c:10845 storage/buffer/bufmgr.c:915 #, c-format msgid "invalid page in block %u of relation %s" msgstr "pagina non valida nel blocco %u della relazione %s" -#: commands/tablecmds.c:10905 +#: commands/tablecmds.c:10927 #, c-format msgid "cannot change inheritance of typed table" msgstr "non è possibile cambiare ereditarietà di tabelle con tipo" -#: commands/tablecmds.c:10910 commands/tablecmds.c:11438 +#: commands/tablecmds.c:10932 commands/tablecmds.c:11474 #, c-format msgid "cannot change inheritance of a partition" msgstr "non è possibile cambiare ereditarietà di una partizione" -#: commands/tablecmds.c:10915 +#: commands/tablecmds.c:10937 #, c-format msgid "cannot change inheritance of partitioned table" msgstr "non 
è possibile cambiare ereditarietà di una tabella partizionata" -#: commands/tablecmds.c:10960 +#: commands/tablecmds.c:10983 #, c-format msgid "cannot inherit to temporary relation of another session" msgstr "non è possibile ereditare tabelle temporanee di un'altra sessione" -#: commands/tablecmds.c:10973 +#: commands/tablecmds.c:10996 #, c-format msgid "cannot inherit from a partition" msgstr "non è possibile ereditare da una partizione" -#: commands/tablecmds.c:10995 commands/tablecmds.c:13441 +#: commands/tablecmds.c:11018 commands/tablecmds.c:13523 #, c-format msgid "circular inheritance not allowed" msgstr "l'ereditarietà circolare non è consentita" -#: commands/tablecmds.c:10996 commands/tablecmds.c:13442 +#: commands/tablecmds.c:11019 commands/tablecmds.c:13524 #, c-format msgid "\"%s\" is already a child of \"%s\"." msgstr "\"%s\" è già figlia di \"%s\"." -#: commands/tablecmds.c:11004 +#: commands/tablecmds.c:11027 #, c-format msgid "table \"%s\" without OIDs cannot inherit from table \"%s\" with OIDs" msgstr "la tabella \"%s\" senza OID non può ereditare dalla tabella \"%s\" con OID" -#: commands/tablecmds.c:11208 +#: commands/tablecmds.c:11040 +#, c-format +msgid "trigger \"%s\" prevents table \"%s\" from becoming an inheritance child" +msgstr "il trigger \"%s\" impedisce alla tabella \"%s\" di diventare figlia di ereditarietà" + +#: commands/tablecmds.c:11042 +#, c-format +msgid "ROW triggers with transition tables are not supported in inheritance hierarchies" +msgstr "i trigger ROW con tabelle di transizioni non sono supportati nelle gerarchie ereditarie" + +#: commands/tablecmds.c:11244 #, c-format msgid "column \"%s\" in child table must be marked NOT NULL" msgstr "la colonna \"%s\" nella tabella figlia dev'essere marcata NOT NULL" -#: commands/tablecmds.c:11235 commands/tablecmds.c:11274 +#: commands/tablecmds.c:11271 commands/tablecmds.c:11310 #, c-format msgid "child table is missing column \"%s\"" msgstr "la tabella figlia non ha la colonna \"%s\"" -#: commands/tablecmds.c:11362 +#: commands/tablecmds.c:11398 #, c-format msgid "child table \"%s\" has different definition for check constraint \"%s\"" msgstr "la tabella figlia \"%s\" ha una definizione diversa del vincolo di controllo \"%s\"" -#: commands/tablecmds.c:11370 +#: commands/tablecmds.c:11406 #, c-format msgid "constraint \"%s\" conflicts with non-inherited constraint on child table \"%s\"" msgstr "il vincolo \"%s\" è in conflitto con un vincolo non ereditato nella tabella figlia \"%s\"" -#: commands/tablecmds.c:11381 +#: commands/tablecmds.c:11417 #, c-format msgid "constraint \"%s\" conflicts with NOT VALID constraint on child table \"%s\"" msgstr "il vincolo \"%s\" è in conflitto con un vincolo non valido nella tabella figlia \"%s\"" -#: commands/tablecmds.c:11416 +#: commands/tablecmds.c:11452 #, c-format msgid "child table is missing constraint \"%s\"" msgstr "la tabella figlia non ha il vincolo \"%s\"" -#: commands/tablecmds.c:11532 +#: commands/tablecmds.c:11568 #, c-format msgid "relation \"%s\" is not a partition of relation \"%s\"" msgstr "la relazione \"%s\" non è una partizione della relazione \"%s\"" -#: commands/tablecmds.c:11538 +#: commands/tablecmds.c:11574 #, c-format msgid "relation \"%s\" is not a parent of relation \"%s\"" msgstr "la relazione \"%s\" non è genitore della relazione \"%s\"" -#: commands/tablecmds.c:11762 +#: commands/tablecmds.c:11800 #, c-format msgid "typed tables cannot inherit" msgstr "le tabelle con tipo non possono essere ereditate" -#: commands/tablecmds.c:11793 +#: 
commands/tablecmds.c:11831 #, c-format msgid "table is missing column \"%s\"" msgstr "la tabella non ha la colonna \"%s\"" -#: commands/tablecmds.c:11803 +#: commands/tablecmds.c:11841 #, c-format msgid "table has column \"%s\" where type requires \"%s\"" msgstr "la tabella ha la colonna \"%s\" laddove il tipo richiede \"%s\"" -#: commands/tablecmds.c:11812 +#: commands/tablecmds.c:11850 #, c-format msgid "table \"%s\" has different type for column \"%s\"" msgstr "la tabella \"%s\" ha tipo diverso per la colonna \"%s\"" -#: commands/tablecmds.c:11825 +#: commands/tablecmds.c:11863 #, c-format msgid "table has extra column \"%s\"" msgstr "la tabella ha la colonna \"%s\" in eccesso" -#: commands/tablecmds.c:11876 +#: commands/tablecmds.c:11915 #, c-format msgid "\"%s\" is not a typed table" msgstr "\"%s\" non è una tabella con tipo" -#: commands/tablecmds.c:12057 +#: commands/tablecmds.c:12097 #, c-format msgid "cannot use non-unique index \"%s\" as replica identity" msgstr "non è possibile usare l'indice non univoco \"%s\" come identità di replica" -#: commands/tablecmds.c:12063 +#: commands/tablecmds.c:12103 #, c-format msgid "cannot use non-immediate index \"%s\" as replica identity" msgstr "non è possibile usare l'indice non immediato \"%s\" come identità di replica" -#: commands/tablecmds.c:12069 +#: commands/tablecmds.c:12109 #, c-format msgid "cannot use expression index \"%s\" as replica identity" msgstr "non è possibile usare l'indice su espressione \"%s\" come identità di replica" -#: commands/tablecmds.c:12075 +#: commands/tablecmds.c:12115 #, c-format msgid "cannot use partial index \"%s\" as replica identity" msgstr "non è possibile usare l'indice parziale \"%s\" come identità di replica" -#: commands/tablecmds.c:12081 +#: commands/tablecmds.c:12121 #, c-format msgid "cannot use invalid index \"%s\" as replica identity" msgstr "non è possibile usare l'indice non valido \"%s\" come identità di replica" -#: commands/tablecmds.c:12102 +#: commands/tablecmds.c:12142 #, c-format msgid "index \"%s\" cannot be used as replica identity because column %d is a system column" msgstr "l'indice \"%s\" non può essere usato come identità di replica perché la colonna %d è una colonna di sistema" -#: commands/tablecmds.c:12109 +#: commands/tablecmds.c:12149 #, c-format msgid "index \"%s\" cannot be used as replica identity because column \"%s\" is nullable" msgstr "l'indice \"%s\" non può essere usato come identità di replica perché la colonna \"%s\" può essere NULL" -#: commands/tablecmds.c:12302 +#: commands/tablecmds.c:12342 #, c-format msgid "cannot change logged status of table \"%s\" because it is temporary" msgstr "non è possibile cambiare lo stato di log della tabella \"%s\" perché è temporanea" -#: commands/tablecmds.c:12326 +#: commands/tablecmds.c:12366 #, c-format msgid "cannot change table \"%s\" to unlogged because it is part of a publication" msgstr "non è possibile rendere la tabella \"%s\" non loggata perché è parte di una pubblicazione" -#: commands/tablecmds.c:12328 +#: commands/tablecmds.c:12368 #, c-format msgid "Unlogged relations cannot be replicated." msgstr "Le tabelle non loggate non possono essere replicate." 
-#: commands/tablecmds.c:12373 +#: commands/tablecmds.c:12413 #, c-format msgid "could not change table \"%s\" to logged because it references unlogged table \"%s\"" msgstr "non è possibile cambiare lo stato della tabella \"%s\" a loggata perché referenzia la tabella non loggata \"%s\"" -#: commands/tablecmds.c:12383 +#: commands/tablecmds.c:12423 #, c-format msgid "could not change table \"%s\" to unlogged because it references logged table \"%s\"" msgstr "non è possibile cambiare lo stato della tabella \"%s\" a non loggata perché referenzia la tabella loggata \"%s\"" -#: commands/tablecmds.c:12441 +#: commands/tablecmds.c:12481 #, c-format msgid "cannot move an owned sequence into another schema" msgstr "non è possibile spostare una sequenza con proprietario in uno schema diverso" -#: commands/tablecmds.c:12547 +#: commands/tablecmds.c:12587 #, c-format msgid "relation \"%s\" already exists in schema \"%s\"" msgstr "la relazione \"%s\" esiste già nello schema \"%s\"" -#: commands/tablecmds.c:13073 +#: commands/tablecmds.c:13113 #, c-format msgid "\"%s\" is not a composite type" msgstr "\"%s\" non è un tipo composito" -#: commands/tablecmds.c:13104 +#: commands/tablecmds.c:13144 #, c-format msgid "\"%s\" is not a table, view, materialized view, sequence, or foreign table" msgstr "\"%s\" non è una tabella, una vista, una vista materializzata, una sequenza né una tabella esterna" -#: commands/tablecmds.c:13135 +#: commands/tablecmds.c:13177 #, c-format msgid "unrecognized partitioning strategy \"%s\"" msgstr "strategia di partizionamento \"%s\" sconosciuta" -#: commands/tablecmds.c:13161 +#: commands/tablecmds.c:13185 +#, c-format +msgid "cannot use \"list\" partition strategy with more than one column" +msgstr "non è possibile usare la strategia di partizionamento \"list\" con più di una colonna" + +#: commands/tablecmds.c:13210 #, c-format msgid "column \"%s\" appears more than once in partition key" msgstr "la colonna \"%s\" appare più di una volta nella chiave di partizione" -#: commands/tablecmds.c:13209 +#: commands/tablecmds.c:13263 #, c-format msgid "column \"%s\" named in partition key does not exist" msgstr "la colonna \"%s\" nominata nella chiave di partizione non esiste" -#: commands/tablecmds.c:13216 +#: commands/tablecmds.c:13270 #, c-format msgid "cannot use system column \"%s\" in partition key" msgstr "non è possibile usare la colonna di sistema \"%s\" nella chiave di partizione" -#: commands/tablecmds.c:13274 +#: commands/tablecmds.c:13333 #, c-format msgid "functions in partition key expression must be marked IMMUTABLE" msgstr "le funzioni nelle espressioni di partizione devono essere IMMUTABLE" -#: commands/tablecmds.c:13283 -#, c-format -msgid "cannot use constant expression as partition key" -msgstr "non è possibile usare un'espressione costante come chiave di partizione" - -#: commands/tablecmds.c:13297 +#: commands/tablecmds.c:13350 #, c-format msgid "partition key expressions cannot contain whole-row references" msgstr "l'espressione di partizione non può contenere riferimenti alla riga intera" -#: commands/tablecmds.c:13318 +#: commands/tablecmds.c:13357 +#, c-format +msgid "partition key expressions cannot contain system column references" +msgstr "l'espressione di partizione non può contenere riferimenti a colonne di sistema" + +#: commands/tablecmds.c:13367 +#, c-format +msgid "cannot use constant expression as partition key" +msgstr "non è possibile usare un'espressione costante come chiave di partizione" + +#: commands/tablecmds.c:13388 #, c-format msgid 
"could not determine which collation to use for partition expression" msgstr "non è possibile determinare quale ordinamento usare per l'espressione di partizione" -#: commands/tablecmds.c:13343 +#: commands/tablecmds.c:13413 #, c-format msgid "data type %s has no default btree operator class" msgstr "il tipo di dati %s non ha una classe di operatori btree predefinita" -#: commands/tablecmds.c:13345 +#: commands/tablecmds.c:13415 #, c-format msgid "You must specify a btree operator class or define a default btree operator class for the data type." msgstr "Devi specificare una classe di operatori btree o definire una classe di operatori btree predefinita per il tipo di dati." -#: commands/tablecmds.c:13392 +#: commands/tablecmds.c:13463 #, c-format msgid "\"%s\" is already a partition" msgstr "\"%s\" è già una partizione" -#: commands/tablecmds.c:13398 +#: commands/tablecmds.c:13469 #, c-format msgid "cannot attach a typed table as partition" msgstr "non è possibile agganciare una tabella con tipo come partizione" -#: commands/tablecmds.c:13414 +#: commands/tablecmds.c:13485 #, c-format msgid "cannot attach inheritance child as partition" msgstr "non è possibile agganciare una tabella figlia di ereditarietà come partizione" -#: commands/tablecmds.c:13428 +#: commands/tablecmds.c:13499 #, c-format msgid "cannot attach inheritance parent as partition" msgstr "non è possibile agganciare una tabella padre di ereditarietà come partizione" -#: commands/tablecmds.c:13451 +#: commands/tablecmds.c:13533 #, c-format msgid "cannot attach a permanent relation as partition of temporary relation \"%s\"" msgstr "non è possibile agganciare una relazione permanente come partizione della relazione temporanea \"%s\"" -#: commands/tablecmds.c:13459 +#: commands/tablecmds.c:13541 #, c-format msgid "cannot attach as partition of temporary relation of another session" msgstr "non è possibile agganciare una partizione di relazione temporanea di un'altra sessione" -#: commands/tablecmds.c:13466 +#: commands/tablecmds.c:13548 #, c-format msgid "cannot attach temporary relation of another session as partition" msgstr "non è possibile agganciare una relazione temporanea di un'altra sessione come partizione" -#: commands/tablecmds.c:13472 +#: commands/tablecmds.c:13554 #, c-format msgid "cannot attach table \"%s\" without OIDs as partition of table \"%s\" with OIDs" msgstr "non è possibile agganciare la tabella \"%s\" senza OID come partizione della tabella \"%s\" con OID" -#: commands/tablecmds.c:13480 +#: commands/tablecmds.c:13562 #, c-format msgid "cannot attach table \"%s\" with OIDs as partition of table \"%s\" without OIDs" msgstr "non è possibile agganciare la tabella \"%s\" con OID come partizione della tabella \"%s\" senza OID" -#: commands/tablecmds.c:13502 +#: commands/tablecmds.c:13584 #, c-format msgid "table \"%s\" contains column \"%s\" not found in parent \"%s\"" msgstr "la tabella \"%s\" contiene la colonna \"%s\" che non è presente nel padre \"%s\"" -#: commands/tablecmds.c:13505 +#: commands/tablecmds.c:13587 #, c-format -msgid "New partition should contain only the columns present in parent." -msgstr "La partizione dovrebbe contenere solo le colonne presenti nella tabella padre." +msgid "The new partition may contain only the columns present in parent." +msgstr "La partizione può contenere solo le colonne presenti nella tabella padre." 
-#: commands/tablecmds.c:13677 +#: commands/tablecmds.c:13599 +#, c-format +msgid "trigger \"%s\" prevents table \"%s\" from becoming a partition" +msgstr "il trigger \"%s\" impedisce alla tabella \"%s\" di diventare una partizione" + +#: commands/tablecmds.c:13601 commands/trigger.c:393 +#, c-format +msgid "ROW triggers with transition tables are not supported on partitions" +msgstr "i trigger ROW con tabelle di transizioni non sono supportati sulle partizioni" + +#: commands/tablecmds.c:13726 #, c-format msgid "partition constraint for table \"%s\" is implied by existing constraints" msgstr "il vincolo di partizione per la tabella \"%s\" è implicito dai vincoli esistenti" #: commands/tablespace.c:162 commands/tablespace.c:179 #: commands/tablespace.c:190 commands/tablespace.c:198 -#: commands/tablespace.c:623 replication/slot.c:1119 storage/file/copydir.c:47 +#: commands/tablespace.c:623 replication/slot.c:1178 storage/file/copydir.c:47 #, c-format msgid "could not create directory \"%s\": %m" msgstr "creazione della directory \"%s\" fallita: %m" @@ -9553,230 +9425,241 @@ msgstr "rimozioni delle directory per il tablespace %u fallita" msgid "You can remove the directories manually if necessary." msgstr "Puoi rimuovere le directory manualmente se necessario." -#: commands/trigger.c:187 +#: commands/trigger.c:190 #, c-format msgid "\"%s\" is a table" msgstr "\"%s\" non è una tabella" -#: commands/trigger.c:189 +#: commands/trigger.c:192 #, c-format msgid "Tables cannot have INSTEAD OF triggers." msgstr "Le tabelle non possono avere trigger INSTEAD OF." -#: commands/trigger.c:196 +#: commands/trigger.c:199 #, c-format msgid "Partitioned tables cannot have ROW triggers." msgstr "Le tabelle partizionate non possono avere trigger ROW." -#: commands/trigger.c:207 commands/trigger.c:214 commands/trigger.c:374 +#: commands/trigger.c:210 commands/trigger.c:217 commands/trigger.c:375 #, c-format msgid "\"%s\" is a view" msgstr "\"%s\" è una vista" -#: commands/trigger.c:209 +#: commands/trigger.c:212 #, c-format msgid "Views cannot have row-level BEFORE or AFTER triggers." msgstr "Le viste non possono avere trigger di riga BEFORE o AFTER." -#: commands/trigger.c:216 +#: commands/trigger.c:219 #, c-format msgid "Views cannot have TRUNCATE triggers." msgstr "Le viste non possono avere trigger TRUNCATE." -#: commands/trigger.c:224 commands/trigger.c:231 commands/trigger.c:238 -#: commands/trigger.c:367 +#: commands/trigger.c:227 commands/trigger.c:234 commands/trigger.c:246 +#: commands/trigger.c:368 #, c-format msgid "\"%s\" is a foreign table" msgstr "\"%s\" è una tabella esterna" -#: commands/trigger.c:226 +#: commands/trigger.c:229 #, c-format msgid "Foreign tables cannot have INSTEAD OF triggers." msgstr "Le tabelle esterne non possono avere trigger INSTEAD OF." -#: commands/trigger.c:233 +#: commands/trigger.c:236 #, c-format msgid "Foreign tables cannot have TRUNCATE triggers." msgstr "Le tabelle esterne non possono avere trigger TRUNCATE." -#: commands/trigger.c:240 +#: commands/trigger.c:248 #, c-format msgid "Foreign tables cannot have constraint triggers." msgstr "Le tabelle esterne non possono avere trigger di vincolo." 
-#: commands/trigger.c:303 +#: commands/trigger.c:311 #, c-format msgid "TRUNCATE FOR EACH ROW triggers are not supported" msgstr "i trigger TRUNCATE FOR EACH ROW non sono supportati" -#: commands/trigger.c:311 +#: commands/trigger.c:319 #, c-format msgid "INSTEAD OF triggers must be FOR EACH ROW" msgstr "i trigger INSTEAD OF devono essere FOR EACH ROW" -#: commands/trigger.c:315 +#: commands/trigger.c:323 #, c-format msgid "INSTEAD OF triggers cannot have WHEN conditions" msgstr "i trigger INSTEAD OF non possono avere condizioni WHEN" -#: commands/trigger.c:319 +#: commands/trigger.c:327 #, c-format msgid "INSTEAD OF triggers cannot have column lists" msgstr "i trigger INSTEAD OF non possono avere liste di colonne" -#: commands/trigger.c:348 +#: commands/trigger.c:356 #, c-format msgid "ROW variable naming in the REFERENCING clause is not supported" msgstr "non è possibile nominare la variabile ROW nella clausola REFERENCING" -#: commands/trigger.c:349 +#: commands/trigger.c:357 #, c-format msgid "Use OLD TABLE or NEW TABLE for naming transition tables." msgstr "Usa OLD TABLE o NEW TABLE per nominare le tabelle di transizione." -#: commands/trigger.c:362 -#, c-format -msgid "Triggers on partitioned tables cannot have transition tables." -msgstr "I trigger sulle tabelle partizionate non possono avere tabelle di transizione." - -#: commands/trigger.c:369 +#: commands/trigger.c:370 #, c-format msgid "Triggers on foreign tables cannot have transition tables." msgstr "I trigger sulle tabelle esterne non possono avere tabelle di transizione." -#: commands/trigger.c:376 +#: commands/trigger.c:377 #, c-format msgid "Triggers on views cannot have transition tables." msgstr "I trigger sulle viste non possono avere tabelle di transizione." -#: commands/trigger.c:381 +#: commands/trigger.c:397 +#, c-format +msgid "ROW triggers with transition tables are not supported on inheritance children" +msgstr "i trigger ROW con tabelle di transizioni non sono supportati nei figli ereditari" + +#: commands/trigger.c:403 #, c-format msgid "transition table name can only be specified for an AFTER trigger" msgstr "il nome di una tabella di transizione può essere specificato solo per i trigger AFTER" -#: commands/trigger.c:386 +#: commands/trigger.c:408 #, c-format msgid "TRUNCATE triggers with transition tables are not supported" msgstr "trigger TRUNCATE con tabelle di transizione non sono supportati" -#: commands/trigger.c:394 +#: commands/trigger.c:425 +#, c-format +msgid "transition tables cannot be specified for triggers with more than one event" +msgstr "non si può specificare una tabella di transizione per trigger con più di un evento" + +#: commands/trigger.c:436 +#, c-format +msgid "transition tables cannot be specified for triggers with column lists" +msgstr "non si può specificare una tabella di transizione per trigger con una lista di colonne" + +#: commands/trigger.c:453 #, c-format msgid "NEW TABLE can only be specified for an INSERT or UPDATE trigger" msgstr "NEW TABLE può essere specificato solo per i trigger INSERT o UPDATE" -#: commands/trigger.c:399 +#: commands/trigger.c:458 #, c-format msgid "NEW TABLE cannot be specified multiple times" msgstr "NEW TABLE non può essere specificato più volte" -#: commands/trigger.c:409 +#: commands/trigger.c:468 #, c-format msgid "OLD TABLE can only be specified for a DELETE or UPDATE trigger" msgstr "OLD TABLE può essere specificato solo per i trigger DELETE o UPDATE" -#: commands/trigger.c:414 +#: commands/trigger.c:473 #, c-format msgid "OLD TABLE cannot be 
specified multiple times" msgstr "OLD TABLE non può essere specificato più volte" -#: commands/trigger.c:424 +#: commands/trigger.c:483 #, c-format msgid "OLD TABLE name and NEW TABLE name cannot be the same" msgstr "OLD TABLE e NEW TABLE non possono avere lo stesso nome" -#: commands/trigger.c:481 commands/trigger.c:494 +#: commands/trigger.c:540 commands/trigger.c:553 #, c-format msgid "statement trigger's WHEN condition cannot reference column values" msgstr "la condizione WHEN del trigger di istruzione non può riferirsi a valori di colonna" -#: commands/trigger.c:486 +#: commands/trigger.c:545 #, c-format msgid "INSERT trigger's WHEN condition cannot reference OLD values" msgstr "la condizione WHEN dei trigger INSERT non può usare OLD" -#: commands/trigger.c:499 +#: commands/trigger.c:558 #, c-format msgid "DELETE trigger's WHEN condition cannot reference NEW values" msgstr "la condizione WHEN del trigger DELETE non può usare NEW" -#: commands/trigger.c:504 +#: commands/trigger.c:563 #, c-format msgid "BEFORE trigger's WHEN condition cannot reference NEW system columns" msgstr "la condizione WHEN del trigger BEFORE non può usare le colonne di sistema NEW" -#: commands/trigger.c:669 commands/trigger.c:1440 +#: commands/trigger.c:728 commands/trigger.c:1499 #, c-format msgid "trigger \"%s\" for relation \"%s\" already exists" msgstr "il trigger \"%s\" per la relazione \"%s\" esiste già" -#: commands/trigger.c:965 +#: commands/trigger.c:1024 msgid "Found referenced table's UPDATE trigger." msgstr "Trovato trigger UPDATE della tabella referenziata." -#: commands/trigger.c:966 +#: commands/trigger.c:1025 msgid "Found referenced table's DELETE trigger." msgstr "Trovato trigger DELETE della tabella referenziata." -#: commands/trigger.c:967 +#: commands/trigger.c:1026 msgid "Found referencing table's trigger." msgstr "Trovato trigger della tabella referenziante." 
-#: commands/trigger.c:1076 commands/trigger.c:1092 +#: commands/trigger.c:1135 commands/trigger.c:1151 #, c-format msgid "ignoring incomplete trigger group for constraint \"%s\" %s" msgstr "ignorato gruppo di trigger incompleto per il vincolo \"%s\" %s" -#: commands/trigger.c:1105 +#: commands/trigger.c:1164 #, c-format msgid "converting trigger group into constraint \"%s\" %s" msgstr "conversione del gruppo di trigger nel vincolo \"%s\" %s" -#: commands/trigger.c:1326 commands/trigger.c:1485 commands/trigger.c:1600 +#: commands/trigger.c:1385 commands/trigger.c:1544 commands/trigger.c:1659 #, c-format msgid "trigger \"%s\" for table \"%s\" does not exist" msgstr "il trigger \"%s\" per la tabella \"%s\" non esiste" -#: commands/trigger.c:1568 +#: commands/trigger.c:1627 #, c-format msgid "permission denied: \"%s\" is a system trigger" msgstr "permesso negato: \"%s\" è un trigger di sistema" -#: commands/trigger.c:2123 +#: commands/trigger.c:2206 #, c-format msgid "trigger function %u returned null value" msgstr "la funzione trigger %u ha restituito un valore null" -#: commands/trigger.c:2184 commands/trigger.c:2390 commands/trigger.c:2601 -#: commands/trigger.c:2880 +#: commands/trigger.c:2272 commands/trigger.c:2487 commands/trigger.c:2706 +#: commands/trigger.c:2991 #, c-format msgid "BEFORE STATEMENT trigger cannot return a value" msgstr "il trigger BEFORE STATEMENT non può restituire un valore" -#: commands/trigger.c:2942 executor/nodeModifyTable.c:746 -#: executor/nodeModifyTable.c:1041 +#: commands/trigger.c:3053 executor/nodeModifyTable.c:798 +#: executor/nodeModifyTable.c:1095 #, c-format msgid "tuple to be updated was already modified by an operation triggered by the current command" msgstr "la tupla da aggiornare era stata già modificata da un'operazione fatta eseguire da un comando corrente" -#: commands/trigger.c:2943 executor/nodeModifyTable.c:747 -#: executor/nodeModifyTable.c:1042 +#: commands/trigger.c:3054 executor/nodeModifyTable.c:799 +#: executor/nodeModifyTable.c:1096 #, c-format msgid "Consider using an AFTER trigger instead of a BEFORE trigger to propagate changes to other rows." msgstr "Considera l'utilizzo di un trigger AFTER invece di un trigger BEFORE per propagare i cambiamenti ad altre righe." 
-#: commands/trigger.c:2957 executor/execMain.c:2653 executor/nodeLockRows.c:216 -#: executor/nodeModifyTable.c:214 executor/nodeModifyTable.c:759 -#: executor/nodeModifyTable.c:1054 executor/nodeModifyTable.c:1220 +#: commands/trigger.c:3068 executor/execMain.c:2696 +#: executor/nodeLockRows.c:220 executor/nodeModifyTable.c:214 +#: executor/nodeModifyTable.c:811 executor/nodeModifyTable.c:1108 +#: executor/nodeModifyTable.c:1277 #, c-format msgid "could not serialize access due to concurrent update" msgstr "serializzazione dell'accesso fallita a causa di modifiche concorrenti" -#: commands/trigger.c:4852 +#: commands/trigger.c:5200 #, c-format msgid "constraint \"%s\" is not deferrable" msgstr "il vincolo \"%s\" non è deferibile" -#: commands/trigger.c:4875 +#: commands/trigger.c:5223 #, c-format msgid "constraint \"%s\" does not exist" msgstr "il vincolo \"%s\" non esiste" @@ -9881,7 +9764,7 @@ msgstr "formato di lista di parametri non valido: \"%s\"" msgid "must be superuser to create a base type" msgstr "solo un superutente può creare un tipo di base" -#: commands/typecmds.c:290 commands/typecmds.c:1414 +#: commands/typecmds.c:290 commands/typecmds.c:1435 #, c-format msgid "type attribute \"%s\" not recognized" msgstr "attributo del tipo \"%s\" non riconosciuto" @@ -9991,502 +9874,513 @@ msgstr "vincoli NULL/NOT NULL in conflitto" msgid "check constraints for domains cannot be marked NO INHERIT" msgstr "i vincoli di controllo per i domini non possono essere NO INHERIT" -#: commands/typecmds.c:993 commands/typecmds.c:2512 +#: commands/typecmds.c:993 commands/typecmds.c:2533 #, c-format msgid "unique constraints not possible for domains" msgstr "i vincoli univoci non sono ammessi per i domini" -#: commands/typecmds.c:999 commands/typecmds.c:2518 +#: commands/typecmds.c:999 commands/typecmds.c:2539 #, c-format msgid "primary key constraints not possible for domains" msgstr "i vincoli di chiave primaria non sono ammessi per i domini" -#: commands/typecmds.c:1005 commands/typecmds.c:2524 +#: commands/typecmds.c:1005 commands/typecmds.c:2545 #, c-format msgid "exclusion constraints not possible for domains" msgstr "i vincoli di esclusione non sono ammessi per i domini" -#: commands/typecmds.c:1011 commands/typecmds.c:2530 +#: commands/typecmds.c:1011 commands/typecmds.c:2551 #, c-format msgid "foreign key constraints not possible for domains" msgstr "i vincoli di chiave esterna non sono ammessi per i domini" -#: commands/typecmds.c:1020 commands/typecmds.c:2539 +#: commands/typecmds.c:1020 commands/typecmds.c:2560 #, c-format msgid "specifying constraint deferrability not supported for domains" msgstr "specificare la deferibilità dei vincoli non è ammesso per i domini" -#: commands/typecmds.c:1284 utils/cache/typcache.c:1648 +#: commands/typecmds.c:1305 utils/cache/typcache.c:1698 #, c-format msgid "%s is not an enum" msgstr "%s non è una enumerazione" -#: commands/typecmds.c:1422 +#: commands/typecmds.c:1443 #, c-format msgid "type attribute \"subtype\" is required" msgstr "l'attributo \"subtype\" del tipo è richiesto" -#: commands/typecmds.c:1427 +#: commands/typecmds.c:1448 #, c-format msgid "range subtype cannot be %s" msgstr "il sottotipo dell'intervallo non può essere %s" -#: commands/typecmds.c:1446 +#: commands/typecmds.c:1467 #, c-format msgid "range collation specified but subtype does not support collation" msgstr "è stato specificato un ordinamento per gli intervalli ma il sottotipo non supporta ordinamenti" -#: commands/typecmds.c:1680 +#: commands/typecmds.c:1701 #, c-format 
msgid "changing argument type of function %s from \"opaque\" to \"cstring\"" msgstr "modifica del tipo di argomento della funzione %s da \"opaque\" a \"cstring\"" -#: commands/typecmds.c:1731 +#: commands/typecmds.c:1752 #, c-format msgid "changing argument type of function %s from \"opaque\" to %s" msgstr "modifica del tipo di argomento della funzione %s da \"opaque\" a %s" -#: commands/typecmds.c:1830 +#: commands/typecmds.c:1851 #, c-format msgid "typmod_in function %s must return type %s" msgstr "la funzione %s typmod_in deve restituire il tipo %s" -#: commands/typecmds.c:1857 +#: commands/typecmds.c:1878 #, c-format msgid "typmod_out function %s must return type %s" msgstr "la funzione %s typmod_out deve restituire il tipo %s" -#: commands/typecmds.c:1884 +#: commands/typecmds.c:1905 #, c-format msgid "type analyze function %s must return type %s" msgstr "la funzione %s analyze deve restituire il tipo %s" -#: commands/typecmds.c:1930 +#: commands/typecmds.c:1951 #, c-format msgid "You must specify an operator class for the range type or define a default operator class for the subtype." msgstr "Occorre specificare una classe di operatori per l'intervallo o definire una classe di operatori predefinita per il sottotipo." -#: commands/typecmds.c:1961 +#: commands/typecmds.c:1982 #, c-format msgid "range canonical function %s must return range type" msgstr "la funzione canonica %s dell'intervallo deve restituire un intervallo" -#: commands/typecmds.c:1967 +#: commands/typecmds.c:1988 #, c-format msgid "range canonical function %s must be immutable" msgstr "la funzione canonica %s dell'intervallo deve essere immutabile" -#: commands/typecmds.c:2003 +#: commands/typecmds.c:2024 #, c-format msgid "range subtype diff function %s must return type %s" msgstr "la funzione %s di differenza sottotipo range deve restituire il tipo %s" -#: commands/typecmds.c:2010 +#: commands/typecmds.c:2031 #, c-format msgid "range subtype diff function %s must be immutable" msgstr "la funzione di differenza sottotipo %s deve essere immutabile" -#: commands/typecmds.c:2037 +#: commands/typecmds.c:2058 #, c-format msgid "pg_type array OID value not set when in binary upgrade mode" msgstr "valore di OID array di pg_type non impostato in modalità di aggiornamento binaria" -#: commands/typecmds.c:2340 +#: commands/typecmds.c:2361 #, c-format msgid "column \"%s\" of table \"%s\" contains null values" msgstr "la colonna \"%s\" della tabella \"%s\" contiene valori null" -#: commands/typecmds.c:2453 commands/typecmds.c:2636 +#: commands/typecmds.c:2474 commands/typecmds.c:2657 #, c-format msgid "constraint \"%s\" of domain \"%s\" does not exist" msgstr "il vincolo \"%s\" del dominio \"%s\" non esiste" -#: commands/typecmds.c:2457 +#: commands/typecmds.c:2478 #, c-format msgid "constraint \"%s\" of domain \"%s\" does not exist, skipping" msgstr "il vincolo \"%s\" del dominio \"%s\" non esiste, saltato" -#: commands/typecmds.c:2642 +#: commands/typecmds.c:2663 #, c-format msgid "constraint \"%s\" of domain \"%s\" is not a check constraint" msgstr "il vincolo \"%s\" del dominio \"%s\" non è un vincolo di controllo" -#: commands/typecmds.c:2747 +#: commands/typecmds.c:2768 #, c-format msgid "column \"%s\" of table \"%s\" contains values that violate the new constraint" msgstr "la colonna \"%s\" della tabella \"%s\" contiene valori che violano il nuovo vincolo" -#: commands/typecmds.c:2960 commands/typecmds.c:3247 commands/typecmds.c:3434 +#: commands/typecmds.c:2996 commands/typecmds.c:3201 commands/typecmds.c:3283 +#: 
commands/typecmds.c:3470 #, c-format msgid "%s is not a domain" msgstr "%s non è un dominio" -#: commands/typecmds.c:2994 +#: commands/typecmds.c:3030 #, c-format msgid "constraint \"%s\" for domain \"%s\" already exists" msgstr "il vincolo \"%s\" del dominio \"%s\" esiste già" -#: commands/typecmds.c:3045 +#: commands/typecmds.c:3081 #, c-format msgid "cannot use table references in domain check constraint" msgstr "non è possibile usare riferimenti a tabelle nel vincolo di controllo del dominio" -#: commands/typecmds.c:3177 commands/typecmds.c:3259 commands/typecmds.c:3551 +#: commands/typecmds.c:3213 commands/typecmds.c:3295 commands/typecmds.c:3587 #, c-format msgid "%s is a table's row type" msgstr "%s è il tipo della riga di una tabella" -#: commands/typecmds.c:3179 commands/typecmds.c:3261 commands/typecmds.c:3553 +#: commands/typecmds.c:3215 commands/typecmds.c:3297 commands/typecmds.c:3589 #, c-format msgid "Use ALTER TABLE instead." msgstr "Usa ALTER TABLE invece." -#: commands/typecmds.c:3186 commands/typecmds.c:3268 commands/typecmds.c:3466 +#: commands/typecmds.c:3222 commands/typecmds.c:3304 commands/typecmds.c:3502 #, c-format msgid "cannot alter array type %s" msgstr "non è possibile modificare il tipo di array %s" -#: commands/typecmds.c:3188 commands/typecmds.c:3270 commands/typecmds.c:3468 +#: commands/typecmds.c:3224 commands/typecmds.c:3306 commands/typecmds.c:3504 #, c-format msgid "You can alter type %s, which will alter the array type as well." msgstr "puoi modificare il tipo %s, il che modificherà il tipo dell'array come conseguenza." -#: commands/typecmds.c:3536 +#: commands/typecmds.c:3572 #, c-format msgid "type \"%s\" already exists in schema \"%s\"" msgstr "il tipo \"%s\" esiste già nello schema \"%s\"" -#: commands/user.c:142 +#: commands/user.c:141 #, c-format msgid "SYSID can no longer be specified" msgstr "SYSID non può più essere specificato" -#: commands/user.c:296 +#: commands/user.c:295 #, c-format msgid "must be superuser to create superusers" msgstr "solo i superutenti possono creare superutenti" -#: commands/user.c:303 +#: commands/user.c:302 #, c-format msgid "must be superuser to create replication users" msgstr "solo i superutenti possono creare utenti di replica" -#: commands/user.c:310 commands/user.c:685 +#: commands/user.c:309 commands/user.c:707 #, c-format msgid "must be superuser to change bypassrls attribute" msgstr "solo i superutenti possono cambiare l'attributo bypassrls" -#: commands/user.c:317 +#: commands/user.c:316 #, c-format msgid "permission denied to create role" msgstr "permesso di creare il ruolo negato" -#: commands/user.c:327 commands/user.c:1161 commands/user.c:1168 -#: utils/adt/acl.c:5246 utils/adt/acl.c:5252 gram.y:14485 gram.y:14520 +#: commands/user.c:326 commands/user.c:1195 commands/user.c:1202 +#: utils/adt/acl.c:5248 utils/adt/acl.c:5254 gram.y:14465 gram.y:14500 #, c-format msgid "role name \"%s\" is reserved" msgstr "il nome di ruolo \"%s\" è riservato" -#: commands/user.c:329 commands/user.c:1163 commands/user.c:1170 +#: commands/user.c:328 commands/user.c:1197 commands/user.c:1204 #, c-format msgid "Role names starting with \"pg_\" are reserved." msgstr "I nomi di ruoli che iniziano con \"pg_\" sono riservati." 
-#: commands/user.c:341 commands/user.c:1176 +#: commands/user.c:340 commands/user.c:1210 #, c-format msgid "role \"%s\" already exists" msgstr "il ruolo \"%s\" esiste già" -#: commands/user.c:415 +#: commands/user.c:406 commands/user.c:816 +#, c-format +msgid "empty string is not a valid password, clearing password" +msgstr "la stringa vuota non è una password valida, password rimossa" + +#: commands/user.c:437 #, c-format msgid "pg_authid OID value not set when in binary upgrade mode" msgstr "valore di OID di pg_authid non impostato in modalità di aggiornamento binaria" -#: commands/user.c:671 commands/user.c:881 commands/user.c:1415 -#: commands/user.c:1559 +#: commands/user.c:693 commands/user.c:915 commands/user.c:1449 +#: commands/user.c:1593 #, c-format msgid "must be superuser to alter superusers" msgstr "solo i superutenti possono modificare superutenti" -#: commands/user.c:678 +#: commands/user.c:700 #, c-format msgid "must be superuser to alter replication users" msgstr "solo i superutenti possono modificare utenti di replica" -#: commands/user.c:701 commands/user.c:889 +#: commands/user.c:723 commands/user.c:923 #, c-format msgid "permission denied" msgstr "permesso negato" -#: commands/user.c:919 +#: commands/user.c:953 #, c-format msgid "must be superuser to alter settings globally" msgstr "solo i superutenti possono alterare impostazioni globalmente" -#: commands/user.c:941 +#: commands/user.c:975 #, c-format msgid "permission denied to drop role" msgstr "permesso di eliminare il ruolo negato" -#: commands/user.c:965 +#: commands/user.c:999 #, c-format msgid "cannot use special role specifier in DROP ROLE" msgstr "non è possibile usare lo specificatore di ruolo speciale in DROP ROLE" -#: commands/user.c:975 commands/user.c:1132 commands/variable.c:822 -#: commands/variable.c:894 utils/adt/acl.c:5104 utils/adt/acl.c:5151 -#: utils/adt/acl.c:5179 utils/adt/acl.c:5197 utils/init/miscinit.c:503 +#: commands/user.c:1009 commands/user.c:1166 commands/variable.c:822 +#: commands/variable.c:894 utils/adt/acl.c:5106 utils/adt/acl.c:5153 +#: utils/adt/acl.c:5181 utils/adt/acl.c:5199 utils/init/miscinit.c:504 #, c-format msgid "role \"%s\" does not exist" msgstr "il ruolo \"%s\" non esiste" -#: commands/user.c:980 +#: commands/user.c:1014 #, c-format msgid "role \"%s\" does not exist, skipping" msgstr "il ruolo \"%s\" non esiste, saltato" -#: commands/user.c:992 commands/user.c:996 +#: commands/user.c:1026 commands/user.c:1030 #, c-format msgid "current user cannot be dropped" msgstr "l'utente corrente non può essere eliminato" -#: commands/user.c:1000 +#: commands/user.c:1034 #, c-format msgid "session user cannot be dropped" msgstr "l'utente della sessione non può essere eliminato" -#: commands/user.c:1011 +#: commands/user.c:1045 #, c-format msgid "must be superuser to drop superusers" msgstr "solo i superutenti possono eliminare superutenti" -#: commands/user.c:1027 +#: commands/user.c:1061 #, c-format msgid "role \"%s\" cannot be dropped because some objects depend on it" msgstr "il ruolo \"%s\" non può essere eliminato perché alcuni oggetti ne dipendono" -#: commands/user.c:1148 +#: commands/user.c:1182 #, c-format msgid "session user cannot be renamed" msgstr "l'utente della sessione non può essere rinominato" -#: commands/user.c:1152 +#: commands/user.c:1186 #, c-format msgid "current user cannot be renamed" msgstr "l'utente corrente non può essere eliminato" -#: commands/user.c:1186 +#: commands/user.c:1220 #, c-format msgid "must be superuser to rename superusers" msgstr 
"solo i superutenti possono rinominare superutenti" -#: commands/user.c:1193 +#: commands/user.c:1227 #, c-format msgid "permission denied to rename role" msgstr "permesso di rinominare il ruolo negato" -#: commands/user.c:1214 +#: commands/user.c:1248 #, c-format msgid "MD5 password cleared because of role rename" msgstr "L'MD5 della password è stato cancellato perché il ruolo è stato rinominato" -#: commands/user.c:1274 +#: commands/user.c:1308 #, c-format msgid "column names cannot be included in GRANT/REVOKE ROLE" msgstr "la colonna dei nomi non può essere inclusa in GRANT/REVOKE ROLE" -#: commands/user.c:1312 +#: commands/user.c:1346 #, c-format msgid "permission denied to drop objects" msgstr "permesso di eliminare gli oggetti negato" -#: commands/user.c:1339 commands/user.c:1348 +#: commands/user.c:1373 commands/user.c:1382 #, c-format msgid "permission denied to reassign objects" msgstr "permesso di riassegnare gli oggetti negato" -#: commands/user.c:1423 commands/user.c:1567 +#: commands/user.c:1457 commands/user.c:1601 #, c-format msgid "must have admin option on role \"%s\"" msgstr "occorre avere l'opzione admin sul ruolo \"%s\"" -#: commands/user.c:1440 +#: commands/user.c:1474 #, c-format msgid "must be superuser to set grantor" msgstr "solo i superutenti possono impostare chi ha concesso il privilegio" -#: commands/user.c:1465 +#: commands/user.c:1499 #, c-format msgid "role \"%s\" is a member of role \"%s\"" msgstr "il ruolo \"%s\" è membro del ruolo \"%s\"" -#: commands/user.c:1480 +#: commands/user.c:1514 #, c-format msgid "role \"%s\" is already a member of role \"%s\"" msgstr "il ruolo \"%s\" è già membro del ruolo \"%s\"" -#: commands/user.c:1589 +#: commands/user.c:1623 #, c-format msgid "role \"%s\" is not a member of role \"%s\"" msgstr "il ruolo \"%s\" non è membro del ruolo \"%s\"" -#: commands/vacuum.c:186 +#: commands/vacuum.c:188 #, c-format msgid "%s cannot be executed from VACUUM or ANALYZE" msgstr "%s non può essere eseguito da VACUUM o ANALYZE" -#: commands/vacuum.c:196 +#: commands/vacuum.c:198 #, c-format msgid "VACUUM option DISABLE_PAGE_SKIPPING cannot be used with FULL" msgstr "l'opzione DISABLE_PAGE_SKIPPING di VACUUM non può essere usata con FULL" -#: commands/vacuum.c:565 +#: commands/vacuum.c:577 #, c-format msgid "oldest xmin is far in the past" msgstr "il più vecchio xmin è molto lontano nel tempo" -#: commands/vacuum.c:566 +#: commands/vacuum.c:578 #, c-format msgid "Close open transactions soon to avoid wraparound problems." msgstr "Chiudi presto le transazioni per evitare problemi di wraparound." -#: commands/vacuum.c:605 +#: commands/vacuum.c:617 #, c-format msgid "oldest multixact is far in the past" msgstr "il multixact più vecchio è remoto" -#: commands/vacuum.c:606 +#: commands/vacuum.c:618 #, c-format msgid "Close open transactions with multixacts soon to avoid wraparound problems." msgstr "Chiudi presto le transazioni con multixact per evitare problemi di wraparound." -#: commands/vacuum.c:1176 +#: commands/vacuum.c:1188 #, c-format msgid "some databases have not been vacuumed in over 2 billion transactions" msgstr "alcuni database non sono stati ripuliti per più di 2 miliardi di transazioni" -#: commands/vacuum.c:1177 +#: commands/vacuum.c:1189 #, c-format msgid "You might have already suffered transaction-wraparound data loss." msgstr "Potresti aver già subito perdita di dati dovuta al wraparound delle transazioni." 
-#: commands/vacuum.c:1306 +#: commands/vacuum.c:1324 #, c-format msgid "skipping vacuum of \"%s\" --- lock not available" msgstr "pulizia di \"%s\" saltata --- lock non disponibile" -#: commands/vacuum.c:1332 +#: commands/vacuum.c:1350 #, c-format msgid "skipping \"%s\" --- only superuser can vacuum it" msgstr "\"%s\" saltato --- solo i superutenti possono pulirla" -#: commands/vacuum.c:1336 +#: commands/vacuum.c:1354 #, c-format msgid "skipping \"%s\" --- only superuser or database owner can vacuum it" msgstr "\"%s\" saltato --- solo i superutenti o il proprietario del database possono pulirla" -#: commands/vacuum.c:1340 +#: commands/vacuum.c:1358 #, c-format msgid "skipping \"%s\" --- only table or database owner can vacuum it" msgstr "\"%s\" saltato --- solo il proprietario del database o della tabella possono pulirla" -#: commands/vacuum.c:1359 +#: commands/vacuum.c:1377 #, c-format msgid "skipping \"%s\" --- cannot vacuum non-tables or special system tables" msgstr "\"%s\" saltato --- non è possibile ripulire non-tabelle o tabelle speciali di sistema" -#: commands/vacuumlazy.c:377 +#: commands/vacuumlazy.c:376 #, c-format msgid "automatic vacuum of table \"%s.%s.%s\": index scans: %d\n" msgstr "vacuum automatico della tabella \"%s.%s.%s\": scan di indici: %d\n" -#: commands/vacuumlazy.c:382 +#: commands/vacuumlazy.c:381 #, c-format msgid "pages: %u removed, %u remain, %u skipped due to pins, %u skipped frozen\n" msgstr "pagine: %u rimosse, %u restanti, %u saltate perché bloccate, %u congelate saltate\n" -#: commands/vacuumlazy.c:388 +#: commands/vacuumlazy.c:387 #, c-format msgid "tuples: %.0f removed, %.0f remain, %.0f are dead but not yet removable, oldest xmin: %u\n" msgstr "tuple: %.0f rimosse, %.0f restanti, %.0f sono morte ma non possono essere ancora rimosse, xmin più vecchio: %u\n" -#: commands/vacuumlazy.c:394 +#: commands/vacuumlazy.c:393 #, c-format msgid "buffer usage: %d hits, %d misses, %d dirtied\n" msgstr "uso dei buffer: %d colpiti, %d mancati, %d sporcati\n" -#: commands/vacuumlazy.c:398 +#: commands/vacuumlazy.c:397 #, c-format msgid "avg read rate: %.3f MB/s, avg write rate: %.3f MB/s\n" msgstr "velocità di lettura media: %.3f MB/s, velocità di scrittura media: %.3f MB/s\n" -#: commands/vacuumlazy.c:400 +#: commands/vacuumlazy.c:399 #, c-format msgid "system usage: %s" msgstr "utilizzo di sistema: %s" -#: commands/vacuumlazy.c:860 +#: commands/vacuumlazy.c:858 #, c-format msgid "relation \"%s\" page %u is uninitialized --- fixing" msgstr "la relazione \"%s\" pagina %u non è inizializzata --- in correzione" -#: commands/vacuumlazy.c:1330 +#: commands/vacuumlazy.c:1328 #, c-format msgid "\"%s\": removed %.0f row versions in %u pages" msgstr "\"%s\": %.0f versioni di riga rimosse in %u pagine" -#: commands/vacuumlazy.c:1340 +#: commands/vacuumlazy.c:1338 #, c-format msgid "%.0f dead row versions cannot be removed yet, oldest xmin: %u\n" msgstr "%.0f versioni di righe morte non possono essere ancora rimosse, xmin più vecchio: %u\n" -#: commands/vacuumlazy.c:1342 +#: commands/vacuumlazy.c:1340 #, c-format msgid "There were %.0f unused item pointers.\n" msgstr "C'erano %.0f puntatori ad elementi non usati.\n" -#: commands/vacuumlazy.c:1344 +#: commands/vacuumlazy.c:1342 #, c-format msgid "Skipped %u page due to buffer pins, " msgid_plural "Skipped %u pages due to buffer pins, " msgstr[0] "%u pagine saltate a causa di buffer pin, " msgstr[1] "%u pagine saltate a causa di buffer pin, " -#: commands/vacuumlazy.c:1348 +#: commands/vacuumlazy.c:1346 #, c-format msgid "%u 
frozen page.\n" msgid_plural "%u frozen pages.\n" msgstr[0] "%u pagine congelate.\n" msgstr[1] "%u pagine congelate.\n" -#: commands/vacuumlazy.c:1352 +#: commands/vacuumlazy.c:1350 #, c-format msgid "%u page is entirely empty.\n" msgid_plural "%u pages are entirely empty.\n" msgstr[0] "%u pagina è completamente vuota.\n" msgstr[1] "%u pagina sono completamente vuote.\n" -#: commands/vacuumlazy.c:1360 +#: commands/vacuumlazy.c:1354 +#, c-format +msgid "%s." +msgstr "%s." + +#: commands/vacuumlazy.c:1357 #, c-format msgid "\"%s\": found %.0f removable, %.0f nonremovable row versions in %u out of %u pages" msgstr "\"%s\": trovate %.0f versioni di riga removibili, %.0f non removibili in %u pagine su %u" -#: commands/vacuumlazy.c:1429 +#: commands/vacuumlazy.c:1426 #, c-format msgid "\"%s\": removed %d row versions in %d pages" msgstr "\"%s\": %d versioni di riga rimosse in %d pagine" -#: commands/vacuumlazy.c:1618 +#: commands/vacuumlazy.c:1614 #, c-format msgid "scanned index \"%s\" to remove %d row versions" msgstr "effettuata la scansione dell'indice \"%s\" per rimuovere %d versioni di riga" -#: commands/vacuumlazy.c:1664 +#: commands/vacuumlazy.c:1660 #, c-format msgid "index \"%s\" now contains %.0f row versions in %u pages" msgstr "l'indice \"%s\" ora contiene %.0f versioni di riga in %u pagine" -#: commands/vacuumlazy.c:1668 +#: commands/vacuumlazy.c:1664 #, c-format msgid "" "%.0f index row versions were removed.\n" @@ -10497,22 +10391,22 @@ msgstr "" "%u pagine dell'indice sono state cancellate, %u sono attualmente riusabili.\n" "%s." -#: commands/vacuumlazy.c:1763 +#: commands/vacuumlazy.c:1759 #, c-format msgid "\"%s\": stopping truncate due to conflicting lock request" msgstr "\"%s\": truncate interrotto a causa di una richiesta di lock in conflitto" -#: commands/vacuumlazy.c:1828 +#: commands/vacuumlazy.c:1824 #, c-format msgid "\"%s\": truncated %u to %u pages" msgstr "\"%s\": %u pagine ridotte a %u" -#: commands/vacuumlazy.c:1893 +#: commands/vacuumlazy.c:1889 #, c-format msgid "\"%s\": suspending truncate due to conflicting lock request" msgstr "\"%s\": annullamento del troncamento a causa di richieste di lock in conflitto" -#: commands/variable.c:165 utils/misc/guc.c:10019 utils/misc/guc.c:10081 +#: commands/variable.c:165 utils/misc/guc.c:10023 utils/misc/guc.c:10085 #, c-format msgid "Unrecognized key word: \"%s\"." msgstr "Parola chiave non riconosciuta: \"%s\"." @@ -10572,7 +10466,7 @@ msgstr "SET TRANSACTION ISOLATION LEVEL dev'essere invocato prima di qualsiasi q msgid "SET TRANSACTION ISOLATION LEVEL must not be called in a subtransaction" msgstr "SET TRANSACTION ISOLATION LEVEL non può essere invocato in una sotto-transazione" -#: commands/variable.c:571 storage/lmgr/predicate.c:1633 +#: commands/variable.c:571 storage/lmgr/predicate.c:1649 #, c-format msgid "cannot use serializable mode in a hot standby" msgstr "non è possibile usare la modalità SERIALIZABLE in un hot standby" @@ -10604,8 +10498,8 @@ msgstr "Non è possibile cambiare \"client_encoding\" ora." 
#: commands/variable.c:776 #, c-format -msgid "cannot change client_encoding in a parallel worker" -msgstr "non è possibile cambiare client_encoding in un worker parallelo" +msgid "cannot change client_encoding during a parallel operation" +msgstr "non è possibile cambiare client_encoding durante un'operazione parallela" #: commands/variable.c:912 #, c-format @@ -10702,124 +10596,124 @@ msgstr "il cursore \"%s\" non è posizionato su una riga" msgid "cursor \"%s\" is not a simply updatable scan of table \"%s\"" msgstr "il cursore \"%s\" non è una scansione semplice aggiornabile della tabella \"%s\"" -#: executor/execCurrent.c:231 executor/execExprInterp.c:1899 +#: executor/execCurrent.c:231 executor/execExprInterp.c:1889 #, c-format msgid "type of parameter %d (%s) does not match that when preparing the plan (%s)" msgstr "il tipo del parametro %d (%s) non combacia con quello usato alla preparazione del piano (%s)" -#: executor/execCurrent.c:243 executor/execExprInterp.c:1911 +#: executor/execCurrent.c:243 executor/execExprInterp.c:1901 #, c-format msgid "no value found for parameter %d" msgstr "nessun valore trovato per il parametro %d" -#: executor/execExpr.c:780 parser/parse_agg.c:764 +#: executor/execExpr.c:780 parser/parse_agg.c:779 #, c-format msgid "window function calls cannot be nested" msgstr "le chiamate a funzioni finestra non possono essere annidate" -#: executor/execExpr.c:1224 +#: executor/execExpr.c:1236 #, c-format msgid "target type is not an array" msgstr "il tipo di destinazione non è un array" -#: executor/execExpr.c:1547 +#: executor/execExpr.c:1559 #, c-format msgid "ROW() column has type %s instead of type %s" msgstr "la colonna ROW() è di tipo %s invece di %s" -#: executor/execExpr.c:2079 executor/execSRF.c:670 parser/parse_func.c:116 -#: parser/parse_func.c:543 parser/parse_func.c:902 +#: executor/execExpr.c:2094 executor/execSRF.c:672 parser/parse_func.c:120 +#: parser/parse_func.c:547 parser/parse_func.c:921 #, c-format msgid "cannot pass more than %d argument to a function" msgid_plural "cannot pass more than %d arguments to a function" msgstr[0] "non è possibile passare più di %d argomento ad una funzione" msgstr[1] "non è possibile passare più di %d argomenti ad una funzione" -#: executor/execExpr.c:2356 executor/execExpr.c:2362 -#: executor/execExprInterp.c:2210 utils/adt/arrayfuncs.c:260 +#: executor/execExpr.c:2371 executor/execExpr.c:2377 +#: executor/execExprInterp.c:2226 utils/adt/arrayfuncs.c:260 #: utils/adt/arrayfuncs.c:558 utils/adt/arrayfuncs.c:1288 -#: utils/adt/arrayfuncs.c:3361 utils/adt/arrayfuncs.c:5241 -#: utils/adt/arrayfuncs.c:5758 +#: utils/adt/arrayfuncs.c:3361 utils/adt/arrayfuncs.c:5239 +#: utils/adt/arrayfuncs.c:5756 #, c-format msgid "number of array dimensions (%d) exceeds the maximum allowed (%d)" msgstr "il numero di dimensioni dell'array (%d) eccede il massimo consentito (%d)" -#: executor/execExprInterp.c:1571 +#: executor/execExprInterp.c:1561 #, c-format msgid "attribute %d of type %s has been dropped" msgstr "l'attributo %d del tipo %s è stato rimosso" -#: executor/execExprInterp.c:1577 +#: executor/execExprInterp.c:1567 #, c-format msgid "attribute %d of type %s has wrong type" msgstr "l'attributo %d del tipo %s ha il tipo sbagliato" -#: executor/execExprInterp.c:1579 executor/execExprInterp.c:2496 +#: executor/execExprInterp.c:1569 executor/execExprInterp.c:2512 #, c-format msgid "Table has type %s, but query expects %s." msgstr "La tabella ha il tipo %s, ma la query prevede %s." 
-#: executor/execExprInterp.c:1989 +#: executor/execExprInterp.c:1979 #, c-format msgid "WHERE CURRENT OF is not supported for this table type" msgstr "WHERE CURRENT OF non è supportato per questo tipo di tabella" -#: executor/execExprInterp.c:2188 +#: executor/execExprInterp.c:2204 #, c-format msgid "cannot merge incompatible arrays" msgstr "non è possibile unire array non compatibili" -#: executor/execExprInterp.c:2189 +#: executor/execExprInterp.c:2205 #, c-format msgid "Array with element type %s cannot be included in ARRAY construct with element type %s." msgstr "Un array con tipo di elementi %s non può essere incluso nel costrutto ARRAY con elementi di tipo %s." -#: executor/execExprInterp.c:2230 executor/execExprInterp.c:2260 +#: executor/execExprInterp.c:2246 executor/execExprInterp.c:2276 #, c-format msgid "multidimensional arrays must have array expressions with matching dimensions" msgstr "gli array multidimensionali devono avere espressioni array di dimensioni corrispondenti" -#: executor/execExprInterp.c:2495 +#: executor/execExprInterp.c:2511 #, c-format msgid "attribute %d has wrong type" msgstr "l'attributo %d è di tipo errato" -#: executor/execExprInterp.c:2604 +#: executor/execExprInterp.c:2620 #, c-format msgid "array subscript in assignment must not be null" msgstr "l'indice di un array nell'assegnamento non può essere nullo" -#: executor/execExprInterp.c:3037 utils/adt/domains.c:148 +#: executor/execExprInterp.c:3053 utils/adt/domains.c:148 #, c-format msgid "domain %s does not allow null values" msgstr "il DOMAIN %s non consente valori nulli" -#: executor/execExprInterp.c:3052 utils/adt/domains.c:183 +#: executor/execExprInterp.c:3068 utils/adt/domains.c:183 #, c-format msgid "value for domain %s violates check constraint \"%s\"" msgstr "il valore per il DOMAIN %s viola il vincolo di controllo \"%s\"" -#: executor/execExprInterp.c:3419 executor/execExprInterp.c:3436 -#: executor/execExprInterp.c:3538 executor/nodeModifyTable.c:96 +#: executor/execExprInterp.c:3435 executor/execExprInterp.c:3452 +#: executor/execExprInterp.c:3554 executor/nodeModifyTable.c:96 #: executor/nodeModifyTable.c:106 executor/nodeModifyTable.c:123 #: executor/nodeModifyTable.c:131 #, c-format msgid "table row type and query-specified row type do not match" msgstr "il tipo della riga della tabella e il tipo di riga specificato dalla query non corrispondono" -#: executor/execExprInterp.c:3420 +#: executor/execExprInterp.c:3436 #, c-format msgid "Table row contains %d attribute, but query expects %d." msgid_plural "Table row contains %d attributes, but query expects %d." msgstr[0] "La riga della tabella contiene %d attributo, ma la query ne prevede %d." msgstr[1] "La riga della tabella contiene %d attributi, ma la query ne prevede %d." -#: executor/execExprInterp.c:3437 executor/nodeModifyTable.c:107 +#: executor/execExprInterp.c:3453 executor/nodeModifyTable.c:107 #, c-format msgid "Table has type %s at ordinal position %d, but query expects %s." msgstr "La tabella ha il tipo %s in posizione %d, ma la query prevede %s." -#: executor/execExprInterp.c:3539 executor/execSRF.c:925 +#: executor/execExprInterp.c:3555 executor/execSRF.c:927 #, c-format msgid "Physical storage mismatch on dropped attribute at ordinal position %d." msgstr "Il tipo di immagazzinamento fisico non corrisponde per l'attributo eliminato in posizione %d." @@ -10859,199 +10753,207 @@ msgstr "La chiave %s è in conflitto con la chiave esistente %s." msgid "Key conflicts with existing key." 
msgstr "Conflitti di chiave con chiave esistente." -#: executor/execMain.c:1111 +#: executor/execMain.c:1115 #, c-format msgid "cannot change sequence \"%s\"" msgstr "non è possibile modificare la sequenza \"%s\"" -#: executor/execMain.c:1117 +#: executor/execMain.c:1121 #, c-format msgid "cannot change TOAST relation \"%s\"" msgstr "non è possibile modificare la relazione TOAST \"%s\"" -#: executor/execMain.c:1135 rewrite/rewriteHandler.c:2704 +#: executor/execMain.c:1139 rewrite/rewriteHandler.c:2738 #, c-format msgid "cannot insert into view \"%s\"" msgstr "non è possibile inserire nella vista \"%s\"" -#: executor/execMain.c:1137 rewrite/rewriteHandler.c:2707 +#: executor/execMain.c:1141 rewrite/rewriteHandler.c:2741 #, c-format msgid "To enable inserting into the view, provide an INSTEAD OF INSERT trigger or an unconditional ON INSERT DO INSTEAD rule." msgstr "Per consentire inserimenti nella vista occorre fornire un trigger INSTEAD OF INSERT oppure una regola ON INSERT DO INSTEAD senza condizioni." -#: executor/execMain.c:1143 rewrite/rewriteHandler.c:2712 +#: executor/execMain.c:1147 rewrite/rewriteHandler.c:2746 #, c-format msgid "cannot update view \"%s\"" msgstr "non è possibile modificare la vista \"%s\"" -#: executor/execMain.c:1145 rewrite/rewriteHandler.c:2715 +#: executor/execMain.c:1149 rewrite/rewriteHandler.c:2749 #, c-format msgid "To enable updating the view, provide an INSTEAD OF UPDATE trigger or an unconditional ON UPDATE DO INSTEAD rule." msgstr "Per consentire modifiche alla vista occorre fornire un trigger INSTEAD OF UPDATE oppure una regola ON UPDATE DO INSTEAD senza condizioni." -#: executor/execMain.c:1151 rewrite/rewriteHandler.c:2720 +#: executor/execMain.c:1155 rewrite/rewriteHandler.c:2754 #, c-format msgid "cannot delete from view \"%s\"" msgstr "non è possibile cancellare dalla vista \"%s\"" -#: executor/execMain.c:1153 rewrite/rewriteHandler.c:2723 +#: executor/execMain.c:1157 rewrite/rewriteHandler.c:2757 #, c-format msgid "To enable deleting from the view, provide an INSTEAD OF DELETE trigger or an unconditional ON DELETE DO INSTEAD rule." msgstr "Per consentire eliminazioni dalla vista occorre fornire un trigger INSTEAD OF DELETE oppure una regola ON DELETE DO INSTEAD senza condizioni." 
-#: executor/execMain.c:1164 +#: executor/execMain.c:1168 #, c-format msgid "cannot change materialized view \"%s\"" msgstr "non è possibile modificare la vista materializzata \"%s\"" -#: executor/execMain.c:1176 +#: executor/execMain.c:1187 #, c-format msgid "cannot insert into foreign table \"%s\"" msgstr "non è possibile inserire nella tabella esterna \"%s\"" -#: executor/execMain.c:1182 +#: executor/execMain.c:1193 #, c-format msgid "foreign table \"%s\" does not allow inserts" msgstr "la tabella esterna \"%s\" non consente inserimenti" -#: executor/execMain.c:1189 +#: executor/execMain.c:1200 #, c-format msgid "cannot update foreign table \"%s\"" msgstr "non è possibile modificare la tabella esterna \"%s\"" -#: executor/execMain.c:1195 +#: executor/execMain.c:1206 #, c-format msgid "foreign table \"%s\" does not allow updates" msgstr "la tabella esterna \"%s\" non consente modifiche" -#: executor/execMain.c:1202 +#: executor/execMain.c:1213 #, c-format msgid "cannot delete from foreign table \"%s\"" msgstr "non è possibile eliminare dalla tabella esterna \"%s\"" -#: executor/execMain.c:1208 +#: executor/execMain.c:1219 #, c-format msgid "foreign table \"%s\" does not allow deletes" msgstr "la tabella esterna \"%s\" non consente cancellazioni" -#: executor/execMain.c:1219 +#: executor/execMain.c:1230 #, c-format msgid "cannot change relation \"%s\"" msgstr "non è possibile modificare la relazione \"%s\"" -#: executor/execMain.c:1246 +#: executor/execMain.c:1257 #, c-format msgid "cannot lock rows in sequence \"%s\"" msgstr "non è possibile bloccare righe nella sequenza \"%s\"" -#: executor/execMain.c:1253 +#: executor/execMain.c:1264 #, c-format msgid "cannot lock rows in TOAST relation \"%s\"" msgstr "non è possibile bloccare righe nella relazione TOAST \"%s\"" -#: executor/execMain.c:1260 +#: executor/execMain.c:1271 #, c-format msgid "cannot lock rows in view \"%s\"" msgstr "non è possibile bloccare righe vista \"%s\"" -#: executor/execMain.c:1268 +#: executor/execMain.c:1279 #, c-format msgid "cannot lock rows in materialized view \"%s\"" msgstr "non è possibile bloccare righe nella vista materializzata \"%s\"" -#: executor/execMain.c:1277 executor/execMain.c:2887 -#: executor/nodeLockRows.c:132 +#: executor/execMain.c:1288 executor/execMain.c:2930 +#: executor/nodeLockRows.c:136 #, c-format msgid "cannot lock rows in foreign table \"%s\"" msgstr "non è possibile bloccare righe nella tabella esterna \"%s\"" -#: executor/execMain.c:1283 +#: executor/execMain.c:1294 #, c-format msgid "cannot lock rows in relation \"%s\"" msgstr "non è possibile bloccare righe nella relazione \"%s\"" -#: executor/execMain.c:1947 +#: executor/execMain.c:1926 #, c-format -msgid "null value in column \"%s\" violates not-null constraint" -msgstr "valori null nella colonna \"%s\" violano il vincolo non-null" +msgid "new row for relation \"%s\" violates partition constraint" +msgstr "la nuova riga per la relazione \"%s\" viola il vincolo di partizione" -#: executor/execMain.c:1949 executor/execMain.c:1995 executor/execMain.c:2037 -#: executor/execMain.c:2122 +#: executor/execMain.c:1928 executor/execMain.c:2007 executor/execMain.c:2054 +#: executor/execMain.c:2165 #, c-format msgid "Failing row contains %s." msgstr "La riga in errore contiene %s." 
-#: executor/execMain.c:1993 +#: executor/execMain.c:2005 #, c-format -msgid "new row for relation \"%s\" violates check constraint \"%s\"" -msgstr "la nuova riga per la relazione \"%s\" viola il vincolo di controllo \"%s\"" +msgid "null value in column \"%s\" violates not-null constraint" +msgstr "valori null nella colonna \"%s\" violano il vincolo non-null" -#: executor/execMain.c:2035 +#: executor/execMain.c:2052 #, c-format -msgid "new row for relation \"%s\" violates partition constraint" -msgstr "la nuova riga per la partizione \"%s\" viola il vincolo di partizione" +msgid "new row for relation \"%s\" violates check constraint \"%s\"" +msgstr "la nuova riga per la relazione \"%s\" viola il vincolo di controllo \"%s\"" -#: executor/execMain.c:2120 +#: executor/execMain.c:2163 #, c-format msgid "new row violates check option for view \"%s\"" msgstr "la nuova riga viola l'opzione di controllo della vista \"%s\"" -#: executor/execMain.c:2130 +#: executor/execMain.c:2173 #, c-format msgid "new row violates row-level security policy \"%s\" for table \"%s\"" msgstr "la nuova riga viola la regola di sicurezza per riga \"%s\" per la tabella \"%s\"" -#: executor/execMain.c:2135 +#: executor/execMain.c:2178 #, c-format msgid "new row violates row-level security policy for table \"%s\"" msgstr "la nuova riga viola la regola di sicurezza per riga per la tabella \"%s\"" -#: executor/execMain.c:2142 +#: executor/execMain.c:2185 #, c-format msgid "new row violates row-level security policy \"%s\" (USING expression) for table \"%s\"" msgstr "la nuova riga viola la regola di sicurezza per riga \"%s\" (espressione USING) per la tabella \"%s\"" -#: executor/execMain.c:2147 +#: executor/execMain.c:2190 #, c-format msgid "new row violates row-level security policy (USING expression) for table \"%s\"" msgstr "la nuova riga viola la regola di sicurezza per riga (espressione USING) per la tabella \"%s\"" -#: executor/execMain.c:3341 +#: executor/execMain.c:3399 #, c-format msgid "no partition of relation \"%s\" found for row" msgstr "nessuna partizione della relazione \"%s\" trovata per la riga" -#: executor/execMain.c:3343 +#: executor/execMain.c:3401 #, c-format msgid "Partition key of the failing row contains %s." msgstr "La chiave di partizione della riga sbagliata contiene %s." 
-#: executor/execReplication.c:195 executor/execReplication.c:342 +#: executor/execReplication.c:196 executor/execReplication.c:354 #, c-format msgid "concurrent update, retrying" msgstr "modifica concorrente, sto riprovando" -#: executor/execReplication.c:544 +#: executor/execReplication.c:256 parser/parse_oper.c:228 +#: utils/adt/array_userfuncs.c:724 utils/adt/array_userfuncs.c:863 +#: utils/adt/arrayfuncs.c:3639 utils/adt/arrayfuncs.c:4077 +#: utils/adt/arrayfuncs.c:6037 utils/adt/rowtypes.c:1167 #, c-format -msgid "cannot update table \"%s\" because it does not have replica identity and publishes updates" -msgstr "non è possibile modificare la tabella \"%s\" perché non ha una replica di identità ma pubblica le righe modificate" +msgid "could not identify an equality operator for type %s" +msgstr "operatore di uguaglianza per il tipo %s non trovato" + +#: executor/execReplication.c:562 +#, c-format +msgid "cannot update table \"%s\" because it does not have a replica identity and publishes updates" +msgstr "non è possibile modificare la tabella \"%s\" perché non ha una identità di replica ma pubblica le righe modificate" -#: executor/execReplication.c:546 +#: executor/execReplication.c:564 #, c-format msgid "To enable updating the table, set REPLICA IDENTITY using ALTER TABLE." msgstr "Per abilitare le modifiche della tabella imposta REPLICA IDENTITY tramite ALTER TABLE." -#: executor/execReplication.c:550 +#: executor/execReplication.c:568 #, c-format -msgid "cannot delete from table \"%s\" because it does not have replica identity and publishes deletes" -msgstr "non è possibile cancellare dalla tabella \"%s\" perché non ha una replica di identità ma pubblica le righe cancellate" +msgid "cannot delete from table \"%s\" because it does not have a replica identity and publishes deletes" +msgstr "non è possibile cancellare dalla tabella \"%s\" perché non ha una identità di replica ma pubblica le righe cancellate" -#: executor/execReplication.c:552 +#: executor/execReplication.c:570 #, c-format msgid "To enable deleting from the table, set REPLICA IDENTITY using ALTER TABLE." msgstr "Per abilitare le cancellazioni sulla tabella imposta REPLICA IDENTITY tramite ALTER TABLE." 
-#: executor/execReplication.c:571 +#: executor/execReplication.c:589 #, c-format msgid "logical replication target relation \"%s.%s\" is not a table" msgstr "la relazione di destinazione per la replica logica \"%s.%s\" non è una tabella" @@ -11061,44 +10963,44 @@ msgstr "la relazione di destinazione per la replica logica \"%s.%s\" non è una msgid "rows returned by function are not all of the same row type" msgstr "le righe restituite dalla funzione non sono tutte dello stesso tipo" -#: executor/execSRF.c:356 executor/execSRF.c:620 +#: executor/execSRF.c:356 executor/execSRF.c:622 #, c-format msgid "table-function protocol for materialize mode was not followed" msgstr "il protocollo tabella-funzione del modo di materializzazione non è stato seguito" -#: executor/execSRF.c:363 executor/execSRF.c:638 +#: executor/execSRF.c:363 executor/execSRF.c:640 #, c-format msgid "unrecognized table-function returnMode: %d" msgstr "returnMode tabella-funzione sconosciuto: %d" -#: executor/execSRF.c:843 +#: executor/execSRF.c:845 #, c-format msgid "function returning setof record called in context that cannot accept type record" msgstr "funzione che restituisce un insieme di record invocata in un contesto che non accetta il tipo record" -#: executor/execSRF.c:898 executor/execSRF.c:914 executor/execSRF.c:924 +#: executor/execSRF.c:900 executor/execSRF.c:916 executor/execSRF.c:926 #, c-format msgid "function return row and query-specified return row do not match" msgstr "il tipo di riga restituito dalla funzione e il valore specificato dalla query non combaciano" -#: executor/execSRF.c:899 +#: executor/execSRF.c:901 #, c-format msgid "Returned row contains %d attribute, but query expects %d." msgid_plural "Returned row contains %d attributes, but query expects %d." msgstr[0] "La riga restituita contiene %d attributo, ma la query ne prevede %d." msgstr[1] "La riga restituita contiene %d attributi, ma la query ne prevede %d." -#: executor/execSRF.c:915 +#: executor/execSRF.c:917 #, c-format msgid "Returned type %s at ordinal position %d, but query expects %s." msgstr "Tipo %s restituito in posizione %d, ma la query prevede %s." -#: executor/execUtils.c:639 +#: executor/execUtils.c:646 #, c-format msgid "materialized view \"%s\" has not been populated" msgstr "la vista materializzata \"%s\" non è stata popolata" -#: executor/execUtils.c:641 +#: executor/execUtils.c:648 #, c-format msgid "Use the REFRESH MATERIALIZED VIEW command." msgstr "Usa il comando REFRESH MATERIALIZED VIEW." @@ -11108,136 +11010,136 @@ msgstr "Usa il comando REFRESH MATERIALIZED VIEW." msgid "could not determine actual type of argument declared %s" msgstr "non è stato possibile determinare il tipo reale dell'argomento dichiarato %s" -#: executor/functions.c:519 +#: executor/functions.c:520 #, c-format msgid "cannot COPY to/from client in a SQL function" msgstr "non è possibile usare COPY da o verso il client in una funzione SQL" #. translator: %s is a SQL statement name -#: executor/functions.c:525 +#: executor/functions.c:526 #, c-format msgid "%s is not allowed in a SQL function" msgstr "%s non è consentito in una funzione SQL" #. 
translator: %s is a SQL statement name -#: executor/functions.c:533 executor/spi.c:1282 executor/spi.c:2069 +#: executor/functions.c:534 executor/spi.c:1288 executor/spi.c:2075 #, c-format msgid "%s is not allowed in a non-volatile function" msgstr "%s non è consentito in una funzione non volatile" -#: executor/functions.c:653 +#: executor/functions.c:654 #, c-format msgid "could not determine actual result type for function declared to return type %s" msgstr "non è stato possibile determinare il tipo reale restituito dalla funzione dichiarata con tipo restituito %s" -#: executor/functions.c:1412 +#: executor/functions.c:1413 #, c-format msgid "SQL function \"%s\" statement %d" msgstr "funzione SQL \"%s\" istruzione %d" -#: executor/functions.c:1438 +#: executor/functions.c:1439 #, c-format msgid "SQL function \"%s\" during startup" msgstr "funzione SQL \"%s\" durante l'avvio" -#: executor/functions.c:1596 executor/functions.c:1633 -#: executor/functions.c:1645 executor/functions.c:1758 -#: executor/functions.c:1791 executor/functions.c:1821 +#: executor/functions.c:1597 executor/functions.c:1634 +#: executor/functions.c:1646 executor/functions.c:1759 +#: executor/functions.c:1792 executor/functions.c:1822 #, c-format msgid "return type mismatch in function declared to return %s" msgstr "il tipo restituito non combacia nella funzione dichiarata con tipo restituito %s" -#: executor/functions.c:1598 +#: executor/functions.c:1599 #, c-format msgid "Function's final statement must be SELECT or INSERT/UPDATE/DELETE RETURNING." msgstr "L'istruzione finale della funzione deve essere SELECT oppure INSERT/UPDATE/DELETE RETURNING." -#: executor/functions.c:1635 +#: executor/functions.c:1636 #, c-format msgid "Final statement must return exactly one column." msgstr "L'istruzione finale deve restituire esattamente una colonna." -#: executor/functions.c:1647 +#: executor/functions.c:1648 #, c-format msgid "Actual return type is %s." msgstr "Il tipo restituito realmente è %s." -#: executor/functions.c:1760 +#: executor/functions.c:1761 #, c-format msgid "Final statement returns too many columns." msgstr "L'istruzione finale restituisce troppe colonne." -#: executor/functions.c:1793 +#: executor/functions.c:1794 #, c-format msgid "Final statement returns %s instead of %s at column %d." msgstr "L'istruzione finale restituisce %s invece di %s alla colonna %d." -#: executor/functions.c:1823 +#: executor/functions.c:1824 #, c-format msgid "Final statement returns too few columns." msgstr "L'istruzione finale restituisce troppe poche colonne." 
-#: executor/functions.c:1872 +#: executor/functions.c:1873 #, c-format msgid "return type %s is not supported for SQL functions" msgstr "il tipo di risultato %s non è supportato per le funzioni SQL" -#: executor/nodeAgg.c:3471 +#: executor/nodeAgg.c:3459 parser/parse_agg.c:618 parser/parse_agg.c:648 +#, c-format +msgid "aggregate function calls cannot be nested" +msgstr "le chiamate a funzioni di aggregazione non possono essere annidate" + +#: executor/nodeAgg.c:3548 #, c-format msgid "combine function for aggregate %u must be declared as STRICT" msgstr "la funzione di combinazione per l'aggregato %u deve essere dichiarata STRICT" -#: executor/nodeAgg.c:3516 executor/nodeWindowAgg.c:2278 +#: executor/nodeAgg.c:3593 executor/nodeWindowAgg.c:2282 #, c-format msgid "aggregate %u needs to have compatible input type and transition type" msgstr "l'aggregato %u deve avere tipi di input e transizione compatibili" -#: executor/nodeAgg.c:3570 parser/parse_agg.c:618 parser/parse_agg.c:648 -#, c-format -msgid "aggregate function calls cannot be nested" -msgstr "le chiamate a funzioni di aggregazione non possono essere annidate" - -#: executor/nodeCustom.c:142 executor/nodeCustom.c:153 +#: executor/nodeCustom.c:152 executor/nodeCustom.c:163 #, c-format msgid "custom scan \"%s\" does not support MarkPos" msgstr "-lo scan personalizzato \"%s\" non supporta MarkPos" -#: executor/nodeHashjoin.c:767 executor/nodeHashjoin.c:797 +#: executor/nodeHashjoin.c:770 executor/nodeHashjoin.c:800 #, c-format msgid "could not rewind hash-join temporary file: %m" msgstr "riavvolgimento del file temporaneo per l'hash-join fallito: %m" -#: executor/nodeHashjoin.c:832 executor/nodeHashjoin.c:838 +#: executor/nodeHashjoin.c:835 executor/nodeHashjoin.c:841 #, c-format msgid "could not write to hash-join temporary file: %m" msgstr "scrittura nel file temporaneo per l'hash-join fallita: %m" -#: executor/nodeHashjoin.c:879 executor/nodeHashjoin.c:889 +#: executor/nodeHashjoin.c:882 executor/nodeHashjoin.c:892 #, c-format msgid "could not read from hash-join temporary file: %m" msgstr "lettura dal file temporaneo per l'hash-join fallita: %m" -#: executor/nodeIndexonlyscan.c:233 +#: executor/nodeIndexonlyscan.c:237 #, c-format msgid "lossy distance functions are not supported in index-only scans" msgstr "le funzioni di distanza lossy non sono supportate nelle scansioni dei soli indici" -#: executor/nodeLimit.c:252 +#: executor/nodeLimit.c:256 #, c-format msgid "OFFSET must not be negative" msgstr "OFFSET non può essere negativo" -#: executor/nodeLimit.c:278 +#: executor/nodeLimit.c:282 #, c-format msgid "LIMIT must not be negative" msgstr "LIMIT non può essere negativo" -#: executor/nodeMergejoin.c:1554 +#: executor/nodeMergejoin.c:1559 #, c-format msgid "RIGHT JOIN is only supported with merge-joinable join conditions" msgstr "RIGHT JOIN è supportato solo con condizioni di join che supportano merge" -#: executor/nodeMergejoin.c:1574 +#: executor/nodeMergejoin.c:1579 #, c-format msgid "FULL JOIN is only supported with merge-joinable join conditions" msgstr "FULL JOIN è supportato solo con condizioni di join che supportano merge" @@ -11257,53 +11159,53 @@ msgstr "La query produce un valore per una colonna eliminata in posizione %d." msgid "Query has too few columns." msgstr "La query ha troppe poche colonne." 
-#: executor/nodeModifyTable.c:1201 +#: executor/nodeModifyTable.c:1258 #, c-format msgid "ON CONFLICT DO UPDATE command cannot affect row a second time" msgstr "il comando ON CONFLICT DO UPDATE non può toccare le righe una seconda volta" -#: executor/nodeModifyTable.c:1202 +#: executor/nodeModifyTable.c:1259 #, c-format msgid "Ensure that no rows proposed for insertion within the same command have duplicate constrained values." msgstr "Assicurati che non ci siano righe proposte per l'inserimento nello stesso comando che abbiano valori vincolati uguali." -#: executor/nodeSamplescan.c:298 +#: executor/nodeSamplescan.c:301 #, c-format msgid "TABLESAMPLE parameter cannot be null" msgstr "il parametro TABLESAMPLE non può essere null" -#: executor/nodeSamplescan.c:310 +#: executor/nodeSamplescan.c:313 #, c-format msgid "TABLESAMPLE REPEATABLE parameter cannot be null" msgstr "il parametro TABLESAMPLE REPEATABLE non può essere null" -#: executor/nodeSubplan.c:333 executor/nodeSubplan.c:372 -#: executor/nodeSubplan.c:1004 +#: executor/nodeSubplan.c:336 executor/nodeSubplan.c:375 +#: executor/nodeSubplan.c:1009 #, c-format msgid "more than one row returned by a subquery used as an expression" msgstr "più di una riga restituita da una sottoquery usata come espressione" -#: executor/nodeTableFuncscan.c:365 +#: executor/nodeTableFuncscan.c:368 #, c-format msgid "namespace URI must not be null" msgstr "l'URI del namespace non può essere nullo" -#: executor/nodeTableFuncscan.c:376 +#: executor/nodeTableFuncscan.c:379 #, c-format msgid "row filter expression must not be null" msgstr "l'espressione di filtro della riga non può essere null" -#: executor/nodeTableFuncscan.c:401 +#: executor/nodeTableFuncscan.c:404 #, c-format msgid "column filter expression must not be null" msgstr "l'espressione del filtro di colonna non può essere null" -#: executor/nodeTableFuncscan.c:402 +#: executor/nodeTableFuncscan.c:405 #, c-format msgid "Filter for column \"%s\" is null." msgstr "Il filtro per la colonna \"%s\" è null." -#: executor/nodeTableFuncscan.c:481 +#: executor/nodeTableFuncscan.c:486 #, c-format msgid "null is not allowed in column \"%s\"" msgstr "null non ammessi nella colonna \"%s\"" @@ -11313,63 +11215,63 @@ msgstr "null non ammessi nella colonna \"%s\"" msgid "moving-aggregate transition function must not return null" msgstr "le funzioni di transizione per aggregati mobili non possono restituire null" -#: executor/nodeWindowAgg.c:1621 +#: executor/nodeWindowAgg.c:1624 #, c-format msgid "frame starting offset must not be null" msgstr "l'offset di inizio della finestra dev'essere non nullo" -#: executor/nodeWindowAgg.c:1634 +#: executor/nodeWindowAgg.c:1637 #, c-format msgid "frame starting offset must not be negative" msgstr "l'offset di inizio della finestra non può essere negativo" -#: executor/nodeWindowAgg.c:1646 +#: executor/nodeWindowAgg.c:1649 #, c-format msgid "frame ending offset must not be null" msgstr "l'offset di fine della finestra dev'essere non nullo" -#: executor/nodeWindowAgg.c:1659 +#: executor/nodeWindowAgg.c:1662 #, c-format msgid "frame ending offset must not be negative" msgstr "l'offset di fine della finestra non può essere negativo" -#: executor/spi.c:197 +#: executor/spi.c:198 #, c-format msgid "transaction left non-empty SPI stack" msgstr "la transazione ha lasciato lo stack SPI non vuoto" -#: executor/spi.c:198 executor/spi.c:261 +#: executor/spi.c:199 executor/spi.c:262 #, c-format msgid "Check for missing \"SPI_finish\" calls." 
msgstr "Verifica che non ci siano chiamate \"SPI_finish\" mancanti." -#: executor/spi.c:260 +#: executor/spi.c:261 #, c-format msgid "subtransaction left non-empty SPI stack" msgstr "la sottotransazione ha lasciato lo stack SPI non vuoto" -#: executor/spi.c:1143 +#: executor/spi.c:1149 #, c-format msgid "cannot open multi-query plan as cursor" msgstr "non è possibile aprire un piano multi-query come cursore" #. translator: %s is name of a SQL command, eg INSERT -#: executor/spi.c:1148 +#: executor/spi.c:1154 #, c-format msgid "cannot open %s query as cursor" msgstr "non è possibile aprire una query %s come cursore" -#: executor/spi.c:1253 +#: executor/spi.c:1259 #, c-format msgid "DECLARE SCROLL CURSOR ... FOR UPDATE/SHARE is not supported" msgstr "DECLARE SCROLL CURSOR ... FOR UPDATE/SHARE non è supportato" -#: executor/spi.c:1254 parser/analyze.c:2447 +#: executor/spi.c:1260 parser/analyze.c:2447 #, c-format msgid "Scrollable cursors must be READ ONLY." msgstr "Un cursore scorribile dev'essere READ ONLY." -#: executor/spi.c:2374 +#: executor/spi.c:2380 #, c-format msgid "SQL statement \"%s\"" msgstr "istruzione SQL \"%s\"" @@ -11399,100 +11301,118 @@ msgstr "Le opzioni valide in questo contesto sono: %s" msgid "Cannot enlarge string buffer containing %d bytes by %d more bytes." msgstr "Non è possibile aumentare il buffer della stringa contenente %d byte di altri %d byte." -#: libpq/auth-scram.c:208 +#: libpq/auth-scram.c:199 libpq/auth-scram.c:439 libpq/auth-scram.c:448 +#, c-format +msgid "invalid SCRAM verifier for user \"%s\"" +msgstr "verifica SCRAM non valido per l'utente \"%s\"" + +#: libpq/auth-scram.c:210 #, c-format msgid "User \"%s\" does not have a valid SCRAM verifier." msgstr "L'utente \"%s\" non ha una verifica SCRAM valida." -#: libpq/auth-scram.c:286 +#: libpq/auth-scram.c:288 libpq/auth-scram.c:293 libpq/auth-scram.c:587 +#: libpq/auth-scram.c:595 libpq/auth-scram.c:676 libpq/auth-scram.c:686 +#: libpq/auth-scram.c:804 libpq/auth-scram.c:811 libpq/auth-scram.c:826 +#: libpq/auth-scram.c:1056 libpq/auth-scram.c:1064 +#, c-format +msgid "malformed SCRAM message" +msgstr "messaggio SCRAM malformato" + +#: libpq/auth-scram.c:289 +#, c-format +msgid "The message is empty." +msgstr "Il messaggio è vuoto." + +#: libpq/auth-scram.c:294 #, c-format -msgid "malformed SCRAM message (empty message)" -msgstr "messaggio SCRAM malformato (messaggio vuoto)" +msgid "Message length does not match input length." +msgstr "La lunghezza del messaggio non combacia con la lunghezza dell'input." -#: libpq/auth-scram.c:290 +#: libpq/auth-scram.c:326 #, c-format -msgid "malformed SCRAM message (length mismatch)" -msgstr "messaggio SCRAM malformato (la lunghezza non combacia)" +msgid "invalid SCRAM response" +msgstr "risposta SCRAM non valida" -#: libpq/auth-scram.c:322 +#: libpq/auth-scram.c:327 #, c-format -msgid "invalid SCRAM response (nonce mismatch)" -msgstr "risposta SCRAM non valida (il nonce non combacia)" +msgid "Nonce does not match." +msgstr "Il nonce non combacia." -#: libpq/auth-scram.c:397 +#: libpq/auth-scram.c:401 #, c-format msgid "could not generate random salt" msgstr "errore nella generazione del sale casuale" -#: libpq/auth-scram.c:585 +#: libpq/auth-scram.c:588 #, c-format -msgid "malformed SCRAM message (attribute '%c' expected, %s found)" -msgstr "messaggio SCRAM non valido (previsto attributo '%c', trovato %s)" +msgid "Expected attribute \"%c\" but found \"%s\"." +msgstr "Atteso attributo \"%c\" ma trovato \"%s\"." 
-#: libpq/auth-scram.c:592 libpq/auth-scram.c:681 +#: libpq/auth-scram.c:596 libpq/auth-scram.c:687 #, c-format -msgid "malformed SCRAM message (expected = in attr %c)" -msgstr "messaggio SCRAM non valido (previsto = in attributo '%c')" +msgid "Expected character \"=\" for attribute \"%c\"." +msgstr "Atteso carattere \"=\" per l'attributo \"%c\"." -#: libpq/auth-scram.c:672 +#: libpq/auth-scram.c:677 #, c-format -msgid "malformed SCRAM message (attribute expected, invalid char %s found)" -msgstr "messaggio SCRAM non valido (previsto attributo, trovato carattere non valido %s)" +msgid "Attribute expected, but found invalid character \"%s\"." +msgstr "Atteso attributo, ma trovato carattere non valido \"%s\"." -#: libpq/auth-scram.c:794 +#: libpq/auth-scram.c:800 #, c-format msgid "client requires SCRAM channel binding, but it is not supported" msgstr "il client richiede il binding del canale SCRAM, ma non è supportato" -#: libpq/auth-scram.c:798 +#: libpq/auth-scram.c:805 #, c-format -msgid "malformed SCRAM message (unexpected channel-binding flag %s)" -msgstr "messaggio SCRAM non valido (flag di channel-binding %s non previsto)" +msgid "Unexpected channel-binding flag \"%s\"." +msgstr "Flag channel-binding \"%s\" non previsto." -#: libpq/auth-scram.c:804 +#: libpq/auth-scram.c:812 #, c-format -msgid "malformed SCRAM message (comma expected, got %s)" -msgstr "messaggio SCRAM non valido (prevista virgola, ricevuto %s)" +msgid "Comma expected, but found character \"%s\"." +msgstr "Attesa virgola, ma trovato carattere \"%s\"." -#: libpq/auth-scram.c:814 +#: libpq/auth-scram.c:822 #, c-format msgid "client uses authorization identity, but it is not supported" msgstr "il client usa l'autorizzazione identità, ma non è supportata" -#: libpq/auth-scram.c:818 +#: libpq/auth-scram.c:827 #, c-format -msgid "malformed SCRAM message (unexpected attribute %s in client-first-message)" -msgstr "messaggio SCRAM non valido (attributo '%s' non previsto nel primo messaggio client)" +msgid "Unexpected attribute \"%s\" in client-first-message." +msgstr "Attributo \"%s\" non atteso nel client-first-message." -#: libpq/auth-scram.c:834 +#: libpq/auth-scram.c:843 #, c-format -msgid "client requires mandatory SCRAM extension" -msgstr "il client richiede l'estensione SCRAM obbligatoria" +msgid "client requires an unsupported SCRAM extension" +msgstr "il client richiede un'estensione SCRAM non supportata" -#: libpq/auth-scram.c:848 +#: libpq/auth-scram.c:857 #, c-format msgid "non-printable characters in SCRAM nonce" msgstr "caratteri non stampabili nel nonce SCRAM" -#: libpq/auth-scram.c:965 +#: libpq/auth-scram.c:974 #, c-format msgid "could not generate random nonce" msgstr "errore nella generazione del nonce SCRAM" -#: libpq/auth-scram.c:1033 +#: libpq/auth-scram.c:1042 #, c-format msgid "unexpected SCRAM channel-binding attribute in client-final-message" msgstr "attributo channel-binding SCRAM non previsto nel messaggio finale del client" -#: libpq/auth-scram.c:1047 +#: libpq/auth-scram.c:1057 #, c-format -msgid "malformed SCRAM message (malformed proof in client-final-message" -msgstr "messaggio SCRAM non valido (prova errata nel messaggio finale del client)" +msgid "Malformed proof in client-final-message." +msgstr "Verifica malformata in client-final-message." 
-#: libpq/auth-scram.c:1054 +#: libpq/auth-scram.c:1065 #, c-format -msgid "malformed SCRAM message (garbage at end of client-final-message)" -msgstr "messaggio SCRAM non valido (dati in eccesso alla fine del messaggio finale del client)" +msgid "Garbage found at the end of client-final-message." +msgstr "Dati spuri alla fine del client-final-message." #: libpq/auth.c:274 #, c-format @@ -11657,382 +11577,382 @@ msgstr "era attesa una risposta password, ricevuto messaggio di tipo %d" msgid "invalid password packet size" msgstr "dimensione del pacchetto password non valida" -#: libpq/auth.c:809 libpq/hba.c:1325 +#: libpq/auth.c:707 +#, c-format +msgid "empty password returned by client" +msgstr "il client ha restituito una password vuota" + +#: libpq/auth.c:827 libpq/hba.c:1325 #, c-format msgid "MD5 authentication is not supported when \"db_user_namespace\" is enabled" msgstr "l'autenticazione MD5 non è supportata quando \"db_user_namespace\" è abilitato" -#: libpq/auth.c:815 +#: libpq/auth.c:833 #, c-format msgid "could not generate random MD5 salt" msgstr "errore nella generazione del sale casuale MD5" -#: libpq/auth.c:860 +#: libpq/auth.c:878 #, c-format msgid "SASL authentication is not supported in protocol version 2" msgstr "l'autenticazione SASL non è supportata nella versione 2 del protocollo" -#: libpq/auth.c:902 +#: libpq/auth.c:920 #, c-format msgid "expected SASL response, got message type %d" msgstr "attesa risposta SASL, ricevuto messaggio di tipo %d" -#: libpq/auth.c:939 +#: libpq/auth.c:957 #, c-format msgid "client selected an invalid SASL authentication mechanism" msgstr "il client a selezionato un meccanismo di autenticazione SASL non valido" -#: libpq/auth.c:1085 +#: libpq/auth.c:1104 #, c-format msgid "GSSAPI is not supported in protocol version 2" msgstr "GSSAPI non è supportato con la versione 2 del protocollo" -#: libpq/auth.c:1145 +#: libpq/auth.c:1164 #, c-format msgid "expected GSS response, got message type %d" msgstr "era attesa una risposta GSS, ricevuto messaggio di tipo %d" -#: libpq/auth.c:1207 +#: libpq/auth.c:1226 msgid "accepting GSS security context failed" msgstr "contesto di sicurezza accettazione GSS fallito" -#: libpq/auth.c:1233 +#: libpq/auth.c:1252 msgid "retrieving GSS user name failed" msgstr "la richiesta del GSS user name è fallita" -#: libpq/auth.c:1352 +#: libpq/auth.c:1372 #, c-format msgid "SSPI is not supported in protocol version 2" msgstr "SSPI non è supportato con la versione 2 del protocollo" -#: libpq/auth.c:1367 +#: libpq/auth.c:1387 msgid "could not acquire SSPI credentials" msgstr "non è stato possibile ottenere le credenziali SSPI" -#: libpq/auth.c:1385 +#: libpq/auth.c:1405 #, c-format msgid "expected SSPI response, got message type %d" msgstr "era attesa una risposta SSPI, ricevuto messaggio di tipo %d" -#: libpq/auth.c:1458 +#: libpq/auth.c:1478 msgid "could not accept SSPI security context" msgstr "non è stato possibile accettare il contesto di sicurezza SSPI" -#: libpq/auth.c:1520 +#: libpq/auth.c:1540 msgid "could not get token from SSPI security context" msgstr "non è stato possibile ottenere il token dal contesto di sicurezza SSPI" -#: libpq/auth.c:1639 libpq/auth.c:1658 +#: libpq/auth.c:1659 libpq/auth.c:1678 #, c-format msgid "could not translate name" msgstr "non è stato possibile tradurre il nome" -#: libpq/auth.c:1671 +#: libpq/auth.c:1691 #, c-format msgid "realm name too long" msgstr "nome di realm troppo lungo" -#: libpq/auth.c:1686 +#: libpq/auth.c:1706 #, c-format msgid "translated account name too long" msgstr 
"nome di account tradotto troppo lungo" -#: libpq/auth.c:1872 +#: libpq/auth.c:1892 #, c-format msgid "could not create socket for Ident connection: %m" msgstr "creazione del socket per la connessione Ident fallita: %m" -#: libpq/auth.c:1887 +#: libpq/auth.c:1907 #, c-format msgid "could not bind to local address \"%s\": %m" msgstr "bind sull'indirizzo locale \"%s\" fallito: %m" -#: libpq/auth.c:1899 +#: libpq/auth.c:1919 #, c-format msgid "could not connect to Ident server at address \"%s\", port %s: %m" msgstr "connessione al server Ident all'indirizzo \"%s\", porta %s fallita: %m" -#: libpq/auth.c:1921 +#: libpq/auth.c:1941 #, c-format msgid "could not send query to Ident server at address \"%s\", port %s: %m" msgstr "invio della query al server Ident all'indirizzo \"%s\", porta %s fallito: %m" -#: libpq/auth.c:1938 +#: libpq/auth.c:1958 #, c-format msgid "could not receive response from Ident server at address \"%s\", port %s: %m" msgstr "ricezione della risposta dal server Ident all'indirizzo \"%s\", porta %s fallita: %m" -#: libpq/auth.c:1948 +#: libpq/auth.c:1968 #, c-format msgid "invalidly formatted response from Ident server: \"%s\"" msgstr "risposta dal server Ident formattata in maniera non corretta: \"%s\"" -#: libpq/auth.c:1988 +#: libpq/auth.c:2008 #, c-format msgid "peer authentication is not supported on this platform" msgstr "il metodo di autenticazione peer non è supportato su questa piattaforma" -#: libpq/auth.c:1992 +#: libpq/auth.c:2012 #, c-format msgid "could not get peer credentials: %m" msgstr "non è stato possibile recuperare le credenziali del peer: %m" -#: libpq/auth.c:2001 +#: libpq/auth.c:2021 #, c-format msgid "could not look up local user ID %ld: %s" msgstr "ricerca dell'ID utente locale %ld fallita: %s" -#: libpq/auth.c:2085 libpq/auth.c:2411 libpq/auth.c:2724 -#, c-format -msgid "empty password returned by client" -msgstr "il client ha restituito una password vuota" - -#: libpq/auth.c:2095 +#: libpq/auth.c:2109 #, c-format msgid "error from underlying PAM layer: %s" msgstr "errore dal livello PAM sottostante: %s" -#: libpq/auth.c:2176 +#: libpq/auth.c:2190 #, c-format msgid "could not create PAM authenticator: %s" msgstr "creazione dell'autenticatore PAM fallita: %s" -#: libpq/auth.c:2187 +#: libpq/auth.c:2201 #, c-format msgid "pam_set_item(PAM_USER) failed: %s" msgstr "pam_set_item(PAM_USER) fallita: %s" -#: libpq/auth.c:2198 +#: libpq/auth.c:2212 #, c-format msgid "pam_set_item(PAM_RHOST) failed: %s" msgstr "pam_set_item(PAM_RHOST) fallita: %s" -#: libpq/auth.c:2209 +#: libpq/auth.c:2223 #, c-format msgid "pam_set_item(PAM_CONV) failed: %s" msgstr "pam_set_item(PAM_CONV) fallita: %s" -#: libpq/auth.c:2220 +#: libpq/auth.c:2234 #, c-format msgid "pam_authenticate failed: %s" msgstr "pam_authenticate fallita: %s" -#: libpq/auth.c:2231 +#: libpq/auth.c:2245 #, c-format msgid "pam_acct_mgmt failed: %s" msgstr "pam_acct_mgmt fallita: %s" -#: libpq/auth.c:2242 +#: libpq/auth.c:2256 #, c-format msgid "could not release PAM authenticator: %s" msgstr "rilascio dell'autenticatore PAM fallito: %s" -#: libpq/auth.c:2307 +#: libpq/auth.c:2323 #, c-format msgid "could not initialize LDAP: %m" msgstr "inizializzazione LDAP fallita: %m" -#: libpq/auth.c:2310 +#: libpq/auth.c:2326 #, c-format msgid "could not initialize LDAP: error code %d" msgstr "inizializzazione LDAP fallita: codice errore %d" -#: libpq/auth.c:2320 +#: libpq/auth.c:2336 #, c-format msgid "could not set LDAP protocol version: %s" msgstr "impostazione della versione del protocollo LDAP fallita: %s" 
-#: libpq/auth.c:2349 +#: libpq/auth.c:2365 #, c-format msgid "could not load wldap32.dll" msgstr "caricamento wldap32.dll fallito" -#: libpq/auth.c:2357 +#: libpq/auth.c:2373 #, c-format msgid "could not load function _ldap_start_tls_sA in wldap32.dll" msgstr "caricamento della funzione _ldap_start_tls_sA in wldap32.dll fallito" -#: libpq/auth.c:2358 +#: libpq/auth.c:2374 #, c-format msgid "LDAP over SSL is not supported on this platform." msgstr "LDAP su SSL non è supportato su questa piattaforma." -#: libpq/auth.c:2373 +#: libpq/auth.c:2389 #, c-format msgid "could not start LDAP TLS session: %s" msgstr "avvio della sessione TLS LDAP fallito: %s" -#: libpq/auth.c:2395 +#: libpq/auth.c:2411 #, c-format msgid "LDAP server not specified" msgstr "server LDAP non specificato" -#: libpq/auth.c:2448 +#: libpq/auth.c:2460 #, c-format msgid "invalid character in user name for LDAP authentication" msgstr "carattere non valido nel nome utente per l'autenticazione LDAP" -#: libpq/auth.c:2463 +#: libpq/auth.c:2476 #, c-format msgid "could not perform initial LDAP bind for ldapbinddn \"%s\" on server \"%s\": %s" msgstr "bind iniziale LDAP fallito per ldapbinddn \"%s\" sul server \"%s\": %s" -#: libpq/auth.c:2487 +#: libpq/auth.c:2501 #, c-format msgid "could not search LDAP for filter \"%s\" on server \"%s\": %s" msgstr "ricerca in LDAP del filtro \"%s\" sul server \"%s\" fallita: %s" -#: libpq/auth.c:2498 +#: libpq/auth.c:2513 #, c-format msgid "LDAP user \"%s\" does not exist" msgstr "l'utente LDAP \"%s\" non esiste" -#: libpq/auth.c:2499 +#: libpq/auth.c:2514 #, c-format msgid "LDAP search for filter \"%s\" on server \"%s\" returned no entries." msgstr "La ricerca LDAP del filtro \"%s\" sul server \"%s\" non ha restituito risultati." -#: libpq/auth.c:2503 +#: libpq/auth.c:2518 #, c-format msgid "LDAP user \"%s\" is not unique" msgstr "L'utente LDAP \"%s\" non è unico" -#: libpq/auth.c:2504 +#: libpq/auth.c:2519 #, c-format msgid "LDAP search for filter \"%s\" on server \"%s\" returned %d entry." msgid_plural "LDAP search for filter \"%s\" on server \"%s\" returned %d entries." msgstr[0] "La ricerca LDAP del filtro \"%s\" sul server \"%s\" ha restituito %d risultato." msgstr[1] "La ricerca LDAP del filtro \"%s\" sul server \"%s\" ha restituito %d risultati." 
-#: libpq/auth.c:2522 +#: libpq/auth.c:2538 #, c-format msgid "could not get dn for the first entry matching \"%s\" on server \"%s\": %s" msgstr "dn per il primo risultato di \"%s\" non trovato sul server \"%s\": %s" -#: libpq/auth.c:2542 +#: libpq/auth.c:2559 #, c-format msgid "could not unbind after searching for user \"%s\" on server \"%s\": %s" msgstr "unbind fallito dopo aver cercato l'utente \"%s\" sul server \"%s\": %s" -#: libpq/auth.c:2572 +#: libpq/auth.c:2591 #, c-format msgid "LDAP login failed for user \"%s\" on server \"%s\": %s" msgstr "login LDAP fallito per l'utente \"%s\" sul server \"%s\": %s" -#: libpq/auth.c:2600 +#: libpq/auth.c:2621 #, c-format msgid "certificate authentication failed for user \"%s\": client certificate contains no user name" msgstr "autenticazione con certificato fallita per l'utente \"%s\": il certificato del client non contiene alcun nome utente" -#: libpq/auth.c:2703 +#: libpq/auth.c:2724 #, c-format msgid "RADIUS server not specified" msgstr "server RADIUS non specificato" -#: libpq/auth.c:2710 +#: libpq/auth.c:2731 #, c-format msgid "RADIUS secret not specified" msgstr "segreto RADIUS non specificato" -#: libpq/auth.c:2731 +#: libpq/auth.c:2745 #, c-format msgid "RADIUS authentication does not support passwords longer than %d characters" msgstr "l'autenticazione RADIUS non supporta password più lunghe di %d caratteri" -#: libpq/auth.c:2828 libpq/hba.c:1876 +#: libpq/auth.c:2850 libpq/hba.c:1876 #, c-format msgid "could not translate RADIUS server name \"%s\" to address: %s" msgstr "conversione del nome del server RADIUS \"%s\" in indirizzo fallita: %s" -#: libpq/auth.c:2842 +#: libpq/auth.c:2864 #, c-format msgid "could not generate random encryption vector" msgstr "generazione del vettore di criptaggio casuale fallita" -#: libpq/auth.c:2876 +#: libpq/auth.c:2898 #, c-format msgid "could not perform MD5 encryption of password" msgstr "criptaggio MD5 della password fallito" -#: libpq/auth.c:2902 +#: libpq/auth.c:2924 #, c-format msgid "could not create RADIUS socket: %m" msgstr "creazione del socket RADIUS fallita: %m" -#: libpq/auth.c:2924 +#: libpq/auth.c:2946 #, c-format msgid "could not bind local RADIUS socket: %m" msgstr "bind del socket RADIUS fallito: %m" -#: libpq/auth.c:2934 +#: libpq/auth.c:2956 #, c-format msgid "could not send RADIUS packet: %m" msgstr "invio del pacchetto RADIUS fallito: %m" -#: libpq/auth.c:2967 libpq/auth.c:2993 +#: libpq/auth.c:2989 libpq/auth.c:3015 #, c-format msgid "timeout waiting for RADIUS response from %s" msgstr "timeout in attesa della risposta RADIUS da %s" -#: libpq/auth.c:2986 +#: libpq/auth.c:3008 #, c-format msgid "could not check status on RADIUS socket: %m" msgstr "controllo dello stato sul socket RADIUS fallito: %m" -#: libpq/auth.c:3016 +#: libpq/auth.c:3038 #, c-format msgid "could not read RADIUS response: %m" msgstr "lettura della risposta RADIUS fallita: %m" -#: libpq/auth.c:3029 libpq/auth.c:3033 +#: libpq/auth.c:3051 libpq/auth.c:3055 #, c-format msgid "RADIUS response from %s was sent from incorrect port: %d" msgstr "la risposta RADIUS da %s è stata inviata da una porta non valida: %d" -#: libpq/auth.c:3042 +#: libpq/auth.c:3064 #, c-format msgid "RADIUS response from %s too short: %d" msgstr "risposta RADIUS da %s troppo breve: %d" -#: libpq/auth.c:3049 +#: libpq/auth.c:3071 #, c-format msgid "RADIUS response from %s has corrupt length: %d (actual length %d)" msgstr "la risposta RADIUS da %s ha una lunghezza non valida: %d (la lunghezza reale è %d)" -#: libpq/auth.c:3057 +#: 
libpq/auth.c:3079 #, c-format msgid "RADIUS response from %s is to a different request: %d (should be %d)" msgstr "la risposta RADIUS da %s è di una richiesta diversa: %d (dovrebbe essere %d)" -#: libpq/auth.c:3082 +#: libpq/auth.c:3104 #, c-format msgid "could not perform MD5 encryption of received packet" msgstr "criptaggio MD5 dei pacchetti ricevuti fallito" -#: libpq/auth.c:3091 +#: libpq/auth.c:3113 #, c-format msgid "RADIUS response from %s has incorrect MD5 signature" msgstr "la risposta RADIUS da %s ha una firma MD5 non valida" -#: libpq/auth.c:3109 +#: libpq/auth.c:3131 #, c-format msgid "RADIUS response from %s has invalid code (%d) for user \"%s\"" msgstr "La risposta RADIUS da %s ha un codice non valido (%d) per l'utente \"%s\"" #: libpq/be-fsstubs.c:132 libpq/be-fsstubs.c:163 libpq/be-fsstubs.c:197 #: libpq/be-fsstubs.c:237 libpq/be-fsstubs.c:262 libpq/be-fsstubs.c:310 -#: libpq/be-fsstubs.c:333 libpq/be-fsstubs.c:581 +#: libpq/be-fsstubs.c:333 libpq/be-fsstubs.c:590 #, c-format msgid "invalid large-object descriptor: %d" msgstr "descrittore di large object non valido: %d" -#: libpq/be-fsstubs.c:178 libpq/be-fsstubs.c:216 libpq/be-fsstubs.c:600 -#: libpq/be-fsstubs.c:788 +#: libpq/be-fsstubs.c:178 libpq/be-fsstubs.c:216 libpq/be-fsstubs.c:609 +#: libpq/be-fsstubs.c:797 libpq/be-fsstubs.c:917 #, c-format msgid "permission denied for large object %u" msgstr "permesso per il large object %u negato" -#: libpq/be-fsstubs.c:203 libpq/be-fsstubs.c:587 +#: libpq/be-fsstubs.c:203 libpq/be-fsstubs.c:596 #, c-format msgid "large object descriptor %d was not opened for writing" msgstr "il descrittore per il large object %d non era aperto in scrittura" @@ -12077,172 +11997,207 @@ msgstr "solo un superutente può usare lo_export() lato server" msgid "Anyone can use the client-side lo_export() provided by libpq." msgstr "Chiunque può invece usare lo_export() lato client fornito da libpq." 
-#: libpq/be-fsstubs.c:547 +#: libpq/be-fsstubs.c:556 #, c-format msgid "could not create server file \"%s\": %m" msgstr "creazione del file del server \"%s\" fallita: %m" -#: libpq/be-fsstubs.c:559 +#: libpq/be-fsstubs.c:568 #, c-format msgid "could not write server file \"%s\": %m" msgstr "scrittura del file del server \"%s\" fallita: %m" -#: libpq/be-fsstubs.c:813 +#: libpq/be-fsstubs.c:822 #, c-format msgid "large object read request is too large" msgstr "la richiesta di lettura per il large object è troppo grande" -#: libpq/be-fsstubs.c:855 utils/adt/genfile.c:212 utils/adt/genfile.c:253 +#: libpq/be-fsstubs.c:864 utils/adt/genfile.c:212 utils/adt/genfile.c:253 #, c-format msgid "requested length cannot be negative" msgstr "la lunghezza richiesta non può essere negativa" -#: libpq/be-secure-openssl.c:197 +#: libpq/be-secure-openssl.c:166 #, c-format msgid "could not create SSL context: %s" msgstr "creazione del contesto SSL fallita: %s" -#: libpq/be-secure-openssl.c:225 +#: libpq/be-secure-openssl.c:194 #, c-format msgid "could not load server certificate file \"%s\": %s" msgstr "caricamento del file di certificato del server \"%s\" fallito: %s" -#: libpq/be-secure-openssl.c:234 +#: libpq/be-secure-openssl.c:203 #, c-format msgid "could not access private key file \"%s\": %m" msgstr "accesso fallito al file della chiave privata \"%s\": %m" -#: libpq/be-secure-openssl.c:243 +#: libpq/be-secure-openssl.c:212 #, c-format msgid "private key file \"%s\" is not a regular file" msgstr "il file di chiave privata \"%s\" non è un file regolare" -#: libpq/be-secure-openssl.c:258 +#: libpq/be-secure-openssl.c:227 #, c-format msgid "private key file \"%s\" must be owned by the database user or root" msgstr "il file di chiave privata \"%s\" deve essere di proprietà dell'utente del database o di root" -#: libpq/be-secure-openssl.c:281 +#: libpq/be-secure-openssl.c:250 #, c-format msgid "private key file \"%s\" has group or world access" msgstr "il file della chiave privata \"%s\" ha accesso al gruppo o a chiunque" -#: libpq/be-secure-openssl.c:283 +#: libpq/be-secure-openssl.c:252 #, c-format msgid "File must have permissions u=rw (0600) or less if owned by the database user, or permissions u=rw,g=r (0640) or less if owned by root." msgstr "Il file deve avere permesso u=rw (0600) o inferiore se di proprietà dell'utente database, o permesso u=rw,g=r (0640) o inferiore se di proprietà di root." 
-#: libpq/be-secure-openssl.c:300 +#: libpq/be-secure-openssl.c:269 #, c-format msgid "private key file \"%s\" cannot be reloaded because it requires a passphrase" msgstr "il file della chiave privata \"%s\" non può essere ricaricato perché richiede una passphrase" -#: libpq/be-secure-openssl.c:305 +#: libpq/be-secure-openssl.c:274 #, c-format msgid "could not load private key file \"%s\": %s" msgstr "caricamento del file della chiave privata \"%s\" fallito: %s" -#: libpq/be-secure-openssl.c:314 +#: libpq/be-secure-openssl.c:283 #, c-format msgid "check of private key failed: %s" msgstr "controllo della chiave privata fallito: %s" -#: libpq/be-secure-openssl.c:334 +#: libpq/be-secure-openssl.c:310 #, c-format msgid "could not set the cipher list (no valid ciphers available)" msgstr "impostazione della lista dei cifrari fallita (nessun cifrario valido disponibile)" -#: libpq/be-secure-openssl.c:352 +#: libpq/be-secure-openssl.c:328 #, c-format msgid "could not load root certificate file \"%s\": %s" msgstr "caricamento del file del certificato radice \"%s\" fallito: %s" -#: libpq/be-secure-openssl.c:379 +#: libpq/be-secure-openssl.c:355 #, c-format msgid "SSL certificate revocation list file \"%s\" ignored" msgstr "il file di lista di revoche di certificati SSL \"%s\" è stato ignorato" -#: libpq/be-secure-openssl.c:381 +#: libpq/be-secure-openssl.c:357 #, c-format msgid "SSL library does not support certificate revocation lists." msgstr "La libreria SSL non supporta le liste di revoca dei certificati." -#: libpq/be-secure-openssl.c:388 +#: libpq/be-secure-openssl.c:364 #, c-format msgid "could not load SSL certificate revocation list file \"%s\": %s" msgstr "caricamento del file di lista di revoche di certificati SSL \"%s\" fallito: %s" -#: libpq/be-secure-openssl.c:469 +#: libpq/be-secure-openssl.c:445 #, c-format msgid "could not initialize SSL connection: SSL context not set up" msgstr "inizializzazione della connessione SSL fallita: contesto SSL non impostato" -#: libpq/be-secure-openssl.c:477 +#: libpq/be-secure-openssl.c:453 #, c-format msgid "could not initialize SSL connection: %s" msgstr "inizializzazione della connessione SSL fallita: %s" -#: libpq/be-secure-openssl.c:485 +#: libpq/be-secure-openssl.c:461 #, c-format msgid "could not set SSL socket: %s" msgstr "impostazione del socket SSL fallita: %s" -#: libpq/be-secure-openssl.c:540 +#: libpq/be-secure-openssl.c:516 #, c-format msgid "could not accept SSL connection: %m" msgstr "accettazione della connessione SSL fallita: %m" -#: libpq/be-secure-openssl.c:544 libpq/be-secure-openssl.c:555 +#: libpq/be-secure-openssl.c:520 libpq/be-secure-openssl.c:531 #, c-format msgid "could not accept SSL connection: EOF detected" msgstr "accettazione della connessione SSL fallita: fine file individuata" -#: libpq/be-secure-openssl.c:549 +#: libpq/be-secure-openssl.c:525 #, c-format msgid "could not accept SSL connection: %s" msgstr "accettazione della connessione SSL fallita: %s" -#: libpq/be-secure-openssl.c:560 libpq/be-secure-openssl.c:699 -#: libpq/be-secure-openssl.c:759 +#: libpq/be-secure-openssl.c:536 libpq/be-secure-openssl.c:677 +#: libpq/be-secure-openssl.c:744 #, c-format msgid "unrecognized SSL error code: %d" msgstr "codice di errore SSL sconosciuto: %d" -#: libpq/be-secure-openssl.c:602 +#: libpq/be-secure-openssl.c:578 #, c-format msgid "SSL certificate's common name contains embedded null" msgstr "Il nome comune del certificato SSL contiene un null" -#: libpq/be-secure-openssl.c:613 +#: libpq/be-secure-openssl.c:589 #, 
c-format msgid "SSL connection from \"%s\"" msgstr "connessione SSL da \"%s\"" -#: libpq/be-secure-openssl.c:690 libpq/be-secure-openssl.c:750 +#: libpq/be-secure-openssl.c:666 libpq/be-secure-openssl.c:728 #, c-format msgid "SSL error: %s" msgstr "errore SSL: %s" -#: libpq/be-secure-openssl.c:1179 +#: libpq/be-secure-openssl.c:909 +#, c-format +msgid "could not open DH parameters file \"%s\": %m" +msgstr "errore nell'apertura del file di parametri DH \"%s\": %m" + +#: libpq/be-secure-openssl.c:921 +#, c-format +msgid "could not load DH parameters file: %s" +msgstr "errore nell'apertura del file di parametri DH: %s" + +#: libpq/be-secure-openssl.c:931 +#, c-format +msgid "invalid DH parameters: %s" +msgstr "parametri DH non validi: %s" + +#: libpq/be-secure-openssl.c:939 +#, c-format +msgid "invalid DH parameters: p is not prime" +msgstr "parametri DH non validi: p non è primo" + +#: libpq/be-secure-openssl.c:947 +#, c-format +msgid "invalid DH parameters: neither suitable generator or safe prime" +msgstr "parametri DH non validi: né un generatore adatto o un primo sicuro" + +#: libpq/be-secure-openssl.c:1088 +#, c-format +msgid "DH: could not load DH parameters" +msgstr "DH: errore nel caricamento dei parametri DH" + +#: libpq/be-secure-openssl.c:1096 +#, c-format +msgid "DH: could not set DH parameters: %s" +msgstr "DH: errore nell'impostazione dei parametri DH: %s" + +#: libpq/be-secure-openssl.c:1120 #, c-format msgid "ECDH: unrecognized curve name: %s" msgstr "ECDH: nome della curva non riconosciuto: %s" -#: libpq/be-secure-openssl.c:1188 +#: libpq/be-secure-openssl.c:1129 #, c-format msgid "ECDH: could not create key" msgstr "ECDH: chiave non creata" -#: libpq/be-secure-openssl.c:1216 +#: libpq/be-secure-openssl.c:1157 msgid "no SSL error reported" msgstr "nessun errore SSL riportato" -#: libpq/be-secure-openssl.c:1220 +#: libpq/be-secure-openssl.c:1161 #, c-format msgid "SSL error code %lu" msgstr "codice di errore SSL: %lu" -#: libpq/be-secure.c:188 libpq/be-secure.c:274 +#: libpq/be-secure.c:189 libpq/be-secure.c:275 #, c-format msgid "terminating connection due to unexpected postmaster exit" msgstr "la connessione è stata terminata a causa di un'uscita inattesa di postmaster" @@ -12257,27 +12212,22 @@ msgstr "Il ruolo \"%s\" non esiste." msgid "User \"%s\" has no password assigned." msgstr "L'utente \"%s\" non ha una password assegnata." -#: libpq/crypt.c:76 -#, c-format -msgid "User \"%s\" has an empty password." -msgstr "Il ruolo \"%s\" ha una password vuota." - -#: libpq/crypt.c:87 +#: libpq/crypt.c:79 #, c-format msgid "User \"%s\" has an expired password." msgstr "L'utente \"%s\" ha la password scaduta." -#: libpq/crypt.c:181 +#: libpq/crypt.c:173 #, c-format msgid "User \"%s\" has a password that cannot be used with MD5 authentication." msgstr "L'utente \"%s\" ha una password che non può essere usata con l'autenticazione MD5." -#: libpq/crypt.c:205 libpq/crypt.c:246 libpq/crypt.c:270 +#: libpq/crypt.c:197 libpq/crypt.c:238 libpq/crypt.c:262 #, c-format msgid "Password does not match for user \"%s\"." msgstr "Le password non combaciano per l'utente \"%s\"." -#: libpq/crypt.c:289 +#: libpq/crypt.c:281 #, c-format msgid "Password of user \"%s\" is in unrecognized format." msgstr "La password dell'utente \"%s\" è in un formato non riconosciuto." 
@@ -12506,8 +12456,8 @@ msgstr "la lista di segreti RADIUS non può essere vuota" #: libpq/hba.c:1611 #, c-format -msgid "the number of %s (%i) must be 1 or the same as the number of %s (%i)" -msgstr "il numero di %s (%i) deve essere 1 oppure lo stesso numero di %s (%i)" +msgid "the number of %s (%d) must be 1 or the same as the number of %s (%d)" +msgstr "il numero di %s (%d) deve essere 1 oppure lo stesso numero di %s (%d)" #: libpq/hba.c:1645 msgid "ident, peer, gssapi, sspi, and cert" @@ -12626,150 +12576,150 @@ msgstr "nessuna corrispondenza nella mappa utenti \"%s\" per l'utente \"%s\" aut msgid "could not open usermap file \"%s\": %m" msgstr "apertura del file usermap \"%s\" fallita: %m" -#: libpq/pqcomm.c:201 +#: libpq/pqcomm.c:220 #, c-format msgid "could not set socket to nonblocking mode: %m" msgstr "impossibile impostare il socket in modalità non bloccante: %m" -#: libpq/pqcomm.c:355 +#: libpq/pqcomm.c:374 #, c-format msgid "Unix-domain socket path \"%s\" is too long (maximum %d bytes)" msgstr "Il percorso del socket di dominio unix \"%s\" è troppo lungo (massimo %d byte)" -#: libpq/pqcomm.c:376 +#: libpq/pqcomm.c:395 #, c-format msgid "could not translate host name \"%s\", service \"%s\" to address: %s" msgstr "conversione del nome host \"%s\", servizio \"%s\" in indirizzo fallita: %s" -#: libpq/pqcomm.c:380 +#: libpq/pqcomm.c:399 #, c-format msgid "could not translate service \"%s\" to address: %s" msgstr "conversione del servizio \"%s\" in indirizzo fallita: %s" -#: libpq/pqcomm.c:407 +#: libpq/pqcomm.c:426 #, c-format msgid "could not bind to all requested addresses: MAXLISTEN (%d) exceeded" msgstr "bind a tutti gli indirizzi richiesti fallito: MAXLISTEN (%d) superato" -#: libpq/pqcomm.c:416 +#: libpq/pqcomm.c:435 msgid "IPv4" msgstr "IPv4" -#: libpq/pqcomm.c:420 +#: libpq/pqcomm.c:439 msgid "IPv6" msgstr "IPv6" -#: libpq/pqcomm.c:425 +#: libpq/pqcomm.c:444 msgid "Unix" msgstr "Unix" -#: libpq/pqcomm.c:430 +#: libpq/pqcomm.c:449 #, c-format msgid "unrecognized address family %d" msgstr "famiglia di indirizzi %d sconosciuto" #. translator: first %s is IPv4, IPv6, or Unix -#: libpq/pqcomm.c:456 +#: libpq/pqcomm.c:475 #, c-format msgid "could not create %s socket for address \"%s\": %m" msgstr "creazione del socket %s per l'indirizzo \"%s\" fallita: %m" #. translator: first %s is IPv4, IPv6, or Unix -#: libpq/pqcomm.c:482 +#: libpq/pqcomm.c:501 #, c-format msgid "setsockopt(SO_REUSEADDR) failed for %s address \"%s\": %m" msgstr "setsockopt(SO_REUSEADDR) fallita per l'indirizzo %s \"%s\": %m" #. translator: first %s is IPv4, IPv6, or Unix -#: libpq/pqcomm.c:499 +#: libpq/pqcomm.c:518 #, c-format msgid "setsockopt(IPV6_V6ONLY) failed for %s address \"%s\": %m" msgstr "setsockopt(IPV6_V6ONLY) fallita per l'indirizzo %s \"%s\": %m" #. translator: first %s is IPv4, IPv6, or Unix -#: libpq/pqcomm.c:519 +#: libpq/pqcomm.c:538 #, c-format msgid "could not bind %s address \"%s\": %m" msgstr "bind dell'indirizzo %s \"%s\" fallito: %m" -#: libpq/pqcomm.c:522 +#: libpq/pqcomm.c:541 #, c-format msgid "Is another postmaster already running on port %d? If not, remove socket file \"%s\" and retry." msgstr "C'è già un altro postmaster in funzione sulla porta %d? Se non c'è, rimuovi il file socket \"%s\" e riprova." -#: libpq/pqcomm.c:525 +#: libpq/pqcomm.c:544 #, c-format msgid "Is another postmaster already running on port %d? If not, wait a few seconds and retry." msgstr "C'è già un altro postmaster in funzione sulla porta %d? Se non c'è, aspetta alcuni secondi e riprova." #. 
translator: first %s is IPv4, IPv6, or Unix -#: libpq/pqcomm.c:558 +#: libpq/pqcomm.c:577 #, c-format msgid "could not listen on %s address \"%s\": %m" msgstr "listen sull'indirizzo %s \"%s\" fallito: %m" -#: libpq/pqcomm.c:567 +#: libpq/pqcomm.c:586 #, c-format msgid "listening on Unix socket \"%s\"" msgstr "in ascolto sul socket Unix \"%s\"" #. translator: first %s is IPv4 or IPv6 -#: libpq/pqcomm.c:573 +#: libpq/pqcomm.c:592 #, c-format msgid "listening on %s address \"%s\", port %d" msgstr "in ascolto sull'indirizzo %s \"%s\", porta %d" -#: libpq/pqcomm.c:656 +#: libpq/pqcomm.c:675 #, c-format msgid "group \"%s\" does not exist" msgstr "il gruppo \"%s\" non esiste" -#: libpq/pqcomm.c:666 +#: libpq/pqcomm.c:685 #, c-format msgid "could not set group of file \"%s\": %m" msgstr "impostazione del gruppo del file \"%s\" fallita: %m" -#: libpq/pqcomm.c:677 +#: libpq/pqcomm.c:696 #, c-format msgid "could not set permissions of file \"%s\": %m" msgstr "impostazione dei permessi del file \"%s\" fallita: %m" -#: libpq/pqcomm.c:707 +#: libpq/pqcomm.c:726 #, c-format msgid "could not accept new connection: %m" msgstr "impossibile accettare la nuova connessione: %m" -#: libpq/pqcomm.c:908 +#: libpq/pqcomm.c:927 #, c-format msgid "there is no client connection" msgstr "c'è alcuna connessione client" -#: libpq/pqcomm.c:959 libpq/pqcomm.c:1055 +#: libpq/pqcomm.c:978 libpq/pqcomm.c:1074 #, c-format msgid "could not receive data from client: %m" msgstr "ricezione dati dal client fallita: %m" -#: libpq/pqcomm.c:1200 tcop/postgres.c:3913 +#: libpq/pqcomm.c:1219 tcop/postgres.c:3926 #, c-format msgid "terminating connection because protocol synchronization was lost" msgstr "la connessione verrà terminata perché la sincronizzazione del protocollo è stata persa" -#: libpq/pqcomm.c:1266 +#: libpq/pqcomm.c:1285 #, c-format msgid "unexpected EOF within message length word" msgstr "fine file inattesa nella word della lunghezza del messaggio" -#: libpq/pqcomm.c:1277 +#: libpq/pqcomm.c:1296 #, c-format msgid "invalid message length" msgstr "lunghezza del messaggio non valida" -#: libpq/pqcomm.c:1299 libpq/pqcomm.c:1312 +#: libpq/pqcomm.c:1318 libpq/pqcomm.c:1331 #, c-format msgid "incomplete message from client" msgstr "messaggio incompleto dal client" -#: libpq/pqcomm.c:1445 +#: libpq/pqcomm.c:1464 #, c-format msgid "could not send data to client: %m" msgstr "invio dati al client fallito: %m" @@ -13110,7 +13060,7 @@ msgstr "ExtensibleNodeMethods \"%s\" non è stato registrato" #: nodes/nodeFuncs.c:123 nodes/nodeFuncs.c:154 parser/parse_coerce.c:1844 #: parser/parse_coerce.c:1872 parser/parse_coerce.c:1948 -#: parser/parse_expr.c:2089 parser/parse_func.c:598 parser/parse_oper.c:958 +#: parser/parse_expr.c:2110 parser/parse_func.c:602 parser/parse_oper.c:964 #, c-format msgid "could not find array type for data type %s" msgstr "non è stato possibile trovare il tipo di array per il tipo di dati %s" @@ -13127,44 +13077,44 @@ msgid "%s cannot be applied to the nullable side of an outer join" msgstr "%s non può essere applicato sul lato che può essere nullo di un join esterno" #. 
translator: %s is a SQL row locking clause such as FOR UPDATE -#: optimizer/plan/planner.c:1544 parser/analyze.c:1624 parser/analyze.c:1821 +#: optimizer/plan/planner.c:1572 parser/analyze.c:1624 parser/analyze.c:1821 #: parser/analyze.c:2615 #, c-format msgid "%s is not allowed with UNION/INTERSECT/EXCEPT" msgstr "%s non è consentito con UNION/INTERSECT/EXCEPT" -#: optimizer/plan/planner.c:2144 optimizer/plan/planner.c:4102 +#: optimizer/plan/planner.c:2172 optimizer/plan/planner.c:4130 #, c-format msgid "could not implement GROUP BY" msgstr "non è stato possibile implementare GROUP BY" -#: optimizer/plan/planner.c:2145 optimizer/plan/planner.c:4103 -#: optimizer/plan/planner.c:4843 optimizer/prep/prepunion.c:938 +#: optimizer/plan/planner.c:2173 optimizer/plan/planner.c:4131 +#: optimizer/plan/planner.c:4871 optimizer/prep/prepunion.c:938 #, c-format msgid "Some of the datatypes only support hashing, while others only support sorting." msgstr "Alcuni dei tipi di dati supportano solo l'hashing, mentre altri supportano solo l'ordinamento." -#: optimizer/plan/planner.c:4842 +#: optimizer/plan/planner.c:4870 #, c-format msgid "could not implement DISTINCT" msgstr "non è stato possibile implementare DISTINCT" -#: optimizer/plan/planner.c:5522 +#: optimizer/plan/planner.c:5550 #, c-format msgid "could not implement window PARTITION BY" msgstr "non è stato possibile implementare PARTITION BY della finestra" -#: optimizer/plan/planner.c:5523 +#: optimizer/plan/planner.c:5551 #, c-format msgid "Window partitioning columns must be of sortable datatypes." msgstr "La colonna di partizionamento della finestra dev'essere un tipo di dato ordinabile." -#: optimizer/plan/planner.c:5527 +#: optimizer/plan/planner.c:5555 #, c-format msgid "could not implement window ORDER BY" msgstr "non è stato possibile implementare ORDER BY della finestra" -#: optimizer/plan/planner.c:5528 +#: optimizer/plan/planner.c:5556 #, c-format msgid "Window ordering columns must be of sortable datatypes." msgstr "La colonna di ordinamento della finestra dev'essere un tipo di dato ordinabile." @@ -13190,7 +13140,7 @@ msgstr "Tutti i tipi di dati devono supportare l'hash." msgid "could not implement %s" msgstr "non è stato possibile implementare %s" -#: optimizer/util/clauses.c:4668 +#: optimizer/util/clauses.c:4693 #, c-format msgid "SQL function \"%s\" during inlining" msgstr "funzione SQL \"%s\" durante l'inlining" @@ -13555,7 +13505,7 @@ msgid "grouping operations are not allowed in partition key expression" msgstr "le funzioni raggruppamento non sono supportate nelle espressioni di partizione" #. 
translator: %s is name of a SQL construct, eg GROUP BY -#: parser/parse_agg.c:530 parser/parse_clause.c:1804 +#: parser/parse_agg.c:530 parser/parse_clause.c:1810 #, c-format msgid "aggregate functions are not allowed in %s" msgstr "le funzioni di aggregazione non sono ammesse in %s" @@ -13571,96 +13521,107 @@ msgstr "le operazioni di raggruppamento non sono ammesse in %s" msgid "outer-level aggregate cannot contain a lower-level variable in its direct arguments" msgstr "gli aggregati di livello esterno non possono contenere una variabile di livello inferiore tra gli argomenti diretti" -#: parser/parse_agg.c:712 +#: parser/parse_agg.c:720 +#, c-format +msgid "aggregate function calls cannot contain set-returning function calls" +msgstr "le chiamate a funzioni di aggregazione non possono contenere chiamate a funzioni che restituiscono insiemi" + +#: parser/parse_agg.c:721 parser/parse_expr.c:1761 parser/parse_expr.c:2237 +#: parser/parse_func.c:773 +#, c-format +msgid "You might be able to move the set-returning function into a LATERAL FROM item." +msgstr "Potresti riuscire a spostare la funzione che restituisce insiemi in un costrutto LATERAL FORM." + +#: parser/parse_agg.c:726 #, c-format msgid "aggregate function calls cannot contain window function calls" msgstr "le chiamate a funzioni di aggregazione non possono contenere chiamate a funzioni finestra" -#: parser/parse_agg.c:790 +#: parser/parse_agg.c:805 msgid "window functions are not allowed in JOIN conditions" msgstr "le funzioni finestra non sono ammesse nelle condizioni JOIN" -#: parser/parse_agg.c:797 +#: parser/parse_agg.c:812 msgid "window functions are not allowed in functions in FROM" msgstr "le funzioni finestra non sono ammesse nelle funzioni in FROM" -#: parser/parse_agg.c:803 +#: parser/parse_agg.c:818 msgid "window functions are not allowed in policy expressions" msgstr "le funzioni finestra non sono ammesse nell'espressione di una regola di sicurezza" -#: parser/parse_agg.c:815 +#: parser/parse_agg.c:830 msgid "window functions are not allowed in window definitions" msgstr "le funzioni finestra non sono ammesse nelle definizioni di finestre" -#: parser/parse_agg.c:847 +#: parser/parse_agg.c:862 msgid "window functions are not allowed in check constraints" msgstr "le funzioni finestra non sono ammesse nei vincoli di controllo" -#: parser/parse_agg.c:851 +#: parser/parse_agg.c:866 msgid "window functions are not allowed in DEFAULT expressions" msgstr "le funzioni finestra non sono ammesse nelle espressioni DEFAULT" -#: parser/parse_agg.c:854 +#: parser/parse_agg.c:869 msgid "window functions are not allowed in index expressions" msgstr "le funzioni finestra non sono ammesse nelle espressioni degli indici" -#: parser/parse_agg.c:857 +#: parser/parse_agg.c:872 msgid "window functions are not allowed in index predicates" msgstr "le funzioni finestra non sono ammesse nei predicati degli indici" -#: parser/parse_agg.c:860 +#: parser/parse_agg.c:875 msgid "window functions are not allowed in transform expressions" msgstr "le funzioni finestra non sono ammesse nelle espressioni di trasformazione" -#: parser/parse_agg.c:863 +#: parser/parse_agg.c:878 msgid "window functions are not allowed in EXECUTE parameters" msgstr "le funzioni finestra non sono ammesse nei parametri di EXECUTE" -#: parser/parse_agg.c:866 +#: parser/parse_agg.c:881 msgid "window functions are not allowed in trigger WHEN conditions" msgstr "le funzioni finestra non sono ammesse nelle condizioni WHEN dei trigger" -#: parser/parse_agg.c:869 +#: 
parser/parse_agg.c:884 msgid "window functions are not allowed in partition key expression" msgstr "le funzioni finestra non sono supportate nelle espressioni di partizione" #. translator: %s is name of a SQL construct, eg GROUP BY -#: parser/parse_agg.c:889 parser/parse_clause.c:1813 +#: parser/parse_agg.c:904 parser/parse_clause.c:1819 #, c-format msgid "window functions are not allowed in %s" msgstr "le funzioni finestra non sono ammesse in %s" -#: parser/parse_agg.c:923 parser/parse_clause.c:2647 +#: parser/parse_agg.c:938 parser/parse_clause.c:2653 #, c-format msgid "window \"%s\" does not exist" msgstr "la finestra \"%s\" non esiste" -#: parser/parse_agg.c:1008 +#: parser/parse_agg.c:1023 #, c-format msgid "too many grouping sets present (maximum 4096)" msgstr "troppi insiemi di raggruppamento presenti (il massimo è 4096)" -#: parser/parse_agg.c:1157 +#: parser/parse_agg.c:1172 #, c-format msgid "aggregate functions are not allowed in a recursive query's recursive term" msgstr "le funzioni di aggregazione non sono ammesse nel termine ricorsivo di una query ricorsiva" -#: parser/parse_agg.c:1350 +#: parser/parse_agg.c:1365 #, c-format msgid "column \"%s.%s\" must appear in the GROUP BY clause or be used in an aggregate function" msgstr "la colonna \"%s.%s\" deve comparire nella clausola GROUP BY o essere usata in una funzione di aggregazione" -#: parser/parse_agg.c:1353 +#: parser/parse_agg.c:1368 #, c-format msgid "Direct arguments of an ordered-set aggregate must use only grouped columns." msgstr "Gli argomenti diretti di un aggregato su insieme ordinato devono usare solo colonne raggruppate." -#: parser/parse_agg.c:1358 +#: parser/parse_agg.c:1373 #, c-format msgid "subquery uses ungrouped column \"%s.%s\" from outer query" msgstr "la sottoquery usa la colonna non raggruppata \"%s.%s\" dalla query esterna" -#: parser/parse_agg.c:1522 +#: parser/parse_agg.c:1537 #, c-format msgid "arguments to GROUPING must be grouping expressions of the associated query level" msgstr "gli argomenti di GROUPING devono essere espressioni di raggruppamento del livello della query associato" @@ -13670,236 +13631,242 @@ msgstr "gli argomenti di GROUPING devono essere espressioni di raggruppamento de msgid "relation \"%s\" cannot be the target of a modifying statement" msgstr "la relazione \"%s\" non può essere obiettivo di un comando di modifica" -#: parser/parse_clause.c:651 +#: parser/parse_clause.c:608 parser/parse_clause.c:636 +#: parser/parse_func.c:2153 +#, c-format +msgid "set-returning functions must appear at top level of FROM" +msgstr "le funzioni che restituiscono insiemi devono comparire al livello superiore del FROM" + +#: parser/parse_clause.c:648 #, c-format msgid "multiple column definition lists are not allowed for the same function" msgstr "non è consentita più di una lista di definizione di colonne multiple per la stessa funzione" -#: parser/parse_clause.c:684 +#: parser/parse_clause.c:681 #, c-format msgid "ROWS FROM() with multiple functions cannot have a column definition list" msgstr "ROWS FROM() con più di una funzione non può avere una lista di definizioni di colonne" -#: parser/parse_clause.c:685 +#: parser/parse_clause.c:682 #, c-format msgid "Put a separate column definition list for each function inside ROWS FROM()." 
msgstr "Specifica una lista di definizioni colonna separata per ogni funzione dentro ROWS FROM()" -#: parser/parse_clause.c:691 +#: parser/parse_clause.c:688 #, c-format msgid "UNNEST() with multiple arguments cannot have a column definition list" msgstr "UNNEST() con più di un argomento non può avere una lista di definizioni di colonne" -#: parser/parse_clause.c:692 +#: parser/parse_clause.c:689 #, c-format msgid "Use separate UNNEST() calls inside ROWS FROM(), and attach a column definition list to each one." msgstr "Usa una invocazione di UNNEST() separata in ROWS FROM() e collega una lista di definizioni di colonne ad ognuna di esse." -#: parser/parse_clause.c:699 +#: parser/parse_clause.c:696 #, c-format msgid "WITH ORDINALITY cannot be used with a column definition list" msgstr "WITH ORDINALITY non può essere usata con una lista di definizioni di colonne" -#: parser/parse_clause.c:700 +#: parser/parse_clause.c:697 #, c-format msgid "Put the column definition list inside ROWS FROM()." msgstr "Specifica la lista di definizioni di colonne dentro ROWS FROM()." -#: parser/parse_clause.c:803 +#: parser/parse_clause.c:800 #, c-format msgid "only one FOR ORDINALITY column is allowed" msgstr "solo una colonna FOR ORDINALITY consentita" -#: parser/parse_clause.c:864 +#: parser/parse_clause.c:861 #, c-format msgid "column name \"%s\" is not unique" msgstr "il nome della colonna \"%s\" non è unico" -#: parser/parse_clause.c:906 +#: parser/parse_clause.c:903 #, c-format msgid "namespace name \"%s\" is not unique" msgstr "il nome di namespace \"%s\" non è unico" -#: parser/parse_clause.c:916 +#: parser/parse_clause.c:913 #, c-format msgid "only one default namespace is allowed" msgstr "solo un nome predefinito di namespace consentito" -#: parser/parse_clause.c:977 +#: parser/parse_clause.c:974 #, c-format msgid "tablesample method %s does not exist" msgstr "il metodo di campionamento tabella %s non esiste" -#: parser/parse_clause.c:999 +#: parser/parse_clause.c:996 #, c-format msgid "tablesample method %s requires %d argument, not %d" msgid_plural "tablesample method %s requires %d arguments, not %d" msgstr[0] "il metodo di campionamento %s richiede %d argumenti, not %d" msgstr[1] "il metodo di campionamento %s richiede %d argumenti, not %d" -#: parser/parse_clause.c:1033 +#: parser/parse_clause.c:1030 #, c-format msgid "tablesample method %s does not support REPEATABLE" msgstr "il metodo di campionamento %s non supporta REPEATABLE" -#: parser/parse_clause.c:1194 +#: parser/parse_clause.c:1200 #, c-format msgid "TABLESAMPLE clause can only be applied to tables and materialized views" msgstr "la clausola TABLESAMPLE può essere applicata solo a tabelle e viste materializzate" -#: parser/parse_clause.c:1364 +#: parser/parse_clause.c:1370 #, c-format msgid "column name \"%s\" appears more than once in USING clause" msgstr "il nome della colonna \"%s\" compare più di una volta nella clausola USING" -#: parser/parse_clause.c:1379 +#: parser/parse_clause.c:1385 #, c-format msgid "common column name \"%s\" appears more than once in left table" msgstr "il nome comune della colonna \"%s\" compare più di una volta nella tabella di sinistra" -#: parser/parse_clause.c:1388 +#: parser/parse_clause.c:1394 #, c-format msgid "column \"%s\" specified in USING clause does not exist in left table" msgstr "la colonna \"%s\" specificata nella clausola USING non esiste nella tabella di sinistra" -#: parser/parse_clause.c:1402 +#: parser/parse_clause.c:1408 #, c-format msgid "common column name \"%s\" appears more than 
once in right table" msgstr "il nome comune della colonna \"%s\" compare più di una volta nella tabella di destra" -#: parser/parse_clause.c:1411 +#: parser/parse_clause.c:1417 #, c-format msgid "column \"%s\" specified in USING clause does not exist in right table" msgstr "la colonna \"%s\" specificata nella clausola USING non esiste nella tabella di destra" -#: parser/parse_clause.c:1465 +#: parser/parse_clause.c:1471 #, c-format msgid "column alias list for \"%s\" has too many entries" msgstr "la lista di alias delle colonne per \"%s\" ha troppi elementi" #. translator: %s is name of a SQL construct, eg LIMIT -#: parser/parse_clause.c:1774 +#: parser/parse_clause.c:1780 #, c-format msgid "argument of %s must not contain variables" msgstr "l'argomento di %s non può contenere variabili" #. translator: first %s is name of a SQL construct, eg ORDER BY -#: parser/parse_clause.c:1939 +#: parser/parse_clause.c:1945 #, c-format msgid "%s \"%s\" is ambiguous" msgstr "%s \"%s\" è ambiguo" #. translator: %s is name of a SQL construct, eg ORDER BY -#: parser/parse_clause.c:1968 +#: parser/parse_clause.c:1974 #, c-format msgid "non-integer constant in %s" msgstr "costante non intera in %s" # translator: %s is name of a SQL construct, eg ORDER BY #. translator: %s is name of a SQL construct, eg ORDER BY -#: parser/parse_clause.c:1990 +#: parser/parse_clause.c:1996 #, c-format msgid "%s position %d is not in select list" msgstr "%s in posizione %d non è nella lista SELECT" -#: parser/parse_clause.c:2431 +#: parser/parse_clause.c:2437 #, c-format msgid "CUBE is limited to 12 elements" msgstr "CUBE è limitato a 12 elementi" -#: parser/parse_clause.c:2635 +#: parser/parse_clause.c:2641 #, c-format msgid "window \"%s\" is already defined" msgstr "la finestra \"%s\" è già definita" -#: parser/parse_clause.c:2696 +#: parser/parse_clause.c:2702 #, c-format msgid "cannot override PARTITION BY clause of window \"%s\"" msgstr "non è possibile scavalcare la clausola PARTITION BY della finestra \"%s\"" -#: parser/parse_clause.c:2708 +#: parser/parse_clause.c:2714 #, c-format msgid "cannot override ORDER BY clause of window \"%s\"" msgstr "non è possibile scavalcare la clausola ORDER BY della finestra \"%s\"" -#: parser/parse_clause.c:2738 parser/parse_clause.c:2744 +#: parser/parse_clause.c:2744 parser/parse_clause.c:2750 #, c-format msgid "cannot copy window \"%s\" because it has a frame clause" msgstr "non è possibile copiare la finestra \"%s\" perché ha una clausola frame" -#: parser/parse_clause.c:2746 +#: parser/parse_clause.c:2752 #, c-format msgid "Omit the parentheses in this OVER clause." msgstr "Omettere le parentesi in questa clausola OVER." 
-#: parser/parse_clause.c:2812 +#: parser/parse_clause.c:2818 #, c-format msgid "in an aggregate with DISTINCT, ORDER BY expressions must appear in argument list" msgstr "in un aggregato con DISTINCT, le espressioni ORDER BY devono figurare nella lista di argomenti" -#: parser/parse_clause.c:2813 +#: parser/parse_clause.c:2819 #, c-format msgid "for SELECT DISTINCT, ORDER BY expressions must appear in select list" msgstr "per SELECT DISTINCT, le espressioni ORDER BY devono figurare nella lista SELECT" -#: parser/parse_clause.c:2845 +#: parser/parse_clause.c:2851 #, c-format msgid "an aggregate with DISTINCT must have at least one argument" msgstr "un aggregato con DISTINCT deve avere almeno un argomento" -#: parser/parse_clause.c:2846 +#: parser/parse_clause.c:2852 #, c-format msgid "SELECT DISTINCT must have at least one column" msgstr "SELECT DISTINCT deve avere almeno una colonna" -#: parser/parse_clause.c:2912 parser/parse_clause.c:2944 +#: parser/parse_clause.c:2918 parser/parse_clause.c:2950 #, c-format msgid "SELECT DISTINCT ON expressions must match initial ORDER BY expressions" msgstr "le espressioni SELECT DISTINCT ON devono coincidere con l'espressione ORDER BY iniziale" -#: parser/parse_clause.c:3022 +#: parser/parse_clause.c:3028 #, c-format msgid "ASC/DESC is not allowed in ON CONFLICT clause" msgstr "ASC/DESC non è permesso nelle clausole ON CONFLICT" -#: parser/parse_clause.c:3028 +#: parser/parse_clause.c:3034 #, c-format msgid "NULLS FIRST/LAST is not allowed in ON CONFLICT clause" msgstr "NULLS FIRST/LAST non è permesso nelle clausole ON CONFLICT" -#: parser/parse_clause.c:3108 +#: parser/parse_clause.c:3114 #, c-format msgid "ON CONFLICT DO UPDATE requires inference specification or constraint name" msgstr "ON CONFLICT DO UPDATE richiede una specifica di inferenza o il nome di un vincolo" -#: parser/parse_clause.c:3109 +#: parser/parse_clause.c:3115 #, c-format msgid "For example, ON CONFLICT (column_name)." msgstr "Per esempio, ON CONFLICT (nome_colonna)." -#: parser/parse_clause.c:3120 +#: parser/parse_clause.c:3126 #, c-format msgid "ON CONFLICT is not supported with system catalog tables" msgstr "ON CONFLICT non è supportato sulle tabelle del catalogo di sistema" -#: parser/parse_clause.c:3128 +#: parser/parse_clause.c:3134 #, c-format msgid "ON CONFLICT is not supported on table \"%s\" used as a catalog table" msgstr "ON CONFLICT non è supportato sulla tabella \"%s\" usata come tabella di catalogo" -#: parser/parse_clause.c:3254 +#: parser/parse_clause.c:3277 #, c-format msgid "operator %s is not a valid ordering operator" msgstr "l'operatore %s non è un operatore di ordinamento valido" -#: parser/parse_clause.c:3256 +#: parser/parse_clause.c:3279 #, c-format msgid "Ordering operators must be \"<\" or \">\" members of btree operator families." msgstr "Gli operatori di ordinamento devono essere i membri \"<\" oppure \">\" di una famiglia di operatori btree."
#: parser/parse_coerce.c:971 parser/parse_coerce.c:1001 #: parser/parse_coerce.c:1019 parser/parse_coerce.c:1034 -#: parser/parse_expr.c:2123 parser/parse_expr.c:2699 parser/parse_target.c:936 +#: parser/parse_expr.c:2144 parser/parse_expr.c:2732 parser/parse_target.c:936 #, c-format msgid "cannot cast type %s to %s" msgstr "non è possibile convertire il tipo %s in %s" @@ -14113,410 +14080,421 @@ msgstr "FOR UPDATE/SHARE non è implementato in una query ricorsiva" msgid "recursive reference to query \"%s\" must not appear more than once" msgstr "il riferimento ricorsivo alla query \"%s\" non può apparire più di una volta" -#: parser/parse_expr.c:357 +#: parser/parse_expr.c:350 #, c-format msgid "DEFAULT is not allowed in this context" msgstr "DEFAULT non ammesso in questo contesto" -#: parser/parse_expr.c:410 parser/parse_relation.c:3248 -#: parser/parse_relation.c:3268 +#: parser/parse_expr.c:403 parser/parse_relation.c:3286 +#: parser/parse_relation.c:3306 #, c-format msgid "column %s.%s does not exist" msgstr "la colonna %s.%s non esiste" -#: parser/parse_expr.c:422 +#: parser/parse_expr.c:415 #, c-format msgid "column \"%s\" not found in data type %s" msgstr "la colonna \"%s\" non è stata trovata nel tipo di dato %s" -#: parser/parse_expr.c:428 +#: parser/parse_expr.c:421 #, c-format msgid "could not identify column \"%s\" in record data type" msgstr "la colonna \"%s\" non identificata nel tipo di dato record" -#: parser/parse_expr.c:434 +#: parser/parse_expr.c:427 #, c-format msgid "column notation .%s applied to type %s, which is not a composite type" msgstr "la notazione della colonna .%s sembra essere di tipo %s, che non è un tipo composito" -#: parser/parse_expr.c:464 parser/parse_target.c:722 +#: parser/parse_expr.c:458 parser/parse_target.c:722 #, c-format msgid "row expansion via \"*\" is not supported here" msgstr "l'espansione della riga tramite \"*\" non è supportata qui" -#: parser/parse_expr.c:769 parser/parse_relation.c:689 +#: parser/parse_expr.c:767 parser/parse_relation.c:689 #: parser/parse_relation.c:789 parser/parse_target.c:1171 #, c-format msgid "column reference \"%s\" is ambiguous" msgstr "il riferimento alla colonna \"%s\" è ambiguo" -#: parser/parse_expr.c:825 parser/parse_param.c:110 parser/parse_param.c:142 +#: parser/parse_expr.c:823 parser/parse_param.c:110 parser/parse_param.c:142 #: parser/parse_param.c:199 parser/parse_param.c:298 #, c-format msgid "there is no parameter $%d" msgstr "parametro $%d non presente" -#: parser/parse_expr.c:1064 +#: parser/parse_expr.c:1066 #, c-format msgid "NULLIF requires = operator to yield boolean" msgstr "NULLIF richiede che l'operatore = restituisca un valore booleano" -#: parser/parse_expr.c:1508 parser/parse_expr.c:1540 +#. translator: %s is name of a SQL construct, eg NULLIF +#: parser/parse_expr.c:1072 parser/parse_expr.c:3048 +#, c-format +msgid "%s must not return a set" +msgstr "%s non può restituire un insieme" + +#: parser/parse_expr.c:1519 parser/parse_expr.c:1551 #, c-format msgid "number of columns does not match number of values" msgstr "il numero di colonne non corrisponde al numero di valori" -#: parser/parse_expr.c:1554 +#: parser/parse_expr.c:1565 #, c-format msgid "source for a multiple-column UPDATE item must be a sub-SELECT or ROW() expression" msgstr "l'origine per un UPDATE multi-colonna deve essere una sub-SELECT o espressione ROW()" -#: parser/parse_expr.c:1798 +#. 
translator: %s is name of a SQL construct, eg GROUP BY +#: parser/parse_expr.c:1759 parser/parse_expr.c:2235 parser/parse_func.c:2256 +#, c-format +msgid "set-returning functions are not allowed in %s" +msgstr "non si possono usare funzioni che restituiscono insiemi in %s" + +#: parser/parse_expr.c:1819 msgid "cannot use subquery in check constraint" msgstr "non si può usare una sottoquery nel vincolo di controllo" -#: parser/parse_expr.c:1802 +#: parser/parse_expr.c:1823 msgid "cannot use subquery in DEFAULT expression" msgstr "non si può usare una sottoquery in un'espressione DEFAULT" -#: parser/parse_expr.c:1805 +#: parser/parse_expr.c:1826 msgid "cannot use subquery in index expression" msgstr "non si possono usare sottoquery nell'espressione dell'indice" -#: parser/parse_expr.c:1808 +#: parser/parse_expr.c:1829 msgid "cannot use subquery in index predicate" msgstr "non è possibile usare sottoquery nel predicato dell'indice" -#: parser/parse_expr.c:1811 +#: parser/parse_expr.c:1832 msgid "cannot use subquery in transform expression" msgstr "non è possibile usare sottoquery in un'espressione di trasformazione" -#: parser/parse_expr.c:1814 +#: parser/parse_expr.c:1835 msgid "cannot use subquery in EXECUTE parameter" msgstr "non si possono usare sottoquery nel parametro EXECUTE" -#: parser/parse_expr.c:1817 +#: parser/parse_expr.c:1838 msgid "cannot use subquery in trigger WHEN condition" msgstr "non è possibile usare sottoquery nella condizione WHEN del trigger" -#: parser/parse_expr.c:1820 +#: parser/parse_expr.c:1841 msgid "cannot use subquery in partition key expression" msgstr "non è possibile usare sottoquery in un'espressione di partizione" -#: parser/parse_expr.c:1873 +#: parser/parse_expr.c:1894 #, c-format msgid "subquery must return only one column" msgstr "la sottoquery deve restituire solo una colonna" -#: parser/parse_expr.c:1957 +#: parser/parse_expr.c:1978 #, c-format msgid "subquery has too many columns" msgstr "la sottoquery ha troppe colonne" -#: parser/parse_expr.c:1962 +#: parser/parse_expr.c:1983 #, c-format msgid "subquery has too few columns" msgstr "la sottoquery ha troppe poche colonne" -#: parser/parse_expr.c:2063 +#: parser/parse_expr.c:2084 #, c-format msgid "cannot determine type of empty array" msgstr "non è possibile determinare il tipo di un array vuoto" -#: parser/parse_expr.c:2064 +#: parser/parse_expr.c:2085 #, c-format msgid "Explicitly cast to the desired type, for example ARRAY[]::integer[]." msgstr "Effettua una conversione esplicita al tipo desiderato, ad esempio ARRAY[]::integer[]."
-#: parser/parse_expr.c:2078 +#: parser/parse_expr.c:2099 #, c-format msgid "could not find element type for data type %s" msgstr "tipo dell'elemento non trovato per il tipo di dato %s" -#: parser/parse_expr.c:2353 +#: parser/parse_expr.c:2386 #, c-format msgid "unnamed XML attribute value must be a column reference" msgstr "il valore dell'attributo XML senza nome dev'essere un riferimento ad una colonna" -#: parser/parse_expr.c:2354 +#: parser/parse_expr.c:2387 #, c-format msgid "unnamed XML element value must be a column reference" msgstr "il valore dell'elemento XML senza nome dev'essere un riferimento ad una colonna" -#: parser/parse_expr.c:2369 +#: parser/parse_expr.c:2402 #, c-format msgid "XML attribute name \"%s\" appears more than once" msgstr "l'attributo XML di nome \"%s\" compare più di una volta" -#: parser/parse_expr.c:2476 +#: parser/parse_expr.c:2509 #, c-format msgid "cannot cast XMLSERIALIZE result to %s" msgstr "non è possibile convertire il risultato di XMLSERIALIZE a %s" -#: parser/parse_expr.c:2772 parser/parse_expr.c:2967 +#: parser/parse_expr.c:2805 parser/parse_expr.c:3001 #, c-format msgid "unequal number of entries in row expressions" msgstr "numero di elementi differente nelle espressioni di riga" -#: parser/parse_expr.c:2782 +#: parser/parse_expr.c:2815 #, c-format msgid "cannot compare rows of zero length" msgstr "non è possibile confrontare righe di lunghezza zero" -#: parser/parse_expr.c:2806 +#: parser/parse_expr.c:2840 #, c-format msgid "row comparison operator must yield type boolean, not type %s" msgstr "l'operatore di comparazione tra righe deve restituire il tipo booleano, non il tipo %s" -#: parser/parse_expr.c:2813 +#: parser/parse_expr.c:2847 #, c-format msgid "row comparison operator must not return a set" msgstr "l'operatore di comparazione tra righe non può restituire un insieme" -#: parser/parse_expr.c:2872 parser/parse_expr.c:2913 +#: parser/parse_expr.c:2906 parser/parse_expr.c:2947 #, c-format msgid "could not determine interpretation of row comparison operator %s" msgstr "non è stato possibile determinare un'interpretazione dell'operatore di comparazione tra righe %s" -#: parser/parse_expr.c:2874 +#: parser/parse_expr.c:2908 #, c-format msgid "Row comparison operators must be associated with btree operator families." msgstr "Gli operatori di comparazione tra righe devono essere associati a famiglie di operatori btree." -#: parser/parse_expr.c:2915 +#: parser/parse_expr.c:2949 #, c-format msgid "There are multiple equally-plausible candidates." msgstr "C'è più di un candidato egualmente plausibile."
-#: parser/parse_expr.c:3007 +#: parser/parse_expr.c:3042 #, c-format msgid "IS DISTINCT FROM requires = operator to yield boolean" msgstr "IS DISTINCT FROM richiede che l'operatore = restituisca un valore booleano" -#: parser/parse_expr.c:3320 parser/parse_expr.c:3338 +#: parser/parse_expr.c:3361 parser/parse_expr.c:3379 #, c-format msgid "operator precedence change: %s is now lower precedence than %s" msgstr "cambio di precedenza di operatori: %s ora ha precedenza inferiore di %s" -#: parser/parse_func.c:175 +#: parser/parse_func.c:179 #, c-format msgid "argument name \"%s\" used more than once" msgstr "il nome dell'argomento \"%s\" è usato più di una volta" -#: parser/parse_func.c:186 +#: parser/parse_func.c:190 #, c-format msgid "positional argument cannot follow named argument" msgstr "gli argomenti posizionali non possono seguire gli argomenti con nome" -#: parser/parse_func.c:271 +#: parser/parse_func.c:275 #, c-format msgid "%s(*) specified, but %s is not an aggregate function" msgstr "%s(*) specificato, ma %s non è una funzione di aggregazione" -#: parser/parse_func.c:278 +#: parser/parse_func.c:282 #, c-format msgid "DISTINCT specified, but %s is not an aggregate function" msgstr "DISTINCT specificato, ma %s non è una funzione di aggregazione" -#: parser/parse_func.c:284 +#: parser/parse_func.c:288 #, c-format msgid "WITHIN GROUP specified, but %s is not an aggregate function" msgstr "WITHIN GROUP specificato, ma %s non è una funzione di aggregazione" -#: parser/parse_func.c:290 +#: parser/parse_func.c:294 #, c-format msgid "ORDER BY specified, but %s is not an aggregate function" msgstr "ORDER BY specificato, ma %s non è una funzione di aggregazione" -#: parser/parse_func.c:296 +#: parser/parse_func.c:300 #, c-format msgid "FILTER specified, but %s is not an aggregate function" msgstr "FILTER specificato, ma %s non è una funzione di aggregazione" -#: parser/parse_func.c:302 +#: parser/parse_func.c:306 #, c-format msgid "OVER specified, but %s is not a window function nor an aggregate function" msgstr "OVER specificato, ma %s non è una funzione finestra né una funzione di aggregazione" -#: parser/parse_func.c:332 +#: parser/parse_func.c:336 #, c-format msgid "WITHIN GROUP is required for ordered-set aggregate %s" msgstr "WITHIN GROUP è richiesto per l'aggregato su insieme ordinato %s" -#: parser/parse_func.c:338 +#: parser/parse_func.c:342 #, c-format msgid "OVER is not supported for ordered-set aggregate %s" msgstr "OVER non è supportato per l'aggregato su insieme ordinato %s" -#: parser/parse_func.c:369 parser/parse_func.c:398 +#: parser/parse_func.c:373 parser/parse_func.c:402 #, c-format msgid "There is an ordered-set aggregate %s, but it requires %d direct arguments, not %d." msgstr "Esiste un aggregato su insieme ordinato %s, ma richiede %d argomenti diretti, non %d." -#: parser/parse_func.c:423 +#: parser/parse_func.c:427 #, c-format msgid "To use the hypothetical-set aggregate %s, the number of hypothetical direct arguments (here %d) must match the number of ordering columns (here %d)." msgstr "Per usare l'aggregato su insieme ipotetico %s il numero di argomenti ipotetici diretti (qui %d) deve combaciare con quello di colonne di ordinamento (qui %d)." -#: parser/parse_func.c:437 +#: parser/parse_func.c:441 #, c-format msgid "There is an ordered-set aggregate %s, but it requires at least %d direct arguments." msgstr "Esiste un aggregato su insieme ordinato %s, ma richiede almeno %d argomenti diretti." 
-#: parser/parse_func.c:456 +#: parser/parse_func.c:460 #, c-format msgid "%s is not an ordered-set aggregate, so it cannot have WITHIN GROUP" msgstr "%s non è un aggregato su insieme ordinato, per cui non può avere WITHIN GROUP" -#: parser/parse_func.c:469 +#: parser/parse_func.c:473 #, c-format msgid "window function %s requires an OVER clause" msgstr "la funzione finestra %s richiede una clausola OVER" -#: parser/parse_func.c:476 +#: parser/parse_func.c:480 #, c-format msgid "window function %s cannot have WITHIN GROUP" msgstr "la funzione finestra %s non può avere WITHIN GROUP" -#: parser/parse_func.c:497 +#: parser/parse_func.c:501 #, c-format msgid "function %s is not unique" msgstr "la funzione %s non è unica" -#: parser/parse_func.c:500 +#: parser/parse_func.c:504 #, c-format msgid "Could not choose a best candidate function. You might need to add explicit type casts." msgstr "Non è stato possibile scegliere la funzione migliore. Potrebbe essere necessario convertire i tipi esplicitamente." -#: parser/parse_func.c:511 +#: parser/parse_func.c:515 #, c-format msgid "No aggregate function matches the given name and argument types. Perhaps you misplaced ORDER BY; ORDER BY must appear after all regular arguments of the aggregate." msgstr "Nessuna funzione di aggregazione trovata con nome e tipi di argomenti forniti. Forse hai posizionato ORDER BY male: ORDER BY deve apparire dopo tutti gli argomenti regolari della funzione di aggregazione." -#: parser/parse_func.c:522 +#: parser/parse_func.c:526 #, c-format msgid "No function matches the given name and argument types. You might need to add explicit type casts." msgstr "Nessuna funzione trovata con nome e tipi di argomenti forniti. Potrebbe essere necessario convertire i tipi esplicitamente."
-#: parser/parse_func.c:624 +#: parser/parse_func.c:628 #, c-format msgid "VARIADIC argument must be an array" msgstr "l'argomento VARIADIC deve essere un array" -#: parser/parse_func.c:676 parser/parse_func.c:740 +#: parser/parse_func.c:680 parser/parse_func.c:744 #, c-format msgid "%s(*) must be used to call a parameterless aggregate function" msgstr "%s(*) dev'essere usato per richiamare una funzione di aggregazione senza parametri" -#: parser/parse_func.c:683 +#: parser/parse_func.c:687 #, c-format msgid "aggregates cannot return sets" msgstr "le funzioni di aggregazione non possono restituire insiemi" -#: parser/parse_func.c:698 +#: parser/parse_func.c:702 #, c-format msgid "aggregates cannot use named arguments" msgstr "le funzioni di aggregazione non possono usare argomenti con nome" -#: parser/parse_func.c:730 +#: parser/parse_func.c:734 #, c-format msgid "DISTINCT is not implemented for window functions" msgstr "DISTINCT non è implementato per funzioni finestra" -#: parser/parse_func.c:750 +#: parser/parse_func.c:754 #, c-format msgid "aggregate ORDER BY is not implemented for window functions" msgstr "ORDER BY delle funzioni di aggregazione non è implementato per funzioni finestra" -#: parser/parse_func.c:759 +#: parser/parse_func.c:763 #, c-format msgid "FILTER is not implemented for non-aggregate window functions" msgstr "FILTER non è implementato per funzioni finestra non aggregate" -#: parser/parse_func.c:765 +#: parser/parse_func.c:772 +#, c-format +msgid "window function calls cannot contain set-returning function calls" +msgstr "le funzioni finestra non possono contenere richiami a funzioni che restituiscono insiemi" + +#: parser/parse_func.c:780 #, c-format msgid "window functions cannot return sets" msgstr "le funzioni finestra non possono restituire insiemi" -#: parser/parse_func.c:1931 +#: parser/parse_func.c:1950 #, c-format msgid "function name \"%s\" is not unique" msgstr "il nome della funzione \"%s\" non è univoco" -#: parser/parse_func.c:1933 +#: parser/parse_func.c:1952 #, c-format msgid "Specify the argument list to select the function unambiguously." msgstr "Specifica l'elenco degli argomenti per selezionare la funzione senza ambiguità." 
-#: parser/parse_func.c:1943 +#: parser/parse_func.c:1962 #, c-format msgid "could not find a function named \"%s\"" msgstr "funzione \"%s\" non trovata" -#: parser/parse_func.c:2045 +#: parser/parse_func.c:2064 #, c-format msgid "aggregate %s(*) does not exist" msgstr "la funzione di aggregazione %s(*) non esiste" -#: parser/parse_func.c:2050 +#: parser/parse_func.c:2069 #, c-format msgid "aggregate %s does not exist" msgstr "la funzione di aggregazione %s non esiste" -#: parser/parse_func.c:2069 +#: parser/parse_func.c:2088 #, c-format msgid "function %s is not an aggregate" msgstr "la funzione %s non è una funzione di aggregazione" -#: parser/parse_func.c:2117 +#: parser/parse_func.c:2140 msgid "set-returning functions are not allowed in JOIN conditions" msgstr "non si possono usare funzioni che restituiscono insiemi in condizioni JOIN" -#: parser/parse_func.c:2130 +#: parser/parse_func.c:2161 msgid "set-returning functions are not allowed in policy expressions" msgstr "non si possono usare funzioni che restituiscono insiemi nelle regole di sicurezza" -#: parser/parse_func.c:2145 +#: parser/parse_func.c:2176 msgid "set-returning functions are not allowed in window definitions" msgstr "non si possono usare funzioni che restituiscono insiemi nelle definizioni di finestre" -#: parser/parse_func.c:2183 +#: parser/parse_func.c:2214 msgid "set-returning functions are not allowed in check constraints" msgstr "non si possono usare funzioni che restituiscono insiemi nei vincoli di controllo" -#: parser/parse_func.c:2187 +#: parser/parse_func.c:2218 msgid "set-returning functions are not allowed in DEFAULT expressions" msgstr "non si possono usare funzioni che restituiscono insiemi nelle espressioni DEFAULT" -#: parser/parse_func.c:2190 +#: parser/parse_func.c:2221 msgid "set-returning functions are not allowed in index expressions" msgstr "non si possono usare funzioni che restituiscono insiemi nelle espressioni degli indici" -#: parser/parse_func.c:2193 +#: parser/parse_func.c:2224 msgid "set-returning functions are not allowed in index predicates" msgstr "non si possono usare funzioni che restituiscono insiemi nei predicati degli indici" -#: parser/parse_func.c:2196 +#: parser/parse_func.c:2227 msgid "set-returning functions are not allowed in transform expressions" msgstr "non si possono usare funzioni che restituiscono insiemi in espressioni di trasformazione" -#: parser/parse_func.c:2199 +#: parser/parse_func.c:2230 msgid "set-returning functions are not allowed in EXECUTE parameters" msgstr "non si possono usare funzioni che restituiscono insiemi in parametri EXECUTE" -#: parser/parse_func.c:2202 +#: parser/parse_func.c:2233 msgid "set-returning functions are not allowed in trigger WHEN conditions" msgstr "non si possono usare funzioni che restituiscono insiemi nelle condizioni WHEN dei trigger" -#: parser/parse_func.c:2205 -msgid "set-returning functions are not allowed in partition key expression" -msgstr "non si possono usare funzioni che restituiscono insiemi in espressioni di partizione" - -#. 
translator: %s is name of a SQL construct, eg GROUP BY -#: parser/parse_func.c:2225 -#, c-format -msgid "set-returning functions are not allowed in %s" -msgstr "non si possono usare funzioni che restituiscono insiemi in %s" +#: parser/parse_func.c:2236 +msgid "set-returning functions are not allowed in partition key expressions" +msgstr "non si possono usare funzioni che restituiscono insiemi come espressione di partizione " #: parser/parse_node.c:87 #, c-format @@ -14538,8 +14516,8 @@ msgstr "l'indice di un array dev'essere di tipo intero" msgid "array assignment requires type %s but expression is of type %s" msgstr "l'assegnamento all'array richiede il tipo %s ma l'espressione è di tipo %s" -#: parser/parse_oper.c:125 parser/parse_oper.c:724 utils/adt/regproc.c:519 -#: utils/adt/regproc.c:703 +#: parser/parse_oper.c:125 parser/parse_oper.c:724 utils/adt/regproc.c:520 +#: utils/adt/regproc.c:704 #, c-format msgid "operator does not exist: %s" msgstr "l'operatore non esiste: %s" @@ -14549,14 +14527,6 @@ msgstr "l'operatore non esiste: %s" msgid "Use an explicit ordering operator or modify the query." msgstr "Usa un operatore di ordinamento esplicito, oppure modifica la query." -#: parser/parse_oper.c:228 utils/adt/array_userfuncs.c:724 -#: utils/adt/array_userfuncs.c:863 utils/adt/arrayfuncs.c:3639 -#: utils/adt/arrayfuncs.c:4077 utils/adt/arrayfuncs.c:6039 -#: utils/adt/rowtypes.c:1167 -#, c-format -msgid "could not identify an equality operator for type %s" -msgstr "operatore di uguaglianza per il tipo %s non trovato" - #: parser/parse_oper.c:480 #, c-format msgid "operator requires run-time type coercion: %s" @@ -14577,22 +14547,22 @@ msgstr "Non è stato possibile scegliere l'operatore migliore. Potrebbe essere n msgid "No operator matches the given name and argument type(s). You might need to add explicit type casts." msgstr "Nessun operatore trovato con nome e tipi di argomenti forniti. Potrebbe essere necessario convertire i tipi esplicitamente." -#: parser/parse_oper.c:785 parser/parse_oper.c:903 +#: parser/parse_oper.c:787 parser/parse_oper.c:909 #, c-format msgid "operator is only a shell: %s" msgstr "l'operatore non è completamente definito: %s" -#: parser/parse_oper.c:891 +#: parser/parse_oper.c:897 #, c-format msgid "op ANY/ALL (array) requires array on right side" msgstr "op ANY/ALL (array) richiede un array sul lato destro" -#: parser/parse_oper.c:933 +#: parser/parse_oper.c:939 #, c-format msgid "op ANY/ALL (array) requires operator to yield boolean" msgstr "op ANY/ALL (array) richiede che l'operatore restituisca un valore booleano" -#: parser/parse_oper.c:938 +#: parser/parse_oper.c:944 #, c-format msgid "op ANY/ALL (array) requires operator not to return a set" msgstr "op ANY/ALL (array) richiede che l'operatore non restituisca un insieme" @@ -14617,12 +14587,12 @@ msgstr "il riferimento alla tabella %u è ambiguo" msgid "table name \"%s\" specified more than once" msgstr "la tabella di nome \"%s\" è stata specificata più di una volta" -#: parser/parse_relation.c:446 parser/parse_relation.c:3188 +#: parser/parse_relation.c:446 parser/parse_relation.c:3226 #, c-format msgid "invalid reference to FROM-clause entry for table \"%s\"" msgstr "riferimento non valido all'elemento della clausola FROM per la tabella \"%s\"" -#: parser/parse_relation.c:449 parser/parse_relation.c:3193 +#: parser/parse_relation.c:449 parser/parse_relation.c:3231 #, c-format msgid "There is an entry for table \"%s\", but it cannot be referenced from this part of the query." 
msgstr "C'è un elemento per la tabella \"%s\", ma non può essere referenziato da questa parte della query." @@ -14637,78 +14607,79 @@ msgstr "Il tipo del JOIN deve essere INNER oppure LEFT per un riferimento LATERA msgid "system column \"%s\" reference in check constraint is invalid" msgstr "la colonna di sistema \"%s\" referenziata nel vincolo di controllo non è valida" -#: parser/parse_relation.c:1086 parser/parse_relation.c:1372 -#: parser/parse_relation.c:1941 +#: parser/parse_relation.c:1086 parser/parse_relation.c:1366 +#: parser/parse_relation.c:1935 #, c-format msgid "table \"%s\" has %d columns available but %d columns specified" msgstr "la tabella \"%s\" ha %d colonne disponibili ma %d colonne specificate" -#: parser/parse_relation.c:1179 +#: parser/parse_relation.c:1173 #, c-format msgid "There is a WITH item named \"%s\", but it cannot be referenced from this part of the query." msgstr "C'è un elemento di WITH di nome \"%s\", ma non può essere referenziato da questa parte della query." -#: parser/parse_relation.c:1181 +#: parser/parse_relation.c:1175 #, c-format msgid "Use WITH RECURSIVE, or re-order the WITH items to remove forward references." msgstr "Usa WITH RECURSIVE, oppure riordina gli elementi di WITH per rimuovere i riferimenti in avanti." -#: parser/parse_relation.c:1492 +#: parser/parse_relation.c:1486 #, c-format msgid "a column definition list is only allowed for functions returning \"record\"" msgstr "la lista di definizione di colonne è consentita solo per funzioni che restituiscono \"record\"" -#: parser/parse_relation.c:1501 +#: parser/parse_relation.c:1495 #, c-format msgid "a column definition list is required for functions returning \"record\"" msgstr "la lista di definizione di colonne è necessaria per funzioni che restituiscono \"record\"" -#: parser/parse_relation.c:1580 +#: parser/parse_relation.c:1574 #, c-format msgid "function \"%s\" in FROM has unsupported return type %s" msgstr "la funzione \"%s\" in FROM restituisce il tipo non supportato %s" -#: parser/parse_relation.c:1769 +#: parser/parse_relation.c:1763 #, c-format msgid "VALUES lists \"%s\" have %d columns available but %d columns specified" msgstr "le liste VALUES \"%s\" hanno %d colonne disponibili ma %d colonne specificate" -#: parser/parse_relation.c:1824 +#: parser/parse_relation.c:1818 #, c-format msgid "joins can have at most %d columns" msgstr "i join possono avere al più %d colonne" -#: parser/parse_relation.c:1914 +#: parser/parse_relation.c:1908 #, c-format msgid "WITH query \"%s\" does not have a RETURNING clause" msgstr "la query WITH \"%s\" non ha una clausola RETURNING" -#: parser/parse_relation.c:2809 parser/parse_relation.c:2972 +#: parser/parse_relation.c:2843 parser/parse_relation.c:2881 +#: parser/parse_relation.c:3010 #, c-format msgid "column %d of relation \"%s\" does not exist" msgstr "la colonna %d della relazione \"%s\" non esiste" -#: parser/parse_relation.c:3191 +#: parser/parse_relation.c:3229 #, c-format msgid "Perhaps you meant to reference the table alias \"%s\"." msgstr "Forse intendevi utilizzare l'alias \"%s\" della tabella." -#: parser/parse_relation.c:3199 +#: parser/parse_relation.c:3237 #, c-format msgid "missing FROM-clause entry for table \"%s\"" msgstr "elemento FROM per la tabella \"%s\" mancante" -#: parser/parse_relation.c:3251 +#: parser/parse_relation.c:3289 #, c-format msgid "Perhaps you meant to reference the column \"%s.%s\"." msgstr "Forse intendevi referenziare la colonna \"%s.%s\"." 
-#: parser/parse_relation.c:3253 +#: parser/parse_relation.c:3291 #, c-format msgid "There is a column named \"%s\" in table \"%s\", but it cannot be referenced from this part of the query." msgstr "Esiste una colonna di nome \"%s\" nella tabella \"%s\", ma non può essere referenziata da questa parte della query." -#: parser/parse_relation.c:3270 +#: parser/parse_relation.c:3308 #, c-format msgid "Perhaps you meant to reference the column \"%s.%s\" or the column \"%s.%s\"." msgstr "Forse intendevi referenziare la colonna \"%s.%s\" o la colonna \"%s.%s\"." @@ -14773,7 +14744,7 @@ msgstr "riferimento %%TYPE improprio (troppi nomi puntati): %s" msgid "type reference %s converted to %s" msgstr "riferimento al tipo %s convertito in %s" -#: parser/parse_type.c:261 parser/parse_type.c:804 utils/cache/typcache.c:243 +#: parser/parse_type.c:261 parser/parse_type.c:804 utils/cache/typcache.c:245 #, c-format msgid "type \"%s\" is only a shell" msgstr "il tipo \"%s\" non è completamente definito" @@ -14793,315 +14764,319 @@ msgstr "i modificatori di tipo devono essere costanti o identificatori semplici" msgid "invalid type name \"%s\"" msgstr "nome di tipo \"%s\" non valido" -#: parser/parse_utilcmd.c:264 +#: parser/parse_utilcmd.c:266 #, c-format msgid "cannot create partitioned table as inheritance child" msgstr "non è possibile creare tabelle partizionate come figli di ereditarietà" -#: parser/parse_utilcmd.c:269 -#, c-format -msgid "cannot partition using more than %d columns" -msgstr "non è possibile partizionare usando più di %d colonne" - -#: parser/parse_utilcmd.c:276 -#, c-format -msgid "cannot list partition using more than one column" -msgstr "non è possibile elencare partizioni che usano più di una colonna" - -#: parser/parse_utilcmd.c:428 +#: parser/parse_utilcmd.c:436 #, c-format msgid "%s will create implicit sequence \"%s\" for serial column \"%s.%s\"" msgstr "%s creerà la sequenza implicita \"%s\" per la colonna serial \"%s.%s\"" -#: parser/parse_utilcmd.c:541 +#: parser/parse_utilcmd.c:551 #, c-format msgid "array of serial is not implemented" msgstr "gli array di serial non sono implementati" -#: parser/parse_utilcmd.c:617 parser/parse_utilcmd.c:629 +#: parser/parse_utilcmd.c:627 parser/parse_utilcmd.c:639 #, c-format msgid "conflicting NULL/NOT NULL declarations for column \"%s\" of table \"%s\"" msgstr "dichiarazioni NULL/NOT NULL in conflitto per la colonna \"%s\" della tabella \"%s\"" -#: parser/parse_utilcmd.c:641 +#: parser/parse_utilcmd.c:651 #, c-format msgid "multiple default values specified for column \"%s\" of table \"%s\"" msgstr "più di un valore predefinito specificato per la colonna \"%s\" della tabella \"%s\"" -#: parser/parse_utilcmd.c:662 +#: parser/parse_utilcmd.c:672 #, c-format msgid "multiple identity specifications for column \"%s\" of table \"%s\"" msgstr "specifica di identità multipla per la colonna \"%s\" della tabella \"%s\"" -#: parser/parse_utilcmd.c:685 parser/parse_utilcmd.c:802 +#: parser/parse_utilcmd.c:695 parser/parse_utilcmd.c:812 #, c-format msgid "primary key constraints are not supported on foreign tables" msgstr "i vincoli di chiave primaria non sono supportati sulle tabelle esterne" -#: parser/parse_utilcmd.c:691 parser/parse_utilcmd.c:808 +#: parser/parse_utilcmd.c:701 parser/parse_utilcmd.c:818 #, c-format msgid "primary key constraints are not supported on partitioned tables" msgstr "i vincoli di chiave primaria non sono supportati sulle tabelle partizionate" -#: parser/parse_utilcmd.c:700 parser/parse_utilcmd.c:818 +#: 
parser/parse_utilcmd.c:710 parser/parse_utilcmd.c:828 #, c-format msgid "unique constraints are not supported on foreign tables" msgstr "i vincoli di unicità non sono supportati sulle tabelle esterne" -#: parser/parse_utilcmd.c:706 parser/parse_utilcmd.c:824 +#: parser/parse_utilcmd.c:716 parser/parse_utilcmd.c:834 #, c-format msgid "unique constraints are not supported on partitioned tables" msgstr "i vincoli di unicità non sono supportati sulle tabelle partizionate" -#: parser/parse_utilcmd.c:723 parser/parse_utilcmd.c:854 +#: parser/parse_utilcmd.c:733 parser/parse_utilcmd.c:864 #, c-format msgid "foreign key constraints are not supported on foreign tables" msgstr "i vincoli di chiave esterna non sono supportati sulle tabelle esterne" -#: parser/parse_utilcmd.c:729 parser/parse_utilcmd.c:860 +#: parser/parse_utilcmd.c:739 parser/parse_utilcmd.c:870 #, c-format msgid "foreign key constraints are not supported on partitioned tables" msgstr "i vincoli di chiave esterna non sono supportati sulle tabelle partizionate" -#: parser/parse_utilcmd.c:757 +#: parser/parse_utilcmd.c:767 #, c-format msgid "both default and identity specified for column \"%s\" of table \"%s\"" msgstr "specificati sia il default che l'identità per la colonna \"%s\" della tabella \"%s\"" -#: parser/parse_utilcmd.c:834 +#: parser/parse_utilcmd.c:844 #, c-format msgid "exclusion constraints are not supported on foreign tables" msgstr "i vincoli esclusione non sono supportati sulle tabelle esterne" -#: parser/parse_utilcmd.c:840 +#: parser/parse_utilcmd.c:850 #, c-format msgid "exclusion constraints are not supported on partitioned tables" msgstr "i vincoli esclusione non sono supportati sulle tabelle partizionate" -#: parser/parse_utilcmd.c:910 +#: parser/parse_utilcmd.c:920 #, c-format msgid "LIKE is not supported for creating foreign tables" msgstr "LIKE non è supportato nella creazione di tabelle esterne" -#: parser/parse_utilcmd.c:1465 parser/parse_utilcmd.c:1541 +#: parser/parse_utilcmd.c:1475 parser/parse_utilcmd.c:1551 #, c-format msgid "Index \"%s\" contains a whole-row table reference." msgstr "L'indice \"%s\" contiene un riferimento all'intera riga della tabella." -#: parser/parse_utilcmd.c:1810 +#: parser/parse_utilcmd.c:1820 #, c-format msgid "cannot use an existing index in CREATE TABLE" msgstr "non è possibile usare un indice preesistente in CREATE TABLE" -#: parser/parse_utilcmd.c:1830 +#: parser/parse_utilcmd.c:1840 #, c-format msgid "index \"%s\" is already associated with a constraint" msgstr "l'indice \"%s\" è già associato ad un vincolo" -#: parser/parse_utilcmd.c:1838 +#: parser/parse_utilcmd.c:1848 #, c-format msgid "index \"%s\" does not belong to table \"%s\"" msgstr "l'indice \"%s\" non appartiene alla tabella \"%s\"" -#: parser/parse_utilcmd.c:1845 +#: parser/parse_utilcmd.c:1855 #, c-format msgid "index \"%s\" is not valid" msgstr "l'indice \"%s\" non è valido" -#: parser/parse_utilcmd.c:1851 +#: parser/parse_utilcmd.c:1861 #, c-format msgid "\"%s\" is not a unique index" msgstr "\"%s\" non è un indice univoco" -#: parser/parse_utilcmd.c:1852 parser/parse_utilcmd.c:1859 -#: parser/parse_utilcmd.c:1866 parser/parse_utilcmd.c:1936 +#: parser/parse_utilcmd.c:1862 parser/parse_utilcmd.c:1869 +#: parser/parse_utilcmd.c:1876 parser/parse_utilcmd.c:1946 #, c-format msgid "Cannot create a primary key or unique constraint using such an index." msgstr "Non è possibile creare una chiave primaria o un vincolo univoco usando tale indice." 
-#: parser/parse_utilcmd.c:1858 +#: parser/parse_utilcmd.c:1868 #, c-format msgid "index \"%s\" contains expressions" msgstr "l'indice \"%s\" contiene espressioni" -#: parser/parse_utilcmd.c:1865 +#: parser/parse_utilcmd.c:1875 #, c-format msgid "\"%s\" is a partial index" msgstr "\"%s\" è un indice parziale" -#: parser/parse_utilcmd.c:1877 +#: parser/parse_utilcmd.c:1887 #, c-format msgid "\"%s\" is a deferrable index" msgstr "\"%s\" è un indice deferibile" -#: parser/parse_utilcmd.c:1878 +#: parser/parse_utilcmd.c:1888 #, c-format msgid "Cannot create a non-deferrable constraint using a deferrable index." msgstr "Non è possibile creare un vincolo non deferibile usando un indice deferibile." -#: parser/parse_utilcmd.c:1935 +#: parser/parse_utilcmd.c:1945 #, c-format msgid "index \"%s\" does not have default sorting behavior" msgstr "l'indice \"%s\" non ha un ordinamento predefinito" -#: parser/parse_utilcmd.c:2079 +#: parser/parse_utilcmd.c:2089 #, c-format msgid "column \"%s\" appears twice in primary key constraint" msgstr "la colonna \"%s\" appare due volte nel vincolo di chiave primaria" -#: parser/parse_utilcmd.c:2085 +#: parser/parse_utilcmd.c:2095 #, c-format msgid "column \"%s\" appears twice in unique constraint" msgstr "la colonna \"%s\" appare due volte nel vincolo univoco" -#: parser/parse_utilcmd.c:2294 +#: parser/parse_utilcmd.c:2304 #, c-format msgid "index expressions and predicates can refer only to the table being indexed" msgstr "le espressioni e i predicati dell'indice possono riferirsi solo alla tabella indicizzata" -#: parser/parse_utilcmd.c:2340 +#: parser/parse_utilcmd.c:2350 #, c-format msgid "rules on materialized views are not supported" msgstr "le regole sulle viste materializzate non sono supportate" -#: parser/parse_utilcmd.c:2401 +#: parser/parse_utilcmd.c:2411 #, c-format msgid "rule WHERE condition cannot contain references to other relations" msgstr "le condizioni WHERE delle regole non possono avere riferimenti ad altre relazioni" -#: parser/parse_utilcmd.c:2473 +#: parser/parse_utilcmd.c:2483 #, c-format msgid "rules with WHERE conditions can only have SELECT, INSERT, UPDATE, or DELETE actions" msgstr "le regole con una condizione WHERE possono avere solo azione SELECT, INSERT, UPDATE o DELETE" -#: parser/parse_utilcmd.c:2491 parser/parse_utilcmd.c:2590 +#: parser/parse_utilcmd.c:2501 parser/parse_utilcmd.c:2600 #: rewrite/rewriteHandler.c:500 rewrite/rewriteManip.c:1015 #, c-format msgid "conditional UNION/INTERSECT/EXCEPT statements are not implemented" msgstr "le istruzioni UNION/INTERSECT/EXCEPT condizionali non sono implementate" -#: parser/parse_utilcmd.c:2509 +#: parser/parse_utilcmd.c:2519 #, c-format msgid "ON SELECT rule cannot use OLD" msgstr "la regola ON SELECT non può usare OLD" -#: parser/parse_utilcmd.c:2513 +#: parser/parse_utilcmd.c:2523 #, c-format msgid "ON SELECT rule cannot use NEW" msgstr "la regola ON SELECT non può usare NEW" -#: parser/parse_utilcmd.c:2522 +#: parser/parse_utilcmd.c:2532 #, c-format msgid "ON INSERT rule cannot use OLD" msgstr "la regola ON INSERT non può usare OLD" -#: parser/parse_utilcmd.c:2528 +#: parser/parse_utilcmd.c:2538 #, c-format msgid "ON DELETE rule cannot use NEW" msgstr "La regola ON DELETE non può usare NEW" -#: parser/parse_utilcmd.c:2556 +#: parser/parse_utilcmd.c:2566 #, c-format msgid "cannot refer to OLD within WITH query" msgstr "non ci si può riferire ad OLD nella query WITH" -#: parser/parse_utilcmd.c:2563 +#: parser/parse_utilcmd.c:2573 #, c-format msgid "cannot refer to NEW within WITH 
query" msgstr "non ci si può riferire a NEW nella query WITH" -#: parser/parse_utilcmd.c:2996 +#: parser/parse_utilcmd.c:3006 #, c-format msgid "misplaced DEFERRABLE clause" msgstr "clausola DEFERRABLE mal posizionata" -#: parser/parse_utilcmd.c:3001 parser/parse_utilcmd.c:3016 +#: parser/parse_utilcmd.c:3011 parser/parse_utilcmd.c:3026 #, c-format msgid "multiple DEFERRABLE/NOT DEFERRABLE clauses not allowed" msgstr "clausole DEFERRABLE/NOT DEFERRABLE multiple non consentite" -#: parser/parse_utilcmd.c:3011 +#: parser/parse_utilcmd.c:3021 #, c-format msgid "misplaced NOT DEFERRABLE clause" msgstr "clausola NOT DEFERRABLE mal posizionata" -#: parser/parse_utilcmd.c:3024 parser/parse_utilcmd.c:3050 gram.y:5373 +#: parser/parse_utilcmd.c:3034 parser/parse_utilcmd.c:3060 gram.y:5363 #, c-format msgid "constraint declared INITIALLY DEFERRED must be DEFERRABLE" msgstr "un vincolo dichiarato INITIALLY DEFERRED dev'essere DEFERRABLE" -#: parser/parse_utilcmd.c:3032 +#: parser/parse_utilcmd.c:3042 #, c-format msgid "misplaced INITIALLY DEFERRED clause" msgstr "clausola INITIALLY DEFERRED mal posizionata" -#: parser/parse_utilcmd.c:3037 parser/parse_utilcmd.c:3063 +#: parser/parse_utilcmd.c:3047 parser/parse_utilcmd.c:3073 #, c-format msgid "multiple INITIALLY IMMEDIATE/DEFERRED clauses not allowed" msgstr "clausole INITIALLY IMMEDIATE/DEFERRED multiple non sono consentite" -#: parser/parse_utilcmd.c:3058 +#: parser/parse_utilcmd.c:3068 #, c-format msgid "misplaced INITIALLY IMMEDIATE clause" msgstr "clausola INITIALLY IMMEDIATE mal posizionata" -#: parser/parse_utilcmd.c:3249 +#: parser/parse_utilcmd.c:3259 #, c-format msgid "CREATE specifies a schema (%s) different from the one being created (%s)" msgstr "CREATE specifica uno schema (%s) differente da quello che sta venendo creato (%s)" -#: parser/parse_utilcmd.c:3315 +#: parser/parse_utilcmd.c:3318 #, c-format msgid "invalid bound specification for a list partition" msgstr "specifica di estremità non valida per una partizione su lista" -#: parser/parse_utilcmd.c:3338 parser/parse_utilcmd.c:3472 -#: parser/parse_utilcmd.c:3499 -#, c-format -msgid "specified value cannot be cast to type \"%s\" of column \"%s\"" -msgstr "il valore specificato non può essere convertito al tipo \"%s\" della colonna \"%s\"" - -#: parser/parse_utilcmd.c:3378 +#: parser/parse_utilcmd.c:3374 #, c-format msgid "invalid bound specification for a range partition" msgstr "specifica di estremità non valida per una partizione su intervallo" -#: parser/parse_utilcmd.c:3386 +#: parser/parse_utilcmd.c:3380 #, c-format msgid "FROM must specify exactly one value per partitioning column" msgstr "FROM deve specificare esattamente un valore per colonna di partizionamento" -#: parser/parse_utilcmd.c:3390 +#: parser/parse_utilcmd.c:3384 #, c-format msgid "TO must specify exactly one value per partitioning column" msgstr "TO deve specificare esattamente un valore per colonna di partizionamento" -#: parser/parse_utilcmd.c:3407 parser/parse_utilcmd.c:3421 -#, c-format -msgid "cannot specify finite value after UNBOUNDED" -msgstr "non si può specificare un valore finito dopo UNBOUNDED" - -#: parser/parse_utilcmd.c:3461 parser/parse_utilcmd.c:3488 +#: parser/parse_utilcmd.c:3431 parser/parse_utilcmd.c:3445 #, c-format msgid "cannot specify NULL in range bound" msgstr "non si può specificare NULL nel limite di un margine" +#: parser/parse_utilcmd.c:3492 +#, c-format +msgid "every bound following MAXVALUE must also be MAXVALUE" +msgstr "ogni limite che segue MAXVALUE dev'essere anch'esso 
MAXVALUE" + +#: parser/parse_utilcmd.c:3498 +#, c-format +msgid "every bound following MINVALUE must also be MINVALUE" +msgstr "ogni limite che segue MINVALUE dev'essere anch'esso MINVALUE" + +#: parser/parse_utilcmd.c:3528 parser/parse_utilcmd.c:3540 +#, c-format +msgid "specified value cannot be cast to type %s for column \"%s\"" +msgstr "il valore specificato non può essere trasformato nel tipo %s per la colonna \"%s\"" + +#: parser/parse_utilcmd.c:3542 +#, c-format +msgid "The cast requires a non-immutable conversion." +msgstr "Il cast richiede una conversione non immutabile." + +#: parser/parse_utilcmd.c:3543 +#, c-format +msgid "Try putting the literal value in single quotes." +msgstr "Prova a mettere il valore letterale tra apici." + #: parser/scansup.c:204 #, c-format msgid "identifier \"%s\" will be truncated to \"%s\"" msgstr "l'identificativo \"%s\" sarà troncato a \"%s\"" -#: port/pg_shmem.c:195 port/sysv_shmem.c:195 +#: port/pg_shmem.c:196 port/sysv_shmem.c:196 #, c-format msgid "could not create shared memory segment: %m" msgstr "creazione del segmento di memoria condivisa fallita: %m" -#: port/pg_shmem.c:196 port/sysv_shmem.c:196 +#: port/pg_shmem.c:197 port/sysv_shmem.c:197 #, c-format msgid "Failed system call was shmget(key=%lu, size=%zu, 0%o)." msgstr "La chiamata di sistema fallita era shmget(key=%lu, size=%zu, 0%o)." -#: port/pg_shmem.c:200 port/sysv_shmem.c:200 +#: port/pg_shmem.c:201 port/sysv_shmem.c:201 #, c-format msgid "" "This error usually means that PostgreSQL's request for a shared memory segment exceeded your kernel's SHMMAX parameter, or possibly that it is less than your kernel's SHMMIN parameter.\n" @@ -15110,7 +15085,7 @@ msgstr "" "Questo errore di solito vuol dire che la richiesta di PostgreSQL di un segmento di memoria condivisa eccede il valore del parametro SHMMAX del tuo kernel, o anche che sia inferiore del parametro SHMMIN.\n" "La documentazione di PostgreSQL contiene ulteriori informazioni sulla configurazione della memoria condivisa." -#: port/pg_shmem.c:207 port/sysv_shmem.c:207 +#: port/pg_shmem.c:208 port/sysv_shmem.c:208 #, c-format msgid "" "This error usually means that PostgreSQL's request for a shared memory segment exceeded your kernel's SHMALL parameter. You might need to reconfigure the kernel with larger SHMALL.\n" @@ -15119,7 +15094,7 @@ msgstr "" "Questo errore di solito vuol dire che la richiesta di PostgreSQL di un segmento di memoria condivisa eccede il valore del parametro SHMALL del tuo kernel. Potresti dover riconfigurare il kernel con uno SHMALL più grande.\n" "La documentazione di PostgreSQL contiene ulteriori informazioni sulla configurazione della memoria condivisa." -#: port/pg_shmem.c:213 port/sysv_shmem.c:213 +#: port/pg_shmem.c:214 port/sysv_shmem.c:214 #, c-format msgid "" "This error does *not* mean that you have run out of disk space. It occurs either if all available shared memory IDs have been taken, in which case you need to raise the SHMMNI parameter in your kernel, or because the system's overall limit for shared memory has been reached.\n" @@ -15128,22 +15103,22 @@ msgstr "" "Questo errore *non* significa che è finito lo spazio su disco. Può succedere se tutti gli ID di memoria condivisa sono stati presi, nel cui caso è necessario aumentare il parametro SHMMNI del tuo kernel, oppure perché il limite globale la memoria condivisa di sistema è stato raggiunto.\n" "La documentazione di PostgreSQL contiene ulteriori informazioni sulla configurazione della memoria condivisa." 
-#: port/pg_shmem.c:504 port/sysv_shmem.c:504 +#: port/pg_shmem.c:505 port/sysv_shmem.c:505 #, c-format msgid "could not map anonymous shared memory: %m" msgstr "mappatura della memoria condivisa anonima fallita: %m" -#: port/pg_shmem.c:506 port/sysv_shmem.c:506 +#: port/pg_shmem.c:507 port/sysv_shmem.c:507 #, c-format msgid "This error usually means that PostgreSQL's request for a shared memory segment exceeded available memory, swap space, or huge pages. To reduce the request size (currently %zu bytes), reduce PostgreSQL's shared memory usage, perhaps by reducing shared_buffers or max_connections." msgstr "Questo errore di solito vuol dire che la richiesta di PostgreSQL di un segmento di memoria condivisa supera la memoria disponibile, lo spazio di swap o le pagine huge. Per ridurre la dimensione richiesta (attualmente %zu byte), riduci l'utilizzo di memoria condivisa di PostgreSQL, ad esempio riducendo shared_buffers o max_connections." -#: port/pg_shmem.c:572 port/sysv_shmem.c:572 port/win32_shmem.c:134 +#: port/pg_shmem.c:573 port/sysv_shmem.c:573 port/win32_shmem.c:134 #, c-format msgid "huge pages not supported on this platform" msgstr "pagine huge non supportate su questa piattaforma" -#: port/pg_shmem.c:667 port/sysv_shmem.c:667 +#: port/pg_shmem.c:668 port/sysv_shmem.c:668 #, c-format msgid "could not stat data directory \"%s\": %m" msgstr "non è stato possibile ottenere informazioni sulla directory dati \"%s\": %m" @@ -15262,57 +15237,57 @@ msgstr "La chiamata di sistema fallita era DuplicateHandle." msgid "Failed system call was MapViewOfFileEx." msgstr "La chiamata di sistema fallita era MapViewOfFileEx." -#: postmaster/autovacuum.c:416 +#: postmaster/autovacuum.c:406 #, c-format msgid "could not fork autovacuum launcher process: %m" msgstr "fork del processo di esecuzione di autovacuum fallito: %m" -#: postmaster/autovacuum.c:452 +#: postmaster/autovacuum.c:442 #, c-format msgid "autovacuum launcher started" msgstr "esecutore di autovacuum avviato" -#: postmaster/autovacuum.c:838 +#: postmaster/autovacuum.c:826 #, c-format msgid "autovacuum launcher shutting down" msgstr "arresto dell'esecutore di autovacuum" -#: postmaster/autovacuum.c:1500 +#: postmaster/autovacuum.c:1488 #, c-format msgid "could not fork autovacuum worker process: %m" msgstr "fork del processo di lavoro di autovacuum fallito: %m" -#: postmaster/autovacuum.c:1706 +#: postmaster/autovacuum.c:1686 #, c-format msgid "autovacuum: processing database \"%s\"" msgstr "autovacuum: elaborazione del database \"%s\"" -#: postmaster/autovacuum.c:2280 +#: postmaster/autovacuum.c:2261 #, c-format msgid "autovacuum: dropping orphan temp table \"%s.%s.%s\"" msgstr "autovacuum: eliminazione della tabella temporanea orfana \"%s.%s.%s\"" -#: postmaster/autovacuum.c:2486 +#: postmaster/autovacuum.c:2469 #, c-format msgid "automatic vacuum of table \"%s.%s.%s\"" msgstr "pulizia automatica della tabella \"%s.%s.%s\"" -#: postmaster/autovacuum.c:2489 +#: postmaster/autovacuum.c:2472 #, c-format msgid "automatic analyze of table \"%s.%s.%s\"" msgstr "analisi automatica della tabella \"%s.%s.%s\"" -#: postmaster/autovacuum.c:2700 +#: postmaster/autovacuum.c:2665 #, c-format msgid "processing work entry for relation \"%s.%s.%s\"" msgstr "processo a lavoro sulla relazione \"%s.%s.%s\"" -#: postmaster/autovacuum.c:3344 +#: postmaster/autovacuum.c:3240 #, c-format msgid "autovacuum not started because of misconfiguration" msgstr "autovacuum non avviato a causa di configurazione errata" -#: postmaster/autovacuum.c:3345 +#: 
postmaster/autovacuum.c:3241 #, c-format msgid "Enable the \"track_counts\" option." msgstr "Abilita l'opzione \"track_counts\"." @@ -15379,85 +15354,85 @@ msgstr[1] "Le impostazioni correnti consentono la registrazione di un massimo di msgid "Consider increasing the configuration parameter \"max_worker_processes\"." msgstr "Considera di incrementare il parametro di configurazione \"max_worker_processes\"." -#: postmaster/checkpointer.c:465 +#: postmaster/checkpointer.c:464 #, c-format msgid "checkpoints are occurring too frequently (%d second apart)" msgid_plural "checkpoints are occurring too frequently (%d seconds apart)" msgstr[0] "i checkpoint stanno avvenendo troppo frequentemente (a distanza di %d secondo)" msgstr[1] "i checkpoint stanno avvenendo troppo frequentemente (a distanza di %d secondi)" -#: postmaster/checkpointer.c:469 +#: postmaster/checkpointer.c:468 #, c-format msgid "Consider increasing the configuration parameter \"max_wal_size\"." msgstr "Considera di incrementare il parametro di configurazione \"max_wal_size\"." -#: postmaster/checkpointer.c:1088 +#: postmaster/checkpointer.c:1087 #, c-format msgid "checkpoint request failed" msgstr "richiesta di checkpoint fallita" -#: postmaster/checkpointer.c:1089 +#: postmaster/checkpointer.c:1088 #, c-format msgid "Consult recent messages in the server log for details." msgstr "Consulta i messaggi recenti nel log del server per i dettagli." -#: postmaster/checkpointer.c:1284 +#: postmaster/checkpointer.c:1283 #, c-format msgid "compacted fsync request queue from %d entries to %d entries" msgstr "coda di richieste di fsync ridotta da %d a %d elementi" -#: postmaster/pgarch.c:149 +#: postmaster/pgarch.c:148 #, c-format msgid "could not fork archiver: %m" msgstr "non è possibile fare un fork dell'archiver: %m" -#: postmaster/pgarch.c:457 +#: postmaster/pgarch.c:456 #, c-format msgid "archive_mode enabled, yet archive_command is not set" msgstr "archive_mode abilitato, ma archive_command non è impostato" -#: postmaster/pgarch.c:485 +#: postmaster/pgarch.c:484 #, c-format msgid "archiving write-ahead log file \"%s\" failed too many times, will try again later" msgstr "archiviazione del file di log write-ahead \"%s\" fallita troppe volte, verrà riprovato più tardi" -#: postmaster/pgarch.c:588 +#: postmaster/pgarch.c:587 #, c-format msgid "archive command failed with exit code %d" msgstr "comando di archiviazione fallito con codice di uscita %d" -#: postmaster/pgarch.c:590 postmaster/pgarch.c:600 postmaster/pgarch.c:607 -#: postmaster/pgarch.c:613 postmaster/pgarch.c:622 +#: postmaster/pgarch.c:589 postmaster/pgarch.c:599 postmaster/pgarch.c:606 +#: postmaster/pgarch.c:612 postmaster/pgarch.c:621 #, c-format msgid "The failed archive command was: %s" msgstr "Il comando di archiviazione fallito era: %s" -#: postmaster/pgarch.c:597 +#: postmaster/pgarch.c:596 #, c-format msgid "archive command was terminated by exception 0x%X" msgstr "comando di archiviazione terminato da eccezione 0x%X" -#: postmaster/pgarch.c:599 postmaster/postmaster.c:3541 +#: postmaster/pgarch.c:598 postmaster/postmaster.c:3567 #, c-format msgid "See C include file \"ntstatus.h\" for a description of the hexadecimal value." msgstr "Consulta il file include C \"ntstatus.h\" per una spiegazione del valore esadecimale." 
-#: postmaster/pgarch.c:604 +#: postmaster/pgarch.c:603 #, c-format msgid "archive command was terminated by signal %d: %s" msgstr "comando di archiviazione terminato dal segnale %d: %s" -#: postmaster/pgarch.c:611 +#: postmaster/pgarch.c:610 #, c-format msgid "archive command was terminated by signal %d" msgstr "comando di archiviazione terminato dal segnale %d" -#: postmaster/pgarch.c:620 +#: postmaster/pgarch.c:619 #, c-format msgid "archive command exited with unrecognized status %d" msgstr "processo di archiviazione uscito con stato sconosciuto %d" -#: postmaster/pgarch.c:680 +#: postmaster/pgarch.c:679 #, c-format msgid "could not open archive status directory \"%s\": %m" msgstr "apertura della directory dello stato dell'archivio \"%s\" fallita: %m" @@ -15542,213 +15517,213 @@ msgstr "destinazione di reset sconosciuta: \"%s\"" msgid "Target must be \"archiver\" or \"bgwriter\"." msgstr "La destinazione deve essere \"archiver\" o \"bgwriter\"." -#: postmaster/pgstat.c:4287 +#: postmaster/pgstat.c:4296 #, c-format msgid "could not read statistics message: %m" msgstr "lettura del messaggio delle statistiche fallito: %m" -#: postmaster/pgstat.c:4619 postmaster/pgstat.c:4776 +#: postmaster/pgstat.c:4628 postmaster/pgstat.c:4785 #, c-format msgid "could not open temporary statistics file \"%s\": %m" msgstr "apertura del file temporaneo delle statistiche \"%s\" fallita: %m" -#: postmaster/pgstat.c:4686 postmaster/pgstat.c:4821 +#: postmaster/pgstat.c:4695 postmaster/pgstat.c:4830 #, c-format msgid "could not write temporary statistics file \"%s\": %m" msgstr "scrittura del file temporaneo delle statistiche \"%s\" fallita: %m" -#: postmaster/pgstat.c:4695 postmaster/pgstat.c:4830 +#: postmaster/pgstat.c:4704 postmaster/pgstat.c:4839 #, c-format msgid "could not close temporary statistics file \"%s\": %m" msgstr "chiusura del file temporaneo delle statistiche \"%s\" fallita: %m" -#: postmaster/pgstat.c:4703 postmaster/pgstat.c:4838 +#: postmaster/pgstat.c:4712 postmaster/pgstat.c:4847 #, c-format msgid "could not rename temporary statistics file \"%s\" to \"%s\": %m" msgstr "non è stato possibile rinominare il file temporaneo delle statistiche \"%s\" in \"%s\": %m" -#: postmaster/pgstat.c:4927 postmaster/pgstat.c:5112 postmaster/pgstat.c:5265 +#: postmaster/pgstat.c:4936 postmaster/pgstat.c:5142 postmaster/pgstat.c:5295 #, c-format msgid "could not open statistics file \"%s\": %m" msgstr "apertura del file delle statistiche \"%s\" fallita: %m" -#: postmaster/pgstat.c:4939 postmaster/pgstat.c:4949 postmaster/pgstat.c:4959 -#: postmaster/pgstat.c:4980 postmaster/pgstat.c:4995 postmaster/pgstat.c:5049 -#: postmaster/pgstat.c:5124 postmaster/pgstat.c:5144 postmaster/pgstat.c:5162 -#: postmaster/pgstat.c:5178 postmaster/pgstat.c:5196 postmaster/pgstat.c:5212 -#: postmaster/pgstat.c:5277 postmaster/pgstat.c:5289 postmaster/pgstat.c:5301 -#: postmaster/pgstat.c:5326 postmaster/pgstat.c:5348 +#: postmaster/pgstat.c:4948 postmaster/pgstat.c:4958 postmaster/pgstat.c:4979 +#: postmaster/pgstat.c:5001 postmaster/pgstat.c:5016 postmaster/pgstat.c:5079 +#: postmaster/pgstat.c:5154 postmaster/pgstat.c:5174 postmaster/pgstat.c:5192 +#: postmaster/pgstat.c:5208 postmaster/pgstat.c:5226 postmaster/pgstat.c:5242 +#: postmaster/pgstat.c:5307 postmaster/pgstat.c:5319 postmaster/pgstat.c:5331 +#: postmaster/pgstat.c:5356 postmaster/pgstat.c:5378 #, c-format msgid "corrupted statistics file \"%s\"" msgstr "file delle statistiche corrotto \"%s\"" -#: postmaster/pgstat.c:5477 +#: postmaster/pgstat.c:5507 #, c-format 
msgid "using stale statistics instead of current ones because stats collector is not responding" msgstr "verranno utilizzate statistiche vecchie invece di quelle correnti perché il processo di raccolta statistiche non risponde" -#: postmaster/pgstat.c:5804 +#: postmaster/pgstat.c:5834 #, c-format msgid "database hash table corrupted during cleanup --- abort" msgstr "tabella hash del database corrotta durante la pulizia --- interruzione" -#: postmaster/postmaster.c:706 +#: postmaster/postmaster.c:710 #, c-format msgid "%s: invalid argument for option -f: \"%s\"\n" msgstr "%s: argomento non valido per l'opzione -f: \"%s\"\n" -#: postmaster/postmaster.c:792 +#: postmaster/postmaster.c:796 #, c-format msgid "%s: invalid argument for option -t: \"%s\"\n" msgstr "%s: argomento non valido per l'opzione -t: \"%s\"\n" -#: postmaster/postmaster.c:843 +#: postmaster/postmaster.c:847 #, c-format msgid "%s: invalid argument: \"%s\"\n" msgstr "%s: argomento non valido: \"%s\"\n" -#: postmaster/postmaster.c:882 +#: postmaster/postmaster.c:886 #, c-format msgid "%s: superuser_reserved_connections must be less than max_connections\n" msgstr "%s: superuser_reserved_connections dev'essere minore di max_connections\n" -#: postmaster/postmaster.c:887 +#: postmaster/postmaster.c:891 #, c-format msgid "%s: max_wal_senders must be less than max_connections\n" msgstr "%s: max_wal_senders dev'essere minore di max_connections\n" -#: postmaster/postmaster.c:892 +#: postmaster/postmaster.c:896 #, c-format msgid "WAL archival cannot be enabled when wal_level is \"minimal\"" msgstr "l'archiviazione dei WAL non può essere attivata quando wal_level è \"minimal\"" -#: postmaster/postmaster.c:895 +#: postmaster/postmaster.c:899 #, c-format msgid "WAL streaming (max_wal_senders > 0) requires wal_level \"replica\" or \"logical\"" msgstr "lo streaming WAL (max_wal_senders > 0) richiede wal_level \"replica\" oppure \"logical\"" -#: postmaster/postmaster.c:903 +#: postmaster/postmaster.c:907 #, c-format msgid "%s: invalid datetoken tables, please fix\n" msgstr "%s: datetoken tables non valido, per favore correggilo\n" -#: postmaster/postmaster.c:1006 postmaster/postmaster.c:1104 -#: utils/init/miscinit.c:1446 +#: postmaster/postmaster.c:1010 postmaster/postmaster.c:1108 +#: utils/init/miscinit.c:1455 #, c-format msgid "invalid list syntax in parameter \"%s\"" msgstr "sintassi di lista non valida nel parametro \"%s\"" -#: postmaster/postmaster.c:1037 +#: postmaster/postmaster.c:1041 #, c-format msgid "could not create listen socket for \"%s\"" msgstr "creazione del socket di ascolto per \"%s\" fallita" -#: postmaster/postmaster.c:1043 +#: postmaster/postmaster.c:1047 #, c-format msgid "could not create any TCP/IP sockets" msgstr "non è stato possibile creare alcun socket TCP/IP" -#: postmaster/postmaster.c:1126 +#: postmaster/postmaster.c:1130 #, c-format msgid "could not create Unix-domain socket in directory \"%s\"" msgstr "creazione del socket di dominio Unix fallita nella directory \"%s\"" -#: postmaster/postmaster.c:1132 +#: postmaster/postmaster.c:1136 #, c-format msgid "could not create any Unix-domain sockets" msgstr "creazione del socket di dominio Unix fallita" -#: postmaster/postmaster.c:1144 +#: postmaster/postmaster.c:1148 #, c-format msgid "no socket created for listening" msgstr "nessun socket per l'ascolto è stato creato" -#: postmaster/postmaster.c:1184 +#: postmaster/postmaster.c:1188 #, c-format msgid "could not create I/O completion port for child queue" msgstr "creazione della porta di completamento I/O per 
la coda dei figli fallita" -#: postmaster/postmaster.c:1213 +#: postmaster/postmaster.c:1217 #, c-format msgid "%s: could not change permissions of external PID file \"%s\": %s\n" msgstr "%s: modifica dei permessi del file PID esterno \"%s\" fallita: %s\n" -#: postmaster/postmaster.c:1217 +#: postmaster/postmaster.c:1221 #, c-format msgid "%s: could not write external PID file \"%s\": %s\n" msgstr "%s: scrittura del file PID esterno \"%s\" fallita: %s\n" -#: postmaster/postmaster.c:1274 +#: postmaster/postmaster.c:1278 #, c-format msgid "ending log output to stderr" msgstr "terminazione dell'output del log su stderr" -#: postmaster/postmaster.c:1275 +#: postmaster/postmaster.c:1279 #, c-format msgid "Future log output will go to log destination \"%s\"." msgstr "L'output dei prossimi log andrà su \"%s\"." -#: postmaster/postmaster.c:1301 utils/init/postinit.c:213 +#: postmaster/postmaster.c:1305 utils/init/postinit.c:213 #, c-format msgid "could not load pg_hba.conf" msgstr "caricamento di pg_hba.conf fallito" -#: postmaster/postmaster.c:1327 +#: postmaster/postmaster.c:1331 #, c-format msgid "postmaster became multithreaded during startup" msgstr "il postmaster è diventato multithread durante l'avvio" -#: postmaster/postmaster.c:1328 +#: postmaster/postmaster.c:1332 #, c-format msgid "Set the LC_ALL environment variable to a valid locale." msgstr "Imposta la variabile d'ambiente LC_ALL non corrisponde ad un locale valido." -#: postmaster/postmaster.c:1427 +#: postmaster/postmaster.c:1437 #, c-format msgid "%s: could not locate matching postgres executable" msgstr "%s: eseguibile postgres corrispondente non trovato" -#: postmaster/postmaster.c:1450 utils/misc/tzparser.c:341 +#: postmaster/postmaster.c:1460 utils/misc/tzparser.c:341 #, c-format msgid "This may indicate an incomplete PostgreSQL installation, or that the file \"%s\" has been moved away from its proper location." msgstr "Questo potrebbe indicare una installazione di PostgreSQL incompleta, o che il file \"%s\" sia stato spostato dalla sua posizione corretta." -#: postmaster/postmaster.c:1478 +#: postmaster/postmaster.c:1488 #, c-format msgid "data directory \"%s\" does not exist" msgstr "la directory dei dati \"%s\" non esiste" -#: postmaster/postmaster.c:1483 +#: postmaster/postmaster.c:1493 #, c-format msgid "could not read permissions of directory \"%s\": %m" msgstr "lettura dei permessi della directory \"%s\" fallita: %m" -#: postmaster/postmaster.c:1491 +#: postmaster/postmaster.c:1501 #, c-format msgid "specified data directory \"%s\" is not a directory" msgstr "la directory dei dati specificata \"%s\" non è una directory" -#: postmaster/postmaster.c:1507 +#: postmaster/postmaster.c:1517 #, c-format msgid "data directory \"%s\" has wrong ownership" msgstr "la directory dei dati \"%s\" ha il proprietario errato" -#: postmaster/postmaster.c:1509 +#: postmaster/postmaster.c:1519 #, c-format msgid "The server must be started by the user that owns the data directory." msgstr "Il server deve essere avviato dall'utente che possiede la directory dei dati." -#: postmaster/postmaster.c:1529 +#: postmaster/postmaster.c:1539 #, c-format msgid "data directory \"%s\" has group or world access" msgstr "la directory dei dati \"%s\" è accessibile dal gruppo o da tutti" -#: postmaster/postmaster.c:1531 +#: postmaster/postmaster.c:1541 #, c-format msgid "Permissions should be u=rwx (0700)." msgstr "I permessi dovrebbero essere u=rwx (0700)." 
-#: postmaster/postmaster.c:1542 +#: postmaster/postmaster.c:1552 #, c-format msgid "" "%s: could not find the database system\n" @@ -15759,386 +15734,396 @@ msgstr "" "Sarebbe dovuto essere nella directory \"%s\",\n" "ma l'apertura del file \"%s\" è fallita: %s\n" -#: postmaster/postmaster.c:1719 +#: postmaster/postmaster.c:1729 #, c-format msgid "select() failed in postmaster: %m" msgstr "select() fallita in postmaster: %m" -#: postmaster/postmaster.c:1870 +#: postmaster/postmaster.c:1884 #, c-format msgid "performing immediate shutdown because data directory lock file is invalid" msgstr "arresto immediato perché il file di lock della directory dati non è valido" -#: postmaster/postmaster.c:1948 postmaster/postmaster.c:1979 +#: postmaster/postmaster.c:1962 postmaster/postmaster.c:1993 #, c-format msgid "incomplete startup packet" msgstr "pacchetto di avvio incompleto" -#: postmaster/postmaster.c:1960 +#: postmaster/postmaster.c:1974 #, c-format msgid "invalid length of startup packet" msgstr "dimensione del pacchetto di avvio non valida" -#: postmaster/postmaster.c:2018 +#: postmaster/postmaster.c:2032 #, c-format msgid "failed to send SSL negotiation response: %m" msgstr "invio della risposta di negoziazione SSL fallito: %m" -#: postmaster/postmaster.c:2047 +#: postmaster/postmaster.c:2061 #, c-format msgid "unsupported frontend protocol %u.%u: server supports %u.0 to %u.%u" msgstr "protocollo frontend non supportato %u.%u: il server supporta da %u.0 a %u.%u" -#: postmaster/postmaster.c:2110 utils/misc/guc.c:5759 utils/misc/guc.c:5852 -#: utils/misc/guc.c:7153 utils/misc/guc.c:9907 utils/misc/guc.c:9941 +#: postmaster/postmaster.c:2124 utils/misc/guc.c:5770 utils/misc/guc.c:5863 +#: utils/misc/guc.c:7164 utils/misc/guc.c:9911 utils/misc/guc.c:9945 #, c-format msgid "invalid value for parameter \"%s\": \"%s\"" msgstr "valore non valido per il parametro \"%s\": \"%s\"" -#: postmaster/postmaster.c:2113 +#: postmaster/postmaster.c:2127 #, c-format msgid "Valid values are: \"false\", 0, \"true\", 1, \"database\"." msgstr "I valori validi sono: \"false\", 0, \"true\", 1, \"database\"." 
-#: postmaster/postmaster.c:2133 +#: postmaster/postmaster.c:2147 #, c-format msgid "invalid startup packet layout: expected terminator as last byte" msgstr "formato del pacchetto di avvio non valido: atteso il terminatore all'ultimo byte" -#: postmaster/postmaster.c:2161 +#: postmaster/postmaster.c:2175 #, c-format msgid "no PostgreSQL user name specified in startup packet" msgstr "nessun utente PostgreSQL specificato nel pacchetto di avvio" -#: postmaster/postmaster.c:2220 +#: postmaster/postmaster.c:2234 #, c-format msgid "the database system is starting up" msgstr "il database si sta avviando" -#: postmaster/postmaster.c:2225 +#: postmaster/postmaster.c:2239 #, c-format msgid "the database system is shutting down" msgstr "il database si sta spegnendo" -#: postmaster/postmaster.c:2230 +#: postmaster/postmaster.c:2244 #, c-format msgid "the database system is in recovery mode" msgstr "il database è in modalità di ripristino" -#: postmaster/postmaster.c:2235 storage/ipc/procarray.c:291 +#: postmaster/postmaster.c:2249 storage/ipc/procarray.c:292 #: storage/ipc/sinvaladt.c:298 storage/lmgr/proc.c:338 #, c-format msgid "sorry, too many clients already" msgstr "spiacente, troppi client già connessi" -#: postmaster/postmaster.c:2297 +#: postmaster/postmaster.c:2311 #, c-format msgid "wrong key in cancel request for process %d" msgstr "chiave sbagliata nella richiesta di annullamento per il processo %d" -#: postmaster/postmaster.c:2305 +#: postmaster/postmaster.c:2319 #, c-format msgid "PID %d in cancel request did not match any process" msgstr "il PID %d nella richiesta di annullamento non corrisponde ad alcun processo" -#: postmaster/postmaster.c:2516 +#: postmaster/postmaster.c:2530 #, c-format msgid "received SIGHUP, reloading configuration files" msgstr "SIGHUP ricevuto, sto ricaricando i file di configurazione" -#: postmaster/postmaster.c:2541 +#: postmaster/postmaster.c:2555 #, c-format msgid "pg_hba.conf was not reloaded" msgstr "pg_hba.conf non è stato ricaricato" -#: postmaster/postmaster.c:2545 +#: postmaster/postmaster.c:2559 #, c-format msgid "pg_ident.conf was not reloaded" msgstr "pg_ident.conf non è stato ricaricato" -#: postmaster/postmaster.c:2555 +#: postmaster/postmaster.c:2569 #, c-format msgid "SSL configuration was not reloaded" msgstr "la configurazione SSL non è stata ricaricata" -#: postmaster/postmaster.c:2603 +#: postmaster/postmaster.c:2617 #, c-format msgid "received smart shutdown request" msgstr "richiesta di arresto smart ricevuta" -#: postmaster/postmaster.c:2658 +#: postmaster/postmaster.c:2675 #, c-format msgid "received fast shutdown request" msgstr "richiesta di arresto fast ricevuta" -#: postmaster/postmaster.c:2688 +#: postmaster/postmaster.c:2708 #, c-format msgid "aborting any active transactions" msgstr "interruzione di tutte le transazioni attive" -#: postmaster/postmaster.c:2722 +#: postmaster/postmaster.c:2742 #, c-format msgid "received immediate shutdown request" msgstr "richiesta di arresto immediate ricevuta" -#: postmaster/postmaster.c:2786 +#: postmaster/postmaster.c:2809 #, c-format msgid "shutdown at recovery target" msgstr "arresto alla destinazione di recupero" -#: postmaster/postmaster.c:2802 postmaster/postmaster.c:2825 +#: postmaster/postmaster.c:2825 postmaster/postmaster.c:2848 msgid "startup process" msgstr "avvio del processo" -#: postmaster/postmaster.c:2805 +#: postmaster/postmaster.c:2828 #, c-format msgid "aborting startup due to startup process failure" msgstr "avvio interrotto a causa del fallimento del processo di avvio" -#: 
postmaster/postmaster.c:2866 +#: postmaster/postmaster.c:2889 #, c-format msgid "database system is ready to accept connections" msgstr "il database è pronto ad accettare connessioni" -#: postmaster/postmaster.c:2885 +#: postmaster/postmaster.c:2910 msgid "background writer process" msgstr "processo di scrittura in background" -#: postmaster/postmaster.c:2939 +#: postmaster/postmaster.c:2964 msgid "checkpointer process" msgstr "processo di creazione checkpoint" -#: postmaster/postmaster.c:2955 +#: postmaster/postmaster.c:2980 msgid "WAL writer process" msgstr "processo di scrittura WAL" -#: postmaster/postmaster.c:2969 +#: postmaster/postmaster.c:2995 msgid "WAL receiver process" msgstr "processo di ricezione WAL" -#: postmaster/postmaster.c:2984 +#: postmaster/postmaster.c:3010 msgid "autovacuum launcher process" msgstr "processo del lanciatore di autovacuum" -#: postmaster/postmaster.c:2999 +#: postmaster/postmaster.c:3025 msgid "archiver process" msgstr "processo di archiviazione" -#: postmaster/postmaster.c:3015 +#: postmaster/postmaster.c:3041 msgid "statistics collector process" msgstr "processo del raccoglitore di statistiche" -#: postmaster/postmaster.c:3029 +#: postmaster/postmaster.c:3055 msgid "system logger process" msgstr "processo del logger di sistema" -#: postmaster/postmaster.c:3091 +#: postmaster/postmaster.c:3117 msgid "worker process" msgstr "processo di lavoro" -#: postmaster/postmaster.c:3174 postmaster/postmaster.c:3194 -#: postmaster/postmaster.c:3201 postmaster/postmaster.c:3219 +#: postmaster/postmaster.c:3200 postmaster/postmaster.c:3220 +#: postmaster/postmaster.c:3227 postmaster/postmaster.c:3245 msgid "server process" msgstr "processo del server" -#: postmaster/postmaster.c:3273 +#: postmaster/postmaster.c:3299 #, c-format msgid "terminating any other active server processes" msgstr "interruzione di tutti gli altri processi attivi del server" #. translator: %s is a noun phrase describing a child process, such as #. "server process" -#: postmaster/postmaster.c:3529 +#: postmaster/postmaster.c:3555 #, c-format msgid "%s (PID %d) exited with exit code %d" msgstr "%s (PID %d) è uscito con codice di uscita %d" -#: postmaster/postmaster.c:3531 postmaster/postmaster.c:3542 -#: postmaster/postmaster.c:3553 postmaster/postmaster.c:3562 -#: postmaster/postmaster.c:3572 +#: postmaster/postmaster.c:3557 postmaster/postmaster.c:3568 +#: postmaster/postmaster.c:3579 postmaster/postmaster.c:3588 +#: postmaster/postmaster.c:3598 #, c-format msgid "Failed process was running: %s" msgstr "Il processo fallito stava eseguendo: %s" #. translator: %s is a noun phrase describing a child process, such as #. "server process" -#: postmaster/postmaster.c:3539 +#: postmaster/postmaster.c:3565 #, c-format msgid "%s (PID %d) was terminated by exception 0x%X" msgstr "%s (PID %d) è stato terminato dall'eccezione 0x%X" #. translator: %s is a noun phrase describing a child process, such as #. "server process" -#: postmaster/postmaster.c:3549 +#: postmaster/postmaster.c:3575 #, c-format msgid "%s (PID %d) was terminated by signal %d: %s" msgstr "%s (PID %d) è stato terminato dal segnale %d: %s" #. translator: %s is a noun phrase describing a child process, such as #. "server process" -#: postmaster/postmaster.c:3560 +#: postmaster/postmaster.c:3586 #, c-format msgid "%s (PID %d) was terminated by signal %d" msgstr "%s (PID %d) è stato terminato dal segnale %d" #. translator: %s is a noun phrase describing a child process, such as #. 
"server process" -#: postmaster/postmaster.c:3570 +#: postmaster/postmaster.c:3596 #, c-format msgid "%s (PID %d) exited with unrecognized status %d" msgstr "%s (PID %d) uscito con stato sconosciuto %d" -#: postmaster/postmaster.c:3760 +#: postmaster/postmaster.c:3783 #, c-format msgid "abnormal database system shutdown" msgstr "spegnimento anormale del database" -#: postmaster/postmaster.c:3800 +#: postmaster/postmaster.c:3823 #, c-format msgid "all server processes terminated; reinitializing" msgstr "tutti i processi server sono terminati; re-inizializzazione" -#: postmaster/postmaster.c:3966 postmaster/postmaster.c:5361 -#: postmaster/postmaster.c:5707 +#: postmaster/postmaster.c:3989 postmaster/postmaster.c:5400 +#: postmaster/postmaster.c:5764 #, c-format msgid "could not generate random cancel key" msgstr "generazione della chiave di annullamento casuale fallita" -#: postmaster/postmaster.c:4020 +#: postmaster/postmaster.c:4043 #, c-format msgid "could not fork new process for connection: %m" msgstr "fork del nuovo processo per la connessione fallito: %m" -#: postmaster/postmaster.c:4062 +#: postmaster/postmaster.c:4085 msgid "could not fork new process for connection: " msgstr "fork del nuovo processo per la connessione fallito: " -#: postmaster/postmaster.c:4176 +#: postmaster/postmaster.c:4199 #, c-format msgid "connection received: host=%s port=%s" msgstr "connessione ricevuta: host=%s porta=%s" -#: postmaster/postmaster.c:4181 +#: postmaster/postmaster.c:4204 #, c-format msgid "connection received: host=%s" msgstr "connessione ricevuta: host=%s" -#: postmaster/postmaster.c:4466 +#: postmaster/postmaster.c:4489 #, c-format msgid "could not execute server process \"%s\": %m" msgstr "esecuzione del processo del server \"%s\" fallita: %m" -#: postmaster/postmaster.c:4809 +#: postmaster/postmaster.c:4642 +#, c-format +msgid "giving up after too many tries to reserve shared memory" +msgstr "mi sono arreso dopo troppi tentativi di riservare memoria condivisa" + +#: postmaster/postmaster.c:4643 +#, c-format +msgid "This might be caused by ASLR or antivirus software." +msgstr "Ciò potrebbe essere causato da ASLR o software antivirus." + +#: postmaster/postmaster.c:4840 #, c-format msgid "SSL configuration could not be loaded in child process" msgstr "errore nel caricamento della configurazione SSL nel processo figlio" -#: postmaster/postmaster.c:4941 +#: postmaster/postmaster.c:4972 #, c-format msgid "Please report this to ." msgstr "Per favore segnala questo problema a ." 
-#: postmaster/postmaster.c:5020 +#: postmaster/postmaster.c:5059 #, c-format msgid "database system is ready to accept read only connections" msgstr "il database è pronto ad accettare connessioni in sola lettura" -#: postmaster/postmaster.c:5289 +#: postmaster/postmaster.c:5328 #, c-format msgid "could not fork startup process: %m" msgstr "fork del processo di avvio fallito: %m" -#: postmaster/postmaster.c:5293 +#: postmaster/postmaster.c:5332 #, c-format msgid "could not fork background writer process: %m" msgstr "fork del processo di scrittura in background fallito: %m" -#: postmaster/postmaster.c:5297 +#: postmaster/postmaster.c:5336 #, c-format msgid "could not fork checkpointer process: %m" msgstr "fork del processo di creazione dei checkpoint fallito: %m" -#: postmaster/postmaster.c:5301 +#: postmaster/postmaster.c:5340 #, c-format msgid "could not fork WAL writer process: %m" msgstr "fork del processo di scrittura dei WAL fallito: %m" -#: postmaster/postmaster.c:5305 +#: postmaster/postmaster.c:5344 #, c-format msgid "could not fork WAL receiver process: %m" msgstr "fork del processo di ricezione dei WAL fallito: %m" -#: postmaster/postmaster.c:5309 +#: postmaster/postmaster.c:5348 #, c-format msgid "could not fork process: %m" msgstr "fork del processo fallito: %m" -#: postmaster/postmaster.c:5478 postmaster/postmaster.c:5501 +#: postmaster/postmaster.c:5535 postmaster/postmaster.c:5558 #, c-format msgid "database connection requirement not indicated during registration" msgstr "requisiti di connessione a database non indicati durante la registrazione" -#: postmaster/postmaster.c:5485 postmaster/postmaster.c:5508 +#: postmaster/postmaster.c:5542 postmaster/postmaster.c:5565 #, c-format msgid "invalid processing mode in background worker" msgstr "modalità di processo non valida nel processo di lavoro in background" -#: postmaster/postmaster.c:5580 +#: postmaster/postmaster.c:5637 #, c-format msgid "starting background worker process \"%s\"" msgstr "avvio del processo di lavoro in background \"%s\"" -#: postmaster/postmaster.c:5592 +#: postmaster/postmaster.c:5649 #, c-format msgid "could not fork worker process: %m" msgstr "fork del processo di lavoro in background fallito: %m" -#: postmaster/postmaster.c:6016 +#: postmaster/postmaster.c:6073 #, c-format msgid "could not duplicate socket %d for use in backend: error code %d" msgstr "duplicazione del socket %d da usare nel backend fallita: codice errore %d" -#: postmaster/postmaster.c:6048 +#: postmaster/postmaster.c:6105 #, c-format msgid "could not create inherited socket: error code %d\n" msgstr "creazione del socket ereditato fallita: codice errore %d\n" -#: postmaster/postmaster.c:6077 +#: postmaster/postmaster.c:6134 #, c-format msgid "could not open backend variables file \"%s\": %s\n" msgstr "apertura del file delle variabili del backend \"%s\" fallita: %s\n" -#: postmaster/postmaster.c:6084 +#: postmaster/postmaster.c:6141 #, c-format msgid "could not read from backend variables file \"%s\": %s\n" msgstr "lettura dal file delle variabili del backend \"%s\" fallita: %s\n" -#: postmaster/postmaster.c:6093 +#: postmaster/postmaster.c:6150 #, c-format msgid "could not remove file \"%s\": %s\n" msgstr "rimozione del file \"%s\" fallita: %s\n" -#: postmaster/postmaster.c:6110 +#: postmaster/postmaster.c:6167 #, c-format msgid "could not map view of backend variables: error code %lu\n" msgstr "non è stato possibile mappare la vista delle variabili del backend: codice errore %lu\n" -#: postmaster/postmaster.c:6119 +#: 
postmaster/postmaster.c:6176 #, c-format msgid "could not unmap view of backend variables: error code %lu\n" msgstr "non è stato possibile rimuovere la mappa della vista delle variabili del backend: codice errore %lu\n" -#: postmaster/postmaster.c:6126 +#: postmaster/postmaster.c:6183 #, c-format msgid "could not close handle to backend parameter variables: error code %lu\n" msgstr "chiusura dell'handle dei parametri variabili del backend fallita: codice errore %lu\n" -#: postmaster/postmaster.c:6287 +#: postmaster/postmaster.c:6344 #, c-format msgid "could not read exit code for process\n" msgstr "lettura del codice di uscita del processo fallita\n" -#: postmaster/postmaster.c:6292 +#: postmaster/postmaster.c:6349 #, c-format msgid "could not post child completion status\n" msgstr "invio dello stato di completamento del figlio fallito\n" @@ -16237,7 +16222,7 @@ msgstr "invio dati da parte del backup di base fallito, backup interrotto" msgid "duplicate option \"%s\"" msgstr "opzione duplicata \"%s\"" -#: replication/basebackup.c:654 utils/misc/guc.c:5769 +#: replication/basebackup.c:654 utils/misc/guc.c:5780 #, c-format msgid "%d is outside the valid range for parameter \"%s\" (%d .. %d)" msgstr "%d non è compreso nell'intervallo di validità del il parametro \"%s\" (%d .. %d)" @@ -16262,156 +16247,156 @@ msgstr "nome del file troppo lungo per il formato tar: \"%s\"" msgid "symbolic link target too long for tar format: file name \"%s\", target \"%s\"" msgstr "destinazione del link simbolico troppo lunga per il formato tar: nome del file \"%s\", destinazione \"%s\"" -#: replication/libpqwalreceiver/libpqwalreceiver.c:226 +#: replication/libpqwalreceiver/libpqwalreceiver.c:231 #, c-format msgid "invalid connection string syntax: %s" msgstr "sintassi della stringa di connessione errata: %s" -#: replication/libpqwalreceiver/libpqwalreceiver.c:250 +#: replication/libpqwalreceiver/libpqwalreceiver.c:255 #, c-format msgid "could not parse connection string: %s" msgstr "interpretazione della stringa di connessione fallita: %s" -#: replication/libpqwalreceiver/libpqwalreceiver.c:300 +#: replication/libpqwalreceiver/libpqwalreceiver.c:305 #, c-format msgid "could not receive database system identifier and timeline ID from the primary server: %s" msgstr "ricezione fallita dell'identificativo del database e l'ID della timeline dal server primario: %s" -#: replication/libpqwalreceiver/libpqwalreceiver.c:311 -#: replication/libpqwalreceiver/libpqwalreceiver.c:515 +#: replication/libpqwalreceiver/libpqwalreceiver.c:316 +#: replication/libpqwalreceiver/libpqwalreceiver.c:523 #, c-format msgid "invalid response from primary server" msgstr "risposta non valida dal server primario" -#: replication/libpqwalreceiver/libpqwalreceiver.c:312 +#: replication/libpqwalreceiver/libpqwalreceiver.c:317 #, c-format msgid "Could not identify system: got %d rows and %d fields, expected %d rows and %d or more fields." msgstr "Identificazione del sistema non riuscita: ricevute %d righe and %d campi, attese %d righe e %d o più campi." 
-#: replication/libpqwalreceiver/libpqwalreceiver.c:378 -#: replication/libpqwalreceiver/libpqwalreceiver.c:384 -#: replication/libpqwalreceiver/libpqwalreceiver.c:409 +#: replication/libpqwalreceiver/libpqwalreceiver.c:383 +#: replication/libpqwalreceiver/libpqwalreceiver.c:389 +#: replication/libpqwalreceiver/libpqwalreceiver.c:414 #, c-format msgid "could not start WAL streaming: %s" msgstr "avvio dello streaming dei WAL fallito: %s" -#: replication/libpqwalreceiver/libpqwalreceiver.c:428 +#: replication/libpqwalreceiver/libpqwalreceiver.c:433 #, c-format msgid "could not send end-of-streaming message to primary: %s" msgstr "invio del messaggio di fine stream al primario fallito: %s" -#: replication/libpqwalreceiver/libpqwalreceiver.c:450 +#: replication/libpqwalreceiver/libpqwalreceiver.c:455 #, c-format msgid "unexpected result set after end-of-streaming" msgstr "risultato imprevisto dopo la fine stream" -#: replication/libpqwalreceiver/libpqwalreceiver.c:470 +#: replication/libpqwalreceiver/libpqwalreceiver.c:469 +#, c-format +msgid "error while shutting down streaming COPY: %s" +msgstr "errore nel terminare lo streaming COPY: %s" + +#: replication/libpqwalreceiver/libpqwalreceiver.c:478 #, c-format msgid "error reading result of streaming command: %s" msgstr "errore nella lettura del risultato del comando di streaming: %s" -#: replication/libpqwalreceiver/libpqwalreceiver.c:478 -#: replication/libpqwalreceiver/libpqwalreceiver.c:688 +#: replication/libpqwalreceiver/libpqwalreceiver.c:486 +#: replication/libpqwalreceiver/libpqwalreceiver.c:714 #, c-format msgid "unexpected result after CommandComplete: %s" msgstr "risultato imprevisto dopo CommandComplete: %s" -#: replication/libpqwalreceiver/libpqwalreceiver.c:504 +#: replication/libpqwalreceiver/libpqwalreceiver.c:512 #, c-format msgid "could not receive timeline history file from the primary server: %s" msgstr "errore nella ricezione del file di storia della timeline dal server primario: %s" -#: replication/libpqwalreceiver/libpqwalreceiver.c:516 +#: replication/libpqwalreceiver/libpqwalreceiver.c:524 #, c-format msgid "Expected 1 tuple with 2 fields, got %d tuples with %d fields." msgstr "Attesa una tupla con 2 campi, ricevute %d tuple con %d campi." 
-#: replication/libpqwalreceiver/libpqwalreceiver.c:663 -#: replication/libpqwalreceiver/libpqwalreceiver.c:701 -#: replication/libpqwalreceiver/libpqwalreceiver.c:707 +#: replication/libpqwalreceiver/libpqwalreceiver.c:678 +#: replication/libpqwalreceiver/libpqwalreceiver.c:729 +#: replication/libpqwalreceiver/libpqwalreceiver.c:735 #, c-format msgid "could not receive data from WAL stream: %s" msgstr "ricezione dati dallo stream WAL fallita: %s" -#: replication/libpqwalreceiver/libpqwalreceiver.c:726 +#: replication/libpqwalreceiver/libpqwalreceiver.c:754 #, c-format msgid "could not send data to WAL stream: %s" msgstr "invio dati allo stream WAL fallito: %s" -#: replication/libpqwalreceiver/libpqwalreceiver.c:775 +#: replication/libpqwalreceiver/libpqwalreceiver.c:803 #, c-format msgid "could not create replication slot \"%s\": %s" msgstr "creazione dello slot di replica \"%s\" fallita: %s" -#: replication/libpqwalreceiver/libpqwalreceiver.c:809 +#: replication/libpqwalreceiver/libpqwalreceiver.c:837 #, c-format -msgid "invalid query responser" -msgstr "risposta della query non valida" +msgid "invalid query response" +msgstr "risposta query non valida" -#: replication/libpqwalreceiver/libpqwalreceiver.c:810 +#: replication/libpqwalreceiver/libpqwalreceiver.c:838 #, c-format msgid "Expected %d fields, got %d fields." msgstr "Attesi %d campi, ricevuti %d campi." -#: replication/libpqwalreceiver/libpqwalreceiver.c:880 +#: replication/libpqwalreceiver/libpqwalreceiver.c:907 #, c-format msgid "the query interface requires a database connection" msgstr "l'interfaccia per le query richiede una connessione ad un database" -#: replication/libpqwalreceiver/libpqwalreceiver.c:911 +#: replication/libpqwalreceiver/libpqwalreceiver.c:938 msgid "empty query" msgstr "query vuota" -#: replication/logical/launcher.c:264 +#: replication/logical/launcher.c:298 #, c-format msgid "starting logical replication worker for subscription \"%s\"" msgstr "avvio del worker di replica logica per la sottoscrizione \"%s\"" -#: replication/logical/launcher.c:271 +#: replication/logical/launcher.c:305 #, c-format msgid "cannot start logical replication workers when max_replication_slots = 0" msgstr "non è possibile avviare worker di replica logica se max_replication_slots = 0" -#: replication/logical/launcher.c:351 +#: replication/logical/launcher.c:385 #, c-format msgid "out of logical replication worker slots" msgstr "worker di replica logica esauriti" -#: replication/logical/launcher.c:352 +#: replication/logical/launcher.c:386 #, c-format msgid "You might need to increase max_logical_replication_workers." msgstr "Potresti dover aumentare max_logical_replication_workers." -#: replication/logical/launcher.c:397 +#: replication/logical/launcher.c:440 #, c-format msgid "out of background worker slots" msgstr "worker di lavoro in background esauriti" -#: replication/logical/launcher.c:398 +#: replication/logical/launcher.c:441 #, c-format msgid "You might need to increase max_worker_processes." msgstr "Potresti dover aumentare max_worker_processes." 
-#: replication/logical/launcher.c:549 +#: replication/logical/launcher.c:624 #, c-format msgid "logical replication worker slot %d is empty, cannot attach" msgstr "lo slot del worker di replica logica %d è vuoto, non è possibile agganciarsi" -#: replication/logical/launcher.c:558 +#: replication/logical/launcher.c:633 #, c-format msgid "logical replication worker slot %d is already used by another worker, cannot attach" msgstr "lo slot del worker di replica logica %d è già in uso da un altro processo, non è possibile agganciarsi" -#: replication/logical/launcher.c:791 +#: replication/logical/launcher.c:885 #, c-format msgid "logical replication launcher started" msgstr "lanciatore di replica logica avviato" -#: replication/logical/launcher.c:899 -#, c-format -msgid "logical replication launcher shutting down" -msgstr "lanciatore di replica logica in arresto" - #: replication/logical/logical.c:83 #, c-format msgid "logical decoding requires wal_level >= logical" @@ -16487,8 +16472,8 @@ msgstr "l'array deve essere monodimensionale" msgid "array must not contain nulls" msgstr "l'array non deve contenere NULL" -#: replication/logical/logicalfuncs.c:222 utils/adt/json.c:2282 -#: utils/adt/jsonb.c:1357 +#: replication/logical/logicalfuncs.c:222 utils/adt/json.c:2246 +#: utils/adt/jsonb.c:1314 #, c-format msgid "array must have even number of elements" msgstr "l'array deve avere un numero pari di elementi" @@ -16498,79 +16483,89 @@ msgstr "l'array deve avere un numero pari di elementi" msgid "logical decoding output plugin \"%s\" produces binary output, but function \"%s\" expects textual data" msgstr "il plugin di output di decodifica logica \"%s\" produce dati binari, ma la funzione \"%s\" si aspetta dati testuali" -#: replication/logical/origin.c:180 +#: replication/logical/origin.c:185 #, c-format msgid "only superusers can query or manipulate replication origins" msgstr "solo i superutenti possono interrogare o replicare le origini di replica" -#: replication/logical/origin.c:185 +#: replication/logical/origin.c:190 #, c-format msgid "cannot query or manipulate replication origin when max_replication_slots = 0" msgstr "non è possibile interrogare o manipolare le origini di replica quando max_replication_slots = 0" -#: replication/logical/origin.c:190 +#: replication/logical/origin.c:195 #, c-format msgid "cannot manipulate replication origins during recovery" msgstr "non è possibile manipolare le origini di replica durante il recupero" -#: replication/logical/origin.c:314 +#: replication/logical/origin.c:230 +#, c-format +msgid "replication origin \"%s\" does not exist" +msgstr "l'origine di replica \"%s\" non esiste" + +#: replication/logical/origin.c:321 #, c-format msgid "could not find free replication origin OID" msgstr "non è stato trovato alcun OID di origine di replica libero" -#: replication/logical/origin.c:351 +#: replication/logical/origin.c:363 #, c-format msgid "could not drop replication origin with OID %d, in use by PID %d" msgstr "impossibile eliminare l'origine di replica con OID %d, in uso dal pid %d" -#: replication/logical/origin.c:667 +#: replication/logical/origin.c:444 +#, c-format +msgid "replication origin with OID %u does not exist" +msgstr "l'origine di replica con OID %u non esiste" + +#: replication/logical/origin.c:691 #, c-format msgid "replication checkpoint has wrong magic %u instead of %u" msgstr "il checkpoint di replica ha numero magico sbagliato %u invece di %u" -#: replication/logical/origin.c:699 +#: replication/logical/origin.c:723 #, c-format msgid 
"could not read file \"%s\": read %d of %zu" msgstr "lettura del file \"%s\" fallita: letti %d di %zu" -#: replication/logical/origin.c:708 +#: replication/logical/origin.c:732 #, c-format msgid "could not find free replication state, increase max_replication_slots" msgstr "nessuno stato di replica libero trovato, incrementa \"max_replication_slots\"" -#: replication/logical/origin.c:726 +#: replication/logical/origin.c:750 #, c-format msgid "replication slot checkpoint has wrong checksum %u, expected %u" msgstr "il checkpoint dello slot di replica ha il checksum sbagliato %u, atteso %u" -#: replication/logical/origin.c:850 +#: replication/logical/origin.c:874 #, c-format msgid "replication origin with OID %d is already active for PID %d" msgstr "l'origine di replica con OID %d è già attiva per il PID %d" -#: replication/logical/origin.c:861 replication/logical/origin.c:1041 +#: replication/logical/origin.c:885 replication/logical/origin.c:1072 #, c-format msgid "could not find free replication state slot for replication origin with OID %u" msgstr "nessuno slot di stato di replica trovato per l'origine di replica con OID %u" -#: replication/logical/origin.c:863 replication/logical/origin.c:1043 -#: replication/slot.c:1450 +#: replication/logical/origin.c:887 replication/logical/origin.c:1074 +#: replication/slot.c:1509 #, c-format msgid "Increase max_replication_slots and try again." msgstr "Incrementa max_replication_slots e prova di nuovo." -#: replication/logical/origin.c:1000 +#: replication/logical/origin.c:1031 #, c-format msgid "cannot setup replication origin when one is already setup" msgstr "non è possibile impostare l'origine di replica quando una è già impostata" -#: replication/logical/origin.c:1029 +#: replication/logical/origin.c:1060 #, c-format msgid "replication identifier %d is already active for PID %d" msgstr "l'identificativo di replica %d è già attivo per il PID %d" -#: replication/logical/origin.c:1075 replication/logical/origin.c:1270 -#: replication/logical/origin.c:1290 +#: replication/logical/origin.c:1111 replication/logical/origin.c:1309 +#: replication/logical/origin.c:1329 #, c-format msgid "no replication origin is configured" msgstr "nessuna origine di replica configurata" @@ -16580,27 +16575,27 @@ msgstr "nessuna origine di replica configurata" msgid "logical replication target relation \"%s.%s\" does not exist" msgstr "la relazione di destinazione di replica logica \"%s.%s\" non esiste" -#: replication/logical/relation.c:292 +#: replication/logical/relation.c:300 #, c-format msgid "logical replication target relation \"%s.%s\" is missing some replicated columns" msgstr "la relazione di destinazione di replica logica \"%s.%s\" ha alcune colonne replicate mancanti" -#: replication/logical/relation.c:332 +#: replication/logical/relation.c:340 #, c-format msgid "logical replication target relation \"%s.%s\" uses system columns in REPLICA IDENTITY index" msgstr "la relazione di destinazione di replica logica \"%s.%s\" usa colonne di sistema nell'indice REPLICA IDENTITY" -#: replication/logical/relation.c:448 +#: replication/logical/relation.c:456 #, c-format -msgid "builtin type %u not found" +msgid "built-in type %u not found" msgstr "tipo predefinito %u non trovato" -#: replication/logical/relation.c:449 +#: replication/logical/relation.c:457 #, c-format -msgid "This can be caused by having publisher with higher major version than subscriber" +msgid "This can be caused by having a publisher with a higher PostgreSQL major version than the subscriber." 
msgstr "Ciò può essere causato da un database di pubblicazione di versione maggiore di quello di sottoscrizione" -#: replication/logical/relation.c:481 +#: replication/logical/relation.c:488 #, c-format msgid "data type \"%s.%s\" required for logical replication does not exist" msgstr "il tipo di dato \"%s.%s\" richiesto per la replica logica non esiste" @@ -16701,237 +16696,247 @@ msgstr "La decodifica logica inizierà usando uno snapshot salvato." msgid "could not parse file name \"%s\"" msgstr "interpretazione del nome di file \"%s\" fallita" -#: replication/logical/tablesync.c:137 +#: replication/logical/tablesync.c:138 #, c-format -msgid "logical replication synchronization worker finished processing" -msgstr "il worked di sincronizzazione della replica logica ha terminato il lavoro" +msgid "logical replication table synchronization worker for subscription \"%s\", table \"%s\" has finished" +msgstr "worker di replica logica di sincronizzazione tabelle per la sottoscrizione \"%s\", la tabella \"%s\" è completata" -#: replication/logical/tablesync.c:632 +#: replication/logical/tablesync.c:685 #, c-format msgid "could not fetch table info for table \"%s.%s\" from publisher: %s" msgstr "errore nella lettura delle informazioni sulla tabella \"%s.%s\" per la pubblicazione: %s" -#: replication/logical/tablesync.c:638 +#: replication/logical/tablesync.c:691 #, c-format msgid "table \"%s.%s\" not found on publisher" msgstr "tabella \"%s.%s\" non trovata sul database di pubblicazione" -#: replication/logical/tablesync.c:668 +#: replication/logical/tablesync.c:721 #, c-format msgid "could not fetch table info for table \"%s.%s\": %s" msgstr "errore nella lettura delle informazioni sulla tabella \"%s.%s\": %s" -#: replication/logical/tablesync.c:738 +#: replication/logical/tablesync.c:791 #, c-format msgid "could not start initial contents copy for table \"%s.%s\": %s" msgstr "errore nell'avvio della copia iniziale dei contenuti per la tabella \"%s.%s\": %s" -#: replication/logical/tablesync.c:846 +#: replication/logical/tablesync.c:905 #, c-format msgid "table copy could not start transaction on publisher" msgstr "inizio della transazione non riuscito per la copia della tabella sul database di pubblicazione" -#: replication/logical/tablesync.c:866 +#: replication/logical/tablesync.c:927 #, c-format msgid "table copy could not finish transaction on publisher" msgstr "completamento della transazione non riuscito per la copia della tabella sul database di pubblicazione" -#: replication/logical/worker.c:283 +#: replication/logical/worker.c:291 #, c-format msgid "processing remote data for replication target relation \"%s.%s\" column \"%s\", remote type %s, local type %s" msgstr "processo dei dati remoti per la replica della tabella di destinazione \"%s.%s\" colonna \"%s\", tipo remoto %s, tipo locale %s" -#: replication/logical/worker.c:486 +#: replication/logical/worker.c:504 #, c-format msgid "ORIGIN message sent out of order" msgstr "messaggi ORIGIN inviati in ordine sbagliato" -#: replication/logical/worker.c:617 +#: replication/logical/worker.c:635 #, c-format -msgid "publisher does not send replica identity column expected by the logical replication target relation \"%s.%s\"" -msgstr "il database di pubblicazione non invia le colonne di identità di replica attese dalla relazione di destinazione di replica logica \"%s.%s\"" +msgid "publisher did not send replica identity column expected by the logical replication target relation \"%s.%s\"" +msgstr "il database di pubblicazione non ha inviato le 
colonne di identità di replica attese dalla relazione di destinazione di replica logica \"%s.%s\"" -#: replication/logical/worker.c:624 +#: replication/logical/worker.c:642 #, c-format msgid "logical replication target relation \"%s.%s\" has neither REPLICA IDENTITY index nor PRIMARY KEY and published relation does not have REPLICA IDENTITY FULL" msgstr "la relazione di destinazione di replica logica \"%s.%s\" non ha né un indice REPLICA IDENTITY né una PRIMARY KEY e la relazione pubblicata non ha REPLICA IDENTITY FULL" -#: replication/logical/worker.c:831 +#: replication/logical/worker.c:849 #, c-format -msgid "logical replication could not find row for delete in replication target %s" -msgstr "la replica logica non ha trovato la riga da cancellare nella destinazione %s" +msgid "logical replication could not find row for delete in replication target relation \"%s\"" +msgstr "la replica logica non ha trovato la riga da cancellare nella tabella di destinazione \"%s\"" -#: replication/logical/worker.c:898 +#: replication/logical/worker.c:916 #, c-format -msgid "invalid logical replication message type %c" -msgstr "messaggio di replica logica tipo %c non valido" +msgid "invalid logical replication message type \"%c\"" +msgstr "messaggio di replica logica tipo \"%c\" non valido" -#: replication/logical/worker.c:1036 +#: replication/logical/worker.c:1057 #, c-format msgid "data stream from publisher has ended" msgstr "stream di dati dal database di pubblicazione terminato" -#: replication/logical/worker.c:1181 +#: replication/logical/worker.c:1216 #, c-format msgid "terminating logical replication worker due to timeout" msgstr "arresto del worker di replica logica per timeout" -#: replication/logical/worker.c:1328 +#: replication/logical/worker.c:1364 #, c-format -msgid "logical replication worker for subscription \"%s\" will stop because the subscription was removed" -msgstr "il worker di replica logica per la sottoscrizione \"%s\" verrà terminato perché la sottoscrizione è stata rimossa" +msgid "logical replication apply worker for subscription \"%s\" will stop because the subscription was removed" +msgstr "il worker di replica logica per la sottoscrizione \"%s\" verrà fermato perché la sottoscrizione è stata rimossa" -#: replication/logical/worker.c:1343 +#: replication/logical/worker.c:1378 #, c-format -msgid "logical replication worker for subscription \"%s\" will stop because the subscription was disabled" -msgstr "il worker di replica logica per la sottoscrizione \"%s\" verrà terminato perché la sottoscrizione è stata disabilitata" +msgid "logical replication apply worker for subscription \"%s\" will stop because the subscription was disabled" +msgstr "il worker di replica logica per la sottoscrizione \"%s\" verrà fermato perché la sottoscrizione è stata disabilitata" -#: replication/logical/worker.c:1358 +#: replication/logical/worker.c:1392 #, c-format -msgid "logical replication worker for subscription \"%s\" will restart because the connection information was changed" -msgstr "il worker di replica logica per la sottoscrizione \"%s\" verrà riavviato perché le informazioni di connessione sono cambiate" +msgid "logical replication apply worker for subscription \"%s\" will restart because the connection information was changed" +msgstr "il worker di replica logica per la sottoscrizione \"%s\" verrà riavviato perché l'informazione di connessione è cambiata" -#: replication/logical/worker.c:1373 +#: replication/logical/worker.c:1406 #, c-format -msgid "logical replication worker for 
subscription \"%s\" will restart because subscription was renamed" +msgid "logical replication apply worker for subscription \"%s\" will restart because subscription was renamed" msgstr "il worker di replica logica per la sottoscrizione \"%s\" verrà riavviato perché la sottoscrizione è stata rinominata" -#: replication/logical/worker.c:1391 +#: replication/logical/worker.c:1423 #, c-format -msgid "logical replication worker for subscription \"%s\" will restart because the replication slot name was changed" +msgid "logical replication apply worker for subscription \"%s\" will restart because the replication slot name was changed" msgstr "il worker di replica logica per la sottoscrizione \"%s\" verrà riavviato perché il nome dello slot di replica è cambiato" -#: replication/logical/worker.c:1406 +#: replication/logical/worker.c:1437 #, c-format -msgid "logical replication worker for subscription \"%s\" will restart because subscription's publications were changed" -msgstr "il worker di replica logica per la sottoscrizione \"%s\" verrà riavviato perché le pubblicazioni della sottoscrizione sono cambiate" +msgid "logical replication apply worker for subscription \"%s\" will restart because subscription's publications were changed" +msgstr "il worker di replica logica per la sottoscrizione \"%s\" verrà riavviato le pubblicazioni della sottoscrizione sono cambiate" -#: replication/logical/worker.c:1506 +#: replication/logical/worker.c:1545 #, c-format -msgid "logical replication worker for subscription \"%s\" will not start because the subscription was disabled during startup" +msgid "logical replication apply worker for subscription \"%s\" will not start because the subscription was disabled during startup" msgstr "il worker di replica logica per la sottoscrizione \"%s\" non verrà avviato perché la sottoscrizione è stata disabilitata all'avvio" -#: replication/logical/worker.c:1562 +#: replication/logical/worker.c:1559 +#, c-format +msgid "logical replication table synchronization worker for subscription \"%s\", table \"%s\" has started" +msgstr "il worker di replica logica di sincronizzazione tabelle per la sottoscrizione \"%s\", la tabella \"%s\" è stata avviata" + +#: replication/logical/worker.c:1563 +#, c-format +msgid "logical replication apply worker for subscription \"%s\" has started" +msgstr "il worker di replica logica per la sottoscrizione \"%s\" è partito" + +#: replication/logical/worker.c:1603 #, c-format msgid "subscription has no replication slot set" msgstr "la sottoscrizione non ha uno slot di replica impostato" -#: replication/pgoutput/pgoutput.c:113 +#: replication/pgoutput/pgoutput.c:114 #, c-format msgid "invalid proto_version" msgstr "proto_version non valido" -#: replication/pgoutput/pgoutput.c:118 +#: replication/pgoutput/pgoutput.c:119 #, c-format -msgid "proto_verson \"%s\" out of range" +msgid "proto_version \"%s\" out of range" msgstr "proto_version \"%s\" fuori dall'intervallo consentito" -#: replication/pgoutput/pgoutput.c:135 +#: replication/pgoutput/pgoutput.c:136 #, c-format msgid "invalid publication_names syntax" msgstr "sintassi di publication_names non valida" -#: replication/pgoutput/pgoutput.c:179 +#: replication/pgoutput/pgoutput.c:180 #, c-format msgid "client sent proto_version=%d but we only support protocol %d or lower" msgstr "il cliente ha inviato proto_version=%d ma solo il protocollo %d o inferiore è supportato" -#: replication/pgoutput/pgoutput.c:185 +#: replication/pgoutput/pgoutput.c:186 #, c-format msgid "client sent proto_version=%d but we 
only support protocol %d or higher" msgstr "il cliente ha inviato proto_version=%d ma solo il protocollo %d o superiore è supportato" -#: replication/pgoutput/pgoutput.c:191 +#: replication/pgoutput/pgoutput.c:192 #, c-format msgid "publication_names parameter missing" msgstr "parametro publication_names mancante" -#: replication/slot.c:181 +#: replication/slot.c:182 #, c-format msgid "replication slot name \"%s\" is too short" msgstr "il nome dello slot di replica \"%s\" è troppo corto" -#: replication/slot.c:190 +#: replication/slot.c:191 #, c-format msgid "replication slot name \"%s\" is too long" msgstr "il nome dello slot di replica \"%s\" è troppo lungo" -#: replication/slot.c:203 +#: replication/slot.c:204 #, c-format msgid "replication slot name \"%s\" contains invalid character" msgstr "il nome dello slot di replica \"%s\" contiene caratteri non validi" -#: replication/slot.c:205 +#: replication/slot.c:206 #, c-format msgid "Replication slot names may only contain lower case letters, numbers, and the underscore character." msgstr "I nomi degli slot di replica possono contenere solo lettere minuscole, numeri e il carattere underscore." -#: replication/slot.c:252 +#: replication/slot.c:253 #, c-format msgid "replication slot \"%s\" already exists" msgstr "lo slot di replica \"%s\" esiste già" -#: replication/slot.c:262 +#: replication/slot.c:263 #, c-format msgid "all replication slots are in use" msgstr "tutti gli slot di replica sono in uso" -#: replication/slot.c:263 +#: replication/slot.c:264 #, c-format msgid "Free one or increase max_replication_slots." msgstr "Liberane uno o incrementa max_replication_slots." -#: replication/slot.c:359 +#: replication/slot.c:379 #, c-format msgid "replication slot \"%s\" does not exist" msgstr "lo slot di replica \"%s\" non esiste" -#: replication/slot.c:363 replication/slot.c:881 +#: replication/slot.c:390 replication/slot.c:940 #, c-format msgid "replication slot \"%s\" is active for PID %d" msgstr "lo slot di replica \"%s\" è attivo per il PID %d" -#: replication/slot.c:565 replication/slot.c:1062 replication/slot.c:1411 +#: replication/slot.c:624 replication/slot.c:1121 replication/slot.c:1470 #, c-format msgid "could not remove directory \"%s\"" msgstr "eliminazione della directory \"%s\" fallita" -#: replication/slot.c:911 +#: replication/slot.c:970 #, c-format msgid "replication slots can only be used if max_replication_slots > 0" msgstr "gli slot di replica possono essere usati solo se max_replication_slots > 0" -#: replication/slot.c:916 +#: replication/slot.c:975 #, c-format msgid "replication slots can only be used if wal_level >= replica" msgstr "gli slot di replica possono essere usati solo se wal_level >= replica" -#: replication/slot.c:1341 replication/slot.c:1381 +#: replication/slot.c:1400 replication/slot.c:1440 #, c-format msgid "could not read file \"%s\", read %d of %u: %m" msgstr "lettura del file \"%s\" fallita, letti %d su %u: %m" -#: replication/slot.c:1350 +#: replication/slot.c:1409 #, c-format msgid "replication slot file \"%s\" has wrong magic number: %u instead of %u" msgstr "il file dello slot di replica \"%s\" ha il numero magico sbagliato: %u invece di %u" -#: replication/slot.c:1357 +#: replication/slot.c:1416 #, c-format msgid "replication slot file \"%s\" has unsupported version %u" msgstr "il file dello slot di replica \"%s\" ha la versione non supportata %u" -#: replication/slot.c:1364 +#: replication/slot.c:1423 #, c-format msgid "replication slot file \"%s\" has corrupted length %u" msgstr "il file 
dello slot di replica \"%s\" ha la lunghezza corrotta %u" -#: replication/slot.c:1396 +#: replication/slot.c:1455 #, c-format msgid "checksum mismatch for replication slot file \"%s\": is %u, should be %u" msgstr "il checksum del file dello slot di replica \"%s\" non combacia: è %u, sarebbe dovuto essere %u" -#: replication/slot.c:1449 +#: replication/slot.c:1508 #, c-format msgid "too many replication slots active before shutdown" msgstr "troppi slot di replica attivi prima dell'arresto" @@ -16951,27 +16956,27 @@ msgstr "La transazione ha già effettuato il commit localmente, ma potrebbe non msgid "canceling wait for synchronous replication due to user request" msgstr "annullamento dell'attesa di replica sincrona su richiesta utente" -#: replication/syncrep.c:396 +#: replication/syncrep.c:399 #, c-format msgid "standby \"%s\" now has synchronous standby priority %u" msgstr "lo standby \"%s\" ha ora priorità di standby sincrono %u" -#: replication/syncrep.c:457 +#: replication/syncrep.c:460 #, c-format msgid "standby \"%s\" is now a synchronous standby with priority %u" msgstr "lo standby \"%s\" è ora uno standby sincrono con priorità %u" -#: replication/syncrep.c:461 +#: replication/syncrep.c:464 #, c-format msgid "standby \"%s\" is now a candidate for quorum synchronous standby" msgstr "lo standby \"%s\" è ora un candidato al quorum di standby sincroni" -#: replication/syncrep.c:1132 +#: replication/syncrep.c:1162 #, c-format msgid "synchronous_standby_names parser failed" msgstr "interpretazione di synchronous_standby_names non riuscita" -#: replication/syncrep.c:1138 +#: replication/syncrep.c:1168 #, c-format msgid "number of synchronous standbys (%d) must be greater than zero" msgstr "il numero di standby sincroni (%d) deve essere maggiore di zero" @@ -16981,194 +16986,194 @@ msgstr "il numero di standby sincroni (%d) deve essere maggiore di zero" msgid "terminating walreceiver process due to administrator command" msgstr "interruzione del processo walreceiver su comando dell'amministratore" -#: replication/walreceiver.c:301 +#: replication/walreceiver.c:306 #, c-format msgid "could not connect to the primary server: %s" msgstr "connessione al server primario fallita: %s" -#: replication/walreceiver.c:340 +#: replication/walreceiver.c:345 #, c-format msgid "database system identifier differs between the primary and standby" msgstr "l'identificativo del database è diverso tra il primario e lo standby" -#: replication/walreceiver.c:341 +#: replication/walreceiver.c:346 #, c-format msgid "The primary's identifier is %s, the standby's identifier is %s." msgstr "L'identificativo del primario è %s, quello dello standby è %s." 
-#: replication/walreceiver.c:352 +#: replication/walreceiver.c:357 #, c-format msgid "highest timeline %u of the primary is behind recovery timeline %u" msgstr "la timeline massima %u del primario è dietro la timeline di recupero %u" -#: replication/walreceiver.c:388 +#: replication/walreceiver.c:393 #, c-format msgid "started streaming WAL from primary at %X/%X on timeline %u" msgstr "streaming WAL avviato dal primario a %X/%X sulla timeline %u" -#: replication/walreceiver.c:393 +#: replication/walreceiver.c:398 #, c-format msgid "restarted WAL streaming at %X/%X on timeline %u" msgstr "streaming WAL riavviato a %X/%X sulla timeline %u" -#: replication/walreceiver.c:422 +#: replication/walreceiver.c:427 #, c-format msgid "cannot continue WAL streaming, recovery has already ended" msgstr "non è possibile continuare lo streaming dei WAL, il recupero è già terminato" -#: replication/walreceiver.c:459 +#: replication/walreceiver.c:464 #, c-format msgid "replication terminated by primary server" msgstr "replica terminata dal server primario" -#: replication/walreceiver.c:460 +#: replication/walreceiver.c:465 #, c-format msgid "End of WAL reached on timeline %u at %X/%X." msgstr "Fine del WAL raggiunta sulla timeline %u a %X/%X." -#: replication/walreceiver.c:555 +#: replication/walreceiver.c:560 #, c-format msgid "terminating walreceiver due to timeout" msgstr "walreceiver terminato a causa di timeout" -#: replication/walreceiver.c:595 +#: replication/walreceiver.c:600 #, c-format msgid "primary server contains no more WAL on requested timeline %u" msgstr "il server primario non contiene più alcun WAL sulla timeline richiesta %u" -#: replication/walreceiver.c:610 replication/walreceiver.c:969 +#: replication/walreceiver.c:615 replication/walreceiver.c:974 #, c-format msgid "could not close log segment %s: %m" msgstr "chiusura del segmento di log %s fallita: %m" -#: replication/walreceiver.c:735 +#: replication/walreceiver.c:740 #, c-format msgid "fetching timeline history file for timeline %u from primary server" msgstr "recupero del file di storia della timeline %u dal server primario" -#: replication/walreceiver.c:1023 +#: replication/walreceiver.c:1028 #, c-format msgid "could not write to log segment %s at offset %u, length %lu: %m" msgstr "scrittura nel segmento di log %s in posizione %u, lunghezza %lu fallita: %m" -#: replication/walsender.c:492 +#: replication/walsender.c:490 #, c-format msgid "could not seek to beginning of file \"%s\": %m" msgstr "spostamento all'inizio del file \"%s\" fallito: %m" -#: replication/walsender.c:533 +#: replication/walsender.c:531 #, c-format msgid "IDENTIFY_SYSTEM has not been run before START_REPLICATION" msgstr "IDENTIFY_SYSTEM non eseguito prima di START_REPLICATION" -#: replication/walsender.c:550 +#: replication/walsender.c:548 #, c-format msgid "cannot use a logical replication slot for physical replication" msgstr "non si può usare uno slot di replica logico per la replica fisica" -#: replication/walsender.c:613 +#: replication/walsender.c:611 #, c-format msgid "requested starting point %X/%X on timeline %u is not in this server's history" msgstr "il punto di avvio richiesto %X/%X sulla timeline %u non è nella storia di questo server" -#: replication/walsender.c:617 +#: replication/walsender.c:615 #, c-format msgid "This server's history forked from timeline %u at %X/%X." msgstr "La storia di questo server si è separata dalla timeline %u a %X/%X."
-#: replication/walsender.c:662 +#: replication/walsender.c:660 #, c-format msgid "requested starting point %X/%X is ahead of the WAL flush position of this server %X/%X" msgstr "il punto di avvio richiesto %X/%X è più avanti della posizione di flush del WAL %X/%X di questo server" -#: replication/walsender.c:896 +#: replication/walsender.c:889 #, c-format msgid "CREATE_REPLICATION_SLOT ... EXPORT_SNAPSHOT must not be called inside a transaction" msgstr "CREATE_REPLICATION_SLOT ... EXPORT_SNAPSHOT non può essere eseguito in una transazione" -#: replication/walsender.c:905 +#: replication/walsender.c:898 #, c-format msgid "CREATE_REPLICATION_SLOT ... USE_SNAPSHOT must be called inside a transaction" msgstr "CREATE_REPLICATION_SLOT ... USE_SNAPSHOT deve essere eseguito in una transazione" -#: replication/walsender.c:910 +#: replication/walsender.c:903 #, c-format msgid "CREATE_REPLICATION_SLOT ... USE_SNAPSHOT must be called in REPEATABLE READ isolation mode transaction" msgstr "CREATE_REPLICATION_SLOT ... USE_SNAPSHOT deve essere eseguito in una transazione in modalità REPEATABLE READ" -#: replication/walsender.c:915 +#: replication/walsender.c:908 #, c-format msgid "CREATE_REPLICATION_SLOT ... USE_SNAPSHOT must be called before any query" msgstr "CREATE_REPLICATION_SLOT ... USE_SNAPSHOT deve essere eseguito prima di qualunque query" -#: replication/walsender.c:920 +#: replication/walsender.c:913 #, c-format msgid "CREATE_REPLICATION_SLOT ... USE_SNAPSHOT must not be called in a subtransaction" msgstr "CREATE_REPLICATION_SLOT ... USE_SNAPSHOT non può essere eseguito in una sottotransazione" -#: replication/walsender.c:1066 +#: replication/walsender.c:1059 #, c-format msgid "terminating walsender process after promotion" msgstr "interruzione del processo walsender dopo la promozione" -#: replication/walsender.c:1434 +#: replication/walsender.c:1437 #, c-format msgid "cannot execute new commands while WAL sender is in stopping mode" msgstr "non è possibile eseguire nuovi comandi mentre WAL sender è in modalità di arresto" -#: replication/walsender.c:1467 +#: replication/walsender.c:1470 #, c-format msgid "received replication command: %s" msgstr "ricevuto comando di replica: %s" -#: replication/walsender.c:1483 tcop/fastpath.c:281 tcop/postgres.c:1003 -#: tcop/postgres.c:1313 tcop/postgres.c:1572 tcop/postgres.c:1977 -#: tcop/postgres.c:2345 tcop/postgres.c:2420 +#: replication/walsender.c:1486 tcop/fastpath.c:281 tcop/postgres.c:997 +#: tcop/postgres.c:1307 tcop/postgres.c:1566 tcop/postgres.c:1971 +#: tcop/postgres.c:2339 tcop/postgres.c:2414 #, c-format msgid "current transaction is aborted, commands ignored until end of transaction block" msgstr "la transazione corrente è interrotta, i comandi saranno ignorati fino alla fine del blocco della transazione" -#: replication/walsender.c:1545 +#: replication/walsender.c:1548 #, c-format -msgid "not connected to database" -msgstr "non connesso ad un database" +msgid "cannot execute SQL commands in WAL sender for physical replication" +msgstr "non è possibile eseguire comandi SQL nel WAL sender di replica fisica" -#: replication/walsender.c:1585 replication/walsender.c:1601 +#: replication/walsender.c:1588 replication/walsender.c:1604 #, c-format msgid "unexpected EOF on standby connection" msgstr "fine del file inaspettata sulla connessione di standby" -#: replication/walsender.c:1615 +#: replication/walsender.c:1618 #, c-format msgid "unexpected standby message type \"%c\", after receiving CopyDone" msgstr "tipo di messaggio di standby \"%c\" 
imprevisto, dopo la ricezione di CopyDone" -#: replication/walsender.c:1653 +#: replication/walsender.c:1656 #, c-format msgid "invalid standby message type \"%c\"" msgstr "tipo di messaggio \"%c\" di standby non valido" -#: replication/walsender.c:1694 +#: replication/walsender.c:1697 #, c-format msgid "unexpected message type \"%c\"" msgstr "tipo di messaggio \"%c\" inatteso" -#: replication/walsender.c:2064 +#: replication/walsender.c:2067 #, c-format msgid "terminating walsender process due to replication timeout" msgstr "interruzione del processo walsender a causa di timeout di replica" -#: replication/walsender.c:2152 +#: replication/walsender.c:2156 #, c-format msgid "standby \"%s\" has now caught up with primary" msgstr "lo standby \"%s\" ha ora raggiunto il primario" -#: replication/walsender.c:2266 +#: replication/walsender.c:2263 #, c-format msgid "number of requested standby connections exceeds max_wal_senders (currently %d)" msgstr "il numero di richieste di connessioni di standby supera max_wal_senders (attualmente %d)" -#: rewrite/rewriteDefine.c:112 rewrite/rewriteDefine.c:975 +#: rewrite/rewriteDefine.c:112 rewrite/rewriteDefine.c:981 #, c-format msgid "rule \"%s\" for relation \"%s\" already exists" msgstr "la regola \"%s\" per la relazione \"%s\" esiste già" @@ -17235,136 +17240,141 @@ msgstr "la regola della vista \"%s\" deve essere chiamata \"%s\"" #: rewrite/rewriteDefine.c:428 #, c-format -msgid "could not convert partitioned table \"%s\" to a view" -msgstr "conversione della tabella partizionata \"%s\" in vista fallita" +msgid "cannot convert partitioned table \"%s\" to a view" +msgstr "non è possibile convertire la tabella partizionata \"%s\" in una vista" + +#: rewrite/rewriteDefine.c:434 +#, c-format +msgid "cannot convert partition \"%s\" to a view" +msgstr "non è possibile convertire la partizione \"%s\" in una vista" -#: rewrite/rewriteDefine.c:436 +#: rewrite/rewriteDefine.c:442 #, c-format msgid "could not convert table \"%s\" to a view because it is not empty" msgstr "conversione della tabella \"%s\" in vista fallita perché non è vuota" -#: rewrite/rewriteDefine.c:444 +#: rewrite/rewriteDefine.c:450 #, c-format msgid "could not convert table \"%s\" to a view because it has triggers" msgstr "conversione della tabella \"%s\" in vista fallita perché ha dei trigger" -#: rewrite/rewriteDefine.c:446 +#: rewrite/rewriteDefine.c:452 #, c-format msgid "In particular, the table cannot be involved in any foreign key relationships." msgstr "In particolare, la tabella non può prendere parte in alcuna relazione di chiave esterna." 
-#: rewrite/rewriteDefine.c:451 +#: rewrite/rewriteDefine.c:457 #, c-format msgid "could not convert table \"%s\" to a view because it has indexes" msgstr "conversione della tabella \"%s\" in vista fallita perché ha indici" -#: rewrite/rewriteDefine.c:457 +#: rewrite/rewriteDefine.c:463 #, c-format msgid "could not convert table \"%s\" to a view because it has child tables" msgstr "conversione della tabella \"%s\" in vista fallita perché ha tabelle figlie" -#: rewrite/rewriteDefine.c:463 +#: rewrite/rewriteDefine.c:469 #, c-format msgid "could not convert table \"%s\" to a view because it has row security enabled" msgstr "conversione della tabella \"%s\" in vista fallita perché ha la sicurezza delle righe abilitata" -#: rewrite/rewriteDefine.c:469 +#: rewrite/rewriteDefine.c:475 #, c-format msgid "could not convert table \"%s\" to a view because it has row security policies" msgstr "conversione della tabella \"%s\" in vista fallita perché ha regole di sicurezza per riga" -#: rewrite/rewriteDefine.c:496 +#: rewrite/rewriteDefine.c:502 #, c-format msgid "cannot have multiple RETURNING lists in a rule" msgstr "non è possibile avere più di una lista RETURNING in una regola" -#: rewrite/rewriteDefine.c:501 +#: rewrite/rewriteDefine.c:507 #, c-format msgid "RETURNING lists are not supported in conditional rules" msgstr "le liste RETURNING non sono supportate in regole condizionali" -#: rewrite/rewriteDefine.c:505 +#: rewrite/rewriteDefine.c:511 #, c-format msgid "RETURNING lists are not supported in non-INSTEAD rules" msgstr "le liste RETURNING non sono supportate in regole che non siano INSTEAD" -#: rewrite/rewriteDefine.c:670 +#: rewrite/rewriteDefine.c:676 #, c-format msgid "SELECT rule's target list has too many entries" msgstr "la lista di destinazione della regola SELECT ha troppi elementi" -#: rewrite/rewriteDefine.c:671 +#: rewrite/rewriteDefine.c:677 #, c-format msgid "RETURNING list has too many entries" msgstr "la lista RETURNING ha troppi elementi" -#: rewrite/rewriteDefine.c:698 +#: rewrite/rewriteDefine.c:704 #, c-format msgid "cannot convert relation containing dropped columns to view" msgstr "non è possibile convertire una relazione contenente colonne eliminate in una vista" -#: rewrite/rewriteDefine.c:699 +#: rewrite/rewriteDefine.c:705 #, c-format msgid "cannot create a RETURNING list for a relation containing dropped columns" msgstr "non è possibile creare una lista RETURNING per una relazione che contiene colonne eliminate" -#: rewrite/rewriteDefine.c:705 +#: rewrite/rewriteDefine.c:711 #, c-format msgid "SELECT rule's target entry %d has different column name from column \"%s\"" msgstr "elemento di destinazione %d della regola SELECT ha nome di colonna diverso dalla colonna \"%s\"" -#: rewrite/rewriteDefine.c:707 +#: rewrite/rewriteDefine.c:713 #, c-format msgid "SELECT target entry is named \"%s\"." msgstr "L'elemento di destinazione di SELECT si chiama \"%s\"." 
-#: rewrite/rewriteDefine.c:716 +#: rewrite/rewriteDefine.c:722 #, c-format msgid "SELECT rule's target entry %d has different type from column \"%s\"" msgstr "l'elemento %d di destinazione della regola SELECT è di tipo diverso dalla colonna \"%s\"" -#: rewrite/rewriteDefine.c:718 +#: rewrite/rewriteDefine.c:724 #, c-format msgid "RETURNING list's entry %d has different type from column \"%s\"" msgstr "l'elemento %d della lista RETURNING è di tipo diverso dalla colonna \"%s\"" -#: rewrite/rewriteDefine.c:721 rewrite/rewriteDefine.c:745 +#: rewrite/rewriteDefine.c:727 rewrite/rewriteDefine.c:751 #, c-format msgid "SELECT target entry has type %s, but column has type %s." msgstr "L'elemento di destinazione di SELECT è di tipo %s, ma la colonna è di tipo %s." -#: rewrite/rewriteDefine.c:724 rewrite/rewriteDefine.c:749 +#: rewrite/rewriteDefine.c:730 rewrite/rewriteDefine.c:755 #, c-format msgid "RETURNING list entry has type %s, but column has type %s." msgstr "la lista di elementi di RETURNING è di tipo %s, ma la colonna è di tipo %s." -#: rewrite/rewriteDefine.c:740 +#: rewrite/rewriteDefine.c:746 #, c-format msgid "SELECT rule's target entry %d has different size from column \"%s\"" msgstr "l'elemento %d di destinazione della regola SELECT ha dimensione diversa dalla colonna \"%s\"" -#: rewrite/rewriteDefine.c:742 +#: rewrite/rewriteDefine.c:748 #, c-format msgid "RETURNING list's entry %d has different size from column \"%s\"" msgstr "l'elemento %d della lista RETURNING ha dimensione diversa dalla colonna \"%s\"" -#: rewrite/rewriteDefine.c:759 +#: rewrite/rewriteDefine.c:765 #, c-format msgid "SELECT rule's target list has too few entries" msgstr "la lista di destinazione della regola SELECT ha troppo pochi elementi" -#: rewrite/rewriteDefine.c:760 +#: rewrite/rewriteDefine.c:766 #, c-format msgid "RETURNING list has too few entries" msgstr "la lista RETURNING ha troppo pochi elementi" -#: rewrite/rewriteDefine.c:852 rewrite/rewriteDefine.c:966 +#: rewrite/rewriteDefine.c:858 rewrite/rewriteDefine.c:972 #: rewrite/rewriteSupport.c:109 #, c-format msgid "rule \"%s\" for relation \"%s\" does not exist" msgstr "la regola \"%s\" per la relazione \"%s\" non esiste" -#: rewrite/rewriteDefine.c:985 +#: rewrite/rewriteDefine.c:991 #, c-format msgid "renaming an ON SELECT rule is not allowed" msgstr "non è consentito rinominare una regola ON SELECT" @@ -17399,152 +17409,152 @@ msgstr "Usa OVERRIDING SYSTEM VALUE per sovrascrivere." msgid "column \"%s\" can only be updated to DEFAULT" msgstr "la colonna \"%s\" può essere modificata solo a DEFAULT" -#: rewrite/rewriteHandler.c:984 rewrite/rewriteHandler.c:1002 +#: rewrite/rewriteHandler.c:1005 rewrite/rewriteHandler.c:1023 #, c-format msgid "multiple assignments to same column \"%s\"" msgstr "più di un assegnamento alla stessa colonna \"%s\"" -#: rewrite/rewriteHandler.c:1778 rewrite/rewriteHandler.c:3397 +#: rewrite/rewriteHandler.c:1809 rewrite/rewriteHandler.c:3431 #, c-format msgid "infinite recursion detected in rules for relation \"%s\"" msgstr "ricorsione infinita individuata nelle regole per la relazione \"%s\"" -#: rewrite/rewriteHandler.c:1863 +#: rewrite/rewriteHandler.c:1895 #, c-format msgid "infinite recursion detected in policy for relation \"%s\"" msgstr "ricorsione infinita individuata nella regola di sicurezza per la relazione \"%s\"" -#: rewrite/rewriteHandler.c:2180 +#: rewrite/rewriteHandler.c:2212 msgid "Junk view columns are not updatable." msgstr "Le colonne junk di una vista non sono aggiornabili."
-#: rewrite/rewriteHandler.c:2185 +#: rewrite/rewriteHandler.c:2217 msgid "View columns that are not columns of their base relation are not updatable." msgstr "Le colonne di vista che non sono colonne della loro relazione di base non sono aggiornabili." -#: rewrite/rewriteHandler.c:2188 +#: rewrite/rewriteHandler.c:2220 msgid "View columns that refer to system columns are not updatable." msgstr "Le colonne di vista che si riferiscono a colonne di sistema non sono aggiornabili." -#: rewrite/rewriteHandler.c:2191 +#: rewrite/rewriteHandler.c:2223 msgid "View columns that return whole-row references are not updatable." msgstr "Le colonne di vista che restituiscono riferimenti a righe intere non sono aggiornabili." -#: rewrite/rewriteHandler.c:2249 +#: rewrite/rewriteHandler.c:2281 msgid "Views containing DISTINCT are not automatically updatable." msgstr "Le viste contenenti DISTINCT non sono aggiornabili automaticamente." -#: rewrite/rewriteHandler.c:2252 +#: rewrite/rewriteHandler.c:2284 msgid "Views containing GROUP BY are not automatically updatable." msgstr "Le viste contenenti GROUP BY non sono aggiornabili automaticamente." -#: rewrite/rewriteHandler.c:2255 +#: rewrite/rewriteHandler.c:2287 msgid "Views containing HAVING are not automatically updatable." msgstr "Le viste contenenti HAVING non sono aggiornabili automaticamente." -#: rewrite/rewriteHandler.c:2258 +#: rewrite/rewriteHandler.c:2290 msgid "Views containing UNION, INTERSECT, or EXCEPT are not automatically updatable." msgstr "Le viste che contengono UNION, INTERSECT o EXCEPT non sono automaticamente aggiornabili." -#: rewrite/rewriteHandler.c:2261 +#: rewrite/rewriteHandler.c:2293 msgid "Views containing WITH are not automatically updatable." msgstr "Le viste contenenti WITH non sono aggiornabili automaticamente." -#: rewrite/rewriteHandler.c:2264 +#: rewrite/rewriteHandler.c:2296 msgid "Views containing LIMIT or OFFSET are not automatically updatable." msgstr "Le viste contenenti LIMIT o OFFSET non sono aggiornabili automaticamente." -#: rewrite/rewriteHandler.c:2276 +#: rewrite/rewriteHandler.c:2308 msgid "Views that return aggregate functions are not automatically updatable." msgstr "Le viste che restituiscono funzioni di aggregazione non sono aggiornabili automaticamente." -#: rewrite/rewriteHandler.c:2279 +#: rewrite/rewriteHandler.c:2311 msgid "Views that return window functions are not automatically updatable." msgstr "Le viste che restituiscono funzioni finestra non sono aggiornabili automaticamente." -#: rewrite/rewriteHandler.c:2282 +#: rewrite/rewriteHandler.c:2314 msgid "Views that return set-returning functions are not automatically updatable." msgstr "Le viste che restituiscono funzioni insieme non sono aggiornabili automaticamente" -#: rewrite/rewriteHandler.c:2289 rewrite/rewriteHandler.c:2293 -#: rewrite/rewriteHandler.c:2301 +#: rewrite/rewriteHandler.c:2321 rewrite/rewriteHandler.c:2325 +#: rewrite/rewriteHandler.c:2333 msgid "Views that do not select from a single table or view are not automatically updatable." msgstr "Le viste che non leggono da una singola tabella o vista non sono aggiornabili automaticamente." -#: rewrite/rewriteHandler.c:2304 +#: rewrite/rewriteHandler.c:2336 msgid "Views containing TABLESAMPLE are not automatically updatable." msgstr "Le viste che contengono TABLESAMPLE non sono automaticamente aggiornabili." -#: rewrite/rewriteHandler.c:2328 +#: rewrite/rewriteHandler.c:2360 msgid "Views that have no updatable columns are not automatically updatable." 
msgstr "Le viste che non hanno colonne aggiornabili non sono automaticamente aggiornabili." -#: rewrite/rewriteHandler.c:2780 +#: rewrite/rewriteHandler.c:2814 #, c-format msgid "cannot insert into column \"%s\" of view \"%s\"" msgstr "non si può inserire nella colonna \"%s\" della vista \"%s\"" -#: rewrite/rewriteHandler.c:2788 +#: rewrite/rewriteHandler.c:2822 #, c-format msgid "cannot update column \"%s\" of view \"%s\"" msgstr "non si può modificare la colonna \"%s\" della vista \"%s\"" -#: rewrite/rewriteHandler.c:3191 +#: rewrite/rewriteHandler.c:3225 #, c-format msgid "DO INSTEAD NOTHING rules are not supported for data-modifying statements in WITH" msgstr "le regole DO INSTEAD NOTHING non sono supportate per istruzioni di modifica dei dati nel WITH" -#: rewrite/rewriteHandler.c:3205 +#: rewrite/rewriteHandler.c:3239 #, c-format msgid "conditional DO INSTEAD rules are not supported for data-modifying statements in WITH" msgstr "le regole DO INSTEAD NOTHING condizionali non sono supportate per istruzioni di modifica dei dati nel WITH" -#: rewrite/rewriteHandler.c:3209 +#: rewrite/rewriteHandler.c:3243 #, c-format msgid "DO ALSO rules are not supported for data-modifying statements in WITH" msgstr "le regole DO ALSO non sono supportate per istruzioni di modifica dei dati nel WITH" -#: rewrite/rewriteHandler.c:3214 +#: rewrite/rewriteHandler.c:3248 #, c-format msgid "multi-statement DO INSTEAD rules are not supported for data-modifying statements in WITH" msgstr "le regole DO INSTEAD multi-istruzione non sono supportate per istruzioni di modifica dei dati nel WITH" -#: rewrite/rewriteHandler.c:3434 +#: rewrite/rewriteHandler.c:3468 #, c-format msgid "cannot perform INSERT RETURNING on relation \"%s\"" msgstr "non è possibile eseguire INSERT RETURNING sulla relazione \"%s\"" -#: rewrite/rewriteHandler.c:3436 +#: rewrite/rewriteHandler.c:3470 #, c-format msgid "You need an unconditional ON INSERT DO INSTEAD rule with a RETURNING clause." msgstr "È necessaria una regola ON INSERT DO INSTEAD non condizionale con una clausola RETURNING." -#: rewrite/rewriteHandler.c:3441 +#: rewrite/rewriteHandler.c:3475 #, c-format msgid "cannot perform UPDATE RETURNING on relation \"%s\"" msgstr "non è possibile eseguire UPDATE RETURNING sulla relazione \"%s\"" -#: rewrite/rewriteHandler.c:3443 +#: rewrite/rewriteHandler.c:3477 #, c-format msgid "You need an unconditional ON UPDATE DO INSTEAD rule with a RETURNING clause." msgstr "È necessaria una regola ON UPDATE DO INSTEAD non condizionale con una clausola RETURNING." -#: rewrite/rewriteHandler.c:3448 +#: rewrite/rewriteHandler.c:3482 #, c-format msgid "cannot perform DELETE RETURNING on relation \"%s\"" msgstr "non è possibile eseguire DELETE RETURNING sulla relazione \"%s\"" -#: rewrite/rewriteHandler.c:3450 +#: rewrite/rewriteHandler.c:3484 #, c-format msgid "You need an unconditional ON DELETE DO INSTEAD rule with a RETURNING clause." msgstr "È necessaria una regola ON DELETE DO INSTEAD non condizionale con una clausola RETURNING." 
-#: rewrite/rewriteHandler.c:3468 +#: rewrite/rewriteHandler.c:3502 #, c-format msgid "INSERT with ON CONFLICT clause cannot be used with table that has INSERT or UPDATE rules" msgstr "INSERT con clausola ON CONFLICT non può essere usato in tabelle con una regola su INSERT o UPDATE" -#: rewrite/rewriteHandler.c:3525 +#: rewrite/rewriteHandler.c:3559 #, c-format msgid "WITH cannot be used in a query that is rewritten by rules into multiple queries" msgstr "WITH non può essere usato in una query che viene riscritta da regole in più di una query" @@ -17559,7 +17569,7 @@ msgstr "i comandi di utilità condizionali non sono implementati" msgid "WHERE CURRENT OF on a view is not implemented" msgstr "WHERE CURRENT OF su una vista non è implementato" -#: rewrite/rewriteManip.c:1434 +#: rewrite/rewriteManip.c:1463 #, c-format msgid "NEW variables in ON UPDATE rules cannot reference columns that are part of a multiple assignment in the subject UPDATE command" msgstr "la variabile NEW nelle regole ON UPDATE non può riferirsi a colonne che fanno parte di un assegnamento multiplo nel comando UPDATE soggetto" @@ -17590,12 +17600,12 @@ msgstr "parametro Snowball sconosciuto: \"%s\"" msgid "missing Language parameter" msgstr "parametro Language mancante" -#: statistics/dependencies.c:542 +#: statistics/dependencies.c:534 #, c-format msgid "invalid zero-length item array in MVDependencies" msgstr "elemento array a lunghezza zero non valido in MVDependencies" -#: statistics/dependencies.c:673 statistics/dependencies.c:726 +#: statistics/dependencies.c:665 statistics/dependencies.c:718 #: statistics/mvdistinct.c:338 statistics/mvdistinct.c:391 #: utils/adt/pseudotypes.c:94 utils/adt/pseudotypes.c:122 #: utils/adt/pseudotypes.c:147 utils/adt/pseudotypes.c:171 @@ -17777,79 +17787,79 @@ msgstr "il segmento di controllo della memoria dinamica condivisa non è valido" msgid "too many dynamic shared memory segments" msgstr "troppi segmenti di memoria dinamica condivisa" -#: storage/ipc/dsm_impl.c:261 storage/ipc/dsm_impl.c:361 -#: storage/ipc/dsm_impl.c:533 storage/ipc/dsm_impl.c:648 -#: storage/ipc/dsm_impl.c:819 storage/ipc/dsm_impl.c:963 +#: storage/ipc/dsm_impl.c:262 storage/ipc/dsm_impl.c:363 +#: storage/ipc/dsm_impl.c:580 storage/ipc/dsm_impl.c:695 +#: storage/ipc/dsm_impl.c:866 storage/ipc/dsm_impl.c:1010 #, c-format msgid "could not unmap shared memory segment \"%s\": %m" msgstr "unmap del segmento di memoria condivisa \"%s\" fallito: %m" -#: storage/ipc/dsm_impl.c:271 storage/ipc/dsm_impl.c:543 -#: storage/ipc/dsm_impl.c:658 storage/ipc/dsm_impl.c:829 +#: storage/ipc/dsm_impl.c:272 storage/ipc/dsm_impl.c:590 +#: storage/ipc/dsm_impl.c:705 storage/ipc/dsm_impl.c:876 #, c-format msgid "could not remove shared memory segment \"%s\": %m" msgstr "rimozione del segmento di memoria condivisa \"%s\" fallito: %m" -#: storage/ipc/dsm_impl.c:292 storage/ipc/dsm_impl.c:729 -#: storage/ipc/dsm_impl.c:843 +#: storage/ipc/dsm_impl.c:293 storage/ipc/dsm_impl.c:776 +#: storage/ipc/dsm_impl.c:890 #, c-format msgid "could not open shared memory segment \"%s\": %m" msgstr "apertura del segmento di memoria condivisa \"%s\" fallito: %m" -#: storage/ipc/dsm_impl.c:316 storage/ipc/dsm_impl.c:559 -#: storage/ipc/dsm_impl.c:774 storage/ipc/dsm_impl.c:867 +#: storage/ipc/dsm_impl.c:317 storage/ipc/dsm_impl.c:606 +#: storage/ipc/dsm_impl.c:821 storage/ipc/dsm_impl.c:914 #, c-format msgid "could not stat shared memory segment \"%s\": %m" msgstr "lettura informazioni sul segmento di memoria condivisa \"%s\" fallito: %m" -#: 
storage/ipc/dsm_impl.c:335 storage/ipc/dsm_impl.c:886 -#: storage/ipc/dsm_impl.c:936 +#: storage/ipc/dsm_impl.c:337 storage/ipc/dsm_impl.c:933 +#: storage/ipc/dsm_impl.c:983 #, c-format msgid "could not resize shared memory segment \"%s\" to %zu bytes: %m" msgstr "ridimensionamento del segmento di memoria condivisa \"%s\" a %zu byte fallito: %m" -#: storage/ipc/dsm_impl.c:385 storage/ipc/dsm_impl.c:580 -#: storage/ipc/dsm_impl.c:750 storage/ipc/dsm_impl.c:987 +#: storage/ipc/dsm_impl.c:387 storage/ipc/dsm_impl.c:627 +#: storage/ipc/dsm_impl.c:797 storage/ipc/dsm_impl.c:1034 #, c-format msgid "could not map shared memory segment \"%s\": %m" msgstr "map del segmento di memoria condivisa \"%s\" fallito: %m" -#: storage/ipc/dsm_impl.c:515 +#: storage/ipc/dsm_impl.c:562 #, c-format msgid "could not get shared memory segment: %m" msgstr "impossibile ottenere un segmento di memoria condivisa: %m" -#: storage/ipc/dsm_impl.c:714 +#: storage/ipc/dsm_impl.c:761 #, c-format msgid "could not create shared memory segment \"%s\": %m" msgstr "creazione del segmento di memoria condivisa \"%s\" fallito: %m" -#: storage/ipc/dsm_impl.c:1029 storage/ipc/dsm_impl.c:1077 +#: storage/ipc/dsm_impl.c:1076 storage/ipc/dsm_impl.c:1124 #, c-format msgid "could not duplicate handle for \"%s\": %m" msgstr "duplicazione dell'handle per \"%s\" fallita: %m" -#: storage/ipc/latch.c:828 +#: storage/ipc/latch.c:829 #, c-format msgid "epoll_ctl() failed: %m" msgstr "epoll_ctl() fallita: %m" -#: storage/ipc/latch.c:1057 +#: storage/ipc/latch.c:1060 #, c-format msgid "epoll_wait() failed: %m" msgstr "epoll_wait() fallita: %m" -#: storage/ipc/latch.c:1179 +#: storage/ipc/latch.c:1182 #, c-format msgid "poll() failed: %m" msgstr "poll() fallito: %m" -#: storage/ipc/shm_toc.c:108 storage/ipc/shm_toc.c:189 storage/lmgr/lock.c:883 +#: storage/ipc/shm_toc.c:108 storage/ipc/shm_toc.c:190 storage/lmgr/lock.c:883 #: storage/lmgr/lock.c:917 storage/lmgr/lock.c:2679 storage/lmgr/lock.c:4004 #: storage/lmgr/lock.c:4069 storage/lmgr/lock.c:4361 -#: storage/lmgr/predicate.c:2382 storage/lmgr/predicate.c:2397 -#: storage/lmgr/predicate.c:3789 storage/lmgr/predicate.c:4932 -#: utils/hash/dynahash.c:1043 +#: storage/lmgr/predicate.c:2401 storage/lmgr/predicate.c:2416 +#: storage/lmgr/predicate.c:3808 storage/lmgr/predicate.c:4951 +#: utils/hash/dynahash.c:1061 #, c-format msgid "out of shared memory" msgstr "memoria condivisa esaurita" @@ -17879,12 +17889,12 @@ msgstr "memoria condivisa per la struttura di dati \"%s\" insufficiente (richies msgid "requested shared memory size overflows size_t" msgstr "la dimensione richiesta di memoria condivisa supera size_t" -#: storage/ipc/standby.c:531 tcop/postgres.c:2970 +#: storage/ipc/standby.c:531 tcop/postgres.c:2983 #, c-format msgid "canceling statement due to conflict with recovery" msgstr "annullamento dell'istruzione a causa di un conflitto con il ripristino" -#: storage/ipc/standby.c:532 tcop/postgres.c:2277 +#: storage/ipc/standby.c:532 tcop/postgres.c:2271 #, c-format msgid "User transaction caused buffer deadlock with recovery." msgstr "La transazione utente ha causato un deadlock del buffer con il ripristino." @@ -18045,82 +18055,82 @@ msgstr "Potrebbe essere necessario incrementare max_locks_per_transaction." 
msgid "cannot PREPARE while holding both session-level and transaction-level locks on the same object" msgstr "non è possibile eseguire PREPARE tenendo sia lock a livello di sessione che di transazione sullo stesso oggetto" -#: storage/lmgr/predicate.c:683 +#: storage/lmgr/predicate.c:686 #, c-format msgid "not enough elements in RWConflictPool to record a read/write conflict" msgstr "elementi non sufficienti in RWConflictPool per registrare un conflitto di lettura/scrittura" -#: storage/lmgr/predicate.c:684 storage/lmgr/predicate.c:712 +#: storage/lmgr/predicate.c:687 storage/lmgr/predicate.c:715 #, c-format msgid "You might need to run fewer transactions at a time or increase max_connections." msgstr "Potrebbe essere necessario eseguire meno transazioni per volta oppure incrementare max_connections." -#: storage/lmgr/predicate.c:711 +#: storage/lmgr/predicate.c:714 #, c-format msgid "not enough elements in RWConflictPool to record a potential read/write conflict" msgstr "elementi non sufficienti in RWConflictPool per registrare un potenziale conflitto di lettura/scrittura" -#: storage/lmgr/predicate.c:917 +#: storage/lmgr/predicate.c:921 #, c-format msgid "memory for serializable conflict tracking is nearly exhausted" msgstr "la memoria per il tracciamento dei conflitti di serializzazione è quasi esaurita" -#: storage/lmgr/predicate.c:918 +#: storage/lmgr/predicate.c:922 #, c-format msgid "There might be an idle transaction or a forgotten prepared transaction causing this." msgstr "Ciò potrebbe essere causato da una transazione inattiva o una transazione preparata dimenticata." -#: storage/lmgr/predicate.c:1545 +#: storage/lmgr/predicate.c:1561 #, c-format msgid "deferrable snapshot was unsafe; trying a new one" msgstr "lo snapshot deferibile era insicuro; ne sto provando uno nuovo" -#: storage/lmgr/predicate.c:1634 +#: storage/lmgr/predicate.c:1650 #, c-format msgid "\"default_transaction_isolation\" is set to \"serializable\"." msgstr "\"default_transaction_isolation\" è impostato a \"serializable\"." -#: storage/lmgr/predicate.c:1635 +#: storage/lmgr/predicate.c:1651 #, c-format msgid "You can use \"SET default_transaction_isolation = 'repeatable read'\" to change the default." msgstr "Puoi usare \"SET default_transaction_isolation = 'repeatable read'\" per cambiare il valore predefinito." -#: storage/lmgr/predicate.c:1674 +#: storage/lmgr/predicate.c:1691 #, c-format msgid "a snapshot-importing transaction must not be READ ONLY DEFERRABLE" msgstr "una transazione che importa uno snapshot non può essere READ ONLY DEFERRABLE" -#: storage/lmgr/predicate.c:1752 utils/time/snapmgr.c:617 -#: utils/time/snapmgr.c:623 +#: storage/lmgr/predicate.c:1771 utils/time/snapmgr.c:621 +#: utils/time/snapmgr.c:627 #, c-format msgid "could not import the requested snapshot" msgstr "non è stato possibile importare lo snapshot richiesto" -#: storage/lmgr/predicate.c:1753 utils/time/snapmgr.c:624 +#: storage/lmgr/predicate.c:1772 utils/time/snapmgr.c:628 #, c-format -msgid "The source transaction %u is not running anymore." -msgstr "La transazione di origine %u non è più in esecuzione." +msgid "The source process with PID %d is not running anymore." +msgstr "Il processo di origine con PID %d non è più in esecuzione." -#: storage/lmgr/predicate.c:2383 storage/lmgr/predicate.c:2398 -#: storage/lmgr/predicate.c:3790 +#: storage/lmgr/predicate.c:2402 storage/lmgr/predicate.c:2417 +#: storage/lmgr/predicate.c:3809 #, c-format msgid "You might need to increase max_pred_locks_per_transaction." 
msgstr "Potrebbe essere necessario incrementare max_pred_locks_per_transaction." -#: storage/lmgr/predicate.c:3944 storage/lmgr/predicate.c:4033 -#: storage/lmgr/predicate.c:4041 storage/lmgr/predicate.c:4080 -#: storage/lmgr/predicate.c:4319 storage/lmgr/predicate.c:4656 -#: storage/lmgr/predicate.c:4668 storage/lmgr/predicate.c:4710 -#: storage/lmgr/predicate.c:4748 +#: storage/lmgr/predicate.c:3963 storage/lmgr/predicate.c:4052 +#: storage/lmgr/predicate.c:4060 storage/lmgr/predicate.c:4099 +#: storage/lmgr/predicate.c:4338 storage/lmgr/predicate.c:4675 +#: storage/lmgr/predicate.c:4687 storage/lmgr/predicate.c:4729 +#: storage/lmgr/predicate.c:4767 #, c-format msgid "could not serialize access due to read/write dependencies among transactions" msgstr "serializzazione dell'accesso fallita a causa di dipendenze di lettura/scrittura tra le transazioni" -#: storage/lmgr/predicate.c:3946 storage/lmgr/predicate.c:4035 -#: storage/lmgr/predicate.c:4043 storage/lmgr/predicate.c:4082 -#: storage/lmgr/predicate.c:4321 storage/lmgr/predicate.c:4658 -#: storage/lmgr/predicate.c:4670 storage/lmgr/predicate.c:4712 -#: storage/lmgr/predicate.c:4750 +#: storage/lmgr/predicate.c:3965 storage/lmgr/predicate.c:4054 +#: storage/lmgr/predicate.c:4062 storage/lmgr/predicate.c:4101 +#: storage/lmgr/predicate.c:4340 storage/lmgr/predicate.c:4677 +#: storage/lmgr/predicate.c:4689 storage/lmgr/predicate.c:4731 +#: storage/lmgr/predicate.c:4769 #, c-format msgid "The transaction might succeed if retried." msgstr "La transazione potrebbe riuscire se ritentata." @@ -18187,12 +18197,17 @@ msgstr "puntatore di elemento corrotto: %u" msgid "corrupted item lengths: total %u, available space %u" msgstr "lunghezza dell'elemento corrotta: totale %u, spazio disponibile %u" -#: storage/page/bufpage.c:767 storage/page/bufpage.c:905 -#: storage/page/bufpage.c:993 storage/page/bufpage.c:1103 +#: storage/page/bufpage.c:767 storage/page/bufpage.c:993 +#: storage/page/bufpage.c:1103 #, c-format msgid "corrupted item pointer: offset = %u, size = %u" msgstr "puntatore di elemento corrotto: offset = %u, size = %u" +#: storage/page/bufpage.c:905 +#, c-format +msgid "corrupted item pointer: offset = %u, length = %u" +msgstr "puntatore di elemeno corrotto: offset = %u, lunghezza = %u" + #: storage/smgr/md.c:448 storage/smgr/md.c:974 #, c-format msgid "could not truncate file \"%s\": %m" @@ -18283,8 +18298,8 @@ msgstr "La dimensione dell'argomento %d non è valida nel messaggi di chiamata d msgid "fastpath function call: \"%s\" (OID %u)" msgstr "chiamata funzione fastpath: \"%s\" (OID %u)" -#: tcop/fastpath.c:391 tcop/postgres.c:1175 tcop/postgres.c:1438 -#: tcop/postgres.c:1818 tcop/postgres.c:2036 +#: tcop/fastpath.c:391 tcop/postgres.c:1169 tcop/postgres.c:1432 +#: tcop/postgres.c:1812 tcop/postgres.c:2030 #, c-format msgid "duration: %s ms" msgstr "durata: %s ms" @@ -18309,156 +18324,156 @@ msgstr "la chiamata alla funzione contiene %d formati di parametri ma %d paramet msgid "incorrect binary data format in function argument %d" msgstr "formato dei dati binari non corretto nell'argomento %d della funzione" -#: tcop/postgres.c:352 tcop/postgres.c:388 tcop/postgres.c:415 +#: tcop/postgres.c:346 tcop/postgres.c:382 tcop/postgres.c:409 #, c-format msgid "unexpected EOF on client connection" msgstr "fine file inaspettata nella connessione al client" -#: tcop/postgres.c:438 tcop/postgres.c:450 tcop/postgres.c:461 -#: tcop/postgres.c:473 tcop/postgres.c:4301 +#: tcop/postgres.c:432 tcop/postgres.c:444 tcop/postgres.c:455 +#: 
tcop/postgres.c:467 tcop/postgres.c:4314 #, c-format msgid "invalid frontend message type %d" msgstr "messaggio frontend di tipo %d non valido" -#: tcop/postgres.c:944 +#: tcop/postgres.c:938 #, c-format msgid "statement: %s" msgstr "istruzione: %s" -#: tcop/postgres.c:1180 +#: tcop/postgres.c:1174 #, c-format msgid "duration: %s ms statement: %s" msgstr "durata: %s ms istruzione: %s" -#: tcop/postgres.c:1230 +#: tcop/postgres.c:1224 #, c-format msgid "parse %s: %s" msgstr "analisi di %s: %s" -#: tcop/postgres.c:1286 +#: tcop/postgres.c:1280 #, c-format msgid "cannot insert multiple commands into a prepared statement" msgstr "non è possibile inserire comandi multipli in una istruzione preparata" -#: tcop/postgres.c:1443 +#: tcop/postgres.c:1437 #, c-format msgid "duration: %s ms parse %s: %s" msgstr "durata: %s ms analisi di %s: %s" -#: tcop/postgres.c:1488 +#: tcop/postgres.c:1482 #, c-format msgid "bind %s to %s" msgstr "bind di %s a %s" -#: tcop/postgres.c:1507 tcop/postgres.c:2326 +#: tcop/postgres.c:1501 tcop/postgres.c:2320 #, c-format msgid "unnamed prepared statement does not exist" msgstr "l'istruzione preparata senza nome non esiste" -#: tcop/postgres.c:1549 +#: tcop/postgres.c:1543 #, c-format msgid "bind message has %d parameter formats but %d parameters" msgstr "il messaggio di bind ha %d formati di parametri ma %d parametri" -#: tcop/postgres.c:1555 +#: tcop/postgres.c:1549 #, c-format msgid "bind message supplies %d parameters, but prepared statement \"%s\" requires %d" -msgstr "il messaggio di bind fornisce %d paramatri, ma l'istruzione preparata \"%s\" ne richiede %d" +msgstr "il messaggio di bind fornisce %d parametri, ma l'istruzione preparata \"%s\" ne richiede %d" -#: tcop/postgres.c:1725 +#: tcop/postgres.c:1719 #, c-format msgid "incorrect binary data format in bind parameter %d" msgstr "formato del dato binario errato nel parametro di bind %d" -#: tcop/postgres.c:1823 +#: tcop/postgres.c:1817 #, c-format msgid "duration: %s ms bind %s%s%s: %s" msgstr "durata: %s ms bind %s%s%s: %s" -#: tcop/postgres.c:1871 tcop/postgres.c:2406 +#: tcop/postgres.c:1865 tcop/postgres.c:2400 #, c-format msgid "portal \"%s\" does not exist" msgstr "il portale \"%s\" non esiste" -#: tcop/postgres.c:1956 +#: tcop/postgres.c:1950 #, c-format msgid "%s %s%s%s: %s" msgstr "%s %s%s%s: %s" -#: tcop/postgres.c:1958 tcop/postgres.c:2044 +#: tcop/postgres.c:1952 tcop/postgres.c:2038 msgid "execute fetch from" msgstr "esecuzione di fetch da" -#: tcop/postgres.c:1959 tcop/postgres.c:2045 +#: tcop/postgres.c:1953 tcop/postgres.c:2039 msgid "execute" msgstr "esecuzione di" -#: tcop/postgres.c:2041 +#: tcop/postgres.c:2035 #, c-format msgid "duration: %s ms %s %s%s%s: %s" msgstr "durata: %s ms %s %s%s%s: %s" -#: tcop/postgres.c:2167 +#: tcop/postgres.c:2161 #, c-format msgid "prepare: %s" msgstr "preparazione: %s" -#: tcop/postgres.c:2230 +#: tcop/postgres.c:2224 #, c-format msgid "parameters: %s" msgstr "parametri: %s" -#: tcop/postgres.c:2249 +#: tcop/postgres.c:2243 #, c-format msgid "abort reason: recovery conflict" msgstr "motivo dell'interruzione: conflitto di recupero" -#: tcop/postgres.c:2265 +#: tcop/postgres.c:2259 #, c-format msgid "User was holding shared buffer pin for too long." msgstr "L'utente stava trattenendo un pin di shared buffer troppo a lungo." -#: tcop/postgres.c:2268 +#: tcop/postgres.c:2262 #, c-format msgid "User was holding a relation lock for too long." msgstr "L'utente stava trattenendo un lock di relazione troppo a lungo." 
-#: tcop/postgres.c:2271 +#: tcop/postgres.c:2265 #, c-format msgid "User was or might have been using tablespace that must be dropped." msgstr "L'utente stava usando o potrebbe aver usato un tablespace che deve essere eliminato." -#: tcop/postgres.c:2274 +#: tcop/postgres.c:2268 #, c-format msgid "User query might have needed to see row versions that must be removed." msgstr "L'utente potrebbe aver avuto bisogno di vedere versioni di righe che devono essere rimosse." -#: tcop/postgres.c:2280 +#: tcop/postgres.c:2274 #, c-format msgid "User was connected to a database that must be dropped." msgstr "L'utente era connesso ad un database che deve essere eliminato." -#: tcop/postgres.c:2589 +#: tcop/postgres.c:2583 #, c-format msgid "terminating connection because of crash of another server process" msgstr "la connessione è stata terminata a causa del crash di un altro processo del server" -#: tcop/postgres.c:2590 +#: tcop/postgres.c:2584 #, c-format msgid "The postmaster has commanded this server process to roll back the current transaction and exit, because another server process exited abnormally and possibly corrupted shared memory." msgstr "Il postmaster ha obbligato questo processo del server ad attuare il roll back della transazione corrente e a uscire, perché un altro processo del server è terminato anormalmente e con possibile corruzione della memoria condivisa." -#: tcop/postgres.c:2594 tcop/postgres.c:2898 +#: tcop/postgres.c:2588 tcop/postgres.c:2913 #, c-format msgid "In a moment you should be able to reconnect to the database and repeat your command." msgstr "Tra un momento sarai in grado di riconnetterti al database e di ripetere il comando." -#: tcop/postgres.c:2680 +#: tcop/postgres.c:2674 #, c-format msgid "floating-point exception" msgstr "eccezione floating-point" -#: tcop/postgres.c:2681 +#: tcop/postgres.c:2675 #, c-format msgid "An invalid floating-point operation was signaled. This probably means an out-of-range result or an invalid operation, such as division by zero." msgstr "Un'operazione in floating-point non valida è stata segnalata. Questo probabilmente sta a significare che il risultato è un valore fuori limite o l'operazione non è valida, ad esempio una divisione per zero."
@@ -18473,122 +18488,132 @@ msgstr "annullamento dell'autenticazione a causa di timeout" msgid "terminating autovacuum process due to administrator command" msgstr "interruzione del processo autovacuum su comando dell'amministratore" -#: tcop/postgres.c:2853 tcop/postgres.c:2863 tcop/postgres.c:2896 +#: tcop/postgres.c:2851 +#, c-format +msgid "terminating logical replication worker due to administrator command" +msgstr "interruzione del worker di replica logica su comando dell'amministratore" + +#: tcop/postgres.c:2855 +#, c-format +msgid "logical replication launcher shutting down" +msgstr "lanciatore di replica logica in arresto" + +#: tcop/postgres.c:2868 tcop/postgres.c:2878 tcop/postgres.c:2911 #, c-format msgid "terminating connection due to conflict with recovery" msgstr "interruzione della connessione a causa di conflitto con il ripristino" -#: tcop/postgres.c:2869 +#: tcop/postgres.c:2884 #, c-format msgid "terminating connection due to administrator command" msgstr "interruzione della connessione su comando dell'amministratore" -#: tcop/postgres.c:2879 +#: tcop/postgres.c:2894 #, c-format msgid "connection to client lost" msgstr "connessione al client persa" -#: tcop/postgres.c:2947 +#: tcop/postgres.c:2960 #, c-format msgid "canceling statement due to lock timeout" msgstr "annullamento dell'istruzione a causa di timeout di lock" -#: tcop/postgres.c:2954 +#: tcop/postgres.c:2967 #, c-format msgid "canceling statement due to statement timeout" msgstr "annullamento dell'istruzione a causa di timeout" -#: tcop/postgres.c:2961 +#: tcop/postgres.c:2974 #, c-format msgid "canceling autovacuum task" msgstr "annullamento del task di autovacuum" -#: tcop/postgres.c:2984 +#: tcop/postgres.c:2997 #, c-format msgid "canceling statement due to user request" msgstr "annullamento dell'istruzione su richiesta dell'utente" -#: tcop/postgres.c:2994 +#: tcop/postgres.c:3007 #, c-format msgid "terminating connection due to idle-in-transaction timeout" msgstr "la connessione è stata terminata a causa di timeout di inattività durante una transazione" -#: tcop/postgres.c:3108 +#: tcop/postgres.c:3121 #, c-format msgid "stack depth limit exceeded" msgstr "limite di profondità dello stack superato" -#: tcop/postgres.c:3109 +#: tcop/postgres.c:3122 #, c-format msgid "Increase the configuration parameter \"max_stack_depth\" (currently %dkB), after ensuring the platform's stack depth limit is adequate." msgstr "Incrementa il parametro di configurazione \"max_stack_depth\" (attualmente %dkB), dopo esserti assicurato che il limite dello stack della piattaforma sia adeguato." -#: tcop/postgres.c:3172 +#: tcop/postgres.c:3185 #, c-format msgid "\"max_stack_depth\" must not exceed %ldkB." msgstr "\"max_stack_depth\" non deve superare %ldkB." -#: tcop/postgres.c:3174 +#: tcop/postgres.c:3187 #, c-format msgid "Increase the platform's stack depth limit via \"ulimit -s\" or local equivalent." msgstr "Incrementa il limite dello stack della piattaforma usando \"ulimit -s\" o un comando equivalente." -#: tcop/postgres.c:3534 +#: tcop/postgres.c:3547 #, c-format msgid "invalid command-line argument for server process: %s" msgstr "argomento della riga di comando non valido per il processo server: %s" -#: tcop/postgres.c:3535 tcop/postgres.c:3541 +#: tcop/postgres.c:3548 tcop/postgres.c:3554 #, c-format msgid "Try \"%s --help\" for more information." msgstr "Prova \"%s --help\" per maggiori informazioni."
-#: tcop/postgres.c:3539 +#: tcop/postgres.c:3552 #, c-format msgid "%s: invalid command-line argument: %s" msgstr "%s: argomento della riga di comando non valido: %s" -#: tcop/postgres.c:3601 +#: tcop/postgres.c:3614 #, c-format msgid "%s: no database nor user name specified" msgstr "%s: nessun database né nome utente specificato" -#: tcop/postgres.c:4209 +#: tcop/postgres.c:4222 #, c-format msgid "invalid CLOSE message subtype %d" msgstr "sottotipo %d del messaggio CLOSE non valido" -#: tcop/postgres.c:4244 +#: tcop/postgres.c:4257 #, c-format msgid "invalid DESCRIBE message subtype %d" msgstr "sottotipo %d del messaggio DESCRIBE non valido" -#: tcop/postgres.c:4322 +#: tcop/postgres.c:4335 #, c-format msgid "fastpath function calls not supported in a replication connection" msgstr "le chiamate di funzione fastpath non sono supportate in una connessione di replica" -#: tcop/postgres.c:4326 +#: tcop/postgres.c:4339 #, c-format msgid "extended query protocol not supported in a replication connection" msgstr "il protocollo di query esteso non è supportato in una connessione di replica" -#: tcop/postgres.c:4496 +#: tcop/postgres.c:4509 #, c-format msgid "disconnection: session time: %d:%02d:%02d.%03d user=%s database=%s host=%s%s%s" msgstr "disconnessione: tempo della sessione: %d:%02d:%02d.%03d utente=%s database=%s host=%s%s%s" -#: tcop/pquery.c:646 +#: tcop/pquery.c:645 #, c-format msgid "bind message has %d result formats but query has %d columns" msgstr "il messaggio di bind ha %d formati di risultato ma la query ha %d colonne" -#: tcop/pquery.c:953 +#: tcop/pquery.c:952 #, c-format msgid "cursor can only scan forward" msgstr "il cursore può effettuare solo scansioni in avanti" -#: tcop/pquery.c:954 +#: tcop/pquery.c:953 #, c-format msgid "Declare it with SCROLL option to enable backward scan." msgstr "Dichiaralo con l'opzione SCROLL per abilitare le scansioni all'indietro." @@ -18774,7 +18799,7 @@ msgid "invalid regular expression: %s" msgstr "espressione regolare non valida: %s" #: tsearch/spell.c:954 tsearch/spell.c:971 tsearch/spell.c:988 -#: tsearch/spell.c:1005 tsearch/spell.c:1070 gram.y:15311 gram.y:15328 +#: tsearch/spell.c:1005 tsearch/spell.c:1070 gram.y:15291 gram.y:15308 #, c-format msgid "syntax error" msgstr "errore di sintassi" @@ -18804,7 +18829,8 @@ msgstr "numero di alias nel vettore di flag non valido" msgid "affix file contains both old-style and new-style commands" msgstr "il file affix contiene comandi sia vecchio stile che nuovo stile" -#: tsearch/to_tsany.c:179 utils/adt/tsvector.c:271 utils/adt/tsvector_op.c:1134 +#: tsearch/to_tsany.c:185 utils/adt/tsvector.c:271 +#: utils/adt/tsvector_op.c:1134 #, c-format msgid "string is too long for tsvector (%d bytes, max %d bytes)" msgstr "la stringa è troppo lunga per tsvector (%d byte, massimo %d byte)" @@ -18941,59 +18967,59 @@ msgstr "gli array di ACL non possono contenere valori nulli" msgid "extra garbage at the end of the ACL specification" msgstr "ci sono caratteri spuri al termine della specifica dell'ACL" -#: utils/adt/acl.c:1196 +#: utils/adt/acl.c:1198 #, c-format msgid "grant options cannot be granted back to your own grantor" msgstr "le opzioni di concessione non possono essere concesse a chi le ha concesse a te" -#: utils/adt/acl.c:1257 +#: utils/adt/acl.c:1259 #, c-format msgid "dependent privileges exist" msgstr "esistono privilegi dipendenti" -#: utils/adt/acl.c:1258 +#: utils/adt/acl.c:1260 #, c-format msgid "Use CASCADE to revoke them too." msgstr "Usa CASCADE per revocare anch'essi."
-#: utils/adt/acl.c:1520 +#: utils/adt/acl.c:1522 #, c-format msgid "aclinsert is no longer supported" msgstr "aclinsert non è più supportato" -#: utils/adt/acl.c:1530 +#: utils/adt/acl.c:1532 #, c-format msgid "aclremove is no longer supported" msgstr "aclremove non è più supportato" -#: utils/adt/acl.c:1616 utils/adt/acl.c:1670 +#: utils/adt/acl.c:1618 utils/adt/acl.c:1672 #, c-format msgid "unrecognized privilege type: \"%s\"" msgstr "tipo di privilegio sconosciuto: \"%s\"" -#: utils/adt/acl.c:3410 utils/adt/regproc.c:101 utils/adt/regproc.c:276 +#: utils/adt/acl.c:3412 utils/adt/regproc.c:102 utils/adt/regproc.c:277 #, c-format msgid "function \"%s\" does not exist" msgstr "la funzione \"%s\" non esiste" -#: utils/adt/acl.c:4864 +#: utils/adt/acl.c:4866 #, c-format msgid "must be member of role \"%s\"" msgstr "occorre far parte del ruolo \"%s\"" #: utils/adt/array_expanded.c:274 utils/adt/arrayfuncs.c:931 #: utils/adt/arrayfuncs.c:1519 utils/adt/arrayfuncs.c:3251 -#: utils/adt/arrayfuncs.c:3389 utils/adt/arrayfuncs.c:5848 -#: utils/adt/arrayfuncs.c:6159 utils/adt/arrayutils.c:93 +#: utils/adt/arrayfuncs.c:3389 utils/adt/arrayfuncs.c:5846 +#: utils/adt/arrayfuncs.c:6157 utils/adt/arrayutils.c:93 #: utils/adt/arrayutils.c:102 utils/adt/arrayutils.c:109 #, c-format msgid "array size exceeds the maximum allowed (%d)" msgstr "la dimensione dell'array supera il massimo consentito (%d)" #: utils/adt/array_userfuncs.c:79 utils/adt/array_userfuncs.c:471 -#: utils/adt/array_userfuncs.c:551 utils/adt/json.c:1764 utils/adt/json.c:1859 -#: utils/adt/json.c:1897 utils/adt/jsonb.c:1127 utils/adt/jsonb.c:1156 -#: utils/adt/jsonb.c:1592 utils/adt/jsonb.c:1756 utils/adt/jsonb.c:1766 +#: utils/adt/array_userfuncs.c:551 utils/adt/json.c:1765 utils/adt/json.c:1860 +#: utils/adt/json.c:1898 utils/adt/jsonb.c:1128 utils/adt/jsonb.c:1157 +#: utils/adt/jsonb.c:1549 utils/adt/jsonb.c:1713 utils/adt/jsonb.c:1723 #, c-format msgid "could not determine input data type" msgstr "non è stato possibile determinare il tipo di dato di input" @@ -19012,7 +19038,7 @@ msgstr "il tipo di dati in input non è un array" #: utils/adt/int.c:1001 utils/adt/int.c:1028 utils/adt/int.c:1061 #: utils/adt/int.c:1144 utils/adt/int8.c:1298 utils/adt/numeric.c:2953 #: utils/adt/numeric.c:2962 utils/adt/varbit.c:1173 utils/adt/varbit.c:1575 -#: utils/adt/varlena.c:1054 utils/adt/varlena.c:2940 +#: utils/adt/varlena.c:1054 utils/adt/varlena.c:2957 #, c-format msgid "integer out of range" msgstr "intero fuori dall'intervallo" @@ -19134,7 +19160,7 @@ msgstr "Elemento dell'array inatteso." msgid "Unmatched \"%c\" character." msgstr "Il carattere \"%c\" non combacia." -#: utils/adt/arrayfuncs.c:597 utils/adt/jsonfuncs.c:2380 +#: utils/adt/arrayfuncs.c:597 utils/adt/jsonfuncs.c:2381 #, c-format msgid "Multidimensional arrays must have sub-arrays with matching dimensions." msgstr "Gli array multidimensionali devono avere sotto-array con dimensioni corrispondenti." @@ -19145,7 +19171,7 @@ msgid "Junk after closing right brace." msgstr "Caratteri spuri dopo la parentesi chiusa." 
#: utils/adt/arrayfuncs.c:1284 utils/adt/arrayfuncs.c:3357 -#: utils/adt/arrayfuncs.c:5754 +#: utils/adt/arrayfuncs.c:5752 #, c-format msgid "invalid number of dimensions: %d" msgstr "numero di dimensioni non valido: %d" @@ -19184,11 +19210,12 @@ msgstr "le sezioni di array a lunghezza fissa non sono implementate" #: utils/adt/arrayfuncs.c:2230 utils/adt/arrayfuncs.c:2252 #: utils/adt/arrayfuncs.c:2301 utils/adt/arrayfuncs.c:2537 -#: utils/adt/arrayfuncs.c:2848 utils/adt/arrayfuncs.c:5740 -#: utils/adt/arrayfuncs.c:5766 utils/adt/arrayfuncs.c:5777 -#: utils/adt/json.c:2295 utils/adt/json.c:2370 utils/adt/jsonb.c:1370 -#: utils/adt/jsonb.c:1456 utils/adt/jsonfuncs.c:4125 utils/adt/jsonfuncs.c:4276 -#: utils/adt/jsonfuncs.c:4321 utils/adt/jsonfuncs.c:4368 +#: utils/adt/arrayfuncs.c:2848 utils/adt/arrayfuncs.c:5738 +#: utils/adt/arrayfuncs.c:5764 utils/adt/arrayfuncs.c:5775 +#: utils/adt/json.c:2259 utils/adt/json.c:2334 utils/adt/jsonb.c:1327 +#: utils/adt/jsonb.c:1413 utils/adt/jsonfuncs.c:4158 +#: utils/adt/jsonfuncs.c:4309 utils/adt/jsonfuncs.c:4354 +#: utils/adt/jsonfuncs.c:4401 #, c-format msgid "wrong number of array subscripts" msgstr "il numero di indici di array è errato" @@ -19240,57 +19267,57 @@ msgstr "non è possibile confrontare array con elementi di tipo diverso" msgid "could not identify a hash function for type %s" msgstr "non è stato possibile trovare una funzione di hash per il tipo %s" -#: utils/adt/arrayfuncs.c:5154 +#: utils/adt/arrayfuncs.c:5152 #, c-format msgid "data type %s is not an array type" msgstr "il tipo di dati %s non è un tipo array" -#: utils/adt/arrayfuncs.c:5209 +#: utils/adt/arrayfuncs.c:5207 #, c-format msgid "cannot accumulate null arrays" msgstr "non è possibile accumulare array nulli" -#: utils/adt/arrayfuncs.c:5237 +#: utils/adt/arrayfuncs.c:5235 #, c-format msgid "cannot accumulate empty arrays" msgstr "non è possibile accumulare array vuoti" -#: utils/adt/arrayfuncs.c:5266 utils/adt/arrayfuncs.c:5272 +#: utils/adt/arrayfuncs.c:5264 utils/adt/arrayfuncs.c:5270 #, c-format msgid "cannot accumulate arrays of different dimensionality" msgstr "non è possibile accumulare array di dimensioni diverse" -#: utils/adt/arrayfuncs.c:5638 utils/adt/arrayfuncs.c:5678 +#: utils/adt/arrayfuncs.c:5636 utils/adt/arrayfuncs.c:5676 #, c-format msgid "dimension array or low bound array cannot be null" msgstr "la dimensione dell'array o il suo limite inferiore non possono essere nulli" -#: utils/adt/arrayfuncs.c:5741 utils/adt/arrayfuncs.c:5767 +#: utils/adt/arrayfuncs.c:5739 utils/adt/arrayfuncs.c:5765 #, c-format msgid "Dimension array must be one dimensional." msgstr "L'array delle dimensioni deve avere una sola dimensione." -#: utils/adt/arrayfuncs.c:5746 utils/adt/arrayfuncs.c:5772 +#: utils/adt/arrayfuncs.c:5744 utils/adt/arrayfuncs.c:5770 #, c-format msgid "dimension values cannot be null" msgstr "i valori di dimensione non possono essere nulli" -#: utils/adt/arrayfuncs.c:5778 +#: utils/adt/arrayfuncs.c:5776 #, c-format msgid "Low bound array has different size than dimensions array." msgstr "L'array dei valori inferiori ha dimensione differente dal numero di dimensioni dell'array." 
-#: utils/adt/arrayfuncs.c:6024 +#: utils/adt/arrayfuncs.c:6022 #, c-format msgid "removing elements from multidimensional arrays is not supported" msgstr "la rimozione di elementi da array multidimensionali non è supportata" -#: utils/adt/arrayfuncs.c:6301 +#: utils/adt/arrayfuncs.c:6299 #, c-format msgid "thresholds must be one-dimensional array" msgstr "la soglia dev'essere un array monodimensionale" -#: utils/adt/arrayfuncs.c:6306 +#: utils/adt/arrayfuncs.c:6304 #, c-format msgid "thresholds array must not contain NULLs" msgstr "l'array delle soglie non può contenere NULL" @@ -19327,8 +19354,8 @@ msgstr "la conversione di codifica da %s a ASCII non è supportata" #: utils/adt/mac8.c:93 utils/adt/mac8.c:166 utils/adt/mac8.c:184 #: utils/adt/mac8.c:202 utils/adt/mac8.c:221 utils/adt/nabstime.c:1539 #: utils/adt/network.c:58 utils/adt/numeric.c:593 utils/adt/numeric.c:620 -#: utils/adt/numeric.c:5488 utils/adt/numeric.c:5512 utils/adt/numeric.c:5536 -#: utils/adt/numeric.c:6338 utils/adt/numeric.c:6364 utils/adt/oid.c:44 +#: utils/adt/numeric.c:5498 utils/adt/numeric.c:5522 utils/adt/numeric.c:5546 +#: utils/adt/numeric.c:6348 utils/adt/numeric.c:6374 utils/adt/oid.c:44 #: utils/adt/oid.c:58 utils/adt/oid.c:64 utils/adt/oid.c:86 #: utils/adt/pg_lsn.c:44 utils/adt/pg_lsn.c:50 utils/adt/tid.c:72 #: utils/adt/tid.c:80 utils/adt/tid.c:88 utils/adt/txid.c:405 @@ -19351,8 +19378,9 @@ msgstr "valore \"%s\" fuori dall'intervallo consentito per il tipo %s" #: utils/adt/int.c:846 utils/adt/int.c:954 utils/adt/int.c:1043 #: utils/adt/int.c:1082 utils/adt/int.c:1110 utils/adt/int8.c:597 #: utils/adt/int8.c:657 utils/adt/int8.c:897 utils/adt/int8.c:1005 -#: utils/adt/int8.c:1094 utils/adt/int8.c:1202 utils/adt/numeric.c:6902 -#: utils/adt/numeric.c:7191 utils/adt/numeric.c:8204 utils/adt/timestamp.c:3216 +#: utils/adt/int8.c:1094 utils/adt/int8.c:1202 utils/adt/numeric.c:6912 +#: utils/adt/numeric.c:7201 utils/adt/numeric.c:8213 +#: utils/adt/timestamp.c:3216 #, c-format msgid "division by zero" msgstr "divisione per zero" @@ -19383,8 +19411,8 @@ msgstr "la precisione di TIME(%d)%s è stata ridotta al massimo consentito (%d)" msgid "date/time value \"current\" is no longer supported" msgstr "il valore \"current\" per i tipi date/time non è più supportato" -#: utils/adt/date.c:172 utils/adt/date.c:180 utils/adt/formatting.c:3582 -#: utils/adt/formatting.c:3591 +#: utils/adt/date.c:172 utils/adt/date.c:180 utils/adt/formatting.c:3585 +#: utils/adt/formatting.c:3594 #, c-format msgid "date out of range: \"%s\"" msgstr "data fuori dall'intervallo consentito: \"%s\"" @@ -19408,10 +19436,10 @@ msgstr "data fuori dall'intervallo consentito: %d-%02d-%02d" #: utils/adt/date.c:327 utils/adt/date.c:350 utils/adt/date.c:376 #: utils/adt/date.c:1092 utils/adt/date.c:1138 utils/adt/date.c:1672 #: utils/adt/date.c:1703 utils/adt/date.c:1732 utils/adt/date.c:2469 -#: utils/adt/datetime.c:1690 utils/adt/formatting.c:3457 -#: utils/adt/formatting.c:3489 utils/adt/formatting.c:3557 -#: utils/adt/json.c:1539 utils/adt/json.c:1561 utils/adt/jsonb.c:824 -#: utils/adt/jsonb.c:848 utils/adt/nabstime.c:456 utils/adt/nabstime.c:499 +#: utils/adt/datetime.c:1690 utils/adt/formatting.c:3460 +#: utils/adt/formatting.c:3492 utils/adt/formatting.c:3560 +#: utils/adt/json.c:1540 utils/adt/json.c:1562 utils/adt/jsonb.c:825 +#: utils/adt/jsonb.c:849 utils/adt/nabstime.c:456 utils/adt/nabstime.c:499 #: utils/adt/nabstime.c:529 utils/adt/nabstime.c:572 utils/adt/timestamp.c:230 #: utils/adt/timestamp.c:262 utils/adt/timestamp.c:692 #: 
utils/adt/timestamp.c:701 utils/adt/timestamp.c:779 @@ -19587,47 +19615,37 @@ msgstr "fine sequenza base64 non valida" msgid "Input data is missing padding, is truncated, or is otherwise corrupted." msgstr "I dati di input mancano di padding, sono troncati o comunque corrotti." -#: utils/adt/encode.c:442 utils/adt/encode.c:507 utils/adt/json.c:785 -#: utils/adt/json.c:825 utils/adt/json.c:841 utils/adt/json.c:853 -#: utils/adt/json.c:863 utils/adt/json.c:914 utils/adt/json.c:946 -#: utils/adt/json.c:965 utils/adt/json.c:977 utils/adt/json.c:989 -#: utils/adt/json.c:1134 utils/adt/json.c:1148 utils/adt/json.c:1159 -#: utils/adt/json.c:1167 utils/adt/json.c:1175 utils/adt/json.c:1183 -#: utils/adt/json.c:1191 utils/adt/json.c:1199 utils/adt/json.c:1207 -#: utils/adt/json.c:1215 utils/adt/json.c:1245 utils/adt/varlena.c:296 +#: utils/adt/encode.c:442 utils/adt/encode.c:507 utils/adt/json.c:786 +#: utils/adt/json.c:826 utils/adt/json.c:842 utils/adt/json.c:854 +#: utils/adt/json.c:864 utils/adt/json.c:915 utils/adt/json.c:947 +#: utils/adt/json.c:966 utils/adt/json.c:978 utils/adt/json.c:990 +#: utils/adt/json.c:1135 utils/adt/json.c:1149 utils/adt/json.c:1160 +#: utils/adt/json.c:1168 utils/adt/json.c:1176 utils/adt/json.c:1184 +#: utils/adt/json.c:1192 utils/adt/json.c:1200 utils/adt/json.c:1208 +#: utils/adt/json.c:1216 utils/adt/json.c:1246 utils/adt/varlena.c:296 #: utils/adt/varlena.c:337 #, c-format msgid "invalid input syntax for type %s" msgstr "sintassi di input non valida per il tipo %s" -#: utils/adt/enum.c:115 -#, c-format -msgid "unsafe use of new value \"%s\" of enum type %s" -msgstr "uso non sicuro del nuovo valore \"%s\" dell'enumerazione %s" - -#: utils/adt/enum.c:118 -#, c-format -msgid "New enum values must be committed before they can be used." -msgstr "I nuovi valori di enumerazione devono ricevere un commit prima di poter essere usati." 
- -#: utils/adt/enum.c:136 utils/adt/enum.c:146 utils/adt/enum.c:204 -#: utils/adt/enum.c:214 +#: utils/adt/enum.c:48 utils/adt/enum.c:58 utils/adt/enum.c:113 +#: utils/adt/enum.c:123 #, c-format msgid "invalid input value for enum %s: \"%s\"" msgstr "la sintassi per l'enumerazione %s non è valida: \"%s\"" -#: utils/adt/enum.c:176 utils/adt/enum.c:242 utils/adt/enum.c:301 +#: utils/adt/enum.c:85 utils/adt/enum.c:148 utils/adt/enum.c:207 #, c-format msgid "invalid internal value for enum: %u" msgstr "il valore interno per l'enumerazione non è valido: %u" -#: utils/adt/enum.c:461 utils/adt/enum.c:490 utils/adt/enum.c:530 -#: utils/adt/enum.c:550 +#: utils/adt/enum.c:360 utils/adt/enum.c:389 utils/adt/enum.c:429 +#: utils/adt/enum.c:449 #, c-format msgid "could not determine actual enum type" msgstr "determinazione del tipo reale di enumerazione fallita" -#: utils/adt/enum.c:469 utils/adt/enum.c:498 +#: utils/adt/enum.c:368 utils/adt/enum.c:397 #, c-format msgid "enum %s contains no values" msgstr "l'enumerazione %s non contiene valori" @@ -19660,7 +19678,7 @@ msgstr "\"%s\" è fuori dall'intervallo consentito per il tipo double precision" msgid "smallint out of range" msgstr "il valore è fuori dall'intervallo consentito per il tipo smallint" -#: utils/adt/float.c:1430 utils/adt/numeric.c:7624 +#: utils/adt/float.c:1430 utils/adt/numeric.c:7634 #, c-format msgid "cannot take square root of a negative number" msgstr "non è possibile estrarre la radice quadrata di un numero negativo" @@ -19675,12 +19693,12 @@ msgstr "zero elevato a potenza negativa non è definito" msgid "a negative number raised to a non-integer power yields a complex result" msgstr "un numero negativo elevato a potenza non intera è un valore di tipo complesso" -#: utils/adt/float.c:1542 utils/adt/float.c:1572 utils/adt/numeric.c:7890 +#: utils/adt/float.c:1542 utils/adt/float.c:1572 utils/adt/numeric.c:7900 #, c-format msgid "cannot take logarithm of zero" msgstr "non è possibile calcolare il logaritmo di zero" -#: utils/adt/float.c:1546 utils/adt/float.c:1576 utils/adt/numeric.c:7894 +#: utils/adt/float.c:1546 utils/adt/float.c:1576 utils/adt/numeric.c:7904 #, c-format msgid "cannot take logarithm of a negative number" msgstr "non è possibile calcolare il logaritmo di un numero negativo" @@ -19803,7 +19821,7 @@ msgstr "\"%s\" non è un numero" msgid "case conversion failed: %s" msgstr "conversione maiuscole/minuscole fallita: %s" -#: utils/adt/formatting.c:1546 +#: utils/adt/formatting.c:1545 #, c-format msgid "could not determine which collation to use for lower() function" msgstr "non è stato possibile determinare quale ordinamento usare per la funzione lower()" @@ -19813,118 +19831,118 @@ msgstr "non è stato possibile determinare quale ordinamento usare per la funzio msgid "could not determine which collation to use for upper() function" msgstr "non è stato possibile determinare quale ordinamento usare per la funzione upper()" -#: utils/adt/formatting.c:1793 +#: utils/adt/formatting.c:1794 #, c-format msgid "could not determine which collation to use for initcap() function" msgstr "non è stato possibile determinare quale ordinamento usare per la funzione initcap()" -#: utils/adt/formatting.c:2160 +#: utils/adt/formatting.c:2163 #, c-format msgid "invalid combination of date conventions" msgstr "la combinazione di convenzioni di date non è valida" -#: utils/adt/formatting.c:2161 +#: utils/adt/formatting.c:2164 #, c-format msgid "Do not mix Gregorian and ISO week date conventions in a formatting template." 
msgstr "Non è possibile usare la convenzione gregoriana e ISO per settimane in un modello di formattazione." -#: utils/adt/formatting.c:2178 +#: utils/adt/formatting.c:2181 #, c-format msgid "conflicting values for \"%s\" field in formatting string" msgstr "sono presenti valori contraddittori per il campo \"%s\" nella stringa di formattazione" -#: utils/adt/formatting.c:2180 +#: utils/adt/formatting.c:2183 #, c-format msgid "This value contradicts a previous setting for the same field type." msgstr "Questo valore contraddice una impostazione precedente per lo stesso tipo di campo" -#: utils/adt/formatting.c:2241 +#: utils/adt/formatting.c:2244 #, c-format msgid "source string too short for \"%s\" formatting field" msgstr "la stringa di origine è troppo corta per il campo di formattazione \"%s\"" -#: utils/adt/formatting.c:2243 +#: utils/adt/formatting.c:2246 #, c-format msgid "Field requires %d characters, but only %d remain." msgstr "Il campo necessita di %d caratteri ma ne restano solo %d." -#: utils/adt/formatting.c:2246 utils/adt/formatting.c:2260 +#: utils/adt/formatting.c:2249 utils/adt/formatting.c:2263 #, c-format msgid "If your source string is not fixed-width, try using the \"FM\" modifier." msgstr "Se la stringa di partenza non ha lunghezza fissa, prova ad usare il modificatore \"FM\"." -#: utils/adt/formatting.c:2256 utils/adt/formatting.c:2269 -#: utils/adt/formatting.c:2399 +#: utils/adt/formatting.c:2259 utils/adt/formatting.c:2272 +#: utils/adt/formatting.c:2402 #, c-format msgid "invalid value \"%s\" for \"%s\"" msgstr "valore \"%s\" per \"%s\" non valido" -#: utils/adt/formatting.c:2258 +#: utils/adt/formatting.c:2261 #, c-format msgid "Field requires %d characters, but only %d could be parsed." msgstr "Il campo necessita di %d caratteri, ma è stato possibile analizzarne solo %d." -#: utils/adt/formatting.c:2271 +#: utils/adt/formatting.c:2274 #, c-format msgid "Value must be an integer." msgstr "Il valore deve essere un integer." -#: utils/adt/formatting.c:2276 +#: utils/adt/formatting.c:2279 #, c-format msgid "value for \"%s\" in source string is out of range" msgstr "il valore \"%s\" nella stringa di origine è fuori dall'intervallo consentito" -#: utils/adt/formatting.c:2278 +#: utils/adt/formatting.c:2281 #, c-format msgid "Value must be in the range %d to %d." msgstr "Il valore deve essere compreso fra %d e %d." -#: utils/adt/formatting.c:2401 +#: utils/adt/formatting.c:2404 #, c-format msgid "The given value did not match any of the allowed values for this field." msgstr "Il valore fornito non corrisponde a nessuno di quelli consentiti per questo campo." 
-#: utils/adt/formatting.c:2586 utils/adt/formatting.c:2606 -#: utils/adt/formatting.c:2626 utils/adt/formatting.c:2646 -#: utils/adt/formatting.c:2665 utils/adt/formatting.c:2684 -#: utils/adt/formatting.c:2708 utils/adt/formatting.c:2726 -#: utils/adt/formatting.c:2744 utils/adt/formatting.c:2762 -#: utils/adt/formatting.c:2779 utils/adt/formatting.c:2796 +#: utils/adt/formatting.c:2589 utils/adt/formatting.c:2609 +#: utils/adt/formatting.c:2629 utils/adt/formatting.c:2649 +#: utils/adt/formatting.c:2668 utils/adt/formatting.c:2687 +#: utils/adt/formatting.c:2711 utils/adt/formatting.c:2729 +#: utils/adt/formatting.c:2747 utils/adt/formatting.c:2765 +#: utils/adt/formatting.c:2782 utils/adt/formatting.c:2799 #, c-format msgid "localized string format value too long" msgstr "valore del formato della stringa localizzata troppo lungo" -#: utils/adt/formatting.c:3083 +#: utils/adt/formatting.c:3086 #, c-format msgid "formatting field \"%s\" is only supported in to_char" msgstr "il campo di formattazione \"%s\" è supportato solo in to_char" -#: utils/adt/formatting.c:3194 +#: utils/adt/formatting.c:3197 #, c-format msgid "invalid input string for \"Y,YYY\"" msgstr "stringa di input non valida per \"Y,YYY\"" -#: utils/adt/formatting.c:3700 +#: utils/adt/formatting.c:3703 #, c-format msgid "hour \"%d\" is invalid for the 12-hour clock" msgstr "l'ora \"%d\" non è valida su un orologio a 12 ore" -#: utils/adt/formatting.c:3702 +#: utils/adt/formatting.c:3705 #, c-format msgid "Use the 24-hour clock, or give an hour between 1 and 12." msgstr "Usa l'orologio a 24 ore o fornisci un'ora compresa fra 1 e 12." -#: utils/adt/formatting.c:3808 +#: utils/adt/formatting.c:3811 #, c-format msgid "cannot calculate day of year without year information" msgstr "non è possibile calcolare il giorno dell'anno senza informazioni sull'anno" -#: utils/adt/formatting.c:4675 +#: utils/adt/formatting.c:4678 #, c-format msgid "\"EEEE\" not supported for input" msgstr "l'uso di \"EEEE\" non è supportato per l'input" -#: utils/adt/formatting.c:4687 +#: utils/adt/formatting.c:4690 #, c-format msgid "\"RN\" not supported for input" msgstr "l'uso di \"RN\" non è supportato per l'input" @@ -20076,8 +20094,8 @@ msgstr "il valore del passo non può essere uguale a zero" #: utils/adt/int8.c:98 utils/adt/int8.c:133 utils/adt/numutils.c:51 #: utils/adt/numutils.c:61 utils/adt/numutils.c:105 #, c-format -msgid "invalid input syntax for %s: \"%s\"" -msgstr "sintassi di input non valida per %s: \"%s\"" +msgid "invalid input syntax for integer: \"%s\"" +msgstr "sintassi di input non valida per un intero: \"%s\"" #: utils/adt/int8.c:500 utils/adt/int8.c:529 utils/adt/int8.c:550 #: utils/adt/int8.c:581 utils/adt/int8.c:615 utils/adt/int8.c:640 @@ -20098,189 +20116,174 @@ msgstr "bigint fuori dall'intervallo consentito" msgid "OID out of range" msgstr "OID fuori dall'intervallo consentito" -#: utils/adt/json.c:786 +#: utils/adt/json.c:787 #, c-format msgid "Character with value 0x%02x must be escaped." msgstr "Il carattere con valore 0x%02x deve essere sottoposto ad escape." -#: utils/adt/json.c:827 +#: utils/adt/json.c:828 #, c-format msgid "\"\\u\" must be followed by four hexadecimal digits." msgstr "\"\\u\" deve essere seguito da quattro cifre esadecimali." -#: utils/adt/json.c:843 +#: utils/adt/json.c:844 #, c-format msgid "Unicode high surrogate must not follow a high surrogate." 
msgstr "un carattere surrogato alto Unicode non può seguire un altro surrogato alto" -#: utils/adt/json.c:854 utils/adt/json.c:864 utils/adt/json.c:916 -#: utils/adt/json.c:978 utils/adt/json.c:990 +#: utils/adt/json.c:855 utils/adt/json.c:865 utils/adt/json.c:917 +#: utils/adt/json.c:979 utils/adt/json.c:991 #, c-format msgid "Unicode low surrogate must follow a high surrogate." msgstr "un carattere surrogato basso Unicode deve seguire un surrogato alto" -#: utils/adt/json.c:879 utils/adt/json.c:902 +#: utils/adt/json.c:880 utils/adt/json.c:903 #, c-format msgid "unsupported Unicode escape sequence" msgstr "sequenza di escape Unicode non supportata" -#: utils/adt/json.c:880 +#: utils/adt/json.c:881 #, c-format msgid "\\u0000 cannot be converted to text." msgstr "\\u0000 non può essere convertito in testo." -#: utils/adt/json.c:903 +#: utils/adt/json.c:904 #, c-format msgid "Unicode escape values cannot be used for code point values above 007F when the server encoding is not UTF8." msgstr "i codici escape Unicode non possono essere usati per caratteri con codice superiore ad 007F quando l'encoding del server non è UTF8" -#: utils/adt/json.c:948 utils/adt/json.c:966 +#: utils/adt/json.c:949 utils/adt/json.c:967 #, c-format msgid "Escape sequence \"\\%s\" is invalid." msgstr "La sequenza di escape \"\\%s\" non è valida." -#: utils/adt/json.c:1135 +#: utils/adt/json.c:1136 #, c-format msgid "The input string ended unexpectedly." msgstr "La stringa di input è terminata inaspettatamente." -#: utils/adt/json.c:1149 +#: utils/adt/json.c:1150 #, c-format msgid "Expected end of input, but found \"%s\"." msgstr "Era prevista la fine dell'input, trovato \"%s\" invece." -#: utils/adt/json.c:1160 +#: utils/adt/json.c:1161 #, c-format msgid "Expected JSON value, but found \"%s\"." msgstr "Era previsto un valore JSON, trovato \"%s\" invece." -#: utils/adt/json.c:1168 utils/adt/json.c:1216 +#: utils/adt/json.c:1169 utils/adt/json.c:1217 #, c-format msgid "Expected string, but found \"%s\"." msgstr "Era prevista una stringa, trovato \"%s\" invece." -#: utils/adt/json.c:1176 +#: utils/adt/json.c:1177 #, c-format msgid "Expected array element or \"]\", but found \"%s\"." msgstr "Era previsto un elemento di array oppure \"]\", trovato \"%s\" invece." -#: utils/adt/json.c:1184 +#: utils/adt/json.c:1185 #, c-format msgid "Expected \",\" or \"]\", but found \"%s\"." msgstr "Era previsto \",\" oppure \"]\", trovato \"%s\" invece." -#: utils/adt/json.c:1192 +#: utils/adt/json.c:1193 #, c-format msgid "Expected string or \"}\", but found \"%s\"." msgstr "Era prevista una stringa oppure \"}\", trovato \"%s\" invece." -#: utils/adt/json.c:1200 +#: utils/adt/json.c:1201 #, c-format msgid "Expected \":\", but found \"%s\"." msgstr "Era previsto \":\", trovato \"%s\" invece." -#: utils/adt/json.c:1208 +#: utils/adt/json.c:1209 #, c-format msgid "Expected \",\" or \"}\", but found \"%s\"." msgstr "Era previsto \",\" oppure \"}\", trovato \"%s\" invece." -#: utils/adt/json.c:1246 +#: utils/adt/json.c:1247 #, c-format msgid "Token \"%s\" is invalid." msgstr "Il token \"%s\" non è valido." 
-#: utils/adt/json.c:1318 +#: utils/adt/json.c:1319 #, c-format msgid "JSON data, line %d: %s%s%s" msgstr "dati JSON, riga %d: %s%s%s" -#: utils/adt/json.c:1474 utils/adt/jsonb.c:725 +#: utils/adt/json.c:1475 utils/adt/jsonb.c:726 #, c-format msgid "key value must be scalar, not array, composite, or json" msgstr "la chiave deve essere uno scalare, non array, composito né json" -#: utils/adt/json.c:2011 -#, c-format -msgid "could not determine data type for argument 1" -msgstr "impossibile determinare il tipo di dato per l'argomento 1" - -#: utils/adt/json.c:2021 +#: utils/adt/json.c:2012 utils/adt/json.c:2022 utils/fmgr/funcapi.c:1501 #, c-format -msgid "could not determine data type for argument 2" -msgstr "impossibile determinare il tipo di dato per l'argomento 2" +msgid "could not determine data type for argument %d" +msgstr "impossibile determinare il tipo di dato per l'argomento %d" -#: utils/adt/json.c:2045 utils/adt/jsonb.c:1782 +#: utils/adt/json.c:2046 utils/adt/jsonb.c:1739 #, c-format msgid "field name must not be null" msgstr "il nome del campo non può essere nullo" -#: utils/adt/json.c:2122 +#: utils/adt/json.c:2130 utils/adt/jsonb.c:1191 #, c-format msgid "argument list must have even number of elements" msgstr "la lista di argomenti deve avere un numero pari di elementi" -#: utils/adt/json.c:2123 +#: utils/adt/json.c:2131 #, c-format msgid "The arguments of json_build_object() must consist of alternating keys and values." msgstr "Gli argomenti di json_build_object() devono consistere in una serie alternata di chiavi e valori." -#: utils/adt/json.c:2147 utils/adt/json.c:2168 utils/adt/json.c:2227 -#, c-format -msgid "could not determine data type for argument %d" -msgstr "impossibile determinare il tipo di dato per l'argomento %d" - -#: utils/adt/json.c:2153 +#: utils/adt/json.c:2146 #, c-format msgid "argument %d cannot be null" msgstr "l'argomento %d non può essere nullo" -#: utils/adt/json.c:2154 +#: utils/adt/json.c:2147 #, c-format msgid "Object keys should be text." msgstr "Le chiavi degli oggetti devono essere testo." -#: utils/adt/json.c:2289 utils/adt/jsonb.c:1364 +#: utils/adt/json.c:2253 utils/adt/jsonb.c:1321 #, c-format msgid "array must have two columns" msgstr "l'array deve avere due colonne" -#: utils/adt/json.c:2313 utils/adt/json.c:2397 utils/adt/jsonb.c:1388 -#: utils/adt/jsonb.c:1483 +#: utils/adt/json.c:2277 utils/adt/json.c:2361 utils/adt/jsonb.c:1345 +#: utils/adt/jsonb.c:1440 #, c-format msgid "null value not allowed for object key" msgstr "valori null non ammessi per le chiavi di oggetti" -#: utils/adt/json.c:2386 utils/adt/jsonb.c:1472 +#: utils/adt/json.c:2350 utils/adt/jsonb.c:1429 #, c-format msgid "mismatched array dimensions" msgstr "le dimensioni degli array non combaciano" -#: utils/adt/jsonb.c:257 +#: utils/adt/jsonb.c:258 #, c-format msgid "string too long to represent as jsonb string" msgstr "la stringa è troppo lunga per essere rappresentata come stringa jsonb" -#: utils/adt/jsonb.c:258 +#: utils/adt/jsonb.c:259 #, c-format msgid "Due to an implementation restriction, jsonb strings cannot exceed %d bytes." msgstr "A causa di una restrizione nell'implementazione le stringhe jsonb non possono superare i %d byte." -#: utils/adt/jsonb.c:1183 +#: utils/adt/jsonb.c:1192 #, c-format -msgid "invalid number of arguments: object must be matched key value pairs" -msgstr "numero di argomenti non valido: gli oggetti devono essere coppie chiave-valore appaiate" +msgid "The arguments of jsonb_build_object() must consist of alternating keys and values." 
+msgstr "Gli argomenti di jsonb_build_object() devono consistere in una serie alternata di chiavi e valori." -#: utils/adt/jsonb.c:1196 +#: utils/adt/jsonb.c:1204 #, c-format msgid "argument %d: key must not be null" msgstr "argomento %d: la chiave non può essere null" -#: utils/adt/jsonb.c:1215 utils/adt/jsonb.c:1238 utils/adt/jsonb.c:1298 -#, c-format -msgid "argument %d: could not determine data type" -msgstr "argomento %d: impossibile determinare il tipo di dati" - -#: utils/adt/jsonb.c:1835 +#: utils/adt/jsonb.c:1792 #, c-format msgid "object keys must be strings" msgstr "le chiavi dell'oggetto devono essere stringhe" @@ -20306,153 +20309,153 @@ msgstr "la dimensione totale degli elementi dell'array jsonb supera il massimo d msgid "total size of jsonb object elements exceeds the maximum of %u bytes" msgstr "la dimensione totale degli elementi dell'oggetto jsonb supera il massimo di %u byte" -#: utils/adt/jsonfuncs.c:510 utils/adt/jsonfuncs.c:675 -#: utils/adt/jsonfuncs.c:2262 utils/adt/jsonfuncs.c:3377 -#: utils/adt/jsonfuncs.c:3661 +#: utils/adt/jsonfuncs.c:511 utils/adt/jsonfuncs.c:676 +#: utils/adt/jsonfuncs.c:2263 utils/adt/jsonfuncs.c:2699 +#: utils/adt/jsonfuncs.c:3393 utils/adt/jsonfuncs.c:3694 #, c-format msgid "cannot call %s on a scalar" msgstr "non è possibile eseguire %s su uno scalare" -#: utils/adt/jsonfuncs.c:515 utils/adt/jsonfuncs.c:662 -#: utils/adt/jsonfuncs.c:3366 +#: utils/adt/jsonfuncs.c:516 utils/adt/jsonfuncs.c:663 +#: utils/adt/jsonfuncs.c:2701 utils/adt/jsonfuncs.c:3382 #, c-format msgid "cannot call %s on an array" msgstr "non è possibile eseguire %s su un array" -#: utils/adt/jsonfuncs.c:1578 utils/adt/jsonfuncs.c:1613 +#: utils/adt/jsonfuncs.c:1579 utils/adt/jsonfuncs.c:1614 #, c-format msgid "cannot get array length of a scalar" msgstr "non è possibile ottenere la lunghezza di uno scalare" -#: utils/adt/jsonfuncs.c:1582 utils/adt/jsonfuncs.c:1601 +#: utils/adt/jsonfuncs.c:1583 utils/adt/jsonfuncs.c:1602 #, c-format msgid "cannot get array length of a non-array" msgstr "non è possibile ottenere la lunghezza di un oggetto che non è un array" -#: utils/adt/jsonfuncs.c:1678 +#: utils/adt/jsonfuncs.c:1679 #, c-format msgid "cannot call %s on a non-object" msgstr "non è possibile eseguire %s su un argomento che non è un oggetto" -#: utils/adt/jsonfuncs.c:1696 utils/adt/jsonfuncs.c:3192 -#: utils/adt/jsonfuncs.c:3486 +#: utils/adt/jsonfuncs.c:1697 utils/adt/jsonfuncs.c:3208 +#: utils/adt/jsonfuncs.c:3510 #, c-format msgid "function returning record called in context that cannot accept type record" msgstr "funzione che restituisce record eseguita in un contesto che non può accettare il tipo record" -#: utils/adt/jsonfuncs.c:1935 +#: utils/adt/jsonfuncs.c:1936 #, c-format msgid "cannot deconstruct an array as an object" msgstr "non è possibile decostruire un array come un oggetto" -#: utils/adt/jsonfuncs.c:1947 +#: utils/adt/jsonfuncs.c:1948 #, c-format msgid "cannot deconstruct a scalar" msgstr "non è possibile decostruire uno scalare" -#: utils/adt/jsonfuncs.c:1993 +#: utils/adt/jsonfuncs.c:1994 #, c-format msgid "cannot extract elements from a scalar" msgstr "non è possibile estrarre elementi da uno scalare" -#: utils/adt/jsonfuncs.c:1997 +#: utils/adt/jsonfuncs.c:1998 #, c-format msgid "cannot extract elements from an object" msgstr "non è possibile estrarre elementi da un oggetto" -#: utils/adt/jsonfuncs.c:2249 utils/adt/jsonfuncs.c:3550 +#: utils/adt/jsonfuncs.c:2250 utils/adt/jsonfuncs.c:3583 #, c-format msgid "cannot call %s on a non-array" msgstr "non è 
possibile eseguire %s su un argomento che non è un array" -#: utils/adt/jsonfuncs.c:2315 utils/adt/jsonfuncs.c:2320 -#: utils/adt/jsonfuncs.c:2337 utils/adt/jsonfuncs.c:2343 +#: utils/adt/jsonfuncs.c:2316 utils/adt/jsonfuncs.c:2321 +#: utils/adt/jsonfuncs.c:2338 utils/adt/jsonfuncs.c:2344 #, c-format msgid "expected json array" msgstr "atteso un array json" -#: utils/adt/jsonfuncs.c:2316 +#: utils/adt/jsonfuncs.c:2317 #, c-format -msgid "see the value of key \"%s\"" -msgstr "vedi il valore della chiave \"%s\"" +msgid "See the value of key \"%s\"." +msgstr "Vedi il valore della chiave \"%s\"." -#: utils/adt/jsonfuncs.c:2338 +#: utils/adt/jsonfuncs.c:2339 #, c-format -msgid "see the array element %s of key \"%s\"" -msgstr "vedi l'elemento dell'array %s della chiave \"%s\"" +msgid "See the array element %s of key \"%s\"." +msgstr "Vedi l'elemento dell'array %s della chiave \"%s\"." -#: utils/adt/jsonfuncs.c:2344 +#: utils/adt/jsonfuncs.c:2345 #, c-format -msgid "see the array element %s" -msgstr "vedi l'elemento dell'array %s" +msgid "See the array element %s." +msgstr "Vedi l'elemento dell'array %s." -#: utils/adt/jsonfuncs.c:2379 +#: utils/adt/jsonfuncs.c:2380 #, c-format msgid "malformed json array" msgstr "array json non valido" -#: utils/adt/jsonfuncs.c:3152 utils/adt/jsonfuncs.c:3462 +#: utils/adt/jsonfuncs.c:3168 utils/adt/jsonfuncs.c:3478 #, c-format msgid "first argument of %s must be a row type" msgstr "il primo elemento di %s deve essere di tipo riga" -#: utils/adt/jsonfuncs.c:3194 +#: utils/adt/jsonfuncs.c:3210 #, c-format msgid "Try calling the function in the FROM clause using a column definition list." msgstr "Prova ad eseguire la funzione nella clausola FROM usando una lista di definizioni di colonne." -#: utils/adt/jsonfuncs.c:3567 utils/adt/jsonfuncs.c:3643 +#: utils/adt/jsonfuncs.c:3600 utils/adt/jsonfuncs.c:3676 #, c-format msgid "argument of %s must be an array of objects" msgstr "l'argomento di %s deve essere un array di oggetti" -#: utils/adt/jsonfuncs.c:3595 +#: utils/adt/jsonfuncs.c:3628 #, c-format msgid "cannot call %s on an object" msgstr "non è possibile eseguire %s su un oggetto" -#: utils/adt/jsonfuncs.c:4071 utils/adt/jsonfuncs.c:4130 -#: utils/adt/jsonfuncs.c:4210 +#: utils/adt/jsonfuncs.c:4104 utils/adt/jsonfuncs.c:4163 +#: utils/adt/jsonfuncs.c:4243 #, c-format msgid "cannot delete from scalar" msgstr "non è possibile eliminare da uno scalare" -#: utils/adt/jsonfuncs.c:4215 +#: utils/adt/jsonfuncs.c:4248 #, c-format msgid "cannot delete from object using integer index" msgstr "non è possibile eliminare da un oggetto usando numeri interi come indici" -#: utils/adt/jsonfuncs.c:4281 utils/adt/jsonfuncs.c:4373 +#: utils/adt/jsonfuncs.c:4314 utils/adt/jsonfuncs.c:4406 #, c-format msgid "cannot set path in scalar" msgstr "non è possibile impostare un percorso in uno scalare" -#: utils/adt/jsonfuncs.c:4326 +#: utils/adt/jsonfuncs.c:4359 #, c-format msgid "cannot delete path in scalar" msgstr "non è possibile eliminare un percorso in uno scalare" -#: utils/adt/jsonfuncs.c:4496 +#: utils/adt/jsonfuncs.c:4529 #, c-format msgid "invalid concatenation of jsonb objects" msgstr "concatenazione invalida di oggetti jsonb" -#: utils/adt/jsonfuncs.c:4530 +#: utils/adt/jsonfuncs.c:4563 #, c-format msgid "path element at position %d is null" msgstr "l'elemento di percorso in posizione %d è nullo" -#: utils/adt/jsonfuncs.c:4616 +#: utils/adt/jsonfuncs.c:4649 #, c-format msgid "cannot replace existing key" msgstr "non è possibile sostituire una chiave esistente" -#: 
utils/adt/jsonfuncs.c:4617 +#: utils/adt/jsonfuncs.c:4650 #, c-format msgid "Try using the function jsonb_set to replace key value." msgstr "Prova ad utilizzare la funzione jsonb_set per rimpiazzare il valore della chiave." -#: utils/adt/jsonfuncs.c:4699 +#: utils/adt/jsonfuncs.c:4732 #, c-format msgid "path element at position %d is not an integer: \"%s\"" msgstr "l'elemento di percorso in posizione %d non è un intero: \"%s\"" @@ -20462,7 +20465,7 @@ msgstr "l'elemento di percorso in posizione %d non è un intero: \"%s\"" msgid "levenshtein argument exceeds maximum length of %d characters" msgstr "L'argomento levenshtein supera la lunghezza massima di %d caratteri" -#: utils/adt/like.c:183 utils/adt/selfuncs.c:5525 +#: utils/adt/like.c:183 utils/adt/selfuncs.c:5562 #, c-format msgid "could not determine which collation to use for ILIKE" msgstr "non è stato possibile determinare quale ordinamento usare per ILIKE" @@ -20499,8 +20502,8 @@ msgstr "dato macaddr8 fuori dall'intervallo valido per convertire a macaddr" #: utils/adt/mac8.c:555 #, c-format -msgid "Only addresses that have FF and FE as values in the 4th and 5th bytes, from the left, for example: XX-XX-XX-FF-FE-XX-XX-XX, are eligible to be converted from macaddr8 to macaddr." -msgstr "Solo gli indirizzi che hanno valori FF ed FE nel 4º e 5º byte da sinistra, per esempio XX-XX-XX-FF-FE-XX-XX-XX, possono essere convertiti da macaddr8 a macaddr." +msgid "Only addresses that have FF and FE as values in the 4th and 5th bytes from the left, for example xx:xx:xx:ff:fe:xx:xx:xx, are eligible to be converted from macaddr8 to macaddr." +msgstr "Solo gli indirizzi che hanno valori FF ed FE nel 4º e 5º byte da sinistra, per esempio XX-XX-XX-ff-fe-XX-XX-XX, possono essere convertiti da macaddr8 a macaddr." #: utils/adt/misc.c:238 #, c-format @@ -20738,9 +20741,9 @@ msgstr "il valore di fine non può essere NaN" msgid "step size cannot be NaN" msgstr "la dimensione dell'intervallo non può essere NaN" -#: utils/adt/numeric.c:2589 utils/adt/numeric.c:5551 utils/adt/numeric.c:5996 -#: utils/adt/numeric.c:7700 utils/adt/numeric.c:8125 utils/adt/numeric.c:8240 -#: utils/adt/numeric.c:8313 +#: utils/adt/numeric.c:2589 utils/adt/numeric.c:5561 utils/adt/numeric.c:6006 +#: utils/adt/numeric.c:7710 utils/adt/numeric.c:8135 utils/adt/numeric.c:8249 +#: utils/adt/numeric.c:8322 #, c-format msgid "value overflows numeric format" msgstr "il valore causa un overflow nel formato numeric" @@ -20760,12 +20763,17 @@ msgstr "non è possibile convertire NaN in un bigint" msgid "cannot convert NaN to smallint" msgstr "non è possibile convertire NaN in uno smallint" -#: utils/adt/numeric.c:6066 +#: utils/adt/numeric.c:3079 utils/adt/numeric.c:3150 +#, c-format +msgid "cannot convert infinity to numeric" +msgstr "non è possibile convertire infinity in numeric" + +#: utils/adt/numeric.c:6076 #, c-format msgid "numeric field overflow" msgstr "il campo numeric causa un overflow" -#: utils/adt/numeric.c:6067 +#: utils/adt/numeric.c:6077 #, c-format msgid "A field with precision %d, scale %d must round to an absolute value less than %s%d." msgstr "Un campo con precisione %d e %d cifre decimali deve arrotondarsi ad un valore assoluto inferiore a %s%d." @@ -20811,82 +20819,87 @@ msgstr "il valore percentile %g non è tra 0 e 1" msgid "Apply system library package updates." msgstr "Applica gli aggiornamenti ai pacchetti di sistema." 
-#: utils/adt/pg_locale.c:1239 +#: utils/adt/pg_locale.c:1249 #, c-format msgid "could not create locale \"%s\": %m" msgstr "creazione del locale \"%s\" fallita: %m" -#: utils/adt/pg_locale.c:1242 +#: utils/adt/pg_locale.c:1252 #, c-format msgid "The operating system could not find any locale data for the locale name \"%s\"." msgstr "Il sistema operativo non ha trovato dati di locale per il locale di nome \"%s\"." -#: utils/adt/pg_locale.c:1339 +#: utils/adt/pg_locale.c:1353 #, c-format msgid "collations with different collate and ctype values are not supported on this platform" msgstr "le collazioni con tipi diversi di ordinamento e ctype non sono supportati su questa piattaforma" -#: utils/adt/pg_locale.c:1348 +#: utils/adt/pg_locale.c:1362 #, c-format msgid "collation provider LIBC is not supported on this platform" msgstr "fornitore di ordinamento LIBC non supportato su questa piattaforma" -#: utils/adt/pg_locale.c:1361 utils/adt/pg_locale.c:1443 +#: utils/adt/pg_locale.c:1374 +#, c-format +msgid "collations with different collate and ctype values are not supported by ICU" +msgstr "ordinamenti con valori diversi di collate e ctype non sono supportati da ICU" + +#: utils/adt/pg_locale.c:1380 utils/adt/pg_locale.c:1468 #, c-format msgid "could not open collator for locale \"%s\": %s" msgstr "apertura dell'ordinamento per il locale \"%s\" fallita: %s" -#: utils/adt/pg_locale.c:1370 +#: utils/adt/pg_locale.c:1391 #, c-format msgid "ICU is not supported in this build" msgstr "ICU non supportato in questo build" -#: utils/adt/pg_locale.c:1371 +#: utils/adt/pg_locale.c:1392 #, c-format msgid "You need to rebuild PostgreSQL using --with-icu." msgstr "Occorre ricompilare PostgreSQL usando --with-icu." -#: utils/adt/pg_locale.c:1391 +#: utils/adt/pg_locale.c:1412 #, c-format msgid "collation \"%s\" has no actual version, but a version was specified" msgstr "l'ordinamento \"%s\" non ha una versione, ma una versione è stata specificata" -#: utils/adt/pg_locale.c:1398 +#: utils/adt/pg_locale.c:1419 #, c-format msgid "collation \"%s\" has version mismatch" msgstr "la versione dell'ordinamento \"%s\" non combacia" -#: utils/adt/pg_locale.c:1400 +#: utils/adt/pg_locale.c:1421 #, c-format msgid "The collation in the database was created using version %s, but the operating system provides version %s." msgstr "L'ordinamento nel database è stato creato usando la versione %s, ma il sistema operativo fornisce la versione %s." -#: utils/adt/pg_locale.c:1403 +#: utils/adt/pg_locale.c:1424 #, c-format msgid "Rebuild all objects affected by this collation and run ALTER COLLATION %s REFRESH VERSION, or build PostgreSQL with the right library version." msgstr "Ricostruisci tutti gli oggetti che usano questo ordinamento ed esegui ALTER COLLATION %s REFRESH VERSION, oppure ricompila PostgreSQL con la versione giusta della libreria." 
-#: utils/adt/pg_locale.c:1483 +#: utils/adt/pg_locale.c:1508 #, c-format msgid "could not open ICU converter for encoding \"%s\": %s" msgstr "apertura del convertitore ICU per l'encoding \"%s\" fallita: %s" -#: utils/adt/pg_locale.c:1503 +#: utils/adt/pg_locale.c:1539 utils/adt/pg_locale.c:1548 #, c-format msgid "ucnv_toUChars failed: %s" msgstr "ucnv_toUChars fallito: %s" -#: utils/adt/pg_locale.c:1521 +#: utils/adt/pg_locale.c:1577 utils/adt/pg_locale.c:1586 #, c-format msgid "ucnv_fromUChars failed: %s" msgstr "ucnv_fromUChars fallito: %s" -#: utils/adt/pg_locale.c:1693 +#: utils/adt/pg_locale.c:1759 #, c-format msgid "invalid multibyte character for locale" msgstr "carattere multibyte non valido per il locale" -#: utils/adt/pg_locale.c:1694 +#: utils/adt/pg_locale.c:1760 #, c-format msgid "The server's LC_CTYPE locale is probably incompatible with the database encoding." msgstr "Il locale LC_CTYPE del server probabilmente non è compatibile con la codifica del database." @@ -20986,7 +20999,7 @@ msgstr "Troppe virgole." msgid "Junk after right parenthesis or bracket." msgstr "Caratteri spuri dopo la parentesi chiusa." -#: utils/adt/regexp.c:285 utils/adt/regexp.c:1344 utils/adt/varlena.c:3948 +#: utils/adt/regexp.c:285 utils/adt/regexp.c:1344 utils/adt/varlena.c:3967 #, c-format msgid "regular expression failed: %s" msgstr "l'espressione regolare %s è fallita" @@ -21016,142 +21029,142 @@ msgstr "regexp_split_to_table non supporta l'opzione globale" msgid "regexp_split_to_array does not support the global option" msgstr "regexp_split_to_array non supporta l'opzione globale" -#: utils/adt/regproc.c:105 +#: utils/adt/regproc.c:106 #, c-format msgid "more than one function named \"%s\"" msgstr "più di una funzione si chiama \"%s\"" -#: utils/adt/regproc.c:523 +#: utils/adt/regproc.c:524 #, c-format msgid "more than one operator named %s" msgstr "più di un operatore si chiama %s" -#: utils/adt/regproc.c:690 utils/adt/regproc.c:731 gram.y:7854 +#: utils/adt/regproc.c:691 utils/adt/regproc.c:732 gram.y:7844 #, c-format msgid "missing argument" msgstr "argomento mancante" -#: utils/adt/regproc.c:691 utils/adt/regproc.c:732 gram.y:7855 +#: utils/adt/regproc.c:692 utils/adt/regproc.c:733 gram.y:7845 #, c-format msgid "Use NONE to denote the missing argument of a unary operator." msgstr "Usa NONE per indicare l'argomento mancante in un operatore unario." -#: utils/adt/regproc.c:695 utils/adt/regproc.c:736 utils/adt/regproc.c:1864 -#: utils/adt/ruleutils.c:8888 utils/adt/ruleutils.c:9056 +#: utils/adt/regproc.c:696 utils/adt/regproc.c:737 utils/adt/regproc.c:1865 +#: utils/adt/ruleutils.c:8959 utils/adt/ruleutils.c:9127 #, c-format msgid "too many arguments" msgstr "troppi argomenti" -#: utils/adt/regproc.c:696 utils/adt/regproc.c:737 +#: utils/adt/regproc.c:697 utils/adt/regproc.c:738 #, c-format msgid "Provide two argument types for operator." msgstr "Fornisci due tipi di argomento per l'operatore." 
-#: utils/adt/regproc.c:1448 utils/adt/regproc.c:1472 utils/adt/regproc.c:1573 -#: utils/adt/regproc.c:1597 utils/adt/regproc.c:1699 utils/adt/regproc.c:1704 -#: utils/adt/varlena.c:3203 utils/adt/varlena.c:3208 +#: utils/adt/regproc.c:1449 utils/adt/regproc.c:1473 utils/adt/regproc.c:1574 +#: utils/adt/regproc.c:1598 utils/adt/regproc.c:1700 utils/adt/regproc.c:1705 +#: utils/adt/varlena.c:3220 utils/adt/varlena.c:3225 #, c-format msgid "invalid name syntax" msgstr "la sintassi per il nome non è valida" -#: utils/adt/regproc.c:1762 +#: utils/adt/regproc.c:1763 #, c-format msgid "expected a left parenthesis" msgstr "era attesa un parentesi tonda aperta" -#: utils/adt/regproc.c:1778 +#: utils/adt/regproc.c:1779 #, c-format msgid "expected a right parenthesis" msgstr "era attesa un parentesi tonda chiusa" -#: utils/adt/regproc.c:1797 +#: utils/adt/regproc.c:1798 #, c-format msgid "expected a type name" msgstr "era atteso il nome di un tipo" -#: utils/adt/regproc.c:1829 +#: utils/adt/regproc.c:1830 #, c-format msgid "improper type name" msgstr "il nome del tipo non è corretto" -#: utils/adt/ri_triggers.c:314 utils/adt/ri_triggers.c:371 -#: utils/adt/ri_triggers.c:790 utils/adt/ri_triggers.c:1013 -#: utils/adt/ri_triggers.c:1169 utils/adt/ri_triggers.c:1350 -#: utils/adt/ri_triggers.c:1515 utils/adt/ri_triggers.c:1691 -#: utils/adt/ri_triggers.c:1871 utils/adt/ri_triggers.c:2062 -#: utils/adt/ri_triggers.c:2120 utils/adt/ri_triggers.c:2225 -#: utils/adt/ri_triggers.c:2402 gram.y:3678 +#: utils/adt/ri_triggers.c:311 utils/adt/ri_triggers.c:368 +#: utils/adt/ri_triggers.c:787 utils/adt/ri_triggers.c:1010 +#: utils/adt/ri_triggers.c:1166 utils/adt/ri_triggers.c:1347 +#: utils/adt/ri_triggers.c:1512 utils/adt/ri_triggers.c:1688 +#: utils/adt/ri_triggers.c:1868 utils/adt/ri_triggers.c:2059 +#: utils/adt/ri_triggers.c:2117 utils/adt/ri_triggers.c:2222 +#: utils/adt/ri_triggers.c:2399 gram.y:3656 #, c-format msgid "MATCH PARTIAL not yet implemented" msgstr "il MATCH PARTIAL non è stato ancora implementato" -#: utils/adt/ri_triggers.c:343 utils/adt/ri_triggers.c:2490 -#: utils/adt/ri_triggers.c:3315 +#: utils/adt/ri_triggers.c:340 utils/adt/ri_triggers.c:2487 +#: utils/adt/ri_triggers.c:3312 #, c-format msgid "insert or update on table \"%s\" violates foreign key constraint \"%s\"" msgstr "la INSERT o l'UPDATE sulla tabella \"%s\" viola il vincolo di chiave esterna \"%s\"" -#: utils/adt/ri_triggers.c:346 utils/adt/ri_triggers.c:2493 +#: utils/adt/ri_triggers.c:343 utils/adt/ri_triggers.c:2490 #, c-format msgid "MATCH FULL does not allow mixing of null and nonnull key values." msgstr "MATCH FULL non consente l'uso di valori chiave nulli e non nulli insieme." 
-#: utils/adt/ri_triggers.c:2732 +#: utils/adt/ri_triggers.c:2729 #, c-format msgid "function \"%s\" must be fired for INSERT" msgstr "la funzione \"%s\" deve essere eseguita per un INSERT" -#: utils/adt/ri_triggers.c:2738 +#: utils/adt/ri_triggers.c:2735 #, c-format msgid "function \"%s\" must be fired for UPDATE" msgstr "la funzione \"%s\" deve essere eseguita per un UPDATE" -#: utils/adt/ri_triggers.c:2744 +#: utils/adt/ri_triggers.c:2741 #, c-format msgid "function \"%s\" must be fired for DELETE" msgstr "la funzione \"%s\" deve essere eseguita per una DELETE" -#: utils/adt/ri_triggers.c:2767 +#: utils/adt/ri_triggers.c:2764 #, c-format msgid "no pg_constraint entry for trigger \"%s\" on table \"%s\"" msgstr "non ci sono elementi pg_constraint per il trigger \"%s\" sulla tabella \"%s\"" -#: utils/adt/ri_triggers.c:2769 +#: utils/adt/ri_triggers.c:2766 #, c-format msgid "Remove this referential integrity trigger and its mates, then do ALTER TABLE ADD CONSTRAINT." msgstr "Rimuovi questo trigger di integrità referenziale e relativi elementi collegati, poi esegui ALTER TABLE ADD CONSTRAINT." -#: utils/adt/ri_triggers.c:3225 +#: utils/adt/ri_triggers.c:3222 #, c-format msgid "referential integrity query on \"%s\" from constraint \"%s\" on \"%s\" gave unexpected result" msgstr "la query di integrità referenziale su \"%s\" dal vincolo \"%s\" su \"%s\" ha restituito un risultato inatteso" -#: utils/adt/ri_triggers.c:3229 +#: utils/adt/ri_triggers.c:3226 #, c-format msgid "This is most likely due to a rule having rewritten the query." msgstr "Ciò è probabilmente dovuto ad una RULE che ha riscritto la query." -#: utils/adt/ri_triggers.c:3319 +#: utils/adt/ri_triggers.c:3316 #, c-format msgid "Key (%s)=(%s) is not present in table \"%s\"." msgstr "La chiave (%s)=(%s) non è presente nella tabella \"%s\"." -#: utils/adt/ri_triggers.c:3322 +#: utils/adt/ri_triggers.c:3319 #, c-format msgid "Key is not present in table \"%s\"." msgstr "La chiave non è presente nella tabella \"%s\"." -#: utils/adt/ri_triggers.c:3328 +#: utils/adt/ri_triggers.c:3325 #, c-format msgid "update or delete on table \"%s\" violates foreign key constraint \"%s\" on table \"%s\"" msgstr "l'istruzione UPDATE o DELETE sulla tabella \"%s\" viola il vincolo di chiave esterna \"%s\" sulla tabella \"%s\"" -#: utils/adt/ri_triggers.c:3333 +#: utils/adt/ri_triggers.c:3330 #, c-format msgid "Key (%s)=(%s) is still referenced from table \"%s\"." msgstr "La chiave (%s)=(%s) è ancora referenziata dalla tabella \"%s\"." -#: utils/adt/ri_triggers.c:3336 +#: utils/adt/ri_triggers.c:3333 #, c-format msgid "Key is still referenced from table \"%s\"." msgstr "La chiave è ancora referenziata dalla tabella \"%s\"." 
@@ -21202,29 +21215,29 @@ msgstr "il tipo di dati non è corretto, %u invece di %u" msgid "improper binary format in record column %d" msgstr "il formato binario nella colonna %d del record non è corretto" -#: utils/adt/rowtypes.c:902 utils/adt/rowtypes.c:1142 utils/adt/rowtypes.c:1396 -#: utils/adt/rowtypes.c:1673 +#: utils/adt/rowtypes.c:902 utils/adt/rowtypes.c:1142 +#: utils/adt/rowtypes.c:1396 utils/adt/rowtypes.c:1673 #, c-format msgid "cannot compare dissimilar column types %s and %s at record column %d" msgstr "non è possibile confrontare i tipi di colonne dissimili %s e %s alla colonna %d del record" -#: utils/adt/rowtypes.c:991 utils/adt/rowtypes.c:1213 utils/adt/rowtypes.c:1529 -#: utils/adt/rowtypes.c:1769 +#: utils/adt/rowtypes.c:991 utils/adt/rowtypes.c:1213 +#: utils/adt/rowtypes.c:1529 utils/adt/rowtypes.c:1769 #, c-format msgid "cannot compare record types with different numbers of columns" msgstr "non è possibile confrontare tipi di record con diverso numero di colonne" -#: utils/adt/ruleutils.c:4655 +#: utils/adt/ruleutils.c:4668 #, c-format msgid "rule \"%s\" has unsupported event type %d" msgstr "la regola \"%s\" ha un tipo di evento non supportato %d" -#: utils/adt/selfuncs.c:5510 +#: utils/adt/selfuncs.c:5547 #, c-format msgid "case insensitive matching not supported on type bytea" msgstr "il confronto case insensitive sul tipo bytea non è supportato" -#: utils/adt/selfuncs.c:5612 +#: utils/adt/selfuncs.c:5649 #, c-format msgid "regular-expression matching not supported on type bytea" msgstr "il confronto con espressioni regolari sul tipo bytea non è supportato" @@ -21547,26 +21560,14 @@ msgstr "le informazioni di posizione nel tsvector sono errate: \"%s\"" #: utils/adt/txid.c:135 #, c-format -msgid "transaction ID " -msgstr "ID transazione " +msgid "transaction ID %s is in the future" +msgstr "l'ID di transazione %s è nel futuro" #: utils/adt/txid.c:624 #, c-format msgid "invalid external txid_snapshot data" msgstr "dati txid_snapshot esterni non validi" -#: utils/adt/txid.c:758 utils/adt/txid.c:779 -msgid "in progress" -msgstr "in esecuzione" - -#: utils/adt/txid.c:760 -msgid "committed" -msgstr "completata" - -#: utils/adt/txid.c:762 utils/adt/txid.c:777 -msgid "aborted" -msgstr "annullata" - #: utils/adt/varbit.c:58 utils/adt/varchar.c:51 #, c-format msgid "length for type %s must be at least 1" @@ -21608,8 +21609,8 @@ msgid "bit string too long for type bit varying(%d)" msgstr "la stringa di bit è troppo lunga per il tipo bit varying(%d)" #: utils/adt/varbit.c:1066 utils/adt/varbit.c:1168 utils/adt/varlena.c:841 -#: utils/adt/varlena.c:905 utils/adt/varlena.c:1049 utils/adt/varlena.c:2868 -#: utils/adt/varlena.c:2935 +#: utils/adt/varlena.c:905 utils/adt/varlena.c:1049 utils/adt/varlena.c:2885 +#: utils/adt/varlena.c:2952 #, c-format msgid "negative substring length not allowed" msgstr "non è consentita una stringa con lunghezza negativa" @@ -21634,7 +21635,7 @@ msgstr "non è possibile eseguire lo XOR fra stringhe di bit di dimensioni diver msgid "bit index %d out of valid range (0..%d)" msgstr "l'indice %d è fuori dall'intervallo valido (0..%d)" -#: utils/adt/varbit.c:1812 utils/adt/varlena.c:3127 +#: utils/adt/varbit.c:1812 utils/adt/varlena.c:3144 #, c-format msgid "new bit must be 0 or 1" msgstr "il nuovo bit deve essere 0 o 1" @@ -21649,78 +21650,78 @@ msgstr "il valore è troppo lungo per il tipo character(%d)" msgid "value too long for type character varying(%d)" msgstr "il valore è troppo lungo per il tipo character varying(%d)" -#: 
utils/adt/varlena.c:1416 utils/adt/varlena.c:1861 +#: utils/adt/varlena.c:1416 utils/adt/varlena.c:1853 #, c-format msgid "could not determine which collation to use for string comparison" msgstr "non è stato possibile determinare quale ordinamento usare per la comparazione tra stringhe" -#: utils/adt/varlena.c:1472 utils/adt/varlena.c:1485 +#: utils/adt/varlena.c:1473 utils/adt/varlena.c:1486 #, c-format msgid "could not convert string to UTF-16: error code %lu" msgstr "conversione della stringa in UTF-16 fallita: codice errore %lu" -#: utils/adt/varlena.c:1500 +#: utils/adt/varlena.c:1501 #, c-format msgid "could not compare Unicode strings: %m" msgstr "comparazione delle stringhe Unicode fallita: %m" -#: utils/adt/varlena.c:1555 utils/adt/varlena.c:2141 +#: utils/adt/varlena.c:1556 utils/adt/varlena.c:2149 #, c-format msgid "collation failed: %s" msgstr "ordinamento fallito: %s" -#: utils/adt/varlena.c:2356 +#: utils/adt/varlena.c:2367 #, c-format msgid "sort key generation failed: %s" msgstr "generazione della chiave di ordinamento fallita: %s" -#: utils/adt/varlena.c:3013 utils/adt/varlena.c:3044 utils/adt/varlena.c:3079 -#: utils/adt/varlena.c:3115 +#: utils/adt/varlena.c:3030 utils/adt/varlena.c:3061 utils/adt/varlena.c:3096 +#: utils/adt/varlena.c:3132 #, c-format msgid "index %d out of valid range, 0..%d" msgstr "l'indice %d è fuori dall'intervallo valido, 0..%d" -#: utils/adt/varlena.c:4044 +#: utils/adt/varlena.c:4063 #, c-format msgid "field position must be greater than zero" msgstr "il campo deve essere maggiore di zero" -#: utils/adt/varlena.c:4934 +#: utils/adt/varlena.c:4953 #, c-format msgid "unterminated format() type specifier" msgstr "specifica di tipo per format() non terminata" -#: utils/adt/varlena.c:4935 utils/adt/varlena.c:5069 utils/adt/varlena.c:5190 +#: utils/adt/varlena.c:4954 utils/adt/varlena.c:5088 utils/adt/varlena.c:5209 #, c-format msgid "For a single \"%%\" use \"%%%%\"." msgstr "Per un singolo \"%%\" usa \"%%%%\"." 
-#: utils/adt/varlena.c:5067 utils/adt/varlena.c:5188 +#: utils/adt/varlena.c:5086 utils/adt/varlena.c:5207 #, c-format msgid "unrecognized format() type specifier \"%c\"" msgstr "specifica di tipo per format() \"%c\" non riconosciuta" -#: utils/adt/varlena.c:5080 utils/adt/varlena.c:5137 +#: utils/adt/varlena.c:5099 utils/adt/varlena.c:5156 #, c-format msgid "too few arguments for format()" msgstr "numero di argomenti non sufficiente per format()" -#: utils/adt/varlena.c:5232 utils/adt/varlena.c:5415 +#: utils/adt/varlena.c:5251 utils/adt/varlena.c:5434 #, c-format msgid "number is out of range" msgstr "il numero è al di fuori dell'intervallo consentito" -#: utils/adt/varlena.c:5296 utils/adt/varlena.c:5324 +#: utils/adt/varlena.c:5315 utils/adt/varlena.c:5343 #, c-format msgid "format specifies argument 0, but arguments are numbered from 1" msgstr "il formato specifica l'argomento 0, ma gli argomenti sono numerati a partire da 1" -#: utils/adt/varlena.c:5317 +#: utils/adt/varlena.c:5336 #, c-format msgid "width argument position must be ended by \"$\"" msgstr "la posizione dell'argomento di larghezza deve finire con \"$\"" -#: utils/adt/varlena.c:5362 +#: utils/adt/varlena.c:5381 #, c-format msgid "null values cannot be formatted as an SQL identifier" msgstr "i valori vuoti non possono essere formattati come un identificativo SQL" @@ -21915,17 +21916,17 @@ msgstr "nessuna funzione di output disponibile per il tipo %s" msgid "cached plan must not change result type" msgstr "il cached plan non deve cambiare il tipo del risultato" -#: utils/cache/relcache.c:5791 +#: utils/cache/relcache.c:5795 #, c-format msgid "could not create relation-cache initialization file \"%s\": %m" msgstr "creazione del file di inizializzazione della cache delle relazioni \"%s\" fallita: %m" -#: utils/cache/relcache.c:5793 +#: utils/cache/relcache.c:5797 #, c-format msgid "Continuing anyway, but there's something wrong." msgstr "Proseguo in ogni caso, ma c'è qualcosa che non funziona." 
-#: utils/cache/relcache.c:6063 +#: utils/cache/relcache.c:6067 #, c-format msgid "could not remove cache file \"%s\": %m" msgstr "rimozione del file di cache \"%s\" fallita: %m" @@ -21970,12 +21971,12 @@ msgstr "fsync del file della mappa delle relazioni \"%s\" fallito: %m" msgid "could not close relation mapping file \"%s\": %m" msgstr "chiusura del file della mappa delle relazioni \"%s\" fallita: %m" -#: utils/cache/typcache.c:1223 +#: utils/cache/typcache.c:1273 #, c-format msgid "type %s is not composite" msgstr "il tipo %s non è composito" -#: utils/cache/typcache.c:1237 +#: utils/cache/typcache.c:1287 #, c-format msgid "record type has not been registered" msgstr "il tipo del record non è stato registrato" @@ -22222,163 +22223,163 @@ msgstr "non è stato fornito nessun alias colonna" msgid "could not determine row description for function returning record" msgstr "non è stato possibile determinare la descrizione della riga per la funzione che restituisce record" -#: utils/init/miscinit.c:122 +#: utils/init/miscinit.c:123 #, c-format msgid "could not change directory to \"%s\": %m" msgstr "spostamento nella directory \"%s\" fallito: %m" -#: utils/init/miscinit.c:450 utils/misc/guc.c:6115 +#: utils/init/miscinit.c:451 utils/misc/guc.c:6126 #, c-format msgid "cannot set parameter \"%s\" within security-restricted operation" msgstr "non è possibile impostare il parametro \"%s\" nell'ambito di operazioni a sicurezza ristretta" -#: utils/init/miscinit.c:511 +#: utils/init/miscinit.c:512 #, c-format msgid "role with OID %u does not exist" msgstr "il ruolo con OID %u non esiste" -#: utils/init/miscinit.c:541 +#: utils/init/miscinit.c:542 #, c-format msgid "role \"%s\" is not permitted to log in" msgstr "al ruolo \"%s\" non è consentito effettuare il login" -#: utils/init/miscinit.c:559 +#: utils/init/miscinit.c:560 #, c-format msgid "too many connections for role \"%s\"" msgstr "troppe connessioni per il ruolo \"%s\"" -#: utils/init/miscinit.c:619 +#: utils/init/miscinit.c:620 #, c-format msgid "permission denied to set session authorization" msgstr "permesso di impostare l'autorizzazione della sessione negato" -#: utils/init/miscinit.c:702 +#: utils/init/miscinit.c:703 #, c-format msgid "invalid role OID: %u" msgstr "OID del ruolo non valido: %u" -#: utils/init/miscinit.c:756 +#: utils/init/miscinit.c:757 #, c-format msgid "database system is shut down" msgstr "il database è stato arrestato" -#: utils/init/miscinit.c:843 +#: utils/init/miscinit.c:844 #, c-format msgid "could not create lock file \"%s\": %m" msgstr "creazione del file di lock \"%s\" fallita: %m" -#: utils/init/miscinit.c:857 +#: utils/init/miscinit.c:858 #, c-format msgid "could not open lock file \"%s\": %m" msgstr "apertura del file di lock \"%s\" fallita: %m" -#: utils/init/miscinit.c:864 +#: utils/init/miscinit.c:865 #, c-format msgid "could not read lock file \"%s\": %m" msgstr "lettura dal file di lock \"%s\" fallita: %m" -#: utils/init/miscinit.c:873 +#: utils/init/miscinit.c:874 #, c-format msgid "lock file \"%s\" is empty" msgstr "il file di lock \"%s\" è vuoto" -#: utils/init/miscinit.c:874 +#: utils/init/miscinit.c:875 #, c-format msgid "Either another server is starting, or the lock file is the remnant of a previous server startup crash." msgstr "O c'è un altro server in avvio, oppure il file di lock è rimasto da un precedente crash in avvio del server." 
-#: utils/init/miscinit.c:921 +#: utils/init/miscinit.c:922 #, c-format msgid "lock file \"%s\" already exists" msgstr "il file di lock \"%s\" esiste già" -#: utils/init/miscinit.c:925 +#: utils/init/miscinit.c:926 #, c-format msgid "Is another postgres (PID %d) running in data directory \"%s\"?" msgstr "C'è un altro postgres (PID %d) in esecuzione nella directory dei dati \"%s\"?" -#: utils/init/miscinit.c:927 +#: utils/init/miscinit.c:928 #, c-format msgid "Is another postmaster (PID %d) running in data directory \"%s\"?" msgstr "C'è un altro postmaster (PID %d) in esecuzione nella directory dei dati \"%s\"?" -#: utils/init/miscinit.c:930 +#: utils/init/miscinit.c:931 #, c-format msgid "Is another postgres (PID %d) using socket file \"%s\"?" msgstr "C'è un altro postgres (PID %d) che sta usando il file socket \"%s\"?" -#: utils/init/miscinit.c:932 +#: utils/init/miscinit.c:933 #, c-format msgid "Is another postmaster (PID %d) using socket file \"%s\"?" msgstr "C'è un altro postmaster (PID %d) che sta usando il file socket \"%s\"?" -#: utils/init/miscinit.c:968 +#: utils/init/miscinit.c:969 #, c-format msgid "pre-existing shared memory block (key %lu, ID %lu) is still in use" msgstr "il blocco di memoria condivisa preesistente (key %lu, ID %lu) è ancora in uso" -#: utils/init/miscinit.c:971 +#: utils/init/miscinit.c:972 #, c-format msgid "If you're sure there are no old server processes still running, remove the shared memory block or just delete the file \"%s\"." msgstr "Se sei sicuro che non ci siano vecchi processi server ancora in esecuzione, rimuovi il blocco di memoria condivisa, o semplicemente cancella il file \"%s\"." -#: utils/init/miscinit.c:987 +#: utils/init/miscinit.c:988 #, c-format msgid "could not remove old lock file \"%s\": %m" msgstr "rimozione del vecchio file di lock \"%s\" fallita: %m" -#: utils/init/miscinit.c:989 +#: utils/init/miscinit.c:990 #, c-format msgid "The file seems accidentally left over, but it could not be removed. Please remove the file by hand and try again." msgstr "Sembra che il file sia stato abbandonato accidentalmente, ma non può essere rimosso. Per favore rimuovilo manualmente e riprova." -#: utils/init/miscinit.c:1026 utils/init/miscinit.c:1040 -#: utils/init/miscinit.c:1051 +#: utils/init/miscinit.c:1027 utils/init/miscinit.c:1041 +#: utils/init/miscinit.c:1052 #, c-format msgid "could not write lock file \"%s\": %m" msgstr "scrittura del file di lock \"%s\" fallita: %m" -#: utils/init/miscinit.c:1182 utils/init/miscinit.c:1318 utils/misc/guc.c:8920 +#: utils/init/miscinit.c:1184 utils/init/miscinit.c:1327 utils/misc/guc.c:8931 #, c-format msgid "could not read from file \"%s\": %m" msgstr "lettura dal file \"%s\" fallita: %m" -#: utils/init/miscinit.c:1306 +#: utils/init/miscinit.c:1315 #, c-format msgid "could not open file \"%s\": %m; continuing anyway" msgstr "apertura del file \"%s\" fallita: %m; si procederà comunque" -#: utils/init/miscinit.c:1331 +#: utils/init/miscinit.c:1340 #, c-format msgid "lock file \"%s\" contains wrong PID: %ld instead of %ld" msgstr "il file di lock \"%s\" contiene il PID sbagliato: %ld invece di %ld" -#: utils/init/miscinit.c:1370 utils/init/miscinit.c:1386 +#: utils/init/miscinit.c:1379 utils/init/miscinit.c:1395 #, c-format msgid "\"%s\" is not a valid data directory" msgstr "\"%s\" non è una directory di dati valida" -#: utils/init/miscinit.c:1372 +#: utils/init/miscinit.c:1381 #, c-format msgid "File \"%s\" is missing." msgstr "Il file \"%s\" è mancante." 
-#: utils/init/miscinit.c:1388 +#: utils/init/miscinit.c:1397 #, c-format msgid "File \"%s\" does not contain valid data." msgstr "Il file \"%s\" non contiene dati validi." -#: utils/init/miscinit.c:1390 +#: utils/init/miscinit.c:1399 #, c-format msgid "You might need to initdb." msgstr "Potrebbe essere necessario eseguire initdb." -#: utils/init/miscinit.c:1398 +#: utils/init/miscinit.c:1407 #, c-format msgid "The data directory was initialized by PostgreSQL version %s, which is not compatible with this version %s." msgstr "La directory dei dati è stata inizializzata da PostgreSQL versione %s, che non è compatibile con questa versione %s." -#: utils/init/miscinit.c:1469 +#: utils/init/miscinit.c:1474 #, c-format msgid "loaded library \"%s\"" msgstr "libreria \"%s\" caricata" @@ -22533,6 +22534,11 @@ msgstr "ID di codifica %d non previsto per il set di caratteri ISO 8859" msgid "unexpected encoding ID %d for WIN character sets" msgstr "ID di codifica %d non previsto per il set di caratteri WIN" +#: utils/mb/encnames.c:473 +#, c-format +msgid "encoding \"%s\" not supported by ICU" +msgstr "codifica \"%s\" non supportata da ICU" + #: utils/mb/encnames.c:572 #, c-format msgid "encoding name too long" @@ -23424,7 +23430,7 @@ msgid "Sets the maximum number of predicate-locked pages and tuples per relation msgstr "Imposta il numero di pagine e tuple bloccate da lock di predicato per relazione." #: utils/misc/guc.c:2203 -msgid "If more than this total of pages and tuples in the same relation are locked by a connection, those locks are replaced by a relation level lock." +msgid "If more than this total of pages and tuples in the same relation are locked by a connection, those locks are replaced by a relation-level lock." msgstr "Se più di questo numero totale di pagine e tuple nella stessa relazione sono bloccate da una connessione, questi lock verranno sostituiti da un lock a livello di relazione." #: utils/misc/guc.c:2213 @@ -23432,7 +23438,7 @@ msgid "Sets the maximum number of predicate-locked tuples per page." msgstr "Imposta il numero massimo di tuple bloccate da lock di predicato per pagina." #: utils/misc/guc.c:2214 -msgid "If more than this number of tuples on the same page are locked by a connection, those locks are replaced by a page level lock." +msgid "If more than this number of tuples on the same page are locked by a connection, those locks are replaced by a page-level lock." msgstr "Se più di questo numero di tuple nella stessa pagina sono bloccate da una connessione, questi lock verranno sostituiti da un lock a livello di pagina." #: utils/misc/guc.c:2224 @@ -23996,145 +24002,149 @@ msgid "Sets the curve to use for ECDH." msgstr "Imposta la curva da usare per l'ECHD." #: utils/misc/guc.c:3611 +msgid "Location of the SSL DH parameters file." +msgstr "Posizione del file di parametri SSH DH." + +#: utils/misc/guc.c:3622 msgid "Sets the application name to be reported in statistics and logs." msgstr "Imposta il nome dell'applicazione da riportare nelle statistiche e nei log." -#: utils/misc/guc.c:3622 +#: utils/misc/guc.c:3633 msgid "Sets the name of the cluster, which is included in the process title." msgstr "Imposta il nome del cluster, che è incluso nel titolo del processo." -#: utils/misc/guc.c:3633 +#: utils/misc/guc.c:3644 msgid "Sets the WAL resource managers for which WAL consistency checks are done." msgstr "Imposta i gestori di risorse WAL per cui vengono effettuati i controlli di consistenza WAL." 
-#: utils/misc/guc.c:3634 +#: utils/misc/guc.c:3645 msgid "Full-page images will be logged for all data blocks and cross-checked against the results of WAL replay." msgstr "Immagini di pagine complete verranno loggate per tutti i blocchi di dati e comparate con i risultati del replay del WAL." -#: utils/misc/guc.c:3653 +#: utils/misc/guc.c:3664 msgid "Sets whether \"\\'\" is allowed in string literals." msgstr "Imposta se \"\\'\" è consentito nei letterali stringa." -#: utils/misc/guc.c:3663 +#: utils/misc/guc.c:3674 msgid "Sets the output format for bytea." msgstr "Imposta il formato di output di bytea." -#: utils/misc/guc.c:3673 +#: utils/misc/guc.c:3684 msgid "Sets the message levels that are sent to the client." msgstr "Imposta quali livelli di messaggi sono inviati al client." -#: utils/misc/guc.c:3674 utils/misc/guc.c:3727 utils/misc/guc.c:3738 -#: utils/misc/guc.c:3804 +#: utils/misc/guc.c:3685 utils/misc/guc.c:3738 utils/misc/guc.c:3749 +#: utils/misc/guc.c:3815 msgid "Each level includes all the levels that follow it. The later the level, the fewer messages are sent." msgstr "Ogni livello include tutti i livelli che lo seguono. Più avanti il livello, meno messaggi sono inviati." -#: utils/misc/guc.c:3684 +#: utils/misc/guc.c:3695 msgid "Enables the planner to use constraints to optimize queries." msgstr "Permette al planner di usare i vincoli per ottimizzare le query." -#: utils/misc/guc.c:3685 +#: utils/misc/guc.c:3696 msgid "Table scans will be skipped if their constraints guarantee that no rows match the query." msgstr "Le scansioni delle tabelle saranno evitate se i loro vincoli garantiscono che nessuna riga corrisponda con la query." -#: utils/misc/guc.c:3695 +#: utils/misc/guc.c:3706 msgid "Sets the transaction isolation level of each new transaction." msgstr "Imposta il livello di isolamento predefinito per ogni nuova transazione." -#: utils/misc/guc.c:3705 +#: utils/misc/guc.c:3716 msgid "Sets the display format for interval values." msgstr "Imposta il formato di visualizzazione per intervalli." -#: utils/misc/guc.c:3716 +#: utils/misc/guc.c:3727 msgid "Sets the verbosity of logged messages." msgstr "Imposta la prolissità dei messaggi registrati." -#: utils/misc/guc.c:3726 +#: utils/misc/guc.c:3737 msgid "Sets the message levels that are logged." msgstr "Imposta i livelli dei messaggi registrati." -#: utils/misc/guc.c:3737 +#: utils/misc/guc.c:3748 msgid "Causes all statements generating error at or above this level to be logged." msgstr "Fa in modo che tutti gli eventi che generano errore a questo livello o a un livello superiore siano registrati nel log." -#: utils/misc/guc.c:3748 +#: utils/misc/guc.c:3759 msgid "Sets the type of statements logged." msgstr "Imposta il tipo di istruzioni registrato nel log." -#: utils/misc/guc.c:3758 +#: utils/misc/guc.c:3769 msgid "Sets the syslog \"facility\" to be used when syslog enabled." msgstr "Imposta la \"facility\" da usare quando syslog è abilitato." -#: utils/misc/guc.c:3773 +#: utils/misc/guc.c:3784 msgid "Sets the session's behavior for triggers and rewrite rules." msgstr "Imposta il comportamento delle sessioni per i trigger e le regole di riscrittura." -#: utils/misc/guc.c:3783 +#: utils/misc/guc.c:3794 msgid "Sets the current transaction's synchronization level." msgstr "Imposta il livello di sincronizzazione della transazione corrente." -#: utils/misc/guc.c:3793 +#: utils/misc/guc.c:3804 msgid "Allows archiving of WAL files using archive_command."
msgstr "Consente l'archiviazione dei file WAL con l'uso di archive_command." -#: utils/misc/guc.c:3803 +#: utils/misc/guc.c:3814 msgid "Enables logging of recovery-related debugging information." msgstr "Abilita il logging di informazioni di debug relative al recupero." -#: utils/misc/guc.c:3819 +#: utils/misc/guc.c:3830 msgid "Collects function-level statistics on database activity." msgstr "Raccogli statistiche al livello di funzioni sull'attività del database." -#: utils/misc/guc.c:3829 +#: utils/misc/guc.c:3840 msgid "Set the level of information written to the WAL." msgstr "Imposta il livello delle informazioni scritte nel WAL." -#: utils/misc/guc.c:3839 +#: utils/misc/guc.c:3850 msgid "Selects the dynamic shared memory implementation used." msgstr "Seleziona l'implementazione di memoria dinamica condivisa utilizzata." -#: utils/misc/guc.c:3849 +#: utils/misc/guc.c:3860 msgid "Selects the method used for forcing WAL updates to disk." msgstr "Seleziona il metodo usato per forzare aggiornamenti WAL su disco." -#: utils/misc/guc.c:3859 +#: utils/misc/guc.c:3870 msgid "Sets how binary values are to be encoded in XML." msgstr "imposta come i valori binari devono essere codificati nel formato XML." -#: utils/misc/guc.c:3869 +#: utils/misc/guc.c:3880 msgid "Sets whether XML data in implicit parsing and serialization operations is to be considered as documents or content fragments." msgstr "Imposta se qualunque dato XML nelle operazioni di parsing e serializzazione implicite debba essere considerato come un documento o frammento di un contenuto." -#: utils/misc/guc.c:3880 +#: utils/misc/guc.c:3891 msgid "Use of huge pages on Linux." msgstr "Uso delle pagine huge su Linux." -#: utils/misc/guc.c:3890 +#: utils/misc/guc.c:3901 msgid "Forces use of parallel query facilities." msgstr "Forza l'uso delle query parallele." -#: utils/misc/guc.c:3891 +#: utils/misc/guc.c:3902 msgid "If possible, run query using a parallel worker and with parallel restrictions." msgstr "Se possibile, effettua le query usando worker paralleli e con restrizioni di parallelismo." -#: utils/misc/guc.c:3900 +#: utils/misc/guc.c:3911 msgid "Encrypt passwords." msgstr "Cripta le password." -#: utils/misc/guc.c:3901 +#: utils/misc/guc.c:3912 msgid "When a password is specified in CREATE USER or ALTER USER without writing either ENCRYPTED or UNENCRYPTED, this parameter determines whether the password is to be encrypted." msgstr "Quando si indica una password in CREATE USER o ALTER USER senza indicare ENCRYPTED o UNENCRYPTED, questo parametro determina se la password debba essere criptata o meno." 
-#: utils/misc/guc.c:4703 +#: utils/misc/guc.c:4714 #, c-format msgid "%s: could not access directory \"%s\": %s\n" msgstr "%s: accesso alla directory \"%s\" fallito: %s\n" -#: utils/misc/guc.c:4708 +#: utils/misc/guc.c:4719 #, c-format msgid "Run initdb or pg_basebackup to initialize a PostgreSQL data directory.\n" msgstr "Esegui initdb o pg_basebackup per inizializzare una directory di dati PostgreSQL.\n" -#: utils/misc/guc.c:4728 +#: utils/misc/guc.c:4739 #, c-format msgid "" "%s does not know where to find the server configuration file.\n" @@ -24143,12 +24153,12 @@ msgstr "" "%s non sa dove trovare il file di configurazione del server.\n" "Devi specificare le opzioni --config-file o -D, oppure impostare la variabile d'ambiente PGDATA.\n" -#: utils/misc/guc.c:4747 +#: utils/misc/guc.c:4758 #, c-format msgid "%s: could not access the server configuration file \"%s\": %s\n" msgstr "%s: accesso al file di configurazione del server \"%s\" fallito: %s\n" -#: utils/misc/guc.c:4773 +#: utils/misc/guc.c:4784 #, c-format msgid "" "%s does not know where to find the database system data.\n" @@ -24157,7 +24167,7 @@ msgstr "" "%s non sa dove trovare i dati di sistema del database.\n" "Possono essere specificati come \"data_directory\" in \"%s\", oppure dall'opzione -D, oppure dalla variabile d'ambiente PGDATA.\n" -#: utils/misc/guc.c:4821 +#: utils/misc/guc.c:4832 #, c-format msgid "" "%s does not know where to find the \"hba\" configuration file.\n" @@ -24166,7 +24176,7 @@ msgstr "" "%s non sa dove trovare il file di configurazione \"hba\".\n" "Può essere specificato come \"hba_file\" in \"%s\", oppure dall'opzione -D, oppure dalla variabile d'ambiente PGDATA.\n" -#: utils/misc/guc.c:4844 +#: utils/misc/guc.c:4855 #, c-format msgid "" "%s does not know where to find the \"ident\" configuration file.\n" @@ -24175,145 +24185,145 @@ msgstr "" "%s non sa dove trovare il file di configurazione \"ident\".\n" "Può essere specificato come \"ident_file\" in \"%s\", oppure dall'opzione -D, oppure dalla variabile d'ambiente PGDATA.\n" -#: utils/misc/guc.c:5518 utils/misc/guc.c:5565 +#: utils/misc/guc.c:5529 utils/misc/guc.c:5576 msgid "Value exceeds integer range." msgstr "Il valore non rientra nel limite possibile per gli interi." -#: utils/misc/guc.c:5788 +#: utils/misc/guc.c:5799 #, c-format msgid "parameter \"%s\" requires a numeric value" msgstr "il parametro \"%s\" richiede un valore numerico" -#: utils/misc/guc.c:5797 +#: utils/misc/guc.c:5808 #, c-format msgid "%g is outside the valid range for parameter \"%s\" (%g .. %g)" msgstr "%g non è compreso nell'intervallo di validità del parametro \"%s\" (%g .. 
%g)" -#: utils/misc/guc.c:5950 utils/misc/guc.c:7296 +#: utils/misc/guc.c:5961 utils/misc/guc.c:7307 #, c-format msgid "cannot set parameters during a parallel operation" msgstr "non è possibile impostare parametri durante un'operazione parallela" -#: utils/misc/guc.c:5957 utils/misc/guc.c:6708 utils/misc/guc.c:6761 -#: utils/misc/guc.c:7124 utils/misc/guc.c:7883 utils/misc/guc.c:8051 -#: utils/misc/guc.c:9727 +#: utils/misc/guc.c:5968 utils/misc/guc.c:6719 utils/misc/guc.c:6772 +#: utils/misc/guc.c:7135 utils/misc/guc.c:7894 utils/misc/guc.c:8062 +#: utils/misc/guc.c:9731 #, c-format msgid "unrecognized configuration parameter \"%s\"" msgstr "parametro di configurazione \"%s\" sconosciuto" -#: utils/misc/guc.c:5972 utils/misc/guc.c:7136 +#: utils/misc/guc.c:5983 utils/misc/guc.c:7147 #, c-format msgid "parameter \"%s\" cannot be changed" msgstr "il parametro \"%s\" non può essere cambiato" -#: utils/misc/guc.c:5995 utils/misc/guc.c:6188 utils/misc/guc.c:6278 -#: utils/misc/guc.c:6368 utils/misc/guc.c:6476 utils/misc/guc.c:6571 +#: utils/misc/guc.c:6006 utils/misc/guc.c:6199 utils/misc/guc.c:6289 +#: utils/misc/guc.c:6379 utils/misc/guc.c:6487 utils/misc/guc.c:6582 #: guc-file.l:350 #, c-format msgid "parameter \"%s\" cannot be changed without restarting the server" msgstr "il parametro \"%s\" non può essere cambiato senza riavviare il server" -#: utils/misc/guc.c:6005 +#: utils/misc/guc.c:6016 #, c-format msgid "parameter \"%s\" cannot be changed now" msgstr "il parametro \"%s\" non può essere cambiato ora" -#: utils/misc/guc.c:6023 utils/misc/guc.c:6069 utils/misc/guc.c:9743 +#: utils/misc/guc.c:6034 utils/misc/guc.c:6080 utils/misc/guc.c:9747 #, c-format msgid "permission denied to set parameter \"%s\"" msgstr "permesso di impostare il parametro \"%s\" negato" -#: utils/misc/guc.c:6059 +#: utils/misc/guc.c:6070 #, c-format msgid "parameter \"%s\" cannot be set after connection start" msgstr "il parametro \"%s\" non può essere impostato dopo l'avvio della connessione" -#: utils/misc/guc.c:6107 +#: utils/misc/guc.c:6118 #, c-format msgid "cannot set parameter \"%s\" within security-definer function" msgstr "il parametro \"%s\" non può essere impostato da una funzione che ha i privilegi del creatore" -#: utils/misc/guc.c:6716 utils/misc/guc.c:6766 utils/misc/guc.c:8058 +#: utils/misc/guc.c:6727 utils/misc/guc.c:6777 utils/misc/guc.c:8069 #, c-format msgid "must be superuser or a member of pg_read_all_settings to examine \"%s\"" msgstr "occorre essere un superutente o un membro di pg_read_all_settings per esaminare \"%s\"" -#: utils/misc/guc.c:6833 +#: utils/misc/guc.c:6844 #, c-format msgid "SET %s takes only one argument" msgstr "SET %s accetta un unico argomento" -#: utils/misc/guc.c:7084 +#: utils/misc/guc.c:7095 #, c-format msgid "must be superuser to execute ALTER SYSTEM command" msgstr "solo un superutente può eseguire il comando ALTER SYSTEM" -#: utils/misc/guc.c:7169 +#: utils/misc/guc.c:7180 #, c-format msgid "parameter value for ALTER SYSTEM must not contain a newline" msgstr "il valore del parametro di ALTER SYSTEM non può contenere un \"a capo\"" -#: utils/misc/guc.c:7214 +#: utils/misc/guc.c:7225 #, c-format msgid "could not parse contents of file \"%s\"" msgstr "non è possibile analizzare il contenuto del file \"%s\"" -#: utils/misc/guc.c:7372 +#: utils/misc/guc.c:7383 #, c-format msgid "SET LOCAL TRANSACTION SNAPSHOT is not implemented" msgstr "SET LOCAL TRANSACTION SNAPSHOT non è implementato" -#: utils/misc/guc.c:7456 +#: utils/misc/guc.c:7467 #, c-format msgid "SET 
requires parameter name" msgstr "SET richiede il nome del parametro" -#: utils/misc/guc.c:7580 +#: utils/misc/guc.c:7591 #, c-format msgid "attempt to redefine parameter \"%s\"" msgstr "tentativo di ridefinire il parametro \"%s\"" -#: utils/misc/guc.c:9360 +#: utils/misc/guc.c:9364 #, c-format msgid "parameter \"%s\" could not be set" msgstr "il parametro \"%s\" non può essere impostato" -#: utils/misc/guc.c:9447 +#: utils/misc/guc.c:9451 #, c-format msgid "could not parse setting for parameter \"%s\"" msgstr "non è stato possibile interpretare l'impostazione del parametro \"%s\"" -#: utils/misc/guc.c:9805 utils/misc/guc.c:9839 +#: utils/misc/guc.c:9809 utils/misc/guc.c:9843 #, c-format msgid "invalid value for parameter \"%s\": %d" msgstr "valore non valido per il parametro \"%s\": %d" -#: utils/misc/guc.c:9873 +#: utils/misc/guc.c:9877 #, c-format msgid "invalid value for parameter \"%s\": %g" msgstr "valore non valido per il parametro \"%s\": %g" -#: utils/misc/guc.c:10143 +#: utils/misc/guc.c:10147 #, c-format msgid "\"temp_buffers\" cannot be changed after any temporary tables have been accessed in the session." msgstr "\"temp_buffers\" non può essere modificato dopo che la sessione ha utilizzato qualsiasi tabella temporanea." -#: utils/misc/guc.c:10155 +#: utils/misc/guc.c:10159 #, c-format msgid "Bonjour is not supported by this build" msgstr "Bonjour non è supportato in questo binario" -#: utils/misc/guc.c:10168 +#: utils/misc/guc.c:10172 #, c-format msgid "SSL is not supported by this build" msgstr "SSL non è supportato in questo binario" -#: utils/misc/guc.c:10180 +#: utils/misc/guc.c:10184 #, c-format msgid "Cannot enable parameter when \"log_statement_stats\" is true." msgstr "Non è possibile abilitare il parametro quando \"log_statement_stats\" è abilitato." -#: utils/misc/guc.c:10192 +#: utils/misc/guc.c:10196 #, c-format msgid "Cannot enable \"log_statement_stats\" when \"log_parser_stats\", \"log_planner_stats\", or \"log_executor_stats\" is true." msgstr "Non è possibile abilitare \"log_statement_stats\" quando \"log_parser_stats\", \"log_planner_stats\" o \"log_executor_stats\" sono abilitati." @@ -24334,6 +24344,11 @@ msgstr "la tupla che la query specifica e il tipo restituito dalla funzione non msgid "calculated CRC checksum does not match value stored in file" msgstr "il CRC di controllo calcolato non combacia con quello nel file" +#: utils/misc/pg_rusage.c:64 +#, c-format +msgid "CPU: user: %d.%02d s, system: %d.%02d s, elapsed: %d.%02d s" +msgstr "CPU: utente: %d.%02d s, sistema: %d.%02d s, passati: %d.%02d s" + #: utils/misc/rls.c:128 #, c-format msgid "query would be affected by row-level security policy for table \"%s\"" @@ -24472,17 +24487,17 @@ msgstr "lettura del blocco %ld dal file temporaneo fallita: %m" msgid "cannot have more than %d runs for an external sort" msgstr "non è possibile avere più di %d esecuzioni per un sort esterno" -#: utils/sort/tuplesort.c:4141 +#: utils/sort/tuplesort.c:4146 #, c-format msgid "could not create unique index \"%s\"" msgstr "creazione dell'indice univoco \"%s\" fallita" -#: utils/sort/tuplesort.c:4143 +#: utils/sort/tuplesort.c:4148 #, c-format msgid "Key %s is duplicated." msgstr "La chiave %s è duplicata." -#: utils/sort/tuplesort.c:4144 +#: utils/sort/tuplesort.c:4149 #, c-format msgid "Duplicate keys exist." msgstr "Esistono chiavi duplicate." 
@@ -24508,348 +24523,349 @@ msgstr "lettura dal file temporaneo tuplestore fallita: %m" msgid "could not write to tuplestore temporary file: %m" msgstr "scrittura nel file temporaneo tuplestore fallita: %m" -#: utils/time/snapmgr.c:618 +#: utils/time/snapmgr.c:622 #, c-format msgid "The source transaction is not running anymore." msgstr "La transazione di origine non è più in esecuzione." # translator: %s represents an SQL statement name -#: utils/time/snapmgr.c:1198 +#: utils/time/snapmgr.c:1200 #, c-format msgid "cannot export a snapshot from a subtransaction" msgstr "non è possibile esportare uno snapshot da una sotto-transazione" -#: utils/time/snapmgr.c:1347 utils/time/snapmgr.c:1352 -#: utils/time/snapmgr.c:1357 utils/time/snapmgr.c:1372 -#: utils/time/snapmgr.c:1377 utils/time/snapmgr.c:1382 -#: utils/time/snapmgr.c:1481 utils/time/snapmgr.c:1497 -#: utils/time/snapmgr.c:1522 +#: utils/time/snapmgr.c:1359 utils/time/snapmgr.c:1364 +#: utils/time/snapmgr.c:1369 utils/time/snapmgr.c:1384 +#: utils/time/snapmgr.c:1389 utils/time/snapmgr.c:1394 +#: utils/time/snapmgr.c:1409 utils/time/snapmgr.c:1414 +#: utils/time/snapmgr.c:1419 utils/time/snapmgr.c:1519 +#: utils/time/snapmgr.c:1535 utils/time/snapmgr.c:1560 #, c-format msgid "invalid snapshot data in file \"%s\"" msgstr "dati dello snapshot non validi nel file \"%s\"" -#: utils/time/snapmgr.c:1419 +#: utils/time/snapmgr.c:1456 #, c-format msgid "SET TRANSACTION SNAPSHOT must be called before any query" msgstr "SET TRANSACTION SNAPSHOT dev'essere invocato prima di qualunque query" -#: utils/time/snapmgr.c:1428 +#: utils/time/snapmgr.c:1465 #, c-format msgid "a snapshot-importing transaction must have isolation level SERIALIZABLE or REPEATABLE READ" msgstr "una transazione che importa uno snapshot deve avere livello di isolamento SERIALIZABLE o REPEATABLE READ" -#: utils/time/snapmgr.c:1437 utils/time/snapmgr.c:1446 +#: utils/time/snapmgr.c:1474 utils/time/snapmgr.c:1483 #, c-format msgid "invalid snapshot identifier: \"%s\"" msgstr "identificativo di snapshot non valido: \"%s\"" -#: utils/time/snapmgr.c:1535 +#: utils/time/snapmgr.c:1573 #, c-format msgid "a serializable transaction cannot import a snapshot from a non-serializable transaction" msgstr "una transazione serializzabile non può importare uno snapshot da una transazione non serializzabile" -#: utils/time/snapmgr.c:1539 +#: utils/time/snapmgr.c:1577 #, c-format msgid "a non-read-only serializable transaction cannot import a snapshot from a read-only transaction" msgstr "una transazione non di sola lettura non può importare uno snapshot da una transazione di sola lettura" -#: utils/time/snapmgr.c:1554 +#: utils/time/snapmgr.c:1592 #, c-format msgid "cannot import a snapshot from a different database" msgstr "non è possibile importare uno snapshot da un database diverso" -#: gram.y:1008 +#: gram.y:1002 #, c-format msgid "UNENCRYPTED PASSWORD is no longer supported" msgstr "UNENCRYPTED PASSWORD non è più supportato" -#: gram.y:1009 +#: gram.y:1003 #, c-format msgid "Remove UNENCRYPTED to store the password in encrypted form instead." msgstr "Rimuovi UNENCRYPTED per memorizzare la password in formato criptato." 
-#: gram.y:1071 +#: gram.y:1065 #, c-format msgid "unrecognized role option \"%s\"" msgstr "opzione di ruolo \"%s\" sconosciuta" -#: gram.y:1345 gram.y:1360 +#: gram.y:1312 gram.y:1327 #, c-format msgid "CREATE SCHEMA IF NOT EXISTS cannot include schema elements" msgstr "CREATE SCHEMA IF NOT EXISTS non può includere elementi dello schema" -#: gram.y:1505 +#: gram.y:1472 #, c-format msgid "current database cannot be changed" msgstr "il database corrente non può essere cambiato" -#: gram.y:1629 +#: gram.y:1596 #, c-format msgid "time zone interval must be HOUR or HOUR TO MINUTE" msgstr "l'intervallo della time zone deve essere HOUR o HOUR TO MINUTE" -#: gram.y:2644 +#: gram.y:2612 #, c-format msgid "sequence option \"%s\" not supported here" msgstr "l'opzione della sequenza \"%s\" non è supportata qui" -#: gram.y:2857 gram.y:2886 +#: gram.y:2835 gram.y:2864 #, c-format msgid "STDIN/STDOUT not allowed with PROGRAM" msgstr "STDIN/STDOUT non sono consentiti con PROGRAM" -#: gram.y:3196 gram.y:3203 gram.y:11092 gram.y:11100 +#: gram.y:3174 gram.y:3181 gram.y:11072 gram.y:11080 #, c-format msgid "GLOBAL is deprecated in temporary table creation" msgstr "GLOBAL è deprecato nella creazione di tabelle temporanee" -#: gram.y:5128 +#: gram.y:5118 #, c-format msgid "unrecognized row security option \"%s\"" msgstr "opzione di sicurezza riga \"%s\" non riconosciuta" -#: gram.y:5129 +#: gram.y:5119 #, c-format msgid "Only PERMISSIVE or RESTRICTIVE policies are supported currently." msgstr "Solo le regole PERMISSIVE o RESTRICTIVE sono attualmente supportate." -#: gram.y:5237 +#: gram.y:5227 msgid "duplicate trigger events specified" msgstr "evento del trigger specificato più volte" -#: gram.y:5380 +#: gram.y:5370 #, c-format msgid "conflicting constraint properties" msgstr "proprietà del vincolo in conflitto" -#: gram.y:5486 +#: gram.y:5476 #, c-format msgid "CREATE ASSERTION is not yet implemented" msgstr "CREATE ASSERTION non è stata ancora implementata" -#: gram.y:5501 +#: gram.y:5491 #, c-format msgid "DROP ASSERTION is not yet implemented" msgstr "DROP ASSERTION non è stata ancora implementata" -#: gram.y:5881 +#: gram.y:5871 #, c-format msgid "RECHECK is no longer required" msgstr "RECHECK non è più richiesto" -#: gram.y:5882 +#: gram.y:5872 #, c-format msgid "Update your data type." msgstr "Aggiorna il tuo tipo di dato." -#: gram.y:7525 +#: gram.y:7515 #, c-format msgid "aggregates cannot have output arguments" msgstr "gli aggregati non possono avere argomenti di output" -#: gram.y:9667 gram.y:9685 +#: gram.y:9647 gram.y:9665 #, c-format msgid "WITH CHECK OPTION not supported on recursive views" msgstr "WITH CHECK OPTION non supportato su viste ricorsive" -#: gram.y:10218 +#: gram.y:10198 #, c-format msgid "unrecognized VACUUM option \"%s\"" msgstr "opzione di VACUUM \"%s\" sconosciuta" -#: gram.y:11200 +#: gram.y:11180 #, c-format msgid "LIMIT #,# syntax is not supported" msgstr "La sintassi LIMIT #,# non è supportata" -#: gram.y:11201 +#: gram.y:11181 #, c-format msgid "Use separate LIMIT and OFFSET clauses." msgstr "Usa separatamente le clausole LIMIT ed OFFSET." -#: gram.y:11482 gram.y:11507 +#: gram.y:11462 gram.y:11487 #, c-format msgid "VALUES in FROM must have an alias" msgstr "VALUES nel FROM deve avere un alias" -#: gram.y:11483 gram.y:11508 +#: gram.y:11463 gram.y:11488 #, c-format msgid "For example, FROM (VALUES ...) [AS] foo." msgstr "Per esempio, FROM (VALUES ...) [AS] foo." 
-#: gram.y:11488 gram.y:11513 +#: gram.y:11468 gram.y:11493 #, c-format msgid "subquery in FROM must have an alias" msgstr "la sottoquery in FROM deve avere un alias" -#: gram.y:11489 gram.y:11514 +#: gram.y:11469 gram.y:11494 #, c-format msgid "For example, FROM (SELECT ...) [AS] foo." msgstr "Per esempio, FROM (SELECT ...) [AS] foo." -#: gram.y:11968 +#: gram.y:11948 #, c-format msgid "only one DEFAULT value is allowed" msgstr "solo un valore DEFAULT è consentito" -#: gram.y:11977 +#: gram.y:11957 #, c-format msgid "only one PATH value per column is allowed" msgstr "solo un valore PATH per colonna è consentito" -#: gram.y:11986 +#: gram.y:11966 #, c-format msgid "conflicting or redundant NULL / NOT NULL declarations for column \"%s\"" msgstr "dichiarazioni NULL / NOT NULL in conflitto o ridondanti per la colonna \"%s\"" -#: gram.y:11995 +#: gram.y:11975 #, c-format msgid "unrecognized column option \"%s\"" msgstr "opzione di colonna \"%s\" non riconosciuta" -#: gram.y:12249 +#: gram.y:12229 #, c-format msgid "precision for type float must be at least 1 bit" msgstr "la precisione per il tipo float dev'essere di almeno un bit" -#: gram.y:12258 +#: gram.y:12238 #, c-format msgid "precision for type float must be less than 54 bits" msgstr "la precisione per il tipo float dev'essere inferiore a 54 bit" -#: gram.y:12749 +#: gram.y:12729 #, c-format msgid "wrong number of parameters on left side of OVERLAPS expression" msgstr "numero errato di parametri a sinistra dell'espressione OVERLAPS" -#: gram.y:12754 +#: gram.y:12734 #, c-format msgid "wrong number of parameters on right side of OVERLAPS expression" msgstr "numero errato di parametri a destra dell'espressione OVERLAPS" -#: gram.y:12929 +#: gram.y:12909 #, c-format msgid "UNIQUE predicate is not yet implemented" msgstr "il predicato UNIQUE non è stato ancora implementato" -#: gram.y:13276 +#: gram.y:13256 #, c-format msgid "cannot use multiple ORDER BY clauses with WITHIN GROUP" msgstr "non si può usare più di una clausola ORDER BY con WITHIN GROUP" -#: gram.y:13281 +#: gram.y:13261 #, c-format msgid "cannot use DISTINCT with WITHIN GROUP" msgstr "non si può usare DISTINCT con WITHIN GROUP" -#: gram.y:13286 +#: gram.y:13266 #, c-format msgid "cannot use VARIADIC with WITHIN GROUP" msgstr "non si può usare VARIADIC con WITHIN GROUP" -#: gram.y:13712 +#: gram.y:13692 #, c-format msgid "RANGE PRECEDING is only supported with UNBOUNDED" msgstr "RANGE PRECEDING è supportato solo con UNBOUNDED" -#: gram.y:13718 +#: gram.y:13698 #, c-format msgid "RANGE FOLLOWING is only supported with UNBOUNDED" msgstr "RANGE FOLLOWING è supportato solo con UNBOUNDED" -#: gram.y:13745 gram.y:13768 +#: gram.y:13725 gram.y:13748 #, c-format msgid "frame start cannot be UNBOUNDED FOLLOWING" msgstr "l'inizio della finestra non può essere UNBOUNDED FOLLOWING" -#: gram.y:13750 +#: gram.y:13730 #, c-format msgid "frame starting from following row cannot end with current row" msgstr "una finestra che inizia dalla riga seguente non può terminare alla riga corrente" -#: gram.y:13773 +#: gram.y:13753 #, c-format msgid "frame end cannot be UNBOUNDED PRECEDING" msgstr "la fine della finestra non può essere UNBOUNDED PRECEDING" -#: gram.y:13779 +#: gram.y:13759 #, c-format msgid "frame starting from current row cannot have preceding rows" msgstr "una finestra che inizia dalla riga corrente non può avere righe precedenti" -#: gram.y:13786 +#: gram.y:13766 #, c-format msgid "frame starting from following row cannot have preceding rows" msgstr "una finestra che inizia dalla 
riga seguente non può avere righe precedenti" -#: gram.y:14421 +#: gram.y:14401 #, c-format msgid "type modifier cannot have parameter name" msgstr "un modificatore di tipo non può avere un nome di parametro" -#: gram.y:14427 +#: gram.y:14407 #, c-format msgid "type modifier cannot have ORDER BY" msgstr "un modificatore di tipo non può avere ORDER BY" -#: gram.y:14491 gram.y:14497 +#: gram.y:14471 gram.y:14477 #, c-format msgid "%s cannot be used as a role name here" msgstr "%s non può essere usato come nome di ruolo qui" -#: gram.y:15159 gram.y:15348 +#: gram.y:15139 gram.y:15328 msgid "improper use of \"*\"" msgstr "uso improprio di \"*\"" -#: gram.y:15412 +#: gram.y:15392 #, c-format msgid "an ordered-set aggregate with a VARIADIC direct argument must have one VARIADIC aggregated argument of the same data type" msgstr "un aggregato su insiemi ordinati con un argomento diretto VARIADIC deve avere un argomento aggregato VARIADIC sullo stesso tipo" -#: gram.y:15449 +#: gram.y:15429 #, c-format msgid "multiple ORDER BY clauses not allowed" msgstr "non è possibile avere più di una clausola ORDER BY" -#: gram.y:15460 +#: gram.y:15440 #, c-format msgid "multiple OFFSET clauses not allowed" msgstr "non è possibile avere più di una clausola OFFSET" -#: gram.y:15469 +#: gram.y:15449 #, c-format msgid "multiple LIMIT clauses not allowed" msgstr "non è possibile avere più di una clausola LIMIT" -#: gram.y:15478 +#: gram.y:15458 #, c-format msgid "multiple WITH clauses not allowed" msgstr "non è possibile avere più di una clausola WITH" -#: gram.y:15682 +#: gram.y:15662 #, c-format msgid "OUT and INOUT arguments aren't allowed in TABLE functions" msgstr "gli argomenti OUT e INOUT non sono permessi nelle funzioni TABLE" -#: gram.y:15783 +#: gram.y:15763 #, c-format msgid "multiple COLLATE clauses not allowed" msgstr "non è possibile avere più di una clausola COLLATE" #. translator: %s is CHECK, UNIQUE, or similar -#: gram.y:15821 gram.y:15834 +#: gram.y:15801 gram.y:15814 #, c-format msgid "%s constraints cannot be marked DEFERRABLE" msgstr "un vincolo %s non può essere marcato DEFERRABLE" #. translator: %s is CHECK, UNIQUE, or similar -#: gram.y:15847 +#: gram.y:15827 #, c-format msgid "%s constraints cannot be marked NOT VALID" msgstr "un vincolo %s non può essere marcato NOT VALID" #. 
translator: %s is CHECK, UNIQUE, or similar -#: gram.y:15860 +#: gram.y:15840 #, c-format msgid "%s constraints cannot be marked NO INHERIT" msgstr "un vincolo %s non può essere marcato NO INHERIT" @@ -24914,16 +24930,16 @@ msgstr "troppi errori di sintassi, file \"%s\" abbandonato" msgid "could not open configuration directory \"%s\": %m" msgstr "apertura della directory di configurazione \"%s\" fallita: %m" -#: repl_gram.y:320 repl_gram.y:352 +#: repl_gram.y:330 repl_gram.y:362 #, c-format msgid "invalid timeline %u" msgstr "timeline %u non valida" -#: repl_scanner.l:125 +#: repl_scanner.l:126 msgid "invalid streaming start location" msgstr "posizione di avvio dello streaming non valida" -#: repl_scanner.l:176 scan.l:670 +#: repl_scanner.l:177 scan.l:670 msgid "unterminated quoted string" msgstr "stringa tra virgolette non terminata" diff --git a/src/backend/po/ko.po b/src/backend/po/ko.po index c5225ffa19..92ce1a8233 100644 --- a/src/backend/po/ko.po +++ b/src/backend/po/ko.po @@ -3,10 +3,10 @@ # msgid "" msgstr "" -"Project-Id-Version: PostgreSQL 9.6\n" +"Project-Id-Version: postgres (PostgreSQL) 10\n" "Report-Msgid-Bugs-To: pgsql-bugs@postgresql.org\n" -"POT-Creation-Date: 2017-03-02 11:23+0900\n" -"PO-Revision-Date: 2017-03-02 11:28:26+0900\n" +"POT-Creation-Date: 2017-09-19 09:51+0900\n" +"PO-Revision-Date: 2017-09-19 10:26+0900\n" "Last-Translator: Ioseph Kim \n" "Language-Team: Korean Team \n" "Language: ko\n" @@ -15,65 +15,48 @@ msgstr "" "Content-Transfer-Encoding: 8bit\n" "Plural-Forms: nplurals=1; plural=0;\n" -#: ../common/config_info.c:131 ../common/config_info.c:139 -#: ../common/config_info.c:147 ../common/config_info.c:155 -#: ../common/config_info.c:163 ../common/config_info.c:171 -#: ../common/config_info.c:179 ../common/config_info.c:187 -#: ../common/config_info.c:195 +#: ../common/config_info.c:130 ../common/config_info.c:138 +#: ../common/config_info.c:146 ../common/config_info.c:154 +#: ../common/config_info.c:162 ../common/config_info.c:170 +#: ../common/config_info.c:178 ../common/config_info.c:186 +#: ../common/config_info.c:194 msgid "not recorded" msgstr "기록되어 있지 않음" -#: ../common/controldata_utils.c:52 commands/copy.c:2833 -#: commands/extension.c:3141 utils/adt/genfile.c:134 +#: ../common/controldata_utils.c:57 commands/copy.c:3124 +#: commands/extension.c:3330 utils/adt/genfile.c:135 #, c-format msgid "could not open file \"%s\" for reading: %m" msgstr "\"%s\" 파일 일기 모드로 열기 실패: %m" -#: ../common/controldata_utils.c:56 +#: ../common/controldata_utils.c:61 #, c-format msgid "%s: could not open file \"%s\" for reading: %s\n" msgstr "%s: \"%s\" 파일 읽기 모드로 열기 실패: %s\n" -#: ../common/controldata_utils.c:66 access/transam/timeline.c:346 -#: access/transam/xlog.c:3220 access/transam/xlog.c:10423 -#: access/transam/xlog.c:10436 access/transam/xlog.c:10828 -#: access/transam/xlog.c:10871 access/transam/xlog.c:10910 -#: access/transam/xlog.c:10953 access/transam/xlogfuncs.c:665 -#: access/transam/xlogfuncs.c:684 commands/extension.c:3151 -#: replication/logical/origin.c:665 replication/logical/origin.c:695 -#: replication/logical/reorderbuffer.c:3099 replication/walsender.c:499 -#: storage/file/copydir.c:176 utils/adt/genfile.c:151 +#: ../common/controldata_utils.c:71 access/transam/timeline.c:348 +#: access/transam/xlog.c:3384 access/transam/xlog.c:10787 +#: access/transam/xlog.c:10800 access/transam/xlog.c:11196 +#: access/transam/xlog.c:11239 access/transam/xlog.c:11278 +#: access/transam/xlog.c:11321 access/transam/xlogfuncs.c:668 +#: access/transam/xlogfuncs.c:687 
commands/extension.c:3340 libpq/hba.c:499 +#: replication/logical/origin.c:681 replication/logical/origin.c:711 +#: replication/logical/reorderbuffer.c:3064 replication/walsender.c:506 +#: storage/file/copydir.c:178 utils/adt/genfile.c:152 utils/adt/misc.c:924 #, c-format msgid "could not read file \"%s\": %m" msgstr "\"%s\" 파일을 읽을 수 없음: %m" -#: ../common/controldata_utils.c:69 +#: ../common/controldata_utils.c:74 #, c-format msgid "%s: could not read file \"%s\": %s\n" msgstr "%s: \"%s\" 파일을 읽을 수 없습니다: %s\n" -#: ../common/controldata_utils.c:86 -msgid "calculated CRC checksum does not match value stored in file" -msgstr "계산된 CRC 체크섬 값이 파일에 저장된 값과 다름" - -#: ../common/controldata_utils.c:88 -#, c-format -msgid "" -"WARNING: Calculated CRC checksum does not match value stored in file.\n" -"Either the file is corrupt, or it has a different layout than this program\n" -"is expecting. The results below are untrustworthy.\n" -"\n" -msgstr "" -"경고: 계산된 CRC 체크섬값이 파일에 있는 값과 틀립니다.\n" -"이 경우는 파일이 손상되었거나, 이 프로그램과 컨트롤 파일의 버전이 틀린\n" -"경우입니다. 결과값들은 믿지 못할 값들이 출력될 수 있습니다.\n" -"\n" - -#: ../common/controldata_utils.c:97 +#: ../common/controldata_utils.c:95 msgid "byte ordering mismatch" msgstr "바이트 순서 불일치" -#: ../common/controldata_utils.c:99 +#: ../common/controldata_utils.c:97 #, c-format msgid "" "WARNING: possible byte ordering mismatch\n" @@ -126,7 +109,9 @@ msgstr "pclose 실패: %s" #: ../common/fe_memutils.c:35 ../common/fe_memutils.c:75 #: ../common/fe_memutils.c:98 ../common/psprintf.c:181 ../port/path.c:632 -#: ../port/path.c:670 ../port/path.c:687 +#: ../port/path.c:670 ../port/path.c:687 utils/misc/ps_status.c:171 +#: utils/misc/ps_status.c:179 utils/misc/ps_status.c:209 +#: utils/misc/ps_status.c:217 #, c-format msgid "out of memory\n" msgstr "메모리 부족\n" @@ -136,6 +121,37 @@ msgstr "메모리 부족\n" msgid "cannot duplicate null pointer (internal error)\n" msgstr "null 포인터를 중복할 수 없음 (내부 오류)\n" +#: ../common/file_utils.c:82 ../common/file_utils.c:186 +#, c-format +msgid "%s: could not stat file \"%s\": %s\n" +msgstr "%s: \"%s\" 파일을 상태 정보를 읽을 수 없습니다: %s\n" + +#: ../common/file_utils.c:162 +#, c-format +msgid "%s: could not open directory \"%s\": %s\n" +msgstr "%s: \"%s\" 디렉터리 열 수 없음: %s\n" + +#: ../common/file_utils.c:198 +#, c-format +msgid "%s: could not read directory \"%s\": %s\n" +msgstr "%s: \"%s\" 디렉터리를 읽을 수 없음: %s\n" + +#: ../common/file_utils.c:231 ../common/file_utils.c:291 +#: ../common/file_utils.c:367 +#, c-format +msgid "%s: could not open file \"%s\": %s\n" +msgstr "%s: \"%s\" 파일을 열 수 없음: %s\n" + +#: ../common/file_utils.c:304 ../common/file_utils.c:376 +#, c-format +msgid "%s: could not fsync file \"%s\": %s\n" +msgstr "%s: \"%s\" 파일 fsync 실패: %s\n" + +#: ../common/file_utils.c:387 +#, c-format +msgid "%s: could not rename file \"%s\" to \"%s\": %s\n" +msgstr "%s: \"%s\" 파일을 \"%s\" 파일로 이름을 바꿀 수 없음: %s\n" + #: ../common/pgfnames.c:45 #, c-format msgid "could not open directory \"%s\": %s\n" @@ -152,31 +168,32 @@ msgid "could not close directory \"%s\": %s\n" msgstr "\"%s\" 디렉터리를 닫을 수 없음: %s\n" #: ../common/psprintf.c:179 ../port/path.c:630 ../port/path.c:668 -#: ../port/path.c:685 access/transam/twophase.c:1262 -#: access/transam/xlog.c:6108 lib/stringinfo.c:258 libpq/auth.c:850 -#: libpq/auth.c:1213 libpq/auth.c:1281 libpq/auth.c:1797 -#: postmaster/bgworker.c:289 postmaster/bgworker.c:796 -#: postmaster/postmaster.c:2335 postmaster/postmaster.c:2366 -#: postmaster/postmaster.c:3899 postmaster/postmaster.c:4589 -#: postmaster/postmaster.c:4664 postmaster/postmaster.c:5339 -#: 
postmaster/postmaster.c:5603 -#: replication/libpqwalreceiver/libpqwalreceiver.c:143 -#: replication/logical/logical.c:168 storage/buffer/localbuf.c:436 -#: storage/file/fd.c:736 storage/file/fd.c:1164 storage/file/fd.c:1282 -#: storage/file/fd.c:1993 storage/ipc/procarray.c:1061 -#: storage/ipc/procarray.c:1547 storage/ipc/procarray.c:1554 -#: storage/ipc/procarray.c:1968 storage/ipc/procarray.c:2571 -#: utils/adt/formatting.c:1522 utils/adt/formatting.c:1642 -#: utils/adt/formatting.c:1763 utils/adt/pg_locale.c:463 -#: utils/adt/pg_locale.c:647 utils/adt/regexp.c:219 utils/adt/varlena.c:4440 -#: utils/adt/varlena.c:4461 utils/fmgr/dfmgr.c:216 utils/hash/dynahash.c:429 -#: utils/hash/dynahash.c:535 utils/hash/dynahash.c:1047 utils/mb/mbutils.c:376 -#: utils/mb/mbutils.c:709 utils/misc/guc.c:3888 utils/misc/guc.c:3904 -#: utils/misc/guc.c:3917 utils/misc/guc.c:6863 utils/misc/tzparser.c:468 -#: utils/mmgr/aset.c:509 utils/mmgr/mcxt.c:767 utils/mmgr/mcxt.c:802 -#: utils/mmgr/mcxt.c:839 utils/mmgr/mcxt.c:876 utils/mmgr/mcxt.c:910 -#: utils/mmgr/mcxt.c:939 utils/mmgr/mcxt.c:973 utils/mmgr/mcxt.c:1055 -#: utils/mmgr/mcxt.c:1089 utils/mmgr/mcxt.c:1138 +#: ../port/path.c:685 access/transam/twophase.c:1306 +#: access/transam/xlog.c:6355 lib/stringinfo.c:258 libpq/auth.c:1126 +#: libpq/auth.c:1492 libpq/auth.c:1560 libpq/auth.c:2076 +#: postmaster/bgworker.c:337 postmaster/bgworker.c:908 +#: postmaster/postmaster.c:2391 postmaster/postmaster.c:2413 +#: postmaster/postmaster.c:3975 postmaster/postmaster.c:4683 +#: postmaster/postmaster.c:4758 postmaster/postmaster.c:5436 +#: postmaster/postmaster.c:5773 +#: replication/libpqwalreceiver/libpqwalreceiver.c:256 +#: replication/logical/logical.c:170 storage/buffer/localbuf.c:436 +#: storage/file/fd.c:773 storage/file/fd.c:1201 storage/file/fd.c:1319 +#: storage/file/fd.c:2044 storage/ipc/procarray.c:1058 +#: storage/ipc/procarray.c:1546 storage/ipc/procarray.c:1553 +#: storage/ipc/procarray.c:1970 storage/ipc/procarray.c:2581 +#: utils/adt/formatting.c:1579 utils/adt/formatting.c:1703 +#: utils/adt/formatting.c:1828 utils/adt/pg_locale.c:468 +#: utils/adt/pg_locale.c:652 utils/adt/regexp.c:219 utils/adt/varlena.c:4585 +#: utils/adt/varlena.c:4606 utils/fmgr/dfmgr.c:221 utils/hash/dynahash.c:444 +#: utils/hash/dynahash.c:553 utils/hash/dynahash.c:1065 utils/mb/mbutils.c:376 +#: utils/mb/mbutils.c:709 utils/misc/guc.c:3998 utils/misc/guc.c:4014 +#: utils/misc/guc.c:4027 utils/misc/guc.c:6976 utils/misc/tzparser.c:468 +#: utils/mmgr/aset.c:404 utils/mmgr/dsa.c:713 utils/mmgr/dsa.c:795 +#: utils/mmgr/mcxt.c:725 utils/mmgr/mcxt.c:760 utils/mmgr/mcxt.c:797 +#: utils/mmgr/mcxt.c:834 utils/mmgr/mcxt.c:868 utils/mmgr/mcxt.c:897 +#: utils/mmgr/mcxt.c:931 utils/mmgr/mcxt.c:982 utils/mmgr/mcxt.c:1016 +#: utils/mmgr/mcxt.c:1050 #, c-format msgid "out of memory" msgstr "메모리 부족" @@ -236,67 +253,67 @@ msgstr "\"%s\" 파일이나 디렉터리 상태를 확인할 수 없음: %s\n" msgid "could not remove file or directory \"%s\": %s\n" msgstr "\"%s\" 디렉터리를 삭제할 수 없음: %s\n" -#: ../common/username.c:45 +# # nonun 부분 begin +#: ../common/saslprep.c:1090 +#, c-format +msgid "password too long" +msgstr "비밀번호가 너무 깁니다." 
+ +#: ../common/username.c:43 #, c-format msgid "could not look up effective user ID %ld: %s" msgstr "%ld UID를 찾을 수 없음: %s" -#: ../common/username.c:47 libpq/auth.c:1744 +#: ../common/username.c:45 libpq/auth.c:2023 msgid "user does not exist" msgstr "사용자 없음" -#: ../common/username.c:62 +#: ../common/username.c:60 #, c-format msgid "user name lookup failure: error code %lu" msgstr "사용자 이름 찾기 실패: 오류 코드 %lu" -#: ../common/wait_error.c:47 +#: ../common/wait_error.c:45 #, c-format msgid "command not executable" msgstr "명령을 실행할 수 없음" -#: ../common/wait_error.c:51 +#: ../common/wait_error.c:49 #, c-format msgid "command not found" msgstr "해당 명령어 없음" -#: ../common/wait_error.c:56 +#: ../common/wait_error.c:54 #, c-format msgid "child process exited with exit code %d" msgstr "하위 프로그램은 %d 코드로 마쳤습니다" -#: ../common/wait_error.c:63 +#: ../common/wait_error.c:61 #, c-format msgid "child process was terminated by exception 0x%X" msgstr "0x%X 예외처리로 하위 프로세스가 종료되었습니다" -#: ../common/wait_error.c:73 +#: ../common/wait_error.c:71 #, c-format msgid "child process was terminated by signal %s" msgstr "%s 시그널이 감지되어 하위 프로세스가 종료되었습니다" -#: ../common/wait_error.c:77 +#: ../common/wait_error.c:75 #, c-format msgid "child process was terminated by signal %d" msgstr "하위 프로그램은 %d 신호에 의해서 종료되었습니다" -#: ../common/wait_error.c:82 +#: ../common/wait_error.c:80 #, c-format msgid "child process exited with unrecognized status %d" msgstr "하위 프로그램은 예상치 못한 %d 상태값으로 종료되었습니다" -#: ../port/chklocale.c:293 +#: ../port/chklocale.c:288 #, c-format msgid "could not determine encoding for codeset \"%s\"" msgstr "\"%s\" 코드 세트 환경에 사용할 인코딩을 결정할 수 없습니다" -#: ../port/chklocale.c:294 ../port/chklocale.c:423 -#: postmaster/postmaster.c:4868 -#, c-format -msgid "Please report this to ." -msgstr "이 내용을 주소로 보고하십시오." - -#: ../port/chklocale.c:415 ../port/chklocale.c:421 +#: ../port/chklocale.c:409 ../port/chklocale.c:415 #, c-format msgid "could not determine encoding for locale \"%s\": codeset is \"%s\"" msgstr "" @@ -322,25 +339,25 @@ msgstr "\"%s\" 파일의 정션을 구할 수 없음: %s" msgid "could not get junction for \"%s\": %s\n" msgstr "\"%s\" 파일의 정션을 구할 수 없음: %s\n" -#: ../port/open.c:112 +#: ../port/open.c:111 #, c-format msgid "could not open file \"%s\": %s" msgstr "\"%s\" 파일을 열 수 없음: %s" -#: ../port/open.c:113 +#: ../port/open.c:112 msgid "lock violation" msgstr "잠금 위반" -#: ../port/open.c:113 +#: ../port/open.c:112 msgid "sharing violation" msgstr "공유 위반" -#: ../port/open.c:114 +#: ../port/open.c:113 #, c-format msgid "Continuing to retry for 30 seconds." msgstr "30초 동안 계속해서 다시 시도합니다." 
-#: ../port/open.c:115 +#: ../port/open.c:114 #, c-format msgid "" "You might have antivirus, backup, or similar software interfering with the " @@ -359,175 +376,216 @@ msgstr "현재 작업 디렉터리를 알 수 없음: %s\n" msgid "unrecognized error %d" msgstr "알 수 없는 오류 %d" -#: ../port/win32security.c:68 -#, c-format -msgid "could not open process token: error code %lu\n" -msgstr "프로세스 토큰을 열 수 없음: 오류 코드 %lu\n" - -#: ../port/win32security.c:89 +#: ../port/win32security.c:62 #, c-format msgid "could not get SID for Administrators group: error code %lu\n" msgstr "Administrators 그룹의 SID를 가져올 수 없음: 오류 코드 %lu\n" -#: ../port/win32security.c:99 +#: ../port/win32security.c:72 #, c-format msgid "could not get SID for PowerUsers group: error code %lu\n" msgstr "PowerUsers 그룹의 SID를 가져올 수 없음: 오류 코드 %lu\n" -#: access/brin/brin.c:810 +#: ../port/win32security.c:80 +#, c-format +msgid "could not check access token membership: error code %lu\n" +msgstr "토큰 맴버쉽 접근을 확인 할 수 없음: 오류 코드 %lu\n" + +#: access/brin/brin.c:867 access/brin/brin.c:938 +#, c-format +msgid "block number out of range: %s" +msgstr "블록 번호가 범위를 벗어남: %s" + +#: access/brin/brin.c:890 access/brin/brin.c:961 #, c-format msgid "\"%s\" is not a BRIN index" msgstr "\"%s\" 객체는 BRIN 인덱스가 아닙니다" -#: access/brin/brin.c:826 +#: access/brin/brin.c:906 access/brin/brin.c:977 #, c-format msgid "could not open parent table of index %s" msgstr "%s 인덱스에 대한 상위 테이블을 열 수 없음" -#: access/brin/brin_pageops.c:76 access/brin/brin_pageops.c:362 -#: access/brin/brin_pageops.c:828 +#: access/brin/brin_pageops.c:76 access/brin/brin_pageops.c:358 +#: access/brin/brin_pageops.c:824 access/gin/ginentrypage.c:110 +#: access/gist/gist.c:1363 access/nbtree/nbtinsert.c:577 +#: access/nbtree/nbtsort.c:488 access/spgist/spgdoinsert.c:1933 +#, c-format +msgid "index row size %zu exceeds maximum %zu for index \"%s\"" +msgstr "인덱스 행 크기 %zu이(가) 최대값 %zu(\"%s\" 인덱스)을(를) 초과함" + +#: access/brin/brin_revmap.c:382 access/brin/brin_revmap.c:388 #, c-format -msgid "index row size %lu exceeds maximum %lu for index \"%s\"" -msgstr "인덱스 행 크기 %lu이(가) 최대값 %lu(\"%s\" 인덱스)을(를) 초과함" +msgid "corrupted BRIN index: inconsistent range map" +msgstr "" + +#: access/brin/brin_revmap.c:404 +#, c-format +msgid "leftover placeholder tuple detected in BRIN index \"%s\", deleting" +msgstr "" -#: access/brin/brin_revmap.c:459 +#: access/brin/brin_revmap.c:601 #, c-format msgid "unexpected page type 0x%04X in BRIN index \"%s\" block %u" msgstr "예상치 못한 0x%04X 페이지 타입: \"%s\" BRIN 인덱스 %u 블록" -#: access/brin/brin_validate.c:115 +#: access/brin/brin_validate.c:116 access/gin/ginvalidate.c:149 +#: access/gist/gistvalidate.c:146 access/hash/hashvalidate.c:131 +#: access/nbtree/nbtvalidate.c:101 access/spgist/spgvalidate.c:116 #, c-format msgid "" -"brin operator family \"%s\" contains function %s with invalid support number " -"%d" +"operator family \"%s\" of access method %s contains function %s with invalid " +"support number %d" msgstr "" -"\"%s\" brin 연산자 패밀리에 %s 함수가 잘못된 지원 번호(%d)로 지정되었습니다." +"\"%s\" 연산자 패밀리(접근 방법: %s)에 포함된 %s 함수가 잘못된 지원 번호 %d " +"로 지정되었습니다." 
-#: access/brin/brin_validate.c:131 +#: access/brin/brin_validate.c:132 access/gin/ginvalidate.c:161 +#: access/gist/gistvalidate.c:158 access/hash/hashvalidate.c:114 +#: access/nbtree/nbtvalidate.c:113 access/spgist/spgvalidate.c:128 #, c-format msgid "" -"brin operator family \"%s\" contains function %s with wrong signature for " -"support number %d" +"operator family \"%s\" of access method %s contains function %s with wrong " +"signature for support number %d" msgstr "" -"\"%s\" brin 연산자 패밀리에 %s 함수가 잘못된 signature 번호(%d)로 지정되었습" -"니다." +"\"%s\" 연산자 패밀리(접근 방법: %s)에 포함된 %s 함수가 잘못된 signature 지원 " +"번호 %d 로 지정되었습니다." -#: access/brin/brin_validate.c:153 +#: access/brin/brin_validate.c:154 access/gin/ginvalidate.c:180 +#: access/gist/gistvalidate.c:178 access/hash/hashvalidate.c:152 +#: access/nbtree/nbtvalidate.c:133 access/spgist/spgvalidate.c:147 #, c-format msgid "" -"brin operator family \"%s\" contains operator %s with invalid strategy " -"number %d" +"operator family \"%s\" of access method %s contains operator %s with invalid " +"strategy number %d" msgstr "" -"\"%s\" brin 연산자 패밀리에 %s 연산자가 잘못된 전략 번호(%d)로 지정되었습니" -"다." +"\"%s\" 연산자 패밀리(접근 방법: %s)에 포함된 %s 연산자의 %d 번 전략 번호가 잘" +"못되었습니다." -#: access/brin/brin_validate.c:182 +#: access/brin/brin_validate.c:183 access/gin/ginvalidate.c:193 +#: access/hash/hashvalidate.c:165 access/nbtree/nbtvalidate.c:146 +#: access/spgist/spgvalidate.c:160 #, c-format msgid "" -"brin operator family \"%s\" contains invalid ORDER BY specification for " -"operator %s" +"operator family \"%s\" of access method %s contains invalid ORDER BY " +"specification for operator %s" msgstr "" -"\"%s\" brin 연산자 패밀리에 %s 연산자가 잘못된 ORDER BY 명세를 사용합니다." +"\"%s\" 연산자 패밀리(접근 방법: %s)에 %s 연산자가 잘못된 ORDER BY 명세를 사용" +"합니다." -#: access/brin/brin_validate.c:195 +#: access/brin/brin_validate.c:196 access/gin/ginvalidate.c:206 +#: access/gist/gistvalidate.c:226 access/hash/hashvalidate.c:178 +#: access/nbtree/nbtvalidate.c:159 access/spgist/spgvalidate.c:173 #, c-format -msgid "brin operator family \"%s\" contains operator %s with wrong signature" -msgstr "\"%s\" brin 연산자 패밀리에 %s 연산자가 잘못된 signature를 사용합니다." +msgid "" +"operator family \"%s\" of access method %s contains operator %s with wrong " +"signature" +msgstr "" +"\"%s\" 연산자 패밀리(접근 방법: %s)에 %s 연산자가 잘못된 기호를 사용합니다." 
-#: access/brin/brin_validate.c:233 +#: access/brin/brin_validate.c:234 access/hash/hashvalidate.c:218 +#: access/nbtree/nbtvalidate.c:201 access/spgist/spgvalidate.c:201 #, c-format -msgid "brin operator family \"%s\" is missing operator(s) for types %s and %s" -msgstr "\"%s\" brin 연산자 패밀리에는 %s, %s 자료형용 연산자가 없습니다" +msgid "" +"operator family \"%s\" of access method %s is missing operator(s) for types " +"%s and %s" +msgstr "" +"\"%s\" 연산자 패밀리(접근 방법: %s)에는 %s, %s 자료형용 연산자가 없습니다" -#: access/brin/brin_validate.c:243 +#: access/brin/brin_validate.c:244 #, c-format msgid "" -"brin operator family \"%s\" is missing support function(s) for types %s and " -"%s" -msgstr "\"%s\" brin 연산자 패밀리에는 %s, %s 자료형용으로 쓸 함수가 없습니다" +"operator family \"%s\" of access method %s is missing support function(s) " +"for types %s and %s" +msgstr "" +"\"%s\" 연산자 패밀리(접근 방법: %s)에는 %s, %s 자료형용으로 쓸 함수가 없습니" +"다" -#: access/brin/brin_validate.c:256 +#: access/brin/brin_validate.c:257 access/hash/hashvalidate.c:232 +#: access/nbtree/nbtvalidate.c:225 access/spgist/spgvalidate.c:234 #, c-format -msgid "brin operator class \"%s\" is missing operator(s)" -msgstr "\"%s\" brin 연산자 클래스에 연산자가 빠졌습니다" +msgid "operator class \"%s\" of access method %s is missing operator(s)" +msgstr "\"%s\" 연산자 클래스(접근 방법: %s)에 연산자가 빠졌습니다" -#: access/brin/brin_validate.c:267 +#: access/brin/brin_validate.c:268 access/gin/ginvalidate.c:247 +#: access/gist/gistvalidate.c:265 #, c-format -msgid "brin operator class \"%s\" is missing support function %d" -msgstr "\"%s\" brin 연산자 클래스에 %d 지원 함수가 없음" +msgid "" +"operator class \"%s\" of access method %s is missing support function %d" +msgstr "\"%s\" 연산자 클래스(접근 방법: %s)에 %d 지원 함수가 빠졌습니다." -#: access/common/heaptuple.c:708 access/common/heaptuple.c:1339 +#: access/common/heaptuple.c:708 access/common/heaptuple.c:1405 #, c-format msgid "number of columns (%d) exceeds limit (%d)" -msgstr "열 수(%d)가 최대값(%d)을 초과했습니다" +msgstr "칼럼 개수(%d)가 최대값(%d)을 초과했습니다" #: access/common/indextuple.c:60 #, c-format msgid "number of index columns (%d) exceeds limit (%d)" -msgstr "인덱스 열 수(%d)가 최대값(%d)을 초과했습니다" +msgstr "인덱스 칼럼 개수(%d)가 최대값(%d)을 초과했습니다" -#: access/common/indextuple.c:176 access/spgist/spgutils.c:642 +#: access/common/indextuple.c:176 access/spgist/spgutils.c:647 #, c-format msgid "index row requires %zu bytes, maximum size is %zu" msgstr "인덱스 행(row)은 %zu 바이트를 필요로 함, 최대 크기는 %zu" -#: access/common/printtup.c:292 tcop/fastpath.c:182 tcop/fastpath.c:544 -#: tcop/postgres.c:1719 +#: access/common/printtup.c:290 tcop/fastpath.c:182 tcop/fastpath.c:532 +#: tcop/postgres.c:1726 #, c-format msgid "unsupported format code: %d" msgstr "지원하지 않는 포맷 코드: %d" -#: access/common/reloptions.c:493 +#: access/common/reloptions.c:540 #, c-format msgid "user-defined relation parameter types limit exceeded" msgstr "사용자 정의 관계 매개 변수 형식 제한을 초과함" -#: access/common/reloptions.c:775 +#: access/common/reloptions.c:821 #, c-format msgid "RESET must not include values for parameters" msgstr "매개 변수의 값으로 RESET은 올 수 없음" -#: access/common/reloptions.c:808 +#: access/common/reloptions.c:854 #, c-format msgid "unrecognized parameter namespace \"%s\"" msgstr "\"%s\" 매개 변수 네임스페이스를 인식할 수 없음" -#: access/common/reloptions.c:1050 parser/parse_clause.c:281 +#: access/common/reloptions.c:1094 parser/parse_clause.c:270 #, c-format msgid "unrecognized parameter \"%s\"" msgstr "알 수 없는 환경 설정 이름입니다 \"%s\"" -#: access/common/reloptions.c:1080 +#: access/common/reloptions.c:1124 #, c-format msgid "parameter \"%s\" specified more than once" msgstr "\"%s\" 매개 변수가 여러 번 지정됨" -#: 
access/common/reloptions.c:1096 +#: access/common/reloptions.c:1140 #, c-format msgid "invalid value for boolean option \"%s\": %s" msgstr "\"%s\" 부울 옵션 값이 잘못됨: %s" -#: access/common/reloptions.c:1108 +#: access/common/reloptions.c:1152 #, c-format msgid "invalid value for integer option \"%s\": %s" msgstr "\"%s\" 정수 옵션 값이 잘못됨: %s" -#: access/common/reloptions.c:1114 access/common/reloptions.c:1134 +#: access/common/reloptions.c:1158 access/common/reloptions.c:1178 #, c-format msgid "value %s out of bounds for option \"%s\"" msgstr "값 %s은(는) \"%s\" 옵션 범위를 벗어남" -#: access/common/reloptions.c:1116 +#: access/common/reloptions.c:1160 #, c-format msgid "Valid values are between \"%d\" and \"%d\"." msgstr "유효한 값은 \"%d\"에서 \"%d\" 사이입니다." -#: access/common/reloptions.c:1128 +#: access/common/reloptions.c:1172 #, c-format msgid "invalid value for floating point option \"%s\": %s" msgstr "\"%s\" 부동 소수점 옵션 값이 잘못됨: %s" -#: access/common/reloptions.c:1136 +#: access/common/reloptions.c:1180 #, c-format msgid "Valid values are between \"%f\" and \"%f\"." msgstr "유효한 값은 \"%f\"에서 \"%f\" 사이입니다." @@ -545,7 +603,7 @@ msgid "" "Number of returned columns (%d) does not match expected column count (%d)." msgstr "반환할 칼럼 수(%d)와 예상되는 칼럼수(%d)가 다릅니다." -#: access/common/tupconvert.c:314 +#: access/common/tupconvert.c:318 #, c-format msgid "" "Attribute \"%s\" of type %s does not match corresponding attribute of type " @@ -553,15 +611,16 @@ msgid "" msgstr "" " \"%s\" 속성(대상 자료형 %s)이 %s 자료형의 속성 가운데 관련된 것이 없습니다" -#: access/common/tupconvert.c:326 +#: access/common/tupconvert.c:330 #, c-format msgid "Attribute \"%s\" of type %s does not exist in type %s." msgstr "\"%s\" 속성(대상 자료형 %s)이 %s 자료형에는 없습니다." -#: access/common/tupdesc.c:635 parser/parse_relation.c:1518 +#: access/common/tupdesc.c:728 parser/parse_clause.c:841 +#: parser/parse_relation.c:1544 #, c-format msgid "column \"%s\" cannot be declared SETOF" -msgstr "\"%s\" 열은 SETOF를 지정할 수 없습니다" +msgstr "\"%s\" 칼럼은 SETOF를 지정할 수 없습니다" #: access/gin/ginbulk.c:44 #, c-format @@ -573,33 +632,26 @@ msgstr "포스팅 목록이 너무 깁니다" msgid "Reduce maintenance_work_mem." msgstr "maintenance_work_mem 설정값을 줄이세요." -#: access/gin/ginentrypage.c:109 access/gist/gist.c:1337 -#: access/nbtree/nbtinsert.c:576 access/nbtree/nbtsort.c:488 -#: access/spgist/spgdoinsert.c:1907 -#, c-format -msgid "index row size %zu exceeds maximum %zu for index \"%s\"" -msgstr "인덱스 행 크기 %zu이(가) 최대값 %zu(\"%s\" 인덱스)을(를) 초과함" - -#: access/gin/ginfast.c:989 access/transam/xlog.c:9858 -#: access/transam/xlog.c:10362 access/transam/xlogfuncs.c:293 -#: access/transam/xlogfuncs.c:320 access/transam/xlogfuncs.c:359 -#: access/transam/xlogfuncs.c:380 access/transam/xlogfuncs.c:401 -#: access/transam/xlogfuncs.c:471 access/transam/xlogfuncs.c:527 +#: access/gin/ginfast.c:991 access/transam/xlog.c:10208 +#: access/transam/xlog.c:10726 access/transam/xlogfuncs.c:296 +#: access/transam/xlogfuncs.c:323 access/transam/xlogfuncs.c:362 +#: access/transam/xlogfuncs.c:383 access/transam/xlogfuncs.c:404 +#: access/transam/xlogfuncs.c:474 access/transam/xlogfuncs.c:530 #, c-format msgid "recovery is in progress" msgstr "복구 작업 진행 중" -#: access/gin/ginfast.c:990 +#: access/gin/ginfast.c:992 #, c-format msgid "GIN pending list cannot be cleaned up during recovery." msgstr "GIN 팬딩 목록은 복구 작업 중에는 정리될 수 없습니다." 
-#: access/gin/ginfast.c:997 +#: access/gin/ginfast.c:999 #, c-format msgid "\"%s\" is not a GIN index" msgstr "\"%s\" 객체는 GIN 인덱스가 아닙니다" -#: access/gin/ginfast.c:1008 +#: access/gin/ginfast.c:1010 #, c-format msgid "cannot access temporary indexes of other sessions" msgstr "다른 세션의 임시 인덱스는 접근할 수 없음" @@ -616,71 +668,37 @@ msgstr "" msgid "To fix this, do REINDEX INDEX \"%s\"." msgstr "이 문제를 고치려면, 다음 명령을 수행하세요: REINDEX INDEX \"%s\"" -#: access/gin/ginvalidate.c:92 -#, c-format -msgid "" -"gin operator family \"%s\" contains support procedure %s with cross-type " -"registration" -msgstr "" -"\"%s\" gin 연산자 패밀리에 cross-type registration 으로 %s 지원 프로시져가 포" -"함되어 있습니다." - -#: access/gin/ginvalidate.c:148 +#: access/gin/ginutil.c:134 executor/execExpr.c:1780 +#: utils/adt/arrayfuncs.c:3803 utils/adt/arrayfuncs.c:6323 +#: utils/adt/rowtypes.c:927 #, c-format -msgid "" -"gin operator family \"%s\" contains function %s with invalid support number " -"%d" -msgstr "" -"\"%s\" gin 연산자 패밀리에 포함된 %s 함수가 잘못된 지원 번호 %d 로 지정되었습" -"니다." +msgid "could not identify a comparison function for type %s" +msgstr "%s 자료형에서 사용할 비교함수를 찾을 수 없습니다." -#: access/gin/ginvalidate.c:160 +#: access/gin/ginvalidate.c:93 access/gist/gistvalidate.c:93 +#: access/hash/hashvalidate.c:99 access/spgist/spgvalidate.c:93 #, c-format msgid "" -"gin operator family \"%s\" contains function %s with wrong signature for " -"support number %d" +"operator family \"%s\" of access method %s contains support procedure %s " +"with different left and right input types" msgstr "" -"\"%s\" gin 연산자 패밀리에 포함된 %s 함수가 잘못된 signature 지원 번호 %d 로 " -"지정되었습니다." +"\"%s\" 연산자 패밀리(접근 방법: %s)에 서로 다른 양쪽 입력 자료형 인자를 사용" +"할 수 있는 %s 지원 프로시져가 포함되어 있습니다." -#: access/gin/ginvalidate.c:179 +#: access/gin/ginvalidate.c:257 #, c-format msgid "" -"gin operator family \"%s\" contains operator %s with invalid strategy number " +"operator class \"%s\" of access method %s is missing support function %d or " "%d" msgstr "" -"\"%s\" gin 연산자 패밀리에 포함된 %s 연산자의 %d 번 전략 번호가 잘못되었습니" -"다." - -#: access/gin/ginvalidate.c:192 -#, c-format -msgid "" -"gin operator family \"%s\" contains invalid ORDER BY specification for " -"operator %s" -msgstr "" -"\"%s\" gin 연산자 패밀리에 %s 연산자가 잘못된 ORDER BY 명세를 사용합니다." - -#: access/gin/ginvalidate.c:205 -#, c-format -msgid "gin operator family \"%s\" contains operator %s with wrong signature" -msgstr "\"%s\" gin 연산자 패밀리에 %s 연산자가 잘못된 기호를 사용합니다." - -#: access/gin/ginvalidate.c:246 -#, c-format -msgid "gin operator class \"%s\" is missing support function %d" -msgstr "\"%s\" gin 연산자 클래스에 %d 지원 함수가 빠졌습니다." +"\"%s\" 연산자 클래스(접근 방법: %s)에는 %d 또는 %d 지원 함수가 빠졌습니다" -#: access/gin/ginvalidate.c:256 -#, c-format -msgid "gin operator class \"%s\" is missing support function %d or %d" -msgstr "\"%s\" gin 연산자 클래스에는 %d 또는 %d 지원 함수가 빠졌습니다" - -#: access/gist/gist.c:680 access/gist/gistvacuum.c:258 +#: access/gist/gist.c:706 access/gist/gistvacuum.c:258 #, c-format msgid "index \"%s\" contains an inner tuple marked as invalid" msgstr "\"%s\" 인덱스에 잘못된 내부 튜플이 있다고 확인되었습니다." -#: access/gist/gist.c:682 access/gist/gistvacuum.c:260 +#: access/gist/gist.c:708 access/gist/gistvacuum.c:260 #, c-format msgid "" "This is caused by an incomplete page split at crash recovery before " @@ -689,26 +707,26 @@ msgstr "" "이 문제는 PostgreSQL 9.1 버전으로 업그레이드 하기 전에 장애 복구 처리에서 잘" "못된 페이지 분리 때문에 발생했습니다." 
-#: access/gist/gist.c:683 access/gist/gistutil.c:738 -#: access/gist/gistutil.c:749 access/gist/gistvacuum.c:261 -#: access/hash/hashutil.c:172 access/hash/hashutil.c:183 -#: access/hash/hashutil.c:195 access/hash/hashutil.c:216 -#: access/nbtree/nbtpage.c:518 access/nbtree/nbtpage.c:529 +#: access/gist/gist.c:709 access/gist/gistutil.c:739 +#: access/gist/gistutil.c:750 access/gist/gistvacuum.c:261 +#: access/hash/hashutil.c:241 access/hash/hashutil.c:252 +#: access/hash/hashutil.c:264 access/hash/hashutil.c:285 +#: access/nbtree/nbtpage.c:519 access/nbtree/nbtpage.c:530 #, c-format msgid "Please REINDEX it." msgstr "REINDEX 명령으로 다시 인덱스를 만드세요" -#: access/gist/gistbuild.c:249 +#: access/gist/gistbuild.c:250 #, c-format msgid "invalid value for \"buffering\" option" msgstr "\"buffering\" 옵션 값이 올바르지 않습니다" -#: access/gist/gistbuild.c:250 +#: access/gist/gistbuild.c:251 #, c-format msgid "Valid values are \"on\", \"off\", and \"auto\"." msgstr "유효한 값: \"on\", \"off\", \"auto\"" -#: access/gist/gistbuildbuffers.c:778 utils/sort/logtape.c:209 +#: access/gist/gistbuildbuffers.c:778 utils/sort/logtape.c:231 #, c-format msgid "could not write block %ld of temporary file: %m" msgstr "임시파일의 %ld 블럭을 쓸 수 없음: %m" @@ -716,7 +734,7 @@ msgstr "임시파일의 %ld 블럭을 쓸 수 없음: %m" #: access/gist/gistsplit.c:446 #, c-format msgid "picksplit method for column %d of index \"%s\" failed" -msgstr "%d 열(\"%s\" 인덱스)에 대한 picksplit 메서드 실패" +msgstr "%d 칼럼(\"%s\" 인덱스)에 대한 picksplit 메서드 실패" #: access/gist/gistsplit.c:448 #, c-format @@ -725,355 +743,260 @@ msgid "" "the column as the second one in the CREATE INDEX command." msgstr "" "인덱스가 최적화되지 않았습니다. 최적화하려면 개발자에게 문의하거나, CREATE " -"INDEX 명령에서 해당 열을 두 번째 인덱스로 사용하십시오." +"INDEX 명령에서 해당 칼럼을 두 번째 인덱스로 사용하십시오." -#: access/gist/gistutil.c:735 access/hash/hashutil.c:169 -#: access/nbtree/nbtpage.c:515 +#: access/gist/gistutil.c:736 access/hash/hashutil.c:238 +#: access/nbtree/nbtpage.c:516 #, c-format msgid "index \"%s\" contains unexpected zero page at block %u" msgstr "\"%s\" 인덱스의 %u번째 블럭에서 예상치 않은 zero page가 있습니다" -#: access/gist/gistutil.c:746 access/hash/hashutil.c:180 -#: access/hash/hashutil.c:192 access/nbtree/nbtpage.c:526 +#: access/gist/gistutil.c:747 access/hash/hashutil.c:249 +#: access/hash/hashutil.c:261 access/nbtree/nbtpage.c:527 #, c-format msgid "index \"%s\" contains corrupted page at block %u" msgstr "\"%s\" 인덱스트 %u번째 블럭이 속상되었습니다" -#: access/gist/gistvalidate.c:92 -#, c-format -msgid "" -"gist operator family \"%s\" contains support procedure %s with cross-type " -"registration" -msgstr "" -"\"%s\" gist 연산자 패밀리에 cross-type registration 으로 %s 지원 프로시져가 " -"포함되어 있습니다." - -#: access/gist/gistvalidate.c:145 -#, c-format -msgid "" -"gist operator family \"%s\" contains function %s with invalid support number " -"%d" -msgstr "" -"\"%s\" gist 연산자 패밀리에 포함된 %s 함수가 잘못된 지원 번호 %d 로 지정되었" -"습니다." - -#: access/gist/gistvalidate.c:157 -#, c-format -msgid "" -"gist operator family \"%s\" contains function %s with wrong signature for " -"support number %d" -msgstr "" -"\"%s\" gist 연산자 패밀리에 포함된 %s 함수가 잘못된 signature 지원 번호 %d " -"로 지정되었습니다." - -#: access/gist/gistvalidate.c:177 -#, c-format -msgid "" -"gist operator family \"%s\" contains operator %s with invalid strategy " -"number %d" -msgstr "" -"\"%s\" gist 연산자 패밀리에 포함된 %s 연산자의 %d 번 전략 번호가 잘못되었습니" -"다." 
- -#: access/gist/gistvalidate.c:195 +#: access/gist/gistvalidate.c:196 #, c-format msgid "" -"gist operator family \"%s\" contains unsupported ORDER BY specification for " -"operator %s" +"operator family \"%s\" of access method %s contains unsupported ORDER BY " +"specification for operator %s" msgstr "" -"\"%s\" gist 연산자 패밀리에 %s 연산자가 지원하지 않는 ORDER BY 명세를 사용합" -"니다." +"\"%s\" 연산자 패밀리(접근 방법: %s)에 %s 연산자가 지원하지 않는 ORDER BY 명세" +"를 사용합니다." -#: access/gist/gistvalidate.c:206 +#: access/gist/gistvalidate.c:207 #, c-format msgid "" -"gist operator family \"%s\" contains incorrect ORDER BY opfamily " -"specification for operator %s" +"operator family \"%s\" of access method %s contains incorrect ORDER BY " +"opfamily specification for operator %s" msgstr "" -"\"%s\" gist 연산자 패밀리에 %s 연산자가 잘못된 ORDER BY 명세를 사용합니다." - -#: access/gist/gistvalidate.c:225 -#, c-format -msgid "gist operator family \"%s\" contains operator %s with wrong signature" -msgstr "\"%s\" gist 연산자 패밀리에 %s 연산자가 잘못된 기호를 사용합니다." - -#: access/gist/gistvalidate.c:264 -#, c-format -msgid "gist operator class \"%s\" is missing support function %d" -msgstr "\"%s\" gist 연산자 클래스에 %d 지원 함수가 빠졌습니다." +"\"%s\" 연산자 패밀리(접근 방법: %s)에 %s 연산자가 잘못된 ORDER BY 명세를 사용" +"합니다." -#: access/hash/hashinsert.c:70 +#: access/hash/hashinsert.c:82 #, c-format msgid "index row size %zu exceeds hash maximum %zu" msgstr "인덱스 행 크기가 초과됨: 현재값 %zu, 최대값 %zu" -#: access/hash/hashinsert.c:72 access/spgist/spgdoinsert.c:1911 -#: access/spgist/spgutils.c:703 +#: access/hash/hashinsert.c:84 access/spgist/spgdoinsert.c:1937 +#: access/spgist/spgutils.c:708 #, c-format msgid "Values larger than a buffer page cannot be indexed." msgstr "버퍼 페이지보다 큰 값은 인덱싱할 수 없습니다." -#: access/hash/hashovfl.c:546 +#: access/hash/hashovfl.c:87 +#, c-format +msgid "invalid overflow block number %u" +msgstr "잘못된 오버플로우 블록 번호: %u" + +#: access/hash/hashovfl.c:283 access/hash/hashpage.c:462 #, c-format msgid "out of overflow pages in hash index \"%s\"" msgstr "\"%s\" 해시 인덱스에서 오버플로우 페이지 초과" -#: access/hash/hashsearch.c:153 +#: access/hash/hashsearch.c:250 #, c-format msgid "hash indexes do not support whole-index scans" msgstr "해시 인덱스는 whole-index scan을 지원하지 않음" -#: access/hash/hashutil.c:208 +#: access/hash/hashutil.c:277 #, c-format msgid "index \"%s\" is not a hash index" msgstr "\"%s\" 인덱스는 해시 인덱스가 아님" -#: access/hash/hashutil.c:214 +#: access/hash/hashutil.c:283 #, c-format msgid "index \"%s\" has wrong hash version" msgstr "\"%s\" 인덱스는 잘못된 해시 버전임" -#: access/hash/hashvalidate.c:98 -#, c-format -msgid "" -"hash operator family \"%s\" contains support procedure %s with cross-type " -"registration" -msgstr "" -"\"%s\" hash 연산자 패밀리에 cross-type registration 으로 %s 지원 프로시져가 " -"포함되어 있습니다." - -#: access/hash/hashvalidate.c:113 -#, c-format -msgid "" -"hash operator family \"%s\" contains function %s with wrong signature for " -"support number %d" -msgstr "" -"\"%s\" hash 연산자 패밀리에 포함된 %s 함수가 잘못된 signature 지원 번호 %d " -"로 지정되었습니다." - -#: access/hash/hashvalidate.c:130 -#, c-format -msgid "" -"hash operator family \"%s\" contains function %s with invalid support number " -"%d" -msgstr "" -"\"%s\" hash 연산자 패밀리에 포함된 %s 함수가 잘못된 지원 번호 %d 로 지정되었" -"습니다." - -#: access/hash/hashvalidate.c:151 -#, c-format -msgid "" -"hash operator family \"%s\" contains operator %s with invalid strategy " -"number %d" -msgstr "" -"\"%s\" hash 연산자 패밀리에 포함된 %s 연산자의 %d 번 전략 번호가 잘못되었습니" -"다." 
- -#: access/hash/hashvalidate.c:164 +#: access/hash/hashvalidate.c:190 #, c-format msgid "" -"hash operator family \"%s\" contains invalid ORDER BY specification for " +"operator family \"%s\" of access method %s lacks support function for " "operator %s" -msgstr "" -"\"%s\" hash 연산자 패밀리에 %s 연산자가 잘못된 ORDER BY 명세를 사용합니다." - -#: access/hash/hashvalidate.c:177 -#, c-format -msgid "hash operator family \"%s\" contains operator %s with wrong signature" -msgstr "\"%s\" hash 연산자 패밀리에 %s 연산자가 잘못된 기호를 사용합니다." - -#: access/hash/hashvalidate.c:189 -#, c-format -msgid "hash operator family \"%s\" lacks support function for operator %s" -msgstr "\"%s\" hash 연산자 패밀리에 %s 연산자용 지원 함수가 없음" - -#: access/hash/hashvalidate.c:217 -#, c-format -msgid "hash operator family \"%s\" is missing operator(s) for types %s and %s" -msgstr "" -"\"%s\" hash 연산자 패밀리에 %s 자료형과 %s 자료형을 다루는 연산자가 없음" - -#: access/hash/hashvalidate.c:231 -#, c-format -msgid "hash operator class \"%s\" is missing operator(s)" -msgstr "%s hash 연산자 클래스에 연산자가 빠졌음" +msgstr "\"%s\" 연산자 패밀리(접근 방법: %s)에 %s 연산자용 지원 함수가 없음" -#: access/hash/hashvalidate.c:247 +#: access/hash/hashvalidate.c:248 access/nbtree/nbtvalidate.c:242 #, c-format -msgid "hash operator family \"%s\" is missing cross-type operator(s)" -msgstr "%s hash 연산자 클래스에 cross-type 연산자가 빠졌음" +msgid "" +"operator family \"%s\" of access method %s is missing cross-type operator(s)" +msgstr "%s 연산자 패밀리(접근 방법: %s)에 cross-type 연산자가 빠졌음" -#: access/heap/heapam.c:1295 access/heap/heapam.c:1323 -#: access/heap/heapam.c:1355 catalog/aclchk.c:1756 +#: access/heap/heapam.c:1293 access/heap/heapam.c:1321 +#: access/heap/heapam.c:1353 catalog/aclchk.c:1772 #, c-format msgid "\"%s\" is an index" msgstr "\"%s\" 객체는 인덱스임" -#: access/heap/heapam.c:1300 access/heap/heapam.c:1328 -#: access/heap/heapam.c:1360 catalog/aclchk.c:1763 commands/tablecmds.c:9081 -#: commands/tablecmds.c:12189 +#: access/heap/heapam.c:1298 access/heap/heapam.c:1326 +#: access/heap/heapam.c:1358 catalog/aclchk.c:1779 commands/tablecmds.c:9898 +#: commands/tablecmds.c:13128 #, c-format msgid "\"%s\" is a composite type" msgstr "\"%s\" 객체는 복합 자료형입니다" -#: access/heap/heapam.c:2567 +#: access/heap/heapam.c:2592 #, c-format msgid "cannot insert tuples during a parallel operation" msgstr "병렬 작업 중에는 튜플을 추가 할 수 없음" -#: access/heap/heapam.c:3017 +#: access/heap/heapam.c:3042 #, c-format msgid "cannot delete tuples during a parallel operation" msgstr "병렬 작업 중에는 튜플을 지울 수 없음" -#: access/heap/heapam.c:3063 +#: access/heap/heapam.c:3088 #, c-format msgid "attempted to delete invisible tuple" msgstr "볼 수 없는 튜플을 삭제 하려고 함" -#: access/heap/heapam.c:3489 access/heap/heapam.c:6240 +#: access/heap/heapam.c:3514 access/heap/heapam.c:6248 #, c-format msgid "cannot update tuples during a parallel operation" msgstr "병렬 작업 중에 튜플 갱신은 할 수 없음" -#: access/heap/heapam.c:3611 +#: access/heap/heapam.c:3662 #, c-format msgid "attempted to update invisible tuple" msgstr "볼 수 없는 튜플을 변경하려고 함" -#: access/heap/heapam.c:4963 access/heap/heapam.c:5001 -#: access/heap/heapam.c:5253 executor/execMain.c:2314 +#: access/heap/heapam.c:4938 access/heap/heapam.c:4976 +#: access/heap/heapam.c:5228 executor/execMain.c:2630 #, c-format msgid "could not obtain lock on row in relation \"%s\"" msgstr "\"%s\" 릴레이션의 잠금 정보를 구할 수 없음" -#: access/heap/hio.c:322 access/heap/rewriteheap.c:664 +#: access/heap/hio.c:322 access/heap/rewriteheap.c:666 #, c-format msgid "row is too big: size %zu, maximum size %zu" -msgstr "열(row)이 너무 큽니다: 크기 %zu, 최대값 %zu" +msgstr "로우가 너무 큽니다: 크기 %zu, 최대값 %zu" -#: 
access/heap/rewriteheap.c:923 +#: access/heap/rewriteheap.c:926 #, c-format msgid "could not write to file \"%s\", wrote %d of %d: %m" msgstr "\"%s\" 파일 쓰기 실패, %d / %d 기록함: %m." -#: access/heap/rewriteheap.c:963 access/heap/rewriteheap.c:1175 -#: access/heap/rewriteheap.c:1272 access/transam/timeline.c:407 -#: access/transam/timeline.c:483 access/transam/xlog.c:3087 -#: access/transam/xlog.c:3249 replication/logical/snapbuild.c:1605 -#: replication/slot.c:1088 replication/slot.c:1173 storage/file/fd.c:631 -#: storage/file/fd.c:3129 storage/smgr/md.c:1041 storage/smgr/md.c:1274 -#: storage/smgr/md.c:1447 utils/misc/guc.c:6885 +#: access/heap/rewriteheap.c:966 access/heap/rewriteheap.c:1183 +#: access/heap/rewriteheap.c:1282 access/transam/timeline.c:412 +#: access/transam/timeline.c:492 access/transam/xlog.c:3249 +#: access/transam/xlog.c:3417 replication/logical/snapbuild.c:1630 +#: replication/slot.c:1291 replication/slot.c:1378 storage/file/fd.c:631 +#: storage/file/fd.c:3180 storage/smgr/md.c:1044 storage/smgr/md.c:1277 +#: storage/smgr/md.c:1450 utils/misc/guc.c:6998 #, c-format msgid "could not fsync file \"%s\": %m" msgstr "\"%s\" 파일 fsync 실패: %m" -#: access/heap/rewriteheap.c:1018 access/heap/rewriteheap.c:1138 -#: access/transam/timeline.c:315 access/transam/timeline.c:461 -#: access/transam/xlog.c:3043 access/transam/xlog.c:3192 -#: access/transam/xlog.c:10192 access/transam/xlog.c:10230 -#: access/transam/xlog.c:10603 postmaster/postmaster.c:4364 -#: replication/logical/origin.c:542 replication/slot.c:1045 -#: storage/file/copydir.c:162 storage/smgr/md.c:327 utils/time/snapmgr.c:1275 +#: access/heap/rewriteheap.c:1021 access/heap/rewriteheap.c:1141 +#: access/transam/timeline.c:315 access/transam/timeline.c:467 +#: access/transam/xlog.c:3202 access/transam/xlog.c:3355 +#: access/transam/xlog.c:10543 access/transam/xlog.c:10581 +#: access/transam/xlog.c:10965 postmaster/postmaster.c:4450 +#: replication/logical/origin.c:555 replication/slot.c:1243 +#: storage/file/copydir.c:162 storage/smgr/md.c:327 utils/time/snapmgr.c:1297 #, c-format msgid "could not create file \"%s\": %m" msgstr "\"%s\" 파일을 만들 수 없음: %m" -#: access/heap/rewriteheap.c:1147 +#: access/heap/rewriteheap.c:1151 #, c-format msgid "could not truncate file \"%s\" to %u: %m" msgstr "\"%s\" 파일을 %u 크기로 정리할 수 없음: %m" -#: access/heap/rewriteheap.c:1154 replication/walsender.c:481 -#: storage/smgr/md.c:1899 +#: access/heap/rewriteheap.c:1159 replication/walsender.c:486 +#: storage/smgr/md.c:1949 #, c-format msgid "could not seek to end of file \"%s\": %m" msgstr "\"%s\" 파일의 끝을 찾을 수 없음: %m" -#: access/heap/rewriteheap.c:1165 access/transam/timeline.c:367 -#: access/transam/timeline.c:401 access/transam/timeline.c:477 -#: access/transam/xlog.c:3078 access/transam/xlog.c:3242 -#: postmaster/postmaster.c:4374 postmaster/postmaster.c:4384 -#: replication/logical/origin.c:551 replication/logical/origin.c:587 -#: replication/logical/origin.c:603 replication/logical/snapbuild.c:1589 -#: replication/slot.c:1074 storage/file/copydir.c:187 -#: utils/init/miscinit.c:1228 utils/init/miscinit.c:1237 -#: utils/init/miscinit.c:1244 utils/misc/guc.c:6846 utils/misc/guc.c:6877 -#: utils/misc/guc.c:8727 utils/misc/guc.c:8741 utils/time/snapmgr.c:1280 -#: utils/time/snapmgr.c:1287 +#: access/heap/rewriteheap.c:1171 access/transam/timeline.c:370 +#: access/transam/timeline.c:405 access/transam/timeline.c:484 +#: access/transam/xlog.c:3238 access/transam/xlog.c:3408 +#: postmaster/postmaster.c:4460 postmaster/postmaster.c:4470 +#: 
replication/logical/origin.c:564 replication/logical/origin.c:603 +#: replication/logical/origin.c:619 replication/logical/snapbuild.c:1612 +#: replication/slot.c:1274 storage/file/copydir.c:191 +#: utils/init/miscinit.c:1249 utils/init/miscinit.c:1260 +#: utils/init/miscinit.c:1268 utils/misc/guc.c:6959 utils/misc/guc.c:6990 +#: utils/misc/guc.c:8840 utils/misc/guc.c:8854 utils/time/snapmgr.c:1302 +#: utils/time/snapmgr.c:1309 #, c-format msgid "could not write to file \"%s\": %m" msgstr "\"%s\" 파일 쓰기 실패: %m" -#: access/heap/rewriteheap.c:1248 access/transam/xlog.c:10441 -#: access/transam/xlogarchive.c:114 access/transam/xlogarchive.c:468 -#: replication/logical/origin.c:529 replication/logical/reorderbuffer.c:2632 -#: replication/logical/reorderbuffer.c:2689 -#: replication/logical/snapbuild.c:1533 replication/logical/snapbuild.c:1908 -#: replication/slot.c:1147 storage/ipc/dsm.c:326 storage/smgr/md.c:427 -#: storage/smgr/md.c:476 storage/smgr/md.c:1394 +#: access/heap/rewriteheap.c:1257 access/transam/xlogarchive.c:113 +#: access/transam/xlogarchive.c:467 postmaster/postmaster.c:1257 +#: postmaster/syslogger.c:1371 replication/logical/origin.c:542 +#: replication/logical/reorderbuffer.c:2595 +#: replication/logical/reorderbuffer.c:2652 +#: replication/logical/snapbuild.c:1560 replication/logical/snapbuild.c:1936 +#: replication/slot.c:1351 storage/file/fd.c:682 storage/ipc/dsm.c:327 +#: storage/smgr/md.c:426 storage/smgr/md.c:475 storage/smgr/md.c:1397 #, c-format msgid "could not remove file \"%s\": %m" msgstr "\"%s\" 파일을 삭제할 수 없음: %m" -#: access/heap/rewriteheap.c:1262 access/transam/timeline.c:111 +#: access/heap/rewriteheap.c:1271 access/transam/timeline.c:111 #: access/transam/timeline.c:236 access/transam/timeline.c:334 -#: access/transam/xlog.c:3019 access/transam/xlog.c:3136 -#: access/transam/xlog.c:3177 access/transam/xlog.c:3450 -#: access/transam/xlog.c:3528 access/transam/xlogutils.c:701 -#: replication/basebackup.c:403 replication/basebackup.c:1150 -#: replication/logical/origin.c:658 replication/logical/reorderbuffer.c:2156 -#: replication/logical/reorderbuffer.c:2402 -#: replication/logical/reorderbuffer.c:3081 -#: replication/logical/snapbuild.c:1582 replication/logical/snapbuild.c:1666 -#: replication/slot.c:1162 replication/walsender.c:474 -#: replication/walsender.c:2100 storage/file/copydir.c:155 -#: storage/file/fd.c:614 storage/file/fd.c:3041 storage/file/fd.c:3108 -#: storage/smgr/md.c:609 utils/error/elog.c:1879 utils/init/miscinit.c:1163 -#: utils/init/miscinit.c:1284 utils/init/miscinit.c:1362 utils/misc/guc.c:7105 -#: utils/misc/guc.c:7138 +#: access/transam/xlog.c:3178 access/transam/xlog.c:3299 +#: access/transam/xlog.c:3340 access/transam/xlog.c:3619 +#: access/transam/xlog.c:3697 access/transam/xlogutils.c:706 +#: postmaster/syslogger.c:1380 replication/basebackup.c:474 +#: replication/basebackup.c:1218 replication/logical/origin.c:674 +#: replication/logical/reorderbuffer.c:2112 +#: replication/logical/reorderbuffer.c:2361 +#: replication/logical/reorderbuffer.c:3044 +#: replication/logical/snapbuild.c:1604 replication/logical/snapbuild.c:1692 +#: replication/slot.c:1366 replication/walsender.c:479 +#: replication/walsender.c:2385 storage/file/copydir.c:155 +#: storage/file/fd.c:614 storage/file/fd.c:3092 storage/file/fd.c:3159 +#: storage/smgr/md.c:608 utils/error/elog.c:1879 utils/init/miscinit.c:1173 +#: utils/init/miscinit.c:1308 utils/init/miscinit.c:1385 utils/misc/guc.c:7218 +#: utils/misc/guc.c:7251 #, c-format msgid "could not open file \"%s\": 
%m" msgstr "\"%s\" 파일을 열 수 없음: %m" -#: access/index/amapi.c:82 commands/amcmds.c:164 +#: access/index/amapi.c:83 commands/amcmds.c:163 #, c-format msgid "access method \"%s\" is not of type %s" msgstr "\"%s\" 접근 방법은 %s 자료형에는 쓸 수 없음" -#: access/index/amapi.c:98 +#: access/index/amapi.c:99 #, c-format msgid "index access method \"%s\" does not have a handler" msgstr "\"%s\" 인덱스 접근 방법에 대한 핸들러가 없음" -#: access/index/indexam.c:155 catalog/objectaddress.c:1196 -#: commands/indexcmds.c:1800 commands/tablecmds.c:242 -#: commands/tablecmds.c:12180 +#: access/index/indexam.c:160 catalog/objectaddress.c:1222 +#: commands/indexcmds.c:1819 commands/tablecmds.c:247 +#: commands/tablecmds.c:13119 #, c-format msgid "\"%s\" is not an index" msgstr "\"%s\" 객체는 인덱스가 아닙니다" -#: access/nbtree/nbtinsert.c:428 +#: access/nbtree/nbtinsert.c:429 #, c-format msgid "duplicate key value violates unique constraint \"%s\"" msgstr "중복된 키 값이 \"%s\" 고유 제약 조건을 위반함" -#: access/nbtree/nbtinsert.c:430 +#: access/nbtree/nbtinsert.c:431 #, c-format msgid "Key %s already exists." msgstr "%s 키가 이미 있습니다." -#: access/nbtree/nbtinsert.c:497 +#: access/nbtree/nbtinsert.c:498 #, c-format msgid "failed to re-find tuple within index \"%s\"" msgstr "\"%s\" 인덱스에서 튜플 재검색 실패" -#: access/nbtree/nbtinsert.c:499 +#: access/nbtree/nbtinsert.c:500 #, c-format msgid "This may be because of a non-immutable index expression." msgstr "이 문제는 non-immutable 인덱스 표현식 때문인듯 합니다." -#: access/nbtree/nbtinsert.c:579 access/nbtree/nbtsort.c:491 +#: access/nbtree/nbtinsert.c:580 access/nbtree/nbtsort.c:491 #, c-format msgid "" "Values larger than 1/3 of a buffer page cannot be indexed.\n" @@ -1083,24 +1006,24 @@ msgstr "" "버퍼 페이지의 1/3보다 큰 값은 인덱싱할 수 없습니다.\n" "값의 MD5 해시 함수 인덱스를 고려하거나 전체 텍스트 인덱싱을 사용하십시오." -#: access/nbtree/nbtpage.c:168 access/nbtree/nbtpage.c:371 -#: access/nbtree/nbtpage.c:458 parser/parse_utilcmd.c:1702 +#: access/nbtree/nbtpage.c:169 access/nbtree/nbtpage.c:372 +#: access/nbtree/nbtpage.c:459 parser/parse_utilcmd.c:1901 #, c-format msgid "index \"%s\" is not a btree" msgstr "\"%s\" 인덱스는 btree 인덱스가 아닙니다" -#: access/nbtree/nbtpage.c:174 access/nbtree/nbtpage.c:377 -#: access/nbtree/nbtpage.c:464 +#: access/nbtree/nbtpage.c:175 access/nbtree/nbtpage.c:378 +#: access/nbtree/nbtpage.c:465 #, c-format msgid "version mismatch in index \"%s\": file version %d, code version %d" msgstr "\"%s\" 인덱스의 버전이 틀립니다: 파일 버전 %d, 코드 버전 %d" -#: access/nbtree/nbtpage.c:1152 +#: access/nbtree/nbtpage.c:1153 #, c-format msgid "index \"%s\" contains a half-dead internal page" msgstr "\"%s\" 인덱스에 반쯤 죽은(half-dead) 내부 페이지가 있음" -#: access/nbtree/nbtpage.c:1154 +#: access/nbtree/nbtpage.c:1155 #, c-format msgid "" "This can be caused by an interrupted VACUUM in version 9.3 or older, before " @@ -1109,162 +1032,51 @@ msgstr "" "이 문제는 9.3 버전 이하 환경에서 VACUUM 작업이 중지되고, 그 상태로 업그레이드" "되었을 가능성이 큽니다. 해당 인덱스를 다시 만드십시오." -#: access/nbtree/nbtvalidate.c:100 -#, c-format -msgid "" -"btree operator family \"%s\" contains function %s with invalid support " -"number %d" -msgstr "" -"\"%s\" btree 연산자 패밀리에 포함된 %s 함수가 잘못된 지원 번호 %d 로 지정되었" -"습니다." - -#: access/nbtree/nbtvalidate.c:112 -#, c-format -msgid "" -"btree operator family \"%s\" contains function %s with wrong signature for " -"support number %d" -msgstr "" -"\"%s\" btree 연산자 패밀리에 포함된 %s 함수가 잘못된 signature 지원 번호 %d " -"로 지정되었습니다." 
- -#: access/nbtree/nbtvalidate.c:132 -#, c-format -msgid "" -"btree operator family \"%s\" contains operator %s with invalid strategy " -"number %d" -msgstr "" -"\"%s\" btree 연산자 패밀리에 포함된 %s 연산자의 %d 번 전략 번호가 잘못되었습" -"니다." - -#: access/nbtree/nbtvalidate.c:145 -#, c-format -msgid "" -"btree operator family \"%s\" contains invalid ORDER BY specification for " -"operator %s" -msgstr "" -"\"%s\" btree 연산자 패밀리에 %s 연산자가 잘못된 ORDER BY 명세를 사용합니다." - -#: access/nbtree/nbtvalidate.c:158 -#, c-format -msgid "btree operator family \"%s\" contains operator %s with wrong signature" -msgstr "\"%s\" btree 연산자 패밀리에 %s 연산자가 잘못된 기호를 사용합니다." - -#: access/nbtree/nbtvalidate.c:200 -#, c-format -msgid "btree operator family \"%s\" is missing operator(s) for types %s and %s" -msgstr "\"%s\" btree 연산자 패밀리에는 %s 자료형과 %s 자료형용 연산자가 빠졌음" - -#: access/nbtree/nbtvalidate.c:210 +#: access/nbtree/nbtvalidate.c:211 #, c-format msgid "" -"btree operator family \"%s\" is missing support function for types %s and %s" +"operator family \"%s\" of access method %s is missing support function for " +"types %s and %s" msgstr "" -"\"%s\" btree 연산자 패밀리에는 %s 자료형과 %s 자료형용 지원 함수가 빠졌음" - -#: access/nbtree/nbtvalidate.c:224 -#, c-format -msgid "btree operator class \"%s\" is missing operator(s)" -msgstr "\"%s\" btree 연산자 클래스에는 연산자가 빠졌음" - -#: access/nbtree/nbtvalidate.c:241 -#, c-format -msgid "btree operator family \"%s\" is missing cross-type operator(s)" -msgstr "\"%s\" btree 연산자 클래스에는 cross-type 연산자가 빠졌음" +"\"%s\" 연산자 패밀리(접근 방법: %s)에는 %s 자료형과 %s 자료형용 지원 함수가 " +"빠졌음" -#: access/spgist/spgutils.c:700 +#: access/spgist/spgutils.c:705 #, c-format msgid "SP-GiST inner tuple size %zu exceeds maximum %zu" msgstr "SP-GiST 내부 튜플 크기가 초과됨: 현재값 %zu, 최대값 %zu" -#: access/spgist/spgvalidate.c:92 -#, c-format -msgid "" -"spgist operator family \"%s\" contains support procedure %s with cross-type " -"registration" -msgstr "" -"\"%s\" spgist 연산자 패밀리에 cross-type registration 으로 %s 지원 프로시져" -"가 포함되어 있습니다." - -#: access/spgist/spgvalidate.c:115 -#, c-format -msgid "" -"spgist operator family \"%s\" contains function %s with invalid support " -"number %d" -msgstr "" -"\"%s\" spgist 연산자 패밀리에 포함된 %s 함수가 잘못된 지원 번호 %d 로 지정되" -"었습니다." - -#: access/spgist/spgvalidate.c:127 -#, c-format -msgid "" -"spgist operator family \"%s\" contains function %s with wrong signature for " -"support number %d" -msgstr "" -"\"%s\" spgist 연산자 패밀리에 포함된 %s 함수가 잘못된 signature 지원 번호 %d " -"로 지정되었습니다." - -#: access/spgist/spgvalidate.c:146 +#: access/spgist/spgvalidate.c:221 #, c-format msgid "" -"spgist operator family \"%s\" contains operator %s with invalid strategy " -"number %d" +"operator family \"%s\" of access method %s is missing support function %d " +"for type %s" msgstr "" -"\"%s\" spgist 연산자 패밀리에 포함된 %s 연산자의 %d 번 전략 번호가 잘못되었습" -"니다." - -#: access/spgist/spgvalidate.c:159 -#, c-format -msgid "" -"spgist operator family \"%s\" contains invalid ORDER BY specification for " -"operator %s" -msgstr "" -"\"%s\" spgist 연산자 패밀리에 %s 연산자가 잘못된 ORDER BY 명세를 사용합니다." - -#: access/spgist/spgvalidate.c:172 -#, c-format -msgid "spgist operator family \"%s\" contains operator %s with wrong signature" -msgstr "\"%s\" spgist 연산자 패밀리에 %s 연산자가 잘못된 기호를 사용합니다." 
- -#: access/spgist/spgvalidate.c:200 -#, c-format -msgid "" -"spgist operator family \"%s\" is missing operator(s) for types %s and %s" -msgstr "" -"\"%s\" spgist 연산자 패밀리에서 %s 자료형과 %s 자료형용 연산자가 빠졌음" - -#: access/spgist/spgvalidate.c:220 -#, c-format -msgid "" -"spgist operator family \"%s\" is missing support function %d for type %s" -msgstr "\"%s\" spgist 연산자 패밀리에 %d 지원 함수가 %s 자료형용으로 없습니다." - -#: access/spgist/spgvalidate.c:233 -#, c-format -msgid "spgist operator class \"%s\" is missing operator(s)" -msgstr "\"%s\" spgist 연산자 클래스에 연산자가 빠졌음" +"\"%s\" 연산자 패밀리(접근 방법: %s)에 %d 지원 함수가 %s 자료형용으로 없습니" +"다." #: access/tablesample/bernoulli.c:152 access/tablesample/system.c:156 #, c-format msgid "sample percentage must be between 0 and 100" msgstr "샘플 퍼센트 값은 0에서 100 사이여야 함" -#: access/transam/commit_ts.c:294 +#: access/transam/commit_ts.c:295 #, c-format msgid "cannot retrieve commit timestamp for transaction %u" msgstr "%u 트랜잭션의 커밋 타임스탬프를 알 수 없음" -#: access/transam/commit_ts.c:392 +#: access/transam/commit_ts.c:393 #, c-format msgid "could not get commit timestamp data" msgstr "커밋 타임스탬프 자료를 찾을 수 없음" -#: access/transam/commit_ts.c:394 +#: access/transam/commit_ts.c:395 #, c-format msgid "" "Make sure the configuration parameter \"%s\" is set on the master server." msgstr "운영 서버에서 \"%s\" 환경 설정 매개 변수값을 지정 하세요." -#: access/transam/commit_ts.c:396 libpq/hba.c:1439 +#: access/transam/commit_ts.c:397 #, c-format msgid "Make sure the configuration parameter \"%s\" is set." msgstr "\"%s\" 환경 설정 매개 변수를 지정하세요." @@ -1297,7 +1109,7 @@ msgstr "" "%u OID 데이터베이스 자료 손실을 막기 위해 새로운 MultiXactId 만드는 작업을 " "더 이상 할 수 없습니다." -#: access/transam/multixact.c:1028 access/transam/multixact.c:2314 +#: access/transam/multixact.c:1028 access/transam/multixact.c:2318 #, c-format msgid "database \"%s\" must be vacuumed before %u more MultiXactId is used" msgid_plural "" @@ -1306,7 +1118,7 @@ msgstr[0] "" "\"%s\" 데이터베이스는 %u번의 트랜잭션이 발생되기 전에 VACUUM 작업을 해야 합니" "다." -#: access/transam/multixact.c:1037 access/transam/multixact.c:2323 +#: access/transam/multixact.c:1037 access/transam/multixact.c:2327 #, c-format msgid "" "database with OID %u must be vacuumed before %u more MultiXactId is used" @@ -1373,14 +1185,14 @@ msgstr "%u번 MultiXactId 더이상 없음 -- 번호 겹침 현상 발생" msgid "MultiXactId %u has not been created yet -- apparent wraparound" msgstr "%u번 MultiXactId를 만들 수 없음 -- 번호 겹침 현상 발생" -#: access/transam/multixact.c:2264 +#: access/transam/multixact.c:2268 #, c-format msgid "MultiXactId wrap limit is %u, limited by database with OID %u" msgstr "MultiXactId 겹침 한계는 %u 입니다. %u OID 데이터베이스에서 제한됨" -#: access/transam/multixact.c:2319 access/transam/multixact.c:2328 +#: access/transam/multixact.c:2323 access/transam/multixact.c:2332 #: access/transam/varsup.c:146 access/transam/varsup.c:153 -#: access/transam/varsup.c:384 access/transam/varsup.c:391 +#: access/transam/varsup.c:405 access/transam/varsup.c:412 #, c-format msgid "" "To avoid a database shutdown, execute a database-wide VACUUM in that " @@ -1391,12 +1203,12 @@ msgstr "" "오.\n" "또한 오래된 트랜잭션을 커밋하거나 롤백할 필요가 있습니다." -#: access/transam/multixact.c:2598 +#: access/transam/multixact.c:2602 #, c-format msgid "oldest MultiXactId member is at offset %u" msgstr "제일 오래된 MultiXactId 값은 %u 위치에 있음" -#: access/transam/multixact.c:2602 +#: access/transam/multixact.c:2606 #, c-format msgid "" "MultiXact member wraparound protections are disabled because oldest " @@ -1405,24 +1217,24 @@ msgstr "" "가장 오래된 체크포인트 작업이 완료된 %u 멀티 트랜잭션 번호가 디스크에 없기 때" "문에, 멀티 트랜잭션 번호 겹침 방지 기능이 비활성화 되어 있습니다." 
-#: access/transam/multixact.c:2624 +#: access/transam/multixact.c:2628 #, c-format msgid "MultiXact member wraparound protections are now enabled" msgstr "멀티 트랜잭션 번호 겹침 방지 기능이 활성화 되었음" -#: access/transam/multixact.c:2626 +#: access/transam/multixact.c:2631 #, c-format msgid "MultiXact member stop limit is now %u based on MultiXact %u" msgstr "멀티 트랜잭션 중지 제한 번호는 %u 입니다. (%u 멀티트랜잭션에 기초함)" -#: access/transam/multixact.c:3006 +#: access/transam/multixact.c:3011 #, c-format msgid "" "oldest MultiXact %u not found, earliest MultiXact %u, skipping truncation" msgstr "" "가장 오래된 멀티 트랜잭션 번호는 %u, 가장 최신 것은 %u, truncate 작업 건너뜀" -#: access/transam/multixact.c:3024 +#: access/transam/multixact.c:3029 #, c-format msgid "" "cannot truncate up to MultiXact %u because it does not exist on disk, " @@ -1431,83 +1243,83 @@ msgstr "" "디스크에 해당 멀티 트랜잭션 번호가 없어, %u 멀티 트랜잭션 번호로 truncate 못" "함, truncate 작업 건너뜀" -#: access/transam/multixact.c:3350 +#: access/transam/multixact.c:3355 #, c-format msgid "invalid MultiXactId: %u" msgstr "잘못된 MultiXactId: %u" -#: access/transam/parallel.c:589 +#: access/transam/parallel.c:577 #, c-format msgid "postmaster exited during a parallel transaction" msgstr "병렬 트랜잭션 처리 중 postmaster 종료됨" -#: access/transam/parallel.c:774 +#: access/transam/parallel.c:764 #, c-format msgid "lost connection to parallel worker" msgstr "병렬 처리 작업자 프로세스 연결 끊김" -#: access/transam/parallel.c:833 access/transam/parallel.c:835 +#: access/transam/parallel.c:823 access/transam/parallel.c:825 msgid "parallel worker" msgstr "병렬 처리 작업자" -#: access/transam/parallel.c:974 +#: access/transam/parallel.c:968 #, c-format msgid "could not map dynamic shared memory segment" msgstr "동적 공유 메모리 세그먼트를 할당할 수 없음" -#: access/transam/parallel.c:979 +#: access/transam/parallel.c:973 #, c-format msgid "invalid magic number in dynamic shared memory segment" msgstr "동적 공유 메모리 세그먼트에 잘못된 매직 번호가 있음" -#: access/transam/slru.c:665 +#: access/transam/slru.c:668 #, c-format msgid "file \"%s\" doesn't exist, reading as zeroes" msgstr "\"%s\" 파일 없음, 0으로 읽음" -#: access/transam/slru.c:895 access/transam/slru.c:901 -#: access/transam/slru.c:908 access/transam/slru.c:915 -#: access/transam/slru.c:922 access/transam/slru.c:929 +#: access/transam/slru.c:907 access/transam/slru.c:913 +#: access/transam/slru.c:920 access/transam/slru.c:927 +#: access/transam/slru.c:934 access/transam/slru.c:941 #, c-format msgid "could not access status of transaction %u" msgstr "%u 트랜잭션의 상태를 액세스할 수 없음" -#: access/transam/slru.c:896 +#: access/transam/slru.c:908 #, c-format msgid "Could not open file \"%s\": %m." msgstr "\"%s\" 파일을 열 수 없음: %m." -#: access/transam/slru.c:902 +#: access/transam/slru.c:914 #, c-format msgid "Could not seek in file \"%s\" to offset %u: %m." msgstr "\"%s\" 파일에서 %u 위치를 찾을 수 없음: %m." -#: access/transam/slru.c:909 +#: access/transam/slru.c:921 #, c-format msgid "Could not read from file \"%s\" at offset %u: %m." msgstr "\"%s\" 파일에서 %u 위치를 읽을 수 없음: %m." -#: access/transam/slru.c:916 +#: access/transam/slru.c:928 #, c-format msgid "Could not write to file \"%s\" at offset %u: %m." msgstr "\"%s\" 파일에서 %u 위치에 쓸 수 없음: %m." -#: access/transam/slru.c:923 +#: access/transam/slru.c:935 #, c-format msgid "Could not fsync file \"%s\": %m." msgstr "\"%s\" 파일 fsync 실패: %m." -#: access/transam/slru.c:930 +#: access/transam/slru.c:942 #, c-format msgid "Could not close file \"%s\": %m." msgstr "\"%s\" 파일을 닫을 수 없음: %m." 
-#: access/transam/slru.c:1185 +#: access/transam/slru.c:1199 #, c-format msgid "could not truncate directory \"%s\": apparent wraparound" msgstr "\"%s\" 디렉터리를 비울 수 없음: 랩어라운드 발생" -#: access/transam/slru.c:1240 access/transam/slru.c:1296 +#: access/transam/slru.c:1254 access/transam/slru.c:1310 #, c-format msgid "removing file \"%s\"" msgstr "\"%s\" 파일 삭제 중" @@ -1524,7 +1336,7 @@ msgstr "숫자 타임라인 ID가 필요합니다." #: access/transam/timeline.c:154 #, c-format -msgid "Expected a transaction log switchpoint location." +msgid "Expected a write-ahead log switchpoint location." msgstr "트랜잭션 로그 전환 위치 값이 있어야 함" #: access/transam/timeline.c:158 @@ -1547,169 +1359,183 @@ msgstr "작업내역 파일에 잘못된 자료가 있음: \"%s\"" msgid "Timeline IDs must be less than child timeline's ID." msgstr "타임라인 ID는 하위 타임라인 ID보다 작아야 합니다." -#: access/transam/timeline.c:412 access/transam/timeline.c:488 -#: access/transam/xlog.c:3093 access/transam/xlog.c:3254 -#: access/transam/xlogfuncs.c:690 commands/copy.c:1708 -#: storage/file/copydir.c:201 +#: access/transam/timeline.c:418 access/transam/timeline.c:498 +#: access/transam/xlog.c:3256 access/transam/xlog.c:3423 +#: access/transam/xlogfuncs.c:693 commands/copy.c:1723 +#: storage/file/copydir.c:206 #, c-format msgid "could not close file \"%s\": %m" msgstr "\"%s\" 파일을 닫을 수 없음: %m" -#: access/transam/timeline.c:570 +#: access/transam/timeline.c:580 #, c-format msgid "requested timeline %u is not in this server's history" msgstr "요청한 %u 타이라인이 이 서버 내역에는 없음" -#: access/transam/twophase.c:363 +#: access/transam/twophase.c:383 #, c-format msgid "transaction identifier \"%s\" is too long" msgstr "\"%s\" 트랜잭션 식별자가 너무 깁니다" -#: access/transam/twophase.c:370 +#: access/transam/twophase.c:390 #, c-format msgid "prepared transactions are disabled" msgstr "준비된 트랜잭션이 비활성화됨" -#: access/transam/twophase.c:371 +#: access/transam/twophase.c:391 #, c-format msgid "Set max_prepared_transactions to a nonzero value." msgstr "max_prepared_transactions 설정값을 0이 아닌 값으로 설정하십시오." -#: access/transam/twophase.c:390 +#: access/transam/twophase.c:410 #, c-format msgid "transaction identifier \"%s\" is already in use" msgstr "\"%s\" 이름의 트랜잭션 식별자가 이미 사용 중입니다" -#: access/transam/twophase.c:399 +#: access/transam/twophase.c:419 access/transam/twophase.c:2340 #, c-format msgid "maximum number of prepared transactions reached" msgstr "준비된 트랜잭션의 최대 개수를 모두 사용했습니다" -#: access/transam/twophase.c:400 +#: access/transam/twophase.c:420 access/transam/twophase.c:2341 #, c-format msgid "Increase max_prepared_transactions (currently %d)." msgstr "max_prepared_transactions 값을 늘려주세요 (현재 %d)." -#: access/transam/twophase.c:540 +#: access/transam/twophase.c:587 #, c-format msgid "prepared transaction with identifier \"%s\" is busy" msgstr "\"%s\" 이름의 준비된 트랜잭션 식별자가 여러 곳에서 쓰이고 있습니다" -#: access/transam/twophase.c:546 +#: access/transam/twophase.c:593 #, c-format msgid "permission denied to finish prepared transaction" msgstr "준비된 트랜잭션 끝내기 작업 권한 없음" -#: access/transam/twophase.c:547 +#: access/transam/twophase.c:594 #, c-format msgid "Must be superuser or the user that prepared the transaction." msgstr "해당 준비된 트랜잭션의 소유주이거나 superuser여야합니다" -#: access/transam/twophase.c:558 +#: access/transam/twophase.c:605 #, c-format msgid "prepared transaction belongs to another database" msgstr "준비된 트랜잭션이 다른 데이터베이스에 속해 있음" -#: access/transam/twophase.c:559 +#: access/transam/twophase.c:606 #, c-format msgid "" "Connect to the database where the transaction was prepared to finish it." msgstr "작업을 마치려면 그 준비된 트랜잭션이 있는 데이터베이스에 연결하십시오." 
-#: access/transam/twophase.c:574 +#: access/transam/twophase.c:621 #, c-format msgid "prepared transaction with identifier \"%s\" does not exist" msgstr "\"%s\" 이름의 준비된 트랜잭션이 없습니다" -#: access/transam/twophase.c:1043 +#: access/transam/twophase.c:1086 #, c-format msgid "two-phase state file maximum length exceeded" msgstr "2단계 상태 파일 최대 길이를 초과함" -#: access/transam/twophase.c:1161 +#: access/transam/twophase.c:1204 #, c-format msgid "could not open two-phase state file \"%s\": %m" msgstr "\"%s\" 이름의 two-phase 상태정보 파일을 열 수 없음: %m" -#: access/transam/twophase.c:1178 +#: access/transam/twophase.c:1221 #, c-format msgid "could not stat two-phase state file \"%s\": %m" msgstr "\"%s\" 이름의 two-phase 상태정보 파일의 파일정보를 알 수 없음: %m" -#: access/transam/twophase.c:1210 +#: access/transam/twophase.c:1255 #, c-format msgid "could not read two-phase state file \"%s\": %m" msgstr "\"%s\" 이름의 two-phase 상태정보 파일을 읽을 수 없음: %m" -#: access/transam/twophase.c:1263 access/transam/xlog.c:6109 +#: access/transam/twophase.c:1307 access/transam/xlog.c:6356 #, c-format -msgid "Failed while allocating an XLog reading processor." -msgstr "XLog 읽기 프로세서를 할당하는 중에 오류 발생" +msgid "Failed while allocating a WAL reading processor." +msgstr "WAL 읽기 프로세서를 할당하는 중에 오류 발생" -#: access/transam/twophase.c:1269 +#: access/transam/twophase.c:1313 #, c-format -msgid "could not read two-phase state from xlog at %X/%X" -msgstr "two-phase 상태정보을 읽을 수 없음 xlog 위치: %X/%X" +msgid "could not read two-phase state from WAL at %X/%X" +msgstr "two-phase 상태정보을 읽을 수 없음 WAL 위치: %X/%X" -#: access/transam/twophase.c:1277 +#: access/transam/twophase.c:1321 #, c-format -msgid "expected two-phase state data is not present in xlog at %X/%X" -msgstr "xlog %X/%X 위치에 2단계 커밋 상태 자료가 없습니다" +msgid "expected two-phase state data is not present in WAL at %X/%X" +msgstr "WAL %X/%X 위치에 2단계 커밋 상태 자료가 없습니다" -#: access/transam/twophase.c:1512 +#: access/transam/twophase.c:1558 #, c-format msgid "could not remove two-phase state file \"%s\": %m" msgstr "\"%s\" 이름의 two-phase 상태정보 파일을 삭제할 수 없음: %m" -#: access/transam/twophase.c:1542 +#: access/transam/twophase.c:1588 #, c-format msgid "could not recreate two-phase state file \"%s\": %m" msgstr "\"%s\" 이름의 two-phase 상태정보 파일을 다시 만들 수 없음: %m" -#: access/transam/twophase.c:1551 access/transam/twophase.c:1558 +#: access/transam/twophase.c:1599 access/transam/twophase.c:1607 #, c-format msgid "could not write two-phase state file: %m" msgstr "two-phase 상태정보 파일을 쓸 수 없음: %m" -#: access/transam/twophase.c:1570 +#: access/transam/twophase.c:1621 #, c-format msgid "could not fsync two-phase state file: %m" msgstr "two-phase 상태정보 파일의 fsync 작업 실패: %m" -#: access/transam/twophase.c:1576 +#: access/transam/twophase.c:1628 #, c-format msgid "could not close two-phase state file: %m" msgstr "two-phase 상태정보 파일을 닫을 수 없음: %m" -#: access/transam/twophase.c:1649 +#: access/transam/twophase.c:1716 #, c-format msgid "" -"%u two-phase state file was written for long-running prepared transactions" +"%u two-phase state file was written for a long-running prepared transaction" msgid_plural "" "%u two-phase state files were written for long-running prepared transactions" msgstr[0] "" -#: access/transam/twophase.c:1713 +#: access/transam/twophase.c:1944 +#, c-format +msgid "recovering prepared transaction %u from shared memory" +msgstr "공유 메모리에서 %u 준비된 트랜잭션을 복구함" + +#: access/transam/twophase.c:2034 #, c-format -msgid "removing future two-phase state file \"%s\"" -msgstr "\"%s\" 이름의 future two-phase 상태정보 파일을 삭제함" +msgid "removing stale two-phase state file for 
transaction %u" +msgstr "%u 트랜잭션에서 사용하는 오래된 two-phase 상태정보 파일을 삭제함" -#: access/transam/twophase.c:1729 access/transam/twophase.c:1740 -#: access/transam/twophase.c:1860 access/transam/twophase.c:1871 -#: access/transam/twophase.c:1948 +#: access/transam/twophase.c:2041 #, c-format -msgid "removing corrupt two-phase state file \"%s\"" -msgstr "\"%s\" 이름의 잘못된 two-phase 상태정보 파일을 삭제함" +msgid "removing stale two-phase state from memory for transaction %u" +msgstr "" +"%u 트랜잭션에서 사용하는 오래된 two-phase 상태정보를 공유 메모리에서 삭제함" + +#: access/transam/twophase.c:2054 +#, c-format +msgid "removing future two-phase state file for transaction %u" +msgstr "%u 트랜잭션에서 사용하는 future two-phase 상태정보 파일을 삭제함" + +#: access/transam/twophase.c:2061 +#, c-format +msgid "removing future two-phase state from memory for transaction %u" +msgstr "%u 트랜잭션에서 사용하는 future two-phase 상태정보를 메모리에서 삭제함" -#: access/transam/twophase.c:1849 access/transam/twophase.c:1937 +#: access/transam/twophase.c:2075 access/transam/twophase.c:2094 #, c-format -msgid "removing stale two-phase state file \"%s\"" -msgstr "\"%s\" 이름의 오래된 two-phase 상태정보 파일을 삭제함" +msgid "removing corrupt two-phase state file for transaction %u" +msgstr "%u 트랜잭션에서 사용하는 잘못된 two-phase 상태정보 파일을 삭제함" -#: access/transam/twophase.c:1955 +#: access/transam/twophase.c:2101 #, c-format -msgid "recovering prepared transaction %u" -msgstr "%u 준비된 트랜잭션을 복구함" +msgid "removing corrupt two-phase state from memory for transaction %u" +msgstr "%u 트랜잭션에서 사용하는 잘못된 two-phase 상태정보를 메모리에서 삭제함" #: access/transam/varsup.c:124 #, c-format @@ -1739,271 +1565,266 @@ msgstr "" "%u OID 데이터베이스에서 자료 겹침으로 발생할 수 있는 자료 손실을 방지하기 위" "해 명령을 수락하지 않음" -#: access/transam/varsup.c:143 access/transam/varsup.c:381 +#: access/transam/varsup.c:143 access/transam/varsup.c:402 #, c-format msgid "database \"%s\" must be vacuumed within %u transactions" msgstr "\"%s\" 데이터베이스는 %u번의 트랜잭션이 발생되기 전에 청소해야 합니다" -#: access/transam/varsup.c:150 access/transam/varsup.c:388 +#: access/transam/varsup.c:150 access/transam/varsup.c:409 #, c-format msgid "database with OID %u must be vacuumed within %u transactions" msgstr "%u OID 데이터베이스는 %u번의 트랜잭션이 발생되기 전에 청소해야 합니다" -#: access/transam/varsup.c:346 +#: access/transam/varsup.c:367 #, c-format msgid "transaction ID wrap limit is %u, limited by database with OID %u" msgstr "트랜잭션 ID 겹침 제한은 %u번 입니다., %u OID 데이터베이스에서 제한됨" -#: access/transam/xact.c:943 +#: access/transam/xact.c:946 #, c-format msgid "cannot have more than 2^32-2 commands in a transaction" msgstr "하나의 트랜잭션 안에서는 2^32-2 개의 명령을 초과할 수 없음" -#: access/transam/xact.c:1467 +#: access/transam/xact.c:1471 #, c-format msgid "maximum number of committed subtransactions (%d) exceeded" msgstr "커밋된 하위 트랜잭션 수(%d)가 최대치를 초과함" -#: access/transam/xact.c:2263 +#: access/transam/xact.c:2265 #, c-format msgid "cannot PREPARE a transaction that has operated on temporary tables" msgstr "임시 테이블에 대해 실행된 트랜잭션을 PREPARE할 수 없음" -#: access/transam/xact.c:2273 +#: access/transam/xact.c:2275 #, c-format msgid "cannot PREPARE a transaction that has exported snapshots" msgstr "스냅샷으로 내보낸 트랜잭션은 PREPARE 작업을 할 수 없음" +#: access/transam/xact.c:2284 +#, c-format +msgid "" +"cannot PREPARE a transaction that has manipulated logical replication workers" +msgstr "논리 복제 작업자를 사용하는 트랜잭션은 PREPARE 할 수 없음" + #. translator: %s represents an SQL statement name -#: access/transam/xact.c:3155 +#: access/transam/xact.c:3166 #, c-format msgid "%s cannot run inside a transaction block" msgstr "%s 명령은 트랜잭션 블럭안에서 실행할 수 없음" #. 
translator: %s represents an SQL statement name -#: access/transam/xact.c:3165 +#: access/transam/xact.c:3176 #, c-format msgid "%s cannot run inside a subtransaction" msgstr "%s 명령은 서브트랜잭션 블럭안에서 실행할 수 없음" #. translator: %s represents an SQL statement name -#: access/transam/xact.c:3175 +#: access/transam/xact.c:3186 #, c-format msgid "%s cannot be executed from a function or multi-command string" msgstr "%s 명령은 함수나 다중명령에서 실행할 수 없음" #. translator: %s represents an SQL statement name -#: access/transam/xact.c:3246 +#: access/transam/xact.c:3257 #, c-format msgid "%s can only be used in transaction blocks" msgstr "%s 명령은 트랜잭션 블럭에서만 사용될 수 있음" -#: access/transam/xact.c:3430 +#: access/transam/xact.c:3441 #, c-format msgid "there is already a transaction in progress" msgstr "이미 트랜잭션 작업이 진행 중입니다" -#: access/transam/xact.c:3598 access/transam/xact.c:3701 +#: access/transam/xact.c:3609 access/transam/xact.c:3712 #, c-format msgid "there is no transaction in progress" msgstr "현재 트랜잭션 작업을 하지 않고 있습니다" -#: access/transam/xact.c:3609 +#: access/transam/xact.c:3620 #, c-format msgid "cannot commit during a parallel operation" msgstr "데이터베이스 트랜잭션을 commit 할 수 없음" -#: access/transam/xact.c:3712 +#: access/transam/xact.c:3723 #, c-format msgid "cannot abort during a parallel operation" msgstr "병렬 작업 중에는 중지 할 수 없음" -#: access/transam/xact.c:3754 +#: access/transam/xact.c:3765 #, c-format msgid "cannot define savepoints during a parallel operation" msgstr "병렬 작업 중에는 savepoint 지정을 할 수 없음" -#: access/transam/xact.c:3821 +#: access/transam/xact.c:3832 #, c-format msgid "cannot release savepoints during a parallel operation" msgstr "병렬 작업 중에는 savepoint를 지울 수 없음" -#: access/transam/xact.c:3832 access/transam/xact.c:3884 -#: access/transam/xact.c:3890 access/transam/xact.c:3946 -#: access/transam/xact.c:3996 access/transam/xact.c:4002 +#: access/transam/xact.c:3843 access/transam/xact.c:3895 +#: access/transam/xact.c:3901 access/transam/xact.c:3957 +#: access/transam/xact.c:4007 access/transam/xact.c:4013 #, c-format msgid "no such savepoint" msgstr "그런 savepoint가 없습니다" -#: access/transam/xact.c:3934 +#: access/transam/xact.c:3945 #, c-format msgid "cannot rollback to savepoints during a parallel operation" msgstr "병렬 작업 중에는 savepoint 지정 취소 작업을 할 수 없음" -#: access/transam/xact.c:4062 +#: access/transam/xact.c:4073 #, c-format msgid "cannot start subtransactions during a parallel operation" msgstr "병렬 처리 중에는 하위트랜잭션을 시작할 수 없음" -#: access/transam/xact.c:4129 +#: access/transam/xact.c:4140 #, c-format msgid "cannot commit subtransactions during a parallel operation" msgstr "병렬 처리 중에는 하위트랜잭션을 커밋할 수 없음" -#: access/transam/xact.c:4737 +#: access/transam/xact.c:4769 #, c-format msgid "cannot have more than 2^32-1 subtransactions in a transaction" msgstr "하나의 트랜잭션 안에서는 2^32-1 개의 하위트랜잭션을 초과할 수 없음" -#: access/transam/xlog.c:2299 +#: access/transam/xlog.c:2455 #, c-format msgid "could not seek in log file %s to offset %u: %m" msgstr "%s 파일에서 %u 위치를 찾을 수 없음: %m" -#: access/transam/xlog.c:2319 +#: access/transam/xlog.c:2477 #, c-format msgid "could not write to log file %s at offset %u, length %zu: %m" msgstr "%s 로그 파일 쓰기 실패, 위치 %u, 길이 %zu: %m" -#: access/transam/xlog.c:2582 +#: access/transam/xlog.c:2741 #, c-format msgid "updated min recovery point to %X/%X on timeline %u" msgstr "최소 복구 지점: %X/%X, 타임라인: %u 변경 완료" -#: access/transam/xlog.c:3224 +#: access/transam/xlog.c:3388 #, c-format msgid "not enough data in file \"%s\"" msgstr "\"%s\" 파일에 자료가 불충분합니다" -#: access/transam/xlog.c:3365 +#: access/transam/xlog.c:3534 #, c-format 
-msgid "could not open transaction log file \"%s\": %m" -msgstr "\"%s\" 트랜잭션 로그 파일을 열 수 없음: %m" +msgid "could not open write-ahead log file \"%s\": %m" +msgstr "\"%s\" WAL 파일을 열 수 없음: %m" -#: access/transam/xlog.c:3554 access/transam/xlog.c:5339 +#: access/transam/xlog.c:3723 access/transam/xlog.c:5541 #, c-format msgid "could not close log file %s: %m" msgstr "%s 로그 파일을 닫을 수 없음: %m" -#: access/transam/xlog.c:3611 access/transam/xlogutils.c:696 -#: replication/walsender.c:2095 +#: access/transam/xlog.c:3780 access/transam/xlogutils.c:701 +#: replication/walsender.c:2380 #, c-format msgid "requested WAL segment %s has already been removed" msgstr "요청한 %s WAL 조각 파일은 이미 지워졌음" -#: access/transam/xlog.c:3671 access/transam/xlog.c:3746 -#: access/transam/xlog.c:3944 +#: access/transam/xlog.c:3840 access/transam/xlog.c:3915 +#: access/transam/xlog.c:4110 #, c-format -msgid "could not open transaction log directory \"%s\": %m" +msgid "could not open write-ahead log directory \"%s\": %m" msgstr "\"%s\" 트랜잭션 로그 디렉터리 열기 실패: %m" -#: access/transam/xlog.c:3827 +#: access/transam/xlog.c:3996 #, c-format -msgid "recycled transaction log file \"%s\"" +msgid "recycled write-ahead log file \"%s\"" msgstr "\"%s\" 트랜잭션 로그 파일 재활용함" -#: access/transam/xlog.c:3839 +#: access/transam/xlog.c:4008 #, c-format -msgid "removing transaction log file \"%s\"" +msgid "removing write-ahead log file \"%s\"" msgstr "\"%s\" 트랜잭션 로그 파일 삭제 중" -#: access/transam/xlog.c:3859 +#: access/transam/xlog.c:4028 #, c-format -msgid "could not rename old transaction log file \"%s\": %m" +msgid "could not rename old write-ahead log file \"%s\": %m" msgstr "이전 트랜잭션 로그 파일 \"%s\"의 이름을 바꿀 수 없음: %m" -#: access/transam/xlog.c:3871 -#, c-format -msgid "could not remove old transaction log file \"%s\": %m" -msgstr "이전 트랜잭션 로그 파일 \"%s\"을(를) 제거할 수 없음: %m" - -#: access/transam/xlog.c:3904 access/transam/xlog.c:3914 +#: access/transam/xlog.c:4070 access/transam/xlog.c:4080 #, c-format msgid "required WAL directory \"%s\" does not exist" msgstr "필요한 WAL 디렉터리 \"%s\"이(가) 없음" -#: access/transam/xlog.c:3920 +#: access/transam/xlog.c:4086 #, c-format msgid "creating missing WAL directory \"%s\"" msgstr "누락된 WAL 디렉터리 \"%s\"을(를) 만드는 중" -#: access/transam/xlog.c:3923 +#: access/transam/xlog.c:4089 #, c-format msgid "could not create missing directory \"%s\": %m" msgstr "누락된 \"%s\" 디렉터리를 만들 수 없음: %m" -#: access/transam/xlog.c:3954 -#, c-format -msgid "removing transaction log backup history file \"%s\"" -msgstr "\"%s\" 트랜잭션 로그 백업 히스토리 파일 삭제 중" - -#: access/transam/xlog.c:4035 +#: access/transam/xlog.c:4200 #, c-format msgid "unexpected timeline ID %u in log segment %s, offset %u" msgstr "예상치 못한 타임라인 ID %u, 로그 조각: %s, 위치: %u" -#: access/transam/xlog.c:4157 +#: access/transam/xlog.c:4322 #, c-format msgid "new timeline %u is not a child of database system timeline %u" msgstr "요청한 %u 타임라인은 %u 데이터베이스 시스템 타임라인의 하위가 아님" -#: access/transam/xlog.c:4171 +#: access/transam/xlog.c:4336 #, c-format msgid "" "new timeline %u forked off current database system timeline %u before " "current recovery point %X/%X" msgstr "" -#: access/transam/xlog.c:4190 +#: access/transam/xlog.c:4355 #, c-format msgid "new target timeline is %u" msgstr "새 대상 타임라인: %u" -#: access/transam/xlog.c:4270 +#: access/transam/xlog.c:4436 #, c-format msgid "could not create control file \"%s\": %m" msgstr "\"%s\" 컨트롤 파일 만들 수 없음: %m" -#: access/transam/xlog.c:4281 access/transam/xlog.c:4517 +#: access/transam/xlog.c:4448 access/transam/xlog.c:4674 #, c-format msgid "could not write to control file: %m" 
msgstr "컨트롤 파일을 쓸 수 없음: %m" -#: access/transam/xlog.c:4287 access/transam/xlog.c:4523 +#: access/transam/xlog.c:4456 access/transam/xlog.c:4682 #, c-format msgid "could not fsync control file: %m" msgstr "컨트롤 파일 fsync 실패: %m" -#: access/transam/xlog.c:4292 access/transam/xlog.c:4528 +#: access/transam/xlog.c:4462 access/transam/xlog.c:4688 #, c-format msgid "could not close control file: %m" msgstr "컨트롤 파일 닫기 실패: %m" -#: access/transam/xlog.c:4310 access/transam/xlog.c:4506 +#: access/transam/xlog.c:4480 access/transam/xlog.c:4662 #, c-format msgid "could not open control file \"%s\": %m" msgstr "\"%s\" 컨트롤 파일 열기 실패: %m" -#: access/transam/xlog.c:4316 +#: access/transam/xlog.c:4487 #, c-format msgid "could not read from control file: %m" msgstr "컨트롤 파일 읽기 실패: %m" -#: access/transam/xlog.c:4329 access/transam/xlog.c:4338 -#: access/transam/xlog.c:4362 access/transam/xlog.c:4369 -#: access/transam/xlog.c:4376 access/transam/xlog.c:4381 -#: access/transam/xlog.c:4388 access/transam/xlog.c:4395 -#: access/transam/xlog.c:4402 access/transam/xlog.c:4409 -#: access/transam/xlog.c:4416 access/transam/xlog.c:4423 -#: access/transam/xlog.c:4430 access/transam/xlog.c:4439 -#: access/transam/xlog.c:4446 access/transam/xlog.c:4455 -#: access/transam/xlog.c:4462 access/transam/xlog.c:4471 -#: access/transam/xlog.c:4478 utils/init/miscinit.c:1380 +#: access/transam/xlog.c:4501 access/transam/xlog.c:4510 +#: access/transam/xlog.c:4534 access/transam/xlog.c:4541 +#: access/transam/xlog.c:4548 access/transam/xlog.c:4553 +#: access/transam/xlog.c:4560 access/transam/xlog.c:4567 +#: access/transam/xlog.c:4574 access/transam/xlog.c:4581 +#: access/transam/xlog.c:4588 access/transam/xlog.c:4595 +#: access/transam/xlog.c:4602 access/transam/xlog.c:4611 +#: access/transam/xlog.c:4618 access/transam/xlog.c:4627 +#: access/transam/xlog.c:4634 utils/init/miscinit.c:1406 #, c-format msgid "database files are incompatible with server" msgstr "데이터베이스 파일들이 서버와 호환성이 없습니다" -#: access/transam/xlog.c:4330 +#: access/transam/xlog.c:4502 #, c-format msgid "" "The database cluster was initialized with PG_CONTROL_VERSION %d (0x%08x), " @@ -2012,7 +1833,7 @@ msgstr "" "데이터베이스 클러스터는 PG_CONTROL_VERSION %d (0x%08x)(으)로 초기화되었지만 " "서버는 PG_CONTROL_VERSION %d (0x%08x)(으)로 컴파일되었습니다." -#: access/transam/xlog.c:4334 +#: access/transam/xlog.c:4506 #, c-format msgid "" "This could be a problem of mismatched byte ordering. It looks like you need " @@ -2020,7 +1841,7 @@ msgid "" msgstr "" "이것은 바이트 순서 불일치 문제일 수 있습니다. initdb 작업이 필요해 보입니다." -#: access/transam/xlog.c:4339 +#: access/transam/xlog.c:4511 #, c-format msgid "" "The database cluster was initialized with PG_CONTROL_VERSION %d, but the " @@ -2029,18 +1850,18 @@ msgstr "" "이 데이터베이스 클러스터는 PG_CONTROL_VERSION %d 버전으로 초기화 되었지만, 서" "버는 PG_CONTROL_VERSION %d 버전으로 컴파일 되어있습니다." -#: access/transam/xlog.c:4342 access/transam/xlog.c:4366 -#: access/transam/xlog.c:4373 access/transam/xlog.c:4378 +#: access/transam/xlog.c:4514 access/transam/xlog.c:4538 +#: access/transam/xlog.c:4545 access/transam/xlog.c:4550 #, c-format msgid "It looks like you need to initdb." 
msgstr "initdb 명령이 필요한 듯 합니다" -#: access/transam/xlog.c:4353 +#: access/transam/xlog.c:4525 #, c-format msgid "incorrect checksum in control file" msgstr "컨트롤 파일에 잘못된 체크섬 값이 있습니다" -#: access/transam/xlog.c:4363 +#: access/transam/xlog.c:4535 #, c-format msgid "" "The database cluster was initialized with CATALOG_VERSION_NO %d, but the " @@ -2049,7 +1870,7 @@ msgstr "" "이 데이터베이스 클러스터는 CATALOG_VERSION_NO %d 버전으로 초기화 되었지만, 서" "버는 CATALOG_VERSION_NO %d 버전으로 컴파일 되어있습니다." -#: access/transam/xlog.c:4370 +#: access/transam/xlog.c:4542 #, c-format msgid "" "The database cluster was initialized with MAXALIGN %d, but the server was " @@ -2058,7 +1879,7 @@ msgstr "" "이 데이터베이스 클러스터는 MAXALIGN %d (으)로 초기화 되었지만, 서버는 " "MAXALIGN %d (으)로 컴파일 되어있습니다." -#: access/transam/xlog.c:4377 +#: access/transam/xlog.c:4549 #, c-format msgid "" "The database cluster appears to use a different floating-point number format " @@ -2067,7 +1888,7 @@ msgstr "" "데이터베이스 클러스터와 서버 실행 파일이 서로 다른 부동 소수점 숫자 형식을 사" "용하고 있습니다." -#: access/transam/xlog.c:4382 +#: access/transam/xlog.c:4554 #, c-format msgid "" "The database cluster was initialized with BLCKSZ %d, but the server was " @@ -2076,20 +1897,19 @@ msgstr "" "이 데이터베이스 클러스터는 BLCKSZ %d (으)로 초기화 되었지만, 서버는 BLCKSZ " "%d (으)로 컴파일 되어있습니다." -#: access/transam/xlog.c:4385 access/transam/xlog.c:4392 -#: access/transam/xlog.c:4399 access/transam/xlog.c:4406 -#: access/transam/xlog.c:4413 access/transam/xlog.c:4420 -#: access/transam/xlog.c:4427 access/transam/xlog.c:4434 -#: access/transam/xlog.c:4442 access/transam/xlog.c:4449 -#: access/transam/xlog.c:4458 access/transam/xlog.c:4465 -#: access/transam/xlog.c:4474 access/transam/xlog.c:4481 +#: access/transam/xlog.c:4557 access/transam/xlog.c:4564 +#: access/transam/xlog.c:4571 access/transam/xlog.c:4578 +#: access/transam/xlog.c:4585 access/transam/xlog.c:4592 +#: access/transam/xlog.c:4599 access/transam/xlog.c:4606 +#: access/transam/xlog.c:4614 access/transam/xlog.c:4621 +#: access/transam/xlog.c:4630 access/transam/xlog.c:4637 #, c-format msgid "It looks like you need to recompile or initdb." msgstr "" "서버를 새로 컴파일 하거나 initdb 명령을 사용해 새로 데이터베이스 클러스터를 " "다시 만들거나 해야할 것 같습니다." -#: access/transam/xlog.c:4389 +#: access/transam/xlog.c:4561 #, c-format msgid "" "The database cluster was initialized with RELSEG_SIZE %d, but the server was " @@ -2098,7 +1918,7 @@ msgstr "" "이 데이터베이스 클러스터는 RELSEG_SIZE %d (으)로 초기화 되었지만, 서버는 " "RELSEG_SIZE %d (으)로 컴파일 되어있습니다." -#: access/transam/xlog.c:4396 +#: access/transam/xlog.c:4568 #, c-format msgid "" "The database cluster was initialized with XLOG_BLCKSZ %d, but the server was " @@ -2107,7 +1927,7 @@ msgstr "" "이 데이터베이스 클러스터는 XLOG_BLCKSZ %d (으)로 초기화 되었지만, 서버는 " "XLOG_BLCKSZ %d (으)로 컴파일 되어있습니다." -#: access/transam/xlog.c:4403 +#: access/transam/xlog.c:4575 #, c-format msgid "" "The database cluster was initialized with XLOG_SEG_SIZE %d, but the server " @@ -2116,7 +1936,7 @@ msgstr "" "이 데이터베이스 클러스터는 XLOG_SEG_SIZE %d (으)로 초기화 되었지만, 서버는 " "XLOG_SEG_SIZE %d (으)로 컴파일 되어있습니다." -#: access/transam/xlog.c:4410 +#: access/transam/xlog.c:4582 #, c-format msgid "" "The database cluster was initialized with NAMEDATALEN %d, but the server was " @@ -2125,7 +1945,7 @@ msgstr "" "이 데이터베이스 클러스터는 NAMEDATALEN %d (으)로 초기화 되었지만, 서버는 " "NAMEDATALEN %d (으)로 컴파일 되어있습니다." 
-#: access/transam/xlog.c:4417 +#: access/transam/xlog.c:4589 #, c-format msgid "" "The database cluster was initialized with INDEX_MAX_KEYS %d, but the server " @@ -2134,7 +1954,7 @@ msgstr "" "이 데이터베이스 클러스터는 INDEX_MAX_KEYS %d (으)로 초기화 되었지만, 서버는 " "INDEX_MAX_KEYS %d (으)로 컴파일 되어있습니다." -#: access/transam/xlog.c:4424 +#: access/transam/xlog.c:4596 #, c-format msgid "" "The database cluster was initialized with TOAST_MAX_CHUNK_SIZE %d, but the " @@ -2143,7 +1963,7 @@ msgstr "" "데이터베이스 클러스터는 TOAST_MAX_CHUNK_SIZE %d(으)로 초기화되었지만 서버는 " "TOAST_MAX_CHUNK_SIZE %d(으)로 컴파일 되었습니다." -#: access/transam/xlog.c:4431 +#: access/transam/xlog.c:4603 #, c-format msgid "" "The database cluster was initialized with LOBLKSIZE %d, but the server was " @@ -2152,25 +1972,7 @@ msgstr "" "이 데이터베이스 클러스터는 LOBLKSIZE %d(으)로 초기화 되었지만, 서버는 " "LOBLKSIZE %d (으)로 컴파일 되어있습니다." -#: access/transam/xlog.c:4440 -#, c-format -msgid "" -"The database cluster was initialized without HAVE_INT64_TIMESTAMP but the " -"server was compiled with HAVE_INT64_TIMESTAMP." -msgstr "" -"이 데이터베이스 클러스터는 HAVE_INT64_TIMESTAMP 값이 false로 초기화 되었지" -"만, 서버는 HAVE_INT64_TIMESTAMP 값이 true로 컴파일 되어있습니다." - -#: access/transam/xlog.c:4447 -#, c-format -msgid "" -"The database cluster was initialized with HAVE_INT64_TIMESTAMP but the " -"server was compiled without HAVE_INT64_TIMESTAMP." -msgstr "" -"이 데이터베이스 클러스터는 HAVE_INT64_TIMESTAMP 값이 true로 초기화 되었지만, " -"서버는 HAVE_INT64_TIMESTAMP 값이 false로 컴파일 되어있습니다." - -#: access/transam/xlog.c:4456 +#: access/transam/xlog.c:4612 #, c-format msgid "" "The database cluster was initialized without USE_FLOAT4_BYVAL but the server " @@ -2179,7 +1981,7 @@ msgstr "" "데이터베이스 클러스터는 USE_FLOAT4_BYVAL 없이 초기화되었지만, 서버는 " "USE_FLOAT4_BYVAL을 사용하여 컴파일되었습니다." -#: access/transam/xlog.c:4463 +#: access/transam/xlog.c:4619 #, c-format msgid "" "The database cluster was initialized with USE_FLOAT4_BYVAL but the server " @@ -2188,7 +1990,7 @@ msgstr "" "데이터베이스 클러스터는 USE_FLOAT4_BYVAL을 사용하여 초기화되었지만, 서버는 " "USE_FLOAT4_BYVAL 없이 컴파일되었습니다." -#: access/transam/xlog.c:4472 +#: access/transam/xlog.c:4628 #, c-format msgid "" "The database cluster was initialized without USE_FLOAT8_BYVAL but the server " @@ -2197,7 +1999,7 @@ msgstr "" "데이터베이스 클러스터는 USE_FLOAT8_BYVAL 없이 초기화되었지만, 서버는 " "USE_FLOAT8_BYVAL을 사용하여 컴파일되었습니다." -#: access/transam/xlog.c:4479 +#: access/transam/xlog.c:4635 #, c-format msgid "" "The database cluster was initialized with USE_FLOAT8_BYVAL but the server " @@ -2206,86 +2008,90 @@ msgstr "" "데이터베이스 클러스터는 USE_FLOAT8_BYVAL을 사용하여 초기화되었지만, 서버는 " "USE_FLOAT8_BYVAL 없이 컴파일되었습니다." 
-#: access/transam/xlog.c:4900 +#: access/transam/xlog.c:4991 #, c-format -msgid "could not write bootstrap transaction log file: %m" +msgid "could not generate secret authorization token" +msgstr "비밀 인증 토큰을 만들 수 없음" + +#: access/transam/xlog.c:5081 +#, c-format +msgid "could not write bootstrap write-ahead log file: %m" msgstr "bootstrap 트랜잭션 로그 파일을 쓸 수 없음: %m" -#: access/transam/xlog.c:4906 +#: access/transam/xlog.c:5089 #, c-format -msgid "could not fsync bootstrap transaction log file: %m" +msgid "could not fsync bootstrap write-ahead log file: %m" msgstr "bootstrap 트랜잭션 로그 파일을 fsync할 수 없음: %m" -#: access/transam/xlog.c:4911 +#: access/transam/xlog.c:5095 #, c-format -msgid "could not close bootstrap transaction log file: %m" +msgid "could not close bootstrap write-ahead log file: %m" msgstr "bootstrap 트랜잭션 로그 파일을 닫을 수 없음: %m" -#: access/transam/xlog.c:4986 +#: access/transam/xlog.c:5171 #, c-format msgid "could not open recovery command file \"%s\": %m" msgstr "복구명령 파일 \"%s\"을 열 수 없습니다: %m" -#: access/transam/xlog.c:5032 access/transam/xlog.c:5117 +#: access/transam/xlog.c:5217 access/transam/xlog.c:5319 #, c-format msgid "invalid value for recovery parameter \"%s\": \"%s\"" msgstr "잘못된 \"%s\" 복구 매개 변수의 값: \"%s\"" -#: access/transam/xlog.c:5035 +#: access/transam/xlog.c:5220 #, c-format msgid "Valid values are \"pause\", \"promote\", and \"shutdown\"." msgstr "사용할 수 있는 값: \"pause\", \"promote\", \"shutdown\"" -#: access/transam/xlog.c:5055 +#: access/transam/xlog.c:5240 #, c-format msgid "recovery_target_timeline is not a valid number: \"%s\"" msgstr "recovery_target_timeline 값으로 잘못된 숫자: \"%s\"" -#: access/transam/xlog.c:5072 +#: access/transam/xlog.c:5257 #, c-format msgid "recovery_target_xid is not a valid number: \"%s\"" msgstr "recovery_target_xid 값으로 잘못된 숫자: \"%s\"" -#: access/transam/xlog.c:5103 +#: access/transam/xlog.c:5288 #, c-format msgid "recovery_target_name is too long (maximum %d characters)" msgstr "recovery_target_name 설정값이 너무 깁니다 (최대 %d 문자)" -#: access/transam/xlog.c:5120 +#: access/transam/xlog.c:5322 #, c-format msgid "The only allowed value is \"immediate\"." msgstr "이 값으로는 \"immediate\" 만 허용합니다." -#: access/transam/xlog.c:5133 access/transam/xlog.c:5144 -#: commands/extension.c:534 commands/extension.c:542 utils/misc/guc.c:5640 +#: access/transam/xlog.c:5335 access/transam/xlog.c:5346 +#: commands/extension.c:547 commands/extension.c:555 utils/misc/guc.c:5750 #, c-format msgid "parameter \"%s\" requires a Boolean value" msgstr "\"%s\" 매개 변수의 값은 boolean 값이어야합니다." -#: access/transam/xlog.c:5179 +#: access/transam/xlog.c:5381 #, c-format msgid "parameter \"%s\" requires a temporal value" msgstr "\"%s\" 매개 변수의 값은 시간값이어야 합니다." 
-#: access/transam/xlog.c:5181 catalog/dependency.c:990 -#: catalog/dependency.c:991 catalog/dependency.c:997 catalog/dependency.c:998 -#: catalog/dependency.c:1009 catalog/dependency.c:1010 -#: catalog/objectaddress.c:1100 commands/tablecmds.c:796 -#: commands/tablecmds.c:9542 commands/user.c:1045 commands/view.c:499 -#: libpq/auth.c:307 replication/syncrep.c:919 storage/lmgr/deadlock.c:1139 -#: storage/lmgr/proc.c:1278 utils/adt/acl.c:5281 utils/misc/guc.c:5662 -#: utils/misc/guc.c:5755 utils/misc/guc.c:9708 utils/misc/guc.c:9742 -#: utils/misc/guc.c:9776 utils/misc/guc.c:9810 utils/misc/guc.c:9845 +#: access/transam/xlog.c:5383 catalog/dependency.c:961 +#: catalog/dependency.c:962 catalog/dependency.c:968 catalog/dependency.c:969 +#: catalog/dependency.c:980 catalog/dependency.c:981 commands/tablecmds.c:946 +#: commands/tablecmds.c:10358 commands/user.c:1064 commands/view.c:505 +#: libpq/auth.c:328 replication/syncrep.c:1160 storage/lmgr/deadlock.c:1139 +#: storage/lmgr/proc.c:1313 utils/adt/acl.c:5248 utils/misc/guc.c:5772 +#: utils/misc/guc.c:5865 utils/misc/guc.c:9821 utils/misc/guc.c:9855 +#: utils/misc/guc.c:9889 utils/misc/guc.c:9923 utils/misc/guc.c:9958 #, c-format msgid "%s" msgstr "%s" -#: access/transam/xlog.c:5188 +#: access/transam/xlog.c:5390 #, c-format msgid "unrecognized recovery parameter \"%s\"" msgstr "알 수 없는 복구 매개 변수 이름: \"%s\"" -#: access/transam/xlog.c:5199 +#: access/transam/xlog.c:5401 #, c-format msgid "" "recovery command file \"%s\" specified neither primary_conninfo nor " @@ -2294,16 +2100,16 @@ msgstr "" "복구 명령 파일 \"%s\"에서 primary_conninfo 설정도, restore_command 설정도 없" "습니다." -#: access/transam/xlog.c:5201 +#: access/transam/xlog.c:5403 #, c-format msgid "" -"The database server will regularly poll the pg_xlog subdirectory to check " -"for files placed there." +"The database server will regularly poll the pg_wal subdirectory to check for " +"files placed there." msgstr "" "데이터베이스 서버는 일반적으로 주 서버에서 발생한 트랜잭션 로그를 반영하기 위" -"해 pg_xlog 하위 디렉터리를 조사할 것입니다." +"해 pg_wal 하위 디렉터리를 조사할 것입니다." -#: access/transam/xlog.c:5208 +#: access/transam/xlog.c:5410 #, c-format msgid "" "recovery command file \"%s\" must specify restore_command when standby mode " @@ -2312,62 +2118,72 @@ msgstr "" "대기 모드를 활성화 하지 않았다면(standby_mode = off), 복구 명령 파일 \"%s\"에" "서 restore_command 설정은 반드시 있어야 합니다." -#: access/transam/xlog.c:5229 +#: access/transam/xlog.c:5431 #, c-format msgid "standby mode is not supported by single-user servers" msgstr "단일 사용자 서버를 대상으로 대기 모드를 사용할 수 없습니다." -#: access/transam/xlog.c:5248 +#: access/transam/xlog.c:5450 #, c-format msgid "recovery target timeline %u does not exist" msgstr "%u 복구 대상 타임라인이 없음" -#: access/transam/xlog.c:5369 +#: access/transam/xlog.c:5571 #, c-format msgid "archive recovery complete" msgstr "아카이브 복구 완료" -#: access/transam/xlog.c:5428 access/transam/xlog.c:5656 +#: access/transam/xlog.c:5630 access/transam/xlog.c:5896 #, c-format msgid "recovery stopping after reaching consistency" msgstr "일관성을 다 맞추어 복구 작업을 중지합니다." 
-#: access/transam/xlog.c:5516 +#: access/transam/xlog.c:5651 +#, c-format +msgid "recovery stopping before WAL location (LSN) \"%X/%X\"" +msgstr "복구 중지 위치(LSN): \"%X/%X\" 이전" + +#: access/transam/xlog.c:5737 #, c-format msgid "recovery stopping before commit of transaction %u, time %s" msgstr "%u 트랜잭션 커밋 전 복구 중지함, 시간 %s" -#: access/transam/xlog.c:5523 +#: access/transam/xlog.c:5744 #, c-format msgid "recovery stopping before abort of transaction %u, time %s" msgstr "%u 트랜잭션 중단 전 복구 중지함, 시간 %s" -#: access/transam/xlog.c:5568 +#: access/transam/xlog.c:5790 #, c-format msgid "recovery stopping at restore point \"%s\", time %s" msgstr "복구 중지함, 복구 위치 \"%s\", 시간 %s" -#: access/transam/xlog.c:5636 +#: access/transam/xlog.c:5808 +#, c-format +msgid "recovery stopping after WAL location (LSN) \"%X/%X\"" +msgstr "복구 중지 위치(LSN): \"%X/%X\" 이후" + +#: access/transam/xlog.c:5876 #, c-format msgid "recovery stopping after commit of transaction %u, time %s" msgstr "%u 트랜잭션 커밋 후 복구 중지함, 시간 %s" -#: access/transam/xlog.c:5644 +#: access/transam/xlog.c:5884 #, c-format msgid "recovery stopping after abort of transaction %u, time %s" msgstr "%u 트랜잭션 중단 후 복구 중지함, 시간 %s" -#: access/transam/xlog.c:5683 +#: access/transam/xlog.c:5924 #, c-format msgid "recovery has paused" msgstr "복구 작업이 일시 중지 됨" -#: access/transam/xlog.c:5684 +#: access/transam/xlog.c:5925 #, c-format -msgid "Execute pg_xlog_replay_resume() to continue." -msgstr "계속 진행하려면, pg_xlog_replay_resume() 함수를 호출하세요." +msgid "Execute pg_wal_replay_resume() to continue." +msgstr "계속 진행하려면, pg_wal_replay_resume() 함수를 호출하세요." -#: access/transam/xlog.c:5891 +#: access/transam/xlog.c:6133 #, c-format msgid "" "hot standby is not possible because %s = %d is a lower setting than on the " @@ -2376,14 +2192,14 @@ msgstr "" "읽기 전용 대기 서버로 운영이 불가능합니다. 현재 %s = %d 설정은 주 서버의 설정" "값(%d)보다 낮게 설정 되어 있기 때문입니다." -#: access/transam/xlog.c:5917 +#: access/transam/xlog.c:6159 #, c-format msgid "WAL was generated with wal_level=minimal, data may be missing" msgstr "" "WAL 내용이 wal_level=minimal 설정으로 만들어졌습니다. 자료가 손실 될 수 있습" "니다." -#: access/transam/xlog.c:5918 +#: access/transam/xlog.c:6160 #, c-format msgid "" "This happens if you temporarily set wal_level=minimal without taking a new " @@ -2392,7 +2208,7 @@ msgstr "" "이 문제는 새 베이스 백업을 받지 않은 상태에서 서버가 일시적으로 " "wal_level=minimal 설정으로 운영된 적이 있다면 발생합니다." -#: access/transam/xlog.c:5929 +#: access/transam/xlog.c:6171 #, c-format msgid "" "hot standby is not possible because wal_level was not set to \"replica\" or " @@ -2401,7 +2217,7 @@ msgstr "" "주 서버 wal_level 설정이 \"replica\" 또는 그 이상 수준으로 설정되지 않아, 읽" "기 전용 보조 서버로 운영될 수 없음" -#: access/transam/xlog.c:5930 +#: access/transam/xlog.c:6172 #, c-format msgid "" "Either set wal_level to \"replica\" on the master, or turn off hot_standby " @@ -2410,34 +2226,34 @@ msgstr "" "운영 서버의 환경 설정에서 wal_level = \"replica\" 형태로 지정하든가 " "hot_standby = off 형태로 지정하십시오." 
-#: access/transam/xlog.c:5987 +#: access/transam/xlog.c:6229 #, c-format msgid "control file contains invalid data" msgstr "컨트롤 파일에 잘못된 데이터가 있습니다" -#: access/transam/xlog.c:5993 +#: access/transam/xlog.c:6235 #, c-format msgid "database system was shut down at %s" msgstr "데이터베이스 시스템 마지막 가동 중지 시각: %s" -#: access/transam/xlog.c:5998 +#: access/transam/xlog.c:6240 #, c-format msgid "database system was shut down in recovery at %s" msgstr "복구 중 데이터베이스 시스템 마지막 가동 중지 시각: %s" -#: access/transam/xlog.c:6002 +#: access/transam/xlog.c:6244 #, c-format msgid "database system shutdown was interrupted; last known up at %s" msgstr "" "데이터베이스 시스템 셧다운 작업이 비정상적으로 종료되었음; 마지막 운영시간: " "%s" -#: access/transam/xlog.c:6006 +#: access/transam/xlog.c:6248 #, c-format msgid "database system was interrupted while in recovery at %s" msgstr "데이터베이스 시스템 복구하는 도중 비정상적으로 가동 중지된 시각: %s" -#: access/transam/xlog.c:6008 +#: access/transam/xlog.c:6250 #, c-format msgid "" "This probably means that some data is corrupted and you will have to use the " @@ -2446,12 +2262,12 @@ msgstr "" "이 사태는 몇몇 데이터가 손상되었을 의미할 수도 있습니다. 확인해 보고, 필요하" "다면, 마지막 백업 자료로 복구해서 사용하세요." -#: access/transam/xlog.c:6012 +#: access/transam/xlog.c:6254 #, c-format msgid "database system was interrupted while in recovery at log time %s" msgstr "데이터베이스 시스템이 로그 시간 %s에 복구 도중 중지 되었음" -#: access/transam/xlog.c:6014 +#: access/transam/xlog.c:6256 #, c-format msgid "" "If this has occurred more than once some data might be corrupted and you " @@ -2460,52 +2276,57 @@ msgstr "" "이 사태로 몇몇 자료가 손상되었을 수도 있는데, 이런 경우라면,확인해 보고, 필요" "하다면, 마지막 백업 자료로 복구해서 사용하세요." -#: access/transam/xlog.c:6018 +#: access/transam/xlog.c:6260 #, c-format msgid "database system was interrupted; last known up at %s" msgstr "데이터베이스 시스템이 비정상적으로 종료되었음; 마지막 운영시간: %s" -#: access/transam/xlog.c:6074 +#: access/transam/xlog.c:6316 #, c-format msgid "entering standby mode" msgstr "대기 모드로 전환합니다" -#: access/transam/xlog.c:6077 +#: access/transam/xlog.c:6319 #, c-format msgid "starting point-in-time recovery to XID %u" msgstr "%u XID까지 시점 기반 복구 작업을 시작합니다" -#: access/transam/xlog.c:6081 +#: access/transam/xlog.c:6323 #, c-format msgid "starting point-in-time recovery to %s" msgstr "%s 까지 시점 복구 작업을 시작합니다" -#: access/transam/xlog.c:6085 +#: access/transam/xlog.c:6327 #, c-format msgid "starting point-in-time recovery to \"%s\"" msgstr "\"%s\" 복구 대상 이름까지 시점 복구 작업을 시작합니다" -#: access/transam/xlog.c:6089 +#: access/transam/xlog.c:6331 +#, c-format +msgid "starting point-in-time recovery to WAL location (LSN) \"%X/%X\"" +msgstr "\"%X/%X\" 위치(LSN)까지 시점 복구 작업을 시작합니다" + +#: access/transam/xlog.c:6336 #, c-format msgid "starting point-in-time recovery to earliest consistent point" msgstr "동기화 할 수 있는 마지막 지점까지 시점 복구 작업을 시작합니다" -#: access/transam/xlog.c:6092 +#: access/transam/xlog.c:6339 #, c-format msgid "starting archive recovery" msgstr "아카이브 복구 작업을 시작합니다" -#: access/transam/xlog.c:6136 access/transam/xlog.c:6264 +#: access/transam/xlog.c:6390 access/transam/xlog.c:6518 #, c-format msgid "checkpoint record is at %X/%X" msgstr "체크포인트 레코드 위치: %X/%X" -#: access/transam/xlog.c:6150 +#: access/transam/xlog.c:6404 #, c-format msgid "could not find redo location referenced by checkpoint record" msgstr "체크포인트 기록으로 참조하는 재실행 위치를 찾을 수 없음" -#: access/transam/xlog.c:6151 access/transam/xlog.c:6158 +#: access/transam/xlog.c:6405 access/transam/xlog.c:6412 #, c-format msgid "" "If you are not restoring from a backup, try removing the file \"%s/" @@ -2514,47 +2335,47 @@ msgstr "" "실시간 백업 자료로부터 복구 작업을 하지 않으려면, \"%s/backup_lable\" 파일을 " 
"삭제 하세요." -#: access/transam/xlog.c:6157 +#: access/transam/xlog.c:6411 #, c-format msgid "could not locate required checkpoint record" msgstr "요청된 체크포인트 레코드의 위치를 바르게 잡을 수 없음" -#: access/transam/xlog.c:6183 commands/tablespace.c:641 +#: access/transam/xlog.c:6437 commands/tablespace.c:639 #, c-format msgid "could not create symbolic link \"%s\": %m" msgstr "\"%s\" 심벌릭 링크를 만들 수 없음: %m" -#: access/transam/xlog.c:6215 access/transam/xlog.c:6221 +#: access/transam/xlog.c:6469 access/transam/xlog.c:6475 #, c-format msgid "ignoring file \"%s\" because no file \"%s\" exists" msgstr "\"%s\" 파일 무시함, \"%s\" 파일 없음" -#: access/transam/xlog.c:6217 access/transam/xlog.c:11032 +#: access/transam/xlog.c:6471 access/transam/xlog.c:11400 #, c-format msgid "File \"%s\" was renamed to \"%s\"." msgstr "\"%s\" 파일을 \"%s\" 파일로 이름을 바꿨습니다." -#: access/transam/xlog.c:6223 +#: access/transam/xlog.c:6477 #, c-format msgid "Could not rename file \"%s\" to \"%s\": %m." msgstr "\"%s\" 파일을 \"%s\" 파일로 이름을 바꿀 수 없음: %m" -#: access/transam/xlog.c:6274 access/transam/xlog.c:6289 +#: access/transam/xlog.c:6528 access/transam/xlog.c:6543 #, c-format msgid "could not locate a valid checkpoint record" msgstr "체크포인트 레코드의 위치를 바르게 잡을 수 없음" -#: access/transam/xlog.c:6283 +#: access/transam/xlog.c:6537 #, c-format msgid "using previous checkpoint record at %X/%X" msgstr "이전 체크포인트 레코드를 사용함, 위치: %X/%X" -#: access/transam/xlog.c:6327 +#: access/transam/xlog.c:6581 #, c-format msgid "requested timeline %u is not a child of this server's history" msgstr "요청한 %u 타임라인은 서버 타임라인의 하위가 아님" -#: access/transam/xlog.c:6329 +#: access/transam/xlog.c:6583 #, c-format msgid "" "Latest checkpoint is at %X/%X on timeline %u, but in the history of the " @@ -2563,7 +2384,7 @@ msgstr "" "마지막 체크포인트 위치는 %X/%X (%u 타임라인)입니다. 하지만, 요청받은 타임라" "인 내역파일에는 그 타임라인 %X/%X 위치에서 분기되었습니다." -#: access/transam/xlog.c:6345 +#: access/transam/xlog.c:6599 #, c-format msgid "" "requested timeline %u does not contain minimum recovery point %X/%X on " @@ -2571,22 +2392,22 @@ msgid "" msgstr "" "요청한 %u 타임라인은 %X/%X 최소 복구 위치가 없습니다, 기존 타임라인: %u" -#: access/transam/xlog.c:6376 +#: access/transam/xlog.c:6630 #, c-format msgid "invalid next transaction ID" msgstr "잘못된 다음 트랜잭션 ID" -#: access/transam/xlog.c:6459 +#: access/transam/xlog.c:6724 #, c-format msgid "invalid redo in checkpoint record" msgstr "체크포인트 레코드 안에 잘못된 redo 정보가 있음" -#: access/transam/xlog.c:6470 +#: access/transam/xlog.c:6735 #, c-format msgid "invalid redo record in shutdown checkpoint" msgstr "운영 중지 체크포인트에서 잘못된 재실행 정보 발견" -#: access/transam/xlog.c:6498 +#: access/transam/xlog.c:6763 #, c-format msgid "" "database system was not properly shut down; automatic recovery in progress" @@ -2594,18 +2415,18 @@ msgstr "" "데이터베이스 시스템이 정상적으로 종료되지 못했습니다, 자동 복구 작업을 진행합" "니다" -#: access/transam/xlog.c:6502 +#: access/transam/xlog.c:6767 #, c-format msgid "crash recovery starts in timeline %u and has target timeline %u" msgstr "" "%u 타임라인으로 비정상 중지에 대한 복구작업을 시작함, 기존 타임라인: %u" -#: access/transam/xlog.c:6546 +#: access/transam/xlog.c:6811 #, c-format msgid "backup_label contains data inconsistent with control file" msgstr "backup_label 파일 안에 컨트롤 파일과 일관성이 맞지 않는 자료가 있음" -#: access/transam/xlog.c:6547 +#: access/transam/xlog.c:6812 #, c-format msgid "" "This means that the backup is corrupted and you will have to use another " @@ -2614,42 +2435,42 @@ msgstr "" "이 문제는 백업 자료 자체가 손상 되었음을 말합니다. 다른 백업본으로 복구 작업" "을 진행해야 합니다." 
-#: access/transam/xlog.c:6621 +#: access/transam/xlog.c:6886 #, c-format msgid "initializing for hot standby" msgstr "읽기 전용 보조 서버로 초기화 중입니다." -#: access/transam/xlog.c:6753 +#: access/transam/xlog.c:7018 #, c-format msgid "redo starts at %X/%X" msgstr "%X/%X에서 redo 작업 시작됨" -#: access/transam/xlog.c:6978 +#: access/transam/xlog.c:7252 #, c-format msgid "requested recovery stop point is before consistent recovery point" msgstr "요청한 복구 중지 지점이 일치하는 복구 지점 앞에 있음" -#: access/transam/xlog.c:7016 +#: access/transam/xlog.c:7290 #, c-format msgid "redo done at %X/%X" msgstr "%X/%X에서 redo 작업 완료" -#: access/transam/xlog.c:7021 access/transam/xlog.c:8969 +#: access/transam/xlog.c:7295 access/transam/xlog.c:9309 #, c-format msgid "last completed transaction was at log time %s" msgstr "마지막 완료된 트랜잭션 기록 시간: %s" -#: access/transam/xlog.c:7030 +#: access/transam/xlog.c:7304 #, c-format msgid "redo is not required" msgstr "재반영해야 할 트랜잭션이 없음" -#: access/transam/xlog.c:7105 access/transam/xlog.c:7109 +#: access/transam/xlog.c:7379 access/transam/xlog.c:7383 #, c-format msgid "WAL ends before end of online backup" msgstr "온라인 백업 작업 끝나기전에 WAL 작업 종료됨" -#: access/transam/xlog.c:7106 +#: access/transam/xlog.c:7380 #, c-format msgid "" "All WAL generated while online backup was taken must be available at " @@ -2658,7 +2479,7 @@ msgstr "" "온라인 백업 중 만들어진 WAL 조각 파일은 복구 작업에서 반드시 모두 있어야 합니" "다." -#: access/transam/xlog.c:7110 +#: access/transam/xlog.c:7384 #, c-format msgid "" "Online backup started with pg_start_backup() must be ended with " @@ -2668,128 +2489,133 @@ msgstr "" "로 종료되어야 하며, 그 사이 만들어진 WAL 조각 파일은 복구 작업에서 모두 필요" "합니다." -#: access/transam/xlog.c:7113 +#: access/transam/xlog.c:7387 #, c-format msgid "WAL ends before consistent recovery point" msgstr "WAL이 일치하는 복구 지점 앞에서 종료됨" -#: access/transam/xlog.c:7140 +#: access/transam/xlog.c:7414 #, c-format msgid "selected new timeline ID: %u" msgstr "지정한 새 타임라인 ID: %u" -#: access/transam/xlog.c:7551 +#: access/transam/xlog.c:7843 #, c-format msgid "consistent recovery state reached at %X/%X" msgstr "%X/%X 위치에서 복구 일관성을 맞춤" -#: access/transam/xlog.c:7742 +#: access/transam/xlog.c:8035 #, c-format msgid "invalid primary checkpoint link in control file" msgstr "컨트롤 파일에서 잘못된 primary checkpoint 링크 발견" -#: access/transam/xlog.c:7746 +#: access/transam/xlog.c:8039 #, c-format msgid "invalid secondary checkpoint link in control file" msgstr "컨트롤 파일에서 잘못된 secondary checkpoint 링크 발견" -#: access/transam/xlog.c:7750 +#: access/transam/xlog.c:8043 #, c-format msgid "invalid checkpoint link in backup_label file" msgstr "백업 라벨 파일에서 잘못된 체크포인트 링크 발견" -#: access/transam/xlog.c:7767 +#: access/transam/xlog.c:8060 #, c-format msgid "invalid primary checkpoint record" msgstr "잘못된 primary checkpoint 레코드" -#: access/transam/xlog.c:7771 +#: access/transam/xlog.c:8064 #, c-format msgid "invalid secondary checkpoint record" msgstr "잘못된 secondary checkpoint 레코드" -#: access/transam/xlog.c:7775 +#: access/transam/xlog.c:8068 #, c-format msgid "invalid checkpoint record" msgstr "잘못된 checkpoint 레코드" -#: access/transam/xlog.c:7786 +#: access/transam/xlog.c:8079 #, c-format msgid "invalid resource manager ID in primary checkpoint record" msgstr "primary checkpoint 레코드에서 잘못된 자원 관리자 ID 발견" -#: access/transam/xlog.c:7790 +#: access/transam/xlog.c:8083 #, c-format msgid "invalid resource manager ID in secondary checkpoint record" msgstr "secondary checkpoint 레코드에서 잘못된 자원 관리자 ID 발견" -#: access/transam/xlog.c:7794 +#: access/transam/xlog.c:8087 #, c-format msgid "invalid resource manager ID in checkpoint 
record" msgstr "checkpoint 레코드에서 잘못된 자원 관리자 ID 발견" -#: access/transam/xlog.c:7806 +#: access/transam/xlog.c:8100 #, c-format msgid "invalid xl_info in primary checkpoint record" msgstr "primary checkpoint 레코드에서 잘못된 xl_info 발견" -#: access/transam/xlog.c:7810 +#: access/transam/xlog.c:8104 #, c-format msgid "invalid xl_info in secondary checkpoint record" msgstr "secondary checkpoint 레코드에서 잘못된 xl_info 발견" -#: access/transam/xlog.c:7814 +#: access/transam/xlog.c:8108 #, c-format msgid "invalid xl_info in checkpoint record" msgstr "checkpoint 레코드에서 잘못된 xl_info 발견" -#: access/transam/xlog.c:7825 +#: access/transam/xlog.c:8119 #, c-format msgid "invalid length of primary checkpoint record" msgstr "primary checkpoint 레코드 길이가 잘못되었음" -#: access/transam/xlog.c:7829 +#: access/transam/xlog.c:8123 #, c-format msgid "invalid length of secondary checkpoint record" msgstr "secondary checkpoint 레코드 길이가 잘못되었음" -#: access/transam/xlog.c:7833 +#: access/transam/xlog.c:8127 #, c-format msgid "invalid length of checkpoint record" msgstr "checkpoint 레코드 길이가 잘못되었음" -#: access/transam/xlog.c:8001 +#: access/transam/xlog.c:8330 #, c-format msgid "shutting down" msgstr "서비스를 멈추고 있습니다" -#: access/transam/xlog.c:8514 +#: access/transam/xlog.c:8649 +#, c-format +msgid "checkpoint skipped because system is idle" +msgstr "시스템이 놀고 있어 체크포인트 작업 건너뜀" + +#: access/transam/xlog.c:8854 #, c-format msgid "" -"concurrent transaction log activity while database system is shutting down" -msgstr "데이터베이스 시스템이 중지되는 동안 현재 트랜잭션 로그가 활성화 되었음" +"concurrent write-ahead log activity while database system is shutting down" +msgstr "데이터베이스 시스템이 중지되는 동안 동시 트랜잭션 로그가 활성화 되었음" -#: access/transam/xlog.c:8768 +#: access/transam/xlog.c:9108 #, c-format msgid "skipping restartpoint, recovery has already ended" msgstr "다시 시작 지점을 건너뜀, 복구가 이미 종료됨" -#: access/transam/xlog.c:8791 +#: access/transam/xlog.c:9131 #, c-format msgid "skipping restartpoint, already performed at %X/%X" msgstr "다시 시작 지점을 건너뜀, %X/%X에서 이미 수행됨" -#: access/transam/xlog.c:8967 +#: access/transam/xlog.c:9307 #, c-format msgid "recovery restart point at %X/%X" msgstr "%X/%X에서 복구 작업 시작함" -#: access/transam/xlog.c:9100 +#: access/transam/xlog.c:9443 #, c-format msgid "restore point \"%s\" created at %X/%X" msgstr "\"%s\" 이름의 복구 위치는 %X/%X에 만들었음" -#: access/transam/xlog.c:9230 +#: access/transam/xlog.c:9573 #, c-format msgid "" "unexpected previous timeline ID %u (current timeline ID %u) in checkpoint " @@ -2797,12 +2623,12 @@ msgid "" msgstr "" "체크포인트 레코드에 예기치 않은 이전 타임라인ID %u(현재 타임라인ID: %u)" -#: access/transam/xlog.c:9239 +#: access/transam/xlog.c:9582 #, c-format msgid "unexpected timeline ID %u (after %u) in checkpoint record" msgstr "체크포인트 레코드에 예기치 않은 타임라인 ID %u이(가) 있음(%u 뒤)" -#: access/transam/xlog.c:9255 +#: access/transam/xlog.c:9598 #, c-format msgid "" "unexpected timeline ID %u in checkpoint record, before reaching minimum " @@ -2811,74 +2637,74 @@ msgstr "" "체크포인트 내역 안에 %u 타임라인 ID가 기대한 것과 다릅니다. 
발생 위치: %X/%X " "(타임라인: %u) 최소 복구 위치 이전" -#: access/transam/xlog.c:9326 +#: access/transam/xlog.c:9674 #, c-format msgid "online backup was canceled, recovery cannot continue" msgstr "온라인 백어이 취소되었음, 복구를 계속 할 수 없음" -#: access/transam/xlog.c:9382 access/transam/xlog.c:9429 -#: access/transam/xlog.c:9452 +#: access/transam/xlog.c:9730 access/transam/xlog.c:9777 +#: access/transam/xlog.c:9800 #, c-format msgid "unexpected timeline ID %u (should be %u) in checkpoint record" msgstr "체크포인트 레코드에 예기치 않은 타임라인 ID %u이(가) 있음(%u이어야 함)" -#: access/transam/xlog.c:9727 +#: access/transam/xlog.c:10076 #, c-format msgid "could not fsync log segment %s: %m" msgstr "%s 로그 조각 fsync 실패: %m" -#: access/transam/xlog.c:9751 +#: access/transam/xlog.c:10101 #, c-format msgid "could not fsync log file %s: %m" msgstr "\"%s\" 로그 파일 fsync 실패: %m" -#: access/transam/xlog.c:9759 +#: access/transam/xlog.c:10109 #, c-format msgid "could not fsync write-through log file %s: %m" msgstr "write-through 로그 파일(%s)을 fsync할 수 없음: %m" -#: access/transam/xlog.c:9768 +#: access/transam/xlog.c:10118 #, c-format msgid "could not fdatasync log file %s: %m" msgstr "%s 로그파일을 fdatasync할 수 없음: %m" -#: access/transam/xlog.c:9859 access/transam/xlog.c:10363 -#: access/transam/xlogfuncs.c:294 access/transam/xlogfuncs.c:321 -#: access/transam/xlogfuncs.c:360 access/transam/xlogfuncs.c:381 -#: access/transam/xlogfuncs.c:402 +#: access/transam/xlog.c:10209 access/transam/xlog.c:10727 +#: access/transam/xlogfuncs.c:297 access/transam/xlogfuncs.c:324 +#: access/transam/xlogfuncs.c:363 access/transam/xlogfuncs.c:384 +#: access/transam/xlogfuncs.c:405 #, c-format msgid "WAL control functions cannot be executed during recovery." msgstr "WAL 제어 함수는 복구 작업 중에는 실행 될 수 없음" -#: access/transam/xlog.c:9868 access/transam/xlog.c:10372 +#: access/transam/xlog.c:10218 access/transam/xlog.c:10736 #, c-format msgid "WAL level not sufficient for making an online backup" msgstr "온라인 백업 작업을 하기 위한 WAL 수준이 충분치 않습니다." -#: access/transam/xlog.c:9869 access/transam/xlog.c:10373 -#: access/transam/xlogfuncs.c:327 +#: access/transam/xlog.c:10219 access/transam/xlog.c:10737 +#: access/transam/xlogfuncs.c:330 #, c-format msgid "wal_level must be set to \"replica\" or \"logical\" at server start." msgstr "" "wal_level 값을 \"replica\" 또는 \"logical\"로 지정하고 서버를 실행하십시오." -#: access/transam/xlog.c:9874 +#: access/transam/xlog.c:10224 #, c-format msgid "backup label too long (max %d bytes)" msgstr "백업 라벨 이름이 너무 긺(최대 %d 바이트)" -#: access/transam/xlog.c:9911 access/transam/xlog.c:10183 -#: access/transam/xlog.c:10221 +#: access/transam/xlog.c:10261 access/transam/xlog.c:10534 +#: access/transam/xlog.c:10572 #, c-format msgid "a backup is already in progress" msgstr "이미 백업 작업이 진행 중입니다" -#: access/transam/xlog.c:9912 +#: access/transam/xlog.c:10262 #, c-format msgid "Run pg_stop_backup() and try again." msgstr "pg_stop_backup() 함수를 실행하고 나서 다시 시도하세요." -#: access/transam/xlog.c:10007 +#: access/transam/xlog.c:10357 #, c-format msgid "" "WAL generated with full_page_writes=off was replayed since last restartpoint" @@ -2886,7 +2712,7 @@ msgstr "" "마지막 재시작 위치부터 재반영된 WAL 내용이 full_page_writes=off 설정으로 만들" "어진 내용입니다." -#: access/transam/xlog.c:10009 access/transam/xlog.c:10554 +#: access/transam/xlog.c:10359 access/transam/xlog.c:10919 #, c-format msgid "" "This means that the backup being taken on the standby is corrupt and should " @@ -2897,40 +2723,40 @@ msgstr "" "정을 활성화 하고, 주 서버에서 CHECKPOINT 명령을 실행하고, 온라인 백업을 다시 " "해서 사용하세요." 
-#: access/transam/xlog.c:10076 replication/basebackup.c:1026 -#: utils/adt/misc.c:498 +#: access/transam/xlog.c:10426 replication/basebackup.c:1096 +#: utils/adt/misc.c:497 #, c-format msgid "could not read symbolic link \"%s\": %m" msgstr "\"%s\" 심볼릭 링크 파일을 읽을 수 없음: %m" -#: access/transam/xlog.c:10083 replication/basebackup.c:1031 -#: utils/adt/misc.c:503 +#: access/transam/xlog.c:10433 replication/basebackup.c:1101 +#: utils/adt/misc.c:502 #, c-format msgid "symbolic link \"%s\" target is too long" msgstr "\"%s\" 심볼릭 링크의 대상이 너무 긺" -#: access/transam/xlog.c:10136 commands/tablespace.c:391 -#: commands/tablespace.c:553 replication/basebackup.c:1047 -#: utils/adt/misc.c:511 +#: access/transam/xlog.c:10486 commands/tablespace.c:389 +#: commands/tablespace.c:551 replication/basebackup.c:1116 +#: utils/adt/misc.c:510 #, c-format msgid "tablespaces are not supported on this platform" msgstr "테이블스페이스 기능은 이 플랫폼에서는 지원하지 않습니다." -#: access/transam/xlog.c:10177 access/transam/xlog.c:10215 -#: access/transam/xlog.c:10411 access/transam/xlogarchive.c:106 -#: access/transam/xlogarchive.c:265 commands/copy.c:1815 commands/copy.c:2839 -#: commands/extension.c:3130 commands/tablespace.c:782 -#: commands/tablespace.c:873 replication/basebackup.c:409 -#: replication/basebackup.c:477 replication/logical/snapbuild.c:1491 -#: storage/file/copydir.c:72 storage/file/copydir.c:115 storage/file/fd.c:2903 -#: storage/file/fd.c:2995 utils/adt/dbsize.c:70 utils/adt/dbsize.c:220 -#: utils/adt/dbsize.c:300 utils/adt/genfile.c:114 utils/adt/genfile.c:333 +#: access/transam/xlog.c:10528 access/transam/xlog.c:10566 +#: access/transam/xlog.c:10775 access/transam/xlogarchive.c:105 +#: access/transam/xlogarchive.c:264 commands/copy.c:1844 commands/copy.c:3134 +#: commands/extension.c:3319 commands/tablespace.c:780 +#: commands/tablespace.c:871 replication/basebackup.c:480 +#: replication/basebackup.c:548 replication/logical/snapbuild.c:1518 +#: storage/file/copydir.c:72 storage/file/copydir.c:115 storage/file/fd.c:2954 +#: storage/file/fd.c:3046 utils/adt/dbsize.c:70 utils/adt/dbsize.c:227 +#: utils/adt/dbsize.c:307 utils/adt/genfile.c:115 utils/adt/genfile.c:334 #: guc-file.l:1002 #, c-format msgid "could not stat file \"%s\": %m" msgstr "\"%s\" 파일의 상태값을 알 수 없음: %m" -#: access/transam/xlog.c:10184 access/transam/xlog.c:10222 +#: access/transam/xlog.c:10535 access/transam/xlog.c:10573 #, c-format msgid "" "If you're sure there is no backup in progress, remove file \"%s\" and try " @@ -2939,35 +2765,36 @@ msgstr "" "실재로는 백업 작업을 안하고 있다고 확신한다면, \"%s\" 파일을 삭제하고 다시 시" "도해 보십시오." 
-#: access/transam/xlog.c:10201 access/transam/xlog.c:10239 -#: access/transam/xlog.c:10615 +#: access/transam/xlog.c:10552 access/transam/xlog.c:10590 +#: access/transam/xlog.c:10977 postmaster/syslogger.c:1391 +#: postmaster/syslogger.c:1404 #, c-format msgid "could not write file \"%s\": %m" msgstr "\"%s\" 파일 쓰기 실패: %m" -#: access/transam/xlog.c:10388 +#: access/transam/xlog.c:10752 #, c-format msgid "exclusive backup not in progress" msgstr "exclusive 백업 작업을 하지 않고 있습니다" -#: access/transam/xlog.c:10415 +#: access/transam/xlog.c:10779 #, c-format msgid "a backup is not in progress" msgstr "현재 백업 작업을 하지 않고 있습니다" -#: access/transam/xlog.c:10489 access/transam/xlog.c:10502 -#: access/transam/xlog.c:10842 access/transam/xlog.c:10848 -#: access/transam/xlog.c:10932 access/transam/xlogfuncs.c:695 +#: access/transam/xlog.c:10852 access/transam/xlog.c:10865 +#: access/transam/xlog.c:11210 access/transam/xlog.c:11216 +#: access/transam/xlog.c:11300 access/transam/xlogfuncs.c:698 #, c-format msgid "invalid data in file \"%s\"" msgstr "\"%s\" 파일에 유효하지 않은 자료가 있습니다" -#: access/transam/xlog.c:10506 replication/basebackup.c:938 +#: access/transam/xlog.c:10869 replication/basebackup.c:994 #, c-format msgid "the standby was promoted during online backup" msgstr "대기 서버가 온라인 백업 중 주 서버로 전환되었습니다" -#: access/transam/xlog.c:10507 replication/basebackup.c:939 +#: access/transam/xlog.c:10870 replication/basebackup.c:995 #, c-format msgid "" "This means that the backup being taken is corrupt and should not be used. " @@ -2976,7 +2803,7 @@ msgstr "" "이런 경우, 해당 백업 자료가 손상되었을 가능성이 있습니다. 다른 백업본을 이용" "하세요." -#: access/transam/xlog.c:10552 +#: access/transam/xlog.c:10917 #, c-format msgid "" "WAL generated with full_page_writes=off was replayed during online backup" @@ -2984,7 +2811,7 @@ msgstr "" "온라인 백업 도중 full_page_writes=off 설정으로 만들어진 WAL 내용이 재반영되었" "습니다." -#: access/transam/xlog.c:10664 +#: access/transam/xlog.c:11032 #, c-format msgid "" "pg_stop_backup cleanup done, waiting for required WAL segments to be archived" @@ -2992,14 +2819,14 @@ msgstr "" "pg_stop_backup 작업이 끝났습니다. 필요한 WAL 조각 파일이 아카이브 되길 기다리" "고 있습니다." -#: access/transam/xlog.c:10674 +#: access/transam/xlog.c:11042 #, c-format msgid "" "pg_stop_backup still waiting for all required WAL segments to be archived " "(%d seconds elapsed)" msgstr "pg_stop_backup에서 아카이빙이 완료되기를 기다리고 있음 (%d초 경과)" -#: access/transam/xlog.c:10676 +#: access/transam/xlog.c:11044 #, c-format msgid "" "Check that your archive_command is executing properly. pg_stop_backup can " @@ -3009,14 +2836,14 @@ msgstr "" "archive_command 설정을 살펴보세요. pg_stop_backup 작업은 안전하게 취소 할 " "수 있지만, 데이터베이스 백업은 모든 WAL 조각 없이는 사용될 수 없습니다." -#: access/transam/xlog.c:10683 +#: access/transam/xlog.c:11051 #, c-format msgid "pg_stop_backup complete, all required WAL segments have been archived" msgstr "" "pg_stop_backup 작업이 끝났습니다. 모든 필요한 WAL 조각들이 아카이브 되었습니" "다." -#: access/transam/xlog.c:10687 +#: access/transam/xlog.c:11055 #, c-format msgid "" "WAL archiving is not enabled; you must ensure that all required WAL segments " @@ -3026,36 +2853,36 @@ msgstr "" "모든 WAL 조각 파일들을 직접 찾아서 따로 보관해 두어야 바르게 복구 할 수 있습" "니다." -#. translator: %s is an XLog record description -#: access/transam/xlog.c:10972 +#. 
translator: %s is a WAL record description +#: access/transam/xlog.c:11340 #, c-format -msgid "xlog redo at %X/%X for %s" -msgstr "xlog redo 위치: %X/%X, 대상: %s" +msgid "WAL redo at %X/%X for %s" +msgstr "WAL redo 위치: %X/%X, 대상: %s" -#: access/transam/xlog.c:11021 +#: access/transam/xlog.c:11389 #, c-format msgid "online backup mode was not canceled" msgstr "온라인 백업 모드가 취소되지 않았음" -#: access/transam/xlog.c:11022 +#: access/transam/xlog.c:11390 #, c-format msgid "File \"%s\" could not be renamed to \"%s\": %m." msgstr "\"%s\" 파일을 \"%s\" 파일로 이름을 바꿀 수 없음: %m." -#: access/transam/xlog.c:11031 access/transam/xlog.c:11043 -#: access/transam/xlog.c:11053 +#: access/transam/xlog.c:11399 access/transam/xlog.c:11411 +#: access/transam/xlog.c:11421 #, c-format msgid "online backup mode canceled" msgstr "온라인 백업 모드가 취소됨" -#: access/transam/xlog.c:11044 +#: access/transam/xlog.c:11412 #, c-format msgid "" "Files \"%s\" and \"%s\" were renamed to \"%s\" and \"%s\", respectively." msgstr "" "예상한 것처럼, \"%s\", \"%s\" 파일을 \"%s\", \"%s\" 이름으로 바꿨습니다." -#: access/transam/xlog.c:11054 +#: access/transam/xlog.c:11422 #, c-format msgid "" "File \"%s\" was renamed to \"%s\", but file \"%s\" could not be renamed to " @@ -3064,43 +2891,43 @@ msgstr "" "\"%s\" 파일은 \"%s\" 이름으로 바꿨지만, \"%s\" 파일은 \"%s\" 이름으로 바꾸지 " "못했습니다: %m." -#: access/transam/xlog.c:11176 access/transam/xlogutils.c:718 -#: replication/walreceiver.c:994 replication/walsender.c:2112 +#: access/transam/xlog.c:11544 access/transam/xlogutils.c:724 +#: replication/walreceiver.c:1005 replication/walsender.c:2397 #, c-format msgid "could not seek in log segment %s to offset %u: %m" msgstr "%s 로그 조각에서 해당 위치를 찾을 수 없음: %u: %m" -#: access/transam/xlog.c:11188 +#: access/transam/xlog.c:11558 #, c-format msgid "could not read from log segment %s, offset %u: %m" msgstr "%s 로그 조각에서 읽기 실패, 위치: %u: %m" -#: access/transam/xlog.c:11662 +#: access/transam/xlog.c:12047 #, c-format msgid "received promote request" msgstr "운영 전환 신호를 받았습니다." -#: access/transam/xlog.c:11675 +#: access/transam/xlog.c:12060 #, c-format msgid "trigger file found: %s" msgstr "트리거 파일이 있음: %s" -#: access/transam/xlog.c:11684 +#: access/transam/xlog.c:12069 #, c-format msgid "could not stat trigger file \"%s\": %m" msgstr "\"%s\" 트리거 파일의 상태값을 알 수 없음: %m" -#: access/transam/xlogarchive.c:244 +#: access/transam/xlogarchive.c:243 #, c-format msgid "archive file \"%s\" has wrong size: %lu instead of %lu" msgstr "\"%s\" 기록 파일의 크기가 이상합니다: 현재값 %lu, 원래값 %lu" -#: access/transam/xlogarchive.c:253 +#: access/transam/xlogarchive.c:252 #, c-format msgid "restored log file \"%s\" from archive" msgstr "아카이브에서 \"%s\" 로그파일을 복구했음" -#: access/transam/xlogarchive.c:303 +#: access/transam/xlogarchive.c:302 #, c-format msgid "could not restore file \"%s\" from archive: %s" msgstr "아카이브에서 \"%s\" 파일 복원 실패: %s" @@ -3108,127 +2935,128 @@ msgstr "아카이브에서 \"%s\" 파일 복원 실패: %s" #. translator: First %s represents a recovery.conf parameter name like #. "recovery_end_command", the 2nd is the value of that parameter, the #. third an already translated error message. 
-#: access/transam/xlogarchive.c:415 +#: access/transam/xlogarchive.c:414 #, c-format msgid "%s \"%s\": %s" msgstr "%s \"%s\": %s" -#: access/transam/xlogarchive.c:458 replication/logical/snapbuild.c:1619 -#: replication/slot.c:480 replication/slot.c:992 replication/slot.c:1100 -#: storage/file/fd.c:642 storage/file/fd.c:700 utils/time/snapmgr.c:1298 +#: access/transam/xlogarchive.c:457 postmaster/syslogger.c:1415 +#: replication/logical/snapbuild.c:1645 replication/slot.c:590 +#: replication/slot.c:1190 replication/slot.c:1304 storage/file/fd.c:642 +#: storage/file/fd.c:737 utils/time/snapmgr.c:1318 #, c-format msgid "could not rename file \"%s\" to \"%s\": %m" msgstr "\"%s\" 파일을 \"%s\" 파일로 이름을 바꿀 수 없음: %m" -#: access/transam/xlogarchive.c:525 access/transam/xlogarchive.c:589 +#: access/transam/xlogarchive.c:524 access/transam/xlogarchive.c:588 #, c-format msgid "could not create archive status file \"%s\": %m" msgstr "\"%s\" archive status 파일을 만들 수 없습니다: %m" -#: access/transam/xlogarchive.c:533 access/transam/xlogarchive.c:597 +#: access/transam/xlogarchive.c:532 access/transam/xlogarchive.c:596 #, c-format msgid "could not write archive status file \"%s\": %m" msgstr "\"%s\" archive status 파일에 쓸 수 없습니다: %m" -#: access/transam/xlogfuncs.c:58 +#: access/transam/xlogfuncs.c:55 #, c-format msgid "aborting backup due to backend exiting before pg_stop_backup was called" msgstr "" "pg_stop_backup 작업이 호출되기 전에 백엔드가 종료되어 백업을 중지합니다." -#: access/transam/xlogfuncs.c:88 +#: access/transam/xlogfuncs.c:86 #, c-format msgid "a backup is already in progress in this session" msgstr "이미 이 세션에서 백업 작업이 진행 중입니다" -#: access/transam/xlogfuncs.c:94 commands/tablespace.c:705 -#: commands/tablespace.c:715 postmaster/postmaster.c:1406 -#: replication/basebackup.c:297 replication/basebackup.c:637 -#: storage/file/copydir.c:53 storage/file/copydir.c:96 storage/file/fd.c:2369 -#: storage/file/fd.c:2968 storage/ipc/dsm.c:300 utils/adt/genfile.c:439 -#: utils/adt/misc.c:411 utils/misc/tzparser.c:339 +#: access/transam/xlogfuncs.c:92 commands/tablespace.c:703 +#: commands/tablespace.c:713 postmaster/postmaster.c:1458 +#: replication/basebackup.c:368 replication/basebackup.c:708 +#: storage/file/copydir.c:53 storage/file/copydir.c:96 storage/file/fd.c:2420 +#: storage/file/fd.c:3019 storage/ipc/dsm.c:301 utils/adt/genfile.c:440 +#: utils/adt/misc.c:410 utils/misc/tzparser.c:339 #, c-format msgid "could not open directory \"%s\": %m" msgstr "\"%s\" 디렉터리 열 수 없음: %m" -#: access/transam/xlogfuncs.c:155 access/transam/xlogfuncs.c:229 +#: access/transam/xlogfuncs.c:152 access/transam/xlogfuncs.c:234 #, c-format msgid "non-exclusive backup in progress" msgstr "non-exclusive 백업 진행 중입니다" -#: access/transam/xlogfuncs.c:156 access/transam/xlogfuncs.c:230 +#: access/transam/xlogfuncs.c:153 access/transam/xlogfuncs.c:235 #, c-format msgid "Did you mean to use pg_stop_backup('f')?" msgstr "pg_stop_backup('f') 형태로 함수를 호출했나요?" 
-#: access/transam/xlogfuncs.c:200 commands/event_trigger.c:1445 -#: commands/event_trigger.c:1996 commands/extension.c:1729 -#: commands/extension.c:1838 commands/extension.c:2031 commands/prepare.c:702 -#: executor/execQual.c:1757 executor/execQual.c:1782 executor/execQual.c:2157 -#: executor/execQual.c:5438 executor/functions.c:1031 foreign/foreign.c:492 -#: replication/logical/logicalfuncs.c:175 replication/logical/origin.c:1391 -#: replication/slotfuncs.c:189 replication/walsender.c:2761 -#: utils/adt/jsonfuncs.c:1483 utils/adt/jsonfuncs.c:1613 -#: utils/adt/jsonfuncs.c:1801 utils/adt/jsonfuncs.c:1928 -#: utils/adt/jsonfuncs.c:2694 utils/adt/pgstatfuncs.c:554 -#: utils/adt/pgstatfuncs.c:655 utils/fmgr/funcapi.c:61 utils/misc/guc.c:8436 -#: utils/mmgr/portalmem.c:1074 +#: access/transam/xlogfuncs.c:205 commands/event_trigger.c:1471 +#: commands/event_trigger.c:2022 commands/extension.c:1895 +#: commands/extension.c:2004 commands/extension.c:2228 commands/prepare.c:721 +#: executor/execExpr.c:2121 executor/execSRF.c:688 executor/functions.c:1029 +#: foreign/foreign.c:488 libpq/hba.c:2563 replication/logical/launcher.c:1026 +#: replication/logical/logicalfuncs.c:176 replication/logical/origin.c:1422 +#: replication/slotfuncs.c:197 replication/walsender.c:3166 +#: utils/adt/jsonfuncs.c:1689 utils/adt/jsonfuncs.c:1819 +#: utils/adt/jsonfuncs.c:2007 utils/adt/jsonfuncs.c:2134 +#: utils/adt/jsonfuncs.c:3489 utils/adt/pgstatfuncs.c:456 +#: utils/adt/pgstatfuncs.c:557 utils/fmgr/funcapi.c:62 utils/misc/guc.c:8549 +#: utils/mmgr/portalmem.c:1067 #, c-format msgid "set-valued function called in context that cannot accept a set" msgstr "" "set-values 함수(테이블 리턴 함수)가 set 정의 없이 사용되었습니다 (테이블과 해" "당 열 alias 지정하세요)" -#: access/transam/xlogfuncs.c:204 commands/event_trigger.c:1449 -#: commands/event_trigger.c:2000 commands/extension.c:1733 -#: commands/extension.c:1842 commands/extension.c:2035 commands/prepare.c:706 -#: foreign/foreign.c:497 replication/logical/logicalfuncs.c:179 -#: replication/logical/origin.c:1395 replication/slotfuncs.c:193 -#: replication/walsender.c:2765 utils/adt/pgstatfuncs.c:558 -#: utils/adt/pgstatfuncs.c:659 utils/misc/guc.c:8440 utils/misc/pg_config.c:44 -#: utils/mmgr/portalmem.c:1078 +#: access/transam/xlogfuncs.c:209 commands/event_trigger.c:1475 +#: commands/event_trigger.c:2026 commands/extension.c:1899 +#: commands/extension.c:2008 commands/extension.c:2232 commands/prepare.c:725 +#: foreign/foreign.c:493 libpq/hba.c:2567 replication/logical/launcher.c:1030 +#: replication/logical/logicalfuncs.c:180 replication/logical/origin.c:1426 +#: replication/slotfuncs.c:201 replication/walsender.c:3170 +#: utils/adt/pgstatfuncs.c:460 utils/adt/pgstatfuncs.c:561 +#: utils/misc/guc.c:8553 utils/misc/pg_config.c:44 utils/mmgr/portalmem.c:1071 #, c-format msgid "materialize mode required, but it is not allowed in this context" msgstr "materialize 모드가 필요합니다만, 이 구문에서는 허용되지 않습니다" -#: access/transam/xlogfuncs.c:247 +#: access/transam/xlogfuncs.c:251 #, c-format msgid "non-exclusive backup is not in progress" msgstr "non-exclusive 백업 상태가 아닙니다" -#: access/transam/xlogfuncs.c:248 +#: access/transam/xlogfuncs.c:252 #, c-format msgid "Did you mean to use pg_stop_backup('t')?" msgstr "pg_stop_backup('t') 형태로 함수를 호출했나요?" 
-#: access/transam/xlogfuncs.c:326 +#: access/transam/xlogfuncs.c:329 #, c-format msgid "WAL level not sufficient for creating a restore point" msgstr "WAL 수준이 복원 위치를 만들 수 없는 수준입니다" -#: access/transam/xlogfuncs.c:334 +#: access/transam/xlogfuncs.c:337 #, c-format msgid "value too long for restore point (maximum %d characters)" msgstr "복원 위치 이름이 너무 깁니다. (최대값, %d 글자)" -#: access/transam/xlogfuncs.c:472 +#: access/transam/xlogfuncs.c:475 #, c-format -msgid "pg_xlogfile_name_offset() cannot be executed during recovery." -msgstr "복구 중에는 pg_xlogfile_name_offset() 함수를 실행할 수 없습니다." +msgid "pg_walfile_name_offset() cannot be executed during recovery." +msgstr "복구 중에는 pg_walfile_name_offset() 함수를 실행할 수 없습니다." -#: access/transam/xlogfuncs.c:528 +#: access/transam/xlogfuncs.c:531 #, c-format -msgid "pg_xlogfile_name() cannot be executed during recovery." -msgstr "복구 중에는 pg_xlogfile_name() 함수를 실행할 수 없습니다." +msgid "pg_walfile_name() cannot be executed during recovery." +msgstr "복구 중에는 pg_walfile_name() 함수를 실행할 수 없습니다." -#: access/transam/xlogfuncs.c:548 access/transam/xlogfuncs.c:568 -#: access/transam/xlogfuncs.c:585 +#: access/transam/xlogfuncs.c:551 access/transam/xlogfuncs.c:571 +#: access/transam/xlogfuncs.c:588 #, c-format msgid "recovery is not in progress" msgstr "현재 복구 작업 상태가 아닙니다" -#: access/transam/xlogfuncs.c:549 access/transam/xlogfuncs.c:569 -#: access/transam/xlogfuncs.c:586 +#: access/transam/xlogfuncs.c:552 access/transam/xlogfuncs.c:572 +#: access/transam/xlogfuncs.c:589 #, c-format msgid "Recovery control functions can only be executed during recovery." msgstr "복구 제어 함수는 복구 작업일 때만 실행할 수 있습니다." @@ -3243,7 +3071,7 @@ msgstr "잘못된 레코드 위치: %X/%X" msgid "contrecord is requested by %X/%X" msgstr "%X/%X에서 contrecord를 필요로 함" -#: access/transam/xlogreader.c:325 access/transam/xlogreader.c:624 +#: access/transam/xlogreader.c:325 access/transam/xlogreader.c:625 #, c-format msgid "invalid record length at %X/%X: wanted %u, got %u" msgstr "잘못된 레코드 길이: %X/%X, 기대값 %u, 실재값 %u" @@ -3263,32 +3091,32 @@ msgstr "%X/%X 위치에 contrecord 플래그가 없음" msgid "invalid contrecord length %u at %X/%X" msgstr "잘못된 contrecord 길이 %u, 위치 %X/%X" -#: access/transam/xlogreader.c:632 +#: access/transam/xlogreader.c:633 #, c-format msgid "invalid resource manager ID %u at %X/%X" msgstr "잘못된 자원 관리 ID %u, 위치: %X/%X" -#: access/transam/xlogreader.c:646 access/transam/xlogreader.c:663 +#: access/transam/xlogreader.c:647 access/transam/xlogreader.c:664 #, c-format msgid "record with incorrect prev-link %X/%X at %X/%X" msgstr "레코드의 잘못된 프리링크 %X/%X, 해당 레코드 %X/%X" -#: access/transam/xlogreader.c:700 +#: access/transam/xlogreader.c:701 #, c-format msgid "incorrect resource manager data checksum in record at %X/%X" msgstr "잘못된 자원관리자 데이터 체크섬, 위치: %X/%X 레코드" -#: access/transam/xlogreader.c:733 +#: access/transam/xlogreader.c:734 #, c-format msgid "invalid magic number %04X in log segment %s, offset %u" msgstr "%04X 매직 번호가 잘못됨, 로그 파일 %s, 위치 %u" -#: access/transam/xlogreader.c:747 access/transam/xlogreader.c:798 +#: access/transam/xlogreader.c:748 access/transam/xlogreader.c:799 #, c-format msgid "invalid info bits %04X in log segment %s, offset %u" msgstr "잘못된 정보 비트 %04X, 로그 파일 %s, 위치 %u" -#: access/transam/xlogreader.c:773 +#: access/transam/xlogreader.c:774 #, c-format msgid "" "WAL file is from different database system: WAL file database system " @@ -3297,7 +3125,7 @@ msgstr "" "WAL 파일이 다른 시스템의 것입니다. 
WAL 파일의 시스템 식별자는 %s, pg_control " "의 식별자는 %s" -#: access/transam/xlogreader.c:780 +#: access/transam/xlogreader.c:781 #, c-format msgid "" "WAL file is from different database system: incorrect XLOG_SEG_SIZE in page " @@ -3306,7 +3134,7 @@ msgstr "" "WAL 파일이 다른 데이터베이스 시스템의 것입니다: 페이지 헤더의 XLOG_SEG_SIZE " "값이 바르지 않음" -#: access/transam/xlogreader.c:786 +#: access/transam/xlogreader.c:787 #, c-format msgid "" "WAL file is from different database system: incorrect XLOG_BLCKSZ in page " @@ -3315,32 +3143,32 @@ msgstr "" "WAL 파일이 다른 데이터베이스 시스템의 것입니다: 페이지 헤더의 XLOG_BLCKSZ 값" "이 바르지 않음" -#: access/transam/xlogreader.c:812 +#: access/transam/xlogreader.c:813 #, c-format msgid "unexpected pageaddr %X/%X in log segment %s, offset %u" msgstr "잘못된 페이지 주소 %X/%X, 로그 파일 %s, 위치 %u" -#: access/transam/xlogreader.c:837 +#: access/transam/xlogreader.c:838 #, c-format msgid "out-of-sequence timeline ID %u (after %u) in log segment %s, offset %u" msgstr "타임라인 범위 벗어남 %u (이전 번호 %u), 로그 파일 %s, 위치 %u" -#: access/transam/xlogreader.c:1081 +#: access/transam/xlogreader.c:1083 #, c-format msgid "out-of-order block_id %u at %X/%X" msgstr "%u block_id는 범위를 벗어남, 위치 %X/%X" -#: access/transam/xlogreader.c:1103 +#: access/transam/xlogreader.c:1106 #, c-format msgid "BKPBLOCK_HAS_DATA set, but no data included at %X/%X" msgstr "BKPBLOCK_HAS_DATA 지정했지만, %X/%X 에 자료가 없음" -#: access/transam/xlogreader.c:1110 +#: access/transam/xlogreader.c:1113 #, c-format msgid "BKPBLOCK_HAS_DATA not set, but data length is %u at %X/%X" msgstr "BKPBLOCK_HAS_DATA 지정 않았지만, %u 길이의 자료가 있음, 위치 %X/%X" -#: access/transam/xlogreader.c:1143 +#: access/transam/xlogreader.c:1149 #, c-format msgid "" "BKPIMAGE_HAS_HOLE set, but hole offset %u length %u block image length %u at " @@ -3349,19 +3177,19 @@ msgstr "" "BKPIMAGE_HAS_HOLE 설정이 되어 있지만, 옵셋: %u, 길이: %u, 블록 이미지 길이: " "%u, 대상: %X/%X" -#: access/transam/xlogreader.c:1159 +#: access/transam/xlogreader.c:1165 #, c-format msgid "BKPIMAGE_HAS_HOLE not set, but hole offset %u length %u at %X/%X" msgstr "" "BKPIMAGE_HAS_HOLE 설정이 안되어 있지만, 옵셋: %u, 길이: %u, 대상: %X/%X" -#: access/transam/xlogreader.c:1174 +#: access/transam/xlogreader.c:1180 #, c-format msgid "BKPIMAGE_IS_COMPRESSED set, but block image length %u at %X/%X" msgstr "" "BKPIMAGE_IS_COMPRESSED 설정이 되어 있지만, 블록 이미지 길이: %u, 대상: %X/%X" -#: access/transam/xlogreader.c:1189 +#: access/transam/xlogreader.c:1195 #, c-format msgid "" "neither BKPIMAGE_HAS_HOLE nor BKPIMAGE_IS_COMPRESSED set, but block image " @@ -3370,259 +3198,272 @@ msgstr "" "BKPIMAGE_HAS_HOLE, BKPIMAGE_IS_COMPRESSED 지정 안되어 있으나, 블록 이미지 길" "이는 %u, 대상: %X/%X" -#: access/transam/xlogreader.c:1205 +#: access/transam/xlogreader.c:1211 #, c-format msgid "BKPBLOCK_SAME_REL set but no previous rel at %X/%X" msgstr "BKPBLOCK_SAME_REL 설정이 되어 있지만, %X/%X 에 이전 릴레이션 없음" -#: access/transam/xlogreader.c:1217 +#: access/transam/xlogreader.c:1223 #, c-format msgid "invalid block_id %u at %X/%X" msgstr "잘못된 block_id %u, 위치 %X/%X" -#: access/transam/xlogreader.c:1282 +#: access/transam/xlogreader.c:1291 #, c-format msgid "record with invalid length at %X/%X" msgstr "잘못된 레코드 길이, 위치 %X/%X" -#: access/transam/xlogreader.c:1371 +#: access/transam/xlogreader.c:1380 #, c-format msgid "invalid compressed image at %X/%X, block %d" msgstr "잘못된 압축 이미지, 위치 %X/%X, 블록 %d" -#: access/transam/xlogutils.c:739 replication/walsender.c:2129 +#: access/transam/xlogutils.c:747 replication/walsender.c:2416 #, c-format msgid "could not read from log segment %s, offset %u, length %lu: %m" msgstr "%s 로그 조각 읽기 실패, 위치 %u, 길이 %lu: %m" -#: 
bootstrap/bootstrap.c:269 postmaster/postmaster.c:793 tcop/postgres.c:3501 +#: bootstrap/bootstrap.c:272 postmaster/postmaster.c:819 tcop/postgres.c:3510 #, c-format msgid "--%s requires a value" msgstr "--%s 옵션은 해당 값을 지정해야합니다" -#: bootstrap/bootstrap.c:274 postmaster/postmaster.c:798 tcop/postgres.c:3506 +#: bootstrap/bootstrap.c:277 postmaster/postmaster.c:824 tcop/postgres.c:3515 #, c-format msgid "-c %s requires a value" msgstr "-c %s 옵션은 해당 값을 지정해야합니다" -#: bootstrap/bootstrap.c:285 postmaster/postmaster.c:810 -#: postmaster/postmaster.c:823 +#: bootstrap/bootstrap.c:288 postmaster/postmaster.c:836 +#: postmaster/postmaster.c:849 #, c-format msgid "Try \"%s --help\" for more information.\n" msgstr "자제한 사항은 \"%s --help\" 명령으로 살펴보십시오.\n" -#: bootstrap/bootstrap.c:294 +#: bootstrap/bootstrap.c:297 #, c-format msgid "%s: invalid command-line arguments\n" msgstr "%s: 잘못된 명령행 인자\n" -#: catalog/aclchk.c:201 +#: catalog/aclchk.c:203 #, c-format msgid "grant options can only be granted to roles" msgstr "grant 옵션들은 롤에서만 지정될 수 있습니다" -#: catalog/aclchk.c:324 +#: catalog/aclchk.c:326 #, c-format msgid "no privileges were granted for column \"%s\" of relation \"%s\"" msgstr "\"%s\" 칼럼(해당 릴레이션: \"%s\")에 대한 권한이 부여되지 않았음" -#: catalog/aclchk.c:329 +#: catalog/aclchk.c:331 #, c-format msgid "no privileges were granted for \"%s\"" msgstr "\"%s\"에 대한 권한이 부여되지 않았음" -#: catalog/aclchk.c:337 +#: catalog/aclchk.c:339 #, c-format msgid "not all privileges were granted for column \"%s\" of relation \"%s\"" msgstr "\"%s\" 칼럼(해당 릴레이션: \"%s\")에 대한 일부 권한이 부여되지 않았음" -#: catalog/aclchk.c:342 +#: catalog/aclchk.c:344 #, c-format msgid "not all privileges were granted for \"%s\"" msgstr "\"%s\"에 대한 일부 권한이 부여되지 않았음" -#: catalog/aclchk.c:353 +#: catalog/aclchk.c:355 #, c-format msgid "no privileges could be revoked for column \"%s\" of relation \"%s\"" msgstr "\"%s\" 칼럼(해당 릴레이션: \"%s\")에 대한 권한을 취소할 수 없음" -#: catalog/aclchk.c:358 +#: catalog/aclchk.c:360 #, c-format msgid "no privileges could be revoked for \"%s\"" msgstr "\"%s\"에 대한 권한을 취소할 수 없음" -#: catalog/aclchk.c:366 +#: catalog/aclchk.c:368 #, c-format msgid "" "not all privileges could be revoked for column \"%s\" of relation \"%s\"" msgstr "\"%s\" 칼럼(해당 릴레이션: \"%s\")의 일부 권한을 박탈할 수 없음" -#: catalog/aclchk.c:371 +#: catalog/aclchk.c:373 #, c-format msgid "not all privileges could be revoked for \"%s\"" msgstr "\"%s\"에 대한 일부 권한을 취소할 수 없음" -#: catalog/aclchk.c:453 catalog/aclchk.c:943 +#: catalog/aclchk.c:455 catalog/aclchk.c:948 #, c-format msgid "invalid privilege type %s for relation" msgstr "릴레이션의 %s 권한은 잘못된 종류임" -#: catalog/aclchk.c:457 catalog/aclchk.c:947 +#: catalog/aclchk.c:459 catalog/aclchk.c:952 #, c-format msgid "invalid privilege type %s for sequence" msgstr "시퀀스의 %s 권한은 잘못된 종류임" -#: catalog/aclchk.c:461 +#: catalog/aclchk.c:463 #, c-format msgid "invalid privilege type %s for database" msgstr "%s 권한은 데이터베이스에는 사용할 수 없은 권한 형태임" -#: catalog/aclchk.c:465 +#: catalog/aclchk.c:467 #, c-format msgid "invalid privilege type %s for domain" msgstr "%s 권한은 도메인에서 유효하지 않음" -#: catalog/aclchk.c:469 catalog/aclchk.c:951 +#: catalog/aclchk.c:471 catalog/aclchk.c:956 #, c-format msgid "invalid privilege type %s for function" msgstr "%s 권한은 함수에서 사용할 수 없은 권한 형태임" -#: catalog/aclchk.c:473 +#: catalog/aclchk.c:475 #, c-format msgid "invalid privilege type %s for language" msgstr "%s 권한은 프로시주얼 언어에서 사용할 수 없은 권한 형태임" -#: catalog/aclchk.c:477 +#: catalog/aclchk.c:479 #, c-format msgid "invalid privilege type %s for large object" msgstr "%s 권한은 대형 객체에서 사용할 수 없은 권한 형태임" -#: 
catalog/aclchk.c:481 +#: catalog/aclchk.c:483 catalog/aclchk.c:964 #, c-format msgid "invalid privilege type %s for schema" msgstr "%s 권한은 스키마(schema)에서 사용할 수 없은 권한 형태임" -#: catalog/aclchk.c:485 +#: catalog/aclchk.c:487 #, c-format msgid "invalid privilege type %s for tablespace" msgstr "%s 권한은 테이블스페이스에서 사용할 수 없은 권한 형태임" -#: catalog/aclchk.c:489 catalog/aclchk.c:955 +#: catalog/aclchk.c:491 catalog/aclchk.c:960 #, c-format msgid "invalid privilege type %s for type" msgstr "%s 권한은 자료형에서 사용할 수 없은 권한 형태임" -#: catalog/aclchk.c:493 +#: catalog/aclchk.c:495 #, c-format msgid "invalid privilege type %s for foreign-data wrapper" msgstr "%s 권한 형식은 외부 데이터 래퍼에 유효하지 않음" -#: catalog/aclchk.c:497 +#: catalog/aclchk.c:499 #, c-format msgid "invalid privilege type %s for foreign server" msgstr "%s 권한 형식은 외부 서버에 유효하지 않음" -#: catalog/aclchk.c:536 +#: catalog/aclchk.c:538 #, c-format msgid "column privileges are only valid for relations" msgstr "칼럼 권한은 릴레이션에서만 유효함" -#: catalog/aclchk.c:695 catalog/aclchk.c:3923 catalog/aclchk.c:4705 -#: catalog/objectaddress.c:873 catalog/pg_largeobject.c:113 +#: catalog/aclchk.c:696 catalog/aclchk.c:3926 catalog/aclchk.c:4708 +#: catalog/objectaddress.c:928 catalog/pg_largeobject.c:111 #: storage/large_object/inv_api.c:291 #, c-format msgid "large object %u does not exist" msgstr "%u large object 없음" -#: catalog/aclchk.c:882 catalog/aclchk.c:890 commands/collationcmds.c:92 -#: commands/copy.c:1047 commands/copy.c:1065 commands/copy.c:1073 -#: commands/copy.c:1081 commands/copy.c:1089 commands/copy.c:1097 -#: commands/copy.c:1105 commands/copy.c:1113 commands/copy.c:1121 -#: commands/copy.c:1137 commands/copy.c:1151 commands/copy.c:1170 -#: commands/copy.c:1185 commands/dbcommands.c:155 commands/dbcommands.c:163 -#: commands/dbcommands.c:171 commands/dbcommands.c:179 -#: commands/dbcommands.c:187 commands/dbcommands.c:195 -#: commands/dbcommands.c:203 commands/dbcommands.c:211 -#: commands/dbcommands.c:219 commands/dbcommands.c:1397 -#: commands/dbcommands.c:1405 commands/dbcommands.c:1413 -#: commands/dbcommands.c:1421 commands/extension.c:1219 -#: commands/extension.c:1227 commands/extension.c:1235 -#: commands/extension.c:1243 commands/extension.c:2761 -#: commands/foreigncmds.c:539 commands/foreigncmds.c:548 -#: commands/functioncmds.c:533 commands/functioncmds.c:649 -#: commands/functioncmds.c:657 commands/functioncmds.c:665 -#: commands/functioncmds.c:673 commands/functioncmds.c:2085 -#: commands/functioncmds.c:2093 commands/sequence.c:1189 -#: commands/sequence.c:1197 commands/sequence.c:1205 commands/sequence.c:1213 -#: commands/sequence.c:1221 commands/sequence.c:1229 commands/sequence.c:1237 -#: commands/sequence.c:1245 commands/typecmds.c:295 commands/typecmds.c:1382 -#: commands/typecmds.c:1391 commands/typecmds.c:1399 commands/typecmds.c:1407 -#: commands/typecmds.c:1415 commands/user.c:139 commands/user.c:156 -#: commands/user.c:164 commands/user.c:172 commands/user.c:180 -#: commands/user.c:188 commands/user.c:196 commands/user.c:204 -#: commands/user.c:212 commands/user.c:220 commands/user.c:228 -#: commands/user.c:236 commands/user.c:244 commands/user.c:537 -#: commands/user.c:549 commands/user.c:557 commands/user.c:565 -#: commands/user.c:573 commands/user.c:581 commands/user.c:589 -#: commands/user.c:597 commands/user.c:606 commands/user.c:614 -#: commands/user.c:622 +#: catalog/aclchk.c:885 catalog/aclchk.c:894 commands/collationcmds.c:114 +#: commands/copy.c:1042 commands/copy.c:1062 commands/copy.c:1071 +#: commands/copy.c:1080 commands/copy.c:1089 
commands/copy.c:1098 +#: commands/copy.c:1107 commands/copy.c:1116 commands/copy.c:1125 +#: commands/copy.c:1143 commands/copy.c:1159 commands/copy.c:1179 +#: commands/copy.c:1196 commands/dbcommands.c:155 commands/dbcommands.c:164 +#: commands/dbcommands.c:173 commands/dbcommands.c:182 +#: commands/dbcommands.c:191 commands/dbcommands.c:200 +#: commands/dbcommands.c:209 commands/dbcommands.c:218 +#: commands/dbcommands.c:227 commands/dbcommands.c:1427 +#: commands/dbcommands.c:1436 commands/dbcommands.c:1445 +#: commands/dbcommands.c:1454 commands/extension.c:1678 +#: commands/extension.c:1688 commands/extension.c:1698 +#: commands/extension.c:1708 commands/extension.c:2949 +#: commands/foreigncmds.c:537 commands/foreigncmds.c:546 +#: commands/functioncmds.c:526 commands/functioncmds.c:643 +#: commands/functioncmds.c:652 commands/functioncmds.c:661 +#: commands/functioncmds.c:670 commands/functioncmds.c:2097 +#: commands/functioncmds.c:2105 commands/publicationcmds.c:90 +#: commands/sequence.c:1265 commands/sequence.c:1275 commands/sequence.c:1285 +#: commands/sequence.c:1295 commands/sequence.c:1305 commands/sequence.c:1315 +#: commands/sequence.c:1325 commands/sequence.c:1335 commands/sequence.c:1345 +#: commands/subscriptioncmds.c:110 commands/subscriptioncmds.c:120 +#: commands/subscriptioncmds.c:130 commands/subscriptioncmds.c:140 +#: commands/subscriptioncmds.c:154 commands/subscriptioncmds.c:165 +#: commands/subscriptioncmds.c:179 commands/tablecmds.c:5973 +#: commands/typecmds.c:298 commands/typecmds.c:1375 commands/typecmds.c:1384 +#: commands/typecmds.c:1392 commands/typecmds.c:1400 commands/typecmds.c:1408 +#: commands/user.c:134 commands/user.c:148 commands/user.c:157 +#: commands/user.c:166 commands/user.c:175 commands/user.c:184 +#: commands/user.c:193 commands/user.c:202 commands/user.c:211 +#: commands/user.c:220 commands/user.c:229 commands/user.c:238 +#: commands/user.c:247 commands/user.c:555 commands/user.c:563 +#: commands/user.c:571 commands/user.c:579 commands/user.c:587 +#: commands/user.c:595 commands/user.c:603 commands/user.c:611 +#: commands/user.c:620 commands/user.c:628 commands/user.c:636 +#: parser/parse_utilcmd.c:396 replication/pgoutput/pgoutput.c:107 +#: replication/pgoutput/pgoutput.c:128 replication/walsender.c:800 +#: replication/walsender.c:811 replication/walsender.c:821 #, c-format msgid "conflicting or redundant options" msgstr "상충하거나 중복된 옵션들" -#: catalog/aclchk.c:988 +#: catalog/aclchk.c:997 #, c-format msgid "default privileges cannot be set for columns" msgstr "default privileges 설정은 칼럼 대상으로 할 수 없음" -#: catalog/aclchk.c:1502 catalog/objectaddress.c:1390 commands/analyze.c:376 -#: commands/copy.c:4458 commands/sequence.c:1491 commands/tablecmds.c:5198 -#: commands/tablecmds.c:5304 commands/tablecmds.c:5364 -#: commands/tablecmds.c:5477 commands/tablecmds.c:5534 -#: commands/tablecmds.c:5628 commands/tablecmds.c:5724 -#: commands/tablecmds.c:7915 commands/tablecmds.c:8177 -#: commands/tablecmds.c:8597 commands/trigger.c:642 parser/analyze.c:2228 -#: parser/parse_relation.c:2628 parser/parse_relation.c:2690 -#: parser/parse_target.c:951 parser/parse_type.c:127 utils/adt/acl.c:2840 -#: utils/adt/ruleutils.c:1984 +#: catalog/aclchk.c:1157 +#, c-format +msgid "cannot use IN SCHEMA clause when using GRANT/REVOKE ON SCHEMAS" +msgstr "" + +#: catalog/aclchk.c:1521 catalog/objectaddress.c:1389 commands/analyze.c:390 +#: commands/copy.c:4753 commands/sequence.c:1700 commands/tablecmds.c:5621 +#: commands/tablecmds.c:5768 commands/tablecmds.c:5825 +#: 
commands/tablecmds.c:5898 commands/tablecmds.c:5992 +#: commands/tablecmds.c:6051 commands/tablecmds.c:6176 +#: commands/tablecmds.c:6230 commands/tablecmds.c:6322 +#: commands/tablecmds.c:6478 commands/tablecmds.c:8707 +#: commands/tablecmds.c:8983 commands/tablecmds.c:9418 commands/trigger.c:817 +#: parser/analyze.c:2310 parser/parse_relation.c:2728 +#: parser/parse_relation.c:2790 parser/parse_target.c:1002 +#: parser/parse_type.c:127 utils/adt/acl.c:2823 utils/adt/ruleutils.c:2356 #, c-format msgid "column \"%s\" of relation \"%s\" does not exist" -msgstr "\"%s\" 열은 \"%s\" 릴레이션(relation)에 없음" +msgstr "\"%s\" 칼럼은 \"%s\" 릴레이션(relation)에 없음" -#: catalog/aclchk.c:1771 catalog/objectaddress.c:1203 commands/sequence.c:1078 -#: commands/tablecmds.c:224 commands/tablecmds.c:12154 utils/adt/acl.c:2076 -#: utils/adt/acl.c:2106 utils/adt/acl.c:2138 utils/adt/acl.c:2170 -#: utils/adt/acl.c:2198 utils/adt/acl.c:2228 +#: catalog/aclchk.c:1787 catalog/objectaddress.c:1229 commands/sequence.c:1138 +#: commands/tablecmds.c:229 commands/tablecmds.c:13093 utils/adt/acl.c:2059 +#: utils/adt/acl.c:2089 utils/adt/acl.c:2121 utils/adt/acl.c:2153 +#: utils/adt/acl.c:2181 utils/adt/acl.c:2211 #, c-format msgid "\"%s\" is not a sequence" msgstr "\"%s\" 시퀀스가 아님" -#: catalog/aclchk.c:1809 +#: catalog/aclchk.c:1825 #, c-format msgid "sequence \"%s\" only supports USAGE, SELECT, and UPDATE privileges" msgstr "\"%s\" 시퀀스는 USAGE, SELECT 및 UPDATE 권한만 지원함" -#: catalog/aclchk.c:1826 +#: catalog/aclchk.c:1842 #, c-format -msgid "invalid privilege type USAGE for table" -msgstr "테이블에서 USAGE 권한은 잘못되었음" +msgid "invalid privilege type %s for table" +msgstr "%s 권한은 테이블에서 사용할 수 없는 권한 형태임" -#: catalog/aclchk.c:1994 +#: catalog/aclchk.c:2008 #, c-format msgid "invalid privilege type %s for column" msgstr "%s 권한 형식은 칼럼에서 유효하지 않음" -#: catalog/aclchk.c:2007 +#: catalog/aclchk.c:2021 #, c-format msgid "sequence \"%s\" only supports SELECT column privileges" msgstr "\"%s\" 시퀀스는 SELECT 열 권한만 지원함" -#: catalog/aclchk.c:2601 +#: catalog/aclchk.c:2603 #, c-format msgid "language \"%s\" is not trusted" msgstr "\"%s\" 프로시주얼 언어는 안전하지 못합니다" -#: catalog/aclchk.c:2603 +#: catalog/aclchk.c:2605 #, c-format msgid "" "GRANT and REVOKE are not allowed on untrusted languages, because only " @@ -3631,363 +3472,410 @@ msgstr "" "안전하지 않은 프로시져 언어에 대해서는 GRANT 또는 REVOKE 작업을 허용하지 않습" "니다, 안전하지 않은 프로시져 언어는 슈퍼유저만 사용할 수 있기 때문입니다." -#: catalog/aclchk.c:3129 +#: catalog/aclchk.c:3119 #, c-format msgid "cannot set privileges of array types" msgstr "배열형 자료형에 권한 설정을 할 수 없음" -#: catalog/aclchk.c:3130 +#: catalog/aclchk.c:3120 #, c-format msgid "Set the privileges of the element type instead." msgstr "그 배열 요소에 해당하는 자료형에 대해서 접근 권한 설정을 하세요." 
-#: catalog/aclchk.c:3137 catalog/objectaddress.c:1523 commands/typecmds.c:3146 +#: catalog/aclchk.c:3127 catalog/objectaddress.c:1519 #, c-format msgid "\"%s\" is not a domain" msgstr "\"%s\" 이름의 객체는 도메인이 아닙니다" -#: catalog/aclchk.c:3260 +#: catalog/aclchk.c:3247 #, c-format msgid "unrecognized privilege type \"%s\"" msgstr "알 수 없는 권한 타입 \"%s\"" -#: catalog/aclchk.c:3309 +#: catalog/aclchk.c:3296 #, c-format msgid "permission denied for column %s" msgstr "%s 칼럼에 대한 접근 권한 없음" -#: catalog/aclchk.c:3311 +#: catalog/aclchk.c:3298 #, c-format msgid "permission denied for relation %s" msgstr "%s 릴레이션(relation) 접근 권한 없음" -#: catalog/aclchk.c:3313 commands/sequence.c:561 commands/sequence.c:786 -#: commands/sequence.c:828 commands/sequence.c:865 commands/sequence.c:1543 +#: catalog/aclchk.c:3300 commands/sequence.c:600 commands/sequence.c:834 +#: commands/sequence.c:876 commands/sequence.c:917 commands/sequence.c:1791 +#: commands/sequence.c:1855 #, c-format msgid "permission denied for sequence %s" msgstr "%s 시퀀스 접근 권한 없음" -#: catalog/aclchk.c:3315 +#: catalog/aclchk.c:3302 #, c-format msgid "permission denied for database %s" msgstr "%s 데이터베이스 접근 권한 없음" -#: catalog/aclchk.c:3317 +#: catalog/aclchk.c:3304 #, c-format msgid "permission denied for function %s" msgstr "%s 함수 접근 권한 없음" -#: catalog/aclchk.c:3319 +#: catalog/aclchk.c:3306 #, c-format msgid "permission denied for operator %s" msgstr "%s 연산자 접근 권한 없음" -#: catalog/aclchk.c:3321 +#: catalog/aclchk.c:3308 #, c-format msgid "permission denied for type %s" msgstr "%s 자료형 접근 권한 없음" -#: catalog/aclchk.c:3323 +#: catalog/aclchk.c:3310 #, c-format msgid "permission denied for language %s" msgstr "%s 프로시주얼 언어 접근 권한 없음" -#: catalog/aclchk.c:3325 +#: catalog/aclchk.c:3312 #, c-format msgid "permission denied for large object %s" msgstr "%s 대형 객체 접근 권한 없음" -#: catalog/aclchk.c:3327 +#: catalog/aclchk.c:3314 #, c-format msgid "permission denied for schema %s" msgstr "%s 스키마(schema) 접근 권한 없음" -#: catalog/aclchk.c:3329 +#: catalog/aclchk.c:3316 #, c-format msgid "permission denied for operator class %s" msgstr "%s 연산자 클래스 접근 권한 없음" -#: catalog/aclchk.c:3331 +#: catalog/aclchk.c:3318 #, c-format msgid "permission denied for operator family %s" msgstr "%s 연산자 패밀리 접근 권한 없음" -#: catalog/aclchk.c:3333 +#: catalog/aclchk.c:3320 #, c-format msgid "permission denied for collation %s" msgstr "%s 정렬정의(collation) 접근 권한 없음" -#: catalog/aclchk.c:3335 +#: catalog/aclchk.c:3322 #, c-format msgid "permission denied for conversion %s" msgstr "%s 문자코드변환규칙(conversion) 접근 권한 없음" -#: catalog/aclchk.c:3337 +#: catalog/aclchk.c:3324 +#, c-format +msgid "permission denied for statistics object %s" +msgstr "%s 객체 통계정보 접근 권한 없음" + +#: catalog/aclchk.c:3326 #, c-format msgid "permission denied for tablespace %s" msgstr "%s 테이블스페이스 접근 권한 없음" -#: catalog/aclchk.c:3339 +#: catalog/aclchk.c:3328 #, c-format msgid "permission denied for text search dictionary %s" msgstr "%s 전문 검색 사전 접근 권한 없음" -#: catalog/aclchk.c:3341 +#: catalog/aclchk.c:3330 #, c-format msgid "permission denied for text search configuration %s" msgstr "%s 전문 검색 구성 접근 권한 없음" -#: catalog/aclchk.c:3343 +#: catalog/aclchk.c:3332 #, c-format msgid "permission denied for foreign-data wrapper %s" msgstr "%s 외부 데이터 래퍼 접근 권한 없음" -#: catalog/aclchk.c:3345 +#: catalog/aclchk.c:3334 #, c-format msgid "permission denied for foreign server %s" msgstr "%s 외부 서버 접근 권한 없음" -#: catalog/aclchk.c:3347 +#: catalog/aclchk.c:3336 #, c-format msgid "permission denied for event trigger %s" msgstr "%s 이벤트 트리거 접근 권한 없음" -#: 
catalog/aclchk.c:3349 +#: catalog/aclchk.c:3338 #, c-format msgid "permission denied for extension %s" msgstr "%s 확장 모듈 접근 권한 없음" -#: catalog/aclchk.c:3355 catalog/aclchk.c:3357 +#: catalog/aclchk.c:3340 +#, c-format +msgid "permission denied for publication %s" +msgstr "%s 발행 접근 권한 없음" + +#: catalog/aclchk.c:3342 +#, c-format +msgid "permission denied for subscription %s" +msgstr "%s 구독 접근 권한 없음" + +#: catalog/aclchk.c:3348 catalog/aclchk.c:3350 #, c-format msgid "must be owner of relation %s" msgstr "%s 릴레이션(relation)의 소유주여야만 합니다" -#: catalog/aclchk.c:3359 +#: catalog/aclchk.c:3352 #, c-format msgid "must be owner of sequence %s" msgstr "%s 시퀀스의 소유주여야만 합니다" -#: catalog/aclchk.c:3361 +#: catalog/aclchk.c:3354 #, c-format msgid "must be owner of database %s" msgstr "%s 데이터베이스의 소유주여야만 합니다" -#: catalog/aclchk.c:3363 +#: catalog/aclchk.c:3356 #, c-format msgid "must be owner of function %s" msgstr "%s 함수의 소유주여야만 합니다" -#: catalog/aclchk.c:3365 +#: catalog/aclchk.c:3358 #, c-format msgid "must be owner of operator %s" msgstr "%s 연산자의 소유주여야만 합니다" -#: catalog/aclchk.c:3367 +#: catalog/aclchk.c:3360 #, c-format msgid "must be owner of type %s" msgstr "%s 자료형의 소유주여야만 합니다" -#: catalog/aclchk.c:3369 +#: catalog/aclchk.c:3362 #, c-format msgid "must be owner of language %s" msgstr "%s 프로시주얼 언어의 소유주여야만 합니다" -#: catalog/aclchk.c:3371 +#: catalog/aclchk.c:3364 #, c-format msgid "must be owner of large object %s" msgstr "%s 대형 객체의 소유주여야만 합니다" -#: catalog/aclchk.c:3373 +#: catalog/aclchk.c:3366 #, c-format msgid "must be owner of schema %s" msgstr "%s 스키마(schema)의 소유주여야만 합니다" -#: catalog/aclchk.c:3375 +#: catalog/aclchk.c:3368 #, c-format msgid "must be owner of operator class %s" msgstr "%s 연산자 클래스의 소유주여야만 합니다" -#: catalog/aclchk.c:3377 +#: catalog/aclchk.c:3370 #, c-format msgid "must be owner of operator family %s" msgstr "%s 연산자 패밀리의 소유주여야 함" -#: catalog/aclchk.c:3379 +#: catalog/aclchk.c:3372 #, c-format msgid "must be owner of collation %s" msgstr "%s 정렬정의(collation)의 소유주여야만 합니다" -#: catalog/aclchk.c:3381 +#: catalog/aclchk.c:3374 #, c-format msgid "must be owner of conversion %s" msgstr "%s 문자코드변환규칙(conversion)의 소유주여야만 합니다" -#: catalog/aclchk.c:3383 +#: catalog/aclchk.c:3376 +#, c-format +msgid "must be owner of statistics object %s" +msgstr "%s 통계정보 객체의 소유주여야만 합니다" + +#: catalog/aclchk.c:3378 #, c-format msgid "must be owner of tablespace %s" msgstr "%s 테이블스페이스의 소유주여야만 합니다" -#: catalog/aclchk.c:3385 +#: catalog/aclchk.c:3380 #, c-format msgid "must be owner of text search dictionary %s" msgstr "%s 전문 검색 사전의 소유주여야 함" -#: catalog/aclchk.c:3387 +#: catalog/aclchk.c:3382 #, c-format msgid "must be owner of text search configuration %s" msgstr "%s 전문 검색 구성의 소유주여야 함" -#: catalog/aclchk.c:3389 +#: catalog/aclchk.c:3384 #, c-format msgid "must be owner of foreign-data wrapper %s" msgstr "%s 외부 데이터 래퍼의 소유주여야 함" -#: catalog/aclchk.c:3391 +#: catalog/aclchk.c:3386 #, c-format msgid "must be owner of foreign server %s" msgstr "%s 외부 서버의 소유주여야 함" -#: catalog/aclchk.c:3393 +#: catalog/aclchk.c:3388 #, c-format msgid "must be owner of event trigger %s" msgstr "%s 이벤트 트리거의 소유주여야만 합니다" -#: catalog/aclchk.c:3395 +#: catalog/aclchk.c:3390 #, c-format msgid "must be owner of extension %s" msgstr "%s 확장 모듈의 소유주여야만 합니다" -#: catalog/aclchk.c:3437 +#: catalog/aclchk.c:3392 +#, c-format +msgid "must be owner of publication %s" +msgstr "%s 발행의 소유주여야만 합니다" + +#: catalog/aclchk.c:3394 +#, c-format +msgid "must be owner of subscription %s" +msgstr "%s 구독의 소유주여야만 합니다" + +#: catalog/aclchk.c:3436 #, c-format msgid "permission 
denied for column \"%s\" of relation \"%s\"" msgstr "\"%s\" 칼럼(해당 릴레이션: \"%s\") 접근 권한 없음" -#: catalog/aclchk.c:3556 catalog/aclchk.c:3564 +#: catalog/aclchk.c:3559 catalog/aclchk.c:3567 #, c-format msgid "attribute %d of relation with OID %u does not exist" msgstr "%d번째 속성(해당 릴레이션 OID: %u)이 없음" -#: catalog/aclchk.c:3637 catalog/aclchk.c:4556 +#: catalog/aclchk.c:3640 catalog/aclchk.c:4559 #, c-format msgid "relation with OID %u does not exist" msgstr "OID %u 릴레이션(relation) 없음" -#: catalog/aclchk.c:3736 catalog/aclchk.c:4974 +#: catalog/aclchk.c:3739 catalog/aclchk.c:4977 #, c-format msgid "database with OID %u does not exist" msgstr "OID %u 데이터베이스 없음" -#: catalog/aclchk.c:3790 catalog/aclchk.c:4634 tcop/fastpath.c:223 +#: catalog/aclchk.c:3793 catalog/aclchk.c:4637 tcop/fastpath.c:223 +#: utils/fmgr/fmgr.c:2117 #, c-format msgid "function with OID %u does not exist" msgstr "OID %u 함수 없음" -#: catalog/aclchk.c:3844 catalog/aclchk.c:4660 +#: catalog/aclchk.c:3847 catalog/aclchk.c:4663 #, c-format msgid "language with OID %u does not exist" msgstr "OID %u 언어 없음" -#: catalog/aclchk.c:4008 catalog/aclchk.c:4732 +#: catalog/aclchk.c:4011 catalog/aclchk.c:4735 #, c-format msgid "schema with OID %u does not exist" msgstr "OID %u 스키마 없음" -#: catalog/aclchk.c:4062 catalog/aclchk.c:4759 +#: catalog/aclchk.c:4065 catalog/aclchk.c:4762 #, c-format msgid "tablespace with OID %u does not exist" msgstr "OID %u 테이블스페이스 없음" -#: catalog/aclchk.c:4121 catalog/aclchk.c:4893 commands/foreigncmds.c:325 +#: catalog/aclchk.c:4124 catalog/aclchk.c:4896 commands/foreigncmds.c:324 #, c-format msgid "foreign-data wrapper with OID %u does not exist" msgstr "OID가 %u인 외부 데이터 래퍼가 없음" -#: catalog/aclchk.c:4183 catalog/aclchk.c:4920 commands/foreigncmds.c:461 +#: catalog/aclchk.c:4186 catalog/aclchk.c:4923 commands/foreigncmds.c:459 #, c-format msgid "foreign server with OID %u does not exist" msgstr "OID가 %u인 외부 서버가 없음" -#: catalog/aclchk.c:4243 catalog/aclchk.c:4582 +#: catalog/aclchk.c:4246 catalog/aclchk.c:4585 utils/cache/typcache.c:238 #, c-format msgid "type with OID %u does not exist" msgstr "OID %u 자료형 없음" -#: catalog/aclchk.c:4608 +#: catalog/aclchk.c:4611 #, c-format msgid "operator with OID %u does not exist" msgstr "OID %u 연산자 없음" -#: catalog/aclchk.c:4785 +#: catalog/aclchk.c:4788 #, c-format msgid "operator class with OID %u does not exist" msgstr "OID %u 연산자 클래스 없음" -#: catalog/aclchk.c:4812 +#: catalog/aclchk.c:4815 #, c-format msgid "operator family with OID %u does not exist" msgstr "OID가 %u인 연산자 패밀리가 없음" -#: catalog/aclchk.c:4839 +#: catalog/aclchk.c:4842 #, c-format msgid "text search dictionary with OID %u does not exist" msgstr "OID가 %u인 전문 검색 사전이 없음" -#: catalog/aclchk.c:4866 +#: catalog/aclchk.c:4869 #, c-format msgid "text search configuration with OID %u does not exist" msgstr "OID가 %u인 텍스트 검색 구성이 없음" -#: catalog/aclchk.c:4947 commands/event_trigger.c:587 +#: catalog/aclchk.c:4950 commands/event_trigger.c:588 #, c-format msgid "event trigger with OID %u does not exist" msgstr "OID %u 이벤트 트리거가 없음" -#: catalog/aclchk.c:5000 +#: catalog/aclchk.c:5003 commands/collationcmds.c:348 #, c-format msgid "collation with OID %u does not exist" msgstr "OID %u 정렬정의(collation) 없음" -#: catalog/aclchk.c:5026 +#: catalog/aclchk.c:5029 #, c-format msgid "conversion with OID %u does not exist" msgstr "OID %u 인코딩 변환규칙(conversion) 없음" -#: catalog/aclchk.c:5067 +#: catalog/aclchk.c:5070 #, c-format msgid "extension with OID %u does not exist" msgstr "OID %u 확장 모듈이 없음" -#: catalog/dependency.c:645 +#: 
catalog/aclchk.c:5097 commands/publicationcmds.c:733 +#, c-format +msgid "publication with OID %u does not exist" +msgstr "OID %u 발행 없음" + +#: catalog/aclchk.c:5123 commands/subscriptioncmds.c:1098 +#, c-format +msgid "subscription with OID %u does not exist" +msgstr "OID %u 구독 없음" + +#: catalog/aclchk.c:5149 +#, c-format +msgid "statistics object with OID %u does not exist" +msgstr "OID %u 통계정보 객체 없음" + +#: catalog/dependency.c:613 #, c-format msgid "cannot drop %s because %s requires it" msgstr "%s 삭제할 수 없음, %s에서 필요로함" -#: catalog/dependency.c:648 +#: catalog/dependency.c:616 #, c-format msgid "You can drop %s instead." msgstr "대신에, drop %s 명령을 사용할 수 있음." -#: catalog/dependency.c:810 catalog/pg_shdepend.c:576 +#: catalog/dependency.c:779 catalog/pg_shdepend.c:574 #, c-format msgid "cannot drop %s because it is required by the database system" msgstr "%s 객체는 데이터베이스 시스템에서 필요하기 때문에 삭제 될 수 없음" -#: catalog/dependency.c:926 +#: catalog/dependency.c:897 #, c-format msgid "drop auto-cascades to %s" msgstr "%s 객체가 자동으로 덩달아 삭제됨" -#: catalog/dependency.c:938 catalog/dependency.c:947 +#: catalog/dependency.c:909 catalog/dependency.c:918 #, c-format msgid "%s depends on %s" msgstr "%s 의존대상: %s" -#: catalog/dependency.c:959 catalog/dependency.c:968 +#: catalog/dependency.c:930 catalog/dependency.c:939 #, c-format msgid "drop cascades to %s" msgstr "%s 객체가 덩달아 삭제됨" -#: catalog/dependency.c:976 catalog/pg_shdepend.c:687 +#: catalog/dependency.c:947 catalog/pg_shdepend.c:685 #, c-format msgid "" "\n" @@ -3998,107 +3886,96 @@ msgid_plural "" msgstr[0] "" msgstr[1] "" -#: catalog/dependency.c:988 +#: catalog/dependency.c:959 #, c-format msgid "cannot drop %s because other objects depend on it" msgstr "기타 다른 객체들이 이 객체에 의존하고 있어, %s 삭제할 수 없음" -#: catalog/dependency.c:992 catalog/dependency.c:999 +#: catalog/dependency.c:963 catalog/dependency.c:970 #, c-format msgid "Use DROP ... CASCADE to drop the dependent objects too." msgstr "" "이 객체와 관계된 모든 객체들을 함께 삭제하려면 DROP ... CASCADE 명령을 사용하" "십시오" -#: catalog/dependency.c:996 +#: catalog/dependency.c:967 #, c-format msgid "cannot drop desired object(s) because other objects depend on them" msgstr "다른 객체가 원하는 객체를 사용하고 있으므로 해당 객체를 삭제할 수 없음" #. translator: %d always has a value larger than 1 -#: catalog/dependency.c:1005 +#: catalog/dependency.c:976 #, c-format msgid "drop cascades to %d other object" msgid_plural "drop cascades to %d other objects" msgstr[0] "%d개의 다른 객체에 대한 관련 항목 삭제" -#: catalog/dependency.c:1633 +#: catalog/dependency.c:1635 #, c-format -msgid "constant of the type \"regrole\" cannot be used here" -msgstr "\"regrole\" 자료형 상수는 여기서 사용할 수 없음" +msgid "constant of the type %s cannot be used here" +msgstr "%s 자료형은 여기서 사용할 수 없음" -#: catalog/heap.c:278 +#: catalog/heap.c:283 #, c-format msgid "permission denied to create \"%s.%s\"" msgstr "\"%s.%s\" 만들 권한이 없음" -#: catalog/heap.c:280 +#: catalog/heap.c:285 #, c-format msgid "System catalog modifications are currently disallowed." msgstr "시스템 카탈로그 변경은 현재 허용하지 않습니다." 
-#: catalog/heap.c:415 commands/tablecmds.c:1439 commands/tablecmds.c:1896 -#: commands/tablecmds.c:4820 +#: catalog/heap.c:421 commands/tablecmds.c:1649 commands/tablecmds.c:2159 +#: commands/tablecmds.c:5225 #, c-format msgid "tables can have at most %d columns" msgstr "한 테이블에 지정할 수 있는 최대 열 수는 %d입니다" -#: catalog/heap.c:432 commands/tablecmds.c:5081 +#: catalog/heap.c:438 commands/tablecmds.c:5484 #, c-format msgid "column name \"%s\" conflicts with a system column name" msgstr "\"%s\" 열 이름은 시스템 열 이름과 충돌합니다" -#: catalog/heap.c:448 +#: catalog/heap.c:454 #, c-format msgid "column name \"%s\" specified more than once" msgstr "\"%s\" 칼럼 이름이 여러 번 지정됨" -#: catalog/heap.c:498 -#, c-format -msgid "column \"%s\" has type \"unknown\"" -msgstr "\"%s\" 열의 자료형이 \"unknown\" 입니다" - -#: catalog/heap.c:499 -#, c-format -msgid "Proceeding with relation creation anyway." -msgstr "관계 작성을 계속합니다." - -#: catalog/heap.c:512 +#: catalog/heap.c:507 #, c-format msgid "column \"%s\" has pseudo-type %s" -msgstr "\"%s\" 열은 %s 의사 자료형(pseudo-type)을 사용합니다" +msgstr "\"%s\" 칼럼은 %s 의사 자료형(pseudo-type)을 사용합니다" -#: catalog/heap.c:542 +#: catalog/heap.c:537 #, c-format msgid "composite type %s cannot be made a member of itself" msgstr "%s 복합 자료형은 자기 자신의 구성원으로 만들 수 없음" -#: catalog/heap.c:584 commands/createas.c:201 commands/createas.c:498 +#: catalog/heap.c:579 commands/createas.c:201 commands/createas.c:498 #, c-format msgid "no collation was derived for column \"%s\" with collatable type %s" msgstr "" "column \"%s\" 칼럼에 사용하는 %s 자료형에서 사용할 정렬규칙을 결정할 수 없습" "니다." -#: catalog/heap.c:586 commands/createas.c:204 commands/createas.c:501 -#: commands/indexcmds.c:1133 commands/view.c:103 regex/regc_pg_locale.c:262 -#: utils/adt/formatting.c:1513 utils/adt/formatting.c:1565 -#: utils/adt/formatting.c:1633 utils/adt/formatting.c:1685 -#: utils/adt/formatting.c:1754 utils/adt/formatting.c:1818 -#: utils/adt/like.c:213 utils/adt/selfuncs.c:5334 utils/adt/varlena.c:1421 -#: utils/adt/varlena.c:1826 +#: catalog/heap.c:581 commands/createas.c:204 commands/createas.c:501 +#: commands/indexcmds.c:1149 commands/tablecmds.c:13389 commands/view.c:103 +#: regex/regc_pg_locale.c:263 utils/adt/formatting.c:1547 +#: utils/adt/formatting.c:1671 utils/adt/formatting.c:1796 +#: utils/adt/like.c:184 utils/adt/selfuncs.c:5563 utils/adt/varlena.c:1417 +#: utils/adt/varlena.c:1866 #, c-format msgid "Use the COLLATE clause to set the collation explicitly." msgstr "명시적으로 정렬 규칙을 지정하려면 COLLATE 절을 사용하세요." -#: catalog/heap.c:1067 catalog/index.c:792 commands/tablecmds.c:2623 +#: catalog/heap.c:1067 catalog/index.c:806 commands/tablecmds.c:2943 #, c-format msgid "relation \"%s\" already exists" msgstr "\"%s\" 이름의 릴레이션(relation)이 이미 있습니다" -#: catalog/heap.c:1083 catalog/pg_type.c:412 catalog/pg_type.c:722 -#: commands/typecmds.c:237 commands/typecmds.c:784 commands/typecmds.c:1135 -#: commands/typecmds.c:1357 commands/typecmds.c:2113 +#: catalog/heap.c:1083 catalog/pg_type.c:410 catalog/pg_type.c:732 +#: commands/typecmds.c:239 commands/typecmds.c:788 commands/typecmds.c:1139 +#: commands/typecmds.c:1350 commands/typecmds.c:2106 #, c-format msgid "type \"%s\" already exists" msgstr "\"%s\" 자료형이 이미 있습니다" @@ -4112,81 +3989,81 @@ msgstr "" "하나의 릴레이션은 그 이름과 같은 자료형과 관계합니다. 그래서, 이미 같은 이름" "의 자료형이 있다면 해당 릴레이션을 만들 수 없습니다. 다른 이름을 사용하세요." 
-#: catalog/heap.c:1112 +#: catalog/heap.c:1113 #, c-format msgid "pg_class heap OID value not set when in binary upgrade mode" msgstr "이진 업그레이드 작업 때, pg_class 자료 OID 값이 지정되지 않았습니다" -#: catalog/heap.c:2291 +#: catalog/heap.c:2078 +#, c-format +msgid "cannot add NO INHERIT constraint to partitioned table \"%s\"" +msgstr "\"%s\" 파티션 테이블에는 NO INHERIT 조건을 사용할 수 없음" + +#: catalog/heap.c:2336 #, c-format msgid "check constraint \"%s\" already exists" msgstr "\"%s\" 이름의 체크 제약 조건이 이미 있습니다" -#: catalog/heap.c:2456 catalog/pg_constraint.c:654 commands/tablecmds.c:6069 +#: catalog/heap.c:2504 catalog/pg_constraint.c:649 commands/tablecmds.c:6838 #, c-format msgid "constraint \"%s\" for relation \"%s\" already exists" msgstr "" "\"%s\" 제약 조건이 이미 \"%s\" 릴레이션(relation)에서 사용되고 있습니다" -#: catalog/heap.c:2463 +#: catalog/heap.c:2511 #, c-format msgid "" "constraint \"%s\" conflicts with non-inherited constraint on relation \"%s\"" msgstr "" "\"%s\" 제약 조건이 비상속 제약 조건과 충돌합니다, 해당 릴레이션: \"%s\"" -#: catalog/heap.c:2474 +#: catalog/heap.c:2522 #, c-format msgid "" "constraint \"%s\" conflicts with inherited constraint on relation \"%s\"" msgstr "\"%s\" 제약 조건이 상속 제약 조건과 충돌합니다, 해당 릴레이션: \"%s\"" -#: catalog/heap.c:2484 +#: catalog/heap.c:2532 #, c-format msgid "" "constraint \"%s\" conflicts with NOT VALID constraint on relation \"%s\"" msgstr "" "\"%s\" 제약 조건이 NOT VALID 제약 조건과 충돌합니다, 해당 릴레이션: \"%s\"" -#: catalog/heap.c:2489 +#: catalog/heap.c:2537 #, c-format msgid "merging constraint \"%s\" with inherited definition" msgstr "\"%s\" 제약 조건을 상속된 정의와 병합하는 중" -#: catalog/heap.c:2595 +#: catalog/heap.c:2653 #, c-format msgid "cannot use column references in default expression" msgstr "default 표현식에서는 열 reference를 사용할 수 없음" -#: catalog/heap.c:2606 -#, c-format -msgid "default expression must not return a set" -msgstr "default 표현식은 하나의 set을 반환하면 안됩니다" - -#: catalog/heap.c:2625 rewrite/rewriteHandler.c:1084 +#: catalog/heap.c:2678 rewrite/rewriteHandler.c:1171 #, c-format msgid "column \"%s\" is of type %s but default expression is of type %s" msgstr "" -"\"%s\" 열의 자료형은 %s 인데, default 표현식에서는 %s 자료형을 사용했습니다" +"\"%s\" 칼럼의 자료형은 %s 인데, default 표현식에서는 %s 자료형을 사용했습니다" -#: catalog/heap.c:2630 commands/prepare.c:374 parser/parse_node.c:428 -#: parser/parse_target.c:539 parser/parse_target.c:789 -#: parser/parse_target.c:799 rewrite/rewriteHandler.c:1089 +#: catalog/heap.c:2683 commands/prepare.c:384 parser/parse_node.c:430 +#: parser/parse_target.c:590 parser/parse_target.c:840 +#: parser/parse_target.c:850 rewrite/rewriteHandler.c:1176 #, c-format msgid "You will need to rewrite or cast the expression." msgstr "다시 정의하거나 형변화자를 사용해보십시오" -#: catalog/heap.c:2677 +#: catalog/heap.c:2730 #, c-format msgid "only table \"%s\" can be referenced in check constraint" msgstr "\"%s\" 테이블만이 체크 제약 조건에서 참조될 수 있습니다" -#: catalog/heap.c:2917 +#: catalog/heap.c:2970 #, c-format msgid "unsupported ON COMMIT and foreign key combination" msgstr "ON COMMIT 및 외래 키 조합이 지원되지 않음" -#: catalog/heap.c:2918 +#: catalog/heap.c:2971 #, c-format msgid "" "Table \"%s\" references \"%s\", but they do not have the same ON COMMIT " @@ -4194,316 +4071,279 @@ msgid "" msgstr "" "\"%s\" 테이블에서 \"%s\" 테이블을 참조하는데 ON COMMIT 설정이 같지 않습니다." -#: catalog/heap.c:2923 +#: catalog/heap.c:2976 #, c-format msgid "cannot truncate a table referenced in a foreign key constraint" msgstr "" "_^_ 테이블 내용을 모두 삭제할 수 없음, 참조키(foreign key) 제약 조건 안에서" -#: catalog/heap.c:2924 +#: catalog/heap.c:2977 #, c-format msgid "Table \"%s\" references \"%s\"." msgstr "\"%s\" 테이블은 \"%s\" 객체를 참조합니다." 
-#: catalog/heap.c:2926 +#: catalog/heap.c:2979 #, c-format msgid "Truncate table \"%s\" at the same time, or use TRUNCATE ... CASCADE." msgstr "" "\"%s\" 테이블도 함께 자료를 지우거나, TRUNCATE ... CASCADE 구문을 사용하세요." -#: catalog/index.c:210 parser/parse_utilcmd.c:1473 parser/parse_utilcmd.c:1559 +#: catalog/index.c:213 parser/parse_utilcmd.c:1672 parser/parse_utilcmd.c:1758 #, c-format msgid "multiple primary keys for table \"%s\" are not allowed" msgstr "\"%s\" 테이블에는 이미 기본키가 있습니다" -#: catalog/index.c:228 +#: catalog/index.c:231 #, c-format msgid "primary keys cannot be expressions" msgstr "기본기(primary key)를 표현할 수 없음" -#: catalog/index.c:742 catalog/index.c:1160 +#: catalog/index.c:756 catalog/index.c:1174 #, c-format msgid "user-defined indexes on system catalog tables are not supported" msgstr "시스템 카탈로그 테이블에는 사용자 정의 인덱스를 지정할 수 없습니다" -#: catalog/index.c:752 +#: catalog/index.c:766 #, c-format msgid "concurrent index creation on system catalog tables is not supported" msgstr "시스템 카탈로그 테이블에서 공존하는 인덱스 만들기는 지원하지 않습니다" -#: catalog/index.c:770 +#: catalog/index.c:784 #, c-format msgid "shared indexes cannot be created after initdb" msgstr "" "공유되는 인덱스들은 initdb 명령으로 데이터베이스 클러스터를 만든 다음에는 만" "들 수 없습니다" -#: catalog/index.c:784 commands/createas.c:249 commands/sequence.c:141 -#: parser/parse_utilcmd.c:191 +#: catalog/index.c:798 commands/createas.c:250 commands/sequence.c:152 +#: parser/parse_utilcmd.c:202 #, c-format msgid "relation \"%s\" already exists, skipping" msgstr "\"%s\" 이름의 릴레이션(relation)이 이미 있습니다, 건너뜀" -#: catalog/index.c:820 +#: catalog/index.c:834 #, c-format msgid "pg_class index OID value not set when in binary upgrade mode" msgstr "이진 업그레이드 작업 때, pg_class 인덱스 OID 값이 지정되지 않았습니다" -#: catalog/index.c:1422 +#: catalog/index.c:1435 #, c-format msgid "DROP INDEX CONCURRENTLY must be first action in transaction" msgstr "DROP INDEX CONCURRENTLY 명령은 트랜잭션 내 가장 처음에 있어야 합니다" -#: catalog/index.c:2004 +#: catalog/index.c:2023 #, c-format msgid "building index \"%s\" on table \"%s\"" msgstr "\"%s\" 인덱스를 \"%s\" 테이블에서 만드는 중" -#: catalog/index.c:3322 +#: catalog/index.c:3335 #, c-format msgid "cannot reindex temporary tables of other sessions" msgstr "임시 테이블의 인덱스 재생성 작업은 다른 세션에서 할 수 없음" -#: catalog/index.c:3454 +#: catalog/index.c:3466 #, c-format msgid "index \"%s\" was reindexed" msgstr "\"%s\" 인덱스가 다시 만들어졌음" -#: catalog/index.c:3456 commands/vacuumlazy.c:1338 commands/vacuumlazy.c:1414 -#: commands/vacuumlazy.c:1603 commands/vacuumlazy.c:1813 -#, c-format -msgid "%s." -msgstr "%s." 
- -#: catalog/namespace.c:249 catalog/namespace.c:447 catalog/namespace.c:541 -#: commands/trigger.c:4523 +#: catalog/namespace.c:235 catalog/namespace.c:433 catalog/namespace.c:527 +#: commands/trigger.c:5148 #, c-format msgid "cross-database references are not implemented: \"%s.%s.%s\"" msgstr "서로 다른 데이터베이스간의 참조는 구현되어있지 않습니다: \"%s.%s.%s\"" -#: catalog/namespace.c:306 +#: catalog/namespace.c:292 #, c-format msgid "temporary tables cannot specify a schema name" msgstr "임시 테이블은 스키마 이름을 지정할 수 없음" -#: catalog/namespace.c:385 +#: catalog/namespace.c:371 #, c-format msgid "could not obtain lock on relation \"%s.%s\"" msgstr "\"%s.%s\" 릴레이션의 잠금 정보를 구할 수 없음" -#: catalog/namespace.c:390 commands/lockcmds.c:146 +#: catalog/namespace.c:376 commands/lockcmds.c:145 #, c-format msgid "could not obtain lock on relation \"%s\"" msgstr "\"%s\" 릴레이션의 잠금 정보를 구할 수 없음" -#: catalog/namespace.c:414 parser/parse_relation.c:1138 +#: catalog/namespace.c:400 parser/parse_relation.c:1158 #, c-format msgid "relation \"%s.%s\" does not exist" msgstr "\"%s.%s\" 이름의 릴레이션(relation)이 없습니다" -#: catalog/namespace.c:419 parser/parse_relation.c:1151 -#: parser/parse_relation.c:1159 utils/adt/regproc.c:1034 +#: catalog/namespace.c:405 parser/parse_relation.c:1177 +#: parser/parse_relation.c:1185 #, c-format msgid "relation \"%s\" does not exist" msgstr "\"%s\" 이름의 릴레이션(relation)이 없습니다" -#: catalog/namespace.c:487 catalog/namespace.c:2841 commands/extension.c:1383 -#: commands/extension.c:1389 +#: catalog/namespace.c:473 catalog/namespace.c:2992 commands/extension.c:1466 +#: commands/extension.c:1472 #, c-format msgid "no schema has been selected to create in" msgstr "선택된 스키마 없음, 대상:" -#: catalog/namespace.c:639 catalog/namespace.c:652 +#: catalog/namespace.c:625 catalog/namespace.c:638 #, c-format msgid "cannot create relations in temporary schemas of other sessions" msgstr "다른 세션의 임시 스키마 안에는 릴레이션을 만들 수 없음" -#: catalog/namespace.c:643 +#: catalog/namespace.c:629 #, c-format msgid "cannot create temporary relation in non-temporary schema" msgstr "임시 스키마가 아닌 스키마에 임시 릴레이션을 만들 수 없음" -#: catalog/namespace.c:658 +#: catalog/namespace.c:644 #, c-format msgid "only temporary relations may be created in temporary schemas" msgstr "임시 스키마 안에는 임시 릴레이션만 만들 수 있음" -#: catalog/namespace.c:2154 +#: catalog/namespace.c:2182 +#, c-format +msgid "statistics object \"%s\" does not exist" +msgstr "\"%s\" 통계정보 객체가 없음" + +#: catalog/namespace.c:2305 #, c-format msgid "text search parser \"%s\" does not exist" msgstr "\"%s\" 전문 검색 파서가 없음" -#: catalog/namespace.c:2280 +#: catalog/namespace.c:2431 #, c-format msgid "text search dictionary \"%s\" does not exist" msgstr "\"%s\" 전문 검색 사전이 없음" -#: catalog/namespace.c:2407 +#: catalog/namespace.c:2558 #, c-format msgid "text search template \"%s\" does not exist" msgstr "\"%s\" 전문 검색 템플릿이 없음" -#: catalog/namespace.c:2533 commands/tsearchcmds.c:1197 -#: utils/cache/ts_cache.c:611 +#: catalog/namespace.c:2684 commands/tsearchcmds.c:1185 +#: utils/cache/ts_cache.c:612 #, c-format msgid "text search configuration \"%s\" does not exist" msgstr "\"%s\" 전문 검색 구성이 없음" -#: catalog/namespace.c:2646 parser/parse_expr.c:792 parser/parse_target.c:1141 +#: catalog/namespace.c:2797 parser/parse_expr.c:789 parser/parse_target.c:1192 #, c-format msgid "cross-database references are not implemented: %s" msgstr "서로 다른 데이터베이스간의 참조는 구현되어있지 않습니다: %s" -#: catalog/namespace.c:2652 parser/parse_expr.c:799 parser/parse_target.c:1148 -#: gram.y:13454 gram.y:14823 +#: catalog/namespace.c:2803 parser/parse_expr.c:796 
parser/parse_target.c:1199 +#: gram.y:14300 gram.y:15721 #, c-format msgid "improper qualified name (too many dotted names): %s" msgstr "적당하지 않은 qualified 이름 입니다 (너무 많은 점이 있네요): %s" -#: catalog/namespace.c:2783 +#: catalog/namespace.c:2934 #, c-format msgid "cannot move objects into or out of temporary schemas" msgstr "임시 스키마로(에서) 객체를 이동할 수 없습니다" -#: catalog/namespace.c:2789 +#: catalog/namespace.c:2940 #, c-format msgid "cannot move objects into or out of TOAST schema" msgstr "TOAST 스키마로(에서) 객체를 이동할 수 없습니다" -#: catalog/namespace.c:2862 commands/schemacmds.c:238 -#: commands/schemacmds.c:317 commands/tablecmds.c:741 +#: catalog/namespace.c:3013 commands/schemacmds.c:256 +#: commands/schemacmds.c:334 commands/tablecmds.c:891 #, c-format msgid "schema \"%s\" does not exist" msgstr "\"%s\" 스키마(schema) 없음" -#: catalog/namespace.c:2893 +#: catalog/namespace.c:3044 #, c-format msgid "improper relation name (too many dotted names): %s" msgstr "" "적당하지 않은 릴레이션(relation) 이름 입니다 (너무 많은 점이 있네요): %s" -#: catalog/namespace.c:3403 +#: catalog/namespace.c:3538 #, c-format msgid "collation \"%s\" for encoding \"%s\" does not exist" msgstr "\"%s\" 정렬정의(collation)가 \"%s\" 인코딩에서는 쓸 수 없음" -#: catalog/namespace.c:3458 +#: catalog/namespace.c:3593 #, c-format msgid "conversion \"%s\" does not exist" msgstr "\"%s\" 문자코드변환규칙(conversion) 없음" -#: catalog/namespace.c:3666 +#: catalog/namespace.c:3801 #, c-format msgid "permission denied to create temporary tables in database \"%s\"" msgstr "\"%s\" 데이터베이스에서 임시 파일을 만들 권한이 없음" -#: catalog/namespace.c:3682 +#: catalog/namespace.c:3817 #, c-format msgid "cannot create temporary tables during recovery" msgstr "복구 작업 중에는 임시 테이블을 만들 수 없음" -#: catalog/namespace.c:3688 +#: catalog/namespace.c:3823 #, c-format -msgid "cannot create temporary tables in parallel mode" -msgstr "병렬 모드에서는 임시 테이블을 만들 수 없음" +msgid "cannot create temporary tables during a parallel operation" +msgstr "병렬 작업 중에 임시 테이블을 만들 수 없음" -#: catalog/namespace.c:3932 commands/tablespace.c:1173 commands/variable.c:63 -#: utils/misc/guc.c:9875 +#: catalog/namespace.c:4072 commands/tablespace.c:1169 commands/variable.c:64 +#: utils/misc/guc.c:9990 utils/misc/guc.c:10068 #, c-format msgid "List syntax is invalid." msgstr "목록 문법이 틀렸습니다." 
-#: catalog/objectaddress.c:1065 -msgid "access method name cannot be qualified" -msgstr "접근 방법 이름이 적당치 않습니다" - -#: catalog/objectaddress.c:1068 -msgid "database name cannot be qualified" -msgstr "데이터베이스 이름이 적당치 않습니다" - -#: catalog/objectaddress.c:1071 commands/extension.c:2507 -#, c-format -msgid "extension name cannot be qualified" -msgstr "확장 모듈 이름으로 적당하지 않습니다" - -#: catalog/objectaddress.c:1074 -msgid "tablespace name cannot be qualified" -msgstr "테이블스페이스 이름으로 적당하지 않습니다" - -#: catalog/objectaddress.c:1077 -msgid "role name cannot be qualified" -msgstr "롤(role)이름으로 적당하지 않습니다" - -#: catalog/objectaddress.c:1080 -msgid "schema name cannot be qualified" -msgstr "스키마 이름이 적당치 않습니다" - -#: catalog/objectaddress.c:1083 -msgid "language name cannot be qualified" -msgstr "프로시주얼 언어 이름이 적당치 않습니다" - -#: catalog/objectaddress.c:1086 -msgid "foreign-data wrapper name cannot be qualified" -msgstr "외부자료 랩퍼 이름이 적당치 않습니다" - -#: catalog/objectaddress.c:1089 -msgid "server name cannot be qualified" -msgstr "서버 이름으로 적당하지 않습니다" - -#: catalog/objectaddress.c:1092 -msgid "event trigger name cannot be qualified" -msgstr "이벤트 트리거 이름이 적당치 않습니다" - -#: catalog/objectaddress.c:1210 commands/lockcmds.c:94 commands/policy.c:94 -#: commands/policy.c:382 commands/policy.c:471 commands/tablecmds.c:218 -#: commands/tablecmds.c:1300 commands/tablecmds.c:4347 -#: commands/tablecmds.c:8017 +#: catalog/objectaddress.c:1237 catalog/pg_publication.c:66 +#: commands/lockcmds.c:93 commands/policy.c:94 commands/policy.c:391 +#: commands/policy.c:481 commands/tablecmds.c:223 commands/tablecmds.c:265 +#: commands/tablecmds.c:1507 commands/tablecmds.c:4722 +#: commands/tablecmds.c:8823 #, c-format msgid "\"%s\" is not a table" msgstr "\"%s\" 객체는 테이블이 아님" -#: catalog/objectaddress.c:1217 commands/tablecmds.c:230 -#: commands/tablecmds.c:4377 commands/tablecmds.c:12159 commands/view.c:141 +#: catalog/objectaddress.c:1244 commands/tablecmds.c:235 +#: commands/tablecmds.c:4752 commands/tablecmds.c:13098 commands/view.c:141 #, c-format msgid "\"%s\" is not a view" msgstr "\"%s\" 객체는 뷰가 아님" -#: catalog/objectaddress.c:1224 commands/matview.c:174 -#: commands/tablecmds.c:236 commands/tablecmds.c:12164 +#: catalog/objectaddress.c:1251 commands/matview.c:174 +#: commands/tablecmds.c:241 commands/tablecmds.c:13103 #, c-format msgid "\"%s\" is not a materialized view" msgstr "\"%s\" 객체는 구체화된 뷰(materialized view)가 아닙니다" -#: catalog/objectaddress.c:1231 commands/tablecmds.c:254 -#: commands/tablecmds.c:4380 commands/tablecmds.c:12169 +#: catalog/objectaddress.c:1258 commands/tablecmds.c:259 +#: commands/tablecmds.c:4755 commands/tablecmds.c:13108 #, c-format msgid "\"%s\" is not a foreign table" msgstr "\"%s\" 객체는 외부 테이블이 아님" -#: catalog/objectaddress.c:1376 catalog/objectaddress.c:1429 +#: catalog/objectaddress.c:1299 +#, c-format +msgid "must specify relation and object name" +msgstr "릴레이션과 객체 이름을 지정해야 합니다" + +#: catalog/objectaddress.c:1375 catalog/objectaddress.c:1428 #, c-format msgid "column name must be qualified" msgstr "칼럼 이름으로 적당하지 않습니다" -#: catalog/objectaddress.c:1472 +#: catalog/objectaddress.c:1471 #, c-format msgid "default value for column \"%s\" of relation \"%s\" does not exist" msgstr "\"%s\" 칼럼(해당 릴레이션: \"%s\")의 기본값을 지정하지 않았음" -#: catalog/objectaddress.c:1512 commands/functioncmds.c:128 -#: commands/tablecmds.c:246 commands/typecmds.c:3214 parser/parse_type.c:226 -#: parser/parse_type.c:255 parser/parse_type.c:795 utils/adt/acl.c:4374 -#: utils/adt/regproc.c:1225 +#: catalog/objectaddress.c:1508 commands/functioncmds.c:128 +#: 
commands/tablecmds.c:251 commands/typecmds.c:3248 parser/parse_type.c:226 +#: parser/parse_type.c:255 parser/parse_type.c:794 utils/adt/acl.c:4357 #, c-format msgid "type \"%s\" does not exist" msgstr "\"%s\" 자료형 없음" -#: catalog/objectaddress.c:1629 +#: catalog/objectaddress.c:1625 #, c-format msgid "operator %d (%s, %s) of %s does not exist" msgstr "%d (%s, %s) 연산자(대상 %s) 없음" -#: catalog/objectaddress.c:1658 +#: catalog/objectaddress.c:1656 #, c-format msgid "function %d (%s, %s) of %s does not exist" msgstr "%d (%s, %s) 함수(대상 %s) 없음" @@ -4513,343 +4353,402 @@ msgstr "%d (%s, %s) 함수(대상 %s) 없음" msgid "user mapping for user \"%s\" on server \"%s\" does not exist" msgstr "\"%s\" 사용자에 대한 사용자 맵핑 정보(대상 서버: \"%s\")가 없음" -#: catalog/objectaddress.c:1722 commands/foreigncmds.c:430 -#: commands/foreigncmds.c:997 commands/foreigncmds.c:1359 -#: foreign/foreign.c:692 +#: catalog/objectaddress.c:1722 commands/foreigncmds.c:428 +#: commands/foreigncmds.c:1004 commands/foreigncmds.c:1377 +#: foreign/foreign.c:688 #, c-format msgid "server \"%s\" does not exist" msgstr "\"%s\" 이름의 서버가 없음" -#: catalog/objectaddress.c:1794 +#: catalog/objectaddress.c:1789 #, c-format -msgid "unrecognized default ACL object type %c" -msgstr "알 수 없는 기본 ACL 객체 타입 %c" +msgid "publication relation \"%s\" in publication \"%s\" does not exist" +msgstr "\"%s\" 발행 릴레이션은 \"%s\" 발행에 없습니다." -#: catalog/objectaddress.c:1795 +#: catalog/objectaddress.c:1851 #, c-format -msgid "Valid object types are \"r\", \"S\", \"f\", and \"T\"." -msgstr "유효한 객체 형태는 \"r\", \"S\", \"f\", \"T\"." +msgid "unrecognized default ACL object type \"%c\"" +msgstr "알 수 없는 기본 ACL 객체 타입 \"%c\"" -#: catalog/objectaddress.c:1841 +#: catalog/objectaddress.c:1852 +#, c-format +msgid "Valid object types are \"%c\", \"%c\", \"%c\", \"%c\", \"%c\"." +msgstr "유효한 객체 형태는 \"%c\", \"%c\", \"%c\", \"%c\", \"%c\"." + +#: catalog/objectaddress.c:1903 #, c-format msgid "default ACL for user \"%s\" in schema \"%s\" on %s does not exist" msgstr "\"%s\" 사용자용 기본 ACL 없음. (해당 스키마: \"%s\", 해당 객체: %s)" -#: catalog/objectaddress.c:1846 +#: catalog/objectaddress.c:1908 #, c-format msgid "default ACL for user \"%s\" on %s does not exist" msgstr "\"%s\" 사용자용 기본 ACL 없음. (해당 객체: %s)" -#: catalog/objectaddress.c:1873 catalog/objectaddress.c:1929 -#: catalog/objectaddress.c:1984 +#: catalog/objectaddress.c:1935 catalog/objectaddress.c:1993 +#: catalog/objectaddress.c:2048 #, c-format msgid "name or argument lists may not contain nulls" msgstr "이름이나 인자 목록에는 null이 포함되지 않아야 함" -#: catalog/objectaddress.c:1905 +#: catalog/objectaddress.c:1969 #, c-format msgid "unsupported object type \"%s\"" msgstr "\"%s\" 형 지원하지 않음" -#: catalog/objectaddress.c:1925 catalog/objectaddress.c:1943 +#: catalog/objectaddress.c:1989 catalog/objectaddress.c:2007 +#: catalog/objectaddress.c:2145 #, c-format msgid "name list length must be exactly %d" msgstr "이름 목록 길이는 %d 이어야 합니다." 
-#: catalog/objectaddress.c:1947 +#: catalog/objectaddress.c:2011 #, c-format msgid "large object OID may not be null" msgstr "대형 객체 OID는 null 값을 사용할 수 없음" -#: catalog/objectaddress.c:1956 catalog/objectaddress.c:2016 -#: catalog/objectaddress.c:2023 +#: catalog/objectaddress.c:2020 catalog/objectaddress.c:2081 +#: catalog/objectaddress.c:2088 #, c-format msgid "name list length must be at least %d" msgstr "이름 목록 길이는 적어도 %d 개 이상이어야 함" -#: catalog/objectaddress.c:2009 catalog/objectaddress.c:2029 +#: catalog/objectaddress.c:2074 catalog/objectaddress.c:2094 #, c-format msgid "argument list length must be exactly %d" msgstr "인자 목록은 %d 개여야 함" -#: catalog/objectaddress.c:2165 libpq/be-fsstubs.c:350 +#: catalog/objectaddress.c:2320 libpq/be-fsstubs.c:350 #, c-format msgid "must be owner of large object %u" msgstr "%u 대경 객체의 소유주여야만 합니다" -#: catalog/objectaddress.c:2180 commands/functioncmds.c:1426 +#: catalog/objectaddress.c:2335 commands/functioncmds.c:1440 #, c-format msgid "must be owner of type %s or type %s" msgstr "%s, %s 자료형의 소유주여야합니다" -#: catalog/objectaddress.c:2220 catalog/objectaddress.c:2237 +#: catalog/objectaddress.c:2385 catalog/objectaddress.c:2402 #, c-format msgid "must be superuser" msgstr "슈퍼유져여야함" -#: catalog/objectaddress.c:2227 +#: catalog/objectaddress.c:2392 #, c-format msgid "must have CREATEROLE privilege" msgstr "CREATEROLE 권한이 있어야 함" -#: catalog/objectaddress.c:2302 +#: catalog/objectaddress.c:2471 #, c-format msgid "unrecognized object type \"%s\"" msgstr "알 수 없는 객체 형태 \"%s\"" -#: catalog/objectaddress.c:2497 +#: catalog/objectaddress.c:2666 #, c-format msgid " column %s" msgstr " %s 열" -#: catalog/objectaddress.c:2503 +#: catalog/objectaddress.c:2672 #, c-format msgid "function %s" msgstr "%s 함수" -#: catalog/objectaddress.c:2508 +#: catalog/objectaddress.c:2677 #, c-format msgid "type %s" msgstr "%s 자료형" -#: catalog/objectaddress.c:2538 +#: catalog/objectaddress.c:2707 #, c-format msgid "cast from %s to %s" msgstr "%s 자료형을 %s 자료형으로 바꾸는 작업" -#: catalog/objectaddress.c:2558 +#: catalog/objectaddress.c:2727 #, c-format msgid "collation %s" msgstr "collation %s" -#: catalog/objectaddress.c:2582 +#: catalog/objectaddress.c:2751 #, c-format msgid "constraint %s on %s" msgstr "%s 제약 조건(해당 객체: %s)" -#: catalog/objectaddress.c:2588 +#: catalog/objectaddress.c:2757 #, c-format msgid "constraint %s" msgstr "%s 제약 조건" -#: catalog/objectaddress.c:2605 +#: catalog/objectaddress.c:2774 #, c-format msgid "conversion %s" msgstr "%s 문자코드변환규칙" -#: catalog/objectaddress.c:2642 +#: catalog/objectaddress.c:2811 #, c-format msgid "default for %s" msgstr "default for %s" -#: catalog/objectaddress.c:2651 +#: catalog/objectaddress.c:2820 #, c-format msgid "language %s" msgstr "프로시주얼 언어 %s" -#: catalog/objectaddress.c:2656 +#: catalog/objectaddress.c:2825 #, c-format msgid "large object %u" msgstr "%u 대형 객체" -#: catalog/objectaddress.c:2661 +#: catalog/objectaddress.c:2830 #, c-format msgid "operator %s" msgstr "%s 연산자" -#: catalog/objectaddress.c:2693 +#: catalog/objectaddress.c:2862 #, c-format msgid "operator class %s for access method %s" msgstr "%s 연산자 클래스, %s 인덱스 액세스 방법" +#: catalog/objectaddress.c:2885 +#, c-format +msgid "access method %s" +msgstr "%s 접근 방법" + #. translator: %d is the operator strategy (a number), the #. first two %s's are data type names, the third %s is the #. description of the operator family, and the last %s is the #. textual form of the operator with arguments. 
-#: catalog/objectaddress.c:2743 +#: catalog/objectaddress.c:2927 #, c-format msgid "operator %d (%s, %s) of %s: %s" -msgstr "%d (%s, %s) 연산자 (연산자 가족: %s): %s" +msgstr "%d (%s, %s) 연산자 (연산자 패밀리: %s): %s" #. translator: %d is the function number, the first two %s's #. are data type names, the third %s is the description of the #. operator family, and the last %s is the textual form of the #. function with arguments. -#: catalog/objectaddress.c:2793 +#: catalog/objectaddress.c:2977 #, c-format msgid "function %d (%s, %s) of %s: %s" -msgstr "%d (%s, %s) 함수 (연산자 가족: %s): %s" +msgstr "%d (%s, %s) 함수 (연산자 패밀리: %s): %s" -#: catalog/objectaddress.c:2833 +#: catalog/objectaddress.c:3017 #, c-format msgid "rule %s on " msgstr "%s 룰(rule), 해당 테이블: " -#: catalog/objectaddress.c:2855 -#, c-format -msgid "transform for %s language %s" -msgstr "%s 형 변환자, 대상언어: %s" - -#: catalog/objectaddress.c:2889 +#: catalog/objectaddress.c:3052 #, c-format msgid "trigger %s on " msgstr "%s 트리거, 해당 테이블: " -#: catalog/objectaddress.c:2906 +#: catalog/objectaddress.c:3069 #, c-format msgid "schema %s" msgstr "%s 스키마" -#: catalog/objectaddress.c:2919 +#: catalog/objectaddress.c:3086 +#, c-format +msgid "statistics object %s" +msgstr "%s 통계정보 객체" + +#: catalog/objectaddress.c:3102 #, c-format msgid "text search parser %s" msgstr "%s 전문 검색 파서" -#: catalog/objectaddress.c:2934 +#: catalog/objectaddress.c:3117 #, c-format msgid "text search dictionary %s" msgstr "%s 전문 검색 사전" -#: catalog/objectaddress.c:2949 +#: catalog/objectaddress.c:3132 #, c-format msgid "text search template %s" msgstr "%s 전문 검색 템플릿" -#: catalog/objectaddress.c:2964 +#: catalog/objectaddress.c:3147 #, c-format msgid "text search configuration %s" msgstr "%s 전문 검색 구성" -#: catalog/objectaddress.c:2972 +#: catalog/objectaddress.c:3155 #, c-format msgid "role %s" msgstr "%s 롤" -#: catalog/objectaddress.c:2985 +#: catalog/objectaddress.c:3168 #, c-format msgid "database %s" msgstr "%s 데이터베이스" -#: catalog/objectaddress.c:2997 +#: catalog/objectaddress.c:3180 #, c-format msgid "tablespace %s" msgstr "%s 테이블스페이스" -#: catalog/objectaddress.c:3006 +#: catalog/objectaddress.c:3189 #, c-format msgid "foreign-data wrapper %s" msgstr "%s 외부 데이터 래퍼" -#: catalog/objectaddress.c:3015 +#: catalog/objectaddress.c:3198 #, c-format msgid "server %s" msgstr "%s 서버" -#: catalog/objectaddress.c:3043 +#: catalog/objectaddress.c:3226 #, c-format msgid "user mapping for %s on server %s" msgstr "%s에 대한 사용자 매핑, 해당 서버: %s" -#: catalog/objectaddress.c:3078 +#: catalog/objectaddress.c:3261 #, c-format msgid "default privileges on new relations belonging to role %s" msgstr "%s 롤이 새 테이블을 만들 때 기본적으로 지정할 접근 권한" -#: catalog/objectaddress.c:3083 +#: catalog/objectaddress.c:3266 #, c-format msgid "default privileges on new sequences belonging to role %s" msgstr "%s 롤이 새 시퀀스를 만들 때 기본적으로 지정할 접근 권한" -#: catalog/objectaddress.c:3088 +#: catalog/objectaddress.c:3271 #, c-format msgid "default privileges on new functions belonging to role %s" msgstr "%s 롤이 새 함수를 만들 때 기본적으로 지정할 접근 권한" -#: catalog/objectaddress.c:3093 +#: catalog/objectaddress.c:3276 #, c-format msgid "default privileges on new types belonging to role %s" msgstr "%s 롤이 새 자료형을 만들 때 기본적으로 지정할 접근 권한" -#: catalog/objectaddress.c:3099 +#: catalog/objectaddress.c:3281 +#, c-format +msgid "default privileges on new schemas belonging to role %s" +msgstr "%s 롤이 새 시퀀스를 만들 때 기본적으로 지정할 접근 권한" + +#: catalog/objectaddress.c:3287 #, c-format msgid "default privileges belonging to role %s" msgstr "%s 롤의 기본 접근 권한" -#: catalog/objectaddress.c:3107 +#: 
catalog/objectaddress.c:3295 #, c-format msgid " in schema %s" msgstr ", 대상 스키마: %s" -#: catalog/objectaddress.c:3124 +#: catalog/objectaddress.c:3312 #, c-format msgid "extension %s" msgstr "%s 확장 모듈" -#: catalog/objectaddress.c:3137 +#: catalog/objectaddress.c:3325 #, c-format msgid "event trigger %s" msgstr "%s 이벤트 트리거" -#: catalog/objectaddress.c:3169 +#: catalog/objectaddress.c:3357 #, c-format msgid "policy %s on " msgstr "%s 정책 " -#: catalog/objectaddress.c:3187 +#: catalog/objectaddress.c:3368 #, c-format -msgid "access method %s" -msgstr "%s 접근 방법" +msgid "publication %s" +msgstr "%s 발행" + +#: catalog/objectaddress.c:3388 +#, c-format +msgid "publication table %s in publication %s" +msgstr "" + +#: catalog/objectaddress.c:3396 +#, c-format +msgid "subscription %s" +msgstr "%s 구독" + +#: catalog/objectaddress.c:3414 +#, c-format +msgid "transform for %s language %s" +msgstr "%s 형 변환자, 대상언어: %s" -#: catalog/objectaddress.c:3247 +#: catalog/objectaddress.c:3475 #, c-format msgid "table %s" msgstr "%s 테이블" -#: catalog/objectaddress.c:3251 +#: catalog/objectaddress.c:3479 #, c-format msgid "index %s" msgstr "%s 인덱스" -#: catalog/objectaddress.c:3255 +#: catalog/objectaddress.c:3483 #, c-format msgid "sequence %s" msgstr "%s 시퀀스" -#: catalog/objectaddress.c:3259 +#: catalog/objectaddress.c:3487 #, c-format msgid "toast table %s" msgstr "%s 토스트 테이블" -#: catalog/objectaddress.c:3263 +#: catalog/objectaddress.c:3491 #, c-format msgid "view %s" msgstr "%s 뷰" -#: catalog/objectaddress.c:3267 +#: catalog/objectaddress.c:3495 #, c-format msgid "materialized view %s" msgstr "%s 구체화된 뷰" -#: catalog/objectaddress.c:3271 +#: catalog/objectaddress.c:3499 #, c-format msgid "composite type %s" msgstr "%s 복합 자료형" -#: catalog/objectaddress.c:3275 +#: catalog/objectaddress.c:3503 #, c-format msgid "foreign table %s" msgstr "%s 외부 테이블" -#: catalog/objectaddress.c:3280 +#: catalog/objectaddress.c:3508 #, c-format msgid "relation %s" msgstr "%s 릴레이션" -#: catalog/objectaddress.c:3317 +#: catalog/objectaddress.c:3545 #, c-format msgid "operator family %s for access method %s" msgstr "%s 연산자 페밀리, 접근 방법: %s" +#: catalog/objectaddress.c:4914 +#, c-format +msgid "%s in publication %s" +msgstr "%s (해당 발행: %s)" + +#: catalog/partition.c:728 +#, c-format +msgid "empty range bound specified for partition \"%s\"" +msgstr "" + +#: catalog/partition.c:730 +#, c-format +msgid "Specified lower bound %s is greater than or equal to upper bound %s." +msgstr "하한값(%s)은 상한값(%s)과 같거나 커야 합니다" + +#: catalog/partition.c:814 +#, c-format +msgid "partition \"%s\" would overlap partition \"%s\"" +msgstr "\"%s\" 파티션이 \"%s\" 파티션과 겹칩니다." + +#: catalog/partition.c:927 catalog/partition.c:1110 commands/analyze.c:1446 +#: commands/copy.c:2489 commands/tablecmds.c:8885 +#: executor/execExprInterp.c:2853 executor/execMain.c:1906 +#: executor/execMain.c:1984 executor/execMain.c:2032 executor/execMain.c:2142 +#: executor/execMain.c:3321 executor/nodeModifyTable.c:1533 +msgid "could not convert row type" +msgstr "로우 자료형을 변환 할 수 없음" + #: catalog/pg_aggregate.c:125 #, c-format msgid "aggregates cannot have more than %d argument" @@ -4900,7 +4799,7 @@ msgstr "" msgid "return type of inverse transition function %s is not %s" msgstr "%s inverse transition 함수의 반환 자료형이 %s 형이 아닙니다." 
-#: catalog/pg_aggregate.c:351 executor/nodeWindowAgg.c:2334 +#: catalog/pg_aggregate.c:351 executor/nodeWindowAgg.c:2298 #, c-format msgid "" "strictness of aggregate's forward and inverse transition functions must match" @@ -4918,9 +4817,9 @@ msgstr "%s combine 함수의 반환 자료형이 %s 형이 아닙니다" #: catalog/pg_aggregate.c:436 #, c-format -msgid "" -"combine function with \"%s\" transition type must not be declared STRICT" +msgid "combine function with transition type %s must not be declared STRICT" msgstr "" +"%s 자료형을 전달 값으로 사용하는 조합 함수는 STRICT 속성을 가져야 합니다" #: catalog/pg_aggregate.c:455 #, c-format @@ -4932,7 +4831,7 @@ msgstr "%s serialization 함수의 반환 자료형이 %s 형이 아닙니다." msgid "return type of deserialization function %s is not %s" msgstr "%s deserialization 함수의 반환 자료형이 %s 형이 아닙니다" -#: catalog/pg_aggregate.c:491 catalog/pg_proc.c:246 catalog/pg_proc.c:253 +#: catalog/pg_aggregate.c:491 catalog/pg_proc.c:243 catalog/pg_proc.c:250 #, c-format msgid "cannot determine result data type" msgstr "결과 자료형을 결정할 수 없음" @@ -4945,12 +4844,12 @@ msgid "" msgstr "" "다형 자료형을 반환하는 집계에는 다형 자료형 인자가 하나 이상 있어야 합니다." -#: catalog/pg_aggregate.c:504 catalog/pg_proc.c:259 +#: catalog/pg_aggregate.c:504 catalog/pg_proc.c:256 #, c-format msgid "unsafe use of pseudo-type \"internal\"" msgstr "\"internal\" 의사-자료형의 사용이 안전하지 않습니다" -#: catalog/pg_aggregate.c:505 catalog/pg_proc.c:260 +#: catalog/pg_aggregate.c:505 catalog/pg_proc.c:257 #, c-format msgid "" "A function returning \"internal\" must have at least one \"internal\" " @@ -4971,65 +4870,75 @@ msgstr "" msgid "sort operator can only be specified for single-argument aggregates" msgstr "정렬 연산자는 단일 인자 집계에만 지정할 수 있음" -#: catalog/pg_aggregate.c:812 commands/typecmds.c:1705 -#: commands/typecmds.c:1756 commands/typecmds.c:1787 commands/typecmds.c:1810 -#: commands/typecmds.c:1831 commands/typecmds.c:1858 commands/typecmds.c:1885 -#: commands/typecmds.c:1962 commands/typecmds.c:2004 parser/parse_func.c:364 -#: parser/parse_func.c:393 parser/parse_func.c:418 parser/parse_func.c:432 -#: parser/parse_func.c:507 parser/parse_func.c:518 parser/parse_func.c:1923 +#: catalog/pg_aggregate.c:810 commands/typecmds.c:1698 +#: commands/typecmds.c:1749 commands/typecmds.c:1780 commands/typecmds.c:1803 +#: commands/typecmds.c:1824 commands/typecmds.c:1851 commands/typecmds.c:1878 +#: commands/typecmds.c:1955 commands/typecmds.c:1997 parser/parse_func.c:369 +#: parser/parse_func.c:398 parser/parse_func.c:423 parser/parse_func.c:437 +#: parser/parse_func.c:512 parser/parse_func.c:523 parser/parse_func.c:1977 #, c-format msgid "function %s does not exist" msgstr "%s 이름의 함수가 없음" -#: catalog/pg_aggregate.c:818 +#: catalog/pg_aggregate.c:816 #, c-format msgid "function %s returns a set" msgstr "%s 함수는 한 set을 리턴함" -#: catalog/pg_aggregate.c:833 +#: catalog/pg_aggregate.c:831 #, c-format msgid "function %s must accept VARIADIC ANY to be used in this aggregate" msgstr "%s 함수가 이 집계작업에 사용되려면 VARIADIC ANY 형을 수용해야 합니다." 
-#: catalog/pg_aggregate.c:857 +#: catalog/pg_aggregate.c:855 #, c-format msgid "function %s requires run-time type coercion" msgstr "%s 함수는 run-time type coercion을 필요로 함" -#: catalog/pg_collation.c:77 +#: catalog/pg_collation.c:93 catalog/pg_collation.c:140 #, c-format -msgid "collation \"%s\" for encoding \"%s\" already exists" -msgstr "\"%s\" 정렬규칙이 \"%s\" 인코딩에 이미 지정되어 있습니다" +msgid "collation \"%s\" already exists, skipping" +msgstr "\"%s\" 이름의 정렬규칙이 이미 있습니다, 건너뜀" + +#: catalog/pg_collation.c:95 +#, c-format +msgid "collation \"%s\" for encoding \"%s\" already exists, skipping" +msgstr "\"%s\" 정렬규칙이 \"%s\" 인코딩에 이미 지정되어 있습니다, 건너뜀" -#: catalog/pg_collation.c:91 +#: catalog/pg_collation.c:103 catalog/pg_collation.c:147 #, c-format msgid "collation \"%s\" already exists" msgstr "\"%s\" 정렬규칙이 이미 있습니다" -#: catalog/pg_constraint.c:663 +#: catalog/pg_collation.c:105 +#, c-format +msgid "collation \"%s\" for encoding \"%s\" already exists" +msgstr "\"%s\" 정렬규칙이 \"%s\" 인코딩에 이미 지정되어 있습니다" + +#: catalog/pg_constraint.c:658 #, c-format msgid "constraint \"%s\" for domain %s already exists" msgstr "\"%s\" 제약 조건이 %s 도메인에 이미 지정되어 있습니다" -#: catalog/pg_constraint.c:797 +#: catalog/pg_constraint.c:788 #, c-format msgid "table \"%s\" has multiple constraints named \"%s\"" msgstr "\"%s\" 테이블에는 \"%s\" 이름의 제약 조건이 여러개 있습니다" -#: catalog/pg_constraint.c:809 +#: catalog/pg_constraint.c:800 #, c-format msgid "constraint \"%s\" for table \"%s\" does not exist" msgstr "\"%s\" 제약 조건은 \"%s\" 테이블에 없음" -#: catalog/pg_constraint.c:855 +#: catalog/pg_constraint.c:846 #, c-format -msgid "domain \"%s\" has multiple constraints named \"%s\"" -msgstr "\"%s\" 도메인에는 \"%s\" 이름의 제약 조건이 여러개 있습니다" +msgid "domain %s has multiple constraints named \"%s\"" +msgstr "%s 도메인에 \"%s\" 이름의 제약 조건이 여러개 있습니다" -#: catalog/pg_constraint.c:867 +#: catalog/pg_constraint.c:858 #, c-format -msgid "constraint \"%s\" for domain \"%s\" does not exist" -msgstr "\"%s\" 제약 조건은 \"%s\" 도메인에 없음" +msgid "constraint \"%s\" for domain %s does not exist" +msgstr "\"%s\" 제약 조건은 %s 도메인에 없음" #: catalog/pg_conversion.c:66 #, c-format @@ -5041,131 +4950,131 @@ msgstr "\"%s\" 이름의 변환규칙(conversion)이 이미 있음" msgid "default conversion for %s to %s already exists" msgstr "%s 코드에서 %s 코드로 변환하는 기본 변환규칙(conversion)은 이미 있음" -#: catalog/pg_depend.c:165 commands/extension.c:3029 +#: catalog/pg_depend.c:163 commands/extension.c:3218 #, c-format msgid "%s is already a member of extension \"%s\"" msgstr "%s 객체는 \"%s\" 확장모듈에 이미 구성원입니다" -#: catalog/pg_depend.c:324 +#: catalog/pg_depend.c:322 #, c-format msgid "cannot remove dependency on %s because it is a system object" msgstr "%s 의존객체들은 시스템 객체이기 때문에 삭제 될 수 없습니다" -#: catalog/pg_enum.c:115 catalog/pg_enum.c:202 +#: catalog/pg_enum.c:115 catalog/pg_enum.c:201 catalog/pg_enum.c:488 #, c-format msgid "invalid enum label \"%s\"" msgstr "\"%s\" 열거형 라벨이 잘못됨" -#: catalog/pg_enum.c:116 catalog/pg_enum.c:203 +#: catalog/pg_enum.c:116 catalog/pg_enum.c:202 catalog/pg_enum.c:489 #, c-format msgid "Labels must be %d characters or less." msgstr "라벨은 %d자 이하여야 합니다." 
-#: catalog/pg_enum.c:231 +#: catalog/pg_enum.c:230 #, c-format msgid "enum label \"%s\" already exists, skipping" msgstr "\"%s\" 이름의 열거형 라벨이 이미 있음, 건너뜀" -#: catalog/pg_enum.c:238 +#: catalog/pg_enum.c:237 catalog/pg_enum.c:532 #, c-format msgid "enum label \"%s\" already exists" msgstr "\"%s\" 이름의 열거형 라벨이 이미 있음" -#: catalog/pg_enum.c:293 +#: catalog/pg_enum.c:292 catalog/pg_enum.c:527 #, c-format msgid "\"%s\" is not an existing enum label" msgstr "\"%s\" 열거형 라벨이 없음" -#: catalog/pg_enum.c:349 +#: catalog/pg_enum.c:350 #, c-format msgid "pg_enum OID value not set when in binary upgrade mode" msgstr "이진 업그레이드 작업 때 pg_enum OID 값이 지정되지 않았습니다" -#: catalog/pg_enum.c:359 +#: catalog/pg_enum.c:360 #, c-format msgid "ALTER TYPE ADD BEFORE/AFTER is incompatible with binary upgrade" msgstr "" "ALTER TYPE ADD BEFORE/AFTER 구문은 이진 업그레이드 작업에서 호환하지 않습니다" -#: catalog/pg_namespace.c:61 commands/schemacmds.c:246 +#: catalog/pg_namespace.c:63 commands/schemacmds.c:264 #, c-format msgid "schema \"%s\" already exists" msgstr "\"%s\" 이름의 스키마(schema)가 이미 있음" -#: catalog/pg_operator.c:219 catalog/pg_operator.c:360 +#: catalog/pg_operator.c:219 catalog/pg_operator.c:358 #, c-format msgid "\"%s\" is not a valid operator name" msgstr "\"%s\" 타당한 연산자 이름이 아님" -#: catalog/pg_operator.c:369 +#: catalog/pg_operator.c:367 #, c-format msgid "only binary operators can have commutators" msgstr "_^_ 바이너리 연산자만이 commutator를 가질 수 있음" -#: catalog/pg_operator.c:373 commands/operatorcmds.c:485 +#: catalog/pg_operator.c:371 commands/operatorcmds.c:482 #, c-format msgid "only binary operators can have join selectivity" msgstr "_^_ 바이너리 연산자만이 join selectivity를 가질 수 있음" -#: catalog/pg_operator.c:377 +#: catalog/pg_operator.c:375 #, c-format msgid "only binary operators can merge join" msgstr "_^_ 바이너리 연산자만이 merge join할 수 있음" -#: catalog/pg_operator.c:381 +#: catalog/pg_operator.c:379 #, c-format msgid "only binary operators can hash" msgstr "_^_ 바이너리 연산자만이 해시할 수 있음" -#: catalog/pg_operator.c:392 +#: catalog/pg_operator.c:390 #, c-format msgid "only boolean operators can have negators" msgstr "부울 연산자만 부정어를 포함할 수 있음" -#: catalog/pg_operator.c:396 commands/operatorcmds.c:493 +#: catalog/pg_operator.c:394 commands/operatorcmds.c:490 #, c-format msgid "only boolean operators can have restriction selectivity" msgstr "부울 연산자만 제한 선택을 포함할 수 있음" -#: catalog/pg_operator.c:400 commands/operatorcmds.c:497 +#: catalog/pg_operator.c:398 commands/operatorcmds.c:494 #, c-format msgid "only boolean operators can have join selectivity" msgstr "부울 연산자만 조인 선택을 포함할 수 있음" -#: catalog/pg_operator.c:404 +#: catalog/pg_operator.c:402 #, c-format msgid "only boolean operators can merge join" msgstr "부울 연산자만 머지 조인을 지정할 수 있음" -#: catalog/pg_operator.c:408 +#: catalog/pg_operator.c:406 #, c-format msgid "only boolean operators can hash" msgstr "부울 연산자만 해시를 지정할 수 있음" -#: catalog/pg_operator.c:420 +#: catalog/pg_operator.c:418 #, c-format msgid "operator %s already exists" msgstr "%s 연산자가 이미 있음" -#: catalog/pg_operator.c:617 +#: catalog/pg_operator.c:612 #, c-format msgid "operator cannot be its own negator or sort operator" msgstr "연산자는 자신의 negator나 sort 연산자가 될 수 없습니다" -#: catalog/pg_proc.c:134 parser/parse_func.c:1947 parser/parse_func.c:1987 +#: catalog/pg_proc.c:131 parser/parse_func.c:2001 parser/parse_func.c:2041 #, c-format msgid "functions cannot have more than %d argument" msgid_plural "functions cannot have more than %d arguments" msgstr[0] "함수는 %d개 이상의 인자를 사용할 수 없음" -#: catalog/pg_proc.c:247 +#: catalog/pg_proc.c:244 #, c-format msgid "" "A function 
returning a polymorphic type must have at least one polymorphic " "argument." msgstr "다형 형식을 반환하는 함수에는 다형 인자가 하나 이상 있어야 합니다." -#: catalog/pg_proc.c:254 +#: catalog/pg_proc.c:251 #, c-format msgid "" "A function returning \"anyrange\" must have at least one \"anyrange\" " @@ -5174,88 +5083,139 @@ msgstr "" "\"anyrange\" 자료형을 반환하는 함수는 적어도 하나 이상의 인자가 \"anyrange\" " "자료형이어야합니다." -#: catalog/pg_proc.c:272 +#: catalog/pg_proc.c:269 #, c-format msgid "\"%s\" is already an attribute of type %s" msgstr "\"%s\"은(는) 이미 %s 형식의 속성임" -#: catalog/pg_proc.c:403 +#: catalog/pg_proc.c:400 #, c-format msgid "function \"%s\" already exists with same argument types" msgstr "이미 같은 인자 자료형을 사용하는 \"%s\" 함수가 있습니다" -#: catalog/pg_proc.c:417 catalog/pg_proc.c:440 +#: catalog/pg_proc.c:414 catalog/pg_proc.c:437 #, c-format msgid "cannot change return type of existing function" msgstr "이미 있는 함수의 리턴 자료형은 바꿀 수 없습니다" -#: catalog/pg_proc.c:418 catalog/pg_proc.c:442 catalog/pg_proc.c:485 -#: catalog/pg_proc.c:509 catalog/pg_proc.c:536 +#: catalog/pg_proc.c:415 catalog/pg_proc.c:439 catalog/pg_proc.c:482 +#: catalog/pg_proc.c:506 catalog/pg_proc.c:532 #, c-format msgid "Use DROP FUNCTION %s first." msgstr "먼저 DROP FUNCTION %s 명령으로 함수를 삭제 하세요" -#: catalog/pg_proc.c:441 +#: catalog/pg_proc.c:438 #, c-format msgid "Row type defined by OUT parameters is different." msgstr "OUT 매개 변수에 정의된 행 형식이 다릅니다." -#: catalog/pg_proc.c:483 +#: catalog/pg_proc.c:480 #, c-format msgid "cannot change name of input parameter \"%s\"" msgstr "\"%s\" 입력 매개 변수 이름을 바꿀 수 없음" -#: catalog/pg_proc.c:508 +#: catalog/pg_proc.c:505 #, c-format msgid "cannot remove parameter defaults from existing function" msgstr "기존 함수에서 매개 변수 기본 값을 제거할 수 없음" -#: catalog/pg_proc.c:535 +#: catalog/pg_proc.c:531 #, c-format msgid "cannot change data type of existing parameter default value" msgstr "기존 매개 변수 기본 값의 데이터 형식을 바꿀 수 없음" -#: catalog/pg_proc.c:548 +#: catalog/pg_proc.c:544 #, c-format msgid "function \"%s\" is an aggregate function" msgstr "\"%s\" 함수는 집계 함수임" -#: catalog/pg_proc.c:553 +#: catalog/pg_proc.c:549 #, c-format msgid "function \"%s\" is not an aggregate function" msgstr "\"%s\" 함수는 집계 함수가 아님" -#: catalog/pg_proc.c:561 +#: catalog/pg_proc.c:557 #, c-format msgid "function \"%s\" is a window function" msgstr "\"%s\" 함수는 윈도우 함수임" -#: catalog/pg_proc.c:566 +#: catalog/pg_proc.c:562 #, c-format msgid "function \"%s\" is not a window function" msgstr "\"%s\" 함수는 윈도우 함수가 아님" -#: catalog/pg_proc.c:774 +#: catalog/pg_proc.c:768 #, c-format msgid "there is no built-in function named \"%s\"" msgstr "\"%s\" 이름의 내장 함수가 없음" -#: catalog/pg_proc.c:872 +#: catalog/pg_proc.c:866 #, c-format msgid "SQL functions cannot return type %s" msgstr "SQL 함수는 %s 자료형을 리턴할 수 없음" -#: catalog/pg_proc.c:887 +#: catalog/pg_proc.c:881 #, c-format msgid "SQL functions cannot have arguments of type %s" msgstr "SQL 함수의 인자로 %s 자료형은 사용될 수 없습니다" -#: catalog/pg_proc.c:973 executor/functions.c:1431 +#: catalog/pg_proc.c:968 executor/functions.c:1429 #, c-format msgid "SQL function \"%s\"" msgstr "\"%s\" SQL 함수" -#: catalog/pg_shdepend.c:694 +#: catalog/pg_publication.c:57 commands/trigger.c:197 +#, c-format +msgid "\"%s\" is a partitioned table" +msgstr "\"%s\" 객체는 파티션된 테이블임" + +#: catalog/pg_publication.c:59 +#, c-format +msgid "Adding partitioned tables to publications is not supported." +msgstr "파티션된 테이블을 발행하는 것은 지원하지 않습니다" + +#: catalog/pg_publication.c:60 +#, c-format +msgid "You can add the table partitions individually." +msgstr "파티션 테이블을 각각 발행에 추가할 수는 있습니다." 
+ +#: catalog/pg_publication.c:68 +#, c-format +msgid "Only tables can be added to publications." +msgstr "테이블 객체만 발행에 추가할 수 있습니다." + +#: catalog/pg_publication.c:74 +#, c-format +msgid "\"%s\" is a system table" +msgstr "\"%s\" 객체는 시스템 테이블입니다." + +#: catalog/pg_publication.c:76 +#, c-format +msgid "System tables cannot be added to publications." +msgstr "시스템 테이블은 발행에 추가할 수 없습니다." + +#: catalog/pg_publication.c:82 +#, c-format +msgid "table \"%s\" cannot be replicated" +msgstr "\"%s\" 테이블은 복제될 수 없음" + +#: catalog/pg_publication.c:84 +#, c-format +msgid "Temporary and unlogged relations cannot be replicated." +msgstr "임시 테이블, unlogged 테이블은 복제될 수 없음" + +#: catalog/pg_publication.c:166 +#, c-format +msgid "relation \"%s\" is already member of publication \"%s\"" +msgstr "\"%s\" 릴레이션은 이미 \"%s\" 발행에 포함되어 있습니다" + +#: catalog/pg_publication.c:393 catalog/pg_publication.c:414 +#: commands/publicationcmds.c:401 commands/publicationcmds.c:702 +#, c-format +msgid "publication \"%s\" does not exist" +msgstr "\"%s\" 이름의 발행은 없습니다" + +#: catalog/pg_shdepend.c:692 #, c-format msgid "" "\n" @@ -5265,44 +5225,44 @@ msgid_plural "" "and objects in %d other databases (see server log for list)" msgstr[0] "" -#: catalog/pg_shdepend.c:1006 +#: catalog/pg_shdepend.c:998 #, c-format msgid "role %u was concurrently dropped" msgstr "%u 롤이 동시에 삭제되었음" -#: catalog/pg_shdepend.c:1025 +#: catalog/pg_shdepend.c:1017 #, c-format msgid "tablespace %u was concurrently dropped" msgstr "%u 테이블스페이스는 현재 삭제되었습니다" -#: catalog/pg_shdepend.c:1040 +#: catalog/pg_shdepend.c:1032 #, c-format msgid "database %u was concurrently dropped" msgstr "%u 데이터베이스는 현재 삭제되었습니다" -#: catalog/pg_shdepend.c:1085 +#: catalog/pg_shdepend.c:1077 #, c-format msgid "owner of %s" msgstr "%s 객체의 소유주" -#: catalog/pg_shdepend.c:1087 +#: catalog/pg_shdepend.c:1079 #, c-format msgid "privileges for %s" msgstr "\"%s\"에 대한 권한" -#: catalog/pg_shdepend.c:1089 +#: catalog/pg_shdepend.c:1081 #, c-format msgid "target of %s" msgstr "%s 객체 대상" #. translator: %s will always be "database %s" -#: catalog/pg_shdepend.c:1097 +#: catalog/pg_shdepend.c:1089 #, c-format msgid "%d object in %s" msgid_plural "%d objects in %s" msgstr[0] "%d 객체(데이터베이스: %s)" -#: catalog/pg_shdepend.c:1208 +#: catalog/pg_shdepend.c:1200 #, c-format msgid "" "cannot drop objects owned by %s because they are required by the database " @@ -5311,7 +5271,7 @@ msgstr "" "%s 소유주의 객체 삭제는 그 데이터베이스 시스템에서 필요하기 때문에 삭제 될 " "수 없음" -#: catalog/pg_shdepend.c:1323 +#: catalog/pg_shdepend.c:1315 #, c-format msgid "" "cannot reassign ownership of objects owned by %s because they are required " @@ -5320,44 +5280,50 @@ msgstr "" "%s 소유주의 객체 삭제는 그 데이터베이스 시스템에서 필요하기 때문에 삭제 될 " "수 없음" -#: catalog/pg_type.c:136 catalog/pg_type.c:454 +#: catalog/pg_subscription.c:176 commands/subscriptioncmds.c:633 +#: commands/subscriptioncmds.c:843 commands/subscriptioncmds.c:1067 +#, c-format +msgid "subscription \"%s\" does not exist" +msgstr "\"%s\" 이름의 구독은 없습니다." 
+ +#: catalog/pg_type.c:136 catalog/pg_type.c:452 #, c-format msgid "pg_type OID value not set when in binary upgrade mode" msgstr "이진 업그레이드 작업 때 pg_type OID 값이 지정되지 않았습니다" -#: catalog/pg_type.c:253 +#: catalog/pg_type.c:251 #, c-format msgid "invalid type internal size %d" msgstr "잘못된 자료형의 내부 크기 %d" -#: catalog/pg_type.c:269 catalog/pg_type.c:277 catalog/pg_type.c:285 -#: catalog/pg_type.c:294 +#: catalog/pg_type.c:267 catalog/pg_type.c:275 catalog/pg_type.c:283 +#: catalog/pg_type.c:292 #, c-format msgid "alignment \"%c\" is invalid for passed-by-value type of size %d" msgstr "\"%c\" 정렬은 크기가 %d인 전달 값 형식에 유효하지 않음" -#: catalog/pg_type.c:301 +#: catalog/pg_type.c:299 #, c-format msgid "internal size %d is invalid for passed-by-value type" msgstr "내부 크기 %d은(는) 전달 값 형식에 유효하지 않음" -#: catalog/pg_type.c:310 catalog/pg_type.c:316 +#: catalog/pg_type.c:308 catalog/pg_type.c:314 #, c-format msgid "alignment \"%c\" is invalid for variable-length type" msgstr "\"%c\" 정렬은 가변 길이 형식에 유효하지 않음" -#: catalog/pg_type.c:324 +#: catalog/pg_type.c:322 #, c-format msgid "fixed-size types must have storage PLAIN" msgstr "_^_ 고정크기 자료형은 PLAIN 저장방법을 가져야만 합니다" -#: catalog/pg_type.c:789 +#: catalog/pg_type.c:801 #, c-format msgid "could not form array type name for type \"%s\"" msgstr "\"%s\" 형식의 배열 형식 이름을 생성할 수 없음" -#: catalog/toasting.c:105 commands/indexcmds.c:389 commands/tablecmds.c:4359 -#: commands/tablecmds.c:12047 +#: catalog/toasting.c:105 commands/indexcmds.c:399 commands/tablecmds.c:4734 +#: commands/tablecmds.c:12986 #, c-format msgid "\"%s\" is not a table or materialized view" msgstr "\"%s\" 객체는 테이블도 구체화된 뷰도 아닙니다" @@ -5367,145 +5333,160 @@ msgstr "\"%s\" 객체는 테이블도 구체화된 뷰도 아닙니다" msgid "shared tables cannot be toasted after initdb" msgstr "공유되는 테이블은 initdb 뒤에는 toast 될 수 없습니다" -#: commands/aggregatecmds.c:159 +#: commands/aggregatecmds.c:157 #, c-format msgid "only ordered-set aggregates can be hypothetical" msgstr "순서 있는 세트 집계함수만 가설적일 수 있습니다" -#: commands/aggregatecmds.c:184 +#: commands/aggregatecmds.c:182 #, c-format msgid "aggregate attribute \"%s\" not recognized" msgstr "\"%s\" 속성을 aggregate에서 알 수 없음" -#: commands/aggregatecmds.c:194 +#: commands/aggregatecmds.c:192 #, c-format msgid "aggregate stype must be specified" msgstr "aggregate stype 값을 지정하셔야합니다" -#: commands/aggregatecmds.c:198 +#: commands/aggregatecmds.c:196 #, c-format msgid "aggregate sfunc must be specified" msgstr "aggregate sfunc 값을 지정하셔야합니다" -#: commands/aggregatecmds.c:210 +#: commands/aggregatecmds.c:208 #, c-format msgid "aggregate msfunc must be specified when mstype is specified" msgstr "mstype 옵션을 사용하면 msfunc 옵션도 함께 지정 해야 함" -#: commands/aggregatecmds.c:214 +#: commands/aggregatecmds.c:212 #, c-format msgid "aggregate minvfunc must be specified when mstype is specified" msgstr "mstype 옵션을 사용하면 minvfunc 옵션도 함께 지정 해야 함" -#: commands/aggregatecmds.c:221 +#: commands/aggregatecmds.c:219 #, c-format msgid "aggregate msfunc must not be specified without mstype" msgstr "msfunc 옵션은 mstype 옵션과 함께 사용해야 함" -#: commands/aggregatecmds.c:225 +#: commands/aggregatecmds.c:223 #, c-format msgid "aggregate minvfunc must not be specified without mstype" msgstr "minvfunc 옵션은 mstype 옵션과 함께 사용해야 함" -#: commands/aggregatecmds.c:229 +#: commands/aggregatecmds.c:227 #, c-format msgid "aggregate mfinalfunc must not be specified without mstype" msgstr "mfinalfunc 옵션은 mstype 옵션과 함께 사용해야 함" -#: commands/aggregatecmds.c:233 +#: commands/aggregatecmds.c:231 #, c-format msgid "aggregate msspace must not be specified without mstype" msgstr "msspace 옵션은 mstype 
옵션과 함께 사용해야 함" -#: commands/aggregatecmds.c:237 +#: commands/aggregatecmds.c:235 #, c-format msgid "aggregate minitcond must not be specified without mstype" msgstr "minitcond 옵션은 mstype 옵션과 함께 사용해야 함" -#: commands/aggregatecmds.c:257 +#: commands/aggregatecmds.c:255 #, c-format msgid "aggregate input type must be specified" msgstr "aggregate 입력 자료형을 지정해야 합니다" -#: commands/aggregatecmds.c:287 +#: commands/aggregatecmds.c:285 #, c-format msgid "basetype is redundant with aggregate input type specification" msgstr "집계 입력 형식 지정에서 basetype이 중복됨" -#: commands/aggregatecmds.c:328 commands/aggregatecmds.c:369 +#: commands/aggregatecmds.c:326 commands/aggregatecmds.c:367 #, c-format msgid "aggregate transition data type cannot be %s" msgstr "%s 자료형은 aggregate transition 자료형으로 사용할 수 없습니다" -#: commands/aggregatecmds.c:340 +#: commands/aggregatecmds.c:338 #, c-format msgid "" "serialization functions may be specified only when the aggregate transition " "data type is %s" msgstr "" -#: commands/aggregatecmds.c:350 +#: commands/aggregatecmds.c:348 #, c-format msgid "" "must specify both or neither of serialization and deserialization functions" msgstr "" -#: commands/aggregatecmds.c:415 commands/functioncmds.c:570 +#: commands/aggregatecmds.c:413 commands/functioncmds.c:564 #, c-format msgid "parameter \"parallel\" must be SAFE, RESTRICTED, or UNSAFE" msgstr "\"parallel\" 옵션 값은 SAFE, RESTRICTED, UNSAFE 만 지정할 수 있음" -#: commands/alter.c:80 commands/event_trigger.c:231 +#: commands/alter.c:84 commands/event_trigger.c:234 #, c-format msgid "event trigger \"%s\" already exists" msgstr "\"%s\" 이름의 이벤트 트리거가 이미 있음" -#: commands/alter.c:83 commands/foreigncmds.c:597 +#: commands/alter.c:87 commands/foreigncmds.c:595 #, c-format msgid "foreign-data wrapper \"%s\" already exists" msgstr "\"%s\" 이름의 외부 자료 래퍼가 이미 있음" -#: commands/alter.c:86 commands/foreigncmds.c:890 +#: commands/alter.c:90 commands/foreigncmds.c:898 #, c-format msgid "server \"%s\" already exists" msgstr "\"%s\" 이름의 서버가 이미 있음" -#: commands/alter.c:89 commands/proclang.c:366 +#: commands/alter.c:93 commands/proclang.c:367 #, c-format msgid "language \"%s\" already exists" msgstr "\"%s\" 이름의 프로시주얼 언어가 이미 있습니다" -#: commands/alter.c:112 +#: commands/alter.c:96 commands/publicationcmds.c:170 +#, c-format +msgid "publication \"%s\" already exists" +msgstr "\"%s\" 이름의 발행이 이미 있습니다" + +#: commands/alter.c:99 commands/subscriptioncmds.c:358 +#, c-format +msgid "subscription \"%s\" already exists" +msgstr "\"%s\" 이름의 구독이 이미 있습니다" + +#: commands/alter.c:122 #, c-format msgid "conversion \"%s\" already exists in schema \"%s\"" msgstr "\"%s\" 이름의 변환규칙(conversin)이 \"%s\" 스키마에 이미 있습니다" -#: commands/alter.c:116 +#: commands/alter.c:126 +#, c-format +msgid "statistics object \"%s\" already exists in schema \"%s\"" +msgstr "\"%s\" 이름의 통계정보 객체는 \"%s\" 스키마에 이미 있습니다" + +#: commands/alter.c:130 #, c-format msgid "text search parser \"%s\" already exists in schema \"%s\"" msgstr "\"%s\" 전문 검색 파서가 \"%s\" 스키마 안에 이미 있음" -#: commands/alter.c:120 +#: commands/alter.c:134 #, c-format msgid "text search dictionary \"%s\" already exists in schema \"%s\"" msgstr "\"%s\" 전문 검색 사전이 \"%s\" 스키마 안에 이미 있음" -#: commands/alter.c:124 +#: commands/alter.c:138 #, c-format msgid "text search template \"%s\" already exists in schema \"%s\"" msgstr "\"%s\" 전문 검색 템플릿이 \"%s\" 스키마 안에 이미 있음" -#: commands/alter.c:128 +#: commands/alter.c:142 #, c-format msgid "text search configuration \"%s\" already exists in schema \"%s\"" msgstr "\"%s\" 전문 검색 구성이 \"%s\" 스키마 안에 이미 있음" -#: commands/alter.c:202 +#: 
commands/alter.c:216 #, c-format msgid "must be superuser to rename %s" msgstr "%s 이름 변경 작업은 슈퍼유저만 할 수 있음" -#: commands/alter.c:655 +#: commands/alter.c:709 #, c-format msgid "must be superuser to set schema of %s" msgstr "%s의 스키마 지정은 슈퍼유져여야합니다" @@ -5525,77 +5506,77 @@ msgstr "슈퍼유저만 접근 방법을 만들 수 있습니다." msgid "access method \"%s\" already exists" msgstr "\"%s\" 이름의 인덱스 접근 방법이 이미 있습니다." -#: commands/amcmds.c:124 +#: commands/amcmds.c:123 #, c-format msgid "must be superuser to drop access methods" msgstr "접근 방법은 슈퍼유저만 삭제할 수 있습니다." -#: commands/amcmds.c:175 commands/indexcmds.c:164 commands/indexcmds.c:496 -#: commands/opclasscmds.c:365 commands/opclasscmds.c:790 +#: commands/amcmds.c:174 commands/indexcmds.c:163 commands/indexcmds.c:515 +#: commands/opclasscmds.c:363 commands/opclasscmds.c:777 #, c-format msgid "access method \"%s\" does not exist" msgstr "\"%s\" 인덱스 접근 방법이 없습니다" -#: commands/amcmds.c:251 +#: commands/amcmds.c:250 #, c-format msgid "handler function is not specified" msgstr "핸들러 함수 부분이 빠졌습니다" -#: commands/amcmds.c:263 commands/event_trigger.c:240 -#: commands/foreigncmds.c:489 commands/proclang.c:117 commands/proclang.c:288 -#: commands/trigger.c:441 parser/parse_clause.c:761 +#: commands/amcmds.c:262 commands/event_trigger.c:243 +#: commands/foreigncmds.c:487 commands/proclang.c:117 commands/proclang.c:289 +#: commands/trigger.c:616 parser/parse_clause.c:1011 #, c-format msgid "function %s must return type %s" msgstr "%s 함수는 %s 자료형을 반환해야 함" -#: commands/analyze.c:145 +#: commands/analyze.c:151 #, c-format msgid "skipping analyze of \"%s\" --- lock not available" msgstr "\"%s\" 분석 건너뜀 --- 잠글 수 없음" -#: commands/analyze.c:162 +#: commands/analyze.c:168 #, c-format msgid "skipping \"%s\" --- only superuser can analyze it" msgstr "\"%s\" 분석 건너뜀 --- 슈퍼유저만 분석할 수 있음" -#: commands/analyze.c:166 +#: commands/analyze.c:172 #, c-format msgid "skipping \"%s\" --- only superuser or database owner can analyze it" msgstr "" "\"%s\" 분석 건너뜀 --- 슈퍼유저 또는 데이터베이스 소유주만 분석할 수 있음" -#: commands/analyze.c:170 +#: commands/analyze.c:176 #, c-format msgid "skipping \"%s\" --- only table or database owner can analyze it" msgstr "\"%s\" 건너뜀 --- 테이블이나 데이터베이스 소유주만이 분석할 수 있음" -#: commands/analyze.c:230 +#: commands/analyze.c:236 #, c-format msgid "skipping \"%s\" --- cannot analyze this foreign table" msgstr "\"%s\" 건너뜀 --- 외부 테이블은 분석할 수 없음" -#: commands/analyze.c:241 +#: commands/analyze.c:253 #, c-format msgid "skipping \"%s\" --- cannot analyze non-tables or special system tables" msgstr "" "\"%s\" 건너뜀 --- 테이블이 아니거나, 특수 시스템 테이블들은 분석할 수 없음" -#: commands/analyze.c:320 +#: commands/analyze.c:334 #, c-format msgid "analyzing \"%s.%s\" inheritance tree" msgstr "\"%s.%s\" 상속 관계 분석중" -#: commands/analyze.c:325 +#: commands/analyze.c:339 #, c-format msgid "analyzing \"%s.%s\"" msgstr "\"%s.%s\" 자료 통계 수집 중" -#: commands/analyze.c:650 +#: commands/analyze.c:668 #, c-format msgid "automatic analyze of table \"%s.%s.%s\" system usage: %s" msgstr "\"%s.%s.%s\" 테이블의 시스템 사용 자동 분석: %s" -#: commands/analyze.c:1204 +#: commands/analyze.c:1220 #, c-format msgid "" "\"%s\": scanned %d of %u pages, containing %.0f live rows and %.0f dead " @@ -5604,7 +5585,7 @@ msgstr "" "\"%s\": 탐색한 페이지: %d, 전체페이지: %u, 실자료: %.0f개, 쓰레기자료: %.0f" "개; 표본 추출 자료: %d개, 예상한 총 자료: %.0f개" -#: commands/analyze.c:1283 +#: commands/analyze.c:1300 #, c-format msgid "" "skipping analyze of \"%s.%s\" inheritance tree --- this inheritance tree " @@ -5613,7 +5594,7 @@ msgstr "" "\"%s.%s\" 상속 나무의 통계 수집 건너뜀 --- 이 상속 나무에는 하위 테이블이 없" "음" -#: 
commands/analyze.c:1372 +#: commands/analyze.c:1398 #, c-format msgid "" "skipping analyze of \"%s.%s\" inheritance tree --- this inheritance tree " @@ -5622,10 +5603,6 @@ msgstr "" "\"%s.%s\" 상속 나무의 통계 수집 건너뜀 --- 이 상속 나무에는 통계 수집할 하위 " "테이블이 없음" -#: commands/analyze.c:1420 commands/tablecmds.c:8079 executor/execQual.c:2927 -msgid "could not convert row type" -msgstr "로우 자료형을 변환 할 수 없음" - #: commands/async.c:555 #, c-format msgid "channel name cannot be empty" @@ -5683,7 +5660,8 @@ msgstr "다른 세션의 임시 테이블은 cluster 작업을 할 수 없습니 msgid "there is no previously clustered index for table \"%s\"" msgstr "\"%s\" 테이블을 위한 previously clustered 인덱스가 없음" -#: commands/cluster.c:173 commands/tablecmds.c:9383 commands/tablecmds.c:11143 +#: commands/cluster.c:173 commands/tablecmds.c:10198 +#: commands/tablecmds.c:12079 #, c-format msgid "index \"%s\" for table \"%s\" does not exist" msgstr "\"%s\" 인덱스는 \"%s\" 테이블에 없음" @@ -5698,7 +5676,7 @@ msgstr "공유된 카탈로그는 클러스터 작업을 할 수 없음" msgid "cannot vacuum temporary tables of other sessions" msgstr "다른 세션의 임시 테이블은 vacuum 작업을 할 수 없음" -#: commands/cluster.c:431 commands/tablecmds.c:11153 +#: commands/cluster.c:431 commands/tablecmds.c:12089 #, c-format msgid "\"%s\" is not an index for table \"%s\"" msgstr "\"%s\" 객체는 \"%s\" 테이블을 위한 인덱스가 아님" @@ -5732,12 +5710,12 @@ msgstr " \"%s.%s\" 클러스터링 중 (사용 인덱스: \"%s\")" msgid "clustering \"%s.%s\" using sequential scan and sort" msgstr "순차 탐색과 정렬을 이용해서 \"%s.%s\" 객체 클러스터링 중" -#: commands/cluster.c:929 commands/vacuumlazy.c:479 +#: commands/cluster.c:929 commands/vacuumlazy.c:490 #, c-format msgid "vacuuming \"%s.%s\"" msgstr "\"%s.%s\" 청소 중" -#: commands/cluster.c:1088 +#: commands/cluster.c:1084 #, c-format msgid "" "\"%s\": found %.0f removable, %.0f nonremovable row versions in %u pages" @@ -5745,7 +5723,7 @@ msgstr "" "\"%s\": 삭제가능한 %.0f개, 삭제불가능한 %.0f개의 행 버전을 %u 페이지에서 발견" "했음." -#: commands/cluster.c:1092 +#: commands/cluster.c:1088 #, c-format msgid "" "%.0f dead row versions cannot be removed yet.\n" @@ -5754,41 +5732,81 @@ msgstr "" "%.0f 개의 사용하지 않는 로우 버전을 아직 지우지 못했음.\n" "%s." 
-#: commands/collationcmds.c:80 +#: commands/collationcmds.c:101 #, c-format msgid "collation attribute \"%s\" not recognized" msgstr "\"%s\" 연산자 속성을 처리할 수 없음" -#: commands/collationcmds.c:125 +#: commands/collationcmds.c:143 +#, c-format +msgid "collation \"default\" cannot be copied" +msgstr "\"default\" 정렬규칙은 복사될 수 없음" + +#: commands/collationcmds.c:173 +#, c-format +msgid "unrecognized collation provider: %s" +msgstr "알 수 없는 정렬규칙 제공자 이름: %s" + +#: commands/collationcmds.c:182 #, c-format msgid "parameter \"lc_collate\" must be specified" msgstr "\"lc_collate\" 옵션을 지정해야 함" -#: commands/collationcmds.c:130 +#: commands/collationcmds.c:187 #, c-format msgid "parameter \"lc_ctype\" must be specified" msgstr "\"lc_ctype\" 옵션을 지정해야 함" -#: commands/collationcmds.c:166 +#: commands/collationcmds.c:246 #, c-format msgid "collation \"%s\" for encoding \"%s\" already exists in schema \"%s\"" msgstr "\"%s\" 정렬규칙(대상 인코딩: \"%s\")이 \"%s\" 스키마 안에 이미 있음" -#: commands/collationcmds.c:177 +#: commands/collationcmds.c:257 #, c-format msgid "collation \"%s\" already exists in schema \"%s\"" msgstr "\"%s\" 정렬규칙이 \"%s\" 스키마에 이미 있습니다" -#: commands/comment.c:62 commands/dbcommands.c:797 commands/dbcommands.c:962 -#: commands/dbcommands.c:1067 commands/dbcommands.c:1257 -#: commands/dbcommands.c:1477 commands/dbcommands.c:1594 -#: commands/dbcommands.c:2011 utils/init/postinit.c:841 -#: utils/init/postinit.c:943 utils/init/postinit.c:960 +#: commands/collationcmds.c:305 +#, c-format +msgid "changing version from %s to %s" +msgstr "%s에서 %s 버전으로 바꿉니다" + +#: commands/collationcmds.c:320 +#, c-format +msgid "version has not changed" +msgstr "버전이 바뀌지 않았습니다" + +#: commands/collationcmds.c:451 +#, c-format +msgid "could not convert locale name \"%s\" to language tag: %s" +msgstr "\"%s\" 로케일 이름을 언어 태그로 변환할 수 없음: %s" + +#: commands/collationcmds.c:512 +#, c-format +msgid "must be superuser to import system collations" +msgstr "시스템 정렬규칙을 가져오려면 슈퍼유저여야함" + +#: commands/collationcmds.c:535 commands/copy.c:1807 commands/copy.c:3109 +#, c-format +msgid "could not execute command \"%s\": %m" +msgstr "\"%s\" 명령을 실행할 수 없음: %m" + +#: commands/collationcmds.c:666 +#, c-format +msgid "no usable system locales were found" +msgstr "" + +#: commands/comment.c:61 commands/dbcommands.c:808 commands/dbcommands.c:996 +#: commands/dbcommands.c:1100 commands/dbcommands.c:1290 +#: commands/dbcommands.c:1513 commands/dbcommands.c:1627 +#: commands/dbcommands.c:2043 utils/init/postinit.c:846 +#: utils/init/postinit.c:951 utils/init/postinit.c:968 #, c-format msgid "database \"%s\" does not exist" msgstr "\"%s\" 데이터베이스 없음" -#: commands/comment.c:101 commands/seclabel.c:116 parser/parse_utilcmd.c:753 +#: commands/comment.c:101 commands/seclabel.c:117 parser/parse_utilcmd.c:932 #, c-format msgid "" "\"%s\" is not a table, view, materialized view, composite type, or foreign " @@ -5797,12 +5815,12 @@ msgstr "" "\"%s\" 객체는 테이블도, 뷰도, 구체화된 뷰도, 복합 자료형도, 외부 테이블도 아" "닙니다." 
-#: commands/constraint.c:60 utils/adt/ri_triggers.c:2715 +#: commands/constraint.c:60 utils/adt/ri_triggers.c:2712 #, c-format msgid "function \"%s\" was not called by trigger manager" msgstr "\"%s\" 함수가 트리거 관리자에서 호출되지 않았음" -#: commands/constraint.c:67 utils/adt/ri_triggers.c:2724 +#: commands/constraint.c:67 utils/adt/ri_triggers.c:2721 #, c-format msgid "function \"%s\" must be fired AFTER ROW" msgstr "AFTER ROW에서 \"%s\" 함수를 실행해야 함" @@ -5812,395 +5830,407 @@ msgstr "AFTER ROW에서 \"%s\" 함수를 실행해야 함" msgid "function \"%s\" must be fired for INSERT or UPDATE" msgstr "INSERT 또는 UPDATE에 대해 \"%s\" 함수를 실행해야 함" -#: commands/conversioncmds.c:67 +#: commands/conversioncmds.c:66 #, c-format msgid "source encoding \"%s\" does not exist" msgstr "\"%s\" 원본 인코딩 없음" -#: commands/conversioncmds.c:74 +#: commands/conversioncmds.c:73 #, c-format msgid "destination encoding \"%s\" does not exist" msgstr "\"%s\" 대상 인코딩 없음" -#: commands/conversioncmds.c:88 +#: commands/conversioncmds.c:87 #, c-format msgid "encoding conversion function %s must return type %s" msgstr "%s 인코딩 변환 함수는 %s 형을 반환해야 함" -#: commands/copy.c:362 commands/copy.c:374 commands/copy.c:408 -#: commands/copy.c:420 +#: commands/copy.c:373 commands/copy.c:407 #, c-format msgid "COPY BINARY is not supported to stdout or from stdin" msgstr "COPY BINARY 명령은 stdout, stdin 입출력을 지원하지 않습니다" -#: commands/copy.c:520 +#: commands/copy.c:507 #, c-format msgid "could not write to COPY program: %m" msgstr "COPY 프로그램으로 파일을 쓸 수 없습니다: %m" -#: commands/copy.c:525 +#: commands/copy.c:512 #, c-format msgid "could not write to COPY file: %m" msgstr "COPY 파일로 파일을 쓸 수 없습니다: %m" -#: commands/copy.c:538 +#: commands/copy.c:525 #, c-format msgid "connection lost during COPY to stdout" msgstr "COPY 명령에서 stdout으로 자료를 내보내는 동안 연결이 끊겼습니다" -#: commands/copy.c:579 +#: commands/copy.c:569 #, c-format msgid "could not read from COPY file: %m" msgstr "COPY 명령에 사용할 파일을 읽을 수 없습니다: %m" -#: commands/copy.c:595 commands/copy.c:616 commands/copy.c:620 -#: tcop/postgres.c:341 tcop/postgres.c:377 tcop/postgres.c:404 +#: commands/copy.c:585 commands/copy.c:606 commands/copy.c:610 +#: tcop/postgres.c:335 tcop/postgres.c:371 tcop/postgres.c:398 #, c-format msgid "unexpected EOF on client connection with an open transaction" msgstr "열린 트랜잭션과 함께 클라이언트 연결에서 예상치 않은 EOF 발견됨" -#: commands/copy.c:633 +#: commands/copy.c:623 #, c-format msgid "COPY from stdin failed: %s" msgstr "COPY 명령에서 stdin으로 자료 가져오기 실패: %s" -#: commands/copy.c:649 +#: commands/copy.c:639 #, c-format msgid "unexpected message type 0x%02X during COPY from stdin" msgstr "" "COPY 명령으로 stdin으로 자료를 가져오는 동안 예상치 않은 메시지 타입 0x%02X " "발견됨" -#: commands/copy.c:806 +#: commands/copy.c:800 #, c-format msgid "must be superuser to COPY to or from an external program" msgstr "외부 프로그램을 이용하는 COPY 작업은 슈퍼유저만 허용합니다." -#: commands/copy.c:807 commands/copy.c:813 +#: commands/copy.c:801 commands/copy.c:807 #, c-format msgid "" "Anyone can COPY to stdout or from stdin. psql's \\copy command also works " "for anyone." msgstr "일반 사용자인데, 이 작업이 필요하면, psql의 \\copy 명령을 이용하세요" -#: commands/copy.c:812 +#: commands/copy.c:806 #, c-format msgid "must be superuser to COPY to or from a file" msgstr "" "COPY 명령으로 자료를 파일로 내보내거나 파일에서 가져오려면, superuser여야만 " "합니다" -#: commands/copy.c:879 +#: commands/copy.c:868 #, c-format msgid "COPY FROM not supported with row-level security" msgstr "로우 단위 보안 기능으로 COPY FROM 명령을 사용할 수 없음" -#: commands/copy.c:880 +#: commands/copy.c:869 #, c-format msgid "Use INSERT statements instead." msgstr "대신에 INSERT 구문을 사용하십시오."
-#: commands/copy.c:1058 +#: commands/copy.c:1054 #, c-format msgid "COPY format \"%s\" not recognized" msgstr "\"%s\" COPY 양식은 지원하지 않음" -#: commands/copy.c:1129 commands/copy.c:1143 commands/copy.c:1157 -#: commands/copy.c:1177 +#: commands/copy.c:1134 commands/copy.c:1150 commands/copy.c:1165 +#: commands/copy.c:1187 #, c-format msgid "argument to option \"%s\" must be a list of column names" msgstr "\"%s\" 옵션에 대한 인자는 칼럼 이름 목록이어야 합니다." -#: commands/copy.c:1190 +#: commands/copy.c:1202 #, c-format msgid "argument to option \"%s\" must be a valid encoding name" msgstr "\"%s\" 옵션에 대한 인자는 인코딩 이름이어야 합니다." -#: commands/copy.c:1196 commands/dbcommands.c:232 commands/dbcommands.c:1427 +#: commands/copy.c:1209 commands/dbcommands.c:242 commands/dbcommands.c:1461 #, c-format msgid "option \"%s\" not recognized" msgstr "\"%s\" 옵션은 타당하지 않습니다." -#: commands/copy.c:1207 +#: commands/copy.c:1221 #, c-format msgid "cannot specify DELIMITER in BINARY mode" msgstr "BINARY 모드에서는 DELIMITER 값을 지정할 수 없음" -#: commands/copy.c:1212 +#: commands/copy.c:1226 #, c-format msgid "cannot specify NULL in BINARY mode" msgstr "BINARY 모드에서는 NULL 값을 지정할 수 없음" -#: commands/copy.c:1234 +#: commands/copy.c:1248 #, c-format msgid "COPY delimiter must be a single one-byte character" msgstr "COPY 구분자는 1바이트의 단일 문자여야 함" -#: commands/copy.c:1241 +#: commands/copy.c:1255 #, c-format msgid "COPY delimiter cannot be newline or carriage return" msgstr "COPY 명령에서 사용할 칼럼 구분자로 줄바꿈 문자들을 사용할 수 없습니다" -#: commands/copy.c:1247 +#: commands/copy.c:1261 #, c-format msgid "COPY null representation cannot use newline or carriage return" msgstr "COPY null 표현에서 줄바꿈 또는 캐리지 리턴을 사용할 수 없음" -#: commands/copy.c:1264 +#: commands/copy.c:1278 #, c-format msgid "COPY delimiter cannot be \"%s\"" msgstr "COPY 구분자는 \"%s\"일 수 없음" -#: commands/copy.c:1270 +#: commands/copy.c:1284 #, c-format msgid "COPY HEADER available only in CSV mode" msgstr "COPY HEADER는 CSV 모드에서만 사용할 수 있음" -#: commands/copy.c:1276 +#: commands/copy.c:1290 #, c-format msgid "COPY quote available only in CSV mode" msgstr "COPY 따옴표는 CSV 모드에서만 사용할 수 있음" -#: commands/copy.c:1281 +#: commands/copy.c:1295 #, c-format msgid "COPY quote must be a single one-byte character" msgstr "COPY 따옴표는 1바이트의 단일 문자여야 함" -#: commands/copy.c:1286 +#: commands/copy.c:1300 #, c-format msgid "COPY delimiter and quote must be different" msgstr "COPY 구분자 및 따옴표는 서로 달라야 함" -#: commands/copy.c:1292 +#: commands/copy.c:1306 #, c-format msgid "COPY escape available only in CSV mode" msgstr "COPY 이스케이프는 CSV 모드에서만 사용할 수 있음" -#: commands/copy.c:1297 +#: commands/copy.c:1311 #, c-format msgid "COPY escape must be a single one-byte character" msgstr "COPY 이스케이프는 1바이트의 단일 문자여야 함" -#: commands/copy.c:1303 +#: commands/copy.c:1317 #, c-format msgid "COPY force quote available only in CSV mode" msgstr "COPY force quote는 CSV 모드에서만 사용할 수 있음" -#: commands/copy.c:1307 +#: commands/copy.c:1321 #, c-format msgid "COPY force quote only available using COPY TO" msgstr "COPY force quote는 COPY TO에서만 사용할 수 있음" -#: commands/copy.c:1313 +#: commands/copy.c:1327 #, c-format msgid "COPY force not null available only in CSV mode" msgstr "COPY force not null은 CSV 모드에서만 사용할 수 있음" -#: commands/copy.c:1317 +#: commands/copy.c:1331 #, c-format msgid "COPY force not null only available using COPY FROM" msgstr "COPY force not null은 COPY FROM에서만 사용할 수 있음" -#: commands/copy.c:1323 +#: commands/copy.c:1337 #, c-format msgid "COPY force null available only in CSV mode" msgstr "COPY force null은 CSV 모드에서만 사용할 수 있음" -#: commands/copy.c:1328 +#: 
commands/copy.c:1342 #, c-format msgid "COPY force null only available using COPY FROM" msgstr "COPY force null은 COPY FROM에서만 사용할 수 있음" -#: commands/copy.c:1334 +#: commands/copy.c:1348 #, c-format msgid "COPY delimiter must not appear in the NULL specification" msgstr "COPY 구분자는 NULL 지정에 표시되지 않아야 함" -#: commands/copy.c:1341 +#: commands/copy.c:1355 #, c-format msgid "CSV quote character must not appear in the NULL specification" msgstr "CSV 따옴표는 NULL 지정에 표시되지 않아야 함" -#: commands/copy.c:1402 +#: commands/copy.c:1416 #, c-format msgid "table \"%s\" does not have OIDs" msgstr "" "\"%s\" 테이블은 without oids 속성으로 만들어졌기에 OID 값을 구할 수 없습니다" -#: commands/copy.c:1419 +#: commands/copy.c:1433 #, c-format msgid "COPY (query) WITH OIDS is not supported" msgstr "COPY (쿼리) WITH OIDS 지원하지 않음" -#: commands/copy.c:1439 +#: commands/copy.c:1454 #, c-format msgid "DO INSTEAD NOTHING rules are not supported for COPY" msgstr "DO INSTEAD NOTHING 룰(rule)은 COPY 구문에서 지원하지 않습니다." -#: commands/copy.c:1453 +#: commands/copy.c:1468 #, c-format msgid "conditional DO INSTEAD rules are not supported for COPY" msgstr "선택적 DO INSTEAD 룰은 COPY 구문에서 지원하지 않음" -#: commands/copy.c:1457 +#: commands/copy.c:1472 #, c-format msgid "DO ALSO rules are not supported for the COPY" msgstr "DO ALSO 룰(rule)은 COPY 구문에서 지원하지 않습니다." -#: commands/copy.c:1462 +#: commands/copy.c:1477 #, c-format msgid "multi-statement DO INSTEAD rules are not supported for COPY" msgstr "다중 구문 DO INSTEAD 룰은 COPY 구문에서 지원하지 않음" -#: commands/copy.c:1472 +#: commands/copy.c:1487 #, c-format msgid "COPY (SELECT INTO) is not supported" msgstr "COPY (SELECT INTO) 지원하지 않음" -#: commands/copy.c:1489 +#: commands/copy.c:1504 #, c-format msgid "COPY query must have a RETURNING clause" msgstr "COPY 쿼리는 RETURNING 절이 있어야 합니다" -#: commands/copy.c:1517 +#: commands/copy.c:1532 #, c-format msgid "relation referenced by COPY statement has changed" msgstr "COPY 문에 의해 참조된 릴레이션이 변경 되었음" -#: commands/copy.c:1575 +#: commands/copy.c:1590 #, c-format msgid "FORCE_QUOTE column \"%s\" not referenced by COPY" msgstr "\"%s\" FORCE_QUOTE 칼럼은 COPY에서 참조되지 않음" -#: commands/copy.c:1597 +#: commands/copy.c:1612 #, c-format msgid "FORCE_NOT_NULL column \"%s\" not referenced by COPY" msgstr "\"%s\" FORCE_NOT_NULL 칼럼은 COPY에서 참조되지 않음" -#: commands/copy.c:1619 +#: commands/copy.c:1634 #, c-format msgid "FORCE_NULL column \"%s\" not referenced by COPY" -msgstr "\"%s\" FORCE_NULL 열은 COPY에서 참조되지 않음" +msgstr "\"%s\" FORCE_NULL 칼럼은 COPY에서 참조되지 않음" -#: commands/copy.c:1684 +#: commands/copy.c:1699 #, c-format msgid "could not close pipe to external command: %m" msgstr "외부 명령으로 파이프를 닫을 수 없음: %m" -#: commands/copy.c:1688 +#: commands/copy.c:1703 #, c-format msgid "program \"%s\" failed" msgstr "\"%s\" 프로그램 실패" -#: commands/copy.c:1738 +#: commands/copy.c:1753 #, c-format msgid "cannot copy from view \"%s\"" msgstr "\"%s\" 이름의 객체는 뷰(view)입니다. 자료를 내보낼 수 없습니다" -#: commands/copy.c:1740 commands/copy.c:1746 commands/copy.c:1752 +#: commands/copy.c:1755 commands/copy.c:1761 commands/copy.c:1767 +#: commands/copy.c:1778 #, c-format msgid "Try the COPY (SELECT ...) TO variant." msgstr "COPY (SELECT ...) TO 변형을 시도하십시오." -#: commands/copy.c:1744 +#: commands/copy.c:1759 #, c-format msgid "cannot copy from materialized view \"%s\"" msgstr "\"%s\" 이름의 객체는 구체화된 뷰입니다. 자료를 내보낼 수 없습니다" -#: commands/copy.c:1750 +#: commands/copy.c:1765 #, c-format msgid "cannot copy from foreign table \"%s\"" msgstr "\"%s\" 이름의 객체는 외부 테이블입니다. 
자료를 내보낼 수 없습니다" -#: commands/copy.c:1756 +#: commands/copy.c:1771 #, c-format msgid "cannot copy from sequence \"%s\"" msgstr "\"%s\" 이름의 객체는 시퀀스입니다. 자료를 내보낼 수 없습니다" -#: commands/copy.c:1761 +#: commands/copy.c:1776 +#, c-format +msgid "cannot copy from partitioned table \"%s\"" +msgstr "\"%s\" 파티션 된 테이블에서 복사할 수 없음" + +#: commands/copy.c:1782 #, c-format msgid "cannot copy from non-table relation \"%s\"" msgstr "" "\"%s\" 객체는 테이블이 아닌 릴레이션(relation)이기에 자료를 내보낼 수 없습니" "다" -#: commands/copy.c:1786 commands/copy.c:2822 -#, c-format -msgid "could not execute command \"%s\": %m" -msgstr "\"%s\" 명령을 실행할 수 없음: %m" - -#: commands/copy.c:1801 +#: commands/copy.c:1822 #, c-format msgid "relative path not allowed for COPY to file" msgstr "COPY 명령에 사용할 파일 이름으로 상대경로는 사용할 수 없습니다" -#: commands/copy.c:1809 +#: commands/copy.c:1834 #, c-format msgid "could not open file \"%s\" for writing: %m" msgstr "\"%s\" 파일 열기 실패: %m" -#: commands/copy.c:1821 commands/copy.c:2845 +#: commands/copy.c:1837 +#, c-format +msgid "" +"COPY TO instructs the PostgreSQL server process to write a file. You may " +"want a client-side facility such as psql's \\copy." +msgstr "" + +#: commands/copy.c:1850 commands/copy.c:3140 #, c-format msgid "\"%s\" is a directory" msgstr "\"%s\" 디렉터리임" -#: commands/copy.c:2144 +#: commands/copy.c:2173 #, c-format msgid "COPY %s, line %d, column %s" msgstr "%s 복사, %d번째 줄, %s 열" -#: commands/copy.c:2148 commands/copy.c:2195 +#: commands/copy.c:2177 commands/copy.c:2224 #, c-format msgid "COPY %s, line %d" msgstr "%s 복사, %d번째 줄" -#: commands/copy.c:2159 +#: commands/copy.c:2188 #, c-format msgid "COPY %s, line %d, column %s: \"%s\"" msgstr "%s 복사, %d번째 줄, %s 열: \"%s\"" -#: commands/copy.c:2167 +#: commands/copy.c:2196 #, c-format msgid "COPY %s, line %d, column %s: null input" msgstr "COPY %s, %d행, %s 열: null 입력" -#: commands/copy.c:2189 +#: commands/copy.c:2218 #, c-format msgid "COPY %s, line %d: \"%s\"" msgstr "%s 복사, %d번째 줄: \"%s\"" -#: commands/copy.c:2273 +#: commands/copy.c:2312 #, c-format msgid "cannot copy to view \"%s\"" msgstr "\"%s\" 뷰(view)에 복사할 수 없음" -#: commands/copy.c:2278 +#: commands/copy.c:2314 +#, c-format +msgid "To enable copying to a view, provide an INSTEAD OF INSERT trigger." 
+msgstr "뷰를 통해 자료를 입력하려면, INSTEAD OF INSERT 트리거를 사용하세요" + +#: commands/copy.c:2318 #, c-format msgid "cannot copy to materialized view \"%s\"" msgstr "\"%s\" 구체화된 뷰(view)에 복사할 수 없음" -#: commands/copy.c:2283 +#: commands/copy.c:2323 #, c-format msgid "cannot copy to foreign table \"%s\"" msgstr "\"%s\" 외부 테이블에 복사할 수 없음" -#: commands/copy.c:2288 +#: commands/copy.c:2328 #, c-format msgid "cannot copy to sequence \"%s\"" msgstr "\"%s\" 시퀀스에 복사할 수 없음" -#: commands/copy.c:2293 +#: commands/copy.c:2333 #, c-format msgid "cannot copy to non-table relation \"%s\"" msgstr "\"%s\" 객체는 테이블이 아닌 릴레이션(relation)이기에 복사할 수 없음" -#: commands/copy.c:2356 +#: commands/copy.c:2396 #, c-format msgid "cannot perform FREEZE because of prior transaction activity" msgstr "" "먼저 시작한 다른 트랜잭션이 아직 활성 상태여서 FREEZE 작업은 진행할 수 없음" -#: commands/copy.c:2362 +#: commands/copy.c:2402 #, c-format msgid "" "cannot perform FREEZE because the table was not created or truncated in the " @@ -6209,149 +6239,166 @@ msgstr "" "현재 하위 트랜잭션에서 만들어지거나 비워진 테이블이 아니기 때문에 FREEZE 작업" "을 할 수 없음" -#: commands/copy.c:2865 +#: commands/copy.c:2624 executor/nodeModifyTable.c:311 +#, c-format +msgid "cannot route inserted tuples to a foreign table" +msgstr "외부 테이블에 자료를 입력할 수 없음" + +#: commands/copy.c:3127 +#, c-format +msgid "" +"COPY FROM instructs the PostgreSQL server process to read a file. You may " +"want a client-side facility such as psql's \\copy." +msgstr "" +"COPY FROM 명령은 PostgreSQL 서버 프로세스가 한 파일을 읽어 처리합니다. 클라이" +"언트 쪽에 있는 파일을 읽어 처리 하려면, psql의 \\copy 내장 명령어를 사용하세" +"요." + +#: commands/copy.c:3160 #, c-format msgid "COPY file signature not recognized" msgstr "file signature 복사는 인식되지 않았음" -#: commands/copy.c:2870 +#: commands/copy.c:3165 #, c-format msgid "invalid COPY file header (missing flags)" msgstr "COPY 명령에서 잘못된 파일 헤더를 사용함(플래그 빠졌음)" -#: commands/copy.c:2876 +#: commands/copy.c:3171 #, c-format msgid "unrecognized critical flags in COPY file header" msgstr "_^_ 복사 파일 헤더안에 critical flags 값들을 인식할 수 없음" -#: commands/copy.c:2882 +#: commands/copy.c:3177 #, c-format msgid "invalid COPY file header (missing length)" msgstr "복사 파일 헤더에 length 값이 빠졌음" -#: commands/copy.c:2889 +#: commands/copy.c:3184 #, c-format msgid "invalid COPY file header (wrong length)" msgstr "복사 파일 헤더에 length 값이 잘못되었음" -#: commands/copy.c:3022 commands/copy.c:3729 commands/copy.c:3959 +#: commands/copy.c:3317 commands/copy.c:4024 commands/copy.c:4254 #, c-format msgid "extra data after last expected column" -msgstr "마지막 열을 초과해서 또 다른 데이터가 있음" +msgstr "마지막 칼럼을 초과해서 또 다른 데이터가 있음" -#: commands/copy.c:3032 +#: commands/copy.c:3327 #, c-format msgid "missing data for OID column" msgstr "OID 열에 자료가 없음" -#: commands/copy.c:3038 +#: commands/copy.c:3333 #, c-format msgid "null OID in COPY data" msgstr "복사 데이터에 null OID 값이 있음" -#: commands/copy.c:3048 commands/copy.c:3171 +#: commands/copy.c:3343 commands/copy.c:3466 #, c-format msgid "invalid OID in COPY data" msgstr "복사 데이터에 잘못된 OID 값이 있음" -#: commands/copy.c:3063 +#: commands/copy.c:3358 #, c-format msgid "missing data for column \"%s\"" -msgstr "\"%s\" 열의 자료가 빠졌음" +msgstr "\"%s\" 칼럼의 자료가 빠졌음" -#: commands/copy.c:3146 +#: commands/copy.c:3441 #, c-format msgid "received copy data after EOF marker" msgstr "EOF 표시 뒤에도 복사 데이터를 받았음" -#: commands/copy.c:3153 +#: commands/copy.c:3448 #, c-format msgid "row field count is %d, expected %d" msgstr "행(row) 필드 갯수가 %d 임, 예상값은 %d" -#: commands/copy.c:3493 commands/copy.c:3510 +#: commands/copy.c:3788 commands/copy.c:3805 #, c-format msgid "literal carriage return found in data" msgstr "데이터에 
carriage return 값이 잘못되었음" -#: commands/copy.c:3494 commands/copy.c:3511 +#: commands/copy.c:3789 commands/copy.c:3806 #, c-format msgid "unquoted carriage return found in data" msgstr "데이터에 carriage return 값 표기가 잘못 되었음" -#: commands/copy.c:3496 commands/copy.c:3513 +#: commands/copy.c:3791 commands/copy.c:3808 #, c-format msgid "Use \"\\r\" to represent carriage return." msgstr "carriage return값으로 \"\\r\" 문자를 사용하세요" -#: commands/copy.c:3497 commands/copy.c:3514 +#: commands/copy.c:3792 commands/copy.c:3809 #, c-format msgid "Use quoted CSV field to represent carriage return." msgstr "" "carriage return 문자를 그대로 적용하려면, quoted CSV 필드를 사용하세요." -#: commands/copy.c:3526 +#: commands/copy.c:3821 #, c-format msgid "literal newline found in data" msgstr "데이터에 newline 값이 잘못되었음" -#: commands/copy.c:3527 +#: commands/copy.c:3822 #, c-format msgid "unquoted newline found in data" msgstr "데이터에 newline 값이 잘못 되었음" -#: commands/copy.c:3529 +#: commands/copy.c:3824 #, c-format msgid "Use \"\\n\" to represent newline." msgstr "newline 값으로 \"\\n\" 문자를 사용하세요" -#: commands/copy.c:3530 +#: commands/copy.c:3825 #, c-format msgid "Use quoted CSV field to represent newline." msgstr "newline 문자를 그대로 적용하려면, quoted CSV 필드를 사용하세요." -#: commands/copy.c:3576 commands/copy.c:3612 +#: commands/copy.c:3871 commands/copy.c:3907 #, c-format msgid "end-of-copy marker does not match previous newline style" msgstr "end-of-copy 마크는 이전 newline 모양가 틀립니다" -#: commands/copy.c:3585 commands/copy.c:3601 +#: commands/copy.c:3880 commands/copy.c:3896 #, c-format msgid "end-of-copy marker corrupt" msgstr "end-of-copy 마크가 잘못되었음" -#: commands/copy.c:4043 +#: commands/copy.c:4338 #, c-format msgid "unterminated CSV quoted field" msgstr "종료되지 않은 CSV 따옴표 필드" -#: commands/copy.c:4120 commands/copy.c:4139 +#: commands/copy.c:4415 commands/copy.c:4434 #, c-format msgid "unexpected EOF in COPY data" msgstr "복사 자료 안에 예상치 않은 EOF 발견" -#: commands/copy.c:4129 +#: commands/copy.c:4424 #, c-format msgid "invalid field size" msgstr "잘못된 필드 크기" -#: commands/copy.c:4152 +#: commands/copy.c:4447 #, c-format msgid "incorrect binary data format" msgstr "잘못된 바이너리 자료 포맷" -#: commands/copy.c:4463 commands/indexcmds.c:1054 commands/tablecmds.c:1464 -#: commands/tablecmds.c:2291 parser/parse_relation.c:3177 -#: parser/parse_relation.c:3197 utils/adt/tsvector_op.c:2559 +#: commands/copy.c:4758 commands/indexcmds.c:1070 commands/statscmds.c:183 +#: commands/tablecmds.c:1685 commands/tablecmds.c:2187 +#: commands/tablecmds.c:2613 parser/parse_relation.c:3282 +#: parser/parse_relation.c:3302 utils/adt/tsvector_op.c:2561 #, c-format msgid "column \"%s\" does not exist" -msgstr "\"%s\" 이름의 열이 없습니다" +msgstr "\"%s\" 이름의 칼럼은 없습니다" -#: commands/copy.c:4470 commands/tablecmds.c:1490 commands/trigger.c:651 -#: parser/parse_target.c:967 parser/parse_target.c:978 +#: commands/copy.c:4765 commands/tablecmds.c:1711 commands/tablecmds.c:2213 +#: commands/trigger.c:826 parser/parse_target.c:1018 +#: parser/parse_target.c:1029 #, c-format msgid "column \"%s\" specified more than once" -msgstr "\"%s\" 열을 하나 이상 지정했음" +msgstr "\"%s\" 칼럼을 하나 이상 지정했음" #: commands/createas.c:213 commands/createas.c:509 #, c-format @@ -6363,65 +6410,65 @@ msgstr "너무 많은 칼럼 이름을 지정했습니다." 
msgid "policies not yet implemented for this command" msgstr "이 명령을 위한 정책은 아직 구현되어 있지 않습니다" -#: commands/dbcommands.c:226 +#: commands/dbcommands.c:235 #, c-format msgid "LOCATION is not supported anymore" msgstr "LOCATION 예약어는 이제 더이상 지원하지 않습니다" -#: commands/dbcommands.c:227 +#: commands/dbcommands.c:236 #, c-format msgid "Consider using tablespaces instead." msgstr "대신에 테이블스페이스를 이용하세요." -#: commands/dbcommands.c:251 utils/adt/ascii.c:144 +#: commands/dbcommands.c:262 utils/adt/ascii.c:145 #, c-format msgid "%d is not a valid encoding code" msgstr "%d 값은 잘못된 인코딩 코드임" -#: commands/dbcommands.c:261 utils/adt/ascii.c:126 +#: commands/dbcommands.c:273 utils/adt/ascii.c:127 #, c-format msgid "%s is not a valid encoding name" msgstr "%s 이름은 잘못된 인코딩 이름임" -#: commands/dbcommands.c:279 commands/dbcommands.c:1458 commands/user.c:272 -#: commands/user.c:650 +#: commands/dbcommands.c:292 commands/dbcommands.c:1494 commands/user.c:276 +#: commands/user.c:664 #, c-format msgid "invalid connection limit: %d" msgstr "잘못된 연결 제한: %d" -#: commands/dbcommands.c:298 +#: commands/dbcommands.c:311 #, c-format msgid "permission denied to create database" msgstr "데이터베이스를 만들 권한이 없음" -#: commands/dbcommands.c:321 +#: commands/dbcommands.c:334 #, c-format msgid "template database \"%s\" does not exist" msgstr "\"%s\" 템플릿 데이터베이스 없음" -#: commands/dbcommands.c:333 +#: commands/dbcommands.c:346 #, c-format msgid "permission denied to copy database \"%s\"" msgstr "\"%s\" 데이터베이스를 복사할 권한이 없음" -#: commands/dbcommands.c:349 +#: commands/dbcommands.c:362 #, c-format msgid "invalid server encoding %d" msgstr "잘못된 서버 인코딩 %d" -#: commands/dbcommands.c:355 commands/dbcommands.c:360 +#: commands/dbcommands.c:368 commands/dbcommands.c:373 #, c-format msgid "invalid locale name: \"%s\"" msgstr "\"%s\" 로케일 이름이 잘못됨" -#: commands/dbcommands.c:380 +#: commands/dbcommands.c:393 #, c-format msgid "" "new encoding (%s) is incompatible with the encoding of the template database " "(%s)" msgstr "새 인코딩(%s)이 템플릿 데이터베이스의 인코딩(%s)과 호환되지 않음" -#: commands/dbcommands.c:383 +#: commands/dbcommands.c:396 #, c-format msgid "" "Use the same encoding as in the template database, or use template0 as " @@ -6430,7 +6477,7 @@ msgstr "" "템플릿 데이터베이스와 동일한 인코딩을 사용하거나 template0을 템플릿으로 사용" "하십시오." -#: commands/dbcommands.c:388 +#: commands/dbcommands.c:401 #, c-format msgid "" "new collation (%s) is incompatible with the collation of the template " @@ -6439,7 +6486,7 @@ msgstr "" "새 데이터 정렬 규칙 (%s)이 템플릿 데이터베이스의 데이터 정렬 규칙(%s)과 호환" "되지 않음" -#: commands/dbcommands.c:390 +#: commands/dbcommands.c:403 #, c-format msgid "" "Use the same collation as in the template database, or use template0 as " @@ -6448,14 +6495,14 @@ msgstr "" "템플릿 데이터베이스와 동일한 데이터 정렬 규칙을 사용하거나 template0을 템플릿" "으로 사용하십시오." -#: commands/dbcommands.c:395 +#: commands/dbcommands.c:408 #, c-format msgid "" "new LC_CTYPE (%s) is incompatible with the LC_CTYPE of the template database " "(%s)" msgstr "새 LC_CTYPE (%s)이 템플릿 데이터베이스의 LC_CTYPE (%s)과 호환되지 않음" -#: commands/dbcommands.c:397 +#: commands/dbcommands.c:410 #, c-format msgid "" "Use the same LC_CTYPE as in the template database, or use template0 as " @@ -6464,17 +6511,17 @@ msgstr "" "템플릿 데이터베이스와 동일한 LC_CTYPE을 사용하거나 template0을 템플릿으로 사" "용하십시오." 
-#: commands/dbcommands.c:419 commands/dbcommands.c:1113 +#: commands/dbcommands.c:432 commands/dbcommands.c:1146 #, c-format msgid "pg_global cannot be used as default tablespace" msgstr "pg_global을 기본 테이블스페이스로 사용할 수 없음" -#: commands/dbcommands.c:445 +#: commands/dbcommands.c:458 #, c-format msgid "cannot assign new default tablespace \"%s\"" msgstr "새 \"%s\" 테이블스페이스를 지정할 수 없습니다." -#: commands/dbcommands.c:447 +#: commands/dbcommands.c:460 #, c-format msgid "" "There is a conflict because database \"%s\" already has some tables in this " @@ -6483,85 +6530,96 @@ msgstr "" "\"%s\" 데이터베이스 소속 몇몇 테이블들이 이 테이블스페이스안에 있어서 충돌이 " "일어납니다." -#: commands/dbcommands.c:467 commands/dbcommands.c:982 +#: commands/dbcommands.c:480 commands/dbcommands.c:1016 #, c-format msgid "database \"%s\" already exists" msgstr "\"%s\" 이름의 데이터베이스는 이미 있음" -#: commands/dbcommands.c:481 +#: commands/dbcommands.c:494 #, c-format msgid "source database \"%s\" is being accessed by other users" msgstr "\"%s\" 원본 데이터베이스를 다른 사용자가 액세스하기 시작했습니다" -#: commands/dbcommands.c:726 commands/dbcommands.c:741 +#: commands/dbcommands.c:736 commands/dbcommands.c:751 #, c-format msgid "encoding \"%s\" does not match locale \"%s\"" msgstr "\"%s\" 인코딩은 \"%s\" 로케일과 일치하지 않음" -#: commands/dbcommands.c:729 +#: commands/dbcommands.c:739 #, c-format msgid "The chosen LC_CTYPE setting requires encoding \"%s\"." msgstr "선택한 LC_CTYPE 설정에는 \"%s\" 인코딩이 필요합니다." -#: commands/dbcommands.c:744 +#: commands/dbcommands.c:754 #, c-format msgid "The chosen LC_COLLATE setting requires encoding \"%s\"." msgstr "선택한 LC_COLLATE 설정에는 \"%s\" 인코딩이 필요합니다." -#: commands/dbcommands.c:804 +#: commands/dbcommands.c:815 #, c-format msgid "database \"%s\" does not exist, skipping" msgstr "\"%s\" 데이터베이스 없음, 건너 뜀" -#: commands/dbcommands.c:828 +#: commands/dbcommands.c:839 #, c-format msgid "cannot drop a template database" msgstr "템플릿 데이터베이스는 삭제할 수 없습니다" -#: commands/dbcommands.c:834 +#: commands/dbcommands.c:845 #, c-format msgid "cannot drop the currently open database" msgstr "현재 열려 있는 데이터베이스는 삭제할 수 없습니다" -#: commands/dbcommands.c:844 +#: commands/dbcommands.c:858 #, c-format -msgid "database \"%s\" is used by a logical replication slot" -msgstr "\"%s\" 데이터베이스가 논리 복제 슬롯에의해 사용되었음" +msgid "database \"%s\" is used by an active logical replication slot" +msgstr "\"%s\" 데이터베이스는 논리 복제 슬롯이 활성화 되어 있습니다" -#: commands/dbcommands.c:846 +#: commands/dbcommands.c:860 #, c-format -msgid "There is %d slot, %d of them active." -msgid_plural "There are %d slots, %d of them active." +msgid "There is %d active slot" +msgid_plural "There are %d active slots" msgstr[0] "" -#: commands/dbcommands.c:860 commands/dbcommands.c:1004 -#: commands/dbcommands.c:1135 +#: commands/dbcommands.c:874 commands/dbcommands.c:1038 +#: commands/dbcommands.c:1168 #, c-format msgid "database \"%s\" is being accessed by other users" msgstr "\"%s\" 데이터베이스를 다른 사용자가 액세스하기 시작했습니다" -#: commands/dbcommands.c:973 +#: commands/dbcommands.c:887 +#, c-format +msgid "database \"%s\" is being used by logical replication subscription" +msgstr "\"%s\" 데이터베이스가 논리 복제 구독으로 사용되었음" + +#: commands/dbcommands.c:889 +#, c-format +msgid "There is %d subscription." +msgid_plural "There are %d subscriptions." 
+msgstr[0] "" + +#: commands/dbcommands.c:1007 #, c-format msgid "permission denied to rename database" msgstr "데이터베이스 이름을 바꿀 권한이 없습니다" -#: commands/dbcommands.c:993 +#: commands/dbcommands.c:1027 #, c-format msgid "current database cannot be renamed" msgstr "현재 데이터베이스의 이름을 바꿀 수 없음" -#: commands/dbcommands.c:1091 +#: commands/dbcommands.c:1124 #, c-format msgid "cannot change the tablespace of the currently open database" msgstr "현재 열려 있는 데이터베이스의 테이블스페이스를 바꿀 수 없음" -#: commands/dbcommands.c:1194 +#: commands/dbcommands.c:1227 #, c-format msgid "some relations of database \"%s\" are already in tablespace \"%s\"" msgstr "" "\"%s\" 데이터베이스의 일부 릴레이션들이 \"%s\" 테이블스페이스에 이미 있음" -#: commands/dbcommands.c:1196 +#: commands/dbcommands.c:1229 #, c-format msgid "" "You must move them back to the database's default tablespace before using " @@ -6570,30 +6628,30 @@ msgstr "" "이 명령을 사용하기 전에 데이터베이스의 기본 테이블스페이스로 다시 이동해야 합" "니다." -#: commands/dbcommands.c:1325 commands/dbcommands.c:1868 -#: commands/dbcommands.c:2072 commands/dbcommands.c:2120 -#: commands/tablespace.c:606 +#: commands/dbcommands.c:1355 commands/dbcommands.c:1900 +#: commands/dbcommands.c:2104 commands/dbcommands.c:2159 +#: commands/tablespace.c:604 #, c-format msgid "some useless files may be left behind in old database directory \"%s\"" msgstr "" "불필요한 일부 파일이 이전 데이터베이스 디렉터리 \"%s\"에 남아 있을 수 있음" -#: commands/dbcommands.c:1440 +#: commands/dbcommands.c:1475 #, c-format msgid "option \"%s\" cannot be specified with other options" msgstr "\"%s\" 옵션은 다른 옵션들과 함께 사용할 수 없습니다." -#: commands/dbcommands.c:1494 +#: commands/dbcommands.c:1530 #, c-format msgid "cannot disallow connections for current database" msgstr "현재 데이터베이스 연결을 허용하지 않습니다." -#: commands/dbcommands.c:1634 +#: commands/dbcommands.c:1667 #, c-format msgid "permission denied to change owner of database" msgstr "데이터베이스 소유주를 바꿀 권한이 없습니다" -#: commands/dbcommands.c:1955 +#: commands/dbcommands.c:1987 #, c-format msgid "" "There are %d other session(s) and %d prepared transaction(s) using the " @@ -6601,20 +6659,20 @@ msgid "" msgstr "" "데이터베이스를 사용하는 %d개의 다른 세션과 %d개의 준비된 트랜잭션이 있습니다." -#: commands/dbcommands.c:1958 +#: commands/dbcommands.c:1990 #, c-format msgid "There is %d other session using the database." msgid_plural "There are %d other sessions using the database." msgstr[0] "데이터베이스를 사용하는 %d개의 다른 세션이 있습니다." -#: commands/dbcommands.c:1963 +#: commands/dbcommands.c:1995 #, c-format msgid "There is %d prepared transaction using the database." msgid_plural "There are %d prepared transactions using the database." msgstr[0] "데이터베이스를 사용하는 %d개의 준비된 트랜잭션이 있습니다." #: commands/define.c:54 commands/define.c:228 commands/define.c:260 -#: commands/define.c:288 +#: commands/define.c:288 commands/define.c:334 #, c-format msgid "%s requires a parameter" msgstr "%s 매개 변수를 필요로 함" @@ -6650,70 +6708,75 @@ msgstr "%s의 인자는 자료형 이름이어야합니다" msgid "invalid argument for %s: \"%s\"" msgstr "%s의 잘못된 인자: \"%s\"" -#: commands/dropcmds.c:112 commands/functioncmds.c:1203 -#: utils/adt/ruleutils.c:2080 +#: commands/dropcmds.c:104 commands/functioncmds.c:1201 +#: utils/adt/ruleutils.c:2453 #, c-format msgid "\"%s\" is an aggregate function" msgstr "\"%s\" 함수는 집계 함수입니다" -#: commands/dropcmds.c:114 +#: commands/dropcmds.c:106 #, c-format msgid "Use DROP AGGREGATE to drop aggregate functions." 
msgstr "집계 함수는 DROP AGGREGATE 명령으로 삭제할 수 있습니다" -#: commands/dropcmds.c:165 commands/sequence.c:424 commands/tablecmds.c:2378 -#: commands/tablecmds.c:2529 commands/tablecmds.c:2571 -#: commands/tablecmds.c:11524 tcop/utility.c:1119 +#: commands/dropcmds.c:157 commands/sequence.c:442 commands/tablecmds.c:2697 +#: commands/tablecmds.c:2848 commands/tablecmds.c:2891 +#: commands/tablecmds.c:12462 tcop/utility.c:1168 #, c-format msgid "relation \"%s\" does not exist, skipping" msgstr "\"%s\" 릴레이션 없음, 건너뜀" -#: commands/dropcmds.c:195 commands/dropcmds.c:296 commands/tablecmds.c:746 +#: commands/dropcmds.c:187 commands/dropcmds.c:286 commands/tablecmds.c:896 #, c-format msgid "schema \"%s\" does not exist, skipping" msgstr "\"%s\" 스키마(schema) 없음, 건너뜀" -#: commands/dropcmds.c:237 commands/dropcmds.c:276 commands/tablecmds.c:247 +#: commands/dropcmds.c:227 commands/dropcmds.c:266 commands/tablecmds.c:252 #, c-format msgid "type \"%s\" does not exist, skipping" msgstr "\"%s\" 자료형 없음, 건너뜀" -#: commands/dropcmds.c:266 +#: commands/dropcmds.c:256 #, c-format msgid "access method \"%s\" does not exist, skipping" msgstr "\"%s\" 인덱스 접근 방법 없음, 건너뜀" -#: commands/dropcmds.c:284 +#: commands/dropcmds.c:274 #, c-format msgid "collation \"%s\" does not exist, skipping" msgstr "\"%s\" 정렬규칙 없음, 건너뜀" -#: commands/dropcmds.c:291 +#: commands/dropcmds.c:281 #, c-format msgid "conversion \"%s\" does not exist, skipping" msgstr "\"%s\" 문자코드변환규칙(conversion) 없음, 건너뜀" -#: commands/dropcmds.c:302 +#: commands/dropcmds.c:292 +#, c-format +msgid "statistics object \"%s\" does not exist, skipping" +msgstr "\"%s\" 통계정보 객체 없음, 무시함" + +#: commands/dropcmds.c:299 #, c-format msgid "text search parser \"%s\" does not exist, skipping" msgstr "\"%s\" 전문 검색 파서가 없음, 건너뜀" -#: commands/dropcmds.c:309 +#: commands/dropcmds.c:306 #, c-format msgid "text search dictionary \"%s\" does not exist, skipping" msgstr "\"%s\" 전문 검색 사전이 없음, 건너뜀" -#: commands/dropcmds.c:316 +#: commands/dropcmds.c:313 #, c-format msgid "text search template \"%s\" does not exist, skipping" msgstr "\"%s\" 전문 검색 템플릿이 없음, 건너뜀" -#: commands/dropcmds.c:323 +#: commands/dropcmds.c:320 #, c-format msgid "text search configuration \"%s\" does not exist, skipping" msgstr "\"%s\" 전문 검색 구성이 없음, 건너뜀" -#: commands/dropcmds.c:328 +#: commands/dropcmds.c:325 #, c-format msgid "extension \"%s\" does not exist, skipping" msgstr "\"%s\" 확장 모듈 없음, 건너 뜀" @@ -6723,276 +6786,281 @@ msgstr "\"%s\" 확장 모듈 없음, 건너 뜀" msgid "function %s(%s) does not exist, skipping" msgstr "%s(%s) 함수가 없음, 건너뜀" -#: commands/dropcmds.c:344 +#: commands/dropcmds.c:348 #, c-format msgid "aggregate %s(%s) does not exist, skipping" msgstr "%s(%s) 집계 함수 없음, 건너뜀" -#: commands/dropcmds.c:353 +#: commands/dropcmds.c:361 #, c-format msgid "operator %s does not exist, skipping" msgstr "%s 연산자가 없음, 건너뜀" -#: commands/dropcmds.c:358 +#: commands/dropcmds.c:367 #, c-format msgid "language \"%s\" does not exist, skipping" msgstr "\"%s\" 프로시주얼 언어 없음, 건너뜀" -#: commands/dropcmds.c:367 +#: commands/dropcmds.c:376 #, c-format msgid "cast from type %s to type %s does not exist, skipping" msgstr "%s 형에서 %s 형으로 바꾸는 형변환 규칙(cast)이 없음, 건너뜀" -#: commands/dropcmds.c:376 +#: commands/dropcmds.c:385 #, c-format msgid "transform for type %s language \"%s\" does not exist, skipping" msgstr "%s 형변환자 (사용언어 \"%s\") 없음, 건너뜀" -#: commands/dropcmds.c:384 +#: commands/dropcmds.c:393 #, c-format msgid "trigger \"%s\" for relation \"%s\" does not exist, skipping" msgstr " \"%s\" 트리거가 \"%s\" 릴레이션에 지정된 것이 없음, 건너뜀" -#: commands/dropcmds.c:393 +#: 
commands/dropcmds.c:402 #, c-format msgid "policy \"%s\" for relation \"%s\" does not exist, skipping" msgstr " \"%s\" 정책이 \"%s\" 릴레이션에 지정된 것이 없음, 건너뜀" -#: commands/dropcmds.c:400 +#: commands/dropcmds.c:409 #, c-format msgid "event trigger \"%s\" does not exist, skipping" msgstr "\"%s\" 이벤트 트리거 없음, 건너뜀" -#: commands/dropcmds.c:406 +#: commands/dropcmds.c:415 #, c-format msgid "rule \"%s\" for relation \"%s\" does not exist, skipping" msgstr " \"%s\" 룰(rule)이 \"%s\" 릴레이션에 지정된 것이 없음, 건너뜀" -#: commands/dropcmds.c:413 +#: commands/dropcmds.c:422 #, c-format msgid "foreign-data wrapper \"%s\" does not exist, skipping" msgstr "\"%s\" 외부 자료 래퍼가 없음, 건너뜀" -#: commands/dropcmds.c:417 +#: commands/dropcmds.c:426 #, c-format msgid "server \"%s\" does not exist, skipping" msgstr "\"%s\" 서버가 없음, 건너뜀" -#: commands/dropcmds.c:426 +#: commands/dropcmds.c:435 #, c-format msgid "operator class \"%s\" does not exist for access method \"%s\", skipping" msgstr "" "\"%s\" 연산자 클래스는 \"%s\" 인덱스 접근 방법에서 사용할 수 없음, 건너뜀" -#: commands/dropcmds.c:438 +#: commands/dropcmds.c:447 #, c-format msgid "" "operator family \"%s\" does not exist for access method \"%s\", skipping" msgstr "\"%s\" 연산자 패밀리(\"%s\" 접근 방법)가 없음, 건너뜀" -#: commands/event_trigger.c:182 +#: commands/dropcmds.c:454 +#, c-format +msgid "publication \"%s\" does not exist, skipping" +msgstr "\"%s\" 발행 없음, 건너뜀" + +#: commands/event_trigger.c:185 #, c-format msgid "permission denied to create event trigger \"%s\"" msgstr "\"%s\" 이벤트 트리거를 만들 권한이 없음" -#: commands/event_trigger.c:184 +#: commands/event_trigger.c:187 #, c-format msgid "Must be superuser to create an event trigger." msgstr "슈퍼유저만 이벤트 트리거를 만들 수 있습니다." -#: commands/event_trigger.c:193 +#: commands/event_trigger.c:196 #, c-format msgid "unrecognized event name \"%s\"" msgstr "알 수 없는 이벤트 이름: \"%s\"" -#: commands/event_trigger.c:210 +#: commands/event_trigger.c:213 #, c-format msgid "unrecognized filter variable \"%s\"" msgstr "알 수 없는 필터 변수: \"%s\"" -#: commands/event_trigger.c:265 +#: commands/event_trigger.c:268 #, c-format msgid "filter value \"%s\" not recognized for filter variable \"%s\"" msgstr "\"%s\" 필터값은 \"%s\" 필터 변수으로 쓸 수 없음" #. translator: %s represents an SQL statement name -#: commands/event_trigger.c:271 commands/event_trigger.c:341 +#: commands/event_trigger.c:274 commands/event_trigger.c:344 #, c-format msgid "event triggers are not supported for %s" msgstr "%s 용 이벤트 트리거는 지원하지 않음" -#: commands/event_trigger.c:364 +#: commands/event_trigger.c:367 #, c-format msgid "filter variable \"%s\" specified more than once" msgstr "\"%s\" 필터 변수가 한 번 이상 사용되었습니다." -#: commands/event_trigger.c:512 commands/event_trigger.c:556 +#: commands/event_trigger.c:514 commands/event_trigger.c:557 #: commands/event_trigger.c:649 #, c-format msgid "event trigger \"%s\" does not exist" msgstr "\"%s\" 이벤트 트리거 없음" -#: commands/event_trigger.c:617 +#: commands/event_trigger.c:618 #, c-format msgid "permission denied to change owner of event trigger \"%s\"" msgstr "\"%s\" 이벤트 트리거 소유주를 변경할 권한이 없음" -#: commands/event_trigger.c:619 +#: commands/event_trigger.c:620 #, c-format msgid "The owner of an event trigger must be a superuser." msgstr "이벤트 트리거 소유주는 슈퍼유저여야 합니다." -#: commands/event_trigger.c:1438 +#: commands/event_trigger.c:1464 #, c-format msgid "%s can only be called in a sql_drop event trigger function" msgstr "%s 객체는 sql_drop 이벤트 트리거 함수 안에서만 호출 되어야 합니다." 
-#: commands/event_trigger.c:1558 commands/event_trigger.c:1579 +#: commands/event_trigger.c:1584 commands/event_trigger.c:1605 #, c-format msgid "%s can only be called in a table_rewrite event trigger function" msgstr "" "%s 객체는 table_rewrite 이벤트 트리거 함수 안에서만 호출 되어야 합니다." -#: commands/event_trigger.c:1989 +#: commands/event_trigger.c:2015 #, c-format msgid "%s can only be called in an event trigger function" msgstr "%s 객체는 이벤트 트리거 함수 안에서만 호출 되어야 합니다." -#: commands/explain.c:185 +#: commands/explain.c:194 #, c-format msgid "unrecognized value for EXPLAIN option \"%s\": \"%s\"" msgstr "\"%s\" EXPLAIN 옵션에서 쓸 수 없는 값: \"%s\"" -#: commands/explain.c:191 +#: commands/explain.c:201 #, c-format msgid "unrecognized EXPLAIN option \"%s\"" msgstr "잘못된 EXPLAIN 옵션: \"%s\"" -#: commands/explain.c:198 +#: commands/explain.c:209 #, c-format msgid "EXPLAIN option BUFFERS requires ANALYZE" msgstr "BUFFERS 옵션은 EXPLAIN ANALYZE에서만 쓸 수 있습니다." -#: commands/explain.c:207 +#: commands/explain.c:218 #, c-format msgid "EXPLAIN option TIMING requires ANALYZE" msgstr "TIMING 옵션은 EXPLAIN ANALYZE에서만 쓸 수 있습니다." -#: commands/extension.c:155 commands/extension.c:2719 +#: commands/extension.c:168 commands/extension.c:2907 #, c-format msgid "extension \"%s\" does not exist" msgstr "\"%s\" 이름의 확장 모듈이 없습니다" -#: commands/extension.c:254 commands/extension.c:263 commands/extension.c:275 -#: commands/extension.c:285 +#: commands/extension.c:267 commands/extension.c:276 commands/extension.c:288 +#: commands/extension.c:298 #, c-format msgid "invalid extension name: \"%s\"" msgstr "잘못된 확장 모듈 이름: \"%s\"" -#: commands/extension.c:255 +#: commands/extension.c:268 #, c-format msgid "Extension names must not be empty." msgstr "확장 모듈 이름을 지정하세요." -#: commands/extension.c:264 +#: commands/extension.c:277 #, c-format msgid "Extension names must not contain \"--\"." msgstr "확장 모듈 이름에 \"--\" 문자가 포함될 수 없습니다." -#: commands/extension.c:276 +#: commands/extension.c:289 #, c-format msgid "Extension names must not begin or end with \"-\"." msgstr "확장 모듈 이름의 시작과 끝에는 \"-\" 문자를 사용할 수 없습니다." -#: commands/extension.c:286 +#: commands/extension.c:299 #, c-format msgid "Extension names must not contain directory separator characters." msgstr "확장 모듈 이름에는 디렉터리 구분 문자를 사용할 수 없습니다." -#: commands/extension.c:301 commands/extension.c:310 commands/extension.c:319 -#: commands/extension.c:329 +#: commands/extension.c:314 commands/extension.c:323 commands/extension.c:332 +#: commands/extension.c:342 #, c-format msgid "invalid extension version name: \"%s\"" msgstr "잘못된 확장 모듈 버전 이름: \"%s\"" -#: commands/extension.c:302 +#: commands/extension.c:315 #, c-format msgid "Version names must not be empty." msgstr "버전 이름은 비어있으면 안됩니다" -#: commands/extension.c:311 +#: commands/extension.c:324 #, c-format msgid "Version names must not contain \"--\"." msgstr "버전 이름에 \"--\" 문자가 포함될 수 없습니다." -#: commands/extension.c:320 +#: commands/extension.c:333 #, c-format msgid "Version names must not begin or end with \"-\"." msgstr "버전 이름의 앞 뒤에 \"-\" 문자를 쓸 수 없습니다." -#: commands/extension.c:330 +#: commands/extension.c:343 #, c-format msgid "Version names must not contain directory separator characters." msgstr "버전 이름에는 디렉터리 분리 문자를 쓸 수 없습니다." 
-#: commands/extension.c:480 +#: commands/extension.c:493 #, c-format msgid "could not open extension control file \"%s\": %m" msgstr "\"%s\" 확장 모듈 제어 파일 열기 실패: %m" -#: commands/extension.c:502 commands/extension.c:512 +#: commands/extension.c:515 commands/extension.c:525 #, c-format msgid "parameter \"%s\" cannot be set in a secondary extension control file" msgstr "\"%s\" 매개 변수는 이차 확장 모듈 제어 파일에서는 사용할 수 없습니다." -#: commands/extension.c:551 +#: commands/extension.c:564 #, c-format msgid "\"%s\" is not a valid encoding name" msgstr "\"%s\" 이름은 잘못된 인코딩 이름임" -#: commands/extension.c:565 +#: commands/extension.c:578 #, c-format msgid "parameter \"%s\" must be a list of extension names" msgstr "\"%s\" 매개 변수는 확장 모듈 이름 목록이어야 함" -#: commands/extension.c:572 +#: commands/extension.c:585 #, c-format msgid "unrecognized parameter \"%s\" in file \"%s\"" msgstr "알 수 없는 \"%s\" 매개 변수가 \"%s\" 파일 안에 있습니다." -#: commands/extension.c:581 +#: commands/extension.c:594 #, c-format msgid "parameter \"schema\" cannot be specified when \"relocatable\" is true" msgstr "" "\"relocatable\" 값이 true 인 경우 \"schema\" 매개 변수는 사용할 수 없습니다." -#: commands/extension.c:722 +#: commands/extension.c:761 #, c-format msgid "" "transaction control statements are not allowed within an extension script" msgstr "확장 모듈 스크립트 안에서는 트랜잭션 제어 구문은 사용할 수 없습니다." -#: commands/extension.c:790 +#: commands/extension.c:807 #, c-format msgid "permission denied to create extension \"%s\"" msgstr "\"%s\" 확장 모듈을 만들 권한이 없습니다" -#: commands/extension.c:792 +#: commands/extension.c:809 #, c-format msgid "Must be superuser to create this extension." msgstr "확장 모듈은 슈퍼유저만 만들 수 있습니다." -#: commands/extension.c:796 +#: commands/extension.c:813 #, c-format msgid "permission denied to update extension \"%s\"" msgstr "\"%s\" 확장 모듈을 업데이트할 권한이 없습니다." -#: commands/extension.c:798 +#: commands/extension.c:815 #, c-format msgid "Must be superuser to update this extension." msgstr "슈퍼유저만 해당 모듈을 업데이트 할 수 있습니다." -#: commands/extension.c:1080 +#: commands/extension.c:1097 #, c-format msgid "" "extension \"%s\" has no update path from version \"%s\" to version \"%s\"" @@ -7000,64 +7068,72 @@ msgstr "" "\"%s\" 확장 모듈을 \"%s\" 버전에서 \"%s\" 버전으로 업데이트할 방법이 없습니" "다." -#: commands/extension.c:1262 commands/extension.c:2779 +#: commands/extension.c:1304 commands/extension.c:2968 #, c-format msgid "version to install must be specified" msgstr "설치할 버전을 지정해야 합니다." -#: commands/extension.c:1279 +#: commands/extension.c:1326 #, c-format msgid "FROM version must be different from installation target version \"%s\"" msgstr "FROM 절에 지정한 버전은 설치된 \"%s\" 버전과 달라야 합니다" -#: commands/extension.c:1344 +#: commands/extension.c:1391 +#, c-format +msgid "" +"extension \"%s\" has no installation script nor update path for version \"%s" +"\"" +msgstr "" +"\"%s\" 확장 모듈에는 \"%s\" 버전용 설치나 업데이트 스크립트가 없습니다." + +#: commands/extension.c:1426 #, c-format msgid "extension \"%s\" must be installed in schema \"%s\"" msgstr "\"%s\" 확장 모듈은 \"%s\" 스키마 안에 설치되어야 합니다." -#: commands/extension.c:1436 +#: commands/extension.c:1579 #, c-format msgid "cyclic dependency detected between extensions \"%s\" and \"%s\"" msgstr "\"%s\" 확장 모듈과 \"%s\" 확장 모듈이 서로 의존 관계입니다" -#: commands/extension.c:1441 +#: commands/extension.c:1584 #, c-format msgid "installing required extension \"%s\"" msgstr "\"%s\" 확장 모듈이 필요해서 실치 하는 중" -#: commands/extension.c:1469 commands/extension.c:2924 +#: commands/extension.c:1608 #, c-format msgid "required extension \"%s\" is not installed" msgstr "\"%s\" 확장 모듈이 필요한데, 설치되어 있지 않습니다." 
-#: commands/extension.c:1471 +#: commands/extension.c:1611 #, c-format msgid "Use CREATE EXTENSION ... CASCADE to install required extensions too." msgstr "" "필요한 모듈을 함께 설치하려면, CREATE EXTENSION ... CASCADE 구문을 사용하세" "요." -#: commands/extension.c:1535 +#: commands/extension.c:1648 #, c-format msgid "extension \"%s\" already exists, skipping" msgstr "\"%s\" 확장 모듈이 이미 있음, 건너뜀" -#: commands/extension.c:1542 +#: commands/extension.c:1655 #, c-format msgid "extension \"%s\" already exists" msgstr "\"%s\" 이름의 확장 모듈이 이미 있습니다" -#: commands/extension.c:1553 +#: commands/extension.c:1666 #, c-format msgid "nested CREATE EXTENSION is not supported" msgstr "중첩된 CREATE EXTENSION 구문은 지원하지 않습니다." -#: commands/extension.c:1681 +#: commands/extension.c:1847 #, c-format msgid "cannot drop extension \"%s\" because it is being modified" msgstr "%s 의존객체들은 시스템 객체이기 때문에 삭제 될 수 없습니다" -#: commands/extension.c:2152 +#: commands/extension.c:2349 #, c-format msgid "" "pg_extension_config_dump() can only be called from an SQL script executed by " @@ -7066,44 +7142,44 @@ msgstr "" "pg_extension_config_dump() 함수는 CREATE EXTENSION 명령에서 내부적으로 사용하" "는 SQL 스크립트 내에서만 사용할 수 있습니다." -#: commands/extension.c:2164 +#: commands/extension.c:2361 #, c-format msgid "OID %u does not refer to a table" msgstr "%u OID 자료가 테이블에 없습니다" -#: commands/extension.c:2169 +#: commands/extension.c:2366 #, c-format msgid "table \"%s\" is not a member of the extension being created" msgstr "\"%s\" 테이블은 만들려고 하는 확장 모듈의 구성 요소가 아닙니다." -#: commands/extension.c:2534 +#: commands/extension.c:2722 #, c-format msgid "" "cannot move extension \"%s\" into schema \"%s\" because the extension " "contains the schema" msgstr "\"%s\" 확장 모듈이 \"%s\" 스키마에 이미 있어 옮길 수 없습니다." -#: commands/extension.c:2574 commands/extension.c:2637 +#: commands/extension.c:2763 commands/extension.c:2826 #, c-format msgid "extension \"%s\" does not support SET SCHEMA" msgstr "\"%s\" 확장 모듈은 SET SCHEMA 구문을 지원하지 않음" -#: commands/extension.c:2639 +#: commands/extension.c:2828 #, c-format msgid "%s is not in the extension's schema \"%s\"" msgstr "%s 객체가 확장 모듈 스키마인 \"%s\" 안에 없음" -#: commands/extension.c:2699 +#: commands/extension.c:2887 #, c-format msgid "nested ALTER EXTENSION is not supported" msgstr "중첩된 ALTER EXTENSION 구문을 지원하지 않음" -#: commands/extension.c:2790 +#: commands/extension.c:2979 #, c-format msgid "version \"%s\" of extension \"%s\" is already installed" msgstr "\"%s\" 버전의 \"%s\" 확장 모듈이 이미 설치 되어 있음" -#: commands/extension.c:3041 +#: commands/extension.c:3230 #, c-format msgid "" "cannot add schema \"%s\" to extension \"%s\" because the schema contains the " @@ -7112,12 +7188,12 @@ msgstr "" "\"%s\" 스키마에 \"%s\" 확장 모듈을 추가할 수 없음, 이미 해당 스키마 안에 포" "함되어 있음" -#: commands/extension.c:3069 +#: commands/extension.c:3258 #, c-format msgid "%s is not a member of extension \"%s\"" msgstr "\"%s\" 객체는 \"%s\" 확장 모듈의 구성 요소가 아닙니다" -#: commands/extension.c:3135 +#: commands/extension.c:3324 #, c-format msgid "file \"%s\" is too large" msgstr "\"%s\" 파일이 너무 큽니다." @@ -7147,32 +7223,32 @@ msgstr "슈퍼유저만 외부 자료 래퍼의 소유주를 바꿀 수 있습 msgid "The owner of a foreign-data wrapper must be a superuser." msgstr "외부 자료 래퍼의 소유주는 슈퍼유저여야 합니다." 
-#: commands/foreigncmds.c:292 commands/foreigncmds.c:709 foreign/foreign.c:671 +#: commands/foreigncmds.c:291 commands/foreigncmds.c:706 foreign/foreign.c:667 #, c-format msgid "foreign-data wrapper \"%s\" does not exist" msgstr "\"%s\" 외부 자료 래퍼가 없음" -#: commands/foreigncmds.c:584 +#: commands/foreigncmds.c:582 #, c-format msgid "permission denied to create foreign-data wrapper \"%s\"" msgstr "\"%s\" 외부 자료 래퍼를 만들 권한이 없음" -#: commands/foreigncmds.c:586 +#: commands/foreigncmds.c:584 #, c-format msgid "Must be superuser to create a foreign-data wrapper." msgstr "슈퍼유저만 외부 자료 래퍼를 만들 수 있습니다." -#: commands/foreigncmds.c:699 +#: commands/foreigncmds.c:696 #, c-format msgid "permission denied to alter foreign-data wrapper \"%s\"" msgstr "\"%s\" 외부 자료 래퍼를 변경할 권한이 없음" -#: commands/foreigncmds.c:701 +#: commands/foreigncmds.c:698 #, c-format msgid "Must be superuser to alter a foreign-data wrapper." msgstr "슈퍼유저만 외부 자료 래퍼를 변경할 수 있습니다." -#: commands/foreigncmds.c:732 +#: commands/foreigncmds.c:729 #, c-format msgid "" "changing the foreign-data wrapper handler can change behavior of existing " @@ -7181,7 +7257,7 @@ msgstr "" "외부 자료 랩퍼 핸들러를 바꾸면, 그것을 사용하는 외부 테이블의 내용이 바뀔 수 " "있습니다." -#: commands/foreigncmds.c:747 +#: commands/foreigncmds.c:744 #, c-format msgid "" "changing the foreign-data wrapper validator can cause the options for " @@ -7190,37 +7266,47 @@ msgstr "" "외부 자료 래퍼 유효성 검사기를 바꾸면 종속 객체에 대한 옵션이 유효하지 않을 " "수 있음" -#: commands/foreigncmds.c:1165 +#: commands/foreigncmds.c:890 +#, c-format +msgid "server \"%s\" already exists, skipping" +msgstr "\"%s\" 이름의 외부 서버가 이미 있음, 건너뜀" + +#: commands/foreigncmds.c:1175 #, c-format -msgid "user mapping \"%s\" already exists for server %s" +msgid "user mapping for \"%s\" already exists for server %s, skipping" +msgstr "\"%s\" 사용자 매핑이 %s 서버용으로 이미 있음, 건너뜀" + +#: commands/foreigncmds.c:1185 +#, c-format +msgid "user mapping for \"%s\" already exists for server %s" msgstr "\"%s\" 사용자 매핑이 %s 서버용으로 이미 있음" -#: commands/foreigncmds.c:1259 commands/foreigncmds.c:1375 +#: commands/foreigncmds.c:1278 commands/foreigncmds.c:1393 #, c-format -msgid "user mapping \"%s\" does not exist for the server" +msgid "user mapping for \"%s\" does not exist for the server" msgstr "해당 서버용 \"%s\" 사용자 매핑이 없음" -#: commands/foreigncmds.c:1362 +#: commands/foreigncmds.c:1380 #, c-format msgid "server does not exist, skipping" msgstr "서버가 없음, 건너뜀" -#: commands/foreigncmds.c:1380 +#: commands/foreigncmds.c:1398 #, c-format -msgid "user mapping \"%s\" does not exist for the server, skipping" +msgid "user mapping for \"%s\" does not exist for the server, skipping" msgstr "\"%s\" 사용자 매핑이 해당 서버용으로 없음, 건너뜀" -#: commands/foreigncmds.c:1532 foreign/foreign.c:361 +#: commands/foreigncmds.c:1549 foreign/foreign.c:357 #, c-format msgid "foreign-data wrapper \"%s\" has no handler" msgstr "\"%s\" 외부 자료 래퍼용 핸들러가 없음" -#: commands/foreigncmds.c:1538 +#: commands/foreigncmds.c:1555 #, c-format msgid "foreign-data wrapper \"%s\" does not support IMPORT FOREIGN SCHEMA" msgstr "\"%s\" 외부 자료 래퍼는 IMPORT FOREIGN SCHEMA 구문을 지원하지 않음" -#: commands/foreigncmds.c:1631 +#: commands/foreigncmds.c:1658 #, c-format msgid "importing foreign table \"%s\"" msgstr "\"%s\" 외부 테이블 가져 오는 중" @@ -7250,89 +7336,89 @@ msgstr "\"%s\" 자료형이 아직 정의되지 않았음" msgid "Creating a shell type definition." 
msgstr "셸 타입 정의를 만들고 있습니다" -#: commands/functioncmds.c:239 +#: commands/functioncmds.c:233 #, c-format msgid "SQL function cannot accept shell type %s" msgstr "SQL 함수는 셸 타입 %s 수용할 수 없음" -#: commands/functioncmds.c:245 +#: commands/functioncmds.c:239 #, c-format msgid "aggregate cannot accept shell type %s" msgstr "집계 함수는 셸 타입 %s 수용할 수 없음" -#: commands/functioncmds.c:250 +#: commands/functioncmds.c:244 #, c-format msgid "argument type %s is only a shell" msgstr "%s 인자 자료형은 단지 셸입니다" -#: commands/functioncmds.c:260 +#: commands/functioncmds.c:254 #, c-format msgid "type %s does not exist" msgstr "%s 자료형 없음" -#: commands/functioncmds.c:274 +#: commands/functioncmds.c:268 #, c-format msgid "aggregates cannot accept set arguments" msgstr "집계 함수는 세트 인자를 입력 인자로 쓸 수 없음" -#: commands/functioncmds.c:278 +#: commands/functioncmds.c:272 #, c-format msgid "functions cannot accept set arguments" msgstr "함수는 세트 인자를 입력 인자로 쓸 수 없음" -#: commands/functioncmds.c:288 +#: commands/functioncmds.c:282 #, c-format msgid "VARIADIC parameter must be the last input parameter" msgstr "VARIADIC 매개 변수는 마지막 입력 매개 변수여야 함" -#: commands/functioncmds.c:316 +#: commands/functioncmds.c:310 #, c-format msgid "VARIADIC parameter must be an array" msgstr "VARIADIC 매개 변수는 배열이어야 함" -#: commands/functioncmds.c:356 +#: commands/functioncmds.c:350 #, c-format msgid "parameter name \"%s\" used more than once" msgstr "\"%s\" 매개 변수가 여러 번 사용 됨" -#: commands/functioncmds.c:371 +#: commands/functioncmds.c:365 #, c-format msgid "only input parameters can have default values" msgstr "입력 매개 변수에서만 기본값을 사용할 수 있음" -#: commands/functioncmds.c:386 +#: commands/functioncmds.c:380 #, c-format msgid "cannot use table references in parameter default value" msgstr "입력 매개 변수 초기값으로 테이블 참조형은 사용할 수 없음" -#: commands/functioncmds.c:410 +#: commands/functioncmds.c:404 #, c-format msgid "input parameters after one with a default value must also have defaults" msgstr "" "기본 값이 있는 입력 매개 변수 뒤에 오는 입력 매개 변수에도 기본 값이 있어야 " "함" -#: commands/functioncmds.c:701 +#: commands/functioncmds.c:700 #, c-format msgid "no function body specified" msgstr "함수 본문(body) 부분이 빠졌습니다" -#: commands/functioncmds.c:711 +#: commands/functioncmds.c:710 #, c-format msgid "no language specified" msgstr "처리할 프로시주얼 언어를 지정하지 않았습니다" -#: commands/functioncmds.c:736 commands/functioncmds.c:1243 +#: commands/functioncmds.c:735 commands/functioncmds.c:1242 #, c-format msgid "COST must be positive" msgstr "COST는 양수여야 함" -#: commands/functioncmds.c:744 commands/functioncmds.c:1251 +#: commands/functioncmds.c:743 commands/functioncmds.c:1250 #, c-format msgid "ROWS must be positive" msgstr "ROWS는 양수여야 함" -#: commands/functioncmds.c:785 +#: commands/functioncmds.c:784 #, c-format msgid "unrecognized function attribute \"%s\" ignored" msgstr "알수 없는 함수 속성 \"%s\" 무시됨" @@ -7342,20 +7428,20 @@ msgstr "알수 없는 함수 속성 \"%s\" 무시됨" msgid "only one AS item needed for language \"%s\"" msgstr "\"%s\" 언어에는 하나의 AS 항목만 필요함" -#: commands/functioncmds.c:929 commands/functioncmds.c:2119 -#: commands/proclang.c:563 +#: commands/functioncmds.c:930 commands/functioncmds.c:2131 +#: commands/proclang.c:561 #, c-format msgid "language \"%s\" does not exist" msgstr "\"%s\" 프로시주얼 언어 없음" -#: commands/functioncmds.c:931 commands/functioncmds.c:2121 +#: commands/functioncmds.c:932 commands/functioncmds.c:2133 #, c-format msgid "Use CREATE LANGUAGE to load the language into the database." msgstr "" "데이터베이스 내에서 프로시주얼 언어를 사용하려면 먼저 CREATE LANGUAGE 명령으" "로 사용할 언어를 등록하세요." 
-#: commands/functioncmds.c:966 commands/functioncmds.c:1235 +#: commands/functioncmds.c:967 commands/functioncmds.c:1234 #, c-format msgid "only superuser can define a leakproof function" msgstr "슈퍼유저만 leakproof 함수를 만들 수 있습니다" @@ -7370,37 +7456,37 @@ msgstr "OUT 매개 변수로 인해 함수 결과 형식은 %s이어야 함" msgid "function result type must be specified" msgstr "함수의 리턴 자료형을 지정해야 합니다" -#: commands/functioncmds.c:1077 commands/functioncmds.c:1255 +#: commands/functioncmds.c:1077 commands/functioncmds.c:1254 #, c-format msgid "ROWS is not applicable when function does not return a set" msgstr "함수에서 세트를 반환하지 않는 경우 ROWS를 적용할 수 없음" -#: commands/functioncmds.c:1412 +#: commands/functioncmds.c:1426 #, c-format msgid "source data type %s is a pseudo-type" msgstr "%s 원본 자료형이 의사자료형(pseudo-type) 입니다" -#: commands/functioncmds.c:1418 +#: commands/functioncmds.c:1432 #, c-format msgid "target data type %s is a pseudo-type" msgstr "%s 대상 자료형이 의사자료형(pseudo-type) 입니다" -#: commands/functioncmds.c:1442 +#: commands/functioncmds.c:1456 #, c-format msgid "cast will be ignored because the source data type is a domain" msgstr "원본 자료형이 도메인이어서 자료형 변환을 무시합니다." -#: commands/functioncmds.c:1447 +#: commands/functioncmds.c:1461 #, c-format msgid "cast will be ignored because the target data type is a domain" msgstr "대상 자료형이 도메인이어서 자료형 변환을 무시합니다." -#: commands/functioncmds.c:1474 +#: commands/functioncmds.c:1486 #, c-format msgid "cast function must take one to three arguments" msgstr "형변환 함수는 1-3개의 인자만 지정할 수 있습니다" -#: commands/functioncmds.c:1478 +#: commands/functioncmds.c:1490 #, c-format msgid "" "argument of cast function must match or be binary-coercible from source data " @@ -7409,17 +7495,17 @@ msgstr "" "형변환 함수의 인자로 쓸 자료형은 원본 자료형과 일치하거나 바이너리 차원으로 " "같은 자료형이어야 함" -#: commands/functioncmds.c:1482 +#: commands/functioncmds.c:1494 #, c-format -msgid "second argument of cast function must be type integer" -msgstr "형변화 함수의 두번째 인자 자료형은 반드시 integer여야합니다" +msgid "second argument of cast function must be type %s" +msgstr "형변화 함수의 두번째 인자 자료형은 반드시 %s 형이여야합니다" -#: commands/functioncmds.c:1486 +#: commands/functioncmds.c:1499 #, c-format -msgid "third argument of cast function must be type boolean" -msgstr "형변화 함수의 세번째 인자 자료형은 반드시 boolean이여야합니다" +msgid "third argument of cast function must be type %s" +msgstr "형변화 함수의 세번째 인자 자료형은 반드시 %s 형이여야합니다" -#: commands/functioncmds.c:1490 +#: commands/functioncmds.c:1504 #, c-format msgid "" "return data type of cast function must match or be binary-coercible to " @@ -7428,251 +7514,253 @@ msgstr "" "형변환 함수의 반환 자료형은 대상 자료형과 일치하거나 바이너리 차원으로 같은 " "자료형이어야 함" -#: commands/functioncmds.c:1501 +#: commands/functioncmds.c:1515 #, c-format msgid "cast function must not be volatile" msgstr "형변환 함수는 volatile 특성이 없어야합니다" -#: commands/functioncmds.c:1506 +#: commands/functioncmds.c:1520 #, c-format msgid "cast function must not be an aggregate function" msgstr "형변환 함수는 집계 함수가 아니여야합니다" -#: commands/functioncmds.c:1510 +#: commands/functioncmds.c:1524 #, c-format msgid "cast function must not be a window function" msgstr "형변환 함수는 윈도우 함수가 아니여야 함" -#: commands/functioncmds.c:1514 +#: commands/functioncmds.c:1528 #, c-format msgid "cast function must not return a set" msgstr "형변환 함수는 세트(set)를 리턴할 수 없습니다" -#: commands/functioncmds.c:1540 +#: commands/functioncmds.c:1554 #, c-format msgid "must be superuser to create a cast WITHOUT FUNCTION" msgstr "CREATE CAST ... 
WITHOUT FUNCTION 명령은 슈퍼유저만 실행할 수 있습니다" -#: commands/functioncmds.c:1555 +#: commands/functioncmds.c:1569 #, c-format msgid "source and target data types are not physically compatible" msgstr "원본 자료형과 대상 자료형이 서로 논리적인 호환성이 없습니다" -#: commands/functioncmds.c:1570 +#: commands/functioncmds.c:1584 #, c-format msgid "composite data types are not binary-compatible" msgstr "복합 자료형은 바이너리와 호환되지 않음" -#: commands/functioncmds.c:1576 +#: commands/functioncmds.c:1590 #, c-format msgid "enum data types are not binary-compatible" msgstr "열거 자료형은 바이너리와 호환되지 않음" -#: commands/functioncmds.c:1582 +#: commands/functioncmds.c:1596 #, c-format msgid "array data types are not binary-compatible" msgstr "배열 자료형은 바이너리와 호환되지 않음" -#: commands/functioncmds.c:1599 +#: commands/functioncmds.c:1613 #, c-format msgid "domain data types must not be marked binary-compatible" msgstr "도메인 자료형은 바이너리와 호환되지 않음" -#: commands/functioncmds.c:1609 +#: commands/functioncmds.c:1623 #, c-format msgid "source data type and target data type are the same" msgstr "원본 자료형과 대상 자료형의 형태가 같습니다" -#: commands/functioncmds.c:1642 +#: commands/functioncmds.c:1656 #, c-format msgid "cast from type %s to type %s already exists" msgstr "%s 형에서 %s 형으로 변환하는 형변환 규칙(cast)이 이미 있습니다" -#: commands/functioncmds.c:1717 +#: commands/functioncmds.c:1729 #, c-format msgid "cast from type %s to type %s does not exist" msgstr "%s 형에서 %s 형으로 바꾸는 형변환 규칙(cast)가 없음" -#: commands/functioncmds.c:1756 +#: commands/functioncmds.c:1768 #, c-format msgid "transform function must not be volatile" msgstr "형변환 함수는 volatile 특성이 없어야합니다" -#: commands/functioncmds.c:1760 +#: commands/functioncmds.c:1772 #, c-format msgid "transform function must not be an aggregate function" msgstr "형변환 함수는 집계 함수가 아니여야합니다" -#: commands/functioncmds.c:1764 +#: commands/functioncmds.c:1776 #, c-format msgid "transform function must not be a window function" msgstr "형변환 함수는 윈도우 함수가 아니여야 함" -#: commands/functioncmds.c:1768 +#: commands/functioncmds.c:1780 #, c-format msgid "transform function must not return a set" msgstr "형변환 함수는 세트(set)를 리턴할 수 없습니다" -#: commands/functioncmds.c:1772 +#: commands/functioncmds.c:1784 #, c-format msgid "transform function must take one argument" msgstr "형변환 함수는 1개의 인자만 지정할 수 있습니다" -#: commands/functioncmds.c:1776 +#: commands/functioncmds.c:1788 #, c-format -msgid "first argument of transform function must be type \"internal\"" -msgstr "형변화 함수의 첫번째 인자 자료형은 반드시 \"internal\"이여야합니다" +msgid "first argument of transform function must be type %s" +msgstr "형변화 함수의 첫번째 인자 자료형은 반드시 %s 형이여야합니다" -#: commands/functioncmds.c:1813 +#: commands/functioncmds.c:1826 #, c-format msgid "data type %s is a pseudo-type" msgstr "%s 자료형은 의사자료형(pseudo-type) 입니다" -#: commands/functioncmds.c:1819 +#: commands/functioncmds.c:1832 #, c-format msgid "data type %s is a domain" msgstr "%s 자료형은 도메인입니다" -#: commands/functioncmds.c:1859 +#: commands/functioncmds.c:1872 #, c-format -msgid "return data type of FROM SQL function must be \"internal\"" -msgstr "FROM SQL 함수의 반환 자료형은 \"internal\" 이어야 함" +msgid "return data type of FROM SQL function must be %s" +msgstr "FROM SQL 함수의 반환 자료형은 %s 형이어야 함" -#: commands/functioncmds.c:1884 +#: commands/functioncmds.c:1898 #, c-format msgid "return data type of TO SQL function must be the transform data type" msgstr "TO SQL 함수의 반환 자료형은 변환 자료형이어야 함" -#: commands/functioncmds.c:1911 +#: commands/functioncmds.c:1925 #, c-format msgid "transform for type %s language \"%s\" already exists" msgstr "%s 자료형(대상 언어: \"%s\")을 위한 형변환 규칙은 이미 있습니다." 
-#: commands/functioncmds.c:2002 +#: commands/functioncmds.c:2014 #, c-format msgid "transform for type %s language \"%s\" does not exist" msgstr "%s 자료형(대상 언어: \"%s\")을 위한 형변환 규칙은 없습니다." -#: commands/functioncmds.c:2053 +#: commands/functioncmds.c:2065 #, c-format msgid "function %s already exists in schema \"%s\"" msgstr "%s 함수는 이미 \"%s\" 스키마안에 있습니다" -#: commands/functioncmds.c:2106 +#: commands/functioncmds.c:2118 #, c-format msgid "no inline code specified" msgstr "내장 코드가 빠졌습니다" -#: commands/functioncmds.c:2151 +#: commands/functioncmds.c:2163 #, c-format msgid "language \"%s\" does not support inline code execution" msgstr "\"%s\" 프로시주얼 언어는 내장 코드 실행 기능을 지원하지 않습니다" -#: commands/indexcmds.c:349 +#: commands/indexcmds.c:354 #, c-format msgid "must specify at least one column" -msgstr "적어도 하나 이상의 열을 지정해 주십시오" +msgstr "적어도 하나 이상의 칼럼을 지정해 주십시오" -#: commands/indexcmds.c:353 +#: commands/indexcmds.c:358 #, c-format msgid "cannot use more than %d columns in an index" -msgstr "하나의 인덱스에서는 %d개보다 많은 열을 사용할 수 없습니다" +msgstr "하나의 인덱스에서는 %d개보다 많은 칼럼을 사용할 수 없습니다" -#: commands/indexcmds.c:384 +#: commands/indexcmds.c:389 #, c-format msgid "cannot create index on foreign table \"%s\"" msgstr "\"%s\" 외부 테이블 대상으로 인덱스를 만들 수 없음" -#: commands/indexcmds.c:399 +#: commands/indexcmds.c:394 +#, c-format +msgid "cannot create index on partitioned table \"%s\"" +msgstr "\"%s\" 파티션된 테이블 대상으로 인덱스를 만들 수 없음" + +#: commands/indexcmds.c:409 #, c-format msgid "cannot create indexes on temporary tables of other sessions" msgstr "다른 세션의 임시 테이블에 인덱스를 만들 수는 없습니다" -#: commands/indexcmds.c:455 commands/tablecmds.c:546 commands/tablecmds.c:9694 +#: commands/indexcmds.c:474 commands/tablecmds.c:593 +#: commands/tablecmds.c:10506 #, c-format msgid "only shared relations can be placed in pg_global tablespace" msgstr "공유 관계만 pg_global 테이블스페이스에 배치할 수 있음" -#: commands/indexcmds.c:488 +#: commands/indexcmds.c:507 #, c-format msgid "substituting access method \"gist\" for obsolete method \"rtree\"" msgstr "사용하지 않는 \"rtree\" 방법을 \"gist\" 액세스 방법으로 대체하는 중" -#: commands/indexcmds.c:506 -#, c-format -msgid "hash indexes are not WAL-logged and their use is discouraged" -msgstr "hash 인덱스는 WAL 기록을 하지 않습니다. 
이 사용은 권장하지 않습니다" - -#: commands/indexcmds.c:511 +#: commands/indexcmds.c:525 #, c-format msgid "access method \"%s\" does not support unique indexes" msgstr "\"%s\" 인덱스 액세스 방법은 고유 인덱스를 지원하지 않습니다" -#: commands/indexcmds.c:516 +#: commands/indexcmds.c:530 #, c-format msgid "access method \"%s\" does not support multicolumn indexes" msgstr "\"%s\" 인덱스 액세스 방법은 다중 열 인덱스를 지원하지 않습니다" -#: commands/indexcmds.c:521 +#: commands/indexcmds.c:535 #, c-format msgid "access method \"%s\" does not support exclusion constraints" msgstr "\"%s\" 인덱스 접근 방법은 exclusion 제약 조건을 지원하지 않습니다" -#: commands/indexcmds.c:591 commands/indexcmds.c:611 +#: commands/indexcmds.c:607 commands/indexcmds.c:627 #, c-format msgid "index creation on system columns is not supported" msgstr "시스템 카탈로그 테이블에 대한 인덱스 만들기는 지원하지 않습니다" -#: commands/indexcmds.c:636 +#: commands/indexcmds.c:652 #, c-format msgid "%s %s will create implicit index \"%s\" for table \"%s\"" msgstr "%s %s 명령으로 \"%s\" 인덱스를 \"%s\" 테이블에 자동으로 만들었음" -#: commands/indexcmds.c:983 +#: commands/indexcmds.c:999 #, c-format msgid "functions in index predicate must be marked IMMUTABLE" msgstr "" "인덱스 술어(predicate)에서 사용하는 함수는 IMMUTABLE 특성이 있어야합니다" -#: commands/indexcmds.c:1049 parser/parse_utilcmd.c:1881 +#: commands/indexcmds.c:1065 parser/parse_utilcmd.c:2077 #, c-format msgid "column \"%s\" named in key does not exist" -msgstr "키에서 지정한 \"%s\" 열이 없습니다" +msgstr "키에서 지정한 \"%s\" 칼럼이 없습니다" -#: commands/indexcmds.c:1109 +#: commands/indexcmds.c:1125 #, c-format msgid "functions in index expression must be marked IMMUTABLE" msgstr "인덱스 식(expression)에 사용하는 함수는 IMMUTABLE 특성이 있어야합니다" -#: commands/indexcmds.c:1132 +#: commands/indexcmds.c:1148 #, c-format msgid "could not determine which collation to use for index expression" msgstr "해당 인덱스에서 사용할 정렬규칙(collation)을 결정할 수 없습니다." -#: commands/indexcmds.c:1140 commands/typecmds.c:827 parser/parse_expr.c:2608 -#: parser/parse_type.c:550 parser/parse_utilcmd.c:2807 utils/adt/misc.c:666 +#: commands/indexcmds.c:1156 commands/tablecmds.c:13396 +#: commands/typecmds.c:831 parser/parse_expr.c:2763 parser/parse_type.c:549 +#: parser/parse_utilcmd.c:3113 utils/adt/misc.c:661 #, c-format msgid "collations are not supported by type %s" msgstr "%s 자료형은 collation 지원 안함" -#: commands/indexcmds.c:1178 +#: commands/indexcmds.c:1194 #, c-format msgid "operator %s is not commutative" msgstr "%s 연산자는 교환법칙이 성립하지 않습니다" -#: commands/indexcmds.c:1180 +#: commands/indexcmds.c:1196 #, c-format msgid "Only commutative operators can be used in exclusion constraints." msgstr "" "exclude 제약조건용 인덱스를 만들 때는 교환법칙이 성립하는 연산자만 사용할 수 " "있습니다." -#: commands/indexcmds.c:1206 +#: commands/indexcmds.c:1222 #, c-format msgid "operator %s is not a member of operator family \"%s\"" -msgstr "%s 연산자는 \"%s\" 연산자 가족 구성원이 아닙니다." +msgstr "%s 연산자는 \"%s\" 연산자 패밀리 구성원이 아닙니다." -#: commands/indexcmds.c:1209 +#: commands/indexcmds.c:1225 #, c-format msgid "" "The exclusion operator must be related to the index operator class for the " @@ -7680,24 +7768,24 @@ msgid "" msgstr "" "제외 연산자는 해당 제약 조건용 인덱스 연산자 클래스의 소속이어야 합니다." 
-#: commands/indexcmds.c:1244 +#: commands/indexcmds.c:1260 #, c-format msgid "access method \"%s\" does not support ASC/DESC options" msgstr "\"%s\" 접근 방법은 ASC/DESC 옵션을 지원하지 않음" -#: commands/indexcmds.c:1249 +#: commands/indexcmds.c:1265 #, c-format msgid "access method \"%s\" does not support NULLS FIRST/LAST options" msgstr "\"%s\" 접근 방법은 NULLS FIRST/LAST 옵션을 지원하지 않음" -#: commands/indexcmds.c:1305 commands/typecmds.c:1935 +#: commands/indexcmds.c:1324 commands/typecmds.c:1928 #, c-format msgid "data type %s has no default operator class for access method \"%s\"" msgstr "" "%s 자료형은 \"%s\" 인덱스 액세스 방법을 위한 기본 연산자 클래스(operator " "class)가 없습니다. " -#: commands/indexcmds.c:1307 +#: commands/indexcmds.c:1326 #, c-format msgid "" "You must specify an operator class for the index or define a default " @@ -7706,34 +7794,34 @@ msgstr "" "이 인덱스를 위한 연산자 클래스를 지정하거나 먼저 이 자료형을 위한 기본 연산" "자 클래스를 정의해 두어야합니다" -#: commands/indexcmds.c:1336 commands/indexcmds.c:1344 +#: commands/indexcmds.c:1355 commands/indexcmds.c:1363 #: commands/opclasscmds.c:205 #, c-format msgid "operator class \"%s\" does not exist for access method \"%s\"" msgstr "" "\"%s\" 연산자 클래스는 \"%s\" 인덱스 액세스 방법에서 사용할 수 없습니다" -#: commands/indexcmds.c:1357 commands/typecmds.c:1923 +#: commands/indexcmds.c:1376 commands/typecmds.c:1916 #, c-format msgid "operator class \"%s\" does not accept data type %s" msgstr "\"%s\" 연산자 클래스는 %s 자료형을 사용할 수 없습니다" -#: commands/indexcmds.c:1447 +#: commands/indexcmds.c:1466 #, c-format msgid "there are multiple default operator classes for data type %s" msgstr "%s 자료형을 위한 기본 연산자 클래스가 여러개 있습니다" -#: commands/indexcmds.c:1838 +#: commands/indexcmds.c:1857 #, c-format msgid "table \"%s\" has no indexes" msgstr "\"%s\" 테이블에는 사용할 수 있는 인덱스가 없습니다" -#: commands/indexcmds.c:1893 +#: commands/indexcmds.c:1912 #, c-format msgid "can only reindex the currently open database" msgstr "열려있는 현재 데이터베이스에서만 reindex 명령을 사용할 수 있습니다" -#: commands/indexcmds.c:1993 +#: commands/indexcmds.c:2012 #, c-format msgid "table \"%s.%s\" was reindexed" msgstr "\"%s.%s\" 테이블의 인덱스들을 다시 만들었습니다." @@ -7764,7 +7852,7 @@ msgstr "" "구체화된 뷰의 하나 또는 하나 이상의 칼럼에 대한 WHERE 절 없는 고유 인덱스를 " "만드세요." 
-#: commands/matview.c:657 +#: commands/matview.c:678 #, c-format msgid "" "new data for materialized view \"%s\" contains duplicate rows without any " @@ -7773,7 +7861,7 @@ msgstr "" "\"%s\" 구체화된 뷰의 새 자료에 아무런 null 칼럼 없이 중복된 로우를 포함하고 " "있습니다" -#: commands/matview.c:659 +#: commands/matview.c:680 #, c-format msgid "Row: %s" msgstr "로우: %s" @@ -7781,163 +7869,163 @@ msgstr "로우: %s" #: commands/opclasscmds.c:126 #, c-format msgid "operator family \"%s\" does not exist for access method \"%s\"" -msgstr "\"%s\" 연산자 패밀리가 없음, 해당 접근 방법: \"%s\"" +msgstr "\"%s\" 연산자 없음, 해당 접근 방법: \"%s\"" #: commands/opclasscmds.c:264 #, c-format msgid "operator family \"%s\" for access method \"%s\" already exists" msgstr "\"%s\" 연산자 패밀리가 이미 있음, 해당 접근 방법: \"%s\"" -#: commands/opclasscmds.c:404 +#: commands/opclasscmds.c:402 #, c-format msgid "must be superuser to create an operator class" msgstr "연산자 클래스는 슈퍼유저만 만들 수 있습니다" -#: commands/opclasscmds.c:478 commands/opclasscmds.c:863 -#: commands/opclasscmds.c:996 +#: commands/opclasscmds.c:475 commands/opclasscmds.c:849 +#: commands/opclasscmds.c:973 #, c-format msgid "invalid operator number %d, must be between 1 and %d" msgstr "잘못된 연산자 번호: %d, 타당한 번호는 1부터 %d까지 입니다" -#: commands/opclasscmds.c:529 commands/opclasscmds.c:914 -#: commands/opclasscmds.c:1011 +#: commands/opclasscmds.c:519 commands/opclasscmds.c:893 +#: commands/opclasscmds.c:988 #, c-format msgid "invalid procedure number %d, must be between 1 and %d" msgstr "잘못된 프로시저 번호 %d, 이 번호는 1부터 %d까지입니다" -#: commands/opclasscmds.c:559 +#: commands/opclasscmds.c:548 #, c-format msgid "storage type specified more than once" msgstr "저장 방법이 중복되었습니다" -#: commands/opclasscmds.c:586 +#: commands/opclasscmds.c:575 #, c-format msgid "" "storage type cannot be different from data type for access method \"%s\"" msgstr "스토리지 자료형은 \"%s\" 접근 방법의 자료형과 같아야 합니다." -#: commands/opclasscmds.c:602 +#: commands/opclasscmds.c:591 #, c-format msgid "operator class \"%s\" for access method \"%s\" already exists" msgstr "\"%s\" 연산자 클래스에는 이미 \"%s\" 액세스 방법이 사용되고 있습니다" -#: commands/opclasscmds.c:630 +#: commands/opclasscmds.c:619 #, c-format msgid "could not make operator class \"%s\" be default for type %s" msgstr "\"%s\" 연산자 클래스를 %s 자료형의 기본값으로 지정할 수 없습니다" -#: commands/opclasscmds.c:633 +#: commands/opclasscmds.c:622 #, c-format msgid "Operator class \"%s\" already is the default." 
msgstr "\"%s\" 연산자 클래스는 이미 기본 연산자 클래스입니다" -#: commands/opclasscmds.c:760 +#: commands/opclasscmds.c:747 #, c-format msgid "must be superuser to create an operator family" msgstr "슈퍼유저만 연산자 패밀리를 만들 수 있음" -#: commands/opclasscmds.c:816 +#: commands/opclasscmds.c:803 #, c-format msgid "must be superuser to alter an operator family" msgstr "슈퍼유저만 연산자 패밀리를 변경할 수 있음" -#: commands/opclasscmds.c:879 +#: commands/opclasscmds.c:858 #, c-format msgid "operator argument types must be specified in ALTER OPERATOR FAMILY" msgstr "연산자 인자 형식이 ALTER OPERATOR FAMILY에 지정되어 있어야 함" -#: commands/opclasscmds.c:943 +#: commands/opclasscmds.c:921 #, c-format msgid "STORAGE cannot be specified in ALTER OPERATOR FAMILY" msgstr "ALTER OPERATOR FAMILY에서 STORAGE를 지정할 수 없음" -#: commands/opclasscmds.c:1066 +#: commands/opclasscmds.c:1043 #, c-format msgid "one or two argument types must be specified" msgstr "한두 개의 인자 형식을 지정해야 함" -#: commands/opclasscmds.c:1092 +#: commands/opclasscmds.c:1069 #, c-format msgid "index operators must be binary" msgstr "인덱스 연산자는 바이너리여야 함" -#: commands/opclasscmds.c:1111 +#: commands/opclasscmds.c:1088 #, c-format msgid "access method \"%s\" does not support ordering operators" msgstr "\"%s\" 접근 방법은 정렬 작업을 지원하지 않음" -#: commands/opclasscmds.c:1122 +#: commands/opclasscmds.c:1099 #, c-format msgid "index search operators must return boolean" msgstr "인덱스 검색 연산자는 부울형을 반환해야 함" -#: commands/opclasscmds.c:1164 +#: commands/opclasscmds.c:1141 #, c-format msgid "btree comparison procedures must have two arguments" msgstr "btree 비교 프로시저에는 두 개의 인자가 있어야 함" -#: commands/opclasscmds.c:1168 +#: commands/opclasscmds.c:1145 #, c-format msgid "btree comparison procedures must return integer" msgstr "btree 비교 프로시저는 반드시 integer 자료형을 반환해야 함" -#: commands/opclasscmds.c:1185 +#: commands/opclasscmds.c:1162 #, c-format msgid "btree sort support procedures must accept type \"internal\"" msgstr "" "btree 정렬 지원 프로시저는 반드시 \"internal\" 자료형 입력 인자로 사용해야함" -#: commands/opclasscmds.c:1189 +#: commands/opclasscmds.c:1166 #, c-format msgid "btree sort support procedures must return void" msgstr "btree 정렬 지원 프로시저는 반드시 void 값을 반환해야 함" -#: commands/opclasscmds.c:1201 +#: commands/opclasscmds.c:1178 #, c-format msgid "hash procedures must have one argument" msgstr "해시 프로시저에는 하나의 인자가 있어야 함" -#: commands/opclasscmds.c:1205 +#: commands/opclasscmds.c:1182 #, c-format msgid "hash procedures must return integer" msgstr "해시 프로시저는 정수를 반환해야 함" -#: commands/opclasscmds.c:1229 +#: commands/opclasscmds.c:1206 #, c-format msgid "associated data types must be specified for index support procedure" msgstr "인덱스 지원 프로시저에 대해 관련 데이터 형식을 지정해야 함" -#: commands/opclasscmds.c:1254 +#: commands/opclasscmds.c:1231 #, c-format msgid "procedure number %d for (%s,%s) appears more than once" msgstr "프로시저 번호 %d이(가) (%s,%s)에 대해 여러 번 표시됨" -#: commands/opclasscmds.c:1261 +#: commands/opclasscmds.c:1238 #, c-format msgid "operator number %d for (%s,%s) appears more than once" msgstr "연산자 번호 %d이(가) (%s,%s)에 대해 여러 번 표시됨" -#: commands/opclasscmds.c:1310 +#: commands/opclasscmds.c:1287 #, c-format msgid "operator %d(%s,%s) already exists in operator family \"%s\"" msgstr "%d(%s,%s) 연산자가 \"%s\" 연산자 패밀리에 이미 있음" -#: commands/opclasscmds.c:1426 +#: commands/opclasscmds.c:1401 #, c-format msgid "function %d(%s,%s) already exists in operator family \"%s\"" msgstr "%d(%s,%s) 함수가 \"%s\" 연산자 패밀리에 이미 있음" -#: commands/opclasscmds.c:1516 +#: commands/opclasscmds.c:1489 #, c-format msgid "operator %d(%s,%s) does not exist in operator family \"%s\"" msgstr "%d(%s,%s) 연산자가 \"%s\" 연산자 
패밀리에 없음" -#: commands/opclasscmds.c:1556 +#: commands/opclasscmds.c:1529 #, c-format msgid "function %d(%s,%s) does not exist in operator family \"%s\"" msgstr "%d(%s,%s) 함수가 \"%s\" 연산자 패밀리에 없음" -#: commands/opclasscmds.c:1686 +#: commands/opclasscmds.c:1659 #, c-format msgid "" "operator class \"%s\" for access method \"%s\" already exists in schema \"%s" @@ -7946,7 +8034,7 @@ msgstr "" "\"%s\" 연산자 클래스(\"%s\" 액세스 방법을 사용하는)는 이미 \"%s\" 스키마 안" "에 있습니다" -#: commands/opclasscmds.c:1709 +#: commands/opclasscmds.c:1682 #, c-format msgid "" "operator family \"%s\" for access method \"%s\" already exists in schema \"%s" @@ -7958,7 +8046,7 @@ msgstr "\"%s\" 연산자 패밀리(접근 방법: \"%s\")가 \"%s\" 스키마에 msgid "SETOF type not allowed for operator argument" msgstr "SETOF 형식은 연산자 인자에 허용되지 않음" -#: commands/operatorcmds.c:152 commands/operatorcmds.c:457 +#: commands/operatorcmds.c:152 commands/operatorcmds.c:454 #, c-format msgid "operator attribute \"%s\" not recognized" msgstr "\"%s\" 연산자 속성을 처리할 수 없음" @@ -7983,18 +8071,18 @@ msgstr "%s 제한 예상 함수는 %s 자료형을 반환해야 함" msgid "join estimator function %s must return type %s" msgstr "%s 조인 예상 함수는 %s 자료형을 반환해야 함" -#: commands/operatorcmds.c:451 +#: commands/operatorcmds.c:448 #, c-format msgid "operator attribute \"%s\" cannot be changed" msgstr "\"%s\" 연산자 속성 바꿀 수 없음" -#: commands/policy.c:87 commands/policy.c:388 commands/policy.c:477 -#: commands/tablecmds.c:971 commands/tablecmds.c:1313 -#: commands/tablecmds.c:2185 commands/tablecmds.c:4329 -#: commands/tablecmds.c:6280 commands/tablecmds.c:12080 -#: commands/tablecmds.c:12115 commands/trigger.c:241 commands/trigger.c:1125 -#: commands/trigger.c:1233 rewrite/rewriteDefine.c:273 -#: rewrite/rewriteDefine.c:917 +#: commands/policy.c:87 commands/policy.c:397 commands/policy.c:487 +#: commands/tablecmds.c:1150 commands/tablecmds.c:1520 +#: commands/tablecmds.c:2507 commands/tablecmds.c:4704 +#: commands/tablecmds.c:7054 commands/tablecmds.c:13019 +#: commands/tablecmds.c:13054 commands/trigger.c:259 commands/trigger.c:1320 +#: commands/trigger.c:1429 rewrite/rewriteDefine.c:272 +#: rewrite/rewriteDefine.c:925 #, c-format msgid "permission denied: \"%s\" is a system catalog" msgstr "액세스 권한 없음: \"%s\" 시스템 카탈로그임" @@ -8009,89 +8097,89 @@ msgstr "PUBLIC 아닌 지정한 모든 롤 무시함" msgid "All roles are members of the PUBLIC role." msgstr "모든 롤이 PUBLIC 롤의 소속입니다." 
-#: commands/policy.c:501 +#: commands/policy.c:511 #, c-format msgid "role \"%s\" could not be removed from policy \"%s\" on \"%s\"" msgstr "\"%s\" 롤을 \"%s\" 정책 (대상 릴레이션: \"%s\")에서 삭제될 수 없음" -#: commands/policy.c:710 +#: commands/policy.c:717 #, c-format msgid "WITH CHECK cannot be applied to SELECT or DELETE" msgstr "WITH CHECK 옵션은 SELECT나 DELETE 작업에 적용 될 수 없음" -#: commands/policy.c:719 commands/policy.c:1019 +#: commands/policy.c:726 commands/policy.c:1024 #, c-format msgid "only WITH CHECK expression allowed for INSERT" msgstr "INSERT 구문에 대해서만 WITH CHECK 옵션을 허용합니다" -#: commands/policy.c:792 commands/policy.c:1242 +#: commands/policy.c:799 commands/policy.c:1244 #, c-format msgid "policy \"%s\" for table \"%s\" already exists" msgstr "\"%s\" 정책이 \"%s\" 테이블에 이미 지정되어있습니다" -#: commands/policy.c:991 commands/policy.c:1270 commands/policy.c:1345 +#: commands/policy.c:996 commands/policy.c:1272 commands/policy.c:1344 #, c-format msgid "policy \"%s\" for table \"%s\" does not exist" msgstr "\"%s\" 정책이 \"%s\" 테이블에 없음" -#: commands/policy.c:1009 +#: commands/policy.c:1014 #, c-format msgid "only USING expression allowed for SELECT, DELETE" msgstr "USING 구문만 SELECT, DELETE 작업에 쓸 수 있음" -#: commands/portalcmds.c:61 commands/portalcmds.c:160 -#: commands/portalcmds.c:212 +#: commands/portalcmds.c:58 commands/portalcmds.c:182 +#: commands/portalcmds.c:234 #, c-format msgid "invalid cursor name: must not be empty" msgstr "잘못된 커서 이름: 비어있으면 안됩니다" -#: commands/portalcmds.c:168 commands/portalcmds.c:222 -#: executor/execCurrent.c:67 utils/adt/xml.c:2389 utils/adt/xml.c:2556 +#: commands/portalcmds.c:190 commands/portalcmds.c:244 +#: executor/execCurrent.c:67 utils/adt/xml.c:2469 utils/adt/xml.c:2639 #, c-format msgid "cursor \"%s\" does not exist" msgstr "\"%s\" 이름의 커서가 없음" -#: commands/prepare.c:71 +#: commands/prepare.c:75 #, c-format msgid "invalid statement name: must not be empty" msgstr "잘못된 명령문 이름: 비어있으면 안됩니다" -#: commands/prepare.c:129 parser/parse_param.c:304 tcop/postgres.c:1343 +#: commands/prepare.c:141 parser/parse_param.c:304 tcop/postgres.c:1349 #, c-format msgid "could not determine data type of parameter $%d" msgstr "$%d 매개 변수의 자료형을 알수가 없습니다." -#: commands/prepare.c:147 +#: commands/prepare.c:159 #, c-format msgid "utility statements cannot be prepared" msgstr "utility 명령문들은 미리 준비할 수 없습니다" -#: commands/prepare.c:257 commands/prepare.c:264 +#: commands/prepare.c:269 commands/prepare.c:274 #, c-format msgid "prepared statement is not a SELECT" msgstr "준비된 명령문이 SELECT 구문이 아닙니다." -#: commands/prepare.c:332 +#: commands/prepare.c:342 #, c-format msgid "wrong number of parameters for prepared statement \"%s\"" msgstr "prepared statement \"%s\"에 매개 변수 수가 틀렸습니다" -#: commands/prepare.c:334 +#: commands/prepare.c:344 #, c-format msgid "Expected %d parameters but got %d." msgstr "%d 개의 매개 변수가 요구되는데 %d 개만이 존재합니다" -#: commands/prepare.c:370 +#: commands/prepare.c:380 #, c-format msgid "parameter $%d of type %s cannot be coerced to the expected type %s" msgstr "??? 
parameter $%d of type %s 는 expected type %s 로 강요할 수 없다" -#: commands/prepare.c:465 +#: commands/prepare.c:474 #, c-format msgid "prepared statement \"%s\" already exists" msgstr "\"%s\" 이름의 준비된 명령문(prepared statement)이 이미 있습니다" -#: commands/prepare.c:504 +#: commands/prepare.c:513 #, c-format msgid "prepared statement \"%s\" does not exist" msgstr "\"%s\" 이름의 준비된 명령문(prepared statement) 없음" @@ -8121,370 +8209,702 @@ msgstr "지원하는 언어 목록은 pg_pltemplate 시스템 카탈로그에 msgid "must be superuser to create custom procedural language" msgstr "슈퍼유저만 사용자 지정 프로시저 언어를 만들 수 있음" -#: commands/proclang.c:281 +#: commands/proclang.c:281 commands/trigger.c:608 commands/typecmds.c:457 +#: commands/typecmds.c:474 #, c-format -msgid "" -"changing return type of function %s from \"opaque\" to \"language_handler\"" -msgstr "" -"%s 함수에서 \"opaque\" 자료형을 \"language_handler\" 자료형으로 리턴 자료형" -"을 바꿉니다" +msgid "changing return type of function %s from %s to %s" +msgstr "%s 함수의 반환 자료형을 %s에서 %s 자료형으로 바꿉니다" -#: commands/schemacmds.c:99 commands/schemacmds.c:262 +#: commands/publicationcmds.c:106 #, c-format -msgid "unacceptable schema name \"%s\"" -msgstr "\"%s\" 스키마 이름이 적당하지 못합니다" +msgid "invalid list syntax for \"publish\" option" +msgstr "\"publish\" 옵션의 목록 문법이 잘못됨" -#: commands/schemacmds.c:100 commands/schemacmds.c:263 +#: commands/publicationcmds.c:122 #, c-format -msgid "The prefix \"pg_\" is reserved for system schemas." -msgstr "" -"\"pg_\" 문자로 시작하는 스키마는 시스템에서 사용하는 예약된 스키마입니다." +msgid "unrecognized \"publish\" value: \"%s\"" +msgstr "알 수 없는 \"publish\" 값: \"%s\"" -#: commands/schemacmds.c:114 +#: commands/publicationcmds.c:128 #, c-format -msgid "schema \"%s\" already exists, skipping" -msgstr "\"%s\" 이름의 스키마(schema)가 이미 있음, 건너뜀" +msgid "unrecognized publication parameter: %s" +msgstr "인식할 수 없는 발행 매개 변수: %s" -#: commands/seclabel.c:60 +#: commands/publicationcmds.c:160 #, c-format -msgid "no security label providers have been loaded" -msgstr "로드된 보안 라벨 제공자가 없음" +msgid "must be superuser to create FOR ALL TABLES publication" +msgstr "FOR ALL TABLES 옵션의 발행을 만드려면 슈퍼유저여야만 합니다" -#: commands/seclabel.c:64 +#: commands/publicationcmds.c:321 #, c-format -msgid "" -"must specify provider when multiple security label providers have been loaded" -msgstr "다중 보안 레이블 제공자가 로드 될 때 제공자를 지정해야 합니다." +msgid "publication \"%s\" is defined as FOR ALL TABLES" +msgstr "" -#: commands/seclabel.c:82 +#: commands/publicationcmds.c:323 +#, c-format +msgid "Tables cannot be added to or dropped from FOR ALL TABLES publications." +msgstr "" + +#: commands/publicationcmds.c:624 +#, c-format +msgid "relation \"%s\" is not part of the publication" +msgstr "\"%s\" 릴레이션은 해당 발행에 포함되어 있지 않습니다" + +#: commands/publicationcmds.c:667 +#, c-format +msgid "permission denied to change owner of publication \"%s\"" +msgstr "\"%s\" 발행의 소유주를 바꿀 권한이 없습니다" + +#: commands/publicationcmds.c:669 +#, c-format +msgid "The owner of a FOR ALL TABLES publication must be a superuser." +msgstr "FOR ALL TABLES 옵션용 발행의 소유주는 슈퍼유저여야만 합니다" + +#: commands/schemacmds.c:106 commands/schemacmds.c:280 +#, c-format +msgid "unacceptable schema name \"%s\"" +msgstr "\"%s\" 스키마 이름이 적당하지 못합니다" + +#: commands/schemacmds.c:107 commands/schemacmds.c:281 +#, c-format +msgid "The prefix \"pg_\" is reserved for system schemas." +msgstr "" +"\"pg_\" 문자로 시작하는 스키마는 시스템에서 사용하는 예약된 스키마입니다." 
+ +#: commands/schemacmds.c:121 +#, c-format +msgid "schema \"%s\" already exists, skipping" +msgstr "\"%s\" 이름의 스키마(schema)가 이미 있음, 건너뜀" + +#: commands/seclabel.c:60 +#, c-format +msgid "no security label providers have been loaded" +msgstr "로드된 보안 라벨 제공자가 없음" + +#: commands/seclabel.c:64 +#, c-format +msgid "" +"must specify provider when multiple security label providers have been loaded" +msgstr "다중 보안 레이블 제공자가 로드 될 때 제공자를 지정해야 합니다." + +#: commands/seclabel.c:82 #, c-format msgid "security label provider \"%s\" is not loaded" msgstr "\"%s\" 이름의 보안 라벨 제공자가 로드되어 있지 않음" -#: commands/sequence.c:127 +#: commands/sequence.c:138 #, c-format msgid "unlogged sequences are not supported" msgstr "로그를 남기지 않는 시퀀스는 지원하지 않음" -#: commands/sequence.c:651 +#: commands/sequence.c:699 #, c-format msgid "nextval: reached maximum value of sequence \"%s\" (%s)" msgstr "nextval: \"%s\" 시퀀스의 최대값(%s)이 되었습니다" -#: commands/sequence.c:674 +#: commands/sequence.c:722 #, c-format msgid "nextval: reached minimum value of sequence \"%s\" (%s)" msgstr "nextval: \"%s\" 시퀀스의 최소값(%s)이 되었습니다" -#: commands/sequence.c:792 +#: commands/sequence.c:840 #, c-format msgid "currval of sequence \"%s\" is not yet defined in this session" msgstr "\"%s\" 시퀀스의 currval 값이 현재 세션에 지정되어 있지 않습니다" -#: commands/sequence.c:811 commands/sequence.c:817 +#: commands/sequence.c:859 commands/sequence.c:865 #, c-format msgid "lastval is not yet defined in this session" msgstr "이 세션에는 lastval 값이 아직까지 지정되지 않았습니다" -#: commands/sequence.c:893 +#: commands/sequence.c:953 #, c-format msgid "setval: value %s is out of bounds for sequence \"%s\" (%s..%s)" msgstr "setval: %s 값은 \"%s\" 시퀀스의 범위(%s..%s)를 벗어났습니다" -#: commands/sequence.c:1267 +#: commands/sequence.c:1358 +#, c-format +msgid "invalid sequence option SEQUENCE NAME" +msgstr "" + +#: commands/sequence.c:1384 +#, c-format +msgid "identity column type must be smallint, integer, or bigint" +msgstr "" + +#: commands/sequence.c:1385 +#, c-format +msgid "sequence type must be smallint, integer, or bigint" +msgstr "" + +#: commands/sequence.c:1419 #, c-format msgid "INCREMENT must not be zero" msgstr "INCREMENT 값은 0(zero)이 될 수 없습니다" -#: commands/sequence.c:1323 +#: commands/sequence.c:1472 +#, c-format +msgid "MAXVALUE (%s) is out of range for sequence data type %s" +msgstr "MAXVALUE (%s) 값이 허용 범위 밖임, 해당 시퀀스 자료형: %s" + +#: commands/sequence.c:1509 +#, c-format +msgid "MINVALUE (%s) is out of range for sequence data type %s" +msgstr "MAXVALUE (%s) 값이 허용 범위 밖임, 해당 시퀀스 자료형: %s" + +#: commands/sequence.c:1523 #, c-format msgid "MINVALUE (%s) must be less than MAXVALUE (%s)" msgstr "MINVALUE (%s) 값은 MAXVALUE (%s) 값보다 작아야합니다" -#: commands/sequence.c:1348 +#: commands/sequence.c:1550 #, c-format msgid "START value (%s) cannot be less than MINVALUE (%s)" msgstr "START 값(%s)은 MINVALUE(%s)보다 작을 수 없음" -#: commands/sequence.c:1360 +#: commands/sequence.c:1562 #, c-format msgid "START value (%s) cannot be greater than MAXVALUE (%s)" msgstr "START 값(%s)은 MAXVALUE(%s)보다 클 수 없음" -#: commands/sequence.c:1390 +#: commands/sequence.c:1592 #, c-format msgid "RESTART value (%s) cannot be less than MINVALUE (%s)" msgstr "RESTART 값(%s)은 MINVALUE(%s)보다 작을 수 없음" -#: commands/sequence.c:1402 +#: commands/sequence.c:1604 #, c-format msgid "RESTART value (%s) cannot be greater than MAXVALUE (%s)" msgstr "RESTART 값(%s)은 MAXVALUE(%s)보다 클 수 없음" -#: commands/sequence.c:1417 +#: commands/sequence.c:1619 #, c-format msgid "CACHE (%s) must be greater than zero" msgstr "CACHE (%s) 값은 0(zero)보다 커야합니다" -#: commands/sequence.c:1449 +#: 
commands/sequence.c:1656 #, c-format msgid "invalid OWNED BY option" msgstr "잘못된 OWNED BY 옵션" -#: commands/sequence.c:1450 +#: commands/sequence.c:1657 #, c-format msgid "Specify OWNED BY table.column or OWNED BY NONE." msgstr "OWNED BY 테이블.열 또는 OWNED BY NONE을 지정하십시오." -#: commands/sequence.c:1473 +#: commands/sequence.c:1682 #, c-format msgid "referenced relation \"%s\" is not a table or foreign table" msgstr "참조되는 \"%s\" 릴레이션은 테이블 또는 외부 테이블이 아닙니다" -#: commands/sequence.c:1480 +#: commands/sequence.c:1689 #, c-format msgid "sequence must have same owner as table it is linked to" msgstr "시퀀스 및 이 시퀀스가 연결된 테이블의 소유주가 같아야 함" -#: commands/sequence.c:1484 +#: commands/sequence.c:1693 #, c-format msgid "sequence must be in same schema as table it is linked to" msgstr "시퀀스 및 이 시퀀스가 연결된 테이블이 같은 스키마에 있어야 함" -#: commands/tablecmds.c:216 +#: commands/sequence.c:1715 +#, c-format +msgid "cannot change ownership of identity sequence" +msgstr "식별 시퀀스의 소유주는 바꿀 수 없음" + +#: commands/sequence.c:1716 commands/tablecmds.c:9888 +#: commands/tablecmds.c:12482 +#, c-format +msgid "Sequence \"%s\" is linked to table \"%s\"." +msgstr "\"%s\" 시퀀스는 \"%s\" 테이블에 종속되어 있습니다." + +#: commands/statscmds.c:93 +#, c-format +msgid "statistics object \"%s\" already exists, skipping" +msgstr "\"%s\" 이름의 통계정보 객체가 이미 있습니다, 건너뜀" + +#: commands/statscmds.c:100 +#, c-format +msgid "statistics object \"%s\" already exists" +msgstr "\"%s\" 이름의 통계정보 객체가 이미 있음" + +#: commands/statscmds.c:112 commands/statscmds.c:121 +#, c-format +msgid "only a single relation is allowed in CREATE STATISTICS" +msgstr "CREATE STATISTICS 명령에서는 하나의 릴레이션만 사용할 수 있음" + +#: commands/statscmds.c:139 +#, c-format +msgid "relation \"%s\" is not a table, foreign table, or materialized view" +msgstr "\"%s\" 객체는 테이블도, 외부 테이블도, 구체화된 뷰도 아닙니다" + +#: commands/statscmds.c:170 commands/statscmds.c:176 +#, c-format +msgid "only simple column references are allowed in CREATE STATISTICS" +msgstr "CREATE STATISTICS 명령에서는 단순 칼럼 참조만 허용합니다." + +#: commands/statscmds.c:191 +#, c-format +msgid "statistics creation on system columns is not supported" +msgstr "시스템 칼럼에 대한 통계정보 객체 만들기는 지원하지 않습니다" + +#: commands/statscmds.c:198 +#, c-format +msgid "" +"column \"%s\" cannot be used in statistics because its type %s has no " +"default btree operator class" +msgstr "" +"\"%s\" 칼럼은 사용자 통계정보 수집이 불가능합니다. %s 자료형은 기본 btree 연" +"산자 클래스를 정의하지 않았습니다" + +#: commands/statscmds.c:205 +#, c-format +msgid "cannot have more than %d columns in statistics" +msgstr "통계정보 객체에서는 %d개보다 많은 칼럼을 사용할 수 없습니다" + +#: commands/statscmds.c:220 +#, c-format +msgid "extended statistics require at least 2 columns" +msgstr "확장된 통계정보는 두 개 이상의 칼럼이 필요합니다." 
+ +#: commands/statscmds.c:238 +#, c-format +msgid "duplicate column name in statistics definition" +msgstr "통계정보 정의에서 사용하는 칼럼이 중복되었습니다" + +#: commands/statscmds.c:266 +#, c-format +msgid "unrecognized statistics kind \"%s\"" +msgstr "알 수 없는 통계정보 종류 \"%s\"" + +#: commands/subscriptioncmds.c:187 +#, c-format +msgid "unrecognized subscription parameter: %s" +msgstr "알 수 없는 구독 매개 변수: \"%s\"" + +#: commands/subscriptioncmds.c:200 +#, c-format +msgid "connect = false and enabled = true are mutually exclusive options" +msgstr "connect = false 옵션과 enabled = true 옵션은 함께 사용할 수 없음" + +#: commands/subscriptioncmds.c:205 +#, c-format +msgid "connect = false and create_slot = true are mutually exclusive options" +msgstr "connect = false 옵션과 create_slot = true 옵션은 함께 사용할 수 없음" + +#: commands/subscriptioncmds.c:210 +#, c-format +msgid "connect = false and copy_data = true are mutually exclusive options" +msgstr "connect = false 옵션과 copy_data = true 옵션은 함께 사용할 수 없음" + +#: commands/subscriptioncmds.c:227 +#, c-format +msgid "slot_name = NONE and enabled = true are mutually exclusive options" +msgstr "slot_name = NONE 옵션과 enabled = true 옵션은 함께 사용할 수 없음" + +#: commands/subscriptioncmds.c:232 +#, c-format +msgid "slot_name = NONE and create_slot = true are mutually exclusive options" +msgstr "slot_name = NONE 옵션과 create_slot = true 옵션은 함께 사용할 수 없음" + +#: commands/subscriptioncmds.c:237 +#, c-format +msgid "subscription with slot_name = NONE must also set enabled = false" +msgstr "" +"구독에서 slot_name = NONE 옵션은 enabled = false 옵션과 함께 사용해야 함" + +#: commands/subscriptioncmds.c:242 +#, c-format +msgid "subscription with slot_name = NONE must also set create_slot = false" +msgstr "" +"구독에서 slot_name = NONE 옵션은 create_slot = false 옵션과 함께 사용해야 함" + +#: commands/subscriptioncmds.c:284 +#, c-format +msgid "publication name \"%s\" used more than once" +msgstr "\"%s\" 발행 이름이 여러 번 사용 됨" + +#: commands/subscriptioncmds.c:347 +#, c-format +msgid "must be superuser to create subscriptions" +msgstr "구독 만들기는 슈퍼유져 권한이 필요합니다" + +#: commands/subscriptioncmds.c:427 commands/subscriptioncmds.c:520 +#: replication/logical/tablesync.c:856 replication/logical/worker.c:1617 +#, c-format +msgid "could not connect to the publisher: %s" +msgstr "발행 서버에 연결 할 수 없음: %s" + +#: commands/subscriptioncmds.c:469 +#, c-format +msgid "created replication slot \"%s\" on publisher" +msgstr "\"%s\" 이름의 복제 슬롯이 없습니다" + +#: commands/subscriptioncmds.c:486 +#, c-format +msgid "" +"tables were not subscribed, you will have to run ALTER SUBSCRIPTION ... " +"REFRESH PUBLICATION to subscribe the tables" +msgstr "" + +#: commands/subscriptioncmds.c:576 +#, c-format +msgid "table \"%s.%s\" added to subscription \"%s\"" +msgstr "\"%s.%s\" 테이블을 \"%s\" 구독에 추가했습니다" + +#: commands/subscriptioncmds.c:600 +#, c-format +msgid "table \"%s.%s\" removed from subscription \"%s\"" +msgstr "\"%s.%s\" 테이블을 \"%s\" 구독에서 삭제했습니다" + +#: commands/subscriptioncmds.c:669 +#, c-format +msgid "cannot set slot_name = NONE for enabled subscription" +msgstr "구독 활성화를 위해서는 slot_name = NONE 옵션은 사용할 수 없습니다" + +#: commands/subscriptioncmds.c:703 +#, c-format +msgid "cannot enable subscription that does not have a slot name" +msgstr "슬롯 이름 없이는 구독을 활성화 할 수 없음" + +#: commands/subscriptioncmds.c:749 +#, c-format +msgid "" +"ALTER SUBSCRIPTION with refresh is not allowed for disabled subscriptions" +msgstr "" + +#: commands/subscriptioncmds.c:750 +#, c-format +msgid "Use ALTER SUBSCRIPTION ... SET PUBLICATION ... WITH (refresh = false)." 
+msgstr "" + +#: commands/subscriptioncmds.c:768 +#, c-format +msgid "" +"ALTER SUBSCRIPTION ... REFRESH is not allowed for disabled subscriptions" +msgstr "" + +#: commands/subscriptioncmds.c:847 +#, c-format +msgid "subscription \"%s\" does not exist, skipping" +msgstr "\"%s\" 구독 없음, 건너뜀" + +#: commands/subscriptioncmds.c:972 +#, c-format +msgid "" +"could not connect to publisher when attempting to drop the replication slot " +"\"%s\"" +msgstr "" + +#: commands/subscriptioncmds.c:974 commands/subscriptioncmds.c:988 +#: replication/logical/tablesync.c:906 replication/logical/tablesync.c:928 +#, c-format +msgid "The error was: %s" +msgstr "해당 오류: %s" + +#: commands/subscriptioncmds.c:975 +#, c-format +msgid "" +"Use ALTER SUBSCRIPTION ... SET (slot_name = NONE) to disassociate the " +"subscription from the slot." +msgstr "" + +#: commands/subscriptioncmds.c:986 +#, c-format +msgid "could not drop the replication slot \"%s\" on publisher" +msgstr "발행용 \"%s\" 복제 슬롯을 삭제 할 수 없음" + +#: commands/subscriptioncmds.c:991 +#, c-format +msgid "dropped replication slot \"%s\" on publisher" +msgstr "발행에서 \"%s\" 복제 슬롯을 삭제했음" + +#: commands/subscriptioncmds.c:1032 +#, c-format +msgid "permission denied to change owner of subscription \"%s\"" +msgstr "\"%s\" 구독 소유주를 변경할 권한이 없음" + +#: commands/subscriptioncmds.c:1034 +#, c-format +msgid "The owner of a subscription must be a superuser." +msgstr "구독 소유주는 슈퍼유저여야 합니다." + +#: commands/subscriptioncmds.c:1147 +#, c-format +msgid "could not receive list of replicated tables from the publisher: %s" +msgstr "구독에서 복제 테이블 목록을 구할 수 없음: %s" + +#: commands/tablecmds.c:221 commands/tablecmds.c:263 #, c-format msgid "table \"%s\" does not exist" msgstr "\"%s\" 테이블 없음" -#: commands/tablecmds.c:217 +#: commands/tablecmds.c:222 commands/tablecmds.c:264 #, c-format msgid "table \"%s\" does not exist, skipping" msgstr "\"%s\" 테이블 없음, 무시함" -#: commands/tablecmds.c:219 +#: commands/tablecmds.c:224 commands/tablecmds.c:266 msgid "Use DROP TABLE to remove a table." msgstr "테이블을 삭제하려면, DROP TABLE 명령을 사용하세요." -#: commands/tablecmds.c:222 +#: commands/tablecmds.c:227 #, c-format msgid "sequence \"%s\" does not exist" msgstr "\"%s\" 시퀀스 없음" -#: commands/tablecmds.c:223 +#: commands/tablecmds.c:228 #, c-format msgid "sequence \"%s\" does not exist, skipping" msgstr "\"%s\" 시퀀스 없음, 무시함" -#: commands/tablecmds.c:225 +#: commands/tablecmds.c:230 msgid "Use DROP SEQUENCE to remove a sequence." msgstr "시퀀스를 삭제하려면 DROP SEQUENCE 명령을 사용하세요." -#: commands/tablecmds.c:228 +#: commands/tablecmds.c:233 #, c-format msgid "view \"%s\" does not exist" msgstr "\"%s\" 뷰(view) 없음" -#: commands/tablecmds.c:229 +#: commands/tablecmds.c:234 #, c-format msgid "view \"%s\" does not exist, skipping" msgstr "\"%s\" 뷰(view) 없음, 무시함" -#: commands/tablecmds.c:231 +#: commands/tablecmds.c:236 msgid "Use DROP VIEW to remove a view." msgstr "뷰를 삭제하려면, DROP VIEW 명령을 사용하세요." -#: commands/tablecmds.c:234 +#: commands/tablecmds.c:239 #, c-format msgid "materialized view \"%s\" does not exist" msgstr "\"%s\" 이름의 구체화된 뷰가 없음" -#: commands/tablecmds.c:235 +#: commands/tablecmds.c:240 #, c-format msgid "materialized view \"%s\" does not exist, skipping" msgstr "\"%s\" 구체화된 뷰 없음, 건너뜀" -#: commands/tablecmds.c:237 +#: commands/tablecmds.c:242 msgid "Use DROP MATERIALIZED VIEW to remove a materialized view." msgstr "구체화된 뷰를 삭제하려면, DROP MATERIALIZED VIEW 명령을 사용하세요." 
-#: commands/tablecmds.c:240 parser/parse_utilcmd.c:1630 +#: commands/tablecmds.c:245 parser/parse_utilcmd.c:1829 #, c-format msgid "index \"%s\" does not exist" msgstr "\"%s\" 인덱스 없음" -#: commands/tablecmds.c:241 +#: commands/tablecmds.c:246 #, c-format msgid "index \"%s\" does not exist, skipping" msgstr "\"%s\" 인덱스 없음, 무시함" -#: commands/tablecmds.c:243 +#: commands/tablecmds.c:248 msgid "Use DROP INDEX to remove an index." msgstr "인덱스를 삭제하려면, DROP INDEX 명령을 사용하세요." -#: commands/tablecmds.c:248 +#: commands/tablecmds.c:253 #, c-format msgid "\"%s\" is not a type" msgstr "\"%s\" 객체는 자료형이 아님" -#: commands/tablecmds.c:249 +#: commands/tablecmds.c:254 msgid "Use DROP TYPE to remove a type." msgstr "자료형을 삭제하려면 DROP TYPE 명령을 사용하세요." -#: commands/tablecmds.c:252 commands/tablecmds.c:8583 -#: commands/tablecmds.c:11335 +#: commands/tablecmds.c:257 commands/tablecmds.c:9404 +#: commands/tablecmds.c:12262 #, c-format msgid "foreign table \"%s\" does not exist" msgstr "\"%s\" 외부 테이블 없음" -#: commands/tablecmds.c:253 +#: commands/tablecmds.c:258 #, c-format msgid "foreign table \"%s\" does not exist, skipping" msgstr "\"%s\" 외부 테이블 없음, 건너뜀" -#: commands/tablecmds.c:255 +#: commands/tablecmds.c:260 msgid "Use DROP FOREIGN TABLE to remove a foreign table." msgstr "외부 테이블을 삭제하려면, DROP FOREIGN TABLE 명령을 사용하세요." -#: commands/tablecmds.c:494 +#: commands/tablecmds.c:533 #, c-format msgid "ON COMMIT can only be used on temporary tables" msgstr "ON COMMIT 옵션은 임시 테이블에서만 사용될 수 있습니다" -#: commands/tablecmds.c:514 +#: commands/tablecmds.c:561 #, c-format msgid "cannot create temporary table within security-restricted operation" msgstr "보안 제한 작업 내에서 임시 테이블을 만들 수 없음" -#: commands/tablecmds.c:822 +#: commands/tablecmds.c:662 +#, c-format +msgid "cannot create table with OIDs as partition of table without OIDs" +msgstr "OID 없는 테이블의 파티션 테이블로 OID 있는 테이블을 만들 수 없음" + +#: commands/tablecmds.c:783 parser/parse_utilcmd.c:3280 +#, c-format +msgid "\"%s\" is not partitioned" +msgstr "\"%s\" 파티션 된 테이블 아님" + +#: commands/tablecmds.c:831 +#, c-format +msgid "cannot partition using more than %d columns" +msgstr "%d개보다 많은 칼럼을 이용해서 파티션할 수 없음" + +#: commands/tablecmds.c:972 #, c-format msgid "DROP INDEX CONCURRENTLY does not support dropping multiple objects" msgstr "DROP INDEX CONCURRENTLY 명령은 하나의 인덱스만 지울 수 있습니다" -#: commands/tablecmds.c:826 +#: commands/tablecmds.c:976 #, c-format msgid "DROP INDEX CONCURRENTLY does not support CASCADE" msgstr "DROP INDEX CONCURRENTLY 명령에서는 CASCADE 옵션을 사용할 수 없음" -#: commands/tablecmds.c:1085 +#: commands/tablecmds.c:1253 +#, c-format +msgid "cannot truncate only a partitioned table" +msgstr "파티션 된 테이블만 truncate 할 수 없음" + +#: commands/tablecmds.c:1254 +#, c-format +msgid "" +"Do not specify the ONLY keyword, or use truncate only on the partitions " +"directly." 
+msgstr "" + +#: commands/tablecmds.c:1282 #, c-format msgid "truncate cascades to table \"%s\"" msgstr "\"%s\" 객체의 자료도 함께 삭제됨" -#: commands/tablecmds.c:1323 +#: commands/tablecmds.c:1530 #, c-format msgid "cannot truncate temporary tables of other sessions" msgstr "다른 세션의 임시 테이블 자료는 비울(truncate) 수 없습니다" -#: commands/tablecmds.c:1529 parser/parse_utilcmd.c:1844 +#: commands/tablecmds.c:1761 commands/tablecmds.c:10989 +#, c-format +msgid "cannot inherit from partitioned table \"%s\"" +msgstr "\"%s\" 파티션 된 테이블로부터 상속할 수 없습니다" + +#: commands/tablecmds.c:1766 +#, c-format +msgid "cannot inherit from partition \"%s\"" +msgstr "\"%s\" 파티션 테이블입니다, 그래서 상속 대상이 될 수 없습니다" + +#: commands/tablecmds.c:1774 parser/parse_utilcmd.c:2040 #, c-format msgid "inherited relation \"%s\" is not a table or foreign table" msgstr "상속할 \"%s\" 릴레이션(relation)은 테이블도, 외부 테이블도 아닙니다" -#: commands/tablecmds.c:1536 commands/tablecmds.c:10150 +#: commands/tablecmds.c:1782 commands/tablecmds.c:10968 #, c-format msgid "cannot inherit from temporary relation \"%s\"" msgstr "\"%s\" 임시 테이블입니다, 그래서 상속 대상이 될 수 없습니다" -#: commands/tablecmds.c:1544 commands/tablecmds.c:10158 +#: commands/tablecmds.c:1792 commands/tablecmds.c:10976 #, c-format msgid "cannot inherit from temporary relation of another session" msgstr "다른 세션의 임시 테이블입니다, 그래서 상속 대상이 될 수 없습니다" -#: commands/tablecmds.c:1560 commands/tablecmds.c:10192 +#: commands/tablecmds.c:1809 commands/tablecmds.c:11100 #, c-format msgid "relation \"%s\" would be inherited from more than once" msgstr "\"%s\" 테이블이 여러 번 상속됨" -#: commands/tablecmds.c:1608 +#: commands/tablecmds.c:1857 #, c-format msgid "merging multiple inherited definitions of column \"%s\"" msgstr "\"%s\" 칼럼이 중복되어 상속됩니다." -#: commands/tablecmds.c:1616 +#: commands/tablecmds.c:1865 #, c-format msgid "inherited column \"%s\" has a type conflict" -msgstr "상위 테이블에서 지정한 \"%s\" 열의 자료형들이 일치하지 않습니다" +msgstr "상위 테이블에서 지정한 \"%s\" 칼럼의 자료형들이 일치하지 않습니다" -#: commands/tablecmds.c:1618 commands/tablecmds.c:1641 -#: commands/tablecmds.c:1839 commands/tablecmds.c:1863 -#: parser/parse_coerce.c:1630 parser/parse_coerce.c:1650 -#: parser/parse_coerce.c:1670 parser/parse_coerce.c:1715 -#: parser/parse_coerce.c:1752 parser/parse_param.c:218 +#: commands/tablecmds.c:1867 commands/tablecmds.c:1890 +#: commands/tablecmds.c:2096 commands/tablecmds.c:2126 +#: parser/parse_coerce.c:1650 parser/parse_coerce.c:1670 +#: parser/parse_coerce.c:1690 parser/parse_coerce.c:1736 +#: parser/parse_coerce.c:1775 parser/parse_param.c:218 #, c-format msgid "%s versus %s" msgstr "%s 형과 %s 형" -#: commands/tablecmds.c:1627 +#: commands/tablecmds.c:1876 #, c-format msgid "inherited column \"%s\" has a collation conflict" msgstr "상속 받은 \"%s\" 칼럼의 정렬규칙에서 충돌합니다." 
-#: commands/tablecmds.c:1629 commands/tablecmds.c:1851 -#: commands/tablecmds.c:4767 +#: commands/tablecmds.c:1878 commands/tablecmds.c:2108 +#: commands/tablecmds.c:5162 #, c-format msgid "\"%s\" versus \"%s\"" msgstr "\"%s\" 형과 \"%s\" 형" -#: commands/tablecmds.c:1639 +#: commands/tablecmds.c:1888 #, c-format msgid "inherited column \"%s\" has a storage parameter conflict" msgstr "상속 받은 \"%s\" 칼럼의 스토리지 설정값에서 충돌합니다" -#: commands/tablecmds.c:1752 commands/tablecmds.c:8088 -#: parser/parse_utilcmd.c:923 parser/parse_utilcmd.c:1274 -#: parser/parse_utilcmd.c:1350 +#: commands/tablecmds.c:2002 commands/tablecmds.c:8894 +#: parser/parse_utilcmd.c:1123 parser/parse_utilcmd.c:1474 +#: parser/parse_utilcmd.c:1550 #, c-format msgid "cannot convert whole-row table reference" msgstr "전체 로우 테이블 참조형으로 변환할 수 없음" -#: commands/tablecmds.c:1753 parser/parse_utilcmd.c:924 +#: commands/tablecmds.c:2003 parser/parse_utilcmd.c:1124 #, c-format msgid "Constraint \"%s\" contains a whole-row reference to table \"%s\"." msgstr "\"%s\" 제약조건에 \"%s\" 테이블 전체 로우 참조가 있습니다" -#: commands/tablecmds.c:1825 +#: commands/tablecmds.c:2082 #, c-format msgid "merging column \"%s\" with inherited definition" msgstr "\"%s\" 칼럼을 상속된 정의와 병합하는 중" -#: commands/tablecmds.c:1829 +#: commands/tablecmds.c:2086 #, c-format msgid "moving and merging column \"%s\" with inherited definition" msgstr "\"%s\" 칼럼을 상속된 정의와 이동, 병합하는 중" -#: commands/tablecmds.c:1830 +#: commands/tablecmds.c:2087 #, c-format msgid "User-specified column moved to the position of the inherited column." msgstr "사용자 지정 칼럼이 상속된 칼럼의 위치로 이동되었습니다" -#: commands/tablecmds.c:1837 +#: commands/tablecmds.c:2094 #, c-format msgid "column \"%s\" has a type conflict" msgstr "\"%s\" 칼럼의 자료형이 충돌합니다" -#: commands/tablecmds.c:1849 +#: commands/tablecmds.c:2106 #, c-format msgid "column \"%s\" has a collation conflict" msgstr "\"%s\" 칼럼의 정렬규칙이 충돌합니다" -#: commands/tablecmds.c:1861 +#: commands/tablecmds.c:2124 #, c-format msgid "column \"%s\" has a storage parameter conflict" msgstr "\"%s\" 칼럼의 스토리지 설정값이 충돌합니다" -#: commands/tablecmds.c:1913 +#: commands/tablecmds.c:2235 #, c-format msgid "column \"%s\" inherits conflicting default values" msgstr "" "상속 받는 \"%s\" 열 자료형과 이 열에 지정한 default 값의 자료형이 서로 다릅니" "다" -#: commands/tablecmds.c:1915 +#: commands/tablecmds.c:2237 #, c-format msgid "To resolve the conflict, specify a default explicitly." msgstr "이 충돌을 피하려면, default 값을 바르게 지정하십시오." -#: commands/tablecmds.c:1962 +#: commands/tablecmds.c:2284 #, c-format msgid "" "check constraint name \"%s\" appears multiple times but with different " @@ -8492,12 +8912,12 @@ msgid "" msgstr "" "\"%s\" 체크 제약 조건 이름이 여러 번 나타나지만, 각각 다른 식으로 되어있음" -#: commands/tablecmds.c:2156 +#: commands/tablecmds.c:2477 #, c-format msgid "cannot rename column of typed table" msgstr "칼럼 이름을 바꿀 수 없음" -#: commands/tablecmds.c:2173 +#: commands/tablecmds.c:2495 #, c-format msgid "" "\"%s\" is not a table, view, materialized view, composite type, index, or " @@ -8506,261 +8926,356 @@ msgstr "" "\"%s\" 객체는 테이블도, 뷰도, 구체화된 뷰도, 복합 자료형도, 인덱스도, 외부 테" "이블도 아닙니다." 
-#: commands/tablecmds.c:2267 +#: commands/tablecmds.c:2589 #, c-format msgid "inherited column \"%s\" must be renamed in child tables too" -msgstr "하위 테이블에서도 상속된 \"%s\" 열의 이름을 바꾸어야 함" +msgstr "하위 테이블에서도 상속된 \"%s\" 칼럼의 이름을 바꾸어야 함" -#: commands/tablecmds.c:2299 +#: commands/tablecmds.c:2621 #, c-format msgid "cannot rename system column \"%s\"" -msgstr "\"%s\" 이름의 열은 시스템 열입니다, 이름을 바꿀 수 없습니다" +msgstr "\"%s\" 이름의 칼럼은 시스템 칼럼입니다, 이름을 바꿀 수 없습니다" -#: commands/tablecmds.c:2314 +#: commands/tablecmds.c:2636 #, c-format msgid "cannot rename inherited column \"%s\"" -msgstr "\"%s\" 이름의 열은 상속 받은 열입니다, 이름을 바꿀 수 없습니다" +msgstr "\"%s\" 이름의 칼럼은 상속 받은 칼럼입니다, 이름을 바꿀 수 없습니다" -#: commands/tablecmds.c:2469 +#: commands/tablecmds.c:2788 #, c-format msgid "inherited constraint \"%s\" must be renamed in child tables too" msgstr "" "하위 테이블에서도 상속된 \"%s\" 제약조건은 하위 테이블에서도 이름이 바뀌어야 " "함" -#: commands/tablecmds.c:2476 +#: commands/tablecmds.c:2795 #, c-format msgid "cannot rename inherited constraint \"%s\"" msgstr "\"%s\" 상속된 제약조건은 이름을 바꿀 수 없습니다" #. translator: first %s is a SQL command, eg ALTER TABLE -#: commands/tablecmds.c:2702 +#: commands/tablecmds.c:3019 #, c-format msgid "" "cannot %s \"%s\" because it is being used by active queries in this session" msgstr "이 세션의 활성 쿼리에서 사용 중이므로 %s \"%s\" 작업을 할 수 없음" #. translator: first %s is a SQL command, eg ALTER TABLE -#: commands/tablecmds.c:2711 +#: commands/tablecmds.c:3028 #, c-format msgid "cannot %s \"%s\" because it has pending trigger events" msgstr "보류 중인 트리거 이벤트가 있으므로 %s \"%s\" 작업을 할 수 없음" -#: commands/tablecmds.c:3785 +#: commands/tablecmds.c:4147 #, c-format msgid "cannot rewrite system relation \"%s\"" msgstr "\"%s\" 시스템 릴레이션을 다시 쓰기(rewrite) 할 수 없음" -#: commands/tablecmds.c:3791 +#: commands/tablecmds.c:4153 #, c-format msgid "cannot rewrite table \"%s\" used as a catalog table" msgstr "카탈로그 테이블로 사용되어 \"%s\" 테이블을 rewrite 못함" -#: commands/tablecmds.c:3801 +#: commands/tablecmds.c:4163 #, c-format msgid "cannot rewrite temporary tables of other sessions" msgstr "다른 세션의 임시 테이블을 다시 쓰기(rewrite) 할 수 없음" -#: commands/tablecmds.c:4069 +#: commands/tablecmds.c:4439 #, c-format msgid "rewriting table \"%s\"" msgstr "\"%s\" 파일 다시 쓰는 중" -#: commands/tablecmds.c:4073 +#: commands/tablecmds.c:4443 #, c-format msgid "verifying table \"%s\"" msgstr "\"%s\" 파일 검사 중" -#: commands/tablecmds.c:4187 +#: commands/tablecmds.c:4556 #, c-format msgid "column \"%s\" contains null values" msgstr "\"%s\" 열에는 null 값 자료가 있습니다" -#: commands/tablecmds.c:4202 commands/tablecmds.c:7385 +#: commands/tablecmds.c:4571 commands/tablecmds.c:8163 #, c-format msgid "check constraint \"%s\" is violated by some row" msgstr "\"%s\" 체크 제약 조건을 위반하는 몇몇 자료가 이미 있습니다" -#: commands/tablecmds.c:4350 commands/trigger.c:235 -#: rewrite/rewriteDefine.c:267 rewrite/rewriteDefine.c:912 +#: commands/tablecmds.c:4587 +#, c-format +msgid "partition constraint is violated by some row" +msgstr "파티션 제약 조건을 위반하는 몇몇 자료가 이미 있습니다" + +#: commands/tablecmds.c:4725 commands/trigger.c:253 +#: rewrite/rewriteDefine.c:266 rewrite/rewriteDefine.c:920 #, c-format msgid "\"%s\" is not a table or view" msgstr "\"%s\" 객체는 테이블도 뷰도 아닙니다" -#: commands/tablecmds.c:4353 commands/trigger.c:1119 commands/trigger.c:1224 +#: commands/tablecmds.c:4728 commands/trigger.c:1314 commands/trigger.c:1420 #, c-format msgid "\"%s\" is not a table, view, or foreign table" msgstr "\"%s\" 객체는 테이블, 뷰, 외부 테이블 그 어느 것도 아닙니다" -#: commands/tablecmds.c:4356 +#: commands/tablecmds.c:4731 #, c-format msgid "\"%s\" is not a table, view, materialized view, or index" 
msgstr "\"%s\" 객체는 테이블, 뷰, 구체화된 뷰, 인덱스 그 어느 것도 아닙니다" -#: commands/tablecmds.c:4362 +#: commands/tablecmds.c:4737 #, c-format msgid "\"%s\" is not a table, materialized view, or index" msgstr "\"%s\" 객체는 테이블, 구체화된 뷰, 인덱스 그 어느 것도 아닙니다" -#: commands/tablecmds.c:4365 +#: commands/tablecmds.c:4740 #, c-format msgid "\"%s\" is not a table, materialized view, or foreign table" msgstr "\"%s\" 객체는 테이블, 구체화된 뷰, 외부 테이블 그 어느 것도 아닙니다." -#: commands/tablecmds.c:4368 +#: commands/tablecmds.c:4743 #, c-format msgid "\"%s\" is not a table or foreign table" msgstr "\"%s\" 객체는 테이블도 외부 테이블도 아닙니다" -#: commands/tablecmds.c:4371 +#: commands/tablecmds.c:4746 #, c-format msgid "\"%s\" is not a table, composite type, or foreign table" msgstr "\"%s\" 객체는 테이블, 복합 자료형, 외부 테이블 그 어느 것도 아닙니다." -#: commands/tablecmds.c:4374 commands/tablecmds.c:5426 +#: commands/tablecmds.c:4749 commands/tablecmds.c:6125 #, c-format msgid "\"%s\" is not a table, materialized view, index, or foreign table" msgstr "" "\"%s\" 객체는 테이블, 구체화된 뷰, 인덱스, 외부 테이블 그 어느 것도 아닙니다." -#: commands/tablecmds.c:4384 +#: commands/tablecmds.c:4759 #, c-format msgid "\"%s\" is of the wrong type" msgstr "\"%s\" 객체는 잘못된 객체형입니다." -#: commands/tablecmds.c:4536 commands/tablecmds.c:4543 +#: commands/tablecmds.c:4934 commands/tablecmds.c:4941 #, c-format msgid "cannot alter type \"%s\" because column \"%s.%s\" uses it" msgstr "\"%s\" 자료형 변경할 수 없음(\"%s.%s\" 칼럼에서 해당 형식을 사용함)" -#: commands/tablecmds.c:4550 +#: commands/tablecmds.c:4948 #, c-format msgid "" "cannot alter foreign table \"%s\" because column \"%s.%s\" uses its row type" msgstr "" "\"%s\" 외부 테이블을 변경할 수 없음(\"%s.%s\" 칼럼에서 해당 로우 형을 사용함)" -#: commands/tablecmds.c:4557 +#: commands/tablecmds.c:4955 #, c-format msgid "cannot alter table \"%s\" because column \"%s.%s\" uses its row type" msgstr "" "\"%s\" 테이블을 변경할 수 없음(\"%s.%s\" 칼럼에서 해당 로우 형식을 사용함)" -#: commands/tablecmds.c:4619 +#: commands/tablecmds.c:5009 #, c-format msgid "cannot alter type \"%s\" because it is the type of a typed table" msgstr "" "\"%s\" 자료형을 변경할 수 없음, 이 자료형은 typed 테이블의 자료형이기 때문" -#: commands/tablecmds.c:4621 +#: commands/tablecmds.c:5011 #, c-format msgid "Use ALTER ... CASCADE to alter the typed tables too." msgstr "" "이 객체와 관계된 모든 객체들을 함께 변경하려면 ALTER ... 
CASCADE 명령을 사용" "하십시오" -#: commands/tablecmds.c:4665 +#: commands/tablecmds.c:5055 #, c-format msgid "type %s is not a composite type" msgstr "%s 자료형은 복합 자료형이 아닙니다" -#: commands/tablecmds.c:4691 +#: commands/tablecmds.c:5081 #, c-format msgid "cannot add column to typed table" msgstr "typed 테이블에는 칼럼을 추가 할 수 없음" -#: commands/tablecmds.c:4759 commands/tablecmds.c:10351 +#: commands/tablecmds.c:5125 +#, c-format +msgid "cannot add column to a partition" +msgstr "파티션 테이블에는 칼럼을 추가 할 수 없습니다" + +#: commands/tablecmds.c:5154 commands/tablecmds.c:11226 #, c-format msgid "child table \"%s\" has different type for column \"%s\"" msgstr "" "\"%s\" 상속된 테이블의 \"%s\" 열 자료형이 상위 테이블의 자료형과 틀립니다" -#: commands/tablecmds.c:4765 commands/tablecmds.c:10358 +#: commands/tablecmds.c:5160 commands/tablecmds.c:11233 #, c-format msgid "child table \"%s\" has different collation for column \"%s\"" msgstr "" "\"%s\" 상속된 테이블의 \"%s\" 칼럼 정렬규칙이 상위 테이블의 정렬규칙과 틀립니" "다" -#: commands/tablecmds.c:4775 +#: commands/tablecmds.c:5170 #, c-format msgid "child table \"%s\" has a conflicting \"%s\" column" msgstr "\"%s\" 하위 테이블에 충돌하는 \"%s\" 칼럼이 있음" -#: commands/tablecmds.c:4787 +#: commands/tablecmds.c:5181 #, c-format msgid "merging definition of column \"%s\" for child \"%s\"" msgstr "\"%s\" 열(\"%s\" 하위)의 정의를 병합하는 중" -#: commands/tablecmds.c:5014 +#: commands/tablecmds.c:5205 +#, c-format +msgid "cannot recursively add identity column to table that has child tables" +msgstr "" + +#: commands/tablecmds.c:5417 #, c-format msgid "column must be added to child tables too" -msgstr "하위 테이블에도 열을 추가해야 함" +msgstr "하위 테이블에도 칼럼을 추가해야 함" -#: commands/tablecmds.c:5089 +#: commands/tablecmds.c:5492 #, c-format msgid "column \"%s\" of relation \"%s\" already exists, skipping" msgstr "\"%s\" 이름의 칼럼이 \"%s\" 릴레이션에 이미 있습니다, 건너뜀" -#: commands/tablecmds.c:5096 +#: commands/tablecmds.c:5499 #, c-format msgid "column \"%s\" of relation \"%s\" already exists" -msgstr "\"%s\" 이름의 열이 \"%s\" 릴레이션에 이미 있습니다" +msgstr "\"%s\" 이름의 칼럼은 \"%s\" 릴레이션에 이미 있습니다" + +#: commands/tablecmds.c:5597 commands/tablecmds.c:8576 +#, c-format +msgid "" +"cannot remove constraint from only the partitioned table when partitions " +"exist" +msgstr "" -#: commands/tablecmds.c:5207 commands/tablecmds.c:5313 -#: commands/tablecmds.c:5371 commands/tablecmds.c:5485 -#: commands/tablecmds.c:5542 commands/tablecmds.c:5636 -#: commands/tablecmds.c:7924 commands/tablecmds.c:8606 +#: commands/tablecmds.c:5598 commands/tablecmds.c:5745 +#: commands/tablecmds.c:6542 commands/tablecmds.c:8577 +#, c-format +msgid "Do not specify the ONLY keyword." +msgstr "" + +#: commands/tablecmds.c:5630 commands/tablecmds.c:5777 +#: commands/tablecmds.c:5832 commands/tablecmds.c:5907 +#: commands/tablecmds.c:6001 commands/tablecmds.c:6060 +#: commands/tablecmds.c:6184 commands/tablecmds.c:6238 +#: commands/tablecmds.c:6330 commands/tablecmds.c:8716 +#: commands/tablecmds.c:9427 #, c-format msgid "cannot alter system column \"%s\"" -msgstr "\"%s\" 열은 시스템 열입니다. 그래서 변경될 수 없습니다" +msgstr "\"%s\" 칼럼은 시스템 칼럼입니다. 그래서 변경될 수 없습니다" + +#: commands/tablecmds.c:5636 commands/tablecmds.c:5838 +#, c-format +msgid "column \"%s\" of relation \"%s\" is an identity column" +msgstr "\"%s\" 칼럼(해당 테이블: \"%s\")은 식별 칼럼입니다." 
-#: commands/tablecmds.c:5243 +#: commands/tablecmds.c:5672 #, c-format msgid "column \"%s\" is in a primary key" -msgstr "\"%s\" 열은 기본키 열입니다" +msgstr "\"%s\" 칼럼은 기본키 칼럼입니다" + +#: commands/tablecmds.c:5694 +#, c-format +msgid "column \"%s\" is marked NOT NULL in parent table" +msgstr "파티션 테이블에서 \"%s\" 칼럼은 NOT NULL 속성으로 되어 있습니다" + +#: commands/tablecmds.c:5744 +#, c-format +msgid "" +"cannot add constraint to only the partitioned table when partitions exist" +msgstr "" + +#: commands/tablecmds.c:5840 +#, c-format +msgid "Use ALTER TABLE ... ALTER COLUMN ... DROP IDENTITY instead." +msgstr "" + +#: commands/tablecmds.c:5918 +#, c-format +msgid "" +"column \"%s\" of relation \"%s\" must be declared NOT NULL before identity " +"can be added" +msgstr "" + +#: commands/tablecmds.c:5924 +#, c-format +msgid "column \"%s\" of relation \"%s\" is already an identity column" +msgstr "\"%s\" 이름의 칼럼(해당 릴레이션: \"%s\")은 이미 식별 칼럼입니다" + +#: commands/tablecmds.c:5930 +#, c-format +msgid "column \"%s\" of relation \"%s\" already has a default value" +msgstr "\"%s\" 이름의 칼럼(해당 릴레이션: \"%s\")은 이미 default 입니다" + +#: commands/tablecmds.c:6007 commands/tablecmds.c:6068 +#, c-format +msgid "column \"%s\" of relation \"%s\" is not an identity column" +msgstr "\"%s\" 이름의 칼럼(해당 릴레이션: \"%s\")은 식별 칼럼이 아닙니다" + +#: commands/tablecmds.c:6073 +#, c-format +msgid "column \"%s\" of relation \"%s\" is not an identity column, skipping" +msgstr "\"%s\" 이름의 칼럼(해당 릴레이션: \"%s\")은 식별 칼럼이 아님, 건너뜀" -#: commands/tablecmds.c:5458 +#: commands/tablecmds.c:6157 #, c-format msgid "statistics target %d is too low" msgstr "대상 통계값(%d)이 너무 낮습니다" -#: commands/tablecmds.c:5466 +#: commands/tablecmds.c:6165 #, c-format msgid "lowering statistics target to %d" msgstr "%d 값으로 대상 통계값을 낮춥니다" -#: commands/tablecmds.c:5616 +#: commands/tablecmds.c:6310 #, c-format msgid "invalid storage type \"%s\"" msgstr "잘못된 STORAGE 값: \"%s\"" -#: commands/tablecmds.c:5648 +#: commands/tablecmds.c:6342 #, c-format msgid "column data type %s can only have storage PLAIN" msgstr "%s 자료형의 column의 STORAGE 값은 반드시 PLAIN 이어야합니다" -#: commands/tablecmds.c:5686 +#: commands/tablecmds.c:6377 #, c-format msgid "cannot drop column from typed table" msgstr "typed 테이블에서 칼럼을 삭제할 수 없음" -#: commands/tablecmds.c:5730 +#: commands/tablecmds.c:6484 #, c-format msgid "column \"%s\" of relation \"%s\" does not exist, skipping" msgstr "\"%s\" 칼럼은 \"%s\" 릴레이션에 없음, 건너뜀" -#: commands/tablecmds.c:5743 +#: commands/tablecmds.c:6497 #, c-format msgid "cannot drop system column \"%s\"" msgstr "\"%s\" 칼럼은 시스템 칼럼입니다, 삭제될 수 없습니다" -#: commands/tablecmds.c:5750 +#: commands/tablecmds.c:6504 #, c-format msgid "cannot drop inherited column \"%s\"" msgstr "\"%s\" 칼럼은 상속받은 칼럼입니다, 삭제될 수 없습니다" -#: commands/tablecmds.c:5990 +#: commands/tablecmds.c:6513 +#, c-format +msgid "cannot drop column named in partition key" +msgstr "파티션 키로 쓰이는 칼럼은 삭제할 수 없음" + +#: commands/tablecmds.c:6517 +#, c-format +msgid "cannot drop column referenced in partition key expression" +msgstr "파티션 표현식에서 참조하는 칼럼은 삭제할 수 없음" + +#: commands/tablecmds.c:6541 +#, c-format +msgid "" +"cannot drop column from only the partitioned table when partitions exist" +msgstr "" +"파티션 테이블이 있는 파티션된 테이블에서 그 테이블만 칼럼을 삭제 할 수 없음" + +#: commands/tablecmds.c:6759 #, c-format msgid "" "ALTER TABLE / ADD CONSTRAINT USING INDEX will rename index \"%s\" to \"%s\"" @@ -8768,22 +9283,27 @@ msgstr "" "ALTER TABLE / ADD CONSTRAINT USING INDEX 작업은 \"%s\" 인덱스를 \"%s\" 이름으" "로 바꿀 것입니다." 
-#: commands/tablecmds.c:6203 +#: commands/tablecmds.c:6971 #, c-format msgid "constraint must be added to child tables too" msgstr "하위 테이블에도 제약 조건을 추가해야 함" -#: commands/tablecmds.c:6274 +#: commands/tablecmds.c:7042 +#, c-format +msgid "cannot reference partitioned table \"%s\"" +msgstr "\"%s\" 파티션된 테이블을 참조 할 수 없음" + +#: commands/tablecmds.c:7048 #, c-format msgid "referenced relation \"%s\" is not a table" -msgstr "참조되는 \"%s\" 릴레이션은 테이블이 아닙니다" +msgstr "참조된 \"%s\" 릴레이션은 테이블이 아닙니다" -#: commands/tablecmds.c:6297 +#: commands/tablecmds.c:7071 #, c-format msgid "constraints on permanent tables may reference only permanent tables" msgstr "영구 저장용 테이블의 제약 조건은 영구 저장용 테이블을 참조 합니다." -#: commands/tablecmds.c:6304 +#: commands/tablecmds.c:7078 #, c-format msgid "" "constraints on unlogged tables may reference only permanent or unlogged " @@ -8792,120 +9312,130 @@ msgstr "" "unlogged 테이블의 제약 조건은 영구 저장용 테이블 또는 unlogged 테이블을 참조" "합니다." -#: commands/tablecmds.c:6310 +#: commands/tablecmds.c:7084 #, c-format msgid "constraints on temporary tables may reference only temporary tables" msgstr "임시 테이블의 제약 조건은 임시 테이블에 대해서만 참조할 것입니다." -#: commands/tablecmds.c:6314 +#: commands/tablecmds.c:7088 #, c-format msgid "" "constraints on temporary tables must involve temporary tables of this session" msgstr "" "임시 테이블의 제약 조건은 이 세션용 임시 테이블에 대해서만 적용 됩니다." -#: commands/tablecmds.c:6375 +#: commands/tablecmds.c:7148 #, c-format msgid "number of referencing and referenced columns for foreign key disagree" msgstr "참조키(foreign key) disagree를 위한 참조하는, 또는 참조되는 열 수" -#: commands/tablecmds.c:6482 +#: commands/tablecmds.c:7255 #, c-format msgid "foreign key constraint \"%s\" cannot be implemented" msgstr "\"%s\" 참조키(foreign key) 제약 조건은 구현되어질 수 없습니다" -#: commands/tablecmds.c:6485 +#: commands/tablecmds.c:7258 #, c-format msgid "Key columns \"%s\" and \"%s\" are of incompatible types: %s and %s." msgstr "" "\"%s\" 열과 \"%s\" 열 인덱스는 함께 사용할 수 없는 자료형입니다: %s and %s." -#: commands/tablecmds.c:6692 commands/tablecmds.c:6860 -#: commands/tablecmds.c:7763 commands/tablecmds.c:7819 +#: commands/tablecmds.c:7463 commands/tablecmds.c:7629 +#: commands/tablecmds.c:8544 commands/tablecmds.c:8612 #, c-format msgid "constraint \"%s\" of relation \"%s\" does not exist" msgstr "\"%s\" 제약 조건이 \"%s\" 릴레이션에 없습니다." -#: commands/tablecmds.c:6698 +#: commands/tablecmds.c:7469 #, c-format msgid "constraint \"%s\" of relation \"%s\" is not a foreign key constraint" msgstr "\"%s\" 제약 조건(해당 테이블: \"%s\")은 참조키 제약조건이 아닙니다." -#: commands/tablecmds.c:6867 +#: commands/tablecmds.c:7636 #, c-format msgid "" "constraint \"%s\" of relation \"%s\" is not a foreign key or check constraint" msgstr "" "\"%s\" 제약 조건(해당 테이블: \"%s\")은 참조키도 체크 제약 조건도 아닙니다." 
-#: commands/tablecmds.c:6935 +#: commands/tablecmds.c:7706 #, c-format msgid "constraint must be validated on child tables too" msgstr "하위 테이블에도 제약 조건이 유효해야 함" -#: commands/tablecmds.c:7004 +#: commands/tablecmds.c:7774 #, c-format msgid "column \"%s\" referenced in foreign key constraint does not exist" -msgstr "참조키(foreign key) 제약 조건에서 참조하는 \"%s\" 열이 없음" +msgstr "참조키(foreign key) 제약 조건에서 참조하는 \"%s\" 칼럼이 없음" -#: commands/tablecmds.c:7009 +#: commands/tablecmds.c:7779 #, c-format msgid "cannot have more than %d keys in a foreign key" msgstr "참조키(foreign key)에서 %d 키 개수보다 많이 가질 수 없음" -#: commands/tablecmds.c:7074 +#: commands/tablecmds.c:7844 #, c-format msgid "cannot use a deferrable primary key for referenced table \"%s\"" msgstr "참조되는 \"%s\" 테이블의 지연 가능한 기본키를 사용할 수 없음" -#: commands/tablecmds.c:7091 +#: commands/tablecmds.c:7861 #, c-format msgid "there is no primary key for referenced table \"%s\"" msgstr "참조되는 \"%s\" 테이블에는 기본키(primary key)가 없습니다" -#: commands/tablecmds.c:7156 +#: commands/tablecmds.c:7926 #, c-format msgid "foreign key referenced-columns list must not contain duplicates" msgstr "참조키의 참조 칼럼 목록에 칼럼이 중복되면 안됩니다" -#: commands/tablecmds.c:7250 +#: commands/tablecmds.c:8020 #, c-format msgid "cannot use a deferrable unique constraint for referenced table \"%s\"" msgstr "참조되는 \"%s\" 테이블의 지연 가능한 유니크 제약 조건을 사용할 수 없음" -#: commands/tablecmds.c:7255 +#: commands/tablecmds.c:8025 #, c-format msgid "" "there is no unique constraint matching given keys for referenced table \"%s\"" msgstr "" "참조되는 \"%s\" 테이블을 위한 주워진 키와 일치하는 고유 제약 조건이 없습니다" -#: commands/tablecmds.c:7418 +#: commands/tablecmds.c:8196 #, c-format msgid "validating foreign key constraint \"%s\"" msgstr "\"%s\" 참조키 제약 조건 검사 중" -#: commands/tablecmds.c:7717 +#: commands/tablecmds.c:8498 #, c-format msgid "cannot drop inherited constraint \"%s\" of relation \"%s\"" msgstr "상속된 \"%s\" 제약 조건(해당 테이블: \"%s\")을 삭제할 수 없음" -#: commands/tablecmds.c:7769 +#: commands/tablecmds.c:8550 #, c-format msgid "constraint \"%s\" of relation \"%s\" does not exist, skipping" msgstr "\"%s\" 제약 조건(해당 테이블: \"%s\")이 없음, 건너뜀" -#: commands/tablecmds.c:7908 +#: commands/tablecmds.c:8700 #, c-format msgid "cannot alter column type of typed table" msgstr "typed 테이블의 칼럼 자료형은 변경할 수 없음" -#: commands/tablecmds.c:7931 +#: commands/tablecmds.c:8723 #, c-format msgid "cannot alter inherited column \"%s\"" -msgstr "\"%s\" 이름의 열은 상속 받은 열입니다, 이름을 바꿀 수 없습니다" +msgstr "\"%s\" 이름의 칼럼은 상속 받은 칼럼입니다, 이름을 바꿀 수 없습니다" + +#: commands/tablecmds.c:8732 +#, c-format +msgid "cannot alter type of column named in partition key" +msgstr "파티션 키로 사용되는 칼럼의 자료형은 바꿀 수 없음" + +#: commands/tablecmds.c:8736 +#, c-format +msgid "cannot alter type of column referenced in partition key expression" +msgstr "파티션 키 표현식에서 참조되는 칼럼의 자료형은 바꿀 수 없음" -#: commands/tablecmds.c:7980 +#: commands/tablecmds.c:8786 #, c-format msgid "" "result of USING clause for column \"%s\" cannot be cast automatically to " @@ -8913,189 +9443,213 @@ msgid "" msgstr "" "\"%s\" 칼럼에서 쓰인 USING 절의 결과가 %s 자료형으로 자동 형변환을 할 수 없음" -#: commands/tablecmds.c:7983 +#: commands/tablecmds.c:8789 #, c-format msgid "You might need to add an explicit cast." msgstr "명시적 형변환을 해야할 것 같습니다." -#: commands/tablecmds.c:7987 +#: commands/tablecmds.c:8793 #, c-format msgid "column \"%s\" cannot be cast automatically to type %s" msgstr "\"%s\" 칼럼의 자료형을 %s 형으로 형변환할 수 없음" #. translator: USING is SQL, don't translate it -#: commands/tablecmds.c:7990 +#: commands/tablecmds.c:8796 #, c-format msgid "You might need to specify \"USING %s::%s\"." 
msgstr "\"USING %s::%s\" 구문을 추가해야 할 것 같습니다." -#: commands/tablecmds.c:8089 +#: commands/tablecmds.c:8895 #, c-format msgid "USING expression contains a whole-row table reference." msgstr "USING 표현식에서 전체 로우 테이블 참조를 포함하고 있습니다." -#: commands/tablecmds.c:8100 +#: commands/tablecmds.c:8906 #, c-format msgid "type of inherited column \"%s\" must be changed in child tables too" -msgstr "하위 테이블에서도 상속된 \"%s\" 열의 형식을 바꾸어야 함" +msgstr "하위 테이블에서도 상속된 \"%s\" 칼럼의 형식을 바꾸어야 함" -#: commands/tablecmds.c:8187 +#: commands/tablecmds.c:8993 #, c-format msgid "cannot alter type of column \"%s\" twice" -msgstr "\"%s\" 열은 시스템 열입니다. 그래서 변경될 수 없습니다" +msgstr "\"%s\" 칼럼은 시스템 칼럼입니다. 그래서 변경될 수 없습니다" -#: commands/tablecmds.c:8223 +#: commands/tablecmds.c:9029 #, c-format msgid "default for column \"%s\" cannot be cast automatically to type %s" msgstr "\"%s\" 칼럼의 기본 값을 %s 형으로 형변환할 수 없음" -#: commands/tablecmds.c:8349 +#: commands/tablecmds.c:9155 #, c-format msgid "cannot alter type of a column used by a view or rule" -msgstr "뷰 또는 규칙에서 사용하는 열의 형식을 변경할 수 없음" +msgstr "뷰 또는 규칙에서 사용하는 칼럼의 형식을 변경할 수 없음" -#: commands/tablecmds.c:8350 commands/tablecmds.c:8369 -#: commands/tablecmds.c:8387 +#: commands/tablecmds.c:9156 commands/tablecmds.c:9175 +#: commands/tablecmds.c:9193 #, c-format msgid "%s depends on column \"%s\"" msgstr "%s 의존대상 열: \"%s\"" -#: commands/tablecmds.c:8368 +#: commands/tablecmds.c:9174 #, c-format msgid "cannot alter type of a column used in a trigger definition" msgstr "트리거 정의에서 사용하는 칼럼의 자료형을 변경할 수 없음" -#: commands/tablecmds.c:8386 +#: commands/tablecmds.c:9192 #, c-format msgid "cannot alter type of a column used in a policy definition" msgstr "정책 정의에서 사용하는 칼럼의 자료형을 변경할 수 없음" -#: commands/tablecmds.c:9051 +#: commands/tablecmds.c:9867 #, c-format msgid "cannot change owner of index \"%s\"" msgstr "\"%s\" 인덱스의 소유주를 바꿀 수 없음" -#: commands/tablecmds.c:9053 +#: commands/tablecmds.c:9869 #, c-format msgid "Change the ownership of the index's table, instead." msgstr "대신에 그 인덱스의 해당 테이블 소유자을 변경하세요." -#: commands/tablecmds.c:9069 +#: commands/tablecmds.c:9886 #, c-format msgid "cannot change owner of sequence \"%s\"" msgstr "\"%s\" 시퀀스의 소유주를 바꿀 수 없음" -#: commands/tablecmds.c:9071 commands/tablecmds.c:11543 -#, c-format -msgid "Sequence \"%s\" is linked to table \"%s\"." -msgstr "\"%s\" 시퀀스는 \"%s\" 테이블에 종속되어 있습니다." - -#: commands/tablecmds.c:9083 commands/tablecmds.c:12190 +#: commands/tablecmds.c:9900 commands/tablecmds.c:13129 #, c-format msgid "Use ALTER TYPE instead." msgstr "대신 ALTER TYPE을 사용하십시오." -#: commands/tablecmds.c:9092 +#: commands/tablecmds.c:9909 #, c-format msgid "\"%s\" is not a table, view, sequence, or foreign table" msgstr "\"%s\" 객체는 테이블, 뷰, 시퀀스, 외부 테이블 그 어느 것도 아닙니다" -#: commands/tablecmds.c:9435 +#: commands/tablecmds.c:10250 #, c-format msgid "cannot have multiple SET TABLESPACE subcommands" msgstr "SET TABLESPACE 구문이 중복 사용되었습니다" -#: commands/tablecmds.c:9508 +#: commands/tablecmds.c:10324 #, c-format msgid "\"%s\" is not a table, view, materialized view, index, or TOAST table" msgstr "" "\"%s\" 객체는 테이블, 뷰, 구체화된 뷰, 인덱스, TOAST 테이블 그 어느 것도 아닙" "니다." -#: commands/tablecmds.c:9541 commands/view.c:498 +#: commands/tablecmds.c:10357 commands/view.c:504 #, c-format msgid "WITH CHECK OPTION is supported only on automatically updatable views" msgstr "" "WITH CHECK OPTION 옵션은 자동 갱신 가능한 뷰에 대해서만 사용할 수 있습니다" -#: commands/tablecmds.c:9687 +#: commands/tablecmds.c:10499 #, c-format msgid "cannot move system relation \"%s\"" msgstr "\"%s\" 시스템 릴레이션입니다. 
이동할 수 없습니다" -#: commands/tablecmds.c:9703 +#: commands/tablecmds.c:10515 #, c-format msgid "cannot move temporary tables of other sessions" msgstr "다른 세션의 임시 테이블들은 이동할 수 없습니다" -#: commands/tablecmds.c:9840 +#: commands/tablecmds.c:10651 #, c-format msgid "only tables, indexes, and materialized views exist in tablespaces" msgstr "테이블스페이스에 테이블과 인덱스와 구체화된 뷰만 있습니다." -#: commands/tablecmds.c:9852 +#: commands/tablecmds.c:10663 #, c-format msgid "cannot move relations in to or out of pg_global tablespace" msgstr "" "해당 객체를 pg_global 테이블스페이스로 옮기거나 그 반대로 작업할 수 없음" -#: commands/tablecmds.c:9943 +#: commands/tablecmds.c:10755 #, c-format msgid "aborting because lock on relation \"%s.%s\" is not available" msgstr "\"%s.%s\" 릴레이션을 잠글 수 없어 중지 중입니다" -#: commands/tablecmds.c:9959 +#: commands/tablecmds.c:10771 #, c-format msgid "no matching relations in tablespace \"%s\" found" msgstr "검색조건에 일치하는 릴레이션이 \"%s\" 테이블스페이스에 없음" -#: commands/tablecmds.c:10033 storage/buffer/bufmgr.c:915 +#: commands/tablecmds.c:10845 storage/buffer/bufmgr.c:915 #, c-format msgid "invalid page in block %u of relation %s" msgstr "%u 블록(해당 릴레이션: %s)에 잘못된 페이지가 있음" -#: commands/tablecmds.c:10115 +#: commands/tablecmds.c:10927 #, c-format msgid "cannot change inheritance of typed table" msgstr "typed 테이블의 상속 정보는 변경할 수 없음" -#: commands/tablecmds.c:10165 +#: commands/tablecmds.c:10932 commands/tablecmds.c:11474 +#, c-format +msgid "cannot change inheritance of a partition" +msgstr "파티션 테이블의 상속 정보는 바꿀 수 없음" + +#: commands/tablecmds.c:10937 +#, c-format +msgid "cannot change inheritance of partitioned table" +msgstr "파티션된 테이블의 상속 정보는 바꿀 수 없음" + +#: commands/tablecmds.c:10983 #, c-format msgid "cannot inherit to temporary relation of another session" msgstr "다른 세션의 임시 테이블을 상속할 수 없음" -#: commands/tablecmds.c:10219 +#: commands/tablecmds.c:10996 +#, c-format +msgid "cannot inherit from a partition" +msgstr "파티션 테이블에서 상속 할 수 없음" + +#: commands/tablecmds.c:11018 commands/tablecmds.c:13523 #, c-format msgid "circular inheritance not allowed" msgstr "순환 되는 상속은 허용하지 않습니다" -#: commands/tablecmds.c:10220 +#: commands/tablecmds.c:11019 commands/tablecmds.c:13524 #, c-format msgid "\"%s\" is already a child of \"%s\"." msgstr "\"%s\" 객체는 이미 \"%s\" 객체로부터 상속받은 상태입니다." -#: commands/tablecmds.c:10228 +#: commands/tablecmds.c:11027 #, c-format msgid "table \"%s\" without OIDs cannot inherit from table \"%s\" with OIDs" msgstr "" "\"%s\" 테이블이 OID 열 없이 OID 있는 \"%s\" 테이블에서 상속 될 수 없습니다." -#: commands/tablecmds.c:10369 +#: commands/tablecmds.c:11040 +#, c-format +msgid "trigger \"%s\" prevents table \"%s\" from becoming an inheritance child" +msgstr "" +"\"%s\" 트리거(해당 테이블 \"%s\")은 하위테이블 상속과 관련되어 보호되고 있습" +"니다." 
+ +#: commands/tablecmds.c:11042 +#, c-format +msgid "" +"ROW triggers with transition tables are not supported in inheritance " +"hierarchies" +msgstr "" + +#: commands/tablecmds.c:11244 #, c-format msgid "column \"%s\" in child table must be marked NOT NULL" -msgstr "자식 테이블의 \"%s\" 열은 NOT NULL 속성이 있어야합니다" +msgstr "자식 테이블의 \"%s\" 칼럼은 NOT NULL 속성이 있어야합니다" -#: commands/tablecmds.c:10385 commands/tablecmds.c:10418 +#: commands/tablecmds.c:11271 commands/tablecmds.c:11310 #, c-format msgid "child table is missing column \"%s\"" -msgstr "자식 테이블에는 \"%s\" 열이 없습니다" +msgstr "자식 테이블에는 \"%s\" 칼럼이 없습니다" -#: commands/tablecmds.c:10501 +#: commands/tablecmds.c:11398 #, c-format msgid "child table \"%s\" has different definition for check constraint \"%s\"" msgstr "\"%s\" 하위 테이블에 \"%s\" 체크 제약 조건에 대한 다른 정의가 있음" -#: commands/tablecmds.c:10509 +#: commands/tablecmds.c:11406 #, c-format msgid "" "constraint \"%s\" conflicts with non-inherited constraint on child table \"%s" @@ -9103,80 +9657,85 @@ msgid "" msgstr "" "\"%s\" 제약 조건이 \"%s\" 하위 테이블에 있는 비 상속 제약 조건과 충돌합니다" -#: commands/tablecmds.c:10520 +#: commands/tablecmds.c:11417 #, c-format msgid "" "constraint \"%s\" conflicts with NOT VALID constraint on child table \"%s\"" msgstr "" "\"%s\" 제약 조건이 \"%s\" 하위 테이블에 있는 NOT VALID 제약 조건과 충돌합니다" -#: commands/tablecmds.c:10544 +#: commands/tablecmds.c:11452 #, c-format msgid "child table is missing constraint \"%s\"" msgstr "자식 테이블에 \"%s\" 제약 조건이 없습니다" -#: commands/tablecmds.c:10628 +#: commands/tablecmds.c:11568 +#, c-format +msgid "relation \"%s\" is not a partition of relation \"%s\"" +msgstr "\"%s\" 릴레이션은 \"%s\" 릴레이션의 파티션이 아닙니다" + +#: commands/tablecmds.c:11574 #, c-format msgid "relation \"%s\" is not a parent of relation \"%s\"" msgstr "\"%s\" 릴레이션은 \"%s\" 릴레이션의 부모가 아닙니다" -#: commands/tablecmds.c:10862 +#: commands/tablecmds.c:11800 #, c-format msgid "typed tables cannot inherit" msgstr "typed 테이블은 상속할 수 없음" -#: commands/tablecmds.c:10893 +#: commands/tablecmds.c:11831 #, c-format msgid "table is missing column \"%s\"" msgstr "테이블에는 \"%s\" 칼럼이 없습니다" -#: commands/tablecmds.c:10903 +#: commands/tablecmds.c:11841 #, c-format msgid "table has column \"%s\" where type requires \"%s\"" msgstr "\"%s\" 칼럼은 \"%s\" 자료형입니다." 
-#: commands/tablecmds.c:10912
+#: commands/tablecmds.c:11850
 #, c-format
 msgid "table \"%s\" has different type for column \"%s\""
 msgstr "\"%s\" 테이블의 \"%s\" 칼럼 자료형 틀립니다"
 
-#: commands/tablecmds.c:10925
+#: commands/tablecmds.c:11863
 #, c-format
 msgid "table has extra column \"%s\""
 msgstr "\"%s\" 칼럼은 확장형입니다"
 
-#: commands/tablecmds.c:10977
+#: commands/tablecmds.c:11915
 #, c-format
 msgid "\"%s\" is not a typed table"
 msgstr "\"%s\" 테이블은 typed 테이블이 아닙니다"
 
-#: commands/tablecmds.c:11161
+#: commands/tablecmds.c:12097
 #, c-format
 msgid "cannot use non-unique index \"%s\" as replica identity"
 msgstr "\"%s\" 인덱스는 유니크 인덱스가 아니여서, 복제 식별자로 사용할 수 없음"
 
-#: commands/tablecmds.c:11167
+#: commands/tablecmds.c:12103
 #, c-format
 msgid "cannot use non-immediate index \"%s\" as replica identity"
 msgstr "\"%s\" non-immediate 인덱스는 복제 식별자로 사용할 수 없음"
 
-#: commands/tablecmds.c:11173
+#: commands/tablecmds.c:12109
 #, c-format
 msgid "cannot use expression index \"%s\" as replica identity"
 msgstr "\"%s\" 인덱스는 expression 인덱스여서, 복제 식별자로 사용할 수 없음"
 
-#: commands/tablecmds.c:11179
+#: commands/tablecmds.c:12115
 #, c-format
 msgid "cannot use partial index \"%s\" as replica identity"
 msgstr "\"%s\" 인덱스가 부분인덱스여서, 복제 식별자로 사용할 수 없음"
 
-#: commands/tablecmds.c:11185
+#: commands/tablecmds.c:12121
 #, c-format
 msgid "cannot use invalid index \"%s\" as replica identity"
 msgstr ""
 "\"%s\" 인덱스는 사용할 수 없는 인덱스여서, 복제 식별자로 사용할 수 없음"
 
-#: commands/tablecmds.c:11206
+#: commands/tablecmds.c:12142
 #, c-format
 msgid ""
 "index \"%s\" cannot be used as replica identity because column %d is a "
@@ -9184,69 +9743,225 @@ msgid ""
 msgstr ""
 "\"%s\" 인덱스는 복제 식별자로 사용할 수 없음, %d 번째 칼럼이 시스템 칼럼임"
 
-#: commands/tablecmds.c:11213
+#: commands/tablecmds.c:12149
+#, c-format
+msgid ""
+"index \"%s\" cannot be used as replica identity because column \"%s\" is "
+"nullable"
+msgstr ""
+"\"%s\" 인덱스는 복제 식별자로 사용할 수 없음, \"%s\" 칼럼이 null 값 사용가능 "
+"속성임"
+
+#: commands/tablecmds.c:12342
+#, c-format
+msgid "cannot change logged status of table \"%s\" because it is temporary"
+msgstr "\"%s\" 테이블은 임시 테이블이기에, logged 속성을 변경할 수 없음"
+
+#: commands/tablecmds.c:12366
+#, c-format
+msgid ""
+"cannot change table \"%s\" to unlogged because it is part of a publication"
+msgstr "\"%s\" 테이블은 발행에 사용하고 있어, unlogged 속성으로 바꿀 수 없음"
+
+#: commands/tablecmds.c:12368
+#, c-format
+msgid "Unlogged relations cannot be replicated."
+msgstr "unlogged 릴레이션 복제할 수 없습니다."
+
+#: commands/tablecmds.c:12413
+#, c-format
+msgid ""
+"could not change table \"%s\" to logged because it references unlogged table "
+"\"%s\""
+msgstr ""
+"\"%s\" 테이블이 \"%s\" unlogged 테이블을 참조하고 있어 logged 속성으로 바꿀 "
+"수 없음"
+
+#: commands/tablecmds.c:12423
+#, c-format
+msgid ""
+"could not change table \"%s\" to unlogged because it references logged table "
+"\"%s\""
+msgstr ""
+"\"%s\" 테이블이 \"%s\" logged 테이블을 참조하고 있어 unlogged 속성으로 바꿀 "
+"수 없음"
+
+#: commands/tablecmds.c:12481
+#, c-format
+msgid "cannot move an owned sequence into another schema"
+msgstr "소유된 시퀀스를 다른 스키마로 이동할 수 없음"
+
+#: commands/tablecmds.c:12587
+#, c-format
+msgid "relation \"%s\" already exists in schema \"%s\""
+msgstr "\"%s\" 릴레이션이 \"%s\" 스키마에 이미 있습니다"
+
+#: commands/tablecmds.c:13113
+#, c-format
+msgid "\"%s\" is not a composite type"
+msgstr "\"%s\" 객체는 복합 자료형이 아닙니다"
+
+#: commands/tablecmds.c:13144
+#, c-format
+msgid ""
+"\"%s\" is not a table, view, materialized view, sequence, or foreign table"
+msgstr ""
+"\"%s\" 객체는 테이블, 뷰, 구체화된 뷰, 시퀀스, 외부 테이블 그 어느 것도 아닙"
+"니다"
+
+#: commands/tablecmds.c:13177
+#, c-format
+msgid "unrecognized partitioning strategy \"%s\""
+msgstr "알 수 없는 파티션 규칙 \"%s\""
+
+#: commands/tablecmds.c:13185
+#, c-format
+msgid "cannot use \"list\" partition strategy with more than one column"
+msgstr "둘 이상의 칼럼을 사용할 \"list\" 파티션은 사용할 수 없습니다"
+
+#: commands/tablecmds.c:13210
+#, c-format
+msgid "column \"%s\" appears more than once in partition key"
+msgstr "\"%s\" 칼럼이 파티션 키로 한번 이상 사용되었습니다"
+
+#: commands/tablecmds.c:13263
+#, c-format
+msgid "column \"%s\" named in partition key does not exist"
+msgstr "파티션 키로 지정한 \"%s\" 칼럼이 없습니다"
+
+#: commands/tablecmds.c:13270
+#, c-format
+msgid "cannot use system column \"%s\" in partition key"
+msgstr "\"%s\" 칼럼은 시스템 칼럼입니다. 그래서 파티션 키로 사용될 수 없습니다"
+
+#: commands/tablecmds.c:13333
+#, c-format
+msgid "functions in partition key expression must be marked IMMUTABLE"
+msgstr "파티션 키로 사용할 함수는 IMMUTABLE 특성이 있어야합니다"
+
+#: commands/tablecmds.c:13350
+#, c-format
+msgid "partition key expressions cannot contain whole-row references"
+msgstr "파티션 키 표현식에서 전체 로우 참조를 포함할 수 없습니다"
+
+#: commands/tablecmds.c:13357
+#, c-format
+msgid "partition key expressions cannot contain system column references"
+msgstr "파티션 키 표현식에서는 시스템 칼럼 참조를 포함할 수 없습니다"
+
+#: commands/tablecmds.c:13367
+#, c-format
+msgid "cannot use constant expression as partition key"
+msgstr "파티션 키로 상수는 쓸 수 없습니다"
+
+#: commands/tablecmds.c:13388
+#, c-format
+msgid "could not determine which collation to use for partition expression"
+msgstr "파티션 표현식에 쓸 문자 정렬 규칙을 결정할 수 없습니다"
+
+#: commands/tablecmds.c:13413
+#, c-format
+msgid "data type %s has no default btree operator class"
+msgstr "%s 자료형은 기본 btree 연산자 클래스를 정의하지 않았습니다"
+
+#: commands/tablecmds.c:13415
+#, c-format
+msgid ""
+"You must specify a btree operator class or define a default btree operator "
+"class for the data type."
+msgstr "" +"해당 자료형을 위한 btree 연산자 클래스를 지정하거나 기본 btree 연산자 클래스" +"를 정의해 두어야합니다" + +#: commands/tablecmds.c:13463 +#, c-format +msgid "\"%s\" is already a partition" +msgstr "\"%s\" 이름의 파티션 테이블이 이미 있습니다" + +#: commands/tablecmds.c:13469 +#, c-format +msgid "cannot attach a typed table as partition" +msgstr "파티션 테이블로 typed 테이블을 추가할 수 없음" + +#: commands/tablecmds.c:13485 +#, c-format +msgid "cannot attach inheritance child as partition" +msgstr "파티션 테이블로 상속을 이용한 하위 테이블을 추가할 수 없음" + +#: commands/tablecmds.c:13499 +#, c-format +msgid "cannot attach inheritance parent as partition" +msgstr "파티션 테이블로 상속용 상위 테이블을 추가할 수 없음" + +#: commands/tablecmds.c:13533 #, c-format msgid "" -"index \"%s\" cannot be used as replica identity because column \"%s\" is " -"nullable" -msgstr "" -"\"%s\" 인덱스는 복제 식별자로 사용할 수 없음, \"%s\" 칼럼이 null 값 사용가능 " -"속성임" +"cannot attach a permanent relation as partition of temporary relation \"%s\"" +msgstr "\"%s\" 임시 테이블입니다, 파티션 테이블로 추가할 수 없습니다" -#: commands/tablecmds.c:11416 +#: commands/tablecmds.c:13541 #, c-format -msgid "cannot change logged status of table \"%s\" because it is temporary" -msgstr "\"%s\" 테이블은 임시 테이블이기에, 통계 정보를 변경 할 수 없음" +msgid "cannot attach as partition of temporary relation of another session" +msgstr "다른 세션의 임시 테이블을 파티션 테이블로 추가할 수 없습니다" + +#: commands/tablecmds.c:13548 +#, c-format +msgid "cannot attach temporary relation of another session as partition" +msgstr "다른 세션의 임시 테이블을 파티션 테이블로 추가할 수 없습니다" -#: commands/tablecmds.c:11475 +#: commands/tablecmds.c:13554 #, c-format msgid "" -"could not change table \"%s\" to logged because it references unlogged table " -"\"%s\"" +"cannot attach table \"%s\" without OIDs as partition of table \"%s\" with " +"OIDs" msgstr "" -"\"%s\" 테이블이 \"%s\" unlogged 테이블을 참조하고 있어 logged 속성으로 바꿀 " -"수 없음" +"without oids \"%s\" 테이블이 with oids \"%s\" 파티션 테이블로 추가할 수 없음" -#: commands/tablecmds.c:11485 +#: commands/tablecmds.c:13562 #, c-format msgid "" -"could not change table \"%s\" to unlogged because it references logged table " -"\"%s\"" +"cannot attach table \"%s\" with OIDs as partition of table \"%s\" without " +"OIDs" msgstr "" -"\"%s\" 테이블이 \"%s\" logged 테이블을 참조하고 있어 unlogged 속성으로 바꿀 " -"수 없음" +"with oids \"%s\" 테이블이 without oids \"%s\" 파티션 테이블로 추가할 수 없음" -#: commands/tablecmds.c:11542 +#: commands/tablecmds.c:13584 #, c-format -msgid "cannot move an owned sequence into another schema" -msgstr "소유된 시퀀스를 다른 스키마로 이동할 수 없음" +msgid "table \"%s\" contains column \"%s\" not found in parent \"%s\"" +msgstr "\"%s\" 테이블의 \"%s\" 칼럼이 상위 테이블인 \"%s\"에 없음" -#: commands/tablecmds.c:11647 +#: commands/tablecmds.c:13587 #, c-format -msgid "relation \"%s\" already exists in schema \"%s\"" -msgstr "\"%s\" 릴레이션이 \"%s\" 스키마에 이미 있습니다" +msgid "The new partition may contain only the columns present in parent." +msgstr "새 파티션 테이블은 상위 테이블의 칼럼과 동일해야 합니다." 
-#: commands/tablecmds.c:12174 +#: commands/tablecmds.c:13599 #, c-format -msgid "\"%s\" is not a composite type" -msgstr "\"%s\" 객체는 복합 자료형입니다" +msgid "trigger \"%s\" prevents table \"%s\" from becoming a partition" +msgstr "" +"\"%s\" 트리거가 \"%s\" 테이블에 있어 파티션 테이블로 포함 될 수 없습니다" -#: commands/tablecmds.c:12204 +#: commands/tablecmds.c:13601 commands/trigger.c:393 +#, c-format +msgid "ROW triggers with transition tables are not supported on partitions" +msgstr "" +"ROW 트리거들이 있는 테이블을 파티션 테이블로 포함하는 기능은 지원하지 않습니" +"다" + +#: commands/tablecmds.c:13726 #, c-format msgid "" -"\"%s\" is not a table, view, materialized view, sequence, or foreign table" +"partition constraint for table \"%s\" is implied by existing constraints" msgstr "" -"\"%s\" 객체는 테이블, 뷰, 구체화된 뷰, 시퀀스, 외부 테이블 그 어느 것도 아닙" -"니다" #: commands/tablespace.c:162 commands/tablespace.c:179 #: commands/tablespace.c:190 commands/tablespace.c:198 -#: commands/tablespace.c:625 replication/slot.c:980 storage/file/copydir.c:47 +#: commands/tablespace.c:623 replication/slot.c:1178 storage/file/copydir.c:47 #, c-format msgid "could not create directory \"%s\": %m" msgstr "\"%s\" 디렉터리를 만들 수 없음: %m" -#: commands/tablespace.c:209 +#: commands/tablespace.c:209 utils/adt/genfile.c:538 #, c-format msgid "could not stat directory \"%s\": %m" msgstr "\"%s\" 디렉터리 상태를 파악할 수 없음: %m" @@ -9286,229 +10001,302 @@ msgstr "테이블스페이스 경로가 너무 깁니다: \"%s\"" msgid "tablespace location should not be inside the data directory" msgstr "테이블스페이스 경로는 데이터 디렉토리 안에 있으면 안됩니다" -#: commands/tablespace.c:304 commands/tablespace.c:952 +#: commands/tablespace.c:304 commands/tablespace.c:950 #, c-format msgid "unacceptable tablespace name \"%s\"" msgstr "\"%s\" 테이블스페이스 이름은 적당치 않습니다" -#: commands/tablespace.c:306 commands/tablespace.c:953 +#: commands/tablespace.c:306 commands/tablespace.c:951 #, c-format msgid "The prefix \"pg_\" is reserved for system tablespaces." msgstr "\"pg_\" 문자로 시작하는 테이블스페이스는 시스템 테이블스페이스입니다." -#: commands/tablespace.c:316 commands/tablespace.c:965 +#: commands/tablespace.c:316 commands/tablespace.c:963 #, c-format msgid "tablespace \"%s\" already exists" msgstr "\"%s\" 이름의 테이블스페이스는 이미 있음" -#: commands/tablespace.c:430 commands/tablespace.c:935 -#: commands/tablespace.c:1016 commands/tablespace.c:1085 -#: commands/tablespace.c:1218 commands/tablespace.c:1418 +#: commands/tablespace.c:428 commands/tablespace.c:933 +#: commands/tablespace.c:1013 commands/tablespace.c:1081 +#: commands/tablespace.c:1214 commands/tablespace.c:1414 #, c-format msgid "tablespace \"%s\" does not exist" msgstr "\"%s\" 테이블스페이스 없음" -#: commands/tablespace.c:436 +#: commands/tablespace.c:434 #, c-format msgid "tablespace \"%s\" does not exist, skipping" msgstr "\"%s\" 테이블스페이스 없음, 건너 뜀" -#: commands/tablespace.c:512 +#: commands/tablespace.c:510 #, c-format msgid "tablespace \"%s\" is not empty" msgstr "\"%s\" 테이블스페이스는 비어있지 않음" -#: commands/tablespace.c:584 +#: commands/tablespace.c:582 #, c-format msgid "directory \"%s\" does not exist" msgstr "\"%s\" 디렉터리 없음" -#: commands/tablespace.c:585 +#: commands/tablespace.c:583 #, c-format msgid "Create this directory for the tablespace before restarting the server." msgstr "이 서버를 재시작하기 전에 이 테이블스페이스 용 디렉터리를 만드세요." 
-#: commands/tablespace.c:590 +#: commands/tablespace.c:588 #, c-format msgid "could not set permissions on directory \"%s\": %m" msgstr "\"%s\" 디렉터리 액세스 권한을 지정할 수 없음: %m" -#: commands/tablespace.c:620 +#: commands/tablespace.c:618 #, c-format msgid "directory \"%s\" already in use as a tablespace" msgstr "\"%s\" 디렉터리는 이미 테이블스페이스로 사용 중임" -#: commands/tablespace.c:744 commands/tablespace.c:757 -#: commands/tablespace.c:793 commands/tablespace.c:885 +#: commands/tablespace.c:742 commands/tablespace.c:755 +#: commands/tablespace.c:791 commands/tablespace.c:883 #, c-format msgid "could not remove directory \"%s\": %m" msgstr "\"%s\" 디렉터리를 삭제할 수 없음: %m" -#: commands/tablespace.c:806 commands/tablespace.c:894 +#: commands/tablespace.c:804 commands/tablespace.c:892 #, c-format msgid "could not remove symbolic link \"%s\": %m" msgstr "\"%s\" 심벌릭 링크를 삭제할 수 없음: %m" -#: commands/tablespace.c:816 commands/tablespace.c:903 +#: commands/tablespace.c:814 commands/tablespace.c:901 #, c-format msgid "\"%s\" is not a directory or symbolic link" msgstr "\"%s\" 디렉터리도, 심볼릭 링크도 아님" -#: commands/tablespace.c:1090 +#: commands/tablespace.c:1086 #, c-format msgid "Tablespace \"%s\" does not exist." msgstr "\"%s\" 테이블스페이스 없음" -#: commands/tablespace.c:1517 +#: commands/tablespace.c:1513 #, c-format msgid "directories for tablespace %u could not be removed" msgstr "%u OID 테이블스페이스용 디렉터리는 삭제될 수 없음" -#: commands/tablespace.c:1519 +#: commands/tablespace.c:1515 #, c-format msgid "You can remove the directories manually if necessary." msgstr "필요하다면 OS 작업으로 그 디레터리를 삭제하세요" -#: commands/trigger.c:184 +#: commands/trigger.c:190 #, c-format msgid "\"%s\" is a table" msgstr "\"%s\" 객체는 테이블입니다." -#: commands/trigger.c:186 +#: commands/trigger.c:192 #, c-format msgid "Tables cannot have INSTEAD OF triggers." msgstr "테이블에 INSTEAD OF 트리거는 설정할 수 없음" -#: commands/trigger.c:197 commands/trigger.c:204 +#: commands/trigger.c:199 +#, c-format +msgid "Partitioned tables cannot have ROW triggers." +msgstr "파티션된 테이블은 ROW 트리거를 사용할 수 없음" + +#: commands/trigger.c:210 commands/trigger.c:217 commands/trigger.c:375 #, c-format msgid "\"%s\" is a view" msgstr "\"%s\" 객체는 뷰입니다." -#: commands/trigger.c:199 +#: commands/trigger.c:212 #, c-format msgid "Views cannot have row-level BEFORE or AFTER triggers." msgstr "뷰에 로우 단위 BEFORE, AFTER 트리거는 설정할 수 없음" -#: commands/trigger.c:206 +#: commands/trigger.c:219 #, c-format msgid "Views cannot have TRUNCATE triggers." msgstr "뷰에 TRUNCATE 트리거는 설정할 수 없음" -#: commands/trigger.c:214 commands/trigger.c:221 commands/trigger.c:228 +#: commands/trigger.c:227 commands/trigger.c:234 commands/trigger.c:246 +#: commands/trigger.c:368 #, c-format msgid "\"%s\" is a foreign table" msgstr "\"%s\" 객체는 외부 테이블입니다." -#: commands/trigger.c:216 +#: commands/trigger.c:229 #, c-format msgid "Foreign tables cannot have INSTEAD OF triggers." msgstr "외부테이블에 INSTEAD OF 트리거는 설정할 수 없음" -#: commands/trigger.c:223 +#: commands/trigger.c:236 #, c-format msgid "Foreign tables cannot have TRUNCATE triggers." msgstr "외부 테이블에는 TRUNCATE 트리거를 사용할 수 없음" -#: commands/trigger.c:230 +#: commands/trigger.c:248 #, c-format msgid "Foreign tables cannot have constraint triggers." 
msgstr "외부 테이블에 제약 조건 트리거는 설정할 수 없음" -#: commands/trigger.c:293 +#: commands/trigger.c:311 #, c-format msgid "TRUNCATE FOR EACH ROW triggers are not supported" msgstr "TRUNCATE FOR EACH ROW 트리거는 지원되지 않음" -#: commands/trigger.c:301 +#: commands/trigger.c:319 #, c-format msgid "INSTEAD OF triggers must be FOR EACH ROW" msgstr "INSTEAD OF 트리거는 FOR EACH ROW 옵션으로 설정해야 함" -#: commands/trigger.c:305 +#: commands/trigger.c:323 #, c-format msgid "INSTEAD OF triggers cannot have WHEN conditions" msgstr "INSTEAD OF 트리거는 WHEN 조건을 사용할 수 없음" -#: commands/trigger.c:309 +#: commands/trigger.c:327 #, c-format msgid "INSTEAD OF triggers cannot have column lists" msgstr "INSTEAD OF 트리거는 칼럼 목록을 사용할 수 없음" -#: commands/trigger.c:366 commands/trigger.c:379 +#: commands/trigger.c:356 +#, c-format +msgid "ROW variable naming in the REFERENCING clause is not supported" +msgstr "" + +#: commands/trigger.c:357 +#, c-format +msgid "Use OLD TABLE or NEW TABLE for naming transition tables." +msgstr "" + +#: commands/trigger.c:370 +#, c-format +msgid "Triggers on foreign tables cannot have transition tables." +msgstr "외부 테이블의 트리거들은 전환 테이블을 가질 수 없음." + +#: commands/trigger.c:377 +#, c-format +msgid "Triggers on views cannot have transition tables." +msgstr "뷰에 정의한 트리거들은 전환 테이블을 가질 수 없음." + +#: commands/trigger.c:397 +#, c-format +msgid "" +"ROW triggers with transition tables are not supported on inheritance children" +msgstr "" + +#: commands/trigger.c:403 +#, c-format +msgid "transition table name can only be specified for an AFTER trigger" +msgstr "" + +#: commands/trigger.c:408 +#, c-format +msgid "TRUNCATE triggers with transition tables are not supported" +msgstr "전환 테이블에서 TRUNCATE 트리거는 지원하지 않습니다" + +#: commands/trigger.c:425 +#, c-format +msgid "" +"transition tables cannot be specified for triggers with more than one event" +msgstr "전환 테이블은 하나 이상의 이벤트에 대한 트리거를 지정할 수 없습니다" + +#: commands/trigger.c:436 +#, c-format +msgid "transition tables cannot be specified for triggers with column lists" +msgstr "전환 테이블은 칼럼 목록들에 대한 트리거를 지정할 수 없습니다" + +#: commands/trigger.c:453 +#, c-format +msgid "NEW TABLE can only be specified for an INSERT or UPDATE trigger" +msgstr "" + +#: commands/trigger.c:458 +#, c-format +msgid "NEW TABLE cannot be specified multiple times" +msgstr "" + +#: commands/trigger.c:468 +#, c-format +msgid "OLD TABLE can only be specified for a DELETE or UPDATE trigger" +msgstr "" + +#: commands/trigger.c:473 +#, c-format +msgid "OLD TABLE cannot be specified multiple times" +msgstr "" + +#: commands/trigger.c:483 +#, c-format +msgid "OLD TABLE name and NEW TABLE name cannot be the same" +msgstr "" + +#: commands/trigger.c:540 commands/trigger.c:553 #, c-format msgid "statement trigger's WHEN condition cannot reference column values" msgstr "트리거의 WHEN 조건에는 칼럼 값을 참조할 수는 없음" -#: commands/trigger.c:371 +#: commands/trigger.c:545 #, c-format msgid "INSERT trigger's WHEN condition cannot reference OLD values" msgstr "INSERT 트리거에서의 WHEN 조건에는 OLD 값을 참조할 수 없음" -#: commands/trigger.c:384 +#: commands/trigger.c:558 #, c-format msgid "DELETE trigger's WHEN condition cannot reference NEW values" msgstr "DELETE 트리거에서의 WHEN 조건에는 NEW 값을 참조할 수 없음" -#: commands/trigger.c:389 +#: commands/trigger.c:563 #, c-format msgid "BEFORE trigger's WHEN condition cannot reference NEW system columns" msgstr "WHEN 조건절이 있는 BEFORE 트리거는 NEW 시스템 칼럼을 참조할 수 없음" -#: commands/trigger.c:434 -#, c-format -msgid "changing return type of function %s from \"opaque\" to \"trigger\"" -msgstr "%s 함수의 리턴 자료형을 \"opaque\"에서 \"trigger\"로 바꿉니다" - -#: 
commands/trigger.c:553 commands/trigger.c:1303 +#: commands/trigger.c:728 commands/trigger.c:1499 #, c-format msgid "trigger \"%s\" for relation \"%s\" already exists" msgstr "\"%s\" 이름의 트리거가 \"%s\" 테이블에 이미 있습니다" -#: commands/trigger.c:838 +#: commands/trigger.c:1024 msgid "Found referenced table's UPDATE trigger." msgstr "참조된 테이블의 UPDATE 트리거를 찾았습니다." -#: commands/trigger.c:839 +#: commands/trigger.c:1025 msgid "Found referenced table's DELETE trigger." msgstr "참조된 테이블의 DELETE 트리거를 찾았습니다." -#: commands/trigger.c:840 +#: commands/trigger.c:1026 msgid "Found referencing table's trigger." msgstr "참조 테이블의 트리거를 찾았습니다." -#: commands/trigger.c:949 commands/trigger.c:965 +#: commands/trigger.c:1135 commands/trigger.c:1151 #, c-format msgid "ignoring incomplete trigger group for constraint \"%s\" %s" msgstr "\"%s\" %s 제약 조건에 대한 불완전한 트리거 그룹을 무시하는 중" -#: commands/trigger.c:977 +#: commands/trigger.c:1164 #, c-format msgid "converting trigger group into constraint \"%s\" %s" msgstr "트리거 그룹을 \"%s\" %s 제약 조건으로 변환하는 중" -#: commands/trigger.c:1190 commands/trigger.c:1351 commands/trigger.c:1469 +#: commands/trigger.c:1385 commands/trigger.c:1544 commands/trigger.c:1659 #, c-format msgid "trigger \"%s\" for table \"%s\" does not exist" msgstr "\"%s\" 트리거는 \"%s\" 테이블에 없음" -#: commands/trigger.c:1434 +#: commands/trigger.c:1627 #, c-format msgid "permission denied: \"%s\" is a system trigger" msgstr "액세스 권한 없음: \"%s\" 객체는 시스템 트리거임" -#: commands/trigger.c:1930 +#: commands/trigger.c:2206 #, c-format msgid "trigger function %u returned null value" msgstr "%u 트리거 함수가 null 값을 리턴했습니다" -#: commands/trigger.c:1989 commands/trigger.c:2188 commands/trigger.c:2392 -#: commands/trigger.c:2664 +#: commands/trigger.c:2272 commands/trigger.c:2487 commands/trigger.c:2706 +#: commands/trigger.c:2991 #, c-format msgid "BEFORE STATEMENT trigger cannot return a value" msgstr "BEFORE STATEMENT 트리거는 리턴값이 있으면 안됩니다" -#: commands/trigger.c:2726 executor/nodeModifyTable.c:679 -#: executor/nodeModifyTable.c:972 +#: commands/trigger.c:3053 executor/nodeModifyTable.c:798 +#: executor/nodeModifyTable.c:1095 #, c-format msgid "" "tuple to be updated was already modified by an operation triggered by the " @@ -9516,8 +10304,8 @@ msgid "" msgstr "" "현재 명령으로 실행된 트리거 작업으로 변경해야할 자료가 이미 바뀌었습니다." 
-#: commands/trigger.c:2727 executor/nodeModifyTable.c:680 -#: executor/nodeModifyTable.c:973 +#: commands/trigger.c:3054 executor/nodeModifyTable.c:799 +#: executor/nodeModifyTable.c:1096 #, c-format msgid "" "Consider using an AFTER trigger instead of a BEFORE trigger to propagate " @@ -9526,25 +10314,25 @@ msgstr "" "다른 로우를 변경하는 일을 BEFORE 트리거 대신에 AFTER 트리거 사용을 고려해 보" "십시오" -#: commands/trigger.c:2741 executor/execMain.c:2379 -#: executor/nodeLockRows.c:216 executor/nodeModifyTable.c:213 -#: executor/nodeModifyTable.c:692 executor/nodeModifyTable.c:985 -#: executor/nodeModifyTable.c:1151 +#: commands/trigger.c:3068 executor/execMain.c:2695 +#: executor/nodeLockRows.c:220 executor/nodeModifyTable.c:214 +#: executor/nodeModifyTable.c:811 executor/nodeModifyTable.c:1108 +#: executor/nodeModifyTable.c:1277 #, c-format msgid "could not serialize access due to concurrent update" msgstr "동시 업데이트 때문에 순차적 액세스가 불가능합니다" -#: commands/trigger.c:4575 +#: commands/trigger.c:5200 #, c-format msgid "constraint \"%s\" is not deferrable" msgstr "\"%s\" 제약 조건은 DEFERRABLE 속성으로 만들어지지 않았습니다" -#: commands/trigger.c:4598 +#: commands/trigger.c:5223 #, c-format msgid "constraint \"%s\" does not exist" msgstr "\"%s\" 이름의 제약 조건이 없음" -#: commands/tsearchcmds.c:115 commands/tsearchcmds.c:685 +#: commands/tsearchcmds.c:115 commands/tsearchcmds.c:679 #, c-format msgid "function %s should return type %s" msgstr "%s 함수는 %s 자료형을 반환해야 함" @@ -9579,261 +10367,256 @@ msgstr "텍스트 검색 파서 end 메서드가 필요함" msgid "text search parser lextypes method is required" msgstr "텍스트 검색 파서 lextypes 메서드가 필요함" -#: commands/tsearchcmds.c:386 +#: commands/tsearchcmds.c:384 #, c-format msgid "text search template \"%s\" does not accept options" msgstr "\"%s\" 전문 검색 템플릿이 옵션을 수락하지 않음" -#: commands/tsearchcmds.c:460 +#: commands/tsearchcmds.c:458 #, c-format msgid "text search template is required" msgstr "전문 검색 템플릿이 필요함" -#: commands/tsearchcmds.c:752 +#: commands/tsearchcmds.c:746 #, c-format msgid "must be superuser to create text search templates" msgstr "슈퍼유저만 전문 검색 템플릿을 만들 수 있음" -#: commands/tsearchcmds.c:789 +#: commands/tsearchcmds.c:783 #, c-format msgid "text search template parameter \"%s\" not recognized" msgstr "\"%s\" 전문 검색 템플릿 매개 변수를 인식할 수 없음" -#: commands/tsearchcmds.c:799 +#: commands/tsearchcmds.c:793 #, c-format msgid "text search template lexize method is required" msgstr "전문 검색 템플릿 lexize 메서드가 필요함" -#: commands/tsearchcmds.c:1008 +#: commands/tsearchcmds.c:1000 #, c-format msgid "text search configuration parameter \"%s\" not recognized" msgstr "\"%s\" 전문 검색 구성 매개 변수를 인식할 수 없음" -#: commands/tsearchcmds.c:1015 +#: commands/tsearchcmds.c:1007 #, c-format msgid "cannot specify both PARSER and COPY options" msgstr "PARSER 옵션과 COPY 옵션을 모두 지정할 수 없음" -#: commands/tsearchcmds.c:1051 +#: commands/tsearchcmds.c:1043 #, c-format msgid "text search parser is required" msgstr "전문 검색 파서가 필요함" -#: commands/tsearchcmds.c:1278 +#: commands/tsearchcmds.c:1266 #, c-format msgid "token type \"%s\" does not exist" msgstr "\"%s\" 토큰 형식이 없음" -#: commands/tsearchcmds.c:1502 +#: commands/tsearchcmds.c:1487 #, c-format msgid "mapping for token type \"%s\" does not exist" msgstr "\"%s\" 토큰 형식에 대한 매핑이 없음" -#: commands/tsearchcmds.c:1508 +#: commands/tsearchcmds.c:1493 #, c-format msgid "mapping for token type \"%s\" does not exist, skipping" msgstr "\"%s\" 토큰 형식에 대한 매핑이 없음, 건너뜀" -#: commands/tsearchcmds.c:1663 commands/tsearchcmds.c:1774 +#: commands/tsearchcmds.c:1648 commands/tsearchcmds.c:1759 #, c-format msgid "invalid parameter list format: \"%s\"" msgstr 
"잘못된 매개 변수 목록 형식: \"%s\"" -#: commands/typecmds.c:181 +#: commands/typecmds.c:183 #, c-format msgid "must be superuser to create a base type" msgstr "슈퍼유저만 기본 형식을 만들 수 있음" -#: commands/typecmds.c:288 commands/typecmds.c:1421 +#: commands/typecmds.c:290 commands/typecmds.c:1414 #, c-format msgid "type attribute \"%s\" not recognized" msgstr "잘못된 \"%s\" 속성의 자료형" -#: commands/typecmds.c:342 +#: commands/typecmds.c:346 #, c-format msgid "invalid type category \"%s\": must be simple ASCII" msgstr "\"%s\" 형식 범주가 잘못됨: 단순 ASCII여야 함" -#: commands/typecmds.c:361 +#: commands/typecmds.c:365 #, c-format msgid "array element type cannot be %s" msgstr "배열 요소의 자료형으로 %s 자료형을 사용할 수 없습니다" -#: commands/typecmds.c:393 +#: commands/typecmds.c:397 #, c-format msgid "alignment \"%s\" not recognized" msgstr "잘못된 ALIGNMENT 값: \"%s\"" -#: commands/typecmds.c:410 +#: commands/typecmds.c:414 #, c-format msgid "storage \"%s\" not recognized" msgstr "잘못된 STORAGE 값: \"%s\"" -#: commands/typecmds.c:421 +#: commands/typecmds.c:425 #, c-format msgid "type input function must be specified" msgstr "자료형 입력 함수를 지정하십시오" -#: commands/typecmds.c:425 +#: commands/typecmds.c:429 #, c-format msgid "type output function must be specified" msgstr "자료형 출력 함수를 지정하십시오" -#: commands/typecmds.c:430 +#: commands/typecmds.c:434 #, c-format msgid "" "type modifier output function is useless without a type modifier input " "function" msgstr "형식 한정자 입력 함수가 없으면 형식 한정자 출력 함수는 의미가 없음" -#: commands/typecmds.c:453 commands/typecmds.c:470 -#, c-format -msgid "changing return type of function %s from %s to %s" -msgstr "%s 함수의 반환 자료형을 %s에서 %s 자료형으로 바꿉니다" - -#: commands/typecmds.c:460 +#: commands/typecmds.c:464 #, c-format msgid "type input function %s must return type %s" msgstr "자료형 %s 입력 함수의 %s 자료형을 반환해야합니다" -#: commands/typecmds.c:477 +#: commands/typecmds.c:481 #, c-format msgid "type output function %s must return type %s" msgstr "%s 자료형 출력 함수는 %s 자료형을 반환해야합니다" -#: commands/typecmds.c:486 +#: commands/typecmds.c:490 #, c-format msgid "type receive function %s must return type %s" msgstr "%s 자료형 receive 함수는 %s 자료형을 반환해야합니다" -#: commands/typecmds.c:495 +#: commands/typecmds.c:499 #, c-format msgid "type send function %s must return type %s" msgstr "%s 자료형 전송 함수는 %s 자료형을 반환해야합니다" -#: commands/typecmds.c:560 +#: commands/typecmds.c:564 #, c-format msgid "type input function %s should not be volatile" msgstr "%s 자료형 입력 함수는 volatile 특성이 없어야합니다" -#: commands/typecmds.c:565 +#: commands/typecmds.c:569 #, c-format msgid "type output function %s should not be volatile" msgstr "%s 자료형 출력 함수는 volatile 특성이 없어야합니다" -#: commands/typecmds.c:570 +#: commands/typecmds.c:574 #, c-format msgid "type receive function %s should not be volatile" msgstr "%s 자료형 수신 함수는 volatile 특성이 없어야합니다" -#: commands/typecmds.c:575 +#: commands/typecmds.c:579 #, c-format msgid "type send function %s should not be volatile" msgstr "%s 자료형 송신 함수는 volatile 특성이 없어야합니다" -#: commands/typecmds.c:580 +#: commands/typecmds.c:584 #, c-format msgid "type modifier input function %s should not be volatile" msgstr "%s 자료형 형변환 입력 함수는 volatile 특성이 없어야합니다" -#: commands/typecmds.c:585 +#: commands/typecmds.c:589 #, c-format msgid "type modifier output function %s should not be volatile" msgstr "%s 자료형 형변환 출력 함수는 volatile 특성이 없어야합니다" -#: commands/typecmds.c:807 +#: commands/typecmds.c:811 #, c-format msgid "\"%s\" is not a valid base type for a domain" msgstr "\"%s\" 자료형은 도메인의 기반 자료형이 아닙니다" -#: commands/typecmds.c:893 +#: commands/typecmds.c:897 #, c-format msgid "multiple default expressions" msgstr 
"default 표현식 여러개 있음" -#: commands/typecmds.c:955 commands/typecmds.c:964 +#: commands/typecmds.c:959 commands/typecmds.c:968 #, c-format msgid "conflicting NULL/NOT NULL constraints" msgstr "NULL/NOT NULL 조건이 함께 있음" -#: commands/typecmds.c:980 +#: commands/typecmds.c:984 #, c-format msgid "check constraints for domains cannot be marked NO INHERIT" msgstr "도메인용 체크 제약 조건에는 NO INHERIT 옵션을 사용할 수 없음" -#: commands/typecmds.c:989 commands/typecmds.c:2522 +#: commands/typecmds.c:993 commands/typecmds.c:2512 #, c-format msgid "unique constraints not possible for domains" msgstr "고유 제약 조건은 도메인 정의에 사용할 수 없음" -#: commands/typecmds.c:995 commands/typecmds.c:2528 +#: commands/typecmds.c:999 commands/typecmds.c:2518 #, c-format msgid "primary key constraints not possible for domains" msgstr "기본키 제약 조건을 도메인 정의에 사용할 수 없음" -#: commands/typecmds.c:1001 commands/typecmds.c:2534 +#: commands/typecmds.c:1005 commands/typecmds.c:2524 #, c-format msgid "exclusion constraints not possible for domains" msgstr "exclusion 제약 조건은 도메인에는 사용할 수 없음" -#: commands/typecmds.c:1007 commands/typecmds.c:2540 +#: commands/typecmds.c:1011 commands/typecmds.c:2530 #, c-format msgid "foreign key constraints not possible for domains" msgstr "참조키(foreign key) 제약 조건은 도메인(domain) 정의에 사용할 수 없음" -#: commands/typecmds.c:1016 commands/typecmds.c:2549 +#: commands/typecmds.c:1020 commands/typecmds.c:2539 #, c-format msgid "specifying constraint deferrability not supported for domains" msgstr "도메인에 대해 제약 조건 지연을 지정할 수 없음" -#: commands/typecmds.c:1291 utils/cache/typcache.c:1630 +#: commands/typecmds.c:1284 utils/cache/typcache.c:1648 #, c-format msgid "%s is not an enum" msgstr "%s 객체는 나열형이 아님" -#: commands/typecmds.c:1429 +#: commands/typecmds.c:1422 #, c-format msgid "type attribute \"subtype\" is required" msgstr "\"subtype\" 속성이 필요함" -#: commands/typecmds.c:1434 +#: commands/typecmds.c:1427 #, c-format msgid "range subtype cannot be %s" msgstr "range subtype은 %s 아니여야 함" -#: commands/typecmds.c:1453 +#: commands/typecmds.c:1446 #, c-format msgid "range collation specified but subtype does not support collation" msgstr "" "range 형에 정렬 규칙을 지정했지만, 소속 자료형이 그 정렬 규칙을 지원하지 않습" "니다" -#: commands/typecmds.c:1687 +#: commands/typecmds.c:1680 #, c-format msgid "changing argument type of function %s from \"opaque\" to \"cstring\"" msgstr "%s 함수의 인자 자료형을 \"opaque\"에서 \"cstring\"으로 바꿉니다" -#: commands/typecmds.c:1738 +#: commands/typecmds.c:1731 #, c-format msgid "changing argument type of function %s from \"opaque\" to %s" msgstr "%s 함수의 인자 자료형을 \"opaque\"에서 %s 자료형으로 바꿉니다" -#: commands/typecmds.c:1837 +#: commands/typecmds.c:1830 #, c-format msgid "typmod_in function %s must return type %s" msgstr "%s typmod_in 함수는 %s 자료형을 반환해야 함" -#: commands/typecmds.c:1864 +#: commands/typecmds.c:1857 #, c-format msgid "typmod_out function %s must return type %s" msgstr "%s typmod_out 함수는 %s 자료형을 반환해야 함" -#: commands/typecmds.c:1891 +#: commands/typecmds.c:1884 #, c-format msgid "type analyze function %s must return type %s" msgstr "%s 자료형 분석 함수는 %s 자료형을 반환해야 함" -#: commands/typecmds.c:1937 +#: commands/typecmds.c:1930 #, c-format msgid "" "You must specify an operator class for the range type or define a default " @@ -9842,52 +10625,52 @@ msgstr "" "subtype을 위한 기본 연산자 클래스나 range 자료형을 위한 하나의 연산자 클래스" "를 지정해야 합니다" -#: commands/typecmds.c:1968 +#: commands/typecmds.c:1961 #, c-format msgid "range canonical function %s must return range type" msgstr "%s 범위 기준 함수는 range 자료형을 반환해야합니다" -#: commands/typecmds.c:1974 +#: commands/typecmds.c:1967 #, c-format msgid "range 
canonical function %s must be immutable" msgstr "%s 범위 기준 함수는 immutable 속성이어야 합니다" -#: commands/typecmds.c:2010 +#: commands/typecmds.c:2003 #, c-format msgid "range subtype diff function %s must return type %s" msgstr "%s 범위 하위 자료 비교 함수는 %s 자료형을 반환해야합니다" -#: commands/typecmds.c:2017 +#: commands/typecmds.c:2010 #, c-format msgid "range subtype diff function %s must be immutable" msgstr "%s 범위 하위 자료 비교 함수는 immutable 속성이어야 합니다" -#: commands/typecmds.c:2044 +#: commands/typecmds.c:2037 #, c-format msgid "pg_type array OID value not set when in binary upgrade mode" msgstr "이진 업그레이드 작업 때 pg_type 배열 OID 값이 지정되지 않았습니다" -#: commands/typecmds.c:2348 +#: commands/typecmds.c:2340 #, c-format msgid "column \"%s\" of table \"%s\" contains null values" msgstr "\"%s\" 열(해당 테이블 \"%s\")의 자료 가운데 null 값이 있습니다" -#: commands/typecmds.c:2463 commands/typecmds.c:2646 +#: commands/typecmds.c:2453 commands/typecmds.c:2636 #, c-format msgid "constraint \"%s\" of domain \"%s\" does not exist" msgstr "\"%s\" 제약 조건 \"%s\" 도메인에 포함되어 있지 않습니다." -#: commands/typecmds.c:2467 +#: commands/typecmds.c:2457 #, c-format msgid "constraint \"%s\" of domain \"%s\" does not exist, skipping" msgstr "\"%s\" 제약 조건 \"%s\" 도메인에 포함되어 있지 않음, 건너뜀" -#: commands/typecmds.c:2652 +#: commands/typecmds.c:2642 #, c-format msgid "constraint \"%s\" of domain \"%s\" is not a check constraint" msgstr "\"%s\" 제약 조건(해당 도메인: \"%s\")은 check 제약조건이 아님" -#: commands/typecmds.c:2758 +#: commands/typecmds.c:2747 #, c-format msgid "" "column \"%s\" of table \"%s\" contains values that violate the new constraint" @@ -9895,226 +10678,232 @@ msgstr "" "\"%s\" 열(해당 테이블 \"%s\")의 자료 중에, 새 제약 조건을 위반하는 자료가 있" "습니다" -#: commands/typecmds.c:2971 commands/typecmds.c:3228 commands/typecmds.c:3417 +#: commands/typecmds.c:2975 commands/typecmds.c:3180 commands/typecmds.c:3262 +#: commands/typecmds.c:3449 #, c-format msgid "%s is not a domain" msgstr "\"%s\" 이름의 객체는 도메인이 아닙니다" -#: commands/typecmds.c:3005 +#: commands/typecmds.c:3009 #, c-format msgid "constraint \"%s\" for domain \"%s\" already exists" msgstr "\"%s\" 제약 조건이 \"%s\" 도메인에 이미 지정되어 있습니다" -#: commands/typecmds.c:3055 +#: commands/typecmds.c:3060 #, c-format msgid "cannot use table references in domain check constraint" msgstr "도메인 용 체크 제약 조건에서는 테이블 참조를 사용할 수 없습니다" -#: commands/typecmds.c:3158 commands/typecmds.c:3240 commands/typecmds.c:3534 +#: commands/typecmds.c:3192 commands/typecmds.c:3274 commands/typecmds.c:3566 #, c-format msgid "%s is a table's row type" msgstr "%s 자료형은 테이블의 행 자료형(row type)입니다" -#: commands/typecmds.c:3160 commands/typecmds.c:3242 commands/typecmds.c:3536 +#: commands/typecmds.c:3194 commands/typecmds.c:3276 commands/typecmds.c:3568 #, c-format msgid "Use ALTER TABLE instead." msgstr "대신 ALTER TABLE을 사용하십시오." -#: commands/typecmds.c:3167 commands/typecmds.c:3249 commands/typecmds.c:3449 +#: commands/typecmds.c:3201 commands/typecmds.c:3283 commands/typecmds.c:3481 #, c-format msgid "cannot alter array type %s" msgstr "%s 배열 형식을 변경할 수 없음" -#: commands/typecmds.c:3169 commands/typecmds.c:3251 commands/typecmds.c:3451 +#: commands/typecmds.c:3203 commands/typecmds.c:3285 commands/typecmds.c:3483 #, c-format msgid "You can alter type %s, which will alter the array type as well." msgstr "%s 형식을 변경할 수 있으며, 이렇게 하면 배열 형식도 변경됩니다." 
-#: commands/typecmds.c:3519 +#: commands/typecmds.c:3551 #, c-format msgid "type \"%s\" already exists in schema \"%s\"" msgstr "%s 자료형이 이미 \"%s\" 스키마 안에 있습니다" -#: commands/user.c:149 +#: commands/user.c:141 #, c-format msgid "SYSID can no longer be specified" msgstr "SYSID는 더 이상 지정할 수 없음" -#: commands/user.c:291 +#: commands/user.c:295 #, c-format msgid "must be superuser to create superusers" msgstr "새 슈퍼유저를 만드려면 슈퍼유져여야만 합니다" -#: commands/user.c:298 +#: commands/user.c:302 #, c-format msgid "must be superuser to create replication users" msgstr "새 복제작업용 사용자를 만드려면 슈퍼유저여야만 합니다" -#: commands/user.c:305 commands/user.c:693 +#: commands/user.c:309 commands/user.c:707 #, c-format msgid "must be superuser to change bypassrls attribute" msgstr "슈퍼유저만 bypassrls 속성을 바꿀 수 있음" -#: commands/user.c:312 +#: commands/user.c:316 #, c-format msgid "permission denied to create role" msgstr "롤 만들 권한 없음" -#: commands/user.c:322 commands/user.c:1176 commands/user.c:1183 -#: utils/adt/acl.c:5279 utils/adt/acl.c:5285 gram.y:13619 gram.y:13654 +#: commands/user.c:326 commands/user.c:1195 commands/user.c:1202 +#: utils/adt/acl.c:5246 utils/adt/acl.c:5252 gram.y:14465 gram.y:14500 #, c-format msgid "role name \"%s\" is reserved" msgstr "\"%s\" 롤 이름은 내부적으로 사용되고 있습니다" -#: commands/user.c:324 commands/user.c:1178 commands/user.c:1185 +#: commands/user.c:328 commands/user.c:1197 commands/user.c:1204 #, c-format msgid "Role names starting with \"pg_\" are reserved." msgstr "\"pg_\"로 시작하는 롤 이름은 사용할 수 없습니다." -#: commands/user.c:336 commands/user.c:1191 +#: commands/user.c:340 commands/user.c:1210 #, c-format msgid "role \"%s\" already exists" msgstr "\"%s\" 롤 이름이 이미 있습니다" -#: commands/user.c:414 +#: commands/user.c:406 commands/user.c:816 +#, c-format +msgid "empty string is not a valid password, clearing password" +msgstr "비밀번호로 빈 문자열을 사용할 수 없습니다. 비밀번호를 없앱니다" + +#: commands/user.c:437 #, c-format msgid "pg_authid OID value not set when in binary upgrade mode" msgstr "이진 업그레이드 작업 때 pg_authid OID 값이 지정되지 않았습니다" -#: commands/user.c:679 commands/user.c:896 commands/user.c:1432 -#: commands/user.c:1578 +#: commands/user.c:693 commands/user.c:915 commands/user.c:1449 +#: commands/user.c:1593 #, c-format msgid "must be superuser to alter superusers" msgstr "슈퍼유저의 속성을 변경하련 슈퍼유져여야만 합니다" -#: commands/user.c:686 +#: commands/user.c:700 #, c-format msgid "must be superuser to alter replication users" msgstr "복제작업용 사용자의 속성을 변경하련 슈퍼유져여야만 합니다" -#: commands/user.c:709 commands/user.c:904 +#: commands/user.c:723 commands/user.c:923 #, c-format msgid "permission denied" msgstr "권한 없음" -#: commands/user.c:934 +#: commands/user.c:953 #, c-format msgid "must be superuser to alter settings globally" msgstr "슈퍼유저만 전역 환경 설정을 바꿀 수 있습니다." 
-#: commands/user.c:956 +#: commands/user.c:975 #, c-format msgid "permission denied to drop role" msgstr "롤을 삭제할 권한이 없습니다" -#: commands/user.c:980 +#: commands/user.c:999 #, c-format msgid "cannot use special role specifier in DROP ROLE" msgstr "DROP ROLE 명령으로 삭제할 수 없는 특별한 롤입니다" -#: commands/user.c:990 commands/user.c:1147 commands/variable.c:825 -#: commands/variable.c:897 utils/adt/acl.c:5121 utils/adt/acl.c:5173 -#: utils/adt/acl.c:5206 utils/adt/acl.c:5224 utils/init/miscinit.c:502 +#: commands/user.c:1009 commands/user.c:1166 commands/variable.c:822 +#: commands/variable.c:894 utils/adt/acl.c:5104 utils/adt/acl.c:5151 +#: utils/adt/acl.c:5179 utils/adt/acl.c:5197 utils/init/miscinit.c:504 #, c-format msgid "role \"%s\" does not exist" msgstr "\"%s\" 롤(role) 없음" -#: commands/user.c:995 +#: commands/user.c:1014 #, c-format msgid "role \"%s\" does not exist, skipping" msgstr "\"%s\" 룰(rule) 없음, 건너 뜀" -#: commands/user.c:1007 commands/user.c:1011 +#: commands/user.c:1026 commands/user.c:1030 #, c-format msgid "current user cannot be dropped" msgstr "현재 사용자는 삭제 될 수 없습니다" -#: commands/user.c:1015 +#: commands/user.c:1034 #, c-format msgid "session user cannot be dropped" msgstr "세션 사용자는 삭제 될 수 없습니다" -#: commands/user.c:1026 +#: commands/user.c:1045 #, c-format msgid "must be superuser to drop superusers" msgstr "superuser를 사용자를 삭제하려면 superuser여야만 합니다" -#: commands/user.c:1042 +#: commands/user.c:1061 #, c-format msgid "role \"%s\" cannot be dropped because some objects depend on it" msgstr "기타 다른 객체들이 이 롤에 의존하고 있어, \"%s\" 롤을 삭제할 수 없음" -#: commands/user.c:1163 +#: commands/user.c:1182 #, c-format msgid "session user cannot be renamed" msgstr "세션 사용자의 이름은 바꿀 수 없습니다" -#: commands/user.c:1167 +#: commands/user.c:1186 #, c-format msgid "current user cannot be renamed" msgstr "현재 사용자의 이름은 바꿀 수 없습니다" -#: commands/user.c:1201 +#: commands/user.c:1220 #, c-format msgid "must be superuser to rename superusers" msgstr "superuser의 이름을 바꾸려면 superuser여야 합니다" -#: commands/user.c:1208 +#: commands/user.c:1227 #, c-format msgid "permission denied to rename role" msgstr "롤 이름 바꾸기 권한 없음" -#: commands/user.c:1229 +#: commands/user.c:1248 #, c-format msgid "MD5 password cleared because of role rename" msgstr "롤 이름이 변경 되어 MD5 암호를 지웠습니다" -#: commands/user.c:1291 +#: commands/user.c:1308 #, c-format msgid "column names cannot be included in GRANT/REVOKE ROLE" msgstr "GRANT/REVOKE ROLE에 열 이름을 포함할 수 없음" -#: commands/user.c:1329 +#: commands/user.c:1346 #, c-format msgid "permission denied to drop objects" msgstr "객체를 삭제할 권한이 없음" -#: commands/user.c:1356 commands/user.c:1365 +#: commands/user.c:1373 commands/user.c:1382 #, c-format msgid "permission denied to reassign objects" msgstr "객체 권한을 재 지정할 권한이 없음" -#: commands/user.c:1440 commands/user.c:1586 +#: commands/user.c:1457 commands/user.c:1601 #, c-format msgid "must have admin option on role \"%s\"" msgstr "\"%s\" 역할에 admin 옵션이 있어야 함" -#: commands/user.c:1457 +#: commands/user.c:1474 #, c-format msgid "must be superuser to set grantor" msgstr "grantor(?)를 지정하려면 슈퍼유져여야합니다" -#: commands/user.c:1482 +#: commands/user.c:1499 #, c-format msgid "role \"%s\" is a member of role \"%s\"" msgstr "\"%s\" 롤은 \"%s\" 롤의 구성원입니다" -#: commands/user.c:1497 +#: commands/user.c:1514 #, c-format msgid "role \"%s\" is already a member of role \"%s\"" msgstr "role \"%s\" is already a member of role \"%s\"" -#: commands/user.c:1608 +#: commands/user.c:1623 #, c-format msgid "role \"%s\" is not a member of role \"%s\"" msgstr "\"%s\" 롤은 \"%s\"롤의 구성원이 아닙니다" -#: commands/vacuum.c:185 +#: 
commands/vacuum.c:186 #, c-format msgid "%s cannot be executed from VACUUM or ANALYZE" msgstr "%s 명령은 VACUUM, ANALYZE 명령에서 실행 될 수 없음" -#: commands/vacuum.c:195 +#: commands/vacuum.c:196 #, c-format msgid "VACUUM option DISABLE_PAGE_SKIPPING cannot be used with FULL" msgstr "" @@ -10122,12 +10911,12 @@ msgstr "" "니다." # # search5 부분 -#: commands/vacuum.c:535 +#: commands/vacuum.c:565 #, c-format msgid "oldest xmin is far in the past" msgstr "가장 오래된 xmin이 너무 옛날 것입니다." -#: commands/vacuum.c:536 +#: commands/vacuum.c:566 #, c-format msgid "Close open transactions soon to avoid wraparound problems." msgstr "" @@ -10135,12 +10924,12 @@ msgstr "" "려 있는 모든 트랜잭션을 닫으십시오." # # search5 부분 -#: commands/vacuum.c:575 +#: commands/vacuum.c:605 #, c-format msgid "oldest multixact is far in the past" msgstr "가장 오래된 multixact 값이 너무 옛날 것입니다." -#: commands/vacuum.c:576 +#: commands/vacuum.c:606 #, c-format msgid "" "Close open transactions with multixacts soon to avoid wraparound problems." @@ -10148,110 +10937,124 @@ msgstr "" "멀티 트랜잭션 ID 겹침 사고를 막기 위해 빨리 열린 멀티 트랜잭션들을 닫으십시" "오." -#: commands/vacuum.c:1146 +#: commands/vacuum.c:1176 #, c-format msgid "some databases have not been vacuumed in over 2 billion transactions" msgstr "" "몇몇 데이터베이스가 20억 이상의 트랜잭션을 처리했음에도 불구하고 청소가되지 " "않았습니다" -#: commands/vacuum.c:1147 +#: commands/vacuum.c:1177 #, c-format msgid "You might have already suffered transaction-wraparound data loss." msgstr "이미 트래잭션 ID 겹침 현상으로 자료 손실이 발생했을 수도 있습니다." -#: commands/vacuum.c:1268 +#: commands/vacuum.c:1306 #, c-format msgid "skipping vacuum of \"%s\" --- lock not available" msgstr "\"%s\" 객체 vacuum 건너뜀 --- 사용 가능한 잠금이 없음" -#: commands/vacuum.c:1294 +#: commands/vacuum.c:1332 #, c-format msgid "skipping \"%s\" --- only superuser can vacuum it" msgstr "\"%s\" 건너뜀 --- 슈퍼유저만 청소할 수 있음" -#: commands/vacuum.c:1298 +#: commands/vacuum.c:1336 #, c-format msgid "skipping \"%s\" --- only superuser or database owner can vacuum it" msgstr "\"%s\" 건너뜀 --- 슈퍼유저 또는 데이터베이스 소유주만 청소할 수 있음" -#: commands/vacuum.c:1302 +#: commands/vacuum.c:1340 #, c-format msgid "skipping \"%s\" --- only table or database owner can vacuum it" msgstr "\"%s\" 건너뜀 --- 이 테이블이나 데이터베이스의 소유주만 청소할 수 있음" -#: commands/vacuum.c:1320 +#: commands/vacuum.c:1359 #, c-format msgid "skipping \"%s\" --- cannot vacuum non-tables or special system tables" msgstr "" "\"%s\" 건너뜀 --- 테이블이 아닌 것 또는 특별 시스템 테이블 등은 청소할 수 없" "음" -#: commands/vacuumlazy.c:366 +#: commands/vacuumlazy.c:376 #, c-format msgid "automatic vacuum of table \"%s.%s.%s\": index scans: %d\n" msgstr "\"%s.%s.%s\" 테이블 자동 청소: 인덱스 탐색: %d\n" -#: commands/vacuumlazy.c:371 +#: commands/vacuumlazy.c:381 #, c-format msgid "" "pages: %u removed, %u remain, %u skipped due to pins, %u skipped frozen\n" msgstr "페이지: %u 삭제됨, %u 남음, %u 핀닝으로 건너뜀, %u 동결되어 건너뜀\n" -#: commands/vacuumlazy.c:377 +#: commands/vacuumlazy.c:387 #, c-format msgid "" -"tuples: %.0f removed, %.0f remain, %.0f are dead but not yet removable\n" -msgstr "튜플: %.0f 삭제됨, %.0f 남음, %.0f 삭제할 수 없는 죽은 튜플\n" +"tuples: %.0f removed, %.0f remain, %.0f are dead but not yet removable, " +"oldest xmin: %u\n" +msgstr "" +"튜플: %.0f 삭제됨, %.0f 남음, %.0f 삭제할 수 없는 죽은 튜플, 제일 늙은 xmin: " +"%u\n" -#: commands/vacuumlazy.c:382 +#: commands/vacuumlazy.c:393 #, c-format msgid "buffer usage: %d hits, %d misses, %d dirtied\n" msgstr "버퍼 사용량: %d 조회, %d 놓침, %d 변경됨\n" -#: commands/vacuumlazy.c:386 +#: commands/vacuumlazy.c:397 #, c-format msgid "avg read rate: %.3f MB/s, avg write rate: %.3f MB/s\n" msgstr "평균 읽기 속도: %.3f MB/s, 평균 쓰기 속도: %.3f MB/s\n" -#: 
commands/vacuumlazy.c:388 +#: commands/vacuumlazy.c:399 #, c-format msgid "system usage: %s" msgstr "시스템 사용량: %s" -#: commands/vacuumlazy.c:846 +#: commands/vacuumlazy.c:858 #, c-format msgid "relation \"%s\" page %u is uninitialized --- fixing" msgstr "\"%s\" 릴레이션 %u 페이지는 초기화되지 않았음 --- 수정함" -#: commands/vacuumlazy.c:1316 +#: commands/vacuumlazy.c:1328 #, c-format msgid "\"%s\": removed %.0f row versions in %u pages" msgstr "\"%s\": %.0f개의 행 버전을 %u개 페이지에서 삭제했습니다." -#: commands/vacuumlazy.c:1326 +#: commands/vacuumlazy.c:1338 #, c-format -msgid "%.0f dead row versions cannot be removed yet.\n" -msgstr "%.0f개의 죽은 로우 버전을 아직 지울 수 없습니다.\n" +msgid "%.0f dead row versions cannot be removed yet, oldest xmin: %u\n" +msgstr "%.0f개의 죽은 로우 버전을 아직 지울 수 없습니다, 제일 늙은 xmin: %u\n" -#: commands/vacuumlazy.c:1328 +#: commands/vacuumlazy.c:1340 #, c-format msgid "There were %.0f unused item pointers.\n" msgstr "%.0f개의 사용되지 않은 아이템 포인터가 있습니다.\n" -#: commands/vacuumlazy.c:1330 +#: commands/vacuumlazy.c:1342 +#, c-format +msgid "Skipped %u page due to buffer pins, " +msgid_plural "Skipped %u pages due to buffer pins, " +msgstr[0] "%u 페이지를 버퍼 핀닝으로 건너 뛰었습니다, " + +#: commands/vacuumlazy.c:1346 #, c-format -msgid "Skipped %u page due to buffer pins.\n" -msgid_plural "Skipped %u pages due to buffer pins.\n" -msgstr[0] "%u 페이지를 버퍼 핀닝으로 건너 뛰었습니다.\n" +msgid "%u frozen page.\n" +msgid_plural "%u frozen pages.\n" +msgstr[0] "" -#: commands/vacuumlazy.c:1334 +#: commands/vacuumlazy.c:1350 #, c-format msgid "%u page is entirely empty.\n" msgid_plural "%u pages are entirely empty.\n" msgstr[0] "" -#: commands/vacuumlazy.c:1342 +#: commands/vacuumlazy.c:1354 +#, c-format +msgid "%s." +msgstr "%s." + +#: commands/vacuumlazy.c:1357 #, c-format msgid "" "\"%s\": found %.0f removable, %.0f nonremovable row versions in %u out of %u " @@ -10260,22 +11063,22 @@ msgstr "" "\"%s\": 지울 수 있는 자료 %.0f개, 지울 수 없는 자료 %.0f개를 %u/%u개 페이지에" "서 찾았음" -#: commands/vacuumlazy.c:1411 +#: commands/vacuumlazy.c:1426 #, c-format msgid "\"%s\": removed %d row versions in %d pages" msgstr "\"%s\": %d 개 자료를 %d 페이지에서 삭제했음" -#: commands/vacuumlazy.c:1600 +#: commands/vacuumlazy.c:1614 #, c-format msgid "scanned index \"%s\" to remove %d row versions" msgstr "\"%s\" 인덱스를 스캔해서 %d개의 행 버전들을 지웠습니다" -#: commands/vacuumlazy.c:1646 +#: commands/vacuumlazy.c:1660 #, c-format msgid "index \"%s\" now contains %.0f row versions in %u pages" msgstr "\"%s\" 인덱스는 %.0f 행 버전을 %u 페이지에서 포함하고 있습니다." -#: commands/vacuumlazy.c:1650 +#: commands/vacuumlazy.c:1664 #, c-format msgid "" "%.0f index row versions were removed.\n" @@ -10286,119 +11089,119 @@ msgstr "" "%u개 인덱스 페이지를 삭제해서, %u개 페이지를 다시 사용합니다.\n" "%s." -#: commands/vacuumlazy.c:1745 +#: commands/vacuumlazy.c:1759 #, c-format msgid "\"%s\": stopping truncate due to conflicting lock request" msgstr "\"%s\": 잠금 요청 충돌로 자료 비우기 작업을 중지합니다" -#: commands/vacuumlazy.c:1810 +#: commands/vacuumlazy.c:1824 #, c-format msgid "\"%s\": truncated %u to %u pages" msgstr "\"%s\": %u 에서 %u 페이지로 정지했음" -#: commands/vacuumlazy.c:1866 +#: commands/vacuumlazy.c:1889 #, c-format msgid "\"%s\": suspending truncate due to conflicting lock request" msgstr "\"%s\": 잠금 요청 충돌로 자료 비우기 작업을 지연합니다" -#: commands/variable.c:164 utils/misc/guc.c:9899 +#: commands/variable.c:165 utils/misc/guc.c:10030 utils/misc/guc.c:10092 #, c-format msgid "Unrecognized key word: \"%s\"." msgstr "알 수 없는 키워드: \"%s\"" -#: commands/variable.c:176 +#: commands/variable.c:177 #, c-format msgid "Conflicting \"datestyle\" specifications." 
msgstr "\"datestyle\" 지정이 충돌함" -#: commands/variable.c:298 +#: commands/variable.c:299 #, c-format msgid "Cannot specify months in time zone interval." msgstr "타임 존 간격에 달을 지정할 수 없음" -#: commands/variable.c:304 +#: commands/variable.c:305 #, c-format msgid "Cannot specify days in time zone interval." msgstr "타임 존 간격에 일을 지정할 수 없음" -#: commands/variable.c:346 commands/variable.c:428 +#: commands/variable.c:343 commands/variable.c:425 #, c-format msgid "time zone \"%s\" appears to use leap seconds" msgstr "\"%s\" time zone 에서 leap second를 사용합니다" -#: commands/variable.c:348 commands/variable.c:430 +#: commands/variable.c:345 commands/variable.c:427 #, c-format msgid "PostgreSQL does not support leap seconds." msgstr "PostgreSQL에서는 leap second를 지원하지 않습니다" -#: commands/variable.c:357 +#: commands/variable.c:354 #, c-format msgid "UTC timezone offset is out of range." msgstr "UTC 타입존 오프세트 범위가 벗어남." -#: commands/variable.c:497 +#: commands/variable.c:494 #, c-format msgid "cannot set transaction read-write mode inside a read-only transaction" msgstr "읽기 전용 트랜잭션 내에서 트랜잭션을 읽기/쓰기 모드로 설정할 수 없음" -#: commands/variable.c:504 +#: commands/variable.c:501 #, c-format msgid "transaction read-write mode must be set before any query" msgstr "읽기/쓰기 모드 트랜잭션은 모든 쿼리 앞에 지정해야 합니다." -#: commands/variable.c:511 +#: commands/variable.c:508 #, c-format msgid "cannot set transaction read-write mode during recovery" msgstr "복구 작업 중에는 트랜잭션을 읽기/쓰기 모드로 설정할 수 없음" -#: commands/variable.c:560 +#: commands/variable.c:557 #, c-format msgid "SET TRANSACTION ISOLATION LEVEL must be called before any query" msgstr "쿼리보다 먼저 SET TRANSACTION ISOLATION LEVEL을 호출해야 함" -#: commands/variable.c:567 +#: commands/variable.c:564 #, c-format msgid "SET TRANSACTION ISOLATION LEVEL must not be called in a subtransaction" msgstr "하위 트랜잭션에서 SET TRANSACTION ISOLATION LEVEL을 호출하지 않아야 함" -#: commands/variable.c:574 storage/lmgr/predicate.c:1587 +#: commands/variable.c:571 storage/lmgr/predicate.c:1649 #, c-format msgid "cannot use serializable mode in a hot standby" msgstr "읽기 전용 보조 서버 상태에서는 serializable 모드를 사용할 수 없음" -#: commands/variable.c:575 +#: commands/variable.c:572 #, c-format msgid "You can use REPEATABLE READ instead." msgstr "대신에, REPEATABLE READ 명령을 사용할 수 있음." -#: commands/variable.c:623 +#: commands/variable.c:620 #, c-format msgid "" "SET TRANSACTION [NOT] DEFERRABLE cannot be called within a subtransaction" msgstr "" "하위 트랜잭션에서 SET TRANSACTION [NOT] DEFERRABLE 구문은 사용할 수 없음" -#: commands/variable.c:629 +#: commands/variable.c:626 #, c-format msgid "SET TRANSACTION [NOT] DEFERRABLE must be called before any query" msgstr "모든 쿼리보다 먼저 SET TRANSACTION [NOT] DEFERRABLE 구문을 사용해야 함" -#: commands/variable.c:711 +#: commands/variable.c:708 #, c-format msgid "Conversion between %s and %s is not supported." msgstr "%s 인코딩과 %s 인코딩 사이의 변환은 지원하지 않습니다" -#: commands/variable.c:718 +#: commands/variable.c:715 #, c-format msgid "Cannot change \"client_encoding\" now." 
-msgstr "\"client_encoding\" 값을 바꿀 수 없음" +msgstr "\"client_encoding\" 값을 지금은 바꿀 수 없음" -#: commands/variable.c:779 +#: commands/variable.c:776 #, c-format -msgid "cannot change client_encoding in a parallel worker" -msgstr "병렬 작업자에서는 client_encoding 설정을 할 수 없음" +msgid "cannot change client_encoding during a parallel operation" +msgstr "병렬 작업 중에는 client_encoding 설정을 할 수 없음" -#: commands/variable.c:915 +#: commands/variable.c:912 #, c-format msgid "permission denied to set role \"%s\"" msgstr "\"%s\" 롤 권한을 지정할 수 없음" @@ -10423,43 +11226,43 @@ msgstr "\"%s\" 칼럼 자료 처리를 위한 정렬 규칙을 결정할 수 없 msgid "view must have at least one column" msgstr "뷰에는 적어도 한 개 이상의 칼럼이 있어야 합니다" -#: commands/view.c:280 commands/view.c:292 +#: commands/view.c:281 commands/view.c:293 #, c-format msgid "cannot drop columns from view" msgstr "뷰에서 칼럼을 삭제할 수 없음" -#: commands/view.c:297 +#: commands/view.c:298 #, c-format msgid "cannot change name of view column \"%s\" to \"%s\"" msgstr "뷰에서 \"%s\" 칼럼 이름을 \"%s\"(으)로 바꿀 수 없음" -#: commands/view.c:305 +#: commands/view.c:306 #, c-format msgid "cannot change data type of view column \"%s\" from %s to %s" msgstr "뷰에서 \"%s\" 칼럼 자료형을을 %s에서 %s(으)로 바꿀 수 없음" -#: commands/view.c:444 +#: commands/view.c:451 #, c-format msgid "views must not contain SELECT INTO" msgstr "뷰에는 SELECT INTO 구문을 포함할 수 없음" -#: commands/view.c:457 +#: commands/view.c:463 #, c-format msgid "views must not contain data-modifying statements in WITH" msgstr "뷰로 사용될 쿼리의 WITH 절에는 자료 변경 구문이 있으면 안됩니다." -#: commands/view.c:528 +#: commands/view.c:533 #, c-format msgid "CREATE VIEW specifies more column names than columns" msgstr "CREATE VIEW 는 columns 보다는 좀더 많은 열 이름을 명시해야 한다" -#: commands/view.c:536 +#: commands/view.c:541 #, c-format msgid "views cannot be unlogged because they do not have storage" msgstr "" "뷰는 저장 공간을 사용하지 않기 때문에 unlogged 속성을 지정할 수 없습니다." -#: commands/view.c:550 +#: commands/view.c:555 #, c-format msgid "view \"%s\" will be a temporary view" msgstr "\"%s\" 뷰는 임시적인 뷰로 만들어집니다" @@ -10496,19 +11299,136 @@ msgstr "\"%s\" 커서가 로우에 놓여 있지 않음" msgid "cursor \"%s\" is not a simply updatable scan of table \"%s\"" msgstr "\"%s\" 커서는 \"%s\" 테이블의 단순 업데이트 가능한 스캔이 아님" -#: executor/execCurrent.c:231 executor/execQual.c:1178 +#: executor/execCurrent.c:231 executor/execExprInterp.c:1889 #, c-format msgid "" "type of parameter %d (%s) does not match that when preparing the plan (%s)" msgstr "" "%d번째 매개 변수의 자료형(%s)이 미리 준비된 실행계획의 자료형(%s)과 다릅니다" -#: executor/execCurrent.c:243 executor/execQual.c:1190 +#: executor/execCurrent.c:243 executor/execExprInterp.c:1901 #, c-format msgid "no value found for parameter %d" msgstr "%d번째 매개 변수 값이 없습니다" -#: executor/execIndexing.c:544 +#: executor/execExpr.c:780 parser/parse_agg.c:779 +#, c-format +msgid "window function calls cannot be nested" +msgstr "윈도우 함수 호출을 중첩할 수 없음" + +#: executor/execExpr.c:1236 +#, c-format +msgid "target type is not an array" +msgstr "대상 자료형이 배열이 아닙니다." + +#: executor/execExpr.c:1559 +#, c-format +msgid "ROW() column has type %s instead of type %s" +msgstr "ROW() 칼럼은 %s 자료형을 가집니다. 
%s 자료형 대신에" + +#: executor/execExpr.c:2094 executor/execSRF.c:670 parser/parse_func.c:120 +#: parser/parse_func.c:547 parser/parse_func.c:921 +#, c-format +msgid "cannot pass more than %d argument to a function" +msgid_plural "cannot pass more than %d arguments to a function" +msgstr[0] "함수에 최대 %d개의 인자를 전달할 수 있음" + +#: executor/execExpr.c:2371 executor/execExpr.c:2377 +#: executor/execExprInterp.c:2226 utils/adt/arrayfuncs.c:260 +#: utils/adt/arrayfuncs.c:558 utils/adt/arrayfuncs.c:1288 +#: utils/adt/arrayfuncs.c:3361 utils/adt/arrayfuncs.c:5239 +#: utils/adt/arrayfuncs.c:5756 +#, c-format +msgid "number of array dimensions (%d) exceeds the maximum allowed (%d)" +msgstr "지정한 배열 크기(%d)가 최대치(%d)를 초과했습니다" + +#: executor/execExprInterp.c:1561 +#, c-format +msgid "attribute %d of type %s has been dropped" +msgstr "%d 번째 속성(대상 자료형 %s)이 삭제되었음" + +#: executor/execExprInterp.c:1567 +#, c-format +msgid "attribute %d of type %s has wrong type" +msgstr "%d 번째 속성(대상 자료형 %s)의 자료형이 잘못되었음" + +#: executor/execExprInterp.c:1569 executor/execExprInterp.c:2512 +#, c-format +msgid "Table has type %s, but query expects %s." +msgstr "테이블에는 %s 자료형이지만, 쿼리에서는 %s 자료형입니다." + +#: executor/execExprInterp.c:1979 +#, c-format +msgid "WHERE CURRENT OF is not supported for this table type" +msgstr "WHERE CURRENT OF 구문은 이 테이블 형 대상으로 지원하지 않습니다." + +#: executor/execExprInterp.c:2204 +#, c-format +msgid "cannot merge incompatible arrays" +msgstr "배열 형태가 서로 틀려 병합할 수 없습니다" + +#: executor/execExprInterp.c:2205 +#, c-format +msgid "" +"Array with element type %s cannot be included in ARRAY construct with " +"element type %s." +msgstr "" +"%s 자료형의 요소로 구성된 배열은 %s 자료형의 요소로 구성된 ARRAY 구문에 포함" +"될 수 없습니다." + +#: executor/execExprInterp.c:2246 executor/execExprInterp.c:2276 +#, c-format +msgid "" +"multidimensional arrays must have array expressions with matching dimensions" +msgstr "다차원 배열에는 일치하는 차원이 포함된 배열 식이 있어야 함" + +#: executor/execExprInterp.c:2511 +#, c-format +msgid "attribute %d has wrong type" +msgstr "%d 속성의 형식이 잘못됨" + +#: executor/execExprInterp.c:2620 +#, c-format +msgid "array subscript in assignment must not be null" +msgstr "배열 하위 스크립트로 지정하는 값으로 null 값을 사용할 수 없습니다" + +#: executor/execExprInterp.c:3053 utils/adt/domains.c:148 +#, c-format +msgid "domain %s does not allow null values" +msgstr "%s 도메인에서는 null 값을 허용하지 않습니다" + +#: executor/execExprInterp.c:3068 utils/adt/domains.c:183 +#, c-format +msgid "value for domain %s violates check constraint \"%s\"" +msgstr "%s 도메인용 값이 \"%s\" 체크 제약 조건을 위반했습니다" + +#: executor/execExprInterp.c:3435 executor/execExprInterp.c:3452 +#: executor/execExprInterp.c:3554 executor/nodeModifyTable.c:96 +#: executor/nodeModifyTable.c:106 executor/nodeModifyTable.c:123 +#: executor/nodeModifyTable.c:131 +#, c-format +msgid "table row type and query-specified row type do not match" +msgstr "테이블 행 형식과 쿼리 지정 행 형식이 일치하지 않음" + +#: executor/execExprInterp.c:3436 +#, c-format +msgid "Table row contains %d attribute, but query expects %d." +msgid_plural "Table row contains %d attributes, but query expects %d." +msgstr[0] "" +"테이블 행에는 %d개 속성이 포함되어 있는데 쿼리에는 %d개가 필요합니다." + +#: executor/execExprInterp.c:3453 executor/nodeModifyTable.c:107 +#, c-format +msgid "Table has type %s at ordinal position %d, but query expects %s." +msgstr "" +"테이블에는 %s 형식이 있는데(서수 위치 %d) 쿼리에는 %s이(가) 필요합니다." + +#: executor/execExprInterp.c:3555 executor/execSRF.c:925 +#, c-format +msgid "Physical storage mismatch on dropped attribute at ordinal position %d." +msgstr "서수 위치 %d의 삭제된 속성에서 실제 스토리지 불일치가 발생합니다." 
+ +#: executor/execIndexing.c:543 #, c-format msgid "" "ON CONFLICT does not support deferrable unique constraints/exclusion " @@ -10517,52 +11437,52 @@ msgstr "" "지연 가능한 고유 제약조건이나 제외 제약 조건은 ON CONFLICT 판별자로 사용할 " "수 없습니다." -#: executor/execIndexing.c:821 +#: executor/execIndexing.c:818 #, c-format msgid "could not create exclusion constraint \"%s\"" msgstr "\"%s\" exclusion 제약 조건을 만들 수 없음" -#: executor/execIndexing.c:824 +#: executor/execIndexing.c:821 #, c-format msgid "Key %s conflicts with key %s." msgstr "%s 키와 %s 가 충돌함" -#: executor/execIndexing.c:826 +#: executor/execIndexing.c:823 #, c-format msgid "Key conflicts exist." msgstr "키 충돌 발생" -#: executor/execIndexing.c:832 +#: executor/execIndexing.c:829 #, c-format msgid "conflicting key value violates exclusion constraint \"%s\"" msgstr "\"%s\" exclusion 제약 조건에 따라 키 값 충돌이 발생했습니다." -#: executor/execIndexing.c:835 +#: executor/execIndexing.c:832 #, c-format msgid "Key %s conflicts with existing key %s." msgstr "%s 키가 이미 있는 %s 키와 충돌합니다." -#: executor/execIndexing.c:837 +#: executor/execIndexing.c:834 #, c-format msgid "Key conflicts with existing key." msgstr "키가 기존 키와 충돌함" -#: executor/execMain.c:1027 +#: executor/execMain.c:1115 #, c-format msgid "cannot change sequence \"%s\"" msgstr "\"%s\" 시퀀스를 바꿀 수 없음" -#: executor/execMain.c:1033 +#: executor/execMain.c:1121 #, c-format msgid "cannot change TOAST relation \"%s\"" msgstr "\"%s\" TOAST 릴레이션을 바꿀 수 없음" -#: executor/execMain.c:1051 rewrite/rewriteHandler.c:2648 +#: executor/execMain.c:1139 rewrite/rewriteHandler.c:2738 #, c-format msgid "cannot insert into view \"%s\"" msgstr "\"%s\" 뷰에 자료를 입력할 수 없습니다" -#: executor/execMain.c:1053 rewrite/rewriteHandler.c:2651 +#: executor/execMain.c:1141 rewrite/rewriteHandler.c:2741 #, c-format msgid "" "To enable inserting into the view, provide an INSTEAD OF INSERT trigger or " @@ -10571,12 +11491,12 @@ msgstr "" "뷰를 통해 자료를 입력하려면, INSTEAD OF INSERT 트리거나 ON INSERT DO INSTEAD " "룰을 사용하세요" -#: executor/execMain.c:1059 rewrite/rewriteHandler.c:2656 +#: executor/execMain.c:1147 rewrite/rewriteHandler.c:2746 #, c-format msgid "cannot update view \"%s\"" msgstr "\"%s\" 뷰로는 자료를 갱신할 수 없습니다" -#: executor/execMain.c:1061 rewrite/rewriteHandler.c:2659 +#: executor/execMain.c:1149 rewrite/rewriteHandler.c:2749 #, c-format msgid "" "To enable updating the view, provide an INSTEAD OF UPDATE trigger or an " @@ -10585,12 +11505,12 @@ msgstr "" "뷰 자료 갱신 기능은 INSTEAD OF UPDATE 트리거를 사용하거나, ON UPDATE DO " "INSTEAD 속성으로 룰을 만들어서 사용해 보세요." -#: executor/execMain.c:1067 rewrite/rewriteHandler.c:2664 +#: executor/execMain.c:1155 rewrite/rewriteHandler.c:2754 #, c-format msgid "cannot delete from view \"%s\"" msgstr "\"%s\" 뷰로는 자료를 삭제할 수 없습니다" -#: executor/execMain.c:1069 rewrite/rewriteHandler.c:2667 +#: executor/execMain.c:1157 rewrite/rewriteHandler.c:2757 #, c-format msgid "" "To enable deleting from the view, provide an INSTEAD OF DELETE trigger or an " @@ -10599,303 +11519,234 @@ msgstr "" "뷰 자료 삭제 기능은 INSTEAD OF DELETE 트리거를 사용하거나, ON DELETE DO " "INSTEAD 속성으로 룰을 만들어서 사용해 보세요." 
-#: executor/execMain.c:1080 +#: executor/execMain.c:1168 #, c-format msgid "cannot change materialized view \"%s\"" msgstr "\"%s\" 구체화된 뷰를 바꿀 수 없음" -#: executor/execMain.c:1092 +#: executor/execMain.c:1187 #, c-format msgid "cannot insert into foreign table \"%s\"" msgstr "\"%s\" 외부 테이블에 자료를 입력할 수 없음" -#: executor/execMain.c:1098 +#: executor/execMain.c:1193 #, c-format msgid "foreign table \"%s\" does not allow inserts" msgstr "\"%s\" 외부 테이블은 자료 입력을 허용하지 않음" -#: executor/execMain.c:1105 +#: executor/execMain.c:1200 #, c-format msgid "cannot update foreign table \"%s\"" msgstr "\"%s\" 외부 테이블에 자료를 변경 할 수 없음" -#: executor/execMain.c:1111 +#: executor/execMain.c:1206 #, c-format msgid "foreign table \"%s\" does not allow updates" msgstr "\"%s\" 외부 테이블은 자료 변경을 허용하지 않음" -#: executor/execMain.c:1118 +#: executor/execMain.c:1213 #, c-format msgid "cannot delete from foreign table \"%s\"" msgstr "\"%s\" 외부 테이블에 자료를 삭제 할 수 없음" -#: executor/execMain.c:1124 +#: executor/execMain.c:1219 #, c-format msgid "foreign table \"%s\" does not allow deletes" msgstr "\"%s\" 외부 테이블은 자료 삭제를 허용하지 않음" -#: executor/execMain.c:1135 +#: executor/execMain.c:1230 #, c-format msgid "cannot change relation \"%s\"" msgstr "\"%s\" 릴레이션을 바꿀 수 없음" -#: executor/execMain.c:1161 +#: executor/execMain.c:1257 #, c-format msgid "cannot lock rows in sequence \"%s\"" msgstr "\"%s\" 시퀀스에서 로우를 잠글 수 없음" -#: executor/execMain.c:1168 +#: executor/execMain.c:1264 #, c-format msgid "cannot lock rows in TOAST relation \"%s\"" msgstr "\"%s\" TOAST 릴레이션에서 로우를 잠글 수 없음" -#: executor/execMain.c:1175 +#: executor/execMain.c:1271 #, c-format msgid "cannot lock rows in view \"%s\"" msgstr "\"%s\" 뷰에서 로우를 잠글 수 없음" -#: executor/execMain.c:1183 +#: executor/execMain.c:1279 #, c-format msgid "cannot lock rows in materialized view \"%s\"" msgstr "\"%s\" 구체화된 뷰에서 로우를 잠글 수 없음" -#: executor/execMain.c:1192 executor/execMain.c:2613 -#: executor/nodeLockRows.c:132 +#: executor/execMain.c:1288 executor/execMain.c:2929 +#: executor/nodeLockRows.c:136 #, c-format msgid "cannot lock rows in foreign table \"%s\"" msgstr "\"%s\" 외부 테이블에서 로우를 잠글 수 없음" -#: executor/execMain.c:1198 +#: executor/execMain.c:1294 #, c-format msgid "cannot lock rows in relation \"%s\"" msgstr "\"%s\" 릴레이션에서 로우를 잠글 수 없음" -#: executor/execMain.c:1731 -#, c-format -msgid "null value in column \"%s\" violates not-null constraint" -msgstr "\"%s\" 칼럼의 null 값이 not null 제약조건을 위반했습니다." - -#: executor/execMain.c:1733 executor/execMain.c:1759 executor/execMain.c:1848 -#, c-format -msgid "Failing row contains %s." -msgstr "실패한 자료: %s" - -#: executor/execMain.c:1757 -#, c-format -msgid "new row for relation \"%s\" violates check constraint \"%s\"" -msgstr "새 자료가 \"%s\" 릴레이션의 \"%s\" 체크 제약 조건을 위반했습니다" - -#: executor/execMain.c:1846 -#, c-format -msgid "new row violates check option for view \"%s\"" -msgstr "새 자료가 \"%s\" 뷰의 체크 제약 조건을 위반했습니다" - -#: executor/execMain.c:1856 -#, c-format -msgid "new row violates row-level security policy \"%s\" for table \"%s\"" -msgstr "" -"새 자료가 \"%s\" 로우 단위 보안 정책을 위반했습니다, 해당 테이블: \"%s\"" - -#: executor/execMain.c:1861 -#, c-format -msgid "new row violates row-level security policy for table \"%s\"" -msgstr "새 자료가 \"%s\" 테이블의 로우 단위 보안 정책을 위반했습니다." 
- -#: executor/execMain.c:1868 -#, c-format -msgid "" -"new row violates row-level security policy \"%s\" (USING expression) for " -"table \"%s\"" -msgstr "" -"새 자료가 \"%s\" 로우 단위 보안 정책(USING 절 사용)을 위반했습니다, 해당 테이" -"블: \"%s\"" - -#: executor/execMain.c:1873 -#, c-format -msgid "" -"new row violates row-level security policy (USING expression) for table \"%s" -"\"" -msgstr "" -"새 자료가 \"%s\" 테이블의 로우 단위 보안 정책(USING 절 사용)을 위반했습니다." - -#: executor/execQual.c:302 executor/execQual.c:339 executor/execQual.c:3236 -#: utils/adt/array_userfuncs.c:484 utils/adt/arrayfuncs.c:260 -#: utils/adt/arrayfuncs.c:558 utils/adt/arrayfuncs.c:1288 -#: utils/adt/arrayfuncs.c:3361 utils/adt/arrayfuncs.c:5241 -#: utils/adt/arrayfuncs.c:5758 -#, c-format -msgid "number of array dimensions (%d) exceeds the maximum allowed (%d)" -msgstr "지정한 배열 크기(%d)가 최대치(%d)를 초과했습니다" - -#: executor/execQual.c:324 executor/execQual.c:360 -#, c-format -msgid "array subscript in assignment must not be null" -msgstr "배열 하위 스크립트로 지정하는 값으로 null 값을 사용할 수 없습니다" - -#: executor/execQual.c:657 executor/execQual.c:4183 -#, c-format -msgid "attribute %d has wrong type" -msgstr "%d 속성의 형식이 잘못됨" - -#: executor/execQual.c:658 executor/execQual.c:4184 -#, c-format -msgid "Table has type %s, but query expects %s." -msgstr "테이블에는 %s 자료형이지만, 쿼리에서는 %s 자료형입니다." - -#: executor/execQual.c:851 executor/execQual.c:868 executor/execQual.c:1068 -#: executor/nodeModifyTable.c:95 executor/nodeModifyTable.c:105 -#: executor/nodeModifyTable.c:122 executor/nodeModifyTable.c:130 -#, c-format -msgid "table row type and query-specified row type do not match" -msgstr "테이블 행 형식과 쿼리 지정 행 형식이 일치하지 않음" - -#: executor/execQual.c:852 -#, c-format -msgid "Table row contains %d attribute, but query expects %d." -msgid_plural "Table row contains %d attributes, but query expects %d." -msgstr[0] "" -"테이블 행에는 %d개 속성이 포함되어 있는데 쿼리에는 %d개가 필요합니다." +#: executor/execMain.c:1925 +#, c-format +msgid "new row for relation \"%s\" violates partition constraint" +msgstr "새 자료가 \"%s\" 릴레이션의 파티션 제약 조건을 위반했습니다" -#: executor/execQual.c:869 executor/nodeModifyTable.c:106 +#: executor/execMain.c:1927 executor/execMain.c:2006 executor/execMain.c:2053 +#: executor/execMain.c:2164 #, c-format -msgid "Table has type %s at ordinal position %d, but query expects %s." -msgstr "" -"테이블에는 %s 형식이 있는데(서수 위치 %d) 쿼리에는 %s이(가) 필요합니다." +msgid "Failing row contains %s." +msgstr "실패한 자료: %s" -#: executor/execQual.c:1069 executor/execQual.c:1665 +#: executor/execMain.c:2004 #, c-format -msgid "Physical storage mismatch on dropped attribute at ordinal position %d." -msgstr "서수 위치 %d의 삭제된 속성에서 실제 스토리지 불일치가 발생합니다." +msgid "null value in column \"%s\" violates not-null constraint" +msgstr "\"%s\" 칼럼의 null 값이 not null 제약조건을 위반했습니다." 
-#: executor/execQual.c:1344 parser/parse_func.c:115 parser/parse_func.c:542 -#: parser/parse_func.c:897 +#: executor/execMain.c:2051 #, c-format -msgid "cannot pass more than %d argument to a function" -msgid_plural "cannot pass more than %d arguments to a function" -msgstr[0] "함수에 최대 %d개의 인자를 전달할 수 있음" +msgid "new row for relation \"%s\" violates check constraint \"%s\"" +msgstr "새 자료가 \"%s\" 릴레이션의 \"%s\" 체크 제약 조건을 위반했습니다" -#: executor/execQual.c:1533 +#: executor/execMain.c:2162 #, c-format -msgid "functions and operators can take at most one set argument" -msgstr "함수와 연산자는 set 인자로는 오직 한 개만 사용할 수 있습니다" +msgid "new row violates check option for view \"%s\"" +msgstr "새 자료가 \"%s\" 뷰의 체크 제약 조건을 위반했습니다" -#: executor/execQual.c:1583 +#: executor/execMain.c:2172 #, c-format -msgid "" -"function returning setof record called in context that cannot accept type " -"record" +msgid "new row violates row-level security policy \"%s\" for table \"%s\"" msgstr "" -"setof 레코드 반환 함수가 type 레코드를 허용하지 않는 컨텍스트에서 호출됨" +"새 자료가 \"%s\" 로우 단위 보안 정책을 위반했습니다, 해당 테이블: \"%s\"" -#: executor/execQual.c:1638 executor/execQual.c:1654 executor/execQual.c:1664 +#: executor/execMain.c:2177 #, c-format -msgid "function return row and query-specified return row do not match" -msgstr "함수 반환 행과 쿼리 지정 반환 행이 일치하지 않음" +msgid "new row violates row-level security policy for table \"%s\"" +msgstr "새 자료가 \"%s\" 테이블의 로우 단위 보안 정책을 위반했습니다." -#: executor/execQual.c:1639 +#: executor/execMain.c:2184 #, c-format -msgid "Returned row contains %d attribute, but query expects %d." -msgid_plural "Returned row contains %d attributes, but query expects %d." -msgstr[0] "" -"반환된 행에는 %d개 속성이 포함되어 있는데 쿼리에는 %d개가 필요합니다." +msgid "" +"new row violates row-level security policy \"%s\" (USING expression) for " +"table \"%s\"" +msgstr "" +"새 자료가 \"%s\" 로우 단위 보안 정책(USING 절 사용)을 위반했습니다, 해당 테이" +"블: \"%s\"" -#: executor/execQual.c:1655 +#: executor/execMain.c:2189 #, c-format -msgid "Returned type %s at ordinal position %d, but query expects %s." -msgstr "반환된 형식은 %s인데(서수 위치 %d) 쿼리에는 %s이(가) 필요합니다." +msgid "" +"new row violates row-level security policy (USING expression) for table \"%s" +"\"" +msgstr "" +"새 자료가 \"%s\" 테이블의 로우 단위 보안 정책(USING 절 사용)을 위반했습니다." -#: executor/execQual.c:1897 executor/execQual.c:2335 +#: executor/execMain.c:3398 #, c-format -msgid "table-function protocol for materialize mode was not followed" -msgstr "materialize 모드를 위한 테이블 함수 프로토콜이 뒤이어 오지 않았습니다" +msgid "no partition of relation \"%s\" found for row" +msgstr "해당 로우를 위한 \"%s\" 릴레이션용 파티션이 없음" -#: executor/execQual.c:1917 executor/execQual.c:2342 +#: executor/execMain.c:3400 #, c-format -msgid "unrecognized table-function returnMode: %d" -msgstr "알 수 없는 테이블-함수 리턴모드: %d" +msgid "Partition key of the failing row contains %s." 
+msgstr "실패한 로우의 파티션 키 값: %s" -#: executor/execQual.c:2287 +#: executor/execReplication.c:196 executor/execReplication.c:354 #, c-format -msgid "rows returned by function are not all of the same row type" -msgstr "함수 호출로 반환되는 로우가 같은 로우형의 전부가 아닙니다" +msgid "concurrent update, retrying" +msgstr "동시 업데이트, 다시 시도 중" -#: executor/execQual.c:2522 +#: executor/execReplication.c:256 parser/parse_oper.c:228 +#: utils/adt/array_userfuncs.c:724 utils/adt/array_userfuncs.c:863 +#: utils/adt/arrayfuncs.c:3639 utils/adt/arrayfuncs.c:4077 +#: utils/adt/arrayfuncs.c:6037 utils/adt/rowtypes.c:1167 #, c-format -msgid "IS DISTINCT FROM does not support set arguments" -msgstr "IS DISTINCT FROM 구문에서는 set 인자들을 지원하지 않습니다" +msgid "could not identify an equality operator for type %s" +msgstr "" +"%s 자료형에서 사용할 동등 연산자(equality operator)를 찾을 수 없습니다." -#: executor/execQual.c:2599 +#: executor/execReplication.c:562 #, c-format -msgid "op ANY/ALL (array) does not support set arguments" -msgstr "op ANY/ALL (array) 에서는 set 인자들을 지원하지 않습니다" +msgid "" +"cannot update table \"%s\" because it does not have a replica identity and " +"publishes updates" +msgstr "" +"\"%s\" 테이블 업데이트 실패, 이 테이블에는 복제용 식별자를 지정하지 않았거" +"나, updates 옵션 없이 발행했습니다" -#: executor/execQual.c:3214 +#: executor/execReplication.c:564 #, c-format -msgid "cannot merge incompatible arrays" -msgstr "배열 형태가 서로 틀려 병합할 수 없습니다" +msgid "To enable updating the table, set REPLICA IDENTITY using ALTER TABLE." +msgstr "" +"업데이트를 하려면, ALTER TABLE 명령어에서 REPLICA IDENTITY 옵션을 사용하세요" -#: executor/execQual.c:3215 +#: executor/execReplication.c:568 #, c-format msgid "" -"Array with element type %s cannot be included in ARRAY construct with " -"element type %s." -msgstr "" -"%s 자료형의 요소로 구성된 배열은 %s 자료형의 요소로 구성된 ARRAY 구문에 포함" -"될 수 없습니다." +"cannot delete from table \"%s\" because it does not have a replica identity " +"and publishes deletes" +msgstr "\"%s\" 테이블 자료 삭제 실패, 복제 식별자와 deletes 발행을 안함" -#: executor/execQual.c:3256 executor/execQual.c:3283 +#: executor/execReplication.c:570 #, c-format msgid "" -"multidimensional arrays must have array expressions with matching dimensions" -msgstr "다차원 배열에는 일치하는 차원이 포함된 배열 식이 있어야 함" +"To enable deleting from the table, set REPLICA IDENTITY using ALTER TABLE." +msgstr "삭제 하려면, ALTER TABLE 명령어에서 REPLICA IDENTITY 옵션을 사용하세요" -#: executor/execQual.c:3798 +#: executor/execReplication.c:589 #, c-format -msgid "NULLIF does not support set arguments" -msgstr "NULLIF는 set 인자들을 지원하지 않습니다" +msgid "logical replication target relation \"%s.%s\" is not a table" +msgstr "\"%s.%s\" 논리 복제 대상 릴레이션은 테이블이 아닙니다" -#: executor/execQual.c:4046 utils/adt/domains.c:137 +#: executor/execSRF.c:308 #, c-format -msgid "domain %s does not allow null values" -msgstr "%s 도메인에서는 null 값을 허용하지 않습니다" +msgid "rows returned by function are not all of the same row type" +msgstr "함수 호출로 반환되는 로우가 같은 로우형의 전부가 아닙니다" -#: executor/execQual.c:4083 utils/adt/domains.c:179 +#: executor/execSRF.c:356 executor/execSRF.c:620 #, c-format -msgid "value for domain %s violates check constraint \"%s\"" -msgstr "%s 도메인용 값이 \"%s\" 체크 제약 조건을 위반했습니다" +msgid "table-function protocol for materialize mode was not followed" +msgstr "materialize 모드를 위한 테이블 함수 프로토콜이 뒤이어 오지 않았습니다" -#: executor/execQual.c:4438 +#: executor/execSRF.c:363 executor/execSRF.c:638 #, c-format -msgid "WHERE CURRENT OF is not supported for this table type" -msgstr "WHERE CURRENT OF 구문은 이 테이블 형 대상으로 지원하지 않습니다." 
+msgid "unrecognized table-function returnMode: %d" +msgstr "알 수 없는 테이블-함수 리턴모드: %d" -#: executor/execQual.c:4627 parser/parse_agg.c:758 +#: executor/execSRF.c:843 #, c-format -msgid "window function calls cannot be nested" -msgstr "윈도우 함수 호출을 중첩할 수 없음" +msgid "" +"function returning setof record called in context that cannot accept type " +"record" +msgstr "" +"setof 레코드 반환 함수가 type 레코드를 허용하지 않는 컨텍스트에서 호출됨" -#: executor/execQual.c:4839 +#: executor/execSRF.c:898 executor/execSRF.c:914 executor/execSRF.c:924 #, c-format -msgid "target type is not an array" -msgstr "대상 자료형이 배열이 아닙니다." +msgid "function return row and query-specified return row do not match" +msgstr "함수 반환 행과 쿼리 지정 반환 행이 일치하지 않음" -#: executor/execQual.c:4956 +#: executor/execSRF.c:899 #, c-format -msgid "ROW() column has type %s instead of type %s" -msgstr "ROW() 열은 %s 자료형을 가집니다. %s 자료형 대신에" +msgid "Returned row contains %d attribute, but query expects %d." +msgid_plural "Returned row contains %d attributes, but query expects %d." +msgstr[0] "" +"반환된 행에는 %d개 속성이 포함되어 있는데 쿼리에는 %d개가 필요합니다." -#: executor/execQual.c:5091 utils/adt/arrayfuncs.c:3803 -#: utils/adt/arrayfuncs.c:6325 utils/adt/rowtypes.c:927 +#: executor/execSRF.c:915 #, c-format -msgid "could not identify a comparison function for type %s" -msgstr "%s 자료형에서 사용할 비교함수를 찾을 수 없습니다." +msgid "Returned type %s at ordinal position %d, but query expects %s." +msgstr "반환된 형식은 %s인데(서수 위치 %d) 쿼리에는 %s이(가) 필요합니다." -#: executor/execUtils.c:813 +#: executor/execUtils.c:644 #, c-format msgid "materialized view \"%s\" has not been populated" msgstr "\"%s\" 구체화된 뷰가 아직 구체화되지 못했습니다." -#: executor/execUtils.c:815 +#: executor/execUtils.c:646 #, c-format msgid "Use the REFRESH MATERIALIZED VIEW command." msgstr "REFRESH MATERIALIZED VIEW 명령을 사용하세요." @@ -10905,24 +11756,24 @@ msgstr "REFRESH MATERIALIZED VIEW 명령을 사용하세요." msgid "could not determine actual type of argument declared %s" msgstr "%s 인자의 자료형으로 지정한 자료형의 기본 자료형을 찾을 수 없습니다" -#: executor/functions.c:511 +#: executor/functions.c:520 #, c-format msgid "cannot COPY to/from client in a SQL function" msgstr "SQL 함수에서 클라이언트 대상 COPY 작업을 할 수 없음" #. translator: %s is a SQL statement name -#: executor/functions.c:517 +#: executor/functions.c:526 #, c-format msgid "%s is not allowed in a SQL function" msgstr "SQL 함수에서 %s 지원되지 않음" #. translator: %s is a SQL statement name -#: executor/functions.c:524 executor/spi.c:1364 executor/spi.c:2154 +#: executor/functions.c:534 executor/spi.c:1282 executor/spi.c:2069 #, c-format msgid "%s is not allowed in a non-volatile function" msgstr "%s 구문은 비휘발성 함수(non-volatile function)에서 허용하지 않습니다" -#: executor/functions.c:650 +#: executor/functions.c:654 #, c-format msgid "" "could not determine actual result type for function declared to return type " @@ -10931,24 +11782,24 @@ msgstr "" "%s 자료형을 반환한다고 정의한 함수인데, 실재 반환 자료형을 결정할 수 없습니" "다." 
-#: executor/functions.c:1415 +#: executor/functions.c:1413 #, c-format msgid "SQL function \"%s\" statement %d" msgstr "SQL 함수 \"%s\"의 문 %d" -#: executor/functions.c:1441 +#: executor/functions.c:1439 #, c-format msgid "SQL function \"%s\" during startup" msgstr "시작 중 SQL 함수 \"%s\"" -#: executor/functions.c:1600 executor/functions.c:1637 -#: executor/functions.c:1649 executor/functions.c:1762 -#: executor/functions.c:1795 executor/functions.c:1825 +#: executor/functions.c:1597 executor/functions.c:1634 +#: executor/functions.c:1646 executor/functions.c:1759 +#: executor/functions.c:1792 executor/functions.c:1822 #, c-format msgid "return type mismatch in function declared to return %s" msgstr "리턴 자료형이 함수 정의에서 지정한 %s 리턴 자료형과 틀립니다" -#: executor/functions.c:1602 +#: executor/functions.c:1599 #, c-format msgid "" "Function's final statement must be SELECT or INSERT/UPDATE/DELETE RETURNING." @@ -10956,203 +11807,228 @@ msgstr "" "함수 내용의 맨 마지막 구문은 SELECT 또는 INSERT/UPDATE/DELETE RETURNING이어" "야 합니다." -#: executor/functions.c:1639 +#: executor/functions.c:1636 #, c-format msgid "Final statement must return exactly one column." msgstr "맨 마지막 구문은 정확히 하나의 칼럼만 반환해야 합니다." -#: executor/functions.c:1651 +#: executor/functions.c:1648 #, c-format msgid "Actual return type is %s." msgstr "실재 반환 자료형은 %s" -#: executor/functions.c:1764 +#: executor/functions.c:1761 #, c-format msgid "Final statement returns too many columns." msgstr "맨 마지막 구문이 너무 많은 칼럼을 반환합니다." -#: executor/functions.c:1797 +#: executor/functions.c:1794 #, c-format msgid "Final statement returns %s instead of %s at column %d." msgstr "" "맨 마지막 구문이 %s(기대되는 자료형: %s) 자료형을 %d 번째 칼럼에서 반환합니" "다." -#: executor/functions.c:1827 +#: executor/functions.c:1824 #, c-format msgid "Final statement returns too few columns." msgstr "맨 마지막 구문이 너무 적은 칼럼을 반환합니다." 
-#: executor/functions.c:1876 +#: executor/functions.c:1873 #, c-format msgid "return type %s is not supported for SQL functions" msgstr "반환 자료형인 %s 자료형은 SQL 함수에서 지원되지 않음" -#: executor/nodeAgg.c:3038 +#: executor/nodeAgg.c:3480 #, c-format msgid "combine function for aggregate %u must be declared as STRICT" msgstr "%u OID 집계함수에서 쓸 조합 함수는 STRICT 속성을 가져야 합니다" -#: executor/nodeAgg.c:3083 executor/nodeWindowAgg.c:2318 +#: executor/nodeAgg.c:3525 executor/nodeWindowAgg.c:2282 #, c-format msgid "aggregate %u needs to have compatible input type and transition type" msgstr "%u OID 집계함수에 호환 가능한 입력 형식과 변환 형식이 있어야 함" -#: executor/nodeAgg.c:3149 parser/parse_agg.c:612 parser/parse_agg.c:642 +#: executor/nodeAgg.c:3579 parser/parse_agg.c:618 parser/parse_agg.c:648 #, c-format msgid "aggregate function calls cannot be nested" msgstr "집계 함수는 중첩되어 호출 할 수 없음" -#: executor/nodeCustom.c:148 executor/nodeCustom.c:159 +#: executor/nodeCustom.c:152 executor/nodeCustom.c:163 #, c-format msgid "custom scan \"%s\" does not support MarkPos" msgstr "\"%s\" 이름의 칼럼 탐색은 MarkPos 기능을 지원하지 않음" -#: executor/nodeHashjoin.c:823 executor/nodeHashjoin.c:853 +#: executor/nodeHashjoin.c:770 executor/nodeHashjoin.c:800 #, c-format msgid "could not rewind hash-join temporary file: %m" msgstr "해시-조인 임시 파일을 되감을 수 없음: %m" -#: executor/nodeHashjoin.c:888 executor/nodeHashjoin.c:894 +#: executor/nodeHashjoin.c:835 executor/nodeHashjoin.c:841 #, c-format msgid "could not write to hash-join temporary file: %m" msgstr "hash-join 임시 파일을 쓸 수 없습니다: %m" -#: executor/nodeHashjoin.c:935 executor/nodeHashjoin.c:945 +#: executor/nodeHashjoin.c:882 executor/nodeHashjoin.c:892 #, c-format msgid "could not read from hash-join temporary file: %m" msgstr "해시-조인 임시 파일을 읽을 수 없음: %m" -#: executor/nodeIndexonlyscan.c:179 +#: executor/nodeIndexonlyscan.c:237 #, c-format msgid "lossy distance functions are not supported in index-only scans" msgstr "lossy distance 함수들은 인덱스 단독 탐색을 지원하지 않음" -#: executor/nodeLimit.c:253 +#: executor/nodeLimit.c:256 #, c-format msgid "OFFSET must not be negative" msgstr "OFFSET은 음수가 아니어야 함" -#: executor/nodeLimit.c:280 +#: executor/nodeLimit.c:282 #, c-format msgid "LIMIT must not be negative" msgstr "LIMIT는 음수가 아니어야 함" -#: executor/nodeMergejoin.c:1584 +#: executor/nodeMergejoin.c:1559 #, c-format msgid "RIGHT JOIN is only supported with merge-joinable join conditions" msgstr "RIGHT JOIN은 병합-조인 가능 조인 조건에서만 지원됨" -#: executor/nodeMergejoin.c:1604 +#: executor/nodeMergejoin.c:1579 #, c-format msgid "FULL JOIN is only supported with merge-joinable join conditions" msgstr "FULL JOIN은 병합-조인 가능 조인 조건에서만 지원됨" -#: executor/nodeModifyTable.c:96 +#: executor/nodeModifyTable.c:97 #, c-format msgid "Query has too many columns." msgstr "쿼리에 칼럼이 너무 많습니다." -#: executor/nodeModifyTable.c:123 +#: executor/nodeModifyTable.c:124 #, c-format msgid "Query provides a value for a dropped column at ordinal position %d." -msgstr "쿼리에서 서수 위치 %d에 있는 삭제된 열의 값을 제공합니다." +msgstr "쿼리에서 서수 위치 %d에 있는 삭제된 칼럼의 값을 제공합니다." -#: executor/nodeModifyTable.c:131 +#: executor/nodeModifyTable.c:132 #, c-format msgid "Query has too few columns." msgstr "쿼리에 칼럼이 너무 적습니다." -#: executor/nodeModifyTable.c:1132 +#: executor/nodeModifyTable.c:1258 #, c-format msgid "ON CONFLICT DO UPDATE command cannot affect row a second time" msgstr "" -#: executor/nodeModifyTable.c:1133 +#: executor/nodeModifyTable.c:1259 #, c-format msgid "" "Ensure that no rows proposed for insertion within the same command have " "duplicate constrained values." 
msgstr "" -#: executor/nodeSamplescan.c:307 +#: executor/nodeSamplescan.c:301 #, c-format msgid "TABLESAMPLE parameter cannot be null" msgstr "TABLESAMPLE 절에는 반드시 부가 옵션값들이 있어야 합니다" -#: executor/nodeSamplescan.c:320 +#: executor/nodeSamplescan.c:313 #, c-format msgid "TABLESAMPLE REPEATABLE parameter cannot be null" msgstr "TABLESAMPLE REPEATABLE 절은 더 이상의 부가 옵션을 쓰면 안됩니다." -#: executor/nodeSubplan.c:345 executor/nodeSubplan.c:384 -#: executor/nodeSubplan.c:1036 +#: executor/nodeSubplan.c:336 executor/nodeSubplan.c:375 +#: executor/nodeSubplan.c:1009 #, c-format msgid "more than one row returned by a subquery used as an expression" msgstr "표현식에 사용된 서브쿼리 결과가 하나 이상의 행을 리턴했습니다" +#: executor/nodeTableFuncscan.c:368 +#, c-format +msgid "namespace URI must not be null" +msgstr "네임스페이스 URI 값은 null 일 수 없습니다." + +#: executor/nodeTableFuncscan.c:379 +#, c-format +msgid "row filter expression must not be null" +msgstr "로우 필터 표현식은 null값이 아니여야 함" + +#: executor/nodeTableFuncscan.c:404 +#, c-format +msgid "column filter expression must not be null" +msgstr "칼럼 필터 표현식은 null값이 아니여야 함" + +#: executor/nodeTableFuncscan.c:405 +#, c-format +msgid "Filter for column \"%s\" is null." +msgstr "\"%s\" 칼럼용 필터가 null입니다." + +#: executor/nodeTableFuncscan.c:486 +#, c-format +msgid "null is not allowed in column \"%s\"" +msgstr "\"%s\" 칼럼은 null 값을 허용하지 않습니다" + #: executor/nodeWindowAgg.c:353 #, c-format msgid "moving-aggregate transition function must not return null" msgstr "moving-aggregate transition 함수는 null 값을 반환하면 안됩니다." -#: executor/nodeWindowAgg.c:1642 +#: executor/nodeWindowAgg.c:1624 #, c-format msgid "frame starting offset must not be null" msgstr "프래임 시작 위치값으로 null 값을 사용할 수 없습니다." -#: executor/nodeWindowAgg.c:1655 +#: executor/nodeWindowAgg.c:1637 #, c-format msgid "frame starting offset must not be negative" msgstr "프래임 시작 위치으로 음수 값을 사용할 수 없습니다." -#: executor/nodeWindowAgg.c:1668 +#: executor/nodeWindowAgg.c:1649 #, c-format msgid "frame ending offset must not be null" msgstr "프래임 끝 위치값으로 null 값을 사용할 수 없습니다." -#: executor/nodeWindowAgg.c:1681 +#: executor/nodeWindowAgg.c:1662 #, c-format msgid "frame ending offset must not be negative" msgstr "프래임 끝 위치값으로 음수 값을 사용할 수 없습니다." -#: executor/spi.c:210 +#: executor/spi.c:197 #, c-format msgid "transaction left non-empty SPI stack" msgstr "트랜잭션이 비어있지 않은 SPI 스택을 남겼습니다" -#: executor/spi.c:211 executor/spi.c:275 +#: executor/spi.c:198 executor/spi.c:261 #, c-format msgid "Check for missing \"SPI_finish\" calls." msgstr "\"SPI_finish\" 호출이 빠졌는지 확인하세요" -#: executor/spi.c:274 +#: executor/spi.c:260 #, c-format msgid "subtransaction left non-empty SPI stack" msgstr "하위 트랜잭션이 비어있지 않은 SPI 스택을 남겼습니다" -#: executor/spi.c:1225 +#: executor/spi.c:1143 #, c-format msgid "cannot open multi-query plan as cursor" msgstr "멀티 쿼리를 커서로 열 수는 없습니다" #. translator: %s is name of a SQL command, eg INSERT -#: executor/spi.c:1230 +#: executor/spi.c:1148 #, c-format msgid "cannot open %s query as cursor" msgstr "%s 쿼리로 커서를 열 수 없음." -#: executor/spi.c:1338 +#: executor/spi.c:1253 #, c-format msgid "DECLARE SCROLL CURSOR ... FOR UPDATE/SHARE is not supported" msgstr "DECLARE SCROLL CURSOR ... FOR UPDATE/SHARE는 지원되지 않음" -#: executor/spi.c:1339 parser/analyze.c:2360 +#: executor/spi.c:1254 parser/analyze.c:2447 #, c-format msgid "Scrollable cursors must be READ ONLY." msgstr "스크롤 가능 커서는 READ ONLY여야 합니다." 
-#: executor/spi.c:2459 +#: executor/spi.c:2374 #, c-format msgid "SQL statement \"%s\"" msgstr "SQL 구문: \"%s\"" @@ -11162,17 +12038,17 @@ msgstr "SQL 구문: \"%s\"" msgid "could not send tuple to shared-memory queue" msgstr "공유 메모리 큐로 튜플을 보낼 수 없음" -#: foreign/foreign.c:192 +#: foreign/foreign.c:188 #, c-format msgid "user mapping not found for \"%s\"" msgstr "\"%s\"에 대한 사용자 매핑을 찾을 수 없음" -#: foreign/foreign.c:644 +#: foreign/foreign.c:640 #, c-format msgid "invalid option \"%s\"" msgstr "\"%s\" 옵션이 잘못됨" -#: foreign/foreign.c:645 +#: foreign/foreign.c:641 #, c-format msgid "Valid options in this context are: %s" msgstr "이 컨텍스트에서 유효한 옵션: %s" @@ -11182,82 +12058,203 @@ msgstr "이 컨텍스트에서 유효한 옵션: %s" msgid "Cannot enlarge string buffer containing %d bytes by %d more bytes." msgstr "%d바이트가 포함된 문자열 버퍼를 %d바이트 더 확장할 수 없습니다." -#: libpq/auth.c:254 +#: libpq/auth-scram.c:199 libpq/auth-scram.c:439 libpq/auth-scram.c:448 +#, c-format +msgid "invalid SCRAM verifier for user \"%s\"" +msgstr "\"%s\" 사용자에 대한 잘못된 SCRAM 유효성 검사" + +#: libpq/auth-scram.c:210 +#, c-format +msgid "User \"%s\" does not have a valid SCRAM verifier." +msgstr "\"%s\" 사용자용 바른 SCRAM 유효성 검사가 없습니다." + +#: libpq/auth-scram.c:288 libpq/auth-scram.c:293 libpq/auth-scram.c:587 +#: libpq/auth-scram.c:595 libpq/auth-scram.c:676 libpq/auth-scram.c:686 +#: libpq/auth-scram.c:804 libpq/auth-scram.c:811 libpq/auth-scram.c:826 +#: libpq/auth-scram.c:1056 libpq/auth-scram.c:1064 +#, c-format +msgid "malformed SCRAM message" +msgstr "SCRAM 메시지가 형식에 맞지 않습니다" + +#: libpq/auth-scram.c:289 +#, c-format +msgid "The message is empty." +msgstr "메시지가 비었습니다." + +#: libpq/auth-scram.c:294 +#, c-format +msgid "Message length does not match input length." +msgstr "메시지 길이가 입력 길이와 같지 않습니다." + +#: libpq/auth-scram.c:326 +#, c-format +msgid "invalid SCRAM response" +msgstr "잘못된 SCRAM 응답" + +#: libpq/auth-scram.c:327 +#, c-format +msgid "Nonce does not match." +msgstr "토큰 불일치" + +#: libpq/auth-scram.c:401 +#, c-format +msgid "could not generate random salt" +msgstr "무작위 솔트 생성 실패" + +#: libpq/auth-scram.c:588 +#, c-format +msgid "Expected attribute \"%c\" but found \"%s\"." +msgstr "\"%c\" 속성이어야 하는데, \"%s\" 임." + +#: libpq/auth-scram.c:596 libpq/auth-scram.c:687 +#, c-format +msgid "Expected character \"=\" for attribute \"%c\"." +msgstr "\"%c\" 속성에는 \"=\" 문자가 와야합니다." + +#: libpq/auth-scram.c:677 +#, c-format +msgid "Attribute expected, but found invalid character \"%s\"." +msgstr "속성값이 와야하는데, \"%s\" 잘못된 문자가 발견되었음." + +#: libpq/auth-scram.c:800 +#, c-format +msgid "client requires SCRAM channel binding, but it is not supported" +msgstr "" + +#: libpq/auth-scram.c:805 +#, c-format +msgid "Unexpected channel-binding flag \"%s\"." +msgstr "예상치 못한 채널 바인딩 플래그 \"%s\"." + +#: libpq/auth-scram.c:812 +#, c-format +msgid "Comma expected, but found character \"%s\"." +msgstr "쉼표가 와야하는데, \"%s\" 문자가 발견되었음." + +#: libpq/auth-scram.c:822 +#, c-format +msgid "client uses authorization identity, but it is not supported" +msgstr "" + +#: libpq/auth-scram.c:827 +#, c-format +msgid "Unexpected attribute \"%s\" in client-first-message."
+msgstr "" + +#: libpq/auth-scram.c:843 +#, c-format +msgid "client requires an unsupported SCRAM extension" +msgstr "" + +#: libpq/auth-scram.c:857 +#, c-format +msgid "non-printable characters in SCRAM nonce" +msgstr "SCRAM 토큰에 인쇄할 수 없는 문자가 있음" + +#: libpq/auth-scram.c:974 +#, c-format +msgid "could not generate random nonce" +msgstr "무작위 토큰을 만들 수 없음" + +#: libpq/auth-scram.c:1042 +#, c-format +msgid "unexpected SCRAM channel-binding attribute in client-final-message" +msgstr "" + +#: libpq/auth-scram.c:1057 +#, c-format +msgid "Malformed proof in client-final-message." +msgstr "" + +#: libpq/auth-scram.c:1065 +#, c-format +msgid "Garbage found at the end of client-final-message." +msgstr "" + +#: libpq/auth.c:274 #, c-format msgid "authentication failed for user \"%s\": host rejected" msgstr "사용자 \"%s\"의 인증을 실패했습니다: 호스트 거부됨" -#: libpq/auth.c:257 +#: libpq/auth.c:277 #, c-format msgid "\"trust\" authentication failed for user \"%s\"" msgstr "사용자 \"%s\"의 \"trust\" 인증을 실패했습니다." -#: libpq/auth.c:260 +#: libpq/auth.c:280 #, c-format msgid "Ident authentication failed for user \"%s\"" msgstr "사용자 \"%s\"의 Ident 인증을 실패했습니다." -#: libpq/auth.c:263 +#: libpq/auth.c:283 #, c-format msgid "Peer authentication failed for user \"%s\"" msgstr "사용자 \"%s\"의 peer 인증을 실패했습니다." -#: libpq/auth.c:267 +#: libpq/auth.c:288 #, c-format msgid "password authentication failed for user \"%s\"" msgstr "사용자 \"%s\"의 password 인증을 실패했습니다" -#: libpq/auth.c:272 +#: libpq/auth.c:293 #, c-format msgid "GSSAPI authentication failed for user \"%s\"" msgstr "\"%s\" 사용자에 대한 GSSAPI 인증을 실패했습니다." -#: libpq/auth.c:275 +#: libpq/auth.c:296 #, c-format msgid "SSPI authentication failed for user \"%s\"" msgstr "\"%s\" 사용자에 대한 SSPI 인증을 실패했습니다." -#: libpq/auth.c:278 +#: libpq/auth.c:299 #, c-format msgid "PAM authentication failed for user \"%s\"" msgstr "사용자 \"%s\"의 PAM 인증을 실패했습니다." -#: libpq/auth.c:281 +#: libpq/auth.c:302 #, c-format msgid "BSD authentication failed for user \"%s\"" msgstr "\"%s\" 사용자에 대한 BSD 인증을 실패했습니다." -#: libpq/auth.c:284 +#: libpq/auth.c:305 #, c-format msgid "LDAP authentication failed for user \"%s\"" msgstr "\"%s\" 사용자의 LDAP 인증을 실패했습니다." -#: libpq/auth.c:287 +#: libpq/auth.c:308 #, c-format msgid "certificate authentication failed for user \"%s\"" msgstr "사용자 \"%s\"의 인증서 인증을 실패했습니다" -#: libpq/auth.c:290 +#: libpq/auth.c:311 #, c-format msgid "RADIUS authentication failed for user \"%s\"" msgstr "사용자 \"%s\"의 RADIUS 인증을 실패했습니다." 
-#: libpq/auth.c:293 +#: libpq/auth.c:314 #, c-format msgid "authentication failed for user \"%s\": invalid authentication method" msgstr "사용자 \"%s\"의 인증을 실패했습니다: 잘못된 인증 방법" -#: libpq/auth.c:297 +#: libpq/auth.c:318 #, c-format msgid "Connection matched pg_hba.conf line %d: \"%s\"" msgstr "pg_hba.conf 파일의 %d번째 줄에 지정한 인증 설정이 사용됨: \"%s\"" -#: libpq/auth.c:352 +#: libpq/auth.c:365 +#, c-format +msgid "" +"client certificates can only be checked if a root certificate store is " +"available" +msgstr "" +"루트 인증서 저장소가 사용 가능한 경우에만 클라이언트 인증서를 검사할 수 있음" + +#: libpq/auth.c:376 #, c-format msgid "connection requires a valid client certificate" msgstr "연결에 유효한 클라이언트 인증서가 필요함" -#: libpq/auth.c:394 +#: libpq/auth.c:409 #, c-format msgid "" "pg_hba.conf rejects replication connection for host \"%s\", user \"%s\", %s" @@ -11265,22 +12262,22 @@ msgstr "" "호스트 \"%s\", 사용자 \"%s\", %s 연결이 복제용 연결로는 pg_hba.conf 파일 설정" "에 따라 거부됩니다" -#: libpq/auth.c:396 libpq/auth.c:412 libpq/auth.c:470 libpq/auth.c:488 +#: libpq/auth.c:411 libpq/auth.c:427 libpq/auth.c:485 libpq/auth.c:503 msgid "SSL off" msgstr "SSL 중지" -#: libpq/auth.c:396 libpq/auth.c:412 libpq/auth.c:470 libpq/auth.c:488 +#: libpq/auth.c:411 libpq/auth.c:427 libpq/auth.c:485 libpq/auth.c:503 msgid "SSL on" msgstr "SSL 동작" -#: libpq/auth.c:400 +#: libpq/auth.c:415 #, c-format msgid "pg_hba.conf rejects replication connection for host \"%s\", user \"%s\"" msgstr "" "호스트 \"%s\", 사용자 \"%s\" 연결이 복제용 연결로는 pg_hba.conf 파일 설정에 " "따라 거부됩니다" -#: libpq/auth.c:409 +#: libpq/auth.c:424 #, c-format msgid "" "pg_hba.conf rejects connection for host \"%s\", user \"%s\", database \"%s" @@ -11289,7 +12286,7 @@ msgstr "" "호스트 \"%s\", 사용자 \"%s\", 데이터베이스 \"%s\", %s 연결이 pg_hba.conf 파" "일 설정에 따라 거부됩니다" -#: libpq/auth.c:416 +#: libpq/auth.c:431 #, c-format msgid "" "pg_hba.conf rejects connection for host \"%s\", user \"%s\", database \"%s\"" @@ -11297,35 +12294,35 @@ msgstr "" "호스트 \"%s\", 사용자 \"%s\", 데이터베이스 \"%s\" 연결이 pg_hba.conf 파일 설" "정에 따라 거부됩니다" -#: libpq/auth.c:445 +#: libpq/auth.c:460 #, c-format msgid "Client IP address resolved to \"%s\", forward lookup matches." msgstr "" "클라이언트 IP 주소가 \"%s\" 이름으로 확인됨, 호스트 이름 확인 기능으로 맞음" -#: libpq/auth.c:448 +#: libpq/auth.c:463 #, c-format msgid "Client IP address resolved to \"%s\", forward lookup not checked." msgstr "" "클라이언트 IP 주소가 \"%s\" 이름으로 확인됨, 호스트 이름 확인 기능 사용안함" -#: libpq/auth.c:451 +#: libpq/auth.c:466 #, c-format msgid "Client IP address resolved to \"%s\", forward lookup does not match." msgstr "" "클라이언트 IP 주소가 \"%s\" 이름으로 확인됨, 호스트 이름 확인 기능으로 틀림" -#: libpq/auth.c:454 +#: libpq/auth.c:469 #, c-format msgid "Could not translate client host name \"%s\" to IP address: %s." msgstr "\"%s\" 클라이언트 호스트 이름을 %s IP 주소로 전환할 수 없음." -#: libpq/auth.c:459 +#: libpq/auth.c:474 #, c-format msgid "Could not resolve client IP address to a host name: %s." 
msgstr "클라이언트 IP 주소를 파악할 수 없음: 대상 호스트 이름: %s" -#: libpq/auth.c:468 +#: libpq/auth.c:483 #, c-format msgid "" "no pg_hba.conf entry for replication connection from host \"%s\", user \"%s" @@ -11334,7 +12331,7 @@ msgstr "" "호스트 \"%s\", 사용자 \"%s\", %s 연결이 복제용 연결로 pg_hba.conf 파일에 설정" "되어 있지 않습니다" -#: libpq/auth.c:475 +#: libpq/auth.c:490 #, c-format msgid "" "no pg_hba.conf entry for replication connection from host \"%s\", user \"%s\"" @@ -11342,228 +12339,248 @@ msgstr "" "호스트 \"%s\", 사용자 \"%s\" 연결이 복제용 연결로 pg_hba.conf 파일에 설정되" "어 있지 않습니다" -#: libpq/auth.c:485 +#: libpq/auth.c:500 #, c-format msgid "no pg_hba.conf entry for host \"%s\", user \"%s\", database \"%s\", %s" msgstr "" "호스트 \"%s\", 사용자 \"%s\", 데이터베이스 \"%s\", %s 연결에 대한 설정이 " "pg_hba.conf 파일에 없습니다." -#: libpq/auth.c:493 +#: libpq/auth.c:508 #, c-format msgid "no pg_hba.conf entry for host \"%s\", user \"%s\", database \"%s\"" msgstr "" "호스트 \"%s\", 사용자 \"%s\", 데이터베이스 \"%s\" 연결에 대한 설정이 pg_hba." "conf 파일에 없습니다." -#: libpq/auth.c:536 libpq/hba.c:1178 +#: libpq/auth.c:661 +#, c-format +msgid "expected password response, got message type %d" +msgstr "메시지 타입 %d를 얻는 예상된 암호 응답" + +#: libpq/auth.c:689 +#, c-format +msgid "invalid password packet size" +msgstr "유효하지 않은 암호 패킷 사이즈" + +#: libpq/auth.c:707 +#, c-format +msgid "empty password returned by client" +msgstr "비어있는 암호는 클라이언트에 의해 돌려보냈습니다" + +#: libpq/auth.c:827 libpq/hba.c:1325 #, c-format msgid "" "MD5 authentication is not supported when \"db_user_namespace\" is enabled" msgstr "\"db_user_namespace\"가 사용 가능한 경우 MD5 인증은 지원되지 않음" -#: libpq/auth.c:670 +#: libpq/auth.c:833 #, c-format -msgid "expected password response, got message type %d" -msgstr "메시지 타입 %d를 얻는 예상된 암호 응답" +msgid "could not generate random MD5 salt" +msgstr "무작위 MD5 솔트 생성 실패" -#: libpq/auth.c:698 +#: libpq/auth.c:878 #, c-format -msgid "invalid password packet size" -msgstr "유효하지 않은 암호 패킷 사이즈" +msgid "SASL authentication is not supported in protocol version 2" +msgstr "프로토콜 버전 2에서는 SASL 인증을 지원되지 않음" + +#: libpq/auth.c:920 +#, c-format +msgid "expected SASL response, got message type %d" +msgstr "SASL 응답이 필요한데 메시지 형식 %d을(를) 받음" -#: libpq/auth.c:828 +#: libpq/auth.c:957 +#, c-format +msgid "client selected an invalid SASL authentication mechanism" +msgstr "클라이언트가 잘못된 SASL 인증 메카니즘을 선택했음" + +#: libpq/auth.c:1104 #, c-format msgid "GSSAPI is not supported in protocol version 2" msgstr "프로토콜 버전 2에서는 GSSAPI가 지원되지 않음" -#: libpq/auth.c:888 +#: libpq/auth.c:1164 #, c-format msgid "expected GSS response, got message type %d" msgstr "GSS 응답이 필요한데 메시지 형식 %d을(를) 받음" -#: libpq/auth.c:949 +#: libpq/auth.c:1226 msgid "accepting GSS security context failed" msgstr "GSS 보안 컨텍스트를 수락하지 못함" -#: libpq/auth.c:975 +#: libpq/auth.c:1252 msgid "retrieving GSS user name failed" msgstr "GSS 사용자 이름을 검색하지 못함" -#: libpq/auth.c:1094 +#: libpq/auth.c:1372 #, c-format msgid "SSPI is not supported in protocol version 2" msgstr "프로토콜 버전 2에서는 SSPI가 지원되지 않음" -#: libpq/auth.c:1109 +#: libpq/auth.c:1387 msgid "could not acquire SSPI credentials" msgstr "SSPI 자격 증명을 가져올 수 없음" -#: libpq/auth.c:1127 +#: libpq/auth.c:1405 #, c-format msgid "expected SSPI response, got message type %d" msgstr "SSPI 응답이 필요한데 메시지 형식 %d을(를) 받음" -#: libpq/auth.c:1199 +#: libpq/auth.c:1478 msgid "could not accept SSPI security context" msgstr "SSPI 보안 컨텍스트를 수락할 수 없음" -#: libpq/auth.c:1261 +#: libpq/auth.c:1540 msgid "could not get token from SSPI security context" msgstr "SSPI 보안 컨텍스트에서 토큰을 가져올 수 없음" -#: libpq/auth.c:1380 libpq/auth.c:1399 +#: libpq/auth.c:1659 libpq/auth.c:1678 #, 
c-format msgid "could not translate name" msgstr "이름을 변환할 수 없음" -#: libpq/auth.c:1412 +#: libpq/auth.c:1691 #, c-format msgid "realm name too long" msgstr "realm 이름이 너무 긺" -#: libpq/auth.c:1427 +#: libpq/auth.c:1706 #, c-format msgid "translated account name too long" msgstr "변환된 접속자 이름이 너무 깁니다" -#: libpq/auth.c:1613 +#: libpq/auth.c:1892 #, c-format msgid "could not create socket for Ident connection: %m" msgstr "Ident 연결에 소켓을 생성할 수 없습니다: %m" -#: libpq/auth.c:1628 +#: libpq/auth.c:1907 #, c-format msgid "could not bind to local address \"%s\": %m" msgstr "로컬 주소 \"%s\"에 바인드할 수 없습니다: %m" -#: libpq/auth.c:1640 +#: libpq/auth.c:1919 #, c-format msgid "could not connect to Ident server at address \"%s\", port %s: %m" msgstr "주소 \"%s\", 포트 %s의 Ident 서버에게 연결할 수 없습니다: %m" -#: libpq/auth.c:1662 +#: libpq/auth.c:1941 #, c-format msgid "could not send query to Ident server at address \"%s\", port %s: %m" msgstr "주소 \"%s\", 포트 %s의 Ident 서버에게 질의를 보낼 수 없습니다: %m" -#: libpq/auth.c:1679 +#: libpq/auth.c:1958 #, c-format msgid "" "could not receive response from Ident server at address \"%s\", port %s: %m" msgstr "주소 \"%s\", 포트 %s의 Ident 서버로부터 응답을 받지 못했습니다: %m" -#: libpq/auth.c:1689 +#: libpq/auth.c:1968 #, c-format msgid "invalidly formatted response from Ident server: \"%s\"" msgstr "Ident 서버로부터 잘못된 형태의 응답를 보냈습니다: \"%s\"" -#: libpq/auth.c:1729 +#: libpq/auth.c:2008 #, c-format msgid "peer authentication is not supported on this platform" msgstr "이 플랫폼에서는 peer 인증이 지원되지 않음" -#: libpq/auth.c:1733 +#: libpq/auth.c:2012 #, c-format msgid "could not get peer credentials: %m" msgstr "신뢰성 피어를 얻을 수 없습니다: %m" -#: libpq/auth.c:1742 +#: libpq/auth.c:2021 #, c-format msgid "could not look up local user ID %ld: %s" msgstr "UID %ld 해당하는 사용자를 찾을 수 없음: %s" -#: libpq/auth.c:1826 libpq/auth.c:2152 libpq/auth.c:2512 -#, c-format -msgid "empty password returned by client" -msgstr "비어있는 암호는 클라이언트에 의해 돌려보냈습니다" - -#: libpq/auth.c:1836 +#: libpq/auth.c:2109 #, c-format msgid "error from underlying PAM layer: %s" msgstr "잠재적인 PAM 레이어에서의 에러: %s" -#: libpq/auth.c:1917 +#: libpq/auth.c:2190 #, c-format msgid "could not create PAM authenticator: %s" msgstr "PAM 인증자를 생성할 수 없습니다: %s" -#: libpq/auth.c:1928 +#: libpq/auth.c:2201 #, c-format msgid "pam_set_item(PAM_USER) failed: %s" msgstr "pam_set_item(PAM_USER) 실패: %s" -#: libpq/auth.c:1939 +#: libpq/auth.c:2212 #, c-format msgid "pam_set_item(PAM_RHOST) failed: %s" msgstr "pam_set_item(PAM_RHOST) 실패: %s" -#: libpq/auth.c:1950 +#: libpq/auth.c:2223 #, c-format msgid "pam_set_item(PAM_CONV) failed: %s" msgstr "pam_set_item(PAM_CONV) 실패: %s" -#: libpq/auth.c:1961 +#: libpq/auth.c:2234 #, c-format msgid "pam_authenticate failed: %s" msgstr "PAM 인증 실패: %s" -#: libpq/auth.c:1972 +#: libpq/auth.c:2245 #, c-format msgid "pam_acct_mgmt failed: %s" msgstr "pam_acct_mgmt 실패: %s" -#: libpq/auth.c:1983 +#: libpq/auth.c:2256 #, c-format msgid "could not release PAM authenticator: %s" msgstr "PAM 인증자를 릴리즈할 수 없습니다: %s" -#: libpq/auth.c:2048 +#: libpq/auth.c:2323 #, c-format msgid "could not initialize LDAP: %m" msgstr "LDAP 초기화 실패: %m" -#: libpq/auth.c:2051 +#: libpq/auth.c:2326 #, c-format msgid "could not initialize LDAP: error code %d" msgstr "LDAP 초기화 실패: 오류번호 %d" -#: libpq/auth.c:2061 +#: libpq/auth.c:2336 #, c-format msgid "could not set LDAP protocol version: %s" msgstr "LDAP 프로토콜 버전을 지정할 수 없음: %s" -#: libpq/auth.c:2090 +#: libpq/auth.c:2365 #, c-format msgid "could not load wldap32.dll" msgstr "could not load wldap32.dll" -#: libpq/auth.c:2098 +#: libpq/auth.c:2373 #, c-format msgid "could not 
load function _ldap_start_tls_sA in wldap32.dll" msgstr "could not load function _ldap_start_tls_sA in wldap32.dll" -#: libpq/auth.c:2099 +#: libpq/auth.c:2374 #, c-format msgid "LDAP over SSL is not supported on this platform." msgstr "이 플랫폼에서는 SSL을 이용한 LDAP 기능을 지원하지 않음." -#: libpq/auth.c:2114 +#: libpq/auth.c:2389 #, c-format msgid "could not start LDAP TLS session: %s" msgstr "LDAP TLS 세션을 시작할 수 없음: %s" -#: libpq/auth.c:2136 +#: libpq/auth.c:2411 #, c-format msgid "LDAP server not specified" msgstr "LDAP 서버가 지정되지 않음" -#: libpq/auth.c:2189 +#: libpq/auth.c:2460 #, c-format msgid "invalid character in user name for LDAP authentication" msgstr "LDAP 인증을 위한 사용자 이름에 사용할 수 없는 문자가 있습니다" -#: libpq/auth.c:2204 +#: libpq/auth.c:2476 #, c-format msgid "" "could not perform initial LDAP bind for ldapbinddn \"%s\" on server \"%s\": " @@ -11572,50 +12589,50 @@ msgstr "" "\"%s\" ldapbinddn (해당 서버: \"%s\") 설정에 대한 LDAP 바인드 초기화를 할 수 " "없음: %s" -#: libpq/auth.c:2228 +#: libpq/auth.c:2501 #, c-format msgid "could not search LDAP for filter \"%s\" on server \"%s\": %s" msgstr "\"%s\" 필터로 LDAP 검색 실패함, 대상 서버: \"%s\": %s" -#: libpq/auth.c:2239 +#: libpq/auth.c:2513 #, c-format msgid "LDAP user \"%s\" does not exist" msgstr "\"%s\" LDAP 사용자가 없음" -#: libpq/auth.c:2240 +#: libpq/auth.c:2514 #, c-format msgid "LDAP search for filter \"%s\" on server \"%s\" returned no entries." msgstr "\"%s\" 필터로 \"%s\" 서버에서 LDAP 검색을 했으나, 해당 자료가 없음" -#: libpq/auth.c:2244 +#: libpq/auth.c:2518 #, c-format msgid "LDAP user \"%s\" is not unique" msgstr "\"%s\" LDAP 사용자가 유일하지 않습니다" -#: libpq/auth.c:2245 +#: libpq/auth.c:2519 #, c-format msgid "LDAP search for filter \"%s\" on server \"%s\" returned %d entry." msgid_plural "" "LDAP search for filter \"%s\" on server \"%s\" returned %d entries." msgstr[0] "\"%s\" 필터로 \"%s\" 서버에서 LDAP 검색 결과 %d 항목을 반환함" -#: libpq/auth.c:2263 +#: libpq/auth.c:2538 #, c-format msgid "" "could not get dn for the first entry matching \"%s\" on server \"%s\": %s" msgstr "\"%s\" 첫번째 항목 조회용 dn 값을 \"%s\" 서버에서 찾을 수 없음: %s" -#: libpq/auth.c:2283 +#: libpq/auth.c:2559 #, c-format msgid "could not unbind after searching for user \"%s\" on server \"%s\": %s" msgstr "\"%s\" 사용자 검색 후 unbind 작업을 \"%s\" 서버에서 할 수 없음: %s" -#: libpq/auth.c:2313 +#: libpq/auth.c:2591 #, c-format msgid "LDAP login failed for user \"%s\" on server \"%s\": %s" msgstr "\"%s\" 사용자의 \"%s\" LDAP 서버 로그인 실패: %s" -#: libpq/auth.c:2341 +#: libpq/auth.c:2621 #, c-format msgid "" "certificate authentication failed for user \"%s\": client certificate " @@ -11624,104 +12641,104 @@ msgstr "" "\"%s\" 사용자에 대한 인증서 로그인 실패: 클라이언트 인증서에 사용자 이름이 없" "음" -#: libpq/auth.c:2468 +#: libpq/auth.c:2724 #, c-format msgid "RADIUS server not specified" msgstr "RADIUS 서버가 지정되지 않음" -#: libpq/auth.c:2475 +#: libpq/auth.c:2731 #, c-format msgid "RADIUS secret not specified" msgstr "RADIUS 비밀키가 지정되지 않음" -#: libpq/auth.c:2491 libpq/hba.c:1632 -#, c-format -msgid "could not translate RADIUS server name \"%s\" to address: %s" -msgstr "\"%s\" RADIUS 서버 이름을 주소로 바꿀 수 없음: %s" - -#: libpq/auth.c:2519 +#: libpq/auth.c:2745 #, c-format msgid "" "RADIUS authentication does not support passwords longer than %d characters" msgstr "RADIUS 인증은 %d 글자 보다 큰 비밀번호 인증을 지원하지 않습니다" -#: libpq/auth.c:2531 +#: libpq/auth.c:2850 libpq/hba.c:1876 +#, c-format +msgid "could not translate RADIUS server name \"%s\" to address: %s" +msgstr "\"%s\" RADIUS 서버 이름을 주소로 바꿀 수 없음: %s" + +#: libpq/auth.c:2864 #, c-format msgid "could not generate random encryption vector" msgstr "무작위 암호화 벡터를 만들 수 없음" -#: 
libpq/auth.c:2569 +#: libpq/auth.c:2898 #, c-format msgid "could not perform MD5 encryption of password" msgstr "비밀번호의 MD5 암호를 만들 수 없음" # translator: %s is IPv4, IPv6, or Unix -#: libpq/auth.c:2594 +#: libpq/auth.c:2924 #, c-format msgid "could not create RADIUS socket: %m" msgstr "RADIUS 소켓을 생성할 수 없습니다: %m" # translator: %s is IPv4, IPv6, or Unix -#: libpq/auth.c:2615 +#: libpq/auth.c:2946 #, c-format msgid "could not bind local RADIUS socket: %m" msgstr "RADIUS 소켓에 바인드할 수 없습니다: %m" -#: libpq/auth.c:2625 +#: libpq/auth.c:2956 #, c-format msgid "could not send RADIUS packet: %m" msgstr "RADIUS 패킷을 보낼 수 없음: %m" -#: libpq/auth.c:2658 libpq/auth.c:2683 +#: libpq/auth.c:2989 libpq/auth.c:3015 #, c-format -msgid "timeout waiting for RADIUS response" -msgstr "서버 시작을 기다리는 동안 시간 초과됨" +msgid "timeout waiting for RADIUS response from %s" +msgstr "%s 에서 RADIUS 응답 대기 시간 초과" # translator: %s is IPv4, IPv6, or Unix -#: libpq/auth.c:2676 +#: libpq/auth.c:3008 #, c-format msgid "could not check status on RADIUS socket: %m" msgstr "RADIUS 소켓 상태를 확인할 수 없음: %m" -#: libpq/auth.c:2705 +#: libpq/auth.c:3038 #, c-format msgid "could not read RADIUS response: %m" msgstr "RADIUS 응답을 읽을 수 없음: %m" -#: libpq/auth.c:2717 libpq/auth.c:2721 +#: libpq/auth.c:3051 libpq/auth.c:3055 #, c-format -msgid "RADIUS response was sent from incorrect port: %d" -msgstr "RADIUS 응답이 바르지 않은 포트로부터 보내졌음: %d" +msgid "RADIUS response from %s was sent from incorrect port: %d" +msgstr "%s에서 RADIUS 응답이 바르지 않은 포트로부터 보내졌음: %d" -#: libpq/auth.c:2730 +#: libpq/auth.c:3064 #, c-format -msgid "RADIUS response too short: %d" -msgstr "RADIUS 응답이 너무 짧음: %d" +msgid "RADIUS response from %s too short: %d" +msgstr "%s에서 RADIUS 응답이 너무 짧음: %d" -#: libpq/auth.c:2737 +#: libpq/auth.c:3071 #, c-format -msgid "RADIUS response has corrupt length: %d (actual length %d)" -msgstr "RADIUS 응답 길이가 이상함: %d (실재 길이: %d)" +msgid "RADIUS response from %s has corrupt length: %d (actual length %d)" +msgstr "%s에서 RADIUS 응답 길이가 이상함: %d (실재 길이: %d)" -#: libpq/auth.c:2745 +#: libpq/auth.c:3079 #, c-format -msgid "RADIUS response is to a different request: %d (should be %d)" -msgstr "RADIUS 응답이 요청과 다름: %d (기대값: %d)" +msgid "RADIUS response from %s is to a different request: %d (should be %d)" +msgstr "%s에서 RADIUS 응답이 요청과 다름: %d (기대값: %d)" -#: libpq/auth.c:2770 +#: libpq/auth.c:3104 #, c-format msgid "could not perform MD5 encryption of received packet" msgstr "받은 패킷을 대상으로 MD5 암호화 작업할 수 없음" -#: libpq/auth.c:2779 +#: libpq/auth.c:3113 #, c-format -msgid "RADIUS response has incorrect MD5 signature" -msgstr "RADIUS 응답의 MD5 값이 이상함" +msgid "RADIUS response from %s has incorrect MD5 signature" +msgstr "%s에서 RADIUS 응답의 MD5 값이 이상함" -#: libpq/auth.c:2796 +#: libpq/auth.c:3131 #, c-format -msgid "RADIUS response has invalid code (%d) for user \"%s\"" -msgstr "RADIUS 응답이 바르지 않은 값임 (%d), 대상 사용자: \"%s\"" +msgid "RADIUS response from %s has invalid code (%d) for user \"%s\"" +msgstr "%s에서 RADIUS 응답이 바르지 않은 값임 (%d), 대상 사용자: \"%s\"" #: libpq/be-fsstubs.c:132 libpq/be-fsstubs.c:163 libpq/be-fsstubs.c:197 #: libpq/be-fsstubs.c:237 libpq/be-fsstubs.c:262 libpq/be-fsstubs.c:310 @@ -11731,7 +12748,7 @@ msgid "invalid large-object descriptor: %d" msgstr "유효하지 않은 대형 객체 설명: %d" #: libpq/be-fsstubs.c:178 libpq/be-fsstubs.c:216 libpq/be-fsstubs.c:600 -#: libpq/be-fsstubs.c:788 +#: libpq/be-fsstubs.c:788 libpq/be-fsstubs.c:908 #, c-format msgid "permission denied for large object %u" msgstr "%u 대형 객체에 대한 접근 권한 없음" @@ -11798,43 +12815,43 @@ msgstr "서버 파일 \"%s\"에 쓸 수 없습니다: %m" msgid "large object read 
request is too large" msgstr "대형 객체 읽기 요청이 너무 큽니다" -#: libpq/be-fsstubs.c:855 utils/adt/genfile.c:211 utils/adt/genfile.c:252 +#: libpq/be-fsstubs.c:855 utils/adt/genfile.c:212 utils/adt/genfile.c:253 #, c-format msgid "requested length cannot be negative" msgstr "요청한 길이는 음수일 수 없음" -#: libpq/be-secure-openssl.c:189 +#: libpq/be-secure-openssl.c:166 #, c-format msgid "could not create SSL context: %s" msgstr "SSL 컨텍스트 정보를 생성할 수 없습니다: %s" -#: libpq/be-secure-openssl.c:205 +#: libpq/be-secure-openssl.c:194 #, c-format msgid "could not load server certificate file \"%s\": %s" msgstr "서버 인증서 파일 \"%s\"을 불러들일 수 없습니다: %s" -#: libpq/be-secure-openssl.c:211 +#: libpq/be-secure-openssl.c:203 #, c-format msgid "could not access private key file \"%s\": %m" msgstr "비밀키 \"%s\"에 액세스할 수 없습니다: %m" -#: libpq/be-secure-openssl.c:217 +#: libpq/be-secure-openssl.c:212 #, c-format msgid "private key file \"%s\" is not a regular file" msgstr "\"%s\" 개인 키 파일은 일반 파일이 아님" -#: libpq/be-secure-openssl.c:229 +#: libpq/be-secure-openssl.c:227 #, c-format msgid "private key file \"%s\" must be owned by the database user or root" msgstr "" "\"%s\" 개인 키 파일의 소유주는 데이터베이스 사용자이거나 root 여야 합니다." -#: libpq/be-secure-openssl.c:249 +#: libpq/be-secure-openssl.c:250 #, c-format msgid "private key file \"%s\" has group or world access" msgstr "\"%s\" 개인 키 파일에 그룹 또는 익명 액세스 권한이 있음" -#: libpq/be-secure-openssl.c:251 +#: libpq/be-secure-openssl.c:252 #, c-format msgid "" "File must have permissions u=rw (0600) or less if owned by the database " @@ -11844,336 +12861,397 @@ msgstr "" "(0600) 또는 더 작게 설정하고, root가 소유주라면 u=rw,g=r (0640) 권한으로 지정" "하세요" -#: libpq/be-secure-openssl.c:258 +#: libpq/be-secure-openssl.c:269 +#, c-format +msgid "" +"private key file \"%s\" cannot be reloaded because it requires a passphrase" +msgstr "" +"\"%s\" 개인 키 파일은 비밀번호를 입력해야 해서 자동으로 다시 불러올 수 없습니" +"다." + +#: libpq/be-secure-openssl.c:274 #, c-format msgid "could not load private key file \"%s\": %s" msgstr "비밀키 파일 \"%s\"을 불러들일 수 없습니다: %s" -#: libpq/be-secure-openssl.c:263 +#: libpq/be-secure-openssl.c:283 #, c-format msgid "check of private key failed: %s" msgstr "비밀키의 확인 실패: %s" -#: libpq/be-secure-openssl.c:292 +#: libpq/be-secure-openssl.c:310 +#, c-format +msgid "could not set the cipher list (no valid ciphers available)" +msgstr "" + +#: libpq/be-secure-openssl.c:328 #, c-format msgid "could not load root certificate file \"%s\": %s" msgstr "root 인증서 파일 \"%s\"을 불러들일 수 없습니다: %s" -#: libpq/be-secure-openssl.c:316 +#: libpq/be-secure-openssl.c:355 #, c-format msgid "SSL certificate revocation list file \"%s\" ignored" msgstr "\"%s\" SSL 인증서 파기 목록 파일이 무시되었음" -#: libpq/be-secure-openssl.c:318 +#: libpq/be-secure-openssl.c:357 #, c-format msgid "SSL library does not support certificate revocation lists." msgstr "SSL 라이브러리가 인증서 파기 목록을 지원하지 않습니다." 
-#: libpq/be-secure-openssl.c:323 +#: libpq/be-secure-openssl.c:364 #, c-format msgid "could not load SSL certificate revocation list file \"%s\": %s" msgstr "\"%s\" SSL 인증서 회수 목록 파일을 불러들일 수 없습니다: %s" -#: libpq/be-secure-openssl.c:370 +#: libpq/be-secure-openssl.c:445 +#, c-format +msgid "could not initialize SSL connection: SSL context not set up" +msgstr "SSL연결을 초기화할 수 없습니다: SSL 컨텍스트를 설정 못함" + +#: libpq/be-secure-openssl.c:453 #, c-format msgid "could not initialize SSL connection: %s" msgstr "SSL연결을 초기화할 수 없습니다: %s" -#: libpq/be-secure-openssl.c:378 +#: libpq/be-secure-openssl.c:461 #, c-format msgid "could not set SSL socket: %s" msgstr "SSL 소켓을 지정할 수 없습니다: %s" -#: libpq/be-secure-openssl.c:432 +#: libpq/be-secure-openssl.c:516 #, c-format msgid "could not accept SSL connection: %m" msgstr "SSL 연결을 받아드릴 수 없습니다: %m" -#: libpq/be-secure-openssl.c:436 libpq/be-secure-openssl.c:447 +#: libpq/be-secure-openssl.c:520 libpq/be-secure-openssl.c:531 #, c-format msgid "could not accept SSL connection: EOF detected" msgstr "SSL 연결을 받아드릴 수 없습니다: EOF 감지됨" -#: libpq/be-secure-openssl.c:441 +#: libpq/be-secure-openssl.c:525 #, c-format msgid "could not accept SSL connection: %s" msgstr "SSL 연결을 받아드릴 수 없습니다: %s" -#: libpq/be-secure-openssl.c:452 libpq/be-secure-openssl.c:593 -#: libpq/be-secure-openssl.c:653 +#: libpq/be-secure-openssl.c:536 libpq/be-secure-openssl.c:677 +#: libpq/be-secure-openssl.c:744 #, c-format msgid "unrecognized SSL error code: %d" msgstr "인식되지 않은 SSL 에러 코드 %d" -#: libpq/be-secure-openssl.c:496 +#: libpq/be-secure-openssl.c:578 #, c-format msgid "SSL certificate's common name contains embedded null" msgstr "SSL 인증서의 일반 이름에 포함된 null이 있음" -#: libpq/be-secure-openssl.c:507 +#: libpq/be-secure-openssl.c:589 #, c-format msgid "SSL connection from \"%s\"" msgstr "\"%s\" 로부터의 SSL 연결" -#: libpq/be-secure-openssl.c:584 libpq/be-secure-openssl.c:644 +#: libpq/be-secure-openssl.c:666 libpq/be-secure-openssl.c:728 #, c-format msgid "SSL error: %s" msgstr "SSL 에러: %s" -#: libpq/be-secure-openssl.c:1055 +#: libpq/be-secure-openssl.c:909 +#, c-format +msgid "could not open DH parameters file \"%s\": %m" +msgstr "\"%s\" DH 매개 변수 파일을 열 수 없습니다: %m" + +#: libpq/be-secure-openssl.c:921 +#, c-format +msgid "could not load DH parameters file: %s" +msgstr "DH 매개 변수 파일을 불러들일 수 없습니다: %s" + +#: libpq/be-secure-openssl.c:931 +#, c-format +msgid "invalid DH parameters: %s" +msgstr "잘못된 DH 매개 변수: %s" + +#: libpq/be-secure-openssl.c:939 +#, c-format +msgid "invalid DH parameters: p is not prime" +msgstr "잘못된 DH 매개 변수값: p는 prime 아님" + +#: libpq/be-secure-openssl.c:947 +#, c-format +msgid "invalid DH parameters: neither suitable generator or safe prime" +msgstr "" + +#: libpq/be-secure-openssl.c:1088 +#, c-format +msgid "DH: could not load DH parameters" +msgstr "DH: DH 매개 변수 불러오기 실패" + +#: libpq/be-secure-openssl.c:1096 +#, c-format +msgid "DH: could not set DH parameters: %s" +msgstr "DH: DH 매개 변수 설정 실패: %s" + +#: libpq/be-secure-openssl.c:1120 #, c-format msgid "ECDH: unrecognized curve name: %s" msgstr "ECDH: 알 수 없는 curve 이름: %s" -#: libpq/be-secure-openssl.c:1060 +#: libpq/be-secure-openssl.c:1129 #, c-format msgid "ECDH: could not create key" msgstr "ECDH: 키 생성 실패" -#: libpq/be-secure-openssl.c:1084 +#: libpq/be-secure-openssl.c:1157 msgid "no SSL error reported" msgstr "SSL 오류 없음" -#: libpq/be-secure-openssl.c:1088 +#: libpq/be-secure-openssl.c:1161 #, c-format msgid "SSL error code %lu" msgstr "SSL 오류 번호 %lu" -#: libpq/be-secure.c:171 libpq/be-secure.c:256 +#: libpq/be-secure.c:189 
libpq/be-secure.c:275 #, c-format msgid "terminating connection due to unexpected postmaster exit" msgstr "postmaster의 예상치 못한 종료로 연결을 종료합니다" -#: libpq/crypt.c:54 +#: libpq/crypt.c:51 #, c-format msgid "Role \"%s\" does not exist." msgstr "\"%s\" 롤 없음" -#: libpq/crypt.c:64 +#: libpq/crypt.c:61 #, c-format msgid "User \"%s\" has no password assigned." msgstr "\"%s\" 사용자 비밀번호가 아직 할당되지 않음" #: libpq/crypt.c:79 #, c-format -msgid "User \"%s\" has an empty password." -msgstr "\"%s\" 사용자 비밀번호가 설정되어 있지 않습니다." - -#: libpq/crypt.c:159 -#, c-format msgid "User \"%s\" has an expired password." msgstr "\"%s\" 사용자 비밀번호가 기한 만료되었습니다." -#: libpq/crypt.c:167 +#: libpq/crypt.c:173 +#, c-format +msgid "User \"%s\" has a password that cannot be used with MD5 authentication." +msgstr "" + +#: libpq/crypt.c:197 libpq/crypt.c:238 libpq/crypt.c:262 #, c-format msgid "Password does not match for user \"%s\"." msgstr "\"%s\" 사용자의 비밀번호가 틀립니다." -#: libpq/hba.c:188 +#: libpq/crypt.c:281 +#, c-format +msgid "Password of user \"%s\" is in unrecognized format." +msgstr "" + +#: libpq/hba.c:235 #, c-format msgid "authentication file token too long, skipping: \"%s\"" msgstr "인증 파일의 토큰이 너무 길어서 건너뜁니다: \"%s\"" -#: libpq/hba.c:332 +#: libpq/hba.c:407 #, c-format msgid "could not open secondary authentication file \"@%s\" as \"%s\": %m" msgstr "2차 인증파일 \"%s\"으로 \"@%s\"를 열 수 없다: %m" -#: libpq/hba.c:407 +#: libpq/hba.c:509 #, c-format msgid "authentication file line too long" msgstr "인증 파일 줄이 너무 깁니다" -#: libpq/hba.c:408 libpq/hba.c:755 libpq/hba.c:771 libpq/hba.c:801 -#: libpq/hba.c:847 libpq/hba.c:860 libpq/hba.c:882 libpq/hba.c:891 -#: libpq/hba.c:912 libpq/hba.c:924 libpq/hba.c:943 libpq/hba.c:964 -#: libpq/hba.c:975 libpq/hba.c:1030 libpq/hba.c:1048 libpq/hba.c:1060 -#: libpq/hba.c:1077 libpq/hba.c:1087 libpq/hba.c:1101 libpq/hba.c:1117 -#: libpq/hba.c:1132 libpq/hba.c:1143 libpq/hba.c:1179 libpq/hba.c:1217 -#: libpq/hba.c:1228 libpq/hba.c:1248 libpq/hba.c:1259 libpq/hba.c:1276 -#: libpq/hba.c:1325 libpq/hba.c:1362 libpq/hba.c:1372 libpq/hba.c:1428 -#: libpq/hba.c:1440 libpq/hba.c:1453 libpq/hba.c:1545 libpq/hba.c:1634 -#: libpq/hba.c:1652 libpq/hba.c:1673 tsearch/ts_locale.c:182 +#: libpq/hba.c:510 libpq/hba.c:867 libpq/hba.c:887 libpq/hba.c:925 +#: libpq/hba.c:975 libpq/hba.c:989 libpq/hba.c:1011 libpq/hba.c:1020 +#: libpq/hba.c:1041 libpq/hba.c:1054 libpq/hba.c:1074 libpq/hba.c:1096 +#: libpq/hba.c:1108 libpq/hba.c:1164 libpq/hba.c:1184 libpq/hba.c:1198 +#: libpq/hba.c:1217 libpq/hba.c:1228 libpq/hba.c:1243 libpq/hba.c:1261 +#: libpq/hba.c:1277 libpq/hba.c:1289 libpq/hba.c:1326 libpq/hba.c:1367 +#: libpq/hba.c:1380 libpq/hba.c:1402 libpq/hba.c:1414 libpq/hba.c:1432 +#: libpq/hba.c:1482 libpq/hba.c:1521 libpq/hba.c:1532 libpq/hba.c:1549 +#: libpq/hba.c:1559 libpq/hba.c:1617 libpq/hba.c:1655 libpq/hba.c:1671 +#: libpq/hba.c:1770 libpq/hba.c:1859 libpq/hba.c:1878 libpq/hba.c:1907 +#: libpq/hba.c:1920 libpq/hba.c:1943 libpq/hba.c:1965 libpq/hba.c:1979 +#: tsearch/ts_locale.c:182 #, c-format msgid "line %d of configuration file \"%s\"" msgstr "%d번째 줄(\"%s\" 환경 설정 파일)" #. 
translator: the second %s is a list of auth methods -#: libpq/hba.c:753 +#: libpq/hba.c:865 #, c-format msgid "" "authentication option \"%s\" is only valid for authentication methods %s" msgstr "\"%s\" 인증 옵션은 %s 인증 방법에만 유효함" -#: libpq/hba.c:769 +#: libpq/hba.c:885 #, c-format msgid "authentication method \"%s\" requires argument \"%s\" to be set" msgstr "\"%s\" 인증 방법의 경우 \"%s\" 인자를 설정해야 함" -#: libpq/hba.c:790 +#: libpq/hba.c:913 #, c-format msgid "missing entry in file \"%s\" at end of line %d" msgstr "\"%s\" 파일의 %d번째 줄의 끝 라인에 빠진 엔트리가 있습니다 " -#: libpq/hba.c:800 +#: libpq/hba.c:924 #, c-format msgid "multiple values in ident field" msgstr "ident 자리에 여러 값이 있음" -#: libpq/hba.c:845 +#: libpq/hba.c:973 #, c-format msgid "multiple values specified for connection type" msgstr "연결 형식 자리에 여러 값이 있음" -#: libpq/hba.c:846 +#: libpq/hba.c:974 #, c-format msgid "Specify exactly one connection type per line." msgstr "한 줄에 하나의 연결 형태만 지정해야 합니다" -#: libpq/hba.c:859 +#: libpq/hba.c:988 #, c-format msgid "local connections are not supported by this build" msgstr "로컬 접속 기능을 뺀 채로 서버가 만들어졌습니다." -#: libpq/hba.c:880 +#: libpq/hba.c:1009 #, c-format -msgid "hostssl requires SSL to be turned on" -msgstr "hostssl 접속은 SSL 기능이 활성화 되어 있어야 합니다" +msgid "hostssl record cannot match because SSL is disabled" +msgstr "" -#: libpq/hba.c:881 +#: libpq/hba.c:1010 #, c-format msgid "Set ssl = on in postgresql.conf." msgstr "postgresql.conf 파일에 ssl = on 설정을 하세요." -#: libpq/hba.c:889 +#: libpq/hba.c:1018 #, c-format -msgid "hostssl is not supported by this build" -msgstr "이 서버는 hostssl 접속 기능을 지원하지 않습니다." +msgid "hostssl record cannot match because SSL is not supported by this build" +msgstr "" +"이 서버는 ssl 접속 기능을 지원하지 않아 hostssl 인증을 지원하지 않습니다." -#: libpq/hba.c:890 +#: libpq/hba.c:1019 #, c-format msgid "Compile with --with-openssl to use SSL connections." msgstr "" "SSL 연결을 사용하기 위해 --enable-ssl 옵션을 사용해서 서버를 다시 컴파일 하세" "요" -#: libpq/hba.c:910 +#: libpq/hba.c:1039 #, c-format msgid "invalid connection type \"%s\"" msgstr "\"%s\" 값은 잘못된 연결 형식입니다" -#: libpq/hba.c:923 +#: libpq/hba.c:1053 #, c-format msgid "end-of-line before database specification" msgstr "데이터베이스 지정 전에 줄 끝에 도달함" -#: libpq/hba.c:942 +#: libpq/hba.c:1073 #, c-format msgid "end-of-line before role specification" msgstr "롤 지정 전에 줄 끝에 도달함" -#: libpq/hba.c:963 +#: libpq/hba.c:1095 #, c-format msgid "end-of-line before IP address specification" msgstr "IP 주소 지정 전에 줄 끝에 도달함" -#: libpq/hba.c:973 +#: libpq/hba.c:1106 #, c-format msgid "multiple values specified for host address" msgstr "호스트 주소 부분에 여러 값이 지정됨" -#: libpq/hba.c:974 +#: libpq/hba.c:1107 #, c-format msgid "Specify one address range per line." msgstr "한 줄에 하나의 주소 범위가 있어야 합니다." -#: libpq/hba.c:1028 +#: libpq/hba.c:1162 #, c-format msgid "invalid IP address \"%s\": %s" msgstr "\"%s\" 형태는 잘못된 IP 주소 형태입니다: %s" -#: libpq/hba.c:1046 +#: libpq/hba.c:1182 #, c-format msgid "specifying both host name and CIDR mask is invalid: \"%s\"" msgstr "호스트 이름과 CIDR 마스크는 함께 쓸 수 없습니다: \"%s\"" -#: libpq/hba.c:1058 +#: libpq/hba.c:1196 #, c-format msgid "invalid CIDR mask in address \"%s\"" msgstr "\"%s\" 주소에 잘못된 CIDR 마스크가 있음" -#: libpq/hba.c:1075 +#: libpq/hba.c:1215 #, c-format msgid "end-of-line before netmask specification" msgstr "넷마스크 지정 전에 줄 끝에 도달함" -#: libpq/hba.c:1076 +#: libpq/hba.c:1216 #, c-format msgid "" "Specify an address range in CIDR notation, or provide a separate netmask." 
msgstr "주소 범위는 CIDR 표기법을 쓰거나 넷마스크 표기법을 쓰세요" -#: libpq/hba.c:1086 +#: libpq/hba.c:1227 #, c-format msgid "multiple values specified for netmask" msgstr "넷마스크 부분에 여러 값이 지정됨" -#: libpq/hba.c:1099 +#: libpq/hba.c:1241 #, c-format msgid "invalid IP mask \"%s\": %s" msgstr "잘못된 IP 마스크, \"%s\": %s" -#: libpq/hba.c:1116 +#: libpq/hba.c:1260 #, c-format msgid "IP address and mask do not match" msgstr "IP 주소와 마스크가 맞지 않습니다" -#: libpq/hba.c:1131 +#: libpq/hba.c:1276 #, c-format msgid "end-of-line before authentication method" msgstr "인증 방법 전에 줄 끝에 도달함" -#: libpq/hba.c:1141 +#: libpq/hba.c:1287 #, c-format msgid "multiple values specified for authentication type" msgstr "인증 방법 부분에 여러 값이 지정됨" -#: libpq/hba.c:1142 +#: libpq/hba.c:1288 #, c-format msgid "Specify exactly one authentication type per line." msgstr "하나의 인증 방법에 대해서 한 줄씩 지정해야 합니다" -#: libpq/hba.c:1215 +#: libpq/hba.c:1365 #, c-format msgid "invalid authentication method \"%s\"" msgstr "\"%s\" 인증 방법이 잘못됨" -#: libpq/hba.c:1226 +#: libpq/hba.c:1378 #, c-format msgid "invalid authentication method \"%s\": not supported by this build" msgstr "\"%s\" 인증 방법이 잘못됨: 이 서버에서 지원되지 않음" -#: libpq/hba.c:1247 +#: libpq/hba.c:1401 #, c-format msgid "gssapi authentication is not supported on local sockets" msgstr "gssapi 인증은 로컬 소켓에서 지원되지 않음" -#: libpq/hba.c:1258 +#: libpq/hba.c:1413 #, c-format msgid "peer authentication is only supported on local sockets" msgstr "peer 인증은 로컬 소켓에서만 지원함" -#: libpq/hba.c:1275 +#: libpq/hba.c:1431 #, c-format msgid "cert authentication is only supported on hostssl connections" msgstr "cert 인증은 hostssl 연결에서만 지원됨" -#: libpq/hba.c:1324 +#: libpq/hba.c:1481 #, c-format msgid "authentication option not in name=value format: %s" msgstr "인증 옵션이 이름=값 형태가 아님: %s" -#: libpq/hba.c:1361 +#: libpq/hba.c:1520 #, c-format msgid "" "cannot use ldapbasedn, ldapbinddn, ldapbindpasswd, ldapsearchattribute, or " @@ -12182,7 +13260,7 @@ msgstr "" "ldapbasedn, ldapbinddn, ldapbindpasswd, ldapsearchattribute, ldapurl 옵션은 " "ldapprefix 옵션과 함께 사용할 수 없음" -#: libpq/hba.c:1371 +#: libpq/hba.c:1531 #, c-format msgid "" "authentication method \"ldap\" requires argument \"ldapbasedn\", \"ldapprefix" @@ -12191,181 +13269,209 @@ msgstr "" "\"ldap\" 인증 방법의 경우 \"ldapbasedn\", \"ldapprefix\", \"ldapsuffix\"옵션" "이 있어야 함" -#: libpq/hba.c:1414 +#: libpq/hba.c:1548 +#, c-format +msgid "list of RADIUS servers cannot be empty" +msgstr "RADIUS 서버 목록은 비어 있을 수 없음" + +#: libpq/hba.c:1558 +#, c-format +msgid "list of RADIUS secrets cannot be empty" +msgstr "RADIUS 비밀키 목록은 비어 있을 수 없음" + +#: libpq/hba.c:1611 +#, c-format +msgid "the number of %s (%d) must be 1 or the same as the number of %s (%d)" +msgstr "서버 목록과 키 목록이 안 맞음: %s (%d) / %s (%d)" + +#: libpq/hba.c:1645 msgid "ident, peer, gssapi, sspi, and cert" msgstr "ident, peer, gssapi, sspi 및 cert" -#: libpq/hba.c:1427 +#: libpq/hba.c:1654 #, c-format msgid "clientcert can only be configured for \"hostssl\" rows" msgstr "clientcert는 \"hostssl\" 행에 대해서만 구성할 수 있음" -#: libpq/hba.c:1438 -#, c-format -msgid "" -"client certificates can only be checked if a root certificate store is " -"available" -msgstr "" -"루트 인증서 저장소가 사용 가능한 경우에만 클라이언트 인증서를 검사할 수 있음" - -#: libpq/hba.c:1452 +#: libpq/hba.c:1670 #, c-format msgid "clientcert can not be set to 0 when using \"cert\" authentication" msgstr "\"cert\" 인증을 사용하는 경우 clientcert를 0으로 설정할 수 없음" -#: libpq/hba.c:1488 +#: libpq/hba.c:1707 #, c-format msgid "could not parse LDAP URL \"%s\": %s" msgstr "\"%s\" LDAP URL을 분석할 수 없음: %s" -#: libpq/hba.c:1496 +#: libpq/hba.c:1717 #, c-format msgid 
"unsupported LDAP URL scheme: %s" msgstr "지원하지 않는 LDAP URL 스킴: %s" -#: libpq/hba.c:1512 +#: libpq/hba.c:1735 #, c-format msgid "filters not supported in LDAP URLs" msgstr "LDAP URL에서 필터 속성을 지원하지 않음" -#: libpq/hba.c:1520 +#: libpq/hba.c:1744 #, c-format msgid "LDAP URLs not supported on this platform" msgstr "이 플랫폼에서는 LDAP URL 기능을 지원하지 않음." -#: libpq/hba.c:1544 +#: libpq/hba.c:1769 #, c-format msgid "invalid LDAP port number: \"%s\"" msgstr "LDAP 포트 번호가 잘못됨: \"%s\"" -#: libpq/hba.c:1584 libpq/hba.c:1591 +#: libpq/hba.c:1810 libpq/hba.c:1817 msgid "gssapi and sspi" msgstr "gssapi 및 sspi" -#: libpq/hba.c:1600 libpq/hba.c:1609 +#: libpq/hba.c:1826 libpq/hba.c:1835 msgid "sspi" msgstr "sspi" -#: libpq/hba.c:1651 +#: libpq/hba.c:1857 +#, c-format +msgid "could not parse RADIUS server list \"%s\"" +msgstr "RADIUS 서버 목록 분석 실패: \"%s\"" + +#: libpq/hba.c:1905 +#, c-format +msgid "could not parse RADIUS port list \"%s\"" +msgstr "RADIUS 서버 포트 목록 분석 실패: \"%s\"" + +#: libpq/hba.c:1919 #, c-format msgid "invalid RADIUS port number: \"%s\"" msgstr "RADIUS 포트 번호가 잘못됨: \"%s\"" -#: libpq/hba.c:1671 +# translator: %s is IPv4, IPv6, or Unix +#: libpq/hba.c:1941 +#, c-format +msgid "could not parse RADIUS secret list \"%s\"" +msgstr "RADIUS 서버 비밀키 목록 분석 실패: \"%s\"" + +#: libpq/hba.c:1963 +#, c-format +msgid "could not parse RADIUS identifiers list \"%s\"" +msgstr "RADIUS 서버 식별자 목록 분석 실패: \"%s\"" + +#: libpq/hba.c:1977 #, c-format msgid "unrecognized authentication option name: \"%s\"" msgstr "알 수 없는 인증 옵션 이름: \"%s\"" -#: libpq/hba.c:1806 guc-file.l:594 +#: libpq/hba.c:2110 libpq/hba.c:2510 guc-file.l:594 #, c-format msgid "could not open configuration file \"%s\": %m" msgstr "\"%s\" 설정 파일 을 열수 없습니다: %m" -#: libpq/hba.c:1855 +#: libpq/hba.c:2161 #, c-format msgid "configuration file \"%s\" contains no entries" msgstr "\"%s\" 설정 파일에 구성 항목이 없음" -#: libpq/hba.c:1951 +#: libpq/hba.c:2666 #, c-format msgid "invalid regular expression \"%s\": %s" msgstr "\"%s\" 정규식이 잘못됨: %s" -#: libpq/hba.c:2011 +#: libpq/hba.c:2726 #, c-format msgid "regular expression match for \"%s\" failed: %s" msgstr "\"%s\"에 대한 정규식 일치 실패: %s" -#: libpq/hba.c:2030 +#: libpq/hba.c:2745 #, c-format msgid "" "regular expression \"%s\" has no subexpressions as requested by " "backreference in \"%s\"" msgstr "\"%s\" 정규식에는 \"%s\"의 backreference에서 요청된 하위 식이 없음" -#: libpq/hba.c:2127 +#: libpq/hba.c:2842 #, c-format msgid "provided user name (%s) and authenticated user name (%s) do not match" msgstr "제공된 사용자 이름(%s) 및 인증된 사용자 이름(%s)이 일치하지 않음" -#: libpq/hba.c:2147 +#: libpq/hba.c:2862 #, c-format msgid "no match in usermap \"%s\" for user \"%s\" authenticated as \"%s\"" msgstr "" "\"%s\" 사용자맵 파일에 \"%s\" 사용자를 \"%s\" 사용자로 인증할 설정이 없음" -#: libpq/hba.c:2182 +#: libpq/hba.c:2895 #, c-format msgid "could not open usermap file \"%s\": %m" msgstr "\"%s\" 사용자맵 파일을 열 수 없습니다: %m" -#: libpq/pqcomm.c:202 +#: libpq/pqcomm.c:220 #, c-format msgid "could not set socket to nonblocking mode: %m" msgstr "소켓을 nonblocking 모드로 지정할 수 없음: %m" -#: libpq/pqcomm.c:354 +#: libpq/pqcomm.c:374 #, c-format msgid "Unix-domain socket path \"%s\" is too long (maximum %d bytes)" msgstr "\"%s\" 유닉스 도메인 소켓 경로가 너무 깁니다 (최대 %d 바이트)" -#: libpq/pqcomm.c:375 +#: libpq/pqcomm.c:395 #, c-format msgid "could not translate host name \"%s\", service \"%s\" to address: %s" msgstr "호스트 이름 \"%s\", 서비스 \"%s\"를 변환할 수 없습니다. 주소 : %s" -#: libpq/pqcomm.c:379 +#: libpq/pqcomm.c:399 #, c-format msgid "could not translate service \"%s\" to address: %s" msgstr "서비스 \"%s\"를 변환할 수 없습니다. 
주소 : %s" -#: libpq/pqcomm.c:406 +#: libpq/pqcomm.c:426 #, c-format msgid "could not bind to all requested addresses: MAXLISTEN (%d) exceeded" msgstr "최대 접속자 수 MAXLISTEN (%d) 초과로 더 이상 접속이 불가능합니다" -#: libpq/pqcomm.c:415 +#: libpq/pqcomm.c:435 msgid "IPv4" msgstr "IPv4" -#: libpq/pqcomm.c:419 +#: libpq/pqcomm.c:439 msgid "IPv6" msgstr "IPv6" -#: libpq/pqcomm.c:424 +#: libpq/pqcomm.c:444 msgid "Unix" msgstr "유닉스" -#: libpq/pqcomm.c:429 +#: libpq/pqcomm.c:449 #, c-format msgid "unrecognized address family %d" msgstr "%d는 인식되지 않는 가족 주소입니다" -# translator: %s is IPv4, IPv6, or Unix -#. translator: %s is IPv4, IPv6, or Unix -#: libpq/pqcomm.c:440 +#. translator: first %s is IPv4, IPv6, or Unix +#: libpq/pqcomm.c:475 #, c-format -msgid "could not create %s socket: %m" -msgstr "%s 소켓을 생성할 수 없습니다: %m" +msgid "could not create %s socket for address \"%s\": %m" +msgstr "%s 소켓 만들기 실패, 대상 주소: \"%s\": %m" -#: libpq/pqcomm.c:465 +#. translator: first %s is IPv4, IPv6, or Unix +#: libpq/pqcomm.c:501 #, c-format -msgid "setsockopt(SO_REUSEADDR) failed: %m" -msgstr "setsockopt(SO_REUSEADDR) 실패: %m" +msgid "setsockopt(SO_REUSEADDR) failed for %s address \"%s\": %m" +msgstr "%s setsockopt(SO_REUSEADDR) 실패, 대상 주소: \"%s\": %m" -#: libpq/pqcomm.c:480 +#. translator: first %s is IPv4, IPv6, or Unix +#: libpq/pqcomm.c:518 #, c-format -msgid "setsockopt(IPV6_V6ONLY) failed: %m" -msgstr "setsockopt(IPV6_V6ONLY) 실패: %m" +msgid "setsockopt(IPV6_V6ONLY) failed for %s address \"%s\": %m" +msgstr "%s setsockopt(IPV6_V6ONLY) 실패, 대상 주소: \"%s\": %m" -# translator: %s is IPv4, IPv6, or Unix -#. translator: %s is IPv4, IPv6, or Unix -#: libpq/pqcomm.c:499 +#. translator: first %s is IPv4, IPv6, or Unix +#: libpq/pqcomm.c:538 #, c-format -msgid "could not bind %s socket: %m" -msgstr "%s 소켓에 바인드할 수 없습니다: %m" +msgid "could not bind %s address \"%s\": %m" +msgstr "%s 바인드 실패, 대상 주소: \"%s\": %m" -#: libpq/pqcomm.c:502 +#: libpq/pqcomm.c:541 #, c-format msgid "" "Is another postmaster already running on port %d? If not, remove socket file " @@ -12374,7 +13480,7 @@ msgstr "" "다른 postmaster 가 포트 %d에서 이미 실행중인것 같습니다? 그렇지 않다면 소켓 " "파일 \"%s\"을 제거하고 다시 시도해보십시오" -#: libpq/pqcomm.c:505 +#: libpq/pqcomm.c:544 #, c-format msgid "" "Is another postmaster already running on port %d? If not, wait a few seconds " @@ -12383,64 +13489,75 @@ msgstr "" "다른 postmaster 가 포트 %d에서 이미 실행중인것 같습니다? 그렇지 않다면 몇 초" "를 기다렸다가 다시 시도해보십시오." +#. translator: first %s is IPv4, IPv6, or Unix +#: libpq/pqcomm.c:577 +#, c-format +msgid "could not listen on %s address \"%s\": %m" +msgstr "%s 리슨 실패, 대상 주소: \"%s\": %m" + # translator: %s is IPv4, IPv6, or Unix -#. translator: %s is IPv4, IPv6, or Unix -#: libpq/pqcomm.c:538 +#: libpq/pqcomm.c:586 +#, c-format +msgid "listening on Unix socket \"%s\"" +msgstr "\"%s\" 유닉스 도메인 소켓으로 접속을 허용합니다" + +#. 
translator: first %s is IPv4 or IPv6 +#: libpq/pqcomm.c:592 #, c-format -msgid "could not listen on %s socket: %m" -msgstr "%s 소켓을 들을 수 없습니다: %m" +msgid "listening on %s address \"%s\", port %d" +msgstr "%s, 주소: \"%s\", 포트 %d 번으로 접속을 허용합니다" -#: libpq/pqcomm.c:623 +#: libpq/pqcomm.c:675 #, c-format msgid "group \"%s\" does not exist" msgstr "\"%s\" 그룹 없음" -#: libpq/pqcomm.c:633 +#: libpq/pqcomm.c:685 #, c-format msgid "could not set group of file \"%s\": %m" msgstr "파일 \"%s\" 의 그룹을 세팅할 수 없습니다: %m" -#: libpq/pqcomm.c:644 +#: libpq/pqcomm.c:696 #, c-format msgid "could not set permissions of file \"%s\": %m" msgstr "파일 \"%s\" 의 퍼미션을 세팅할 수 없습니다: %m" -#: libpq/pqcomm.c:674 +#: libpq/pqcomm.c:726 #, c-format msgid "could not accept new connection: %m" msgstr "새로운 연결을 생성할 수 없습니다: %m" -#: libpq/pqcomm.c:885 +#: libpq/pqcomm.c:927 #, c-format msgid "there is no client connection" msgstr "클라이언트 연결이 없음" -#: libpq/pqcomm.c:936 libpq/pqcomm.c:1032 +#: libpq/pqcomm.c:978 libpq/pqcomm.c:1074 #, c-format msgid "could not receive data from client: %m" msgstr "클라이언트에게 데이터를 받을 수 없습니다: %m" -#: libpq/pqcomm.c:1177 tcop/postgres.c:3917 +#: libpq/pqcomm.c:1219 tcop/postgres.c:3928 #, c-format msgid "terminating connection because protocol synchronization was lost" msgstr "프로토콜 동기화 작업 실패로 연결을 종료합니다" -#: libpq/pqcomm.c:1243 +#: libpq/pqcomm.c:1285 #, c-format msgid "unexpected EOF within message length word" msgstr "예상치 못한 EOF가 메시지의 길이 워드안에서 발생했습니다." -#: libpq/pqcomm.c:1254 +#: libpq/pqcomm.c:1296 #, c-format msgid "invalid message length" msgstr "메시지의 길이가 유효하지 않습니다" -#: libpq/pqcomm.c:1276 libpq/pqcomm.c:1289 +#: libpq/pqcomm.c:1318 libpq/pqcomm.c:1331 #, c-format msgid "incomplete message from client" msgstr "클라이언트으로부터의 완전하지 못한 메시지입니다" -#: libpq/pqcomm.c:1422 +#: libpq/pqcomm.c:1464 #, c-format msgid "could not send data to client: %m" msgstr "클라이언트에게 데이터를 보낼 수 없습니다: %m" @@ -12782,35 +13899,14 @@ msgstr "\"%s\" 이름의 확장가능한 노드 형이 이미 있습니다" msgid "ExtensibleNodeMethods \"%s\" was not registered" msgstr "\"%s\" ExtensibleNodeMethods가 등록되어 있지 않음" -#: nodes/nodeFuncs.c:124 nodes/nodeFuncs.c:155 parser/parse_coerce.c:1820 -#: parser/parse_coerce.c:1848 parser/parse_coerce.c:1924 -#: parser/parse_expr.c:2019 parser/parse_func.c:597 parser/parse_oper.c:952 +#: nodes/nodeFuncs.c:123 nodes/nodeFuncs.c:154 parser/parse_coerce.c:1844 +#: parser/parse_coerce.c:1872 parser/parse_coerce.c:1948 +#: parser/parse_expr.c:2110 parser/parse_func.c:602 parser/parse_oper.c:964 #, c-format msgid "could not find array type for data type %s" msgstr "자료형 %s 에 대해서는 배열 자료형을 사용할 수 없습니다" -#: optimizer/path/allpaths.c:2653 -#, c-format -msgid "WHERE CURRENT OF is not supported on a view with no underlying relation" -msgstr "" -"no underlying 릴레이션이 있는 뷰에서는 WHERE CURRENT OF 구문을 지원하지 않음" - -#: optimizer/path/allpaths.c:2658 -#, c-format -msgid "" -"WHERE CURRENT OF is not supported on a view with more than one underlying " -"relation" -msgstr "" -"WHERE CURRENT OF 옵션은 하나 이상의 릴레이션을 사용하는 뷰에서는 사용할 수 없" -"음" - -#: optimizer/path/allpaths.c:2663 -#, c-format -msgid "" -"WHERE CURRENT OF is not supported on a view with grouping or aggregation" -msgstr "WHERE CURRENT OF 옵션은 그룹화나 집계 작업용 뷰에서는 사용할 수 없음" - -#: optimizer/path/joinrels.c:802 +#: optimizer/path/joinrels.c:826 #, c-format msgid "" "FULL JOIN is only supported with merge-joinable or hash-joinable join " @@ -12820,7 +13916,7 @@ msgstr "" "니다" #. 
translator: %s is a SQL row locking clause such as FOR UPDATE -#: optimizer/plan/initsplan.c:1124 +#: optimizer/plan/initsplan.c:1200 #, c-format msgid "%s cannot be applied to the nullable side of an outer join" msgstr "" @@ -12828,97 +13924,97 @@ msgstr "" "다" #. translator: %s is a SQL row locking clause such as FOR UPDATE -#: optimizer/plan/planner.c:1480 parser/analyze.c:1549 parser/analyze.c:1747 -#: parser/analyze.c:2528 +#: optimizer/plan/planner.c:1550 parser/analyze.c:1624 parser/analyze.c:1821 +#: parser/analyze.c:2615 #, c-format msgid "%s is not allowed with UNION/INTERSECT/EXCEPT" msgstr "%s 구문은 UNION/INTERSECT/EXCEPT 예약어들과 함께 사용할 수 없습니다." -#: optimizer/plan/planner.c:3809 +#: optimizer/plan/planner.c:2150 optimizer/plan/planner.c:4108 #, c-format msgid "could not implement GROUP BY" msgstr "GROUP BY를 구현할 수 없음" -#: optimizer/plan/planner.c:3810 optimizer/plan/planner.c:4203 -#: optimizer/prep/prepunion.c:929 +#: optimizer/plan/planner.c:2151 optimizer/plan/planner.c:4109 +#: optimizer/plan/planner.c:4849 optimizer/prep/prepunion.c:938 #, c-format msgid "" "Some of the datatypes only support hashing, while others only support " "sorting." msgstr "해싱만 지원하는 자료형도 있고, 정렬만 지원하는 자료형도 있습니다." -#: optimizer/plan/planner.c:4202 +#: optimizer/plan/planner.c:4848 #, c-format msgid "could not implement DISTINCT" msgstr "DISTINCT를 구현할 수 없음" -#: optimizer/plan/planner.c:4832 +#: optimizer/plan/planner.c:5528 #, c-format msgid "could not implement window PARTITION BY" msgstr "창 PARTITION BY를 구현할 수 없음" -#: optimizer/plan/planner.c:4833 +#: optimizer/plan/planner.c:5529 #, c-format msgid "Window partitioning columns must be of sortable datatypes." -msgstr "창 분할 열은 정렬 가능한 데이터 형식이어야 합니다." +msgstr "창 분할 칼럼은 정렬 가능한 데이터 형식이어야 합니다." -#: optimizer/plan/planner.c:4837 +#: optimizer/plan/planner.c:5533 #, c-format msgid "could not implement window ORDER BY" msgstr "창 ORDER BY를 구현할 수 없음" -#: optimizer/plan/planner.c:4838 +#: optimizer/plan/planner.c:5534 #, c-format msgid "Window ordering columns must be of sortable datatypes." -msgstr "창 순서 지정 열은 정렬 가능한 데이터 형식이어야 합니다." +msgstr "창 순서 지정 칼럼은 정렬 가능한 데이터 형식이어야 합니다." -#: optimizer/plan/setrefs.c:415 +#: optimizer/plan/setrefs.c:413 #, c-format msgid "too many range table entries" msgstr "너무 많은 테이블이 사용되었습니다" -#: optimizer/prep/prepunion.c:484 +#: optimizer/prep/prepunion.c:493 #, c-format msgid "could not implement recursive UNION" msgstr "재귀 UNION을 구현할 수 없음" -#: optimizer/prep/prepunion.c:485 +#: optimizer/prep/prepunion.c:494 #, c-format msgid "All column datatypes must be hashable." msgstr "모든 열 데이터 형식은 해시 가능해야 합니다." #. 
translator: %s is UNION, INTERSECT, or EXCEPT -#: optimizer/prep/prepunion.c:928 +#: optimizer/prep/prepunion.c:937 #, c-format msgid "could not implement %s" msgstr "%s 구문은 구현할 수 없음" -#: optimizer/util/clauses.c:4634 +#: optimizer/util/clauses.c:4689 #, c-format msgid "SQL function \"%s\" during inlining" msgstr "" -#: optimizer/util/plancat.c:114 +#: optimizer/util/plancat.c:120 #, c-format msgid "cannot access temporary or unlogged relations during recovery" msgstr "복구 작업 중에는 임시 테이블이나, 언로그드 테이블을 접근할 수 없음" -#: optimizer/util/plancat.c:611 +#: optimizer/util/plancat.c:620 #, c-format msgid "whole row unique index inference specifications are not supported" msgstr "" -#: optimizer/util/plancat.c:628 +#: optimizer/util/plancat.c:637 #, c-format msgid "constraint in ON CONFLICT clause has no associated index" msgstr "ON CONFLICT 처리를 위해 관련된 인덱스가 없습니다" -#: optimizer/util/plancat.c:679 +#: optimizer/util/plancat.c:688 #, c-format msgid "ON CONFLICT DO UPDATE not supported with exclusion constraints" msgstr "제외 제약 조건이 있어 ON CONFLICT DO UPDATE 작업은 할 수 없습니다" -#: optimizer/util/plancat.c:784 +#: optimizer/util/plancat.c:793 #, c-format msgid "" "there is no unique or exclusion constraint matching the ON CONFLICT " @@ -12926,68 +14022,68 @@ msgid "" msgstr "" "ON CONFLICT 절을 사용하는 경우, unique 나 exclude 제약 조건이 있어야 함" -#: parser/analyze.c:663 parser/analyze.c:1321 +#: parser/analyze.c:700 parser/analyze.c:1387 #, c-format msgid "VALUES lists must all be the same length" msgstr "VALUES 목록은 모두 같은 길이여야 함" -#: parser/analyze.c:859 +#: parser/analyze.c:855 +#, c-format +msgid "ON CONFLICT clause is not supported with partitioned tables" +msgstr "ON CONFLICT 절은 파티션 테이블에서는 사용할 수 없습니다" + +#: parser/analyze.c:918 #, c-format msgid "INSERT has more expressions than target columns" msgstr "INSERT 구문에 target columns 보다 더 많은 표현식이 존재하고 있다" -#: parser/analyze.c:877 +#: parser/analyze.c:936 #, c-format msgid "INSERT has more target columns than expressions" msgstr "" "INSERT 구문에 target columns 보다 더 많은 표현식(expressions)이 존재하고 있다" -#: parser/analyze.c:881 +#: parser/analyze.c:940 #, c-format msgid "" "The insertion source is a row expression containing the same number of " "columns expected by the INSERT. Did you accidentally use extra parentheses?" msgstr "" -#: parser/analyze.c:1142 parser/analyze.c:1522 +#: parser/analyze.c:1200 parser/analyze.c:1597 #, c-format msgid "SELECT ... INTO is not allowed here" msgstr "SELECT ... INTO 구문은 여기서는 사용할 수 없음" -#: parser/analyze.c:1335 -#, c-format -msgid "DEFAULT can only appear in a VALUES list within INSERT" -msgstr "DEFAULT는 INSERT 내의 VALUES 목록에만 표시될 수 있음" - #. translator: %s is a SQL row locking clause such as FOR UPDATE -#: parser/analyze.c:1454 parser/analyze.c:2698 +#: parser/analyze.c:1529 parser/analyze.c:2794 #, c-format msgid "%s cannot be applied to VALUES" msgstr "%s 구문은 VALUES 에 적용할 수 없음" -#: parser/analyze.c:1675 +#: parser/analyze.c:1748 #, c-format msgid "invalid UNION/INTERSECT/EXCEPT ORDER BY clause" msgstr "UNION/INTERSECT/EXCEPT ORDER BY 절이 잘못됨" -#: parser/analyze.c:1676 +#: parser/analyze.c:1749 #, c-format msgid "Only result column names can be used, not expressions or functions." msgstr "결과 열 이름만 사용할 수 있고 식 또는 함수는 사용할 수 없습니다." -#: parser/analyze.c:1677 +#: parser/analyze.c:1750 #, c-format msgid "" "Add the expression/function to every SELECT, or move the UNION into a FROM " "clause." msgstr "모든 SELECT에 식/함수를 추가하거나 UNION을 FROM 절로 이동하십시오." 
-#: parser/analyze.c:1737 +#: parser/analyze.c:1811 #, c-format msgid "INTO is only allowed on first SELECT of UNION/INTERSECT/EXCEPT" msgstr "INTO 는 UNION/INTERSECT/EXCEPT 의 첫번째 SELECT 에만 허용된다" -#: parser/analyze.c:1801 +#: parser/analyze.c:1883 #, c-format msgid "" "UNION/INTERSECT/EXCEPT member statement cannot refer to other relations of " @@ -12996,22 +14092,22 @@ msgstr "" "UNION/INTERSECT/EXCEPT 멤버 문에서 같은 쿼리 수준의 다른 관계를 참조할 수 없" "음" -#: parser/analyze.c:1890 +#: parser/analyze.c:1972 #, c-format msgid "each %s query must have the same number of columns" msgstr "각각의 %s query 는 같은 수의 columns 를 가져야 한다." -#: parser/analyze.c:2283 +#: parser/analyze.c:2365 #, c-format msgid "RETURNING must have at least one column" msgstr "RETURNING 절에는 적어도 하나 이상의 칼럼이 있어야 합니다" -#: parser/analyze.c:2320 +#: parser/analyze.c:2406 #, c-format msgid "cannot specify both SCROLL and NO SCROLL" msgstr "SCROLL 과 NO SCROLL 둘다를 명시할 수 없다" -#: parser/analyze.c:2338 +#: parser/analyze.c:2425 #, c-format msgid "DECLARE CURSOR must not contain data-modifying statements in WITH" msgstr "" @@ -13019,333 +14115,370 @@ msgstr "" "다" #. translator: %s is a SQL row locking clause such as FOR UPDATE -#: parser/analyze.c:2346 +#: parser/analyze.c:2433 #, c-format msgid "DECLARE CURSOR WITH HOLD ... %s is not supported" msgstr "DECLARE CURSOR WITH HOLD ... %s 구문은 지원되지 않음" -#: parser/analyze.c:2349 +#: parser/analyze.c:2436 #, c-format msgid "Holdable cursors must be READ ONLY." msgstr "보류 가능 커서는 READ ONLY여야 합니다." #. translator: %s is a SQL row locking clause such as FOR UPDATE -#: parser/analyze.c:2357 +#: parser/analyze.c:2444 #, c-format msgid "DECLARE SCROLL CURSOR ... %s is not supported" msgstr "DECLARE SCROLL CURSOR ... %s 구문은 지원되지 않음" #. translator: %s is a SQL row locking clause such as FOR UPDATE -#: parser/analyze.c:2368 +#: parser/analyze.c:2455 #, c-format msgid "DECLARE INSENSITIVE CURSOR ... %s is not supported" msgstr "DECLARE INSENSITIVE CURSOR ... %s 구문은 지원되지 않음" -#: parser/analyze.c:2371 +#: parser/analyze.c:2458 #, c-format msgid "Insensitive cursors must be READ ONLY." msgstr "민감하지 않은 커서는 READ ONLY여야 합니다." -#: parser/analyze.c:2437 +#: parser/analyze.c:2524 #, c-format msgid "materialized views must not use data-modifying statements in WITH" msgstr "" "구체화된 뷰 정의에 사용한 WITH 절 안에는 자료 변경 구문이 없어야 합니다" -#: parser/analyze.c:2447 +#: parser/analyze.c:2534 #, c-format msgid "materialized views must not use temporary tables or views" msgstr "구체화된 뷰는 임시 테이블이나 뷰를 사용할 수 없습니다" -#: parser/analyze.c:2457 +#: parser/analyze.c:2544 #, c-format msgid "materialized views may not be defined using bound parameters" msgstr "" -#: parser/analyze.c:2469 +#: parser/analyze.c:2556 #, c-format msgid "materialized views cannot be UNLOGGED" msgstr "구체화된 뷰는 UNLOGGED 옵션을 사용할 수 없습니다." #. translator: %s is a SQL row locking clause such as FOR UPDATE -#: parser/analyze.c:2535 +#: parser/analyze.c:2622 #, c-format msgid "%s is not allowed with DISTINCT clause" msgstr "%s 절은 DISTINCT 절과 함께 사용할 수 없습니다" #. translator: %s is a SQL row locking clause such as FOR UPDATE -#: parser/analyze.c:2542 +#: parser/analyze.c:2629 #, c-format msgid "%s is not allowed with GROUP BY clause" msgstr "%s 절은 GROUP BY 절과 함께 사용할 수 없습니다" #. translator: %s is a SQL row locking clause such as FOR UPDATE -#: parser/analyze.c:2549 +#: parser/analyze.c:2636 #, c-format msgid "%s is not allowed with HAVING clause" msgstr "%s 절은 HAVING 절과 함께 사용할 수 없습니다" #. 
translator: %s is a SQL row locking clause such as FOR UPDATE -#: parser/analyze.c:2556 +#: parser/analyze.c:2643 #, c-format msgid "%s is not allowed with aggregate functions" msgstr "%s 절은 집계 함수와 함께 사용할 수 없습니다" #. translator: %s is a SQL row locking clause such as FOR UPDATE -#: parser/analyze.c:2563 +#: parser/analyze.c:2650 #, c-format msgid "%s is not allowed with window functions" msgstr "%s 절은 윈도우 함수와 함께 사용할 수 없습니다" #. translator: %s is a SQL row locking clause such as FOR UPDATE -#: parser/analyze.c:2570 +#: parser/analyze.c:2657 #, c-format msgid "%s is not allowed with set-returning functions in the target list" msgstr "%s 절은 대상 목록에서 세트 반환 함수와 함께 사용할 수 없습니다." #. translator: %s is a SQL row locking clause such as FOR UPDATE -#: parser/analyze.c:2649 +#: parser/analyze.c:2736 #, c-format msgid "%s must specify unqualified relation names" msgstr "%s 절에는 unqualified 릴레이션 이름을 지정해야 합니다." #. translator: %s is a SQL row locking clause such as FOR UPDATE -#: parser/analyze.c:2680 +#: parser/analyze.c:2767 #, c-format msgid "%s cannot be applied to a join" msgstr "%s 절은 조인을 적용할 수 없습니다." #. translator: %s is a SQL row locking clause such as FOR UPDATE -#: parser/analyze.c:2689 +#: parser/analyze.c:2776 #, c-format msgid "%s cannot be applied to a function" msgstr "%s 절은 함수에 적용할 수 없습니다." #. translator: %s is a SQL row locking clause such as FOR UPDATE -#: parser/analyze.c:2707 +#: parser/analyze.c:2785 +#, c-format +msgid "%s cannot be applied to a table function" +msgstr "%s 절은 테이블 함수에 적용할 수 없습니다." + +#. translator: %s is a SQL row locking clause such as FOR UPDATE +#: parser/analyze.c:2803 #, c-format msgid "%s cannot be applied to a WITH query" msgstr "%s 절은 WITH 쿼리에 적용할 수 없음" #. translator: %s is a SQL row locking clause such as FOR UPDATE -#: parser/analyze.c:2724 +#: parser/analyze.c:2812 +#, c-format +msgid "%s cannot be applied to a named tuplestore" +msgstr "%s 절은 named tuplestore에 적용할 수 없음" + +#. translator: %s is a SQL row locking clause such as FOR UPDATE +#: parser/analyze.c:2829 #, c-format msgid "relation \"%s\" in %s clause not found in FROM clause" msgstr "\"%s\" 릴레이션 (대상 구문: %s) 이 FROM 절 내에 없습니다" -#: parser/parse_agg.c:223 parser/parse_oper.c:220 +#: parser/parse_agg.c:221 parser/parse_oper.c:222 #, c-format msgid "could not identify an ordering operator for type %s" msgstr "%s 자료형에서 사용할 순서 정하는 연산자를 찾을 수 없습니다." -#: parser/parse_agg.c:225 +#: parser/parse_agg.c:223 #, c-format msgid "Aggregates with DISTINCT must be able to sort their inputs." msgstr "" "DISTINCT와 함께 작업하는 집계 작업은 그 입력 자료가 정렬될 수 있어야 합니다" -#: parser/parse_agg.c:260 +#: parser/parse_agg.c:258 #, c-format msgid "GROUPING must have fewer than 32 arguments" msgstr "GROUPING 인자로는 32개 이내로 지정해야 합니다" -#: parser/parse_agg.c:363 +#: parser/parse_agg.c:361 msgid "aggregate functions are not allowed in JOIN conditions" msgstr "JOIN 조건문에서는 집계 함수가 허용되지 않습니다" -#: parser/parse_agg.c:365 +#: parser/parse_agg.c:363 msgid "grouping operations are not allowed in JOIN conditions" msgstr "JOIN 조건문에서는 그룹핑 연산이 허용되지 않습니다" -#: parser/parse_agg.c:377 +#: parser/parse_agg.c:375 msgid "" "aggregate functions are not allowed in FROM clause of their own query level" msgstr "집계 함수는 자신의 쿼리 수준의 FROM 절에서는 사용할 수 없습니다." 
-#: parser/parse_agg.c:379 +#: parser/parse_agg.c:377 msgid "" "grouping operations are not allowed in FROM clause of their own query level" msgstr "" -#: parser/parse_agg.c:384 +#: parser/parse_agg.c:382 msgid "aggregate functions are not allowed in functions in FROM" msgstr "FROM 절 내의 함수 표현식 내에서는 집계 함수를 사용할 수 없습니다" -#: parser/parse_agg.c:386 +#: parser/parse_agg.c:384 msgid "grouping operations are not allowed in functions in FROM" msgstr "FROM 절 내의 함수 표현식 내에서는 그룹핑 연산이 허용되지 않습니다" -#: parser/parse_agg.c:394 +#: parser/parse_agg.c:392 msgid "aggregate functions are not allowed in policy expressions" msgstr "정책 표현식에서는 집계 함수 사용을 허용하지 않습니다" -#: parser/parse_agg.c:396 +#: parser/parse_agg.c:394 msgid "grouping operations are not allowed in policy expressions" msgstr "정책 표현식에서는 그룹핑 연산이 허용되지 않습니다" -#: parser/parse_agg.c:413 +#: parser/parse_agg.c:411 msgid "aggregate functions are not allowed in window RANGE" msgstr "윈도우 RANGE 안에서는 집계 함수를 사용할 수 없습니다" -#: parser/parse_agg.c:415 +#: parser/parse_agg.c:413 msgid "grouping operations are not allowed in window RANGE" msgstr "윈도우 RANGE 안에서는 그룹핑 연산이 허용되지 않습니다" -#: parser/parse_agg.c:420 +#: parser/parse_agg.c:418 msgid "aggregate functions are not allowed in window ROWS" msgstr "윈도우 ROWS 안에서는 집계 함수를 사용할 수 없습니다" -#: parser/parse_agg.c:422 +#: parser/parse_agg.c:420 msgid "grouping operations are not allowed in window ROWS" msgstr "윈도우 ROWS 안에서는 그룹핑 연산이 허용되지 않습니다" -#: parser/parse_agg.c:455 +#: parser/parse_agg.c:454 msgid "aggregate functions are not allowed in check constraints" msgstr "체크 제약 조건에서는 집계 함수를 사용할 수 없습니다" -#: parser/parse_agg.c:457 +#: parser/parse_agg.c:456 msgid "grouping operations are not allowed in check constraints" msgstr "체크 제약 조건에서는 그룹핑 연산이 허용되지 않습니다" -#: parser/parse_agg.c:464 +#: parser/parse_agg.c:463 msgid "aggregate functions are not allowed in DEFAULT expressions" msgstr "DEFAULT 표현식에서는 집계 함수를 사용할 수 없습니다" -#: parser/parse_agg.c:466 +#: parser/parse_agg.c:465 msgid "grouping operations are not allowed in DEFAULT expressions" msgstr "DEFAULT 표현식에서는 그룹핑 연산이 허용되지 않습니다" -#: parser/parse_agg.c:471 +#: parser/parse_agg.c:470 msgid "aggregate functions are not allowed in index expressions" msgstr "인덱스 표현식에서는 집계 함수를 사용할 수 없습니다" -#: parser/parse_agg.c:473 +#: parser/parse_agg.c:472 msgid "grouping operations are not allowed in index expressions" msgstr "인덱스 표현식에서는 그룹핑 연산이 허용되지 않습니다" -#: parser/parse_agg.c:478 +#: parser/parse_agg.c:477 msgid "aggregate functions are not allowed in index predicates" msgstr "집계 함수는 함수 기반 인덱스의 함수로 사용할 수 없습니다" -#: parser/parse_agg.c:480 +#: parser/parse_agg.c:479 msgid "grouping operations are not allowed in index predicates" msgstr "그룹핑 작업은 함수 기반 인덱스의 함수로 사용할 수 없습니다" -#: parser/parse_agg.c:485 +#: parser/parse_agg.c:484 msgid "aggregate functions are not allowed in transform expressions" msgstr "transform 식(expression)에 집계 함수를 사용할 수 없습니다" -#: parser/parse_agg.c:487 +#: parser/parse_agg.c:486 msgid "grouping operations are not allowed in transform expressions" msgstr "transform 식(expression)에 그룹핑 작업를 사용할 수 없습니다" -#: parser/parse_agg.c:492 +#: parser/parse_agg.c:491 msgid "aggregate functions are not allowed in EXECUTE parameters" msgstr "EXECUTE 매개 변수로 집계 함수를 사용할 수 없습니다" -#: parser/parse_agg.c:494 +#: parser/parse_agg.c:493 msgid "grouping operations are not allowed in EXECUTE parameters" msgstr "EXECUTE 매개 변수로 그룹핑 작업을 사용할 수 없습니다" -#: parser/parse_agg.c:499 +#: parser/parse_agg.c:498 msgid "aggregate functions are not allowed in trigger WHEN conditions" msgstr "트리거의 WHEN 조건절에 집계 함수가 허용되지 않습니다" -#: 
parser/parse_agg.c:501 +#: parser/parse_agg.c:500 msgid "grouping operations are not allowed in trigger WHEN conditions" msgstr "트리거의 WHEN 조건절에 그룹핑 작업이 허용되지 않습니다" +#: parser/parse_agg.c:505 +msgid "aggregate functions are not allowed in partition key expression" +msgstr "파티션 키 표현식에서는 집계 함수를 사용할 수 없습니다" + +#: parser/parse_agg.c:507 +msgid "grouping operations are not allowed in partition key expression" +msgstr "파티션 키 표현식에서는 그룹핑 연산이 허용되지 않습니다" + #. translator: %s is name of a SQL construct, eg GROUP BY -#: parser/parse_agg.c:524 parser/parse_clause.c:1550 +#: parser/parse_agg.c:530 parser/parse_clause.c:1830 #, c-format msgid "aggregate functions are not allowed in %s" msgstr "집계 함수는 %s 절에서 사용할 수 없습니다." #. translator: %s is name of a SQL construct, eg GROUP BY -#: parser/parse_agg.c:527 +#: parser/parse_agg.c:533 #, c-format msgid "grouping operations are not allowed in %s" msgstr "그룹핑 작업은 %s 절에서 사용할 수 없습니다." -#: parser/parse_agg.c:635 +#: parser/parse_agg.c:641 #, c-format msgid "" "outer-level aggregate cannot contain a lower-level variable in its direct " "arguments" msgstr "" -#: parser/parse_agg.c:706 +#: parser/parse_agg.c:720 +#, c-format +msgid "aggregate function calls cannot contain set-returning function calls" +msgstr "집계 함수 호출은 집합 반환 함수 호출을 포함할 수 없음" + +#: parser/parse_agg.c:721 parser/parse_expr.c:1761 parser/parse_expr.c:2237 +#: parser/parse_func.c:773 +#, c-format +msgid "" +"You might be able to move the set-returning function into a LATERAL FROM " +"item." +msgstr "집합 반환 함수를 LATERAL FROM 쪽으로 옮겨서 구현할 수도 있습니다." + +#: parser/parse_agg.c:726 #, c-format msgid "aggregate function calls cannot contain window function calls" msgstr "집계 함수 호출은 윈도우 함수 호출을 포함할 수 없음" -#: parser/parse_agg.c:784 +#: parser/parse_agg.c:805 msgid "window functions are not allowed in JOIN conditions" msgstr "윈도우 함수는 JOIN 조건에 사용할 수 없음" -#: parser/parse_agg.c:791 +#: parser/parse_agg.c:812 msgid "window functions are not allowed in functions in FROM" msgstr "윈도우 함수는 FROM 절에 있는 함수로 사용할 수 없음" -#: parser/parse_agg.c:797 +#: parser/parse_agg.c:818 msgid "window functions are not allowed in policy expressions" msgstr "윈도우 함수는 정책 식에 사용할 수 없음" -#: parser/parse_agg.c:809 +#: parser/parse_agg.c:830 msgid "window functions are not allowed in window definitions" msgstr "윈도우 함수는 윈도우 함수 정의에 사용할 수 없음" -#: parser/parse_agg.c:840 +#: parser/parse_agg.c:862 msgid "window functions are not allowed in check constraints" msgstr "윈도우 함수는 check 제약조건에 사용할 수 없음" -#: parser/parse_agg.c:844 +#: parser/parse_agg.c:866 msgid "window functions are not allowed in DEFAULT expressions" msgstr "윈도우 함수는 DEFAULT 식에서 사용할 수 없음" -#: parser/parse_agg.c:847 +#: parser/parse_agg.c:869 msgid "window functions are not allowed in index expressions" msgstr "윈도우 함수는 인덱스 식에서 사용할 수 없음" -#: parser/parse_agg.c:850 +#: parser/parse_agg.c:872 msgid "window functions are not allowed in index predicates" msgstr "윈도우 함수는 함수 기반 인덱스에서 사용할 수 없음" -#: parser/parse_agg.c:853 +#: parser/parse_agg.c:875 msgid "window functions are not allowed in transform expressions" msgstr "윈도우 함수는 transform 식에서 사용할 수 없음" -#: parser/parse_agg.c:856 +#: parser/parse_agg.c:878 msgid "window functions are not allowed in EXECUTE parameters" msgstr "윈도우 함수는 EXECUTE 매개 변수 설정 값으로 사용할 수 없음" -#: parser/parse_agg.c:859 +#: parser/parse_agg.c:881 msgid "window functions are not allowed in trigger WHEN conditions" msgstr "윈도우 함수는 트리거의 WHEN 조건절에서 사용할 수 없음" +#: parser/parse_agg.c:884 +msgid "window functions are not allowed in partition key expression" +msgstr "윈도우 함수는 파티션 키 표현식에서 사용할 수 없음" + #. 
translator: %s is name of a SQL construct, eg GROUP BY -#: parser/parse_agg.c:879 parser/parse_clause.c:1559 +#: parser/parse_agg.c:904 parser/parse_clause.c:1839 #, c-format msgid "window functions are not allowed in %s" msgstr "%s 안에서는 윈도우 함수를 사용할 수 없음" -#: parser/parse_agg.c:913 parser/parse_clause.c:2396 +#: parser/parse_agg.c:938 parser/parse_clause.c:2673 #, c-format msgid "window \"%s\" does not exist" msgstr "\"%s\" 윈도우 함수가 없음" -#: parser/parse_agg.c:998 +#: parser/parse_agg.c:1023 #, c-format msgid "too many grouping sets present (maximum 4096)" msgstr "너무 많은 그룹핑 세트가 있습니다 (최대값 4096)" -#: parser/parse_agg.c:1147 +#: parser/parse_agg.c:1172 #, c-format msgid "" "aggregate functions are not allowed in a recursive query's recursive term" msgstr "집계 함수는 재귀 쿼리의 재귀 조건에 사용할 수 없음" -#: parser/parse_agg.c:1340 +#: parser/parse_agg.c:1365 #, c-format msgid "" "column \"%s.%s\" must appear in the GROUP BY clause or be used in an " @@ -13354,170 +14487,201 @@ msgstr "" "column \"%s.%s\" 는 반드시 GROUP BY 절내에 있어야 하던지 또는 집계 함수 내에" "서 사용되어져야 한다" -#: parser/parse_agg.c:1343 +#: parser/parse_agg.c:1368 #, c-format msgid "" "Direct arguments of an ordered-set aggregate must use only grouped columns." msgstr "" -#: parser/parse_agg.c:1348 +#: parser/parse_agg.c:1373 #, c-format msgid "subquery uses ungrouped column \"%s.%s\" from outer query" msgstr "" "subquery 가 outer query 에서 그룹화 되지 않은 열인 \"%s.%s\"를 사용합니다" -#: parser/parse_agg.c:1512 +#: parser/parse_agg.c:1537 #, c-format msgid "" "arguments to GROUPING must be grouping expressions of the associated query " "level" msgstr "" -#: parser/parse_clause.c:649 +#: parser/parse_clause.c:192 +#, c-format +msgid "relation \"%s\" cannot be the target of a modifying statement" +msgstr "\"%s\" 릴레이션은 자료 변경 구문의 대상이 될 수 없음" + +#: parser/parse_clause.c:637 parser/parse_clause.c:665 +#: parser/parse_func.c:2153 +#, c-format +msgid "set-returning functions must appear at top level of FROM" +msgstr "" + +#: parser/parse_clause.c:677 #, c-format msgid "multiple column definition lists are not allowed for the same function" msgstr "다중 칼럼 정의 목록은 같은 함수용으로 허용하지 않음" -#: parser/parse_clause.c:682 +#: parser/parse_clause.c:710 #, c-format msgid "" "ROWS FROM() with multiple functions cannot have a column definition list" msgstr "" -#: parser/parse_clause.c:683 +#: parser/parse_clause.c:711 #, c-format msgid "" "Put a separate column definition list for each function inside ROWS FROM()." msgstr "" -#: parser/parse_clause.c:689 +#: parser/parse_clause.c:717 #, c-format msgid "UNNEST() with multiple arguments cannot have a column definition list" msgstr "" -#: parser/parse_clause.c:690 +#: parser/parse_clause.c:718 #, c-format msgid "" "Use separate UNNEST() calls inside ROWS FROM(), and attach a column " "definition list to each one." msgstr "" -#: parser/parse_clause.c:697 +#: parser/parse_clause.c:725 #, c-format msgid "WITH ORDINALITY cannot be used with a column definition list" msgstr "WITH ORDINALITY 구문은 칼럼 정의 목록과 함께 쓸 수 없습니다." -#: parser/parse_clause.c:698 +#: parser/parse_clause.c:726 #, c-format msgid "Put the column definition list inside ROWS FROM()." msgstr "ROWS FROM() 안에 칼럼 정의 목록을 넣으세요." 
-#: parser/parse_clause.c:753 +#: parser/parse_clause.c:829 +#, c-format +msgid "only one FOR ORDINALITY column is allowed" +msgstr "" + +#: parser/parse_clause.c:890 +#, c-format +msgid "column name \"%s\" is not unique" +msgstr "\"%s\" 칼럼은 유일성을 가지지 못합니다(not unique)" + +#: parser/parse_clause.c:932 +#, c-format +msgid "namespace name \"%s\" is not unique" +msgstr "\"%s\" 네임스페이스는 유일성을 가지지 못합니다(not unique)" + +#: parser/parse_clause.c:942 +#, c-format +msgid "only one default namespace is allowed" +msgstr "기본 네임스페이스는 하나만 허용합니다" + +#: parser/parse_clause.c:1003 #, c-format msgid "tablesample method %s does not exist" msgstr "\"%s\" 테이블 샘플링 방법이 없습니다" -#: parser/parse_clause.c:775 +#: parser/parse_clause.c:1025 #, c-format msgid "tablesample method %s requires %d argument, not %d" msgid_plural "tablesample method %s requires %d arguments, not %d" msgstr[0] "\"%s\" 테이블 샘플링 방법 %d개 인자를 지정해야함, (현재 %d개)" -#: parser/parse_clause.c:809 +#: parser/parse_clause.c:1059 #, c-format msgid "tablesample method %s does not support REPEATABLE" msgstr "\"%s\" 테이블 샘플링 방법은 REPEATABLE 옵션을 지원하지 않음" -#: parser/parse_clause.c:940 +#: parser/parse_clause.c:1220 #, c-format msgid "TABLESAMPLE clause can only be applied to tables and materialized views" msgstr "TABLESAMPLE 절은 테이블과 구체화된 뷰에서만 사용할 수 있습니다" -#: parser/parse_clause.c:1110 +#: parser/parse_clause.c:1390 #, c-format msgid "column name \"%s\" appears more than once in USING clause" msgstr "USING 절 내에 열 이름 \"%s\" 가 한번 이상 사용되었습니다" -#: parser/parse_clause.c:1125 +#: parser/parse_clause.c:1405 #, c-format msgid "common column name \"%s\" appears more than once in left table" msgstr "left table 내에 common column 이름 \"%s\" 가 한번 이상 사용되었다" -#: parser/parse_clause.c:1134 +#: parser/parse_clause.c:1414 #, c-format msgid "column \"%s\" specified in USING clause does not exist in left table" -msgstr "USING 조건절에서 지정한 \"%s\" 열이 왼쪽 테이블에 없음" +msgstr "USING 조건절에서 지정한 \"%s\" 칼럼이 왼쪽 테이블에 없음" -#: parser/parse_clause.c:1148 +#: parser/parse_clause.c:1428 #, c-format msgid "common column name \"%s\" appears more than once in right table" msgstr "common column name \"%s\"가 right table 에 한번 이상 사용되었다" -#: parser/parse_clause.c:1157 +#: parser/parse_clause.c:1437 #, c-format msgid "column \"%s\" specified in USING clause does not exist in right table" -msgstr "USING 조건절에서 지정한 \"%s\" 열이 오른쪽 테이블에 없음" +msgstr "USING 조건절에서 지정한 \"%s\" 칼럼이 오른쪽 테이블에 없음" -#: parser/parse_clause.c:1211 +#: parser/parse_clause.c:1491 #, c-format msgid "column alias list for \"%s\" has too many entries" msgstr " \"%s\" 를 위한 열 alias list 에 너무 많은 entry 가 포함되어 있다" #. translator: %s is name of a SQL construct, eg LIMIT -#: parser/parse_clause.c:1520 +#: parser/parse_clause.c:1800 #, c-format msgid "argument of %s must not contain variables" msgstr "%s 의 인자로 변수를 포함할 수 없습니다." #. translator: first %s is name of a SQL construct, eg ORDER BY -#: parser/parse_clause.c:1685 +#: parser/parse_clause.c:1965 #, c-format msgid "%s \"%s\" is ambiguous" msgstr "%s \"%s\" 가 명확하지 않은 표현입니다." #. translator: %s is name of a SQL construct, eg ORDER BY -#: parser/parse_clause.c:1714 +#: parser/parse_clause.c:1994 #, c-format msgid "non-integer constant in %s" msgstr "정수가 아닌 상수가 %s 에 포함되어 있습니다" #. 
translator: %s is name of a SQL construct, eg ORDER BY -#: parser/parse_clause.c:1736 +#: parser/parse_clause.c:2016 #, c-format msgid "%s position %d is not in select list" msgstr "%s position %d 가 select list 에 포함되어 있지 않습니다" -#: parser/parse_clause.c:2178 +#: parser/parse_clause.c:2457 #, c-format msgid "CUBE is limited to 12 elements" msgstr "CUBE 인자로는 12개 이하의 인자만 허용합니다" -#: parser/parse_clause.c:2384 +#: parser/parse_clause.c:2661 #, c-format msgid "window \"%s\" is already defined" msgstr "\"%s\" 이름의 윈도우 함수가 이미 정의됨" -#: parser/parse_clause.c:2446 +#: parser/parse_clause.c:2722 #, c-format msgid "cannot override PARTITION BY clause of window \"%s\"" msgstr "\"%s\" 창의 PARTITION BY 절을 재정의할 수 없음" -#: parser/parse_clause.c:2458 +#: parser/parse_clause.c:2734 #, c-format msgid "cannot override ORDER BY clause of window \"%s\"" msgstr "\"%s\" 창의 ORDER BY 절을 재정의할 수 없음" -#: parser/parse_clause.c:2488 parser/parse_clause.c:2494 +#: parser/parse_clause.c:2764 parser/parse_clause.c:2770 #, c-format msgid "cannot copy window \"%s\" because it has a frame clause" msgstr "프래임 절이 있어, \"%s\" 윈도우를 복사할 수 없음." -#: parser/parse_clause.c:2496 +#: parser/parse_clause.c:2772 #, c-format msgid "Omit the parentheses in this OVER clause." msgstr "OVER 절에 괄호가 빠졌음" -#: parser/parse_clause.c:2562 +#: parser/parse_clause.c:2838 #, c-format msgid "" "in an aggregate with DISTINCT, ORDER BY expressions must appear in argument " @@ -13526,68 +14690,68 @@ msgstr "" "DISTINCT, ORDER BY 표현식을 집계 함수와 쓸 때는, 반드시 select list 에 나타나" "야만 합니다" -#: parser/parse_clause.c:2563 +#: parser/parse_clause.c:2839 #, c-format msgid "for SELECT DISTINCT, ORDER BY expressions must appear in select list" msgstr "" "SELECT DISTINCT, ORDER BY 표현식을 위해서 반드시 select list 에 나타나야만 합" "니다" -#: parser/parse_clause.c:2596 +#: parser/parse_clause.c:2871 #, c-format msgid "an aggregate with DISTINCT must have at least one argument" msgstr "DISTINCT 예약어로 집계를 할 경우 적어도 하나의 인자는 있어야 함" -#: parser/parse_clause.c:2597 +#: parser/parse_clause.c:2872 #, c-format msgid "SELECT DISTINCT must have at least one column" msgstr "SELECT DISTINCT 구문은 적어도 한 개 이상의 칼럼이 있어야 합니다" -#: parser/parse_clause.c:2663 parser/parse_clause.c:2695 +#: parser/parse_clause.c:2938 parser/parse_clause.c:2970 #, c-format msgid "SELECT DISTINCT ON expressions must match initial ORDER BY expressions" msgstr "" "SELECT DISTINCT ON 표현식은 반드시 초기 ORDER BY 표현식과 일치하여야 한다" -#: parser/parse_clause.c:2774 +#: parser/parse_clause.c:3048 #, c-format msgid "ASC/DESC is not allowed in ON CONFLICT clause" msgstr "ASC/DESC 예약어는 ON CONFLICT 절과 함께 사용할 수 없습니다." -#: parser/parse_clause.c:2780 +#: parser/parse_clause.c:3054 #, c-format msgid "NULLS FIRST/LAST is not allowed in ON CONFLICT clause" msgstr "NULLS FIRST/LAST 절은 ON CONFLICT 절과 함께 사용할 수 없습니다." -#: parser/parse_clause.c:2860 +#: parser/parse_clause.c:3134 #, c-format msgid "" "ON CONFLICT DO UPDATE requires inference specification or constraint name" msgstr "" -#: parser/parse_clause.c:2861 +#: parser/parse_clause.c:3135 #, c-format msgid "For example, ON CONFLICT (column_name)." msgstr "사용예, ON CONFLICT (칼럼이름)." -#: parser/parse_clause.c:2872 +#: parser/parse_clause.c:3146 #, c-format msgid "ON CONFLICT is not supported with system catalog tables" msgstr "ON CONFLICT 절은 시스템 카탈로그 테이블에서는 사용할 수 없습니다" -#: parser/parse_clause.c:2880 +#: parser/parse_clause.c:3154 #, c-format msgid "ON CONFLICT is not supported on table \"%s\" used as a catalog table" msgstr "" "\"%s\" 테이블에는 ON CONFLICT 기능을 사용할 수 없습니다. 이 테이블은 카탈로" "그 테이블로 사용됩니다." 
-#: parser/parse_clause.c:3012 +#: parser/parse_clause.c:3280 #, c-format msgid "operator %s is not a valid ordering operator" msgstr "%s 연산자는 유효한 순서 지정 연산자가 아님" -#: parser/parse_clause.c:3014 +#: parser/parse_clause.c:3282 #, c-format msgid "" "Ordering operators must be \"<\" or \">\" members of btree operator families." @@ -13596,7 +14760,7 @@ msgstr "" #: parser/parse_coerce.c:971 parser/parse_coerce.c:1001 #: parser/parse_coerce.c:1019 parser/parse_coerce.c:1034 -#: parser/parse_expr.c:2053 parser/parse_expr.c:2577 parser/parse_target.c:885 +#: parser/parse_expr.c:2144 parser/parse_expr.c:2732 parser/parse_target.c:936 #, c-format msgid "cannot cast type %s to %s" msgstr "%s 자료형을 %s 자료형으로 형변환할 수 없습니다." @@ -13604,7 +14768,7 @@ msgstr "%s 자료형을 %s 자료형으로 형변환할 수 없습니다." #: parser/parse_coerce.c:1004 #, c-format msgid "Input has too few columns." -msgstr "입력에 너무 적은 열을 지정했습니다." +msgstr "입력에 너무 적은 칼럼을 지정했습니다." #: parser/parse_coerce.c:1022 #, c-format @@ -13614,119 +14778,103 @@ msgstr "%s 자료형을 %s 자료형으로 형변환할 수 없습니다 해당 #: parser/parse_coerce.c:1037 #, c-format msgid "Input has too many columns." -msgstr "입력에 너무 많은 열을 지정했습니다." +msgstr "입력에 너무 많은 칼럼을 지정했습니다." #. translator: first %s is name of a SQL construct, eg WHERE -#: parser/parse_coerce.c:1080 +#. translator: first %s is name of a SQL construct, eg LIMIT +#: parser/parse_coerce.c:1080 parser/parse_coerce.c:1128 #, c-format -msgid "argument of %s must be type boolean, not type %s" -msgstr "%s의 인자는 %s 자료형이 아니라, boolean 자료형이어야 합니다" +msgid "argument of %s must be type %s, not type %s" +msgstr "%s의 인자는 %s 자료형이어야 함(%s 자료형이 아님)" #. translator: %s is name of a SQL construct, eg WHERE #. translator: %s is name of a SQL construct, eg LIMIT -#: parser/parse_coerce.c:1090 parser/parse_coerce.c:1139 +#: parser/parse_coerce.c:1091 parser/parse_coerce.c:1140 #, c-format msgid "argument of %s must not return a set" msgstr "%s 의 인자는 set(집합) 을 return할수 없습니다." -#. translator: first %s is name of a SQL construct, eg LIMIT -#: parser/parse_coerce.c:1127 -#, c-format -msgid "argument of %s must be type %s, not type %s" -msgstr "%s의 인자는 %s 자료형이어야 함(%s 자료형이 아님)" - #. translator: first %s is name of a SQL construct, eg CASE -#: parser/parse_coerce.c:1260 +#: parser/parse_coerce.c:1280 #, c-format msgid "%s types %s and %s cannot be matched" msgstr "%s 자료형 %s 와 %s 는 서로 매치되지 않습니다" #. translator: first %s is name of a SQL construct, eg CASE -#: parser/parse_coerce.c:1327 +#: parser/parse_coerce.c:1347 #, c-format msgid "%s could not convert type %s to %s" msgstr "%s 는 자료형 %s 자료형에서 %s 자료형으로 변환될 수 없습니다." -#: parser/parse_coerce.c:1629 +#: parser/parse_coerce.c:1649 #, c-format msgid "arguments declared \"anyelement\" are not all alike" msgstr "\"anyelement\" 로 선언된 인자들이 모두 같지 않습니다" -#: parser/parse_coerce.c:1649 +#: parser/parse_coerce.c:1669 #, c-format msgid "arguments declared \"anyarray\" are not all alike" msgstr "\"anyarray\" 로 선언된 인자들이 모두 같지 않습니다." -#: parser/parse_coerce.c:1669 +#: parser/parse_coerce.c:1689 #, c-format msgid "arguments declared \"anyrange\" are not all alike" msgstr "\"anyarray\" 로 선언된 인자들이 모두 같지 않습니다." 
-#: parser/parse_coerce.c:1698 parser/parse_coerce.c:1909 -#: parser/parse_coerce.c:1943 +#: parser/parse_coerce.c:1718 parser/parse_coerce.c:1933 +#: parser/parse_coerce.c:1967 #, c-format -msgid "argument declared \"anyarray\" is not an array but type %s" -msgstr "\"anyarray\" 로 선언된 인자들이 array 가 아니고, %s 자료형입니다" +msgid "argument declared %s is not an array but type %s" +msgstr "%s 이름으로 선언된 인자가 array가 아니고, %s 자료형입니다" -#: parser/parse_coerce.c:1714 +#: parser/parse_coerce.c:1734 parser/parse_coerce.c:1773 #, c-format -msgid "" -"argument declared \"anyarray\" is not consistent with argument declared " -"\"anyelement\"" +msgid "argument declared %s is not consistent with argument declared %s" msgstr "" -"\"anyarray\" 로 선언된 인자들이 \"anyelement\" 로 선언된 인자들과 일관성이 있" -"질 않습니다" - -#: parser/parse_coerce.c:1735 parser/parse_coerce.c:1956 -#, c-format -msgid "argument declared \"anyrange\" is not a range type but type %s" -msgstr "\"anyarray\" 로 선언된 인자들이 range 자료형이 아니고, %s 자료형입니다" +"%s 이름으로 선언된 인자가 %s 형으로 선언된 인자들과 일관성이 없습" +"니다" -#: parser/parse_coerce.c:1751 +#: parser/parse_coerce.c:1756 parser/parse_coerce.c:1980 #, c-format -msgid "" -"argument declared \"anyrange\" is not consistent with argument declared " -"\"anyelement\"" -msgstr "" -"\"anyrange\" 로 선언된 인자들이 \"anyelement\" 로 선언된 인자들과 일관성이 있" -"질 않습니다" +msgid "argument declared %s is not a range type but type %s" +msgstr "%s 로 선언된 인자가 range 자료형이 아니고, %s 자료형입니다" -#: parser/parse_coerce.c:1771 +#: parser/parse_coerce.c:1794 #, c-format -msgid "could not determine polymorphic type because input has type \"unknown\"" -msgstr "입력에 \"unknown\" 형식이 있으므로 다변 형식을 확인할 수 없음" +msgid "could not determine polymorphic type because input has type %s" +msgstr "입력에 %s 형이 있어 다변 형식을 확인할 수 없음" -#: parser/parse_coerce.c:1781 +#: parser/parse_coerce.c:1805 #, c-format msgid "type matched to anynonarray is an array type: %s" msgstr "anynonarray에 일치된 형식이 배열 형식임: %s" -#: parser/parse_coerce.c:1791 +#: parser/parse_coerce.c:1815 #, c-format msgid "type matched to anyenum is not an enum type: %s" msgstr "anyenum에 일치된 형식이 열거 형식이 아님: %s" -#: parser/parse_coerce.c:1831 parser/parse_coerce.c:1861 +#: parser/parse_coerce.c:1855 parser/parse_coerce.c:1885 #, c-format msgid "could not find range type for data type %s" msgstr "자료형 %s 에 대해서는 배열 자료형을 사용할 수 없습니다" #: parser/parse_collate.c:228 parser/parse_collate.c:475 -#: parser/parse_collate.c:986 +#: parser/parse_collate.c:981 #, c-format msgid "collation mismatch between implicit collations \"%s\" and \"%s\"" msgstr "" "암묵적으로 선택된 \"%s\" 정렬 규칙와 \"%s\" 정렬 규칙이 매칭되지 않습니다" #: parser/parse_collate.c:231 parser/parse_collate.c:478 -#: parser/parse_collate.c:989 +#: parser/parse_collate.c:984 #, c-format msgid "" "You can choose the collation by applying the COLLATE clause to one or both " "expressions." msgstr "한 쪽 또는 서로 COLLATE 절을 이용해 정렬 규칙을 지정하세요" -#: parser/parse_collate.c:834 +#: parser/parse_collate.c:831 #, c-format msgid "collation mismatch between explicit collations \"%s\" and \"%s\"" msgstr "" @@ -13799,324 +14947,352 @@ msgstr "" msgid "Use the COLLATE clause to set the collation of the non-recursive term." 
msgstr "" -#: parser/parse_cte.c:419 +#: parser/parse_cte.c:418 #, c-format msgid "WITH query \"%s\" has %d columns available but %d columns specified" msgstr "" "\"%s\" WITH 쿼리에는 %d개의 칼럼을 사용할 수 있는데 %d개의 칼럼이 지정됨" -#: parser/parse_cte.c:599 +#: parser/parse_cte.c:598 #, c-format msgid "mutual recursion between WITH items is not implemented" msgstr "WITH 항목 간의 상호 재귀가 구현되지 않음" -#: parser/parse_cte.c:651 +#: parser/parse_cte.c:650 #, c-format msgid "recursive query \"%s\" must not contain data-modifying statements" msgstr "\"%s\" 재귀 쿼리에 자료 변경 구문이 포함될 수 없습니다." -#: parser/parse_cte.c:659 +#: parser/parse_cte.c:658 #, c-format msgid "" "recursive query \"%s\" does not have the form non-recursive-term UNION [ALL] " "recursive-term" msgstr "\"%s\" 재귀 쿼리에 비재귀 조건 형태의 UNION [ALL] 재귀 조건이 없음" -#: parser/parse_cte.c:703 +#: parser/parse_cte.c:702 #, c-format msgid "ORDER BY in a recursive query is not implemented" msgstr "재귀 쿼리의 ORDER BY가 구현되지 않음" -#: parser/parse_cte.c:709 +#: parser/parse_cte.c:708 #, c-format msgid "OFFSET in a recursive query is not implemented" msgstr "재귀 쿼리의 OFFSET이 구현되지 않음" -#: parser/parse_cte.c:715 +#: parser/parse_cte.c:714 #, c-format msgid "LIMIT in a recursive query is not implemented" msgstr "재귀 쿼리의 LIMIT가 구현되지 않음" -#: parser/parse_cte.c:721 +#: parser/parse_cte.c:720 #, c-format msgid "FOR UPDATE/SHARE in a recursive query is not implemented" msgstr "재귀 쿼리의 FOR UPDATE/SHARE가 구현되지 않음" -#: parser/parse_cte.c:778 +#: parser/parse_cte.c:777 #, c-format msgid "recursive reference to query \"%s\" must not appear more than once" msgstr "\"%s\" 쿼리에 대한 재귀 참조가 여러 번 표시되지 않아야 함" -#: parser/parse_expr.c:390 parser/parse_relation.c:3176 -#: parser/parse_relation.c:3196 +#: parser/parse_expr.c:350 +#, c-format +msgid "DEFAULT is not allowed in this context" +msgstr "이 영역에서는 DEFAULT를 사용할 수 없습니다" + +#: parser/parse_expr.c:403 parser/parse_relation.c:3281 +#: parser/parse_relation.c:3301 #, c-format msgid "column %s.%s does not exist" msgstr "%s.%s 칼럼 없음" -#: parser/parse_expr.c:402 +#: parser/parse_expr.c:415 #, c-format msgid "column \"%s\" not found in data type %s" -msgstr "\"%s\" 열은 %s 자료형을 찾을 수 없음" +msgstr "\"%s\" 칼럼은 %s 자료형을 찾을 수 없음" -#: parser/parse_expr.c:408 +#: parser/parse_expr.c:421 #, c-format msgid "could not identify column \"%s\" in record data type" -msgstr "레코드 데이터 형식에서 \"%s\" 열을 식별할 수 없음" +msgstr "레코드 데이터 형식에서 \"%s\" 칼럼을 식별할 수 없음" -#: parser/parse_expr.c:414 +#: parser/parse_expr.c:427 #, c-format msgid "column notation .%s applied to type %s, which is not a composite type" msgstr "" ".%s 표현이 %s 자료형 사용되었는데, 이는 복소수형 (complex type)이 아닙니다" -#: parser/parse_expr.c:444 parser/parse_target.c:671 +#: parser/parse_expr.c:458 parser/parse_target.c:722 #, c-format msgid "row expansion via \"*\" is not supported here" msgstr "\"*\"를 통한 칼럼 확장은 여기서 지원되지 않음" -#: parser/parse_expr.c:770 parser/parse_relation.c:668 -#: parser/parse_relation.c:768 parser/parse_target.c:1120 +#: parser/parse_expr.c:767 parser/parse_relation.c:689 +#: parser/parse_relation.c:789 parser/parse_target.c:1171 #, c-format msgid "column reference \"%s\" is ambiguous" msgstr "칼럼 참조 \"%s\" 가 모호합니다." 
-#: parser/parse_expr.c:826 parser/parse_param.c:110 parser/parse_param.c:142 +#: parser/parse_expr.c:823 parser/parse_param.c:110 parser/parse_param.c:142 #: parser/parse_param.c:199 parser/parse_param.c:298 #, c-format msgid "there is no parameter $%d" msgstr "$%d 매개 변수가 없습니다" -#: parser/parse_expr.c:1067 +#: parser/parse_expr.c:1066 #, c-format msgid "NULLIF requires = operator to yield boolean" msgstr "NULIF 절은 boolean 값을 얻기 위해서 = 연산자를 필요로 합니다" -#: parser/parse_expr.c:1501 gram.y:9887 +#. translator: %s is name of a SQL construct, eg NULLIF +#: parser/parse_expr.c:1072 parser/parse_expr.c:3048 +#, c-format +msgid "%s must not return a set" +msgstr "%s에서는 집합을 반환할 수 없습니다." + +#: parser/parse_expr.c:1519 parser/parse_expr.c:1551 #, c-format msgid "number of columns does not match number of values" -msgstr "열의 개수와, values의 개수가 틀립니다" +msgstr "칼럼의 개수와, values의 개수가 틀립니다" + +#: parser/parse_expr.c:1565 +#, c-format +msgid "" +"source for a multiple-column UPDATE item must be a sub-SELECT or ROW() " +"expression" +msgstr "" + +#. translator: %s is name of a SQL construct, eg GROUP BY +#: parser/parse_expr.c:1759 parser/parse_expr.c:2235 parser/parse_func.c:2256 +#, c-format +msgid "set-returning functions are not allowed in %s" +msgstr "%s 안에서는 집합 반환 함수를 사용할 수 없음" -#: parser/parse_expr.c:1730 +#: parser/parse_expr.c:1819 msgid "cannot use subquery in check constraint" msgstr "체크 제약 조건에서는 서브쿼리를 사용할 수 없습니다" -#: parser/parse_expr.c:1734 +#: parser/parse_expr.c:1823 msgid "cannot use subquery in DEFAULT expression" msgstr "DEFAULT 식에서는 서브쿼리를 사용할 수 없습니다" -#: parser/parse_expr.c:1737 +#: parser/parse_expr.c:1826 msgid "cannot use subquery in index expression" msgstr "인덱스 식(expression)에 서브쿼리를 사용할 수 없습니다" -#: parser/parse_expr.c:1740 +#: parser/parse_expr.c:1829 msgid "cannot use subquery in index predicate" msgstr "인덱스 술어(predicate)에 서브쿼리를 사용할 수 없습니다" -#: parser/parse_expr.c:1743 +#: parser/parse_expr.c:1832 msgid "cannot use subquery in transform expression" msgstr "transform 식(expression)에 서브쿼리를 사용할 수 없습니다" -#: parser/parse_expr.c:1746 +#: parser/parse_expr.c:1835 msgid "cannot use subquery in EXECUTE parameter" msgstr "EXECUTE 매개 변수로 서브쿼리를 사용할 수 없습니다" -#: parser/parse_expr.c:1749 +#: parser/parse_expr.c:1838 msgid "cannot use subquery in trigger WHEN condition" msgstr "트리거 WHEN 조건절에서는 서브쿼리를 사용할 수 없습니다" -#: parser/parse_expr.c:1803 +#: parser/parse_expr.c:1841 +msgid "cannot use subquery in partition key expression" +msgstr "파티션 키 표현식에 서브쿼리를 사용할 수 없습니다" + +#: parser/parse_expr.c:1894 #, c-format msgid "subquery must return only one column" msgstr "subquery는 오로지 한개의 열만을 돌려 주어야 합니다." -#: parser/parse_expr.c:1887 +#: parser/parse_expr.c:1978 #, c-format msgid "subquery has too many columns" -msgstr "subquery 에가 너무 많은 열을 가집니다" +msgstr "subquery 에가 너무 많은 칼럼을 가집니다" -#: parser/parse_expr.c:1892 +#: parser/parse_expr.c:1983 #, c-format msgid "subquery has too few columns" msgstr "subquery 에 명시된 열 수가 너무 적다" -#: parser/parse_expr.c:1993 +#: parser/parse_expr.c:2084 #, c-format msgid "cannot determine type of empty array" msgstr "빈 배열의 자료형을 확인할 수 없음" -#: parser/parse_expr.c:1994 +#: parser/parse_expr.c:2085 #, c-format msgid "Explicitly cast to the desired type, for example ARRAY[]::integer[]." msgstr "원하는 형식으로 명시적으로 형변환하십시오(예: ARRAY[]::integer[])." 
-#: parser/parse_expr.c:2008 +#: parser/parse_expr.c:2099 #, c-format msgid "could not find element type for data type %s" msgstr "%s 자료형의 요소 자료형을 찾을 수 없음" -#: parser/parse_expr.c:2231 +#: parser/parse_expr.c:2386 #, c-format msgid "unnamed XML attribute value must be a column reference" msgstr "이름이 지정되지 않은 XML 속성 값은 열 참조여야 함" -#: parser/parse_expr.c:2232 +#: parser/parse_expr.c:2387 #, c-format msgid "unnamed XML element value must be a column reference" msgstr "이름이 지정되지 않은 XML 요소 값은 열 참조여야 함" -#: parser/parse_expr.c:2247 +#: parser/parse_expr.c:2402 #, c-format msgid "XML attribute name \"%s\" appears more than once" msgstr "\"%s\" XML 속성 이름이 여러 번 표시됨" -#: parser/parse_expr.c:2354 +#: parser/parse_expr.c:2509 #, c-format msgid "cannot cast XMLSERIALIZE result to %s" msgstr "XMLSERIALIZE 결과를 %s 형으로 바꿀 수 없음" -#: parser/parse_expr.c:2650 parser/parse_expr.c:2846 +#: parser/parse_expr.c:2805 parser/parse_expr.c:3001 #, c-format msgid "unequal number of entries in row expressions" msgstr "행 표현식에서 항목 수가 일치하지 않습니다" -#: parser/parse_expr.c:2660 +#: parser/parse_expr.c:2815 #, c-format msgid "cannot compare rows of zero length" msgstr "길이가 영(0)인 행들은 비교할 수 없습니다" -#: parser/parse_expr.c:2685 +#: parser/parse_expr.c:2840 #, c-format msgid "row comparison operator must yield type boolean, not type %s" msgstr "" "행 비교 연산자는 boolean형을 리턴해야합니다. %s 자료형을 사용할 수 없습니다" -#: parser/parse_expr.c:2692 +#: parser/parse_expr.c:2847 #, c-format msgid "row comparison operator must not return a set" msgstr "행 비교 연산자는 set을 리턴할 수 없습니다" -#: parser/parse_expr.c:2751 parser/parse_expr.c:2792 +#: parser/parse_expr.c:2906 parser/parse_expr.c:2947 #, c-format msgid "could not determine interpretation of row comparison operator %s" msgstr "%s 행 비교 연산자의 구문을 분석할 수 없습니다" -#: parser/parse_expr.c:2753 +#: parser/parse_expr.c:2908 #, c-format msgid "" "Row comparison operators must be associated with btree operator families." msgstr "로우 비교 연산자를 btree 연산자 패밀리와 연결해야 함" -#: parser/parse_expr.c:2794 +#: parser/parse_expr.c:2949 #, c-format msgid "There are multiple equally-plausible candidates." msgstr "여러 가지 등식들이 성립할 수 있는 가능성이 있습니다" -#: parser/parse_expr.c:2886 +#: parser/parse_expr.c:3042 #, c-format msgid "IS DISTINCT FROM requires = operator to yield boolean" msgstr "" "IS DISTINCT FROM 절에서 boolean 값을 얻기 위해서 = 연산자를 필요로 합니다" -#: parser/parse_expr.c:3199 parser/parse_expr.c:3217 +#: parser/parse_expr.c:3361 parser/parse_expr.c:3379 #, c-format msgid "operator precedence change: %s is now lower precedence than %s" msgstr "연산자 우선순위 변경됨: %s 연산자 우선순위가 %s 연산보다 낮습니다" -#: parser/parse_func.c:174 +#: parser/parse_func.c:179 #, c-format msgid "argument name \"%s\" used more than once" msgstr "\"%s\" 이름의 매개 변수가 여러 번 사용 됨" -#: parser/parse_func.c:185 +#: parser/parse_func.c:190 #, c-format msgid "positional argument cannot follow named argument" msgstr "" -#: parser/parse_func.c:270 +#: parser/parse_func.c:275 #, c-format msgid "%s(*) specified, but %s is not an aggregate function" msgstr "%s(*) 가 명시되어 있는데, 이 %s 함수는 집계 함수가 아닙니다." 
-#: parser/parse_func.c:277 +#: parser/parse_func.c:282 #, c-format msgid "DISTINCT specified, but %s is not an aggregate function" msgstr "DISTINCT 가 명시되어 있는데, 그러나 이 %s 함수는 집계 함수가 아닙니다" -#: parser/parse_func.c:283 +#: parser/parse_func.c:288 #, c-format msgid "WITHIN GROUP specified, but %s is not an aggregate function" msgstr "WITHIN GROUP 절이 명시되어 있는데, 이 %s 함수는 집계 함수가 아닙니다" -#: parser/parse_func.c:289 +#: parser/parse_func.c:294 #, c-format msgid "ORDER BY specified, but %s is not an aggregate function" msgstr "ORDER BY 절이 명시되어 있는데, 이 %s 함수는 집계 함수가 아닙니다." -#: parser/parse_func.c:295 +#: parser/parse_func.c:300 #, c-format msgid "FILTER specified, but %s is not an aggregate function" msgstr "FILTER 절이 명시되어 있는데, 이 %s 함수는 집계 함수가 아닙니다" -#: parser/parse_func.c:301 +#: parser/parse_func.c:306 #, c-format msgid "" "OVER specified, but %s is not a window function nor an aggregate function" msgstr "OVER 절이 지정되었는데 %s 함수는 윈도우 함수 또는 집계 함수가 아님" -#: parser/parse_func.c:331 +#: parser/parse_func.c:336 #, c-format msgid "WITHIN GROUP is required for ordered-set aggregate %s" msgstr "순서가 있는 집계함수인 %s 때문에 WITHIN GROUP 절이 필요합니다" -#: parser/parse_func.c:337 +#: parser/parse_func.c:342 #, c-format msgid "OVER is not supported for ordered-set aggregate %s" msgstr "OVER 절에서 정렬된 세트 집계 %s 함수를 지원하지 않음" -#: parser/parse_func.c:368 parser/parse_func.c:397 +#: parser/parse_func.c:373 parser/parse_func.c:402 #, c-format msgid "" "There is an ordered-set aggregate %s, but it requires %d direct arguments, " "not %d." msgstr "" -#: parser/parse_func.c:422 +#: parser/parse_func.c:427 #, c-format msgid "" "To use the hypothetical-set aggregate %s, the number of hypothetical direct " "arguments (here %d) must match the number of ordering columns (here %d)." msgstr "" -#: parser/parse_func.c:436 +#: parser/parse_func.c:441 #, c-format msgid "" "There is an ordered-set aggregate %s, but it requires at least %d direct " "arguments." msgstr "" -#: parser/parse_func.c:455 +#: parser/parse_func.c:460 #, c-format msgid "%s is not an ordered-set aggregate, so it cannot have WITHIN GROUP" msgstr "" "%s 함수는 순사가 있는 세트 집계함수가 아니여서 WITHIN GROUP 절을 사용할 수 없" "습니다" -#: parser/parse_func.c:468 +#: parser/parse_func.c:473 #, c-format msgid "window function %s requires an OVER clause" msgstr "%s 윈도우 함수 호출에는 OVER 절이 필요함" -#: parser/parse_func.c:475 +#: parser/parse_func.c:480 #, c-format msgid "window function %s cannot have WITHIN GROUP" msgstr "%s 윈도우 함수는 WITHIN GROUP 절을 사용할 수 없음" -#: parser/parse_func.c:496 +#: parser/parse_func.c:501 #, c-format msgid "function %s is not unique" msgstr "함수 %s 는 유일성을 가지지 못합니다(not unique)" -#: parser/parse_func.c:499 +#: parser/parse_func.c:504 #, c-format msgid "" "Could not choose a best candidate function. You might need to add explicit " @@ -14125,7 +15301,7 @@ msgstr "" "제일 적당한 함수를 선택할 수 없습니다. 명시적 형변환자를 추가해야 할 수도 있" "습니다." -#: parser/parse_func.c:510 +#: parser/parse_func.c:515 #, c-format msgid "" "No aggregate function matches the given name and argument types. Perhaps you " @@ -14136,7 +15312,7 @@ msgstr "" "른 위치에 쓰지 않은 것 같습니다. ORDER BY 절은 모든 집계용 인자들 맨 뒤에 있" "어야 합니다." -#: parser/parse_func.c:521 +#: parser/parse_func.c:526 #, c-format msgid "" "No function matches the given name and argument types. You might need to add " @@ -14145,115 +15321,170 @@ msgstr "" "지정된 이름 및 인자 자료형과 일치하는 함수가 없습니다. 명시적 형변환자를 추가" "해야 할 수도 있습니다." 
-#: parser/parse_func.c:623 +#: parser/parse_func.c:628 #, c-format msgid "VARIADIC argument must be an array" msgstr "VARIADIC 매개 변수는 배열이어야 함" -#: parser/parse_func.c:671 parser/parse_func.c:735 +#: parser/parse_func.c:680 parser/parse_func.c:744 #, c-format msgid "%s(*) must be used to call a parameterless aggregate function" msgstr "%s(*) 사용할 때는 이 함수가 매개 변수 없는 집계 함수여야 합니다" -#: parser/parse_func.c:678 +#: parser/parse_func.c:687 #, c-format msgid "aggregates cannot return sets" msgstr "집계 함수는 세트를 반환할 수 없음" -#: parser/parse_func.c:693 +#: parser/parse_func.c:702 #, c-format msgid "aggregates cannot use named arguments" msgstr "집계 함수는 인자 이름을 사용할 수 없음" -#: parser/parse_func.c:725 +#: parser/parse_func.c:734 #, c-format msgid "DISTINCT is not implemented for window functions" msgstr "윈도우 함수에 대해 DISTINCT가 구현되지 않음" -#: parser/parse_func.c:745 +#: parser/parse_func.c:754 #, c-format msgid "aggregate ORDER BY is not implemented for window functions" msgstr "윈도우 함수에 대해 집계용 ORDER BY가 구현되지 않음" -#: parser/parse_func.c:754 +#: parser/parse_func.c:763 #, c-format msgid "FILTER is not implemented for non-aggregate window functions" msgstr "비집계 윈도우 함수에 대해 FILTER가 구현되지 않음" -#: parser/parse_func.c:760 +#: parser/parse_func.c:772 +#, c-format +msgid "window function calls cannot contain set-returning function calls" +msgstr "윈도우 함수 호출에 집합 반환 함수 호출을 포함할 수 없음" + +#: parser/parse_func.c:780 #, c-format msgid "window functions cannot return sets" msgstr "윈도우 함수는 세트를 반환할 수 없음" -#: parser/parse_func.c:2010 +#: parser/parse_func.c:1950 +#, c-format +msgid "function name \"%s\" is not unique" +msgstr "\"%s\" 함수 이름은 유일성을 가지지 못합니다(not unique)" + +#: parser/parse_func.c:1952 +#, c-format +msgid "Specify the argument list to select the function unambiguously." +msgstr "입력 인자를 다르게 해서 이 모호함을 피하세요." 
+ +#: parser/parse_func.c:1962 +#, c-format +msgid "could not find a function named \"%s\"" +msgstr "\"%s\" 함수를 찾을 수 없음" + +#: parser/parse_func.c:2064 #, c-format msgid "aggregate %s(*) does not exist" msgstr "%s(*) 집계 함수 없음" -#: parser/parse_func.c:2015 +#: parser/parse_func.c:2069 #, c-format msgid "aggregate %s does not exist" msgstr "%s 집계 함수 없음" -#: parser/parse_func.c:2034 +#: parser/parse_func.c:2088 #, c-format msgid "function %s is not an aggregate" msgstr "%s 함수는 집계 함수가 아닙니다" -#: parser/parse_node.c:84 +#: parser/parse_func.c:2140 +msgid "set-returning functions are not allowed in JOIN conditions" +msgstr "집합 반환 함수는 JOIN 조건에 사용할 수 없음" + +#: parser/parse_func.c:2161 +msgid "set-returning functions are not allowed in policy expressions" +msgstr "집합 반환 함수는 정책 식에 사용할 수 없음" + +#: parser/parse_func.c:2176 +msgid "set-returning functions are not allowed in window definitions" +msgstr "집합 반환 함수는 윈도우 함수 정의에 사용할 수 없음" + +#: parser/parse_func.c:2214 +msgid "set-returning functions are not allowed in check constraints" +msgstr "집합 반환 함수는 check 제약조건에 사용할 수 없음" + +#: parser/parse_func.c:2218 +msgid "set-returning functions are not allowed in DEFAULT expressions" +msgstr "집합 반환 함수는 DEFAULT 식에서 사용할 수 없음" + +#: parser/parse_func.c:2221 +msgid "set-returning functions are not allowed in index expressions" +msgstr "집합 반환 함수는 인덱스 식에서 사용할 수 없음" + +#: parser/parse_func.c:2224 +msgid "set-returning functions are not allowed in index predicates" +msgstr "집합 반환 함수는 함수 기반 인덱스에서 사용할 수 없음" + +#: parser/parse_func.c:2227 +msgid "set-returning functions are not allowed in transform expressions" +msgstr "집합 반환 함수는 transform 식에서 사용할 수 없음" + +#: parser/parse_func.c:2230 +msgid "set-returning functions are not allowed in EXECUTE parameters" +msgstr "집합 반환 함수는 EXECUTE 매개 변수 설정 값으로 사용할 수 없음" + +#: parser/parse_func.c:2233 +msgid "set-returning functions are not allowed in trigger WHEN conditions" +msgstr "집합 반환 함수는 트리거의 WHEN 조건절에서 사용할 수 없음" + +#: parser/parse_func.c:2236 +msgid "set-returning functions are not allowed in partition key expressions" +msgstr "집합 반환 함수는 파티션 키 표현식에서 사용할 수 없음" + +#: parser/parse_node.c:87 #, c-format msgid "target lists can have at most %d entries" msgstr "대상 목록은 최대 %d 개의 항목을 지정할 수 있습니다" -#: parser/parse_node.c:253 +#: parser/parse_node.c:256 #, c-format msgid "cannot subscript type %s because it is not an array" msgstr "" "자료형 %s 는 배열이 아니기 때문에 배열 하위 스크립트를 기술할 수 없습니다." -#: parser/parse_node.c:356 parser/parse_node.c:393 +#: parser/parse_node.c:358 parser/parse_node.c:395 #, c-format msgid "array subscript must have type integer" msgstr "배열 하위 스크립트는 반드시 정수형이어야 합니다." -#: parser/parse_node.c:424 +#: parser/parse_node.c:426 #, c-format msgid "array assignment requires type %s but expression is of type %s" msgstr "배열할당은 자료형 %s 가 필요하지만, 현재 표현식이 %s 자료형입니다" -#: parser/parse_oper.c:125 parser/parse_oper.c:722 utils/adt/regproc.c:583 -#: utils/adt/regproc.c:603 utils/adt/regproc.c:787 +#: parser/parse_oper.c:125 parser/parse_oper.c:724 utils/adt/regproc.c:520 +#: utils/adt/regproc.c:704 #, c-format msgid "operator does not exist: %s" msgstr "연산자 없음: %s" -#: parser/parse_oper.c:222 +#: parser/parse_oper.c:224 #, c-format msgid "Use an explicit ordering operator or modify the query." msgstr "" "명시적으로 순차연산자(ordering operator) 를 사용하던지, 또는 query 를 수정하" "도록 하세요." 
-#: parser/parse_oper.c:226 utils/adt/array_userfuncs.c:794 -#: utils/adt/array_userfuncs.c:933 utils/adt/arrayfuncs.c:3639 -#: utils/adt/arrayfuncs.c:4077 utils/adt/arrayfuncs.c:6039 -#: utils/adt/rowtypes.c:1167 -#, c-format -msgid "could not identify an equality operator for type %s" -msgstr "" -"%s 자료형에서 사용할 동등 연산자(equality operator)를 찾을 수 없습니다." - -#: parser/parse_oper.c:478 +#: parser/parse_oper.c:480 #, c-format msgid "operator requires run-time type coercion: %s" msgstr "이 연산자는 실행시에 형 강제전화이 필요합니다: %s" -#: parser/parse_oper.c:714 +#: parser/parse_oper.c:716 #, c-format msgid "operator is not unique: %s" msgstr "연산자가 고유하지 않습니다: %s" -#: parser/parse_oper.c:716 +#: parser/parse_oper.c:718 #, c-format msgid "" "Could not choose a best candidate operator. You might need to add explicit " @@ -14262,7 +15493,7 @@ msgstr "" "가장 적당한 연산자를 선택할 수 없습니다. 명시적 형변환자를 추가해야 할 수도 " "있습니다." -#: parser/parse_oper.c:724 +#: parser/parse_oper.c:726 #, c-format msgid "" "No operator matches the given name and argument type(s). You might need to " @@ -14271,22 +15502,22 @@ msgstr "" "지정된 이름 및 인자 형식과 일치하는 연산자가 없습니다. 명시적 형변환자를 추가" "해야 할 수도 있습니다." -#: parser/parse_oper.c:783 parser/parse_oper.c:897 +#: parser/parse_oper.c:787 parser/parse_oper.c:909 #, c-format msgid "operator is only a shell: %s" msgstr "연산자는 셸일 뿐임: %s" -#: parser/parse_oper.c:885 +#: parser/parse_oper.c:897 #, c-format msgid "op ANY/ALL (array) requires array on right side" msgstr "op ANY/ALL (array) 는 우측에 배열이 있어야 합니다." -#: parser/parse_oper.c:927 +#: parser/parse_oper.c:939 #, c-format msgid "op ANY/ALL (array) requires operator to yield boolean" msgstr "op ANY/ALL (array) 는 boolean 을 얻기 위한 연산자가 필요합니다." -#: parser/parse_oper.c:932 +#: parser/parse_oper.c:944 #, c-format msgid "op ANY/ALL (array) requires operator not to return a set" msgstr "op ANY/ALL (array) 는 set 을 return 하지 않는 연산자가 요구 됩니다." @@ -14296,27 +15527,27 @@ msgstr "op ANY/ALL (array) 는 set 을 return 하지 않는 연산자가 요구 msgid "inconsistent types deduced for parameter $%d" msgstr "inconsistent types deduced for parameter $%d" -#: parser/parse_relation.c:175 +#: parser/parse_relation.c:176 #, c-format msgid "table reference \"%s\" is ambiguous" msgstr "테이블 참조 \"%s\" 가 명확하지 않습니다 (ambiguous)." -#: parser/parse_relation.c:219 +#: parser/parse_relation.c:220 #, c-format msgid "table reference %u is ambiguous" msgstr "테이블 참조 %u 가 명확하지 않습니다 (ambiguous)." -#: parser/parse_relation.c:398 +#: parser/parse_relation.c:419 #, c-format msgid "table name \"%s\" specified more than once" msgstr "테이블 이름 \"%s\" 가 한번 이상 명시되어 있습니다." -#: parser/parse_relation.c:425 parser/parse_relation.c:3116 +#: parser/parse_relation.c:446 parser/parse_relation.c:3221 #, c-format msgid "invalid reference to FROM-clause entry for table \"%s\"" msgstr "\"%s\" 테이블을 사용하는 FROM 절에 대한 참조가 잘못 되었습니다." -#: parser/parse_relation.c:428 parser/parse_relation.c:3121 +#: parser/parse_relation.c:449 parser/parse_relation.c:3226 #, c-format msgid "" "There is an entry for table \"%s\", but it cannot be referenced from this " @@ -14324,30 +15555,31 @@ msgid "" msgstr "" "\"%s\" 테이블에 대한 항목이 있지만 이 쿼리 부분에서 참조할 수 없습니다." -#: parser/parse_relation.c:430 +#: parser/parse_relation.c:451 #, c-format msgid "The combining JOIN type must be INNER or LEFT for a LATERAL reference." 
msgstr "" -#: parser/parse_relation.c:706 +#: parser/parse_relation.c:727 #, c-format msgid "system column \"%s\" reference in check constraint is invalid" msgstr "제약 조건에서 참조하는 \"%s\" 시스템 칼럼이 없음" -#: parser/parse_relation.c:1066 parser/parse_relation.c:1346 -#: parser/parse_relation.c:1848 +#: parser/parse_relation.c:1086 parser/parse_relation.c:1372 +#: parser/parse_relation.c:1941 #, c-format msgid "table \"%s\" has %d columns available but %d columns specified" -msgstr "테이블 \"%s\" 에는 %d 개의 열이 있는데, %d 개의 열만이 명시되었습니다." +msgstr "" +"테이블 \"%s\" 에는 %d 개의 칼럼이 있는데, %d 개의 칼럼만 명시되었습니다." -#: parser/parse_relation.c:1153 +#: parser/parse_relation.c:1179 #, c-format msgid "" "There is a WITH item named \"%s\", but it cannot be referenced from this " "part of the query." msgstr "\"%s\"(이)라는 WITH 항목이 있지만 이 쿼리 부분에서 참조할 수 없습니다." -#: parser/parse_relation.c:1155 +#: parser/parse_relation.c:1181 #, c-format msgid "" "Use WITH RECURSIVE, or re-order the WITH items to remove forward references." @@ -14355,7 +15587,7 @@ msgstr "" "WITH RECURSIVE를 사용하거나 WITH 항목의 순서를 변경하여 정방향 참조를 제거하" "십시오." -#: parser/parse_relation.c:1466 +#: parser/parse_relation.c:1492 #, c-format msgid "" "a column definition list is only allowed for functions returning \"record\"" @@ -14363,56 +15595,58 @@ msgstr "" "열 정의 리스트 (column definition list) 는 오로지 \"record\" 를 리턴하는 함" "수 내에서만 허용됩니다." -#: parser/parse_relation.c:1475 +#: parser/parse_relation.c:1501 #, c-format msgid "a column definition list is required for functions returning \"record\"" msgstr "" "열 정의 리스트(column definition list)는 \"record\" 를 리턴하는 함수를 필요" "로 합니다" -#: parser/parse_relation.c:1554 +#: parser/parse_relation.c:1580 #, c-format msgid "function \"%s\" in FROM has unsupported return type %s" msgstr "" "FROM 절 내의 함수 \"%s\" 에 지원되지 않는 return 자료형 %s 이 있습니다." -#: parser/parse_relation.c:1676 +#: parser/parse_relation.c:1769 #, c-format msgid "VALUES lists \"%s\" have %d columns available but %d columns specified" msgstr "" -"VALUES 뒤에 오는 \"%s\" 구문에는 %d개의 열이 있는데, 지정한 열은 %d개 입니다" +"VALUES 뒤에 오는 \"%s\" 구문에는 %d개의 칼럼이 있는데, 지정한 칼럼은 %d개 입" +"니다" -#: parser/parse_relation.c:1731 +#: parser/parse_relation.c:1824 #, c-format msgid "joins can have at most %d columns" msgstr "조인에는 최대 %d개의 칼럼을 포함할 수 있음" -#: parser/parse_relation.c:1821 +#: parser/parse_relation.c:1914 #, c-format msgid "WITH query \"%s\" does not have a RETURNING clause" msgstr "" -#: parser/parse_relation.c:2738 parser/parse_relation.c:2900 +#: parser/parse_relation.c:2838 parser/parse_relation.c:2876 +#: parser/parse_relation.c:3005 #, c-format msgid "column %d of relation \"%s\" does not exist" -msgstr "%d번째 열이 없습니다. 해당 릴레이션: \"%s\"" +msgstr "%d번째 칼럼이 없습니다. 해당 릴레이션: \"%s\"" -#: parser/parse_relation.c:3119 +#: parser/parse_relation.c:3224 #, c-format msgid "Perhaps you meant to reference the table alias \"%s\"." msgstr "아 \"%s\" alias를 참조해야 할 것 같습니다." -#: parser/parse_relation.c:3127 +#: parser/parse_relation.c:3232 #, c-format msgid "missing FROM-clause entry for table \"%s\"" msgstr "테이블 \"%s\"에 FROM 절이 빠져 있습니다." -#: parser/parse_relation.c:3179 +#: parser/parse_relation.c:3284 #, c-format msgid "Perhaps you meant to reference the column \"%s.%s\"." msgstr "아마 \"%s.%s\" 칼럼을 참조하는 것 같습니다." -#: parser/parse_relation.c:3181 +#: parser/parse_relation.c:3286 #, c-format msgid "" "There is a column named \"%s\" in table \"%s\", but it cannot be referenced " @@ -14421,33 +15655,33 @@ msgstr "" "\"%s\" 이름의 칼럼이 \"%s\" 테이블에 있지만, 이 쿼리의 이 부분에서는 참조될 " "수 없습니다." 
-#: parser/parse_relation.c:3198 +#: parser/parse_relation.c:3303 #, c-format msgid "" "Perhaps you meant to reference the column \"%s.%s\" or the column \"%s.%s\"." msgstr "아마 \"%s.%s\" 칼럼이나 \"%s.%s\" 칼럼을 참조하는 것 같습니다." -#: parser/parse_target.c:432 parser/parse_target.c:724 +#: parser/parse_target.c:483 parser/parse_target.c:775 #, c-format msgid "cannot assign to system column \"%s\"" msgstr "시스템 열 \"%s\"에 할당할 수 없습니다." -#: parser/parse_target.c:460 +#: parser/parse_target.c:511 #, c-format msgid "cannot set an array element to DEFAULT" msgstr "배열 요소를 DEFAULT 로 설정할 수 없습니다." -#: parser/parse_target.c:465 +#: parser/parse_target.c:516 #, c-format msgid "cannot set a subfield to DEFAULT" msgstr "하위필드를 DEFAULT로 설정할 수 없습니다." -#: parser/parse_target.c:534 +#: parser/parse_target.c:585 #, c-format msgid "column \"%s\" is of type %s but expression is of type %s" msgstr "열 \"%s\"은(는) %s 자료형인데 표현식은 %s 자료형입니다." -#: parser/parse_target.c:708 +#: parser/parse_target.c:759 #, c-format msgid "" "cannot assign to field \"%s\" of column \"%s\" because its type %s is not a " @@ -14456,28 +15690,28 @@ msgstr "" "\"%s\" 필드 (대상 열 \"%s\")를 지정할 수 없음, %s 자료형은 복합자료형이 아니" "기 때문" -#: parser/parse_target.c:717 +#: parser/parse_target.c:768 #, c-format msgid "" "cannot assign to field \"%s\" of column \"%s\" because there is no such " "column in data type %s" msgstr "" -"\"%s\" 필드 (대상 열 \"%s\")를 지정할 수 없음, %s 자료형에서 그런 열을 찾을 " -"수 없음" +"\"%s\" 필드 (대상 열 \"%s\")를 지정할 수 없음, %s 자료형에서 그런 칼럼을 찾" +"을 수 없음" -#: parser/parse_target.c:784 +#: parser/parse_target.c:835 #, c-format msgid "" "array assignment to \"%s\" requires type %s but expression is of type %s" msgstr "" "\"%s\" 열에 사용된 자료형은 %s 가 필요하지만, 현재 표현식이 %s 자료형입니다" -#: parser/parse_target.c:794 +#: parser/parse_target.c:845 #, c-format msgid "subfield \"%s\" is of type %s but expression is of type %s" msgstr "하위필드 \"%s\" 는 %s 자료형인데 표현식은 %s 자료형입니다." -#: parser/parse_target.c:1210 +#: parser/parse_target.c:1261 #, c-format msgid "SELECT * with no tables specified is not valid" msgstr "테이블이 명시되지 않은 SELECT * 구문은 유효하지 않습니다." @@ -14499,7 +15733,7 @@ msgstr "" msgid "type reference %s converted to %s" msgstr "ype reference %s 가 %s 로 변환되었습니다." -#: parser/parse_type.c:261 parser/parse_type.c:805 utils/cache/typcache.c:239 +#: parser/parse_type.c:261 parser/parse_type.c:804 utils/cache/typcache.c:243 #, c-format msgid "type \"%s\" is only a shell" msgstr "자료형 \"%s\" 는 오로지 shell 에만 있습니다. " @@ -14514,151 +15748,181 @@ msgstr "\"%s\" 형식에는 형식 한정자를 사용할 수 없음" msgid "type modifiers must be simple constants or identifiers" msgstr "자료형 한정자는 단순 상수 또는 식별자여야 함" -#: parser/parse_type.c:671 parser/parse_type.c:770 +#: parser/parse_type.c:670 parser/parse_type.c:769 #, c-format msgid "invalid type name \"%s\"" msgstr "\"%s\" 자료형 이름은 유효하지 않은 자료형입니다." 
-#: parser/parse_utilcmd.c:384 +#: parser/parse_utilcmd.c:266 #, c-format -msgid "array of serial is not implemented" -msgstr "serial 배열이 구현되지 않음" +msgid "cannot create partitioned table as inheritance child" +msgstr "상속 하위 테이블로 파티션된 테이블을 만들 수 없음" -#: parser/parse_utilcmd.c:432 +#: parser/parse_utilcmd.c:436 #, c-format msgid "%s will create implicit sequence \"%s\" for serial column \"%s.%s\"" msgstr "" "%s 명령으로 \"%s\" 시퀀스가 자동으로 만들어짐 (\"%s.%s\" serial 열 때문)" -#: parser/parse_utilcmd.c:526 parser/parse_utilcmd.c:538 +#: parser/parse_utilcmd.c:551 +#, c-format +msgid "array of serial is not implemented" +msgstr "serial 배열이 구현되지 않음" + +#: parser/parse_utilcmd.c:627 parser/parse_utilcmd.c:639 #, c-format msgid "" "conflicting NULL/NOT NULL declarations for column \"%s\" of table \"%s\"" msgstr "NULL/NOT NULL 선언이 서로 충돌합니다 : column \"%s\" of table \"%s\"" -#: parser/parse_utilcmd.c:550 +#: parser/parse_utilcmd.c:651 #, c-format msgid "multiple default values specified for column \"%s\" of table \"%s\"" -msgstr "\"%s\" 열(\"%s\" 테이블)에 대해 여러 개의 기본 값이 지정됨" +msgstr "\"%s\" 칼럼(\"%s\" 테이블)에 대해 여러 개의 기본 값이 지정됨" + +#: parser/parse_utilcmd.c:672 +#, c-format +msgid "multiple identity specifications for column \"%s\" of table \"%s\"" +msgstr "\"%s\" 칼럼(\"%s\" 테이블)에 대해 여러 개의 식별자 지정이 사용되었음" -#: parser/parse_utilcmd.c:567 parser/parse_utilcmd.c:658 +#: parser/parse_utilcmd.c:695 parser/parse_utilcmd.c:812 #, c-format msgid "primary key constraints are not supported on foreign tables" msgstr "기본키 제약 조건을 외부 테이블에서는 사용할 수 없음" -#: parser/parse_utilcmd.c:576 parser/parse_utilcmd.c:668 +#: parser/parse_utilcmd.c:701 parser/parse_utilcmd.c:818 +#, c-format +msgid "primary key constraints are not supported on partitioned tables" +msgstr "기본키 제약 조건은 파티션된 테이블에서는 사용할 수 없음" + +#: parser/parse_utilcmd.c:710 parser/parse_utilcmd.c:828 #, c-format msgid "unique constraints are not supported on foreign tables" msgstr "유니크 제약 조건은 외부 테이블에서는 사용할 수 없음" -#: parser/parse_utilcmd.c:593 parser/parse_utilcmd.c:692 +#: parser/parse_utilcmd.c:716 parser/parse_utilcmd.c:834 +#, c-format +msgid "unique constraints are not supported on partitioned tables" +msgstr "유니크 제약 조건은 파티션된 테이블에서는 사용할 수 없음" + +#: parser/parse_utilcmd.c:733 parser/parse_utilcmd.c:864 #, c-format msgid "foreign key constraints are not supported on foreign tables" msgstr "참조키 제약 조건은 외부 테이블에서는 사용할 수 없음" -#: parser/parse_utilcmd.c:678 +#: parser/parse_utilcmd.c:739 parser/parse_utilcmd.c:870 +#, c-format +msgid "foreign key constraints are not supported on partitioned tables" +msgstr "참조키 제약 조건은 파티션된 테이블에서는 사용할 수 없음" + +#: parser/parse_utilcmd.c:767 +#, c-format +msgid "both default and identity specified for column \"%s\" of table \"%s\"" +msgstr "\"%s\" 칼럼(\"%s\" 테이블)에 대해 default와 식별자 정의가 함께 있음" + +#: parser/parse_utilcmd.c:844 #, c-format msgid "exclusion constraints are not supported on foreign tables" msgstr "제외 제약 조건은 외부 테이블에서는 사용할 수 없음" -#: parser/parse_utilcmd.c:742 +#: parser/parse_utilcmd.c:850 +#, c-format +msgid "exclusion constraints are not supported on partitioned tables" +msgstr "제외 제약 조건은 파티션된 테이블에서는 사용할 수 없음" + +#: parser/parse_utilcmd.c:920 #, c-format msgid "LIKE is not supported for creating foreign tables" msgstr "외부 테이블을 만들 때는 LIKE 옵션을 쓸 수 없음" -#: parser/parse_utilcmd.c:1275 parser/parse_utilcmd.c:1351 +#: parser/parse_utilcmd.c:1475 parser/parse_utilcmd.c:1551 #, c-format msgid "Index \"%s\" contains a whole-row table reference." 
msgstr "" -#: parser/parse_utilcmd.c:1621 +#: parser/parse_utilcmd.c:1820 #, c-format msgid "cannot use an existing index in CREATE TABLE" msgstr "" -#: parser/parse_utilcmd.c:1641 +#: parser/parse_utilcmd.c:1840 #, c-format msgid "index \"%s\" is already associated with a constraint" msgstr "" -#: parser/parse_utilcmd.c:1649 +#: parser/parse_utilcmd.c:1848 #, c-format msgid "index \"%s\" does not belong to table \"%s\"" msgstr "\"%s\" 인덱스가 \"%s\" 테이블용이 아님" -#: parser/parse_utilcmd.c:1656 +#: parser/parse_utilcmd.c:1855 #, c-format msgid "index \"%s\" is not valid" msgstr "\"%s\" 인덱스는 사용가능 상태가 아님" -#: parser/parse_utilcmd.c:1662 +#: parser/parse_utilcmd.c:1861 #, c-format msgid "\"%s\" is not a unique index" msgstr "\"%s\" 객체는 유니크 인덱스가 아닙니다" -#: parser/parse_utilcmd.c:1663 parser/parse_utilcmd.c:1670 -#: parser/parse_utilcmd.c:1677 parser/parse_utilcmd.c:1747 +#: parser/parse_utilcmd.c:1862 parser/parse_utilcmd.c:1869 +#: parser/parse_utilcmd.c:1876 parser/parse_utilcmd.c:1946 #, c-format msgid "Cannot create a primary key or unique constraint using such an index." msgstr "" -#: parser/parse_utilcmd.c:1669 +#: parser/parse_utilcmd.c:1868 #, c-format msgid "index \"%s\" contains expressions" msgstr "\"%s\" 인덱스에 표현식이 포함되어 있음" -#: parser/parse_utilcmd.c:1676 +#: parser/parse_utilcmd.c:1875 #, c-format msgid "\"%s\" is a partial index" msgstr "\"%s\" 객체는 부분 인덱스임" -#: parser/parse_utilcmd.c:1688 +#: parser/parse_utilcmd.c:1887 #, c-format msgid "\"%s\" is a deferrable index" msgstr "\"%s\" 객체는 지연가능한 인덱스임" -#: parser/parse_utilcmd.c:1689 +#: parser/parse_utilcmd.c:1888 #, c-format msgid "Cannot create a non-deferrable constraint using a deferrable index." msgstr "" -#: parser/parse_utilcmd.c:1746 +#: parser/parse_utilcmd.c:1945 #, c-format msgid "index \"%s\" does not have default sorting behavior" msgstr "\"%s\" 인덱스는 기본 정렬 방법이 없음" -#: parser/parse_utilcmd.c:1893 +#: parser/parse_utilcmd.c:2089 #, c-format msgid "column \"%s\" appears twice in primary key constraint" -msgstr "기본키 제약 조건에서 \"%s\" 열이 두 번 지정되었습니다" +msgstr "기본키 제약 조건에서 \"%s\" 칼럼이 두 번 지정되었습니다" -#: parser/parse_utilcmd.c:1899 +#: parser/parse_utilcmd.c:2095 #, c-format msgid "column \"%s\" appears twice in unique constraint" -msgstr "고유 제약 조건에서 \"%s\" 열이 두 번 지정되었습니다" - -#: parser/parse_utilcmd.c:2103 -#, c-format -msgid "index expression cannot return a set" -msgstr "인덱스 식은 세트를 반환할 수 없음" +msgstr "고유 제약 조건에서 \"%s\" 칼럼이 두 번 지정되었습니다" -#: parser/parse_utilcmd.c:2114 +#: parser/parse_utilcmd.c:2304 #, c-format msgid "" "index expressions and predicates can refer only to the table being indexed" msgstr "인덱스 식 및 술어는 인덱싱되는 테이블만 참조할 수 있음" -#: parser/parse_utilcmd.c:2160 +#: parser/parse_utilcmd.c:2350 #, c-format msgid "rules on materialized views are not supported" msgstr "구체화된 뷰에서의 룰은 지원하지 않음" -#: parser/parse_utilcmd.c:2221 +#: parser/parse_utilcmd.c:2411 #, c-format msgid "rule WHERE condition cannot contain references to other relations" msgstr "룰에서 지정한 WHERE 조건에 다른 릴레이션에 대한 참조를 포함할 수 없음" -#: parser/parse_utilcmd.c:2293 +#: parser/parse_utilcmd.c:2483 #, c-format msgid "" "rules with WHERE conditions can only have SELECT, INSERT, UPDATE, or DELETE " @@ -14667,144 +15931,149 @@ msgstr "" "룰에서 지정한 WHERE 조건이 있는 규칙에는 SELECT, INSERT, UPDATE 또는 DELETE " "작업만 포함할 수 있음" -#: parser/parse_utilcmd.c:2311 parser/parse_utilcmd.c:2410 -#: rewrite/rewriteHandler.c:485 rewrite/rewriteManip.c:1015 +#: parser/parse_utilcmd.c:2501 parser/parse_utilcmd.c:2600 +#: rewrite/rewriteHandler.c:500 rewrite/rewriteManip.c:1015 #, c-format msgid "conditional 
UNION/INTERSECT/EXCEPT statements are not implemented" msgstr "conditional UNION/INTERSECT/EXCEPT 구문은 구현되어 있지 않다" -#: parser/parse_utilcmd.c:2329 +#: parser/parse_utilcmd.c:2519 #, c-format msgid "ON SELECT rule cannot use OLD" msgstr "ON SELECT 룰은 OLD를 사용할 수 없음" -#: parser/parse_utilcmd.c:2333 +#: parser/parse_utilcmd.c:2523 #, c-format msgid "ON SELECT rule cannot use NEW" msgstr "ON SELECT 룰은 NEW를 사용할 수 없음" -#: parser/parse_utilcmd.c:2342 +#: parser/parse_utilcmd.c:2532 #, c-format msgid "ON INSERT rule cannot use OLD" msgstr "ON INSERT 룰은 OLD를 사용할 수 없음" -#: parser/parse_utilcmd.c:2348 +#: parser/parse_utilcmd.c:2538 #, c-format msgid "ON DELETE rule cannot use NEW" msgstr "ON DELETE 룰은 NEW를 사용할 수 없음" -#: parser/parse_utilcmd.c:2376 +#: parser/parse_utilcmd.c:2566 #, c-format msgid "cannot refer to OLD within WITH query" msgstr "" -#: parser/parse_utilcmd.c:2383 +#: parser/parse_utilcmd.c:2573 #, c-format msgid "cannot refer to NEW within WITH query" msgstr "" -#: parser/parse_utilcmd.c:2586 -#, c-format -msgid "transform expression must not return a set" -msgstr "transform 표현식은 하나의 세트를 리턴하면 안됩니다" - -#: parser/parse_utilcmd.c:2700 +#: parser/parse_utilcmd.c:3006 #, c-format msgid "misplaced DEFERRABLE clause" msgstr "DEFERABLE 절이 잘못 놓여져 있습니다" -#: parser/parse_utilcmd.c:2705 parser/parse_utilcmd.c:2720 +#: parser/parse_utilcmd.c:3011 parser/parse_utilcmd.c:3026 #, c-format msgid "multiple DEFERRABLE/NOT DEFERRABLE clauses not allowed" msgstr "여러 개의 DEFERRABLE/NOT DEFERRABLE절은 사용할 수 없습니다" -#: parser/parse_utilcmd.c:2715 +#: parser/parse_utilcmd.c:3021 #, c-format msgid "misplaced NOT DEFERRABLE clause" msgstr "NOT DEFERABLE 절이 잘못 놓여 있습니다" -#: parser/parse_utilcmd.c:2728 parser/parse_utilcmd.c:2754 gram.y:4902 +#: parser/parse_utilcmd.c:3034 parser/parse_utilcmd.c:3060 gram.y:5363 #, c-format msgid "constraint declared INITIALLY DEFERRED must be DEFERRABLE" msgstr "INITIALLY DEFERRED 로 선언된 조건문은 반드시 DEFERABLE 여야만 한다" -#: parser/parse_utilcmd.c:2736 +#: parser/parse_utilcmd.c:3042 #, c-format msgid "misplaced INITIALLY DEFERRED clause" msgstr "INITIALLY DEFERRED 절이 잘못 놓여 있습니다" -#: parser/parse_utilcmd.c:2741 parser/parse_utilcmd.c:2767 +#: parser/parse_utilcmd.c:3047 parser/parse_utilcmd.c:3073 #, c-format msgid "multiple INITIALLY IMMEDIATE/DEFERRED clauses not allowed" msgstr "여러 개의 INITIALLY IMMEDIATE/DEFERRED 절은 허용되지 않습니다" -#: parser/parse_utilcmd.c:2762 +#: parser/parse_utilcmd.c:3068 #, c-format msgid "misplaced INITIALLY IMMEDIATE clause" msgstr "INITIALLY IMMEDIATE 절이 잘못 놓여 있습니다" -#: parser/parse_utilcmd.c:2953 +#: parser/parse_utilcmd.c:3259 #, c-format msgid "" "CREATE specifies a schema (%s) different from the one being created (%s)" msgstr "CREATE 구문에 명시된 schema (%s) 가 생성된 (%s) 의 것과 다릅니다" -#: parser/scansup.c:204 +#: parser/parse_utilcmd.c:3318 #, c-format -msgid "identifier \"%s\" will be truncated to \"%s\"" -msgstr "\"%s\" 식별자는 \"%s\"(으)로 잘림" +msgid "invalid bound specification for a list partition" +msgstr "list 파티션을 위한 범위 설정이 잘못됨" -#: port/pg_sema.c:113 port/sysv_sema.c:113 +#: parser/parse_utilcmd.c:3374 #, c-format -msgid "could not create semaphores: %m" -msgstr "세마포어를 만들 수 없음: %m" +msgid "invalid bound specification for a range partition" +msgstr "range 파티션을 위한 범위 설정이 잘못됨" -#: port/pg_sema.c:114 port/sysv_sema.c:114 +#: parser/parse_utilcmd.c:3380 #, c-format -msgid "Failed system call was semget(%lu, %d, 0%o)." 
-msgstr "semget(%lu, %d, 0%o) 호출에 의한 시스템 콜 실패" +msgid "FROM must specify exactly one value per partitioning column" +msgstr "FROM에는 파티션 칼럼 당 딱 하나의 값만 지정해야 함" -#: port/pg_sema.c:118 port/sysv_sema.c:118 +#: parser/parse_utilcmd.c:3384 #, c-format -msgid "" -"This error does *not* mean that you have run out of disk space. It occurs " -"when either the system limit for the maximum number of semaphore sets " -"(SEMMNI), or the system wide maximum number of semaphores (SEMMNS), would be " -"exceeded. You need to raise the respective kernel parameter. " -"Alternatively, reduce PostgreSQL's consumption of semaphores by reducing its " -"max_connections parameter.\n" -"The PostgreSQL documentation contains more information about configuring " -"your system for PostgreSQL." +msgid "TO must specify exactly one value per partitioning column" +msgstr "TO에는 파티션 칼럼 당 딱 하나의 값만 지정해야 함" + +#: parser/parse_utilcmd.c:3431 parser/parse_utilcmd.c:3445 +#, c-format +msgid "cannot specify NULL in range bound" +msgstr "range 범위에는 NULL 값을 사용할 수 없음" + +#: parser/parse_utilcmd.c:3492 +#, c-format +msgid "every bound following MAXVALUE must also be MAXVALUE" msgstr "" -"이 오류는 서버를 실행하는데 필요한 디스크 공간이 부족해서 발생한 것이 아닙니" -"다.\n" -"이 오류는 시스템에서 지정한 최소 세마포어 수(SEMMNI)가 너무 크거나, 최대 세마" -"포어 수(SEMMNS)가 너무 적어서 서버를 실행할 수 없을 때 발생합니다. 이에 따" -"라, 정상적으로 서버가 실행되려면, 시스템 값들을 조정할 필요가 있습니다. 아니" -"면, 다른 방법으로, PostgreSQL의 환경 설정에서 max_connections 값을 줄여서 세" -"마포어 사용 수를 줄여보십시오.\n" -"보다 자세한 내용은 PostgreSQL 관리자 메뉴얼을 참조 하십시오." -#: port/pg_sema.c:148 port/sysv_sema.c:148 +#: parser/parse_utilcmd.c:3498 #, c-format -msgid "" -"You possibly need to raise your kernel's SEMVMX value to be at least %d. " -"Look into the PostgreSQL documentation for details." +msgid "every bound following MINVALUE must also be MINVALUE" msgstr "" -"커널의 SEMVMX 값을 적어도 %d 정도로 늘려야할 필요가 있는 것 같습니다. 자세" -"한 것은 PostgreSQL 문서를 참조하세요." -#: port/pg_shmem.c:175 port/sysv_shmem.c:175 +#: parser/parse_utilcmd.c:3528 parser/parse_utilcmd.c:3540 +#, c-format +msgid "specified value cannot be cast to type %s for column \"%s\"" +msgstr "지정된 값은 %s 형으로 형변환 할 수 없음, 해당 칼럼: \"%s\"" + +#: parser/parse_utilcmd.c:3542 +#, c-format +msgid "The cast requires a non-immutable conversion." +msgstr "형변환은 non-immutable 변환이 필요함" + +#: parser/parse_utilcmd.c:3543 +#, c-format +msgid "Try putting the literal value in single quotes." +msgstr "" + +#: parser/scansup.c:204 +#, c-format +msgid "identifier \"%s\" will be truncated to \"%s\"" +msgstr "\"%s\" 식별자는 \"%s\"(으)로 잘림" + +#: port/pg_shmem.c:196 port/sysv_shmem.c:196 #, c-format msgid "could not create shared memory segment: %m" msgstr "공유 메모리 세그먼트를 만들 수 없음: %m" -#: port/pg_shmem.c:176 port/sysv_shmem.c:176 +#: port/pg_shmem.c:197 port/sysv_shmem.c:197 #, c-format msgid "Failed system call was shmget(key=%lu, size=%zu, 0%o)." msgstr "shmget(키=%lu, 크기=%zu, 0%o) 시스템 콜 실패" -#: port/pg_shmem.c:180 port/sysv_shmem.c:180 +#: port/pg_shmem.c:201 port/sysv_shmem.c:201 #, c-format msgid "" "This error usually means that PostgreSQL's request for a shared memory " @@ -14817,7 +16086,7 @@ msgstr "" "값보다 크거나, SHMMIN 값보다 적은 경우 발생합니다.\n" "공유 메모리 설정에 대한 보다 자세한 내용은 PostgreSQL 문서를 참조하십시오." -#: port/pg_shmem.c:187 port/sysv_shmem.c:187 +#: port/pg_shmem.c:208 port/sysv_shmem.c:208 #, c-format msgid "" "This error usually means that PostgreSQL's request for a shared memory " @@ -14830,7 +16099,7 @@ msgstr "" "큰 경우 발생합니다. 커널 환경 변수인 SHMALL 값을 좀 더 크게 설정하세요.\n" "공유 메모리 설정에 대한 보다 자세한 내용은 PostgreSQL 문서를 참조하십시오." 
-#: port/pg_shmem.c:193 port/sysv_shmem.c:193 +#: port/pg_shmem.c:214 port/sysv_shmem.c:214 #, c-format msgid "" "This error does *not* mean that you have run out of disk space. It occurs " @@ -14846,12 +16115,12 @@ msgstr "" "확보하세요.\n" "공유 메모리 설정에 대한 보다 자세한 내용은 PostgreSQL 문서를 참조하십시오." -#: port/pg_shmem.c:483 port/sysv_shmem.c:483 +#: port/pg_shmem.c:505 port/sysv_shmem.c:505 #, c-format msgid "could not map anonymous shared memory: %m" msgstr "가용 공유 메모리 확보 실패: %m" -#: port/pg_shmem.c:485 port/sysv_shmem.c:485 +#: port/pg_shmem.c:507 port/sysv_shmem.c:507 #, c-format msgid "" "This error usually means that PostgreSQL's request for a shared memory " @@ -14864,38 +16133,78 @@ msgstr "" "여 보십시오. 줄이는 방법은, shared_buffers 값을 줄이거나 max_connections 값" "을 줄여 보십시오." -#: port/pg_shmem.c:551 port/sysv_shmem.c:551 port/win32_shmem.c:134 +#: port/pg_shmem.c:573 port/sysv_shmem.c:573 port/win32_shmem.c:134 #, c-format msgid "huge pages not supported on this platform" msgstr "huge page 기능은 이 플랫폼에서 지원되지 않음" -#: port/pg_shmem.c:646 port/sysv_shmem.c:646 +#: port/pg_shmem.c:668 port/sysv_shmem.c:668 #, c-format msgid "could not stat data directory \"%s\": %m" msgstr "\"%s\" 데이터 디렉터리 상태를 파악할 수 없음: %m" -#: port/win32/crashdump.c:122 +#: port/sysv_sema.c:123 +#, c-format +msgid "could not create semaphores: %m" +msgstr "세마포어를 만들 수 없음: %m" + +#: port/sysv_sema.c:124 +#, c-format +msgid "Failed system call was semget(%lu, %d, 0%o)." +msgstr "semget(%lu, %d, 0%o) 호출에 의한 시스템 콜 실패" + +#: port/sysv_sema.c:128 +#, c-format +msgid "" +"This error does *not* mean that you have run out of disk space. It occurs " +"when either the system limit for the maximum number of semaphore sets " +"(SEMMNI), or the system wide maximum number of semaphores (SEMMNS), would be " +"exceeded. You need to raise the respective kernel parameter. " +"Alternatively, reduce PostgreSQL's consumption of semaphores by reducing its " +"max_connections parameter.\n" +"The PostgreSQL documentation contains more information about configuring " +"your system for PostgreSQL." +msgstr "" +"이 오류는 서버를 실행하는데 필요한 디스크 공간이 부족해서 발생한 것이 아닙니" +"다.\n" +"이 오류는 시스템에서 지정한 최소 세마포어 수(SEMMNI)가 너무 크거나, 최대 세마" +"포어 수(SEMMNS)가 너무 적어서 서버를 실행할 수 없을 때 발생합니다. 이에 따" +"라, 정상적으로 서버가 실행되려면, 시스템 값들을 조정할 필요가 있습니다. 아니" +"면, 다른 방법으로, PostgreSQL의 환경 설정에서 max_connections 값을 줄여서 세" +"마포어 사용 수를 줄여보십시오.\n" +"보다 자세한 내용은 PostgreSQL 관리자 메뉴얼을 참조 하십시오." + +#: port/sysv_sema.c:158 +#, c-format +msgid "" +"You possibly need to raise your kernel's SEMVMX value to be at least %d. " +"Look into the PostgreSQL documentation for details." +msgstr "" +"커널의 SEMVMX 값을 적어도 %d 정도로 늘려야할 필요가 있는 것 같습니다. 자세" +"한 것은 PostgreSQL 문서를 참조하세요." 
+ +#: port/win32/crashdump.c:121 #, c-format msgid "could not load dbghelp.dll, cannot write crash dump\n" msgstr "" -#: port/win32/crashdump.c:130 +#: port/win32/crashdump.c:129 #, c-format msgid "" "could not load required functions in dbghelp.dll, cannot write crash dump\n" msgstr "" -#: port/win32/crashdump.c:161 +#: port/win32/crashdump.c:160 #, c-format msgid "could not open crash dump file \"%s\" for writing: error code %lu\n" msgstr "\"%s\" 장애 덤프 파일을 쓰기 위해 열 수 없음: 오류 번호 %lu\n" -#: port/win32/crashdump.c:168 +#: port/win32/crashdump.c:167 #, c-format msgid "wrote crash dump to file \"%s\"\n" msgstr "\"%s\" 장애 덤프 파일을 만들었습니다.\n" -#: port/win32/crashdump.c:170 +#: port/win32/crashdump.c:169 #, c-format msgid "could not write crash dump to file \"%s\": error code %lu\n" msgstr "\"%s\" 장애 덤프 파일을 쓰기 실패: 오류 번호 %lu\n" @@ -14915,22 +16224,22 @@ msgstr "신호 수신기 파이프를 만들 수 없음: 오류 번호 %lu, 다 msgid "could not create signal dispatch thread: error code %lu\n" msgstr "시그널 디스패치 쓰레드를 만들 수 없음: 오류 번호 %lu\n" -#: port/win32_sema.c:94 +#: port/win32_sema.c:104 #, c-format msgid "could not create semaphore: error code %lu" msgstr "세마포어를 만들 수 없음: 오류 번호 %lu" -#: port/win32_sema.c:167 +#: port/win32_sema.c:181 #, c-format msgid "could not lock semaphore: error code %lu" msgstr "세마포어를 잠글 수 없음: 오류 번호 %lu" -#: port/win32_sema.c:187 +#: port/win32_sema.c:201 #, c-format msgid "could not unlock semaphore: error code %lu" msgstr "세마포어 잠금을 해제할 수 없음: 오류 번호 %lu" -#: port/win32_sema.c:216 +#: port/win32_sema.c:231 #, c-format msgid "could not try-lock semaphore: error code %lu" msgstr "세마포어 잠금 시도 실패: 오류 번호 %lu" @@ -14967,100 +16276,103 @@ msgstr "실패한 시스템 호출은 DuplicateHandle입니다." msgid "Failed system call was MapViewOfFileEx." msgstr "실패한 시스템 호출은 MapViewOfFileEx입니다." -#: postmaster/autovacuum.c:380 +#: postmaster/autovacuum.c:406 #, c-format msgid "could not fork autovacuum launcher process: %m" msgstr "autovacuum 실행기 프로세스를 실행할 수 없음: %m" -#: postmaster/autovacuum.c:416 +#: postmaster/autovacuum.c:442 #, c-format msgid "autovacuum launcher started" msgstr "autovacuum 실행기가 시작됨" -#: postmaster/autovacuum.c:779 +#: postmaster/autovacuum.c:826 #, c-format msgid "autovacuum launcher shutting down" msgstr "autovacuum 실행기를 종료하는 중" -#: postmaster/autovacuum.c:1441 +#: postmaster/autovacuum.c:1488 #, c-format msgid "could not fork autovacuum worker process: %m" msgstr "autovacuum 작업자 프로세스를 실행할 수 없음: %m" -#: postmaster/autovacuum.c:1639 +#: postmaster/autovacuum.c:1686 #, c-format msgid "autovacuum: processing database \"%s\"" msgstr "autovacuum: \"%s\" 데이터베이스 처리 중" -#: postmaster/autovacuum.c:2052 +#: postmaster/autovacuum.c:2261 #, c-format -msgid "autovacuum: dropping orphan temp table \"%s\".\"%s\" in database \"%s\"" +msgid "autovacuum: dropping orphan temp table \"%s.%s.%s\"" msgstr "" -"autovacuum: \"%s\".\"%s\" 사용 않는 임시 테이블을 \"%s\" 데이터베이스에서 삭" -"제하는 중" +"autovacuum: 더 이상 사용하지 않는 \"%s.%s.%s\" 임시 테이블을 삭제하는 중" -#: postmaster/autovacuum.c:2064 -#, c-format -msgid "autovacuum: found orphan temp table \"%s\".\"%s\" in database \"%s\"" -msgstr "" -"autovacuum: \"%s\".\"%s\" 사용 않는 임시 테이블을 \"%s\" 데이터베이스에서 찾" -"았음" - -#: postmaster/autovacuum.c:2347 +#: postmaster/autovacuum.c:2467 #, c-format msgid "automatic vacuum of table \"%s.%s.%s\"" msgstr "\"%s.%s.%s\" 테이블 대상으로 자동 vacuum 작업 함" -#: postmaster/autovacuum.c:2350 +#: postmaster/autovacuum.c:2470 #, c-format msgid "automatic analyze of table \"%s.%s.%s\"" msgstr "\"%s.%s.%s\" 테이블 자동 분석" -#: postmaster/autovacuum.c:2899 +#: postmaster/autovacuum.c:2656 +#, c-format +msgid "processing 
work entry for relation \"%s.%s.%s\"" +msgstr "\"%s.%s.%s\" 릴레이션 작업 항목 작업 중" + +#: postmaster/autovacuum.c:3228 #, c-format msgid "autovacuum not started because of misconfiguration" msgstr "서버 설정 정보가 잘못되어 자동 청소 작업이 실행되지 못했습니다." -#: postmaster/autovacuum.c:2900 +#: postmaster/autovacuum.c:3229 #, c-format msgid "Enable the \"track_counts\" option." msgstr "\"track_counts\" 옵션을 사용하십시오." -#: postmaster/bgworker.c:346 postmaster/bgworker.c:745 +#: postmaster/bgworker.c:393 postmaster/bgworker.c:856 #, c-format msgid "registering background worker \"%s\"" msgstr "" -#: postmaster/bgworker.c:375 +#: postmaster/bgworker.c:425 #, c-format msgid "unregistering background worker \"%s\"" msgstr "" -#: postmaster/bgworker.c:484 +#: postmaster/bgworker.c:590 #, c-format msgid "" "background worker \"%s\": must attach to shared memory in order to request a " "database connection" msgstr "" -#: postmaster/bgworker.c:493 +#: postmaster/bgworker.c:599 #, c-format msgid "" "background worker \"%s\": cannot request database access if starting at " "postmaster start" msgstr "" -#: postmaster/bgworker.c:507 +#: postmaster/bgworker.c:613 #, c-format msgid "background worker \"%s\": invalid restart interval" msgstr "\"%s\" 백그라운드 작업자: 잘못된 재실행 간격" -#: postmaster/bgworker.c:552 +#: postmaster/bgworker.c:628 +#, c-format +msgid "" +"background worker \"%s\": parallel workers may not be configured for restart" +msgstr "\"%s\" 백그라운드 작업자: 이 병렬 작업자는 재실행 설정이 없음" + +#: postmaster/bgworker.c:673 #, c-format msgid "terminating background worker \"%s\" due to administrator command" msgstr "관리자 명령에 의해 \"%s\" 백그라운드 작업자를 종료합니다." -#: postmaster/bgworker.c:752 +#: postmaster/bgworker.c:864 #, c-format msgid "" "background worker \"%s\": must be registered in shared_preload_libraries" @@ -15068,7 +16380,7 @@ msgstr "" "\"%s\" 백그라운드 작업자: 먼저 shared_preload_libraries 설정값으로 등록되어" "야 합니다." -#: postmaster/bgworker.c:764 +#: postmaster/bgworker.c:876 #, c-format msgid "" "background worker \"%s\": only dynamic background workers can request " @@ -15076,56 +16388,51 @@ msgid "" msgstr "" "\"%s\" 백그라운드 작업자: 동적 백그라운드 작업자만 알림을 요청할 수 있음" -#: postmaster/bgworker.c:779 +#: postmaster/bgworker.c:891 #, c-format msgid "too many background workers" msgstr "백그라운드 작업자가 너무 많음" -#: postmaster/bgworker.c:780 +#: postmaster/bgworker.c:892 #, c-format msgid "Up to %d background worker can be registered with the current settings." msgid_plural "" "Up to %d background workers can be registered with the current settings." msgstr[0] "현재 설정으로는 %d개의 백그라운드 작업자를 사용할 수 있습니다." -#: postmaster/bgworker.c:784 +#: postmaster/bgworker.c:896 #, c-format msgid "" "Consider increasing the configuration parameter \"max_worker_processes\"." msgstr "\"max_worker_processes\" 환경 매개 변수 값을 좀 느려보십시오." -#: postmaster/checkpointer.c:463 +#: postmaster/checkpointer.c:464 #, c-format msgid "checkpoints are occurring too frequently (%d second apart)" msgid_plural "checkpoints are occurring too frequently (%d seconds apart)" msgstr[0] "체크포인트가 너무 자주 발생함 (%d초 간격)" -#: postmaster/checkpointer.c:467 +#: postmaster/checkpointer.c:468 #, c-format msgid "Consider increasing the configuration parameter \"max_wal_size\"." msgstr "\"max_wal_size\" 환경 매개 변수 값을 좀 느려보십시오." 
-#: postmaster/checkpointer.c:614 -#, c-format -msgid "transaction log switch forced (archive_timeout=%d)" -msgstr "강제로 트랜잭션 로그를 바꿨습니다 (archive_timeout=%d)" - -#: postmaster/checkpointer.c:1072 +#: postmaster/checkpointer.c:1087 #, c-format msgid "checkpoint request failed" msgstr "체크포인트 요청 실패" -#: postmaster/checkpointer.c:1073 +#: postmaster/checkpointer.c:1088 #, c-format msgid "Consult recent messages in the server log for details." msgstr "더 자세한 것은 서버 로그 파일을 살펴보십시오." -#: postmaster/checkpointer.c:1268 +#: postmaster/checkpointer.c:1283 #, c-format msgid "compacted fsync request queue from %d entries to %d entries" msgstr "" -#: postmaster/pgarch.c:149 +#: postmaster/pgarch.c:148 #, c-format msgid "could not fork archiver: %m" msgstr "archiver 할당(fork) 실패: %m" @@ -15138,7 +16445,7 @@ msgstr "archive_mode가 사용 설정되었는데 archive_command가 설정되 #: postmaster/pgarch.c:484 #, c-format msgid "" -"archiving transaction log file \"%s\" failed too many times, will try again " +"archiving write-ahead log file \"%s\" failed too many times, will try again " "later" msgstr "" "\"%s\" 트랜잭션 로그 파일 아카이브 작업이 계속 실패하고 있습니다. 다음에 또 " @@ -15160,7 +16467,7 @@ msgstr "실패한 아카이브 명령: %s" msgid "archive command was terminated by exception 0x%X" msgstr "0x%X 예외로 인해 아카이브 명령이 종료됨" -#: postmaster/pgarch.c:598 postmaster/postmaster.c:3491 +#: postmaster/pgarch.c:598 postmaster/postmaster.c:3567 #, c-format msgid "" "See C include file \"ntstatus.h\" for a description of the hexadecimal value." @@ -15181,139 +16488,134 @@ msgstr "%d번 시그널로 인해 아카이브 명령이 종료됨" msgid "archive command exited with unrecognized status %d" msgstr "아카이브 명령이 인식할 수 없는 %d 상태로 종료됨" -#: postmaster/pgarch.c:631 -#, c-format -msgid "archived transaction log file \"%s\"" -msgstr "\"%s\" 트랜잭션 로그파일이 아카이브 됨" - -#: postmaster/pgarch.c:680 +#: postmaster/pgarch.c:679 #, c-format msgid "could not open archive status directory \"%s\": %m" msgstr "\"%s\" 디렉터리를 열 수 없습니다: %m" -#: postmaster/pgstat.c:355 +#: postmaster/pgstat.c:395 #, c-format msgid "could not resolve \"localhost\": %s" msgstr "\"localhost\" 이름의 호스트 IP를 구할 수 없습니다: %s" -#: postmaster/pgstat.c:378 +#: postmaster/pgstat.c:418 #, c-format msgid "trying another address for the statistics collector" msgstr "통계 수집기에서 사용할 다른 주소를 찾습니다" -#: postmaster/pgstat.c:387 +#: postmaster/pgstat.c:427 #, c-format msgid "could not create socket for statistics collector: %m" msgstr "통계 수집기에서 사용할 소켓을 만들 수 없습니다: %m" -#: postmaster/pgstat.c:399 +#: postmaster/pgstat.c:439 #, c-format msgid "could not bind socket for statistics collector: %m" msgstr "통계 수집기에서 사용할 소켓과 bind할 수 없습니다: %m" -#: postmaster/pgstat.c:410 +#: postmaster/pgstat.c:450 #, c-format msgid "could not get address of socket for statistics collector: %m" msgstr "통계 수집기에서 사용할 소켓의 주소를 구할 수 없습니다: %m" -#: postmaster/pgstat.c:426 +#: postmaster/pgstat.c:466 #, c-format msgid "could not connect socket for statistics collector: %m" msgstr "통계 수집기에서 사용할 소켓에 연결할 수 없습니다: %m" -#: postmaster/pgstat.c:447 +#: postmaster/pgstat.c:487 #, c-format msgid "could not send test message on socket for statistics collector: %m" msgstr "통계 수집기에서 사용할 소켓으로 테스트 메시지를 보낼 수 없습니다: %m" -#: postmaster/pgstat.c:473 +#: postmaster/pgstat.c:513 #, c-format msgid "select() failed in statistics collector: %m" msgstr "통계 수집기에서 select() 작업 오류: %m" -#: postmaster/pgstat.c:488 +#: postmaster/pgstat.c:528 #, c-format msgid "test message did not get through on socket for statistics collector" msgstr "통계 수집기에서 사용할 소켓으로 테스트 메시지를 처리할 수 없습니다" -#: postmaster/pgstat.c:503 +#: postmaster/pgstat.c:543 #, c-format 
msgid "could not receive test message on socket for statistics collector: %m" msgstr "통계 수집기에서 사용할 소켓으로 테스트 메시지를 받을 수 없습니다: %m" -#: postmaster/pgstat.c:513 +#: postmaster/pgstat.c:553 #, c-format msgid "incorrect test message transmission on socket for statistics collector" msgstr "통계 수집기에서 사용할 소켓으로 잘못된 테스트 메시지가 전달 되었습니다" -#: postmaster/pgstat.c:536 +#: postmaster/pgstat.c:576 #, c-format msgid "could not set statistics collector socket to nonblocking mode: %m" msgstr "" "통계 수집기에서 사용하는 소켓 모드를 nonblocking 모드로 지정할 수 없습니다: " "%m" -#: postmaster/pgstat.c:546 +#: postmaster/pgstat.c:615 #, c-format msgid "disabling statistics collector for lack of working socket" msgstr "현재 작업 소켓의 원할한 소통을 위해 통계 수집기 기능을 중지합니다" -#: postmaster/pgstat.c:693 +#: postmaster/pgstat.c:762 #, c-format msgid "could not fork statistics collector: %m" msgstr "통계 수집기를 fork할 수 없습니다: %m" -#: postmaster/pgstat.c:1261 +#: postmaster/pgstat.c:1342 #, c-format msgid "unrecognized reset target: \"%s\"" msgstr "알 수 없는 리셋 타겟: \"%s\"" -#: postmaster/pgstat.c:1262 +#: postmaster/pgstat.c:1343 #, c-format msgid "Target must be \"archiver\" or \"bgwriter\"." msgstr "사용 가능한 타겟은 \"archiver\" 또는 \"bgwriter\"" -#: postmaster/pgstat.c:3587 +#: postmaster/pgstat.c:4296 #, c-format msgid "could not read statistics message: %m" msgstr "통계 메시지를 읽을 수 없음: %m" -#: postmaster/pgstat.c:3918 postmaster/pgstat.c:4075 +#: postmaster/pgstat.c:4628 postmaster/pgstat.c:4785 #, c-format msgid "could not open temporary statistics file \"%s\": %m" msgstr "\"%s\" 임시 통계 파일을 열 수 없음: %m" -#: postmaster/pgstat.c:3985 postmaster/pgstat.c:4120 +#: postmaster/pgstat.c:4695 postmaster/pgstat.c:4830 #, c-format msgid "could not write temporary statistics file \"%s\": %m" msgstr "\"%s\" 임시 통계 파일에 쓰기 실패: %m" -#: postmaster/pgstat.c:3994 postmaster/pgstat.c:4129 +#: postmaster/pgstat.c:4704 postmaster/pgstat.c:4839 #, c-format msgid "could not close temporary statistics file \"%s\": %m" msgstr "\"%s\" 임시 통계 파일을 닫을 수 없습니다: %m" -#: postmaster/pgstat.c:4002 postmaster/pgstat.c:4137 +#: postmaster/pgstat.c:4712 postmaster/pgstat.c:4847 #, c-format msgid "could not rename temporary statistics file \"%s\" to \"%s\": %m" msgstr "\"%s\" 임시 통계 파일 이름을 \"%s\" (으)로 바꿀 수 없습니다: %m" -#: postmaster/pgstat.c:4226 postmaster/pgstat.c:4411 postmaster/pgstat.c:4564 +#: postmaster/pgstat.c:4936 postmaster/pgstat.c:5142 postmaster/pgstat.c:5295 #, c-format msgid "could not open statistics file \"%s\": %m" msgstr "\"%s\" 통계 파일을 열 수 없음: %m" -#: postmaster/pgstat.c:4238 postmaster/pgstat.c:4248 postmaster/pgstat.c:4258 -#: postmaster/pgstat.c:4279 postmaster/pgstat.c:4294 postmaster/pgstat.c:4348 -#: postmaster/pgstat.c:4423 postmaster/pgstat.c:4443 postmaster/pgstat.c:4461 -#: postmaster/pgstat.c:4477 postmaster/pgstat.c:4495 postmaster/pgstat.c:4511 -#: postmaster/pgstat.c:4576 postmaster/pgstat.c:4588 postmaster/pgstat.c:4600 -#: postmaster/pgstat.c:4625 postmaster/pgstat.c:4647 +#: postmaster/pgstat.c:4948 postmaster/pgstat.c:4958 postmaster/pgstat.c:4979 +#: postmaster/pgstat.c:5001 postmaster/pgstat.c:5016 postmaster/pgstat.c:5079 +#: postmaster/pgstat.c:5154 postmaster/pgstat.c:5174 postmaster/pgstat.c:5192 +#: postmaster/pgstat.c:5208 postmaster/pgstat.c:5226 postmaster/pgstat.c:5242 +#: postmaster/pgstat.c:5307 postmaster/pgstat.c:5319 postmaster/pgstat.c:5331 +#: postmaster/pgstat.c:5356 postmaster/pgstat.c:5378 #, c-format msgid "corrupted statistics file \"%s\"" msgstr "\"%s\" 통계 파일이 손상되었음" -#: postmaster/pgstat.c:4776 +#: postmaster/pgstat.c:5507 #, c-format msgid "" "using stale 
statistics instead of current ones because stats collector is " @@ -15321,43 +16623,43 @@ msgid "" msgstr "" "현재 통계 수집기가 반응하지 않아 부정확한 통계정보가 사용되고 있습니다." -#: postmaster/pgstat.c:5103 +#: postmaster/pgstat.c:5834 #, c-format msgid "database hash table corrupted during cleanup --- abort" msgstr "정리하는 동안 데이터베이스 해시 테이블이 손상 되었습니다 --- 중지함" -#: postmaster/postmaster.c:684 +#: postmaster/postmaster.c:710 #, c-format msgid "%s: invalid argument for option -f: \"%s\"\n" msgstr "%s: -f 옵션의 잘못된 인자: \"%s\"\n" -#: postmaster/postmaster.c:770 +#: postmaster/postmaster.c:796 #, c-format msgid "%s: invalid argument for option -t: \"%s\"\n" msgstr "%s: -t 옵션의 잘못된 인자: \"%s\"\n" -#: postmaster/postmaster.c:821 +#: postmaster/postmaster.c:847 #, c-format msgid "%s: invalid argument: \"%s\"\n" msgstr "%s: 잘못된 인자: \"%s\"\n" -#: postmaster/postmaster.c:860 +#: postmaster/postmaster.c:886 #, c-format msgid "%s: superuser_reserved_connections must be less than max_connections\n" msgstr "" "%s: superuser_reserved_connections 값은 max_connections 값보다 작아야합니다\n" -#: postmaster/postmaster.c:865 +#: postmaster/postmaster.c:891 #, c-format msgid "%s: max_wal_senders must be less than max_connections\n" msgstr "%s: max_wal_senders 값은 max_connections 값보다 작아야합니다\n" -#: postmaster/postmaster.c:870 +#: postmaster/postmaster.c:896 #, c-format msgid "WAL archival cannot be enabled when wal_level is \"minimal\"" msgstr "wal_level 값이 \"minimal\"일 때는 아카이브 작업을 할 수 없습니다." -#: postmaster/postmaster.c:873 +#: postmaster/postmaster.c:899 #, c-format msgid "" "WAL streaming (max_wal_senders > 0) requires wal_level \"replica\" or " @@ -15366,88 +16668,88 @@ msgstr "" "WAL 스트리밍 작업(max_wal_senders > 0 인경우)은 wal_level 값이 \"replica\" 또" "는 \"logical\" 이어야 합니다." -#: postmaster/postmaster.c:881 +#: postmaster/postmaster.c:907 #, c-format msgid "%s: invalid datetoken tables, please fix\n" msgstr "%s: 잘못된 datetoken 테이블들, 복구하십시오.\n" -#: postmaster/postmaster.c:973 postmaster/postmaster.c:1071 -#: utils/init/miscinit.c:1429 +#: postmaster/postmaster.c:1010 postmaster/postmaster.c:1108 +#: utils/init/miscinit.c:1455 #, c-format msgid "invalid list syntax in parameter \"%s\"" msgstr "\"%s\" 매개 변수 구문이 잘못 되었습니다" -#: postmaster/postmaster.c:1004 +#: postmaster/postmaster.c:1041 #, c-format msgid "could not create listen socket for \"%s\"" msgstr "\"%s\" 응당 소켓을 만들 수 없습니다" -#: postmaster/postmaster.c:1010 +#: postmaster/postmaster.c:1047 #, c-format msgid "could not create any TCP/IP sockets" msgstr "TCP/IP 소켓을 만들 수 없습니다." 
-#: postmaster/postmaster.c:1093 +#: postmaster/postmaster.c:1130 #, c-format msgid "could not create Unix-domain socket in directory \"%s\"" msgstr "\"%s\" 디렉터리에 유닉스 도메인 소켓을 만들 수 없습니다" -#: postmaster/postmaster.c:1099 +#: postmaster/postmaster.c:1136 #, c-format msgid "could not create any Unix-domain sockets" msgstr "유닉스 도메인 소켓을 만들 수 없습니다" -#: postmaster/postmaster.c:1111 +#: postmaster/postmaster.c:1148 #, c-format msgid "no socket created for listening" msgstr "서버 접속 대기 작업을 위한 소켓을 만들 수 없음" -#: postmaster/postmaster.c:1151 +#: postmaster/postmaster.c:1188 #, c-format msgid "could not create I/O completion port for child queue" msgstr "하위 대기열에 대해 I/O 완료 포트를 만들 수 없음" -#: postmaster/postmaster.c:1180 +#: postmaster/postmaster.c:1217 #, c-format msgid "%s: could not change permissions of external PID file \"%s\": %s\n" msgstr "%s: \"%s\" 외부 PID 파일의 접근 권한을 바꿀 수 없음: %s\n" -#: postmaster/postmaster.c:1184 +#: postmaster/postmaster.c:1221 #, c-format msgid "%s: could not write external PID file \"%s\": %s\n" msgstr "%s: 외부 pid 파일 \"%s\" 를 쓸 수 없음: %s\n" -#: postmaster/postmaster.c:1234 +#: postmaster/postmaster.c:1278 #, c-format msgid "ending log output to stderr" msgstr "stderr 쪽 로그 출력을 중지합니다." -#: postmaster/postmaster.c:1235 +#: postmaster/postmaster.c:1279 #, c-format msgid "Future log output will go to log destination \"%s\"." msgstr "자세한 로그는 \"%s\" 쪽으로 기록됩니다." -#: postmaster/postmaster.c:1261 utils/init/postinit.c:213 +#: postmaster/postmaster.c:1305 utils/init/postinit.c:213 #, c-format msgid "could not load pg_hba.conf" msgstr "pg_hba.conf를 로드할 수 없음" -#: postmaster/postmaster.c:1287 +#: postmaster/postmaster.c:1331 #, c-format msgid "postmaster became multithreaded during startup" msgstr "" -#: postmaster/postmaster.c:1288 +#: postmaster/postmaster.c:1332 #, c-format msgid "Set the LC_ALL environment variable to a valid locale." msgstr "LC_ALL 환경 설정값으로 알맞은 로케일 이름을 지정하세요." -#: postmaster/postmaster.c:1385 +#: postmaster/postmaster.c:1437 #, c-format msgid "%s: could not locate matching postgres executable" msgstr "%s: 실행가능한 postgres 프로그램을 찾을 수 없습니다" -#: postmaster/postmaster.c:1408 utils/misc/tzparser.c:341 +#: postmaster/postmaster.c:1460 utils/misc/tzparser.c:341 #, c-format msgid "" "This may indicate an incomplete PostgreSQL installation, or that the file " @@ -15456,42 +16758,42 @@ msgstr "" "이 문제는 PostgreSQL 설치가 불완전하게 되었거나, \"%s\" 파일이 올바른 위치에 " "있지 않아서 발생했습니다." -#: postmaster/postmaster.c:1436 +#: postmaster/postmaster.c:1488 #, c-format msgid "data directory \"%s\" does not exist" msgstr "\"%s\" 데이터 디렉터리 없음" -#: postmaster/postmaster.c:1441 +#: postmaster/postmaster.c:1493 #, c-format msgid "could not read permissions of directory \"%s\": %m" msgstr "\"%s\" 디렉터리 읽기 권한 없음: %m" -#: postmaster/postmaster.c:1449 +#: postmaster/postmaster.c:1501 #, c-format msgid "specified data directory \"%s\" is not a directory" msgstr "지정한 \"%s\" 데이터 디렉터리는 디렉터리가 아님" -#: postmaster/postmaster.c:1465 +#: postmaster/postmaster.c:1517 #, c-format msgid "data directory \"%s\" has wrong ownership" msgstr "\"%s\" 데이터 디렉터리 소유주가 잘못 되었습니다." -#: postmaster/postmaster.c:1467 +#: postmaster/postmaster.c:1519 #, c-format msgid "The server must be started by the user that owns the data directory." msgstr "서버는 지정한 데이터 디렉터리의 소유주 권한으로 시작되어야합니다." 
-#: postmaster/postmaster.c:1487 +#: postmaster/postmaster.c:1539 #, c-format msgid "data directory \"%s\" has group or world access" msgstr "\"%s\" 데이터 디렉터리 액세스 권한이 잘못 되었습니다" -#: postmaster/postmaster.c:1489 +#: postmaster/postmaster.c:1541 #, c-format msgid "Permissions should be u=rwx (0700)." msgstr "액세스 권한은 u=rwx (0700) 값이어야 합니다." -#: postmaster/postmaster.c:1500 +#: postmaster/postmaster.c:1552 #, c-format msgid "" "%s: could not find the database system\n" @@ -15502,489 +16804,520 @@ msgstr "" "\"%s\" 디렉터리 안에 해당 자료가 있기를 기대했는데,\n" "\"%s\" 파일을 열 수가 없었습니다: %s\n" -#: postmaster/postmaster.c:1677 +#: postmaster/postmaster.c:1729 #, c-format msgid "select() failed in postmaster: %m" msgstr "postmaster에서 select() 작동 실패: %m" -#: postmaster/postmaster.c:1828 +#: postmaster/postmaster.c:1884 #, c-format msgid "" "performing immediate shutdown because data directory lock file is invalid" msgstr "" -#: postmaster/postmaster.c:1906 postmaster/postmaster.c:1937 +#: postmaster/postmaster.c:1962 postmaster/postmaster.c:1993 #, c-format msgid "incomplete startup packet" msgstr "아직 완료되지 않은 시작 패킷" -#: postmaster/postmaster.c:1918 +#: postmaster/postmaster.c:1974 #, c-format msgid "invalid length of startup packet" msgstr "시작 패킷의 길이가 잘못 되었습니다" -#: postmaster/postmaster.c:1976 +#: postmaster/postmaster.c:2032 #, c-format msgid "failed to send SSL negotiation response: %m" msgstr "SSL 연결 작업에 오류가 발생했습니다: %m" -#: postmaster/postmaster.c:2005 +#: postmaster/postmaster.c:2061 #, c-format msgid "unsupported frontend protocol %u.%u: server supports %u.0 to %u.%u" msgstr "" "지원하지 않는 frontend 프로토콜 %u.%u: 서버에서 지원하는 프로토콜 %u.0 .. %u." "%u" -#: postmaster/postmaster.c:2068 utils/misc/guc.c:5660 utils/misc/guc.c:5753 -#: utils/misc/guc.c:7051 utils/misc/guc.c:9805 utils/misc/guc.c:9839 +#: postmaster/postmaster.c:2124 utils/misc/guc.c:5770 utils/misc/guc.c:5863 +#: utils/misc/guc.c:7164 utils/misc/guc.c:9918 utils/misc/guc.c:9952 #, c-format msgid "invalid value for parameter \"%s\": \"%s\"" msgstr "잘못된 \"%s\" 매개 변수의 값: \"%s\"" -#: postmaster/postmaster.c:2071 +#: postmaster/postmaster.c:2127 #, c-format msgid "Valid values are: \"false\", 0, \"true\", 1, \"database\"." msgstr "" -#: postmaster/postmaster.c:2091 +#: postmaster/postmaster.c:2147 #, c-format msgid "invalid startup packet layout: expected terminator as last byte" msgstr "잘못된 시작 패킷 레이아웃: 마지막 바이트로 종결문자가 발견되었음" -#: postmaster/postmaster.c:2119 +#: postmaster/postmaster.c:2175 #, c-format msgid "no PostgreSQL user name specified in startup packet" msgstr "시작 패킷에서 지정한 사용자는 PostgreSQL 사용자 이름이 아닙니다" -#: postmaster/postmaster.c:2178 +#: postmaster/postmaster.c:2234 #, c-format msgid "the database system is starting up" msgstr "데이터베이스 시스템이 새로 가동 중입니다." -#: postmaster/postmaster.c:2183 +#: postmaster/postmaster.c:2239 #, c-format msgid "the database system is shutting down" msgstr "데이터베이스 시스템이 중지 중입니다" -#: postmaster/postmaster.c:2188 +#: postmaster/postmaster.c:2244 #, c-format msgid "the database system is in recovery mode" msgstr "데이터베이스 시스템이 자동 복구 작업 중입니다." -#: postmaster/postmaster.c:2193 storage/ipc/procarray.c:297 -#: storage/ipc/sinvaladt.c:298 storage/lmgr/proc.c:340 +#: postmaster/postmaster.c:2249 storage/ipc/procarray.c:292 +#: storage/ipc/sinvaladt.c:298 storage/lmgr/proc.c:338 #, c-format msgid "sorry, too many clients already" msgstr "최대 동시 접속자 수를 초과했습니다." 
-#: postmaster/postmaster.c:2255 +#: postmaster/postmaster.c:2311 #, c-format msgid "wrong key in cancel request for process %d" msgstr "프로세스 %d에 대한 취소 요청에 잘못된 키가 있음" -#: postmaster/postmaster.c:2263 +#: postmaster/postmaster.c:2319 #, c-format msgid "PID %d in cancel request did not match any process" msgstr "취소 요청의 PID %d과(와) 일치하는 프로세스가 없음" -#: postmaster/postmaster.c:2483 +#: postmaster/postmaster.c:2530 #, c-format msgid "received SIGHUP, reloading configuration files" msgstr "SIGHUP 신호를 받아서, 환경설정파일을 다시 읽고 있습니다." -#: postmaster/postmaster.c:2508 +#: postmaster/postmaster.c:2555 +#, c-format +msgid "pg_hba.conf was not reloaded" +msgstr "pg_hba.conf 파일이 다시 로드되지 않았음" + +#: postmaster/postmaster.c:2559 #, c-format -msgid "pg_hba.conf not reloaded" -msgstr "pg_hba.conf가 다시 로드되지 않음" +msgid "pg_ident.conf was not reloaded" +msgstr "pg_ident.conf 파일이 다시 로드되지 않았음" -#: postmaster/postmaster.c:2512 +#: postmaster/postmaster.c:2569 #, c-format -msgid "pg_ident.conf not reloaded" -msgstr "pg_ident.conf 파일이 다시 로드되지 않음" +msgid "SSL configuration was not reloaded" +msgstr "SSL 설정이 다시 로드되지 않았음" -#: postmaster/postmaster.c:2553 +#: postmaster/postmaster.c:2617 #, c-format msgid "received smart shutdown request" msgstr "smart 중지 요청을 받았습니다." -#: postmaster/postmaster.c:2608 +#: postmaster/postmaster.c:2675 #, c-format msgid "received fast shutdown request" msgstr "fast 중지 요청을 받았습니다." -#: postmaster/postmaster.c:2638 +#: postmaster/postmaster.c:2708 #, c-format msgid "aborting any active transactions" msgstr "모든 활성화 되어있는 트랜잭션을 중지하고 있습니다." -#: postmaster/postmaster.c:2672 +#: postmaster/postmaster.c:2742 #, c-format msgid "received immediate shutdown request" msgstr "immediate 중지 요청을 받았습니다." -#: postmaster/postmaster.c:2736 +#: postmaster/postmaster.c:2809 #, c-format msgid "shutdown at recovery target" msgstr "복구 타겟에서 중지함" -#: postmaster/postmaster.c:2752 postmaster/postmaster.c:2775 +#: postmaster/postmaster.c:2825 postmaster/postmaster.c:2848 msgid "startup process" msgstr "시작 프로세스" -#: postmaster/postmaster.c:2755 +#: postmaster/postmaster.c:2828 #, c-format msgid "aborting startup due to startup process failure" msgstr "시작 프로세스 실패 때문에 서버 시작이 중지 되었습니다" -#: postmaster/postmaster.c:2816 +#: postmaster/postmaster.c:2889 #, c-format msgid "database system is ready to accept connections" msgstr "이제 데이터베이스 서버로 접속할 수 있습니다" -#: postmaster/postmaster.c:2835 +#: postmaster/postmaster.c:2910 msgid "background writer process" msgstr "백그라운드 writer 프로세스" -#: postmaster/postmaster.c:2889 +#: postmaster/postmaster.c:2964 msgid "checkpointer process" msgstr "체크포인트 프로세스" -#: postmaster/postmaster.c:2905 +#: postmaster/postmaster.c:2980 msgid "WAL writer process" msgstr "WAL 쓰기 프로세스" -#: postmaster/postmaster.c:2919 +#: postmaster/postmaster.c:2995 msgid "WAL receiver process" msgstr "WAL 수신 프로세스" -#: postmaster/postmaster.c:2934 +#: postmaster/postmaster.c:3010 msgid "autovacuum launcher process" msgstr "autovacuum 실행기 프로세스" -#: postmaster/postmaster.c:2949 +#: postmaster/postmaster.c:3025 msgid "archiver process" msgstr "archiver 프로세스" -#: postmaster/postmaster.c:2965 +#: postmaster/postmaster.c:3041 msgid "statistics collector process" msgstr "통계 수집기 프로세스" -#: postmaster/postmaster.c:2979 +#: postmaster/postmaster.c:3055 msgid "system logger process" msgstr "시스템 로그 프로세스" -#: postmaster/postmaster.c:3041 +#: postmaster/postmaster.c:3117 msgid "worker process" msgstr "작업자 프로세스" -#: postmaster/postmaster.c:3124 postmaster/postmaster.c:3144 -#: postmaster/postmaster.c:3151 postmaster/postmaster.c:3169 +#: 
postmaster/postmaster.c:3200 postmaster/postmaster.c:3220 +#: postmaster/postmaster.c:3227 postmaster/postmaster.c:3245 msgid "server process" msgstr "서버 프로세스" -#: postmaster/postmaster.c:3223 +#: postmaster/postmaster.c:3299 #, c-format msgid "terminating any other active server processes" msgstr "다른 활성화 되어있는 서버 프로세스를 마치고 있는 중입니다" #. translator: %s is a noun phrase describing a child process, such as #. "server process" -#: postmaster/postmaster.c:3479 +#: postmaster/postmaster.c:3555 #, c-format msgid "%s (PID %d) exited with exit code %d" msgstr "%s (PID %d) 프로그램은 %d 코드로 마쳤습니다" -#: postmaster/postmaster.c:3481 postmaster/postmaster.c:3492 -#: postmaster/postmaster.c:3503 postmaster/postmaster.c:3512 -#: postmaster/postmaster.c:3522 +#: postmaster/postmaster.c:3557 postmaster/postmaster.c:3568 +#: postmaster/postmaster.c:3579 postmaster/postmaster.c:3588 +#: postmaster/postmaster.c:3598 #, c-format msgid "Failed process was running: %s" msgstr "" #. translator: %s is a noun phrase describing a child process, such as #. "server process" -#: postmaster/postmaster.c:3489 +#: postmaster/postmaster.c:3565 #, c-format msgid "%s (PID %d) was terminated by exception 0x%X" msgstr "%s (PID %d) 프로세스가 0x%X 예외로 인해 종료됨" #. translator: %s is a noun phrase describing a child process, such as #. "server process" -#: postmaster/postmaster.c:3499 +#: postmaster/postmaster.c:3575 #, c-format msgid "%s (PID %d) was terminated by signal %d: %s" msgstr "%s (PID %d) 프로세스가 %d번 시그널을 받아 종료됨: %s" #. translator: %s is a noun phrase describing a child process, such as #. "server process" -#: postmaster/postmaster.c:3510 +#: postmaster/postmaster.c:3586 #, c-format msgid "%s (PID %d) was terminated by signal %d" msgstr "%s (PID %d) 프로세스가 %d번 시그널을 받아 종료됨" #. translator: %s is a noun phrase describing a child process, such as #. 
"server process" -#: postmaster/postmaster.c:3520 +#: postmaster/postmaster.c:3596 #, c-format msgid "%s (PID %d) exited with unrecognized status %d" msgstr "%s (PID %d) 프로세스가 인식할 수 없는 %d 상태로 종료됨" -#: postmaster/postmaster.c:3707 +#: postmaster/postmaster.c:3783 #, c-format msgid "abnormal database system shutdown" msgstr "비정상적인 데이터베이스 시스템 서비스를 중지" -#: postmaster/postmaster.c:3747 +#: postmaster/postmaster.c:3823 #, c-format msgid "all server processes terminated; reinitializing" msgstr "모든 서버 프로세스가 중지 되었습니다; 재 초기화 중" -#: postmaster/postmaster.c:3959 +#: postmaster/postmaster.c:3989 postmaster/postmaster.c:5400 +#: postmaster/postmaster.c:5764 +#, c-format +msgid "could not generate random cancel key" +msgstr "무작위 취소 키를 만들 수 없음" + +#: postmaster/postmaster.c:4043 #, c-format msgid "could not fork new process for connection: %m" msgstr "연결을 위한 새 프로세스 할당(fork) 실패: %m" -#: postmaster/postmaster.c:4001 +#: postmaster/postmaster.c:4085 msgid "could not fork new process for connection: " msgstr "연결을 위한 새 프로세스 할당(fork) 실패: " -#: postmaster/postmaster.c:4115 +#: postmaster/postmaster.c:4199 #, c-format msgid "connection received: host=%s port=%s" msgstr "접속 수락: host=%s port=%s" -#: postmaster/postmaster.c:4120 +#: postmaster/postmaster.c:4204 #, c-format msgid "connection received: host=%s" msgstr "접속 수락: host=%s" -#: postmaster/postmaster.c:4403 +#: postmaster/postmaster.c:4489 #, c-format msgid "could not execute server process \"%s\": %m" msgstr "\"%s\" 서버 프로세스를 실행할 수 없음: %m" -#: postmaster/postmaster.c:4947 +#: postmaster/postmaster.c:4642 +#, c-format +msgid "giving up after too many tries to reserve shared memory" +msgstr "" + +#: postmaster/postmaster.c:4643 +#, c-format +msgid "This might be caused by ASLR or antivirus software." +msgstr "" + +#: postmaster/postmaster.c:4840 +#, c-format +msgid "SSL configuration could not be loaded in child process" +msgstr "" + +#: postmaster/postmaster.c:4972 +#, c-format +msgid "Please report this to ." +msgstr "이 내용을 주소로 보고하십시오." + +#: postmaster/postmaster.c:5059 #, c-format msgid "database system is ready to accept read only connections" msgstr "데이터베이스 시스템이 읽기 전용으로 연결을 수락할 준비가 되었습니다." 
-#: postmaster/postmaster.c:5238 +#: postmaster/postmaster.c:5328 #, c-format msgid "could not fork startup process: %m" msgstr "시작 프로세스 할당(fork) 실패: %m" -#: postmaster/postmaster.c:5242 +#: postmaster/postmaster.c:5332 #, c-format msgid "could not fork background writer process: %m" msgstr "백그라운 writer 프로세스를 할당(fork)할 수 없습니다: %m" -#: postmaster/postmaster.c:5246 +#: postmaster/postmaster.c:5336 #, c-format msgid "could not fork checkpointer process: %m" msgstr "체크포인트 프로세스를 할당(fork)할 수 없습니다: %m" -#: postmaster/postmaster.c:5250 +#: postmaster/postmaster.c:5340 #, c-format msgid "could not fork WAL writer process: %m" msgstr "WAL 쓰기 프로세스를 할당(fork)할 수 없음: %m" -#: postmaster/postmaster.c:5254 +#: postmaster/postmaster.c:5344 #, c-format msgid "could not fork WAL receiver process: %m" msgstr "WAL 수신 프로세스를 할당(fork)할 수 없음: %m" -#: postmaster/postmaster.c:5258 +#: postmaster/postmaster.c:5348 #, c-format msgid "could not fork process: %m" msgstr "프로세스 할당(fork) 실패: %m" -#: postmaster/postmaster.c:5420 postmaster/postmaster.c:5443 +#: postmaster/postmaster.c:5535 postmaster/postmaster.c:5558 #, c-format msgid "database connection requirement not indicated during registration" msgstr "" -#: postmaster/postmaster.c:5427 postmaster/postmaster.c:5450 +#: postmaster/postmaster.c:5542 postmaster/postmaster.c:5565 #, c-format msgid "invalid processing mode in background worker" msgstr "백그라운드 작업자에서 잘못된 프로세싱 모드가 사용됨" -#: postmaster/postmaster.c:5502 +#: postmaster/postmaster.c:5637 #, c-format msgid "starting background worker process \"%s\"" msgstr "\"%s\" 백그라운드 작업자 프로세스를 시작합니다." -#: postmaster/postmaster.c:5513 +#: postmaster/postmaster.c:5649 #, c-format msgid "could not fork worker process: %m" msgstr "작업자 프로세스를 할당(fork)할 수 없음: %m" -#: postmaster/postmaster.c:5901 +#: postmaster/postmaster.c:6073 #, c-format msgid "could not duplicate socket %d for use in backend: error code %d" msgstr "백엔드에서 사용하기 위해 %d 소켓을 복사할 수 없음: 오류 코드 %d" -#: postmaster/postmaster.c:5933 +#: postmaster/postmaster.c:6105 #, c-format msgid "could not create inherited socket: error code %d\n" msgstr "상속된 소켓을 만들 수 없음: 오류 코드 %d\n" -#: postmaster/postmaster.c:5962 +#: postmaster/postmaster.c:6134 #, c-format msgid "could not open backend variables file \"%s\": %s\n" msgstr "\"%s\" 백엔드 변수 파일을 열 수 없음: %s\n" -#: postmaster/postmaster.c:5969 +#: postmaster/postmaster.c:6141 #, c-format msgid "could not read from backend variables file \"%s\": %s\n" msgstr "\"%s\" 백엔드 변수 파일을 읽을 수 없음: %s\n" -#: postmaster/postmaster.c:5978 +#: postmaster/postmaster.c:6150 #, c-format msgid "could not remove file \"%s\": %s\n" msgstr "\"%s\" 파일을 삭제할 수 없음: %s\n" -#: postmaster/postmaster.c:5995 +#: postmaster/postmaster.c:6167 #, c-format msgid "could not map view of backend variables: error code %lu\n" msgstr "백엔드 변수 파일의 view를 map할 수 없음: 오류 코드 %lu\n" -#: postmaster/postmaster.c:6004 +#: postmaster/postmaster.c:6176 #, c-format msgid "could not unmap view of backend variables: error code %lu\n" msgstr "백엔드 변수 파일의 view를 unmap할 수 없음: 오류 코드 %lu\n" -#: postmaster/postmaster.c:6011 +#: postmaster/postmaster.c:6183 #, c-format msgid "could not close handle to backend parameter variables: error code %lu\n" msgstr "백엔드 변수 파일을 닫을 수 없음: 오류 코드 %lu\n" -#: postmaster/postmaster.c:6172 +#: postmaster/postmaster.c:6344 #, c-format msgid "could not read exit code for process\n" msgstr "프로세스의 종료 코드를 읽을 수 없음\n" -#: postmaster/postmaster.c:6177 +#: postmaster/postmaster.c:6349 #, c-format msgid "could not post child completion status\n" msgstr "하위 완료 상태를 게시할 수 없음\n" -#: 
postmaster/syslogger.c:441 postmaster/syslogger.c:1041 +#: postmaster/syslogger.c:452 postmaster/syslogger.c:1053 #, c-format msgid "could not read from logger pipe: %m" msgstr "로그 파이프에서 읽기 실패: %m" -#: postmaster/syslogger.c:490 +#: postmaster/syslogger.c:502 #, c-format msgid "logger shutting down" msgstr "로그 작업 끝내는 중" -#: postmaster/syslogger.c:534 postmaster/syslogger.c:548 +#: postmaster/syslogger.c:546 postmaster/syslogger.c:560 #, c-format msgid "could not create pipe for syslog: %m" msgstr "syslog에서 사용할 파이프를 만들 수 없습니다: %m" -#: postmaster/syslogger.c:584 +#: postmaster/syslogger.c:596 #, c-format msgid "could not fork system logger: %m" msgstr "시스템 로거(logger)를 확보하질 못 했습니다: %m" -#: postmaster/syslogger.c:620 +#: postmaster/syslogger.c:632 #, c-format msgid "redirecting log output to logging collector process" -msgstr "" +msgstr "서버 로그를 로그 수집 프로세스로 보냅니다." -#: postmaster/syslogger.c:621 +#: postmaster/syslogger.c:633 #, c-format msgid "Future log output will appear in directory \"%s\"." -msgstr "" +msgstr "이제부터 서버 로그는 \"%s\" 디렉토리에 보관됩니다." -#: postmaster/syslogger.c:629 +#: postmaster/syslogger.c:641 #, c-format msgid "could not redirect stdout: %m" msgstr "표준출력을 redirect 하지 못했습니다: %m" -#: postmaster/syslogger.c:634 postmaster/syslogger.c:651 +#: postmaster/syslogger.c:646 postmaster/syslogger.c:663 #, c-format msgid "could not redirect stderr: %m" msgstr "표준오류(stderr)를 redirect 하지 못했습니다: %m" -#: postmaster/syslogger.c:996 +#: postmaster/syslogger.c:1008 #, c-format msgid "could not write to log file: %s\n" msgstr "로그파일 쓰기 실패: %s\n" -#: postmaster/syslogger.c:1136 +#: postmaster/syslogger.c:1150 #, c-format msgid "could not open log file \"%s\": %m" msgstr "\"%s\" 잠금파일을 열 수 없음: %m" -#: postmaster/syslogger.c:1198 postmaster/syslogger.c:1242 +#: postmaster/syslogger.c:1212 postmaster/syslogger.c:1256 #, c-format msgid "disabling automatic rotation (use SIGHUP to re-enable)" msgstr "" "로그파일 자동 교체 기능을 금지합니다(교체하려면 SIGHUP 시그널을 사용함)" -#: regex/regc_pg_locale.c:261 +#: regex/regc_pg_locale.c:262 #, c-format msgid "could not determine which collation to use for regular expression" msgstr "정규식을 사용해서 사용할 정렬규칙(collation)을 찾을 수 없음" -#: replication/basebackup.c:232 +#: replication/basebackup.c:303 #, c-format msgid "could not stat control file \"%s\": %m" msgstr "\"%s\" 컨트롤 파일의 정보를 구할 수 없음: %m" -#: replication/basebackup.c:341 +#: replication/basebackup.c:412 #, c-format msgid "could not find any WAL files" msgstr "어떤 WAL 파일도 찾을 수 없음" -#: replication/basebackup.c:354 replication/basebackup.c:368 -#: replication/basebackup.c:377 +#: replication/basebackup.c:425 replication/basebackup.c:439 +#: replication/basebackup.c:448 #, c-format msgid "could not find WAL file \"%s\"" msgstr "\"%s\" WAL 파일 찾기 실패" -#: replication/basebackup.c:416 replication/basebackup.c:442 +#: replication/basebackup.c:487 replication/basebackup.c:513 #, c-format msgid "unexpected WAL file size \"%s\"" msgstr "\"%s\" WAL 파일의 크기가 알맞지 않음" -#: replication/basebackup.c:428 replication/basebackup.c:1160 +#: replication/basebackup.c:499 replication/basebackup.c:1228 #, c-format msgid "base backup could not send data, aborting backup" msgstr "베이스 백업에서 자료를 보낼 수 없음. 백업을 중지합니다." 
-#: replication/basebackup.c:530 replication/basebackup.c:539 -#: replication/basebackup.c:548 replication/basebackup.c:557 -#: replication/basebackup.c:566 replication/basebackup.c:577 -#: replication/basebackup.c:594 +#: replication/basebackup.c:601 replication/basebackup.c:610 +#: replication/basebackup.c:619 replication/basebackup.c:628 +#: replication/basebackup.c:637 replication/basebackup.c:648 +#: replication/basebackup.c:665 #, c-format msgid "duplicate option \"%s\"" msgstr "\"%s\" 옵션을 두 번 지정했습니다" -#: replication/basebackup.c:583 utils/misc/guc.c:5670 +#: replication/basebackup.c:654 utils/misc/guc.c:5780 #, c-format msgid "%d is outside the valid range for parameter \"%s\" (%d .. %d)" msgstr "" "%d 값은 \"%s\" 매개 변수의 값으로 타당한 범위(%d .. %d)를 벗어났습니다." -#: replication/basebackup.c:857 replication/basebackup.c:959 +#: replication/basebackup.c:928 replication/basebackup.c:1025 #, c-format msgid "could not stat file or directory \"%s\": %m" msgstr "파일 또는 디렉터리 \"%s\"의 상태를 확인할 수 없음: %m" -#: replication/basebackup.c:1112 +#: replication/basebackup.c:1180 #, c-format msgid "skipping special file \"%s\"" msgstr "\"%s\" 특수 파일을 건너뜀" -#: replication/basebackup.c:1223 +#: replication/basebackup.c:1293 #, c-format msgid "file name too long for tar format: \"%s\"" msgstr "tar 파일로 묶기에는 파일 이름이 너무 긺: \"%s\"" -#: replication/basebackup.c:1228 +#: replication/basebackup.c:1298 #, c-format msgid "" "symbolic link target too long for tar format: file name \"%s\", target \"%s\"" @@ -15992,17 +17325,17 @@ msgstr "" "tar 포멧을 사용하기에는 심볼릭 링크의 대상 경로가 너무 깁니다: 파일 이름 \"%s" "\", 대상 \"%s\"" -#: replication/libpqwalreceiver/libpqwalreceiver.c:119 +#: replication/libpqwalreceiver/libpqwalreceiver.c:231 #, c-format -msgid "could not connect to the primary server: %s" -msgstr "주 서버에 연결 할 수 없음: %s" +msgid "invalid connection string syntax: %s" +msgstr "잘못된 연결 문자열 구문: %s" -#: replication/libpqwalreceiver/libpqwalreceiver.c:142 +#: replication/libpqwalreceiver/libpqwalreceiver.c:255 #, c-format msgid "could not parse connection string: %s" msgstr "접속 문자열을 분석할 수 없음: %s" -#: replication/libpqwalreceiver/libpqwalreceiver.c:192 +#: replication/libpqwalreceiver/libpqwalreceiver.c:305 #, c-format msgid "" "could not receive database system identifier and timeline ID from the " @@ -16010,13 +17343,13 @@ msgid "" msgstr "" "주 서버에서 데이터베이스 시스템 식별번호와 타임라인 번호를 받을 수 없음: %s" -#: replication/libpqwalreceiver/libpqwalreceiver.c:203 -#: replication/libpqwalreceiver/libpqwalreceiver.c:357 +#: replication/libpqwalreceiver/libpqwalreceiver.c:316 +#: replication/libpqwalreceiver/libpqwalreceiver.c:523 #, c-format msgid "invalid response from primary server" msgstr "주 서버에서 잘못된 응답이 왔음" -#: replication/libpqwalreceiver/libpqwalreceiver.c:204 +#: replication/libpqwalreceiver/libpqwalreceiver.c:317 #, c-format msgid "" "Could not identify system: got %d rows and %d fields, expected %d rows and " @@ -16025,74 +17358,133 @@ msgstr "" "시스템을 식별할 수 없음: 로우수 %d, 필드수 %d, 예상값: 로우수 %d, 필드수 %d " "이상" -#: replication/libpqwalreceiver/libpqwalreceiver.c:220 -#, c-format -msgid "database system identifier differs between the primary and standby" -msgstr "데이터베이스 시스템 식별번호가 주 서버와 대기 서버가 서로 다름" - -#: replication/libpqwalreceiver/libpqwalreceiver.c:221 -#, c-format -msgid "The primary's identifier is %s, the standby's identifier is %s." -msgstr "주 서버: %s, 대기 서버: %s." 
- -#: replication/libpqwalreceiver/libpqwalreceiver.c:263 +#: replication/libpqwalreceiver/libpqwalreceiver.c:383 +#: replication/libpqwalreceiver/libpqwalreceiver.c:389 +#: replication/libpqwalreceiver/libpqwalreceiver.c:414 #, c-format msgid "could not start WAL streaming: %s" msgstr "WAL 스트리밍 작업을 시작할 수 없음: %s" -#: replication/libpqwalreceiver/libpqwalreceiver.c:281 +#: replication/libpqwalreceiver/libpqwalreceiver.c:433 #, c-format msgid "could not send end-of-streaming message to primary: %s" msgstr "주 서버로 스트리밍 종료 메시지를 보낼 수 없음: %s" -#: replication/libpqwalreceiver/libpqwalreceiver.c:303 +#: replication/libpqwalreceiver/libpqwalreceiver.c:455 #, c-format msgid "unexpected result set after end-of-streaming" msgstr "스트리밍 종료 요청에 대한 잘못된 응답을 받음" -#: replication/libpqwalreceiver/libpqwalreceiver.c:315 +#: replication/libpqwalreceiver/libpqwalreceiver.c:469 +#, c-format +msgid "error while shutting down streaming COPY: %s" +msgstr "COPY 스트리밍 종료 중 오류 발생: %s" + +#: replication/libpqwalreceiver/libpqwalreceiver.c:478 #, c-format msgid "error reading result of streaming command: %s" msgstr "스트리밍 명령에 대한 결과 처리에서 오류 발생: %s" -#: replication/libpqwalreceiver/libpqwalreceiver.c:323 +#: replication/libpqwalreceiver/libpqwalreceiver.c:486 +#: replication/libpqwalreceiver/libpqwalreceiver.c:714 #, c-format msgid "unexpected result after CommandComplete: %s" msgstr "CommandComplete 작업 후 예상치 못한 결과를 받음: %s" -#: replication/libpqwalreceiver/libpqwalreceiver.c:346 +#: replication/libpqwalreceiver/libpqwalreceiver.c:512 #, c-format msgid "could not receive timeline history file from the primary server: %s" msgstr "주 서버에서 타임라인 내역 파일을 받을 수 없음: %s" -#: replication/libpqwalreceiver/libpqwalreceiver.c:358 +#: replication/libpqwalreceiver/libpqwalreceiver.c:524 #, c-format msgid "Expected 1 tuple with 2 fields, got %d tuples with %d fields." msgstr "2개의 칼럼으로 된 하나의 튜플을 예상하지만, %d 튜플 (%d 칼럼)을 수신함" -#: replication/libpqwalreceiver/libpqwalreceiver.c:386 -#, c-format -msgid "invalid socket: %s" -msgstr "잘못된 소켓: %s" - -#: replication/libpqwalreceiver/libpqwalreceiver.c:426 -#: storage/ipc/latch.c:1280 -#, c-format -msgid "select() failed: %m" -msgstr "select() 실패: %m" - -#: replication/libpqwalreceiver/libpqwalreceiver.c:549 -#: replication/libpqwalreceiver/libpqwalreceiver.c:576 -#: replication/libpqwalreceiver/libpqwalreceiver.c:582 +#: replication/libpqwalreceiver/libpqwalreceiver.c:678 +#: replication/libpqwalreceiver/libpqwalreceiver.c:729 +#: replication/libpqwalreceiver/libpqwalreceiver.c:735 #, c-format msgid "could not receive data from WAL stream: %s" msgstr "WAL 스트림에서 자료 받기 실패: %s" -#: replication/libpqwalreceiver/libpqwalreceiver.c:601 +#: replication/libpqwalreceiver/libpqwalreceiver.c:754 #, c-format msgid "could not send data to WAL stream: %s" msgstr "WAL 스트림에 데이터를 보낼 수 없음: %s" +#: replication/libpqwalreceiver/libpqwalreceiver.c:803 +#, c-format +msgid "could not create replication slot \"%s\": %s" +msgstr "\"%s\" 복제 슬롯을 만들 수 없음: %s" + +#: replication/libpqwalreceiver/libpqwalreceiver.c:837 +#, c-format +msgid "invalid query response" +msgstr "잘못된 쿼리 응답" + +#: replication/libpqwalreceiver/libpqwalreceiver.c:838 +#, c-format +msgid "Expected %d fields, got %d fields." 
+msgstr "%d개의 칼럼을 예상하지만, %d개의 칼럼을 수신함" + +#: replication/libpqwalreceiver/libpqwalreceiver.c:907 +#, c-format +msgid "the query interface requires a database connection" +msgstr "이 쿼리 인터페이스는 데이터베이스 연결이 필요합니다" + +#: replication/libpqwalreceiver/libpqwalreceiver.c:938 +msgid "empty query" +msgstr "빈 쿼리" + +#: replication/logical/launcher.c:298 +#, c-format +msgid "starting logical replication worker for subscription \"%s\"" +msgstr "\"%s\" 구독을 위해 논리 복제 작업자를 시작합니다" + +#: replication/logical/launcher.c:305 +#, c-format +msgid "cannot start logical replication workers when max_replication_slots = 0" +msgstr "" +"max_replication_slots = 0 설정 때문에 논리 복제 작업자를 시작 할 수 없습니다" + +#: replication/logical/launcher.c:385 +#, c-format +msgid "out of logical replication worker slots" +msgstr "더 이상의 논리 복제 작업자용 슬롯이 없습니다" + +#: replication/logical/launcher.c:386 +#, c-format +msgid "You might need to increase max_logical_replication_workers." +msgstr "max_logical_replication_workers 값을 늘리세요." + +#: replication/logical/launcher.c:440 +#, c-format +msgid "out of background worker slots" +msgstr "백그라운 작업자 슬롯이 모자랍니다" + +#: replication/logical/launcher.c:441 +#, c-format +msgid "You might need to increase max_worker_processes." +msgstr "max_worker_processes 값을 늘리세요." + +#: replication/logical/launcher.c:624 +#, c-format +msgid "logical replication worker slot %d is empty, cannot attach" +msgstr "" + +#: replication/logical/launcher.c:633 +#, c-format +msgid "" +"logical replication worker slot %d is already used by another worker, cannot " +"attach" +msgstr "" + +#: replication/logical/launcher.c:885 +#, c-format +msgid "logical replication launcher started" +msgstr "논리 복제 관리자가 시작됨" + #: replication/logical/logical.c:83 #, c-format msgid "logical decoding requires wal_level >= logical" @@ -16108,17 +17500,17 @@ msgstr "논리적 디코딩 기능은 데이터베이스 연결이 필요합니 msgid "logical decoding cannot be used while in recovery" msgstr "논리적 디코딩 기능은 복구 상태에서는 사용할 수 없음" -#: replication/logical/logical.c:236 replication/logical/logical.c:348 +#: replication/logical/logical.c:243 replication/logical/logical.c:365 #, c-format msgid "cannot use physical replication slot for logical decoding" msgstr "논리적 디코딩에서는 물리적 복제 슬롯을 사용할 수 없음" -#: replication/logical/logical.c:241 replication/logical/logical.c:353 +#: replication/logical/logical.c:248 replication/logical/logical.c:370 #, c-format msgid "replication slot \"%s\" was not created in this database" msgstr "\"%s\" 복제 슬롯이 이 데이터베이스 만들어져있지 않음" -#: replication/logical/logical.c:248 +#: replication/logical/logical.c:255 #, c-format msgid "" "cannot create logical replication slot in transaction that has performed " @@ -16126,252 +17518,479 @@ msgid "" msgstr "" "자료 변경 작업이 있는 트랜잭션 안에서는 논리적 복제 슬롯을 만들 수 없음" -#: replication/logical/logical.c:390 +#: replication/logical/logical.c:408 #, c-format msgid "starting logical decoding for slot \"%s\"" msgstr "\"%s\" 이름의 논리적 복제 슬롯을 만드는 중" -#: replication/logical/logical.c:392 +#: replication/logical/logical.c:410 #, c-format msgid "streaming transactions committing after %X/%X, reading WAL from %X/%X" msgstr "" -#: replication/logical/logical.c:527 +#: replication/logical/logical.c:557 #, c-format msgid "" "slot \"%s\", output plugin \"%s\", in the %s callback, associated LSN %X/%X" msgstr "" -#: replication/logical/logical.c:534 +#: replication/logical/logical.c:564 #, c-format msgid "slot \"%s\", output plugin \"%s\", in the %s callback" msgstr "" -#: replication/logical/logicalfuncs.c:113 replication/slotfuncs.c:32 +#: replication/logical/logicalfuncs.c:114 
replication/slotfuncs.c:32 #, c-format msgid "must be superuser or replication role to use replication slots" msgstr "" "복제 슬롯은 superuser 또는 replication 롤 옵션을 포함한 사용자만 사용할 수 있" "습니다." -#: replication/logical/logicalfuncs.c:152 +#: replication/logical/logicalfuncs.c:153 #, c-format msgid "slot name must not be null" msgstr "슬롯 이름으로 null 값을 사용할 수 없습니다" -#: replication/logical/logicalfuncs.c:168 +#: replication/logical/logicalfuncs.c:169 #, c-format msgid "options array must not be null" msgstr "옵션 배열은 null 값을 사용할 수 없습니다." -#: replication/logical/logicalfuncs.c:199 +#: replication/logical/logicalfuncs.c:200 #, c-format msgid "array must be one-dimensional" msgstr "배열은 일차원 배열이어야합니다" -#: replication/logical/logicalfuncs.c:205 +#: replication/logical/logicalfuncs.c:206 #, c-format msgid "array must not contain nulls" msgstr "배열에는 null 값을 포함할 수 없습니다" -#: replication/logical/logicalfuncs.c:221 utils/adt/json.c:2277 -#: utils/adt/jsonb.c:1356 +#: replication/logical/logicalfuncs.c:222 utils/adt/json.c:2282 +#: utils/adt/jsonb.c:1357 #, c-format msgid "array must have even number of elements" msgstr "배열은 그 요소의 개수가 짝수여야 함" -#: replication/logical/logicalfuncs.c:264 +#: replication/logical/logicalfuncs.c:268 #, c-format msgid "" "logical decoding output plugin \"%s\" produces binary output, but function " "\"%s\" expects textual data" msgstr "" -#: replication/logical/origin.c:181 +#: replication/logical/origin.c:185 #, c-format msgid "only superusers can query or manipulate replication origins" msgstr "슈퍼유저만 복제 원본에 대한 쿼리나, 관리를 할 수 있습니다." -#: replication/logical/origin.c:186 +#: replication/logical/origin.c:190 #, c-format msgid "" "cannot query or manipulate replication origin when max_replication_slots = 0" msgstr "" -#: replication/logical/origin.c:191 +#: replication/logical/origin.c:195 #, c-format msgid "cannot manipulate replication origins during recovery" msgstr "" -#: replication/logical/origin.c:316 +#: replication/logical/origin.c:319 #, c-format msgid "could not find free replication origin OID" msgstr "비어있는 복제 오리진 OID를 찾을 수 없음" -#: replication/logical/origin.c:353 +#: replication/logical/origin.c:361 #, c-format msgid "could not drop replication origin with OID %d, in use by PID %d" msgstr "" -#: replication/logical/origin.c:671 +#: replication/logical/origin.c:687 #, c-format msgid "replication checkpoint has wrong magic %u instead of %u" msgstr "복제 체크포인트의 잘못된 매직 번호: %u, 기대값: %u" -#: replication/logical/origin.c:703 +#: replication/logical/origin.c:719 #, c-format msgid "could not read file \"%s\": read %d of %zu" msgstr "\"%s\" 파일을 읽을 수 없음: %d 읽음, 전체 %zu" -#: replication/logical/origin.c:712 +#: replication/logical/origin.c:728 #, c-format msgid "could not find free replication state, increase max_replication_slots" msgstr "" "사용 가능한 복제 슬롯이 부족합니다. 
max_replication_slots 값을 늘리세요" -#: replication/logical/origin.c:730 +#: replication/logical/origin.c:746 #, c-format msgid "replication slot checkpoint has wrong checksum %u, expected %u" msgstr "복제 슬롯 체크포인트의 체크섬 값이 잘못됨: %u, 기대값 %u" -#: replication/logical/origin.c:854 +#: replication/logical/origin.c:870 #, c-format msgid "replication origin with OID %d is already active for PID %d" msgstr "" -#: replication/logical/origin.c:865 replication/logical/origin.c:1045 +#: replication/logical/origin.c:881 replication/logical/origin.c:1068 #, c-format msgid "" "could not find free replication state slot for replication origin with OID %u" msgstr "%u OID 복제 오리진을 위한 여유 복제 슬롯을 찾을 수 없음" -#: replication/logical/origin.c:867 replication/logical/origin.c:1047 -#: replication/slot.c:1299 +#: replication/logical/origin.c:883 replication/logical/origin.c:1070 +#: replication/slot.c:1509 #, c-format msgid "Increase max_replication_slots and try again." msgstr "max_replication_slots 값을 늘린 후 다시 시도해 보세요" -#: replication/logical/origin.c:1004 +#: replication/logical/origin.c:1027 #, c-format msgid "cannot setup replication origin when one is already setup" msgstr "하나가 이미 설정되어 더 이상 복제 오리진 설정을 할 수 없음" -#: replication/logical/origin.c:1033 +#: replication/logical/origin.c:1056 #, c-format msgid "replication identifier %d is already active for PID %d" msgstr "%d번 복제 식별자가 %d PID에서 사용하고 있습니다." -#: replication/logical/origin.c:1079 replication/logical/origin.c:1274 -#: replication/logical/origin.c:1294 +#: replication/logical/origin.c:1107 replication/logical/origin.c:1305 +#: replication/logical/origin.c:1325 #, c-format msgid "no replication origin is configured" msgstr "복제 오리진 설정이 없습니다" -#: replication/logical/reorderbuffer.c:2330 +#: replication/logical/relation.c:259 +#, c-format +msgid "logical replication target relation \"%s.%s\" does not exist" +msgstr "\"%s.%s\" 이름의 논리 복제 대상 릴레이션이 없습니다." + +#: replication/logical/relation.c:300 +#, c-format +msgid "" +"logical replication target relation \"%s.%s\" is missing some replicated " +"columns" +msgstr "" + +#: replication/logical/relation.c:340 +#, c-format +msgid "" +"logical replication target relation \"%s.%s\" uses system columns in REPLICA " +"IDENTITY index" +msgstr "" + +#: replication/logical/relation.c:456 +#, c-format +msgid "built-in type %u not found" +msgstr "%u oid 용 내장 자료형이 없습니다" + +#: replication/logical/relation.c:457 +#, c-format +msgid "" +"This can be caused by having a publisher with a higher PostgreSQL major " +"version than the subscriber." 
+msgstr "" + +#: replication/logical/relation.c:488 +#, c-format +msgid "data type \"%s.%s\" required for logical replication does not exist" +msgstr "논리 복제용 \"%s.%s\" 자료형이 없습니다" + +#: replication/logical/reorderbuffer.c:2288 #, c-format msgid "could not write to data file for XID %u: %m" msgstr "%u XID 내용을 데이터 파일에 쓸 수 없음: %m" -#: replication/logical/reorderbuffer.c:2426 -#: replication/logical/reorderbuffer.c:2446 +#: replication/logical/reorderbuffer.c:2387 +#: replication/logical/reorderbuffer.c:2409 #, c-format msgid "could not read from reorderbuffer spill file: %m" msgstr "reorderbuffer 처리용 파일에서 읽기 실패: %m" -#: replication/logical/reorderbuffer.c:2430 -#: replication/logical/reorderbuffer.c:2450 +#: replication/logical/reorderbuffer.c:2391 +#: replication/logical/reorderbuffer.c:2413 #, c-format msgid "" "could not read from reorderbuffer spill file: read %d instead of %u bytes" msgstr "" "reorderbuffer 처리용 파일에서 읽기 실패: %d 바이트 읽음, 기대값 %u 바이트" -#: replication/logical/reorderbuffer.c:3106 +#: replication/logical/reorderbuffer.c:3071 #, c-format msgid "could not read from file \"%s\": read %d instead of %d bytes" msgstr "\"%s\" 파일에서 읽기 실패: %d 바이트 읽음, 기대값 %d 바이트" -#: replication/logical/snapbuild.c:598 +#: replication/logical/snapbuild.c:612 +#, c-format +msgid "initial slot snapshot too large" +msgstr "초기 슬롯 스냅샷이 너무 큽니다." + +#: replication/logical/snapbuild.c:664 #, c-format msgid "exported logical decoding snapshot: \"%s\" with %u transaction ID" msgid_plural "" "exported logical decoding snapshot: \"%s\" with %u transaction IDs" msgstr[0] "" -#: replication/logical/snapbuild.c:917 replication/logical/snapbuild.c:1282 -#: replication/logical/snapbuild.c:1813 +#: replication/logical/snapbuild.c:1262 replication/logical/snapbuild.c:1355 +#: replication/logical/snapbuild.c:1842 #, c-format msgid "logical decoding found consistent point at %X/%X" msgstr "논리적 디코딩 이어서 시작할 위치: %X/%X" -#: replication/logical/snapbuild.c:919 -#, c-format -msgid "Transaction ID %u finished; no more running transactions." -msgstr "%u 트랜잭션 ID 마침; 더 처리할 트랜잭션이 없음" - -#: replication/logical/snapbuild.c:1284 +#: replication/logical/snapbuild.c:1264 #, c-format msgid "There are no running transactions." msgstr "실행할 트랜잭션이 없음" -#: replication/logical/snapbuild.c:1346 +#: replication/logical/snapbuild.c:1306 #, c-format msgid "logical decoding found initial starting point at %X/%X" msgstr "논리적 디코딩 시작 위치: %X/%X" -#: replication/logical/snapbuild.c:1348 +#: replication/logical/snapbuild.c:1308 replication/logical/snapbuild.c:1332 +#, c-format +msgid "Waiting for transactions (approximately %d) older than %u to end." +msgstr "" + +#: replication/logical/snapbuild.c:1330 +#, c-format +msgid "logical decoding found initial consistent point at %X/%X" +msgstr "논리적 디코딩을 이어서 시작할 위치: %X/%X" + +#: replication/logical/snapbuild.c:1357 #, c-format -msgid "%u transaction needs to finish." -msgid_plural "%u transactions need to finish." -msgstr[0] "마치려면 %u개의 트랜잭션이 필요합니다." +msgid "There are no old transactions anymore." +msgstr "더이상 오래된 트랜잭션이 없습니다." 
-#: replication/logical/snapbuild.c:1687 replication/logical/snapbuild.c:1713 -#: replication/logical/snapbuild.c:1727 replication/logical/snapbuild.c:1741 +#: replication/logical/snapbuild.c:1715 replication/logical/snapbuild.c:1743 +#: replication/logical/snapbuild.c:1760 replication/logical/snapbuild.c:1776 #, c-format msgid "could not read file \"%s\", read %d of %d: %m" msgstr "\"%s\" 파일을 읽을 수 없음, %d/%d 바이트 읽음: %m" -#: replication/logical/snapbuild.c:1693 +#: replication/logical/snapbuild.c:1721 #, c-format msgid "snapbuild state file \"%s\" has wrong magic number: %u instead of %u" msgstr "\"%s\" snapbuild 상태 파일의 매직 번호가 이상함: 현재값 %u, 기대값 %u" -#: replication/logical/snapbuild.c:1698 +#: replication/logical/snapbuild.c:1726 #, c-format msgid "snapbuild state file \"%s\" has unsupported version: %u instead of %u" msgstr "\"%s\" snapbuild 상태 파일의 버전이 이상함: 현재값 %u, 기대값 %u" -#: replication/logical/snapbuild.c:1754 +#: replication/logical/snapbuild.c:1789 #, c-format msgid "checksum mismatch for snapbuild state file \"%s\": is %u, should be %u" msgstr "" -#: replication/logical/snapbuild.c:1815 +#: replication/logical/snapbuild.c:1844 #, c-format msgid "Logical decoding will begin using saved snapshot." msgstr "" -#: replication/logical/snapbuild.c:1888 +#: replication/logical/snapbuild.c:1916 #, c-format msgid "could not parse file name \"%s\"" msgstr "\"%s\" 파일 이름을 분석할 수 없음" -#: replication/slot.c:183 +#: replication/logical/tablesync.c:138 +#, c-format +msgid "" +"logical replication table synchronization worker for subscription \"%s\", " +"table \"%s\" has finished" +msgstr "" + +#: replication/logical/tablesync.c:685 +#, c-format +msgid "could not fetch table info for table \"%s.%s\" from publisher: %s" +msgstr "\"%s.%s\" 테이블용 테이블 정보를 구할 수 없습니다, 해당 발행: %s" + +#: replication/logical/tablesync.c:691 +#, c-format +msgid "table \"%s.%s\" not found on publisher" +msgstr "" + +#: replication/logical/tablesync.c:721 +#, c-format +msgid "could not fetch table info for table \"%s.%s\": %s" +msgstr "\"%s.%s\" 테이블용 테이블 정보를 구할 수 없습니다: %s" + +#: replication/logical/tablesync.c:791 +#, c-format +msgid "could not start initial contents copy for table \"%s.%s\": %s" +msgstr "\"%s.%s\" 테이블용 초기 자료 복사를 시작할 수 없습니다: %s" + +#: replication/logical/tablesync.c:905 +#, c-format +msgid "table copy could not start transaction on publisher" +msgstr "발행 서버에서는 테이블 복사 트랜잭션을 시작할 수 없음" + +#: replication/logical/tablesync.c:927 +#, c-format +msgid "table copy could not finish transaction on publisher" +msgstr "" + +#: replication/logical/worker.c:291 +#, c-format +msgid "" +"processing remote data for replication target relation \"%s.%s\" column \"%s" +"\", remote type %s, local type %s" +msgstr "" + +#: replication/logical/worker.c:501 +#, c-format +msgid "ORIGIN message sent out of order" +msgstr "" + +#: replication/logical/worker.c:632 +#, c-format +msgid "" +"publisher did not send replica identity column expected by the logical " +"replication target relation \"%s.%s\"" +msgstr "" + +#: replication/logical/worker.c:639 +#, c-format +msgid "" +"logical replication target relation \"%s.%s\" has neither REPLICA IDENTITY " +"index nor PRIMARY KEY and published relation does not have REPLICA IDENTITY " +"FULL" +msgstr "" + +#: replication/logical/worker.c:846 +#, c-format +msgid "" +"logical replication could not find row for delete in replication target " +"relation \"%s\"" +msgstr "논리 복제 작업으로 \"%s\" 복제 대상에서 삭제용 로우를 찾을 수 없습니다" + +#: replication/logical/worker.c:913 +#, c-format +msgid "invalid logical replication 
message type \"%c\"" +msgstr "잘못된 논리 복제 메시지 형태 \"%c\"" + +#: replication/logical/worker.c:1054 +#, c-format +msgid "data stream from publisher has ended" +msgstr "" + +#: replication/logical/worker.c:1213 +#, c-format +msgid "terminating logical replication worker due to timeout" +msgstr "시간 제한으로 논리 복제 작업자를 중지합니다." + +#: replication/logical/worker.c:1361 +#, c-format +msgid "" +"logical replication apply worker for subscription \"%s\" will stop because " +"the subscription was removed" +msgstr "" + +#: replication/logical/worker.c:1375 +#, c-format +msgid "" +"logical replication apply worker for subscription \"%s\" will stop because " +"the subscription was disabled" +msgstr "" + +#: replication/logical/worker.c:1389 +#, c-format +msgid "" +"logical replication apply worker for subscription \"%s\" will restart " +"because the connection information was changed" +msgstr "" + +#: replication/logical/worker.c:1403 +#, c-format +msgid "" +"logical replication apply worker for subscription \"%s\" will restart " +"because subscription was renamed" +msgstr "" + +#: replication/logical/worker.c:1420 +#, c-format +msgid "" +"logical replication apply worker for subscription \"%s\" will restart " +"because the replication slot name was changed" +msgstr "" + +#: replication/logical/worker.c:1434 +#, c-format +msgid "" +"logical replication apply worker for subscription \"%s\" will restart " +"because subscription's publications were changed" +msgstr "" + +#: replication/logical/worker.c:1542 +#, c-format +msgid "" +"logical replication apply worker for subscription \"%s\" will not start " +"because the subscription was disabled during startup" +msgstr "" + +#: replication/logical/worker.c:1556 +#, c-format +msgid "" +"logical replication table synchronization worker for subscription \"%s\", " +"table \"%s\" has started" +msgstr "" + +#: replication/logical/worker.c:1560 +#, c-format +msgid "logical replication apply worker for subscription \"%s\" has started" +msgstr "" + +#: replication/logical/worker.c:1600 +#, c-format +msgid "subscription has no replication slot set" +msgstr "" + +#: replication/pgoutput/pgoutput.c:113 +#, c-format +msgid "invalid proto_version" +msgstr "잘못된 proto_version" + +#: replication/pgoutput/pgoutput.c:118 +#, c-format +msgid "proto_version \"%s\" out of range" +msgstr "proto_verson \"%s\" 범위 벗어남" + +#: replication/pgoutput/pgoutput.c:135 +#, c-format +msgid "invalid publication_names syntax" +msgstr "잘못된 publication_names 구문" + +#: replication/pgoutput/pgoutput.c:179 +#, c-format +msgid "client sent proto_version=%d but we only support protocol %d or lower" +msgstr "" + +#: replication/pgoutput/pgoutput.c:185 +#, c-format +msgid "client sent proto_version=%d but we only support protocol %d or higher" +msgstr "" + +#: replication/pgoutput/pgoutput.c:191 +#, c-format +msgid "publication_names parameter missing" +msgstr "publication_names 매개 변수가 빠졌음" + +#: replication/slot.c:182 #, c-format msgid "replication slot name \"%s\" is too short" msgstr "\"%s\" 복제 슬롯 이름이 너무 짧음" -#: replication/slot.c:192 +#: replication/slot.c:191 #, c-format msgid "replication slot name \"%s\" is too long" msgstr "\"%s\" 복제 슬롯 이름이 너무 긺" -#: replication/slot.c:205 +#: replication/slot.c:204 #, c-format msgid "replication slot name \"%s\" contains invalid character" msgstr "\"%s\" 복제 슬롯 이름에 사용할 수 없는 문자가 있음" -#: replication/slot.c:207 +#: replication/slot.c:206 #, c-format msgid "" "Replication slot names may only contain lower case letters, numbers, and the " @@ -16380,77 +17999,77 @@ msgstr "" 
"복제 슬롯 이름으로 사용할 수 있는 문자는 영문 소문자, 숫자, 밑줄(_) 문자입니" "다." -#: replication/slot.c:254 +#: replication/slot.c:253 #, c-format msgid "replication slot \"%s\" already exists" msgstr "\"%s\" 이름의 복제 슬롯이 이미 있습니다." -#: replication/slot.c:264 +#: replication/slot.c:263 #, c-format msgid "all replication slots are in use" msgstr "모든 복제 슬롯이 사용 중입니다." -#: replication/slot.c:265 +#: replication/slot.c:264 #, c-format msgid "Free one or increase max_replication_slots." msgstr "하나를 비우든지, max_replication_slots 설정값을 늘리세요." -#: replication/slot.c:361 +#: replication/slot.c:379 #, c-format msgid "replication slot \"%s\" does not exist" msgstr "\"%s\" 이름의 복제 슬롯이 없습니다" -#: replication/slot.c:365 +#: replication/slot.c:390 replication/slot.c:940 #, c-format msgid "replication slot \"%s\" is active for PID %d" msgstr "\"%s\" 이름의 복제 슬롯을 %d PID 프로세스가 사용중입니다." -#: replication/slot.c:511 replication/slot.c:923 replication/slot.c:1260 +#: replication/slot.c:624 replication/slot.c:1121 replication/slot.c:1470 #, c-format msgid "could not remove directory \"%s\"" msgstr "\"%s\" 디렉터리를 삭제할 수 없음" -#: replication/slot.c:772 +#: replication/slot.c:970 #, c-format msgid "replication slots can only be used if max_replication_slots > 0" msgstr "복제 슬롯은 max_replication_slots > 0 상태에서 사용될 수 있습니다." -#: replication/slot.c:777 +#: replication/slot.c:975 #, c-format msgid "replication slots can only be used if wal_level >= replica" msgstr "복제 슬롯은 wal_level >= replica 상태에서 사용될 수 있습니다." -#: replication/slot.c:1192 replication/slot.c:1230 +#: replication/slot.c:1400 replication/slot.c:1440 #, c-format msgid "could not read file \"%s\", read %d of %u: %m" msgstr "\"%s\" 파일을 읽을 수 없음, %d/%u 바이트 읽음: %m" -#: replication/slot.c:1201 +#: replication/slot.c:1409 #, c-format msgid "replication slot file \"%s\" has wrong magic number: %u instead of %u" msgstr "\"%s\" 복제 슬롯 파일의 매직 번호가 이상합니다: 현재값 %u, 기대값 %u" -#: replication/slot.c:1208 +#: replication/slot.c:1416 #, c-format msgid "replication slot file \"%s\" has unsupported version %u" msgstr "\"%s\" 복제 슬롯 파일은 지원하지 않는 %u 버전 파일입니다" -#: replication/slot.c:1215 +#: replication/slot.c:1423 #, c-format msgid "replication slot file \"%s\" has corrupted length %u" msgstr "\"%s\" 복제 슬롯 파일이 %u 길이로 손상되었습니다." -#: replication/slot.c:1245 +#: replication/slot.c:1455 #, c-format msgid "checksum mismatch for replication slot file \"%s\": is %u, should be %u" msgstr "\"%s\" 복제 슬롯 파일의 체크섬 값이 이상합니다: 현재값 %u, 기대값 %u" -#: replication/slot.c:1298 +#: replication/slot.c:1508 #, c-format msgid "too many replication slots active before shutdown" msgstr "서버 중지 전에 너무 많은 복제 슬롯이 활성화 상태입니다" -#: replication/syncrep.c:221 +#: replication/syncrep.c:248 #, c-format msgid "" "canceling the wait for synchronous replication and terminating connection " @@ -16458,7 +18077,7 @@ msgid "" msgstr "" "관리자 명령에 의해 동기식 복제의 대기 작업과 접속 끊기 작업을 취소합니다." -#: replication/syncrep.c:222 replication/syncrep.c:239 +#: replication/syncrep.c:249 replication/syncrep.c:266 #, c-format msgid "" "The transaction has already committed locally, but might not have been " @@ -16467,512 +18086,630 @@ msgstr "" "주 서버에서는 이 트랜잭션이 커밋되었지만, 복제용 대기 서버에서는 아직 커밋 되" "지 않았을 가능성이 있습니다." -#: replication/syncrep.c:238 +#: replication/syncrep.c:265 #, c-format msgid "canceling wait for synchronous replication due to user request" msgstr "사용자 요청에 의해 동기식 복제 작업을 취소합니다." 
-#: replication/syncrep.c:368 +#: replication/syncrep.c:399 #, c-format msgid "standby \"%s\" now has synchronous standby priority %u" msgstr "\"%s\" 대기 서버의 동기식 복제 우선순위가 %u 입니다" -#: replication/syncrep.c:428 +#: replication/syncrep.c:460 #, c-format msgid "standby \"%s\" is now a synchronous standby with priority %u" msgstr "\"%s\" 대기 서버의 동기식 복제 우선순위가 %u 로 변경되었습니다." -#: replication/syncrep.c:921 +#: replication/syncrep.c:464 +#, c-format +msgid "standby \"%s\" is now a candidate for quorum synchronous standby" +msgstr "\"%s\" 대기 서버가 동기식 대기 서버 후보가 되었습니다" + +#: replication/syncrep.c:1162 #, c-format msgid "synchronous_standby_names parser failed" msgstr "synchronous_standby_names 값을 분석할 수 없음" -#: replication/syncrep.c:927 +#: replication/syncrep.c:1168 #, c-format msgid "number of synchronous standbys (%d) must be greater than zero" msgstr "동기식 대기 서버 수 (%d)는 0보다 커야 합니다." -#: replication/walreceiver.c:173 +#: replication/walreceiver.c:168 #, c-format msgid "terminating walreceiver process due to administrator command" msgstr "관리자 명령으로 인해 WAL 수신기를 종료합니다." -#: replication/walreceiver.c:344 +#: replication/walreceiver.c:300 +#, c-format +msgid "could not connect to the primary server: %s" +msgstr "주 서버에 연결 할 수 없음: %s" + +#: replication/walreceiver.c:339 +#, c-format +msgid "database system identifier differs between the primary and standby" +msgstr "데이터베이스 시스템 식별번호가 주 서버와 대기 서버가 서로 다름" + +#: replication/walreceiver.c:340 +#, c-format +msgid "The primary's identifier is %s, the standby's identifier is %s." +msgstr "주 서버: %s, 대기 서버: %s." + +#: replication/walreceiver.c:351 #, c-format msgid "highest timeline %u of the primary is behind recovery timeline %u" msgstr "" "주 서버의 제일 최신의 타임라인은 %u 인데, 복구 타임라인 %u 보다 옛것입니다" -#: replication/walreceiver.c:377 +#: replication/walreceiver.c:387 #, c-format msgid "started streaming WAL from primary at %X/%X on timeline %u" msgstr "주 서버의 WAL 스트리밍 시작 위치: %X/%X (타임라인 %u)" -#: replication/walreceiver.c:382 +#: replication/walreceiver.c:392 #, c-format msgid "restarted WAL streaming at %X/%X on timeline %u" msgstr "WAL 스트리밍 재시작 위치: %X/%X (타임라인 %u)" -#: replication/walreceiver.c:411 +#: replication/walreceiver.c:421 #, c-format msgid "cannot continue WAL streaming, recovery has already ended" msgstr "WAL 스트리밍 계속할 수 없음, 복구가 이미 종료됨" -#: replication/walreceiver.c:448 +#: replication/walreceiver.c:458 #, c-format msgid "replication terminated by primary server" msgstr "주 서버에 의해서 복제가 끝남" -#: replication/walreceiver.c:449 +#: replication/walreceiver.c:459 #, c-format msgid "End of WAL reached on timeline %u at %X/%X." msgstr "타임라인 %u, 위치 %X/%X 에서 WAL 끝에 도달함" -#: replication/walreceiver.c:543 +#: replication/walreceiver.c:554 #, c-format msgid "terminating walreceiver due to timeout" msgstr "시간 제한으로 wal 수신기를 중지합니다." -#: replication/walreceiver.c:583 +#: replication/walreceiver.c:594 #, c-format msgid "primary server contains no more WAL on requested timeline %u" msgstr "주 서버에는 요청 받은 %u 타임라인의 WAL가 더 이상 없습니다." -#: replication/walreceiver.c:598 replication/walreceiver.c:957 +#: replication/walreceiver.c:609 replication/walreceiver.c:968 #, c-format msgid "could not close log segment %s: %m" msgstr "%s 로그 조각 파일을 닫을 수 없음: %m" -#: replication/walreceiver.c:722 +#: replication/walreceiver.c:734 #, c-format msgid "fetching timeline history file for timeline %u from primary server" msgstr "주 서버에서 %u 타임라인용 타임라인 내역 파일을 가져옵니다." 
-#: replication/walreceiver.c:1011 +#: replication/walreceiver.c:1022 #, c-format msgid "could not write to log segment %s at offset %u, length %lu: %m" msgstr "%s 로그 조각 파일 쓰기 실패: 위치 %u, 길이 %lu: %m" -#: replication/walsender.c:485 +#: replication/walsender.c:490 #, c-format msgid "could not seek to beginning of file \"%s\": %m" msgstr "\"%s\" 파일에서 시작 위치를 찾을 수 없음: %m" -#: replication/walsender.c:536 +#: replication/walsender.c:531 +#, c-format +msgid "IDENTIFY_SYSTEM has not been run before START_REPLICATION" +msgstr "" + +#: replication/walsender.c:548 #, c-format msgid "cannot use a logical replication slot for physical replication" msgstr "물리적 복제에서 논리적 복제 슬롯을 사용할 수 없음" -#: replication/walsender.c:599 +#: replication/walsender.c:611 #, c-format msgid "" "requested starting point %X/%X on timeline %u is not in this server's history" msgstr "요청된 %X/%X 시작 위치(타임라인 %u)가 이 서버 내역에 없습니다." -#: replication/walsender.c:603 +#: replication/walsender.c:615 #, c-format msgid "This server's history forked from timeline %u at %X/%X." msgstr "이 서버의 시작 위치: 타임라인 %u, 위치 %X/%X" -#: replication/walsender.c:648 +#: replication/walsender.c:660 #, c-format msgid "" "requested starting point %X/%X is ahead of the WAL flush position of this " "server %X/%X" msgstr "" -#: replication/walsender.c:972 +#: replication/walsender.c:889 +#, c-format +msgid "" +"CREATE_REPLICATION_SLOT ... EXPORT_SNAPSHOT must not be called inside a " +"transaction" +msgstr "" +"CREATE_REPLICATION_SLOT ... EXPORT_SNAPSHOT 구문은 트랜잭션 안에서는 쓸 수 없" +"습니다." + +#: replication/walsender.c:898 +#, c-format +msgid "" +"CREATE_REPLICATION_SLOT ... USE_SNAPSHOT must be called inside a transaction" +msgstr "" +"CREATE_REPLICATION_SLOT ... USE_SNAPSHOT 구문은 트랜잭션 안에서만 쓸 수 있습" +"니다." + +#: replication/walsender.c:903 +#, c-format +msgid "" +"CREATE_REPLICATION_SLOT ... USE_SNAPSHOT must be called in REPEATABLE READ " +"isolation mode transaction" +msgstr "" +"CREATE_REPLICATION_SLOT ... USE_SNAPSHOT 구문은 격리 수준이 REPEATABLE READ " +"일때만 사용할 수 있습니다." + +#: replication/walsender.c:908 +#, c-format +msgid "" +"CREATE_REPLICATION_SLOT ... USE_SNAPSHOT must be called before any query" +msgstr "" +"CREATE_REPLICATION_SLOT ... USE_SNAPSHOT 구문은 맨 처음 호출 해야 합니다." + +#: replication/walsender.c:913 +#, c-format +msgid "" +"CREATE_REPLICATION_SLOT ... USE_SNAPSHOT must not be called in a " +"subtransaction" +msgstr "" +"CREATE_REPLICATION_SLOT ... USE_SNAPSHOT 구문은 하위 트랜잭션에서는 호출 할 " +"수 없습니다." + +#: replication/walsender.c:1059 #, c-format msgid "terminating walsender process after promotion" msgstr "운영전환 뒤 wal 송신기 프로세스를 중지합니다." -#: replication/walsender.c:1298 +#: replication/walsender.c:1437 +#, c-format +msgid "cannot execute new commands while WAL sender is in stopping mode" +msgstr "" + +#: replication/walsender.c:1470 +#, c-format +msgid "received replication command: %s" +msgstr "수신된 복제 명령: %s" + +#: replication/walsender.c:1486 tcop/fastpath.c:281 tcop/postgres.c:997 +#: tcop/postgres.c:1307 tcop/postgres.c:1566 tcop/postgres.c:1971 +#: tcop/postgres.c:2339 tcop/postgres.c:2414 +#, c-format +msgid "" +"current transaction is aborted, commands ignored until end of transaction " +"block" +msgstr "" +"현재 트랜잭션은 중지되어 있습니다. 
이 트랜잭션을 종료하기 전까지는 모든 명령" +"이 무시될 것입니다" + +#: replication/walsender.c:1548 #, c-format -msgid "received replication command: %s" -msgstr "수신된 복제 명령: %s" +msgid "cannot execute SQL commands in WAL sender for physical replication" +msgstr "물리적 복제를 위한 WAL 송신기에서 SQL 명령을 실행할 수 없음" -#: replication/walsender.c:1397 replication/walsender.c:1413 +#: replication/walsender.c:1588 replication/walsender.c:1604 #, c-format msgid "unexpected EOF on standby connection" msgstr "대기 서버 연결에서 예상치 못한 EOF 발견함" -#: replication/walsender.c:1427 +#: replication/walsender.c:1618 #, c-format msgid "unexpected standby message type \"%c\", after receiving CopyDone" msgstr "" -#: replication/walsender.c:1465 +#: replication/walsender.c:1656 #, c-format msgid "invalid standby message type \"%c\"" msgstr "잘못된 대기 서버 메시지 형태 \"%c\"" -#: replication/walsender.c:1506 +#: replication/walsender.c:1697 #, c-format msgid "unexpected message type \"%c\"" msgstr "예상치 못한 메시지 형태: \"%c\"" -#: replication/walsender.c:1790 +#: replication/walsender.c:2067 #, c-format msgid "terminating walsender process due to replication timeout" msgstr "복제 시간 제한으로 wal 송신기 프로세스를 종료합니다." -#: replication/walsender.c:1875 +#: replication/walsender.c:2156 #, c-format msgid "standby \"%s\" has now caught up with primary" msgstr "\"%s\" 대기 서버가 운영 서버로 전환합니다" -#: replication/walsender.c:1978 +#: replication/walsender.c:2263 #, c-format msgid "" "number of requested standby connections exceeds max_wal_senders (currently " "%d)" msgstr "대기 서버 연결 수가 max_wal_senders 설정값(현재 %d)을 초과했습니다" -#: rewrite/rewriteDefine.c:112 rewrite/rewriteDefine.c:973 +#: rewrite/rewriteDefine.c:112 rewrite/rewriteDefine.c:981 #, c-format msgid "rule \"%s\" for relation \"%s\" already exists" msgstr "\"%s\" 이름의 룰(rule)이 \"%s\" 테이블에 이미 지정되어있습니다" -#: rewrite/rewriteDefine.c:297 +#: rewrite/rewriteDefine.c:296 #, c-format msgid "rule actions on OLD are not implemented" msgstr "OLD에 대한 실행 룰(rule)은 아직 구현되지 않았습니다" -#: rewrite/rewriteDefine.c:298 +#: rewrite/rewriteDefine.c:297 #, c-format msgid "Use views or triggers instead." msgstr "대신에 뷰나 트리거를 사용하십시오." -#: rewrite/rewriteDefine.c:302 +#: rewrite/rewriteDefine.c:301 #, c-format msgid "rule actions on NEW are not implemented" msgstr "NEW에 대한 실행 룰(rule)은 아직 구현되지 않았습니다" -#: rewrite/rewriteDefine.c:303 +#: rewrite/rewriteDefine.c:302 #, c-format msgid "Use triggers instead." msgstr "대신에 트리거를 사용하십시오." -#: rewrite/rewriteDefine.c:316 +#: rewrite/rewriteDefine.c:315 #, c-format msgid "INSTEAD NOTHING rules on SELECT are not implemented" msgstr "SELECT 에서 INSTEAD NOTHING 룰(rule)은 구현되지 않았습니다" -#: rewrite/rewriteDefine.c:317 +#: rewrite/rewriteDefine.c:316 #, c-format msgid "Use views instead." msgstr "대신에 뷰를 사용하십시오." 
-#: rewrite/rewriteDefine.c:325 +#: rewrite/rewriteDefine.c:324 #, c-format msgid "multiple actions for rules on SELECT are not implemented" msgstr "SELECT에 대한 다중 실행 룰(rule)은 구현되지 않았습니다" -#: rewrite/rewriteDefine.c:336 +#: rewrite/rewriteDefine.c:334 #, c-format msgid "rules on SELECT must have action INSTEAD SELECT" msgstr "" "SELECT에 대한 룰(rule)은 그 지정에 INSTEAD SELECT 실행규칙을 지정해야만합니다" -#: rewrite/rewriteDefine.c:344 +#: rewrite/rewriteDefine.c:342 #, c-format msgid "rules on SELECT must not contain data-modifying statements in WITH" msgstr "" -#: rewrite/rewriteDefine.c:352 +#: rewrite/rewriteDefine.c:350 #, c-format msgid "event qualifications are not implemented for rules on SELECT" msgstr "" "이벤트 자격(event qualifications)은 SELECT 룰(rule)에서 구현되지 않았습니다" -#: rewrite/rewriteDefine.c:379 +#: rewrite/rewriteDefine.c:377 #, c-format msgid "\"%s\" is already a view" msgstr "\"%s\" 이름의 뷰가 이미 있습니다" -#: rewrite/rewriteDefine.c:403 +#: rewrite/rewriteDefine.c:401 #, c-format msgid "view rule for \"%s\" must be named \"%s\"" msgstr "\"%s\" 위한 뷰 룰(view rule)의 이름은 \"%s\" 여야만합니다" -#: rewrite/rewriteDefine.c:432 +#: rewrite/rewriteDefine.c:428 +#, c-format +msgid "cannot convert partitioned table \"%s\" to a view" +msgstr "\"%s\" 파티션된 테이블은 뷰로 변환할 수 없습니다" + +#: rewrite/rewriteDefine.c:434 +#, c-format +msgid "cannot convert partition \"%s\" to a view" +msgstr "\"%s\" 파티션 테이블은 뷰로 변환할 수 없습니다" + +#: rewrite/rewriteDefine.c:442 #, c-format msgid "could not convert table \"%s\" to a view because it is not empty" msgstr "\"%s\" 테이블에 자료가 있기 때문에, 테이블을 뷰로 변환할 수 없습니다" -#: rewrite/rewriteDefine.c:440 +#: rewrite/rewriteDefine.c:450 #, c-format msgid "could not convert table \"%s\" to a view because it has triggers" msgstr "\"%s\" 테이블에 트리거가 포함되어 있어 뷰로 변환할 수 없습니다" -#: rewrite/rewriteDefine.c:442 +#: rewrite/rewriteDefine.c:452 #, c-format msgid "" "In particular, the table cannot be involved in any foreign key relationships." msgstr "특히 테이블은 참조키 관계에 관련될 수 없습니다." 
-#: rewrite/rewriteDefine.c:447 +#: rewrite/rewriteDefine.c:457 #, c-format msgid "could not convert table \"%s\" to a view because it has indexes" msgstr "\"%s\" 테이블에 인덱스가 포함되어 있어 뷰로 변환할 수 없습니다" -#: rewrite/rewriteDefine.c:453 +#: rewrite/rewriteDefine.c:463 #, c-format msgid "could not convert table \"%s\" to a view because it has child tables" msgstr "\"%s\" 테이블을 상속 받는 테이블이 있어 뷰로 변활할 수 없습니다" -#: rewrite/rewriteDefine.c:459 +#: rewrite/rewriteDefine.c:469 #, c-format msgid "" "could not convert table \"%s\" to a view because it has row security enabled" msgstr "" "로우단위 보안 기능을 사용하고 있어 \"%s\" 테이블을 뷰로 변환할 수 없습니다" -#: rewrite/rewriteDefine.c:465 +#: rewrite/rewriteDefine.c:475 #, c-format msgid "" "could not convert table \"%s\" to a view because it has row security policies" msgstr "로우단위 보안 설정이 되어 있어 \"%s\" 테이블을 뷰로 변환할 수 없습니다" -#: rewrite/rewriteDefine.c:492 +#: rewrite/rewriteDefine.c:502 #, c-format msgid "cannot have multiple RETURNING lists in a rule" msgstr "하나의 rule에서 여러개의 RETURNING 목록을 지정할 수 없습니다" -#: rewrite/rewriteDefine.c:497 +#: rewrite/rewriteDefine.c:507 #, c-format msgid "RETURNING lists are not supported in conditional rules" msgstr "RETURNING 목록은 conditional rule에서는 지원하지 않습니다" -#: rewrite/rewriteDefine.c:501 +#: rewrite/rewriteDefine.c:511 #, c-format msgid "RETURNING lists are not supported in non-INSTEAD rules" msgstr "RETURNING 목록은 non-INSTEAD rule에서는 지원하지 않습니다" -#: rewrite/rewriteDefine.c:667 +#: rewrite/rewriteDefine.c:676 #, c-format msgid "SELECT rule's target list has too many entries" msgstr "SELECT 룰(rule)의 대상 목록이 너무 많은 엔트리를 가지고 있습니다" -#: rewrite/rewriteDefine.c:668 +#: rewrite/rewriteDefine.c:677 #, c-format msgid "RETURNING list has too many entries" msgstr "RETURNING 목록이 너무 많은 항목를 가지고 있습니다" -#: rewrite/rewriteDefine.c:695 +#: rewrite/rewriteDefine.c:704 #, c-format msgid "cannot convert relation containing dropped columns to view" -msgstr "뷰에서 삭제된 열을 포함하고 있는 릴레이션을 변환할 수 없습니다" +msgstr "뷰에서 삭제된 칼럼을 포함하고 있는 릴레이션을 변환할 수 없습니다" -#: rewrite/rewriteDefine.c:696 +#: rewrite/rewriteDefine.c:705 #, c-format msgid "" "cannot create a RETURNING list for a relation containing dropped columns" msgstr "" -"릴레이션에 삭제된 열을 포함하고 있는 RETURNING 목록을 만들 수 없습니다." +"릴레이션에 삭제된 칼럼을 포함하고 있는 RETURNING 목록을 만들 수 없습니다." -#: rewrite/rewriteDefine.c:702 +#: rewrite/rewriteDefine.c:711 #, c-format msgid "" "SELECT rule's target entry %d has different column name from column \"%s\"" msgstr "SELECT 룰(rule)의 대상 엔트리 번호가(%d)가 \"%s\" 칼럼 이름과 틀립니다" -#: rewrite/rewriteDefine.c:704 +#: rewrite/rewriteDefine.c:713 #, c-format msgid "SELECT target entry is named \"%s\"." msgstr "SELECT 대상 엔트리 이름은 \"%s\" 입니다." -#: rewrite/rewriteDefine.c:713 +#: rewrite/rewriteDefine.c:722 #, c-format msgid "SELECT rule's target entry %d has different type from column \"%s\"" msgstr "SELECT 룰(rule)의 대상 엔트리 번호(%d)가 \"%s\" 칼럼 자료형과 틀립니다" -#: rewrite/rewriteDefine.c:715 +#: rewrite/rewriteDefine.c:724 #, c-format msgid "RETURNING list's entry %d has different type from column \"%s\"" msgstr "RETURNING 목록의 %d번째 항목의 자료형이 \"%s\" 칼럼 자료형과 틀립니다" -#: rewrite/rewriteDefine.c:718 rewrite/rewriteDefine.c:742 +#: rewrite/rewriteDefine.c:727 rewrite/rewriteDefine.c:751 #, c-format msgid "SELECT target entry has type %s, but column has type %s." msgstr "SELECT 대상 엔트리 자료형은 %s 형이지만, 칼럼 자료형은 %s 형입니다." -#: rewrite/rewriteDefine.c:721 rewrite/rewriteDefine.c:746 +#: rewrite/rewriteDefine.c:730 rewrite/rewriteDefine.c:755 #, c-format msgid "RETURNING list entry has type %s, but column has type %s." msgstr "RETURNING 목록은 %s 자료형이지만, 칼럼 자료형은 %s 형입니다." 
-#: rewrite/rewriteDefine.c:737 +#: rewrite/rewriteDefine.c:746 #, c-format msgid "SELECT rule's target entry %d has different size from column \"%s\"" msgstr "SELECT 룰(rule)의 대상 엔트리 번호(%d)가 \"%s\" 칼럼 크기와 틀립니다" -#: rewrite/rewriteDefine.c:739 +#: rewrite/rewriteDefine.c:748 #, c-format msgid "RETURNING list's entry %d has different size from column \"%s\"" msgstr "RETURNING 목록의 %d번째 항목의 크기가 \"%s\" 칼럼 크기와 틀립니다" -#: rewrite/rewriteDefine.c:756 +#: rewrite/rewriteDefine.c:765 #, c-format msgid "SELECT rule's target list has too few entries" msgstr "SELECT 룰(rule)의 대상 목록이 너무 적은 엔트리를 가지고 있습니다" -#: rewrite/rewriteDefine.c:757 +#: rewrite/rewriteDefine.c:766 #, c-format msgid "RETURNING list has too few entries" msgstr "RETURNING 목록에 너무 적은 항목이 있습니다" -#: rewrite/rewriteDefine.c:849 rewrite/rewriteDefine.c:964 -#: rewrite/rewriteSupport.c:112 +#: rewrite/rewriteDefine.c:858 rewrite/rewriteDefine.c:972 +#: rewrite/rewriteSupport.c:109 #, c-format msgid "rule \"%s\" for relation \"%s\" does not exist" msgstr " \"%s\" 룰(rule)이 \"%s\" 관계(relation)에 지정된 것이 없음" -#: rewrite/rewriteDefine.c:983 +#: rewrite/rewriteDefine.c:991 #, c-format msgid "renaming an ON SELECT rule is not allowed" msgstr "ON SELECT 룰의 이름 바꾸기는 허용하지 않습니다" -#: rewrite/rewriteHandler.c:528 +#: rewrite/rewriteHandler.c:543 #, c-format msgid "" "WITH query name \"%s\" appears in both a rule action and the query being " "rewritten" msgstr "" -#: rewrite/rewriteHandler.c:588 +#: rewrite/rewriteHandler.c:603 #, c-format msgid "cannot have RETURNING lists in multiple rules" msgstr "multiple rule에 RETURNING 목록을 지정할 수 없습니다" -#: rewrite/rewriteHandler.c:928 rewrite/rewriteHandler.c:946 +#: rewrite/rewriteHandler.c:818 +#, c-format +msgid "cannot insert into column \"%s\"" +msgstr "\"%s\" 칼럼에 자료를 입력할 수 없습니다" + +#: rewrite/rewriteHandler.c:819 rewrite/rewriteHandler.c:834 +#, c-format +msgid "Column \"%s\" is an identity column defined as GENERATED ALWAYS." +msgstr "" + +#: rewrite/rewriteHandler.c:821 +#, c-format +msgid "Use OVERRIDING SYSTEM VALUE to override." +msgstr "" + +#: rewrite/rewriteHandler.c:833 +#, c-format +msgid "column \"%s\" can only be updated to DEFAULT" +msgstr "\"%s\" 칼럼은 DEFAULT 로만 업데이트 가능합니다" + +#: rewrite/rewriteHandler.c:1005 rewrite/rewriteHandler.c:1023 #, c-format msgid "multiple assignments to same column \"%s\"" msgstr "같은 \"%s\" 열에 지정값(assignment)이 중복되었습니다" -#: rewrite/rewriteHandler.c:1721 rewrite/rewriteHandler.c:3331 +#: rewrite/rewriteHandler.c:1809 rewrite/rewriteHandler.c:3431 #, c-format msgid "infinite recursion detected in rules for relation \"%s\"" msgstr "" "\"%s\" 릴레이션(relation)에서 지정된 룰에서 잘못된 재귀호출이 발견되었습니다" -#: rewrite/rewriteHandler.c:1806 +#: rewrite/rewriteHandler.c:1895 #, c-format msgid "infinite recursion detected in policy for relation \"%s\"" msgstr "\"%s\" 릴레이션의 정책에서 무한 재귀 호출이 발견 됨" -#: rewrite/rewriteHandler.c:2123 +#: rewrite/rewriteHandler.c:2212 msgid "Junk view columns are not updatable." msgstr "정크 뷰 칼럼은 업데이트할 수 없습니다." -#: rewrite/rewriteHandler.c:2128 +#: rewrite/rewriteHandler.c:2217 msgid "" "View columns that are not columns of their base relation are not updatable." msgstr "" -#: rewrite/rewriteHandler.c:2131 +#: rewrite/rewriteHandler.c:2220 msgid "View columns that refer to system columns are not updatable." msgstr "" -#: rewrite/rewriteHandler.c:2134 +#: rewrite/rewriteHandler.c:2223 msgid "View columns that return whole-row references are not updatable." 
msgstr "" -#: rewrite/rewriteHandler.c:2192 +#: rewrite/rewriteHandler.c:2281 msgid "Views containing DISTINCT are not automatically updatable." msgstr "" -#: rewrite/rewriteHandler.c:2195 +#: rewrite/rewriteHandler.c:2284 msgid "Views containing GROUP BY are not automatically updatable." msgstr "" -#: rewrite/rewriteHandler.c:2198 +#: rewrite/rewriteHandler.c:2287 msgid "Views containing HAVING are not automatically updatable." msgstr "" -#: rewrite/rewriteHandler.c:2201 +#: rewrite/rewriteHandler.c:2290 msgid "" "Views containing UNION, INTERSECT, or EXCEPT are not automatically updatable." msgstr "" -#: rewrite/rewriteHandler.c:2204 +#: rewrite/rewriteHandler.c:2293 msgid "Views containing WITH are not automatically updatable." msgstr "" -#: rewrite/rewriteHandler.c:2207 +#: rewrite/rewriteHandler.c:2296 msgid "Views containing LIMIT or OFFSET are not automatically updatable." msgstr "" -#: rewrite/rewriteHandler.c:2219 +#: rewrite/rewriteHandler.c:2308 msgid "Views that return aggregate functions are not automatically updatable." msgstr "" -#: rewrite/rewriteHandler.c:2222 +#: rewrite/rewriteHandler.c:2311 msgid "Views that return window functions are not automatically updatable." msgstr "" -#: rewrite/rewriteHandler.c:2225 +#: rewrite/rewriteHandler.c:2314 msgid "" "Views that return set-returning functions are not automatically updatable." msgstr "" -#: rewrite/rewriteHandler.c:2232 rewrite/rewriteHandler.c:2236 -#: rewrite/rewriteHandler.c:2243 +#: rewrite/rewriteHandler.c:2321 rewrite/rewriteHandler.c:2325 +#: rewrite/rewriteHandler.c:2333 msgid "" "Views that do not select from a single table or view are not automatically " "updatable." msgstr "" -#: rewrite/rewriteHandler.c:2246 +#: rewrite/rewriteHandler.c:2336 msgid "Views containing TABLESAMPLE are not automatically updatable." msgstr "" -#: rewrite/rewriteHandler.c:2270 +#: rewrite/rewriteHandler.c:2360 msgid "Views that have no updatable columns are not automatically updatable." msgstr "" -#: rewrite/rewriteHandler.c:2724 +#: rewrite/rewriteHandler.c:2814 #, c-format msgid "cannot insert into column \"%s\" of view \"%s\"" msgstr "\"%s\" 칼럼 (해당 뷰: \"%s\")에 자료를 입력할 수 없습니다" -#: rewrite/rewriteHandler.c:2732 +#: rewrite/rewriteHandler.c:2822 #, c-format msgid "cannot update column \"%s\" of view \"%s\"" msgstr "\"%s\" 칼럼 (해당 뷰: \"%s\")에 자료를 갱신할 수 없습니다" -#: rewrite/rewriteHandler.c:3130 +#: rewrite/rewriteHandler.c:3225 #, c-format msgid "" "DO INSTEAD NOTHING rules are not supported for data-modifying statements in " "WITH" msgstr "" -#: rewrite/rewriteHandler.c:3144 +#: rewrite/rewriteHandler.c:3239 #, c-format msgid "" "conditional DO INSTEAD rules are not supported for data-modifying statements " "in WITH" msgstr "" -#: rewrite/rewriteHandler.c:3148 +#: rewrite/rewriteHandler.c:3243 #, c-format msgid "DO ALSO rules are not supported for data-modifying statements in WITH" msgstr "" -#: rewrite/rewriteHandler.c:3153 +#: rewrite/rewriteHandler.c:3248 #, c-format msgid "" "multi-statement DO INSTEAD rules are not supported for data-modifying " "statements in WITH" msgstr "" -#: rewrite/rewriteHandler.c:3368 +#: rewrite/rewriteHandler.c:3468 #, c-format msgid "cannot perform INSERT RETURNING on relation \"%s\"" msgstr "\"%s\" 릴레이션에서 INSERT RETURNING 관련을 구성할 수 없음" -#: rewrite/rewriteHandler.c:3370 +#: rewrite/rewriteHandler.c:3470 #, c-format msgid "" "You need an unconditional ON INSERT DO INSTEAD rule with a RETURNING clause." @@ -16980,12 +18717,12 @@ msgstr "" "RETURNING 절에서는 무조건 ON INSERT DO INSTEAD 속성으로 rule이 사용되어야합니" "다." 
-#: rewrite/rewriteHandler.c:3375 +#: rewrite/rewriteHandler.c:3475 #, c-format msgid "cannot perform UPDATE RETURNING on relation \"%s\"" msgstr "\"%s\" 릴레이션에서 UPDATE RETURNING 관련을 구성할 수 없습니다." -#: rewrite/rewriteHandler.c:3377 +#: rewrite/rewriteHandler.c:3477 #, c-format msgid "" "You need an unconditional ON UPDATE DO INSTEAD rule with a RETURNING clause." @@ -16993,26 +18730,26 @@ msgstr "" "RETURNING 절에서는 무조건 ON UPDATE DO INSTEAD 속성으로 rule이 사용되어야합니" "다." -#: rewrite/rewriteHandler.c:3382 +#: rewrite/rewriteHandler.c:3482 #, c-format msgid "cannot perform DELETE RETURNING on relation \"%s\"" msgstr "\"%s\" 릴레이션에서 DELETE RETURNING 관련을 구성할 수 없습니다." -#: rewrite/rewriteHandler.c:3384 +#: rewrite/rewriteHandler.c:3484 #, c-format msgid "" "You need an unconditional ON DELETE DO INSTEAD rule with a RETURNING clause." msgstr "" "TURNING 절에서는 무조건 ON DELETE DO INSTEAD 속성으로 rule이 사용되어야합니다" -#: rewrite/rewriteHandler.c:3402 +#: rewrite/rewriteHandler.c:3502 #, c-format msgid "" "INSERT with ON CONFLICT clause cannot be used with table that has INSERT or " "UPDATE rules" msgstr "" -#: rewrite/rewriteHandler.c:3459 +#: rewrite/rewriteHandler.c:3559 #, c-format msgid "" "WITH cannot be used in a query that is rewritten by rules into multiple " @@ -17031,35 +18768,20 @@ msgstr "" msgid "WHERE CURRENT OF on a view is not implemented" msgstr "뷰에 대한 WHERE CURRENT OF 구문이 구현되지 않음" -#: rewrite/rewriteManip.c:1434 +#: rewrite/rewriteManip.c:1463 #, c-format msgid "" "NEW variables in ON UPDATE rules cannot reference columns that are part of a " "multiple assignment in the subject UPDATE command" msgstr "" -#: rewrite/rewriteSupport.c:154 -#, c-format -msgid "rule \"%s\" does not exist" -msgstr "\"%s\" 룰(rule) 없음" - -#: rewrite/rewriteSupport.c:167 -#, c-format -msgid "there are multiple rules named \"%s\"" -msgstr "\"%s\" 이름의 룰(rule)이 여러개 있습니다" - -#: rewrite/rewriteSupport.c:168 -#, c-format -msgid "Specify a relation name as well as a rule name." 
-msgstr "룰(rule) 이름과 함께 릴레이션(relation) 이름도 지정하십시오" - #: snowball/dict_snowball.c:177 #, c-format msgid "no Snowball stemmer available for language \"%s\" and encoding \"%s\"" msgstr "\"%s\" 언어 및 \"%s\" 인코딩에 사용 가능한 Snowball stemmer가 없음" -#: snowball/dict_snowball.c:200 tsearch/dict_ispell.c:73 -#: tsearch/dict_simple.c:48 +#: snowball/dict_snowball.c:200 tsearch/dict_ispell.c:74 +#: tsearch/dict_simple.c:49 #, c-format msgid "multiple StopWords parameters" msgstr "StopWords 매개 변수가 여러 개 있음" @@ -17079,6 +18801,48 @@ msgstr "인식할 수 없는 Snowball 매개 변수: \"%s\"" msgid "missing Language parameter" msgstr "Language 매개 변수가 누락됨" +#: statistics/dependencies.c:534 +#, c-format +msgid "invalid zero-length item array in MVDependencies" +msgstr "" + +#: statistics/dependencies.c:665 statistics/dependencies.c:718 +#: statistics/mvdistinct.c:338 statistics/mvdistinct.c:391 +#: utils/adt/pseudotypes.c:94 utils/adt/pseudotypes.c:122 +#: utils/adt/pseudotypes.c:147 utils/adt/pseudotypes.c:171 +#: utils/adt/pseudotypes.c:282 utils/adt/pseudotypes.c:307 +#: utils/adt/pseudotypes.c:335 utils/adt/pseudotypes.c:363 +#: utils/adt/pseudotypes.c:393 +#, c-format +msgid "cannot accept a value of type %s" +msgstr "%s 형식의 값은 사용할 수 없음" + +#: statistics/extended_stats.c:102 +#, c-format +msgid "" +"statistics object \"%s.%s\" could not be computed for relation \"%s.%s\"" +msgstr "\"%s.%s\" 통계정보 객체를 계산 할 수 없음: 대상 릴레이션: \"%s.%s\"" + +#: statistics/mvdistinct.c:259 +#, c-format +msgid "invalid ndistinct magic %08x (expected %08x)" +msgstr "" + +#: statistics/mvdistinct.c:264 +#, c-format +msgid "invalid ndistinct type %d (expected %d)" +msgstr "잘못된 ndistinct 형 %d (예상값 %d)" + +#: statistics/mvdistinct.c:269 +#, c-format +msgid "invalid zero-length item array in MVNDistinct" +msgstr "MVNDistinct에서 잘못된 zero-length 항목 배열" + +#: statistics/mvdistinct.c:278 +#, c-format +msgid "invalid MVNDistinct size %zd (expected at least %zd)" +msgstr "" + #: storage/buffer/bufmgr.c:544 storage/buffer/bufmgr.c:657 #, c-format msgid "cannot access temporary tables of other sessions" @@ -17102,22 +18866,22 @@ msgid "invalid page in block %u of relation %s; zeroing out page" msgstr "" "%u 블록(해당 릴레이션: %s)에 잘못된 페이지 헤더가 있음, 페이지를 삭제하는 중" -#: storage/buffer/bufmgr.c:3952 +#: storage/buffer/bufmgr.c:4013 #, c-format msgid "could not write block %u of %s" msgstr "%u/%s 블록을 쓸 수 없음" -#: storage/buffer/bufmgr.c:3954 +#: storage/buffer/bufmgr.c:4015 #, c-format msgid "Multiple failures --- write error might be permanent." msgstr "여러 번 실패 --- 쓰기 오류가 영구적일 수 있습니다." -#: storage/buffer/bufmgr.c:3975 storage/buffer/bufmgr.c:3994 +#: storage/buffer/bufmgr.c:4036 storage/buffer/bufmgr.c:4055 #, c-format msgid "writing block %u of relation %s" msgstr "%u 블록(해당 릴레이션: %s)을 쓰는 중" -#: storage/buffer/bufmgr.c:4295 +#: storage/buffer/bufmgr.c:4356 #, c-format msgid "snapshot too old" msgstr "" @@ -17147,17 +18911,17 @@ msgstr "dirty 자료 크기를 확인할 수 없음: %m" msgid "could not munmap() while flushing data: %m" msgstr "자료 flush 작업 도중 munmap() 호출 실패: %m" -#: storage/file/fd.c:689 +#: storage/file/fd.c:726 #, c-format msgid "could not link file \"%s\" to \"%s\": %m" msgstr "\"%s\" 파일을 \"%s\" 파일로 링크할 수 없음: %m" -#: storage/file/fd.c:783 +#: storage/file/fd.c:820 #, c-format msgid "getrlimit failed: %m" msgstr "getrlimit 실패: %m" -#: storage/file/fd.c:873 +#: storage/file/fd.c:910 #, c-format msgid "insufficient file descriptors available to start server process" msgstr "" @@ -17165,77 +18929,77 @@ msgstr "" "그램에서 너무 많은 파일을 열어 두고 있습니다. 
다른 프로그램들을 좀 닫고 다시 " "시도해 보십시오" -#: storage/file/fd.c:874 +#: storage/file/fd.c:911 #, c-format msgid "System allows %d, we need at least %d." msgstr "시스템 허용치 %d, 서버 최소 허용치 %d." -#: storage/file/fd.c:915 storage/file/fd.c:2078 storage/file/fd.c:2171 -#: storage/file/fd.c:2319 +#: storage/file/fd.c:952 storage/file/fd.c:2129 storage/file/fd.c:2222 +#: storage/file/fd.c:2370 #, c-format msgid "out of file descriptors: %m; release and retry" msgstr "" "열려 있는 파일이 너무 많습니다: %m; 다른 프로그램들을 좀 닫고 다시 시도해 보" "십시오" -#: storage/file/fd.c:1520 +#: storage/file/fd.c:1557 #, c-format msgid "temporary file: path \"%s\", size %lu" msgstr "임시 파일: 경로 \"%s\", 크기 %lu" -#: storage/file/fd.c:1717 +#: storage/file/fd.c:1760 #, c-format msgid "temporary file size exceeds temp_file_limit (%dkB)" msgstr "임시 파일 크기가 temp_file_limit (%dkB)를 초과했습니다" -#: storage/file/fd.c:2054 storage/file/fd.c:2104 +#: storage/file/fd.c:2105 storage/file/fd.c:2155 #, c-format msgid "exceeded maxAllocatedDescs (%d) while trying to open file \"%s\"" msgstr "" -#: storage/file/fd.c:2144 +#: storage/file/fd.c:2195 #, c-format msgid "exceeded maxAllocatedDescs (%d) while trying to execute command \"%s\"" msgstr "" -#: storage/file/fd.c:2295 +#: storage/file/fd.c:2346 #, c-format msgid "exceeded maxAllocatedDescs (%d) while trying to open directory \"%s\"" msgstr "" -#: storage/file/fd.c:2381 +#: storage/file/fd.c:2432 utils/adt/genfile.c:511 #, c-format msgid "could not read directory \"%s\": %m" msgstr "\"%s\" 디렉터리를 읽을 수 없음: %m" -#: storage/ipc/dsm.c:363 +#: storage/ipc/dsm.c:364 #, c-format msgid "dynamic shared memory control segment is corrupt" msgstr "동적 공유 메모리 제어 조각이 손상되었음" -#: storage/ipc/dsm.c:410 +#: storage/ipc/dsm.c:411 #, c-format msgid "dynamic shared memory is disabled" msgstr "동적 공유 메모리 기능이 비활성화 되어있음" -#: storage/ipc/dsm.c:411 +#: storage/ipc/dsm.c:412 #, c-format msgid "Set dynamic_shared_memory_type to a value other than \"none\"." msgstr "dynamic_shared_memory_type 설정값을 \"none\" 아닌 값으로 지정하세요." 
-#: storage/ipc/dsm.c:431 +#: storage/ipc/dsm.c:432 #, c-format msgid "dynamic shared memory control segment is not valid" msgstr "동적 공유 메모리 제어 조각이 타당하지 않음" -#: storage/ipc/dsm.c:516 +#: storage/ipc/dsm.c:528 #, c-format msgid "too many dynamic shared memory segments" msgstr "너무 많은 동적 공유 메모리 조각이 있음" #: storage/ipc/dsm_impl.c:261 storage/ipc/dsm_impl.c:361 #: storage/ipc/dsm_impl.c:533 storage/ipc/dsm_impl.c:648 -#: storage/ipc/dsm_impl.c:819 storage/ipc/dsm_impl.c:961 +#: storage/ipc/dsm_impl.c:819 storage/ipc/dsm_impl.c:963 #, c-format msgid "could not unmap shared memory segment \"%s\": %m" msgstr "\"%s\" 공유 메모리 조각을 unmap 할 수 없음: %m" @@ -17259,13 +19023,13 @@ msgid "could not stat shared memory segment \"%s\": %m" msgstr "\"%s\" 공유 메모리 조각 파일의 상태를 알 수 없음: %m" #: storage/ipc/dsm_impl.c:335 storage/ipc/dsm_impl.c:886 -#: storage/ipc/dsm_impl.c:934 +#: storage/ipc/dsm_impl.c:936 #, c-format msgid "could not resize shared memory segment \"%s\" to %zu bytes: %m" msgstr "\"%s\" 공유 메모리 조각 파일을 %zu 바이트로 크기 조절 할 수 없음: %m" #: storage/ipc/dsm_impl.c:385 storage/ipc/dsm_impl.c:580 -#: storage/ipc/dsm_impl.c:750 storage/ipc/dsm_impl.c:985 +#: storage/ipc/dsm_impl.c:750 storage/ipc/dsm_impl.c:987 #, c-format msgid "could not map shared memory segment \"%s\": %m" msgstr "\"%s\" 공유 메모리 조각을 map 할 수 없음: %m" @@ -17280,48 +19044,47 @@ msgstr "공유 메모리 조각을 가져올 수 없음: %m" msgid "could not create shared memory segment \"%s\": %m" msgstr "\"%s\" 공유 메모리 조각을 만들 수 없음: %m" -#: storage/ipc/dsm_impl.c:1026 +#: storage/ipc/dsm_impl.c:1029 storage/ipc/dsm_impl.c:1077 #, c-format msgid "could not duplicate handle for \"%s\": %m" msgstr "\"%s\" 용 헨들러를 이중화 할 수 없음: %m" -#: storage/ipc/latch.c:778 +#: storage/ipc/latch.c:829 #, c-format msgid "epoll_ctl() failed: %m" msgstr "epoll_ctl() 실패: %m" -#: storage/ipc/latch.c:1002 +#: storage/ipc/latch.c:1060 #, c-format msgid "epoll_wait() failed: %m" msgstr "epoll_wait() 실패: %m" -#: storage/ipc/latch.c:1122 +#: storage/ipc/latch.c:1182 #, c-format msgid "poll() failed: %m" msgstr "poll() 실패: %m" -#: storage/ipc/shm_toc.c:108 storage/ipc/shm_toc.c:189 storage/ipc/shmem.c:212 -#: storage/lmgr/lock.c:883 storage/lmgr/lock.c:917 storage/lmgr/lock.c:2682 -#: storage/lmgr/lock.c:4007 storage/lmgr/lock.c:4072 storage/lmgr/lock.c:4364 -#: storage/lmgr/predicate.c:2329 storage/lmgr/predicate.c:2344 -#: storage/lmgr/predicate.c:3736 storage/lmgr/predicate.c:4879 -#: storage/lmgr/proc.c:203 utils/hash/dynahash.c:1043 +#: storage/ipc/shm_toc.c:108 storage/ipc/shm_toc.c:190 storage/lmgr/lock.c:883 +#: storage/lmgr/lock.c:917 storage/lmgr/lock.c:2679 storage/lmgr/lock.c:4004 +#: storage/lmgr/lock.c:4069 storage/lmgr/lock.c:4361 +#: storage/lmgr/predicate.c:2401 storage/lmgr/predicate.c:2416 +#: storage/lmgr/predicate.c:3808 storage/lmgr/predicate.c:4951 +#: utils/hash/dynahash.c:1061 #, c-format msgid "out of shared memory" msgstr "공유 메모리 부족" -#: storage/ipc/shmem.c:370 storage/ipc/shmem.c:421 +#: storage/ipc/shmem.c:165 storage/ipc/shmem.c:246 #, c-format -msgid "" -"not enough shared memory for data structure \"%s\" (%zu bytes requested)" -msgstr "\"%s\" 자료 구조체용 공유 메모리가 부족함 (%zu 바이트가 필요함)" +msgid "out of shared memory (%zu bytes requested)" +msgstr "공유 메모리가 부족함 (%zu 바이트가 필요함)" -#: storage/ipc/shmem.c:389 +#: storage/ipc/shmem.c:421 #, c-format msgid "could not create ShmemIndex entry for data structure \"%s\"" msgstr "\"%s\" 자료 구조체용 ShmemIndex 항목을 만들 수 없음" -#: storage/ipc/shmem.c:404 +#: storage/ipc/shmem.c:436 #, c-format msgid "" "ShmemIndex entry size is wrong for data structure \"%s\": 
expected %zu, " @@ -17329,17 +19092,23 @@ msgid "" msgstr "" "\"%s\" 자료 구조체용 ShmemIndex 항목 크기가 잘못됨: 기대값 %zu, 현재값 %zu" -#: storage/ipc/shmem.c:452 storage/ipc/shmem.c:471 +#: storage/ipc/shmem.c:453 +#, c-format +msgid "" +"not enough shared memory for data structure \"%s\" (%zu bytes requested)" +msgstr "\"%s\" 자료 구조체용 공유 메모리가 부족함 (%zu 바이트가 필요함)" + +#: storage/ipc/shmem.c:484 storage/ipc/shmem.c:503 #, c-format msgid "requested shared memory size overflows size_t" msgstr "지정한 공유 메모리 사이즈가 size_t 크기를 초과했습니다" -#: storage/ipc/standby.c:530 tcop/postgres.c:2976 +#: storage/ipc/standby.c:531 tcop/postgres.c:2985 #, c-format msgid "canceling statement due to conflict with recovery" msgstr "복구 작업 중 충돌이 발생해 작업을 중지합니다." -#: storage/ipc/standby.c:531 tcop/postgres.c:2263 +#: storage/ipc/standby.c:532 tcop/postgres.c:2271 #, c-format msgid "User transaction caused buffer deadlock with recovery." msgstr "복구 작업 중 사용자 트랜잭션이 버퍼 데드락을 만들었습니다." @@ -17495,131 +19264,124 @@ msgid "" "recovery." msgstr "" -#: storage/lmgr/lock.c:884 storage/lmgr/lock.c:918 storage/lmgr/lock.c:2683 -#: storage/lmgr/lock.c:4008 storage/lmgr/lock.c:4073 storage/lmgr/lock.c:4365 +#: storage/lmgr/lock.c:884 storage/lmgr/lock.c:918 storage/lmgr/lock.c:2680 +#: storage/lmgr/lock.c:4005 storage/lmgr/lock.c:4070 storage/lmgr/lock.c:4362 #, c-format msgid "You might need to increase max_locks_per_transaction." msgstr "max_locks_per_transaction을 늘려야 할 수도 있습니다." -#: storage/lmgr/lock.c:3124 storage/lmgr/lock.c:3240 +#: storage/lmgr/lock.c:3121 storage/lmgr/lock.c:3237 #, c-format msgid "" "cannot PREPARE while holding both session-level and transaction-level locks " "on the same object" msgstr "" -#: storage/lmgr/predicate.c:675 +#: storage/lmgr/predicate.c:686 #, c-format msgid "not enough elements in RWConflictPool to record a read/write conflict" msgstr "" -#: storage/lmgr/predicate.c:676 storage/lmgr/predicate.c:704 +#: storage/lmgr/predicate.c:687 storage/lmgr/predicate.c:715 #, c-format msgid "" "You might need to run fewer transactions at a time or increase " "max_connections." msgstr "" -#: storage/lmgr/predicate.c:703 +#: storage/lmgr/predicate.c:714 #, c-format msgid "" "not enough elements in RWConflictPool to record a potential read/write " "conflict" msgstr "" -#: storage/lmgr/predicate.c:909 +#: storage/lmgr/predicate.c:921 #, c-format msgid "memory for serializable conflict tracking is nearly exhausted" msgstr "" -#: storage/lmgr/predicate.c:910 +#: storage/lmgr/predicate.c:922 #, c-format msgid "" "There might be an idle transaction or a forgotten prepared transaction " "causing this." msgstr "" -#: storage/lmgr/predicate.c:1190 storage/lmgr/predicate.c:1261 -#, c-format -msgid "" -"not enough shared memory for elements of data structure \"%s\" (%zu bytes " -"requested)" -msgstr "" - -#: storage/lmgr/predicate.c:1549 +#: storage/lmgr/predicate.c:1561 #, c-format msgid "deferrable snapshot was unsafe; trying a new one" msgstr "" -#: storage/lmgr/predicate.c:1588 +#: storage/lmgr/predicate.c:1650 #, c-format msgid "\"default_transaction_isolation\" is set to \"serializable\"." msgstr "" -#: storage/lmgr/predicate.c:1589 +#: storage/lmgr/predicate.c:1651 #, c-format msgid "" "You can use \"SET default_transaction_isolation = 'repeatable read'\" to " "change the default." 
msgstr "" -#: storage/lmgr/predicate.c:1628 +#: storage/lmgr/predicate.c:1691 #, c-format msgid "a snapshot-importing transaction must not be READ ONLY DEFERRABLE" msgstr "" -#: storage/lmgr/predicate.c:1706 utils/time/snapmgr.c:617 -#: utils/time/snapmgr.c:623 +#: storage/lmgr/predicate.c:1771 utils/time/snapmgr.c:621 +#: utils/time/snapmgr.c:627 #, c-format msgid "could not import the requested snapshot" msgstr "" -#: storage/lmgr/predicate.c:1707 utils/time/snapmgr.c:624 +#: storage/lmgr/predicate.c:1772 utils/time/snapmgr.c:628 #, c-format -msgid "The source transaction %u is not running anymore." -msgstr "%u 소스 트랜잭션은 더이상 실행 중이지 않습니다." +msgid "The source process with PID %d is not running anymore." +msgstr "%d PID 소스 프로세스는 더이상 실행 중이지 않습니다." -#: storage/lmgr/predicate.c:2330 storage/lmgr/predicate.c:2345 -#: storage/lmgr/predicate.c:3737 +#: storage/lmgr/predicate.c:2402 storage/lmgr/predicate.c:2417 +#: storage/lmgr/predicate.c:3809 #, c-format msgid "You might need to increase max_pred_locks_per_transaction." msgstr "max_pred_locks_per_transaction 값을 늘려야 할 수도 있습니다." -#: storage/lmgr/predicate.c:3891 storage/lmgr/predicate.c:3980 -#: storage/lmgr/predicate.c:3988 storage/lmgr/predicate.c:4027 -#: storage/lmgr/predicate.c:4266 storage/lmgr/predicate.c:4603 -#: storage/lmgr/predicate.c:4615 storage/lmgr/predicate.c:4657 -#: storage/lmgr/predicate.c:4695 +#: storage/lmgr/predicate.c:3963 storage/lmgr/predicate.c:4052 +#: storage/lmgr/predicate.c:4060 storage/lmgr/predicate.c:4099 +#: storage/lmgr/predicate.c:4338 storage/lmgr/predicate.c:4675 +#: storage/lmgr/predicate.c:4687 storage/lmgr/predicate.c:4729 +#: storage/lmgr/predicate.c:4767 #, c-format msgid "" "could not serialize access due to read/write dependencies among transactions" msgstr "트랜잭션간 읽기/쓰기 의존성 때문에 serialize 접근을 할 수 없음" -#: storage/lmgr/predicate.c:3893 storage/lmgr/predicate.c:3982 -#: storage/lmgr/predicate.c:3990 storage/lmgr/predicate.c:4029 -#: storage/lmgr/predicate.c:4268 storage/lmgr/predicate.c:4605 -#: storage/lmgr/predicate.c:4617 storage/lmgr/predicate.c:4659 -#: storage/lmgr/predicate.c:4697 +#: storage/lmgr/predicate.c:3965 storage/lmgr/predicate.c:4054 +#: storage/lmgr/predicate.c:4062 storage/lmgr/predicate.c:4101 +#: storage/lmgr/predicate.c:4340 storage/lmgr/predicate.c:4677 +#: storage/lmgr/predicate.c:4689 storage/lmgr/predicate.c:4731 +#: storage/lmgr/predicate.c:4769 #, c-format msgid "The transaction might succeed if retried." msgstr "재시도하면 그 트랜잭션이 성공할 것입니다." -#: storage/lmgr/proc.c:1265 +#: storage/lmgr/proc.c:1300 #, c-format msgid "Process %d waits for %s on %s." 
msgstr "%d 프로세스가 대기중, 잠금종류: %s, 내용: %s" -#: storage/lmgr/proc.c:1276 +#: storage/lmgr/proc.c:1311 #, c-format msgid "sending cancel to blocking autovacuum PID %d" msgstr "%d PID autovacuum 블럭킹하기 위해 취소 신호를 보냅니다" -#: storage/lmgr/proc.c:1294 utils/adt/misc.c:270 +#: storage/lmgr/proc.c:1329 utils/adt/misc.c:269 #, c-format msgid "could not send signal to process %d: %m" msgstr "%d 프로세스로 시스템신호(signal)를 보낼 수 없습니다: %m" -#: storage/lmgr/proc.c:1396 +#: storage/lmgr/proc.c:1431 #, c-format msgid "" "process %d avoided deadlock for %s on %s by rearranging queue order after " @@ -17628,241 +19390,230 @@ msgstr "" "%d PID 프로세스는 %s(%s)에 대해 교착 상태가 발생하지 않도록 %ld.%03dms 후에 " "대기열 순서를 다시 조정함" -#: storage/lmgr/proc.c:1411 +#: storage/lmgr/proc.c:1446 #, c-format msgid "" "process %d detected deadlock while waiting for %s on %s after %ld.%03d ms" msgstr "%d PID 프로세스에서 %s(%s) 대기중 %ld.%03dms 후에 교착 상태를 감지함" -#: storage/lmgr/proc.c:1420 +#: storage/lmgr/proc.c:1455 #, c-format msgid "process %d still waiting for %s on %s after %ld.%03d ms" msgstr "%d PID 프로세스에서 여전히 %s(%s) 작업을 기다리고 있음(%ld.%03dms 후)" -#: storage/lmgr/proc.c:1427 +#: storage/lmgr/proc.c:1462 #, c-format msgid "process %d acquired %s on %s after %ld.%03d ms" msgstr "%d PID 프로세스가 %s(%s) 작업을 위해 잠금 취득함(%ld.%03dms 후)" -#: storage/lmgr/proc.c:1443 +#: storage/lmgr/proc.c:1478 #, c-format msgid "process %d failed to acquire %s on %s after %ld.%03d ms" msgstr "프로세스 %d에서 %s(%s)을(를) 취득하지 못함(%ld.%03dms 후)" -#: storage/page/bufpage.c:144 +#: storage/page/bufpage.c:151 #, c-format msgid "page verification failed, calculated checksum %u but expected %u" msgstr "페이지 검사 실패, 계산된 체크섬은 %u, 기대값은 %u" -#: storage/page/bufpage.c:203 storage/page/bufpage.c:522 -#: storage/page/bufpage.c:737 storage/page/bufpage.c:868 -#: storage/page/bufpage.c:968 +#: storage/page/bufpage.c:213 storage/page/bufpage.c:505 +#: storage/page/bufpage.c:748 storage/page/bufpage.c:881 +#: storage/page/bufpage.c:977 storage/page/bufpage.c:1087 #, c-format msgid "corrupted page pointers: lower = %u, upper = %u, special = %u" msgstr "손상된 페이지 위치: 하위값 = %u, 상위값 = %u, 특수값 = %u" -#: storage/page/bufpage.c:566 +#: storage/page/bufpage.c:549 #, c-format msgid "corrupted item pointer: %u" msgstr "손상된 아이템 위치: %u" -#: storage/page/bufpage.c:577 storage/page/bufpage.c:919 -#: storage/page/bufpage.c:1074 +#: storage/page/bufpage.c:560 storage/page/bufpage.c:932 #, c-format msgid "corrupted item lengths: total %u, available space %u" msgstr "손상된 아이템 길이: 전체 %u, 사용가능한 공간 %u" -#: storage/page/bufpage.c:756 storage/page/bufpage.c:892 +#: storage/page/bufpage.c:767 storage/page/bufpage.c:993 +#: storage/page/bufpage.c:1103 #, c-format msgid "corrupted item pointer: offset = %u, size = %u" msgstr "손상된 아이템 위치: 오프셋 = %u, 크기 = %u" -#: storage/page/bufpage.c:997 +#: storage/page/bufpage.c:905 #, c-format msgid "corrupted item pointer: offset = %u, length = %u" msgstr "손상된 아이템 위치: 오프셋 = %u, 크기 = %u" -#: storage/smgr/md.c:449 storage/smgr/md.c:971 +#: storage/smgr/md.c:448 storage/smgr/md.c:974 #, c-format msgid "could not truncate file \"%s\": %m" msgstr "\"%s\" 파일을 비울 수 없음: %m" -#: storage/smgr/md.c:516 +#: storage/smgr/md.c:515 #, c-format msgid "cannot extend file \"%s\" beyond %u blocks" msgstr "\"%s\" 파일을 %u개 블록을 초과하여 확장할 수 없음" -#: storage/smgr/md.c:538 storage/smgr/md.c:751 storage/smgr/md.c:827 +#: storage/smgr/md.c:537 storage/smgr/md.c:754 storage/smgr/md.c:830 #, c-format msgid "could not seek to block %u in file \"%s\": %m" msgstr "%u 블록을 찾을 수 없음(해당 파일: \"%s\"): %m" -#: storage/smgr/md.c:546 +#: 
storage/smgr/md.c:545 #, c-format msgid "could not extend file \"%s\": %m" msgstr "\"%s\" 파일을 확장할 수 없음: %m" -#: storage/smgr/md.c:548 storage/smgr/md.c:555 storage/smgr/md.c:854 +#: storage/smgr/md.c:547 storage/smgr/md.c:554 storage/smgr/md.c:857 #, c-format msgid "Check free disk space." msgstr "디스크 여유 공간을 확인해 주십시오." -#: storage/smgr/md.c:552 +#: storage/smgr/md.c:551 #, c-format msgid "could not extend file \"%s\": wrote only %d of %d bytes at block %u" msgstr "\"%s\" 파일을 확장할 수 없음: %d/%d바이트만 %u 블록에 썼음" -#: storage/smgr/md.c:769 +#: storage/smgr/md.c:772 #, c-format msgid "could not read block %u in file \"%s\": %m" msgstr "%u 블럭을 \"%s\" 파일에서 읽을 수 없음: %m" -#: storage/smgr/md.c:785 +#: storage/smgr/md.c:788 #, c-format msgid "could not read block %u in file \"%s\": read only %d of %d bytes" msgstr "%u 블럭을 \"%s\" 파일에서 읽을 수 없음: %d / %d 바이트만 읽음" -#: storage/smgr/md.c:845 +#: storage/smgr/md.c:848 #, c-format msgid "could not write block %u in file \"%s\": %m" msgstr "%u 블럭을 \"%s\" 파일에 쓸 수 없음: %m" -#: storage/smgr/md.c:850 +#: storage/smgr/md.c:853 #, c-format msgid "could not write block %u in file \"%s\": wrote only %d of %d bytes" msgstr "%u 블럭을 \"%s\" 파일에 쓸 수 없음: %d / %d 바이트만 씀" -#: storage/smgr/md.c:947 +#: storage/smgr/md.c:945 #, c-format msgid "could not truncate file \"%s\" to %u blocks: it's only %u blocks now" msgstr "\"%s\" 파일을 %u 블럭으로 비울 수 없음: 현재 %u 블럭 뿐 임" -#: storage/smgr/md.c:997 +#: storage/smgr/md.c:1000 #, c-format msgid "could not truncate file \"%s\" to %u blocks: %m" msgstr "\"%s\" 파일을 %u 블럭으로 정리할 수 없음: %m" -#: storage/smgr/md.c:1279 +#: storage/smgr/md.c:1282 #, c-format msgid "could not fsync file \"%s\" but retrying: %m" msgstr "\"%s\" 파일 fsync 실패, 재시도함: %m" -#: storage/smgr/md.c:1442 +#: storage/smgr/md.c:1445 #, c-format msgid "could not forward fsync request because request queue is full" msgstr "요청 큐가 가득차 forward fsync 요청을 처리할 수 없음" -#: storage/smgr/md.c:1863 +#: storage/smgr/md.c:1914 #, c-format msgid "" "could not open file \"%s\" (target block %u): previous segment is only %u " "blocks" msgstr "\"%s\" 파일을 열기 실패(대상 블록: %u): 이전 조각은 %u 블럭 뿐임" -#: storage/smgr/md.c:1877 +#: storage/smgr/md.c:1928 #, c-format msgid "could not open file \"%s\" (target block %u): %m" msgstr "\"%s\" 파일을 열기 실패(대상 블록: %u): %m" -#: tcop/fastpath.c:111 tcop/fastpath.c:475 tcop/fastpath.c:605 +#: tcop/fastpath.c:111 tcop/fastpath.c:463 tcop/fastpath.c:593 #, c-format msgid "invalid argument size %d in function call message" msgstr "함수 호출 메시지 안에 있는 잘못된 %d 인자 크기" -#: tcop/fastpath.c:291 tcop/postgres.c:992 tcop/postgres.c:1301 -#: tcop/postgres.c:1559 tcop/postgres.c:1964 tcop/postgres.c:2331 -#: tcop/postgres.c:2406 -#, c-format -msgid "" -"current transaction is aborted, commands ignored until end of transaction " -"block" -msgstr "" -"현재 트랜잭션은 중지되어 있습니다. 
이 트랜잭션을 종료하기 전까지는 모든 명령" -"이 무시될 것입니다" - -#: tcop/fastpath.c:319 +#: tcop/fastpath.c:309 #, c-format msgid "fastpath function call: \"%s\" (OID %u)" msgstr "fastpath 함수 호출: \"%s\" (OID %u)" -#: tcop/fastpath.c:401 tcop/postgres.c:1163 tcop/postgres.c:1426 -#: tcop/postgres.c:1805 tcop/postgres.c:2022 +#: tcop/fastpath.c:391 tcop/postgres.c:1169 tcop/postgres.c:1432 +#: tcop/postgres.c:1812 tcop/postgres.c:2030 #, c-format msgid "duration: %s ms" msgstr "실행시간: %s ms" -#: tcop/fastpath.c:405 +#: tcop/fastpath.c:395 #, c-format msgid "duration: %s ms fastpath function call: \"%s\" (OID %u)" msgstr "작업시간: %s ms fastpath 함수 호출: \"%s\" (OID %u)" -#: tcop/fastpath.c:443 tcop/fastpath.c:570 +#: tcop/fastpath.c:431 tcop/fastpath.c:558 #, c-format msgid "function call message contains %d arguments but function requires %d" msgstr "함수 호출 메시지는 %d 인자를 사용하지만, 함수는 %d 인자가 필요합니다" -#: tcop/fastpath.c:451 +#: tcop/fastpath.c:439 #, c-format msgid "function call message contains %d argument formats but %d arguments" msgstr "함수 호출 메시지는 %d 인자를 사용하지만, 함수는 %d 인자가 필요합니다" -#: tcop/fastpath.c:538 tcop/fastpath.c:621 +#: tcop/fastpath.c:526 tcop/fastpath.c:609 #, c-format msgid "incorrect binary data format in function argument %d" msgstr "함수 인자 %d 안에 잘못된 바이너리 자료 형식 발견됨" -#: tcop/postgres.c:352 tcop/postgres.c:388 tcop/postgres.c:415 +#: tcop/postgres.c:346 tcop/postgres.c:382 tcop/postgres.c:409 #, c-format msgid "unexpected EOF on client connection" msgstr "클라이언트 연결에서 예상치 않은 EOF 발견됨" -#: tcop/postgres.c:438 tcop/postgres.c:450 tcop/postgres.c:461 -#: tcop/postgres.c:473 tcop/postgres.c:4314 +#: tcop/postgres.c:432 tcop/postgres.c:444 tcop/postgres.c:455 +#: tcop/postgres.c:467 tcop/postgres.c:4316 #, c-format msgid "invalid frontend message type %d" msgstr "잘못된 frontend 메시지 형태 %d" -#: tcop/postgres.c:933 +#: tcop/postgres.c:938 #, c-format msgid "statement: %s" msgstr "명령 구문: %s" -#: tcop/postgres.c:1168 +#: tcop/postgres.c:1174 #, c-format msgid "duration: %s ms statement: %s" msgstr "실행시간: %s ms 명령 구문: %s" -#: tcop/postgres.c:1218 +#: tcop/postgres.c:1224 #, c-format msgid "parse %s: %s" msgstr "구문 %s: %s" -#: tcop/postgres.c:1274 +#: tcop/postgres.c:1280 #, c-format msgid "cannot insert multiple commands into a prepared statement" msgstr "준비된 명령 구문에는 다중 명령을 삽입할 수 없습니다" -#: tcop/postgres.c:1431 +#: tcop/postgres.c:1437 #, c-format msgid "duration: %s ms parse %s: %s" msgstr "실행시간: %s ms %s 구문분석: %s" -#: tcop/postgres.c:1476 +#: tcop/postgres.c:1482 #, c-format msgid "bind %s to %s" msgstr "바인드: %s -> %s" -#: tcop/postgres.c:1495 tcop/postgres.c:2312 +#: tcop/postgres.c:1501 tcop/postgres.c:2320 #, c-format msgid "unnamed prepared statement does not exist" msgstr "이름없는 준비된 명령 구문(unnamed prepared statement) 없음" -#: tcop/postgres.c:1537 +#: tcop/postgres.c:1543 #, c-format msgid "bind message has %d parameter formats but %d parameters" msgstr "바인드 메시지는 %d 매개 변수 형태지만, %d 매개 변수여야함" -#: tcop/postgres.c:1543 +#: tcop/postgres.c:1549 #, c-format msgid "" "bind message supplies %d parameters, but prepared statement \"%s\" requires " @@ -17871,85 +19622,85 @@ msgstr "" "바인드 메시지는 %d개의 매개 변수를 지원하지만, \"%s\" 준비된 명령 구문" "(prepared statement)에서는%d 개의 매개 변수가 필요합니다" -#: tcop/postgres.c:1712 +#: tcop/postgres.c:1719 #, c-format msgid "incorrect binary data format in bind parameter %d" msgstr "바인드 매개 변수 %d 안에 잘못된 바이너리 자료 형태가 있음" -#: tcop/postgres.c:1810 +#: tcop/postgres.c:1817 #, c-format msgid "duration: %s ms bind %s%s%s: %s" msgstr "실행시간: %s ms %s%s%s 접속: %s" -#: tcop/postgres.c:1858 tcop/postgres.c:2392 +#: tcop/postgres.c:1865 
tcop/postgres.c:2400 #, c-format msgid "portal \"%s\" does not exist" msgstr "\"%s\" portal 없음" -#: tcop/postgres.c:1943 +#: tcop/postgres.c:1950 #, c-format msgid "%s %s%s%s: %s" msgstr "%s %s%s%s: %s" -#: tcop/postgres.c:1945 tcop/postgres.c:2030 +#: tcop/postgres.c:1952 tcop/postgres.c:2038 msgid "execute fetch from" msgstr "자료뽑기" -#: tcop/postgres.c:1946 tcop/postgres.c:2031 +#: tcop/postgres.c:1953 tcop/postgres.c:2039 msgid "execute" msgstr "쿼리실행" -#: tcop/postgres.c:2027 +#: tcop/postgres.c:2035 #, c-format msgid "duration: %s ms %s %s%s%s: %s" msgstr "수행시간: %s ms %s %s%s%s: %s" -#: tcop/postgres.c:2153 +#: tcop/postgres.c:2161 #, c-format msgid "prepare: %s" msgstr "prepare: %s" -#: tcop/postgres.c:2216 +#: tcop/postgres.c:2224 #, c-format msgid "parameters: %s" msgstr "매개 변수: %s" -#: tcop/postgres.c:2235 +#: tcop/postgres.c:2243 #, c-format msgid "abort reason: recovery conflict" msgstr "중지 이유: 복구 충돌" -#: tcop/postgres.c:2251 +#: tcop/postgres.c:2259 #, c-format msgid "User was holding shared buffer pin for too long." msgstr "" -#: tcop/postgres.c:2254 +#: tcop/postgres.c:2262 #, c-format msgid "User was holding a relation lock for too long." msgstr "" -#: tcop/postgres.c:2257 +#: tcop/postgres.c:2265 #, c-format msgid "User was or might have been using tablespace that must be dropped." msgstr "" -#: tcop/postgres.c:2260 +#: tcop/postgres.c:2268 #, c-format msgid "User query might have needed to see row versions that must be removed." msgstr "" -#: tcop/postgres.c:2266 +#: tcop/postgres.c:2274 #, c-format msgid "User was connected to a database that must be dropped." msgstr "삭제 되어져야할 데이터베이스 사용자 접속해 있습니다." -#: tcop/postgres.c:2595 +#: tcop/postgres.c:2583 #, c-format msgid "terminating connection because of crash of another server process" msgstr "다른 서버 프로세스가 손상을 입어 현재 연결을 중지합니다" -#: tcop/postgres.c:2596 +#: tcop/postgres.c:2584 #, c-format msgid "" "The postmaster has commanded this server process to roll back the current " @@ -17960,19 +19711,19 @@ msgstr "" "와의 연결을 끊으라는 명령을 보냈습니다. 왜냐하면, 다른 서버 프로세스가 비정상" "적으로 중지되어 공유 메모리가 손상되었을 가능성이 있기 때문입니다" -#: tcop/postgres.c:2600 tcop/postgres.c:2904 +#: tcop/postgres.c:2588 tcop/postgres.c:2913 #, c-format msgid "" "In a moment you should be able to reconnect to the database and repeat your " "command." msgstr "잠시 뒤에 다시 연결 해서 작업을 계속 하십시오" -#: tcop/postgres.c:2686 +#: tcop/postgres.c:2674 #, c-format msgid "floating-point exception" msgstr "부동소수점 예외발생" -#: tcop/postgres.c:2687 +#: tcop/postgres.c:2675 #, c-format msgid "" "An invalid floating-point operation was signaled. This probably means an out-" @@ -17981,62 +19732,72 @@ msgstr "" "잘못된 부동소수점 작업이 감지 되었습니다. 이것은 아마도 결과값 범위초과나 0으" "로 나누는 작업과 같은 잘못된 연산 때문에 발생한 것 같습니다" -#: tcop/postgres.c:2849 +#: tcop/postgres.c:2843 #, c-format msgid "canceling authentication due to timeout" msgstr "시간 초과로 인증 작업을 취소합니다." -#: tcop/postgres.c:2853 +#: tcop/postgres.c:2847 #, c-format msgid "terminating autovacuum process due to administrator command" msgstr "관리자 명령으로 인해 자동 청소 프로세스를 종료하는 중" -#: tcop/postgres.c:2859 tcop/postgres.c:2869 tcop/postgres.c:2902 +#: tcop/postgres.c:2851 +#, c-format +msgid "terminating logical replication worker due to administrator command" +msgstr "관리자 요청에 의해서 논리 복제 작업자를 끝냅니다" + +#: tcop/postgres.c:2855 +#, c-format +msgid "logical replication launcher shutting down" +msgstr "논리 복제 관리자를 중지하고 있습니다" + +#: tcop/postgres.c:2868 tcop/postgres.c:2878 tcop/postgres.c:2911 #, c-format msgid "terminating connection due to conflict with recovery" msgstr "복구 작업 중 충돌로 연결을 종료합니다." 
-#: tcop/postgres.c:2875 +#: tcop/postgres.c:2884 #, c-format msgid "terminating connection due to administrator command" msgstr "관리자 요청에 의해서 연결을 끝냅니다" -#: tcop/postgres.c:2885 +#: tcop/postgres.c:2894 #, c-format msgid "connection to client lost" msgstr "서버로부터 연결이 끊어졌습니다." -#: tcop/postgres.c:2953 +#: tcop/postgres.c:2962 #, c-format msgid "canceling statement due to lock timeout" msgstr "잠금 대기 시간 초과로 작업을 취소합니다." -#: tcop/postgres.c:2960 +#: tcop/postgres.c:2969 #, c-format msgid "canceling statement due to statement timeout" msgstr "명령실행시간 초과로 작업을 취소합니다." -#: tcop/postgres.c:2967 +#: tcop/postgres.c:2976 #, c-format msgid "canceling autovacuum task" msgstr "자동 청소 작업을 취소하는 중" -#: tcop/postgres.c:2990 +#: tcop/postgres.c:2999 #, c-format msgid "canceling statement due to user request" msgstr "사용자 요청에 의해 작업을 취소합니다." -#: tcop/postgres.c:3000 +#: tcop/postgres.c:3009 #, c-format msgid "terminating connection due to idle-in-transaction timeout" msgstr "idle-in-transaction 시간 초과로 연결을 끝냅니다" -#: tcop/postgres.c:3114 +#: tcop/postgres.c:3123 #, c-format msgid "stack depth limit exceeded" msgstr "스택 깊이를 초과했습니다" -#: tcop/postgres.c:3115 +#: tcop/postgres.c:3124 #, c-format msgid "" "Increase the configuration parameter \"max_stack_depth\" (currently %dkB), " @@ -18045,59 +19806,59 @@ msgstr "" "먼저 OS에서 지원하는 스택 depth 최대값을 확인한 뒤, 허용범위 안에서 " "\"max_stack_depth\" (현재값: %dkB) 매개 변수 값의 설정치를 증가시키세요." -#: tcop/postgres.c:3178 +#: tcop/postgres.c:3187 #, c-format msgid "\"max_stack_depth\" must not exceed %ldkB." msgstr "\"max_stack_depth\" 값은 %ldkB를 초과할 수 없습니다" -#: tcop/postgres.c:3180 +#: tcop/postgres.c:3189 #, c-format msgid "" "Increase the platform's stack depth limit via \"ulimit -s\" or local " "equivalent." msgstr "OS의 \"ulimit -s\" 명령과 같은 것으로 스택 깊이를 늘려주십시오." -#: tcop/postgres.c:3540 +#: tcop/postgres.c:3549 #, c-format msgid "invalid command-line argument for server process: %s" msgstr "서버 프로세스의 명령행 인자가 잘못되었습니다: %s" -#: tcop/postgres.c:3541 tcop/postgres.c:3547 +#: tcop/postgres.c:3550 tcop/postgres.c:3556 #, c-format msgid "Try \"%s --help\" for more information." msgstr "자세한 사항은 \"%s --help\" 명령으로 살펴보세요." 
-#: tcop/postgres.c:3545 +#: tcop/postgres.c:3554 #, c-format msgid "%s: invalid command-line argument: %s" msgstr "%s: 잘못된 명령행 인자: %s" -#: tcop/postgres.c:3607 +#: tcop/postgres.c:3616 #, c-format msgid "%s: no database nor user name specified" msgstr "%s: 데이터베이스와 사용자를 지정하지 않았습니다" -#: tcop/postgres.c:4222 +#: tcop/postgres.c:4224 #, c-format msgid "invalid CLOSE message subtype %d" msgstr "잘못된 CLOSE 메시지 서브타입 %d" -#: tcop/postgres.c:4257 +#: tcop/postgres.c:4259 #, c-format msgid "invalid DESCRIBE message subtype %d" msgstr "잘못된 DESCRIBE 메시지 서브타입 %d" -#: tcop/postgres.c:4335 +#: tcop/postgres.c:4337 #, c-format msgid "fastpath function calls not supported in a replication connection" msgstr "복제 연결에서는 fastpath 함수 호출을 지원하지 않습니다" -#: tcop/postgres.c:4339 +#: tcop/postgres.c:4341 #, c-format msgid "extended query protocol not supported in a replication connection" msgstr "" -#: tcop/postgres.c:4509 +#: tcop/postgres.c:4511 #, c-format msgid "" "disconnection: session time: %d:%02d:%02d.%03d user=%s database=%s host=%s%s" @@ -18106,170 +19867,170 @@ msgstr "" "연결종료: 세션 시간: %d:%02d:%02d.%03d 사용자=%s 데이터베이스=%s 호스트=%s%s" "%s" -#: tcop/pquery.c:665 +#: tcop/pquery.c:645 #, c-format msgid "bind message has %d result formats but query has %d columns" msgstr "" -"바인드 메시지는 %d 결과 포멧을 가지고 있고, 쿼리는 %d 열을 가지고 있습니다" +"바인드 메시지는 %d 결과 포멧을 가지고 있고, 쿼리는 %d 칼럼을 가지고 있습니다" -#: tcop/pquery.c:967 +#: tcop/pquery.c:952 #, c-format msgid "cursor can only scan forward" msgstr "이 커서는 앞으로 이동 전용입니다" -#: tcop/pquery.c:968 +#: tcop/pquery.c:953 #, c-format msgid "Declare it with SCROLL option to enable backward scan." msgstr "" "뒤로 이동 가능한 커서를 만드려면 SCROLL 옵션을 추가해서 커서를 만드세요." #. translator: %s is name of a SQL command, eg CREATE -#: tcop/utility.c:235 +#: tcop/utility.c:242 #, c-format msgid "cannot execute %s in a read-only transaction" msgstr "읽기 전용 트랜잭션에서는 %s 명령을 실행할 수 없습니다." #. translator: %s is name of a SQL command, eg CREATE -#: tcop/utility.c:253 +#: tcop/utility.c:260 #, c-format msgid "cannot execute %s during a parallel operation" msgstr "병렬 처리 작업에서는 %s 명령을 실행할 수 없습니다." #. translator: %s is name of a SQL command, eg CREATE -#: tcop/utility.c:272 +#: tcop/utility.c:279 #, c-format msgid "cannot execute %s during recovery" msgstr "복구 작업 중에는 %s 명령을 실행할 수 없습니다." #. 
translator: %s is name of a SQL command, eg PREPARE -#: tcop/utility.c:290 +#: tcop/utility.c:297 #, c-format msgid "cannot execute %s within security-restricted operation" msgstr "보안 제한 작업 내에서 %s을(를) 실행할 수 없음" -#: tcop/utility.c:744 +#: tcop/utility.c:765 #, c-format msgid "must be superuser to do CHECKPOINT" msgstr "CHECKPOINT 명령은 슈퍼유저만 사용할 수 있습니다" -#: tsearch/dict_ispell.c:51 tsearch/dict_thesaurus.c:623 +#: tsearch/dict_ispell.c:52 tsearch/dict_thesaurus.c:624 #, c-format msgid "multiple DictFile parameters" msgstr "DictFile 매개 변수가 여러 개 있음" -#: tsearch/dict_ispell.c:62 +#: tsearch/dict_ispell.c:63 #, c-format msgid "multiple AffFile parameters" msgstr "AffFile 매개 변수가 여러 개 있음" -#: tsearch/dict_ispell.c:81 +#: tsearch/dict_ispell.c:82 #, c-format msgid "unrecognized Ispell parameter: \"%s\"" msgstr "인식할 수 없는 Ispell 매개 변수: \"%s\"" -#: tsearch/dict_ispell.c:95 +#: tsearch/dict_ispell.c:96 #, c-format msgid "missing AffFile parameter" msgstr "AffFile 매개 변수가 누락됨" -#: tsearch/dict_ispell.c:101 tsearch/dict_thesaurus.c:647 +#: tsearch/dict_ispell.c:102 tsearch/dict_thesaurus.c:648 #, c-format msgid "missing DictFile parameter" msgstr "DictFile 매개 변수가 누락됨" -#: tsearch/dict_simple.c:57 +#: tsearch/dict_simple.c:58 #, c-format msgid "multiple Accept parameters" msgstr "Accept 매개 변수가 여러 개 있음" -#: tsearch/dict_simple.c:65 +#: tsearch/dict_simple.c:66 #, c-format msgid "unrecognized simple dictionary parameter: \"%s\"" msgstr "인식할 수 없는 simple 사전 매개 변수: \"%s\"" -#: tsearch/dict_synonym.c:117 +#: tsearch/dict_synonym.c:118 #, c-format msgid "unrecognized synonym parameter: \"%s\"" msgstr "인식할 수 없는 synonym 매개 변수: \"%s\"" -#: tsearch/dict_synonym.c:124 +#: tsearch/dict_synonym.c:125 #, c-format msgid "missing Synonyms parameter" msgstr "Synonyms 매개 변수가 누락됨" -#: tsearch/dict_synonym.c:131 +#: tsearch/dict_synonym.c:132 #, c-format msgid "could not open synonym file \"%s\": %m" msgstr "\"%s\" 동의어 파일을 열 수 없음: %m" -#: tsearch/dict_thesaurus.c:178 +#: tsearch/dict_thesaurus.c:179 #, c-format msgid "could not open thesaurus file \"%s\": %m" msgstr "\"%s\" 기준어 파일을 열 수 없음: %m" -#: tsearch/dict_thesaurus.c:211 +#: tsearch/dict_thesaurus.c:212 #, c-format msgid "unexpected delimiter" msgstr "예기치 않은 구분자" -#: tsearch/dict_thesaurus.c:261 tsearch/dict_thesaurus.c:277 +#: tsearch/dict_thesaurus.c:262 tsearch/dict_thesaurus.c:278 #, c-format msgid "unexpected end of line or lexeme" msgstr "예기치 않은 줄 끝 또는 어휘소" -#: tsearch/dict_thesaurus.c:286 +#: tsearch/dict_thesaurus.c:287 #, c-format msgid "unexpected end of line" msgstr "예기치 않은 줄 끝" -#: tsearch/dict_thesaurus.c:296 +#: tsearch/dict_thesaurus.c:297 #, c-format msgid "too many lexemes in thesaurus entry" msgstr "기준어 항목에 너무 많은 어휘소가 있음" -#: tsearch/dict_thesaurus.c:420 +#: tsearch/dict_thesaurus.c:421 #, c-format msgid "" "thesaurus sample word \"%s\" isn't recognized by subdictionary (rule %d)" msgstr "\"%s\" 기준 단어는 하위 사전에서 인식할 수 없음(규칙 %d)" -#: tsearch/dict_thesaurus.c:426 +#: tsearch/dict_thesaurus.c:427 #, c-format msgid "thesaurus sample word \"%s\" is a stop word (rule %d)" msgstr "\"%s\" 동의어 사전 샘플 단어는 중지 단어임(규칙 %d)" -#: tsearch/dict_thesaurus.c:429 +#: tsearch/dict_thesaurus.c:430 #, c-format msgid "Use \"?\" to represent a stop word within a sample phrase." msgstr "샘플 구 내에서 중지 단어를 나타내려면 \"?\"를 사용하십시오." 
-#: tsearch/dict_thesaurus.c:575 +#: tsearch/dict_thesaurus.c:576 #, c-format msgid "thesaurus substitute word \"%s\" is a stop word (rule %d)" msgstr "\"%s\" 동의어 사전 대체 단어는 중지 단어임(규칙 %d)" -#: tsearch/dict_thesaurus.c:582 +#: tsearch/dict_thesaurus.c:583 #, c-format msgid "" "thesaurus substitute word \"%s\" isn't recognized by subdictionary (rule %d)" msgstr "\"%s\" 동의어 사전 대체 단어는 하위 사전에서 인식할 수 없음(규칙 %d)" -#: tsearch/dict_thesaurus.c:594 +#: tsearch/dict_thesaurus.c:595 #, c-format msgid "thesaurus substitute phrase is empty (rule %d)" msgstr "동의어 사전 대체 구가 비어 있음(규칙 %d)" -#: tsearch/dict_thesaurus.c:632 +#: tsearch/dict_thesaurus.c:633 #, c-format msgid "multiple Dictionary parameters" msgstr "Dictionary 매개 변수가 여러 개 있음" -#: tsearch/dict_thesaurus.c:639 +#: tsearch/dict_thesaurus.c:640 #, c-format msgid "unrecognized Thesaurus parameter: \"%s\"" msgstr "인식할 수 없는 Thesaurus 매개 변수: \"%s\"" -#: tsearch/dict_thesaurus.c:651 +#: tsearch/dict_thesaurus.c:652 #, c-format msgid "missing Dictionary parameter" msgstr "Dictionary 매개 변수가 누락됨" @@ -18306,7 +20067,7 @@ msgid "invalid regular expression: %s" msgstr "잘못된 정규식: %s" #: tsearch/spell.c:954 tsearch/spell.c:971 tsearch/spell.c:988 -#: tsearch/spell.c:1005 tsearch/spell.c:1070 gram.y:14405 gram.y:14422 +#: tsearch/spell.c:1005 tsearch/spell.c:1070 gram.y:15291 gram.y:15308 #, c-format msgid "syntax error" msgstr "구문 오류" @@ -18338,8 +20099,8 @@ msgstr "잘못된 플래그 백터 별칭 개수" msgid "affix file contains both old-style and new-style commands" msgstr "affix 파일에 옛방식과 새방식 명령이 함께 있습니다" -#: tsearch/to_tsany.c:170 utils/adt/tsvector.c:270 -#: utils/adt/tsvector_op.c:1133 +#: tsearch/to_tsany.c:185 utils/adt/tsvector.c:271 +#: utils/adt/tsvector_op.c:1134 #, c-format msgid "string is too long for tsvector (%d bytes, max %d bytes)" msgstr "" @@ -18377,7 +20138,7 @@ msgstr "\"%s\" 전문 검색 구성 파일 이름이 잘못됨" msgid "could not open stop-word file \"%s\": %m" msgstr "\"%s\" 중지 단어 파일을 열 수 없음: %m" -#: tsearch/wparser.c:306 +#: tsearch/wparser.c:322 tsearch/wparser.c:410 tsearch/wparser.c:487 #, c-format msgid "text search parser does not support headline creation" msgstr "전문 검색 분석기에서 헤드라인 작성을 지원하지 않음" @@ -18493,45 +20254,44 @@ msgstr "???의존(적인) 권한이 존재합니다" msgid "Use CASCADE to revoke them too." msgstr "그것들을 취소하려면 \"CASCADE\"를 사용하세요." -#: utils/adt/acl.c:1537 +#: utils/adt/acl.c:1520 #, c-format msgid "aclinsert is no longer supported" msgstr "aclinsert 더이상 지원하지 않음" -#: utils/adt/acl.c:1547 +#: utils/adt/acl.c:1530 #, c-format msgid "aclremove is no longer supported" msgstr "aclremovie 더이상 지원하지 않음" -#: utils/adt/acl.c:1633 utils/adt/acl.c:1687 +#: utils/adt/acl.c:1616 utils/adt/acl.c:1670 #, c-format msgid "unrecognized privilege type: \"%s\"" msgstr "알 수 없는 권한 타입: \"%s\"" -#: utils/adt/acl.c:3427 utils/adt/regproc.c:123 utils/adt/regproc.c:144 -#: utils/adt/regproc.c:319 +#: utils/adt/acl.c:3410 utils/adt/regproc.c:102 utils/adt/regproc.c:277 #, c-format msgid "function \"%s\" does not exist" msgstr "\"%s\" 함수가 없습니다." 
-#: utils/adt/acl.c:4881 +#: utils/adt/acl.c:4864 #, c-format msgid "must be member of role \"%s\"" msgstr "\"%s\" 롤의 구성원이어야 함" #: utils/adt/array_expanded.c:274 utils/adt/arrayfuncs.c:931 #: utils/adt/arrayfuncs.c:1519 utils/adt/arrayfuncs.c:3251 -#: utils/adt/arrayfuncs.c:3389 utils/adt/arrayfuncs.c:5848 -#: utils/adt/arrayfuncs.c:6159 utils/adt/arrayutils.c:93 +#: utils/adt/arrayfuncs.c:3389 utils/adt/arrayfuncs.c:5846 +#: utils/adt/arrayfuncs.c:6157 utils/adt/arrayutils.c:93 #: utils/adt/arrayutils.c:102 utils/adt/arrayutils.c:109 #, c-format msgid "array size exceeds the maximum allowed (%d)" msgstr "배열 크기가 최대치 (%d)를 초과했습니다" -#: utils/adt/array_userfuncs.c:79 utils/adt/array_userfuncs.c:541 -#: utils/adt/array_userfuncs.c:621 utils/adt/json.c:1759 utils/adt/json.c:1854 -#: utils/adt/json.c:1892 utils/adt/jsonb.c:1126 utils/adt/jsonb.c:1155 -#: utils/adt/jsonb.c:1591 utils/adt/jsonb.c:1755 utils/adt/jsonb.c:1765 +#: utils/adt/array_userfuncs.c:79 utils/adt/array_userfuncs.c:471 +#: utils/adt/array_userfuncs.c:551 utils/adt/json.c:1764 utils/adt/json.c:1859 +#: utils/adt/json.c:1897 utils/adt/jsonb.c:1127 utils/adt/jsonb.c:1156 +#: utils/adt/jsonb.c:1592 utils/adt/jsonb.c:1756 utils/adt/jsonb.c:1766 #, c-format msgid "could not determine input data type" msgstr "입력 자료형을 결정할 수 없음" @@ -18543,14 +20303,14 @@ msgstr "입력 자료형이 배열이 아닙니다." #: utils/adt/array_userfuncs.c:132 utils/adt/array_userfuncs.c:186 #: utils/adt/arrayfuncs.c:1322 utils/adt/float.c:1228 utils/adt/float.c:1287 -#: utils/adt/float.c:3556 utils/adt/float.c:3572 utils/adt/int.c:623 -#: utils/adt/int.c:652 utils/adt/int.c:673 utils/adt/int.c:704 -#: utils/adt/int.c:737 utils/adt/int.c:759 utils/adt/int.c:907 -#: utils/adt/int.c:928 utils/adt/int.c:955 utils/adt/int.c:995 -#: utils/adt/int.c:1016 utils/adt/int.c:1043 utils/adt/int.c:1076 -#: utils/adt/int.c:1159 utils/adt/int8.c:1298 utils/adt/numeric.c:2903 -#: utils/adt/numeric.c:2912 utils/adt/varbit.c:1173 utils/adt/varbit.c:1575 -#: utils/adt/varlena.c:1055 utils/adt/varlena.c:2807 +#: utils/adt/float.c:3556 utils/adt/float.c:3572 utils/adt/int.c:608 +#: utils/adt/int.c:637 utils/adt/int.c:658 utils/adt/int.c:689 +#: utils/adt/int.c:722 utils/adt/int.c:744 utils/adt/int.c:892 +#: utils/adt/int.c:913 utils/adt/int.c:940 utils/adt/int.c:980 +#: utils/adt/int.c:1001 utils/adt/int.c:1028 utils/adt/int.c:1061 +#: utils/adt/int.c:1144 utils/adt/int8.c:1298 utils/adt/numeric.c:2953 +#: utils/adt/numeric.c:2962 utils/adt/varbit.c:1173 utils/adt/varbit.c:1575 +#: utils/adt/varlena.c:1054 utils/adt/varlena.c:2953 #, c-format msgid "integer out of range" msgstr "정수 범위를 벗어남" @@ -18590,18 +20350,12 @@ msgstr "차원(배열 깊이)이 다른 배열들을 서로 합칠 수 없습니 msgid "Arrays with differing dimensions are not compatible for concatenation." msgstr "차원(배열 깊이)이 다른 배열들을 서로 합칠 수 없습니다" -#: utils/adt/array_userfuncs.c:480 utils/adt/arrayfuncs.c:1284 -#: utils/adt/arrayfuncs.c:3357 utils/adt/arrayfuncs.c:5754 -#, c-format -msgid "invalid number of dimensions: %d" -msgstr "잘못된 배열 차원(배열 깊이): %d" - -#: utils/adt/array_userfuncs.c:737 utils/adt/array_userfuncs.c:889 +#: utils/adt/array_userfuncs.c:667 utils/adt/array_userfuncs.c:819 #, c-format msgid "searching for elements in multidimensional arrays is not supported" msgstr "다차원 배열에서 요소 검색 기능은 지원하지 않음" -#: utils/adt/array_userfuncs.c:761 +#: utils/adt/array_userfuncs.c:691 #, c-format msgid "initial position must not be null" msgstr "초기 위치값은 null값이 아니여야 함" @@ -18659,7 +20413,7 @@ msgid "Specified array dimensions do not match array contents." 
msgstr "지정한 배열 차원에 해당하는 배열이 없습니다." #: utils/adt/arrayfuncs.c:489 utils/adt/arrayfuncs.c:516 -#: utils/adt/rangetypes.c:2124 utils/adt/rangetypes.c:2132 +#: utils/adt/rangetypes.c:2114 utils/adt/rangetypes.c:2122 #: utils/adt/rowtypes.c:208 utils/adt/rowtypes.c:216 #, c-format msgid "Unexpected end of input." @@ -18681,7 +20435,7 @@ msgstr "예기치 않은 배열 요소" msgid "Unmatched \"%c\" character." msgstr "짝이 안 맞는 \"%c\" 문자" -#: utils/adt/arrayfuncs.c:597 +#: utils/adt/arrayfuncs.c:597 utils/adt/jsonfuncs.c:2381 #, c-format msgid "Multidimensional arrays must have sub-arrays with matching dimensions." msgstr "다차원 배열에는 일치하는 차원이 포함된 배열 식이 있어야 함" @@ -18691,6 +20445,12 @@ msgstr "다차원 배열에는 일치하는 차원이 포함된 배열 식이 msgid "Junk after closing right brace." msgstr "오른쪽 닫기 괄호 뒤에 정크" +#: utils/adt/arrayfuncs.c:1284 utils/adt/arrayfuncs.c:3357 +#: utils/adt/arrayfuncs.c:5752 +#, c-format +msgid "invalid number of dimensions: %d" +msgstr "잘못된 배열 차원(배열 깊이): %d" + #: utils/adt/arrayfuncs.c:1295 #, c-format msgid "invalid array flags" @@ -18702,7 +20462,7 @@ msgid "wrong element type" msgstr "잘못된 요소 타입" #: utils/adt/arrayfuncs.c:1353 utils/adt/rangetypes.c:334 -#: utils/cache/lsyscache.c:2651 +#: utils/cache/lsyscache.c:2683 #, c-format msgid "no binary input function available for type %s" msgstr "%s 자료형에서 사용할 바이너리 입력 함수가 없습니다." @@ -18713,7 +20473,7 @@ msgid "improper binary format in array element %d" msgstr "%d 번째 배열 요소의 포맷이 부적절합니다." #: utils/adt/arrayfuncs.c:1574 utils/adt/rangetypes.c:339 -#: utils/cache/lsyscache.c:2684 +#: utils/cache/lsyscache.c:2716 #, c-format msgid "no binary output function available for type %s" msgstr "%s 자료형에서 사용할 바이너리 출력 함수가 없습니다." @@ -18725,11 +20485,12 @@ msgstr "특정 크기로 배열을 절단하는 기능은 구현되지 않습니 #: utils/adt/arrayfuncs.c:2230 utils/adt/arrayfuncs.c:2252 #: utils/adt/arrayfuncs.c:2301 utils/adt/arrayfuncs.c:2537 -#: utils/adt/arrayfuncs.c:2848 utils/adt/arrayfuncs.c:5740 -#: utils/adt/arrayfuncs.c:5766 utils/adt/arrayfuncs.c:5777 -#: utils/adt/json.c:2290 utils/adt/json.c:2365 utils/adt/jsonb.c:1369 -#: utils/adt/jsonb.c:1455 utils/adt/jsonfuncs.c:3529 -#: utils/adt/jsonfuncs.c:3574 utils/adt/jsonfuncs.c:3621 +#: utils/adt/arrayfuncs.c:2848 utils/adt/arrayfuncs.c:5738 +#: utils/adt/arrayfuncs.c:5764 utils/adt/arrayfuncs.c:5775 +#: utils/adt/json.c:2295 utils/adt/json.c:2370 utils/adt/jsonb.c:1370 +#: utils/adt/jsonb.c:1456 utils/adt/jsonfuncs.c:4141 +#: utils/adt/jsonfuncs.c:4292 utils/adt/jsonfuncs.c:4337 +#: utils/adt/jsonfuncs.c:4384 #, c-format msgid "wrong number of array subscripts" msgstr "잘못된 배열 하위 스크립트(1,2...차원 배열 표시 문제)" @@ -18781,59 +20542,59 @@ msgstr "배열 요소 자료형이 서로 틀린 배열은 비교할 수 없습 #: utils/adt/arrayfuncs.c:3962 utils/adt/rangetypes.c:1253 #, c-format msgid "could not identify a hash function for type %s" -msgstr "%s 자료형에서 사용할 해쉬함수를 찾을 수 없습니다." +msgstr "%s 자료형에서 사용할 해시 함수를 찾을 수 없습니다." -#: utils/adt/arrayfuncs.c:5154 +#: utils/adt/arrayfuncs.c:5152 #, c-format msgid "data type %s is not an array type" msgstr "%s 자료형은 배열이 아닙니다." 
-#: utils/adt/arrayfuncs.c:5209 +#: utils/adt/arrayfuncs.c:5207 #, c-format msgid "cannot accumulate null arrays" msgstr "null 배열을 누적할 수 없음" -#: utils/adt/arrayfuncs.c:5237 +#: utils/adt/arrayfuncs.c:5235 #, c-format msgid "cannot accumulate empty arrays" msgstr "빈 배열을 누적할 수 없음" -#: utils/adt/arrayfuncs.c:5266 utils/adt/arrayfuncs.c:5272 +#: utils/adt/arrayfuncs.c:5264 utils/adt/arrayfuncs.c:5270 #, c-format msgid "cannot accumulate arrays of different dimensionality" msgstr "배열 차수가 서로 틀린 배열은 누적할 수 없음" -#: utils/adt/arrayfuncs.c:5638 utils/adt/arrayfuncs.c:5678 +#: utils/adt/arrayfuncs.c:5636 utils/adt/arrayfuncs.c:5676 #, c-format msgid "dimension array or low bound array cannot be null" msgstr "차원 배열 또는 하한 배열은 NULL일 수 없음" -#: utils/adt/arrayfuncs.c:5741 utils/adt/arrayfuncs.c:5767 +#: utils/adt/arrayfuncs.c:5739 utils/adt/arrayfuncs.c:5765 #, c-format msgid "Dimension array must be one dimensional." msgstr "차원 배열은 일차원 배열이어야 합니다." -#: utils/adt/arrayfuncs.c:5746 utils/adt/arrayfuncs.c:5772 +#: utils/adt/arrayfuncs.c:5744 utils/adt/arrayfuncs.c:5770 #, c-format msgid "dimension values cannot be null" msgstr "차원 값은 null일 수 없음" -#: utils/adt/arrayfuncs.c:5778 +#: utils/adt/arrayfuncs.c:5776 #, c-format msgid "Low bound array has different size than dimensions array." msgstr "하한 배열의 크기가 차원 배열과 다릅니다." -#: utils/adt/arrayfuncs.c:6024 +#: utils/adt/arrayfuncs.c:6022 #, c-format msgid "removing elements from multidimensional arrays is not supported" msgstr "다차원 배열에서 요소 삭제기능은 지원되지 않음" -#: utils/adt/arrayfuncs.c:6301 +#: utils/adt/arrayfuncs.c:6299 #, c-format msgid "thresholds must be one-dimensional array" msgstr "threshold 값은 1차원 배열이어야 합니다." -#: utils/adt/arrayfuncs.c:6306 +#: utils/adt/arrayfuncs.c:6304 #, c-format msgid "thresholds array must not contain NULLs" msgstr "threshold 배열에는 null이 포함되지 않아야 함" @@ -18853,31 +20614,50 @@ msgstr "typmod 배열은 일차원 배열이어야 함" msgid "typmod array must not contain nulls" msgstr "typmod 배열에는 null이 포함되지 않아야 함" -#: utils/adt/ascii.c:75 +#: utils/adt/ascii.c:76 #, c-format msgid "encoding conversion from %s to ASCII not supported" msgstr "%s 인코딩을 ASCII 인코딩으로의 변환은 지원하지 않습니다." -#: utils/adt/bool.c:153 +#. 
translator: first %s is inet or cidr +#: utils/adt/bool.c:153 utils/adt/cash.c:278 utils/adt/datetime.c:3799 +#: utils/adt/float.c:244 utils/adt/float.c:318 utils/adt/float.c:342 +#: utils/adt/float.c:461 utils/adt/float.c:544 utils/adt/float.c:570 +#: utils/adt/geo_ops.c:156 utils/adt/geo_ops.c:166 utils/adt/geo_ops.c:178 +#: utils/adt/geo_ops.c:210 utils/adt/geo_ops.c:255 utils/adt/geo_ops.c:265 +#: utils/adt/geo_ops.c:935 utils/adt/geo_ops.c:1321 utils/adt/geo_ops.c:1356 +#: utils/adt/geo_ops.c:1364 utils/adt/geo_ops.c:3430 utils/adt/geo_ops.c:4563 +#: utils/adt/geo_ops.c:4579 utils/adt/geo_ops.c:4586 utils/adt/mac.c:94 +#: utils/adt/mac8.c:93 utils/adt/mac8.c:166 utils/adt/mac8.c:184 +#: utils/adt/mac8.c:202 utils/adt/mac8.c:221 utils/adt/nabstime.c:1539 +#: utils/adt/network.c:58 utils/adt/numeric.c:593 utils/adt/numeric.c:620 +#: utils/adt/numeric.c:5488 utils/adt/numeric.c:5512 utils/adt/numeric.c:5536 +#: utils/adt/numeric.c:6338 utils/adt/numeric.c:6364 utils/adt/oid.c:44 +#: utils/adt/oid.c:58 utils/adt/oid.c:64 utils/adt/oid.c:86 +#: utils/adt/pg_lsn.c:44 utils/adt/pg_lsn.c:50 utils/adt/tid.c:72 +#: utils/adt/tid.c:80 utils/adt/tid.c:88 utils/adt/txid.c:405 +#: utils/adt/uuid.c:136 #, c-format -msgid "invalid input syntax for type boolean: \"%s\"" -msgstr "boolean 자료형에 대한 잘못된 입력: \"%s\"" +msgid "invalid input syntax for type %s: \"%s\"" +msgstr "%s 자료형 대한 잘못된 입력: \"%s\"" -#: utils/adt/cash.c:246 +#: utils/adt/cash.c:211 utils/adt/cash.c:238 utils/adt/cash.c:249 +#: utils/adt/cash.c:292 utils/adt/int8.c:114 utils/adt/numutils.c:75 +#: utils/adt/numutils.c:82 utils/adt/oid.c:70 utils/adt/oid.c:109 #, c-format -msgid "invalid input syntax for type money: \"%s\"" -msgstr "money 자료형에 대한 잘못된 입력: \"%s\"" +msgid "value \"%s\" is out of range for type %s" +msgstr "입력한 \"%s\" 값은 %s 자료형 범위를 초과했습니다" -#: utils/adt/cash.c:607 utils/adt/cash.c:657 utils/adt/cash.c:708 -#: utils/adt/cash.c:757 utils/adt/cash.c:809 utils/adt/cash.c:859 +#: utils/adt/cash.c:653 utils/adt/cash.c:703 utils/adt/cash.c:754 +#: utils/adt/cash.c:803 utils/adt/cash.c:855 utils/adt/cash.c:905 #: utils/adt/float.c:855 utils/adt/float.c:919 utils/adt/float.c:3315 -#: utils/adt/float.c:3378 utils/adt/geo_ops.c:4093 utils/adt/int.c:719 -#: utils/adt/int.c:861 utils/adt/int.c:969 utils/adt/int.c:1058 -#: utils/adt/int.c:1097 utils/adt/int.c:1125 utils/adt/int8.c:597 +#: utils/adt/float.c:3378 utils/adt/geo_ops.c:4093 utils/adt/int.c:704 +#: utils/adt/int.c:846 utils/adt/int.c:954 utils/adt/int.c:1043 +#: utils/adt/int.c:1082 utils/adt/int.c:1110 utils/adt/int8.c:597 #: utils/adt/int8.c:657 utils/adt/int8.c:897 utils/adt/int8.c:1005 -#: utils/adt/int8.c:1094 utils/adt/int8.c:1202 utils/adt/numeric.c:6818 -#: utils/adt/numeric.c:7107 utils/adt/numeric.c:8120 -#: utils/adt/timestamp.c:3499 +#: utils/adt/int8.c:1094 utils/adt/int8.c:1202 utils/adt/numeric.c:6902 +#: utils/adt/numeric.c:7191 utils/adt/numeric.c:8203 +#: utils/adt/timestamp.c:3216 #, c-format msgid "division by zero" msgstr "0으로는 나눌수 없습니다." @@ -18887,176 +20667,164 @@ msgstr "0으로는 나눌수 없습니다." msgid "\"char\" out of range" msgstr "\"char\" 범위를 벗어났습니다." 
-#: utils/adt/date.c:67 utils/adt/timestamp.c:94 utils/adt/varbit.c:52 -#: utils/adt/varchar.c:45 +#: utils/adt/date.c:67 utils/adt/timestamp.c:95 utils/adt/varbit.c:53 +#: utils/adt/varchar.c:46 #, c-format msgid "invalid type modifier" msgstr "잘못된 자료형 한정자" -#: utils/adt/date.c:72 +#: utils/adt/date.c:79 #, c-format msgid "TIME(%d)%s precision must not be negative" msgstr "TIME(%d)%s 정밀도로 음수를 사용할 수 없습니다" -#: utils/adt/date.c:78 +#: utils/adt/date.c:85 #, c-format msgid "TIME(%d)%s precision reduced to maximum allowed, %d" msgstr "TIME(%d)%s 정밀도는 최대값(%d)으로 줄였습니다" -#: utils/adt/date.c:141 utils/adt/datetime.c:1278 utils/adt/datetime.c:2191 +#: utils/adt/date.c:146 utils/adt/datetime.c:1209 utils/adt/datetime.c:2117 #, c-format msgid "date/time value \"current\" is no longer supported" msgstr "날자와 시간 입력을 위한 \"current\" 는 더이상 지원하지 않습니다." -#: utils/adt/date.c:167 utils/adt/date.c:175 utils/adt/formatting.c:3529 -#: utils/adt/formatting.c:3538 +#: utils/adt/date.c:172 utils/adt/date.c:180 utils/adt/formatting.c:3585 +#: utils/adt/formatting.c:3594 #, c-format msgid "date out of range: \"%s\"" msgstr "날짜 범위가 벗어났음: \"%s\"" -#: utils/adt/date.c:222 utils/adt/date.c:456 utils/adt/date.c:480 -#: utils/adt/xml.c:2027 +#: utils/adt/date.c:227 utils/adt/date.c:539 utils/adt/date.c:563 +#: utils/adt/xml.c:2089 #, c-format msgid "date out of range" msgstr "날짜가 범위를 벗어남" -#: utils/adt/date.c:264 utils/adt/timestamp.c:593 +#: utils/adt/date.c:273 utils/adt/timestamp.c:564 #, c-format msgid "date field value out of range: %d-%02d-%02d" msgstr "날짜 필드의 값이 범위를 벗어남: %d-%02d-%02d" -#: utils/adt/date.c:271 utils/adt/date.c:280 utils/adt/timestamp.c:599 +#: utils/adt/date.c:280 utils/adt/date.c:289 utils/adt/timestamp.c:570 #, c-format msgid "date out of range: %d-%02d-%02d" msgstr "날짜 범위가 벗어났음: %d-%02d-%02d" -#: utils/adt/date.c:431 +#: utils/adt/date.c:327 utils/adt/date.c:350 utils/adt/date.c:376 +#: utils/adt/date.c:1092 utils/adt/date.c:1138 utils/adt/date.c:1672 +#: utils/adt/date.c:1703 utils/adt/date.c:1732 utils/adt/date.c:2469 +#: utils/adt/datetime.c:1690 utils/adt/formatting.c:3460 +#: utils/adt/formatting.c:3492 utils/adt/formatting.c:3560 +#: utils/adt/json.c:1539 utils/adt/json.c:1561 utils/adt/jsonb.c:824 +#: utils/adt/jsonb.c:848 utils/adt/nabstime.c:456 utils/adt/nabstime.c:499 +#: utils/adt/nabstime.c:529 utils/adt/nabstime.c:572 utils/adt/timestamp.c:230 +#: utils/adt/timestamp.c:262 utils/adt/timestamp.c:692 +#: utils/adt/timestamp.c:701 utils/adt/timestamp.c:779 +#: utils/adt/timestamp.c:812 utils/adt/timestamp.c:2795 +#: utils/adt/timestamp.c:2816 utils/adt/timestamp.c:2829 +#: utils/adt/timestamp.c:2838 utils/adt/timestamp.c:2846 +#: utils/adt/timestamp.c:2901 utils/adt/timestamp.c:2924 +#: utils/adt/timestamp.c:2937 utils/adt/timestamp.c:2948 +#: utils/adt/timestamp.c:2956 utils/adt/timestamp.c:3512 +#: utils/adt/timestamp.c:3637 utils/adt/timestamp.c:3678 +#: utils/adt/timestamp.c:3759 utils/adt/timestamp.c:3805 +#: utils/adt/timestamp.c:3908 utils/adt/timestamp.c:4307 +#: utils/adt/timestamp.c:4406 utils/adt/timestamp.c:4416 +#: utils/adt/timestamp.c:4508 utils/adt/timestamp.c:4610 +#: utils/adt/timestamp.c:4620 utils/adt/timestamp.c:4852 +#: utils/adt/timestamp.c:4866 utils/adt/timestamp.c:4871 +#: utils/adt/timestamp.c:4885 utils/adt/timestamp.c:4930 +#: utils/adt/timestamp.c:4962 utils/adt/timestamp.c:4969 +#: utils/adt/timestamp.c:5002 utils/adt/timestamp.c:5006 +#: utils/adt/timestamp.c:5075 utils/adt/timestamp.c:5079 +#: utils/adt/timestamp.c:5093 utils/adt/timestamp.c:5127 
utils/adt/xml.c:2111 +#: utils/adt/xml.c:2118 utils/adt/xml.c:2138 utils/adt/xml.c:2145 +#, c-format +msgid "timestamp out of range" +msgstr "타임스탬프 범위를 벗어남" + +#: utils/adt/date.c:514 #, c-format msgid "cannot subtract infinite dates" msgstr "무한 날짜를 뺄 수 없음" -#: utils/adt/date.c:509 utils/adt/date.c:544 utils/adt/date.c:566 -#: utils/adt/date.c:2629 utils/adt/date.c:2643 +#: utils/adt/date.c:592 utils/adt/date.c:623 utils/adt/date.c:641 +#: utils/adt/date.c:2506 utils/adt/date.c:2516 #, c-format msgid "date out of range for timestamp" msgstr "날짜가 타임스탬프 범위를 벗어남" -#: utils/adt/date.c:1022 utils/adt/date.c:1068 utils/adt/date.c:1678 -#: utils/adt/date.c:1714 utils/adt/date.c:1748 utils/adt/date.c:2592 -#: utils/adt/datetime.c:1759 utils/adt/formatting.c:3404 -#: utils/adt/formatting.c:3436 utils/adt/formatting.c:3504 -#: utils/adt/json.c:1534 utils/adt/json.c:1556 utils/adt/jsonb.c:823 -#: utils/adt/jsonb.c:847 utils/adt/nabstime.c:455 utils/adt/nabstime.c:498 -#: utils/adt/nabstime.c:528 utils/adt/nabstime.c:571 utils/adt/timestamp.c:224 -#: utils/adt/timestamp.c:268 utils/adt/timestamp.c:726 -#: utils/adt/timestamp.c:735 utils/adt/timestamp.c:817 -#: utils/adt/timestamp.c:857 utils/adt/timestamp.c:3074 -#: utils/adt/timestamp.c:3095 utils/adt/timestamp.c:3108 -#: utils/adt/timestamp.c:3117 utils/adt/timestamp.c:3125 -#: utils/adt/timestamp.c:3180 utils/adt/timestamp.c:3203 -#: utils/adt/timestamp.c:3216 utils/adt/timestamp.c:3227 -#: utils/adt/timestamp.c:3235 utils/adt/timestamp.c:3809 -#: utils/adt/timestamp.c:3938 utils/adt/timestamp.c:3979 -#: utils/adt/timestamp.c:4067 utils/adt/timestamp.c:4113 -#: utils/adt/timestamp.c:4224 utils/adt/timestamp.c:4631 -#: utils/adt/timestamp.c:4747 utils/adt/timestamp.c:4757 -#: utils/adt/timestamp.c:4853 utils/adt/timestamp.c:4972 -#: utils/adt/timestamp.c:4982 utils/adt/timestamp.c:5234 -#: utils/adt/timestamp.c:5248 utils/adt/timestamp.c:5253 -#: utils/adt/timestamp.c:5267 utils/adt/timestamp.c:5316 -#: utils/adt/timestamp.c:5348 utils/adt/timestamp.c:5355 -#: utils/adt/timestamp.c:5381 utils/adt/timestamp.c:5385 -#: utils/adt/timestamp.c:5454 utils/adt/timestamp.c:5458 -#: utils/adt/timestamp.c:5472 utils/adt/timestamp.c:5510 utils/adt/xml.c:2049 -#: utils/adt/xml.c:2056 utils/adt/xml.c:2076 utils/adt/xml.c:2083 -#, c-format -msgid "timestamp out of range" -msgstr "타임스탬프 범위를 벗어남" - -#: utils/adt/date.c:1094 +#: utils/adt/date.c:1164 #, c-format msgid "cannot convert reserved abstime value to date" msgstr "예약된 abstime 값을 date로 형변환할 수 없습니다." -#: utils/adt/date.c:1112 utils/adt/date.c:1118 +#: utils/adt/date.c:1182 utils/adt/date.c:1188 #, c-format msgid "abstime out of range for date" msgstr "abstime의 날짜값이 범위를 벗어남" -#: utils/adt/date.c:1258 utils/adt/date.c:1265 utils/adt/date.c:2082 -#: utils/adt/date.c:2089 +#: utils/adt/date.c:1301 utils/adt/date.c:2020 #, c-format msgid "time out of range" msgstr "시간 범위를 벗어남" -#: utils/adt/date.c:1326 utils/adt/timestamp.c:618 +#: utils/adt/date.c:1357 utils/adt/timestamp.c:589 #, c-format msgid "time field value out of range: %d:%02d:%02g" msgstr "시간 필드의 값이 범위를 벗어남: %d:%02d:%02g" -#: utils/adt/date.c:1960 utils/adt/date.c:1977 +#: utils/adt/date.c:1907 utils/adt/date.c:1920 #, c-format msgid "\"time\" units \"%s\" not recognized" msgstr "\"%s\" 는 \"time\" 자료형 단위가 아닙니다." 
-#: utils/adt/date.c:2098 +#: utils/adt/date.c:2028 #, c-format msgid "time zone displacement out of range" msgstr "타임 존 변위가 범위를 벗어남" -#: utils/adt/date.c:2740 utils/adt/date.c:2757 +#: utils/adt/date.c:2601 utils/adt/date.c:2614 #, c-format msgid "\"time with time zone\" units \"%s\" not recognized" msgstr "\"%s\" 는 \"time with time zone\" 자료형의 단위가 아닙니다." -#: utils/adt/date.c:2830 utils/adt/datetime.c:995 utils/adt/datetime.c:1917 -#: utils/adt/datetime.c:4743 utils/adt/timestamp.c:532 -#: utils/adt/timestamp.c:559 utils/adt/timestamp.c:5259 -#: utils/adt/timestamp.c:5464 +#: utils/adt/date.c:2687 utils/adt/datetime.c:931 utils/adt/datetime.c:1848 +#: utils/adt/datetime.c:4636 utils/adt/timestamp.c:503 +#: utils/adt/timestamp.c:530 utils/adt/timestamp.c:4877 +#: utils/adt/timestamp.c:5085 #, c-format msgid "time zone \"%s\" not recognized" msgstr "\"%s\" 이름의 시간대는 없습니다." -#: utils/adt/date.c:2870 utils/adt/timestamp.c:5301 utils/adt/timestamp.c:5495 +#: utils/adt/date.c:2719 utils/adt/timestamp.c:4919 utils/adt/timestamp.c:5116 #, c-format msgid "interval time zone \"%s\" must not include months or days" msgstr "" "\"%s\" 시간대 간격(interval time zone) 값으로 달(month) 또는 일(day)을 포함" "할 수 없습니다" -#: utils/adt/datetime.c:3878 utils/adt/datetime.c:3885 +#: utils/adt/datetime.c:3772 utils/adt/datetime.c:3779 #, c-format msgid "date/time field value out of range: \"%s\"" msgstr "날짜/시간 필드의 값이 범위를 벗어남: \"%s\"" -#: utils/adt/datetime.c:3887 +#: utils/adt/datetime.c:3781 #, c-format msgid "Perhaps you need a different \"datestyle\" setting." msgstr "날짜 표현 방식(\"datestyle\")을 다른 것으로 사용하고 있는 듯 합니다." -#: utils/adt/datetime.c:3892 +#: utils/adt/datetime.c:3786 #, c-format msgid "interval field value out of range: \"%s\"" msgstr "interval 필드의 값이 범위를 벗어남: \"%s\"" -#: utils/adt/datetime.c:3898 +#: utils/adt/datetime.c:3792 #, c-format msgid "time zone displacement out of range: \"%s\"" msgstr "표준시간대 범위를 벗어남: \"%s\"" -#. translator: first %s is inet or cidr -#: utils/adt/datetime.c:3905 utils/adt/float.c:461 utils/adt/float.c:544 -#: utils/adt/float.c:570 utils/adt/geo_ops.c:156 utils/adt/geo_ops.c:166 -#: utils/adt/geo_ops.c:178 utils/adt/geo_ops.c:210 utils/adt/geo_ops.c:255 -#: utils/adt/geo_ops.c:265 utils/adt/geo_ops.c:935 utils/adt/geo_ops.c:1321 -#: utils/adt/geo_ops.c:1356 utils/adt/geo_ops.c:1364 utils/adt/geo_ops.c:3430 -#: utils/adt/geo_ops.c:4563 utils/adt/geo_ops.c:4579 utils/adt/geo_ops.c:4586 -#: utils/adt/network.c:58 -#, c-format -msgid "invalid input syntax for type %s: \"%s\"" -msgstr "%s 자료형 대한 잘못된 입력: \"%s\"" - -#: utils/adt/datetime.c:4745 +#: utils/adt/datetime.c:4638 #, c-format msgid "" "This time zone name appears in the configuration file for time zone " @@ -19068,28 +20836,28 @@ msgstr "" msgid "invalid Datum pointer" msgstr "잘못된 Datum 포인터" -#: utils/adt/dbsize.c:110 +#: utils/adt/dbsize.c:116 #, c-format msgid "could not open tablespace directory \"%s\": %m" msgstr "\"%s\" 테이블 스페이스 디렉터리 열 수 없음: %m" -#: utils/adt/dbsize.c:757 utils/adt/dbsize.c:825 +#: utils/adt/dbsize.c:764 utils/adt/dbsize.c:832 #, c-format msgid "invalid size: \"%s\"" msgstr "잘못된 크기: \"%s\"" -#: utils/adt/dbsize.c:826 +#: utils/adt/dbsize.c:833 #, c-format msgid "Invalid size unit: \"%s\"." msgstr "잘못된 크기 단위: \"%s\"" -#: utils/adt/dbsize.c:827 +#: utils/adt/dbsize.c:834 #, c-format msgid "Valid units are \"bytes\", \"kB\", \"MB\", \"GB\", and \"TB\"." msgstr "" "이 매개 변수에 유효한 단위는 \"bytes\",\"kB\", \"MB\", \"GB\", \"TB\"입니다." 
-#: utils/adt/domains.c:86 +#: utils/adt/domains.c:91 #, c-format msgid "type %s is not a domain" msgstr "%s 자료형은 도메인이 아닙니다" @@ -19129,30 +20897,47 @@ msgstr "base64 마침 조합이 잘못되었음" msgid "Input data is missing padding, is truncated, or is otherwise corrupted." msgstr "입력값에 여백 처리값이 빠졌거나, 자료가 손상되었습니다." -#: utils/adt/encode.c:442 utils/adt/encode.c:507 utils/adt/varlena.c:297 -#: utils/adt/varlena.c:338 +#: utils/adt/encode.c:442 utils/adt/encode.c:507 utils/adt/json.c:785 +#: utils/adt/json.c:825 utils/adt/json.c:841 utils/adt/json.c:853 +#: utils/adt/json.c:863 utils/adt/json.c:914 utils/adt/json.c:946 +#: utils/adt/json.c:965 utils/adt/json.c:977 utils/adt/json.c:989 +#: utils/adt/json.c:1134 utils/adt/json.c:1148 utils/adt/json.c:1159 +#: utils/adt/json.c:1167 utils/adt/json.c:1175 utils/adt/json.c:1183 +#: utils/adt/json.c:1191 utils/adt/json.c:1199 utils/adt/json.c:1207 +#: utils/adt/json.c:1215 utils/adt/json.c:1245 utils/adt/varlena.c:296 +#: utils/adt/varlena.c:337 #, c-format -msgid "invalid input syntax for type bytea" -msgstr "bytea 자료형에 대한 잘못된 입력" +msgid "invalid input syntax for type %s" +msgstr "%s 자료형에 대한 잘못된 입력 구문" -#: utils/adt/enum.c:48 utils/adt/enum.c:58 utils/adt/enum.c:113 -#: utils/adt/enum.c:123 +#: utils/adt/enum.c:115 +#, c-format +msgid "unsafe use of new value \"%s\" of enum type %s" +msgstr "" + +#: utils/adt/enum.c:118 +#, c-format +msgid "New enum values must be committed before they can be used." +msgstr "" + +#: utils/adt/enum.c:136 utils/adt/enum.c:146 utils/adt/enum.c:204 +#: utils/adt/enum.c:214 #, c-format msgid "invalid input value for enum %s: \"%s\"" msgstr "%s 열거형의 입력 값이 잘못됨: \"%s\"" -#: utils/adt/enum.c:85 utils/adt/enum.c:148 utils/adt/enum.c:198 +#: utils/adt/enum.c:176 utils/adt/enum.c:242 utils/adt/enum.c:301 #, c-format msgid "invalid internal value for enum: %u" msgstr "열거형의 내부 값이 잘못됨: %u" -#: utils/adt/enum.c:356 utils/adt/enum.c:385 utils/adt/enum.c:425 -#: utils/adt/enum.c:445 +#: utils/adt/enum.c:461 utils/adt/enum.c:490 utils/adt/enum.c:530 +#: utils/adt/enum.c:550 #, c-format msgid "could not determine actual enum type" msgstr "실제 열거형의 자료형을 확인할 수 없음" -#: utils/adt/enum.c:364 utils/adt/enum.c:393 +#: utils/adt/enum.c:469 utils/adt/enum.c:498 #, c-format msgid "enum %s contains no values" msgstr "\"%s\" 열거형 자료에 값이 없음" @@ -19167,11 +20952,6 @@ msgstr "값이 범위를 벗어남: 오버플로" msgid "value out of range: underflow" msgstr "값이 범위를 벗어남: 언더플로" -#: utils/adt/float.c:244 utils/adt/float.c:318 utils/adt/float.c:342 -#, c-format -msgid "invalid input syntax for type real: \"%s\"" -msgstr "real 자료형에 대한 잘못된 입력: \"%s\"" - #: utils/adt/float.c:312 #, c-format msgid "\"%s\" is out of range for type real" @@ -19182,35 +20962,35 @@ msgstr "\"%s\"는 real 자료형의 범위를 벗어납니다." msgid "\"%s\" is out of range for type double precision" msgstr "\"%s\"는 double precision 자료형의 범위를 벗어납니다." -#: utils/adt/float.c:1246 utils/adt/float.c:1304 utils/adt/int.c:349 -#: utils/adt/int.c:775 utils/adt/int.c:804 utils/adt/int.c:825 -#: utils/adt/int.c:845 utils/adt/int.c:879 utils/adt/int.c:1174 -#: utils/adt/int8.c:1323 utils/adt/numeric.c:3000 utils/adt/numeric.c:3009 +#: utils/adt/float.c:1246 utils/adt/float.c:1304 utils/adt/int.c:334 +#: utils/adt/int.c:760 utils/adt/int.c:789 utils/adt/int.c:810 +#: utils/adt/int.c:830 utils/adt/int.c:864 utils/adt/int.c:1159 +#: utils/adt/int8.c:1323 utils/adt/numeric.c:3050 utils/adt/numeric.c:3059 #, c-format msgid "smallint out of range" msgstr "smallint의 범위를 벗어났습니다." 
-#: utils/adt/float.c:1430 utils/adt/numeric.c:7540 +#: utils/adt/float.c:1430 utils/adt/numeric.c:7624 #, c-format msgid "cannot take square root of a negative number" msgstr "음수의 제곱근을 구할 수 없습니다." -#: utils/adt/float.c:1472 utils/adt/numeric.c:2803 +#: utils/adt/float.c:1472 utils/adt/numeric.c:2853 #, c-format msgid "zero raised to a negative power is undefined" msgstr "0의 음수 거듭제곱이 정의되어 있지 않음" -#: utils/adt/float.c:1476 utils/adt/numeric.c:2809 +#: utils/adt/float.c:1476 utils/adt/numeric.c:2859 #, c-format msgid "a negative number raised to a non-integer power yields a complex result" msgstr "음수의 비정수 거듭제곱을 계산하면 복잡한 결과가 생성됨" -#: utils/adt/float.c:1542 utils/adt/float.c:1572 utils/adt/numeric.c:7806 +#: utils/adt/float.c:1542 utils/adt/float.c:1572 utils/adt/numeric.c:7890 #, c-format msgid "cannot take logarithm of zero" msgstr "0의 대수를 구할 수 없습니다." -#: utils/adt/float.c:1546 utils/adt/float.c:1576 utils/adt/numeric.c:7810 +#: utils/adt/float.c:1546 utils/adt/float.c:1576 utils/adt/numeric.c:7894 #, c-format msgid "cannot take logarithm of a negative number" msgstr "음수의 대수를 구할 수 없습니다." @@ -19223,12 +21003,12 @@ msgstr "음수의 대수를 구할 수 없습니다." msgid "input is out of range" msgstr "입력값이 범위를 벗어났습니다." -#: utils/adt/float.c:3532 utils/adt/numeric.c:1443 +#: utils/adt/float.c:3532 utils/adt/numeric.c:1493 #, c-format msgid "count must be greater than zero" msgstr "카운트 값은 0 보다 커야합니다" -#: utils/adt/float.c:3537 utils/adt/numeric.c:1450 +#: utils/adt/float.c:3537 utils/adt/numeric.c:1500 #, c-format msgid "operand, lower bound, and upper bound cannot be NaN" msgstr "피연산자, 하한 및 상한은 NaN일 수 없음" @@ -19238,264 +21018,269 @@ msgstr "피연산자, 하한 및 상한은 NaN일 수 없음" msgid "lower and upper bounds must be finite" msgstr "하한 및 상한은 유한한 값이어야 함" -#: utils/adt/float.c:3581 utils/adt/numeric.c:1463 +#: utils/adt/float.c:3581 utils/adt/numeric.c:1513 #, c-format msgid "lower bound cannot equal upper bound" msgstr "하한값은 상한값과 같을 수 없습니다" -#: utils/adt/formatting.c:485 +#: utils/adt/formatting.c:493 #, c-format msgid "invalid format specification for an interval value" msgstr "간격 값에 대한 형식 지정이 잘못됨" -#: utils/adt/formatting.c:486 +#: utils/adt/formatting.c:494 #, c-format msgid "Intervals are not tied to specific calendar dates." msgstr "간격이 특정 달력 날짜에 연결되어 있지 않습니다." -#: utils/adt/formatting.c:1058 +#: utils/adt/formatting.c:1060 #, c-format msgid "\"EEEE\" must be the last pattern used" msgstr "" -#: utils/adt/formatting.c:1066 +#: utils/adt/formatting.c:1068 #, c-format msgid "\"9\" must be ahead of \"PR\"" msgstr "???\"9\"는 \"PR\" 앞이어야 한다." -#: utils/adt/formatting.c:1082 +#: utils/adt/formatting.c:1084 #, c-format msgid "\"0\" must be ahead of \"PR\"" msgstr "???\"0\"은 \"PR\" 앞이어야 한다." -#: utils/adt/formatting.c:1109 +#: utils/adt/formatting.c:1111 #, c-format msgid "multiple decimal points" msgstr "???여러개의 소숫점" -#: utils/adt/formatting.c:1113 utils/adt/formatting.c:1196 +#: utils/adt/formatting.c:1115 utils/adt/formatting.c:1198 #, c-format msgid "cannot use \"V\" and decimal point together" msgstr "\"V\" 와 소숫점을 함께 쓸 수 없습니다." -#: utils/adt/formatting.c:1125 +#: utils/adt/formatting.c:1127 #, c-format msgid "cannot use \"S\" twice" msgstr "\"S\"를 두 번 사용할 수 없음" -#: utils/adt/formatting.c:1129 +#: utils/adt/formatting.c:1131 #, c-format msgid "cannot use \"S\" and \"PL\"/\"MI\"/\"SG\"/\"PR\" together" msgstr "\"S\" 와 \"PL\"/\"MI\"/\"SG\"/\"PR\" 를 함께 쓸 수 없습니다." -#: utils/adt/formatting.c:1149 +#: utils/adt/formatting.c:1151 #, c-format msgid "cannot use \"S\" and \"MI\" together" msgstr "\"S\" 와 \"MI\" 를 함께 쓸 수 없습니다." 
-#: utils/adt/formatting.c:1159 +#: utils/adt/formatting.c:1161 #, c-format msgid "cannot use \"S\" and \"PL\" together" msgstr "\"S\" 와 \"PL\" 를 함께 쓸 수 없습니다." -#: utils/adt/formatting.c:1169 +#: utils/adt/formatting.c:1171 #, c-format msgid "cannot use \"S\" and \"SG\" together" msgstr "\"S\" 와 \"SG\" 를 함께 쓸 수 없습니다." -#: utils/adt/formatting.c:1178 +#: utils/adt/formatting.c:1180 #, c-format msgid "cannot use \"PR\" and \"S\"/\"PL\"/\"MI\"/\"SG\" together" msgstr "\"PR\" 와 \"S\"/\"PL\"/\"MI\"/\"SG\" 를 함께 쓸 수 없습니다." -#: utils/adt/formatting.c:1204 +#: utils/adt/formatting.c:1206 #, c-format msgid "cannot use \"EEEE\" twice" msgstr "\"EEEE\"를 두 번 사용할 수 없음" -#: utils/adt/formatting.c:1210 +#: utils/adt/formatting.c:1212 #, c-format msgid "\"EEEE\" is incompatible with other formats" msgstr "\"EEEE\"는 다른 포맷과 호환하지 않습니다" -#: utils/adt/formatting.c:1211 +#: utils/adt/formatting.c:1213 #, c-format msgid "" "\"EEEE\" may only be used together with digit and decimal point patterns." msgstr "" -#: utils/adt/formatting.c:1411 +#: utils/adt/formatting.c:1402 #, c-format msgid "\"%s\" is not a number" msgstr "\"%s\"는 숫자가 아닙니다." -#: utils/adt/formatting.c:1512 utils/adt/formatting.c:1564 +#: utils/adt/formatting.c:1480 +#, c-format +msgid "case conversion failed: %s" +msgstr "잘못된 형 변환 규칙: %s" + +#: utils/adt/formatting.c:1546 #, c-format msgid "could not determine which collation to use for lower() function" msgstr "lower() 함수에서 사용할 정렬규칙(collation)을 결정할 수 없음" -#: utils/adt/formatting.c:1632 utils/adt/formatting.c:1684 +#: utils/adt/formatting.c:1670 #, c-format msgid "could not determine which collation to use for upper() function" msgstr "upper() 함수에서 사용할 정렬규칙(collation)을 결정할 수 없음" -#: utils/adt/formatting.c:1753 utils/adt/formatting.c:1817 +#: utils/adt/formatting.c:1795 #, c-format msgid "could not determine which collation to use for initcap() function" msgstr "initcap() 함수에서 사용할 정렬규칙(collation)을 결정할 수 없음" -#: utils/adt/formatting.c:2114 +#: utils/adt/formatting.c:2163 #, c-format msgid "invalid combination of date conventions" msgstr "날짜 변환을 위한 잘못된 조합" -#: utils/adt/formatting.c:2115 +#: utils/adt/formatting.c:2164 #, c-format msgid "" "Do not mix Gregorian and ISO week date conventions in a formatting template." msgstr "" "형식 템플릿에 그레고리오력과 ISO week date 변환을 함께 사용하지 마십시오." -#: utils/adt/formatting.c:2132 +#: utils/adt/formatting.c:2181 #, c-format msgid "conflicting values for \"%s\" field in formatting string" msgstr "형식 문자열에서 \"%s\" 필드의 값이 충돌함" -#: utils/adt/formatting.c:2134 +#: utils/adt/formatting.c:2183 #, c-format msgid "This value contradicts a previous setting for the same field type." msgstr "이 값은 동일한 필드 형식의 이전 설정과 모순됩니다." -#: utils/adt/formatting.c:2195 +#: utils/adt/formatting.c:2244 #, c-format msgid "source string too short for \"%s\" formatting field" msgstr "소스 문자열이 너무 짧아서 \"%s\" 형식 필드에 사용할 수 없음" -#: utils/adt/formatting.c:2197 +#: utils/adt/formatting.c:2246 #, c-format msgid "Field requires %d characters, but only %d remain." msgstr "필드에 %d자가 필요한데 %d자만 남았습니다." -#: utils/adt/formatting.c:2200 utils/adt/formatting.c:2214 +#: utils/adt/formatting.c:2249 utils/adt/formatting.c:2263 #, c-format msgid "" "If your source string is not fixed-width, try using the \"FM\" modifier." msgstr "소스 문자열이 고정 너비가 아닌 경우 \"FM\" 한정자를 사용해 보십시오." 
-#: utils/adt/formatting.c:2210 utils/adt/formatting.c:2223 -#: utils/adt/formatting.c:2353 +#: utils/adt/formatting.c:2259 utils/adt/formatting.c:2272 +#: utils/adt/formatting.c:2402 #, c-format msgid "invalid value \"%s\" for \"%s\"" msgstr "\"%s\" 값은 \"%s\"에 유효하지 않음" -#: utils/adt/formatting.c:2212 +#: utils/adt/formatting.c:2261 #, c-format msgid "Field requires %d characters, but only %d could be parsed." msgstr "필드에 %d자가 필요한데 %d자만 구문 분석할 수 있습니다." -#: utils/adt/formatting.c:2225 +#: utils/adt/formatting.c:2274 #, c-format msgid "Value must be an integer." msgstr "값은 정수여야 합니다." -#: utils/adt/formatting.c:2230 +#: utils/adt/formatting.c:2279 #, c-format msgid "value for \"%s\" in source string is out of range" msgstr "소스 문자열의 \"%s\" 값이 범위를 벗어남" -#: utils/adt/formatting.c:2232 +#: utils/adt/formatting.c:2281 #, c-format msgid "Value must be in the range %d to %d." msgstr "값은 %d에서 %d 사이의 범위에 있어야 합니다." -#: utils/adt/formatting.c:2355 +#: utils/adt/formatting.c:2404 #, c-format msgid "The given value did not match any of the allowed values for this field." msgstr "지정된 값이 이 필드에 허용되는 값과 일치하지 않습니다." -#: utils/adt/formatting.c:2550 utils/adt/formatting.c:2570 -#: utils/adt/formatting.c:2590 utils/adt/formatting.c:2610 -#: utils/adt/formatting.c:2629 utils/adt/formatting.c:2648 -#: utils/adt/formatting.c:2672 utils/adt/formatting.c:2690 -#: utils/adt/formatting.c:2708 utils/adt/formatting.c:2726 -#: utils/adt/formatting.c:2743 utils/adt/formatting.c:2760 +#: utils/adt/formatting.c:2589 utils/adt/formatting.c:2609 +#: utils/adt/formatting.c:2629 utils/adt/formatting.c:2649 +#: utils/adt/formatting.c:2668 utils/adt/formatting.c:2687 +#: utils/adt/formatting.c:2711 utils/adt/formatting.c:2729 +#: utils/adt/formatting.c:2747 utils/adt/formatting.c:2765 +#: utils/adt/formatting.c:2782 utils/adt/formatting.c:2799 #, c-format msgid "localized string format value too long" msgstr "" -#: utils/adt/formatting.c:3047 +#: utils/adt/formatting.c:3086 #, c-format -msgid "\"TZ\"/\"tz\"/\"OF\" format patterns are not supported in to_date" -msgstr "\"TZ\"/\"tz\"\"OF\" 형식 패턴은 to_date에서 지원되지 않음" +msgid "formatting field \"%s\" is only supported in to_char" +msgstr "\"%s\" 필드 양식은 to_char 함수에서만 지원합니다." -#: utils/adt/formatting.c:3156 +#: utils/adt/formatting.c:3197 #, c-format msgid "invalid input string for \"Y,YYY\"" msgstr "\"Y,YYY\"에 대한 입력 문자열이 잘못됨" -#: utils/adt/formatting.c:3668 +#: utils/adt/formatting.c:3703 #, c-format msgid "hour \"%d\" is invalid for the 12-hour clock" msgstr "시간 \"%d\"은(는) 12시간제에 유효하지 않음" -#: utils/adt/formatting.c:3670 +#: utils/adt/formatting.c:3705 #, c-format msgid "Use the 24-hour clock, or give an hour between 1 and 12." msgstr "24시간제를 사용하거나 1에서 12 사이의 시간을 지정하십시오." -#: utils/adt/formatting.c:3765 +#: utils/adt/formatting.c:3811 #, c-format msgid "cannot calculate day of year without year information" msgstr "연도 정보 없이 몇번째 날(day of year) 인지 계산할 수 없습니다." -#: utils/adt/formatting.c:4614 +#: utils/adt/formatting.c:4678 #, c-format msgid "\"EEEE\" not supported for input" msgstr "\"EEEE\" 입력 양식은 지원되지 않습니다." -#: utils/adt/formatting.c:4626 +#: utils/adt/formatting.c:4690 #, c-format msgid "\"RN\" not supported for input" msgstr "\"RN\" 입력 양식은 지원되지 않습니다." 
-#: utils/adt/genfile.c:62 +#: utils/adt/genfile.c:63 #, c-format msgid "reference to parent directory (\"..\") not allowed" msgstr "상위 디렉터리(\"..\") 참조는 허용되지 않음" -#: utils/adt/genfile.c:73 +#: utils/adt/genfile.c:74 #, c-format msgid "absolute path not allowed" msgstr "절대 경로는 허용하지 않음" -#: utils/adt/genfile.c:78 +#: utils/adt/genfile.c:79 #, c-format msgid "path must be in or below the current directory" msgstr "경로는 현재 디렉토리와 그 하위 디렉터리여야 합니다." -#: utils/adt/genfile.c:125 utils/adt/oracle_compat.c:184 +#: utils/adt/genfile.c:126 utils/adt/oracle_compat.c:184 #: utils/adt/oracle_compat.c:282 utils/adt/oracle_compat.c:758 #: utils/adt/oracle_compat.c:1059 #, c-format msgid "requested length too large" msgstr "요청된 길이가 너무 깁니다" -#: utils/adt/genfile.c:142 +#: utils/adt/genfile.c:143 #, c-format msgid "could not seek in file \"%s\": %m" msgstr "\"%s\" 파일에서 seek 작업을 할 수 없음: %m" -#: utils/adt/genfile.c:200 utils/adt/genfile.c:241 +#: utils/adt/genfile.c:201 utils/adt/genfile.c:242 #, c-format msgid "must be superuser to read files" msgstr "파일을 읽으려면 슈퍼유져여야함" -#: utils/adt/genfile.c:318 +#: utils/adt/genfile.c:319 #, c-format msgid "must be superuser to get file information" msgstr "파일 정보를 보려면 superuser여야함" -#: utils/adt/genfile.c:404 +#: utils/adt/genfile.c:405 #, c-format msgid "must be superuser to get directory listings" msgstr "디렉터리 목록을 보려면 superuser여야함" @@ -19591,27 +21376,22 @@ msgstr "int2vector 는 너무 많은 요소를 가지고 있습니다." msgid "invalid int2vector data" msgstr "잘못된 int2vector 자료" -#: utils/adt/int.c:243 utils/adt/oid.c:212 utils/adt/oid.c:293 +#: utils/adt/int.c:243 utils/adt/oid.c:215 utils/adt/oid.c:296 #, c-format msgid "oidvector has too many elements" msgstr "oidvector에 너무 많은 요소가 있습니다" -#: utils/adt/int.c:1362 utils/adt/int8.c:1460 utils/adt/numeric.c:1351 -#: utils/adt/timestamp.c:5561 utils/adt/timestamp.c:5642 +#: utils/adt/int.c:1347 utils/adt/int8.c:1460 utils/adt/numeric.c:1401 +#: utils/adt/timestamp.c:5178 utils/adt/timestamp.c:5259 #, c-format msgid "step size cannot equal zero" msgstr "단계 크기는 0일 수 없음" #: utils/adt/int8.c:98 utils/adt/int8.c:133 utils/adt/numutils.c:51 -#: utils/adt/numutils.c:61 utils/adt/numutils.c:103 +#: utils/adt/numutils.c:61 utils/adt/numutils.c:105 #, c-format msgid "invalid input syntax for integer: \"%s\"" -msgstr "잘못된 integer 자료형 입력 구문: \"%s\"" - -#: utils/adt/int8.c:114 -#, c-format -msgid "value \"%s\" is out of range for type bigint" -msgstr "입력한 \"%s\" 값은 bigint 자료형 범위를 초과했습니다" +msgstr "정수 자료형 대한 잘못된 입력 구문: \"%s\"" #: utils/adt/int8.c:500 utils/adt/int8.c:529 utils/adt/int8.c:550 #: utils/adt/int8.c:581 utils/adt/int8.c:615 utils/adt/int8.c:640 @@ -19621,7 +21401,7 @@ msgstr "입력한 \"%s\" 값은 bigint 자료형 범위를 초과했습니다" #: utils/adt/int8.c:964 utils/adt/int8.c:991 utils/adt/int8.c:1031 #: utils/adt/int8.c:1052 utils/adt/int8.c:1079 utils/adt/int8.c:1112 #: utils/adt/int8.c:1140 utils/adt/int8.c:1161 utils/adt/int8.c:1188 -#: utils/adt/int8.c:1361 utils/adt/int8.c:1400 utils/adt/numeric.c:2955 +#: utils/adt/int8.c:1361 utils/adt/int8.c:1400 utils/adt/numeric.c:3005 #: utils/adt/varbit.c:1655 #, c-format msgid "bigint out of range" @@ -19632,49 +21412,38 @@ msgstr "bigint의 범위를 벗어났습니다." msgid "OID out of range" msgstr "OID의 범위를 벗어났습니다." 
-#: utils/adt/json.c:785 utils/adt/json.c:825 utils/adt/json.c:840 -#: utils/adt/json.c:851 utils/adt/json.c:861 utils/adt/json.c:912 -#: utils/adt/json.c:943 utils/adt/json.c:961 utils/adt/json.c:973 -#: utils/adt/json.c:985 utils/adt/json.c:1130 utils/adt/json.c:1144 -#: utils/adt/json.c:1155 utils/adt/json.c:1163 utils/adt/json.c:1171 -#: utils/adt/json.c:1179 utils/adt/json.c:1187 utils/adt/json.c:1195 -#: utils/adt/json.c:1203 utils/adt/json.c:1211 utils/adt/json.c:1241 -#, c-format -msgid "invalid input syntax for type json" -msgstr "json 자료형에 대한 잘못된 입력" - #: utils/adt/json.c:786 #, c-format msgid "Character with value 0x%02x must be escaped." msgstr "" -#: utils/adt/json.c:826 +#: utils/adt/json.c:827 #, c-format msgid "\"\\u\" must be followed by four hexadecimal digits." msgstr "\"\\u\" 표기법은 뒤에 4개의 16진수가 와야합니다." -#: utils/adt/json.c:841 +#: utils/adt/json.c:843 #, c-format msgid "Unicode high surrogate must not follow a high surrogate." msgstr "" -#: utils/adt/json.c:852 utils/adt/json.c:862 utils/adt/json.c:913 -#: utils/adt/json.c:974 utils/adt/json.c:986 +#: utils/adt/json.c:854 utils/adt/json.c:864 utils/adt/json.c:916 +#: utils/adt/json.c:978 utils/adt/json.c:990 #, c-format msgid "Unicode low surrogate must follow a high surrogate." msgstr "" -#: utils/adt/json.c:877 utils/adt/json.c:900 +#: utils/adt/json.c:879 utils/adt/json.c:902 #, c-format msgid "unsupported Unicode escape sequence" msgstr "지원하지 않는 유니코드 이스케이프 조합" -#: utils/adt/json.c:878 +#: utils/adt/json.c:880 #, c-format msgid "\\u0000 cannot be converted to text." msgstr "\\u0000 값은 text 형으로 변환할 수 없음." -#: utils/adt/json.c:901 +#: utils/adt/json.c:903 #, c-format msgid "" "Unicode escape values cannot be used for code point values above 007F when " @@ -19683,93 +21452,90 @@ msgstr "" "서버 인코딩이 UTF8이 아닌 경우 007F보다 큰 코드 지점 값에는 유니코드 이스케이" "프 값을 사용할 수 없음" -#: utils/adt/json.c:944 utils/adt/json.c:962 +#: utils/adt/json.c:948 utils/adt/json.c:966 #, c-format msgid "Escape sequence \"\\%s\" is invalid." msgstr "잘못된 이스케이프 조합: \"\\%s\"" -#: utils/adt/json.c:1131 +#: utils/adt/json.c:1135 #, c-format msgid "The input string ended unexpectedly." msgstr "입력 문자열이 예상치 않게 끝났음." -#: utils/adt/json.c:1145 +#: utils/adt/json.c:1149 #, c-format msgid "Expected end of input, but found \"%s\"." msgstr "입력 자료의 끝을 기대했는데, \"%s\" 값이 더 있음." -#: utils/adt/json.c:1156 +#: utils/adt/json.c:1160 #, c-format msgid "Expected JSON value, but found \"%s\"." msgstr "JSON 값을 기대했는데, \"%s\" 값임" -#: utils/adt/json.c:1164 utils/adt/json.c:1212 +#: utils/adt/json.c:1168 utils/adt/json.c:1216 #, c-format msgid "Expected string, but found \"%s\"." msgstr "문자열 값을 기대했는데, \"%s\" 값임" -#: utils/adt/json.c:1172 +#: utils/adt/json.c:1176 #, c-format msgid "Expected array element or \"]\", but found \"%s\"." msgstr "\"]\" 가 필요한데 \"%s\"이(가) 있음" -#: utils/adt/json.c:1180 +#: utils/adt/json.c:1184 #, c-format msgid "Expected \",\" or \"]\", but found \"%s\"." msgstr "\",\" 또는 \"]\"가 필요한데 \"%s\"이(가) 있음" -#: utils/adt/json.c:1188 +#: utils/adt/json.c:1192 #, c-format msgid "Expected string or \"}\", but found \"%s\"." msgstr "\"}\"가 필요한데 \"%s\"이(가) 있음" -#: utils/adt/json.c:1196 +#: utils/adt/json.c:1200 #, c-format msgid "Expected \":\", but found \"%s\"." msgstr "\":\"가 필요한데 \"%s\"이(가) 있음" -#: utils/adt/json.c:1204 +#: utils/adt/json.c:1208 #, c-format msgid "Expected \",\" or \"}\", but found \"%s\"." msgstr "\",\" 또는 \"}\"가 필요한데 \"%s\"이(가) 있음" -#: utils/adt/json.c:1242 +#: utils/adt/json.c:1246 #, c-format msgid "Token \"%s\" is invalid." 
msgstr "잘못된 토큰: \"%s\"" -#: utils/adt/json.c:1314 +#: utils/adt/json.c:1318 #, c-format msgid "JSON data, line %d: %s%s%s" msgstr "JSON 자료, %d 번째 줄: %s%s%s" -#: utils/adt/json.c:1469 utils/adt/jsonb.c:724 +#: utils/adt/json.c:1474 utils/adt/jsonb.c:725 #, c-format msgid "key value must be scalar, not array, composite, or json" msgstr "" "키 값은 스칼라 형이어야 함. 배열, 복합 자료형, json 형은 사용할 수 없음" -#: utils/adt/json.c:2006 -#, c-format -msgid "could not determine data type for argument 1" -msgstr "첫번째 매개 변수의 자료형을 알수가 없습니다." - -#: utils/adt/json.c:2016 +#: utils/adt/json.c:2011 utils/adt/json.c:2021 utils/adt/json.c:2147 +#: utils/adt/json.c:2168 utils/adt/json.c:2227 utils/adt/jsonb.c:1215 +#: utils/adt/jsonb.c:1238 utils/adt/jsonb.c:1298 #, c-format -msgid "could not determine data type for argument 2" -msgstr "두번째 매개 변수의 자료형을 알수가 없습니다." +msgid "could not determine data type for argument %d" +msgstr "%d번째 인자의 자료형을 알수가 없습니다." -#: utils/adt/json.c:2040 utils/adt/jsonb.c:1781 +#: utils/adt/json.c:2045 utils/adt/jsonb.c:1782 #, c-format msgid "field name must not be null" msgstr "필드 이름이 null 이면 안됩니다" -#: utils/adt/json.c:2117 +#: utils/adt/json.c:2122 #, c-format msgid "argument list must have even number of elements" msgstr "인자 목록은 요소수의 짝수개여야 합니다." -#: utils/adt/json.c:2118 +#: utils/adt/json.c:2123 #, c-format msgid "" "The arguments of json_build_object() must consist of alternating keys and " @@ -19777,33 +21543,28 @@ msgid "" msgstr "" "json_build_object() 함수의 인자들은 각각 key, value 쌍으로 있어야 합니다." -#: utils/adt/json.c:2142 utils/adt/json.c:2163 utils/adt/json.c:2222 -#, c-format -msgid "could not determine data type for argument %d" -msgstr "%d번째 인자의 자료형을 알수가 없습니다." - -#: utils/adt/json.c:2148 +#: utils/adt/json.c:2153 #, c-format msgid "argument %d cannot be null" msgstr "%d 번째 인자는 null 이면 안됩니다" -#: utils/adt/json.c:2149 +#: utils/adt/json.c:2154 #, c-format msgid "Object keys should be text." msgstr "객체 키는 문자열이어야 합니다." -#: utils/adt/json.c:2284 utils/adt/jsonb.c:1363 +#: utils/adt/json.c:2289 utils/adt/jsonb.c:1364 #, c-format msgid "array must have two columns" msgstr "배열은 두개의 칼럼이어야 함" -#: utils/adt/json.c:2308 utils/adt/json.c:2392 utils/adt/jsonb.c:1387 -#: utils/adt/jsonb.c:1482 +#: utils/adt/json.c:2313 utils/adt/json.c:2397 utils/adt/jsonb.c:1388 +#: utils/adt/jsonb.c:1483 #, c-format msgid "null value not allowed for object key" msgstr "객체 키 값으로 null 을 허용하지 않음" -#: utils/adt/json.c:2381 utils/adt/jsonb.c:1471 +#: utils/adt/json.c:2386 utils/adt/jsonb.c:1472 #, c-format msgid "mismatched array dimensions" msgstr "배열 차수가 안맞음" @@ -19819,169 +21580,191 @@ msgid "" "Due to an implementation restriction, jsonb strings cannot exceed %d bytes." msgstr "구현상 제한으로 jsonb 문자열은 %d 바이트를 넘을 수 없습니다." -#: utils/adt/jsonb.c:1182 +#: utils/adt/jsonb.c:1183 #, c-format msgid "invalid number of arguments: object must be matched key value pairs" msgstr "잘못된 인자 번호: 객체는 key - value 쌍으로 구성되어야 합니다" -#: utils/adt/jsonb.c:1195 +#: utils/adt/jsonb.c:1196 #, c-format msgid "argument %d: key must not be null" msgstr "%d 번째 인자: 키 값은 null이면 안됩니다." 
-#: utils/adt/jsonb.c:1214 utils/adt/jsonb.c:1237 utils/adt/jsonb.c:1297 -#, c-format -msgid "argument %d: could not determine data type" -msgstr "%d 번째 인자: 자료형을 파악할 수 없음" - -#: utils/adt/jsonb.c:1834 +#: utils/adt/jsonb.c:1835 #, c-format msgid "object keys must be strings" msgstr "객체 키는 문자열이어야 합니다" -#: utils/adt/jsonb_util.c:656 +#: utils/adt/jsonb_util.c:657 #, c-format msgid "number of jsonb object pairs exceeds the maximum allowed (%zu)" msgstr "jsonb 객체 쌍의 개수가 최대치를 초과함 (%zu)" -#: utils/adt/jsonb_util.c:697 +#: utils/adt/jsonb_util.c:698 #, c-format msgid "number of jsonb array elements exceeds the maximum allowed (%zu)" msgstr "jsonb 배열 요소 개수가 최대치를 초과함 (%zu)" -#: utils/adt/jsonb_util.c:1525 utils/adt/jsonb_util.c:1545 +#: utils/adt/jsonb_util.c:1526 utils/adt/jsonb_util.c:1546 #, c-format msgid "total size of jsonb array elements exceeds the maximum of %u bytes" msgstr "jsonb 배열 요소 총 크기가 최대치를 초과함 (%u 바이트)" -#: utils/adt/jsonb_util.c:1606 utils/adt/jsonb_util.c:1641 -#: utils/adt/jsonb_util.c:1661 +#: utils/adt/jsonb_util.c:1607 utils/adt/jsonb_util.c:1642 +#: utils/adt/jsonb_util.c:1662 #, c-format msgid "total size of jsonb object elements exceeds the maximum of %u bytes" msgstr "jsonb 객체 요소들의 총 크기가 최대치를 초과함 (%u 바이트)" -#: utils/adt/jsonfuncs.c:305 utils/adt/jsonfuncs.c:470 -#: utils/adt/jsonfuncs.c:2057 utils/adt/jsonfuncs.c:2498 -#: utils/adt/jsonfuncs.c:3004 +#: utils/adt/jsonfuncs.c:511 utils/adt/jsonfuncs.c:676 +#: utils/adt/jsonfuncs.c:2263 utils/adt/jsonfuncs.c:2699 +#: utils/adt/jsonfuncs.c:3393 utils/adt/jsonfuncs.c:3677 #, c-format msgid "cannot call %s on a scalar" msgstr "스칼라형에서는 %s 호출 할 수 없음" -#: utils/adt/jsonfuncs.c:310 utils/adt/jsonfuncs.c:457 -#: utils/adt/jsonfuncs.c:2487 +#: utils/adt/jsonfuncs.c:516 utils/adt/jsonfuncs.c:663 +#: utils/adt/jsonfuncs.c:2701 utils/adt/jsonfuncs.c:3382 #, c-format msgid "cannot call %s on an array" msgstr "배열형에서는 %s 호출 할 수 없음" -#: utils/adt/jsonfuncs.c:1373 utils/adt/jsonfuncs.c:1408 +#: utils/adt/jsonfuncs.c:1579 utils/adt/jsonfuncs.c:1614 #, c-format msgid "cannot get array length of a scalar" msgstr "스칼라형의 배열 길이를 구할 수 없음" -#: utils/adt/jsonfuncs.c:1377 utils/adt/jsonfuncs.c:1396 +#: utils/adt/jsonfuncs.c:1583 utils/adt/jsonfuncs.c:1602 #, c-format msgid "cannot get array length of a non-array" msgstr "비배열형 자료의 배열 길이를 구할 수 없음" -#: utils/adt/jsonfuncs.c:1473 +#: utils/adt/jsonfuncs.c:1679 #, c-format msgid "cannot call %s on a non-object" msgstr "비객체형에서 %s 호출 할 수 없음" -#: utils/adt/jsonfuncs.c:1491 utils/adt/jsonfuncs.c:2170 -#: utils/adt/jsonfuncs.c:2707 +#: utils/adt/jsonfuncs.c:1697 utils/adt/jsonfuncs.c:3208 +#: utils/adt/jsonfuncs.c:3502 #, c-format msgid "" "function returning record called in context that cannot accept type record" msgstr "반환 자료형이 record인데 함수가 그 자료형으로 반환하지 않음" -#: utils/adt/jsonfuncs.c:1730 +#: utils/adt/jsonfuncs.c:1936 #, c-format msgid "cannot deconstruct an array as an object" msgstr "" -#: utils/adt/jsonfuncs.c:1742 +#: utils/adt/jsonfuncs.c:1948 #, c-format msgid "cannot deconstruct a scalar" msgstr "스칼라형으로 재구축할 수 없음" -#: utils/adt/jsonfuncs.c:1788 +#: utils/adt/jsonfuncs.c:1994 #, c-format msgid "cannot extract elements from a scalar" msgstr "스칼라형에서 요소를 추출할 수 없음" -#: utils/adt/jsonfuncs.c:1792 +#: utils/adt/jsonfuncs.c:1998 #, c-format msgid "cannot extract elements from an object" msgstr "객체형에서 요소를 추출할 수 없음" -#: utils/adt/jsonfuncs.c:2044 utils/adt/jsonfuncs.c:2803 +#: utils/adt/jsonfuncs.c:2250 utils/adt/jsonfuncs.c:3566 #, c-format msgid "cannot call %s on a non-array" msgstr "비배열형에서 %s 호출 할 수 없음" 
-#: utils/adt/jsonfuncs.c:2131 utils/adt/jsonfuncs.c:2683
+#: utils/adt/jsonfuncs.c:2316 utils/adt/jsonfuncs.c:2321
+#: utils/adt/jsonfuncs.c:2338 utils/adt/jsonfuncs.c:2344
+#, c-format
+msgid "expected json array"
+msgstr "json 배열이 필요함"
+
+#: utils/adt/jsonfuncs.c:2317
+#, c-format
+msgid "See the value of key \"%s\"."
+msgstr "\"%s\" 키의 값을 확인하세요"
+
+#: utils/adt/jsonfuncs.c:2339
+#, c-format
+msgid "See the array element %s of key \"%s\"."
+msgstr "%s 배열 요소, 해당 키: \"%s\" 참조"
+
+#: utils/adt/jsonfuncs.c:2345
+#, c-format
+msgid "See the array element %s."
+msgstr "배열 요소: %s 참조"
+
+#: utils/adt/jsonfuncs.c:2380
+#, c-format
+msgid "malformed json array"
+msgstr "잘못된 json 배열"
+
+#: utils/adt/jsonfuncs.c:3168 utils/adt/jsonfuncs.c:3478
#, c-format
msgid "first argument of %s must be a row type"
msgstr "%s의 첫번째 인자는 row 형이어야 합니다"

-#: utils/adt/jsonfuncs.c:2172
+#: utils/adt/jsonfuncs.c:3210
#, c-format
msgid ""
"Try calling the function in the FROM clause using a column definition list."
msgstr "함수를 호출 할 때 FROM 절에서 칼럼 정의 목록도 함께 지정해야 합니다."

-#: utils/adt/jsonfuncs.c:2819 utils/adt/jsonfuncs.c:2986
+#: utils/adt/jsonfuncs.c:3583 utils/adt/jsonfuncs.c:3659
#, c-format
msgid "argument of %s must be an array of objects"
msgstr "%s의 인자는 객체의 배열이어야 합니다"

-#: utils/adt/jsonfuncs.c:2843
+#: utils/adt/jsonfuncs.c:3611
#, c-format
msgid "cannot call %s on an object"
msgstr "객체에서 %s 호출할 수 없음"

-#: utils/adt/jsonfuncs.c:3410 utils/adt/jsonfuncs.c:3463
+#: utils/adt/jsonfuncs.c:4087 utils/adt/jsonfuncs.c:4146
+#: utils/adt/jsonfuncs.c:4226
#, c-format
msgid "cannot delete from scalar"
msgstr "스칼라형에서 삭제 할 수 없음"

-#: utils/adt/jsonfuncs.c:3468
+#: utils/adt/jsonfuncs.c:4231
#, c-format
msgid "cannot delete from object using integer index"
msgstr "인덱스 번호를 사용해서 객체에서 삭제 할 수 없음"

-#: utils/adt/jsonfuncs.c:3534 utils/adt/jsonfuncs.c:3626
+#: utils/adt/jsonfuncs.c:4297 utils/adt/jsonfuncs.c:4389
#, c-format
msgid "cannot set path in scalar"
msgstr "스칼라형에는 path 를 지정할 수 없음"

-#: utils/adt/jsonfuncs.c:3579
+#: utils/adt/jsonfuncs.c:4342
#, c-format
msgid "cannot delete path in scalar"
msgstr "스칼라형에서 path를 지울 수 없음"

-#: utils/adt/jsonfuncs.c:3749
+#: utils/adt/jsonfuncs.c:4512
#, c-format
msgid "invalid concatenation of jsonb objects"
msgstr "jsonb 객체들의 잘못된 결합"

-#: utils/adt/jsonfuncs.c:3783
+#: utils/adt/jsonfuncs.c:4546
#, c-format
msgid "path element at position %d is null"
msgstr "%d 위치의 path 요소는 null 입니다."

-#: utils/adt/jsonfuncs.c:3869
+#: utils/adt/jsonfuncs.c:4632
#, c-format
msgid "cannot replace existing key"
msgstr "이미 있는 키로는 대체할 수 없음"

-#: utils/adt/jsonfuncs.c:3870
+#: utils/adt/jsonfuncs.c:4633
#, c-format
msgid "Try using the function jsonb_set to replace key value."
msgstr "키 값을 변경하려면, jsonb_set 함수를 사용하세요."

-#: utils/adt/jsonfuncs.c:3952
+#: utils/adt/jsonfuncs.c:4715
#, c-format
msgid "path element at position %d is not an integer: \"%s\""
msgstr "%d 번째 위치의 path 요소는 정수가 아님: \"%s\""
@@ -19991,7 +21774,7 @@ msgstr "%d 번째 위치의 path 요소는 정수가 아님: \"%s\""
#, c-format
msgid "levenshtein argument exceeds maximum length of %d characters"
msgstr "levenshtein 인자값으로 그 길이가 %d 문자의 최대 길이를 초과했음"

-#: utils/adt/like.c:212 utils/adt/selfuncs.c:5333
+#: utils/adt/like.c:183 utils/adt/selfuncs.c:5562
#, c-format
msgid "could not determine which collation to use for ILIKE"
msgstr "ILIKE 연산에서 사용할 정렬규칙(collation)을 결정할 수 없음"
@@ -20011,32 +21794,40 @@ msgstr "잘못된 이스케이프 문자열"
msgid "Escape string must be empty or one character."
msgstr "이스케이프 문자열은 비어있거나 한개의 문자여야 합니다."
-#: utils/adt/lockfuncs.c:545
+#: utils/adt/lockfuncs.c:664
#, c-format
msgid "cannot use advisory locks during a parallel operation"
msgstr "병렬 작업 중에는 자문 잠금을 사용할 수 없습니다"

-#: utils/adt/mac.c:68
-#, c-format
-msgid "invalid input syntax for type macaddr: \"%s\""
-msgstr "macaddr 자료형에 대한 잘못된 입력: \"%s\""
-
-#: utils/adt/mac.c:75
+#: utils/adt/mac.c:102
#, c-format
msgid "invalid octet value in \"macaddr\" value: \"%s\""
msgstr "\"macaddr\"에 대한 잘못된 옥텟(octet) 값: \"%s\""

-#: utils/adt/misc.c:239
+#: utils/adt/mac8.c:554
+#, c-format
+msgid "macaddr8 data out of range to convert to macaddr"
+msgstr ""
+
+#: utils/adt/mac8.c:555
+#, c-format
+msgid ""
+"Only addresses that have FF and FE as values in the 4th and 5th bytes from "
+"the left, for example xx:xx:xx:ff:fe:xx:xx:xx, are eligible to be converted "
+"from macaddr8 to macaddr."
+msgstr ""
+
+#: utils/adt/misc.c:238
#, c-format
msgid "PID %d is not a PostgreSQL server process"
msgstr "PID %d 프로그램은 PostgreSQL 서버 프로세스가 아닙니다"

-#: utils/adt/misc.c:290
+#: utils/adt/misc.c:289
#, c-format
msgid "must be a superuser to cancel superuser query"
msgstr "슈퍼유저의 쿼리를 중지하려면 슈퍼유저여야 합니다."

-#: utils/adt/misc.c:295
+#: utils/adt/misc.c:294
#, c-format
msgid ""
"must be a member of the role whose query is being canceled or member of "
@@ -20045,12 +21836,12 @@ msgstr ""
"쿼리 작업 취소하려면 작업자의 소속 맴버이거나 pg_signal_backend 소속 맴버여"
"야 합니다"

-#: utils/adt/misc.c:314
+#: utils/adt/misc.c:313
#, c-format
msgid "must be a superuser to terminate superuser process"
msgstr "슈퍼유저의 세션을 정리하려면 슈퍼유저여야 합니다."

-#: utils/adt/misc.c:319
+#: utils/adt/misc.c:318
#, c-format
msgid ""
"must be a member of the role whose process is being terminated or member of "
@@ -20059,93 +21850,98 @@ msgstr ""
"세션을 종료하려면 접속자의 소속 맴버이거나 pg_signal_backend 소속 맴버여야 합"
"니다"

-#: utils/adt/misc.c:336
+#: utils/adt/misc.c:335
#, c-format
msgid "failed to send signal to postmaster: %m"
msgstr "postmaster로 시그널 보내기 실패: %m"

-#: utils/adt/misc.c:356
+#: utils/adt/misc.c:355
#, c-format
msgid "rotation not possible because log collection not active"
msgstr "로그 수집이 활성 상태가 아니므로 회전할 수 없음"

-#: utils/adt/misc.c:393
+#: utils/adt/misc.c:392
#, c-format
msgid "global tablespace never has databases"
msgstr "전역 테이블스페이스는 데이터베이스를 결코 포함하지 않습니다."

-#: utils/adt/misc.c:414
+#: utils/adt/misc.c:413
#, c-format
msgid "%u is not a tablespace OID"
msgstr "%u 테이블스페이스 OID가 아님"

-#: utils/adt/misc.c:611
+#: utils/adt/misc.c:606
msgid "unreserved"
msgstr "예약되지 않음"

-#: utils/adt/misc.c:615
+#: utils/adt/misc.c:610
msgid "unreserved (cannot be function or type name)"
msgstr "예약되지 않음(함수, 자료형 이름일 수 없음)"

-#: utils/adt/misc.c:619
+#: utils/adt/misc.c:614
msgid "reserved (can be function or type name)"
msgstr "예약됨(함수, 자료형 이름일 수 있음)"

-#: utils/adt/misc.c:623
+#: utils/adt/misc.c:618
msgid "reserved"
msgstr "예약됨"

-#: utils/adt/misc.c:797 utils/adt/misc.c:811 utils/adt/misc.c:850
-#: utils/adt/misc.c:856 utils/adt/misc.c:862 utils/adt/misc.c:885
+#: utils/adt/misc.c:792 utils/adt/misc.c:806 utils/adt/misc.c:845
+#: utils/adt/misc.c:851 utils/adt/misc.c:857 utils/adt/misc.c:880
#, c-format
msgid "string is not a valid identifier: \"%s\""
msgstr "문자열이 타당한 식별자가 아님: \"%s\""

-#: utils/adt/misc.c:799
+#: utils/adt/misc.c:794
#, c-format
msgid "String has unclosed double quotes."
msgstr "문자열 표기에서 큰따옴표 짝이 안맞습니다."

-#: utils/adt/misc.c:813
+#: utils/adt/misc.c:808
#, c-format
msgid "Quoted identifier must not be empty."
msgstr "인용부호 있는 식별자: 비어있으면 안됩니다"

-#: utils/adt/misc.c:852
+#: utils/adt/misc.c:847
#, c-format
msgid "No valid identifier before \".\"."
msgstr "\".\" 전에 타당한 식별자가 없음" -#: utils/adt/misc.c:858 +#: utils/adt/misc.c:853 #, c-format msgid "No valid identifier after \".\"." msgstr "\".\" 뒤에 타당한 식별자 없음" -#: utils/adt/nabstime.c:136 +#: utils/adt/misc.c:914 +#, c-format +msgid "log format \"%s\" is not supported" +msgstr "\"%s\" 양식의 로그는 지원하지 않습니다" + +#: utils/adt/misc.c:915 +#, c-format +msgid "The supported log formats are \"stderr\" and \"csvlog\"." +msgstr "" + +#: utils/adt/nabstime.c:137 #, c-format msgid "invalid time zone name: \"%s\"" msgstr "잘못된 타임존 이름: \"%s\"" -#: utils/adt/nabstime.c:481 utils/adt/nabstime.c:554 +#: utils/adt/nabstime.c:482 utils/adt/nabstime.c:555 #, c-format msgid "cannot convert abstime \"invalid\" to timestamp" msgstr "\"invalid\" abstime 자료형을 timestamp 자료형으로 변환할 수 없습니다." -#: utils/adt/nabstime.c:781 +#: utils/adt/nabstime.c:782 #, c-format msgid "invalid status in external \"tinterval\" value" msgstr "외부 \"tinterval\" 값에 잘못된 상태가 있음" -#: utils/adt/nabstime.c:855 +#: utils/adt/nabstime.c:852 #, c-format msgid "cannot convert reltime \"invalid\" to interval" msgstr "reltime \"invalid\"를 interval로 변환할 수 없음" -#: utils/adt/nabstime.c:1550 -#, c-format -msgid "invalid input syntax for type tinterval: \"%s\"" -msgstr "tinterval 자료형에 대한 잘못된 입력: \"%s\"" - #: utils/adt/network.c:69 #, c-format msgid "invalid cidr value: \"%s\"" @@ -20156,8 +21952,8 @@ msgstr "cidr 자료형에 대한 잘못된 입력: \"%s\"" msgid "Value has bits set to right of mask." msgstr "마스크 오른쪽에 설정된 비트가 값에 포함되어 있습니다." -#: utils/adt/network.c:111 utils/adt/network.c:607 utils/adt/network.c:632 -#: utils/adt/network.c:657 +#: utils/adt/network.c:111 utils/adt/network.c:582 utils/adt/network.c:607 +#: utils/adt/network.c:632 #, c-format msgid "could not format inet value: %m" msgstr "inet 값의 형식을 지정할 수 없음: %m" @@ -20185,120 +21981,114 @@ msgstr "외부 \"%s\" 값의 길이가 잘못 되었음" msgid "invalid external \"cidr\" value" msgstr "외부 \"cidr\" 값이 잘못됨" -#: utils/adt/network.c:321 utils/adt/network.c:348 +#: utils/adt/network.c:295 utils/adt/network.c:318 #, c-format msgid "invalid mask length: %d" msgstr "잘못된 마스크 길이: %d" -#: utils/adt/network.c:675 +#: utils/adt/network.c:650 #, c-format msgid "could not format cidr value: %m" msgstr "cidr 값을 처리할 수 없음: %m" -#: utils/adt/network.c:917 +#: utils/adt/network.c:883 #, c-format msgid "cannot merge addresses from different families" msgstr "서로 다른 페밀리에서는 주소를 병합할 수 없음" -#: utils/adt/network.c:1343 +#: utils/adt/network.c:1302 #, c-format msgid "cannot AND inet values of different sizes" msgstr "서로 크기가 틀린 inet 값들은 AND 연산을 할 수 없습니다." -#: utils/adt/network.c:1375 +#: utils/adt/network.c:1334 #, c-format msgid "cannot OR inet values of different sizes" msgstr "서로 크기가 틀린 inet 값들은 OR 연산을 할 수 없습니다." -#: utils/adt/network.c:1436 utils/adt/network.c:1512 +#: utils/adt/network.c:1395 utils/adt/network.c:1471 #, c-format msgid "result is out of range" msgstr "결과가 범위를 벗어났습니다." 
-#: utils/adt/network.c:1477 +#: utils/adt/network.c:1436 #, c-format msgid "cannot subtract inet values of different sizes" msgstr "inet 값에서 서로 크기가 틀리게 부분 추출(subtract)할 수 없음" -#: utils/adt/numeric.c:542 utils/adt/numeric.c:569 utils/adt/numeric.c:5405 -#: utils/adt/numeric.c:5428 utils/adt/numeric.c:5452 -#, c-format -msgid "invalid input syntax for type numeric: \"%s\"" -msgstr "수치 자료형의 입력 구문에 오류가 있습니다: \"%s\"" - -#: utils/adt/numeric.c:768 +#: utils/adt/numeric.c:819 #, c-format msgid "invalid sign in external \"numeric\" value" msgstr "외부 \"numeric\" 값의 부호가 잘못됨" -#: utils/adt/numeric.c:774 +#: utils/adt/numeric.c:825 #, c-format msgid "invalid scale in external \"numeric\" value" msgstr "외부 \"numeric\" 값의 잘못된 스케일" -#: utils/adt/numeric.c:783 +#: utils/adt/numeric.c:834 #, c-format msgid "invalid digit in external \"numeric\" value" msgstr "외부 \"numeric\" 값의 숫자가 잘못됨" -#: utils/adt/numeric.c:974 utils/adt/numeric.c:988 +#: utils/adt/numeric.c:1024 utils/adt/numeric.c:1038 #, c-format msgid "NUMERIC precision %d must be between 1 and %d" msgstr "NUMERIC 정밀도 %d 값은 범위(1 .. %d)를 벗어났습니다." -#: utils/adt/numeric.c:979 +#: utils/adt/numeric.c:1029 #, c-format msgid "NUMERIC scale %d must be between 0 and precision %d" msgstr "NUMERIC 스케일 %d 값은 정밀도 범위(0 .. %d)를 벗어났습니다." -#: utils/adt/numeric.c:997 +#: utils/adt/numeric.c:1047 #, c-format msgid "invalid NUMERIC type modifier" msgstr "잘못된 NUMERIC 형식 한정자" -#: utils/adt/numeric.c:1329 +#: utils/adt/numeric.c:1379 #, c-format msgid "start value cannot be NaN" msgstr "시작값은 NaN 일 수 없음" -#: utils/adt/numeric.c:1334 +#: utils/adt/numeric.c:1384 #, c-format msgid "stop value cannot be NaN" msgstr "종료값은 NaN 일 수 없음" -#: utils/adt/numeric.c:1344 +#: utils/adt/numeric.c:1394 #, c-format msgid "step size cannot be NaN" msgstr "단계 크기는 NaN 일 수 없음" -#: utils/adt/numeric.c:2539 utils/adt/numeric.c:5467 utils/adt/numeric.c:5912 -#: utils/adt/numeric.c:7616 utils/adt/numeric.c:8041 utils/adt/numeric.c:8156 -#: utils/adt/numeric.c:8229 +#: utils/adt/numeric.c:2589 utils/adt/numeric.c:5551 utils/adt/numeric.c:5996 +#: utils/adt/numeric.c:7700 utils/adt/numeric.c:8125 utils/adt/numeric.c:8239 +#: utils/adt/numeric.c:8312 #, c-format msgid "value overflows numeric format" msgstr "값이 수치 형식에 넘처남" -#: utils/adt/numeric.c:2881 +#: utils/adt/numeric.c:2931 #, c-format msgid "cannot convert NaN to integer" msgstr "NaN 값을 정수형으로 변환할 수 없습니다" -#: utils/adt/numeric.c:2947 +#: utils/adt/numeric.c:2997 #, c-format msgid "cannot convert NaN to bigint" msgstr "NaN 값을 bigint형으로 변환할 수 없습니다" -#: utils/adt/numeric.c:2992 +#: utils/adt/numeric.c:3042 #, c-format msgid "cannot convert NaN to smallint" msgstr "NaN 값을 smallint형으로 변환할 수 없습니다" -#: utils/adt/numeric.c:5982 +#: utils/adt/numeric.c:6066 #, c-format msgid "numeric field overflow" msgstr "수치 필드 오버플로우" -#: utils/adt/numeric.c:5983 +#: utils/adt/numeric.c:6067 #, c-format msgid "" "A field with precision %d, scale %d must round to an absolute value less " @@ -20307,37 +22097,12 @@ msgstr "" "전체 자릿수 %d, 소수 자릿수 %d의 필드는 %s%d보다 작은 절대 값으로 반올림해야 " "합니다." 
-#: utils/adt/numeric.c:6254 utils/adt/numeric.c:6280 -#, c-format -msgid "invalid input syntax for type double precision: \"%s\"" -msgstr "double precision 자료형에 대한 잘못된 입력: \"%s\"" - -#: utils/adt/numutils.c:75 -#, c-format -msgid "value \"%s\" is out of range for type integer" -msgstr "입력한 \"%s\" 값은 integer 자료형 범위를 초과했습니다" - -#: utils/adt/numutils.c:81 -#, c-format -msgid "value \"%s\" is out of range for type smallint" -msgstr "입력한 \"%s\" 값은 smallint 자료형 범위를 초과했습니다" - -#: utils/adt/numutils.c:87 +#: utils/adt/numutils.c:89 #, c-format msgid "value \"%s\" is out of range for 8-bit integer" msgstr "값 \"%s\"은(는) 8비트 정수의 범위를 벗어남" -#: utils/adt/oid.c:43 utils/adt/oid.c:57 utils/adt/oid.c:63 utils/adt/oid.c:84 -#, c-format -msgid "invalid input syntax for type oid: \"%s\"" -msgstr "잘못된 oid 자료형의 입력: \"%s\"" - -#: utils/adt/oid.c:69 utils/adt/oid.c:107 -#, c-format -msgid "value \"%s\" is out of range for type oid" -msgstr "입력한 \"%s\" 값은 oid 자료형 범위를 초과했습니다" - -#: utils/adt/oid.c:287 +#: utils/adt/oid.c:290 #, c-format msgid "invalid oidvector data" msgstr "잘못된 oidvector 자료" @@ -20362,30 +22127,30 @@ msgstr "요청한 문자가 인코딩용으로 타당치 않음: %d" msgid "null character not permitted" msgstr "null 문자는 허용되지 않음" -#: utils/adt/orderedsetaggs.c:425 utils/adt/orderedsetaggs.c:530 -#: utils/adt/orderedsetaggs.c:669 +#: utils/adt/orderedsetaggs.c:426 utils/adt/orderedsetaggs.c:531 +#: utils/adt/orderedsetaggs.c:670 #, c-format msgid "percentile value %g is not between 0 and 1" msgstr "%g 퍼센트 값이 0과 1사이가 아닙니다." -#: utils/adt/pg_locale.c:1029 +#: utils/adt/pg_locale.c:1034 #, c-format msgid "Apply system library package updates." msgstr "OS 라이브러리 패키지를 업데이트 하세요." -#: utils/adt/pg_locale.c:1234 +#: utils/adt/pg_locale.c:1249 #, c-format msgid "could not create locale \"%s\": %m" msgstr "\"%s\" 로케일을 만들 수 없음: %m" -#: utils/adt/pg_locale.c:1237 +#: utils/adt/pg_locale.c:1252 #, c-format msgid "" "The operating system could not find any locale data for the locale name \"%s" "\"." msgstr "운영체제에서 \"%s\" 로케일 이름에 대한 로케일 파일을 찾을 수 없습니다." -#: utils/adt/pg_locale.c:1324 +#: utils/adt/pg_locale.c:1352 #, c-format msgid "" "collations with different collate and ctype values are not supported on this " @@ -20394,185 +22159,115 @@ msgstr "" "이 플랫폼에서는 서로 다른 정렬규칙(collation)과 문자집합(ctype)을 함께 쓸 수 " "없습니다." -#: utils/adt/pg_locale.c:1339 -#, c-format -msgid "nondefault collations are not supported on this platform" -msgstr "" -"이 플랫폼에서는 기본값이 아닌 정렬규칙(collation)을 사용할 수 없습니다." - -#: utils/adt/pg_locale.c:1510 +#: utils/adt/pg_locale.c:1361 #, c-format -msgid "invalid multibyte character for locale" -msgstr "로케일을 위한 잘못된 멀티바이트 문자" +msgid "collation provider LIBC is not supported on this platform" +msgstr "이 플랫폼에서는 LIBC 문자 정렬 제공자 기능(ICU)을 지원하지 않음." -#: utils/adt/pg_locale.c:1511 +#: utils/adt/pg_locale.c:1373 #, c-format msgid "" -"The server's LC_CTYPE locale is probably incompatible with the database " -"encoding." -msgstr "서버의 LC_CTYPE 로케일은 이 데이터베이스 인코딩과 호환되지 않습니다." 
- -#: utils/adt/pg_lsn.c:44 utils/adt/pg_lsn.c:49 -#, c-format -msgid "invalid input syntax for type pg_lsn: \"%s\"" -msgstr "pg_lsn 자료형에 대한 잘못된 입력: \"%s\"" - -#: utils/adt/pg_upgrade_support.c:40 -#, c-format -msgid "function can only be called when server is in binary upgrade mode" -msgstr "함수는 서버가 이진 업그레이드 상태에서만 호출 될 수 있습니다" - -#: utils/adt/pgstatfuncs.c:571 -#, c-format -msgid "invalid command name: \"%s\"" -msgstr "잘못된 명령어 이름: \"%s\"" - -#: utils/adt/pseudotypes.c:95 -#, c-format -msgid "cannot accept a value of type any" -msgstr "any 형식의 값은 사용할 수 없음" - -#: utils/adt/pseudotypes.c:108 -#, c-format -msgid "cannot display a value of type any" -msgstr "any 형식의 값은 표시할 수 없음" - -#: utils/adt/pseudotypes.c:122 utils/adt/pseudotypes.c:150 -#, c-format -msgid "cannot accept a value of type anyarray" -msgstr "anyarray 형식의 값은 사용할 수 없음" - -#: utils/adt/pseudotypes.c:175 -#, c-format -msgid "cannot accept a value of type anyenum" -msgstr "anyenum 자료형 값으로 사용할 수 없음" - -#: utils/adt/pseudotypes.c:199 -#, c-format -msgid "cannot accept a value of type anyrange" -msgstr "anyrange 자료형 값으로 사용할 수 없음" - -#: utils/adt/pseudotypes.c:276 -#, c-format -msgid "cannot accept a value of type trigger" -msgstr "trigger 자료형 값으로 사용할 수 없음" - -#: utils/adt/pseudotypes.c:289 -#, c-format -msgid "cannot display a value of type trigger" -msgstr "trigger 자료형 값으로 표시할 수 없음" - -#: utils/adt/pseudotypes.c:303 -#, c-format -msgid "cannot accept a value of type event_trigger" -msgstr "event_trigger 형식의 값은 사용할 수 없음" - -#: utils/adt/pseudotypes.c:316 -#, c-format -msgid "cannot display a value of type event_trigger" -msgstr "event_trigger 자료형 값은 표시할 수 없음" - -#: utils/adt/pseudotypes.c:330 -#, c-format -msgid "cannot accept a value of type language_handler" -msgstr "language_handler 자료형 값은 사용할 수 없음" - -#: utils/adt/pseudotypes.c:343 -#, c-format -msgid "cannot display a value of type language_handler" -msgstr "language_handler 자료형 값은 표시할 수 없음" +"collations with different collate and ctype values are not supported by ICU" +msgstr "" +"ICU 지원 기능에서는 서로 다른 정렬규칙(collation)과 문자집합(ctype)을 함께 " +"쓸 수 없습니다." -#: utils/adt/pseudotypes.c:357 +#: utils/adt/pg_locale.c:1379 utils/adt/pg_locale.c:1461 #, c-format -msgid "cannot accept a value of type fdw_handler" -msgstr "fdw_handler 자료형 값은 사용할 수 없음" +msgid "could not open collator for locale \"%s\": %s" +msgstr "\"%s\" 로케일용 문자 정렬 규칙 열기 실패: %s" -#: utils/adt/pseudotypes.c:370 +#: utils/adt/pg_locale.c:1388 #, c-format -msgid "cannot display a value of type fdw_handler" -msgstr "fdw_handler 형식의 값은 표시할 수 없음" +msgid "ICU is not supported in this build" +msgstr "ICU 지원 기능을 뺀 채로 서버가 만들어졌습니다." -#: utils/adt/pseudotypes.c:384 +#: utils/adt/pg_locale.c:1389 #, c-format -msgid "cannot accept a value of type index_am_handler" -msgstr "index_am_handler 형식의 값은 사용할 수 없음" +msgid "You need to rebuild PostgreSQL using --with-icu." +msgstr "--with-icu 옵션을 사용하여 PostgreSQL을 다시 빌드해야 합니다." 
-#: utils/adt/pseudotypes.c:397 +#: utils/adt/pg_locale.c:1409 #, c-format -msgid "cannot display a value of type index_am_handler" -msgstr "index_am_handler 형식의 값은 표시할 수 없음" +msgid "collation \"%s\" has no actual version, but a version was specified" +msgstr "\"%s\" 정렬규칙은 분명한 버전이 없는데 버전을 지정했음" -#: utils/adt/pseudotypes.c:411 +#: utils/adt/pg_locale.c:1416 #, c-format -msgid "cannot accept a value of type tsm_handler" -msgstr "tsm_handler 형식의 값은 사용할 수 없음" +msgid "collation \"%s\" has version mismatch" +msgstr "\"%s\" 정렬규칙은 버전이 맞지 않음" -#: utils/adt/pseudotypes.c:424 +#: utils/adt/pg_locale.c:1418 #, c-format -msgid "cannot display a value of type tsm_handler" -msgstr "tsm_handler 형식의 값은 표시할 수 없음" +msgid "" +"The collation in the database was created using version %s, but the " +"operating system provides version %s." +msgstr "" -#: utils/adt/pseudotypes.c:438 +#: utils/adt/pg_locale.c:1421 #, c-format -msgid "cannot accept a value of type internal" -msgstr "internal 형식의 값은 사용할 수 없음" +msgid "" +"Rebuild all objects affected by this collation and run ALTER COLLATION %s " +"REFRESH VERSION, or build PostgreSQL with the right library version." +msgstr "" -#: utils/adt/pseudotypes.c:451 +#: utils/adt/pg_locale.c:1501 #, c-format -msgid "cannot display a value of type internal" -msgstr "internal 형식의 값은 표시할 수 없음" +msgid "could not open ICU converter for encoding \"%s\": %s" +msgstr "\"%s\" 인코딩용 ICU 변환기 열기 실패: %s" -#: utils/adt/pseudotypes.c:465 +#: utils/adt/pg_locale.c:1532 utils/adt/pg_locale.c:1541 #, c-format -msgid "cannot accept a value of type opaque" -msgstr "opaque 형식의 값은 사용할 수 없음" +msgid "ucnv_toUChars failed: %s" +msgstr "ucnv_toUChars 실패: %s" -#: utils/adt/pseudotypes.c:478 +#: utils/adt/pg_locale.c:1570 utils/adt/pg_locale.c:1579 #, c-format -msgid "cannot display a value of type opaque" -msgstr "opaque 형식의 값은 표시할 수 없음" +msgid "ucnv_fromUChars failed: %s" +msgstr "ucnv_fromUChars 실패: %s" -#: utils/adt/pseudotypes.c:492 +#: utils/adt/pg_locale.c:1752 #, c-format -msgid "cannot accept a value of type anyelement" -msgstr "anyelement 형식의 값은 사용할 수 없음" +msgid "invalid multibyte character for locale" +msgstr "로케일을 위한 잘못된 멀티바이트 문자" -#: utils/adt/pseudotypes.c:505 +#: utils/adt/pg_locale.c:1753 #, c-format -msgid "cannot display a value of type anyelement" -msgstr "anyelement 형식의 값은 표시할 수 없음" +msgid "" +"The server's LC_CTYPE locale is probably incompatible with the database " +"encoding." +msgstr "서버의 LC_CTYPE 로케일은 이 데이터베이스 인코딩과 호환되지 않습니다." 
-#: utils/adt/pseudotypes.c:518 +#: utils/adt/pg_upgrade_support.c:28 #, c-format -msgid "cannot accept a value of type anynonarray" -msgstr "anynonarray 형식의 값은 사용할 수 없음" +msgid "function can only be called when server is in binary upgrade mode" +msgstr "함수는 서버가 이진 업그레이드 상태에서만 호출 될 수 있습니다" -#: utils/adt/pseudotypes.c:531 +#: utils/adt/pgstatfuncs.c:473 #, c-format -msgid "cannot display a value of type anynonarray" -msgstr "anynonarray 형식의 값은 표시할 수 없음" +msgid "invalid command name: \"%s\"" +msgstr "잘못된 명령어 이름: \"%s\"" -#: utils/adt/pseudotypes.c:544 +#: utils/adt/pseudotypes.c:247 #, c-format msgid "cannot accept a value of a shell type" msgstr "셸 형태 값은 사용할 수 없음" -#: utils/adt/pseudotypes.c:557 +#: utils/adt/pseudotypes.c:260 #, c-format msgid "cannot display a value of a shell type" msgstr "shell 형식의 값은 표시할 수 없음" -#: utils/adt/pseudotypes.c:579 utils/adt/pseudotypes.c:604 -#: utils/adt/pseudotypes.c:632 utils/adt/pseudotypes.c:660 -#, c-format -msgid "cannot accept a value of type %s" -msgstr "%s 형식의 값은 사용할 수 없음" - -#: utils/adt/pseudotypes.c:647 utils/adt/pseudotypes.c:673 +#: utils/adt/pseudotypes.c:350 utils/adt/pseudotypes.c:376 #, c-format msgid "cannot output a value of type %s" msgstr "%s 형식의 값은 출력할 수 없음" +#: utils/adt/pseudotypes.c:403 +#, c-format +msgid "cannot display a value of type %s" +msgstr "%s 자료형의 값은 표시할 수 없음" + #: utils/adt/rangetypes.c:405 #, c-format msgid "range constructor flags argument must not be null" @@ -20588,57 +22283,57 @@ msgstr "" msgid "result of range union would not be contiguous" msgstr "" -#: utils/adt/rangetypes.c:1543 +#: utils/adt/rangetypes.c:1533 #, c-format msgid "range lower bound must be less than or equal to range upper bound" msgstr "range 자료형의 하한값은 상한값과 같거나 작아야 합니다" -#: utils/adt/rangetypes.c:1926 utils/adt/rangetypes.c:1939 -#: utils/adt/rangetypes.c:1953 +#: utils/adt/rangetypes.c:1916 utils/adt/rangetypes.c:1929 +#: utils/adt/rangetypes.c:1943 #, c-format msgid "invalid range bound flags" msgstr "잘못된 range 구성 플래그" -#: utils/adt/rangetypes.c:1927 utils/adt/rangetypes.c:1940 -#: utils/adt/rangetypes.c:1954 +#: utils/adt/rangetypes.c:1917 utils/adt/rangetypes.c:1930 +#: utils/adt/rangetypes.c:1944 #, c-format msgid "Valid values are \"[]\", \"[)\", \"(]\", and \"()\"." msgstr "유효한 값은 \"[]\", \"[)\", \"(]\", \"()\"." -#: utils/adt/rangetypes.c:2019 utils/adt/rangetypes.c:2036 -#: utils/adt/rangetypes.c:2049 utils/adt/rangetypes.c:2067 -#: utils/adt/rangetypes.c:2078 utils/adt/rangetypes.c:2122 -#: utils/adt/rangetypes.c:2130 +#: utils/adt/rangetypes.c:2009 utils/adt/rangetypes.c:2026 +#: utils/adt/rangetypes.c:2039 utils/adt/rangetypes.c:2057 +#: utils/adt/rangetypes.c:2068 utils/adt/rangetypes.c:2112 +#: utils/adt/rangetypes.c:2120 #, c-format msgid "malformed range literal: \"%s\"" msgstr "비정상적인 range 문자: \"%s\"" -#: utils/adt/rangetypes.c:2021 +#: utils/adt/rangetypes.c:2011 #, c-format msgid "Junk after \"empty\" key word." msgstr " \"empty\" 키워드 뒤에 정크가 있음" -#: utils/adt/rangetypes.c:2038 +#: utils/adt/rangetypes.c:2028 #, c-format msgid "Missing left parenthesis or bracket." msgstr "왼쪽 괄호가 빠졌음" -#: utils/adt/rangetypes.c:2051 +#: utils/adt/rangetypes.c:2041 #, c-format msgid "Missing comma after lower bound." msgstr "하한값 뒤에 쉼표가 빠졌음" -#: utils/adt/rangetypes.c:2069 +#: utils/adt/rangetypes.c:2059 #, c-format msgid "Too many commas." msgstr "칼럼이 너무 많습니다." -#: utils/adt/rangetypes.c:2080 +#: utils/adt/rangetypes.c:2070 #, c-format msgid "Junk after right parenthesis or bracket." 
msgstr "오른쪽 괄호 다음에 정크가 있음" -#: utils/adt/regexp.c:285 utils/adt/regexp.c:1288 utils/adt/varlena.c:3829 +#: utils/adt/regexp.c:285 utils/adt/regexp.c:1344 utils/adt/varlena.c:3963 #, c-format msgid "regular expression failed: %s" msgstr "잘못된 정규식: %s" @@ -20648,114 +22343,129 @@ msgstr "잘못된 정규식: %s" msgid "invalid regexp option: \"%c\"" msgstr "잘못된 regexp 옵션: \"%c\"" -#: utils/adt/regexp.c:948 +#: utils/adt/regexp.c:862 +#, c-format +msgid "regexp_match does not support the global option" +msgstr "regexp_match는 글로벌 옵션을 지원하지 않음" + +#: utils/adt/regexp.c:863 +#, c-format +msgid "Use the regexp_matches function instead." +msgstr "대신에 regexp_matches 함수를 사용하세요." + +#: utils/adt/regexp.c:1163 #, c-format -msgid "regexp_split does not support the global option" -msgstr "regexp_split는 글로벌 옵션을 지원하지 않음" +msgid "regexp_split_to_table does not support the global option" +msgstr "regexp_split_to_table은 글로벌 옵션을 지원하지 않음" -#: utils/adt/regproc.c:128 utils/adt/regproc.c:148 +#: utils/adt/regexp.c:1219 +#, c-format +msgid "regexp_split_to_array does not support the global option" +msgstr "regexp_splitto_array는 글로벌 옵션을 지원하지 않음" + +#: utils/adt/regproc.c:106 #, c-format msgid "more than one function named \"%s\"" msgstr "\"%s\"(이)라는 함수가 두 개 이상 있음" -#: utils/adt/regproc.c:587 utils/adt/regproc.c:607 +#: utils/adt/regproc.c:524 #, c-format msgid "more than one operator named %s" msgstr "%s(이)라는 연산자가 두 개 이상 있음" -#: utils/adt/regproc.c:774 utils/adt/regproc.c:815 gram.y:7302 +#: utils/adt/regproc.c:691 utils/adt/regproc.c:732 gram.y:7844 #, c-format msgid "missing argument" msgstr "인자가 빠졌음" -#: utils/adt/regproc.c:775 utils/adt/regproc.c:816 gram.y:7303 +#: utils/adt/regproc.c:692 utils/adt/regproc.c:733 gram.y:7845 #, c-format msgid "Use NONE to denote the missing argument of a unary operator." msgstr "단항 연산자에서 인자 없음을 표시할 때는 NONE 인자를 사용하세요." -#: utils/adt/regproc.c:779 utils/adt/regproc.c:820 utils/adt/regproc.c:2006 -#: utils/adt/ruleutils.c:8367 utils/adt/ruleutils.c:8536 +#: utils/adt/regproc.c:696 utils/adt/regproc.c:737 utils/adt/regproc.c:1865 +#: utils/adt/ruleutils.c:8959 utils/adt/ruleutils.c:9127 #, c-format msgid "too many arguments" msgstr "인자가 너무 많습니다" -#: utils/adt/regproc.c:780 utils/adt/regproc.c:821 +#: utils/adt/regproc.c:697 utils/adt/regproc.c:738 #, c-format msgid "Provide two argument types for operator." msgstr "연산자를 위해서는 두개의 인자 자료형을 지정하십시오." -#: utils/adt/regproc.c:1594 utils/adt/regproc.c:1618 utils/adt/regproc.c:1715 -#: utils/adt/regproc.c:1739 utils/adt/regproc.c:1841 utils/adt/regproc.c:1846 -#: utils/adt/varlena.c:3084 utils/adt/varlena.c:3089 +#: utils/adt/regproc.c:1449 utils/adt/regproc.c:1473 utils/adt/regproc.c:1574 +#: utils/adt/regproc.c:1598 utils/adt/regproc.c:1700 utils/adt/regproc.c:1705 +#: utils/adt/varlena.c:3216 utils/adt/varlena.c:3221 #, c-format msgid "invalid name syntax" msgstr "잘못된 이름 구문" -#: utils/adt/regproc.c:1904 +#: utils/adt/regproc.c:1763 #, c-format msgid "expected a left parenthesis" msgstr "왼쪽 괄호가 필요합니다." -#: utils/adt/regproc.c:1920 +#: utils/adt/regproc.c:1779 #, c-format msgid "expected a right parenthesis" msgstr "오른쪽 괄호가 필요합니다." 
-#: utils/adt/regproc.c:1939 +#: utils/adt/regproc.c:1798 #, c-format msgid "expected a type name" msgstr "자료형 이름을 지정하십시오" -#: utils/adt/regproc.c:1971 +#: utils/adt/regproc.c:1830 #, c-format msgid "improper type name" msgstr "부적절한 형식 이름" -#: utils/adt/ri_triggers.c:314 utils/adt/ri_triggers.c:371 -#: utils/adt/ri_triggers.c:790 utils/adt/ri_triggers.c:1013 -#: utils/adt/ri_triggers.c:1169 utils/adt/ri_triggers.c:1350 -#: utils/adt/ri_triggers.c:1515 utils/adt/ri_triggers.c:1691 -#: utils/adt/ri_triggers.c:1871 utils/adt/ri_triggers.c:2062 -#: utils/adt/ri_triggers.c:2120 utils/adt/ri_triggers.c:2225 -#: utils/adt/ri_triggers.c:2402 gram.y:3343 +#: utils/adt/ri_triggers.c:311 utils/adt/ri_triggers.c:368 +#: utils/adt/ri_triggers.c:787 utils/adt/ri_triggers.c:1010 +#: utils/adt/ri_triggers.c:1166 utils/adt/ri_triggers.c:1347 +#: utils/adt/ri_triggers.c:1512 utils/adt/ri_triggers.c:1688 +#: utils/adt/ri_triggers.c:1868 utils/adt/ri_triggers.c:2059 +#: utils/adt/ri_triggers.c:2117 utils/adt/ri_triggers.c:2222 +#: utils/adt/ri_triggers.c:2399 gram.y:3656 #, c-format msgid "MATCH PARTIAL not yet implemented" msgstr "MATCH PARTIAL 기능은 아직 구현 안되었습니다" -#: utils/adt/ri_triggers.c:343 utils/adt/ri_triggers.c:2490 -#: utils/adt/ri_triggers.c:3315 +#: utils/adt/ri_triggers.c:340 utils/adt/ri_triggers.c:2487 +#: utils/adt/ri_triggers.c:3312 #, c-format msgid "insert or update on table \"%s\" violates foreign key constraint \"%s\"" msgstr "" "\"%s\" 테이블에서 자료 추가, 갱신 작업이 \"%s\" 참조키(foreign key) 제약 조건" "을 위배했습니다" -#: utils/adt/ri_triggers.c:346 utils/adt/ri_triggers.c:2493 +#: utils/adt/ri_triggers.c:343 utils/adt/ri_triggers.c:2490 #, c-format msgid "MATCH FULL does not allow mixing of null and nonnull key values." msgstr "MATCH FULL에 null 키 값과 nonnull 키 값을 함께 사용할 수 없습니다." -#: utils/adt/ri_triggers.c:2732 +#: utils/adt/ri_triggers.c:2729 #, c-format msgid "function \"%s\" must be fired for INSERT" msgstr "INSERT에 대해 \"%s\" 함수를 실행해야 함" -#: utils/adt/ri_triggers.c:2738 +#: utils/adt/ri_triggers.c:2735 #, c-format msgid "function \"%s\" must be fired for UPDATE" msgstr "UPDATE에 대해 \"%s\" 함수를 실행해야 함" -#: utils/adt/ri_triggers.c:2744 +#: utils/adt/ri_triggers.c:2741 #, c-format msgid "function \"%s\" must be fired for DELETE" msgstr "DELETE에 대해 \"%s\" 함수를 실행해야 함" -#: utils/adt/ri_triggers.c:2767 +#: utils/adt/ri_triggers.c:2764 #, c-format msgid "no pg_constraint entry for trigger \"%s\" on table \"%s\"" msgstr "\"%s\" 트리거(해당 테이블: \"%s\")에 대한 pg_constraint 항목이 없음" -#: utils/adt/ri_triggers.c:2769 +#: utils/adt/ri_triggers.c:2766 #, c-format msgid "" "Remove this referential integrity trigger and its mates, then do ALTER TABLE " @@ -20764,7 +22474,7 @@ msgstr "" "해당 트리거 관련 객체를 제거한 후 ALTER TABLE ADD CONSTRAINT 명령으로 추가하" "세요" -#: utils/adt/ri_triggers.c:3225 +#: utils/adt/ri_triggers.c:3222 #, c-format msgid "" "referential integrity query on \"%s\" from constraint \"%s\" on \"%s\" gave " @@ -20773,22 +22483,22 @@ msgstr "" "\"%s\"에 대한 참조 무결성 쿼리(제약조건: \"%s\", 해당 릴레이션: \"%s\")를 실" "행하면 예기치 않은 결과가 발생함" -#: utils/adt/ri_triggers.c:3229 +#: utils/adt/ri_triggers.c:3226 #, c-format msgid "This is most likely due to a rule having rewritten the query." msgstr "이 문제는 주로 룰이 재작성 되었을 때 발생합니다." -#: utils/adt/ri_triggers.c:3319 +#: utils/adt/ri_triggers.c:3316 #, c-format msgid "Key (%s)=(%s) is not present in table \"%s\"." msgstr "(%s)=(%s) 키가 \"%s\" 테이블에 없습니다." -#: utils/adt/ri_triggers.c:3322 +#: utils/adt/ri_triggers.c:3319 #, c-format msgid "Key is not present in table \"%s\"." msgstr "\"%s\" 테이블에 키가 없습니다." 
-#: utils/adt/ri_triggers.c:3328 +#: utils/adt/ri_triggers.c:3325 #, c-format msgid "" "update or delete on table \"%s\" violates foreign key constraint \"%s\" on " @@ -20797,12 +22507,12 @@ msgstr "" "\"%s\" 테이블의 자료 갱신, 삭제 작업이 \"%s\" 참조키(foreign key) 제약 조건 " "- \"%s\" 테이블 - 을 위반했습니다" -#: utils/adt/ri_triggers.c:3333 +#: utils/adt/ri_triggers.c:3330 #, c-format msgid "Key (%s)=(%s) is still referenced from table \"%s\"." msgstr "(%s)=(%s) 키가 \"%s\" 테이블에서 여전히 참조됩니다." -#: utils/adt/ri_triggers.c:3336 +#: utils/adt/ri_triggers.c:3333 #, c-format msgid "Key is still referenced from table \"%s\"." msgstr "\"%s\" 테이블에서 키가 여전히 참조됩니다." @@ -20826,12 +22536,12 @@ msgstr "왼쪽 괄호가 필요합니다." #: utils/adt/rowtypes.c:184 #, c-format msgid "Too few columns." -msgstr "열 수가 너무 적다" +msgstr "칼럼이 너무 적습니다." #: utils/adt/rowtypes.c:267 #, c-format msgid "Too many columns." -msgstr "열이 너무 많습니다." +msgstr "칼럼이 너무 많습니다." #: utils/adt/rowtypes.c:275 #, c-format @@ -20865,158 +22575,153 @@ msgstr "서로 다른 열 형식 %s과(와) %s(레코드 열 %d)을(를) 비교 msgid "cannot compare record types with different numbers of columns" msgstr "칼럼 수가 서로 다른 레코드 자료형을 비교할 수 없음" -#: utils/adt/ruleutils.c:4289 +#: utils/adt/ruleutils.c:4668 #, c-format msgid "rule \"%s\" has unsupported event type %d" msgstr "\"%s\" 룰은 %d 이벤트 형태를 지원하지 않습니다" -#: utils/adt/selfuncs.c:5318 +#: utils/adt/selfuncs.c:5547 #, c-format msgid "case insensitive matching not supported on type bytea" msgstr "bytea 형식에서는 대/소문자를 구분하지 않는 일치가 지원되지 않음" -#: utils/adt/selfuncs.c:5421 +#: utils/adt/selfuncs.c:5649 #, c-format msgid "regular-expression matching not supported on type bytea" msgstr "bytea 형식에서는 정규식 일치가 지원되지 않음" -#: utils/adt/tid.c:71 utils/adt/tid.c:79 utils/adt/tid.c:87 -#, c-format -msgid "invalid input syntax for type tid: \"%s\"" -msgstr "tid 형식의 입력 구문이 잘못됨: \"%s\"" - -#: utils/adt/timestamp.c:99 +#: utils/adt/timestamp.c:107 #, c-format msgid "TIMESTAMP(%d)%s precision must not be negative" msgstr "TIMESTAMP(%d)%s 정밀도로 음수를 사용할 수 없습니다" -#: utils/adt/timestamp.c:105 +#: utils/adt/timestamp.c:113 #, c-format msgid "TIMESTAMP(%d)%s precision reduced to maximum allowed, %d" msgstr "TIMESTAMP(%d)%s 정밀도는 최대값(%d)으로 줄였습니다" -#: utils/adt/timestamp.c:170 utils/adt/timestamp.c:445 +#: utils/adt/timestamp.c:176 utils/adt/timestamp.c:416 #, c-format msgid "timestamp out of range: \"%s\"" msgstr "타임스탬프 값이 범위를 벗어났음: \"%s\"" -#: utils/adt/timestamp.c:188 utils/adt/timestamp.c:463 -#: utils/adt/timestamp.c:990 +#: utils/adt/timestamp.c:194 utils/adt/timestamp.c:434 +#: utils/adt/timestamp.c:941 #, c-format msgid "date/time value \"%s\" is no longer supported" -msgstr "날짜/시간 값 \"%s\"은(는) 더 이상 지원되지 않음" - -#: utils/adt/timestamp.c:258 utils/adt/timestamp.c:754 -#, c-format -msgid "timestamp cannot be NaN" -msgstr "타임스탬프 값으로 NaN 값을 지정할 수 없음" +msgstr "날짜/시간 값 \"%s\"은(는) 더 이상 지원되지 않음" -#: utils/adt/timestamp.c:380 +#: utils/adt/timestamp.c:362 #, c-format msgid "timestamp(%d) precision must be between %d and %d" msgstr "타임스탬프(%d) 정밀도는 %d에서 %d 사이여야 함" -#: utils/adt/timestamp.c:513 +#: utils/adt/timestamp.c:484 #, c-format msgid "invalid input syntax for numeric time zone: \"%s\"" msgstr "숫자형 타임 존 입력에 문법 오류가 있음: \"%s\"" -#: utils/adt/timestamp.c:515 +#: utils/adt/timestamp.c:486 #, c-format msgid "Numeric time zones must have \"-\" or \"+\" as first character." msgstr "숫자형 타임 존 형식은 처음에 \"-\" 또는 \"+\" 문자가 있어야 합니다." 
-#: utils/adt/timestamp.c:528 +#: utils/adt/timestamp.c:499 #, c-format msgid "numeric time zone \"%s\" out of range" msgstr "\"%s\" 숫자형 타임 존 범위 벗어남" -#: utils/adt/timestamp.c:631 utils/adt/timestamp.c:641 -#: utils/adt/timestamp.c:653 +#: utils/adt/timestamp.c:601 utils/adt/timestamp.c:611 +#: utils/adt/timestamp.c:619 #, c-format msgid "timestamp out of range: %d-%02d-%02d %d:%02d:%02g" msgstr "타임스탬프 값이 범위를 벗어났음: %d-%02d-%02d %d:%02d:%02g" -#: utils/adt/timestamp.c:772 utils/adt/timestamp.c:788 +#: utils/adt/timestamp.c:720 +#, c-format +msgid "timestamp cannot be NaN" +msgstr "타임스탬프 값으로 NaN 값을 지정할 수 없음" + +#: utils/adt/timestamp.c:738 utils/adt/timestamp.c:750 #, c-format msgid "timestamp out of range: \"%g\"" msgstr "타임스탬프 값이 범위를 벗어났음: \"%g\"" -#: utils/adt/timestamp.c:984 utils/adt/timestamp.c:1608 -#: utils/adt/timestamp.c:2121 utils/adt/timestamp.c:3273 -#: utils/adt/timestamp.c:3278 utils/adt/timestamp.c:3283 -#: utils/adt/timestamp.c:3333 utils/adt/timestamp.c:3340 -#: utils/adt/timestamp.c:3347 utils/adt/timestamp.c:3367 -#: utils/adt/timestamp.c:3374 utils/adt/timestamp.c:3381 -#: utils/adt/timestamp.c:3411 utils/adt/timestamp.c:3419 -#: utils/adt/timestamp.c:3464 utils/adt/timestamp.c:3804 -#: utils/adt/timestamp.c:3933 utils/adt/timestamp.c:4324 +#: utils/adt/timestamp.c:935 utils/adt/timestamp.c:1505 +#: utils/adt/timestamp.c:1918 utils/adt/timestamp.c:2994 +#: utils/adt/timestamp.c:2999 utils/adt/timestamp.c:3004 +#: utils/adt/timestamp.c:3054 utils/adt/timestamp.c:3061 +#: utils/adt/timestamp.c:3068 utils/adt/timestamp.c:3088 +#: utils/adt/timestamp.c:3095 utils/adt/timestamp.c:3102 +#: utils/adt/timestamp.c:3132 utils/adt/timestamp.c:3140 +#: utils/adt/timestamp.c:3184 utils/adt/timestamp.c:3507 +#: utils/adt/timestamp.c:3632 utils/adt/timestamp.c:4000 #, c-format msgid "interval out of range" msgstr "간격이 범위를 벗어남" -#: utils/adt/timestamp.c:1125 utils/adt/timestamp.c:1158 +#: utils/adt/timestamp.c:1068 utils/adt/timestamp.c:1101 #, c-format msgid "invalid INTERVAL type modifier" msgstr "잘못된 INTERVAL 형식 한정자" -#: utils/adt/timestamp.c:1141 +#: utils/adt/timestamp.c:1084 #, c-format msgid "INTERVAL(%d) precision must not be negative" msgstr "INTERVAL(%d) 정밀도로 음수값이 올 수 없습니다" -#: utils/adt/timestamp.c:1147 +#: utils/adt/timestamp.c:1090 #, c-format msgid "INTERVAL(%d) precision reduced to maximum allowed, %d" msgstr "INTERVAL(%d) 정밀도는 허용 최대치(%d)로 감소 되었습니다" -#: utils/adt/timestamp.c:1552 +#: utils/adt/timestamp.c:1462 #, c-format msgid "interval(%d) precision must be between %d and %d" msgstr "간격(%d) 정밀도는 %d에서 %d 사이여야 함" -#: utils/adt/timestamp.c:2850 +#: utils/adt/timestamp.c:2595 #, c-format msgid "cannot subtract infinite timestamps" msgstr "타임스탬프 무한값을 추출 할 수 없음" -#: utils/adt/timestamp.c:4059 utils/adt/timestamp.c:4584 -#: utils/adt/timestamp.c:4768 utils/adt/timestamp.c:4793 +#: utils/adt/timestamp.c:3751 utils/adt/timestamp.c:4260 +#: utils/adt/timestamp.c:4427 utils/adt/timestamp.c:4448 #, c-format msgid "timestamp units \"%s\" not supported" msgstr "\"%s\" timestamp 유닛은 지원하지 않습니다" -#: utils/adt/timestamp.c:4073 utils/adt/timestamp.c:4538 -#: utils/adt/timestamp.c:4803 +#: utils/adt/timestamp.c:3765 utils/adt/timestamp.c:4214 +#: utils/adt/timestamp.c:4458 #, c-format msgid "timestamp units \"%s\" not recognized" msgstr "\"%s\" timestamp 유닛을 처리하지 못했습니다" -#: utils/adt/timestamp.c:4213 utils/adt/timestamp.c:4579 -#: utils/adt/timestamp.c:4990 utils/adt/timestamp.c:5016 +#: utils/adt/timestamp.c:3897 utils/adt/timestamp.c:4255 +#: utils/adt/timestamp.c:4628 
utils/adt/timestamp.c:4650 #, c-format msgid "timestamp with time zone units \"%s\" not supported" msgstr "\"%s\" 시간대 유닛이 있는 timestamp 자료형은 지원하지 않습니다" -#: utils/adt/timestamp.c:4230 utils/adt/timestamp.c:4533 -#: utils/adt/timestamp.c:5025 +#: utils/adt/timestamp.c:3914 utils/adt/timestamp.c:4209 +#: utils/adt/timestamp.c:4659 #, c-format msgid "timestamp with time zone units \"%s\" not recognized" msgstr "\"%s\" 시간대 유닛이 있는 timestamp 값을 처리하지 못했습니다" -#: utils/adt/timestamp.c:4311 +#: utils/adt/timestamp.c:3987 #, c-format msgid "" "interval units \"%s\" not supported because months usually have fractional " "weeks" msgstr "" -#: utils/adt/timestamp.c:4317 utils/adt/timestamp.c:5131 +#: utils/adt/timestamp.c:3993 utils/adt/timestamp.c:4753 #, c-format msgid "interval units \"%s\" not supported" msgstr "\"%s\" 유닛 간격(interval units)은 지원하지 않습니다" -#: utils/adt/timestamp.c:4333 utils/adt/timestamp.c:5158 +#: utils/adt/timestamp.c:4009 utils/adt/timestamp.c:4776 #, c-format msgid "interval units \"%s\" not recognized" msgstr "\"%s\" 유닛 간격(interval units)을 처리하지 못했습니다" @@ -21041,7 +22746,7 @@ msgstr "suppress_redundant_updates_trigger: 업데이트 전에 호출되어야 msgid "suppress_redundant_updates_trigger: must be called for each row" msgstr "suppress_redundant_updates_trigger: 각 행에 대해 호출되어야 함" -#: utils/adt/tsgistidx.c:99 +#: utils/adt/tsgistidx.c:100 #, c-format msgid "gtsvector_in not implemented" msgstr "gtsvector_in이 구현되어 있지 않음" @@ -21096,7 +22801,7 @@ msgstr "" "텍스트 검색 쿼리에 중지 단어만 포함되어 있거나 어휘소가 포함되어 있지 않음, " "무시됨" -#: utils/adt/tsquery_op.c:122 +#: utils/adt/tsquery_op.c:123 #, c-format msgid "distance in phrase operator should be non-negative and less than %d" msgstr "분석 작업에서 사용한 거리값은 %d 보다 작고 양수값만 사용할 수 있습니다" @@ -21104,91 +22809,91 @@ msgstr "분석 작업에서 사용한 거리값은 %d 보다 작고 양수값만 #: utils/adt/tsquery_rewrite.c:321 #, c-format msgid "ts_rewrite query must return two tsquery columns" -msgstr "ts_rewrite 쿼리는 두 개의 tsquery 열을 반환해야 함" +msgstr "ts_rewrite 쿼리는 두 개의 tsquery 칼럼을 반환해야 함" -#: utils/adt/tsrank.c:412 +#: utils/adt/tsrank.c:413 #, c-format msgid "array of weight must be one-dimensional" msgstr "가중치 배열은 일차원 배열이어야 함" -#: utils/adt/tsrank.c:417 +#: utils/adt/tsrank.c:418 #, c-format msgid "array of weight is too short" msgstr "가중치 배열이 너무 짧음" -#: utils/adt/tsrank.c:422 +#: utils/adt/tsrank.c:423 #, c-format msgid "array of weight must not contain nulls" msgstr "가중치 배열에는 null이 포함되지 않아야 함" -#: utils/adt/tsrank.c:431 utils/adt/tsrank.c:868 +#: utils/adt/tsrank.c:432 utils/adt/tsrank.c:869 #, c-format msgid "weight out of range" msgstr "가중치가 범위를 벗어남" -#: utils/adt/tsvector.c:213 +#: utils/adt/tsvector.c:214 #, c-format msgid "word is too long (%ld bytes, max %ld bytes)" msgstr "단어가 너무 긺(%ld바이트, 최대 %ld바이트)" -#: utils/adt/tsvector.c:220 +#: utils/adt/tsvector.c:221 #, c-format msgid "string is too long for tsvector (%ld bytes, max %ld bytes)" msgstr "" "문자열이 너무 길어서 tsvector에 사용할 수 없음(%ld바이트, 최대 %ld바이트)" -#: utils/adt/tsvector_op.c:322 utils/adt/tsvector_op.c:609 -#: utils/adt/tsvector_op.c:777 +#: utils/adt/tsvector_op.c:323 utils/adt/tsvector_op.c:610 +#: utils/adt/tsvector_op.c:778 #, c-format msgid "lexeme array may not contain nulls" msgstr "어휘소 배열에는 null이 포함되지 않아야 함" -#: utils/adt/tsvector_op.c:852 +#: utils/adt/tsvector_op.c:853 #, c-format msgid "weight array may not contain nulls" msgstr "가중치 배열에는 null이 포함되지 않아야 함" -#: utils/adt/tsvector_op.c:876 +#: utils/adt/tsvector_op.c:877 #, c-format msgid "unrecognized weight: \"%c\"" msgstr "알 수 없는 가중치: \"%c\"" -#: utils/adt/tsvector_op.c:2313 +#: 
utils/adt/tsvector_op.c:2314 #, c-format msgid "ts_stat query must return one tsvector column" msgstr "ts_stat 쿼리는 하나의 tsvector 칼럼을 반환해야 함" -#: utils/adt/tsvector_op.c:2495 +#: utils/adt/tsvector_op.c:2496 #, c-format msgid "tsvector column \"%s\" does not exist" msgstr "\"%s\" tsvector 칼럼이 없음" -#: utils/adt/tsvector_op.c:2501 +#: utils/adt/tsvector_op.c:2503 #, c-format msgid "column \"%s\" is not of tsvector type" msgstr "\"%s\" 칼럼은 tsvector 형식이 아님" -#: utils/adt/tsvector_op.c:2513 +#: utils/adt/tsvector_op.c:2515 #, c-format msgid "configuration column \"%s\" does not exist" msgstr "\"%s\" 구성 칼럼이 없음" -#: utils/adt/tsvector_op.c:2519 +#: utils/adt/tsvector_op.c:2521 #, c-format msgid "column \"%s\" is not of regconfig type" msgstr "\"%s\" 칼럼은 regconfig 형이 아님" -#: utils/adt/tsvector_op.c:2526 +#: utils/adt/tsvector_op.c:2528 #, c-format msgid "configuration column \"%s\" must not be null" msgstr "\"%s\" 구성 칼럼은 null이 아니어야 함" -#: utils/adt/tsvector_op.c:2539 +#: utils/adt/tsvector_op.c:2541 #, c-format msgid "text search configuration name \"%s\" must be schema-qualified" msgstr "\"%s\" 텍스트 검색 구성 이름이 스키마로 한정되어야 함" -#: utils/adt/tsvector_op.c:2564 +#: utils/adt/tsvector_op.c:2566 #, c-format msgid "column \"%s\" is not of a character type" msgstr "\"%s\" 칼럼은 문자형이 아님" @@ -21208,65 +22913,60 @@ msgstr "이스케이프 문자가 없음: \"%s\"" msgid "wrong position info in tsvector: \"%s\"" msgstr "tsvector에 잘못된 위치 정보가 있음: \"%s\"" -#: utils/adt/txid.c:339 +#: utils/adt/txid.c:135 #, c-format -msgid "invalid input syntax for type txid_snapshot: \"%s\"" -msgstr "txid_snapshot 형의 입력 구문이 잘못됨: \"%s\"" +msgid "transaction ID %s is in the future" +msgstr "%s 트랜잭션 ID는 미래의 것입니다" -#: utils/adt/txid.c:534 +#: utils/adt/txid.c:624 #, c-format msgid "invalid external txid_snapshot data" msgstr "외부 txid_snapshot 값이 잘못됨" -#: utils/adt/uuid.c:145 -#, c-format -msgid "invalid input syntax for uuid: \"%s\"" -msgstr "uuid의 입력 구문이 잘못됨: \"%s\"" - -#: utils/adt/varbit.c:57 utils/adt/varchar.c:50 +#: utils/adt/varbit.c:58 utils/adt/varchar.c:51 #, c-format msgid "length for type %s must be at least 1" msgstr "%s 자료형의 길이는 최소 1 이상이어야합니다" -#: utils/adt/varbit.c:62 utils/adt/varchar.c:54 +#: utils/adt/varbit.c:63 utils/adt/varchar.c:55 #, c-format msgid "length for type %s cannot exceed %d" msgstr "%s 자료형의 길이는 최대 %d 이하여야합니다" -#: utils/adt/varbit.c:163 utils/adt/varbit.c:475 utils/adt/varbit.c:973 +#: utils/adt/varbit.c:164 utils/adt/varbit.c:476 utils/adt/varbit.c:973 #, c-format msgid "bit string length exceeds the maximum allowed (%d)" msgstr "비트 문자열 길이가 최대치 (%d)를 초과했습니다" -#: utils/adt/varbit.c:177 utils/adt/varbit.c:320 utils/adt/varbit.c:377 +#: utils/adt/varbit.c:178 utils/adt/varbit.c:321 utils/adt/varbit.c:378 #, c-format msgid "bit string length %d does not match type bit(%d)" msgstr "" "길이가 %d인 비트 문자열 자료는 bit(%d) 자료형의 길이와 일치하지 않습니다" -#: utils/adt/varbit.c:199 utils/adt/varbit.c:511 +#: utils/adt/varbit.c:200 utils/adt/varbit.c:512 #, c-format msgid "\"%c\" is not a valid binary digit" msgstr "\"%c\" 문자는 2진수 문자가 아닙니다" -#: utils/adt/varbit.c:224 utils/adt/varbit.c:536 +#: utils/adt/varbit.c:225 utils/adt/varbit.c:537 #, c-format msgid "\"%c\" is not a valid hexadecimal digit" msgstr "\"%c\" 문자는 16진수 문자가 아닙니다" -#: utils/adt/varbit.c:311 utils/adt/varbit.c:627 +#: utils/adt/varbit.c:312 utils/adt/varbit.c:628 #, c-format msgid "invalid length in external bit string" msgstr "외부 비트 문자열의 길이가 잘못되었습니다" -#: utils/adt/varbit.c:489 utils/adt/varbit.c:636 utils/adt/varbit.c:731 +#: utils/adt/varbit.c:490 utils/adt/varbit.c:637 
utils/adt/varbit.c:731 #, c-format msgid "bit string too long for type bit varying(%d)" msgstr "비트 문자열이 너무 깁니다(해당 자료형 bit varying(%d))" -#: utils/adt/varbit.c:1066 utils/adt/varbit.c:1168 utils/adt/varlena.c:842 -#: utils/adt/varlena.c:906 utils/adt/varlena.c:1050 utils/adt/varlena.c:2735 -#: utils/adt/varlena.c:2802 +#: utils/adt/varbit.c:1066 utils/adt/varbit.c:1168 utils/adt/varlena.c:841 +#: utils/adt/varlena.c:905 utils/adt/varlena.c:1049 utils/adt/varlena.c:2881 +#: utils/adt/varlena.c:2948 #, c-format msgid "negative substring length not allowed" msgstr "substring에서 음수 길이는 허용하지 않음" @@ -21291,84 +22991,94 @@ msgstr "서로 크기가 틀린 비트 문자열은 XOR 연산을 할 수 없습 msgid "bit index %d out of valid range (0..%d)" msgstr "비트 %d 인덱스의 범위를 벗어남 (0..%d)" -#: utils/adt/varbit.c:1812 utils/adt/varlena.c:3002 +#: utils/adt/varbit.c:1812 utils/adt/varlena.c:3140 #, c-format msgid "new bit must be 0 or 1" msgstr "새 비트값은 0 또는 1 이어야합니다" -#: utils/adt/varchar.c:154 utils/adt/varchar.c:307 +#: utils/adt/varchar.c:155 utils/adt/varchar.c:308 #, c-format msgid "value too long for type character(%d)" msgstr "character(%d) 자료형에 너무 긴 자료를 담으려고 합니다." -#: utils/adt/varchar.c:469 utils/adt/varchar.c:623 +#: utils/adt/varchar.c:470 utils/adt/varchar.c:623 #, c-format msgid "value too long for type character varying(%d)" msgstr "character varying(%d) 자료형에 너무 긴 자료를 담으려고 합니다." -#: utils/adt/varlena.c:1420 utils/adt/varlena.c:1825 +#: utils/adt/varlena.c:1416 utils/adt/varlena.c:1865 #, c-format msgid "could not determine which collation to use for string comparison" msgstr "문자열 비교 작업에 사용할 정렬규칙(collation)을 결정할 수 없음" -#: utils/adt/varlena.c:1478 utils/adt/varlena.c:1491 +#: utils/adt/varlena.c:1473 utils/adt/varlena.c:1486 #, c-format msgid "could not convert string to UTF-16: error code %lu" msgstr "UTF-16 인코딩으로 문자열을 변환할 수 없음: 오류번호 %lu" -#: utils/adt/varlena.c:1506 +#: utils/adt/varlena.c:1501 #, c-format msgid "could not compare Unicode strings: %m" msgstr "유니코드 문자열 비교 실패: %m" -#: utils/adt/varlena.c:2880 utils/adt/varlena.c:2911 utils/adt/varlena.c:2947 -#: utils/adt/varlena.c:2990 +#: utils/adt/varlena.c:1556 utils/adt/varlena.c:2145 +#, c-format +msgid "collation failed: %s" +msgstr "문자열 정렬: %s" + +#: utils/adt/varlena.c:2363 +#, c-format +msgid "sort key generation failed: %s" +msgstr "정렬 키 생성 실패: %s" + +#: utils/adt/varlena.c:3026 utils/adt/varlena.c:3057 utils/adt/varlena.c:3092 +#: utils/adt/varlena.c:3128 #, c-format msgid "index %d out of valid range, 0..%d" msgstr "%d 인덱스의 범위를 벗어남, 0..%d" -#: utils/adt/varlena.c:3925 +#: utils/adt/varlena.c:4059 #, c-format msgid "field position must be greater than zero" msgstr "필드 위치 값은 0 보다 커야합니다" -#: utils/adt/varlena.c:4804 +#: utils/adt/varlena.c:4949 #, c-format msgid "unterminated format() type specifier" msgstr "마무리 안된 format() 형 식별자" -#: utils/adt/varlena.c:4805 utils/adt/varlena.c:4939 utils/adt/varlena.c:5060 +#: utils/adt/varlena.c:4950 utils/adt/varlena.c:5084 utils/adt/varlena.c:5205 #, c-format msgid "For a single \"%%\" use \"%%%%\"." 
msgstr "하나의 \"%%\" 문자를 표시하려면, \"%%%%\" 형태로 사용하세요" -#: utils/adt/varlena.c:4937 utils/adt/varlena.c:5058 +#: utils/adt/varlena.c:5082 utils/adt/varlena.c:5203 #, c-format msgid "unrecognized format() type specifier \"%c\"" msgstr "인식할 수 없는 format() 형 식별자 \"%c\"" -#: utils/adt/varlena.c:4950 utils/adt/varlena.c:5007 +#: utils/adt/varlena.c:5095 utils/adt/varlena.c:5152 #, c-format msgid "too few arguments for format()" msgstr "format() 작업을 위한 인자가 너무 적음" -#: utils/adt/varlena.c:5102 utils/adt/varlena.c:5285 +#: utils/adt/varlena.c:5247 utils/adt/varlena.c:5430 #, c-format msgid "number is out of range" msgstr "수치 범위를 벗어남" -#: utils/adt/varlena.c:5166 utils/adt/varlena.c:5194 +#: utils/adt/varlena.c:5311 utils/adt/varlena.c:5339 #, c-format msgid "format specifies argument 0, but arguments are numbered from 1" msgstr "" "format 함수에서 사용할 수 있는 인자 위치 번호는 0이 아니라, 1부터 시작합니다" -#: utils/adt/varlena.c:5187 +#: utils/adt/varlena.c:5332 #, c-format msgid "width argument position must be ended by \"$\"" msgstr "넓이 인자 위치값은 \"$\" 문자로 끝나야 합니다" -#: utils/adt/varlena.c:5232 +#: utils/adt/varlena.c:5377 #, c-format msgid "null values cannot be formatted as an SQL identifier" msgstr "null 값은 SQL 식별자로 포멧될 수 없음" @@ -21383,74 +23093,74 @@ msgstr "ntile의 인자는 0보다 커야 함" msgid "argument of nth_value must be greater than zero" msgstr "nth_value의 인자는 0보다 커야 함" -#: utils/adt/xml.c:171 +#: utils/adt/xml.c:220 #, c-format msgid "unsupported XML feature" msgstr "지원되지 않는 XML 기능" -#: utils/adt/xml.c:172 +#: utils/adt/xml.c:221 #, c-format msgid "This functionality requires the server to be built with libxml support." msgstr "이 기능을 사용하려면 libxml 지원으로 서버를 빌드해야 합니다." -#: utils/adt/xml.c:173 +#: utils/adt/xml.c:222 #, c-format msgid "You need to rebuild PostgreSQL using --with-libxml." msgstr "--with-libxml을 사용하여 PostgreSQL을 다시 빌드해야 합니다." -#: utils/adt/xml.c:192 utils/mb/mbutils.c:523 +#: utils/adt/xml.c:241 utils/mb/mbutils.c:523 #, c-format msgid "invalid encoding name \"%s\"" msgstr "\"%s\" 인코딩 이름이 잘못됨" -#: utils/adt/xml.c:435 utils/adt/xml.c:440 +#: utils/adt/xml.c:484 utils/adt/xml.c:489 #, c-format msgid "invalid XML comment" msgstr "잘못된 XML 주석" -#: utils/adt/xml.c:569 +#: utils/adt/xml.c:618 #, c-format msgid "not an XML document" msgstr "XML 문서가 아님" -#: utils/adt/xml.c:728 utils/adt/xml.c:751 +#: utils/adt/xml.c:777 utils/adt/xml.c:800 #, c-format msgid "invalid XML processing instruction" msgstr "잘못된 XML 처리 명령" -#: utils/adt/xml.c:729 +#: utils/adt/xml.c:778 #, c-format msgid "XML processing instruction target name cannot be \"%s\"." msgstr "XML 처리 명령 대상 이름은 \"%s\"일 수 없습니다." -#: utils/adt/xml.c:752 +#: utils/adt/xml.c:801 #, c-format msgid "XML processing instruction cannot contain \"?>\"." msgstr "XML 처리 명령에는 \"?>\"를 포함할 수 없습니다." -#: utils/adt/xml.c:831 +#: utils/adt/xml.c:880 #, c-format msgid "xmlvalidate is not implemented" msgstr "xmlvalidate가 구현되어 있지 않음" -#: utils/adt/xml.c:910 +#: utils/adt/xml.c:959 #, c-format msgid "could not initialize XML library" msgstr "XML 라이브러리를 초기화할 수 없음" -#: utils/adt/xml.c:911 +#: utils/adt/xml.c:960 #, c-format msgid "" "libxml2 has incompatible char type: sizeof(char)=%u, sizeof(xmlChar)=%u." 
msgstr "" "libxml2에 호환되지 않는 문자 자료형 있음: sizeof(char)=%u, sizeof(xmlChar)=%u" -#: utils/adt/xml.c:997 +#: utils/adt/xml.c:1046 #, c-format msgid "could not set up XML error handler" msgstr "XML 오류 핸들러를 설정할 수 없음" -#: utils/adt/xml.c:998 +#: utils/adt/xml.c:1047 #, c-format msgid "" "This probably indicates that the version of libxml2 being used is not " @@ -21459,161 +23169,181 @@ msgstr "" "이 문제는 PostgreSQL 서버를 만들 때 사용한 libxml2 헤더 파일이 호환성이 없는 " "것 같습니다." -#: utils/adt/xml.c:1735 +#: utils/adt/xml.c:1797 msgid "Invalid character value." msgstr "잘못된 문자 값입니다." -#: utils/adt/xml.c:1738 +#: utils/adt/xml.c:1800 msgid "Space required." msgstr "공간이 필요합니다." -#: utils/adt/xml.c:1741 +#: utils/adt/xml.c:1803 msgid "standalone accepts only 'yes' or 'no'." msgstr "독립 실행형은 'yes' 또는 'no'만 허용합니다." -#: utils/adt/xml.c:1744 +#: utils/adt/xml.c:1806 msgid "Malformed declaration: missing version." msgstr "선언 형식이 잘못됨: 버전이 누락되었습니다." -#: utils/adt/xml.c:1747 +#: utils/adt/xml.c:1809 msgid "Missing encoding in text declaration." msgstr "텍스트 선언에서 인코딩이 누락되었습니다." -#: utils/adt/xml.c:1750 +#: utils/adt/xml.c:1812 msgid "Parsing XML declaration: '?>' expected." msgstr "XML 선언 구문 분석 중: '?>'가 필요합니다." -#: utils/adt/xml.c:1753 +#: utils/adt/xml.c:1815 #, c-format msgid "Unrecognized libxml error code: %d." msgstr "인식할 수 없는 libxml 오류 코드: %d." -#: utils/adt/xml.c:2028 +#: utils/adt/xml.c:2090 #, c-format msgid "XML does not support infinite date values." msgstr "XML은 무한 날짜 값을 지원하지 않습니다." -#: utils/adt/xml.c:2050 utils/adt/xml.c:2077 +#: utils/adt/xml.c:2112 utils/adt/xml.c:2139 #, c-format msgid "XML does not support infinite timestamp values." msgstr "XML은 무한 타임스탬프 값을 지원하지 않습니다." -#: utils/adt/xml.c:2468 +#: utils/adt/xml.c:2551 #, c-format msgid "invalid query" msgstr "잘못된 쿼리" -#: utils/adt/xml.c:3793 +#: utils/adt/xml.c:3870 #, c-format msgid "invalid array for XML namespace mapping" msgstr "XML 네임스페이스 매핑에 사용할 배열이 잘못됨" -#: utils/adt/xml.c:3794 +#: utils/adt/xml.c:3871 #, c-format msgid "" "The array must be two-dimensional with length of the second axis equal to 2." msgstr "" "이 배열은 key, value로 구성된 배열을 요소로 하는 2차원 배열이어야 합니다." -#: utils/adt/xml.c:3818 +#: utils/adt/xml.c:3895 #, c-format msgid "empty XPath expression" msgstr "XPath 식이 비어 있음" -#: utils/adt/xml.c:3867 +#: utils/adt/xml.c:3939 #, c-format msgid "neither namespace name nor URI may be null" msgstr "네임스페이스 이름 및 URI는 null일 수 없음" -#: utils/adt/xml.c:3874 +#: utils/adt/xml.c:3946 #, c-format msgid "could not register XML namespace with name \"%s\" and URI \"%s\"" msgstr "" "이름 \"%s\" 및 URI \"%s\"을(를) 사용하여 XML 네임스페이스를 등록할 수 없음" +#: utils/adt/xml.c:4300 +#, c-format +msgid "DEFAULT namespace is not supported" +msgstr "DEFAULT 네임스페이스는 지원하지 않습니다." 
+ +#: utils/adt/xml.c:4329 +#, c-format +msgid "row path filter must not be empty string" +msgstr "로우 경로 필터는 비어있으면 안됩니다" + +#: utils/adt/xml.c:4360 +#, c-format +msgid "column path filter must not be empty string" +msgstr "칼럼 경로 필터는 비어있으면 안됩니다" + +#: utils/adt/xml.c:4542 +#, c-format +msgid "more than one value returned by column XPath expression" +msgstr "칼럼 XPath 표현식에 사용된 결과가 하나 이상의 값을 사용합니다" + # # nonun 부분 end -#: utils/cache/lsyscache.c:2580 utils/cache/lsyscache.c:2613 -#: utils/cache/lsyscache.c:2646 utils/cache/lsyscache.c:2679 +#: utils/cache/lsyscache.c:2612 utils/cache/lsyscache.c:2645 +#: utils/cache/lsyscache.c:2678 utils/cache/lsyscache.c:2711 #, c-format msgid "type %s is only a shell" msgstr "%s 형식은 셸일 뿐임" -#: utils/cache/lsyscache.c:2585 +#: utils/cache/lsyscache.c:2617 #, c-format msgid "no input function available for type %s" msgstr "%s 자료형을 위한 입력 함수가 없습니다" -#: utils/cache/lsyscache.c:2618 +#: utils/cache/lsyscache.c:2650 #, c-format msgid "no output function available for type %s" msgstr "%s 자료형을 위한 출력 함수가 없습니다" -#: utils/cache/plancache.c:718 +#: utils/cache/plancache.c:722 #, c-format msgid "cached plan must not change result type" msgstr "캐시된 계획에서 결과 형식을 바꾸지 않아야 함" -#: utils/cache/relcache.c:5226 +#: utils/cache/relcache.c:5795 #, c-format msgid "could not create relation-cache initialization file \"%s\": %m" msgstr "\"%s\" 릴레이션-캐시 초기화 파일을 만들 수 없음: %m" -#: utils/cache/relcache.c:5228 +#: utils/cache/relcache.c:5797 #, c-format msgid "Continuing anyway, but there's something wrong." msgstr "어쨌든 계속하는데, 뭔가 잘못 된 것이 있습니다." -#: utils/cache/relcache.c:5502 +#: utils/cache/relcache.c:6067 #, c-format msgid "could not remove cache file \"%s\": %m" msgstr "\"%s\" 캐쉬 파일을 삭제할 수 없음: %m" -#: utils/cache/relmapper.c:508 +#: utils/cache/relmapper.c:509 #, c-format msgid "cannot PREPARE a transaction that modified relation mapping" msgstr "릴레이션 맵핑을 변경하는 트랜잭셜을 PREPARE할 수 없음" -#: utils/cache/relmapper.c:651 utils/cache/relmapper.c:751 +#: utils/cache/relmapper.c:652 utils/cache/relmapper.c:754 #, c-format msgid "could not open relation mapping file \"%s\": %m" msgstr "\"%s\" 릴레이션 맵핑 파일을 열 수 없음: %m" -#: utils/cache/relmapper.c:664 +#: utils/cache/relmapper.c:666 #, c-format msgid "could not read relation mapping file \"%s\": %m" msgstr "\"%s\" 릴레이션 맵핑 파일을 읽을 수 없음: %m" -#: utils/cache/relmapper.c:674 +#: utils/cache/relmapper.c:677 #, c-format msgid "relation mapping file \"%s\" contains invalid data" msgstr "\"%s\" 릴레이션 맵핑 파일에 잘못된 데이터가 있습니다" -#: utils/cache/relmapper.c:684 +#: utils/cache/relmapper.c:687 #, c-format msgid "relation mapping file \"%s\" contains incorrect checksum" msgstr "\"%s\" 릴레이션 맵핑 파일에 잘못된 checksum 값이 있음" -#: utils/cache/relmapper.c:784 +#: utils/cache/relmapper.c:788 #, c-format msgid "could not write to relation mapping file \"%s\": %m" msgstr "\"%s\" 릴레이션 맵핑 파일을 쓸 수 없습니다: %m" -#: utils/cache/relmapper.c:797 +#: utils/cache/relmapper.c:803 #, c-format msgid "could not fsync relation mapping file \"%s\": %m" msgstr "\"%s\" 릴레이션 맵핑 파일을 fsync 할 수 없음: %m" -#: utils/cache/relmapper.c:803 +#: utils/cache/relmapper.c:810 #, c-format msgid "could not close relation mapping file \"%s\": %m" msgstr "\"%s\" 릴레이션 맵핑 파일을 닫을 수 없음: %m" -#: utils/cache/typcache.c:1207 +#: utils/cache/typcache.c:1223 #, c-format msgid "type %s is not composite" msgstr "%s 자료형은 복합 자료형이 아닙니다" -#: utils/cache/typcache.c:1221 +#: utils/cache/typcache.c:1237 #, c-format msgid "record type has not been registered" msgstr "레코드 형식이 등록되지 않았음" @@ -21726,243 +23456,249 @@ msgstr "치명적오류" msgid "PANIC" msgstr 
"손상" -#: utils/fmgr/dfmgr.c:117 +#: utils/fmgr/dfmgr.c:121 #, c-format msgid "could not find function \"%s\" in file \"%s\"" msgstr "\"%s\" 함수를 \"%s\" 파일에서 찾을 수 없음" -#: utils/fmgr/dfmgr.c:196 utils/fmgr/dfmgr.c:405 utils/fmgr/dfmgr.c:453 +#: utils/fmgr/dfmgr.c:201 utils/fmgr/dfmgr.c:418 utils/fmgr/dfmgr.c:466 #, c-format msgid "could not access file \"%s\": %m" msgstr "\"%s\" 파일에 액세스할 수 없음: %m" -#: utils/fmgr/dfmgr.c:234 +#: utils/fmgr/dfmgr.c:239 #, c-format msgid "could not load library \"%s\": %s" msgstr "\"%s\" 라이브러리를 불러 올 수 없음: %s" -#: utils/fmgr/dfmgr.c:266 +#: utils/fmgr/dfmgr.c:271 #, c-format msgid "incompatible library \"%s\": missing magic block" msgstr "\"%s\" 라이브러리는 사용할 수 없습니다: magic black 없음" -#: utils/fmgr/dfmgr.c:268 +#: utils/fmgr/dfmgr.c:273 #, c-format msgid "Extension libraries are required to use the PG_MODULE_MAGIC macro." msgstr "확장 라이브러리를 만들 때, PG_MODULE_MAGIC 매크로를 사용해서 만드세요." -#: utils/fmgr/dfmgr.c:304 +#: utils/fmgr/dfmgr.c:319 #, c-format msgid "incompatible library \"%s\": version mismatch" msgstr "\"%s\" 라이브러리는 사용할 수 없습니다: 버전이 틀림" -#: utils/fmgr/dfmgr.c:306 +#: utils/fmgr/dfmgr.c:321 #, c-format -msgid "Server is version %d.%d, library is version %d.%d." -msgstr "서버 버전 = %d.%d, 라이브러리 버전 %d.%d." +msgid "Server is version %d, library is version %s." +msgstr "서버 버전 = %d, 라이브러리 버전 %s." -#: utils/fmgr/dfmgr.c:325 +#: utils/fmgr/dfmgr.c:338 #, c-format msgid "Server has FUNC_MAX_ARGS = %d, library has %d." msgstr "서버의 경우 FUNC_MAX_ARGS = %d인데 라이브러리에 %d이(가) 있습니다." -#: utils/fmgr/dfmgr.c:334 +#: utils/fmgr/dfmgr.c:347 #, c-format msgid "Server has INDEX_MAX_KEYS = %d, library has %d." msgstr "서버의 경우 INDEX_MAX_KEYS = %d인데 라이브러리에 %d이(가) 있습니다." -#: utils/fmgr/dfmgr.c:343 +#: utils/fmgr/dfmgr.c:356 #, c-format msgid "Server has NAMEDATALEN = %d, library has %d." msgstr "서버의 경우 NAMEDATALEN = %d인데 라이브러리에 %d이(가) 있습니다." -#: utils/fmgr/dfmgr.c:352 +#: utils/fmgr/dfmgr.c:365 #, c-format msgid "Server has FLOAT4PASSBYVAL = %s, library has %s." msgstr "서버의 경우 FLOAT4PASSBYVAL = %s인데 라이브러리에 %s이(가) 있습니다." -#: utils/fmgr/dfmgr.c:361 +#: utils/fmgr/dfmgr.c:374 #, c-format msgid "Server has FLOAT8PASSBYVAL = %s, library has %s." msgstr "서버의 경우 FLOAT8PASSBYVAL = %s인데 라이브러리에 %s이(가) 있습니다." -#: utils/fmgr/dfmgr.c:368 +#: utils/fmgr/dfmgr.c:381 msgid "Magic block has unexpected length or padding difference." msgstr "매직 블록에 예기치 않은 길이 또는 여백 차이가 있습니다." 
-#: utils/fmgr/dfmgr.c:371 +#: utils/fmgr/dfmgr.c:384 #, c-format msgid "incompatible library \"%s\": magic block mismatch" msgstr "\"%s\" 라이브러리는 사용할 수 없습니다: magic black 틀림" -#: utils/fmgr/dfmgr.c:535 +#: utils/fmgr/dfmgr.c:548 #, c-format msgid "access to library \"%s\" is not allowed" msgstr "\"%s\" 라이브러리 사용이 금지되어있습니다" -#: utils/fmgr/dfmgr.c:561 +#: utils/fmgr/dfmgr.c:574 #, c-format msgid "invalid macro name in dynamic library path: %s" msgstr "동적 라이브러리 경로에서 잘못된 매크로 이름: %s" -#: utils/fmgr/dfmgr.c:601 +#: utils/fmgr/dfmgr.c:614 #, c-format msgid "zero-length component in parameter \"dynamic_library_path\"" msgstr "\"dynamic_library_path\" 매개 변수 값으로 길이가 0인 값을 사용했음" -#: utils/fmgr/dfmgr.c:620 +#: utils/fmgr/dfmgr.c:633 #, c-format msgid "component in parameter \"dynamic_library_path\" is not an absolute path" msgstr "\"dynamic_library_path\" 매개 변수 값으로 절대 경로를 사용할 수 없음" -#: utils/fmgr/fmgr.c:272 +#: utils/fmgr/fmgr.c:239 #, c-format msgid "internal function \"%s\" is not in internal lookup table" msgstr "\"%s\" 내부 함수를 내부 검색 테이블에서 찾을 수 없습니다" -#: utils/fmgr/fmgr.c:479 +#: utils/fmgr/fmgr.c:399 #, c-format -msgid "unrecognized API version %d reported by info function \"%s\"" -msgstr "_^_ %d 알수 없는 API 버전이 \"%s\" 함수에 의해서 보고되었음" +msgid "could not find function information for function \"%s\"" +msgstr "\"%s\" 함수의 함수 정보를 찾을 수 없음" + +#: utils/fmgr/fmgr.c:401 +#, c-format +msgid "" +"SQL-callable functions need an accompanying PG_FUNCTION_INFO_V1(funcname)." +msgstr "" -#: utils/fmgr/fmgr.c:849 utils/fmgr/fmgr.c:2106 +#: utils/fmgr/fmgr.c:419 #, c-format -msgid "function %u has too many arguments (%d, maximum is %d)" -msgstr "%u 함수는 너무 많은 인자를 사용하고 있음 (%d, 최대 %d)" +msgid "unrecognized API version %d reported by info function \"%s\"" +msgstr "_^_ %d 알수 없는 API 버전이 \"%s\" 함수에 의해서 보고되었음" -#: utils/fmgr/fmgr.c:2527 +#: utils/fmgr/fmgr.c:2132 #, c-format msgid "language validation function %u called for language %u instead of %u" msgstr "" "%u OID 언어 유효성 검사 함수가 %u OID 프로시져 언어용으로 호출되었음, 원래 언" "어는 %u" -#: utils/fmgr/funcapi.c:353 +#: utils/fmgr/funcapi.c:354 #, c-format msgid "" "could not determine actual result type for function \"%s\" declared to " "return type %s" msgstr "\"%s\" 함수의 실재 리턴 자료형을 알 수 없음, 정의된 리턴 자료형: %s" -#: utils/fmgr/funcapi.c:1340 utils/fmgr/funcapi.c:1371 +#: utils/fmgr/funcapi.c:1341 utils/fmgr/funcapi.c:1372 #, c-format msgid "number of aliases does not match number of columns" msgstr "alias 수가 열 수와 틀립니다" -#: utils/fmgr/funcapi.c:1365 +#: utils/fmgr/funcapi.c:1366 #, c-format msgid "no column alias was provided" msgstr "열 별칭이 제공되지 않았음" -#: utils/fmgr/funcapi.c:1389 +#: utils/fmgr/funcapi.c:1390 #, c-format msgid "could not determine row description for function returning record" msgstr "레코드를 리턴하는 함수를 위한 행(row) 구성 정보를 구할 수 없음" -#: utils/init/miscinit.c:121 +#: utils/init/miscinit.c:123 #, c-format msgid "could not change directory to \"%s\": %m" msgstr "\"%s\" 이름의 디렉터리로 이동할 수 없습니다: %m" -#: utils/init/miscinit.c:449 utils/misc/guc.c:6016 +#: utils/init/miscinit.c:451 utils/misc/guc.c:6126 #, c-format msgid "cannot set parameter \"%s\" within security-restricted operation" msgstr "보안 제한 작업 내에서 \"%s\" 매개 변수를 설정할 수 없음" -#: utils/init/miscinit.c:510 +#: utils/init/miscinit.c:512 #, c-format msgid "role with OID %u does not exist" msgstr "%u OID 롤이 없음" -#: utils/init/miscinit.c:540 +#: utils/init/miscinit.c:542 #, c-format msgid "role \"%s\" is not permitted to log in" msgstr "\"%s\" 롤은 접속을 허용하지 않음" -#: utils/init/miscinit.c:558 +#: utils/init/miscinit.c:560 #, c-format msgid "too many 
connections for role \"%s\"" msgstr "\"%s\" 롤의 최대 동시 접속수를 초과했습니다" -#: utils/init/miscinit.c:618 +#: utils/init/miscinit.c:620 #, c-format msgid "permission denied to set session authorization" msgstr "세션 인증을 지정하기 위한 권한이 없음" -#: utils/init/miscinit.c:701 +#: utils/init/miscinit.c:703 #, c-format msgid "invalid role OID: %u" msgstr "잘못된 롤 OID: %u" -#: utils/init/miscinit.c:755 +#: utils/init/miscinit.c:757 #, c-format msgid "database system is shut down" msgstr "데이터베이스 시스템 서비스를 중지했습니다" -#: utils/init/miscinit.c:842 +#: utils/init/miscinit.c:844 #, c-format msgid "could not create lock file \"%s\": %m" msgstr "\"%s\" 잠금 파일을 만들 수 없음: %m" -#: utils/init/miscinit.c:856 +#: utils/init/miscinit.c:858 #, c-format msgid "could not open lock file \"%s\": %m" msgstr "\"%s\" 잠금파일을 열 수 없음: %m" -#: utils/init/miscinit.c:862 +#: utils/init/miscinit.c:865 #, c-format msgid "could not read lock file \"%s\": %m" msgstr "\"%s\" 잠금 파일을 읽을 수 없음: %m" -#: utils/init/miscinit.c:870 +#: utils/init/miscinit.c:874 #, c-format msgid "lock file \"%s\" is empty" msgstr "\"%s\" 잠금 파일이 비었음" -#: utils/init/miscinit.c:871 +#: utils/init/miscinit.c:875 #, c-format msgid "" "Either another server is starting, or the lock file is the remnant of a " "previous server startup crash." msgstr "" -#: utils/init/miscinit.c:918 +#: utils/init/miscinit.c:922 #, c-format msgid "lock file \"%s\" already exists" msgstr "\"%s\" 잠금 파일이 이미 있음" -#: utils/init/miscinit.c:922 +#: utils/init/miscinit.c:926 #, c-format msgid "Is another postgres (PID %d) running in data directory \"%s\"?" msgstr "" "다른 postgres 프로그램(PID %d)이 \"%s\" 데이터 디렉터리를 사용해서 실행중입니" "까?" -#: utils/init/miscinit.c:924 +#: utils/init/miscinit.c:928 #, c-format msgid "Is another postmaster (PID %d) running in data directory \"%s\"?" msgstr "" "다른 postmaster 프로그램(PID %d)이 \"%s\" 데이터 디렉터리를 사용해서 실행중입" "니까?" -#: utils/init/miscinit.c:927 +#: utils/init/miscinit.c:931 #, c-format msgid "Is another postgres (PID %d) using socket file \"%s\"?" msgstr "" "다른 postgres 프로그램(PID %d)이 \"%s\" 소켓 파일을 사용해서 실행중입니까?" -#: utils/init/miscinit.c:929 +#: utils/init/miscinit.c:933 #, c-format msgid "Is another postmaster (PID %d) using socket file \"%s\"?" msgstr "" "다른 postmaster 프로그램(PID %d)이 \"%s\" 소켓 파일을 사용해서 실행중입니까?" -#: utils/init/miscinit.c:965 +#: utils/init/miscinit.c:969 #, c-format msgid "pre-existing shared memory block (key %lu, ID %lu) is still in use" msgstr "미리 확보된 공유 메모리 영역 (%lu 키, %lu ID)이 여전히 사용중입니다" -#: utils/init/miscinit.c:968 +#: utils/init/miscinit.c:972 #, c-format msgid "" "If you're sure there are no old server processes still running, remove the " @@ -21971,12 +23707,12 @@ msgstr "" "확실하게 공유 메모리를 사용하는 다른 프로세스가 없다고 판단되면, 공유 메모리 " "영역을 삭제하거나 \"%s\" 파일을 지우십시오." -#: utils/init/miscinit.c:984 +#: utils/init/miscinit.c:988 #, c-format msgid "could not remove old lock file \"%s\": %m" msgstr "\"%s\" 옛 잠금 파일을 삭제할 수 없음: %m" -#: utils/init/miscinit.c:986 +#: utils/init/miscinit.c:990 #, c-format msgid "" "The file seems accidentally left over, but it could not be removed. Please " @@ -21986,57 +23722,57 @@ msgstr "" "셸 명령을 이용해서 파일을 삭제 하고 다시 시도해 보십시오. 
- 내용 참 거시기 하" "네" -#: utils/init/miscinit.c:1022 utils/init/miscinit.c:1033 -#: utils/init/miscinit.c:1043 +#: utils/init/miscinit.c:1027 utils/init/miscinit.c:1041 +#: utils/init/miscinit.c:1052 #, c-format msgid "could not write lock file \"%s\": %m" msgstr "\"%s\" 잠금 파일에 쓸 수 없음: %m" -#: utils/init/miscinit.c:1172 utils/init/miscinit.c:1301 utils/misc/guc.c:8818 +#: utils/init/miscinit.c:1184 utils/init/miscinit.c:1327 utils/misc/guc.c:8931 #, c-format msgid "could not read from file \"%s\": %m" msgstr "\"%s\" 파일을 읽을 수 없음: %m" -#: utils/init/miscinit.c:1291 +#: utils/init/miscinit.c:1315 #, c-format msgid "could not open file \"%s\": %m; continuing anyway" msgstr "\"%s\" 파일을 열 수 없음: %m; 어째든 계속 진행함" -#: utils/init/miscinit.c:1314 +#: utils/init/miscinit.c:1340 #, c-format msgid "lock file \"%s\" contains wrong PID: %ld instead of %ld" msgstr "\"%s\" 잠금 파일에 있는 PID 값이 이상합니다: 현재값 %ld, 원래값 %ld" -#: utils/init/miscinit.c:1356 utils/init/miscinit.c:1369 +#: utils/init/miscinit.c:1379 utils/init/miscinit.c:1395 #, c-format msgid "\"%s\" is not a valid data directory" msgstr "\"%s\" 값은 바른 데이터디렉터리가 아닙니다" -#: utils/init/miscinit.c:1358 +#: utils/init/miscinit.c:1381 #, c-format msgid "File \"%s\" is missing." msgstr "\"%s\" 파일이 없습니다." -#: utils/init/miscinit.c:1371 +#: utils/init/miscinit.c:1397 #, c-format msgid "File \"%s\" does not contain valid data." msgstr "\"%s\" 파일에 잘못된 자료가 기록되어 있습니다." -#: utils/init/miscinit.c:1373 +#: utils/init/miscinit.c:1399 #, c-format msgid "You might need to initdb." msgstr "initdb 명령을 실행해 새 클러스터를 만들어야 할 수도 있습니다." -#: utils/init/miscinit.c:1381 +#: utils/init/miscinit.c:1407 #, c-format msgid "" -"The data directory was initialized by PostgreSQL version %ld.%ld, which is " -"not compatible with this version %s." +"The data directory was initialized by PostgreSQL version %s, which is not " +"compatible with this version %s." msgstr "" -"이 데이터 디렉터리는 PostgreSQL %ld.%ld 버전으로 초기화 되어있는데, 이 서버" -"의 %s 버전은 이 버전과 호환성이 없습니다." +"이 데이터 디렉터리는 PostgreSQL %s 버전으로 초기화 되어있는데, 이 서버의 %s " +"버전은 이 버전과 호환성이 없습니다." -#: utils/init/miscinit.c:1452 +#: utils/init/miscinit.c:1474 #, c-format msgid "loaded library \"%s\"" msgstr "\"%s\" 라이브러리 로드 완료" @@ -22136,83 +23872,88 @@ msgstr "" "setlocale()에서 인식할 수 없는 \"%s\" LC_CTYPE 값으로 데이터베이스가 초기화되" "었습니다." -#: utils/init/postinit.c:714 +#: utils/init/postinit.c:719 #, c-format msgid "no roles are defined in this database system" msgstr "이 데이터베이스에는 어떠한 롤 정의도 없습니다" -#: utils/init/postinit.c:715 +#: utils/init/postinit.c:720 #, c-format msgid "You should immediately run CREATE USER \"%s\" SUPERUSER;." msgstr "다음 명령을 먼저 실행하십시오: CREATE USER \"%s\" SUPERUSER;." -#: utils/init/postinit.c:751 +#: utils/init/postinit.c:756 #, c-format msgid "new replication connections are not allowed during database shutdown" msgstr "데이터베이스 중지 중에는 새로운 복제 연결을 할 수 없습니다." 
-#: utils/init/postinit.c:755 +#: utils/init/postinit.c:760 #, c-format msgid "must be superuser to connect during database shutdown" msgstr "슈퍼유저만 데이터베이스 종료 중에 연결할 수 있음" -#: utils/init/postinit.c:765 +#: utils/init/postinit.c:770 #, c-format msgid "must be superuser to connect in binary upgrade mode" msgstr "슈퍼유저만 바이너리 업그레이드 모드 중에 연결 할 수 있음" -#: utils/init/postinit.c:779 +#: utils/init/postinit.c:784 #, c-format msgid "" "remaining connection slots are reserved for non-replication superuser " "connections" msgstr "남은 연결 슬롯은 non-replication 슈퍼유저 연결용으로 남겨 놓았음" -#: utils/init/postinit.c:789 +#: utils/init/postinit.c:794 #, c-format msgid "must be superuser or replication role to start walsender" msgstr "" "superuser 또는 replication 권한을 가진 롤만 walsender 프로세스를 시작할 수 있" "음" -#: utils/init/postinit.c:858 +#: utils/init/postinit.c:863 #, c-format msgid "database %u does not exist" msgstr "%u 데이터베이스가 없음" -#: utils/init/postinit.c:944 +#: utils/init/postinit.c:952 #, c-format msgid "It seems to have just been dropped or renamed." msgstr "삭제되었거나 이름이 바뀐 것 같습니다." -#: utils/init/postinit.c:962 +#: utils/init/postinit.c:970 #, c-format msgid "The database subdirectory \"%s\" is missing." msgstr "데이터베이스 디렉터리에 \"%s\" 하위 디렉터리가 없습니다" -#: utils/init/postinit.c:967 +#: utils/init/postinit.c:975 #, c-format msgid "could not access directory \"%s\": %m" msgstr "\"%s\" 디렉터리를 액세스할 수 없습니다: %m" -#: utils/mb/conv.c:405 utils/mb/conv.c:591 +#: utils/mb/conv.c:488 utils/mb/conv.c:680 #, c-format msgid "invalid encoding number: %d" msgstr "잘못된 인코딩 번호: %d" -#: utils/mb/conversion_procs/utf8_and_iso8859/utf8_and_iso8859.c:137 -#: utils/mb/conversion_procs/utf8_and_iso8859/utf8_and_iso8859.c:169 +#: utils/mb/conversion_procs/utf8_and_iso8859/utf8_and_iso8859.c:122 +#: utils/mb/conversion_procs/utf8_and_iso8859/utf8_and_iso8859.c:154 #, c-format msgid "unexpected encoding ID %d for ISO 8859 character sets" msgstr "%d은(는) ISO 8859 문자 집합에 대한 예기치 않은 인코딩 ID임" -#: utils/mb/conversion_procs/utf8_and_win/utf8_and_win.c:127 -#: utils/mb/conversion_procs/utf8_and_win/utf8_and_win.c:159 +#: utils/mb/conversion_procs/utf8_and_win/utf8_and_win.c:103 +#: utils/mb/conversion_procs/utf8_and_win/utf8_and_win.c:135 #, c-format msgid "unexpected encoding ID %d for WIN character sets" msgstr "%d은(는) WIN 문자 집합에 대한 예기치 않은 인코딩 ID임" -#: utils/mb/encnames.c:496 +#: utils/mb/encnames.c:473 +#, c-format +msgid "encoding \"%s\" not supported by ICU" +msgstr "\"%s\" 인코딩은 ICU 기능을 지원하지 않음" + +#: utils/mb/encnames.c:572 #, c-format msgid "encoding name too long" msgstr "인코딩 이름이 너무 깁니다" @@ -22268,281 +24009,289 @@ msgstr "" "%s 바이트로 조합된 문자(인코딩: \"%s\")와 대응되는 문자 코드가 \"%s\" 인코딩" "에는 없습니다" -#: utils/misc/guc.c:548 +#: utils/misc/guc.c:570 msgid "Ungrouped" msgstr "소속그룹없음" -#: utils/misc/guc.c:550 +#: utils/misc/guc.c:572 msgid "File Locations" msgstr "파일 위치" -#: utils/misc/guc.c:552 +#: utils/misc/guc.c:574 msgid "Connections and Authentication" msgstr "연결과 인증" -#: utils/misc/guc.c:554 +#: utils/misc/guc.c:576 msgid "Connections and Authentication / Connection Settings" msgstr "연결과 인증 / 연결 설정값" -#: utils/misc/guc.c:556 +#: utils/misc/guc.c:578 msgid "Connections and Authentication / Security and Authentication" msgstr "연결과 안증 / 보안과 인증" -#: utils/misc/guc.c:558 +#: utils/misc/guc.c:580 msgid "Resource Usage" msgstr "자원 사용량" -#: utils/misc/guc.c:560 +#: utils/misc/guc.c:582 msgid "Resource Usage / Memory" msgstr "자원 사용량 / 메모리" -#: utils/misc/guc.c:562 +#: utils/misc/guc.c:584 msgid "Resource Usage / Disk" msgstr "자원 사용량 / 디스크" -#: utils/misc/guc.c:564 +#: 
utils/misc/guc.c:586 msgid "Resource Usage / Kernel Resources" msgstr "자원 사용량 / 커널 자원" -#: utils/misc/guc.c:566 +#: utils/misc/guc.c:588 msgid "Resource Usage / Cost-Based Vacuum Delay" msgstr "자원 사용량 / 비용기반 청소 지연" -#: utils/misc/guc.c:568 +#: utils/misc/guc.c:590 msgid "Resource Usage / Background Writer" msgstr "자원 사용량 / 백그라운드 쓰기" -#: utils/misc/guc.c:570 +#: utils/misc/guc.c:592 msgid "Resource Usage / Asynchronous Behavior" msgstr "자원 사용량 / 비동기 기능" -#: utils/misc/guc.c:572 +#: utils/misc/guc.c:594 msgid "Write-Ahead Log" msgstr "Write-Ahead 로그" -#: utils/misc/guc.c:574 +#: utils/misc/guc.c:596 msgid "Write-Ahead Log / Settings" msgstr "Write-Ahead 로그 / 설정값" -#: utils/misc/guc.c:576 +#: utils/misc/guc.c:598 msgid "Write-Ahead Log / Checkpoints" msgstr "Write-Ahead 로그 / 체크포인트" -#: utils/misc/guc.c:578 +#: utils/misc/guc.c:600 msgid "Write-Ahead Log / Archiving" msgstr "Write-Ahead 로그 / 아카이브" -#: utils/misc/guc.c:580 +#: utils/misc/guc.c:602 msgid "Replication" msgstr "복제" -#: utils/misc/guc.c:582 +#: utils/misc/guc.c:604 msgid "Replication / Sending Servers" msgstr "복제 / 보내기 서버" -#: utils/misc/guc.c:584 +#: utils/misc/guc.c:606 msgid "Replication / Master Server" msgstr "복제 / 주 서버" -#: utils/misc/guc.c:586 +#: utils/misc/guc.c:608 msgid "Replication / Standby Servers" msgstr "복제 / 대기 서버" -#: utils/misc/guc.c:588 +#: utils/misc/guc.c:610 +msgid "Replication / Subscribers" +msgstr "복제 / 구독" + +#: utils/misc/guc.c:612 msgid "Query Tuning" msgstr "쿼리 튜닝" -#: utils/misc/guc.c:590 +#: utils/misc/guc.c:614 msgid "Query Tuning / Planner Method Configuration" msgstr "쿼리 튜닝 / 실행계획기 메서드 설정" -#: utils/misc/guc.c:592 +#: utils/misc/guc.c:616 msgid "Query Tuning / Planner Cost Constants" msgstr "쿼리 튜닝 / 실행계획기 비용 상수" -#: utils/misc/guc.c:594 +#: utils/misc/guc.c:618 msgid "Query Tuning / Genetic Query Optimizer" msgstr "쿼리 튜닝 / 일반적인 쿼리 최적화기" -#: utils/misc/guc.c:596 +#: utils/misc/guc.c:620 msgid "Query Tuning / Other Planner Options" msgstr "쿼리 튜닝 / 기타 실행계획기 옵션들" -#: utils/misc/guc.c:598 +#: utils/misc/guc.c:622 msgid "Reporting and Logging" msgstr "보고와 로그" -#: utils/misc/guc.c:600 +#: utils/misc/guc.c:624 msgid "Reporting and Logging / Where to Log" msgstr "보고와 로그 / 로그 위치" -#: utils/misc/guc.c:602 +#: utils/misc/guc.c:626 msgid "Reporting and Logging / When to Log" msgstr "보고와 로그 / 로그 시점" -#: utils/misc/guc.c:604 +#: utils/misc/guc.c:628 msgid "Reporting and Logging / What to Log" msgstr "보고와 로그 / 로그 내용" -#: utils/misc/guc.c:606 +#: utils/misc/guc.c:630 msgid "Process Title" msgstr "프로세스 제목" -#: utils/misc/guc.c:608 +#: utils/misc/guc.c:632 msgid "Statistics" msgstr "통계" -#: utils/misc/guc.c:610 +#: utils/misc/guc.c:634 msgid "Statistics / Monitoring" msgstr "통계 / 모니터링" -#: utils/misc/guc.c:612 +#: utils/misc/guc.c:636 msgid "Statistics / Query and Index Statistics Collector" msgstr "통계 / 쿼리 및 인덱스 사용 통계 수집기" -#: utils/misc/guc.c:614 +#: utils/misc/guc.c:638 msgid "Autovacuum" msgstr "Autovacuum" -#: utils/misc/guc.c:616 +#: utils/misc/guc.c:640 msgid "Client Connection Defaults" msgstr "클라이언트 연결 초기값" -#: utils/misc/guc.c:618 +#: utils/misc/guc.c:642 msgid "Client Connection Defaults / Statement Behavior" msgstr "클라이언트 연결 초기값 / 구문 특성" -#: utils/misc/guc.c:620 +#: utils/misc/guc.c:644 msgid "Client Connection Defaults / Locale and Formatting" msgstr "클라이언트 연결 초기값 / 로케일과 출력양식" -#: utils/misc/guc.c:622 +#: utils/misc/guc.c:646 msgid "Client Connection Defaults / Shared Library Preloading" msgstr "클라이언트 연결 초기값 / 공유 라이브러리 미리 로딩" -#: utils/misc/guc.c:624 +#: utils/misc/guc.c:648 msgid "Client Connection 
Defaults / Other Defaults" msgstr "클라이언트 연결 초기값 / 기타 초기값" -#: utils/misc/guc.c:626 +#: utils/misc/guc.c:650 msgid "Lock Management" msgstr "잠금 관리" -#: utils/misc/guc.c:628 +#: utils/misc/guc.c:652 msgid "Version and Platform Compatibility" msgstr "버전과 플랫폼 호환성" -#: utils/misc/guc.c:630 +#: utils/misc/guc.c:654 msgid "Version and Platform Compatibility / Previous PostgreSQL Versions" msgstr "버전과 플랫폼 호환성 / 이전 PostgreSQL 버전" -#: utils/misc/guc.c:632 +#: utils/misc/guc.c:656 msgid "Version and Platform Compatibility / Other Platforms and Clients" msgstr "버전과 플랫폼 호환성 / 다른 플랫폼과 클라이언트" -#: utils/misc/guc.c:634 +#: utils/misc/guc.c:658 msgid "Error Handling" msgstr "오류 처리" -#: utils/misc/guc.c:636 +#: utils/misc/guc.c:660 msgid "Preset Options" msgstr "프리셋 옵션들" -#: utils/misc/guc.c:638 +#: utils/misc/guc.c:662 msgid "Customized Options" msgstr "사용자 정의 옵션들" -#: utils/misc/guc.c:640 +#: utils/misc/guc.c:664 msgid "Developer Options" msgstr "개발자 옵션들" -#: utils/misc/guc.c:697 +#: utils/misc/guc.c:721 msgid "Valid units for this parameter are \"kB\", \"MB\", \"GB\", and \"TB\"." msgstr "이 매개 변수에 유효한 단위는 \"kB\", \"MB\",\"GB\", \"TB\" 입니다." -#: utils/misc/guc.c:724 +#: utils/misc/guc.c:748 msgid "" "Valid units for this parameter are \"ms\", \"s\", \"min\", \"h\", and \"d\"." msgstr "" "이 매개 변수에 유효한 단위는 \"ms\", \"s\", \"min\", \"h\", \"d\" 입니다." -#: utils/misc/guc.c:783 +#: utils/misc/guc.c:807 msgid "Enables the planner's use of sequential-scan plans." msgstr "실행계획자가 순차적-스캔(sequential-sca) 계획을 사용함" -#: utils/misc/guc.c:792 +#: utils/misc/guc.c:816 msgid "Enables the planner's use of index-scan plans." msgstr "실행계획자가 인덱스-스캔 계획을 사용함." -#: utils/misc/guc.c:801 +#: utils/misc/guc.c:825 msgid "Enables the planner's use of index-only-scan plans." msgstr "실행계획자가 인덱스-전용-탐색 계획을 사용함." -#: utils/misc/guc.c:810 +#: utils/misc/guc.c:834 msgid "Enables the planner's use of bitmap-scan plans." msgstr "실행계획기가 bitmap-scan 계획을 사용하도록 함" -#: utils/misc/guc.c:819 +#: utils/misc/guc.c:843 msgid "Enables the planner's use of TID scan plans." msgstr "실행계획자가 TID 스캔 계획을 사용함" -#: utils/misc/guc.c:828 +#: utils/misc/guc.c:852 msgid "Enables the planner's use of explicit sort steps." msgstr "실행계획자가 명시 정렬 단계(explicit sort step)를 사용함" -#: utils/misc/guc.c:837 +#: utils/misc/guc.c:861 msgid "Enables the planner's use of hashed aggregation plans." msgstr "실행계획자가 해시된 집계 계획을 사용함" -#: utils/misc/guc.c:846 +#: utils/misc/guc.c:870 msgid "Enables the planner's use of materialization." msgstr "실행계획자가 materialization 계획을 사용함" -#: utils/misc/guc.c:855 +#: utils/misc/guc.c:879 msgid "Enables the planner's use of nested-loop join plans." msgstr "실행계획자가 근접순환 조인(nested-loop join) 계획을 사용함" -#: utils/misc/guc.c:864 +#: utils/misc/guc.c:888 msgid "Enables the planner's use of merge join plans." msgstr "실행계획자가 병합 조인(merge join) 계획을 사용함" -#: utils/misc/guc.c:873 +#: utils/misc/guc.c:897 msgid "Enables the planner's use of hash join plans." msgstr "실행계획자가 해시 조인(hash join) 계획을 사용함" -#: utils/misc/guc.c:883 +#: utils/misc/guc.c:906 +msgid "Enables the planner's use of gather merge plans." +msgstr "실행계획자가 병합 수집(gather merge) 계획을 사용함" + +#: utils/misc/guc.c:916 msgid "Enables genetic query optimization." msgstr "유전적 쿼리 최적화(GEQO)를 사용함" -#: utils/misc/guc.c:884 +#: utils/misc/guc.c:917 msgid "This algorithm attempts to do planning without exhaustive searching." msgstr "이 알고리즘은 실행계획기의 과도한 작업 비용을 낮춥니다" -#: utils/misc/guc.c:894 +#: utils/misc/guc.c:927 msgid "Shows whether the current user is a superuser." msgstr "현재 사용자가 슈퍼유저인지 보여줍니다." 
-#: utils/misc/guc.c:904 +#: utils/misc/guc.c:937 msgid "Enables advertising the server via Bonjour." msgstr "Bonjour 서버 사용" -#: utils/misc/guc.c:913 +#: utils/misc/guc.c:946 msgid "Collects transaction commit time." msgstr "트랜잭션 커밋 시간을 수집함" -#: utils/misc/guc.c:922 +#: utils/misc/guc.c:955 msgid "Enables SSL connections." msgstr "SSL 연결을 가능하게 함." -#: utils/misc/guc.c:931 +#: utils/misc/guc.c:964 msgid "Give priority to server ciphersuite order." msgstr "SSL 인증 알고리즘 우선 순위를 정함" -#: utils/misc/guc.c:940 +#: utils/misc/guc.c:973 msgid "Forces synchronization of updates to disk." msgstr "강제로 변경된 버퍼 자료를 디스크와 동기화 시킴." -#: utils/misc/guc.c:941 +#: utils/misc/guc.c:974 msgid "" "The server will use the fsync() system call in several places to make sure " "that updates are physically written to disk. This insures that a database " @@ -22554,11 +24303,11 @@ msgstr "" "스템의 비정상적인 동작이나, 하드웨어에서 오류가 발생되었을 경우에도 자료를 안" "전하게 지킬 수 있도록 도와줄 것입니다." -#: utils/misc/guc.c:952 +#: utils/misc/guc.c:985 msgid "Continues processing after a checksum failure." msgstr "체크섬 실패 후 처리 계속 함" -#: utils/misc/guc.c:953 +#: utils/misc/guc.c:986 msgid "" "Detection of a checksum failure normally causes PostgreSQL to report an " "error, aborting the current transaction. Setting ignore_checksum_failure to " @@ -22573,11 +24322,11 @@ msgstr "" "니다. 이 설정은 데이터 클러스터에서 체크섬 기능이 활성화 되어 있는 경우에만 " "영향을 받습니다." -#: utils/misc/guc.c:967 +#: utils/misc/guc.c:1000 msgid "Continues processing past damaged page headers." msgstr "손상된 자료 헤더 발견시 작업 진행 여부 선택" -#: utils/misc/guc.c:968 +#: utils/misc/guc.c:1001 msgid "" "Detection of a damaged page header normally causes PostgreSQL to report an " "error, aborting the current transaction. Setting zero_damaged_pages to true " @@ -22591,11 +24340,11 @@ msgstr "" "계속 진행합니다. 이 기능을 사용한다 함은 손상된 자료를 없애겠다는 것을 의미합" "니다. 이것은 곧 저장되어있는 자료가 삭제 될 수도 있음을 의미하기도 합니다." -#: utils/misc/guc.c:981 +#: utils/misc/guc.c:1014 msgid "Writes full pages to WAL when first modified after a checkpoint." msgstr "체크포인트 후 처음 수정할 때 전체 페이지를 WAL에 씁니다." -#: utils/misc/guc.c:982 +#: utils/misc/guc.c:1015 msgid "" "A page write in process during an operating system crash might be only " "partially written to disk. During recovery, the row changes stored in WAL " @@ -22607,7 +24356,7 @@ msgstr "" "없을 수도 있습니다. 이 옵션은 안전하게 복구가 가능하도록 체크포인트 후 처음 " "수정한 페이지는 그 페이지 전체를 WAL에 씁니다." -#: utils/misc/guc.c:995 +#: utils/misc/guc.c:1028 msgid "" "Writes full pages to WAL when first modified after a checkpoint, even for a " "non-critical modifications." @@ -22615,85 +24364,85 @@ msgstr "" "체크포인트 작업 후 자료 페이지에 첫 변경이 있는 경우, WAL에 변경된 내용만 기" "록하는 것이 아니라, 해당 페이지 전체를 기록합니다." -#: utils/misc/guc.c:1005 +#: utils/misc/guc.c:1038 msgid "Compresses full-page writes written in WAL file." msgstr "WAL 파일에 기록되는 전체 페이지를 압축함" -#: utils/misc/guc.c:1015 +#: utils/misc/guc.c:1048 msgid "Logs each checkpoint." msgstr "체크포인트 관련 정보를 기록합니다." -#: utils/misc/guc.c:1024 +#: utils/misc/guc.c:1057 msgid "Logs each successful connection." msgstr "연결 성공한 정보들 모두를 기록함" -#: utils/misc/guc.c:1033 +#: utils/misc/guc.c:1066 msgid "Logs end of a session, including duration." msgstr "기간을 포함하여 세션의 끝을 기록합니다." -#: utils/misc/guc.c:1042 +#: utils/misc/guc.c:1075 msgid "Logs each replication command." msgstr "복제 관련 작업 내역을 기록합니다." -#: utils/misc/guc.c:1051 +#: utils/misc/guc.c:1084 msgid "Shows whether the running server has assertion checks enabled." msgstr "서버가 assertion 검사 기능이 활성화 되어 실행되는지 보여 줌" -#: utils/misc/guc.c:1066 +#: utils/misc/guc.c:1099 msgid "Terminate session on any error." 
msgstr "어떤 오류가 생기면 세션을 종료함" -#: utils/misc/guc.c:1075 +#: utils/misc/guc.c:1108 msgid "Reinitialize server after backend crash." msgstr "백엔드가 비정상 종료되면 서버를 재초기화함" -#: utils/misc/guc.c:1085 +#: utils/misc/guc.c:1118 msgid "Logs the duration of each completed SQL statement." msgstr "SQL 명령 구문의 실행완료 시간을 기록함" -#: utils/misc/guc.c:1094 +#: utils/misc/guc.c:1127 msgid "Logs each query's parse tree." msgstr "각 쿼리의 구문 분석 트리를 기록합니다." -#: utils/misc/guc.c:1103 +#: utils/misc/guc.c:1136 msgid "Logs each query's rewritten parse tree." msgstr "각 쿼리의 재작성된 구문 분석 트리를 기록합니다." -#: utils/misc/guc.c:1112 +#: utils/misc/guc.c:1145 msgid "Logs each query's execution plan." msgstr "각 쿼리의 실행 계획을 기록합니다." -#: utils/misc/guc.c:1121 +#: utils/misc/guc.c:1154 msgid "Indents parse and plan tree displays." msgstr "구문과 실행계획을 보여 줄때, 들여쓰기를 함." -#: utils/misc/guc.c:1130 +#: utils/misc/guc.c:1163 msgid "Writes parser performance statistics to the server log." msgstr "구문분석 성능 통계를 서버 로그에 기록함." -#: utils/misc/guc.c:1139 +#: utils/misc/guc.c:1172 msgid "Writes planner performance statistics to the server log." msgstr "실행계획자 성능 통계를 서버 로그에 기록함." -#: utils/misc/guc.c:1148 +#: utils/misc/guc.c:1181 msgid "Writes executor performance statistics to the server log." msgstr "실행자 성능 통계를 서버 로그에 기록함." -#: utils/misc/guc.c:1157 +#: utils/misc/guc.c:1190 msgid "Writes cumulative performance statistics to the server log." msgstr "누적 성능 통계를 서버 로그에 기록함." -#: utils/misc/guc.c:1167 +#: utils/misc/guc.c:1200 msgid "" "Logs system resource usage statistics (memory and CPU) on various B-tree " "operations." msgstr "다양한 B트리 작업에 자원(메모리, CPU) 사용 통계를 기록에 남기" -#: utils/misc/guc.c:1179 +#: utils/misc/guc.c:1212 msgid "Collects information about executing commands." msgstr "명령 실행에 대한 정보를 수집함" -#: utils/misc/guc.c:1180 +#: utils/misc/guc.c:1213 msgid "" "Enables the collection of information on the currently executing command of " "each session, along with the time at which that command began execution." @@ -22701,59 +24450,59 @@ msgstr "" "각 세션에서 사용하고 있는 현재 실행 중인 명령의 수행 시간, 명령 내용등에 대" "한 정보를 수집하도록 함" -#: utils/misc/guc.c:1190 +#: utils/misc/guc.c:1223 msgid "Collects statistics on database activity." msgstr "데이터베이스 활동에 대한 통계를 수집합니다." -#: utils/misc/guc.c:1199 +#: utils/misc/guc.c:1232 msgid "Collects timing statistics for database I/O activity." msgstr "데이터베이스 I/O 활동에 대한 통계를 수집합니다." -#: utils/misc/guc.c:1209 +#: utils/misc/guc.c:1242 msgid "Updates the process title to show the active SQL command." msgstr "활성 SQL 명령을 표시하도록 프로세스 제목을 업데이트합니다." -#: utils/misc/guc.c:1210 +#: utils/misc/guc.c:1243 msgid "" "Enables updating of the process title every time a new SQL command is " "received by the server." msgstr "" "서버가 새 SQL 명령을 받을 때마다 프로세스 제목이 업데이트될 수 있도록 합니다." -#: utils/misc/guc.c:1223 +#: utils/misc/guc.c:1256 msgid "Starts the autovacuum subprocess." msgstr "자동 청소 하위 프로세스를 실행함" -#: utils/misc/guc.c:1233 +#: utils/misc/guc.c:1266 msgid "Generates debugging output for LISTEN and NOTIFY." msgstr "LISTEN, NOTIFY 명령 사용을 위한 디버깅 출력을 만듦." -#: utils/misc/guc.c:1245 +#: utils/misc/guc.c:1278 msgid "Emits information about lock usage." msgstr "잠금 사용 정보를 로그로 남김" -#: utils/misc/guc.c:1255 +#: utils/misc/guc.c:1288 msgid "Emits information about user lock usage." msgstr "사용자 잠금 사용 정보를 로그로 남김" -#: utils/misc/guc.c:1265 +#: utils/misc/guc.c:1298 msgid "Emits information about lightweight lock usage." msgstr "가벼운 잠금 사용 정보를 로그로 남김" -#: utils/misc/guc.c:1275 +#: utils/misc/guc.c:1308 msgid "" "Dumps information about all current locks when a deadlock timeout occurs." 
msgstr "교착 잠금 시간 제한 상황이 발생하면 그 때의 모든 잠금 정보를 보여줌" -#: utils/misc/guc.c:1287 +#: utils/misc/guc.c:1320 msgid "Logs long lock waits." msgstr "긴 잠금 대기를 기록합니다." -#: utils/misc/guc.c:1297 +#: utils/misc/guc.c:1330 msgid "Logs the host name in the connection logs." msgstr "연결 기록에서 호스트 이름을 기록함." -#: utils/misc/guc.c:1298 +#: utils/misc/guc.c:1331 msgid "" "By default, connection logs only show the IP address of the connecting host. " "If you want them to show the host name you can turn this on, but depending " @@ -22764,30 +24513,11 @@ msgstr "" "true로 바꾼다면, 이 IP의 호스트 이름을 구해서 이 이름을 사용합니다 이것의 성" "능은 OS의 IP에서 이름구하기 성능과 관계됩니다." -#: utils/misc/guc.c:1309 -msgid "Causes subtables to be included by default in various commands." -msgstr "" -"다양한 명령들에서 기본적으로 상속되는 테이블들 함께 사용할 것인지 정함." - -#: utils/misc/guc.c:1318 -msgid "Encrypt passwords." -msgstr "암호를 암호화 해서 기록함" - -#: utils/misc/guc.c:1319 -msgid "" -"When a password is specified in CREATE USER or ALTER USER without writing " -"either ENCRYPTED or UNENCRYPTED, this parameter determines whether the " -"password is to be encrypted." -msgstr "" -"CREATE USER 또는 ALTER USER 명령에서 ENCRYPTED 또는 UNENCRYPTED 속성을 특별" -"히 지정하지 않았고 사용자 암호를 지정했을 때, 그 암호를 암호화 해서 저장할 것" -"인지 아닌지를 지정함" - -#: utils/misc/guc.c:1329 +#: utils/misc/guc.c:1342 msgid "Treats \"expr=NULL\" as \"expr IS NULL\"." msgstr "\"표현=NULL\" 식을 \"표현 IS NULL\"로 취급함." -#: utils/misc/guc.c:1330 +#: utils/misc/guc.c:1343 msgid "" "When turned on, expressions of the form expr = NULL (or NULL = expr) are " "treated as expr IS NULL, that is, they return true if expr evaluates to the " @@ -22798,23 +24528,23 @@ msgstr "" "expr = NULL 구문을 expr IS NULL 구문으로 바꾸어서 처리하도록 함이렇게하면, " "윗 구문은 true 를 리턴함" -#: utils/misc/guc.c:1342 +#: utils/misc/guc.c:1355 msgid "Enables per-database user names." msgstr "per-database 사용자 이름 활성화." -#: utils/misc/guc.c:1351 +#: utils/misc/guc.c:1364 msgid "Sets the default read-only status of new transactions." msgstr "새로운 트랜잭션의 상태를 초기값으로 읽기전용으로 설정합니다." -#: utils/misc/guc.c:1360 +#: utils/misc/guc.c:1373 msgid "Sets the current transaction's read-only status." msgstr "현재 트랜잭셕의 읽기 전용 상태를 지정합니다." -#: utils/misc/guc.c:1370 +#: utils/misc/guc.c:1383 msgid "Sets the default deferrable status of new transactions." msgstr "새 트랜잭션의 기본 지연 가능한 상태를 지정" -#: utils/misc/guc.c:1379 +#: utils/misc/guc.c:1392 msgid "" "Whether to defer a read-only serializable transaction until it can be " "executed with no possible serialization failures." @@ -22822,24 +24552,24 @@ msgstr "" "읽기 전용 직렬화 가능한 트랜잭션이 직렬 처리에서 오류가 없을 때까지 그 트랜잭" "션을 지연할 것이지 결정함" -#: utils/misc/guc.c:1389 +#: utils/misc/guc.c:1402 msgid "Enable row security." msgstr "로우 단위 보안 기능을 활성화" -#: utils/misc/guc.c:1390 +#: utils/misc/guc.c:1403 msgid "When enabled, row security will be applied to all users." msgstr "이 값이 활성화 되면 로우 단위 보안 기능이 모든 사용자 대상으로 적용됨" -#: utils/misc/guc.c:1398 +#: utils/misc/guc.c:1411 msgid "Check function bodies during CREATE FUNCTION." msgstr "" "CREATE FUNCTION 명령으로 함수를 만들 때, 함수 본문 부분의 구문을 검사합니다." -#: utils/misc/guc.c:1407 +#: utils/misc/guc.c:1420 msgid "Enable input of NULL elements in arrays." msgstr "배열에 NULL 요소가 입력될 수 있도록 합니다." -#: utils/misc/guc.c:1408 +#: utils/misc/guc.c:1421 msgid "" "When turned on, unquoted NULL in an array input value means a null value; " "otherwise it is taken literally." @@ -22847,42 +24577,42 @@ msgstr "" "이 값이 on이면 배열 입력 값에 따옴표 없이 입력된 NULL이 null 값을 의미하고, " "그렇지 않으면 문자 그대로 처리됩니다." -#: utils/misc/guc.c:1418 +#: utils/misc/guc.c:1431 msgid "Create new tables with OIDs by default." 
msgstr "기본적으로 OID를 사용하여 새 테이블을 만듭니다." -#: utils/misc/guc.c:1427 +#: utils/misc/guc.c:1440 msgid "" "Start a subprocess to capture stderr output and/or csvlogs into log files." msgstr "" "로그 기록 하위 프로세스를 시작하여 stderr 출력 및/또는 csvlog를 로그 파일에 " "씁니다." -#: utils/misc/guc.c:1436 +#: utils/misc/guc.c:1449 msgid "Truncate existing log files of same name during log rotation." msgstr "로그 회전 중 동일한 이름의 기존 로그 파일을 자릅니다." -#: utils/misc/guc.c:1447 +#: utils/misc/guc.c:1460 msgid "Emit information about resource usage in sorting." msgstr "정렬 시 리소스 사용 정보를 내보냅니다." -#: utils/misc/guc.c:1461 +#: utils/misc/guc.c:1474 msgid "Generate debugging output for synchronized scanning." msgstr "동기화된 스캔을 위해 디버깅 출력을 생성합니다." -#: utils/misc/guc.c:1476 +#: utils/misc/guc.c:1489 msgid "Enable bounded sorting using heap sort." msgstr "힙 정렬을 통해 제한적 정렬을 사용합니다." -#: utils/misc/guc.c:1489 +#: utils/misc/guc.c:1502 msgid "Emit WAL-related debugging output." msgstr "WAL 관련 디버깅 출력을 내보냅니다." -#: utils/misc/guc.c:1501 +#: utils/misc/guc.c:1514 msgid "Datetimes are integer based." msgstr "datetime 형을 정수형으로 사용함" -#: utils/misc/guc.c:1516 +#: utils/misc/guc.c:1525 msgid "" "Sets whether Kerberos and GSSAPI user names should be treated as case-" "insensitive." @@ -22890,38 +24620,38 @@ msgstr "" "Kerberos 및 GSSAPI 사용자 이름에서 대/소문자를 구분하지 않을지 여부를 설정합" "니다." -#: utils/misc/guc.c:1526 +#: utils/misc/guc.c:1535 msgid "Warn about backslash escapes in ordinary string literals." msgstr "일반 문자열 리터럴의 백슬래시 이스케이프에 대해 경고합니다." -#: utils/misc/guc.c:1536 +#: utils/misc/guc.c:1545 msgid "Causes '...' strings to treat backslashes literally." msgstr "'...' 문자열에서 백슬래시가 리터럴로 처리되도록 합니다." -#: utils/misc/guc.c:1547 +#: utils/misc/guc.c:1556 msgid "Enable synchronized sequential scans." msgstr "동기화된 순차적 스캔을 사용합니다." -#: utils/misc/guc.c:1557 +#: utils/misc/guc.c:1566 msgid "Allows connections and queries during recovery." msgstr "복구 중에서도 접속과 쿼리 사용을 허용함" -#: utils/misc/guc.c:1567 +#: utils/misc/guc.c:1576 msgid "" "Allows feedback from a hot standby to the primary that will avoid query " "conflicts." msgstr "" "읽기 전용 보조 서버가 보내는 쿼리 충돌을 피하기 위한 피드백을 주 서버가 받음" -#: utils/misc/guc.c:1577 +#: utils/misc/guc.c:1586 msgid "Allows modifications of the structure of system tables." msgstr "시스템 테이블의 구조를 수정할 수 있도록 합니다." -#: utils/misc/guc.c:1588 +#: utils/misc/guc.c:1597 msgid "Disables reading from system indexes." msgstr "시스템 인덱스 읽기를 금지함" -#: utils/misc/guc.c:1589 +#: utils/misc/guc.c:1598 msgid "" "It does not prevent updating the indexes, so it is safe to use. The worst " "consequence is slowness." @@ -22929,12 +24659,12 @@ msgstr "" "이 설정이 활성화 되어도 그 인덱스는 갱신되어 사용하는데는 안전합니다. 하지" "만 서버가 전체적으로 늦어질 수 있습니다." -#: utils/misc/guc.c:1600 +#: utils/misc/guc.c:1609 msgid "" "Enables backward compatibility mode for privilege checks on large objects." msgstr "대형 객체에 대한 접근 권한 검사를 위한 하위 호환성이 있게 함" -#: utils/misc/guc.c:1601 +#: utils/misc/guc.c:1610 msgid "" "Skips privilege checks when reading or modifying large objects, for " "compatibility with PostgreSQL releases prior to 9.0." @@ -22942,60 +24672,60 @@ msgstr "" "PostgreSQL 9.0 이전 버전의 호환성을 위해 대형 객체에 대한 읽기, 변경 시 접근 " "권한 검사를 안 하도록 설정함" -#: utils/misc/guc.c:1611 +#: utils/misc/guc.c:1620 msgid "" "Emit a warning for constructs that changed meaning since PostgreSQL 9.4." msgstr "PostgreSQL 9.4 버전까지 사용되었던 우선 순위가 적용되면 경고를 보여줌" -#: utils/misc/guc.c:1621 +#: utils/misc/guc.c:1630 msgid "When generating SQL fragments, quote all identifiers." 
msgstr "SQL 구문을 만들 때, 모든 식별자는 따옴표를 사용함" -#: utils/misc/guc.c:1631 +#: utils/misc/guc.c:1640 msgid "Shows whether data checksums are turned on for this cluster." msgstr "" -#: utils/misc/guc.c:1642 +#: utils/misc/guc.c:1651 msgid "Add sequence number to syslog messages to avoid duplicate suppression." msgstr "syslog 사용시 메시지 중복을 방지하기 위해 일련 번호를 매깁니다." -#: utils/misc/guc.c:1652 +#: utils/misc/guc.c:1661 msgid "Split messages sent to syslog by lines and to fit into 1024 bytes." msgstr "syslog 사용시 메시지를 한 줄에 1024 바이트만 쓰도록 나눕니다" -#: utils/misc/guc.c:1671 +#: utils/misc/guc.c:1680 msgid "" -"Forces a switch to the next xlog file if a new file has not been started " +"Forces a switch to the next WAL file if a new file has not been started " "within N seconds." msgstr "" -"새 파일이 N초 내에 시작되지 않은 경우 강제로 다음 xlog 파일로 전환합니다." +"새 파일이 N초 내에 시작되지 않은 경우 강제로 다음 WAL 파일로 전환합니다." -#: utils/misc/guc.c:1682 +#: utils/misc/guc.c:1691 msgid "Waits N seconds on connection startup after authentication." msgstr "연결 작업에서 인증이 끝난 뒤 N초 기다림" -#: utils/misc/guc.c:1683 utils/misc/guc.c:2206 +#: utils/misc/guc.c:1692 utils/misc/guc.c:2237 msgid "This allows attaching a debugger to the process." msgstr "이렇게 하면 디버거를 프로세스에 연결할 수 있습니다." -#: utils/misc/guc.c:1692 +#: utils/misc/guc.c:1701 msgid "Sets the default statistics target." msgstr "기본 통계 대상을 지정합니다." -#: utils/misc/guc.c:1693 +#: utils/misc/guc.c:1702 msgid "" "This applies to table columns that have not had a column-specific target set " "via ALTER TABLE SET STATISTICS." msgstr "" -"특정 열을 지정하지 않고 ALTER TABLE SET STATISTICS 명령을 사용했을 때, 통계 " -"대상이 될 열을 지정합니다." +"특정 칼럼을 지정하지 않고 ALTER TABLE SET STATISTICS 명령을 사용했을 때, 통" +"계 대상이 될 칼럼을 지정합니다." -#: utils/misc/guc.c:1702 +#: utils/misc/guc.c:1711 msgid "Sets the FROM-list size beyond which subqueries are not collapsed." msgstr "" "이 크기를 초과할 경우 하위 쿼리가 축소되지 않는 FROM 목록 크기를 설정합니다." -#: utils/misc/guc.c:1704 +#: utils/misc/guc.c:1713 msgid "" "The planner will merge subqueries into upper queries if the resulting FROM " "list would have no more than this many items." @@ -23003,12 +24733,12 @@ msgstr "" "결과 FROM 목록에 포함된 항목이 이 개수를 넘지 않는 경우 계획 관리자가 하" "위 쿼리를 상위 쿼리에 병합합니다." -#: utils/misc/guc.c:1714 +#: utils/misc/guc.c:1723 msgid "Sets the FROM-list size beyond which JOIN constructs are not flattened." msgstr "" "이 크기를 초과할 경우 JOIN 구문이 결합되지 않는 FROM 목록 크기를 설정합니다." -#: utils/misc/guc.c:1716 +#: utils/misc/guc.c:1725 msgid "" "The planner will flatten explicit JOIN constructs into lists of FROM items " "whenever a list of no more than this many items would result." @@ -23016,32 +24746,32 @@ msgstr "" "결과 목록에 포함된 항목이 이 개수를 넘지 않을 때마다 계획 관리자가 명시" "적 JOIN 구문을 FROM 항목 목록에 결합합니다." -#: utils/misc/guc.c:1726 +#: utils/misc/guc.c:1735 msgid "Sets the threshold of FROM items beyond which GEQO is used." msgstr "" "이 임계값을 초과할 경우 GEQO가 사용되는 FROM 항목의 임계값을 설정합니다." -#: utils/misc/guc.c:1735 +#: utils/misc/guc.c:1744 msgid "GEQO: effort is used to set the default for other GEQO parameters." msgstr "GEQO: 다른 GEQO 매개 변수의 기본 값을 설정하는 데 사용됩니다." -#: utils/misc/guc.c:1744 +#: utils/misc/guc.c:1753 msgid "GEQO: number of individuals in the population." msgstr "GEQO: 모집단의 개인 수입니다." -#: utils/misc/guc.c:1745 utils/misc/guc.c:1754 +#: utils/misc/guc.c:1754 utils/misc/guc.c:1763 msgid "Zero selects a suitable default value." msgstr "0을 지정하면 적절한 기본 값이 선택됩니다." -#: utils/misc/guc.c:1753 +#: utils/misc/guc.c:1762 msgid "GEQO: number of iterations of the algorithm." msgstr "GEQO: 알고리즘의 반복 수입니다." 
-#: utils/misc/guc.c:1764 +#: utils/misc/guc.c:1773 msgid "Sets the time to wait on a lock before checking for deadlock." msgstr "교착 상태를 확인하기 전에 잠금을 기다릴 시간을 설정합니다." -#: utils/misc/guc.c:1775 +#: utils/misc/guc.c:1784 msgid "" "Sets the maximum delay before canceling queries when a hot standby server is " "processing archived WAL data." @@ -23049,48 +24779,48 @@ msgstr "" "읽기 전용 보조 서버가 아카이브된 WAL 자료를 처리할 때, 지연될 수 있는 최대 시" "간" -#: utils/misc/guc.c:1786 +#: utils/misc/guc.c:1795 msgid "" "Sets the maximum delay before canceling queries when a hot standby server is " "processing streamed WAL data." msgstr "" "읽기 전용 보조 서버가 스트림 WAL 자료를 처리할 때, 지연될 수 있는 최대 시간" -#: utils/misc/guc.c:1797 +#: utils/misc/guc.c:1806 msgid "" "Sets the maximum interval between WAL receiver status reports to the primary." msgstr "주 서버로 WAL 수신기 상태를 보고하는 최대 간격" -#: utils/misc/guc.c:1808 +#: utils/misc/guc.c:1817 msgid "Sets the maximum wait time to receive data from the primary." msgstr "" "주 서버에서 보낸 자료를 받기위해 기다릴 수 있는 최대 허용 시간을 설정합니다." -#: utils/misc/guc.c:1819 +#: utils/misc/guc.c:1828 msgid "Sets the maximum number of concurrent connections." msgstr "최대 동시 접속수를 지정합니다." -#: utils/misc/guc.c:1829 +#: utils/misc/guc.c:1838 msgid "Sets the number of connection slots reserved for superusers." msgstr "superuser 동시 접속수를 지정합니다." -#: utils/misc/guc.c:1843 +#: utils/misc/guc.c:1852 msgid "Sets the number of shared memory buffers used by the server." msgstr "서버에서 사용할 공유 메모리의 개수를 지정함" -#: utils/misc/guc.c:1854 +#: utils/misc/guc.c:1863 msgid "Sets the maximum number of temporary buffers used by each session." msgstr "각 세션에서 사용하는 임시 버퍼의 최대 개수를 지정" -#: utils/misc/guc.c:1865 +#: utils/misc/guc.c:1874 msgid "Sets the TCP port the server listens on." msgstr "TCP 포트 번호를 지정함." -#: utils/misc/guc.c:1875 +#: utils/misc/guc.c:1884 msgid "Sets the access permissions of the Unix-domain socket." msgstr "유닉스 도메인 소켓 파일의 액세스 권한을 지정함" -#: utils/misc/guc.c:1876 +#: utils/misc/guc.c:1885 msgid "" "Unix-domain sockets use the usual Unix file system permission set. The " "parameter value is expected to be a numeric mode specification in the form " @@ -23101,11 +24831,11 @@ msgstr "" "수 값은 chmod 및 umask 시스템 호출에서 수락되는 형태의 숫자 모드 지정이어야 " "합니다. (일반적인 8진수 형식을 사용하려면 숫자가 0으로 시작해야 합니다.)" -#: utils/misc/guc.c:1890 +#: utils/misc/guc.c:1899 msgid "Sets the file permissions for log files." msgstr "로그 파일의 파일 접근 권한을 지정합니다." -#: utils/misc/guc.c:1891 +#: utils/misc/guc.c:1900 msgid "" "The parameter value is expected to be a numeric mode specification in the " "form accepted by the chmod and umask system calls. (To use the customary " @@ -23115,11 +24845,11 @@ msgstr "" "이어야 합니다. (일반적인 8진수 형식을 사용하려면 숫자가 0으로 시작해야 합니" "다.)" -#: utils/misc/guc.c:1904 +#: utils/misc/guc.c:1913 msgid "Sets the maximum memory to be used for query workspaces." msgstr "쿼리 작업공간을 위해 사용될 메모리의 최대값을 지정함." -#: utils/misc/guc.c:1905 +#: utils/misc/guc.c:1914 msgid "" "This much memory can be used by each internal sort operation and hash table " "before switching to temporary disk files." @@ -23127,130 +24857,130 @@ msgstr "" "임시 디스크 파일로 전환하기 전에 각 내부 정렬 작업과 해시 테이블에서 이 크기" "의 메모리를 사용할 수 있습니다." -#: utils/misc/guc.c:1917 +#: utils/misc/guc.c:1926 msgid "Sets the maximum memory to be used for maintenance operations." msgstr "관리 작업을 위해 사용될 메모리의 최대값을 지정함." -#: utils/misc/guc.c:1918 +#: utils/misc/guc.c:1927 msgid "This includes operations such as VACUUM and CREATE INDEX." msgstr "관리작업은 VACUUM, CREATE INDEX 같은 작업을 뜻합니다." 
-#: utils/misc/guc.c:1928 +#: utils/misc/guc.c:1937 msgid "" "Sets the maximum number of tuples to be sorted using replacement selection." msgstr "replacement selection 기능을 이용할 최대 튜플 수" -#: utils/misc/guc.c:1929 +#: utils/misc/guc.c:1938 msgid "When more tuples than this are present, quicksort will be used." msgstr "이 튜플 수 보다 많으면, quicksort 를 사용함" -#: utils/misc/guc.c:1943 +#: utils/misc/guc.c:1952 msgid "Sets the maximum stack depth, in kilobytes." msgstr "스택깊이(KB 단위) 최대값을 지정합니다." -#: utils/misc/guc.c:1954 +#: utils/misc/guc.c:1963 msgid "Limits the total size of all temporary files used by each process." msgstr "각 프로세스에서 사용하는 모든 임시 파일의 총 크기 제한" -#: utils/misc/guc.c:1955 +#: utils/misc/guc.c:1964 msgid "-1 means no limit." msgstr "-1은 제한 없음" -#: utils/misc/guc.c:1965 +#: utils/misc/guc.c:1974 msgid "Vacuum cost for a page found in the buffer cache." msgstr "버퍼 캐시에 있는 페이지의 청소 비용입니다." -#: utils/misc/guc.c:1975 +#: utils/misc/guc.c:1984 msgid "Vacuum cost for a page not found in the buffer cache." msgstr "버퍼 캐시에 없는 페이지의 청소 비용입니다." -#: utils/misc/guc.c:1985 +#: utils/misc/guc.c:1994 msgid "Vacuum cost for a page dirtied by vacuum." msgstr "청소로 페이지 변경 시 부과되는 비용입니다." -#: utils/misc/guc.c:1995 +#: utils/misc/guc.c:2004 msgid "Vacuum cost amount available before napping." msgstr "청소가 중지되는 청소 비용 합계입니다." -#: utils/misc/guc.c:2005 +#: utils/misc/guc.c:2014 msgid "Vacuum cost delay in milliseconds." msgstr "청소 비용 지연(밀리초)입니다." -#: utils/misc/guc.c:2016 +#: utils/misc/guc.c:2025 msgid "Vacuum cost delay in milliseconds, for autovacuum." msgstr "자동 청소에 대한 청소 비용 지연(밀리초)입니다." -#: utils/misc/guc.c:2027 +#: utils/misc/guc.c:2036 msgid "Vacuum cost amount available before napping, for autovacuum." msgstr "자동 청소에 대한 청소가 중지되는 청소 비용 합계입니다." -#: utils/misc/guc.c:2037 +#: utils/misc/guc.c:2046 msgid "" "Sets the maximum number of simultaneously open files for each server process." msgstr "각각의 서버 프로세스에서 동시에 열릴 수 있는 최대 파일 갯수를 지정함." -#: utils/misc/guc.c:2050 +#: utils/misc/guc.c:2059 msgid "Sets the maximum number of simultaneously prepared transactions." msgstr "동시에 준비된 트랜잭션 최대 개수 지정" -#: utils/misc/guc.c:2061 +#: utils/misc/guc.c:2070 msgid "Sets the minimum OID of tables for tracking locks." msgstr "잠금 추적을 위한 테이블의 최소 OID 지정" -#: utils/misc/guc.c:2062 +#: utils/misc/guc.c:2071 msgid "Is used to avoid output on system tables." msgstr "" -#: utils/misc/guc.c:2071 +#: utils/misc/guc.c:2080 msgid "Sets the OID of the table with unconditionally lock tracing." msgstr "" -#: utils/misc/guc.c:2083 +#: utils/misc/guc.c:2092 msgid "Sets the maximum allowed duration of any statement." msgstr "모든 쿼리문에 적용되는 허용되는 최대 수행시간" -#: utils/misc/guc.c:2084 utils/misc/guc.c:2095 utils/misc/guc.c:2106 +#: utils/misc/guc.c:2093 utils/misc/guc.c:2104 utils/misc/guc.c:2115 msgid "A value of 0 turns off the timeout." msgstr "이 값이 0이면 이런 제한이 없음." -#: utils/misc/guc.c:2094 +#: utils/misc/guc.c:2103 msgid "Sets the maximum allowed duration of any wait for a lock." msgstr "모든 잠금에 적용되는 기다리는 최대 대기 시간" -#: utils/misc/guc.c:2105 +#: utils/misc/guc.c:2114 msgid "Sets the maximum allowed duration of any idling transaction." msgstr "idle-in-transaction 상태로 있을 수 있는 최대 시간" -#: utils/misc/guc.c:2116 +#: utils/misc/guc.c:2125 msgid "Minimum age at which VACUUM should freeze a table row." msgstr "VACUUM에서 테이블 행을 동결할 때까지의 최소 기간입니다." -#: utils/misc/guc.c:2126 +#: utils/misc/guc.c:2135 msgid "Age at which VACUUM should scan whole table to freeze tuples." msgstr "" "VACUUM에서 튜플을 동결하기 위해 전체 테이블을 스캔할 때까지의 기간입니다." 
-#: utils/misc/guc.c:2136 +#: utils/misc/guc.c:2145 msgid "Minimum age at which VACUUM should freeze a MultiXactId in a table row." msgstr "VACUUM에서 테이블 MultiXactId 동결할 때까지의 최소 기간입니다." -#: utils/misc/guc.c:2146 +#: utils/misc/guc.c:2155 msgid "Multixact age at which VACUUM should scan whole table to freeze tuples." msgstr "" "VACUUM에서 튜플을 동결하기 위해 전체 테이블을 스캔할 때까지의 멀티트랜잭션 기" "간입니다." -#: utils/misc/guc.c:2156 +#: utils/misc/guc.c:2165 msgid "" "Number of transactions by which VACUUM and HOT cleanup should be deferred, " "if any." msgstr "" -#: utils/misc/guc.c:2169 +#: utils/misc/guc.c:2178 msgid "Sets the maximum number of locks per transaction." msgstr "하나의 트랜잭션에서 사용할 수 있는 최대 잠금 횟수를 지정함." -#: utils/misc/guc.c:2170 +#: utils/misc/guc.c:2179 msgid "" "The shared lock table is sized on the assumption that at most " "max_locks_per_transaction * max_connections distinct objects will need to be " @@ -23260,11 +24990,11 @@ msgstr "" "max_locks_per_transaction * max_connections를 넘지 않는다는 가정 하에 크기가 " "지정됩니다." -#: utils/misc/guc.c:2181 +#: utils/misc/guc.c:2190 msgid "Sets the maximum number of predicate locks per transaction." msgstr "하나의 트랜잭션에서 사용할 수 있는 최대 잠금 횟수를 지정함." -#: utils/misc/guc.c:2182 +#: utils/misc/guc.c:2191 msgid "" "The shared predicate lock table is sized on the assumption that at most " "max_pred_locks_per_transaction * max_connections distinct objects will need " @@ -23274,36 +25004,57 @@ msgstr "" "max_pred_locks_per_transaction * max_connections를 넘지 않는다는 가정 하에 크" "기가 지정됩니다." -#: utils/misc/guc.c:2193 +#: utils/misc/guc.c:2202 +msgid "" +"Sets the maximum number of predicate-locked pages and tuples per relation." +msgstr "하나의 트랜잭션에서 사용할 수 있는 페이지와 튜플의 최대수 지정함." + +#: utils/misc/guc.c:2203 +msgid "" +"If more than this total of pages and tuples in the same relation are locked " +"by a connection, those locks are replaced by a relation-level lock." +msgstr "" + +#: utils/misc/guc.c:2213 +msgid "Sets the maximum number of predicate-locked tuples per page." +msgstr "페이지당 잠금 튜플 최대 수 지정." + +#: utils/misc/guc.c:2214 +msgid "" +"If more than this number of tuples on the same page are locked by a " +"connection, those locks are replaced by a page-level lock." +msgstr "" + +#: utils/misc/guc.c:2224 msgid "Sets the maximum allowed time to complete client authentication." msgstr "클라이언트 인증을 완료할 수 있는 최대 허용 시간을 설정합니다." -#: utils/misc/guc.c:2205 +#: utils/misc/guc.c:2236 msgid "Waits N seconds on connection startup before authentication." msgstr "인증 전에 연결이 시작되도록 N초 동안 기다립니다." -#: utils/misc/guc.c:2216 +#: utils/misc/guc.c:2247 msgid "Sets the number of WAL files held for standby servers." msgstr "대기 서버를 위해 보관하고 있을 WAL 파일 개수 지정" -#: utils/misc/guc.c:2226 +#: utils/misc/guc.c:2257 msgid "Sets the minimum size to shrink the WAL to." msgstr "WAL 최소 크기" -#: utils/misc/guc.c:2237 +#: utils/misc/guc.c:2268 msgid "Sets the WAL size that triggers a checkpoint." msgstr "체크포인트 작업을 할 WAL 크기 지정" -#: utils/misc/guc.c:2248 +#: utils/misc/guc.c:2279 msgid "Sets the maximum time between automatic WAL checkpoints." msgstr "자동 WAL 체크포인트 사이의 최대 간격을 설정합니다." -#: utils/misc/guc.c:2259 +#: utils/misc/guc.c:2290 msgid "" "Enables warnings if checkpoint segments are filled more frequently than this." msgstr "지정 시간 안에 체크포인트 조각이 모두 채워지면 경고를 냄" -#: utils/misc/guc.c:2261 +#: utils/misc/guc.c:2292 msgid "" "Write a message to the server log if checkpoints caused by the filling of " "checkpoint segment files happens more frequently than this number of " @@ -23313,53 +25064,53 @@ msgstr "" "용이 꽉 차는 사태가 발생하면 경고 메시지를 서버 로그에 남깁니다. 
이 값을 0으" "로 지정하면 이 기능 없음" -#: utils/misc/guc.c:2273 utils/misc/guc.c:2430 utils/misc/guc.c:2457 +#: utils/misc/guc.c:2304 utils/misc/guc.c:2461 utils/misc/guc.c:2488 msgid "" "Number of pages after which previously performed writes are flushed to disk." msgstr "" -#: utils/misc/guc.c:2284 +#: utils/misc/guc.c:2315 msgid "Sets the number of disk-page buffers in shared memory for WAL." msgstr "" "WAL 기능을 위해 공유 메모리에서 사용할 디스크 페이지 버퍼 개수를 지정함." -#: utils/misc/guc.c:2295 +#: utils/misc/guc.c:2326 msgid "Time between WAL flushes performed in the WAL writer." msgstr "WAL 기록자가 지정 시간 만큼 쉬고 쓰기 작업을 반복함" -#: utils/misc/guc.c:2306 +#: utils/misc/guc.c:2337 msgid "Amount of WAL written out by WAL writer that triggers a flush." msgstr "" -#: utils/misc/guc.c:2318 +#: utils/misc/guc.c:2349 msgid "Sets the maximum number of simultaneously running WAL sender processes." msgstr "동시에 작동할 WAL 송신 프로세스 최대 수 지정" -#: utils/misc/guc.c:2329 +#: utils/misc/guc.c:2360 msgid "Sets the maximum number of simultaneously defined replication slots." msgstr "동시에 사용할 수 있는 복제 슬롯 최대 수 지정" -#: utils/misc/guc.c:2339 +#: utils/misc/guc.c:2370 msgid "Sets the maximum time to wait for WAL replication." msgstr "WAL 복제를 위해 기다릴 최대 시간 설정" -#: utils/misc/guc.c:2350 +#: utils/misc/guc.c:2381 msgid "" "Sets the delay in microseconds between transaction commit and flushing WAL " "to disk." msgstr "" "트랜잭션과 트랜잭션 로그의 적용 사이의 간격을 microsecond 단위로 지정함" -#: utils/misc/guc.c:2362 +#: utils/misc/guc.c:2393 msgid "" "Sets the minimum concurrent open transactions before performing commit_delay." msgstr "commit_delay 처리하기 전에 있는 최소 동시 열려 있는 트랜잭션 개수." -#: utils/misc/guc.c:2373 +#: utils/misc/guc.c:2404 msgid "Sets the number of digits displayed for floating-point values." msgstr "부동소수형 값을 표기할 때 " -#: utils/misc/guc.c:2374 +#: utils/misc/guc.c:2405 msgid "" "This affects real, double precision, and geometric data types. The parameter " "value is added to the standard number of digits (FLT_DIG or DBL_DIG as " @@ -23368,17 +25119,17 @@ msgstr "" "이 값은 real, duoble 부동 소숫점과 지리정보 자료형에 영향을 끼칩니다. 이 값" "은 정수여야합니다(FLT_DIG or DBL_DIG as appropriate - 무슨 말인지)." -#: utils/misc/guc.c:2385 +#: utils/misc/guc.c:2416 msgid "Sets the minimum execution time above which statements will be logged." msgstr "" "이 시간을 초과할 경우 쿼리문을 로그로 남길 최소 실행 시간을 설정합니다." -#: utils/misc/guc.c:2387 +#: utils/misc/guc.c:2418 msgid "Zero prints all queries. -1 turns this feature off." msgstr "" "0을 지정하면 모든 쿼리가 인쇄됩니다. -1을 지정하면 이 기능이 해제됩니다." -#: utils/misc/guc.c:2397 +#: utils/misc/guc.c:2428 msgid "" "Sets the minimum execution time above which autovacuum actions will be " "logged." @@ -23386,149 +25137,162 @@ msgstr "" "이 시간을 초과할 경우 자동 청소 작업 로그를 남길 최소 실행 시간을 설정합니" "다." -#: utils/misc/guc.c:2399 +#: utils/misc/guc.c:2430 msgid "Zero prints all actions. -1 turns autovacuum logging off." msgstr "" "0을 지정하면 모든 작업이 인쇄됩니다. -1을 지정하면 자동 청소 기록이 해제됩니" "다." -#: utils/misc/guc.c:2409 +#: utils/misc/guc.c:2440 msgid "Background writer sleep time between rounds." msgstr "백그라운드 기록자의 잠자는 시간" -#: utils/misc/guc.c:2420 +#: utils/misc/guc.c:2451 msgid "Background writer maximum number of LRU pages to flush per round." msgstr "라운드당 플러시할 백그라운드 작성기 최대 LRU 페이지 수입니다." -#: utils/misc/guc.c:2443 +#: utils/misc/guc.c:2474 msgid "" "Number of simultaneous requests that can be handled efficiently by the disk " "subsystem." msgstr "" "디스크 하위 시스템에서 효율적으로 처리할 수 있는 동시 요청 수입니다." -#: utils/misc/guc.c:2444 +#: utils/misc/guc.c:2475 msgid "" "For RAID arrays, this should be approximately the number of drive spindles " "in the array." 
msgstr "RAID 배열의 경우 이 값은 대략 배열의 드라이브 스핀들 수입니다." -#: utils/misc/guc.c:2470 +#: utils/misc/guc.c:2501 msgid "Maximum number of concurrent worker processes." msgstr "동시 작업자 프로세스의 최대 수" -#: utils/misc/guc.c:2480 +#: utils/misc/guc.c:2513 +msgid "Maximum number of logical replication worker processes." +msgstr "논리 복제 작업자 프로세스의 최대 수" + +#: utils/misc/guc.c:2525 +msgid "Maximum number of table synchronization workers per subscription." +msgstr "구독을 위한 테이블 동기화 작업자의 최대 수" + +#: utils/misc/guc.c:2535 msgid "Automatic log file rotation will occur after N minutes." msgstr "N분 후에 자동 로그 파일 회전이 발생합니다." -#: utils/misc/guc.c:2491 +#: utils/misc/guc.c:2546 msgid "Automatic log file rotation will occur after N kilobytes." msgstr "N킬로바이트 후에 자동 로그 파일 회전이 발생합니다." -#: utils/misc/guc.c:2502 +#: utils/misc/guc.c:2557 msgid "Shows the maximum number of function arguments." msgstr "함수 인자의 최대 갯수를 보여줍니다" -#: utils/misc/guc.c:2513 +#: utils/misc/guc.c:2568 msgid "Shows the maximum number of index keys." msgstr "인덱스 키의 최대개수를 보여줍니다." -#: utils/misc/guc.c:2524 +#: utils/misc/guc.c:2579 msgid "Shows the maximum identifier length." msgstr "최대 식별자 길이를 표시합니다." -#: utils/misc/guc.c:2535 +#: utils/misc/guc.c:2590 msgid "Shows the size of a disk block." msgstr "디스크 블록의 크기를 표시합니다." -#: utils/misc/guc.c:2546 +#: utils/misc/guc.c:2601 msgid "Shows the number of pages per disk file." msgstr "디스크 파일당 페이지 수를 표시합니다." -#: utils/misc/guc.c:2557 +#: utils/misc/guc.c:2612 msgid "Shows the block size in the write ahead log." msgstr "미리 쓰기 로그의 블록 크기를 표시합니다." -#: utils/misc/guc.c:2568 +#: utils/misc/guc.c:2623 msgid "" "Sets the time to wait before retrying to retrieve WAL after a failed attempt." msgstr "" -#: utils/misc/guc.c:2580 +#: utils/misc/guc.c:2635 msgid "Shows the number of pages per write ahead log segment." msgstr "미리 쓰기 로그 세그먼트당 페이지 수를 표시합니다." -#: utils/misc/guc.c:2593 +#: utils/misc/guc.c:2648 msgid "Time to sleep between autovacuum runs." msgstr "자동 청소 실행 사이의 절전 모드 시간입니다." -#: utils/misc/guc.c:2603 +#: utils/misc/guc.c:2658 msgid "Minimum number of tuple updates or deletes prior to vacuum." msgstr "청소 전의 최소 튜플 업데이트 또는 삭제 수입니다." -#: utils/misc/guc.c:2612 +#: utils/misc/guc.c:2667 msgid "Minimum number of tuple inserts, updates, or deletes prior to analyze." msgstr "통계 정보 수집을 위한 최소 튜플 삽입, 업데이트 또는 삭제 수입니다." -#: utils/misc/guc.c:2622 +#: utils/misc/guc.c:2677 msgid "" "Age at which to autovacuum a table to prevent transaction ID wraparound." msgstr "" "트랜잭션 ID 겹침 방지를 위해 테이블에 대해 autovacuum 작업을 수행할 테이블 나" "이를 지정합니다." -#: utils/misc/guc.c:2633 +#: utils/misc/guc.c:2688 msgid "" "Multixact age at which to autovacuum a table to prevent multixact wraparound." msgstr "" "멀티 트랜잭션 ID 겹침 방지를 위해 테이블에 대해 autovacuum 작업을 수행할 트랜" "잭션 나이를 지정합니다." -#: utils/misc/guc.c:2643 +#: utils/misc/guc.c:2698 msgid "" "Sets the maximum number of simultaneously running autovacuum worker " "processes." msgstr "동시에 작업할 수 있는 autovacuum 작업자 최대 수 지정" -#: utils/misc/guc.c:2653 +#: utils/misc/guc.c:2708 msgid "Sets the maximum number of parallel processes per executor node." msgstr "실행 노드당 최대 병렬 처리 수 지정" -#: utils/misc/guc.c:2663 +#: utils/misc/guc.c:2718 +msgid "" +"Sets the maximum number of parallel workers than can be active at one time." +msgstr "한번에 작업할 수 있는 병렬 작업자 최대 수 지정" + +#: utils/misc/guc.c:2728 msgid "Sets the maximum memory to be used by each autovacuum worker process." 
msgstr "각 autovacuum 작업자 프로세스가 사용할 메모리 최대치" -#: utils/misc/guc.c:2674 +#: utils/misc/guc.c:2739 msgid "" "Time before a snapshot is too old to read pages changed after the snapshot " "was taken." msgstr "" -#: utils/misc/guc.c:2675 +#: utils/misc/guc.c:2740 msgid "A value of -1 disables this feature." msgstr "이 값이 -1 이면 이 기능 사용 안함" -#: utils/misc/guc.c:2685 +#: utils/misc/guc.c:2750 msgid "Time between issuing TCP keepalives." msgstr "TCP 연결 유지 실행 간격입니다." -#: utils/misc/guc.c:2686 utils/misc/guc.c:2697 +#: utils/misc/guc.c:2751 utils/misc/guc.c:2762 msgid "A value of 0 uses the system default." msgstr "이 값이 0이면 시스템 기본 값" -#: utils/misc/guc.c:2696 +#: utils/misc/guc.c:2761 msgid "Time between TCP keepalive retransmits." msgstr "TCP keepalive 시간 설정" -#: utils/misc/guc.c:2707 +#: utils/misc/guc.c:2772 msgid "SSL renegotiation is no longer supported; this can only be 0." msgstr "" -#: utils/misc/guc.c:2718 +#: utils/misc/guc.c:2783 msgid "Maximum number of TCP keepalive retransmits." msgstr "TCP keepalive 확인 최대 횟수" -#: utils/misc/guc.c:2719 +#: utils/misc/guc.c:2784 msgid "" "This controls the number of consecutive keepalive retransmits that can be " "lost before a connection is considered dead. A value of 0 uses the system " @@ -23537,15 +25301,15 @@ msgstr "" "이 값은 연결이 중단된 것으로 간주되기 전에 손실될 수 있는 연속 연결 유" "지 재전송 수를 제어합니다. 값 0을 지정하면 시스템 기본 값이 사용됩니다." -#: utils/misc/guc.c:2730 +#: utils/misc/guc.c:2795 msgid "Sets the maximum allowed result for exact search by GIN." msgstr "정확한 GIN 기준 검색에 허용되는 최대 결과 수를 설정합니다." -#: utils/misc/guc.c:2741 +#: utils/misc/guc.c:2806 msgid "Sets the planner's assumption about the size of the disk cache." msgstr "디스크 캐시 크기에 대한 계획 관리자의 가정을 설정합니다." -#: utils/misc/guc.c:2742 +#: utils/misc/guc.c:2807 msgid "" "That is, the portion of the kernel's disk cache that will be used for " "PostgreSQL data files. This is measured in disk pages, which are normally 8 " @@ -23554,39 +25318,55 @@ msgstr "" "즉, PostgreSQL 데이터 파일에 사용될 커널의 디스크 캐시 부분입니다. 이 값" "은 디스크 페이지 단위로 측정되며, 일반적으로 각각 8KB입니다." -#: utils/misc/guc.c:2754 -msgid "Sets the minimum size of relations to be considered for parallel scan." +#: utils/misc/guc.c:2819 +msgid "Sets the minimum amount of table data for a parallel scan." +msgstr "병렬 조회를 위한 최소 테이블 자료량 지정" + +#: utils/misc/guc.c:2820 +msgid "" +"If the planner estimates that it will read a number of table pages too small " +"to reach this limit, a parallel scan will not be considered." +msgstr "" + +#: utils/misc/guc.c:2830 +msgid "Sets the minimum amount of index data for a parallel scan." +msgstr "병렬 조회를 위한 최소 인덱스 자료량 지정" + +#: utils/misc/guc.c:2831 +msgid "" +"If the planner estimates that it will read a number of index pages too small " +"to reach this limit, a parallel scan will not be considered." msgstr "" -#: utils/misc/guc.c:2766 +#: utils/misc/guc.c:2842 msgid "Shows the server version as an integer." msgstr "서버 버전을 정수형으로 보여줍니다" -#: utils/misc/guc.c:2777 +#: utils/misc/guc.c:2853 msgid "Log the use of temporary files larger than this number of kilobytes." msgstr "이 킬로바이트 수보다 큰 임시 파일의 사용을 기록합니다." -#: utils/misc/guc.c:2778 +#: utils/misc/guc.c:2854 msgid "Zero logs all files. The default is -1 (turning this feature off)." msgstr "" "0을 지정하면 모든 파일이 기록됩니다. 기본 값은 -1로, 이 기능이 해제됩니다." -#: utils/misc/guc.c:2788 +#: utils/misc/guc.c:2864 msgid "Sets the size reserved for pg_stat_activity.query, in bytes." msgstr "pg_stat_activity.query에 예약되는 크기(바이트)를 설정합니다." 
-#: utils/misc/guc.c:2803 +#: utils/misc/guc.c:2879 msgid "Sets the maximum size of the pending list for GIN index." msgstr "GIN 인덱스를 위한 팬딩(pending) 목록의 최대 크기 지정" -#: utils/misc/guc.c:2823 +#: utils/misc/guc.c:2899 msgid "" "Sets the planner's estimate of the cost of a sequentially fetched disk page." msgstr "" "순차적으로 접근하는 디스크 페이지에 대한 계획 관리자의 예상 비용을 설정합니" "다." -#: utils/misc/guc.c:2833 +#: utils/misc/guc.c:2909 msgid "" "Sets the planner's estimate of the cost of a nonsequentially fetched disk " "page." @@ -23594,11 +25374,11 @@ msgstr "" "비순차적으로 접근하는 디스크 페이지에 대한 계획 관리자의 예상 비용을 설정합니" "다." -#: utils/misc/guc.c:2843 +#: utils/misc/guc.c:2919 msgid "Sets the planner's estimate of the cost of processing each tuple (row)." msgstr "각 튜플(행)에 대한 계획 관리자의 예상 처리 비용을 설정합니다." -#: utils/misc/guc.c:2853 +#: utils/misc/guc.c:2929 msgid "" "Sets the planner's estimate of the cost of processing each index entry " "during an index scan." @@ -23606,7 +25386,7 @@ msgstr "" "실행 계획기의 비용 계산에 사용될 인덱스 스캔으로 각 인덱스 항목을 처리하는 예" "상 처리 비용을 설정합니다." -#: utils/misc/guc.c:2863 +#: utils/misc/guc.c:2939 msgid "" "Sets the planner's estimate of the cost of processing each operator or " "function call." @@ -23614,41 +25394,41 @@ msgstr "" "실행 계획기의 비용 계산에 사용될 함수 호출이나 연산자 연산 처리하는 예상 처" "리 비용을 설정합니다." -#: utils/misc/guc.c:2873 +#: utils/misc/guc.c:2949 msgid "" "Sets the planner's estimate of the cost of passing each tuple (row) from " "worker to master backend." msgstr "각 튜플(행)에 대한 계획 관리자의 예상 처리 비용을 설정합니다." -#: utils/misc/guc.c:2883 +#: utils/misc/guc.c:2959 msgid "" "Sets the planner's estimate of the cost of starting up worker processes for " "parallel query." msgstr "" -#: utils/misc/guc.c:2894 +#: utils/misc/guc.c:2970 msgid "" "Sets the planner's estimate of the fraction of a cursor's rows that will be " "retrieved." msgstr "검색될 커서 행에 대한 계획 관리자의 예상 분수 값을 설정합니다." -#: utils/misc/guc.c:2905 +#: utils/misc/guc.c:2981 msgid "GEQO: selective pressure within the population." msgstr "GEQO: 모집단 내의 선택 압력입니다." -#: utils/misc/guc.c:2915 +#: utils/misc/guc.c:2991 msgid "GEQO: seed for random path selection." msgstr "GEQO: 무작위 경로 선택을 위한 씨드" -#: utils/misc/guc.c:2925 +#: utils/misc/guc.c:3001 msgid "Multiple of the average buffer usage to free per round." msgstr "라운드당 해제할 평균 버퍼 사용의 배수입니다." -#: utils/misc/guc.c:2935 +#: utils/misc/guc.c:3011 msgid "Sets the seed for random-number generation." msgstr "난수 생성 속도를 설정합니다." -#: utils/misc/guc.c:2946 +#: utils/misc/guc.c:3022 msgid "" "Number of tuple updates or deletes prior to vacuum as a fraction of " "reltuples." @@ -23656,7 +25436,7 @@ msgstr "" "vacuum 작업을 진행할 update, delete 작업량을 전체 자료에 대한 분수값으로 지정" "합니다." -#: utils/misc/guc.c:2955 +#: utils/misc/guc.c:3031 msgid "" "Number of tuple inserts, updates, or deletes prior to analyze as a fraction " "of reltuples." @@ -23664,7 +25444,7 @@ msgstr "" "통계 수집 작업을 진행할 insert, update, delete 작업량을 전체 자료에 대한 분수" "값으로 지정합니다." -#: utils/misc/guc.c:2965 +#: utils/misc/guc.c:3041 msgid "" "Time spent flushing dirty buffers during checkpoint, as fraction of " "checkpoint interval." @@ -23672,51 +25452,51 @@ msgstr "" "체크포인트 도중 변경된 버퍼 플러시에 사용된 시간으로, 체크포인트 간격의 " "분수 값입니다." -#: utils/misc/guc.c:2984 +#: utils/misc/guc.c:3060 msgid "Sets the shell command that will be called to archive a WAL file." msgstr "WAL 파일을 아카이빙하기 위해 호출될 셸 명령을 설정합니다." -#: utils/misc/guc.c:2994 +#: utils/misc/guc.c:3070 msgid "Sets the client's character set encoding." 
msgstr "클라이언트 문자 세트 인코딩을 지정함" -#: utils/misc/guc.c:3005 +#: utils/misc/guc.c:3081 msgid "Controls information prefixed to each log line." msgstr "각 로그 줄 앞에 추가할 정보를 제어합니다." -#: utils/misc/guc.c:3006 +#: utils/misc/guc.c:3082 msgid "If blank, no prefix is used." msgstr "비워 두면 접두사가 사용되지 않습니다." -#: utils/misc/guc.c:3015 +#: utils/misc/guc.c:3091 msgid "Sets the time zone to use in log messages." msgstr "로그 메시지에 사용할 표준 시간대를 설정합니다." -#: utils/misc/guc.c:3025 +#: utils/misc/guc.c:3101 msgid "Sets the display format for date and time values." msgstr "날짜와 시간 값을 나타내는 모양을 지정합니다." -#: utils/misc/guc.c:3026 +#: utils/misc/guc.c:3102 msgid "Also controls interpretation of ambiguous date inputs." msgstr "또한 모호한 날짜 입력의 해석을 제어합니다." -#: utils/misc/guc.c:3037 +#: utils/misc/guc.c:3113 msgid "Sets the default tablespace to create tables and indexes in." msgstr "테이블 및 인덱스를 만들 기본 테이블스페이스를 설정합니다." -#: utils/misc/guc.c:3038 +#: utils/misc/guc.c:3114 msgid "An empty string selects the database's default tablespace." msgstr "빈 문자열을 지정하면 데이터베이스의 기본 테이블스페이스가 선택됩니다." -#: utils/misc/guc.c:3048 +#: utils/misc/guc.c:3124 msgid "Sets the tablespace(s) to use for temporary tables and sort files." msgstr "임시 테이블 및 정렬 파일에 사용할 테이블스페이스를 설정합니다." -#: utils/misc/guc.c:3059 +#: utils/misc/guc.c:3135 msgid "Sets the path for dynamically loadable modules." msgstr "동적으로 불러올 수 있는 모듈들이 있는 경로를 지정함." -#: utils/misc/guc.c:3060 +#: utils/misc/guc.c:3136 msgid "" "If a dynamically loadable module needs to be opened and the specified name " "does not have a directory component (i.e., the name does not contain a " @@ -23726,76 +25506,76 @@ msgstr "" "소가 없는 경우(즉, 이름에 슬래시가 없음) 시스템은 이 경로에서 지정한 파일을 " "검색합니다." -#: utils/misc/guc.c:3073 +#: utils/misc/guc.c:3149 msgid "Sets the location of the Kerberos server key file." msgstr "Kerberos 서버 키 파일의 위치를 지정함." -#: utils/misc/guc.c:3084 +#: utils/misc/guc.c:3160 msgid "Sets the Bonjour service name." msgstr "Bonjour 서비스 이름을 지정" -#: utils/misc/guc.c:3096 +#: utils/misc/guc.c:3172 msgid "Shows the collation order locale." msgstr "데이터 정렬 순서 로케일을 표시합니다." -#: utils/misc/guc.c:3107 +#: utils/misc/guc.c:3183 msgid "Shows the character classification and case conversion locale." msgstr "문자 분류 및 대/소문자 변환 로케일을 표시합니다." -#: utils/misc/guc.c:3118 +#: utils/misc/guc.c:3194 msgid "Sets the language in which messages are displayed." msgstr "보여질 메시지로 사용할 언어 지정." -#: utils/misc/guc.c:3128 +#: utils/misc/guc.c:3204 msgid "Sets the locale for formatting monetary amounts." msgstr "통화금액 표현 양식으로 사용할 로케일 지정." -#: utils/misc/guc.c:3138 +#: utils/misc/guc.c:3214 msgid "Sets the locale for formatting numbers." msgstr "숫자 표현 양식으로 사용할 로케일 지정." -#: utils/misc/guc.c:3148 +#: utils/misc/guc.c:3224 msgid "Sets the locale for formatting date and time values." msgstr "날짜와 시간 값을 표현할 양식으로 사용할 로케일 지정." -#: utils/misc/guc.c:3158 +#: utils/misc/guc.c:3234 msgid "Lists shared libraries to preload into each backend." msgstr "각각의 백엔드에 미리 불러올 공유 라이브러리들을 지정합니다" -#: utils/misc/guc.c:3169 +#: utils/misc/guc.c:3245 msgid "Lists shared libraries to preload into server." msgstr "서버에 미리 불러올 공유 라이브러리들을 지정합니다" -#: utils/misc/guc.c:3180 +#: utils/misc/guc.c:3256 msgid "Lists unprivileged shared libraries to preload into each backend." msgstr "" "각각의 백엔드에 미리 불러올 접근제한 없는 공유 라이브러리들을 지정합니다" -#: utils/misc/guc.c:3191 +#: utils/misc/guc.c:3267 msgid "Sets the schema search order for names that are not schema-qualified." msgstr "스키마로 한정되지 않은 이름의 스키마 검색 순서를 설정합니다." 
-#: utils/misc/guc.c:3203 +#: utils/misc/guc.c:3279 msgid "Sets the server (database) character set encoding." msgstr "서버 문자 코드 세트 인코딩 지정." -#: utils/misc/guc.c:3215 +#: utils/misc/guc.c:3291 msgid "Shows the server version." msgstr "서버 버전 보임." -#: utils/misc/guc.c:3227 +#: utils/misc/guc.c:3303 msgid "Sets the current role." msgstr "현재 롤을 지정" -#: utils/misc/guc.c:3239 +#: utils/misc/guc.c:3315 msgid "Sets the session user name." msgstr "세션 사용자 이름 지정." -#: utils/misc/guc.c:3250 +#: utils/misc/guc.c:3326 msgid "Sets the destination for server log output." msgstr "서버 로그 출력을 위한 대상을 지정합니다." -#: utils/misc/guc.c:3251 +#: utils/misc/guc.c:3327 msgid "" "Valid values are combinations of \"stderr\", \"syslog\", \"csvlog\", and " "\"eventlog\", depending on the platform." @@ -23803,138 +25583,153 @@ msgstr "" "유효한 값은 플랫폼에 따라 \"stderr\", \"syslog\", \"csvlog\" 및 \"eventlog" "\"의 조합입니다." -#: utils/misc/guc.c:3262 +#: utils/misc/guc.c:3338 msgid "Sets the destination directory for log files." msgstr "로그 파일의 대상 디렉터리를 설정합니다." -#: utils/misc/guc.c:3263 +#: utils/misc/guc.c:3339 msgid "Can be specified as relative to the data directory or as absolute path." msgstr "데이터 디렉터리의 상대 경로 또는 절대 경로로 지정할 수 있습니다." -#: utils/misc/guc.c:3273 +#: utils/misc/guc.c:3349 msgid "Sets the file name pattern for log files." msgstr "로그 파일의 파일 이름 패턴을 설정합니다." -#: utils/misc/guc.c:3284 +#: utils/misc/guc.c:3360 msgid "Sets the program name used to identify PostgreSQL messages in syslog." msgstr "syslog에서 구분할 PostgreSQL 메시지에 사용될 프로그램 이름을 지정." -#: utils/misc/guc.c:3295 +#: utils/misc/guc.c:3371 msgid "" "Sets the application name used to identify PostgreSQL messages in the event " "log." msgstr "" "이벤트 로그에서 PostgreSQL 메시지 식별자로 사용할 응용프로그램 이름 지정" -#: utils/misc/guc.c:3306 +#: utils/misc/guc.c:3382 msgid "Sets the time zone for displaying and interpreting time stamps." msgstr "시간대(time zone)를 지정함." -#: utils/misc/guc.c:3316 +#: utils/misc/guc.c:3392 msgid "Selects a file of time zone abbreviations." msgstr "표준 시간대 약어 파일을 선택합니다." -#: utils/misc/guc.c:3326 +#: utils/misc/guc.c:3402 msgid "Sets the current transaction's isolation level." msgstr "현재 트랜잭션 독립성 수준(isolation level)을 지정함." -#: utils/misc/guc.c:3337 +#: utils/misc/guc.c:3413 msgid "Sets the owning group of the Unix-domain socket." msgstr "유닉스 도메인 소켓의 소유주를 지정" -#: utils/misc/guc.c:3338 +#: utils/misc/guc.c:3414 msgid "" "The owning user of the socket is always the user that starts the server." msgstr "소켓 소유자는 항상 서버를 시작하는 사용자입니다." -#: utils/misc/guc.c:3348 +#: utils/misc/guc.c:3424 msgid "Sets the directories where Unix-domain sockets will be created." msgstr "유닉스 도메인 소켓을 만들 디렉터리를 지정합니다." -#: utils/misc/guc.c:3363 +#: utils/misc/guc.c:3439 msgid "Sets the host name or IP address(es) to listen to." msgstr "서비스할 호스트이름이나, IP를 지정함." -#: utils/misc/guc.c:3378 +#: utils/misc/guc.c:3454 msgid "Sets the server's data directory." msgstr "서버의 데이터 디렉터리 위치를 지정합니다." -#: utils/misc/guc.c:3389 +#: utils/misc/guc.c:3465 msgid "Sets the server's main configuration file." msgstr "서버의 기본 환경설정 파일 경로를 지정합니다." -#: utils/misc/guc.c:3400 +#: utils/misc/guc.c:3476 msgid "Sets the server's \"hba\" configuration file." msgstr "서버의 \"hba\" 구성 파일을 설정합니다." -#: utils/misc/guc.c:3411 +#: utils/misc/guc.c:3487 msgid "Sets the server's \"ident\" configuration file." msgstr "서버의 \"ident\" 구성 파일을 설정합니다." -#: utils/misc/guc.c:3422 +#: utils/misc/guc.c:3498 msgid "Writes the postmaster PID to the specified file." msgstr "postmaster PID가 기록된 파일의 경로를 지정합니다." 
-#: utils/misc/guc.c:3433 +#: utils/misc/guc.c:3509 msgid "Location of the SSL server certificate file." msgstr "서버 인증서 파일 위치를 지정함" -#: utils/misc/guc.c:3443 +#: utils/misc/guc.c:3519 msgid "Location of the SSL server private key file." msgstr "SSL 서버 개인 키 파일의 위치를 지정함." -#: utils/misc/guc.c:3453 +#: utils/misc/guc.c:3529 msgid "Location of the SSL certificate authority file." msgstr "" -#: utils/misc/guc.c:3463 +#: utils/misc/guc.c:3539 msgid "Location of the SSL certificate revocation list file." msgstr "SSL 인증서 파기 목록 파일의 위치" -#: utils/misc/guc.c:3473 +#: utils/misc/guc.c:3549 msgid "Writes temporary statistics files to the specified directory." msgstr "지정한 디렉터리에 임시 통계 파일을 씁니다." -#: utils/misc/guc.c:3484 +#: utils/misc/guc.c:3560 msgid "" "Number of synchronous standbys and list of names of potential synchronous " "ones." msgstr "" -#: utils/misc/guc.c:3495 +#: utils/misc/guc.c:3571 msgid "Sets default text search configuration." msgstr "기본 텍스트 검색 구성을 설정합니다." -#: utils/misc/guc.c:3505 +#: utils/misc/guc.c:3581 msgid "Sets the list of allowed SSL ciphers." msgstr "허용되는 SSL 암호 목록을 설정합니다." -#: utils/misc/guc.c:3520 +#: utils/misc/guc.c:3596 msgid "Sets the curve to use for ECDH." msgstr "ECDH에 사용할 curve 설정" -#: utils/misc/guc.c:3535 +#: utils/misc/guc.c:3611 +msgid "Location of the SSL DH parameters file." +msgstr "SSL DH 매개 변수 파일의 위치." + +#: utils/misc/guc.c:3622 msgid "Sets the application name to be reported in statistics and logs." msgstr "" -#: utils/misc/guc.c:3546 +#: utils/misc/guc.c:3633 msgid "Sets the name of the cluster, which is included in the process title." msgstr "" -#: utils/misc/guc.c:3566 +#: utils/misc/guc.c:3644 +msgid "" +"Sets the WAL resource managers for which WAL consistency checks are done." +msgstr "" + +#: utils/misc/guc.c:3645 +msgid "" +"Full-page images will be logged for all data blocks and cross-checked " +"against the results of WAL replay." +msgstr "" + +#: utils/misc/guc.c:3664 msgid "Sets whether \"\\'\" is allowed in string literals." msgstr "문자열에서 \"\\'\" 문자 사용을 허용할 것인지를 정하세요" -#: utils/misc/guc.c:3576 +#: utils/misc/guc.c:3674 msgid "Sets the output format for bytea." msgstr "bytea 값의 표시 형식을 설정합니다." -#: utils/misc/guc.c:3586 +#: utils/misc/guc.c:3684 msgid "Sets the message levels that are sent to the client." msgstr "클라이언트 측에 보여질 메시지 수준을 지정함." -#: utils/misc/guc.c:3587 utils/misc/guc.c:3640 utils/misc/guc.c:3651 -#: utils/misc/guc.c:3717 +#: utils/misc/guc.c:3685 utils/misc/guc.c:3738 utils/misc/guc.c:3749 +#: utils/misc/guc.c:3815 msgid "" "Each level includes all the levels that follow it. The later the level, the " "fewer messages are sent." @@ -23942,11 +25737,11 @@ msgstr "" "각 수준에는 이 수준 뒤에 있는 모든 수준이 포함됩니다. 수준이 뒤에 있을수" "록 전송되는 메시지 수가 적습니다." -#: utils/misc/guc.c:3597 +#: utils/misc/guc.c:3695 msgid "Enables the planner to use constraints to optimize queries." msgstr "실행계획기가 쿼리 최적화 작업에서 제약 조건을 사용하도록 함" -#: utils/misc/guc.c:3598 +#: utils/misc/guc.c:3696 msgid "" "Table scans will be skipped if their constraints guarantee that no rows " "match the query." @@ -23954,73 +25749,73 @@ msgstr "" "제약 조건에 의해 쿼리와 일치하는 행이 없는 경우 테이블 스캔을 건너뜁니" "다." -#: utils/misc/guc.c:3608 +#: utils/misc/guc.c:3706 msgid "Sets the transaction isolation level of each new transaction." msgstr "각 새 트랜잭션의 트랜잭션 격리 수준을 설정합니다." -#: utils/misc/guc.c:3618 +#: utils/misc/guc.c:3716 msgid "Sets the display format for interval values." msgstr "간격 값의 표시 형식을 설정합니다." -#: utils/misc/guc.c:3629 +#: utils/misc/guc.c:3727 msgid "Sets the verbosity of logged messages." 
msgstr "기록되는 메시지의 상세 정도를 지정합니다." -#: utils/misc/guc.c:3639 +#: utils/misc/guc.c:3737 msgid "Sets the message levels that are logged." msgstr "서버 로그에 기록될 메시지 수준을 지정함." -#: utils/misc/guc.c:3650 +#: utils/misc/guc.c:3748 msgid "" "Causes all statements generating error at or above this level to be logged." msgstr "" "오류가 있는 모든 쿼리문이나 지정한 로그 레벨 이상의 쿼리문을 로그로 남김" -#: utils/misc/guc.c:3661 +#: utils/misc/guc.c:3759 msgid "Sets the type of statements logged." msgstr "서버로그에 기록될 구문 종류를 지정합니다." -#: utils/misc/guc.c:3671 +#: utils/misc/guc.c:3769 msgid "Sets the syslog \"facility\" to be used when syslog enabled." msgstr "syslog 기능을 사용할 때, 사용할 syslog \"facility\" 값을 지정." -#: utils/misc/guc.c:3686 +#: utils/misc/guc.c:3784 msgid "Sets the session's behavior for triggers and rewrite rules." msgstr "트리거 및 다시 쓰기 규칙에 대한 세션의 동작을 설정합니다." -#: utils/misc/guc.c:3696 +#: utils/misc/guc.c:3794 msgid "Sets the current transaction's synchronization level." msgstr "현재 트랜잭션 격리 수준(isolation level)을 지정함." -#: utils/misc/guc.c:3706 +#: utils/misc/guc.c:3804 msgid "Allows archiving of WAL files using archive_command." msgstr "archive_command를 사용하여 WAL 파일을 따로 보관하도록 설정합니다." -#: utils/misc/guc.c:3716 +#: utils/misc/guc.c:3814 msgid "Enables logging of recovery-related debugging information." msgstr "복구 작업과 관련된 디버깅 정보를 기록하도록 합니다." -#: utils/misc/guc.c:3732 +#: utils/misc/guc.c:3830 msgid "Collects function-level statistics on database activity." msgstr "데이터베이스 활동에 대한 함수 수준 통계를 수집합니다." -#: utils/misc/guc.c:3742 +#: utils/misc/guc.c:3840 msgid "Set the level of information written to the WAL." msgstr "WAL에 저장할 내용 수준을 지정합니다." -#: utils/misc/guc.c:3752 +#: utils/misc/guc.c:3850 msgid "Selects the dynamic shared memory implementation used." msgstr "사용할 동적 공유 메모리 관리방식을 선택합니다." -#: utils/misc/guc.c:3762 +#: utils/misc/guc.c:3860 msgid "Selects the method used for forcing WAL updates to disk." msgstr "디스크에 대한 강제 WAL 업데이트에 사용되는 방법을 선택합니다." -#: utils/misc/guc.c:3772 +#: utils/misc/guc.c:3870 msgid "Sets how binary values are to be encoded in XML." msgstr "XML에서 바이너리 값이 인코딩되는 방식을 설정합니다." -#: utils/misc/guc.c:3782 +#: utils/misc/guc.c:3880 msgid "" "Sets whether XML data in implicit parsing and serialization operations is to " "be considered as documents or content fragments." @@ -24028,26 +25823,40 @@ msgstr "" "암시적 구문 분석 및 직렬화 작업의 XML 데이터를 문서 또는 내용 조각으로 간주할" "지 여부를 설정합니다." -#: utils/misc/guc.c:3793 +#: utils/misc/guc.c:3891 msgid "Use of huge pages on Linux." msgstr "리눅스 huge 페이지 사용 여부" -#: utils/misc/guc.c:3803 +#: utils/misc/guc.c:3901 msgid "Forces use of parallel query facilities." msgstr "병렬 쿼리 기능을 활성화" -#: utils/misc/guc.c:3804 +#: utils/misc/guc.c:3902 msgid "" "If possible, run query using a parallel worker and with parallel " "restrictions." msgstr "" -#: utils/misc/guc.c:4604 +#: utils/misc/guc.c:3911 +msgid "Encrypt passwords." +msgstr "암호를 암호화 해서 기록함" + +#: utils/misc/guc.c:3912 +msgid "" +"When a password is specified in CREATE USER or ALTER USER without writing " +"either ENCRYPTED or UNENCRYPTED, this parameter determines whether the " +"password is to be encrypted." 
+msgstr "" +"CREATE USER 또는 ALTER USER 명령에서 ENCRYPTED 또는 UNENCRYPTED 속성을 특별" +"히 지정하지 않았고 사용자 암호를 지정했을 때, 그 암호를 암호화 해서 저장할 것" +"인지 아닌지를 지정함" + +#: utils/misc/guc.c:4714 #, c-format msgid "%s: could not access directory \"%s\": %s\n" msgstr "%s: \"%s\" 디렉터리에 액세스할 수 없음: %s\n" -#: utils/misc/guc.c:4609 +#: utils/misc/guc.c:4719 #, c-format msgid "" "Run initdb or pg_basebackup to initialize a PostgreSQL data directory.\n" @@ -24055,7 +25864,7 @@ msgstr "" "initdb 명령이나, pg_basebackup 명령으로 PostgreSQL 데이터 디렉토리를 초기화 " "하세요.\n" -#: utils/misc/guc.c:4629 +#: utils/misc/guc.c:4739 #, c-format msgid "" "%s does not know where to find the server configuration file.\n" @@ -24067,12 +25876,12 @@ msgstr "" "PGDATA 이름의 환경 변수를 만들고 그 값으로 해당 디렉터리를 지정한 뒤,\n" "이 프로그램을 다시 실행해 보십시오.\n" -#: utils/misc/guc.c:4648 +#: utils/misc/guc.c:4758 #, c-format msgid "%s: could not access the server configuration file \"%s\": %s\n" msgstr "%s: \"%s\" 환경 설정 파일을 접근할 수 없습니다: %s\n" -#: utils/misc/guc.c:4674 +#: utils/misc/guc.c:4784 #, c-format msgid "" "%s does not know where to find the database system data.\n" @@ -24085,7 +25894,7 @@ msgstr "" "PGDATA 이름의 환경 변수를 만들고 그 값으로 해당 디렉터리를 지정한 뒤,\n" "이 프로그램을 다시 실행해 보십시오.\n" -#: utils/misc/guc.c:4722 +#: utils/misc/guc.c:4832 #, c-format msgid "" "%s does not know where to find the \"hba\" configuration file.\n" @@ -24098,7 +25907,7 @@ msgstr "" "PGDATA 이름의 환경 변수를 만들고 그 값으로 해당 디렉터리를 지정한 뒤,\n" "이 프로그램을 다시 실행해 보십시오.\n" -#: utils/misc/guc.c:4745 +#: utils/misc/guc.c:4855 #, c-format msgid "" "%s does not know where to find the \"ident\" configuration file.\n" @@ -24111,127 +25920,127 @@ msgstr "" "PGDATA 이름의 환경 변수를 만들고 그 값으로 해당 디렉터리를 지정한 뒤,\n" "이 프로그램을 다시 실행해 보십시오.\n" -#: utils/misc/guc.c:5419 utils/misc/guc.c:5466 +#: utils/misc/guc.c:5529 utils/misc/guc.c:5576 msgid "Value exceeds integer range." msgstr "값이 정수 범위를 초과합니다." -#: utils/misc/guc.c:5689 +#: utils/misc/guc.c:5799 #, c-format msgid "parameter \"%s\" requires a numeric value" msgstr "\"%s\" 매개 변수의 값은 숫자형이어야합니다." -#: utils/misc/guc.c:5698 +#: utils/misc/guc.c:5808 #, c-format msgid "%g is outside the valid range for parameter \"%s\" (%g .. %g)" msgstr "" "%g 값은 \"%s\" 매개 변수의 값으로 타당한 범위(%g .. %g)를 벗어났습니다." 
-#: utils/misc/guc.c:5851 utils/misc/guc.c:7194 +#: utils/misc/guc.c:5961 utils/misc/guc.c:7307 #, c-format msgid "cannot set parameters during a parallel operation" msgstr "병렬 작업 중에는 매개 변수를 설정할 수 없음" -#: utils/misc/guc.c:5858 utils/misc/guc.c:6609 utils/misc/guc.c:6661 -#: utils/misc/guc.c:7022 utils/misc/guc.c:7782 utils/misc/guc.c:7950 -#: utils/misc/guc.c:9625 +#: utils/misc/guc.c:5968 utils/misc/guc.c:6719 utils/misc/guc.c:6772 +#: utils/misc/guc.c:7135 utils/misc/guc.c:7894 utils/misc/guc.c:8062 +#: utils/misc/guc.c:9738 #, c-format msgid "unrecognized configuration parameter \"%s\"" msgstr "알 수 없는 환경 매개 변수 이름: \"%s\"" -#: utils/misc/guc.c:5873 utils/misc/guc.c:7034 +#: utils/misc/guc.c:5983 utils/misc/guc.c:7147 #, c-format msgid "parameter \"%s\" cannot be changed" msgstr "\"%s\" 매개 변수는 변경될 수 없음" -#: utils/misc/guc.c:5896 utils/misc/guc.c:6089 utils/misc/guc.c:6179 -#: utils/misc/guc.c:6269 utils/misc/guc.c:6377 utils/misc/guc.c:6472 +#: utils/misc/guc.c:6006 utils/misc/guc.c:6199 utils/misc/guc.c:6289 +#: utils/misc/guc.c:6379 utils/misc/guc.c:6487 utils/misc/guc.c:6582 #: guc-file.l:351 #, c-format msgid "parameter \"%s\" cannot be changed without restarting the server" msgstr "\"%s\" 매개 변수는 서버 재실행 없이 지금 변경 될 수 없음" -#: utils/misc/guc.c:5906 +#: utils/misc/guc.c:6016 #, c-format msgid "parameter \"%s\" cannot be changed now" msgstr "\"%s\" 매개 변수는 지금 변경 될 수 없음" -#: utils/misc/guc.c:5924 utils/misc/guc.c:5970 utils/misc/guc.c:9641 +#: utils/misc/guc.c:6034 utils/misc/guc.c:6080 utils/misc/guc.c:9754 #, c-format msgid "permission denied to set parameter \"%s\"" msgstr "\"%s\" 매개 변수를 지정할 권한이 없습니다." -#: utils/misc/guc.c:5960 +#: utils/misc/guc.c:6070 #, c-format msgid "parameter \"%s\" cannot be set after connection start" msgstr "\"%s\" 매개 변수값은 연결 시작한 뒤에는 변경할 수 없습니다" -#: utils/misc/guc.c:6008 +#: utils/misc/guc.c:6118 #, c-format msgid "cannot set parameter \"%s\" within security-definer function" msgstr "보안 정의자 함수 내에서 \"%s\" 매개 변수를 설정할 수 없음" -#: utils/misc/guc.c:6617 utils/misc/guc.c:6665 utils/misc/guc.c:7956 +#: utils/misc/guc.c:6727 utils/misc/guc.c:6777 utils/misc/guc.c:8069 #, c-format -msgid "must be superuser to examine \"%s\"" -msgstr "\"%s\" 검사를 위해서는 superuser여야합니다" +msgid "must be superuser or a member of pg_read_all_settings to examine \"%s\"" +msgstr "\"%s\" 검사를 위한 pg_read_all_settings의 맴버는 superuser여야합니다" -#: utils/misc/guc.c:6731 +#: utils/misc/guc.c:6844 #, c-format msgid "SET %s takes only one argument" msgstr "SET %s 명령은 하나의 값만 지정해야합니다" -#: utils/misc/guc.c:6982 +#: utils/misc/guc.c:7095 #, c-format msgid "must be superuser to execute ALTER SYSTEM command" msgstr "슈퍼유저만 ALTER SYSTEM 명령을 실행할 수 있음" -#: utils/misc/guc.c:7067 +#: utils/misc/guc.c:7180 #, c-format msgid "parameter value for ALTER SYSTEM must not contain a newline" msgstr "" "ALTER SYSTEM 명령으로 지정하는 매개 변수 값에는 줄바꿈 문자가 없어야 합니다" -#: utils/misc/guc.c:7112 +#: utils/misc/guc.c:7225 #, c-format msgid "could not parse contents of file \"%s\"" msgstr "\"%s\" 파일의 내용을 분석할 수 없음" -#: utils/misc/guc.c:7270 +#: utils/misc/guc.c:7383 #, c-format msgid "SET LOCAL TRANSACTION SNAPSHOT is not implemented" msgstr "SET LOCAL TRANSACTION SNAPSHOT 명령은 아직 구현 되지 않았습니다" -#: utils/misc/guc.c:7355 +#: utils/misc/guc.c:7467 #, c-format msgid "SET requires parameter name" msgstr "SET 명령은 매개 변수 이름이 필요합니다" -#: utils/misc/guc.c:7479 +#: utils/misc/guc.c:7591 #, c-format msgid "attempt to redefine parameter \"%s\"" msgstr "\"%s\" 매개 변수를 다시 정의하려고 함" -#: utils/misc/guc.c:9258 +#: utils/misc/guc.c:9371 #, c-format msgid "parameter \"%s\" 
could not be set" msgstr "\"%s\" 매개 변수는 설정할 수 없음" -#: utils/misc/guc.c:9345 +#: utils/misc/guc.c:9458 #, c-format msgid "could not parse setting for parameter \"%s\"" msgstr "지정한 \"%s\" 매개 변수값의 구문분석을 실패했습니다." -#: utils/misc/guc.c:9703 utils/misc/guc.c:9737 +#: utils/misc/guc.c:9816 utils/misc/guc.c:9850 #, c-format msgid "invalid value for parameter \"%s\": %d" msgstr "잘못된 \"%s\" 매개 변수의 값: %d" -#: utils/misc/guc.c:9771 +#: utils/misc/guc.c:9884 #, c-format msgid "invalid value for parameter \"%s\": %g" msgstr "잘못된 \"%s\" 매개 변수의 값: %g" -#: utils/misc/guc.c:9961 +#: utils/misc/guc.c:10154 #, c-format msgid "" "\"temp_buffers\" cannot be changed after any temporary tables have been " @@ -24240,22 +26049,22 @@ msgstr "" "해당 세션에서 어떤 임시 테이블도 사용하고 있지 않아야 \"temp_buffers\" 설정" "을 변경할 수 있습니다." -#: utils/misc/guc.c:9973 +#: utils/misc/guc.c:10166 #, c-format msgid "Bonjour is not supported by this build" msgstr "Bonjour 기능을 뺀 채로 서버가 만들어졌습니다." -#: utils/misc/guc.c:9986 +#: utils/misc/guc.c:10179 #, c-format msgid "SSL is not supported by this build" msgstr "SSL 접속 기능을 뺀 채로 서버가 만들어졌습니다." -#: utils/misc/guc.c:9998 +#: utils/misc/guc.c:10191 #, c-format msgid "Cannot enable parameter when \"log_statement_stats\" is true." msgstr "\"log_statement_stats\" 값이 true 일 때는 이 값을 활성화할 수 없습니다" -#: utils/misc/guc.c:10010 +#: utils/misc/guc.c:10203 #, c-format msgid "" "Cannot enable \"log_statement_stats\" when \"log_parser_stats\", " @@ -24275,12 +26084,23 @@ msgid "" "query-specified return tuple and function return type are not compatible" msgstr "" -#: utils/misc/rls.c:127 +#: utils/misc/pg_controldata.c:58 utils/misc/pg_controldata.c:138 +#: utils/misc/pg_controldata.c:244 utils/misc/pg_controldata.c:311 +#, c-format +msgid "calculated CRC checksum does not match value stored in file" +msgstr "계산된 CRC 체크섬 값이 파일에 저장된 값과 다름" + +#: utils/misc/pg_rusage.c:64 +#, c-format +msgid "CPU: user: %d.%02d s, system: %d.%02d s, elapsed: %d.%02d s" +msgstr "" + +#: utils/misc/rls.c:128 #, c-format msgid "query would be affected by row-level security policy for table \"%s\"" msgstr "\"%s\" 테이블의 로우 단위 보안 정책에 의해 쿼리가 영향을 받음" -#: utils/misc/rls.c:129 +#: utils/misc/rls.c:130 #, c-format msgid "" "To disable the policy for the table's owner, use ALTER TABLE NO FORCE ROW " @@ -24370,115 +26190,126 @@ msgstr "\"%s\" 표준 시간대 파일의 %d번째 줄이 너무 깁니다." msgid "@INCLUDE without file name in time zone file \"%s\", line %d" msgstr "\"%s\" 표준 시간대 파일의 %d번째 줄에 파일 이름이 없는 @INCLUDE가 있음" -#: utils/mmgr/aset.c:510 +#: utils/mmgr/aset.c:405 #, c-format msgid "Failed while creating memory context \"%s\"." msgstr "\"%s\" 메모리 컨텍스트를 만드는 동안 오류가 발생했습니다." -#: utils/mmgr/mcxt.c:768 utils/mmgr/mcxt.c:803 utils/mmgr/mcxt.c:840 -#: utils/mmgr/mcxt.c:877 utils/mmgr/mcxt.c:911 utils/mmgr/mcxt.c:940 -#: utils/mmgr/mcxt.c:974 utils/mmgr/mcxt.c:1056 utils/mmgr/mcxt.c:1090 -#: utils/mmgr/mcxt.c:1139 +#: utils/mmgr/dsa.c:518 utils/mmgr/dsa.c:1323 +#, c-format +msgid "could not attach to dynamic shared area" +msgstr "동적 공유 메모리 영역을 할당할 수 없음" + +#: utils/mmgr/dsa.c:714 utils/mmgr/dsa.c:796 +#, c-format +msgid "Failed on DSA request of size %zu." +msgstr "크기가 %zu인 DSA 요청에서 오류가 발생했습니다." + +#: utils/mmgr/mcxt.c:726 utils/mmgr/mcxt.c:761 utils/mmgr/mcxt.c:798 +#: utils/mmgr/mcxt.c:835 utils/mmgr/mcxt.c:869 utils/mmgr/mcxt.c:898 +#: utils/mmgr/mcxt.c:932 utils/mmgr/mcxt.c:983 utils/mmgr/mcxt.c:1017 +#: utils/mmgr/mcxt.c:1051 #, c-format msgid "Failed on request of size %zu." msgstr "크기가 %zu인 요청에서 오류가 발생했습니다." 
-#: utils/mmgr/portalmem.c:207 +#: utils/mmgr/portalmem.c:186 #, c-format msgid "cursor \"%s\" already exists" msgstr "\"%s\" 이름의 커서가 이미 있음" -#: utils/mmgr/portalmem.c:211 +#: utils/mmgr/portalmem.c:190 #, c-format msgid "closing existing cursor \"%s\"" msgstr "이미 있는 \"%s\" 커서를 닫습니다" -#: utils/mmgr/portalmem.c:415 +#: utils/mmgr/portalmem.c:394 #, c-format msgid "portal \"%s\" cannot be run" msgstr "\"%s\" portal 실행할 수 없음" -#: utils/mmgr/portalmem.c:495 +#: utils/mmgr/portalmem.c:474 #, c-format msgid "cannot drop active portal \"%s\"" msgstr "\"%s\" 활성 포털을 삭제할 수 없음" -#: utils/mmgr/portalmem.c:699 +#: utils/mmgr/portalmem.c:678 #, c-format msgid "cannot PREPARE a transaction that has created a cursor WITH HOLD" msgstr "WITH HOLD 옵션으로 커서를 만든 트랜잭션을 PREPARE할 수 없음" -#: utils/sort/logtape.c:226 +#: utils/sort/logtape.c:252 #, c-format msgid "could not read block %ld of temporary file: %m" msgstr "임시 파일의 %ld 블럭을 읽을 수 없음: %m" -#: utils/sort/tuplesort.c:3402 +#: utils/sort/tuplesort.c:3072 #, c-format msgid "cannot have more than %d runs for an external sort" msgstr "외부 정렬을 위해 %d 개 이상의 런을 만들 수 없음" -#: utils/sort/tuplesort.c:4474 +#: utils/sort/tuplesort.c:4146 #, c-format msgid "could not create unique index \"%s\"" msgstr "\"%s\" 고유 인덱스를 만들 수 없음" -#: utils/sort/tuplesort.c:4476 +#: utils/sort/tuplesort.c:4148 #, c-format msgid "Key %s is duplicated." msgstr "%s 키가 중복됨" -#: utils/sort/tuplesort.c:4477 +#: utils/sort/tuplesort.c:4149 #, c-format msgid "Duplicate keys exist." msgstr "중복된 키가 있음" -#: utils/sort/tuplestore.c:515 utils/sort/tuplestore.c:525 -#: utils/sort/tuplestore.c:852 utils/sort/tuplestore.c:956 -#: utils/sort/tuplestore.c:1020 utils/sort/tuplestore.c:1037 -#: utils/sort/tuplestore.c:1239 utils/sort/tuplestore.c:1304 -#: utils/sort/tuplestore.c:1313 +#: utils/sort/tuplestore.c:518 utils/sort/tuplestore.c:528 +#: utils/sort/tuplestore.c:869 utils/sort/tuplestore.c:973 +#: utils/sort/tuplestore.c:1037 utils/sort/tuplestore.c:1054 +#: utils/sort/tuplestore.c:1256 utils/sort/tuplestore.c:1321 +#: utils/sort/tuplestore.c:1330 #, c-format msgid "could not seek in tuplestore temporary file: %m" msgstr "tuplestore 파일에서 seek 작업을 할 수 없음: %m" -#: utils/sort/tuplestore.c:1460 utils/sort/tuplestore.c:1533 -#: utils/sort/tuplestore.c:1539 +#: utils/sort/tuplestore.c:1477 utils/sort/tuplestore.c:1550 +#: utils/sort/tuplestore.c:1556 #, c-format msgid "could not read from tuplestore temporary file: %m" msgstr "tuplestore 임시 파일을 읽을 수 없음: %m" -#: utils/sort/tuplestore.c:1501 utils/sort/tuplestore.c:1506 -#: utils/sort/tuplestore.c:1512 +#: utils/sort/tuplestore.c:1518 utils/sort/tuplestore.c:1523 +#: utils/sort/tuplestore.c:1529 #, c-format msgid "could not write to tuplestore temporary file: %m" msgstr "tuplestore 임시 파일을 쓸 수 없습니다: %m" -#: utils/time/snapmgr.c:618 +#: utils/time/snapmgr.c:622 #, c-format msgid "The source transaction is not running anymore." 
msgstr "소스 트랜잭션이 더 이상 실행중이지 않음" -#: utils/time/snapmgr.c:1190 +#: utils/time/snapmgr.c:1200 #, c-format msgid "cannot export a snapshot from a subtransaction" msgstr "서브트랜잭션에서 스냅샷을 내보낼 수 없음" -#: utils/time/snapmgr.c:1339 utils/time/snapmgr.c:1344 -#: utils/time/snapmgr.c:1349 utils/time/snapmgr.c:1364 -#: utils/time/snapmgr.c:1369 utils/time/snapmgr.c:1374 -#: utils/time/snapmgr.c:1473 utils/time/snapmgr.c:1489 -#: utils/time/snapmgr.c:1514 +#: utils/time/snapmgr.c:1359 utils/time/snapmgr.c:1364 +#: utils/time/snapmgr.c:1369 utils/time/snapmgr.c:1384 +#: utils/time/snapmgr.c:1389 utils/time/snapmgr.c:1394 +#: utils/time/snapmgr.c:1409 utils/time/snapmgr.c:1414 +#: utils/time/snapmgr.c:1419 utils/time/snapmgr.c:1519 +#: utils/time/snapmgr.c:1535 utils/time/snapmgr.c:1560 #, c-format msgid "invalid snapshot data in file \"%s\"" msgstr "\"%s\" 파일에 유효하지 않은 스냅샷 자료가 있습니다" -#: utils/time/snapmgr.c:1411 +#: utils/time/snapmgr.c:1456 #, c-format msgid "SET TRANSACTION SNAPSHOT must be called before any query" msgstr "쿼리보다 먼저 SET TRANSACTION SNAPSHOP 명령을 호출해야 함" -#: utils/time/snapmgr.c:1420 +#: utils/time/snapmgr.c:1465 #, c-format msgid "" "a snapshot-importing transaction must have isolation level SERIALIZABLE or " @@ -24487,12 +26318,12 @@ msgstr "" "스냅샷 가져오기 트랜잭션은 그 격리 수준이 SERIALIZABLE 또는 REPEATABLE READ " "여야 함" -#: utils/time/snapmgr.c:1429 utils/time/snapmgr.c:1438 +#: utils/time/snapmgr.c:1474 utils/time/snapmgr.c:1483 #, c-format msgid "invalid snapshot identifier: \"%s\"" msgstr "잘못된 스냅샷 식별자: \"%s\"" -#: utils/time/snapmgr.c:1527 +#: utils/time/snapmgr.c:1573 #, c-format msgid "" "a serializable transaction cannot import a snapshot from a non-serializable " @@ -24501,7 +26332,7 @@ msgstr "" "직렬화 가능한 트랜잭션은 직렬화 가능하지 않은 트랜잭션에서 스냅샷을 가져올 " "수 없음" -#: utils/time/snapmgr.c:1531 +#: utils/time/snapmgr.c:1577 #, c-format msgid "" "a non-read-only serializable transaction cannot import a snapshot from a " @@ -24509,263 +26340,308 @@ msgid "" msgstr "" "읽기-쓰기 직렬화된 트랜잭션이 읽기 전용 트랜잭션의 스냅샷을 가져올 수 없음" -#: utils/time/snapmgr.c:1546 +#: utils/time/snapmgr.c:1592 #, c-format msgid "cannot import a snapshot from a different database" msgstr "서로 다른 데이터베이스를 대상으로는 스냅샷을 가져올 수 없음" -#: gram.y:1004 +#: gram.y:1002 +#, c-format +msgid "UNENCRYPTED PASSWORD is no longer supported" +msgstr "UNENCRYPTED PASSWORD 옵션은 더이상 지원하지 않음" + +#: gram.y:1003 +#, c-format +msgid "Remove UNENCRYPTED to store the password in encrypted form instead." +msgstr "" + +#: gram.y:1065 #, c-format msgid "unrecognized role option \"%s\"" msgstr "인식할 수 없는 롤 옵션 \"%s\"" -#: gram.y:1278 gram.y:1293 +#: gram.y:1312 gram.y:1327 #, c-format msgid "CREATE SCHEMA IF NOT EXISTS cannot include schema elements" msgstr "" "CREATE SCHEMA IF NOT EXISTS 구문에서는 스키마 요소들을 포함할 수 없습니다." 
-#: gram.y:1438 +#: gram.y:1472 #, c-format msgid "current database cannot be changed" msgstr "현재 데이터베이스를 바꿀 수 없음" -#: gram.y:1562 +#: gram.y:1596 #, c-format msgid "time zone interval must be HOUR or HOUR TO MINUTE" msgstr "" "지역시간대 간격(time zone interval) 값은 시(HOUR) 또는 시분(HOUR TO MINUTE) " "값이어야합니다" -#: gram.y:2600 gram.y:2629 +#: gram.y:2612 +#, c-format +msgid "sequence option \"%s\" not supported here" +msgstr "\"%s\" 시퀀스 옵션은 지원되지 않음" + +#: gram.y:2835 gram.y:2864 #, c-format msgid "STDIN/STDOUT not allowed with PROGRAM" msgstr "PROGRAM 옵션과 STDIN/STDOUT 옵션은 함께 쓸 수 없습니다" -#: gram.y:2895 gram.y:2902 gram.y:10295 gram.y:10303 +#: gram.y:3174 gram.y:3181 gram.y:11072 gram.y:11080 #, c-format msgid "GLOBAL is deprecated in temporary table creation" msgstr "GLOBAL 예약어는 임시 테이블 만들기에서 더 이상 사용하지 않습니다" -#: gram.y:4809 +#: gram.y:5118 +#, c-format +msgid "unrecognized row security option \"%s\"" +msgstr "인식할 수 없는 로우 단위 보안 옵션 \"%s\"" + +#: gram.y:5119 +#, c-format +msgid "Only PERMISSIVE or RESTRICTIVE policies are supported currently." +msgstr "" + +#: gram.y:5227 msgid "duplicate trigger events specified" msgstr "중복 트리거 이벤트가 지정됨" -#: gram.y:4909 +#: gram.y:5370 #, c-format msgid "conflicting constraint properties" msgstr "제약조건 속성이 충돌함" -#: gram.y:5041 +#: gram.y:5476 #, c-format msgid "CREATE ASSERTION is not yet implemented" msgstr "CREATE ASSERTION 명령은 아직 구현 되지 않았습니다" -#: gram.y:5057 +#: gram.y:5491 #, c-format msgid "DROP ASSERTION is not yet implemented" msgstr "CREATE ASSERTION 명령은 아직 구현 되지 않았습니다" -#: gram.y:5403 +#: gram.y:5871 #, c-format msgid "RECHECK is no longer required" msgstr "RECHECK는 더 이상 필요하지 않음" -#: gram.y:5404 +#: gram.y:5872 #, c-format msgid "Update your data type." msgstr "자료형을 업데이트하십시오." -#: gram.y:6983 +#: gram.y:7515 #, c-format msgid "aggregates cannot have output arguments" msgstr "집계 함수는 output 인자를 지정할 수 없음" -#: gram.y:8853 gram.y:8871 +#: gram.y:9647 gram.y:9665 #, c-format msgid "WITH CHECK OPTION not supported on recursive views" msgstr "WITH CHECK OPTION 구문은 재귀적인 뷰에서 지원하지 않습니다" -#: gram.y:9389 +#: gram.y:10198 #, c-format msgid "unrecognized VACUUM option \"%s\"" msgstr "인식할 수 없는 VACUUM 옵션 \"%s\"" -#: gram.y:10403 +#: gram.y:11180 #, c-format msgid "LIMIT #,# syntax is not supported" msgstr "LIMIT #,# 구문은 지원하지 않습니다." -#: gram.y:10404 +#: gram.y:11181 #, c-format msgid "Use separate LIMIT and OFFSET clauses." msgstr "LIMIT # OFFSET # 구문을 사용하세요." -#: gram.y:10667 gram.y:10692 +#: gram.y:11462 gram.y:11487 #, c-format msgid "VALUES in FROM must have an alias" msgstr "FROM 안의 VALUES는 반드시 alias가 있어야합니다" -#: gram.y:10668 gram.y:10693 +#: gram.y:11463 gram.y:11488 #, c-format msgid "For example, FROM (VALUES ...) [AS] foo." msgstr "예, FROM (VALUES ...) [AS] foo." -#: gram.y:10673 gram.y:10698 +#: gram.y:11468 gram.y:11493 #, c-format msgid "subquery in FROM must have an alias" msgstr "FROM 절 내의 subquery 에는 반드시 alias 를 가져야만 합니다" -#: gram.y:10674 gram.y:10699 +#: gram.y:11469 gram.y:11494 #, c-format msgid "For example, FROM (SELECT ...) [AS] foo." msgstr "예, FROM (SELECT ...) [AS] foo." 
-#: gram.y:11273 +#: gram.y:11948 +#, c-format +msgid "only one DEFAULT value is allowed" +msgstr "" + +#: gram.y:11957 +#, c-format +msgid "only one PATH value per column is allowed" +msgstr "" + +#: gram.y:11966 +#, c-format +msgid "conflicting or redundant NULL / NOT NULL declarations for column \"%s\"" +msgstr "NULL/NOT NULL 선언이 서로 충돌합니다 : \"%s\" 칼럼" + +#: gram.y:11975 +#, c-format +msgid "unrecognized column option \"%s\"" +msgstr "인식할 수 없는 칼럼 옵션 \"%s\"" + +#: gram.y:12229 #, c-format msgid "precision for type float must be at least 1 bit" msgstr "실수형 자료의 정밀도 값으로는 적어도 1 bit 이상을 지정해야합니다." -#: gram.y:11282 +#: gram.y:12238 #, c-format msgid "precision for type float must be less than 54 bits" msgstr "실수형 자료의 정밀도 값으로 최대 54 bit 까지입니다." -#: gram.y:11786 +#: gram.y:12729 #, c-format msgid "wrong number of parameters on left side of OVERLAPS expression" msgstr "OVERLAPS 식의 왼쪽에 있는 매개 변수 수가 잘못됨" -#: gram.y:11791 +#: gram.y:12734 #, c-format msgid "wrong number of parameters on right side of OVERLAPS expression" msgstr "OVERLAPS 식의 오른쪽에 있는 매개 변수 수가 잘못됨" -#: gram.y:11966 +#: gram.y:12909 #, c-format msgid "UNIQUE predicate is not yet implemented" msgstr "UNIQUE 술어는 아직 구현되지 못했습니다" -#: gram.y:12300 +#: gram.y:13256 #, c-format msgid "cannot use multiple ORDER BY clauses with WITHIN GROUP" msgstr "WITHIN GROUP 구문 안에서 중복된 ORDER BY 구문은 허용하지 않습니다" -#: gram.y:12305 +#: gram.y:13261 #, c-format msgid "cannot use DISTINCT with WITHIN GROUP" msgstr "DISTINCT과 WITHIN GROUP을 함께 쓸 수 없습니다" -#: gram.y:12310 +#: gram.y:13266 #, c-format msgid "cannot use VARIADIC with WITHIN GROUP" msgstr "VARIADIC과 WITHIN GROUP을 함께 쓸 수 없습니다" -#: gram.y:12816 +#: gram.y:13692 #, c-format msgid "RANGE PRECEDING is only supported with UNBOUNDED" msgstr "RANGE PRECEDING은 UNBOUNDED와 함께 사용해야 합니다" -#: gram.y:12822 +#: gram.y:13698 #, c-format msgid "RANGE FOLLOWING is only supported with UNBOUNDED" msgstr "RANGE FOLLOWING은 UNBOUNDED와 함께 사용해야 합니다" -#: gram.y:12849 gram.y:12872 +#: gram.y:13725 gram.y:13748 #, c-format msgid "frame start cannot be UNBOUNDED FOLLOWING" msgstr "프레임 시작은 UNBOUNDED FOLLOWING일 수 없음" -#: gram.y:12854 +#: gram.y:13730 #, c-format msgid "frame starting from following row cannot end with current row" msgstr "따라오는 로우의 프레임 시작은 현재 로우의 끝일 수 없습니다" -#: gram.y:12877 +#: gram.y:13753 #, c-format msgid "frame end cannot be UNBOUNDED PRECEDING" msgstr "프레임 끝은 UNBOUNDED PRECEDING일 수 없음" -#: gram.y:12883 +#: gram.y:13759 #, c-format msgid "frame starting from current row cannot have preceding rows" msgstr "현재 로우의 프레임 시작은 선행하는 로우를 가질 수 없습니다" -#: gram.y:12890 +#: gram.y:13766 #, c-format msgid "frame starting from following row cannot have preceding rows" msgstr "따라오는 로우의 프레임 시작은 선행하는 로우를 가질 수 없습니다" -#: gram.y:13555 +#: gram.y:14401 #, c-format msgid "type modifier cannot have parameter name" msgstr "자료형 한정자는 매개 변수 이름을 사용할 수 없음" -#: gram.y:13561 +#: gram.y:14407 #, c-format msgid "type modifier cannot have ORDER BY" msgstr "자료형 한정자는 ORDER BY 구문을 사용할 수 없음" -#: gram.y:13625 gram.y:13631 +#: gram.y:14471 gram.y:14477 #, c-format msgid "%s cannot be used as a role name here" msgstr "%s 이름은 여기서 롤 이름으로 사용할 수 없음" -#: gram.y:14253 gram.y:14442 +#: gram.y:15139 gram.y:15328 msgid "improper use of \"*\"" msgstr "\"*\" 사용이 잘못됨" -#: gram.y:14506 +#: gram.y:15392 #, c-format msgid "" "an ordered-set aggregate with a VARIADIC direct argument must have one " "VARIADIC aggregated argument of the same data type" msgstr "" -#: gram.y:14543 +#: gram.y:15429 #, c-format msgid "multiple ORDER BY clauses not allowed" msgstr "중복된 ORDER BY 구문은 허용하지 
않습니다" -#: gram.y:14554 +#: gram.y:15440 #, c-format msgid "multiple OFFSET clauses not allowed" msgstr "중복된 OFFSET 구문은 허용하지 않습니다" -#: gram.y:14563 +#: gram.y:15449 #, c-format msgid "multiple LIMIT clauses not allowed" msgstr "중복된 LIMIT 구문은 허용하지 않습니다" -#: gram.y:14572 +#: gram.y:15458 #, c-format msgid "multiple WITH clauses not allowed" msgstr "중복된 WITH 절은 허용하지 않음" -#: gram.y:14764 +#: gram.y:15662 #, c-format msgid "OUT and INOUT arguments aren't allowed in TABLE functions" msgstr "OUT 및 INOUT 인자는 TABLE 함수에 사용할 수 없음" -#: gram.y:14865 +#: gram.y:15763 #, c-format msgid "multiple COLLATE clauses not allowed" msgstr "중복된 COLLATE 구문은 허용하지 않습니다" #. translator: %s is CHECK, UNIQUE, or similar -#: gram.y:14903 gram.y:14916 +#: gram.y:15801 gram.y:15814 #, c-format msgid "%s constraints cannot be marked DEFERRABLE" msgstr "%s 제약조건에는 DEFERRABLE 옵션을 쓸 수 없음" #. translator: %s is CHECK, UNIQUE, or similar -#: gram.y:14929 +#: gram.y:15827 #, c-format msgid "%s constraints cannot be marked NOT VALID" msgstr "%s 제약조건에는 NOT VALID 옵션을 쓸 수 없음" #. translator: %s is CHECK, UNIQUE, or similar -#: gram.y:14942 +#: gram.y:15840 #, c-format msgid "%s constraints cannot be marked NO INHERIT" msgstr "%s 제약조건에는 NO INHERIT 옵션을 쓸 수 없음" @@ -24832,24 +26708,19 @@ msgstr "구문 오류가 너무 많습니다. \"%s\" 파일을 무시합니다" msgid "could not open configuration directory \"%s\": %m" msgstr "\"%s\" 환경 설정 디렉터리를 열 수 없습니다: %m" -#: repl_gram.y:260 repl_gram.y:292 +#: repl_gram.y:330 repl_gram.y:362 #, c-format msgid "invalid timeline %u" msgstr "잘못된 타임라인: %u" -#: repl_scanner.l:120 +#: repl_scanner.l:126 msgid "invalid streaming start location" msgstr "잘못된 스트리밍 시작 위치" -#: repl_scanner.l:171 scan.l:670 +#: repl_scanner.l:177 scan.l:670 msgid "unterminated quoted string" msgstr "마무리 안된 따옴표 안의 문자열" -#: repl_scanner.l:181 -#, c-format -msgid "syntax error: unexpected character \"%s\"" -msgstr "구문 오류: \"%s\" 부근" - # # advance 끝 #: scan.l:432 msgid "unterminated /* comment" @@ -24917,7 +26788,7 @@ msgstr "마무리 안된 달러-따옴표 안의 문자열" msgid "zero-length delimited identifier" msgstr "길이가 0인 구분 식별자" -#: scan.l:793 syncrep_scanner.l:84 +#: scan.l:793 syncrep_scanner.l:89 msgid "unterminated quoted identifier" msgstr "마무리 안된 따옴표 안의 식별자" diff --git a/src/backend/po/ru.po b/src/backend/po/ru.po index a4ce728f18..c3e84a30fa 100644 --- a/src/backend/po/ru.po +++ b/src/backend/po/ru.po @@ -4,14 +4,14 @@ # Serguei A. Mokhov , 2001-2005. # Oleg Bartunov , 2004-2005. # Dmitriy Olshevskiy , 2014. -# Alexander Lakhin , 2012-2017. -# +# Alexander Lakhin , 2012-2017, 2018. msgid "" msgstr "" "Project-Id-Version: postgres (PostgreSQL current)\n" "Report-Msgid-Bugs-To: pgsql-bugs@postgresql.org\n" -"POT-Creation-Date: 2017-04-04 04:08+0000\n" -"PO-Revision-Date: 2017-04-06 17:55+0300\n" +"POT-Creation-Date: 2018-02-12 12:28+0300\n" +"PO-Revision-Date: 2018-02-06 17:01+0300\n" +"Last-Translator: Alexander Lakhin \n" "Language-Team: Russian \n" "Language: ru\n" "MIME-Version: 1.0\n" @@ -19,18 +19,17 @@ msgstr "" "Content-Transfer-Encoding: 8bit\n" "Plural-Forms: nplurals=3; plural=(n%10==1 && n%100!=11 ? 0 : n%10>=2 && n" "%10<=4 && (n%100<10 || n%100>=20) ? 
1 : 2);\n" -"Last-Translator: Alexander Lakhin \n" -#: ../common/config_info.c:131 ../common/config_info.c:139 -#: ../common/config_info.c:147 ../common/config_info.c:155 -#: ../common/config_info.c:163 ../common/config_info.c:171 -#: ../common/config_info.c:179 ../common/config_info.c:187 -#: ../common/config_info.c:195 +#: ../common/config_info.c:130 ../common/config_info.c:138 +#: ../common/config_info.c:146 ../common/config_info.c:154 +#: ../common/config_info.c:162 ../common/config_info.c:170 +#: ../common/config_info.c:178 ../common/config_info.c:186 +#: ../common/config_info.c:194 msgid "not recorded" msgstr "не записано" -#: ../common/controldata_utils.c:57 commands/copy.c:3042 -#: commands/extension.c:3325 utils/adt/genfile.c:135 +#: ../common/controldata_utils.c:57 commands/copy.c:3145 +#: commands/extension.c:3330 utils/adt/genfile.c:135 #, c-format msgid "could not open file \"%s\" for reading: %m" msgstr "не удалось открыть файл \"%s\" для чтения: %m" @@ -41,14 +40,14 @@ msgid "%s: could not open file \"%s\" for reading: %s\n" msgstr "%s: не удалось открыть файл \"%s\" для чтения: %s\n" #: ../common/controldata_utils.c:71 access/transam/timeline.c:348 -#: access/transam/xlog.c:3381 access/transam/xlog.c:10756 -#: access/transam/xlog.c:10769 access/transam/xlog.c:11161 -#: access/transam/xlog.c:11204 access/transam/xlog.c:11243 -#: access/transam/xlog.c:11286 access/transam/xlogfuncs.c:667 -#: access/transam/xlogfuncs.c:686 commands/extension.c:3335 libpq/hba.c:499 -#: replication/logical/origin.c:658 replication/logical/origin.c:688 -#: replication/logical/reorderbuffer.c:3064 replication/walsender.c:498 -#: storage/file/copydir.c:178 utils/adt/genfile.c:152 utils/adt/misc.c:924 +#: access/transam/xlog.c:3384 access/transam/xlog.c:10802 +#: access/transam/xlog.c:10815 access/transam/xlog.c:11232 +#: access/transam/xlog.c:11275 access/transam/xlog.c:11314 +#: access/transam/xlog.c:11357 access/transam/xlogfuncs.c:668 +#: access/transam/xlogfuncs.c:687 commands/extension.c:3340 libpq/hba.c:499 +#: replication/logical/origin.c:702 replication/logical/origin.c:732 +#: replication/logical/reorderbuffer.c:3079 replication/walsender.c:507 +#: storage/file/copydir.c:204 utils/adt/genfile.c:152 utils/adt/misc.c:924 #, c-format msgid "could not read file \"%s\": %m" msgstr "не удалось прочитать файл \"%s\": %m" @@ -172,26 +171,26 @@ msgid "could not close directory \"%s\": %s\n" msgstr "не удалось закрыть каталог \"%s\": %s\n" #: ../common/psprintf.c:179 ../port/path.c:630 ../port/path.c:668 -#: ../port/path.c:685 access/transam/twophase.c:1264 access/transam/xlog.c:6347 -#: lib/stringinfo.c:300 libpq/auth.c:1063 libpq/auth.c:1428 libpq/auth.c:1496 -#: libpq/auth.c:2012 postmaster/bgworker.c:332 postmaster/bgworker.c:874 -#: postmaster/postmaster.c:2363 postmaster/postmaster.c:2385 -#: postmaster/postmaster.c:3935 postmaster/postmaster.c:4635 -#: postmaster/postmaster.c:4710 postmaster/postmaster.c:5379 -#: postmaster/postmaster.c:5660 -#: replication/libpqwalreceiver/libpqwalreceiver.c:251 -#: replication/logical/logical.c:168 storage/buffer/localbuf.c:436 -#: storage/file/fd.c:773 storage/file/fd.c:1201 storage/file/fd.c:1319 -#: storage/file/fd.c:2044 storage/ipc/procarray.c:1054 -#: storage/ipc/procarray.c:1542 storage/ipc/procarray.c:1549 -#: storage/ipc/procarray.c:1963 storage/ipc/procarray.c:2566 -#: utils/adt/formatting.c:1567 utils/adt/formatting.c:1685 -#: utils/adt/formatting.c:1804 utils/adt/pg_locale.c:468 -#: utils/adt/pg_locale.c:652 utils/adt/regexp.c:219 
utils/adt/varlena.c:4552 -#: utils/adt/varlena.c:4573 utils/fmgr/dfmgr.c:216 utils/hash/dynahash.c:429 -#: utils/hash/dynahash.c:535 utils/hash/dynahash.c:1047 utils/mb/mbutils.c:376 -#: utils/mb/mbutils.c:709 utils/misc/guc.c:3967 utils/misc/guc.c:3983 -#: utils/misc/guc.c:3996 utils/misc/guc.c:6945 utils/misc/tzparser.c:468 +#: ../port/path.c:685 access/transam/twophase.c:1306 access/transam/xlog.c:6363 +#: lib/stringinfo.c:258 libpq/auth.c:1126 libpq/auth.c:1492 libpq/auth.c:1560 +#: libpq/auth.c:2076 postmaster/bgworker.c:337 postmaster/bgworker.c:908 +#: postmaster/postmaster.c:2439 postmaster/postmaster.c:2461 +#: postmaster/postmaster.c:4023 postmaster/postmaster.c:4731 +#: postmaster/postmaster.c:4806 postmaster/postmaster.c:5484 +#: postmaster/postmaster.c:5821 +#: replication/libpqwalreceiver/libpqwalreceiver.c:256 +#: replication/logical/logical.c:170 storage/buffer/localbuf.c:436 +#: storage/file/fd.c:772 storage/file/fd.c:1200 storage/file/fd.c:1318 +#: storage/file/fd.c:2049 storage/ipc/procarray.c:1058 +#: storage/ipc/procarray.c:1546 storage/ipc/procarray.c:1553 +#: storage/ipc/procarray.c:1970 storage/ipc/procarray.c:2581 +#: utils/adt/formatting.c:1579 utils/adt/formatting.c:1703 +#: utils/adt/formatting.c:1828 utils/adt/pg_locale.c:468 +#: utils/adt/pg_locale.c:652 utils/adt/regexp.c:219 utils/adt/varlena.c:4589 +#: utils/adt/varlena.c:4610 utils/fmgr/dfmgr.c:221 utils/hash/dynahash.c:444 +#: utils/hash/dynahash.c:553 utils/hash/dynahash.c:1065 utils/mb/mbutils.c:376 +#: utils/mb/mbutils.c:709 utils/misc/guc.c:3998 utils/misc/guc.c:4014 +#: utils/misc/guc.c:4027 utils/misc/guc.c:6976 utils/misc/tzparser.c:468 #: utils/mmgr/aset.c:404 utils/mmgr/dsa.c:713 utils/mmgr/dsa.c:795 #: utils/mmgr/mcxt.c:725 utils/mmgr/mcxt.c:760 utils/mmgr/mcxt.c:797 #: utils/mmgr/mcxt.c:834 utils/mmgr/mcxt.c:868 utils/mmgr/mcxt.c:897 @@ -258,12 +257,17 @@ msgstr "не удалось получить информацию о файле msgid "could not remove file or directory \"%s\": %s\n" msgstr "ошибка при удалении файла или каталога \"%s\": %s\n" +#: ../common/saslprep.c:1090 +#, c-format +msgid "password too long" +msgstr "слишком длинный пароль" + #: ../common/username.c:43 #, c-format msgid "could not look up effective user ID %ld: %s" msgstr "выяснить эффективный идентификатор пользователя (%ld) не удалось: %s" -#: ../common/username.c:45 libpq/auth.c:1959 +#: ../common/username.c:45 libpq/auth.c:2023 msgid "user does not exist" msgstr "пользователь не существует" @@ -392,112 +396,135 @@ msgid "could not check access token membership: error code %lu\n" msgstr "" "не удалось проверить вхождение в маркере безопасности (код ошибки: %lu)\n" -#: access/brin/brin.c:860 access/brin/brin.c:931 +#: access/brin/brin.c:867 access/brin/brin.c:938 #, c-format msgid "block number out of range: %s" msgstr "номер блока вне диапазона: %s" -#: access/brin/brin.c:883 access/brin/brin.c:954 +#: access/brin/brin.c:890 access/brin/brin.c:961 #, c-format msgid "\"%s\" is not a BRIN index" msgstr "\"%s\" - это не индекс BRIN" -#: access/brin/brin.c:899 access/brin/brin.c:970 +#: access/brin/brin.c:906 access/brin/brin.c:977 #, c-format msgid "could not open parent table of index %s" msgstr "не удалось родительскую таблицу индекса %s" -#: access/brin/brin_pageops.c:76 access/brin/brin_pageops.c:360 -#: access/brin/brin_pageops.c:826 +#: access/brin/brin_pageops.c:76 access/brin/brin_pageops.c:364 +#: access/brin/brin_pageops.c:830 access/gin/ginentrypage.c:110 +#: access/gist/gist.c:1364 access/nbtree/nbtinsert.c:577 +#: 
access/nbtree/nbtsort.c:488 access/spgist/spgdoinsert.c:1933 #, c-format -msgid "index row size %lu exceeds maximum %lu for index \"%s\"" +msgid "index row size %zu exceeds maximum %zu for index \"%s\"" msgstr "" -"размер строки индекса (%lu) больше предельного размера (%lu) (индекс \"%s\")" +"размер строки индекса (%zu) больше предельного размера (%zu) (индекс \"%s\")" -#: access/brin/brin_revmap.c:379 access/brin/brin_revmap.c:385 +#: access/brin/brin_revmap.c:382 access/brin/brin_revmap.c:388 #, c-format msgid "corrupted BRIN index: inconsistent range map" msgstr "испорченный индекс BRIN: несогласованность в карте диапазонов" -#: access/brin/brin_revmap.c:401 +#: access/brin/brin_revmap.c:404 #, c-format msgid "leftover placeholder tuple detected in BRIN index \"%s\", deleting" msgstr "" "в BRIN-индексе \"%s\" обнаружен оставшийся кортеж-местозаполнитель, он " "удаляется" -#: access/brin/brin_revmap.c:597 +#: access/brin/brin_revmap.c:601 #, c-format msgid "unexpected page type 0x%04X in BRIN index \"%s\" block %u" msgstr "неожиданный тип страницы 0x%04X в BRIN-индексе \"%s\" (блок: %u)" -#: access/brin/brin_validate.c:116 +#: access/brin/brin_validate.c:116 access/gin/ginvalidate.c:149 +#: access/gist/gistvalidate.c:146 access/hash/hashvalidate.c:131 +#: access/nbtree/nbtvalidate.c:101 access/spgist/spgvalidate.c:116 #, c-format msgid "" -"brin operator family \"%s\" contains function %s with invalid support number " -"%d" +"operator family \"%s\" of access method %s contains function %s with invalid " +"support number %d" msgstr "" -"семейство операторов brin \"%s\" содержит функцию %s с неправильным опорным " -"номером %d" +"семейство операторов \"%s\" метода доступа %s содержит функцию %s с " +"неправильным опорным номером %d" -#: access/brin/brin_validate.c:132 +#: access/brin/brin_validate.c:132 access/gin/ginvalidate.c:161 +#: access/gist/gistvalidate.c:158 access/hash/hashvalidate.c:114 +#: access/nbtree/nbtvalidate.c:113 access/spgist/spgvalidate.c:128 #, c-format msgid "" -"brin operator family \"%s\" contains function %s with wrong signature for " -"support number %d" +"operator family \"%s\" of access method %s contains function %s with wrong " +"signature for support number %d" msgstr "" -"семейство операторов brin \"%s\" содержит функцию %s с неподходящим " -"объявлением для опорного номера %d" +"семейство операторов \"%s\" метода доступа %s содержит функцию %s с " +"неподходящим объявлением для опорного номера %d" -#: access/brin/brin_validate.c:154 +#: access/brin/brin_validate.c:154 access/gin/ginvalidate.c:180 +#: access/gist/gistvalidate.c:178 access/hash/hashvalidate.c:152 +#: access/nbtree/nbtvalidate.c:133 access/spgist/spgvalidate.c:147 #, c-format msgid "" -"brin operator family \"%s\" contains operator %s with invalid strategy " -"number %d" +"operator family \"%s\" of access method %s contains operator %s with invalid " +"strategy number %d" msgstr "" -"семейство операторов brin \"%s\" содержит оператор %s с неправильным номером " -"стратегии %d" +"семейство операторов \"%s\" метода доступа %s содержит оператор %s с " +"неправильным номером стратегии %d" -#: access/brin/brin_validate.c:183 +#: access/brin/brin_validate.c:183 access/gin/ginvalidate.c:193 +#: access/hash/hashvalidate.c:165 access/nbtree/nbtvalidate.c:146 +#: access/spgist/spgvalidate.c:160 #, c-format msgid "" -"brin operator family \"%s\" contains invalid ORDER BY specification for " -"operator %s" +"operator family \"%s\" of access method %s contains invalid ORDER BY " +"specification for 
operator %s" msgstr "" -"семейство операторов brin \"%s\" содержит некорректное определение ORDER BY " -"для оператора %s" +"семейство операторов \"%s\" метода доступа %s содержит некорректное " +"определение ORDER BY для оператора %s" -#: access/brin/brin_validate.c:196 +#: access/brin/brin_validate.c:196 access/gin/ginvalidate.c:206 +#: access/gist/gistvalidate.c:226 access/hash/hashvalidate.c:178 +#: access/nbtree/nbtvalidate.c:159 access/spgist/spgvalidate.c:173 #, c-format -msgid "brin operator family \"%s\" contains operator %s with wrong signature" +msgid "" +"operator family \"%s\" of access method %s contains operator %s with wrong " +"signature" msgstr "" -"семейство операторов brin \"%s\" содержит оператор %s с неподходящим " -"объявлением" +"семейство операторов \"%s\" метода доступа %s содержит оператор %s с " +"неподходящим объявлением" -#: access/brin/brin_validate.c:234 +#: access/brin/brin_validate.c:234 access/hash/hashvalidate.c:218 +#: access/nbtree/nbtvalidate.c:201 access/spgist/spgvalidate.c:201 #, c-format -msgid "brin operator family \"%s\" is missing operator(s) for types %s and %s" -msgstr "в семействе операторов brin \"%s\" нет оператора(ов) для типов %s и %s" +msgid "" +"operator family \"%s\" of access method %s is missing operator(s) for types " +"%s and %s" +msgstr "" +"в семействе операторов \"%s\" метода доступа %s нет оператора(ов) для типов " +"%s и %s" #: access/brin/brin_validate.c:244 #, c-format msgid "" -"brin operator family \"%s\" is missing support function(s) for types %s and " -"%s" +"operator family \"%s\" of access method %s is missing support function(s) " +"for types %s and %s" msgstr "" -"в семействе операторов brin \"%s\" нет опорных функций для типов %s и %s" +"в семействе операторов \"%s\" метода доступа %s нет опорных функций для " +"типов %s и %s" -#: access/brin/brin_validate.c:257 +#: access/brin/brin_validate.c:257 access/hash/hashvalidate.c:232 +#: access/nbtree/nbtvalidate.c:225 access/spgist/spgvalidate.c:234 #, c-format -msgid "brin operator class \"%s\" is missing operator(s)" -msgstr "в классе операторов brin \"%s\" нет оператора(ов)" +msgid "operator class \"%s\" of access method %s is missing operator(s)" +msgstr "в классе операторов \"%s\" метода доступа %s нет оператора(ов)" -#: access/brin/brin_validate.c:268 +#: access/brin/brin_validate.c:268 access/gin/ginvalidate.c:247 +#: access/gist/gistvalidate.c:265 #, c-format -msgid "brin operator class \"%s\" is missing support function %d" -msgstr "в классе операторов brin \"%s\" нет опорной функции %d" +msgid "" +"operator class \"%s\" of access method %s is missing support function %d" +msgstr "в классе операторов \"%s\" метода доступа %s нет опорной функции %d" -#: access/common/heaptuple.c:708 access/common/heaptuple.c:1407 +#: access/common/heaptuple.c:708 access/common/heaptuple.c:1405 #, c-format msgid "number of columns (%d) exceeds limit (%d)" msgstr "число столбцов (%d) превышает предел (%d)" @@ -512,8 +539,8 @@ msgstr "число столбцов индекса (%d) превышает пр msgid "index row requires %zu bytes, maximum size is %zu" msgstr "строка индекса требует байт: %zu, при максимуме: %zu" -#: access/common/printtup.c:290 tcop/fastpath.c:182 tcop/fastpath.c:544 -#: tcop/postgres.c:1732 +#: access/common/printtup.c:290 tcop/fastpath.c:182 tcop/fastpath.c:532 +#: tcop/postgres.c:1726 #, c-format msgid "unsupported format code: %d" msgstr "неподдерживаемый код формата: %d" @@ -585,7 +612,7 @@ msgid "" msgstr "" "Число возвращённых столбцов (%d) не соответствует ожидаемому числу 
(%d)." -#: access/common/tupconvert.c:316 +#: access/common/tupconvert.c:318 #, c-format msgid "" "Attribute \"%s\" of type %s does not match corresponding attribute of type " @@ -593,13 +620,13 @@ msgid "" msgstr "" "Атрибут \"%s\" типа %s несовместим с соответствующим атрибутом типа %s." -#: access/common/tupconvert.c:328 +#: access/common/tupconvert.c:330 #, c-format msgid "Attribute \"%s\" of type %s does not exist in type %s." msgstr "Атрибут \"%s\" типа %s не существует в типе %s." -#: access/common/tupdesc.c:722 parser/parse_clause.c:816 -#: parser/parse_relation.c:1543 +#: access/common/tupdesc.c:728 parser/parse_clause.c:812 +#: parser/parse_relation.c:1538 #, c-format msgid "column \"%s\" cannot be declared SETOF" msgstr "столбец \"%s\" не может быть объявлен как SETOF" @@ -614,34 +641,26 @@ msgstr "слишком длинный список указателей" msgid "Reduce maintenance_work_mem." msgstr "Уменьшите maintenance_work_mem." -#: access/gin/ginentrypage.c:110 access/gist/gist.c:1363 -#: access/nbtree/nbtinsert.c:577 access/nbtree/nbtsort.c:488 -#: access/spgist/spgdoinsert.c:1933 -#, c-format -msgid "index row size %zu exceeds maximum %zu for index \"%s\"" -msgstr "" -"размер строки индекса (%zu) больше предельного размера (%zu) (индекс \"%s\")" - -#: access/gin/ginfast.c:991 access/transam/xlog.c:10178 -#: access/transam/xlog.c:10695 access/transam/xlogfuncs.c:295 -#: access/transam/xlogfuncs.c:322 access/transam/xlogfuncs.c:361 -#: access/transam/xlogfuncs.c:382 access/transam/xlogfuncs.c:403 -#: access/transam/xlogfuncs.c:473 access/transam/xlogfuncs.c:529 +#: access/gin/ginfast.c:995 access/transam/xlog.c:10216 +#: access/transam/xlog.c:10741 access/transam/xlogfuncs.c:296 +#: access/transam/xlogfuncs.c:323 access/transam/xlogfuncs.c:362 +#: access/transam/xlogfuncs.c:383 access/transam/xlogfuncs.c:404 +#: access/transam/xlogfuncs.c:474 access/transam/xlogfuncs.c:530 #, c-format msgid "recovery is in progress" msgstr "идёт процесс восстановления" -#: access/gin/ginfast.c:992 +#: access/gin/ginfast.c:996 #, c-format msgid "GIN pending list cannot be cleaned up during recovery." msgstr "Очередь записей GIN нельзя очистить в процессе восстановления." -#: access/gin/ginfast.c:999 +#: access/gin/ginfast.c:1003 #, c-format msgid "\"%s\" is not a GIN index" msgstr "\"%s\" - это не индекс GIN" -#: access/gin/ginfast.c:1010 +#: access/gin/ginfast.c:1014 #, c-format msgid "cannot access temporary indexes of other sessions" msgstr "обращаться к временным индексам других сеансов нельзя" @@ -657,81 +676,37 @@ msgstr "" msgid "To fix this, do REINDEX INDEX \"%s\"." msgstr "Для исправления выполните REINDEX INDEX \"%s\"." 
-#: access/gin/ginutil.c:134 executor/execExpr.c:1759 -#: utils/adt/arrayfuncs.c:3803 utils/adt/arrayfuncs.c:6325 +#: access/gin/ginutil.c:134 executor/execExpr.c:1780 +#: utils/adt/arrayfuncs.c:3803 utils/adt/arrayfuncs.c:6323 #: utils/adt/rowtypes.c:927 #, c-format msgid "could not identify a comparison function for type %s" msgstr "не удалось найти функцию сравнения для типа %s" -#: access/gin/ginvalidate.c:93 -#, c-format -msgid "" -"gin operator family \"%s\" contains support procedure %s with cross-type " -"registration" -msgstr "" -"семейство операторов gin \"%s\" содержит опорную процедуру %s с межтиповой " -"регистрацией" - -#: access/gin/ginvalidate.c:149 -#, c-format -msgid "" -"gin operator family \"%s\" contains function %s with invalid support number " -"%d" -msgstr "" -"семейство операторов gin \"%s\" содержит функцию %s с неправильным опорным " -"номером %d" - -#: access/gin/ginvalidate.c:161 +#: access/gin/ginvalidate.c:93 access/gist/gistvalidate.c:93 +#: access/hash/hashvalidate.c:99 access/spgist/spgvalidate.c:93 #, c-format msgid "" -"gin operator family \"%s\" contains function %s with wrong signature for " -"support number %d" +"operator family \"%s\" of access method %s contains support procedure %s " +"with different left and right input types" msgstr "" -"семейство операторов gin \"%s\" содержит функцию %s с неподходящим " -"объявлением для опорного номера %d" +"семейство операторов \"%s\" метода доступа %s содержит опорную процедуру %s " +"с межтиповой регистрацией" -#: access/gin/ginvalidate.c:180 +#: access/gin/ginvalidate.c:257 #, c-format msgid "" -"gin operator family \"%s\" contains operator %s with invalid strategy number " +"operator class \"%s\" of access method %s is missing support function %d or " "%d" msgstr "" -"семейство операторов gin \"%s\" содержит оператор %s с неправильным номером " -"стратегии %d" - -#: access/gin/ginvalidate.c:193 -#, c-format -msgid "" -"gin operator family \"%s\" contains invalid ORDER BY specification for " -"operator %s" -msgstr "" -"семейство операторов gin \"%s\" содержит некорректное определение ORDER BY " -"для оператора %s" - -#: access/gin/ginvalidate.c:206 -#, c-format -msgid "gin operator family \"%s\" contains operator %s with wrong signature" -msgstr "" -"семейство операторов gin \"%s\" содержит оператор %s с неподходящим " -"объявлением" - -#: access/gin/ginvalidate.c:247 -#, c-format -msgid "gin operator class \"%s\" is missing support function %d" -msgstr "в классе операторов gin \"%s\" нет опорной функции %d" - -#: access/gin/ginvalidate.c:257 -#, c-format -msgid "gin operator class \"%s\" is missing support function %d or %d" -msgstr "в классе операторов gin \"%s\" нет опорной функции %d или %d" +"в классе операторов \"%s\" метода доступа %s нет опорной функции %d или %d" -#: access/gist/gist.c:706 access/gist/gistvacuum.c:258 +#: access/gist/gist.c:707 access/gist/gistvacuum.c:258 #, c-format msgid "index \"%s\" contains an inner tuple marked as invalid" msgstr "индекс \"%s\" содержит внутренний кортеж, отмеченный как ошибочный" -#: access/gist/gist.c:708 access/gist/gistvacuum.c:260 +#: access/gist/gist.c:709 access/gist/gistvacuum.c:260 #, c-format msgid "" "This is caused by an incomplete page split at crash recovery before " @@ -740,10 +715,10 @@ msgstr "" "Это вызвано неполным разделением страницы при восстановлении после сбоя в " "PostgreSQL до версии 9.1." 
-#: access/gist/gist.c:709 access/gist/gistutil.c:739 access/gist/gistutil.c:750 -#: access/gist/gistvacuum.c:261 access/hash/hashutil.c:240 -#: access/hash/hashutil.c:251 access/hash/hashutil.c:263 -#: access/hash/hashutil.c:284 access/nbtree/nbtpage.c:519 +#: access/gist/gist.c:710 access/gist/gistutil.c:739 access/gist/gistutil.c:750 +#: access/gist/gistvacuum.c:261 access/hash/hashutil.c:241 +#: access/hash/hashutil.c:252 access/hash/hashutil.c:264 +#: access/hash/hashutil.c:285 access/nbtree/nbtpage.c:519 #: access/nbtree/nbtpage.c:530 #, c-format msgid "Please REINDEX it." @@ -779,83 +754,35 @@ msgstr "" "разработчиками или попробуйте указать этот столбец в команде CREATE INDEX " "вторым." -#: access/gist/gistutil.c:736 access/hash/hashutil.c:237 +#: access/gist/gistutil.c:736 access/hash/hashutil.c:238 #: access/nbtree/nbtpage.c:516 #, c-format msgid "index \"%s\" contains unexpected zero page at block %u" msgstr "в индексе \"%s\" неожиданно оказалась нулевая страница в блоке %u" -#: access/gist/gistutil.c:747 access/hash/hashutil.c:248 -#: access/hash/hashutil.c:260 access/nbtree/nbtpage.c:527 +#: access/gist/gistutil.c:747 access/hash/hashutil.c:249 +#: access/hash/hashutil.c:261 access/nbtree/nbtpage.c:527 #, c-format msgid "index \"%s\" contains corrupted page at block %u" msgstr "индекс \"%s\" содержит испорченную страницу в блоке %u" -#: access/gist/gistvalidate.c:93 -#, c-format -msgid "" -"gist operator family \"%s\" contains support procedure %s with cross-type " -"registration" -msgstr "" -"семейство операторов gist \"%s\" содержит опорную процедуру %s с межтиповой " -"регистрацией" - -#: access/gist/gistvalidate.c:146 -#, c-format -msgid "" -"gist operator family \"%s\" contains function %s with invalid support number " -"%d" -msgstr "" -"семейство операторов gist \"%s\" содержит функцию %s с неправильным опорным " -"номером %d" - -#: access/gist/gistvalidate.c:158 -#, c-format -msgid "" -"gist operator family \"%s\" contains function %s with wrong signature for " -"support number %d" -msgstr "" -"семейство операторов gist \"%s\" содержит функцию %s с неподходящим " -"объявлением для опорного номера %d" - -#: access/gist/gistvalidate.c:178 -#, c-format -msgid "" -"gist operator family \"%s\" contains operator %s with invalid strategy " -"number %d" -msgstr "" -"семейство операторов gist \"%s\" содержит оператор %s с неправильным номером " -"стратегии %d" - #: access/gist/gistvalidate.c:196 #, c-format msgid "" -"gist operator family \"%s\" contains unsupported ORDER BY specification for " -"operator %s" +"operator family \"%s\" of access method %s contains unsupported ORDER BY " +"specification for operator %s" msgstr "" -"семейство операторов gist \"%s\" содержит неподдерживаемое определение ORDER " -"BY для оператора %s" +"семейство операторов \"%s\" метода доступа %s содержит неподдерживаемое " +"определение ORDER BY для оператора %s" #: access/gist/gistvalidate.c:207 #, c-format msgid "" -"gist operator family \"%s\" contains incorrect ORDER BY opfamily " -"specification for operator %s" -msgstr "" -"семейство операторов gist \"%s\" содержит некорректное определение ORDER BY " -"для оператора %s" - -#: access/gist/gistvalidate.c:226 -#, c-format -msgid "gist operator family \"%s\" contains operator %s with wrong signature" +"operator family \"%s\" of access method %s contains incorrect ORDER BY " +"opfamily specification for operator %s" msgstr "" -"семейство операторов gist \"%s\" содержит оператор %s с неподходящим " -"объявлением" - -#: 
access/gist/gistvalidate.c:265 -#, c-format -msgid "gist operator class \"%s\" is missing support function %d" -msgstr "в классе операторов gist \"%s\" нет опорной функции %d" +"семейство операторов \"%s\" метода доступа %s содержит некорректное " +"определение ORDER BY для оператора %s" #: access/hash/hashinsert.c:82 #, c-format @@ -873,7 +800,7 @@ msgstr "Значения, не умещающиеся в страницу буф msgid "invalid overflow block number %u" msgstr "неверный номер блока переполнения: %u" -#: access/hash/hashovfl.c:283 access/hash/hashpage.c:453 +#: access/hash/hashovfl.c:283 access/hash/hashpage.c:462 #, c-format msgid "out of overflow pages in hash index \"%s\"" msgstr "в хеш-индексе \"%s\" не хватает страниц переполнения" @@ -883,89 +810,31 @@ msgstr "в хеш-индексе \"%s\" не хватает страниц пе msgid "hash indexes do not support whole-index scans" msgstr "хеш-индексы не поддерживают сканирование всего индекса" -#: access/hash/hashutil.c:276 +#: access/hash/hashutil.c:277 #, c-format msgid "index \"%s\" is not a hash index" msgstr "индекс \"%s\" не является хеш-индексом" -#: access/hash/hashutil.c:282 +#: access/hash/hashutil.c:283 #, c-format msgid "index \"%s\" has wrong hash version" msgstr "индекс \"%s\" имеет неправильную версию хеша" -#: access/hash/hashvalidate.c:99 -#, c-format -msgid "" -"hash operator family \"%s\" contains support procedure %s with cross-type " -"registration" -msgstr "" -"семейство операторов hash \"%s\" содержит опорную процедуру %s с межтиповой " -"регистрацией" - -#: access/hash/hashvalidate.c:114 -#, c-format -msgid "" -"hash operator family \"%s\" contains function %s with wrong signature for " -"support number %d" -msgstr "" -"семейство операторов hash \"%s\" содержит функцию %s с неподходящим " -"объявлением для опорного номера %d" - -#: access/hash/hashvalidate.c:131 -#, c-format -msgid "" -"hash operator family \"%s\" contains function %s with invalid support number " -"%d" -msgstr "" -"семейство операторов hash \"%s\" содержит функцию %s с неправильным опорным " -"номером %d" - -#: access/hash/hashvalidate.c:152 -#, c-format -msgid "" -"hash operator family \"%s\" contains operator %s with invalid strategy " -"number %d" -msgstr "" -"семейство операторов hash \"%s\" содержит оператор %s с неправильным номером " -"стратегии %d" - -#: access/hash/hashvalidate.c:165 +#: access/hash/hashvalidate.c:190 #, c-format msgid "" -"hash operator family \"%s\" contains invalid ORDER BY specification for " +"operator family \"%s\" of access method %s lacks support function for " "operator %s" msgstr "" -"семейство операторов hash \"%s\" содержит некорректное определение ORDER BY " +"в семействе операторов \"%s\" метода доступа %s не хватает опорной функции " "для оператора %s" -#: access/hash/hashvalidate.c:178 +#: access/hash/hashvalidate.c:248 access/nbtree/nbtvalidate.c:242 #, c-format -msgid "hash operator family \"%s\" contains operator %s with wrong signature" -msgstr "" -"семейство операторов hash \"%s\" содержит оператор %s с неподходящим " -"объявлением" - -#: access/hash/hashvalidate.c:190 -#, c-format -msgid "hash operator family \"%s\" lacks support function for operator %s" +msgid "" +"operator family \"%s\" of access method %s is missing cross-type operator(s)" msgstr "" -"в семействе операторов hash \"%s\" не хватает опорной функции для оператора " -"%s" - -#: access/hash/hashvalidate.c:218 -#, c-format -msgid "hash operator family \"%s\" is missing operator(s) for types %s and %s" -msgstr "в семействе операторов hash \"%s\" нет оператора(ов) для типов %s 
и %s" - -#: access/hash/hashvalidate.c:232 -#, c-format -msgid "hash operator class \"%s\" is missing operator(s)" -msgstr "в классе операторов hash \"%s\" нет оператора(ов)" - -#: access/hash/hashvalidate.c:248 -#, c-format -msgid "hash operator family \"%s\" is missing cross-type operator(s)" -msgstr "в семействе операторов hash \"%s\" нет межтипового оператора(ов)" +"в семействе операторов \"%s\" метода доступа %s нет межтипового оператора(ов)" #: access/heap/heapam.c:1293 access/heap/heapam.c:1321 #: access/heap/heapam.c:1353 catalog/aclchk.c:1772 @@ -974,8 +843,8 @@ msgid "\"%s\" is an index" msgstr "\"%s\" - это индекс" #: access/heap/heapam.c:1298 access/heap/heapam.c:1326 -#: access/heap/heapam.c:1358 catalog/aclchk.c:1779 commands/tablecmds.c:9557 -#: commands/tablecmds.c:12768 +#: access/heap/heapam.c:1358 catalog/aclchk.c:1779 commands/tablecmds.c:9912 +#: commands/tablecmds.c:13142 #, c-format msgid "\"%s\" is a composite type" msgstr "\"%s\" - это составной тип" @@ -995,109 +864,109 @@ msgstr "удалять кортежи во время параллельных msgid "attempted to delete invisible tuple" msgstr "попытка удаления невидимого кортежа" -#: access/heap/heapam.c:3514 access/heap/heapam.c:6213 +#: access/heap/heapam.c:3514 access/heap/heapam.c:6248 #, c-format msgid "cannot update tuples during a parallel operation" msgstr "изменять кортежи во время параллельных операций нельзя" -#: access/heap/heapam.c:3661 +#: access/heap/heapam.c:3662 #, c-format msgid "attempted to update invisible tuple" msgstr "попытка изменения невидимого кортежа" -#: access/heap/heapam.c:4936 access/heap/heapam.c:4974 -#: access/heap/heapam.c:5226 executor/execMain.c:2497 +#: access/heap/heapam.c:4938 access/heap/heapam.c:4976 +#: access/heap/heapam.c:5228 executor/execMain.c:2631 #, c-format msgid "could not obtain lock on row in relation \"%s\"" msgstr "не удалось получить блокировку строки в таблице \"%s\"" -#: access/heap/hio.c:322 access/heap/rewriteheap.c:666 +#: access/heap/hio.c:322 access/heap/rewriteheap.c:669 #, c-format msgid "row is too big: size %zu, maximum size %zu" msgstr "размер строки (%zu) превышает предел (%zu)" -#: access/heap/rewriteheap.c:926 +#: access/heap/rewriteheap.c:929 #, c-format msgid "could not write to file \"%s\", wrote %d of %d: %m" msgstr "не удалось записать в файл \"%s\" (записано байт: %d из %d): %m" -#: access/heap/rewriteheap.c:966 access/heap/rewriteheap.c:1183 -#: access/heap/rewriteheap.c:1282 access/transam/timeline.c:412 -#: access/transam/timeline.c:492 access/transam/xlog.c:3246 -#: access/transam/xlog.c:3414 replication/logical/snapbuild.c:1626 -#: replication/slot.c:1219 replication/slot.c:1306 storage/file/fd.c:631 -#: storage/file/fd.c:3180 storage/smgr/md.c:1044 storage/smgr/md.c:1277 -#: storage/smgr/md.c:1450 utils/misc/guc.c:6967 +#: access/heap/rewriteheap.c:969 access/heap/rewriteheap.c:1186 +#: access/heap/rewriteheap.c:1285 access/transam/timeline.c:412 +#: access/transam/timeline.c:492 access/transam/xlog.c:3249 +#: access/transam/xlog.c:3417 replication/logical/snapbuild.c:1630 +#: replication/slot.c:1291 replication/slot.c:1378 storage/file/fd.c:630 +#: storage/file/fd.c:3202 storage/smgr/md.c:1044 storage/smgr/md.c:1277 +#: storage/smgr/md.c:1450 utils/misc/guc.c:6998 #, c-format msgid "could not fsync file \"%s\": %m" msgstr "не удалось синхронизировать с ФС файл \"%s\": %m" -#: access/heap/rewriteheap.c:1021 access/heap/rewriteheap.c:1141 +#: access/heap/rewriteheap.c:1024 access/heap/rewriteheap.c:1144 #: access/transam/timeline.c:315 
access/transam/timeline.c:467 -#: access/transam/xlog.c:3199 access/transam/xlog.c:3352 -#: access/transam/xlog.c:10512 access/transam/xlog.c:10550 -#: access/transam/xlog.c:10935 postmaster/postmaster.c:4410 -#: replication/logical/origin.c:535 replication/slot.c:1171 -#: storage/file/copydir.c:162 storage/smgr/md.c:327 utils/time/snapmgr.c:1275 +#: access/transam/xlog.c:3202 access/transam/xlog.c:3355 +#: access/transam/xlog.c:10551 access/transam/xlog.c:10589 +#: access/transam/xlog.c:10993 postmaster/postmaster.c:4498 +#: replication/logical/origin.c:576 replication/slot.c:1243 +#: storage/file/copydir.c:176 storage/smgr/md.c:327 utils/time/snapmgr.c:1297 #, c-format msgid "could not create file \"%s\": %m" msgstr "создать файл \"%s\" не удалось: %m" -#: access/heap/rewriteheap.c:1151 +#: access/heap/rewriteheap.c:1154 #, c-format msgid "could not truncate file \"%s\" to %u: %m" msgstr "не удалось обрезать файл \"%s\" до нужного размера (%u): %m" -#: access/heap/rewriteheap.c:1159 replication/walsender.c:478 +#: access/heap/rewriteheap.c:1162 replication/walsender.c:487 #: storage/smgr/md.c:1949 #, c-format msgid "could not seek to end of file \"%s\": %m" msgstr "не удалось перейти к концу файла \"%s\": %m" -#: access/heap/rewriteheap.c:1171 access/transam/timeline.c:370 +#: access/heap/rewriteheap.c:1174 access/transam/timeline.c:370 #: access/transam/timeline.c:405 access/transam/timeline.c:484 -#: access/transam/xlog.c:3235 access/transam/xlog.c:3405 -#: postmaster/postmaster.c:4420 postmaster/postmaster.c:4430 -#: replication/logical/origin.c:544 replication/logical/origin.c:580 -#: replication/logical/origin.c:596 replication/logical/snapbuild.c:1608 -#: replication/slot.c:1202 storage/file/copydir.c:191 -#: utils/init/miscinit.c:1240 utils/init/miscinit.c:1251 -#: utils/init/miscinit.c:1259 utils/misc/guc.c:6928 utils/misc/guc.c:6959 -#: utils/misc/guc.c:8811 utils/misc/guc.c:8825 utils/time/snapmgr.c:1280 -#: utils/time/snapmgr.c:1287 +#: access/transam/xlog.c:3238 access/transam/xlog.c:3408 +#: postmaster/postmaster.c:4508 postmaster/postmaster.c:4518 +#: replication/logical/origin.c:585 replication/logical/origin.c:624 +#: replication/logical/origin.c:640 replication/logical/snapbuild.c:1612 +#: replication/slot.c:1274 storage/file/copydir.c:217 +#: utils/init/miscinit.c:1249 utils/init/miscinit.c:1260 +#: utils/init/miscinit.c:1268 utils/misc/guc.c:6959 utils/misc/guc.c:6990 +#: utils/misc/guc.c:8840 utils/misc/guc.c:8854 utils/time/snapmgr.c:1302 +#: utils/time/snapmgr.c:1309 #, c-format msgid "could not write to file \"%s\": %m" msgstr "записать в файл \"%s\" не удалось: %m" -#: access/heap/rewriteheap.c:1257 access/transam/xlogarchive.c:113 -#: access/transam/xlogarchive.c:467 postmaster/postmaster.c:1239 -#: postmaster/syslogger.c:1371 replication/logical/origin.c:522 -#: replication/logical/reorderbuffer.c:2595 -#: replication/logical/reorderbuffer.c:2652 -#: replication/logical/snapbuild.c:1551 replication/logical/snapbuild.c:1938 -#: replication/slot.c:1279 storage/file/fd.c:682 storage/ipc/dsm.c:327 +#: access/heap/rewriteheap.c:1260 access/transam/xlogarchive.c:113 +#: access/transam/xlogarchive.c:467 postmaster/postmaster.c:1259 +#: postmaster/syslogger.c:1371 replication/logical/origin.c:563 +#: replication/logical/reorderbuffer.c:2610 +#: replication/logical/reorderbuffer.c:2667 +#: replication/logical/snapbuild.c:1560 replication/logical/snapbuild.c:1936 +#: replication/slot.c:1351 storage/file/fd.c:681 storage/ipc/dsm.c:327 #: storage/smgr/md.c:426 
storage/smgr/md.c:475 storage/smgr/md.c:1397 #, c-format msgid "could not remove file \"%s\": %m" msgstr "не удалось стереть файл \"%s\": %m" -#: access/heap/rewriteheap.c:1271 access/transam/timeline.c:111 +#: access/heap/rewriteheap.c:1274 access/transam/timeline.c:111 #: access/transam/timeline.c:236 access/transam/timeline.c:334 -#: access/transam/xlog.c:3175 access/transam/xlog.c:3296 -#: access/transam/xlog.c:3337 access/transam/xlog.c:3616 -#: access/transam/xlog.c:3694 access/transam/xlogutils.c:706 -#: postmaster/syslogger.c:1380 replication/basebackup.c:474 -#: replication/basebackup.c:1218 replication/logical/origin.c:651 -#: replication/logical/reorderbuffer.c:2113 -#: replication/logical/reorderbuffer.c:2361 -#: replication/logical/reorderbuffer.c:3044 -#: replication/logical/snapbuild.c:1600 replication/logical/snapbuild.c:1688 -#: replication/slot.c:1294 replication/walsender.c:471 -#: replication/walsender.c:2314 storage/file/copydir.c:155 -#: storage/file/fd.c:614 storage/file/fd.c:3092 storage/file/fd.c:3159 -#: storage/smgr/md.c:608 utils/error/elog.c:1879 utils/init/miscinit.c:1171 -#: utils/init/miscinit.c:1299 utils/init/miscinit.c:1376 utils/misc/guc.c:7187 -#: utils/misc/guc.c:7220 +#: access/transam/xlog.c:3178 access/transam/xlog.c:3299 +#: access/transam/xlog.c:3340 access/transam/xlog.c:3619 +#: access/transam/xlog.c:3697 access/transam/xlogutils.c:706 +#: postmaster/syslogger.c:1380 replication/basebackup.c:475 +#: replication/basebackup.c:1219 replication/logical/origin.c:695 +#: replication/logical/reorderbuffer.c:2127 +#: replication/logical/reorderbuffer.c:2376 +#: replication/logical/reorderbuffer.c:3059 +#: replication/logical/snapbuild.c:1604 replication/logical/snapbuild.c:1692 +#: replication/slot.c:1366 replication/walsender.c:480 +#: replication/walsender.c:2400 storage/file/copydir.c:169 +#: storage/file/fd.c:613 storage/file/fd.c:3114 storage/file/fd.c:3181 +#: storage/smgr/md.c:608 utils/error/elog.c:1879 utils/init/miscinit.c:1173 +#: utils/init/miscinit.c:1308 utils/init/miscinit.c:1385 utils/misc/guc.c:7218 +#: utils/misc/guc.c:7251 #, c-format msgid "could not open file \"%s\": %m" msgstr "не удалось открыть файл \"%s\": %m" @@ -1112,9 +981,9 @@ msgstr "метод доступа \"%s\" имеет не тип %s" msgid "index access method \"%s\" does not have a handler" msgstr "для метода доступа индекса \"%s\" не задан обработчик" -#: access/index/indexam.c:160 catalog/objectaddress.c:1223 -#: commands/indexcmds.c:1806 commands/tablecmds.c:247 -#: commands/tablecmds.c:12759 +#: access/index/indexam.c:160 catalog/objectaddress.c:1222 +#: commands/indexcmds.c:1822 commands/tablecmds.c:247 +#: commands/tablecmds.c:13133 #, c-format msgid "\"%s\" is not an index" msgstr "\"%s\" - это не индекс" @@ -1152,7 +1021,7 @@ msgstr "" "полнотекстовую индексацию." #: access/nbtree/nbtpage.c:169 access/nbtree/nbtpage.c:372 -#: access/nbtree/nbtpage.c:459 parser/parse_utilcmd.c:1770 +#: access/nbtree/nbtpage.c:459 parser/parse_utilcmd.c:1921 #, c-format msgid "index \"%s\" is not a btree" msgstr "индекс \"%s\" не является b-деревом" @@ -1178,148 +1047,28 @@ msgstr "" "Причиной тому могло быть прерывание операции VACUUM в версии 9.3 или старее, " "до обновления. Этот индекс нужно перестроить (REINDEX)." 
-#: access/nbtree/nbtvalidate.c:101 -#, c-format -msgid "" -"btree operator family \"%s\" contains function %s with invalid support " -"number %d" -msgstr "" -"семейство операторов btree \"%s\" содержит функцию %s с неправильным опорным " -"номером %d" - -#: access/nbtree/nbtvalidate.c:113 -#, c-format -msgid "" -"btree operator family \"%s\" contains function %s with wrong signature for " -"support number %d" -msgstr "" -"семейство операторов btree \"%s\" содержит функцию %s с неподходящим " -"объявлением для опорного номера %d" - -#: access/nbtree/nbtvalidate.c:133 -#, c-format -msgid "" -"btree operator family \"%s\" contains operator %s with invalid strategy " -"number %d" -msgstr "" -"семейство операторов btree \"%s\" содержит оператор %s с неправильным " -"номером стратегии %d" - -#: access/nbtree/nbtvalidate.c:146 -#, c-format -msgid "" -"btree operator family \"%s\" contains invalid ORDER BY specification for " -"operator %s" -msgstr "" -"семейство операторов btree \"%s\" содержит некорректное определение ORDER BY " -"для оператора %s" - -#: access/nbtree/nbtvalidate.c:159 -#, c-format -msgid "btree operator family \"%s\" contains operator %s with wrong signature" -msgstr "" -"семейство операторов btree \"%s\" содержит оператор %s с неподходящим " -"объявлением" - -#: access/nbtree/nbtvalidate.c:201 -#, c-format -msgid "btree operator family \"%s\" is missing operator(s) for types %s and %s" -msgstr "" -"в семействе операторов btree \"%s\" нет оператора(ов) для типов %s и %s" - #: access/nbtree/nbtvalidate.c:211 #, c-format msgid "" -"btree operator family \"%s\" is missing support function for types %s and %s" +"operator family \"%s\" of access method %s is missing support function for " +"types %s and %s" msgstr "" -"в семействе операторов btree \"%s\" нет опорных функций для типов %s и %s" - -#: access/nbtree/nbtvalidate.c:225 -#, c-format -msgid "btree operator class \"%s\" is missing operator(s)" -msgstr "в классе операторов btree \"%s\" нет оператора(ов)" - -#: access/nbtree/nbtvalidate.c:242 -#, c-format -msgid "btree operator family \"%s\" is missing cross-type operator(s)" -msgstr "в семействе операторов btree \"%s\" нет межтипового оператора(ов)" +"в семействе операторов \"%s\" метода доступа %s нет опорной функции для " +"типов %s и %s" #: access/spgist/spgutils.c:705 #, c-format msgid "SP-GiST inner tuple size %zu exceeds maximum %zu" msgstr "внутренний размер кортежа SP-GiST (%zu) превышает максимум (%zu)" -#: access/spgist/spgvalidate.c:93 -#, c-format -msgid "" -"spgist operator family \"%s\" contains support procedure %s with cross-type " -"registration" -msgstr "" -"семейство операторов spgist \"%s\" содержит опорную процедуру %s с " -"межтиповой регистрацией" - -#: access/spgist/spgvalidate.c:116 -#, c-format -msgid "" -"spgist operator family \"%s\" contains function %s with invalid support " -"number %d" -msgstr "" -"семейство операторов spgist \"%s\" содержит функцию %s с неправильным " -"опорным номером %d" - -#: access/spgist/spgvalidate.c:128 -#, c-format -msgid "" -"spgist operator family \"%s\" contains function %s with wrong signature for " -"support number %d" -msgstr "" -"семейство операторов spgist \"%s\" содержит функцию %s с неподходящим " -"объявлением для опорного номера %d" - -#: access/spgist/spgvalidate.c:147 -#, c-format -msgid "" -"spgist operator family \"%s\" contains operator %s with invalid strategy " -"number %d" -msgstr "" -"семейство операторов spgist \"%s\" содержит оператор %s с неправильным " -"номером стратегии %d" - -#: 
access/spgist/spgvalidate.c:160 -#, c-format -msgid "" -"spgist operator family \"%s\" contains invalid ORDER BY specification for " -"operator %s" -msgstr "" -"семейство операторов spgist \"%s\" содержит некорректное определение ORDER " -"BY для оператора %s" - -#: access/spgist/spgvalidate.c:173 -#, c-format -msgid "spgist operator family \"%s\" contains operator %s with wrong signature" -msgstr "" -"семейство операторов spgist \"%s\" содержит оператор %s с неподходящим " -"объявлением" - -#: access/spgist/spgvalidate.c:201 -#, c-format -msgid "" -"spgist operator family \"%s\" is missing operator(s) for types %s and %s" -msgstr "" -"в семействе операторов spgist \"%s\" нет оператора(ов) для типов %s и %s" - #: access/spgist/spgvalidate.c:221 #, c-format msgid "" -"spgist operator family \"%s\" is missing support function %d for type %s" +"operator family \"%s\" of access method %s is missing support function %d " +"for type %s" msgstr "" -"в семействе операторов spgist \"%s\" отсутствует опорная функция %d для типа " -"%s" - -#: access/spgist/spgvalidate.c:234 -#, c-format -msgid "spgist operator class \"%s\" is missing operator(s)" -msgstr "в классе операторов spgist \"%s\" нет оператора(ов)" +"в семействе операторов \"%s\" метода доступа %s нет опорной функции %d для " +"типа %s" #: access/tablesample/bernoulli.c:152 access/tablesample/system.c:156 #, c-format @@ -1546,78 +1295,88 @@ msgstr "" msgid "invalid MultiXactId: %u" msgstr "неверный MultiXactId: %u" -#: access/transam/parallel.c:592 +#: access/transam/parallel.c:604 +#, c-format +msgid "parallel worker failed to initialize" +msgstr "не удалось инициализировать параллельный исполнитель" + +#: access/transam/parallel.c:605 +#, c-format +msgid "More details may be available in the server log." +msgstr "Дополнительная информация может быть в журнале сервера." + +#: access/transam/parallel.c:666 #, c-format msgid "postmaster exited during a parallel transaction" msgstr "postmaster завершился в процессе параллельной транзакции" -#: access/transam/parallel.c:777 +#: access/transam/parallel.c:853 #, c-format msgid "lost connection to parallel worker" msgstr "потеряно подключение к параллельному исполнителю" -#: access/transam/parallel.c:836 access/transam/parallel.c:838 +#: access/transam/parallel.c:915 access/transam/parallel.c:917 msgid "parallel worker" msgstr "параллельный исполнитель" -#: access/transam/parallel.c:977 +#: access/transam/parallel.c:1060 #, c-format msgid "could not map dynamic shared memory segment" msgstr "не удалось отобразить динамический сегмент разделяемой памяти" -#: access/transam/parallel.c:982 +#: access/transam/parallel.c:1065 #, c-format msgid "invalid magic number in dynamic shared memory segment" msgstr "неверное магическое число в динамическом сегменте разделяемой памяти" -#: access/transam/slru.c:664 +#: access/transam/slru.c:668 #, c-format msgid "file \"%s\" doesn't exist, reading as zeroes" msgstr "файл \"%s\" не существует, считается нулевым" -#: access/transam/slru.c:903 access/transam/slru.c:909 -#: access/transam/slru.c:916 access/transam/slru.c:923 -#: access/transam/slru.c:930 access/transam/slru.c:937 +#: access/transam/slru.c:907 access/transam/slru.c:913 +#: access/transam/slru.c:920 access/transam/slru.c:927 +#: access/transam/slru.c:934 access/transam/slru.c:941 #, c-format msgid "could not access status of transaction %u" msgstr "не удалось получить состояние транзакции %u" -#: access/transam/slru.c:904 +#: access/transam/slru.c:908 #, c-format msgid "Could not open file \"%s\": %m." 
msgstr "Не удалось открыть файл \"%s\": %m." -#: access/transam/slru.c:910 +#: access/transam/slru.c:914 #, c-format msgid "Could not seek in file \"%s\" to offset %u: %m." msgstr "Не удалось переместиться в файле \"%s\" к смещению %u: %m." -#: access/transam/slru.c:917 +#: access/transam/slru.c:921 #, c-format msgid "Could not read from file \"%s\" at offset %u: %m." msgstr "Не удалось прочитать файл \"%s\" (по смещению %u): %m." -#: access/transam/slru.c:924 +#: access/transam/slru.c:928 #, c-format msgid "Could not write to file \"%s\" at offset %u: %m." msgstr "Не удалось записать в файл \"%s\" (по смещению %u): %m." -#: access/transam/slru.c:931 +#: access/transam/slru.c:935 #, c-format msgid "Could not fsync file \"%s\": %m." msgstr "Не удалось синхронизировать с ФС файл \"%s\": %m." -#: access/transam/slru.c:938 +#: access/transam/slru.c:942 #, c-format msgid "Could not close file \"%s\": %m." msgstr "Не удалось закрыть файл \"%s\": %m." -#: access/transam/slru.c:1195 +#: access/transam/slru.c:1199 #, c-format msgid "could not truncate directory \"%s\": apparent wraparound" msgstr "не удалось очистить каталог \"%s\": видимо, произошло наложение" -#: access/transam/slru.c:1250 access/transam/slru.c:1306 +#: access/transam/slru.c:1254 access/transam/slru.c:1310 #, c-format msgid "removing file \"%s\"" msgstr "удаляется файл \"%s\"" @@ -1634,8 +1393,8 @@ msgstr "Ожидается числовой идентификатор лини #: access/transam/timeline.c:154 #, c-format -msgid "Expected a transaction log switchpoint location." -msgstr "Ожидается положение точки переключения журнала транзакций." +msgid "Expected a write-ahead log switchpoint location." +msgstr "Ожидается положение точки переключения журнала предзаписи." #: access/transam/timeline.c:158 #, c-format @@ -1659,9 +1418,9 @@ msgstr "" "Идентификаторы линий времени должны быть меньше идентификатора линии-потомка." #: access/transam/timeline.c:418 access/transam/timeline.c:498 -#: access/transam/xlog.c:3253 access/transam/xlog.c:3420 -#: access/transam/xlogfuncs.c:692 commands/copy.c:1751 -#: storage/file/copydir.c:206 +#: access/transam/xlog.c:3256 access/transam/xlog.c:3423 +#: access/transam/xlogfuncs.c:693 commands/copy.c:1723 +#: storage/file/copydir.c:228 #, c-format msgid "could not close file \"%s\": %m" msgstr "не удалось закрыть файл \"%s\": %m" @@ -1671,59 +1430,59 @@ msgstr "не удалось закрыть файл \"%s\": %m" msgid "requested timeline %u is not in this server's history" msgstr "в истории сервера нет запрошенной линии времени %u" -#: access/transam/twophase.c:362 +#: access/transam/twophase.c:383 #, c-format msgid "transaction identifier \"%s\" is too long" msgstr "идентификатор транзакции \"%s\" слишком длинный" -#: access/transam/twophase.c:369 +#: access/transam/twophase.c:390 #, c-format msgid "prepared transactions are disabled" msgstr "подготовленные транзакции отключены" -#: access/transam/twophase.c:370 +#: access/transam/twophase.c:391 #, c-format msgid "Set max_prepared_transactions to a nonzero value." msgstr "Установите ненулевое значение параметра max_prepared_transactions." 
-#: access/transam/twophase.c:389 +#: access/transam/twophase.c:410 #, c-format msgid "transaction identifier \"%s\" is already in use" msgstr "идентификатор транзакции \"%s\" уже используется" -#: access/transam/twophase.c:398 +#: access/transam/twophase.c:419 access/transam/twophase.c:2340 #, c-format msgid "maximum number of prepared transactions reached" msgstr "достигнут предел числа подготовленных транзакций" -#: access/transam/twophase.c:399 +#: access/transam/twophase.c:420 access/transam/twophase.c:2341 #, c-format msgid "Increase max_prepared_transactions (currently %d)." msgstr "Увеличьте параметр max_prepared_transactions (текущее значение %d)." -#: access/transam/twophase.c:539 +#: access/transam/twophase.c:587 #, c-format msgid "prepared transaction with identifier \"%s\" is busy" msgstr "подготовленная транзакция с идентификатором \"%s\" занята" -#: access/transam/twophase.c:545 +#: access/transam/twophase.c:593 #, c-format msgid "permission denied to finish prepared transaction" msgstr "нет доступа для завершения подготовленной транзакции" -#: access/transam/twophase.c:546 +#: access/transam/twophase.c:594 #, c-format msgid "Must be superuser or the user that prepared the transaction." msgstr "" "Это разрешено только суперпользователю и пользователю, подготовившему " "транзакцию." -#: access/transam/twophase.c:557 +#: access/transam/twophase.c:605 #, c-format msgid "prepared transaction belongs to another database" msgstr "подготовленная транзакция относится к другой базе данных" -#: access/transam/twophase.c:558 +#: access/transam/twophase.c:606 #, c-format msgid "" "Connect to the database where the transaction was prepared to finish it." @@ -1732,74 +1491,74 @@ msgstr "" "подготовлена." # [SM]: TO REVIEW -#: access/transam/twophase.c:573 +#: access/transam/twophase.c:621 #, c-format msgid "prepared transaction with identifier \"%s\" does not exist" msgstr "подготовленной транзакции с идентификатором \"%s\" нет" -#: access/transam/twophase.c:1042 +#: access/transam/twophase.c:1086 #, c-format msgid "two-phase state file maximum length exceeded" msgstr "превышен предельный размер файла состояния 2PC" -#: access/transam/twophase.c:1160 +#: access/transam/twophase.c:1204 #, c-format msgid "could not open two-phase state file \"%s\": %m" msgstr "не удалось открыть файл состояния 2PC \"%s\": %m" -#: access/transam/twophase.c:1177 +#: access/transam/twophase.c:1221 #, c-format msgid "could not stat two-phase state file \"%s\": %m" msgstr "не удалось получить информацию о файле состояния 2PC \"%s\": %m" -#: access/transam/twophase.c:1211 +#: access/transam/twophase.c:1255 #, c-format msgid "could not read two-phase state file \"%s\": %m" msgstr "не удалось прочитать файл состояния 2PC \"%s\": %m" -#: access/transam/twophase.c:1265 access/transam/xlog.c:6348 +#: access/transam/twophase.c:1307 access/transam/xlog.c:6364 #, c-format msgid "Failed while allocating a WAL reading processor." msgstr "Не удалось разместить обработчик журнала транзакций." 
-#: access/transam/twophase.c:1271 +#: access/transam/twophase.c:1313 #, c-format msgid "could not read two-phase state from WAL at %X/%X" msgstr "не удалось прочитать состояние 2PC из WAL в позиции %X/%X" -#: access/transam/twophase.c:1279 +#: access/transam/twophase.c:1321 #, c-format msgid "expected two-phase state data is not present in WAL at %X/%X" msgstr "" "ожидаемые данные состояния двухфазной фиксации отсутствуют в WAL в позиции " "%X/%X" -#: access/transam/twophase.c:1514 +#: access/transam/twophase.c:1558 #, c-format msgid "could not remove two-phase state file \"%s\": %m" msgstr "не удалось стереть файл состояния 2PC \"%s\": %m" -#: access/transam/twophase.c:1544 +#: access/transam/twophase.c:1588 #, c-format msgid "could not recreate two-phase state file \"%s\": %m" msgstr "не удалось пересоздать файл состояния 2PC \"%s\": %m" -#: access/transam/twophase.c:1555 access/transam/twophase.c:1563 +#: access/transam/twophase.c:1599 access/transam/twophase.c:1607 #, c-format msgid "could not write two-phase state file: %m" msgstr "не удалось записать в файл состояния 2PC: %m" -#: access/transam/twophase.c:1577 +#: access/transam/twophase.c:1621 #, c-format msgid "could not fsync two-phase state file: %m" msgstr "не удалось синхронизировать с ФС файл состояния 2PC: %m" -#: access/transam/twophase.c:1584 +#: access/transam/twophase.c:1628 #, c-format msgid "could not close two-phase state file: %m" msgstr "не удалось закрыть файл состояния 2PC: %m" -#: access/transam/twophase.c:1665 +#: access/transam/twophase.c:1716 #, c-format msgid "" "%u two-phase state file was written for a long-running prepared transaction" @@ -1812,27 +1571,40 @@ msgstr[1] "" msgstr[2] "" "для длительных подготовленных транзакций записано файлов состояния 2PC: %u" -#: access/transam/twophase.c:1729 +#: access/transam/twophase.c:1944 +#, c-format +msgid "recovering prepared transaction %u from shared memory" +msgstr "восстановление подготовленной транзакции %u из разделяемой памяти" + +#: access/transam/twophase.c:2034 #, c-format -msgid "removing future two-phase state file \"%s\"" -msgstr "удаление будущего файла состояния 2PC \"%s\"" +msgid "removing stale two-phase state file for transaction %u" +msgstr "удаление устаревшего файла состояния 2PC для транзакции %u" -#: access/transam/twophase.c:1745 access/transam/twophase.c:1756 -#: access/transam/twophase.c:1876 access/transam/twophase.c:1887 -#: access/transam/twophase.c:1964 +#: access/transam/twophase.c:2041 #, c-format -msgid "removing corrupt two-phase state file \"%s\"" -msgstr "удаление испорченного файла состояния 2PC \"%s\"" +msgid "removing stale two-phase state from memory for transaction %u" +msgstr "удаление из памяти устаревшего состояния 2PC для транзакции %u" -#: access/transam/twophase.c:1865 access/transam/twophase.c:1953 +#: access/transam/twophase.c:2054 #, c-format -msgid "removing stale two-phase state file \"%s\"" -msgstr "удаление устаревшего файла состояния 2PC \"%s\"" +msgid "removing future two-phase state file for transaction %u" +msgstr "удаление файла будущего состояния 2PC для транзакции %u" -#: access/transam/twophase.c:1971 +#: access/transam/twophase.c:2061 #, c-format -msgid "recovering prepared transaction %u" -msgstr "восстановление подготовленной транзакции %u" +msgid "removing future two-phase state from memory for transaction %u" +msgstr "удаление из памяти будущего состояния 2PC для транзакции %u" + +#: access/transam/twophase.c:2075 access/transam/twophase.c:2094 +#, c-format +msgid "removing corrupt two-phase state 
file for transaction %u" +msgstr "удаление испорченного файла состояния 2PC для транзакции %u" + +#: access/transam/twophase.c:2101 +#, c-format +msgid "removing corrupt two-phase state from memory for transaction %u" +msgstr "удаление из памяти испорченного состояния 2PC для транзакции %u" #: access/transam/varsup.c:124 #, c-format @@ -1892,31 +1664,39 @@ msgstr "в одной транзакции не может быть больше msgid "maximum number of committed subtransactions (%d) exceeded" msgstr "превышен предел числа зафиксированных подтранзакций (%d)" -#: access/transam/xact.c:2268 +#: access/transam/xact.c:2265 #, c-format msgid "cannot PREPARE a transaction that has operated on temporary tables" msgstr "" -"выполнить PREPARE для транзакции, оперирующей с временными таблицами, нельзя" +"нельзя выполнить PREPARE для транзакции, оперирующей с временными таблицами" -#: access/transam/xact.c:2278 +#: access/transam/xact.c:2275 #, c-format msgid "cannot PREPARE a transaction that has exported snapshots" msgstr "нельзя выполнить PREPARE для транзакции, снимки которой экспортированы" +#: access/transam/xact.c:2284 +#, c-format +msgid "" +"cannot PREPARE a transaction that has manipulated logical replication workers" +msgstr "" +"нельзя выполнить PREPARE для транзакции, задействующей процессы логической " +"репликации" + #. translator: %s represents an SQL statement name -#: access/transam/xact.c:3163 +#: access/transam/xact.c:3166 #, c-format msgid "%s cannot run inside a transaction block" msgstr "%s не может выполняться внутри блока транзакции" #. translator: %s represents an SQL statement name -#: access/transam/xact.c:3173 +#: access/transam/xact.c:3176 #, c-format msgid "%s cannot run inside a subtransaction" msgstr "%s не может выполняться внутри подтранзакции" #. translator: %s represents an SQL statement name -#: access/transam/xact.c:3183 +#: access/transam/xact.c:3186 #, c-format msgid "%s cannot be executed from a function or multi-command string" msgstr "" @@ -1924,157 +1704,152 @@ msgstr "" "команд" #. 
translator: %s represents an SQL statement name -#: access/transam/xact.c:3254 +#: access/transam/xact.c:3257 #, c-format msgid "%s can only be used in transaction blocks" msgstr "%s может выполняться только внутри блоков транзакций" -#: access/transam/xact.c:3438 +#: access/transam/xact.c:3441 #, c-format msgid "there is already a transaction in progress" msgstr "транзакция уже выполняется" -#: access/transam/xact.c:3606 access/transam/xact.c:3709 +#: access/transam/xact.c:3609 access/transam/xact.c:3712 #, c-format msgid "there is no transaction in progress" msgstr "нет незавершённой транзакции" -#: access/transam/xact.c:3617 +#: access/transam/xact.c:3620 #, c-format msgid "cannot commit during a parallel operation" msgstr "фиксировать транзакции во время параллельных операций нельзя" -#: access/transam/xact.c:3720 +#: access/transam/xact.c:3723 #, c-format msgid "cannot abort during a parallel operation" msgstr "прерывание во время параллельных операций невозможно" -#: access/transam/xact.c:3762 +#: access/transam/xact.c:3765 #, c-format msgid "cannot define savepoints during a parallel operation" msgstr "определять точки сохранения во время параллельных операций нельзя" -#: access/transam/xact.c:3829 +#: access/transam/xact.c:3832 #, c-format msgid "cannot release savepoints during a parallel operation" msgstr "высвобождать точки сохранения во время параллельных операций нельзя" -#: access/transam/xact.c:3840 access/transam/xact.c:3892 -#: access/transam/xact.c:3898 access/transam/xact.c:3954 -#: access/transam/xact.c:4004 access/transam/xact.c:4010 +#: access/transam/xact.c:3843 access/transam/xact.c:3895 +#: access/transam/xact.c:3901 access/transam/xact.c:3957 +#: access/transam/xact.c:4007 access/transam/xact.c:4013 #, c-format msgid "no such savepoint" msgstr "нет такой точки сохранения" -#: access/transam/xact.c:3942 +#: access/transam/xact.c:3945 #, c-format msgid "cannot rollback to savepoints during a parallel operation" msgstr "откатиться к точке сохранения во время параллельных операций нельзя" -#: access/transam/xact.c:4070 +#: access/transam/xact.c:4073 #, c-format msgid "cannot start subtransactions during a parallel operation" msgstr "запускать подтранзакции во время параллельных операций нельзя" -#: access/transam/xact.c:4137 +#: access/transam/xact.c:4140 #, c-format msgid "cannot commit subtransactions during a parallel operation" msgstr "фиксировать подтранзакции во время параллельных операций нельзя" -#: access/transam/xact.c:4745 +#: access/transam/xact.c:4772 #, c-format msgid "cannot have more than 2^32-1 subtransactions in a transaction" msgstr "в одной транзакции не может быть больше 2^32-1 подтранзакций" -#: access/transam/xlog.c:2452 +#: access/transam/xlog.c:2455 #, c-format msgid "could not seek in log file %s to offset %u: %m" msgstr "не удалось переместиться в файле журнала %s к смещению %u: %m" -#: access/transam/xlog.c:2474 +#: access/transam/xlog.c:2477 #, c-format msgid "could not write to log file %s at offset %u, length %zu: %m" msgstr "не удалось записать в файл журнала %s (смещение: %u, длина: %zu): %m" -#: access/transam/xlog.c:2738 +#: access/transam/xlog.c:2741 #, c-format msgid "updated min recovery point to %X/%X on timeline %u" msgstr "минимальная точка восстановления изменена на %X/%X на линии времени %u" -#: access/transam/xlog.c:3385 +#: access/transam/xlog.c:3388 #, c-format msgid "not enough data in file \"%s\"" msgstr "недостаточно данных в файле\"%s\"" -#: access/transam/xlog.c:3531 +#: access/transam/xlog.c:3534 #, c-format -msgid 
"could not open transaction log file \"%s\": %m" -msgstr "не удалось открыть файл журнала транзакций \"%s\": %m" +msgid "could not open write-ahead log file \"%s\": %m" +msgstr "не удалось открыть файл журнала предзаписи \"%s\": %m" -#: access/transam/xlog.c:3720 access/transam/xlog.c:5533 +#: access/transam/xlog.c:3723 access/transam/xlog.c:5549 #, c-format msgid "could not close log file %s: %m" msgstr "не удалось закрыть файл журнала \"%s\": %m" -#: access/transam/xlog.c:3777 access/transam/xlogutils.c:701 -#: replication/walsender.c:2309 +#: access/transam/xlog.c:3787 access/transam/xlogutils.c:701 +#: replication/walsender.c:2395 #, c-format msgid "requested WAL segment %s has already been removed" msgstr "запрошенный сегмент WAL %s уже удалён" -#: access/transam/xlog.c:3837 access/transam/xlog.c:3912 -#: access/transam/xlog.c:4107 +#: access/transam/xlog.c:3848 access/transam/xlog.c:3923 +#: access/transam/xlog.c:4118 #, c-format -msgid "could not open transaction log directory \"%s\": %m" -msgstr "не удалось открыть каталог журнала транзакций \"%s\": %m" +msgid "could not open write-ahead log directory \"%s\": %m" +msgstr "не удалось открыть каталог журнала предзаписи \"%s\": %m" -#: access/transam/xlog.c:3993 +#: access/transam/xlog.c:4004 #, c-format -msgid "recycled transaction log file \"%s\"" -msgstr "файл журнала транзакций \"%s\" используется повторно" +msgid "recycled write-ahead log file \"%s\"" +msgstr "файл журнала предзаписи \"%s\" используется повторно" -#: access/transam/xlog.c:4005 +#: access/transam/xlog.c:4016 #, c-format -msgid "removing transaction log file \"%s\"" -msgstr "файл журнала транзакций \"%s\" удаляется" +msgid "removing write-ahead log file \"%s\"" +msgstr "файл журнала предзаписи \"%s\" удаляется" -#: access/transam/xlog.c:4025 +#: access/transam/xlog.c:4036 #, c-format -msgid "could not rename old transaction log file \"%s\": %m" -msgstr "не удалось переименовать старый файл журнала транзакций \"%s\": %m" +msgid "could not rename old write-ahead log file \"%s\": %m" +msgstr "не удалось переименовать старый файл журнала предзаписи \"%s\": %m" -#: access/transam/xlog.c:4067 access/transam/xlog.c:4077 +#: access/transam/xlog.c:4078 access/transam/xlog.c:4088 #, c-format msgid "required WAL directory \"%s\" does not exist" msgstr "требуемый каталог WAL \"%s\" не существует" -#: access/transam/xlog.c:4083 +#: access/transam/xlog.c:4094 #, c-format msgid "creating missing WAL directory \"%s\"" msgstr "создаётся отсутствующий каталог WAL \"%s\"" -#: access/transam/xlog.c:4086 +#: access/transam/xlog.c:4097 #, c-format msgid "could not create missing directory \"%s\": %m" msgstr "не удалось создать отсутствующий каталог \"%s\": %m" -#: access/transam/xlog.c:4117 -#, c-format -msgid "removing transaction log backup history file \"%s\"" -msgstr "удаляется файл истории копирования журнала: \"%s\"" - -#: access/transam/xlog.c:4198 +#: access/transam/xlog.c:4208 #, c-format msgid "unexpected timeline ID %u in log segment %s, offset %u" msgstr "неожиданный ID линии времени %u в сегменте журнала %s, смещение %u" -#: access/transam/xlog.c:4320 +#: access/transam/xlog.c:4330 #, c-format msgid "new timeline %u is not a child of database system timeline %u" msgstr "" "новая линия времени %u не является ответвлением линии времени системы БД %u" -#: access/transam/xlog.c:4334 +#: access/transam/xlog.c:4344 #, c-format msgid "" "new timeline %u forked off current database system timeline %u before " @@ -2083,55 +1858,55 @@ msgstr "" "новая линия времени %u ответвилась от 
текущей линии времени базы данных %u " "до текущей точки восстановления %X/%X" -#: access/transam/xlog.c:4353 +#: access/transam/xlog.c:4363 #, c-format msgid "new target timeline is %u" msgstr "новая целевая линия времени %u" -#: access/transam/xlog.c:4428 +#: access/transam/xlog.c:4444 #, c-format msgid "could not create control file \"%s\": %m" msgstr "не удалось создать файл \"%s\": %m" -#: access/transam/xlog.c:4440 access/transam/xlog.c:4666 +#: access/transam/xlog.c:4456 access/transam/xlog.c:4682 #, c-format msgid "could not write to control file: %m" msgstr "не удалось записать в файл pg_control: %m" -#: access/transam/xlog.c:4448 access/transam/xlog.c:4674 +#: access/transam/xlog.c:4464 access/transam/xlog.c:4690 #, c-format msgid "could not fsync control file: %m" msgstr "не удалось синхронизировать с ФС файл pg_control: %m" -#: access/transam/xlog.c:4454 access/transam/xlog.c:4680 +#: access/transam/xlog.c:4470 access/transam/xlog.c:4696 #, c-format msgid "could not close control file: %m" msgstr "не удалось закрыть файл pg_control: %m" -#: access/transam/xlog.c:4472 access/transam/xlog.c:4654 +#: access/transam/xlog.c:4488 access/transam/xlog.c:4670 #, c-format msgid "could not open control file \"%s\": %m" msgstr "не удалось открыть файл \"%s\": %m" -#: access/transam/xlog.c:4479 +#: access/transam/xlog.c:4495 #, c-format msgid "could not read from control file: %m" msgstr "не удалось прочитать файл pg_control: %m" -#: access/transam/xlog.c:4493 access/transam/xlog.c:4502 -#: access/transam/xlog.c:4526 access/transam/xlog.c:4533 -#: access/transam/xlog.c:4540 access/transam/xlog.c:4545 -#: access/transam/xlog.c:4552 access/transam/xlog.c:4559 -#: access/transam/xlog.c:4566 access/transam/xlog.c:4573 -#: access/transam/xlog.c:4580 access/transam/xlog.c:4587 -#: access/transam/xlog.c:4594 access/transam/xlog.c:4603 +#: access/transam/xlog.c:4509 access/transam/xlog.c:4518 +#: access/transam/xlog.c:4542 access/transam/xlog.c:4549 +#: access/transam/xlog.c:4556 access/transam/xlog.c:4561 +#: access/transam/xlog.c:4568 access/transam/xlog.c:4575 +#: access/transam/xlog.c:4582 access/transam/xlog.c:4589 +#: access/transam/xlog.c:4596 access/transam/xlog.c:4603 #: access/transam/xlog.c:4610 access/transam/xlog.c:4619 -#: access/transam/xlog.c:4626 utils/init/miscinit.c:1397 +#: access/transam/xlog.c:4626 access/transam/xlog.c:4635 +#: access/transam/xlog.c:4642 utils/init/miscinit.c:1406 #, c-format msgid "database files are incompatible with server" msgstr "файлы базы данных не совместимы с сервером" -#: access/transam/xlog.c:4494 +#: access/transam/xlog.c:4510 #, c-format msgid "" "The database cluster was initialized with PG_CONTROL_VERSION %d (0x%08x), " @@ -2140,7 +1915,7 @@ msgstr "" "Кластер баз данных был инициализирован с PG_CONTROL_VERSION %d (0x%08x), но " "сервер скомпилирован с PG_CONTROL_VERSION %d (0x%08x)." -#: access/transam/xlog.c:4498 +#: access/transam/xlog.c:4514 #, c-format msgid "" "This could be a problem of mismatched byte ordering. It looks like you need " @@ -2149,7 +1924,7 @@ msgstr "" "Возможно, проблема вызвана разным порядком байт. Кажется, вам надо выполнить " "initdb." -#: access/transam/xlog.c:4503 +#: access/transam/xlog.c:4519 #, c-format msgid "" "The database cluster was initialized with PG_CONTROL_VERSION %d, but the " @@ -2158,18 +1933,18 @@ msgstr "" "Кластер баз данных был инициализирован с PG_CONTROL_VERSION %d, но сервер " "скомпилирован с PG_CONTROL_VERSION %d." 
-#: access/transam/xlog.c:4506 access/transam/xlog.c:4530 -#: access/transam/xlog.c:4537 access/transam/xlog.c:4542 +#: access/transam/xlog.c:4522 access/transam/xlog.c:4546 +#: access/transam/xlog.c:4553 access/transam/xlog.c:4558 #, c-format msgid "It looks like you need to initdb." msgstr "Кажется, вам надо выполнить initdb." -#: access/transam/xlog.c:4517 +#: access/transam/xlog.c:4533 #, c-format msgid "incorrect checksum in control file" msgstr "ошибка контрольной суммы в файле pg_control" -#: access/transam/xlog.c:4527 +#: access/transam/xlog.c:4543 #, c-format msgid "" "The database cluster was initialized with CATALOG_VERSION_NO %d, but the " @@ -2178,7 +1953,7 @@ msgstr "" "Кластер баз данных был инициализирован с CATALOG_VERSION_NO %d, но сервер " "скомпилирован с CATALOG_VERSION_NO %d." -#: access/transam/xlog.c:4534 +#: access/transam/xlog.c:4550 #, c-format msgid "" "The database cluster was initialized with MAXALIGN %d, but the server was " @@ -2187,7 +1962,7 @@ msgstr "" "Кластер баз данных был инициализирован с MAXALIGN %d, но сервер " "скомпилирован с MAXALIGN %d." -#: access/transam/xlog.c:4541 +#: access/transam/xlog.c:4557 #, c-format msgid "" "The database cluster appears to use a different floating-point number format " @@ -2196,7 +1971,7 @@ msgstr "" "Кажется, в кластере баз данных и в программе сервера используются разные " "форматы чисел с плавающей точкой." -#: access/transam/xlog.c:4546 +#: access/transam/xlog.c:4562 #, c-format msgid "" "The database cluster was initialized with BLCKSZ %d, but the server was " @@ -2205,17 +1980,17 @@ msgstr "" "Кластер баз данных был инициализирован с BLCKSZ %d, но сервер скомпилирован " "с BLCKSZ %d." -#: access/transam/xlog.c:4549 access/transam/xlog.c:4556 -#: access/transam/xlog.c:4563 access/transam/xlog.c:4570 -#: access/transam/xlog.c:4577 access/transam/xlog.c:4584 -#: access/transam/xlog.c:4591 access/transam/xlog.c:4598 -#: access/transam/xlog.c:4606 access/transam/xlog.c:4613 +#: access/transam/xlog.c:4565 access/transam/xlog.c:4572 +#: access/transam/xlog.c:4579 access/transam/xlog.c:4586 +#: access/transam/xlog.c:4593 access/transam/xlog.c:4600 +#: access/transam/xlog.c:4607 access/transam/xlog.c:4614 #: access/transam/xlog.c:4622 access/transam/xlog.c:4629 +#: access/transam/xlog.c:4638 access/transam/xlog.c:4645 #, c-format msgid "It looks like you need to recompile or initdb." msgstr "Кажется, вам надо перекомпилировать сервер или выполнить initdb." -#: access/transam/xlog.c:4553 +#: access/transam/xlog.c:4569 #, c-format msgid "" "The database cluster was initialized with RELSEG_SIZE %d, but the server was " @@ -2224,7 +1999,7 @@ msgstr "" "Кластер баз данных был инициализирован с RELSEG_SIZE %d, но сервер " "скомпилирован с RELSEG_SIZE %d." -#: access/transam/xlog.c:4560 +#: access/transam/xlog.c:4576 #, c-format msgid "" "The database cluster was initialized with XLOG_BLCKSZ %d, but the server was " @@ -2233,7 +2008,7 @@ msgstr "" "Кластер баз данных был инициализирован с XLOG_BLCKSZ %d, но сервер " "скомпилирован с XLOG_BLCKSZ %d." -#: access/transam/xlog.c:4567 +#: access/transam/xlog.c:4583 #, c-format msgid "" "The database cluster was initialized with XLOG_SEG_SIZE %d, but the server " @@ -2242,7 +2017,7 @@ msgstr "" "Кластер баз данных был инициализирован с XLOG_SEG_SIZE %d, но сервер " "скомпилирован с XLOG_SEG_SIZE %d." 
-#: access/transam/xlog.c:4574 +#: access/transam/xlog.c:4590 #, c-format msgid "" "The database cluster was initialized with NAMEDATALEN %d, but the server was " @@ -2251,7 +2026,7 @@ msgstr "" "Кластер баз данных был инициализирован с NAMEDATALEN %d, но сервер " "скомпилирован с NAMEDATALEN %d." -#: access/transam/xlog.c:4581 +#: access/transam/xlog.c:4597 #, c-format msgid "" "The database cluster was initialized with INDEX_MAX_KEYS %d, but the server " @@ -2260,7 +2035,7 @@ msgstr "" "Кластер баз данных был инициализирован с INDEX_MAX_KEYS %d, но сервер " "скомпилирован с INDEX_MAX_KEYS %d." -#: access/transam/xlog.c:4588 +#: access/transam/xlog.c:4604 #, c-format msgid "" "The database cluster was initialized with TOAST_MAX_CHUNK_SIZE %d, but the " @@ -2269,7 +2044,7 @@ msgstr "" "Кластер баз данных был инициализирован с TOAST_MAX_CHUNK_SIZE %d, но сервер " "скомпилирован с TOAST_MAX_CHUNK_SIZE %d." -#: access/transam/xlog.c:4595 +#: access/transam/xlog.c:4611 #, c-format msgid "" "The database cluster was initialized with LOBLKSIZE %d, but the server was " @@ -2278,7 +2053,7 @@ msgstr "" "Кластер баз данных был инициализирован с LOBLKSIZE %d, но сервер " "скомпилирован с LOBLKSIZE %d." -#: access/transam/xlog.c:4604 +#: access/transam/xlog.c:4620 #, c-format msgid "" "The database cluster was initialized without USE_FLOAT4_BYVAL but the server " @@ -2287,7 +2062,7 @@ msgstr "" "Кластер баз данных был инициализирован без USE_FLOAT4_BYVAL, но сервер " "скомпилирован с USE_FLOAT4_BYVAL." -#: access/transam/xlog.c:4611 +#: access/transam/xlog.c:4627 #, c-format msgid "" "The database cluster was initialized with USE_FLOAT4_BYVAL but the server " @@ -2296,7 +2071,7 @@ msgstr "" "Кластер баз данных был инициализирован с USE_FLOAT4_BYVAL, но сервер " "скомпилирован без USE_FLOAT4_BYVAL." -#: access/transam/xlog.c:4620 +#: access/transam/xlog.c:4636 #, c-format msgid "" "The database cluster was initialized without USE_FLOAT8_BYVAL but the server " @@ -2305,7 +2080,7 @@ msgstr "" "Кластер баз данных был инициализирован без USE_FLOAT8_BYVAL, но сервер " "скомпилирован с USE_FLOAT8_BYVAL." -#: access/transam/xlog.c:4627 +#: access/transam/xlog.c:4643 #, c-format msgid "" "The database cluster was initialized with USE_FLOAT8_BYVAL but the server " @@ -2314,90 +2089,90 @@ msgstr "" "Кластер баз данных был инициализирован с USE_FLOAT8_BYVAL, но сервер был " "скомпилирован без USE_FLOAT8_BYVAL." 
-#: access/transam/xlog.c:4983 +#: access/transam/xlog.c:4999 #, c-format msgid "could not generate secret authorization token" msgstr "не удалось сгенерировать случайное число для аутентификации" -#: access/transam/xlog.c:5073 +#: access/transam/xlog.c:5089 #, c-format -msgid "could not write bootstrap transaction log file: %m" -msgstr "не удалось записать начальный файл журнала транзакций: %m" +msgid "could not write bootstrap write-ahead log file: %m" +msgstr "не удалось записать начальный файл журнала предзаписи: %m" -#: access/transam/xlog.c:5081 +#: access/transam/xlog.c:5097 #, c-format -msgid "could not fsync bootstrap transaction log file: %m" -msgstr "не удалось синхронизировать с ФС начальный файл журнала транзакций: %m" +msgid "could not fsync bootstrap write-ahead log file: %m" +msgstr "не удалось сбросить на диск начальный файл журнала предзаписи: %m" -#: access/transam/xlog.c:5087 +#: access/transam/xlog.c:5103 #, c-format -msgid "could not close bootstrap transaction log file: %m" -msgstr "не удалось закрыть начальный файл журнала транзакций: %m" +msgid "could not close bootstrap write-ahead log file: %m" +msgstr "не удалось закрыть начальный файл журнала предзаписи: %m" -#: access/transam/xlog.c:5163 +#: access/transam/xlog.c:5179 #, c-format msgid "could not open recovery command file \"%s\": %m" msgstr "не удалось открыть файл команд восстановления \"%s\": %m" -#: access/transam/xlog.c:5209 access/transam/xlog.c:5311 +#: access/transam/xlog.c:5225 access/transam/xlog.c:5327 #, c-format msgid "invalid value for recovery parameter \"%s\": \"%s\"" msgstr "неверное значение для параметра восстановления \"%s\": \"%s\"" -#: access/transam/xlog.c:5212 +#: access/transam/xlog.c:5228 #, c-format msgid "Valid values are \"pause\", \"promote\", and \"shutdown\"." msgstr "Допустимые значения: \"pause\", \"promote\" и \"shutdown\"." -#: access/transam/xlog.c:5232 +#: access/transam/xlog.c:5248 #, c-format msgid "recovery_target_timeline is not a valid number: \"%s\"" msgstr "recovery_target_timeline не является допустимым числом: \"%s\"" -#: access/transam/xlog.c:5249 +#: access/transam/xlog.c:5265 #, c-format msgid "recovery_target_xid is not a valid number: \"%s\"" msgstr "recovery_target_xid не является допустимым числом: \"%s\"" -#: access/transam/xlog.c:5280 +#: access/transam/xlog.c:5296 #, c-format msgid "recovery_target_name is too long (maximum %d characters)" msgstr "длина recovery_target_name превышает предел (%d)" -#: access/transam/xlog.c:5314 +#: access/transam/xlog.c:5330 #, c-format msgid "The only allowed value is \"immediate\"." msgstr "Единственное допустимое значение: \"immediate\"." 
-#: access/transam/xlog.c:5327 access/transam/xlog.c:5338 -#: commands/extension.c:546 commands/extension.c:554 utils/misc/guc.c:5719 +#: access/transam/xlog.c:5343 access/transam/xlog.c:5354 +#: commands/extension.c:547 commands/extension.c:555 utils/misc/guc.c:5750 #, c-format msgid "parameter \"%s\" requires a Boolean value" msgstr "параметр \"%s\" требует логическое значение" -#: access/transam/xlog.c:5373 +#: access/transam/xlog.c:5389 #, c-format msgid "parameter \"%s\" requires a temporal value" msgstr "параметр \"%s\" требует временное значение" -#: access/transam/xlog.c:5375 catalog/dependency.c:961 catalog/dependency.c:962 +#: access/transam/xlog.c:5391 catalog/dependency.c:961 catalog/dependency.c:962 #: catalog/dependency.c:968 catalog/dependency.c:969 catalog/dependency.c:980 -#: catalog/dependency.c:981 commands/tablecmds.c:946 commands/tablecmds.c:10017 -#: commands/user.c:1052 commands/view.c:505 libpq/auth.c:328 -#: replication/syncrep.c:1118 storage/lmgr/deadlock.c:1139 -#: storage/lmgr/proc.c:1313 utils/adt/acl.c:5248 utils/misc/guc.c:5741 -#: utils/misc/guc.c:5834 utils/misc/guc.c:9792 utils/misc/guc.c:9826 -#: utils/misc/guc.c:9860 utils/misc/guc.c:9894 utils/misc/guc.c:9929 +#: catalog/dependency.c:981 commands/tablecmds.c:946 commands/tablecmds.c:10372 +#: commands/user.c:1064 commands/view.c:505 libpq/auth.c:328 +#: replication/syncrep.c:1160 storage/lmgr/deadlock.c:1139 +#: storage/lmgr/proc.c:1313 utils/adt/acl.c:5253 utils/misc/guc.c:5772 +#: utils/misc/guc.c:5865 utils/misc/guc.c:9814 utils/misc/guc.c:9848 +#: utils/misc/guc.c:9882 utils/misc/guc.c:9916 utils/misc/guc.c:9951 #, c-format msgid "%s" msgstr "%s" -#: access/transam/xlog.c:5382 +#: access/transam/xlog.c:5398 #, c-format msgid "unrecognized recovery parameter \"%s\"" msgstr "нераспознанный параметр восстановления \"%s\"" -#: access/transam/xlog.c:5393 +#: access/transam/xlog.c:5409 #, c-format msgid "" "recovery command file \"%s\" specified neither primary_conninfo nor " @@ -2406,7 +2181,7 @@ msgstr "" "в файле команд восстановления \"%s\" не указан параметр primary_conninfo или " "restore_command" -#: access/transam/xlog.c:5395 +#: access/transam/xlog.c:5411 #, c-format msgid "" "The database server will regularly poll the pg_wal subdirectory to check for " @@ -2415,7 +2190,7 @@ msgstr "" "Сервер БД будет регулярно опрашивать подкаталог pg_wal и проверять " "содержащиеся в нём файлы." 
-#: access/transam/xlog.c:5402 +#: access/transam/xlog.c:5418 #, c-format msgid "" "recovery command file \"%s\" must specify restore_command when standby mode " @@ -2424,78 +2199,78 @@ msgstr "" "в файле команд восстановления \"%s\" может отсутствовать restore_command, " "только если это резервный сервер" -#: access/transam/xlog.c:5423 +#: access/transam/xlog.c:5439 #, c-format msgid "standby mode is not supported by single-user servers" msgstr "" "режим резервного сервера не поддерживается однопользовательским сервером" -#: access/transam/xlog.c:5442 +#: access/transam/xlog.c:5458 #, c-format msgid "recovery target timeline %u does not exist" msgstr "целевая линия времени для восстановления %u не существует" -#: access/transam/xlog.c:5563 +#: access/transam/xlog.c:5579 #, c-format msgid "archive recovery complete" msgstr "восстановление архива завершено" -#: access/transam/xlog.c:5622 access/transam/xlog.c:5888 +#: access/transam/xlog.c:5638 access/transam/xlog.c:5904 #, c-format msgid "recovery stopping after reaching consistency" msgstr "" "восстановление останавливается после достижения согласованного состояния" -#: access/transam/xlog.c:5643 +#: access/transam/xlog.c:5659 #, c-format -msgid "recovery stopping before WAL position (LSN) \"%X/%X\"" +msgid "recovery stopping before WAL location (LSN) \"%X/%X\"" msgstr "восстановление останавливается перед позицией в WAL (LSN) \"%X/%X\"" -#: access/transam/xlog.c:5729 +#: access/transam/xlog.c:5745 #, c-format msgid "recovery stopping before commit of transaction %u, time %s" msgstr "" "восстановление останавливается перед фиксированием транзакции %u, время %s" -#: access/transam/xlog.c:5736 +#: access/transam/xlog.c:5752 #, c-format msgid "recovery stopping before abort of transaction %u, time %s" msgstr "" "восстановление останавливается перед прерыванием транзакции %u, время %s" -#: access/transam/xlog.c:5782 +#: access/transam/xlog.c:5798 #, c-format msgid "recovery stopping at restore point \"%s\", time %s" msgstr "восстановление останавливается в точке восстановления \"%s\", время %s" -#: access/transam/xlog.c:5800 +#: access/transam/xlog.c:5816 #, c-format -msgid "recovery stopping after WAL position (LSN) \"%X/%X\"" +msgid "recovery stopping after WAL location (LSN) \"%X/%X\"" msgstr "восстановление останавливается после позиции в WAL (LSN) \"%X/%X\"" -#: access/transam/xlog.c:5868 +#: access/transam/xlog.c:5884 #, c-format msgid "recovery stopping after commit of transaction %u, time %s" msgstr "" "восстановление останавливается после фиксирования транзакции %u, время %s" -#: access/transam/xlog.c:5876 +#: access/transam/xlog.c:5892 #, c-format msgid "recovery stopping after abort of transaction %u, time %s" msgstr "" "восстановление останавливается после прерывания транзакции %u, время %s" -#: access/transam/xlog.c:5916 +#: access/transam/xlog.c:5932 #, c-format msgid "recovery has paused" msgstr "восстановление приостановлено" -#: access/transam/xlog.c:5917 +#: access/transam/xlog.c:5933 #, c-format msgid "Execute pg_wal_replay_resume() to continue." msgstr "Выполните pg_wal_replay_resume() для продолжения." 
-#: access/transam/xlog.c:6125 +#: access/transam/xlog.c:6141 #, c-format msgid "" "hot standby is not possible because %s = %d is a lower setting than on the " @@ -2504,12 +2279,12 @@ msgstr "" "режим горячего резерва невозможен, так как параметр %s = %d, меньше чем на " "главном сервере (на нём было значение %d)" -#: access/transam/xlog.c:6151 +#: access/transam/xlog.c:6167 #, c-format msgid "WAL was generated with wal_level=minimal, data may be missing" msgstr "WAL был создан с параметром wal_level=minimal, возможна потеря данных" -#: access/transam/xlog.c:6152 +#: access/transam/xlog.c:6168 #, c-format msgid "" "This happens if you temporarily set wal_level=minimal without taking a new " @@ -2518,7 +2293,7 @@ msgstr "" "Это происходит, если вы на время установили wal_level=minimal и не сделали " "резервную копию базу данных." -#: access/transam/xlog.c:6163 +#: access/transam/xlog.c:6179 #, c-format msgid "" "hot standby is not possible because wal_level was not set to \"replica\" or " @@ -2527,7 +2302,7 @@ msgstr "" "режим горячего резерва невозможен, так как на главном сервере установлен " "неподходящий wal_level (должен быть \"replica\" или выше)" -#: access/transam/xlog.c:6164 +#: access/transam/xlog.c:6180 #, c-format msgid "" "Either set wal_level to \"replica\" on the master, or turn off hot_standby " @@ -2536,32 +2311,32 @@ msgstr "" "Либо установите для wal_level значение \"replica\" на главном сервере, либо " "выключите hot_standby здесь." -#: access/transam/xlog.c:6221 +#: access/transam/xlog.c:6237 #, c-format msgid "control file contains invalid data" msgstr "файл pg_control содержит неверные данные" -#: access/transam/xlog.c:6227 +#: access/transam/xlog.c:6243 #, c-format msgid "database system was shut down at %s" msgstr "система БД была выключена: %s" -#: access/transam/xlog.c:6232 +#: access/transam/xlog.c:6248 #, c-format msgid "database system was shut down in recovery at %s" msgstr "система БД была выключена в процессе восстановления: %s" -#: access/transam/xlog.c:6236 +#: access/transam/xlog.c:6252 #, c-format msgid "database system shutdown was interrupted; last known up at %s" msgstr "выключение системы БД было прервано; последний момент работы: %s" -#: access/transam/xlog.c:6240 +#: access/transam/xlog.c:6256 #, c-format msgid "database system was interrupted while in recovery at %s" msgstr "работа системы БД была прервана во время восстановления: %s" -#: access/transam/xlog.c:6242 +#: access/transam/xlog.c:6258 #, c-format msgid "" "This probably means that some data is corrupted and you will have to use the " @@ -2570,14 +2345,14 @@ msgstr "" "Это скорее всего означает, что некоторые данные повреждены и вам придётся " "восстановить БД из последней резервной копии." -#: access/transam/xlog.c:6246 +#: access/transam/xlog.c:6262 #, c-format msgid "database system was interrupted while in recovery at log time %s" msgstr "" "работа системы БД была прервана в процессе восстановления, время в журнале: " "%s" -#: access/transam/xlog.c:6248 +#: access/transam/xlog.c:6264 #, c-format msgid "" "If this has occurred more than once some data might be corrupted and you " @@ -2586,59 +2361,59 @@ msgstr "" "Если это происходит постоянно, возможно, какие-то данные были испорчены и " "для восстановления стоит выбрать более раннюю точку." 
-#: access/transam/xlog.c:6252 +#: access/transam/xlog.c:6268 #, c-format msgid "database system was interrupted; last known up at %s" msgstr "работа системы БД была прервана; последний момент работы: %s" -#: access/transam/xlog.c:6308 +#: access/transam/xlog.c:6324 #, c-format msgid "entering standby mode" msgstr "переход в режим резервного сервера" -#: access/transam/xlog.c:6311 +#: access/transam/xlog.c:6327 #, c-format msgid "starting point-in-time recovery to XID %u" msgstr "начинается восстановление точки во времени до XID %u" -#: access/transam/xlog.c:6315 +#: access/transam/xlog.c:6331 #, c-format msgid "starting point-in-time recovery to %s" msgstr "начинается восстановление точки во времени до %s" -#: access/transam/xlog.c:6319 +#: access/transam/xlog.c:6335 #, c-format msgid "starting point-in-time recovery to \"%s\"" msgstr "начинается восстановление точки во времени до \"%s\"" -#: access/transam/xlog.c:6323 +#: access/transam/xlog.c:6339 #, c-format -msgid "starting point-in-time recovery to WAL position (LSN) \"%X/%X\"" +msgid "starting point-in-time recovery to WAL location (LSN) \"%X/%X\"" msgstr "" "начинается восстановление точки во времени до позиции в WAL (LSN) \"%X/%X\"" -#: access/transam/xlog.c:6328 +#: access/transam/xlog.c:6344 #, c-format msgid "starting point-in-time recovery to earliest consistent point" msgstr "" "начинается восстановление точки во времени до первой точки согласованности" -#: access/transam/xlog.c:6331 +#: access/transam/xlog.c:6347 #, c-format msgid "starting archive recovery" msgstr "начинается восстановление архива" -#: access/transam/xlog.c:6382 access/transam/xlog.c:6510 +#: access/transam/xlog.c:6398 access/transam/xlog.c:6526 #, c-format msgid "checkpoint record is at %X/%X" msgstr "запись о контрольной точке по смещению %X/%X" -#: access/transam/xlog.c:6396 +#: access/transam/xlog.c:6412 #, c-format msgid "could not find redo location referenced by checkpoint record" msgstr "не удалось найти положение REDO, указанное записью контрольной точки" -#: access/transam/xlog.c:6397 access/transam/xlog.c:6404 +#: access/transam/xlog.c:6413 access/transam/xlog.c:6420 #, c-format msgid "" "If you are not restoring from a backup, try removing the file \"%s/" @@ -2647,47 +2422,47 @@ msgstr "" "Если вы не восстанавливаете БД из резервной копии, попробуйте удалить файл " "\"%s/backup_label\"." -#: access/transam/xlog.c:6403 +#: access/transam/xlog.c:6419 #, c-format msgid "could not locate required checkpoint record" msgstr "не удалось считать нужную запись контрольной точки" -#: access/transam/xlog.c:6429 commands/tablespace.c:639 +#: access/transam/xlog.c:6445 commands/tablespace.c:639 #, c-format msgid "could not create symbolic link \"%s\": %m" msgstr "не удалось создать символическую ссылку \"%s\": %m" -#: access/transam/xlog.c:6461 access/transam/xlog.c:6467 +#: access/transam/xlog.c:6477 access/transam/xlog.c:6483 #, c-format msgid "ignoring file \"%s\" because no file \"%s\" exists" msgstr "файл \"%s\" игнорируется ввиду отсутствия файла \"%s\"" -#: access/transam/xlog.c:6463 access/transam/xlog.c:11365 +#: access/transam/xlog.c:6479 access/transam/xlog.c:11436 #, c-format msgid "File \"%s\" was renamed to \"%s\"." msgstr "Файл \"%s\" был переименован в \"%s\"." -#: access/transam/xlog.c:6469 +#: access/transam/xlog.c:6485 #, c-format msgid "Could not rename file \"%s\" to \"%s\": %m." msgstr "Не удалось переименовать файл \"%s\" в \"%s\" (%m)." 
-#: access/transam/xlog.c:6520 access/transam/xlog.c:6535 +#: access/transam/xlog.c:6536 access/transam/xlog.c:6551 #, c-format msgid "could not locate a valid checkpoint record" msgstr "не удалось считать правильную запись контрольной точки" -#: access/transam/xlog.c:6529 +#: access/transam/xlog.c:6545 #, c-format msgid "using previous checkpoint record at %X/%X" msgstr "используется предыдущая запись контрольной точки по смещению %X/%X" -#: access/transam/xlog.c:6573 +#: access/transam/xlog.c:6589 #, c-format msgid "requested timeline %u is not a child of this server's history" msgstr "в истории сервера нет ответвления запрошенной линии времени %u" -#: access/transam/xlog.c:6575 +#: access/transam/xlog.c:6591 #, c-format msgid "" "Latest checkpoint is at %X/%X on timeline %u, but in the history of the " @@ -2696,7 +2471,7 @@ msgstr "" "Последняя контрольная точка: %X/%X на линии времени %u, но в истории " "запрошенной линии времени сервер ответвился с этой линии в %X/%X." -#: access/transam/xlog.c:6591 +#: access/transam/xlog.c:6607 #, c-format msgid "" "requested timeline %u does not contain minimum recovery point %X/%X on " @@ -2705,22 +2480,22 @@ msgstr "" "запрошенная линия времени %u не содержит минимальную точку восстановления %X/" "%X на линии времени %u" -#: access/transam/xlog.c:6622 +#: access/transam/xlog.c:6638 #, c-format msgid "invalid next transaction ID" msgstr "неверный ID следующей транзакции" -#: access/transam/xlog.c:6706 +#: access/transam/xlog.c:6732 #, c-format msgid "invalid redo in checkpoint record" msgstr "неверная запись REDO в контрольной точке" -#: access/transam/xlog.c:6717 +#: access/transam/xlog.c:6743 #, c-format msgid "invalid redo record in shutdown checkpoint" msgstr "неверная запись REDO в контрольной точке выключения" -#: access/transam/xlog.c:6745 +#: access/transam/xlog.c:6771 #, c-format msgid "" "database system was not properly shut down; automatic recovery in progress" @@ -2728,19 +2503,19 @@ msgstr "" "система БД была остановлена нештатно; производится автоматическое " "восстановление" -#: access/transam/xlog.c:6749 +#: access/transam/xlog.c:6775 #, c-format msgid "crash recovery starts in timeline %u and has target timeline %u" msgstr "" "восстановление после сбоя начинается на линии времени %u, целевая линия " "времени: %u" -#: access/transam/xlog.c:6793 +#: access/transam/xlog.c:6819 #, c-format msgid "backup_label contains data inconsistent with control file" msgstr "backup_label содержит данные, не согласованные с файлом pg_control" -#: access/transam/xlog.c:6794 +#: access/transam/xlog.c:6820 #, c-format msgid "" "This means that the backup is corrupted and you will have to use another " @@ -2749,44 +2524,44 @@ msgstr "" "Это означает, что резервная копия повреждена и для восстановления БД " "придётся использовать другую копию." 
-#: access/transam/xlog.c:6868 +#: access/transam/xlog.c:6894 #, c-format msgid "initializing for hot standby" msgstr "инициализация для горячего резерва" -#: access/transam/xlog.c:7000 +#: access/transam/xlog.c:7026 #, c-format msgid "redo starts at %X/%X" msgstr "запись REDO начинается со смещения %X/%X" -#: access/transam/xlog.c:7234 +#: access/transam/xlog.c:7260 #, c-format msgid "requested recovery stop point is before consistent recovery point" msgstr "" "запрошенная точка остановки восстановления предшествует согласованной точке " "восстановления" -#: access/transam/xlog.c:7272 +#: access/transam/xlog.c:7298 #, c-format msgid "redo done at %X/%X" msgstr "записи REDO обработаны до смещения %X/%X" -#: access/transam/xlog.c:7277 access/transam/xlog.c:9280 +#: access/transam/xlog.c:7303 access/transam/xlog.c:9317 #, c-format msgid "last completed transaction was at log time %s" msgstr "последняя завершённая транзакция была выполнена в %s" -#: access/transam/xlog.c:7286 +#: access/transam/xlog.c:7312 #, c-format msgid "redo is not required" msgstr "данные REDO не требуются" -#: access/transam/xlog.c:7361 access/transam/xlog.c:7365 +#: access/transam/xlog.c:7387 access/transam/xlog.c:7391 #, c-format msgid "WAL ends before end of online backup" msgstr "WAL закончился без признака окончания копирования" -#: access/transam/xlog.c:7362 +#: access/transam/xlog.c:7388 #, c-format msgid "" "All WAL generated while online backup was taken must be available at " @@ -2795,7 +2570,7 @@ msgstr "" "Все журналы WAL, созданные во время резервного копирования \"на ходу\", " "должны быть в наличии для восстановления." -#: access/transam/xlog.c:7366 +#: access/transam/xlog.c:7392 #, c-format msgid "" "Online backup started with pg_start_backup() must be ended with " @@ -2805,137 +2580,137 @@ msgstr "" "должно закончиться pg_stop_backup(), и для восстановления должны быть " "доступны все журналы WAL." 
-#: access/transam/xlog.c:7369 +#: access/transam/xlog.c:7395 #, c-format msgid "WAL ends before consistent recovery point" msgstr "WAL закончился до согласованной точки восстановления" -#: access/transam/xlog.c:7396 +#: access/transam/xlog.c:7422 #, c-format msgid "selected new timeline ID: %u" msgstr "выбранный ID новой линии времени: %u" -#: access/transam/xlog.c:7825 +#: access/transam/xlog.c:7851 #, c-format msgid "consistent recovery state reached at %X/%X" msgstr "согласованное состояние восстановления достигнуто по смещению %X/%X" -#: access/transam/xlog.c:8017 +#: access/transam/xlog.c:8043 #, c-format msgid "invalid primary checkpoint link in control file" msgstr "неверная ссылка на первичную контрольную точку в файле pg_control" -#: access/transam/xlog.c:8021 +#: access/transam/xlog.c:8047 #, c-format msgid "invalid secondary checkpoint link in control file" msgstr "неверная ссылка на вторичную контрольную точку в файле pg_control" -#: access/transam/xlog.c:8025 +#: access/transam/xlog.c:8051 #, c-format msgid "invalid checkpoint link in backup_label file" msgstr "неверная ссылка на контрольную точку в файле backup_label" -#: access/transam/xlog.c:8042 +#: access/transam/xlog.c:8068 #, c-format msgid "invalid primary checkpoint record" msgstr "неверная запись первичной контрольной точки" -#: access/transam/xlog.c:8046 +#: access/transam/xlog.c:8072 #, c-format msgid "invalid secondary checkpoint record" msgstr "неверная запись вторичной контрольной точки" -#: access/transam/xlog.c:8050 +#: access/transam/xlog.c:8076 #, c-format msgid "invalid checkpoint record" msgstr "неверная запись контрольной точки" -#: access/transam/xlog.c:8061 +#: access/transam/xlog.c:8087 #, c-format msgid "invalid resource manager ID in primary checkpoint record" msgstr "неверный ID менеджера ресурсов в записи первичной контрольной точки" -#: access/transam/xlog.c:8065 +#: access/transam/xlog.c:8091 #, c-format msgid "invalid resource manager ID in secondary checkpoint record" msgstr "неверный ID менеджера ресурсов в записи вторичной контрольной точки" -#: access/transam/xlog.c:8069 +#: access/transam/xlog.c:8095 #, c-format msgid "invalid resource manager ID in checkpoint record" msgstr "неверный ID менеджера ресурсов в записи контрольной точки" -#: access/transam/xlog.c:8082 +#: access/transam/xlog.c:8108 #, c-format msgid "invalid xl_info in primary checkpoint record" msgstr "неверные флаги xl_info в записи первичной контрольной точки" -#: access/transam/xlog.c:8086 +#: access/transam/xlog.c:8112 #, c-format msgid "invalid xl_info in secondary checkpoint record" msgstr "неверные флаги xl_info в записи вторичной контрольной точки" -#: access/transam/xlog.c:8090 +#: access/transam/xlog.c:8116 #, c-format msgid "invalid xl_info in checkpoint record" msgstr "неверные флаги xl_info в записи контрольной точки" -#: access/transam/xlog.c:8101 +#: access/transam/xlog.c:8127 #, c-format msgid "invalid length of primary checkpoint record" msgstr "неверная длина записи первичной контрольной точки" -#: access/transam/xlog.c:8105 +#: access/transam/xlog.c:8131 #, c-format msgid "invalid length of secondary checkpoint record" msgstr "неверная длина записи вторичной контрольной точки" -#: access/transam/xlog.c:8109 +#: access/transam/xlog.c:8135 #, c-format msgid "invalid length of checkpoint record" msgstr "неверная длина записи контрольной точки" -#: access/transam/xlog.c:8312 +#: access/transam/xlog.c:8338 #, c-format msgid "shutting down" msgstr "выключение" -#: access/transam/xlog.c:8620 +#: 
access/transam/xlog.c:8657 #, c-format -msgid "checkpoint skipped due to an idle system" +msgid "checkpoint skipped because system is idle" msgstr "контрольная точка пропущена ввиду простоя системы" -#: access/transam/xlog.c:8825 +#: access/transam/xlog.c:8862 #, c-format msgid "" -"concurrent transaction log activity while database system is shutting down" +"concurrent write-ahead log activity while database system is shutting down" msgstr "" "во время выключения системы баз данных отмечена активность в журнале " -"транзакций" +"предзаписи" -#: access/transam/xlog.c:9079 +#: access/transam/xlog.c:9116 #, c-format msgid "skipping restartpoint, recovery has already ended" msgstr "" "создание точки перезапуска пропускается, восстановление уже закончилось" -#: access/transam/xlog.c:9102 +#: access/transam/xlog.c:9139 #, c-format msgid "skipping restartpoint, already performed at %X/%X" msgstr "" "создание точки перезапуска пропускается, она уже создана по смещению %X/%X" -#: access/transam/xlog.c:9278 +#: access/transam/xlog.c:9315 #, c-format msgid "recovery restart point at %X/%X" msgstr "точка перезапуска восстановления по смещению %X/%X" -#: access/transam/xlog.c:9414 +#: access/transam/xlog.c:9451 #, c-format msgid "restore point \"%s\" created at %X/%X" msgstr "точка восстановления \"%s\" создана по смещению %X/%X" -#: access/transam/xlog.c:9544 +#: access/transam/xlog.c:9581 #, c-format msgid "" "unexpected previous timeline ID %u (current timeline ID %u) in checkpoint " @@ -2944,13 +2719,13 @@ msgstr "" "неожиданный ID предыдущей линии времени %u (ID текущей линии времени %u) в " "записи контрольной точки" -#: access/transam/xlog.c:9553 +#: access/transam/xlog.c:9590 #, c-format msgid "unexpected timeline ID %u (after %u) in checkpoint record" msgstr "неожиданный ID линии времени %u (после %u) в записи контрольной точки" # skip-rule: capital-letter-first -#: access/transam/xlog.c:9569 +#: access/transam/xlog.c:9606 #, c-format msgid "" "unexpected timeline ID %u in checkpoint record, before reaching minimum " @@ -2959,79 +2734,79 @@ msgstr "" "неожиданный ID линии времени %u в записи контрольной точки, до достижения " "минимальной к. т. 
%X/%X на линии времени %u" -#: access/transam/xlog.c:9644 +#: access/transam/xlog.c:9682 #, c-format msgid "online backup was canceled, recovery cannot continue" msgstr "" "резервное копирование \"на ходу\" было отменено, продолжить восстановление " "нельзя" -#: access/transam/xlog.c:9700 access/transam/xlog.c:9747 -#: access/transam/xlog.c:9770 +#: access/transam/xlog.c:9738 access/transam/xlog.c:9785 +#: access/transam/xlog.c:9808 #, c-format msgid "unexpected timeline ID %u (should be %u) in checkpoint record" msgstr "" "неожиданный ID линии времени %u (должен быть %u) в записи точки " "восстановления" -#: access/transam/xlog.c:10046 +#: access/transam/xlog.c:10084 #, c-format msgid "could not fsync log segment %s: %m" msgstr "не удалось синхронизировать с ФС сегмент журнала %s: %m" -#: access/transam/xlog.c:10071 +#: access/transam/xlog.c:10109 #, c-format msgid "could not fsync log file %s: %m" msgstr "не удалось синхронизировать с ФС файл журнала %s: %m" -#: access/transam/xlog.c:10079 +#: access/transam/xlog.c:10117 #, c-format msgid "could not fsync write-through log file %s: %m" msgstr "не удалось синхронизировать с ФС файл журнала сквозной записи %s: %m" -#: access/transam/xlog.c:10088 +#: access/transam/xlog.c:10126 #, c-format msgid "could not fdatasync log file %s: %m" msgstr "" "не удалось синхронизировать с ФС данные (fdatasync) файла журнала %s: %m" -#: access/transam/xlog.c:10179 access/transam/xlog.c:10696 -#: access/transam/xlogfuncs.c:296 access/transam/xlogfuncs.c:323 -#: access/transam/xlogfuncs.c:362 access/transam/xlogfuncs.c:383 -#: access/transam/xlogfuncs.c:404 +#: access/transam/xlog.c:10217 access/transam/xlog.c:10742 +#: access/transam/xlogfuncs.c:297 access/transam/xlogfuncs.c:324 +#: access/transam/xlogfuncs.c:363 access/transam/xlogfuncs.c:384 +#: access/transam/xlogfuncs.c:405 #, c-format msgid "WAL control functions cannot be executed during recovery." msgstr "Функции управления WAL нельзя использовать в процессе восстановления." -#: access/transam/xlog.c:10188 access/transam/xlog.c:10705 +#: access/transam/xlog.c:10226 access/transam/xlog.c:10751 #, c-format msgid "WAL level not sufficient for making an online backup" msgstr "" "Выбранный уровень WAL недостаточен для резервного копирования \"на ходу\"" -#: access/transam/xlog.c:10189 access/transam/xlog.c:10706 -#: access/transam/xlogfuncs.c:329 +#: access/transam/xlog.c:10227 access/transam/xlog.c:10752 +#: access/transam/xlogfuncs.c:330 #, c-format msgid "wal_level must be set to \"replica\" or \"logical\" at server start." msgstr "Установите wal_level \"replica\" или \"logical\" при запуске сервера." -#: access/transam/xlog.c:10194 +#: access/transam/xlog.c:10232 #, c-format msgid "backup label too long (max %d bytes)" msgstr "длина метки резервной копии превышает предел (%d байт)" -#: access/transam/xlog.c:10231 access/transam/xlog.c:10503 -#: access/transam/xlog.c:10541 +#: access/transam/xlog.c:10269 access/transam/xlog.c:10542 +#: access/transam/xlog.c:10580 #, c-format msgid "a backup is already in progress" msgstr "резервное копирование уже выполняется" -#: access/transam/xlog.c:10232 +#: access/transam/xlog.c:10270 #, c-format msgid "Run pg_stop_backup() and try again." msgstr "Выполните pg_stop_backup() и повторите операцию." 
-#: access/transam/xlog.c:10327 +#: access/transam/xlog.c:10365 #, c-format msgid "" "WAL generated with full_page_writes=off was replayed since last restartpoint" @@ -3039,7 +2814,7 @@ msgstr "" "После последней точки перезапуска был воспроизведён WAL, созданный в режиме " "full_page_writes=off." -#: access/transam/xlog.c:10329 access/transam/xlog.c:10886 +#: access/transam/xlog.c:10367 access/transam/xlog.c:10947 #, c-format msgid "" "This means that the backup being taken on the standby is corrupt and should " @@ -3051,39 +2826,39 @@ msgstr "" "CHECKPOINT на главном сервере, а затем попробуйте резервное копирование \"на " "ходу\" ещё раз." -#: access/transam/xlog.c:10396 replication/basebackup.c:1096 +#: access/transam/xlog.c:10434 replication/basebackup.c:1097 #: utils/adt/misc.c:497 #, c-format msgid "could not read symbolic link \"%s\": %m" msgstr "не удалось прочитать символическую ссылку \"%s\": %m" -#: access/transam/xlog.c:10403 replication/basebackup.c:1101 +#: access/transam/xlog.c:10441 replication/basebackup.c:1102 #: utils/adt/misc.c:502 #, c-format msgid "symbolic link \"%s\" target is too long" msgstr "целевой путь символической ссылки \"%s\" слишком длинный" -#: access/transam/xlog.c:10456 commands/tablespace.c:389 -#: commands/tablespace.c:551 replication/basebackup.c:1116 utils/adt/misc.c:510 +#: access/transam/xlog.c:10494 commands/tablespace.c:389 +#: commands/tablespace.c:551 replication/basebackup.c:1117 utils/adt/misc.c:510 #, c-format msgid "tablespaces are not supported on this platform" msgstr "табличные пространства не поддерживаются на этой платформе" -#: access/transam/xlog.c:10497 access/transam/xlog.c:10535 -#: access/transam/xlog.c:10744 access/transam/xlogarchive.c:105 -#: access/transam/xlogarchive.c:264 commands/copy.c:1872 commands/copy.c:3052 -#: commands/extension.c:3314 commands/tablespace.c:780 -#: commands/tablespace.c:871 replication/basebackup.c:480 -#: replication/basebackup.c:548 replication/logical/snapbuild.c:1509 -#: storage/file/copydir.c:72 storage/file/copydir.c:115 storage/file/fd.c:2954 -#: storage/file/fd.c:3046 utils/adt/dbsize.c:70 utils/adt/dbsize.c:227 +#: access/transam/xlog.c:10536 access/transam/xlog.c:10574 +#: access/transam/xlog.c:10790 access/transam/xlogarchive.c:105 +#: access/transam/xlogarchive.c:264 commands/copy.c:1853 commands/copy.c:3155 +#: commands/extension.c:3319 commands/tablespace.c:780 +#: commands/tablespace.c:871 replication/basebackup.c:481 +#: replication/basebackup.c:549 replication/logical/snapbuild.c:1518 +#: storage/file/copydir.c:72 storage/file/copydir.c:115 storage/file/fd.c:2976 +#: storage/file/fd.c:3068 utils/adt/dbsize.c:70 utils/adt/dbsize.c:227 #: utils/adt/dbsize.c:307 utils/adt/genfile.c:115 utils/adt/genfile.c:334 -#: guc-file.l:1001 +#: guc-file.l:1002 #, c-format msgid "could not stat file \"%s\": %m" msgstr "не удалось получить информацию о файле \"%s\": %m" -#: access/transam/xlog.c:10504 access/transam/xlog.c:10542 +#: access/transam/xlog.c:10543 access/transam/xlog.c:10581 #, c-format msgid "" "If you're sure there is no backup in progress, remove file \"%s\" and try " @@ -3092,37 +2867,37 @@ msgstr "" "Если вы считаете, что информация о резервном копировании неверна, удалите " "файл \"%s\" и попробуйте снова." 
-#: access/transam/xlog.c:10521 access/transam/xlog.c:10559 -#: access/transam/xlog.c:10947 postmaster/syslogger.c:1391 +#: access/transam/xlog.c:10560 access/transam/xlog.c:10598 +#: access/transam/xlog.c:11005 postmaster/syslogger.c:1391 #: postmaster/syslogger.c:1404 #, c-format msgid "could not write file \"%s\": %m" msgstr "не удалось записать файл \"%s\": %m" -#: access/transam/xlog.c:10721 +#: access/transam/xlog.c:10767 #, c-format msgid "exclusive backup not in progress" msgstr "монопольное резервное копирование не выполняется" -#: access/transam/xlog.c:10748 +#: access/transam/xlog.c:10794 #, c-format msgid "a backup is not in progress" msgstr "резервное копирование не выполняется" -#: access/transam/xlog.c:10821 access/transam/xlog.c:10834 -#: access/transam/xlog.c:11175 access/transam/xlog.c:11181 -#: access/transam/xlog.c:11265 access/transam/xlogfuncs.c:697 +#: access/transam/xlog.c:10880 access/transam/xlog.c:10893 +#: access/transam/xlog.c:11246 access/transam/xlog.c:11252 +#: access/transam/xlog.c:11336 access/transam/xlogfuncs.c:698 #, c-format msgid "invalid data in file \"%s\"" msgstr "неверные данные в файле \"%s\"" -#: access/transam/xlog.c:10838 replication/basebackup.c:994 +#: access/transam/xlog.c:10897 replication/basebackup.c:995 #, c-format msgid "the standby was promoted during online backup" msgstr "" "дежурный сервер был повышен в процессе резервного копирования \"на ходу\"" -#: access/transam/xlog.c:10839 replication/basebackup.c:995 +#: access/transam/xlog.c:10898 replication/basebackup.c:996 #, c-format msgid "" "This means that the backup being taken is corrupt and should not be used. " @@ -3131,7 +2906,7 @@ msgstr "" "Это означает, что создаваемая резервная копия испорчена и использовать её не " "следует. Попробуйте резервное копирование \"на ходу\" ещё раз." -#: access/transam/xlog.c:10884 +#: access/transam/xlog.c:10945 #, c-format msgid "" "WAL generated with full_page_writes=off was replayed during online backup" @@ -3139,7 +2914,7 @@ msgstr "" "В процессе резервного копирования \"на ходу\" был воспроизведён WAL, " "созданный в режиме full_page_writes=off" -#: access/transam/xlog.c:10997 +#: access/transam/xlog.c:11060 #, c-format msgid "" "pg_stop_backup cleanup done, waiting for required WAL segments to be archived" @@ -3147,7 +2922,7 @@ msgstr "" "очистка в pg_stop_backup выполнена, ожидаются требуемые сегменты WAL для " "архивации" -#: access/transam/xlog.c:11007 +#: access/transam/xlog.c:11070 #, c-format msgid "" "pg_stop_backup still waiting for all required WAL segments to be archived " @@ -3156,7 +2931,7 @@ msgstr "" "pg_stop_backup всё ещё ждёт все требуемые сегменты WAL для архивации (прошло " "%d сек.)" -#: access/transam/xlog.c:11009 +#: access/transam/xlog.c:11072 #, c-format msgid "" "Check that your archive_command is executing properly. pg_stop_backup can " @@ -3167,13 +2942,13 @@ msgstr "" "можно отменить безопасно, но резервная копия базы данных будет непригодна " "без всех сегментов WAL." -#: access/transam/xlog.c:11016 +#: access/transam/xlog.c:11079 #, c-format msgid "pg_stop_backup complete, all required WAL segments have been archived" msgstr "" "команда pg_stop_backup завершена, все требуемые сегменты WAL заархивированы" -#: access/transam/xlog.c:11020 +#: access/transam/xlog.c:11083 #, c-format msgid "" "WAL archiving is not enabled; you must ensure that all required WAL segments " @@ -3183,35 +2958,35 @@ msgstr "" "сегментов WAL другими средствами для получения резервной копии" #. 
translator: %s is a WAL record description -#: access/transam/xlog.c:11305 +#: access/transam/xlog.c:11376 #, c-format msgid "WAL redo at %X/%X for %s" msgstr "запись REDO в WAL в позиции %X/%X для %s" -#: access/transam/xlog.c:11354 +#: access/transam/xlog.c:11425 #, c-format msgid "online backup mode was not canceled" msgstr "режим копирования \"на ходу\" не был отменён" -#: access/transam/xlog.c:11355 +#: access/transam/xlog.c:11426 #, c-format msgid "File \"%s\" could not be renamed to \"%s\": %m." msgstr "Не удалось переименовать файл \"%s\" в \"%s\": %m." -#: access/transam/xlog.c:11364 access/transam/xlog.c:11376 -#: access/transam/xlog.c:11386 +#: access/transam/xlog.c:11435 access/transam/xlog.c:11447 +#: access/transam/xlog.c:11457 #, c-format msgid "online backup mode canceled" msgstr "режим копирования \"на ходу\" отменён" -#: access/transam/xlog.c:11377 +#: access/transam/xlog.c:11448 #, c-format msgid "" "Files \"%s\" and \"%s\" were renamed to \"%s\" and \"%s\", respectively." msgstr "" "Файлы \"%s\" и \"%s\" были переименованы в \"%s\" и \"%s\", соответственно." -#: access/transam/xlog.c:11387 +#: access/transam/xlog.c:11458 #, c-format msgid "" "File \"%s\" was renamed to \"%s\", but file \"%s\" could not be renamed to " @@ -3220,28 +2995,28 @@ msgstr "" "Файл \"%s\" был переименован в \"%s\", но переименовать \"%s\" в \"%s\" не " "удалось: %m." -#: access/transam/xlog.c:11509 access/transam/xlogutils.c:724 -#: replication/walreceiver.c:1006 replication/walsender.c:2326 +#: access/transam/xlog.c:11580 access/transam/xlogutils.c:724 +#: replication/walreceiver.c:1011 replication/walsender.c:2412 #, c-format msgid "could not seek in log segment %s to offset %u: %m" msgstr "не удалось переместиться в сегменте журнала %s к смещению %u: %m" -#: access/transam/xlog.c:11523 +#: access/transam/xlog.c:11594 #, c-format msgid "could not read from log segment %s, offset %u: %m" msgstr "не удалось прочитать сегмент журнала %s, смещение %u: %m" -#: access/transam/xlog.c:12012 +#: access/transam/xlog.c:12083 #, c-format msgid "received promote request" msgstr "получен запрос повышения статуса" -#: access/transam/xlog.c:12025 +#: access/transam/xlog.c:12096 #, c-format msgid "trigger file found: %s" msgstr "найден файл триггера: %s" -#: access/transam/xlog.c:12034 +#: access/transam/xlog.c:12105 #, c-format msgid "could not stat trigger file \"%s\": %m" msgstr "не удалось получить информацию о файле триггера \"%s\": %m" @@ -3270,9 +3045,9 @@ msgid "%s \"%s\": %s" msgstr "%s \"%s\": %s" #: access/transam/xlogarchive.c:457 postmaster/syslogger.c:1415 -#: replication/logical/snapbuild.c:1641 replication/slot.c:518 -#: replication/slot.c:1118 replication/slot.c:1232 storage/file/fd.c:642 -#: storage/file/fd.c:737 utils/time/snapmgr.c:1298 +#: replication/logical/snapbuild.c:1645 replication/slot.c:590 +#: replication/slot.c:1190 replication/slot.c:1304 storage/file/fd.c:641 +#: storage/file/fd.c:736 utils/time/snapmgr.c:1318 #, c-format msgid "could not rename file \"%s\" to \"%s\": %m" msgstr "не удалось переименовать файл \"%s\" в \"%s\": %m" @@ -3300,94 +3075,94 @@ msgid "a backup is already in progress in this session" msgstr "резервное копирование уже выполняется в этом сеансе" #: access/transam/xlogfuncs.c:92 commands/tablespace.c:703 -#: commands/tablespace.c:713 postmaster/postmaster.c:1434 -#: replication/basebackup.c:368 replication/basebackup.c:708 -#: storage/file/copydir.c:53 storage/file/copydir.c:96 storage/file/fd.c:2420 -#: storage/file/fd.c:3019 storage/ipc/dsm.c:301 
utils/adt/genfile.c:440 +#: commands/tablespace.c:713 postmaster/postmaster.c:1460 +#: replication/basebackup.c:369 replication/basebackup.c:709 +#: storage/file/copydir.c:53 storage/file/copydir.c:96 storage/file/fd.c:2434 +#: storage/file/fd.c:3041 storage/ipc/dsm.c:301 utils/adt/genfile.c:440 #: utils/adt/misc.c:410 utils/misc/tzparser.c:339 #, c-format msgid "could not open directory \"%s\": %m" msgstr "не удалось открыть каталог \"%s\": %m" -#: access/transam/xlogfuncs.c:152 access/transam/xlogfuncs.c:233 +#: access/transam/xlogfuncs.c:152 access/transam/xlogfuncs.c:234 #, c-format msgid "non-exclusive backup in progress" msgstr "выполняется не монопольное резервное копирование" -#: access/transam/xlogfuncs.c:153 access/transam/xlogfuncs.c:234 +#: access/transam/xlogfuncs.c:153 access/transam/xlogfuncs.c:235 #, c-format msgid "Did you mean to use pg_stop_backup('f')?" msgstr "Вероятно, подразумевалось pg_stop_backup('f')?" -#: access/transam/xlogfuncs.c:204 commands/event_trigger.c:1453 -#: commands/event_trigger.c:2004 commands/extension.c:1891 -#: commands/extension.c:2000 commands/extension.c:2224 commands/prepare.c:721 -#: executor/execExpr.c:2088 executor/execSRF.c:686 executor/functions.c:1028 -#: foreign/foreign.c:488 libpq/hba.c:2560 replication/logical/launcher.c:762 -#: replication/logical/logicalfuncs.c:176 replication/logical/origin.c:1384 -#: replication/slotfuncs.c:196 replication/walsender.c:3019 -#: utils/adt/jsonfuncs.c:1515 utils/adt/jsonfuncs.c:1645 -#: utils/adt/jsonfuncs.c:1833 utils/adt/jsonfuncs.c:1960 -#: utils/adt/jsonfuncs.c:2726 utils/adt/pgstatfuncs.c:456 -#: utils/adt/pgstatfuncs.c:557 utils/fmgr/funcapi.c:62 utils/misc/guc.c:8520 -#: utils/mmgr/portalmem.c:1053 +#: access/transam/xlogfuncs.c:205 commands/event_trigger.c:1471 +#: commands/event_trigger.c:2022 commands/extension.c:1895 +#: commands/extension.c:2004 commands/extension.c:2228 commands/prepare.c:721 +#: executor/execExpr.c:2121 executor/execSRF.c:690 executor/functions.c:1029 +#: foreign/foreign.c:488 libpq/hba.c:2565 replication/logical/launcher.c:1026 +#: replication/logical/logicalfuncs.c:176 replication/logical/origin.c:1443 +#: replication/slotfuncs.c:197 replication/walsender.c:3181 +#: utils/adt/jsonfuncs.c:1689 utils/adt/jsonfuncs.c:1819 +#: utils/adt/jsonfuncs.c:2007 utils/adt/jsonfuncs.c:2134 +#: utils/adt/jsonfuncs.c:3489 utils/adt/pgstatfuncs.c:456 +#: utils/adt/pgstatfuncs.c:557 utils/fmgr/funcapi.c:62 utils/misc/guc.c:8549 +#: utils/mmgr/portalmem.c:1067 #, c-format msgid "set-valued function called in context that cannot accept a set" msgstr "" "функция, возвращающая множество, вызвана в контексте, где ему нет места" -#: access/transam/xlogfuncs.c:208 commands/event_trigger.c:1457 -#: commands/event_trigger.c:2008 commands/extension.c:1895 -#: commands/extension.c:2004 commands/extension.c:2228 commands/prepare.c:725 -#: foreign/foreign.c:493 libpq/hba.c:2564 replication/logical/launcher.c:766 -#: replication/logical/logicalfuncs.c:180 replication/logical/origin.c:1388 -#: replication/slotfuncs.c:200 replication/walsender.c:3023 +#: access/transam/xlogfuncs.c:209 commands/event_trigger.c:1475 +#: commands/event_trigger.c:2026 commands/extension.c:1899 +#: commands/extension.c:2008 commands/extension.c:2232 commands/prepare.c:725 +#: foreign/foreign.c:493 libpq/hba.c:2569 replication/logical/launcher.c:1030 +#: replication/logical/logicalfuncs.c:180 replication/logical/origin.c:1447 +#: replication/slotfuncs.c:201 replication/walsender.c:3185 #: utils/adt/pgstatfuncs.c:460 
utils/adt/pgstatfuncs.c:561 -#: utils/misc/guc.c:8524 utils/misc/pg_config.c:44 utils/mmgr/portalmem.c:1057 +#: utils/misc/guc.c:8553 utils/misc/pg_config.c:44 utils/mmgr/portalmem.c:1071 #, c-format msgid "materialize mode required, but it is not allowed in this context" msgstr "требуется режим материализации, но он недопустим в этом контексте" -#: access/transam/xlogfuncs.c:250 +#: access/transam/xlogfuncs.c:251 #, c-format msgid "non-exclusive backup is not in progress" msgstr "немонопольное резервное копирование не выполняется" -#: access/transam/xlogfuncs.c:251 +#: access/transam/xlogfuncs.c:252 #, c-format msgid "Did you mean to use pg_stop_backup('t')?" msgstr "Вероятно, подразумевалось pg_stop_backup('t')?" -#: access/transam/xlogfuncs.c:328 +#: access/transam/xlogfuncs.c:329 #, c-format msgid "WAL level not sufficient for creating a restore point" msgstr "Выбранный уровень WAL не достаточен для создания точки восстановления" # well-spelled: симв -#: access/transam/xlogfuncs.c:336 +#: access/transam/xlogfuncs.c:337 #, c-format msgid "value too long for restore point (maximum %d characters)" msgstr "значение для точки восстановления превышает предел (%d симв.)" -#: access/transam/xlogfuncs.c:474 +#: access/transam/xlogfuncs.c:475 #, c-format msgid "pg_walfile_name_offset() cannot be executed during recovery." msgstr "" "Функцию pg_walfile_name_offset() нельзя вызывать во время восстановления." -#: access/transam/xlogfuncs.c:530 +#: access/transam/xlogfuncs.c:531 #, c-format msgid "pg_walfile_name() cannot be executed during recovery." msgstr "Функцию pg_walfile_name() нельзя вызывать в процессе восстановления." -#: access/transam/xlogfuncs.c:550 access/transam/xlogfuncs.c:570 -#: access/transam/xlogfuncs.c:587 +#: access/transam/xlogfuncs.c:551 access/transam/xlogfuncs.c:571 +#: access/transam/xlogfuncs.c:588 #, c-format msgid "recovery is not in progress" msgstr "восстановление не выполняется" -#: access/transam/xlogfuncs.c:551 access/transam/xlogfuncs.c:571 -#: access/transam/xlogfuncs.c:588 +#: access/transam/xlogfuncs.c:552 access/transam/xlogfuncs.c:572 +#: access/transam/xlogfuncs.c:589 #, c-format msgid "Recovery control functions can only be executed during recovery." 
msgstr "" @@ -3550,38 +3325,38 @@ msgstr "" msgid "invalid block_id %u at %X/%X" msgstr "неверный идентификатор блока %u в позиции %X/%X" -#: access/transam/xlogreader.c:1291 +#: access/transam/xlogreader.c:1306 #, c-format msgid "record with invalid length at %X/%X" msgstr "запись с неверной длиной в позиции %X/%X" -#: access/transam/xlogreader.c:1380 +#: access/transam/xlogreader.c:1395 #, c-format msgid "invalid compressed image at %X/%X, block %d" msgstr "неверный сжатый образ в позиции %X/%X, блок %d" -#: access/transam/xlogutils.c:747 replication/walsender.c:2345 +#: access/transam/xlogutils.c:747 replication/walsender.c:2431 #, c-format msgid "could not read from log segment %s, offset %u, length %lu: %m" msgstr "не удалось прочитать сегмент журнала %s (смещение %u, длина %lu): %m" -#: bootstrap/bootstrap.c:271 postmaster/postmaster.c:801 tcop/postgres.c:3495 +#: bootstrap/bootstrap.c:272 postmaster/postmaster.c:821 tcop/postgres.c:3508 #, c-format msgid "--%s requires a value" msgstr "для --%s требуется значение" -#: bootstrap/bootstrap.c:276 postmaster/postmaster.c:806 tcop/postgres.c:3500 +#: bootstrap/bootstrap.c:277 postmaster/postmaster.c:826 tcop/postgres.c:3513 #, c-format msgid "-c %s requires a value" msgstr "для -c %s требуется значение" -#: bootstrap/bootstrap.c:287 postmaster/postmaster.c:818 -#: postmaster/postmaster.c:831 +#: bootstrap/bootstrap.c:288 postmaster/postmaster.c:838 +#: postmaster/postmaster.c:851 #, c-format msgid "Try \"%s --help\" for more information.\n" msgstr "Для дополнительной информации попробуйте \"%s --help\".\n" -#: bootstrap/bootstrap.c:296 +#: bootstrap/bootstrap.c:297 #, c-format msgid "%s: invalid command-line arguments\n" msgstr "%s: неверные аргументы командной строки\n" @@ -3699,54 +3474,51 @@ msgid "column privileges are only valid for relations" msgstr "права для столбцов применимы только к отношениям" #: catalog/aclchk.c:696 catalog/aclchk.c:3926 catalog/aclchk.c:4708 -#: catalog/objectaddress.c:929 catalog/pg_largeobject.c:111 +#: catalog/objectaddress.c:928 catalog/pg_largeobject.c:111 #: storage/large_object/inv_api.c:291 #, c-format msgid "large object %u does not exist" msgstr "большой объект %u не существует" -#: catalog/aclchk.c:885 catalog/aclchk.c:894 commands/collationcmds.c:106 -#: commands/copy.c:1046 commands/copy.c:1066 commands/copy.c:1075 -#: commands/copy.c:1084 commands/copy.c:1093 commands/copy.c:1102 -#: commands/copy.c:1111 commands/copy.c:1120 commands/copy.c:1129 -#: commands/copy.c:1147 commands/copy.c:1163 commands/copy.c:1183 -#: commands/copy.c:1200 commands/dbcommands.c:155 commands/dbcommands.c:164 +#: catalog/aclchk.c:885 catalog/aclchk.c:894 commands/collationcmds.c:114 +#: commands/copy.c:1042 commands/copy.c:1062 commands/copy.c:1071 +#: commands/copy.c:1080 commands/copy.c:1089 commands/copy.c:1098 +#: commands/copy.c:1107 commands/copy.c:1116 commands/copy.c:1125 +#: commands/copy.c:1143 commands/copy.c:1159 commands/copy.c:1179 +#: commands/copy.c:1196 commands/dbcommands.c:155 commands/dbcommands.c:164 #: commands/dbcommands.c:173 commands/dbcommands.c:182 #: commands/dbcommands.c:191 commands/dbcommands.c:200 #: commands/dbcommands.c:209 commands/dbcommands.c:218 #: commands/dbcommands.c:227 commands/dbcommands.c:1427 #: commands/dbcommands.c:1436 commands/dbcommands.c:1445 -#: commands/dbcommands.c:1454 commands/extension.c:1674 -#: commands/extension.c:1684 commands/extension.c:1694 -#: commands/extension.c:1704 commands/extension.c:2944 +#: commands/dbcommands.c:1454 commands/extension.c:1678 
+#: commands/extension.c:1688 commands/extension.c:1698 +#: commands/extension.c:1708 commands/extension.c:2949 #: commands/foreigncmds.c:537 commands/foreigncmds.c:546 #: commands/functioncmds.c:526 commands/functioncmds.c:643 #: commands/functioncmds.c:652 commands/functioncmds.c:661 -#: commands/functioncmds.c:670 commands/functioncmds.c:2076 -#: commands/functioncmds.c:2084 commands/publicationcmds.c:89 -#: commands/publicationcmds.c:99 commands/publicationcmds.c:109 -#: commands/publicationcmds.c:119 commands/publicationcmds.c:129 -#: commands/publicationcmds.c:139 commands/sequence.c:1247 -#: commands/sequence.c:1256 commands/sequence.c:1265 commands/sequence.c:1274 -#: commands/sequence.c:1283 commands/sequence.c:1292 commands/sequence.c:1301 -#: commands/sequence.c:1310 commands/sequence.c:1319 -#: commands/subscriptioncmds.c:94 commands/subscriptioncmds.c:104 -#: commands/subscriptioncmds.c:114 commands/subscriptioncmds.c:124 -#: commands/subscriptioncmds.c:134 commands/subscriptioncmds.c:144 -#: commands/subscriptioncmds.c:153 commands/subscriptioncmds.c:163 -#: commands/typecmds.c:298 commands/typecmds.c:1375 commands/typecmds.c:1384 -#: commands/typecmds.c:1392 commands/typecmds.c:1400 commands/typecmds.c:1408 -#: commands/user.c:138 commands/user.c:161 commands/user.c:170 -#: commands/user.c:179 commands/user.c:188 commands/user.c:197 -#: commands/user.c:206 commands/user.c:215 commands/user.c:224 -#: commands/user.c:233 commands/user.c:242 commands/user.c:251 -#: commands/user.c:260 commands/user.c:547 commands/user.c:564 -#: commands/user.c:572 commands/user.c:580 commands/user.c:588 -#: commands/user.c:596 commands/user.c:604 commands/user.c:612 -#: commands/user.c:621 commands/user.c:629 commands/user.c:637 -#: replication/pgoutput/pgoutput.c:107 replication/pgoutput/pgoutput.c:128 -#: replication/walsender.c:797 replication/walsender.c:808 -#: replication/walsender.c:818 +#: commands/functioncmds.c:670 commands/functioncmds.c:2097 +#: commands/functioncmds.c:2105 commands/publicationcmds.c:90 +#: commands/sequence.c:1265 commands/sequence.c:1275 commands/sequence.c:1285 +#: commands/sequence.c:1295 commands/sequence.c:1305 commands/sequence.c:1315 +#: commands/sequence.c:1325 commands/sequence.c:1335 commands/sequence.c:1345 +#: commands/subscriptioncmds.c:110 commands/subscriptioncmds.c:120 +#: commands/subscriptioncmds.c:130 commands/subscriptioncmds.c:140 +#: commands/subscriptioncmds.c:154 commands/subscriptioncmds.c:165 +#: commands/subscriptioncmds.c:179 commands/tablecmds.c:5987 +#: commands/typecmds.c:298 commands/typecmds.c:1396 commands/typecmds.c:1405 +#: commands/typecmds.c:1413 commands/typecmds.c:1421 commands/typecmds.c:1429 +#: commands/user.c:134 commands/user.c:148 commands/user.c:157 +#: commands/user.c:166 commands/user.c:175 commands/user.c:184 +#: commands/user.c:193 commands/user.c:202 commands/user.c:211 +#: commands/user.c:220 commands/user.c:229 commands/user.c:238 +#: commands/user.c:247 commands/user.c:555 commands/user.c:563 +#: commands/user.c:571 commands/user.c:579 commands/user.c:587 +#: commands/user.c:595 commands/user.c:603 commands/user.c:611 +#: commands/user.c:620 commands/user.c:628 commands/user.c:636 +#: parser/parse_utilcmd.c:399 replication/pgoutput/pgoutput.c:108 +#: replication/pgoutput/pgoutput.c:129 replication/walsender.c:801 +#: replication/walsender.c:812 replication/walsender.c:822 #, c-format msgid "conflicting or redundant options" msgstr "конфликтующие или избыточные параметры" @@ -3761,24 +3533,25 @@ msgstr "права по 
умолчанию нельзя определить д msgid "cannot use IN SCHEMA clause when using GRANT/REVOKE ON SCHEMAS" msgstr "предложение IN SCHEMA нельзя использовать в GRANT/REVOKE ON SCHEMAS" -#: catalog/aclchk.c:1521 catalog/objectaddress.c:1390 commands/analyze.c:390 -#: commands/copy.c:4671 commands/sequence.c:1625 commands/tablecmds.c:5530 -#: commands/tablecmds.c:5691 commands/tablecmds.c:5748 -#: commands/tablecmds.c:5862 commands/tablecmds.c:5916 -#: commands/tablecmds.c:6008 commands/tablecmds.c:6164 -#: commands/tablecmds.c:8385 commands/tablecmds.c:8661 -#: commands/tablecmds.c:9078 commands/trigger.c:739 parser/analyze.c:2308 -#: parser/parse_relation.c:2698 parser/parse_relation.c:2760 -#: parser/parse_target.c:1002 parser/parse_type.c:127 utils/adt/acl.c:2823 -#: utils/adt/ruleutils.c:2259 +#: catalog/aclchk.c:1521 catalog/objectaddress.c:1389 commands/analyze.c:399 +#: commands/copy.c:4774 commands/sequence.c:1700 commands/tablecmds.c:5635 +#: commands/tablecmds.c:5782 commands/tablecmds.c:5839 +#: commands/tablecmds.c:5912 commands/tablecmds.c:6006 +#: commands/tablecmds.c:6065 commands/tablecmds.c:6190 +#: commands/tablecmds.c:6244 commands/tablecmds.c:6336 +#: commands/tablecmds.c:6492 commands/tablecmds.c:8721 +#: commands/tablecmds.c:8997 commands/tablecmds.c:9432 commands/trigger.c:817 +#: parser/analyze.c:2310 parser/parse_relation.c:2733 +#: parser/parse_relation.c:2795 parser/parse_target.c:1002 +#: parser/parse_type.c:127 utils/adt/acl.c:2828 utils/adt/ruleutils.c:2356 #, c-format msgid "column \"%s\" of relation \"%s\" does not exist" msgstr "столбец \"%s\" в таблице \"%s\" не существует" -#: catalog/aclchk.c:1787 catalog/objectaddress.c:1230 commands/sequence.c:1137 -#: commands/tablecmds.c:229 commands/tablecmds.c:12733 utils/adt/acl.c:2059 -#: utils/adt/acl.c:2089 utils/adt/acl.c:2121 utils/adt/acl.c:2153 -#: utils/adt/acl.c:2181 utils/adt/acl.c:2211 +#: catalog/aclchk.c:1787 catalog/objectaddress.c:1229 commands/sequence.c:1138 +#: commands/tablecmds.c:229 commands/tablecmds.c:13107 utils/adt/acl.c:2061 +#: utils/adt/acl.c:2091 utils/adt/acl.c:2123 utils/adt/acl.c:2155 +#: utils/adt/acl.c:2183 utils/adt/acl.c:2213 #, c-format msgid "\"%s\" is not a sequence" msgstr "\"%s\" - это не последовательность" @@ -3829,7 +3602,7 @@ msgstr "для типов массивов нельзя определить п msgid "Set the privileges of the element type instead." msgstr "Вместо этого установите права для типа элемента." 
-#: catalog/aclchk.c:3127 catalog/objectaddress.c:1520 commands/typecmds.c:3165 +#: catalog/aclchk.c:3127 catalog/objectaddress.c:1519 #, c-format msgid "\"%s\" is not a domain" msgstr "\"%s\" - это не домен" @@ -3849,9 +3622,9 @@ msgstr "нет доступа к столбцу %s" msgid "permission denied for relation %s" msgstr "нет доступа к отношению %s" -#: catalog/aclchk.c:3300 commands/sequence.c:599 commands/sequence.c:833 -#: commands/sequence.c:875 commands/sequence.c:916 commands/sequence.c:1671 -#: commands/sequence.c:1735 +#: catalog/aclchk.c:3300 commands/sequence.c:600 commands/sequence.c:834 +#: commands/sequence.c:876 commands/sequence.c:917 commands/sequence.c:1791 +#: commands/sequence.c:1855 #, c-format msgid "permission denied for sequence %s" msgstr "нет доступа к последовательности %s" @@ -3913,8 +3686,8 @@ msgstr "нет доступа к преобразованию %s" #: catalog/aclchk.c:3324 #, c-format -msgid "permission denied for statistics %s" -msgstr "нет доступа к статистике %s" +msgid "permission denied for statistics object %s" +msgstr "нет доступа к объекту статистики %s" #: catalog/aclchk.c:3326 #, c-format @@ -4028,8 +3801,8 @@ msgstr "нужно быть владельцем преобразования %s #: catalog/aclchk.c:3376 #, c-format -msgid "must be owner of statistics %s" -msgstr "нужно быть владельцем статистики %s" +msgid "must be owner of statistics object %s" +msgstr "нужно быть владельцем объекта статистики %s" #: catalog/aclchk.c:3378 #, c-format @@ -4127,7 +3900,7 @@ msgstr "обёртка сторонних данных с OID %u не сущес msgid "foreign server with OID %u does not exist" msgstr "сторонний сервер с OID %u не существует" -#: catalog/aclchk.c:4246 catalog/aclchk.c:4585 utils/cache/typcache.c:238 +#: catalog/aclchk.c:4246 catalog/aclchk.c:4585 utils/cache/typcache.c:240 #, c-format msgid "type with OID %u does not exist" msgstr "тип с OID %u не существует" @@ -4162,7 +3935,7 @@ msgstr "конфигурация текстового поиска с OID %u н msgid "event trigger with OID %u does not exist" msgstr "событийный триггер с OID %u не существует" -#: catalog/aclchk.c:5003 commands/collationcmds.c:319 +#: catalog/aclchk.c:5003 commands/collationcmds.c:348 #, c-format msgid "collation with OID %u does not exist" msgstr "правило сортировки с OID %u не существует" @@ -4177,20 +3950,20 @@ msgstr "преобразование с OID %u не существует" msgid "extension with OID %u does not exist" msgstr "расширение с OID %u не существует" -#: catalog/aclchk.c:5097 commands/publicationcmds.c:760 +#: catalog/aclchk.c:5097 commands/publicationcmds.c:733 #, c-format msgid "publication with OID %u does not exist" msgstr "публикация с OID %u не существует" -#: catalog/aclchk.c:5123 commands/subscriptioncmds.c:943 +#: catalog/aclchk.c:5123 commands/subscriptioncmds.c:1098 #, c-format msgid "subscription with OID %u does not exist" msgstr "подписка с OID %u не существует" #: catalog/aclchk.c:5149 #, c-format -msgid "statistics with OID %u do not exist" -msgstr "статистика с OID %u не существует" +msgid "statistics object with OID %u does not exist" +msgstr "объект статистики с OID %u не существует" #: catalog/dependency.c:613 #, c-format @@ -4265,7 +4038,7 @@ msgstr[0] "удаление распространяется на ещё %d об msgstr[1] "удаление распространяется на ещё %d объекта" msgstr[2] "удаление распространяется на ещё %d объектов" -#: catalog/dependency.c:1622 +#: catalog/dependency.c:1635 #, c-format msgid "constant of the type %s cannot be used here" msgstr "константу типа %s здесь использовать нельзя" @@ -4280,13 +4053,13 @@ msgstr "нет прав для создания отношения \"%s.%s\"" 
msgid "System catalog modifications are currently disallowed." msgstr "Изменение системного каталога в текущем состоянии запрещено." -#: catalog/heap.c:421 commands/tablecmds.c:1619 commands/tablecmds.c:2122 -#: commands/tablecmds.c:5141 +#: catalog/heap.c:421 commands/tablecmds.c:1649 commands/tablecmds.c:2159 +#: commands/tablecmds.c:5225 #, c-format msgid "tables can have at most %d columns" msgstr "максимальное число столбцов в таблице: %d" -#: catalog/heap.c:438 commands/tablecmds.c:5399 +#: catalog/heap.c:438 commands/tablecmds.c:5498 #, c-format msgid "column name \"%s\" conflicts with a system column name" msgstr "имя столбца \"%s\" конфликтует с системным столбцом" @@ -4314,27 +4087,27 @@ msgstr "" "сортировки" #: catalog/heap.c:581 commands/createas.c:204 commands/createas.c:501 -#: commands/indexcmds.c:1136 commands/tablecmds.c:12999 commands/view.c:103 -#: regex/regc_pg_locale.c:263 utils/adt/formatting.c:1537 -#: utils/adt/formatting.c:1656 utils/adt/formatting.c:1775 utils/adt/like.c:184 -#: utils/adt/selfuncs.c:5506 utils/adt/varlena.c:1417 utils/adt/varlena.c:1860 +#: commands/indexcmds.c:1152 commands/tablecmds.c:13403 commands/view.c:103 +#: regex/regc_pg_locale.c:263 utils/adt/formatting.c:1546 +#: utils/adt/formatting.c:1670 utils/adt/formatting.c:1795 utils/adt/like.c:184 +#: utils/adt/selfuncs.c:5590 utils/adt/varlena.c:1417 utils/adt/varlena.c:1854 #, c-format msgid "Use the COLLATE clause to set the collation explicitly." msgstr "Задайте правило сортировки явно в предложении COLLATE." -#: catalog/heap.c:1066 catalog/index.c:806 commands/tablecmds.c:2903 +#: catalog/heap.c:1067 catalog/index.c:806 commands/tablecmds.c:2943 #, c-format msgid "relation \"%s\" already exists" msgstr "отношение \"%s\" уже существует" -#: catalog/heap.c:1082 catalog/pg_type.c:410 catalog/pg_type.c:717 +#: catalog/heap.c:1083 catalog/pg_type.c:410 catalog/pg_type.c:732 #: commands/typecmds.c:239 commands/typecmds.c:788 commands/typecmds.c:1139 -#: commands/typecmds.c:1350 commands/typecmds.c:2106 +#: commands/typecmds.c:1371 commands/typecmds.c:2127 #, c-format msgid "type \"%s\" already exists" msgstr "тип \"%s\" уже существует" -#: catalog/heap.c:1083 +#: catalog/heap.c:1084 #, c-format msgid "" "A relation has an associated type of the same name, so you must use a name " @@ -4343,42 +4116,42 @@ msgstr "" "С отношением уже связан тип с таким же именем; выберите имя, не " "конфликтующее с существующими типами." 
-#: catalog/heap.c:1112 +#: catalog/heap.c:1113 #, c-format msgid "pg_class heap OID value not set when in binary upgrade mode" msgstr "значение OID кучи в pg_class не задано в режиме двоичного обновления" -#: catalog/heap.c:2078 +#: catalog/heap.c:2080 #, c-format msgid "cannot add NO INHERIT constraint to partitioned table \"%s\"" msgstr "" "добавить ограничение NO INHERIT к секционированной таблице \"%s\" нельзя" -#: catalog/heap.c:2336 +#: catalog/heap.c:2338 #, c-format msgid "check constraint \"%s\" already exists" msgstr "ограничение-проверка \"%s\" уже существует" -#: catalog/heap.c:2504 catalog/pg_constraint.c:649 commands/tablecmds.c:6522 +#: catalog/heap.c:2506 catalog/pg_constraint.c:649 commands/tablecmds.c:6852 #, c-format msgid "constraint \"%s\" for relation \"%s\" already exists" msgstr "ограничение \"%s\" для отношения \"%s\" уже существует" -#: catalog/heap.c:2511 +#: catalog/heap.c:2513 #, c-format msgid "" "constraint \"%s\" conflicts with non-inherited constraint on relation \"%s\"" msgstr "" "ограничение \"%s\" конфликтует с ненаследуемым ограничением таблицы \"%s\"" -#: catalog/heap.c:2522 +#: catalog/heap.c:2524 #, c-format msgid "" "constraint \"%s\" conflicts with inherited constraint on relation \"%s\"" msgstr "" "ограничение \"%s\" конфликтует с наследуемым ограничением таблицы \"%s\"" -#: catalog/heap.c:2532 +#: catalog/heap.c:2534 #, c-format msgid "" "constraint \"%s\" conflicts with NOT VALID constraint on relation \"%s\"" @@ -4386,39 +4159,39 @@ msgstr "" "ограничение \"%s\" конфликтует с непроверенным (NOT VALID) ограничением " "таблицы \"%s\"" -#: catalog/heap.c:2537 +#: catalog/heap.c:2539 #, c-format msgid "merging constraint \"%s\" with inherited definition" msgstr "слияние ограничения \"%s\" с унаследованным определением" -#: catalog/heap.c:2653 +#: catalog/heap.c:2655 #, c-format msgid "cannot use column references in default expression" msgstr "в выражении по умолчанию нельзя ссылаться на столбцы" -#: catalog/heap.c:2678 rewrite/rewriteHandler.c:1097 +#: catalog/heap.c:2680 rewrite/rewriteHandler.c:1176 #, c-format msgid "column \"%s\" is of type %s but default expression is of type %s" msgstr "столбец \"%s\" имеет тип %s, но тип выражения по умолчанию %s" -#: catalog/heap.c:2683 commands/prepare.c:384 parser/parse_node.c:430 +#: catalog/heap.c:2685 commands/prepare.c:384 parser/parse_node.c:430 #: parser/parse_target.c:590 parser/parse_target.c:840 -#: parser/parse_target.c:850 rewrite/rewriteHandler.c:1102 +#: parser/parse_target.c:850 rewrite/rewriteHandler.c:1181 #, c-format msgid "You will need to rewrite or cast the expression." msgstr "Перепишите выражение или преобразуйте его тип." -#: catalog/heap.c:2730 +#: catalog/heap.c:2732 #, c-format msgid "only table \"%s\" can be referenced in check constraint" msgstr "в ограничении-проверке можно ссылаться только на таблицу \"%s\"" -#: catalog/heap.c:3039 +#: catalog/heap.c:2972 #, c-format msgid "unsupported ON COMMIT and foreign key combination" msgstr "неподдерживаемое сочетание внешнего ключа с ON COMMIT" -#: catalog/heap.c:3040 +#: catalog/heap.c:2973 #, c-format msgid "" "Table \"%s\" references \"%s\", but they do not have the same ON COMMIT " @@ -4426,28 +4199,28 @@ msgid "" msgstr "" "Таблица \"%s\" ссылается на \"%s\", и для них задан разный режим ON COMMIT." 
-#: catalog/heap.c:3045 +#: catalog/heap.c:2978 #, c-format msgid "cannot truncate a table referenced in a foreign key constraint" msgstr "опустошить таблицу, на которую ссылается внешний ключ, нельзя" -#: catalog/heap.c:3046 +#: catalog/heap.c:2979 #, c-format msgid "Table \"%s\" references \"%s\"." msgstr "Таблица \"%s\" ссылается на \"%s\"." -#: catalog/heap.c:3048 +#: catalog/heap.c:2981 #, c-format msgid "Truncate table \"%s\" at the same time, or use TRUNCATE ... CASCADE." msgstr "" "Опустошите таблицу \"%s\" параллельно или используйте TRUNCATE ... CASCADE." -#: catalog/index.c:210 parser/parse_utilcmd.c:1541 parser/parse_utilcmd.c:1627 +#: catalog/index.c:213 parser/parse_utilcmd.c:1692 parser/parse_utilcmd.c:1778 #, c-format msgid "multiple primary keys for table \"%s\" are not allowed" msgstr "таблица \"%s\" не может иметь несколько первичных ключей" -#: catalog/index.c:228 +#: catalog/index.c:231 #, c-format msgid "primary keys cannot be expressions" msgstr "первичные ключи не могут быть выражениями" @@ -4470,8 +4243,8 @@ msgstr "" msgid "shared indexes cannot be created after initdb" msgstr "нельзя создать разделяемые индексы после initdb" -#: catalog/index.c:798 commands/createas.c:250 commands/sequence.c:149 -#: parser/parse_utilcmd.c:197 +#: catalog/index.c:798 commands/createas.c:250 commands/sequence.c:152 +#: parser/parse_utilcmd.c:203 #, c-format msgid "relation \"%s\" already exists, skipping" msgstr "отношение \"%s\" уже существует, пропускается" @@ -4487,398 +4260,396 @@ msgstr "" msgid "DROP INDEX CONCURRENTLY must be first action in transaction" msgstr "DROP INDEX CONCURRENTLY должен быть первым действием в транзакции" -#: catalog/index.c:2019 +#: catalog/index.c:2023 #, c-format msgid "building index \"%s\" on table \"%s\"" msgstr "создание индекса \"%s\" для таблицы \"%s\"" -#: catalog/index.c:3331 +#: catalog/index.c:3335 #, c-format msgid "cannot reindex temporary tables of other sessions" msgstr "переиндексировать временные таблицы других сеансов нельзя" -#: catalog/index.c:3462 +#: catalog/index.c:3466 #, c-format msgid "index \"%s\" was reindexed" msgstr "индекс \"%s\" был перестроен" -#: catalog/index.c:3464 commands/vacuumlazy.c:1356 commands/vacuumlazy.c:1432 -#: commands/vacuumlazy.c:1621 commands/vacuumlazy.c:1831 -#, c-format -msgid "%s." -msgstr "%s." 
- -#: catalog/namespace.c:234 catalog/namespace.c:432 catalog/namespace.c:526 -#: commands/trigger.c:4789 +#: catalog/namespace.c:235 catalog/namespace.c:433 catalog/namespace.c:527 +#: commands/trigger.c:5148 #, c-format msgid "cross-database references are not implemented: \"%s.%s.%s\"" msgstr "ссылки между базами не реализованы: \"%s.%s.%s\"" -#: catalog/namespace.c:291 +#: catalog/namespace.c:292 #, c-format msgid "temporary tables cannot specify a schema name" msgstr "для временных таблиц имя схемы не указывается" -#: catalog/namespace.c:370 +#: catalog/namespace.c:371 #, c-format msgid "could not obtain lock on relation \"%s.%s\"" msgstr "не удалось получить блокировку таблицы \"%s.%s\"" -#: catalog/namespace.c:375 commands/lockcmds.c:145 +#: catalog/namespace.c:376 commands/lockcmds.c:145 #, c-format msgid "could not obtain lock on relation \"%s\"" msgstr "не удалось получить блокировку таблицы \"%s\"" -#: catalog/namespace.c:399 parser/parse_relation.c:1158 +#: catalog/namespace.c:400 parser/parse_relation.c:1158 #, c-format msgid "relation \"%s.%s\" does not exist" msgstr "отношение \"%s.%s\" не существует" -#: catalog/namespace.c:404 parser/parse_relation.c:1176 -#: parser/parse_relation.c:1184 utils/adt/regproc.c:1036 +#: catalog/namespace.c:405 parser/parse_relation.c:1171 +#: parser/parse_relation.c:1179 #, c-format msgid "relation \"%s\" does not exist" msgstr "отношение \"%s\" не существует" -#: catalog/namespace.c:472 catalog/namespace.c:2882 commands/extension.c:1462 -#: commands/extension.c:1468 +#: catalog/namespace.c:473 catalog/namespace.c:2992 commands/extension.c:1466 +#: commands/extension.c:1472 #, c-format msgid "no schema has been selected to create in" msgstr "схема для создания объектов не выбрана" -#: catalog/namespace.c:624 catalog/namespace.c:637 +#: catalog/namespace.c:625 catalog/namespace.c:638 #, c-format msgid "cannot create relations in temporary schemas of other sessions" msgstr "во временных схемах других сеансов нельзя создавать отношения" -#: catalog/namespace.c:628 +#: catalog/namespace.c:629 #, c-format msgid "cannot create temporary relation in non-temporary schema" msgstr "создавать временные отношения можно только во временных схемах" -#: catalog/namespace.c:643 +#: catalog/namespace.c:644 #, c-format msgid "only temporary relations may be created in temporary schemas" msgstr "во временных схемах можно создавать только временные отношения" -#: catalog/namespace.c:2138 +#: catalog/namespace.c:2182 #, c-format -msgid "statistics \"%s\" do not exist" -msgstr "статистика \"%s\" не существует" +msgid "statistics object \"%s\" does not exist" +msgstr "объект статистики \"%s\" не существует" -#: catalog/namespace.c:2195 +#: catalog/namespace.c:2305 #, c-format msgid "text search parser \"%s\" does not exist" msgstr "анализатор текстового поиска \"%s\" не существует" -#: catalog/namespace.c:2321 +#: catalog/namespace.c:2431 #, c-format msgid "text search dictionary \"%s\" does not exist" msgstr "словарь текстового поиска \"%s\" не существует" -#: catalog/namespace.c:2448 +#: catalog/namespace.c:2558 #, c-format msgid "text search template \"%s\" does not exist" msgstr "шаблон текстового поиска \"%s\" не существует" -#: catalog/namespace.c:2574 commands/tsearchcmds.c:1185 +#: catalog/namespace.c:2684 commands/tsearchcmds.c:1185 #: utils/cache/ts_cache.c:612 #, c-format msgid "text search configuration \"%s\" does not exist" msgstr "конфигурация текстового поиска \"%s\" не существует" -#: catalog/namespace.c:2687 parser/parse_expr.c:791 
parser/parse_target.c:1192 +#: catalog/namespace.c:2797 parser/parse_expr.c:789 parser/parse_target.c:1192 #, c-format msgid "cross-database references are not implemented: %s" msgstr "ссылки между базами не реализованы: %s" -#: catalog/namespace.c:2693 parser/parse_expr.c:798 parser/parse_target.c:1199 -#: gram.y:14221 gram.y:15642 +#: catalog/namespace.c:2803 parser/parse_expr.c:796 parser/parse_target.c:1199 +#: gram.y:14300 gram.y:15721 #, c-format msgid "improper qualified name (too many dotted names): %s" msgstr "неверное полное имя (слишком много компонентов): %s" -#: catalog/namespace.c:2824 +#: catalog/namespace.c:2934 #, c-format msgid "cannot move objects into or out of temporary schemas" msgstr "перемещать объекты в/из внутренних схем нельзя" -#: catalog/namespace.c:2830 +#: catalog/namespace.c:2940 #, c-format msgid "cannot move objects into or out of TOAST schema" msgstr "перемещать объекты в/из схем TOAST нельзя" -#: catalog/namespace.c:2903 commands/schemacmds.c:256 commands/schemacmds.c:334 +#: catalog/namespace.c:3013 commands/schemacmds.c:256 commands/schemacmds.c:334 #: commands/tablecmds.c:891 #, c-format msgid "schema \"%s\" does not exist" msgstr "схема \"%s\" не существует" -#: catalog/namespace.c:2934 +#: catalog/namespace.c:3044 #, c-format msgid "improper relation name (too many dotted names): %s" msgstr "неверное имя отношения (слишком много компонентов): %s" -#: catalog/namespace.c:3444 +#: catalog/namespace.c:3538 #, c-format msgid "collation \"%s\" for encoding \"%s\" does not exist" msgstr "правило сортировки \"%s\" для кодировки \"%s\" не существует" -#: catalog/namespace.c:3499 +#: catalog/namespace.c:3593 #, c-format msgid "conversion \"%s\" does not exist" msgstr "преобразование \"%s\" не существует" -#: catalog/namespace.c:3707 +#: catalog/namespace.c:3801 #, c-format msgid "permission denied to create temporary tables in database \"%s\"" msgstr "нет прав для создания временных таблиц в базе \"%s\"" -#: catalog/namespace.c:3723 +#: catalog/namespace.c:3817 #, c-format msgid "cannot create temporary tables during recovery" msgstr "создавать временные таблицы в процессе восстановления нельзя" -#: catalog/namespace.c:3729 +#: catalog/namespace.c:3823 #, c-format -msgid "cannot create temporary tables in parallel mode" -msgstr "создавать временные таблицы в параллельном режиме нельзя" +msgid "cannot create temporary tables during a parallel operation" +msgstr "создавать временные таблицы во время параллельных операций нельзя" -#: catalog/namespace.c:3978 commands/tablespace.c:1169 commands/variable.c:64 -#: utils/misc/guc.c:9961 utils/misc/guc.c:10039 +#: catalog/namespace.c:4072 commands/tablespace.c:1169 commands/variable.c:64 +#: utils/misc/guc.c:9983 utils/misc/guc.c:10061 #, c-format msgid "List syntax is invalid." msgstr "Ошибка синтаксиса в списке." 
-#: catalog/objectaddress.c:1238 catalog/pg_publication.c:57 +#: catalog/objectaddress.c:1237 catalog/pg_publication.c:66 #: commands/lockcmds.c:93 commands/policy.c:94 commands/policy.c:391 -#: commands/policy.c:480 commands/tablecmds.c:223 commands/tablecmds.c:265 -#: commands/tablecmds.c:1477 commands/tablecmds.c:4662 -#: commands/tablecmds.c:8501 +#: commands/policy.c:481 commands/tablecmds.c:223 commands/tablecmds.c:265 +#: commands/tablecmds.c:1507 commands/tablecmds.c:4722 +#: commands/tablecmds.c:8837 #, c-format msgid "\"%s\" is not a table" msgstr "\"%s\" - это не таблица" -#: catalog/objectaddress.c:1245 commands/tablecmds.c:235 -#: commands/tablecmds.c:4692 commands/tablecmds.c:12738 commands/view.c:141 +#: catalog/objectaddress.c:1244 commands/tablecmds.c:235 +#: commands/tablecmds.c:4752 commands/tablecmds.c:13112 commands/view.c:141 #, c-format msgid "\"%s\" is not a view" msgstr "\"%s\" - это не представление" -#: catalog/objectaddress.c:1252 commands/matview.c:174 commands/tablecmds.c:241 -#: commands/tablecmds.c:12743 +#: catalog/objectaddress.c:1251 commands/matview.c:174 commands/tablecmds.c:241 +#: commands/tablecmds.c:13117 #, c-format msgid "\"%s\" is not a materialized view" msgstr "\"%s\" - это не материализованное представление" -#: catalog/objectaddress.c:1259 commands/tablecmds.c:259 -#: commands/tablecmds.c:4695 commands/tablecmds.c:12748 +#: catalog/objectaddress.c:1258 commands/tablecmds.c:259 +#: commands/tablecmds.c:4755 commands/tablecmds.c:13122 #, c-format msgid "\"%s\" is not a foreign table" msgstr "\"%s\" - это не сторонняя таблица" -#: catalog/objectaddress.c:1300 +#: catalog/objectaddress.c:1299 #, c-format msgid "must specify relation and object name" msgstr "необходимо указать имя отношения и объекта" -#: catalog/objectaddress.c:1376 catalog/objectaddress.c:1429 +#: catalog/objectaddress.c:1375 catalog/objectaddress.c:1428 #, c-format msgid "column name must be qualified" msgstr "имя столбца нужно указать в полной форме" -#: catalog/objectaddress.c:1472 +#: catalog/objectaddress.c:1471 #, c-format msgid "default value for column \"%s\" of relation \"%s\" does not exist" msgstr "" "значение по умолчанию для столбца \"%s\" отношения \"%s\" не существует" -#: catalog/objectaddress.c:1509 commands/functioncmds.c:128 -#: commands/tablecmds.c:251 commands/typecmds.c:3233 parser/parse_type.c:226 -#: parser/parse_type.c:255 parser/parse_type.c:794 utils/adt/acl.c:4357 -#: utils/adt/regproc.c:1227 +#: catalog/objectaddress.c:1508 commands/functioncmds.c:128 +#: commands/tablecmds.c:251 commands/typecmds.c:3269 parser/parse_type.c:226 +#: parser/parse_type.c:255 parser/parse_type.c:794 utils/adt/acl.c:4362 #, c-format msgid "type \"%s\" does not exist" msgstr "тип \"%s\" не существует" -#: catalog/objectaddress.c:1626 +#: catalog/objectaddress.c:1625 #, c-format msgid "operator %d (%s, %s) of %s does not exist" msgstr "оператор %d (%s, %s) из семейства %s не существует" -#: catalog/objectaddress.c:1657 +#: catalog/objectaddress.c:1656 #, c-format msgid "function %d (%s, %s) of %s does not exist" msgstr "функция %d (%s, %s) из семейства %s не существует" -#: catalog/objectaddress.c:1708 catalog/objectaddress.c:1734 +#: catalog/objectaddress.c:1707 catalog/objectaddress.c:1733 #, c-format msgid "user mapping for user \"%s\" on server \"%s\" does not exist" msgstr "сопоставление для пользователя \"%s\" на сервере \"%s\" не существует" -#: catalog/objectaddress.c:1723 commands/foreigncmds.c:428 +#: catalog/objectaddress.c:1722 commands/foreigncmds.c:428 #: 
commands/foreigncmds.c:1004 commands/foreigncmds.c:1377 #: foreign/foreign.c:688 #, c-format msgid "server \"%s\" does not exist" msgstr "сервер \"%s\" не существует" -#: catalog/objectaddress.c:1790 +#: catalog/objectaddress.c:1789 #, c-format msgid "publication relation \"%s\" in publication \"%s\" does not exist" msgstr "публикуемое отношение \"%s\" в публикации \"%s\" не существует" -#: catalog/objectaddress.c:1852 +#: catalog/objectaddress.c:1851 #, c-format -msgid "unrecognized default ACL object type %c" -msgstr "нераспознанный тип объекта ACL по умолчанию: %c" +msgid "unrecognized default ACL object type \"%c\"" +msgstr "нераспознанный тип объекта ACL по умолчанию: \"%c\"" -#: catalog/objectaddress.c:1853 +#: catalog/objectaddress.c:1852 #, c-format -msgid "Valid object types are \"r\", \"S\", \"f\", \"T\" and \"s\"." -msgstr "Допустимые типы объектов: \"r\", \"S\", \"f\", \"T\" и \"s\"." +msgid "Valid object types are \"%c\", \"%c\", \"%c\", \"%c\", \"%c\"." +msgstr "Допустимые типы объектов: \"%c\", \"%c\", \"%c\", \"%c\", \"%c\"." -#: catalog/objectaddress.c:1899 +#: catalog/objectaddress.c:1903 #, c-format msgid "default ACL for user \"%s\" in schema \"%s\" on %s does not exist" msgstr "" "ACL по умолчанию для пользователя \"%s\" в схеме \"%s\" для объекта %s не " "существует" -#: catalog/objectaddress.c:1904 +#: catalog/objectaddress.c:1908 #, c-format msgid "default ACL for user \"%s\" on %s does not exist" msgstr "" "ACL по умолчанию для пользователя \"%s\" и для объекта %s не существует" -#: catalog/objectaddress.c:1931 catalog/objectaddress.c:1989 -#: catalog/objectaddress.c:2044 +#: catalog/objectaddress.c:1935 catalog/objectaddress.c:1993 +#: catalog/objectaddress.c:2048 #, c-format msgid "name or argument lists may not contain nulls" msgstr "списки имён и аргументов не должны содержать NULL" -#: catalog/objectaddress.c:1965 +#: catalog/objectaddress.c:1969 #, c-format msgid "unsupported object type \"%s\"" msgstr "неподдерживаемый тип объекта: \"%s\"" -#: catalog/objectaddress.c:1985 catalog/objectaddress.c:2003 -#: catalog/objectaddress.c:2141 +#: catalog/objectaddress.c:1989 catalog/objectaddress.c:2007 +#: catalog/objectaddress.c:2145 #, c-format msgid "name list length must be exactly %d" msgstr "длина списка имён должна быть равна %d" -#: catalog/objectaddress.c:2007 +#: catalog/objectaddress.c:2011 #, c-format msgid "large object OID may not be null" msgstr "OID большого объекта не может быть NULL" -#: catalog/objectaddress.c:2016 catalog/objectaddress.c:2077 -#: catalog/objectaddress.c:2084 +#: catalog/objectaddress.c:2020 catalog/objectaddress.c:2081 +#: catalog/objectaddress.c:2088 #, c-format msgid "name list length must be at least %d" msgstr "длина списка аргументов должна быть не меньше %d" -#: catalog/objectaddress.c:2070 catalog/objectaddress.c:2090 +#: catalog/objectaddress.c:2074 catalog/objectaddress.c:2094 #, c-format msgid "argument list length must be exactly %d" msgstr "длина списка аргументов должна быть равна %d" -#: catalog/objectaddress.c:2316 libpq/be-fsstubs.c:350 +#: catalog/objectaddress.c:2320 libpq/be-fsstubs.c:350 #, c-format msgid "must be owner of large object %u" msgstr "нужно быть владельцем большого объекта %u" -#: catalog/objectaddress.c:2331 commands/functioncmds.c:1419 +#: catalog/objectaddress.c:2335 commands/functioncmds.c:1440 #, c-format msgid "must be owner of type %s or type %s" msgstr "это разрешено только владельцу типа %s или %s" -#: catalog/objectaddress.c:2381 catalog/objectaddress.c:2398 +#: 
catalog/objectaddress.c:2385 catalog/objectaddress.c:2402 #, c-format msgid "must be superuser" msgstr "требуются права суперпользователя" -#: catalog/objectaddress.c:2388 +#: catalog/objectaddress.c:2392 #, c-format msgid "must have CREATEROLE privilege" msgstr "требуется право CREATEROLE" -#: catalog/objectaddress.c:2467 +#: catalog/objectaddress.c:2471 #, c-format msgid "unrecognized object type \"%s\"" msgstr "нераспознанный тип объекта \"%s\"" -#: catalog/objectaddress.c:2662 +#: catalog/objectaddress.c:2666 #, c-format msgid " column %s" msgstr " столбец %s" -#: catalog/objectaddress.c:2668 +#: catalog/objectaddress.c:2672 #, c-format msgid "function %s" msgstr "функция %s" -#: catalog/objectaddress.c:2673 +#: catalog/objectaddress.c:2677 #, c-format msgid "type %s" msgstr "тип %s" -#: catalog/objectaddress.c:2703 +#: catalog/objectaddress.c:2707 #, c-format msgid "cast from %s to %s" msgstr "приведение %s к %s" -#: catalog/objectaddress.c:2723 +#: catalog/objectaddress.c:2727 #, c-format msgid "collation %s" msgstr "правило сортировки %s" -#: catalog/objectaddress.c:2747 +#: catalog/objectaddress.c:2751 #, c-format msgid "constraint %s on %s" msgstr "ограничение %s в отношении %s" -#: catalog/objectaddress.c:2753 +#: catalog/objectaddress.c:2757 #, c-format msgid "constraint %s" msgstr "ограничение %s" -#: catalog/objectaddress.c:2770 +#: catalog/objectaddress.c:2774 #, c-format msgid "conversion %s" msgstr "преобразование %s" -#: catalog/objectaddress.c:2807 +#: catalog/objectaddress.c:2811 #, c-format msgid "default for %s" msgstr "значение по умолчанию, %s" -#: catalog/objectaddress.c:2816 +#: catalog/objectaddress.c:2820 #, c-format msgid "language %s" msgstr "язык %s" -#: catalog/objectaddress.c:2821 +#: catalog/objectaddress.c:2825 #, c-format msgid "large object %u" msgstr "большой объект %u" -#: catalog/objectaddress.c:2826 +#: catalog/objectaddress.c:2830 #, c-format msgid "operator %s" msgstr "оператор %s" -#: catalog/objectaddress.c:2858 +#: catalog/objectaddress.c:2862 #, c-format msgid "operator class %s for access method %s" msgstr "класс операторов %s для метода доступа %s" +#: catalog/objectaddress.c:2885 +#, c-format +msgid "access method %s" +msgstr "метод доступа %s" + #. translator: %d is the operator strategy (a number), the #. first two %s's are data type names, the third %s is the #. description of the operator family, and the last %s is the #. textual form of the operator with arguments. -#: catalog/objectaddress.c:2908 +#: catalog/objectaddress.c:2927 #, c-format msgid "operator %d (%s, %s) of %s: %s" msgstr "оператор %d (%s, %s) из семейства \"%s\": %s" @@ -4887,228 +4658,230 @@ msgstr "оператор %d (%s, %s) из семейства \"%s\": %s" #. are data type names, the third %s is the description of the #. operator family, and the last %s is the textual form of the #. function with arguments. 
-#: catalog/objectaddress.c:2958 +#: catalog/objectaddress.c:2977 #, c-format msgid "function %d (%s, %s) of %s: %s" msgstr "функция %d (%s, %s) из семейства \"%s\": %s" -#: catalog/objectaddress.c:2998 +#: catalog/objectaddress.c:3017 #, c-format msgid "rule %s on " msgstr "правило %s для отношения: " -#: catalog/objectaddress.c:3020 -#, c-format -msgid "transform for %s language %s" -msgstr "преобразование для %s, языка %s" - -#: catalog/objectaddress.c:3054 +#: catalog/objectaddress.c:3052 #, c-format msgid "trigger %s on " msgstr "триггер %s в отношении: " -#: catalog/objectaddress.c:3071 +#: catalog/objectaddress.c:3069 #, c-format msgid "schema %s" msgstr "схема %s" -#: catalog/objectaddress.c:3084 +#: catalog/objectaddress.c:3086 +#, c-format +msgid "statistics object %s" +msgstr "объект статистики %s" + +#: catalog/objectaddress.c:3102 #, c-format msgid "text search parser %s" msgstr "анализатор текстового поиска %s" -#: catalog/objectaddress.c:3099 +#: catalog/objectaddress.c:3117 #, c-format msgid "text search dictionary %s" msgstr "словарь текстового поиска %s" -#: catalog/objectaddress.c:3114 +#: catalog/objectaddress.c:3132 #, c-format msgid "text search template %s" msgstr "шаблон текстового поиска %s" -#: catalog/objectaddress.c:3129 +#: catalog/objectaddress.c:3147 #, c-format msgid "text search configuration %s" msgstr "конфигурация текстового поиска %s" -#: catalog/objectaddress.c:3137 +#: catalog/objectaddress.c:3155 #, c-format msgid "role %s" msgstr "роль %s" -#: catalog/objectaddress.c:3150 +#: catalog/objectaddress.c:3168 #, c-format msgid "database %s" msgstr "база данных %s" -#: catalog/objectaddress.c:3162 +#: catalog/objectaddress.c:3180 #, c-format msgid "tablespace %s" msgstr "табличное пространство %s" -#: catalog/objectaddress.c:3171 +#: catalog/objectaddress.c:3189 #, c-format msgid "foreign-data wrapper %s" msgstr "обёртка сторонних данных %s" -#: catalog/objectaddress.c:3180 +#: catalog/objectaddress.c:3198 #, c-format msgid "server %s" msgstr "сервер %s" -#: catalog/objectaddress.c:3208 +#: catalog/objectaddress.c:3226 #, c-format msgid "user mapping for %s on server %s" msgstr "сопоставление для пользователя %s на сервере %s" -#: catalog/objectaddress.c:3243 +#: catalog/objectaddress.c:3261 #, c-format msgid "default privileges on new relations belonging to role %s" msgstr "права по умолчанию для новых отношений, принадлежащих роли %s" -#: catalog/objectaddress.c:3248 +#: catalog/objectaddress.c:3266 #, c-format msgid "default privileges on new sequences belonging to role %s" msgstr "" "права по умолчанию для новых последовательностей, принадлежащих роли %s" -#: catalog/objectaddress.c:3253 +#: catalog/objectaddress.c:3271 #, c-format msgid "default privileges on new functions belonging to role %s" msgstr "права по умолчанию для новых функций, принадлежащих роли %s" -#: catalog/objectaddress.c:3258 +#: catalog/objectaddress.c:3276 #, c-format msgid "default privileges on new types belonging to role %s" msgstr "права по умолчанию для новых типов, принадлежащих роли %s" -#: catalog/objectaddress.c:3263 +#: catalog/objectaddress.c:3281 #, c-format msgid "default privileges on new schemas belonging to role %s" msgstr "права по умолчанию для новых схем, принадлежащих роли %s" -#: catalog/objectaddress.c:3269 +#: catalog/objectaddress.c:3287 #, c-format msgid "default privileges belonging to role %s" msgstr "права по умолчанию для новых объектов, принадлежащих роли %s" -#: catalog/objectaddress.c:3277 +#: catalog/objectaddress.c:3295 #, c-format msgid " in 
schema %s" msgstr " в схеме %s" -#: catalog/objectaddress.c:3294 +#: catalog/objectaddress.c:3312 #, c-format msgid "extension %s" msgstr "расширение %s" -#: catalog/objectaddress.c:3307 +#: catalog/objectaddress.c:3325 #, c-format msgid "event trigger %s" msgstr "событийный триггер %s" -#: catalog/objectaddress.c:3339 +#: catalog/objectaddress.c:3357 #, c-format msgid "policy %s on " msgstr "политика %s отношения " -#: catalog/objectaddress.c:3357 -#, c-format -msgid "access method %s" -msgstr "метод доступа %s" - -#: catalog/objectaddress.c:3365 +#: catalog/objectaddress.c:3368 #, c-format msgid "publication %s" msgstr "публикация %s" -#: catalog/objectaddress.c:3385 +#: catalog/objectaddress.c:3388 #, c-format msgid "publication table %s in publication %s" msgstr "публикуемая таблица %s в публикации %s" -#: catalog/objectaddress.c:3393 +#: catalog/objectaddress.c:3396 #, c-format msgid "subscription %s" msgstr "подписка %s" -#: catalog/objectaddress.c:3453 +#: catalog/objectaddress.c:3414 +#, c-format +msgid "transform for %s language %s" +msgstr "преобразование для %s, языка %s" + +#: catalog/objectaddress.c:3475 #, c-format msgid "table %s" msgstr "таблица %s" -#: catalog/objectaddress.c:3457 +#: catalog/objectaddress.c:3479 #, c-format msgid "index %s" msgstr "индекс %s" -#: catalog/objectaddress.c:3461 +#: catalog/objectaddress.c:3483 #, c-format msgid "sequence %s" msgstr "последовательность %s" -#: catalog/objectaddress.c:3465 +#: catalog/objectaddress.c:3487 #, c-format msgid "toast table %s" msgstr "TOAST-таблица %s" -#: catalog/objectaddress.c:3469 +#: catalog/objectaddress.c:3491 #, c-format msgid "view %s" msgstr "представление %s" -#: catalog/objectaddress.c:3473 +#: catalog/objectaddress.c:3495 #, c-format msgid "materialized view %s" msgstr "материализованное представление %s" -#: catalog/objectaddress.c:3477 +#: catalog/objectaddress.c:3499 #, c-format msgid "composite type %s" msgstr "составной тип %s" -#: catalog/objectaddress.c:3481 +#: catalog/objectaddress.c:3503 #, c-format msgid "foreign table %s" msgstr "сторонняя таблица %s" -#: catalog/objectaddress.c:3486 +#: catalog/objectaddress.c:3508 #, c-format msgid "relation %s" msgstr "отношение %s" -#: catalog/objectaddress.c:3523 +#: catalog/objectaddress.c:3545 #, c-format msgid "operator family %s for access method %s" msgstr "семейство операторов %s для метода доступа %s" -#: catalog/objectaddress.c:4902 +#: catalog/objectaddress.c:4914 #, c-format msgid "%s in publication %s" msgstr "%s в публикации %s" -#: catalog/partition.c:741 +#: catalog/partition.c:728 +#, c-format +msgid "empty range bound specified for partition \"%s\"" +msgstr "для секции \"%s\" заданы границы, образующие пустой диапазон" + +#: catalog/partition.c:730 #, c-format -msgid "cannot create range partition with empty range" -msgstr "создать диапазонную секцию с пустым диапазоном нельзя" +msgid "Specified lower bound %s is greater than or equal to upper bound %s." +msgstr "Указанная нижняя граница %s больше или равна верхней границе %s." 
-#: catalog/partition.c:835 +#: catalog/partition.c:814 #, c-format msgid "partition \"%s\" would overlap partition \"%s\"" msgstr "секция \"%s\" пересекается с секцией \"%s\"" -#: catalog/partition.c:943 catalog/partition.c:1092 commands/analyze.c:1446 -#: commands/tablecmds.c:8563 executor/execExprInterp.c:2814 -#: executor/execMain.c:3195 +#: catalog/partition.c:927 catalog/partition.c:1110 commands/analyze.c:1462 +#: commands/copy.c:2510 commands/tablecmds.c:8899 +#: executor/execExprInterp.c:2853 executor/execMain.c:1907 +#: executor/execMain.c:1985 executor/execMain.c:2033 executor/execMain.c:2143 +#: executor/execMain.c:3322 executor/nodeModifyTable.c:1533 msgid "could not convert row type" msgstr "не удалось преобразовать тип строки" -#: catalog/partition.c:1729 -#, c-format -msgid "range partition key of row contains null" -msgstr "ключ разбиения по диапазонам в строке таблицы содержит NULL" - #: catalog/pg_aggregate.c:125 #, c-format msgid "aggregates cannot have more than %d argument" @@ -5166,7 +4939,7 @@ msgstr "" msgid "return type of inverse transition function %s is not %s" msgstr "обратная функция перехода %s должна возвращать тип %s" -#: catalog/pg_aggregate.c:351 executor/nodeWindowAgg.c:2294 +#: catalog/pg_aggregate.c:351 executor/nodeWindowAgg.c:2298 #, c-format msgid "" "strictness of aggregate's forward and inverse transition functions must match" @@ -5187,10 +4960,9 @@ msgstr "комбинирующая функция %s должна возвращ #: catalog/pg_aggregate.c:436 #, c-format -msgid "" -"combine function with \"%s\" transition type must not be declared STRICT" +msgid "combine function with transition type %s must not be declared STRICT" msgstr "" -"комбинирующая функция с переходным типом \"%s\" не должна объявляться как " +"комбинирующая функция с переходным типом %s не должна объявляться как " "строгая (STRICT)" #: catalog/pg_aggregate.c:455 @@ -5247,12 +5019,12 @@ msgstr "" "оператор сортировки можно указать только для агрегатных функций с одним " "аргументом" -#: catalog/pg_aggregate.c:810 commands/typecmds.c:1698 commands/typecmds.c:1749 -#: commands/typecmds.c:1780 commands/typecmds.c:1803 commands/typecmds.c:1824 -#: commands/typecmds.c:1851 commands/typecmds.c:1878 commands/typecmds.c:1955 -#: commands/typecmds.c:1997 parser/parse_func.c:365 parser/parse_func.c:394 -#: parser/parse_func.c:419 parser/parse_func.c:433 parser/parse_func.c:508 -#: parser/parse_func.c:519 parser/parse_func.c:1958 +#: catalog/pg_aggregate.c:810 commands/typecmds.c:1719 commands/typecmds.c:1770 +#: commands/typecmds.c:1801 commands/typecmds.c:1824 commands/typecmds.c:1845 +#: commands/typecmds.c:1872 commands/typecmds.c:1899 commands/typecmds.c:1976 +#: commands/typecmds.c:2018 parser/parse_func.c:369 parser/parse_func.c:398 +#: parser/parse_func.c:423 parser/parse_func.c:437 parser/parse_func.c:512 +#: parser/parse_func.c:523 parser/parse_func.c:1977 #, c-format msgid "function %s does not exist" msgstr "функция %s не существует" @@ -5274,23 +5046,23 @@ msgstr "" msgid "function %s requires run-time type coercion" msgstr "функции %s требуется приведение типов во время выполнения" -#: catalog/pg_collation.c:85 catalog/pg_collation.c:127 +#: catalog/pg_collation.c:93 catalog/pg_collation.c:140 #, c-format msgid "collation \"%s\" already exists, skipping" msgstr "правило сортировки \"%s\" уже существует, пропускается" -#: catalog/pg_collation.c:87 +#: catalog/pg_collation.c:95 #, c-format msgid "collation \"%s\" for encoding \"%s\" already exists, skipping" msgstr "" "правило сортировки \"%s\" для 
кодировки \"%s\" уже существует, пропускается" -#: catalog/pg_collation.c:95 catalog/pg_collation.c:134 +#: catalog/pg_collation.c:103 catalog/pg_collation.c:147 #, c-format msgid "collation \"%s\" already exists" msgstr "правило сортировки \"%s\" уже существует" -#: catalog/pg_collation.c:97 +#: catalog/pg_collation.c:105 #, c-format msgid "collation \"%s\" for encoding \"%s\" already exists" msgstr "правило сортировки \"%s\" для кодировки \"%s\" уже существует" @@ -5300,25 +5072,25 @@ msgstr "правило сортировки \"%s\" для кодировки \"% msgid "constraint \"%s\" for domain %s already exists" msgstr "ограничение \"%s\" для домена %s уже существует" -#: catalog/pg_constraint.c:788 +#: catalog/pg_constraint.c:788 catalog/pg_constraint.c:864 #, c-format msgid "table \"%s\" has multiple constraints named \"%s\"" msgstr "таблица \"%s\" содержит несколько ограничений с именем \"%s\"" -#: catalog/pg_constraint.c:800 +#: catalog/pg_constraint.c:800 catalog/pg_constraint.c:898 #, c-format msgid "constraint \"%s\" for table \"%s\" does not exist" msgstr "ограничение \"%s\" для таблицы \"%s\" не существует" -#: catalog/pg_constraint.c:846 +#: catalog/pg_constraint.c:944 #, c-format -msgid "domain \"%s\" has multiple constraints named \"%s\"" -msgstr "домен \"%s\" содержит несколько ограничений с именем \"%s\"" +msgid "domain %s has multiple constraints named \"%s\"" +msgstr "домен %s содержит несколько ограничений с именем \"%s\"" -#: catalog/pg_constraint.c:858 +#: catalog/pg_constraint.c:956 #, c-format -msgid "constraint \"%s\" for domain \"%s\" does not exist" -msgstr "ограничение \"%s\" для домена \"%s\" не существует" +msgid "constraint \"%s\" for domain %s does not exist" +msgstr "ограничение \"%s\" для домена %s не существует" #: catalog/pg_conversion.c:66 #, c-format @@ -5330,7 +5102,7 @@ msgstr "преобразование \"%s\" уже существует" msgid "default conversion for %s to %s already exists" msgstr "преобразование по умолчанию из %s в %s уже существует" -#: catalog/pg_depend.c:163 commands/extension.c:3213 +#: catalog/pg_depend.c:163 commands/extension.c:3218 #, c-format msgid "%s is already a member of extension \"%s\"" msgstr "%s уже относится к расширению \"%s\"" @@ -5450,7 +5222,7 @@ msgid "operator cannot be its own negator or sort operator" msgstr "" "оператор не может быть обратным к себе или собственным оператором сортировки" -#: catalog/pg_proc.c:131 parser/parse_func.c:1982 parser/parse_func.c:2022 +#: catalog/pg_proc.c:131 parser/parse_func.c:2001 parser/parse_func.c:2041 #, c-format msgid "functions cannot have more than %d argument" msgid_plural "functions cannot have more than %d arguments" @@ -5554,43 +5326,58 @@ msgstr "SQL-функции не могут возвращать тип %s" msgid "SQL functions cannot have arguments of type %s" msgstr "SQL-функции не могут иметь аргументы типа %s" -#: catalog/pg_proc.c:968 executor/functions.c:1428 +#: catalog/pg_proc.c:968 executor/functions.c:1429 #, c-format msgid "SQL function \"%s\"" msgstr "SQL-функция \"%s\"" +#: catalog/pg_publication.c:57 commands/trigger.c:197 +#, c-format +msgid "\"%s\" is a partitioned table" +msgstr "\"%s\" - секционированная таблица" + #: catalog/pg_publication.c:59 #, c-format -msgid "Only tables can be added to publications." -msgstr "В публикации можно добавлять только таблицы." +msgid "Adding partitioned tables to publications is not supported." +msgstr "Добавление секционированных таблиц в публикации не поддерживается." 
-#: catalog/pg_publication.c:65 +#: catalog/pg_publication.c:60 #, c-format -msgid "\"%s\" is a system table" +msgid "You can add the table partitions individually." +msgstr "Но вы можете добавить секции таблицы по одной." + +#: catalog/pg_publication.c:68 +#, c-format +msgid "Only tables can be added to publications." +msgstr "В публикации можно добавлять только таблицы." + +#: catalog/pg_publication.c:74 +#, c-format +msgid "\"%s\" is a system table" msgstr "\"%s\" - это системная таблица" -#: catalog/pg_publication.c:67 +#: catalog/pg_publication.c:76 #, c-format msgid "System tables cannot be added to publications." msgstr "Системные таблицы нельзя добавлять в публикации." -#: catalog/pg_publication.c:73 +#: catalog/pg_publication.c:82 #, c-format msgid "table \"%s\" cannot be replicated" msgstr "реплицировать таблицу \"%s\" нельзя" -#: catalog/pg_publication.c:75 +#: catalog/pg_publication.c:84 #, c-format msgid "Temporary and unlogged relations cannot be replicated." msgstr "Временные и нежурналируемые отношения не поддерживают репликацию." -#: catalog/pg_publication.c:134 +#: catalog/pg_publication.c:166 #, c-format msgid "relation \"%s\" is already member of publication \"%s\"" msgstr "отношение \"%s\" уже включено в публикацию \"%s\"" -#: catalog/pg_publication.c:361 catalog/pg_publication.c:382 -#: commands/publicationcmds.c:430 commands/publicationcmds.c:729 +#: catalog/pg_publication.c:393 catalog/pg_publication.c:414 +#: commands/publicationcmds.c:401 commands/publicationcmds.c:702 #, c-format msgid "publication \"%s\" does not exist" msgstr "публикация \"%s\" не существует" @@ -5670,8 +5457,8 @@ msgstr "" "изменить владельца объектов, принадлежащих роли %s, нельзя, так как они " "нужны системе баз данных" -#: catalog/pg_subscription.c:163 commands/subscriptioncmds.c:565 -#: commands/subscriptioncmds.c:734 commands/subscriptioncmds.c:912 +#: catalog/pg_subscription.c:176 commands/subscriptioncmds.c:633 +#: commands/subscriptioncmds.c:843 commands/subscriptioncmds.c:1067 #, c-format msgid "subscription \"%s\" does not exist" msgstr "подписка \"%s\" не существует" @@ -5709,13 +5496,13 @@ msgstr "выравнивание \"%c\" не подходит для типа п msgid "fixed-size types must have storage PLAIN" msgstr "для типов постоянного размера применим только режим хранения PLAIN" -#: catalog/pg_type.c:781 +#: catalog/pg_type.c:801 #, c-format msgid "could not form array type name for type \"%s\"" msgstr "не удалось сформировать имя типа массива для типа \"%s\"" -#: catalog/toasting.c:105 commands/indexcmds.c:395 commands/tablecmds.c:4674 -#: commands/tablecmds.c:12626 +#: catalog/toasting.c:105 commands/indexcmds.c:399 commands/tablecmds.c:4734 +#: commands/tablecmds.c:13000 #, c-format msgid "\"%s\" is not a table or materialized view" msgstr "\"%s\" - это не таблица и не материализованное представление" @@ -5836,12 +5623,12 @@ msgstr "сервер \"%s\" уже существует" msgid "language \"%s\" already exists" msgstr "язык \"%s\" уже существует" -#: commands/alter.c:96 commands/publicationcmds.c:189 +#: commands/alter.c:96 commands/publicationcmds.c:170 #, c-format msgid "publication \"%s\" already exists" msgstr "публикация \"%s\" уже существует" -#: commands/alter.c:99 commands/subscriptioncmds.c:308 +#: commands/alter.c:99 commands/subscriptioncmds.c:358 #, c-format msgid "subscription \"%s\" already exists" msgstr "подписка \"%s\" уже существует" @@ -5853,8 +5640,8 @@ msgstr "преобразование \"%s\" уже существует в сх #: commands/alter.c:126 #, c-format -msgid "statistics \"%s\" already exists in 
schema \"%s\"" -msgstr "статистика \"%s\" уже существует в схеме \"%s\"" +msgid "statistics object \"%s\" already exists in schema \"%s\"" +msgstr "объект статистики \"%s\" уже существует в схеме \"%s\"" #: commands/alter.c:130 #, c-format @@ -5881,7 +5668,7 @@ msgstr "конфигурация текстового поиска \"%s\" уже msgid "must be superuser to rename %s" msgstr "переименовать \"%s\" может только суперпользователь" -#: commands/alter.c:677 +#: commands/alter.c:709 #, c-format msgid "must be superuser to set schema of %s" msgstr "для назначения схемы объекта %s нужно быть суперпользователем" @@ -5906,7 +5693,7 @@ msgstr "метод доступа \"%s\" уже существует" msgid "must be superuser to drop access methods" msgstr "для удаления методов доступа нужно быть суперпользователем" -#: commands/amcmds.c:174 commands/indexcmds.c:163 commands/indexcmds.c:502 +#: commands/amcmds.c:174 commands/indexcmds.c:163 commands/indexcmds.c:515 #: commands/opclasscmds.c:363 commands/opclasscmds.c:777 #, c-format msgid "access method \"%s\" does not exist" @@ -5919,65 +5706,70 @@ msgstr "не указана функция-обработчик" #: commands/amcmds.c:262 commands/event_trigger.c:243 #: commands/foreigncmds.c:487 commands/proclang.c:117 commands/proclang.c:289 -#: commands/trigger.c:538 parser/parse_clause.c:986 +#: commands/trigger.c:616 parser/parse_clause.c:982 #, c-format msgid "function %s must return type %s" msgstr "функция %s должна возвращать тип %s" -#: commands/analyze.c:151 +#: commands/analyze.c:156 #, c-format msgid "skipping analyze of \"%s\" --- lock not available" msgstr "анализ \"%s\" пропускается --- блокировка недоступна" -#: commands/analyze.c:168 +#: commands/analyze.c:173 #, c-format msgid "skipping \"%s\" --- only superuser can analyze it" msgstr "" "\"%s\" пропускается --- только суперпользователь может анализировать этот " "объект" -#: commands/analyze.c:172 +#: commands/analyze.c:177 #, c-format msgid "skipping \"%s\" --- only superuser or database owner can analyze it" msgstr "" "\"%s\" пропускается --- только суперпользователь или владелец БД может " "анализировать этот объект" -#: commands/analyze.c:176 +#: commands/analyze.c:181 #, c-format msgid "skipping \"%s\" --- only table or database owner can analyze it" msgstr "" "\"%s\" пропускается --- только владелец таблицы или БД может анализировать " "этот объект" -#: commands/analyze.c:236 +#: commands/analyze.c:241 #, c-format msgid "skipping \"%s\" --- cannot analyze this foreign table" msgstr "\"%s\" пропускается --- анализировать эту стороннюю таблицу нельзя" -#: commands/analyze.c:253 +#: commands/analyze.c:258 #, c-format msgid "skipping \"%s\" --- cannot analyze non-tables or special system tables" msgstr "" "\"%s\" пропускается --- анализировать не таблицы или специальные системные " "таблицы нельзя" -#: commands/analyze.c:334 +#: commands/analyze.c:339 #, c-format msgid "analyzing \"%s.%s\" inheritance tree" msgstr "анализируется дерево наследования \"%s.%s\"" -#: commands/analyze.c:339 +#: commands/analyze.c:344 #, c-format msgid "analyzing \"%s.%s\"" msgstr "анализируется \"%s.%s\"" -#: commands/analyze.c:668 +#: commands/analyze.c:404 +#, c-format +msgid "column \"%s\" of relation \"%s\" appears more than once" +msgstr "столбец \"%s\" отношения \"%s\" указан неоднократно" + +#: commands/analyze.c:684 #, c-format msgid "automatic analyze of table \"%s.%s.%s\" system usage: %s" msgstr "автоматический анализ таблицы \"%s.%s.%s\"; нагрузка системы: %s" -#: commands/analyze.c:1220 +#: commands/analyze.c:1236 #, c-format msgid "" "\"%s\": scanned %d 
of %u pages, containing %.0f live rows and %.0f dead " @@ -5987,7 +5779,7 @@ msgstr "" "%.0f, \"мёртвых\" строк: %.0f; строк в выборке: %d, примерное общее число " "строк: %.0f" -#: commands/analyze.c:1300 +#: commands/analyze.c:1316 #, c-format msgid "" "skipping analyze of \"%s.%s\" inheritance tree --- this inheritance tree " @@ -5996,7 +5788,7 @@ msgstr "" "пропускается анализ дерева наследования \"%s.%s\" --- это дерево " "наследования не содержит дочерних таблиц" -#: commands/analyze.c:1398 +#: commands/analyze.c:1414 #, c-format msgid "" "skipping analyze of \"%s.%s\" inheritance tree --- this inheritance tree " @@ -6005,22 +5797,22 @@ msgstr "" "пропускается анализ дерева наследования \"%s.%s\" --- это дерево " "наследования не содержит анализируемых дочерних таблиц" -#: commands/async.c:555 +#: commands/async.c:558 #, c-format msgid "channel name cannot be empty" msgstr "имя канала не может быть пустым" -#: commands/async.c:560 +#: commands/async.c:563 #, c-format msgid "channel name too long" msgstr "слишком длинное имя канала" -#: commands/async.c:567 +#: commands/async.c:570 #, c-format msgid "payload string too long" msgstr "слишком длинная строка сообщения-нагрузки" -#: commands/async.c:753 +#: commands/async.c:756 #, c-format msgid "" "cannot PREPARE a transaction that has executed LISTEN, UNLISTEN, or NOTIFY" @@ -6028,17 +5820,17 @@ msgstr "" "выполнить PREPARE для транзакции с командами LISTEN, UNLISTEN или NOTIFY " "нельзя" -#: commands/async.c:856 +#: commands/async.c:859 #, c-format msgid "too many notifications in the NOTIFY queue" msgstr "слишком много уведомлений в очереди NOTIFY" -#: commands/async.c:1486 +#: commands/async.c:1491 #, c-format msgid "NOTIFY queue is %.0f%% full" msgstr "очередь NOTIFY заполнена на %.0f%%" -#: commands/async.c:1488 +#: commands/async.c:1493 #, c-format msgid "" "The server process with PID %d is among those with the oldest transactions." @@ -6046,7 +5838,7 @@ msgstr "" "В число серверных процессов с самыми старыми транзакциями входит процесс с " "PID %d." 
-#: commands/async.c:1491 +#: commands/async.c:1496 #, c-format msgid "" "The NOTIFY queue cannot be emptied until that process ends its current " @@ -6065,7 +5857,7 @@ msgstr "кластеризовать временные таблицы друг msgid "there is no previously clustered index for table \"%s\"" msgstr "таблица \"%s\" ранее не кластеризовалась по какому-либо индексу" -#: commands/cluster.c:173 commands/tablecmds.c:9857 commands/tablecmds.c:11720 +#: commands/cluster.c:173 commands/tablecmds.c:10212 commands/tablecmds.c:12093 #, c-format msgid "index \"%s\" for table \"%s\" does not exist" msgstr "индекс \"%s\" для таблицы \"%s\" не существует" @@ -6080,7 +5872,7 @@ msgstr "кластеризовать разделяемый каталог не msgid "cannot vacuum temporary tables of other sessions" msgstr "очищать временные таблицы других сеансов нельзя" -#: commands/cluster.c:431 commands/tablecmds.c:11730 +#: commands/cluster.c:431 commands/tablecmds.c:12103 #, c-format msgid "\"%s\" is not an index for table \"%s\"" msgstr "\"%s\" не является индексом таблицы \"%s\"" @@ -6103,23 +5895,23 @@ msgstr "кластеризовать по частичному индексу \" msgid "cannot cluster on invalid index \"%s\"" msgstr "нельзя кластеризовать таблицу по неверному индексу \"%s\"" -#: commands/cluster.c:918 +#: commands/cluster.c:922 #, c-format msgid "clustering \"%s.%s\" using index scan on \"%s\"" msgstr "кластеризация \"%s.%s\" путём сканирования индекса \"%s\"" -#: commands/cluster.c:924 +#: commands/cluster.c:928 #, c-format msgid "clustering \"%s.%s\" using sequential scan and sort" msgstr "" "кластеризация \"%s.%s\" путём последовательного сканирования и сортировки" -#: commands/cluster.c:929 commands/vacuumlazy.c:491 +#: commands/cluster.c:933 commands/vacuumlazy.c:492 #, c-format msgid "vacuuming \"%s.%s\"" msgstr "очистка \"%s.%s\"" -#: commands/cluster.c:1084 +#: commands/cluster.c:1090 #, c-format msgid "" "\"%s\": found %.0f removable, %.0f nonremovable row versions in %u pages" @@ -6127,7 +5919,7 @@ msgstr "" "\"%s\": найдено удаляемых версий строк: %.0f, неудаляемых - %.0f, " "просмотрено страниц: %u" -#: commands/cluster.c:1088 +#: commands/cluster.c:1094 #, c-format msgid "" "%.0f dead row versions cannot be removed yet.\n" @@ -6136,83 +5928,73 @@ msgstr "" "В данный момент нельзя удалить \"мёртвых\" строк %.0f.\n" "%s." 
-#: commands/collationcmds.c:93 +#: commands/collationcmds.c:101 #, c-format msgid "collation attribute \"%s\" not recognized" msgstr "атрибут COLLATION \"%s\" не распознан" -#: commands/collationcmds.c:152 +#: commands/collationcmds.c:143 +#, c-format +msgid "collation \"default\" cannot be copied" +msgstr "правило сортировки \"default\" нельзя скопировать" + +#: commands/collationcmds.c:173 #, c-format msgid "unrecognized collation provider: %s" msgstr "нераспознанный поставщик правил сортировки: %s" -#: commands/collationcmds.c:161 +#: commands/collationcmds.c:182 #, c-format msgid "parameter \"lc_collate\" must be specified" msgstr "необходимо указать параметр \"lc_collate\"" -#: commands/collationcmds.c:166 +#: commands/collationcmds.c:187 #, c-format msgid "parameter \"lc_ctype\" must be specified" msgstr "необходимо указать параметр \"lc_ctype\"" -#: commands/collationcmds.c:217 +#: commands/collationcmds.c:246 #, c-format msgid "collation \"%s\" for encoding \"%s\" already exists in schema \"%s\"" msgstr "" "правило сортировки \"%s\" для кодировки \"%s\" уже существует в схеме \"%s\"" -#: commands/collationcmds.c:228 +#: commands/collationcmds.c:257 #, c-format msgid "collation \"%s\" already exists in schema \"%s\"" msgstr "правило сортировки \"%s\" уже существует в схеме \"%s\"" -#: commands/collationcmds.c:276 +#: commands/collationcmds.c:305 #, c-format msgid "changing version from %s to %s" msgstr "изменение версии с %s на %s" -#: commands/collationcmds.c:291 +#: commands/collationcmds.c:320 #, c-format msgid "version has not changed" msgstr "версия не была изменена" -#: commands/collationcmds.c:382 +#: commands/collationcmds.c:451 #, c-format msgid "could not convert locale name \"%s\" to language tag: %s" msgstr "не удалось получить из названия локали \"%s\" метку языка: %s" -#: commands/collationcmds.c:401 -#, c-format -msgid "could get display name for locale \"%s\": %s" -msgstr "не удалось получить отображаемое название локали \"%s\": %s" - -#: commands/collationcmds.c:432 +#: commands/collationcmds.c:512 #, c-format msgid "must be superuser to import system collations" msgstr "" "импортировать системные правила сортировки может только суперпользователь" -#: commands/collationcmds.c:439 commands/copy.c:1835 commands/copy.c:3027 +#: commands/collationcmds.c:535 commands/copy.c:1807 commands/copy.c:3130 #, c-format msgid "could not execute command \"%s\": %m" msgstr "не удалось выполнить команду \"%s\": %m" -#: commands/collationcmds.c:536 +#: commands/collationcmds.c:666 #, c-format msgid "no usable system locales were found" msgstr "пригодные системные локали не найдены" -#: commands/collationcmds.c:544 utils/mb/encnames.c:473 -#, c-format -msgid "encoding \"%s\" not supported by ICU" -msgstr "ICU не поддерживает кодировку \"%s\"" - -#: commands/collationcmds.c:588 commands/collationcmds.c:609 -#, c-format -msgid "could not get keyword values for locale \"%s\": %s" -msgstr "не удалось получить значения ключевых слов для локали \"%s\": %s" - #: commands/comment.c:61 commands/dbcommands.c:808 commands/dbcommands.c:996 #: commands/dbcommands.c:1100 commands/dbcommands.c:1290 #: commands/dbcommands.c:1513 commands/dbcommands.c:1627 @@ -6222,7 +6004,7 @@ msgstr "не удалось получить значения ключевых msgid "database \"%s\" does not exist" msgstr "база данных \"%s\" не существует" -#: commands/comment.c:100 commands/seclabel.c:117 parser/parse_utilcmd.c:824 +#: commands/comment.c:101 commands/seclabel.c:117 parser/parse_utilcmd.c:952 #, c-format msgid "" "\"%s\" is not a table, 
view, materialized view, composite type, or foreign " @@ -6231,12 +6013,12 @@ msgstr "" "\"%s\" - это не таблица, представление, мат. представление, составной тип " "или сторонняя таблица" -#: commands/constraint.c:60 utils/adt/ri_triggers.c:2715 +#: commands/constraint.c:60 utils/adt/ri_triggers.c:2712 #, c-format msgid "function \"%s\" was not called by trigger manager" msgstr "функция \"%s\" была вызвана не менеджером триггеров" -#: commands/constraint.c:67 utils/adt/ri_triggers.c:2724 +#: commands/constraint.c:67 utils/adt/ri_triggers.c:2721 #, c-format msgid "function \"%s\" must be fired AFTER ROW" msgstr "функция \"%s\" должна запускаться в триггере AFTER для строк" @@ -6261,54 +6043,54 @@ msgstr "целевая кодировка \"%s\" не существует" msgid "encoding conversion function %s must return type %s" msgstr "функция преобразования кодировки %s должна возвращать тип %s" -#: commands/copy.c:370 commands/copy.c:404 +#: commands/copy.c:373 commands/copy.c:407 #, c-format msgid "COPY BINARY is not supported to stdout or from stdin" msgstr "COPY BINARY не поддерживает стандартный вывод (stdout) и ввод (stdin)" -#: commands/copy.c:504 +#: commands/copy.c:507 #, c-format msgid "could not write to COPY program: %m" msgstr "не удалось записать в канал программы COPY: %m" -#: commands/copy.c:509 +#: commands/copy.c:512 #, c-format msgid "could not write to COPY file: %m" msgstr "не удалось записать в файл COPY: %m" -#: commands/copy.c:522 +#: commands/copy.c:525 #, c-format msgid "connection lost during COPY to stdout" msgstr "в процессе вывода данных COPY в stdout потеряно соединение" -#: commands/copy.c:566 +#: commands/copy.c:569 #, c-format msgid "could not read from COPY file: %m" msgstr "не удалось прочитать файл COPY: %m" -#: commands/copy.c:582 commands/copy.c:603 commands/copy.c:607 -#: tcop/postgres.c:341 tcop/postgres.c:377 tcop/postgres.c:404 +#: commands/copy.c:585 commands/copy.c:606 commands/copy.c:610 +#: tcop/postgres.c:335 tcop/postgres.c:371 tcop/postgres.c:398 #, c-format msgid "unexpected EOF on client connection with an open transaction" msgstr "неожиданный обрыв соединения с клиентом при открытой транзакции" -#: commands/copy.c:620 +#: commands/copy.c:623 #, c-format msgid "COPY from stdin failed: %s" msgstr "ошибка при вводе данных COPY из stdin: %s" -#: commands/copy.c:636 +#: commands/copy.c:639 #, c-format msgid "unexpected message type 0x%02X during COPY from stdin" msgstr "неожиданный тип сообщения 0x%02X при вводе данных COPY из stdin" -#: commands/copy.c:798 +#: commands/copy.c:800 #, c-format msgid "must be superuser to COPY to or from an external program" msgstr "" "для использования COPY с внешними программами нужно быть суперпользователем" -#: commands/copy.c:799 commands/copy.c:805 +#: commands/copy.c:801 commands/copy.c:807 #, c-format msgid "" "Anyone can COPY to stdout or from stdin. psql's \\copy command also works " @@ -6317,263 +6099,263 @@ msgstr "" "Не имея административных прав, можно использовать COPY с stdout и stdin (а " "также команду psql \\copy)." -#: commands/copy.c:804 +#: commands/copy.c:806 #, c-format msgid "must be superuser to COPY to or from a file" msgstr "для использования COPY с файлами нужно быть суперпользователем" -#: commands/copy.c:871 +#: commands/copy.c:868 #, c-format msgid "COPY FROM not supported with row-level security" msgstr "COPY FROM не поддерживается с защитой на уровне строк." -#: commands/copy.c:872 +#: commands/copy.c:869 #, c-format msgid "Use INSERT statements instead." msgstr "Используйте операторы INSERT." 
-#: commands/copy.c:1058 +#: commands/copy.c:1054 #, c-format msgid "COPY format \"%s\" not recognized" msgstr "формат \"%s\" для COPY не распознан" -#: commands/copy.c:1138 commands/copy.c:1154 commands/copy.c:1169 -#: commands/copy.c:1191 +#: commands/copy.c:1134 commands/copy.c:1150 commands/copy.c:1165 +#: commands/copy.c:1187 #, c-format msgid "argument to option \"%s\" must be a list of column names" msgstr "аргументом параметра \"%s\" должен быть список имён столбцов" -#: commands/copy.c:1206 +#: commands/copy.c:1202 #, c-format msgid "argument to option \"%s\" must be a valid encoding name" msgstr "аргументом параметра \"%s\" должно быть название допустимой кодировки" -#: commands/copy.c:1213 commands/dbcommands.c:242 commands/dbcommands.c:1461 +#: commands/copy.c:1209 commands/dbcommands.c:242 commands/dbcommands.c:1461 #, c-format msgid "option \"%s\" not recognized" msgstr "параметр \"%s\" не распознан" -#: commands/copy.c:1225 +#: commands/copy.c:1221 #, c-format msgid "cannot specify DELIMITER in BINARY mode" msgstr "в режиме BINARY нельзя указывать DELIMITER" -#: commands/copy.c:1230 +#: commands/copy.c:1226 #, c-format msgid "cannot specify NULL in BINARY mode" msgstr "в режиме BINARY нельзя указывать NULL" -#: commands/copy.c:1252 +#: commands/copy.c:1248 #, c-format msgid "COPY delimiter must be a single one-byte character" msgstr "разделитель для COPY должен быть однобайтным символом" -#: commands/copy.c:1259 +#: commands/copy.c:1255 #, c-format msgid "COPY delimiter cannot be newline or carriage return" msgstr "" "разделителем для COPY не может быть символ новой строки или возврата каретки" -#: commands/copy.c:1265 +#: commands/copy.c:1261 #, c-format msgid "COPY null representation cannot use newline or carriage return" msgstr "" "представление NULL для COPY не может включать символ новой строки или " "возврата каретки" -#: commands/copy.c:1282 +#: commands/copy.c:1278 #, c-format msgid "COPY delimiter cannot be \"%s\"" msgstr "\"%s\" не может быть разделителем для COPY" -#: commands/copy.c:1288 +#: commands/copy.c:1284 #, c-format msgid "COPY HEADER available only in CSV mode" msgstr "COPY HEADER можно использовать только в режиме CSV" -#: commands/copy.c:1294 +#: commands/copy.c:1290 #, c-format msgid "COPY quote available only in CSV mode" msgstr "определить кавычки для COPY можно только в режиме CSV" -#: commands/copy.c:1299 +#: commands/copy.c:1295 #, c-format msgid "COPY quote must be a single one-byte character" msgstr "символ кавычек для COPY должен быть однобайтным" -#: commands/copy.c:1304 +#: commands/copy.c:1300 #, c-format msgid "COPY delimiter and quote must be different" msgstr "символ кавычек для COPY должен отличаться от разделителя" -#: commands/copy.c:1310 +#: commands/copy.c:1306 #, c-format msgid "COPY escape available only in CSV mode" msgstr "определить спецсимвол для COPY можно только в режиме CSV" -#: commands/copy.c:1315 +#: commands/copy.c:1311 #, c-format msgid "COPY escape must be a single one-byte character" msgstr "спецсимвол для COPY должен быть однобайтным" -#: commands/copy.c:1321 +#: commands/copy.c:1317 #, c-format msgid "COPY force quote available only in CSV mode" msgstr "параметр force quote для COPY можно использовать только в режиме CSV" -#: commands/copy.c:1325 +#: commands/copy.c:1321 #, c-format msgid "COPY force quote only available using COPY TO" msgstr "параметр force quote для COPY можно использовать только с COPY TO" -#: commands/copy.c:1331 +#: commands/copy.c:1327 #, c-format msgid "COPY force not null available only in 
CSV mode" msgstr "" "параметр force not null для COPY можно использовать только в режиме CSV" -#: commands/copy.c:1335 +#: commands/copy.c:1331 #, c-format msgid "COPY force not null only available using COPY FROM" msgstr "параметр force not null для COPY можно использовать только с COPY FROM" -#: commands/copy.c:1341 +#: commands/copy.c:1337 #, c-format msgid "COPY force null available only in CSV mode" msgstr "параметр force null для COPY можно использовать только в режиме CSV" -#: commands/copy.c:1346 +#: commands/copy.c:1342 #, c-format msgid "COPY force null only available using COPY FROM" msgstr "параметр force null для COPY можно использовать только с COPY FROM" -#: commands/copy.c:1352 +#: commands/copy.c:1348 #, c-format msgid "COPY delimiter must not appear in the NULL specification" msgstr "разделитель для COPY не должен присутствовать в представлении NULL" -#: commands/copy.c:1359 +#: commands/copy.c:1355 #, c-format msgid "CSV quote character must not appear in the NULL specification" msgstr "символ кавычек в CSV не должен присутствовать в представлении NULL" -#: commands/copy.c:1420 +#: commands/copy.c:1416 #, c-format msgid "table \"%s\" does not have OIDs" msgstr "таблица \"%s\" не содержит OID" -#: commands/copy.c:1461 +#: commands/copy.c:1433 #, c-format msgid "COPY (query) WITH OIDS is not supported" msgstr "COPY (запрос) WITH OIDS не поддерживается" -#: commands/copy.c:1482 +#: commands/copy.c:1454 #, c-format msgid "DO INSTEAD NOTHING rules are not supported for COPY" msgstr "правила DO INSTEAD NOTHING не поддерживаются с COPY" -#: commands/copy.c:1496 +#: commands/copy.c:1468 #, c-format msgid "conditional DO INSTEAD rules are not supported for COPY" msgstr "условные правила DO INSTEAD не поддерживаются с COPY" -#: commands/copy.c:1500 +#: commands/copy.c:1472 #, c-format msgid "DO ALSO rules are not supported for the COPY" msgstr "правила DO ALSO не поддерживаются с COPY" -#: commands/copy.c:1505 +#: commands/copy.c:1477 #, c-format msgid "multi-statement DO INSTEAD rules are not supported for COPY" msgstr "составные правила DO INSTEAD не поддерживаются с COPY" -#: commands/copy.c:1515 +#: commands/copy.c:1487 #, c-format msgid "COPY (SELECT INTO) is not supported" msgstr "COPY (SELECT INTO) не поддерживается" -#: commands/copy.c:1532 +#: commands/copy.c:1504 #, c-format msgid "COPY query must have a RETURNING clause" msgstr "в запросе COPY должно быть предложение RETURNING" -#: commands/copy.c:1560 +#: commands/copy.c:1532 #, c-format msgid "relation referenced by COPY statement has changed" msgstr "отношение, задействованное в операторе COPY, изменилось" -#: commands/copy.c:1618 +#: commands/copy.c:1590 #, c-format msgid "FORCE_QUOTE column \"%s\" not referenced by COPY" msgstr "столбец FORCE_QUOTE \"%s\" не фигурирует в COPY" -#: commands/copy.c:1640 +#: commands/copy.c:1612 #, c-format msgid "FORCE_NOT_NULL column \"%s\" not referenced by COPY" msgstr "столбец FORCE_NOT_NULL \"%s\" не фигурирует в COPY" -#: commands/copy.c:1662 +#: commands/copy.c:1634 #, c-format msgid "FORCE_NULL column \"%s\" not referenced by COPY" msgstr "столбец FORCE_NULL \"%s\" не фигурирует в COPY" -#: commands/copy.c:1727 +#: commands/copy.c:1699 #, c-format msgid "could not close pipe to external command: %m" msgstr "не удалось закрыть канал сообщений с внешней командой: %m" -#: commands/copy.c:1731 +#: commands/copy.c:1703 #, c-format msgid "program \"%s\" failed" msgstr "сбой программы \"%s\"" -#: commands/copy.c:1781 +#: commands/copy.c:1753 #, c-format msgid "cannot copy from view 
\"%s\"" msgstr "копировать из представления \"%s\" нельзя" -#: commands/copy.c:1783 commands/copy.c:1789 commands/copy.c:1795 -#: commands/copy.c:1806 +#: commands/copy.c:1755 commands/copy.c:1761 commands/copy.c:1767 +#: commands/copy.c:1778 #, c-format msgid "Try the COPY (SELECT ...) TO variant." msgstr "Попробуйте вариацию COPY (SELECT ...) TO." -#: commands/copy.c:1787 +#: commands/copy.c:1759 #, c-format msgid "cannot copy from materialized view \"%s\"" msgstr "копировать из материализованного представления \"%s\" нельзя" -#: commands/copy.c:1793 +#: commands/copy.c:1765 #, c-format msgid "cannot copy from foreign table \"%s\"" msgstr "копировать из сторонней таблицы \"%s\" нельзя" -#: commands/copy.c:1799 +#: commands/copy.c:1771 #, c-format msgid "cannot copy from sequence \"%s\"" msgstr "копировать из последовательности \"%s\" нельзя" -#: commands/copy.c:1804 +#: commands/copy.c:1776 #, c-format msgid "cannot copy from partitioned table \"%s\"" msgstr "копировать из секционированной таблицы \"%s\" нельзя" -#: commands/copy.c:1810 +#: commands/copy.c:1782 #, c-format msgid "cannot copy from non-table relation \"%s\"" msgstr "копировать из отношения \"%s\", не являющегося таблицей, нельзя" -#: commands/copy.c:1850 +#: commands/copy.c:1822 #, c-format msgid "relative path not allowed for COPY to file" msgstr "при выполнении COPY в файл нельзя указывать относительный путь" -#: commands/copy.c:1862 +#: commands/copy.c:1843 #, c-format msgid "could not open file \"%s\" for writing: %m" msgstr "не удалось открыть файл \"%s\" для записи: %m" -#: commands/copy.c:1865 +#: commands/copy.c:1846 #, c-format msgid "" "COPY TO instructs the PostgreSQL server process to write a file. You may " @@ -6583,74 +6365,74 @@ msgstr "" "Возможно, на самом деле вам нужно клиентское средство, например, \\copy в " "psql." -#: commands/copy.c:1878 commands/copy.c:3058 +#: commands/copy.c:1859 commands/copy.c:3161 #, c-format msgid "\"%s\" is a directory" msgstr "\"%s\" - это каталог" -#: commands/copy.c:2201 +#: commands/copy.c:2182 #, c-format msgid "COPY %s, line %d, column %s" msgstr "COPY %s, строка %d, столбец %s" -#: commands/copy.c:2205 commands/copy.c:2252 +#: commands/copy.c:2186 commands/copy.c:2233 #, c-format msgid "COPY %s, line %d" msgstr "COPY %s, строка %d" -#: commands/copy.c:2216 +#: commands/copy.c:2197 #, c-format msgid "COPY %s, line %d, column %s: \"%s\"" msgstr "COPY %s, строка %d, столбец %s: \"%s\"" -#: commands/copy.c:2224 +#: commands/copy.c:2205 #, c-format msgid "COPY %s, line %d, column %s: null input" msgstr "COPY %s, строка %d, столбец %s: значение NULL" -#: commands/copy.c:2246 +#: commands/copy.c:2227 #, c-format msgid "COPY %s, line %d: \"%s\"" msgstr "COPY %s, строка %d: \"%s\"" -#: commands/copy.c:2340 +#: commands/copy.c:2321 #, c-format msgid "cannot copy to view \"%s\"" msgstr "копировать в представление \"%s\" нельзя" -#: commands/copy.c:2342 +#: commands/copy.c:2323 #, c-format msgid "To enable copying to a view, provide an INSTEAD OF INSERT trigger." msgstr "" "Чтобы представление допускало копирование данных в него, установите триггер " "INSTEAD OF INSERT." 
-#: commands/copy.c:2346 +#: commands/copy.c:2327 #, c-format msgid "cannot copy to materialized view \"%s\"" msgstr "копировать в материализованное представление \"%s\" нельзя" -#: commands/copy.c:2351 +#: commands/copy.c:2332 #, c-format msgid "cannot copy to foreign table \"%s\"" msgstr "копировать в стороннюю таблицу \"%s\" нельзя" -#: commands/copy.c:2356 +#: commands/copy.c:2337 #, c-format msgid "cannot copy to sequence \"%s\"" msgstr "копировать в последовательность \"%s\" нельзя" -#: commands/copy.c:2361 +#: commands/copy.c:2342 #, c-format msgid "cannot copy to non-table relation \"%s\"" msgstr "копировать в отношение \"%s\", не являющееся таблицей, нельзя" -#: commands/copy.c:2424 +#: commands/copy.c:2417 #, c-format msgid "cannot perform FREEZE because of prior transaction activity" msgstr "выполнить FREEZE нельзя из-за предыдущей активности в транзакции" -#: commands/copy.c:2430 +#: commands/copy.c:2423 #, c-format msgid "" "cannot perform FREEZE because the table was not created or truncated in the " @@ -6659,12 +6441,12 @@ msgstr "" "выполнить FREEZE нельзя, так как таблица не была создана или усечена в " "текущей подтранзакции" -#: commands/copy.c:2595 executor/nodeModifyTable.c:312 +#: commands/copy.c:2645 executor/nodeModifyTable.c:311 #, c-format msgid "cannot route inserted tuples to a foreign table" msgstr "направить вставляемые кортежи в стороннюю таблицу нельзя" -#: commands/copy.c:3045 +#: commands/copy.c:3148 #, c-format msgid "" "COPY FROM instructs the PostgreSQL server process to read a file. You may " @@ -6674,146 +6456,146 @@ msgstr "" "файла. Возможно, на самом деле вам нужно клиентское средство, например, " "\\copy в psql." -#: commands/copy.c:3078 +#: commands/copy.c:3181 #, c-format msgid "COPY file signature not recognized" msgstr "подпись COPY-файла не распознана" -#: commands/copy.c:3083 +#: commands/copy.c:3186 #, c-format msgid "invalid COPY file header (missing flags)" msgstr "неверный заголовок файла COPY (отсутствуют флаги)" -#: commands/copy.c:3089 +#: commands/copy.c:3192 #, c-format msgid "unrecognized critical flags in COPY file header" msgstr "не распознаны важные флаги в заголовке файла COPY" -#: commands/copy.c:3095 +#: commands/copy.c:3198 #, c-format msgid "invalid COPY file header (missing length)" msgstr "неверный заголовок файла COPY (отсутствует длина)" -#: commands/copy.c:3102 +#: commands/copy.c:3205 #, c-format msgid "invalid COPY file header (wrong length)" msgstr "неверный заголовок файла COPY (неправильная длина)" -#: commands/copy.c:3235 commands/copy.c:3942 commands/copy.c:4172 +#: commands/copy.c:3338 commands/copy.c:4045 commands/copy.c:4275 #, c-format msgid "extra data after last expected column" msgstr "лишние данные после содержимого последнего столбца" -#: commands/copy.c:3245 +#: commands/copy.c:3348 #, c-format msgid "missing data for OID column" msgstr "нет данных для столбца OID" -#: commands/copy.c:3251 +#: commands/copy.c:3354 #, c-format msgid "null OID in COPY data" msgstr "неверное значение OID (NULL) в данных COPY" -#: commands/copy.c:3261 commands/copy.c:3384 +#: commands/copy.c:3364 commands/copy.c:3487 #, c-format msgid "invalid OID in COPY data" msgstr "неверный OID в данных COPY" -#: commands/copy.c:3276 +#: commands/copy.c:3379 #, c-format msgid "missing data for column \"%s\"" msgstr "нет данных для столбца \"%s\"" -#: commands/copy.c:3359 +#: commands/copy.c:3462 #, c-format msgid "received copy data after EOF marker" msgstr "после маркера конца файла продолжаются данные COPY" -#: commands/copy.c:3366 
+#: commands/copy.c:3469 #, c-format msgid "row field count is %d, expected %d" msgstr "количество полей в строке: %d, ожидалось: %d" -#: commands/copy.c:3706 commands/copy.c:3723 +#: commands/copy.c:3809 commands/copy.c:3826 #, c-format msgid "literal carriage return found in data" msgstr "в данных обнаружен явный возврат каретки" -#: commands/copy.c:3707 commands/copy.c:3724 +#: commands/copy.c:3810 commands/copy.c:3827 #, c-format msgid "unquoted carriage return found in data" msgstr "в данных обнаружен возврат каретки не в кавычках" -#: commands/copy.c:3709 commands/copy.c:3726 +#: commands/copy.c:3812 commands/copy.c:3829 #, c-format msgid "Use \"\\r\" to represent carriage return." msgstr "Представьте возврат каретки как \"\\r\"." -#: commands/copy.c:3710 commands/copy.c:3727 +#: commands/copy.c:3813 commands/copy.c:3830 #, c-format msgid "Use quoted CSV field to represent carriage return." msgstr "Заключите возврат каретки в кавычки CSV." -#: commands/copy.c:3739 +#: commands/copy.c:3842 #, c-format msgid "literal newline found in data" msgstr "в данных обнаружен явный символ новой строки" -#: commands/copy.c:3740 +#: commands/copy.c:3843 #, c-format msgid "unquoted newline found in data" msgstr "в данных обнаружен явный символ новой строки не в кавычках" -#: commands/copy.c:3742 +#: commands/copy.c:3845 #, c-format msgid "Use \"\\n\" to represent newline." msgstr "Представьте символ новой строки как \"\\n\"." -#: commands/copy.c:3743 +#: commands/copy.c:3846 #, c-format msgid "Use quoted CSV field to represent newline." msgstr "Заключите символ новой строки в кавычки CSV." -#: commands/copy.c:3789 commands/copy.c:3825 +#: commands/copy.c:3892 commands/copy.c:3928 #, c-format msgid "end-of-copy marker does not match previous newline style" msgstr "маркер \"конец копии\" не соответствует предыдущему стилю новой строки" -#: commands/copy.c:3798 commands/copy.c:3814 +#: commands/copy.c:3901 commands/copy.c:3917 #, c-format msgid "end-of-copy marker corrupt" msgstr "маркер \"конец копии\" испорчен" -#: commands/copy.c:4256 +#: commands/copy.c:4359 #, c-format msgid "unterminated CSV quoted field" msgstr "незавершённое поле в кавычках CSV" -#: commands/copy.c:4333 commands/copy.c:4352 +#: commands/copy.c:4436 commands/copy.c:4455 #, c-format msgid "unexpected EOF in COPY data" msgstr "неожиданный конец данных COPY" -#: commands/copy.c:4342 +#: commands/copy.c:4445 #, c-format msgid "invalid field size" msgstr "неверный размер поля" -#: commands/copy.c:4365 +#: commands/copy.c:4468 #, c-format msgid "incorrect binary data format" msgstr "неверный двоичный формат данных" -#: commands/copy.c:4676 commands/indexcmds.c:1057 commands/tablecmds.c:1655 -#: commands/tablecmds.c:2150 commands/tablecmds.c:2573 -#: parser/parse_relation.c:3248 parser/parse_relation.c:3268 -#: utils/adt/tsvector_op.c:2561 +#: commands/copy.c:4779 commands/indexcmds.c:1073 commands/statscmds.c:183 +#: commands/tablecmds.c:1685 commands/tablecmds.c:2187 +#: commands/tablecmds.c:2613 parser/parse_relation.c:3287 +#: parser/parse_relation.c:3307 utils/adt/tsvector_op.c:2561 #, c-format msgid "column \"%s\" does not exist" msgstr "столбец \"%s\" не существует" -#: commands/copy.c:4683 commands/tablecmds.c:1681 commands/trigger.c:748 -#: parser/parse_target.c:1018 parser/parse_target.c:1029 +#: commands/copy.c:4786 commands/tablecmds.c:1711 commands/tablecmds.c:2213 +#: commands/trigger.c:826 parser/parse_target.c:1018 parser/parse_target.c:1029 #, c-format msgid "column \"%s\" specified more than once" msgstr "столбец 
\"%s\" указан неоднократно" @@ -6848,8 +6630,8 @@ msgstr "%d не является верным кодом кодировки" msgid "%s is not a valid encoding name" msgstr "%s не является верным названием кодировки" -#: commands/dbcommands.c:292 commands/dbcommands.c:1494 commands/user.c:289 -#: commands/user.c:665 +#: commands/dbcommands.c:292 commands/dbcommands.c:1494 commands/user.c:276 +#: commands/user.c:664 #, c-format msgid "invalid connection limit: %d" msgstr "неверный предел подключений: %d" @@ -7057,7 +6839,7 @@ msgstr "" "пространство по умолчанию для этой базы данных." #: commands/dbcommands.c:1355 commands/dbcommands.c:1900 -#: commands/dbcommands.c:2104 commands/dbcommands.c:2158 +#: commands/dbcommands.c:2104 commands/dbcommands.c:2159 #: commands/tablespace.c:604 #, c-format msgid "some useless files may be left behind in old database directory \"%s\"" @@ -7140,8 +6922,8 @@ msgstr "аргументом %s должно быть имя типа" msgid "invalid argument for %s: \"%s\"" msgstr "неверный аргумент для %s: \"%s\"" -#: commands/dropcmds.c:104 commands/functioncmds.c:1200 -#: utils/adt/ruleutils.c:2355 +#: commands/dropcmds.c:104 commands/functioncmds.c:1201 +#: utils/adt/ruleutils.c:2453 #, c-format msgid "\"%s\" is an aggregate function" msgstr "функция \"%s\" является агрегатной" @@ -7151,9 +6933,9 @@ msgstr "функция \"%s\" является агрегатной" msgid "Use DROP AGGREGATE to drop aggregate functions." msgstr "Используйте DROP AGGREGATE для удаления агрегатных функций." -#: commands/dropcmds.c:157 commands/sequence.c:430 commands/tablecmds.c:2657 -#: commands/tablecmds.c:2808 commands/tablecmds.c:2851 -#: commands/tablecmds.c:12103 tcop/utility.c:1168 +#: commands/dropcmds.c:157 commands/sequence.c:442 commands/tablecmds.c:2697 +#: commands/tablecmds.c:2848 commands/tablecmds.c:2891 +#: commands/tablecmds.c:12476 tcop/utility.c:1168 #, c-format msgid "relation \"%s\" does not exist, skipping" msgstr "отношение \"%s\" не существует, пропускается" @@ -7185,8 +6967,8 @@ msgstr "преобразование \"%s\" не существует, проп #: commands/dropcmds.c:292 #, c-format -msgid "extended statistics \"%s\" do not exist, skipping" -msgstr "расширенная статистика \"%s\" не существует, пропускается" +msgid "statistics object \"%s\" does not exist, skipping" +msgstr "объект статистики \"%s\" не существует, пропускается" #: commands/dropcmds.c:299 #, c-format @@ -7213,73 +6995,73 @@ msgstr "конфигурация текстового поиска \"%s\" не msgid "extension \"%s\" does not exist, skipping" msgstr "расширение \"%s\" не существует, пропускается" -#: commands/dropcmds.c:334 +#: commands/dropcmds.c:335 #, c-format msgid "function %s(%s) does not exist, skipping" msgstr "функция %s(%s) не существует, пропускается" -#: commands/dropcmds.c:346 +#: commands/dropcmds.c:348 #, c-format msgid "aggregate %s(%s) does not exist, skipping" msgstr "агрегатная функция %s(%s) не существует, пропускается" -#: commands/dropcmds.c:358 +#: commands/dropcmds.c:361 #, c-format msgid "operator %s does not exist, skipping" msgstr "оператор %s не существует, пропускается" -#: commands/dropcmds.c:364 +#: commands/dropcmds.c:367 #, c-format msgid "language \"%s\" does not exist, skipping" msgstr "язык \"%s\" не существует, пропускается" -#: commands/dropcmds.c:373 +#: commands/dropcmds.c:376 #, c-format msgid "cast from type %s to type %s does not exist, skipping" msgstr "приведение %s к типу %s не существует, пропускается" -#: commands/dropcmds.c:382 +#: commands/dropcmds.c:385 #, c-format msgid "transform for type %s language \"%s\" does not exist, skipping" msgstr 
"преобразование для типа %s, языка \"%s\" не существует, пропускается" -#: commands/dropcmds.c:390 +#: commands/dropcmds.c:393 #, c-format msgid "trigger \"%s\" for relation \"%s\" does not exist, skipping" msgstr "триггер \"%s\" для отношения \"%s\" не существует, пропускается" -#: commands/dropcmds.c:399 +#: commands/dropcmds.c:402 #, c-format msgid "policy \"%s\" for relation \"%s\" does not exist, skipping" msgstr "политика \"%s\" для отношения \"%s\" не существует, пропускается" -#: commands/dropcmds.c:406 +#: commands/dropcmds.c:409 #, c-format msgid "event trigger \"%s\" does not exist, skipping" msgstr "событийный триггер \"%s\" не существует, пропускается" -#: commands/dropcmds.c:412 +#: commands/dropcmds.c:415 #, c-format msgid "rule \"%s\" for relation \"%s\" does not exist, skipping" msgstr "правило \"%s\" для отношения \"%s\" не существует, пропускается" -#: commands/dropcmds.c:419 +#: commands/dropcmds.c:422 #, c-format msgid "foreign-data wrapper \"%s\" does not exist, skipping" msgstr "обёртка сторонних данных \"%s\" не существует, пропускается" -#: commands/dropcmds.c:423 +#: commands/dropcmds.c:426 #, c-format msgid "server \"%s\" does not exist, skipping" msgstr "сервер \"%s\" не существует, пропускается" -#: commands/dropcmds.c:432 +#: commands/dropcmds.c:435 #, c-format msgid "operator class \"%s\" does not exist for access method \"%s\", skipping" msgstr "" "класс операторов \"%s\" не существует для метода доступа \"%s\", пропускается" -#: commands/dropcmds.c:444 +#: commands/dropcmds.c:447 #, c-format msgid "" "operator family \"%s\" does not exist for access method \"%s\", skipping" @@ -7287,7 +7069,7 @@ msgstr "" "семейство операторов \"%s\" не существует для метода доступа \"%s\", " "пропускается" -#: commands/dropcmds.c:451 +#: commands/dropcmds.c:454 #, c-format msgid "publication \"%s\" does not exist, skipping" msgstr "публикация \"%s\" не существует, пропускается" @@ -7344,17 +7126,17 @@ msgstr "нет прав на изменение владельца событи msgid "The owner of an event trigger must be a superuser." msgstr "Владельцем событийного триггера должен быть суперпользователь." 
-#: commands/event_trigger.c:1446 +#: commands/event_trigger.c:1464 #, c-format msgid "%s can only be called in a sql_drop event trigger function" msgstr "%s можно вызывать только в событийной триггерной функции sql_drop" -#: commands/event_trigger.c:1566 commands/event_trigger.c:1587 +#: commands/event_trigger.c:1584 commands/event_trigger.c:1605 #, c-format msgid "%s can only be called in a table_rewrite event trigger function" msgstr "%s можно вызывать только в событийной триггерной функции table_rewrite" -#: commands/event_trigger.c:1997 +#: commands/event_trigger.c:2015 #, c-format msgid "%s can only be called in an event trigger function" msgstr "%s можно вызывать только в событийной триггерной функции" @@ -7379,123 +7161,123 @@ msgstr "параметр BUFFERS оператора EXPLAIN требует ук msgid "EXPLAIN option TIMING requires ANALYZE" msgstr "параметр TIMING оператора EXPLAIN требует указания ANALYZE" -#: commands/extension.c:167 commands/extension.c:2902 +#: commands/extension.c:168 commands/extension.c:2907 #, c-format msgid "extension \"%s\" does not exist" msgstr "расширение \"%s\" не существует" -#: commands/extension.c:266 commands/extension.c:275 commands/extension.c:287 -#: commands/extension.c:297 +#: commands/extension.c:267 commands/extension.c:276 commands/extension.c:288 +#: commands/extension.c:298 #, c-format msgid "invalid extension name: \"%s\"" msgstr "неверное имя расширения: \"%s\"" -#: commands/extension.c:267 +#: commands/extension.c:268 #, c-format msgid "Extension names must not be empty." msgstr "Имя расширения не может быть пустым." -#: commands/extension.c:276 +#: commands/extension.c:277 #, c-format msgid "Extension names must not contain \"--\"." msgstr "Имя расширения не может содержать \"--\"." -#: commands/extension.c:288 +#: commands/extension.c:289 #, c-format msgid "Extension names must not begin or end with \"-\"." msgstr "Имя расширения не может начинаться или заканчиваться символом \"-\"." -#: commands/extension.c:298 +#: commands/extension.c:299 #, c-format msgid "Extension names must not contain directory separator characters." msgstr "Имя расширения не может содержать разделители пути." -#: commands/extension.c:313 commands/extension.c:322 commands/extension.c:331 -#: commands/extension.c:341 +#: commands/extension.c:314 commands/extension.c:323 commands/extension.c:332 +#: commands/extension.c:342 #, c-format msgid "invalid extension version name: \"%s\"" msgstr "неверный идентификатор версии расширения: \"%s\"" -#: commands/extension.c:314 +#: commands/extension.c:315 #, c-format msgid "Version names must not be empty." msgstr "Идентификатор версии не может быть пустым." -#: commands/extension.c:323 +#: commands/extension.c:324 #, c-format msgid "Version names must not contain \"--\"." msgstr "Идентификатор версии не может содержать \"--\"." -#: commands/extension.c:332 +#: commands/extension.c:333 #, c-format msgid "Version names must not begin or end with \"-\"." msgstr "" "Идентификатор версии не может начинаться или заканчиваться символом \"-\"." -#: commands/extension.c:342 +#: commands/extension.c:343 #, c-format msgid "Version names must not contain directory separator characters." msgstr "Идентификатор версии не может содержать разделители пути." 
-#: commands/extension.c:492 +#: commands/extension.c:493 #, c-format msgid "could not open extension control file \"%s\": %m" msgstr "не удалось открыть управляющий файл расширения \"%s\": %m" -#: commands/extension.c:514 commands/extension.c:524 +#: commands/extension.c:515 commands/extension.c:525 #, c-format msgid "parameter \"%s\" cannot be set in a secondary extension control file" msgstr "" "параметр \"%s\" нельзя задавать в дополнительном управляющем файле расширения" -#: commands/extension.c:563 +#: commands/extension.c:564 #, c-format msgid "\"%s\" is not a valid encoding name" msgstr "неверное имя кодировки %s" -#: commands/extension.c:577 +#: commands/extension.c:578 #, c-format msgid "parameter \"%s\" must be a list of extension names" msgstr "параметр \"%s\" должен содержать список имён расширений" -#: commands/extension.c:584 +#: commands/extension.c:585 #, c-format msgid "unrecognized parameter \"%s\" in file \"%s\"" msgstr "нераспознанный параметр \"%s\" в файле \"%s\"" -#: commands/extension.c:593 +#: commands/extension.c:594 #, c-format msgid "parameter \"schema\" cannot be specified when \"relocatable\" is true" msgstr "" "параметр \"schema\" не может быть указан вместе с \"relocatable\" = true" -#: commands/extension.c:757 +#: commands/extension.c:761 #, c-format msgid "" "transaction control statements are not allowed within an extension script" msgstr "в скрипте расширения не должно быть операторов управления транзакциями" -#: commands/extension.c:803 +#: commands/extension.c:807 #, c-format msgid "permission denied to create extension \"%s\"" msgstr "нет прав на создание расширения \"%s\"" -#: commands/extension.c:805 +#: commands/extension.c:809 #, c-format msgid "Must be superuser to create this extension." msgstr "Для создания этого расширения нужно быть суперпользователем." -#: commands/extension.c:809 +#: commands/extension.c:813 #, c-format msgid "permission denied to update extension \"%s\"" msgstr "нет прав на изменение расширения \"%s\"" -#: commands/extension.c:811 +#: commands/extension.c:815 #, c-format msgid "Must be superuser to update this extension." msgstr "Для изменения этого расширения нужно быть суперпользователем." 
-#: commands/extension.c:1093 +#: commands/extension.c:1097 #, c-format msgid "" "extension \"%s\" has no update path from version \"%s\" to version \"%s\"" @@ -7503,17 +7285,17 @@ msgstr "" "для расширения \"%s\" не определён путь обновления с версии \"%s\" до версии " "\"%s\"" -#: commands/extension.c:1300 commands/extension.c:2963 +#: commands/extension.c:1304 commands/extension.c:2968 #, c-format msgid "version to install must be specified" msgstr "нужно указать версию для установки" -#: commands/extension.c:1322 +#: commands/extension.c:1326 #, c-format msgid "FROM version must be different from installation target version \"%s\"" msgstr "версия FROM должна отличаться от устанавливаемой версии \"%s\"" -#: commands/extension.c:1387 +#: commands/extension.c:1391 #, c-format msgid "" "extension \"%s\" has no installation script nor update path for version \"%s" @@ -7522,54 +7304,54 @@ msgstr "" "для расширения \"%s\" не определён путь установки или обновления для версии " "\"%s\"" -#: commands/extension.c:1422 +#: commands/extension.c:1426 #, c-format msgid "extension \"%s\" must be installed in schema \"%s\"" msgstr "расширение \"%s\" должно устанавливаться в схему \"%s\"" -#: commands/extension.c:1575 +#: commands/extension.c:1579 #, c-format msgid "cyclic dependency detected between extensions \"%s\" and \"%s\"" msgstr "выявлена циклическая зависимость между расширениями \"%s\" и \"%s\"" -#: commands/extension.c:1580 +#: commands/extension.c:1584 #, c-format msgid "installing required extension \"%s\"" msgstr "установка требуемого расширения \"%s\"" -#: commands/extension.c:1604 +#: commands/extension.c:1608 #, c-format msgid "required extension \"%s\" is not installed" msgstr "требуемое расширение \"%s\" не установлено" -#: commands/extension.c:1607 +#: commands/extension.c:1611 #, c-format msgid "Use CREATE EXTENSION ... CASCADE to install required extensions too." msgstr "" "Выполните CREATE EXTENSION ... CASCADE, чтобы установить также требуемые " "расширения." 
-#: commands/extension.c:1644 +#: commands/extension.c:1648 #, c-format msgid "extension \"%s\" already exists, skipping" msgstr "расширение \"%s\" уже существует, пропускается" -#: commands/extension.c:1651 +#: commands/extension.c:1655 #, c-format msgid "extension \"%s\" already exists" msgstr "расширение \"%s\" уже существует" -#: commands/extension.c:1662 +#: commands/extension.c:1666 #, c-format msgid "nested CREATE EXTENSION is not supported" msgstr "вложенные операторы CREATE EXTENSION не поддерживаются" -#: commands/extension.c:1843 +#: commands/extension.c:1847 #, c-format msgid "cannot drop extension \"%s\" because it is being modified" msgstr "удалить расширение \"%s\" нельзя, так как это модифицируемый объект" -#: commands/extension.c:2345 +#: commands/extension.c:2349 #, c-format msgid "" "pg_extension_config_dump() can only be called from an SQL script executed by " @@ -7578,17 +7360,17 @@ msgstr "" "функцию pg_extension_config_dump() можно вызывать только из SQL-скрипта, " "запускаемого в CREATE EXTENSION" -#: commands/extension.c:2357 +#: commands/extension.c:2361 #, c-format msgid "OID %u does not refer to a table" msgstr "OID %u не относится к таблице" -#: commands/extension.c:2362 +#: commands/extension.c:2366 #, c-format msgid "table \"%s\" is not a member of the extension being created" msgstr "таблица \"%s\" не относится к созданному расширению" -#: commands/extension.c:2718 +#: commands/extension.c:2722 #, c-format msgid "" "cannot move extension \"%s\" into schema \"%s\" because the extension " @@ -7597,27 +7379,27 @@ msgstr "" "переместить расширение \"%s\" в схему \"%s\" нельзя, так как оно содержит " "схему" -#: commands/extension.c:2758 commands/extension.c:2821 +#: commands/extension.c:2763 commands/extension.c:2826 #, c-format msgid "extension \"%s\" does not support SET SCHEMA" msgstr "расширение \"%s\" не поддерживает SET SCHEMA" -#: commands/extension.c:2823 +#: commands/extension.c:2828 #, c-format msgid "%s is not in the extension's schema \"%s\"" msgstr "объект %s не принадлежит схеме расширения \"%s\"" -#: commands/extension.c:2882 +#: commands/extension.c:2887 #, c-format msgid "nested ALTER EXTENSION is not supported" msgstr "вложенные операторы ALTER EXTENSION не поддерживаются" -#: commands/extension.c:2974 +#: commands/extension.c:2979 #, c-format msgid "version \"%s\" of extension \"%s\" is already installed" msgstr "версия \"%s\" расширения \"%s\" уже установлена" -#: commands/extension.c:3225 +#: commands/extension.c:3230 #, c-format msgid "" "cannot add schema \"%s\" to extension \"%s\" because the schema contains the " @@ -7626,12 +7408,12 @@ msgstr "" "добавить схему \"%s\" к расширению \"%s\" нельзя, так как схема содержит " "расширение" -#: commands/extension.c:3253 +#: commands/extension.c:3258 #, c-format msgid "%s is not a member of extension \"%s\"" msgstr "%s не относится к расширению \"%s\"" -#: commands/extension.c:3319 +#: commands/extension.c:3324 #, c-format msgid "file \"%s\" is too large" msgstr "файл \"%s\" слишком большой" @@ -7851,12 +7633,12 @@ msgstr "не указано тело функции" msgid "no language specified" msgstr "язык не указан" -#: commands/functioncmds.c:735 commands/functioncmds.c:1241 +#: commands/functioncmds.c:735 commands/functioncmds.c:1242 #, c-format msgid "COST must be positive" msgstr "значение COST должно быть положительным" -#: commands/functioncmds.c:743 commands/functioncmds.c:1249 +#: commands/functioncmds.c:743 commands/functioncmds.c:1250 #, c-format msgid "ROWS must be positive" msgstr "значение ROWS 
должно быть положительным" @@ -7871,67 +7653,67 @@ msgstr "нераспознанный атрибут функции \"%s\" --- и msgid "only one AS item needed for language \"%s\"" msgstr "для языка \"%s\" нужно только одно выражение AS" -#: commands/functioncmds.c:930 commands/functioncmds.c:2110 +#: commands/functioncmds.c:930 commands/functioncmds.c:2131 #: commands/proclang.c:561 #, c-format msgid "language \"%s\" does not exist" msgstr "язык \"%s\" не существует" -#: commands/functioncmds.c:932 commands/functioncmds.c:2112 +#: commands/functioncmds.c:932 commands/functioncmds.c:2133 #, c-format msgid "Use CREATE LANGUAGE to load the language into the database." msgstr "Выполните CREATE LANGUAGE, чтобы загрузить язык в базу данных." -#: commands/functioncmds.c:967 commands/functioncmds.c:1233 +#: commands/functioncmds.c:967 commands/functioncmds.c:1234 #, c-format msgid "only superuser can define a leakproof function" msgstr "" "только суперпользователь может определить функцию с атрибутом LEAKPROOF" -#: commands/functioncmds.c:1009 +#: commands/functioncmds.c:1010 #, c-format msgid "function result type must be %s because of OUT parameters" msgstr "" "результат функции должен иметь тип %s (в соответствии с параметрами OUT)" -#: commands/functioncmds.c:1022 +#: commands/functioncmds.c:1023 #, c-format msgid "function result type must be specified" msgstr "необходимо указать тип результата функции" -#: commands/functioncmds.c:1076 commands/functioncmds.c:1253 +#: commands/functioncmds.c:1077 commands/functioncmds.c:1254 #, c-format msgid "ROWS is not applicable when function does not return a set" msgstr "указание ROWS неприменимо, когда функция возвращает не множество" -#: commands/functioncmds.c:1405 +#: commands/functioncmds.c:1426 #, c-format msgid "source data type %s is a pseudo-type" msgstr "исходный тип данных %s является псевдотипом" -#: commands/functioncmds.c:1411 +#: commands/functioncmds.c:1432 #, c-format msgid "target data type %s is a pseudo-type" msgstr "целевой тип данных %s является псевдотипом" -#: commands/functioncmds.c:1435 +#: commands/functioncmds.c:1456 #, c-format msgid "cast will be ignored because the source data type is a domain" msgstr "" "приведение будет проигнорировано, так как исходные данные имеют тип домен" -#: commands/functioncmds.c:1440 +#: commands/functioncmds.c:1461 #, c-format msgid "cast will be ignored because the target data type is a domain" msgstr "" "приведение будет проигнорировано, так как целевые данные имеют тип домен" -#: commands/functioncmds.c:1465 +#: commands/functioncmds.c:1486 #, c-format msgid "cast function must take one to three arguments" msgstr "функция приведения должна принимать от одного до трёх аргументов" -#: commands/functioncmds.c:1469 +#: commands/functioncmds.c:1490 #, c-format msgid "" "argument of cast function must match or be binary-coercible from source data " @@ -7940,17 +7722,17 @@ msgstr "" "аргумент функции приведения должен совпадать или быть двоично-совместимым с " "исходным типом данных" -#: commands/functioncmds.c:1473 +#: commands/functioncmds.c:1494 #, c-format msgid "second argument of cast function must be type %s" msgstr "второй аргумент функции приведения должен иметь тип %s" -#: commands/functioncmds.c:1478 +#: commands/functioncmds.c:1499 #, c-format msgid "third argument of cast function must be type %s" msgstr "третий аргумент функции приведения должен иметь тип %s" -#: commands/functioncmds.c:1483 +#: commands/functioncmds.c:1504 #, c-format msgid "" "return data type of cast function must match or be 
binary-coercible to " @@ -7959,252 +7741,252 @@ msgstr "" "тип возвращаемых данных функции приведения должен совпадать или быть двоично-" "совместимым с целевым типом данных" -#: commands/functioncmds.c:1494 +#: commands/functioncmds.c:1515 #, c-format msgid "cast function must not be volatile" msgstr "функция приведения не может быть изменчивой (volatile)" -#: commands/functioncmds.c:1499 +#: commands/functioncmds.c:1520 #, c-format msgid "cast function must not be an aggregate function" msgstr "функция приведения не может быть агрегатной" -#: commands/functioncmds.c:1503 +#: commands/functioncmds.c:1524 #, c-format msgid "cast function must not be a window function" msgstr "функция приведения не может быть оконной" -#: commands/functioncmds.c:1507 +#: commands/functioncmds.c:1528 #, c-format msgid "cast function must not return a set" msgstr "функция приведения не может возвращать множество" -#: commands/functioncmds.c:1533 +#: commands/functioncmds.c:1554 #, c-format msgid "must be superuser to create a cast WITHOUT FUNCTION" msgstr "для создания приведения WITHOUT FUNCTION нужно быть суперпользователем" -#: commands/functioncmds.c:1548 +#: commands/functioncmds.c:1569 #, c-format msgid "source and target data types are not physically compatible" msgstr "исходный и целевой типы данных не совместимы физически" -#: commands/functioncmds.c:1563 +#: commands/functioncmds.c:1584 #, c-format msgid "composite data types are not binary-compatible" msgstr "составные типы данных не совместимы на двоичном уровне" -#: commands/functioncmds.c:1569 +#: commands/functioncmds.c:1590 #, c-format msgid "enum data types are not binary-compatible" msgstr "типы-перечисления не совместимы на двоичном уровне" -#: commands/functioncmds.c:1575 +#: commands/functioncmds.c:1596 #, c-format msgid "array data types are not binary-compatible" msgstr "типы-массивы не совместимы на двоичном уровне" -#: commands/functioncmds.c:1592 +#: commands/functioncmds.c:1613 #, c-format msgid "domain data types must not be marked binary-compatible" msgstr "типы-домены не могут считаться двоично-совместимыми" -#: commands/functioncmds.c:1602 +#: commands/functioncmds.c:1623 #, c-format msgid "source data type and target data type are the same" msgstr "исходный тип данных совпадает с целевым" -#: commands/functioncmds.c:1635 +#: commands/functioncmds.c:1656 #, c-format msgid "cast from type %s to type %s already exists" msgstr "приведение типа %s к типу %s уже существует" -#: commands/functioncmds.c:1708 +#: commands/functioncmds.c:1729 #, c-format msgid "cast from type %s to type %s does not exist" msgstr "приведение типа %s к типу %s не существует" -#: commands/functioncmds.c:1747 +#: commands/functioncmds.c:1768 #, c-format msgid "transform function must not be volatile" msgstr "функция преобразования не может быть изменчивой" -#: commands/functioncmds.c:1751 +#: commands/functioncmds.c:1772 #, c-format msgid "transform function must not be an aggregate function" msgstr "функция преобразования не может быть агрегатной" -#: commands/functioncmds.c:1755 +#: commands/functioncmds.c:1776 #, c-format msgid "transform function must not be a window function" msgstr "функция преобразования не может быть оконной" -#: commands/functioncmds.c:1759 +#: commands/functioncmds.c:1780 #, c-format msgid "transform function must not return a set" msgstr "функция преобразования не может возвращать множество" -#: commands/functioncmds.c:1763 +#: commands/functioncmds.c:1784 #, c-format msgid "transform function must take one argument" msgstr 
"функция преобразования должна принимать один аргумент" -#: commands/functioncmds.c:1767 +#: commands/functioncmds.c:1788 #, c-format msgid "first argument of transform function must be type %s" msgstr "первый аргумент функции преобразования должен иметь тип %s" -#: commands/functioncmds.c:1805 +#: commands/functioncmds.c:1826 #, c-format msgid "data type %s is a pseudo-type" msgstr "тип данных %s является псевдотипом" -#: commands/functioncmds.c:1811 +#: commands/functioncmds.c:1832 #, c-format msgid "data type %s is a domain" msgstr "тип данных \"%s\" является доменом" -#: commands/functioncmds.c:1851 +#: commands/functioncmds.c:1872 #, c-format msgid "return data type of FROM SQL function must be %s" msgstr "результат функции FROM SQL должен иметь тип %s" -#: commands/functioncmds.c:1877 +#: commands/functioncmds.c:1898 #, c-format msgid "return data type of TO SQL function must be the transform data type" msgstr "результат функции TO SQL должен иметь тип данных преобразования" -#: commands/functioncmds.c:1904 +#: commands/functioncmds.c:1925 #, c-format msgid "transform for type %s language \"%s\" already exists" msgstr "преобразование для типа %s, языка \"%s\" уже существует" -#: commands/functioncmds.c:1993 +#: commands/functioncmds.c:2014 #, c-format msgid "transform for type %s language \"%s\" does not exist" msgstr "преобразование для типа %s, языка \"%s\" не существует" -#: commands/functioncmds.c:2044 +#: commands/functioncmds.c:2065 #, c-format msgid "function %s already exists in schema \"%s\"" msgstr "функция %s уже существует в схеме \"%s\"" -#: commands/functioncmds.c:2097 +#: commands/functioncmds.c:2118 #, c-format msgid "no inline code specified" msgstr "нет внедрённого кода" -#: commands/functioncmds.c:2142 +#: commands/functioncmds.c:2163 #, c-format msgid "language \"%s\" does not support inline code execution" msgstr "язык \"%s\" не поддерживает выполнение внедрённого кода" -#: commands/indexcmds.c:350 +#: commands/indexcmds.c:354 #, c-format msgid "must specify at least one column" msgstr "нужно указать минимум один столбец" -#: commands/indexcmds.c:354 +#: commands/indexcmds.c:358 #, c-format msgid "cannot use more than %d columns in an index" msgstr "число столбцов в индексе не может превышать %d" -#: commands/indexcmds.c:385 +#: commands/indexcmds.c:389 #, c-format msgid "cannot create index on foreign table \"%s\"" msgstr "создать индекс в сторонней таблице \"%s\" нельзя" -#: commands/indexcmds.c:390 +#: commands/indexcmds.c:394 #, c-format msgid "cannot create index on partitioned table \"%s\"" msgstr "создать индекс в секционированной таблице \"%s\" нельзя" -#: commands/indexcmds.c:405 +#: commands/indexcmds.c:409 #, c-format msgid "cannot create indexes on temporary tables of other sessions" msgstr "создавать индексы во временных таблицах других сеансов нельзя" -#: commands/indexcmds.c:461 commands/tablecmds.c:579 commands/tablecmds.c:10165 +#: commands/indexcmds.c:474 commands/tablecmds.c:593 commands/tablecmds.c:10520 #, c-format msgid "only shared relations can be placed in pg_global tablespace" msgstr "" "в табличное пространство pg_global можно поместить только разделяемые таблицы" -#: commands/indexcmds.c:494 +#: commands/indexcmds.c:507 #, c-format msgid "substituting access method \"gist\" for obsolete method \"rtree\"" msgstr "устаревший метод доступа \"rtree\" подменяется методом \"gist\"" -#: commands/indexcmds.c:512 +#: commands/indexcmds.c:525 #, c-format msgid "access method \"%s\" does not support unique indexes" msgstr "метод доступа \"%s\" не 
поддерживает уникальные индексы" -#: commands/indexcmds.c:517 +#: commands/indexcmds.c:530 #, c-format msgid "access method \"%s\" does not support multicolumn indexes" msgstr "метод доступа \"%s\" не поддерживает индексы по многим столбцам" -#: commands/indexcmds.c:522 +#: commands/indexcmds.c:535 #, c-format msgid "access method \"%s\" does not support exclusion constraints" msgstr "метод доступа \"%s\" не поддерживает ограничения-исключения" -#: commands/indexcmds.c:594 commands/indexcmds.c:614 +#: commands/indexcmds.c:607 commands/indexcmds.c:627 #, c-format msgid "index creation on system columns is not supported" msgstr "создание индекса для системных столбцов не поддерживается" -#: commands/indexcmds.c:639 +#: commands/indexcmds.c:652 #, c-format msgid "%s %s will create implicit index \"%s\" for table \"%s\"" msgstr "%s %s создаст неявный индекс \"%s\" для таблицы \"%s\"" -#: commands/indexcmds.c:986 +#: commands/indexcmds.c:1002 #, c-format msgid "functions in index predicate must be marked IMMUTABLE" msgstr "функции в предикате индекса должны быть помечены как IMMUTABLE" -#: commands/indexcmds.c:1052 parser/parse_utilcmd.c:1946 +#: commands/indexcmds.c:1068 parser/parse_utilcmd.c:2097 #, c-format msgid "column \"%s\" named in key does not exist" msgstr "указанный в ключе столбец \"%s\" не существует" -#: commands/indexcmds.c:1112 +#: commands/indexcmds.c:1128 #, c-format msgid "functions in index expression must be marked IMMUTABLE" msgstr "функции в индексном выражении должны быть помечены как IMMUTABLE" -#: commands/indexcmds.c:1135 +#: commands/indexcmds.c:1151 #, c-format msgid "could not determine which collation to use for index expression" msgstr "не удалось определить правило сортировки для индексного выражения" -#: commands/indexcmds.c:1143 commands/tablecmds.c:13006 commands/typecmds.c:831 -#: parser/parse_expr.c:2730 parser/parse_type.c:549 parser/parse_utilcmd.c:2873 +#: commands/indexcmds.c:1159 commands/tablecmds.c:13410 commands/typecmds.c:831 +#: parser/parse_expr.c:2763 parser/parse_type.c:549 parser/parse_utilcmd.c:3134 #: utils/adt/misc.c:661 #, c-format msgid "collations are not supported by type %s" msgstr "тип %s не поддерживает сортировку (COLLATION)" -#: commands/indexcmds.c:1181 +#: commands/indexcmds.c:1197 #, c-format msgid "operator %s is not commutative" msgstr "оператор %s не коммутативен" -#: commands/indexcmds.c:1183 +#: commands/indexcmds.c:1199 #, c-format msgid "Only commutative operators can be used in exclusion constraints." msgstr "" "В ограничениях-исключениях могут использоваться только коммутативные " "операторы." -#: commands/indexcmds.c:1209 +#: commands/indexcmds.c:1225 #, c-format msgid "operator %s is not a member of operator family \"%s\"" msgstr "оператор \"%s\" не входит в семейство операторов \"%s\"" -#: commands/indexcmds.c:1212 +#: commands/indexcmds.c:1228 #, c-format msgid "" "The exclusion operator must be related to the index operator class for the " @@ -8213,24 +7995,24 @@ msgstr "" "Оператор исключения для ограничения должен относиться к классу операторов " "индекса." 
-#: commands/indexcmds.c:1247 +#: commands/indexcmds.c:1263 #, c-format msgid "access method \"%s\" does not support ASC/DESC options" msgstr "метод доступа \"%s\" не поддерживает сортировку ASC/DESC" -#: commands/indexcmds.c:1252 +#: commands/indexcmds.c:1268 #, c-format msgid "access method \"%s\" does not support NULLS FIRST/LAST options" msgstr "метод доступа \"%s\" не поддерживает параметр NULLS FIRST/LAST" -#: commands/indexcmds.c:1311 commands/typecmds.c:1928 +#: commands/indexcmds.c:1327 commands/typecmds.c:1949 #, c-format msgid "data type %s has no default operator class for access method \"%s\"" msgstr "" "для типа данных %s не определён класс операторов по умолчанию для метода " "доступа \"%s\"" -#: commands/indexcmds.c:1313 +#: commands/indexcmds.c:1329 #, c-format msgid "" "You must specify an operator class for the index or define a default " @@ -8239,34 +8021,34 @@ msgstr "" "Вы должны указать класс операторов для индекса или определить класс " "операторов по умолчанию для этого типа данных." -#: commands/indexcmds.c:1342 commands/indexcmds.c:1350 +#: commands/indexcmds.c:1358 commands/indexcmds.c:1366 #: commands/opclasscmds.c:205 #, c-format msgid "operator class \"%s\" does not exist for access method \"%s\"" msgstr "класс операторов \"%s\" для метода доступа \"%s\" не существует" -#: commands/indexcmds.c:1363 commands/typecmds.c:1916 +#: commands/indexcmds.c:1379 commands/typecmds.c:1937 #, c-format msgid "operator class \"%s\" does not accept data type %s" msgstr "класс операторов \"%s\" не принимает тип данных %s" -#: commands/indexcmds.c:1453 +#: commands/indexcmds.c:1469 #, c-format msgid "there are multiple default operator classes for data type %s" msgstr "" "для типа данных %s определено несколько классов операторов по умолчанию" -#: commands/indexcmds.c:1844 +#: commands/indexcmds.c:1860 #, c-format msgid "table \"%s\" has no indexes" msgstr "таблица \"%s\" не имеет индексов" -#: commands/indexcmds.c:1899 +#: commands/indexcmds.c:1915 #, c-format msgid "can only reindex the currently open database" msgstr "переиндексировать можно только текущую базу данных" -#: commands/indexcmds.c:1999 +#: commands/indexcmds.c:2015 #, c-format msgid "table \"%s.%s\" was reindexed" msgstr "таблица \"%s.%s\" переиндексирована" @@ -8525,13 +8307,13 @@ msgstr "функция оценки соединения %s должна воз msgid "operator attribute \"%s\" cannot be changed" msgstr "атрибут оператора \"%s\" нельзя изменить" -#: commands/policy.c:87 commands/policy.c:397 commands/policy.c:486 -#: commands/tablecmds.c:1134 commands/tablecmds.c:1490 -#: commands/tablecmds.c:2467 commands/tablecmds.c:4644 -#: commands/tablecmds.c:6738 commands/tablecmds.c:12659 -#: commands/tablecmds.c:12694 commands/trigger.c:251 commands/trigger.c:1242 -#: commands/trigger.c:1351 rewrite/rewriteDefine.c:272 -#: rewrite/rewriteDefine.c:911 +#: commands/policy.c:87 commands/policy.c:397 commands/policy.c:487 +#: commands/tablecmds.c:1150 commands/tablecmds.c:1520 +#: commands/tablecmds.c:2507 commands/tablecmds.c:4704 +#: commands/tablecmds.c:7068 commands/tablecmds.c:13033 +#: commands/tablecmds.c:13068 commands/trigger.c:259 commands/trigger.c:1320 +#: commands/trigger.c:1429 rewrite/rewriteDefine.c:272 +#: rewrite/rewriteDefine.c:925 #, c-format msgid "permission denied: \"%s\" is a system catalog" msgstr "доступ запрещён: \"%s\" - это системный каталог" @@ -8546,32 +8328,32 @@ msgstr "все указанные роли, кроме PUBLIC, игнориру msgid "All roles are members of the PUBLIC role." 
msgstr "Роль PUBLIC включает в себя все остальные роли." -#: commands/policy.c:510 +#: commands/policy.c:511 #, c-format msgid "role \"%s\" could not be removed from policy \"%s\" on \"%s\"" msgstr "роль \"%s\" нельзя удалить из политики \"%s\" отношения \"%s\"" -#: commands/policy.c:716 +#: commands/policy.c:717 #, c-format msgid "WITH CHECK cannot be applied to SELECT or DELETE" msgstr "WITH CHECK нельзя применить к SELECT или DELETE" -#: commands/policy.c:725 commands/policy.c:1023 +#: commands/policy.c:726 commands/policy.c:1024 #, c-format msgid "only WITH CHECK expression allowed for INSERT" msgstr "для INSERT допускается только выражение WITH CHECK" -#: commands/policy.c:798 commands/policy.c:1243 +#: commands/policy.c:799 commands/policy.c:1244 #, c-format msgid "policy \"%s\" for table \"%s\" already exists" msgstr "политика \"%s\" для таблицы \"%s\" уже существует" -#: commands/policy.c:995 commands/policy.c:1271 commands/policy.c:1343 +#: commands/policy.c:996 commands/policy.c:1272 commands/policy.c:1344 #, c-format msgid "policy \"%s\" for table \"%s\" does not exist" msgstr "политика \"%s\" для таблицы \"%s\" не существует" -#: commands/policy.c:1013 +#: commands/policy.c:1014 #, c-format msgid "only USING expression allowed for SELECT, DELETE" msgstr "для SELECT, DELETE допускается только выражение USING" @@ -8582,7 +8364,7 @@ msgid "invalid cursor name: must not be empty" msgstr "имя курсора не может быть пустым" #: commands/portalcmds.c:190 commands/portalcmds.c:244 -#: executor/execCurrent.c:67 utils/adt/xml.c:2459 utils/adt/xml.c:2626 +#: executor/execCurrent.c:67 utils/adt/xml.c:2469 utils/adt/xml.c:2639 #, c-format msgid "cursor \"%s\" does not exist" msgstr "курсор \"%s\" не существует" @@ -8592,7 +8374,7 @@ msgstr "курсор \"%s\" не существует" msgid "invalid statement name: must not be empty" msgstr "неверный оператор: имя не должно быть пустым" -#: commands/prepare.c:141 parser/parse_param.c:304 tcop/postgres.c:1355 +#: commands/prepare.c:141 parser/parse_param.c:304 tcop/postgres.c:1349 #, c-format msgid "could not determine data type of parameter $%d" msgstr "не удалось определить тип данных параметра $%d" @@ -8663,38 +8445,53 @@ msgid "must be superuser to create custom procedural language" msgstr "" "для создания дополнительного процедурного языка нужно быть суперпользователем" -#: commands/proclang.c:281 commands/trigger.c:530 commands/typecmds.c:457 +#: commands/proclang.c:281 commands/trigger.c:608 commands/typecmds.c:457 #: commands/typecmds.c:474 #, c-format msgid "changing return type of function %s from %s to %s" msgstr "изменение типа возврата функции %s с %s на %s" -#: commands/publicationcmds.c:179 +#: commands/publicationcmds.c:106 +#, c-format +msgid "invalid list syntax for \"publish\" option" +msgstr "неверный синтаксис параметра \"publish\"" + +#: commands/publicationcmds.c:122 +#, c-format +msgid "unrecognized \"publish\" value: \"%s\"" +msgstr "нераспознанное значение \"publish\": \"%s\"" + +#: commands/publicationcmds.c:128 +#, c-format +msgid "unrecognized publication parameter: %s" +msgstr "нераспознанный параметр репликации: %s" + +#: commands/publicationcmds.c:160 #, c-format msgid "must be superuser to create FOR ALL TABLES publication" msgstr "для создания публикации всех таблиц нужно быть суперпользователем" -#: commands/publicationcmds.c:351 +#: commands/publicationcmds.c:321 #, c-format msgid "publication \"%s\" is defined as FOR ALL TABLES" msgstr "публикация \"%s\" определена для всех таблиц (FOR ALL TABLES)" -#: 
commands/publicationcmds.c:353 +#: commands/publicationcmds.c:323 #, c-format msgid "Tables cannot be added to or dropped from FOR ALL TABLES publications." msgstr "В публикации всех таблиц нельзя добавлять или удалять таблицы." -#: commands/publicationcmds.c:651 +#: commands/publicationcmds.c:624 #, c-format msgid "relation \"%s\" is not part of the publication" msgstr "отношение \"%s\" не включено в публикацию" -#: commands/publicationcmds.c:694 +#: commands/publicationcmds.c:667 #, c-format msgid "permission denied to change owner of publication \"%s\"" msgstr "нет прав на изменение владельца публикации \"%s\"" -#: commands/publicationcmds.c:696 +#: commands/publicationcmds.c:669 #, c-format msgid "The owner of a FOR ALL TABLES publication must be a superuser." msgstr "" @@ -8734,212 +8531,272 @@ msgstr "" msgid "security label provider \"%s\" is not loaded" msgstr "поставщик меток безопасности \"%s\" не загружен" -#: commands/sequence.c:135 +#: commands/sequence.c:138 #, c-format msgid "unlogged sequences are not supported" msgstr "нежурналируемые последовательности не поддерживаются" -#: commands/sequence.c:698 +#: commands/sequence.c:699 #, c-format msgid "nextval: reached maximum value of sequence \"%s\" (%s)" msgstr "функция nextval достигла максимума для последовательности \"%s\" (%s)" -#: commands/sequence.c:721 +#: commands/sequence.c:722 #, c-format msgid "nextval: reached minimum value of sequence \"%s\" (%s)" msgstr "функция nextval достигла минимума для последовательности \"%s\" (%s)" -#: commands/sequence.c:839 +#: commands/sequence.c:840 #, c-format msgid "currval of sequence \"%s\" is not yet defined in this session" msgstr "" "текущее значение (currval) для последовательности \"%s\" ещё не определено в " "этом сеансе" -#: commands/sequence.c:858 commands/sequence.c:864 +#: commands/sequence.c:859 commands/sequence.c:865 #, c-format msgid "lastval is not yet defined in this session" msgstr "последнее значение (lastval) ещё не определено в этом сеансе" -#: commands/sequence.c:952 +#: commands/sequence.c:953 #, c-format msgid "setval: value %s is out of bounds for sequence \"%s\" (%s..%s)" msgstr "" "setval передано значение %s вне пределов последовательности \"%s\" (%s..%s)" -#: commands/sequence.c:1344 +#: commands/sequence.c:1358 +#, c-format +msgid "invalid sequence option SEQUENCE NAME" +msgstr "неверное свойство последовательности SEQUENCE NAME" + +#: commands/sequence.c:1384 +#, c-format +msgid "identity column type must be smallint, integer, or bigint" +msgstr "" +"типом столбца идентификации может быть только smallint, integer или bigint" + +#: commands/sequence.c:1385 #, c-format msgid "sequence type must be smallint, integer, or bigint" msgstr "" "типом последовательности может быть только smallint, integer или bigint" -#: commands/sequence.c:1356 +#: commands/sequence.c:1419 #, c-format msgid "INCREMENT must not be zero" msgstr "INCREMENT не может быть нулевым" -#: commands/sequence.c:1405 +#: commands/sequence.c:1472 #, c-format msgid "MAXVALUE (%s) is out of range for sequence data type %s" msgstr "MAXVALUE (%s) выходит за пределы типа данных последовательности (%s)" -#: commands/sequence.c:1442 +#: commands/sequence.c:1509 #, c-format msgid "MINVALUE (%s) is out of range for sequence data type %s" msgstr "MINVALUE (%s) выходит за пределы типа данных последовательности (%s)" -#: commands/sequence.c:1456 +#: commands/sequence.c:1523 #, c-format msgid "MINVALUE (%s) must be less than MAXVALUE (%s)" msgstr "MINVALUE (%s) должно быть меньше MAXVALUE (%s)" -#: 
commands/sequence.c:1481 +#: commands/sequence.c:1550 #, c-format msgid "START value (%s) cannot be less than MINVALUE (%s)" msgstr "значение START (%s) не может быть меньше MINVALUE (%s)" -#: commands/sequence.c:1493 +#: commands/sequence.c:1562 #, c-format msgid "START value (%s) cannot be greater than MAXVALUE (%s)" msgstr "значение START (%s) не может быть больше MAXVALUE (%s)" -#: commands/sequence.c:1523 +#: commands/sequence.c:1592 #, c-format msgid "RESTART value (%s) cannot be less than MINVALUE (%s)" msgstr "значение RESTART (%s) не может быть меньше MINVALUE (%s)" -#: commands/sequence.c:1535 +#: commands/sequence.c:1604 #, c-format msgid "RESTART value (%s) cannot be greater than MAXVALUE (%s)" msgstr "значение RESTART (%s) не может быть больше MAXVALUE (%s)" -#: commands/sequence.c:1550 +#: commands/sequence.c:1619 #, c-format msgid "CACHE (%s) must be greater than zero" msgstr "значение CACHE (%s) должно быть больше нуля" -#: commands/sequence.c:1582 +#: commands/sequence.c:1656 #, c-format msgid "invalid OWNED BY option" msgstr "неверное указание OWNED BY" # skip-rule: no-space-after-period -#: commands/sequence.c:1583 +#: commands/sequence.c:1657 #, c-format msgid "Specify OWNED BY table.column or OWNED BY NONE." msgstr "Укажите OWNED BY таблица.столбец или OWNED BY NONE." -#: commands/sequence.c:1607 +#: commands/sequence.c:1682 #, c-format msgid "referenced relation \"%s\" is not a table or foreign table" msgstr "указанный объект \"%s\" не является таблицей или сторонней таблицей" -#: commands/sequence.c:1614 +#: commands/sequence.c:1689 #, c-format msgid "sequence must have same owner as table it is linked to" msgstr "" "последовательность должна иметь того же владельца, что и таблица, с которой " "она связана" -#: commands/sequence.c:1618 +#: commands/sequence.c:1693 #, c-format msgid "sequence must be in same schema as table it is linked to" msgstr "" "последовательность должна быть в той же схеме, что и таблица, с которой она " "связана" -#: commands/statscmds.c:88 +#: commands/sequence.c:1715 +#, c-format +msgid "cannot change ownership of identity sequence" +msgstr "сменить владельца последовательности идентификации нельзя" + +#: commands/sequence.c:1716 commands/tablecmds.c:9902 +#: commands/tablecmds.c:12496 +#, c-format +msgid "Sequence \"%s\" is linked to table \"%s\"." +msgstr "Последовательность \"%s\" связана с таблицей \"%s\"." 
+ +#: commands/statscmds.c:93 +#, c-format +msgid "statistics object \"%s\" already exists, skipping" +msgstr "объект статистики \"%s\" уже существует, пропускается" + +#: commands/statscmds.c:100 #, c-format -msgid "statistics \"%s\" already exist, skipping" -msgstr "статистика \"%s\" уже существует, пропускается" +msgid "statistics object \"%s\" already exists" +msgstr "объект статистики \"%s\" уже существует" -#: commands/statscmds.c:95 +#: commands/statscmds.c:112 commands/statscmds.c:121 #, c-format -msgid "statistics \"%s\" already exist" -msgstr "статистика \"%s\" уже существует" +msgid "only a single relation is allowed in CREATE STATISTICS" +msgstr "в CREATE STATISTICS можно указать только одно отношение" -#: commands/statscmds.c:105 +#: commands/statscmds.c:139 #, c-format -msgid "relation \"%s\" is not a table or materialized view" -msgstr "отношение \"%s\" - это не таблица и не материализованное представление" +msgid "relation \"%s\" is not a table, foreign table, or materialized view" +msgstr "" +"отношение \"%s\" - это не таблица, не сторонняя таблица и не " +"материализованное представление" -#: commands/statscmds.c:123 +#: commands/statscmds.c:170 commands/statscmds.c:176 #, c-format -msgid "column \"%s\" referenced in statistics does not exist" -msgstr "столбец \"%s\", указанный в статистике, не существует" +msgid "only simple column references are allowed in CREATE STATISTICS" +msgstr "в CREATE STATISTICS допускаются только простые ссылки на столбцы" -#: commands/statscmds.c:131 +#: commands/statscmds.c:191 #, c-format -msgid "statistic creation on system columns is not supported" +msgid "statistics creation on system columns is not supported" msgstr "создание статистики для системных столбцов не поддерживается" -#: commands/statscmds.c:138 +#: commands/statscmds.c:198 #, c-format -msgid "only scalar types can be used in extended statistics" -msgstr "в расширенной статистике могут использоваться только скалярные типы" +msgid "" +"column \"%s\" cannot be used in statistics because its type %s has no " +"default btree operator class" +msgstr "" +"столбец \"%s\" нельзя использовать в статистике, так как для его типа %s не " +"определён класс операторов B-дерева по умолчанию" -#: commands/statscmds.c:144 +#: commands/statscmds.c:205 #, c-format -msgid "cannot have more than %d keys in statistics" -msgstr "в статистике не может быть больше %d ключей" +msgid "cannot have more than %d columns in statistics" +msgstr "в статистике не может быть больше %d столбцов" -#: commands/statscmds.c:159 +#: commands/statscmds.c:220 #, c-format -msgid "statistics require at least 2 columns" -msgstr "в статистике должно быть минимум 2 столбца" +msgid "extended statistics require at least 2 columns" +msgstr "для расширенной статистики требуются минимум 2 столбца" -#: commands/statscmds.c:176 +#: commands/statscmds.c:238 #, c-format msgid "duplicate column name in statistics definition" msgstr "повторяющееся имя столбца в определении статистики" -#: commands/statscmds.c:197 +#: commands/statscmds.c:266 +#, c-format +msgid "unrecognized statistics kind \"%s\"" +msgstr "нераспознанный вид статистики \"%s\"" + +#: commands/subscriptioncmds.c:187 #, c-format -msgid "unrecognized STATISTICS option \"%s\"" -msgstr "нераспознанное указание для STATISTICS: \"%s\"" +msgid "unrecognized subscription parameter: %s" +msgstr "нераспознанный параметр подписки: \"%s\"" -#: commands/subscriptioncmds.c:182 +#: commands/subscriptioncmds.c:200 #, c-format -msgid "noconnect and enabled are mutually exclusive 
options" -msgstr "указания noconnect и enabled являются взаимоисключающими" +msgid "connect = false and enabled = true are mutually exclusive options" +msgstr "указания connect = false и enabled = true являются взаимоисключающими" -#: commands/subscriptioncmds.c:187 +#: commands/subscriptioncmds.c:205 +#, c-format +msgid "connect = false and create_slot = true are mutually exclusive options" +msgstr "" +"указания connect = false и create_slot = true являются взаимоисключающими" + +#: commands/subscriptioncmds.c:210 +#, c-format +msgid "connect = false and copy_data = true are mutually exclusive options" +msgstr "" +"указания connect = false и copy_data = true являются взаимоисключающими" + +#: commands/subscriptioncmds.c:227 +#, c-format +msgid "slot_name = NONE and enabled = true are mutually exclusive options" +msgstr "указания slot_name = NONE и enabled = true являются взаимоисключающими" + +#: commands/subscriptioncmds.c:232 +#, c-format +msgid "slot_name = NONE and create_slot = true are mutually exclusive options" +msgstr "" +"указания slot_name = NONE и create_slot = true являются взаимоисключающими" + +#: commands/subscriptioncmds.c:237 #, c-format -msgid "noconnect and create slot are mutually exclusive options" -msgstr "указания noconnect и create slot являются взаимоисключающими" +msgid "subscription with slot_name = NONE must also set enabled = false" +msgstr "" +"для подписки с параметром slot_name = NONE необходимо также задать enabled = " +"false" -#: commands/subscriptioncmds.c:192 +#: commands/subscriptioncmds.c:242 #, c-format -msgid "noconnect and copy data are mutually exclusive options" -msgstr "указания noconnect и copy data являются взаимоисключающими" +msgid "subscription with slot_name = NONE must also set create_slot = false" +msgstr "" +"для подписки с параметром slot_name = NONE необходимо также задать " +"create_slot = false" -#: commands/subscriptioncmds.c:239 +#: commands/subscriptioncmds.c:284 #, c-format msgid "publication name \"%s\" used more than once" msgstr "имя публикации \"%s\" используется неоднократно" -#: commands/subscriptioncmds.c:297 +#: commands/subscriptioncmds.c:347 #, c-format msgid "must be superuser to create subscriptions" msgstr "для создания подписок нужно быть суперпользователем" -#: commands/subscriptioncmds.c:368 commands/subscriptioncmds.c:457 -#: replication/logical/tablesync.c:726 replication/logical/worker.c:1547 +#: commands/subscriptioncmds.c:427 commands/subscriptioncmds.c:520 +#: replication/logical/tablesync.c:856 replication/logical/worker.c:1622 #, c-format msgid "could not connect to the publisher: %s" msgstr "не удалось подключиться к серверу публикации: %s" -#: commands/subscriptioncmds.c:382 +#: commands/subscriptioncmds.c:469 #, c-format msgid "created replication slot \"%s\" on publisher" msgstr "на сервере публикации создан слот репликации \"%s\"" -#: commands/subscriptioncmds.c:409 -#, c-format -msgid "synchronized table states" -msgstr "состояние таблиц синхронизировано" - -#: commands/subscriptioncmds.c:424 +#: commands/subscriptioncmds.c:486 #, c-format msgid "" "tables were not subscribed, you will have to run ALTER SUBSCRIPTION ... " @@ -8948,22 +8805,51 @@ msgstr "" "в подписке отсутствуют таблицы, потребуется выполнить ALTER SUBSCRIPTION ... 
" "REFRESH PUBLICATION, чтобы подписаться на таблицы" -#: commands/subscriptioncmds.c:508 +#: commands/subscriptioncmds.c:576 +#, c-format +msgid "table \"%s.%s\" added to subscription \"%s\"" +msgstr "таблица \"%s.%s\" добавлена в подписку \"%s\"" + +#: commands/subscriptioncmds.c:600 +#, c-format +msgid "table \"%s.%s\" removed from subscription \"%s\"" +msgstr "таблица \"%s.%s\" удалена из подписки \"%s\"" + +#: commands/subscriptioncmds.c:669 +#, c-format +msgid "cannot set slot_name = NONE for enabled subscription" +msgstr "для включённой подписки нельзя задать slot_name = NONE" + +#: commands/subscriptioncmds.c:703 #, c-format -msgid "added subscription for table %s.%s" -msgstr "добавлена подписка на таблицу %s.%s" +msgid "cannot enable subscription that does not have a slot name" +msgstr "включить подписку, для которой не задано имя слота, нельзя" -#: commands/subscriptioncmds.c:534 +#: commands/subscriptioncmds.c:749 #, c-format -msgid "removed subscription for table %s.%s" -msgstr "удалена подписка на таблицу %s.%s" +msgid "" +"ALTER SUBSCRIPTION with refresh is not allowed for disabled subscriptions" +msgstr "" +"ALTER SUBSCRIPTION с обновлением для отключённых подписок не допускается" + +#: commands/subscriptioncmds.c:750 +#, c-format +msgid "Use ALTER SUBSCRIPTION ... SET PUBLICATION ... WITH (refresh = false)." +msgstr "" +"Выполните ALTER SUBSCRIPTION ... SET PUBLICATION ... WITH (refresh = false)." + +#: commands/subscriptioncmds.c:768 +#, c-format +msgid "" +"ALTER SUBSCRIPTION ... REFRESH is not allowed for disabled subscriptions" +msgstr "ALTER SUBSCRIPTION ... REFRESH для отключённых подписок не допускается" -#: commands/subscriptioncmds.c:738 +#: commands/subscriptioncmds.c:847 #, c-format msgid "subscription \"%s\" does not exist, skipping" msgstr "подписка \"%s\" не существует, пропускается" -#: commands/subscriptioncmds.c:820 +#: commands/subscriptioncmds.c:972 #, c-format msgid "" "could not connect to publisher when attempting to drop the replication slot " @@ -8972,33 +8858,42 @@ msgstr "" "не удалось подключиться к серверу публикации для удаления слота репликации " "\"%s\"" -#: commands/subscriptioncmds.c:822 commands/subscriptioncmds.c:833 -#: replication/logical/tablesync.c:775 replication/logical/tablesync.c:795 +#: commands/subscriptioncmds.c:974 commands/subscriptioncmds.c:988 +#: replication/logical/tablesync.c:906 replication/logical/tablesync.c:928 #, c-format msgid "The error was: %s" msgstr "Произошла ошибка: %s" -#: commands/subscriptioncmds.c:831 +#: commands/subscriptioncmds.c:975 +#, c-format +msgid "" +"Use ALTER SUBSCRIPTION ... SET (slot_name = NONE) to disassociate the " +"subscription from the slot." +msgstr "" +"Воспользуйтесь ALTER SUBSCRIPTION ... SET (slot_name = NONE), чтобы отвязать " +"подписку от слота." + +#: commands/subscriptioncmds.c:986 #, c-format msgid "could not drop the replication slot \"%s\" on publisher" msgstr "слот репликации \"%s\" на сервере публикации не был удалён" -#: commands/subscriptioncmds.c:836 +#: commands/subscriptioncmds.c:991 #, c-format msgid "dropped replication slot \"%s\" on publisher" msgstr "слот репликации \"%s\" удалён на сервере репликации" -#: commands/subscriptioncmds.c:877 +#: commands/subscriptioncmds.c:1032 #, c-format msgid "permission denied to change owner of subscription \"%s\"" msgstr "нет прав на изменение владельца подписки \"%s\"" -#: commands/subscriptioncmds.c:879 +#: commands/subscriptioncmds.c:1034 #, c-format -msgid "The owner of an subscription must be a superuser." 
+msgid "The owner of a subscription must be a superuser." msgstr "Владельцем подписки должен быть суперпользователь." -#: commands/subscriptioncmds.c:992 +#: commands/subscriptioncmds.c:1147 #, c-format msgid "could not receive list of replicated tables from the publisher: %s" msgstr "" @@ -9062,7 +8957,7 @@ msgstr "" "Выполните DROP MATERIALIZED VIEW для удаления материализованного " "представления." -#: commands/tablecmds.c:245 parser/parse_utilcmd.c:1698 +#: commands/tablecmds.c:245 parser/parse_utilcmd.c:1849 #, c-format msgid "index \"%s\" does not exist" msgstr "индекс \"%s\" не существует" @@ -9085,8 +8980,8 @@ msgstr "\"%s\" - это не тип" msgid "Use DROP TYPE to remove a type." msgstr "Выполните DROP TYPE для удаления типа." -#: commands/tablecmds.c:257 commands/tablecmds.c:9064 -#: commands/tablecmds.c:11903 +#: commands/tablecmds.c:257 commands/tablecmds.c:9418 +#: commands/tablecmds.c:12276 #, c-format msgid "foreign table \"%s\" does not exist" msgstr "сторонняя таблица \"%s\" не существует" @@ -9100,28 +8995,33 @@ msgstr "сторонняя таблица \"%s\" не существует, пр msgid "Use DROP FOREIGN TABLE to remove a foreign table." msgstr "Выполните DROP FOREIGN TABLE для удаления сторонней таблицы." -#: commands/tablecmds.c:519 +#: commands/tablecmds.c:533 #, c-format msgid "ON COMMIT can only be used on temporary tables" msgstr "ON COMMIT можно использовать только для временных таблиц" -#: commands/tablecmds.c:547 +#: commands/tablecmds.c:561 #, c-format msgid "cannot create temporary table within security-restricted operation" msgstr "" "в рамках операции с ограничениями по безопасности нельзя создать временную " "таблицу" -#: commands/tablecmds.c:646 +#: commands/tablecmds.c:662 #, c-format msgid "cannot create table with OIDs as partition of table without OIDs" msgstr "создать таблицу с OID в виде секции таблицы без OID нельзя" -#: commands/tablecmds.c:764 parser/parse_utilcmd.c:3040 +#: commands/tablecmds.c:783 parser/parse_utilcmd.c:3301 #, c-format msgid "\"%s\" is not partitioned" msgstr "отношение \"%s\" не секционировано" +#: commands/tablecmds.c:831 +#, c-format +msgid "cannot partition using more than %d columns" +msgstr "число столбцов в ключе секционирования не может превышать %d" + #: commands/tablecmds.c:972 #, c-format msgid "DROP INDEX CONCURRENTLY does not support dropping multiple objects" @@ -9132,64 +9032,73 @@ msgstr "DROP INDEX CONCURRENTLY не поддерживает удаление msgid "DROP INDEX CONCURRENTLY does not support CASCADE" msgstr "DROP INDEX CONCURRENTLY не поддерживает режим CASCADE" -#: commands/tablecmds.c:1224 +#: commands/tablecmds.c:1253 #, c-format -msgid "must truncate child tables too" -msgstr "опустошаться должны также и дочерние таблицы" +msgid "cannot truncate only a partitioned table" +msgstr "опустошить собственно секционированную таблицу нельзя" -#: commands/tablecmds.c:1252 +#: commands/tablecmds.c:1254 +#, c-format +msgid "" +"Do not specify the ONLY keyword, or use truncate only on the partitions " +"directly." +msgstr "" +"Не указывайте ключевое слово ONLY или произведите опустошение " +"непосредственно секций." 
+ +#: commands/tablecmds.c:1282 #, c-format msgid "truncate cascades to table \"%s\"" msgstr "опустошение распространяется на таблицу %s" -#: commands/tablecmds.c:1500 +#: commands/tablecmds.c:1530 #, c-format msgid "cannot truncate temporary tables of other sessions" msgstr "временные таблицы других сеансов нельзя опустошить" -#: commands/tablecmds.c:1731 commands/tablecmds.c:10647 +#: commands/tablecmds.c:1761 commands/tablecmds.c:11003 #, c-format msgid "cannot inherit from partitioned table \"%s\"" msgstr "наследование от секционированной таблицы \"%s\" не допускается" -#: commands/tablecmds.c:1736 +#: commands/tablecmds.c:1766 #, c-format msgid "cannot inherit from partition \"%s\"" msgstr "наследование от секции \"%s\" не допускается" -#: commands/tablecmds.c:1744 parser/parse_utilcmd.c:1909 +#: commands/tablecmds.c:1774 parser/parse_utilcmd.c:2060 #, c-format msgid "inherited relation \"%s\" is not a table or foreign table" msgstr "" "наследуемое отношение \"%s\" не является таблицей или сторонней таблицей" -#: commands/tablecmds.c:1752 commands/tablecmds.c:10626 +#: commands/tablecmds.c:1782 commands/tablecmds.c:10982 #, c-format msgid "cannot inherit from temporary relation \"%s\"" msgstr "временное отношение \"%s\" не может наследоваться" -#: commands/tablecmds.c:1762 commands/tablecmds.c:10634 +#: commands/tablecmds.c:1792 commands/tablecmds.c:10990 #, c-format msgid "cannot inherit from temporary relation of another session" msgstr "наследование от временного отношения другого сеанса невозможно" -#: commands/tablecmds.c:1779 commands/tablecmds.c:10745 +#: commands/tablecmds.c:1809 commands/tablecmds.c:11114 #, c-format msgid "relation \"%s\" would be inherited from more than once" msgstr "отношение \"%s\" наследуется неоднократно" -#: commands/tablecmds.c:1827 +#: commands/tablecmds.c:1857 #, c-format msgid "merging multiple inherited definitions of column \"%s\"" msgstr "слияние нескольких наследованных определений столбца \"%s\"" -#: commands/tablecmds.c:1835 +#: commands/tablecmds.c:1865 #, c-format msgid "inherited column \"%s\" has a type conflict" msgstr "конфликт типов в наследованном столбце \"%s\"" -#: commands/tablecmds.c:1837 commands/tablecmds.c:1860 -#: commands/tablecmds.c:2065 commands/tablecmds.c:2089 +#: commands/tablecmds.c:1867 commands/tablecmds.c:1890 +#: commands/tablecmds.c:2096 commands/tablecmds.c:2126 #: parser/parse_coerce.c:1650 parser/parse_coerce.c:1670 #: parser/parse_coerce.c:1690 parser/parse_coerce.c:1736 #: parser/parse_coerce.c:1775 parser/parse_param.c:218 @@ -9197,76 +9106,76 @@ msgstr "конфликт типов в наследованном столбце msgid "%s versus %s" msgstr "%s и %s" -#: commands/tablecmds.c:1846 +#: commands/tablecmds.c:1876 #, c-format msgid "inherited column \"%s\" has a collation conflict" msgstr "конфликт правил сортировки в наследованном столбце \"%s\"" -#: commands/tablecmds.c:1848 commands/tablecmds.c:2077 -#: commands/tablecmds.c:5089 +#: commands/tablecmds.c:1878 commands/tablecmds.c:2108 +#: commands/tablecmds.c:5162 #, c-format msgid "\"%s\" versus \"%s\"" msgstr "\"%s\" и \"%s\"" -#: commands/tablecmds.c:1858 +#: commands/tablecmds.c:1888 #, c-format msgid "inherited column \"%s\" has a storage parameter conflict" msgstr "конфликт параметров хранения в наследованном столбце \"%s\"" -#: commands/tablecmds.c:1971 commands/tablecmds.c:8572 -#: parser/parse_utilcmd.c:993 parser/parse_utilcmd.c:1343 -#: parser/parse_utilcmd.c:1419 +#: commands/tablecmds.c:2002 commands/tablecmds.c:8908 +#: parser/parse_utilcmd.c:1143 
parser/parse_utilcmd.c:1494 +#: parser/parse_utilcmd.c:1570 #, c-format msgid "cannot convert whole-row table reference" msgstr "преобразовать ссылку на тип всей строки таблицы нельзя" -#: commands/tablecmds.c:1972 parser/parse_utilcmd.c:994 +#: commands/tablecmds.c:2003 parser/parse_utilcmd.c:1144 #, c-format msgid "Constraint \"%s\" contains a whole-row reference to table \"%s\"." msgstr "Ограничение \"%s\" ссылается на тип всей строки в таблице \"%s\"." -#: commands/tablecmds.c:2051 +#: commands/tablecmds.c:2082 #, c-format msgid "merging column \"%s\" with inherited definition" msgstr "слияние столбца \"%s\" с наследованным определением" -#: commands/tablecmds.c:2055 +#: commands/tablecmds.c:2086 #, c-format msgid "moving and merging column \"%s\" with inherited definition" msgstr "перемещение и слияние столбца \"%s\" с наследуемым определением" -#: commands/tablecmds.c:2056 +#: commands/tablecmds.c:2087 #, c-format msgid "User-specified column moved to the position of the inherited column." msgstr "" "Определённый пользователем столбец перемещён в позицию наследуемого столбца." -#: commands/tablecmds.c:2063 +#: commands/tablecmds.c:2094 #, c-format msgid "column \"%s\" has a type conflict" msgstr "конфликт типов в столбце \"%s\"" -#: commands/tablecmds.c:2075 +#: commands/tablecmds.c:2106 #, c-format msgid "column \"%s\" has a collation conflict" msgstr "конфликт правил сортировки в столбце \"%s\"" -#: commands/tablecmds.c:2087 +#: commands/tablecmds.c:2124 #, c-format msgid "column \"%s\" has a storage parameter conflict" msgstr "конфликт параметров хранения в столбце \"%s\"" -#: commands/tablecmds.c:2189 +#: commands/tablecmds.c:2235 #, c-format msgid "column \"%s\" inherits conflicting default values" msgstr "столбец \"%s\" наследует конфликтующие значения по умолчанию" -#: commands/tablecmds.c:2191 +#: commands/tablecmds.c:2237 #, c-format msgid "To resolve the conflict, specify a default explicitly." msgstr "Для решения конфликта укажите желаемое значение по умолчанию." 
-#: commands/tablecmds.c:2238 +#: commands/tablecmds.c:2284 #, c-format msgid "" "check constraint name \"%s\" appears multiple times but with different " @@ -9275,12 +9184,12 @@ msgstr "" "имя ограничения-проверки \"%s\" фигурирует несколько раз, но с разными " "выражениями" -#: commands/tablecmds.c:2437 +#: commands/tablecmds.c:2477 #, c-format msgid "cannot rename column of typed table" msgstr "переименовать столбец типизированной таблицы нельзя" -#: commands/tablecmds.c:2455 +#: commands/tablecmds.c:2495 #, c-format msgid "" "\"%s\" is not a table, view, materialized view, composite type, index, or " @@ -9289,37 +9198,37 @@ msgstr "" "\"%s\" - это не таблица, представление, материализованное представление, " "составной тип, индекс или сторонняя таблица" -#: commands/tablecmds.c:2549 +#: commands/tablecmds.c:2589 #, c-format msgid "inherited column \"%s\" must be renamed in child tables too" msgstr "" "наследованный столбец \"%s\" должен быть также переименован в дочерних " "таблицах" -#: commands/tablecmds.c:2581 +#: commands/tablecmds.c:2621 #, c-format msgid "cannot rename system column \"%s\"" msgstr "нельзя переименовать системный столбец \"%s\"" -#: commands/tablecmds.c:2596 +#: commands/tablecmds.c:2636 #, c-format msgid "cannot rename inherited column \"%s\"" msgstr "нельзя переименовать наследованный столбец \"%s\"" -#: commands/tablecmds.c:2748 +#: commands/tablecmds.c:2788 #, c-format msgid "inherited constraint \"%s\" must be renamed in child tables too" msgstr "" "наследуемое ограничение \"%s\" должно быть также переименовано в дочерних " "таблицах" -#: commands/tablecmds.c:2755 +#: commands/tablecmds.c:2795 #, c-format msgid "cannot rename inherited constraint \"%s\"" msgstr "нельзя переименовать наследованное ограничение \"%s\"" #. translator: first %s is a SQL command, eg ALTER TABLE -#: commands/tablecmds.c:2979 +#: commands/tablecmds.c:3019 #, c-format msgid "" "cannot %s \"%s\" because it is being used by active queries in this session" @@ -9328,112 +9237,112 @@ msgstr "" "запросами в данном сеансе" #. 
translator: first %s is a SQL command, eg ALTER TABLE -#: commands/tablecmds.c:2988 +#: commands/tablecmds.c:3028 #, c-format msgid "cannot %s \"%s\" because it has pending trigger events" msgstr "" "нельзя выполнить %s \"%s\", так как с этим объектом связаны отложенные " "события триггеров" -#: commands/tablecmds.c:4087 +#: commands/tablecmds.c:4147 #, c-format msgid "cannot rewrite system relation \"%s\"" msgstr "перезаписать системное отношение \"%s\" нельзя" -#: commands/tablecmds.c:4093 +#: commands/tablecmds.c:4153 #, c-format msgid "cannot rewrite table \"%s\" used as a catalog table" msgstr "перезаписать таблицу \"%s\", используемую как таблицу каталога, нельзя" -#: commands/tablecmds.c:4103 +#: commands/tablecmds.c:4163 #, c-format msgid "cannot rewrite temporary tables of other sessions" msgstr "перезаписывать временные таблицы других сеансов нельзя" -#: commands/tablecmds.c:4379 +#: commands/tablecmds.c:4439 #, c-format msgid "rewriting table \"%s\"" msgstr "перезапись таблицы \"%s\"" -#: commands/tablecmds.c:4383 +#: commands/tablecmds.c:4443 #, c-format msgid "verifying table \"%s\"" msgstr "проверка таблицы \"%s\"" -#: commands/tablecmds.c:4496 +#: commands/tablecmds.c:4556 #, c-format msgid "column \"%s\" contains null values" msgstr "столбец \"%s\" содержит значения NULL" -#: commands/tablecmds.c:4511 commands/tablecmds.c:7843 +#: commands/tablecmds.c:4571 commands/tablecmds.c:8177 #, c-format msgid "check constraint \"%s\" is violated by some row" msgstr "ограничение-проверку \"%s\" нарушает некоторая строка" -#: commands/tablecmds.c:4527 +#: commands/tablecmds.c:4587 #, c-format msgid "partition constraint is violated by some row" msgstr "ограничение секции нарушает некоторая строка" -#: commands/tablecmds.c:4665 commands/trigger.c:245 rewrite/rewriteDefine.c:266 -#: rewrite/rewriteDefine.c:906 +#: commands/tablecmds.c:4725 commands/trigger.c:253 rewrite/rewriteDefine.c:266 +#: rewrite/rewriteDefine.c:920 #, c-format msgid "\"%s\" is not a table or view" msgstr "\"%s\" - это не таблица и не представление" -#: commands/tablecmds.c:4668 commands/trigger.c:1236 commands/trigger.c:1342 +#: commands/tablecmds.c:4728 commands/trigger.c:1314 commands/trigger.c:1420 #, c-format msgid "\"%s\" is not a table, view, or foreign table" msgstr "\"%s\" - это не таблица, представление и не сторонняя таблица" -#: commands/tablecmds.c:4671 +#: commands/tablecmds.c:4731 #, c-format msgid "\"%s\" is not a table, view, materialized view, or index" msgstr "" "\"%s\" - это не таблица, представление, материализованное представление или " "индекс" -#: commands/tablecmds.c:4677 +#: commands/tablecmds.c:4737 #, c-format msgid "\"%s\" is not a table, materialized view, or index" msgstr "\"%s\" - это не таблица, материализованное представление или индекс" -#: commands/tablecmds.c:4680 +#: commands/tablecmds.c:4740 #, c-format msgid "\"%s\" is not a table, materialized view, or foreign table" msgstr "" "\"%s\" - это не таблица, материализованное представление или сторонняя " "таблица" -#: commands/tablecmds.c:4683 +#: commands/tablecmds.c:4743 #, c-format msgid "\"%s\" is not a table or foreign table" msgstr "\"%s\" - это не таблица и не сторонняя таблица" -#: commands/tablecmds.c:4686 +#: commands/tablecmds.c:4746 #, c-format msgid "\"%s\" is not a table, composite type, or foreign table" msgstr "\"%s\" - это не таблица, составной тип или сторонняя таблица" -#: commands/tablecmds.c:4689 commands/tablecmds.c:5811 +#: commands/tablecmds.c:4749 commands/tablecmds.c:6139 #, c-format msgid "\"%s\" is not 
a table, materialized view, index, or foreign table" msgstr "" "\"%s\" - это не таблица, материализованное представление, индекс или " "сторонняя таблица" -#: commands/tablecmds.c:4699 +#: commands/tablecmds.c:4759 #, c-format msgid "\"%s\" is of the wrong type" msgstr "неправильный тип \"%s\"" -#: commands/tablecmds.c:4853 commands/tablecmds.c:4860 +#: commands/tablecmds.c:4934 commands/tablecmds.c:4941 #, c-format msgid "cannot alter type \"%s\" because column \"%s.%s\" uses it" msgstr "" "изменить тип \"%s\" нельзя, так как он задействован в столбце \"%s.%s\"" -#: commands/tablecmds.c:4867 +#: commands/tablecmds.c:4948 #, c-format msgid "" "cannot alter foreign table \"%s\" because column \"%s.%s\" uses its row type" @@ -9441,188 +9350,253 @@ msgstr "" "изменить стороннюю таблицу \"%s\" нельзя, так как столбец \"%s.%s\" " "задействует тип её строки" -#: commands/tablecmds.c:4874 +#: commands/tablecmds.c:4955 #, c-format msgid "cannot alter table \"%s\" because column \"%s.%s\" uses its row type" msgstr "" "изменить таблицу \"%s\" нельзя, так как столбец \"%s.%s\" задействует тип её " "строки" -#: commands/tablecmds.c:4936 +#: commands/tablecmds.c:5009 #, c-format msgid "cannot alter type \"%s\" because it is the type of a typed table" msgstr "изменить тип \"%s\" нельзя, так как это тип типизированной таблицы" -#: commands/tablecmds.c:4938 +#: commands/tablecmds.c:5011 #, c-format msgid "Use ALTER ... CASCADE to alter the typed tables too." msgstr "" "Чтобы изменить также типизированные таблицы, выполните ALTER ... CASCADE." -#: commands/tablecmds.c:4982 +#: commands/tablecmds.c:5055 #, c-format msgid "type %s is not a composite type" msgstr "тип %s не является составным" -#: commands/tablecmds.c:5008 +#: commands/tablecmds.c:5081 #, c-format msgid "cannot add column to typed table" msgstr "добавить столбец в типизированную таблицу нельзя" -#: commands/tablecmds.c:5052 +#: commands/tablecmds.c:5125 #, c-format msgid "cannot add column to a partition" msgstr "добавить столбец в секцию нельзя" -#: commands/tablecmds.c:5081 commands/tablecmds.c:10871 +#: commands/tablecmds.c:5154 commands/tablecmds.c:11240 #, c-format msgid "child table \"%s\" has different type for column \"%s\"" msgstr "дочерняя таблица \"%s\" имеет другой тип для столбца \"%s\"" -#: commands/tablecmds.c:5087 commands/tablecmds.c:10878 +#: commands/tablecmds.c:5160 commands/tablecmds.c:11247 #, c-format msgid "child table \"%s\" has different collation for column \"%s\"" msgstr "" "дочерняя таблица \"%s\" имеет другое правило сортировки для столбца \"%s\"" -#: commands/tablecmds.c:5097 +#: commands/tablecmds.c:5170 #, c-format msgid "child table \"%s\" has a conflicting \"%s\" column" msgstr "дочерняя таблица \"%s\" содержит конфликтующий столбец \"%s\"" -#: commands/tablecmds.c:5108 +#: commands/tablecmds.c:5181 #, c-format msgid "merging definition of column \"%s\" for child \"%s\"" msgstr "объединение определений столбца \"%s\" для потомка \"%s\"" -#: commands/tablecmds.c:5332 +#: commands/tablecmds.c:5205 +#, c-format +msgid "cannot recursively add identity column to table that has child tables" +msgstr "" +"добавить столбец идентификации в таблицу, у которой есть дочерние, нельзя" + +#: commands/tablecmds.c:5431 #, c-format msgid "column must be added to child tables too" msgstr "столбец также должен быть добавлен к дочерним таблицам" -#: commands/tablecmds.c:5407 +#: commands/tablecmds.c:5506 #, c-format msgid "column \"%s\" of relation \"%s\" already exists, skipping" msgstr "столбец \"%s\" отношения \"%s\" уже
существует, пропускается" -#: commands/tablecmds.c:5414 +#: commands/tablecmds.c:5513 #, c-format msgid "column \"%s\" of relation \"%s\" already exists" msgstr "столбец \"%s\" отношения \"%s\" уже существует" -#: commands/tablecmds.c:5508 commands/tablecmds.c:8245 +#: commands/tablecmds.c:5611 commands/tablecmds.c:8590 +#, c-format +msgid "" +"cannot remove constraint from only the partitioned table when partitions " +"exist" +msgstr "" +"удалить ограничение только из секционированной таблицы, когда существуют " +"секции, нельзя" + +#: commands/tablecmds.c:5612 commands/tablecmds.c:5759 +#: commands/tablecmds.c:6556 commands/tablecmds.c:8591 #, c-format -msgid "constraint must be dropped from child tables too" -msgstr "ограничение также должно удаляться из дочерних таблиц" +msgid "Do not specify the ONLY keyword." +msgstr "Не указывайте ключевое слово ONLY." -#: commands/tablecmds.c:5539 commands/tablecmds.c:5700 -#: commands/tablecmds.c:5755 commands/tablecmds.c:5870 -#: commands/tablecmds.c:5924 commands/tablecmds.c:6016 -#: commands/tablecmds.c:8394 commands/tablecmds.c:9087 +#: commands/tablecmds.c:5644 commands/tablecmds.c:5791 +#: commands/tablecmds.c:5846 commands/tablecmds.c:5921 +#: commands/tablecmds.c:6015 commands/tablecmds.c:6074 +#: commands/tablecmds.c:6198 commands/tablecmds.c:6252 +#: commands/tablecmds.c:6344 commands/tablecmds.c:8730 +#: commands/tablecmds.c:9441 #, c-format msgid "cannot alter system column \"%s\"" msgstr "системный столбец \"%s\" нельзя изменить" -#: commands/tablecmds.c:5575 +#: commands/tablecmds.c:5650 commands/tablecmds.c:5852 +#, c-format +msgid "column \"%s\" of relation \"%s\" is an identity column" +msgstr "столбец \"%s\" отношения \"%s\" является столбцом идентификации" + +#: commands/tablecmds.c:5686 #, c-format msgid "column \"%s\" is in a primary key" msgstr "столбец \"%s\" входит в первичный ключ" -#: commands/tablecmds.c:5597 +#: commands/tablecmds.c:5708 #, c-format msgid "column \"%s\" is marked NOT NULL in parent table" msgstr "столбец \"%s\" в родительской таблице помечен как NOT NULL" -#: commands/tablecmds.c:5622 +#: commands/tablecmds.c:5758 +#, c-format +msgid "" +"cannot add constraint to only the partitioned table when partitions exist" +msgstr "" +"добавить ограничение только в секционированную таблицу, когда существуют " +"секции, нельзя" + +#: commands/tablecmds.c:5854 #, c-format -msgid "column \"%s\" is in range partition key" -msgstr "столбец \"%s\" входит в ключ разбиения по диапазонам" +msgid "Use ALTER TABLE ... ALTER COLUMN ... DROP IDENTITY instead." +msgstr "Вместо этого выполните ALTER TABLE ... ALTER COLUMN ... DROP IDENTITY." 
-#: commands/tablecmds.c:5669 commands/tablecmds.c:6655 +#: commands/tablecmds.c:5932 #, c-format -msgid "constraint must be added to child tables too" -msgstr "ограничение также должно быть добавлено к дочерним таблицам" +msgid "" +"column \"%s\" of relation \"%s\" must be declared NOT NULL before identity " +"can be added" +msgstr "" +"столбец \"%s\" отношения \"%s\" должен быть объявлен как NOT NULL, чтобы его " +"можно было сделать столбцом идентификации" + +#: commands/tablecmds.c:5938 +#, c-format +msgid "column \"%s\" of relation \"%s\" is already an identity column" +msgstr "столбец \"%s\" отношения \"%s\" уже является столбцом идентификации" + +#: commands/tablecmds.c:5944 +#, c-format +msgid "column \"%s\" of relation \"%s\" already has a default value" +msgstr "столбец \"%s\" отношения \"%s\" уже имеет значение по умолчанию" + +#: commands/tablecmds.c:6021 commands/tablecmds.c:6082 +#, c-format +msgid "column \"%s\" of relation \"%s\" is not an identity column" +msgstr "столбец \"%s\" отношения \"%s\" не является столбцом идентификации" + +#: commands/tablecmds.c:6087 +#, c-format +msgid "column \"%s\" of relation \"%s\" is not an identity column, skipping" +msgstr "" +"столбец \"%s\" отношения \"%s\" не является столбцом идентификации, " +"пропускается" -#: commands/tablecmds.c:5843 +#: commands/tablecmds.c:6171 #, c-format msgid "statistics target %d is too low" msgstr "целевое ограничение статистики слишком мало (%d)" -#: commands/tablecmds.c:5851 +#: commands/tablecmds.c:6179 #, c-format msgid "lowering statistics target to %d" msgstr "целевое ограничение статистики снижается до %d" -#: commands/tablecmds.c:5996 +#: commands/tablecmds.c:6324 #, c-format msgid "invalid storage type \"%s\"" msgstr "неверный тип хранилища \"%s\"" -#: commands/tablecmds.c:6028 +#: commands/tablecmds.c:6356 #, c-format msgid "column data type %s can only have storage PLAIN" msgstr "тип данных столбца %s совместим только с хранилищем PLAIN" -#: commands/tablecmds.c:6063 +#: commands/tablecmds.c:6391 #, c-format msgid "cannot drop column from typed table" msgstr "нельзя удалить столбец в типизированной таблице" -#: commands/tablecmds.c:6170 +#: commands/tablecmds.c:6498 #, c-format msgid "column \"%s\" of relation \"%s\" does not exist, skipping" msgstr "столбец \"%s\" в таблице\"%s\" не существует, пропускается" -#: commands/tablecmds.c:6183 +#: commands/tablecmds.c:6511 #, c-format msgid "cannot drop system column \"%s\"" msgstr "нельзя удалить системный столбец \"%s\"" -#: commands/tablecmds.c:6190 +#: commands/tablecmds.c:6518 #, c-format msgid "cannot drop inherited column \"%s\"" msgstr "нельзя удалить наследованный столбец \"%s\"" -#: commands/tablecmds.c:6199 +#: commands/tablecmds.c:6527 #, c-format msgid "cannot drop column named in partition key" msgstr "нельзя удалить столбец, входящий в ключ разбиения" -#: commands/tablecmds.c:6203 +#: commands/tablecmds.c:6531 #, c-format msgid "cannot drop column referenced in partition key expression" msgstr "нельзя удалить столбец, задействованный в выражении ключа разбиения" -#: commands/tablecmds.c:6227 +#: commands/tablecmds.c:6555 #, c-format -msgid "column must be dropped from child tables too" -msgstr "столбец также должен удаляться из дочерних таблиц" +msgid "" +"cannot drop column from only the partitioned table when partitions exist" +msgstr "" +"удалить столбец только из секционированной таблицы, когда существуют секции, " +"нельзя" -#: commands/tablecmds.c:6443 +#: commands/tablecmds.c:6773 #, c-format msgid "" "ALTER TABLE / ADD 
CONSTRAINT USING INDEX will rename index \"%s\" to \"%s\"" msgstr "" "ALTER TABLE / ADD CONSTRAINT USING INDEX переименует индекс \"%s\" в \"%s\"" -#: commands/tablecmds.c:6726 +#: commands/tablecmds.c:6985 +#, c-format +msgid "constraint must be added to child tables too" +msgstr "ограничение также должно быть добавлено к дочерним таблицам" + +#: commands/tablecmds.c:7056 #, c-format msgid "cannot reference partitioned table \"%s\"" msgstr "ссылаться на секционированную таблицу \"%s\" нельзя" -#: commands/tablecmds.c:6732 +#: commands/tablecmds.c:7062 #, c-format msgid "referenced relation \"%s\" is not a table" msgstr "указанный объект \"%s\" не является таблицей" -#: commands/tablecmds.c:6755 +#: commands/tablecmds.c:7085 #, c-format msgid "constraints on permanent tables may reference only permanent tables" msgstr "" "ограничения в постоянных таблицах могут ссылаться только на постоянные " "таблицы" -#: commands/tablecmds.c:6762 +#: commands/tablecmds.c:7092 #, c-format msgid "" "constraints on unlogged tables may reference only permanent or unlogged " @@ -9631,13 +9605,13 @@ msgstr "" "ограничения в нежурналируемых таблицах могут ссылаться только на постоянные " "или нежурналируемые таблицы" -#: commands/tablecmds.c:6768 +#: commands/tablecmds.c:7098 #, c-format msgid "constraints on temporary tables may reference only temporary tables" msgstr "" "ограничения во временных таблицах могут ссылаться только на временные таблицы" -#: commands/tablecmds.c:6772 +#: commands/tablecmds.c:7102 #, c-format msgid "" "constraints on temporary tables must involve temporary tables of this session" @@ -9645,33 +9619,33 @@ msgstr "" "ограничения во временных таблицах должны ссылаться только на временные " "таблицы текущего сеанса" -#: commands/tablecmds.c:6832 +#: commands/tablecmds.c:7162 #, c-format msgid "number of referencing and referenced columns for foreign key disagree" msgstr "число столбцов в источнике и назначении внешнего ключа не совпадает" -#: commands/tablecmds.c:6939 +#: commands/tablecmds.c:7269 #, c-format msgid "foreign key constraint \"%s\" cannot be implemented" msgstr "ограничение внешнего ключа \"%s\" нельзя реализовать" -#: commands/tablecmds.c:6942 +#: commands/tablecmds.c:7272 #, c-format msgid "Key columns \"%s\" and \"%s\" are of incompatible types: %s and %s." msgstr "Столбцы ключа \"%s\" и \"%s\" имеют несовместимые типы: %s и %s." 
-#: commands/tablecmds.c:7148 commands/tablecmds.c:7314 -#: commands/tablecmds.c:8224 commands/tablecmds.c:8290 +#: commands/tablecmds.c:7477 commands/tablecmds.c:7643 +#: commands/tablecmds.c:8558 commands/tablecmds.c:8626 #, c-format msgid "constraint \"%s\" of relation \"%s\" does not exist" msgstr "ограничение \"%s\" в таблице \"%s\" не существует" -#: commands/tablecmds.c:7154 +#: commands/tablecmds.c:7483 #, c-format msgid "constraint \"%s\" of relation \"%s\" is not a foreign key constraint" msgstr "ограничение \"%s\" в таблице \"%s\" не является внешним ключом" -#: commands/tablecmds.c:7321 +#: commands/tablecmds.c:7650 #, c-format msgid "" "constraint \"%s\" of relation \"%s\" is not a foreign key or check constraint" @@ -9679,46 +9653,46 @@ msgstr "" "ограничение \"%s\" в таблице \"%s\" не является внешним ключом или " "ограничением-проверкой" -#: commands/tablecmds.c:7390 +#: commands/tablecmds.c:7720 #, c-format msgid "constraint must be validated on child tables too" msgstr "ограничение также должно соблюдаться в дочерних таблицах" -#: commands/tablecmds.c:7458 +#: commands/tablecmds.c:7788 #, c-format msgid "column \"%s\" referenced in foreign key constraint does not exist" msgstr "столбец \"%s\", указанный в ограничении внешнего ключа, не существует" -#: commands/tablecmds.c:7463 +#: commands/tablecmds.c:7793 #, c-format msgid "cannot have more than %d keys in a foreign key" msgstr "во внешнем ключе не может быть больше %d столбцов" -#: commands/tablecmds.c:7528 +#: commands/tablecmds.c:7858 #, c-format msgid "cannot use a deferrable primary key for referenced table \"%s\"" msgstr "" "использовать откладываемый первичный ключ в целевой внешней таблице \"%s\" " "нельзя" -#: commands/tablecmds.c:7545 +#: commands/tablecmds.c:7875 #, c-format msgid "there is no primary key for referenced table \"%s\"" msgstr "в целевой внешней таблице \"%s\" нет первичного ключа" -#: commands/tablecmds.c:7610 +#: commands/tablecmds.c:7940 #, c-format msgid "foreign key referenced-columns list must not contain duplicates" msgstr "в списке столбцов внешнего ключа не должно быть повторений" -#: commands/tablecmds.c:7704 +#: commands/tablecmds.c:8034 #, c-format msgid "cannot use a deferrable unique constraint for referenced table \"%s\"" msgstr "" "использовать откладываемое ограничение уникальности в целевой внешней " "таблице \"%s\" нельзя" -#: commands/tablecmds.c:7709 +#: commands/tablecmds.c:8039 #, c-format msgid "" "there is no unique constraint matching given keys for referenced table \"%s\"" @@ -9726,43 +9700,43 @@ msgstr "" "в целевой внешней таблице \"%s\" нет ограничения уникальности, " "соответствующего данным ключам" -#: commands/tablecmds.c:7876 +#: commands/tablecmds.c:8210 #, c-format msgid "validating foreign key constraint \"%s\"" msgstr "проверка ограничения внешнего ключа \"%s\"" -#: commands/tablecmds.c:8178 +#: commands/tablecmds.c:8512 #, c-format msgid "cannot drop inherited constraint \"%s\" of relation \"%s\"" msgstr "удалить наследованное ограничение \"%s\" таблицы \"%s\" нельзя" -#: commands/tablecmds.c:8230 +#: commands/tablecmds.c:8564 #, c-format msgid "constraint \"%s\" of relation \"%s\" does not exist, skipping" msgstr "ограничение \"%s\" в таблице \"%s\" не существует, пропускается" -#: commands/tablecmds.c:8378 +#: commands/tablecmds.c:8714 #, c-format msgid "cannot alter column type of typed table" msgstr "изменить тип столбца в типизированной таблице нельзя" -#: commands/tablecmds.c:8401 +#: commands/tablecmds.c:8737 #, c-format msgid "cannot alter inherited 
column \"%s\"" msgstr "изменить наследованный столбец \"%s\" нельзя" -#: commands/tablecmds.c:8410 +#: commands/tablecmds.c:8746 #, c-format msgid "cannot alter type of column named in partition key" msgstr "нельзя изменить тип столбца, составляющего ключ разбиения" -#: commands/tablecmds.c:8414 +#: commands/tablecmds.c:8750 #, c-format msgid "cannot alter type of column referenced in partition key expression" msgstr "" "нельзя изменить тип столбца, задействованного в выражении ключа разбиения" -#: commands/tablecmds.c:8464 +#: commands/tablecmds.c:8800 #, c-format msgid "" "result of USING clause for column \"%s\" cannot be cast automatically to " @@ -9770,214 +9744,225 @@ msgid "" msgstr "" "результат USING для столбца \"%s\" нельзя автоматически привести к типу %s" -#: commands/tablecmds.c:8467 +#: commands/tablecmds.c:8803 #, c-format msgid "You might need to add an explicit cast." msgstr "Возможно, необходимо добавить явное приведение." -#: commands/tablecmds.c:8471 +#: commands/tablecmds.c:8807 #, c-format msgid "column \"%s\" cannot be cast automatically to type %s" msgstr "столбец \"%s\" нельзя автоматически привести к типу %s" # skip-rule: double-colons #. translator: USING is SQL, don't translate it -#: commands/tablecmds.c:8474 +#: commands/tablecmds.c:8810 #, c-format msgid "You might need to specify \"USING %s::%s\"." msgstr "Возможно, необходимо указать \"USING %s::%s\"." -#: commands/tablecmds.c:8573 +#: commands/tablecmds.c:8909 #, c-format msgid "USING expression contains a whole-row table reference." msgstr "Выражение USING ссылается на тип всей строки таблицы." -#: commands/tablecmds.c:8584 +#: commands/tablecmds.c:8920 #, c-format msgid "type of inherited column \"%s\" must be changed in child tables too" msgstr "" "тип наследованного столбца \"%s\" должен быть изменён и в дочерних таблицах" -#: commands/tablecmds.c:8671 +#: commands/tablecmds.c:9007 #, c-format msgid "cannot alter type of column \"%s\" twice" msgstr "нельзя изменить тип столбца \"%s\" дважды" -#: commands/tablecmds.c:8707 +#: commands/tablecmds.c:9043 #, c-format msgid "default for column \"%s\" cannot be cast automatically to type %s" msgstr "" "значение по умолчанию для столбца \"%s\" нельзя автоматически привести к " "типу %s" -#: commands/tablecmds.c:8833 +#: commands/tablecmds.c:9169 #, c-format msgid "cannot alter type of a column used by a view or rule" msgstr "" "изменить тип столбца, задействованного в представлении или правиле, нельзя" -#: commands/tablecmds.c:8834 commands/tablecmds.c:8853 -#: commands/tablecmds.c:8871 +#: commands/tablecmds.c:9170 commands/tablecmds.c:9189 +#: commands/tablecmds.c:9207 #, c-format msgid "%s depends on column \"%s\"" msgstr "%s зависит от столбца \"%s\"" -#: commands/tablecmds.c:8852 +#: commands/tablecmds.c:9188 #, c-format msgid "cannot alter type of a column used in a trigger definition" msgstr "изменить тип столбца, задействованного в определении триггера, нельзя" -#: commands/tablecmds.c:8870 +#: commands/tablecmds.c:9206 #, c-format msgid "cannot alter type of a column used in a policy definition" msgstr "изменить тип столбца, задействованного в определении политики, нельзя" -#: commands/tablecmds.c:9527 +#: commands/tablecmds.c:9881 #, c-format msgid "cannot change owner of index \"%s\"" msgstr "сменить владельца индекса \"%s\" нельзя" -#: commands/tablecmds.c:9529 +#: commands/tablecmds.c:9883 #, c-format msgid "Change the ownership of the index's table, instead." msgstr "Однако возможно сменить владельца таблицы, содержащей этот индекс." 
-#: commands/tablecmds.c:9545 +#: commands/tablecmds.c:9900 #, c-format msgid "cannot change owner of sequence \"%s\"" msgstr "сменить владельца последовательности \"%s\" нельзя" -#: commands/tablecmds.c:9547 commands/tablecmds.c:12122 -#, c-format -msgid "Sequence \"%s\" is linked to table \"%s\"." -msgstr "Последовательность \"%s\" связана с таблицей \"%s\"." - -#: commands/tablecmds.c:9559 commands/tablecmds.c:12769 +#: commands/tablecmds.c:9914 commands/tablecmds.c:13143 #, c-format msgid "Use ALTER TYPE instead." msgstr "Используйте ALTER TYPE." -#: commands/tablecmds.c:9568 +#: commands/tablecmds.c:9923 #, c-format msgid "\"%s\" is not a table, view, sequence, or foreign table" msgstr "" "\"%s\" - это не таблица, представление, последовательность или сторонняя " "таблица" -#: commands/tablecmds.c:9909 +#: commands/tablecmds.c:10264 #, c-format msgid "cannot have multiple SET TABLESPACE subcommands" msgstr "в одной инструкции не может быть несколько подкоманд SET TABLESPACE" -#: commands/tablecmds.c:9983 +#: commands/tablecmds.c:10338 #, c-format msgid "\"%s\" is not a table, view, materialized view, index, or TOAST table" msgstr "" "\"%s\" - это не таблица, представление, материализованное представление, " "индекс или TOAST-таблица" -#: commands/tablecmds.c:10016 commands/view.c:504 +#: commands/tablecmds.c:10371 commands/view.c:504 #, c-format msgid "WITH CHECK OPTION is supported only on automatically updatable views" msgstr "" "WITH CHECK OPTION поддерживается только с автообновляемыми представлениями" -#: commands/tablecmds.c:10158 +#: commands/tablecmds.c:10513 #, c-format msgid "cannot move system relation \"%s\"" msgstr "переместить системную таблицу \"%s\" нельзя" -#: commands/tablecmds.c:10174 +#: commands/tablecmds.c:10529 #, c-format msgid "cannot move temporary tables of other sessions" msgstr "перемещать временные таблицы других сеансов нельзя" -#: commands/tablecmds.c:10310 +#: commands/tablecmds.c:10665 #, c-format msgid "only tables, indexes, and materialized views exist in tablespaces" msgstr "" "в табличных пространствах есть только таблицы, индексы и материализованные " "представления" -#: commands/tablecmds.c:10322 +#: commands/tablecmds.c:10677 #, c-format msgid "cannot move relations in to or out of pg_global tablespace" msgstr "перемещать объекты в/из табличного пространства pg_global нельзя" -#: commands/tablecmds.c:10414 +#: commands/tablecmds.c:10769 #, c-format msgid "aborting because lock on relation \"%s.%s\" is not available" msgstr "" "обработка прерывается из-за невозможности заблокировать отношение \"%s.%s\"" -#: commands/tablecmds.c:10430 +#: commands/tablecmds.c:10785 #, c-format msgid "no matching relations in tablespace \"%s\" found" msgstr "в табличном пространстве \"%s\" не найдены подходящие отношения" -#: commands/tablecmds.c:10504 storage/buffer/bufmgr.c:915 +#: commands/tablecmds.c:10859 storage/buffer/bufmgr.c:915 #, c-format msgid "invalid page in block %u of relation %s" msgstr "неверная страница в блоке %u отношения %s" -#: commands/tablecmds.c:10586 +#: commands/tablecmds.c:10941 #, c-format msgid "cannot change inheritance of typed table" msgstr "изменить наследование типизированной таблицы нельзя" -#: commands/tablecmds.c:10591 commands/tablecmds.c:11119 +#: commands/tablecmds.c:10946 commands/tablecmds.c:11488 #, c-format msgid "cannot change inheritance of a partition" msgstr "изменить наследование секции нельзя" -#: commands/tablecmds.c:10596 +#: commands/tablecmds.c:10951 #, c-format msgid "cannot change inheritance of
partitioned table" msgstr "изменить наследование секционированной таблицы нельзя" -#: commands/tablecmds.c:10641 +#: commands/tablecmds.c:10997 #, c-format msgid "cannot inherit to temporary relation of another session" msgstr "наследование для временного отношения другого сеанса невозможно" -#: commands/tablecmds.c:10654 +#: commands/tablecmds.c:11010 #, c-format msgid "cannot inherit from a partition" msgstr "наследование от секции невозможно" -#: commands/tablecmds.c:10676 commands/tablecmds.c:13121 +#: commands/tablecmds.c:11032 commands/tablecmds.c:13537 #, c-format msgid "circular inheritance not allowed" msgstr "циклическое наследование недопустимо" -#: commands/tablecmds.c:10677 commands/tablecmds.c:13122 +#: commands/tablecmds.c:11033 commands/tablecmds.c:13538 #, c-format msgid "\"%s\" is already a child of \"%s\"." msgstr "\"%s\" уже является потомком \"%s\"." -#: commands/tablecmds.c:10685 +#: commands/tablecmds.c:11041 #, c-format msgid "table \"%s\" without OIDs cannot inherit from table \"%s\" with OIDs" msgstr "таблица \"%s\" без OID не может наследоваться от таблицы \"%s\" с OID" -#: commands/tablecmds.c:10889 +#: commands/tablecmds.c:11054 +#, c-format +msgid "trigger \"%s\" prevents table \"%s\" from becoming an inheritance child" +msgstr "" +"триггер \"%s\" не позволяет таблице \"%s\" стать потомком в иерархии " +"наследования" + +#: commands/tablecmds.c:11056 +#, c-format +msgid "" +"ROW triggers with transition tables are not supported in inheritance " +"hierarchies" +msgstr "" +"триггеры ROW с переходными таблицами не поддерживаются в иерархиях " +"наследования" + +#: commands/tablecmds.c:11258 #, c-format msgid "column \"%s\" in child table must be marked NOT NULL" msgstr "столбец \"%s\" в дочерней таблице должен быть помечен как NOT NULL" -#: commands/tablecmds.c:10916 commands/tablecmds.c:10955 +#: commands/tablecmds.c:11285 commands/tablecmds.c:11324 #, c-format msgid "child table is missing column \"%s\"" msgstr "в дочерней таблице не хватает столбца \"%s\"" -#: commands/tablecmds.c:11043 +#: commands/tablecmds.c:11412 #, c-format msgid "child table \"%s\" has different definition for check constraint \"%s\"" msgstr "" "дочерняя таблица \"%s\" содержит другое определение ограничения-проверки \"%s" "\"" -#: commands/tablecmds.c:11051 +#: commands/tablecmds.c:11420 #, c-format msgid "" "constraint \"%s\" conflicts with non-inherited constraint on child table \"%s" @@ -9986,7 +9971,7 @@ msgstr "" "ограничение \"%s\" конфликтует с ненаследуемым ограничением дочерней таблицы " "\"%s\"" -#: commands/tablecmds.c:11062 +#: commands/tablecmds.c:11431 #, c-format msgid "" "constraint \"%s\" conflicts with NOT VALID constraint on child table \"%s\"" @@ -9994,81 +9979,81 @@ msgstr "" "ограничение \"%s\" конфликтует с непроверенным (NOT VALID) ограничением " "дочерней таблицы \"%s\"" -#: commands/tablecmds.c:11097 +#: commands/tablecmds.c:11466 #, c-format msgid "child table is missing constraint \"%s\"" msgstr "в дочерней таблице не хватает ограничения \"%s\"" -#: commands/tablecmds.c:11213 +#: commands/tablecmds.c:11582 #, c-format msgid "relation \"%s\" is not a partition of relation \"%s\"" msgstr "отношение \"%s\" не является секцией отношения \"%s\"" -#: commands/tablecmds.c:11219 +#: commands/tablecmds.c:11588 #, c-format msgid "relation \"%s\" is not a parent of relation \"%s\"" msgstr "отношение \"%s\" не является предком отношения \"%s\"" -#: commands/tablecmds.c:11443 +#: commands/tablecmds.c:11814 #, c-format msgid "typed tables cannot inherit" msgstr 
"типизированные таблицы не могут наследоваться" -#: commands/tablecmds.c:11474 +#: commands/tablecmds.c:11845 #, c-format msgid "table is missing column \"%s\"" msgstr "в таблице не хватает столбца \"%s\"" -#: commands/tablecmds.c:11484 +#: commands/tablecmds.c:11855 #, c-format msgid "table has column \"%s\" where type requires \"%s\"" msgstr "таблица содержит столбец \"%s\", тогда как тип требует \"%s\"" -#: commands/tablecmds.c:11493 +#: commands/tablecmds.c:11864 #, c-format msgid "table \"%s\" has different type for column \"%s\"" msgstr "таблица \"%s\" содержит столбец \"%s\" другого типа" -#: commands/tablecmds.c:11506 +#: commands/tablecmds.c:11877 #, c-format msgid "table has extra column \"%s\"" msgstr "таблица содержит лишний столбец \"%s\"" -#: commands/tablecmds.c:11557 +#: commands/tablecmds.c:11929 #, c-format msgid "\"%s\" is not a typed table" msgstr "\"%s\" - это не типизированная таблица" -#: commands/tablecmds.c:11738 +#: commands/tablecmds.c:12111 #, c-format msgid "cannot use non-unique index \"%s\" as replica identity" msgstr "" "для идентификации реплики нельзя использовать неуникальный индекс \"%s\"" -#: commands/tablecmds.c:11744 +#: commands/tablecmds.c:12117 #, c-format msgid "cannot use non-immediate index \"%s\" as replica identity" msgstr "" "для идентификации реплики нельзя использовать не непосредственный индекс \"%s" "\"" -#: commands/tablecmds.c:11750 +#: commands/tablecmds.c:12123 #, c-format msgid "cannot use expression index \"%s\" as replica identity" msgstr "" "для идентификации реплики нельзя использовать индекс с выражением \"%s\"" -#: commands/tablecmds.c:11756 +#: commands/tablecmds.c:12129 #, c-format msgid "cannot use partial index \"%s\" as replica identity" msgstr "для идентификации реплики нельзя использовать частичный индекс \"%s\"" -#: commands/tablecmds.c:11762 +#: commands/tablecmds.c:12135 #, c-format msgid "cannot use invalid index \"%s\" as replica identity" msgstr "для идентификации реплики нельзя использовать нерабочий индекс \"%s\"" -#: commands/tablecmds.c:11783 +#: commands/tablecmds.c:12156 #, c-format msgid "" "index \"%s\" cannot be used as replica identity because column %d is a " @@ -10077,7 +10062,7 @@ msgstr "" "индекс \"%s\" нельзя использовать для идентификации реплики, так как столбец " "%d - системный" -#: commands/tablecmds.c:11790 +#: commands/tablecmds.c:12163 #, c-format msgid "" "index \"%s\" cannot be used as replica identity because column \"%s\" is " @@ -10086,13 +10071,13 @@ msgstr "" "индекс \"%s\" нельзя использовать для идентификации реплики, так как столбец " "\"%s\" допускает NULL" -#: commands/tablecmds.c:11983 +#: commands/tablecmds.c:12356 #, c-format msgid "cannot change logged status of table \"%s\" because it is temporary" msgstr "" "изменить состояние журналирования таблицы %s нельзя, так как она временная" -#: commands/tablecmds.c:12007 +#: commands/tablecmds.c:12380 #, c-format msgid "" "cannot change table \"%s\" to unlogged because it is part of a publication" @@ -10100,12 +10085,12 @@ msgstr "" "таблицу \"%s\" нельзя сделать нежурналируемой, так как она включена в " "публикацию" -#: commands/tablecmds.c:12009 +#: commands/tablecmds.c:12382 #, c-format msgid "Unlogged relations cannot be replicated." msgstr "Нежурналируемые отношения не поддерживают репликацию." 
-#: commands/tablecmds.c:12054 +#: commands/tablecmds.c:12427 #, c-format msgid "" "could not change table \"%s\" to logged because it references unlogged table " @@ -10114,7 +10099,7 @@ msgstr "" "не удалось сделать таблицу \"%s\" журналируемой, так как она ссылается на " "нежурналируемую таблицу \"%s\"" -#: commands/tablecmds.c:12064 +#: commands/tablecmds.c:12437 #, c-format msgid "" "could not change table \"%s\" to unlogged because it references logged table " @@ -10123,22 +10108,22 @@ msgstr "" "не удалось сделать таблицу \"%s\" нежурналируемой, так как она ссылается на " "журналируемую таблицу \"%s\"" -#: commands/tablecmds.c:12121 +#: commands/tablecmds.c:12495 #, c-format msgid "cannot move an owned sequence into another schema" msgstr "переместить последовательность с владельцем в другую схему нельзя" -#: commands/tablecmds.c:12227 +#: commands/tablecmds.c:12601 #, c-format msgid "relation \"%s\" already exists in schema \"%s\"" msgstr "отношение \"%s\" уже существует в схеме \"%s\"" -#: commands/tablecmds.c:12753 +#: commands/tablecmds.c:13127 #, c-format msgid "\"%s\" is not a composite type" msgstr "\"%s\" - это не составной тип" -#: commands/tablecmds.c:12784 +#: commands/tablecmds.c:13158 #, c-format msgid "" "\"%s\" is not a table, view, materialized view, sequence, or foreign table" @@ -10146,52 +10131,63 @@ msgstr "" "\"%s\" - это не таблица, представление, мат. представление, " "последовательность или сторонняя таблица" -#: commands/tablecmds.c:12815 +#: commands/tablecmds.c:13191 #, c-format msgid "unrecognized partitioning strategy \"%s\"" msgstr "нераспознанная стратегия секционирования \"%s\"" -#: commands/tablecmds.c:12841 +#: commands/tablecmds.c:13199 +#, c-format +msgid "cannot use \"list\" partition strategy with more than one column" +msgstr "стратегия секционирования по списку не поддерживает несколько столбцов" + +#: commands/tablecmds.c:13224 #, c-format msgid "column \"%s\" appears more than once in partition key" msgstr "столбец \"%s\" фигурирует в ключе разбиения неоднократно" -#: commands/tablecmds.c:12889 +#: commands/tablecmds.c:13277 #, c-format msgid "column \"%s\" named in partition key does not exist" msgstr "столбец \"%s\", упомянутый в ключе разбиения, не существует" -#: commands/tablecmds.c:12896 +#: commands/tablecmds.c:13284 #, c-format msgid "cannot use system column \"%s\" in partition key" msgstr "системный столбец \"%s\" нельзя использовать в ключе разбиения" -#: commands/tablecmds.c:12954 +#: commands/tablecmds.c:13347 #, c-format msgid "functions in partition key expression must be marked IMMUTABLE" msgstr "функции в выражении ключа разбиения должны быть помечены как IMMUTABLE" -#: commands/tablecmds.c:12963 -#, c-format -msgid "cannot use constant expression as partition key" -msgstr "в качестве ключа разбиения нельзя использовать константное выражение" - -#: commands/tablecmds.c:12977 +#: commands/tablecmds.c:13364 #, c-format msgid "partition key expressions cannot contain whole-row references" msgstr "выражения ключей разбиения не могут содержать ссылки на кортеж целиком" -#: commands/tablecmds.c:12998 +#: commands/tablecmds.c:13371 +#, c-format +msgid "partition key expressions cannot contain system column references" +msgstr "" +"выражения ключей разбиения не могут содержать ссылки на системный столбец" + +#: commands/tablecmds.c:13381 +#, c-format +msgid "cannot use constant expression as partition key" +msgstr "в качестве ключа разбиения нельзя использовать константное выражение" + +#: commands/tablecmds.c:13402 #, c-format msgid 
"could not determine which collation to use for partition expression" msgstr "не удалось определить правило сортировки для выражения разбиения" -#: commands/tablecmds.c:13023 +#: commands/tablecmds.c:13427 #, c-format msgid "data type %s has no default btree operator class" msgstr "для типа данных %s не определён класс операторов B-дерева по умолчанию" -#: commands/tablecmds.c:13025 +#: commands/tablecmds.c:13429 #, c-format msgid "" "You must specify a btree operator class or define a default btree operator " @@ -10200,27 +10196,27 @@ msgstr "" "Вы должны указать класс операторов B-дерева или определить класс операторов " "B-дерева по умолчанию для этого типа данных." -#: commands/tablecmds.c:13072 +#: commands/tablecmds.c:13477 #, c-format msgid "\"%s\" is already a partition" msgstr "\"%s\" уже является секцией" -#: commands/tablecmds.c:13078 +#: commands/tablecmds.c:13483 #, c-format msgid "cannot attach a typed table as partition" msgstr "подключить типизированную таблицу в качестве секции нельзя" -#: commands/tablecmds.c:13094 +#: commands/tablecmds.c:13499 #, c-format msgid "cannot attach inheritance child as partition" msgstr "подключить потомок в иерархии наследования в качестве секции нельзя" -#: commands/tablecmds.c:13108 +#: commands/tablecmds.c:13513 #, c-format msgid "cannot attach inheritance parent as partition" msgstr "подключить родитель в иерархии наследования в качестве секции нельзя" -#: commands/tablecmds.c:13131 +#: commands/tablecmds.c:13547 #, c-format msgid "" "cannot attach a permanent relation as partition of temporary relation \"%s\"" @@ -10228,18 +10224,18 @@ msgstr "" "подключить постоянное отношение в качестве секции временного отношения \"%s" "\" нельзя" -#: commands/tablecmds.c:13139 +#: commands/tablecmds.c:13555 #, c-format msgid "cannot attach as partition of temporary relation of another session" msgstr "подключить секцию к временному отношению в другом сеансе нельзя" -#: commands/tablecmds.c:13146 +#: commands/tablecmds.c:13562 #, c-format msgid "cannot attach temporary relation of another session as partition" msgstr "" "подключить временное отношение из другого сеанса в качестве секции нельзя" -#: commands/tablecmds.c:13152 +#: commands/tablecmds.c:13568 #, c-format msgid "" "cannot attach table \"%s\" without OIDs as partition of table \"%s\" with " @@ -10248,7 +10244,7 @@ msgstr "" "нельзя подключить таблицу \"%s\" без OID в качестве секции таблицы \"%s\" с " "OID" -#: commands/tablecmds.c:13160 +#: commands/tablecmds.c:13576 #, c-format msgid "" "cannot attach table \"%s\" with OIDs as partition of table \"%s\" without " @@ -10257,20 +10253,30 @@ msgstr "" "нельзя подключить таблицу \"%s\" с OID в качестве секции таблицы \"%s\" без " "OID" -#: commands/tablecmds.c:13182 +#: commands/tablecmds.c:13598 #, c-format msgid "table \"%s\" contains column \"%s\" not found in parent \"%s\"" msgstr "" "таблица \"%s\" содержит столбец \"%s\", отсутствующий в родителе \"%s\"" -#: commands/tablecmds.c:13185 +#: commands/tablecmds.c:13601 #, c-format -msgid "New partition should contain only the columns present in parent." +msgid "The new partition may contain only the columns present in parent." msgstr "" -"Новая секция должна содержать только столбцы, имеющиеся в родительской " +"Новая секция может содержать только столбцы, имеющиеся в родительской " "таблице." 
-#: commands/tablecmds.c:13357 +#: commands/tablecmds.c:13613 +#, c-format +msgid "trigger \"%s\" prevents table \"%s\" from becoming a partition" +msgstr "триггер \"%s\" не позволяет сделать таблицу \"%s\" секцией" + +#: commands/tablecmds.c:13615 commands/trigger.c:393 +#, c-format +msgid "ROW triggers with transition tables are not supported on partitions" +msgstr "триггеры ROW с переходными таблицами для секций не поддерживаются" + +#: commands/tablecmds.c:13740 #, c-format msgid "" "partition constraint for table \"%s\" is implied by existing constraints" @@ -10280,7 +10286,7 @@ msgstr "" #: commands/tablespace.c:162 commands/tablespace.c:179 #: commands/tablespace.c:190 commands/tablespace.c:198 -#: commands/tablespace.c:623 replication/slot.c:1106 storage/file/copydir.c:47 +#: commands/tablespace.c:623 replication/slot.c:1178 storage/file/copydir.c:47 #, c-format msgid "could not create directory \"%s\": %m" msgstr "не удалось создать каталог \"%s\": %m" @@ -10409,201 +10415,227 @@ msgstr "удалить каталоги табличного пространс msgid "You can remove the directories manually if necessary." msgstr "При необходимости вы можете удалить их вручную." -#: commands/trigger.c:187 +#: commands/trigger.c:190 #, c-format msgid "\"%s\" is a table" msgstr "\"%s\" - это таблица" -#: commands/trigger.c:189 +#: commands/trigger.c:192 #, c-format msgid "Tables cannot have INSTEAD OF triggers." msgstr "У таблиц не может быть триггеров INSTEAD OF." -#: commands/trigger.c:194 commands/trigger.c:360 -#, c-format -msgid "\"%s\" is a partitioned table" -msgstr "\"%s\" - секционированная таблица" - -#: commands/trigger.c:196 +#: commands/trigger.c:199 #, c-format msgid "Partitioned tables cannot have ROW triggers." msgstr "У секционированных таблиц не может быть триггеров на уровне строк." -#: commands/trigger.c:207 commands/trigger.c:214 +#: commands/trigger.c:210 commands/trigger.c:217 commands/trigger.c:375 #, c-format msgid "\"%s\" is a view" msgstr "\"%s\" - это представление" -#: commands/trigger.c:209 +#: commands/trigger.c:212 #, c-format msgid "Views cannot have row-level BEFORE or AFTER triggers." msgstr "У представлений не может быть строковых триггеров BEFORE/AFTER." -#: commands/trigger.c:216 +#: commands/trigger.c:219 #, c-format msgid "Views cannot have TRUNCATE triggers." msgstr "У представлений не может быть триггеров TRUNCATE." -#: commands/trigger.c:224 commands/trigger.c:231 commands/trigger.c:238 +#: commands/trigger.c:227 commands/trigger.c:234 commands/trigger.c:246 +#: commands/trigger.c:368 #, c-format msgid "\"%s\" is a foreign table" msgstr "\"%s\" - сторонняя таблица" -#: commands/trigger.c:226 +#: commands/trigger.c:229 #, c-format msgid "Foreign tables cannot have INSTEAD OF triggers." msgstr "У сторонних таблиц не может быть триггеров INSTEAD OF." -#: commands/trigger.c:233 +#: commands/trigger.c:236 #, c-format msgid "Foreign tables cannot have TRUNCATE triggers." msgstr "У сторонних таблиц не может быть триггеров TRUNCATE." -#: commands/trigger.c:240 +#: commands/trigger.c:248 #, c-format msgid "Foreign tables cannot have constraint triggers." msgstr "У сторонних таблиц не может быть ограничивающих триггеров." 
-#: commands/trigger.c:303 +#: commands/trigger.c:311 #, c-format msgid "TRUNCATE FOR EACH ROW triggers are not supported" msgstr "триггеры TRUNCATE FOR EACH ROW не поддерживаются" -#: commands/trigger.c:311 +#: commands/trigger.c:319 #, c-format msgid "INSTEAD OF triggers must be FOR EACH ROW" msgstr "триггеры INSTEAD OF должны иметь тип FOR EACH ROW" -#: commands/trigger.c:315 +#: commands/trigger.c:323 #, c-format msgid "INSTEAD OF triggers cannot have WHEN conditions" msgstr "триггеры INSTEAD OF несовместимы с условиями WHEN" -#: commands/trigger.c:319 +#: commands/trigger.c:327 #, c-format msgid "INSTEAD OF triggers cannot have column lists" msgstr "для триггеров INSTEAD OF нельзя задать список столбцов" -#: commands/trigger.c:348 +#: commands/trigger.c:356 #, c-format msgid "ROW variable naming in the REFERENCING clause is not supported" msgstr "" "указание переменной типа кортеж в предложении REFERENCING не поддерживается" -#: commands/trigger.c:349 +#: commands/trigger.c:357 #, c-format msgid "Use OLD TABLE or NEW TABLE for naming transition tables." msgstr "Используйте OLD TABLE или NEW TABLE для именования переходных таблиц." -#: commands/trigger.c:362 +#: commands/trigger.c:370 +#, c-format +msgid "Triggers on foreign tables cannot have transition tables." +msgstr "Триггеры сторонних таблиц не могут использовать переходные таблицы." + +#: commands/trigger.c:377 +#, c-format +msgid "Triggers on views cannot have transition tables." +msgstr "Триггеры представлений не могут использовать переходные таблицы." + +#: commands/trigger.c:397 #, c-format -msgid "Triggers on partitioned tables cannot have transition tables." +msgid "" +"ROW triggers with transition tables are not supported on inheritance children" msgstr "" -"Триггеры секционированных таблиц не могут использовать переходные таблицы." 
+"триггеры ROW с переходными таблицами для потомков в иерархии наследования не " +"поддерживаются" -#: commands/trigger.c:367 +#: commands/trigger.c:403 #, c-format msgid "transition table name can only be specified for an AFTER trigger" msgstr "имя переходной таблицы можно задать только для триггера AFTER" -#: commands/trigger.c:375 +#: commands/trigger.c:408 +#, c-format +msgid "TRUNCATE triggers with transition tables are not supported" +msgstr "триггеры TRUNCATE с переходными таблицами не поддерживаются" + +#: commands/trigger.c:425 +#, c-format +msgid "" +"transition tables cannot be specified for triggers with more than one event" +msgstr "" +"переходные таблицы нельзя задать для триггеров, назначаемых для нескольких " +"событий" + +#: commands/trigger.c:436 +#, c-format +msgid "transition tables cannot be specified for triggers with column lists" +msgstr "переходные таблицы нельзя задать для триггеров со списками столбцов" + +#: commands/trigger.c:453 #, c-format msgid "NEW TABLE can only be specified for an INSERT or UPDATE trigger" msgstr "NEW TABLE можно задать только для триггеров INSERT или UPDATE" -#: commands/trigger.c:380 +#: commands/trigger.c:458 #, c-format msgid "NEW TABLE cannot be specified multiple times" msgstr "NEW TABLE нельзя задать несколько раз" -#: commands/trigger.c:390 +#: commands/trigger.c:468 #, c-format msgid "OLD TABLE can only be specified for a DELETE or UPDATE trigger" msgstr "OLD TABLE можно задать только для триггеров DELETE или UPDATE" -#: commands/trigger.c:395 +#: commands/trigger.c:473 #, c-format msgid "OLD TABLE cannot be specified multiple times" msgstr "OLD TABLE нельзя задать несколько раз" -#: commands/trigger.c:405 +#: commands/trigger.c:483 #, c-format msgid "OLD TABLE name and NEW TABLE name cannot be the same" msgstr "имя OLD TABLE не должно совпадать с именем NEW TABLE" -#: commands/trigger.c:462 commands/trigger.c:475 +#: commands/trigger.c:540 commands/trigger.c:553 #, c-format msgid "statement trigger's WHEN condition cannot reference column values" msgstr "" "в условии WHEN для операторного триггера нельзя ссылаться на значения " "столбцов" -#: commands/trigger.c:467 +#: commands/trigger.c:545 #, c-format msgid "INSERT trigger's WHEN condition cannot reference OLD values" msgstr "в условии WHEN для триггера INSERT нельзя ссылаться на значения OLD" -#: commands/trigger.c:480 +#: commands/trigger.c:558 #, c-format msgid "DELETE trigger's WHEN condition cannot reference NEW values" msgstr "в условии WHEN для триггера DELETE нельзя ссылаться на значения NEW" -#: commands/trigger.c:485 +#: commands/trigger.c:563 #, c-format msgid "BEFORE trigger's WHEN condition cannot reference NEW system columns" msgstr "" "в условии WHEN для триггера BEFORE нельзя ссылаться на системные столбцы NEW" -#: commands/trigger.c:650 commands/trigger.c:1421 +#: commands/trigger.c:728 commands/trigger.c:1499 #, c-format msgid "trigger \"%s\" for relation \"%s\" already exists" msgstr "триггер \"%s\" для отношения \"%s\" уже существует" -#: commands/trigger.c:946 +#: commands/trigger.c:1024 msgid "Found referenced table's UPDATE trigger." msgstr "Найден триггер UPDATE в главной таблице." -#: commands/trigger.c:947 +#: commands/trigger.c:1025 msgid "Found referenced table's DELETE trigger." msgstr "Найден триггер DELETE в главной таблице." -#: commands/trigger.c:948 +#: commands/trigger.c:1026 msgid "Found referencing table's trigger." msgstr "Найден триггер в подчинённой таблице." 
-#: commands/trigger.c:1057 commands/trigger.c:1073 +#: commands/trigger.c:1135 commands/trigger.c:1151 #, c-format msgid "ignoring incomplete trigger group for constraint \"%s\" %s" msgstr "неполный набор триггеров для ограничения \"%s\" %s игнорируется" -#: commands/trigger.c:1086 +#: commands/trigger.c:1164 #, c-format msgid "converting trigger group into constraint \"%s\" %s" msgstr "преобразование набора триггеров в ограничение \"%s\" %s" -#: commands/trigger.c:1307 commands/trigger.c:1466 commands/trigger.c:1581 +#: commands/trigger.c:1385 commands/trigger.c:1544 commands/trigger.c:1659 #, c-format msgid "trigger \"%s\" for table \"%s\" does not exist" msgstr "триггер \"%s\" для таблицы \"%s\" не существует" -#: commands/trigger.c:1549 +#: commands/trigger.c:1627 #, c-format msgid "permission denied: \"%s\" is a system trigger" msgstr "нет доступа: \"%s\" - это системный триггер" -#: commands/trigger.c:2104 +#: commands/trigger.c:2206 #, c-format msgid "trigger function %u returned null value" msgstr "триггерная функция %u вернула значение NULL" -#: commands/trigger.c:2165 commands/trigger.c:2371 commands/trigger.c:2582 -#: commands/trigger.c:2861 +#: commands/trigger.c:2272 commands/trigger.c:2487 commands/trigger.c:2706 +#: commands/trigger.c:2991 #, c-format msgid "BEFORE STATEMENT trigger cannot return a value" msgstr "триггер BEFORE STATEMENT не может возвращать значение" -#: commands/trigger.c:2923 executor/nodeModifyTable.c:747 -#: executor/nodeModifyTable.c:1042 +#: commands/trigger.c:3053 executor/nodeModifyTable.c:798 +#: executor/nodeModifyTable.c:1095 #, c-format msgid "" "tuple to be updated was already modified by an operation triggered by the " @@ -10612,8 +10644,8 @@ msgstr "" "кортеж, который должен быть изменён, уже модифицирован в операции, вызванной " "текущей командой" -#: commands/trigger.c:2924 executor/nodeModifyTable.c:748 -#: executor/nodeModifyTable.c:1043 +#: commands/trigger.c:3054 executor/nodeModifyTable.c:799 +#: executor/nodeModifyTable.c:1096 #, c-format msgid "" "Consider using an AFTER trigger instead of a BEFORE trigger to propagate " @@ -10622,19 +10654,19 @@ msgstr "" "Возможно, для распространения изменений в другие строки следует использовать " "триггер AFTER вместо BEFORE." 
-#: commands/trigger.c:2938 executor/execMain.c:2562 executor/nodeLockRows.c:216 -#: executor/nodeModifyTable.c:214 executor/nodeModifyTable.c:760 -#: executor/nodeModifyTable.c:1055 executor/nodeModifyTable.c:1221 +#: commands/trigger.c:3068 executor/execMain.c:2696 executor/nodeLockRows.c:220 +#: executor/nodeModifyTable.c:214 executor/nodeModifyTable.c:811 +#: executor/nodeModifyTable.c:1108 executor/nodeModifyTable.c:1277 #, c-format msgid "could not serialize access due to concurrent update" msgstr "не удалось сериализовать доступ из-за параллельного изменения" -#: commands/trigger.c:4841 +#: commands/trigger.c:5200 #, c-format msgid "constraint \"%s\" is not deferrable" msgstr "ограничение \"%s\" не является откладываемым" -#: commands/trigger.c:4864 +#: commands/trigger.c:5223 #, c-format msgid "constraint \"%s\" does not exist" msgstr "ограничение \"%s\" не существует" @@ -10740,7 +10772,7 @@ msgstr "неверный формат списка параметров: \"%s\"" msgid "must be superuser to create a base type" msgstr "для создания базового типа нужно быть суперпользователем" -#: commands/typecmds.c:290 commands/typecmds.c:1414 +#: commands/typecmds.c:290 commands/typecmds.c:1435 #, c-format msgid "type attribute \"%s\" not recognized" msgstr "атрибут типа \"%s\" не распознан" @@ -10855,80 +10887,80 @@ msgid "check constraints for domains cannot be marked NO INHERIT" msgstr "" "ограничения-проверки для доменов не могут иметь характеристики NO INHERIT" -#: commands/typecmds.c:993 commands/typecmds.c:2512 +#: commands/typecmds.c:993 commands/typecmds.c:2533 #, c-format msgid "unique constraints not possible for domains" msgstr "ограничения уникальности невозможны для доменов" -#: commands/typecmds.c:999 commands/typecmds.c:2518 +#: commands/typecmds.c:999 commands/typecmds.c:2539 #, c-format msgid "primary key constraints not possible for domains" msgstr "ограничения первичного ключа невозможны для доменов" -#: commands/typecmds.c:1005 commands/typecmds.c:2524 +#: commands/typecmds.c:1005 commands/typecmds.c:2545 #, c-format msgid "exclusion constraints not possible for domains" msgstr "ограничения-исключения невозможны для доменов" -#: commands/typecmds.c:1011 commands/typecmds.c:2530 +#: commands/typecmds.c:1011 commands/typecmds.c:2551 #, c-format msgid "foreign key constraints not possible for domains" msgstr "ограничения внешних ключей невозможны для доменов" -#: commands/typecmds.c:1020 commands/typecmds.c:2539 +#: commands/typecmds.c:1020 commands/typecmds.c:2560 #, c-format msgid "specifying constraint deferrability not supported for domains" msgstr "" "возможность определения отложенных ограничений для доменов не поддерживается" -#: commands/typecmds.c:1284 utils/cache/typcache.c:1648 +#: commands/typecmds.c:1305 utils/cache/typcache.c:1698 #, c-format msgid "%s is not an enum" msgstr "\"%s\" не является перечислением" -#: commands/typecmds.c:1422 +#: commands/typecmds.c:1443 #, c-format msgid "type attribute \"subtype\" is required" msgstr "требуется атрибут типа \"subtype\"" -#: commands/typecmds.c:1427 +#: commands/typecmds.c:1448 #, c-format msgid "range subtype cannot be %s" msgstr "%s не может быть подтипом диапазона" -#: commands/typecmds.c:1446 +#: commands/typecmds.c:1467 #, c-format msgid "range collation specified but subtype does not support collation" msgstr "" "указано правило сортировки для диапазона, но подтип не поддерживает " "сортировку" -#: commands/typecmds.c:1680 +#: commands/typecmds.c:1701 #, c-format msgid "changing argument type of function %s from \"opaque\" to 
\"cstring\"" msgstr "изменение типа аргумента функции %s с \"opaque\" на \"cstring\"" -#: commands/typecmds.c:1731 +#: commands/typecmds.c:1752 #, c-format msgid "changing argument type of function %s from \"opaque\" to %s" msgstr "изменение типа аргумента функции %s с \"opaque\" на %s" -#: commands/typecmds.c:1830 +#: commands/typecmds.c:1851 #, c-format msgid "typmod_in function %s must return type %s" msgstr "функция TYPMOD_IN %s должна возвращать тип %s" -#: commands/typecmds.c:1857 +#: commands/typecmds.c:1878 #, c-format msgid "typmod_out function %s must return type %s" msgstr "функция TYPMOD_OUT %s должна возвращать тип %s" -#: commands/typecmds.c:1884 +#: commands/typecmds.c:1905 #, c-format msgid "type analyze function %s must return type %s" msgstr "функция анализа типа %s должна возвращать тип %s" -#: commands/typecmds.c:1930 +#: commands/typecmds.c:1951 #, c-format msgid "" "You must specify an operator class for the range type or define a default " @@ -10937,306 +10969,312 @@ msgstr "" "Вы должны указать класс операторов для типа диапазона или определить класс " "операторов по умолчанию для этого подтипа." -#: commands/typecmds.c:1961 +#: commands/typecmds.c:1982 #, c-format msgid "range canonical function %s must return range type" msgstr "" "функция получения канонического диапазона %s должна возвращать диапазон" -#: commands/typecmds.c:1967 +#: commands/typecmds.c:1988 #, c-format msgid "range canonical function %s must be immutable" msgstr "" "функция получения канонического диапазона %s должна быть постоянной " "(IMMUTABLE)" -#: commands/typecmds.c:2003 +#: commands/typecmds.c:2024 #, c-format msgid "range subtype diff function %s must return type %s" msgstr "функция различий для подтипа диапазона (%s) должна возвращать тип %s" -#: commands/typecmds.c:2010 +#: commands/typecmds.c:2031 #, c-format msgid "range subtype diff function %s must be immutable" msgstr "" "функция различий для подтипа диапазона (%s) должна быть постоянной " "(IMMUTABLE)" -#: commands/typecmds.c:2037 +#: commands/typecmds.c:2058 #, c-format msgid "pg_type array OID value not set when in binary upgrade mode" msgstr "значение OID массива в pg_type не задано в режиме двоичного обновления" -#: commands/typecmds.c:2340 +#: commands/typecmds.c:2361 #, c-format msgid "column \"%s\" of table \"%s\" contains null values" msgstr "столбец \"%s\" таблицы \"%s\" содержит значения NULL" -#: commands/typecmds.c:2453 commands/typecmds.c:2636 +#: commands/typecmds.c:2474 commands/typecmds.c:2657 #, c-format msgid "constraint \"%s\" of domain \"%s\" does not exist" msgstr "ограничение \"%s\" для домена \"%s\" не существует" -#: commands/typecmds.c:2457 +#: commands/typecmds.c:2478 #, c-format msgid "constraint \"%s\" of domain \"%s\" does not exist, skipping" msgstr "ограничение \"%s\" для домена \"%s\" не существует, пропускается" -#: commands/typecmds.c:2642 +#: commands/typecmds.c:2663 #, c-format msgid "constraint \"%s\" of domain \"%s\" is not a check constraint" msgstr "" "ограничение \"%s\" для домена \"%s\" не является ограничением-проверкой" -#: commands/typecmds.c:2747 +#: commands/typecmds.c:2768 #, c-format msgid "" "column \"%s\" of table \"%s\" contains values that violate the new constraint" msgstr "" "столбец \"%s\" таблицы \"%s\" содержит значения, нарушающие новое ограничение" -#: commands/typecmds.c:2960 commands/typecmds.c:3247 commands/typecmds.c:3434 +#: commands/typecmds.c:2996 commands/typecmds.c:3201 commands/typecmds.c:3283 +#: commands/typecmds.c:3470 #, c-format msgid "%s is not a domain" 
msgstr "\"%s\" - это не домен" -#: commands/typecmds.c:2994 +#: commands/typecmds.c:3030 #, c-format msgid "constraint \"%s\" for domain \"%s\" already exists" msgstr "ограничение \"%s\" для домена \"%s\" уже существует" -#: commands/typecmds.c:3045 +#: commands/typecmds.c:3081 #, c-format msgid "cannot use table references in domain check constraint" msgstr "в ограничении-проверке для домена нельзя ссылаться на таблицы" -#: commands/typecmds.c:3177 commands/typecmds.c:3259 commands/typecmds.c:3551 +#: commands/typecmds.c:3213 commands/typecmds.c:3295 commands/typecmds.c:3587 #, c-format msgid "%s is a table's row type" msgstr "%s - это тип строк таблицы" -#: commands/typecmds.c:3179 commands/typecmds.c:3261 commands/typecmds.c:3553 +#: commands/typecmds.c:3215 commands/typecmds.c:3297 commands/typecmds.c:3589 #, c-format msgid "Use ALTER TABLE instead." msgstr "Изменить его можно с помощью ALTER TABLE." -#: commands/typecmds.c:3186 commands/typecmds.c:3268 commands/typecmds.c:3466 +#: commands/typecmds.c:3222 commands/typecmds.c:3304 commands/typecmds.c:3502 #, c-format msgid "cannot alter array type %s" msgstr "изменить тип массива \"%s\" нельзя" -#: commands/typecmds.c:3188 commands/typecmds.c:3270 commands/typecmds.c:3468 +#: commands/typecmds.c:3224 commands/typecmds.c:3306 commands/typecmds.c:3504 #, c-format msgid "You can alter type %s, which will alter the array type as well." msgstr "Однако можно изменить тип %s, что повлечёт изменение типа массива." -#: commands/typecmds.c:3536 +#: commands/typecmds.c:3572 #, c-format msgid "type \"%s\" already exists in schema \"%s\"" msgstr "тип \"%s\" уже существует в схеме \"%s\"" -#: commands/user.c:154 +#: commands/user.c:141 #, c-format msgid "SYSID can no longer be specified" msgstr "SYSID уже не нужно указывать" -#: commands/user.c:308 +#: commands/user.c:295 #, c-format msgid "must be superuser to create superusers" msgstr "для создания суперпользователей нужно быть суперпользователем" -#: commands/user.c:315 +#: commands/user.c:302 #, c-format msgid "must be superuser to create replication users" msgstr "для создания пользователей-репликаторов нужно быть суперпользователем" -#: commands/user.c:322 commands/user.c:708 +#: commands/user.c:309 commands/user.c:707 #, c-format msgid "must be superuser to change bypassrls attribute" msgstr "для изменения атрибута bypassrls нужно быть суперпользователем" -#: commands/user.c:329 +#: commands/user.c:316 #, c-format msgid "permission denied to create role" msgstr "нет прав для создания роли" -#: commands/user.c:339 commands/user.c:1183 commands/user.c:1190 -#: utils/adt/acl.c:5246 utils/adt/acl.c:5252 gram.y:14386 gram.y:14421 +#: commands/user.c:326 commands/user.c:1195 commands/user.c:1202 +#: utils/adt/acl.c:5251 utils/adt/acl.c:5257 gram.y:14465 gram.y:14500 #, c-format msgid "role name \"%s\" is reserved" msgstr "имя роли \"%s\" зарезервировано" -#: commands/user.c:341 commands/user.c:1185 commands/user.c:1192 +#: commands/user.c:328 commands/user.c:1197 commands/user.c:1204 #, c-format msgid "Role names starting with \"pg_\" are reserved." msgstr "Имена ролей, начинающиеся с \"pg_\", зарезервированы." 
-#: commands/user.c:353 commands/user.c:1198 +#: commands/user.c:340 commands/user.c:1210 #, c-format msgid "role \"%s\" already exists" msgstr "роль \"%s\" уже существует" -#: commands/user.c:426 +#: commands/user.c:406 commands/user.c:816 +#, c-format +msgid "empty string is not a valid password, clearing password" +msgstr "пустая строка не является допустимым паролем; пароль сбрасывается" + +#: commands/user.c:437 #, c-format msgid "pg_authid OID value not set when in binary upgrade mode" msgstr "значение OID в pg_authid не задано в режиме двоичного обновления" -#: commands/user.c:694 commands/user.c:903 commands/user.c:1437 -#: commands/user.c:1581 +#: commands/user.c:693 commands/user.c:915 commands/user.c:1449 +#: commands/user.c:1593 #, c-format msgid "must be superuser to alter superusers" msgstr "для модификации суперпользователей нужно быть суперпользователем" -#: commands/user.c:701 +#: commands/user.c:700 #, c-format msgid "must be superuser to alter replication users" msgstr "" "для модификации пользователей-репликаторов нужно быть суперпользователем" -#: commands/user.c:724 commands/user.c:911 +#: commands/user.c:723 commands/user.c:923 #, c-format msgid "permission denied" msgstr "нет доступа" -#: commands/user.c:941 +#: commands/user.c:953 #, c-format msgid "must be superuser to alter settings globally" msgstr "для глобального изменения параметров нужно быть суперпользователем" -#: commands/user.c:963 +#: commands/user.c:975 #, c-format msgid "permission denied to drop role" msgstr "нет прав для удаления роли" -#: commands/user.c:987 +#: commands/user.c:999 #, c-format msgid "cannot use special role specifier in DROP ROLE" msgstr "использовать специальную роль в DROP ROLE нельзя" -#: commands/user.c:997 commands/user.c:1154 commands/variable.c:822 -#: commands/variable.c:894 utils/adt/acl.c:5104 utils/adt/acl.c:5151 -#: utils/adt/acl.c:5179 utils/adt/acl.c:5197 utils/init/miscinit.c:503 +#: commands/user.c:1009 commands/user.c:1166 commands/variable.c:822 +#: commands/variable.c:894 utils/adt/acl.c:5109 utils/adt/acl.c:5156 +#: utils/adt/acl.c:5184 utils/adt/acl.c:5202 utils/init/miscinit.c:504 #, c-format msgid "role \"%s\" does not exist" msgstr "роль \"%s\" не существует" -#: commands/user.c:1002 +#: commands/user.c:1014 #, c-format msgid "role \"%s\" does not exist, skipping" msgstr "роль \"%s\" не существует, пропускается" -#: commands/user.c:1014 commands/user.c:1018 +#: commands/user.c:1026 commands/user.c:1030 #, c-format msgid "current user cannot be dropped" msgstr "пользователь не может удалить сам себя" -#: commands/user.c:1022 +#: commands/user.c:1034 #, c-format msgid "session user cannot be dropped" msgstr "пользователя текущего сеанса нельзя удалить" -#: commands/user.c:1033 +#: commands/user.c:1045 #, c-format msgid "must be superuser to drop superusers" msgstr "для удаления суперпользователей нужно быть суперпользователем" -#: commands/user.c:1049 +#: commands/user.c:1061 #, c-format msgid "role \"%s\" cannot be dropped because some objects depend on it" msgstr "роль \"%s\" нельзя удалить, так как есть зависящие от неё объекты" -#: commands/user.c:1170 +#: commands/user.c:1182 #, c-format msgid "session user cannot be renamed" msgstr "пользователя текущего сеанса нельзя переименовать" -#: commands/user.c:1174 +#: commands/user.c:1186 #, c-format msgid "current user cannot be renamed" msgstr "пользователь не может переименовать сам себя" -#: commands/user.c:1208 +#: commands/user.c:1220 #, c-format msgid "must be superuser to rename superusers" msgstr "для 
переименования суперпользователей нужно быть суперпользователем" -#: commands/user.c:1215 +#: commands/user.c:1227 #, c-format msgid "permission denied to rename role" msgstr "нет прав на переименование роли" -#: commands/user.c:1236 +#: commands/user.c:1248 #, c-format msgid "MD5 password cleared because of role rename" msgstr "в результате переименования роли очищен MD5-хеш пароля" -#: commands/user.c:1296 +#: commands/user.c:1308 #, c-format msgid "column names cannot be included in GRANT/REVOKE ROLE" msgstr "в GRANT/REVOKE ROLE нельзя включать названия столбцов" -#: commands/user.c:1334 +#: commands/user.c:1346 #, c-format msgid "permission denied to drop objects" msgstr "нет прав на удаление объектов" -#: commands/user.c:1361 commands/user.c:1370 +#: commands/user.c:1373 commands/user.c:1382 #, c-format msgid "permission denied to reassign objects" msgstr "нет прав для переназначения объектов" -#: commands/user.c:1445 commands/user.c:1589 +#: commands/user.c:1457 commands/user.c:1601 #, c-format msgid "must have admin option on role \"%s\"" msgstr "требуется право admin для роли \"%s\"" -#: commands/user.c:1462 +#: commands/user.c:1474 #, c-format msgid "must be superuser to set grantor" msgstr "для назначения права управления правами нужно быть суперпользователем" -#: commands/user.c:1487 +#: commands/user.c:1499 #, c-format msgid "role \"%s\" is a member of role \"%s\"" msgstr "роль \"%s\" включена в роль \"%s\"" -#: commands/user.c:1502 +#: commands/user.c:1514 #, c-format msgid "role \"%s\" is already a member of role \"%s\"" msgstr "роль \"%s\" уже включена в роль \"%s\"" -#: commands/user.c:1611 +#: commands/user.c:1623 #, c-format msgid "role \"%s\" is not a member of role \"%s\"" msgstr "роль \"%s\" не включена в роль \"%s\"" -#: commands/vacuum.c:186 +#: commands/vacuum.c:188 #, c-format msgid "%s cannot be executed from VACUUM or ANALYZE" msgstr "%s нельзя выполнить в ходе VACUUM или ANALYZE" -#: commands/vacuum.c:196 +#: commands/vacuum.c:198 #, c-format msgid "VACUUM option DISABLE_PAGE_SKIPPING cannot be used with FULL" msgstr "Параметр VACUUM DISABLE_PAGE_SKIPPING нельзя использовать с FULL" -#: commands/vacuum.c:565 +#: commands/vacuum.c:577 #, c-format msgid "oldest xmin is far in the past" msgstr "самый старый xmin далеко в прошлом" -#: commands/vacuum.c:566 +#: commands/vacuum.c:578 #, c-format msgid "Close open transactions soon to avoid wraparound problems." msgstr "" "Скорее закройте открытые транзакции, чтобы избежать проблемы наложения." -#: commands/vacuum.c:605 +#: commands/vacuum.c:617 #, c-format msgid "oldest multixact is far in the past" msgstr "самый старый multixact далеко в прошлом" -#: commands/vacuum.c:606 +#: commands/vacuum.c:618 #, c-format msgid "" "Close open transactions with multixacts soon to avoid wraparound problems." @@ -11244,57 +11282,57 @@ msgstr "" "Скорее закройте открытые транзакции в мультитранзакциях, чтобы избежать " "проблемы наложения." -#: commands/vacuum.c:1176 +#: commands/vacuum.c:1188 #, c-format msgid "some databases have not been vacuumed in over 2 billion transactions" msgstr "" "есть базы данных, которые не очищались на протяжении более чем 2 миллиардов " "транзакций" -#: commands/vacuum.c:1177 +#: commands/vacuum.c:1189 #, c-format msgid "You might have already suffered transaction-wraparound data loss." msgstr "Возможно, вы уже потеряли данные в результате наложения ID транзакций." 
-#: commands/vacuum.c:1306 +#: commands/vacuum.c:1324 #, c-format msgid "skipping vacuum of \"%s\" --- lock not available" msgstr "очистка \"%s\" пропускается --- блокировка недоступна" -#: commands/vacuum.c:1332 +#: commands/vacuum.c:1350 #, c-format msgid "skipping \"%s\" --- only superuser can vacuum it" msgstr "" "\"%s\" пропускается --- только суперпользователь может очистить эту таблицу" -#: commands/vacuum.c:1336 +#: commands/vacuum.c:1354 #, c-format msgid "skipping \"%s\" --- only superuser or database owner can vacuum it" msgstr "" "пропускается \"%s\" --- только суперпользователь или владелец БД может " "очистить эту таблицу" -#: commands/vacuum.c:1340 +#: commands/vacuum.c:1358 #, c-format msgid "skipping \"%s\" --- only table or database owner can vacuum it" msgstr "" "\"%s\" пропускается --- только владелец базы данных или этой таблицы может " "очистить её" -#: commands/vacuum.c:1359 +#: commands/vacuum.c:1377 #, c-format msgid "skipping \"%s\" --- cannot vacuum non-tables or special system tables" msgstr "" "\"%s\" пропускается --- очищать не таблицы или специальные системные таблицы " "нельзя" -#: commands/vacuumlazy.c:377 +#: commands/vacuumlazy.c:376 #, c-format msgid "automatic vacuum of table \"%s.%s.%s\": index scans: %d\n" msgstr "" "автоматическая очистка таблицы \"%s.%s.%s\": сканирований индекса: %d\n" -#: commands/vacuumlazy.c:382 +#: commands/vacuumlazy.c:381 #, c-format msgid "" "pages: %u removed, %u remain, %u skipped due to pins, %u skipped frozen\n" @@ -11302,7 +11340,7 @@ msgstr "" "страниц удалено: %u, осталось: %u, пропущено закреплённых: %u, пропущено " "замороженных: %u\n" -#: commands/vacuumlazy.c:388 +#: commands/vacuumlazy.c:387 #, c-format msgid "" "tuples: %.0f removed, %.0f remain, %.0f are dead but not yet removable, " @@ -11311,19 +11349,19 @@ msgstr "" "версий строк: удалено: %.0f, осталось: %.0f, «мёртвых», но ещё не подлежащих " "удалению: %.0f, старейший xmin: %u\n" -#: commands/vacuumlazy.c:394 +#: commands/vacuumlazy.c:393 #, c-format msgid "buffer usage: %d hits, %d misses, %d dirtied\n" msgstr "" "использование буфера: попаданий: %d, промахов: %d, «грязных» записей: %d\n" -#: commands/vacuumlazy.c:398 +#: commands/vacuumlazy.c:397 #, c-format msgid "avg read rate: %.3f MB/s, avg write rate: %.3f MB/s\n" msgstr "" "средняя скорость чтения: %.3f МБ/с, средняя скорость записи: %.3f МБ/с\n" -#: commands/vacuumlazy.c:400 +#: commands/vacuumlazy.c:399 #, c-format msgid "system usage: %s" msgstr "нагрузка системы: %s" @@ -11334,23 +11372,23 @@ msgid "relation \"%s\" page %u is uninitialized --- fixing" msgstr "" "в отношении \"%s\" не инициализирована страница %u --- ситуация исправляется" -#: commands/vacuumlazy.c:1330 +#: commands/vacuumlazy.c:1339 #, c-format msgid "\"%s\": removed %.0f row versions in %u pages" msgstr "\"%s\": удалено версий строк: %.0f, обработано страниц: %u" -#: commands/vacuumlazy.c:1340 +#: commands/vacuumlazy.c:1349 #, c-format msgid "%.0f dead row versions cannot be removed yet, oldest xmin: %u\n" msgstr "" "В данный момент нельзя удалить \"мёртвых\" строк: %.0f, старейший xmin: %u\n" -#: commands/vacuumlazy.c:1342 +#: commands/vacuumlazy.c:1351 #, c-format msgid "There were %.0f unused item pointers.\n" msgstr "Найдено неиспользованных указателей: %.0f.\n" -#: commands/vacuumlazy.c:1344 +#: commands/vacuumlazy.c:1353 #, c-format msgid "Skipped %u page due to buffer pins, " msgid_plural "Skipped %u pages due to buffer pins, " @@ -11358,7 +11396,7 @@ msgstr[0] "Пропущено страниц, закреплённых в буф msgstr[1] 
"Пропущено страниц, закреплённых в буфере: %u," msgstr[2] "Пропущено страниц, закреплённых в буфере: %u," -#: commands/vacuumlazy.c:1348 +#: commands/vacuumlazy.c:1357 #, c-format msgid "%u frozen page.\n" msgid_plural "%u frozen pages.\n" @@ -11366,7 +11404,7 @@ msgstr[0] "замороженных страниц: %u.\n" msgstr[1] "замороженных страниц: %u.\n" msgstr[2] "замороженных страниц: %u.\n" -#: commands/vacuumlazy.c:1352 +#: commands/vacuumlazy.c:1361 #, c-format msgid "%u page is entirely empty.\n" msgid_plural "%u pages are entirely empty.\n" @@ -11374,7 +11412,12 @@ msgstr[0] "Полностью пустых страниц: %u.\n" msgstr[1] "Полностью пустых страниц: %u.\n" msgstr[2] "Полностью пустых страниц: %u.\n" -#: commands/vacuumlazy.c:1360 +#: commands/vacuumlazy.c:1365 +#, c-format +msgid "%s." +msgstr "%s." + +#: commands/vacuumlazy.c:1368 #, c-format msgid "" "\"%s\": found %.0f removable, %.0f nonremovable row versions in %u out of %u " @@ -11383,22 +11426,22 @@ msgstr "" "\"%s\": найдено удаляемых версий строк: %.0f, неудаляемых - %.0f, обработано " "страниц: %u, всего страниц: %u" -#: commands/vacuumlazy.c:1429 +#: commands/vacuumlazy.c:1437 #, c-format msgid "\"%s\": removed %d row versions in %d pages" msgstr "\"%s\": удалено версий строк: %d, обработано страниц: %d" -#: commands/vacuumlazy.c:1618 +#: commands/vacuumlazy.c:1625 #, c-format msgid "scanned index \"%s\" to remove %d row versions" msgstr "просканирован индекс \"%s\", удалено версий строк: %d" -#: commands/vacuumlazy.c:1664 +#: commands/vacuumlazy.c:1671 #, c-format msgid "index \"%s\" now contains %.0f row versions in %u pages" msgstr "индекс \"%s\" теперь содержит версий строк: %.0f, в страницах: %u" -#: commands/vacuumlazy.c:1668 +#: commands/vacuumlazy.c:1675 #, c-format msgid "" "%.0f index row versions were removed.\n" @@ -11409,22 +11452,22 @@ msgstr "" "Удалено индексных страниц: %u, пригодно для повторного использования: %u.\n" "%s." -#: commands/vacuumlazy.c:1763 +#: commands/vacuumlazy.c:1770 #, c-format msgid "\"%s\": stopping truncate due to conflicting lock request" msgstr "\"%s\": остановка усечения из-за конфликтующего запроса блокировки" -#: commands/vacuumlazy.c:1828 +#: commands/vacuumlazy.c:1835 #, c-format msgid "\"%s\": truncated %u to %u pages" msgstr "\"%s\": усечение (было страниц: %u, стало: %u)" -#: commands/vacuumlazy.c:1893 +#: commands/vacuumlazy.c:1900 #, c-format msgid "\"%s\": suspending truncate due to conflicting lock request" msgstr "\"%s\": приостановка усечения из-за конфликтующего запроса блокировки" -#: commands/variable.c:165 utils/misc/guc.c:10001 utils/misc/guc.c:10063 +#: commands/variable.c:165 utils/misc/guc.c:10023 utils/misc/guc.c:10085 #, c-format msgid "Unrecognized key word: \"%s\"." msgstr "нераспознанное ключевое слово: \"%s\"." 
@@ -11491,7 +11534,7 @@ msgid "SET TRANSACTION ISOLATION LEVEL must not be called in a subtransaction" msgstr "" "команда SET TRANSACTION ISOLATION LEVEL не должна вызываться в подтранзакции" -#: commands/variable.c:571 storage/lmgr/predicate.c:1576 +#: commands/variable.c:571 storage/lmgr/predicate.c:1649 #, c-format msgid "cannot use serializable mode in a hot standby" msgstr "использовать сериализуемый режим в горячем резерве нельзя" @@ -11526,8 +11569,8 @@ msgstr "Изменить клиентскую кодировку сейчас н #: commands/variable.c:776 #, c-format -msgid "cannot change client_encoding in a parallel worker" -msgstr "изменить клиентскую кодировку в параллельном исполнителе нельзя" +msgid "cannot change client_encoding during a parallel operation" +msgstr "изменить клиентскую кодировку во время параллельной операции нельзя" #: commands/variable.c:912 #, c-format @@ -11628,7 +11671,7 @@ msgid "cursor \"%s\" is not a simply updatable scan of table \"%s\"" msgstr "" "для курсора \"%s\" не выполняется обновляемое сканирование таблицы \"%s\"" -#: executor/execCurrent.c:231 executor/execExprInterp.c:1876 +#: executor/execCurrent.c:231 executor/execExprInterp.c:1889 #, c-format msgid "" "type of parameter %d (%s) does not match that when preparing the plan (%s)" @@ -11636,28 +11679,28 @@ msgstr "" "тип параметра %d (%s) не соответствует тому, с которым подготавливался план " "(%s)" -#: executor/execCurrent.c:243 executor/execExprInterp.c:1888 +#: executor/execCurrent.c:243 executor/execExprInterp.c:1901 #, c-format msgid "no value found for parameter %d" msgstr "не найдено значение параметра %d" -#: executor/execExpr.c:774 parser/parse_agg.c:764 +#: executor/execExpr.c:780 parser/parse_agg.c:779 #, c-format msgid "window function calls cannot be nested" msgstr "вложенные вызовы оконных функций недопустимы" -#: executor/execExpr.c:1218 +#: executor/execExpr.c:1236 #, c-format msgid "target type is not an array" msgstr "целевой тип не является массивом" -#: executor/execExpr.c:1541 +#: executor/execExpr.c:1559 #, c-format msgid "ROW() column has type %s instead of type %s" msgstr "столбец ROW() имеет тип %s, а должен - %s" -#: executor/execExpr.c:2061 executor/execSRF.c:668 parser/parse_func.c:116 -#: parser/parse_func.c:543 parser/parse_func.c:902 +#: executor/execExpr.c:2094 executor/execSRF.c:672 parser/parse_func.c:120 +#: parser/parse_func.c:547 parser/parse_func.c:921 #, c-format msgid "cannot pass more than %d argument to a function" msgid_plural "cannot pass more than %d arguments to a function" @@ -11665,41 +11708,41 @@ msgstr[0] "функции нельзя передать больше %d аргу msgstr[1] "функции нельзя передать больше %d аргументов" msgstr[2] "функции нельзя передать больше %d аргументов" -#: executor/execExpr.c:2336 executor/execExpr.c:2342 -#: executor/execExprInterp.c:2187 utils/adt/array_userfuncs.c:484 -#: utils/adt/arrayfuncs.c:260 utils/adt/arrayfuncs.c:558 -#: utils/adt/arrayfuncs.c:1288 utils/adt/arrayfuncs.c:3361 -#: utils/adt/arrayfuncs.c:5241 utils/adt/arrayfuncs.c:5758 +#: executor/execExpr.c:2371 executor/execExpr.c:2377 +#: executor/execExprInterp.c:2226 utils/adt/arrayfuncs.c:260 +#: utils/adt/arrayfuncs.c:558 utils/adt/arrayfuncs.c:1288 +#: utils/adt/arrayfuncs.c:3361 utils/adt/arrayfuncs.c:5239 +#: utils/adt/arrayfuncs.c:5756 #, c-format msgid "number of array dimensions (%d) exceeds the maximum allowed (%d)" msgstr "число размерностей массива (%d) превышает предел (%d)" -#: executor/execExprInterp.c:1548 +#: executor/execExprInterp.c:1561 #, c-format msgid "attribute %d of type %s has 
been dropped" msgstr "атрибут %d типа %s был удалён" -#: executor/execExprInterp.c:1554 +#: executor/execExprInterp.c:1567 #, c-format msgid "attribute %d of type %s has wrong type" msgstr "атрибут %d типа %s имеет неправильный тип" -#: executor/execExprInterp.c:1556 executor/execExprInterp.c:2473 +#: executor/execExprInterp.c:1569 executor/execExprInterp.c:2512 #, c-format msgid "Table has type %s, but query expects %s." msgstr "В таблице задан тип %s, а в запросе ожидается %s." -#: executor/execExprInterp.c:1966 +#: executor/execExprInterp.c:1979 #, c-format msgid "WHERE CURRENT OF is not supported for this table type" msgstr "WHERE CURRENT OF для таблиц такого типа не поддерживается" -#: executor/execExprInterp.c:2165 +#: executor/execExprInterp.c:2204 #, c-format msgid "cannot merge incompatible arrays" msgstr "не удалось объединить несовместимые массивы" -#: executor/execExprInterp.c:2166 +#: executor/execExprInterp.c:2205 #, c-format msgid "" "Array with element type %s cannot be included in ARRAY construct with " @@ -11708,7 +11751,7 @@ msgstr "" "Массив с типом элементов %s нельзя включить в конструкцию ARRAY с типом " "элементов %s." -#: executor/execExprInterp.c:2207 executor/execExprInterp.c:2237 +#: executor/execExprInterp.c:2246 executor/execExprInterp.c:2276 #, c-format msgid "" "multidimensional arrays must have array expressions with matching dimensions" @@ -11716,35 +11759,35 @@ msgstr "" "для многомерных массивов должны задаваться выражения с соответствующими " "размерностями" -#: executor/execExprInterp.c:2472 +#: executor/execExprInterp.c:2511 #, c-format msgid "attribute %d has wrong type" msgstr "атрибут %d имеет неверный тип" -#: executor/execExprInterp.c:2581 +#: executor/execExprInterp.c:2620 #, c-format msgid "array subscript in assignment must not be null" msgstr "индекс элемента массива в присваивании не может быть NULL" -#: executor/execExprInterp.c:3004 utils/adt/domains.c:148 +#: executor/execExprInterp.c:3053 utils/adt/domains.c:148 #, c-format msgid "domain %s does not allow null values" msgstr "домен %s не допускает значения null" -#: executor/execExprInterp.c:3019 utils/adt/domains.c:183 +#: executor/execExprInterp.c:3068 utils/adt/domains.c:183 #, c-format msgid "value for domain %s violates check constraint \"%s\"" msgstr "значение домена %s нарушает ограничение-проверку \"%s\"" -#: executor/execExprInterp.c:3386 executor/execExprInterp.c:3403 -#: executor/execExprInterp.c:3505 executor/nodeModifyTable.c:96 +#: executor/execExprInterp.c:3435 executor/execExprInterp.c:3452 +#: executor/execExprInterp.c:3554 executor/nodeModifyTable.c:96 #: executor/nodeModifyTable.c:106 executor/nodeModifyTable.c:123 #: executor/nodeModifyTable.c:131 #, c-format msgid "table row type and query-specified row type do not match" msgstr "тип строки таблицы отличается от типа строки-результата запроса" -#: executor/execExprInterp.c:3387 +#: executor/execExprInterp.c:3436 #, c-format msgid "Table row contains %d attribute, but query expects %d." msgid_plural "Table row contains %d attributes, but query expects %d." @@ -11752,14 +11795,14 @@ msgstr[0] "Строка таблицы содержит %d атрибут, а в msgstr[1] "Строка таблицы содержит %d атрибута, а в запросе ожидается %d." msgstr[2] "Строка таблицы содержит %d атрибутов, а в запросе ожидается %d." -#: executor/execExprInterp.c:3404 executor/nodeModifyTable.c:107 +#: executor/execExprInterp.c:3453 executor/nodeModifyTable.c:107 #, c-format msgid "Table has type %s at ordinal position %d, but query expects %s." 
msgstr "" "В таблице определён тип %s (номер столбца: %d), а в запросе предполагается " "%s." -#: executor/execExprInterp.c:3506 executor/execSRF.c:921 +#: executor/execExprInterp.c:3555 executor/execSRF.c:927 #, c-format msgid "Physical storage mismatch on dropped attribute at ordinal position %d." msgstr "" @@ -11805,22 +11848,22 @@ msgstr "Ключ %s конфликтует с существующим ключ msgid "Key conflicts with existing key." msgstr "Ключ конфликтует с уже существующим." -#: executor/execMain.c:1074 +#: executor/execMain.c:1115 #, c-format msgid "cannot change sequence \"%s\"" msgstr "последовательность \"%s\" изменить нельзя" -#: executor/execMain.c:1080 +#: executor/execMain.c:1121 #, c-format msgid "cannot change TOAST relation \"%s\"" msgstr "TOAST-отношение \"%s\" изменить нельзя" -#: executor/execMain.c:1098 rewrite/rewriteHandler.c:2661 +#: executor/execMain.c:1139 rewrite/rewriteHandler.c:2747 #, c-format msgid "cannot insert into view \"%s\"" msgstr "вставить данные в представление \"%s\" нельзя" -#: executor/execMain.c:1100 rewrite/rewriteHandler.c:2664 +#: executor/execMain.c:1141 rewrite/rewriteHandler.c:2750 #, c-format msgid "" "To enable inserting into the view, provide an INSTEAD OF INSERT trigger or " @@ -11829,12 +11872,12 @@ msgstr "" "Чтобы представление допускало добавление данных, установите триггер INSTEAD " "OF INSERT или безусловное правило ON INSERT DO INSTEAD." -#: executor/execMain.c:1106 rewrite/rewriteHandler.c:2669 +#: executor/execMain.c:1147 rewrite/rewriteHandler.c:2755 #, c-format msgid "cannot update view \"%s\"" msgstr "изменить данные в представлении \"%s\" нельзя" -#: executor/execMain.c:1108 rewrite/rewriteHandler.c:2672 +#: executor/execMain.c:1149 rewrite/rewriteHandler.c:2758 #, c-format msgid "" "To enable updating the view, provide an INSTEAD OF UPDATE trigger or an " @@ -11843,12 +11886,12 @@ msgstr "" "Чтобы представление допускало изменение данных, установите триггер INSTEAD " "OF UPDATE или безусловное правило ON UPDATE DO INSTEAD." -#: executor/execMain.c:1114 rewrite/rewriteHandler.c:2677 +#: executor/execMain.c:1155 rewrite/rewriteHandler.c:2763 #, c-format msgid "cannot delete from view \"%s\"" msgstr "удалить данные из представления \"%s\" нельзя" -#: executor/execMain.c:1116 rewrite/rewriteHandler.c:2680 +#: executor/execMain.c:1157 rewrite/rewriteHandler.c:2766 #, c-format msgid "" "To enable deleting from the view, provide an INSTEAD OF DELETE trigger or an " @@ -11857,117 +11900,117 @@ msgstr "" "Чтобы представление допускало удаление данных, установите триггер INSTEAD OF " "DELETE или безусловное правило ON DELETE DO INSTEAD." 
-#: executor/execMain.c:1127 +#: executor/execMain.c:1168 #, c-format msgid "cannot change materialized view \"%s\"" msgstr "изменить материализованное представление \"%s\" нельзя" -#: executor/execMain.c:1139 +#: executor/execMain.c:1187 #, c-format msgid "cannot insert into foreign table \"%s\"" msgstr "вставлять данные в стороннюю таблицу \"%s\" нельзя" -#: executor/execMain.c:1145 +#: executor/execMain.c:1193 #, c-format msgid "foreign table \"%s\" does not allow inserts" msgstr "сторонняя таблица \"%s\" не допускает добавления" -#: executor/execMain.c:1152 +#: executor/execMain.c:1200 #, c-format msgid "cannot update foreign table \"%s\"" msgstr "изменять данные в сторонней таблице \"%s\" нельзя" -#: executor/execMain.c:1158 +#: executor/execMain.c:1206 #, c-format msgid "foreign table \"%s\" does not allow updates" msgstr "сторонняя таблица \"%s\" не допускает изменения" -#: executor/execMain.c:1165 +#: executor/execMain.c:1213 #, c-format msgid "cannot delete from foreign table \"%s\"" msgstr "удалять данные из сторонней таблицы \"%s\" нельзя" -#: executor/execMain.c:1171 +#: executor/execMain.c:1219 #, c-format msgid "foreign table \"%s\" does not allow deletes" msgstr "сторонняя таблица \"%s\" не допускает удаления" -#: executor/execMain.c:1182 +#: executor/execMain.c:1230 #, c-format msgid "cannot change relation \"%s\"" msgstr "отношение \"%s\" изменить нельзя" -#: executor/execMain.c:1209 +#: executor/execMain.c:1257 #, c-format msgid "cannot lock rows in sequence \"%s\"" msgstr "блокировать строки в последовательности \"%s\" нельзя" -#: executor/execMain.c:1216 +#: executor/execMain.c:1264 #, c-format msgid "cannot lock rows in TOAST relation \"%s\"" msgstr "блокировать строки в TOAST-отношении \"%s\" нельзя" -#: executor/execMain.c:1223 +#: executor/execMain.c:1271 #, c-format msgid "cannot lock rows in view \"%s\"" msgstr "блокировать строки в представлении \"%s\" нельзя" -#: executor/execMain.c:1231 +#: executor/execMain.c:1279 #, c-format msgid "cannot lock rows in materialized view \"%s\"" msgstr "блокировать строки в материализованном представлении \"%s\" нельзя" -#: executor/execMain.c:1240 executor/execMain.c:2796 -#: executor/nodeLockRows.c:132 +#: executor/execMain.c:1288 executor/execMain.c:2930 +#: executor/nodeLockRows.c:136 #, c-format msgid "cannot lock rows in foreign table \"%s\"" msgstr "блокировать строки в сторонней таблице \"%s\" нельзя" -#: executor/execMain.c:1246 +#: executor/execMain.c:1294 #, c-format msgid "cannot lock rows in relation \"%s\"" msgstr "блокировать строки в отношении \"%s\" нельзя" -#: executor/execMain.c:1880 +#: executor/execMain.c:1926 #, c-format -msgid "null value in column \"%s\" violates not-null constraint" -msgstr "нулевое значение в столбце \"%s\" нарушает ограничение NOT NULL" +msgid "new row for relation \"%s\" violates partition constraint" +msgstr "новая строка в отношении \"%s\" нарушает ограничение секции" -#: executor/execMain.c:1882 executor/execMain.c:1916 executor/execMain.c:1946 -#: executor/execMain.c:2031 +#: executor/execMain.c:1928 executor/execMain.c:2007 executor/execMain.c:2054 +#: executor/execMain.c:2165 #, c-format msgid "Failing row contains %s." msgstr "Ошибочная строка содержит %s." 
-#: executor/execMain.c:1914 +#: executor/execMain.c:2005 #, c-format -msgid "new row for relation \"%s\" violates check constraint \"%s\"" -msgstr "новая строка в отношении \"%s\" нарушает ограничение-проверку \"%s\"" +msgid "null value in column \"%s\" violates not-null constraint" +msgstr "нулевое значение в столбце \"%s\" нарушает ограничение NOT NULL" -#: executor/execMain.c:1944 +#: executor/execMain.c:2052 #, c-format -msgid "new row for relation \"%s\" violates partition constraint" -msgstr "новая строка в отношении \"%s\" нарушает ограничение секции" +msgid "new row for relation \"%s\" violates check constraint \"%s\"" +msgstr "новая строка в отношении \"%s\" нарушает ограничение-проверку \"%s\"" -#: executor/execMain.c:2029 +#: executor/execMain.c:2163 #, c-format msgid "new row violates check option for view \"%s\"" msgstr "новая строка нарушает ограничение-проверку для представления \"%s\"" -#: executor/execMain.c:2039 +#: executor/execMain.c:2173 #, c-format msgid "new row violates row-level security policy \"%s\" for table \"%s\"" msgstr "" "новая строка нарушает политику защиты на уровне строк \"%s\" для таблицы \"%s" "\"" -#: executor/execMain.c:2044 +#: executor/execMain.c:2178 #, c-format msgid "new row violates row-level security policy for table \"%s\"" msgstr "" "новая строка нарушает политику защиты на уровне строк для таблицы \"%s\"" -#: executor/execMain.c:2051 +#: executor/execMain.c:2185 #, c-format msgid "" "new row violates row-level security policy \"%s\" (USING expression) for " @@ -11976,7 +12019,7 @@ msgstr "" "новая строка нарушает политику защиты на уровне строк \"%s\" (выражение " "USING) для таблицы \"%s\"" -#: executor/execMain.c:2056 +#: executor/execMain.c:2190 #, c-format msgid "" "new row violates row-level security policy (USING expression) for table \"%s" @@ -11985,47 +12028,55 @@ msgstr "" "новая строка нарушает политику защиты на уровне строк (выражение USING) для " "таблицы \"%s\"" -#: executor/execMain.c:3257 +#: executor/execMain.c:3399 #, c-format msgid "no partition of relation \"%s\" found for row" msgstr "для строки не найдена секция в отношении \"%s\"" -#: executor/execMain.c:3259 +#: executor/execMain.c:3401 #, c-format msgid "Partition key of the failing row contains %s." msgstr "Ключ разбиения для неподходящей строки содержит %s." -#: executor/execReplication.c:195 executor/execReplication.c:342 +#: executor/execReplication.c:196 executor/execReplication.c:354 #, c-format msgid "concurrent update, retrying" msgstr "параллельное изменение; следует повторная попытка" -#: executor/execReplication.c:544 +#: executor/execReplication.c:256 parser/parse_oper.c:228 +#: utils/adt/array_userfuncs.c:724 utils/adt/array_userfuncs.c:863 +#: utils/adt/arrayfuncs.c:3639 utils/adt/arrayfuncs.c:4077 +#: utils/adt/arrayfuncs.c:6037 utils/adt/rowtypes.c:1167 +#, c-format +msgid "could not identify an equality operator for type %s" +msgstr "не удалось найти оператор равенства для типа %s" + +#: executor/execReplication.c:562 #, c-format msgid "" -"cannot update table \"%s\" because it does not have replica identity and " +"cannot update table \"%s\" because it does not have a replica identity and " "publishes updates" msgstr "" "изменение в таблице \"%s\" невозможно, так как в ней отсутствует " "идентификатор реплики, но она публикует изменения" -#: executor/execReplication.c:546 +#: executor/execReplication.c:564 #, c-format msgid "To enable updating the table, set REPLICA IDENTITY using ALTER TABLE." 
msgstr "" "Чтобы эта таблица поддерживала изменение, установите REPLICA IDENTITY, " "выполнив ALTER TABLE." -#: executor/execReplication.c:550 +#: executor/execReplication.c:568 #, c-format msgid "" -"cannot delete from table \"%s\" because it does not have replica identity " +"cannot delete from table \"%s\" because it does not have a replica identity " "and publishes deletes" msgstr "" "удаление из таблицы \"%s\" невозможно, так как в ней отсутствует " "идентификатор реплики, но она публикует удаления" -#: executor/execReplication.c:552 +#: executor/execReplication.c:570 #, c-format msgid "" "To enable deleting from the table, set REPLICA IDENTITY using ALTER TABLE." @@ -12033,22 +12084,27 @@ msgstr "" "Чтобы эта таблица поддерживала удаление, установите REPLICA IDENTITY, " "выполнив ALTER TABLE." -#: executor/execSRF.c:307 +#: executor/execReplication.c:589 +#, c-format +msgid "logical replication target relation \"%s.%s\" is not a table" +msgstr "целевое отношение логической репликации \"%s.%s\" не является таблицей" + +#: executor/execSRF.c:308 #, c-format msgid "rows returned by function are not all of the same row type" msgstr "строки, возвращённые функцией, имеют разные типы" -#: executor/execSRF.c:355 executor/execSRF.c:619 +#: executor/execSRF.c:356 executor/execSRF.c:622 #, c-format msgid "table-function protocol for materialize mode was not followed" msgstr "нарушение протокола табличной функции в режиме материализации" -#: executor/execSRF.c:362 executor/execSRF.c:637 +#: executor/execSRF.c:363 executor/execSRF.c:640 #, c-format msgid "unrecognized table-function returnMode: %d" msgstr "нераспознанный режим возврата табличной функции: %d" -#: executor/execSRF.c:839 +#: executor/execSRF.c:845 #, c-format msgid "" "function returning setof record called in context that cannot accept type " @@ -12057,12 +12113,12 @@ msgstr "" "функция, возвращающая запись SET OF, вызвана в контексте, не допускающем " "этот тип" -#: executor/execSRF.c:894 executor/execSRF.c:910 executor/execSRF.c:920 +#: executor/execSRF.c:900 executor/execSRF.c:916 executor/execSRF.c:926 #, c-format msgid "function return row and query-specified return row do not match" msgstr "тип результат функции отличается от типа строки-результата запроса" -#: executor/execSRF.c:895 +#: executor/execSRF.c:901 #, c-format msgid "Returned row contains %d attribute, but query expects %d." msgid_plural "Returned row contains %d attributes, but query expects %d." @@ -12072,17 +12128,17 @@ msgstr[1] "" msgstr[2] "" "Возвращённая строка содержит %d атрибутов, но запрос предполагает %d." -#: executor/execSRF.c:911 +#: executor/execSRF.c:917 #, c-format msgid "Returned type %s at ordinal position %d, but query expects %s." msgstr "Возвращён тип %s (номер столбца: %d), а в запросе предполагается %s." -#: executor/execUtils.c:636 +#: executor/execUtils.c:646 #, c-format msgid "materialized view \"%s\" has not been populated" msgstr "материализованное представление \"%s\" не было наполнено" -#: executor/execUtils.c:638 +#: executor/execUtils.c:648 #, c-format msgid "Use the REFRESH MATERIALIZED VIEW command." msgstr "Примените команду REFRESH MATERIALIZED VIEW." @@ -12092,24 +12148,24 @@ msgstr "Примените команду REFRESH MATERIALIZED VIEW." 
msgid "could not determine actual type of argument declared %s" msgstr "не удалось определить фактический тип аргумента, объявленного как %s" -#: executor/functions.c:519 +#: executor/functions.c:520 #, c-format msgid "cannot COPY to/from client in a SQL function" msgstr "в функции SQL нельзя выполнить COPY с участием клиента" #. translator: %s is a SQL statement name -#: executor/functions.c:525 +#: executor/functions.c:526 #, c-format msgid "%s is not allowed in a SQL function" msgstr "%s нельзя использовать в SQL-функции" #. translator: %s is a SQL statement name -#: executor/functions.c:533 executor/spi.c:1282 executor/spi.c:2066 +#: executor/functions.c:534 executor/spi.c:1288 executor/spi.c:2075 #, c-format msgid "%s is not allowed in a non-volatile function" msgstr "%s нельзя использовать в не изменчивой (volatile) функции" -#: executor/functions.c:653 +#: executor/functions.c:654 #, c-format msgid "" "could not determine actual result type for function declared to return type " @@ -12118,24 +12174,24 @@ msgstr "" "не удалось определить фактический тип результата для функции (в объявлении " "указан тип %s)" -#: executor/functions.c:1412 +#: executor/functions.c:1413 #, c-format msgid "SQL function \"%s\" statement %d" msgstr "SQL-функция \"%s\", оператор %d" -#: executor/functions.c:1438 +#: executor/functions.c:1439 #, c-format msgid "SQL function \"%s\" during startup" msgstr "SQL-функция \"%s\" (при старте)" -#: executor/functions.c:1596 executor/functions.c:1633 -#: executor/functions.c:1645 executor/functions.c:1758 -#: executor/functions.c:1791 executor/functions.c:1821 +#: executor/functions.c:1597 executor/functions.c:1634 +#: executor/functions.c:1646 executor/functions.c:1759 +#: executor/functions.c:1792 executor/functions.c:1822 #, c-format msgid "return type mismatch in function declared to return %s" msgstr "несовпадение типа возврата в функции (в объявлении указан тип %s)" -#: executor/functions.c:1598 +#: executor/functions.c:1599 #, c-format msgid "" "Function's final statement must be SELECT or INSERT/UPDATE/DELETE RETURNING." @@ -12143,97 +12199,97 @@ msgstr "" "Последним оператором в функции должен быть SELECT или INSERT/UPDATE/DELETE " "RETURNING." -#: executor/functions.c:1635 +#: executor/functions.c:1636 #, c-format msgid "Final statement must return exactly one column." msgstr "Последний оператор должен возвращать один столбец." -#: executor/functions.c:1647 +#: executor/functions.c:1648 #, c-format msgid "Actual return type is %s." msgstr "Фактический тип возврата: %s." -#: executor/functions.c:1760 +#: executor/functions.c:1761 #, c-format msgid "Final statement returns too many columns." msgstr "Последний оператор возвращает слишком много столбцов." -#: executor/functions.c:1793 +#: executor/functions.c:1794 #, c-format msgid "Final statement returns %s instead of %s at column %d." msgstr "Последний оператор возвращает %s вместо %s для столбца %d." -#: executor/functions.c:1823 +#: executor/functions.c:1824 #, c-format msgid "Final statement returns too few columns." msgstr "Последний оператор возвращает слишком мало столбцов." 
-#: executor/functions.c:1872 +#: executor/functions.c:1873 #, c-format msgid "return type %s is not supported for SQL functions" msgstr "для SQL-функций тип возврата %s не поддерживается" -#: executor/nodeAgg.c:3468 +#: executor/nodeAgg.c:3470 parser/parse_agg.c:618 parser/parse_agg.c:648 +#, c-format +msgid "aggregate function calls cannot be nested" +msgstr "вложенные вызовы агрегатных функций недопустимы" + +#: executor/nodeAgg.c:3559 #, c-format msgid "combine function for aggregate %u must be declared as STRICT" msgstr "" "комбинирующая функция для агрегата %u должна объявляться как строгая (STRICT)" -#: executor/nodeAgg.c:3513 executor/nodeWindowAgg.c:2278 +#: executor/nodeAgg.c:3604 executor/nodeWindowAgg.c:2282 #, c-format msgid "aggregate %u needs to have compatible input type and transition type" msgstr "" "агрегатная функция %u должна иметь совместимые входной и переходный типы" -#: executor/nodeAgg.c:3567 parser/parse_agg.c:618 parser/parse_agg.c:648 -#, c-format -msgid "aggregate function calls cannot be nested" -msgstr "вложенные вызовы агрегатных функций недопустимы" - -#: executor/nodeCustom.c:142 executor/nodeCustom.c:153 +#: executor/nodeCustom.c:152 executor/nodeCustom.c:163 #, c-format msgid "custom scan \"%s\" does not support MarkPos" msgstr "нестандартное сканирование \"%s\" не поддерживает MarkPos" -#: executor/nodeHashjoin.c:760 executor/nodeHashjoin.c:790 +#: executor/nodeHashjoin.c:770 executor/nodeHashjoin.c:800 #, c-format msgid "could not rewind hash-join temporary file: %m" msgstr "не удалось переместиться во временном файле хеш-соединения: %m" -#: executor/nodeHashjoin.c:825 executor/nodeHashjoin.c:831 +#: executor/nodeHashjoin.c:835 executor/nodeHashjoin.c:841 #, c-format msgid "could not write to hash-join temporary file: %m" msgstr "не удалось записать во временный файл хеш-соединения: %m" -#: executor/nodeHashjoin.c:872 executor/nodeHashjoin.c:882 +#: executor/nodeHashjoin.c:882 executor/nodeHashjoin.c:892 #, c-format msgid "could not read from hash-join temporary file: %m" msgstr "не удалось прочитать временный файл хеш-соединения: %m" -#: executor/nodeIndexonlyscan.c:233 +#: executor/nodeIndexonlyscan.c:237 #, c-format msgid "lossy distance functions are not supported in index-only scans" msgstr "" "функции неточной дистанции не поддерживаются в сканировании только по индексу" -#: executor/nodeLimit.c:252 +#: executor/nodeLimit.c:256 #, c-format msgid "OFFSET must not be negative" msgstr "OFFSET не может быть отрицательным" -#: executor/nodeLimit.c:278 +#: executor/nodeLimit.c:282 #, c-format msgid "LIMIT must not be negative" msgstr "LIMIT не может быть отрицательным" -#: executor/nodeMergejoin.c:1530 +#: executor/nodeMergejoin.c:1563 #, c-format msgid "RIGHT JOIN is only supported with merge-joinable join conditions" msgstr "" "RIGHT JOIN поддерживается только с условиями, допускающими соединение " "слиянием" -#: executor/nodeMergejoin.c:1550 +#: executor/nodeMergejoin.c:1583 #, c-format msgid "FULL JOIN is only supported with merge-joinable join conditions" msgstr "" @@ -12255,12 +12311,12 @@ msgstr "" msgid "Query has too few columns." msgstr "Запрос возвращает меньше столбцов." 
-#: executor/nodeModifyTable.c:1202 +#: executor/nodeModifyTable.c:1258 #, c-format msgid "ON CONFLICT DO UPDATE command cannot affect row a second time" msgstr "команда ON CONFLICT DO UPDATE не может менять строку повторно" -#: executor/nodeModifyTable.c:1203 +#: executor/nodeModifyTable.c:1259 #, c-format msgid "" "Ensure that no rows proposed for insertion within the same command have " @@ -12269,43 +12325,43 @@ msgstr "" "Проверьте, не содержат ли строки, которые должна добавить команда, " "дублирующиеся значения, подпадающие под ограничения." -#: executor/nodeSamplescan.c:298 +#: executor/nodeSamplescan.c:301 #, c-format msgid "TABLESAMPLE parameter cannot be null" msgstr "параметр TABLESAMPLE не может быть NULL" -#: executor/nodeSamplescan.c:310 +#: executor/nodeSamplescan.c:313 #, c-format msgid "TABLESAMPLE REPEATABLE parameter cannot be null" msgstr "параметр TABLESAMPLE REPEATABLE не может быть NULL" -#: executor/nodeSubplan.c:333 executor/nodeSubplan.c:372 -#: executor/nodeSubplan.c:1004 +#: executor/nodeSubplan.c:336 executor/nodeSubplan.c:375 +#: executor/nodeSubplan.c:1009 #, c-format msgid "more than one row returned by a subquery used as an expression" msgstr "подзапрос в выражении вернул больше одной строки" -#: executor/nodeTableFuncscan.c:365 +#: executor/nodeTableFuncscan.c:368 #, c-format msgid "namespace URI must not be null" msgstr "URI пространства имён должен быть не NULL" -#: executor/nodeTableFuncscan.c:376 +#: executor/nodeTableFuncscan.c:379 #, c-format msgid "row filter expression must not be null" msgstr "выражение отбора строк должно быть не NULL" -#: executor/nodeTableFuncscan.c:401 +#: executor/nodeTableFuncscan.c:404 #, c-format msgid "column filter expression must not be null" msgstr "выражение отбора столбца должно быть не NULL" -#: executor/nodeTableFuncscan.c:402 +#: executor/nodeTableFuncscan.c:405 #, c-format msgid "Filter for column \"%s\" is null." msgstr "Для столбца \"%s\" задано выражение NULL." -#: executor/nodeTableFuncscan.c:481 +#: executor/nodeTableFuncscan.c:486 #, c-format msgid "null is not allowed in column \"%s\"" msgstr "в столбце \"%s\" не допускается NULL" @@ -12315,63 +12371,63 @@ msgstr "в столбце \"%s\" не допускается NULL" msgid "moving-aggregate transition function must not return null" msgstr "функция перехода движимого агрегата не должна возвращать NULL" -#: executor/nodeWindowAgg.c:1621 +#: executor/nodeWindowAgg.c:1624 #, c-format msgid "frame starting offset must not be null" msgstr "смещение начала рамки не может быть NULL" -#: executor/nodeWindowAgg.c:1634 +#: executor/nodeWindowAgg.c:1637 #, c-format msgid "frame starting offset must not be negative" msgstr "смещение начала рамки не может быть отрицательным" -#: executor/nodeWindowAgg.c:1646 +#: executor/nodeWindowAgg.c:1649 #, c-format msgid "frame ending offset must not be null" msgstr "смещение конца рамки не может быть NULL" -#: executor/nodeWindowAgg.c:1659 +#: executor/nodeWindowAgg.c:1662 #, c-format msgid "frame ending offset must not be negative" msgstr "смещение конца рамки не может быть отрицательным" -#: executor/spi.c:197 +#: executor/spi.c:198 #, c-format msgid "transaction left non-empty SPI stack" msgstr "после транзакции остался непустой стек SPI" -#: executor/spi.c:198 executor/spi.c:261 +#: executor/spi.c:199 executor/spi.c:262 #, c-format msgid "Check for missing \"SPI_finish\" calls." msgstr "Проверьте наличие вызова \"SPI_finish\"." 
-#: executor/spi.c:260 +#: executor/spi.c:261 #, c-format msgid "subtransaction left non-empty SPI stack" msgstr "после подтранзакции остался непустой стек SPI" -#: executor/spi.c:1143 +#: executor/spi.c:1149 #, c-format msgid "cannot open multi-query plan as cursor" msgstr "не удалось открыть план нескольких запросов как курсор" #. translator: %s is name of a SQL command, eg INSERT -#: executor/spi.c:1148 +#: executor/spi.c:1154 #, c-format msgid "cannot open %s query as cursor" msgstr "не удалось открыть запрос %s как курсор" -#: executor/spi.c:1256 +#: executor/spi.c:1259 #, c-format msgid "DECLARE SCROLL CURSOR ... FOR UPDATE/SHARE is not supported" msgstr "DECLARE SCROLL CURSOR ... FOR UPDATE/SHARE не поддерживается" -#: executor/spi.c:1257 parser/analyze.c:2445 +#: executor/spi.c:1260 parser/analyze.c:2447 #, c-format msgid "Scrollable cursors must be READ ONLY." msgstr "Прокручиваемые курсоры должны быть READ ONLY." -#: executor/spi.c:2371 +#: executor/spi.c:2383 #, c-format msgid "SQL statement \"%s\"" msgstr "SQL-оператор: \"%s\"" @@ -12396,115 +12452,125 @@ msgstr "неверный параметр \"%s\"" msgid "Valid options in this context are: %s" msgstr "В данном контексте допустимы параметры: %s" -#: lib/stringinfo.c:301 +#: lib/stringinfo.c:259 #, c-format msgid "Cannot enlarge string buffer containing %d bytes by %d more bytes." msgstr "" "Не удалось увеличить строковый буфер (в буфере байт: %d, требовалось ещё %d)." -#: libpq/auth-scram.c:191 +#: libpq/auth-scram.c:201 libpq/auth-scram.c:441 libpq/auth-scram.c:450 +#, c-format +msgid "invalid SCRAM verifier for user \"%s\"" +msgstr "неверный проверочный код SCRAM для пользователя \"%s\"" + +#: libpq/auth-scram.c:212 #, c-format msgid "User \"%s\" does not have a valid SCRAM verifier." msgstr "У пользователя \"%s\" нет подходящих данных для проверки SCRAM." -#: libpq/auth-scram.c:246 +#: libpq/auth-scram.c:290 libpq/auth-scram.c:295 libpq/auth-scram.c:589 +#: libpq/auth-scram.c:597 libpq/auth-scram.c:678 libpq/auth-scram.c:688 +#: libpq/auth-scram.c:807 libpq/auth-scram.c:814 libpq/auth-scram.c:829 +#: libpq/auth-scram.c:1062 libpq/auth-scram.c:1070 +#, c-format +msgid "malformed SCRAM message" +msgstr "неправильное сообщение SCRAM" + +#: libpq/auth-scram.c:291 #, c-format -msgid "malformed SCRAM message (empty message)" -msgstr "неправильное сообщение SCRAM (пустое содержимое)" +msgid "The message is empty." +msgstr "Сообщение пустое." -#: libpq/auth-scram.c:250 +#: libpq/auth-scram.c:296 #, c-format -msgid "malformed SCRAM message (length mismatch)" -msgstr "неправильное сообщение SCRAM (некорректная длина)" +msgid "Message length does not match input length." +msgstr "Длина сообщения не соответствует входной длине." -#: libpq/auth-scram.c:282 +#: libpq/auth-scram.c:328 #, c-format -msgid "invalid SCRAM response (nonce mismatch)" -msgstr "неверный ответ SCRAM (несовпадение проверочного кода)" +msgid "invalid SCRAM response" +msgstr "неверный ответ SCRAM" -#: libpq/auth-scram.c:355 +#: libpq/auth-scram.c:329 +#, c-format +msgid "Nonce does not match." +msgstr "Разовый код не совпадает." + +#: libpq/auth-scram.c:403 #, c-format msgid "could not generate random salt" msgstr "не удалось сгенерировать случайную соль" -#: libpq/auth-scram.c:541 +#: libpq/auth-scram.c:590 #, c-format -msgid "malformed SCRAM message (attribute '%c' expected, %s found)" -msgstr "неправильное сообщение SCRAM (ожидался атрибут '%c', получено: %s)" +msgid "Expected attribute \"%c\" but found \"%s\"." +msgstr "Ожидался атрибут \"%c\", но обнаружено \"%s\"." 
-#: libpq/auth-scram.c:548 libpq/auth-scram.c:637 +#: libpq/auth-scram.c:598 libpq/auth-scram.c:689 #, c-format -msgid "malformed SCRAM message (expected = in attr %c)" -msgstr "неправильное сообщение SCRAM (в атрибуте %c ожидалось =)" +msgid "Expected character \"=\" for attribute \"%c\"." +msgstr "Ожидался символ \"=\" для атрибута \"%c\"." -#: libpq/auth-scram.c:628 +#: libpq/auth-scram.c:679 #, c-format -msgid "malformed SCRAM message (attribute expected, invalid char %s found)" -msgstr "" -"неправильное сообщение SCRAM (ожидался атрибут, получен некорректный символ " -"%s)" +msgid "Attribute expected, but found invalid character \"%s\"." +msgstr "Ожидался атрибут, но обнаружен неправильный символ \"%s\"." -#: libpq/auth-scram.c:750 +#: libpq/auth-scram.c:803 #, c-format msgid "client requires SCRAM channel binding, but it is not supported" msgstr "клиенту требуется привязка канала SCRAM, но она не поддерживается" -#: libpq/auth-scram.c:754 +#: libpq/auth-scram.c:808 #, c-format -msgid "malformed SCRAM message (unexpected channel-binding flag %s)" -msgstr "неправильное сообщение SCRAM (неожиданный флаг привязки канала %s)" +msgid "Unexpected channel-binding flag \"%s\"." +msgstr "Неожиданный флаг привязки канала \"%s\"." -#: libpq/auth-scram.c:760 +#: libpq/auth-scram.c:815 #, c-format -msgid "malformed SCRAM message (comma expected, got %s)" -msgstr "неправильное сообщение SCRAM (ожидалась запятая, получено: %s)" +msgid "Comma expected, but found character \"%s\"." +msgstr "Ожидалась запятая, но обнаружен символ \"%s\"." -#: libpq/auth-scram.c:770 +#: libpq/auth-scram.c:825 #, c-format msgid "client uses authorization identity, but it is not supported" msgstr "клиент передал идентификатор для авторизации, но это не поддерживается" -#: libpq/auth-scram.c:774 +#: libpq/auth-scram.c:830 #, c-format -msgid "" -"malformed SCRAM message (unexpected attribute %s in client-first-message)" -msgstr "" -"неправильное сообщение SCRAM (неожиданный атрибут %s в первом сообщении " -"клиента)" +msgid "Unexpected attribute \"%s\" in client-first-message." +msgstr "Неожиданный атрибут \"%s\" в первом сообщении клиента." -#: libpq/auth-scram.c:790 +#: libpq/auth-scram.c:846 #, c-format -msgid "client requires mandatory SCRAM extension" -msgstr "клиент требует поддержки обязательного расширения SCRAM" +msgid "client requires an unsupported SCRAM extension" +msgstr "клиенту требуется неподдерживаемое расширение SCRAM" -#: libpq/auth-scram.c:804 +#: libpq/auth-scram.c:860 #, c-format msgid "non-printable characters in SCRAM nonce" msgstr "непечатаемые символы в разовом коде SCRAM" -#: libpq/auth-scram.c:921 +#: libpq/auth-scram.c:977 #, c-format msgid "could not generate random nonce" msgstr "не удалось сгенерировать разовый код" -#: libpq/auth-scram.c:989 +#: libpq/auth-scram.c:1048 #, c-format msgid "unexpected SCRAM channel-binding attribute in client-final-message" msgstr "" "неожиданный атрибут привязки канала в последнем сообщении клиента SCRAM" -#: libpq/auth-scram.c:1003 +#: libpq/auth-scram.c:1063 #, c-format -msgid "malformed SCRAM message (malformed proof in client-final-message" -msgstr "" -"неправильное сообщение SCRAM (некорректное подтверждение в последнем " -"сообщении клиента)" +msgid "Malformed proof in client-final-message." +msgstr "Некорректное подтверждение в последнем сообщении клиента." 
-#: libpq/auth-scram.c:1010 +#: libpq/auth-scram.c:1071 #, c-format -msgid "malformed SCRAM message (garbage at end of client-final-message)" -msgstr "" -"неправильное сообщение SCRAM (мусор в конце последнего сообщения клиента)" +msgid "Garbage found at the end of client-final-message." +msgstr "Мусор в конце последнего сообщения клиента." #: libpq/auth.c:274 #, c-format @@ -12693,17 +12759,22 @@ msgstr "" "в pg_hba.conf нет записи для компьютера \"%s\", пользователя \"%s\", базы " "\"%s\"" -#: libpq/auth.c:660 +#: libpq/auth.c:661 #, c-format msgid "expected password response, got message type %d" msgstr "ожидался ответ с паролем, но получено сообщение %d" -#: libpq/auth.c:688 +#: libpq/auth.c:689 #, c-format msgid "invalid password packet size" msgstr "неверный размер пакета с паролем" -#: libpq/auth.c:818 libpq/hba.c:1322 +#: libpq/auth.c:707 +#, c-format +msgid "empty password returned by client" +msgstr "клиент возвратил пустой пароль" + +#: libpq/auth.c:827 libpq/hba.c:1325 #, c-format msgid "" "MD5 authentication is not supported when \"db_user_namespace\" is enabled" @@ -12711,215 +12782,215 @@ msgstr "" "проверка подлинности MD5 не поддерживается, когда включён режим " "\"db_user_namespace\"" -#: libpq/auth.c:824 +#: libpq/auth.c:833 #, c-format msgid "could not generate random MD5 salt" msgstr "не удалось сгенерировать случайную соль для MD5" -#: libpq/auth.c:866 +#: libpq/auth.c:878 #, c-format msgid "SASL authentication is not supported in protocol version 2" msgstr "аутентификация SASL не поддерживается в протоколе версии 2" -#: libpq/auth.c:904 +#: libpq/auth.c:920 #, c-format msgid "expected SASL response, got message type %d" msgstr "ожидался ответ SASL, но получено сообщение %d" -#: libpq/auth.c:1041 +#: libpq/auth.c:957 +#, c-format +msgid "client selected an invalid SASL authentication mechanism" +msgstr "клиент выбрал неверный механизм аутентификации SASL" + +#: libpq/auth.c:1104 #, c-format msgid "GSSAPI is not supported in protocol version 2" msgstr "GSSAPI не поддерживается в протоколе версии 2" -#: libpq/auth.c:1101 +#: libpq/auth.c:1164 #, c-format msgid "expected GSS response, got message type %d" msgstr "ожидался ответ GSS, но получено сообщение %d" -#: libpq/auth.c:1163 +#: libpq/auth.c:1226 msgid "accepting GSS security context failed" msgstr "принять контекст безопасности GSS не удалось" -#: libpq/auth.c:1189 +#: libpq/auth.c:1252 msgid "retrieving GSS user name failed" msgstr "получить имя пользователя GSS не удалось" -#: libpq/auth.c:1308 +#: libpq/auth.c:1372 #, c-format msgid "SSPI is not supported in protocol version 2" msgstr "SSPI не поддерживается в протоколе версии 2" -#: libpq/auth.c:1323 +#: libpq/auth.c:1387 msgid "could not acquire SSPI credentials" msgstr "не удалось получить удостоверение SSPI" -#: libpq/auth.c:1341 +#: libpq/auth.c:1405 #, c-format msgid "expected SSPI response, got message type %d" msgstr "ожидался ответ SSPI, но получено сообщение %d" -#: libpq/auth.c:1414 +#: libpq/auth.c:1478 msgid "could not accept SSPI security context" msgstr "принять контекст безопасности SSPI не удалось" -#: libpq/auth.c:1476 +#: libpq/auth.c:1540 msgid "could not get token from SSPI security context" msgstr "не удалось получить маркер из контекста безопасности SSPI" -#: libpq/auth.c:1595 libpq/auth.c:1614 +#: libpq/auth.c:1659 libpq/auth.c:1678 #, c-format msgid "could not translate name" msgstr "не удалось преобразовать имя" -#: libpq/auth.c:1627 +#: libpq/auth.c:1691 #, c-format msgid "realm name too long" msgstr "имя области слишком длинное" -#: 
libpq/auth.c:1642 +#: libpq/auth.c:1706 #, c-format msgid "translated account name too long" msgstr "преобразованное имя учётной записи слишком длинное" -#: libpq/auth.c:1828 +#: libpq/auth.c:1892 #, c-format msgid "could not create socket for Ident connection: %m" msgstr "не удалось создать сокет для подключения к серверу Ident: %m" -#: libpq/auth.c:1843 +#: libpq/auth.c:1907 #, c-format msgid "could not bind to local address \"%s\": %m" msgstr "не удалось привязаться к локальному адресу \"%s\": %m" -#: libpq/auth.c:1855 +#: libpq/auth.c:1919 #, c-format msgid "could not connect to Ident server at address \"%s\", port %s: %m" msgstr "не удалось подключиться к серверу Ident по адресу \"%s\", порт %s: %m" -#: libpq/auth.c:1877 +#: libpq/auth.c:1941 #, c-format msgid "could not send query to Ident server at address \"%s\", port %s: %m" msgstr "" "не удалось отправить запрос серверу Ident по адресу \"%s\", порт %s: %m" -#: libpq/auth.c:1894 +#: libpq/auth.c:1958 #, c-format msgid "" "could not receive response from Ident server at address \"%s\", port %s: %m" msgstr "" "не удалось получить ответ от сервера Ident по адресу \"%s\", порт %s: %m" -#: libpq/auth.c:1904 +#: libpq/auth.c:1968 #, c-format msgid "invalidly formatted response from Ident server: \"%s\"" msgstr "неверно форматированный ответ от сервера Ident: \"%s\"" -#: libpq/auth.c:1944 +#: libpq/auth.c:2008 #, c-format msgid "peer authentication is not supported on this platform" msgstr "проверка подлинности peer в этой ОС не поддерживается" -#: libpq/auth.c:1948 +#: libpq/auth.c:2012 #, c-format msgid "could not get peer credentials: %m" msgstr "не удалось получить данные пользователя через механизм peer: %m" -#: libpq/auth.c:1957 +#: libpq/auth.c:2021 #, c-format msgid "could not look up local user ID %ld: %s" msgstr "найти локального пользователя по идентификатору (%ld) не удалось: %s" -#: libpq/auth.c:2041 libpq/auth.c:2367 libpq/auth.c:2680 -#, c-format -msgid "empty password returned by client" -msgstr "клиент возвратил пустой пароль" - -#: libpq/auth.c:2051 +#: libpq/auth.c:2109 #, c-format msgid "error from underlying PAM layer: %s" msgstr "ошибка в нижележащем слое PAM: %s" -#: libpq/auth.c:2132 +#: libpq/auth.c:2190 #, c-format msgid "could not create PAM authenticator: %s" msgstr "не удалось создать аутентификатор PAM: %s" -#: libpq/auth.c:2143 +#: libpq/auth.c:2201 #, c-format msgid "pam_set_item(PAM_USER) failed: %s" msgstr "ошибка в pam_set_item(PAM_USER): %s" -#: libpq/auth.c:2154 +#: libpq/auth.c:2212 #, c-format msgid "pam_set_item(PAM_RHOST) failed: %s" msgstr "ошибка в pam_set_item(PAM_RHOST): %s" -#: libpq/auth.c:2165 +#: libpq/auth.c:2223 #, c-format msgid "pam_set_item(PAM_CONV) failed: %s" msgstr "ошибка в pam_set_item(PAM_CONV): %s" -#: libpq/auth.c:2176 +#: libpq/auth.c:2234 #, c-format msgid "pam_authenticate failed: %s" msgstr "ошибка в pam_authenticate: %s" -#: libpq/auth.c:2187 +#: libpq/auth.c:2245 #, c-format msgid "pam_acct_mgmt failed: %s" msgstr "ошибка в pam_acct_mgmt: %s" -#: libpq/auth.c:2198 +#: libpq/auth.c:2256 #, c-format msgid "could not release PAM authenticator: %s" msgstr "не удалось освободить аутентификатор PAM: %s" -#: libpq/auth.c:2263 +#: libpq/auth.c:2323 #, c-format msgid "could not initialize LDAP: %m" msgstr "не удалось инициализировать LDAP: %m" -#: libpq/auth.c:2266 +#: libpq/auth.c:2326 #, c-format msgid "could not initialize LDAP: error code %d" msgstr "не удалось инициализировать LDAP (код ошибки: %d)" -#: libpq/auth.c:2276 +#: libpq/auth.c:2336 #, c-format msgid "could not 
set LDAP protocol version: %s" msgstr "не удалось задать версию протокола LDAP: %s" -#: libpq/auth.c:2305 +#: libpq/auth.c:2365 #, c-format msgid "could not load wldap32.dll" msgstr "не удалось загрузить wldap32.dll" -#: libpq/auth.c:2313 +#: libpq/auth.c:2373 #, c-format msgid "could not load function _ldap_start_tls_sA in wldap32.dll" msgstr "не удалось найти функцию _ldap_start_tls_sA в wldap32.dll" -#: libpq/auth.c:2314 +#: libpq/auth.c:2374 #, c-format msgid "LDAP over SSL is not supported on this platform." msgstr "LDAP через SSL не поддерживается в этой ОС." -#: libpq/auth.c:2329 +#: libpq/auth.c:2389 #, c-format msgid "could not start LDAP TLS session: %s" msgstr "не удалось начать сеанс LDAP TLS: %s" -#: libpq/auth.c:2351 +#: libpq/auth.c:2411 #, c-format msgid "LDAP server not specified" msgstr "LDAP-сервер не определён" -#: libpq/auth.c:2404 +#: libpq/auth.c:2460 #, c-format msgid "invalid character in user name for LDAP authentication" msgstr "недопустимый символ в имени пользователя для проверки подлинности LDAP" -#: libpq/auth.c:2419 +#: libpq/auth.c:2476 #, c-format msgid "" "could not perform initial LDAP bind for ldapbinddn \"%s\" on server \"%s\": " @@ -12928,28 +12999,28 @@ msgstr "" "не удалось выполнить начальную привязку LDAP для ldapbinddn \"%s\" на " "сервере \"%s\": %s" -#: libpq/auth.c:2443 +#: libpq/auth.c:2502 #, c-format msgid "could not search LDAP for filter \"%s\" on server \"%s\": %s" msgstr "" "не удалось выполнить LDAP-поиск по фильтру \"%s\" на сервере \"%s\": %s" -#: libpq/auth.c:2454 +#: libpq/auth.c:2514 #, c-format msgid "LDAP user \"%s\" does not exist" msgstr "в LDAP нет пользователя \"%s\"" -#: libpq/auth.c:2455 +#: libpq/auth.c:2515 #, c-format msgid "LDAP search for filter \"%s\" on server \"%s\" returned no entries." msgstr "LDAP-поиск по фильтру \"%s\" на сервере \"%s\" не вернул результатов" -#: libpq/auth.c:2459 +#: libpq/auth.c:2519 #, c-format msgid "LDAP user \"%s\" is not unique" msgstr "пользователь LDAP \"%s\" не уникален" -#: libpq/auth.c:2460 +#: libpq/auth.c:2520 #, c-format msgid "LDAP search for filter \"%s\" on server \"%s\" returned %d entry." msgid_plural "" @@ -12958,7 +13029,7 @@ msgstr[0] "LDAP-поиск по фильтру \"%s\" на сервере \"%s\" msgstr[1] "LDAP-поиск по фильтру \"%s\" на сервере \"%s\" вернул %d записи." msgstr[2] "LDAP-поиск по фильтру \"%s\" на сервере \"%s\" вернул %d записей." 
-#: libpq/auth.c:2478 +#: libpq/auth.c:2539 #, c-format msgid "" "could not get dn for the first entry matching \"%s\" on server \"%s\": %s" @@ -12966,19 +13037,19 @@ msgstr "" "не удалось получить dn для первого результата, соответствующего \"%s\" на " "сервере \"%s\": %s" -#: libpq/auth.c:2498 +#: libpq/auth.c:2560 #, c-format msgid "could not unbind after searching for user \"%s\" on server \"%s\": %s" msgstr "" "не удалось отвязаться после поиска пользователя \"%s\" на сервере \"%s\": %s" -#: libpq/auth.c:2528 +#: libpq/auth.c:2592 #, c-format msgid "LDAP login failed for user \"%s\" on server \"%s\": %s" msgstr "" "ошибка при регистрации в LDAP пользователя \"%s\" на сервере \"%s\": %s" -#: libpq/auth.c:2556 +#: libpq/auth.c:2622 #, c-format msgid "" "certificate authentication failed for user \"%s\": client certificate " @@ -12987,117 +13058,117 @@ msgstr "" "ошибка проверки подлинности пользователя \"%s\" по сертификату: сертификат " "клиента не содержит имя пользователя" -#: libpq/auth.c:2659 +#: libpq/auth.c:2725 #, c-format msgid "RADIUS server not specified" msgstr "RADIUS-сервер не определён" -#: libpq/auth.c:2666 +#: libpq/auth.c:2732 #, c-format msgid "RADIUS secret not specified" msgstr "секрет RADIUS не определён" # well-spelled: симв -#: libpq/auth.c:2687 +#: libpq/auth.c:2746 #, c-format msgid "" "RADIUS authentication does not support passwords longer than %d characters" msgstr "проверка подлинности RADIUS не поддерживает пароли длиннее %d симв." -#: libpq/auth.c:2784 libpq/hba.c:1873 +#: libpq/auth.c:2851 libpq/hba.c:1878 #, c-format msgid "could not translate RADIUS server name \"%s\" to address: %s" msgstr "не удалось преобразовать имя сервера RADIUS \"%s\" в адрес: %s" -#: libpq/auth.c:2798 +#: libpq/auth.c:2865 #, c-format msgid "could not generate random encryption vector" msgstr "не удалось сгенерировать случайный вектор шифрования" -#: libpq/auth.c:2832 +#: libpq/auth.c:2899 #, c-format msgid "could not perform MD5 encryption of password" msgstr "не удалось вычислить MD5-хеш пароля" -#: libpq/auth.c:2858 +#: libpq/auth.c:2925 #, c-format msgid "could not create RADIUS socket: %m" msgstr "не удалось создать сокет RADIUS: %m" -#: libpq/auth.c:2880 +#: libpq/auth.c:2947 #, c-format msgid "could not bind local RADIUS socket: %m" msgstr "не удалось привязаться к локальному сокету RADIUS: %m" -#: libpq/auth.c:2890 +#: libpq/auth.c:2957 #, c-format msgid "could not send RADIUS packet: %m" msgstr "не удалось отправить пакет RADIUS: %m" -#: libpq/auth.c:2923 libpq/auth.c:2949 +#: libpq/auth.c:2990 libpq/auth.c:3016 #, c-format msgid "timeout waiting for RADIUS response from %s" msgstr "превышено время ожидания ответа RADIUS от %s" -#: libpq/auth.c:2942 +#: libpq/auth.c:3009 #, c-format msgid "could not check status on RADIUS socket: %m" msgstr "не удалось проверить состояние сокета RADIUS: %m" -#: libpq/auth.c:2972 +#: libpq/auth.c:3039 #, c-format msgid "could not read RADIUS response: %m" msgstr "не удалось прочитать ответ RADIUS: %m" -#: libpq/auth.c:2985 libpq/auth.c:2989 +#: libpq/auth.c:3052 libpq/auth.c:3056 #, c-format msgid "RADIUS response from %s was sent from incorrect port: %d" msgstr "ответ RADIUS от %s был отправлен с неверного порта: %d" -#: libpq/auth.c:2998 +#: libpq/auth.c:3065 #, c-format msgid "RADIUS response from %s too short: %d" msgstr "слишком короткий ответ RADIUS от %s: %d" -#: libpq/auth.c:3005 +#: libpq/auth.c:3072 #, c-format msgid "RADIUS response from %s has corrupt length: %d (actual length %d)" msgstr "в ответе RADIUS от %s испорчена 
длина: %d (фактическая длина %d)" -#: libpq/auth.c:3013 +#: libpq/auth.c:3080 #, c-format msgid "RADIUS response from %s is to a different request: %d (should be %d)" msgstr "пришёл ответ RADIUS от %s на другой запрос: %d (ожидался %d)" -#: libpq/auth.c:3038 +#: libpq/auth.c:3105 #, c-format msgid "could not perform MD5 encryption of received packet" msgstr "не удалось вычислить MD5 для принятого пакета" -#: libpq/auth.c:3047 +#: libpq/auth.c:3114 #, c-format msgid "RADIUS response from %s has incorrect MD5 signature" msgstr "ответ RADIUS от %s содержит неверную подпись MD5" -#: libpq/auth.c:3065 +#: libpq/auth.c:3132 #, c-format msgid "RADIUS response from %s has invalid code (%d) for user \"%s\"" msgstr "ответ RADIUS от %s содержит неверный код (%d) для пользователя \"%s\"" #: libpq/be-fsstubs.c:132 libpq/be-fsstubs.c:163 libpq/be-fsstubs.c:197 #: libpq/be-fsstubs.c:237 libpq/be-fsstubs.c:262 libpq/be-fsstubs.c:310 -#: libpq/be-fsstubs.c:333 libpq/be-fsstubs.c:581 +#: libpq/be-fsstubs.c:333 libpq/be-fsstubs.c:590 #, c-format msgid "invalid large-object descriptor: %d" msgstr "неверный дескриптор большого объекта: %d" -#: libpq/be-fsstubs.c:178 libpq/be-fsstubs.c:216 libpq/be-fsstubs.c:600 -#: libpq/be-fsstubs.c:788 +#: libpq/be-fsstubs.c:178 libpq/be-fsstubs.c:216 libpq/be-fsstubs.c:609 +#: libpq/be-fsstubs.c:797 libpq/be-fsstubs.c:917 #, c-format msgid "permission denied for large object %u" msgstr "нет доступа к большому объекту %u" -#: libpq/be-fsstubs.c:203 libpq/be-fsstubs.c:587 +#: libpq/be-fsstubs.c:203 libpq/be-fsstubs.c:596 #, c-format msgid "large object descriptor %d was not opened for writing" msgstr "дескриптор большого объекта %d был открыт не для записи" @@ -13146,59 +13217,59 @@ msgstr "для использования lo_export() на сервере нуж msgid "Anyone can use the client-side lo_export() provided by libpq." msgstr "Использовать lo_export() на стороне клиента через libpq могут все." 
-#: libpq/be-fsstubs.c:547 +#: libpq/be-fsstubs.c:556 #, c-format msgid "could not create server file \"%s\": %m" msgstr "не удалось создать файл сервера \"%s\": %m" -#: libpq/be-fsstubs.c:559 +#: libpq/be-fsstubs.c:568 #, c-format msgid "could not write server file \"%s\": %m" msgstr "не удалось записать файл сервера \"%s\": %m" -#: libpq/be-fsstubs.c:813 +#: libpq/be-fsstubs.c:822 #, c-format msgid "large object read request is too large" msgstr "при чтении большого объекта запрошен чрезмерный размер" -#: libpq/be-fsstubs.c:855 utils/adt/genfile.c:212 utils/adt/genfile.c:253 +#: libpq/be-fsstubs.c:864 utils/adt/genfile.c:212 utils/adt/genfile.c:253 #, c-format msgid "requested length cannot be negative" msgstr "запрошенная длина не может быть отрицательной" -#: libpq/be-secure-openssl.c:197 +#: libpq/be-secure-openssl.c:166 #, c-format msgid "could not create SSL context: %s" msgstr "не удалось создать контекст SSL: %s" -#: libpq/be-secure-openssl.c:225 +#: libpq/be-secure-openssl.c:194 #, c-format msgid "could not load server certificate file \"%s\": %s" msgstr "не удалось загрузить сертификат сервера \"%s\": %s" -#: libpq/be-secure-openssl.c:234 +#: libpq/be-secure-openssl.c:203 #, c-format msgid "could not access private key file \"%s\": %m" msgstr "не удалось обратиться к файлу закрытого ключа \"%s\": %m" -#: libpq/be-secure-openssl.c:243 +#: libpq/be-secure-openssl.c:212 #, c-format msgid "private key file \"%s\" is not a regular file" msgstr "файл закрытого ключа \"%s\" не является обычным" -#: libpq/be-secure-openssl.c:258 +#: libpq/be-secure-openssl.c:227 #, c-format msgid "private key file \"%s\" must be owned by the database user or root" msgstr "" "файл закрытого ключа \"%s\" должен принадлежать пользователю, запускающему " "сервер, или root" -#: libpq/be-secure-openssl.c:281 +#: libpq/be-secure-openssl.c:250 #, c-format msgid "private key file \"%s\" has group or world access" msgstr "к файлу закрытого ключа \"%s\" имеют доступ все или группа" -#: libpq/be-secure-openssl.c:283 +#: libpq/be-secure-openssl.c:252 #, c-format msgid "" "File must have permissions u=rw (0600) or less if owned by the database " @@ -13208,121 +13279,158 @@ msgstr "" "он принадлежит пользователю сервера, либо u=rw,g=r (0640) или более строгие, " "если он принадлежит root." 
-#: libpq/be-secure-openssl.c:300 +#: libpq/be-secure-openssl.c:269 #, c-format msgid "" "private key file \"%s\" cannot be reloaded because it requires a passphrase" msgstr "" "файл закрытого ключа \"%s\" нельзя перезагрузить, так как он защищён паролем" -#: libpq/be-secure-openssl.c:305 +#: libpq/be-secure-openssl.c:274 #, c-format msgid "could not load private key file \"%s\": %s" msgstr "не удалось загрузить файл закрытого ключа \"%s\": %s" -#: libpq/be-secure-openssl.c:314 +#: libpq/be-secure-openssl.c:283 #, c-format msgid "check of private key failed: %s" msgstr "ошибка при проверке закрытого ключа: %s" -#: libpq/be-secure-openssl.c:334 +#: libpq/be-secure-openssl.c:310 #, c-format msgid "could not set the cipher list (no valid ciphers available)" msgstr "не удалось установить список шифров (подходящие шифры отсутствуют)" -#: libpq/be-secure-openssl.c:352 +#: libpq/be-secure-openssl.c:328 #, c-format msgid "could not load root certificate file \"%s\": %s" msgstr "не удалось загрузить файл корневых сертификатов \"%s\": %s" -#: libpq/be-secure-openssl.c:379 +#: libpq/be-secure-openssl.c:355 #, c-format msgid "SSL certificate revocation list file \"%s\" ignored" msgstr "файл со списком отзыва сертификатов SSL \"%s\" игнорируется" -#: libpq/be-secure-openssl.c:381 +#: libpq/be-secure-openssl.c:357 #, c-format msgid "SSL library does not support certificate revocation lists." msgstr "Библиотека SSL не поддерживает списки отзыва сертификатов." -#: libpq/be-secure-openssl.c:388 +#: libpq/be-secure-openssl.c:364 #, c-format msgid "could not load SSL certificate revocation list file \"%s\": %s" msgstr "" "не удалось загрузить файл со списком отзыва сертификатов SSL \"%s\": %s" -#: libpq/be-secure-openssl.c:469 +#: libpq/be-secure-openssl.c:445 #, c-format msgid "could not initialize SSL connection: SSL context not set up" msgstr "" "инициализировать SSL-подключение не удалось: контекст SSL не установлен" -#: libpq/be-secure-openssl.c:477 +#: libpq/be-secure-openssl.c:453 #, c-format msgid "could not initialize SSL connection: %s" msgstr "инициализировать SSL-подключение не удалось: %s" -#: libpq/be-secure-openssl.c:485 +#: libpq/be-secure-openssl.c:461 #, c-format msgid "could not set SSL socket: %s" msgstr "не удалось создать SSL-сокет: %s" -#: libpq/be-secure-openssl.c:540 +#: libpq/be-secure-openssl.c:516 #, c-format msgid "could not accept SSL connection: %m" msgstr "не удалось принять SSL-подключение: %m" -#: libpq/be-secure-openssl.c:544 libpq/be-secure-openssl.c:555 +#: libpq/be-secure-openssl.c:520 libpq/be-secure-openssl.c:531 #, c-format msgid "could not accept SSL connection: EOF detected" msgstr "не удалось принять SSL-подключение: обрыв данных" -#: libpq/be-secure-openssl.c:549 +#: libpq/be-secure-openssl.c:525 #, c-format msgid "could not accept SSL connection: %s" msgstr "не удалось принять SSL-подключение: %s" -#: libpq/be-secure-openssl.c:560 libpq/be-secure-openssl.c:699 -#: libpq/be-secure-openssl.c:759 +#: libpq/be-secure-openssl.c:536 libpq/be-secure-openssl.c:677 +#: libpq/be-secure-openssl.c:744 #, c-format msgid "unrecognized SSL error code: %d" msgstr "нераспознанный код ошибки SSL: %d" -#: libpq/be-secure-openssl.c:602 +#: libpq/be-secure-openssl.c:578 #, c-format msgid "SSL certificate's common name contains embedded null" msgstr "Имя SSL-сертификата включает нулевой байт" -#: libpq/be-secure-openssl.c:613 +#: libpq/be-secure-openssl.c:589 #, c-format msgid "SSL connection from \"%s\"" msgstr "SSL-подключение от \"%s\"" -#: libpq/be-secure-openssl.c:690 
libpq/be-secure-openssl.c:750 +#: libpq/be-secure-openssl.c:666 libpq/be-secure-openssl.c:728 #, c-format msgid "SSL error: %s" msgstr "ошибка SSL: %s" -#: libpq/be-secure-openssl.c:1179 +#: libpq/be-secure-openssl.c:909 +#, c-format +msgid "could not open DH parameters file \"%s\": %m" +msgstr "не удалось открыть файл параметров DH \"%s\": %m" + +#: libpq/be-secure-openssl.c:921 +#, c-format +msgid "could not load DH parameters file: %s" +msgstr "не удалось загрузить файл параметров DH: %s" + +#: libpq/be-secure-openssl.c:931 +#, c-format +msgid "invalid DH parameters: %s" +msgstr "неверные параметры DH: %s" + +#: libpq/be-secure-openssl.c:939 +#, c-format +msgid "invalid DH parameters: p is not prime" +msgstr "неверные параметры DH: p - не простое число" + +#: libpq/be-secure-openssl.c:947 +#, c-format +msgid "invalid DH parameters: neither suitable generator or safe prime" +msgstr "" +"неверные параметры DH: нет подходящего генератора или небезопасное простое " +"число" + +#: libpq/be-secure-openssl.c:1088 +#, c-format +msgid "DH: could not load DH parameters" +msgstr "DH: не удалось загрузить параметры DH" + +#: libpq/be-secure-openssl.c:1096 +#, c-format +msgid "DH: could not set DH parameters: %s" +msgstr "DH: не удалось задать параметры DH: %s" + +#: libpq/be-secure-openssl.c:1120 #, c-format msgid "ECDH: unrecognized curve name: %s" msgstr "ECDH: нераспознанное имя кривой: %s" -#: libpq/be-secure-openssl.c:1188 +#: libpq/be-secure-openssl.c:1129 #, c-format msgid "ECDH: could not create key" msgstr "ECDH: не удалось создать ключ" -#: libpq/be-secure-openssl.c:1216 +#: libpq/be-secure-openssl.c:1157 msgid "no SSL error reported" msgstr "нет сообщения об ошибке SSL" -#: libpq/be-secure-openssl.c:1220 +#: libpq/be-secure-openssl.c:1161 #, c-format msgid "SSL error code %lu" msgstr "код ошибки SSL: %lu" -#: libpq/be-secure.c:188 libpq/be-secure.c:274 +#: libpq/be-secure.c:189 libpq/be-secure.c:275 #, c-format msgid "terminating connection due to unexpected postmaster exit" msgstr "закрытие подключения из-за неожиданного завершения главного процесса" @@ -13337,28 +13445,23 @@ msgstr "Роль \"%s\" не существует." msgid "User \"%s\" has no password assigned." msgstr "Пользователь \"%s\" не имеет пароля." -#: libpq/crypt.c:76 -#, c-format -msgid "User \"%s\" has an empty password." -msgstr "У пользователя \"%s\" пустой пароль." - -#: libpq/crypt.c:87 +#: libpq/crypt.c:79 #, c-format msgid "User \"%s\" has an expired password." msgstr "Срок пароля пользователя \"%s\" истёк." -#: libpq/crypt.c:242 +#: libpq/crypt.c:173 #, c-format msgid "User \"%s\" has a password that cannot be used with MD5 authentication." msgstr "" "Пользователь \"%s\" имеет пароль, неподходящий для аутентификации по MD5." -#: libpq/crypt.c:251 libpq/crypt.c:292 libpq/crypt.c:316 libpq/crypt.c:327 +#: libpq/crypt.c:197 libpq/crypt.c:238 libpq/crypt.c:262 #, c-format msgid "Password does not match for user \"%s\"." msgstr "Пароль не подходит для пользователя \"%s\"." -#: libpq/crypt.c:338 +#: libpq/crypt.c:281 #, c-format msgid "Password of user \"%s\" is in unrecognized format." msgstr "Пароль пользователя \"%s\" представлен в неизвестном формате." 
@@ -13382,201 +13485,201 @@ msgstr "" msgid "authentication file line too long" msgstr "слишком длинная строка в файле конфигурации безопасности" -#: libpq/hba.c:510 libpq/hba.c:864 libpq/hba.c:884 libpq/hba.c:922 -#: libpq/hba.c:972 libpq/hba.c:986 libpq/hba.c:1008 libpq/hba.c:1017 -#: libpq/hba.c:1038 libpq/hba.c:1051 libpq/hba.c:1071 libpq/hba.c:1093 -#: libpq/hba.c:1105 libpq/hba.c:1161 libpq/hba.c:1181 libpq/hba.c:1195 -#: libpq/hba.c:1214 libpq/hba.c:1225 libpq/hba.c:1240 libpq/hba.c:1258 -#: libpq/hba.c:1274 libpq/hba.c:1286 libpq/hba.c:1323 libpq/hba.c:1364 -#: libpq/hba.c:1377 libpq/hba.c:1399 libpq/hba.c:1411 libpq/hba.c:1429 -#: libpq/hba.c:1479 libpq/hba.c:1518 libpq/hba.c:1529 libpq/hba.c:1546 -#: libpq/hba.c:1556 libpq/hba.c:1614 libpq/hba.c:1652 libpq/hba.c:1668 -#: libpq/hba.c:1767 libpq/hba.c:1856 libpq/hba.c:1875 libpq/hba.c:1904 -#: libpq/hba.c:1917 libpq/hba.c:1940 libpq/hba.c:1962 libpq/hba.c:1976 +#: libpq/hba.c:510 libpq/hba.c:867 libpq/hba.c:887 libpq/hba.c:925 +#: libpq/hba.c:975 libpq/hba.c:989 libpq/hba.c:1011 libpq/hba.c:1020 +#: libpq/hba.c:1041 libpq/hba.c:1054 libpq/hba.c:1074 libpq/hba.c:1096 +#: libpq/hba.c:1108 libpq/hba.c:1164 libpq/hba.c:1184 libpq/hba.c:1198 +#: libpq/hba.c:1217 libpq/hba.c:1228 libpq/hba.c:1243 libpq/hba.c:1261 +#: libpq/hba.c:1277 libpq/hba.c:1289 libpq/hba.c:1326 libpq/hba.c:1367 +#: libpq/hba.c:1380 libpq/hba.c:1402 libpq/hba.c:1414 libpq/hba.c:1432 +#: libpq/hba.c:1482 libpq/hba.c:1521 libpq/hba.c:1532 libpq/hba.c:1549 +#: libpq/hba.c:1559 libpq/hba.c:1617 libpq/hba.c:1655 libpq/hba.c:1671 +#: libpq/hba.c:1772 libpq/hba.c:1861 libpq/hba.c:1880 libpq/hba.c:1909 +#: libpq/hba.c:1922 libpq/hba.c:1945 libpq/hba.c:1967 libpq/hba.c:1981 #: tsearch/ts_locale.c:182 #, c-format msgid "line %d of configuration file \"%s\"" msgstr "строка %d файла конфигурации \"%s\"" #. translator: the second %s is a list of auth methods -#: libpq/hba.c:862 +#: libpq/hba.c:865 #, c-format msgid "" "authentication option \"%s\" is only valid for authentication methods %s" msgstr "параметр проверки подлинности \"%s\" допускается только для методов %s" -#: libpq/hba.c:882 +#: libpq/hba.c:885 #, c-format msgid "authentication method \"%s\" requires argument \"%s\" to be set" msgstr "" "для метода проверки подлинности \"%s\" требуется определить аргумент \"%s\"" -#: libpq/hba.c:910 +#: libpq/hba.c:913 #, c-format msgid "missing entry in file \"%s\" at end of line %d" msgstr "отсутствует запись в файле \"%s\" в конце строки %d" -#: libpq/hba.c:921 +#: libpq/hba.c:924 #, c-format msgid "multiple values in ident field" msgstr "множественные значения в поле ident" -#: libpq/hba.c:970 +#: libpq/hba.c:973 #, c-format msgid "multiple values specified for connection type" msgstr "для типа подключения указано несколько значений" -#: libpq/hba.c:971 +#: libpq/hba.c:974 #, c-format msgid "Specify exactly one connection type per line." msgstr "Определите в строке единственный тип подключения." -#: libpq/hba.c:985 +#: libpq/hba.c:988 #, c-format msgid "local connections are not supported by this build" msgstr "локальные подключения не поддерживаются в этой сборке" -#: libpq/hba.c:1006 +#: libpq/hba.c:1009 #, c-format msgid "hostssl record cannot match because SSL is disabled" msgstr "запись с hostssl недействительна, так как поддержка SSL отключена" -#: libpq/hba.c:1007 +#: libpq/hba.c:1010 #, c-format msgid "Set ssl = on in postgresql.conf." msgstr "Установите ssl = on в postgresql.conf." 
-#: libpq/hba.c:1015 +#: libpq/hba.c:1018 #, c-format msgid "hostssl record cannot match because SSL is not supported by this build" msgstr "" "запись с hostssl недействительна, так как SSL не поддерживается в этой сборке" -#: libpq/hba.c:1016 +#: libpq/hba.c:1019 #, c-format msgid "Compile with --with-openssl to use SSL connections." -msgstr "Для работы с SSL скомпилируйте posgresql с ключом --with-openssl." +msgstr "Для работы с SSL скомпилируйте postgresql с ключом --with-openssl." -#: libpq/hba.c:1036 +#: libpq/hba.c:1039 #, c-format msgid "invalid connection type \"%s\"" msgstr "неверный тип подключения \"%s\"" -#: libpq/hba.c:1050 +#: libpq/hba.c:1053 #, c-format msgid "end-of-line before database specification" msgstr "конец строки перед определением базы данных" -#: libpq/hba.c:1070 +#: libpq/hba.c:1073 #, c-format msgid "end-of-line before role specification" msgstr "конец строки перед определением роли" -#: libpq/hba.c:1092 +#: libpq/hba.c:1095 #, c-format msgid "end-of-line before IP address specification" msgstr "конец строки перед определением IP-адресов" -#: libpq/hba.c:1103 +#: libpq/hba.c:1106 #, c-format msgid "multiple values specified for host address" msgstr "для адреса узла указано несколько значений" -#: libpq/hba.c:1104 +#: libpq/hba.c:1107 #, c-format msgid "Specify one address range per line." msgstr "Определите в строке один диапазон адресов." -#: libpq/hba.c:1159 +#: libpq/hba.c:1162 #, c-format msgid "invalid IP address \"%s\": %s" msgstr "неверный IP-адрес \"%s\": %s" -#: libpq/hba.c:1179 +#: libpq/hba.c:1182 #, c-format msgid "specifying both host name and CIDR mask is invalid: \"%s\"" msgstr "указать одновременно и имя узла, и маску CIDR нельзя: \"%s\"" -#: libpq/hba.c:1193 +#: libpq/hba.c:1196 #, c-format msgid "invalid CIDR mask in address \"%s\"" msgstr "неверная маска CIDR в адресе \"%s\"" -#: libpq/hba.c:1212 +#: libpq/hba.c:1215 #, c-format msgid "end-of-line before netmask specification" msgstr "конец строки перед определением маски сети" -#: libpq/hba.c:1213 +#: libpq/hba.c:1216 #, c-format msgid "" "Specify an address range in CIDR notation, or provide a separate netmask." msgstr "" "Укажите диапазон адресов в формате CIDR или задайте отдельную маску сети." -#: libpq/hba.c:1224 +#: libpq/hba.c:1227 #, c-format msgid "multiple values specified for netmask" msgstr "для сетевой маски указано несколько значений" -#: libpq/hba.c:1238 +#: libpq/hba.c:1241 #, c-format msgid "invalid IP mask \"%s\": %s" msgstr "неверная маска IP \"%s\": %s" -#: libpq/hba.c:1257 +#: libpq/hba.c:1260 #, c-format msgid "IP address and mask do not match" msgstr "IP-адрес не соответствует маске" -#: libpq/hba.c:1273 +#: libpq/hba.c:1276 #, c-format msgid "end-of-line before authentication method" msgstr "конец строки перед методом проверки подлинности" -#: libpq/hba.c:1284 +#: libpq/hba.c:1287 #, c-format msgid "multiple values specified for authentication type" msgstr "для типа проверки подлинности указано несколько значений" -#: libpq/hba.c:1285 +#: libpq/hba.c:1288 #, c-format msgid "Specify exactly one authentication type per line." msgstr "Определите в строке единственный тип проверки подлинности." 
-#: libpq/hba.c:1362 +#: libpq/hba.c:1365 #, c-format msgid "invalid authentication method \"%s\"" msgstr "неверный метод проверки подлинности \"%s\"" -#: libpq/hba.c:1375 +#: libpq/hba.c:1378 #, c-format msgid "invalid authentication method \"%s\": not supported by this build" msgstr "" "неверный метод проверки подлинности \"%s\": не поддерживается в этой сборке" -#: libpq/hba.c:1398 +#: libpq/hba.c:1401 #, c-format msgid "gssapi authentication is not supported on local sockets" msgstr "проверка подлинности gssapi для локальных сокетов не поддерживается" -#: libpq/hba.c:1410 +#: libpq/hba.c:1413 #, c-format msgid "peer authentication is only supported on local sockets" msgstr "проверка подлинности peer поддерживается только для локальных сокетов" -#: libpq/hba.c:1428 +#: libpq/hba.c:1431 #, c-format msgid "cert authentication is only supported on hostssl connections" msgstr "" "проверка подлинности cert поддерживается только для подключений hostssl" -#: libpq/hba.c:1478 +#: libpq/hba.c:1481 #, c-format msgid "authentication option not in name=value format: %s" msgstr "параметр проверки подлинности указан не в формате имя=значение: %s" -#: libpq/hba.c:1517 +#: libpq/hba.c:1520 #, c-format msgid "" "cannot use ldapbasedn, ldapbinddn, ldapbindpasswd, ldapsearchattribute, or " @@ -13585,7 +13688,7 @@ msgstr "" "нельзя использовать ldapbasedn, ldapbinddn, ldapbindpasswd, " "ldapsearchattribute или ldapurl вместе с ldapprefix" -#: libpq/hba.c:1528 +#: libpq/hba.c:1531 #, c-format msgid "" "authentication method \"ldap\" requires argument \"ldapbasedn\", \"ldapprefix" @@ -13594,123 +13697,123 @@ msgstr "" "для метода проверки подлинности \"ldap\" требуется установить аргументы " "\"ldapbasedn\" и \"ldapprefix\" или \"ldapsuffix\"" -#: libpq/hba.c:1545 +#: libpq/hba.c:1548 #, c-format msgid "list of RADIUS servers cannot be empty" msgstr "список серверов RADIUS не может быть пустым" -#: libpq/hba.c:1555 +#: libpq/hba.c:1558 #, c-format msgid "list of RADIUS secrets cannot be empty" msgstr "список секретов RADIUS не может быть пустым" -#: libpq/hba.c:1608 +#: libpq/hba.c:1611 #, c-format -msgid "the number of %s (%i) must be 1 or the same as the number of %s (%i)" +msgid "the number of %s (%d) must be 1 or the same as the number of %s (%d)" msgstr "" -"количество элементов %s (%i) должно равняться 1 или количеству элементов %s " -"(%i)" +"количество элементов %s (%d) должно равняться 1 или количеству элементов %s " +"(%d)" -#: libpq/hba.c:1642 +#: libpq/hba.c:1645 msgid "ident, peer, gssapi, sspi, and cert" msgstr "ident, peer, gssapi, sspi и cert" -#: libpq/hba.c:1651 +#: libpq/hba.c:1654 #, c-format msgid "clientcert can only be configured for \"hostssl\" rows" msgstr "clientcert можно определить только в строках \"hostssl\"" -#: libpq/hba.c:1667 +#: libpq/hba.c:1670 #, c-format msgid "clientcert can not be set to 0 when using \"cert\" authentication" msgstr "" "clientcert нельзя установить в 0 при использовании проверки подлинности " "\"cert\"" -#: libpq/hba.c:1704 +#: libpq/hba.c:1707 #, c-format msgid "could not parse LDAP URL \"%s\": %s" msgstr "не удалось разобрать URL-адрес LDAP \"%s\": %s" -#: libpq/hba.c:1714 +#: libpq/hba.c:1717 #, c-format msgid "unsupported LDAP URL scheme: %s" msgstr "неподдерживаемая схема в URL-адресе LDAP: %s" -#: libpq/hba.c:1732 +#: libpq/hba.c:1737 #, c-format msgid "filters not supported in LDAP URLs" msgstr "фильтры в URL-адресах LDAP не поддерживаются" -#: libpq/hba.c:1741 +#: libpq/hba.c:1746 #, c-format msgid "LDAP URLs not supported on this platform" 
msgstr "URL-адреса LDAP не поддерживаются в этой ОС" -#: libpq/hba.c:1766 +#: libpq/hba.c:1771 #, c-format msgid "invalid LDAP port number: \"%s\"" msgstr "неверный номер порта LDAP: \"%s\"" -#: libpq/hba.c:1807 libpq/hba.c:1814 +#: libpq/hba.c:1812 libpq/hba.c:1819 msgid "gssapi and sspi" msgstr "gssapi и sspi" -#: libpq/hba.c:1823 libpq/hba.c:1832 +#: libpq/hba.c:1828 libpq/hba.c:1837 msgid "sspi" msgstr "sspi" -#: libpq/hba.c:1854 +#: libpq/hba.c:1859 #, c-format msgid "could not parse RADIUS server list \"%s\"" msgstr "не удалось разобрать список серверов RADIUS \"%s\"" -#: libpq/hba.c:1902 +#: libpq/hba.c:1907 #, c-format msgid "could not parse RADIUS port list \"%s\"" msgstr "не удалось разобрать список портов RADIUS \"%s\"" -#: libpq/hba.c:1916 +#: libpq/hba.c:1921 #, c-format msgid "invalid RADIUS port number: \"%s\"" msgstr "неверный номер порта RADIUS: \"%s\"" -#: libpq/hba.c:1938 +#: libpq/hba.c:1943 #, c-format msgid "could not parse RADIUS secret list \"%s\"" msgstr "не удалось разобрать список секретов RADIUS \"%s\"" -#: libpq/hba.c:1960 +#: libpq/hba.c:1965 #, c-format msgid "could not parse RADIUS identifiers list \"%s\"" msgstr "не удалось разобрать список идентификаторов RADIUS \"%s\"" -#: libpq/hba.c:1974 +#: libpq/hba.c:1979 #, c-format msgid "unrecognized authentication option name: \"%s\"" msgstr "нераспознанное имя атрибута проверки подлинности: \"%s\"" -#: libpq/hba.c:2107 libpq/hba.c:2507 guc-file.l:593 +#: libpq/hba.c:2112 libpq/hba.c:2512 guc-file.l:594 #, c-format msgid "could not open configuration file \"%s\": %m" msgstr "открыть файл конфигурации \"%s\" не удалось: %m" -#: libpq/hba.c:2158 +#: libpq/hba.c:2163 #, c-format msgid "configuration file \"%s\" contains no entries" msgstr "файл конфигурации \"%s\" не содержит записей" -#: libpq/hba.c:2663 +#: libpq/hba.c:2668 #, c-format msgid "invalid regular expression \"%s\": %s" msgstr "неверное регулярное выражение \"%s\": %s" -#: libpq/hba.c:2723 +#: libpq/hba.c:2728 #, c-format msgid "regular expression match for \"%s\" failed: %s" msgstr "ошибка при поиске по регулярному выражению для \"%s\": %s" -#: libpq/hba.c:2742 +#: libpq/hba.c:2747 #, c-format msgid "" "regular expression \"%s\" has no subexpressions as requested by " @@ -13719,94 +13822,94 @@ msgstr "" "в регулярном выражении \"%s\" нет подвыражений, требуемых для обратной " "ссылки в \"%s\"" -#: libpq/hba.c:2839 +#: libpq/hba.c:2844 #, c-format msgid "provided user name (%s) and authenticated user name (%s) do not match" msgstr "" "указанное имя пользователя (%s) не совпадает с именем прошедшего проверку " "(%s)" -#: libpq/hba.c:2859 +#: libpq/hba.c:2864 #, c-format msgid "no match in usermap \"%s\" for user \"%s\" authenticated as \"%s\"" msgstr "" "нет соответствия в файле сопоставлений \"%s\" для пользователя \"%s\", " "прошедшего проверку как \"%s\"" -#: libpq/hba.c:2892 +#: libpq/hba.c:2897 #, c-format msgid "could not open usermap file \"%s\": %m" msgstr "не удалось открыть файл сопоставлений пользователей \"%s\": %m" -#: libpq/pqcomm.c:201 +#: libpq/pqcomm.c:220 #, c-format msgid "could not set socket to nonblocking mode: %m" msgstr "не удалось перевести сокет в неблокирующий режим: %m" -#: libpq/pqcomm.c:355 +#: libpq/pqcomm.c:374 #, c-format msgid "Unix-domain socket path \"%s\" is too long (maximum %d bytes)" -msgstr "длина пути доменного сокета \"%s\" превышает предел (%d байт)" +msgstr "длина пути Unix-сокета \"%s\" превышает предел (%d байт)" -#: libpq/pqcomm.c:376 +#: libpq/pqcomm.c:395 #, c-format msgid "could not translate host name 
\"%s\", service \"%s\" to address: %s" msgstr "перевести имя узла \"%s\", службы \"%s\" в адрес не удалось: %s" -#: libpq/pqcomm.c:380 +#: libpq/pqcomm.c:399 #, c-format msgid "could not translate service \"%s\" to address: %s" msgstr "не удалось перевести имя службы \"%s\" в адрес: %s" -#: libpq/pqcomm.c:407 +#: libpq/pqcomm.c:426 #, c-format msgid "could not bind to all requested addresses: MAXLISTEN (%d) exceeded" msgstr "" "не удалось привязаться ко всем запрошенным адресам: превышен предел " "MAXLISTEN (%d)" -#: libpq/pqcomm.c:416 +#: libpq/pqcomm.c:435 msgid "IPv4" msgstr "IPv4" -#: libpq/pqcomm.c:420 +#: libpq/pqcomm.c:439 msgid "IPv6" msgstr "IPv6" -#: libpq/pqcomm.c:425 +#: libpq/pqcomm.c:444 msgid "Unix" msgstr "Unix" -#: libpq/pqcomm.c:430 +#: libpq/pqcomm.c:449 #, c-format msgid "unrecognized address family %d" msgstr "нераспознанное семейство адресов: %d" #. translator: first %s is IPv4, IPv6, or Unix -#: libpq/pqcomm.c:456 +#: libpq/pqcomm.c:475 #, c-format msgid "could not create %s socket for address \"%s\": %m" msgstr "не удалось создать сокет %s для адреса \"%s\": %m" #. translator: first %s is IPv4, IPv6, or Unix -#: libpq/pqcomm.c:482 +#: libpq/pqcomm.c:501 #, c-format msgid "setsockopt(SO_REUSEADDR) failed for %s address \"%s\": %m" msgstr "ошибка в setsockopt(SO_REUSEADDR) для адреса %s \"%s\": %m" #. translator: first %s is IPv4, IPv6, or Unix -#: libpq/pqcomm.c:499 +#: libpq/pqcomm.c:518 #, c-format msgid "setsockopt(IPV6_V6ONLY) failed for %s address \"%s\": %m" msgstr "ошибка в setsockopt(IPV6_V6ONLY) для адреса %s \"%s\": %m" #. translator: first %s is IPv4, IPv6, or Unix -#: libpq/pqcomm.c:519 +#: libpq/pqcomm.c:538 #, c-format msgid "could not bind %s address \"%s\": %m" msgstr "не удалось привязаться к адресу %s \"%s\": %m" -#: libpq/pqcomm.c:522 +#: libpq/pqcomm.c:541 #, c-format msgid "" "Is another postmaster already running on port %d? If not, remove socket file " @@ -13815,7 +13918,7 @@ msgstr "" "Возможно порт %d занят другим процессом postmaster? Если нет, удалите файл " "\"%s\" и повторите попытку." -#: libpq/pqcomm.c:525 +#: libpq/pqcomm.c:544 #, c-format msgid "" "Is another postmaster already running on port %d? If not, wait a few seconds " @@ -13825,73 +13928,73 @@ msgstr "" "попытку через несколько секунд." #. translator: first %s is IPv4, IPv6, or Unix -#: libpq/pqcomm.c:558 +#: libpq/pqcomm.c:577 #, c-format msgid "could not listen on %s address \"%s\": %m" msgstr "не удалось привязаться к адресу %s \"%s\": %m" -#: libpq/pqcomm.c:567 +#: libpq/pqcomm.c:586 #, c-format msgid "listening on Unix socket \"%s\"" -msgstr "для приёма подключений открыт сокет Unix \"%s\"" +msgstr "для приёма подключений открыт Unix-сокет \"%s\"" #. 
translator: first %s is IPv4 or IPv6 -#: libpq/pqcomm.c:573 +#: libpq/pqcomm.c:592 #, c-format msgid "listening on %s address \"%s\", port %d" msgstr "для приёма подключений по адресу %s \"%s\" открыт порт %d" -#: libpq/pqcomm.c:656 +#: libpq/pqcomm.c:675 #, c-format msgid "group \"%s\" does not exist" msgstr "группа \"%s\" не существует" -#: libpq/pqcomm.c:666 +#: libpq/pqcomm.c:685 #, c-format msgid "could not set group of file \"%s\": %m" msgstr "не удалось установить группу для файла \"%s\": %m" -#: libpq/pqcomm.c:677 +#: libpq/pqcomm.c:696 #, c-format msgid "could not set permissions of file \"%s\": %m" msgstr "не удалось установить права доступа для файла \"%s\": %m" -#: libpq/pqcomm.c:707 +#: libpq/pqcomm.c:726 #, c-format msgid "could not accept new connection: %m" msgstr "не удалось принять новое подключение: %m" -#: libpq/pqcomm.c:908 +#: libpq/pqcomm.c:927 #, c-format msgid "there is no client connection" msgstr "нет клиентского подключения" -#: libpq/pqcomm.c:959 libpq/pqcomm.c:1055 +#: libpq/pqcomm.c:978 libpq/pqcomm.c:1074 #, c-format msgid "could not receive data from client: %m" msgstr "не удалось получить данные от клиента: %m" -#: libpq/pqcomm.c:1200 tcop/postgres.c:3913 +#: libpq/pqcomm.c:1219 tcop/postgres.c:3926 #, c-format msgid "terminating connection because protocol synchronization was lost" msgstr "закрытие подключения из-за потери синхронизации протокола" -#: libpq/pqcomm.c:1266 +#: libpq/pqcomm.c:1285 #, c-format msgid "unexpected EOF within message length word" msgstr "неожиданный обрыв данных в слове длины сообщения" -#: libpq/pqcomm.c:1277 +#: libpq/pqcomm.c:1296 #, c-format msgid "invalid message length" msgstr "неверная длина сообщения" -#: libpq/pqcomm.c:1299 libpq/pqcomm.c:1312 +#: libpq/pqcomm.c:1318 libpq/pqcomm.c:1331 #, c-format msgid "incomplete message from client" msgstr "неполное сообщение от клиента" -#: libpq/pqcomm.c:1445 +#: libpq/pqcomm.c:1464 #, c-format msgid "could not send data to client: %m" msgstr "не удалось послать данные клиенту: %m" @@ -13996,7 +14099,7 @@ msgstr " -i включить соединения TCP/IP\n" #: main/main.c:340 #, c-format msgid " -k DIRECTORY Unix-domain socket location\n" -msgstr " -k КАТАЛОГ расположение доменных сокетов Unix\n" +msgstr " -k КАТАЛОГ расположение Unix-сокетов\n" #: main/main.c:342 #, c-format @@ -14239,7 +14342,7 @@ msgstr "методы расширенного узла \"%s\" не зареги #: nodes/nodeFuncs.c:123 nodes/nodeFuncs.c:154 parser/parse_coerce.c:1844 #: parser/parse_coerce.c:1872 parser/parse_coerce.c:1948 -#: parser/parse_expr.c:2089 parser/parse_func.c:598 parser/parse_oper.c:958 +#: parser/parse_expr.c:2110 parser/parse_func.c:602 parser/parse_oper.c:964 #, c-format msgid "could not find array type for data type %s" msgstr "тип массива для типа данных %s не найден" @@ -14260,19 +14363,19 @@ msgid "%s cannot be applied to the nullable side of an outer join" msgstr "%s не может применяться к NULL-содержащей стороне внешнего соединения" #. 
translator: %s is a SQL row locking clause such as FOR UPDATE -#: optimizer/plan/planner.c:1542 parser/analyze.c:1622 parser/analyze.c:1819 -#: parser/analyze.c:2613 +#: optimizer/plan/planner.c:1572 parser/analyze.c:1624 parser/analyze.c:1821 +#: parser/analyze.c:2615 #, c-format msgid "%s is not allowed with UNION/INTERSECT/EXCEPT" msgstr "%s несовместимо с UNION/INTERSECT/EXCEPT" -#: optimizer/plan/planner.c:2142 optimizer/plan/planner.c:4100 +#: optimizer/plan/planner.c:2166 optimizer/plan/planner.c:4124 #, c-format msgid "could not implement GROUP BY" msgstr "не удалось реализовать GROUP BY" -#: optimizer/plan/planner.c:2143 optimizer/plan/planner.c:4101 -#: optimizer/plan/planner.c:4841 optimizer/prep/prepunion.c:928 +#: optimizer/plan/planner.c:2167 optimizer/plan/planner.c:4125 +#: optimizer/plan/planner.c:4865 optimizer/prep/prepunion.c:935 #, c-format msgid "" "Some of the datatypes only support hashing, while others only support " @@ -14281,27 +14384,27 @@ msgstr "" "Одни типы данных поддерживают только хеширование, а другие - только " "сортировку." -#: optimizer/plan/planner.c:4840 +#: optimizer/plan/planner.c:4864 #, c-format msgid "could not implement DISTINCT" msgstr "не удалось реализовать DISTINCT" -#: optimizer/plan/planner.c:5520 +#: optimizer/plan/planner.c:5544 #, c-format msgid "could not implement window PARTITION BY" msgstr "не удалось реализовать PARTITION BY для окна" -#: optimizer/plan/planner.c:5521 +#: optimizer/plan/planner.c:5545 #, c-format msgid "Window partitioning columns must be of sortable datatypes." msgstr "Столбцы, разбивающие окна, должны иметь сортируемые типы данных." -#: optimizer/plan/planner.c:5525 +#: optimizer/plan/planner.c:5549 #, c-format msgid "could not implement window ORDER BY" msgstr "не удалось реализовать ORDER BY для окна" -#: optimizer/plan/planner.c:5526 +#: optimizer/plan/planner.c:5550 #, c-format msgid "Window ordering columns must be of sortable datatypes." msgstr "Столбцы, сортирующие окна, должны иметь сортируемые типы данных." @@ -14311,52 +14414,52 @@ msgstr "Столбцы, сортирующие окна, должны иметь msgid "too many range table entries" msgstr "слишком много элементов RTE" -#: optimizer/prep/prepunion.c:483 +#: optimizer/prep/prepunion.c:496 #, c-format msgid "could not implement recursive UNION" msgstr "не удалось реализовать рекурсивный UNION" -#: optimizer/prep/prepunion.c:484 +#: optimizer/prep/prepunion.c:497 #, c-format msgid "All column datatypes must be hashable." msgstr "Все столбцы должны иметь хешируемые типы данных." #. 
translator: %s is UNION, INTERSECT, or EXCEPT -#: optimizer/prep/prepunion.c:927 +#: optimizer/prep/prepunion.c:934 #, c-format msgid "could not implement %s" msgstr "не удалось реализовать %s" -#: optimizer/util/clauses.c:4634 +#: optimizer/util/clauses.c:4693 #, c-format msgid "SQL function \"%s\" during inlining" msgstr "внедрённая в код SQL-функция \"%s\"" -#: optimizer/util/plancat.c:119 +#: optimizer/util/plancat.c:120 #, c-format msgid "cannot access temporary or unlogged relations during recovery" msgstr "" "обращаться к временным или нежурналируемым отношениям в процессе " "восстановления нельзя" -#: optimizer/util/plancat.c:619 +#: optimizer/util/plancat.c:620 #, c-format msgid "whole row unique index inference specifications are not supported" msgstr "" "указания со ссылкой на всю строку для выбора уникального индекса не " "поддерживаются" -#: optimizer/util/plancat.c:636 +#: optimizer/util/plancat.c:637 #, c-format msgid "constraint in ON CONFLICT clause has no associated index" msgstr "ограничению в ON CONFLICT не соответствует индекс" -#: optimizer/util/plancat.c:687 +#: optimizer/util/plancat.c:688 #, c-format msgid "ON CONFLICT DO UPDATE not supported with exclusion constraints" msgstr "ON CONFLICT DO UPDATE не поддерживается с ограничениями-исключениями" -#: optimizer/util/plancat.c:792 +#: optimizer/util/plancat.c:793 #, c-format msgid "" "there is no unique or exclusion constraint matching the ON CONFLICT " @@ -14365,28 +14468,28 @@ msgstr "" "нет уникального ограничения или ограничения-исключения, соответствующего " "указанию ON CONFLICT" -#: parser/analyze.c:698 parser/analyze.c:1385 +#: parser/analyze.c:700 parser/analyze.c:1387 #, c-format msgid "VALUES lists must all be the same length" msgstr "списки VALUES должны иметь одинаковую длину" -#: parser/analyze.c:853 +#: parser/analyze.c:855 #, c-format msgid "ON CONFLICT clause is not supported with partitioned tables" msgstr "" "предложение ON CONFLICT с секционированными таблицами не поддерживается" -#: parser/analyze.c:916 +#: parser/analyze.c:918 #, c-format msgid "INSERT has more expressions than target columns" msgstr "INSERT содержит больше выражений, чем целевых столбцов" -#: parser/analyze.c:934 +#: parser/analyze.c:936 #, c-format msgid "INSERT has more target columns than expressions" msgstr "INSERT содержит больше целевых столбцов, чем выражений" -#: parser/analyze.c:938 +#: parser/analyze.c:940 #, c-format msgid "" "The insertion source is a row expression containing the same number of " @@ -14395,29 +14498,29 @@ msgstr "" "Источником данных является строка, включающая столько же столбцов, сколько " "требуется для INSERT. Вы намеренно использовали скобки?" -#: parser/analyze.c:1198 parser/analyze.c:1595 +#: parser/analyze.c:1200 parser/analyze.c:1597 #, c-format msgid "SELECT ... INTO is not allowed here" msgstr "SELECT ... INTO здесь не допускается" #. translator: %s is a SQL row locking clause such as FOR UPDATE -#: parser/analyze.c:1527 parser/analyze.c:2792 +#: parser/analyze.c:1529 parser/analyze.c:2794 #, c-format msgid "%s cannot be applied to VALUES" msgstr "%s нельзя применять к VALUES" -#: parser/analyze.c:1746 +#: parser/analyze.c:1748 #, c-format msgid "invalid UNION/INTERSECT/EXCEPT ORDER BY clause" msgstr "неверное предложение UNION/INTERSECT/EXCEPT ORDER BY" -#: parser/analyze.c:1747 +#: parser/analyze.c:1749 #, c-format msgid "Only result column names can be used, not expressions or functions." msgstr "" "Допустимо использование только имён столбцов, но не выражений или функций." 
-#: parser/analyze.c:1748 +#: parser/analyze.c:1750 #, c-format msgid "" "Add the expression/function to every SELECT, or move the UNION into a FROM " @@ -14426,12 +14529,12 @@ msgstr "" "Добавьте выражение/функцию в каждый SELECT или перенесите UNION в " "предложение FROM." -#: parser/analyze.c:1809 +#: parser/analyze.c:1811 #, c-format msgid "INTO is only allowed on first SELECT of UNION/INTERSECT/EXCEPT" msgstr "INTO можно добавить только в первый SELECT в UNION/INTERSECT/EXCEPT" -#: parser/analyze.c:1881 +#: parser/analyze.c:1883 #, c-format msgid "" "UNION/INTERSECT/EXCEPT member statement cannot refer to other relations of " @@ -14440,155 +14543,155 @@ msgstr "" "оператор, составляющий UNION/INTERSECT/EXCEPT, не может ссылаться на другие " "отношения на том же уровне запроса" -#: parser/analyze.c:1970 +#: parser/analyze.c:1972 #, c-format msgid "each %s query must have the same number of columns" msgstr "все запросы в %s должны возвращать одинаковое число столбцов" -#: parser/analyze.c:2363 +#: parser/analyze.c:2365 #, c-format msgid "RETURNING must have at least one column" msgstr "в RETURNING должен быть минимум один столбец" -#: parser/analyze.c:2404 +#: parser/analyze.c:2406 #, c-format msgid "cannot specify both SCROLL and NO SCROLL" msgstr "противоречивые указания SCROLL и NO SCROLL" -#: parser/analyze.c:2423 +#: parser/analyze.c:2425 #, c-format msgid "DECLARE CURSOR must not contain data-modifying statements in WITH" msgstr "DECLARE CURSOR не может содержать операторы, изменяющие данные, в WITH" #. translator: %s is a SQL row locking clause such as FOR UPDATE -#: parser/analyze.c:2431 +#: parser/analyze.c:2433 #, c-format msgid "DECLARE CURSOR WITH HOLD ... %s is not supported" msgstr "DECLARE CURSOR WITH HOLD ... %s не поддерживается" -#: parser/analyze.c:2434 +#: parser/analyze.c:2436 #, c-format msgid "Holdable cursors must be READ ONLY." msgstr "Сохраняемые курсоры должны быть READ ONLY." #. translator: %s is a SQL row locking clause such as FOR UPDATE -#: parser/analyze.c:2442 +#: parser/analyze.c:2444 #, c-format msgid "DECLARE SCROLL CURSOR ... %s is not supported" msgstr "DECLARE SCROLL CURSOR ... %s не поддерживается" #. translator: %s is a SQL row locking clause such as FOR UPDATE -#: parser/analyze.c:2453 +#: parser/analyze.c:2455 #, c-format msgid "DECLARE INSENSITIVE CURSOR ... %s is not supported" msgstr "DECLARE INSENSITIVE CURSOR ... %s не поддерживается" -#: parser/analyze.c:2456 +#: parser/analyze.c:2458 #, c-format msgid "Insensitive cursors must be READ ONLY." msgstr "Независимые курсоры должны быть READ ONLY." -#: parser/analyze.c:2522 +#: parser/analyze.c:2524 #, c-format msgid "materialized views must not use data-modifying statements in WITH" msgstr "" "в материализованных представлениях не должны использоваться операторы, " "изменяющие данные в WITH" -#: parser/analyze.c:2532 +#: parser/analyze.c:2534 #, c-format msgid "materialized views must not use temporary tables or views" msgstr "" "в материализованных представлениях не должны использоваться временные " "таблицы и представления" -#: parser/analyze.c:2542 +#: parser/analyze.c:2544 #, c-format msgid "materialized views may not be defined using bound parameters" msgstr "" "определять материализованные представления со связанными параметрами нельзя" -#: parser/analyze.c:2554 +#: parser/analyze.c:2556 #, c-format msgid "materialized views cannot be UNLOGGED" msgstr "" "материализованные представления не могут быть нежурналируемыми (UNLOGGED)" #. 
translator: %s is a SQL row locking clause such as FOR UPDATE -#: parser/analyze.c:2620 +#: parser/analyze.c:2622 #, c-format msgid "%s is not allowed with DISTINCT clause" msgstr "%s несовместимо с предложением DISTINCT" #. translator: %s is a SQL row locking clause such as FOR UPDATE -#: parser/analyze.c:2627 +#: parser/analyze.c:2629 #, c-format msgid "%s is not allowed with GROUP BY clause" msgstr "%s несовместимо с предложением GROUP BY" #. translator: %s is a SQL row locking clause such as FOR UPDATE -#: parser/analyze.c:2634 +#: parser/analyze.c:2636 #, c-format msgid "%s is not allowed with HAVING clause" msgstr "%s несовместимо с предложением HAVING" #. translator: %s is a SQL row locking clause such as FOR UPDATE -#: parser/analyze.c:2641 +#: parser/analyze.c:2643 #, c-format msgid "%s is not allowed with aggregate functions" msgstr "%s несовместимо с агрегатными функциями" #. translator: %s is a SQL row locking clause such as FOR UPDATE -#: parser/analyze.c:2648 +#: parser/analyze.c:2650 #, c-format msgid "%s is not allowed with window functions" msgstr "%s несовместимо с оконными функциями" #. translator: %s is a SQL row locking clause such as FOR UPDATE -#: parser/analyze.c:2655 +#: parser/analyze.c:2657 #, c-format msgid "%s is not allowed with set-returning functions in the target list" msgstr "" "%s не допускается с функциями, возвращающие множества, в списке результатов" #. translator: %s is a SQL row locking clause such as FOR UPDATE -#: parser/analyze.c:2734 +#: parser/analyze.c:2736 #, c-format msgid "%s must specify unqualified relation names" msgstr "для %s нужно указывать неполные имена отношений" #. translator: %s is a SQL row locking clause such as FOR UPDATE -#: parser/analyze.c:2765 +#: parser/analyze.c:2767 #, c-format msgid "%s cannot be applied to a join" msgstr "%s нельзя применить к соединению" #. translator: %s is a SQL row locking clause such as FOR UPDATE -#: parser/analyze.c:2774 +#: parser/analyze.c:2776 #, c-format msgid "%s cannot be applied to a function" msgstr "%s нельзя применить к функции" #. translator: %s is a SQL row locking clause such as FOR UPDATE -#: parser/analyze.c:2783 +#: parser/analyze.c:2785 #, c-format msgid "%s cannot be applied to a table function" msgstr "%s нельзя применить к табличной функции" #. translator: %s is a SQL row locking clause such as FOR UPDATE -#: parser/analyze.c:2801 +#: parser/analyze.c:2803 #, c-format msgid "%s cannot be applied to a WITH query" msgstr "%s нельзя применить к запросу WITH" #. translator: %s is a SQL row locking clause such as FOR UPDATE -#: parser/analyze.c:2810 +#: parser/analyze.c:2812 #, c-format msgid "%s cannot be applied to a named tuplestore" msgstr "%s нельзя применить к именованному источнику кортежей" #. translator: %s is a SQL row locking clause such as FOR UPDATE -#: parser/analyze.c:2827 +#: parser/analyze.c:2829 #, c-format msgid "relation \"%s\" in %s clause not found in FROM clause" msgstr "отношение \"%s\" в определении %s отсутствует в предложении FROM" @@ -14725,7 +14828,7 @@ msgid "grouping operations are not allowed in partition key expression" msgstr "операции группировки нельзя применять в выражении ключа разбиения" #. 
translator: %s is name of a SQL construct, eg GROUP BY -#: parser/parse_agg.c:530 parser/parse_clause.c:1806 +#: parser/parse_agg.c:530 parser/parse_clause.c:1810 #, c-format msgid "aggregate functions are not allowed in %s" msgstr "агрегатные функции нельзя применять в конструкции %s" @@ -14745,83 +14848,100 @@ msgstr "" "агрегатная функция внешнего уровня не может содержать в своих аргументах " "переменные нижнего уровня" -#: parser/parse_agg.c:712 +#: parser/parse_agg.c:720 +#, c-format +msgid "aggregate function calls cannot contain set-returning function calls" +msgstr "" +"вызовы агрегатных функций не могут включать вызовы функций, возвращающих " +"множества" + +#: parser/parse_agg.c:721 parser/parse_expr.c:1761 parser/parse_expr.c:2237 +#: parser/parse_func.c:773 +#, c-format +msgid "" +"You might be able to move the set-returning function into a LATERAL FROM " +"item." +msgstr "" +"Исправить ситуацию можно, переместив функцию, возвращающую множество, в " +"элемент LATERAL FROM." + +#: parser/parse_agg.c:726 #, c-format msgid "aggregate function calls cannot contain window function calls" msgstr "вызовы агрегатных функций не могут включать вызовы оконных функции" -#: parser/parse_agg.c:790 +#: parser/parse_agg.c:805 msgid "window functions are not allowed in JOIN conditions" msgstr "оконные функции нельзя применять в условиях JOIN" -#: parser/parse_agg.c:797 +#: parser/parse_agg.c:812 msgid "window functions are not allowed in functions in FROM" msgstr "оконные функции нельзя применять в функциях во FROM" -#: parser/parse_agg.c:803 +#: parser/parse_agg.c:818 msgid "window functions are not allowed in policy expressions" msgstr "оконные функции нельзя применять в выражениях политик" -#: parser/parse_agg.c:815 +#: parser/parse_agg.c:830 msgid "window functions are not allowed in window definitions" msgstr "оконные функции нельзя применять в определении окна" -#: parser/parse_agg.c:847 +#: parser/parse_agg.c:862 msgid "window functions are not allowed in check constraints" msgstr "оконные функции нельзя применять в ограничениях-проверках" -#: parser/parse_agg.c:851 +#: parser/parse_agg.c:866 msgid "window functions are not allowed in DEFAULT expressions" msgstr "оконные функции нельзя применять в выражениях DEFAULT" -#: parser/parse_agg.c:854 +#: parser/parse_agg.c:869 msgid "window functions are not allowed in index expressions" msgstr "оконные функции нельзя применять в выражениях индексов" -#: parser/parse_agg.c:857 +#: parser/parse_agg.c:872 msgid "window functions are not allowed in index predicates" msgstr "оконные функции нельзя применять в предикатах индексов" -#: parser/parse_agg.c:860 +#: parser/parse_agg.c:875 msgid "window functions are not allowed in transform expressions" msgstr "оконные функции нельзя применять в выражениях преобразований" -#: parser/parse_agg.c:863 +#: parser/parse_agg.c:878 msgid "window functions are not allowed in EXECUTE parameters" msgstr "оконные функции нельзя применять в параметрах EXECUTE" -#: parser/parse_agg.c:866 +#: parser/parse_agg.c:881 msgid "window functions are not allowed in trigger WHEN conditions" msgstr "оконные функции нельзя применять в условиях WHEN для триггеров" -#: parser/parse_agg.c:869 +#: parser/parse_agg.c:884 msgid "window functions are not allowed in partition key expression" msgstr "оконные функции нельзя применять в выражении ключа разбиения" #. 
translator: %s is name of a SQL construct, eg GROUP BY -#: parser/parse_agg.c:889 parser/parse_clause.c:1815 +#: parser/parse_agg.c:904 parser/parse_clause.c:1819 #, c-format msgid "window functions are not allowed in %s" msgstr "оконные функции нельзя применять в конструкции %s" -#: parser/parse_agg.c:923 parser/parse_clause.c:2649 +#: parser/parse_agg.c:938 parser/parse_clause.c:2653 #, c-format msgid "window \"%s\" does not exist" msgstr "окно \"%s\" не существует" -#: parser/parse_agg.c:1008 +#: parser/parse_agg.c:1023 #, c-format msgid "too many grouping sets present (maximum 4096)" msgstr "слишком много наборов группирования (при максимуме 4096)" -#: parser/parse_agg.c:1157 +#: parser/parse_agg.c:1172 #, c-format msgid "" "aggregate functions are not allowed in a recursive query's recursive term" msgstr "" "в рекурсивной части рекурсивного запроса агрегатные функции недопустимы" -#: parser/parse_agg.c:1350 +#: parser/parse_agg.c:1365 #, c-format msgid "" "column \"%s.%s\" must appear in the GROUP BY clause or be used in an " @@ -14830,7 +14950,7 @@ msgstr "" "столбец \"%s.%s\" должен фигурировать в предложении GROUP BY или " "использоваться в агрегатной функции" -#: parser/parse_agg.c:1353 +#: parser/parse_agg.c:1368 #, c-format msgid "" "Direct arguments of an ordered-set aggregate must use only grouped columns." @@ -14838,13 +14958,13 @@ msgstr "" "Прямые аргументы сортирующей агрегатной функции могут включать только " "группируемые столбцы." -#: parser/parse_agg.c:1358 +#: parser/parse_agg.c:1373 #, c-format msgid "subquery uses ungrouped column \"%s.%s\" from outer query" msgstr "" "подзапрос использует негруппированный столбец \"%s.%s\" из внешнего запроса" -#: parser/parse_agg.c:1522 +#: parser/parse_agg.c:1537 #, c-format msgid "" "arguments to GROUPING must be grouping expressions of the associated query " @@ -14858,14 +14978,20 @@ msgstr "" msgid "relation \"%s\" cannot be the target of a modifying statement" msgstr "отношение \"%s\" не может быть целевым в операторе, изменяющем данные" -#: parser/parse_clause.c:651 +#: parser/parse_clause.c:608 parser/parse_clause.c:636 parser/parse_func.c:2153 +#, c-format +msgid "set-returning functions must appear at top level of FROM" +msgstr "" +"функции, возвращающие множества, должны находиться на верхнем уровне FROM" + +#: parser/parse_clause.c:648 #, c-format msgid "multiple column definition lists are not allowed for the same function" msgstr "" "для одной и той же функции нельзя задать разные списки с определениями " "столбцов" -#: parser/parse_clause.c:684 +#: parser/parse_clause.c:681 #, c-format msgid "" "ROWS FROM() with multiple functions cannot have a column definition list" @@ -14873,7 +14999,7 @@ msgstr "" "у ROWS FROM() с несколькими функциями не может быть списка с определениями " "столбцов" -#: parser/parse_clause.c:685 +#: parser/parse_clause.c:682 #, c-format msgid "" "Put a separate column definition list for each function inside ROWS FROM()." @@ -14881,14 +15007,14 @@ msgstr "" "Добавьте отдельные списки с определениями столбцов для каждой функции в ROWS " "FROM()." 
-#: parser/parse_clause.c:691 +#: parser/parse_clause.c:688 #, c-format msgid "UNNEST() with multiple arguments cannot have a column definition list" msgstr "" "у UNNEST() с несколькими аргументами не может быть списка с определениями " "столбцов" -#: parser/parse_clause.c:692 +#: parser/parse_clause.c:689 #, c-format msgid "" "Use separate UNNEST() calls inside ROWS FROM(), and attach a column " @@ -14897,43 +15023,43 @@ msgstr "" "Напишите отдельные вызовы UNNEST() внутри ROWS FROM() и добавьте список с " "определениями столбцов к каждому." -#: parser/parse_clause.c:699 +#: parser/parse_clause.c:696 #, c-format msgid "WITH ORDINALITY cannot be used with a column definition list" msgstr "" "WITH ORDINALITY нельзя использовать со списком с определениями столбцов" -#: parser/parse_clause.c:700 +#: parser/parse_clause.c:697 #, c-format msgid "Put the column definition list inside ROWS FROM()." msgstr "Поместите список с определениями столбцов внутрь ROWS FROM()." -#: parser/parse_clause.c:804 +#: parser/parse_clause.c:800 #, c-format msgid "only one FOR ORDINALITY column is allowed" msgstr "FOR ORDINALITY допускается только для одного столбца" -#: parser/parse_clause.c:865 +#: parser/parse_clause.c:861 #, c-format msgid "column name \"%s\" is not unique" msgstr "имя столбца \"%s\" не уникально" -#: parser/parse_clause.c:907 +#: parser/parse_clause.c:903 #, c-format msgid "namespace name \"%s\" is not unique" msgstr "имя пространства имён \"%s\" не уникально" -#: parser/parse_clause.c:917 +#: parser/parse_clause.c:913 #, c-format msgid "only one default namespace is allowed" msgstr "допускается только одно пространство имён по умолчанию" -#: parser/parse_clause.c:978 +#: parser/parse_clause.c:974 #, c-format msgid "tablesample method %s does not exist" msgstr "метод %s для получения выборки не существует" -#: parser/parse_clause.c:1000 +#: parser/parse_clause.c:996 #, c-format msgid "tablesample method %s requires %d argument, not %d" msgid_plural "tablesample method %s requires %d arguments, not %d" @@ -14941,103 +15067,103 @@ msgstr[0] "метод %s для получения выборки требует msgstr[1] "метод %s для получения выборки требует аргументов: %d, получено: %d" msgstr[2] "метод %s для получения выборки требует аргументов: %d, получено: %d" -#: parser/parse_clause.c:1034 +#: parser/parse_clause.c:1030 #, c-format msgid "tablesample method %s does not support REPEATABLE" msgstr "метод %s для получения выборки не поддерживает REPEATABLE" -#: parser/parse_clause.c:1196 +#: parser/parse_clause.c:1200 #, c-format msgid "TABLESAMPLE clause can only be applied to tables and materialized views" msgstr "" "предложение TABLESAMPLE можно применять только к таблицам и " "материализованным представлениям" -#: parser/parse_clause.c:1366 +#: parser/parse_clause.c:1370 #, c-format msgid "column name \"%s\" appears more than once in USING clause" msgstr "имя столбца \"%s\" фигурирует в предложении USING неоднократно" -#: parser/parse_clause.c:1381 +#: parser/parse_clause.c:1385 #, c-format msgid "common column name \"%s\" appears more than once in left table" msgstr "имя общего столбца \"%s\" фигурирует в таблице слева неоднократно" -#: parser/parse_clause.c:1390 +#: parser/parse_clause.c:1394 #, c-format msgid "column \"%s\" specified in USING clause does not exist in left table" msgstr "в таблице слева нет столбца \"%s\", указанного в предложении USING" -#: parser/parse_clause.c:1404 +#: parser/parse_clause.c:1408 #, c-format msgid "common column name \"%s\" appears more than once in right table" msgstr "имя 
общего столбца \"%s\" фигурирует в таблице справа неоднократно" -#: parser/parse_clause.c:1413 +#: parser/parse_clause.c:1417 #, c-format msgid "column \"%s\" specified in USING clause does not exist in right table" msgstr "в таблице справа нет столбца \"%s\", указанного в предложении USING" -#: parser/parse_clause.c:1467 +#: parser/parse_clause.c:1471 #, c-format msgid "column alias list for \"%s\" has too many entries" msgstr "слишком много записей в списке псевдонимов столбца \"%s\"" #. translator: %s is name of a SQL construct, eg LIMIT -#: parser/parse_clause.c:1776 +#: parser/parse_clause.c:1780 #, c-format msgid "argument of %s must not contain variables" msgstr "аргумент %s не может содержать переменные" #. translator: first %s is name of a SQL construct, eg ORDER BY -#: parser/parse_clause.c:1941 +#: parser/parse_clause.c:1945 #, c-format msgid "%s \"%s\" is ambiguous" msgstr "выражение %s \"%s\" неоднозначно" #. translator: %s is name of a SQL construct, eg ORDER BY -#: parser/parse_clause.c:1970 +#: parser/parse_clause.c:1974 #, c-format msgid "non-integer constant in %s" msgstr "не целочисленная константа в %s" #. translator: %s is name of a SQL construct, eg ORDER BY -#: parser/parse_clause.c:1992 +#: parser/parse_clause.c:1996 #, c-format msgid "%s position %d is not in select list" msgstr "в списке выборки %s нет элемента %d" -#: parser/parse_clause.c:2433 +#: parser/parse_clause.c:2437 #, c-format msgid "CUBE is limited to 12 elements" msgstr "CUBE имеет ограничение в 12 элементов" -#: parser/parse_clause.c:2637 +#: parser/parse_clause.c:2641 #, c-format msgid "window \"%s\" is already defined" msgstr "окно \"%s\" уже определено" -#: parser/parse_clause.c:2698 +#: parser/parse_clause.c:2702 #, c-format msgid "cannot override PARTITION BY clause of window \"%s\"" msgstr "переопределить предложение PARTITION BY для окна \"%s\" нельзя" -#: parser/parse_clause.c:2710 +#: parser/parse_clause.c:2714 #, c-format msgid "cannot override ORDER BY clause of window \"%s\"" msgstr "переопределить предложение ORDER BY для окна \"%s\" нельзя" -#: parser/parse_clause.c:2740 parser/parse_clause.c:2746 +#: parser/parse_clause.c:2744 parser/parse_clause.c:2750 #, c-format msgid "cannot copy window \"%s\" because it has a frame clause" msgstr "скопировать окно \"%s\", имеющее предложение рамки, нельзя" -#: parser/parse_clause.c:2748 +#: parser/parse_clause.c:2752 #, c-format msgid "Omit the parentheses in this OVER clause." msgstr "Уберите скобки в предложении OVER." 
-#: parser/parse_clause.c:2814 +#: parser/parse_clause.c:2818 #, c-format msgid "" "in an aggregate with DISTINCT, ORDER BY expressions must appear in argument " @@ -15046,68 +15172,68 @@ msgstr "" "для агрегатной функции с DISTINCT, выражения ORDER BY должны быть в списке " "аргументов" -#: parser/parse_clause.c:2815 +#: parser/parse_clause.c:2819 #, c-format msgid "for SELECT DISTINCT, ORDER BY expressions must appear in select list" msgstr "" "в конструкции SELECT DISTINCT выражения ORDER BY должны быть в списке выборки" -#: parser/parse_clause.c:2847 +#: parser/parse_clause.c:2851 #, c-format msgid "an aggregate with DISTINCT must have at least one argument" msgstr "агрегатной функции с DISTINCT нужен минимум один аргумент" -#: parser/parse_clause.c:2848 +#: parser/parse_clause.c:2852 #, c-format msgid "SELECT DISTINCT must have at least one column" msgstr "в SELECT DISTINCT нужен минимум один столбец" -#: parser/parse_clause.c:2914 parser/parse_clause.c:2946 +#: parser/parse_clause.c:2918 parser/parse_clause.c:2950 #, c-format msgid "SELECT DISTINCT ON expressions must match initial ORDER BY expressions" msgstr "" "выражения SELECT DISTINCT ON должны соответствовать начальным выражениям " "ORDER BY" -#: parser/parse_clause.c:3024 +#: parser/parse_clause.c:3028 #, c-format msgid "ASC/DESC is not allowed in ON CONFLICT clause" msgstr "ASC/DESC нельзя использовать в ON CONFLICT" -#: parser/parse_clause.c:3030 +#: parser/parse_clause.c:3034 #, c-format msgid "NULLS FIRST/LAST is not allowed in ON CONFLICT clause" msgstr "NULLS FIRST/LAST нельзя использовать в ON CONFLICT" -#: parser/parse_clause.c:3110 +#: parser/parse_clause.c:3114 #, c-format msgid "" "ON CONFLICT DO UPDATE requires inference specification or constraint name" msgstr "" "в ON CONFLICT DO UPDATE требуется наводящее указание или имя ограничения" -#: parser/parse_clause.c:3111 +#: parser/parse_clause.c:3115 #, c-format msgid "For example, ON CONFLICT (column_name)." msgstr "Например: ON CONFLICT (имя_столбца)." -#: parser/parse_clause.c:3122 +#: parser/parse_clause.c:3126 #, c-format msgid "ON CONFLICT is not supported with system catalog tables" msgstr "ON CONFLICT с таблицами системного каталога не поддерживается" -#: parser/parse_clause.c:3130 +#: parser/parse_clause.c:3134 #, c-format msgid "ON CONFLICT is not supported on table \"%s\" used as a catalog table" msgstr "" "ON CONFLICT не поддерживается для таблицы \"%s\", служащей таблицей каталога" -#: parser/parse_clause.c:3256 +#: parser/parse_clause.c:3277 #, c-format msgid "operator %s is not a valid ordering operator" msgstr "оператор %s не годится для сортировки" -#: parser/parse_clause.c:3258 +#: parser/parse_clause.c:3279 #, c-format msgid "" "Ordering operators must be \"<\" or \">\" members of btree operator families." 
@@ -15117,7 +15243,7 @@ msgstr "" #: parser/parse_coerce.c:971 parser/parse_coerce.c:1001 #: parser/parse_coerce.c:1019 parser/parse_coerce.c:1034 -#: parser/parse_expr.c:2123 parser/parse_expr.c:2699 parser/parse_target.c:936 +#: parser/parse_expr.c:2144 parser/parse_expr.c:2732 parser/parse_target.c:936 #, c-format msgid "cannot cast type %s to %s" msgstr "привести тип %s к %s нельзя" @@ -15366,62 +15492,68 @@ msgstr "FOR UPDATE/SHARE в рекурсивном запросе не подд msgid "recursive reference to query \"%s\" must not appear more than once" msgstr "рекурсивная ссылка на запрос \"%s\" указана неоднократно" -#: parser/parse_expr.c:357 +#: parser/parse_expr.c:350 #, c-format msgid "DEFAULT is not allowed in this context" msgstr "DEFAULT не допускается в данном контексте" -#: parser/parse_expr.c:410 parser/parse_relation.c:3247 -#: parser/parse_relation.c:3267 +#: parser/parse_expr.c:403 parser/parse_relation.c:3286 +#: parser/parse_relation.c:3306 #, c-format msgid "column %s.%s does not exist" msgstr "столбец %s.%s не существует" -#: parser/parse_expr.c:422 +#: parser/parse_expr.c:415 #, c-format msgid "column \"%s\" not found in data type %s" msgstr "столбец \"%s\" не найден в типе данных %s" -#: parser/parse_expr.c:428 +#: parser/parse_expr.c:421 #, c-format msgid "could not identify column \"%s\" in record data type" msgstr "не удалось идентифицировать столбец \"%s\" в типе записи" # skip-rule: space-before-period -#: parser/parse_expr.c:434 +#: parser/parse_expr.c:427 #, c-format msgid "column notation .%s applied to type %s, which is not a composite type" msgstr "" "запись имени столбца .%s применена к типу %s, который не является составным" -#: parser/parse_expr.c:464 parser/parse_target.c:722 +#: parser/parse_expr.c:458 parser/parse_target.c:722 #, c-format msgid "row expansion via \"*\" is not supported here" msgstr "расширение строки через \"*\" здесь не поддерживается" -#: parser/parse_expr.c:769 parser/parse_relation.c:689 +#: parser/parse_expr.c:767 parser/parse_relation.c:689 #: parser/parse_relation.c:789 parser/parse_target.c:1171 #, c-format msgid "column reference \"%s\" is ambiguous" msgstr "неоднозначная ссылка на столбец \"%s\"" -#: parser/parse_expr.c:825 parser/parse_param.c:110 parser/parse_param.c:142 +#: parser/parse_expr.c:823 parser/parse_param.c:110 parser/parse_param.c:142 #: parser/parse_param.c:199 parser/parse_param.c:298 #, c-format msgid "there is no parameter $%d" msgstr "параметр $%d не существует" -#: parser/parse_expr.c:1064 +#: parser/parse_expr.c:1066 #, c-format msgid "NULLIF requires = operator to yield boolean" msgstr "для NULLIF требуется, чтобы оператор = возвращал логическое значение" -#: parser/parse_expr.c:1508 parser/parse_expr.c:1540 +#. translator: %s is name of a SQL construct, eg NULLIF +#: parser/parse_expr.c:1072 parser/parse_expr.c:3048 +#, c-format +msgid "%s must not return a set" +msgstr "%s не должна возвращать множество" + +#: parser/parse_expr.c:1519 parser/parse_expr.c:1551 #, c-format msgid "number of columns does not match number of values" msgstr "число столбцов не равно числу значений" -#: parser/parse_expr.c:1554 +#: parser/parse_expr.c:1565 #, c-format msgid "" "source for a multiple-column UPDATE item must be a sub-SELECT or ROW() " @@ -15430,193 +15562,199 @@ msgstr "" "источником для элемента UPDATE с несколькими столбцами должен быть вложенный " "SELECT или выражение ROW()" -#: parser/parse_expr.c:1798 +#. 
translator: %s is name of a SQL construct, eg GROUP BY +#: parser/parse_expr.c:1759 parser/parse_expr.c:2235 parser/parse_func.c:2256 +#, c-format +msgid "set-returning functions are not allowed in %s" +msgstr "функции, возвращающие множества, нельзя применять в конструкции %s" + +#: parser/parse_expr.c:1819 msgid "cannot use subquery in check constraint" msgstr "в ограничении-проверке нельзя использовать подзапросы" -#: parser/parse_expr.c:1802 +#: parser/parse_expr.c:1823 msgid "cannot use subquery in DEFAULT expression" msgstr "в выражении DEFAULT нельзя использовать подзапросы" -#: parser/parse_expr.c:1805 +#: parser/parse_expr.c:1826 msgid "cannot use subquery in index expression" msgstr "в индексном выражении нельзя использовать подзапросы" -#: parser/parse_expr.c:1808 +#: parser/parse_expr.c:1829 msgid "cannot use subquery in index predicate" msgstr "в предикате индекса нельзя использовать подзапросы" -#: parser/parse_expr.c:1811 +#: parser/parse_expr.c:1832 msgid "cannot use subquery in transform expression" msgstr "нельзя использовать подзапрос в выражении преобразования" -#: parser/parse_expr.c:1814 +#: parser/parse_expr.c:1835 msgid "cannot use subquery in EXECUTE parameter" msgstr "в качестве параметра EXECUTE нельзя использовать подзапрос" -#: parser/parse_expr.c:1817 +#: parser/parse_expr.c:1838 msgid "cannot use subquery in trigger WHEN condition" msgstr "в условии WHEN для триггера нельзя использовать подзапросы" -#: parser/parse_expr.c:1820 +#: parser/parse_expr.c:1841 msgid "cannot use subquery in partition key expression" msgstr "в выражении ключа разбиения нельзя использовать подзапросы" -#: parser/parse_expr.c:1873 +#: parser/parse_expr.c:1894 #, c-format msgid "subquery must return only one column" msgstr "подзапрос должен вернуть только один столбец" -#: parser/parse_expr.c:1957 +#: parser/parse_expr.c:1978 #, c-format msgid "subquery has too many columns" msgstr "в подзапросе слишком много столбцов" -#: parser/parse_expr.c:1962 +#: parser/parse_expr.c:1983 #, c-format msgid "subquery has too few columns" msgstr "в подзапросе недостаточно столбцов" -#: parser/parse_expr.c:2063 +#: parser/parse_expr.c:2084 #, c-format msgid "cannot determine type of empty array" msgstr "тип пустого массива определить нельзя" -#: parser/parse_expr.c:2064 +#: parser/parse_expr.c:2085 #, c-format msgid "Explicitly cast to the desired type, for example ARRAY[]::integer[]." msgstr "" "Приведите его к желаемому типу явным образом, например ARRAY[]::integer[]." 
-#: parser/parse_expr.c:2078 +#: parser/parse_expr.c:2099 #, c-format msgid "could not find element type for data type %s" msgstr "не удалось определить тип элемента для типа данных %s" -#: parser/parse_expr.c:2353 +#: parser/parse_expr.c:2386 #, c-format msgid "unnamed XML attribute value must be a column reference" msgstr "вместо значения XML-атрибута без имени должен указываться столбец" -#: parser/parse_expr.c:2354 +#: parser/parse_expr.c:2387 #, c-format msgid "unnamed XML element value must be a column reference" msgstr "вместо значения XML-элемента без имени должен указываться столбец" -#: parser/parse_expr.c:2369 +#: parser/parse_expr.c:2402 #, c-format msgid "XML attribute name \"%s\" appears more than once" msgstr "имя XML-атрибута \"%s\" указано неоднократно" -#: parser/parse_expr.c:2476 +#: parser/parse_expr.c:2509 #, c-format msgid "cannot cast XMLSERIALIZE result to %s" msgstr "привести результат XMLSERIALIZE к типу %s нельзя" -#: parser/parse_expr.c:2772 parser/parse_expr.c:2967 +#: parser/parse_expr.c:2805 parser/parse_expr.c:3001 #, c-format msgid "unequal number of entries in row expressions" msgstr "разное число элементов в строках" -#: parser/parse_expr.c:2782 +#: parser/parse_expr.c:2815 #, c-format msgid "cannot compare rows of zero length" msgstr "строки нулевой длины сравнивать нельзя" -#: parser/parse_expr.c:2806 +#: parser/parse_expr.c:2840 #, c-format msgid "row comparison operator must yield type boolean, not type %s" msgstr "" "оператор сравнения строк должен выдавать результат логического типа, а не %s" -#: parser/parse_expr.c:2813 +#: parser/parse_expr.c:2847 #, c-format msgid "row comparison operator must not return a set" msgstr "оператор сравнения строк не должен возвращать множество" -#: parser/parse_expr.c:2872 parser/parse_expr.c:2913 +#: parser/parse_expr.c:2906 parser/parse_expr.c:2947 #, c-format msgid "could not determine interpretation of row comparison operator %s" msgstr "не удалось выбрать интерпретацию оператора сравнения строк %s" -#: parser/parse_expr.c:2874 +#: parser/parse_expr.c:2908 #, c-format msgid "" "Row comparison operators must be associated with btree operator families." msgstr "" "Операторы сравнения строк должны быть связаны с семейством операторов btree." -#: parser/parse_expr.c:2915 +#: parser/parse_expr.c:2949 #, c-format msgid "There are multiple equally-plausible candidates." msgstr "Оказалось несколько равноценных кандидатур." 
-#: parser/parse_expr.c:3007 +#: parser/parse_expr.c:3042 #, c-format msgid "IS DISTINCT FROM requires = operator to yield boolean" msgstr "" "для IS DISTINCT FROM требуется, чтобы оператор = возвращал логическое " "значение" -#: parser/parse_expr.c:3320 parser/parse_expr.c:3338 +#: parser/parse_expr.c:3361 parser/parse_expr.c:3379 #, c-format msgid "operator precedence change: %s is now lower precedence than %s" msgstr "" "приоритет операторов изменён: %s теперь имеет меньший приоритет, чем %s" -#: parser/parse_func.c:175 +#: parser/parse_func.c:179 #, c-format msgid "argument name \"%s\" used more than once" msgstr "имя аргумента \"%s\" используется неоднократно" -#: parser/parse_func.c:186 +#: parser/parse_func.c:190 #, c-format msgid "positional argument cannot follow named argument" msgstr "нумерованный аргумент не может следовать за именованным аргументом" -#: parser/parse_func.c:271 +#: parser/parse_func.c:275 #, c-format msgid "%s(*) specified, but %s is not an aggregate function" msgstr "выражение %s(*) недопустимо, так как %s - не агрегатная функция" -#: parser/parse_func.c:278 +#: parser/parse_func.c:282 #, c-format msgid "DISTINCT specified, but %s is not an aggregate function" msgstr "в аргументах %s указан DISTINCT, но это не агрегатная функция" -#: parser/parse_func.c:284 +#: parser/parse_func.c:288 #, c-format msgid "WITHIN GROUP specified, but %s is not an aggregate function" msgstr "в аргументах %s указано WITHIN GROUP, но это не агрегатная функция" -#: parser/parse_func.c:290 +#: parser/parse_func.c:294 #, c-format msgid "ORDER BY specified, but %s is not an aggregate function" msgstr "в аргументах %s указан ORDER BY, но это не агрегатная функция" -#: parser/parse_func.c:296 +#: parser/parse_func.c:300 #, c-format msgid "FILTER specified, but %s is not an aggregate function" msgstr "в аргументах %s указан FILTER, но это не агрегатная функция" -#: parser/parse_func.c:302 +#: parser/parse_func.c:306 #, c-format msgid "" "OVER specified, but %s is not a window function nor an aggregate function" msgstr "" "вызов %s включает предложение OVER, но это не оконная и не агрегатная функция" -#: parser/parse_func.c:332 +#: parser/parse_func.c:336 #, c-format msgid "WITHIN GROUP is required for ordered-set aggregate %s" msgstr "для сортирующего агрегата %s требуется WITHIN GROUP" -#: parser/parse_func.c:338 +#: parser/parse_func.c:342 #, c-format msgid "OVER is not supported for ordered-set aggregate %s" msgstr "сортирующий агрегат %s не поддерживает OVER" -#: parser/parse_func.c:369 parser/parse_func.c:398 +#: parser/parse_func.c:373 parser/parse_func.c:402 #, c-format msgid "" "There is an ordered-set aggregate %s, but it requires %d direct arguments, " @@ -15625,7 +15763,7 @@ msgstr "" "Есть сортирующий агрегат %s, но прямых аргументов у него должно быть %d, а " "не %d." -#: parser/parse_func.c:423 +#: parser/parse_func.c:427 #, c-format msgid "" "To use the hypothetical-set aggregate %s, the number of hypothetical direct " @@ -15635,7 +15773,7 @@ msgstr "" "гипотетических аргументов (%d) должно равняться числу сортируемых столбцов " "(здесь: %d)." -#: parser/parse_func.c:437 +#: parser/parse_func.c:441 #, c-format msgid "" "There is an ordered-set aggregate %s, but it requires at least %d direct " @@ -15644,27 +15782,27 @@ msgstr "" "Есть сортирующий агрегат %s, но он требует минимум %d непосредственных " "аргументов." 
-#: parser/parse_func.c:456 +#: parser/parse_func.c:460 #, c-format msgid "%s is not an ordered-set aggregate, so it cannot have WITHIN GROUP" msgstr "%s - не сортирующая агрегатная функция, WITHIN GROUP к ней неприменимо" -#: parser/parse_func.c:469 +#: parser/parse_func.c:473 #, c-format msgid "window function %s requires an OVER clause" msgstr "для оконной функции %s требуется предложение OVER" -#: parser/parse_func.c:476 +#: parser/parse_func.c:480 #, c-format msgid "window function %s cannot have WITHIN GROUP" msgstr "для оконной функции %s неприменимо WITHIN GROUP" -#: parser/parse_func.c:497 +#: parser/parse_func.c:501 #, c-format msgid "function %s is not unique" msgstr "функция %s не уникальна" -#: parser/parse_func.c:500 +#: parser/parse_func.c:504 #, c-format msgid "" "Could not choose a best candidate function. You might need to add explicit " @@ -15673,7 +15811,7 @@ msgstr "" "Не удалось выбрать лучшую кандидатуру функции. Возможно, вам следует " "добавить явные приведения типов." -#: parser/parse_func.c:511 +#: parser/parse_func.c:515 #, c-format msgid "" "No aggregate function matches the given name and argument types. Perhaps you " @@ -15684,7 +15822,7 @@ msgstr "" "Возможно, неверно расположено предложение ORDER BY - оно должно следовать за " "всеми обычными аргументами функции." -#: parser/parse_func.c:522 +#: parser/parse_func.c:526 #, c-format msgid "" "No function matches the given name and argument types. You might need to add " @@ -15693,133 +15831,134 @@ msgstr "" "Функция с данными именем и типами аргументов не найдена. Возможно, вам " "следует добавить явные приведения типов." -#: parser/parse_func.c:624 +#: parser/parse_func.c:628 #, c-format msgid "VARIADIC argument must be an array" msgstr "параметр VARIADIC должен быть массивом" -#: parser/parse_func.c:676 parser/parse_func.c:740 +#: parser/parse_func.c:680 parser/parse_func.c:744 #, c-format msgid "%s(*) must be used to call a parameterless aggregate function" msgstr "агрегатная функция без параметров должна вызываться так: %s(*)" -#: parser/parse_func.c:683 +#: parser/parse_func.c:687 #, c-format msgid "aggregates cannot return sets" msgstr "агрегатные функции не могут возвращать множества" -#: parser/parse_func.c:698 +#: parser/parse_func.c:702 #, c-format msgid "aggregates cannot use named arguments" msgstr "у агрегатных функций не может быть именованных аргументов" -#: parser/parse_func.c:730 +#: parser/parse_func.c:734 #, c-format msgid "DISTINCT is not implemented for window functions" msgstr "предложение DISTINCT для оконных функций не реализовано" -#: parser/parse_func.c:750 +#: parser/parse_func.c:754 #, c-format msgid "aggregate ORDER BY is not implemented for window functions" msgstr "агрегатное предложение ORDER BY для оконных функций не реализовано" -#: parser/parse_func.c:759 +#: parser/parse_func.c:763 #, c-format msgid "FILTER is not implemented for non-aggregate window functions" msgstr "предложение FILTER для не агрегатных оконных функций не реализовано" -#: parser/parse_func.c:765 +#: parser/parse_func.c:772 +#, c-format +msgid "window function calls cannot contain set-returning function calls" +msgstr "" +"вызовы оконных функций не могут включать вызовы функций, возвращающих " +"множества" + +#: parser/parse_func.c:780 #, c-format msgid "window functions cannot return sets" msgstr "оконные функции не могут возвращать множества" -#: parser/parse_func.c:1931 +#: parser/parse_func.c:1950 #, c-format msgid "function name \"%s\" is not unique" msgstr "имя функции \"%s\" не уникально" -#: 
parser/parse_func.c:1933 +#: parser/parse_func.c:1952 #, c-format msgid "Specify the argument list to select the function unambiguously." msgstr "Задайте список аргументов для однозначного выбора функции." -#: parser/parse_func.c:1943 +#: parser/parse_func.c:1962 #, c-format msgid "could not find a function named \"%s\"" msgstr "не удалось найти функцию с именем \"%s\"" -#: parser/parse_func.c:2045 +#: parser/parse_func.c:2064 #, c-format msgid "aggregate %s(*) does not exist" msgstr "агрегатная функция %s(*) не существует" -#: parser/parse_func.c:2050 +#: parser/parse_func.c:2069 #, c-format msgid "aggregate %s does not exist" msgstr "агрегатная функция %s не существует" -#: parser/parse_func.c:2069 +#: parser/parse_func.c:2088 #, c-format msgid "function %s is not an aggregate" msgstr "функция \"%s\" не является агрегатной" -#: parser/parse_func.c:2117 +#: parser/parse_func.c:2140 msgid "set-returning functions are not allowed in JOIN conditions" msgstr "функции, возвращающие множества, нельзя применять в условиях JOIN" -#: parser/parse_func.c:2130 +#: parser/parse_func.c:2161 msgid "set-returning functions are not allowed in policy expressions" msgstr "функции, возвращающие множества, нельзя применять в выражениях политик" -#: parser/parse_func.c:2145 +#: parser/parse_func.c:2176 msgid "set-returning functions are not allowed in window definitions" msgstr "функции, возвращающие множества, нельзя применять в определении окна" -#: parser/parse_func.c:2183 +#: parser/parse_func.c:2214 msgid "set-returning functions are not allowed in check constraints" msgstr "" "функции, возвращающие множества, нельзя применять в ограничениях-проверках" -#: parser/parse_func.c:2187 +#: parser/parse_func.c:2218 msgid "set-returning functions are not allowed in DEFAULT expressions" msgstr "функции, возвращающие множества, нельзя применять в выражениях DEFAULT" -#: parser/parse_func.c:2190 +#: parser/parse_func.c:2221 msgid "set-returning functions are not allowed in index expressions" msgstr "" "функции, возвращающие множества, нельзя применять в выражениях индексов" -#: parser/parse_func.c:2193 +#: parser/parse_func.c:2224 msgid "set-returning functions are not allowed in index predicates" msgstr "" "функции, возвращающие множества, нельзя применять в предикатах индексов" -#: parser/parse_func.c:2196 +#: parser/parse_func.c:2227 msgid "set-returning functions are not allowed in transform expressions" msgstr "" "функции, возвращающие множества, нельзя применять в выражениях преобразований" -#: parser/parse_func.c:2199 +#: parser/parse_func.c:2230 msgid "set-returning functions are not allowed in EXECUTE parameters" msgstr "функции, возвращающие множества, нельзя применять в параметрах EXECUTE" -#: parser/parse_func.c:2202 +#: parser/parse_func.c:2233 msgid "set-returning functions are not allowed in trigger WHEN conditions" msgstr "" "функции, возвращающие множества, нельзя применять в условиях WHEN для " "триггеров" -#: parser/parse_func.c:2205 -msgid "set-returning functions are not allowed in partition key expression" +#: parser/parse_func.c:2236 +msgid "set-returning functions are not allowed in partition key expressions" msgstr "" "функции, возвращающие множества, нельзя применять в выражении ключа разбиения" -#. 
translator: %s is name of a SQL construct, eg GROUP BY -#: parser/parse_func.c:2225 -#, c-format -msgid "set-returning functions are not allowed in %s" -msgstr "функции, возвращающие множества, нельзя применять в конструкции %s" - #: parser/parse_node.c:87 #, c-format msgid "target lists can have at most %d entries" @@ -15841,8 +15980,8 @@ msgid "array assignment requires type %s but expression is of type %s" msgstr "" "для присваивания массива требуется тип %s, однако выражение имеет тип %s" -#: parser/parse_oper.c:125 parser/parse_oper.c:724 utils/adt/regproc.c:585 -#: utils/adt/regproc.c:605 utils/adt/regproc.c:789 +#: parser/parse_oper.c:125 parser/parse_oper.c:724 utils/adt/regproc.c:520 +#: utils/adt/regproc.c:704 #, c-format msgid "operator does not exist: %s" msgstr "оператор не существует: %s" @@ -15852,14 +15991,6 @@ msgstr "оператор не существует: %s" msgid "Use an explicit ordering operator or modify the query." msgstr "Используйте явный оператор сортировки или измените запрос." -#: parser/parse_oper.c:228 utils/adt/array_userfuncs.c:794 -#: utils/adt/array_userfuncs.c:933 utils/adt/arrayfuncs.c:3639 -#: utils/adt/arrayfuncs.c:4077 utils/adt/arrayfuncs.c:6039 -#: utils/adt/rowtypes.c:1167 -#, c-format -msgid "could not identify an equality operator for type %s" -msgstr "не удалось найти оператор равенства для типа %s" - #: parser/parse_oper.c:480 #, c-format msgid "operator requires run-time type coercion: %s" @@ -15888,24 +16019,24 @@ msgstr "" "Оператор с данными именем и типами аргументов не найден. Возможно, вам " "следует добавить явные приведения типов." -#: parser/parse_oper.c:785 parser/parse_oper.c:903 +#: parser/parse_oper.c:787 parser/parse_oper.c:909 #, c-format msgid "operator is only a shell: %s" msgstr "оператор \"%s\" - лишь оболочка" -#: parser/parse_oper.c:891 +#: parser/parse_oper.c:897 #, c-format msgid "op ANY/ALL (array) requires array on right side" msgstr "для операторов ANY/ALL (с массивом) требуется массив справа" -#: parser/parse_oper.c:933 +#: parser/parse_oper.c:939 #, c-format msgid "op ANY/ALL (array) requires operator to yield boolean" msgstr "" "для операторов ANY/ALL (с массивом) требуется, чтобы оператор = возвращал " "логическое значение" -#: parser/parse_oper.c:938 +#: parser/parse_oper.c:944 #, c-format msgid "op ANY/ALL (array) requires operator not to return a set" msgstr "" @@ -15932,12 +16063,12 @@ msgstr "ссылка на таблицу %u неоднозначна" msgid "table name \"%s\" specified more than once" msgstr "имя таблицы \"%s\" указано больше одного раза" -#: parser/parse_relation.c:446 parser/parse_relation.c:3187 +#: parser/parse_relation.c:446 parser/parse_relation.c:3226 #, c-format msgid "invalid reference to FROM-clause entry for table \"%s\"" msgstr "в элементе предложения FROM неверная ссылка на таблицу \"%s\"" -#: parser/parse_relation.c:449 parser/parse_relation.c:3192 +#: parser/parse_relation.c:449 parser/parse_relation.c:3231 #, c-format msgid "" "There is an entry for table \"%s\", but it cannot be referenced from this " @@ -15956,13 +16087,13 @@ msgstr "Для ссылки LATERAL тип JOIN должен быть INNER ил msgid "system column \"%s\" reference in check constraint is invalid" msgstr "в ограничении-проверке указан недопустимый системный столбец \"%s\"" -#: parser/parse_relation.c:1086 parser/parse_relation.c:1371 -#: parser/parse_relation.c:1940 +#: parser/parse_relation.c:1086 parser/parse_relation.c:1366 +#: parser/parse_relation.c:1935 #, c-format msgid "table \"%s\" has %d columns available but %d columns specified" msgstr "в таблице 
\"%s\" содержится столбцов: %d, но указано: %d" -#: parser/parse_relation.c:1178 +#: parser/parse_relation.c:1173 #, c-format msgid "" "There is a WITH item named \"%s\", but it cannot be referenced from this " @@ -15971,7 +16102,7 @@ msgstr "" "В WITH есть элемент \"%s\", но на него нельзя ссылаться из этой части " "запроса." -#: parser/parse_relation.c:1180 +#: parser/parse_relation.c:1175 #, c-format msgid "" "Use WITH RECURSIVE, or re-order the WITH items to remove forward references." @@ -15979,7 +16110,7 @@ msgstr "" "Используйте WITH RECURSIVE или исключите ссылки вперёд, переупорядочив " "элементы WITH." -#: parser/parse_relation.c:1491 +#: parser/parse_relation.c:1486 #, c-format msgid "" "a column definition list is only allowed for functions returning \"record\"" @@ -15987,54 +16118,55 @@ msgstr "" "список с определением столбцов может быть только у функций, возвращающих " "запись" -#: parser/parse_relation.c:1500 +#: parser/parse_relation.c:1495 #, c-format msgid "a column definition list is required for functions returning \"record\"" msgstr "" "у функций, возвращающих запись, должен быть список с определением столбцов" -#: parser/parse_relation.c:1579 +#: parser/parse_relation.c:1574 #, c-format msgid "function \"%s\" in FROM has unsupported return type %s" msgstr "" "функция \"%s\", используемая во FROM, возвращает неподдерживаемый тип %s" -#: parser/parse_relation.c:1768 +#: parser/parse_relation.c:1763 #, c-format msgid "VALUES lists \"%s\" have %d columns available but %d columns specified" msgstr "в списках VALUES \"%s\" содержится столбцов: %d, но указано: %d" -#: parser/parse_relation.c:1823 +#: parser/parse_relation.c:1818 #, c-format msgid "joins can have at most %d columns" msgstr "число столбцов в соединениях ограничено %d" -#: parser/parse_relation.c:1913 +#: parser/parse_relation.c:1908 #, c-format msgid "WITH query \"%s\" does not have a RETURNING clause" msgstr "в запросе \"%s\" в WITH нет предложения RETURNING" -#: parser/parse_relation.c:2808 parser/parse_relation.c:2971 +#: parser/parse_relation.c:2843 parser/parse_relation.c:2881 +#: parser/parse_relation.c:3010 #, c-format msgid "column %d of relation \"%s\" does not exist" msgstr "столбец %d отношения \"%s\" не существует" -#: parser/parse_relation.c:3190 +#: parser/parse_relation.c:3229 #, c-format msgid "Perhaps you meant to reference the table alias \"%s\"." msgstr "Возможно, предполагалась ссылка на псевдоним таблицы \"%s\"." -#: parser/parse_relation.c:3198 +#: parser/parse_relation.c:3237 #, c-format msgid "missing FROM-clause entry for table \"%s\"" msgstr "таблица \"%s\" отсутствует в предложении FROM" -#: parser/parse_relation.c:3250 +#: parser/parse_relation.c:3289 #, c-format msgid "Perhaps you meant to reference the column \"%s.%s\"." msgstr "Возможно, предполагалась ссылка на столбец \"%s.%s\"." -#: parser/parse_relation.c:3252 +#: parser/parse_relation.c:3291 #, c-format msgid "" "There is a column named \"%s\" in table \"%s\", but it cannot be referenced " @@ -16043,7 +16175,7 @@ msgstr "" "Столбец \"%s\" есть в таблице \"%s\", но на него нельзя ссылаться из этой " "части запроса." -#: parser/parse_relation.c:3269 +#: parser/parse_relation.c:3308 #, c-format msgid "" "Perhaps you meant to reference the column \"%s.%s\" or the column \"%s.%s\"." 
@@ -16121,7 +16253,7 @@ msgstr "неправильное указание %%TYPE (слишком мно msgid "type reference %s converted to %s" msgstr "ссылка на тип %s преобразована в тип %s" -#: parser/parse_type.c:261 parser/parse_type.c:804 utils/cache/typcache.c:243 +#: parser/parse_type.c:261 parser/parse_type.c:804 utils/cache/typcache.c:245 #, c-format msgid "type \"%s\" is only a shell" msgstr "тип \"%s\" - лишь пустышка" @@ -16141,165 +16273,178 @@ msgstr "модификатором типа должна быть простая msgid "invalid type name \"%s\"" msgstr "неверное имя типа \"%s\"" -#: parser/parse_utilcmd.c:263 +#: parser/parse_utilcmd.c:269 #, c-format msgid "cannot create partitioned table as inheritance child" msgstr "создать секционированную таблицу в виде потомка нельзя" -#: parser/parse_utilcmd.c:268 -#, c-format -msgid "cannot partition using more than %d columns" -msgstr "число столбцов в ключе секционирования не может превышать %d" - -#: parser/parse_utilcmd.c:275 +#: parser/parse_utilcmd.c:439 #, c-format -msgid "cannot list partition using more than one column" -msgstr "секционирование по списку возможно только с одним столбцом" +msgid "%s will create implicit sequence \"%s\" for serial column \"%s.%s\"" +msgstr "%s создаст последовательность \"%s\" для столбца serial \"%s.%s\"" -#: parser/parse_utilcmd.c:413 +#: parser/parse_utilcmd.c:562 #, c-format msgid "array of serial is not implemented" msgstr "массивы с типом serial не реализованы" -#: parser/parse_utilcmd.c:461 -#, c-format -msgid "%s will create implicit sequence \"%s\" for serial column \"%s.%s\"" -msgstr "%s создаст последовательность \"%s\" для столбца serial \"%s.%s\"" - -#: parser/parse_utilcmd.c:554 parser/parse_utilcmd.c:566 +#: parser/parse_utilcmd.c:638 parser/parse_utilcmd.c:650 #, c-format msgid "" "conflicting NULL/NOT NULL declarations for column \"%s\" of table \"%s\"" msgstr "конфликт NULL/NOT NULL в объявлении столбца \"%s\" таблицы \"%s\"" -#: parser/parse_utilcmd.c:578 +#: parser/parse_utilcmd.c:662 #, c-format msgid "multiple default values specified for column \"%s\" of table \"%s\"" msgstr "" "для столбца \"%s\" таблицы \"%s\" указано несколько значений по умолчанию" -#: parser/parse_utilcmd.c:595 parser/parse_utilcmd.c:704 +#: parser/parse_utilcmd.c:679 +#, c-format +msgid "identity columns are not supported on typed tables" +msgstr "столбцы идентификации не поддерживаются с типизированными таблицами" + +#: parser/parse_utilcmd.c:683 +#, c-format +msgid "identity columns are not supported on partitions" +msgstr "столбцы идентификации не поддерживаются с секциями" + +#: parser/parse_utilcmd.c:692 +#, c-format +msgid "multiple identity specifications for column \"%s\" of table \"%s\"" +msgstr "" +"для столбца \"%s\" таблицы \"%s\" свойство identity задано неоднократно" + +#: parser/parse_utilcmd.c:715 parser/parse_utilcmd.c:832 #, c-format msgid "primary key constraints are not supported on foreign tables" msgstr "ограничения первичного ключа для сторонних таблиц не поддерживаются" -#: parser/parse_utilcmd.c:601 parser/parse_utilcmd.c:710 +#: parser/parse_utilcmd.c:721 parser/parse_utilcmd.c:838 #, c-format msgid "primary key constraints are not supported on partitioned tables" msgstr "" "ограничения первичного ключа для секционированных таблиц не поддерживаются" -#: parser/parse_utilcmd.c:610 parser/parse_utilcmd.c:720 +#: parser/parse_utilcmd.c:730 parser/parse_utilcmd.c:848 #, c-format msgid "unique constraints are not supported on foreign tables" msgstr "ограничения уникальности для сторонних таблиц не поддерживаются" -#: 
parser/parse_utilcmd.c:616 parser/parse_utilcmd.c:726 +#: parser/parse_utilcmd.c:736 parser/parse_utilcmd.c:854 #, c-format msgid "unique constraints are not supported on partitioned tables" msgstr "ограничения уникальности для секционированных таблиц не поддерживаются" -#: parser/parse_utilcmd.c:633 parser/parse_utilcmd.c:756 +#: parser/parse_utilcmd.c:753 parser/parse_utilcmd.c:884 #, c-format msgid "foreign key constraints are not supported on foreign tables" msgstr "ограничения внешнего ключа для сторонних таблиц не поддерживаются" -#: parser/parse_utilcmd.c:639 parser/parse_utilcmd.c:762 +#: parser/parse_utilcmd.c:759 parser/parse_utilcmd.c:890 #, c-format msgid "foreign key constraints are not supported on partitioned tables" msgstr "" "ограничения внешнего ключа для секционированных таблиц не поддерживаются" -#: parser/parse_utilcmd.c:736 +#: parser/parse_utilcmd.c:787 +#, c-format +msgid "both default and identity specified for column \"%s\" of table \"%s\"" +msgstr "" +"для столбца \"%s\" таблицы \"%s\" задано и значение по умолчанию, и свойство " +"identity" + +#: parser/parse_utilcmd.c:864 #, c-format msgid "exclusion constraints are not supported on foreign tables" msgstr "ограничения-исключения для сторонних таблиц не поддерживаются" -#: parser/parse_utilcmd.c:742 +#: parser/parse_utilcmd.c:870 #, c-format msgid "exclusion constraints are not supported on partitioned tables" msgstr "ограничения-исключения для секционированных таблиц не поддерживаются" -#: parser/parse_utilcmd.c:812 +#: parser/parse_utilcmd.c:940 #, c-format msgid "LIKE is not supported for creating foreign tables" msgstr "LIKE при создании сторонних таблиц не поддерживается" -#: parser/parse_utilcmd.c:1344 parser/parse_utilcmd.c:1420 +#: parser/parse_utilcmd.c:1495 parser/parse_utilcmd.c:1571 #, c-format msgid "Index \"%s\" contains a whole-row table reference." msgstr "Индекс \"%s\" ссылается на тип всей строки таблицы." -#: parser/parse_utilcmd.c:1689 +#: parser/parse_utilcmd.c:1840 #, c-format msgid "cannot use an existing index in CREATE TABLE" msgstr "в CREATE TABLE нельзя использовать существующий индекс" -#: parser/parse_utilcmd.c:1709 +#: parser/parse_utilcmd.c:1860 #, c-format msgid "index \"%s\" is already associated with a constraint" msgstr "индекс \"%s\" уже связан с ограничением" -#: parser/parse_utilcmd.c:1717 +#: parser/parse_utilcmd.c:1868 #, c-format msgid "index \"%s\" does not belong to table \"%s\"" msgstr "индекс \"%s\" не принадлежит таблице \"%s\"" -#: parser/parse_utilcmd.c:1724 +#: parser/parse_utilcmd.c:1875 #, c-format msgid "index \"%s\" is not valid" msgstr "индекс \"%s\" - нерабочий" -#: parser/parse_utilcmd.c:1730 +#: parser/parse_utilcmd.c:1881 #, c-format msgid "\"%s\" is not a unique index" msgstr "\"%s\" не является уникальным индексом" -#: parser/parse_utilcmd.c:1731 parser/parse_utilcmd.c:1738 -#: parser/parse_utilcmd.c:1745 parser/parse_utilcmd.c:1815 +#: parser/parse_utilcmd.c:1882 parser/parse_utilcmd.c:1889 +#: parser/parse_utilcmd.c:1896 parser/parse_utilcmd.c:1966 #, c-format msgid "Cannot create a primary key or unique constraint using such an index." msgstr "" "Создать первичный ключ или ограничение уникальности для такого индекса " "нельзя." 
-#: parser/parse_utilcmd.c:1737 +#: parser/parse_utilcmd.c:1888 #, c-format msgid "index \"%s\" contains expressions" msgstr "индекс \"%s\" содержит выражения" -#: parser/parse_utilcmd.c:1744 +#: parser/parse_utilcmd.c:1895 #, c-format msgid "\"%s\" is a partial index" msgstr "\"%s\" - частичный индекс" -#: parser/parse_utilcmd.c:1756 +#: parser/parse_utilcmd.c:1907 #, c-format msgid "\"%s\" is a deferrable index" msgstr "\"%s\" - откладываемый индекс" -#: parser/parse_utilcmd.c:1757 +#: parser/parse_utilcmd.c:1908 #, c-format msgid "Cannot create a non-deferrable constraint using a deferrable index." msgstr "" "Создать не откладываемое ограничение на базе откладываемого индекса нельзя." -#: parser/parse_utilcmd.c:1814 +#: parser/parse_utilcmd.c:1965 #, c-format msgid "index \"%s\" does not have default sorting behavior" msgstr "для индекса \"%s\" не определено поведение при сортировке по умолчанию" -#: parser/parse_utilcmd.c:1958 +#: parser/parse_utilcmd.c:2109 #, c-format msgid "column \"%s\" appears twice in primary key constraint" msgstr "столбец \"%s\" фигурирует в первичном ключе дважды" -#: parser/parse_utilcmd.c:1964 +#: parser/parse_utilcmd.c:2115 #, c-format msgid "column \"%s\" appears twice in unique constraint" msgstr "столбец \"%s\" фигурирует в ограничении уникальности дважды" -#: parser/parse_utilcmd.c:2173 +#: parser/parse_utilcmd.c:2324 #, c-format msgid "" "index expressions and predicates can refer only to the table being indexed" @@ -16307,17 +16452,17 @@ msgstr "" "индексные выражения и предикаты могут ссылаться только на индексируемую " "таблицу" -#: parser/parse_utilcmd.c:2219 +#: parser/parse_utilcmd.c:2370 #, c-format msgid "rules on materialized views are not supported" msgstr "правила для материализованных представлений не поддерживаются" -#: parser/parse_utilcmd.c:2280 +#: parser/parse_utilcmd.c:2431 #, c-format msgid "rule WHERE condition cannot contain references to other relations" msgstr "в условиях WHERE для правил нельзя ссылаться на другие отношения" -#: parser/parse_utilcmd.c:2352 +#: parser/parse_utilcmd.c:2503 #, c-format msgid "" "rules with WHERE conditions can only have SELECT, INSERT, UPDATE, or DELETE " @@ -16326,132 +16471,151 @@ msgstr "" "правила с условиями WHERE могут содержать только действия SELECT, INSERT, " "UPDATE или DELETE" -#: parser/parse_utilcmd.c:2370 parser/parse_utilcmd.c:2469 +#: parser/parse_utilcmd.c:2521 parser/parse_utilcmd.c:2620 #: rewrite/rewriteHandler.c:498 rewrite/rewriteManip.c:1015 #, c-format msgid "conditional UNION/INTERSECT/EXCEPT statements are not implemented" msgstr "условные операторы UNION/INTERSECT/EXCEPT не реализованы" -#: parser/parse_utilcmd.c:2388 +#: parser/parse_utilcmd.c:2539 #, c-format msgid "ON SELECT rule cannot use OLD" msgstr "в правиле ON SELECT нельзя использовать OLD" -#: parser/parse_utilcmd.c:2392 +#: parser/parse_utilcmd.c:2543 #, c-format msgid "ON SELECT rule cannot use NEW" msgstr "в правиле ON SELECT нельзя использовать NEW" -#: parser/parse_utilcmd.c:2401 +#: parser/parse_utilcmd.c:2552 #, c-format msgid "ON INSERT rule cannot use OLD" msgstr "в правиле ON INSERT нельзя использовать OLD" -#: parser/parse_utilcmd.c:2407 +#: parser/parse_utilcmd.c:2558 #, c-format msgid "ON DELETE rule cannot use NEW" msgstr "в правиле ON DELETE нельзя использовать NEW" -#: parser/parse_utilcmd.c:2435 +#: parser/parse_utilcmd.c:2586 #, c-format msgid "cannot refer to OLD within WITH query" msgstr "в запросе WITH нельзя ссылаться на OLD" -#: parser/parse_utilcmd.c:2442 +#: parser/parse_utilcmd.c:2593 
#, c-format msgid "cannot refer to NEW within WITH query" msgstr "в запросе WITH нельзя ссылаться на NEW" -#: parser/parse_utilcmd.c:2766 +#: parser/parse_utilcmd.c:3027 #, c-format msgid "misplaced DEFERRABLE clause" msgstr "предложение DEFERRABLE расположено неправильно" -#: parser/parse_utilcmd.c:2771 parser/parse_utilcmd.c:2786 +#: parser/parse_utilcmd.c:3032 parser/parse_utilcmd.c:3047 #, c-format msgid "multiple DEFERRABLE/NOT DEFERRABLE clauses not allowed" msgstr "DEFERRABLE/NOT DEFERRABLE можно указать только один раз" -#: parser/parse_utilcmd.c:2781 +#: parser/parse_utilcmd.c:3042 #, c-format msgid "misplaced NOT DEFERRABLE clause" msgstr "предложение NOT DEFERRABLE расположено неправильно" -#: parser/parse_utilcmd.c:2794 parser/parse_utilcmd.c:2820 gram.y:5282 +#: parser/parse_utilcmd.c:3055 parser/parse_utilcmd.c:3081 gram.y:5363 #, c-format msgid "constraint declared INITIALLY DEFERRED must be DEFERRABLE" msgstr "" "ограничение с характеристикой INITIALLY DEFERRED должно быть объявлено как " "DEFERRABLE" -#: parser/parse_utilcmd.c:2802 +#: parser/parse_utilcmd.c:3063 #, c-format msgid "misplaced INITIALLY DEFERRED clause" msgstr "предложение INITIALLY DEFERRED расположено неправильно" -#: parser/parse_utilcmd.c:2807 parser/parse_utilcmd.c:2833 +#: parser/parse_utilcmd.c:3068 parser/parse_utilcmd.c:3094 #, c-format msgid "multiple INITIALLY IMMEDIATE/DEFERRED clauses not allowed" msgstr "INITIALLY IMMEDIATE/DEFERRED можно указать только один раз" -#: parser/parse_utilcmd.c:2828 +#: parser/parse_utilcmd.c:3089 #, c-format msgid "misplaced INITIALLY IMMEDIATE clause" msgstr "предложение INITIALLY IMMEDIATE расположено неправильно" -#: parser/parse_utilcmd.c:3019 +#: parser/parse_utilcmd.c:3280 #, c-format msgid "" "CREATE specifies a schema (%s) different from the one being created (%s)" msgstr "в CREATE указана схема (%s), отличная от создаваемой (%s)" -#: parser/parse_utilcmd.c:3085 +#: parser/parse_utilcmd.c:3339 #, c-format msgid "invalid bound specification for a list partition" msgstr "неправильное указание ограничения для секции по списку" -#: parser/parse_utilcmd.c:3108 parser/parse_utilcmd.c:3208 -#: parser/parse_utilcmd.c:3235 -#, c-format -msgid "specified value cannot be cast to type \"%s\" of column \"%s\"" -msgstr "указанное значение нельзя привести к типу \"%s\" столбца \"%s\"" - -#: parser/parse_utilcmd.c:3147 +#: parser/parse_utilcmd.c:3395 #, c-format msgid "invalid bound specification for a range partition" msgstr "неправильное указание ограничения для секции по диапазону" -#: parser/parse_utilcmd.c:3155 +#: parser/parse_utilcmd.c:3401 #, c-format msgid "FROM must specify exactly one value per partitioning column" msgstr "во FROM должно указываться ровно одно значение для столбца разбиения" -#: parser/parse_utilcmd.c:3159 +#: parser/parse_utilcmd.c:3405 #, c-format msgid "TO must specify exactly one value per partitioning column" msgstr "в TO должно указываться ровно одно значение для столбца разбиения" -#: parser/parse_utilcmd.c:3197 parser/parse_utilcmd.c:3224 +#: parser/parse_utilcmd.c:3452 parser/parse_utilcmd.c:3466 #, c-format msgid "cannot specify NULL in range bound" msgstr "указать NULL в диапазонном ограничении нельзя" +#: parser/parse_utilcmd.c:3513 +#, c-format +msgid "every bound following MAXVALUE must also be MAXVALUE" +msgstr "за границей MAXVALUE могут следовать только границы MAXVALUE" + +#: parser/parse_utilcmd.c:3519 +#, c-format +msgid "every bound following MINVALUE must also be MINVALUE" +msgstr "за границей MINVALUE могут следовать 
только границы MINVALUE" + +#: parser/parse_utilcmd.c:3549 parser/parse_utilcmd.c:3561 +#, c-format +msgid "specified value cannot be cast to type %s for column \"%s\"" +msgstr "указанное значение нельзя привести к типу %s столбца \"%s\"" + +#: parser/parse_utilcmd.c:3563 +#, c-format +msgid "The cast requires a non-immutable conversion." +msgstr "Для этого приведения требуется непостоянное преобразование." + +#: parser/parse_utilcmd.c:3564 +#, c-format +msgid "Try putting the literal value in single quotes." +msgstr "Попробуйте заключить буквальное значение в апострофы." + #: parser/scansup.c:204 #, c-format msgid "identifier \"%s\" will be truncated to \"%s\"" msgstr "идентификатор \"%s\" будет усечён до \"%s\"" -#: port/pg_shmem.c:175 port/sysv_shmem.c:175 +#: port/pg_shmem.c:196 port/sysv_shmem.c:196 #, c-format msgid "could not create shared memory segment: %m" msgstr "не удалось создать сегмент разделяемой памяти: %m" -#: port/pg_shmem.c:176 port/sysv_shmem.c:176 +#: port/pg_shmem.c:197 port/sysv_shmem.c:197 #, c-format msgid "Failed system call was shmget(key=%lu, size=%zu, 0%o)." msgstr "Ошибка в системном вызове shmget(ключ=%lu, размер=%zu, 0%o)." -#: port/pg_shmem.c:180 port/sysv_shmem.c:180 +#: port/pg_shmem.c:201 port/sysv_shmem.c:201 #, c-format msgid "" "This error usually means that PostgreSQL's request for a shared memory " @@ -16465,7 +16629,7 @@ msgstr "" "Подробная информация о настройке разделяемой памяти содержится в " "документации PostgreSQL." -#: port/pg_shmem.c:187 port/sysv_shmem.c:187 +#: port/pg_shmem.c:208 port/sysv_shmem.c:208 #, c-format msgid "" "This error usually means that PostgreSQL's request for a shared memory " @@ -16480,7 +16644,7 @@ msgstr "" "Подробная информация о настройке разделяемой памяти содержится в " "документации PostgreSQL." -#: port/pg_shmem.c:193 port/sysv_shmem.c:193 +#: port/pg_shmem.c:214 port/sysv_shmem.c:214 #, c-format msgid "" "This error does *not* mean that you have run out of disk space. It occurs " @@ -16497,12 +16661,12 @@ msgstr "" "Подробная информация о настройке разделяемой памяти содержится в " "документации PostgreSQL." -#: port/pg_shmem.c:483 port/sysv_shmem.c:483 +#: port/pg_shmem.c:505 port/sysv_shmem.c:505 #, c-format msgid "could not map anonymous shared memory: %m" msgstr "не удалось получить анонимную разделяемую память: %m" -#: port/pg_shmem.c:485 port/sysv_shmem.c:485 +#: port/pg_shmem.c:507 port/sysv_shmem.c:507 #, c-format msgid "" "This error usually means that PostgreSQL's request for a shared memory " @@ -16516,12 +16680,12 @@ msgstr "" "Б) можно снизить использование разделяемой памяти, возможно, уменьшив " "shared_buffers или max_connections." -#: port/pg_shmem.c:551 port/sysv_shmem.c:551 port/win32_shmem.c:134 +#: port/pg_shmem.c:573 port/sysv_shmem.c:573 port/win32_shmem.c:134 #, c-format msgid "huge pages not supported on this platform" msgstr "гигантские страницы на этой платформе не поддерживаются" -#: port/pg_shmem.c:646 port/sysv_shmem.c:646 +#: port/pg_shmem.c:668 port/sysv_shmem.c:668 #, c-format msgid "could not stat data directory \"%s\": %m" msgstr "не удалось получить информацию о каталоге данных \"%s\": %m" @@ -16664,73 +16828,73 @@ msgstr "Ошибка в системном вызове DuplicateHandle." msgid "Failed system call was MapViewOfFileEx." msgstr "Ошибка в системном вызове MapViewOfFileEx." 
-#: postmaster/autovacuum.c:416 +#: postmaster/autovacuum.c:405 #, c-format msgid "could not fork autovacuum launcher process: %m" msgstr "породить процесс запуска автоочистки не удалось: %m" -#: postmaster/autovacuum.c:452 +#: postmaster/autovacuum.c:441 #, c-format msgid "autovacuum launcher started" msgstr "процесс запуска автоочистки создан" -#: postmaster/autovacuum.c:838 +#: postmaster/autovacuum.c:825 #, c-format msgid "autovacuum launcher shutting down" msgstr "процесс запуска автоочистки завершается" -#: postmaster/autovacuum.c:1500 +#: postmaster/autovacuum.c:1487 #, c-format msgid "could not fork autovacuum worker process: %m" msgstr "не удалось породить рабочий процесс автоочистки: %m" -#: postmaster/autovacuum.c:1706 +#: postmaster/autovacuum.c:1685 #, c-format msgid "autovacuum: processing database \"%s\"" msgstr "автоочистка: обработка базы данных \"%s\"" # skip-rule: capital-letter-first -#: postmaster/autovacuum.c:2280 +#: postmaster/autovacuum.c:2260 #, c-format msgid "autovacuum: dropping orphan temp table \"%s.%s.%s\"" msgstr "автоочистка: удаление устаревшей врем. таблицы \"%s.%s.%s\"" -#: postmaster/autovacuum.c:2486 +#: postmaster/autovacuum.c:2468 #, c-format msgid "automatic vacuum of table \"%s.%s.%s\"" msgstr "автоматическая очистка таблицы \"%s.%s.%s\"" -#: postmaster/autovacuum.c:2489 +#: postmaster/autovacuum.c:2471 #, c-format msgid "automatic analyze of table \"%s.%s.%s\"" msgstr "автоматический анализ таблицы \"%s.%s.%s\"" -#: postmaster/autovacuum.c:2700 +#: postmaster/autovacuum.c:2664 #, c-format msgid "processing work entry for relation \"%s.%s.%s\"" msgstr "обработка рабочей записи для отношения \"%s.%s.%s\"" -#: postmaster/autovacuum.c:3344 +#: postmaster/autovacuum.c:3239 #, c-format msgid "autovacuum not started because of misconfiguration" msgstr "автоочистка не запущена из-за неправильной конфигурации" -#: postmaster/autovacuum.c:3345 +#: postmaster/autovacuum.c:3240 #, c-format msgid "Enable the \"track_counts\" option." msgstr "Включите параметр \"track_counts\"." 
-#: postmaster/bgworker.c:388 postmaster/bgworker.c:822 +#: postmaster/bgworker.c:393 postmaster/bgworker.c:856 #, c-format msgid "registering background worker \"%s\"" msgstr "регистрация фонового процесса \"%s\"" -#: postmaster/bgworker.c:420 +#: postmaster/bgworker.c:425 #, c-format msgid "unregistering background worker \"%s\"" msgstr "разрегистрация фонового процесса \"%s\"" -#: postmaster/bgworker.c:564 +#: postmaster/bgworker.c:590 #, c-format msgid "" "background worker \"%s\": must attach to shared memory in order to request a " @@ -16739,7 +16903,7 @@ msgstr "" "фоновый процесс \"%s\" должен иметь доступ к общей памяти, чтобы запросить " "подключение к БД" -#: postmaster/bgworker.c:573 +#: postmaster/bgworker.c:599 #, c-format msgid "" "background worker \"%s\": cannot request database access if starting at " @@ -16748,24 +16912,32 @@ msgstr "" "фоновый процесс \"%s\" не может получить доступ к БД, если он запущен при " "старте главного процесса" -#: postmaster/bgworker.c:587 +#: postmaster/bgworker.c:613 #, c-format msgid "background worker \"%s\": invalid restart interval" msgstr "фоновый процесс \"%s\": неправильный интервал перезапуска" -#: postmaster/bgworker.c:632 +#: postmaster/bgworker.c:628 +#, c-format +msgid "" +"background worker \"%s\": parallel workers may not be configured for restart" +msgstr "" +"фоновый процесс \"%s\": параллельные исполнители не могут быть настроены для " +"перезапуска" + +#: postmaster/bgworker.c:673 #, c-format msgid "terminating background worker \"%s\" due to administrator command" msgstr "завершение фонового процесса \"%s\" по команде администратора" -#: postmaster/bgworker.c:830 +#: postmaster/bgworker.c:864 #, c-format msgid "" "background worker \"%s\": must be registered in shared_preload_libraries" msgstr "" "фоновой процесс \"%s\" должен быть зарегистрирован в shared_preload_libraries" -#: postmaster/bgworker.c:842 +#: postmaster/bgworker.c:876 #, c-format msgid "" "background worker \"%s\": only dynamic background workers can request " @@ -16774,12 +16946,12 @@ msgstr "" "фоновый процесс \"%s\": только динамические фоновые процессы могут " "запрашивать уведомление" -#: postmaster/bgworker.c:857 +#: postmaster/bgworker.c:891 #, c-format msgid "too many background workers" msgstr "слишком много фоновых процессов" -#: postmaster/bgworker.c:858 +#: postmaster/bgworker.c:892 #, c-format msgid "Up to %d background worker can be registered with the current settings." msgid_plural "" @@ -16791,13 +16963,13 @@ msgstr[1] "" msgstr[2] "" "Максимально возможное число фоновых процессов при текущих параметрах: %d." -#: postmaster/bgworker.c:862 +#: postmaster/bgworker.c:896 #, c-format msgid "" "Consider increasing the configuration parameter \"max_worker_processes\"." msgstr "Возможно, стоит увеличить параметр \"max_worker_processes\"." -#: postmaster/checkpointer.c:465 +#: postmaster/checkpointer.c:464 #, c-format msgid "checkpoints are occurring too frequently (%d second apart)" msgid_plural "checkpoints are occurring too frequently (%d seconds apart)" @@ -16805,67 +16977,62 @@ msgstr[0] "контрольные точки происходят слишком msgstr[1] "контрольные точки происходят слишком часто (через %d сек.)" msgstr[2] "контрольные точки происходят слишком часто (через %d сек.)" -#: postmaster/checkpointer.c:469 +#: postmaster/checkpointer.c:468 #, c-format msgid "Consider increasing the configuration parameter \"max_wal_size\"." msgstr "Возможно, стоит увеличить параметр \"max_wal_size\"." 
-#: postmaster/checkpointer.c:629 -#, c-format -msgid "transaction log switch forced (archive_timeout=%d)" -msgstr "принудительное переключение журнала транзакций (archive_timeout=%d)" - -#: postmaster/checkpointer.c:1088 +#: postmaster/checkpointer.c:1087 #, c-format msgid "checkpoint request failed" msgstr "сбой при запросе контрольной точки" -#: postmaster/checkpointer.c:1089 +#: postmaster/checkpointer.c:1088 #, c-format msgid "Consult recent messages in the server log for details." msgstr "Смотрите подробности в протоколе сервера." -#: postmaster/checkpointer.c:1284 +#: postmaster/checkpointer.c:1283 #, c-format msgid "compacted fsync request queue from %d entries to %d entries" msgstr "очередь запросов fsync сжата (было записей: %d, стало: %d)" -#: postmaster/pgarch.c:149 +#: postmaster/pgarch.c:148 #, c-format msgid "could not fork archiver: %m" msgstr "не удалось породить процесс архивации: %m" -#: postmaster/pgarch.c:457 +#: postmaster/pgarch.c:456 #, c-format msgid "archive_mode enabled, yet archive_command is not set" msgstr "режим архивации включён, но команда архивации не задана" -#: postmaster/pgarch.c:485 +#: postmaster/pgarch.c:484 #, c-format msgid "" -"archiving transaction log file \"%s\" failed too many times, will try again " +"archiving write-ahead log file \"%s\" failed too many times, will try again " "later" msgstr "" -"заархивировать файл журнала транзакций \"%s\" не удалось много раз подряд; " +"заархивировать файл журнала предзаписи \"%s\" не удалось много раз подряд; " "следующая попытка будет сделана позже" -#: postmaster/pgarch.c:588 +#: postmaster/pgarch.c:587 #, c-format msgid "archive command failed with exit code %d" msgstr "команда архивации завершилась ошибкой с кодом %d" -#: postmaster/pgarch.c:590 postmaster/pgarch.c:600 postmaster/pgarch.c:607 -#: postmaster/pgarch.c:613 postmaster/pgarch.c:622 +#: postmaster/pgarch.c:589 postmaster/pgarch.c:599 postmaster/pgarch.c:606 +#: postmaster/pgarch.c:612 postmaster/pgarch.c:621 #, c-format msgid "The failed archive command was: %s" msgstr "Команда архивации с ошибкой: %s" -#: postmaster/pgarch.c:597 +#: postmaster/pgarch.c:596 #, c-format msgid "archive command was terminated by exception 0x%X" msgstr "команда архивации была прервана исключением 0x%X" -#: postmaster/pgarch.c:599 postmaster/postmaster.c:3527 +#: postmaster/pgarch.c:598 postmaster/postmaster.c:3615 #, c-format msgid "" "See C include file \"ntstatus.h\" for a description of the hexadecimal value." 
@@ -16873,157 +17040,152 @@ msgstr "" "Описание этого шестнадцатеричного значения ищите во включаемом C-файле " "\"ntstatus.h\"" -#: postmaster/pgarch.c:604 +#: postmaster/pgarch.c:603 #, c-format msgid "archive command was terminated by signal %d: %s" msgstr "команда архивации завершена по сигналу %d: %s" -#: postmaster/pgarch.c:611 +#: postmaster/pgarch.c:610 #, c-format msgid "archive command was terminated by signal %d" msgstr "команда архивации завершена по сигналу %d" -#: postmaster/pgarch.c:620 +#: postmaster/pgarch.c:619 #, c-format msgid "archive command exited with unrecognized status %d" msgstr "команда архивации завершилась с неизвестным кодом состояния %d" -#: postmaster/pgarch.c:632 -#, c-format -msgid "archived transaction log file \"%s\"" -msgstr "файл архива журнала транзакций \"%s\"" - -#: postmaster/pgarch.c:681 +#: postmaster/pgarch.c:679 #, c-format msgid "could not open archive status directory \"%s\": %m" msgstr "не удалось открыть каталог состояния архива \"%s\": %m" -#: postmaster/pgstat.c:392 +#: postmaster/pgstat.c:395 #, c-format msgid "could not resolve \"localhost\": %s" msgstr "не удалось разрешить \"localhost\": %s" -#: postmaster/pgstat.c:415 +#: postmaster/pgstat.c:418 #, c-format msgid "trying another address for the statistics collector" msgstr "проба другого адреса для сборщика статистики" -#: postmaster/pgstat.c:424 +#: postmaster/pgstat.c:427 #, c-format msgid "could not create socket for statistics collector: %m" msgstr "не удалось создать сокет для сборщика статистики: %m" -#: postmaster/pgstat.c:436 +#: postmaster/pgstat.c:439 #, c-format msgid "could not bind socket for statistics collector: %m" msgstr "не удалось привязаться к сокету для сборщика статистики: %m" -#: postmaster/pgstat.c:447 +#: postmaster/pgstat.c:450 #, c-format msgid "could not get address of socket for statistics collector: %m" msgstr "не удалось получить адрес сокета для сборщика статистики: %m" -#: postmaster/pgstat.c:463 +#: postmaster/pgstat.c:466 #, c-format msgid "could not connect socket for statistics collector: %m" msgstr "не удалось подключить сокет для сборщика статистики: %m" -#: postmaster/pgstat.c:484 +#: postmaster/pgstat.c:487 #, c-format msgid "could not send test message on socket for statistics collector: %m" msgstr "" "не удалось послать тестовое сообщение в сокет для сборщика статистики: %m" -#: postmaster/pgstat.c:510 +#: postmaster/pgstat.c:513 #, c-format msgid "select() failed in statistics collector: %m" msgstr "сбой select() в сборщике статистики: %m" -#: postmaster/pgstat.c:525 +#: postmaster/pgstat.c:528 #, c-format msgid "test message did not get through on socket for statistics collector" msgstr "тестовое сообщение не прошло через сокет для сборщика статистики" -#: postmaster/pgstat.c:540 +#: postmaster/pgstat.c:543 #, c-format msgid "could not receive test message on socket for statistics collector: %m" msgstr "" "тестовое сообщение через сокет для сборщика статистики получить не удалось: " "%m" -#: postmaster/pgstat.c:550 +#: postmaster/pgstat.c:553 #, c-format msgid "incorrect test message transmission on socket for statistics collector" msgstr "тестовое сообщение через сокет для сборщика статистики прошло неверно" -#: postmaster/pgstat.c:573 +#: postmaster/pgstat.c:576 #, c-format msgid "could not set statistics collector socket to nonblocking mode: %m" msgstr "" "не удалось переключить сокет сборщика статистики в неблокирующий режим: %m" -#: postmaster/pgstat.c:583 +#: postmaster/pgstat.c:615 #, c-format msgid "disabling statistics collector 
for lack of working socket" msgstr "сборщик статистики отключается из-за нехватки рабочего сокета" -#: postmaster/pgstat.c:730 +#: postmaster/pgstat.c:762 #, c-format msgid "could not fork statistics collector: %m" msgstr "не удалось породить процесс сборщика статистики: %m" -#: postmaster/pgstat.c:1306 +#: postmaster/pgstat.c:1342 #, c-format msgid "unrecognized reset target: \"%s\"" msgstr "запрошен сброс неизвестного счётчика: \"%s\"" -#: postmaster/pgstat.c:1307 +#: postmaster/pgstat.c:1343 #, c-format msgid "Target must be \"archiver\" or \"bgwriter\"." msgstr "Допустимый счётчик: \"archiver\" или \"bgwriter\"." -#: postmaster/pgstat.c:4246 +#: postmaster/pgstat.c:4296 #, c-format msgid "could not read statistics message: %m" msgstr "не удалось прочитать сообщение статистики: %m" -#: postmaster/pgstat.c:4578 postmaster/pgstat.c:4735 +#: postmaster/pgstat.c:4628 postmaster/pgstat.c:4785 #, c-format msgid "could not open temporary statistics file \"%s\": %m" msgstr "не удалось открыть временный файл статистики \"%s\": %m" -#: postmaster/pgstat.c:4645 postmaster/pgstat.c:4780 +#: postmaster/pgstat.c:4695 postmaster/pgstat.c:4830 #, c-format msgid "could not write temporary statistics file \"%s\": %m" msgstr "не удалось записать во временный файл статистики \"%s\": %m" -#: postmaster/pgstat.c:4654 postmaster/pgstat.c:4789 +#: postmaster/pgstat.c:4704 postmaster/pgstat.c:4839 #, c-format msgid "could not close temporary statistics file \"%s\": %m" msgstr "не удалось закрыть временный файл статистики \"%s\": %m" -#: postmaster/pgstat.c:4662 postmaster/pgstat.c:4797 +#: postmaster/pgstat.c:4712 postmaster/pgstat.c:4847 #, c-format msgid "could not rename temporary statistics file \"%s\" to \"%s\": %m" msgstr "" "не удалось переименовать временный файл статистики из \"%s\" в \"%s\": %m" -#: postmaster/pgstat.c:4886 postmaster/pgstat.c:5071 postmaster/pgstat.c:5224 +#: postmaster/pgstat.c:4936 postmaster/pgstat.c:5142 postmaster/pgstat.c:5295 #, c-format msgid "could not open statistics file \"%s\": %m" msgstr "не удалось открыть файл статистики \"%s\": %m" -#: postmaster/pgstat.c:4898 postmaster/pgstat.c:4908 postmaster/pgstat.c:4918 -#: postmaster/pgstat.c:4939 postmaster/pgstat.c:4954 postmaster/pgstat.c:5008 -#: postmaster/pgstat.c:5083 postmaster/pgstat.c:5103 postmaster/pgstat.c:5121 -#: postmaster/pgstat.c:5137 postmaster/pgstat.c:5155 postmaster/pgstat.c:5171 -#: postmaster/pgstat.c:5236 postmaster/pgstat.c:5248 postmaster/pgstat.c:5260 -#: postmaster/pgstat.c:5285 postmaster/pgstat.c:5307 +#: postmaster/pgstat.c:4948 postmaster/pgstat.c:4958 postmaster/pgstat.c:4979 +#: postmaster/pgstat.c:5001 postmaster/pgstat.c:5016 postmaster/pgstat.c:5079 +#: postmaster/pgstat.c:5154 postmaster/pgstat.c:5174 postmaster/pgstat.c:5192 +#: postmaster/pgstat.c:5208 postmaster/pgstat.c:5226 postmaster/pgstat.c:5242 +#: postmaster/pgstat.c:5307 postmaster/pgstat.c:5319 postmaster/pgstat.c:5331 +#: postmaster/pgstat.c:5356 postmaster/pgstat.c:5378 #, c-format msgid "corrupted statistics file \"%s\"" msgstr "файл статистики \"%s\" испорчен" -#: postmaster/pgstat.c:5436 +#: postmaster/pgstat.c:5507 #, c-format msgid "" "using stale statistics instead of current ones because stats collector is " @@ -17032,44 +17194,44 @@ msgstr "" "используется просроченная статистика вместо текущей, так как сборщик " "статистики не отвечает" -#: postmaster/pgstat.c:5763 +#: postmaster/pgstat.c:5834 #, c-format msgid "database hash table corrupted during cleanup --- abort" msgstr "таблица хеша базы данных испорчена при 
очистке --- прерывание" -#: postmaster/postmaster.c:692 +#: postmaster/postmaster.c:712 #, c-format msgid "%s: invalid argument for option -f: \"%s\"\n" msgstr "%s: неверный аргумент для параметра -f: \"%s\"\n" -#: postmaster/postmaster.c:778 +#: postmaster/postmaster.c:798 #, c-format msgid "%s: invalid argument for option -t: \"%s\"\n" msgstr "%s: неверный аргумент для параметра -t: \"%s\"\n" -#: postmaster/postmaster.c:829 +#: postmaster/postmaster.c:849 #, c-format msgid "%s: invalid argument: \"%s\"\n" msgstr "%s: неверный аргумент: \"%s\"\n" -#: postmaster/postmaster.c:868 +#: postmaster/postmaster.c:888 #, c-format msgid "%s: superuser_reserved_connections must be less than max_connections\n" msgstr "" "%s: параметр superuser_reserved_connections должен быть меньше " "max_connections\n" -#: postmaster/postmaster.c:873 +#: postmaster/postmaster.c:893 #, c-format msgid "%s: max_wal_senders must be less than max_connections\n" msgstr "%s: параметр max_wal_senders должен быть меньше max_connections\n" -#: postmaster/postmaster.c:878 +#: postmaster/postmaster.c:898 #, c-format msgid "WAL archival cannot be enabled when wal_level is \"minimal\"" msgstr "Архивацию WAL нельзя включить, если установлен wal_level \"minimal\"" -#: postmaster/postmaster.c:881 +#: postmaster/postmaster.c:901 #, c-format msgid "" "WAL streaming (max_wal_senders > 0) requires wal_level \"replica\" or " @@ -17078,88 +17240,88 @@ msgstr "" "Для потоковой трансляции WAL (max_wal_senders > 0) wal_level должен быть " "\"replica\" или \"logical\"" -#: postmaster/postmaster.c:889 +#: postmaster/postmaster.c:909 #, c-format msgid "%s: invalid datetoken tables, please fix\n" msgstr "%s: ошибка в таблицах маркеров времени, требуется исправление\n" -#: postmaster/postmaster.c:992 postmaster/postmaster.c:1090 -#: utils/init/miscinit.c:1446 +#: postmaster/postmaster.c:1012 postmaster/postmaster.c:1110 +#: utils/init/miscinit.c:1455 #, c-format msgid "invalid list syntax in parameter \"%s\"" msgstr "неверный формат списка в параметре \"%s\"" -#: postmaster/postmaster.c:1023 +#: postmaster/postmaster.c:1043 #, c-format msgid "could not create listen socket for \"%s\"" msgstr "не удалось создать принимающий сокет для \"%s\"" -#: postmaster/postmaster.c:1029 +#: postmaster/postmaster.c:1049 #, c-format msgid "could not create any TCP/IP sockets" msgstr "не удалось создать сокеты TCP/IP" -#: postmaster/postmaster.c:1112 +#: postmaster/postmaster.c:1132 #, c-format msgid "could not create Unix-domain socket in directory \"%s\"" -msgstr "не удалось создать доменный сокет в каталоге \"%s\"" +msgstr "не удалось создать Unix-сокет в каталоге \"%s\"" -#: postmaster/postmaster.c:1118 +#: postmaster/postmaster.c:1138 #, c-format msgid "could not create any Unix-domain sockets" -msgstr "ни один доменный сокет создать не удалось" +msgstr "ни один Unix-сокет создать не удалось" -#: postmaster/postmaster.c:1130 +#: postmaster/postmaster.c:1150 #, c-format msgid "no socket created for listening" msgstr "отсутствуют принимающие сокеты" -#: postmaster/postmaster.c:1170 +#: postmaster/postmaster.c:1190 #, c-format msgid "could not create I/O completion port for child queue" msgstr "не удалось создать порт завершения ввода/вывода для очереди потомков" -#: postmaster/postmaster.c:1199 +#: postmaster/postmaster.c:1219 #, c-format msgid "%s: could not change permissions of external PID file \"%s\": %s\n" msgstr "%s: не удалось поменять права для внешнего файла PID \"%s\": %s\n" -#: postmaster/postmaster.c:1203 +#: postmaster/postmaster.c:1223 #, 
c-format msgid "%s: could not write external PID file \"%s\": %s\n" msgstr "%s: не удалось записать внешний файл PID \"%s\": %s\n" -#: postmaster/postmaster.c:1260 +#: postmaster/postmaster.c:1280 #, c-format msgid "ending log output to stderr" msgstr "завершение вывода в stderr" -#: postmaster/postmaster.c:1261 +#: postmaster/postmaster.c:1281 #, c-format msgid "Future log output will go to log destination \"%s\"." msgstr "В дальнейшем протокол будет выводиться в \"%s\"." -#: postmaster/postmaster.c:1287 utils/init/postinit.c:213 +#: postmaster/postmaster.c:1307 utils/init/postinit.c:213 #, c-format msgid "could not load pg_hba.conf" msgstr "не удалось загрузить pg_hba.conf" -#: postmaster/postmaster.c:1313 +#: postmaster/postmaster.c:1333 #, c-format msgid "postmaster became multithreaded during startup" msgstr "процесс postmaster стал многопоточным при запуске" -#: postmaster/postmaster.c:1314 +#: postmaster/postmaster.c:1334 #, c-format msgid "Set the LC_ALL environment variable to a valid locale." msgstr "Установите в переменной окружения LC_ALL правильную локаль." -#: postmaster/postmaster.c:1413 +#: postmaster/postmaster.c:1439 #, c-format msgid "%s: could not locate matching postgres executable" msgstr "%s: подходящий исполняемый файл postgres не найден" -#: postmaster/postmaster.c:1436 utils/misc/tzparser.c:341 +#: postmaster/postmaster.c:1462 utils/misc/tzparser.c:341 #, c-format msgid "" "This may indicate an incomplete PostgreSQL installation, or that the file " @@ -17168,43 +17330,43 @@ msgstr "" "Возможно, PostgreSQL установлен не полностью или файла \"%s\" нет в " "положенном месте." -#: postmaster/postmaster.c:1464 +#: postmaster/postmaster.c:1490 #, c-format msgid "data directory \"%s\" does not exist" msgstr "каталог данных \"%s\" не существует" -#: postmaster/postmaster.c:1469 +#: postmaster/postmaster.c:1495 #, c-format msgid "could not read permissions of directory \"%s\": %m" msgstr "не удалось считать права на каталог \"%s\": %m" -#: postmaster/postmaster.c:1477 +#: postmaster/postmaster.c:1503 #, c-format msgid "specified data directory \"%s\" is not a directory" msgstr "указанный каталог данных \"%s\" не существует" -#: postmaster/postmaster.c:1493 +#: postmaster/postmaster.c:1519 #, c-format msgid "data directory \"%s\" has wrong ownership" msgstr "владелец каталога данных \"%s\" определён неверно" -#: postmaster/postmaster.c:1495 +#: postmaster/postmaster.c:1521 #, c-format msgid "The server must be started by the user that owns the data directory." msgstr "" "Сервер должен запускать пользователь, являющийся владельцем каталога данных." -#: postmaster/postmaster.c:1515 +#: postmaster/postmaster.c:1541 #, c-format msgid "data directory \"%s\" has group or world access" msgstr "к каталогу данных \"%s\" имеют доступ все или группа" -#: postmaster/postmaster.c:1517 +#: postmaster/postmaster.c:1543 #, c-format msgid "Permissions should be u=rwx (0700)." msgstr "Права должны быть: u=rwx (0700)." 
-#: postmaster/postmaster.c:1528 +#: postmaster/postmaster.c:1554 #, c-format msgid "" "%s: could not find the database system\n" @@ -17215,398 +17377,409 @@ msgstr "" "Ожидалось найти её в каталоге \"%s\",\n" "но открыть файл \"%s\" не удалось: %s\n" -#: postmaster/postmaster.c:1705 +#: postmaster/postmaster.c:1731 #, c-format msgid "select() failed in postmaster: %m" msgstr "сбой select() в postmaster'е: %m" -#: postmaster/postmaster.c:1856 +#: postmaster/postmaster.c:1886 #, c-format msgid "" "performing immediate shutdown because data directory lock file is invalid" msgstr "" "немедленное отключение из-за ошибочного файла блокировки каталога данных" -#: postmaster/postmaster.c:1934 postmaster/postmaster.c:1965 +#: postmaster/postmaster.c:1964 postmaster/postmaster.c:1995 #, c-format msgid "incomplete startup packet" msgstr "неполный стартовый пакет" -#: postmaster/postmaster.c:1946 +#: postmaster/postmaster.c:1976 #, c-format msgid "invalid length of startup packet" msgstr "неверная длина стартового пакета" -#: postmaster/postmaster.c:2004 +#: postmaster/postmaster.c:2034 #, c-format msgid "failed to send SSL negotiation response: %m" msgstr "не удалось отправить ответ в процессе SSL-согласования: %m" -#: postmaster/postmaster.c:2033 +#: postmaster/postmaster.c:2060 #, c-format msgid "unsupported frontend protocol %u.%u: server supports %u.0 to %u.%u" msgstr "" "неподдерживаемый протокол клиентского приложения %u.%u; сервер поддерживает " "%u.0 - %u.%u" -#: postmaster/postmaster.c:2096 utils/misc/guc.c:5739 utils/misc/guc.c:5832 -#: utils/misc/guc.c:7133 utils/misc/guc.c:9889 utils/misc/guc.c:9923 +#: postmaster/postmaster.c:2124 utils/misc/guc.c:5770 utils/misc/guc.c:5863 +#: utils/misc/guc.c:7164 utils/misc/guc.c:9911 utils/misc/guc.c:9945 #, c-format msgid "invalid value for parameter \"%s\": \"%s\"" msgstr "неверное значение для параметра \"%s\": \"%s\"" -#: postmaster/postmaster.c:2099 +#: postmaster/postmaster.c:2127 #, c-format msgid "Valid values are: \"false\", 0, \"true\", 1, \"database\"." msgstr "Допустимые значения: \"false\", 0, \"true\", 1, \"database\"." 
-#: postmaster/postmaster.c:2119 +#: postmaster/postmaster.c:2157 #, c-format msgid "invalid startup packet layout: expected terminator as last byte" msgstr "" "неверная структура стартового пакета: последним байтом должен быть терминатор" -#: postmaster/postmaster.c:2147 +#: postmaster/postmaster.c:2195 #, c-format msgid "no PostgreSQL user name specified in startup packet" msgstr "в стартовом пакете не указано имя пользователя PostgreSQL" -#: postmaster/postmaster.c:2206 +#: postmaster/postmaster.c:2254 #, c-format msgid "the database system is starting up" msgstr "система баз данных запускается" -#: postmaster/postmaster.c:2211 +#: postmaster/postmaster.c:2259 #, c-format msgid "the database system is shutting down" msgstr "система баз данных останавливается" -#: postmaster/postmaster.c:2216 +#: postmaster/postmaster.c:2264 #, c-format msgid "the database system is in recovery mode" msgstr "система баз данных в режиме восстановления" -#: postmaster/postmaster.c:2221 storage/ipc/procarray.c:290 +#: postmaster/postmaster.c:2269 storage/ipc/procarray.c:292 #: storage/ipc/sinvaladt.c:298 storage/lmgr/proc.c:338 #, c-format msgid "sorry, too many clients already" msgstr "извините, уже слишком много клиентов" -#: postmaster/postmaster.c:2283 +#: postmaster/postmaster.c:2359 #, c-format msgid "wrong key in cancel request for process %d" msgstr "неправильный ключ в запросе на отмену процесса %d" -#: postmaster/postmaster.c:2291 +#: postmaster/postmaster.c:2367 #, c-format msgid "PID %d in cancel request did not match any process" msgstr "процесс с кодом %d, полученным в запросе на отмену, не найден" -#: postmaster/postmaster.c:2502 +#: postmaster/postmaster.c:2578 #, c-format msgid "received SIGHUP, reloading configuration files" msgstr "получен SIGHUP, файлы конфигурации перезагружаются" -#: postmaster/postmaster.c:2527 +#: postmaster/postmaster.c:2603 #, c-format msgid "pg_hba.conf was not reloaded" msgstr "pg_hba.conf не был перезагружен" -#: postmaster/postmaster.c:2531 +#: postmaster/postmaster.c:2607 #, c-format msgid "pg_ident.conf was not reloaded" msgstr "pg_ident.conf не был перезагружен" -#: postmaster/postmaster.c:2541 +#: postmaster/postmaster.c:2617 #, c-format msgid "SSL configuration was not reloaded" msgstr "конфигурация SSL не была перезагружена" -#: postmaster/postmaster.c:2589 +#: postmaster/postmaster.c:2665 #, c-format msgid "received smart shutdown request" msgstr "получен запрос на \"вежливое\" выключение" -#: postmaster/postmaster.c:2644 +#: postmaster/postmaster.c:2723 #, c-format msgid "received fast shutdown request" msgstr "получен запрос на быстрое выключение" -#: postmaster/postmaster.c:2674 +#: postmaster/postmaster.c:2756 #, c-format msgid "aborting any active transactions" msgstr "прерывание всех активных транзакций" -#: postmaster/postmaster.c:2708 +#: postmaster/postmaster.c:2790 #, c-format msgid "received immediate shutdown request" msgstr "получен запрос на немедленное выключение" -#: postmaster/postmaster.c:2772 +#: postmaster/postmaster.c:2857 #, c-format msgid "shutdown at recovery target" msgstr "выключение при достижении цели восстановления" -#: postmaster/postmaster.c:2788 postmaster/postmaster.c:2811 +#: postmaster/postmaster.c:2873 postmaster/postmaster.c:2896 msgid "startup process" msgstr "стартовый процесс" -#: postmaster/postmaster.c:2791 +#: postmaster/postmaster.c:2876 #, c-format msgid "aborting startup due to startup process failure" msgstr "прерывание запуска из-за ошибки в стартовом процессе" -#: postmaster/postmaster.c:2852 +#: 
postmaster/postmaster.c:2937 #, c-format msgid "database system is ready to accept connections" msgstr "система БД готова принимать подключения" -#: postmaster/postmaster.c:2871 +#: postmaster/postmaster.c:2958 msgid "background writer process" msgstr "процесс фоновой записи" -#: postmaster/postmaster.c:2925 +#: postmaster/postmaster.c:3012 msgid "checkpointer process" msgstr "процесс контрольных точек" -#: postmaster/postmaster.c:2941 +#: postmaster/postmaster.c:3028 msgid "WAL writer process" msgstr "процесс записи WAL" -#: postmaster/postmaster.c:2955 +#: postmaster/postmaster.c:3043 msgid "WAL receiver process" msgstr "процесс считывания WAL" -#: postmaster/postmaster.c:2970 +#: postmaster/postmaster.c:3058 msgid "autovacuum launcher process" msgstr "процесс запуска автоочистки" -#: postmaster/postmaster.c:2985 +#: postmaster/postmaster.c:3073 msgid "archiver process" msgstr "процесс архивации" -#: postmaster/postmaster.c:3001 +#: postmaster/postmaster.c:3089 msgid "statistics collector process" msgstr "процесс сбора статистики" -#: postmaster/postmaster.c:3015 +#: postmaster/postmaster.c:3103 msgid "system logger process" msgstr "процесс системного протоколирования" -#: postmaster/postmaster.c:3077 +#: postmaster/postmaster.c:3165 msgid "worker process" msgstr "рабочий процесс" -#: postmaster/postmaster.c:3160 postmaster/postmaster.c:3180 -#: postmaster/postmaster.c:3187 postmaster/postmaster.c:3205 +#: postmaster/postmaster.c:3248 postmaster/postmaster.c:3268 +#: postmaster/postmaster.c:3275 postmaster/postmaster.c:3293 msgid "server process" msgstr "процесс сервера" -#: postmaster/postmaster.c:3259 +#: postmaster/postmaster.c:3347 #, c-format msgid "terminating any other active server processes" msgstr "завершение всех остальных активных серверных процессов" #. translator: %s is a noun phrase describing a child process, such as #. "server process" -#: postmaster/postmaster.c:3515 +#: postmaster/postmaster.c:3603 #, c-format msgid "%s (PID %d) exited with exit code %d" msgstr "%s (PID %d) завершился с кодом выхода %d" -#: postmaster/postmaster.c:3517 postmaster/postmaster.c:3528 -#: postmaster/postmaster.c:3539 postmaster/postmaster.c:3548 -#: postmaster/postmaster.c:3558 +#: postmaster/postmaster.c:3605 postmaster/postmaster.c:3616 +#: postmaster/postmaster.c:3627 postmaster/postmaster.c:3636 +#: postmaster/postmaster.c:3646 #, c-format msgid "Failed process was running: %s" msgstr "Завершившийся процесс выполнял действие: %s" #. translator: %s is a noun phrase describing a child process, such as #. "server process" -#: postmaster/postmaster.c:3525 +#: postmaster/postmaster.c:3613 #, c-format msgid "%s (PID %d) was terminated by exception 0x%X" msgstr "%s (PID %d) был прерван исключением 0x%X" #. translator: %s is a noun phrase describing a child process, such as #. "server process" -#: postmaster/postmaster.c:3535 +#: postmaster/postmaster.c:3623 #, c-format msgid "%s (PID %d) was terminated by signal %d: %s" msgstr "%s (PID %d) был завершён по сигналу %d: %s" #. translator: %s is a noun phrase describing a child process, such as #. "server process" -#: postmaster/postmaster.c:3546 +#: postmaster/postmaster.c:3634 #, c-format msgid "%s (PID %d) was terminated by signal %d" msgstr "%s (PID %d) был завершён по сигналу %d" #. translator: %s is a noun phrase describing a child process, such as #. 
"server process" -#: postmaster/postmaster.c:3556 +#: postmaster/postmaster.c:3644 #, c-format msgid "%s (PID %d) exited with unrecognized status %d" msgstr "%s (PID %d) завершился с неизвестным кодом состояния %d" -#: postmaster/postmaster.c:3743 +#: postmaster/postmaster.c:3831 #, c-format msgid "abnormal database system shutdown" msgstr "аварийное выключение системы БД" -#: postmaster/postmaster.c:3783 +#: postmaster/postmaster.c:3871 #, c-format msgid "all server processes terminated; reinitializing" msgstr "все серверные процессы завершены... переинициализация" -#: postmaster/postmaster.c:3949 postmaster/postmaster.c:5343 -#: postmaster/postmaster.c:5649 +#: postmaster/postmaster.c:4037 postmaster/postmaster.c:5448 +#: postmaster/postmaster.c:5812 #, c-format msgid "could not generate random cancel key" msgstr "не удалось сгенерировать случайный ключ отмены" -#: postmaster/postmaster.c:4003 +#: postmaster/postmaster.c:4091 #, c-format msgid "could not fork new process for connection: %m" msgstr "породить новый процесс для соединения не удалось: %m" -#: postmaster/postmaster.c:4045 +#: postmaster/postmaster.c:4133 msgid "could not fork new process for connection: " msgstr "породить новый процесс для соединения не удалось: " -#: postmaster/postmaster.c:4159 +#: postmaster/postmaster.c:4247 #, c-format msgid "connection received: host=%s port=%s" msgstr "принято подключение: узел=%s порт=%s" -#: postmaster/postmaster.c:4164 +#: postmaster/postmaster.c:4252 #, c-format msgid "connection received: host=%s" msgstr "принято подключение: узел=%s" -#: postmaster/postmaster.c:4449 +#: postmaster/postmaster.c:4537 #, c-format msgid "could not execute server process \"%s\": %m" msgstr "запустить серверный процесс \"%s\" не удалось: %m" -#: postmaster/postmaster.c:4792 +#: postmaster/postmaster.c:4690 +#, c-format +msgid "giving up after too many tries to reserve shared memory" +msgstr "" +"число повторных попыток резервирования разделяемой памяти достигло предела" + +#: postmaster/postmaster.c:4691 +#, c-format +msgid "This might be caused by ASLR or antivirus software." +msgstr "Это может быть вызвано антивирусным ПО или механизмом ASLR." + +#: postmaster/postmaster.c:4888 #, c-format msgid "SSL configuration could not be loaded in child process" msgstr "не удалось загрузить конфигурацию SSL в дочерний процесс" -#: postmaster/postmaster.c:4924 +#: postmaster/postmaster.c:5020 #, c-format msgid "Please report this to ." msgstr "" "Пожалуйста, напишите об этой ошибке по адресу ." 
-#: postmaster/postmaster.c:5003 +#: postmaster/postmaster.c:5107 #, c-format msgid "database system is ready to accept read only connections" msgstr "система БД готова к подключениям в режиме \"только чтение\"" -#: postmaster/postmaster.c:5271 +#: postmaster/postmaster.c:5376 #, c-format msgid "could not fork startup process: %m" msgstr "породить стартовый процесс не удалось: %m" -#: postmaster/postmaster.c:5275 +#: postmaster/postmaster.c:5380 #, c-format msgid "could not fork background writer process: %m" msgstr "породить процесс фоновой записи не удалось: %m" -#: postmaster/postmaster.c:5279 +#: postmaster/postmaster.c:5384 #, c-format msgid "could not fork checkpointer process: %m" msgstr "породить процесс контрольных точек не удалось: %m" -#: postmaster/postmaster.c:5283 +#: postmaster/postmaster.c:5388 #, c-format msgid "could not fork WAL writer process: %m" msgstr "породить процесс записи WAL не удалось: %m" -#: postmaster/postmaster.c:5287 +#: postmaster/postmaster.c:5392 #, c-format msgid "could not fork WAL receiver process: %m" msgstr "породить процесс считывания WAL не удалось: %m" -#: postmaster/postmaster.c:5291 +#: postmaster/postmaster.c:5396 #, c-format msgid "could not fork process: %m" msgstr "породить процесс не удалось: %m" -#: postmaster/postmaster.c:5460 postmaster/postmaster.c:5483 +#: postmaster/postmaster.c:5583 postmaster/postmaster.c:5606 #, c-format msgid "database connection requirement not indicated during registration" msgstr "" "при регистрации фонового процесса не указывалось, что ему требуется " "подключение к БД" -#: postmaster/postmaster.c:5467 postmaster/postmaster.c:5490 +#: postmaster/postmaster.c:5590 postmaster/postmaster.c:5613 #, c-format msgid "invalid processing mode in background worker" msgstr "неправильный режим обработки в фоновом процессе" -#: postmaster/postmaster.c:5542 +#: postmaster/postmaster.c:5685 #, c-format msgid "starting background worker process \"%s\"" msgstr "запуск фонового рабочего процесса \"%s\"" -#: postmaster/postmaster.c:5553 +#: postmaster/postmaster.c:5697 #, c-format msgid "could not fork worker process: %m" msgstr "породить рабочий процесс не удалось: %m" -#: postmaster/postmaster.c:5950 +#: postmaster/postmaster.c:6130 #, c-format msgid "could not duplicate socket %d for use in backend: error code %d" msgstr "" "продублировать сокет %d для серверного процесса не удалось (код ошибки: %d)" -#: postmaster/postmaster.c:5982 +#: postmaster/postmaster.c:6162 #, c-format msgid "could not create inherited socket: error code %d\n" msgstr "создать наследуемый сокет не удалось (код ошибки: %d)\n" -#: postmaster/postmaster.c:6011 +#: postmaster/postmaster.c:6191 #, c-format msgid "could not open backend variables file \"%s\": %s\n" msgstr "открыть файл серверных переменных \"%s\" не удалось: %s\n" -#: postmaster/postmaster.c:6018 +#: postmaster/postmaster.c:6198 #, c-format msgid "could not read from backend variables file \"%s\": %s\n" msgstr "прочитать файл серверных переменных \"%s\" не удалось: %s\n" -#: postmaster/postmaster.c:6027 +#: postmaster/postmaster.c:6207 #, c-format msgid "could not remove file \"%s\": %s\n" msgstr "не удалось стереть файл \"%s\": %s\n" -#: postmaster/postmaster.c:6044 +#: postmaster/postmaster.c:6224 #, c-format msgid "could not map view of backend variables: error code %lu\n" msgstr "отобразить файл серверных переменных не удалось (код ошибки: %lu)\n" -#: postmaster/postmaster.c:6053 +#: postmaster/postmaster.c:6233 #, c-format msgid "could not unmap view of backend variables: error code 
%lu\n" msgstr "" "отключить отображение файла серверных переменных не удалось (код ошибки: " "%lu)\n" -#: postmaster/postmaster.c:6060 +#: postmaster/postmaster.c:6240 #, c-format msgid "could not close handle to backend parameter variables: error code %lu\n" msgstr "" "закрыть указатель файла серверных переменных не удалось (код ошибки: %lu)\n" -#: postmaster/postmaster.c:6221 +#: postmaster/postmaster.c:6401 #, c-format msgid "could not read exit code for process\n" msgstr "прочитать код завершения процесса не удалось\n" -#: postmaster/postmaster.c:6226 +#: postmaster/postmaster.c:6406 #, c-format msgid "could not post child completion status\n" msgstr "отправить состояние завершения потомка не удалось\n" @@ -17678,58 +17851,58 @@ msgstr "" msgid "could not stat control file \"%s\": %m" msgstr "не удалось найти управляющий файл \"%s\": %m" -#: replication/basebackup.c:412 +#: replication/basebackup.c:413 #, c-format msgid "could not find any WAL files" msgstr "не удалось найти ни одного файла WAL" -#: replication/basebackup.c:425 replication/basebackup.c:439 -#: replication/basebackup.c:448 +#: replication/basebackup.c:426 replication/basebackup.c:440 +#: replication/basebackup.c:449 #, c-format msgid "could not find WAL file \"%s\"" msgstr "не удалось найти файл WAL \"%s\"" -#: replication/basebackup.c:487 replication/basebackup.c:513 +#: replication/basebackup.c:488 replication/basebackup.c:514 #, c-format msgid "unexpected WAL file size \"%s\"" msgstr "неприемлемый размер файла WAL \"%s\"" -#: replication/basebackup.c:499 replication/basebackup.c:1228 +#: replication/basebackup.c:500 replication/basebackup.c:1229 #, c-format msgid "base backup could not send data, aborting backup" msgstr "" "в процессе базового резервного копирования не удалось передать данные, " "копирование прерывается" -#: replication/basebackup.c:601 replication/basebackup.c:610 -#: replication/basebackup.c:619 replication/basebackup.c:628 -#: replication/basebackup.c:637 replication/basebackup.c:648 -#: replication/basebackup.c:665 +#: replication/basebackup.c:602 replication/basebackup.c:611 +#: replication/basebackup.c:620 replication/basebackup.c:629 +#: replication/basebackup.c:638 replication/basebackup.c:649 +#: replication/basebackup.c:666 #, c-format msgid "duplicate option \"%s\"" msgstr "повторяющийся параметр \"%s\"" -#: replication/basebackup.c:654 utils/misc/guc.c:5749 +#: replication/basebackup.c:655 utils/misc/guc.c:5780 #, c-format msgid "%d is outside the valid range for parameter \"%s\" (%d .. %d)" msgstr "%d вне диапазона, допустимого для параметра \"%s\" (%d .. 
%d)" -#: replication/basebackup.c:928 replication/basebackup.c:1025 +#: replication/basebackup.c:929 replication/basebackup.c:1026 #, c-format msgid "could not stat file or directory \"%s\": %m" msgstr "не удалось получить информацию о файле или каталоге \"%s\": %m" -#: replication/basebackup.c:1180 +#: replication/basebackup.c:1181 #, c-format msgid "skipping special file \"%s\"" msgstr "специальный файл \"%s\" пропускается" -#: replication/basebackup.c:1293 +#: replication/basebackup.c:1294 #, c-format msgid "file name too long for tar format: \"%s\"" msgstr "слишком длинное имя файла для формата tar: \"%s\"" -#: replication/basebackup.c:1298 +#: replication/basebackup.c:1299 #, c-format msgid "" "symbolic link target too long for tar format: file name \"%s\", target \"%s\"" @@ -17737,17 +17910,17 @@ msgstr "" "цель символической ссылки слишком длинная для формата tar: имя файла \"%s\", " "цель \"%s\"" -#: replication/libpqwalreceiver/libpqwalreceiver.c:226 +#: replication/libpqwalreceiver/libpqwalreceiver.c:231 #, c-format msgid "invalid connection string syntax: %s" msgstr "ошибочный синтаксис строки подключения: %s" -#: replication/libpqwalreceiver/libpqwalreceiver.c:250 +#: replication/libpqwalreceiver/libpqwalreceiver.c:255 #, c-format msgid "could not parse connection string: %s" msgstr "не удалось разобрать строку подключения: %s" -#: replication/libpqwalreceiver/libpqwalreceiver.c:300 +#: replication/libpqwalreceiver/libpqwalreceiver.c:305 #, c-format msgid "" "could not receive database system identifier and timeline ID from the " @@ -17756,13 +17929,13 @@ msgstr "" "не удалось получить идентификатор СУБД и код линии времени с главного " "сервера: %s" -#: replication/libpqwalreceiver/libpqwalreceiver.c:311 -#: replication/libpqwalreceiver/libpqwalreceiver.c:515 +#: replication/libpqwalreceiver/libpqwalreceiver.c:316 +#: replication/libpqwalreceiver/libpqwalreceiver.c:523 #, c-format msgid "invalid response from primary server" msgstr "неверный ответ главного сервера" -#: replication/libpqwalreceiver/libpqwalreceiver.c:312 +#: replication/libpqwalreceiver/libpqwalreceiver.c:317 #, c-format msgid "" "Could not identify system: got %d rows and %d fields, expected %d rows and " @@ -17771,127 +17944,137 @@ msgstr "" "Не удалось идентифицировать систему, получено строк: %d, полей: %d " "(ожидалось: %d и %d (или более))." 
-#: replication/libpqwalreceiver/libpqwalreceiver.c:378 -#: replication/libpqwalreceiver/libpqwalreceiver.c:384 -#: replication/libpqwalreceiver/libpqwalreceiver.c:409 +#: replication/libpqwalreceiver/libpqwalreceiver.c:383 +#: replication/libpqwalreceiver/libpqwalreceiver.c:389 +#: replication/libpqwalreceiver/libpqwalreceiver.c:414 #, c-format msgid "could not start WAL streaming: %s" msgstr "не удалось начать трансляцию WAL: %s" -#: replication/libpqwalreceiver/libpqwalreceiver.c:428 +#: replication/libpqwalreceiver/libpqwalreceiver.c:433 #, c-format msgid "could not send end-of-streaming message to primary: %s" msgstr "не удалось отправить главному серверу сообщение о конце передачи: %s" -#: replication/libpqwalreceiver/libpqwalreceiver.c:450 +#: replication/libpqwalreceiver/libpqwalreceiver.c:455 #, c-format msgid "unexpected result set after end-of-streaming" msgstr "неожиданный набор данных после конца передачи" -#: replication/libpqwalreceiver/libpqwalreceiver.c:470 +#: replication/libpqwalreceiver/libpqwalreceiver.c:469 +#, c-format +msgid "error while shutting down streaming COPY: %s" +msgstr "ошибка при остановке потоковой операции COPY: %s" + +#: replication/libpqwalreceiver/libpqwalreceiver.c:478 #, c-format msgid "error reading result of streaming command: %s" msgstr "ошибка при чтении результата команды передачи: %s" -#: replication/libpqwalreceiver/libpqwalreceiver.c:478 -#: replication/libpqwalreceiver/libpqwalreceiver.c:688 +#: replication/libpqwalreceiver/libpqwalreceiver.c:486 +#: replication/libpqwalreceiver/libpqwalreceiver.c:714 #, c-format msgid "unexpected result after CommandComplete: %s" msgstr "неожиданный результат после CommandComplete: %s" -#: replication/libpqwalreceiver/libpqwalreceiver.c:504 +#: replication/libpqwalreceiver/libpqwalreceiver.c:512 #, c-format msgid "could not receive timeline history file from the primary server: %s" msgstr "не удалось получить файл истории линии времени с главного сервера: %s" -#: replication/libpqwalreceiver/libpqwalreceiver.c:516 +#: replication/libpqwalreceiver/libpqwalreceiver.c:524 #, c-format msgid "Expected 1 tuple with 2 fields, got %d tuples with %d fields." msgstr "Ожидался 1 кортеж с 2 полями, однако получено кортежей: %d, полей: %d." -#: replication/libpqwalreceiver/libpqwalreceiver.c:663 -#: replication/libpqwalreceiver/libpqwalreceiver.c:701 -#: replication/libpqwalreceiver/libpqwalreceiver.c:707 +#: replication/libpqwalreceiver/libpqwalreceiver.c:678 +#: replication/libpqwalreceiver/libpqwalreceiver.c:729 +#: replication/libpqwalreceiver/libpqwalreceiver.c:735 #, c-format msgid "could not receive data from WAL stream: %s" msgstr "не удалось извлечь данные из потока WAL: %s" -#: replication/libpqwalreceiver/libpqwalreceiver.c:726 +#: replication/libpqwalreceiver/libpqwalreceiver.c:754 #, c-format msgid "could not send data to WAL stream: %s" msgstr "не удалось отправить данные в поток WAL: %s" -#: replication/libpqwalreceiver/libpqwalreceiver.c:775 +#: replication/libpqwalreceiver/libpqwalreceiver.c:803 #, c-format msgid "could not create replication slot \"%s\": %s" msgstr "не удалось создать слот репликации \"%s\": %s" -#: replication/libpqwalreceiver/libpqwalreceiver.c:809 +#: replication/libpqwalreceiver/libpqwalreceiver.c:837 #, c-format -msgid "invalid query responser" +msgid "invalid query response" msgstr "неверный ответ на запрос" -#: replication/libpqwalreceiver/libpqwalreceiver.c:810 +#: replication/libpqwalreceiver/libpqwalreceiver.c:838 #, c-format msgid "Expected %d fields, got %d fields." 
msgstr "Ожидалось полей: %d, получено: %d." -#: replication/libpqwalreceiver/libpqwalreceiver.c:880 +#: replication/libpqwalreceiver/libpqwalreceiver.c:907 #, c-format msgid "the query interface requires a database connection" msgstr "для интерфейса запросов требуется подключение к БД" -#: replication/libpqwalreceiver/libpqwalreceiver.c:911 +#: replication/libpqwalreceiver/libpqwalreceiver.c:938 msgid "empty query" msgstr "пустой запрос" -#: replication/logical/launcher.c:242 +#: replication/logical/launcher.c:298 #, c-format msgid "starting logical replication worker for subscription \"%s\"" msgstr "" "запускается процесс-обработчик логической репликации для подписки \"%s\"" -#: replication/logical/launcher.c:249 +#: replication/logical/launcher.c:305 #, c-format msgid "cannot start logical replication workers when max_replication_slots = 0" msgstr "" "нельзя запустить процессы-обработчики логической репликации при " "max_replication_slots = 0" -#: replication/logical/launcher.c:273 +#: replication/logical/launcher.c:385 #, c-format -msgid "out of logical replication workers slots" +msgid "out of logical replication worker slots" msgstr "недостаточно слотов для процессов логической репликации" -#: replication/logical/launcher.c:274 +#: replication/logical/launcher.c:386 #, c-format msgid "You might need to increase max_logical_replication_workers." msgstr "Возможно, следует увеличить параметр max_logical_replication_workers." -#: replication/logical/launcher.c:315 +#: replication/logical/launcher.c:440 #, c-format -msgid "out of background workers slots" +msgid "out of background worker slots" msgstr "недостаточно слотов для фоновых рабочих процессов" -#: replication/logical/launcher.c:316 +#: replication/logical/launcher.c:441 #, c-format msgid "You might need to increase max_worker_processes." msgstr "Возможно, следует увеличить параметр max_worker_processes." 
-#: replication/logical/launcher.c:457 +#: replication/logical/launcher.c:624 #, c-format -msgid "logical replication worker slot %d already used by another worker" -msgstr "слот обработчика логической репликации %d уже занят другим процессом" +msgid "logical replication worker slot %d is empty, cannot attach" +msgstr "" +"слот обработчика логической репликации %d пуст, подключиться к нему нельзя" -#: replication/logical/launcher.c:637 +#: replication/logical/launcher.c:633 #, c-format -msgid "logical replication launcher started" -msgstr "процесс запуска логической репликации запущен" +msgid "" +"logical replication worker slot %d is already used by another worker, cannot " +"attach" +msgstr "" +"слот обработчика логической репликации %d уже занят другим процессом, " +"подключиться к нему нельзя" -#: replication/logical/launcher.c:738 +#: replication/logical/launcher.c:885 #, c-format -msgid "logical replication launcher shutting down" -msgstr "процесс запуска логической репликации остановлен" +msgid "logical replication launcher started" +msgstr "процесс запуска логической репликации запущен" #: replication/logical/logical.c:83 #, c-format @@ -17908,18 +18091,18 @@ msgstr "для логического декодирования требует msgid "logical decoding cannot be used while in recovery" msgstr "логическое декодирование нельзя использовать в процессе восстановления" -#: replication/logical/logical.c:236 replication/logical/logical.c:348 +#: replication/logical/logical.c:243 replication/logical/logical.c:365 #, c-format msgid "cannot use physical replication slot for logical decoding" msgstr "" "физический слот репликации нельзя использовать для логического декодирования" -#: replication/logical/logical.c:241 replication/logical/logical.c:353 +#: replication/logical/logical.c:248 replication/logical/logical.c:370 #, c-format msgid "replication slot \"%s\" was not created in this database" msgstr "слот репликации \"%s\" создан не в этой базе данных" -#: replication/logical/logical.c:248 +#: replication/logical/logical.c:255 #, c-format msgid "" "cannot create logical replication slot in transaction that has performed " @@ -17927,24 +18110,24 @@ msgid "" msgstr "" "нельзя создать логический слот репликации в транзакции, осуществляющей запись" -#: replication/logical/logical.c:390 +#: replication/logical/logical.c:408 #, c-format msgid "starting logical decoding for slot \"%s\"" msgstr "начинается логическое декодирование для слота \"%s\"" -#: replication/logical/logical.c:392 +#: replication/logical/logical.c:410 #, c-format msgid "streaming transactions committing after %X/%X, reading WAL from %X/%X" msgstr "передача транзакций, фиксируемых после %X/%X, чтение WAL с %X/%X" -#: replication/logical/logical.c:527 +#: replication/logical/logical.c:557 #, c-format msgid "" "slot \"%s\", output plugin \"%s\", in the %s callback, associated LSN %X/%X" msgstr "" "слот \"%s\", модуль вывода \"%s\", в обработчике %s, связанный LSN: %X/%X" -#: replication/logical/logical.c:534 +#: replication/logical/logical.c:564 #, c-format msgid "slot \"%s\", output plugin \"%s\", in the %s callback" msgstr "слот \"%s\", модуль вывода \"%s\", в обработчике %s" @@ -17976,8 +18159,8 @@ msgstr "массив должен быть одномерным" msgid "array must not contain nulls" msgstr "массив не должен содержать элементы null" -#: replication/logical/logicalfuncs.c:222 utils/adt/json.c:2282 -#: utils/adt/jsonb.c:1357 +#: replication/logical/logicalfuncs.c:222 utils/adt/json.c:2246 +#: utils/adt/jsonb.c:1314 #, c-format msgid "array must have even number 
of elements" msgstr "в массиве должно быть чётное число элементов" @@ -17991,14 +18174,14 @@ msgstr "" "модуль вывода логического декодирования \"%s\" выдаёт двоичные данные, но " "функция \"%s\" ожидает текстовые" -#: replication/logical/origin.c:180 +#: replication/logical/origin.c:185 #, c-format msgid "only superusers can query or manipulate replication origins" msgstr "" "запрашивать или модифицировать источники репликации могут только " "суперпользователи" -#: replication/logical/origin.c:185 +#: replication/logical/origin.c:190 #, c-format msgid "" "cannot query or manipulate replication origin when max_replication_slots = 0" @@ -18006,54 +18189,64 @@ msgstr "" "запрашивать или модифицировать источники репликации при " "max_replication_slots = 0 нельзя" -#: replication/logical/origin.c:190 +#: replication/logical/origin.c:195 #, c-format msgid "cannot manipulate replication origins during recovery" msgstr "модифицировать источники репликации во время восстановления нельзя" -#: replication/logical/origin.c:314 +#: replication/logical/origin.c:230 +#, c-format +msgid "replication origin \"%s\" does not exist" +msgstr "источник репликации \"%s\" не существует" + +#: replication/logical/origin.c:321 #, c-format msgid "could not find free replication origin OID" msgstr "найти свободный OID для источника репликации не удалось" -#: replication/logical/origin.c:351 +#: replication/logical/origin.c:369 #, c-format msgid "could not drop replication origin with OID %d, in use by PID %d" msgstr "" "удалить источник репликации с OID %d нельзя, он используется процессом с PID " "%d" -#: replication/logical/origin.c:664 +#: replication/logical/origin.c:461 +#, c-format +msgid "replication origin with OID %u does not exist" +msgstr "источник репликации с OID %u не существует" + +#: replication/logical/origin.c:708 #, c-format msgid "replication checkpoint has wrong magic %u instead of %u" msgstr "" "контрольная точка репликации имеет неправильную сигнатуру (%u вместо %u)" -#: replication/logical/origin.c:696 +#: replication/logical/origin.c:740 #, c-format msgid "could not read file \"%s\": read %d of %zu" msgstr "не удалось прочитать файл \"%s\" (прочитано байт: %d из %zu)" -#: replication/logical/origin.c:705 +#: replication/logical/origin.c:749 #, c-format msgid "could not find free replication state, increase max_replication_slots" msgstr "" "не удалось найти свободную ячейку для состояния репликации, увеличьте " "max_replication_slots" -#: replication/logical/origin.c:723 +#: replication/logical/origin.c:767 #, c-format msgid "replication slot checkpoint has wrong checksum %u, expected %u" msgstr "" "неверная контрольная сумма файла контрольной точки для слота репликации (%u " "вместо %u)" -#: replication/logical/origin.c:847 +#: replication/logical/origin.c:891 #, c-format msgid "replication origin with OID %d is already active for PID %d" msgstr "источник репликации с OID %d уже занят процессом с PID %d" -#: replication/logical/origin.c:858 replication/logical/origin.c:1038 +#: replication/logical/origin.c:902 replication/logical/origin.c:1089 #, c-format msgid "" "could not find free replication state slot for replication origin with OID %u" @@ -18061,39 +18254,34 @@ msgstr "" "не удалось найти свободный слот состояния репликации для источника " "репликации с OID %u" -#: replication/logical/origin.c:860 replication/logical/origin.c:1040 -#: replication/slot.c:1437 +#: replication/logical/origin.c:904 replication/logical/origin.c:1091 +#: replication/slot.c:1509 #, c-format msgid "Increase 
max_replication_slots and try again." msgstr "Увеличьте параметр max_replication_slots и повторите попытку." -#: replication/logical/origin.c:997 +#: replication/logical/origin.c:1048 #, c-format msgid "cannot setup replication origin when one is already setup" msgstr "нельзя настроить источник репликации, когда он уже настроен" -#: replication/logical/origin.c:1026 +#: replication/logical/origin.c:1077 #, c-format msgid "replication identifier %d is already active for PID %d" msgstr "идентификатор репликации %d уже занят процессом с PID %d" -#: replication/logical/origin.c:1072 replication/logical/origin.c:1267 -#: replication/logical/origin.c:1287 +#: replication/logical/origin.c:1128 replication/logical/origin.c:1326 +#: replication/logical/origin.c:1346 #, c-format msgid "no replication origin is configured" msgstr "ни один источник репликации не настроен" -#: replication/logical/relation.c:266 +#: replication/logical/relation.c:259 #, c-format msgid "logical replication target relation \"%s.%s\" does not exist" msgstr "целевое отношение логической репликации \"%s.%s\" не существует" -#: replication/logical/relation.c:277 -#, c-format -msgid "logical replication target relation \"%s.%s\" is not a table" -msgstr "целевое отношение логической репликации \"%s.%s\" не является таблицей" - -#: replication/logical/relation.c:304 +#: replication/logical/relation.c:300 #, c-format msgid "" "logical replication target relation \"%s.%s\" is missing some replicated " @@ -18102,7 +18290,7 @@ msgstr "" "в целевом отношении логической репликации (\"%s.%s\") отсутствуют некоторые " "реплицируемые столбцы" -#: replication/logical/relation.c:343 +#: replication/logical/relation.c:340 #, c-format msgid "" "logical replication target relation \"%s.%s\" uses system columns in REPLICA " @@ -18111,39 +18299,39 @@ msgstr "" "в целевом отношении логической репликации (\"%s.%s\") в индексе REPLICA " "IDENTITY используются системные столбцы" -#: replication/logical/relation.c:459 +#: replication/logical/relation.c:456 #, c-format -msgid "builtin type %u not found" +msgid "built-in type %u not found" msgstr "встроенный тип %u не найден" -#: replication/logical/relation.c:460 +#: replication/logical/relation.c:457 #, c-format msgid "" -"This can be caused by having publisher with higher major version than " -"subscriber" +"This can be caused by having a publisher with a higher PostgreSQL major " +"version than the subscriber." msgstr "" "Это может быть вызвано тем, что на сервере публикации установлена более " -"новая основная версия, чем на подписчике" +"новая основная версия PostgreSQL, чем на подписчике." 
-#: replication/logical/relation.c:492 +#: replication/logical/relation.c:488 #, c-format msgid "data type \"%s.%s\" required for logical replication does not exist" msgstr "" "тип данных \"%s.%s\", требуемый для логической репликации, не существует" -#: replication/logical/reorderbuffer.c:2288 +#: replication/logical/reorderbuffer.c:2303 #, c-format msgid "could not write to data file for XID %u: %m" msgstr "не удалось записать в файл данных для XID %u: %m" -#: replication/logical/reorderbuffer.c:2387 -#: replication/logical/reorderbuffer.c:2409 +#: replication/logical/reorderbuffer.c:2402 +#: replication/logical/reorderbuffer.c:2424 #, c-format msgid "could not read from reorderbuffer spill file: %m" msgstr "не удалось прочитать из файла подкачки буфера пересортировки: %m" -#: replication/logical/reorderbuffer.c:2391 -#: replication/logical/reorderbuffer.c:2413 +#: replication/logical/reorderbuffer.c:2406 +#: replication/logical/reorderbuffer.c:2428 #, c-format msgid "" "could not read from reorderbuffer spill file: read %d instead of %u bytes" @@ -18151,19 +18339,19 @@ msgstr "" "не удалось прочитать из файла подкачки буфера пересортировки (прочитано " "байт: %d, требовалось: %u)" -#: replication/logical/reorderbuffer.c:3071 +#: replication/logical/reorderbuffer.c:3086 #, c-format msgid "could not read from file \"%s\": read %d instead of %d bytes" msgstr "" "не удалось прочитать из файла \"%s\" (прочитано байт: %d, требовалось: %d)" -#: replication/logical/snapbuild.c:564 +#: replication/logical/snapbuild.c:612 #, c-format msgid "initial slot snapshot too large" msgstr "изначальный снимок слота слишком большой" # skip-rule: capital-letter-first -#: replication/logical/snapbuild.c:616 +#: replication/logical/snapbuild.c:664 #, c-format msgid "exported logical decoding snapshot: \"%s\" with %u transaction ID" msgid_plural "" @@ -18175,110 +18363,117 @@ msgstr[1] "" msgstr[2] "" "экспортирован снимок логического декодирования: \"%s\" (ид. транзакций: %u)" -#: replication/logical/snapbuild.c:935 replication/logical/snapbuild.c:1300 -#: replication/logical/snapbuild.c:1843 +#: replication/logical/snapbuild.c:1262 replication/logical/snapbuild.c:1355 +#: replication/logical/snapbuild.c:1842 #, c-format msgid "logical decoding found consistent point at %X/%X" msgstr "процесс логического декодирования достиг точки согласованности в %X/%X" -#: replication/logical/snapbuild.c:937 -#, c-format -msgid "Transaction ID %u finished; no more running transactions." -msgstr "Транзакция %u завершена, больше активных транзакций нет." - -#: replication/logical/snapbuild.c:1302 +#: replication/logical/snapbuild.c:1264 #, c-format msgid "There are no running transactions." msgstr "Больше активных транзакций нет." -#: replication/logical/snapbuild.c:1364 +#: replication/logical/snapbuild.c:1306 #, c-format msgid "logical decoding found initial starting point at %X/%X" msgstr "" "процесс логического декодирования нашёл начальную стартовую точку в %X/%X" -#: replication/logical/snapbuild.c:1366 +#: replication/logical/snapbuild.c:1308 replication/logical/snapbuild.c:1332 +#, c-format +msgid "Waiting for transactions (approximately %d) older than %u to end." +msgstr "Ожидание завершения транзакций (примерно %d) старее %u."
+ +#: replication/logical/snapbuild.c:1330 +#, c-format +msgid "logical decoding found initial consistent point at %X/%X" +msgstr "" +"при логическом декодировании найдена начальная точка согласованности в %X/%X" + +#: replication/logical/snapbuild.c:1357 #, c-format -msgid "%u transaction needs to finish." -msgid_plural "%u transactions need to finish." -msgstr[0] "Необходимо дождаться завершения транзакций (%u)." -msgstr[1] "Необходимо дождаться завершения транзакций (%u)." -msgstr[2] "Необходимо дождаться завершения транзакций (%u)." +msgid "There are no old transactions anymore." +msgstr "Больше старых транзакций нет." -#: replication/logical/snapbuild.c:1711 replication/logical/snapbuild.c:1739 -#: replication/logical/snapbuild.c:1755 replication/logical/snapbuild.c:1771 +#: replication/logical/snapbuild.c:1715 replication/logical/snapbuild.c:1743 +#: replication/logical/snapbuild.c:1760 replication/logical/snapbuild.c:1776 #, c-format msgid "could not read file \"%s\", read %d of %d: %m" msgstr "не удалось прочитать файл \"%s\" (прочитано байт: %d из %d): %m" -#: replication/logical/snapbuild.c:1717 +#: replication/logical/snapbuild.c:1721 #, c-format msgid "snapbuild state file \"%s\" has wrong magic number: %u instead of %u" msgstr "" "файл состояния snapbuild \"%s\" имеет неправильную сигнатуру (%u вместо %u)" -#: replication/logical/snapbuild.c:1722 +#: replication/logical/snapbuild.c:1726 #, c-format msgid "snapbuild state file \"%s\" has unsupported version: %u instead of %u" msgstr "" "файл состояния snapbuild \"%s\" имеет неправильную версию (%u вместо %u)" -#: replication/logical/snapbuild.c:1784 +#: replication/logical/snapbuild.c:1789 #, c-format msgid "checksum mismatch for snapbuild state file \"%s\": is %u, should be %u" msgstr "" "в файле состояния snapbuild \"%s\" неверная контрольная сумма (%u вместо %u)" -#: replication/logical/snapbuild.c:1845 +#: replication/logical/snapbuild.c:1844 #, c-format msgid "Logical decoding will begin using saved snapshot." msgstr "Логическое декодирование начнётся с сохранённого снимка." 
-#: replication/logical/snapbuild.c:1918 +#: replication/logical/snapbuild.c:1916 #, c-format msgid "could not parse file name \"%s\"" msgstr "не удалось разобрать имя файла \"%s\"" -#: replication/logical/tablesync.c:128 +#: replication/logical/tablesync.c:138 #, c-format -msgid "logical replication synchronization worker finished processing" -msgstr "процесс синхронизации логической репликации закончил обработку" +msgid "" +"logical replication table synchronization worker for subscription \"%s\", " +"table \"%s\" has finished" +msgstr "" +"процесс синхронизации таблицы при логической репликации для подписки \"%s\", " +"таблицы \"%s\" закончил обработку" -#: replication/logical/tablesync.c:566 +#: replication/logical/tablesync.c:685 #, c-format msgid "could not fetch table info for table \"%s.%s\" from publisher: %s" msgstr "" "не удалось получить информацию о таблице \"%s.%s\" с сервера публикации: %s" -#: replication/logical/tablesync.c:572 +#: replication/logical/tablesync.c:691 #, c-format msgid "table \"%s.%s\" not found on publisher" msgstr "таблица \"%s.%s\" не найдена на сервере публикации" -#: replication/logical/tablesync.c:602 +#: replication/logical/tablesync.c:721 #, c-format msgid "could not fetch table info for table \"%s.%s\": %s" msgstr "не удалось получить информацию о таблице \"%s.%s\": %s" -#: replication/logical/tablesync.c:671 +#: replication/logical/tablesync.c:791 #, c-format msgid "could not start initial contents copy for table \"%s.%s\": %s" msgstr "" "не удалось начать копирование начального содержимого таблицы \"%s.%s\": %s" -#: replication/logical/tablesync.c:774 +#: replication/logical/tablesync.c:905 #, c-format msgid "table copy could not start transaction on publisher" msgstr "" "при копировании таблицы не удалось начать транзакцию на сервере публикации" -#: replication/logical/tablesync.c:794 +#: replication/logical/tablesync.c:927 #, c-format msgid "table copy could not finish transaction on publisher" msgstr "" "при копировании таблицы не удалось завершить транзакцию на сервере публикации" -#: replication/logical/worker.c:282 +#: replication/logical/worker.c:293 #, c-format msgid "" "processing remote data for replication target relation \"%s.%s\" column \"%s" @@ -18287,21 +18482,21 @@ msgstr "" "обработка внешних данных для целевого отношения репликации \"%s.%s\" столбца " "\"%s\", удалённый тип %s, локальный тип %s" -#: replication/logical/worker.c:483 +#: replication/logical/worker.c:506 #, c-format msgid "ORIGIN message sent out of order" msgstr "сообщение ORIGIN отправлено неуместно" -#: replication/logical/worker.c:614 +#: replication/logical/worker.c:637 #, c-format msgid "" -"publisher does not send replica identity column expected by the logical " +"publisher did not send replica identity column expected by the logical " "replication target relation \"%s.%s\"" msgstr "" "сервер публикации не передал столбец идентификации реплики, ожидаемый для " "целевого отношения логической репликации \"%s.%s\"" -#: replication/logical/worker.c:621 +#: replication/logical/worker.c:644 #, c-format msgid "" "logical replication target relation \"%s.%s\" has neither REPLICA IDENTITY " @@ -18312,140 +18507,161 @@ msgstr "" "IDENTITY, ни ключа PRIMARY KEY, и публикуемое отношение не имеет " "характеристики REPLICA IDENTITY FULL" -#: replication/logical/worker.c:828 +#: replication/logical/worker.c:851 #, c-format msgid "" -"logical replication could not find row for delete in replication target %s" +"logical replication could not find row for delete in 
replication target " +"relation \"%s\"" msgstr "" "при логической репликации не удалось найти строку для удаления в целевом " -"отношении репликации %s" +"отношении репликации \"%s\"" -#: replication/logical/worker.c:895 +#: replication/logical/worker.c:918 #, c-format -msgid "invalid logical replication message type %c" -msgstr "неверный тип сообщения логической репликации %c" +msgid "invalid logical replication message type \"%c\"" +msgstr "неверный тип сообщения логической репликации \"%c\"" -#: replication/logical/worker.c:1032 +#: replication/logical/worker.c:1059 #, c-format msgid "data stream from publisher has ended" msgstr "поток данных с сервера публикации закончился" -#: replication/logical/worker.c:1168 +#: replication/logical/worker.c:1218 #, c-format msgid "terminating logical replication worker due to timeout" msgstr "завершение обработчика логической репликации из-за таймаута" -#: replication/logical/worker.c:1315 +#: replication/logical/worker.c:1366 #, c-format msgid "" -"logical replication worker for subscription \"%s\" will stop because the " -"subscription was removed" +"logical replication apply worker for subscription \"%s\" will stop because " +"the subscription was removed" msgstr "" -"процесс-обработчик логической репликации для подписки \"%s\" будет " +"применяющий процесс логической репликации для подписки \"%s\" будет " "остановлен, так как подписка была удалена" -#: replication/logical/worker.c:1330 +#: replication/logical/worker.c:1380 #, c-format msgid "" -"logical replication worker for subscription \"%s\" will restart because the " -"connection information was changed" +"logical replication apply worker for subscription \"%s\" will stop because " +"the subscription was disabled" msgstr "" -"процесс-обработчик логической репликации для подписки \"%s\" будет " +"применяющий процесс логической репликации для подписки \"%s\" будет " +"остановлен, так как подписка была отключена" + +#: replication/logical/worker.c:1394 +#, c-format +msgid "" +"logical replication apply worker for subscription \"%s\" will restart " +"because the connection information was changed" +msgstr "" +"применяющий процесс логической репликации для подписки \"%s\" будет " "перезапущен из-за изменения информации о подключении" -#: replication/logical/worker.c:1345 +#: replication/logical/worker.c:1408 #, c-format msgid "" -"logical replication worker for subscription \"%s\" will restart because " -"subscription was renamed" +"logical replication apply worker for subscription \"%s\" will restart " +"because subscription was renamed" msgstr "" -"процесс-обработчик логической репликации для подписки \"%s\" будет " +"применяющий процесс логической репликации для подписки \"%s\" будет " "перезапущен, так как подписка была переименована" -#: replication/logical/worker.c:1360 +#: replication/logical/worker.c:1425 #, c-format msgid "" -"logical replication worker for subscription \"%s\" will restart because the " -"replication slot name was changed" +"logical replication apply worker for subscription \"%s\" will restart " +"because the replication slot name was changed" msgstr "" -"процесс-обработчик логической репликации для подписки \"%s\" будет " +"применяющий процесс логической репликации для подписки \"%s\" будет " "перезапущен, так как было изменено имя слота репликации" -#: replication/logical/worker.c:1375 +#: replication/logical/worker.c:1439 #, c-format msgid "" -"logical replication worker for subscription \"%s\" will restart because " -"subscription's publications were changed" 
+"logical replication apply worker for subscription \"%s\" will restart " +"because subscription's publications were changed" msgstr "" -"процесс-обработчик логической репликации для подписки \"%s\" будет " +"применяющий процесс логической репликации для подписки \"%s\" будет " "перезапущен из-за изменения публикаций подписки" -#: replication/logical/worker.c:1391 +#: replication/logical/worker.c:1547 #, c-format msgid "" -"logical replication worker for subscription \"%s\" will stop because the " -"subscription was disabled" +"logical replication apply worker for subscription \"%s\" will not start " +"because the subscription was disabled during startup" msgstr "" -"процесс-обработчик логической репликации для подписки \"%s\" будет " -"остановлен, так как подписка была отключена" +"применяющий процесс логической репликации для подписки \"%s\" не будет " +"запущен, так как подписка была отключена при старте" -#: replication/logical/worker.c:1483 +#: replication/logical/worker.c:1561 #, c-format msgid "" -"logical replication worker for subscription \"%s\" will not start because " -"the subscription was disabled during startup" +"logical replication table synchronization worker for subscription \"%s\", " +"table \"%s\" has started" msgstr "" -"процесс-обработчик логической репликации для подписки \"%s\" не будет " -"запущен, так как подписка была отключена при старте" +"процесс синхронизации таблицы при логической репликации для подписки \"%s\", " +"таблицы \"%s\" запущен" -#: replication/pgoutput/pgoutput.c:113 +#: replication/logical/worker.c:1565 +#, c-format +msgid "logical replication apply worker for subscription \"%s\" has started" +msgstr "" +"запускается применяющий процесс логической репликации для подписки \"%s\"" + +#: replication/logical/worker.c:1605 +#, c-format +msgid "subscription has no replication slot set" +msgstr "для подписки не задан слот репликации" + +#: replication/pgoutput/pgoutput.c:114 #, c-format msgid "invalid proto_version" msgstr "неверное значение proto_version" -#: replication/pgoutput/pgoutput.c:118 +#: replication/pgoutput/pgoutput.c:119 #, c-format -msgid "proto_verson \"%s\" out of range" +msgid "proto_version \"%s\" out of range" msgstr "значение proto_verson \"%s\" вне диапазона" -#: replication/pgoutput/pgoutput.c:135 +#: replication/pgoutput/pgoutput.c:136 #, c-format msgid "invalid publication_names syntax" msgstr "неверный синтаксис publication_names" -#: replication/pgoutput/pgoutput.c:179 +#: replication/pgoutput/pgoutput.c:180 #, c-format msgid "client sent proto_version=%d but we only support protocol %d or lower" msgstr "" "клиент передал proto_version=%d, но мы поддерживаем только протокол %d и ниже" -#: replication/pgoutput/pgoutput.c:185 +#: replication/pgoutput/pgoutput.c:186 #, c-format msgid "client sent proto_version=%d but we only support protocol %d or higher" msgstr "" "клиент передал proto_version=%d, но мы поддерживает только протокол %d и выше" -#: replication/pgoutput/pgoutput.c:191 +#: replication/pgoutput/pgoutput.c:192 #, c-format msgid "publication_names parameter missing" msgstr "отсутствует параметр publication_names" -#: replication/slot.c:181 +#: replication/slot.c:182 #, c-format msgid "replication slot name \"%s\" is too short" msgstr "имя слота репликации \"%s\" слишком короткое" -#: replication/slot.c:190 +#: replication/slot.c:191 #, c-format msgid "replication slot name \"%s\" is too long" msgstr "имя слота репликации \"%s\" слишком длинное" -#: replication/slot.c:203 +#: replication/slot.c:204 #, c-format msgid 
"replication slot name \"%s\" contains invalid character" msgstr "имя слота репликации \"%s\" содержит недопустимый символ" -#: replication/slot.c:205 +#: replication/slot.c:206 #, c-format msgid "" "Replication slot names may only contain lower case letters, numbers, and the " @@ -18454,80 +18670,80 @@ msgstr "" "Имя слота репликации может содержать только буквы в нижнем регистре, цифры и " "знак подчёркивания." -#: replication/slot.c:252 +#: replication/slot.c:253 #, c-format msgid "replication slot \"%s\" already exists" msgstr "слот репликации \"%s\" уже существует" -#: replication/slot.c:262 +#: replication/slot.c:263 #, c-format msgid "all replication slots are in use" msgstr "используются все слоты репликации" -#: replication/slot.c:263 +#: replication/slot.c:264 #, c-format msgid "Free one or increase max_replication_slots." msgstr "Освободите ненужные или увеличьте параметр max_replication_slots." -#: replication/slot.c:359 +#: replication/slot.c:379 #, c-format msgid "replication slot \"%s\" does not exist" msgstr "слот репликации \"%s\" не существует" -#: replication/slot.c:363 +#: replication/slot.c:390 replication/slot.c:940 #, c-format msgid "replication slot \"%s\" is active for PID %d" msgstr "слот репликации \"%s\" занят процессом с PID %d" -#: replication/slot.c:549 replication/slot.c:1049 replication/slot.c:1398 +#: replication/slot.c:624 replication/slot.c:1121 replication/slot.c:1470 #, c-format msgid "could not remove directory \"%s\"" msgstr "ошибка при удалении каталога \"%s\"" -#: replication/slot.c:898 +#: replication/slot.c:970 #, c-format msgid "replication slots can only be used if max_replication_slots > 0" msgstr "" "слоты репликации можно использовать, только если max_replication_slots > 0" -#: replication/slot.c:903 +#: replication/slot.c:975 #, c-format msgid "replication slots can only be used if wal_level >= replica" msgstr "слоты репликации можно использовать, только если wal_level >= replica" -#: replication/slot.c:1328 replication/slot.c:1368 +#: replication/slot.c:1400 replication/slot.c:1440 #, c-format msgid "could not read file \"%s\", read %d of %u: %m" msgstr "не удалось прочитать файл \"%s\" (прочитано байт: %d из %u): %m" -#: replication/slot.c:1337 +#: replication/slot.c:1409 #, c-format msgid "replication slot file \"%s\" has wrong magic number: %u instead of %u" msgstr "" "файл слота репликации \"%s\" имеет неправильную сигнатуру (%u вместо %u)" -#: replication/slot.c:1344 +#: replication/slot.c:1416 #, c-format msgid "replication slot file \"%s\" has unsupported version %u" msgstr "файл состояния snapbuild \"%s\" имеет неподдерживаемую версию %u" -#: replication/slot.c:1351 +#: replication/slot.c:1423 #, c-format msgid "replication slot file \"%s\" has corrupted length %u" msgstr "у файла слота репликации \"%s\" неверная длина: %u" -#: replication/slot.c:1383 +#: replication/slot.c:1455 #, c-format msgid "checksum mismatch for replication slot file \"%s\": is %u, should be %u" msgstr "" "в файле слота репликации \"%s\" неверная контрольная сумма (%u вместо %u)" -#: replication/slot.c:1436 +#: replication/slot.c:1508 #, c-format msgid "too many replication slots active before shutdown" msgstr "перед завершением активно слишком много слотов репликации" -#: replication/syncrep.c:244 +#: replication/syncrep.c:248 #, c-format msgid "" "canceling the wait for synchronous replication and terminating connection " @@ -18536,7 +18752,7 @@ msgstr "" "отмена ожидания синхронной репликации и закрытие соединения по команде " "администратора" -#: 
replication/syncrep.c:245 replication/syncrep.c:262 +#: replication/syncrep.c:249 replication/syncrep.c:266 #, c-format msgid "" "The transaction has already committed locally, but might not have been " @@ -18545,35 +18761,35 @@ msgstr "" "Транзакция уже была зафиксирована локально, но возможно не была " "реплицирована на резервный сервер." -#: replication/syncrep.c:261 +#: replication/syncrep.c:265 #, c-format msgid "canceling wait for synchronous replication due to user request" msgstr "отмена ожидания синхронной репликации по запросу пользователя" -#: replication/syncrep.c:392 +#: replication/syncrep.c:399 #, c-format msgid "standby \"%s\" now has synchronous standby priority %u" msgstr "" "резервный сервер \"%s\" теперь имеет приоритет синхронной репликации %u" -#: replication/syncrep.c:453 +#: replication/syncrep.c:460 #, c-format msgid "standby \"%s\" is now a synchronous standby with priority %u" msgstr "резервный сервер \"%s\" стал синхронным с приоритетом %u" -#: replication/syncrep.c:457 +#: replication/syncrep.c:464 #, c-format msgid "standby \"%s\" is now a candidate for quorum synchronous standby" msgstr "" "резервный сервер \"%s\" стал кандидатом для включения в кворум синхронных " "резервных" -#: replication/syncrep.c:1120 +#: replication/syncrep.c:1162 #, c-format msgid "synchronous_standby_names parser failed" msgstr "ошибка при разборе synchronous_standby_names" -#: replication/syncrep.c:1126 +#: replication/syncrep.c:1168 #, c-format msgid "number of synchronous standbys (%d) must be greater than zero" msgstr "число синхронных резервных серверов (%d) должно быть больше нуля" @@ -18583,110 +18799,110 @@ msgstr "число синхронных резервных серверов (%d) msgid "terminating walreceiver process due to administrator command" msgstr "завершение процесса считывания журнала по команде администратора" -#: replication/walreceiver.c:301 +#: replication/walreceiver.c:306 #, c-format msgid "could not connect to the primary server: %s" msgstr "не удалось подключиться к главному серверу: %s" -#: replication/walreceiver.c:340 +#: replication/walreceiver.c:345 #, c-format msgid "database system identifier differs between the primary and standby" msgstr "идентификаторы СУБД на главном и резервном серверах различаются" -#: replication/walreceiver.c:341 +#: replication/walreceiver.c:346 #, c-format msgid "The primary's identifier is %s, the standby's identifier is %s." msgstr "Идентификатор на главном сервере: %s, на резервном: %s." 
-#: replication/walreceiver.c:352 +#: replication/walreceiver.c:357 #, c-format msgid "highest timeline %u of the primary is behind recovery timeline %u" msgstr "" "последняя линия времени %u на главном сервере отстаёт от восстанавливаемой " "линии времени %u" -#: replication/walreceiver.c:388 +#: replication/walreceiver.c:393 #, c-format msgid "started streaming WAL from primary at %X/%X on timeline %u" msgstr "" "начало передачи журнала с главного сервера, с позиции %X/%X на линии времени " "%u" -#: replication/walreceiver.c:393 +#: replication/walreceiver.c:398 #, c-format msgid "restarted WAL streaming at %X/%X on timeline %u" msgstr "перезапуск передачи журнала с позиции %X/%X на линии времени %u" -#: replication/walreceiver.c:422 +#: replication/walreceiver.c:427 #, c-format msgid "cannot continue WAL streaming, recovery has already ended" msgstr "продолжить передачу WAL нельзя, восстановление уже окончено" -#: replication/walreceiver.c:459 +#: replication/walreceiver.c:464 #, c-format msgid "replication terminated by primary server" msgstr "репликация прекращена главным сервером" -#: replication/walreceiver.c:460 +#: replication/walreceiver.c:465 #, c-format msgid "End of WAL reached on timeline %u at %X/%X." msgstr "На линии времени %u в %X/%X достигнут конец журнала." -#: replication/walreceiver.c:555 +#: replication/walreceiver.c:560 #, c-format msgid "terminating walreceiver due to timeout" msgstr "завершение приёма журнала из-за таймаута" -#: replication/walreceiver.c:595 +#: replication/walreceiver.c:600 #, c-format msgid "primary server contains no more WAL on requested timeline %u" msgstr "" "на главном сервере больше нет журналов для запрошенной линии времени %u" -#: replication/walreceiver.c:610 replication/walreceiver.c:969 +#: replication/walreceiver.c:615 replication/walreceiver.c:974 #, c-format msgid "could not close log segment %s: %m" msgstr "не удалось закрыть сегмент журнала %s: %m" -#: replication/walreceiver.c:735 +#: replication/walreceiver.c:740 #, c-format msgid "fetching timeline history file for timeline %u from primary server" msgstr "загрузка файла истории для линии времени %u с главного сервера" -#: replication/walreceiver.c:1023 +#: replication/walreceiver.c:1028 #, c-format msgid "could not write to log segment %s at offset %u, length %lu: %m" msgstr "не удалось записать в сегмент журнала %s (смещение %u, длина %lu): %m" -#: replication/walsender.c:482 +#: replication/walsender.c:491 #, c-format msgid "could not seek to beginning of file \"%s\": %m" msgstr "не удалось перейти к началу файла \"%s\": %m" -#: replication/walsender.c:523 +#: replication/walsender.c:532 #, c-format msgid "IDENTIFY_SYSTEM has not been run before START_REPLICATION" msgstr "Команда IDENTIFY_SYSTEM не выполнялась до START_REPLICATION" -#: replication/walsender.c:540 +#: replication/walsender.c:549 #, c-format msgid "cannot use a logical replication slot for physical replication" msgstr "" "логический слот репликации нельзя использовать для физической репликации" -#: replication/walsender.c:603 +#: replication/walsender.c:612 #, c-format msgid "" "requested starting point %X/%X on timeline %u is not in this server's history" msgstr "" "в истории сервера нет запрошенной начальной точки %X/%X на линии времени %u" -#: replication/walsender.c:607 +#: replication/walsender.c:616 #, c-format msgid "This server's history forked from timeline %u at %X/%X." msgstr "История этого сервера ответвилась от линии времени %u в %X/%X." 
-#: replication/walsender.c:652 +#: replication/walsender.c:661 #, c-format msgid "" "requested starting point %X/%X is ahead of the WAL flush position of this " @@ -18695,7 +18911,7 @@ msgstr "" "запрошенная начальная точка %X/%X впереди позиции сброшенных данных журнала " "на этом сервере (%X/%X)" -#: replication/walsender.c:885 +#: replication/walsender.c:890 #, c-format msgid "" "CREATE_REPLICATION_SLOT ... EXPORT_SNAPSHOT must not be called inside a " @@ -18704,7 +18920,7 @@ msgstr "" "Команда CREATE_REPLICATION_SLOT ... EXPORT_SNAPSHOT не должна вызываться " "внутри транзакции" -#: replication/walsender.c:892 +#: replication/walsender.c:899 #, c-format msgid "" "CREATE_REPLICATION_SLOT ... USE_SNAPSHOT must be called inside a transaction" @@ -18712,7 +18928,7 @@ msgstr "" "Команда CREATE_REPLICATION_SLOT ... USE_SNAPSHOT должна вызываться внутри " "транзакции" -#: replication/walsender.c:897 +#: replication/walsender.c:904 #, c-format msgid "" "CREATE_REPLICATION_SLOT ... USE_SNAPSHOT must be called in REPEATABLE READ " @@ -18721,7 +18937,7 @@ msgstr "" "Команда CREATE_REPLICATION_SLOT ... USE_SNAPSHOT должна вызываться в " "транзакции уровня изоляции REPEATABLE READ" -#: replication/walsender.c:902 +#: replication/walsender.c:909 #, c-format msgid "" "CREATE_REPLICATION_SLOT ... USE_SNAPSHOT must be called before any query" @@ -18729,7 +18945,7 @@ msgstr "" "Команда CREATE_REPLICATION_SLOT ... USE_SNAPSHOT должна вызываться перед " "любыми другими запросами" -#: replication/walsender.c:907 +#: replication/walsender.c:914 #, c-format msgid "" "CREATE_REPLICATION_SLOT ... USE_SNAPSHOT must not be called in a " @@ -18738,19 +18954,26 @@ msgstr "" "Команда CREATE_REPLICATION_SLOT ... USE_SNAPSHOT не должна вызываться в " "подтранзакции" -#: replication/walsender.c:1050 +#: replication/walsender.c:1060 #, c-format msgid "terminating walsender process after promotion" msgstr "завершение процесса передачи журнала после повышения" -#: replication/walsender.c:1377 +#: replication/walsender.c:1446 +#, c-format +msgid "cannot execute new commands while WAL sender is in stopping mode" +msgstr "" +"нельзя выполнять новые команды, пока процесс передачи WAL находится в режиме " +"остановки" + +#: replication/walsender.c:1479 #, c-format msgid "received replication command: %s" msgstr "получена команда репликации: %s" -#: replication/walsender.c:1416 tcop/fastpath.c:291 tcop/postgres.c:1003 -#: tcop/postgres.c:1313 tcop/postgres.c:1572 tcop/postgres.c:1977 -#: tcop/postgres.c:2345 tcop/postgres.c:2420 +#: replication/walsender.c:1495 tcop/fastpath.c:281 tcop/postgres.c:997 +#: tcop/postgres.c:1307 tcop/postgres.c:1566 tcop/postgres.c:1971 +#: tcop/postgres.c:2339 tcop/postgres.c:2414 #, c-format msgid "" "current transaction is aborted, commands ignored until end of transaction " @@ -18758,43 +18981,45 @@ msgid "" msgstr "" "текущая транзакция прервана, команды до конца блока транзакции игнорируются" -#: replication/walsender.c:1478 +#: replication/walsender.c:1560 #, c-format -msgid "not connected to database" -msgstr "нет подключения к базе данных" +msgid "cannot execute SQL commands in WAL sender for physical replication" +msgstr "" +"нельзя выполнять команды SQL в процессе, передающем WAL для физической " +"репликации" -#: replication/walsender.c:1518 replication/walsender.c:1534 +#: replication/walsender.c:1606 replication/walsender.c:1622 #, c-format msgid "unexpected EOF on standby connection" msgstr "неожиданный обрыв соединения с резервным сервером" -#: replication/walsender.c:1548 +#: 
replication/walsender.c:1636 #, c-format msgid "unexpected standby message type \"%c\", after receiving CopyDone" msgstr "" "после CopyDone резервный сервер передал сообщение неожиданного типа \"%c\"" -#: replication/walsender.c:1586 +#: replication/walsender.c:1674 #, c-format msgid "invalid standby message type \"%c\"" msgstr "неверный тип сообщения резервного сервера: \"%c\"" -#: replication/walsender.c:1627 +#: replication/walsender.c:1715 #, c-format msgid "unexpected message type \"%c\"" msgstr "неожиданный тип сообщения \"%c\"" -#: replication/walsender.c:1997 +#: replication/walsender.c:2085 #, c-format msgid "terminating walsender process due to replication timeout" msgstr "завершение процесса передачи журнала из-за таймаута репликации" -#: replication/walsender.c:2085 +#: replication/walsender.c:2171 #, c-format msgid "standby \"%s\" has now caught up with primary" msgstr "резервный сервер \"%s\" нагнал главный" -#: replication/walsender.c:2192 +#: replication/walsender.c:2278 #, c-format msgid "" "number of requested standby connections exceeds max_wal_senders (currently " @@ -18803,7 +19028,7 @@ msgstr "" "число запрошенных подключений резервных серверов превосходит max_wal_senders " "(сейчас: %d)" -#: rewrite/rewriteDefine.c:112 rewrite/rewriteDefine.c:967 +#: rewrite/rewriteDefine.c:112 rewrite/rewriteDefine.c:981 #, c-format msgid "rule \"%s\" for relation \"%s\" already exists" msgstr "правило \"%s\" для отношения \"%s\" уже существует" @@ -18869,42 +19094,52 @@ msgstr "\"%s\" уже является представлением" msgid "view rule for \"%s\" must be named \"%s\"" msgstr "правило представления для \"%s\" должно называться \"%s\"" -#: rewrite/rewriteDefine.c:430 +#: rewrite/rewriteDefine.c:428 +#, c-format +msgid "cannot convert partitioned table \"%s\" to a view" +msgstr "преобразовать секционированную таблицу \"%s\" в представление нельзя" + +#: rewrite/rewriteDefine.c:434 +#, c-format +msgid "cannot convert partition \"%s\" to a view" +msgstr "преобразовать секцию \"%s\" в представление нельзя" + +#: rewrite/rewriteDefine.c:442 #, c-format msgid "could not convert table \"%s\" to a view because it is not empty" msgstr "" "не удалось преобразовать таблицу \"%s\" в представление, так как она не " "пуста" -#: rewrite/rewriteDefine.c:438 +#: rewrite/rewriteDefine.c:450 #, c-format msgid "could not convert table \"%s\" to a view because it has triggers" msgstr "" "не удалось преобразовать таблицу \"%s\" в представление, так как она " "содержит триггеры" -#: rewrite/rewriteDefine.c:440 +#: rewrite/rewriteDefine.c:452 #, c-format msgid "" "In particular, the table cannot be involved in any foreign key relationships." msgstr "" "Кроме того, таблица не может быть задействована в ссылках по внешнему ключу."
-#: rewrite/rewriteDefine.c:445 +#: rewrite/rewriteDefine.c:457 #, c-format msgid "could not convert table \"%s\" to a view because it has indexes" msgstr "" "не удалось преобразовать таблицу \"%s\" в представление, так как она имеет " "индексы" -#: rewrite/rewriteDefine.c:451 +#: rewrite/rewriteDefine.c:463 #, c-format msgid "could not convert table \"%s\" to a view because it has child tables" msgstr "" "не удалось преобразовать таблицу \"%s\" в представление, так как она имеет " "подчинённые таблицы" -#: rewrite/rewriteDefine.c:457 +#: rewrite/rewriteDefine.c:469 #, c-format msgid "" "could not convert table \"%s\" to a view because it has row security enabled" @@ -18912,7 +19147,7 @@ msgstr "" "не удалось преобразовать таблицу \"%s\" в представление, так как для неё " "включена защита на уровне строк" -#: rewrite/rewriteDefine.c:463 +#: rewrite/rewriteDefine.c:475 #, c-format msgid "" "could not convert table \"%s\" to a view because it has row security policies" @@ -18920,45 +19155,45 @@ msgstr "" "не удалось преобразовать таблицу \"%s\" в представление, так как к ней " "применены политики защиты строк" -#: rewrite/rewriteDefine.c:490 +#: rewrite/rewriteDefine.c:502 #, c-format msgid "cannot have multiple RETURNING lists in a rule" msgstr "в правиле нельзя указать несколько списков RETURNING" -#: rewrite/rewriteDefine.c:495 +#: rewrite/rewriteDefine.c:507 #, c-format msgid "RETURNING lists are not supported in conditional rules" msgstr "списки RETURNING в условных правилах не поддерживаются" -#: rewrite/rewriteDefine.c:499 +#: rewrite/rewriteDefine.c:511 #, c-format msgid "RETURNING lists are not supported in non-INSTEAD rules" msgstr "списки RETURNING поддерживаются только в правилах INSTEAD" -#: rewrite/rewriteDefine.c:664 +#: rewrite/rewriteDefine.c:676 #, c-format msgid "SELECT rule's target list has too many entries" msgstr "список результата правила для SELECT содержит слишком много столбцов" -#: rewrite/rewriteDefine.c:665 +#: rewrite/rewriteDefine.c:677 #, c-format msgid "RETURNING list has too many entries" msgstr "список RETURNING содержит слишком много столбцов" -#: rewrite/rewriteDefine.c:692 +#: rewrite/rewriteDefine.c:704 #, c-format msgid "cannot convert relation containing dropped columns to view" msgstr "" "преобразовать отношение, содержащее удалённые столбцы, в представление нельзя" -#: rewrite/rewriteDefine.c:693 +#: rewrite/rewriteDefine.c:705 #, c-format msgid "" "cannot create a RETURNING list for a relation containing dropped columns" msgstr "" "создать список RETURNING для отношения, содержащего удалённые столбцы, нельзя" -#: rewrite/rewriteDefine.c:699 +#: rewrite/rewriteDefine.c:711 #, c-format msgid "" "SELECT rule's target entry %d has different column name from column \"%s\"" @@ -18966,62 +19201,62 @@ msgstr "" "элементу %d результата правила для SELECT присвоено имя, отличное от имени " "столбца \"%s\"" -#: rewrite/rewriteDefine.c:701 +#: rewrite/rewriteDefine.c:713 #, c-format msgid "SELECT target entry is named \"%s\"." msgstr "Имя элемента результата SELECT: \"%s\"." 
-#: rewrite/rewriteDefine.c:710 +#: rewrite/rewriteDefine.c:722 #, c-format msgid "SELECT rule's target entry %d has different type from column \"%s\"" msgstr "" "элемент %d результата правила для SELECT имеет тип, отличный от типа столбца " "\"%s\"" -#: rewrite/rewriteDefine.c:712 +#: rewrite/rewriteDefine.c:724 #, c-format msgid "RETURNING list's entry %d has different type from column \"%s\"" msgstr "элемент %d списка RETURNING имеет тип, отличный от типа столбца \"%s\"" -#: rewrite/rewriteDefine.c:715 rewrite/rewriteDefine.c:739 +#: rewrite/rewriteDefine.c:727 rewrite/rewriteDefine.c:751 #, c-format msgid "SELECT target entry has type %s, but column has type %s." msgstr "Элемент результата SELECT имеет тип %s, тогда как тип столбца - %s." -#: rewrite/rewriteDefine.c:718 rewrite/rewriteDefine.c:743 +#: rewrite/rewriteDefine.c:730 rewrite/rewriteDefine.c:755 #, c-format msgid "RETURNING list entry has type %s, but column has type %s." msgstr "Элемент списка RETURNING имеет тип %s, тогда как тип столбца - %s." -#: rewrite/rewriteDefine.c:734 +#: rewrite/rewriteDefine.c:746 #, c-format msgid "SELECT rule's target entry %d has different size from column \"%s\"" msgstr "" "элемент %d результата правила для SELECT имеет размер, отличный от столбца " "\"%s\"" -#: rewrite/rewriteDefine.c:736 +#: rewrite/rewriteDefine.c:748 #, c-format msgid "RETURNING list's entry %d has different size from column \"%s\"" msgstr "элемент %d списка RETURNING имеет размер, отличный от столбца \"%s\"" -#: rewrite/rewriteDefine.c:753 +#: rewrite/rewriteDefine.c:765 #, c-format msgid "SELECT rule's target list has too few entries" msgstr "список результата правила для SELECT содержит недостаточно элементов" -#: rewrite/rewriteDefine.c:754 +#: rewrite/rewriteDefine.c:766 #, c-format msgid "RETURNING list has too few entries" msgstr "список RETURNING содержит недостаточно элементов" -#: rewrite/rewriteDefine.c:846 rewrite/rewriteDefine.c:958 +#: rewrite/rewriteDefine.c:858 rewrite/rewriteDefine.c:972 #: rewrite/rewriteSupport.c:109 #, c-format msgid "rule \"%s\" for relation \"%s\" does not exist" msgstr "правило \"%s\" для отношения \"%s\" не существует" -#: rewrite/rewriteDefine.c:977 +#: rewrite/rewriteDefine.c:991 #, c-format msgid "renaming an ON SELECT rule is not allowed" msgstr "переименовывать правило ON SELECT нельзя" @@ -19040,87 +19275,108 @@ msgstr "" msgid "cannot have RETURNING lists in multiple rules" msgstr "RETURNING можно определить только для одного правила" -#: rewrite/rewriteHandler.c:941 rewrite/rewriteHandler.c:959 +#: rewrite/rewriteHandler.c:823 +#, c-format +msgid "cannot insert into column \"%s\"" +msgstr "вставить данные в столбец \"%s\" нельзя" + +#: rewrite/rewriteHandler.c:824 rewrite/rewriteHandler.c:839 +#, c-format +msgid "Column \"%s\" is an identity column defined as GENERATED ALWAYS." +msgstr "" +"Столбец \"%s\" является столбцом идентификации со свойством GENERATED ALWAYS." + +#: rewrite/rewriteHandler.c:826 +#, c-format +msgid "Use OVERRIDING SYSTEM VALUE to override." +msgstr "Для переопределения укажите OVERRIDING SYSTEM VALUE."
+ +#: rewrite/rewriteHandler.c:838 +#, c-format +msgid "column \"%s\" can only be updated to DEFAULT" +msgstr "столбцу \"%s\" можно присвоить только значение DEFAULT" + +#: rewrite/rewriteHandler.c:1000 rewrite/rewriteHandler.c:1018 #, c-format msgid "multiple assignments to same column \"%s\"" msgstr "многочисленные присвоения одному столбцу \"%s\"" -#: rewrite/rewriteHandler.c:1735 rewrite/rewriteHandler.c:3349 +#: rewrite/rewriteHandler.c:1818 rewrite/rewriteHandler.c:3419 #, c-format msgid "infinite recursion detected in rules for relation \"%s\"" msgstr "обнаружена бесконечная рекурсия в правилах для отношения \"%s\"" -#: rewrite/rewriteHandler.c:1820 +#: rewrite/rewriteHandler.c:1904 #, c-format msgid "infinite recursion detected in policy for relation \"%s\"" msgstr "обнаружена бесконечная рекурсия в политике для отношения \"%s\"" -#: rewrite/rewriteHandler.c:2137 +#: rewrite/rewriteHandler.c:2221 msgid "Junk view columns are not updatable." msgstr "Утилизируемые столбцы представлений не обновляются." -#: rewrite/rewriteHandler.c:2142 +#: rewrite/rewriteHandler.c:2226 msgid "" "View columns that are not columns of their base relation are not updatable." msgstr "" "Столбцы представлений, не являющиеся столбцами базовых отношений, не " "обновляются." -#: rewrite/rewriteHandler.c:2145 +#: rewrite/rewriteHandler.c:2229 msgid "View columns that refer to system columns are not updatable." msgstr "" "Столбцы представлений, ссылающиеся на системные столбцы, не обновляются." -#: rewrite/rewriteHandler.c:2148 +#: rewrite/rewriteHandler.c:2232 msgid "View columns that return whole-row references are not updatable." msgstr "" "Столбцы представлений, возвращающие ссылки на всю строку, не обновляются." -#: rewrite/rewriteHandler.c:2206 +#: rewrite/rewriteHandler.c:2290 msgid "Views containing DISTINCT are not automatically updatable." msgstr "Представления с DISTINCT не обновляются автоматически." -#: rewrite/rewriteHandler.c:2209 +#: rewrite/rewriteHandler.c:2293 msgid "Views containing GROUP BY are not automatically updatable." msgstr "Представления с GROUP BY не обновляются автоматически." -#: rewrite/rewriteHandler.c:2212 +#: rewrite/rewriteHandler.c:2296 msgid "Views containing HAVING are not automatically updatable." msgstr "Представления с HAVING не обновляются автоматически." -#: rewrite/rewriteHandler.c:2215 +#: rewrite/rewriteHandler.c:2299 msgid "" "Views containing UNION, INTERSECT, or EXCEPT are not automatically updatable." msgstr "" "Представления с UNION, INTERSECT или EXCEPT не обновляются автоматически." -#: rewrite/rewriteHandler.c:2218 +#: rewrite/rewriteHandler.c:2302 msgid "Views containing WITH are not automatically updatable." msgstr "Представления с WITH не обновляются автоматически." -#: rewrite/rewriteHandler.c:2221 +#: rewrite/rewriteHandler.c:2305 msgid "Views containing LIMIT or OFFSET are not automatically updatable." msgstr "Представления с LIMIT или OFFSET не обновляются автоматически." -#: rewrite/rewriteHandler.c:2233 +#: rewrite/rewriteHandler.c:2317 msgid "Views that return aggregate functions are not automatically updatable." msgstr "" "Представления, возвращающие агрегатные функции, не обновляются автоматически." -#: rewrite/rewriteHandler.c:2236 +#: rewrite/rewriteHandler.c:2320 msgid "Views that return window functions are not automatically updatable." msgstr "" "Представления, возвращающие оконные функции, не обновляются автоматически." 
-#: rewrite/rewriteHandler.c:2239 +#: rewrite/rewriteHandler.c:2323 msgid "" "Views that return set-returning functions are not automatically updatable." msgstr "" "Представления, возвращающие функции с результатом-множеством, не обновляются " "автоматически." -#: rewrite/rewriteHandler.c:2246 rewrite/rewriteHandler.c:2250 -#: rewrite/rewriteHandler.c:2258 +#: rewrite/rewriteHandler.c:2330 rewrite/rewriteHandler.c:2334 +#: rewrite/rewriteHandler.c:2342 msgid "" "Views that do not select from a single table or view are not automatically " "updatable." @@ -19128,27 +19384,27 @@ msgstr "" "Представления, выбирающие данные не из одной таблицы или представления, не " "обновляются автоматически." -#: rewrite/rewriteHandler.c:2261 +#: rewrite/rewriteHandler.c:2345 msgid "Views containing TABLESAMPLE are not automatically updatable." msgstr "Представления, содержащие TABLESAMPLE, не обновляются автоматически." -#: rewrite/rewriteHandler.c:2285 +#: rewrite/rewriteHandler.c:2369 msgid "Views that have no updatable columns are not automatically updatable." msgstr "" "Представления, не содержащие обновляемых столбцов, не обновляются " "автоматически." -#: rewrite/rewriteHandler.c:2737 +#: rewrite/rewriteHandler.c:2823 #, c-format msgid "cannot insert into column \"%s\" of view \"%s\"" msgstr "вставить данные в столбец \"%s\" представления \"%s\" нельзя" -#: rewrite/rewriteHandler.c:2745 +#: rewrite/rewriteHandler.c:2831 #, c-format msgid "cannot update column \"%s\" of view \"%s\"" msgstr "изменить данные в столбце \"%s\" представления \"%s\" нельзя" -#: rewrite/rewriteHandler.c:3148 +#: rewrite/rewriteHandler.c:3214 #, c-format msgid "" "DO INSTEAD NOTHING rules are not supported for data-modifying statements in " @@ -19157,7 +19413,7 @@ msgstr "" "правила DO INSTEAD NOTHING не поддерживаются в операторах, изменяющих " "данные, в WITH" -#: rewrite/rewriteHandler.c:3162 +#: rewrite/rewriteHandler.c:3228 #, c-format msgid "" "conditional DO INSTEAD rules are not supported for data-modifying statements " @@ -19166,13 +19422,13 @@ msgstr "" "условные правила DO INSTEAD не поддерживаются для операторов, изменяющих " "данные, в WITH" -#: rewrite/rewriteHandler.c:3166 +#: rewrite/rewriteHandler.c:3232 #, c-format msgid "DO ALSO rules are not supported for data-modifying statements in WITH" msgstr "" "правила DO ALSO не поддерживаются для операторов, изменяющих данные, в WITH" -#: rewrite/rewriteHandler.c:3171 +#: rewrite/rewriteHandler.c:3237 #, c-format msgid "" "multi-statement DO INSTEAD rules are not supported for data-modifying " @@ -19181,43 +19437,43 @@ msgstr "" "составные правила DO INSTEAD не поддерживаются для операторов, изменяющих " "данные, в WITH" -#: rewrite/rewriteHandler.c:3386 +#: rewrite/rewriteHandler.c:3456 #, c-format msgid "cannot perform INSERT RETURNING on relation \"%s\"" msgstr "выполнить INSERT RETURNING для отношения \"%s\" нельзя" -#: rewrite/rewriteHandler.c:3388 +#: rewrite/rewriteHandler.c:3458 #, c-format msgid "" "You need an unconditional ON INSERT DO INSTEAD rule with a RETURNING clause." msgstr "" "Необходимо безусловное правило ON INSERT DO INSTEAD с предложением RETURNING." -#: rewrite/rewriteHandler.c:3393 +#: rewrite/rewriteHandler.c:3463 #, c-format msgid "cannot perform UPDATE RETURNING on relation \"%s\"" msgstr "выполнить UPDATE RETURNING для отношения \"%s\" нельзя" -#: rewrite/rewriteHandler.c:3395 +#: rewrite/rewriteHandler.c:3465 #, c-format msgid "" "You need an unconditional ON UPDATE DO INSTEAD rule with a RETURNING clause." 
msgstr "" "Необходимо безусловное правило ON UPDATE DO INSTEAD с предложением RETURNING." -#: rewrite/rewriteHandler.c:3400 +#: rewrite/rewriteHandler.c:3470 #, c-format msgid "cannot perform DELETE RETURNING on relation \"%s\"" msgstr "выполнить DELETE RETURNING для отношения \"%s\" нельзя" -#: rewrite/rewriteHandler.c:3402 +#: rewrite/rewriteHandler.c:3472 #, c-format msgid "" "You need an unconditional ON DELETE DO INSTEAD rule with a RETURNING clause." msgstr "" "Необходимо безусловное правило ON DELETE DO INSTEAD с предложением RETURNING." -#: rewrite/rewriteHandler.c:3420 +#: rewrite/rewriteHandler.c:3490 #, c-format msgid "" "INSERT with ON CONFLICT clause cannot be used with table that has INSERT or " @@ -19226,7 +19482,7 @@ msgstr "" "INSERT c предложением ON CONFLICT нельзя использовать с таблицей, для " "которой заданы правила INSERT или UPDATE" -#: rewrite/rewriteHandler.c:3477 +#: rewrite/rewriteHandler.c:3547 #, c-format msgid "" "WITH cannot be used in a query that is rewritten by rules into multiple " @@ -19245,7 +19501,7 @@ msgstr "условные служебные операторы не реализ msgid "WHERE CURRENT OF on a view is not implemented" msgstr "условие WHERE CURRENT OF для представлений не реализовано" -#: rewrite/rewriteManip.c:1434 +#: rewrite/rewriteManip.c:1463 #, c-format msgid "" "NEW variables in ON UPDATE rules cannot reference columns that are part of a " @@ -19280,48 +19536,48 @@ msgstr "нераспознанный параметр Snowball: \"%s\"" msgid "missing Language parameter" msgstr "отсутствует параметр Language" -#: statistics/extended_stats.c:228 +#: statistics/dependencies.c:534 #, c-format -msgid "" -"extended statistics could not be collected for column \"%s\" of relation %s." -"%s" -msgstr "" -"для столбца \"%s\" отношения %s.%s нельзя собрать расширенную статистику" +msgid "invalid zero-length item array in MVDependencies" +msgstr "недопустимый массив нулевой длины в MVDependencies" + +#: statistics/dependencies.c:665 statistics/dependencies.c:718 +#: statistics/mvdistinct.c:338 statistics/mvdistinct.c:391 +#: utils/adt/pseudotypes.c:94 utils/adt/pseudotypes.c:122 +#: utils/adt/pseudotypes.c:147 utils/adt/pseudotypes.c:171 +#: utils/adt/pseudotypes.c:282 utils/adt/pseudotypes.c:307 +#: utils/adt/pseudotypes.c:335 utils/adt/pseudotypes.c:363 +#: utils/adt/pseudotypes.c:393 +#, c-format +msgid "cannot accept a value of type %s" +msgstr "значение типа %s нельзя ввести" -#: statistics/extended_stats.c:232 +#: statistics/extended_stats.c:103 #, c-format -msgid "Consider ALTER TABLE \"%s\".\"%s\" ALTER \"%s\" SET STATISTICS -1" -msgstr "Попробуйте ALTER TABLE \"%s\".\"%s\" ALTER \"%s\" SET STATISTICS -1" +msgid "" +"statistics object \"%s.%s\" could not be computed for relation \"%s.%s\"" +msgstr "" +"объект статистики \"%s.%s\" не может быть вычислен для отношения \"%s.%s\"" -#: statistics/mvdistinct.c:258 +#: statistics/mvdistinct.c:259 #, c-format msgid "invalid ndistinct magic %08x (expected %08x)" msgstr "неверное магическое число ndistinct: %08x (ожидалось: %08x)" -#: statistics/mvdistinct.c:263 +#: statistics/mvdistinct.c:264 #, c-format msgid "invalid ndistinct type %d (expected %d)" msgstr "неверный тип ndistinct: %d (ожидался: %d)" -#: statistics/mvdistinct.c:268 +#: statistics/mvdistinct.c:269 #, c-format msgid "invalid zero-length item array in MVNDistinct" msgstr "недопустимый массив нулевой длины в MVNDistinct" -#: statistics/mvdistinct.c:277 +#: statistics/mvdistinct.c:278 #, c-format -msgid "invalid MVNDistinct size %ld (expected at least %ld)" -msgstr "неправильный 
размер MVNDistinct: %ld (ожидался не меньше %ld)" - -#: statistics/mvdistinct.c:337 statistics/mvdistinct.c:385 -#: utils/adt/pseudotypes.c:94 utils/adt/pseudotypes.c:122 -#: utils/adt/pseudotypes.c:147 utils/adt/pseudotypes.c:171 -#: utils/adt/pseudotypes.c:282 utils/adt/pseudotypes.c:307 -#: utils/adt/pseudotypes.c:335 utils/adt/pseudotypes.c:363 -#: utils/adt/pseudotypes.c:393 -#, c-format -msgid "cannot accept a value of type %s" -msgstr "значение типа %s нельзя ввести" +msgid "invalid MVNDistinct size %zd (expected at least %zd)" +msgstr "неправильный размер MVNDistinct: %zd (ожидался не меньше %zd)" #: storage/buffer/bufmgr.c:544 storage/buffer/bufmgr.c:657 #, c-format @@ -19362,7 +19618,7 @@ msgstr "Множественные сбои - возможно, постоянн msgid "writing block %u of relation %s" msgstr "запись блока %u отношения %s" -#: storage/buffer/bufmgr.c:4356 +#: storage/buffer/bufmgr.c:4358 #, c-format msgid "snapshot too old" msgstr "снимок слишком стар" @@ -19377,75 +19633,75 @@ msgstr "нет пустого локального буфера" msgid "cannot access temporary tables during a parallel operation" msgstr "обращаться к временным таблицам во время параллельных операций нельзя" -#: storage/file/fd.c:443 storage/file/fd.c:515 storage/file/fd.c:551 +#: storage/file/fd.c:442 storage/file/fd.c:514 storage/file/fd.c:550 #, c-format msgid "could not flush dirty data: %m" msgstr "не удалось сбросить грязные данные: %m" -#: storage/file/fd.c:473 +#: storage/file/fd.c:472 #, c-format msgid "could not determine dirty data size: %m" msgstr "не удалось определить размер грязных данных: %m" -#: storage/file/fd.c:525 +#: storage/file/fd.c:524 #, c-format msgid "could not munmap() while flushing data: %m" msgstr "ошибка в munmap() при сбросе данных на диск: %m" -#: storage/file/fd.c:726 +#: storage/file/fd.c:725 #, c-format msgid "could not link file \"%s\" to \"%s\": %m" msgstr "для файла \"%s\" не удалось создать ссылку \"%s\": %m" -#: storage/file/fd.c:820 +#: storage/file/fd.c:819 #, c-format msgid "getrlimit failed: %m" msgstr "ошибка в getrlimit(): %m" -#: storage/file/fd.c:910 +#: storage/file/fd.c:909 #, c-format msgid "insufficient file descriptors available to start server process" msgstr "недостаточно дескрипторов файлов для запуска серверного процесса" -#: storage/file/fd.c:911 +#: storage/file/fd.c:910 #, c-format msgid "System allows %d, we need at least %d." msgstr "Система выделяет: %d, а требуется минимум: %d." 
-#: storage/file/fd.c:952 storage/file/fd.c:2129 storage/file/fd.c:2222 -#: storage/file/fd.c:2370 +#: storage/file/fd.c:951 storage/file/fd.c:2134 storage/file/fd.c:2227 +#: storage/file/fd.c:2379 #, c-format msgid "out of file descriptors: %m; release and retry" msgstr "нехватка дескрипторов файлов: %m; освободите их и повторите попытку" -#: storage/file/fd.c:1557 +#: storage/file/fd.c:1562 #, c-format msgid "temporary file: path \"%s\", size %lu" msgstr "временный файл: путь \"%s\", размер %lu" -#: storage/file/fd.c:1760 +#: storage/file/fd.c:1765 #, c-format msgid "temporary file size exceeds temp_file_limit (%dkB)" msgstr "размер временного файла превышает предел temp_file_limit (%d КБ)" -#: storage/file/fd.c:2105 storage/file/fd.c:2155 +#: storage/file/fd.c:2110 storage/file/fd.c:2160 #, c-format msgid "exceeded maxAllocatedDescs (%d) while trying to open file \"%s\"" msgstr "превышен предел maxAllocatedDescs (%d) при попытке открыть файл \"%s\"" -#: storage/file/fd.c:2195 +#: storage/file/fd.c:2200 #, c-format msgid "exceeded maxAllocatedDescs (%d) while trying to execute command \"%s\"" msgstr "" "превышен предел maxAllocatedDescs (%d) при попытке выполнить команду \"%s\"" -#: storage/file/fd.c:2346 +#: storage/file/fd.c:2355 #, c-format msgid "exceeded maxAllocatedDescs (%d) while trying to open directory \"%s\"" msgstr "" "превышен предел maxAllocatedDescs (%d) при попытке открыть каталог \"%s\"" -#: storage/file/fd.c:2432 utils/adt/genfile.c:511 +#: storage/file/fd.c:2446 utils/adt/genfile.c:511 #, c-format msgid "could not read directory \"%s\": %m" msgstr "не удалось прочитать каталог \"%s\": %m" @@ -19476,85 +19732,80 @@ msgstr "сегмент управления динамической разде msgid "too many dynamic shared memory segments" msgstr "слишком много сегментов динамической разделяемой памяти" -#: storage/ipc/dsm_impl.c:261 storage/ipc/dsm_impl.c:361 -#: storage/ipc/dsm_impl.c:533 storage/ipc/dsm_impl.c:648 -#: storage/ipc/dsm_impl.c:819 storage/ipc/dsm_impl.c:963 +#: storage/ipc/dsm_impl.c:262 storage/ipc/dsm_impl.c:363 +#: storage/ipc/dsm_impl.c:580 storage/ipc/dsm_impl.c:695 +#: storage/ipc/dsm_impl.c:866 storage/ipc/dsm_impl.c:1010 #, c-format msgid "could not unmap shared memory segment \"%s\": %m" msgstr "не удалось освободить сегмент разделяемой памяти %s: %m" -#: storage/ipc/dsm_impl.c:271 storage/ipc/dsm_impl.c:543 -#: storage/ipc/dsm_impl.c:658 storage/ipc/dsm_impl.c:829 +#: storage/ipc/dsm_impl.c:272 storage/ipc/dsm_impl.c:590 +#: storage/ipc/dsm_impl.c:705 storage/ipc/dsm_impl.c:876 #, c-format msgid "could not remove shared memory segment \"%s\": %m" msgstr "ошибка при удалении сегмента разделяемой памяти \"%s\": %m" -#: storage/ipc/dsm_impl.c:292 storage/ipc/dsm_impl.c:729 -#: storage/ipc/dsm_impl.c:843 +#: storage/ipc/dsm_impl.c:293 storage/ipc/dsm_impl.c:776 +#: storage/ipc/dsm_impl.c:890 #, c-format msgid "could not open shared memory segment \"%s\": %m" msgstr "не удалось открыть сегмент разделяемой памяти \"%s\": %m" -#: storage/ipc/dsm_impl.c:316 storage/ipc/dsm_impl.c:559 -#: storage/ipc/dsm_impl.c:774 storage/ipc/dsm_impl.c:867 +#: storage/ipc/dsm_impl.c:317 storage/ipc/dsm_impl.c:606 +#: storage/ipc/dsm_impl.c:821 storage/ipc/dsm_impl.c:914 #, c-format msgid "could not stat shared memory segment \"%s\": %m" msgstr "не удалось обратиться к сегменту разделяемой памяти \"%s\": %m" -#: storage/ipc/dsm_impl.c:335 storage/ipc/dsm_impl.c:886 -#: storage/ipc/dsm_impl.c:936 +#: storage/ipc/dsm_impl.c:337 storage/ipc/dsm_impl.c:933 +#: storage/ipc/dsm_impl.c:983 #, c-format msgid 
"could not resize shared memory segment \"%s\" to %zu bytes: %m" msgstr "" "не удалось изменить размер сегмента разделяемой памяти \"%s\" до %zu байт: %m" -#: storage/ipc/dsm_impl.c:385 storage/ipc/dsm_impl.c:580 -#: storage/ipc/dsm_impl.c:750 storage/ipc/dsm_impl.c:987 +#: storage/ipc/dsm_impl.c:387 storage/ipc/dsm_impl.c:627 +#: storage/ipc/dsm_impl.c:797 storage/ipc/dsm_impl.c:1034 #, c-format msgid "could not map shared memory segment \"%s\": %m" msgstr "не удалось отобразить сегмент разделяемой памяти \"%s\": %m" -#: storage/ipc/dsm_impl.c:515 +#: storage/ipc/dsm_impl.c:562 #, c-format msgid "could not get shared memory segment: %m" msgstr "не удалось получить сегмент разделяемой памяти: %m" -#: storage/ipc/dsm_impl.c:714 +#: storage/ipc/dsm_impl.c:761 #, c-format msgid "could not create shared memory segment \"%s\": %m" msgstr "не удалось создать сегмент разделяемой памяти \"%s\": %m" -#: storage/ipc/dsm_impl.c:1029 storage/ipc/dsm_impl.c:1077 +#: storage/ipc/dsm_impl.c:1076 storage/ipc/dsm_impl.c:1124 #, c-format msgid "could not duplicate handle for \"%s\": %m" msgstr "не удалось продублировать указатель для \"%s\": %m" -#: storage/ipc/latch.c:780 +#: storage/ipc/latch.c:829 #, c-format msgid "epoll_ctl() failed: %m" msgstr "ошибка в epoll_ctl(): %m" -#: storage/ipc/latch.c:1009 +#: storage/ipc/latch.c:1060 #, c-format msgid "epoll_wait() failed: %m" msgstr "ошибка в epoll_wait(): %m" -#: storage/ipc/latch.c:1129 +#: storage/ipc/latch.c:1182 #, c-format msgid "poll() failed: %m" msgstr "ошибка в poll(): %m" -#: storage/ipc/latch.c:1287 -#, c-format -msgid "select() failed: %m" -msgstr "ошибка в select(): %m" - -#: storage/ipc/shm_toc.c:108 storage/ipc/shm_toc.c:189 storage/lmgr/lock.c:883 +#: storage/ipc/shm_toc.c:108 storage/ipc/shm_toc.c:190 storage/lmgr/lock.c:883 #: storage/lmgr/lock.c:917 storage/lmgr/lock.c:2679 storage/lmgr/lock.c:4004 #: storage/lmgr/lock.c:4069 storage/lmgr/lock.c:4361 -#: storage/lmgr/predicate.c:2318 storage/lmgr/predicate.c:2333 -#: storage/lmgr/predicate.c:3725 storage/lmgr/predicate.c:4868 -#: utils/hash/dynahash.c:1043 +#: storage/lmgr/predicate.c:2401 storage/lmgr/predicate.c:2416 +#: storage/lmgr/predicate.c:3808 storage/lmgr/predicate.c:4951 +#: utils/hash/dynahash.c:1061 #, c-format msgid "out of shared memory" msgstr "нехватка разделяемой памяти" @@ -19591,13 +19842,13 @@ msgstr "" msgid "requested shared memory size overflows size_t" msgstr "запрошенный размер разделяемой памяти не умещается в size_t" -#: storage/ipc/standby.c:531 tcop/postgres.c:2970 +#: storage/ipc/standby.c:531 tcop/postgres.c:2983 #, c-format msgid "canceling statement due to conflict with recovery" msgstr "" "выполнение оператора отменено из-за конфликта с процессом восстановления" -#: storage/ipc/standby.c:532 tcop/postgres.c:2277 +#: storage/ipc/standby.c:532 tcop/postgres.c:2271 #, c-format msgid "User transaction caused buffer deadlock with recovery." msgstr "" @@ -19646,98 +19897,98 @@ msgstr "обнаружена взаимоблокировка" msgid "See server log for query details." msgstr "Подробности запроса смотрите в протоколе сервера." 
-#: storage/lmgr/lmgr.c:719 +#: storage/lmgr/lmgr.c:745 #, c-format msgid "while updating tuple (%u,%u) in relation \"%s\"" msgstr "при изменении кортежа (%u,%u) в отношении \"%s\"" -#: storage/lmgr/lmgr.c:722 +#: storage/lmgr/lmgr.c:748 #, c-format msgid "while deleting tuple (%u,%u) in relation \"%s\"" msgstr "при удалении кортежа (%u,%u) в отношении \"%s\"" -#: storage/lmgr/lmgr.c:725 +#: storage/lmgr/lmgr.c:751 #, c-format msgid "while locking tuple (%u,%u) in relation \"%s\"" msgstr "при блокировке кортежа (%u,%u) в отношении \"%s\"" -#: storage/lmgr/lmgr.c:728 +#: storage/lmgr/lmgr.c:754 #, c-format msgid "while locking updated version (%u,%u) of tuple in relation \"%s\"" msgstr "при блокировке изменённой версии (%u,%u) кортежа в отношении \"%s\"" -#: storage/lmgr/lmgr.c:731 +#: storage/lmgr/lmgr.c:757 #, c-format msgid "while inserting index tuple (%u,%u) in relation \"%s\"" msgstr "при добавлении кортежа индекса (%u,%u) в отношении \"%s\"" -#: storage/lmgr/lmgr.c:734 +#: storage/lmgr/lmgr.c:760 #, c-format msgid "while checking uniqueness of tuple (%u,%u) in relation \"%s\"" msgstr "при проверке уникальности кортежа (%u,%u) в отношении \"%s\"" -#: storage/lmgr/lmgr.c:737 +#: storage/lmgr/lmgr.c:763 #, c-format msgid "while rechecking updated tuple (%u,%u) in relation \"%s\"" msgstr "при перепроверке изменённого кортежа (%u,%u) в отношении \"%s\"" -#: storage/lmgr/lmgr.c:740 +#: storage/lmgr/lmgr.c:766 #, c-format msgid "while checking exclusion constraint on tuple (%u,%u) in relation \"%s\"" msgstr "" "при проверке ограничения-исключения для кортежа (%u,%u) в отношении \"%s\"" -#: storage/lmgr/lmgr.c:960 +#: storage/lmgr/lmgr.c:986 #, c-format msgid "relation %u of database %u" msgstr "отношение %u базы данных %u" -#: storage/lmgr/lmgr.c:966 +#: storage/lmgr/lmgr.c:992 #, c-format msgid "extension of relation %u of database %u" msgstr "расширение отношения %u базы данных %u" -#: storage/lmgr/lmgr.c:972 +#: storage/lmgr/lmgr.c:998 #, c-format msgid "page %u of relation %u of database %u" msgstr "страница %u отношения %u базы данных %u" -#: storage/lmgr/lmgr.c:979 +#: storage/lmgr/lmgr.c:1005 #, c-format msgid "tuple (%u,%u) of relation %u of database %u" msgstr "кортеж (%u,%u) отношения %u базы данных %u" -#: storage/lmgr/lmgr.c:987 +#: storage/lmgr/lmgr.c:1013 #, c-format msgid "transaction %u" msgstr "транзакция %u" -#: storage/lmgr/lmgr.c:992 +#: storage/lmgr/lmgr.c:1018 #, c-format msgid "virtual transaction %d/%u" msgstr "виртуальная транзакция %d/%u" -#: storage/lmgr/lmgr.c:998 +#: storage/lmgr/lmgr.c:1024 #, c-format msgid "speculative token %u of transaction %u" msgstr "спекулятивный маркер %u транзакции %u" -#: storage/lmgr/lmgr.c:1004 +#: storage/lmgr/lmgr.c:1030 #, c-format msgid "object %u of class %u of database %u" msgstr "объект %u класса %u базы данных %u" -#: storage/lmgr/lmgr.c:1012 +#: storage/lmgr/lmgr.c:1038 #, c-format msgid "user lock [%u,%u,%u]" msgstr "пользовательская блокировка [%u,%u,%u]" -#: storage/lmgr/lmgr.c:1019 +#: storage/lmgr/lmgr.c:1045 #, c-format msgid "advisory lock [%u,%u,%u,%u]" msgstr "рекомендательная блокировка [%u,%u,%u,%u]" -#: storage/lmgr/lmgr.c:1027 +#: storage/lmgr/lmgr.c:1053 #, c-format msgid "unrecognized locktag type %d" msgstr "нераспознанный тип блокировки %d" @@ -19774,12 +20025,12 @@ msgstr "" "нельзя выполнить PREPARE, удерживая блокировки на уровне сеанса и на уровне " "транзакции для одного объекта" -#: storage/lmgr/predicate.c:676 +#: storage/lmgr/predicate.c:686 #, c-format msgid "not enough elements in RWConflictPool 
to record a read/write conflict" msgstr "в пуле недостаточно элементов для записи о конфликте чтения/записи" -#: storage/lmgr/predicate.c:677 storage/lmgr/predicate.c:705 +#: storage/lmgr/predicate.c:687 storage/lmgr/predicate.c:715 #, c-format msgid "" "You might need to run fewer transactions at a time or increase " @@ -19788,7 +20039,7 @@ msgstr "" "Попробуйте уменьшить число транзакций за раз или увеличить параметр " "max_connections." -#: storage/lmgr/predicate.c:704 +#: storage/lmgr/predicate.c:714 #, c-format msgid "" "not enough elements in RWConflictPool to record a potential read/write " @@ -19797,12 +20048,12 @@ msgstr "" "в пуле недостаточно элементов для записи о потенциальном конфликте чтения/" "записи" -#: storage/lmgr/predicate.c:910 +#: storage/lmgr/predicate.c:921 #, c-format msgid "memory for serializable conflict tracking is nearly exhausted" msgstr "память для отслеживания конфликтов сериализации практически исчерпана" -#: storage/lmgr/predicate.c:911 +#: storage/lmgr/predicate.c:922 #, c-format msgid "" "There might be an idle transaction or a forgotten prepared transaction " @@ -19811,18 +20062,18 @@ msgstr "" "Вероятно, эта ситуация вызвана забытой подготовленной транзакцией или " "транзакцией, простаивающей долгое время." -#: storage/lmgr/predicate.c:1538 +#: storage/lmgr/predicate.c:1561 #, c-format msgid "deferrable snapshot was unsafe; trying a new one" msgstr "откладываемый снимок был небезопасен; пробуем более новый" -#: storage/lmgr/predicate.c:1577 +#: storage/lmgr/predicate.c:1650 #, c-format msgid "\"default_transaction_isolation\" is set to \"serializable\"." msgstr "" "Параметр \"default_transaction_isolation\" имеет значение \"serializable\"." -#: storage/lmgr/predicate.c:1578 +#: storage/lmgr/predicate.c:1651 #, c-format msgid "" "You can use \"SET default_transaction_isolation = 'repeatable read'\" to " @@ -19831,34 +20082,34 @@ msgstr "" "Чтобы изменить режим по умолчанию, выполните \"SET " "default_transaction_isolation = 'repeatable read'\"." -#: storage/lmgr/predicate.c:1617 +#: storage/lmgr/predicate.c:1691 #, c-format msgid "a snapshot-importing transaction must not be READ ONLY DEFERRABLE" msgstr "транзакция, импортирующая снимок, не должна быть READ ONLY DEFERRABLE" -#: storage/lmgr/predicate.c:1695 utils/time/snapmgr.c:617 -#: utils/time/snapmgr.c:623 +#: storage/lmgr/predicate.c:1771 utils/time/snapmgr.c:621 +#: utils/time/snapmgr.c:627 #, c-format msgid "could not import the requested snapshot" msgstr "не удалось импортировать запрошенный снимок" -#: storage/lmgr/predicate.c:1696 utils/time/snapmgr.c:624 +#: storage/lmgr/predicate.c:1772 utils/time/snapmgr.c:628 #, c-format -msgid "The source transaction %u is not running anymore." -msgstr "Исходная транзакция %u уже не выполняется." +msgid "The source process with PID %d is not running anymore." +msgstr "Исходный процесс с PID %d уже не работает." -#: storage/lmgr/predicate.c:2319 storage/lmgr/predicate.c:2334 -#: storage/lmgr/predicate.c:3726 +#: storage/lmgr/predicate.c:2402 storage/lmgr/predicate.c:2417 +#: storage/lmgr/predicate.c:3809 #, c-format msgid "You might need to increase max_pred_locks_per_transaction." msgstr "" "Возможно, следует увеличить значение параметра max_pred_locks_per_transaction."
-#: storage/lmgr/predicate.c:3880 storage/lmgr/predicate.c:3969 -#: storage/lmgr/predicate.c:3977 storage/lmgr/predicate.c:4016 -#: storage/lmgr/predicate.c:4255 storage/lmgr/predicate.c:4592 -#: storage/lmgr/predicate.c:4604 storage/lmgr/predicate.c:4646 -#: storage/lmgr/predicate.c:4684 +#: storage/lmgr/predicate.c:3963 storage/lmgr/predicate.c:4052 +#: storage/lmgr/predicate.c:4060 storage/lmgr/predicate.c:4099 +#: storage/lmgr/predicate.c:4338 storage/lmgr/predicate.c:4675 +#: storage/lmgr/predicate.c:4687 storage/lmgr/predicate.c:4729 +#: storage/lmgr/predicate.c:4767 #, c-format msgid "" "could not serialize access due to read/write dependencies among transactions" @@ -19866,11 +20117,11 @@ msgstr "" "не удалось сериализовать доступ из-за зависимостей чтения/записи между " "транзакциями" -#: storage/lmgr/predicate.c:3882 storage/lmgr/predicate.c:3971 -#: storage/lmgr/predicate.c:3979 storage/lmgr/predicate.c:4018 -#: storage/lmgr/predicate.c:4257 storage/lmgr/predicate.c:4594 -#: storage/lmgr/predicate.c:4606 storage/lmgr/predicate.c:4648 -#: storage/lmgr/predicate.c:4686 +#: storage/lmgr/predicate.c:3965 storage/lmgr/predicate.c:4054 +#: storage/lmgr/predicate.c:4062 storage/lmgr/predicate.c:4101 +#: storage/lmgr/predicate.c:4340 storage/lmgr/predicate.c:4677 +#: storage/lmgr/predicate.c:4689 storage/lmgr/predicate.c:4731 +#: storage/lmgr/predicate.c:4769 #, c-format msgid "The transaction might succeed if retried." msgstr "Транзакция может завершиться успешно при следующей попытке." @@ -19949,12 +20200,17 @@ msgstr "испорченный указатель элемента: %u" msgid "corrupted item lengths: total %u, available space %u" msgstr "испорченный размер элемента (общий размер: %u, доступно: %u)" -#: storage/page/bufpage.c:767 storage/page/bufpage.c:905 -#: storage/page/bufpage.c:993 storage/page/bufpage.c:1103 +#: storage/page/bufpage.c:767 storage/page/bufpage.c:993 +#: storage/page/bufpage.c:1103 #, c-format msgid "corrupted item pointer: offset = %u, size = %u" msgstr "испорченный указатель элемента: смещение = %u, размер = %u" +#: storage/page/bufpage.c:905 +#, c-format +msgid "corrupted item pointer: offset = %u, length = %u" +msgstr "испорченный указатель элемента: смещение = %u, длина = %u" + #: storage/smgr/md.c:448 storage/smgr/md.c:974 #, c-format msgid "could not truncate file \"%s\": %m" @@ -20043,100 +20299,100 @@ msgstr "" msgid "could not open file \"%s\" (target block %u): %m" msgstr "не удалось открыть файл file \"%s\" (целевой блок %u): %m" -#: tcop/fastpath.c:111 tcop/fastpath.c:475 tcop/fastpath.c:605 +#: tcop/fastpath.c:111 tcop/fastpath.c:463 tcop/fastpath.c:593 #, c-format msgid "invalid argument size %d in function call message" msgstr "неверный размер аргумента (%d) в сообщении вызова функции" -#: tcop/fastpath.c:319 +#: tcop/fastpath.c:309 #, c-format msgid "fastpath function call: \"%s\" (OID %u)" msgstr "вызов функции fastpath: \"%s\" (OID %u)" -#: tcop/fastpath.c:401 tcop/postgres.c:1175 tcop/postgres.c:1438 -#: tcop/postgres.c:1818 tcop/postgres.c:2036 +#: tcop/fastpath.c:391 tcop/postgres.c:1169 tcop/postgres.c:1432 +#: tcop/postgres.c:1812 tcop/postgres.c:2030 #, c-format msgid "duration: %s ms" msgstr "продолжительность: %s мс" -#: tcop/fastpath.c:405 +#: tcop/fastpath.c:395 #, c-format msgid "duration: %s ms fastpath function call: \"%s\" (OID %u)" msgstr "продолжительность %s мс, вызов функции fastpath: \"%s\" (OID %u)" -#: tcop/fastpath.c:443 tcop/fastpath.c:570 +#: tcop/fastpath.c:431 tcop/fastpath.c:558 #, c-format msgid "function call message contains %d 
arguments but function requires %d" msgstr "" "сообщение вызова функции содержит неверное число аргументов (%d, а требуется " "%d)" -#: tcop/fastpath.c:451 +#: tcop/fastpath.c:439 #, c-format msgid "function call message contains %d argument formats but %d arguments" msgstr "" "сообщение вызова функции содержит неверное число форматов (%d, а аргументов " "%d)" -#: tcop/fastpath.c:538 tcop/fastpath.c:621 +#: tcop/fastpath.c:526 tcop/fastpath.c:609 #, c-format msgid "incorrect binary data format in function argument %d" msgstr "неправильный формат двоичных данных в аргументе функции %d" -#: tcop/postgres.c:352 tcop/postgres.c:388 tcop/postgres.c:415 +#: tcop/postgres.c:346 tcop/postgres.c:382 tcop/postgres.c:409 #, c-format msgid "unexpected EOF on client connection" msgstr "неожиданный обрыв соединения с клиентом" -#: tcop/postgres.c:438 tcop/postgres.c:450 tcop/postgres.c:461 -#: tcop/postgres.c:473 tcop/postgres.c:4313 +#: tcop/postgres.c:432 tcop/postgres.c:444 tcop/postgres.c:455 +#: tcop/postgres.c:467 tcop/postgres.c:4314 #, c-format msgid "invalid frontend message type %d" msgstr "неправильный тип клиентского сообщения %d" -#: tcop/postgres.c:944 +#: tcop/postgres.c:938 #, c-format msgid "statement: %s" msgstr "оператор: %s" -#: tcop/postgres.c:1180 +#: tcop/postgres.c:1174 #, c-format msgid "duration: %s ms statement: %s" msgstr "продолжительность: %s мс, оператор: %s" -#: tcop/postgres.c:1230 +#: tcop/postgres.c:1224 #, c-format msgid "parse %s: %s" msgstr "разбор %s: %s" -#: tcop/postgres.c:1286 +#: tcop/postgres.c:1280 #, c-format msgid "cannot insert multiple commands into a prepared statement" msgstr "в подготовленный оператор нельзя вставить несколько команд" -#: tcop/postgres.c:1443 +#: tcop/postgres.c:1437 #, c-format msgid "duration: %s ms parse %s: %s" msgstr "продолжительность: %s мс, разбор %s: %s" -#: tcop/postgres.c:1488 +#: tcop/postgres.c:1482 #, c-format msgid "bind %s to %s" msgstr "привязка %s к %s" # [SM]: TO REVIEW -#: tcop/postgres.c:1507 tcop/postgres.c:2326 +#: tcop/postgres.c:1501 tcop/postgres.c:2320 #, c-format msgid "unnamed prepared statement does not exist" msgstr "безымянный подготовленный оператор не существует" -#: tcop/postgres.c:1549 +#: tcop/postgres.c:1543 #, c-format msgid "bind message has %d parameter formats but %d parameters" msgstr "" "неверное число форматов параметров в сообщении Bind (%d, а параметров %d)" -#: tcop/postgres.c:1555 +#: tcop/postgres.c:1549 #, c-format msgid "" "bind message supplies %d parameters, but prepared statement \"%s\" requires " @@ -20145,88 +20401,88 @@ msgstr "" "в сообщении Bind передано неверное число параметров (%d, а подготовленный " "оператор \"%s\" требует %d)" -#: tcop/postgres.c:1725 +#: tcop/postgres.c:1719 #, c-format msgid "incorrect binary data format in bind parameter %d" msgstr "неверный формат двоичных данных в параметре Bind %d" -#: tcop/postgres.c:1823 +#: tcop/postgres.c:1817 #, c-format msgid "duration: %s ms bind %s%s%s: %s" msgstr "продолжительность: %s мс, сообщение Bind %s%s%s: %s" -#: tcop/postgres.c:1871 tcop/postgres.c:2406 +#: tcop/postgres.c:1865 tcop/postgres.c:2400 #, c-format msgid "portal \"%s\" does not exist" msgstr "портал \"%s\" не существует" -#: tcop/postgres.c:1956 +#: tcop/postgres.c:1950 #, c-format msgid "%s %s%s%s: %s" msgstr "%s %s%s%s: %s" -#: tcop/postgres.c:1958 tcop/postgres.c:2044 +#: tcop/postgres.c:1952 tcop/postgres.c:2038 msgid "execute fetch from" msgstr "выборка из" -#: tcop/postgres.c:1959 tcop/postgres.c:2045 +#: tcop/postgres.c:1953 tcop/postgres.c:2039 
msgid "execute" msgstr "выполнение" -#: tcop/postgres.c:2041 +#: tcop/postgres.c:2035 #, c-format msgid "duration: %s ms %s %s%s%s: %s" msgstr "продолжительность: %s мс %s %s%s%s: %s" -#: tcop/postgres.c:2167 +#: tcop/postgres.c:2161 #, c-format msgid "prepare: %s" msgstr "подготовка: %s" -#: tcop/postgres.c:2230 +#: tcop/postgres.c:2224 #, c-format msgid "parameters: %s" msgstr "параметры: %s" -#: tcop/postgres.c:2249 +#: tcop/postgres.c:2243 #, c-format msgid "abort reason: recovery conflict" msgstr "причина прерывания: конфликт при восстановлении" -#: tcop/postgres.c:2265 +#: tcop/postgres.c:2259 #, c-format msgid "User was holding shared buffer pin for too long." msgstr "Пользователь удерживал фиксатор разделяемого буфера слишком долго." -#: tcop/postgres.c:2268 +#: tcop/postgres.c:2262 #, c-format msgid "User was holding a relation lock for too long." msgstr "Пользователь удерживал блокировку таблицы слишком долго." -#: tcop/postgres.c:2271 +#: tcop/postgres.c:2265 #, c-format msgid "User was or might have been using tablespace that must be dropped." msgstr "" "Пользователь использовал табличное пространство, которое должно быть удалено." -#: tcop/postgres.c:2274 +#: tcop/postgres.c:2268 #, c-format msgid "User query might have needed to see row versions that must be removed." msgstr "" "Запросу пользователя нужно было видеть версии строк, которые должны быть " "удалены." -#: tcop/postgres.c:2280 +#: tcop/postgres.c:2274 #, c-format msgid "User was connected to a database that must be dropped." msgstr "Пользователь был подключён к базе данных, которая должна быть удалена." -#: tcop/postgres.c:2589 +#: tcop/postgres.c:2583 #, c-format msgid "terminating connection because of crash of another server process" msgstr "закрытие подключения из-за краха другого серверного процесса" -#: tcop/postgres.c:2590 +#: tcop/postgres.c:2584 #, c-format msgid "" "The postmaster has commanded this server process to roll back the current " @@ -20237,7 +20493,7 @@ msgstr "" "транзакцию и завершиться, так как другой серверный процесс завершился " "аварийно и возможно разрушил разделяемую память." -#: tcop/postgres.c:2594 tcop/postgres.c:2898 +#: tcop/postgres.c:2588 tcop/postgres.c:2913 #, c-format msgid "" "In a moment you should be able to reconnect to the database and repeat your " @@ -20246,12 +20502,12 @@ msgstr "" "Вы сможете переподключиться к базе данных и повторить вашу команду сию " "минуту." -#: tcop/postgres.c:2680 +#: tcop/postgres.c:2674 #, c-format msgid "floating-point exception" msgstr "исключение в операции с плавающей точкой" -#: tcop/postgres.c:2681 +#: tcop/postgres.c:2675 #, c-format msgid "" "An invalid floating-point operation was signaled. 
This probably means an out-" @@ -20271,52 +20527,62 @@ msgstr "отмена проверки подлинности из-за тайм msgid "terminating autovacuum process due to administrator command" msgstr "прекращение процесса автоочистки по команде администратора" -#: tcop/postgres.c:2853 tcop/postgres.c:2863 tcop/postgres.c:2896 +#: tcop/postgres.c:2851 +#, c-format +msgid "terminating logical replication worker due to administrator command" +msgstr "завершение обработчика логической репликации по команде администратора" + +#: tcop/postgres.c:2855 +#, c-format +msgid "logical replication launcher shutting down" +msgstr "процесс запуска логической репликации остановлен" + +#: tcop/postgres.c:2868 tcop/postgres.c:2878 tcop/postgres.c:2911 #, c-format msgid "terminating connection due to conflict with recovery" msgstr "закрытие подключения из-за конфликта с процессом восстановления" -#: tcop/postgres.c:2869 +#: tcop/postgres.c:2884 #, c-format msgid "terminating connection due to administrator command" msgstr "закрытие подключения по команде администратора" -#: tcop/postgres.c:2879 +#: tcop/postgres.c:2894 #, c-format msgid "connection to client lost" msgstr "подключение к клиенту потеряно" -#: tcop/postgres.c:2947 +#: tcop/postgres.c:2960 #, c-format msgid "canceling statement due to lock timeout" msgstr "выполнение оператора отменено из-за таймаута блокировки" -#: tcop/postgres.c:2954 +#: tcop/postgres.c:2967 #, c-format msgid "canceling statement due to statement timeout" msgstr "выполнение оператора отменено из-за таймаута" -#: tcop/postgres.c:2961 +#: tcop/postgres.c:2974 #, c-format msgid "canceling autovacuum task" msgstr "отмена задачи автоочистки" -#: tcop/postgres.c:2984 +#: tcop/postgres.c:2997 #, c-format msgid "canceling statement due to user request" msgstr "выполнение оператора отменено по запросу пользователя" -#: tcop/postgres.c:2994 +#: tcop/postgres.c:3007 #, c-format msgid "terminating connection due to idle-in-transaction timeout" msgstr "закрытие подключения из-за таймаута простоя в транзакции" -#: tcop/postgres.c:3108 +#: tcop/postgres.c:3121 #, c-format msgid "stack depth limit exceeded" msgstr "превышен предел глубины стека" -#: tcop/postgres.c:3109 +#: tcop/postgres.c:3122 #, c-format msgid "" "Increase the configuration parameter \"max_stack_depth\" (currently %dkB), " @@ -20326,12 +20592,12 @@ msgstr "" "КБ), предварительно убедившись, что ОС предоставляет достаточный размер " "стека." -#: tcop/postgres.c:3172 +#: tcop/postgres.c:3185 #, c-format msgid "\"max_stack_depth\" must not exceed %ldkB." msgstr "Значение \"max_stack_depth\" не должно превышать %ld КБ." -#: tcop/postgres.c:3174 +#: tcop/postgres.c:3187 #, c-format msgid "" "Increase the platform's stack depth limit via \"ulimit -s\" or local " @@ -20340,48 +20606,48 @@ msgstr "" "Увеличьте предел глубины стека в системе с помощью команды \"ulimit -s\" или " "эквивалента в вашей ОС." -#: tcop/postgres.c:3534 +#: tcop/postgres.c:3547 #, c-format msgid "invalid command-line argument for server process: %s" msgstr "неверный аргумент командной строки для серверного процесса: %s" -#: tcop/postgres.c:3535 tcop/postgres.c:3541 +#: tcop/postgres.c:3548 tcop/postgres.c:3554 #, c-format msgid "Try \"%s --help\" for more information." msgstr "Для дополнительной информации попробуйте \"%s --help\"." 
-#: tcop/postgres.c:3539 +#: tcop/postgres.c:3552 #, c-format msgid "%s: invalid command-line argument: %s" msgstr "%s: неверный аргумент командной строки: %s" -#: tcop/postgres.c:3601 +#: tcop/postgres.c:3614 #, c-format msgid "%s: no database nor user name specified" msgstr "%s: не указаны ни база данных, ни пользователь" -#: tcop/postgres.c:4221 +#: tcop/postgres.c:4222 #, c-format msgid "invalid CLOSE message subtype %d" msgstr "неверный подтип сообщения CLOSE: %d" -#: tcop/postgres.c:4256 +#: tcop/postgres.c:4257 #, c-format msgid "invalid DESCRIBE message subtype %d" msgstr "неверный подтип сообщения DESCRIBE: %d" -#: tcop/postgres.c:4334 +#: tcop/postgres.c:4335 #, c-format msgid "fastpath function calls not supported in a replication connection" msgstr "вызовы функции fastpath не поддерживаются для реплицирующих соединений" -#: tcop/postgres.c:4338 +#: tcop/postgres.c:4339 #, c-format msgid "extended query protocol not supported in a replication connection" msgstr "" "протокол расширенных запросов не поддерживается для реплицирующих соединений" -#: tcop/postgres.c:4508 +#: tcop/postgres.c:4509 #, c-format msgid "" "disconnection: session time: %d:%02d:%02d.%03d user=%s database=%s host=%s%s" @@ -20390,19 +20656,19 @@ msgstr "" "отключение: время сеанса: %d:%02d:%02d.%03d пользователь=%s база данных=%s " "компьютер=%s%s%s" -#: tcop/pquery.c:646 +#: tcop/pquery.c:645 #, c-format msgid "bind message has %d result formats but query has %d columns" msgstr "" "число форматов результатов в сообщении Bind (%d) не равно числу столбцов в " "запросе (%d)" -#: tcop/pquery.c:953 +#: tcop/pquery.c:952 #, c-format msgid "cursor can only scan forward" msgstr "курсор может сканировать только вперёд" -#: tcop/pquery.c:954 +#: tcop/pquery.c:953 #, c-format msgid "Declare it with SCROLL option to enable backward scan." msgstr "Добавьте в его объявление SCROLL, чтобы он мог перемещаться назад." 
@@ -20592,7 +20858,7 @@ msgid "invalid regular expression: %s" msgstr "неверное регулярное выражение: %s" #: tsearch/spell.c:954 tsearch/spell.c:971 tsearch/spell.c:988 -#: tsearch/spell.c:1005 tsearch/spell.c:1070 gram.y:15212 gram.y:15229 +#: tsearch/spell.c:1005 tsearch/spell.c:1070 gram.y:15291 gram.y:15308 #, c-format msgid "syntax error" msgstr "ошибка синтаксиса" @@ -20626,7 +20892,7 @@ msgstr "неверное количество векторов флагов" msgid "affix file contains both old-style and new-style commands" msgstr "файл аффиксов содержит команды и в старом, и в новом стиле" -#: tsearch/to_tsany.c:179 utils/adt/tsvector.c:271 utils/adt/tsvector_op.c:1134 +#: tsearch/to_tsany.c:185 utils/adt/tsvector.c:271 utils/adt/tsvector_op.c:1134 #, c-format msgid "string is too long for tsvector (%d bytes, max %d bytes)" msgstr "строка слишком длинна для tsvector (%d Б, при максимуме %d)" @@ -20663,7 +20929,7 @@ msgstr "неверное имя файла конфигурации тексто msgid "could not open stop-word file \"%s\": %m" msgstr "не удалось открыть файл стоп-слов \"%s\": %m" -#: tsearch/wparser.c:321 tsearch/wparser.c:408 tsearch/wparser.c:484 +#: tsearch/wparser.c:322 tsearch/wparser.c:410 tsearch/wparser.c:487 #, c-format msgid "text search parser does not support headline creation" msgstr "анализатор текстового поиска не поддерживает создание выдержек" @@ -20763,60 +21029,59 @@ msgstr "Массивы ACL не должны содержать значения msgid "extra garbage at the end of the ACL specification" msgstr "лишний мусор в конце спецификации ACL" -#: utils/adt/acl.c:1196 +#: utils/adt/acl.c:1198 #, c-format msgid "grant options cannot be granted back to your own grantor" msgstr "привилегию назначения прав нельзя вернуть тому, кто назначил её вам" -#: utils/adt/acl.c:1257 +#: utils/adt/acl.c:1259 #, c-format msgid "dependent privileges exist" msgstr "существуют зависимые права" -#: utils/adt/acl.c:1258 +#: utils/adt/acl.c:1260 #, c-format msgid "Use CASCADE to revoke them too." msgstr "Используйте CASCADE, чтобы отозвать и их." 
-#: utils/adt/acl.c:1520 +#: utils/adt/acl.c:1522 #, c-format msgid "aclinsert is no longer supported" msgstr "aclinsert больше не поддерживается" -#: utils/adt/acl.c:1530 +#: utils/adt/acl.c:1532 #, c-format msgid "aclremove is no longer supported" msgstr "aclremove больше не поддерживается" -#: utils/adt/acl.c:1616 utils/adt/acl.c:1670 +#: utils/adt/acl.c:1618 utils/adt/acl.c:1672 #, c-format msgid "unrecognized privilege type: \"%s\"" msgstr "нераспознанный тип прав: \"%s\"" -#: utils/adt/acl.c:3410 utils/adt/regproc.c:125 utils/adt/regproc.c:146 -#: utils/adt/regproc.c:321 +#: utils/adt/acl.c:3415 utils/adt/regproc.c:102 utils/adt/regproc.c:277 #, c-format msgid "function \"%s\" does not exist" msgstr "функция \"%s\" не существует" -#: utils/adt/acl.c:4864 +#: utils/adt/acl.c:4869 #, c-format msgid "must be member of role \"%s\"" msgstr "нужно быть членом роли \"%s\"" #: utils/adt/array_expanded.c:274 utils/adt/arrayfuncs.c:931 #: utils/adt/arrayfuncs.c:1519 utils/adt/arrayfuncs.c:3251 -#: utils/adt/arrayfuncs.c:3389 utils/adt/arrayfuncs.c:5848 -#: utils/adt/arrayfuncs.c:6159 utils/adt/arrayutils.c:93 +#: utils/adt/arrayfuncs.c:3389 utils/adt/arrayfuncs.c:5846 +#: utils/adt/arrayfuncs.c:6157 utils/adt/arrayutils.c:93 #: utils/adt/arrayutils.c:102 utils/adt/arrayutils.c:109 #, c-format msgid "array size exceeds the maximum allowed (%d)" msgstr "размер массива превышает предел (%d)" -#: utils/adt/array_userfuncs.c:79 utils/adt/array_userfuncs.c:541 -#: utils/adt/array_userfuncs.c:621 utils/adt/json.c:1764 utils/adt/json.c:1859 -#: utils/adt/json.c:1897 utils/adt/jsonb.c:1127 utils/adt/jsonb.c:1156 -#: utils/adt/jsonb.c:1592 utils/adt/jsonb.c:1756 utils/adt/jsonb.c:1766 +#: utils/adt/array_userfuncs.c:79 utils/adt/array_userfuncs.c:471 +#: utils/adt/array_userfuncs.c:551 utils/adt/json.c:1765 utils/adt/json.c:1860 +#: utils/adt/json.c:1898 utils/adt/jsonb.c:1128 utils/adt/jsonb.c:1157 +#: utils/adt/jsonb.c:1549 utils/adt/jsonb.c:1713 utils/adt/jsonb.c:1723 #, c-format msgid "could not determine input data type" msgstr "не удалось определить тип входных данных" @@ -20835,7 +21100,7 @@ msgstr "тип входных данных не является массиво #: utils/adt/int.c:1001 utils/adt/int.c:1028 utils/adt/int.c:1061 #: utils/adt/int.c:1144 utils/adt/int8.c:1298 utils/adt/numeric.c:2953 #: utils/adt/numeric.c:2962 utils/adt/varbit.c:1173 utils/adt/varbit.c:1575 -#: utils/adt/varlena.c:1054 utils/adt/varlena.c:2933 +#: utils/adt/varlena.c:1054 utils/adt/varlena.c:2957 #, c-format msgid "integer out of range" msgstr "целое вне диапазона" @@ -20875,18 +21140,12 @@ msgstr "Массивы с разными размерностями элемен msgid "Arrays with differing dimensions are not compatible for concatenation." msgstr "Массивы с разными размерностями несовместимы для соединения." -#: utils/adt/array_userfuncs.c:480 utils/adt/arrayfuncs.c:1284 -#: utils/adt/arrayfuncs.c:3357 utils/adt/arrayfuncs.c:5754 -#, c-format -msgid "invalid number of dimensions: %d" -msgstr "неверное число размерностей: %d" - -#: utils/adt/array_userfuncs.c:737 utils/adt/array_userfuncs.c:889 +#: utils/adt/array_userfuncs.c:667 utils/adt/array_userfuncs.c:819 #, c-format msgid "searching for elements in multidimensional arrays is not supported" msgstr "поиск элементов в многомерных массивах не поддерживается" -#: utils/adt/array_userfuncs.c:761 +#: utils/adt/array_userfuncs.c:691 #, c-format msgid "initial position must not be null" msgstr "начальная позиция не может быть NULL" @@ -20966,7 +21225,7 @@ msgstr "Неожиданный элемент массива." 
msgid "Unmatched \"%c\" character." msgstr "Непарный знак \"%c\"." -#: utils/adt/arrayfuncs.c:597 +#: utils/adt/arrayfuncs.c:597 utils/adt/jsonfuncs.c:2381 #, c-format msgid "Multidimensional arrays must have sub-arrays with matching dimensions." msgstr "" @@ -20978,6 +21237,12 @@ msgstr "" msgid "Junk after closing right brace." msgstr "Мусор после закрывающей фигурной скобки." +#: utils/adt/arrayfuncs.c:1284 utils/adt/arrayfuncs.c:3357 +#: utils/adt/arrayfuncs.c:5752 +#, c-format +msgid "invalid number of dimensions: %d" +msgstr "неверное число размерностей: %d" + #: utils/adt/arrayfuncs.c:1295 #, c-format msgid "invalid array flags" @@ -20989,7 +21254,7 @@ msgid "wrong element type" msgstr "неверный тип элемента" #: utils/adt/arrayfuncs.c:1353 utils/adt/rangetypes.c:334 -#: utils/cache/lsyscache.c:2651 +#: utils/cache/lsyscache.c:2683 #, c-format msgid "no binary input function available for type %s" msgstr "для типа %s нет функции ввода двоичных данных" @@ -21000,7 +21265,7 @@ msgid "improper binary format in array element %d" msgstr "неподходящий двоичный формат в элементе массива %d" #: utils/adt/arrayfuncs.c:1574 utils/adt/rangetypes.c:339 -#: utils/cache/lsyscache.c:2684 +#: utils/cache/lsyscache.c:2716 #, c-format msgid "no binary output function available for type %s" msgstr "для типа %s нет функции вывода двоичных данных" @@ -21012,11 +21277,11 @@ msgstr "разрезание массивов постоянной длины н #: utils/adt/arrayfuncs.c:2230 utils/adt/arrayfuncs.c:2252 #: utils/adt/arrayfuncs.c:2301 utils/adt/arrayfuncs.c:2537 -#: utils/adt/arrayfuncs.c:2848 utils/adt/arrayfuncs.c:5740 -#: utils/adt/arrayfuncs.c:5766 utils/adt/arrayfuncs.c:5777 -#: utils/adt/json.c:2295 utils/adt/json.c:2370 utils/adt/jsonb.c:1370 -#: utils/adt/jsonb.c:1456 utils/adt/jsonfuncs.c:3496 utils/adt/jsonfuncs.c:3647 -#: utils/adt/jsonfuncs.c:3692 utils/adt/jsonfuncs.c:3739 +#: utils/adt/arrayfuncs.c:2848 utils/adt/arrayfuncs.c:5738 +#: utils/adt/arrayfuncs.c:5764 utils/adt/arrayfuncs.c:5775 +#: utils/adt/json.c:2259 utils/adt/json.c:2334 utils/adt/jsonb.c:1327 +#: utils/adt/jsonb.c:1413 utils/adt/jsonfuncs.c:4158 utils/adt/jsonfuncs.c:4309 +#: utils/adt/jsonfuncs.c:4354 utils/adt/jsonfuncs.c:4401 #, c-format msgid "wrong number of array subscripts" msgstr "неверное число индексов массива" @@ -21072,57 +21337,57 @@ msgstr "нельзя сравнивать массивы с элементами msgid "could not identify a hash function for type %s" msgstr "не удалось найти функцию хеширования для типа %s" -#: utils/adt/arrayfuncs.c:5154 +#: utils/adt/arrayfuncs.c:5152 #, c-format msgid "data type %s is not an array type" msgstr "тип данных %s не является типом массива" -#: utils/adt/arrayfuncs.c:5209 +#: utils/adt/arrayfuncs.c:5207 #, c-format msgid "cannot accumulate null arrays" msgstr "аккумулировать NULL-массивы нельзя" -#: utils/adt/arrayfuncs.c:5237 +#: utils/adt/arrayfuncs.c:5235 #, c-format msgid "cannot accumulate empty arrays" msgstr "аккумулировать пустые массивы нельзя" -#: utils/adt/arrayfuncs.c:5266 utils/adt/arrayfuncs.c:5272 +#: utils/adt/arrayfuncs.c:5264 utils/adt/arrayfuncs.c:5270 #, c-format msgid "cannot accumulate arrays of different dimensionality" msgstr "аккумулировать массивы различной размерности нельзя" -#: utils/adt/arrayfuncs.c:5638 utils/adt/arrayfuncs.c:5678 +#: utils/adt/arrayfuncs.c:5636 utils/adt/arrayfuncs.c:5676 #, c-format msgid "dimension array or low bound array cannot be null" msgstr "массив размерностей или массив нижних границ не может быть null" -#: utils/adt/arrayfuncs.c:5741 utils/adt/arrayfuncs.c:5767 +#: 
utils/adt/arrayfuncs.c:5739 utils/adt/arrayfuncs.c:5765 #, c-format msgid "Dimension array must be one dimensional." msgstr "Массив размерностей должен быть одномерным." -#: utils/adt/arrayfuncs.c:5746 utils/adt/arrayfuncs.c:5772 +#: utils/adt/arrayfuncs.c:5744 utils/adt/arrayfuncs.c:5770 #, c-format msgid "dimension values cannot be null" msgstr "значения размерностей не могут быть null" -#: utils/adt/arrayfuncs.c:5778 +#: utils/adt/arrayfuncs.c:5776 #, c-format msgid "Low bound array has different size than dimensions array." msgstr "Массив нижних границ и массив размерностей имеют разные размеры." -#: utils/adt/arrayfuncs.c:6024 +#: utils/adt/arrayfuncs.c:6022 #, c-format msgid "removing elements from multidimensional arrays is not supported" msgstr "удаление элементов из многомерных массивов не поддерживается" -#: utils/adt/arrayfuncs.c:6301 +#: utils/adt/arrayfuncs.c:6299 #, c-format msgid "thresholds must be one-dimensional array" msgstr "границы должны задаваться одномерным массивом" -#: utils/adt/arrayfuncs.c:6306 +#: utils/adt/arrayfuncs.c:6304 #, c-format msgid "thresholds array must not contain NULLs" msgstr "массив границ не должен содержать NULL" @@ -21159,8 +21424,8 @@ msgstr "преобразование кодировки из %s в ASCII не п #: utils/adt/mac8.c:93 utils/adt/mac8.c:166 utils/adt/mac8.c:184 #: utils/adt/mac8.c:202 utils/adt/mac8.c:221 utils/adt/nabstime.c:1539 #: utils/adt/network.c:58 utils/adt/numeric.c:593 utils/adt/numeric.c:620 -#: utils/adt/numeric.c:5488 utils/adt/numeric.c:5512 utils/adt/numeric.c:5536 -#: utils/adt/numeric.c:6338 utils/adt/numeric.c:6364 utils/adt/oid.c:44 +#: utils/adt/numeric.c:5498 utils/adt/numeric.c:5522 utils/adt/numeric.c:5546 +#: utils/adt/numeric.c:6348 utils/adt/numeric.c:6374 utils/adt/oid.c:44 #: utils/adt/oid.c:58 utils/adt/oid.c:64 utils/adt/oid.c:86 #: utils/adt/pg_lsn.c:44 utils/adt/pg_lsn.c:50 utils/adt/tid.c:72 #: utils/adt/tid.c:80 utils/adt/tid.c:88 utils/adt/txid.c:405 @@ -21170,21 +21435,21 @@ msgid "invalid input syntax for type %s: \"%s\"" msgstr "неверный синтаксис для типа %s: \"%s\"" #: utils/adt/cash.c:211 utils/adt/cash.c:238 utils/adt/cash.c:249 -#: utils/adt/cash.c:290 utils/adt/int8.c:114 utils/adt/numutils.c:75 +#: utils/adt/cash.c:292 utils/adt/int8.c:114 utils/adt/numutils.c:75 #: utils/adt/numutils.c:82 utils/adt/oid.c:70 utils/adt/oid.c:109 #, c-format msgid "value \"%s\" is out of range for type %s" msgstr "значение \"%s\" вне диапазона для типа %s" -#: utils/adt/cash.c:651 utils/adt/cash.c:701 utils/adt/cash.c:752 -#: utils/adt/cash.c:801 utils/adt/cash.c:853 utils/adt/cash.c:903 +#: utils/adt/cash.c:653 utils/adt/cash.c:703 utils/adt/cash.c:754 +#: utils/adt/cash.c:803 utils/adt/cash.c:855 utils/adt/cash.c:905 #: utils/adt/float.c:855 utils/adt/float.c:919 utils/adt/float.c:3315 #: utils/adt/float.c:3378 utils/adt/geo_ops.c:4093 utils/adt/int.c:704 #: utils/adt/int.c:846 utils/adt/int.c:954 utils/adt/int.c:1043 #: utils/adt/int.c:1082 utils/adt/int.c:1110 utils/adt/int8.c:597 #: utils/adt/int8.c:657 utils/adt/int8.c:897 utils/adt/int8.c:1005 -#: utils/adt/int8.c:1094 utils/adt/int8.c:1202 utils/adt/numeric.c:6902 -#: utils/adt/numeric.c:7191 utils/adt/numeric.c:8204 utils/adt/timestamp.c:3186 +#: utils/adt/int8.c:1094 utils/adt/int8.c:1202 utils/adt/numeric.c:6912 +#: utils/adt/numeric.c:7201 utils/adt/numeric.c:8213 utils/adt/timestamp.c:3216 #, c-format msgid "division by zero" msgstr "деление на ноль" @@ -21194,7 +21459,7 @@ msgstr "деление на ноль" msgid "\"char\" out of range" msgstr "значение \"char\" вне 
диапазона" -#: utils/adt/date.c:67 utils/adt/timestamp.c:94 utils/adt/varbit.c:53 +#: utils/adt/date.c:67 utils/adt/timestamp.c:95 utils/adt/varbit.c:53 #: utils/adt/varchar.c:46 #, c-format msgid "invalid type modifier" @@ -21215,24 +21480,24 @@ msgstr "TIME(%d)%s: точность уменьшена до дозволенн msgid "date/time value \"current\" is no longer supported" msgstr "значение \"current\" для даты/времени больше не поддерживается" -#: utils/adt/date.c:172 utils/adt/date.c:180 utils/adt/formatting.c:3558 -#: utils/adt/formatting.c:3567 +#: utils/adt/date.c:172 utils/adt/date.c:180 utils/adt/formatting.c:3585 +#: utils/adt/formatting.c:3594 #, c-format msgid "date out of range: \"%s\"" msgstr "дата вне диапазона: \"%s\"" #: utils/adt/date.c:227 utils/adt/date.c:539 utils/adt/date.c:563 -#: utils/adt/xml.c:2085 +#: utils/adt/xml.c:2089 #, c-format msgid "date out of range" msgstr "дата вне диапазона" -#: utils/adt/date.c:273 utils/adt/timestamp.c:563 +#: utils/adt/date.c:273 utils/adt/timestamp.c:564 #, c-format msgid "date field value out of range: %d-%02d-%02d" msgstr "значение поля типа date вне диапазона: %d-%02d-%02d" -#: utils/adt/date.c:280 utils/adt/date.c:289 utils/adt/timestamp.c:569 +#: utils/adt/date.c:280 utils/adt/date.c:289 utils/adt/timestamp.c:570 #, c-format msgid "date out of range: %d-%02d-%02d" msgstr "дата вне диапазона: %d-%02d-%02d" @@ -21240,32 +21505,32 @@ msgstr "дата вне диапазона: %d-%02d-%02d" #: utils/adt/date.c:327 utils/adt/date.c:350 utils/adt/date.c:376 #: utils/adt/date.c:1092 utils/adt/date.c:1138 utils/adt/date.c:1672 #: utils/adt/date.c:1703 utils/adt/date.c:1732 utils/adt/date.c:2469 -#: utils/adt/datetime.c:1690 utils/adt/formatting.c:3433 -#: utils/adt/formatting.c:3465 utils/adt/formatting.c:3533 -#: utils/adt/json.c:1539 utils/adt/json.c:1561 utils/adt/jsonb.c:824 -#: utils/adt/jsonb.c:848 utils/adt/nabstime.c:456 utils/adt/nabstime.c:499 -#: utils/adt/nabstime.c:529 utils/adt/nabstime.c:572 utils/adt/timestamp.c:229 -#: utils/adt/timestamp.c:261 utils/adt/timestamp.c:691 -#: utils/adt/timestamp.c:700 utils/adt/timestamp.c:778 -#: utils/adt/timestamp.c:811 utils/adt/timestamp.c:2765 -#: utils/adt/timestamp.c:2786 utils/adt/timestamp.c:2799 -#: utils/adt/timestamp.c:2808 utils/adt/timestamp.c:2816 -#: utils/adt/timestamp.c:2871 utils/adt/timestamp.c:2894 -#: utils/adt/timestamp.c:2907 utils/adt/timestamp.c:2918 -#: utils/adt/timestamp.c:2926 utils/adt/timestamp.c:3482 -#: utils/adt/timestamp.c:3607 utils/adt/timestamp.c:3648 -#: utils/adt/timestamp.c:3729 utils/adt/timestamp.c:3775 -#: utils/adt/timestamp.c:3878 utils/adt/timestamp.c:4277 -#: utils/adt/timestamp.c:4376 utils/adt/timestamp.c:4386 -#: utils/adt/timestamp.c:4478 utils/adt/timestamp.c:4580 -#: utils/adt/timestamp.c:4590 utils/adt/timestamp.c:4822 -#: utils/adt/timestamp.c:4836 utils/adt/timestamp.c:4841 -#: utils/adt/timestamp.c:4855 utils/adt/timestamp.c:4900 -#: utils/adt/timestamp.c:4932 utils/adt/timestamp.c:4939 -#: utils/adt/timestamp.c:4972 utils/adt/timestamp.c:4976 -#: utils/adt/timestamp.c:5045 utils/adt/timestamp.c:5049 -#: utils/adt/timestamp.c:5063 utils/adt/timestamp.c:5097 utils/adt/xml.c:2107 -#: utils/adt/xml.c:2114 utils/adt/xml.c:2134 utils/adt/xml.c:2141 +#: utils/adt/datetime.c:1690 utils/adt/formatting.c:3460 +#: utils/adt/formatting.c:3492 utils/adt/formatting.c:3560 +#: utils/adt/json.c:1540 utils/adt/json.c:1562 utils/adt/jsonb.c:825 +#: utils/adt/jsonb.c:849 utils/adt/nabstime.c:456 utils/adt/nabstime.c:499 +#: utils/adt/nabstime.c:529 utils/adt/nabstime.c:572 
utils/adt/timestamp.c:230 +#: utils/adt/timestamp.c:262 utils/adt/timestamp.c:692 +#: utils/adt/timestamp.c:701 utils/adt/timestamp.c:779 +#: utils/adt/timestamp.c:812 utils/adt/timestamp.c:2795 +#: utils/adt/timestamp.c:2816 utils/adt/timestamp.c:2829 +#: utils/adt/timestamp.c:2838 utils/adt/timestamp.c:2846 +#: utils/adt/timestamp.c:2901 utils/adt/timestamp.c:2924 +#: utils/adt/timestamp.c:2937 utils/adt/timestamp.c:2948 +#: utils/adt/timestamp.c:2956 utils/adt/timestamp.c:3512 +#: utils/adt/timestamp.c:3637 utils/adt/timestamp.c:3678 +#: utils/adt/timestamp.c:3759 utils/adt/timestamp.c:3805 +#: utils/adt/timestamp.c:3908 utils/adt/timestamp.c:4307 +#: utils/adt/timestamp.c:4406 utils/adt/timestamp.c:4416 +#: utils/adt/timestamp.c:4508 utils/adt/timestamp.c:4610 +#: utils/adt/timestamp.c:4620 utils/adt/timestamp.c:4852 +#: utils/adt/timestamp.c:4866 utils/adt/timestamp.c:4871 +#: utils/adt/timestamp.c:4885 utils/adt/timestamp.c:4930 +#: utils/adt/timestamp.c:4962 utils/adt/timestamp.c:4969 +#: utils/adt/timestamp.c:5002 utils/adt/timestamp.c:5006 +#: utils/adt/timestamp.c:5075 utils/adt/timestamp.c:5079 +#: utils/adt/timestamp.c:5093 utils/adt/timestamp.c:5127 utils/adt/xml.c:2111 +#: utils/adt/xml.c:2118 utils/adt/xml.c:2138 utils/adt/xml.c:2145 #, c-format msgid "timestamp out of range" msgstr "timestamp вне диапазона" @@ -21296,7 +21561,7 @@ msgstr "abstime вне диапазона для типа даты" msgid "time out of range" msgstr "время вне диапазона" -#: utils/adt/date.c:1357 utils/adt/timestamp.c:588 +#: utils/adt/date.c:1357 utils/adt/timestamp.c:589 #, c-format msgid "time field value out of range: %d:%02d:%02g" msgstr "значение поля типа time вне диапазона: %d:%02d:%02g" @@ -21317,14 +21582,14 @@ msgid "\"time with time zone\" units \"%s\" not recognized" msgstr "\"время с часовым поясом\" содержит нераспознанные единицы \"%s\"" #: utils/adt/date.c:2687 utils/adt/datetime.c:931 utils/adt/datetime.c:1848 -#: utils/adt/datetime.c:4636 utils/adt/timestamp.c:502 -#: utils/adt/timestamp.c:529 utils/adt/timestamp.c:4847 -#: utils/adt/timestamp.c:5055 +#: utils/adt/datetime.c:4636 utils/adt/timestamp.c:503 +#: utils/adt/timestamp.c:530 utils/adt/timestamp.c:4877 +#: utils/adt/timestamp.c:5085 #, c-format msgid "time zone \"%s\" not recognized" msgstr "часовой пояс \"%s\" не распознан" -#: utils/adt/date.c:2719 utils/adt/timestamp.c:4889 utils/adt/timestamp.c:5086 +#: utils/adt/date.c:2719 utils/adt/timestamp.c:4919 utils/adt/timestamp.c:5116 #, c-format msgid "interval time zone \"%s\" must not include months or days" msgstr "" @@ -21426,48 +21691,37 @@ msgid "Input data is missing padding, is truncated, or is otherwise corrupted." msgstr "" "Входные данные лишены выравнивания, обрезаны или повреждены иным образом." 
-#: utils/adt/encode.c:442 utils/adt/encode.c:507 utils/adt/json.c:785 -#: utils/adt/json.c:825 utils/adt/json.c:841 utils/adt/json.c:853 -#: utils/adt/json.c:863 utils/adt/json.c:914 utils/adt/json.c:946 -#: utils/adt/json.c:965 utils/adt/json.c:977 utils/adt/json.c:989 -#: utils/adt/json.c:1134 utils/adt/json.c:1148 utils/adt/json.c:1159 -#: utils/adt/json.c:1167 utils/adt/json.c:1175 utils/adt/json.c:1183 -#: utils/adt/json.c:1191 utils/adt/json.c:1199 utils/adt/json.c:1207 -#: utils/adt/json.c:1215 utils/adt/json.c:1245 utils/adt/varlena.c:296 +#: utils/adt/encode.c:442 utils/adt/encode.c:507 utils/adt/json.c:786 +#: utils/adt/json.c:826 utils/adt/json.c:842 utils/adt/json.c:854 +#: utils/adt/json.c:864 utils/adt/json.c:915 utils/adt/json.c:947 +#: utils/adt/json.c:966 utils/adt/json.c:978 utils/adt/json.c:990 +#: utils/adt/json.c:1135 utils/adt/json.c:1149 utils/adt/json.c:1160 +#: utils/adt/json.c:1168 utils/adt/json.c:1176 utils/adt/json.c:1184 +#: utils/adt/json.c:1192 utils/adt/json.c:1200 utils/adt/json.c:1208 +#: utils/adt/json.c:1216 utils/adt/json.c:1246 utils/adt/varlena.c:296 #: utils/adt/varlena.c:337 #, c-format msgid "invalid input syntax for type %s" msgstr "неверный синтаксис для типа %s" -#: utils/adt/enum.c:115 -#, c-format -msgid "unsafe use of new value \"%s\" of enum type %s" -msgstr "небезопасное использование нового значения \"%s\" типа-перечисления %s" - -#: utils/adt/enum.c:118 -#, c-format -msgid "New enum values must be committed before they can be used." -msgstr "" -"Новые значения перечисления должны быть зафиксированы перед использованием." - -#: utils/adt/enum.c:136 utils/adt/enum.c:146 utils/adt/enum.c:204 -#: utils/adt/enum.c:214 +#: utils/adt/enum.c:48 utils/adt/enum.c:58 utils/adt/enum.c:113 +#: utils/adt/enum.c:123 #, c-format msgid "invalid input value for enum %s: \"%s\"" msgstr "неверное значение для перечисления %s: \"%s\"" -#: utils/adt/enum.c:176 utils/adt/enum.c:242 utils/adt/enum.c:301 +#: utils/adt/enum.c:85 utils/adt/enum.c:148 utils/adt/enum.c:207 #, c-format msgid "invalid internal value for enum: %u" msgstr "неверное внутреннее значение для перечисления: %u" -#: utils/adt/enum.c:461 utils/adt/enum.c:490 utils/adt/enum.c:530 -#: utils/adt/enum.c:550 +#: utils/adt/enum.c:360 utils/adt/enum.c:389 utils/adt/enum.c:429 +#: utils/adt/enum.c:449 #, c-format msgid "could not determine actual enum type" msgstr "не удалось определить фактический тип перечисления" -#: utils/adt/enum.c:469 utils/adt/enum.c:498 +#: utils/adt/enum.c:368 utils/adt/enum.c:397 #, c-format msgid "enum %s contains no values" msgstr "перечисление %s не содержит значений" @@ -21500,7 +21754,7 @@ msgstr "\"%s\" вне диапазона для типа double precision" msgid "smallint out of range" msgstr "smallint вне диапазона" -#: utils/adt/float.c:1430 utils/adt/numeric.c:7624 +#: utils/adt/float.c:1430 utils/adt/numeric.c:7634 #, c-format msgid "cannot take square root of a negative number" msgstr "извлечь квадратный корень отрицательного числа нельзя" @@ -21515,12 +21769,12 @@ msgstr "ноль в отрицательной степени даёт неоп msgid "a negative number raised to a non-integer power yields a complex result" msgstr "отрицательное число в дробной степени даёт комплексный результат" -#: utils/adt/float.c:1542 utils/adt/float.c:1572 utils/adt/numeric.c:7890 +#: utils/adt/float.c:1542 utils/adt/float.c:1572 utils/adt/numeric.c:7900 #, c-format msgid "cannot take logarithm of zero" msgstr "вычислить логарифм нуля нельзя" -#: utils/adt/float.c:1546 utils/adt/float.c:1576 utils/adt/numeric.c:7894 +#: 
utils/adt/float.c:1546 utils/adt/float.c:1576 utils/adt/numeric.c:7904 #, c-format msgid "cannot take logarithm of a negative number" msgstr "вычислить логарифм отрицательного числа нельзя" @@ -21640,38 +21894,38 @@ msgstr "" msgid "\"%s\" is not a number" msgstr "\"%s\" не является числом" -#: utils/adt/formatting.c:1472 +#: utils/adt/formatting.c:1480 #, c-format msgid "case conversion failed: %s" msgstr "преобразовать регистр не удалось: %s" -#: utils/adt/formatting.c:1536 +#: utils/adt/formatting.c:1545 #, c-format msgid "could not determine which collation to use for lower() function" msgstr "" "не удалось определить, какое правило сортировки использовать для функции " "lower()" -#: utils/adt/formatting.c:1655 +#: utils/adt/formatting.c:1669 #, c-format msgid "could not determine which collation to use for upper() function" msgstr "" "не удалось определить, какое правило сортировки использовать для функции " "upper()" -#: utils/adt/formatting.c:1774 +#: utils/adt/formatting.c:1794 #, c-format msgid "could not determine which collation to use for initcap() function" msgstr "" "не удалось определить, какое правило сортировки использовать для функции " "initcap()" -#: utils/adt/formatting.c:2136 +#: utils/adt/formatting.c:2163 #, c-format msgid "invalid combination of date conventions" msgstr "неверное сочетание стилей дат" -#: utils/adt/formatting.c:2137 +#: utils/adt/formatting.c:2164 #, c-format msgid "" "Do not mix Gregorian and ISO week date conventions in a formatting template." @@ -21679,27 +21933,27 @@ msgstr "" "Не смешивайте Григорианский стиль дат (недель) с ISO в одном шаблоне " "форматирования." -#: utils/adt/formatting.c:2154 +#: utils/adt/formatting.c:2181 #, c-format msgid "conflicting values for \"%s\" field in formatting string" msgstr "конфликтующие значения поля \"%s\" в строке форматирования" -#: utils/adt/formatting.c:2156 +#: utils/adt/formatting.c:2183 #, c-format msgid "This value contradicts a previous setting for the same field type." msgstr "Это значение противоречит предыдущему значению поля того же типа." -#: utils/adt/formatting.c:2217 +#: utils/adt/formatting.c:2244 #, c-format msgid "source string too short for \"%s\" formatting field" msgstr "входная строка короче, чем требует поле форматирования \"%s\"" -#: utils/adt/formatting.c:2219 +#: utils/adt/formatting.c:2246 #, c-format msgid "Field requires %d characters, but only %d remain." msgstr "Требуется символов: %d, а осталось только %d." -#: utils/adt/formatting.c:2222 utils/adt/formatting.c:2236 +#: utils/adt/formatting.c:2249 utils/adt/formatting.c:2263 #, c-format msgid "" "If your source string is not fixed-width, try using the \"FM\" modifier." @@ -21707,80 +21961,80 @@ msgstr "" "Если входная строка имеет переменную длину, попробуйте использовать " "модификатор \"FM\"." -#: utils/adt/formatting.c:2232 utils/adt/formatting.c:2245 -#: utils/adt/formatting.c:2375 +#: utils/adt/formatting.c:2259 utils/adt/formatting.c:2272 +#: utils/adt/formatting.c:2402 #, c-format msgid "invalid value \"%s\" for \"%s\"" msgstr "неверное значение \"%s\" для \"%s\"" -#: utils/adt/formatting.c:2234 +#: utils/adt/formatting.c:2261 #, c-format msgid "Field requires %d characters, but only %d could be parsed." msgstr "Поле должно поглотить символов: %d, но удалось разобрать только %d." -#: utils/adt/formatting.c:2247 +#: utils/adt/formatting.c:2274 #, c-format msgid "Value must be an integer." msgstr "Значение должно быть целым числом." 
-#: utils/adt/formatting.c:2252 +#: utils/adt/formatting.c:2279 #, c-format msgid "value for \"%s\" in source string is out of range" msgstr "значение \"%s\" во входной строке вне диапазона" -#: utils/adt/formatting.c:2254 +#: utils/adt/formatting.c:2281 #, c-format msgid "Value must be in the range %d to %d." msgstr "Значение должно быть в интервале %d..%d." -#: utils/adt/formatting.c:2377 +#: utils/adt/formatting.c:2404 #, c-format msgid "The given value did not match any of the allowed values for this field." msgstr "" "Данное значение не соответствует ни одному из допустимых значений для этого " "поля." -#: utils/adt/formatting.c:2562 utils/adt/formatting.c:2582 -#: utils/adt/formatting.c:2602 utils/adt/formatting.c:2622 -#: utils/adt/formatting.c:2641 utils/adt/formatting.c:2660 -#: utils/adt/formatting.c:2684 utils/adt/formatting.c:2702 -#: utils/adt/formatting.c:2720 utils/adt/formatting.c:2738 -#: utils/adt/formatting.c:2755 utils/adt/formatting.c:2772 +#: utils/adt/formatting.c:2589 utils/adt/formatting.c:2609 +#: utils/adt/formatting.c:2629 utils/adt/formatting.c:2649 +#: utils/adt/formatting.c:2668 utils/adt/formatting.c:2687 +#: utils/adt/formatting.c:2711 utils/adt/formatting.c:2729 +#: utils/adt/formatting.c:2747 utils/adt/formatting.c:2765 +#: utils/adt/formatting.c:2782 utils/adt/formatting.c:2799 #, c-format msgid "localized string format value too long" msgstr "слишком длинное значение формата локализованной строки" -#: utils/adt/formatting.c:3059 +#: utils/adt/formatting.c:3086 #, c-format msgid "formatting field \"%s\" is only supported in to_char" msgstr "поле форматирования \"%s\" поддерживается только в функции to_char" -#: utils/adt/formatting.c:3170 +#: utils/adt/formatting.c:3197 #, c-format msgid "invalid input string for \"Y,YYY\"" msgstr "ошибка синтаксиса в значении для шаблона \"Y,YYY\"" -#: utils/adt/formatting.c:3676 +#: utils/adt/formatting.c:3703 #, c-format msgid "hour \"%d\" is invalid for the 12-hour clock" msgstr "час \"%d\" не соответствует 12-часовому формату времени" -#: utils/adt/formatting.c:3678 +#: utils/adt/formatting.c:3705 #, c-format msgid "Use the 24-hour clock, or give an hour between 1 and 12." msgstr "Используйте 24-часовой формат или передавайте часы от 1 до 12." 
-#: utils/adt/formatting.c:3784 +#: utils/adt/formatting.c:3811 #, c-format msgid "cannot calculate day of year without year information" msgstr "нельзя рассчитать день года без информации о годе" -#: utils/adt/formatting.c:4651 +#: utils/adt/formatting.c:4678 #, c-format msgid "\"EEEE\" not supported for input" msgstr "\"EEEE\" не поддерживается при вводе" -#: utils/adt/formatting.c:4663 +#: utils/adt/formatting.c:4690 #, c-format msgid "\"RN\" not supported for input" msgstr "\"RN\" не поддерживается при вводе" @@ -21924,7 +22178,7 @@ msgid "oidvector has too many elements" msgstr "oidvector содержит слишком много элементов" #: utils/adt/int.c:1347 utils/adt/int8.c:1460 utils/adt/numeric.c:1401 -#: utils/adt/timestamp.c:5148 utils/adt/timestamp.c:5229 +#: utils/adt/timestamp.c:5178 utils/adt/timestamp.c:5259 #, c-format msgid "step size cannot equal zero" msgstr "размер шага не может быть нулевым" @@ -21932,8 +22186,8 @@ msgstr "размер шага не может быть нулевым" #: utils/adt/int8.c:98 utils/adt/int8.c:133 utils/adt/numutils.c:51 #: utils/adt/numutils.c:61 utils/adt/numutils.c:105 #, c-format -msgid "invalid input syntax for %s: \"%s\"" -msgstr "неверный синтаксис для %s: \"%s\"" +msgid "invalid input syntax for integer: \"%s\"" +msgstr "неверное значение для целого числа: \"%s\"" #: utils/adt/int8.c:500 utils/adt/int8.c:529 utils/adt/int8.c:550 #: utils/adt/int8.c:581 utils/adt/int8.c:615 utils/adt/int8.c:640 @@ -21954,39 +22208,39 @@ msgstr "bigint вне диапазона" msgid "OID out of range" msgstr "OID вне диапазона" -#: utils/adt/json.c:786 +#: utils/adt/json.c:787 #, c-format msgid "Character with value 0x%02x must be escaped." msgstr "Символ с кодом 0x%02x необходимо экранировать." -#: utils/adt/json.c:827 +#: utils/adt/json.c:828 #, c-format msgid "\"\\u\" must be followed by four hexadecimal digits." msgstr "За \"\\u\" должны следовать четыре шестнадцатеричные цифры." -#: utils/adt/json.c:843 +#: utils/adt/json.c:844 #, c-format msgid "Unicode high surrogate must not follow a high surrogate." msgstr "" "Старшее слово суррогата Unicode не может следовать за другим старшим словом." -#: utils/adt/json.c:854 utils/adt/json.c:864 utils/adt/json.c:916 -#: utils/adt/json.c:978 utils/adt/json.c:990 +#: utils/adt/json.c:855 utils/adt/json.c:865 utils/adt/json.c:917 +#: utils/adt/json.c:979 utils/adt/json.c:991 #, c-format msgid "Unicode low surrogate must follow a high surrogate." msgstr "Младшее слово суррогата Unicode должно следовать за старшим словом." -#: utils/adt/json.c:879 utils/adt/json.c:902 +#: utils/adt/json.c:880 utils/adt/json.c:903 #, c-format msgid "unsupported Unicode escape sequence" msgstr "неподдерживаемая спецпоследовательность Unicode" -#: utils/adt/json.c:880 +#: utils/adt/json.c:881 #, c-format msgid "\\u0000 cannot be converted to text." msgstr "\\u0000 нельзя преобразовать в текст." -#: utils/adt/json.c:903 +#: utils/adt/json.c:904 #, c-format msgid "" "Unicode escape values cannot be used for code point values above 007F when " @@ -21995,93 +22249,88 @@ msgstr "" "Спецкоды Unicode для значений выше 007F можно использовать только с " "серверной кодировкой UTF8." -#: utils/adt/json.c:948 utils/adt/json.c:966 +#: utils/adt/json.c:949 utils/adt/json.c:967 #, c-format msgid "Escape sequence \"\\%s\" is invalid." msgstr "Неверная спецпоследовательность: \"\\%s\"." -#: utils/adt/json.c:1135 +#: utils/adt/json.c:1136 #, c-format msgid "The input string ended unexpectedly." msgstr "Неожиданный конец входной строки." 
-#: utils/adt/json.c:1149 +#: utils/adt/json.c:1150 #, c-format msgid "Expected end of input, but found \"%s\"." msgstr "Ожидался конец текста, но обнаружено продолжение \"%s\"." -#: utils/adt/json.c:1160 +#: utils/adt/json.c:1161 #, c-format msgid "Expected JSON value, but found \"%s\"." msgstr "Ожидалось значение JSON, но обнаружено \"%s\"." -#: utils/adt/json.c:1168 utils/adt/json.c:1216 +#: utils/adt/json.c:1169 utils/adt/json.c:1217 #, c-format msgid "Expected string, but found \"%s\"." msgstr "Ожидалась строка, но обнаружено \"%s\"." -#: utils/adt/json.c:1176 +#: utils/adt/json.c:1177 #, c-format msgid "Expected array element or \"]\", but found \"%s\"." msgstr "Ожидался элемент массива или \"]\", но обнаружено \"%s\"." -#: utils/adt/json.c:1184 +#: utils/adt/json.c:1185 #, c-format msgid "Expected \",\" or \"]\", but found \"%s\"." msgstr "Ожидалась \",\" или \"]\", но обнаружено \"%s\"." -#: utils/adt/json.c:1192 +#: utils/adt/json.c:1193 #, c-format msgid "Expected string or \"}\", but found \"%s\"." msgstr "Ожидалась строка или \"}\", но обнаружено \"%s\"." -#: utils/adt/json.c:1200 +#: utils/adt/json.c:1201 #, c-format msgid "Expected \":\", but found \"%s\"." msgstr "Ожидалось \":\", но обнаружено \"%s\"." -#: utils/adt/json.c:1208 +#: utils/adt/json.c:1209 #, c-format msgid "Expected \",\" or \"}\", but found \"%s\"." msgstr "Ожидалась \",\" или \"}\", но обнаружено \"%s\"." -#: utils/adt/json.c:1246 +#: utils/adt/json.c:1247 #, c-format msgid "Token \"%s\" is invalid." msgstr "Ошибочный элемент текста \"%s\"." -#: utils/adt/json.c:1318 +#: utils/adt/json.c:1319 #, c-format msgid "JSON data, line %d: %s%s%s" msgstr "данные JSON, строка %d: %s%s%s" -#: utils/adt/json.c:1474 utils/adt/jsonb.c:725 +#: utils/adt/json.c:1475 utils/adt/jsonb.c:726 #, c-format msgid "key value must be scalar, not array, composite, or json" msgstr "" "значением ключа должен быть скаляр (не массив, композитный тип или json)" -#: utils/adt/json.c:2011 +#: utils/adt/json.c:2012 utils/adt/json.c:2022 utils/fmgr/funcapi.c:1501 #, c-format -msgid "could not determine data type for argument 1" -msgstr "не удалось определить тип данных аргумента 1" - -#: utils/adt/json.c:2021 -#, c-format -msgid "could not determine data type for argument 2" -msgstr "не удалось определить тип данных аргумента 2" +msgid "could not determine data type for argument %d" +msgstr "не удалось определить тип данных аргумента %d" -#: utils/adt/json.c:2045 utils/adt/jsonb.c:1782 +#: utils/adt/json.c:2046 utils/adt/jsonb.c:1739 #, c-format msgid "field name must not be null" msgstr "имя поля не может быть NULL" -#: utils/adt/json.c:2122 +#: utils/adt/json.c:2130 utils/adt/jsonb.c:1191 #, c-format msgid "argument list must have even number of elements" msgstr "в списке аргументов должно быть чётное число элементов" -#: utils/adt/json.c:2123 +#: utils/adt/json.c:2131 #, c-format msgid "" "The arguments of json_build_object() must consist of alternating keys and " @@ -22089,66 +22338,59 @@ msgid "" msgstr "" "Аргументы json_build_object() должны состоять из пар ключей и значений." -#: utils/adt/json.c:2147 utils/adt/json.c:2168 utils/adt/json.c:2227 -#, c-format -msgid "could not determine data type for argument %d" -msgstr "не удалось определить тип данных аргумента %d" - -#: utils/adt/json.c:2153 +#: utils/adt/json.c:2146 #, c-format msgid "argument %d cannot be null" msgstr "аргумент %d не может быть NULL" -#: utils/adt/json.c:2154 +#: utils/adt/json.c:2147 #, c-format msgid "Object keys should be text." 
msgstr "Ключи объектов должны быть текстовыми." -#: utils/adt/json.c:2289 utils/adt/jsonb.c:1364 +#: utils/adt/json.c:2253 utils/adt/jsonb.c:1321 #, c-format msgid "array must have two columns" msgstr "массив должен иметь два столбца" -#: utils/adt/json.c:2313 utils/adt/json.c:2397 utils/adt/jsonb.c:1388 -#: utils/adt/jsonb.c:1483 +#: utils/adt/json.c:2277 utils/adt/json.c:2361 utils/adt/jsonb.c:1345 +#: utils/adt/jsonb.c:1440 #, c-format msgid "null value not allowed for object key" msgstr "значение null не может быть ключом объекта" -#: utils/adt/json.c:2386 utils/adt/jsonb.c:1472 +#: utils/adt/json.c:2350 utils/adt/jsonb.c:1429 #, c-format msgid "mismatched array dimensions" msgstr "неподходящие размерности массива" -#: utils/adt/jsonb.c:257 +#: utils/adt/jsonb.c:258 #, c-format msgid "string too long to represent as jsonb string" msgstr "слишком длинная строка для представления в виде строки jsonb" -#: utils/adt/jsonb.c:258 +#: utils/adt/jsonb.c:259 #, c-format msgid "" "Due to an implementation restriction, jsonb strings cannot exceed %d bytes." msgstr "" "Из-за ограничений реализации строки jsonb не могут быть длиннее %d байт." -#: utils/adt/jsonb.c:1183 +#: utils/adt/jsonb.c:1192 #, c-format -msgid "invalid number of arguments: object must be matched key value pairs" +msgid "" +"The arguments of jsonb_build_object() must consist of alternating keys and " +"values." msgstr "" -"неверное число аргументов: объект должен составляться из пар ключ-значение" +"Аргументы json_build_object() должны состоять из перемежающихся ключей и " +"значений." -#: utils/adt/jsonb.c:1196 +#: utils/adt/jsonb.c:1204 #, c-format msgid "argument %d: key must not be null" msgstr "аргумент %d: ключ не может быть NULL" -#: utils/adt/jsonb.c:1215 utils/adt/jsonb.c:1238 utils/adt/jsonb.c:1298 -#, c-format -msgid "argument %d: could not determine data type" -msgstr "аргумент %d: не удалось определить тип данных" - -#: utils/adt/jsonb.c:1835 +#: utils/adt/jsonb.c:1792 #, c-format msgid "object keys must be strings" msgstr "ключи объектов должны быть строковыми" @@ -22174,73 +22416,99 @@ msgstr "общий размер элементов массива jsonb прев msgid "total size of jsonb object elements exceeds the maximum of %u bytes" msgstr "общий размер элементов объекта jsonb превышает предел (%u байт)" -#: utils/adt/jsonfuncs.c:337 utils/adt/jsonfuncs.c:502 -#: utils/adt/jsonfuncs.c:2089 utils/adt/jsonfuncs.c:2530 -#: utils/adt/jsonfuncs.c:3036 +#: utils/adt/jsonfuncs.c:511 utils/adt/jsonfuncs.c:676 +#: utils/adt/jsonfuncs.c:2263 utils/adt/jsonfuncs.c:2699 +#: utils/adt/jsonfuncs.c:3393 utils/adt/jsonfuncs.c:3694 #, c-format msgid "cannot call %s on a scalar" msgstr "вызывать %s со скаляром нельзя" -#: utils/adt/jsonfuncs.c:342 utils/adt/jsonfuncs.c:489 -#: utils/adt/jsonfuncs.c:2519 +#: utils/adt/jsonfuncs.c:516 utils/adt/jsonfuncs.c:663 +#: utils/adt/jsonfuncs.c:2701 utils/adt/jsonfuncs.c:3382 #, c-format msgid "cannot call %s on an array" msgstr "вызывать %s с массивом нельзя" -#: utils/adt/jsonfuncs.c:1405 utils/adt/jsonfuncs.c:1440 +#: utils/adt/jsonfuncs.c:1579 utils/adt/jsonfuncs.c:1614 #, c-format msgid "cannot get array length of a scalar" msgstr "получить длину скаляра нельзя" -#: utils/adt/jsonfuncs.c:1409 utils/adt/jsonfuncs.c:1428 +#: utils/adt/jsonfuncs.c:1583 utils/adt/jsonfuncs.c:1602 #, c-format msgid "cannot get array length of a non-array" msgstr "получить длину массива для не массива нельзя" -#: utils/adt/jsonfuncs.c:1505 +#: utils/adt/jsonfuncs.c:1679 #, c-format msgid "cannot call %s on a non-object" msgstr 
"вызывать %s с не объектом нельзя" -#: utils/adt/jsonfuncs.c:1523 utils/adt/jsonfuncs.c:2202 -#: utils/adt/jsonfuncs.c:2739 +#: utils/adt/jsonfuncs.c:1697 utils/adt/jsonfuncs.c:3208 +#: utils/adt/jsonfuncs.c:3510 #, c-format msgid "" "function returning record called in context that cannot accept type record" msgstr "" "функция, возвращающая запись, вызвана в контексте, не допускающем этот тип" -#: utils/adt/jsonfuncs.c:1762 +#: utils/adt/jsonfuncs.c:1936 #, c-format msgid "cannot deconstruct an array as an object" msgstr "извлечь массив в виде объекта нельзя" -#: utils/adt/jsonfuncs.c:1774 +#: utils/adt/jsonfuncs.c:1948 #, c-format msgid "cannot deconstruct a scalar" msgstr "извлечь скаляр нельзя" -#: utils/adt/jsonfuncs.c:1820 +#: utils/adt/jsonfuncs.c:1994 #, c-format msgid "cannot extract elements from a scalar" msgstr "извлечь элементы из скаляра нельзя" -#: utils/adt/jsonfuncs.c:1824 +#: utils/adt/jsonfuncs.c:1998 #, c-format msgid "cannot extract elements from an object" msgstr "извлечь элементы из объекта нельзя" -#: utils/adt/jsonfuncs.c:2076 utils/adt/jsonfuncs.c:2835 +#: utils/adt/jsonfuncs.c:2250 utils/adt/jsonfuncs.c:3583 #, c-format msgid "cannot call %s on a non-array" msgstr "вызывать %s с не массивом нельзя" -#: utils/adt/jsonfuncs.c:2163 utils/adt/jsonfuncs.c:2715 +#: utils/adt/jsonfuncs.c:2316 utils/adt/jsonfuncs.c:2321 +#: utils/adt/jsonfuncs.c:2338 utils/adt/jsonfuncs.c:2344 +#, c-format +msgid "expected json array" +msgstr "ожидался массив json" + +#: utils/adt/jsonfuncs.c:2317 +#, c-format +msgid "See the value of key \"%s\"." +msgstr "Проверьте значение ключа \"%s\"." + +#: utils/adt/jsonfuncs.c:2339 +#, c-format +msgid "See the array element %s of key \"%s\"." +msgstr "Проверьте элемент массива %s ключа \"%s\"." + +#: utils/adt/jsonfuncs.c:2345 +#, c-format +msgid "See the array element %s." +msgstr "Проверьте элемент массива %s." + +#: utils/adt/jsonfuncs.c:2380 +#, c-format +msgid "malformed json array" +msgstr "неправильный массив json" + +#: utils/adt/jsonfuncs.c:3168 utils/adt/jsonfuncs.c:3478 #, c-format msgid "first argument of %s must be a row type" msgstr "первым аргументом %s должен быть кортеж" -#: utils/adt/jsonfuncs.c:2204 +#: utils/adt/jsonfuncs.c:3210 #, c-format msgid "" "Try calling the function in the FROM clause using a column definition list." @@ -22248,58 +22516,58 @@ msgstr "" "Попробуйте вызвать эту функцию в предложении FROM, используя список с " "определениями столбцов." 
-#: utils/adt/jsonfuncs.c:2851 utils/adt/jsonfuncs.c:3018 +#: utils/adt/jsonfuncs.c:3600 utils/adt/jsonfuncs.c:3676 #, c-format msgid "argument of %s must be an array of objects" msgstr "аргументом %s должен быть массив объектов" -#: utils/adt/jsonfuncs.c:2875 +#: utils/adt/jsonfuncs.c:3628 #, c-format msgid "cannot call %s on an object" msgstr "вызывать %s с объектом нельзя" -#: utils/adt/jsonfuncs.c:3442 utils/adt/jsonfuncs.c:3501 -#: utils/adt/jsonfuncs.c:3581 +#: utils/adt/jsonfuncs.c:4104 utils/adt/jsonfuncs.c:4163 +#: utils/adt/jsonfuncs.c:4243 #, c-format msgid "cannot delete from scalar" msgstr "удаление из скаляра невозможно" -#: utils/adt/jsonfuncs.c:3586 +#: utils/adt/jsonfuncs.c:4248 #, c-format msgid "cannot delete from object using integer index" msgstr "удаление из объекта по числовому индексу невозможно" -#: utils/adt/jsonfuncs.c:3652 utils/adt/jsonfuncs.c:3744 +#: utils/adt/jsonfuncs.c:4314 utils/adt/jsonfuncs.c:4406 #, c-format msgid "cannot set path in scalar" msgstr "задать путь в скаляре нельзя" -#: utils/adt/jsonfuncs.c:3697 +#: utils/adt/jsonfuncs.c:4359 #, c-format msgid "cannot delete path in scalar" msgstr "удалить путь в скаляре нельзя" -#: utils/adt/jsonfuncs.c:3867 +#: utils/adt/jsonfuncs.c:4529 #, c-format msgid "invalid concatenation of jsonb objects" msgstr "неверная конкатенация объектов jsonb" -#: utils/adt/jsonfuncs.c:3901 +#: utils/adt/jsonfuncs.c:4563 #, c-format msgid "path element at position %d is null" msgstr "элемент пути в позиции %d равен NULL" -#: utils/adt/jsonfuncs.c:3987 +#: utils/adt/jsonfuncs.c:4649 #, c-format msgid "cannot replace existing key" msgstr "заменить существующий ключ нельзя" -#: utils/adt/jsonfuncs.c:3988 +#: utils/adt/jsonfuncs.c:4650 #, c-format msgid "Try using the function jsonb_set to replace key value." msgstr "Попробуйте применить функцию jsonb_set для замены значения ключа." -#: utils/adt/jsonfuncs.c:4070 +#: utils/adt/jsonfuncs.c:4732 #, c-format msgid "path element at position %d is not an integer: \"%s\"" msgstr "элемент пути в позиции %d - не целочисленный: \"%s\"" @@ -22310,7 +22578,7 @@ msgstr "элемент пути в позиции %d - не целочислен msgid "levenshtein argument exceeds maximum length of %d characters" msgstr "длина аргумента levenshtein() превышает максимум (%d симв.)" -#: utils/adt/like.c:183 utils/adt/selfuncs.c:5505 +#: utils/adt/like.c:183 utils/adt/selfuncs.c:5589 #, c-format msgid "could not determine which collation to use for ILIKE" msgstr "не удалось определить, какой порядок сортировки использовать для ILIKE" @@ -22330,7 +22598,7 @@ msgstr "неверный защитный символ" msgid "Escape string must be empty or one character." msgstr "Защитный символ должен быть пустым или состоять из одного байта." -#: utils/adt/lockfuncs.c:545 +#: utils/adt/lockfuncs.c:664 #, c-format msgid "cannot use advisory locks during a parallel operation" msgstr "" @@ -22350,12 +22618,12 @@ msgstr "значение в macaddr8 не допускает преобразо #: utils/adt/mac8.c:555 #, c-format msgid "" -"Only addresses that have FF and FE as values in the 4th and 5th bytes, from " -"the left, for example: XX-XX-XX-FF-FE-XX-XX-XX, are eligible to be converted " +"Only addresses that have FF and FE as values in the 4th and 5th bytes from " +"the left, for example xx:xx:xx:ff:fe:xx:xx:xx, are eligible to be converted " "from macaddr8 to macaddr." msgstr "" "Преобразование из macaddr8 в macaddr возможно только для адресов, содержащих " -"FF и FE в 4-ом и 5-ом байтах слева, например: XX-XX-XX-FF-FE-XX-XX-XX." 
+"FF и FE в 4-ом и 5-ом байтах слева, например xx:xx:xx:ff:fe:xx:xx:xx." #: utils/adt/misc.c:238 #, c-format @@ -22601,9 +22869,9 @@ msgstr "конечное значение не может быть NaN" msgid "step size cannot be NaN" msgstr "размер шага не может быть NaN" -#: utils/adt/numeric.c:2589 utils/adt/numeric.c:5551 utils/adt/numeric.c:5996 -#: utils/adt/numeric.c:7700 utils/adt/numeric.c:8125 utils/adt/numeric.c:8240 -#: utils/adt/numeric.c:8313 +#: utils/adt/numeric.c:2589 utils/adt/numeric.c:5561 utils/adt/numeric.c:6006 +#: utils/adt/numeric.c:7710 utils/adt/numeric.c:8135 utils/adt/numeric.c:8249 +#: utils/adt/numeric.c:8322 #, c-format msgid "value overflows numeric format" msgstr "значение переполняет формат numeric" @@ -22623,12 +22891,17 @@ msgstr "нельзя преобразовать NaN в bigint" msgid "cannot convert NaN to smallint" msgstr "нельзя преобразовать NaN в smallint" -#: utils/adt/numeric.c:6066 +#: utils/adt/numeric.c:3079 utils/adt/numeric.c:3150 +#, c-format +msgid "cannot convert infinity to numeric" +msgstr "нельзя представить бесконечность в numeric" + +#: utils/adt/numeric.c:6076 #, c-format msgid "numeric field overflow" msgstr "переполнение поля numeric" -#: utils/adt/numeric.c:6067 +#: utils/adt/numeric.c:6077 #, c-format msgid "" "A field with precision %d, scale %d must round to an absolute value less " @@ -22678,19 +22951,19 @@ msgstr "значение перцентиля %g лежит не в диапаз msgid "Apply system library package updates." msgstr "Обновите пакет с системной библиотекой." -#: utils/adt/pg_locale.c:1239 +#: utils/adt/pg_locale.c:1249 #, c-format msgid "could not create locale \"%s\": %m" msgstr "не удалось создать локаль \"%s\": %m" -#: utils/adt/pg_locale.c:1242 +#: utils/adt/pg_locale.c:1252 #, c-format msgid "" "The operating system could not find any locale data for the locale name \"%s" "\"." msgstr "Операционная система не может найти данные локали с именем \"%s\"." -#: utils/adt/pg_locale.c:1339 +#: utils/adt/pg_locale.c:1353 #, c-format msgid "" "collations with different collate and ctype values are not supported on this " @@ -22699,37 +22972,44 @@ msgstr "" "правила сортировки с разными значениями collate и ctype не поддерживаются на " "этой платформе" -#: utils/adt/pg_locale.c:1348 +#: utils/adt/pg_locale.c:1362 #, c-format msgid "collation provider LIBC is not supported on this platform" msgstr "поставщик правил сортировки LIBC не поддерживается на этой платформе" -#: utils/adt/pg_locale.c:1361 utils/adt/pg_locale.c:1439 +#: utils/adt/pg_locale.c:1374 +#, c-format +msgid "" +"collations with different collate and ctype values are not supported by ICU" +msgstr "" +"ICU не поддерживает правила сортировки с разными значениями collate и ctype" + +#: utils/adt/pg_locale.c:1380 utils/adt/pg_locale.c:1468 #, c-format msgid "could not open collator for locale \"%s\": %s" msgstr "не удалось открыть сортировщик для локали \"%s\": %s" -#: utils/adt/pg_locale.c:1370 +#: utils/adt/pg_locale.c:1391 #, c-format msgid "ICU is not supported in this build" msgstr "ICU не поддерживается в данной сборке" -#: utils/adt/pg_locale.c:1371 +#: utils/adt/pg_locale.c:1392 #, c-format msgid "You need to rebuild PostgreSQL using --with-icu." msgstr "Необходимо перекомпилировать PostgreSQL с ключом --with-icu." 
-#: utils/adt/pg_locale.c:1388 +#: utils/adt/pg_locale.c:1412 #, c-format msgid "collation \"%s\" has no actual version, but a version was specified" msgstr "для правила сортировки \"%s\", лишённого версии, была задана версия" -#: utils/adt/pg_locale.c:1394 +#: utils/adt/pg_locale.c:1419 #, c-format msgid "collation \"%s\" has version mismatch" msgstr "несовпадение версии для правила сортировки \"%s\"" -#: utils/adt/pg_locale.c:1396 +#: utils/adt/pg_locale.c:1421 #, c-format msgid "" "The collation in the database was created using version %s, but the " @@ -22738,7 +23018,7 @@ msgstr "" "Правило сортировки в базе данных было создано с версией %s, но операционная " "версия предоставляет версию %s." -#: utils/adt/pg_locale.c:1399 +#: utils/adt/pg_locale.c:1424 #, c-format msgid "" "Rebuild all objects affected by this collation and run ALTER COLLATION %s " @@ -22748,27 +23028,27 @@ msgstr "" "ALTER COLLATION %s REFRESH VERSION либо соберите PostgreSQL с правильной " "версией библиотеки." -#: utils/adt/pg_locale.c:1479 +#: utils/adt/pg_locale.c:1508 #, c-format msgid "could not open ICU converter for encoding \"%s\": %s" msgstr "не удалось открыть преобразователь ICU для кодировки \"%s\": %s" -#: utils/adt/pg_locale.c:1499 +#: utils/adt/pg_locale.c:1539 utils/adt/pg_locale.c:1548 #, c-format msgid "ucnv_toUChars failed: %s" msgstr "ошибка ucnv_toUChars: %s" -#: utils/adt/pg_locale.c:1517 +#: utils/adt/pg_locale.c:1577 utils/adt/pg_locale.c:1586 #, c-format msgid "ucnv_fromUChars failed: %s" msgstr "ошибка ucnv_fromUChars: %s" -#: utils/adt/pg_locale.c:1689 +#: utils/adt/pg_locale.c:1759 #, c-format msgid "invalid multibyte character for locale" msgstr "неверный многобайтный символ для локали" -#: utils/adt/pg_locale.c:1690 +#: utils/adt/pg_locale.c:1760 #, c-format msgid "" "The server's LC_CTYPE locale is probably incompatible with the database " @@ -22872,7 +23152,7 @@ msgstr "Слишком много запятых." msgid "Junk after right parenthesis or bracket." msgstr "Мусор после правой скобки." -#: utils/adt/regexp.c:285 utils/adt/regexp.c:1344 utils/adt/varlena.c:3941 +#: utils/adt/regexp.c:285 utils/adt/regexp.c:1344 utils/adt/varlena.c:3967 #, c-format msgid "regular expression failed: %s" msgstr "ошибка в регулярном выражении: %s" @@ -22904,109 +23184,109 @@ msgstr "regexp_split_to_table не поддерживает глобальный msgid "regexp_split_to_array does not support the global option" msgstr "regexp_split_to_array не поддерживает глобальный поиск" -#: utils/adt/regproc.c:130 utils/adt/regproc.c:150 +#: utils/adt/regproc.c:106 #, c-format msgid "more than one function named \"%s\"" msgstr "имя \"%s\" имеют несколько функций" -#: utils/adt/regproc.c:589 utils/adt/regproc.c:609 +#: utils/adt/regproc.c:524 #, c-format msgid "more than one operator named %s" msgstr "имя %s имеют несколько операторов" -#: utils/adt/regproc.c:776 utils/adt/regproc.c:817 gram.y:7767 +#: utils/adt/regproc.c:691 utils/adt/regproc.c:732 gram.y:7844 #, c-format msgid "missing argument" msgstr "отсутствует аргумент" -#: utils/adt/regproc.c:777 utils/adt/regproc.c:818 gram.y:7768 +#: utils/adt/regproc.c:692 utils/adt/regproc.c:733 gram.y:7845 #, c-format msgid "Use NONE to denote the missing argument of a unary operator." msgstr "" "Чтобы обозначить отсутствующий аргумент унарного оператора, укажите NONE." 
-#: utils/adt/regproc.c:781 utils/adt/regproc.c:822 utils/adt/regproc.c:2008 -#: utils/adt/ruleutils.c:8790 utils/adt/ruleutils.c:8958 +#: utils/adt/regproc.c:696 utils/adt/regproc.c:737 utils/adt/regproc.c:1865 +#: utils/adt/ruleutils.c:8959 utils/adt/ruleutils.c:9127 #, c-format msgid "too many arguments" msgstr "слишком много аргументов" -#: utils/adt/regproc.c:782 utils/adt/regproc.c:823 +#: utils/adt/regproc.c:697 utils/adt/regproc.c:738 #, c-format msgid "Provide two argument types for operator." msgstr "Предоставьте для оператора два типа аргументов." -#: utils/adt/regproc.c:1596 utils/adt/regproc.c:1620 utils/adt/regproc.c:1717 -#: utils/adt/regproc.c:1741 utils/adt/regproc.c:1843 utils/adt/regproc.c:1848 -#: utils/adt/varlena.c:3196 utils/adt/varlena.c:3201 +#: utils/adt/regproc.c:1449 utils/adt/regproc.c:1473 utils/adt/regproc.c:1574 +#: utils/adt/regproc.c:1598 utils/adt/regproc.c:1700 utils/adt/regproc.c:1705 +#: utils/adt/varlena.c:3220 utils/adt/varlena.c:3225 #, c-format msgid "invalid name syntax" msgstr "ошибка синтаксиса в имени" -#: utils/adt/regproc.c:1906 +#: utils/adt/regproc.c:1763 #, c-format msgid "expected a left parenthesis" msgstr "ожидалась левая скобка" -#: utils/adt/regproc.c:1922 +#: utils/adt/regproc.c:1779 #, c-format msgid "expected a right parenthesis" msgstr "ожидалась правая скобка" -#: utils/adt/regproc.c:1941 +#: utils/adt/regproc.c:1798 #, c-format msgid "expected a type name" msgstr "ожидалось имя типа" -#: utils/adt/regproc.c:1973 +#: utils/adt/regproc.c:1830 #, c-format msgid "improper type name" msgstr "ошибочное имя типа" -#: utils/adt/ri_triggers.c:314 utils/adt/ri_triggers.c:371 -#: utils/adt/ri_triggers.c:790 utils/adt/ri_triggers.c:1013 -#: utils/adt/ri_triggers.c:1169 utils/adt/ri_triggers.c:1350 -#: utils/adt/ri_triggers.c:1515 utils/adt/ri_triggers.c:1691 -#: utils/adt/ri_triggers.c:1871 utils/adt/ri_triggers.c:2062 -#: utils/adt/ri_triggers.c:2120 utils/adt/ri_triggers.c:2225 -#: utils/adt/ri_triggers.c:2402 gram.y:3594 +#: utils/adt/ri_triggers.c:311 utils/adt/ri_triggers.c:368 +#: utils/adt/ri_triggers.c:787 utils/adt/ri_triggers.c:1010 +#: utils/adt/ri_triggers.c:1166 utils/adt/ri_triggers.c:1347 +#: utils/adt/ri_triggers.c:1512 utils/adt/ri_triggers.c:1688 +#: utils/adt/ri_triggers.c:1868 utils/adt/ri_triggers.c:2059 +#: utils/adt/ri_triggers.c:2117 utils/adt/ri_triggers.c:2222 +#: utils/adt/ri_triggers.c:2399 gram.y:3656 #, c-format msgid "MATCH PARTIAL not yet implemented" msgstr "выражение MATCH PARTIAL ещё не реализовано" -#: utils/adt/ri_triggers.c:343 utils/adt/ri_triggers.c:2490 -#: utils/adt/ri_triggers.c:3315 +#: utils/adt/ri_triggers.c:340 utils/adt/ri_triggers.c:2487 +#: utils/adt/ri_triggers.c:3312 #, c-format msgid "insert or update on table \"%s\" violates foreign key constraint \"%s\"" msgstr "" "INSERT или UPDATE в таблице \"%s\" нарушает ограничение внешнего ключа \"%s\"" -#: utils/adt/ri_triggers.c:346 utils/adt/ri_triggers.c:2493 +#: utils/adt/ri_triggers.c:343 utils/adt/ri_triggers.c:2490 #, c-format msgid "MATCH FULL does not allow mixing of null and nonnull key values." msgstr "MATCH FULL не позволяет смешивать в значении ключа null и не null." 
-#: utils/adt/ri_triggers.c:2732 +#: utils/adt/ri_triggers.c:2729 #, c-format msgid "function \"%s\" must be fired for INSERT" msgstr "функция \"%s\" должна запускаться для INSERT" -#: utils/adt/ri_triggers.c:2738 +#: utils/adt/ri_triggers.c:2735 #, c-format msgid "function \"%s\" must be fired for UPDATE" msgstr "функция \"%s\" должна запускаться для UPDATE" -#: utils/adt/ri_triggers.c:2744 +#: utils/adt/ri_triggers.c:2741 #, c-format msgid "function \"%s\" must be fired for DELETE" msgstr "функция \"%s\" должна запускаться для DELETE" -#: utils/adt/ri_triggers.c:2767 +#: utils/adt/ri_triggers.c:2764 #, c-format msgid "no pg_constraint entry for trigger \"%s\" on table \"%s\"" msgstr "для триггера \"%s\" таблицы \"%s\" нет записи pg_constraint" -#: utils/adt/ri_triggers.c:2769 +#: utils/adt/ri_triggers.c:2766 #, c-format msgid "" "Remove this referential integrity trigger and its mates, then do ALTER TABLE " @@ -23015,7 +23295,7 @@ msgstr "" "Удалите этот триггер ссылочной целостности и связанные объекты, а затем " "выполните ALTER TABLE ADD CONSTRAINT." -#: utils/adt/ri_triggers.c:3225 +#: utils/adt/ri_triggers.c:3222 #, c-format msgid "" "referential integrity query on \"%s\" from constraint \"%s\" on \"%s\" gave " @@ -23024,22 +23304,22 @@ msgstr "" "неожиданный результат запроса ссылочной целостности к \"%s\" из ограничения " "\"%s\" таблицы \"%s\"" -#: utils/adt/ri_triggers.c:3229 +#: utils/adt/ri_triggers.c:3226 #, c-format msgid "This is most likely due to a rule having rewritten the query." msgstr "Скорее всего это вызвано правилом, переписавшим запрос." -#: utils/adt/ri_triggers.c:3319 +#: utils/adt/ri_triggers.c:3316 #, c-format msgid "Key (%s)=(%s) is not present in table \"%s\"." msgstr "Ключ (%s)=(%s) отсутствует в таблице \"%s\"." -#: utils/adt/ri_triggers.c:3322 +#: utils/adt/ri_triggers.c:3319 #, c-format msgid "Key is not present in table \"%s\"." msgstr "Ключ отсутствует в таблице \"%s\"." -#: utils/adt/ri_triggers.c:3328 +#: utils/adt/ri_triggers.c:3325 #, c-format msgid "" "update or delete on table \"%s\" violates foreign key constraint \"%s\" on " @@ -23048,12 +23328,12 @@ msgstr "" "UPDATE или DELETE в таблице \"%s\" нарушает ограничение внешнего ключа \"%s" "\" таблицы \"%s\"" -#: utils/adt/ri_triggers.c:3333 +#: utils/adt/ri_triggers.c:3330 #, c-format msgid "Key (%s)=(%s) is still referenced from table \"%s\"." msgstr "На ключ (%s)=(%s) всё ещё есть ссылки в таблице \"%s\"." -#: utils/adt/ri_triggers.c:3336 +#: utils/adt/ri_triggers.c:3333 #, c-format msgid "Key is still referenced from table \"%s\"." msgstr "На ключ всё ещё есть ссылки в таблице \"%s\"." 
@@ -23116,142 +23396,142 @@ msgstr "не удалось сравнить различные типы сто msgid "cannot compare record types with different numbers of columns" msgstr "сравнивать типы записей с разным числом столбцов нельзя" -#: utils/adt/ruleutils.c:4565 +#: utils/adt/ruleutils.c:4668 #, c-format msgid "rule \"%s\" has unsupported event type %d" msgstr "правило \"%s\" имеет неподдерживаемый тип событий %d" -#: utils/adt/selfuncs.c:5490 +#: utils/adt/selfuncs.c:5574 #, c-format msgid "case insensitive matching not supported on type bytea" msgstr "регистронезависимое сравнение не поддерживается для типа bytea" -#: utils/adt/selfuncs.c:5592 +#: utils/adt/selfuncs.c:5676 #, c-format msgid "regular-expression matching not supported on type bytea" msgstr "сравнение с регулярными выражениями не поддерживается для типа bytea" -#: utils/adt/timestamp.c:106 +#: utils/adt/timestamp.c:107 #, c-format msgid "TIMESTAMP(%d)%s precision must not be negative" msgstr "TIMESTAMP(%d)%s: точность должна быть неотрицательна" -#: utils/adt/timestamp.c:112 +#: utils/adt/timestamp.c:113 #, c-format msgid "TIMESTAMP(%d)%s precision reduced to maximum allowed, %d" msgstr "TIMESTAMP(%d)%s: точность уменьшена до дозволенного максимума: %d" -#: utils/adt/timestamp.c:175 utils/adt/timestamp.c:415 +#: utils/adt/timestamp.c:176 utils/adt/timestamp.c:416 #, c-format msgid "timestamp out of range: \"%s\"" msgstr "timestamp вне диапазона: \"%s\"" -#: utils/adt/timestamp.c:193 utils/adt/timestamp.c:433 -#: utils/adt/timestamp.c:940 +#: utils/adt/timestamp.c:194 utils/adt/timestamp.c:434 +#: utils/adt/timestamp.c:941 #, c-format msgid "date/time value \"%s\" is no longer supported" msgstr "значение даты/времени \"%s\" более не поддерживается" -#: utils/adt/timestamp.c:361 +#: utils/adt/timestamp.c:362 #, c-format msgid "timestamp(%d) precision must be between %d and %d" msgstr "точность timestamp(%d) должна быть между %d и %d" -#: utils/adt/timestamp.c:483 +#: utils/adt/timestamp.c:484 #, c-format msgid "invalid input syntax for numeric time zone: \"%s\"" msgstr "неверный синтаксис для числового часового пояса: \"%s\"" -#: utils/adt/timestamp.c:485 +#: utils/adt/timestamp.c:486 #, c-format msgid "Numeric time zones must have \"-\" or \"+\" as first character." msgstr "" "Запись числового часового пояса должна начинаться с символа \"-\" или \"+\"." 
-#: utils/adt/timestamp.c:498 +#: utils/adt/timestamp.c:499 #, c-format msgid "numeric time zone \"%s\" out of range" msgstr "числовой часовой пояс \"%s\" вне диапазона" -#: utils/adt/timestamp.c:600 utils/adt/timestamp.c:610 -#: utils/adt/timestamp.c:618 +#: utils/adt/timestamp.c:601 utils/adt/timestamp.c:611 +#: utils/adt/timestamp.c:619 #, c-format msgid "timestamp out of range: %d-%02d-%02d %d:%02d:%02g" msgstr "timestamp вне диапазона: %d-%02d-%02d %d:%02d:%02g" -#: utils/adt/timestamp.c:719 +#: utils/adt/timestamp.c:720 #, c-format msgid "timestamp cannot be NaN" msgstr "timestamp не может быть NaN" -#: utils/adt/timestamp.c:737 utils/adt/timestamp.c:749 +#: utils/adt/timestamp.c:738 utils/adt/timestamp.c:750 #, c-format msgid "timestamp out of range: \"%g\"" msgstr "timestamp вне диапазона: \"%g\"" -#: utils/adt/timestamp.c:934 utils/adt/timestamp.c:1504 -#: utils/adt/timestamp.c:1917 utils/adt/timestamp.c:2964 -#: utils/adt/timestamp.c:2969 utils/adt/timestamp.c:2974 -#: utils/adt/timestamp.c:3024 utils/adt/timestamp.c:3031 -#: utils/adt/timestamp.c:3038 utils/adt/timestamp.c:3058 -#: utils/adt/timestamp.c:3065 utils/adt/timestamp.c:3072 -#: utils/adt/timestamp.c:3102 utils/adt/timestamp.c:3110 -#: utils/adt/timestamp.c:3154 utils/adt/timestamp.c:3477 -#: utils/adt/timestamp.c:3602 utils/adt/timestamp.c:3970 +#: utils/adt/timestamp.c:935 utils/adt/timestamp.c:1505 +#: utils/adt/timestamp.c:1918 utils/adt/timestamp.c:2994 +#: utils/adt/timestamp.c:2999 utils/adt/timestamp.c:3004 +#: utils/adt/timestamp.c:3054 utils/adt/timestamp.c:3061 +#: utils/adt/timestamp.c:3068 utils/adt/timestamp.c:3088 +#: utils/adt/timestamp.c:3095 utils/adt/timestamp.c:3102 +#: utils/adt/timestamp.c:3132 utils/adt/timestamp.c:3140 +#: utils/adt/timestamp.c:3184 utils/adt/timestamp.c:3507 +#: utils/adt/timestamp.c:3632 utils/adt/timestamp.c:4000 #, c-format msgid "interval out of range" msgstr "interval вне диапазона" -#: utils/adt/timestamp.c:1067 utils/adt/timestamp.c:1100 +#: utils/adt/timestamp.c:1068 utils/adt/timestamp.c:1101 #, c-format msgid "invalid INTERVAL type modifier" msgstr "неверный модификатор типа INTERVAL" -#: utils/adt/timestamp.c:1083 +#: utils/adt/timestamp.c:1084 #, c-format msgid "INTERVAL(%d) precision must not be negative" msgstr "INTERVAL(%d): точность должна быть неотрицательна" -#: utils/adt/timestamp.c:1089 +#: utils/adt/timestamp.c:1090 #, c-format msgid "INTERVAL(%d) precision reduced to maximum allowed, %d" msgstr "INTERVAL(%d): точность уменьшена до максимально возможной: %d" -#: utils/adt/timestamp.c:1461 +#: utils/adt/timestamp.c:1462 #, c-format msgid "interval(%d) precision must be between %d and %d" msgstr "точность interval(%d) должна быть между %d и %d" -#: utils/adt/timestamp.c:2565 +#: utils/adt/timestamp.c:2595 #, c-format msgid "cannot subtract infinite timestamps" msgstr "вычитать бесконечные значения timestamp нельзя" -#: utils/adt/timestamp.c:3721 utils/adt/timestamp.c:4230 -#: utils/adt/timestamp.c:4397 utils/adt/timestamp.c:4418 +#: utils/adt/timestamp.c:3751 utils/adt/timestamp.c:4260 +#: utils/adt/timestamp.c:4427 utils/adt/timestamp.c:4448 #, c-format msgid "timestamp units \"%s\" not supported" msgstr "единицы timestamp \"%s\" не поддерживаются" -#: utils/adt/timestamp.c:3735 utils/adt/timestamp.c:4184 -#: utils/adt/timestamp.c:4428 +#: utils/adt/timestamp.c:3765 utils/adt/timestamp.c:4214 +#: utils/adt/timestamp.c:4458 #, c-format msgid "timestamp units \"%s\" not recognized" msgstr "единицы timestamp \"%s\" не распознаны" -#: utils/adt/timestamp.c:3867 
utils/adt/timestamp.c:4225 -#: utils/adt/timestamp.c:4598 utils/adt/timestamp.c:4620 +#: utils/adt/timestamp.c:3897 utils/adt/timestamp.c:4255 +#: utils/adt/timestamp.c:4628 utils/adt/timestamp.c:4650 #, c-format msgid "timestamp with time zone units \"%s\" not supported" msgstr "единицы timestamp с часовым поясом \"%s\" не поддерживаются" -#: utils/adt/timestamp.c:3884 utils/adt/timestamp.c:4179 -#: utils/adt/timestamp.c:4629 +#: utils/adt/timestamp.c:3914 utils/adt/timestamp.c:4209 +#: utils/adt/timestamp.c:4659 #, c-format msgid "timestamp with time zone units \"%s\" not recognized" msgstr "единицы timestamp с часовым поясом \"%s\" не распознаны" -#: utils/adt/timestamp.c:3957 +#: utils/adt/timestamp.c:3987 #, c-format msgid "" "interval units \"%s\" not supported because months usually have fractional " @@ -23260,12 +23540,12 @@ msgstr "" "единицы интервала \"%s\" не поддерживаются, так как в месяцах дробное число " "недель" -#: utils/adt/timestamp.c:3963 utils/adt/timestamp.c:4723 +#: utils/adt/timestamp.c:3993 utils/adt/timestamp.c:4753 #, c-format msgid "interval units \"%s\" not supported" msgstr "единицы interval \"%s\" не поддерживаются" -#: utils/adt/timestamp.c:3979 utils/adt/timestamp.c:4746 +#: utils/adt/timestamp.c:4009 utils/adt/timestamp.c:4776 #, c-format msgid "interval units \"%s\" not recognized" msgstr "единицы interval \"%s\" не распознаны" @@ -23466,26 +23746,14 @@ msgstr "неверная информация о позиции в tsvector: \"% #: utils/adt/txid.c:135 #, c-format -msgid "transaction ID " -msgstr "идентификатор транзакции " +msgid "transaction ID %s is in the future" +msgstr "идентификатор транзакции %s относится к будущему" #: utils/adt/txid.c:624 #, c-format msgid "invalid external txid_snapshot data" msgstr "неверное внешнее представление txid_snapshot" -#: utils/adt/txid.c:758 utils/adt/txid.c:779 -msgid "in progress" -msgstr "выполняется" - -#: utils/adt/txid.c:760 -msgid "committed" -msgstr "зафиксирована" - -#: utils/adt/txid.c:762 utils/adt/txid.c:777 -msgid "aborted" -msgstr "прервана" - #: utils/adt/varbit.c:58 utils/adt/varchar.c:51 #, c-format msgid "length for type %s must be at least 1" @@ -23527,8 +23795,8 @@ msgid "bit string too long for type bit varying(%d)" msgstr "строка битов не умещается в тип bit varying(%d)" #: utils/adt/varbit.c:1066 utils/adt/varbit.c:1168 utils/adt/varlena.c:841 -#: utils/adt/varlena.c:905 utils/adt/varlena.c:1049 utils/adt/varlena.c:2861 -#: utils/adt/varlena.c:2928 +#: utils/adt/varlena.c:905 utils/adt/varlena.c:1049 utils/adt/varlena.c:2885 +#: utils/adt/varlena.c:2952 #, c-format msgid "negative substring length not allowed" msgstr "подстрока должна иметь неотрицательную длину" @@ -23554,7 +23822,7 @@ msgstr "" msgid "bit index %d out of valid range (0..%d)" msgstr "индекс бита %d вне диапазона 0..%d" -#: utils/adt/varbit.c:1812 utils/adt/varlena.c:3120 +#: utils/adt/varbit.c:1812 utils/adt/varlena.c:3144 #, c-format msgid "new bit must be 0 or 1" msgstr "значением бита должен быть 0 или 1" @@ -23569,80 +23837,80 @@ msgstr "значение не умещается в тип character(%d)" msgid "value too long for type character varying(%d)" msgstr "значение не умещается в тип character varying(%d)" -#: utils/adt/varlena.c:1416 utils/adt/varlena.c:1859 +#: utils/adt/varlena.c:1416 utils/adt/varlena.c:1853 #, c-format msgid "could not determine which collation to use for string comparison" msgstr "" "не удалось определить, какое правило сортировки использовать для сравнения " "строк" -#: utils/adt/varlena.c:1472 utils/adt/varlena.c:1485 +#: 
utils/adt/varlena.c:1473 utils/adt/varlena.c:1486 #, c-format msgid "could not convert string to UTF-16: error code %lu" msgstr "не удалось преобразовать строку в UTF-16 (код ошибки: %lu)" -#: utils/adt/varlena.c:1500 +#: utils/adt/varlena.c:1501 #, c-format msgid "could not compare Unicode strings: %m" msgstr "не удалось сравнить строки в Unicode: %m" -#: utils/adt/varlena.c:1555 utils/adt/varlena.c:2139 +#: utils/adt/varlena.c:1556 utils/adt/varlena.c:2149 #, c-format msgid "collation failed: %s" msgstr "ошибка в библиотеке сортировки: %s" -#: utils/adt/varlena.c:2349 +#: utils/adt/varlena.c:2367 #, c-format msgid "sort key generation failed: %s" msgstr "не удалось сгенерировать ключ сортировки: %s" -#: utils/adt/varlena.c:3006 utils/adt/varlena.c:3037 utils/adt/varlena.c:3072 -#: utils/adt/varlena.c:3108 +#: utils/adt/varlena.c:3030 utils/adt/varlena.c:3061 utils/adt/varlena.c:3096 +#: utils/adt/varlena.c:3132 #, c-format msgid "index %d out of valid range, 0..%d" msgstr "индекс %d вне диапазона 0..%d" -#: utils/adt/varlena.c:4037 +#: utils/adt/varlena.c:4063 #, c-format msgid "field position must be greater than zero" msgstr "позиция поля должна быть больше нуля" -#: utils/adt/varlena.c:4916 +#: utils/adt/varlena.c:4953 #, c-format msgid "unterminated format() type specifier" msgstr "незавершённый спецификатор типа format()" -#: utils/adt/varlena.c:4917 utils/adt/varlena.c:5051 utils/adt/varlena.c:5172 +#: utils/adt/varlena.c:4954 utils/adt/varlena.c:5088 utils/adt/varlena.c:5209 #, c-format msgid "For a single \"%%\" use \"%%%%\"." msgstr "Для представления одного знака \"%%\" запишите \"%%%%\"." -#: utils/adt/varlena.c:5049 utils/adt/varlena.c:5170 +#: utils/adt/varlena.c:5086 utils/adt/varlena.c:5207 #, c-format msgid "unrecognized format() type specifier \"%c\"" msgstr "нераспознанный спецификатор типа format(): \"%c\"" -#: utils/adt/varlena.c:5062 utils/adt/varlena.c:5119 +#: utils/adt/varlena.c:5099 utils/adt/varlena.c:5156 #, c-format msgid "too few arguments for format()" msgstr "мало аргументов для format()" -#: utils/adt/varlena.c:5214 utils/adt/varlena.c:5397 +#: utils/adt/varlena.c:5251 utils/adt/varlena.c:5434 #, c-format msgid "number is out of range" msgstr "число вне диапазона" -#: utils/adt/varlena.c:5278 utils/adt/varlena.c:5306 +#: utils/adt/varlena.c:5315 utils/adt/varlena.c:5343 #, c-format msgid "format specifies argument 0, but arguments are numbered from 1" msgstr "формат ссылается на аргумент 0, но аргументы нумеруются с 1" -#: utils/adt/varlena.c:5299 +#: utils/adt/varlena.c:5336 #, c-format msgid "width argument position must be ended by \"$\"" msgstr "указание аргумента ширины должно оканчиваться \"$\"" -#: utils/adt/varlena.c:5344 +#: utils/adt/varlena.c:5381 #, c-format msgid "null values cannot be formatted as an SQL identifier" msgstr "значения null нельзя представить в виде SQL-идентификатора" @@ -23657,73 +23925,73 @@ msgstr "аргумент ntile должен быть больше нуля" msgid "argument of nth_value must be greater than zero" msgstr "аргумент nth_value должен быть больше нуля" -#: utils/adt/xml.c:216 +#: utils/adt/xml.c:220 #, c-format msgid "unsupported XML feature" msgstr "XML-функции не поддерживаются" -#: utils/adt/xml.c:217 +#: utils/adt/xml.c:221 #, c-format msgid "This functionality requires the server to be built with libxml support." msgstr "Для этой функциональности в сервере не хватает поддержки libxml." -#: utils/adt/xml.c:218 +#: utils/adt/xml.c:222 #, c-format msgid "You need to rebuild PostgreSQL using --with-libxml." 
msgstr "Необходимо перекомпилировать PostgreSQL с ключом --with-libxml." -#: utils/adt/xml.c:237 utils/mb/mbutils.c:523 +#: utils/adt/xml.c:241 utils/mb/mbutils.c:523 #, c-format msgid "invalid encoding name \"%s\"" msgstr "неверное имя кодировки: \"%s\"" -#: utils/adt/xml.c:480 utils/adt/xml.c:485 +#: utils/adt/xml.c:484 utils/adt/xml.c:489 #, c-format msgid "invalid XML comment" msgstr "ошибка в XML-комментарии" -#: utils/adt/xml.c:614 +#: utils/adt/xml.c:618 #, c-format msgid "not an XML document" msgstr "не XML-документ" -#: utils/adt/xml.c:773 utils/adt/xml.c:796 +#: utils/adt/xml.c:777 utils/adt/xml.c:800 #, c-format msgid "invalid XML processing instruction" msgstr "неправильная XML-инструкция обработки (PI)" -#: utils/adt/xml.c:774 +#: utils/adt/xml.c:778 #, c-format msgid "XML processing instruction target name cannot be \"%s\"." msgstr "назначением XML-инструкции обработки (PI) не может быть \"%s\"." -#: utils/adt/xml.c:797 +#: utils/adt/xml.c:801 #, c-format msgid "XML processing instruction cannot contain \"?>\"." msgstr "XML-инструкция обработки (PI) не может содержать \"?>\"." -#: utils/adt/xml.c:876 +#: utils/adt/xml.c:880 #, c-format msgid "xmlvalidate is not implemented" msgstr "функция xmlvalidate не реализована" -#: utils/adt/xml.c:955 +#: utils/adt/xml.c:959 #, c-format msgid "could not initialize XML library" msgstr "не удалось инициализировать библиотеку XML" -#: utils/adt/xml.c:956 +#: utils/adt/xml.c:960 #, c-format msgid "" "libxml2 has incompatible char type: sizeof(char)=%u, sizeof(xmlChar)=%u." msgstr "другой тип char в libxml2: sizeof(char)=%u, sizeof(xmlChar)=%u." -#: utils/adt/xml.c:1042 +#: utils/adt/xml.c:1046 #, c-format msgid "could not set up XML error handler" msgstr "не удалось установить обработчик XML-ошибок" -#: utils/adt/xml.c:1043 +#: utils/adt/xml.c:1047 #, c-format msgid "" "This probably indicates that the version of libxml2 being used is not " @@ -23732,110 +24000,110 @@ msgstr "" "Возможно это означает, что используемая версия libxml2 не совместима с " "заголовочными файлами libxml2, с которыми был собран PostgreSQL." -#: utils/adt/xml.c:1793 +#: utils/adt/xml.c:1797 msgid "Invalid character value." msgstr "Неверный символ." -#: utils/adt/xml.c:1796 +#: utils/adt/xml.c:1800 msgid "Space required." msgstr "Требуется пробел." -#: utils/adt/xml.c:1799 +#: utils/adt/xml.c:1803 msgid "standalone accepts only 'yes' or 'no'." msgstr "значениями атрибута standalone могут быть только 'yes' и 'no'." -#: utils/adt/xml.c:1802 +#: utils/adt/xml.c:1806 msgid "Malformed declaration: missing version." msgstr "Ошибочное объявление: не указана версия." -#: utils/adt/xml.c:1805 +#: utils/adt/xml.c:1809 msgid "Missing encoding in text declaration." msgstr "В объявлении не указана кодировка." -#: utils/adt/xml.c:1808 +#: utils/adt/xml.c:1812 msgid "Parsing XML declaration: '?>' expected." msgstr "Ошибка при разборе XML-объявления: ожидается '?>'." -#: utils/adt/xml.c:1811 +#: utils/adt/xml.c:1815 #, c-format msgid "Unrecognized libxml error code: %d." msgstr "Нераспознанный код ошибки libxml: %d." -#: utils/adt/xml.c:2086 +#: utils/adt/xml.c:2090 #, c-format msgid "XML does not support infinite date values." msgstr "XML не поддерживает бесконечность в датах." -#: utils/adt/xml.c:2108 utils/adt/xml.c:2135 +#: utils/adt/xml.c:2112 utils/adt/xml.c:2139 #, c-format msgid "XML does not support infinite timestamp values." msgstr "XML не поддерживает бесконечность в timestamp." 
-#: utils/adt/xml.c:2538 +#: utils/adt/xml.c:2551 #, c-format msgid "invalid query" msgstr "неверный запрос" -#: utils/adt/xml.c:3857 +#: utils/adt/xml.c:3871 #, c-format msgid "invalid array for XML namespace mapping" msgstr "неправильный массив с сопоставлениями пространств имён XML" -#: utils/adt/xml.c:3858 +#: utils/adt/xml.c:3872 #, c-format msgid "" "The array must be two-dimensional with length of the second axis equal to 2." msgstr "Массив должен быть двухмерным и содержать 2 элемента по второй оси." -#: utils/adt/xml.c:3882 +#: utils/adt/xml.c:3896 #, c-format msgid "empty XPath expression" msgstr "пустое выражение XPath" -#: utils/adt/xml.c:3926 +#: utils/adt/xml.c:3951 #, c-format msgid "neither namespace name nor URI may be null" msgstr "ни префикс, ни URI пространства имён не может быть null" -#: utils/adt/xml.c:3933 +#: utils/adt/xml.c:3958 #, c-format msgid "could not register XML namespace with name \"%s\" and URI \"%s\"" msgstr "" "не удалось зарегистрировать пространство имён XML с префиксом \"%s\" и URI " "\"%s\"" -#: utils/adt/xml.c:4287 +#: utils/adt/xml.c:4312 #, c-format msgid "DEFAULT namespace is not supported" msgstr "пространство имён DEFAULT не поддерживается" -#: utils/adt/xml.c:4316 +#: utils/adt/xml.c:4341 #, c-format msgid "row path filter must not be empty string" msgstr "путь отбираемых строк не должен быть пустым" -#: utils/adt/xml.c:4347 +#: utils/adt/xml.c:4372 #, c-format msgid "column path filter must not be empty string" msgstr "путь отбираемого столбца не должен быть пустым" -#: utils/adt/xml.c:4530 +#: utils/adt/xml.c:4554 #, c-format msgid "more than one value returned by column XPath expression" msgstr "выражение XPath, отбирающее столбец, возвратило более одного значения" -#: utils/cache/lsyscache.c:2580 utils/cache/lsyscache.c:2613 -#: utils/cache/lsyscache.c:2646 utils/cache/lsyscache.c:2679 +#: utils/cache/lsyscache.c:2612 utils/cache/lsyscache.c:2645 +#: utils/cache/lsyscache.c:2678 utils/cache/lsyscache.c:2711 #, c-format msgid "type %s is only a shell" msgstr "тип %s - лишь оболочка" -#: utils/cache/lsyscache.c:2585 +#: utils/cache/lsyscache.c:2617 #, c-format msgid "no input function available for type %s" msgstr "для типа %s нет функции ввода" -#: utils/cache/lsyscache.c:2618 +#: utils/cache/lsyscache.c:2650 #, c-format msgid "no output function available for type %s" msgstr "для типа %s нет функции вывода" @@ -23845,17 +24113,17 @@ msgstr "для типа %s нет функции вывода" msgid "cached plan must not change result type" msgstr "в кешированном плане не должен изменяться тип результата" -#: utils/cache/relcache.c:5779 +#: utils/cache/relcache.c:5800 #, c-format msgid "could not create relation-cache initialization file \"%s\": %m" msgstr "создать файл инициализации для кеша отношений \"%s\" не удалось: %m" -#: utils/cache/relcache.c:5781 +#: utils/cache/relcache.c:5802 #, c-format msgid "Continuing anyway, but there's something wrong." msgstr "Продолжаем всё равно, хотя что-то не так." 
-#: utils/cache/relcache.c:6051 +#: utils/cache/relcache.c:6072 #, c-format msgid "could not remove cache file \"%s\": %m" msgstr "не удалось стереть файл кеша \"%s\": %m" @@ -23902,12 +24170,12 @@ msgstr "" msgid "could not close relation mapping file \"%s\": %m" msgstr "закрыть файл сопоставления отношений \"%s\" не удалось: %m" -#: utils/cache/typcache.c:1223 +#: utils/cache/typcache.c:1273 #, c-format msgid "type %s is not composite" msgstr "тип %s не является составным" -#: utils/cache/typcache.c:1237 +#: utils/cache/typcache.c:1287 #, c-format msgid "record type has not been registered" msgstr "тип записи не зарегистрирован" @@ -23938,174 +24206,174 @@ msgstr "открыть файл \"%s\" как stderr не удалось: %m" msgid "could not reopen file \"%s\" as stdout: %m" msgstr "открыть файл \"%s\" как stdout не удалось: %m" -#: utils/error/elog.c:2389 utils/error/elog.c:2406 utils/error/elog.c:2422 +#: utils/error/elog.c:2394 utils/error/elog.c:2411 utils/error/elog.c:2427 msgid "[unknown]" msgstr "[н/д]" -#: utils/error/elog.c:2882 utils/error/elog.c:3185 utils/error/elog.c:3293 +#: utils/error/elog.c:2887 utils/error/elog.c:3190 utils/error/elog.c:3298 msgid "missing error text" msgstr "отсутствует текст ошибки" -#: utils/error/elog.c:2885 utils/error/elog.c:2888 utils/error/elog.c:3296 -#: utils/error/elog.c:3299 +#: utils/error/elog.c:2890 utils/error/elog.c:2893 utils/error/elog.c:3301 +#: utils/error/elog.c:3304 #, c-format msgid " at character %d" msgstr " (символ %d)" -#: utils/error/elog.c:2898 utils/error/elog.c:2905 +#: utils/error/elog.c:2903 utils/error/elog.c:2910 msgid "DETAIL: " msgstr "ПОДРОБНОСТИ: " -#: utils/error/elog.c:2912 +#: utils/error/elog.c:2917 msgid "HINT: " msgstr "ПОДСКАЗКА: " -#: utils/error/elog.c:2919 +#: utils/error/elog.c:2924 msgid "QUERY: " msgstr "ЗАПРОС: " -#: utils/error/elog.c:2926 +#: utils/error/elog.c:2931 msgid "CONTEXT: " msgstr "КОНТЕКСТ: " -#: utils/error/elog.c:2936 +#: utils/error/elog.c:2941 #, c-format msgid "LOCATION: %s, %s:%d\n" msgstr "ПОЛОЖЕНИЕ: %s, %s:%d\n" -#: utils/error/elog.c:2943 +#: utils/error/elog.c:2948 #, c-format msgid "LOCATION: %s:%d\n" msgstr "ПОЛОЖЕНИЕ: %s:%d\n" -#: utils/error/elog.c:2957 +#: utils/error/elog.c:2962 msgid "STATEMENT: " msgstr "ОПЕРАТОР: " #. translator: This string will be truncated at 47 #. characters expanded. 
-#: utils/error/elog.c:3414 +#: utils/error/elog.c:3419 #, c-format msgid "operating system error %d" msgstr "ошибка ОС %d" -#: utils/error/elog.c:3612 +#: utils/error/elog.c:3617 msgid "DEBUG" msgstr "ОТЛАДКА" -#: utils/error/elog.c:3616 +#: utils/error/elog.c:3621 msgid "LOG" msgstr "СООБЩЕНИЕ" -#: utils/error/elog.c:3619 +#: utils/error/elog.c:3624 msgid "INFO" msgstr "ИНФОРМАЦИЯ" -#: utils/error/elog.c:3622 +#: utils/error/elog.c:3627 msgid "NOTICE" msgstr "ЗАМЕЧАНИЕ" -#: utils/error/elog.c:3625 +#: utils/error/elog.c:3630 msgid "WARNING" msgstr "ПРЕДУПРЕЖДЕНИЕ" -#: utils/error/elog.c:3628 +#: utils/error/elog.c:3633 msgid "ERROR" msgstr "ОШИБКА" -#: utils/error/elog.c:3631 +#: utils/error/elog.c:3636 msgid "FATAL" msgstr "ВАЖНО" -#: utils/error/elog.c:3634 +#: utils/error/elog.c:3639 msgid "PANIC" msgstr "ПАНИКА" -#: utils/fmgr/dfmgr.c:117 +#: utils/fmgr/dfmgr.c:121 #, c-format msgid "could not find function \"%s\" in file \"%s\"" msgstr "не удалось найти функцию \"%s\" в файле \"%s\"" -#: utils/fmgr/dfmgr.c:196 utils/fmgr/dfmgr.c:413 utils/fmgr/dfmgr.c:461 +#: utils/fmgr/dfmgr.c:201 utils/fmgr/dfmgr.c:418 utils/fmgr/dfmgr.c:466 #, c-format msgid "could not access file \"%s\": %m" msgstr "нет доступа к файлу \"%s\": %m" -#: utils/fmgr/dfmgr.c:234 +#: utils/fmgr/dfmgr.c:239 #, c-format msgid "could not load library \"%s\": %s" msgstr "загрузить библиотеку \"%s\" не удалось: %s" -#: utils/fmgr/dfmgr.c:266 +#: utils/fmgr/dfmgr.c:271 #, c-format msgid "incompatible library \"%s\": missing magic block" msgstr "несовместимая библиотека \"%s\": нет отличительного блока" -#: utils/fmgr/dfmgr.c:268 +#: utils/fmgr/dfmgr.c:273 #, c-format msgid "Extension libraries are required to use the PG_MODULE_MAGIC macro." msgstr "Внешние библиотеки должны использовать макрос PG_MODULE_MAGIC." -#: utils/fmgr/dfmgr.c:314 +#: utils/fmgr/dfmgr.c:319 #, c-format msgid "incompatible library \"%s\": version mismatch" msgstr "несовместимая библиотека \"%s\": несовпадение версий" -#: utils/fmgr/dfmgr.c:316 +#: utils/fmgr/dfmgr.c:321 #, c-format msgid "Server is version %d, library is version %s." msgstr "Версия сервера: %d, версия библиотеки: %s." -#: utils/fmgr/dfmgr.c:333 +#: utils/fmgr/dfmgr.c:338 #, c-format msgid "Server has FUNC_MAX_ARGS = %d, library has %d." msgstr "В сервере FUNC_MAX_ARGS = %d, в библиотеке: %d." -#: utils/fmgr/dfmgr.c:342 +#: utils/fmgr/dfmgr.c:347 #, c-format msgid "Server has INDEX_MAX_KEYS = %d, library has %d." msgstr "В сервере INDEX_MAX_KEYS = %d, в библиотеке: %d." -#: utils/fmgr/dfmgr.c:351 +#: utils/fmgr/dfmgr.c:356 #, c-format msgid "Server has NAMEDATALEN = %d, library has %d." msgstr "В сервере NAMEDATALEN = %d, в библиотеке: %d." -#: utils/fmgr/dfmgr.c:360 +#: utils/fmgr/dfmgr.c:365 #, c-format msgid "Server has FLOAT4PASSBYVAL = %s, library has %s." msgstr "В сервере FLOAT4PASSBYVAL = %s, в библиотеке: %s." -#: utils/fmgr/dfmgr.c:369 +#: utils/fmgr/dfmgr.c:374 #, c-format msgid "Server has FLOAT8PASSBYVAL = %s, library has %s." msgstr "В сервере FLOAT8PASSBYVAL = %s, в библиотеке: %s." -#: utils/fmgr/dfmgr.c:376 +#: utils/fmgr/dfmgr.c:381 msgid "Magic block has unexpected length or padding difference." msgstr "Отличительный блок имеет неверную длину или дополнен по-другому." 
-#: utils/fmgr/dfmgr.c:379 +#: utils/fmgr/dfmgr.c:384 #, c-format msgid "incompatible library \"%s\": magic block mismatch" msgstr "несовместимая библиотека \"%s\": несоответствие отличительного блока" -#: utils/fmgr/dfmgr.c:543 +#: utils/fmgr/dfmgr.c:548 #, c-format msgid "access to library \"%s\" is not allowed" msgstr "доступ к библиотеке \"%s\" не разрешён" -#: utils/fmgr/dfmgr.c:569 +#: utils/fmgr/dfmgr.c:574 #, c-format msgid "invalid macro name in dynamic library path: %s" msgstr "неправильный макрос в пути динамической библиотеки: %s" -#: utils/fmgr/dfmgr.c:609 +#: utils/fmgr/dfmgr.c:614 #, c-format msgid "zero-length component in parameter \"dynamic_library_path\"" msgstr "параметр dynamic_library_path содержит компонент нулевой длины" -#: utils/fmgr/dfmgr.c:628 +#: utils/fmgr/dfmgr.c:633 #, c-format msgid "component in parameter \"dynamic_library_path\" is not an absolute path" msgstr "" @@ -24165,69 +24433,69 @@ msgstr "псевдоним столбца не указан" msgid "could not determine row description for function returning record" msgstr "не удалось определить описание строки для функции, возвращающей запись" -#: utils/init/miscinit.c:122 +#: utils/init/miscinit.c:123 #, c-format msgid "could not change directory to \"%s\": %m" msgstr "не удалось перейти в каталог \"%s\": %m" -#: utils/init/miscinit.c:450 utils/misc/guc.c:6095 +#: utils/init/miscinit.c:451 utils/misc/guc.c:6126 #, c-format msgid "cannot set parameter \"%s\" within security-restricted operation" msgstr "" "параметр \"%s\" нельзя задать в рамках операции с ограничениями по " "безопасности" -#: utils/init/miscinit.c:511 +#: utils/init/miscinit.c:512 #, c-format msgid "role with OID %u does not exist" msgstr "роль с OID %u не существует" -#: utils/init/miscinit.c:541 +#: utils/init/miscinit.c:542 #, c-format msgid "role \"%s\" is not permitted to log in" msgstr "для роли \"%s\" вход запрещён" -#: utils/init/miscinit.c:559 +#: utils/init/miscinit.c:560 #, c-format msgid "too many connections for role \"%s\"" msgstr "слишком много подключений для роли \"%s\"" -#: utils/init/miscinit.c:619 +#: utils/init/miscinit.c:620 #, c-format msgid "permission denied to set session authorization" msgstr "нет прав для смены объекта авторизации в сеансе" -#: utils/init/miscinit.c:702 +#: utils/init/miscinit.c:703 #, c-format msgid "invalid role OID: %u" msgstr "неверный OID роли: %u" -#: utils/init/miscinit.c:756 +#: utils/init/miscinit.c:757 #, c-format msgid "database system is shut down" msgstr "система БД выключена" -#: utils/init/miscinit.c:843 +#: utils/init/miscinit.c:844 #, c-format msgid "could not create lock file \"%s\": %m" msgstr "не удалось создать файл блокировки \"%s\": %m" -#: utils/init/miscinit.c:857 +#: utils/init/miscinit.c:858 #, c-format msgid "could not open lock file \"%s\": %m" msgstr "не удалось открыть файл блокировки \"%s\": %m" -#: utils/init/miscinit.c:864 +#: utils/init/miscinit.c:865 #, c-format msgid "could not read lock file \"%s\": %m" msgstr "не удалось прочитать файл блокировки \"%s\": %m" -#: utils/init/miscinit.c:873 +#: utils/init/miscinit.c:874 #, c-format msgid "lock file \"%s\" is empty" msgstr "файл блокировки \"%s\" пуст" -#: utils/init/miscinit.c:874 +#: utils/init/miscinit.c:875 #, c-format msgid "" "Either another server is starting, or the lock file is the remnant of a " @@ -24236,40 +24504,40 @@ msgstr "" "Либо сейчас запускается другой сервер, либо этот файл остался в результате " "сбоя при предыдущем запуске." 
-#: utils/init/miscinit.c:921 +#: utils/init/miscinit.c:922 #, c-format msgid "lock file \"%s\" already exists" msgstr "файл блокировки \"%s\" уже существует" -#: utils/init/miscinit.c:925 +#: utils/init/miscinit.c:926 #, c-format msgid "Is another postgres (PID %d) running in data directory \"%s\"?" msgstr "Другой экземпляр postgres (PID %d) работает с каталогом данных \"%s\"?" -#: utils/init/miscinit.c:927 +#: utils/init/miscinit.c:928 #, c-format msgid "Is another postmaster (PID %d) running in data directory \"%s\"?" msgstr "" "Другой экземпляр postmaster (PID %d) работает с каталогом данных \"%s\"?" -#: utils/init/miscinit.c:930 +#: utils/init/miscinit.c:931 #, c-format msgid "Is another postgres (PID %d) using socket file \"%s\"?" msgstr "Другой экземпляр postgres (PID %d) использует файл сокета \"%s\"?" -#: utils/init/miscinit.c:932 +#: utils/init/miscinit.c:933 #, c-format msgid "Is another postmaster (PID %d) using socket file \"%s\"?" msgstr "Другой экземпляр postmaster (PID %d) использует файл сокета \"%s\"?" -#: utils/init/miscinit.c:968 +#: utils/init/miscinit.c:969 #, c-format msgid "pre-existing shared memory block (key %lu, ID %lu) is still in use" msgstr "" "ранее выделенный блок разделяемой памяти (ключ %lu, ID %lu) по-прежнему " "используется" -#: utils/init/miscinit.c:971 +#: utils/init/miscinit.c:972 #, c-format msgid "" "If you're sure there are no old server processes still running, remove the " @@ -24278,12 +24546,12 @@ msgstr "" "Если вы уверены, что процессов старого сервера уже не осталось, освободите " "этот блок разделяемой памяти или просто удалите файл \"%s\"." -#: utils/init/miscinit.c:987 +#: utils/init/miscinit.c:988 #, c-format msgid "could not remove old lock file \"%s\": %m" msgstr "не удалось стереть старый файл блокировки \"%s\": %m" -#: utils/init/miscinit.c:989 +#: utils/init/miscinit.c:990 #, c-format msgid "" "The file seems accidentally left over, but it could not be removed. Please " @@ -24292,48 +24560,48 @@ msgstr "" "Кажется, файл сохранился по ошибке, но удалить его не получилось. " "Пожалуйста, удалите файл вручную и повторите попытку." -#: utils/init/miscinit.c:1026 utils/init/miscinit.c:1040 -#: utils/init/miscinit.c:1051 +#: utils/init/miscinit.c:1027 utils/init/miscinit.c:1041 +#: utils/init/miscinit.c:1052 #, c-format msgid "could not write lock file \"%s\": %m" msgstr "не удалось записать файл блокировки \"%s\": %m" -#: utils/init/miscinit.c:1182 utils/init/miscinit.c:1318 utils/misc/guc.c:8902 +#: utils/init/miscinit.c:1184 utils/init/miscinit.c:1327 utils/misc/guc.c:8931 #, c-format msgid "could not read from file \"%s\": %m" msgstr "не удалось прочитать файл \"%s\": %m" -#: utils/init/miscinit.c:1306 +#: utils/init/miscinit.c:1315 #, c-format msgid "could not open file \"%s\": %m; continuing anyway" msgstr "не удалось открыть файл \"%s\": %m; ошибка игнорируется" -#: utils/init/miscinit.c:1331 +#: utils/init/miscinit.c:1340 #, c-format msgid "lock file \"%s\" contains wrong PID: %ld instead of %ld" msgstr "файл блокировки \"%s\" содержит неверный PID: %ld вместо %ld" -#: utils/init/miscinit.c:1370 utils/init/miscinit.c:1386 +#: utils/init/miscinit.c:1379 utils/init/miscinit.c:1395 #, c-format msgid "\"%s\" is not a valid data directory" msgstr "\"%s\" не является каталогом данных" -#: utils/init/miscinit.c:1372 +#: utils/init/miscinit.c:1381 #, c-format msgid "File \"%s\" is missing." msgstr "Файл \"%s\" отсутствует." 
-#: utils/init/miscinit.c:1388 +#: utils/init/miscinit.c:1397 #, c-format msgid "File \"%s\" does not contain valid data." msgstr "Файл \"%s\" содержит неприемлемые данные." -#: utils/init/miscinit.c:1390 +#: utils/init/miscinit.c:1399 #, c-format msgid "You might need to initdb." msgstr "Возможно, вам нужно выполнить initdb." -#: utils/init/miscinit.c:1398 +#: utils/init/miscinit.c:1407 #, c-format msgid "" "The data directory was initialized by PostgreSQL version %s, which is not " @@ -24342,7 +24610,7 @@ msgstr "" "Каталог данных инициализирован сервером PostgreSQL версии %s, не совместимой " "с данной версией (%s)." -#: utils/init/miscinit.c:1469 +#: utils/init/miscinit.c:1474 #, c-format msgid "loaded library \"%s\"" msgstr "загружена библиотека \"%s\"" @@ -24492,7 +24760,7 @@ msgstr "" #: utils/init/postinit.c:863 #, c-format msgid "database %u does not exist" -msgstr "база данных \"%u не существует" +msgstr "база данных %u не существует" #: utils/init/postinit.c:952 #, c-format @@ -24509,7 +24777,7 @@ msgstr "Подкаталог базы данных \"%s\" отсутствует msgid "could not access directory \"%s\": %m" msgstr "ошибка доступа к каталогу \"%s\": %m" -#: utils/mb/conv.c:488 utils/mb/conv.c:679 +#: utils/mb/conv.c:488 utils/mb/conv.c:680 #, c-format msgid "invalid encoding number: %d" msgstr "неверный номер кодировки: %d" @@ -24526,6 +24794,11 @@ msgstr "неожиданный ID кодировки %d для наборов с msgid "unexpected encoding ID %d for WIN character sets" msgstr "неожиданный ID кодировки %d для наборов символов WIN" +#: utils/mb/encnames.c:473 +#, c-format +msgid "encoding \"%s\" not supported by ICU" +msgstr "ICU не поддерживает кодировку \"%s\"" + #: utils/mb/encnames.c:572 #, c-format msgid "encoding name too long" @@ -24582,294 +24855,298 @@ msgstr "" "для символа с последовательностью байт %s из кодировки \"%s\" нет " "эквивалента в \"%s\"" -#: utils/misc/guc.c:574 +#: utils/misc/guc.c:570 msgid "Ungrouped" msgstr "Разное" -#: utils/misc/guc.c:576 +#: utils/misc/guc.c:572 msgid "File Locations" msgstr "Расположения файлов" -#: utils/misc/guc.c:578 +#: utils/misc/guc.c:574 msgid "Connections and Authentication" msgstr "Подключения и аутентификация" -#: utils/misc/guc.c:580 +#: utils/misc/guc.c:576 msgid "Connections and Authentication / Connection Settings" msgstr "Подключения и аутентификация / Параметры подключений" -#: utils/misc/guc.c:582 +#: utils/misc/guc.c:578 msgid "Connections and Authentication / Security and Authentication" msgstr "Подключения и аутентификация / Безопасность и аутентификация" -#: utils/misc/guc.c:584 +#: utils/misc/guc.c:580 msgid "Resource Usage" msgstr "Использование ресурсов" -#: utils/misc/guc.c:586 +#: utils/misc/guc.c:582 msgid "Resource Usage / Memory" msgstr "Использование ресурсов / Память" -#: utils/misc/guc.c:588 +#: utils/misc/guc.c:584 msgid "Resource Usage / Disk" msgstr "Использование ресурсов / Диск" -#: utils/misc/guc.c:590 +#: utils/misc/guc.c:586 msgid "Resource Usage / Kernel Resources" msgstr "Использование ресурсов / Ресурсы ядра" -#: utils/misc/guc.c:592 +#: utils/misc/guc.c:588 msgid "Resource Usage / Cost-Based Vacuum Delay" msgstr "Использование ресурсов / Задержка очистки по стоимости" -#: utils/misc/guc.c:594 +#: utils/misc/guc.c:590 msgid "Resource Usage / Background Writer" msgstr "Использование ресурсов / Фоновая запись" -#: utils/misc/guc.c:596 +#: utils/misc/guc.c:592 msgid "Resource Usage / Asynchronous Behavior" msgstr "Использование ресурсов / Асинхронное поведение" -#: utils/misc/guc.c:598 +#: utils/misc/guc.c:594 msgid 
"Write-Ahead Log" msgstr "Журнал WAL" -#: utils/misc/guc.c:600 +#: utils/misc/guc.c:596 msgid "Write-Ahead Log / Settings" msgstr "Журнал WAL / Параметры" -#: utils/misc/guc.c:602 +#: utils/misc/guc.c:598 msgid "Write-Ahead Log / Checkpoints" msgstr "Журнал WAL / Контрольные точки" -#: utils/misc/guc.c:604 +#: utils/misc/guc.c:600 msgid "Write-Ahead Log / Archiving" msgstr "Журнал WAL / Архивация" -#: utils/misc/guc.c:606 +#: utils/misc/guc.c:602 msgid "Replication" msgstr "Репликация" -#: utils/misc/guc.c:608 +#: utils/misc/guc.c:604 msgid "Replication / Sending Servers" msgstr "Репликация / Передающие серверы" -#: utils/misc/guc.c:610 +#: utils/misc/guc.c:606 msgid "Replication / Master Server" msgstr "Репликация / Главный сервер" -#: utils/misc/guc.c:612 +#: utils/misc/guc.c:608 msgid "Replication / Standby Servers" msgstr "Репликация / Резервные серверы" -#: utils/misc/guc.c:614 +#: utils/misc/guc.c:610 +msgid "Replication / Subscribers" +msgstr "Репликация / Подписчики" + +#: utils/misc/guc.c:612 msgid "Query Tuning" msgstr "Настройка запросов" -#: utils/misc/guc.c:616 +#: utils/misc/guc.c:614 msgid "Query Tuning / Planner Method Configuration" msgstr "Настройка запросов / Конфигурация методов планировщика" -#: utils/misc/guc.c:618 +#: utils/misc/guc.c:616 msgid "Query Tuning / Planner Cost Constants" msgstr "Настройка запросов / Константы стоимости для планировщика" -#: utils/misc/guc.c:620 +#: utils/misc/guc.c:618 msgid "Query Tuning / Genetic Query Optimizer" msgstr "Настройка запросов / Генетический оптимизатор запросов" -#: utils/misc/guc.c:622 +#: utils/misc/guc.c:620 msgid "Query Tuning / Other Planner Options" msgstr "Настройка запросов / Другие параметры планировщика" -#: utils/misc/guc.c:624 +#: utils/misc/guc.c:622 msgid "Reporting and Logging" msgstr "Отчёты и протоколы" -#: utils/misc/guc.c:626 +#: utils/misc/guc.c:624 msgid "Reporting and Logging / Where to Log" msgstr "Отчёты и протоколы / Куда записывать" -#: utils/misc/guc.c:628 +#: utils/misc/guc.c:626 msgid "Reporting and Logging / When to Log" msgstr "Отчёты и протоколы / Когда записывать" -#: utils/misc/guc.c:630 +#: utils/misc/guc.c:628 msgid "Reporting and Logging / What to Log" msgstr "Отчёты и протоколы / Что записывать" -#: utils/misc/guc.c:632 +#: utils/misc/guc.c:630 msgid "Process Title" msgstr "Заголовок процесса" -#: utils/misc/guc.c:634 +#: utils/misc/guc.c:632 msgid "Statistics" msgstr "Статистика" -#: utils/misc/guc.c:636 +#: utils/misc/guc.c:634 msgid "Statistics / Monitoring" msgstr "Статистика / Мониторинг" -#: utils/misc/guc.c:638 +#: utils/misc/guc.c:636 msgid "Statistics / Query and Index Statistics Collector" msgstr "Статистика / Сбор статистики по запросам и индексам" -#: utils/misc/guc.c:640 +#: utils/misc/guc.c:638 msgid "Autovacuum" msgstr "Автоочистка" -#: utils/misc/guc.c:642 +#: utils/misc/guc.c:640 msgid "Client Connection Defaults" msgstr "Параметры клиентских сеансов по умолчанию" -#: utils/misc/guc.c:644 +#: utils/misc/guc.c:642 msgid "Client Connection Defaults / Statement Behavior" msgstr "Параметры клиентских подключений по умолчанию / Поведение команд" -#: utils/misc/guc.c:646 +#: utils/misc/guc.c:644 msgid "Client Connection Defaults / Locale and Formatting" msgstr "" "Параметры клиентских подключений по умолчанию / Языковая среда и форматы" -#: utils/misc/guc.c:648 +#: utils/misc/guc.c:646 msgid "Client Connection Defaults / Shared Library Preloading" msgstr "" "Параметры клиентских подключений по умолчанию / Предзагрузка разделяемых " "библиотек" -#: utils/misc/guc.c:650 +#: 
utils/misc/guc.c:648 msgid "Client Connection Defaults / Other Defaults" msgstr "Параметры клиентских подключений по умолчанию / Другие параметры" -#: utils/misc/guc.c:652 +#: utils/misc/guc.c:650 msgid "Lock Management" msgstr "Управление блокировками" -#: utils/misc/guc.c:654 +#: utils/misc/guc.c:652 msgid "Version and Platform Compatibility" msgstr "Совместимость с разными версиями и платформами" -#: utils/misc/guc.c:656 +#: utils/misc/guc.c:654 msgid "Version and Platform Compatibility / Previous PostgreSQL Versions" msgstr "Версия и совместимость платформ / Предыдущие версии PostgreSQL" -#: utils/misc/guc.c:658 +#: utils/misc/guc.c:656 msgid "Version and Platform Compatibility / Other Platforms and Clients" msgstr "Версия и совместимость платформ / Другие платформы и клиенты" -#: utils/misc/guc.c:660 +#: utils/misc/guc.c:658 msgid "Error Handling" msgstr "Обработка ошибок" -#: utils/misc/guc.c:662 +#: utils/misc/guc.c:660 msgid "Preset Options" msgstr "Предопределённые параметры" -#: utils/misc/guc.c:664 +#: utils/misc/guc.c:662 msgid "Customized Options" msgstr "Внесистемные параметры" -#: utils/misc/guc.c:666 +#: utils/misc/guc.c:664 msgid "Developer Options" msgstr "Параметры для разработчиков" -#: utils/misc/guc.c:723 +#: utils/misc/guc.c:721 msgid "Valid units for this parameter are \"kB\", \"MB\", \"GB\", and \"TB\"." msgstr "" "Допустимые единицы измерения для этого параметра - \"kB\", \"MB\", \"GB\" и " "\"TB\"." -#: utils/misc/guc.c:750 +#: utils/misc/guc.c:748 msgid "" "Valid units for this parameter are \"ms\", \"s\", \"min\", \"h\", and \"d\"." msgstr "" "Допустимые единицы измерения для этого параметра - \"ms\", \"s\", \"min\", " "\"h\" и \"d\"." -#: utils/misc/guc.c:809 +#: utils/misc/guc.c:807 msgid "Enables the planner's use of sequential-scan plans." msgstr "" "Разрешает планировщику использовать планы последовательного сканирования." -#: utils/misc/guc.c:818 +#: utils/misc/guc.c:816 msgid "Enables the planner's use of index-scan plans." msgstr "Разрешает планировщику использовать планы сканирования по индексу." -#: utils/misc/guc.c:827 +#: utils/misc/guc.c:825 msgid "Enables the planner's use of index-only-scan plans." msgstr "Разрешает планировщику использовать планы сканирования только индекса." -#: utils/misc/guc.c:836 +#: utils/misc/guc.c:834 msgid "Enables the planner's use of bitmap-scan plans." msgstr "" "Разрешает планировщику использовать планы сканирования по битовой карте." -#: utils/misc/guc.c:845 +#: utils/misc/guc.c:843 msgid "Enables the planner's use of TID scan plans." msgstr "Разрешает планировщику использовать планы сканирования TID." -#: utils/misc/guc.c:854 +#: utils/misc/guc.c:852 msgid "Enables the planner's use of explicit sort steps." msgstr "Разрешает планировщику использовать шаги с явной сортировкой." -#: utils/misc/guc.c:863 +#: utils/misc/guc.c:861 msgid "Enables the planner's use of hashed aggregation plans." msgstr "Разрешает планировщику использовать планы агрегирования по хешу." -#: utils/misc/guc.c:872 +#: utils/misc/guc.c:870 msgid "Enables the planner's use of materialization." msgstr "Разрешает планировщику использовать материализацию." -#: utils/misc/guc.c:881 +#: utils/misc/guc.c:879 msgid "Enables the planner's use of nested-loop join plans." msgstr "" "Разрешает планировщику использовать планы соединения с вложенными циклами." -#: utils/misc/guc.c:890 +#: utils/misc/guc.c:888 msgid "Enables the planner's use of merge join plans." msgstr "Разрешает планировщику использовать планы соединения слиянием." 
-#: utils/misc/guc.c:899 +#: utils/misc/guc.c:897 msgid "Enables the planner's use of hash join plans." msgstr "Разрешает планировщику использовать планы соединения по хешу." -#: utils/misc/guc.c:908 +#: utils/misc/guc.c:906 msgid "Enables the planner's use of gather merge plans." msgstr "Разрешает планировщику использовать планы сбора слиянием." -#: utils/misc/guc.c:918 +#: utils/misc/guc.c:916 msgid "Enables genetic query optimization." msgstr "Включает генетическую оптимизацию запросов." -#: utils/misc/guc.c:919 +#: utils/misc/guc.c:917 msgid "This algorithm attempts to do planning without exhaustive searching." msgstr "Этот алгоритм пытается построить план без полного перебора." -#: utils/misc/guc.c:929 +#: utils/misc/guc.c:927 msgid "Shows whether the current user is a superuser." msgstr "Показывает, является ли текущий пользователь суперпользователем." -#: utils/misc/guc.c:939 +#: utils/misc/guc.c:937 msgid "Enables advertising the server via Bonjour." msgstr "Включает объявление сервера посредством Bonjour." -#: utils/misc/guc.c:948 +#: utils/misc/guc.c:946 msgid "Collects transaction commit time." msgstr "Записывает время фиксации транзакций." -#: utils/misc/guc.c:957 +#: utils/misc/guc.c:955 msgid "Enables SSL connections." msgstr "Разрешает SSL-подключения." -#: utils/misc/guc.c:966 +#: utils/misc/guc.c:964 msgid "Give priority to server ciphersuite order." msgstr "Назначает более приоритетным набор шифров сервера." -#: utils/misc/guc.c:975 +#: utils/misc/guc.c:973 msgid "Forces synchronization of updates to disk." msgstr "Принудительная запись изменений на диск." -#: utils/misc/guc.c:976 +#: utils/misc/guc.c:974 msgid "" "The server will use the fsync() system call in several places to make sure " "that updates are physically written to disk. This insures that a database " @@ -24880,11 +25157,11 @@ msgstr "" "физической записи данных на диск. Это позволит привести кластер БД в " "целостное состояние после отказа ОС или оборудования." -#: utils/misc/guc.c:987 +#: utils/misc/guc.c:985 msgid "Continues processing after a checksum failure." msgstr "Продолжает обработку при ошибке контрольной суммы." -#: utils/misc/guc.c:988 +#: utils/misc/guc.c:986 msgid "" "Detection of a checksum failure normally causes PostgreSQL to report an " "error, aborting the current transaction. Setting ignore_checksum_failure to " @@ -24898,11 +25175,11 @@ msgstr "" "что может привести к сбоям или другим серьёзным проблемам. Это имеет место, " "только если включён контроль целостности страниц." -#: utils/misc/guc.c:1002 +#: utils/misc/guc.c:1000 msgid "Continues processing past damaged page headers." msgstr "Продолжает обработку при повреждении заголовков страниц." -#: utils/misc/guc.c:1003 +#: utils/misc/guc.c:1001 msgid "" "Detection of a damaged page header normally causes PostgreSQL to report an " "error, aborting the current transaction. Setting zero_damaged_pages to true " @@ -24916,12 +25193,12 @@ msgstr "" "продолжит работу. Это приведёт к потере данных, а именно строк в " "повреждённой странице." -#: utils/misc/guc.c:1016 +#: utils/misc/guc.c:1014 msgid "Writes full pages to WAL when first modified after a checkpoint." msgstr "" "Запись полных страниц в WAL при первом изменении после контрольной точки." -#: utils/misc/guc.c:1017 +#: utils/misc/guc.c:1015 msgid "" "A page write in process during an operating system crash might be only " "partially written to disk. 
During recovery, the row changes stored in WAL " @@ -24934,7 +25211,7 @@ msgstr "" "при первом изменении после контрольной точки, что позволяет полностью " "восстановить данные." -#: utils/misc/guc.c:1030 +#: utils/misc/guc.c:1028 msgid "" "Writes full pages to WAL when first modified after a checkpoint, even for a " "non-critical modifications." @@ -24942,75 +25219,75 @@ msgstr "" "Запись полных страниц в WAL при первом изменении после контрольной точки, " "даже при некритических изменениях." -#: utils/misc/guc.c:1040 +#: utils/misc/guc.c:1038 msgid "Compresses full-page writes written in WAL file." msgstr "Сжимать данные при записи полных страниц в журнал." -#: utils/misc/guc.c:1050 +#: utils/misc/guc.c:1048 msgid "Logs each checkpoint." msgstr "Протоколировать каждую контрольную точку." -#: utils/misc/guc.c:1059 +#: utils/misc/guc.c:1057 msgid "Logs each successful connection." msgstr "Протоколировать устанавливаемые соединения." -#: utils/misc/guc.c:1068 +#: utils/misc/guc.c:1066 msgid "Logs end of a session, including duration." msgstr "Протоколировать конец сеанса, отмечая длительность." -#: utils/misc/guc.c:1077 +#: utils/misc/guc.c:1075 msgid "Logs each replication command." msgstr "Протоколировать каждую команду репликации." -#: utils/misc/guc.c:1086 +#: utils/misc/guc.c:1084 msgid "Shows whether the running server has assertion checks enabled." msgstr "Показывает, включены ли проверки истинности на работающем сервере." -#: utils/misc/guc.c:1101 +#: utils/misc/guc.c:1099 msgid "Terminate session on any error." msgstr "Завершать сеансы при любой ошибке." -#: utils/misc/guc.c:1110 +#: utils/misc/guc.c:1108 msgid "Reinitialize server after backend crash." msgstr "Перезапускать систему БД при аварии серверного процесса." -#: utils/misc/guc.c:1120 +#: utils/misc/guc.c:1118 msgid "Logs the duration of each completed SQL statement." msgstr "Протоколировать длительность каждого выполненного SQL-оператора." -#: utils/misc/guc.c:1129 +#: utils/misc/guc.c:1127 msgid "Logs each query's parse tree." msgstr "Протоколировать дерево разбора для каждого запроса." -#: utils/misc/guc.c:1138 +#: utils/misc/guc.c:1136 msgid "Logs each query's rewritten parse tree." msgstr "Протоколировать перезаписанное дерево разбора для каждого запроса." -#: utils/misc/guc.c:1147 +#: utils/misc/guc.c:1145 msgid "Logs each query's execution plan." msgstr "Протоколировать план выполнения каждого запроса." -#: utils/misc/guc.c:1156 +#: utils/misc/guc.c:1154 msgid "Indents parse and plan tree displays." msgstr "Отступы при отображении деревьев разбора и плана запросов." -#: utils/misc/guc.c:1165 +#: utils/misc/guc.c:1163 msgid "Writes parser performance statistics to the server log." msgstr "Запись статистики разбора запросов в протокол сервера." -#: utils/misc/guc.c:1174 +#: utils/misc/guc.c:1172 msgid "Writes planner performance statistics to the server log." msgstr "Запись статистики планирования в протокол сервера." -#: utils/misc/guc.c:1183 +#: utils/misc/guc.c:1181 msgid "Writes executor performance statistics to the server log." msgstr "Запись статистики выполнения запросов в протокол сервера." -#: utils/misc/guc.c:1192 +#: utils/misc/guc.c:1190 msgid "Writes cumulative performance statistics to the server log." msgstr "Запись общей статистики производительности в протокол сервера." -#: utils/misc/guc.c:1202 +#: utils/misc/guc.c:1200 msgid "" "Logs system resource usage statistics (memory and CPU) on various B-tree " "operations." 
@@ -25018,11 +25295,11 @@ msgstr "" "Фиксировать статистику использования системных ресурсов (памяти и " "процессора) при различных операциях с b-деревом." -#: utils/misc/guc.c:1214 +#: utils/misc/guc.c:1212 msgid "Collects information about executing commands." msgstr "Собирает информацию о выполняющихся командах." -#: utils/misc/guc.c:1215 +#: utils/misc/guc.c:1213 msgid "" "Enables the collection of information on the currently executing command of " "each session, along with the time at which that command began execution." @@ -25030,60 +25307,60 @@ msgstr "" "Включает сбор информации о командах, выполняющихся во всех сеансах, а также " "время запуска команды." -#: utils/misc/guc.c:1225 +#: utils/misc/guc.c:1223 msgid "Collects statistics on database activity." msgstr "Собирает статистику активности в БД." -#: utils/misc/guc.c:1234 +#: utils/misc/guc.c:1232 msgid "Collects timing statistics for database I/O activity." msgstr "Собирает статистику по времени активности ввода/вывода." -#: utils/misc/guc.c:1244 +#: utils/misc/guc.c:1242 msgid "Updates the process title to show the active SQL command." msgstr "Выводит в заголовок процесса активную SQL-команду." -#: utils/misc/guc.c:1245 +#: utils/misc/guc.c:1243 msgid "" "Enables updating of the process title every time a new SQL command is " "received by the server." msgstr "Отражает в заголовке процесса каждую SQL-команду, поступающую серверу." -#: utils/misc/guc.c:1258 +#: utils/misc/guc.c:1256 msgid "Starts the autovacuum subprocess." msgstr "Запускает подпроцесс автоочистки." -#: utils/misc/guc.c:1268 +#: utils/misc/guc.c:1266 msgid "Generates debugging output for LISTEN and NOTIFY." msgstr "Генерирует отладочные сообщения для LISTEN и NOTIFY." -#: utils/misc/guc.c:1280 +#: utils/misc/guc.c:1278 msgid "Emits information about lock usage." msgstr "Выдавать информацию о применяемых блокировках." -#: utils/misc/guc.c:1290 +#: utils/misc/guc.c:1288 msgid "Emits information about user lock usage." msgstr "Выдавать информацию о применяемых пользовательских блокировках." -#: utils/misc/guc.c:1300 +#: utils/misc/guc.c:1298 msgid "Emits information about lightweight lock usage." msgstr "Выдавать информацию о применяемых лёгких блокировках." -#: utils/misc/guc.c:1310 +#: utils/misc/guc.c:1308 msgid "" "Dumps information about all current locks when a deadlock timeout occurs." msgstr "" "Выводить информацию обо всех текущих блокировках в случае таймаута при " "взаимоблокировке." -#: utils/misc/guc.c:1322 +#: utils/misc/guc.c:1320 msgid "Logs long lock waits." msgstr "Протоколировать длительные ожидания в блокировках." -#: utils/misc/guc.c:1332 +#: utils/misc/guc.c:1330 msgid "Logs the host name in the connection logs." msgstr "Записывать имя узла в протоколы подключений." -#: utils/misc/guc.c:1333 +#: utils/misc/guc.c:1331 msgid "" "By default, connection logs only show the IP address of the connecting host. " "If you want them to show the host name you can turn this on, but depending " @@ -25095,11 +25372,11 @@ msgstr "" "параметр, но учтите, что это может значительно повлиять на " "производительность." -#: utils/misc/guc.c:1344 +#: utils/misc/guc.c:1342 msgid "Treats \"expr=NULL\" as \"expr IS NULL\"." msgstr "Обрабатывать \"expr=NULL\" как \"expr IS NULL\"." 
-#: utils/misc/guc.c:1345 +#: utils/misc/guc.c:1343 msgid "" "When turned on, expressions of the form expr = NULL (or NULL = expr) are " "treated as expr IS NULL, that is, they return true if expr evaluates to the " @@ -25111,25 +25388,25 @@ msgstr "" "совпадает с NULL, и false в противном случае. По правилам expr = NULL всегда " "должно возвращать null (неопределённость)." -#: utils/misc/guc.c:1357 +#: utils/misc/guc.c:1355 msgid "Enables per-database user names." msgstr "Включает связывание имён пользователей с базами данных." -#: utils/misc/guc.c:1366 +#: utils/misc/guc.c:1364 msgid "Sets the default read-only status of new transactions." msgstr "" "Устанавливает режим \"только чтение\" по умолчанию для новых транзакций." -#: utils/misc/guc.c:1375 +#: utils/misc/guc.c:1373 msgid "Sets the current transaction's read-only status." msgstr "Устанавливает режим \"только чтение\" для текущей транзакции." -#: utils/misc/guc.c:1385 +#: utils/misc/guc.c:1383 msgid "Sets the default deferrable status of new transactions." msgstr "" "Устанавливает режим отложенного выполнения по умолчанию для новых транзакций." -#: utils/misc/guc.c:1394 +#: utils/misc/guc.c:1392 msgid "" "Whether to defer a read-only serializable transaction until it can be " "executed with no possible serialization failures." @@ -25137,25 +25414,25 @@ msgstr "" "Определяет, откладывать ли сериализуемую транзакцию \"только чтение\" до " "момента, когда сбой сериализации будет исключён." -#: utils/misc/guc.c:1404 +#: utils/misc/guc.c:1402 msgid "Enable row security." msgstr "Включает защиту на уровне строк." -#: utils/misc/guc.c:1405 +#: utils/misc/guc.c:1403 msgid "When enabled, row security will be applied to all users." msgstr "" "Когда включена, защита на уровне строк распространяется на всех " "пользователей." -#: utils/misc/guc.c:1413 +#: utils/misc/guc.c:1411 msgid "Check function bodies during CREATE FUNCTION." msgstr "Проверять тело функций в момент CREATE FUNCTION." -#: utils/misc/guc.c:1422 +#: utils/misc/guc.c:1420 msgid "Enable input of NULL elements in arrays." msgstr "Разрешать ввод элементов NULL в массивах." -#: utils/misc/guc.c:1423 +#: utils/misc/guc.c:1421 msgid "" "When turned on, unquoted NULL in an array input value means a null value; " "otherwise it is taken literally." @@ -25163,68 +25440,68 @@ msgstr "" "Когда этот параметр включён, NULL без кавычек при вводе в массив " "воспринимается как значение NULL, иначе - как строка." -#: utils/misc/guc.c:1433 +#: utils/misc/guc.c:1431 msgid "Create new tables with OIDs by default." msgstr "По умолчанию создавать новые таблицы со столбцом OID." -#: utils/misc/guc.c:1442 +#: utils/misc/guc.c:1440 msgid "" "Start a subprocess to capture stderr output and/or csvlogs into log files." msgstr "" "Запускает подпроцесс для чтения stderr и/или csv-файлов и записи в файлы " "протоколов." -#: utils/misc/guc.c:1451 +#: utils/misc/guc.c:1449 msgid "Truncate existing log files of same name during log rotation." msgstr "" "Очищать уже существующий файл с тем же именем при прокручивании протокола." -#: utils/misc/guc.c:1462 +#: utils/misc/guc.c:1460 msgid "Emit information about resource usage in sorting." msgstr "Выдавать сведения об использовании ресурсов при сортировке." -#: utils/misc/guc.c:1476 +#: utils/misc/guc.c:1474 msgid "Generate debugging output for synchronized scanning." msgstr "Выдавать отладочные сообщения для синхронного сканирования." -#: utils/misc/guc.c:1491 +#: utils/misc/guc.c:1489 msgid "Enable bounded sorting using heap sort." 
msgstr "" "Разрешить ограниченную сортировку с применением пирамидальной сортировки." -#: utils/misc/guc.c:1504 +#: utils/misc/guc.c:1502 msgid "Emit WAL-related debugging output." msgstr "Выдавать отладочные сообщения, связанные с WAL." -#: utils/misc/guc.c:1516 +#: utils/misc/guc.c:1514 msgid "Datetimes are integer based." msgstr "Целочисленная реализация даты/времени." -#: utils/misc/guc.c:1527 +#: utils/misc/guc.c:1525 msgid "" "Sets whether Kerberos and GSSAPI user names should be treated as case-" "insensitive." msgstr "" "Включает регистронезависимую обработку имён пользователей Kerberos и GSSAPI." -#: utils/misc/guc.c:1537 +#: utils/misc/guc.c:1535 msgid "Warn about backslash escapes in ordinary string literals." msgstr "Предупреждения о спецсимволах '\\' в обычных строках." -#: utils/misc/guc.c:1547 +#: utils/misc/guc.c:1545 msgid "Causes '...' strings to treat backslashes literally." msgstr "Включает буквальную обработку символов '\\' в строках '...'." -#: utils/misc/guc.c:1558 +#: utils/misc/guc.c:1556 msgid "Enable synchronized sequential scans." msgstr "Включить синхронизацию последовательного сканирования." -#: utils/misc/guc.c:1568 +#: utils/misc/guc.c:1566 msgid "Allows connections and queries during recovery." msgstr "" "Разрешает принимать новые подключения и запросы в процессе восстановления." -#: utils/misc/guc.c:1578 +#: utils/misc/guc.c:1576 msgid "" "Allows feedback from a hot standby to the primary that will avoid query " "conflicts." @@ -25232,15 +25509,15 @@ msgstr "" "Разрешает обратную связь сервера горячего резерва с основным для " "предотвращения конфликтов при длительных запросах." -#: utils/misc/guc.c:1588 +#: utils/misc/guc.c:1586 msgid "Allows modifications of the structure of system tables." msgstr "Разрешает модифицировать структуру системных таблиц." -#: utils/misc/guc.c:1599 +#: utils/misc/guc.c:1597 msgid "Disables reading from system indexes." msgstr "Запрещает использование системных индексов." -#: utils/misc/guc.c:1600 +#: utils/misc/guc.c:1598 msgid "" "It does not prevent updating the indexes, so it is safe to use. The worst " "consequence is slowness." @@ -25248,14 +25525,14 @@ msgstr "" "При этом индексы продолжают обновляться, так что данное поведение безопасно. " "Худшее следствие - замедление." -#: utils/misc/guc.c:1611 +#: utils/misc/guc.c:1609 msgid "" "Enables backward compatibility mode for privilege checks on large objects." msgstr "" "Включает режим обратной совместимости при проверке привилегий для больших " "объектов." -#: utils/misc/guc.c:1612 +#: utils/misc/guc.c:1610 msgid "" "Skips privilege checks when reading or modifying large objects, for " "compatibility with PostgreSQL releases prior to 9.0." @@ -25263,35 +25540,35 @@ msgstr "" "Пропускает проверки привилегий при чтении или изменении больших объектов " "(для совместимости с версиями PostgreSQL до 9.0)." -#: utils/misc/guc.c:1622 +#: utils/misc/guc.c:1620 msgid "" "Emit a warning for constructs that changed meaning since PostgreSQL 9.4." msgstr "" "Выдаёт предупреждение о конструкциях, поведение которых изменилось после " "PostgreSQL 9.4." -#: utils/misc/guc.c:1632 +#: utils/misc/guc.c:1630 msgid "When generating SQL fragments, quote all identifiers." msgstr "" "Генерируя SQL-фрагменты, заключать все идентификаторы в двойные кавычки." -#: utils/misc/guc.c:1642 +#: utils/misc/guc.c:1640 msgid "Shows whether data checksums are turned on for this cluster." msgstr "Показывает, включён ли в этом кластере контроль целостности данных." 
-#: utils/misc/guc.c:1653 +#: utils/misc/guc.c:1651 msgid "Add sequence number to syslog messages to avoid duplicate suppression." msgstr "" "Добавлять последовательный номер в сообщения syslog во избежание подавления " "повторов." -#: utils/misc/guc.c:1663 +#: utils/misc/guc.c:1661 msgid "Split messages sent to syslog by lines and to fit into 1024 bytes." msgstr "" "Разбивать сообщения, передаваемые в syslog, по строкам размером не больше " "1024 байт." -#: utils/misc/guc.c:1682 +#: utils/misc/guc.c:1680 msgid "" "Forces a switch to the next WAL file if a new file has not been started " "within N seconds." @@ -25299,19 +25576,19 @@ msgstr "" "Принудительно переключаться на следующий файл WAL, если начать новый файл за " "N секунд не удалось." -#: utils/misc/guc.c:1693 +#: utils/misc/guc.c:1691 msgid "Waits N seconds on connection startup after authentication." msgstr "Ждать N секунд при подключении после проверки подлинности." -#: utils/misc/guc.c:1694 utils/misc/guc.c:2217 +#: utils/misc/guc.c:1692 utils/misc/guc.c:2237 msgid "This allows attaching a debugger to the process." msgstr "Это позволяет подключить к процессу отладчик." -#: utils/misc/guc.c:1703 +#: utils/misc/guc.c:1701 msgid "Sets the default statistics target." msgstr "Устанавливает целевое ограничение статистики по умолчанию." -#: utils/misc/guc.c:1704 +#: utils/misc/guc.c:1702 msgid "" "This applies to table columns that have not had a column-specific target set " "via ALTER TABLE SET STATISTICS." @@ -25319,13 +25596,13 @@ msgstr "" "Это значение распространяется на столбцы таблицы, для которых целевое " "ограничение не задано явно через ALTER TABLE SET STATISTICS." -#: utils/misc/guc.c:1713 +#: utils/misc/guc.c:1711 msgid "Sets the FROM-list size beyond which subqueries are not collapsed." msgstr "" "Задаёт предел для списка FROM, при превышении которого подзапросы не " "сворачиваются." -#: utils/misc/guc.c:1715 +#: utils/misc/guc.c:1713 msgid "" "The planner will merge subqueries into upper queries if the resulting FROM " "list would have no more than this many items." @@ -25333,13 +25610,13 @@ msgstr "" "Планировщик объединит вложенные запросы с внешними, если в полученном списке " "FROM будет не больше заданного числа элементов." -#: utils/misc/guc.c:1725 +#: utils/misc/guc.c:1723 msgid "Sets the FROM-list size beyond which JOIN constructs are not flattened." msgstr "" "Задаёт предел для списка FROM, при превышении которого конструкции JOIN " "сохраняются." -#: utils/misc/guc.c:1727 +#: utils/misc/guc.c:1725 msgid "" "The planner will flatten explicit JOIN constructs into lists of FROM items " "whenever a list of no more than this many items would result." @@ -25347,34 +25624,34 @@ msgstr "" "Планировщик будет сносить явные конструкции JOIN в списки FROM, пока в " "результирующем списке не больше заданного числа элементов." -#: utils/misc/guc.c:1737 +#: utils/misc/guc.c:1735 msgid "Sets the threshold of FROM items beyond which GEQO is used." msgstr "" "Задаёт предел для списка FROM, при превышении которого применяется GEQO." -#: utils/misc/guc.c:1746 +#: utils/misc/guc.c:1744 msgid "GEQO: effort is used to set the default for other GEQO parameters." msgstr "" "GEQO: оценка усилий для планирования, задающая значения по умолчанию для " "других параметров GEQO." -#: utils/misc/guc.c:1755 +#: utils/misc/guc.c:1753 msgid "GEQO: number of individuals in the population." msgstr "GEQO: число особей в популяции." 
-#: utils/misc/guc.c:1756 utils/misc/guc.c:1765 +#: utils/misc/guc.c:1754 utils/misc/guc.c:1763 msgid "Zero selects a suitable default value." msgstr "При нуле выбирается подходящее значение по умолчанию." -#: utils/misc/guc.c:1764 +#: utils/misc/guc.c:1762 msgid "GEQO: number of iterations of the algorithm." msgstr "GEQO: число итераций алгоритма." -#: utils/misc/guc.c:1775 +#: utils/misc/guc.c:1773 msgid "Sets the time to wait on a lock before checking for deadlock." msgstr "Задаёт интервал ожидания в блокировке до проверки на взаимоблокировку." -#: utils/misc/guc.c:1786 +#: utils/misc/guc.c:1784 msgid "" "Sets the maximum delay before canceling queries when a hot standby server is " "processing archived WAL data." @@ -25382,7 +25659,7 @@ msgstr "" "Задаёт максимальную задержку до отмены запроса, когда сервер горячего " "резерва обрабатывает данные WAL из архива." -#: utils/misc/guc.c:1797 +#: utils/misc/guc.c:1795 msgid "" "Sets the maximum delay before canceling queries when a hot standby server is " "processing streamed WAL data." @@ -25390,58 +25667,58 @@ msgstr "" "Задаёт максимальную задержку до отмены запроса, когда сервер горячего " "резерва обрабатывает данные WAL из потока." -#: utils/misc/guc.c:1808 +#: utils/misc/guc.c:1806 msgid "" "Sets the maximum interval between WAL receiver status reports to the primary." msgstr "Задаёт максимальный интервал для отчётов о состоянии получателей WAL." -#: utils/misc/guc.c:1819 +#: utils/misc/guc.c:1817 msgid "Sets the maximum wait time to receive data from the primary." msgstr "" "Задаёт предельное время ожидания для получения данных с главного сервера." -#: utils/misc/guc.c:1830 +#: utils/misc/guc.c:1828 msgid "Sets the maximum number of concurrent connections." msgstr "Задаёт максимально возможное число подключений." -#: utils/misc/guc.c:1840 +#: utils/misc/guc.c:1838 msgid "Sets the number of connection slots reserved for superusers." msgstr "" "Определяет, сколько слотов подключений забронировано для суперпользователей." -#: utils/misc/guc.c:1854 +#: utils/misc/guc.c:1852 msgid "Sets the number of shared memory buffers used by the server." msgstr "Задаёт количество буферов в разделяемой памяти, используемых сервером." -#: utils/misc/guc.c:1865 +#: utils/misc/guc.c:1863 msgid "Sets the maximum number of temporary buffers used by each session." msgstr "Задаёт предельное число временных буферов на один сеанс." -#: utils/misc/guc.c:1876 +#: utils/misc/guc.c:1874 msgid "Sets the TCP port the server listens on." msgstr "Задаёт TCP-порт для работы сервера." -#: utils/misc/guc.c:1886 +#: utils/misc/guc.c:1884 msgid "Sets the access permissions of the Unix-domain socket." -msgstr "Задаёт права доступа для доменного сокета Unix." +msgstr "Задаёт права доступа для Unix-сокета." -#: utils/misc/guc.c:1887 +#: utils/misc/guc.c:1885 msgid "" "Unix-domain sockets use the usual Unix file system permission set. The " "parameter value is expected to be a numeric mode specification in the form " "accepted by the chmod and umask system calls. (To use the customary octal " "format the number must start with a 0 (zero).)" msgstr "" -"Для доменных сокетов используется обычный набор разрешений, как в файловых " +"Для Unix-сокетов используется обычный набор разрешений, как в файловых " "системах Unix. Значение параметра указывается в числовом виде, " "воспринимаемом системными функциями chmod и umask. 
(Чтобы использовать " "привычный восьмеричный формат, добавьте в начало ноль (0).)" -#: utils/misc/guc.c:1901 +#: utils/misc/guc.c:1899 msgid "Sets the file permissions for log files." msgstr "Задаёт права доступа к файлам протоколов." -#: utils/misc/guc.c:1902 +#: utils/misc/guc.c:1900 msgid "" "The parameter value is expected to be a numeric mode specification in the " "form accepted by the chmod and umask system calls. (To use the customary " @@ -25451,11 +25728,11 @@ msgstr "" "функциями chmod и umask. (Чтобы использовать привычный восьмеричный формат, " "добавьте в начало ноль (0).)" -#: utils/misc/guc.c:1915 +#: utils/misc/guc.c:1913 msgid "Sets the maximum memory to be used for query workspaces." msgstr "Задаёт предельный объём памяти для рабочих пространств запросов." -#: utils/misc/guc.c:1916 +#: utils/misc/guc.c:1914 msgid "" "This much memory can be used by each internal sort operation and hash table " "before switching to temporary disk files." @@ -25463,131 +25740,131 @@ msgstr "" "Такой объём памяти может использоваться каждой внутренней операцией " "сортировки и таблицей хешей до переключения на временные файлы на диске." -#: utils/misc/guc.c:1928 +#: utils/misc/guc.c:1926 msgid "Sets the maximum memory to be used for maintenance operations." msgstr "Задаёт предельный объём памяти для операций по обслуживанию." -#: utils/misc/guc.c:1929 +#: utils/misc/guc.c:1927 msgid "This includes operations such as VACUUM and CREATE INDEX." msgstr "Подразумеваются в частности операции VACUUM и CREATE INDEX." -#: utils/misc/guc.c:1939 +#: utils/misc/guc.c:1937 msgid "" "Sets the maximum number of tuples to be sorted using replacement selection." msgstr "" "Задаёт предельное число кортежей, сортируемое посредством алгоритма выбора с " "замещением." -#: utils/misc/guc.c:1940 +#: utils/misc/guc.c:1938 msgid "When more tuples than this are present, quicksort will be used." msgstr "Когда кортежей больше этого количества, будет применяться quicksort." -#: utils/misc/guc.c:1954 +#: utils/misc/guc.c:1952 msgid "Sets the maximum stack depth, in kilobytes." msgstr "Задаёт максимальную глубину стека (в КБ)." -#: utils/misc/guc.c:1965 +#: utils/misc/guc.c:1963 msgid "Limits the total size of all temporary files used by each process." msgstr "" "Ограничивает общий размер всех временных файлов, доступный для каждого " "процесса." -#: utils/misc/guc.c:1966 +#: utils/misc/guc.c:1964 msgid "-1 means no limit." msgstr "-1 отключает ограничение." -#: utils/misc/guc.c:1976 +#: utils/misc/guc.c:1974 msgid "Vacuum cost for a page found in the buffer cache." msgstr "Стоимость очистки для страницы, найденной в кеше." -#: utils/misc/guc.c:1986 +#: utils/misc/guc.c:1984 msgid "Vacuum cost for a page not found in the buffer cache." msgstr "Стоимость очистки для страницы, не найденной в кеше." -#: utils/misc/guc.c:1996 +#: utils/misc/guc.c:1994 msgid "Vacuum cost for a page dirtied by vacuum." msgstr "Стоимость очистки для страницы, которая не была \"грязной\"." -#: utils/misc/guc.c:2006 +#: utils/misc/guc.c:2004 msgid "Vacuum cost amount available before napping." msgstr "Суммарная стоимость очистки, при которой нужна передышка." -#: utils/misc/guc.c:2016 +#: utils/misc/guc.c:2014 msgid "Vacuum cost delay in milliseconds." msgstr "Задержка очистки (в миллисекундах)." -#: utils/misc/guc.c:2027 +#: utils/misc/guc.c:2025 msgid "Vacuum cost delay in milliseconds, for autovacuum." msgstr "Задержка очистки для автоочистки (в миллисекундах)." 
-#: utils/misc/guc.c:2038 +#: utils/misc/guc.c:2036 msgid "Vacuum cost amount available before napping, for autovacuum." msgstr "" "Суммарная стоимость очистки, при которой нужна передышка, для автоочистки." -#: utils/misc/guc.c:2048 +#: utils/misc/guc.c:2046 msgid "" "Sets the maximum number of simultaneously open files for each server process." msgstr "" "Задаёт предельное число одновременно открытых файлов для каждого серверного " "процесса." -#: utils/misc/guc.c:2061 +#: utils/misc/guc.c:2059 msgid "Sets the maximum number of simultaneously prepared transactions." msgstr "Задаёт предельное число одновременно подготовленных транзакций." -#: utils/misc/guc.c:2072 +#: utils/misc/guc.c:2070 msgid "Sets the minimum OID of tables for tracking locks." msgstr "Задаёт минимальный OID таблиц, для которых отслеживаются блокировки." -#: utils/misc/guc.c:2073 +#: utils/misc/guc.c:2071 msgid "Is used to avoid output on system tables." msgstr "Применяется для игнорирования системных таблиц." -#: utils/misc/guc.c:2082 +#: utils/misc/guc.c:2080 msgid "Sets the OID of the table with unconditionally lock tracing." msgstr "Задаёт OID таблицы для безусловного отслеживания блокировок." -#: utils/misc/guc.c:2094 +#: utils/misc/guc.c:2092 msgid "Sets the maximum allowed duration of any statement." msgstr "Задаёт предельную длительность для любого оператора." -#: utils/misc/guc.c:2095 utils/misc/guc.c:2106 utils/misc/guc.c:2117 +#: utils/misc/guc.c:2093 utils/misc/guc.c:2104 utils/misc/guc.c:2115 msgid "A value of 0 turns off the timeout." msgstr "Нулевое значение отключает таймаут." -#: utils/misc/guc.c:2105 +#: utils/misc/guc.c:2103 msgid "Sets the maximum allowed duration of any wait for a lock." msgstr "Задаёт максимальную продолжительность ожидания блокировок." -#: utils/misc/guc.c:2116 +#: utils/misc/guc.c:2114 msgid "Sets the maximum allowed duration of any idling transaction." msgstr "Задаёт предельно допустимую длительность для простаивающих транзакций." -#: utils/misc/guc.c:2127 +#: utils/misc/guc.c:2125 msgid "Minimum age at which VACUUM should freeze a table row." msgstr "" "Минимальный возраст строк таблицы, при котором VACUUM может их заморозить." -#: utils/misc/guc.c:2137 +#: utils/misc/guc.c:2135 msgid "Age at which VACUUM should scan whole table to freeze tuples." msgstr "" "Возраст, при котором VACUUM должен сканировать всю таблицу с целью " "заморозить кортежи." -#: utils/misc/guc.c:2147 +#: utils/misc/guc.c:2145 msgid "Minimum age at which VACUUM should freeze a MultiXactId in a table row." msgstr "" "Минимальный возраст, при котором VACUUM будет замораживать MultiXactId в " "строке таблицы." -#: utils/misc/guc.c:2157 +#: utils/misc/guc.c:2155 msgid "Multixact age at which VACUUM should scan whole table to freeze tuples." msgstr "" "Возраст multixact, при котором VACUUM должен сканировать всю таблицу с целью " "заморозить кортежи." -#: utils/misc/guc.c:2167 +#: utils/misc/guc.c:2165 msgid "" "Number of transactions by which VACUUM and HOT cleanup should be deferred, " "if any." @@ -25595,11 +25872,11 @@ msgstr "" "Определяет, на сколько транзакций следует задержать старые строки, выполняя " "VACUUM или \"горячее\" обновление." -#: utils/misc/guc.c:2180 +#: utils/misc/guc.c:2178 msgid "Sets the maximum number of locks per transaction." msgstr "Задаёт предельное число блокировок на транзакцию." 
-#: utils/misc/guc.c:2181 +#: utils/misc/guc.c:2179 msgid "" "The shared lock table is sized on the assumption that at most " "max_locks_per_transaction * max_connections distinct objects will need to be " @@ -25609,11 +25886,11 @@ msgstr "" "один момент времени потребуется заблокировать не больше чем " "max_locks_per_transaction * max_connections различных объектов." -#: utils/misc/guc.c:2192 +#: utils/misc/guc.c:2190 msgid "Sets the maximum number of predicate locks per transaction." msgstr "Задаёт предельное число предикатных блокировок на транзакцию." -#: utils/misc/guc.c:2193 +#: utils/misc/guc.c:2191 msgid "" "The shared predicate lock table is sized on the assumption that at most " "max_pred_locks_per_transaction * max_connections distinct objects will need " @@ -25623,39 +25900,68 @@ msgstr "" "предположения, что в один момент времени потребуется заблокировать не больше " "чем max_pred_locks_per_transaction * max_connections различных объектов." -#: utils/misc/guc.c:2204 +#: utils/misc/guc.c:2202 +msgid "" +"Sets the maximum number of predicate-locked pages and tuples per relation." +msgstr "" +"Задаёт максимальное число страниц и кортежей, блокируемых предикатными " +"блокировками в одном отношении." + +#: utils/misc/guc.c:2203 +msgid "" +"If more than this total of pages and tuples in the same relation are locked " +"by a connection, those locks are replaced by a relation-level lock." +msgstr "" +"Если одним соединением блокируется больше этого общего числа страниц и " +"кортежей, эти блокировки заменяются блокировкой на уровне отношения." + +#: utils/misc/guc.c:2213 +msgid "Sets the maximum number of predicate-locked tuples per page." +msgstr "" +"Задаёт максимальное число кортежей, блокируемых предикатными блокировками в " +"одной странице." + +#: utils/misc/guc.c:2214 +msgid "" +"If more than this number of tuples on the same page are locked by a " +"connection, those locks are replaced by a page-level lock." +msgstr "" +"Если одним соединением блокируется больше этого числа кортежей на одной " +"странице, эти блокировки заменяются блокировкой на уровне страницы." + +#: utils/misc/guc.c:2224 msgid "Sets the maximum allowed time to complete client authentication." msgstr "Ограничивает время, за которое клиент должен пройти аутентификацию." -#: utils/misc/guc.c:2216 +#: utils/misc/guc.c:2236 msgid "Waits N seconds on connection startup before authentication." msgstr "Ждать N секунд при подключении до проверки подлинности." -#: utils/misc/guc.c:2227 +#: utils/misc/guc.c:2247 msgid "Sets the number of WAL files held for standby servers." msgstr "Определяет, сколько файлов WAL нужно сохранять для резервных серверов." -#: utils/misc/guc.c:2237 +#: utils/misc/guc.c:2257 msgid "Sets the minimum size to shrink the WAL to." msgstr "Задаёт минимальный размер WAL при сжатии." -#: utils/misc/guc.c:2248 +#: utils/misc/guc.c:2268 msgid "Sets the WAL size that triggers a checkpoint." msgstr "Задаёт размер WAL, при котором инициируется контрольная точка." -#: utils/misc/guc.c:2259 +#: utils/misc/guc.c:2279 msgid "Sets the maximum time between automatic WAL checkpoints." msgstr "" "Задаёт максимальное время между автоматическими контрольными точками WAL." -#: utils/misc/guc.c:2270 +#: utils/misc/guc.c:2290 msgid "" "Enables warnings if checkpoint segments are filled more frequently than this." msgstr "" "Выдаёт предупреждения, когда сегменты контрольных точек заполняются за это " "время." 
-#: utils/misc/guc.c:2272 +#: utils/misc/guc.c:2292 msgid "" "Write a message to the server log if checkpoints caused by the filling of " "checkpoint segment files happens more frequently than this number of " @@ -25665,41 +25971,41 @@ msgstr "" "переполнением файлов сегментов, происходят за столько секунд. Нулевое " "значение отключает эти предупреждения." -#: utils/misc/guc.c:2284 utils/misc/guc.c:2441 utils/misc/guc.c:2468 +#: utils/misc/guc.c:2304 utils/misc/guc.c:2461 utils/misc/guc.c:2488 msgid "" "Number of pages after which previously performed writes are flushed to disk." msgstr "" "Число страниц, по достижении которого ранее выполненные операции записи " "сбрасываются на диск." -#: utils/misc/guc.c:2295 +#: utils/misc/guc.c:2315 msgid "Sets the number of disk-page buffers in shared memory for WAL." msgstr "Задаёт число буферов дисковых страниц в разделяемой памяти для WAL." -#: utils/misc/guc.c:2306 +#: utils/misc/guc.c:2326 msgid "Time between WAL flushes performed in the WAL writer." msgstr "Задержка между сбросом WAL в процессе, записывающем WAL." -#: utils/misc/guc.c:2317 +#: utils/misc/guc.c:2337 msgid "Amount of WAL written out by WAL writer that triggers a flush." msgstr "" "Объём WAL, обработанный пишущим WAL процессом, при котором инициируется " "сброс журнала на диск." -#: utils/misc/guc.c:2329 +#: utils/misc/guc.c:2349 msgid "Sets the maximum number of simultaneously running WAL sender processes." msgstr "" "Задаёт предельное число одновременно работающих процессов передачи WAL." -#: utils/misc/guc.c:2340 +#: utils/misc/guc.c:2360 msgid "Sets the maximum number of simultaneously defined replication slots." msgstr "Задаёт предельное число одновременно существующих слотов репликации." -#: utils/misc/guc.c:2350 +#: utils/misc/guc.c:2370 msgid "Sets the maximum time to wait for WAL replication." msgstr "Задаёт предельное время ожидания репликации WAL." -#: utils/misc/guc.c:2361 +#: utils/misc/guc.c:2381 msgid "" "Sets the delay in microseconds between transaction commit and flushing WAL " "to disk." @@ -25707,18 +26013,18 @@ msgstr "" "Задаёт задержку в микросекундах между фиксированием транзакций и сбросом WAL " "на диск." -#: utils/misc/guc.c:2373 +#: utils/misc/guc.c:2393 msgid "" "Sets the minimum concurrent open transactions before performing commit_delay." msgstr "" "Задаёт минимальное число одновременно открытых транзакций для применения " "commit_delay." -#: utils/misc/guc.c:2384 +#: utils/misc/guc.c:2404 msgid "Sets the number of digits displayed for floating-point values." msgstr "Задаёт число выводимых цифр для чисел с плавающей точкой." -#: utils/misc/guc.c:2385 +#: utils/misc/guc.c:2405 msgid "" "This affects real, double precision, and geometric data types. The parameter " "value is added to the standard number of digits (FLT_DIG or DBL_DIG as " @@ -25727,17 +26033,17 @@ msgstr "" "Этот параметр относится к типам real, double и geometric. Значение параметра " "добавляется к стандартному числу цифр (FLT_DIG или DBL_DIG)." -#: utils/misc/guc.c:2396 +#: utils/misc/guc.c:2416 msgid "Sets the minimum execution time above which statements will be logged." msgstr "" "Задаёт предельное время выполнения оператора, при превышении которого он " "фиксируется в протоколе." -#: utils/misc/guc.c:2398 +#: utils/misc/guc.c:2418 msgid "Zero prints all queries. -1 turns this feature off." msgstr "При 0 протоколируются все запросы; -1 отключает эти сообщения." 
-#: utils/misc/guc.c:2408 +#: utils/misc/guc.c:2428 msgid "" "Sets the minimum execution time above which autovacuum actions will be " "logged." @@ -25745,22 +26051,22 @@ msgstr "" "Задаёт предельное время выполнения автоочистки, при превышении которого эта " "операция фиксируется в протоколе." -#: utils/misc/guc.c:2410 +#: utils/misc/guc.c:2430 msgid "Zero prints all actions. -1 turns autovacuum logging off." msgstr "" "При 0 протоколируются все операции автоочистки; -1 отключает эти сообщения." -#: utils/misc/guc.c:2420 +#: utils/misc/guc.c:2440 msgid "Background writer sleep time between rounds." msgstr "Время простоя в процессе фоновой записи между подходами." -#: utils/misc/guc.c:2431 +#: utils/misc/guc.c:2451 msgid "Background writer maximum number of LRU pages to flush per round." msgstr "" "Максимальное число LRU-страниц, сбрасываемых за один подход, в процессе " "фоновой записи." -#: utils/misc/guc.c:2454 +#: utils/misc/guc.c:2474 msgid "" "Number of simultaneous requests that can be handled efficiently by the disk " "subsystem." @@ -25768,100 +26074,100 @@ msgstr "" "Число одновременных запросов, которые могут быть эффективно обработаны " "дисковой подсистемой." -#: utils/misc/guc.c:2455 +#: utils/misc/guc.c:2475 msgid "" "For RAID arrays, this should be approximately the number of drive spindles " "in the array." msgstr "" "Для RAID-массивов это примерно равно числу физических дисков в массиве." -#: utils/misc/guc.c:2481 +#: utils/misc/guc.c:2501 msgid "Maximum number of concurrent worker processes." msgstr "Задаёт максимально возможное число рабочих процессов." -#: utils/misc/guc.c:2493 +#: utils/misc/guc.c:2513 msgid "Maximum number of logical replication worker processes." msgstr "" "Задаёт максимально возможное число рабочих процессов логической репликации." -#: utils/misc/guc.c:2505 +#: utils/misc/guc.c:2525 msgid "Maximum number of table synchronization workers per subscription." msgstr "" "Задаёт максимально возможное число процессов синхронизации таблиц для одной " "подписки." -#: utils/misc/guc.c:2515 +#: utils/misc/guc.c:2535 msgid "Automatic log file rotation will occur after N minutes." msgstr "Автоматическая прокрутка файла протокола через каждые N минут." -#: utils/misc/guc.c:2526 +#: utils/misc/guc.c:2546 msgid "Automatic log file rotation will occur after N kilobytes." msgstr "" "Автоматическая прокрутка файла протокола при выходе за предел N килобайт." -#: utils/misc/guc.c:2537 +#: utils/misc/guc.c:2557 msgid "Shows the maximum number of function arguments." msgstr "Показывает максимально возможное число аргументов функций." -#: utils/misc/guc.c:2548 +#: utils/misc/guc.c:2568 msgid "Shows the maximum number of index keys." msgstr "Показывает максимально возможное число ключей в индексе." -#: utils/misc/guc.c:2559 +#: utils/misc/guc.c:2579 msgid "Shows the maximum identifier length." msgstr "Показывает максимально возможную длину идентификатора." -#: utils/misc/guc.c:2570 +#: utils/misc/guc.c:2590 msgid "Shows the size of a disk block." msgstr "Показывает размер дискового блока." -#: utils/misc/guc.c:2581 +#: utils/misc/guc.c:2601 msgid "Shows the number of pages per disk file." msgstr "Показывает число страниц в одном файле." -#: utils/misc/guc.c:2592 +#: utils/misc/guc.c:2612 msgid "Shows the block size in the write ahead log." msgstr "Показывает размер блока в журнале WAL." -#: utils/misc/guc.c:2603 +#: utils/misc/guc.c:2623 msgid "" "Sets the time to wait before retrying to retrieve WAL after a failed attempt." 
msgstr "" "Задаёт время задержки перед повторной попыткой обращения к WAL после неудачи." -#: utils/misc/guc.c:2615 +#: utils/misc/guc.c:2635 msgid "Shows the number of pages per write ahead log segment." msgstr "Показывает число страниц в одном сегменте журнала WAL." -#: utils/misc/guc.c:2628 +#: utils/misc/guc.c:2648 msgid "Time to sleep between autovacuum runs." msgstr "Время простоя между запусками автоочистки." -#: utils/misc/guc.c:2638 +#: utils/misc/guc.c:2658 msgid "Minimum number of tuple updates or deletes prior to vacuum." msgstr "Минимальное число изменений или удалений кортежей, вызывающее очистку." -#: utils/misc/guc.c:2647 +#: utils/misc/guc.c:2667 msgid "Minimum number of tuple inserts, updates, or deletes prior to analyze." msgstr "" "Минимальное число добавлений, изменений или удалений кортежей, вызывающее " "анализ." -#: utils/misc/guc.c:2657 +#: utils/misc/guc.c:2677 msgid "" "Age at which to autovacuum a table to prevent transaction ID wraparound." msgstr "" "Возраст, при котором необходима автоочистка таблицы для предотвращения " "наложений ID транзакций." -#: utils/misc/guc.c:2668 +#: utils/misc/guc.c:2688 msgid "" "Multixact age at which to autovacuum a table to prevent multixact wraparound." msgstr "" "Возраст multixact, при котором необходима автоочистка таблицы для " "предотвращения наложений идентификаторов multixact." -#: utils/misc/guc.c:2678 +#: utils/misc/guc.c:2698 msgid "" "Sets the maximum number of simultaneously running autovacuum worker " "processes." @@ -25869,23 +26175,23 @@ msgstr "" "Задаёт предельное число одновременно выполняющихся рабочих процессов " "автоочистки." -#: utils/misc/guc.c:2688 +#: utils/misc/guc.c:2708 msgid "Sets the maximum number of parallel processes per executor node." msgstr "Задаёт максимальное число параллельных процессов на узел исполнителя." -#: utils/misc/guc.c:2698 +#: utils/misc/guc.c:2718 msgid "" "Sets the maximum number of parallel workers than can be active at one time." msgstr "" "Задаёт максимальное число параллельных процессов, которые могут быть активны " "одновременно." -#: utils/misc/guc.c:2708 +#: utils/misc/guc.c:2728 msgid "Sets the maximum memory to be used by each autovacuum worker process." msgstr "" "Задаёт предельный объём памяти для каждого рабочего процесса автоочистки." -#: utils/misc/guc.c:2719 +#: utils/misc/guc.c:2739 msgid "" "Time before a snapshot is too old to read pages changed after the snapshot " "was taken." @@ -25893,33 +26199,33 @@ msgstr "" "Срок, по истечении которого снимок считается слишком старым для получения " "страниц, изменённых после создания снимка." -#: utils/misc/guc.c:2720 +#: utils/misc/guc.c:2740 msgid "A value of -1 disables this feature." msgstr "Значение -1 отключает это поведение." -#: utils/misc/guc.c:2730 +#: utils/misc/guc.c:2750 msgid "Time between issuing TCP keepalives." msgstr "Интервал между TCP-пакетами пульса (keep-alive)." -#: utils/misc/guc.c:2731 utils/misc/guc.c:2742 +#: utils/misc/guc.c:2751 utils/misc/guc.c:2762 msgid "A value of 0 uses the system default." msgstr "При нулевом значении действует системный параметр." -#: utils/misc/guc.c:2741 +#: utils/misc/guc.c:2761 msgid "Time between TCP keepalive retransmits." msgstr "Интервал между повторениями TCP-пакетов пульса (keep-alive)." -#: utils/misc/guc.c:2752 +#: utils/misc/guc.c:2772 msgid "SSL renegotiation is no longer supported; this can only be 0." msgstr "" "Повторное согласование SSL более не поддерживается; единственное допустимое " "значение - 0." 
-#: utils/misc/guc.c:2763 +#: utils/misc/guc.c:2783 msgid "Maximum number of TCP keepalive retransmits." msgstr "Максимальное число повторений TCP-пакетов пульса (keep-alive)." -#: utils/misc/guc.c:2764 +#: utils/misc/guc.c:2784 msgid "" "This controls the number of consecutive keepalive retransmits that can be " "lost before a connection is considered dead. A value of 0 uses the system " @@ -25929,15 +26235,15 @@ msgstr "" "прежде чем соединение будет считаться пропавшим. При нулевом значении " "действует системный параметр." -#: utils/misc/guc.c:2775 +#: utils/misc/guc.c:2795 msgid "Sets the maximum allowed result for exact search by GIN." msgstr "Ограничивает результат точного поиска с использованием GIN." -#: utils/misc/guc.c:2786 +#: utils/misc/guc.c:2806 msgid "Sets the planner's assumption about the size of the disk cache." msgstr "Подсказывает планировщику примерный размер дискового кеша." -#: utils/misc/guc.c:2787 +#: utils/misc/guc.c:2807 msgid "" "That is, the portion of the kernel's disk cache that will be used for " "PostgreSQL data files. This is measured in disk pages, which are normally 8 " @@ -25946,12 +26252,12 @@ msgstr "" "Подразумевается часть дискового кеша в ядре ОС, которую займут файлы данных " "PostgreSQL. Размер задаётся в дисковых страницах (обычно это 8 КБ)." -#: utils/misc/guc.c:2799 +#: utils/misc/guc.c:2819 msgid "Sets the minimum amount of table data for a parallel scan." msgstr "" "Задаёт минимальный объём данных в таблице для параллельного сканирования." -#: utils/misc/guc.c:2800 +#: utils/misc/guc.c:2820 msgid "" "If the planner estimates that it will read a number of table pages too small " "to reach this limit, a parallel scan will not be considered." @@ -25960,12 +26266,12 @@ msgstr "" "задано этим ограничением, он исключает параллельное сканирование из " "рассмотрения." -#: utils/misc/guc.c:2810 +#: utils/misc/guc.c:2830 msgid "Sets the minimum amount of index data for a parallel scan." msgstr "" "Задаёт минимальный объём данных в индексе для параллельного сканирования." -#: utils/misc/guc.c:2811 +#: utils/misc/guc.c:2831 msgid "" "If the planner estimates that it will read a number of index pages too small " "to reach this limit, a parallel scan will not be considered." @@ -25974,35 +26280,35 @@ msgstr "" "задано этим ограничением, он исключает параллельное сканирование из " "рассмотрения." -#: utils/misc/guc.c:2822 +#: utils/misc/guc.c:2842 msgid "Shows the server version as an integer." msgstr "Показывает версию сервера в виде целого числа." -#: utils/misc/guc.c:2833 +#: utils/misc/guc.c:2853 msgid "Log the use of temporary files larger than this number of kilobytes." msgstr "" "Фиксирует в протоколе превышение временными файлами заданного размера (в КБ)." -#: utils/misc/guc.c:2834 +#: utils/misc/guc.c:2854 msgid "Zero logs all files. The default is -1 (turning this feature off)." msgstr "" "При 0 отмечаются все файлы; при -1 эти сообщения отключаются (по умолчанию)." -#: utils/misc/guc.c:2844 +#: utils/misc/guc.c:2864 msgid "Sets the size reserved for pg_stat_activity.query, in bytes." msgstr "Задаёт размер, резервируемый для pg_stat_activity.query (в байтах)." -#: utils/misc/guc.c:2859 +#: utils/misc/guc.c:2879 msgid "Sets the maximum size of the pending list for GIN index." msgstr "Задаёт максимальный размер списка-очереди для GIN-индекса." -#: utils/misc/guc.c:2879 +#: utils/misc/guc.c:2899 msgid "" "Sets the planner's estimate of the cost of a sequentially fetched disk page." 
msgstr "" "Задаёт для планировщика ориентир стоимости последовательного чтения страницы." -#: utils/misc/guc.c:2889 +#: utils/misc/guc.c:2909 msgid "" "Sets the planner's estimate of the cost of a nonsequentially fetched disk " "page." @@ -26010,13 +26316,13 @@ msgstr "" "Задаёт для планировщика ориентир стоимости непоследовательного чтения " "страницы." -#: utils/misc/guc.c:2899 +#: utils/misc/guc.c:2919 msgid "Sets the planner's estimate of the cost of processing each tuple (row)." msgstr "" "Задаёт для планировщика ориентир стоимости обработки каждого кортежа " "(строки)." -#: utils/misc/guc.c:2909 +#: utils/misc/guc.c:2929 msgid "" "Sets the planner's estimate of the cost of processing each index entry " "during an index scan." @@ -26024,7 +26330,7 @@ msgstr "" "Задаёт для планировщика ориентир стоимости обработки каждого элемента " "индекса в процессе сканирования индекса." -#: utils/misc/guc.c:2919 +#: utils/misc/guc.c:2939 msgid "" "Sets the planner's estimate of the cost of processing each operator or " "function call." @@ -26032,7 +26338,7 @@ msgstr "" "Задаёт для планировщика ориентир стоимости обработки каждого оператора или " "вызова функции." -#: utils/misc/guc.c:2929 +#: utils/misc/guc.c:2949 msgid "" "Sets the planner's estimate of the cost of passing each tuple (row) from " "worker to master backend." @@ -26040,7 +26346,7 @@ msgstr "" "Задаёт для планировщика ориентир стоимости передачи каждого кортежа (строки) " "от рабочего процесса обслуживающему." -#: utils/misc/guc.c:2939 +#: utils/misc/guc.c:2959 msgid "" "Sets the planner's estimate of the cost of starting up worker processes for " "parallel query." @@ -26048,32 +26354,32 @@ msgstr "" "Задаёт для планировщика ориентир стоимости запуска рабочих процессов для " "параллельного выполнения запроса." -#: utils/misc/guc.c:2950 +#: utils/misc/guc.c:2970 msgid "" "Sets the planner's estimate of the fraction of a cursor's rows that will be " "retrieved." msgstr "" "Задаёт для планировщика ориентир доли требуемых строк курсора в общем числе." -#: utils/misc/guc.c:2961 +#: utils/misc/guc.c:2981 msgid "GEQO: selective pressure within the population." msgstr "GEQO: селективное давление в популяции." -#: utils/misc/guc.c:2971 +#: utils/misc/guc.c:2991 msgid "GEQO: seed for random path selection." msgstr "GEQO: отправное значение для случайного выбора пути." -#: utils/misc/guc.c:2981 +#: utils/misc/guc.c:3001 msgid "Multiple of the average buffer usage to free per round." msgstr "" "Множитель для среднего числа использованных буферов, определяющий число " "буферов, освобождаемых за один подход." -#: utils/misc/guc.c:2991 +#: utils/misc/guc.c:3011 msgid "Sets the seed for random-number generation." msgstr "Задаёт отправное значение для генератора случайных чисел." -#: utils/misc/guc.c:3002 +#: utils/misc/guc.c:3022 msgid "" "Number of tuple updates or deletes prior to vacuum as a fraction of " "reltuples." @@ -26081,7 +26387,7 @@ msgstr "" "Отношение числа обновлений или удалений кортежей к reltuples, определяющее " "потребность в очистке." -#: utils/misc/guc.c:3011 +#: utils/misc/guc.c:3031 msgid "" "Number of tuple inserts, updates, or deletes prior to analyze as a fraction " "of reltuples." @@ -26089,7 +26395,7 @@ msgstr "" "Отношение числа добавлений, обновлений или удалений кортежей к reltuples, " "определяющее потребность в анализе." -#: utils/misc/guc.c:3021 +#: utils/misc/guc.c:3041 msgid "" "Time spent flushing dirty buffers during checkpoint, as fraction of " "checkpoint interval." 
@@ -26097,53 +26403,53 @@ msgstr "" "Отношение продолжительности сброса \"грязных\" буферов во время контрольной " "точки к интервалу контрольных точек." -#: utils/misc/guc.c:3040 +#: utils/misc/guc.c:3060 msgid "Sets the shell command that will be called to archive a WAL file." msgstr "Задаёт команду оболочки, вызываемую для архивации файла WAL." -#: utils/misc/guc.c:3050 +#: utils/misc/guc.c:3070 msgid "Sets the client's character set encoding." msgstr "Задаёт кодировку символов, используемую клиентом." -#: utils/misc/guc.c:3061 +#: utils/misc/guc.c:3081 msgid "Controls information prefixed to each log line." msgstr "Определяет содержимое префикса каждой строки протокола." -#: utils/misc/guc.c:3062 +#: utils/misc/guc.c:3082 msgid "If blank, no prefix is used." msgstr "При пустом значении префикс также отсутствует." -#: utils/misc/guc.c:3071 +#: utils/misc/guc.c:3091 msgid "Sets the time zone to use in log messages." msgstr "Задаёт часовой пояс для вывода времени в сообщениях протокола." -#: utils/misc/guc.c:3081 +#: utils/misc/guc.c:3101 msgid "Sets the display format for date and time values." msgstr "Устанавливает формат вывода дат и времени." -#: utils/misc/guc.c:3082 +#: utils/misc/guc.c:3102 msgid "Also controls interpretation of ambiguous date inputs." msgstr "Также помогает разбирать неоднозначно заданные вводимые даты." -#: utils/misc/guc.c:3093 +#: utils/misc/guc.c:3113 msgid "Sets the default tablespace to create tables and indexes in." msgstr "" "Задаёт табличное пространство по умолчанию для новых таблиц и индексов." -#: utils/misc/guc.c:3094 +#: utils/misc/guc.c:3114 msgid "An empty string selects the database's default tablespace." msgstr "При пустом значении используется табличное пространство базы данных." -#: utils/misc/guc.c:3104 +#: utils/misc/guc.c:3124 msgid "Sets the tablespace(s) to use for temporary tables and sort files." msgstr "" "Задаёт табличное пространство(а) для временных таблиц и файлов сортировки." -#: utils/misc/guc.c:3115 +#: utils/misc/guc.c:3135 msgid "Sets the path for dynamically loadable modules." msgstr "Задаёт путь для динамически загружаемых модулей." -#: utils/misc/guc.c:3116 +#: utils/misc/guc.c:3136 msgid "" "If a dynamically loadable module needs to be opened and the specified name " "does not have a directory component (i.e., the name does not contain a " @@ -26153,79 +26459,79 @@ msgstr "" "указан путь (нет символа '/'), система будет искать этот файл в заданном " "пути." -#: utils/misc/guc.c:3129 +#: utils/misc/guc.c:3149 msgid "Sets the location of the Kerberos server key file." msgstr "Задаёт размещение файла с ключом Kerberos для данного сервера." -#: utils/misc/guc.c:3140 +#: utils/misc/guc.c:3160 msgid "Sets the Bonjour service name." msgstr "Задаёт название службы Bonjour." -#: utils/misc/guc.c:3152 +#: utils/misc/guc.c:3172 msgid "Shows the collation order locale." msgstr "Показывает правило сортировки." -#: utils/misc/guc.c:3163 +#: utils/misc/guc.c:3183 msgid "Shows the character classification and case conversion locale." msgstr "Показывает правило классификации символов и преобразования регистра." -#: utils/misc/guc.c:3174 +#: utils/misc/guc.c:3194 msgid "Sets the language in which messages are displayed." msgstr "Задаёт язык выводимых сообщений." -#: utils/misc/guc.c:3184 +#: utils/misc/guc.c:3204 msgid "Sets the locale for formatting monetary amounts." msgstr "Задаёт локаль для форматирования денежных сумм." -#: utils/misc/guc.c:3194 +#: utils/misc/guc.c:3214 msgid "Sets the locale for formatting numbers." 
msgstr "Задаёт локаль для форматирования чисел." -#: utils/misc/guc.c:3204 +#: utils/misc/guc.c:3224 msgid "Sets the locale for formatting date and time values." msgstr "Задаёт локаль для форматирования дат и времени." -#: utils/misc/guc.c:3214 +#: utils/misc/guc.c:3234 msgid "Lists shared libraries to preload into each backend." msgstr "" "Список разделяемых библиотек, заранее загружаемых в каждый обслуживающий " "процесс." -#: utils/misc/guc.c:3225 +#: utils/misc/guc.c:3245 msgid "Lists shared libraries to preload into server." msgstr "Список разделяемых библиотек, заранее загружаемых в память сервера." -#: utils/misc/guc.c:3236 +#: utils/misc/guc.c:3256 msgid "Lists unprivileged shared libraries to preload into each backend." msgstr "" "Список непривилегированных разделяемых библиотек, заранее загружаемых в " "каждый обслуживающий процесс." -#: utils/misc/guc.c:3247 +#: utils/misc/guc.c:3267 msgid "Sets the schema search order for names that are not schema-qualified." msgstr "Задаёт порядок просмотра схемы при поиске неполных имён." -#: utils/misc/guc.c:3259 +#: utils/misc/guc.c:3279 msgid "Sets the server (database) character set encoding." msgstr "Задаёт кодировку символов сервера (баз данных)." -#: utils/misc/guc.c:3271 +#: utils/misc/guc.c:3291 msgid "Shows the server version." msgstr "Показывает версию сервера." -#: utils/misc/guc.c:3283 +#: utils/misc/guc.c:3303 msgid "Sets the current role." msgstr "Задаёт текущую роль." -#: utils/misc/guc.c:3295 +#: utils/misc/guc.c:3315 msgid "Sets the session user name." msgstr "Задаёт имя пользователя в сеансе." -#: utils/misc/guc.c:3306 +#: utils/misc/guc.c:3326 msgid "Sets the destination for server log output." msgstr "Определяет, куда будет выводиться протокол сервера." -#: utils/misc/guc.c:3307 +#: utils/misc/guc.c:3327 msgid "" "Valid values are combinations of \"stderr\", \"syslog\", \"csvlog\", and " "\"eventlog\", depending on the platform." @@ -26233,24 +26539,24 @@ msgstr "" "Значение может включать сочетание слов \"stderr\", \"syslog\", \"csvlog\" и " "\"eventlog\", в зависимости от платформы." -#: utils/misc/guc.c:3318 +#: utils/misc/guc.c:3338 msgid "Sets the destination directory for log files." msgstr "Задаёт целевой каталог для файлов протоколов." -#: utils/misc/guc.c:3319 +#: utils/misc/guc.c:3339 msgid "Can be specified as relative to the data directory or as absolute path." msgstr "" "Путь может быть абсолютным или указываться относительно каталога данных." -#: utils/misc/guc.c:3329 +#: utils/misc/guc.c:3349 msgid "Sets the file name pattern for log files." msgstr "Задаёт шаблон имени для файлов протоколов." -#: utils/misc/guc.c:3340 +#: utils/misc/guc.c:3360 msgid "Sets the program name used to identify PostgreSQL messages in syslog." msgstr "Задаёт имя программы для идентификации сообщений PostgreSQL в syslog." -#: utils/misc/guc.c:3351 +#: utils/misc/guc.c:3371 msgid "" "Sets the application name used to identify PostgreSQL messages in the event " "log." @@ -26258,113 +26564,117 @@ msgstr "" "Задаёт имя приложения для идентификации сообщений PostgreSQL в журнале " "событий." -#: utils/misc/guc.c:3362 +#: utils/misc/guc.c:3382 msgid "Sets the time zone for displaying and interpreting time stamps." msgstr "" "Задаёт часовой пояс для вывода и разбора строкового представления времени." -#: utils/misc/guc.c:3372 +#: utils/misc/guc.c:3392 msgid "Selects a file of time zone abbreviations." msgstr "Выбирает файл с сокращёнными названиями часовых поясов." 
-#: utils/misc/guc.c:3382 +#: utils/misc/guc.c:3402 msgid "Sets the current transaction's isolation level." msgstr "Задаёт текущий уровень изоляции транзакций." -#: utils/misc/guc.c:3393 +#: utils/misc/guc.c:3413 msgid "Sets the owning group of the Unix-domain socket." -msgstr "Задаёт группу-владельца доменного сокета Unix." +msgstr "Задаёт группу-владельца Unix-сокета." -#: utils/misc/guc.c:3394 +#: utils/misc/guc.c:3414 msgid "" "The owning user of the socket is always the user that starts the server." msgstr "" "Собственно владельцем сокета всегда будет пользователь, запускающий сервер." -#: utils/misc/guc.c:3404 +#: utils/misc/guc.c:3424 msgid "Sets the directories where Unix-domain sockets will be created." -msgstr "Задаёт каталоги, где будут создаваться доменные сокеты Unix." +msgstr "Задаёт каталоги, где будут создаваться Unix-сокеты." -#: utils/misc/guc.c:3419 +#: utils/misc/guc.c:3439 msgid "Sets the host name or IP address(es) to listen to." msgstr "Задаёт имя узла или IP-адрес(а) для привязки." -#: utils/misc/guc.c:3434 +#: utils/misc/guc.c:3454 msgid "Sets the server's data directory." msgstr "Определяет каталог данных сервера." -#: utils/misc/guc.c:3445 +#: utils/misc/guc.c:3465 msgid "Sets the server's main configuration file." msgstr "Определяет основной файл конфигурации сервера." -#: utils/misc/guc.c:3456 +#: utils/misc/guc.c:3476 msgid "Sets the server's \"hba\" configuration file." msgstr "Задаёт путь к файлу конфигурации \"hba\"." -#: utils/misc/guc.c:3467 +#: utils/misc/guc.c:3487 msgid "Sets the server's \"ident\" configuration file." msgstr "Задаёт путь к файлу конфигурации \"ident\"." -#: utils/misc/guc.c:3478 +#: utils/misc/guc.c:3498 msgid "Writes the postmaster PID to the specified file." msgstr "Файл, в который будет записан код процесса postmaster." -#: utils/misc/guc.c:3489 +#: utils/misc/guc.c:3509 msgid "Location of the SSL server certificate file." msgstr "Размещение файла сертификата сервера для SSL." -#: utils/misc/guc.c:3499 +#: utils/misc/guc.c:3519 msgid "Location of the SSL server private key file." msgstr "Размещение файла с закрытым ключом сервера для SSL." -#: utils/misc/guc.c:3509 +#: utils/misc/guc.c:3529 msgid "Location of the SSL certificate authority file." msgstr "Размещение файла центра сертификации для SSL." -#: utils/misc/guc.c:3519 +#: utils/misc/guc.c:3539 msgid "Location of the SSL certificate revocation list file." msgstr "Размещение файла со списком отзыва сертификатов для SSL." -#: utils/misc/guc.c:3529 +#: utils/misc/guc.c:3549 msgid "Writes temporary statistics files to the specified directory." msgstr "Каталог, в который будут записываться временные файлы статистики." -#: utils/misc/guc.c:3540 +#: utils/misc/guc.c:3560 msgid "" "Number of synchronous standbys and list of names of potential synchronous " "ones." msgstr "" "Количество потенциально синхронных резервных серверов и список их имён." -#: utils/misc/guc.c:3551 +#: utils/misc/guc.c:3571 msgid "Sets default text search configuration." msgstr "Задаёт конфигурацию текстового поиска по умолчанию." -#: utils/misc/guc.c:3561 +#: utils/misc/guc.c:3581 msgid "Sets the list of allowed SSL ciphers." msgstr "Задаёт список допустимых алгоритмов шифрования для SSL." -#: utils/misc/guc.c:3576 +#: utils/misc/guc.c:3596 msgid "Sets the curve to use for ECDH." msgstr "Задаёт кривую для ECDH." -#: utils/misc/guc.c:3591 +#: utils/misc/guc.c:3611 +msgid "Location of the SSL DH parameters file." +msgstr "Размещение файла с параметрами SSL DH." 
+ +#: utils/misc/guc.c:3622 msgid "Sets the application name to be reported in statistics and logs." msgstr "" "Задаёт имя приложения, которое будет выводиться в статистике и протоколах." -#: utils/misc/guc.c:3602 +#: utils/misc/guc.c:3633 msgid "Sets the name of the cluster, which is included in the process title." msgstr "Задаёт имя кластера, которое будет добавляться в название процесса." -#: utils/misc/guc.c:3613 +#: utils/misc/guc.c:3644 msgid "" "Sets the WAL resource managers for which WAL consistency checks are done." msgstr "" "Задаёт перечень менеджеров ресурсов WAL, для которых выполняются проверки " "целостности WAL." -#: utils/misc/guc.c:3614 +#: utils/misc/guc.c:3645 msgid "" "Full-page images will be logged for all data blocks and cross-checked " "against the results of WAL replay." @@ -26372,20 +26682,20 @@ msgstr "" "При этом в журнал будут записываться образы полных страниц для всех блоков " "данных для сверки с результатами воспроизведения WAL." -#: utils/misc/guc.c:3633 +#: utils/misc/guc.c:3664 msgid "Sets whether \"\\'\" is allowed in string literals." msgstr "Определяет, можно ли использовать \"\\'\" в текстовых строках." -#: utils/misc/guc.c:3643 +#: utils/misc/guc.c:3674 msgid "Sets the output format for bytea." msgstr "Задаёт формат вывода данных типа bytea." -#: utils/misc/guc.c:3653 +#: utils/misc/guc.c:3684 msgid "Sets the message levels that are sent to the client." msgstr "Ограничивает уровень сообщений, передаваемых клиенту." -#: utils/misc/guc.c:3654 utils/misc/guc.c:3707 utils/misc/guc.c:3718 -#: utils/misc/guc.c:3784 +#: utils/misc/guc.c:3685 utils/misc/guc.c:3738 utils/misc/guc.c:3749 +#: utils/misc/guc.c:3815 msgid "" "Each level includes all the levels that follow it. The later the level, the " "fewer messages are sent." @@ -26393,12 +26703,12 @@ msgstr "" "Каждый уровень включает все последующие. Чем выше уровень, тем меньше " "сообщений." -#: utils/misc/guc.c:3664 +#: utils/misc/guc.c:3695 msgid "Enables the planner to use constraints to optimize queries." msgstr "" "Разрешает планировщику оптимизировать запросы, полагаясь на ограничения." -#: utils/misc/guc.c:3665 +#: utils/misc/guc.c:3696 msgid "" "Table scans will be skipped if their constraints guarantee that no rows " "match the query." @@ -26406,76 +26716,76 @@ msgstr "" "Сканирование таблицы не будет выполняться, если её ограничения гарантируют, " "что запросу не удовлетворяют никакие строки." -#: utils/misc/guc.c:3675 +#: utils/misc/guc.c:3706 msgid "Sets the transaction isolation level of each new transaction." msgstr "Задаёт уровень изоляции транзакций для новых транзакций." -#: utils/misc/guc.c:3685 +#: utils/misc/guc.c:3716 msgid "Sets the display format for interval values." msgstr "Задаёт формат отображения для внутренних значений." -#: utils/misc/guc.c:3696 +#: utils/misc/guc.c:3727 msgid "Sets the verbosity of logged messages." msgstr "Задаёт детализацию протоколируемых сообщений." -#: utils/misc/guc.c:3706 +#: utils/misc/guc.c:3737 msgid "Sets the message levels that are logged." msgstr "Ограничивает уровни протоколируемых сообщений." -#: utils/misc/guc.c:3717 +#: utils/misc/guc.c:3748 msgid "" "Causes all statements generating error at or above this level to be logged." msgstr "" "Включает протоколирование для SQL-операторов, выполненных с ошибкой этого " "или большего уровня." -#: utils/misc/guc.c:3728 +#: utils/misc/guc.c:3759 msgid "Sets the type of statements logged." msgstr "Задаёт тип протоколируемых операторов." 
-#: utils/misc/guc.c:3738 +#: utils/misc/guc.c:3769 msgid "Sets the syslog \"facility\" to be used when syslog enabled." msgstr "Задаёт получателя сообщений, отправляемых в syslog." -#: utils/misc/guc.c:3753 +#: utils/misc/guc.c:3784 msgid "Sets the session's behavior for triggers and rewrite rules." msgstr "" "Задаёт режим срабатывания триггеров и правил перезаписи для текущего сеанса." -#: utils/misc/guc.c:3763 +#: utils/misc/guc.c:3794 msgid "Sets the current transaction's synchronization level." msgstr "Задаёт уровень синхронизации текущей транзакции." -#: utils/misc/guc.c:3773 +#: utils/misc/guc.c:3804 msgid "Allows archiving of WAL files using archive_command." msgstr "Разрешает архивацию файлов WAL командой archive_command." -#: utils/misc/guc.c:3783 +#: utils/misc/guc.c:3814 msgid "Enables logging of recovery-related debugging information." msgstr "" "Включает протоколирование отладочной информации, связанной с репликацией." -#: utils/misc/guc.c:3799 +#: utils/misc/guc.c:3830 msgid "Collects function-level statistics on database activity." msgstr "Включает сбор статистики активности в БД на уровне функций." -#: utils/misc/guc.c:3809 +#: utils/misc/guc.c:3840 msgid "Set the level of information written to the WAL." msgstr "Задаёт уровень информации, записываемой в WAL." -#: utils/misc/guc.c:3819 +#: utils/misc/guc.c:3850 msgid "Selects the dynamic shared memory implementation used." msgstr "Выбирает используемую реализацию динамической разделяемой памяти." -#: utils/misc/guc.c:3829 +#: utils/misc/guc.c:3860 msgid "Selects the method used for forcing WAL updates to disk." msgstr "Выбирает метод принудительной записи изменений в WAL на диск." -#: utils/misc/guc.c:3839 +#: utils/misc/guc.c:3870 msgid "Sets how binary values are to be encoded in XML." msgstr "Определяет, как должны кодироваться двоичные значения в XML." -#: utils/misc/guc.c:3849 +#: utils/misc/guc.c:3880 msgid "" "Sets whether XML data in implicit parsing and serialization operations is to " "be considered as documents or content fragments." @@ -26483,15 +26793,15 @@ msgstr "" "Определяет, следует ли рассматривать XML-данные в неявных операциях разбора " "и сериализации как документы или как фрагменты содержания." -#: utils/misc/guc.c:3860 +#: utils/misc/guc.c:3891 msgid "Use of huge pages on Linux." msgstr "Включает использование гигантских страниц в Linux." -#: utils/misc/guc.c:3870 +#: utils/misc/guc.c:3901 msgid "Forces use of parallel query facilities." msgstr "Принудительно включает режим параллельного выполнения запросов." -#: utils/misc/guc.c:3871 +#: utils/misc/guc.c:3902 msgid "" "If possible, run query using a parallel worker and with parallel " "restrictions." @@ -26499,11 +26809,11 @@ msgstr "" "Если возможно, запрос выполняется параллельными исполнителями и с " "ограничениями параллельности." -#: utils/misc/guc.c:3880 +#: utils/misc/guc.c:3911 msgid "Encrypt passwords." msgstr "Шифровать пароли." -#: utils/misc/guc.c:3881 +#: utils/misc/guc.c:3912 msgid "" "When a password is specified in CREATE USER or ALTER USER without writing " "either ENCRYPTED or UNENCRYPTED, this parameter determines whether the " @@ -26512,12 +26822,12 @@ msgstr "" "Этот параметр определяет, нужно ли шифровать пароли, заданные в CREATE USER " "или ALTER USER без указания ENCRYPTED или UNENCRYPTED." 
-#: utils/misc/guc.c:4683 +#: utils/misc/guc.c:4714 #, c-format msgid "%s: could not access directory \"%s\": %s\n" msgstr "%s: ошибка доступа к каталогу \"%s\": %s\n" -#: utils/misc/guc.c:4688 +#: utils/misc/guc.c:4719 #, c-format msgid "" "Run initdb or pg_basebackup to initialize a PostgreSQL data directory.\n" @@ -26525,7 +26835,7 @@ msgstr "" "Запустите initdb или pg_basebackup для инициализации каталога данных " "PostgreSQL.\n" -#: utils/misc/guc.c:4708 +#: utils/misc/guc.c:4739 #, c-format msgid "" "%s does not know where to find the server configuration file.\n" @@ -26536,12 +26846,12 @@ msgstr "" "Вы должны указать его расположение в параметре --config-file или -D, либо " "установить переменную окружения PGDATA.\n" -#: utils/misc/guc.c:4727 +#: utils/misc/guc.c:4758 #, c-format msgid "%s: could not access the server configuration file \"%s\": %s\n" msgstr "%s не может открыть файл конфигурации сервера \"%s\": %s\n" -#: utils/misc/guc.c:4753 +#: utils/misc/guc.c:4784 #, c-format msgid "" "%s does not know where to find the database system data.\n" @@ -26552,7 +26862,7 @@ msgstr "" "Их расположение можно задать как значение \"data_directory\" в файле \"%s\", " "либо передать в параметре -D, либо установить переменную окружения PGDATA.\n" -#: utils/misc/guc.c:4801 +#: utils/misc/guc.c:4832 #, c-format msgid "" "%s does not know where to find the \"hba\" configuration file.\n" @@ -26563,7 +26873,7 @@ msgstr "" "Его расположение можно задать как значение \"hba_file\" в файле \"%s\", либо " "передать в параметре -D, либо установить переменную окружения PGDATA.\n" -#: utils/misc/guc.c:4824 +#: utils/misc/guc.c:4855 #, c-format msgid "" "%s does not know where to find the \"ident\" configuration file.\n" @@ -26574,129 +26884,129 @@ msgstr "" "Его расположение можно задать как значение \"ident_file\" в файле \"%s\", " "либо передать в параметре -D, либо установить переменную окружения PGDATA.\n" -#: utils/misc/guc.c:5498 utils/misc/guc.c:5545 +#: utils/misc/guc.c:5529 utils/misc/guc.c:5576 msgid "Value exceeds integer range." msgstr "Значение выходит за рамки целых чисел." -#: utils/misc/guc.c:5768 +#: utils/misc/guc.c:5799 #, c-format msgid "parameter \"%s\" requires a numeric value" msgstr "параметр \"%s\" требует числовое значение" -#: utils/misc/guc.c:5777 +#: utils/misc/guc.c:5808 #, c-format msgid "%g is outside the valid range for parameter \"%s\" (%g .. %g)" msgstr "%g вне диапазона, допустимого для параметра \"%s\" (%g .. 
%g)" -#: utils/misc/guc.c:5930 utils/misc/guc.c:7276 +#: utils/misc/guc.c:5961 utils/misc/guc.c:7307 #, c-format msgid "cannot set parameters during a parallel operation" msgstr "устанавливать параметры во время параллельных операций нельзя" -#: utils/misc/guc.c:5937 utils/misc/guc.c:6688 utils/misc/guc.c:6741 -#: utils/misc/guc.c:7104 utils/misc/guc.c:7863 utils/misc/guc.c:8031 -#: utils/misc/guc.c:9709 +#: utils/misc/guc.c:5968 utils/misc/guc.c:6719 utils/misc/guc.c:6772 +#: utils/misc/guc.c:7135 utils/misc/guc.c:7894 utils/misc/guc.c:8062 +#: utils/misc/guc.c:9731 #, c-format msgid "unrecognized configuration parameter \"%s\"" msgstr "нераспознанный параметр конфигурации: \"%s\"" -#: utils/misc/guc.c:5952 utils/misc/guc.c:7116 +#: utils/misc/guc.c:5983 utils/misc/guc.c:7147 #, c-format msgid "parameter \"%s\" cannot be changed" msgstr "параметр \"%s\" нельзя изменить" -#: utils/misc/guc.c:5975 utils/misc/guc.c:6168 utils/misc/guc.c:6258 -#: utils/misc/guc.c:6348 utils/misc/guc.c:6456 utils/misc/guc.c:6551 -#: guc-file.l:350 +#: utils/misc/guc.c:6006 utils/misc/guc.c:6199 utils/misc/guc.c:6289 +#: utils/misc/guc.c:6379 utils/misc/guc.c:6487 utils/misc/guc.c:6582 +#: guc-file.l:351 #, c-format msgid "parameter \"%s\" cannot be changed without restarting the server" msgstr "параметр \"%s\" изменяется только при перезапуске сервера" -#: utils/misc/guc.c:5985 +#: utils/misc/guc.c:6016 #, c-format msgid "parameter \"%s\" cannot be changed now" msgstr "параметр \"%s\" нельзя изменить сейчас" -#: utils/misc/guc.c:6003 utils/misc/guc.c:6049 utils/misc/guc.c:9725 +#: utils/misc/guc.c:6034 utils/misc/guc.c:6080 utils/misc/guc.c:9747 #, c-format msgid "permission denied to set parameter \"%s\"" msgstr "нет прав для изменения параметра \"%s\"" -#: utils/misc/guc.c:6039 +#: utils/misc/guc.c:6070 #, c-format msgid "parameter \"%s\" cannot be set after connection start" msgstr "параметр \"%s\" нельзя задать после установления соединения" -#: utils/misc/guc.c:6087 +#: utils/misc/guc.c:6118 #, c-format msgid "cannot set parameter \"%s\" within security-definer function" msgstr "" "параметр \"%s\" нельзя задать в функции с контекстом безопасности " "определившего" -#: utils/misc/guc.c:6696 utils/misc/guc.c:6746 utils/misc/guc.c:8038 +#: utils/misc/guc.c:6727 utils/misc/guc.c:6777 utils/misc/guc.c:8069 #, c-format msgid "must be superuser or a member of pg_read_all_settings to examine \"%s\"" msgstr "" "прочитать \"%s\" может только суперпользователь или член роли " "pg_read_all_settings" -#: utils/misc/guc.c:6813 +#: utils/misc/guc.c:6844 #, c-format msgid "SET %s takes only one argument" msgstr "SET %s принимает только один аргумент" -#: utils/misc/guc.c:7064 +#: utils/misc/guc.c:7095 #, c-format msgid "must be superuser to execute ALTER SYSTEM command" msgstr "выполнить команду ALTER SYSTEM может только суперпользователь" -#: utils/misc/guc.c:7149 +#: utils/misc/guc.c:7180 #, c-format msgid "parameter value for ALTER SYSTEM must not contain a newline" msgstr "значение параметра для ALTER SYSTEM не должно быть многострочным" -#: utils/misc/guc.c:7194 +#: utils/misc/guc.c:7225 #, c-format msgid "could not parse contents of file \"%s\"" msgstr "не удалось разобрать содержимое файла \"%s\"" -#: utils/misc/guc.c:7352 +#: utils/misc/guc.c:7383 #, c-format msgid "SET LOCAL TRANSACTION SNAPSHOT is not implemented" msgstr "SET LOCAL TRANSACTION SNAPSHOT не реализовано" -#: utils/misc/guc.c:7436 +#: utils/misc/guc.c:7467 #, c-format msgid "SET requires parameter name" msgstr "SET требует имя параметра" -#: 
utils/misc/guc.c:7560 +#: utils/misc/guc.c:7591 #, c-format msgid "attempt to redefine parameter \"%s\"" msgstr "попытка переопределить параметр \"%s\"" -#: utils/misc/guc.c:9342 +#: utils/misc/guc.c:9364 #, c-format msgid "parameter \"%s\" could not be set" msgstr "параметр \"%s\" нельзя установить" -#: utils/misc/guc.c:9429 +#: utils/misc/guc.c:9451 #, c-format msgid "could not parse setting for parameter \"%s\"" msgstr "не удалось разобрать значение параметра \"%s\"" -#: utils/misc/guc.c:9787 utils/misc/guc.c:9821 +#: utils/misc/guc.c:9809 utils/misc/guc.c:9843 #, c-format msgid "invalid value for parameter \"%s\": %d" msgstr "неверное значение параметра \"%s\": %d" -#: utils/misc/guc.c:9855 +#: utils/misc/guc.c:9877 #, c-format msgid "invalid value for parameter \"%s\": %g" msgstr "неверное значение параметра \"%s\": %g" -#: utils/misc/guc.c:10125 +#: utils/misc/guc.c:10147 #, c-format msgid "" "\"temp_buffers\" cannot be changed after any temporary tables have been " @@ -26705,23 +27015,23 @@ msgstr "" "параметр \"temp_buffers\" нельзя изменить после обращения к временным " "таблицам в текущем сеансе." -#: utils/misc/guc.c:10137 +#: utils/misc/guc.c:10159 #, c-format msgid "Bonjour is not supported by this build" msgstr "Bonjour не поддерживается в данной сборке" -#: utils/misc/guc.c:10150 +#: utils/misc/guc.c:10172 #, c-format msgid "SSL is not supported by this build" msgstr "SSL не поддерживается в данной сборке" -#: utils/misc/guc.c:10162 +#: utils/misc/guc.c:10184 #, c-format msgid "Cannot enable parameter when \"log_statement_stats\" is true." msgstr "" "Этот параметр нельзя включить, когда \"log_statement_stats\" равен true." -#: utils/misc/guc.c:10174 +#: utils/misc/guc.c:10196 #, c-format msgid "" "Cannot enable \"log_statement_stats\" when \"log_parser_stats\", " @@ -26750,6 +27060,12 @@ msgstr "" "вычисленная контрольная сумма (CRC) не соответствует значению, сохранённому " "в файле" +# well-spelled: пользов +#: utils/misc/pg_rusage.c:64 +#, c-format +msgid "CPU: user: %d.%02d s, system: %d.%02d s, elapsed: %d.%02d s" +msgstr "CPU: пользов.: %d.%02d с, система: %d.%02d с, прошло: %d.%02d с" + #: utils/misc/rls.c:128 #, c-format msgid "query would be affected by row-level security policy for table \"%s\"" @@ -26857,21 +27173,16 @@ msgstr "" msgid "Failed while creating memory context \"%s\"." msgstr "Ошибка при создании контекста памяти \"%s\"." -#: utils/mmgr/dsa.c:518 +#: utils/mmgr/dsa.c:518 utils/mmgr/dsa.c:1323 #, c-format -msgid "could not attach to dsa_handle" -msgstr "не удалось подключиться к dsa" +msgid "could not attach to dynamic shared area" +msgstr "не удалось подключиться к динамической разделяемой области" #: utils/mmgr/dsa.c:714 utils/mmgr/dsa.c:796 #, c-format msgid "Failed on DSA request of size %zu." msgstr "Ошибка при запросе памяти DSA (%zu Б)." 
-#: utils/mmgr/dsa.c:1322 -#, c-format -msgid "could not attach to dsa_area" -msgstr "не удалось подключиться к области dsa" - #: utils/mmgr/mcxt.c:726 utils/mmgr/mcxt.c:761 utils/mmgr/mcxt.c:798 #: utils/mmgr/mcxt.c:835 utils/mmgr/mcxt.c:869 utils/mmgr/mcxt.c:898 #: utils/mmgr/mcxt.c:932 utils/mmgr/mcxt.c:983 utils/mmgr/mcxt.c:1017 @@ -26910,22 +27221,22 @@ msgstr "нельзя выполнить PREPARE для транзакции, с msgid "could not read block %ld of temporary file: %m" msgstr "не удалось считать блок %ld временного файла: %m" -#: utils/sort/tuplesort.c:3066 +#: utils/sort/tuplesort.c:3072 #, c-format msgid "cannot have more than %d runs for an external sort" msgstr "число потоков данных для внешней сортировки не может превышать %d" -#: utils/sort/tuplesort.c:4135 +#: utils/sort/tuplesort.c:4146 #, c-format msgid "could not create unique index \"%s\"" msgstr "создать уникальный индекс \"%s\" не удалось" -#: utils/sort/tuplesort.c:4137 +#: utils/sort/tuplesort.c:4148 #, c-format msgid "Key %s is duplicated." msgstr "Ключ %s дублируется." -#: utils/sort/tuplesort.c:4138 +#: utils/sort/tuplesort.c:4149 #, c-format msgid "Duplicate keys exist." msgstr "Данные содержат дублирующиеся ключи." @@ -26951,31 +27262,32 @@ msgstr "не удалось прочитать временный файл ис msgid "could not write to tuplestore temporary file: %m" msgstr "не удалось записать во временный файл источника кортежей: %m" -#: utils/time/snapmgr.c:618 +#: utils/time/snapmgr.c:622 #, c-format msgid "The source transaction is not running anymore." msgstr "Исходная транзакция уже не выполняется." -#: utils/time/snapmgr.c:1190 +#: utils/time/snapmgr.c:1200 #, c-format msgid "cannot export a snapshot from a subtransaction" msgstr "экспортировать снимок из вложенной транзакции нельзя" -#: utils/time/snapmgr.c:1339 utils/time/snapmgr.c:1344 -#: utils/time/snapmgr.c:1349 utils/time/snapmgr.c:1364 -#: utils/time/snapmgr.c:1369 utils/time/snapmgr.c:1374 -#: utils/time/snapmgr.c:1473 utils/time/snapmgr.c:1489 -#: utils/time/snapmgr.c:1514 +#: utils/time/snapmgr.c:1359 utils/time/snapmgr.c:1364 +#: utils/time/snapmgr.c:1369 utils/time/snapmgr.c:1384 +#: utils/time/snapmgr.c:1389 utils/time/snapmgr.c:1394 +#: utils/time/snapmgr.c:1409 utils/time/snapmgr.c:1414 +#: utils/time/snapmgr.c:1419 utils/time/snapmgr.c:1519 +#: utils/time/snapmgr.c:1535 utils/time/snapmgr.c:1560 #, c-format msgid "invalid snapshot data in file \"%s\"" msgstr "неверные данные снимка в файле \"%s\"" -#: utils/time/snapmgr.c:1411 +#: utils/time/snapmgr.c:1456 #, c-format msgid "SET TRANSACTION SNAPSHOT must be called before any query" msgstr "команда SET TRANSACTION SNAPSHOT должна выполняться до запросов" -#: utils/time/snapmgr.c:1420 +#: utils/time/snapmgr.c:1465 #, c-format msgid "" "a snapshot-importing transaction must have isolation level SERIALIZABLE or " @@ -26984,12 +27296,12 @@ msgstr "" "транзакция, импортирующая снимок, должна иметь уровень изоляции SERIALIZABLE " "или REPEATABLE READ" -#: utils/time/snapmgr.c:1429 utils/time/snapmgr.c:1438 +#: utils/time/snapmgr.c:1474 utils/time/snapmgr.c:1483 #, c-format msgid "invalid snapshot identifier: \"%s\"" msgstr "неверный идентификатор снимка: \"%s\"" -#: utils/time/snapmgr.c:1527 +#: utils/time/snapmgr.c:1573 #, c-format msgid "" "a serializable transaction cannot import a snapshot from a non-serializable " @@ -26997,7 +27309,7 @@ msgid "" msgstr "" "сериализуемая транзакция не может импортировать снимок из не сериализуемой" -#: utils/time/snapmgr.c:1531 +#: utils/time/snapmgr.c:1577 #, c-format msgid "" "a non-read-only 
serializable transaction cannot import a snapshot from a " @@ -27006,253 +27318,264 @@ msgstr "" "сериализуемая транзакция в режиме \"чтение-запись\" не может импортировать " "снимок из транзакции в режиме \"только чтение\"" -#: utils/time/snapmgr.c:1546 +#: utils/time/snapmgr.c:1592 #, c-format msgid "cannot import a snapshot from a different database" msgstr "нельзя импортировать снимок из другой базы данных" -#: gram.y:1062 +#: gram.y:1002 +#, c-format +msgid "UNENCRYPTED PASSWORD is no longer supported" +msgstr "вариант UNENCRYPTED PASSWORD более не поддерживается" + +#: gram.y:1003 +#, c-format +msgid "Remove UNENCRYPTED to store the password in encrypted form instead." +msgstr "" +"Удалите слово UNENCRYPTED, чтобы сохранить пароль в зашифрованном виде." + +#: gram.y:1065 #, c-format msgid "unrecognized role option \"%s\"" msgstr "нераспознанный параметр роли \"%s\"" -#: gram.y:1336 gram.y:1351 +#: gram.y:1312 gram.y:1327 #, c-format msgid "CREATE SCHEMA IF NOT EXISTS cannot include schema elements" msgstr "CREATE SCHEMA IF NOT EXISTS не может включать элементы схемы" -#: gram.y:1496 +#: gram.y:1472 #, c-format msgid "current database cannot be changed" msgstr "сменить текущую базу данных нельзя" -#: gram.y:1620 +#: gram.y:1596 #, c-format msgid "time zone interval must be HOUR or HOUR TO MINUTE" msgstr "" "интервал, задающий часовой пояс, должен иметь точность HOUR или HOUR TO " "MINUTE" -#: gram.y:2771 gram.y:2800 +#: gram.y:2612 +#, c-format +msgid "sequence option \"%s\" not supported here" +msgstr "параметр последовательности \"%s\" здесь не поддерживается" + +#: gram.y:2835 gram.y:2864 #, c-format msgid "STDIN/STDOUT not allowed with PROGRAM" msgstr "указания STDIN/STDOUT несовместимы с PROGRAM" -#: gram.y:3110 gram.y:3117 gram.y:10994 gram.y:11002 +#: gram.y:3174 gram.y:3181 gram.y:11072 gram.y:11080 #, c-format msgid "GLOBAL is deprecated in temporary table creation" msgstr "указание GLOBAL при создании временных таблиц устарело" -#: gram.y:5037 +#: gram.y:5118 #, c-format msgid "unrecognized row security option \"%s\"" msgstr "нераспознанный вариант политики безопасности строк \"%s\"" -#: gram.y:5038 +#: gram.y:5119 #, c-format msgid "Only PERMISSIVE or RESTRICTIVE policies are supported currently." msgstr "" "В настоящее время поддерживаются только политики PERMISSIVE и RESTRICTIVE." -#: gram.y:5146 +#: gram.y:5227 msgid "duplicate trigger events specified" msgstr "события триггера повторяются" -#: gram.y:5289 +#: gram.y:5370 #, c-format msgid "conflicting constraint properties" msgstr "противоречащие характеристики ограничения" -#: gram.y:5395 +#: gram.y:5476 #, c-format msgid "CREATE ASSERTION is not yet implemented" msgstr "оператор CREATE ASSERTION ещё не реализован" -#: gram.y:5410 +#: gram.y:5491 #, c-format msgid "DROP ASSERTION is not yet implemented" msgstr "оператор DROP ASSERTION ещё не реализован" -#: gram.y:5794 +#: gram.y:5871 #, c-format msgid "RECHECK is no longer required" msgstr "RECHECK более не требуется" -#: gram.y:5795 +#: gram.y:5872 #, c-format msgid "Update your data type." msgstr "Обновите тип данных." 
-#: gram.y:7438 +#: gram.y:7515 #, c-format msgid "aggregates cannot have output arguments" msgstr "у агрегатных функций не может быть выходных аргументов" -#: gram.y:9263 -#, c-format -msgid "unrecognized option \"%s\"" -msgstr "нераспознанный параметр \"%s\"" - -#: gram.y:9588 gram.y:9606 +#: gram.y:9647 gram.y:9665 #, c-format msgid "WITH CHECK OPTION not supported on recursive views" msgstr "" "предложение WITH CHECK OPTION не поддерживается для рекурсивных представлений" -#: gram.y:10139 +#: gram.y:10198 #, c-format msgid "unrecognized VACUUM option \"%s\"" msgstr "нераспознанный параметр VACUUM: \"%s\"" -#: gram.y:11102 +#: gram.y:11180 #, c-format msgid "LIMIT #,# syntax is not supported" msgstr "синтаксис LIMIT #,# не поддерживается" -#: gram.y:11103 +#: gram.y:11181 #, c-format msgid "Use separate LIMIT and OFFSET clauses." msgstr "Используйте отдельные предложения LIMIT и OFFSET." -#: gram.y:11384 gram.y:11409 +#: gram.y:11462 gram.y:11487 #, c-format msgid "VALUES in FROM must have an alias" msgstr "список VALUES во FROM должен иметь псевдоним" -#: gram.y:11385 gram.y:11410 +#: gram.y:11463 gram.y:11488 #, c-format msgid "For example, FROM (VALUES ...) [AS] foo." msgstr "Например, FROM (VALUES ...) [AS] foo." -#: gram.y:11390 gram.y:11415 +#: gram.y:11468 gram.y:11493 #, c-format msgid "subquery in FROM must have an alias" msgstr "подзапрос во FROM должен иметь псевдоним" -#: gram.y:11391 gram.y:11416 +#: gram.y:11469 gram.y:11494 #, c-format msgid "For example, FROM (SELECT ...) [AS] foo." msgstr "Например, FROM (SELECT ...) [AS] foo." -#: gram.y:11869 +#: gram.y:11948 #, c-format msgid "only one DEFAULT value is allowed" msgstr "допускается только одно значение DEFAULT" -#: gram.y:11878 +#: gram.y:11957 #, c-format msgid "only one PATH value per column is allowed" msgstr "для столбца допускается только одно значение PATH" -#: gram.y:11887 +#: gram.y:11966 #, c-format msgid "conflicting or redundant NULL / NOT NULL declarations for column \"%s\"" msgstr "" "конфликтующие или избыточные объявления NULL/NOT NULL для столбца \"%s\"" -#: gram.y:11896 +#: gram.y:11975 #, c-format msgid "unrecognized column option \"%s\"" msgstr "нераспознанный параметр столбца \"%s\"" -#: gram.y:12150 +#: gram.y:12229 #, c-format msgid "precision for type float must be at least 1 bit" msgstr "тип float должен иметь точность минимум 1 бит" -#: gram.y:12159 +#: gram.y:12238 #, c-format msgid "precision for type float must be less than 54 bits" msgstr "тип float должен иметь точность меньше 54 бит" -#: gram.y:12650 +#: gram.y:12729 #, c-format msgid "wrong number of parameters on left side of OVERLAPS expression" msgstr "неверное число параметров в левой части выражения OVERLAPS" -#: gram.y:12655 +#: gram.y:12734 #, c-format msgid "wrong number of parameters on right side of OVERLAPS expression" msgstr "неверное число параметров в правой части выражения OVERLAPS" -#: gram.y:12830 +#: gram.y:12909 #, c-format msgid "UNIQUE predicate is not yet implemented" msgstr "предикат UNIQUE ещё не реализован" -#: gram.y:13177 +#: gram.y:13256 #, c-format msgid "cannot use multiple ORDER BY clauses with WITHIN GROUP" msgstr "ORDER BY с WITHIN GROUP можно указать только один раз" -#: gram.y:13182 +#: gram.y:13261 #, c-format msgid "cannot use DISTINCT with WITHIN GROUP" msgstr "DISTINCT нельзя использовать с WITHIN GROUP" -#: gram.y:13187 +#: gram.y:13266 #, c-format msgid "cannot use VARIADIC with WITHIN GROUP" msgstr "VARIADIC нельзя использовать с WITHIN GROUP" -#: gram.y:13613 +#: gram.y:13692 #, c-format msgid 
"RANGE PRECEDING is only supported with UNBOUNDED" msgstr "RANGE PRECEDING поддерживается только с UNBOUNDED" -#: gram.y:13619 +#: gram.y:13698 #, c-format msgid "RANGE FOLLOWING is only supported with UNBOUNDED" msgstr "RANGE FOLLOWING поддерживается только с UNBOUNDED" -#: gram.y:13646 gram.y:13669 +#: gram.y:13725 gram.y:13748 #, c-format msgid "frame start cannot be UNBOUNDED FOLLOWING" msgstr "началом рамки не может быть UNBOUNDED FOLLOWING" -#: gram.y:13651 +#: gram.y:13730 #, c-format msgid "frame starting from following row cannot end with current row" msgstr "" "рамка, начинающаяся со следующей строки, не может заканчиваться текущей" -#: gram.y:13674 +#: gram.y:13753 #, c-format msgid "frame end cannot be UNBOUNDED PRECEDING" msgstr "концом рамки не может быть UNBOUNDED PRECEDING" -#: gram.y:13680 +#: gram.y:13759 #, c-format msgid "frame starting from current row cannot have preceding rows" msgstr "" "рамка, начинающаяся с текущей строки, не может иметь предшествующих строк" -#: gram.y:13687 +#: gram.y:13766 #, c-format msgid "frame starting from following row cannot have preceding rows" msgstr "" "рамка, начинающаяся со следующей строки, не может иметь предшествующих строк" -#: gram.y:14322 +#: gram.y:14401 #, c-format msgid "type modifier cannot have parameter name" msgstr "параметр функции-модификатора типа должен быть безымянным" -#: gram.y:14328 +#: gram.y:14407 #, c-format msgid "type modifier cannot have ORDER BY" msgstr "модификатор типа не может включать ORDER BY" -#: gram.y:14392 gram.y:14398 +#: gram.y:14471 gram.y:14477 #, c-format msgid "%s cannot be used as a role name here" msgstr "%s нельзя использовать здесь как имя роли" -#: gram.y:15060 gram.y:15249 +#: gram.y:15139 gram.y:15328 msgid "improper use of \"*\"" msgstr "недопустимое использование \"*\"" -#: gram.y:15313 +#: gram.y:15392 #, c-format msgid "" "an ordered-set aggregate with a VARIADIC direct argument must have one " @@ -27261,77 +27584,77 @@ msgstr "" "сортирующая агрегатная функция с непосредственным аргументом VARIADIC должна " "иметь один агрегатный аргумент VARIADIC того же типа данных" -#: gram.y:15350 +#: gram.y:15429 #, c-format msgid "multiple ORDER BY clauses not allowed" msgstr "ORDER BY можно указать только один раз" -#: gram.y:15361 +#: gram.y:15440 #, c-format msgid "multiple OFFSET clauses not allowed" msgstr "OFFSET можно указать только один раз" -#: gram.y:15370 +#: gram.y:15449 #, c-format msgid "multiple LIMIT clauses not allowed" msgstr "LIMIT можно указать только один раз" -#: gram.y:15379 +#: gram.y:15458 #, c-format msgid "multiple WITH clauses not allowed" msgstr "WITH можно указать только один раз" -#: gram.y:15583 +#: gram.y:15662 #, c-format msgid "OUT and INOUT arguments aren't allowed in TABLE functions" msgstr "в табличных функциях не может быть аргументов OUT и INOUT" -#: gram.y:15684 +#: gram.y:15763 #, c-format msgid "multiple COLLATE clauses not allowed" msgstr "COLLATE можно указать только один раз" #. translator: %s is CHECK, UNIQUE, or similar -#: gram.y:15722 gram.y:15735 +#: gram.y:15801 gram.y:15814 #, c-format msgid "%s constraints cannot be marked DEFERRABLE" msgstr "ограничения %s не могут иметь характеристики DEFERRABLE" #. translator: %s is CHECK, UNIQUE, or similar -#: gram.y:15748 +#: gram.y:15827 #, c-format msgid "%s constraints cannot be marked NOT VALID" msgstr "ограничения %s не могут иметь характеристики NOT VALID" #. 
translator: %s is CHECK, UNIQUE, or similar -#: gram.y:15761 +#: gram.y:15840 #, c-format msgid "%s constraints cannot be marked NO INHERIT" msgstr "ограничения %s не могут иметь характеристики NO INHERIT" -#: guc-file.l:313 +#: guc-file.l:314 #, c-format msgid "unrecognized configuration parameter \"%s\" in file \"%s\" line %u" msgstr "нераспознанный параметр конфигурации \"%s\" в файле \"%s\", строке %u" -#: guc-file.l:386 +#: guc-file.l:387 #, c-format msgid "parameter \"%s\" removed from configuration file, reset to default" msgstr "" "параметр \"%s\" удалён из файла конфигурации, он принимает значение по " "умолчанию" -#: guc-file.l:452 +#: guc-file.l:453 #, c-format msgid "parameter \"%s\" changed to \"%s\"" msgstr "параметр \"%s\" принял значение \"%s\"" -#: guc-file.l:494 +#: guc-file.l:495 #, c-format msgid "configuration file \"%s\" contains errors" msgstr "файл конфигурации \"%s\" содержит ошибки" -#: guc-file.l:499 +#: guc-file.l:500 #, c-format msgid "" "configuration file \"%s\" contains errors; unaffected changes were applied" @@ -27339,55 +27662,55 @@ msgstr "" "файл конфигурации \"%s\" содержит ошибки; были применены не зависимые " "изменения" -#: guc-file.l:504 +#: guc-file.l:505 #, c-format msgid "configuration file \"%s\" contains errors; no changes were applied" msgstr "файл конфигурации \"%s\" содержит ошибки; изменения не были применены" -#: guc-file.l:577 +#: guc-file.l:578 #, c-format msgid "" "could not open configuration file \"%s\": maximum nesting depth exceeded" msgstr "" "открыть файл конфигурации \"%s\" не удалось: превышен предел вложенности" -#: guc-file.l:604 +#: guc-file.l:605 #, c-format msgid "skipping missing configuration file \"%s\"" msgstr "отсутствующий файл конфигурации \"%s\" пропускается" -#: guc-file.l:858 +#: guc-file.l:859 #, c-format msgid "syntax error in file \"%s\" line %u, near end of line" msgstr "ошибка синтаксиса в файле \"%s\", в конце строки %u" -#: guc-file.l:868 +#: guc-file.l:869 #, c-format msgid "syntax error in file \"%s\" line %u, near token \"%s\"" msgstr "ошибка синтаксиса в файле \"%s\", в строке %u, рядом с \"%s\"" -#: guc-file.l:888 +#: guc-file.l:889 #, c-format msgid "too many syntax errors found, abandoning file \"%s\"" msgstr "" "обнаружено слишком много синтаксических ошибок, обработка файла \"%s\" " "прекращается" -#: guc-file.l:940 +#: guc-file.l:941 #, c-format msgid "could not open configuration directory \"%s\": %m" msgstr "открыть каталог конфигурации \"%s\" не удалось: %m" -#: repl_gram.y:320 repl_gram.y:352 +#: repl_gram.y:330 repl_gram.y:362 #, c-format msgid "invalid timeline %u" msgstr "неверная линия времени %u" -#: repl_scanner.l:125 +#: repl_scanner.l:126 msgid "invalid streaming start location" msgstr "неверная позиция начала потока" -#: repl_scanner.l:176 scan.l:670 +#: repl_scanner.l:177 scan.l:670 msgid "unterminated quoted string" msgstr "незавершённая строка в кавычках" @@ -27421,8 +27744,8 @@ msgstr "" msgid "invalid Unicode escape character" msgstr "неверный символ спецкода Unicode" -#: scan.l:605 scan.l:613 scan.l:621 scan.l:622 scan.l:623 scan.l:1337 -#: scan.l:1364 scan.l:1368 scan.l:1406 scan.l:1410 scan.l:1432 scan.l:1442 +#: scan.l:605 scan.l:613 scan.l:621 scan.l:622 scan.l:623 scan.l:1338 +#: scan.l:1365 scan.l:1369 scan.l:1407 scan.l:1411 scan.l:1433 scan.l:1443 msgid "invalid Unicode surrogate pair" msgstr "неверная суррогатная пара Unicode" @@ -27457,7 +27780,7 @@ msgstr "незавершённая спецстрока с $" msgid "zero-length delimited identifier" msgstr "пустой идентификатор в кавычках" 
-#: scan.l:793 syncrep_scanner.l:87 +#: scan.l:793 syncrep_scanner.l:89 msgid "unterminated quoted identifier" msgstr "незавершённый идентификатор в кавычках" @@ -27466,18 +27789,18 @@ msgid "operator too long" msgstr "слишком длинный оператор" #. translator: %s is typically the translation of "syntax error" -#: scan.l:1077 +#: scan.l:1078 #, c-format msgid "%s at end of input" msgstr "%s в конце" #. translator: first %s is typically the translation of "syntax error" -#: scan.l:1085 +#: scan.l:1086 #, c-format msgid "%s at or near \"%s\"" msgstr "%s (примерное положение: \"%s\")" -#: scan.l:1251 scan.l:1283 +#: scan.l:1252 scan.l:1284 msgid "" "Unicode escape values cannot be used for code point values above 007F when " "the server encoding is not UTF8" @@ -27485,16 +27808,16 @@ msgstr "" "Спецкоды Unicode для значений выше 007F можно использовать только с " "серверной кодировкой UTF8" -#: scan.l:1279 scan.l:1424 +#: scan.l:1280 scan.l:1425 msgid "invalid Unicode escape value" msgstr "неверное значение спецкода Unicode" -#: scan.l:1488 +#: scan.l:1489 #, c-format msgid "nonstandard use of \\' in a string literal" msgstr "нестандартное применение \\' в строке" -#: scan.l:1489 +#: scan.l:1490 #, c-format msgid "" "Use '' to write quotes in strings, or use the escape string syntax (E'...')." @@ -27502,27 +27825,403 @@ msgstr "" "Записывайте апостроф в строках в виде '' или используйте синтаксис спецстрок " "(E'...')." -#: scan.l:1498 +#: scan.l:1499 #, c-format msgid "nonstandard use of \\\\ in a string literal" msgstr "нестандартное применение \\\\ в строке" -#: scan.l:1499 +#: scan.l:1500 #, c-format msgid "Use the escape string syntax for backslashes, e.g., E'\\\\'." msgstr "" "Используйте для записи обратных слэшей синтаксис спецстрок, например E'\\\\'." -#: scan.l:1513 +#: scan.l:1514 #, c-format msgid "nonstandard use of escape in a string literal" msgstr "нестандартное использование спецсимвола в строке" -#: scan.l:1514 +#: scan.l:1515 #, c-format msgid "Use the escape string syntax for escapes, e.g., E'\\r\\n'." msgstr "Используйте для записи спецсимволов синтаксис спецстрок E'\\r\\n'." +#~ msgid "invalid number of arguments: object must be matched key value pairs" +#~ msgstr "" +#~ "неверное число аргументов: объект должен составляться из пар ключ-значение" + +#~ msgid "unsafe use of new value \"%s\" of enum type %s" +#~ msgstr "" +#~ "небезопасное использование нового значения \"%s\" типа-перечисления %s" + +#~ msgid "New enum values must be committed before they can be used." +#~ msgstr "" +#~ "Новые значения перечисления должны быть зафиксированы перед " +#~ "использованием." 
+ +#~ msgid "invalid publish list" +#~ msgstr "неверный список публикации" + +#~ msgid "column \"%s\" referenced in statistics does not exist" +#~ msgstr "столбец \"%s\", указанный в статистике, не существует" + +#~ msgid "not connected to database" +#~ msgstr "нет подключения к базе данных" + +#~ msgid "invalid input syntax for %s: \"%s\"" +#~ msgstr "неверный синтаксис для %s: \"%s\"" + +#~ msgid "transaction ID " +#~ msgstr "идентификатор транзакции " + +#~ msgid "in progress" +#~ msgstr "выполняется" + +#~ msgid "committed" +#~ msgstr "зафиксирована" + +#~ msgid "aborted" +#~ msgstr "прервана" + +#~ msgid "could not get keyword values for locale \"%s\": %s" +#~ msgstr "не удалось получить значения ключевых слов для локали \"%s\": %s" + +#~ msgid "index row size %lu exceeds maximum %lu for index \"%s\"" +#~ msgstr "" +#~ "размер строки индекса (%lu) больше предельного размера (%lu) (индекс \"%s" +#~ "\")" + +#~ msgid "" +#~ "brin operator family \"%s\" contains function %s with invalid support " +#~ "number %d" +#~ msgstr "" +#~ "семейство операторов brin \"%s\" содержит функцию %s с неправильным " +#~ "опорным номером %d" + +#~ msgid "" +#~ "brin operator family \"%s\" contains function %s with wrong signature for " +#~ "support number %d" +#~ msgstr "" +#~ "семейство операторов brin \"%s\" содержит функцию %s с неподходящим " +#~ "объявлением для опорного номера %d" + +#~ msgid "" +#~ "brin operator family \"%s\" contains operator %s with invalid strategy " +#~ "number %d" +#~ msgstr "" +#~ "семейство операторов brin \"%s\" содержит оператор %s с неправильным " +#~ "номером стратегии %d" + +#~ msgid "" +#~ "brin operator family \"%s\" contains invalid ORDER BY specification for " +#~ "operator %s" +#~ msgstr "" +#~ "семейство операторов brin \"%s\" содержит некорректное определение ORDER " +#~ "BY для оператора %s" + +#~ msgid "" +#~ "brin operator family \"%s\" contains operator %s with wrong signature" +#~ msgstr "" +#~ "семейство операторов brin \"%s\" содержит оператор %s с неподходящим " +#~ "объявлением" + +#~ msgid "brin operator class \"%s\" is missing support function %d" +#~ msgstr "в классе операторов brin \"%s\" нет опорной функции %d" + +#~ msgid "" +#~ "gist operator family \"%s\" contains support procedure %s with cross-type " +#~ "registration" +#~ msgstr "" +#~ "семейство операторов gist \"%s\" содержит опорную процедуру %s с " +#~ "межтиповой регистрацией" + +#~ msgid "" +#~ "gist operator family \"%s\" contains function %s with invalid support " +#~ "number %d" +#~ msgstr "" +#~ "семейство операторов gist \"%s\" содержит функцию %s с неправильным " +#~ "опорным номером %d" + +#~ msgid "" +#~ "gist operator family \"%s\" contains function %s with wrong signature for " +#~ "support number %d" +#~ msgstr "" +#~ "семейство операторов gist \"%s\" содержит функцию %s с неподходящим " +#~ "объявлением для опорного номера %d" + +#~ msgid "" +#~ "gist operator family \"%s\" contains operator %s with invalid strategy " +#~ "number %d" +#~ msgstr "" +#~ "семейство операторов gist \"%s\" содержит оператор %s с неправильным " +#~ "номером стратегии %d" + +#~ msgid "" +#~ "gist operator family \"%s\" contains operator %s with wrong signature" +#~ msgstr "" +#~ "семейство операторов gist \"%s\" содержит оператор %s с неподходящим " +#~ "объявлением" + +#~ msgid "gist operator class \"%s\" is missing support function %d" +#~ msgstr "в классе операторов gist \"%s\" нет опорной функции %d" + +#~ msgid "" +#~ "hash operator family \"%s\" contains support procedure %s with cross-type " 
+#~ "registration" +#~ msgstr "" +#~ "семейство операторов hash \"%s\" содержит опорную процедуру %s с " +#~ "межтиповой регистрацией" + +#~ msgid "" +#~ "hash operator family \"%s\" contains function %s with wrong signature for " +#~ "support number %d" +#~ msgstr "" +#~ "семейство операторов hash \"%s\" содержит функцию %s с неподходящим " +#~ "объявлением для опорного номера %d" + +#~ msgid "" +#~ "hash operator family \"%s\" contains function %s with invalid support " +#~ "number %d" +#~ msgstr "" +#~ "семейство операторов hash \"%s\" содержит функцию %s с неправильным " +#~ "опорным номером %d" + +#~ msgid "" +#~ "hash operator family \"%s\" contains operator %s with invalid strategy " +#~ "number %d" +#~ msgstr "" +#~ "семейство операторов hash \"%s\" содержит оператор %s с неправильным " +#~ "номером стратегии %d" + +#~ msgid "" +#~ "hash operator family \"%s\" contains invalid ORDER BY specification for " +#~ "operator %s" +#~ msgstr "" +#~ "семейство операторов hash \"%s\" содержит некорректное определение ORDER " +#~ "BY для оператора %s" + +#~ msgid "" +#~ "hash operator family \"%s\" contains operator %s with wrong signature" +#~ msgstr "" +#~ "семейство операторов hash \"%s\" содержит оператор %s с неподходящим " +#~ "объявлением" + +#~ msgid "" +#~ "hash operator family \"%s\" is missing operator(s) for types %s and %s" +#~ msgstr "" +#~ "в семействе операторов hash \"%s\" нет оператора(ов) для типов %s и %s" + +#~ msgid "hash operator class \"%s\" is missing operator(s)" +#~ msgstr "в классе операторов hash \"%s\" нет оператора(ов)" + +#~ msgid "" +#~ "btree operator family \"%s\" contains function %s with invalid support " +#~ "number %d" +#~ msgstr "" +#~ "семейство операторов btree \"%s\" содержит функцию %s с неправильным " +#~ "опорным номером %d" + +#~ msgid "" +#~ "btree operator family \"%s\" contains function %s with wrong signature " +#~ "for support number %d" +#~ msgstr "" +#~ "семейство операторов btree \"%s\" содержит функцию %s с неподходящим " +#~ "объявлением для опорного номера %d" + +#~ msgid "" +#~ "btree operator family \"%s\" contains operator %s with invalid strategy " +#~ "number %d" +#~ msgstr "" +#~ "семейство операторов btree \"%s\" содержит оператор %s с неправильным " +#~ "номером стратегии %d" + +#~ msgid "" +#~ "btree operator family \"%s\" contains invalid ORDER BY specification for " +#~ "operator %s" +#~ msgstr "" +#~ "семейство операторов btree \"%s\" содержит некорректное определение ORDER " +#~ "BY для оператора %s" + +#~ msgid "" +#~ "btree operator family \"%s\" contains operator %s with wrong signature" +#~ msgstr "" +#~ "семейство операторов btree \"%s\" содержит оператор %s с неподходящим " +#~ "объявлением" + +#~ msgid "" +#~ "btree operator family \"%s\" is missing operator(s) for types %s and %s" +#~ msgstr "" +#~ "в семействе операторов btree \"%s\" нет оператора(ов) для типов %s и %s" + +#~ msgid "btree operator class \"%s\" is missing operator(s)" +#~ msgstr "в классе операторов btree \"%s\" нет оператора(ов)" + +#~ msgid "btree operator family \"%s\" is missing cross-type operator(s)" +#~ msgstr "в семействе операторов btree \"%s\" нет межтипового оператора(ов)" + +#~ msgid "" +#~ "spgist operator family \"%s\" contains support procedure %s with cross-" +#~ "type registration" +#~ msgstr "" +#~ "семейство операторов spgist \"%s\" содержит опорную процедуру %s с " +#~ "межтиповой регистрацией" + +#~ msgid "" +#~ "spgist operator family \"%s\" contains function %s with invalid support " +#~ "number %d" +#~ msgstr "" +#~ 
"семейство операторов spgist \"%s\" содержит функцию %s с неправильным " +#~ "опорным номером %d" + +#~ msgid "" +#~ "spgist operator family \"%s\" contains function %s with wrong signature " +#~ "for support number %d" +#~ msgstr "" +#~ "семейство операторов spgist \"%s\" содержит функцию %s с неподходящим " +#~ "объявлением для опорного номера %d" + +#~ msgid "" +#~ "spgist operator family \"%s\" contains operator %s with invalid strategy " +#~ "number %d" +#~ msgstr "" +#~ "семейство операторов spgist \"%s\" содержит оператор %s с неправильным " +#~ "номером стратегии %d" + +#~ msgid "" +#~ "spgist operator family \"%s\" contains invalid ORDER BY specification for " +#~ "operator %s" +#~ msgstr "" +#~ "семейство операторов spgist \"%s\" содержит некорректное определение " +#~ "ORDER BY для оператора %s" + +#~ msgid "" +#~ "spgist operator family \"%s\" contains operator %s with wrong signature" +#~ msgstr "" +#~ "семейство операторов spgist \"%s\" содержит оператор %s с неподходящим " +#~ "объявлением" + +#~ msgid "" +#~ "spgist operator family \"%s\" is missing operator(s) for types %s and %s" +#~ msgstr "" +#~ "в семействе операторов spgist \"%s\" нет оператора(ов) для типов %s и %s" + +#~ msgid "spgist operator class \"%s\" is missing operator(s)" +#~ msgstr "в классе операторов spgist \"%s\" нет оператора(ов)" + +#~ msgid "cannot create temporary tables in parallel mode" +#~ msgstr "создавать временные таблицы в параллельном режиме нельзя" + +#~ msgid "cannot create range partition with empty range" +#~ msgstr "создать диапазонную секцию с пустым диапазоном нельзя" + +#~ msgid "could get display name for locale \"%s\": %s" +#~ msgstr "не удалось получить отображаемое название локали \"%s\": %s" + +#~ msgid "synchronized table states" +#~ msgstr "состояние таблиц синхронизировано" + +#~ msgid "added subscription for table %s.%s" +#~ msgstr "добавлена подписка на таблицу %s.%s" + +#~ msgid "removed subscription for table %s.%s" +#~ msgstr "удалена подписка на таблицу %s.%s" + +#~ msgid "Triggers on partitioned tables cannot have transition tables." +#~ msgstr "" +#~ "Триггеры секционированных таблиц не могут использовать переходные таблицы." + +#~ msgid "malformed SCRAM message (length mismatch)" +#~ msgstr "неправильное сообщение SCRAM (некорректная длина)" + +#~ msgid "invalid SCRAM response (nonce mismatch)" +#~ msgstr "неверный ответ SCRAM (несовпадение проверочного кода)" + +#~ msgid "malformed SCRAM message (attribute '%c' expected, %s found)" +#~ msgstr "неправильное сообщение SCRAM (ожидался атрибут '%c', получено: %s)" + +#~ msgid "malformed SCRAM message (expected = in attr %c)" +#~ msgstr "неправильное сообщение SCRAM (в атрибуте %c ожидалось =)" + +#~ msgid "malformed SCRAM message (attribute expected, invalid char %s found)" +#~ msgstr "" +#~ "неправильное сообщение SCRAM (ожидался атрибут, получен некорректный " +#~ "символ %s)" + +#~ msgid "malformed SCRAM message (comma expected, got %s)" +#~ msgstr "неправильное сообщение SCRAM (ожидалась запятая, получено: %s)" + +#~ msgid "User \"%s\" has an empty password." +#~ msgstr "У пользователя \"%s\" пустой пароль." 
+ +#~ msgid "cannot specify finite value after UNBOUNDED" +#~ msgstr "указать конечное значение после UNBOUNDED нельзя" + +#~ msgid "could not determine data type for argument 1" +#~ msgstr "не удалось определить тип данных аргумента 1" + +#~ msgid "could not determine data type for argument 2" +#~ msgstr "не удалось определить тип данных аргумента 2" + +#~ msgid "argument %d: could not determine data type" +#~ msgstr "аргумент %d: не удалось определить тип данных" + +#~ msgid "could not open transaction log file \"%s\": %m" +#~ msgstr "не удалось открыть файл журнала транзакций \"%s\": %m" + +#~ msgid "removing transaction log backup history file \"%s\"" +#~ msgstr "удаляется файл истории копирования журнала: \"%s\"" + +#~ msgid "range partition key of row contains null" +#~ msgstr "ключ разбиения по диапазонам в строке таблицы содержит NULL" + +#~ msgid "extended statistics \"%s\" do not exist, skipping" +#~ msgstr "расширенная статистика \"%s\" не существует, пропускается" + +#~ msgid "only scalar types can be used in extended statistics" +#~ msgstr "в расширенной статистике могут использоваться только скалярные типы" + +#~ msgid "unrecognized STATISTICS option \"%s\"" +#~ msgstr "нераспознанное указание для STATISTICS: \"%s\"" + +#~ msgid "must truncate child tables too" +#~ msgstr "опустошаться должны также и дочерние таблицы" + +#~ msgid "constraint must be dropped from child tables too" +#~ msgstr "ограничение также должно удаляться из дочерних таблиц" + +#~ msgid "column \"%s\" is in range partition key" +#~ msgstr "столбец \"%s\" входит в ключ разбиения по диапазонам" + +#~ msgid "column must be dropped from child tables too" +#~ msgstr "столбец также должен удаляться из дочерних таблиц" + +#~ msgid "transaction log switch forced (archive_timeout=%d)" +#~ msgstr "принудительное переключение журнала транзакций (archive_timeout=%d)" + +#~ msgid "archived transaction log file \"%s\"" +#~ msgstr "файл архива журнала транзакций \"%s\"" + +#~ msgid "Transaction ID %u finished; no more running transactions." +#~ msgstr "Транзакция %u завершена, больше активных транзакций нет." + +#~ msgid "%u transaction needs to finish." +#~ msgid_plural "%u transactions need to finish." +#~ msgstr[0] "Необходимо дождаться завершения транзакций (%u)." +#~ msgstr[1] "Необходимо дождаться завершения транзакций (%u)." +#~ msgstr[2] "Необходимо дождаться завершения транзакций (%u)." 
+ +#~ msgid "Consider ALTER TABLE \"%s\".\"%s\" ALTER \"%s\" SET STATISTICS -1" +#~ msgstr "Попробуйте ALTER TABLE \"%s\".\"%s\" ALTER \"%s\" SET STATISTICS -1" + +#~ msgid "select() failed: %m" +#~ msgstr "ошибка в select(): %m" + +#~ msgid "could not attach to dsa_handle" +#~ msgstr "не удалось подключиться к dsa" + +#~ msgid "unrecognized option \"%s\"" +#~ msgstr "нераспознанный параметр \"%s\"" + #~ msgid "could not remove old transaction log file \"%s\": %m" #~ msgstr "не удалось стереть старый файл журнала транзакций \"%s\": %m" @@ -27695,9 +28394,6 @@ msgstr "Используйте для записи спецсимволов си #~ "недостаточно разделяемой памяти для элементов структуры данных \"%s" #~ "\" (запрошено байт: %zu)" -#~ msgid "corrupted item pointer: offset = %u, length = %u" -#~ msgstr "испорченный указатель элемента: смещение = %u, длина = %u" - #~ msgid "invalid input syntax for type boolean: \"%s\"" #~ msgstr "неверное значение для логического типа: \"%s\"" @@ -27713,9 +28409,6 @@ msgstr "Используйте для записи спецсимволов си #~ msgid "\"TZ\"/\"tz\"/\"OF\" format patterns are not supported in to_date" #~ msgstr "шаблоны формата \"TZ\"/\"tz\"/\"OF\" не поддерживаются в to_date" -#~ msgid "invalid input syntax for integer: \"%s\"" -#~ msgstr "неверное значение для целого числа: \"%s\"" - #~ msgid "value \"%s\" is out of range for type bigint" #~ msgstr "значение \"%s\" вне диапазона для типа bigint" @@ -28065,9 +28758,6 @@ msgstr "Используйте для записи спецсимволов си #~ msgid "commit timestamp Xid oldest/newest: %u/%u" #~ msgstr "старейшая/новейшая транзакция с меткой времени: %u/%u" -#~ msgid "Table %s is temporary." -#~ msgstr "Таблица %s - временная." - #~ msgid "cannot change status of table %s to logged" #~ msgstr "сделать таблицу %s журналируемой нельзя" @@ -28102,9 +28792,6 @@ msgstr "Используйте для записи спецсимволов си #~ msgid "arg %d: key cannot be null" #~ msgstr "аргумент %d: ключ не может быть NULL" -#~ msgid "too many parallel workers already attached" -#~ msgstr "уже подключено слишком много параллельных исполнителей" - #~ msgid "" #~ "\"%s\" is not a table, materialized view, composite type, or foreign table" #~ msgstr "" @@ -28323,9 +29010,6 @@ msgstr "Используйте для записи спецсимволов си #~ "Задаёт максимальное расстояние в сегментах журнала между автоматическими " #~ "контрольными точками WAL." -#~ msgid "SET AUTOCOMMIT TO OFF is no longer supported" -#~ msgstr "SET AUTOCOMMIT TO OFF больше не поддерживается" - #~ msgid "assertion checking is not supported by this build" #~ msgstr "в данной сборке не поддерживаются проверки истинности" @@ -28480,9 +29164,6 @@ msgstr "Используйте для записи спецсимволов си #~ msgid "%s: could not determine user name (GetUserName failed)\n" #~ msgstr "%s: не удалось определить имя пользователя (ошибка в GetUserName)\n" -#~ msgid "too many column aliases specified for function %s" -#~ msgstr "для функции %s указано слишком много названий столбцов" - #~ msgid "Expected 1 tuple with 3 fields, got %d tuples with %d fields." #~ msgstr "" #~ "Ожидался 1 кортеж с 3 полями, однако получено кортежей: %d, полей: %d." 
@@ -28549,9 +29230,6 @@ msgstr "Используйте для записи спецсимволов си #~ msgid "window functions cannot use named arguments" #~ msgstr "у оконных функций не может быть именованных аргументов" -#~ msgid "invalid list syntax for \"listen_addresses\"" -#~ msgstr "неверный формат списка для \"listen_addresses\"" - #~ msgid "invalid list syntax for \"unix_socket_directories\"" #~ msgstr "неверный формат списка для \"unix_socket_directories\"" diff --git a/src/backend/po/sv.po b/src/backend/po/sv.po new file mode 100644 index 0000000000..70430e2a60 --- /dev/null +++ b/src/backend/po/sv.po @@ -0,0 +1,26101 @@ +# Swedish message translation file for postgresql +# Dennis Björklund , 2002, 2003, 2004, 2005, 2006, 2017, 2018. +# +# Många av termerna är tekniska termer som refererar till begrepp i SQL-satser och liknande. Om man +# översätter vissa av dessa så kommer det bli väldigt svårt för användaren att förstå vad vi menar. +# För många av dessa har jag valt att behålla det engelska ordet som ett begrepp. Det är en svår +# balansgång. +# +# T.ex. ett integritetsvillkor som deklarerats med flaggan DEFERRABLE har jag i text som +# tar upp det lämnat kvar begreppet deferrable. T.ex: +# +# att ange deferrable för integritetsvillkor stöds inte för domäner +# +# På många ställen är det svårt att avgöra. Ta t.ex. integer som ibland refererar till typen integer och +# ibland refererar mer allmänt till heltal. +msgid "" +msgstr "" +"Project-Id-Version: PostgreSQL 10\n" +"Report-Msgid-Bugs-To: pgsql-bugs@postgresql.org\n" +"POT-Creation-Date: 2018-06-20 17:14+0000\n" +"PO-Revision-Date: 2018-06-25 08:42+0200\n" +"Last-Translator: Dennis Björklund \n" +"Language-Team: Swedish \n" +"Language: sv\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=UTF-8\n" +"Content-Transfer-Encoding: 8bit\n" +"Plural-Forms: nplurals=2; plural=(n != 1);\n" + +#: ../common/config_info.c:130 ../common/config_info.c:138 +#: ../common/config_info.c:146 ../common/config_info.c:154 +#: ../common/config_info.c:162 ../common/config_info.c:170 +#: ../common/config_info.c:178 ../common/config_info.c:186 +#: ../common/config_info.c:194 +msgid "not recorded" +msgstr "ej sparad" + +#: ../common/controldata_utils.c:58 commands/copy.c:3146 +#: commands/extension.c:3330 utils/adt/genfile.c:151 +#, c-format +msgid "could not open file \"%s\" for reading: %m" +msgstr "kunde inte öppna filen \"%s\" för läsning: %m" + +#: ../common/controldata_utils.c:62 +#, c-format +msgid "%s: could not open file \"%s\" for reading: %s\n" +msgstr "%s: kunde inte öppna fil \"%s\" för läsning: %s\n" + +#: ../common/controldata_utils.c:75 access/transam/timeline.c:347 +#: access/transam/xlog.c:3407 access/transam/xlog.c:10857 +#: access/transam/xlog.c:10870 access/transam/xlog.c:11295 +#: access/transam/xlog.c:11375 access/transam/xlog.c:11414 +#: access/transam/xlog.c:11457 access/transam/xlogfuncs.c:658 +#: access/transam/xlogfuncs.c:677 commands/extension.c:3340 libpq/hba.c:499 +#: replication/logical/origin.c:701 replication/logical/origin.c:731 +#: replication/logical/reorderbuffer.c:3101 replication/walsender.c:507 +#: storage/file/copydir.c:195 utils/adt/genfile.c:168 utils/adt/misc.c:944 +#, c-format +msgid "could not read file \"%s\": %m" +msgstr "kunde inte läsa fil \"%s\": %m" + +#: ../common/controldata_utils.c:78 +#, c-format +msgid "%s: could not read file \"%s\": %s\n" +msgstr "%s: kunde inte läsa fil \"%s\": %s\n" + +#: ../common/controldata_utils.c:86 +#, c-format +msgid "could not read file \"%s\": read %d of %d" +msgstr "kunde inte 
läsa fil \"%s\": läste %d av %d" + +#: ../common/controldata_utils.c:90 +#, c-format +msgid "%s: could not read file \"%s\": read %d of %d\n" +msgstr "%s: kunde inte läsa fil \"%s\": läste %d av %d\n" + +#: ../common/controldata_utils.c:112 +msgid "byte ordering mismatch" +msgstr "byte-ordning stämmer inte" + +#: ../common/controldata_utils.c:114 +#, c-format +msgid "" +"WARNING: possible byte ordering mismatch\n" +"The byte ordering used to store the pg_control file might not match the one\n" +"used by this program. In that case the results below would be incorrect, and\n" +"the PostgreSQL installation would be incompatible with this data directory.\n" +msgstr "" +"VARNING: möjligt fel i talordning\n" +"Den endian-ordning med vilken pg_control lagrar filer passar kanske\n" +"inte detta program. I så fall kan nedanstående utfall vara oriktigt\n" +"och det installerade PostgreSQL vara oförenligt med databaskatalogen.\n" + +#: ../common/exec.c:127 ../common/exec.c:241 ../common/exec.c:284 +#, c-format +msgid "could not identify current directory: %s" +msgstr "kunde inte identifiera aktuell katalog: %s" + +#: ../common/exec.c:146 +#, c-format +msgid "invalid binary \"%s\"" +msgstr "ogiltig binär \"%s\"" + +#: ../common/exec.c:195 +#, c-format +msgid "could not read binary \"%s\"" +msgstr "kunde inte läsa binär \"%s\"" + +#: ../common/exec.c:202 +#, c-format +msgid "could not find a \"%s\" to execute" +msgstr "kunde inte hitta en \"%s\" att köra" + +#: ../common/exec.c:257 ../common/exec.c:293 +#, c-format +msgid "could not change directory to \"%s\": %s" +msgstr "kunde inte byta katalog till \"%s\": %s" + +#: ../common/exec.c:272 +#, c-format +msgid "could not read symbolic link \"%s\"" +msgstr "kunde inte läsa symbolisk länk \"%s\"" + +#: ../common/exec.c:523 +#, c-format +msgid "pclose failed: %s" +msgstr "pclose misslyckades: %s" + +#: ../common/fe_memutils.c:35 ../common/fe_memutils.c:75 +#: ../common/fe_memutils.c:98 ../common/psprintf.c:181 ../port/path.c:632 +#: ../port/path.c:670 ../port/path.c:687 utils/misc/ps_status.c:171 +#: utils/misc/ps_status.c:179 utils/misc/ps_status.c:209 +#: utils/misc/ps_status.c:217 +#, c-format +msgid "out of memory\n" +msgstr "slut på minne\n" + +#: ../common/fe_memutils.c:92 +#, c-format +msgid "cannot duplicate null pointer (internal error)\n" +msgstr "kan inte duplicera null-pekare (internt fel)\n" + +#: ../common/file_utils.c:82 ../common/file_utils.c:186 +#, c-format +msgid "%s: could not stat file \"%s\": %s\n" +msgstr "%s: kunde ta status på filen \"%s\": %s\n" + +#: ../common/file_utils.c:162 +#, c-format +msgid "%s: could not open directory \"%s\": %s\n" +msgstr "%s: kunde inte öppna katalog \"%s\": %s\n" + +#: ../common/file_utils.c:198 +#, c-format +msgid "%s: could not read directory \"%s\": %s\n" +msgstr "%s: kunde inte läsa katalog \"%s\": %s\n" + +#: ../common/file_utils.c:231 ../common/file_utils.c:291 +#: ../common/file_utils.c:367 +#, c-format +msgid "%s: could not open file \"%s\": %s\n" +msgstr "%s: kunde inte öppna fil \"%s\": %s\n" + +#: ../common/file_utils.c:304 ../common/file_utils.c:376 +#, c-format +msgid "%s: could not fsync file \"%s\": %s\n" +msgstr "%s: kunde inte utföra fsync på filen \"%s\": %s\n" + +#: ../common/file_utils.c:387 +#, c-format +msgid "%s: could not rename file \"%s\" to \"%s\": %s\n" +msgstr "%s: kunde inte döpa om fil \"%s\" till \"%s\": %s\n" + +#: ../common/pgfnames.c:45 +#, c-format +msgid "could not open directory \"%s\": %s\n" +msgstr "kunde inte öppna katalog \"%s\": %s\n" + +#: 
../common/pgfnames.c:72 +#, c-format +msgid "could not read directory \"%s\": %s\n" +msgstr "kunde inte läsa katalog \"%s\": %s\n" + +#: ../common/pgfnames.c:84 +#, c-format +msgid "could not close directory \"%s\": %s\n" +msgstr "kunde inte stänga katalog \"%s\": %s\n" + +#: ../common/psprintf.c:179 ../port/path.c:630 ../port/path.c:668 +#: ../port/path.c:685 access/transam/twophase.c:1372 access/transam/xlog.c:6443 +#: lib/dshash.c:246 lib/stringinfo.c:277 libpq/auth.c:1150 libpq/auth.c:1516 +#: libpq/auth.c:1584 libpq/auth.c:2102 postmaster/bgworker.c:337 +#: postmaster/bgworker.c:914 postmaster/postmaster.c:2390 +#: postmaster/postmaster.c:2412 postmaster/postmaster.c:3979 +#: postmaster/postmaster.c:4687 postmaster/postmaster.c:4762 +#: postmaster/postmaster.c:5454 postmaster/postmaster.c:5791 +#: replication/libpqwalreceiver/libpqwalreceiver.c:260 +#: replication/logical/logical.c:174 storage/buffer/localbuf.c:436 +#: storage/file/fd.c:781 storage/file/fd.c:1219 storage/file/fd.c:1380 +#: storage/file/fd.c:2286 storage/ipc/procarray.c:1055 +#: storage/ipc/procarray.c:1543 storage/ipc/procarray.c:1550 +#: storage/ipc/procarray.c:1965 storage/ipc/procarray.c:2589 +#: utils/adt/cryptohashes.c:45 utils/adt/cryptohashes.c:65 +#: utils/adt/formatting.c:1568 utils/adt/formatting.c:1690 +#: utils/adt/formatting.c:1813 utils/adt/pg_locale.c:468 +#: utils/adt/pg_locale.c:652 utils/adt/regexp.c:219 utils/fmgr/dfmgr.c:221 +#: utils/hash/dynahash.c:448 utils/hash/dynahash.c:557 +#: utils/hash/dynahash.c:1069 utils/mb/mbutils.c:365 utils/mb/mbutils.c:698 +#: utils/misc/guc.c:4230 utils/misc/guc.c:4246 utils/misc/guc.c:4259 +#: utils/misc/guc.c:7234 utils/misc/tzparser.c:468 utils/mmgr/aset.c:482 +#: utils/mmgr/dsa.c:713 utils/mmgr/dsa.c:795 utils/mmgr/generation.c:249 +#: utils/mmgr/mcxt.c:796 utils/mmgr/mcxt.c:832 utils/mmgr/mcxt.c:870 +#: utils/mmgr/mcxt.c:908 utils/mmgr/mcxt.c:944 utils/mmgr/mcxt.c:975 +#: utils/mmgr/mcxt.c:1011 utils/mmgr/mcxt.c:1063 utils/mmgr/mcxt.c:1098 +#: utils/mmgr/mcxt.c:1133 utils/mmgr/slab.c:239 +#, c-format +msgid "out of memory" +msgstr "slut på minne" + +#: ../common/relpath.c:58 +#, c-format +msgid "invalid fork name" +msgstr "ogiltigt fork-namn" + +#: ../common/relpath.c:59 +#, c-format +msgid "Valid fork names are \"main\", \"fsm\", \"vm\", and \"init\"." +msgstr "Giltiga fork-värden är \"main\", \"fsm\", \"vm\" och \"init\"." 
+ +#: ../common/restricted_token.c:68 +#, c-format +msgid "%s: WARNING: cannot create restricted tokens on this platform\n" +msgstr "%s: VARNING: \"Restricted Token\" stöds inte av plattformen.\n" + +#: ../common/restricted_token.c:77 +#, c-format +msgid "%s: could not open process token: error code %lu\n" +msgstr "%s: kunde inte skapa token: felkod %lu\n" + +#: ../common/restricted_token.c:90 +#, c-format +msgid "%s: could not allocate SIDs: error code %lu\n" +msgstr "%s: kunde inte tilldela SID: felkod %lu\n" + +#: ../common/restricted_token.c:110 +#, c-format +msgid "%s: could not create restricted token: error code %lu\n" +msgstr "%s: kunde inte skapa begränsat token: felkod %lu\n" + +#: ../common/restricted_token.c:132 +#, c-format +msgid "%s: could not start process for command \"%s\": error code %lu\n" +msgstr "%s: kunde inte starta process för kommando \"%s\": felkod %lu\n" + +#: ../common/restricted_token.c:170 +#, c-format +msgid "%s: could not re-execute with restricted token: error code %lu\n" +msgstr "%s: kunde inte köra igen med begränsat token: felkod %lu\n" + +#: ../common/restricted_token.c:186 +#, c-format +msgid "%s: could not get exit code from subprocess: error code %lu\n" +msgstr "%s: kunde inte hämta statuskod för underprocess: felkod %lu\n" + +#: ../common/rmtree.c:77 +#, c-format +msgid "could not stat file or directory \"%s\": %s\n" +msgstr "kunde inte ta status på fil eller katalog \"%s\": %s\n" + +#: ../common/rmtree.c:104 ../common/rmtree.c:121 +#, c-format +msgid "could not remove file or directory \"%s\": %s\n" +msgstr "kunde inte ta bort fil eller katalog \"%s\": %s\n" + +#: ../common/saslprep.c:1090 +#, c-format +msgid "password too long" +msgstr "lösenorder är för långt" + +#: ../common/username.c:43 +#, c-format +msgid "could not look up effective user ID %ld: %s" +msgstr "kunde inte slå upp effektivt användar-id %ld: %s" + +#: ../common/username.c:45 libpq/auth.c:2049 +msgid "user does not exist" +msgstr "användaren finns inte" + +#: ../common/username.c:60 +#, c-format +msgid "user name lookup failure: error code %lu" +msgstr "misslyckad sökning efter användarnamn: felkod %lu" + +#: ../common/wait_error.c:45 +#, c-format +msgid "command not executable" +msgstr "kommandot är inte körbart" + +#: ../common/wait_error.c:49 +#, c-format +msgid "command not found" +msgstr "kommandot kan ej hittas" + +#: ../common/wait_error.c:54 +#, c-format +msgid "child process exited with exit code %d" +msgstr "barnprocess avslutade med kod %d" + +#: ../common/wait_error.c:61 +#, c-format +msgid "child process was terminated by exception 0x%X" +msgstr "barnprocess terminerades med avbrott 0x%X" + +#: ../common/wait_error.c:71 +#, c-format +msgid "child process was terminated by signal %s" +msgstr "barnprocess terminerades av signal %s" + +#: ../common/wait_error.c:75 +#, c-format +msgid "child process was terminated by signal %d" +msgstr "barnprocess terminerades av signal %d" + +#: ../common/wait_error.c:80 +#, c-format +msgid "child process exited with unrecognized status %d" +msgstr "barnprocess avslutade med okänd statuskod %d" + +#: ../port/chklocale.c:288 +#, c-format +msgid "could not determine encoding for codeset \"%s\"" +msgstr "kunde inte bestämma kodning för teckentabell \"%s\"" + +#: ../port/chklocale.c:409 ../port/chklocale.c:415 +#, c-format +msgid "could not determine encoding for locale \"%s\": codeset is \"%s\"" +msgstr "kunde inte bestämma kodning för lokal \"%s\": teckentabellen är \"%s\"" + +#: ../port/dirmod.c:218 +#, c-format +msgid "could not set 
junction for \"%s\": %s" +msgstr "kunde inte sätta knutpunkt (junction) för \"%s\": %s" + +#: ../port/dirmod.c:221 +#, c-format +msgid "could not set junction for \"%s\": %s\n" +msgstr "kunde inte sätta knutpunkt (junktion) för \"%s\": %s\n" + +#: ../port/dirmod.c:295 +#, c-format +msgid "could not get junction for \"%s\": %s" +msgstr "kunde inte hämta knutpunkt (junction) för \"%s\": %s" + +#: ../port/dirmod.c:298 +#, c-format +msgid "could not get junction for \"%s\": %s\n" +msgstr "kunde inte hämta knutpunkt (junction) för \"%s\": %s\n" + +#: ../port/open.c:111 +#, c-format +msgid "could not open file \"%s\": %s" +msgstr "kunde inte öppna fil \"%s\": %s" + +#: ../port/open.c:112 +msgid "lock violation" +msgstr "lås-överträdelse" + +#: ../port/open.c:112 +msgid "sharing violation" +msgstr "sharing-överträdelse" + +#: ../port/open.c:113 +#, c-format +msgid "Continuing to retry for 30 seconds." +msgstr "Fortsätter att försöka i 30 sekunder." + +#: ../port/open.c:114 +#, c-format +msgid "You might have antivirus, backup, or similar software interfering with the database system." +msgstr "Du kan ha antivirus, backup eller liknande mjukvara som stör databassystemet" + +#: ../port/path.c:654 +#, c-format +msgid "could not get current working directory: %s\n" +msgstr "kunde inte fastställa nuvarande arbetskatalog: %s\n" + +#: ../port/strerror.c:25 +#, c-format +msgid "unrecognized error %d" +msgstr "okänt fel: %d" + +#: ../port/win32security.c:62 +#, c-format +msgid "could not get SID for Administrators group: error code %lu\n" +msgstr "kunde inte hämta SID för Administratörsgrupp: felkod %lu\n" + +#: ../port/win32security.c:72 +#, c-format +msgid "could not get SID for PowerUsers group: error code %lu\n" +msgstr "kunde inte hämta SID för PowerUser-grupp: felkod %lu\n" + +#: ../port/win32security.c:80 +#, c-format +msgid "could not check access token membership: error code %lu\n" +msgstr "kunde inte kontrollera access-token-medlemskap: felkod %lu\n" + +#: access/brin/brin.c:200 +#, c-format +msgid "request for BRIN range summarization for index \"%s\" page %u was not recorded" +msgstr "förfrågan efter BRIN-intervallsummering för index \"%s\" sida %u har inte spelats in" + +#: access/brin/brin.c:877 access/brin/brin.c:954 access/gin/ginfast.c:1023 +#: access/transam/xlog.c:10269 access/transam/xlog.c:10796 +#: access/transam/xlogfuncs.c:286 access/transam/xlogfuncs.c:313 +#: access/transam/xlogfuncs.c:352 access/transam/xlogfuncs.c:373 +#: access/transam/xlogfuncs.c:394 access/transam/xlogfuncs.c:464 +#: access/transam/xlogfuncs.c:520 +#, c-format +msgid "recovery is in progress" +msgstr "återställning pågår" + +#: access/brin/brin.c:878 access/brin/brin.c:955 +#, c-format +msgid "BRIN control functions cannot be executed during recovery." +msgstr "BRIN-kontrollfunktioner kan inte köras under återställning." 
+ +#: access/brin/brin.c:886 access/brin/brin.c:963 +#, c-format +msgid "block number out of range: %s" +msgstr "blocknummer är utanför giltigt intervall: %s" + +#: access/brin/brin.c:909 access/brin/brin.c:986 +#, c-format +msgid "\"%s\" is not a BRIN index" +msgstr "\"%s\" är inte ett BRIN-index" + +#: access/brin/brin.c:925 access/brin/brin.c:1002 +#, c-format +msgid "could not open parent table of index %s" +msgstr "kunde inte öppna föräldratabell för index %s" + +#: access/brin/brin_pageops.c:77 access/brin/brin_pageops.c:363 +#: access/brin/brin_pageops.c:844 access/gin/ginentrypage.c:110 +#: access/gist/gist.c:1376 access/nbtree/nbtinsert.c:678 +#: access/nbtree/nbtsort.c:839 access/spgist/spgdoinsert.c:1957 +#, c-format +msgid "index row size %zu exceeds maximum %zu for index \"%s\"" +msgstr "indexradstorlek %zu överstiger maximum %zu för index \"%s\"" + +#: access/brin/brin_revmap.c:382 access/brin/brin_revmap.c:388 +#, c-format +msgid "corrupted BRIN index: inconsistent range map" +msgstr "trasigt BRIN-index: inkonsistent intervall-map" + +#: access/brin/brin_revmap.c:404 +#, c-format +msgid "leftover placeholder tuple detected in BRIN index \"%s\", deleting" +msgstr "kvarlämnad platshållartuple hittad i BRIN-index \"%s\", raderar" + +#: access/brin/brin_revmap.c:601 +#, c-format +msgid "unexpected page type 0x%04X in BRIN index \"%s\" block %u" +msgstr "oväntad sidtyp 0x%04X i BRIN-index \"%s\" block %u" + +#: access/brin/brin_validate.c:116 access/gin/ginvalidate.c:149 +#: access/gist/gistvalidate.c:146 access/hash/hashvalidate.c:132 +#: access/nbtree/nbtvalidate.c:110 access/spgist/spgvalidate.c:165 +#, c-format +msgid "operator family \"%s\" of access method %s contains function %s with invalid support number %d" +msgstr "operatorfamilj \"%s\" för accessmetod %s innehåller funktion %s med ogiltigt supportnummer %d" + +#: access/brin/brin_validate.c:132 access/gin/ginvalidate.c:161 +#: access/gist/gistvalidate.c:158 access/hash/hashvalidate.c:115 +#: access/nbtree/nbtvalidate.c:122 access/spgist/spgvalidate.c:177 +#, c-format +msgid "operator family \"%s\" of access method %s contains function %s with wrong signature for support number %d" +msgstr "operatorfamilj \"%s\" för accessmetod %s innehåller funktion %s med felaktig signatur för supportnummer %d" + +#: access/brin/brin_validate.c:154 access/gin/ginvalidate.c:180 +#: access/gist/gistvalidate.c:178 access/hash/hashvalidate.c:153 +#: access/nbtree/nbtvalidate.c:142 access/spgist/spgvalidate.c:196 +#, c-format +msgid "operator family \"%s\" of access method %s contains operator %s with invalid strategy number %d" +msgstr "operatorfamilj \"%s\" för accessmetod %s innehåller operator %s med ogiltigt strateginummer %d" + +#: access/brin/brin_validate.c:183 access/gin/ginvalidate.c:193 +#: access/hash/hashvalidate.c:166 access/nbtree/nbtvalidate.c:155 +#: access/spgist/spgvalidate.c:209 +#, c-format +msgid "operator family \"%s\" of access method %s contains invalid ORDER BY specification for operator %s" +msgstr "operatorfamilj \"%s\" för accessmetod %s innehåller ogiltig ORDER BY-specifikatioon för operator %s" + +#: access/brin/brin_validate.c:196 access/gin/ginvalidate.c:206 +#: access/gist/gistvalidate.c:226 access/hash/hashvalidate.c:179 +#: access/nbtree/nbtvalidate.c:168 access/spgist/spgvalidate.c:222 +#, c-format +msgid "operator family \"%s\" of access method %s contains operator %s with wrong signature" +msgstr "operatorfamilj \"%s\" för accessmetod %s innehåller operator %s med felaktig signatur" + +#: 
access/brin/brin_validate.c:234 access/hash/hashvalidate.c:219 +#: access/nbtree/nbtvalidate.c:226 access/spgist/spgvalidate.c:249 +#, c-format +msgid "operator family \"%s\" of access method %s is missing operator(s) for types %s and %s" +msgstr "operatorfamilj \"%s\" för accessmetod %s saknar operator(er) för typerna %s och %s" + +#: access/brin/brin_validate.c:244 +#, c-format +msgid "operator family \"%s\" of access method %s is missing support function(s) for types %s and %s" +msgstr "operatorfamilj \"%s\" för accessmetod %s saknas supportfunktion(er) för typerna %s och %s" + +#: access/brin/brin_validate.c:257 access/hash/hashvalidate.c:233 +#: access/nbtree/nbtvalidate.c:250 access/spgist/spgvalidate.c:282 +#, c-format +msgid "operator class \"%s\" of access method %s is missing operator(s)" +msgstr "operatorklass \"%s\" för accessmetoden %s saknar operator(er)" + +#: access/brin/brin_validate.c:268 access/gin/ginvalidate.c:247 +#: access/gist/gistvalidate.c:266 +#, c-format +msgid "operator class \"%s\" of access method %s is missing support function %d" +msgstr "operatorklass \"%s\" för accessmetod %s saknar supportfunktion %d" + +#: access/common/heaptuple.c:1090 access/common/heaptuple.c:1806 +#, c-format +msgid "number of columns (%d) exceeds limit (%d)" +msgstr "antalet kolumner (%d) överskrider gränsen (%d)" + +#: access/common/indextuple.c:63 +#, c-format +msgid "number of index columns (%d) exceeds limit (%d)" +msgstr "antalet indexerade kolumner (%d) överskrider gränsen (%d)" + +#: access/common/indextuple.c:179 access/spgist/spgutils.c:685 +#, c-format +msgid "index row requires %zu bytes, maximum size is %zu" +msgstr "indexrad kräver %zu byte, maximal storlek är %zu" + +#: access/common/printtup.c:365 tcop/fastpath.c:180 tcop/fastpath.c:530 +#: tcop/postgres.c:1755 +#, c-format +msgid "unsupported format code: %d" +msgstr "ej stödd formatkod: %d" + +#: access/common/reloptions.c:568 +#, c-format +msgid "user-defined relation parameter types limit exceeded" +msgstr "överskriden gräns för användardefinierade relationsparametertyper" + +#: access/common/reloptions.c:849 +#, c-format +msgid "RESET must not include values for parameters" +msgstr "RESET får inte ha med värden på parametrar" + +#: access/common/reloptions.c:881 +#, c-format +msgid "unrecognized parameter namespace \"%s\"" +msgstr "okänd parameternamnrymd \"%s\"" + +#: access/common/reloptions.c:1121 parser/parse_clause.c:277 +#, c-format +msgid "unrecognized parameter \"%s\"" +msgstr "okänd parameter \"%s\"" + +#: access/common/reloptions.c:1151 +#, c-format +msgid "parameter \"%s\" specified more than once" +msgstr "parameter \"%s\" angiven mer än en gång" + +#: access/common/reloptions.c:1167 +#, c-format +msgid "invalid value for boolean option \"%s\": %s" +msgstr "ogiltigt värde för booleansk flagga \"%s\": \"%s\"" + +#: access/common/reloptions.c:1179 +#, c-format +msgid "invalid value for integer option \"%s\": %s" +msgstr "ogiltigt värde för heltalsflagga \"%s\": \"%s\"" + +#: access/common/reloptions.c:1185 access/common/reloptions.c:1205 +#, c-format +msgid "value %s out of bounds for option \"%s\"" +msgstr "värdet %s är utanför sitt intervall för flaggan \"%s\"" + +#: access/common/reloptions.c:1187 +#, c-format +msgid "Valid values are between \"%d\" and \"%d\"." +msgstr "Giltiga värden är mellan \"%d\" och \"%d\"." 
+ +#: access/common/reloptions.c:1199 +#, c-format +msgid "invalid value for floating point option \"%s\": %s" +msgstr "ogiltigt värde för flyttalsflagga \"%s\": %s" + +#: access/common/reloptions.c:1207 +#, c-format +msgid "Valid values are between \"%f\" and \"%f\"." +msgstr "Giltiga värden är mellan \"%f\" och \"%f\"." + +#: access/common/tupconvert.c:108 +#, c-format +msgid "Returned type %s does not match expected type %s in column %d." +msgstr "Returnerad typ %s matchar inte förväntad type %s i kolumn %d." + +#: access/common/tupconvert.c:136 +#, c-format +msgid "Number of returned columns (%d) does not match expected column count (%d)." +msgstr "Antalet returnerade kolumner (%d) matchar inte förväntat antal kolumner (%d)." + +#: access/common/tupconvert.c:329 +#, c-format +msgid "Attribute \"%s\" of type %s does not match corresponding attribute of type %s." +msgstr "Attribut \"%s\" för typ %s matchar inte motsvarande attribut för typ %s." + +#: access/common/tupconvert.c:341 +#, c-format +msgid "Attribute \"%s\" of type %s does not exist in type %s." +msgstr "Attribut \"%s\" i typ %s finns inte i typ %s." + +#: access/common/tupdesc.c:834 parser/parse_clause.c:819 +#: parser/parse_relation.c:1539 +#, c-format +msgid "column \"%s\" cannot be declared SETOF" +msgstr "kolumn \"%s\" kan inte deklareras som SETOF" + +#: access/gin/ginbulk.c:44 +#, c-format +msgid "posting list is too long" +msgstr "post-listan är för lång" + +#: access/gin/ginbulk.c:45 +#, c-format +msgid "Reduce maintenance_work_mem." +msgstr "Minska maintenance_work_mem." + +#: access/gin/ginfast.c:1024 +#, c-format +msgid "GIN pending list cannot be cleaned up during recovery." +msgstr "väntande GIN-lista kan inte städas upp under återställning." + +#: access/gin/ginfast.c:1031 +#, c-format +msgid "\"%s\" is not a GIN index" +msgstr "\"%s\" är inte ett GIN-index" + +#: access/gin/ginfast.c:1042 +#, c-format +msgid "cannot access temporary indexes of other sessions" +msgstr "kan inte flytta temporära index tillhörande andra sessioner" + +#: access/gin/ginscan.c:402 +#, c-format +msgid "old GIN indexes do not support whole-index scans nor searches for nulls" +msgstr "gamla GIN-index stöder inte hela-index-scan eller sökningar efter null" + +#: access/gin/ginscan.c:403 +#, c-format +msgid "To fix this, do REINDEX INDEX \"%s\"." +msgstr "För att fixa detta, kör REINDEX INDEX \"%s\"." 
+ +#: access/gin/ginutil.c:138 executor/execExpr.c:1867 +#: utils/adt/arrayfuncs.c:3777 utils/adt/arrayfuncs.c:6375 +#: utils/adt/rowtypes.c:935 +#, c-format +msgid "could not identify a comparison function for type %s" +msgstr "kunde inte hitta någon jämförelsefunktion för typen %s" + +#: access/gin/ginvalidate.c:93 access/gist/gistvalidate.c:93 +#: access/hash/hashvalidate.c:99 access/spgist/spgvalidate.c:99 +#, c-format +msgid "operator family \"%s\" of access method %s contains support procedure %s with different left and right input types" +msgstr "operatorfamilj \"%s\" för accessmetod %s innehåller supportprocedur %s med olika vänster- och höger-inputtyper" + +#: access/gin/ginvalidate.c:257 +#, c-format +msgid "operator class \"%s\" of access method %s is missing support function %d or %d" +msgstr "operatorklass \"%s\" för accessmetod \"%s\" saknar supportfunktion %d eller %d" + +#: access/gist/gist.c:713 access/gist/gistvacuum.c:257 +#, c-format +msgid "index \"%s\" contains an inner tuple marked as invalid" +msgstr "index \"%s\" innehåller en inre tupel som är markerad ogiltig" + +#: access/gist/gist.c:715 access/gist/gistvacuum.c:259 +#, c-format +msgid "This is caused by an incomplete page split at crash recovery before upgrading to PostgreSQL 9.1." +msgstr "Detta orsakas av en inkomplett siduppdelning under krashåterställning körd innan uppdatering till PostgreSQL 9.1." + +#: access/gist/gist.c:716 access/gist/gistutil.c:759 access/gist/gistutil.c:770 +#: access/gist/gistvacuum.c:260 access/hash/hashutil.c:241 +#: access/hash/hashutil.c:252 access/hash/hashutil.c:264 +#: access/hash/hashutil.c:285 access/nbtree/nbtpage.c:678 +#: access/nbtree/nbtpage.c:689 +#, c-format +msgid "Please REINDEX it." +msgstr "Var vänlig och kör REINDEX på det." + +#: access/gist/gistbuild.c:250 +#, c-format +msgid "invalid value for \"buffering\" option" +msgstr "ogiltigt argument till \"buffering\"-flaggan" + +#: access/gist/gistbuild.c:251 +#, c-format +msgid "Valid values are \"on\", \"off\", and \"auto\"." +msgstr "Giltiga värden är \"on\", \"off\" och \"auto\"." + +#: access/gist/gistbuildbuffers.c:778 utils/sort/logtape.c:255 +#, c-format +msgid "could not write block %ld of temporary file: %m" +msgstr "kunde inte skriva block %ld i temporär fil: %m" + +#: access/gist/gistsplit.c:446 +#, c-format +msgid "picksplit method for column %d of index \"%s\" failed" +msgstr "picksplit-metod för kolumn %d i index \"%s\" misslyckades" + +#: access/gist/gistsplit.c:448 +#, c-format +msgid "The index is not optimal. To optimize it, contact a developer, or try to use the column as the second one in the CREATE INDEX command." +msgstr "Indexet är inte optimalt. För att optimera det, kontakta en utvecklare eller försök använda kolumnen som det andra värdet i CREATE INDEX-kommandot." 
+ +#: access/gist/gistutil.c:756 access/hash/hashutil.c:238 +#: access/nbtree/nbtpage.c:675 +#, c-format +msgid "index \"%s\" contains unexpected zero page at block %u" +msgstr "index \"%s\" innehåller en oväntad nollställd sida vid block %u" + +#: access/gist/gistutil.c:767 access/hash/hashutil.c:249 +#: access/hash/hashutil.c:261 access/nbtree/nbtpage.c:686 +#, c-format +msgid "index \"%s\" contains corrupted page at block %u" +msgstr "index \"%s\" har en trasig sida vid block %u" + +#: access/gist/gistvalidate.c:196 +#, c-format +msgid "operator family \"%s\" of access method %s contains unsupported ORDER BY specification for operator %s" +msgstr "operatorfamiljen \"%s\" för accessmetod %s innehåller en ORDER BY som inte stöds för operator %s" + +#: access/gist/gistvalidate.c:207 +#, c-format +msgid "operator family \"%s\" of access method %s contains incorrect ORDER BY opfamily specification for operator %s" +msgstr "operatorfamiljen \"%s\" för accessmetod %s innehåller en inkorrekt ORDER BY \"opfamily\"-specifikation för operator %s" + +#: access/hash/hashinsert.c:83 +#, c-format +msgid "index row size %zu exceeds hash maximum %zu" +msgstr "indexradstorlek %zu överstiger hash-maximum %zu" + +#: access/hash/hashinsert.c:85 access/spgist/spgdoinsert.c:1961 +#: access/spgist/spgutils.c:746 +#, c-format +msgid "Values larger than a buffer page cannot be indexed." +msgstr "Värden större än en buffert-sida kan inte indexeras." + +#: access/hash/hashovfl.c:87 +#, c-format +msgid "invalid overflow block number %u" +msgstr "ogiltigt overflow-blocknummer %u" + +#: access/hash/hashovfl.c:283 access/hash/hashpage.c:463 +#, c-format +msgid "out of overflow pages in hash index \"%s\"" +msgstr "slut på överspillsidor i hash-index \"%s\"" + +#: access/hash/hashsearch.c:315 +#, c-format +msgid "hash indexes do not support whole-index scans" +msgstr "hash-index stöder inte hela-index-scans" + +#: access/hash/hashutil.c:277 +#, c-format +msgid "index \"%s\" is not a hash index" +msgstr "index \"%s\" är inte ett hash-index" + +#: access/hash/hashutil.c:283 +#, c-format +msgid "index \"%s\" has wrong hash version" +msgstr "index \"%s\" har fel hash-version" + +#: access/hash/hashvalidate.c:191 +#, c-format +msgid "operator family \"%s\" of access method %s lacks support function for operator %s" +msgstr "operatorfamilj \"%s\" för accessmetod %s saknar supportfunktion för operator %s" + +#: access/hash/hashvalidate.c:249 access/nbtree/nbtvalidate.c:266 +#, c-format +msgid "operator family \"%s\" of access method %s is missing cross-type operator(s)" +msgstr "operatorfamilj \"%s\" för accessmetod %s saknar mellan-typ-operator(er)" + +#: access/heap/heapam.c:1304 access/heap/heapam.c:1333 +#: access/heap/heapam.c:1366 catalog/aclchk.c:1828 +#, c-format +msgid "\"%s\" is an index" +msgstr "\"%s\" är ett index" + +#: access/heap/heapam.c:1309 access/heap/heapam.c:1338 +#: access/heap/heapam.c:1371 catalog/aclchk.c:1835 commands/tablecmds.c:10339 +#: commands/tablecmds.c:13551 +#, c-format +msgid "\"%s\" is a composite type" +msgstr "\"%s\" är en composite-typ" + +#: access/heap/heapam.c:2639 +#, c-format +msgid "cannot insert tuples in a parallel worker" +msgstr "kan inte lägga till tupler i en parallell arbetare" + +#: access/heap/heapam.c:3091 +#, c-format +msgid "cannot delete tuples during a parallel operation" +msgstr "kan inte radera tupler under en parallell operation" + +#: access/heap/heapam.c:3137 +#, c-format +msgid "attempted to delete invisible tuple" +msgstr "försökte ta bort en osynlig tuple" + 
+#: access/heap/heapam.c:3572 access/heap/heapam.c:6409 +#, c-format +msgid "cannot update tuples during a parallel operation" +msgstr "kan inte uppdatera tupler under en parallell operation" + +#: access/heap/heapam.c:3720 +#, c-format +msgid "attempted to update invisible tuple" +msgstr "försökte uppdatera en osynlig tuple" + +#: access/heap/heapam.c:5085 access/heap/heapam.c:5123 +#: access/heap/heapam.c:5375 executor/execMain.c:2657 +#, c-format +msgid "could not obtain lock on row in relation \"%s\"" +msgstr "kunde inte låsa rad i relationen \"%s\"" + +#: access/heap/hio.c:338 access/heap/rewriteheap.c:670 +#, c-format +msgid "row is too big: size %zu, maximum size %zu" +msgstr "raden är för stor: storlek %zu, maximal storlek %zu" + +#: access/heap/rewriteheap.c:930 +#, c-format +msgid "could not write to file \"%s\", wrote %d of %d: %m" +msgstr "kunde inte skriva till fil \"%s\", skrev %d av %d: %m." + +#: access/heap/rewriteheap.c:970 access/heap/rewriteheap.c:1185 +#: access/heap/rewriteheap.c:1284 access/transam/timeline.c:411 +#: access/transam/timeline.c:490 access/transam/xlog.c:3274 +#: access/transam/xlog.c:3440 replication/logical/snapbuild.c:1629 +#: replication/slot.c:1297 replication/slot.c:1384 storage/file/fd.c:639 +#: storage/file/fd.c:3515 storage/smgr/md.c:1043 storage/smgr/md.c:1276 +#: storage/smgr/md.c:1449 utils/misc/guc.c:7256 +#, c-format +msgid "could not fsync file \"%s\": %m" +msgstr "kunde inte fsync:a fil \"%s\": %m" + +#: access/heap/rewriteheap.c:1024 access/heap/rewriteheap.c:1143 +#: access/transam/timeline.c:314 access/transam/timeline.c:465 +#: access/transam/xlog.c:3227 access/transam/xlog.c:3378 +#: access/transam/xlog.c:10607 access/transam/xlog.c:10645 +#: access/transam/xlog.c:11048 postmaster/postmaster.c:4454 +#: replication/logical/origin.c:575 replication/slot.c:1249 +#: storage/file/copydir.c:167 storage/smgr/md.c:326 utils/time/snapmgr.c:1297 +#, c-format +msgid "could not create file \"%s\": %m" +msgstr "kan inte skapa fil \"%s\": %m" + +#: access/heap/rewriteheap.c:1153 +#, c-format +msgid "could not truncate file \"%s\" to %u: %m" +msgstr "kunde inte trunkera fil \"%s\" till %u: %m" + +#: access/heap/rewriteheap.c:1161 replication/walsender.c:487 +#: storage/smgr/md.c:1948 +#, c-format +msgid "could not seek to end of file \"%s\": %m" +msgstr "kunde inte söka (seek) till slutet av filen \"%s\": %m" + +#: access/heap/rewriteheap.c:1173 access/transam/timeline.c:369 +#: access/transam/timeline.c:404 access/transam/timeline.c:482 +#: access/transam/xlog.c:3263 access/transam/xlog.c:3431 +#: postmaster/postmaster.c:4464 postmaster/postmaster.c:4474 +#: replication/logical/origin.c:584 replication/logical/origin.c:623 +#: replication/logical/origin.c:639 replication/logical/snapbuild.c:1611 +#: replication/slot.c:1280 storage/file/copydir.c:208 +#: utils/init/miscinit.c:1341 utils/init/miscinit.c:1352 +#: utils/init/miscinit.c:1360 utils/misc/guc.c:7217 utils/misc/guc.c:7248 +#: utils/misc/guc.c:9110 utils/misc/guc.c:9124 utils/time/snapmgr.c:1302 +#: utils/time/snapmgr.c:1309 +#, c-format +msgid "could not write to file \"%s\": %m" +msgstr "kunde inte skriva till fil \"%s\": %m" + +#: access/heap/rewriteheap.c:1259 access/transam/xlogarchive.c:113 +#: access/transam/xlogarchive.c:469 postmaster/postmaster.c:1275 +#: postmaster/syslogger.c:1372 replication/logical/origin.c:563 +#: replication/logical/reorderbuffer.c:2607 +#: replication/logical/snapbuild.c:1560 replication/logical/snapbuild.c:1935 +#: replication/slot.c:1357 
storage/file/fd.c:690 storage/file/fd.c:3118 +#: storage/file/fd.c:3180 storage/file/reinit.c:255 storage/ipc/dsm.c:315 +#: storage/smgr/md.c:425 storage/smgr/md.c:474 storage/smgr/md.c:1396 +#: utils/time/snapmgr.c:1640 +#, c-format +msgid "could not remove file \"%s\": %m" +msgstr "kunde inte ta bort fil \"%s\": %m" + +#: access/heap/rewriteheap.c:1273 access/transam/timeline.c:111 +#: access/transam/timeline.c:236 access/transam/timeline.c:333 +#: access/transam/xlog.c:3204 access/transam/xlog.c:3323 +#: access/transam/xlog.c:3364 access/transam/xlog.c:3641 +#: access/transam/xlog.c:3719 access/transam/xlogutils.c:708 +#: postmaster/syslogger.c:1381 replication/basebackup.c:507 +#: replication/basebackup.c:1381 replication/logical/origin.c:694 +#: replication/logical/reorderbuffer.c:2134 +#: replication/logical/reorderbuffer.c:2378 +#: replication/logical/reorderbuffer.c:3081 +#: replication/logical/snapbuild.c:1603 replication/logical/snapbuild.c:1691 +#: replication/slot.c:1372 replication/walsender.c:480 +#: replication/walsender.c:2401 storage/file/copydir.c:161 +#: storage/file/fd.c:622 storage/file/fd.c:3410 storage/file/fd.c:3494 +#: storage/smgr/md.c:607 utils/error/elog.c:1879 utils/init/miscinit.c:1265 +#: utils/init/miscinit.c:1400 utils/init/miscinit.c:1477 utils/misc/guc.c:7476 +#: utils/misc/guc.c:7508 +#, c-format +msgid "could not open file \"%s\": %m" +msgstr "kunde inte öppna fil \"%s\": %m" + +#: access/index/amapi.c:83 commands/amcmds.c:163 +#, c-format +msgid "access method \"%s\" is not of type %s" +msgstr "accessmetod \"%s\" har inte typ %s" + +#: access/index/amapi.c:99 +#, c-format +msgid "index access method \"%s\" does not have a handler" +msgstr "indexaccessmetod \"%s\" har ingen hanterare" + +#: access/index/indexam.c:160 catalog/objectaddress.c:1223 +#: commands/indexcmds.c:2236 commands/tablecmds.c:249 commands/tablecmds.c:273 +#: commands/tablecmds.c:13542 commands/tablecmds.c:14793 +#, c-format +msgid "\"%s\" is not an index" +msgstr "\"%s\" är inte ett index" + +#: access/nbtree/nbtinsert.c:530 +#, c-format +msgid "duplicate key value violates unique constraint \"%s\"" +msgstr "duplicerat nyckelvärde bryter mot unik-villkor \"%s\"" + +#: access/nbtree/nbtinsert.c:532 +#, c-format +msgid "Key %s already exists." +msgstr "Nyckeln %s existerar redan." + +#: access/nbtree/nbtinsert.c:599 +#, c-format +msgid "failed to re-find tuple within index \"%s\"" +msgstr "misslyckades att återfinna tuple i index \"%s\"" + +#: access/nbtree/nbtinsert.c:601 +#, c-format +msgid "This may be because of a non-immutable index expression." +msgstr "Det kan bero på ett icke-immutable indexuttryck." + +#: access/nbtree/nbtinsert.c:681 access/nbtree/nbtsort.c:842 +#, c-format +msgid "" +"Values larger than 1/3 of a buffer page cannot be indexed.\n" +"Consider a function index of an MD5 hash of the value, or use full text indexing." +msgstr "" +"Värden större än 1/3 av en buffer-sida kan inte indexeras.\n" +"Kanske kan du använda ett funktionsindex av ett MD5-hashvärde istället\n" +"eller möjligen full-text-indexering." 
+ +#: access/nbtree/nbtpage.c:318 access/nbtree/nbtpage.c:529 +#: access/nbtree/nbtpage.c:618 parser/parse_utilcmd.c:2055 +#, c-format +msgid "index \"%s\" is not a btree" +msgstr "index \"%s\" är inte ett btree" + +#: access/nbtree/nbtpage.c:325 access/nbtree/nbtpage.c:536 +#: access/nbtree/nbtpage.c:625 +#, c-format +msgid "version mismatch in index \"%s\": file version %d, current version %d, minimal supported version %d" +msgstr "versionsfel i index \"%s\": filversion %d, aktuell version %d, minsta supportade version %d" + +#: access/nbtree/nbtpage.c:1312 +#, c-format +msgid "index \"%s\" contains a half-dead internal page" +msgstr "index \"%s\" innehåller en halvdöd intern sida" + +#: access/nbtree/nbtpage.c:1314 +#, c-format +msgid "This can be caused by an interrupted VACUUM in version 9.3 or older, before upgrade. Please REINDEX it." +msgstr "Detta kan ha orsakats av en avbruten VACUUM i version 9.3 eller äldre, innan uppdatering. Vänligen REINDEX:era det." + +#: access/nbtree/nbtvalidate.c:236 +#, c-format +msgid "operator family \"%s\" of access method %s is missing support function for types %s and %s" +msgstr "operatorfamilj \"%s\" för accessmetod %s saknar supportfunktioner för typerna %s och %s" + +#: access/spgist/spgutils.c:136 +#, c-format +msgid "compress method must not defined when leaf type is different from input type" +msgstr "komprimeringsmetod får inte definieras när lövtypen skiljer sig från indatatypen" + +#: access/spgist/spgutils.c:743 +#, c-format +msgid "SP-GiST inner tuple size %zu exceeds maximum %zu" +msgstr "SP-GiST inre tuplestorlek %zu överstiger maximala %zu" + +#: access/spgist/spgvalidate.c:269 +#, c-format +msgid "operator family \"%s\" of access method %s is missing support function %d for type %s" +msgstr "operatorfamilj \"%s\" för accessmetod %s saknar supportfunktion %d för typ %s" + +#: access/tablesample/bernoulli.c:152 access/tablesample/system.c:156 +#, c-format +msgid "sample percentage must be between 0 and 100" +msgstr "urvalsprocent måste vara mellan 0 och 100" + +#: access/transam/commit_ts.c:295 +#, c-format +msgid "cannot retrieve commit timestamp for transaction %u" +msgstr "kan inte hämta commit-tidsstämpel för transaktion %u" + +#: access/transam/commit_ts.c:393 +#, c-format +msgid "could not get commit timestamp data" +msgstr "kunde inte hämta commit-tidsstämpeldata" + +#: access/transam/commit_ts.c:395 +#, c-format +msgid "Make sure the configuration parameter \"%s\" is set on the master server." +msgstr "Se till att konfigurationsparametern \"%s\" är satt på master-servern." + +#: access/transam/commit_ts.c:397 +#, c-format +msgid "Make sure the configuration parameter \"%s\" is set." +msgstr "Se till att konfigurationsparametern \"%s\" är satt." + +#: access/transam/multixact.c:1000 +#, c-format +msgid "database is not accepting commands that generate new MultiXactIds to avoid wraparound data loss in database \"%s\"" +msgstr "databasen tar inte emot kommandon som genererar nya MultiXactId:er för att förhindra dataförlust vid \"wraparound\" i databasen \"%s\"" + +#: access/transam/multixact.c:1002 access/transam/multixact.c:1009 +#: access/transam/multixact.c:1033 access/transam/multixact.c:1042 +#, c-format +msgid "" +"Execute a database-wide VACUUM in that database.\n" +"You might also need to commit or roll back old prepared transactions, or drop stale replication slots."
+msgstr "" +"Utför en hela databasen-VACUUM i den databasen.\n" +"Du kan också behöva commit:a eller rulla tillbaka gamla förberedda transaktioner eller slänga gamla replikeringsslottar." + +#: access/transam/multixact.c:1007 +#, c-format +msgid "database is not accepting commands that generate new MultiXactIds to avoid wraparound data loss in database with OID %u" +msgstr "databasen tar inte emot kommandon som genererar nya MultiXactId:er för att förhinda dataförlust vid \"wraparound\" i databasen med OID %u" + +#: access/transam/multixact.c:1028 access/transam/multixact.c:2318 +#, c-format +msgid "database \"%s\" must be vacuumed before %u more MultiXactId is used" +msgid_plural "database \"%s\" must be vacuumed before %u more MultiXactIds are used" +msgstr[0] "databasen \"%s\" måste städas innan ytterligare %u MultiXactId används" +msgstr[1] "databasen \"%s\" måste städas innan ytterligare %u MultiXactId:er används" + +#: access/transam/multixact.c:1037 access/transam/multixact.c:2327 +#, c-format +msgid "database with OID %u must be vacuumed before %u more MultiXactId is used" +msgid_plural "database with OID %u must be vacuumed before %u more MultiXactIds are used" +msgstr[0] "databas med OID %u måste städas (vacuum) innan %u till MultiXactId används" +msgstr[1] "databas med OID %u måste städas (vacuum) innan %u till MultiXactId:er används" + +#: access/transam/multixact.c:1098 +#, c-format +msgid "multixact \"members\" limit exceeded" +msgstr "multixact \"members\"-gräns överskriden" + +#: access/transam/multixact.c:1099 +#, c-format +msgid "This command would create a multixact with %u members, but the remaining space is only enough for %u member." +msgid_plural "This command would create a multixact with %u members, but the remaining space is only enough for %u members." +msgstr[0] "Detta kommando skapar en multixact med %u medlemmar, men återstående utrymmer räcker bara till %u medlem." +msgstr[1] "Detta kommando skapar en multixact med %u medlemmar, men återstående utrymmer räcker bara till %u medlemmar." + +#: access/transam/multixact.c:1104 +#, c-format +msgid "Execute a database-wide VACUUM in database with OID %u with reduced vacuum_multixact_freeze_min_age and vacuum_multixact_freeze_table_age settings." +msgstr "Kör en hela-databas-VACUUM i databas med OID %u med reducerade iställningar vacuum_multixact_freeze_min_age och vacuum_multixact_freeze_table_age." + +#: access/transam/multixact.c:1135 +#, c-format +msgid "database with OID %u must be vacuumed before %d more multixact member is used" +msgid_plural "database with OID %u must be vacuumed before %d more multixact members are used" +msgstr[0] "databas med OID %u måste städas innan %d mer multixact-medlem används" +msgstr[1] "databas med OID %u måste städas innan %d fler multixact-medlemmar används" + +#: access/transam/multixact.c:1140 +#, c-format +msgid "Execute a database-wide VACUUM in that database with reduced vacuum_multixact_freeze_min_age and vacuum_multixact_freeze_table_age settings." +msgstr "Kör en hela-databas-VACUUM i den databasen med reducerade inställningar för vacuum_multixact_freeze_min_age och vacuum_multixact_freeze_table_age." 
+ +#: access/transam/multixact.c:1277 +#, c-format +msgid "MultiXactId %u does no longer exist -- apparent wraparound" +msgstr "MultiXactId %u finns inte längre -- troligen en wraparound" + +#: access/transam/multixact.c:1285 +#, c-format +msgid "MultiXactId %u has not been created yet -- apparent wraparound" +msgstr "MultiXactId %u har inte skapats än -- troligen en wraparound" + +#: access/transam/multixact.c:2268 +#, c-format +msgid "MultiXactId wrap limit is %u, limited by database with OID %u" +msgstr "MultiXactId wrap-gräns är %u, begränsad av databasen med OID %u" + +#: access/transam/multixact.c:2323 access/transam/multixact.c:2332 +#: access/transam/varsup.c:146 access/transam/varsup.c:153 +#: access/transam/varsup.c:405 access/transam/varsup.c:412 +#, c-format +msgid "" +"To avoid a database shutdown, execute a database-wide VACUUM in that database.\n" +"You might also need to commit or roll back old prepared transactions, or drop stale replication slots." +msgstr "" +"För att undvika att databasen stängs ner, utför en hela databas-VACUUM i den databasen.\n" +"Du kan också behöva commit:a eller rulla tillbaka gamla förberedda transaktioner eller slänga gamla replikeringsslottar." + +#: access/transam/multixact.c:2602 +#, c-format +msgid "oldest MultiXactId member is at offset %u" +msgstr "äldsta MultiXactId-medlemmen är vid offset %u" + +#: access/transam/multixact.c:2606 +#, c-format +msgid "MultiXact member wraparound protections are disabled because oldest checkpointed MultiXact %u does not exist on disk" +msgstr "MultiXact-medlems wraparound-skydd är avslaget eftersom äldsta checkpoint:ade MultiXact %u inte finns på disk" + +#: access/transam/multixact.c:2628 +#, c-format +msgid "MultiXact member wraparound protections are now enabled" +msgstr "MultiXact-medlems wraparound-skydd är nu påslaget" + +#: access/transam/multixact.c:2631 +#, c-format +msgid "MultiXact member stop limit is now %u based on MultiXact %u" +msgstr "MultiXact-medlems stoppgräns är nu %u baserad på MultiXact %u" + +#: access/transam/multixact.c:3011 +#, c-format +msgid "oldest MultiXact %u not found, earliest MultiXact %u, skipping truncation" +msgstr "äldsta MultiXact %u hittas inte, tidigaste MultiXact %u, skippar trunkering" + +#: access/transam/multixact.c:3029 +#, c-format +msgid "cannot truncate up to MultiXact %u because it does not exist on disk, skipping truncation" +msgstr "kan inte trunkera upp till MultiXact %u eftersom den inte finns på disk, skippar trunkering" + +#: access/transam/multixact.c:3355 +#, c-format +msgid "invalid MultiXactId: %u" +msgstr "ogiltig MultiXactId: %u" + +#: access/transam/parallel.c:660 access/transam/parallel.c:783 +#, c-format +msgid "parallel worker failed to initialize" +msgstr "parallell arbetare misslyckades med initiering" + +#: access/transam/parallel.c:661 access/transam/parallel.c:784 +#, c-format +msgid "More details may be available in the server log." +msgstr "Fler detaljer kan finnas i serverloggen."
+ +#: access/transam/parallel.c:845 +#, c-format +msgid "postmaster exited during a parallel transaction" +msgstr "postmaster avslutade under en parallell transaktion" + +#: access/transam/parallel.c:1032 +#, c-format +msgid "lost connection to parallel worker" +msgstr "tappade kopplingen till parallell arbetare" + +#: access/transam/parallel.c:1098 access/transam/parallel.c:1100 +msgid "parallel worker" +msgstr "parallell arbetare" + +#: access/transam/parallel.c:1245 +#, c-format +msgid "could not map dynamic shared memory segment" +msgstr "kunde inte mappa dynamiskt delat minnessegment" + +#: access/transam/parallel.c:1250 +#, c-format +msgid "invalid magic number in dynamic shared memory segment" +msgstr "ogiltigt magiskt nummer i dynamiskt delat minnessegment" + +#: access/transam/slru.c:668 +#, c-format +msgid "file \"%s\" doesn't exist, reading as zeroes" +msgstr "filen \"%s\" existerar inte, läses som nollor" + +#: access/transam/slru.c:906 access/transam/slru.c:912 +#: access/transam/slru.c:919 access/transam/slru.c:926 +#: access/transam/slru.c:933 access/transam/slru.c:940 +#, c-format +msgid "could not access status of transaction %u" +msgstr "kunde inte läsa status på transaktion %u" + +#: access/transam/slru.c:907 +#, c-format +msgid "Could not open file \"%s\": %m." +msgstr "Kunde inte öppna fil \"%s\": %m." + +#: access/transam/slru.c:913 +#, c-format +msgid "Could not seek in file \"%s\" to offset %u: %m." +msgstr "Kunde inte söka i fil \"%s\" till offset %u: %m." + +#: access/transam/slru.c:920 +#, c-format +msgid "Could not read from file \"%s\" at offset %u: %m." +msgstr "Kunde inte läsa från fil \"%s\" på offset %u: %m." + +#: access/transam/slru.c:927 +#, c-format +msgid "Could not write to file \"%s\" at offset %u: %m." +msgstr "Kunde inte skriva till fil \"%s\" på offset %u: %m." + +#: access/transam/slru.c:934 +#, c-format +msgid "Could not fsync file \"%s\": %m." +msgstr "Kunde inte fsync:a fil \"%s\": %m." + +#: access/transam/slru.c:941 +#, c-format +msgid "Could not close file \"%s\": %m." +msgstr "Kunde inte stänga fil \"%s\": %m." + +#: access/transam/slru.c:1198 +#, c-format +msgid "could not truncate directory \"%s\": apparent wraparound" +msgstr "kunde inte trunkera katalog \"%s\": trolig wraparound" + +#: access/transam/slru.c:1253 access/transam/slru.c:1309 +#, c-format +msgid "removing file \"%s\"" +msgstr "tar bort fil \"%s\"" + +#: access/transam/timeline.c:148 access/transam/timeline.c:153 +#, c-format +msgid "syntax error in history file: %s" +msgstr "syntaxfel i history-fil: %s" + +#: access/transam/timeline.c:149 +#, c-format +msgid "Expected a numeric timeline ID." +msgstr "Förväntade ett numeriskt tidslinje-ID." + +#: access/transam/timeline.c:154 +#, c-format +msgid "Expected a write-ahead log switchpoint location." +msgstr "Förväntade en write-ahead-loggs switchpoint-position." + +#: access/transam/timeline.c:158 +#, c-format +msgid "invalid data in history file: %s" +msgstr "felaktig data i history-fil: %s" + +#: access/transam/timeline.c:159 +#, c-format +msgid "Timeline IDs must be in increasing sequence." +msgstr "Tidslinje-ID måste komma i en stigande sekvens." + +#: access/transam/timeline.c:179 +#, c-format +msgid "invalid data in history file \"%s\"" +msgstr "felaktig data i history-fil \"%s\"" + +#: access/transam/timeline.c:180 +#, c-format +msgid "Timeline IDs must be less than child timeline's ID." +msgstr "Tidslinje-ID:er måste vara mindre än barnens tidslinje-ID:er."
+ +#: access/transam/timeline.c:417 access/transam/timeline.c:496 +#: access/transam/xlog.c:3281 access/transam/xlog.c:3446 +#: access/transam/xlogfuncs.c:683 commands/copy.c:1742 +#: storage/file/copydir.c:219 +#, c-format +msgid "could not close file \"%s\": %m" +msgstr "kunde inte stänga fil \"%s\": %m" + +#: access/transam/timeline.c:578 +#, c-format +msgid "requested timeline %u is not in this server's history" +msgstr "efterfrågad tidslinje %u finns inte i denna servers historik" + +#: access/transam/twophase.c:381 +#, c-format +msgid "transaction identifier \"%s\" is too long" +msgstr "transaktionsidentifierare \"%s\" är för lång" + +#: access/transam/twophase.c:388 +#, c-format +msgid "prepared transactions are disabled" +msgstr "förberedda transaktioner är avslagna" + +#: access/transam/twophase.c:389 +#, c-format +msgid "Set max_prepared_transactions to a nonzero value." +msgstr "Sätt max_prepared_transactions till ett ickenollvärde." + +#: access/transam/twophase.c:408 +#, c-format +msgid "transaction identifier \"%s\" is already in use" +msgstr "transaktionsidentifierare \"%s\" används redan" + +#: access/transam/twophase.c:417 access/transam/twophase.c:2415 +#, c-format +msgid "maximum number of prepared transactions reached" +msgstr "maximalt antal förberedda transaktioner har uppnåtts" + +#: access/transam/twophase.c:418 access/transam/twophase.c:2416 +#, c-format +msgid "Increase max_prepared_transactions (currently %d)." +msgstr "Öka max_prepared_transactions (nu %d)." + +#: access/transam/twophase.c:585 +#, c-format +msgid "prepared transaction with identifier \"%s\" is busy" +msgstr "förberedd transaktion med identifierare \"%s\" är upptagen" + +#: access/transam/twophase.c:591 +#, c-format +msgid "permission denied to finish prepared transaction" +msgstr "rättighet saknas för att slutföra förberedd transaktion" + +#: access/transam/twophase.c:592 +#, c-format +msgid "Must be superuser or the user that prepared the transaction." +msgstr "Måste vara superanvändare eller den användare som förberedde transaktionen." + +#: access/transam/twophase.c:603 +#, c-format +msgid "prepared transaction belongs to another database" +msgstr "förberedda transaktionen tillhör en annan databas" + +#: access/transam/twophase.c:604 +#, c-format +msgid "Connect to the database where the transaction was prepared to finish it." +msgstr "Anslut till databasen där transaktionen var förberedd för att slutföra den." + +#: access/transam/twophase.c:619 +#, c-format +msgid "prepared transaction with identifier \"%s\" does not exist" +msgstr "förberedd transaktion med identifierare \"%s\" finns inte" + +#: access/transam/twophase.c:1102 +#, c-format +msgid "two-phase state file maximum length exceeded" +msgstr "tvåfas-statusfilens maximala längd överskriden" + +#: access/transam/twophase.c:1231 +#, c-format +msgid "could not open two-phase state file \"%s\": %m" +msgstr "kunde inte öppna tvåfas-statusfil \"%s\": %m" + +#: access/transam/twophase.c:1248 +#, c-format +msgid "could not stat two-phase state file \"%s\": %m" +msgstr "kunde inte göra stat() på tvåfas-statusfil \"%s\": %m" + +#: access/transam/twophase.c:1282 +#, c-format +msgid "could not read two-phase state file \"%s\": %m" +msgstr "kunde inte läsa tvåfas-statusfil \"%s\": %m" + +#: access/transam/twophase.c:1373 access/transam/xlog.c:6444 +#, c-format +msgid "Failed while allocating a WAL reading processor." +msgstr "Misslyckades vid allokering av en WAL-läs-processor."
+ +#: access/transam/twophase.c:1379 +#, c-format +msgid "could not read two-phase state from WAL at %X/%X" +msgstr "kunde inte läsa tvåfas-status från WAL vid %X/%X" + +#: access/transam/twophase.c:1387 +#, c-format +msgid "expected two-phase state data is not present in WAL at %X/%X" +msgstr "förväntad tvåfas-statusdata finns inte i WAL vid %X/%X" + +#: access/transam/twophase.c:1630 +#, c-format +msgid "could not remove two-phase state file \"%s\": %m" +msgstr "kunde inte ta bort tvåfas-statusfil \"%s\": %m" + +#: access/transam/twophase.c:1659 +#, c-format +msgid "could not recreate two-phase state file \"%s\": %m" +msgstr "kunde inte återskapa tvåfas-statusfil \"%s\": %m" + +#: access/transam/twophase.c:1670 access/transam/twophase.c:1678 +#, c-format +msgid "could not write two-phase state file: %m" +msgstr "kunde inte skriva tvåfas-statusfil: %m" + +#: access/transam/twophase.c:1692 +#, c-format +msgid "could not fsync two-phase state file: %m" +msgstr "kunde inte fsync:a tvåfas-statusfil: %m" + +#: access/transam/twophase.c:1699 +#, c-format +msgid "could not close two-phase state file: %m" +msgstr "kunde inte stänga tvåfas-statusfil: %m" + +#: access/transam/twophase.c:1787 +#, c-format +msgid "%u two-phase state file was written for a long-running prepared transaction" +msgid_plural "%u two-phase state files were written for long-running prepared transactions" +msgstr[0] "%u tvåfas-statusfil skrevs för långkörande förberedd transaktion" +msgstr[1] "%u tvåfas-statusfiler skrevs för långkörande förberedda transaktioner" + +#: access/transam/twophase.c:2016 +#, c-format +msgid "recovering prepared transaction %u from shared memory" +msgstr "återskapar förberedd transaktion %u från delat minne" + +#: access/transam/twophase.c:2106 +#, c-format +msgid "removing stale two-phase state file for transaction %u" +msgstr "tar bort död tvåfas-statusfil för transaktion %u" + +#: access/transam/twophase.c:2113 +#, c-format +msgid "removing stale two-phase state from memory for transaction %u" +msgstr "tar bort död tvåfas-status från minne för transaktion %u" + +#: access/transam/twophase.c:2126 +#, c-format +msgid "removing future two-phase state file for transaction %u" +msgstr "tar bort framtida tvåfas-statusfil för transaktion %u" + +#: access/transam/twophase.c:2133 +#, c-format +msgid "removing future two-phase state from memory for transaction %u" +msgstr "tar bort framtida tvåfas-status från minne för transaktion %u" + +#: access/transam/twophase.c:2147 access/transam/twophase.c:2166 +#, c-format +msgid "removing corrupt two-phase state file for transaction %u" +msgstr "tar bort korrupt tvåfas-statusfil för transaktion %u" + +#: access/transam/twophase.c:2173 +#, c-format +msgid "removing corrupt two-phase state from memory for transaction %u" +msgstr "tar bort korrupt tvåfas-status från minne för transaktion %u" + +#: access/transam/varsup.c:124 +#, c-format +msgid "database is not accepting commands to avoid wraparound data loss in database \"%s\"" +msgstr "databasen tar inte emot kommandon för att förhindra dataförlust vid \"wraparound\" i databasen \"%s\"" + +#: access/transam/varsup.c:126 access/transam/varsup.c:133 +#, c-format +msgid "" +"Stop the postmaster and vacuum that database in single-user mode.\n" +"You might also need to commit or roll back old prepared transactions, or drop stale replication slots."
+msgstr "" +"Stoppa postmaster och städa (vacuum) den databasen i enanvändarläge.\n" +"Du kan också behöva commit:a eller rulla tillbaka förberedda transaktioner eller slänga gamla replikeringsslottar." + +#: access/transam/varsup.c:131 +#, c-format +msgid "database is not accepting commands to avoid wraparound data loss in database with OID %u" +msgstr "databasen tar inte emot kommandon för att förhinda dataförlust vid wraparound i databas med OID %u" + +#: access/transam/varsup.c:143 access/transam/varsup.c:402 +#, c-format +msgid "database \"%s\" must be vacuumed within %u transactions" +msgstr "databas \"%s\" måste städas (vacuum) inom %u transaktioner" + +#: access/transam/varsup.c:150 access/transam/varsup.c:409 +#, c-format +msgid "database with OID %u must be vacuumed within %u transactions" +msgstr "databas med OID %u måste städas (vacuum) inom %u transaktioner" + +#: access/transam/varsup.c:367 +#, c-format +msgid "transaction ID wrap limit is %u, limited by database with OID %u" +msgstr "transaktions-ID wrap-gräns är %u, begränsad av databas med OID %u" + +#: access/transam/xact.c:938 +#, c-format +msgid "cannot have more than 2^32-2 commands in a transaction" +msgstr "kan inte ha mer än 2^32-2 kommandon i en transaktion" + +#: access/transam/xact.c:1463 +#, c-format +msgid "maximum number of committed subtransactions (%d) exceeded" +msgstr "maximalt antal commit:ade undertransaktioner (%d) överskridet" + +#: access/transam/xact.c:2258 +#, c-format +msgid "cannot PREPARE a transaction that has operated on temporary tables" +msgstr "kan inte göra PREPARE på en transaktion som har arbetat med temporära tabeller" + +#: access/transam/xact.c:2268 +#, c-format +msgid "cannot PREPARE a transaction that has exported snapshots" +msgstr "kan inte göra PREPARE på en transaktion som har exporterade snapshots" + +#: access/transam/xact.c:2277 +#, c-format +msgid "cannot PREPARE a transaction that has manipulated logical replication workers" +msgstr "kan inte göra PREPARE på en transaktion som har förändrat logiska replikeringsarbetare" + +#. translator: %s represents an SQL statement name +#: access/transam/xact.c:3162 +#, c-format +msgid "%s cannot run inside a transaction block" +msgstr "%s kan inte köras i ett transaktionsblock" + +#. translator: %s represents an SQL statement name +#: access/transam/xact.c:3172 +#, c-format +msgid "%s cannot run inside a subtransaction" +msgstr "%s kan inte köras i ett undertransaktionsblock" + +#. translator: %s represents an SQL statement name +#: access/transam/xact.c:3182 +#, c-format +msgid "%s cannot be executed from a function" +msgstr "%s kan inte köras från en funktion" + +#. 
translator: %s represents an SQL statement name +#: access/transam/xact.c:3251 access/transam/xact.c:3875 +#: access/transam/xact.c:3944 access/transam/xact.c:4055 +#, c-format +msgid "%s can only be used in transaction blocks" +msgstr "%s kan bara användas i transaktionsblock" + +#: access/transam/xact.c:3444 +#, c-format +msgid "there is already a transaction in progress" +msgstr "det är redan en transaktion igång" + +#: access/transam/xact.c:3555 access/transam/xact.c:3625 +#: access/transam/xact.c:3734 +#, c-format +msgid "there is no transaction in progress" +msgstr "ingen transaktion pågår" + +#: access/transam/xact.c:3636 +#, c-format +msgid "cannot commit during a parallel operation" +msgstr "kan inte commit:a under en parallell operation" + +#: access/transam/xact.c:3745 +#, c-format +msgid "cannot abort during a parallel operation" +msgstr "kan inte avbryta under en parallell operation" + +#: access/transam/xact.c:3839 +#, c-format +msgid "cannot define savepoints during a parallel operation" +msgstr "kan inte definiera sparpunkter under en parallell operation" + +#: access/transam/xact.c:3926 +#, c-format +msgid "cannot release savepoints during a parallel operation" +msgstr "kan inte frigöra sparpunkter under en parallell operation" + +#: access/transam/xact.c:3936 access/transam/xact.c:3987 +#: access/transam/xact.c:4047 access/transam/xact.c:4096 +#, c-format +msgid "savepoint \"%s\" does not exist" +msgstr "sparpunkt \"%s\" existerar inte" + +#: access/transam/xact.c:3993 access/transam/xact.c:4102 +#, c-format +msgid "savepoint \"%s\" does not exist within current savepoint level" +msgstr "sparpunkt \"%s\" finns inte inom aktuell sparpunktsnivå" + +#: access/transam/xact.c:4035 +#, c-format +msgid "cannot rollback to savepoints during a parallel operation" +msgstr "kan inte rulla tillbaka till sparpunkter under en parallell operation" + +#: access/transam/xact.c:4163 +#, c-format +msgid "cannot start subtransactions during a parallel operation" +msgstr "kan inte starta subtransaktioner under en parallell operation" + +#: access/transam/xact.c:4231 +#, c-format +msgid "cannot commit subtransactions during a parallel operation" +msgstr "kan inte commit:a subtransaktioner under en parallell operation" + +#: access/transam/xact.c:4867 +#, c-format +msgid "cannot have more than 2^32-1 subtransactions in a transaction" +msgstr "kan inte ha mer än 2^32-1 subtransaktioner i en transaktion" + +#: access/transam/xlog.c:2479 +#, c-format +msgid "could not seek in log file %s to offset %u: %m" +msgstr "kunde inte söka i loggfil %s till offset %u: %m" + +#: access/transam/xlog.c:2501 +#, c-format +msgid "could not write to log file %s at offset %u, length %zu: %m" +msgstr "kunde inte skriva till loggfil %s vid offset %u, längd %zu: %m" + +#: access/transam/xlog.c:2767 +#, c-format +msgid "updated min recovery point to %X/%X on timeline %u" +msgstr "uppdaterade minsta återställningspunkt till %X/%X på tidslinje %u" + +#: access/transam/xlog.c:3411 +#, c-format +msgid "not enough data in file \"%s\"" +msgstr "otillräckligt med data i fil \"%s\"" + +#: access/transam/xlog.c:3556 +#, c-format +msgid "could not open write-ahead log file \"%s\": %m" +msgstr "kunde inte öppna write-ahead-logg-fil \"%s\": %m" + +#: access/transam/xlog.c:3745 access/transam/xlog.c:5634 +#, c-format +msgid "could not close log file %s: %m" +msgstr "kunde inte stänga loggfil %s: %m" + +#: access/transam/xlog.c:3811 access/transam/xlogutils.c:703 +#: replication/walsender.c:2396 +#, c-format +msgid "requested WAL 
segment %s has already been removed" +msgstr "efterfrågat WAL-segment %s har redan tagits bort" + +#: access/transam/xlog.c:4018 +#, c-format +msgid "recycled write-ahead log file \"%s\"" +msgstr "återanvände write-ahead-loggfil \"%s\"" + +#: access/transam/xlog.c:4030 +#, c-format +msgid "removing write-ahead log file \"%s\"" +msgstr "tar bort write-ahead-loggfil \"%s\"" + +#: access/transam/xlog.c:4050 +#, c-format +msgid "could not rename old write-ahead log file \"%s\": %m" +msgstr "kunde inte döpa om gammal write-ahead-loggfil \"%s\": %m" + +#: access/transam/xlog.c:4092 access/transam/xlog.c:4102 +#, c-format +msgid "required WAL directory \"%s\" does not exist" +msgstr "krävd WAL-katalog \"%s\" finns inte" + +#: access/transam/xlog.c:4108 +#, c-format +msgid "creating missing WAL directory \"%s\"" +msgstr "skapar saknad WAL-katalog \"%s\"" + +#: access/transam/xlog.c:4111 +#, c-format +msgid "could not create missing directory \"%s\": %m" +msgstr "kunde inte skapa saknad katalog \"%s\": %m" + +#: access/transam/xlog.c:4219 +#, c-format +msgid "unexpected timeline ID %u in log segment %s, offset %u" +msgstr "oväntad tidslinje-ID %u i loggsegment %s, offset %u" + +#: access/transam/xlog.c:4341 +#, c-format +msgid "new timeline %u is not a child of database system timeline %u" +msgstr "ny tidslinje %u är inte ett barn till databasens systemtidslinje %u" + +#: access/transam/xlog.c:4355 +#, c-format +msgid "new timeline %u forked off current database system timeline %u before current recovery point %X/%X" +msgstr "ny tidslinje %u skapad från aktuella databasens systemtidslinje %u innan nuvarande återställningspunkt %X/%X" + +#: access/transam/xlog.c:4374 +#, c-format +msgid "new target timeline is %u" +msgstr "ny måltidslinje är %u" + +#: access/transam/xlog.c:4454 +#, c-format +msgid "could not create control file \"%s\": %m" +msgstr "kunde inte skapa kontrollfil \"%s\": %m" + +#: access/transam/xlog.c:4466 access/transam/xlog.c:4720 +#, c-format +msgid "could not write to control file: %m" +msgstr "kunde inte skriva till kontrollfil: %m" + +#: access/transam/xlog.c:4474 access/transam/xlog.c:4728 +#, c-format +msgid "could not fsync control file: %m" +msgstr "kunde inte fsync:a kontrollfil: %m" + +#: access/transam/xlog.c:4480 access/transam/xlog.c:4734 +#, c-format +msgid "could not close control file: %m" +msgstr "kunde inte stänga kontrollfil: %m" + +#: access/transam/xlog.c:4499 access/transam/xlog.c:4708 +#, c-format +msgid "could not open control file \"%s\": %m" +msgstr "kunde inte öppna kontrollfil \"%s\": %m" + +#: access/transam/xlog.c:4509 +#, c-format +msgid "could not read from control file: %m" +msgstr "kunde inte läsa från kontrollfil: %m" + +#: access/transam/xlog.c:4512 +#, c-format +msgid "could not read from control file: read %d bytes, expected %d" +msgstr "kunde inte läsa från kontrollfil: läste %d byte, förväntade %d" + +#: access/transam/xlog.c:4527 access/transam/xlog.c:4536 +#: access/transam/xlog.c:4560 access/transam/xlog.c:4567 +#: access/transam/xlog.c:4574 access/transam/xlog.c:4579 +#: access/transam/xlog.c:4586 access/transam/xlog.c:4593 +#: access/transam/xlog.c:4600 access/transam/xlog.c:4607 +#: access/transam/xlog.c:4614 access/transam/xlog.c:4621 +#: access/transam/xlog.c:4630 access/transam/xlog.c:4637 +#: access/transam/xlog.c:4646 access/transam/xlog.c:4653 +#: utils/init/miscinit.c:1498 +#, c-format +msgid "database files are incompatible with server" +msgstr "databasfilerna är inkompatibla med servern" + +#: access/transam/xlog.c:4528 +#, 
c-format +msgid "The database cluster was initialized with PG_CONTROL_VERSION %d (0x%08x), but the server was compiled with PG_CONTROL_VERSION %d (0x%08x)." +msgstr "Databasklustret initierades med PG_CONTROL_VERSION %d (0x%08x), men servern kompilerades med PG_CONTROL_VERSION %d (0x%08x)." + +#: access/transam/xlog.c:4532 +#, c-format +msgid "This could be a problem of mismatched byte ordering. It looks like you need to initdb." +msgstr "Detta kan orsakas av en felaktig byte-ordning. Du behöver troligen köra initdb." + +#: access/transam/xlog.c:4537 +#, c-format +msgid "The database cluster was initialized with PG_CONTROL_VERSION %d, but the server was compiled with PG_CONTROL_VERSION %d." +msgstr "Databasklustret initierades med PG_CONTROL_VERSION %d, men servern kompilerades med PG_CONTROL_VERSION %d." + +#: access/transam/xlog.c:4540 access/transam/xlog.c:4564 +#: access/transam/xlog.c:4571 access/transam/xlog.c:4576 +#, c-format +msgid "It looks like you need to initdb." +msgstr "Du behöver troligen köra initdb." + +#: access/transam/xlog.c:4551 +#, c-format +msgid "incorrect checksum in control file" +msgstr "ogiltig kontrollsumma i kontrollfil" + +#: access/transam/xlog.c:4561 +#, c-format +msgid "The database cluster was initialized with CATALOG_VERSION_NO %d, but the server was compiled with CATALOG_VERSION_NO %d." +msgstr "Databasklustret initierades med CATALOG_VERSION_NO %d, men servern kompilerades med CATALOG_VERSION_NO %d." + +#: access/transam/xlog.c:4568 +#, c-format +msgid "The database cluster was initialized with MAXALIGN %d, but the server was compiled with MAXALIGN %d." +msgstr "Databasklustret initierades med MAXALIGN %d, men servern kompilerades med MAXALIGN %d." + +#: access/transam/xlog.c:4575 +#, c-format +msgid "The database cluster appears to use a different floating-point number format than the server executable." +msgstr "Databasklustret verkar använda en annan flyttalsrepresentation än vad serverprogrammet gör." + +#: access/transam/xlog.c:4580 +#, c-format +msgid "The database cluster was initialized with BLCKSZ %d, but the server was compiled with BLCKSZ %d." +msgstr "Databasklustret initierades med BLCKSZ %d, men servern kompilerades med BLCKSZ %d." + +#: access/transam/xlog.c:4583 access/transam/xlog.c:4590 +#: access/transam/xlog.c:4597 access/transam/xlog.c:4604 +#: access/transam/xlog.c:4611 access/transam/xlog.c:4618 +#: access/transam/xlog.c:4625 access/transam/xlog.c:4633 +#: access/transam/xlog.c:4640 access/transam/xlog.c:4649 +#: access/transam/xlog.c:4656 +#, c-format +msgid "It looks like you need to recompile or initdb." +msgstr "Det verkar som om du måste kompilera om eller köra initdb." + +#: access/transam/xlog.c:4587 +#, c-format +msgid "The database cluster was initialized with RELSEG_SIZE %d, but the server was compiled with RELSEG_SIZE %d." +msgstr "Databasklustret initierades med RELSEG_SIZE %d, men servern kompilerades med RELSEG_SIZE %d." + +#: access/transam/xlog.c:4594 +#, c-format +msgid "The database cluster was initialized with XLOG_BLCKSZ %d, but the server was compiled with XLOG_BLCKSZ %d." +msgstr "Databasklustret initierades med XLOG_BLCKSZ %d, men servern kompilerades med XLOG_BLCKSZ %d." + +#: access/transam/xlog.c:4601 +#, c-format +msgid "The database cluster was initialized with NAMEDATALEN %d, but the server was compiled with NAMEDATALEN %d." +msgstr "Databasklustret initierades med NAMEDATALEN %d, men servern kompilerades med NAMEDATALEN %d."
+ +#: access/transam/xlog.c:4608 +#, c-format +msgid "The database cluster was initialized with INDEX_MAX_KEYS %d, but the server was compiled with INDEX_MAX_KEYS %d." +msgstr "Databasklustret initierades med INDEX_MAX_KEYS %d, men servern kompilerades med INDEX_MAX_KEYS %d." + +#: access/transam/xlog.c:4615 +#, c-format +msgid "The database cluster was initialized with TOAST_MAX_CHUNK_SIZE %d, but the server was compiled with TOAST_MAX_CHUNK_SIZE %d." +msgstr "Databasklustret initierades med TOAST_MAX_CHUNK_SIZE %d, men servern kompilerades med TOAST_MAX_CHUNK_SIZE %d." + +#: access/transam/xlog.c:4622 +#, c-format +msgid "The database cluster was initialized with LOBLKSIZE %d, but the server was compiled with LOBLKSIZE %d." +msgstr "Databasklustret initierades med LOBLKSIZE %d, men servern kompilerades med LOBLKSIZE %d." + +#: access/transam/xlog.c:4631 +#, c-format +msgid "The database cluster was initialized without USE_FLOAT4_BYVAL but the server was compiled with USE_FLOAT4_BYVAL." +msgstr "Databasklustret initierades utan USE_FLOAT4_BYVAL, men servern kompilerades med USE_FLOAT4_BYVAL." + +#: access/transam/xlog.c:4638 +#, c-format +msgid "The database cluster was initialized with USE_FLOAT4_BYVAL but the server was compiled without USE_FLOAT4_BYVAL." +msgstr "Databasklustret initierades med USE_FLOAT4_BYVAL, men servern kompilerades utan USE_FLOAT4_BYVAL." + +#: access/transam/xlog.c:4647 +#, c-format +msgid "The database cluster was initialized without USE_FLOAT8_BYVAL but the server was compiled with USE_FLOAT8_BYVAL." +msgstr "Databasklustret initierades utan USE_FLOAT8_BYVAL, men servern kompilerades med USE_FLOAT8_BYVAL." + +#: access/transam/xlog.c:4654 +#, c-format +msgid "The database cluster was initialized with USE_FLOAT8_BYVAL but the server was compiled without USE_FLOAT8_BYVAL." +msgstr "Databasklustret initierades med USE_FLOAT8_BYVAL, men servern kompilerades utan USE_FLOAT8_BYVAL." + +#: access/transam/xlog.c:4663 +#, c-format +msgid "WAL segment size must be a power of two between 1 MB and 1 GB, but the control file specifies %d byte" +msgid_plural "WAL segment size must be a power of two between 1 MB and 1 GB, but the control file specifies %d bytes" +msgstr[0] "WAL-segmentstorlek måste vara en tvåpotens mellan 1MB och 1GB men kontrollfilen anger %d byte" +msgstr[1] "WAL-segmentstorlek måste vara en tvåpotens mellan 1MB och 1GB men kontrollfilen anger %d byte" + +#: access/transam/xlog.c:4675 +#, c-format +msgid "\"min_wal_size\" must be at least twice \"wal_segment_size\"." +msgstr "\"min_wal_size\" måste vara minst dubbla \"wal_segment_size\"." + +#: access/transam/xlog.c:4679 +#, c-format +msgid "\"max_wal_size\" must be at least twice \"wal_segment_size\"." +msgstr "\"max_wal_size\" måste vara minst dubbla \"wal_segment_size\"." 
+ +#: access/transam/xlog.c:5066 +#, c-format +msgid "could not generate secret authorization token" +msgstr "kunde inte generera hemligt auktorisationstoken" + +#: access/transam/xlog.c:5156 +#, c-format +msgid "could not write bootstrap write-ahead log file: %m" +msgstr "kunde inte skriva bootstrap-write-ahead-loggfil: %m" + +#: access/transam/xlog.c:5164 +#, c-format +msgid "could not fsync bootstrap write-ahead log file: %m" +msgstr "kunde inte fsync:a bootstrap-write-ahead-loggfil: %m" + +#: access/transam/xlog.c:5170 +#, c-format +msgid "could not close bootstrap write-ahead log file: %m" +msgstr "kunde inte stänga bootstrap-write-ahead-loggfil: %m" + +#: access/transam/xlog.c:5252 +#, c-format +msgid "could not open recovery command file \"%s\": %m" +msgstr "kunde inte öppna återställningskommandofil \"%s\": %m" + +#: access/transam/xlog.c:5298 access/transam/xlog.c:5412 +#, c-format +msgid "invalid value for recovery parameter \"%s\": \"%s\"" +msgstr "ogiltigt värde för återställningsparameter \"%s\": \"%s\"" + +#: access/transam/xlog.c:5301 +#, c-format +msgid "Valid values are \"pause\", \"promote\", and \"shutdown\"." +msgstr "Giltiga värden är \"pause\", \"promote\" och \"shutdown\"." + +#: access/transam/xlog.c:5321 +#, c-format +msgid "recovery_target_timeline is not a valid number: \"%s\"" +msgstr "recovery_target_timeline är inte ett giltigt nummer: \"%s\"" + +#: access/transam/xlog.c:5338 +#, c-format +msgid "recovery_target_xid is not a valid number: \"%s\"" +msgstr "recovery_target_xid är inte ett giltigt nummer: \"%s\"" + +#: access/transam/xlog.c:5358 +#, c-format +msgid "recovery_target_time is not a valid timestamp: \"%s\"" +msgstr "recovery_target_time är inte en giltig tidsstämpel: \"%s\"" + +#: access/transam/xlog.c:5381 +#, c-format +msgid "recovery_target_name is too long (maximum %d characters)" +msgstr "recovery_target_name är för lång (maximalt %d tecken)" + +#: access/transam/xlog.c:5415 +#, c-format +msgid "The only allowed value is \"immediate\"." +msgstr "Det enda tillåtna värdet är \"immediate\"."
+ +#: access/transam/xlog.c:5428 access/transam/xlog.c:5439 +#: commands/extension.c:547 commands/extension.c:555 utils/misc/guc.c:5983 +#, c-format +msgid "parameter \"%s\" requires a Boolean value" +msgstr "parameter \"%s\" kräver ett boolskt värde" + +#: access/transam/xlog.c:5474 +#, c-format +msgid "parameter \"%s\" requires a temporal value" +msgstr "parameter \"%s\" kräver ett tidsvärde" + +#: access/transam/xlog.c:5476 catalog/dependency.c:969 catalog/dependency.c:970 +#: catalog/dependency.c:976 catalog/dependency.c:977 catalog/dependency.c:988 +#: catalog/dependency.c:989 commands/tablecmds.c:1069 +#: commands/tablecmds.c:10803 commands/user.c:1064 commands/view.c:505 +#: libpq/auth.c:336 replication/syncrep.c:1158 storage/lmgr/deadlock.c:1139 +#: storage/lmgr/proc.c:1322 utils/adt/acl.c:5269 utils/misc/guc.c:6005 +#: utils/misc/guc.c:6098 utils/misc/guc.c:10086 utils/misc/guc.c:10120 +#: utils/misc/guc.c:10154 utils/misc/guc.c:10188 utils/misc/guc.c:10223 +#, c-format +msgid "%s" +msgstr "%s" + +#: access/transam/xlog.c:5483 +#, c-format +msgid "unrecognized recovery parameter \"%s\"" +msgstr "okänd återställningsparameter \"%s\"" + +#: access/transam/xlog.c:5494 +#, c-format +msgid "recovery command file \"%s\" specified neither primary_conninfo nor restore_command" +msgstr "återställningskommandofil \"%s\" angav inte vare sig primary_conninfo eller restore_command" + +#: access/transam/xlog.c:5496 +#, c-format +msgid "The database server will regularly poll the pg_wal subdirectory to check for files placed there." +msgstr "Databasservern kommer med jämna mellanrum att poll:a pg_wal-underkatalogen för att se om filer placerats där." + +#: access/transam/xlog.c:5503 +#, c-format +msgid "recovery command file \"%s\" must specify restore_command when standby mode is not enabled" +msgstr "återställningskommandofil \"%s\" måste ange restore_command när standby-läge inte är påslaget" + +#: access/transam/xlog.c:5524 +#, c-format +msgid "standby mode is not supported by single-user servers" +msgstr "standby-läge stöds inte av enanvändarservrar" + +#: access/transam/xlog.c:5543 +#, c-format +msgid "recovery target timeline %u does not exist" +msgstr "återställningsmåltidslinje %u finns inte" + +#: access/transam/xlog.c:5664 +#, c-format +msgid "archive recovery complete" +msgstr "arkivåterställning klar" + +#: access/transam/xlog.c:5723 access/transam/xlog.c:5989 +#, c-format +msgid "recovery stopping after reaching consistency" +msgstr "återställning stoppad efter att ha uppnått konsistens" + +#: access/transam/xlog.c:5744 +#, c-format +msgid "recovery stopping before WAL location (LSN) \"%X/%X\"" +msgstr "återställning stoppad före WAL-position (LSN) \"%X/%X\"" + +#: access/transam/xlog.c:5830 +#, c-format +msgid "recovery stopping before commit of transaction %u, time %s" +msgstr "återställning stoppad före commit av transaktion %u, tid %s" + +#: access/transam/xlog.c:5837 +#, c-format +msgid "recovery stopping before abort of transaction %u, time %s" +msgstr "återställning stoppad före abort av transaktion %u, tid %s" + +#: access/transam/xlog.c:5883 +#, c-format +msgid "recovery stopping at restore point \"%s\", time %s" +msgstr "återställning stoppad vid återställningspunkt \"%s\", tid %s" + +#: access/transam/xlog.c:5901 +#, c-format +msgid "recovery stopping after WAL location (LSN) \"%X/%X\"" +msgstr "återställning stoppad efter WAL-position (LSN) \"%X/%X\"" + +#: access/transam/xlog.c:5969 +#, c-format +msgid "recovery stopping after commit of transaction %u, time %s"
+msgstr "återställning stoppad efter commit av transaktion %u, tid %s" + +#: access/transam/xlog.c:5977 +#, c-format +msgid "recovery stopping after abort of transaction %u, time %s" +msgstr "återställning stoppad efter abort av transaktion %u, tid %s" + +#: access/transam/xlog.c:6017 +#, c-format +msgid "recovery has paused" +msgstr "återställning har pausats" + +#: access/transam/xlog.c:6018 +#, c-format +msgid "Execute pg_wal_replay_resume() to continue." +msgstr "Kör pg_wal_replay_resume() för att fortsätta." + +#: access/transam/xlog.c:6226 +#, c-format +msgid "hot standby is not possible because %s = %d is a lower setting than on the master server (its value was %d)" +msgstr "hot standby är inte möjligt då %s = %d har ett lägre värde än på masterservern (dess värde var %d)" + +#: access/transam/xlog.c:6252 +#, c-format +msgid "WAL was generated with wal_level=minimal, data may be missing" +msgstr "WAL genererades med wal_level=minimal, data kan saknas" + +#: access/transam/xlog.c:6253 +#, c-format +msgid "This happens if you temporarily set wal_level=minimal without taking a new base backup." +msgstr "Detta händer om du temporärt sätter wal_level=minimal utan att ta en ny basbackup." + +#: access/transam/xlog.c:6264 +#, c-format +msgid "hot standby is not possible because wal_level was not set to \"replica\" or higher on the master server" +msgstr "hot standby är inte möjligt då wal_level inte satts till \"replica\" eller högre på masterservern" + +#: access/transam/xlog.c:6265 +#, c-format +msgid "Either set wal_level to \"replica\" on the master, or turn off hot_standby here." +msgstr "Antingen sätt wal_level till \"replica\" på mastern eller stäng av hot_standby här." + +#: access/transam/xlog.c:6317 +#, c-format +msgid "control file contains invalid data" +msgstr "kontrollfil innehåller ogiltig data" + +#: access/transam/xlog.c:6323 +#, c-format +msgid "database system was shut down at %s" +msgstr "databassystemet stängdes ner vid %s" + +#: access/transam/xlog.c:6328 +#, c-format +msgid "database system was shut down in recovery at %s" +msgstr "databassystemet stängdes ner under återställning vid %s" + +#: access/transam/xlog.c:6332 +#, c-format +msgid "database system shutdown was interrupted; last known up at %s" +msgstr "nedstängning av databasen avbröts; senast kända upptidpunkt vid %s" + +#: access/transam/xlog.c:6336 +#, c-format +msgid "database system was interrupted while in recovery at %s" +msgstr "databassystemet avbröts under återställning vid %s" + +#: access/transam/xlog.c:6338 +#, c-format +msgid "This probably means that some data is corrupted and you will have to use the last backup for recovery." +msgstr "Det betyder troligen att en del data är förstörd och du behöver återställa databasen från den senaste backup:en." + +#: access/transam/xlog.c:6342 +#, c-format +msgid "database system was interrupted while in recovery at log time %s" +msgstr "databassystemet avbröts under återställning vid loggtid %s" + +#: access/transam/xlog.c:6344 +#, c-format +msgid "If this has occurred more than once some data might be corrupted and you might need to choose an earlier recovery target." +msgstr "Om detta har hänt mer än en gång så kan data vara korrupt och du kanske måste återställa till ett tidigare återställningsmål." 
+ +#: access/transam/xlog.c:6348 +#, c-format +msgid "database system was interrupted; last known up at %s" +msgstr "databassystemet avbröts; senast kända upptidpunkt vid %s" + +#: access/transam/xlog.c:6404 +#, c-format +msgid "entering standby mode" +msgstr "går in i standby-läge" + +#: access/transam/xlog.c:6407 +#, c-format +msgid "starting point-in-time recovery to XID %u" +msgstr "startar point-in-time-återställning till XID %u" + +#: access/transam/xlog.c:6411 +#, c-format +msgid "starting point-in-time recovery to %s" +msgstr "startar point-in-time-återställning till %s" + +#: access/transam/xlog.c:6415 +#, c-format +msgid "starting point-in-time recovery to \"%s\"" +msgstr "startar point-in-time-återställning till \"%s\"" + +#: access/transam/xlog.c:6419 +#, c-format +msgid "starting point-in-time recovery to WAL location (LSN) \"%X/%X\"" +msgstr "startar point-in-time-återställning till WAL-position (LSN) \"%X/%X\"" + +#: access/transam/xlog.c:6424 +#, c-format +msgid "starting point-in-time recovery to earliest consistent point" +msgstr "startar point-in-time-återställning till tidigaste konsistenta punkt" + +#: access/transam/xlog.c:6427 +#, c-format +msgid "starting archive recovery" +msgstr "startar arkivåterställning" + +#: access/transam/xlog.c:6478 access/transam/xlog.c:6603 +#, c-format +msgid "checkpoint record is at %X/%X" +msgstr "checkpoint-posten är vid %X/%X" + +#: access/transam/xlog.c:6492 +#, c-format +msgid "could not find redo location referenced by checkpoint record" +msgstr "kunde inte hitta redo-position refererad av checkpoint-post" + +#: access/transam/xlog.c:6493 access/transam/xlog.c:6500 +#, c-format +msgid "If you are not restoring from a backup, try removing the file \"%s/backup_label\"." +msgstr "Om du inte håller på att återställa från en backup, försök med att ta bort filen \"%s/backup_label\"." + +#: access/transam/xlog.c:6499 +#, c-format +msgid "could not locate required checkpoint record" +msgstr "kunde inte hitta den checkpoint-post som krävs" + +#: access/transam/xlog.c:6525 commands/tablespace.c:641 +#, c-format +msgid "could not create symbolic link \"%s\": %m" +msgstr "kunde inte skapa symbolisk länk \"%s\": %m" + +#: access/transam/xlog.c:6557 access/transam/xlog.c:6563 +#, c-format +msgid "ignoring file \"%s\" because no file \"%s\" exists" +msgstr "hoppar över fil \"%s\" då ingen fil \"%s\" finns" + +#: access/transam/xlog.c:6559 access/transam/xlog.c:11536 +#, c-format +msgid "File \"%s\" was renamed to \"%s\"." +msgstr "Filen \"%s\" döptes om till \"%s\"." + +#: access/transam/xlog.c:6565 +#, c-format +msgid "Could not rename file \"%s\" to \"%s\": %m." +msgstr "Kunde inte döpa om fil \"%s\" till \"%s\": %m." + +#: access/transam/xlog.c:6615 +#, c-format +msgid "could not locate a valid checkpoint record" +msgstr "kunde inte hitta en giltig checkpoint-post" + +#: access/transam/xlog.c:6653 +#, c-format +msgid "requested timeline %u is not a child of this server's history" +msgstr "efterfrågad tidslinje %u är inte ett barn till denna servers historik" + +#: access/transam/xlog.c:6655 +#, c-format +msgid "Latest checkpoint is at %X/%X on timeline %u, but in the history of the requested timeline, the server forked off from that timeline at %X/%X." +msgstr "Senaste checkpoint är vid %X/%X på tidslinje %u, men i historiken för efterfrågad tidslinje så avvek servern från den tidslinjen vid %X/%X."
+ +#: access/transam/xlog.c:6671 +#, c-format +msgid "requested timeline %u does not contain minimum recovery point %X/%X on timeline %u" +msgstr "efterfågan tidslinje %u innehåller inte minimal återställningspunkt %X/%X på tidslinje %u" + +#: access/transam/xlog.c:6702 +#, c-format +msgid "invalid next transaction ID" +msgstr "nästa transaktions-ID ogiltig" + +#: access/transam/xlog.c:6796 +#, c-format +msgid "invalid redo in checkpoint record" +msgstr "ogiltig redo i checkpoint-post" + +#: access/transam/xlog.c:6807 +#, c-format +msgid "invalid redo record in shutdown checkpoint" +msgstr "ogiltig redo-post i nedstängnings-checkpoint" + +#: access/transam/xlog.c:6835 +#, c-format +msgid "database system was not properly shut down; automatic recovery in progress" +msgstr "databassystemet stängdes inte ned korrekt; automatisk återställning pågår" + +#: access/transam/xlog.c:6839 +#, c-format +msgid "crash recovery starts in timeline %u and has target timeline %u" +msgstr "krashåterställning startar i tidslinje %u och har måltidslinje %u" + +#: access/transam/xlog.c:6882 +#, c-format +msgid "backup_label contains data inconsistent with control file" +msgstr "backup_label innehåller data som inte stämmer med kontrollfil" + +#: access/transam/xlog.c:6883 +#, c-format +msgid "This means that the backup is corrupted and you will have to use another backup for recovery." +msgstr "Det betyder att backup:en är trasig och du behöver använda en annan backup för att återställa." + +#: access/transam/xlog.c:6957 +#, c-format +msgid "initializing for hot standby" +msgstr "initierar för hot standby" + +#: access/transam/xlog.c:7089 +#, c-format +msgid "redo starts at %X/%X" +msgstr "redo startar vid %X/%X" + +#: access/transam/xlog.c:7323 +#, c-format +msgid "requested recovery stop point is before consistent recovery point" +msgstr "efterfrågad återställningsstopppunkt är före en konsistent återställningspunkt" + +#: access/transam/xlog.c:7361 +#, c-format +msgid "redo done at %X/%X" +msgstr "redo gjord vid %X/%X" + +#: access/transam/xlog.c:7366 +#, c-format +msgid "last completed transaction was at log time %s" +msgstr "senaste kompletta transaktionen var vid loggtid %s" + +#: access/transam/xlog.c:7375 +#, c-format +msgid "redo is not required" +msgstr "redo behövs inte" + +#: access/transam/xlog.c:7450 access/transam/xlog.c:7454 +#, c-format +msgid "WAL ends before end of online backup" +msgstr "WAL slutar före sluttiden av online-backup:en" + +#: access/transam/xlog.c:7451 +#, c-format +msgid "All WAL generated while online backup was taken must be available at recovery." +msgstr "Alla genererade WAL under tiden online-backup:en togs måste vara tillgängliga vid återställning." + +#: access/transam/xlog.c:7455 +#, c-format +msgid "Online backup started with pg_start_backup() must be ended with pg_stop_backup(), and all WAL up to that point must be available at recovery." +msgstr "Online-backup startad med pg_start_backup() måste avslutas med pg_stop_backup() och alla WAL fram till den punkten måste vara tillgängliga vid återställning." 
+ +#: access/transam/xlog.c:7458 +#, c-format +msgid "WAL ends before consistent recovery point" +msgstr "WAL avslutas innan konstistent återställningspunkt" + +#: access/transam/xlog.c:7485 +#, c-format +msgid "selected new timeline ID: %u" +msgstr "valt nytt tidslinje-ID: %u" + +#: access/transam/xlog.c:7914 +#, c-format +msgid "consistent recovery state reached at %X/%X" +msgstr "konsistent återställningstillstånd uppnått vid %X/%X" + +#: access/transam/xlog.c:8106 +#, c-format +msgid "invalid primary checkpoint link in control file" +msgstr "ogiltig primär checkpoint-länk i kontrollfil" + +#: access/transam/xlog.c:8110 +#, c-format +msgid "invalid checkpoint link in backup_label file" +msgstr "ogiltig checkpoint-länk i \"backup_label\"-fil" + +#: access/transam/xlog.c:8127 +#, c-format +msgid "invalid primary checkpoint record" +msgstr "ogiltig primär checkpoint-post" + +#: access/transam/xlog.c:8131 +#, c-format +msgid "invalid checkpoint record" +msgstr "ogiltig checkpoint-post" + +#: access/transam/xlog.c:8142 +#, c-format +msgid "invalid resource manager ID in primary checkpoint record" +msgstr "ogiltig resurshanterar-ID i primär checkpoint-post" + +#: access/transam/xlog.c:8146 +#, c-format +msgid "invalid resource manager ID in checkpoint record" +msgstr "ogiltig resurshanterar-ID i checkpoint-post" + +#: access/transam/xlog.c:8159 +#, c-format +msgid "invalid xl_info in primary checkpoint record" +msgstr "ogiltig xl_info i primär checkpoint-post" + +#: access/transam/xlog.c:8163 +#, c-format +msgid "invalid xl_info in checkpoint record" +msgstr "ogiltig xl_info i checkpoint-post" + +#: access/transam/xlog.c:8174 +#, c-format +msgid "invalid length of primary checkpoint record" +msgstr "ogiltig längd i primär checkpoint-post" + +#: access/transam/xlog.c:8178 +#, c-format +msgid "invalid length of checkpoint record" +msgstr "ogiltig längd på checkpoint-post" + +#: access/transam/xlog.c:8384 +#, c-format +msgid "shutting down" +msgstr "stänger ner" + +#: access/transam/xlog.c:8703 +#, c-format +msgid "checkpoint skipped because system is idle" +msgstr "checkpoint överhoppad på grund av att systemet är olastat" + +#: access/transam/xlog.c:8908 +#, c-format +msgid "concurrent write-ahead log activity while database system is shutting down" +msgstr "samtidig write-ahead-logg-aktivitet när databassystemet stängs ner" + +#: access/transam/xlog.c:9161 +#, c-format +msgid "skipping restartpoint, recovery has already ended" +msgstr "hoppar över omstartpunkt, återställning har redan avslutats" + +#: access/transam/xlog.c:9184 +#, c-format +msgid "skipping restartpoint, already performed at %X/%X" +msgstr "hoppar över omstartpunkt, redan gjorde vid %X/%X" + +#: access/transam/xlog.c:9359 +#, c-format +msgid "recovery restart point at %X/%X" +msgstr "återställningens omstartspunkt vid %X/%X" + +#: access/transam/xlog.c:9361 +#, c-format +msgid "Last completed transaction was at log time %s." 
+msgstr "Senaste kompletta transaktionen var vid loggtid %s" + +#: access/transam/xlog.c:9495 +#, c-format +msgid "restore point \"%s\" created at %X/%X" +msgstr "återställningspunkt \"%s\" skapad vid %X/%X" + +#: access/transam/xlog.c:9625 +#, c-format +msgid "unexpected previous timeline ID %u (current timeline ID %u) in checkpoint record" +msgstr "oväntad föregående tidslinje-ID %u (nuvarande tidslinje-ID %u) i checkpoint-post" + +#: access/transam/xlog.c:9634 +#, c-format +msgid "unexpected timeline ID %u (after %u) in checkpoint record" +msgstr "oväntad tidslinje-ID %u (efter %u) i checkpoint-post" + +#: access/transam/xlog.c:9650 +#, c-format +msgid "unexpected timeline ID %u in checkpoint record, before reaching minimum recovery point %X/%X on timeline %u" +msgstr "oväntad tidslinje-ID %u i checkpoint-post, innan vi nått minimal återställningspunkt %X/%X på tidslinje %u" + +#: access/transam/xlog.c:9726 +#, c-format +msgid "online backup was canceled, recovery cannot continue" +msgstr "online-backup avbröts, återställning kan inte fortsätta" + +#: access/transam/xlog.c:9782 access/transam/xlog.c:9838 +#: access/transam/xlog.c:9861 +#, c-format +msgid "unexpected timeline ID %u (should be %u) in checkpoint record" +msgstr "oväntad tidslinje-ID %u (skall vara %u) i checkpoint-post" + +#: access/transam/xlog.c:10137 +#, c-format +msgid "could not fsync log segment %s: %m" +msgstr "kunde inte fsync:a loggsegment %s: %m" + +#: access/transam/xlog.c:10162 +#, c-format +msgid "could not fsync log file %s: %m" +msgstr "kunde inte fsync:a loggfil %s: %m" + +#: access/transam/xlog.c:10170 +#, c-format +msgid "could not fsync write-through log file %s: %m" +msgstr "kunde inte fsync:a skriv-igenom-loggfil %s: %m" + +#: access/transam/xlog.c:10179 +#, c-format +msgid "could not fdatasync log file %s: %m" +msgstr "kunde inte fdatasync:a loggfil %s: %m" + +#: access/transam/xlog.c:10270 access/transam/xlog.c:10797 +#: access/transam/xlogfuncs.c:287 access/transam/xlogfuncs.c:314 +#: access/transam/xlogfuncs.c:353 access/transam/xlogfuncs.c:374 +#: access/transam/xlogfuncs.c:395 +#, c-format +msgid "WAL control functions cannot be executed during recovery." +msgstr "WAL-kontrollfunktioner kan inte köras under återställning." + +#: access/transam/xlog.c:10279 access/transam/xlog.c:10806 +#, c-format +msgid "WAL level not sufficient for making an online backup" +msgstr "WAL-nivå inte tillräcklig för att kunna skapa en online-backup" + +#: access/transam/xlog.c:10280 access/transam/xlog.c:10807 +#: access/transam/xlogfuncs.c:320 +#, c-format +msgid "wal_level must be set to \"replica\" or \"logical\" at server start." +msgstr "wal_level måste vara satt till \"replica\" eller \"logical\" vid serverstart." + +#: access/transam/xlog.c:10285 +#, c-format +msgid "backup label too long (max %d bytes)" +msgstr "backup-etikett för lång (max %d byte)" + +#: access/transam/xlog.c:10322 access/transam/xlog.c:10598 +#: access/transam/xlog.c:10636 +#, c-format +msgid "a backup is already in progress" +msgstr "en backup är redan på gång" + +#: access/transam/xlog.c:10323 +#, c-format +msgid "Run pg_stop_backup() and try again." +msgstr "Kör pg_stop_backup() och försök igen." 
+ +#: access/transam/xlog.c:10419 +#, c-format +msgid "WAL generated with full_page_writes=off was replayed since last restartpoint" +msgstr "WAL skapad med full_page_writes=off har återspelats sedan senaste omstartpunkten" + +#: access/transam/xlog.c:10421 access/transam/xlog.c:11002 +#, c-format +msgid "This means that the backup being taken on the standby is corrupt and should not be used. Enable full_page_writes and run CHECKPOINT on the master, and then try an online backup again." +msgstr "Det betyder att backup:en som tas på standby:en är trasig och inte skall användas. Slå på full_page_writes och kör CHECKPOINT på master och försök sedan ta en ny online-backup igen." + +#: access/transam/xlog.c:10489 replication/basebackup.c:1222 +#: utils/adt/misc.c:517 +#, c-format +msgid "could not read symbolic link \"%s\": %m" +msgstr "kan inte läsa symbolisk länk \"%s\": %m" + +#: access/transam/xlog.c:10496 replication/basebackup.c:1227 +#: utils/adt/misc.c:522 +#, c-format +msgid "symbolic link \"%s\" target is too long" +msgstr "mål för symbolisk länk \"%s\" är för långt" + +#: access/transam/xlog.c:10548 commands/tablespace.c:391 +#: commands/tablespace.c:553 replication/basebackup.c:1242 utils/adt/misc.c:530 +#, c-format +msgid "tablespaces are not supported on this platform" +msgstr "tabellutrymmen stöds inte på denna plattform" + +#: access/transam/xlog.c:10592 access/transam/xlog.c:10630 +#: access/transam/xlog.c:10845 access/transam/xlogarchive.c:105 +#: access/transam/xlogarchive.c:265 commands/copy.c:1872 commands/copy.c:3156 +#: commands/extension.c:3319 commands/tablespace.c:782 +#: commands/tablespace.c:873 guc-file.l:1004 replication/basebackup.c:513 +#: replication/basebackup.c:583 replication/logical/snapbuild.c:1518 +#: storage/file/copydir.c:68 storage/file/copydir.c:107 storage/file/fd.c:1732 +#: storage/file/fd.c:3098 storage/file/fd.c:3277 storage/file/fd.c:3362 +#: utils/adt/dbsize.c:70 utils/adt/dbsize.c:222 utils/adt/dbsize.c:302 +#: utils/adt/genfile.c:131 utils/adt/genfile.c:382 +#, c-format +msgid "could not stat file \"%s\": %m" +msgstr "kunde inte göra stat() på fil \"%s\": %m" + +#: access/transam/xlog.c:10599 access/transam/xlog.c:10637 +#, c-format +msgid "If you're sure there is no backup in progress, remove file \"%s\" and try again." +msgstr "Om du är säker på att det inte pågår någon backup så ta bort filen \"%s\" och försök igen." + +#: access/transam/xlog.c:10616 access/transam/xlog.c:10654 +#: access/transam/xlog.c:11065 postmaster/syslogger.c:1392 +#: postmaster/syslogger.c:1405 +#, c-format +msgid "could not write file \"%s\": %m" +msgstr "kunde inte skriva fil \"%s\": %m" + +#: access/transam/xlog.c:10822 +#, c-format +msgid "exclusive backup not in progress" +msgstr "exklusiv backup är inte på gång" + +#: access/transam/xlog.c:10849 +#, c-format +msgid "a backup is not in progress" +msgstr "ingen backup är på gång" + +#: access/transam/xlog.c:10935 access/transam/xlog.c:10948 +#: access/transam/xlog.c:11309 access/transam/xlog.c:11315 +#: access/transam/xlog.c:11363 access/transam/xlog.c:11436 +#: access/transam/xlogfuncs.c:688 +#, c-format +msgid "invalid data in file \"%s\"" +msgstr "felaktig data i fil \"%s\"" + +#: access/transam/xlog.c:10952 replication/basebackup.c:1079 +#, c-format +msgid "the standby was promoted during online backup" +msgstr "standby:en befordrades under online-backup" + +#: access/transam/xlog.c:10953 replication/basebackup.c:1080 +#, c-format +msgid "This means that the backup being taken is corrupt and should not be used. 
Try taking another online backup." +msgstr "Det betyder att backupen som tas är trasig och inte skall användas. Försök ta en ny online-backup." + +#: access/transam/xlog.c:11000 +#, c-format +msgid "WAL generated with full_page_writes=off was replayed during online backup" +msgstr "WAL skapad med full_page_writes=off återspelades under online-backup" + +#: access/transam/xlog.c:11120 +#, c-format +msgid "pg_stop_backup cleanup done, waiting for required WAL segments to be archived" +msgstr "pg_stop_backup-uppstädning klar, väntar på att de WAL-segment som krävs blir arkiverade" + +#: access/transam/xlog.c:11130 +#, c-format +msgid "pg_stop_backup still waiting for all required WAL segments to be archived (%d seconds elapsed)" +msgstr "pg_stop_backup väntar fortfarande på att alla krävda WAL-segment skall bli arkiverade (%d sekunder har gått)" + +#: access/transam/xlog.c:11132 +#, c-format +msgid "Check that your archive_command is executing properly. pg_stop_backup can be canceled safely, but the database backup will not be usable without all the WAL segments." +msgstr "Kontrollera att ditt archive_command kör som det skall. pg_stop_backup kan avbrytas på ett säkert sätt men databasbackup:en kommer inte vara användbar utan att alla WAL-segment finns." + +#: access/transam/xlog.c:11139 +#, c-format +msgid "pg_stop_backup complete, all required WAL segments have been archived" +msgstr "pg_stop_backup komplett, alla krävda WAL-segment har arkiverats" + +#: access/transam/xlog.c:11143 +#, c-format +msgid "WAL archiving is not enabled; you must ensure that all required WAL segments are copied through other means to complete the backup" +msgstr "WAL-arkivering är inte påslagen; du måste se till att alla krävda WAL-segment har kopierats på annat sätt för att backup:en skall vara komplett" + +#: access/transam/xlog.c:11346 +#, c-format +msgid "backup time %s in file \"%s\"" +msgstr "backuptid %s i fil \"%s\"" + +#: access/transam/xlog.c:11351 +#, c-format +msgid "backup label %s in file \"%s\"" +msgstr "backup-etikett %s i fil \"%s\"" + +#: access/transam/xlog.c:11364 +#, c-format +msgid "Timeline ID parsed is %u, but expected %u" +msgstr "Parsad tidslinje-ID är %u men förväntade sig %u" + +#: access/transam/xlog.c:11368 +#, c-format +msgid "backup timeline %u in file \"%s\"" +msgstr "backuptidslinje %u i fil \"%s\"" + +#. translator: %s is a WAL record description +#: access/transam/xlog.c:11476 +#, c-format +msgid "WAL redo at %X/%X for %s" +msgstr "WAL-redo vid %X/%X för %s" + +#: access/transam/xlog.c:11525 +#, c-format +msgid "online backup mode was not canceled" +msgstr "online backupläge har ej avbrutits" + +#: access/transam/xlog.c:11526 +#, c-format +msgid "File \"%s\" could not be renamed to \"%s\": %m." +msgstr "Filen \"%s\" kunde inte döpas om till \"%s\": %m." + +#: access/transam/xlog.c:11535 access/transam/xlog.c:11547 +#: access/transam/xlog.c:11557 +#, c-format +msgid "online backup mode canceled" +msgstr "online backupläge avbrutet" + +#: access/transam/xlog.c:11548 +#, c-format +msgid "Files \"%s\" and \"%s\" were renamed to \"%s\" and \"%s\", respectively." +msgstr "Filerna \"%s\" och \"%s\" döptes om till \"%s\" respektive \"%s\"." + +#: access/transam/xlog.c:11558 +#, c-format +msgid "File \"%s\" was renamed to \"%s\", but file \"%s\" could not be renamed to \"%s\": %m." +msgstr "Filen \"%s\" döptes om till \"%s\", men filen \"%s\" kunde inte döpas om till \"%s\": %m." 
+ +#: access/transam/xlog.c:11682 access/transam/xlogutils.c:726 +#: replication/walreceiver.c:1025 replication/walsender.c:2413 +#, c-format +msgid "could not seek in log segment %s to offset %u: %m" +msgstr "kunde inte söka i loggsegment %s till offset %u: %m" + +#: access/transam/xlog.c:11696 +#, c-format +msgid "could not read from log segment %s, offset %u: %m" +msgstr "kunde inte läsa från loggsegment %s, offset %u: %m" + +#: access/transam/xlog.c:12225 +#, c-format +msgid "received promote request" +msgstr "tog emot förfrågan om befordran" + +#: access/transam/xlog.c:12238 +#, c-format +msgid "trigger file found: %s" +msgstr "utlösarfil hittad: %s" + +#: access/transam/xlog.c:12247 +#, c-format +msgid "could not stat trigger file \"%s\": %m" +msgstr "kunde inte göra stat() på utlösarfil \"%s\": %m" + +#: access/transam/xlogarchive.c:244 +#, c-format +msgid "archive file \"%s\" has wrong size: %lu instead of %lu" +msgstr "arkivfil \"%s\" har fel storlek: %lu istället för %lu" + +#: access/transam/xlogarchive.c:253 +#, c-format +msgid "restored log file \"%s\" from archive" +msgstr "återställd loggfil \"%s\" från arkiv" + +#: access/transam/xlogarchive.c:303 +#, c-format +msgid "could not restore file \"%s\" from archive: %s" +msgstr "kunde inte återställa fil \"%s\" från arkiv: %s" + +#. translator: First %s represents a recovery.conf parameter name like +#. "recovery_end_command", the 2nd is the value of that parameter, the +#. third an already translated error message. +#: access/transam/xlogarchive.c:416 +#, c-format +msgid "%s \"%s\": %s" +msgstr "%s \"%s\": %s" + +#: access/transam/xlogarchive.c:459 postmaster/syslogger.c:1416 +#: replication/logical/snapbuild.c:1644 replication/slot.c:590 +#: replication/slot.c:1198 replication/slot.c:1310 storage/file/fd.c:650 +#: storage/file/fd.c:745 utils/time/snapmgr.c:1318 +#, c-format +msgid "could not rename file \"%s\" to \"%s\": %m" +msgstr "kunde inte döpa om fil \"%s\" till \"%s\": %m" + +#: access/transam/xlogarchive.c:526 access/transam/xlogarchive.c:590 +#, c-format +msgid "could not create archive status file \"%s\": %m" +msgstr "kunde inte skapa arkiveringsstatusfil \"%s\": %m" + +#: access/transam/xlogarchive.c:534 access/transam/xlogarchive.c:598 +#, c-format +msgid "could not write archive status file \"%s\": %m" +msgstr "kunde inte skriva arkiveringsstatusfil \"%s\": %m" + +#: access/transam/xlogfuncs.c:54 +#, c-format +msgid "aborting backup due to backend exiting before pg_stop_backup was called" +msgstr "avbryter backup på grund av att backend:en stoppades innan pg_stop_backup anropades" + +#: access/transam/xlogfuncs.c:84 +#, c-format +msgid "a backup is already in progress in this session" +msgstr "en backup är redan på gång i denna session" + +#: access/transam/xlogfuncs.c:142 access/transam/xlogfuncs.c:224 +#, c-format +msgid "non-exclusive backup in progress" +msgstr "icke-exklusiv backup är på gång" + +#: access/transam/xlogfuncs.c:143 access/transam/xlogfuncs.c:225 +#, c-format +msgid "Did you mean to use pg_stop_backup('f')?" +msgstr "Menade du att använda pg_stop_backup('f')?" 
+ +#: access/transam/xlogfuncs.c:195 commands/event_trigger.c:1464 +#: commands/event_trigger.c:2015 commands/extension.c:1895 +#: commands/extension.c:2004 commands/extension.c:2228 commands/prepare.c:722 +#: executor/execExpr.c:2208 executor/execSRF.c:715 executor/functions.c:1034 +#: foreign/foreign.c:488 libpq/hba.c:2600 replication/logical/launcher.c:1027 +#: replication/logical/logicalfuncs.c:176 replication/logical/origin.c:1442 +#: replication/slotfuncs.c:200 replication/walsender.c:3182 +#: utils/adt/jsonfuncs.c:1701 utils/adt/jsonfuncs.c:1832 +#: utils/adt/jsonfuncs.c:2020 utils/adt/jsonfuncs.c:2147 +#: utils/adt/jsonfuncs.c:3567 utils/adt/pgstatfuncs.c:457 +#: utils/adt/pgstatfuncs.c:558 utils/fmgr/funcapi.c:62 utils/misc/guc.c:8819 +#: utils/mmgr/portalmem.c:1124 +#, c-format +msgid "set-valued function called in context that cannot accept a set" +msgstr "en funktion som returnerar en mängd anropades i kontext som inte godtar en mängd" + +#: access/transam/xlogfuncs.c:199 commands/event_trigger.c:1468 +#: commands/event_trigger.c:2019 commands/extension.c:1899 +#: commands/extension.c:2008 commands/extension.c:2232 commands/prepare.c:726 +#: foreign/foreign.c:493 libpq/hba.c:2604 replication/logical/launcher.c:1031 +#: replication/logical/logicalfuncs.c:180 replication/logical/origin.c:1446 +#: replication/slotfuncs.c:204 replication/walsender.c:3186 +#: utils/adt/pgstatfuncs.c:461 utils/adt/pgstatfuncs.c:562 +#: utils/misc/guc.c:8823 utils/misc/pg_config.c:43 utils/mmgr/portalmem.c:1128 +#, c-format +msgid "materialize mode required, but it is not allowed in this context" +msgstr "materialiserat läge krävs, men tillåts inte i denna kontext" + +#: access/transam/xlogfuncs.c:241 +#, c-format +msgid "non-exclusive backup is not in progress" +msgstr "icke-exklusiv backup är inte på gång" + +#: access/transam/xlogfuncs.c:242 +#, c-format +msgid "Did you mean to use pg_stop_backup('t')?" +msgstr "Menade du att använda pg_stop_backup('t')?" + +#: access/transam/xlogfuncs.c:319 +#, c-format +msgid "WAL level not sufficient for creating a restore point" +msgstr "WAL-nivån är inte tillräcklig för att skapa en återställningspunkt" + +#: access/transam/xlogfuncs.c:327 +#, c-format +msgid "value too long for restore point (maximum %d characters)" +msgstr "värdet för långt för en återställningspunkt (maximalt %d tecken)" + +#: access/transam/xlogfuncs.c:465 +#, c-format +msgid "pg_walfile_name_offset() cannot be executed during recovery." +msgstr "pg_walfile_name_offset() kan inte köras under återställning." + +#: access/transam/xlogfuncs.c:521 +#, c-format +msgid "pg_walfile_name() cannot be executed during recovery." +msgstr "pg_walfile_name() kan inte köras under återställning." + +#: access/transam/xlogfuncs.c:541 access/transam/xlogfuncs.c:561 +#: access/transam/xlogfuncs.c:578 +#, c-format +msgid "recovery is not in progress" +msgstr "återställning är inte i gång" + +#: access/transam/xlogfuncs.c:542 access/transam/xlogfuncs.c:562 +#: access/transam/xlogfuncs.c:579 +#, c-format +msgid "Recovery control functions can only be executed during recovery." +msgstr "Återställningskontrollfunktioner kan bara köras under återställning." 
+ +#: access/transam/xlogreader.c:299 +#, c-format +msgid "invalid record offset at %X/%X" +msgstr "ogiltig postoffset vid %X/%X" + +#: access/transam/xlogreader.c:307 +#, c-format +msgid "contrecord is requested by %X/%X" +msgstr "contrecord är begärd av %X/%X" + +#: access/transam/xlogreader.c:348 access/transam/xlogreader.c:646 +#, c-format +msgid "invalid record length at %X/%X: wanted %u, got %u" +msgstr "ogiltig postlängd vid %X/%X: förväntade %u, fick %u" + +#: access/transam/xlogreader.c:363 +#, c-format +msgid "record length %u at %X/%X too long" +msgstr "postlängd %u vid %X/%X är för lång" + +#: access/transam/xlogreader.c:404 +#, c-format +msgid "there is no contrecord flag at %X/%X" +msgstr "det finns ingen contrecord-flagga vid %X/%X" + +#: access/transam/xlogreader.c:417 +#, c-format +msgid "invalid contrecord length %u at %X/%X" +msgstr "ogiltig contrecord-längd %u vid %X/%X" + +#: access/transam/xlogreader.c:654 +#, c-format +msgid "invalid resource manager ID %u at %X/%X" +msgstr "ogiltigt resurshanterar-ID %u vid %X/%X" + +#: access/transam/xlogreader.c:668 access/transam/xlogreader.c:685 +#, c-format +msgid "record with incorrect prev-link %X/%X at %X/%X" +msgstr "post med inkorrekt prev-link %X/%X vid %X/%X" + +#: access/transam/xlogreader.c:722 +#, c-format +msgid "incorrect resource manager data checksum in record at %X/%X" +msgstr "felaktig resurshanterardatakontrollsumma i post vid %X/%X" + +#: access/transam/xlogreader.c:759 +#, c-format +msgid "invalid magic number %04X in log segment %s, offset %u" +msgstr "felaktigt magiskt nummer %04X i loggsegment %s, offset %u" + +#: access/transam/xlogreader.c:773 access/transam/xlogreader.c:824 +#, c-format +msgid "invalid info bits %04X in log segment %s, offset %u" +msgstr "ogiltiga infobitar %04X i loggsegment %s, offset %u" + +#: access/transam/xlogreader.c:799 +#, c-format +msgid "WAL file is from different database system: WAL file database system identifier is %s, pg_control database system identifier is %s" +msgstr "WAL-fil är från ett annat databassystem: WAL-filens databassystemidentifierare är %s, pg_control databassystemidentifierare är %s" + +#: access/transam/xlogreader.c:806 +#, c-format +msgid "WAL file is from different database system: incorrect segment size in page header" +msgstr "WAL-fil är från ett annat databassystem: inkorrekt segmentstorlek i sidhuvud" + +#: access/transam/xlogreader.c:812 +#, c-format +msgid "WAL file is from different database system: incorrect XLOG_BLCKSZ in page header" +msgstr "WAL-fil är från ett annat databassystem: inkorrekt XLOG_BLCKSZ i sidhuvud" + +#: access/transam/xlogreader.c:843 +#, c-format +msgid "unexpected pageaddr %X/%X in log segment %s, offset %u" +msgstr "oväntad sidadress %X/%X i loggsegment %s, offset %u" + +# FIXME +#: access/transam/xlogreader.c:868 +#, c-format +msgid "out-of-sequence timeline ID %u (after %u) in log segment %s, offset %u" +msgstr "\"ej i sekvens\"-fel på tidslinje-ID %u (efter %u) i loggsegment %s, offset %u" + +#: access/transam/xlogreader.c:1113 +#, c-format +msgid "out-of-order block_id %u at %X/%X" +msgstr "\"ej i sekvens\"-block_id %u vid %X/%X" + +#: access/transam/xlogreader.c:1136 +#, c-format +msgid "BKPBLOCK_HAS_DATA set, but no data included at %X/%X" +msgstr "BKPBLOCK_HAS_DATA satt, men ingen data inkluderad vid %X/%X" + +#: access/transam/xlogreader.c:1143 +#, c-format +msgid "BKPBLOCK_HAS_DATA not set, but data length is %u at %X/%X" +msgstr "BKPBLOCK_HAS_DATA ej satt, men datalängd är %u vid %X/%X" + +#: 
access/transam/xlogreader.c:1179 +#, c-format +msgid "BKPIMAGE_HAS_HOLE set, but hole offset %u length %u block image length %u at %X/%X" +msgstr "BKPIMAGE_HAS_HOLE satt, men håloffset %u längd %u block-image-längd %u vid %X/%X" + +#: access/transam/xlogreader.c:1195 +#, c-format +msgid "BKPIMAGE_HAS_HOLE not set, but hole offset %u length %u at %X/%X" +msgstr "BKPIMAGE_HAS_HOLE ej satt, men håloffset %u längd %u vid %X/%X" + +#: access/transam/xlogreader.c:1210 +#, c-format +msgid "BKPIMAGE_IS_COMPRESSED set, but block image length %u at %X/%X" +msgstr "BKPIMAGE_IS_COMPRESSED satt, men block-image-längd %u vid %X/%X" + +#: access/transam/xlogreader.c:1225 +#, c-format +msgid "neither BKPIMAGE_HAS_HOLE nor BKPIMAGE_IS_COMPRESSED set, but block image length is %u at %X/%X" +msgstr "varken BKPIMAGE_HAS_HOLE eller BKPIMAGE_IS_COMPRESSED satt, men block-image-längd är %u vid %X/%X" + +#: access/transam/xlogreader.c:1241 +#, c-format +msgid "BKPBLOCK_SAME_REL set but no previous rel at %X/%X" +msgstr "BKPBLOCK_SAME_REL satt men ingen tidigare rel vid %X/%X" + +#: access/transam/xlogreader.c:1253 +#, c-format +msgid "invalid block_id %u at %X/%X" +msgstr "ogiltig block_id %u vid %X/%X" + +#: access/transam/xlogreader.c:1342 +#, c-format +msgid "record with invalid length at %X/%X" +msgstr "post med ogiltig längd vid %X/%X" + +#: access/transam/xlogreader.c:1431 +#, c-format +msgid "invalid compressed image at %X/%X, block %d" +msgstr "ogiltig komprimerad image vid %X/%X, block %d" + +#: access/transam/xlogutils.c:749 replication/walsender.c:2432 +#, c-format +msgid "could not read from log segment %s, offset %u, length %lu: %m" +msgstr "kunde inte läsa från loggsegment %s, offset %u, längd %lu: %m" + +#: bootstrap/bootstrap.c:268 +#, c-format +msgid "-X requires a power of two value between 1 MB and 1 GB" +msgstr "-X kräver ett tvåpotensvärde mellan 1 MB och 1 GB" + +#: bootstrap/bootstrap.c:285 postmaster/postmaster.c:826 tcop/postgres.c:3552 +#, c-format +msgid "--%s requires a value" +msgstr "--%s kräver ett värde" + +#: bootstrap/bootstrap.c:290 postmaster/postmaster.c:831 tcop/postgres.c:3557 +#, c-format +msgid "-c %s requires a value" +msgstr "-c %s kräver ett värde" + +#: bootstrap/bootstrap.c:301 postmaster/postmaster.c:843 +#: postmaster/postmaster.c:856 +#, c-format +msgid "Try \"%s --help\" for more information.\n" +msgstr "Försök med \"%s --help\" för mer information.\n" + +#: bootstrap/bootstrap.c:310 +#, c-format +msgid "%s: invalid command-line arguments\n" +msgstr "%s: ogiltigt kommandoradsargument\n" + +#: catalog/aclchk.c:203 +#, c-format +msgid "grant options can only be granted to roles" +msgstr "\"grant option\" kan bara ges till roller" + +#: catalog/aclchk.c:326 +#, c-format +msgid "no privileges were granted for column \"%s\" of relation \"%s\"" +msgstr "inga rättigheter givna för kolumn \"%s\" i relation \"%s\"" + +#: catalog/aclchk.c:331 +#, c-format +msgid "no privileges were granted for \"%s\"" +msgstr "inga rättigheter gavs till \"%s\"" + +#: catalog/aclchk.c:339 +#, c-format +msgid "not all privileges were granted for column \"%s\" of relation \"%s\"" +msgstr "inte alla rättigheter givna för kolumn \"%s\" i relation \"%s\"" + +#: catalog/aclchk.c:344 +#, c-format +msgid "not all privileges were granted for \"%s\"" +msgstr "inte alla rättigheter givna för \"%s\"" + +#: catalog/aclchk.c:355 +#, c-format +msgid "no privileges could be revoked for column \"%s\" of relation \"%s\"" +msgstr "inga rättigheter kunde tas tillbaka från kolumn \"%s\" i relation \"%s\"" + +#: 
catalog/aclchk.c:360 +#, c-format +msgid "no privileges could be revoked for \"%s\"" +msgstr "inga rättigheter kunde tas tillbaka från \"%s\"" + +#: catalog/aclchk.c:368 +#, c-format +msgid "not all privileges could be revoked for column \"%s\" of relation \"%s\"" +msgstr "inte alla rättigheter kunde tas tillbaka från kolumn \"%s\" i relation \"%s\"" + +#: catalog/aclchk.c:373 +#, c-format +msgid "not all privileges could be revoked for \"%s\"" +msgstr "inte alla rättigheter kunde tas tillbaka från \"%s\"" + +#: catalog/aclchk.c:456 catalog/aclchk.c:995 +#, c-format +msgid "invalid privilege type %s for relation" +msgstr "ogiltig privilegietyp %s för relation" + +#: catalog/aclchk.c:460 catalog/aclchk.c:999 +#, c-format +msgid "invalid privilege type %s for sequence" +msgstr "ogiltig privilegietyp %s för sekvens" + +#: catalog/aclchk.c:464 +#, c-format +msgid "invalid privilege type %s for database" +msgstr "ogiltig privilegietyp %s för databas" + +#: catalog/aclchk.c:468 +#, c-format +msgid "invalid privilege type %s for domain" +msgstr "ogiltig privilegietyp %s för domän" + +#: catalog/aclchk.c:472 catalog/aclchk.c:1003 +#, c-format +msgid "invalid privilege type %s for function" +msgstr "ogiltig privilegietyp %s för funktion" + +#: catalog/aclchk.c:476 +#, c-format +msgid "invalid privilege type %s for language" +msgstr "ogiltig privilegietyp %s för språk" + +#: catalog/aclchk.c:480 +#, c-format +msgid "invalid privilege type %s for large object" +msgstr "ogiltig privilegietyp %s för stort objekt" + +#: catalog/aclchk.c:484 catalog/aclchk.c:1019 +#, c-format +msgid "invalid privilege type %s for schema" +msgstr "ogiltig privilegietyp %s för schema" + +#: catalog/aclchk.c:488 catalog/aclchk.c:1007 +#, c-format +msgid "invalid privilege type %s for procedure" +msgstr "ogiltig rättighetstyp %s för procedur" + +#: catalog/aclchk.c:492 catalog/aclchk.c:1011 +#, c-format +msgid "invalid privilege type %s for routine" +msgstr "ogiltig rättighetstyp %s för rutin" + +#: catalog/aclchk.c:496 +#, c-format +msgid "invalid privilege type %s for tablespace" +msgstr "ogiltig privilegietyp %s för tabellutrymme" + +#: catalog/aclchk.c:500 catalog/aclchk.c:1015 +#, c-format +msgid "invalid privilege type %s for type" +msgstr "ogiltig privilegietyp %s för typ" + +#: catalog/aclchk.c:504 +#, c-format +msgid "invalid privilege type %s for foreign-data wrapper" +msgstr "ogiltig privilegietyp %s för främmande data-omvandlare" + +#: catalog/aclchk.c:508 +#, c-format +msgid "invalid privilege type %s for foreign server" +msgstr "ogiltig privilegietyp %s för främmande server" + +#: catalog/aclchk.c:547 +#, c-format +msgid "column privileges are only valid for relations" +msgstr "kolumnprivilegier är bara giltiga för relationer" + +#: catalog/aclchk.c:707 catalog/aclchk.c:4131 catalog/aclchk.c:4913 +#: catalog/objectaddress.c:928 catalog/pg_largeobject.c:111 +#: storage/large_object/inv_api.c:284 +#, c-format +msgid "large object %u does not exist" +msgstr "stort objekt %u existerar inte" + +#: catalog/aclchk.c:932 catalog/aclchk.c:941 commands/collationcmds.c:113 +#: commands/copy.c:1057 commands/copy.c:1077 commands/copy.c:1086 +#: commands/copy.c:1095 commands/copy.c:1104 commands/copy.c:1113 +#: commands/copy.c:1122 commands/copy.c:1131 commands/copy.c:1140 +#: commands/copy.c:1158 commands/copy.c:1174 commands/copy.c:1194 +#: commands/copy.c:1211 commands/dbcommands.c:155 commands/dbcommands.c:164 +#: commands/dbcommands.c:173 commands/dbcommands.c:182 +#: commands/dbcommands.c:191 commands/dbcommands.c:200 
+#: commands/dbcommands.c:209 commands/dbcommands.c:218 +#: commands/dbcommands.c:227 commands/dbcommands.c:1427 +#: commands/dbcommands.c:1436 commands/dbcommands.c:1445 +#: commands/dbcommands.c:1454 commands/extension.c:1678 +#: commands/extension.c:1688 commands/extension.c:1698 +#: commands/extension.c:1708 commands/extension.c:2949 +#: commands/foreigncmds.c:537 commands/foreigncmds.c:546 +#: commands/functioncmds.c:557 commands/functioncmds.c:682 +#: commands/functioncmds.c:691 commands/functioncmds.c:700 +#: commands/functioncmds.c:709 commands/functioncmds.c:2103 +#: commands/functioncmds.c:2111 commands/publicationcmds.c:92 +#: commands/sequence.c:1256 commands/sequence.c:1266 commands/sequence.c:1276 +#: commands/sequence.c:1286 commands/sequence.c:1296 commands/sequence.c:1306 +#: commands/sequence.c:1316 commands/sequence.c:1326 commands/sequence.c:1336 +#: commands/subscriptioncmds.c:110 commands/subscriptioncmds.c:120 +#: commands/subscriptioncmds.c:130 commands/subscriptioncmds.c:140 +#: commands/subscriptioncmds.c:154 commands/subscriptioncmds.c:165 +#: commands/subscriptioncmds.c:179 commands/tablecmds.c:6267 +#: commands/typecmds.c:295 commands/typecmds.c:1444 commands/typecmds.c:1453 +#: commands/typecmds.c:1461 commands/typecmds.c:1469 commands/typecmds.c:1477 +#: commands/user.c:134 commands/user.c:148 commands/user.c:157 +#: commands/user.c:166 commands/user.c:175 commands/user.c:184 +#: commands/user.c:193 commands/user.c:202 commands/user.c:211 +#: commands/user.c:220 commands/user.c:229 commands/user.c:238 +#: commands/user.c:247 commands/user.c:555 commands/user.c:563 +#: commands/user.c:571 commands/user.c:579 commands/user.c:587 +#: commands/user.c:595 commands/user.c:603 commands/user.c:611 +#: commands/user.c:620 commands/user.c:628 commands/user.c:636 +#: parser/parse_utilcmd.c:407 replication/pgoutput/pgoutput.c:111 +#: replication/pgoutput/pgoutput.c:132 replication/walsender.c:801 +#: replication/walsender.c:812 replication/walsender.c:822 +#, c-format +msgid "conflicting or redundant options" +msgstr "motstridiga eller redundanta inställningar" + +#: catalog/aclchk.c:1052 +#, c-format +msgid "default privileges cannot be set for columns" +msgstr "standardrättigheter kan inte sättas för kolumner" + +#: catalog/aclchk.c:1212 +#, c-format +msgid "cannot use IN SCHEMA clause when using GRANT/REVOKE ON SCHEMAS" +msgstr "kan inte använda IN SCHEMA-klausul samtidigt som GRANT/REVOKE ON SCHEMAS" + +#: catalog/aclchk.c:1576 catalog/objectaddress.c:1390 commands/analyze.c:433 +#: commands/copy.c:4776 commands/sequence.c:1691 commands/tablecmds.c:5913 +#: commands/tablecmds.c:6061 commands/tablecmds.c:6118 +#: commands/tablecmds.c:6192 commands/tablecmds.c:6286 +#: commands/tablecmds.c:6345 commands/tablecmds.c:6484 +#: commands/tablecmds.c:6559 commands/tablecmds.c:6651 +#: commands/tablecmds.c:6745 commands/tablecmds.c:9079 +#: commands/tablecmds.c:9358 commands/tablecmds.c:9795 commands/trigger.c:904 +#: parser/analyze.c:2311 parser/parse_relation.c:2735 +#: parser/parse_relation.c:2798 parser/parse_target.c:1024 +#: parser/parse_type.c:127 utils/adt/acl.c:2843 utils/adt/ruleutils.c:2422 +#, c-format +msgid "column \"%s\" of relation \"%s\" does not exist" +msgstr "kolumn \"%s\" i relation \"%s\" existerar inte" + +#: catalog/aclchk.c:1843 catalog/objectaddress.c:1230 commands/sequence.c:1129 +#: commands/tablecmds.c:231 commands/tablecmds.c:13515 utils/adt/acl.c:2076 +#: utils/adt/acl.c:2106 utils/adt/acl.c:2138 utils/adt/acl.c:2170 +#: utils/adt/acl.c:2198 
utils/adt/acl.c:2228 +#, c-format +msgid "\"%s\" is not a sequence" +msgstr "\"%s\" är inte en sekvens" + +#: catalog/aclchk.c:1881 +#, c-format +msgid "sequence \"%s\" only supports USAGE, SELECT, and UPDATE privileges" +msgstr "sekvensen \"%s\" stöder bara USAGE-, SELECT- och UPDATE-rättigheter" + +#: catalog/aclchk.c:1898 +#, c-format +msgid "invalid privilege type %s for table" +msgstr "ogiltig rättighetstyp %s för tabell" + +#: catalog/aclchk.c:2064 +#, c-format +msgid "invalid privilege type %s for column" +msgstr "ogiltig rättighetstyp %s för kolumn" + +#: catalog/aclchk.c:2077 +#, c-format +msgid "sequence \"%s\" only supports SELECT column privileges" +msgstr "sekvensen \"%s\" stöder bara kolumnrättigheten SELECT" + +#: catalog/aclchk.c:2659 +#, c-format +msgid "language \"%s\" is not trusted" +msgstr "språket \"%s\" är inte betrott" + +#: catalog/aclchk.c:2661 +#, c-format +msgid "GRANT and REVOKE are not allowed on untrusted languages, because only superusers can use untrusted languages." +msgstr "GRANT och REVOKE är inte tillåtna på icke betrodda språk då bara superanvändare kan använda icke betrodda språk." + +#: catalog/aclchk.c:3175 +#, c-format +msgid "cannot set privileges of array types" +msgstr "kan inte sätta privilegier för array-typer" + +#: catalog/aclchk.c:3176 +#, c-format +msgid "Set the privileges of the element type instead." +msgstr "Sätt rättigheter för elementtypen istället." + +#: catalog/aclchk.c:3183 catalog/objectaddress.c:1520 +#, c-format +msgid "\"%s\" is not a domain" +msgstr "\"%s\" är inte en domän" + +#: catalog/aclchk.c:3303 +#, c-format +msgid "unrecognized privilege type \"%s\"" +msgstr "okänd privilegietyp \"%s\"" + +#: catalog/aclchk.c:3364 +#, c-format +msgid "permission denied for aggregate %s" +msgstr "rättighet saknas för aggregat %s" + +#: catalog/aclchk.c:3367 +#, c-format +msgid "permission denied for collation %s" +msgstr "rättighet saknas för jämförelse %s" + +#: catalog/aclchk.c:3370 +#, c-format +msgid "permission denied for column %s" +msgstr "rättighet saknas för kolumn %s" + +#: catalog/aclchk.c:3373 +#, c-format +msgid "permission denied for conversion %s" +msgstr "rättighet saknas för konvertering %s" + +#: catalog/aclchk.c:3376 +#, c-format +msgid "permission denied for database %s" +msgstr "rättighet saknas för databas %s" + +#: catalog/aclchk.c:3379 +#, c-format +msgid "permission denied for domain %s" +msgstr "rättighet saknas för domän %s" + +#: catalog/aclchk.c:3382 +#, c-format +msgid "permission denied for event trigger %s" +msgstr "rättighet saknas för händelseutlösare %s" + +#: catalog/aclchk.c:3385 +#, c-format +msgid "permission denied for extension %s" +msgstr "rättighet saknas för utökning %s" + +#: catalog/aclchk.c:3388 +#, c-format +msgid "permission denied for foreign-data wrapper %s" +msgstr "rättighet saknas för främmande data-omvandlare %s" + +#: catalog/aclchk.c:3391 +#, c-format +msgid "permission denied for foreign server %s" +msgstr "rättighet saknas för främmande server %s" + +#: catalog/aclchk.c:3394 +#, c-format +msgid "permission denied for foreign table %s" +msgstr "rättighet saknas för främmande tabell %s" + +#: catalog/aclchk.c:3397 +#, c-format +msgid "permission denied for function %s" +msgstr "rättighet saknas för funktion %s" + +#: catalog/aclchk.c:3400 +#, c-format +msgid "permission denied for index %s" +msgstr "rättighet saknas för index %s" + +#: catalog/aclchk.c:3403 +#, c-format +msgid "permission denied for language %s" +msgstr "rättighet saknas för språk %s" + +#: 
catalog/aclchk.c:3406 +#, c-format +msgid "permission denied for large object %s" +msgstr "rättighet saknas för stort objekt %s" + +#: catalog/aclchk.c:3409 +#, c-format +msgid "permission denied for materialized view %s" +msgstr "rättighet saknas för materialiserad vy %s" + +#: catalog/aclchk.c:3412 +#, c-format +msgid "permission denied for operator class %s" +msgstr "rättighet saknas för operatorklass %s" + +#: catalog/aclchk.c:3415 +#, c-format +msgid "permission denied for operator %s" +msgstr "rättighet saknas för operator %s" + +#: catalog/aclchk.c:3418 +#, c-format +msgid "permission denied for operator family %s" +msgstr "rättighet saknas för operatorfamilj %s" + +#: catalog/aclchk.c:3421 +#, c-format +msgid "permission denied for policy %s" +msgstr "rättighet saknas för policy %s" + +#: catalog/aclchk.c:3424 +#, c-format +msgid "permission denied for procedure %s" +msgstr "rättighet saknas för procedur %s" + +#: catalog/aclchk.c:3427 +#, c-format +msgid "permission denied for publication %s" +msgstr "rättighet saknas för publicering %s" + +#: catalog/aclchk.c:3430 +#, c-format +msgid "permission denied for routine %s" +msgstr "rättighet saknas för rutin %s" + +#: catalog/aclchk.c:3433 +#, c-format +msgid "permission denied for schema %s" +msgstr "rättighet saknas för schema %s" + +#: catalog/aclchk.c:3436 commands/sequence.c:599 commands/sequence.c:833 +#: commands/sequence.c:875 commands/sequence.c:916 commands/sequence.c:1789 +#: commands/sequence.c:1853 +#, c-format +msgid "permission denied for sequence %s" +msgstr "rättighet saknas för sekvens %s" + +#: catalog/aclchk.c:3439 +#, c-format +msgid "permission denied for statistics object %s" +msgstr "rättighet saknas för statistikobjekt %s" + +#: catalog/aclchk.c:3442 +#, c-format +msgid "permission denied for subscription %s" +msgstr "rättighet saknas för prenumeration %s" + +#: catalog/aclchk.c:3445 +#, c-format +msgid "permission denied for table %s" +msgstr "rättighet saknas för tabell %s" + +#: catalog/aclchk.c:3448 +#, c-format +msgid "permission denied for tablespace %s" +msgstr "rättighet saknas för tabellutrymme %s" + +#: catalog/aclchk.c:3451 +#, c-format +msgid "permission denied for text search configuration %s" +msgstr "rättighet saknas för textsökkonfigurering %s" + +#: catalog/aclchk.c:3454 +#, c-format +msgid "permission denied for text search dictionary %s" +msgstr "rättighet saknas för textsökordlista %s" + +#: catalog/aclchk.c:3457 +#, c-format +msgid "permission denied for type %s" +msgstr "rättighet saknas för typ %s" + +#: catalog/aclchk.c:3460 +#, c-format +msgid "permission denied for view %s" +msgstr "rättighet saknas för vy %s" + +#: catalog/aclchk.c:3495 +#, c-format +msgid "must be owner of aggregate %s" +msgstr "måste vara ägaren till aggregatet %s" + +#: catalog/aclchk.c:3498 +#, c-format +msgid "must be owner of collation %s" +msgstr "måste vara ägaren till jämförelsen %s" + +#: catalog/aclchk.c:3501 +#, c-format +msgid "must be owner of conversion %s" +msgstr "måste vara ägaren till konverteringen %s" + +#: catalog/aclchk.c:3504 +#, c-format +msgid "must be owner of database %s" +msgstr "måste vara ägaren till databasen %s" + +#: catalog/aclchk.c:3507 +#, c-format +msgid "must be owner of domain %s" +msgstr "måste vara ägaren av domänen %s" + +#: catalog/aclchk.c:3510 +#, c-format +msgid "must be owner of event trigger %s" +msgstr "måste vara ägaren till händelseutlösaren %s" + +#: catalog/aclchk.c:3513 +#, c-format +msgid "must be owner of extension %s" +msgstr "måste vara ägaren till 
utökningen %s" + +#: catalog/aclchk.c:3516 +#, c-format +msgid "must be owner of foreign-data wrapper %s" +msgstr "måste vara ägaren till främmande data-omvandlaren %s" + +#: catalog/aclchk.c:3519 +#, c-format +msgid "must be owner of foreign server %s" +msgstr "måste vara ägaren till främmande servern %s" + +#: catalog/aclchk.c:3522 +#, c-format +msgid "must be owner of foreign table %s" +msgstr "måste vara ägaren till främmande tabellen %s" + +#: catalog/aclchk.c:3525 +#, c-format +msgid "must be owner of function %s" +msgstr "måste vara ägaren till funktionen %s" + +#: catalog/aclchk.c:3528 +#, c-format +msgid "must be owner of index %s" +msgstr "måste vara ägaren till indexet %s" + +#: catalog/aclchk.c:3531 +#, c-format +msgid "must be owner of language %s" +msgstr "måste vara ägaren till språket %s" + +#: catalog/aclchk.c:3534 +#, c-format +msgid "must be owner of large object %s" +msgstr "måste vara ägaren till stora objektet %s" + +#: catalog/aclchk.c:3537 +#, c-format +msgid "must be owner of materialized view %s" +msgstr "måste vara ägaren till den materialiserade vyn %s" + +#: catalog/aclchk.c:3540 +#, c-format +msgid "must be owner of operator class %s" +msgstr "måste vara ägaren till operatorklassen %s" + +#: catalog/aclchk.c:3543 +#, c-format +msgid "must be owner of operator %s" +msgstr "måste vara ägaren till operatorn %s" + +#: catalog/aclchk.c:3546 +#, c-format +msgid "must be owner of operator family %s" +msgstr "måste vara ägaren till operatorfamiljen %s" + +#: catalog/aclchk.c:3549 +#, c-format +msgid "must be owner of procedure %s" +msgstr "måste vara ägaren till proceduren %s" + +#: catalog/aclchk.c:3552 +#, c-format +msgid "must be owner of publication %s" +msgstr "måste vara ägaren till publiceringen %s" + +#: catalog/aclchk.c:3555 +#, c-format +msgid "must be owner of routine %s" +msgstr "måste vara ägaren till rutinen %s" + +#: catalog/aclchk.c:3558 +#, c-format +msgid "must be owner of sequence %s" +msgstr "måste vara ägaren till sekvensen %s" + +#: catalog/aclchk.c:3561 +#, c-format +msgid "must be owner of subscription %s" +msgstr "måste vara ägaren till prenumerationen %s" + +#: catalog/aclchk.c:3564 +#, c-format +msgid "must be owner of table %s" +msgstr "måste vara ägaren till tabellen %s" + +#: catalog/aclchk.c:3567 +#, c-format +msgid "must be owner of type %s" +msgstr "måste vara ägaren till typen %s" + +#: catalog/aclchk.c:3570 +#, c-format +msgid "must be owner of view %s" +msgstr "måste vara ägaren till vyn %s" + +#: catalog/aclchk.c:3573 +#, c-format +msgid "must be owner of schema %s" +msgstr "måste vara ägaren till schemat %s" + +#: catalog/aclchk.c:3576 +#, c-format +msgid "must be owner of statistics object %s" +msgstr "måste vara ägaren till statistikobjektet %s" + +#: catalog/aclchk.c:3579 +#, c-format +msgid "must be owner of tablespace %s" +msgstr "måste vara ägaren till tabellutrymmet %s" + +#: catalog/aclchk.c:3582 +#, c-format +msgid "must be owner of text search configuration %s" +msgstr "måste vara ägaren till textsökkonfigurationen %s" + +#: catalog/aclchk.c:3585 +#, c-format +msgid "must be owner of text search dictionary %s" +msgstr "måste vara ägaren till textsökordlistan %s" + +#: catalog/aclchk.c:3599 +#, c-format +msgid "must be owner of relation %s" +msgstr "måste vara ägaren till relationen %s" + +#: catalog/aclchk.c:3643 +#, c-format +msgid "permission denied for column \"%s\" of relation \"%s\"" +msgstr "rättighet saknas för kolumn \"%s\" i relation \"%s\"" + +#: catalog/aclchk.c:3764 catalog/aclchk.c:3772 +#, c-format +msgid 
"attribute %d of relation with OID %u does not exist" +msgstr "attribut %d i relation med OID %u existerar inte" + +#: catalog/aclchk.c:3845 catalog/aclchk.c:4764 +#, c-format +msgid "relation with OID %u does not exist" +msgstr "relation med OID %u existerar inte" + +#: catalog/aclchk.c:3944 catalog/aclchk.c:5182 +#, c-format +msgid "database with OID %u does not exist" +msgstr "databas med OID %u finns inte" + +#: catalog/aclchk.c:3998 catalog/aclchk.c:4842 tcop/fastpath.c:221 +#: utils/fmgr/fmgr.c:2195 +#, c-format +msgid "function with OID %u does not exist" +msgstr "funktionen med OID %u existerar inte" + +#: catalog/aclchk.c:4052 catalog/aclchk.c:4868 +#, c-format +msgid "language with OID %u does not exist" +msgstr "språk med OID %u existerar inte" + +#: catalog/aclchk.c:4216 catalog/aclchk.c:4940 +#, c-format +msgid "schema with OID %u does not exist" +msgstr "schema med OID %u existerar inte" + +#: catalog/aclchk.c:4270 catalog/aclchk.c:4967 +#, c-format +msgid "tablespace with OID %u does not exist" +msgstr "tabellutrymme med OID %u finns inte" + +#: catalog/aclchk.c:4329 catalog/aclchk.c:5101 commands/foreigncmds.c:324 +#, c-format +msgid "foreign-data wrapper with OID %u does not exist" +msgstr "främmande data-omvandlare med OID %u finns inte" + +#: catalog/aclchk.c:4391 catalog/aclchk.c:5128 commands/foreigncmds.c:459 +#, c-format +msgid "foreign server with OID %u does not exist" +msgstr "främmande server med OID %u finns inte" + +#: catalog/aclchk.c:4451 catalog/aclchk.c:4790 utils/cache/typcache.c:368 +#, c-format +msgid "type with OID %u does not exist" +msgstr "typ med OID %u existerar inte" + +#: catalog/aclchk.c:4816 +#, c-format +msgid "operator with OID %u does not exist" +msgstr "operator med OID %u existerar inte" + +#: catalog/aclchk.c:4993 +#, c-format +msgid "operator class with OID %u does not exist" +msgstr "operatorklass med OID %u existerar inte" + +#: catalog/aclchk.c:5020 +#, c-format +msgid "operator family with OID %u does not exist" +msgstr "operatorfamilj med OID %u existerar inte" + +#: catalog/aclchk.c:5047 +#, c-format +msgid "text search dictionary with OID %u does not exist" +msgstr "textsökordlista med OID %u existerar inte" + +#: catalog/aclchk.c:5074 +#, c-format +msgid "text search configuration with OID %u does not exist" +msgstr "textsökkonfiguration med OID %u existerar inte" + +#: catalog/aclchk.c:5155 commands/event_trigger.c:590 +#, c-format +msgid "event trigger with OID %u does not exist" +msgstr "händelseutlösare med OID %u existerar inte" + +#: catalog/aclchk.c:5208 commands/collationcmds.c:347 +#, c-format +msgid "collation with OID %u does not exist" +msgstr "jämförelse med OID %u existerar inte" + +#: catalog/aclchk.c:5234 +#, c-format +msgid "conversion with OID %u does not exist" +msgstr "konvertering med OID %u existerar inte" + +#: catalog/aclchk.c:5275 +#, c-format +msgid "extension with OID %u does not exist" +msgstr "utökning med OID %u existerar inte" + +#: catalog/aclchk.c:5302 commands/publicationcmds.c:747 +#, c-format +msgid "publication with OID %u does not exist" +msgstr "publicering med OID %u existerar inte" + +#: catalog/aclchk.c:5328 commands/subscriptioncmds.c:1098 +#, c-format +msgid "subscription with OID %u does not exist" +msgstr "prenumeration med OID %u existerar inte" + +#: catalog/aclchk.c:5354 +#, c-format +msgid "statistics object with OID %u does not exist" +msgstr "statistikobjekt med OID %u finns inte" + +#: catalog/dependency.c:611 +#, c-format +msgid "cannot drop %s because %s requires it" +msgstr 
"kan inte ta bort %s eftersom %s behöver den" + +#: catalog/dependency.c:614 +#, c-format +msgid "You can drop %s instead." +msgstr "Du kan ta bort %s i stället." + +#: catalog/dependency.c:787 catalog/pg_shdepend.c:574 +#, c-format +msgid "cannot drop %s because it is required by the database system" +msgstr "kan inte ta bort %s eftersom den krävs av databassystemet" + +#: catalog/dependency.c:905 +#, c-format +msgid "drop auto-cascades to %s" +msgstr "drop svämmar automatiskt över (cascades) till %s" + +#: catalog/dependency.c:917 catalog/dependency.c:926 +#, c-format +msgid "%s depends on %s" +msgstr "%s beror på %s" + +#: catalog/dependency.c:938 catalog/dependency.c:947 +#, c-format +msgid "drop cascades to %s" +msgstr "drop svämmar över (cascades) till %s" + +#: catalog/dependency.c:955 catalog/pg_shdepend.c:685 +#, c-format +msgid "" +"\n" +"and %d other object (see server log for list)" +msgid_plural "" +"\n" +"and %d other objects (see server log for list)" +msgstr[0] "" +"\n" +"och %d annat objekt (se serverloggen för en lista)" +msgstr[1] "" +"\n" +"och %d andra objekt (se serverloggen för en lista)" + +#: catalog/dependency.c:967 +#, c-format +msgid "cannot drop %s because other objects depend on it" +msgstr "kan inte ta bort %s eftersom andra objekt beror på den" + +#: catalog/dependency.c:971 catalog/dependency.c:978 +#, c-format +msgid "Use DROP ... CASCADE to drop the dependent objects too." +msgstr "Använd DROP ... CASCADE för att ta bort de beroende objekten också." + +#: catalog/dependency.c:975 +#, c-format +msgid "cannot drop desired object(s) because other objects depend on them" +msgstr "kan inte ta bort önskade objekt eftersom andra objekt beror på dem" + +#. translator: %d always has a value larger than 1 +#: catalog/dependency.c:984 +#, c-format +msgid "drop cascades to %d other object" +msgid_plural "drop cascades to %d other objects" +msgstr[0] "drop svämmar över (cascades) till %d andra objekt" +msgstr[1] "drop svämmar över (cascades) till %d andra objekt" + +#: catalog/dependency.c:1644 +#, c-format +msgid "constant of the type %s cannot be used here" +msgstr "konstant av typen %s kan inte användas här" + +#: catalog/heap.c:286 +#, c-format +msgid "permission denied to create \"%s.%s\"" +msgstr "rättighet saknas för att skapa \"%s.%s\"" + +#: catalog/heap.c:288 +#, c-format +msgid "System catalog modifications are currently disallowed." +msgstr "Systemkatalogändringar är för tillfället inte tillåtna." 
+ +#: catalog/heap.c:425 commands/tablecmds.c:1861 commands/tablecmds.c:2385 +#: commands/tablecmds.c:5480 +#, c-format +msgid "tables can have at most %d columns" +msgstr "tabeller kan ha som mest %d kolumner" + +#: catalog/heap.c:444 commands/tablecmds.c:5776 +#, c-format +msgid "column name \"%s\" conflicts with a system column name" +msgstr "kolumnnamn \"%s\" står i konflikt med ett systemkolumnnamn" + +#: catalog/heap.c:460 +#, c-format +msgid "column name \"%s\" specified more than once" +msgstr "kolumnnamn \"%s\" angiven mer än en gång" + +#: catalog/heap.c:513 +#, c-format +msgid "column \"%s\" has pseudo-type %s" +msgstr "kolumn \"%s\" har pseudo-typ %s" + +#: catalog/heap.c:543 +#, c-format +msgid "composite type %s cannot be made a member of itself" +msgstr "composite-typ %s kan inte vara en del av sig själv" + +#: catalog/heap.c:585 commands/createas.c:201 commands/createas.c:498 +#, c-format +msgid "no collation was derived for column \"%s\" with collatable type %s" +msgstr "ingen jämförelse kunde härledas för kolumn \"%s\" med jämförelsetyp %s" + +#: catalog/heap.c:587 commands/createas.c:204 commands/createas.c:501 +#: commands/indexcmds.c:1557 commands/tablecmds.c:13816 commands/view.c:103 +#: regex/regc_pg_locale.c:263 utils/adt/formatting.c:1536 +#: utils/adt/formatting.c:1658 utils/adt/formatting.c:1781 utils/adt/like.c:184 +#: utils/adt/selfuncs.c:5807 utils/adt/varlena.c:1416 utils/adt/varlena.c:1881 +#, c-format +msgid "Use the COLLATE clause to set the collation explicitly." +msgstr "Använd en COLLATE-klausul för att sätta jämförelsen explicit." + +#: catalog/heap.c:1076 catalog/index.c:864 commands/tablecmds.c:3154 +#, c-format +msgid "relation \"%s\" already exists" +msgstr "relationen \"%s\" finns redan" + +#: catalog/heap.c:1092 catalog/pg_type.c:409 catalog/pg_type.c:731 +#: commands/typecmds.c:236 commands/typecmds.c:787 commands/typecmds.c:1186 +#: commands/typecmds.c:1419 commands/typecmds.c:2174 +#, c-format +msgid "type \"%s\" already exists" +msgstr "typen \"%s\" existerar redan" + +#: catalog/heap.c:1093 +#, c-format +msgid "A relation has an associated type of the same name, so you must use a name that doesn't conflict with any existing type." +msgstr "En relation har en associerad typ med samma namn så du måste använda ett namn som inte krockar med någon existerande typ." 
+ +#: catalog/heap.c:1122 +#, c-format +msgid "pg_class heap OID value not set when in binary upgrade mode" +msgstr "pg_class heap OID-värde är inte satt i binärt uppgraderingsläge" + +#: catalog/heap.c:2254 +#, c-format +msgid "cannot add NO INHERIT constraint to partitioned table \"%s\"" +msgstr "kan inte lägga till NO INHERIT-villkor till partitionerad tabell \"%s\"" + +#: catalog/heap.c:2519 +#, c-format +msgid "check constraint \"%s\" already exists" +msgstr "check-villkor \"%s\" finns redan" + +#: catalog/heap.c:2688 catalog/pg_constraint.c:912 commands/tablecmds.c:7122 +#, c-format +msgid "constraint \"%s\" for relation \"%s\" already exists" +msgstr "integritetsvillkor \"%s\" för relation \"%s\" finns redan" + +#: catalog/heap.c:2695 +#, c-format +msgid "constraint \"%s\" conflicts with non-inherited constraint on relation \"%s\"" +msgstr "villkor \"%s\" står i konflikt med icke-ärvt villkor på relation \"%s\"" + +#: catalog/heap.c:2706 +#, c-format +msgid "constraint \"%s\" conflicts with inherited constraint on relation \"%s\"" +msgstr "villkor \"%s\" står i konflikt med ärvt villkor på relation \"%s\"" + +#: catalog/heap.c:2716 +#, c-format +msgid "constraint \"%s\" conflicts with NOT VALID constraint on relation \"%s\"" +msgstr "villkor \"%s\" står i konflikt med NOT VALID-villkor på relation \"%s\"" + +#: catalog/heap.c:2721 +#, c-format +msgid "merging constraint \"%s\" with inherited definition" +msgstr "slår samman villkor \"%s\" med ärvd definition" + +#: catalog/heap.c:2837 +#, c-format +msgid "cannot use column references in default expression" +msgstr "kan inte använda kolumnreferenser i default-uttryck" + +#: catalog/heap.c:2862 rewrite/rewriteHandler.c:1176 +#, c-format +msgid "column \"%s\" is of type %s but default expression is of type %s" +msgstr "kolumn \"%s\" har typ %s men default-uttryck har typen %s" + +#: catalog/heap.c:2867 commands/prepare.c:384 parser/parse_node.c:430 +#: parser/parse_target.c:590 parser/parse_target.c:859 +#: parser/parse_target.c:869 rewrite/rewriteHandler.c:1181 +#, c-format +msgid "You will need to rewrite or cast the expression." +msgstr "Du måste skriva om eller typomvandla uttrycket." + +#: catalog/heap.c:2914 +#, c-format +msgid "only table \"%s\" can be referenced in check constraint" +msgstr "bara tabell \"%s\" kan refereras i check-villkoret" + +#: catalog/heap.c:3154 +#, c-format +msgid "unsupported ON COMMIT and foreign key combination" +msgstr "inget stöd för kombinationen ON COMMIT och främmande nyckel" + +#: catalog/heap.c:3155 +#, c-format +msgid "Table \"%s\" references \"%s\", but they do not have the same ON COMMIT setting." +msgstr "Tabell \"%s\" refererar till \"%s\", men de har inte samma ON COMMIT-inställning." + +#: catalog/heap.c:3160 +#, c-format +msgid "cannot truncate a table referenced in a foreign key constraint" +msgstr "kan inte trunkera en tabell som refereras till i ett främmande nyckelvillkor" + +#: catalog/heap.c:3161 +#, c-format +msgid "Table \"%s\" references \"%s\"." +msgstr "Tabell \"%s\" refererar till \"%s\"." + +#: catalog/heap.c:3163 +#, c-format +msgid "Truncate table \"%s\" at the same time, or use TRUNCATE ... CASCADE." +msgstr "Trunkera tabellen \"%s\" samtidigt, eller använd TRUNCATE ... CASCADE." 
+ +#: catalog/index.c:231 parser/parse_utilcmd.c:1824 parser/parse_utilcmd.c:1911 +#, c-format +msgid "multiple primary keys for table \"%s\" are not allowed" +msgstr "multipla primärnycklar för tabell \"%s\" tillåts inte" + +#: catalog/index.c:249 +#, c-format +msgid "primary keys cannot be expressions" +msgstr "primärnycklar kan inte vara uttryck" + +#: catalog/index.c:814 catalog/index.c:1259 +#, c-format +msgid "user-defined indexes on system catalog tables are not supported" +msgstr "användardefinierade index på systemkatalogtabeller stöds inte" + +#: catalog/index.c:824 +#, c-format +msgid "concurrent index creation on system catalog tables is not supported" +msgstr "samtida indexskapande på systemkatalogtabeller stöds inte" + +#: catalog/index.c:842 +#, c-format +msgid "shared indexes cannot be created after initdb" +msgstr "delade index kan inte skapas efter initdb" + +#: catalog/index.c:856 commands/createas.c:250 commands/sequence.c:152 +#: parser/parse_utilcmd.c:205 +#, c-format +msgid "relation \"%s\" already exists, skipping" +msgstr "relationen \"%s\" finns redan, hoppar över" + +#: catalog/index.c:892 +#, c-format +msgid "pg_class index OID value not set when in binary upgrade mode" +msgstr "pg_class index OID-värde är inte satt i binärt uppgraderingsläge" + +#: catalog/index.c:1534 +#, c-format +msgid "DROP INDEX CONCURRENTLY must be first action in transaction" +msgstr "DROP INDEX CONCURRENTLY måste vara första operationen i transaktion" + +#: catalog/index.c:2263 +#, c-format +msgid "building index \"%s\" on table \"%s\" serially" +msgstr "bygger index \"%s\" på tabell \"%s\" seriellt" + +#: catalog/index.c:2268 +#, c-format +msgid "building index \"%s\" on table \"%s\" with request for %d parallel worker" +msgid_plural "building index \"%s\" on table \"%s\" with request for %d parallel workers" +msgstr[0] "bygger index \"%s\" på tabell \"%s\" och efterfrågar %d parallell arbetare" +msgstr[1] "bygger index \"%s\" på tabell \"%s\" och efterfrågar %d parallella arbetare" + +#: catalog/index.c:3657 +#, c-format +msgid "cannot reindex temporary tables of other sessions" +msgstr "kan inte omindexera temporära tabeller som tillhör andra sessioner" + +#: catalog/index.c:3788 +#, c-format +msgid "index \"%s\" was reindexed" +msgstr "index \"%s\" omindexerades" + +#: catalog/index.c:3859 +#, c-format +msgid "REINDEX of partitioned tables is not yet implemented, skipping \"%s\"" +msgstr "REINDEX på partitionerade tabeller är inte implementerat ännu, hoppar över \"%s\"" + +#: catalog/namespace.c:248 catalog/namespace.c:452 catalog/namespace.c:546 +#: commands/trigger.c:5377 +#, c-format +msgid "cross-database references are not implemented: \"%s.%s.%s\"" +msgstr "referenser till andra databaser är inte implementerat: \"%s.%s.%s\"" + +#: catalog/namespace.c:305 +#, c-format +msgid "temporary tables cannot specify a schema name" +msgstr "temporära tabeller kan inte anges med ett schemanamn" + +#: catalog/namespace.c:386 +#, c-format +msgid "could not obtain lock on relation \"%s.%s\"" +msgstr "kunde inte ta lås på relationen \"%s.%s\"" + +#: catalog/namespace.c:391 commands/lockcmds.c:152 commands/lockcmds.c:238 +#, c-format +msgid "could not obtain lock on relation \"%s\"" +msgstr "kunde inte ta lås på relationen \"%s\"" + +#: catalog/namespace.c:419 parser/parse_relation.c:1158 +#, c-format +msgid "relation \"%s.%s\" does not exist" +msgstr "relationen \"%s.%s\" existerar inte" + +#: catalog/namespace.c:424 parser/parse_relation.c:1171 +#: parser/parse_relation.c:1179 +#, c-format 
+msgid "relation \"%s\" does not exist" +msgstr "relationen \"%s\" existerar inte" + +#: catalog/namespace.c:492 catalog/namespace.c:3011 commands/extension.c:1466 +#: commands/extension.c:1472 +#, c-format +msgid "no schema has been selected to create in" +msgstr "inget schema har valts för att skapa i" + +#: catalog/namespace.c:644 catalog/namespace.c:657 +#, c-format +msgid "cannot create relations in temporary schemas of other sessions" +msgstr "kan inte skapa relationer i temporära scheman som tillhör andra sessioner" + +#: catalog/namespace.c:648 +#, c-format +msgid "cannot create temporary relation in non-temporary schema" +msgstr "kan inte skapa temporär relation i icke-temporärt schema" + +#: catalog/namespace.c:663 +#, c-format +msgid "only temporary relations may be created in temporary schemas" +msgstr "bara temporära relationer får skapas i temporära scheman" + +#: catalog/namespace.c:2201 +#, c-format +msgid "statistics object \"%s\" does not exist" +msgstr "statistikobjektet \"%s\" existerar inte" + +#: catalog/namespace.c:2324 +#, c-format +msgid "text search parser \"%s\" does not exist" +msgstr "textsökparser \"%s\" finns inte" + +#: catalog/namespace.c:2450 +#, c-format +msgid "text search dictionary \"%s\" does not exist" +msgstr "textsökkatalog \"%s\" finns inte" + +#: catalog/namespace.c:2577 +#, c-format +msgid "text search template \"%s\" does not exist" +msgstr "textsökmall \"%s\" finns inte" + +#: catalog/namespace.c:2703 commands/tsearchcmds.c:1185 +#: utils/cache/ts_cache.c:616 +#, c-format +msgid "text search configuration \"%s\" does not exist" +msgstr "textsökkonfiguration \"%s\" finns inte" + +#: catalog/namespace.c:2816 parser/parse_expr.c:793 parser/parse_target.c:1214 +#, c-format +msgid "cross-database references are not implemented: %s" +msgstr "referenser till andra databaser är inte implementerat: %s" + +#: catalog/namespace.c:2822 gram.y:14707 gram.y:16139 parser/parse_expr.c:800 +#: parser/parse_target.c:1221 +#, c-format +msgid "improper qualified name (too many dotted names): %s" +msgstr "ej korrekt kvalificerat namn (för många namn med punkt): %s" + +#: catalog/namespace.c:2953 +#, c-format +msgid "cannot move objects into or out of temporary schemas" +msgstr "kan inte flytta objekt in eller ut från temporära scheman" + +#: catalog/namespace.c:2959 +#, c-format +msgid "cannot move objects into or out of TOAST schema" +msgstr "kan inte flytta objekt in eller ut från TOAST-schema" + +#: catalog/namespace.c:3032 commands/schemacmds.c:256 commands/schemacmds.c:334 +#: commands/tablecmds.c:1014 +#, c-format +msgid "schema \"%s\" does not exist" +msgstr "schema \"%s\" existerar inte" + +#: catalog/namespace.c:3063 +#, c-format +msgid "improper relation name (too many dotted names): %s" +msgstr "ej korrekt relationsnamn (för många namn med punkt): %s" + +#: catalog/namespace.c:3557 +#, c-format +msgid "collation \"%s\" for encoding \"%s\" does not exist" +msgstr "jämförelse \"%s\" för kodning \"%s\" finns inte" + +#: catalog/namespace.c:3612 +#, c-format +msgid "conversion \"%s\" does not exist" +msgstr "konvertering \"%s\" finns inte" + +#: catalog/namespace.c:3820 +#, c-format +msgid "permission denied to create temporary tables in database \"%s\"" +msgstr "rättighet saknas för att skapa temporära tabeller i databasen \"%s\"" + +#: catalog/namespace.c:3836 +#, c-format +msgid "cannot create temporary tables during recovery" +msgstr "kan inte skapa temptabeller under återställning" + +#: catalog/namespace.c:3842 +#, c-format +msgid "cannot create 
temporary tables during a parallel operation" +msgstr "kan inte skapa temporära tabeller under en parallell operation" + +#: catalog/namespace.c:4091 commands/tablespace.c:1171 commands/variable.c:64 +#: utils/misc/guc.c:10255 utils/misc/guc.c:10333 +#, c-format +msgid "List syntax is invalid." +msgstr "List-syntaxen är ogiltig." + +#: catalog/objectaddress.c:1238 catalog/pg_publication.c:66 +#: commands/policy.c:94 commands/policy.c:394 commands/policy.c:484 +#: commands/tablecmds.c:225 commands/tablecmds.c:267 commands/tablecmds.c:1719 +#: commands/tablecmds.c:4975 commands/tablecmds.c:9197 +#, c-format +msgid "\"%s\" is not a table" +msgstr "\"%s\" är inte en tabell" + +#: catalog/objectaddress.c:1245 commands/tablecmds.c:237 +#: commands/tablecmds.c:5005 commands/tablecmds.c:13520 commands/view.c:141 +#, c-format +msgid "\"%s\" is not a view" +msgstr "\"%s\" är inte en vy" + +#: catalog/objectaddress.c:1252 commands/matview.c:172 commands/tablecmds.c:243 +#: commands/tablecmds.c:13525 +#, c-format +msgid "\"%s\" is not a materialized view" +msgstr "\"%s\" är inte en materialiserad vy" + +#: catalog/objectaddress.c:1259 commands/tablecmds.c:261 +#: commands/tablecmds.c:5008 commands/tablecmds.c:13530 +#, c-format +msgid "\"%s\" is not a foreign table" +msgstr "\"%s\" är inte en främmande tabell" + +#: catalog/objectaddress.c:1300 +#, c-format +msgid "must specify relation and object name" +msgstr "måste ange relation och objektnamn" + +#: catalog/objectaddress.c:1376 catalog/objectaddress.c:1429 +#, c-format +msgid "column name must be qualified" +msgstr "kolumnnamn måste vara kvalificerat" + +#: catalog/objectaddress.c:1472 +#, c-format +msgid "default value for column \"%s\" of relation \"%s\" does not exist" +msgstr "standardvärde för kolumn \"%s\" i relation \"%s\" existerar inte" + +#: catalog/objectaddress.c:1509 commands/functioncmds.c:131 +#: commands/tablecmds.c:253 commands/typecmds.c:3320 parser/parse_type.c:226 +#: parser/parse_type.c:255 parser/parse_type.c:828 utils/adt/acl.c:4377 +#, c-format +msgid "type \"%s\" does not exist" +msgstr "typen \"%s\" existerar inte" + +#: catalog/objectaddress.c:1628 +#, c-format +msgid "operator %d (%s, %s) of %s does not exist" +msgstr "operator %d (%s, %s) för %s finns inte" + +#: catalog/objectaddress.c:1659 +#, c-format +msgid "function %d (%s, %s) of %s does not exist" +msgstr "funktion %d (%s, %s) för %s finns inte" + +#: catalog/objectaddress.c:1710 catalog/objectaddress.c:1736 +#, c-format +msgid "user mapping for user \"%s\" on server \"%s\" does not exist" +msgstr "användarmappning för användare \"%s\" på server \"%s\" finns inte" + +#: catalog/objectaddress.c:1725 commands/foreigncmds.c:428 +#: commands/foreigncmds.c:1004 commands/foreigncmds.c:1377 +#: foreign/foreign.c:688 +#, c-format +msgid "server \"%s\" does not exist" +msgstr "server \"%s\" finns inte" + +#: catalog/objectaddress.c:1792 +#, c-format +msgid "publication relation \"%s\" in publication \"%s\" does not exist" +msgstr "publiceringsrelation \"%s\" i publicering \"%s\" finns inte" + +#: catalog/objectaddress.c:1854 +#, c-format +msgid "unrecognized default ACL object type \"%c\"" +msgstr "okänd standard-ACL-objekttyp \"%c\"" + +#: catalog/objectaddress.c:1855 +#, c-format +msgid "Valid object types are \"%c\", \"%c\", \"%c\", \"%c\", \"%c\"." +msgstr "Giltiga objekttyper är \"%c\", \"%c\", \"%c\", \"%c\", \"%c\"." 
+ +#: catalog/objectaddress.c:1906 +#, c-format +msgid "default ACL for user \"%s\" in schema \"%s\" on %s does not exist" +msgstr "standard ACL för användare \"%s\" i schema \"%s\" på %s finns inte" + +#: catalog/objectaddress.c:1911 +#, c-format +msgid "default ACL for user \"%s\" on %s does not exist" +msgstr "standard ACL för användare \"%s\" på %s finns inte" + +#: catalog/objectaddress.c:1938 catalog/objectaddress.c:1996 +#: catalog/objectaddress.c:2053 +#, c-format +msgid "name or argument lists may not contain nulls" +msgstr "namn eller argumentlistor får inte innehålla null" + +#: catalog/objectaddress.c:1972 +#, c-format +msgid "unsupported object type \"%s\"" +msgstr "ej stöd för objekttyp \"%s\"" + +#: catalog/objectaddress.c:1992 catalog/objectaddress.c:2010 +#: catalog/objectaddress.c:2151 +#, c-format +msgid "name list length must be exactly %d" +msgstr "namnlistlängden måste vara exakt %d" + +#: catalog/objectaddress.c:2014 +#, c-format +msgid "large object OID may not be null" +msgstr "stort objekt-OID får inte vara null" + +#: catalog/objectaddress.c:2023 catalog/objectaddress.c:2086 +#: catalog/objectaddress.c:2093 +#, c-format +msgid "name list length must be at least %d" +msgstr "namnlistlängden måste vara minst %d" + +#: catalog/objectaddress.c:2079 catalog/objectaddress.c:2100 +#, c-format +msgid "argument list length must be exactly %d" +msgstr "argumentlistans längd måste vara exakt %d" + +#: catalog/objectaddress.c:2330 libpq/be-fsstubs.c:321 +#, c-format +msgid "must be owner of large object %u" +msgstr "måste vara ägaren till stort objekt %u" + +#: catalog/objectaddress.c:2345 commands/functioncmds.c:1452 +#, c-format +msgid "must be owner of type %s or type %s" +msgstr "måste vara ägaren till typ %s eller typ %s" + +#: catalog/objectaddress.c:2395 catalog/objectaddress.c:2412 +#, c-format +msgid "must be superuser" +msgstr "måste vara superanvändare" + +#: catalog/objectaddress.c:2402 +#, c-format +msgid "must have CREATEROLE privilege" +msgstr "måste ha rättigheten CREATEROLE" + +#: catalog/objectaddress.c:2481 +#, c-format +msgid "unrecognized object type \"%s\"" +msgstr "okänd objekttyp \"%s\"" + +#. translator: second %s is, e.g., "table %s" +#: catalog/objectaddress.c:2694 +#, c-format +msgid "column %s of %s" +msgstr "kolumn %s av %s" + +#: catalog/objectaddress.c:2704 +#, c-format +msgid "function %s" +msgstr "funktion %s" + +#: catalog/objectaddress.c:2709 +#, c-format +msgid "type %s" +msgstr "typ %s" + +#: catalog/objectaddress.c:2739 +#, c-format +msgid "cast from %s to %s" +msgstr "typomvandling från %s till %s" + +#: catalog/objectaddress.c:2767 +#, c-format +msgid "collation %s" +msgstr "jämförelse %s" + +#. translator: second %s is, e.g., "table %s" +#: catalog/objectaddress.c:2793 +#, c-format +msgid "constraint %s on %s" +msgstr "villkor %s på %s" + +#: catalog/objectaddress.c:2799 +#, c-format +msgid "constraint %s" +msgstr "villkor %s" + +#: catalog/objectaddress.c:2826 +#, c-format +msgid "conversion %s" +msgstr "konvertering %s" + +#.
translator: %s is typically "column %s of table %s" +#: catalog/objectaddress.c:2865 +#, c-format +msgid "default value for %s" +msgstr "default-värde för %s" + +#: catalog/objectaddress.c:2874 +#, c-format +msgid "language %s" +msgstr "språk %s" + +#: catalog/objectaddress.c:2879 +#, c-format +msgid "large object %u" +msgstr "stort objekt %u" + +#: catalog/objectaddress.c:2884 +#, c-format +msgid "operator %s" +msgstr "operator %s" + +#: catalog/objectaddress.c:2916 +#, c-format +msgid "operator class %s for access method %s" +msgstr "operatorklass %s för accessmetod %s" + +#: catalog/objectaddress.c:2939 +#, c-format +msgid "access method %s" +msgstr "accessmetod %s" + +#. translator: %d is the operator strategy (a number), the +#. first two %s's are data type names, the third %s is the +#. description of the operator family, and the last %s is the +#. textual form of the operator with arguments. +#: catalog/objectaddress.c:2981 +#, c-format +msgid "operator %d (%s, %s) of %s: %s" +msgstr "operator %d (%s, %s) för %s: %s" + +#. translator: %d is the function number, the first two %s's +#. are data type names, the third %s is the description of the +#. operator family, and the last %s is the textual form of the +#. function with arguments. +#: catalog/objectaddress.c:3031 +#, c-format +msgid "function %d (%s, %s) of %s: %s" +msgstr "funktion %d (%s, %s) för %s: %s" + +#. translator: second %s is, e.g., "table %s" +#: catalog/objectaddress.c:3075 +#, c-format +msgid "rule %s on %s" +msgstr "regel %s på %s" + +#. translator: second %s is, e.g., "table %s" +#: catalog/objectaddress.c:3113 +#, c-format +msgid "trigger %s on %s" +msgstr "utlösare %s på %s" + +#: catalog/objectaddress.c:3129 +#, c-format +msgid "schema %s" +msgstr "schema %s" + +#: catalog/objectaddress.c:3152 +#, c-format +msgid "statistics object %s" +msgstr "statistikobjekt %s" + +#: catalog/objectaddress.c:3179 +#, c-format +msgid "text search parser %s" +msgstr "textsökparser %s" + +#: catalog/objectaddress.c:3205 +#, c-format +msgid "text search dictionary %s" +msgstr "textsökordlista %s" + +#: catalog/objectaddress.c:3231 +#, c-format +msgid "text search template %s" +msgstr "textsökmall %s" + +#: catalog/objectaddress.c:3257 +#, c-format +msgid "text search configuration %s" +msgstr "textsökkonfiguration %s" + +#: catalog/objectaddress.c:3266 +#, c-format +msgid "role %s" +msgstr "roll %s" + +#: catalog/objectaddress.c:3279 +#, c-format +msgid "database %s" +msgstr "databas %s" + +#: catalog/objectaddress.c:3291 +#, c-format +msgid "tablespace %s" +msgstr "tabellutrymme %s" + +#: catalog/objectaddress.c:3300 +#, c-format +msgid "foreign-data wrapper %s" +msgstr "främmande data-omvandlare %s" + +#: catalog/objectaddress.c:3309 +#, c-format +msgid "server %s" +msgstr "server %s" + +#: catalog/objectaddress.c:3337 +#, c-format +msgid "user mapping for %s on server %s" +msgstr "användarmappning för %s på server %s" + +#: catalog/objectaddress.c:3382 +#, c-format +msgid "default privileges on new relations belonging to role %s in schema %s" +msgstr "standardrättigheter för nya relationer som tillhör rollen %s i schema %s" + +#: catalog/objectaddress.c:3386 +#, c-format +msgid "default privileges on new relations belonging to role %s" +msgstr "standardrättigheter för nya relationer som tillhör rollen %s" + +#: catalog/objectaddress.c:3392 +#, c-format +msgid "default privileges on new sequences belonging to role %s in schema %s" +msgstr "standardrättigheter för nya sekvenser som tillhör rollen %s i schema %s" + +#: 
catalog/objectaddress.c:3396 +#, c-format +msgid "default privileges on new sequences belonging to role %s" +msgstr "standardrättigheter för nya sekvenser som tillhör rollen %s" + +#: catalog/objectaddress.c:3402 +#, c-format +msgid "default privileges on new functions belonging to role %s in schema %s" +msgstr "standardrättigheter för nya funktioner som tillhör rollen %s i schema %s" + +#: catalog/objectaddress.c:3406 +#, c-format +msgid "default privileges on new functions belonging to role %s" +msgstr "standardrättigheter för nya funktioner som tillhör rollen %s" + +#: catalog/objectaddress.c:3412 +#, c-format +msgid "default privileges on new types belonging to role %s in schema %s" +msgstr "standardrättigheter för nya typer som tillhör rollen %s i schema %s" + +#: catalog/objectaddress.c:3416 +#, c-format +msgid "default privileges on new types belonging to role %s" +msgstr "standardrättigheter för nya typer som tillhör rollen %s" + +#: catalog/objectaddress.c:3422 +#, c-format +msgid "default privileges on new schemas belonging to role %s" +msgstr "standardrättigheter för nya scheman som tillhör rollen %s" + +msgid "" +"\n" +"Please read the documentation for the complete list of run-time\n" +"configuration settings and how to set them on the command line or in\n" +"the configuration file.\n" +"\n" +"Report bugs to .\n" +msgstr "" +"\n" +"Vänligen läs dokumentationen för en komplett lista av körningsinställningar\n" +"och hur man anger dem på kommandoraden eller i konfigurationsfilen.\n" +"\n" +"Rapportera buggar till .\n" + +#: main/main.c:391 +#, c-format +msgid "" +"\"root\" execution of the PostgreSQL server is not permitted.\n" +"The server must be started under an unprivileged user ID to prevent\n" +"possible system security compromise. See the documentation for\n" +"more information on how to properly start the server.\n" +msgstr "" +"Att köra PostgreSQL-servern som \"root\" tillåts inte.\n" +"Servern måste startas av ett icke privilegierat användare-ID för att förhindra\n" +"ev. säkerhetsproblem. Se dokumentationen för mer information om hur man\n" +"startar servern på rätt sätt.\n" + +#: main/main.c:408 +#, c-format +msgid "%s: real and effective user IDs must match\n" +msgstr "%s: riktig och effektiv användar-ID måste matcha varandra\n" + +#: main/main.c:415 +#, c-format +msgid "" +"Execution of PostgreSQL by a user with administrative permissions is not\n" +"permitted.\n" +"The server must be started under an unprivileged user ID to prevent\n" +"possible system security compromises. See the documentation for\n" +"more information on how to properly start the server.\n" +msgstr "" +"Det är inte tillåtet för en användare med administratörsrättigheter att köra\n" +"PostgreSQL.\n" +"Servern måste startas av ett icke privilegierat användare-ID för att förhindra\n" +"ev. säkerhetsproblem. Se dokumentationen för mer information om hur man startar\n" +"servern på rätt sätt.\n" + +#: nodes/extensible.c:66 +#, c-format +msgid "extensible node type \"%s\" already exists" +msgstr "utökningsbar nodtyp \"%s\" finns redan" + +#: nodes/extensible.c:114 +#, c-format +msgid "ExtensibleNodeMethods \"%s\" was not registered" +msgstr "ExtensibleNodeMethods \"%s\" har inte registrerats" + +#: nodes/nodeFuncs.c:123 nodes/nodeFuncs.c:154 parser/parse_coerce.c:1910 +#: parser/parse_coerce.c:1938 parser/parse_coerce.c:2014 +#: parser/parse_expr.c:2119 parser/parse_func.c:676 parser/parse_oper.c:967 +#, c-format +msgid "could not find array type for data type %s" +msgstr "kunde inte hitta array-typ för datatyp %s" + +#: optimizer/path/joinrels.c:837 +#, c-format +msgid "FULL JOIN is only supported with merge-joinable or hash-joinable join conditions" +msgstr "FULL JOIN stöds bara med villkor som är merge-joinbara eller hash-joinbara" + +#.
translator: %s is a SQL row locking clause such as FOR UPDATE +#: optimizer/plan/initsplan.c:1221 +#, c-format +msgid "%s cannot be applied to the nullable side of an outer join" +msgstr "%s kan inte appliceras på den nullbara sidan av en outer join" + +#. translator: %s is a SQL row locking clause such as FOR UPDATE +#: optimizer/plan/planner.c:1757 parser/analyze.c:1625 parser/analyze.c:1822 +#: parser/analyze.c:2653 +#, c-format +msgid "%s is not allowed with UNION/INTERSECT/EXCEPT" +msgstr "%s tillåts inte med UNION/INTERSECT/EXCEPT" + +#: optimizer/plan/planner.c:2329 optimizer/plan/planner.c:4050 +#, c-format +msgid "could not implement GROUP BY" +msgstr "kunde inte implementera GROUP BY" + +#: optimizer/plan/planner.c:2330 optimizer/plan/planner.c:4051 +#: optimizer/plan/planner.c:4795 optimizer/prep/prepunion.c:1074 +#, c-format +msgid "Some of the datatypes only support hashing, while others only support sorting." +msgstr "Några av datatyperna stöder bara hash:ning medan andra bara stöder sortering." + +#: optimizer/plan/planner.c:4794 +#, c-format +msgid "could not implement DISTINCT" +msgstr "kunde inte implementera DISTINCT" + +#: optimizer/plan/planner.c:5479 +#, c-format +msgid "could not implement window PARTITION BY" +msgstr "kunde inte implementera fönster-PARTITION BY" + +#: optimizer/plan/planner.c:5480 +#, c-format +msgid "Window partitioning columns must be of sortable datatypes." +msgstr "Fönsterpartitionskolumner måste ha en sorterbar datatyp." + +#: optimizer/plan/planner.c:5484 +#, c-format +msgid "could not implement window ORDER BY" +msgstr "kunde inte implementera fönster-ORDER BY" + +#: optimizer/plan/planner.c:5485 +#, c-format +msgid "Window ordering columns must be of sortable datatypes." +msgstr "Fönsterordningskolumner måste ha en sorterbar datatyp." + +#: optimizer/plan/setrefs.c:418 +#, c-format +msgid "too many range table entries" +msgstr "för många element i \"range table\"" + +#: optimizer/prep/prepunion.c:538 +#, c-format +msgid "could not implement recursive UNION" +msgstr "kunde inte implementera rekursiv UNION" + +#: optimizer/prep/prepunion.c:539 +#, c-format +msgid "All column datatypes must be hashable." +msgstr "Alla kolumndatatyper måste vara hash-bara." + +#.
translator: %s is UNION, INTERSECT, or EXCEPT +#: optimizer/prep/prepunion.c:1073 +#, c-format +msgid "could not implement %s" +msgstr "kunde inte implementera %s" + +#: optimizer/util/clauses.c:4834 +#, c-format +msgid "SQL function \"%s\" during inlining" +msgstr "SQL-funktion \"%s\" vid inline:ing" + +#: optimizer/util/plancat.c:127 +#, c-format +msgid "cannot access temporary or unlogged relations during recovery" +msgstr "kan inte accessa temporära eller ologgade relationer under återställning" + +#: optimizer/util/plancat.c:651 +#, c-format +msgid "whole row unique index inference specifications are not supported" +msgstr "inferens av unikt index för hel rad stöds inte" + +#: optimizer/util/plancat.c:668 +#, c-format +msgid "constraint in ON CONFLICT clause has no associated index" +msgstr "villkor för ON CONFLICT-klausul har inget associerat index" + +#: optimizer/util/plancat.c:719 +#, c-format +msgid "ON CONFLICT DO UPDATE not supported with exclusion constraints" +msgstr "ON CONFLICT DO UPDATE stöds inte med uteslutningsvillkor" + +#: optimizer/util/plancat.c:824 +#, c-format +msgid "there is no unique or exclusion constraint matching the ON CONFLICT specification" +msgstr "finns inget unikt villkor eller uteslutningsvillkor som matchar ON CONFLICT-specifikationen" + +#: parser/analyze.c:709 parser/analyze.c:1388 +#, c-format +msgid "VALUES lists must all be the same length" +msgstr "VÄRDE-listor måste alla ha samma längd" + +#: parser/analyze.c:919 +#, c-format +msgid "INSERT has more expressions than target columns" +msgstr "INSERT har fler uttryck än målkolumner" + +#: parser/analyze.c:937 +#, c-format +msgid "INSERT has more target columns than expressions" +msgstr "INSERT har fler målkolumner än uttryck" + +#: parser/analyze.c:941 +#, c-format +msgid "The insertion source is a row expression containing the same number of columns expected by the INSERT. Did you accidentally use extra parentheses?" +msgstr "Inmatningskällan är ett raduttryck som innehåller samma antal kolumner som INSERT:en förväntade sig. Råkade du använda extra parenteser?" + +#: parser/analyze.c:1201 parser/analyze.c:1598 +#, c-format +msgid "SELECT ... INTO is not allowed here" +msgstr "SELECT ... INTO tillåts inte här" + +#. translator: %s is a SQL row locking clause such as FOR UPDATE +#: parser/analyze.c:1530 parser/analyze.c:2832 +#, c-format +msgid "%s cannot be applied to VALUES" +msgstr "%s kan inte appliceras på VÄRDEN" + +#: parser/analyze.c:1749 +#, c-format +msgid "invalid UNION/INTERSECT/EXCEPT ORDER BY clause" +msgstr "ogiltig UNION/INTERSECT/EXCEPT ORDER BY-klausul" + +#: parser/analyze.c:1750 +#, c-format +msgid "Only result column names can be used, not expressions or functions." +msgstr "Bara kolumnnamn i resultatet kan användas, inte uttryck eller funktioner." + +#: parser/analyze.c:1751 +#, c-format +msgid "Add the expression/function to every SELECT, or move the UNION into a FROM clause." +msgstr "Lägg till uttrycket/funktionen till varje SELECT eller flytta UNION:en in i en FROM-klausul."
+ +#: parser/analyze.c:1812 +#, c-format +msgid "INTO is only allowed on first SELECT of UNION/INTERSECT/EXCEPT" +msgstr "INTO tillåts bara i den första SELECT i UNION/INTERSECT/EXCEPT" + +#: parser/analyze.c:1884 +#, c-format +msgid "UNION/INTERSECT/EXCEPT member statement cannot refer to other relations of same query level" +msgstr "UNION/INTERSECT/EXCEPT-medlemssats kan inte referera till andra relationer på samma frågenivå" + +#: parser/analyze.c:1973 +#, c-format +msgid "each %s query must have the same number of columns" +msgstr "varje %s-fråga måste ha samma antal kolumner" + +#: parser/analyze.c:2366 +#, c-format +msgid "RETURNING must have at least one column" +msgstr "RETURNING måste ha minst en kolumn" + +#: parser/analyze.c:2407 +#, c-format +msgid "cannot specify both SCROLL and NO SCROLL" +msgstr "kan inte ange både SCROLL och NO SCROLL" + +#: parser/analyze.c:2426 +#, c-format +msgid "DECLARE CURSOR must not contain data-modifying statements in WITH" +msgstr "DECLARE CURSOR får inte innehålla datamodifierande satser i WITH" + +#. translator: %s is a SQL row locking clause such as FOR UPDATE +#: parser/analyze.c:2434 +#, c-format +msgid "DECLARE CURSOR WITH HOLD ... %s is not supported" +msgstr "DECLARE CURSOR WITH HOLD ... %s stöds inte" + +#: parser/analyze.c:2437 +#, c-format +msgid "Holdable cursors must be READ ONLY." +msgstr "Hållbara markörer måste vara READ ONLY." + +#. translator: %s is a SQL row locking clause such as FOR UPDATE +#: parser/analyze.c:2445 +#, c-format +msgid "DECLARE SCROLL CURSOR ... %s is not supported" +msgstr "DECLARE SCROLL CURSOR ... %s stöds inte" + +#. translator: %s is a SQL row locking clause such as FOR UPDATE +#: parser/analyze.c:2456 +#, c-format +msgid "DECLARE INSENSITIVE CURSOR ... %s is not supported" +msgstr "DECLARE INSENSITIVE CURSOR ... %s stöds inte" + +#: parser/analyze.c:2459 +#, c-format +msgid "Insensitive cursors must be READ ONLY." +msgstr "Okänsliga markörer måste vara READ ONLY." + +#: parser/analyze.c:2525 +#, c-format +msgid "materialized views must not use data-modifying statements in WITH" +msgstr "materialiserade vyer får inte innehålla datamodifierande satser i WITH" + +#: parser/analyze.c:2535 +#, c-format +msgid "materialized views must not use temporary tables or views" +msgstr "materialiserade vyer får inte använda temporära tabeller eller vyer" + +#: parser/analyze.c:2545 +#, c-format +msgid "materialized views may not be defined using bound parameters" +msgstr "materialiserade vyer kan inte definieras med bundna parametrar" + +#: parser/analyze.c:2557 +#, c-format +msgid "materialized views cannot be UNLOGGED" +msgstr "materialiserade vyer kan inte vara UNLOGGED" + +#. translator: %s is a SQL row locking clause such as FOR UPDATE +#: parser/analyze.c:2660 +#, c-format +msgid "%s is not allowed with DISTINCT clause" +msgstr "%s tillåts inte med DISTINCT-klausul" + +#. translator: %s is a SQL row locking clause such as FOR UPDATE +#: parser/analyze.c:2667 +#, c-format +msgid "%s is not allowed with GROUP BY clause" +msgstr "%s tillåts inte med GROUP BY-klausul" + +#. translator: %s is a SQL row locking clause such as FOR UPDATE +#: parser/analyze.c:2674 +#, c-format +msgid "%s is not allowed with HAVING clause" +msgstr "%s tillåts inte med HAVING-klausul" + +#. translator: %s is a SQL row locking clause such as FOR UPDATE +#: parser/analyze.c:2681 +#, c-format +msgid "%s is not allowed with aggregate functions" +msgstr "%s tillåts inte med aggregatfunktioner" + +#.
translator: %s is a SQL row locking clause such as FOR UPDATE +#: parser/analyze.c:2688 +#, c-format +msgid "%s is not allowed with window functions" +msgstr "%s tillåts inte med fönsterfunktioner" + +#. translator: %s is a SQL row locking clause such as FOR UPDATE +#: parser/analyze.c:2695 +#, c-format +msgid "%s is not allowed with set-returning functions in the target list" +msgstr "%s tillåts inte med mängdreturnerande funktioner i mållistan" + +#. translator: %s is a SQL row locking clause such as FOR UPDATE +#: parser/analyze.c:2774 +#, c-format +msgid "%s must specify unqualified relation names" +msgstr "%s måste ange okvalificerade relationsnamn" + +#. translator: %s is a SQL row locking clause such as FOR UPDATE +#: parser/analyze.c:2805 +#, c-format +msgid "%s cannot be applied to a join" +msgstr "%s kan inte appliceras på en join" + +#. translator: %s is a SQL row locking clause such as FOR UPDATE +#: parser/analyze.c:2814 +#, c-format +msgid "%s cannot be applied to a function" +msgstr "%s kan inte appliceras på en funktion" + +#. translator: %s is a SQL row locking clause such as FOR UPDATE +#: parser/analyze.c:2823 +#, c-format +msgid "%s cannot be applied to a table function" +msgstr "%s kan inte appliceras på en tabellfunktion" + +#. translator: %s is a SQL row locking clause such as FOR UPDATE +#: parser/analyze.c:2841 +#, c-format +msgid "%s cannot be applied to a WITH query" +msgstr "%s kan inte appliceras på en WITH-fråga" + +#. translator: %s is a SQL row locking clause such as FOR UPDATE +#: parser/analyze.c:2850 +#, c-format +msgid "%s cannot be applied to a named tuplestore" +msgstr "%s kan inte appliceras på en namngiven tupellagring" + +#. translator: %s is a SQL row locking clause such as FOR UPDATE +#: parser/analyze.c:2867 +#, c-format +msgid "relation \"%s\" in %s clause not found in FROM clause" +msgstr "relationen \"%s\" i %s-klausul hittades inte i FROM-klausul" + +#: parser/parse_agg.c:221 parser/parse_oper.c:222 +#, c-format +msgid "could not identify an ordering operator for type %s" +msgstr "kunde inte identifiera en jämförelseoperator för typ %s" + +#: parser/parse_agg.c:223 +#, c-format +msgid "Aggregates with DISTINCT must be able to sort their inputs." +msgstr "Aggregat med DISTINCT måste kunna sortera sina indata."
+ +#: parser/parse_agg.c:258 +#, c-format +msgid "GROUPING must have fewer than 32 arguments" +msgstr "GROUPING måste ha färre än 32 argument" + +#: parser/parse_agg.c:361 +msgid "aggregate functions are not allowed in JOIN conditions" +msgstr "aggregatfunktioner tillåts inte i JOIN-villkor" + +#: parser/parse_agg.c:363 +msgid "grouping operations are not allowed in JOIN conditions" +msgstr "gruppoperationer tillåts inte i JOIN-villkor" + +#: parser/parse_agg.c:375 +msgid "aggregate functions are not allowed in FROM clause of their own query level" +msgstr "aggregatfunktioner tillåts inte i FROM-klausul på sin egen frågenivå" + +#: parser/parse_agg.c:377 +msgid "grouping operations are not allowed in FROM clause of their own query level" +msgstr "gruppoperationer tillåts inte i FROM-klausul på sin egen frågenivå" + +#: parser/parse_agg.c:382 +msgid "aggregate functions are not allowed in functions in FROM" +msgstr "aggregatfunktioner tillåts inte i funktioner i FROM" + +#: parser/parse_agg.c:384 +msgid "grouping operations are not allowed in functions in FROM" +msgstr "gruppoperationer tillåts inte i funktioner i FROM" + +#: parser/parse_agg.c:392 +msgid "aggregate functions are not allowed in policy expressions" +msgstr "aggregatfunktioner tillåts inte i policyuttryck" + +#: parser/parse_agg.c:394 +msgid "grouping operations are not allowed in policy expressions" +msgstr "gruppoperationer tillåts inte i policyuttryck" + +#: parser/parse_agg.c:411 +msgid "aggregate functions are not allowed in window RANGE" +msgstr "aggregatfunktioner tillåts inte i fönster-RANGE" + +#: parser/parse_agg.c:413 +msgid "grouping operations are not allowed in window RANGE" +msgstr "grupperingsoperationer tillåts inte i fönster-RANGE" + +#: parser/parse_agg.c:418 +msgid "aggregate functions are not allowed in window ROWS" +msgstr "aggregatfunktioner tillåts inte i fönster-RADER" + +#: parser/parse_agg.c:420 +msgid "grouping operations are not allowed in window ROWS" +msgstr "grupperingsfunktioner tillåts inte i fönster-RADER" + +#: parser/parse_agg.c:425 +msgid "aggregate functions are not allowed in window GROUPS" +msgstr "aggregatfunktioner tillåts inte i fönster-GROUPS" + +#: parser/parse_agg.c:427 +msgid "grouping operations are not allowed in window GROUPS" +msgstr "grupperingsfunktioner tillåts inte i fönster-GROUPS" + +#: parser/parse_agg.c:461 +msgid "aggregate functions are not allowed in check constraints" +msgstr "aggregatfunktioner tillåts inte i check-villkor" + +#: parser/parse_agg.c:463 +msgid "grouping operations are not allowed in check constraints" +msgstr "gruppoperationer tillåts inte i check-villkor" + +#: parser/parse_agg.c:470 +msgid "aggregate functions are not allowed in DEFAULT expressions" +msgstr "aggregatfunktioner tillåts inte i DEFAULT-uttryck" + +#: parser/parse_agg.c:472 +msgid "grouping operations are not allowed in DEFAULT expressions" +msgstr "grupperingsoperationer tillåts inte i DEFAULT-uttryck" + +#: parser/parse_agg.c:477 +msgid "aggregate functions are not allowed in index expressions" +msgstr "aggregatfunktioner tillåts inte i indexuttryck" + +#: parser/parse_agg.c:479 +msgid "grouping operations are not allowed in index expressions" +msgstr "gruppoperationer tillåts inte i indexuttryck" + +#: parser/parse_agg.c:484 +msgid "aggregate functions are not allowed in index predicates" +msgstr "aggregatfunktionsanrop tillåts inte i indexpredikat" + +#: parser/parse_agg.c:486 +msgid "grouping operations are not allowed in index predicates" +msgstr "gruppoperationer tillåts 
inte i indexpredikat" + +#: parser/parse_agg.c:491 +msgid "aggregate functions are not allowed in transform expressions" +msgstr "aggregatfunktioner tillåts inte i transform-uttryck" + +#: parser/parse_agg.c:493 +msgid "grouping operations are not allowed in transform expressions" +msgstr "gruppoperationer tillåts inte i transform-uttryck" + +#: parser/parse_agg.c:498 +msgid "aggregate functions are not allowed in EXECUTE parameters" +msgstr "aggregatfunktioner tillåts inte i EXECUTE-parametrar" + +#: parser/parse_agg.c:500 +msgid "grouping operations are not allowed in EXECUTE parameters" +msgstr "gruppoperationer tillåts inte i EXECUTE-parametrar" + +#: parser/parse_agg.c:505 +msgid "aggregate functions are not allowed in trigger WHEN conditions" +msgstr "aggregatfunktioner tillåts inte i WHEN-utlösarvillkor" + +#: parser/parse_agg.c:507 +msgid "grouping operations are not allowed in trigger WHEN conditions" +msgstr "gruppoperationer tillåts inte i WHEN-utlösarvillkor" + +#: parser/parse_agg.c:512 +msgid "aggregate functions are not allowed in partition key expressions" +msgstr "aggregatfunktioner tillåts inte i partitionsnyckeluttryck" + +#: parser/parse_agg.c:514 +msgid "grouping operations are not allowed in partition key expressions" +msgstr "gruppoperationer tillåts inte i partitionsnyckeluttryck" + +#: parser/parse_agg.c:520 +msgid "aggregate functions are not allowed in CALL arguments" +msgstr "aggregatfunktioner tillåts inte i CALL-argument" + +#: parser/parse_agg.c:522 +msgid "grouping operations are not allowed in CALL arguments" +msgstr "gruppoperationer tillåts inte i CALL-argument" + +#. translator: %s is name of a SQL construct, eg GROUP BY +#: parser/parse_agg.c:545 parser/parse_clause.c:1817 +#, c-format +msgid "aggregate functions are not allowed in %s" +msgstr "aggregatfunktioner tillåts inte i %s" + +#. translator: %s is name of a SQL construct, eg GROUP BY +#: parser/parse_agg.c:548 +#, c-format +msgid "grouping operations are not allowed in %s" +msgstr "gruppoperationer tillåts inte i %s" + +#: parser/parse_agg.c:656 +#, c-format +msgid "outer-level aggregate cannot contain a lower-level variable in its direct arguments" +msgstr "yttre aggregat kan inte innehålla inre variabel i sitt direkta argument" + +#: parser/parse_agg.c:735 +#, c-format +msgid "aggregate function calls cannot contain set-returning function calls" +msgstr "aggregatfunktionsanrop kan inte innehålla mängdreturnerande funktionsanrop" + +#: parser/parse_agg.c:736 parser/parse_expr.c:1766 parser/parse_expr.c:2246 +#: parser/parse_func.c:847 +#, c-format +msgid "You might be able to move the set-returning function into a LATERAL FROM item." +msgstr "Du kanske kan flytta den mängdreturnerande funktionen in i en LATERAL FROM-konstruktion."
+ +#: parser/parse_agg.c:741 +#, c-format +msgid "aggregate function calls cannot contain window function calls" +msgstr "aggregatfunktionsanrop kan inte innehålla fönsterfunktionsanrop" + +#: parser/parse_agg.c:820 +msgid "window functions are not allowed in JOIN conditions" +msgstr "fönsterfunktioner tillåts inte i JOIN-villkor" + +#: parser/parse_agg.c:827 +msgid "window functions are not allowed in functions in FROM" +msgstr "fönsterfunktioner tillåts inte i funktioner i FROM" + +#: parser/parse_agg.c:833 +msgid "window functions are not allowed in policy expressions" +msgstr "fönsterfunktioner tillåts inte i policy-uttryck" + +#: parser/parse_agg.c:846 +msgid "window functions are not allowed in window definitions" +msgstr "fönsterfunktioner tillåts inte i fönsterdefinitioner" + +#: parser/parse_agg.c:878 +msgid "window functions are not allowed in check constraints" +msgstr "fönsterfunktioner tillåts inte i check-villkor" + +#: parser/parse_agg.c:882 +msgid "window functions are not allowed in DEFAULT expressions" +msgstr "fönsterfunktioner tillåts inte i DEFAULT-uttryck" + +#: parser/parse_agg.c:885 +msgid "window functions are not allowed in index expressions" +msgstr "fönsterfunktioner tillåts inte i indexuttryck" + +#: parser/parse_agg.c:888 +msgid "window functions are not allowed in index predicates" +msgstr "fönsterfunktioner tillåts inte i indexpredikat" + +#: parser/parse_agg.c:891 +msgid "window functions are not allowed in transform expressions" +msgstr "fönsterfunktioner tillåts inte i transform-uttryck" + +#: parser/parse_agg.c:894 +msgid "window functions are not allowed in EXECUTE parameters" +msgstr "fönsterfunktioner tillåts inte i EXECUTE-parametrar" + +#: parser/parse_agg.c:897 +msgid "window functions are not allowed in trigger WHEN conditions" +msgstr "fönsterfunktioner tillåts inte i WHEN-utlösarvillkor" + +#: parser/parse_agg.c:900 +msgid "window functions are not allowed in partition key expressions" +msgstr "fönsterfunktioner tillåts inte i partitionsnyckeluttryck" + +#: parser/parse_agg.c:903 +msgid "window functions are not allowed in CALL arguments" +msgstr "fönsterfunktioner tillåts inte i CALL-argument" + +#. translator: %s is name of a SQL construct, eg GROUP BY +#: parser/parse_agg.c:923 parser/parse_clause.c:1826 +#, c-format +msgid "window functions are not allowed in %s" +msgstr "fönsterfunktioner tillåts inte i %s" + +#: parser/parse_agg.c:957 parser/parse_clause.c:2662 +#, c-format +msgid "window \"%s\" does not exist" +msgstr "fönster \"%s\" finns inte" + +#: parser/parse_agg.c:1042 +#, c-format +msgid "too many grouping sets present (maximum 4096)" +msgstr "för många grupperingsmängder (maximalt 4096)" + +#: parser/parse_agg.c:1191 +#, c-format +msgid "aggregate functions are not allowed in a recursive query's recursive term" +msgstr "aggregatfunktioner tillåts inte i en rekursiv frågas rekursiva term" + +#: parser/parse_agg.c:1384 +#, c-format +msgid "column \"%s.%s\" must appear in the GROUP BY clause or be used in an aggregate function" +msgstr "kolumn \"%s.%s\" måste stå med i GROUP BY-klausulen eller användas i en aggregatfunktion" + +#: parser/parse_agg.c:1387 +#, c-format +msgid "Direct arguments of an ordered-set aggregate must use only grouped columns." +msgstr "Direkta argument till en sorterad-mängd-aggregat får bara använda grupperade kolumner."
+ +#: parser/parse_agg.c:1392 +#, c-format +msgid "subquery uses ungrouped column \"%s.%s\" from outer query" +msgstr "underfråga använder ogrupperad kolumn \"%s.%s\" från yttre fråga" + +#: parser/parse_agg.c:1556 +#, c-format +msgid "arguments to GROUPING must be grouping expressions of the associated query level" +msgstr "argument till GROUPING måste vara grupputtryck på den tillhörande frågenivån" + +#: parser/parse_clause.c:199 +#, c-format +msgid "relation \"%s\" cannot be the target of a modifying statement" +msgstr "relationen \"%s\" kan inte vara målet för en modifierande sats" + +#: parser/parse_clause.c:615 parser/parse_clause.c:643 parser/parse_func.c:2265 +#, c-format +msgid "set-returning functions must appear at top level of FROM" +msgstr "mängdreturnerande funktioner måste vara på toppnivå i FROM" + +#: parser/parse_clause.c:655 +#, c-format +msgid "multiple column definition lists are not allowed for the same function" +msgstr "multipla kolumndefinitionslistor tillåts inte för samma funktion" + +#: parser/parse_clause.c:688 +#, c-format +msgid "ROWS FROM() with multiple functions cannot have a column definition list" +msgstr "ROWS FROM() med multipla funktioner kan inte ha en kolumndefinitionslista" + +#: parser/parse_clause.c:689 +#, c-format +msgid "Put a separate column definition list for each function inside ROWS FROM()." +msgstr "Lägg till en separat kolumndefinitionslista för varje funktion inne i ROWS FROM()." + +#: parser/parse_clause.c:695 +#, c-format +msgid "UNNEST() with multiple arguments cannot have a column definition list" +msgstr "UNNEST() med multipla argument kan inte ha en kolumndefinitionslista" + +#: parser/parse_clause.c:696 +#, c-format +msgid "Use separate UNNEST() calls inside ROWS FROM(), and attach a column definition list to each one." +msgstr "Använd separata UNNEST()-anrop inne i ROWS FROM() och koppla en kolumndefinitionslista till varje." + +#: parser/parse_clause.c:703 +#, c-format +msgid "WITH ORDINALITY cannot be used with a column definition list" +msgstr "WITH ORDINALITY kan inte användas tillsammans med en kolumndefinitionslista" + +#: parser/parse_clause.c:704 +#, c-format +msgid "Put the column definition list inside ROWS FROM()." +msgstr "Placera kolumndefinitionslistan inne i ROWS FROM()."
+ +#: parser/parse_clause.c:807 +#, c-format +msgid "only one FOR ORDINALITY column is allowed" +msgstr "bara en FOR ORDINALITY-kolumn tillåts" + +#: parser/parse_clause.c:868 +#, c-format +msgid "column name \"%s\" is not unique" +msgstr "kolumnnamn \"%s\" är inte unikt" + +#: parser/parse_clause.c:910 +#, c-format +msgid "namespace name \"%s\" is not unique" +msgstr "namespace-namn \"%s\" är inte unikt" + +#: parser/parse_clause.c:920 +#, c-format +msgid "only one default namespace is allowed" +msgstr "bara ett standard-namespace tillåts" + +#: parser/parse_clause.c:981 +#, c-format +msgid "tablesample method %s does not exist" +msgstr "tabellsamplingsmetod \"%s\" existerar inte" + +#: parser/parse_clause.c:1003 +#, c-format +msgid "tablesample method %s requires %d argument, not %d" +msgid_plural "tablesample method %s requires %d arguments, not %d" +msgstr[0] "tabellsamplingsmetod %s kräver %d argument, inte %d" +msgstr[1] "tabellsamplingsmetod %s kräver %d argument, inte %d" + +#: parser/parse_clause.c:1037 +#, c-format +msgid "tablesample method %s does not support REPEATABLE" +msgstr "tabellsamplingsmetod %s stöder inte REPEATABLE" + +#: parser/parse_clause.c:1207 +#, c-format +msgid "TABLESAMPLE clause can only be applied to tables and materialized views" +msgstr "TABLESAMPLE-klausul kan bara appliceras på tabeller och materialiserade vyer" + +#: parser/parse_clause.c:1377 +#, c-format +msgid "column name \"%s\" appears more than once in USING clause" +msgstr "kolumnnamn \"%s\" angivet mer än en gång i USING-klausul" + +#: parser/parse_clause.c:1392 +#, c-format +msgid "common column name \"%s\" appears more than once in left table" +msgstr "gemensamt kolumnnamn \"%s\" finns mer än en gång i vänstra tabellen" + +#: parser/parse_clause.c:1401 +#, c-format +msgid "column \"%s\" specified in USING clause does not exist in left table" +msgstr "kolumn \"%s\" angiven i USING-klausul finns inte i den vänstra tabellen" + +#: parser/parse_clause.c:1415 +#, c-format +msgid "common column name \"%s\" appears more than once in right table" +msgstr "gemensamt kolumnnamn \"%s\" finns mer än en gång i högra tabellen" + +#: parser/parse_clause.c:1424 +#, c-format +msgid "column \"%s\" specified in USING clause does not exist in right table" +msgstr "kolumn \"%s\" angiven i USING-klausul finns inte i den högra tabellen" + +#: parser/parse_clause.c:1478 +#, c-format +msgid "column alias list for \"%s\" has too many entries" +msgstr "kolumnaliaslista för \"%s\" har för många element" + +#. translator: %s is name of a SQL construct, eg LIMIT +#: parser/parse_clause.c:1787 +#, c-format +msgid "argument of %s must not contain variables" +msgstr "argumentet till %s får inte innehålla variabler" + +#. translator: first %s is name of a SQL construct, eg ORDER BY +#: parser/parse_clause.c:1952 +#, c-format +msgid "%s \"%s\" is ambiguous" +msgstr "%s \"%s\" är tvetydig" + +#. translator: %s is name of a SQL construct, eg ORDER BY +#: parser/parse_clause.c:1981 +#, c-format +msgid "non-integer constant in %s" +msgstr "ej heltalskonstant i %s" + +#. 
translator: %s is name of a SQL construct, eg ORDER BY +#: parser/parse_clause.c:2003 +#, c-format +msgid "%s position %d is not in select list" +msgstr "%s-position %d finns inte i select-listan" + +#: parser/parse_clause.c:2444 +#, c-format +msgid "CUBE is limited to 12 elements" +msgstr "CUBE är begränsad till 12 element" + +#: parser/parse_clause.c:2650 +#, c-format +msgid "window \"%s\" is already defined" +msgstr "fönster \"%s\" är redan definierad" + +#: parser/parse_clause.c:2711 +#, c-format +msgid "cannot override PARTITION BY clause of window \"%s\"" +msgstr "kan inte övertrumfa PARTITION BY-klausul för fönster \"%s\"" + +#: parser/parse_clause.c:2723 +#, c-format +msgid "cannot override ORDER BY clause of window \"%s\"" +msgstr "kan inte övertrumfa ORDER BY-klausul för fönster \"%s\"" + +#: parser/parse_clause.c:2753 parser/parse_clause.c:2759 +#, c-format +msgid "cannot copy window \"%s\" because it has a frame clause" +msgstr "kan inte kopiera fönster \"%s\" då det har en fönsterramklausul" + +#: parser/parse_clause.c:2761 +#, c-format +msgid "Omit the parentheses in this OVER clause." +msgstr "Ta bort parenteserna i denna OVER-klausul." + +#: parser/parse_clause.c:2781 +#, c-format +msgid "RANGE with offset PRECEDING/FOLLOWING requires exactly one ORDER BY column" +msgstr "RANGE med offset PRECEDING/FOLLOWING kräver exakt en ORDER BY-kolumn" + +#: parser/parse_clause.c:2864 +#, c-format +msgid "in an aggregate with DISTINCT, ORDER BY expressions must appear in argument list" +msgstr "i ett aggregat med DISTINCT så måste ORDER BY-uttryck finnas i argumentlistan" + +#: parser/parse_clause.c:2865 +#, c-format +msgid "for SELECT DISTINCT, ORDER BY expressions must appear in select list" +msgstr "i SELECT DISTINCT så måste ORDER BY-uttryck finnas i select-listan" + +#: parser/parse_clause.c:2897 +#, c-format +msgid "an aggregate with DISTINCT must have at least one argument" +msgstr "ett aggregat med DISTINCT måste ha minst ett argument" + +#: parser/parse_clause.c:2898 +#, c-format +msgid "SELECT DISTINCT must have at least one column" +msgstr "SELECT DISTINCT måste ha minst en kolumn" + +#: parser/parse_clause.c:2964 parser/parse_clause.c:2996 +#, c-format +msgid "SELECT DISTINCT ON expressions must match initial ORDER BY expressions" +msgstr "SELECT DISTINCT ON-uttrycken måste matcha de initiala ORDER BY-uttrycken" + +#: parser/parse_clause.c:3074 +#, c-format +msgid "ASC/DESC is not allowed in ON CONFLICT clause" +msgstr "ASC/DESC tillåts inte i ON CONFLICT-klausul" + +#: parser/parse_clause.c:3080 +#, c-format +msgid "NULLS FIRST/LAST is not allowed in ON CONFLICT clause" +msgstr "NULLS FIRST/LAST tillåts inte i ON CONFLICT-klausul" + +#: parser/parse_clause.c:3159 +#, c-format +msgid "ON CONFLICT DO UPDATE requires inference specification or constraint name" +msgstr "ON CONFLICT DO UPDATE kräver inferensangivelse eller villkorsnamn" + +#: parser/parse_clause.c:3160 +#, c-format +msgid "For example, ON CONFLICT (column_name)." +msgstr "Till exempel, ON CONFLICT (kolumnnamn)." 
+ +#: parser/parse_clause.c:3171 +#, c-format +msgid "ON CONFLICT is not supported with system catalog tables" +msgstr "ON CONFLICT stöds inte för systemkatalogtabeller" + +#: parser/parse_clause.c:3179 +#, c-format +msgid "ON CONFLICT is not supported on table \"%s\" used as a catalog table" +msgstr "ON CONFLICT stöds inte på tabell \"%s\" som används som katalogtabell" + +#: parser/parse_clause.c:3322 +#, c-format +msgid "operator %s is not a valid ordering operator" +msgstr "operator %s är inte en giltig sorteringsoperator" + +#: parser/parse_clause.c:3324 +#, c-format +msgid "Ordering operators must be \"<\" or \">\" members of btree operator families." +msgstr "Sorteringsoperatorer måste vara \"<\"- eller \">\"-medlemmar i btree-operatorfamiljer." + +#: parser/parse_clause.c:3635 +#, c-format +msgid "RANGE with offset PRECEDING/FOLLOWING is not supported for column type %s" +msgstr "RANGE med offset PRECEDING/FOLLOWING stöds inte för kolumntyp %s" + +#: parser/parse_clause.c:3641 +#, c-format +msgid "RANGE with offset PRECEDING/FOLLOWING is not supported for column type %s and offset type %s" +msgstr "RANGE med offset PRECEDING/FOLLOWING stöds inte för kolumntyp %s och offset-typ %s" + +#: parser/parse_clause.c:3644 +#, c-format +msgid "Cast the offset value to an appropriate type." +msgstr "Typomvandla offset-värdet till lämplig typ." + +#: parser/parse_clause.c:3649 +#, c-format +msgid "RANGE with offset PRECEDING/FOLLOWING has multiple interpretations for column type %s and offset type %s" +msgstr "RANGE med offset PRECEDING/FOLLOWING har multipla tolkningar för kolumntyp %s och offset-typ %s" + +#: parser/parse_clause.c:3652 +#, c-format +msgid "Cast the offset value to the exact intended type." +msgstr "Typomvandla offset-värdet till exakt den önskade typen." + +#: parser/parse_coerce.c:1017 parser/parse_coerce.c:1055 +#: parser/parse_coerce.c:1073 parser/parse_coerce.c:1088 +#: parser/parse_expr.c:2153 parser/parse_expr.c:2741 parser/parse_target.c:955 +#, c-format +msgid "cannot cast type %s to %s" +msgstr "kan inte omvandla typ %s till %s" + +#: parser/parse_coerce.c:1058 +#, c-format +msgid "Input has too few columns." +msgstr "Indata har för få kolumner." + +#: parser/parse_coerce.c:1076 +#, c-format +msgid "Cannot cast type %s to %s in column %d." +msgstr "Kan inte typomvandla typ %s till %s i kolumn %d." + +#: parser/parse_coerce.c:1091 +#, c-format +msgid "Input has too many columns." +msgstr "Indata har för många kolumner." + +#. translator: first %s is name of a SQL construct, eg WHERE +#. translator: first %s is name of a SQL construct, eg LIMIT +#: parser/parse_coerce.c:1146 parser/parse_coerce.c:1194 +#, c-format +msgid "argument of %s must be type %s, not type %s" +msgstr "argumentet till %s måste vara av typ %s, inte av typ %s" + +#. translator: %s is name of a SQL construct, eg WHERE +#. translator: %s is name of a SQL construct, eg LIMIT +#: parser/parse_coerce.c:1157 parser/parse_coerce.c:1206 +#, c-format +msgid "argument of %s must not return a set" +msgstr "argumentet till %s får inte returnera en mängd" + +#. translator: first %s is name of a SQL construct, eg CASE +#: parser/parse_coerce.c:1346 +#, c-format +msgid "%s types %s and %s cannot be matched" +msgstr "%s typer %s och %s matchar inte" + +#.
translator: first %s is name of a SQL construct, eg CASE +#: parser/parse_coerce.c:1413 +#, c-format +msgid "%s could not convert type %s to %s" +msgstr "%s kan inte konvertera typ %s till %s" + +#: parser/parse_coerce.c:1715 +#, c-format +msgid "arguments declared \"anyelement\" are not all alike" +msgstr "argument deklarerade som \"anyelement\" är inte alla likadana" + +#: parser/parse_coerce.c:1735 +#, c-format +msgid "arguments declared \"anyarray\" are not all alike" +msgstr "argument deklarerade \"anyarray\" är inte alla likadana" + +#: parser/parse_coerce.c:1755 +#, c-format +msgid "arguments declared \"anyrange\" are not all alike" +msgstr "argument deklarerade \"anyrange\" är inte alla likadana" + +#: parser/parse_coerce.c:1784 parser/parse_coerce.c:1999 +#: parser/parse_coerce.c:2033 +#, c-format +msgid "argument declared %s is not an array but type %s" +msgstr "argumentet deklarerad %s är inte en array utan typ %s" + +#: parser/parse_coerce.c:1800 parser/parse_coerce.c:1839 +#, c-format +msgid "argument declared %s is not consistent with argument declared %s" +msgstr "argument deklarerad %s är inte konsistent med argument deklarerad %s" + +#: parser/parse_coerce.c:1822 parser/parse_coerce.c:2046 +#, c-format +msgid "argument declared %s is not a range type but type %s" +msgstr "argumentet deklarerad %s är inte en intervalltyp utan typ %s" + +#: parser/parse_coerce.c:1860 +#, c-format +msgid "could not determine polymorphic type because input has type %s" +msgstr "kunde inte bestämma en polymorf typ då indata har typ %s" + +#: parser/parse_coerce.c:1871 +#, c-format +msgid "type matched to anynonarray is an array type: %s" +msgstr "typen som matchar anynonarray är en array-typ: %s" + +#: parser/parse_coerce.c:1881 +#, c-format +msgid "type matched to anyenum is not an enum type: %s" +msgstr "typen som matchar anyenum är inte en enum-typ: %s" + +#: parser/parse_coerce.c:1921 parser/parse_coerce.c:1951 +#, c-format +msgid "could not find range type for data type %s" +msgstr "kunde inte hitta intervalltyp för datatyp %s" + +#: parser/parse_collate.c:228 parser/parse_collate.c:475 +#: parser/parse_collate.c:981 +#, c-format +msgid "collation mismatch between implicit collations \"%s\" and \"%s\"" +msgstr "jämförelser (collation) matchar inte mellan implicita jämförelser \"%s\" och \"%s\"" + +#: parser/parse_collate.c:231 parser/parse_collate.c:478 +#: parser/parse_collate.c:984 +#, c-format +msgid "You can choose the collation by applying the COLLATE clause to one or both expressions." +msgstr "Du kan välja jämförelse genom att applicera en COLLATE-klausul till ett eller båda uttrycken." 
+ +#: parser/parse_collate.c:831 +#, c-format +msgid "collation mismatch between explicit collations \"%s\" and \"%s\"" +msgstr "jämförelser (collation) matchar inte mellan explicita jämförelser \"%s\" och \"%s\"" + +#: parser/parse_cte.c:42 +#, c-format +msgid "recursive reference to query \"%s\" must not appear within its non-recursive term" +msgstr "rekursiv referens till fråga \"%s\" får inte finnas inom dess ickerekursiva term" + +#: parser/parse_cte.c:44 +#, c-format +msgid "recursive reference to query \"%s\" must not appear within a subquery" +msgstr "rekursiv referens till fråga \"%s\" får inte finnas i en subfråga" + +#: parser/parse_cte.c:46 +#, c-format +msgid "recursive reference to query \"%s\" must not appear within an outer join" +msgstr "rekursiv referens till fråga \"%s\" får inte finnas i en outer join" + +#: parser/parse_cte.c:48 +#, c-format +msgid "recursive reference to query \"%s\" must not appear within INTERSECT" +msgstr "rekursiv referens till fråga \"%s\" får inte finnas i en INTERSECT" + +#: parser/parse_cte.c:50 +#, c-format +msgid "recursive reference to query \"%s\" must not appear within EXCEPT" +msgstr "rekursiv referens till fråga \"%s\" får inte finnas i en EXCEPT" + +#: parser/parse_cte.c:132 +#, c-format +msgid "WITH query name \"%s\" specified more than once" +msgstr "WITH-frågenamn \"%s\" angivet mer än en gång" + +#: parser/parse_cte.c:264 +#, c-format +msgid "WITH clause containing a data-modifying statement must be at the top level" +msgstr "WITH-klausul som innehåller en datamodifierande sats måste vara på toppnivå" + +#: parser/parse_cte.c:313 +#, c-format +msgid "recursive query \"%s\" column %d has type %s in non-recursive term but type %s overall" +msgstr "rekursiv fråga \"%s\" kolumn %d har typ %s i den ickerekursiva termen med typ %s totalt sett" + +#: parser/parse_cte.c:319 +#, c-format +msgid "Cast the output of the non-recursive term to the correct type." +msgstr "Typomvandla utdatan för den ickerekursiva termen till korrekt typ." + +#: parser/parse_cte.c:324 +#, c-format +msgid "recursive query \"%s\" column %d has collation \"%s\" in non-recursive term but collation \"%s\" overall" +msgstr "rekursiv fråga \"%s\" kolumn %d har jämförelse (collation) \"%s\" i en icke-rekursiv term men jämförelse \"%s\" totalt sett" + +#: parser/parse_cte.c:328 +#, c-format +msgid "Use the COLLATE clause to set the collation of the non-recursive term." +msgstr "Använd en COLLATE-klausul för att sätta jämförelse för den icke-rekursiva termen." 
+ +#: parser/parse_cte.c:418 +#, c-format +msgid "WITH query \"%s\" has %d columns available but %d columns specified" +msgstr "WITH-fråga \"%s\" har %d kolumner tillgängliga men %d kolumner angivna" + +#: parser/parse_cte.c:598 +#, c-format +msgid "mutual recursion between WITH items is not implemented" +msgstr "ömsesidig rekursion mellan WITH-poster är inte implementerat" + +#: parser/parse_cte.c:650 +#, c-format +msgid "recursive query \"%s\" must not contain data-modifying statements" +msgstr "rekursiv fråga \"%s\" får inte innehålla datamodifierande satser" + +#: parser/parse_cte.c:658 +#, c-format +msgid "recursive query \"%s\" does not have the form non-recursive-term UNION [ALL] recursive-term" +msgstr "rekursiv fråga \"%s\" är inte på formen icke-rekursiv-term UNION [ALL] rekursiv-term" + +#: parser/parse_cte.c:702 +#, c-format +msgid "ORDER BY in a recursive query is not implemented" +msgstr "ORDER BY i en rekursiv fråga är inte implementerat" + +#: parser/parse_cte.c:708 +#, c-format +msgid "OFFSET in a recursive query is not implemented" +msgstr "OFFSET i en rekursiv fråga är inte implementerat" + +#: parser/parse_cte.c:714 +#, c-format +msgid "LIMIT in a recursive query is not implemented" +msgstr "LIMIT i en rekursiv fråga är inte implementerat" + +#: parser/parse_cte.c:720 +#, c-format +msgid "FOR UPDATE/SHARE in a recursive query is not implemented" +msgstr "FOR UPDATE/SHARE i en rekursiv fråga är inte implementerat" + +#: parser/parse_cte.c:777 +#, c-format +msgid "recursive reference to query \"%s\" must not appear more than once" +msgstr "rekursiv referens till fråga \"%s\" får inte finnas med mer än en gång" + +#: parser/parse_expr.c:350 +#, c-format +msgid "DEFAULT is not allowed in this context" +msgstr "DEFAULT tillåts inte i detta kontext" + +#: parser/parse_expr.c:403 parser/parse_relation.c:3287 +#: parser/parse_relation.c:3307 +#, c-format +msgid "column %s.%s does not exist" +msgstr "kolumnen %s.%s finns inte" + +#: parser/parse_expr.c:415 +#, c-format +msgid "column \"%s\" not found in data type %s" +msgstr "kolumn \"%s\" fanns inte i datatypen %s" + +#: parser/parse_expr.c:421 +#, c-format +msgid "could not identify column \"%s\" in record data type" +msgstr "kunde inte hitta kolumnen \"%s\" i record-datatyp" + +#: parser/parse_expr.c:427 +#, c-format +msgid "column notation .%s applied to type %s, which is not a composite type" +msgstr "kolumnotation .%s använd på typ %s som inte är en sammanslagen typ" + +#: parser/parse_expr.c:458 parser/parse_target.c:722 +#, c-format +msgid "row expansion via \"*\" is not supported here" +msgstr "radexpansion via \"*\" stöds inte här" + +#: parser/parse_expr.c:771 parser/parse_relation.c:689 +#: parser/parse_relation.c:789 parser/parse_target.c:1193 +#, c-format +msgid "column reference \"%s\" is ambiguous" +msgstr "kolumnreferens \"%s\" är tvetydig" + +#: parser/parse_expr.c:827 parser/parse_param.c:110 parser/parse_param.c:142 +#: parser/parse_param.c:199 parser/parse_param.c:298 +#, c-format +msgid "there is no parameter $%d" +msgstr "det finns ingen parameter $%d" + +#: parser/parse_expr.c:1070 +#, c-format +msgid "NULLIF requires = operator to yield boolean" +msgstr "NULLIF kräver att =-operatorn returnerar boolean" + +#. 
translator: %s is name of a SQL construct, eg NULLIF +#: parser/parse_expr.c:1076 parser/parse_expr.c:3057 +#, c-format +msgid "%s must not return a set" +msgstr "%s får inte returnera en mängd" + +#: parser/parse_expr.c:1524 parser/parse_expr.c:1556 +#, c-format +msgid "number of columns does not match number of values" +msgstr "antalet kolumner matchar inte antalet värden" + +#: parser/parse_expr.c:1570 +#, c-format +msgid "source for a multiple-column UPDATE item must be a sub-SELECT or ROW() expression" +msgstr "källa till en multiple-kolumn-UPDATE-post måste vara en sub-SELECT eller ROW()-uttryck" + +#. translator: %s is name of a SQL construct, eg GROUP BY +#: parser/parse_expr.c:1764 parser/parse_expr.c:2244 parser/parse_func.c:2372 +#, c-format +msgid "set-returning functions are not allowed in %s" +msgstr "mängdreturnerande funktioner tillåts inte i %s" + +#: parser/parse_expr.c:1825 +msgid "cannot use subquery in check constraint" +msgstr "kan inte använda subfråga i check-villkor" + +#: parser/parse_expr.c:1829 +msgid "cannot use subquery in DEFAULT expression" +msgstr "kan inte använda underfråga i DEFAULT-uttryck" + +#: parser/parse_expr.c:1832 +msgid "cannot use subquery in index expression" +msgstr "kan inte använda subfråga i indexuttryck" + +#: parser/parse_expr.c:1835 +msgid "cannot use subquery in index predicate" +msgstr "kan inte använda subfråga i indexpredikat" + +#: parser/parse_expr.c:1838 +msgid "cannot use subquery in transform expression" +msgstr "kan inte använda underfråga i transformeringsuttrycket" + +#: parser/parse_expr.c:1841 +msgid "cannot use subquery in EXECUTE parameter" +msgstr "kan inte använda subfråga i EXECUTE-parameter" + +#: parser/parse_expr.c:1844 +msgid "cannot use subquery in trigger WHEN condition" +msgstr "kan inte använda subfråga i utlösares WHEN-villkor" + +#: parser/parse_expr.c:1847 +msgid "cannot use subquery in partition key expression" +msgstr "kan inte använda underfråga i partitionsnyckeluttryck" + +#: parser/parse_expr.c:1850 +msgid "cannot use subquery in CALL argument" +msgstr "kan inte använda subfråga i CALL-argument" + +#: parser/parse_expr.c:1903 +#, c-format +msgid "subquery must return only one column" +msgstr "underfråga kan bara returnera en kolumn" + +#: parser/parse_expr.c:1987 +#, c-format +msgid "subquery has too many columns" +msgstr "underfråga har för många kolumner" + +#: parser/parse_expr.c:1992 +#, c-format +msgid "subquery has too few columns" +msgstr "underfråga har för få kolumner" + +#: parser/parse_expr.c:2093 +#, c-format +msgid "cannot determine type of empty array" +msgstr "kan inte bestämma typen av en tom array" + +#: parser/parse_expr.c:2094 +#, c-format +msgid "Explicitly cast to the desired type, for example ARRAY[]::integer[]." +msgstr "Typomvandla explicit till den önskade typen, till exempel ARRAY[]::integer[]." 
+ +#: parser/parse_expr.c:2108 +#, c-format +msgid "could not find element type for data type %s" +msgstr "kunde inte hitta elementtyp för datatyp %s" + +#: parser/parse_expr.c:2395 +#, c-format +msgid "unnamed XML attribute value must be a column reference" +msgstr "onamnat XML-attributvärde måste vara en kolumnreferens" + +#: parser/parse_expr.c:2396 +#, c-format +msgid "unnamed XML element value must be a column reference" +msgstr "onamnat XML-elementvärde måste vara en kolumnreferens" + +#: parser/parse_expr.c:2411 +#, c-format +msgid "XML attribute name \"%s\" appears more than once" +msgstr "XML-attributnamn \"%s\" finns med mer än en gång" + +#: parser/parse_expr.c:2518 +#, c-format +msgid "cannot cast XMLSERIALIZE result to %s" +msgstr "kan inte typomvandla XMLSERIALIZE-resultat till %s" + +#: parser/parse_expr.c:2814 parser/parse_expr.c:3010 +#, c-format +msgid "unequal number of entries in row expressions" +msgstr "olika antal element i raduttryck" + +#: parser/parse_expr.c:2824 +#, c-format +msgid "cannot compare rows of zero length" +msgstr "kan inte jämföra rader med längden noll" + +#: parser/parse_expr.c:2849 +#, c-format +msgid "row comparison operator must yield type boolean, not type %s" +msgstr "operator för radjämförelse måste resultera i typen boolean, inte %s" + +#: parser/parse_expr.c:2856 +#, c-format +msgid "row comparison operator must not return a set" +msgstr "radjämförelseoperator får inte returnera en mängd" + +#: parser/parse_expr.c:2915 parser/parse_expr.c:2956 +#, c-format +msgid "could not determine interpretation of row comparison operator %s" +msgstr "kunde inte lista ut tolkning av radjämförelseoperator %s" + +#: parser/parse_expr.c:2917 +#, c-format +msgid "Row comparison operators must be associated with btree operator families." +msgstr "Radjämförelseoperatorer måste vara associerade med btreee-operatorfamiljer." + +#: parser/parse_expr.c:2958 +#, c-format +msgid "There are multiple equally-plausible candidates." +msgstr "Det finns flera lika sannolika kandidater." + +#: parser/parse_expr.c:3051 +#, c-format +msgid "IS DISTINCT FROM requires = operator to yield boolean" +msgstr "IS DISTINCT FROM kräver att operatorn = ger tillbaka en boolean" + +#: parser/parse_expr.c:3370 parser/parse_expr.c:3388 +#, c-format +msgid "operator precedence change: %s is now lower precedence than %s" +msgstr "operator-precedence-ändring: %s har nu lägre precedence än %s" + +#: parser/parse_func.c:185 +#, c-format +msgid "argument name \"%s\" used more than once" +msgstr "argumentnamn \"%s\" angivet mer än en gång" + +#: parser/parse_func.c:196 +#, c-format +msgid "positional argument cannot follow named argument" +msgstr "positionella argument kan inte komma efter namngivna argument" + +#: parser/parse_func.c:278 parser/parse_func.c:2165 +#, c-format +msgid "%s is not a procedure" +msgstr "%s är inte en procedur" + +#: parser/parse_func.c:282 +#, c-format +msgid "To call a function, use SELECT." +msgstr "För att anropa en funktion, använd SELECT." + +#: parser/parse_func.c:288 +#, c-format +msgid "%s is a procedure" +msgstr "\"%s\" är en procedur" + +#: parser/parse_func.c:292 +#, c-format +msgid "To call a procedure, use CALL." 
+msgstr "För att anropa en procedur, använd CALL" + +#: parser/parse_func.c:306 +#, c-format +msgid "%s(*) specified, but %s is not an aggregate function" +msgstr "%s(*) angivet, men %s är inte en aggregatfunktion" + +#: parser/parse_func.c:313 +#, c-format +msgid "DISTINCT specified, but %s is not an aggregate function" +msgstr "DISTINCT angiven, men %s är inte en aggregatfunktion" + +#: parser/parse_func.c:319 +#, c-format +msgid "WITHIN GROUP specified, but %s is not an aggregate function" +msgstr "WITHIN GROUP angiven, men %s är inte en aggregatfunktion" + +#: parser/parse_func.c:325 +#, c-format +msgid "ORDER BY specified, but %s is not an aggregate function" +msgstr "ORDER BY angiven, men %s är inte en aggregatfunktion" + +#: parser/parse_func.c:331 +#, c-format +msgid "FILTER specified, but %s is not an aggregate function" +msgstr "FILTER angiven, men %s är inte en aggregatfunktion" + +#: parser/parse_func.c:337 +#, c-format +msgid "OVER specified, but %s is not a window function nor an aggregate function" +msgstr "OVER angiven, men %s är inte en fönsterfunktion eller en aggregatfunktion" + +#: parser/parse_func.c:375 +#, c-format +msgid "WITHIN GROUP is required for ordered-set aggregate %s" +msgstr "WITHIN GROUP krävs för sorterad-mängd-aggregat %s" + +#: parser/parse_func.c:381 +#, c-format +msgid "OVER is not supported for ordered-set aggregate %s" +msgstr "DISTINCT stöds inte för sorterad-mängd-aggregat %s" + +#: parser/parse_func.c:412 parser/parse_func.c:441 +#, c-format +msgid "There is an ordered-set aggregate %s, but it requires %d direct arguments, not %d." +msgstr "Det finns ett sorterad-mängd-aggregat %s, men det kräver %d direkta argument, inte %d." + +#: parser/parse_func.c:466 +#, c-format +msgid "To use the hypothetical-set aggregate %s, the number of hypothetical direct arguments (here %d) must match the number of ordering columns (here %d)." +msgstr "För att använda hypotetiskt mängdaggregat %s så måste antalet direkta hypotetiska argument (här %d) matcha antalet sorteringskolumner (här %d)." + +#: parser/parse_func.c:480 +#, c-format +msgid "There is an ordered-set aggregate %s, but it requires at least %d direct arguments." +msgstr "Det finns ett sorterad-mängd-aggregat %s, men det kräver minst %d direkta argument." + +#: parser/parse_func.c:499 +#, c-format +msgid "%s is not an ordered-set aggregate, so it cannot have WITHIN GROUP" +msgstr "%s är inte en sorterad-mängd-aggregat, så den kan inte ha WITHIN GROUP" + +#: parser/parse_func.c:512 +#, c-format +msgid "window function %s requires an OVER clause" +msgstr "fönsterfunktion %s kräver en OVER-klausul" + +#: parser/parse_func.c:519 +#, c-format +msgid "window function %s cannot have WITHIN GROUP" +msgstr "fönsterfunktion %s kan inte ha en WITHIN GROUP" + +#: parser/parse_func.c:547 +#, c-format +msgid "function %s is not unique" +msgstr "funktionen %s är inte unik" + +#: parser/parse_func.c:550 +#, c-format +msgid "Could not choose a best candidate function. You might need to add explicit type casts." +msgstr "Kunde inte välja en bästa kandidatfunktion: Du kan behöva lägga till explicita typomvandlingar." + +#: parser/parse_func.c:589 +#, c-format +msgid "No aggregate function matches the given name and argument types. Perhaps you misplaced ORDER BY; ORDER BY must appear after all regular arguments of the aggregate." +msgstr "Ingen aggregatfunktion matchar det givna namnet och argumenttyperna. Kanske har du placerat ORDER BY på fel plats; ORDER BY måste komma efter alla vanliga argument till aggregatet." 
+ +#: parser/parse_func.c:600 +#, c-format +msgid "No function matches the given name and argument types. You might need to add explicit type casts." +msgstr "Ingen funktion matchar det angivna namnet och argumenttyperna. Du kan behöva lägga till explicita typomvandlingar." + +#: parser/parse_func.c:702 +#, c-format +msgid "VARIADIC argument must be an array" +msgstr "VARIADIC-argument måste vara en array" + +#: parser/parse_func.c:754 parser/parse_func.c:818 +#, c-format +msgid "%s(*) must be used to call a parameterless aggregate function" +msgstr "%s(*) måste användas för att anropa en parameterlös aggregatfunktion" + +#: parser/parse_func.c:761 +#, c-format +msgid "aggregates cannot return sets" +msgstr "aggregat kan inte returnera mängder" + +#: parser/parse_func.c:776 +#, c-format +msgid "aggregates cannot use named arguments" +msgstr "aggregat kan inte använda namngivna argument" + +#: parser/parse_func.c:808 +#, c-format +msgid "DISTINCT is not implemented for window functions" +msgstr "DISTINCT är inte implementerad för fönsterfunktioner" + +#: parser/parse_func.c:828 +#, c-format +msgid "aggregate ORDER BY is not implemented for window functions" +msgstr "aggregat-ORDER BY är inte implementerat för fönsterfunktioner" + +#: parser/parse_func.c:837 +#, c-format +msgid "FILTER is not implemented for non-aggregate window functions" +msgstr "FILTER är inte implementerat för icke-aggregat-fönsterfunktioner" + +#: parser/parse_func.c:846 +#, c-format +msgid "window function calls cannot contain set-returning function calls" +msgstr "fönsterfunktioner kan inte innehålla funktionsanrop till funktioner som returnerar mängder" + +#: parser/parse_func.c:854 +#, c-format +msgid "window functions cannot return sets" +msgstr "fönsterfunktioner kan inte returnera mängder" + +#: parser/parse_func.c:2040 +#, c-format +msgid "function name \"%s\" is not unique" +msgstr "funktionsnamn \"%s\" är inte unikt" + +#: parser/parse_func.c:2042 +#, c-format +msgid "Specify the argument list to select the function unambiguously." +msgstr "Ange argumentlistan för att välja funktionen entydigt."
+ +#: parser/parse_func.c:2052 +#, c-format +msgid "could not find a function named \"%s\"" +msgstr "kunde inte hitta funktion med namn \"%s\"" + +#: parser/parse_func.c:2134 +#, c-format +msgid "%s is not a function" +msgstr "%s är inte en funktion" + +#: parser/parse_func.c:2148 +#, c-format +msgid "could not find a procedure named \"%s\"" +msgstr "kunde inte hitta en procedur med namn \"%s\"" + +#: parser/parse_func.c:2153 +#, c-format +msgid "procedure %s does not exist" +msgstr "proceduren \"%s\" finns inte" + +#: parser/parse_func.c:2179 +#, c-format +msgid "could not find a aggregate named \"%s\"" +msgstr "kunde inte hitta ett aggregat med namn \"%s\"" + +#: parser/parse_func.c:2184 +#, c-format +msgid "aggregate %s(*) does not exist" +msgstr "aggregatfunktion %s(*) existerar inte" + +#: parser/parse_func.c:2189 +#, c-format +msgid "aggregate %s does not exist" +msgstr "aggregatfunktion %s existerar inte" + +#: parser/parse_func.c:2202 +#, c-format +msgid "function %s is not an aggregate" +msgstr "funktionen %s är inte en aggregatfunktion" + +#: parser/parse_func.c:2252 +msgid "set-returning functions are not allowed in JOIN conditions" +msgstr "mängdreturnerande funktioner tillåts inte i JOIN-villkor" + +#: parser/parse_func.c:2273 +msgid "set-returning functions are not allowed in policy expressions" +msgstr "mängdreturnerande funktioner tillåts inte i policy-uttryck" + +#: parser/parse_func.c:2289 +msgid "set-returning functions are not allowed in window definitions" +msgstr "mängdreturnerande funktioner tillåts inte i fönsterdefinitioner" + +#: parser/parse_func.c:2327 +msgid "set-returning functions are not allowed in check constraints" +msgstr "mängdreturnerande funktioner tillåts inte i check-villkor" + +#: parser/parse_func.c:2331 +msgid "set-returning functions are not allowed in DEFAULT expressions" +msgstr "mängdreturnerande funktioner tillåts inte i DEFAULT-uttryck" + +#: parser/parse_func.c:2334 +msgid "set-returning functions are not allowed in index expressions" +msgstr "mängdreturnerande funktioner tillåts inte i indexuttryck" + +#: parser/parse_func.c:2337 +msgid "set-returning functions are not allowed in index predicates" +msgstr "mängdreturnerande funktioner tillåts inte i indexpredukat" + +#: parser/parse_func.c:2340 +msgid "set-returning functions are not allowed in transform expressions" +msgstr "mängdreturnerande funktioner tillåts inte i transformuttryck" + +#: parser/parse_func.c:2343 +msgid "set-returning functions are not allowed in EXECUTE parameters" +msgstr "mängdreturnerande funktioner tillåts inte i EXECUTE-parametrar" + +#: parser/parse_func.c:2346 +msgid "set-returning functions are not allowed in trigger WHEN conditions" +msgstr "mängdreturnerande funktioner tillåts inte i WHEN-utlösarvillkor" + +#: parser/parse_func.c:2349 +msgid "set-returning functions are not allowed in partition key expressions" +msgstr "mängdreturnerande funktioner tillåts inte i partitionsnyckeluttryck" + +#: parser/parse_func.c:2352 +msgid "set-returning functions are not allowed in CALL arguments" +msgstr "mängdreturnerande funktioner tillåts inte i CALL-argument" + +#: parser/parse_node.c:87 +#, c-format +msgid "target lists can have at most %d entries" +msgstr "mållista kan ha som mest %d poster" + +#: parser/parse_node.c:256 +#, c-format +msgid "cannot subscript type %s because it is not an array" +msgstr "kan inte indexera typ %s då det inte är en array" + +#: parser/parse_node.c:358 parser/parse_node.c:395 +#, c-format +msgid "array subscript must have type integer" 
+msgstr "arrayindex måste ha typen integer" + +#: parser/parse_node.c:426 +#, c-format +msgid "array assignment requires type %s but expression is of type %s" +msgstr "array-tilldelning kräver typ %s men uttrycket har typ %s" + +#: parser/parse_oper.c:125 parser/parse_oper.c:724 utils/adt/regproc.c:520 +#: utils/adt/regproc.c:704 +#, c-format +msgid "operator does not exist: %s" +msgstr "operator existerar inte: %s" + +#: parser/parse_oper.c:224 +#, c-format +msgid "Use an explicit ordering operator or modify the query." +msgstr "Använd en explicit ordningsoperator eller ändra frågan." + +#: parser/parse_oper.c:480 +#, c-format +msgid "operator requires run-time type coercion: %s" +msgstr "operator kräver run-time-typomvandling: %s" + +#: parser/parse_oper.c:716 +#, c-format +msgid "operator is not unique: %s" +msgstr "operatorn är inte unik: %s" + +#: parser/parse_oper.c:718 +#, c-format +msgid "Could not choose a best candidate operator. You might need to add explicit type casts." +msgstr "Kunde inte välja en bästa kandidatoperator. Du behöver troligen lägga till en explicit typomvandling." + +#: parser/parse_oper.c:727 +#, c-format +msgid "No operator matches the given name and argument type. You might need to add an explicit type cast." +msgstr "Ingen operator matchar det angivna namnet och argumenttyp. Du kan behöva lägga till explicita typomvandlingar." + +#: parser/parse_oper.c:729 +#, c-format +msgid "No operator matches the given name and argument types. You might need to add explicit type casts." +msgstr "Ingen operator matchar det angivna namnet och argumenttyperna. Du kan behöva lägga till explicita typomvandlingar." + +#: parser/parse_oper.c:790 parser/parse_oper.c:912 +#, c-format +msgid "operator is only a shell: %s" +msgstr "operator är bara en skaltyp: %s" + +#: parser/parse_oper.c:900 +#, c-format +msgid "op ANY/ALL (array) requires array on right side" +msgstr "op ANY/ALL (array) kräver en array på höger sida" + +#: parser/parse_oper.c:942 +#, c-format +msgid "op ANY/ALL (array) requires operator to yield boolean" +msgstr "op ANY/ALL (array) kräver att operatorn returnerar en boolean" + +#: parser/parse_oper.c:947 +#, c-format +msgid "op ANY/ALL (array) requires operator not to return a set" +msgstr "op ANY/ALL (array) kräver att operatorn inte returnerar en mängd" + +#: parser/parse_param.c:216 +#, c-format +msgid "inconsistent types deduced for parameter $%d" +msgstr "inkonsistenta typer härledda för parameter $%d" + +#: parser/parse_relation.c:176 +#, c-format +msgid "table reference \"%s\" is ambiguous" +msgstr "tabellreferens \"%s\" är tvetydig" + +#: parser/parse_relation.c:220 +#, c-format +msgid "table reference %u is ambiguous" +msgstr "tabellreferens %u är tvetydig" + +#: parser/parse_relation.c:419 +#, c-format +msgid "table name \"%s\" specified more than once" +msgstr "tabellnamn \"%s\" angivet mer än en gång" + +#: parser/parse_relation.c:446 parser/parse_relation.c:3227 +#, c-format +msgid "invalid reference to FROM-clause entry for table \"%s\"" +msgstr "ogiltig referens till FROM-klausulpost för tabell \"%s\"" + +#: parser/parse_relation.c:449 parser/parse_relation.c:3232 +#, c-format +msgid "There is an entry for table \"%s\", but it cannot be referenced from this part of the query." +msgstr "Det finns en post för tabell \"%s\" men den kan inte refereras till från denna del av frågan." + +#: parser/parse_relation.c:451 +#, c-format +msgid "The combining JOIN type must be INNER or LEFT for a LATERAL reference." 
+msgstr "JOIN-typen måste vara INNER eller LEFT för att fungera med LATERAL." + +#: parser/parse_relation.c:727 +#, c-format +msgid "system column \"%s\" reference in check constraint is invalid" +msgstr "systemkolumn \"%s\" som refereras till i check-villkor är ogiltigt" + +#: parser/parse_relation.c:1086 parser/parse_relation.c:1366 +#: parser/parse_relation.c:1936 +#, c-format +msgid "table \"%s\" has %d columns available but %d columns specified" +msgstr "tabell \"%s\" har %d kolumner tillgängliga men %d kolumner angivna" + +#: parser/parse_relation.c:1173 +#, c-format +msgid "There is a WITH item named \"%s\", but it cannot be referenced from this part of the query." +msgstr "Det finns en WITH-post med namn \"%s\" men den kan inte refereras till från denna del av frågan." + +#: parser/parse_relation.c:1175 +#, c-format +msgid "Use WITH RECURSIVE, or re-order the WITH items to remove forward references." +msgstr "Använd WITH RECURSIVE eller ändra ordning på WITH-posterna för att ta bort framåt-referenser." + +#: parser/parse_relation.c:1486 +#, c-format +msgid "a column definition list is only allowed for functions returning \"record\"" +msgstr "en kolumndefinitionslista tillåts bara för funktioner som returnerar \"record\"" + +#: parser/parse_relation.c:1495 +#, c-format +msgid "a column definition list is required for functions returning \"record\"" +msgstr "en kolumndefinitionslista krävs för funktioner som returnerar \"record\"" + +#: parser/parse_relation.c:1575 +#, c-format +msgid "function \"%s\" in FROM has unsupported return type %s" +msgstr "funktion \"%s\" i FROM har en icke stödd returtyp %s" + +#: parser/parse_relation.c:1764 +#, c-format +msgid "VALUES lists \"%s\" have %d columns available but %d columns specified" +msgstr "VALUES-lista \"%s\" har %d kolumner tillgängliga men %d kolumner angivna" + +#: parser/parse_relation.c:1819 +#, c-format +msgid "joins can have at most %d columns" +msgstr "joins kan ha som mest %d kolumner" + +#: parser/parse_relation.c:1909 +#, c-format +msgid "WITH query \"%s\" does not have a RETURNING clause" +msgstr "WITH-fråga \"%s\" har ingen RETURNING-klausul" + +#: parser/parse_relation.c:2846 parser/parse_relation.c:2884 +#: parser/parse_relation.c:3011 +#, c-format +msgid "column %d of relation \"%s\" does not exist" +msgstr "kolumn %d i relation \"%s\" finns inte" + +#: parser/parse_relation.c:3230 +#, c-format +msgid "Perhaps you meant to reference the table alias \"%s\"." +msgstr "Kanske tänkte du referera till tabellaliaset \"%s\"." + +#: parser/parse_relation.c:3238 +#, c-format +msgid "missing FROM-clause entry for table \"%s\"" +msgstr "saknar FROM-klausulpost för tabell \"%s\"" + +#: parser/parse_relation.c:3290 +#, c-format +msgid "Perhaps you meant to reference the column \"%s.%s\"." +msgstr "Kanske tänkte du referera trill kolumnen \"%s.%s\"." + +#: parser/parse_relation.c:3292 +#, c-format +msgid "There is a column named \"%s\" in table \"%s\", but it cannot be referenced from this part of the query." +msgstr "Det finns en kolumn med namn \"%s\" i tabell \"%s\" men den kan inte refereras till från denna del av frågan." + +#: parser/parse_relation.c:3309 +#, c-format +msgid "Perhaps you meant to reference the column \"%s.%s\" or the column \"%s.%s\"." +msgstr "Kanske tänkte du referera till kolumnen \"%s.%s\" eller kolumnen \"%s.%s\"." 
+ +#: parser/parse_target.c:483 parser/parse_target.c:784 +#, c-format +msgid "cannot assign to system column \"%s\"" +msgstr "kan inte skriva till systemkolumn \"%s\"" + +#: parser/parse_target.c:511 +#, c-format +msgid "cannot set an array element to DEFAULT" +msgstr "kan inte sätta ett array-element till DEFAULT" + +#: parser/parse_target.c:516 +#, c-format +msgid "cannot set a subfield to DEFAULT" +msgstr "kan inte sätta ett underfält till DEFAULT" + +#: parser/parse_target.c:585 +#, c-format +msgid "column \"%s\" is of type %s but expression is of type %s" +msgstr "kolumn \"%s\" har typ %s men uttrycket är av typ %s" + +#: parser/parse_target.c:768 +#, c-format +msgid "cannot assign to field \"%s\" of column \"%s\" because its type %s is not a composite type" +msgstr "kan inte tilldela till fält \"%s\" i kolumn \"%s\" då dess typ %s inte är en composit-typ" + +#: parser/parse_target.c:777 +#, c-format +msgid "cannot assign to field \"%s\" of column \"%s\" because there is no such column in data type %s" +msgstr "kan inte tilldela till fält \"%s\" i kolumn \"%s\" då det inte finns någon sådan kolumn i datatypen %s" + +#: parser/parse_target.c:854 +#, c-format +msgid "array assignment to \"%s\" requires type %s but expression is of type %s" +msgstr "array-tilldelning till \"%s\" kräver typ %s men uttrycket har typ %s" + +#: parser/parse_target.c:864 +#, c-format +msgid "subfield \"%s\" is of type %s but expression is of type %s" +msgstr "underfält \"%s\" har typ %s men uttrycket har typ %s" + +#: parser/parse_target.c:1283 +#, c-format +msgid "SELECT * with no tables specified is not valid" +msgstr "SELECT * utan tabeller angivna är inte giltigt" + +#: parser/parse_type.c:83 +#, c-format +msgid "improper %%TYPE reference (too few dotted names): %s" +msgstr "dålig %%TYPE-referens (för få punktade namn): %s" + +#: parser/parse_type.c:105 +#, c-format +msgid "improper %%TYPE reference (too many dotted names): %s" +msgstr "dålig %%TYPE-referens (för många punktade namn): %s" + +#: parser/parse_type.c:140 +#, c-format +msgid "type reference %s converted to %s" +msgstr "typreferens %s konverterad till %s" + +#: parser/parse_type.c:261 parser/parse_type.c:838 utils/cache/typcache.c:373 +#, c-format +msgid "type \"%s\" is only a shell" +msgstr "typ \"%s\" är bara ett skal" + +#: parser/parse_type.c:346 +#, c-format +msgid "type modifier is not allowed for type \"%s\"" +msgstr "typmodifierare tillåts inte för typ \"%s\"" + +#: parser/parse_type.c:388 +#, c-format +msgid "type modifiers must be simple constants or identifiers" +msgstr "typmodifierare måste vara enkla konstanter eller identifierare" + +#: parser/parse_type.c:704 parser/parse_type.c:803 +#, c-format +msgid "invalid type name \"%s\"" +msgstr "ogiltigt typnamn \"%s\"" + +#: parser/parse_utilcmd.c:272 +#, c-format +msgid "cannot create partitioned table as inheritance child" +msgstr "kan inte skapa partitionerad tabell som barnarv" + +#: parser/parse_utilcmd.c:447 +#, c-format +msgid "%s will create implicit sequence \"%s\" for serial column \"%s.%s\"" +msgstr "%s kommer skapa en implicit sekvens \"%s\" för \"serial\"-kolumnen \"%s.%s\"" + +#: parser/parse_utilcmd.c:570 +#, c-format +msgid "array of serial is not implemented" +msgstr "array med serial är inte implementerat" + +#: parser/parse_utilcmd.c:646 parser/parse_utilcmd.c:658 +#, c-format +msgid "conflicting NULL/NOT NULL declarations for column \"%s\" of table \"%s\"" +msgstr "motstridiga NULL/NOT NULL-villkor för kolumnen \"%s\" i tabell \"%s\"" + +#:
parser/parse_utilcmd.c:670 +#, c-format +msgid "multiple default values specified for column \"%s\" of table \"%s\"" +msgstr "multipla default-värden angivna för kolumn \"%s\" i tabell \"%s\"" + +#: parser/parse_utilcmd.c:687 +#, c-format +msgid "identity columns are not supported on typed tables" +msgstr "identitetskolumner stöds inte på typade tabeller" + +#: parser/parse_utilcmd.c:691 +#, c-format +msgid "identity columns are not supported on partitions" +msgstr "identitetskolumner stöds inte för partitioner" + +#: parser/parse_utilcmd.c:700 +#, c-format +msgid "multiple identity specifications for column \"%s\" of table \"%s\"" +msgstr "multipla identitetspecifikationer för kolumn \"%s\" i tabell \"%s\"" + +#: parser/parse_utilcmd.c:723 parser/parse_utilcmd.c:822 +#, c-format +msgid "primary key constraints are not supported on foreign tables" +msgstr "primärnyckelvillkor stöds inte på främmande tabeller" + +#: parser/parse_utilcmd.c:732 parser/parse_utilcmd.c:832 +#, c-format +msgid "unique constraints are not supported on foreign tables" +msgstr "unika villkor stöds inte på främmande tabeller" + +#: parser/parse_utilcmd.c:749 parser/parse_utilcmd.c:862 +#, c-format +msgid "foreign key constraints are not supported on foreign tables" +msgstr "främmande nyckel-villkor stöds inte för främmande tabeller" + +#: parser/parse_utilcmd.c:777 +#, c-format +msgid "both default and identity specified for column \"%s\" of table \"%s\"" +msgstr "både default och identity angiven för kolumn \"%s\" i tabell \"%s\"" + +#: parser/parse_utilcmd.c:842 +#, c-format +msgid "exclusion constraints are not supported on foreign tables" +msgstr "uteslutningsvillkor stöds inte på främmande tabeller" + +#: parser/parse_utilcmd.c:848 +#, c-format +msgid "exclusion constraints are not supported on partitioned tables" +msgstr "uteslutningsvillkor stöds inte för partitionerade tabeller" + +#: parser/parse_utilcmd.c:912 +#, c-format +msgid "LIKE is not supported for creating foreign tables" +msgstr "LIKE stöds inte för att skapa främmande tabeller" + +#: parser/parse_utilcmd.c:1517 parser/parse_utilcmd.c:1624 +#, c-format +msgid "Index \"%s\" contains a whole-row table reference." +msgstr "Index \"%s\" innehåller en hela-raden-referens." + +#: parser/parse_utilcmd.c:1974 +#, c-format +msgid "cannot use an existing index in CREATE TABLE" +msgstr "kan inte använda ett existerande index i CREATE TABLE" + +#: parser/parse_utilcmd.c:1994 +#, c-format +msgid "index \"%s\" is already associated with a constraint" +msgstr "index \"%s\" är redan associerad med ett villkor" + +#: parser/parse_utilcmd.c:2002 +#, c-format +msgid "index \"%s\" does not belong to table \"%s\"" +msgstr "index \"%s\" tillhör inte tabell \"%s\"" + +#: parser/parse_utilcmd.c:2009 +#, c-format +msgid "index \"%s\" is not valid" +msgstr "index \"%s\" är inte giltigt" + +#: parser/parse_utilcmd.c:2015 +#, c-format +msgid "\"%s\" is not a unique index" +msgstr "\"%s\" är inte ett unikt index" + +#: parser/parse_utilcmd.c:2016 parser/parse_utilcmd.c:2023 +#: parser/parse_utilcmd.c:2030 parser/parse_utilcmd.c:2102 +#, c-format +msgid "Cannot create a primary key or unique constraint using such an index." +msgstr "Kan inte skapa en primärnyckel eller ett unikt villkor med hjälp av ett sådant index." 
+ +#: parser/parse_utilcmd.c:2022 +#, c-format +msgid "index \"%s\" contains expressions" +msgstr "index \"%s\" innehåller uttryck" + +#: parser/parse_utilcmd.c:2029 +#, c-format +msgid "\"%s\" is a partial index" +msgstr "\"%s\" är ett partiellt index" + +#: parser/parse_utilcmd.c:2041 +#, c-format +msgid "\"%s\" is a deferrable index" +msgstr "\"%s\" är ett \"deferrable\" index" + +#: parser/parse_utilcmd.c:2042 +#, c-format +msgid "Cannot create a non-deferrable constraint using a deferrable index." +msgstr "Kan inte skapa ett icke-\"deferrable\" integritetsvillkor från ett \"deferrable\" index." + +#: parser/parse_utilcmd.c:2101 +#, c-format +msgid "index \"%s\" does not have default sorting behavior" +msgstr "index \"%s\" har ingen standard för sorteringsbeteende" + +#: parser/parse_utilcmd.c:2250 +#, c-format +msgid "column \"%s\" appears twice in primary key constraint" +msgstr "kolumn \"%s\" finns med två gånger i primära nyckel-villkoret" + +#: parser/parse_utilcmd.c:2256 +#, c-format +msgid "column \"%s\" appears twice in unique constraint" +msgstr "kolumn \"%s\" finns med två gånger i unique-villkoret" + +#: parser/parse_utilcmd.c:2579 +#, c-format +msgid "index expressions and predicates can refer only to the table being indexed" +msgstr "indexuttryck och predikat kan bara referera till tabellen som indexeras" + +#: parser/parse_utilcmd.c:2625 +#, c-format +msgid "rules on materialized views are not supported" +msgstr "regler på materialiserade vyer stöds inte" + +#: parser/parse_utilcmd.c:2686 +#, c-format +msgid "rule WHERE condition cannot contain references to other relations" +msgstr "WHERE-villkor i regel kan inte innehålla referenser till andra relationer" + +#: parser/parse_utilcmd.c:2758 +#, c-format +msgid "rules with WHERE conditions can only have SELECT, INSERT, UPDATE, or DELETE actions" +msgstr "regler med WHERE-villkor kan bara innehålla SELECT-, INSERT-, UPDATE- eller DELETE-handlingar" + +#: parser/parse_utilcmd.c:2776 parser/parse_utilcmd.c:2875 +#: rewrite/rewriteHandler.c:497 rewrite/rewriteManip.c:1015 +#, c-format +msgid "conditional UNION/INTERSECT/EXCEPT statements are not implemented" +msgstr "UNION-/INTERSECT-/EXCEPT-satser med villkor är inte implementerat" + +#: parser/parse_utilcmd.c:2794 +#, c-format +msgid "ON SELECT rule cannot use OLD" +msgstr "ON SELECT-regel kan inte använda OLD" + +#: parser/parse_utilcmd.c:2798 +#, c-format +msgid "ON SELECT rule cannot use NEW" +msgstr "ON SELECT-regel kan inte använda NEW" + +#: parser/parse_utilcmd.c:2807 +#, c-format +msgid "ON INSERT rule cannot use OLD" +msgstr "ON INSERT-regel kan inte använda OLD" + +#: parser/parse_utilcmd.c:2813 +#, c-format +msgid "ON DELETE rule cannot use NEW" +msgstr "ON DELETE-regel kan inte använda NEW" + +#: parser/parse_utilcmd.c:2841 +#, c-format +msgid "cannot refer to OLD within WITH query" +msgstr "kan inte referera till OLD i WITH-fråga" + +#: parser/parse_utilcmd.c:2848 +#, c-format +msgid "cannot refer to NEW within WITH query" +msgstr "kan inte referera till NEW i WITH-fråga" + +#: parser/parse_utilcmd.c:3286 +#, c-format +msgid "misplaced DEFERRABLE clause" +msgstr "felplacerad DEFERRABLE-klausul" + +#: parser/parse_utilcmd.c:3291 parser/parse_utilcmd.c:3306 +#, c-format +msgid "multiple DEFERRABLE/NOT DEFERRABLE clauses not allowed" +msgstr "multipla DEFERRABLE/NOT DEFERRABLE-klausuler tillåts inte" + +#: parser/parse_utilcmd.c:3301 +#, c-format +msgid "misplaced NOT DEFERRABLE clause" +msgstr "felplacerad NOT DEFERRABLE-klausul" + +#: parser/parse_utilcmd.c:3322 
+#, c-format +msgid "misplaced INITIALLY DEFERRED clause" +msgstr "felplacerad INITIALLY DEFERRED-klausul" + +#: parser/parse_utilcmd.c:3327 parser/parse_utilcmd.c:3353 +#, c-format +msgid "multiple INITIALLY IMMEDIATE/DEFERRED clauses not allowed" +msgstr "multipla INITIALLY IMMEDIATE/DEFERRED-klausuler tillåts inte" + +#: parser/parse_utilcmd.c:3348 +#, c-format +msgid "misplaced INITIALLY IMMEDIATE clause" +msgstr "felplacerad klausul INITIALLY IMMEDIATE" + +#: parser/parse_utilcmd.c:3539 +#, c-format +msgid "CREATE specifies a schema (%s) different from the one being created (%s)" +msgstr "CREATE anger ett schema (%s) som skiljer sig från det som skapas (%s)" + +#: parser/parse_utilcmd.c:3573 +#, c-format +msgid "table \"%s\" is not partitioned" +msgstr "tabell \"%s\" är inte partitionerad" + +#: parser/parse_utilcmd.c:3580 +#, c-format +msgid "index \"%s\" is not partitioned" +msgstr "index \"%s\" är inte partitionerad" + +#: parser/parse_utilcmd.c:3614 +#, c-format +msgid "a hash-partitioned table may not have a default partition" +msgstr "en hash-partitionerad tabell får inte ha en standardpartition" + +#: parser/parse_utilcmd.c:3631 +#, c-format +msgid "invalid bound specification for a hash partition" +msgstr "ogiltig gränsangivelse för hash-partition" + +#: parser/parse_utilcmd.c:3637 partitioning/partbounds.c:2136 +#, c-format +msgid "modulus for hash partition must be a positive integer" +msgstr "modulo för hash-partition vara ett positivt integer" + +#: parser/parse_utilcmd.c:3644 partitioning/partbounds.c:2144 +#, c-format +msgid "remainder for hash partition must be less than modulus" +msgstr "rest för hash-partition måste vara lägre än modulo" + +#: parser/parse_utilcmd.c:3656 +#, c-format +msgid "invalid bound specification for a list partition" +msgstr "ogiltig gränsangivelse för listpartition" + +#: parser/parse_utilcmd.c:3712 +#, c-format +msgid "invalid bound specification for a range partition" +msgstr "ogiltig gränsangivelse för range-partition" + +#: parser/parse_utilcmd.c:3718 +#, c-format +msgid "FROM must specify exactly one value per partitioning column" +msgstr "FROM måste ge exakt ett värde per partitionerande kolumn" + +#: parser/parse_utilcmd.c:3722 +#, c-format +msgid "TO must specify exactly one value per partitioning column" +msgstr "TO måste ge exakt ett värde per partitionerande kolumn" + +#: parser/parse_utilcmd.c:3769 parser/parse_utilcmd.c:3783 +#, c-format +msgid "cannot specify NULL in range bound" +msgstr "kan inte ange NULL i range-gräns" + +#: parser/parse_utilcmd.c:3830 +#, c-format +msgid "every bound following MAXVALUE must also be MAXVALUE" +msgstr "varje gräns efter MAXVALUE måste också vara MAXVALUE" + +#: parser/parse_utilcmd.c:3837 +#, c-format +msgid "every bound following MINVALUE must also be MINVALUE" +msgstr "varje gräns efter MINVALUE måste också vara MINVALUE" + +#: parser/parse_utilcmd.c:3868 parser/parse_utilcmd.c:3880 +#, c-format +msgid "specified value cannot be cast to type %s for column \"%s\"" +msgstr "angivet värde kan inte typomvandlas till typ %s för kolumn \"%s\"" + +#: parser/parse_utilcmd.c:3882 +#, c-format +msgid "The cast requires a non-immutable conversion." +msgstr "Typomvandligen kräver en icke-immuterbar konvertering." + +#: parser/parse_utilcmd.c:3883 +#, c-format +msgid "Try putting the literal value in single quotes." +msgstr "Försöka att sätta literalen inom enkelcitattecken." 
+ +#: parser/scansup.c:204 +#, c-format +msgid "identifier \"%s\" will be truncated to \"%s\"" +msgstr "identifierare \"%s\" kommer trunkeras till \"%s\"" + +#: partitioning/partbounds.c:331 +#, c-format +msgid "partition \"%s\" conflicts with existing default partition \"%s\"" +msgstr "partition \"%s\" står i konflikt med existerande default-partition \"%s\"" + +#: partitioning/partbounds.c:390 +#, c-format +msgid "every hash partition modulus must be a factor of the next larger modulus" +msgstr "varje hash-partition-modulo måste vara en faktor av nästa högre modulo" + +#: partitioning/partbounds.c:486 +#, c-format +msgid "empty range bound specified for partition \"%s\"" +msgstr "tom intervallsgräns angiven för partition \"%s\"" + +#: partitioning/partbounds.c:488 +#, c-format +msgid "Specified lower bound %s is greater than or equal to upper bound %s." +msgstr "Angiven lägre gräns %s är större än eller lika med övre gräns %s." + +#: partitioning/partbounds.c:585 +#, c-format +msgid "partition \"%s\" would overlap partition \"%s\"" +msgstr "partition \"%s\" skulle överlappa partition \"%s\"" + +#: partitioning/partbounds.c:685 +#, c-format +msgid "skipped scanning foreign table \"%s\" which is a partition of default partition \"%s\"" +msgstr "hoppade över skanning av främmande tabell \"%s\" som är en partition av standardpartitionen \"%s\"" + +#: partitioning/partbounds.c:724 +#, c-format +msgid "updated partition constraint for default partition \"%s\" would be violated by some row" +msgstr "uppdaterat partitionsintegritetsvillkor för standardpartition \"%s\" skulle brytas mot av någon rad" + +#: partitioning/partbounds.c:2140 +#, c-format +msgid "remainder for hash partition must be a non-negative integer" +msgstr "rest för hash-partition måste vara ett icke-negativt heltal" + +#: partitioning/partbounds.c:2167 +#, c-format +msgid "\"%s\" is not a hash partitioned table" +msgstr "\"%s\" är inte en hash-partitionerad tabell" + +#: partitioning/partbounds.c:2178 partitioning/partbounds.c:2294 +#, c-format +msgid "number of partitioning columns (%d) does not match number of partition keys provided (%d)" +msgstr "antalet partitioneringskolumner (%d) stämmer inte med antalet partitioneringsnycklar som angivits (%d)" + +#: partitioning/partbounds.c:2198 partitioning/partbounds.c:2230 +#, c-format +msgid "column %d of the partition key has type \"%s\", but supplied value is of type \"%s\"" +msgstr "kolumn %d i partitioneringsnyckeln har typ \"%s\" men använt värde har typ \"%s\"" + +#: port/pg_shmem.c:196 port/sysv_shmem.c:196 +#, c-format +msgid "could not create shared memory segment: %m" +msgstr "kunde inte skapa delat minnessegment: %m" + +#: port/pg_shmem.c:197 port/sysv_shmem.c:197 +#, c-format +msgid "Failed system call was shmget(key=%lu, size=%zu, 0%o)." +msgstr "Misslyckade systemanropet var shmget(key=%lu, size=%zu, 0%o)." + +#: port/pg_shmem.c:201 port/sysv_shmem.c:201 +#, c-format +msgid "" +"This error usually means that PostgreSQL's request for a shared memory segment exceeded your kernel's SHMMAX parameter, or possibly that it is less than your kernel's SHMMIN parameter.\n" +"The PostgreSQL documentation contains more information about shared memory configuration." +msgstr "" +"Felet betyder vanligen att PostgreSQLs begäran av delat minnessegment överskred kärnans SHMMAX-parameter eller möjligen att det är lägre än kärnans SHMMIN-parameter.\n" +"PostgreSQLs dokumentation innehåller mer information om konfiguration av delat minne."
+ +#: port/pg_shmem.c:208 port/sysv_shmem.c:208 +#, c-format +msgid "" +"This error usually means that PostgreSQL's request for a shared memory segment exceeded your kernel's SHMALL parameter. You might need to reconfigure the kernel with larger SHMALL.\n" +"The PostgreSQL documentation contains more information about shared memory configuration." +msgstr "" +"Felet betyder vanligen att PostgreSQLs begäran av delat minnessegment överskred kärnans SHMALL-parameter. Du kan behöva rekonfigurera kärnan med ett större SHMALL.\n" +"PostgreSQLs dokumentation innehåller mer information om konfiguration av delat minne." + +#: port/pg_shmem.c:214 port/sysv_shmem.c:214 +#, c-format +msgid "" +"This error does *not* mean that you have run out of disk space. It occurs either if all available shared memory IDs have been taken, in which case you need to raise the SHMMNI parameter in your kernel, or because the system's overall limit for shared memory has been reached.\n" +"The PostgreSQL documentation contains more information about shared memory configuration." +msgstr "" +"Felet betyder *inte* att diskutrymmet tagit slut. Felet sker antingen om alla tillgängliga ID-nummer för delat minne tagit slut och då behöver du öka kärnans SHMMNI-parameter eller för att systemets totala gräns för delat minne har nåtts.\n" +"PostgreSQLs dokumentation innehåller mer information om konfiguration av delat minne." + +#: port/pg_shmem.c:505 port/sysv_shmem.c:505 +#, c-format +msgid "could not map anonymous shared memory: %m" +msgstr "kunde inte mappa anonymt delat minne: %m" + +#: port/pg_shmem.c:507 port/sysv_shmem.c:507 +#, c-format +msgid "This error usually means that PostgreSQL's request for a shared memory segment exceeded available memory, swap space, or huge pages. To reduce the request size (currently %zu bytes), reduce PostgreSQL's shared memory usage, perhaps by reducing shared_buffers or max_connections." +msgstr "Detta fel betyder vanligtvis att PostgreSQL:s begäran av delat minnessegment överskrider mängden tillgängligt minne, swap eller stora sidor. För att minska begärd storlek (nu %zu byte) minska PostgreSQL:s användning av delat minne t.ex. genom att dra ner på shared_buffers eller max_connections." + +#: port/pg_shmem.c:573 port/sysv_shmem.c:573 +#, c-format +msgid "huge pages not supported on this platform" +msgstr "stora sidor stöds inte på denna plattform" + +#: port/pg_shmem.c:668 port/sysv_shmem.c:668 +#, c-format +msgid "could not stat data directory \"%s\": %m" +msgstr "kunde inte göra stat() på datakatalog \"%s\": %m" + +#: port/sysv_sema.c:123 +#, c-format +msgid "could not create semaphores: %m" +msgstr "kunde inte skapa semaforer: %m" + +#: port/sysv_sema.c:124 +#, c-format +msgid "Failed system call was semget(%lu, %d, 0%o)." +msgstr "Misslyckade systemanropet var semget(%lu, %d, 0%o)." + +#: port/sysv_sema.c:128 +#, c-format +msgid "" +"This error does *not* mean that you have run out of disk space. It occurs when either the system limit for the maximum number of semaphore sets (SEMMNI), or the system wide maximum number of semaphores (SEMMNS), would be exceeded. You need to raise the respective kernel parameter. Alternatively, reduce PostgreSQL's consumption of semaphores by reducing its max_connections parameter.\n" +"The PostgreSQL documentation contains more information about configuring your system for PostgreSQL." +msgstr "Detta fel betyder *inte* att disken blivit full.
Detta fel kommer när systemgränsen för maximalt antal semaforvektorer (SEMMNI) överskridits eller när systemets globala maximum för semaforer (SEMMNS) överskridits. Du behöver öka respektive kernel-parameter. Alternativt kan du minska PostgreSQL:s användning av semaforer genom att dra ner på parametern max_connections. PostgreSQL:s dokumentation innehåller mer information om hur du konfigurerar systemet för PostgreSQL." + +#: port/sysv_sema.c:158 +#, c-format +msgid "You possibly need to raise your kernel's SEMVMX value to be at least %d. Look into the PostgreSQL documentation for details." +msgstr "Du kan behöva öka kärnans SEMVMX-värde till minst %d. Se PostgreSQL:s dokumentation för mer information." + +#: port/win32/crashdump.c:121 +#, c-format +msgid "could not load dbghelp.dll, cannot write crash dump\n" +msgstr "kunde inte ladda dbghelp.dll, kan inte skriva krasch-dump\n" + +#: port/win32/crashdump.c:129 +#, c-format +msgid "could not load required functions in dbghelp.dll, cannot write crash dump\n" +msgstr "kunde inte ladda behövda funktioner i dbghelp.dll, kan inte skriva krasch-dump\n" + +#: port/win32/crashdump.c:160 +#, c-format +msgid "could not open crash dump file \"%s\" for writing: error code %lu\n" +msgstr "kunde inte öppna kraschdumpfil \"%s\" för skrivning: felkod %lu\n" + +#: port/win32/crashdump.c:167 +#, c-format +msgid "wrote crash dump to file \"%s\"\n" +msgstr "skrev kraschdump till fil \"%s\"\n" + +#: port/win32/crashdump.c:169 +#, c-format +msgid "could not write crash dump to file \"%s\": error code %lu\n" +msgstr "kunde inte skriva kraschdump till fil \"%s\": felkod %lu\n" + +#: port/win32/signal.c:194 +#, c-format +msgid "could not create signal listener pipe for PID %d: error code %lu" +msgstr "kunde inte skapa signallyssnarrör (pipe) för PID %d: felkod %lu" + +#: port/win32/signal.c:274 port/win32/signal.c:306 +#, c-format +msgid "could not create signal listener pipe: error code %lu; retrying\n" +msgstr "kunde inte skapa signallyssnar-pipe: felkod %lu; försöker igen\n" + +#: port/win32/signal.c:317 +#, c-format +msgid "could not create signal dispatch thread: error code %lu\n" +msgstr "kunde inte skapa signal-dispatch-tråd: felkod %lu\n" + +#: port/win32_sema.c:104 +#, c-format +msgid "could not create semaphore: error code %lu" +msgstr "kunde inte skapa semafor: felkod %lu" + +#: port/win32_sema.c:181 +#, c-format +msgid "could not lock semaphore: error code %lu" +msgstr "kunde inte låsa semafor: felkod %lu" + +#: port/win32_sema.c:201 +#, c-format +msgid "could not unlock semaphore: error code %lu" +msgstr "kunde inte låsa upp semafor: felkod %lu" + +#: port/win32_sema.c:231 +#, c-format +msgid "could not try-lock semaphore: error code %lu" +msgstr "kunde inte utföra \"try-lock\" på semafor: felkod %lu" + +#: port/win32_shmem.c:122 port/win32_shmem.c:130 port/win32_shmem.c:142 +#: port/win32_shmem.c:157 +#, c-format +msgid "could not enable Lock Pages in Memory user right: error code %lu" +msgstr "kunde inte aktivera användarrättigheten \"Lock Pages in Memory\": felkod %lu" + +#: port/win32_shmem.c:123 port/win32_shmem.c:131 port/win32_shmem.c:143 +#: port/win32_shmem.c:158 +#, c-format +msgid "Failed system call was %s." +msgstr "Misslyckat systemanrop var %s."
+ +#: port/win32_shmem.c:153 +#, c-format +msgid "could not enable Lock Pages in Memory user right" +msgstr "kunde inte aktivera användarrättigheten \"Lock Pages in Memory\"" + +#: port/win32_shmem.c:154 +#, c-format +msgid "Assign Lock Pages in Memory user right to the Windows user account which runs PostgreSQL." +msgstr "Tilldela användarrättigheten \"Lock Pages in Memory\" till Windows-användarkontot som kör PostgreSQL." + +#: port/win32_shmem.c:210 +#, c-format +msgid "the processor does not support large pages" +msgstr "processorn stöder inte stora sidor" + +#: port/win32_shmem.c:212 port/win32_shmem.c:217 +#, c-format +msgid "disabling huge pages" +msgstr "stänger av stora sidor" + +#: port/win32_shmem.c:279 port/win32_shmem.c:315 port/win32_shmem.c:333 +#, c-format +msgid "could not create shared memory segment: error code %lu" +msgstr "kunde inte skapa delat minnessegment: felkod %lu" + +#: port/win32_shmem.c:280 +#, c-format +msgid "Failed system call was CreateFileMapping(size=%zu, name=%s)." +msgstr "Misslyckade systemanropet var CreateFileMapping(size=%zu, name=%s)." + +#: port/win32_shmem.c:305 +#, c-format +msgid "pre-existing shared memory block is still in use" +msgstr "redan existerande delat minnesblock används fortfarande" + +#: port/win32_shmem.c:306 +#, c-format +msgid "Check if there are any old server processes still running, and terminate them." +msgstr "Kontrollera om det finns några gamla serverprocesser som fortfarande kör och stäng ner dem." + +#: port/win32_shmem.c:316 +#, c-format +msgid "Failed system call was DuplicateHandle." +msgstr "Misslyckat systemanrop var DuplicateHandle." + +#: port/win32_shmem.c:334 +#, c-format +msgid "Failed system call was MapViewOfFileEx." +msgstr "Misslyckat systemanrop var MapViewOfFileEx." + +#: postmaster/autovacuum.c:406 +#, c-format +msgid "could not fork autovacuum launcher process: %m" +msgstr "kunde inte starta autovacuum-process: %m" + +#: postmaster/autovacuum.c:442 +#, c-format +msgid "autovacuum launcher started" +msgstr "autovacuum-startare startad" + +#: postmaster/autovacuum.c:832 +#, c-format +msgid "autovacuum launcher shutting down" +msgstr "autovacuum-startare stänger ner" + +#: postmaster/autovacuum.c:1494 +#, c-format +msgid "could not fork autovacuum worker process: %m" +msgstr "kunde inte starta autovacuum-arbetsprocess: %m" + +#: postmaster/autovacuum.c:1700 +#, c-format +msgid "autovacuum: processing database \"%s\"" +msgstr "autovacuum: processar databas \"%s\"" + +#: postmaster/autovacuum.c:2275 +#, c-format +msgid "autovacuum: dropping orphan temp table \"%s.%s.%s\"" +msgstr "autovacuum: slänger övergiven temptabell \"%s.%s.%s\"" + +#: postmaster/autovacuum.c:2504 +#, c-format +msgid "automatic vacuum of table \"%s.%s.%s\"" +msgstr "automatisk vacuum av tabell \"%s.%s.%s\"" + +#: postmaster/autovacuum.c:2507 +#, c-format +msgid "automatic analyze of table \"%s.%s.%s\"" +msgstr "automatisk analys av tabell \"%s.%s.%s\"" + +#: postmaster/autovacuum.c:2700 +#, c-format +msgid "processing work entry for relation \"%s.%s.%s\"" +msgstr "processar arbetspost för relation \"%s.%s.%s\"" + +#: postmaster/autovacuum.c:3279 +#, c-format +msgid "autovacuum not started because of misconfiguration" +msgstr "autovacuum har inte startats på grund av en felkonfigurering" + +#: postmaster/autovacuum.c:3280 +#, c-format +msgid "Enable the \"track_counts\" option." +msgstr "Slå på flaggan \"track_counts\"." 
+ +#: postmaster/bgworker.c:395 postmaster/bgworker.c:862 +#, c-format +msgid "registering background worker \"%s\"" +msgstr "registrerar bakgrundsarbetare \"%s\"" + +#: postmaster/bgworker.c:427 +#, c-format +msgid "unregistering background worker \"%s\"" +msgstr "avregistrerar bakgrundsarbetare \"%s\"" + +#: postmaster/bgworker.c:592 +#, c-format +msgid "background worker \"%s\": must attach to shared memory in order to request a database connection" +msgstr "bakgrundsarbetare \"%s\": måste ansluta till delat minne för att kunna få en databasanslutning" + +#: postmaster/bgworker.c:601 +#, c-format +msgid "background worker \"%s\": cannot request database access if starting at postmaster start" +msgstr "bakgrundsarbetare \"%s\" kan inte få databasaccess om den startar när postmaster startar" + +#: postmaster/bgworker.c:615 +#, c-format +msgid "background worker \"%s\": invalid restart interval" +msgstr "bakgrundsarbetare \"%s\": ogiltigt omstartsintervall" + +#: postmaster/bgworker.c:630 +#, c-format +msgid "background worker \"%s\": parallel workers may not be configured for restart" +msgstr "bakgrundsarbetare \"%s\": parallella arbetare kan inte konfigureras för omstart" + +#: postmaster/bgworker.c:681 +#, c-format +msgid "terminating background worker \"%s\" due to administrator command" +msgstr "terminerar bakgrundsarbetare \"%s\" pga administratörskommando" + +#: postmaster/bgworker.c:870 +#, c-format +msgid "background worker \"%s\": must be registered in shared_preload_libraries" +msgstr "bakgrundsarbetare \"%s\": måste vara registrerad i shared_preload_libraries" + +#: postmaster/bgworker.c:882 +#, c-format +msgid "background worker \"%s\": only dynamic background workers can request notification" +msgstr "bakgrundsarbetare \"%s\": bara dynamiska bakgrundsarbetare kan be om notifiering" + +#: postmaster/bgworker.c:897 +#, c-format +msgid "too many background workers" +msgstr "för många bakgrundsarbetare" + +#: postmaster/bgworker.c:898 +#, c-format +msgid "Up to %d background worker can be registered with the current settings." +msgid_plural "Up to %d background workers can be registered with the current settings." +msgstr[0] "Upp till %d bakgrundsarbetare kan registreras med nuvarande inställning." +msgstr[1] "Upp till %d bakgrundsarbetare kan registreras med nuvarande inställning." + +#: postmaster/bgworker.c:902 +#, c-format +msgid "Consider increasing the configuration parameter \"max_worker_processes\"." +msgstr "Överväg att öka konfigurationsparametern \"max_worker_processes\"." + +#: postmaster/checkpointer.c:464 +#, c-format +msgid "checkpoints are occurring too frequently (%d second apart)" +msgid_plural "checkpoints are occurring too frequently (%d seconds apart)" +msgstr[0] "checkpoint:s sker för ofta (%d sekund emellan)" +msgstr[1] "checkpoint:s sker för ofta (%d sekunder emellan)" + +#: postmaster/checkpointer.c:468 +#, c-format +msgid "Consider increasing the configuration parameter \"max_wal_size\"." +msgstr "Överväg att öka konfigurationsparametern \"max_wal_size\"." + +#: postmaster/checkpointer.c:1088 +#, c-format +msgid "checkpoint request failed" +msgstr "checkpoint-behgäran misslyckades" + +#: postmaster/checkpointer.c:1089 +#, c-format +msgid "Consult recent messages in the server log for details." +msgstr "Se senaste meddelanden i serverloggen för mer information." 
+ +#: postmaster/checkpointer.c:1284 +#, c-format +msgid "compacted fsync request queue from %d entries to %d entries" +msgstr "minskade fsync-kön från %d poster till %d poster" + +#: postmaster/pgarch.c:148 +#, c-format +msgid "could not fork archiver: %m" +msgstr "kunde inte fork():a arkiveraren: %m" + +#: postmaster/pgarch.c:456 +#, c-format +msgid "archive_mode enabled, yet archive_command is not set" +msgstr "archive_mode är påslagen, men ändå är archive_command inte satt" + +#: postmaster/pgarch.c:484 +#, c-format +msgid "archiving write-ahead log file \"%s\" failed too many times, will try again later" +msgstr "arkivering av write-ahead-logg-fil \"%s\" misslyckades för många gånger, kommer försöka igen senare" + +#: postmaster/pgarch.c:587 +#, c-format +msgid "archive command failed with exit code %d" +msgstr "arkiveringskommando misslyckades med felkod %d" + +#: postmaster/pgarch.c:589 postmaster/pgarch.c:599 postmaster/pgarch.c:606 +#: postmaster/pgarch.c:612 postmaster/pgarch.c:621 +#, c-format +msgid "The failed archive command was: %s" +msgstr "Det misslyckade arkiveringskommandot var: %s" + +#: postmaster/pgarch.c:596 +#, c-format +msgid "archive command was terminated by exception 0x%X" +msgstr "arkiveringskommandot terminerades med avbrott 0x%X" + +#: postmaster/pgarch.c:598 postmaster/postmaster.c:3567 +#, c-format +msgid "See C include file \"ntstatus.h\" for a description of the hexadecimal value." +msgstr "Se C-include-fil \"ntstatus.h\" för en beskrivning av det hexdecimala värdet." + +#: postmaster/pgarch.c:603 +#, c-format +msgid "archive command was terminated by signal %d: %s" +msgstr "arkiveringskommandot terminerades av signal %d: %s" + +#: postmaster/pgarch.c:610 +#, c-format +msgid "archive command was terminated by signal %d" +msgstr "arkiveringskommandot terminerades av signal %d" + +#: postmaster/pgarch.c:619 +#, c-format +msgid "archive command exited with unrecognized status %d" +msgstr "arkiveringskommandot avslutade med okänd statuskod %d" + +#: postmaster/pgstat.c:395 +#, c-format +msgid "could not resolve \"localhost\": %s" +msgstr "kunde inte slå upp \"localhost\": %s" + +#: postmaster/pgstat.c:418 +#, c-format +msgid "trying another address for the statistics collector" +msgstr "försöker med en annan adress till statistikinsamlare" + +#: postmaster/pgstat.c:427 +#, c-format +msgid "could not create socket for statistics collector: %m" +msgstr "kunde inte skapa uttag (socket) för statistikinsamlare: %m" + +#: postmaster/pgstat.c:439 +#, c-format +msgid "could not bind socket for statistics collector: %m" +msgstr "kunde inte göra bind på uttag (socket) för statistikinsamlare: %m" + +#: postmaster/pgstat.c:450 +#, c-format +msgid "could not get address of socket for statistics collector: %m" +msgstr "kunde inte få adress till uttag (socket) för statistikinsamlare: %m" + +#: postmaster/pgstat.c:466 +#, c-format +msgid "could not connect socket for statistics collector: %m" +msgstr "kunde inte ansluta uttag (socket) för statistikinsamlare: %m" + +#: postmaster/pgstat.c:487 +#, c-format +msgid "could not send test message on socket for statistics collector: %m" +msgstr "kunde inte skicka testmeddelande till uttag (socket) för statistikinsamlaren: %m" + +#: postmaster/pgstat.c:513 +#, c-format +msgid "select() failed in statistics collector: %m" +msgstr "select() misslyckades i statistikinsamlaren: %m" + +#: postmaster/pgstat.c:528 +#, c-format +msgid "test message did not get through on socket for statistics collector" +msgstr "testmeddelande kom inte 
igenom på uttag (socket) för statistikinsamlare" + +#: postmaster/pgstat.c:543 +#, c-format +msgid "could not receive test message on socket for statistics collector: %m" +msgstr "kunde inte ta emot testmeddelande på uttag (socket) för statistikinsamlaren: %m" + +#: postmaster/pgstat.c:553 +#, c-format +msgid "incorrect test message transmission on socket for statistics collector" +msgstr "inkorrekt överföring av testmeddelande på uttag (socket) till statistikinsamlare" + +#: postmaster/pgstat.c:576 +#, c-format +msgid "could not set statistics collector socket to nonblocking mode: %m" +msgstr "kunde inte sätta statistikinsamlarens uttag (socket) till ickeblockerande läge: %m" + +#: postmaster/pgstat.c:615 +#, c-format +msgid "disabling statistics collector for lack of working socket" +msgstr "stänger av statistikinsamlare då arbetsuttag (socket) saknas" + +#: postmaster/pgstat.c:762 +#, c-format +msgid "could not fork statistics collector: %m" +msgstr "kunde inte fork():a statistikinsamlaren: %m" + +#: postmaster/pgstat.c:1342 +#, c-format +msgid "unrecognized reset target: \"%s\"" +msgstr "okänt återställningsmål \"%s\"" + +#: postmaster/pgstat.c:1343 +#, c-format +msgid "Target must be \"archiver\" or \"bgwriter\"." +msgstr "Målet måste vara \"archiver\" eller \"bgwriter\"." + +#: postmaster/pgstat.c:4362 +#, c-format +msgid "could not read statistics message: %m" +msgstr "kunde inte läsa statistikmeddelande: %m" + +#: postmaster/pgstat.c:4694 postmaster/pgstat.c:4851 +#, c-format +msgid "could not open temporary statistics file \"%s\": %m" +msgstr "kunde inte öppna temporär statistikfil \"%s\": %m" + +#: postmaster/pgstat.c:4761 postmaster/pgstat.c:4896 +#, c-format +msgid "could not write temporary statistics file \"%s\": %m" +msgstr "kunde inte skriva temporär statistikfil \"%s\": %m" + +#: postmaster/pgstat.c:4770 postmaster/pgstat.c:4905 +#, c-format +msgid "could not close temporary statistics file \"%s\": %m" +msgstr "kunde inte stänga temporär statistikfil \"%s\": %m" + +#: postmaster/pgstat.c:4778 postmaster/pgstat.c:4913 +#, c-format +msgid "could not rename temporary statistics file \"%s\" to \"%s\": %m" +msgstr "kunde inte döpa om temporär statistikfil \"%s\" till \"%s\": %m" + +#: postmaster/pgstat.c:5002 postmaster/pgstat.c:5208 postmaster/pgstat.c:5361 +#, c-format +msgid "could not open statistics file \"%s\": %m" +msgstr "kunde inte öppna statistikfil \"%s\": %m" + +#: postmaster/pgstat.c:5014 postmaster/pgstat.c:5024 postmaster/pgstat.c:5045 +#: postmaster/pgstat.c:5067 postmaster/pgstat.c:5082 postmaster/pgstat.c:5145 +#: postmaster/pgstat.c:5220 postmaster/pgstat.c:5240 postmaster/pgstat.c:5258 +#: postmaster/pgstat.c:5274 postmaster/pgstat.c:5292 postmaster/pgstat.c:5308 +#: postmaster/pgstat.c:5373 postmaster/pgstat.c:5385 postmaster/pgstat.c:5397 +#: postmaster/pgstat.c:5422 postmaster/pgstat.c:5444 +#, c-format +msgid "corrupted statistics file \"%s\"" +msgstr "korrupt statistikfil \"%s\"" + +#: postmaster/pgstat.c:5573 +#, c-format +msgid "using stale statistics instead of current ones because stats collector is not responding" +msgstr "använder gammal statistik istället för aktuell data då statistikinsamlaren inte svarar" + +#: postmaster/pgstat.c:5900 +#, c-format +msgid "database hash table corrupted during cleanup --- abort" +msgstr "databasens hashtabell har blivit korrupt vid uppstädning --- avbryter" + +#: postmaster/postmaster.c:717 +#, c-format +msgid "%s: invalid argument for option -f: \"%s\"\n" +msgstr "%s: ogiltigt argument till flagga -f: \"%s\"\n" 
+ +#: postmaster/postmaster.c:803 +#, c-format +msgid "%s: invalid argument for option -t: \"%s\"\n" +msgstr "%s: ogiltigt argument till flagga -t: \"%s\"\n" + +#: postmaster/postmaster.c:854 +#, c-format +msgid "%s: invalid argument: \"%s\"\n" +msgstr "%s: ogiltigt argument: \"%s\"\n" + +#: postmaster/postmaster.c:896 +#, c-format +msgid "%s: superuser_reserved_connections (%d) plus max_wal_senders (%d) must be less than max_connections (%d)\n" +msgstr "%s: superuser_reserved_connections (%d) plus max_wal_senders (%d) måste vara mindre än max_connections (%d)\n" + +#: postmaster/postmaster.c:903 +#, c-format +msgid "WAL archival cannot be enabled when wal_level is \"minimal\"" +msgstr "WAL-arkivering kan inte slås på när wal_level är \"minimal\"" + +#: postmaster/postmaster.c:906 +#, c-format +msgid "WAL streaming (max_wal_senders > 0) requires wal_level \"replica\" or \"logical\"" +msgstr "WAL-strömning (max_wal_senders > 0) kräver wal_level \"replica\" eller \"logical\"" + +#: postmaster/postmaster.c:914 +#, c-format +msgid "%s: invalid datetoken tables, please fix\n" +msgstr "%s: ogiltiga datumtokentabeller, det behöver lagas\n" + +#: postmaster/postmaster.c:1028 postmaster/postmaster.c:1126 +#: utils/init/miscinit.c:1547 +#, c-format +msgid "invalid list syntax in parameter \"%s\"" +msgstr "ogiltig listsyntax för parameter \"%s\"" + +#: postmaster/postmaster.c:1059 +#, c-format +msgid "could not create listen socket for \"%s\"" +msgstr "kunde inte skapa lyssnande uttag (socket) för \"%s\"" + +#: postmaster/postmaster.c:1065 +#, c-format +msgid "could not create any TCP/IP sockets" +msgstr "kunde inte skapa TCP/IP-uttag (socket)" + +#: postmaster/postmaster.c:1148 +#, c-format +msgid "could not create Unix-domain socket in directory \"%s\"" +msgstr "kunde inte skapa unix-domän-uttag (socket) i katalog \"%s\"" + +#: postmaster/postmaster.c:1154 +#, c-format +msgid "could not create any Unix-domain sockets" +msgstr "kunde inte skapa något Unix-domän-uttag (socket)" + +#: postmaster/postmaster.c:1166 +#, c-format +msgid "no socket created for listening" +msgstr "inget uttag (socket) skapat för lyssnande" + +#: postmaster/postmaster.c:1206 +#, c-format +msgid "could not create I/O completion port for child queue" +msgstr "kunde inte skapa \"I/O completion port\" för barnkö" + +#: postmaster/postmaster.c:1235 +#, c-format +msgid "%s: could not change permissions of external PID file \"%s\": %s\n" +msgstr "%s: kunde inte ändra rättigheter på extern PID-fil \"%s\": %s\n" + +#: postmaster/postmaster.c:1239 +#, c-format +msgid "%s: could not write external PID file \"%s\": %s\n" +msgstr "%s: kunde inte skriva extern PID-fil \"%s\": %s\n" + +#: postmaster/postmaster.c:1296 +#, c-format +msgid "ending log output to stderr" +msgstr "avslutar loggutmatning till stderr" + +#: postmaster/postmaster.c:1297 +#, c-format +msgid "Future log output will go to log destination \"%s\"." +msgstr "Framtida loggutmatning kommer gå till logg-destination \"%s\"." + +#: postmaster/postmaster.c:1323 utils/init/postinit.c:214 +#, c-format +msgid "could not load pg_hba.conf" +msgstr "kunde inte ladda pg_hba.conf" + +#: postmaster/postmaster.c:1349 +#, c-format +msgid "postmaster became multithreaded during startup" +msgstr "postmaster blev flertrådad under uppstart" + +#: postmaster/postmaster.c:1350 +#, c-format +msgid "Set the LC_ALL environment variable to a valid locale." +msgstr "Sätt omgivningsvariabeln LC_ALL till en giltig lokal."
+ +#: postmaster/postmaster.c:1455 +#, c-format +msgid "%s: could not locate matching postgres executable" +msgstr "%s: kunde inte hitta matchande postgres-binär" + +#: postmaster/postmaster.c:1478 utils/misc/tzparser.c:341 +#, c-format +msgid "This may indicate an incomplete PostgreSQL installation, or that the file \"%s\" has been moved away from its proper location." +msgstr "Detta tyder på en inkomplett PostgreSQL-installation alternativt att filen \"%s\" har flyttats bort från sin korrekta plats." + +#: postmaster/postmaster.c:1505 +#, c-format +msgid "" +"%s: could not find the database system\n" +"Expected to find it in the directory \"%s\",\n" +"but could not open file \"%s\": %s\n" +msgstr "" +"%s: kunde inte hitta databassystemet\n" +"Förväntade mig att hitta det i katalogen \"%s\",\n" +"men kunde inte öppna filen \"%s\": %s\n" + +#: postmaster/postmaster.c:1682 +#, c-format +msgid "select() failed in postmaster: %m" +msgstr "select() misslyckades i postmaster: %m" + +#: postmaster/postmaster.c:1837 +#, c-format +msgid "performing immediate shutdown because data directory lock file is invalid" +msgstr "stänger ner omedelbart då datakatalogens låsfil är ogiltig" + +#: postmaster/postmaster.c:1915 postmaster/postmaster.c:1946 +#, c-format +msgid "incomplete startup packet" +msgstr "ofullständigt startuppaket" + +#: postmaster/postmaster.c:1927 +#, c-format +msgid "invalid length of startup packet" +msgstr "ogiltig längd på startuppaket" + +#: postmaster/postmaster.c:1985 +#, c-format +msgid "failed to send SSL negotiation response: %m" +msgstr "misslyckades att skicka SSL-förhandlingssvar: %m" + +#: postmaster/postmaster.c:2011 +#, c-format +msgid "unsupported frontend protocol %u.%u: server supports %u.0 to %u.%u" +msgstr "inget stöd för framändans protokoll %u.%u: servern stödjer %u.0 till %u.%u" + +#: postmaster/postmaster.c:2075 utils/misc/guc.c:6003 utils/misc/guc.c:6096 +#: utils/misc/guc.c:7422 utils/misc/guc.c:10183 utils/misc/guc.c:10217 +#, c-format +msgid "invalid value for parameter \"%s\": \"%s\"" +msgstr "ogiltigt värde för parameter \"%s\": \"%s\"" + +#: postmaster/postmaster.c:2078 +#, c-format +msgid "Valid values are: \"false\", 0, \"true\", 1, \"database\"." +msgstr "Giltiga värden är: \"false\", 0, \"true\", 1, \"database\"." 
+ +#: postmaster/postmaster.c:2108 +#, c-format +msgid "invalid startup packet layout: expected terminator as last byte" +msgstr "ogiltig startpaketlayout: förväntade en terminator som sista byte" + +#: postmaster/postmaster.c:2146 +#, c-format +msgid "no PostgreSQL user name specified in startup packet" +msgstr "inget PostgreSQL-användarnamn angivet i startuppaketet" + +#: postmaster/postmaster.c:2205 +#, c-format +msgid "the database system is starting up" +msgstr "databassystemet startar upp" + +#: postmaster/postmaster.c:2210 +#, c-format +msgid "the database system is shutting down" +msgstr "databassystemet stänger ner" + +#: postmaster/postmaster.c:2215 +#, c-format +msgid "the database system is in recovery mode" +msgstr "databassystemet är i räddningsläge" + +#: postmaster/postmaster.c:2220 storage/ipc/procarray.c:292 +#: storage/ipc/sinvaladt.c:298 storage/lmgr/proc.c:339 +#, c-format +msgid "sorry, too many clients already" +msgstr "ledsen, för många klienter" + +#: postmaster/postmaster.c:2310 +#, c-format +msgid "wrong key in cancel request for process %d" +msgstr "fel nyckel i avbrytbegäran för process %d" + +#: postmaster/postmaster.c:2318 +#, c-format +msgid "PID %d in cancel request did not match any process" +msgstr "PID %d i avbrytbegäran matchade inte någon process" + +#: postmaster/postmaster.c:2529 +#, c-format +msgid "received SIGHUP, reloading configuration files" +msgstr "mottog SIGHUP, läser om konfigurationsfiler" + +#: postmaster/postmaster.c:2554 +#, c-format +msgid "pg_hba.conf was not reloaded" +msgstr "pg_hba.conf laddades inte om" + +#: postmaster/postmaster.c:2558 +#, c-format +msgid "pg_ident.conf was not reloaded" +msgstr "pg_ident.conf laddades inte om" + +#: postmaster/postmaster.c:2568 +#, c-format +msgid "SSL configuration was not reloaded" +msgstr "SSL-konfiguration laddades inte om" + +#: postmaster/postmaster.c:2616 +#, c-format +msgid "received smart shutdown request" +msgstr "tog emot förfrågan om att stänga ner smart" + +#: postmaster/postmaster.c:2674 +#, c-format +msgid "received fast shutdown request" +msgstr "tog emot förfrågan om att stänga ner snabbt" + +#: postmaster/postmaster.c:2707 +#, c-format +msgid "aborting any active transactions" +msgstr "avbryter aktiva transaktioner" + +#: postmaster/postmaster.c:2741 +#, c-format +msgid "received immediate shutdown request" +msgstr "mottog begäran för omedelbar nedstängning" + +#: postmaster/postmaster.c:2808 +#, c-format +msgid "shutdown at recovery target" +msgstr "nedstängs vid återställningsmål" + +#: postmaster/postmaster.c:2824 postmaster/postmaster.c:2847 +msgid "startup process" +msgstr "uppstartprocess" + +#: postmaster/postmaster.c:2827 +#, c-format +msgid "aborting startup due to startup process failure" +msgstr "avbryter uppstart på grund av fel i startprocessen" + +#: postmaster/postmaster.c:2888 +#, c-format +msgid "database system is ready to accept connections" +msgstr "databassystemet är redo att ta emot anslutningar" + +#: postmaster/postmaster.c:2909 +msgid "background writer process" +msgstr "bakgrundsskrivarprocess" + +#: postmaster/postmaster.c:2963 +msgid "checkpointer process" +msgstr "checkpoint-process" + +#: postmaster/postmaster.c:2979 +msgid "WAL writer process" +msgstr "WAL-skrivarprocess" + +#: postmaster/postmaster.c:2994 +msgid "WAL receiver process" +msgstr "WAL-mottagarprocess" + +#: postmaster/postmaster.c:3009 +msgid "autovacuum launcher process" +msgstr "autovacuum-startprocess" + +#: postmaster/postmaster.c:3024 +msgid "archiver process" +msgstr
"arkiveringsprocess" + +#: postmaster/postmaster.c:3040 +msgid "statistics collector process" +msgstr "statistikinsamlingsprocess" + +#: postmaster/postmaster.c:3054 +msgid "system logger process" +msgstr "system-logg-process" + +#: postmaster/postmaster.c:3116 +#, c-format +msgid "background worker \"%s\"" +msgstr "bakgrundsarbetare \"%s\"" + +#: postmaster/postmaster.c:3200 postmaster/postmaster.c:3220 +#: postmaster/postmaster.c:3227 postmaster/postmaster.c:3245 +msgid "server process" +msgstr "serverprocess" + +#: postmaster/postmaster.c:3299 +#, c-format +msgid "terminating any other active server processes" +msgstr "avslutar andra aktiva serverprocesser" + +#. translator: %s is a noun phrase describing a child process, such as +#. "server process" +#: postmaster/postmaster.c:3555 +#, c-format +msgid "%s (PID %d) exited with exit code %d" +msgstr "%s (PID %d) avslutade med felkod %d" + +#: postmaster/postmaster.c:3557 postmaster/postmaster.c:3568 +#: postmaster/postmaster.c:3579 postmaster/postmaster.c:3588 +#: postmaster/postmaster.c:3598 +#, c-format +msgid "Failed process was running: %s" +msgstr "Misslyckad process körde: %s" + +#. translator: %s is a noun phrase describing a child process, such as +#. "server process" +#: postmaster/postmaster.c:3565 +#, c-format +msgid "%s (PID %d) was terminated by exception 0x%X" +msgstr "%s (PID %d) terminerades av avbrott 0x%X" + +#. translator: %s is a noun phrase describing a child process, such as +#. "server process" +#: postmaster/postmaster.c:3575 +#, c-format +msgid "%s (PID %d) was terminated by signal %d: %s" +msgstr "%s (PID %d) terminerades av signal %d: %s" + +#. translator: %s is a noun phrase describing a child process, such as +#. "server process" +#: postmaster/postmaster.c:3586 +#, c-format +msgid "%s (PID %d) was terminated by signal %d" +msgstr "%s (PID %d) terminerades av signal %d" + +#. translator: %s is a noun phrase describing a child process, such as +#.
"server process" +#: postmaster/postmaster.c:3596 +#, c-format +msgid "%s (PID %d) exited with unrecognized status %d" +msgstr "%s (PID %d) avslutade med okänd status %d" + +#: postmaster/postmaster.c:3783 +#, c-format +msgid "abnormal database system shutdown" +msgstr "ej normal databasnedstängning" + +#: postmaster/postmaster.c:3823 +#, c-format +msgid "all server processes terminated; reinitializing" +msgstr "alla serverprocesser är avslutade; initierar på nytt" + +#: postmaster/postmaster.c:3993 postmaster/postmaster.c:5418 +#: postmaster/postmaster.c:5782 +#, c-format +msgid "could not generate random cancel key" +msgstr "kunde inte skapa slumpad avbrytningsnyckel" + +#: postmaster/postmaster.c:4047 +#, c-format +msgid "could not fork new process for connection: %m" +msgstr "kunde inte fork():a ny process för uppkoppling: %m" + +#: postmaster/postmaster.c:4089 +msgid "could not fork new process for connection: " +msgstr "kunde inte fork():a ny process för uppkoppling: " + +#: postmaster/postmaster.c:4203 +#, c-format +msgid "connection received: host=%s port=%s" +msgstr "anslutning mottagen: värd=%s port=%s" + +#: postmaster/postmaster.c:4208 +#, c-format +msgid "connection received: host=%s" +msgstr "anslutning mottagen: värd=%s" + +#: postmaster/postmaster.c:4493 +#, c-format +msgid "could not execute server process \"%s\": %m" +msgstr "kunde inte köra serverprocess \"%s\": %m" + +#: postmaster/postmaster.c:4646 +#, c-format +msgid "giving up after too many tries to reserve shared memory" +msgstr "ger upp efter för många försök att reservera delat minne" + +#: postmaster/postmaster.c:4647 +#, c-format +msgid "This might be caused by ASLR or antivirus software." +msgstr "Detta kan orsakas av ASLR eller antivirusprogram." + +#: postmaster/postmaster.c:4858 +#, c-format +msgid "SSL configuration could not be loaded in child process" +msgstr "SSL-konfigurering kunde inte laddas i barnprocess" + +#: postmaster/postmaster.c:4990 +#, c-format +msgid "Please report this to ." +msgstr "Vänligen rapportera detta till ."
+ +#: postmaster/postmaster.c:5077 +#, c-format +msgid "database system is ready to accept read only connections" +msgstr "databassystemet är redo att ta emot read-only-anslutningar" + +#: postmaster/postmaster.c:5346 +#, c-format +msgid "could not fork startup process: %m" +msgstr "kunde inte starta startup-processen: %m" + +#: postmaster/postmaster.c:5350 +#, c-format +msgid "could not fork background writer process: %m" +msgstr "kunde inte starta process för bakgrundsskrivare: %m" + +#: postmaster/postmaster.c:5354 +#, c-format +msgid "could not fork checkpointer process: %m" +msgstr "kunde inte fork:a checkpoint-process: %m" + +#: postmaster/postmaster.c:5358 +#, c-format +msgid "could not fork WAL writer process: %m" +msgstr "kunde inte fork:a WAL-skrivprocess: %m" + +#: postmaster/postmaster.c:5362 +#, c-format +msgid "could not fork WAL receiver process: %m" +msgstr "kunde inte fork:a WAL-mottagarprocess: %m" + +#: postmaster/postmaster.c:5366 +#, c-format +msgid "could not fork process: %m" +msgstr "kunde inte fork:a process: %m" + +#: postmaster/postmaster.c:5553 postmaster/postmaster.c:5576 +#, c-format +msgid "database connection requirement not indicated during registration" +msgstr "krav på databasanslutning fanns inte med vid registrering" + +#: postmaster/postmaster.c:5560 postmaster/postmaster.c:5583 +#, c-format +msgid "invalid processing mode in background worker" +msgstr "ogiltigt processläge i bakgrundsarbetare" + +#: postmaster/postmaster.c:5655 +#, c-format +msgid "starting background worker process \"%s\"" +msgstr "startar bakgrundsarbetarprocess \"%s\"" + +#: postmaster/postmaster.c:5667 +#, c-format +msgid "could not fork worker process: %m" +msgstr "kunde inte starta (fork) arbetarprocess: %m" + +#: postmaster/postmaster.c:6100 +#, c-format +msgid "could not duplicate socket %d for use in backend: error code %d" +msgstr "kunde inte duplicera uttag (socket) %d för att använda i backend: felkod %d" + +#: postmaster/postmaster.c:6132 +#, c-format +msgid "could not create inherited socket: error code %d\n" +msgstr "kunde inte skapa ärvt uttag (socket): felkod %d\n" + +#: postmaster/postmaster.c:6161 +#, c-format +msgid "could not open backend variables file \"%s\": %s\n" +msgstr "kunde inte öppna bakändans variabelfil \"%s\": %s\n" + +#: postmaster/postmaster.c:6168 +#, c-format +msgid "could not read from backend variables file \"%s\": %s\n" +msgstr "kunde inte läsa från bakändans variabelfil \"%s\": %s\n" + +#: postmaster/postmaster.c:6177 +#, c-format +msgid "could not remove file \"%s\": %s\n" +msgstr "kunde inte ta bort fil \"%s\": %s\n" + +#: postmaster/postmaster.c:6194 +#, c-format +msgid "could not map view of backend variables: error code %lu\n" +msgstr "kunde inte mappa in vy för bakändans variabler: felkod %lu\n" + +#: postmaster/postmaster.c:6203 +#, c-format +msgid "could not unmap view of backend variables: error code %lu\n" +msgstr "kunde inte avmappa vy för bakändans variabler: felkod %lu\n" + +#: postmaster/postmaster.c:6210 +#, c-format +msgid "could not close handle to backend parameter variables: error code %lu\n" +msgstr "kunde inte stänga \"handle\" till backend:ens parametervariabler: felkod %lu\n" + +#: postmaster/postmaster.c:6371 +#, c-format +msgid "could not read exit code for process\n" +msgstr "kunde inte läsa avslutningskod för process\n" + +#: postmaster/postmaster.c:6376 +#, c-format +msgid "could not post child completion status\n" +msgstr "kunde inte skicka barnets avslutningsstatus\n" + +#: postmaster/syslogger.c:453
postmaster/syslogger.c:1054 +#, c-format +msgid "could not read from logger pipe: %m" +msgstr "kunde inte läsa från loggrör (pipe): %m" + +#: postmaster/syslogger.c:503 +#, c-format +msgid "logger shutting down" +msgstr "loggaren stänger ner" + +#: postmaster/syslogger.c:547 postmaster/syslogger.c:561 +#, c-format +msgid "could not create pipe for syslog: %m" +msgstr "kunde inte skapa rör (pipe) för syslog: %m" + +#: postmaster/syslogger.c:597 +#, c-format +msgid "could not fork system logger: %m" +msgstr "kunde inte fork:a systemloggaren: %m" + +#: postmaster/syslogger.c:633 +#, c-format +msgid "redirecting log output to logging collector process" +msgstr "omdirigerar loggutmatning till logginsamlingsprocess" + +#: postmaster/syslogger.c:634 +#, c-format +msgid "Future log output will appear in directory \"%s\"." +msgstr "Framtida loggutmatning kommer dyka upp i katalog \"%s\"." + +#: postmaster/syslogger.c:642 +#, c-format +msgid "could not redirect stdout: %m" +msgstr "kunde inte omdirigera stdout: %m" + +#: postmaster/syslogger.c:647 postmaster/syslogger.c:664 +#, c-format +msgid "could not redirect stderr: %m" +msgstr "kunde inte omdirigera stderr: %m" + +#: postmaster/syslogger.c:1009 +#, c-format +msgid "could not write to log file: %s\n" +msgstr "kunde inte skriva till loggfil: %s\n" + +#: postmaster/syslogger.c:1151 +#, c-format +msgid "could not open log file \"%s\": %m" +msgstr "kunde inte öppna loggfil \"%s\": %m" + +#: postmaster/syslogger.c:1213 postmaster/syslogger.c:1257 +#, c-format +msgid "disabling automatic rotation (use SIGHUP to re-enable)" +msgstr "stänger av automatisk rotation (använd SIGHUP för att slå på igen)" + +#: regex/regc_pg_locale.c:262 +#, c-format +msgid "could not determine which collation to use for regular expression" +msgstr "kunde inte bestämma vilken jämförelse (collation) som skall användas för reguljära uttryck" + +#: repl_gram.y:336 repl_gram.y:368 +#, c-format +msgid "invalid timeline %u" +msgstr "ogiltig tidslinje %u" + +#: repl_scanner.l:129 +msgid "invalid streaming start location" +msgstr "ogiltig startposition för strömning" + +#: repl_scanner.l:180 scan.l:674 +msgid "unterminated quoted string" +msgstr "icketerminerad citerad sträng" + +#: replication/basebackup.c:336 +#, c-format +msgid "could not stat control file \"%s\": %m" +msgstr "kunde inte göra stat() på kontrollfil \"%s\": %m" + +#: replication/basebackup.c:443 +#, c-format +msgid "could not find any WAL files" +msgstr "kunde inte hitta några WAL-filer" + +#: replication/basebackup.c:457 replication/basebackup.c:472 +#: replication/basebackup.c:481 +#, c-format +msgid "could not find WAL file \"%s\"" +msgstr "kunde inte hitta WAL-fil \"%s\"" + +#: replication/basebackup.c:520 replication/basebackup.c:548 +#, c-format +msgid "unexpected WAL file size \"%s\"" +msgstr "oväntad WAL-filstorlek \"%s\"" + +#: replication/basebackup.c:534 replication/basebackup.c:1526 +#, c-format +msgid "base backup could not send data, aborting backup" +msgstr "basbackup kunde inte skicka data, avbryter backup" + +#: replication/basebackup.c:606 +#, c-format +msgid "%s total checksum verification failures" +msgstr "totalt %s verifieringsfel av checksumma" + +#: replication/basebackup.c:610 +#, c-format +msgid "checksum verification failure during base backup" +msgstr "misslyckad verifiering av checksumma under basbackup" + +#: replication/basebackup.c:654 replication/basebackup.c:663 +#: replication/basebackup.c:672 replication/basebackup.c:681 +#: replication/basebackup.c:690 
replication/basebackup.c:701 +#: replication/basebackup.c:718 replication/basebackup.c:727 +#, c-format +msgid "duplicate option \"%s\"" +msgstr "duplicerad flagga \"%s\"" + +#: replication/basebackup.c:707 utils/misc/guc.c:6013 +#, c-format +msgid "%d is outside the valid range for parameter \"%s\" (%d .. %d)" +msgstr "%d är utanför giltigt intervall för parameter \"%s\" (%d .. %d)" + +#: replication/basebackup.c:981 replication/basebackup.c:1151 +#, c-format +msgid "could not stat file or directory \"%s\": %m" +msgstr "kunde inte ta status på fil eller katalog \"%s\": %m" + +#: replication/basebackup.c:1306 +#, c-format +msgid "skipping special file \"%s\"" +msgstr "hoppar över specialfil \"%s\"" + +#: replication/basebackup.c:1411 +#, c-format +msgid "invalid segment number %d in file \"%s\"" +msgstr "ogiltigt segmentnummer %d i fil \"%s\"" + +#: replication/basebackup.c:1430 +#, c-format +msgid "cannot verify checksum in file \"%s\", block %d: read buffer size %d and page size %d differ" +msgstr "kan inte verifiera checksumma i fil \"%s\", block %d: läsbufferstorlek %d och sidstorlek %d skiljer sig åt" + +#: replication/basebackup.c:1474 replication/basebackup.c:1490 +#, c-format +msgid "could not fseek in file \"%s\": %m" +msgstr "kunde inte göra fseek i fil \"%s\": %m" + +#: replication/basebackup.c:1482 +#, c-format +msgid "could not reread block %d of file \"%s\": %m" +msgstr "kunde inte läsa tillbaka block %d i fil \"%s\": %m" + +#: replication/basebackup.c:1506 +#, c-format +msgid "checksum verification failed in file \"%s\", block %d: calculated %X but expected %X" +msgstr "checksumkontroll misslyckades i fil \"%s\", block %d: beräknade %X men förväntade %X" + +#: replication/basebackup.c:1513 +#, c-format +msgid "further checksum verification failures in file \"%s\" will not be reported" +msgstr "ytterligare verifieringsfel av checksummor i fil \"%s\" kommer inte rapporteras" + +#: replication/basebackup.c:1571 +#, c-format +msgid "file \"%s\" has a total of %d checksum verification failures" +msgstr "filen \"%s\" har totalt %d verifieringsfel av checksumma" + +#: replication/basebackup.c:1599 +#, c-format +msgid "file name too long for tar format: \"%s\"" +msgstr "filnamnet är för långt för tar-format: \"%s\"" + +#: replication/basebackup.c:1604 +#, c-format +msgid "symbolic link target too long for tar format: file name \"%s\", target \"%s\"" +msgstr "mål för symbolisk länk är för långt för tar-format: filnamn \"%s\", mål \"%s\"" + +#: replication/libpqwalreceiver/libpqwalreceiver.c:235 +#, c-format +msgid "invalid connection string syntax: %s" +msgstr "ogiltig anslutningssträngsyntax: %s" + +#: replication/libpqwalreceiver/libpqwalreceiver.c:259 +#, c-format +msgid "could not parse connection string: %s" +msgstr "kunde inte parsa anslutningssträng: %s" + +#: replication/libpqwalreceiver/libpqwalreceiver.c:332 +#, c-format +msgid "could not receive database system identifier and timeline ID from the primary server: %s" +msgstr "kunde inte hämta databassystemidentifierare och tidslinje-ID från primära servern: %s" + +#: replication/libpqwalreceiver/libpqwalreceiver.c:343 +#: replication/libpqwalreceiver/libpqwalreceiver.c:550 +#, c-format +msgid "invalid response from primary server" +msgstr "ogiltigt svar från primär server" + +#: replication/libpqwalreceiver/libpqwalreceiver.c:344 +#, c-format +msgid "Could not identify system: got %d rows and %d fields, expected %d rows and %d or more fields."
+msgstr "Kunde inte identifiera system: fick %d rader och %d fält, förväntade %d rader och %d eller fler fält." + +#: replication/libpqwalreceiver/libpqwalreceiver.c:410 +#: replication/libpqwalreceiver/libpqwalreceiver.c:416 +#: replication/libpqwalreceiver/libpqwalreceiver.c:441 +#, c-format +msgid "could not start WAL streaming: %s" +msgstr "kunde inte starta WAL-strömning: %s" + +#: replication/libpqwalreceiver/libpqwalreceiver.c:460 +#, c-format +msgid "could not send end-of-streaming message to primary: %s" +msgstr "kunde inte skicka meddelandet end-of-streaming till primären: %s" + +#: replication/libpqwalreceiver/libpqwalreceiver.c:482 +#, c-format +msgid "unexpected result set after end-of-streaming" +msgstr "oväntad resultatmängd efter end-of-streaming" + +#: replication/libpqwalreceiver/libpqwalreceiver.c:496 +#, c-format +msgid "error while shutting down streaming COPY: %s" +msgstr "fel vid nedstängning av strömmande COPY: %s" + +#: replication/libpqwalreceiver/libpqwalreceiver.c:505 +#, c-format +msgid "error reading result of streaming command: %s" +msgstr "fel vid läsning av resultat från strömningskommando: %s" + +#: replication/libpqwalreceiver/libpqwalreceiver.c:513 +#: replication/libpqwalreceiver/libpqwalreceiver.c:741 +#, c-format +msgid "unexpected result after CommandComplete: %s" +msgstr "oväntat resultat efter CommandComplete: %s" + +#: replication/libpqwalreceiver/libpqwalreceiver.c:539 +#, c-format +msgid "could not receive timeline history file from the primary server: %s" +msgstr "kunde inte ta emot fil med tidslinjehistorik från primära servern: %s" + +#: replication/libpqwalreceiver/libpqwalreceiver.c:551 +#, c-format +msgid "Expected 1 tuple with 2 fields, got %d tuples with %d fields." +msgstr "Förväntade 1 tupel med 2 fält, fick %d tupler med %d fält." + +#: replication/libpqwalreceiver/libpqwalreceiver.c:705 +#: replication/libpqwalreceiver/libpqwalreceiver.c:756 +#: replication/libpqwalreceiver/libpqwalreceiver.c:762 +#, c-format +msgid "could not receive data from WAL stream: %s" +msgstr "kunde inte ta emot data från WAL-ström: %s" + +#: replication/libpqwalreceiver/libpqwalreceiver.c:781 +#, c-format +msgid "could not send data to WAL stream: %s" +msgstr "kunde inte skicka data till WAL-ström: %s" + +#: replication/libpqwalreceiver/libpqwalreceiver.c:830 +#, c-format +msgid "could not create replication slot \"%s\": %s" +msgstr "kunde inte skapa replikeringsslot \"%s\": %s" + +#: replication/libpqwalreceiver/libpqwalreceiver.c:864 +#, c-format +msgid "invalid query response" +msgstr "ogiltig frågerespons" + +#: replication/libpqwalreceiver/libpqwalreceiver.c:865 +#, c-format +msgid "Expected %d fields, got %d fields." +msgstr "Förväntade %d fält, fick %d fält."
+ +#: replication/libpqwalreceiver/libpqwalreceiver.c:934 +#, c-format +msgid "the query interface requires a database connection" +msgstr "frågeinterface:et kräver en databasanslutning" + +#: replication/libpqwalreceiver/libpqwalreceiver.c:965 +msgid "empty query" +msgstr "tom fråga" + +#: replication/logical/launcher.c:298 +#, c-format +msgid "starting logical replication worker for subscription \"%s\"" +msgstr "startar logisk replikeringsarbetare för prenumeration \"%s\"" + +#: replication/logical/launcher.c:305 +#, c-format +msgid "cannot start logical replication workers when max_replication_slots = 0" +msgstr "kan inte starta logisk replikeringsarbetare när max_replication_slots = 0" + +#: replication/logical/launcher.c:385 +#, c-format +msgid "out of logical replication worker slots" +msgstr "slut på logiska replikeringsarbetarslots" + +#: replication/logical/launcher.c:386 +#, c-format +msgid "You might need to increase max_logical_replication_workers." +msgstr "Du kan behöva öka max_logical_replication_workers." + +#: replication/logical/launcher.c:441 +#, c-format +msgid "out of background worker slots" +msgstr "slut på bakgrundsarbetarslots" + +#: replication/logical/launcher.c:442 +#, c-format +msgid "You might need to increase max_worker_processes." +msgstr "Du kan behöva öka max_worker_processes." + +#: replication/logical/launcher.c:625 +#, c-format +msgid "logical replication worker slot %d is empty, cannot attach" +msgstr "logisk replikeringsarbetarslot %d är tom, kan inte ansluta" + +#: replication/logical/launcher.c:634 +#, c-format +msgid "logical replication worker slot %d is already used by another worker, cannot attach" +msgstr "logisk replikeringsarbetarslot %d används redan av en annan arbetare, kan inte ansluta" + +#: replication/logical/launcher.c:888 +#, c-format +msgid "logical replication launcher started" +msgstr "logisk replikeringsstartare startad" + +#: replication/logical/logical.c:85 +#, c-format +msgid "logical decoding requires wal_level >= logical" +msgstr "logisk avkodning kräver wal_level >= logical" + +#: replication/logical/logical.c:90 +#, c-format +msgid "logical decoding requires a database connection" +msgstr "logisk avkodning kräver en databasanslutning" + +#: replication/logical/logical.c:108 +#, c-format +msgid "logical decoding cannot be used while in recovery" +msgstr "logisk avkodning kan inte användas under återställning" + +#: replication/logical/logical.c:250 replication/logical/logical.c:378 +#, c-format +msgid "cannot use physical replication slot for logical decoding" +msgstr "kan inte använda fysisk replikeringsslot för logisk avkodning" + +#: replication/logical/logical.c:255 replication/logical/logical.c:383 +#, c-format +msgid "replication slot \"%s\" was not created in this database" +msgstr "replikeringsslot \"%s\" har inte skapats i denna databasen" + +#: replication/logical/logical.c:262 +#, c-format +msgid "cannot create logical replication slot in transaction that has performed writes" +msgstr "kan inte skapa logisk replikeringsslot i transaktion som redan har utfört skrivningar" + +#: replication/logical/logical.c:423 +#, c-format +msgid "starting logical decoding for slot \"%s\"" +msgstr "startar logisk avkodning för slot \"%s\"" + +#: replication/logical/logical.c:425 +#, c-format +msgid "Streaming transactions committing after %X/%X, reading WAL from %X/%X."
+msgstr "Strömmar transaktioner commit:ade efter %X/%X, läser WAL från %X/%X" + +#: replication/logical/logical.c:575 +#, c-format +msgid "slot \"%s\", output plugin \"%s\", in the %s callback, associated LSN %X/%X" +msgstr "slot \"%s\", utdata-plugin \"%s\", i callback:en %s, associerad LSN %X/%X" + +#: replication/logical/logical.c:582 +#, c-format +msgid "slot \"%s\", output plugin \"%s\", in the %s callback" +msgstr "slot \"%s\", utdata-plugin \"%s\", i callback:en %s" + +#: replication/logical/logicalfuncs.c:114 replication/slotfuncs.c:35 +#, c-format +msgid "must be superuser or replication role to use replication slots" +msgstr "måste vara superanvändare eller replikeringsroll för att använda replikeringsslottar" + +#: replication/logical/logicalfuncs.c:153 +#, c-format +msgid "slot name must not be null" +msgstr "slot-namn får inte vara null" + +#: replication/logical/logicalfuncs.c:169 +#, c-format +msgid "options array must not be null" +msgstr "flagg-array får inte vara null" + +#: replication/logical/logicalfuncs.c:200 +#, c-format +msgid "array must be one-dimensional" +msgstr "array:en måste vara endimensionell" + +#: replication/logical/logicalfuncs.c:206 +#, c-format +msgid "array must not contain nulls" +msgstr "array:en får inte innehålla null" + +#: replication/logical/logicalfuncs.c:222 utils/adt/json.c:2310 +#: utils/adt/jsonb.c:1269 +#, c-format +msgid "array must have even number of elements" +msgstr "array:en måste ha ett jämnt antal element" + +#: replication/logical/logicalfuncs.c:269 +#, c-format +msgid "logical decoding output plugin \"%s\" produces binary output, but function \"%s\" expects textual data" +msgstr "utdata-plugin \"%s\" för logisk avkodning producerar binär utdata men funktionen \"%s\" förväntar sig textdata" + +#: replication/logical/origin.c:185 +#, c-format +msgid "only superusers can query or manipulate replication origins" +msgstr "bara superanvändare kan läsa eller ändra replikeringskällor" + +#: replication/logical/origin.c:190 +#, c-format +msgid "cannot query or manipulate replication origin when max_replication_slots = 0" +msgstr "kan inte se eller ändra replikeringskällor när max_replication_slots = 0" + +#: replication/logical/origin.c:195 +#, c-format +msgid "cannot manipulate replication origins during recovery" +msgstr "kan inte ändra replikeringskällor under tiden återställning sker" + +#: replication/logical/origin.c:230 +#, c-format +msgid "replication origin \"%s\" does not exist" +msgstr "replikeringskälla \"%s\" finns inte" + +#: replication/logical/origin.c:321 +#, c-format +msgid "could not find free replication origin OID" +msgstr "kunde inte hitta ledig replikering-origin-OID" + +#: replication/logical/origin.c:369 +#, c-format +msgid "could not drop replication origin with OID %d, in use by PID %d" +msgstr "kunde inte slänga replikeringskälla med OID %d som används av PID %d" + +#: replication/logical/origin.c:461 +#, c-format +msgid "replication origin with OID %u does not exist" +msgstr "replikeringskälla med OID %u finns inte" + +#: replication/logical/origin.c:707 +#, c-format +msgid "replication checkpoint has wrong magic %u instead of %u" +msgstr "replikeringscheckpoint har fel magiskt tal %u istället för %u" + +#: replication/logical/origin.c:739 +#, c-format +msgid "could not read file \"%s\": read %d of %zu" +msgstr "kunde inte läsa fil \"%s\": läste %d av %zu" + +#: replication/logical/origin.c:748 +#, c-format +msgid "could not find free replication state, increase max_replication_slots" +msgstr "kunde inte 
hitta ledig replikeringsplats, öka max_replication_slots" + +#: replication/logical/origin.c:766 +#, c-format +msgid "replication slot checkpoint has wrong checksum %u, expected %u" +msgstr "replikeringsslot-checkpoint har felaktig kontrollsumma %u, förväntade %u" + +#: replication/logical/origin.c:890 +#, c-format +msgid "replication origin with OID %d is already active for PID %d" +msgstr "replikeringskälla med OID %d är redan aktiv för PID %d" + +#: replication/logical/origin.c:901 replication/logical/origin.c:1088 +#, c-format +msgid "could not find free replication state slot for replication origin with OID %u" +msgstr "kunde inte hitta ledig replikerings-state-slot för replikerings-origin med OID %u" + +#: replication/logical/origin.c:903 replication/logical/origin.c:1090 +#: replication/slot.c:1515 +#, c-format +msgid "Increase max_replication_slots and try again." +msgstr "Öka max_replication_slots och försök igen." + +#: replication/logical/origin.c:1047 +#, c-format +msgid "cannot setup replication origin when one is already setup" +msgstr "kan inte ställa in replikeringskälla när en redan är inställd" + +#: replication/logical/origin.c:1076 +#, c-format +msgid "replication identifier %d is already active for PID %d" +msgstr "replikeringsidentifierare %d är redan aktiv för PID %d" + +#: replication/logical/origin.c:1127 replication/logical/origin.c:1325 +#: replication/logical/origin.c:1345 +#, c-format +msgid "no replication origin is configured" +msgstr "ingen replikeringskälla är konfigurerad" + +#: replication/logical/relation.c:255 +#, c-format +msgid "logical replication target relation \"%s.%s\" does not exist" +msgstr "logisk replikeringsmålrelation \"%s.%s\" finns inte" + +#: replication/logical/relation.c:297 +#, c-format +msgid "logical replication target relation \"%s.%s\" is missing some replicated columns" +msgstr "logisk replikeringsmålrelation \"%s.%s\" saknar några replikerade kolumner" + +#: replication/logical/relation.c:337 +#, c-format +msgid "logical replication target relation \"%s.%s\" uses system columns in REPLICA IDENTITY index" +msgstr "logisk replikeringsmålrelation \"%s.%s\" använder systemkolumner i REPLICA IDENTITY-index" + +#: replication/logical/reorderbuffer.c:2310 +#, c-format +msgid "could not write to data file for XID %u: %m" +msgstr "kunde inte skriva till datafil för XID %u: %m" + +#: replication/logical/reorderbuffer.c:2403 +#: replication/logical/reorderbuffer.c:2425 +#, c-format +msgid "could not read from reorderbuffer spill file: %m" +msgstr "kunde inte läsa från reorderbuffer spill-fil: %m" + +#: replication/logical/reorderbuffer.c:2407 +#: replication/logical/reorderbuffer.c:2429 +#, c-format +msgid "could not read from reorderbuffer spill file: read %d instead of %u bytes" +msgstr "kunde inte läsa från reorderbuffer spill-fil: läste %d istället för %u byte" + +#: replication/logical/reorderbuffer.c:2642 +#, c-format +msgid "could not remove file \"%s\" during removal of pg_replslot/%s/*.xid: %m" +msgstr "kunde inte radera fil \"%s\" vid borttagning av pg_replslot/%s/*.xid: %m" + +#: replication/logical/reorderbuffer.c:3108 +#, c-format +msgid "could not read from file \"%s\": read %d instead of %d bytes" +msgstr "kunde inte läsa från fil \"%s\": läste %d istället för %d byte" + +#: replication/logical/snapbuild.c:612 +#, c-format +msgid "initial slot snapshot too large" +msgstr "initialt slot-snapshot är för stort" + +#: replication/logical/snapbuild.c:664 +#, c-format +msgid "exported logical decoding snapshot: \"%s\" with %u 
transaction ID" +msgid_plural "exported logical decoding snapshot: \"%s\" with %u transaction IDs" +msgstr[0] "exporterade logisk avkodnings-snapshot: \"%s\" med %u transaktions-ID" +msgstr[1] "exporterade logisk avkodnings-snapshot: \"%s\" med %u transaktions-ID" + +#: replication/logical/snapbuild.c:1262 replication/logical/snapbuild.c:1355 +#: replication/logical/snapbuild.c:1841 +#, c-format +msgid "logical decoding found consistent point at %X/%X" +msgstr "logisk avkodning hittade konsistent punkt vid %X/%X" + +#: replication/logical/snapbuild.c:1264 +#, c-format +msgid "There are no running transactions." +msgstr "Det finns inga körande transaktioner." + +#: replication/logical/snapbuild.c:1306 +#, c-format +msgid "logical decoding found initial starting point at %X/%X" +msgstr "logisk avkodning hittade initial startpunkt vid %X/%X" + +#: replication/logical/snapbuild.c:1308 replication/logical/snapbuild.c:1332 +#, c-format +msgid "Waiting for transactions (approximately %d) older than %u to end." +msgstr "Väntar på att transaktioner (cirka %d) äldre än %u skall gå klart." + +#: replication/logical/snapbuild.c:1330 +#, c-format +msgid "logical decoding found initial consistent point at %X/%X" +msgstr "logisk avkodning hittade initial konsistent punkt vid %X/%X" + +#: replication/logical/snapbuild.c:1357 +#, c-format +msgid "There are no old transactions anymore." +msgstr "Det finns inte längre några gamla transaktioner." + +#: replication/logical/snapbuild.c:1714 replication/logical/snapbuild.c:1742 +#: replication/logical/snapbuild.c:1759 replication/logical/snapbuild.c:1775 +#, c-format +msgid "could not read file \"%s\", read %d of %d: %m" +msgstr "kunde inte läsa fil \"%s\": läste %d av %d: %m" + +#: replication/logical/snapbuild.c:1720 +#, c-format +msgid "snapbuild state file \"%s\" has wrong magic number: %u instead of %u" +msgstr "snapbuild-state-fil \"%s\" har fel magiskt tal: %u istället för %u" + +#: replication/logical/snapbuild.c:1725 +#, c-format +msgid "snapbuild state file \"%s\" has unsupported version: %u instead of %u" +msgstr "snapbuild-state-fil \"%s\" har en ej stödd version: %u istället för %u" + +#: replication/logical/snapbuild.c:1788 +#, c-format +msgid "checksum mismatch for snapbuild state file \"%s\": is %u, should be %u" +msgstr "checksumma stämmer inte för snapbuild-state-fil \"%s\": är %u, skall vara %u" + +#: replication/logical/snapbuild.c:1843 +#, c-format +msgid "Logical decoding will begin using saved snapshot." +msgstr "Logisk avkodning kommer starta med sparat snapshot." 
+ +#: replication/logical/snapbuild.c:1915 +#, c-format +msgid "could not parse file name \"%s\"" +msgstr "kunde inte parsa filnamn \"%s\"" + +#: replication/logical/tablesync.c:138 +#, c-format +msgid "logical replication table synchronization worker for subscription \"%s\", table \"%s\" has finished" +msgstr "logisk replikerings tabellsynkroniseringsarbetare för prenumeration \"%s\", tabell \"%s\" är klar" + +#: replication/logical/tablesync.c:685 +#, c-format +msgid "could not fetch table info for table \"%s.%s\" from publisher: %s" +msgstr "kunde inte hämta tabellinfo för tabell \"%s.%s\" från publicerare: %s" + +#: replication/logical/tablesync.c:691 +#, c-format +msgid "table \"%s.%s\" not found on publisher" +msgstr "tabell \"%s.%s\" hittades inte hos publicerare" + +#: replication/logical/tablesync.c:721 +#, c-format +msgid "could not fetch table info for table \"%s.%s\": %s" +msgstr "kunde inte hämta tabellinfo för tabell \"%s.%s\": %s" + +#: replication/logical/tablesync.c:791 +#, c-format +msgid "could not start initial contents copy for table \"%s.%s\": %s" +msgstr "kunde inte starta initial innehållskopiering för tabell \"%s.%s\": %s" + +#: replication/logical/tablesync.c:904 +#, c-format +msgid "table copy could not start transaction on publisher" +msgstr "tabellkopiering kunde inte starta transaktion på publiceraren" + +#: replication/logical/tablesync.c:926 +#, c-format +msgid "table copy could not finish transaction on publisher" +msgstr "tabellkopiering kunde inte slutföra transaktion på publiceraren" + +#: replication/logical/worker.c:307 +#, c-format +msgid "processing remote data for replication target relation \"%s.%s\" column \"%s\", remote type %s, local type %s" +msgstr "processar fjärrdata för replikeringsmålrelation \"%s.%s\" kolumn \"%s\", fjärrtyp %s, lokal typ %s" + +#: replication/logical/worker.c:528 +#, c-format +msgid "ORIGIN message sent out of order" +msgstr "ORIGIN-meddelande skickat i fel ordning" + +#: replication/logical/worker.c:659 +#, c-format +msgid "publisher did not send replica identity column expected by the logical replication target relation \"%s.%s\"" +msgstr "publicerare skickade inte identitetskolumn för replika som förväntades av den logiska replikeringens målrelation \"%s.%s\"" + +#: replication/logical/worker.c:666 +#, c-format +msgid "logical replication target relation \"%s.%s\" has neither REPLICA IDENTITY index nor PRIMARY KEY and published relation does not have REPLICA IDENTITY FULL" +msgstr "logisk replikeringsmålrelation \"%s.%s\" har varken REPLICA IDENTITY-index eller PRIMARY KEY och den publicerade relationen har inte REPLICA IDENTITY FULL" + +#: replication/logical/worker.c:1005 +#, c-format +msgid "invalid logical replication message type \"%c\"" +msgstr "ogiltig logisk replikeringsmeddelandetyp \"%c\"" + +#: replication/logical/worker.c:1146 +#, c-format +msgid "data stream from publisher has ended" +msgstr "dataströmmen från publiceraren har avslutats" + +#: replication/logical/worker.c:1305 +#, c-format +msgid "terminating logical replication worker due to timeout" +msgstr "avslutar logisk replikeringsarbetare på grund av timeout" + +#: replication/logical/worker.c:1453 +#, c-format +msgid "logical replication apply worker for subscription \"%s\" will stop because the subscription was removed" +msgstr "logisk replikerings uppspelningsarbetare för prenumeration \"%s\" kommer stoppa då prenumerationen har tagits bort" + +#: replication/logical/worker.c:1467 +#, c-format +msgid "logical replication apply worker for 
subscription \"%s\" will stop because the subscription was disabled" +msgstr "logisk replikerings uppspelningsarbetare för prenumeration \"%s\" kommer stoppa då prenumerationen har stängts av" + +#: replication/logical/worker.c:1481 +#, c-format +msgid "logical replication apply worker for subscription \"%s\" will restart because the connection information was changed" +msgstr "logisk replikerings uppspelningsarbetare för prenumeration \"%s\" kommer starta om då uppkopplingsinformationen ändrats" + +#: replication/logical/worker.c:1495 +#, c-format +msgid "logical replication apply worker for subscription \"%s\" will restart because subscription was renamed" +msgstr "logisk replikerings uppspelningsarbetare för prenumeration \"%s\" kommer starta om då prenumerationen bytt namn" + +#: replication/logical/worker.c:1512 +#, c-format +msgid "logical replication apply worker for subscription \"%s\" will restart because the replication slot name was changed" +msgstr "logisk replikerings uppspelningsarbetare för prenumeration \"%s\" kommer starta om då replikeringsslotten bytt namn" + +#: replication/logical/worker.c:1526 +#, c-format +msgid "logical replication apply worker for subscription \"%s\" will restart because subscription's publications were changed" +msgstr "logisk replikerings uppspelningsarbetare för prenumeration \"%s\" kommer starta om då prenumerationens publiceringar ändrats" + +#: replication/logical/worker.c:1629 +#, c-format +msgid "logical replication apply worker for subscription %u will not start because the subscription was removed during startup" +msgstr "logisk replikerings uppspelningsarbetare för prenumeration %u kommer inte starta då prenumerationen togs bort under uppstart" + +#: replication/logical/worker.c:1641 +#, c-format +msgid "logical replication apply worker for subscription \"%s\" will not start because the subscription was disabled during startup" +msgstr "logisk replikerings uppspelningsarbetare för prenumeration \"%s\" kommer inte starta då prenumerationen stängdes av under uppstart" + +#: replication/logical/worker.c:1659 +#, c-format +msgid "logical replication table synchronization worker for subscription \"%s\", table \"%s\" has started" +msgstr "logisk replikerings tabellsynkroniseringsarbetare för prenumeration \"%s\", tabell \"%s\" har startat" + +#: replication/logical/worker.c:1663 +#, c-format +msgid "logical replication apply worker for subscription \"%s\" has started" +msgstr "logisk replikerings uppspelningsarbetare för prenumeration \"%s\" har startat" + +#: replication/logical/worker.c:1703 +#, c-format +msgid "subscription has no replication slot set" +msgstr "prenumeration har ingen replikeringsslot angiven" + +#: replication/pgoutput/pgoutput.c:117 +#, c-format +msgid "invalid proto_version" +msgstr "ogiltig proto_version" + +#: replication/pgoutput/pgoutput.c:122 +#, c-format +msgid "proto_version \"%s\" out of range" +msgstr "proto_version \"%s\" är utanför giltigt intervall" + +#: replication/pgoutput/pgoutput.c:139 +#, c-format +msgid "invalid publication_names syntax" +msgstr "ogiltig publication_names-syntax" + +#: replication/pgoutput/pgoutput.c:181 +#, c-format +msgid "client sent proto_version=%d but we only support protocol %d or lower" +msgstr "klienten skickade proto_version=%d men vi stöder bara protokoll %d eller lägre" + +#: replication/pgoutput/pgoutput.c:187 +#, c-format +msgid "client sent proto_version=%d but we only support protocol %d or higher" +msgstr "klienten skickade proto_version=%d men vi stöder bara protokoll
%d eller högre" + +#: replication/pgoutput/pgoutput.c:193 +#, c-format +msgid "publication_names parameter missing" +msgstr "saknar parameter publication_names" + +#: replication/slot.c:182 +#, c-format +msgid "replication slot name \"%s\" is too short" +msgstr "replikeringsslotnamn \"%s\" är för kort" + +#: replication/slot.c:191 +#, c-format +msgid "replication slot name \"%s\" is too long" +msgstr "replikeringsslotnamn \"%s\" är för långt" + +#: replication/slot.c:204 +#, c-format +msgid "replication slot name \"%s\" contains invalid character" +msgstr "replikeringsslotnamn \"%s\" innehåller ogiltiga tecken" + +#: replication/slot.c:206 +#, c-format +msgid "Replication slot names may only contain lower case letters, numbers, and the underscore character." +msgstr "Replikeringsslotnamn får bara innehålla små bokstäver, siffror och understreck." + +#: replication/slot.c:253 +#, c-format +msgid "replication slot \"%s\" already exists" +msgstr "replikeringsslot \"%s\" finns redan" + +#: replication/slot.c:263 +#, c-format +msgid "all replication slots are in use" +msgstr "alla replikeringsslots används" + +#: replication/slot.c:264 +#, c-format +msgid "Free one or increase max_replication_slots." +msgstr "Frigör en eller öka max_replication_slots." + +#: replication/slot.c:379 +#, c-format +msgid "replication slot \"%s\" does not exist" +msgstr "replikeringsslot \"%s\" existerar inte" + +#: replication/slot.c:390 replication/slot.c:940 +#, c-format +msgid "replication slot \"%s\" is active for PID %d" +msgstr "replikeringsslot \"%s\" är aktiv för PID %d" + +#: replication/slot.c:624 replication/slot.c:1128 replication/slot.c:1476 +#, c-format +msgid "could not remove directory \"%s\"" +msgstr "kunde inte ta bort katalog \"%s\"" + +#: replication/slot.c:970 +#, c-format +msgid "replication slots can only be used if max_replication_slots > 0" +msgstr "replikeringsslots kan bara användas om max_replication_slots > 0" + +#: replication/slot.c:975 +#, c-format +msgid "replication slots can only be used if wal_level >= replica" +msgstr "replikeringsslots kan bara användas om wal_level >= replica" + +#: replication/slot.c:1406 replication/slot.c:1446 +#, c-format +msgid "could not read file \"%s\", read %d of %u: %m" +msgstr "kunde inte läsa fil \"%s\": läste %d av %u: %m" + +#: replication/slot.c:1415 +#, c-format +msgid "replication slot file \"%s\" has wrong magic number: %u instead of %u" +msgstr "replikeringsslotfil \"%s\" har fel magiskt nummer: %u istället för %u" + +#: replication/slot.c:1422 +#, c-format +msgid "replication slot file \"%s\" has unsupported version %u" +msgstr "replikeringsslotfil \"%s\" har en icke stödd version %u" + +#: replication/slot.c:1429 +#, c-format +msgid "replication slot file \"%s\" has corrupted length %u" +msgstr "replikeringsslotfil \"%s\" har felaktig längd %u" + +#: replication/slot.c:1461 +#, c-format +msgid "checksum mismatch for replication slot file \"%s\": is %u, should be %u" +msgstr "kontrollsummefel för replikeringsslot-fil \"%s\": är %u, skall vara %u" + +#: replication/slot.c:1514 +#, c-format +msgid "too many replication slots active before shutdown" +msgstr "för många aktiva replikeringsslottar innan nedstängning" + +#: replication/slotfuncs.c:468 +#, c-format +msgid "invalid target wal lsn" +msgstr "ogiltig mål-lsn för wal" + +#: replication/slotfuncs.c:502 +#, c-format +msgid "cannot move slot to %X/%X, minimum is %X/%X" +msgstr "kan inte flytta slot till %X/%X, minimum är %X/%X" + +#: replication/syncrep.c:246 +#, c-format +msgid "canceling
the wait for synchronous replication and terminating connection due to administrator command" +msgstr "avbryter väntan på synkron replikering samt avslutar anslutning på grund av ett administratörskommando" + +#: replication/syncrep.c:247 replication/syncrep.c:264 +#, c-format +msgid "The transaction has already committed locally, but might not have been replicated to the standby." +msgstr "Transaktionen har redan commit:ats lokalt men har kanske inte replikerats till standby:en." + +#: replication/syncrep.c:263 +#, c-format +msgid "canceling wait for synchronous replication due to user request" +msgstr "avbryter väntan på synkron replikering efter användarens önskemål" + +#: replication/syncrep.c:397 +#, c-format +msgid "standby \"%s\" now has synchronous standby priority %u" +msgstr "standby \"%s\" har nu synkron standby-prioritet %u" + +#: replication/syncrep.c:458 +#, c-format +msgid "standby \"%s\" is now a synchronous standby with priority %u" +msgstr "standby \"%s\" är nu en synkron standby med prioritet %u" + +#: replication/syncrep.c:462 +#, c-format +msgid "standby \"%s\" is now a candidate for quorum synchronous standby" +msgstr "standby \"%s\" är nu en kvorumkandidat för synkron standby" + +#: replication/syncrep.c:1160 +#, c-format +msgid "synchronous_standby_names parser failed" +msgstr "synchronous_standby_names-parser misslyckades" + +#: replication/syncrep.c:1166 +#, c-format +msgid "number of synchronous standbys (%d) must be greater than zero" +msgstr "antal synkrona standbys (%d) måste vara fler än noll" + +#: replication/walreceiver.c:169 +#, c-format +msgid "terminating walreceiver process due to administrator command" +msgstr "avslutar wal-mottagarprocessen på grund av ett administratörskommando" + +#: replication/walreceiver.c:309 +#, c-format +msgid "could not connect to the primary server: %s" +msgstr "kunde inte ansluta till primärserver: %s" + +#: replication/walreceiver.c:359 +#, c-format +msgid "database system identifier differs between the primary and standby" +msgstr "databassystemets identifierare skiljer sig åt mellan primären och standby:en" + +#: replication/walreceiver.c:360 +#, c-format +msgid "The primary's identifier is %s, the standby's identifier is %s." +msgstr "Primärens identifierare är %s, standby:ens identifierare är %s." + +#: replication/walreceiver.c:371 +#, c-format +msgid "highest timeline %u of the primary is behind recovery timeline %u" +msgstr "högsta tidslinjen %u i primären är efter återställningstidslinjen %u" + +#: replication/walreceiver.c:407 +#, c-format +msgid "started streaming WAL from primary at %X/%X on timeline %u" +msgstr "startade strömning av WAL från primären vid %X/%X på tidslinje %u" + +#: replication/walreceiver.c:412 +#, c-format +msgid "restarted WAL streaming at %X/%X on timeline %u" +msgstr "återstartade WAL-strömning vid %X/%X på tidslinje %u" + +#: replication/walreceiver.c:441 +#, c-format +msgid "cannot continue WAL streaming, recovery has already ended" +msgstr "kan inte fortsätta WAL-strömning, återställning har redan avslutats" + +#: replication/walreceiver.c:478 +#, c-format +msgid "replication terminated by primary server" +msgstr "replikering avslutad av primär server" + +#: replication/walreceiver.c:479 +#, c-format +msgid "End of WAL reached on timeline %u at %X/%X." +msgstr "Slut på WAL nådd på tidslinje %u vid %X/%X." 
+ +#: replication/walreceiver.c:574 +#, c-format +msgid "terminating walreceiver due to timeout" +msgstr "avslutar wal-mottagare på grund av timeout" + +#: replication/walreceiver.c:614 +#, c-format +msgid "primary server contains no more WAL on requested timeline %u" +msgstr "primär server har ingen mer WAL på efterfrågad tidslinje %u" + +#: replication/walreceiver.c:629 replication/walreceiver.c:988 +#, c-format +msgid "could not close log segment %s: %m" +msgstr "kunde inte stänga loggsegment %s: %m" + +#: replication/walreceiver.c:754 +#, c-format +msgid "fetching timeline history file for timeline %u from primary server" +msgstr "hämtar tidslinjehistorikfil för tidslinje %u från primära servern" + +#: replication/walreceiver.c:1042 +#, c-format +msgid "could not write to log segment %s at offset %u, length %lu: %m" +msgstr "kunde inte skriva till loggfilsegment %s på offset %u, längd %lu: %m" + +#: replication/walsender.c:491 +#, c-format +msgid "could not seek to beginning of file \"%s\": %m" +msgstr "kunde inte söka till början av filen \"%s\": %m" + +#: replication/walsender.c:532 +#, c-format +msgid "IDENTIFY_SYSTEM has not been run before START_REPLICATION" +msgstr "IDENTIFY_SYSTEM har inte körts före START_REPLICATION" + +#: replication/walsender.c:549 +#, c-format +msgid "cannot use a logical replication slot for physical replication" +msgstr "kan inte använda logisk replikeringsslot för fysisk replikering" + +#: replication/walsender.c:612 +#, c-format +msgid "requested starting point %X/%X on timeline %u is not in this server's history" +msgstr "efterfrågad startpunkt %X/%X på tidslinje %u finns inte i denna servers historik" + +#: replication/walsender.c:616 +#, c-format +msgid "This server's history forked from timeline %u at %X/%X." +msgstr "Denna servers historik delade sig från tidslinje %u vid %X/%X." + +#: replication/walsender.c:661 +#, c-format +msgid "requested starting point %X/%X is ahead of the WAL flush position of this server %X/%X" +msgstr "efterfrågad startpunkt %X/%X är längre fram än denna servers flush:ade WAL-skrivposition %X/%X" + +#: replication/walsender.c:890 +#, c-format +msgid "CREATE_REPLICATION_SLOT ... EXPORT_SNAPSHOT must not be called inside a transaction" +msgstr "CREATE_REPLICATION_SLOT ... EXPORT_SNAPSHOT får inte anropas i en transaktion" + +#: replication/walsender.c:899 +#, c-format +msgid "CREATE_REPLICATION_SLOT ... USE_SNAPSHOT must be called inside a transaction" +msgstr "CREATE_REPLICATION_SLOT ... USE_SNAPSHOT måste anropas i en transaktion" + +#: replication/walsender.c:904 +#, c-format +msgid "CREATE_REPLICATION_SLOT ... USE_SNAPSHOT must be called in REPEATABLE READ isolation mode transaction" +msgstr "CREATE_REPLICATION_SLOT ... USE_SNAPSHOT måste anropas i transaktions REPEATABLE READ-isolationsläge" + +#: replication/walsender.c:909 +#, c-format +msgid "CREATE_REPLICATION_SLOT ... USE_SNAPSHOT must be called before any query" +msgstr "CREATE_REPLICATION_SLOT ... USE_SNAPSHOT måste anropas innan någon fråga" + +#: replication/walsender.c:914 +#, c-format +msgid "CREATE_REPLICATION_SLOT ... USE_SNAPSHOT must not be called in a subtransaction" +msgstr "CREATE_REPLICATION_SLOT ... 
USE_SNAPSHOT får inte anropas i en subtransaktion" + +#: replication/walsender.c:1060 +#, c-format +msgid "terminating walsender process after promotion" +msgstr "stänger ner walsender-process efter promovering" + +#: replication/walsender.c:1447 +#, c-format +msgid "cannot execute new commands while WAL sender is in stopping mode" +msgstr "kan inte utföra nya kommandon när WAL-sändare är i stopp-läge" + +#: replication/walsender.c:1480 +#, c-format +msgid "received replication command: %s" +msgstr "tog emot replikeringskommando: %s" + +#: replication/walsender.c:1496 tcop/fastpath.c:279 tcop/postgres.c:1010 +#: tcop/postgres.c:1334 tcop/postgres.c:1594 tcop/postgres.c:2000 +#: tcop/postgres.c:2373 tcop/postgres.c:2452 +#, c-format +msgid "current transaction is aborted, commands ignored until end of transaction block" +msgstr "aktuella transaktionen har avbrutits, alla kommandon ignoreras tills slutet på transaktionen" + +#: replication/walsender.c:1561 +#, c-format +msgid "cannot execute SQL commands in WAL sender for physical replication" +msgstr "kan inte köra SQL-kommandon i WAL-sändare för fysisk replikering" + +#: replication/walsender.c:1607 replication/walsender.c:1623 +#, c-format +msgid "unexpected EOF on standby connection" +msgstr "oväntat EOF från standby-anslutning" + +#: replication/walsender.c:1637 +#, c-format +msgid "unexpected standby message type \"%c\", after receiving CopyDone" +msgstr "oväntad standby-meddelandetyp \"%c\" efter att vi tagit emot CopyDone" + +#: replication/walsender.c:1675 +#, c-format +msgid "invalid standby message type \"%c\"" +msgstr "ogiltig standby-meddelandetyp \"%c\"" + +#: replication/walsender.c:1716 +#, c-format +msgid "unexpected message type \"%c\"" +msgstr "oväntad meddelandetyp \"%c\"" + +#: replication/walsender.c:2086 +#, c-format +msgid "terminating walsender process due to replication timeout" +msgstr "avslutar walsender-process på grund av replikerings-timeout" + +#: replication/walsender.c:2172 +#, c-format +msgid "standby \"%s\" has now caught up with primary" +msgstr "standby \"%s\" har nu kommit ikapp primären" + +#: replication/walsender.c:2279 +#, c-format +msgid "number of requested standby connections exceeds max_wal_senders (currently %d)" +msgstr "antalet efterfrågade standby-anslutningar överskrider max_wal_senders (nu %d)" + +#: rewrite/rewriteDefine.c:112 rewrite/rewriteDefine.c:980 +#, c-format +msgid "rule \"%s\" for relation \"%s\" already exists" +msgstr "regel \"%s\" för relation \"%s\" existerar redan" + +#: rewrite/rewriteDefine.c:296 +#, c-format +msgid "rule actions on OLD are not implemented" +msgstr "regelhandlingar på OLD är inte implementerat" + +#: rewrite/rewriteDefine.c:297 +#, c-format +msgid "Use views or triggers instead." +msgstr "Använd vyer eller utlösare (trigger) istället." + +#: rewrite/rewriteDefine.c:301 +#, c-format +msgid "rule actions on NEW are not implemented" +msgstr "regelhandlingar på NEW är inte implementerat" + +#: rewrite/rewriteDefine.c:302 +#, c-format +msgid "Use triggers instead." +msgstr "Använd utlösare (trigger) istället." + +#: rewrite/rewriteDefine.c:315 +#, c-format +msgid "INSTEAD NOTHING rules on SELECT are not implemented" +msgstr "INSTEAD NOTHING-regler på SELECT är inte implementerat ännu" + +#: rewrite/rewriteDefine.c:316 +#, c-format +msgid "Use views instead." +msgstr "Använd vyer istället." 
+ +#: rewrite/rewriteDefine.c:324 +#, c-format +msgid "multiple actions for rules on SELECT are not implemented" +msgstr "flera regelhandlingar på SELECT är inte implementerat" + +#: rewrite/rewriteDefine.c:334 +#, c-format +msgid "rules on SELECT must have action INSTEAD SELECT" +msgstr "regler på SELECT måste ha handlingen INSTEAD SELECT" + +#: rewrite/rewriteDefine.c:342 +#, c-format +msgid "rules on SELECT must not contain data-modifying statements in WITH" +msgstr "regler på SELECT får inte innehålla datamodifierande satser i WITH" + +#: rewrite/rewriteDefine.c:350 +#, c-format +msgid "event qualifications are not implemented for rules on SELECT" +msgstr "händelsebegränsningar är inte implementerat för regler på SELECT" + +#: rewrite/rewriteDefine.c:377 +#, c-format +msgid "\"%s\" is already a view" +msgstr "\"%s\" är redan en vy" + +#: rewrite/rewriteDefine.c:401 +#, c-format +msgid "view rule for \"%s\" must be named \"%s\"" +msgstr "vy-regel (rule) för \"%s\" måste ha namnet \"%s\"" + +#: rewrite/rewriteDefine.c:428 +#, c-format +msgid "cannot convert partitioned table \"%s\" to a view" +msgstr "kan inte konvertera partitionerad tabell \"%s\" till en vy" + +#: rewrite/rewriteDefine.c:434 +#, c-format +msgid "cannot convert partition \"%s\" to a view" +msgstr "kan inte konvertera partition \"%s\" till en vy" + +#: rewrite/rewriteDefine.c:442 +#, c-format +msgid "could not convert table \"%s\" to a view because it is not empty" +msgstr "kunde inte konvertera tabell \"%s\" till en vy då den inte är tom" + +#: rewrite/rewriteDefine.c:450 +#, c-format +msgid "could not convert table \"%s\" to a view because it has triggers" +msgstr "kunde inte konvertera tabell \"%s\" till en vy då den har utlösare" + +#: rewrite/rewriteDefine.c:452 +#, c-format +msgid "In particular, the table cannot be involved in any foreign key relationships." +msgstr "Mer specifikt, tabellen kan inte vara inblandad i främmande-nyckelberoenden." 
+ +#: rewrite/rewriteDefine.c:457 +#, c-format +msgid "could not convert table \"%s\" to a view because it has indexes" +msgstr "kunde inte konvertera tabell \"%s\" till en vy eftersom den har index" + +#: rewrite/rewriteDefine.c:463 +#, c-format +msgid "could not convert table \"%s\" to a view because it has child tables" +msgstr "kunde inte konvertera tabell \"%s\" till en vy då den har barntabeller" + +#: rewrite/rewriteDefine.c:469 +#, c-format +msgid "could not convert table \"%s\" to a view because it has row security enabled" +msgstr "kunde inte konvertera tabell \"%s\" till en vy eftersom den har radsäkerhet påslagen" + +#: rewrite/rewriteDefine.c:475 +#, c-format +msgid "could not convert table \"%s\" to a view because it has row security policies" +msgstr "kunde inte konvertera tabell \"%s\" till en vy eftersom den har radsäkerhetspolicy" + +#: rewrite/rewriteDefine.c:502 +#, c-format +msgid "cannot have multiple RETURNING lists in a rule" +msgstr "kan inte ha flera RETURNING-listor i en regel" + +#: rewrite/rewriteDefine.c:507 +#, c-format +msgid "RETURNING lists are not supported in conditional rules" +msgstr "RETURNING-listor stöds inte i villkorade regler" + +#: rewrite/rewriteDefine.c:511 +#, c-format +msgid "RETURNING lists are not supported in non-INSTEAD rules" +msgstr "RETURNING-listor stöds inte i icke-INSTEAD-regler" + +#: rewrite/rewriteDefine.c:675 +#, c-format +msgid "SELECT rule's target list has too many entries" +msgstr "SELECT-regelns mållista har för många poster" + +#: rewrite/rewriteDefine.c:676 +#, c-format +msgid "RETURNING list has too many entries" +msgstr "RETURNING-lista har för många element" + +#: rewrite/rewriteDefine.c:703 +#, c-format +msgid "cannot convert relation containing dropped columns to view" +msgstr "kan inte konvertera en relation som har borttagna kolumner till en vy" + +#: rewrite/rewriteDefine.c:704 +#, c-format +msgid "cannot create a RETURNING list for a relation containing dropped columns" +msgstr "kan inte skapa en RETURNING-lista för relationer som innehåller borttagna kolumner" + +#: rewrite/rewriteDefine.c:710 +#, c-format +msgid "SELECT rule's target entry %d has different column name from column \"%s\"" +msgstr "SELECT-regels målpost %d har ett annat kolumnnamn än kolumnen \"%s\"" + +#: rewrite/rewriteDefine.c:712 +#, c-format +msgid "SELECT target entry is named \"%s\"." +msgstr "SELECT-målpost har namn \"%s\"." + +#: rewrite/rewriteDefine.c:721 +#, c-format +msgid "SELECT rule's target entry %d has different type from column \"%s\"" +msgstr "SELECT-regels målpost %d har en annan typ än kolumnen \"%s\"" + +#: rewrite/rewriteDefine.c:723 +#, c-format +msgid "RETURNING list's entry %d has different type from column \"%s\"" +msgstr "RETURNING-listans post %d har en annan typ än kolumnen \"%s\"" + +#: rewrite/rewriteDefine.c:726 rewrite/rewriteDefine.c:750 +#, c-format +msgid "SELECT target entry has type %s, but column has type %s." +msgstr "SELECT-målpost har typ %s men kolumnen har typ %s." + +#: rewrite/rewriteDefine.c:729 rewrite/rewriteDefine.c:754 +#, c-format +msgid "RETURNING list entry has type %s, but column has type %s." +msgstr "RETURNING-listpost har typ %s men kolumnen har typ %s." 
+ +#: rewrite/rewriteDefine.c:745 +#, c-format +msgid "SELECT rule's target entry %d has different size from column \"%s\"" +msgstr "SELECT-regelns målpost %d har en annan storlek än kolumnen \"%s\"" + +#: rewrite/rewriteDefine.c:747 +#, c-format +msgid "RETURNING list's entry %d has different size from column \"%s\"" +msgstr "RETURNING-listpost %d har en annan storlek än kolumnen \"%s\"" + +#: rewrite/rewriteDefine.c:764 +#, c-format +msgid "SELECT rule's target list has too few entries" +msgstr "SELECT-regels mållista har för få element" + +#: rewrite/rewriteDefine.c:765 +#, c-format +msgid "RETURNING list has too few entries" +msgstr "RETURNING-lista har för få element" + +#: rewrite/rewriteDefine.c:857 rewrite/rewriteDefine.c:971 +#: rewrite/rewriteSupport.c:109 +#, c-format +msgid "rule \"%s\" for relation \"%s\" does not exist" +msgstr "regel \"%s\" för relation \"%s\" existerar inte" + +#: rewrite/rewriteDefine.c:990 +#, c-format +msgid "renaming an ON SELECT rule is not allowed" +msgstr "byta namn på en ON SELECT-regel tillåts inte" + +#: rewrite/rewriteHandler.c:540 +#, c-format +msgid "WITH query name \"%s\" appears in both a rule action and the query being rewritten" +msgstr "WITH-frågenamn \"%s\" finns både i en regelhändelse och i frågan som skrivs om" + +#: rewrite/rewriteHandler.c:600 +#, c-format +msgid "cannot have RETURNING lists in multiple rules" +msgstr "kan inte ha RETURNING-listor i multipla regler" + +#: rewrite/rewriteHandler.c:822 +#, c-format +msgid "cannot insert into column \"%s\"" +msgstr "kan inte sätta in i kolumn \"%s\"" + +#: rewrite/rewriteHandler.c:823 rewrite/rewriteHandler.c:838 +#, c-format +msgid "Column \"%s\" is an identity column defined as GENERATED ALWAYS." +msgstr "Kolumn \"%s\" är en identitetskolumn definierad som GENERATED ALWAYS." + +#: rewrite/rewriteHandler.c:825 +#, c-format +msgid "Use OVERRIDING SYSTEM VALUE to override." +msgstr "Använd OVERRIDING SYSTEM VALUE för att överskugga." + +#: rewrite/rewriteHandler.c:837 +#, c-format +msgid "column \"%s\" can only be updated to DEFAULT" +msgstr "kolumn \"%s\" kan bara uppdateras till DEFAULT" + +#: rewrite/rewriteHandler.c:999 rewrite/rewriteHandler.c:1017 +#, c-format +msgid "multiple assignments to same column \"%s\"" +msgstr "flera tilldelningar till samma kolumn \"%s\"" + +#: rewrite/rewriteHandler.c:1909 +#, c-format +msgid "infinite recursion detected in policy for relation \"%s\"" +msgstr "oändlig rekursion detekterad i policy för relation \"%s\"" + +#: rewrite/rewriteHandler.c:2226 +msgid "Junk view columns are not updatable." +msgstr "Skräpkolumner i vy är inte uppdateringsbara." + +#: rewrite/rewriteHandler.c:2231 +msgid "View columns that are not columns of their base relation are not updatable." +msgstr "Vykolumner som inte är kolumner i dess basrelation är inte uppdateringsbara." + +#: rewrite/rewriteHandler.c:2234 +msgid "View columns that refer to system columns are not updatable." +msgstr "Vykolumner som refererar till systemkolumner är inte uppdateringsbara." + +#: rewrite/rewriteHandler.c:2237 +msgid "View columns that return whole-row references are not updatable." +msgstr "Vykolumner som returnerar hel-rad-referenser är inte uppdateringsbara." + +#: rewrite/rewriteHandler.c:2295 +msgid "Views containing DISTINCT are not automatically updatable." +msgstr "Vyer som innehåller DISTINCT är inte automatiskt uppdateringsbara." + +#: rewrite/rewriteHandler.c:2298 +msgid "Views containing GROUP BY are not automatically updatable." 
+msgstr "Vyer som innehåller GROUP BY är inte automatiskt uppdateringsbara." + +#: rewrite/rewriteHandler.c:2301 +msgid "Views containing HAVING are not automatically updatable." +msgstr "Vyer som innehåller HAVING är inte automatiskt uppdateringsbara." + +#: rewrite/rewriteHandler.c:2304 +msgid "Views containing UNION, INTERSECT, or EXCEPT are not automatically updatable." +msgstr "Vyer som innehåller UNION, INTERSECT eller EXCEPT är inte automatiskt uppdateringsbara." + +#: rewrite/rewriteHandler.c:2307 +msgid "Views containing WITH are not automatically updatable." +msgstr "Vyer som innehåller WITH är inte automatiskt uppdateringsbara." + +#: rewrite/rewriteHandler.c:2310 +msgid "Views containing LIMIT or OFFSET are not automatically updatable." +msgstr "Vyer som innehåller LIMIT eller OFFSET är inte automatiskt uppdateringsbara." + +#: rewrite/rewriteHandler.c:2322 +msgid "Views that return aggregate functions are not automatically updatable." +msgstr "Vyer som returnerar aggregatfunktioner är inte automatiskt uppdateringsbara." + +#: rewrite/rewriteHandler.c:2325 +msgid "Views that return window functions are not automatically updatable." +msgstr "Vyer som returnerar fönsterfunktioner uppdateras inte automatiskt." + +#: rewrite/rewriteHandler.c:2328 +msgid "Views that return set-returning functions are not automatically updatable." +msgstr "Vyer som returnerar mängd-returnerande funktioner är inte automatiskt uppdateringsbara." + +#: rewrite/rewriteHandler.c:2335 rewrite/rewriteHandler.c:2339 +#: rewrite/rewriteHandler.c:2347 +msgid "Views that do not select from a single table or view are not automatically updatable." +msgstr "Vyer som inte läser från en ensam tabell eller vy är inte automatiskt uppdateringsbar." + +#: rewrite/rewriteHandler.c:2350 +msgid "Views containing TABLESAMPLE are not automatically updatable." +msgstr "Vyer som innehåller TABLESAMPLE är inte automatiskt uppdateringsbara." + +#: rewrite/rewriteHandler.c:2374 +msgid "Views that have no updatable columns are not automatically updatable." +msgstr "Vyer som inte har några uppdateringsbara kolumner är inte automatiskt uppdateringsbara." 
+ +#: rewrite/rewriteHandler.c:2828 +#, c-format +msgid "cannot insert into column \"%s\" of view \"%s\"" +msgstr "kan inte insert:a i kolumn \"%s\" i vy \"%s\"" + +#: rewrite/rewriteHandler.c:2836 +#, c-format +msgid "cannot update column \"%s\" of view \"%s\"" +msgstr "kan inte uppdatera kolumn \"%s\" i vy \"%s\"" + +#: rewrite/rewriteHandler.c:3219 +#, c-format +msgid "DO INSTEAD NOTHING rules are not supported for data-modifying statements in WITH" +msgstr "DO INSTEAD NOTHING-regler stöds inte för datamodifierande satser i WITH" + +#: rewrite/rewriteHandler.c:3233 +#, c-format +msgid "conditional DO INSTEAD rules are not supported for data-modifying statements in WITH" +msgstr "villkorliga DO INSTEAD-regler stöds inte för datamodifierande satser i WITH" + +#: rewrite/rewriteHandler.c:3237 +#, c-format +msgid "DO ALSO rules are not supported for data-modifying statements in WITH" +msgstr "DO ALSO-regler stöds inte för datamodifierande satser i WITH" + +#: rewrite/rewriteHandler.c:3242 +#, c-format +msgid "multi-statement DO INSTEAD rules are not supported for data-modifying statements in WITH" +msgstr "fler-satsiga DO INSTEAD-regler stöds inte för datamodifierande satser i WITH" + +#: rewrite/rewriteHandler.c:3461 +#, c-format +msgid "cannot perform INSERT RETURNING on relation \"%s\"" +msgstr "kan inte utföra INSERT RETURNING på relation \"%s\"" + +#: rewrite/rewriteHandler.c:3463 +#, c-format +msgid "You need an unconditional ON INSERT DO INSTEAD rule with a RETURNING clause." +msgstr "Du behöver en villkorslös ON INSERT DO INSTEAD-regel med en RETURNING-klausul." + +#: rewrite/rewriteHandler.c:3468 +#, c-format +msgid "cannot perform UPDATE RETURNING on relation \"%s\"" +msgstr "kan inte utföra UPDATE RETURNING på relation \"%s\"" + +#: rewrite/rewriteHandler.c:3470 +#, c-format +msgid "You need an unconditional ON UPDATE DO INSTEAD rule with a RETURNING clause." +msgstr "Du behöver en villkorslös ON UPDATE DO INSTEAD-regel med en RETURNING-klausul." + +#: rewrite/rewriteHandler.c:3475 +#, c-format +msgid "cannot perform DELETE RETURNING on relation \"%s\"" +msgstr "kan inte utföra DELETE RETURNING på relation \"%s\"" + +#: rewrite/rewriteHandler.c:3477 +#, c-format +msgid "You need an unconditional ON DELETE DO INSTEAD rule with a RETURNING clause." +msgstr "Du behöver en villkorslös ON DELETE DO INSTEAD-regel med en RETURNING-klausul." 
+ +#: rewrite/rewriteHandler.c:3495 +#, c-format +msgid "INSERT with ON CONFLICT clause cannot be used with table that has INSERT or UPDATE rules" +msgstr "INSERT med ON CONFLICT-klausul kan inte användas med tabell som har INSERT- eller UPDATE-regler" + +#: rewrite/rewriteHandler.c:3552 +#, c-format +msgid "WITH cannot be used in a query that is rewritten by rules into multiple queries" +msgstr "WITH kan inte användas i en fråga där regler skrivit om den till flera olika frågor" + +#: rewrite/rewriteManip.c:1003 +#, c-format +msgid "conditional utility statements are not implemented" +msgstr "villkorliga hjälpsatser är inte implementerat" + +#: rewrite/rewriteManip.c:1169 +#, c-format +msgid "WHERE CURRENT OF on a view is not implemented" +msgstr "WHERE CURRENT OF för en vy är inte implementerat" + +#: rewrite/rewriteManip.c:1503 +#, c-format +msgid "NEW variables in ON UPDATE rules cannot reference columns that are part of a multiple assignment in the subject UPDATE command" +msgstr "NEW-variabler i ON UPDATE-regler kan inte referera till kolumner som är del av en multipel tilldelning i subjektets UPDATE-kommando" + +#: scan.l:436 +msgid "unterminated /* comment" +msgstr "ej avslutad /*-kommentar" + +#: scan.l:465 +msgid "unterminated bit string literal" +msgstr "ej avslutad bitsträngslitteral" + +#: scan.l:486 +msgid "unterminated hexadecimal string literal" +msgstr "ej avslutad hexadecimal stränglitteral" + +#: scan.l:536 +#, c-format +msgid "unsafe use of string constant with Unicode escapes" +msgstr "osäker användning av strängkonstant med Unicode-escape:r" + +#: scan.l:537 +#, c-format +msgid "String constants with Unicode escapes cannot be used when standard_conforming_strings is off." +msgstr "Strängkonstanter som innehåller Unicode-escapesekvenser kan inte användas när standard_conforming_strings är av." + +#: scan.l:583 scan.l:782 +msgid "invalid Unicode escape character" +msgstr "ogiltigt Unicode-escape-tecken" + +#: scan.l:609 scan.l:617 scan.l:625 scan.l:626 scan.l:627 scan.l:1339 +#: scan.l:1366 scan.l:1370 scan.l:1408 scan.l:1412 scan.l:1434 scan.l:1444 +msgid "invalid Unicode surrogate pair" +msgstr "ogiltigt Unicode-surrogatpar" + +#: scan.l:631 +#, c-format +msgid "invalid Unicode escape" +msgstr "ogiltig Unicode-escapesekvens" + +#: scan.l:632 +#, c-format +msgid "Unicode escapes must be \\uXXXX or \\UXXXXXXXX." +msgstr "Unicode-escapesekvenser måste vara \\uXXXX eller \\UXXXXXXXX." + +#: scan.l:643 +#, c-format +msgid "unsafe use of \\' in a string literal" +msgstr "osäker användning av \\' i stränglitteral" + +#: scan.l:644 +#, c-format +msgid "Use '' to write quotes in strings. \\' is insecure in client-only encodings." +msgstr "Använd '' för att inkludera ett enkelcitattecken i en sträng. \\' är inte säkert i klient-teckenkodning." + +#: scan.l:719 +msgid "unterminated dollar-quoted string" +msgstr "icke terminerad dollarciterad sträng" + +#: scan.l:736 scan.l:762 scan.l:777 +msgid "zero-length delimited identifier" +msgstr "noll-längds avdelad identifierare" + +#: scan.l:797 syncrep_scanner.l:91 +msgid "unterminated quoted identifier" +msgstr "icke terminerad citerad identifierare" + +#: scan.l:928 +msgid "operator too long" +msgstr "operatorn är för lång" + +#. translator: %s is typically the translation of "syntax error" +#: scan.l:1084 +#, c-format +msgid "%s at end of input" +msgstr "%s vid slutet av indatan" + +#. 
translator: first %s is typically the translation of "syntax error" +#: scan.l:1092 +#, c-format +msgid "%s at or near \"%s\"" +msgstr "%s vid eller nära \"%s\"" + +#: scan.l:1253 scan.l:1285 +msgid "Unicode escape values cannot be used for code point values above 007F when the server encoding is not UTF8" +msgstr "Unicode-escape-värden kan inte användas för kodpunkter över 007F när serverkodningen inte är UTF8" + +#: scan.l:1281 scan.l:1426 +msgid "invalid Unicode escape value" +msgstr "ogiltigt Unicode-escapevärde" + +#: scan.l:1490 +#, c-format +msgid "nonstandard use of \\' in a string literal" +msgstr "ickestandard användning av \\' i stränglitteral" + +#: scan.l:1491 +#, c-format +msgid "Use '' to write quotes in strings, or use the escape string syntax (E'...')." +msgstr "Använd '' för att skriva citattecken i strängar eller använd escape-strängsyntax (E'...')." + +#: scan.l:1500 +#, c-format +msgid "nonstandard use of \\\\ in a string literal" +msgstr "ickestandard användning av \\\\ i stränglitteral" + +#: scan.l:1501 +#, c-format +msgid "Use the escape string syntax for backslashes, e.g., E'\\\\'." +msgstr "Använd escape-strängsyntax för bakstreck, dvs. E'\\\\'." + +#: scan.l:1515 +#, c-format +msgid "nonstandard use of escape in a string literal" +msgstr "ickestandard användning av escape i stränglitteral" + +#: scan.l:1516 +#, c-format +msgid "Use the escape string syntax for escapes, e.g., E'\\r\\n'." +msgstr "Använd escape-strängsyntax, dvs E'\\r\\n'." + +#: snowball/dict_snowball.c:177 +#, c-format +msgid "no Snowball stemmer available for language \"%s\" and encoding \"%s\"" +msgstr "det finns ingen Snowball-stemmer för språk \"%s\" och kodning \"%s\"" + +#: snowball/dict_snowball.c:200 tsearch/dict_ispell.c:74 +#: tsearch/dict_simple.c:49 +#, c-format +msgid "multiple StopWords parameters" +msgstr "multipla StoppOrd-parametrar" + +#: snowball/dict_snowball.c:209 +#, c-format +msgid "multiple Language parameters" +msgstr "multipla parametrar \"Language\"" + +#: snowball/dict_snowball.c:216 +#, c-format +msgid "unrecognized Snowball parameter: \"%s\"" +msgstr "okänd Snowball-parameter: \"%s\"" + +#: snowball/dict_snowball.c:224 +#, c-format +msgid "missing Language parameter" +msgstr "saknar parameter \"Language\"" + +#: statistics/dependencies.c:534 +#, c-format +msgid "invalid zero-length item array in MVDependencies" +msgstr "ogiltig array med storlek noll i MVDependencies" + +#: statistics/dependencies.c:672 statistics/dependencies.c:725 +#: statistics/mvdistinct.c:341 statistics/mvdistinct.c:394 +#: utils/adt/pseudotypes.c:94 utils/adt/pseudotypes.c:122 +#: utils/adt/pseudotypes.c:147 utils/adt/pseudotypes.c:171 +#: utils/adt/pseudotypes.c:282 utils/adt/pseudotypes.c:307 +#: utils/adt/pseudotypes.c:335 utils/adt/pseudotypes.c:363 +#: utils/adt/pseudotypes.c:393 +#, c-format +msgid "cannot accept a value of type %s" +msgstr "kan inte acceptera ett värde av typ %s" + +#: statistics/extended_stats.c:104 +#, c-format +msgid "statistics object \"%s.%s\" could not be computed for relation \"%s.%s\"" +msgstr "statistikobjekt \"%s.%s\" kunde inte beräknas för relation \"%s.%s\"" + +#: statistics/mvdistinct.c:262 +#, c-format +msgid "invalid ndistinct magic %08x (expected %08x)" +msgstr "ogiltigt magiskt värde %08x för ndistinct (förväntade %08x)" + +#: statistics/mvdistinct.c:267 +#, c-format +msgid "invalid ndistinct type %d (expected %d)" +msgstr "ogiltig typ %d för ndistinct (förväntade %d)" + +#: statistics/mvdistinct.c:272 +#, c-format +msgid "invalid zero-length item 
array in MVNDistinct" +msgstr "ogiltig array med storlek noll i MVNDistinct" + +#: statistics/mvdistinct.c:281 +#, c-format +msgid "invalid MVNDistinct size %zd (expected at least %zd)" +msgstr "ogiltig MVNDistinct-storlek %zd (förväntade minst %zd)" + +#: storage/buffer/bufmgr.c:544 storage/buffer/bufmgr.c:657 +#, c-format +msgid "cannot access temporary tables of other sessions" +msgstr "får inte röra temporära tabeller som tillhör andra sessioner" + +#: storage/buffer/bufmgr.c:807 +#, c-format +msgid "unexpected data beyond EOF in block %u of relation %s" +msgstr "oväntad data efter EOF i block %u för relation %s" + +#: storage/buffer/bufmgr.c:809 +#, c-format +msgid "This has been seen to occur with buggy kernels; consider updating your system." +msgstr "Detta beteende har observerats med buggiga kärnor; fundera på att uppdatera ditt system." + +#: storage/buffer/bufmgr.c:907 +#, c-format +msgid "invalid page in block %u of relation %s; zeroing out page" +msgstr "felaktig sida i block %u för relation %s; nollställer sidan" + +#: storage/buffer/bufmgr.c:4013 +#, c-format +msgid "could not write block %u of %s" +msgstr "kunde inte skriva block %u av %s" + +#: storage/buffer/bufmgr.c:4015 +#, c-format +msgid "Multiple failures --- write error might be permanent." +msgstr "Multipla fel --- skrivfelet kan vara permanent." + +#: storage/buffer/bufmgr.c:4036 storage/buffer/bufmgr.c:4055 +#, c-format +msgid "writing block %u of relation %s" +msgstr "skriver block %u i relation %s" + +#: storage/buffer/bufmgr.c:4358 +#, c-format +msgid "snapshot too old" +msgstr "snapshot för gammal" + +#: storage/buffer/localbuf.c:199 +#, c-format +msgid "no empty local buffer available" +msgstr "ingen tom lokal buffer tillgänglig" + +#: storage/buffer/localbuf.c:427 +#, c-format +msgid "cannot access temporary tables during a parallel operation" +msgstr "kan inte komma åt temporära tabeller under en parallell operation" + +#: storage/file/buffile.c:317 +#, c-format +msgid "could not open BufFile \"%s\"" +msgstr "kunde inte öppna BufFile \"%s\"" + +#: storage/file/fd.c:451 storage/file/fd.c:523 storage/file/fd.c:559 +#, c-format +msgid "could not flush dirty data: %m" +msgstr "kunde inte flush:a smutsig data: %m" + +#: storage/file/fd.c:481 +#, c-format +msgid "could not determine dirty data size: %m" +msgstr "kunde inte lista ut storlek på smutsig data: %m" + +#: storage/file/fd.c:533 +#, c-format +msgid "could not munmap() while flushing data: %m" +msgstr "kunde inte göra munmap() vid flush:ning av data: %m" + +#: storage/file/fd.c:734 +#, c-format +msgid "could not link file \"%s\" to \"%s\": %m" +msgstr "kunde inte länka fil \"%s\" till \"%s\": %m" + +#: storage/file/fd.c:828 +#, c-format +msgid "getrlimit failed: %m" +msgstr "getrlimit misslyckades: %m" + +#: storage/file/fd.c:918 +#, c-format +msgid "insufficient file descriptors available to start server process" +msgstr "otillräckligt antal fildeskriptorer tillgängligt för att starta serverprocessen" + +#: storage/file/fd.c:919 +#, c-format +msgid "System allows %d, we need at least %d." +msgstr "Systemet tillåter %d, vi behöver minst %d." 
+ +#: storage/file/fd.c:970 storage/file/fd.c:2371 storage/file/fd.c:2473 +#: storage/file/fd.c:2625 +#, c-format +msgid "out of file descriptors: %m; release and retry" +msgstr "slut på fildeskriptorer: %m; frigör och försök igen" + +#: storage/file/fd.c:1312 +#, c-format +msgid "temporary file: path \"%s\", size %lu" +msgstr "temporär fil: sökväg \"%s\", storlek %lu" + +#: storage/file/fd.c:1444 +#, c-format +msgid "cannot create temporary directory \"%s\": %m" +msgstr "kunde inte skapa temporär katalog \"%s\": %m" + +#: storage/file/fd.c:1451 +#, c-format +msgid "cannot create temporary subdirectory \"%s\": %m" +msgstr "kunde inte skapa temporär underkatalog \"%s\": %m" + +#: storage/file/fd.c:1644 +#, c-format +msgid "could not create temporary file \"%s\": %m" +msgstr "kan inte skapa temporär fil \"%s\": %m" + +#: storage/file/fd.c:1679 +#, c-format +msgid "could not open temporary file \"%s\": %m" +msgstr "kunde inte öppna temporär fil \"%s\": %m" + +# unlink refererar till unix-funktionen unlink() så den översätter vi inte +#: storage/file/fd.c:1720 +#, c-format +msgid "cannot unlink temporary file \"%s\": %m" +msgstr "kunde inte unlink:a temporär fil \"%s\": %m" + +#: storage/file/fd.c:2002 +#, c-format +msgid "temporary file size exceeds temp_file_limit (%dkB)" +msgstr "storlek på temporär fil överskrider temp_file_limit (%dkB)" + +#: storage/file/fd.c:2347 storage/file/fd.c:2406 +#, c-format +msgid "exceeded maxAllocatedDescs (%d) while trying to open file \"%s\"" +msgstr "överskred maxAllocatedDescs (%d) vid försök att öppna fil \"%s\"" + +#: storage/file/fd.c:2446 +#, c-format +msgid "exceeded maxAllocatedDescs (%d) while trying to execute command \"%s\"" +msgstr "överskred maxAllocatedDescs (%d) vid försök att köra kommando \"%s\"" + +#: storage/file/fd.c:2601 +#, c-format +msgid "exceeded maxAllocatedDescs (%d) while trying to open directory \"%s\"" +msgstr "överskred maxAllocatedDescs (%d) vid försök att öppna katalog \"%s\"" + +#: storage/file/fd.c:2692 +#, c-format +msgid "could not read directory \"%s\": %m" +msgstr "kunde inte läsa katalog \"%s\": %m" + +#: storage/file/fd.c:3124 +#, c-format +msgid "unexpected file found in temporary-files directory: \"%s\"" +msgstr "oväntad fil hittades i katalogen för temporära filer: \"%s\"" + +#: storage/file/fd.c:3443 +#, c-format +msgid "could not rmdir directory \"%s\": %m" +msgstr "kunde inte göra rmdir på katalogen \"%s\": %m" + +#: storage/file/sharedfileset.c:93 +#, c-format +msgid "could not attach to a SharedFileSet that is already destroyed" +msgstr "kunde inte koppla till en SharedFileSet som redan tagits bort" + +#: storage/ipc/dsm.c:351 +#, c-format +msgid "dynamic shared memory control segment is corrupt" +msgstr "dynamiskt delat minnes kontrollsegment är korrupt" + +#: storage/ipc/dsm.c:398 +#, c-format +msgid "dynamic shared memory is disabled" +msgstr "dynamiskt delat minne är avstängt" + +#: storage/ipc/dsm.c:399 +#, c-format +msgid "Set dynamic_shared_memory_type to a value other than \"none\"." +msgstr "Sätt dynamic_shared_memory_type till ett annat värde än \"none\"." 
+ +#: storage/ipc/dsm.c:419 +#, c-format +msgid "dynamic shared memory control segment is not valid" +msgstr "dynamiskt delat minnes kontrollsegment är inte giltigt" + +#: storage/ipc/dsm.c:515 +#, c-format +msgid "too many dynamic shared memory segments" +msgstr "för många dynamiska delade minnessegment" + +#: storage/ipc/dsm_impl.c:263 storage/ipc/dsm_impl.c:364 +#: storage/ipc/dsm_impl.c:581 storage/ipc/dsm_impl.c:696 +#: storage/ipc/dsm_impl.c:867 storage/ipc/dsm_impl.c:1011 +#, c-format +msgid "could not unmap shared memory segment \"%s\": %m" +msgstr "kunde inte avmappa delat minnessegment \"%s\": %m" + +#: storage/ipc/dsm_impl.c:273 storage/ipc/dsm_impl.c:591 +#: storage/ipc/dsm_impl.c:706 storage/ipc/dsm_impl.c:877 +#, c-format +msgid "could not remove shared memory segment \"%s\": %m" +msgstr "kunde inte ta bort delat minnessegment \"%s\": %m" + +#: storage/ipc/dsm_impl.c:294 storage/ipc/dsm_impl.c:777 +#: storage/ipc/dsm_impl.c:891 +#, c-format +msgid "could not open shared memory segment \"%s\": %m" +msgstr "kunde inte öppna delat minnessegment \"%s\": %m" + +#: storage/ipc/dsm_impl.c:318 storage/ipc/dsm_impl.c:607 +#: storage/ipc/dsm_impl.c:822 storage/ipc/dsm_impl.c:915 +#, c-format +msgid "could not stat shared memory segment \"%s\": %m" +msgstr "kunde inte göra stat() på delat minnessegment \"%s\": %m" + +#: storage/ipc/dsm_impl.c:338 storage/ipc/dsm_impl.c:934 +#: storage/ipc/dsm_impl.c:984 +#, c-format +msgid "could not resize shared memory segment \"%s\" to %zu bytes: %m" +msgstr "kunde inte ändra storlek på delat minnessegment \"%s\" till %zu byte: %m" + +#: storage/ipc/dsm_impl.c:388 storage/ipc/dsm_impl.c:628 +#: storage/ipc/dsm_impl.c:798 storage/ipc/dsm_impl.c:1035 +#, c-format +msgid "could not map shared memory segment \"%s\": %m" +msgstr "kunde inte mappa delat minnessegment \"%s\": %m" + +#: storage/ipc/dsm_impl.c:563 +#, c-format +msgid "could not get shared memory segment: %m" +msgstr "kunde inte hämta delat minnessegment: %m" + +#: storage/ipc/dsm_impl.c:762 +#, c-format +msgid "could not create shared memory segment \"%s\": %m" +msgstr "kunde inte skapa delat minnessegment \"%s\": %m" + +#: storage/ipc/dsm_impl.c:1077 storage/ipc/dsm_impl.c:1125 +#, c-format +msgid "could not duplicate handle for \"%s\": %m" +msgstr "kunde inte duplicera handle för \"%s\": %m" + +#: storage/ipc/latch.c:829 +#, c-format +msgid "epoll_ctl() failed: %m" +msgstr "epoll_ctl() misslyckades: %m" + +#: storage/ipc/latch.c:1060 +#, c-format +msgid "epoll_wait() failed: %m" +msgstr "epoll_wait() misslyckades: %m" + +#: storage/ipc/latch.c:1182 +#, c-format +msgid "poll() failed: %m" +msgstr "poll() misslyckades: %m" + +#: storage/ipc/shm_toc.c:118 storage/ipc/shm_toc.c:200 storage/lmgr/lock.c:883 +#: storage/lmgr/lock.c:917 storage/lmgr/lock.c:2679 storage/lmgr/lock.c:4004 +#: storage/lmgr/lock.c:4069 storage/lmgr/lock.c:4361 +#: storage/lmgr/predicate.c:2355 storage/lmgr/predicate.c:2370 +#: storage/lmgr/predicate.c:3762 storage/lmgr/predicate.c:4905 +#: utils/hash/dynahash.c:1065 +#, c-format +msgid "out of shared memory" +msgstr "slut på delat minne" + +#: storage/ipc/shmem.c:165 storage/ipc/shmem.c:246 +#, c-format +msgid "out of shared memory (%zu bytes requested)" +msgstr "slut på delat minne (%zu byte efterfrågat)" + +#: storage/ipc/shmem.c:421 +#, c-format +msgid "could not create ShmemIndex entry for data structure \"%s\"" +msgstr "kunde inte skapa ShmemIndex-post för datastrukturen \"%s\"" + +#: storage/ipc/shmem.c:436 +#, c-format +msgid "ShmemIndex entry size is wrong for 
data structure \"%s\": expected %zu, actual %zu" +msgstr "ShmemIndex-poststorlek är fel för datastruktur \"%s\": förväntade %zu var %zu" + +#: storage/ipc/shmem.c:453 +#, c-format +msgid "not enough shared memory for data structure \"%s\" (%zu bytes requested)" +msgstr "otillräckligt delat minne för datastruktur \"%s\" (efterfrågade %zu byte)" + +#: storage/ipc/shmem.c:484 storage/ipc/shmem.c:503 +#, c-format +msgid "requested shared memory size overflows size_t" +msgstr "efterfrågad delat minnesstorlek överskrider size_t" + +#: storage/ipc/standby.c:531 tcop/postgres.c:3027 +#, c-format +msgid "canceling statement due to conflict with recovery" +msgstr "avbryter sats på grund av konflikt med återställning" + +#: storage/ipc/standby.c:532 tcop/postgres.c:2306 +#, c-format +msgid "User transaction caused buffer deadlock with recovery." +msgstr "Användartransaktion orsakade deadlock för buffer vid återställning." + +#: storage/large_object/inv_api.c:190 +#, c-format +msgid "pg_largeobject entry for OID %u, page %d has invalid data field size %d" +msgstr "pg_largeobject-post för OID %u, sida %d har ogiltig datafältstorlek %d" + +#: storage/large_object/inv_api.c:271 +#, c-format +msgid "invalid flags for opening a large object: %d" +msgstr "ogiltiga flaggor för att öppna stort objekt: %d" + +#: storage/large_object/inv_api.c:461 +#, c-format +msgid "invalid whence setting: %d" +msgstr "ogiltig whence-inställning: %d" + +#: storage/large_object/inv_api.c:633 +#, c-format +msgid "invalid large object write request size: %d" +msgstr "ogiltig storlek för stort objects skrivningbegäran: %d" + +#: storage/lmgr/deadlock.c:1109 +#, c-format +msgid "Process %d waits for %s on %s; blocked by process %d." +msgstr "Process %d väntar på %s för %s; blockerad av process %d." + +#: storage/lmgr/deadlock.c:1128 +#, c-format +msgid "Process %d: %s" +msgstr "Process %d: %s" + +#: storage/lmgr/deadlock.c:1137 +#, c-format +msgid "deadlock detected" +msgstr "deadlock upptäckt" + +#: storage/lmgr/deadlock.c:1140 +#, c-format +msgid "See server log for query details." +msgstr "Se server-logg för frågedetaljer." 
+ +#: storage/lmgr/lmgr.c:745 +#, c-format +msgid "while updating tuple (%u,%u) in relation \"%s\"" +msgstr "vid uppdatering av tupel (%u,%u) i relation \"%s\"" + +#: storage/lmgr/lmgr.c:748 +#, c-format +msgid "while deleting tuple (%u,%u) in relation \"%s\"" +msgstr "vid borttagning av tupel (%u,%u) i relation \"%s\"" + +#: storage/lmgr/lmgr.c:751 +#, c-format +msgid "while locking tuple (%u,%u) in relation \"%s\"" +msgstr "vid låsning av tupel (%u,%u) i relation \"%s\"" + +#: storage/lmgr/lmgr.c:754 +#, c-format +msgid "while locking updated version (%u,%u) of tuple in relation \"%s\"" +msgstr "vid låsning av uppdaterad version (%u,%u) av tupel i relation \"%s\"" + +#: storage/lmgr/lmgr.c:757 +#, c-format +msgid "while inserting index tuple (%u,%u) in relation \"%s\"" +msgstr "vid insättning av indextupel (%u,%u) i relation \"%s\"" + +#: storage/lmgr/lmgr.c:760 +#, c-format +msgid "while checking uniqueness of tuple (%u,%u) in relation \"%s\"" +msgstr "vid kontroll av unikhet av tupel (%u,%u) i relation \"%s\"" + +#: storage/lmgr/lmgr.c:763 +#, c-format +msgid "while rechecking updated tuple (%u,%u) in relation \"%s\"" +msgstr "vid återkontroll av uppdaterad tupel (%u,%u) i relation \"%s\"" + +#: storage/lmgr/lmgr.c:766 +#, c-format +msgid "while checking exclusion constraint on tuple (%u,%u) in relation \"%s\"" +msgstr "vid kontroll av uteslutningsvillkor av tupel (%u,%u) i relation \"%s\"" + +#: storage/lmgr/lmgr.c:986 +#, c-format +msgid "relation %u of database %u" +msgstr "relation %u i databasen %u" + +#: storage/lmgr/lmgr.c:992 +#, c-format +msgid "extension of relation %u of database %u" +msgstr "utökning av relation %u i databas %u" + +#: storage/lmgr/lmgr.c:998 +#, c-format +msgid "page %u of relation %u of database %u" +msgstr "sida %u i relation %u i databas %u" + +#: storage/lmgr/lmgr.c:1005 +#, c-format +msgid "tuple (%u,%u) of relation %u of database %u" +msgstr "tupel (%u,%u) i relation %u i databas %u" + +#: storage/lmgr/lmgr.c:1013 +#, c-format +msgid "transaction %u" +msgstr "transaktion %u" + +#: storage/lmgr/lmgr.c:1018 +#, c-format +msgid "virtual transaction %d/%u" +msgstr "virtuell transaktion %d/%u" + +#: storage/lmgr/lmgr.c:1024 +#, c-format +msgid "speculative token %u of transaction %u" +msgstr "spekulativ token %u för transaktion %u" + +#: storage/lmgr/lmgr.c:1030 +#, c-format +msgid "object %u of class %u of database %u" +msgstr "objekt %u av klass %u i databas %u" + +#: storage/lmgr/lmgr.c:1038 +#, c-format +msgid "user lock [%u,%u,%u]" +msgstr "användarlås [%u,%u,%u]" + +#: storage/lmgr/lmgr.c:1045 +#, c-format +msgid "advisory lock [%u,%u,%u,%u]" +msgstr "rådgivande lås [%u,%u,%u,%u]" + +#: storage/lmgr/lmgr.c:1053 +#, c-format +msgid "unrecognized locktag type %d" +msgstr "okänd låsetikettyp %d" + +#: storage/lmgr/lock.c:732 +#, c-format +msgid "cannot acquire lock mode %s on database objects while recovery is in progress" +msgstr "kan inte ta låsläge %s på databasobjekt när återställning pågår" + +#: storage/lmgr/lock.c:734 +#, c-format +msgid "Only RowExclusiveLock or less can be acquired on database objects during recovery." +msgstr "Bara RowExclusiveLock eller lägre kan tas på databasobjekt under återställning." + +#: storage/lmgr/lock.c:884 storage/lmgr/lock.c:918 storage/lmgr/lock.c:2680 +#: storage/lmgr/lock.c:4005 storage/lmgr/lock.c:4070 storage/lmgr/lock.c:4362 +#, c-format +msgid "You might need to increase max_locks_per_transaction." +msgstr "Du kan behöva öka parametern max_locks_per_transaction." 
+ +#: storage/lmgr/lock.c:3121 storage/lmgr/lock.c:3237 +#, c-format +msgid "cannot PREPARE while holding both session-level and transaction-level locks on the same object" +msgstr "kan inte göra PREPARE samtidigt som vi håller lås på sessionsnivå och transaktionsnivå för samma objekt" + +#: storage/lmgr/predicate.c:682 +#, c-format +msgid "not enough elements in RWConflictPool to record a read/write conflict" +msgstr "ej tillräckligt med element i RWConflictPool för att spara ner en läs/skriv-konflikt" + +#: storage/lmgr/predicate.c:683 storage/lmgr/predicate.c:711 +#, c-format +msgid "You might need to run fewer transactions at a time or increase max_connections." +msgstr "Du kan behöva köra färre samtidiga transaktioner eller öka max_connections." + +#: storage/lmgr/predicate.c:710 +#, c-format +msgid "not enough elements in RWConflictPool to record a potential read/write conflict" +msgstr "ej tillräckligt med element i RWConflictPool för att spara ner en potentiell läs/skriv-konflikt" + +#: storage/lmgr/predicate.c:1515 +#, c-format +msgid "deferrable snapshot was unsafe; trying a new one" +msgstr "deferrable-snapshot var osäkert; försöker med ett nytt" + +#: storage/lmgr/predicate.c:1604 +#, c-format +msgid "\"default_transaction_isolation\" is set to \"serializable\"." +msgstr "\"default_transaction_isolation\" är satt till \"serializable\"." + +#: storage/lmgr/predicate.c:1605 +#, c-format +msgid "You can use \"SET default_transaction_isolation = 'repeatable read'\" to change the default." +msgstr "Du kan använda \"SET default_transaction_isolation = 'repeatable read'\" för att ändra standardvärdet." + +#: storage/lmgr/predicate.c:1645 +#, c-format +msgid "a snapshot-importing transaction must not be READ ONLY DEFERRABLE" +msgstr "en snapshot-importerande transaktion får inte vara READ ONLY DEFERRABLE" + +#: storage/lmgr/predicate.c:1725 utils/time/snapmgr.c:621 +#: utils/time/snapmgr.c:627 +#, c-format +msgid "could not import the requested snapshot" +msgstr "kunde inte importera efterfrågat snapshot" + +#: storage/lmgr/predicate.c:1726 utils/time/snapmgr.c:628 +#, c-format +msgid "The source process with PID %d is not running anymore." +msgstr "Källprocessen med PID %d kör inte längre." + +#: storage/lmgr/predicate.c:2356 storage/lmgr/predicate.c:2371 +#: storage/lmgr/predicate.c:3763 +#, c-format +msgid "You might need to increase max_pred_locks_per_transaction." +msgstr "Du kan behöva öka parametern max_pred_locks_per_transaction." + +#: storage/lmgr/predicate.c:3917 storage/lmgr/predicate.c:4006 +#: storage/lmgr/predicate.c:4014 storage/lmgr/predicate.c:4053 +#: storage/lmgr/predicate.c:4292 storage/lmgr/predicate.c:4629 +#: storage/lmgr/predicate.c:4641 storage/lmgr/predicate.c:4683 +#: storage/lmgr/predicate.c:4721 +#, c-format +msgid "could not serialize access due to read/write dependencies among transactions" +msgstr "kunde inte serialisera åtkomst på grund av läs/skriv-beroenden bland transaktionerna" + +#: storage/lmgr/predicate.c:3919 storage/lmgr/predicate.c:4008 +#: storage/lmgr/predicate.c:4016 storage/lmgr/predicate.c:4055 +#: storage/lmgr/predicate.c:4294 storage/lmgr/predicate.c:4631 +#: storage/lmgr/predicate.c:4643 storage/lmgr/predicate.c:4685 +#: storage/lmgr/predicate.c:4723 +#, c-format +msgid "The transaction might succeed if retried." +msgstr "Transaktionen kan lyckas om den körs igen." + +#: storage/lmgr/proc.c:1309 +#, c-format +msgid "Process %d waits for %s on %s." +msgstr "Process %d väntar på %s för %s." 
+ +#: storage/lmgr/proc.c:1320 +#, c-format +msgid "sending cancel to blocking autovacuum PID %d" +msgstr "skickar avbryt till blockerande autovacuum-PID %d" + +#: storage/lmgr/proc.c:1338 utils/adt/misc.c:270 +#, c-format +msgid "could not send signal to process %d: %m" +msgstr "kunde inte skicka signal till process %d: %m" + +#: storage/lmgr/proc.c:1440 +#, c-format +msgid "process %d avoided deadlock for %s on %s by rearranging queue order after %ld.%03d ms" +msgstr "process %d undvek deadlock på %s för %s genom att kasta om köordningen efter %ld.%03d ms" + +#: storage/lmgr/proc.c:1455 +#, c-format +msgid "process %d detected deadlock while waiting for %s on %s after %ld.%03d ms" +msgstr "process %d upptäckte deadlock medan den väntade på %s för %s efter %ld.%03d ms" + +#: storage/lmgr/proc.c:1464 +#, c-format +msgid "process %d still waiting for %s on %s after %ld.%03d ms" +msgstr "process %d väntar fortfarande på %s för %s efter %ld.%03d ms" + +#: storage/lmgr/proc.c:1471 +#, c-format +msgid "process %d acquired %s on %s after %ld.%03d ms" +msgstr "process %d fick %s på %s efter %ld.%03d ms" + +#: storage/lmgr/proc.c:1487 +#, c-format +msgid "process %d failed to acquire %s on %s after %ld.%03d ms" +msgstr "process %d misslyckades att ta %s på %s efter %ld.%03d ms" + +#: storage/page/bufpage.c:151 +#, c-format +msgid "page verification failed, calculated checksum %u but expected %u" +msgstr "sidverifiering misslyckades, beräknade kontrollsumma %u men förväntade %u" + +#: storage/page/bufpage.c:213 storage/page/bufpage.c:507 +#: storage/page/bufpage.c:744 storage/page/bufpage.c:877 +#: storage/page/bufpage.c:973 storage/page/bufpage.c:1083 +#, c-format +msgid "corrupted page pointers: lower = %u, upper = %u, special = %u" +msgstr "korrupta sidpekare: lägre = %u, övre = %u, special = %u" + +#: storage/page/bufpage.c:529 +#, c-format +msgid "corrupted item pointer: %u" +msgstr "korrupt post-pekare: %u" + +#: storage/page/bufpage.c:556 storage/page/bufpage.c:928 +#, c-format +msgid "corrupted item lengths: total %u, available space %u" +msgstr "trasiga postlängder: totalt %u, tillgänglig plats %u" + +#: storage/page/bufpage.c:763 storage/page/bufpage.c:989 +#: storage/page/bufpage.c:1099 +#, c-format +msgid "corrupted item pointer: offset = %u, size = %u" +msgstr "trasig postpekare: offset = %u, storlek = %u" + +#: storage/page/bufpage.c:901 +#, c-format +msgid "corrupted item pointer: offset = %u, length = %u" +msgstr "trasig postpekare: offset = %u, längd = %u" + +#: storage/smgr/md.c:447 storage/smgr/md.c:973 +#, c-format +msgid "could not truncate file \"%s\": %m" +msgstr "kunde inte trunkera fil \"%s\": %m" + +#: storage/smgr/md.c:514 +#, c-format +msgid "cannot extend file \"%s\" beyond %u blocks" +msgstr "kan inte utöka fil \"%s\" utöver %u block" + +#: storage/smgr/md.c:536 storage/smgr/md.c:753 storage/smgr/md.c:829 +#, c-format +msgid "could not seek to block %u in file \"%s\": %m" +msgstr "kunde inte söka (seek) till block %u i fil \"%s\": %m" + +#: storage/smgr/md.c:544 +#, c-format +msgid "could not extend file \"%s\": %m" +msgstr "kunde inte utöka fil \"%s\": %m" + +#: storage/smgr/md.c:546 storage/smgr/md.c:553 storage/smgr/md.c:856 +#, c-format +msgid "Check free disk space." +msgstr "Kontrollera ledigt diskutrymme." 
+ +#: storage/smgr/md.c:550 +#, c-format +msgid "could not extend file \"%s\": wrote only %d of %d bytes at block %u" +msgstr "kunde inte utöka fil \"%s\": skrev bara %d av %d byte vid block %u" + +#: storage/smgr/md.c:771 +#, c-format +msgid "could not read block %u in file \"%s\": %m" +msgstr "kunde inte läsa block %u i fil \"%s\": %m" + +#: storage/smgr/md.c:787 +#, c-format +msgid "could not read block %u in file \"%s\": read only %d of %d bytes" +msgstr "kunde inte läsa block %u i fil \"%s\": läste bara %d av %d byte" + +#: storage/smgr/md.c:847 +#, c-format +msgid "could not write block %u in file \"%s\": %m" +msgstr "kunde inte skriva block %u i fil \"%s\": %m" + +#: storage/smgr/md.c:852 +#, c-format +msgid "could not write block %u in file \"%s\": wrote only %d of %d bytes" +msgstr "kunde inte skriva block %u i fil \"%s\": skrev bara %d av %d byte" + +#: storage/smgr/md.c:944 +#, c-format +msgid "could not truncate file \"%s\" to %u blocks: it's only %u blocks now" +msgstr "kunde inte trunkera fil \"%s\" till %u block: den är bara %u block nu" + +#: storage/smgr/md.c:999 +#, c-format +msgid "could not truncate file \"%s\" to %u blocks: %m" +msgstr "kunde inte trunkera fil \"%s\" till %u block: %m" + +#: storage/smgr/md.c:1281 +#, c-format +msgid "could not fsync file \"%s\" but retrying: %m" +msgstr "kunde inte fsync:a fil \"%s\" men försöker igen: %m" + +#: storage/smgr/md.c:1444 +#, c-format +msgid "could not forward fsync request because request queue is full" +msgstr "kunde inte skicka vidare fsync-förfrågan då kön för förfrågningar är full" + +#: storage/smgr/md.c:1913 +#, c-format +msgid "could not open file \"%s\" (target block %u): previous segment is only %u blocks" +msgstr "kunde inte öppna fil \"%s\" (målblock %u): föregående segment är bara %u block" + +#: storage/smgr/md.c:1927 +#, c-format +msgid "could not open file \"%s\" (target block %u): %m" +msgstr "kunde inte öppna fil \"%s\" (målblock %u): %m" + +#: tcop/fastpath.c:109 tcop/fastpath.c:461 tcop/fastpath.c:591 +#, c-format +msgid "invalid argument size %d in function call message" +msgstr "ogiltig argumentstorlek %d i funktionsanropsmeddelande" + +#: tcop/fastpath.c:307 +#, c-format +msgid "fastpath function call: \"%s\" (OID %u)" +msgstr "fastpath funktionsanrop: \"%s\" (OID %u)" + +#: tcop/fastpath.c:389 tcop/postgres.c:1195 tcop/postgres.c:1459 +#: tcop/postgres.c:1841 tcop/postgres.c:2062 +#, c-format +msgid "duration: %s ms" +msgstr "varaktighet: %s ms" + +#: tcop/fastpath.c:393 +#, c-format +msgid "duration: %s ms fastpath function call: \"%s\" (OID %u)" +msgstr "varaktighet: %s ms fastpath funktionsanrop: \"%s\" (OID %u)" + +#: tcop/fastpath.c:429 tcop/fastpath.c:556 +#, c-format +msgid "function call message contains %d arguments but function requires %d" +msgstr "meddelande för funktionsanrop innehåller %d argument men funktionen kräver %d" + +#: tcop/fastpath.c:437 +#, c-format +msgid "function call message contains %d argument formats but %d arguments" +msgstr "meddelande för funktionsanrop innehåller %d argumentformat men %d argument" + +#: tcop/fastpath.c:524 tcop/fastpath.c:607 +#, c-format +msgid "incorrect binary data format in function argument %d" +msgstr "inkorrekt binärt dataformat i funktionsargument %d" + +#: tcop/postgres.c:359 tcop/postgres.c:395 tcop/postgres.c:422 +#, c-format +msgid "unexpected EOF on client connection" +msgstr "oväntat EOF från klienten" + +#: tcop/postgres.c:445 tcop/postgres.c:457 tcop/postgres.c:468 +#: tcop/postgres.c:480 tcop/postgres.c:4379 +#, c-format +msgid 
"invalid frontend message type %d" +msgstr "ogiltig frontend-meddelandetyp %d" + +#: tcop/postgres.c:950 +#, c-format +msgid "statement: %s" +msgstr "sats: %s" + +#: tcop/postgres.c:1200 +#, c-format +msgid "duration: %s ms statement: %s" +msgstr "varaktighet: %s ms sats: %s" + +#: tcop/postgres.c:1250 +#, c-format +msgid "parse %s: %s" +msgstr "parse %s: %s" + +#: tcop/postgres.c:1307 +#, c-format +msgid "cannot insert multiple commands into a prepared statement" +msgstr "kan inte stoppa in multipla kommandon i en förberedd sats" + +#: tcop/postgres.c:1464 +#, c-format +msgid "duration: %s ms parse %s: %s" +msgstr "varaktighet: %s ms parse %s: %s" + +#: tcop/postgres.c:1509 +#, c-format +msgid "bind %s to %s" +msgstr "bind %s till %s" + +#: tcop/postgres.c:1528 tcop/postgres.c:2354 +#, c-format +msgid "unnamed prepared statement does not exist" +msgstr "förberedd sats utan namn existerar inte" + +#: tcop/postgres.c:1571 +#, c-format +msgid "bind message has %d parameter formats but %d parameters" +msgstr "bind-meddelande har %d parameterformat men %d parametrar" + +#: tcop/postgres.c:1577 +#, c-format +msgid "bind message supplies %d parameters, but prepared statement \"%s\" requires %d" +msgstr "bind-meddelande ger %d parametrar men förberedd sats \"%s\" kräver %d" + +#: tcop/postgres.c:1748 +#, c-format +msgid "incorrect binary data format in bind parameter %d" +msgstr "inkorrekt binärdataformat i bind-parameter %d" + +#: tcop/postgres.c:1846 +#, c-format +msgid "duration: %s ms bind %s%s%s: %s" +msgstr "varaktighet: %s ms bind %s%s%s: %s" + +#: tcop/postgres.c:1894 tcop/postgres.c:2438 +#, c-format +msgid "portal \"%s\" does not exist" +msgstr "portal \"%s\" existerar inte" + +#: tcop/postgres.c:1979 +#, c-format +msgid "%s %s%s%s: %s" +msgstr "%s %s%s%s: %s" + +#: tcop/postgres.c:1981 tcop/postgres.c:2070 +msgid "execute fetch from" +msgstr "kör hämtning från" + +#: tcop/postgres.c:1982 tcop/postgres.c:2071 +msgid "execute" +msgstr "kör" + +#: tcop/postgres.c:2067 +#, c-format +msgid "duration: %s ms %s %s%s%s: %s" +msgstr "varaktighet: %s ms %s %s%s%s: %s" + +#: tcop/postgres.c:2193 +#, c-format +msgid "prepare: %s" +msgstr "prepare: %s" + +#: tcop/postgres.c:2259 +#, c-format +msgid "parameters: %s" +msgstr "parametrar: %s" + +#: tcop/postgres.c:2278 +#, c-format +msgid "abort reason: recovery conflict" +msgstr "abortskäl: återställningskonflikt" + +#: tcop/postgres.c:2294 +#, c-format +msgid "User was holding shared buffer pin for too long." +msgstr "Användaren höll delad bufferfastlåsning för länge." + +#: tcop/postgres.c:2297 +#, c-format +msgid "User was holding a relation lock for too long." +msgstr "Användare höll ett relationslås för länge." + +#: tcop/postgres.c:2300 +#, c-format +msgid "User was or might have been using tablespace that must be dropped." +msgstr "Användaren använde eller har använt ett tablespace som tagits bort." + +#: tcop/postgres.c:2303 +#, c-format +msgid "User query might have needed to see row versions that must be removed." +msgstr "Användarfrågan kan ha behövt se radversioner som har tagits bort." + +#: tcop/postgres.c:2309 +#, c-format +msgid "User was connected to a database that must be dropped." +msgstr "Användare var ansluten till databas som måste slängas." 
+ +#: tcop/postgres.c:2624 +#, c-format +msgid "terminating connection because of crash of another server process" +msgstr "avbryter anslutning på grund av en krasch i en annan serverprocess" + +#: tcop/postgres.c:2625 +#, c-format +msgid "The postmaster has commanded this server process to roll back the current transaction and exit, because another server process exited abnormally and possibly corrupted shared memory." +msgstr "Postmastern har sagt åt denna serverprocess att rulla tillbaka den aktuella transaktionen och avsluta då en annan process har avslutats onormalt och har eventuellt trasat sönder delat minne." + +#: tcop/postgres.c:2629 tcop/postgres.c:2957 +#, c-format +msgid "In a moment you should be able to reconnect to the database and repeat your command." +msgstr "Du kan strax återansluta till databasen och upprepa kommandot." + +#: tcop/postgres.c:2715 +#, c-format +msgid "floating-point exception" +msgstr "flyttalsavbrott" + +#: tcop/postgres.c:2716 +#, c-format +msgid "An invalid floating-point operation was signaled. This probably means an out-of-range result or an invalid operation, such as division by zero." +msgstr "En ogiltig flyttalsoperation har signalerats. Detta beror troligen på ett resultat som är utanför giltigt intervall eller en ogiltig operation så som division med noll." + +#: tcop/postgres.c:2887 +#, c-format +msgid "canceling authentication due to timeout" +msgstr "avbryter autentisering på grund av timeout" + +#: tcop/postgres.c:2891 +#, c-format +msgid "terminating autovacuum process due to administrator command" +msgstr "avslutar autovacuum-process på grund av ett administratörskommando" + +#: tcop/postgres.c:2895 +#, c-format +msgid "terminating logical replication worker due to administrator command" +msgstr "avslutar logisk replikeringsarbetare på grund av ett administratörskommando" + +#: tcop/postgres.c:2899 +#, c-format +msgid "logical replication launcher shutting down" +msgstr "logisk replikeringsuppstartare stänger ner" + +#: tcop/postgres.c:2912 tcop/postgres.c:2922 tcop/postgres.c:2955 +#, c-format +msgid "terminating connection due to conflict with recovery" +msgstr "avslutar anslutning på grund av konflikt med återställning" + +#: tcop/postgres.c:2928 +#, c-format +msgid "terminating connection due to administrator command" +msgstr "avslutar anslutning på grund av ett administratörskommando" + +#: tcop/postgres.c:2938 +#, c-format +msgid "connection to client lost" +msgstr "anslutning till klient har brutits" + +#: tcop/postgres.c:3004 +#, c-format +msgid "canceling statement due to lock timeout" +msgstr "avbryter sats på grund av lås-timeout" + +#: tcop/postgres.c:3011 +#, c-format +msgid "canceling statement due to statement timeout" +msgstr "avbryter sats på grund av sats-timeout" + +#: tcop/postgres.c:3018 +#, c-format +msgid "canceling autovacuum task" +msgstr "avbryter autovacuum-uppgift" + +#: tcop/postgres.c:3041 +#, c-format +msgid "canceling statement due to user request" +msgstr "avbryter sats på användares begäran" + +#: tcop/postgres.c:3051 +#, c-format +msgid "terminating connection due to idle-in-transaction timeout" +msgstr "terminerar anslutning på grund av idle-in-transaction-timeout" + +#: tcop/postgres.c:3165 +#, c-format +msgid "stack depth limit exceeded" +msgstr "maximalt stackdjup överskridet" + +#: tcop/postgres.c:3166 +#, c-format +msgid "Increase the configuration parameter \"max_stack_depth\" (currently %dkB), after ensuring the platform's stack depth limit is adequate." 
+msgstr "Öka konfigurationsparametern \"max_stack_depth\" (nu %dkB) efter att ha undersökt att plattformens gräns för stackdjup är tillräcklig." + +#: tcop/postgres.c:3229 +#, c-format +msgid "\"max_stack_depth\" must not exceed %ldkB." +msgstr "\"max_stack_depth\" får ej överskrida %ldkB." + +#: tcop/postgres.c:3231 +#, c-format +msgid "Increase the platform's stack depth limit via \"ulimit -s\" or local equivalent." +msgstr "Öka plattformens stackdjupbegränsning via \"ulimit -s\" eller motsvarande." + +#: tcop/postgres.c:3591 +#, c-format +msgid "invalid command-line argument for server process: %s" +msgstr "ogiltigt kommandoradsargument för serverprocess: %s" + +#: tcop/postgres.c:3592 tcop/postgres.c:3598 +#, c-format +msgid "Try \"%s --help\" for more information." +msgstr "Försök med \"%s --help\" för mer information." + +#: tcop/postgres.c:3596 +#, c-format +msgid "%s: invalid command-line argument: %s" +msgstr "%s: ogiltigt kommandoradsargument: %s" + +#: tcop/postgres.c:3658 +#, c-format +msgid "%s: no database nor user name specified" +msgstr "%s: ingen databas eller användarnamn angivet" + +#: tcop/postgres.c:4287 +#, c-format +msgid "invalid CLOSE message subtype %d" +msgstr "ogiltig subtyp %d för CLOSE-meddelande" + +#: tcop/postgres.c:4322 +#, c-format +msgid "invalid DESCRIBE message subtype %d" +msgstr "ogiltig subtyp %d för DESCRIBE-meddelande" + +#: tcop/postgres.c:4400 +#, c-format +msgid "fastpath function calls not supported in a replication connection" +msgstr "fastpath-funktionsanrop stöds inte i en replikeringsanslutning" + +#: tcop/postgres.c:4404 +#, c-format +msgid "extended query protocol not supported in a replication connection" +msgstr "utökat frågeprotokoll stöds inte i en replikeringsanslutning" + +#: tcop/postgres.c:4581 +#, c-format +msgid "disconnection: session time: %d:%02d:%02d.%03d user=%s database=%s host=%s%s%s" +msgstr "nedkoppling: sessionstid: %d:%02d:%02d.%03d användare=%s databas=%s värd=%s%s%s" + +#: tcop/pquery.c:645 +#, c-format +msgid "bind message has %d result formats but query has %d columns" +msgstr "bind-meddelande har %d resultatformat men frågan har %d kolumner" + +#: tcop/pquery.c:952 +#, c-format +msgid "cursor can only scan forward" +msgstr "markör kan bara hoppa framåt" + +#: tcop/pquery.c:953 +#, c-format +msgid "Declare it with SCROLL option to enable backward scan." +msgstr "Deklarera den med flaggan SCROLL för att kunna traversera bakåt." + +#. translator: %s is name of a SQL command, eg CREATE +#: tcop/utility.c:245 +#, c-format +msgid "cannot execute %s in a read-only transaction" +msgstr "kan inte köra %s i read-only-transaktion" + +#. translator: %s is name of a SQL command, eg CREATE +#: tcop/utility.c:263 +#, c-format +msgid "cannot execute %s during a parallel operation" +msgstr "kan inte köra %s under parallell operation" + +#. translator: %s is name of a SQL command, eg CREATE +#: tcop/utility.c:282 +#, c-format +msgid "cannot execute %s during recovery" +msgstr "kan inte köra %s under återställning" + +#. 
translator: %s is name of a SQL command, eg PREPARE +#: tcop/utility.c:300 +#, c-format +msgid "cannot execute %s within security-restricted operation" +msgstr "kan inte köra %s inom säkerhetsbegränsad operation" + +#: tcop/utility.c:757 +#, c-format +msgid "must be superuser to do CHECKPOINT" +msgstr "måste vara superanvändare för att göra CHECKPOINT" + +#: tcop/utility.c:1338 +#, c-format +msgid "cannot create index on partitioned table \"%s\"" +msgstr "kan inte skapa index för partitionerad tabell \"%s\"" + +#: tcop/utility.c:1340 +#, c-format +msgid "Table \"%s\" contains partitions that are foreign tables." +msgstr "Tabell \"%s\" innehåller partitioner som är främmande tabeller." + +#: tsearch/dict_ispell.c:52 tsearch/dict_thesaurus.c:624 +#, c-format +msgid "multiple DictFile parameters" +msgstr "multipla DictFile-parametrar" + +#: tsearch/dict_ispell.c:63 +#, c-format +msgid "multiple AffFile parameters" +msgstr "multipla AffFile-parametrar" + +#: tsearch/dict_ispell.c:82 +#, c-format +msgid "unrecognized Ispell parameter: \"%s\"" +msgstr "okänd Ispell-parameter: \"%s\"" + +#: tsearch/dict_ispell.c:96 +#, c-format +msgid "missing AffFile parameter" +msgstr "saknar AffFile-parameter" + +#: tsearch/dict_ispell.c:102 tsearch/dict_thesaurus.c:648 +#, c-format +msgid "missing DictFile parameter" +msgstr "saknar DictFile-parameter" + +#: tsearch/dict_simple.c:58 +#, c-format +msgid "multiple Accept parameters" +msgstr "multipla Accept-parametrar" + +#: tsearch/dict_simple.c:66 +#, c-format +msgid "unrecognized simple dictionary parameter: \"%s\"" +msgstr "okänd parameter för \"simple dictionary\": \"%s\"" + +#: tsearch/dict_synonym.c:118 +#, c-format +msgid "unrecognized synonym parameter: \"%s\"" +msgstr "okänd synonymparameter: \"%s\"" + +#: tsearch/dict_synonym.c:125 +#, c-format +msgid "missing Synonyms parameter" +msgstr "saknar Synonym-prameter" + +#: tsearch/dict_synonym.c:132 +#, c-format +msgid "could not open synonym file \"%s\": %m" +msgstr "kunde inte öppna synonymfil \"%s\": %m" + +#: tsearch/dict_thesaurus.c:179 +#, c-format +msgid "could not open thesaurus file \"%s\": %m" +msgstr "kunde inte öppna synonymordboksfil \"%s\": %m" + +#: tsearch/dict_thesaurus.c:212 +#, c-format +msgid "unexpected delimiter" +msgstr "oväntad avdelare" + +#: tsearch/dict_thesaurus.c:262 tsearch/dict_thesaurus.c:278 +#, c-format +msgid "unexpected end of line or lexeme" +msgstr "oväntat slut på raden eller lexem" + +#: tsearch/dict_thesaurus.c:287 +#, c-format +msgid "unexpected end of line" +msgstr "oväntat slut på raden" + +#: tsearch/dict_thesaurus.c:297 +#, c-format +msgid "too many lexemes in thesaurus entry" +msgstr "för många lexem i synonymordbokspost" + +#: tsearch/dict_thesaurus.c:421 +#, c-format +msgid "thesaurus sample word \"%s\" isn't recognized by subdictionary (rule %d)" +msgstr "synonymordbokens exempelord \"%s\" känns inte igen av underordbok (regel %d)" + +#: tsearch/dict_thesaurus.c:427 +#, c-format +msgid "thesaurus sample word \"%s\" is a stop word (rule %d)" +msgstr "synonymordbokens exempelord \"%s\" är ett stoppord (regel %d)" + +#: tsearch/dict_thesaurus.c:430 +#, c-format +msgid "Use \"?\" to represent a stop word within a sample phrase." +msgstr "Använd \"?\" för att representera ett stoppord i en exempelfras." 
+ +#: tsearch/dict_thesaurus.c:576 +#, c-format +msgid "thesaurus substitute word \"%s\" is a stop word (rule %d)" +msgstr "synonymordbokens ersättningsord \"%s\" är ett stoppord (regel %d)" + +#: tsearch/dict_thesaurus.c:583 +#, c-format +msgid "thesaurus substitute word \"%s\" isn't recognized by subdictionary (rule %d)" +msgstr "synonymordbokens ersättningsord \"%s\" känns inte igen av underordbok (regel %d)" + +#: tsearch/dict_thesaurus.c:595 +#, c-format +msgid "thesaurus substitute phrase is empty (rule %d)" +msgstr "synonymordbokens ersättningsfras är tom (regel %d)" + +#: tsearch/dict_thesaurus.c:633 +#, c-format +msgid "multiple Dictionary parameters" +msgstr "multipla ordboksparametrar" + +#: tsearch/dict_thesaurus.c:640 +#, c-format +msgid "unrecognized Thesaurus parameter: \"%s\"" +msgstr "okänd synonymordboksparameter: \"%s\"" + +#: tsearch/dict_thesaurus.c:652 +#, c-format +msgid "missing Dictionary parameter" +msgstr "saknar ordlistparameter" + +#: tsearch/spell.c:380 tsearch/spell.c:397 tsearch/spell.c:406 +#: tsearch/spell.c:1034 +#, c-format +msgid "invalid affix flag \"%s\"" +msgstr "ogiltig affix-flagga \"%s\"" + +#: tsearch/spell.c:384 tsearch/spell.c:1038 +#, c-format +msgid "affix flag \"%s\" is out of range" +msgstr "affix-flaggan \"%s\" är utanför giltigt intervall" + +#: tsearch/spell.c:414 +#, c-format +msgid "invalid character in affix flag \"%s\"" +msgstr "ogiltigt tecken i affix-flagga \"%s\"" + +#: tsearch/spell.c:434 +#, c-format +msgid "invalid affix flag \"%s\" with \"long\" flag value" +msgstr "ogiltig affix-flagga \"%s\" med flaggvärdet \"long\"" + +#: tsearch/spell.c:522 +#, c-format +msgid "could not open dictionary file \"%s\": %m" +msgstr "kunde inte öppna ordboksfil \"%s\": %m" + +#: tsearch/spell.c:740 utils/adt/regexp.c:204 +#, c-format +msgid "invalid regular expression: %s" +msgstr "ogiltigt reguljärt uttryck: %s" + +#: tsearch/spell.c:1161 tsearch/spell.c:1726 +#, c-format +msgid "invalid affix alias \"%s\"" +msgstr "ogiltigt affix-alias \"%s\"" + +#: tsearch/spell.c:1211 tsearch/spell.c:1282 tsearch/spell.c:1431 +#, c-format +msgid "could not open affix file \"%s\": %m" +msgstr "kunde inte öppna affix-fil \"%s\": %m" + +#: tsearch/spell.c:1265 +#, c-format +msgid "Ispell dictionary supports only \"default\", \"long\", and \"num\" flag values" +msgstr "Ispell-ordbok stöder bara flaggorna \"default\", \"long\" och \"num\"" + +#: tsearch/spell.c:1309 +#, c-format +msgid "invalid number of flag vector aliases" +msgstr "ogiltigt antal alias i flaggvektor" + +#: tsearch/spell.c:1332 +#, c-format +msgid "number of aliases exceeds specified number %d" +msgstr "antalet alias överskrider angivet antal %d" + +#: tsearch/spell.c:1547 +#, c-format +msgid "affix file contains both old-style and new-style commands" +msgstr "affix-fil innehåller kommandon på gammalt och nytt format" + +#: tsearch/to_tsany.c:185 utils/adt/tsvector.c:271 utils/adt/tsvector_op.c:1134 +#, c-format +msgid "string is too long for tsvector (%d bytes, max %d bytes)" +msgstr "strängen är för lång för tsvector (%d byte, max %d byte)" + +#: tsearch/ts_locale.c:174 +#, c-format +msgid "line %d of configuration file \"%s\": \"%s\"" +msgstr "rad %d i konfigureringsfil \"%s\": \"%s\"" + +#: tsearch/ts_locale.c:291 +#, c-format +msgid "conversion from wchar_t to server encoding failed: %m" +msgstr "konvertering från wchar_t till serverkodning misslyckades: %m" + +#: tsearch/ts_parse.c:390 tsearch/ts_parse.c:397 tsearch/ts_parse.c:566 +#: tsearch/ts_parse.c:573 +#, c-format +msgid "word is 
too long to be indexed" +msgstr "ordet är för långt för att indexeras" + +#: tsearch/ts_parse.c:391 tsearch/ts_parse.c:398 tsearch/ts_parse.c:567 +#: tsearch/ts_parse.c:574 +#, c-format +msgid "Words longer than %d characters are ignored." +msgstr "Ord längre än %d tecken hoppas över." + +#: tsearch/ts_utils.c:51 +#, c-format +msgid "invalid text search configuration file name \"%s\"" +msgstr "ogiltigt filnamn \"%s\" till textsökkonfiguration" + +#: tsearch/ts_utils.c:83 +#, c-format +msgid "could not open stop-word file \"%s\": %m" +msgstr "kunde inte öppna stoppordsfil \"%s\": %m" + +#: tsearch/wparser.c:322 tsearch/wparser.c:410 tsearch/wparser.c:487 +#, c-format +msgid "text search parser does not support headline creation" +msgstr "textsökparsern stöder inte skapande av rubriker" + +#: tsearch/wparser_def.c:2486 +#, c-format +msgid "unrecognized headline parameter: \"%s\"" +msgstr "okänd rubrikparameter: \"%s\"" + +#: tsearch/wparser_def.c:2495 +#, c-format +msgid "MinWords should be less than MaxWords" +msgstr "MinWords skall vara mindre än MaxWords" + +#: tsearch/wparser_def.c:2499 +#, c-format +msgid "MinWords should be positive" +msgstr "MinWords skall vara positiv" + +#: tsearch/wparser_def.c:2503 +#, c-format +msgid "ShortWord should be >= 0" +msgstr "ShortWord skall vara >= 0" + +#: tsearch/wparser_def.c:2507 +#, c-format +msgid "MaxFragments should be >= 0" +msgstr "MaxFragments skall vara >= 0" + +#: utils/adt/acl.c:171 utils/adt/name.c:91 +#, c-format +msgid "identifier too long" +msgstr "identifieraren för lång" + +#: utils/adt/acl.c:172 utils/adt/name.c:92 +#, c-format +msgid "Identifier must be less than %d characters." +msgstr "Identifierare måste vara mindre än %d tecken." + +#: utils/adt/acl.c:258 +#, c-format +msgid "unrecognized key word: \"%s\"" +msgstr "okänt nyckelord: \"%s\"" + +#: utils/adt/acl.c:259 +#, c-format +msgid "ACL key word must be \"group\" or \"user\"." +msgstr "ACL-nyckelord måste vara \"group\" eller \"user\"." + +#: utils/adt/acl.c:264 +#, c-format +msgid "missing name" +msgstr "namn saknas" + +#: utils/adt/acl.c:265 +#, c-format +msgid "A name must follow the \"group\" or \"user\" key word." +msgstr "Ett namn måste följa efter nyckelorden \"group\" resp. \"user\"." 
+ +#: utils/adt/acl.c:271 +#, c-format +msgid "missing \"=\" sign" +msgstr "saknar \"=\"-tecken" + +#: utils/adt/acl.c:324 +#, c-format +msgid "invalid mode character: must be one of \"%s\"" +msgstr "ogiltigt lägestecken: måste vara en av \"%s\"" + +#: utils/adt/acl.c:346 +#, c-format +msgid "a name must follow the \"/\" sign" +msgstr "ett namn måste följa på tecknet \"/\"" + +#: utils/adt/acl.c:354 +#, c-format +msgid "defaulting grantor to user ID %u" +msgstr "sätter fullmaktsgivaranvändar-ID till standardvärdet %u" + +#: utils/adt/acl.c:545 +#, c-format +msgid "ACL array contains wrong data type" +msgstr "ACL-array innehåller fel datatyp" + +#: utils/adt/acl.c:549 +#, c-format +msgid "ACL arrays must be one-dimensional" +msgstr "ACL-array:er måste vara endimensionella" + +#: utils/adt/acl.c:553 +#, c-format +msgid "ACL arrays must not contain null values" +msgstr "ACL-array:er får inte innehålla null-värden" + +#: utils/adt/acl.c:577 +#, c-format +msgid "extra garbage at the end of the ACL specification" +msgstr "skräp vid slutet av ACL-angivelse" + +#: utils/adt/acl.c:1213 +#, c-format +msgid "grant options cannot be granted back to your own grantor" +msgstr "fullmaksgivarflaggor kan inte ges tillbaka till den som givit det till dig" + +#: utils/adt/acl.c:1274 +#, c-format +msgid "dependent privileges exist" +msgstr "det finns beroende privilegier" + +#: utils/adt/acl.c:1275 +#, c-format +msgid "Use CASCADE to revoke them too." +msgstr "Använd CASCADE för att återkalla dem med." + +#: utils/adt/acl.c:1537 +#, c-format +msgid "aclinsert is no longer supported" +msgstr "aclinsert stöds inte länge" + +#: utils/adt/acl.c:1547 +#, c-format +msgid "aclremove is no longer supported" +msgstr "aclremove stöds inte längre" + +#: utils/adt/acl.c:1633 utils/adt/acl.c:1687 +#, c-format +msgid "unrecognized privilege type: \"%s\"" +msgstr "okänd privilegietyp: \"%s\"" + +#: utils/adt/acl.c:3430 utils/adt/regproc.c:102 utils/adt/regproc.c:277 +#, c-format +msgid "function \"%s\" does not exist" +msgstr "funktionen \"%s\" finns inte" + +#: utils/adt/acl.c:4884 +#, c-format +msgid "must be member of role \"%s\"" +msgstr "måste vara medlem i rollen \"%s\"" + +#: utils/adt/array_expanded.c:274 utils/adt/arrayfuncs.c:932 +#: utils/adt/arrayfuncs.c:1520 utils/adt/arrayfuncs.c:3223 +#: utils/adt/arrayfuncs.c:3363 utils/adt/arrayfuncs.c:5898 +#: utils/adt/arrayfuncs.c:6209 utils/adt/arrayutils.c:93 +#: utils/adt/arrayutils.c:102 utils/adt/arrayutils.c:109 +#, c-format +msgid "array size exceeds the maximum allowed (%d)" +msgstr "array-storlek överskrider maximalt tillåtna (%d)" + +#: utils/adt/array_userfuncs.c:80 utils/adt/array_userfuncs.c:466 +#: utils/adt/array_userfuncs.c:546 utils/adt/json.c:1829 utils/adt/json.c:1924 +#: utils/adt/json.c:1962 utils/adt/jsonb.c:1083 utils/adt/jsonb.c:1112 +#: utils/adt/jsonb.c:1504 utils/adt/jsonb.c:1668 utils/adt/jsonb.c:1678 +#, c-format +msgid "could not determine input data type" +msgstr "kan inte bestämma indatatyp" + +#: utils/adt/array_userfuncs.c:85 +#, c-format +msgid "input data type is not an array" +msgstr "indatatyp är inte en array" + +#: utils/adt/array_userfuncs.c:129 utils/adt/array_userfuncs.c:181 +#: utils/adt/arrayfuncs.c:1323 utils/adt/float.c:1363 utils/adt/float.c:1422 +#: utils/adt/float.c:3708 utils/adt/float.c:3722 utils/adt/int.c:755 +#: utils/adt/int.c:777 utils/adt/int.c:791 utils/adt/int.c:805 +#: utils/adt/int.c:836 utils/adt/int.c:857 utils/adt/int.c:974 +#: utils/adt/int.c:988 utils/adt/int.c:1002 utils/adt/int.c:1035 +#: 
utils/adt/int.c:1049 utils/adt/int.c:1063 utils/adt/int.c:1094 +#: utils/adt/int.c:1176 utils/adt/int8.c:1164 utils/adt/numeric.c:3117 +#: utils/adt/numeric.c:3126 utils/adt/varbit.c:1173 utils/adt/varbit.c:1575 +#: utils/adt/varlena.c:1053 utils/adt/varlena.c:2983 +#, c-format +msgid "integer out of range" +msgstr "heltal utanför giltigt intervall" + +#: utils/adt/array_userfuncs.c:136 utils/adt/array_userfuncs.c:191 +#, c-format +msgid "argument must be empty or one-dimensional array" +msgstr "argumentet måste vara tomt eller en endimensionell array" + +#: utils/adt/array_userfuncs.c:273 utils/adt/array_userfuncs.c:312 +#: utils/adt/array_userfuncs.c:349 utils/adt/array_userfuncs.c:378 +#: utils/adt/array_userfuncs.c:406 +#, c-format +msgid "cannot concatenate incompatible arrays" +msgstr "kan inte konkatenera inkompatibla arrayer" + +#: utils/adt/array_userfuncs.c:274 +#, c-format +msgid "Arrays with element types %s and %s are not compatible for concatenation." +msgstr "Array:er med elementtyper %s och %s är inte kompatibla för sammaslagning." + +#: utils/adt/array_userfuncs.c:313 +#, c-format +msgid "Arrays of %d and %d dimensions are not compatible for concatenation." +msgstr "Array:er med dimensioner %d och %d är inte kompatibla för sammaslagning." + +#: utils/adt/array_userfuncs.c:350 +#, c-format +msgid "Arrays with differing element dimensions are not compatible for concatenation." +msgstr "Array:er med olika elementdimensioner är inte kompatibla för sammaslagning." + +#: utils/adt/array_userfuncs.c:379 utils/adt/array_userfuncs.c:407 +#, c-format +msgid "Arrays with differing dimensions are not compatible for concatenation." +msgstr "Array:er med olika dimensioner fungerar inte vid konkatenering." + +#: utils/adt/array_userfuncs.c:662 utils/adt/array_userfuncs.c:814 +#, c-format +msgid "searching for elements in multidimensional arrays is not supported" +msgstr "sökning efter element i en multidimensionell array stöds inte" + +#: utils/adt/array_userfuncs.c:686 +#, c-format +msgid "initial position must not be null" +msgstr "initiala positionen får ej vara null" + +#: utils/adt/arrayfuncs.c:269 utils/adt/arrayfuncs.c:283 +#: utils/adt/arrayfuncs.c:294 utils/adt/arrayfuncs.c:316 +#: utils/adt/arrayfuncs.c:331 utils/adt/arrayfuncs.c:345 +#: utils/adt/arrayfuncs.c:351 utils/adt/arrayfuncs.c:358 +#: utils/adt/arrayfuncs.c:489 utils/adt/arrayfuncs.c:505 +#: utils/adt/arrayfuncs.c:516 utils/adt/arrayfuncs.c:531 +#: utils/adt/arrayfuncs.c:552 utils/adt/arrayfuncs.c:582 +#: utils/adt/arrayfuncs.c:589 utils/adt/arrayfuncs.c:597 +#: utils/adt/arrayfuncs.c:631 utils/adt/arrayfuncs.c:654 +#: utils/adt/arrayfuncs.c:674 utils/adt/arrayfuncs.c:786 +#: utils/adt/arrayfuncs.c:795 utils/adt/arrayfuncs.c:825 +#: utils/adt/arrayfuncs.c:840 utils/adt/arrayfuncs.c:893 +#, c-format +msgid "malformed array literal: \"%s\"" +msgstr "felaktig array-literal: \"%s\"" + +#: utils/adt/arrayfuncs.c:270 +#, c-format +msgid "\"[\" must introduce explicitly-specified array dimensions." +msgstr "\"[\" måste införa explicit angivna array-dimensioner." + +#: utils/adt/arrayfuncs.c:284 +#, c-format +msgid "Missing array dimension value." +msgstr "Saknar värde i array-dimension." + +#: utils/adt/arrayfuncs.c:295 utils/adt/arrayfuncs.c:332 +#, c-format +msgid "Missing \"%s\" after array dimensions." +msgstr "Saknar \"%s\" efter array-dimensioner." 
+ +#: utils/adt/arrayfuncs.c:304 utils/adt/arrayfuncs.c:2871 +#: utils/adt/arrayfuncs.c:2903 utils/adt/arrayfuncs.c:2918 +#, c-format +msgid "upper bound cannot be less than lower bound" +msgstr "övre gränsen kan inte vara lägre än undre gränsen" + +#: utils/adt/arrayfuncs.c:317 +#, c-format +msgid "Array value must start with \"{\" or dimension information." +msgstr "Array-värde måste starta med \"{\" eller dimensionsinformation" + +#: utils/adt/arrayfuncs.c:346 +#, c-format +msgid "Array contents must start with \"{\"." +msgstr "Array-innehåll måste starta med \"{\"." + +#: utils/adt/arrayfuncs.c:352 utils/adt/arrayfuncs.c:359 +#, c-format +msgid "Specified array dimensions do not match array contents." +msgstr "Angivna array-dimensioner matchar inte array-innehållet." + +#: utils/adt/arrayfuncs.c:490 utils/adt/arrayfuncs.c:517 +#: utils/adt/rangetypes.c:2178 utils/adt/rangetypes.c:2186 +#: utils/adt/rowtypes.c:209 utils/adt/rowtypes.c:217 +#, c-format +msgid "Unexpected end of input." +msgstr "oväntat slut på indata." + +#: utils/adt/arrayfuncs.c:506 utils/adt/arrayfuncs.c:553 +#: utils/adt/arrayfuncs.c:583 utils/adt/arrayfuncs.c:632 +#, c-format +msgid "Unexpected \"%c\" character." +msgstr "oväntat tecken \"%c\"." + +#: utils/adt/arrayfuncs.c:532 utils/adt/arrayfuncs.c:655 +#, c-format +msgid "Unexpected array element." +msgstr "Oväntat array-element." + +#: utils/adt/arrayfuncs.c:590 +#, c-format +msgid "Unmatched \"%c\" character." +msgstr "Icke matchat tecken \"%c\"." + +#: utils/adt/arrayfuncs.c:598 utils/adt/jsonfuncs.c:2394 +#, c-format +msgid "Multidimensional arrays must have sub-arrays with matching dimensions." +msgstr "Flerdimensionella array:er måste ha underarray:er med matchande dimensioner." + +#: utils/adt/arrayfuncs.c:675 +#, c-format +msgid "Junk after closing right brace." +msgstr "Skräp efter avslutande höger parentes." 
+ +#: utils/adt/arrayfuncs.c:1285 utils/adt/arrayfuncs.c:3331 +#: utils/adt/arrayfuncs.c:5804 +#, c-format +msgid "invalid number of dimensions: %d" +msgstr "felaktigt antal dimensioner: %d" + +#: utils/adt/arrayfuncs.c:1296 +#, c-format +msgid "invalid array flags" +msgstr "ogiltiga array-flaggor" + +#: utils/adt/arrayfuncs.c:1304 +#, c-format +msgid "wrong element type" +msgstr "fel elementtyp" + +#: utils/adt/arrayfuncs.c:1354 utils/adt/rangetypes.c:334 +#: utils/cache/lsyscache.c:2701 +#, c-format +msgid "no binary input function available for type %s" +msgstr "ingen binär indatafunktion finns för typen %s" + +#: utils/adt/arrayfuncs.c:1494 +#, c-format +msgid "improper binary format in array element %d" +msgstr "felaktigt binärt format i array-element %d" + +#: utils/adt/arrayfuncs.c:1575 utils/adt/rangetypes.c:339 +#: utils/cache/lsyscache.c:2734 +#, c-format +msgid "no binary output function available for type %s" +msgstr "det saknas en binär output-funktion för typen %s" + +#: utils/adt/arrayfuncs.c:2053 +#, c-format +msgid "slices of fixed-length arrays not implemented" +msgstr "slice av fixlängd-array är inte implementerat" + +#: utils/adt/arrayfuncs.c:2231 utils/adt/arrayfuncs.c:2253 +#: utils/adt/arrayfuncs.c:2302 utils/adt/arrayfuncs.c:2538 +#: utils/adt/arrayfuncs.c:2849 utils/adt/arrayfuncs.c:5790 +#: utils/adt/arrayfuncs.c:5816 utils/adt/arrayfuncs.c:5827 +#: utils/adt/json.c:2323 utils/adt/json.c:2398 utils/adt/jsonb.c:1282 +#: utils/adt/jsonb.c:1368 utils/adt/jsonfuncs.c:4277 utils/adt/jsonfuncs.c:4428 +#: utils/adt/jsonfuncs.c:4473 utils/adt/jsonfuncs.c:4520 +#, c-format +msgid "wrong number of array subscripts" +msgstr "fel antal array-indexeringar" + +#: utils/adt/arrayfuncs.c:2236 utils/adt/arrayfuncs.c:2344 +#: utils/adt/arrayfuncs.c:2602 utils/adt/arrayfuncs.c:2908 +#, c-format +msgid "array subscript out of range" +msgstr "array-index utanför giltigt område" + +#: utils/adt/arrayfuncs.c:2241 +#, c-format +msgid "cannot assign null value to an element of a fixed-length array" +msgstr "kan inte tilldela null-värde till ett element i en array med fast längd" + +#: utils/adt/arrayfuncs.c:2796 +#, c-format +msgid "updates on slices of fixed-length arrays not implemented" +msgstr "uppdatering av slice på fixlängd-array är inte implementerat" + +#: utils/adt/arrayfuncs.c:2827 +#, c-format +msgid "array slice subscript must provide both boundaries" +msgstr "array-slice-index måste inkludera båda gränser" + +#: utils/adt/arrayfuncs.c:2828 +#, c-format +msgid "When assigning to a slice of an empty array value, slice boundaries must be fully specified." 
+msgstr "Vid tilldelning till en slice av en tom array så måste slice-gränserna anges" + +#: utils/adt/arrayfuncs.c:2839 utils/adt/arrayfuncs.c:2934 +#, c-format +msgid "source array too small" +msgstr "käll-array för liten" + +#: utils/adt/arrayfuncs.c:3487 +#, c-format +msgid "null array element not allowed in this context" +msgstr "null-element i arrayer stöds inte i detta kontext" + +#: utils/adt/arrayfuncs.c:3589 utils/adt/arrayfuncs.c:3760 +#: utils/adt/arrayfuncs.c:4112 +#, c-format +msgid "cannot compare arrays of different element types" +msgstr "kan inte jämföra arrayer med olika elementtyper" + +#: utils/adt/arrayfuncs.c:3936 utils/adt/rangetypes.c:1253 +#: utils/adt/rangetypes.c:1317 +#, c-format +msgid "could not identify a hash function for type %s" +msgstr "kunde inte hitta en hash-funktion för typ %s" + +#: utils/adt/arrayfuncs.c:4028 +#, c-format +msgid "could not identify an extended hash function for type %s" +msgstr "kunde inte hitta en utökad hash-funktion för typ %s" + +#: utils/adt/arrayfuncs.c:5204 +#, c-format +msgid "data type %s is not an array type" +msgstr "datatypen %s är inte en arraytyp" + +#: utils/adt/arrayfuncs.c:5259 +#, c-format +msgid "cannot accumulate null arrays" +msgstr "kan inte ackumulera null-array:er" + +#: utils/adt/arrayfuncs.c:5287 +#, c-format +msgid "cannot accumulate empty arrays" +msgstr "kan inte ackumulera tomma array:er" + +#: utils/adt/arrayfuncs.c:5316 utils/adt/arrayfuncs.c:5322 +#, c-format +msgid "cannot accumulate arrays of different dimensionality" +msgstr "kan inte ackumulera arrayer med olika dimensioner" + +#: utils/adt/arrayfuncs.c:5688 utils/adt/arrayfuncs.c:5728 +#, c-format +msgid "dimension array or low bound array cannot be null" +msgstr "dimensionsarray eller undre gränsarray kan inte vara null" + +#: utils/adt/arrayfuncs.c:5791 utils/adt/arrayfuncs.c:5817 +#, c-format +msgid "Dimension array must be one dimensional." +msgstr "Dimensionsarray måste vara endimensionell." + +#: utils/adt/arrayfuncs.c:5796 utils/adt/arrayfuncs.c:5822 +#, c-format +msgid "dimension values cannot be null" +msgstr "dimensionsvärden kan inte vara null" + +#: utils/adt/arrayfuncs.c:5828 +#, c-format +msgid "Low bound array has different size than dimensions array." +msgstr "Undre arraygräns har annan storlek än dimensionsarray." + +#: utils/adt/arrayfuncs.c:6074 +#, c-format +msgid "removing elements from multidimensional arrays is not supported" +msgstr "borttagning av element från en multidimensionell array stöds inte" + +#: utils/adt/arrayfuncs.c:6351 +#, c-format +msgid "thresholds must be one-dimensional array" +msgstr "gränsvärden måste vara en endimensionell array" + +#: utils/adt/arrayfuncs.c:6356 +#, c-format +msgid "thresholds array must not contain NULLs" +msgstr "gränsvärdesarray får inte innehålla NULLL-värden" + +#: utils/adt/arrayutils.c:209 +#, c-format +msgid "typmod array must be type cstring[]" +msgstr "typmod-array måste ha typ cstring[]" + +#: utils/adt/arrayutils.c:214 +#, c-format +msgid "typmod array must be one-dimensional" +msgstr "typmod-array måste vara endimensionell" + +#: utils/adt/arrayutils.c:219 +#, c-format +msgid "typmod array must not contain nulls" +msgstr "typmod-arrayen får inte innehålla null-värden" + +#: utils/adt/ascii.c:76 +#, c-format +msgid "encoding conversion from %s to ASCII not supported" +msgstr "kodningskonvertering från %s till ASCII stöds inte" + +#. 
translator: first %s is inet or cidr +#: utils/adt/bool.c:153 utils/adt/cash.c:277 utils/adt/datetime.c:3788 +#: utils/adt/float.c:241 utils/adt/float.c:315 utils/adt/float.c:339 +#: utils/adt/float.c:458 utils/adt/float.c:541 utils/adt/float.c:567 +#: utils/adt/geo_ops.c:155 utils/adt/geo_ops.c:165 utils/adt/geo_ops.c:177 +#: utils/adt/geo_ops.c:209 utils/adt/geo_ops.c:254 utils/adt/geo_ops.c:264 +#: utils/adt/geo_ops.c:934 utils/adt/geo_ops.c:1320 utils/adt/geo_ops.c:1355 +#: utils/adt/geo_ops.c:1363 utils/adt/geo_ops.c:3429 utils/adt/geo_ops.c:4562 +#: utils/adt/geo_ops.c:4578 utils/adt/geo_ops.c:4585 utils/adt/mac.c:94 +#: utils/adt/mac8.c:93 utils/adt/mac8.c:166 utils/adt/mac8.c:184 +#: utils/adt/mac8.c:202 utils/adt/mac8.c:221 utils/adt/nabstime.c:1539 +#: utils/adt/network.c:58 utils/adt/numeric.c:604 utils/adt/numeric.c:631 +#: utils/adt/numeric.c:5662 utils/adt/numeric.c:5686 utils/adt/numeric.c:5710 +#: utils/adt/numeric.c:6516 utils/adt/numeric.c:6542 utils/adt/oid.c:44 +#: utils/adt/oid.c:58 utils/adt/oid.c:64 utils/adt/oid.c:86 +#: utils/adt/pg_lsn.c:44 utils/adt/pg_lsn.c:50 utils/adt/tid.c:72 +#: utils/adt/tid.c:80 utils/adt/tid.c:88 utils/adt/txid.c:405 +#: utils/adt/uuid.c:136 +#, c-format +msgid "invalid input syntax for type %s: \"%s\"" +msgstr "ogiltig indatasyntax för type %s: \"%s\"" + +#: utils/adt/cash.c:215 utils/adt/cash.c:240 utils/adt/cash.c:250 +#: utils/adt/cash.c:290 utils/adt/int8.c:117 utils/adt/numutils.c:75 +#: utils/adt/numutils.c:82 utils/adt/oid.c:70 utils/adt/oid.c:109 +#, c-format +msgid "value \"%s\" is out of range for type %s" +msgstr "värdet \"%s\" är utanför giltigt intervall för typen %s" + +#: utils/adt/cash.c:652 utils/adt/cash.c:702 utils/adt/cash.c:753 +#: utils/adt/cash.c:802 utils/adt/cash.c:854 utils/adt/cash.c:904 +#: utils/adt/float.c:852 utils/adt/float.c:916 utils/adt/float.c:3469 +#: utils/adt/float.c:3532 utils/adt/geo_ops.c:4092 utils/adt/int.c:820 +#: utils/adt/int.c:936 utils/adt/int.c:1016 utils/adt/int.c:1078 +#: utils/adt/int.c:1116 utils/adt/int.c:1144 utils/adt/int8.c:592 +#: utils/adt/int8.c:650 utils/adt/int8.c:850 utils/adt/int8.c:930 +#: utils/adt/int8.c:992 utils/adt/int8.c:1072 utils/adt/numeric.c:7080 +#: utils/adt/numeric.c:7369 utils/adt/numeric.c:8381 utils/adt/timestamp.c:3235 +#, c-format +msgid "division by zero" +msgstr "division med noll" + +#: utils/adt/char.c:169 +#, c-format +msgid "\"char\" out of range" +msgstr "\"char\" utanför sitt intervall" + +#: utils/adt/date.c:65 utils/adt/timestamp.c:95 utils/adt/varbit.c:54 +#: utils/adt/varchar.c:46 +#, c-format +msgid "invalid type modifier" +msgstr "ogiltig typmodifierare" + +#: utils/adt/date.c:77 +#, c-format +msgid "TIME(%d)%s precision must not be negative" +msgstr "TIME(%d)%s-precisionen får inte vara negativ" + +#: utils/adt/date.c:83 +#, c-format +msgid "TIME(%d)%s precision reduced to maximum allowed, %d" +msgstr "TIME(%d)%s-precisionen reducerad till maximalt tillåtna, %d" + +#: utils/adt/date.c:144 utils/adt/datetime.c:1193 utils/adt/datetime.c:2104 +#, c-format +msgid "date/time value \"current\" is no longer supported" +msgstr "datum/tid-värde \"current\" stöds inte längre" + +#: utils/adt/date.c:170 utils/adt/date.c:178 utils/adt/formatting.c:3606 +#: utils/adt/formatting.c:3615 +#, c-format +msgid "date out of range: \"%s\"" +msgstr "datum utanför giltigt intervall \"%s\"" + +#: utils/adt/date.c:225 utils/adt/date.c:537 utils/adt/date.c:561 +#: utils/adt/xml.c:2089 +#, c-format +msgid "date out of range" +msgstr "datum utanför giltigt intervall" 
+ +#: utils/adt/date.c:271 utils/adt/timestamp.c:564 +#, c-format +msgid "date field value out of range: %d-%02d-%02d" +msgstr "datumfältvärde utanför giltigt område: %d-%02d-%02d" + +#: utils/adt/date.c:278 utils/adt/date.c:287 utils/adt/timestamp.c:570 +#, c-format +msgid "date out of range: %d-%02d-%02d" +msgstr "datum utanför giltigt område: %d-%02d-%02d" + +#: utils/adt/date.c:325 utils/adt/date.c:348 utils/adt/date.c:374 +#: utils/adt/date.c:1118 utils/adt/date.c:1164 utils/adt/date.c:1704 +#: utils/adt/date.c:1735 utils/adt/date.c:1764 utils/adt/date.c:2596 +#: utils/adt/datetime.c:1677 utils/adt/formatting.c:3472 +#: utils/adt/formatting.c:3504 utils/adt/formatting.c:3581 +#: utils/adt/json.c:1621 utils/adt/json.c:1641 utils/adt/nabstime.c:456 +#: utils/adt/nabstime.c:499 utils/adt/nabstime.c:529 utils/adt/nabstime.c:572 +#: utils/adt/timestamp.c:230 utils/adt/timestamp.c:262 +#: utils/adt/timestamp.c:692 utils/adt/timestamp.c:701 +#: utils/adt/timestamp.c:779 utils/adt/timestamp.c:812 +#: utils/adt/timestamp.c:2814 utils/adt/timestamp.c:2835 +#: utils/adt/timestamp.c:2848 utils/adt/timestamp.c:2857 +#: utils/adt/timestamp.c:2865 utils/adt/timestamp.c:2920 +#: utils/adt/timestamp.c:2943 utils/adt/timestamp.c:2956 +#: utils/adt/timestamp.c:2967 utils/adt/timestamp.c:2975 +#: utils/adt/timestamp.c:3635 utils/adt/timestamp.c:3760 +#: utils/adt/timestamp.c:3801 utils/adt/timestamp.c:3891 +#: utils/adt/timestamp.c:3937 utils/adt/timestamp.c:4040 +#: utils/adt/timestamp.c:4447 utils/adt/timestamp.c:4546 +#: utils/adt/timestamp.c:4556 utils/adt/timestamp.c:4648 +#: utils/adt/timestamp.c:4750 utils/adt/timestamp.c:4760 +#: utils/adt/timestamp.c:4992 utils/adt/timestamp.c:5006 +#: utils/adt/timestamp.c:5011 utils/adt/timestamp.c:5025 +#: utils/adt/timestamp.c:5070 utils/adt/timestamp.c:5102 +#: utils/adt/timestamp.c:5109 utils/adt/timestamp.c:5142 +#: utils/adt/timestamp.c:5146 utils/adt/timestamp.c:5215 +#: utils/adt/timestamp.c:5219 utils/adt/timestamp.c:5233 +#: utils/adt/timestamp.c:5267 utils/adt/xml.c:2111 utils/adt/xml.c:2118 +#: utils/adt/xml.c:2138 utils/adt/xml.c:2145 +#, c-format +msgid "timestamp out of range" +msgstr "timestamp utanför giltigt intervall" + +#: utils/adt/date.c:512 +#, c-format +msgid "cannot subtract infinite dates" +msgstr "kan inte subtrahera oändliga datum" + +#: utils/adt/date.c:590 utils/adt/date.c:621 utils/adt/date.c:639 +#: utils/adt/date.c:2633 utils/adt/date.c:2643 +#, c-format +msgid "date out of range for timestamp" +msgstr "datum utanför filtigt område för timestamp" + +#: utils/adt/date.c:1190 +#, c-format +msgid "cannot convert reserved abstime value to date" +msgstr "kan inte konvertera reserverat abstime-värde till date" + +#: utils/adt/date.c:1208 utils/adt/date.c:1214 +#, c-format +msgid "abstime out of range for date" +msgstr "abstime utanför giltigt område för date" + +#: utils/adt/date.c:1327 utils/adt/date.c:2091 +#, c-format +msgid "time out of range" +msgstr "time utanför giltigt intervall" + +#: utils/adt/date.c:1383 utils/adt/timestamp.c:589 +#, c-format +msgid "time field value out of range: %d:%02d:%02g" +msgstr "time-värde utanför giltigt område: %d:%02d:%02g" + +#: utils/adt/date.c:1893 utils/adt/date.c:2395 utils/adt/float.c:1202 +#: utils/adt/float.c:1271 utils/adt/int.c:612 utils/adt/int.c:659 +#: utils/adt/int.c:694 utils/adt/int8.c:491 utils/adt/numeric.c:2189 +#: utils/adt/timestamp.c:3284 utils/adt/timestamp.c:3315 +#: utils/adt/timestamp.c:3346 +#, c-format +msgid "invalid preceding or following size in window function" 
+msgstr "ogiltig föregående eller efterföljande storlek i fönsterfunktion" + +#: utils/adt/date.c:1978 utils/adt/date.c:1991 +#, c-format +msgid "\"time\" units \"%s\" not recognized" +msgstr "känner inte igen \"time\"-enhet \"%s\"" + +#: utils/adt/date.c:2099 +#, c-format +msgid "time zone displacement out of range" +msgstr "tidszonförskjutning utanför giltigt intervall" + +#: utils/adt/date.c:2728 utils/adt/date.c:2741 +#, c-format +msgid "\"time with time zone\" units \"%s\" not recognized" +msgstr "känner inte igen \"time with time zone\"-enhet \"%s\"" + +#: utils/adt/date.c:2814 utils/adt/datetime.c:915 utils/adt/datetime.c:1835 +#: utils/adt/datetime.c:4625 utils/adt/timestamp.c:503 +#: utils/adt/timestamp.c:530 utils/adt/timestamp.c:5017 +#: utils/adt/timestamp.c:5225 +#, c-format +msgid "time zone \"%s\" not recognized" +msgstr "tidszon \"%s\" känns inte igen" + +#: utils/adt/date.c:2846 utils/adt/timestamp.c:5059 utils/adt/timestamp.c:5256 +#, c-format +msgid "interval time zone \"%s\" must not include months or days" +msgstr "intervalltidszonen \"%s\" får inte inkludera månader eller dagar" + +#: utils/adt/datetime.c:3761 utils/adt/datetime.c:3768 +#, c-format +msgid "date/time field value out of range: \"%s\"" +msgstr "datum/tid-värde utanför giltigt område: \"%s\"" + +#: utils/adt/datetime.c:3770 +#, c-format +msgid "Perhaps you need a different \"datestyle\" setting." +msgstr "Du kanske behöver en annan inställning av variabeln \"datestyle\"." + +#: utils/adt/datetime.c:3775 +#, c-format +msgid "interval field value out of range: \"%s\"" +msgstr "intervall-värde utanför giltigt område: \"%s\"" + +#: utils/adt/datetime.c:3781 +#, c-format +msgid "time zone displacement out of range: \"%s\"" +msgstr "tidszonförskjutning utanför giltigt intervall: \"%s\"" + +#: utils/adt/datetime.c:4627 +#, c-format +msgid "This time zone name appears in the configuration file for time zone abbreviation \"%s\"." +msgstr "Detta tidszonsnamn finns i konfigurationsfilen för tidszonsförkortning \"%s\"." + +#: utils/adt/datum.c:86 utils/adt/datum.c:98 +#, c-format +msgid "invalid Datum pointer" +msgstr "ogiltig Datum-pekare" + +#: utils/adt/dbsize.c:759 utils/adt/dbsize.c:827 +#, c-format +msgid "invalid size: \"%s\"" +msgstr "ogiltig storlek: \"%s\"" + +#: utils/adt/dbsize.c:828 +#, c-format +msgid "Invalid size unit: \"%s\"." +msgstr "Ogiltig storleksenhet: \"%s\"." + +#: utils/adt/dbsize.c:829 +#, c-format +msgid "Valid units are \"bytes\", \"kB\", \"MB\", \"GB\", and \"TB\"." +msgstr "Giltiga enheter är \"bytes\", \"kB\", \"MB\", \"GB\" och \"TB\"." 
+ +#: utils/adt/domains.c:92 +#, c-format +msgid "type %s is not a domain" +msgstr "typen %s är inte en domän" + +#: utils/adt/encode.c:55 utils/adt/encode.c:91 +#, c-format +msgid "unrecognized encoding: \"%s\"" +msgstr "okänd kodning: \"%s\"" + +#: utils/adt/encode.c:150 +#, c-format +msgid "invalid hexadecimal digit: \"%c\"" +msgstr "ogiltig hexadecimal siffra: \"%c\"" + +#: utils/adt/encode.c:178 +#, c-format +msgid "invalid hexadecimal data: odd number of digits" +msgstr "ogiltig hexadecimal data: udda antal siffror" + +#: utils/adt/encode.c:295 +#, c-format +msgid "unexpected \"=\" while decoding base64 sequence" +msgstr "oväntat \"=\" vid avkodning av base64-sekvens" + +#: utils/adt/encode.c:307 +#, c-format +msgid "invalid symbol \"%c\" while decoding base64 sequence" +msgstr "ogiltig symbol \"%c\" vid avkodning av base64-sekvens" + +#: utils/adt/encode.c:327 +#, c-format +msgid "invalid base64 end sequence" +msgstr "ogiltig base64-slutsekvens" + +#: utils/adt/encode.c:328 +#, c-format +msgid "Input data is missing padding, is truncated, or is otherwise corrupted." +msgstr "Indata saknar paddning, är trunkerad eller är trasig på annat sätt." + +#: utils/adt/encode.c:442 utils/adt/encode.c:507 utils/adt/json.c:786 +#: utils/adt/json.c:826 utils/adt/json.c:842 utils/adt/json.c:854 +#: utils/adt/json.c:864 utils/adt/json.c:915 utils/adt/json.c:947 +#: utils/adt/json.c:966 utils/adt/json.c:978 utils/adt/json.c:990 +#: utils/adt/json.c:1135 utils/adt/json.c:1149 utils/adt/json.c:1160 +#: utils/adt/json.c:1168 utils/adt/json.c:1176 utils/adt/json.c:1184 +#: utils/adt/json.c:1192 utils/adt/json.c:1200 utils/adt/json.c:1208 +#: utils/adt/json.c:1216 utils/adt/json.c:1246 utils/adt/varlena.c:296 +#: utils/adt/varlena.c:337 +#, c-format +msgid "invalid input syntax for type %s" +msgstr "ogiltig indatasyntax för typen %s" + +#: utils/adt/enum.c:48 utils/adt/enum.c:58 utils/adt/enum.c:113 +#: utils/adt/enum.c:123 +#, c-format +msgid "invalid input value for enum %s: \"%s\"" +msgstr "ogiltigt indata-värde för enum %s: \"%s\"" + +#: utils/adt/enum.c:85 utils/adt/enum.c:148 utils/adt/enum.c:207 +#, c-format +msgid "invalid internal value for enum: %u" +msgstr "ogiltigt internt värde för enum: %u" + +#: utils/adt/enum.c:360 utils/adt/enum.c:389 utils/adt/enum.c:429 +#: utils/adt/enum.c:449 +#, c-format +msgid "could not determine actual enum type" +msgstr "kunde inte bestämma den verkliga enum-typen" + +#: utils/adt/enum.c:368 utils/adt/enum.c:397 +#, c-format +msgid "enum %s contains no values" +msgstr "enum %s innehåller inga värden" + +#: utils/adt/expandedrecord.c:98 utils/adt/expandedrecord.c:230 +#: utils/cache/typcache.c:1563 utils/cache/typcache.c:1719 +#: utils/cache/typcache.c:1849 utils/fmgr/funcapi.c:430 +#, c-format +msgid "type %s is not composite" +msgstr "typen %s är inte composite" + +#: utils/adt/float.c:55 +#, c-format +msgid "value out of range: overflow" +msgstr "värde utanför giltigt intervall: overflow" + +#: utils/adt/float.c:60 +#, c-format +msgid "value out of range: underflow" +msgstr "värde utanför giltigt intervall: underflow" + +#: utils/adt/float.c:309 +#, c-format +msgid "\"%s\" is out of range for type real" +msgstr "\"%s\" är utanför giltigt intervall för typen real" + +#: utils/adt/float.c:534 +#, c-format +msgid "\"%s\" is out of range for type double precision" +msgstr "\"%s\" är utanför giltigt intervall för typen double precision" + +#: utils/adt/float.c:1381 utils/adt/float.c:1439 utils/adt/int.c:332 +#: utils/adt/int.c:870 utils/adt/int.c:892 utils/adt/int.c:906 +#: utils/adt/int.c:920 utils/adt/int.c:952 utils/adt/int.c:1190 +#: utils/adt/int8.c:1185 utils/adt/numeric.c:3214 utils/adt/numeric.c:3223 +#, c-format +msgid "smallint out of range" +msgstr "smallint utanför sitt intervall" + +#: utils/adt/float.c:1565 utils/adt/numeric.c:7802 +#, c-format +msgid "cannot take square root of a negative number" +msgstr "kan inte ta kvadratroten av ett negativt tal" + +#: utils/adt/float.c:1626 utils/adt/numeric.c:3017 +#, c-format +msgid "zero raised to a negative power is undefined" +msgstr "noll upphöjt i en negativ potens är odefinierat" + +#: utils/adt/float.c:1630 utils/adt/numeric.c:3023 +#, c-format +msgid "a negative number raised to a non-integer power yields a complex result" +msgstr "ett negativt tal upphöjt i en icke-heltalspotens ger ett komplext resultat" + +#: utils/adt/float.c:1696 utils/adt/float.c:1726 utils/adt/numeric.c:8068 +#, c-format +msgid "cannot take logarithm of zero" +msgstr "kan inte ta logaritmen av noll" + +#: utils/adt/float.c:1700 utils/adt/float.c:1730 utils/adt/numeric.c:8072 +#, c-format +msgid "cannot take logarithm of a negative number" +msgstr "kan inte ta logaritmen av ett negativt tal" + +#: utils/adt/float.c:1760 utils/adt/float.c:1790 utils/adt/float.c:1882 +#: utils/adt/float.c:1908 utils/adt/float.c:1935 utils/adt/float.c:1961 +#: utils/adt/float.c:2108 utils/adt/float.c:2143 utils/adt/float.c:2307 +#: utils/adt/float.c:2361 utils/adt/float.c:2425 utils/adt/float.c:2480 +#, c-format +msgid "input is out of range" +msgstr "indata är utanför giltigt intervall" + +#: utils/adt/float.c:3686 utils/adt/numeric.c:1504 +#, c-format +msgid "count must be greater than zero" +msgstr "antal måste vara större än noll" + +#: utils/adt/float.c:3691 utils/adt/numeric.c:1511 +#, c-format +msgid "operand, lower bound, and upper bound cannot be NaN" +msgstr "operand, lägre gräns och övre gräns kan inte vara NaN" + +#: utils/adt/float.c:3697 +#, c-format +msgid "lower and upper bounds must be finite" +msgstr "lägre och övre gräns måste vara ändliga" + +#: utils/adt/float.c:3731 utils/adt/numeric.c:1524 +#, c-format +msgid "lower bound cannot equal upper bound" +msgstr "lägre gräns kan inte vara samma som övre gräns" + +#: utils/adt/formatting.c:488 +#, c-format +msgid "invalid format specification for an interval value" +msgstr "ogiltig formatspecifikation för ett intervallvärde" + +#: utils/adt/formatting.c:489 +#, c-format +msgid "Intervals are not tied to specific calendar dates." +msgstr "Intervaller är inte kopplade till specifika kalenderdatum." 
+ +#: utils/adt/formatting.c:1059 +#, c-format +msgid "\"EEEE\" must be the last pattern used" +msgstr "\"EEEE\" måste vara det sista mönstret som används" + +#: utils/adt/formatting.c:1067 +#, c-format +msgid "\"9\" must be ahead of \"PR\"" +msgstr "\"9\" måste vara före \"PR\"" + +#: utils/adt/formatting.c:1083 +#, c-format +msgid "\"0\" must be ahead of \"PR\"" +msgstr "\"0\" måste vara före \"PR\"" + +#: utils/adt/formatting.c:1110 +#, c-format +msgid "multiple decimal points" +msgstr "multipla decimalpunkter" + +#: utils/adt/formatting.c:1114 utils/adt/formatting.c:1197 +#, c-format +msgid "cannot use \"V\" and decimal point together" +msgstr "kan inte använda \"V\" ach decimalpunkt tillsammans" + +#: utils/adt/formatting.c:1126 +#, c-format +msgid "cannot use \"S\" twice" +msgstr "kan inte använda \"S\" två gånger" + +#: utils/adt/formatting.c:1130 +#, c-format +msgid "cannot use \"S\" and \"PL\"/\"MI\"/\"SG\"/\"PR\" together" +msgstr "kan inte använda \"S\" och \"PL\"/\"MI\"/\"SG\"/\"PR\" tillsammans" + +#: utils/adt/formatting.c:1150 +#, c-format +msgid "cannot use \"S\" and \"MI\" together" +msgstr "kan inte använda \"S\" och \"MI\" tillsammans." + +#: utils/adt/formatting.c:1160 +#, c-format +msgid "cannot use \"S\" and \"PL\" together" +msgstr "kan inte använda \"S\" och \"PL\" tillsammans." + +#: utils/adt/formatting.c:1170 +#, c-format +msgid "cannot use \"S\" and \"SG\" together" +msgstr "kan inte använda \"S\" och \"SG\" tillsammans." + +#: utils/adt/formatting.c:1179 +#, c-format +msgid "cannot use \"PR\" and \"S\"/\"PL\"/\"MI\"/\"SG\" together" +msgstr "kan inte använda \"PR\" och \"S\"/\"PL\"/\"MI\"/\"SG\" tillsammans." + +#: utils/adt/formatting.c:1205 +#, c-format +msgid "cannot use \"EEEE\" twice" +msgstr "kan inte använda \"EEEE\" två gånger" + +#: utils/adt/formatting.c:1211 +#, c-format +msgid "\"EEEE\" is incompatible with other formats" +msgstr "\"EEEE\" är inkompatibel med andra format" + +#: utils/adt/formatting.c:1212 +#, c-format +msgid "\"EEEE\" may only be used together with digit and decimal point patterns." +msgstr "\"EEEE\" får bara användas tillsammans med siffror- och decimalpunkts-mönster." + +#: utils/adt/formatting.c:1392 +#, c-format +msgid "\"%s\" is not a number" +msgstr "\"%s\" är inte ett nummer" + +#: utils/adt/formatting.c:1470 +#, c-format +msgid "case conversion failed: %s" +msgstr "case-konvertering misslyckades: %s" + +#: utils/adt/formatting.c:1535 +#, c-format +msgid "could not determine which collation to use for lower() function" +msgstr "kunde inte bestämma jämförelse (collation) för funktionen lower()" + +#: utils/adt/formatting.c:1657 +#, c-format +msgid "could not determine which collation to use for upper() function" +msgstr "kunde inte bestämma jämförelse (collation) för funktionen upper()" + +#: utils/adt/formatting.c:1780 +#, c-format +msgid "could not determine which collation to use for initcap() function" +msgstr "kunde inte bestämma jämförelse (collation) för funktionen initcap()" + +#: utils/adt/formatting.c:2148 +#, c-format +msgid "invalid combination of date conventions" +msgstr "ogiltig kombination av datumkonventioner" + +#: utils/adt/formatting.c:2149 +#, c-format +msgid "Do not mix Gregorian and ISO week date conventions in a formatting template." +msgstr "Blanda inte datumkonventionerna Gregoriansk och ISO-veckor i formatteringsmall." 
+ +#: utils/adt/formatting.c:2166 +#, c-format +msgid "conflicting values for \"%s\" field in formatting string" +msgstr "värden för \"%s\" i formatsträng står i konflikt med varandra" + +#: utils/adt/formatting.c:2168 +#, c-format +msgid "This value contradicts a previous setting for the same field type." +msgstr "Detta värde motsäger en tidigare inställning för samma fälttyp." + +#: utils/adt/formatting.c:2229 +#, c-format +msgid "source string too short for \"%s\" formatting field" +msgstr "källsträngen är för kort för formatfält \"%s\"" + +#: utils/adt/formatting.c:2231 +#, c-format +msgid "Field requires %d characters, but only %d remain." +msgstr "Fältet kräver %d tecken men bara %d återstår." + +#: utils/adt/formatting.c:2234 utils/adt/formatting.c:2248 +#, c-format +msgid "If your source string is not fixed-width, try using the \"FM\" modifier." +msgstr "Om din källsträng inte är av fast längd så testa med modifieraren \"FM\"." + +#: utils/adt/formatting.c:2244 utils/adt/formatting.c:2257 +#: utils/adt/formatting.c:2387 +#, c-format +msgid "invalid value \"%s\" for \"%s\"" +msgstr "ogiltigt värde \"%s\" för \"%s\"" + +#: utils/adt/formatting.c:2246 +#, c-format +msgid "Field requires %d characters, but only %d could be parsed." +msgstr "Fältet kräver %d tecken men bara %d kunde parsas." + +#: utils/adt/formatting.c:2259 +#, c-format +msgid "Value must be an integer." +msgstr "Värdet måste vara ett heltal." + +#: utils/adt/formatting.c:2264 +#, c-format +msgid "value for \"%s\" in source string is out of range" +msgstr "värdet för \"%s\" i källsträng är utanför giltigt intervall" + +#: utils/adt/formatting.c:2266 +#, c-format +msgid "Value must be in the range %d to %d." +msgstr "Värdet måste vara i intervallet %d till %d." + +#: utils/adt/formatting.c:2389 +#, c-format +msgid "The given value did not match any of the allowed values for this field." +msgstr "Det givna värdet matchar inget av de tillåtna värdena för detta fält." + +#: utils/adt/formatting.c:2587 utils/adt/formatting.c:2607 +#: utils/adt/formatting.c:2627 utils/adt/formatting.c:2647 +#: utils/adt/formatting.c:2666 utils/adt/formatting.c:2685 +#: utils/adt/formatting.c:2709 utils/adt/formatting.c:2727 +#: utils/adt/formatting.c:2745 utils/adt/formatting.c:2763 +#: utils/adt/formatting.c:2780 utils/adt/formatting.c:2797 +#, c-format +msgid "localized string format value too long" +msgstr "lokaliserat strängformatvärde är för långt" + +#: utils/adt/formatting.c:3084 +#, c-format +msgid "formatting field \"%s\" is only supported in to_char" +msgstr "formateringsfält \"%s\" stöds bara i to_char" + +#: utils/adt/formatting.c:3209 +#, c-format +msgid "invalid input string for \"Y,YYY\"" +msgstr "ogiltig indatasträng för \"Y,YYY\"" + +#: utils/adt/formatting.c:3724 +#, c-format +msgid "hour \"%d\" is invalid for the 12-hour clock" +msgstr "timmen \"%d\" är ogiltigt för en 12-timmars-klocka" + +#: utils/adt/formatting.c:3726 +#, c-format +msgid "Use the 24-hour clock, or give an hour between 1 and 12." +msgstr "Använd en 24-timmars-klocka eller ange en timme mellan 1 och 12." 
+ +#: utils/adt/formatting.c:3832 +#, c-format +msgid "cannot calculate day of year without year information" +msgstr "kan inte beräkna dag på året utan årsinformation" + +#: utils/adt/formatting.c:4737 +#, c-format +msgid "\"EEEE\" not supported for input" +msgstr "\"EEEE\" stöds inte för indata" + +#: utils/adt/formatting.c:4749 +#, c-format +msgid "\"RN\" not supported for input" +msgstr "\"RN\" stöds inte för indata" + +#: utils/adt/genfile.c:79 +#, c-format +msgid "reference to parent directory (\"..\") not allowed" +msgstr "referens till föräldrakatalog (\"..\") tillåts inte" + +#: utils/adt/genfile.c:90 +#, c-format +msgid "absolute path not allowed" +msgstr "absolut sökväg tillåts inte" + +#: utils/adt/genfile.c:95 +#, c-format +msgid "path must be in or below the current directory" +msgstr "sökväg måste vara i eller under den aktuella katalogen" + +#: utils/adt/genfile.c:142 utils/adt/oracle_compat.c:185 +#: utils/adt/oracle_compat.c:283 utils/adt/oracle_compat.c:759 +#: utils/adt/oracle_compat.c:1054 +#, c-format +msgid "requested length too large" +msgstr "efterfrågad längd är för lång" + +#: utils/adt/genfile.c:159 +#, c-format +msgid "could not seek in file \"%s\": %m" +msgstr "kunde inte söka (seek) i fil \"%s\": %m" + +#: utils/adt/genfile.c:219 +#, c-format +msgid "must be superuser to read files with adminpack 1.0" +msgstr "måste vara superanvändare för att läsa filer med adminpack 1.0" + +#: utils/adt/genfile.c:220 +#, c-format +msgid "Consider using pg_file_read(), which is part of core, instead." +msgstr "Du kanske kan använda pg_file_read() istället som är en del av core." + +#: utils/adt/geo_ops.c:939 +#, c-format +msgid "invalid line specification: A and B cannot both be zero" +msgstr "ogiltig radangivelse: A och B kan inte båda vara noll" + +#: utils/adt/geo_ops.c:947 +#, c-format +msgid "invalid line specification: must be two distinct points" +msgstr "ogiltig linjeangivelse: måste vara två enskilda punkter" + +#: utils/adt/geo_ops.c:1341 utils/adt/geo_ops.c:3439 utils/adt/geo_ops.c:4252 +#: utils/adt/geo_ops.c:5180 +#, c-format +msgid "too many points requested" +msgstr "för många punkter efterfrågade" + +#: utils/adt/geo_ops.c:1403 +#, c-format +msgid "invalid number of points in external \"path\" value" +msgstr "ogiltigt antal punkter i externt \"path\"-värde" + +#: utils/adt/geo_ops.c:2554 +#, c-format +msgid "function \"dist_lb\" not implemented" +msgstr "funktionen \"dist_lb\" är inte implementerad" + +#: utils/adt/geo_ops.c:3014 +#, c-format +msgid "function \"close_sl\" not implemented" +msgstr "funktionen \"close_sl\" är inte implementerad" + +#: utils/adt/geo_ops.c:3116 +#, c-format +msgid "function \"close_lb\" not implemented" +msgstr "funktionen \"close_lb\" är inte implementerad" + +#: utils/adt/geo_ops.c:3405 +#, c-format +msgid "cannot create bounding box for empty polygon" +msgstr "kan inte skapa en omslutande box för en tom polygon" + +#: utils/adt/geo_ops.c:3486 +#, c-format +msgid "invalid number of points in external \"polygon\" value" +msgstr "ogiltigt antal punkter i ett externt \"polygon\"-värde" + +#: utils/adt/geo_ops.c:4011 +#, c-format +msgid "function \"poly_distance\" not implemented" +msgstr "funktionen \"poly_distance\" är inte implementerad" + +#: utils/adt/geo_ops.c:4364 +#, c-format +msgid "function \"path_center\" not implemented" +msgstr "funktionen \"path_center\" är inte implementerad" + +#: utils/adt/geo_ops.c:4381 +#, c-format +msgid "open path cannot be converted to polygon" +msgstr "öppen väg kan inte konverteras till en 
polygon" + +#: utils/adt/geo_ops.c:4630 +#, c-format +msgid "invalid radius in external \"circle\" value" +msgstr "ogiltig radie i ett externt cirkelvärde" + +#: utils/adt/geo_ops.c:5166 +#, c-format +msgid "cannot convert circle with radius zero to polygon" +msgstr "kan inte konvertera en cirkel med radie noll till en polygon" + +#: utils/adt/geo_ops.c:5171 +#, c-format +msgid "must request at least 2 points" +msgstr "måste efterfråga minst 2 punkter" + +#: utils/adt/geo_ops.c:5215 +#, c-format +msgid "cannot convert empty polygon to circle" +msgstr "kan inte konvertera en tom polygon till en cirkel" + +#: utils/adt/int.c:160 +#, c-format +msgid "int2vector has too many elements" +msgstr "int2vector har för många element" + +#: utils/adt/int.c:235 +#, c-format +msgid "invalid int2vector data" +msgstr "ogiltig int2vector-data" + +#: utils/adt/int.c:241 utils/adt/oid.c:215 utils/adt/oid.c:296 +#, c-format +msgid "oidvector has too many elements" +msgstr "oidvector har för många element" + +#: utils/adt/int.c:1379 utils/adt/int8.c:1309 utils/adt/numeric.c:1412 +#: utils/adt/timestamp.c:5318 utils/adt/timestamp.c:5399 +#, c-format +msgid "step size cannot equal zero" +msgstr "stegstorleken kan inte vara noll" + +#: utils/adt/int8.c:125 utils/adt/numutils.c:51 utils/adt/numutils.c:61 +#: utils/adt/numutils.c:105 +#, c-format +msgid "invalid input syntax for integer: \"%s\"" +msgstr "felaktig indatasyntax för heltal: \"%s\"" + +#: utils/adt/int8.c:526 utils/adt/int8.c:549 utils/adt/int8.c:563 +#: utils/adt/int8.c:577 utils/adt/int8.c:608 utils/adt/int8.c:632 +#: utils/adt/int8.c:687 utils/adt/int8.c:701 utils/adt/int8.c:725 +#: utils/adt/int8.c:738 utils/adt/int8.c:807 utils/adt/int8.c:821 +#: utils/adt/int8.c:835 utils/adt/int8.c:866 utils/adt/int8.c:888 +#: utils/adt/int8.c:902 utils/adt/int8.c:916 utils/adt/int8.c:949 +#: utils/adt/int8.c:963 utils/adt/int8.c:977 utils/adt/int8.c:1008 +#: utils/adt/int8.c:1030 utils/adt/int8.c:1044 utils/adt/int8.c:1058 +#: utils/adt/int8.c:1218 utils/adt/int8.c:1253 utils/adt/numeric.c:3169 +#: utils/adt/varbit.c:1655 +#, c-format +msgid "bigint out of range" +msgstr "bigint utanför sitt intervall" + +#: utils/adt/int8.c:1266 +#, c-format +msgid "OID out of range" +msgstr "OID utanför sitt intervall" + +#: utils/adt/json.c:787 +#, c-format +msgid "Character with value 0x%02x must be escaped." +msgstr "Tecken med värde 0x%02x måste escape:as." + +#: utils/adt/json.c:828 +#, c-format +msgid "\"\\u\" must be followed by four hexadecimal digits." +msgstr "\"\\u\" måste följas av fyra hexdecimala siffror." + +#: utils/adt/json.c:844 +#, c-format +msgid "Unicode high surrogate must not follow a high surrogate." +msgstr "Unicodes övre surrogathalva får inte komma efter en övre surrogathalva." + +#: utils/adt/json.c:855 utils/adt/json.c:865 utils/adt/json.c:917 +#: utils/adt/json.c:979 utils/adt/json.c:991 +#, c-format +msgid "Unicode low surrogate must follow a high surrogate." +msgstr "Unicodes lägre surrogathalva måste följa en övre surrogathalva." + +#: utils/adt/json.c:880 utils/adt/json.c:903 +#, c-format +msgid "unsupported Unicode escape sequence" +msgstr "Unicode escape-sekvens som inte stöds" + +#: utils/adt/json.c:881 +#, c-format +msgid "\\u0000 cannot be converted to text." +msgstr "\\u0000 kan inte konverteras till text." + +#: utils/adt/json.c:904 +#, c-format +msgid "Unicode escape values cannot be used for code point values above 007F when the server encoding is not UTF8." 
+msgstr "Escape-värden för unicode kan inte användas för kodpunkter med värde över 007F när serverns kodning inte är UTF8." + +#: utils/adt/json.c:949 utils/adt/json.c:967 +#, c-format +msgid "Escape sequence \"\\%s\" is invalid." +msgstr "Escape-sekvens \"\\%s\" är ogiltig." + +#: utils/adt/json.c:1136 +#, c-format +msgid "The input string ended unexpectedly." +msgstr "Indatasträngen avslutades oväntat." + +#: utils/adt/json.c:1150 +#, c-format +msgid "Expected end of input, but found \"%s\"." +msgstr "Förväntade slut på indata, men hittade \"%s\"." + +#: utils/adt/json.c:1161 +#, c-format +msgid "Expected JSON value, but found \"%s\"." +msgstr "Förväntade JSON-värde, men hittade \"%s\"." + +#: utils/adt/json.c:1169 utils/adt/json.c:1217 +#, c-format +msgid "Expected string, but found \"%s\"." +msgstr "Förväntade sträng, men hittade \"%s\"." + +#: utils/adt/json.c:1177 +#, c-format +msgid "Expected array element or \"]\", but found \"%s\"." +msgstr "Förväntade array-element eller \"]\", men hittade \"%s\"." + +#: utils/adt/json.c:1185 +#, c-format +msgid "Expected \",\" or \"]\", but found \"%s\"." +msgstr "Förväntade \",\" eller \"]\", men hittade \"%s\"." + +#: utils/adt/json.c:1193 +#, c-format +msgid "Expected string or \"}\", but found \"%s\"." +msgstr "Förväntade sträng eller \"}\", men hittade \"%s\"." + +#: utils/adt/json.c:1201 +#, c-format +msgid "Expected \":\", but found \"%s\"." +msgstr "Förväntade sig \":\" men hittade \"%s\"." + +#: utils/adt/json.c:1209 +#, c-format +msgid "Expected \",\" or \"}\", but found \"%s\"." +msgstr "Förväntade sig \",\" eller \"}\" men hittade \"%s\"." + +#: utils/adt/json.c:1247 +#, c-format +msgid "Token \"%s\" is invalid." +msgstr "Token \"%s\" är ogiltig." + +#: utils/adt/json.c:1319 +#, c-format +msgid "JSON data, line %d: %s%s%s" +msgstr "JSON-data, rad %d: %s%s%s" + +#: utils/adt/json.c:1475 utils/adt/jsonb.c:728 +#, c-format +msgid "key value must be scalar, not array, composite, or json" +msgstr "nyckelvärde måste vara skalär, inte array, composite eller json" + +#: utils/adt/json.c:2076 utils/adt/json.c:2086 utils/fmgr/funcapi.c:1564 +#, c-format +msgid "could not determine data type for argument %d" +msgstr "kunde inte lista ut datatypen för argument %d" + +#: utils/adt/json.c:2110 utils/adt/jsonb.c:1694 +#, c-format +msgid "field name must not be null" +msgstr "fältnamnet får inte vara null" + +#: utils/adt/json.c:2194 utils/adt/jsonb.c:1146 +#, c-format +msgid "argument list must have even number of elements" +msgstr "argumentlistan måste ha ett jämnt antal element" + +#: utils/adt/json.c:2195 +#, c-format +msgid "The arguments of json_build_object() must consist of alternating keys and values." +msgstr "Argumenten till json_build_object() måste bestå av varannan nyckel och varannat värde." + +#: utils/adt/json.c:2210 +#, c-format +msgid "argument %d cannot be null" +msgstr "argument %d kan inte vara null" + +#: utils/adt/json.c:2211 +#, c-format +msgid "Object keys should be text." +msgstr "Objektnycklar skall vara text." 
+ +#: utils/adt/json.c:2317 utils/adt/jsonb.c:1276 +#, c-format +msgid "array must have two columns" +msgstr "array:en måste ha två kolumner" + +#: utils/adt/json.c:2341 utils/adt/json.c:2425 utils/adt/jsonb.c:1300 +#: utils/adt/jsonb.c:1395 +#, c-format +msgid "null value not allowed for object key" +msgstr "null-värde tillåts inte som objektnyckel" + +#: utils/adt/json.c:2414 utils/adt/jsonb.c:1384 +#, c-format +msgid "mismatched array dimensions" +msgstr "array-dimensionerna stämmer inte" + +#: utils/adt/jsonb.c:258 +#, c-format +msgid "string too long to represent as jsonb string" +msgstr "strängen är för lång för att representeras som en jsonb-sträng" + +#: utils/adt/jsonb.c:259 +#, c-format +msgid "Due to an implementation restriction, jsonb strings cannot exceed %d bytes." +msgstr "På grund av en implementationsbegränsning så kan jsonb-strängar inte överstiga %d byte." + +#: utils/adt/jsonb.c:1147 +#, c-format +msgid "The arguments of jsonb_build_object() must consist of alternating keys and values." +msgstr "Argumenten till jsonb_build_object() måste bestå av varannan nyckel och varannat värde." + +#: utils/adt/jsonb.c:1159 +#, c-format +msgid "argument %d: key must not be null" +msgstr "argument %d: nyckeln får inte vara null" + +#: utils/adt/jsonb.c:1747 +#, c-format +msgid "object keys must be strings" +msgstr "objektnycklar måste vara strängar" + +#: utils/adt/jsonb.c:1910 +#, c-format +msgid "cannot cast jsonb null to type %s" +msgstr "kan inte typomvandla jsonb-null till typ %s" + +#: utils/adt/jsonb.c:1911 +#, c-format +msgid "cannot cast jsonb string to type %s" +msgstr "kan inte typomvandla jsonb-sträng till typ %s" + +#: utils/adt/jsonb.c:1912 +#, c-format +msgid "cannot cast jsonb numeric to type %s" +msgstr "kan inte typomvandla jsonb-numeric till typ %s" + +#: utils/adt/jsonb.c:1913 +#, c-format +msgid "cannot cast jsonb boolean to type %s" +msgstr "kan inte typomvandla jsonb-boolean till typ %s" + +#: utils/adt/jsonb.c:1914 +#, c-format +msgid "cannot cast jsonb array to type %s" +msgstr "kan inte typomvandla jsonb-array till typ %s" + +#: utils/adt/jsonb.c:1915 +#, c-format +msgid "cannot cast jsonb object to type %s" +msgstr "kan inte typomvandla jsonb-objekt till typ %s" + +#: utils/adt/jsonb.c:1916 +#, c-format +msgid "cannot cast jsonb array or object to type %s" +msgstr "kan inte typomvandla jsonb-array eller objekt till typ %s" + +#: utils/adt/jsonb_util.c:657 +#, c-format +msgid "number of jsonb object pairs exceeds the maximum allowed (%zu)" +msgstr "antalet jsonb-objektpar överskrider det maximalt tillåtna (%zu)" + +#: utils/adt/jsonb_util.c:698 +#, c-format +msgid "number of jsonb array elements exceeds the maximum allowed (%zu)" +msgstr "antalet jsonb-array-element överskrider det maximalt tillåtna (%zu)" + +#: utils/adt/jsonb_util.c:1569 utils/adt/jsonb_util.c:1589 +#, c-format +msgid "total size of jsonb array elements exceeds the maximum of %u bytes" +msgstr "total storlek på elementen i jsonb-array överskrider maximala %u byte" + +#: utils/adt/jsonb_util.c:1650 utils/adt/jsonb_util.c:1685 +#: utils/adt/jsonb_util.c:1705 +#, c-format +msgid "total size of jsonb object elements exceeds the maximum of %u bytes" +msgstr "total storlek på element i jsonb-objekt överskrider maximum på %u byte" + +#: utils/adt/jsonfuncs.c:523 utils/adt/jsonfuncs.c:688 +#: utils/adt/jsonfuncs.c:2276 utils/adt/jsonfuncs.c:2712 +#: utils/adt/jsonfuncs.c:3468 utils/adt/jsonfuncs.c:3812 +#, c-format +msgid "cannot call %s on a scalar" +msgstr "kan inte anropa %s på en skalär" +
+#: utils/adt/jsonfuncs.c:528 utils/adt/jsonfuncs.c:675 +#: utils/adt/jsonfuncs.c:2714 utils/adt/jsonfuncs.c:3457 +#, c-format +msgid "cannot call %s on an array" +msgstr "kan inte anropa %s på en array" + +#: utils/adt/jsonfuncs.c:1591 utils/adt/jsonfuncs.c:1626 +#, c-format +msgid "cannot get array length of a scalar" +msgstr "kan inte hämta array-längd på skalär" + +#: utils/adt/jsonfuncs.c:1595 utils/adt/jsonfuncs.c:1614 +#, c-format +msgid "cannot get array length of a non-array" +msgstr "kan inte hämta array-längd på icke-array" + +#: utils/adt/jsonfuncs.c:1691 +#, c-format +msgid "cannot call %s on a non-object" +msgstr "kan inte anropa %s på ett icke-objekt" + +#: utils/adt/jsonfuncs.c:1709 utils/adt/jsonfuncs.c:3261 +#: utils/adt/jsonfuncs.c:3612 +#, c-format +msgid "function returning record called in context that cannot accept type record" +msgstr "en funktion med post som värde anropades i sammanhang där poster inte kan godtagas" + +#: utils/adt/jsonfuncs.c:1949 +#, c-format +msgid "cannot deconstruct an array as an object" +msgstr "kan inte dekonstruera en array som ett objekt" + +#: utils/adt/jsonfuncs.c:1961 +#, c-format +msgid "cannot deconstruct a scalar" +msgstr "kan inte dekonstruera en skalär" + +#: utils/adt/jsonfuncs.c:2007 +#, c-format +msgid "cannot extract elements from a scalar" +msgstr "kan inte extrahera element från en skalär" + +#: utils/adt/jsonfuncs.c:2011 +#, c-format +msgid "cannot extract elements from an object" +msgstr "kan inte extrahera element från ett objekt" + +#: utils/adt/jsonfuncs.c:2263 utils/adt/jsonfuncs.c:3701 +#, c-format +msgid "cannot call %s on a non-array" +msgstr "kan inte anropa %s på icke-array" + +#: utils/adt/jsonfuncs.c:2329 utils/adt/jsonfuncs.c:2334 +#: utils/adt/jsonfuncs.c:2351 utils/adt/jsonfuncs.c:2357 +#, c-format +msgid "expected JSON array" +msgstr "förväntade JSON-array" + +#: utils/adt/jsonfuncs.c:2330 +#, c-format +msgid "See the value of key \"%s\"." +msgstr "Se värdet för nyckel \"%s\"." + +#: utils/adt/jsonfuncs.c:2352 +#, c-format +msgid "See the array element %s of key \"%s\"." +msgstr "Se array-element %s för nyckel \"%s\"." + +#: utils/adt/jsonfuncs.c:2358 +#, c-format +msgid "See the array element %s." +msgstr "Se array-element %s." + +#: utils/adt/jsonfuncs.c:2393 +#, c-format +msgid "malformed JSON array" +msgstr "felaktig JSON-array" + +#: utils/adt/jsonfuncs.c:3245 utils/adt/jsonfuncs.c:3597 +#, c-format +msgid "first argument of %s must be a row type" +msgstr "första argumentet till %s måste vara en radtyp" + +#: utils/adt/jsonfuncs.c:3263 utils/adt/jsonfuncs.c:3614 +#, c-format +msgid "Try calling the function in the FROM clause using a column definition list." +msgstr "Försök att anropa funktionen i FROM-klausulen med en kolumndefinitionslista."
+ +#: utils/adt/jsonfuncs.c:3718 utils/adt/jsonfuncs.c:3794 +#, c-format +msgid "argument of %s must be an array of objects" +msgstr "argumentet till %s måste vara en array med objekt" + +#: utils/adt/jsonfuncs.c:3746 +#, c-format +msgid "cannot call %s on an object" +msgstr "kan inte anropa %s på ett objekt" + +#: utils/adt/jsonfuncs.c:4223 utils/adt/jsonfuncs.c:4282 +#: utils/adt/jsonfuncs.c:4362 +#, c-format +msgid "cannot delete from scalar" +msgstr "kan inte radera från en skalär" + +#: utils/adt/jsonfuncs.c:4367 +#, c-format +msgid "cannot delete from object using integer index" +msgstr "kan inte radera från objekt genom att använda heltalsindex" + +#: utils/adt/jsonfuncs.c:4433 utils/adt/jsonfuncs.c:4525 +#, c-format +msgid "cannot set path in scalar" +msgstr "kan inte sätta sökväg i skalär" + +#: utils/adt/jsonfuncs.c:4478 +#, c-format +msgid "cannot delete path in scalar" +msgstr "kan inte radera sökväg i skalär" + +#: utils/adt/jsonfuncs.c:4648 +#, c-format +msgid "invalid concatenation of jsonb objects" +msgstr "ogiltig sammanslagning av jsonb-objekt" + +#: utils/adt/jsonfuncs.c:4682 +#, c-format +msgid "path element at position %d is null" +msgstr "sökvägselement vid position %d är null" + +#: utils/adt/jsonfuncs.c:4768 +#, c-format +msgid "cannot replace existing key" +msgstr "kan inte ersätta befintlig nyckel" + +#: utils/adt/jsonfuncs.c:4769 +#, c-format +msgid "Try using the function jsonb_set to replace key value." +msgstr "Försök använda funktionen jsonb_set för att ersätta nyckelvärde." + +#: utils/adt/jsonfuncs.c:4851 +#, c-format +msgid "path element at position %d is not an integer: \"%s\"" +msgstr "sökvägselement vid position %d är inte ett heltal: \"%s\"" + +#: utils/adt/jsonfuncs.c:4970 +#, c-format +msgid "wrong flag type, only arrays and scalars are allowed" +msgstr "fel flaggtyp, bara array:er och skalärer tillåts" + +#: utils/adt/jsonfuncs.c:4977 +#, c-format +msgid "flag array element is not a string" +msgstr "flaggelement i arrayen är inte en sträng" + +#: utils/adt/jsonfuncs.c:4978 utils/adt/jsonfuncs.c:5000 +#, c-format +msgid "Possible values are: \"string\", \"numeric\", \"boolean\", \"key\", and \"all\"" +msgstr "Möjliga värden är: \"string\", \"numeric\", \"boolean\", \"key\" samt \"all\"" + +#: utils/adt/jsonfuncs.c:4998 +#, c-format +msgid "wrong flag in flag array: \"%s\"" +msgstr "fel flagga i flagg-array: \"%s\"" + +#: utils/adt/levenshtein.c:133 +#, c-format +msgid "levenshtein argument exceeds maximum length of %d characters" +msgstr "levenshtein-argument överskrider maximala längden på %d tecken" + +#: utils/adt/like.c:183 utils/adt/selfuncs.c:5806 +#, c-format +msgid "could not determine which collation to use for ILIKE" +msgstr "kunde inte bestämma vilken jämförelse (collation) som skall användas för ILIKE" + +#: utils/adt/like_match.c:107 utils/adt/like_match.c:167 +#, c-format +msgid "LIKE pattern must not end with escape character" +msgstr "LIKE-mönster får inte sluta med ett escape-tecken" + +#: utils/adt/like_match.c:292 utils/adt/regexp.c:698 +#, c-format +msgid "invalid escape string" +msgstr "ogiltig escape-sträng" + +#: utils/adt/like_match.c:293 utils/adt/regexp.c:699 +#, c-format +msgid "Escape string must be empty or one character." +msgstr "Escape-sträng måste vara tom eller ett tecken."
+ +#: utils/adt/lockfuncs.c:664 +#, c-format +msgid "cannot use advisory locks during a parallel operation" +msgstr "kan inte använda rådgivande lås vid en parallell operation" + +#: utils/adt/mac.c:102 +#, c-format +msgid "invalid octet value in \"macaddr\" value: \"%s\"" +msgstr "ogiltigt oktet-värde i \"macaddr\"-värde: \"%s\"" + +#: utils/adt/mac8.c:563 +#, c-format +msgid "macaddr8 data out of range to convert to macaddr" +msgstr "macaddr8-data utanför giltigt intervall för att konverteras till macaddr" + +#: utils/adt/mac8.c:564 +#, c-format +msgid "Only addresses that have FF and FE as values in the 4th and 5th bytes from the left, for example xx:xx:xx:ff:fe:xx:xx:xx, are eligible to be converted from macaddr8 to macaddr." +msgstr "Bara adresser som har FF och FE som värden i 4:e och 5:e byten från vänster, till exempel xx:xx:xx:ff:fe:xx:xx:xx, är möjliga att konvertera från macaddr8 till macaddr." + +#: utils/adt/misc.c:239 +#, c-format +msgid "PID %d is not a PostgreSQL server process" +msgstr "PID %d är inte en PostgreSQL serverprocess" + +#: utils/adt/misc.c:290 +#, c-format +msgid "must be a superuser to cancel superuser query" +msgstr "måste vara superanvändare för att avbryta superanvändares fråga" + +#: utils/adt/misc.c:295 +#, c-format +msgid "must be a member of the role whose query is being canceled or member of pg_signal_backend" +msgstr "måste vara medlem i den roll vars fråga håller på att avbrytas eller medlem i pg_signal_backend" + +#: utils/adt/misc.c:314 +#, c-format +msgid "must be a superuser to terminate superuser process" +msgstr "måste vara superanvändare för att stoppa superanvändares process" + +#: utils/adt/misc.c:319 +#, c-format +msgid "must be a member of the role whose process is being terminated or member of pg_signal_backend" +msgstr "måste vara medlem i den roll vars process håller på att avslutas eller medlem i pg_signal_backend" + +#: utils/adt/misc.c:336 +#, c-format +msgid "failed to send signal to postmaster: %m" +msgstr "misslyckades med att sända en signal till postmaster: %m" + +#: utils/adt/misc.c:355 +#, c-format +msgid "must be superuser to rotate log files with adminpack 1.0" +msgstr "måste vara superanvändare för att rotera loggfiler med adminpack 1.0" + +#: utils/adt/misc.c:356 +#, c-format +msgid "Consider using pg_logfile_rotate(), which is part of core, instead." +msgstr "Överväg att använda pg_logfile_rotate() istället, vilken är en del av \"core\"."
+ +#: utils/adt/misc.c:361 utils/adt/misc.c:381 +#, c-format +msgid "rotation not possible because log collection not active" +msgstr "rotering är inte möjlig då logginsamling inte är aktiverad" + +#: utils/adt/misc.c:418 +#, c-format +msgid "global tablespace never has databases" +msgstr "globala tablespace:t innehåller aldrig databaser" + +#: utils/adt/misc.c:439 +#, c-format +msgid "%u is not a tablespace OID" +msgstr "%u är inte ett tablespace-OID" + +#: utils/adt/misc.c:626 +msgid "unreserved" +msgstr "oreserverad" + +#: utils/adt/misc.c:630 +msgid "unreserved (cannot be function or type name)" +msgstr "ej reserverad (kan inte vara funktion eller typnamn)" + +#: utils/adt/misc.c:634 +msgid "reserved (can be function or type name)" +msgstr "reserverad (kan vara funktion eller typnamn)" + +#: utils/adt/misc.c:638 +msgid "reserved" +msgstr "reserverad" + +#: utils/adt/misc.c:812 utils/adt/misc.c:826 utils/adt/misc.c:865 +#: utils/adt/misc.c:871 utils/adt/misc.c:877 utils/adt/misc.c:900 +#, c-format +msgid "string is not a valid identifier: \"%s\"" +msgstr "sträng är inte en giltig identifierare: \"%s\"" + +#: utils/adt/misc.c:814 +#, c-format +msgid "String has unclosed double quotes." +msgstr "Sträng har ej avslutade dubbla citattecken." + +#: utils/adt/misc.c:828 +#, c-format +msgid "Quoted identifier must not be empty." +msgstr "Citerad identifierare får inte vara tom." + +#: utils/adt/misc.c:867 +#, c-format +msgid "No valid identifier before \".\"." +msgstr "Ingen giltig identifierare innan \".\"." + +#: utils/adt/misc.c:873 +#, c-format +msgid "No valid identifier after \".\"." +msgstr "Ingen giltig identifierare efter \".\"." + +#: utils/adt/misc.c:934 +#, c-format +msgid "log format \"%s\" is not supported" +msgstr "loggformat \"%s\" stöds inte" + +#: utils/adt/misc.c:935 +#, c-format +msgid "The supported log formats are \"stderr\" and \"csvlog\"." +msgstr "Loggformat som stöds är \"stderr\" och \"csvlog\"." + +#: utils/adt/nabstime.c:137 +#, c-format +msgid "invalid time zone name: \"%s\"" +msgstr "ogiltigt tidszon-namn: \"%s\"" + +#: utils/adt/nabstime.c:482 utils/adt/nabstime.c:555 +#, c-format +msgid "cannot convert abstime \"invalid\" to timestamp" +msgstr "kan inte konvertera abstime \"invalid\" till timestamp" + +#: utils/adt/nabstime.c:782 +#, c-format +msgid "invalid status in external \"tinterval\" value" +msgstr "ogiltig status i externt \"tinterval\"-värde" + +#: utils/adt/nabstime.c:852 +#, c-format +msgid "cannot convert reltime \"invalid\" to interval" +msgstr "kan inte konvertera reltime \"invalid\" till interval" + +#: utils/adt/network.c:69 +#, c-format +msgid "invalid cidr value: \"%s\"" +msgstr "ogiltigt cidr-värde: \"%s\"" + +#: utils/adt/network.c:70 utils/adt/network.c:200 +#, c-format +msgid "Value has bits set to right of mask." +msgstr "Värdet har bitar till höger om masken." + +#: utils/adt/network.c:111 utils/adt/network.c:592 utils/adt/network.c:617 +#: utils/adt/network.c:642 +#, c-format +msgid "could not format inet value: %m" +msgstr "kunde inte formatera inet-värde: %m" + +#. translator: %s is inet or cidr +#: utils/adt/network.c:168 +#, c-format +msgid "invalid address family in external \"%s\" value" +msgstr "ogiltig adressfamilj i externt \"%s\"-värde" + +#. translator: %s is inet or cidr +#: utils/adt/network.c:175 +#, c-format +msgid "invalid bits in external \"%s\" value" +msgstr "ogiltiga bitar i externt \"%s\"-värde" + +#.
translator: %s is inet or cidr +#: utils/adt/network.c:184 +#, c-format +msgid "invalid length in external \"%s\" value" +msgstr "ogiltig längd i externt \"%s\"-värde" + +#: utils/adt/network.c:199 +#, c-format +msgid "invalid external \"cidr\" value" +msgstr "ogiltigt externt \"cidr\"-värde" + +#: utils/adt/network.c:295 utils/adt/network.c:318 +#, c-format +msgid "invalid mask length: %d" +msgstr "ogiltig masklängd: %d" + +#: utils/adt/network.c:660 +#, c-format +msgid "could not format cidr value: %m" +msgstr "kunde inte formatera \"cidr\"-värde: %m" + +#: utils/adt/network.c:893 +#, c-format +msgid "cannot merge addresses from different families" +msgstr "kan inte slå samman adresser från olika familjer" + +#: utils/adt/network.c:1309 +#, c-format +msgid "cannot AND inet values of different sizes" +msgstr "kan inte AND:a inet-värden av olika storlek" + +#: utils/adt/network.c:1341 +#, c-format +msgid "cannot OR inet values of different sizes" +msgstr "kan inte OR:a inet-värden av olika storlek" + +#: utils/adt/network.c:1402 utils/adt/network.c:1478 +#, c-format +msgid "result is out of range" +msgstr "resultatet är utanför giltigt intervall" + +#: utils/adt/network.c:1443 +#, c-format +msgid "cannot subtract inet values of different sizes" +msgstr "kan inte subtrahera inet-värden av olika storlek" + +#: utils/adt/numeric.c:830 +#, c-format +msgid "invalid sign in external \"numeric\" value" +msgstr "ogiltigt tecken i externt \"numeric\"-värde" + +#: utils/adt/numeric.c:836 +#, c-format +msgid "invalid scale in external \"numeric\" value" +msgstr "ogiltig skala i externt \"numeric\"-värde" + +#: utils/adt/numeric.c:845 +#, c-format +msgid "invalid digit in external \"numeric\" value" +msgstr "felaktig siffra i externt numeriskt (\"numeric\") värde" + +#: utils/adt/numeric.c:1035 utils/adt/numeric.c:1049 +#, c-format +msgid "NUMERIC precision %d must be between 1 and %d" +msgstr "Precisionen %d för NUMERIC måste vara mellan 1 och %d" + +#: utils/adt/numeric.c:1040 +#, c-format +msgid "NUMERIC scale %d must be between 0 and precision %d" +msgstr "Skalan %d för NUMERIC måste vara mellan 0 och precisionen %d" + +#: utils/adt/numeric.c:1058 +#, c-format +msgid "invalid NUMERIC type modifier" +msgstr "ogiltig typmodifierare för NUMERIC" + +#: utils/adt/numeric.c:1390 +#, c-format +msgid "start value cannot be NaN" +msgstr "startvärde får inte vara NaN" + +#: utils/adt/numeric.c:1395 +#, c-format +msgid "stop value cannot be NaN" +msgstr "stoppvärde får inte vara NaN" + +#: utils/adt/numeric.c:1405 +#, c-format +msgid "step size cannot be NaN" +msgstr "stegstorlek får inte vara NaN" + +#: utils/adt/numeric.c:2736 utils/adt/numeric.c:5725 utils/adt/numeric.c:6170 +#: utils/adt/numeric.c:7878 utils/adt/numeric.c:8303 utils/adt/numeric.c:8417 +#: utils/adt/numeric.c:8490 +#, c-format +msgid "value overflows numeric format" +msgstr "overflow på värde i formatet numeric" + +#: utils/adt/numeric.c:3095 +#, c-format +msgid "cannot convert NaN to integer" +msgstr "kan inte konvertera NaN till ett integer" + +#: utils/adt/numeric.c:3161 +#, c-format +msgid "cannot convert NaN to bigint" +msgstr "kan inte konvertera NaN till ett bigint" + +#: utils/adt/numeric.c:3206 +#, c-format +msgid "cannot convert NaN to smallint" +msgstr "kan inte konvertera NaN till ett smallint" + +#: utils/adt/numeric.c:3243 utils/adt/numeric.c:3314 +#, c-format +msgid "cannot convert infinity to numeric" +msgstr "kan inte konvertera oändlighet till numeric" + +#: utils/adt/numeric.c:6240 +#, c-format +msgid "numeric field 
overflow" +msgstr "overflow i numeric-fält" + +#: utils/adt/numeric.c:6241 +#, c-format +msgid "A field with precision %d, scale %d must round to an absolute value less than %s%d." +msgstr "Ett fält med precision %d, skala %d måste avrundas till ett absolut värde mindre än %s%d." + +#: utils/adt/numutils.c:89 +#, c-format +msgid "value \"%s\" is out of range for 8-bit integer" +msgstr "värdet \"%s\" är utanför intervallet för ett 8-bitars heltal" + +#: utils/adt/oid.c:290 +#, c-format +msgid "invalid oidvector data" +msgstr "ogiltig oidvector-data" + +#: utils/adt/oracle_compat.c:896 +#, c-format +msgid "requested character too large" +msgstr "efterfrågat tecken är för stort" + +#: utils/adt/oracle_compat.c:946 utils/adt/oracle_compat.c:1008 +#, c-format +msgid "requested character too large for encoding: %d" +msgstr "efterfrågat tecken är för stort för kodning: %d" + +#: utils/adt/oracle_compat.c:987 +#, c-format +msgid "requested character not valid for encoding: %d" +msgstr "efterfrågat tecken är inte giltigt för kodning: %d" + +#: utils/adt/oracle_compat.c:1001 +#, c-format +msgid "null character not permitted" +msgstr "nolltecken tillåts inte" + +#: utils/adt/orderedsetaggs.c:442 utils/adt/orderedsetaggs.c:546 +#: utils/adt/orderedsetaggs.c:684 +#, c-format +msgid "percentile value %g is not between 0 and 1" +msgstr "percentil-värde %g är inte mellan 0 och 1" + +#: utils/adt/pg_locale.c:1034 +#, c-format +msgid "Apply system library package updates." +msgstr "Applicera paketuppdateringar för systembibliotek." + +#: utils/adt/pg_locale.c:1249 +#, c-format +msgid "could not create locale \"%s\": %m" +msgstr "kunde inte skapa locale \"%s\": %m" + +#: utils/adt/pg_locale.c:1252 +#, c-format +msgid "The operating system could not find any locale data for the locale name \"%s\"." +msgstr "Operativsystemet kunde inte hitta någon lokaldata för lokalnamnet \"%s\"." + +#: utils/adt/pg_locale.c:1353 +#, c-format +msgid "collations with different collate and ctype values are not supported on this platform" +msgstr "jämförelser (collations) med olika collate- och ctype-värden stöds inte på denna plattform" + +#: utils/adt/pg_locale.c:1362 +#, c-format +msgid "collation provider LIBC is not supported on this platform" +msgstr "leverantören LIBC för jämförelse (collation) stöds inte på denna plattform" + +#: utils/adt/pg_locale.c:1374 +#, c-format +msgid "collations with different collate and ctype values are not supported by ICU" +msgstr "jämförelser (collation) med olika collate- och ctype-värden stöds inte av ICU" + +#: utils/adt/pg_locale.c:1380 utils/adt/pg_locale.c:1468 +#, c-format +msgid "could not open collator for locale \"%s\": %s" +msgstr "kunde inte öppna jämförelse för lokal \"%s\": %s" + +#: utils/adt/pg_locale.c:1391 +#, c-format +msgid "ICU is not supported in this build" +msgstr "ICU stöds inte av detta bygge" + +#: utils/adt/pg_locale.c:1392 +#, c-format +msgid "You need to rebuild PostgreSQL using --with-icu." +msgstr "Du behöver bygga om PostgreSQL med --with-icu." 
+ +#: utils/adt/pg_locale.c:1412 +#, c-format +msgid "collation \"%s\" has no actual version, but a version was specified" +msgstr "jämförelse (collation) \"%s\" har ingen version men en version angavs" + +#: utils/adt/pg_locale.c:1419 +#, c-format +msgid "collation \"%s\" has version mismatch" +msgstr "jämförelse (collation) \"%s\" har en version som inte matchar" + +#: utils/adt/pg_locale.c:1421 +#, c-format +msgid "The collation in the database was created using version %s, but the operating system provides version %s." +msgstr "Jämförelsen (collation) i databasen har skapats med version %s men operativsystemet har version %s." + +#: utils/adt/pg_locale.c:1424 +#, c-format +msgid "Rebuild all objects affected by this collation and run ALTER COLLATION %s REFRESH VERSION, or build PostgreSQL with the right library version." +msgstr "Bygg om alla objekt som påverkas av denna jämförelse (collation) och kör ALTER COLLATION %s REFRESH VERSION eller bygg PostgreSQL med rätt bibliotekversion." + +#: utils/adt/pg_locale.c:1508 +#, c-format +msgid "could not open ICU converter for encoding \"%s\": %s" +msgstr "kunde inte öppna ICU-konverterare för kodning \"%s\": %s" + +#: utils/adt/pg_locale.c:1539 utils/adt/pg_locale.c:1548 +#, c-format +msgid "ucnv_toUChars failed: %s" +msgstr "ucnv_toUChars misslyckades: %s" + +#: utils/adt/pg_locale.c:1577 utils/adt/pg_locale.c:1586 +#, c-format +msgid "ucnv_fromUChars failed: %s" +msgstr "ucnv_fromUChars misslyckades: %s" + +#: utils/adt/pg_locale.c:1758 +#, c-format +msgid "invalid multibyte character for locale" +msgstr "ogiltigt multibyte-tecken för lokalen" + +#: utils/adt/pg_locale.c:1759 +#, c-format +msgid "The server's LC_CTYPE locale is probably incompatible with the database encoding." +msgstr "Serverns LC_CTYPE-lokal är troligen inkompatibel med databasens teckenkodning." 
+ +#: utils/adt/pg_upgrade_support.c:28 +#, c-format +msgid "function can only be called when server is in binary upgrade mode" +msgstr "funktionen kan bara anropas när servern är i binärt uppgraderingsläge" + +#: utils/adt/pgstatfuncs.c:474 +#, c-format +msgid "invalid command name: \"%s\"" +msgstr "ogiltigt kommandonamn: \"%s\"" + +#: utils/adt/pseudotypes.c:247 +#, c-format +msgid "cannot accept a value of a shell type" +msgstr "kan inte acceptera ett värde av typen shell" + +#: utils/adt/pseudotypes.c:260 +#, c-format +msgid "cannot display a value of a shell type" +msgstr "kan inte visa ett värde av typen shell" + +#: utils/adt/pseudotypes.c:350 utils/adt/pseudotypes.c:376 +#, c-format +msgid "cannot output a value of type %s" +msgstr "kan inte mata ut ett värde av typ %s" + +#: utils/adt/pseudotypes.c:403 +#, c-format +msgid "cannot display a value of type %s" +msgstr "kan inte visa ett värde av typ %s" + +#: utils/adt/rangetypes.c:405 +#, c-format +msgid "range constructor flags argument must not be null" +msgstr "konstruktorflaggargument till range får inte vara null" + +#: utils/adt/rangetypes.c:992 +#, c-format +msgid "result of range difference would not be contiguous" +msgstr "resultatet av range-skillnad skulle inte vara angränsande" + +#: utils/adt/rangetypes.c:1053 +#, c-format +msgid "result of range union would not be contiguous" +msgstr "resultatet av range-union skulle inte vara angränsande" + +#: utils/adt/rangetypes.c:1597 +#, c-format +msgid "range lower bound must be less than or equal to range upper bound" +msgstr "lägre gräns för range måste vara lägre eller lika med övre gräns för range" + +#: utils/adt/rangetypes.c:1980 utils/adt/rangetypes.c:1993 +#: utils/adt/rangetypes.c:2007 +#, c-format +msgid "invalid range bound flags" +msgstr "ogiltig gränsflagga för range" + +#: utils/adt/rangetypes.c:1981 utils/adt/rangetypes.c:1994 +#: utils/adt/rangetypes.c:2008 +#, c-format +msgid "Valid values are \"[]\", \"[)\", \"(]\", and \"()\"." +msgstr "Giltiga värden är \"[]\", \"[)\", \"(]\" och \"()\"." + +#: utils/adt/rangetypes.c:2073 utils/adt/rangetypes.c:2090 +#: utils/adt/rangetypes.c:2103 utils/adt/rangetypes.c:2121 +#: utils/adt/rangetypes.c:2132 utils/adt/rangetypes.c:2176 +#: utils/adt/rangetypes.c:2184 +#, c-format +msgid "malformed range literal: \"%s\"" +msgstr "trasig range-litteral: \"%s\"" + +#: utils/adt/rangetypes.c:2075 +#, c-format +msgid "Junk after \"empty\" key word." +msgstr "Skräp efter nyckelordet \"empty\"." + +#: utils/adt/rangetypes.c:2092 +#, c-format +msgid "Missing left parenthesis or bracket." +msgstr "Saknar vänster parentes eller hakparentes." + +#: utils/adt/rangetypes.c:2105 +#, c-format +msgid "Missing comma after lower bound." +msgstr "Saknar komma efter lägre gräns." + +#: utils/adt/rangetypes.c:2123 +#, c-format +msgid "Too many commas." +msgstr "För många komman." + +#: utils/adt/rangetypes.c:2134 +#, c-format +msgid "Junk after right parenthesis or bracket." +msgstr "Skräp efter höger parentes eller hakparentes." 
+ +#: utils/adt/regexp.c:285 utils/adt/regexp.c:1344 utils/adt/varlena.c:3993 +#, c-format +msgid "regular expression failed: %s" +msgstr "reguljärt uttryck misslyckades: %s" + +#: utils/adt/regexp.c:422 +#, c-format +msgid "invalid regexp option: \"%c\"" +msgstr "ogiltig regexp-flagga: \"%c\"" + +#: utils/adt/regexp.c:862 +#, c-format +msgid "regexp_match does not support the global option" +msgstr "regexp_match stöder inte global-flaggan" + +#: utils/adt/regexp.c:863 +#, c-format +msgid "Use the regexp_matches function instead." +msgstr "Använd regexp_matches-funktionen istället." + +#: utils/adt/regexp.c:1163 +#, c-format +msgid "regexp_split_to_table does not support the global option" +msgstr "regexp_split_to_table stöder inte global-flaggan" + +#: utils/adt/regexp.c:1219 +#, c-format +msgid "regexp_split_to_array does not support the global option" +msgstr "regexp_split_to_array stöder inte global-flaggan" + +#: utils/adt/regproc.c:106 +#, c-format +msgid "more than one function named \"%s\"" +msgstr "mer än en funktion med namn \"%s\"" + +#: utils/adt/regproc.c:524 +#, c-format +msgid "more than one operator named %s" +msgstr "mer än en operator med namn %s" + +#: utils/adt/regproc.c:696 utils/adt/regproc.c:737 utils/adt/regproc.c:1865 +#: utils/adt/ruleutils.c:9065 utils/adt/ruleutils.c:9233 +#, c-format +msgid "too many arguments" +msgstr "för många argument" + +#: utils/adt/regproc.c:697 utils/adt/regproc.c:738 +#, c-format +msgid "Provide two argument types for operator." +msgstr "Ange två argumenttyper för operatorn." + +#: utils/adt/regproc.c:1449 utils/adt/regproc.c:1473 utils/adt/regproc.c:1574 +#: utils/adt/regproc.c:1598 utils/adt/regproc.c:1700 utils/adt/regproc.c:1705 +#: utils/adt/varlena.c:3246 utils/adt/varlena.c:3251 +#, c-format +msgid "invalid name syntax" +msgstr "ogiltig namnsyntax" + +#: utils/adt/regproc.c:1763 +#, c-format +msgid "expected a left parenthesis" +msgstr "förväntade en vänsterparentes" + +#: utils/adt/regproc.c:1779 +#, c-format +msgid "expected a right parenthesis" +msgstr "förväntade en högerparentes" + +#: utils/adt/regproc.c:1798 +#, c-format +msgid "expected a type name" +msgstr "förväntade ett typnamn" + +#: utils/adt/regproc.c:1830 +#, c-format +msgid "improper type name" +msgstr "olämpligt typnamn" + +#: utils/adt/ri_triggers.c:337 utils/adt/ri_triggers.c:2085 +#: utils/adt/ri_triggers.c:2842 +#, c-format +msgid "insert or update on table \"%s\" violates foreign key constraint \"%s\"" +msgstr "insert eller update på tabell \"%s\" bryter mot främmande nyckel-villkoret \"%s\"" + +#: utils/adt/ri_triggers.c:340 utils/adt/ri_triggers.c:2088 +#, c-format +msgid "MATCH FULL does not allow mixing of null and nonnull key values." +msgstr "MATCH FULL tillåter inte att man blandar null och icke-null-värden."
+ +#: utils/adt/ri_triggers.c:2273 +#, c-format +msgid "function \"%s\" must be fired for INSERT" +msgstr "funktionen \"%s\" måste köras för INSERT" + +#: utils/adt/ri_triggers.c:2279 +#, c-format +msgid "function \"%s\" must be fired for UPDATE" +msgstr "funktionen \"%s\" måste köras för UPDATE" + +#: utils/adt/ri_triggers.c:2285 +#, c-format +msgid "function \"%s\" must be fired for DELETE" +msgstr "funktionen \"%s\" måste köras för DELETE" + +#: utils/adt/ri_triggers.c:2308 +#, c-format +msgid "no pg_constraint entry for trigger \"%s\" on table \"%s\"" +msgstr "ingen pg_constraint-post för utlösare \"%s\" på tabell \"%s\"" + +#: utils/adt/ri_triggers.c:2310 +#, c-format +msgid "Remove this referential integrity trigger and its mates, then do ALTER TABLE ADD CONSTRAINT." +msgstr "Ta bort denna utlösare för referensiell integritet och dess kollegor, gör sen ALTER TABLE ADD CONSTRAINT." + +#: utils/adt/ri_triggers.c:2689 +#, c-format +msgid "referential integrity query on \"%s\" from constraint \"%s\" on \"%s\" gave unexpected result" +msgstr "referentiell integritetsfråga på \"%s\" från villkor \"%s\" på \"%s\" gav oväntat resultat" + +#: utils/adt/ri_triggers.c:2693 +#, c-format +msgid "This is most likely due to a rule having rewritten the query." +msgstr "Detta beror troligen på att en regel har skrivit om frågan." + +#: utils/adt/ri_triggers.c:2846 +#, c-format +msgid "Key (%s)=(%s) is not present in table \"%s\"." +msgstr "Nyckel (%s)=(%s) finns inte i tabellen \"%s\"." + +#: utils/adt/ri_triggers.c:2849 +#, c-format +msgid "Key is not present in table \"%s\"." +msgstr "Nyckeln finns inte i tabellen \"%s\"." + +#: utils/adt/ri_triggers.c:2855 +#, c-format +msgid "update or delete on table \"%s\" violates foreign key constraint \"%s\" on table \"%s\"" +msgstr "update eller delete på tabell \"%s\" bryter mot främmande nyckel-villkoret \"%s\" för tabell \"%s\"" + +#: utils/adt/ri_triggers.c:2860 +#, c-format +msgid "Key (%s)=(%s) is still referenced from table \"%s\"." +msgstr "Nyckeln (%s)=(%s) refereras fortfarande till från tabell \"%s\"." + +#: utils/adt/ri_triggers.c:2863 +#, c-format +msgid "Key is still referenced from table \"%s\"." +msgstr "Nyckel refereras fortfarande till från tabell \"%s\"." + +#: utils/adt/rowtypes.c:103 utils/adt/rowtypes.c:481 +#, c-format +msgid "input of anonymous composite types is not implemented" +msgstr "inläsning av anonym composite-typ är inte implementerat" + +#: utils/adt/rowtypes.c:155 utils/adt/rowtypes.c:184 utils/adt/rowtypes.c:207 +#: utils/adt/rowtypes.c:215 utils/adt/rowtypes.c:267 utils/adt/rowtypes.c:275 +#, c-format +msgid "malformed record literal: \"%s\"" +msgstr "felaktig postliteral: \"%s\"" + +#: utils/adt/rowtypes.c:156 +#, c-format +msgid "Missing left parenthesis." +msgstr "Saknar vänster parentes." + +#: utils/adt/rowtypes.c:185 +#, c-format +msgid "Too few columns." +msgstr "För få kolumner." + +#: utils/adt/rowtypes.c:268 +#, c-format +msgid "Too many columns." +msgstr "För många kolumner." + +#: utils/adt/rowtypes.c:276 +#, c-format +msgid "Junk after right parenthesis."
+msgstr "Skräp efter höger parentes" + +#: utils/adt/rowtypes.c:530 +#, c-format +msgid "wrong number of columns: %d, expected %d" +msgstr "fel antal kolumner: %d, förväntade %d" + +#: utils/adt/rowtypes.c:558 +#, c-format +msgid "wrong data type: %u, expected %u" +msgstr "fel datatyp: %u, förväntade %u" + +#: utils/adt/rowtypes.c:619 +#, c-format +msgid "improper binary format in record column %d" +msgstr "felaktigt binärt format i postkolumn %d" + +#: utils/adt/rowtypes.c:910 utils/adt/rowtypes.c:1154 utils/adt/rowtypes.c:1413 +#: utils/adt/rowtypes.c:1657 +#, c-format +msgid "cannot compare dissimilar column types %s and %s at record column %d" +msgstr "kan inte jämföra olika kolumntyper %s och %s vid postkolumn %d" + +#: utils/adt/rowtypes.c:999 utils/adt/rowtypes.c:1225 utils/adt/rowtypes.c:1508 +#: utils/adt/rowtypes.c:1731 +#, c-format +msgid "cannot compare record types with different numbers of columns" +msgstr "kan inte jämföra record-typer med olika antal kolumner" + +#: utils/adt/ruleutils.c:4756 +#, c-format +msgid "rule \"%s\" has unsupported event type %d" +msgstr "regel \"%s\" har en icke stödd händelsetyp %d" + +#: utils/adt/selfuncs.c:5791 +#, c-format +msgid "case insensitive matching not supported on type bytea" +msgstr "matchning utan skiftlägeskänslighet stöds inte för typen bytea" + +#: utils/adt/selfuncs.c:5893 +#, c-format +msgid "regular-expression matching not supported on type bytea" +msgstr "matching med reguljär-uttryck stöds inte för typen bytea" + +#: utils/adt/timestamp.c:107 +#, c-format +msgid "TIMESTAMP(%d)%s precision must not be negative" +msgstr "prceision för TIMESTAMP(%d)%s kan inte vara negativ" + +#: utils/adt/timestamp.c:113 +#, c-format +msgid "TIMESTAMP(%d)%s precision reduced to maximum allowed, %d" +msgstr "precision för TIMESTAMP(%d)%s reducerad till högsta tillåtna, %d" + +#: utils/adt/timestamp.c:176 utils/adt/timestamp.c:416 +#, c-format +msgid "timestamp out of range: \"%s\"" +msgstr "timestamp utanför giltigt intervall: \"%s\"" + +#: utils/adt/timestamp.c:194 utils/adt/timestamp.c:434 +#: utils/adt/timestamp.c:941 +#, c-format +msgid "date/time value \"%s\" is no longer supported" +msgstr "datum/tid-värde \"%s\" stöds inte längre" + +#: utils/adt/timestamp.c:362 +#, c-format +msgid "timestamp(%d) precision must be between %d and %d" +msgstr "timestamp(%d)-precision måste vara mellan %d och %d" + +#: utils/adt/timestamp.c:484 +#, c-format +msgid "invalid input syntax for numeric time zone: \"%s\"" +msgstr "felaktig indatasyntax för numerisk tidszon: \"%s\"" + +#: utils/adt/timestamp.c:486 +#, c-format +msgid "Numeric time zones must have \"-\" or \"+\" as first character." +msgstr "Numeriska tidszoner måste ha \"-\" eller \"+\" som sitt första tecken." 
+ +#: utils/adt/timestamp.c:499 +#, c-format +msgid "numeric time zone \"%s\" out of range" +msgstr "numerisk tidszon \"%s\" utanför giltigt intervall" + +#: utils/adt/timestamp.c:601 utils/adt/timestamp.c:611 +#: utils/adt/timestamp.c:619 +#, c-format +msgid "timestamp out of range: %d-%02d-%02d %d:%02d:%02g" +msgstr "timestamp utanför giltigt intervall: %d-%02d-%02d %d:%02d:%02g" + +#: utils/adt/timestamp.c:720 +#, c-format +msgid "timestamp cannot be NaN" +msgstr "timestamp kan inte vara NaN" + +#: utils/adt/timestamp.c:738 utils/adt/timestamp.c:750 +#, c-format +msgid "timestamp out of range: \"%g\"" +msgstr "timestamp utanför giltigt intervall: \"%g\"" + +#: utils/adt/timestamp.c:935 utils/adt/timestamp.c:1505 +#: utils/adt/timestamp.c:1918 utils/adt/timestamp.c:3013 +#: utils/adt/timestamp.c:3018 utils/adt/timestamp.c:3023 +#: utils/adt/timestamp.c:3073 utils/adt/timestamp.c:3080 +#: utils/adt/timestamp.c:3087 utils/adt/timestamp.c:3107 +#: utils/adt/timestamp.c:3114 utils/adt/timestamp.c:3121 +#: utils/adt/timestamp.c:3151 utils/adt/timestamp.c:3159 +#: utils/adt/timestamp.c:3203 utils/adt/timestamp.c:3630 +#: utils/adt/timestamp.c:3755 utils/adt/timestamp.c:4140 +#, c-format +msgid "interval out of range" +msgstr "interval utanför giltigt intervall" + +#: utils/adt/timestamp.c:1068 utils/adt/timestamp.c:1101 +#, c-format +msgid "invalid INTERVAL type modifier" +msgstr "ogiltig modifierare för typen INTERVAL" + +#: utils/adt/timestamp.c:1084 +#, c-format +msgid "INTERVAL(%d) precision must not be negative" +msgstr "INTERVAL(%d)-precision kan inte vara negativ" + +#: utils/adt/timestamp.c:1090 +#, c-format +msgid "INTERVAL(%d) precision reduced to maximum allowed, %d" +msgstr "INTERVAL(%d)-precision reducerad till maximalt tillåtna, %d" + +#: utils/adt/timestamp.c:1462 +#, c-format +msgid "interval(%d) precision must be between %d and %d" +msgstr "interval(%d)-precision måste vara mellan %d och %d" + +#: utils/adt/timestamp.c:2614 +#, c-format +msgid "cannot subtract infinite timestamps" +msgstr "kan inte subtrahera oändliga tider (timestamp)" + +#: utils/adt/timestamp.c:3883 utils/adt/timestamp.c:4400 +#: utils/adt/timestamp.c:4567 utils/adt/timestamp.c:4588 +#, c-format +msgid "timestamp units \"%s\" not supported" +msgstr "timestamp-enhet \"%s\" stöds inte" + +#: utils/adt/timestamp.c:3897 utils/adt/timestamp.c:4354 +#: utils/adt/timestamp.c:4598 +#, c-format +msgid "timestamp units \"%s\" not recognized" +msgstr "timestamp-enhet \"%s\" känns inte igen" + +#: utils/adt/timestamp.c:4029 utils/adt/timestamp.c:4395 +#: utils/adt/timestamp.c:4768 utils/adt/timestamp.c:4790 +#, c-format +msgid "timestamp with time zone units \"%s\" not supported" +msgstr "timestamp with time zone, enhet \"%s\" stöds inte" + +#: utils/adt/timestamp.c:4046 utils/adt/timestamp.c:4349 +#: utils/adt/timestamp.c:4799 +#, c-format +msgid "timestamp with time zone units \"%s\" not recognized" +msgstr "timestamp with time zone, enhet \"%s\" känns inte igen" + +#: utils/adt/timestamp.c:4127 +#, c-format +msgid "interval units \"%s\" not supported because months usually have fractional weeks" +msgstr "intervallenhet \"%s\" stöds inte då månader typiskt har veckor på bråkform" + +#: utils/adt/timestamp.c:4133 utils/adt/timestamp.c:4893 +#, c-format +msgid "interval units \"%s\" not supported" +msgstr "intervallenhet \"%s\" stöds inte" + +#: utils/adt/timestamp.c:4149 utils/adt/timestamp.c:4916 +#, c-format +msgid "interval units \"%s\" not recognized" +msgstr "intervallenhet \"%s\" känns inte igen" + +#:
utils/adt/trigfuncs.c:42 +#, c-format +msgid "suppress_redundant_updates_trigger: must be called as trigger" +msgstr "suppress_redundant_updates_trigger: måste anropas som utlösare" + +#: utils/adt/trigfuncs.c:48 +#, c-format +msgid "suppress_redundant_updates_trigger: must be called on update" +msgstr "suppress_redundant_updates_trigger: måste anropas vid update" + +#: utils/adt/trigfuncs.c:54 +#, c-format +msgid "suppress_redundant_updates_trigger: must be called before update" +msgstr "suppress_redundant_updates_trigger: måste anropas innan update" + +#: utils/adt/trigfuncs.c:60 +#, c-format +msgid "suppress_redundant_updates_trigger: must be called for each row" +msgstr "suppress_redundant_updates_trigger: måste anropas för varje rad" + +#: utils/adt/tsgistidx.c:100 +#, c-format +msgid "gtsvector_in not implemented" +msgstr "gtsvector_in är inte implementerad" + +#: utils/adt/tsquery.c:200 +#, c-format +msgid "distance in phrase operator should not be greater than %d" +msgstr "distans i frasoperator skall inte vara större än %d" + +#: utils/adt/tsquery.c:310 utils/adt/tsquery.c:725 +#: utils/adt/tsvector_parser.c:133 +#, c-format +msgid "syntax error in tsquery: \"%s\"" +msgstr "syntaxfel i tsquery: \"%s\"" + +#: utils/adt/tsquery.c:334 +#, c-format +msgid "no operand in tsquery: \"%s\"" +msgstr "ingen operand i tsquery: \"%s\"" + +#: utils/adt/tsquery.c:568 +#, c-format +msgid "value is too big in tsquery: \"%s\"" +msgstr "värdet är för stort i tsquery: \"%s\"" + +#: utils/adt/tsquery.c:573 +#, c-format +msgid "operand is too long in tsquery: \"%s\"" +msgstr "operanden är för lång i tsquery: \"%s\"" + +#: utils/adt/tsquery.c:601 +#, c-format +msgid "word is too long in tsquery: \"%s\"" +msgstr "ord för långt i tsquery: \"%s\"" + +#: utils/adt/tsquery.c:870 +#, c-format +msgid "text-search query doesn't contain lexemes: \"%s\"" +msgstr "textsökfråga innehåller inte lexem: \"%s\"" + +#: utils/adt/tsquery.c:881 utils/adt/tsquery_util.c:375 +#, c-format +msgid "tsquery is too large" +msgstr "tsquery är för stor" + +#: utils/adt/tsquery_cleanup.c:407 +#, c-format +msgid "text-search query contains only stop words or doesn't contain lexemes, ignored" +msgstr "textsökfråga innehåller bara stoppord eller innehåller inga lexem, hoppar över" + +#: utils/adt/tsquery_op.c:123 +#, c-format +msgid "distance in phrase operator should be non-negative and less than %d" +msgstr "distans i frasoperator skall vara icke-negativ och mindre än %d" + +#: utils/adt/tsquery_rewrite.c:321 +#, c-format +msgid "ts_rewrite query must return two tsquery columns" +msgstr "ts_rewrite-fråga måste returnera två tsquery-kolumner" + +#: utils/adt/tsrank.c:413 +#, c-format +msgid "array of weight must be one-dimensional" +msgstr "array med vikter måste vara endimensionell" + +#: utils/adt/tsrank.c:418 +#, c-format +msgid "array of weight is too short" +msgstr "array med vikter är för kort" + +#: utils/adt/tsrank.c:423 +#, c-format +msgid "array of weight must not contain nulls" +msgstr "array med vikter får inte innehålla null-värden" + +#: utils/adt/tsrank.c:432 utils/adt/tsrank.c:869 +#, c-format +msgid "weight out of range" +msgstr "vikten är utanför giltigt intervall" + +#: utils/adt/tsvector.c:214 +#, c-format +msgid "word is too long (%ld bytes, max %ld bytes)" +msgstr "ordet är för långt (%ld byte, max %ld byte)" + +#: utils/adt/tsvector.c:221 +#, c-format +msgid "string is too long for tsvector (%ld bytes, max %ld bytes)" +msgstr "strängen är för lång för tsvector (%ld byte, max %ld byte)" + +#: 
utils/adt/tsvector_op.c:323 utils/adt/tsvector_op.c:610 +#: utils/adt/tsvector_op.c:778 +#, c-format +msgid "lexeme array may not contain nulls" +msgstr "lexem-array:en får inte innehålla null-värden" + +#: utils/adt/tsvector_op.c:853 +#, c-format +msgid "weight array may not contain nulls" +msgstr "vikt-array:en får inte innehålla null-värden" + +#: utils/adt/tsvector_op.c:877 +#, c-format +msgid "unrecognized weight: \"%c\"" +msgstr "okänd vikt: \"%c\"" + +#: utils/adt/tsvector_op.c:2314 +#, c-format +msgid "ts_stat query must return one tsvector column" +msgstr "ts_stat-frågan måste returnera en tsvector-kolumn" + +#: utils/adt/tsvector_op.c:2496 +#, c-format +msgid "tsvector column \"%s\" does not exist" +msgstr "tsvector-kolumnen \"%s\" existerar inte" + +#: utils/adt/tsvector_op.c:2503 +#, c-format +msgid "column \"%s\" is not of tsvector type" +msgstr "kolumnen \"%s\" är inte av typen tsvector" + +#: utils/adt/tsvector_op.c:2515 +#, c-format +msgid "configuration column \"%s\" does not exist" +msgstr "konfigurationskolumnen \"%s\" existerar inte" + +#: utils/adt/tsvector_op.c:2521 +#, c-format +msgid "column \"%s\" is not of regconfig type" +msgstr "kolumn \"%s\" har inte regconfig-typ" + +#: utils/adt/tsvector_op.c:2528 +#, c-format +msgid "configuration column \"%s\" must not be null" +msgstr "konfigurationskolumn \"%s\" får inte vara null" + +#: utils/adt/tsvector_op.c:2541 +#, c-format +msgid "text search configuration name \"%s\" must be schema-qualified" +msgstr "textsökkonfigurationsnamn \"%s\" måste vara angivet med schema" + +#: utils/adt/tsvector_op.c:2566 +#, c-format +msgid "column \"%s\" is not of a character type" +msgstr "kolumnen \"%s\" är inte av typen character" + +#: utils/adt/tsvector_parser.c:134 +#, c-format +msgid "syntax error in tsvector: \"%s\"" +msgstr "syntaxfel i tsvector: \"%s\"" + +#: utils/adt/tsvector_parser.c:200 +#, c-format +msgid "there is no escaped character: \"%s\"" +msgstr "det finns inget escape-tecken: \"%s\"" + +#: utils/adt/tsvector_parser.c:318 +#, c-format +msgid "wrong position info in tsvector: \"%s\"" +msgstr "fel positionsinfo i tsvector: \"%s\"" + +#: utils/adt/txid.c:135 +#, c-format +msgid "transaction ID %s is in the future" +msgstr "transaktions-ID %s är från framtiden" + +#: utils/adt/txid.c:624 +#, c-format +msgid "invalid external txid_snapshot data" +msgstr "ogiltig extern txid_snapshot-data" + +#: utils/adt/varbit.c:59 utils/adt/varchar.c:51 +#, c-format +msgid "length for type %s must be at least 1" +msgstr "längden för typ %s måste vara minst 1" + +#: utils/adt/varbit.c:64 utils/adt/varchar.c:55 +#, c-format +msgid "length for type %s cannot exceed %d" +msgstr "längden för typ %s kan inte överstiga %d" + +#: utils/adt/varbit.c:165 utils/adt/varbit.c:477 utils/adt/varbit.c:974 +#, c-format +msgid "bit string length exceeds the maximum allowed (%d)" +msgstr "bitstränglängden överskrider det maximalt tillåtna (%d)" + +#: utils/adt/varbit.c:179 utils/adt/varbit.c:322 utils/adt/varbit.c:379 +#, c-format +msgid "bit string length %d does not match type bit(%d)" +msgstr "bitsträngslängden %d matchar inte typen bit(%d)" + +#: utils/adt/varbit.c:201 utils/adt/varbit.c:513 +#, c-format +msgid "\"%c\" is not a valid binary digit" +msgstr "\"%c\" är inte en giltig binär siffra" + +#: utils/adt/varbit.c:226 utils/adt/varbit.c:538 +#, c-format +msgid "\"%c\" is not a valid hexadecimal digit" +msgstr "\"%c\" är inte en giltig hexadecimal siffra" + +#: utils/adt/varbit.c:313 utils/adt/varbit.c:629 +#, c-format +msgid "invalid length in 
external bit string" +msgstr "ogiltig längd på extern bitsträng" + +#: utils/adt/varbit.c:491 utils/adt/varbit.c:638 utils/adt/varbit.c:732 +#, c-format +msgid "bit string too long for type bit varying(%d)" +msgstr "bitsträngen för lång för typen bit varying(%d)" + +#: utils/adt/varbit.c:1067 utils/adt/varbit.c:1169 utils/adt/varlena.c:841 +#: utils/adt/varlena.c:905 utils/adt/varlena.c:1049 utils/adt/varlena.c:2912 +#: utils/adt/varlena.c:2979 +#, c-format +msgid "negative substring length not allowed" +msgstr "negativ substräng-läng tillåts inte" + +#: utils/adt/varbit.c:1226 +#, c-format +msgid "cannot AND bit strings of different sizes" +msgstr "kan inte AND:a bitsträngar av olika storlek" + +#: utils/adt/varbit.c:1268 +#, c-format +msgid "cannot OR bit strings of different sizes" +msgstr "kan inte OR:a bitsträngar av olika storlek" + +#: utils/adt/varbit.c:1315 +#, c-format +msgid "cannot XOR bit strings of different sizes" +msgstr "kan inte XOR:a bitsträngar av olika storlek" + +#: utils/adt/varbit.c:1803 utils/adt/varbit.c:1861 +#, c-format +msgid "bit index %d out of valid range (0..%d)" +msgstr "bitindex %d utanför giltigt intervall (0..%d)" + +#: utils/adt/varbit.c:1812 utils/adt/varlena.c:3170 +#, c-format +msgid "new bit must be 0 or 1" +msgstr "nya biten måste vara 0 eller 1" + +#: utils/adt/varchar.c:155 utils/adt/varchar.c:308 +#, c-format +msgid "value too long for type character(%d)" +msgstr "värdet för långt för typen character (%d)" + +#: utils/adt/varchar.c:470 utils/adt/varchar.c:623 +#, c-format +msgid "value too long for type character varying(%d)" +msgstr "värdet för långt för typen character varying(%d)" + +#: utils/adt/varlena.c:1415 utils/adt/varlena.c:1880 +#, c-format +msgid "could not determine which collation to use for string comparison" +msgstr "kunde inte bestämma vilken jämförelse (collation) som skall användas för strängjämförelse" + +#: utils/adt/varlena.c:1472 utils/adt/varlena.c:1485 +#, c-format +msgid "could not convert string to UTF-16: error code %lu" +msgstr "kunde inte konvertera sträng till UTF-16: felkod %lu" + +#: utils/adt/varlena.c:1500 +#, c-format +msgid "could not compare Unicode strings: %m" +msgstr "kunde inte jämföra Unicode-strängar: %m" + +#: utils/adt/varlena.c:1555 utils/adt/varlena.c:2176 +#, c-format +msgid "collation failed: %s" +msgstr "jämförelse misslyckades: %s" + +#: utils/adt/varlena.c:2394 +#, c-format +msgid "sort key generation failed: %s" +msgstr "generering av sorteringsnyckel misslyckades: %s" + +#: utils/adt/varlena.c:3056 utils/adt/varlena.c:3087 utils/adt/varlena.c:3122 +#: utils/adt/varlena.c:3158 +#, c-format +msgid "index %d out of valid range, 0..%d" +msgstr "index %d utanför giltigt intervall, 0..%d" + +#: utils/adt/varlena.c:4089 +#, c-format +msgid "field position must be greater than zero" +msgstr "fältpositionen måste vara större än noll" + +#: utils/adt/varlena.c:4968 +#, c-format +msgid "unterminated format() type specifier" +msgstr "icketerminerad typangivelse för format()" + +#: utils/adt/varlena.c:4969 utils/adt/varlena.c:5103 utils/adt/varlena.c:5224 +#, c-format +msgid "For a single \"%%\" use \"%%%%\"." +msgstr "För ett ensamt \"%%\" använd \"%%%%\"." 
+ +#: utils/adt/varlena.c:5101 utils/adt/varlena.c:5222 +#, c-format +msgid "unrecognized format() type specifier \"%c\"" +msgstr "okänd typspecifierare \"%c\" för format()" + +#: utils/adt/varlena.c:5114 utils/adt/varlena.c:5171 +#, c-format +msgid "too few arguments for format()" +msgstr "för få argument till format()" + +#: utils/adt/varlena.c:5267 utils/adt/varlena.c:5449 +#, c-format +msgid "number is out of range" +msgstr "numret är utanför giltigt intervall" + +#: utils/adt/varlena.c:5330 utils/adt/varlena.c:5358 +#, c-format +msgid "format specifies argument 0, but arguments are numbered from 1" +msgstr "formatet anger argument 0 men argumenten är numrerade från 1" + +#: utils/adt/varlena.c:5351 +#, c-format +msgid "width argument position must be ended by \"$\"" +msgstr "argumentposition för bredd måste avslutas med \"$\"" + +#: utils/adt/varlena.c:5396 +#, c-format +msgid "null values cannot be formatted as an SQL identifier" +msgstr "null-värden kan inte formateras som SQL-identifierare" + +#: utils/adt/windowfuncs.c:243 +#, c-format +msgid "argument of ntile must be greater than zero" +msgstr "argumentet till ntile måste vara större än noll" + +#: utils/adt/windowfuncs.c:465 +#, c-format +msgid "argument of nth_value must be greater than zero" +msgstr "argumentet till nth_value måste vara större än noll" + +#: utils/adt/xml.c:220 +#, c-format +msgid "unsupported XML feature" +msgstr "ej stödd XML-finess" + +#: utils/adt/xml.c:221 +#, c-format +msgid "This functionality requires the server to be built with libxml support." +msgstr "Denna funktionalitet kräver att servern byggts med libxml-support." + +#: utils/adt/xml.c:222 +#, c-format +msgid "You need to rebuild PostgreSQL using --with-libxml." +msgstr "Du behöver bygga om PostgreSQL med flaggan --with-libxml." + +#: utils/adt/xml.c:241 utils/mb/mbutils.c:512 +#, c-format +msgid "invalid encoding name \"%s\"" +msgstr "ogiltigt kodningsnamn \"%s\"" + +#: utils/adt/xml.c:484 utils/adt/xml.c:489 +#, c-format +msgid "invalid XML comment" +msgstr "ogiltig XML-kommentar" + +#: utils/adt/xml.c:618 +#, c-format +msgid "not an XML document" +msgstr "inget XML-dokument" + +#: utils/adt/xml.c:777 utils/adt/xml.c:800 +#, c-format +msgid "invalid XML processing instruction" +msgstr "ogiltig XML-processinstruktion" + +#: utils/adt/xml.c:778 +#, c-format +msgid "XML processing instruction target name cannot be \"%s\"." +msgstr "XML-processinstruktions målnamn kan inte vara \"%s\"." + +#: utils/adt/xml.c:801 +#, c-format +msgid "XML processing instruction cannot contain \"?>\"." +msgstr "XML-processinstruktion kan inte innehålla \"?>\"." + +#: utils/adt/xml.c:880 +#, c-format +msgid "xmlvalidate is not implemented" +msgstr "xmlvalidate är inte implementerat" + +#: utils/adt/xml.c:959 +#, c-format +msgid "could not initialize XML library" +msgstr "kunde inte initiera XML-bibliotek" + +#: utils/adt/xml.c:960 +#, c-format +msgid "libxml2 has incompatible char type: sizeof(char)=%u, sizeof(xmlChar)=%u." +msgstr "libxml2 har inkompatibel char-typ: sizeof(char)=%u, sizeof(xmlChar)=%u." + +#: utils/adt/xml.c:1046 +#, c-format +msgid "could not set up XML error handler" +msgstr "kunde inte ställa in XML-felhanterare" + +#: utils/adt/xml.c:1047 +#, c-format +msgid "This probably indicates that the version of libxml2 being used is not compatible with the libxml2 header files that PostgreSQL was built with." +msgstr "Detta tyder på att libxml2-versionen som används inte är kompatibel med libxml2-header-filerna som PostgreSQL byggts med."
+ +#: utils/adt/xml.c:1797 +msgid "Invalid character value." +msgstr "Ogiltigt teckenvärde." + +#: utils/adt/xml.c:1800 +msgid "Space required." +msgstr "Mellanslag krävs." + +#: utils/adt/xml.c:1803 +msgid "standalone accepts only 'yes' or 'no'." +msgstr "standalone tillåter bara 'yes' eller 'no'." + +#: utils/adt/xml.c:1806 +msgid "Malformed declaration: missing version." +msgstr "Felaktig deklaration: saknar version." + +#: utils/adt/xml.c:1809 +msgid "Missing encoding in text declaration." +msgstr "Saknar kodning i textdeklaration." + +#: utils/adt/xml.c:1812 +msgid "Parsing XML declaration: '?>' expected." +msgstr "Parsar XML-deklaration: förväntade sig '?>'." + +#: utils/adt/xml.c:1815 +#, c-format +msgid "Unrecognized libxml error code: %d." +msgstr "Okänd libxml-felkod: %d." + +#: utils/adt/xml.c:2090 +#, c-format +msgid "XML does not support infinite date values." +msgstr "XML stöder inte oändliga datumvärden." + +#: utils/adt/xml.c:2112 utils/adt/xml.c:2139 +#, c-format +msgid "XML does not support infinite timestamp values." +msgstr "XML stöder inte oändliga timestamp-värden." + +#: utils/adt/xml.c:2551 +#, c-format +msgid "invalid query" +msgstr "ogiltig fråga" + +#: utils/adt/xml.c:3874 +#, c-format +msgid "invalid array for XML namespace mapping" +msgstr "ogiltig array till XML-namnrymdmappning" + +#: utils/adt/xml.c:3875 +#, c-format +msgid "The array must be two-dimensional with length of the second axis equal to 2." +msgstr "Arrayen måste vara tvådimensionell där längden på andra axeln är 2." + +#: utils/adt/xml.c:3899 +#, c-format +msgid "empty XPath expression" +msgstr "tomt XPath-uttryck" + +#: utils/adt/xml.c:3954 +#, c-format +msgid "neither namespace name nor URI may be null" +msgstr "varken namnrymdnamn eller URI får vara null" + +#: utils/adt/xml.c:3961 +#, c-format +msgid "could not register XML namespace with name \"%s\" and URI \"%s\"" +msgstr "kunde inte registrera XML-namnrymd med namn \"%s\" och URI \"%s\"" + +#: utils/adt/xml.c:4315 +#, c-format +msgid "DEFAULT namespace is not supported" +msgstr "namnrymden DEFAULT stöds inte" + +#: utils/adt/xml.c:4344 +#, c-format +msgid "row path filter must not be empty string" +msgstr "sökvägsfilter för rad får inte vara tomma strängen" + +#: utils/adt/xml.c:4375 +#, c-format +msgid "column path filter must not be empty string" +msgstr "sökvägsfilter för kolumn får inte vara tomma strängen" + +#: utils/adt/xml.c:4561 +#, c-format +msgid "more than one value returned by column XPath expression" +msgstr "mer än ett värde returnerades från kolumns XPath-uttryck" + +#: utils/cache/lsyscache.c:2630 utils/cache/lsyscache.c:2663 +#: utils/cache/lsyscache.c:2696 utils/cache/lsyscache.c:2729 +#, c-format +msgid "type %s is only a shell" +msgstr "typ %s är bara en skaltyp" + +#: utils/cache/lsyscache.c:2635 +#, c-format +msgid "no input function available for type %s" +msgstr "ingen inläsningsfunktion finns för typ %s" + +#: utils/cache/lsyscache.c:2668 +#, c-format +msgid "no output function available for type %s" +msgstr "ingen utmatningsfunktion finns för typ %s" + +#: utils/cache/partcache.c:202 +#, c-format +msgid "operator class \"%s\" of access method %s is missing support function %d for type %s" +msgstr "operatorklass \"%s\" för accessmetod %s saknar supportfunktion %d för typ %s" + +#: utils/cache/plancache.c:723 +#, c-format +msgid "cached plan must not change result type" +msgstr "cache:ad plan får inte ändra resultattyp" + +#: utils/cache/relcache.c:5820 +#, c-format +msgid "could not create relation-cache 
initialization file \"%s\": %m" +msgstr "kunde inte skapa initieringsfil \"%s\" för relations-cache: %m" + +#: utils/cache/relcache.c:5822 +#, c-format +msgid "Continuing anyway, but there's something wrong." +msgstr "Fortsätter ändå, trots att något är fel." + +#: utils/cache/relcache.c:6176 +#, c-format +msgid "could not remove cache file \"%s\": %m" +msgstr "kunde inte ta bort cache-fil \"%s\": %m" + +#: utils/cache/relmapper.c:509 +#, c-format +msgid "cannot PREPARE a transaction that modified relation mapping" +msgstr "kan inte göra PREPARE på en transaktion som ändrat relationsmappningen" + +#: utils/cache/relmapper.c:651 utils/cache/relmapper.c:751 +#, c-format +msgid "could not open relation mapping file \"%s\": %m" +msgstr "kunde inte öppna relationsmappningsfil \"%s\": %m" + +#: utils/cache/relmapper.c:665 +#, c-format +msgid "could not read relation mapping file \"%s\": %m" +msgstr "kunde inte läsa relationsmappningsfil \"%s\": %m" + +#: utils/cache/relmapper.c:676 +#, c-format +msgid "relation mapping file \"%s\" contains invalid data" +msgstr "relationsmappningsfilen \"%s\" innehåller ogiltig data" + +#: utils/cache/relmapper.c:686 +#, c-format +msgid "relation mapping file \"%s\" contains incorrect checksum" +msgstr "relationsmappningsfilen \"%s\" innehåller en felaktig checksumma" + +#: utils/cache/relmapper.c:785 +#, c-format +msgid "could not write to relation mapping file \"%s\": %m" +msgstr "kunde inte skriva till relationsmappningsfilen \"%s\": %m" + +#: utils/cache/relmapper.c:800 +#, c-format +msgid "could not fsync relation mapping file \"%s\": %m" +msgstr "kunde inte fsync:a relationsmappningsfilen \"%s\": %m" + +#: utils/cache/relmapper.c:807 +#, c-format +msgid "could not close relation mapping file \"%s\": %m" +msgstr "kunde inte stänga relationsmappningsfilen \"%s\": %m" + +#: utils/cache/typcache.c:1623 utils/fmgr/funcapi.c:435 +#, c-format +msgid "record type has not been registered" +msgstr "posttypen har inte registrerats" + +#: utils/error/assert.c:34 +#, c-format +msgid "TRAP: ExceptionalCondition: bad arguments\n" +msgstr "TRAP: ExceptionalCondition: fel argument\n" + +#: utils/error/assert.c:37 +#, c-format +msgid "TRAP: %s(\"%s\", File: \"%s\", Line: %d)\n" +msgstr "TRAP: %s(\"%s\", Fil: \"%s\", Rad: %d)\n" + +#: utils/error/elog.c:322 utils/error/elog.c:1306 +#, c-format +msgid "error occurred at %s:%d before error message processing is available\n" +msgstr "fel uppstod vid %s:%d innan processning av felmeddelande är tillgängligt\n" + +#: utils/error/elog.c:1889 +#, c-format +msgid "could not reopen file \"%s\" as stderr: %m" +msgstr "kunde inte återöppna filen \"%s\" som stderr: %m" + +#: utils/error/elog.c:1902 +#, c-format +msgid "could not reopen file \"%s\" as stdout: %m" +msgstr "kunde inte återöppna filen \"%s\" som stdout: %m" + +#: utils/error/elog.c:2394 utils/error/elog.c:2411 utils/error/elog.c:2427 +msgid "[unknown]" +msgstr "[okänd]" + +#: utils/error/elog.c:2887 utils/error/elog.c:3190 utils/error/elog.c:3298 +msgid "missing error text" +msgstr "saknar feltext" + +#: utils/error/elog.c:2890 utils/error/elog.c:2893 utils/error/elog.c:3301 +#: utils/error/elog.c:3304 +#, c-format +msgid " at character %d" +msgstr " vid tecken %d" + +#: utils/error/elog.c:2903 utils/error/elog.c:2910 +msgid "DETAIL: " +msgstr "DETALJ: " + +#: utils/error/elog.c:2917 +msgid "HINT: " +msgstr "TIPS: " + +#: utils/error/elog.c:2924 +msgid "QUERY: " +msgstr "FRÅGA: " + +#: utils/error/elog.c:2931 +msgid "CONTEXT: " +msgstr "KONTEXT: " + +#: 
utils/error/elog.c:2941 +#, c-format +msgid "LOCATION: %s, %s:%d\n" +msgstr "PLATS: %s, %s:%d\n" + +#: utils/error/elog.c:2948 +#, c-format +msgid "LOCATION: %s:%d\n" +msgstr "PLATS: %s:%d\n" + +#: utils/error/elog.c:2962 +msgid "STATEMENT: " +msgstr "SATS: " + +#. translator: This string will be truncated at 47 +#. characters expanded. +#: utils/error/elog.c:3419 +#, c-format +msgid "operating system error %d" +msgstr "operativsystemfel %d" + +#: utils/error/elog.c:3617 +msgid "DEBUG" +msgstr "DEBUG" + +#: utils/error/elog.c:3621 +msgid "LOG" +msgstr "LOGG" + +#: utils/error/elog.c:3624 +msgid "INFO" +msgstr "INFO" + +#: utils/error/elog.c:3627 +msgid "NOTICE" +msgstr "NOTIS" + +#: utils/error/elog.c:3630 +msgid "WARNING" +msgstr "VARNING" + +#: utils/error/elog.c:3633 +msgid "ERROR" +msgstr "FEL" + +#: utils/error/elog.c:3636 +msgid "FATAL" +msgstr "FATALT" + +#: utils/error/elog.c:3639 +msgid "PANIC" +msgstr "PANIK" + +#: utils/fmgr/dfmgr.c:121 +#, c-format +msgid "could not find function \"%s\" in file \"%s\"" +msgstr "kunde inte hitta funktionen \"%s\" i filen \"%s\"" + +#: utils/fmgr/dfmgr.c:239 +#, c-format +msgid "could not load library \"%s\": %s" +msgstr "kunde inte ladda länkbibliotek \"%s\": %s" + +#: utils/fmgr/dfmgr.c:271 +#, c-format +msgid "incompatible library \"%s\": missing magic block" +msgstr "inkompatibelt bibliotek \"%s\": saknar magiskt block" + +#: utils/fmgr/dfmgr.c:273 +#, c-format +msgid "Extension libraries are required to use the PG_MODULE_MAGIC macro." +msgstr "Utökningsbibliotek krävs för att använda macro:t PG_MODULE_MAGIC." + +#: utils/fmgr/dfmgr.c:319 +#, c-format +msgid "incompatible library \"%s\": version mismatch" +msgstr "inkompatibelt bibliotek \"%s\": versionen stämmer inte" + +#: utils/fmgr/dfmgr.c:321 +#, c-format +msgid "Server is version %d, library is version %s." +msgstr "Servern är version %d, biblioteket är version %s." + +#: utils/fmgr/dfmgr.c:338 +#, c-format +msgid "Server has FUNC_MAX_ARGS = %d, library has %d." +msgstr "Servern har FUNC_MAX_ARGS = %d, biblioteket har %d." + +#: utils/fmgr/dfmgr.c:347 +#, c-format +msgid "Server has INDEX_MAX_KEYS = %d, library has %d." +msgstr "Servern har INDEX_MAX_KEYS = %d, biblioteket har %d." + +#: utils/fmgr/dfmgr.c:356 +#, c-format +msgid "Server has NAMEDATALEN = %d, library has %d." +msgstr "Servern har NAMEDATALEN = %d, biblioteket har %d." + +#: utils/fmgr/dfmgr.c:365 +#, c-format +msgid "Server has FLOAT4PASSBYVAL = %s, library has %s." +msgstr "Servern har FLOAT4PASSBYVAL = %s, biblioteket har %s." + +#: utils/fmgr/dfmgr.c:374 +#, c-format +msgid "Server has FLOAT8PASSBYVAL = %s, library has %s." +msgstr "Servern har FLOAT8PASSBYVAL = %s, biblioteket har %s." + +#: utils/fmgr/dfmgr.c:381 +msgid "Magic block has unexpected length or padding difference." +msgstr "Magiskt block har oväntad längd eller annan paddning." 
+ +#: utils/fmgr/dfmgr.c:384 +#, c-format +msgid "incompatible library \"%s\": magic block mismatch" +msgstr "inkompatibelt bibliotek \"%s\": magiskt block matchar inte" + +#: utils/fmgr/dfmgr.c:548 +#, c-format +msgid "access to library \"%s\" is not allowed" +msgstr "åtkomst till biblioteket \"%s\" tillåts inte" + +#: utils/fmgr/dfmgr.c:574 +#, c-format +msgid "invalid macro name in dynamic library path: %s" +msgstr "ogiltigt macro-namn i dynamisk biblioteksökväg: %s" + +#: utils/fmgr/dfmgr.c:614 +#, c-format +msgid "zero-length component in parameter \"dynamic_library_path\"" +msgstr "komponent med längden noll i parameter \"dynamic_library_path\"" + +#: utils/fmgr/dfmgr.c:633 +#, c-format +msgid "component in parameter \"dynamic_library_path\" is not an absolute path" +msgstr "komponent som inte är en absolut sökväg i parameter \"dynamic_library_path\"" + +#: utils/fmgr/fmgr.c:236 +#, c-format +msgid "internal function \"%s\" is not in internal lookup table" +msgstr "interna funktionen \"%s\" finns inte i den interna uppslagstabellen" + +#: utils/fmgr/fmgr.c:485 +#, c-format +msgid "could not find function information for function \"%s\"" +msgstr "kunde inte hitta funktionsinformation för funktion \"%s\"" + +#: utils/fmgr/fmgr.c:487 +#, c-format +msgid "SQL-callable functions need an accompanying PG_FUNCTION_INFO_V1(funcname)." +msgstr "SQL-anropbara funktioner kräver en medföljande PG_FUNCTION_INFO_V1(funknamn)." + +#: utils/fmgr/fmgr.c:505 +#, c-format +msgid "unrecognized API version %d reported by info function \"%s\"" +msgstr "okänd API-version %d rapporterad av infofunktion \"%s\"" + +#: utils/fmgr/fmgr.c:2210 +#, c-format +msgid "language validation function %u called for language %u instead of %u" +msgstr "språkvalideringsfunktion %u anropad för språk %u istället för %u" + +#: utils/fmgr/funcapi.c:358 +#, c-format +msgid "could not determine actual result type for function \"%s\" declared to return type %s" +msgstr "kunde inte bestämma resultattyp för funktion \"%s\" som deklarerats att returnera typ %s" + +#: utils/fmgr/funcapi.c:1403 utils/fmgr/funcapi.c:1435 +#, c-format +msgid "number of aliases does not match number of columns" +msgstr "antalet alias matchar inte antalet kolumner" + +#: utils/fmgr/funcapi.c:1429 +#, c-format +msgid "no column alias was provided" +msgstr "inget kolumnalias angivet" + +#: utils/fmgr/funcapi.c:1453 +#, c-format +msgid "could not determine row description for function returning record" +msgstr "kunde inte få radbeskrivning för funktion som returnerar en record" + +#: utils/init/miscinit.c:108 +#, c-format +msgid "data directory \"%s\" does not exist" +msgstr "databaskatalogen \"%s\" existerar inte" + +#: utils/init/miscinit.c:113 +#, c-format +msgid "could not read permissions of directory \"%s\": %m" +msgstr "kunde inte läsa rättigheter på katalog \"%s\": %m" + +#: utils/init/miscinit.c:121 +#, c-format +msgid "specified data directory \"%s\" is not a directory" +msgstr "angiven datakatalog \"%s\" är inte en katalog" + +#: utils/init/miscinit.c:137 +#, c-format +msgid "data directory \"%s\" has wrong ownership" +msgstr "datakatalogen \"%s\" har fel ägare" + +#: utils/init/miscinit.c:139 +#, c-format +msgid "The server must be started by the user that owns the data directory." +msgstr "Servern måste startas av den användare som äger datakatalogen." 
+ +#: utils/init/miscinit.c:157 +#, c-format +msgid "data directory \"%s\" has invalid permissions" +msgstr "datakatalogen \"%s\" har felaktiga rättigheter" + +#: utils/init/miscinit.c:159 +#, c-format +msgid "Permissions should be u=rwx (0700) or u=rwx,g=rx (0750)." +msgstr "Rättigheterna skall vara u=rwx (0700) eller u=rwx,g=rx (0750)." + +#: utils/init/miscinit.c:218 +#, c-format +msgid "could not change directory to \"%s\": %m" +msgstr "kunde inte byta katalog till \"%s\": %m" + +#: utils/init/miscinit.c:546 utils/misc/guc.c:6360 +#, c-format +msgid "cannot set parameter \"%s\" within security-restricted operation" +msgstr "kan inte sätta parameter \"%s\" från en säkerhetsbegränsad operation" + +#: utils/init/miscinit.c:607 +#, c-format +msgid "role with OID %u does not exist" +msgstr "roll med OID %u existerar inte" + +#: utils/init/miscinit.c:637 +#, c-format +msgid "role \"%s\" is not permitted to log in" +msgstr "roll \"%s\" tillåts inte logga in" + +#: utils/init/miscinit.c:655 +#, c-format +msgid "too many connections for role \"%s\"" +msgstr "för många uppkopplingar för roll \"%s\"" + +#: utils/init/miscinit.c:715 +#, c-format +msgid "permission denied to set session authorization" +msgstr "rättighet saknas för att sätta sessionsauktorisation" + +#: utils/init/miscinit.c:798 +#, c-format +msgid "invalid role OID: %u" +msgstr "ogiltigt roll-OID: %u" + +#: utils/init/miscinit.c:852 +#, c-format +msgid "database system is shut down" +msgstr "databassystemet är nedstängt" + +#: utils/init/miscinit.c:939 +#, c-format +msgid "could not create lock file \"%s\": %m" +msgstr "kan inte skapa låsfil \"%s\": %m" + +#: utils/init/miscinit.c:953 +#, c-format +msgid "could not open lock file \"%s\": %m" +msgstr "kunde inte öppna låsfil \"%s\": %m" + +#: utils/init/miscinit.c:960 +#, c-format +msgid "could not read lock file \"%s\": %m" +msgstr "kunde inte läsa låsfil \"%s\": %m" + +#: utils/init/miscinit.c:969 +#, c-format +msgid "lock file \"%s\" is empty" +msgstr "låsfilen \"%s\" är tom" + +#: utils/init/miscinit.c:970 +#, c-format +msgid "Either another server is starting, or the lock file is the remnant of a previous server startup crash." +msgstr "Antingen startar en annan server eller så är låsfilen kvar från en tidigare serverkrash vid uppstart." + +#: utils/init/miscinit.c:1014 +#, c-format +msgid "lock file \"%s\" already exists" +msgstr "låsfil med namn \"%s\" finns redan" + +#: utils/init/miscinit.c:1018 +#, c-format +msgid "Is another postgres (PID %d) running in data directory \"%s\"?" +msgstr "Kör en annan postgres (PID %d) i datakatalogen \"%s\"?" + +#: utils/init/miscinit.c:1020 +#, c-format +msgid "Is another postmaster (PID %d) running in data directory \"%s\"?" +msgstr "Kör en annan postmaster (PID %d) i datakatalogen \"%s\"?" + +#: utils/init/miscinit.c:1023 +#, c-format +msgid "Is another postgres (PID %d) using socket file \"%s\"?" +msgstr "Använder en annan postgres (PID %d) uttagesfilen (socket) \"%s\"?" + +#: utils/init/miscinit.c:1025 +#, c-format +msgid "Is another postmaster (PID %d) using socket file \"%s\"?" +msgstr "Använder en annan postmaster (PID %d) uttagesfilen (socket) \"%s\"?" 
+ +#: utils/init/miscinit.c:1061 +#, c-format +msgid "pre-existing shared memory block (key %lu, ID %lu) is still in use" +msgstr "redan existerande delat minnesblock (nyckel %lu, ID %lu) används fortfarande" + +#: utils/init/miscinit.c:1064 +#, c-format +msgid "If you're sure there are no old server processes still running, remove the shared memory block or just delete the file \"%s\"." +msgstr "Om du är säker på att ingen gammal serverprocess fortfarande kör, så ta bort det delade minnesblocket eller radera helt enkelt filen \"%s\"." + +#: utils/init/miscinit.c:1080 +#, c-format +msgid "could not remove old lock file \"%s\": %m" +msgstr "kunde inte ta bort gammal låsfil \"%s\": %m" + +#: utils/init/miscinit.c:1082 +#, c-format +msgid "The file seems accidentally left over, but it could not be removed. Please remove the file by hand and try again." +msgstr "Filen verkar ha lämnats kvar av misstag, men kunde inte tas bort. Ta bort den för hand och försök igen." + +#: utils/init/miscinit.c:1119 utils/init/miscinit.c:1133 +#: utils/init/miscinit.c:1144 +#, c-format +msgid "could not write lock file \"%s\": %m" +msgstr "kunde inte skriva låsfil \"%s\": %m" + +#: utils/init/miscinit.c:1276 utils/init/miscinit.c:1419 utils/misc/guc.c:9201 +#, c-format +msgid "could not read from file \"%s\": %m" +msgstr "kunde inte läsa från fil \"%s\": %m" + +#: utils/init/miscinit.c:1407 +#, c-format +msgid "could not open file \"%s\": %m; continuing anyway" +msgstr "kunde inte öppna fil \"%s\": %m; fortsätter ändå" + +#: utils/init/miscinit.c:1432 +#, c-format +msgid "lock file \"%s\" contains wrong PID: %ld instead of %ld" +msgstr "låsfil \"%s\" innehåller fel PID: %ld istället för %ld" + +#: utils/init/miscinit.c:1471 utils/init/miscinit.c:1487 +#, c-format +msgid "\"%s\" is not a valid data directory" +msgstr "\"%s\" är inte en giltig datakatalog" + +#: utils/init/miscinit.c:1473 +#, c-format +msgid "File \"%s\" is missing." +msgstr "Filen \"%s\" saknas." + +#: utils/init/miscinit.c:1489 +#, c-format +msgid "File \"%s\" does not contain valid data." +msgstr "Filen \"%s\" innehåller inte giltig data." + +#: utils/init/miscinit.c:1491 +#, c-format +msgid "You might need to initdb." +msgstr "Du kan behöva köra initdb." + +#: utils/init/miscinit.c:1499 +#, c-format +msgid "The data directory was initialized by PostgreSQL version %s, which is not compatible with this version %s." +msgstr "Datakatalogen har skapats av PostgreSQL version %s, som inte är kompatibel med denna version %s." 
+ +#: utils/init/miscinit.c:1566 +#, c-format +msgid "loaded library \"%s\"" +msgstr "laddat bibliotek \"%s\"" + +#: utils/init/postinit.c:252 +#, c-format +msgid "replication connection authorized: user=%s SSL enabled (protocol=%s, cipher=%s, bits=%d, compression=%s)" +msgstr "replikeringsanslutning auktoriserad: användare=%s SSL påslagen (protokoll=%s, krypto=%s, bitar=%d, komprimering=%s)" + +#: utils/init/postinit.c:257 utils/init/postinit.c:274 +msgid "off" +msgstr "av" + +#: utils/init/postinit.c:257 utils/init/postinit.c:274 +msgid "on" +msgstr "på" + +#: utils/init/postinit.c:261 +#, c-format +msgid "replication connection authorized: user=%s" +msgstr "replikeringsanslutning auktoriserad: användare=%s" + +#: utils/init/postinit.c:269 +#, c-format +msgid "connection authorized: user=%s database=%s SSL enabled (protocol=%s, cipher=%s, bits=%d, compression=%s)" +msgstr "anslutning auktoriserad: användare=%s databas=%s SSL påslagen (protokoll=%s, krypto=%s, bitar=%d, komprimering=%s)" + +#: utils/init/postinit.c:278 +#, c-format +msgid "connection authorized: user=%s database=%s" +msgstr "anslutning auktoriserad: användare=%s databas=%s" + +#: utils/init/postinit.c:310 +#, c-format +msgid "database \"%s\" has disappeared from pg_database" +msgstr "databasen \"%s\" har försvunnit från pg_database" + +#: utils/init/postinit.c:312 +#, c-format +msgid "Database OID %u now seems to belong to \"%s\"." +msgstr "Databasen med OID %u verkar nu höra till \"%s\"." + +#: utils/init/postinit.c:332 +#, c-format +msgid "database \"%s\" is not currently accepting connections" +msgstr "databasen \"%s\" tar för närvarande inte emot uppkopplingar" + +#: utils/init/postinit.c:345 +#, c-format +msgid "permission denied for database \"%s\"" +msgstr "rättighet saknas för databas \"%s\"" + +#: utils/init/postinit.c:346 +#, c-format +msgid "User does not have CONNECT privilege." +msgstr "Användaren har inte rättigheten CONNECT." + +#: utils/init/postinit.c:363 +#, c-format +msgid "too many connections for database \"%s\"" +msgstr "för många uppkopplingar till databasen \"%s\"" + +#: utils/init/postinit.c:385 utils/init/postinit.c:392 +#, c-format +msgid "database locale is incompatible with operating system" +msgstr "databaslokalen är inkompatibel med operativsystemet" + +#: utils/init/postinit.c:386 +#, c-format +msgid "The database was initialized with LC_COLLATE \"%s\", which is not recognized by setlocale()." +msgstr "Databasen initierades med LC_COLLATE \"%s\" vilket inte känns igen av setlocale()." + +#: utils/init/postinit.c:388 utils/init/postinit.c:395 +#, c-format +msgid "Recreate the database with another locale or install the missing locale." +msgstr "Återskapa databasen med en annan lokal eller installera den saknade lokalen." + +#: utils/init/postinit.c:393 +#, c-format +msgid "The database was initialized with LC_CTYPE \"%s\", which is not recognized by setlocale()." +msgstr "Databasen initierades med LC_CTYPE \"%s\", vilket inte känns igen av setlocale()." + +#: utils/init/postinit.c:726 +#, c-format +msgid "no roles are defined in this database system" +msgstr "inga roller är definierade i detta databassystem" + +#: utils/init/postinit.c:727 +#, c-format +msgid "You should immediately run CREATE USER \"%s\" SUPERUSER;." +msgstr "Du borde direkt köra CREATE USER \"%s\" SUPERUSER;." 
+ +#: utils/init/postinit.c:763 +#, c-format +msgid "new replication connections are not allowed during database shutdown" +msgstr "nya replikeringsanslutningar tillåts inte under databasnedstängning" + +#: utils/init/postinit.c:767 +#, c-format +msgid "must be superuser to connect during database shutdown" +msgstr "måste vara superanvändare för att ansluta när databasen håller på att stängas ner" + +#: utils/init/postinit.c:777 +#, c-format +msgid "must be superuser to connect in binary upgrade mode" +msgstr "måste vara superanvändare för att ansluta i binärt uppgraderingsläger" + +#: utils/init/postinit.c:791 +#, c-format +msgid "remaining connection slots are reserved for non-replication superuser connections" +msgstr "resterande anslutningsslottar är reserverade för superanvändaranslutningar utan replikering" + +#: utils/init/postinit.c:801 +#, c-format +msgid "must be superuser or replication role to start walsender" +msgstr "måste vara superanvändare eller replikeringsroll för att starta \"walsender\"" + +#: utils/init/postinit.c:870 +#, c-format +msgid "database %u does not exist" +msgstr "databasen %u existerar inte" + +#: utils/init/postinit.c:959 +#, c-format +msgid "It seems to have just been dropped or renamed." +msgstr "Det verkar precis ha tagits bort eller döpts om." + +#: utils/init/postinit.c:977 +#, c-format +msgid "The database subdirectory \"%s\" is missing." +msgstr "Databasens underbibliotek \"%s\" saknas." + +#: utils/init/postinit.c:982 +#, c-format +msgid "could not access directory \"%s\": %m" +msgstr "kunde inte komma åt katalog \"%s\": %m" + +#: utils/mb/conv.c:488 utils/mb/conv.c:680 +#, c-format +msgid "invalid encoding number: %d" +msgstr "ogiltigt kodningsnummer: %d" + +#: utils/mb/conversion_procs/utf8_and_iso8859/utf8_and_iso8859.c:122 +#: utils/mb/conversion_procs/utf8_and_iso8859/utf8_and_iso8859.c:154 +#, c-format +msgid "unexpected encoding ID %d for ISO 8859 character sets" +msgstr "oväntat kodnings-ID %d för ISO 8859-teckenuppsättningarna" + +#: utils/mb/conversion_procs/utf8_and_win/utf8_and_win.c:103 +#: utils/mb/conversion_procs/utf8_and_win/utf8_and_win.c:135 +#, c-format +msgid "unexpected encoding ID %d for WIN character sets" +msgstr "oväntat kodnings-ID %d för WIN-teckenuppsättningarna" + +#: utils/mb/encnames.c:473 +#, c-format +msgid "encoding \"%s\" not supported by ICU" +msgstr "kodning \"%s\" stöds inte av ICU" + +#: utils/mb/encnames.c:572 +#, c-format +msgid "encoding name too long" +msgstr "kodningsnamnet är för långt" + +#: utils/mb/mbutils.c:296 +#, c-format +msgid "conversion between %s and %s is not supported" +msgstr "konvertering mellan %s och %s stöds inte" + +#: utils/mb/mbutils.c:355 +#, c-format +msgid "default conversion function for encoding \"%s\" to \"%s\" does not exist" +msgstr "standardkonverteringsfunktion för kodning \"%s\" till \"%s\" finns inte" + +#: utils/mb/mbutils.c:366 utils/mb/mbutils.c:699 +#, c-format +msgid "String of %d bytes is too long for encoding conversion." +msgstr "Sträng på %d byte är för lång för kodningskonvertering." 
+ +#: utils/mb/mbutils.c:453 +#, c-format +msgid "invalid source encoding name \"%s\"" +msgstr "ogiltigt källkodningsnamn \"%s\"" + +#: utils/mb/mbutils.c:458 +#, c-format +msgid "invalid destination encoding name \"%s\"" +msgstr "ogiltigt målkodningsnamn \"%s\"" + +#: utils/mb/mbutils.c:598 +#, c-format +msgid "invalid byte value for encoding \"%s\": 0x%02x" +msgstr "ogiltigt byte-värde för kodning \"%s\": 0x%02x" + +#: utils/mb/mbutils.c:940 +#, c-format +msgid "bind_textdomain_codeset failed" +msgstr "bind_textdomain_codeset misslyckades" + +#: utils/mb/wchar.c:2015 +#, c-format +msgid "invalid byte sequence for encoding \"%s\": %s" +msgstr "ogiltig byte-sekvens för kodning \"%s\": %s" + +#: utils/mb/wchar.c:2048 +#, c-format +msgid "character with byte sequence %s in encoding \"%s\" has no equivalent in encoding \"%s\"" +msgstr "tecken med byte-sekvens %s i kodning \"%s\" har inget motsvarande i kodning \"%s\"" + +#: utils/misc/guc.c:571 +msgid "Ungrouped" +msgstr "Ej grupperad" + +#: utils/misc/guc.c:573 +msgid "File Locations" +msgstr "Filplatser" + +#: utils/misc/guc.c:575 +msgid "Connections and Authentication" +msgstr "Uppkopplingar och Autentisering" + +#: utils/misc/guc.c:577 +msgid "Connections and Authentication / Connection Settings" +msgstr "Uppkopplingar och Autentisering / Uppkopplingsinställningar" + +#: utils/misc/guc.c:579 +msgid "Connections and Authentication / Authentication" +msgstr "Uppkopplingar och Autentisering / Autentisering" + +#: utils/misc/guc.c:581 +msgid "Connections and Authentication / SSL" +msgstr "Uppkopplingar och Autentisering / SSL" + +#: utils/misc/guc.c:583 +msgid "Resource Usage" +msgstr "Resursanvändning" + +#: utils/misc/guc.c:585 +msgid "Resource Usage / Memory" +msgstr "Resursanvändning / Minne" + +#: utils/misc/guc.c:587 +msgid "Resource Usage / Disk" +msgstr "Resursanvändning / Disk" + +#: utils/misc/guc.c:589 +msgid "Resource Usage / Kernel Resources" +msgstr "Resursanvändning / Kärnresurser" + +#: utils/misc/guc.c:591 +msgid "Resource Usage / Cost-Based Vacuum Delay" +msgstr "Resursanvändning / Kostnadsbaserad Vacuum-fördröjning" + +#: utils/misc/guc.c:593 +msgid "Resource Usage / Background Writer" +msgstr "Resursanvändning / Bakgrundsskrivare" + +#: utils/misc/guc.c:595 +msgid "Resource Usage / Asynchronous Behavior" +msgstr "Resursanvändning / Asynkront beteende" + +#: utils/misc/guc.c:597 +msgid "Write-Ahead Log" +msgstr "Write-Ahead Log" + +#: utils/misc/guc.c:599 +msgid "Write-Ahead Log / Settings" +msgstr "Write-Ahead Log / Inställningar" + +#: utils/misc/guc.c:601 +msgid "Write-Ahead Log / Checkpoints" +msgstr "Write-Ahead Log / Checkpoint:er" + +#: utils/misc/guc.c:603 +msgid "Write-Ahead Log / Archiving" +msgstr "Write-Ahead Log / Arkivering" + +#: utils/misc/guc.c:605 +msgid "Replication" +msgstr "Replikering" + +#: utils/misc/guc.c:607 +msgid "Replication / Sending Servers" +msgstr "Replikering / Skickande servrar" + +#: utils/misc/guc.c:609 +msgid "Replication / Master Server" +msgstr "Replikering / Master-server" + +#: utils/misc/guc.c:611 +msgid "Replication / Standby Servers" +msgstr "Replikering / Standby-servrar" + +#: utils/misc/guc.c:613 +msgid "Replication / Subscribers" +msgstr "Replikering / Prenumeranter" + +#: utils/misc/guc.c:615 +msgid "Query Tuning" +msgstr "Frågeoptimering" + +#: utils/misc/guc.c:617 +msgid "Query Tuning / Planner Method Configuration" +msgstr "Frågeoptimering / Planeringsmetodinställningar" + +#: utils/misc/guc.c:619 +msgid "Query Tuning / Planner Cost Constants" +msgstr "Frågeoptimering 
/ Planerarens kostnadskonstanter" + +#: utils/misc/guc.c:621 +msgid "Query Tuning / Genetic Query Optimizer" +msgstr "Frågeoptimering / Genetisk frågeoptimerare" + +#: utils/misc/guc.c:623 +msgid "Query Tuning / Other Planner Options" +msgstr "Frågeoptimering / Andra planeringsinställningar" + +#: utils/misc/guc.c:625 +msgid "Reporting and Logging" +msgstr "Rapportering och loggning" + +#: utils/misc/guc.c:627 +msgid "Reporting and Logging / Where to Log" +msgstr "Rapportering och loggning / Logga var?" + +#: utils/misc/guc.c:629 +msgid "Reporting and Logging / When to Log" +msgstr "Rapportering och loggning / Logga när?" + +#: utils/misc/guc.c:631 +msgid "Reporting and Logging / What to Log" +msgstr "Rapportering och loggning / Logga vad?" + +#: utils/misc/guc.c:633 +msgid "Process Title" +msgstr "Processtitel" + +#: utils/misc/guc.c:635 +msgid "Statistics" +msgstr "Statistik" + +#: utils/misc/guc.c:637 +msgid "Statistics / Monitoring" +msgstr "Statistik / Övervakning" + +#: utils/misc/guc.c:639 +msgid "Statistics / Query and Index Statistics Collector" +msgstr "Statistik / Insamlare av fråge- och index-statistik" + +#: utils/misc/guc.c:641 +msgid "Autovacuum" +msgstr "Autovacuum" + +#: utils/misc/guc.c:643 +msgid "Client Connection Defaults" +msgstr "Standard för klientanslutning" + +#: utils/misc/guc.c:645 +msgid "Client Connection Defaults / Statement Behavior" +msgstr "Standard för klientanslutning / Satsbeteende" + +#: utils/misc/guc.c:647 +msgid "Client Connection Defaults / Locale and Formatting" +msgstr "Standard för klientanslutning / Lokal och formatering" + +#: utils/misc/guc.c:649 +msgid "Client Connection Defaults / Shared Library Preloading" +msgstr "Standard för klientanslutning / Förladdning av delat bibliotek" + +#: utils/misc/guc.c:651 +msgid "Client Connection Defaults / Other Defaults" +msgstr "Standard för klientanslutning / Övriga standardvärden" + +#: utils/misc/guc.c:653 +msgid "Lock Management" +msgstr "Låshantering" + +#: utils/misc/guc.c:655 +msgid "Version and Platform Compatibility" +msgstr "Version och plattformskompatibilitet" + +#: utils/misc/guc.c:657 +msgid "Version and Platform Compatibility / Previous PostgreSQL Versions" +msgstr "Version och plattformskompatibilitet / Tidigare PostgreSQL-versioner" + +#: utils/misc/guc.c:659 +msgid "Version and Platform Compatibility / Other Platforms and Clients" +msgstr "Version och plattformskompatibilitet / Andra plattformar och klienter" + +#: utils/misc/guc.c:661 +msgid "Error Handling" +msgstr "Felhantering" + +#: utils/misc/guc.c:663 +msgid "Preset Options" +msgstr "Förinställningsflaggor" + +#: utils/misc/guc.c:665 +msgid "Customized Options" +msgstr "Ändrade flaggor" + +#: utils/misc/guc.c:667 +msgid "Developer Options" +msgstr "Utvecklarflaggor" + +#: utils/misc/guc.c:721 +msgid "Valid units for this parameter are \"B\", \"kB\", \"MB\", \"GB\", and \"TB\"." +msgstr "Giltiga enheter för denna parameter är \"B\", \"kB\", \"MB\", \"GB\" och \"TB\"." + +#: utils/misc/guc.c:763 +msgid "Valid units for this parameter are \"ms\", \"s\", \"min\", \"h\", and \"d\"." +msgstr "Giltiga enheter för denna parameter är \"ms\", \"s\", \"min\", \"h\" och \"d\"." + +#: utils/misc/guc.c:822 +msgid "Enables the planner's use of sequential-scan plans." +msgstr "Aktiverar planerarens användning av planer med sekvensiell skanning." + +#: utils/misc/guc.c:831 +msgid "Enables the planner's use of index-scan plans." +msgstr "Aktiverar planerarens användning av planer med indexskanning." 
+ +#: utils/misc/guc.c:840 +msgid "Enables the planner's use of index-only-scan plans." +msgstr "Aktiverar planerarens användning av planer med skanning av enbart index." + +#: utils/misc/guc.c:849 +msgid "Enables the planner's use of bitmap-scan plans." +msgstr "Aktiverar planerarens användning av planer med bitmapskanning." + +#: utils/misc/guc.c:858 +msgid "Enables the planner's use of TID scan plans." +msgstr "Aktiverar planerarens användning av planer med TID-skanning." + +#: utils/misc/guc.c:867 +msgid "Enables the planner's use of explicit sort steps." +msgstr "Slår på planerarens användning av explicita sorteringssteg." + +#: utils/misc/guc.c:876 +msgid "Enables the planner's use of hashed aggregation plans." +msgstr "Aktiverar planerarens användning av planer med hash-aggregering" + +#: utils/misc/guc.c:885 +msgid "Enables the planner's use of materialization." +msgstr "Aktiverar planerarens användning av materialisering." + +#: utils/misc/guc.c:894 +msgid "Enables the planner's use of nested-loop join plans." +msgstr "Aktiverar planerarens användning av planer med nästlad loop-join," + +#: utils/misc/guc.c:903 +msgid "Enables the planner's use of merge join plans." +msgstr "Aktiverar planerarens användning av merge-join-planer." + +#: utils/misc/guc.c:912 +msgid "Enables the planner's use of hash join plans." +msgstr "Aktiverar planerarens användning av hash-join-planer." + +#: utils/misc/guc.c:921 +msgid "Enables the planner's use of gather merge plans." +msgstr "Aktiverar planerarens användning av planer med gather-merge." + +#: utils/misc/guc.c:930 +msgid "Enables partitionwise join." +msgstr "Aktiverar join per partition." + +#: utils/misc/guc.c:939 +msgid "Enables partitionwise aggregation and grouping." +msgstr "Aktiverar aggregering och gruppering per partition." + +#: utils/misc/guc.c:948 +msgid "Enables the planner's use of parallel append plans." +msgstr "Aktiverar planerarens användning av planer med parallell append." + +#: utils/misc/guc.c:957 +msgid "Enables the planner's user of parallel hash plans." +msgstr "Aktiverar planerarens användning av planer med parallell hash." + +#: utils/misc/guc.c:966 +msgid "Enable plan-time and run-time partition pruning." +msgstr "Aktiverar partitionsbeskärning vid planering och vid körning." + +#: utils/misc/guc.c:967 +msgid "Allows the query planner and executor to compare partition bounds to conditions in the query to determine which partitions must be scanned." +msgstr "Tillåter att frågeplaneraren och exekveraren jämför partitionsgränser med villkor i frågan för att bestämma vilka partitioner som skall skannas." + +#: utils/misc/guc.c:977 +msgid "Enables genetic query optimization." +msgstr "Aktiverar genetisk frågeoptimering." + +#: utils/misc/guc.c:978 +msgid "This algorithm attempts to do planning without exhaustive searching." +msgstr "Denna algoritm försöker utföra planering utan fullständig sökning." + +#: utils/misc/guc.c:988 +msgid "Shows whether the current user is a superuser." +msgstr "Visar om den aktuella användaren är en superanvändare." + +#: utils/misc/guc.c:998 +msgid "Enables advertising the server via Bonjour." +msgstr "Aktiverar annonsering av servern via Bonjour." + +#: utils/misc/guc.c:1007 +msgid "Collects transaction commit time." +msgstr "Samlar in tid för transaktions-commit." + +#: utils/misc/guc.c:1016 +msgid "Enables SSL connections." +msgstr "Tillåter SSL-anslutningar." + +#: utils/misc/guc.c:1025 +msgid "Also use ssl_passphrase_command during server reload." 
+msgstr "Använd ssl_passphrase_command även vid server-reload." + +#: utils/misc/guc.c:1034 +msgid "Give priority to server ciphersuite order." +msgstr "Ge prioritet till serverns ordning av kryptometoder." + +#: utils/misc/guc.c:1043 +msgid "Forces synchronization of updates to disk." +msgstr "Tvingar synkronisering av uppdateringar till disk." + +#: utils/misc/guc.c:1044 +msgid "The server will use the fsync() system call in several places to make sure that updates are physically written to disk. This insures that a database cluster will recover to a consistent state after an operating system or hardware crash." +msgstr "Servern kommer använda systemanropet fsync() på ett antal platser för att se till att uppdateringar fysiskt skrivs till disk. Detta för att säkerställa att databasklustret kan starta i ett konsistent tillstånd efter en operativsystemkrash eller hårdvarukrash." + +#: utils/misc/guc.c:1055 +msgid "Continues processing after a checksum failure." +msgstr "Fortsätter processande efter checksummefel." + +#: utils/misc/guc.c:1056 +msgid "Detection of a checksum failure normally causes PostgreSQL to report an error, aborting the current transaction. Setting ignore_checksum_failure to true causes the system to ignore the failure (but still report a warning), and continue processing. This behavior could cause crashes or other serious problems. Only has an effect if checksums are enabled." +msgstr "Normalt vid detektion av checksummefel så rapporterar PostgreSQL felet och avbryter den aktuella transaktionen. Sätts ignore_checksum_failure till true så kommer systemet hoppa över felet (men fortfarande rapportera en varning). Detta beteende kan orsaka krasher eller andra allvarliga problem. Detta påverkas bara om checksummor är påslaget." + +#: utils/misc/guc.c:1070 +msgid "Continues processing past damaged page headers." +msgstr "Fortsätter processande efter trasiga sidhuvuden." + +#: utils/misc/guc.c:1071 +msgid "Detection of a damaged page header normally causes PostgreSQL to report an error, aborting the current transaction. Setting zero_damaged_pages to true causes the system to instead report a warning, zero out the damaged page, and continue processing. This behavior will destroy data, namely all the rows on the damaged page." +msgstr "Normalt vid detektion av trasiga sidhuvuden så rapporterar PostgreSQL felet och avbryter den aktuella transaktionen. Sätts zero_damaged_pages till true så kommer systemet istället rapportera en varning, nollställa den trasiga sidan samt fortsätta processa. Detta kommer förstöra data (alla rader i den trasiga sidan)." + +#: utils/misc/guc.c:1084 +msgid "Writes full pages to WAL when first modified after a checkpoint." +msgstr "Skriver fulla sidor till WAL första gången de ändras efter en checkpoint." + +#: utils/misc/guc.c:1085 +msgid "A page write in process during an operating system crash might be only partially written to disk. During recovery, the row changes stored in WAL are not enough to recover. This option writes pages when first modified after a checkpoint to WAL so full recovery is possible." +msgstr "En sidskrivning som sker vid en operativsystemkrash kan bli delvis utskriven till disk. Under återställning så kommer radändringar i WAL:en inte vara tillräckligt för att återställa datan. Denna flagga skriver ut sidor först efter att en WAL-checkpoint gjorts vilket gör att full återställning kan ske." 
+ +#: utils/misc/guc.c:1098 +msgid "Writes full pages to WAL when first modified after a checkpoint, even for a non-critical modifications." +msgstr "Skriver fulla sidor till WAL första gången de ändras efter en checkpoint, även för ickekritiska ändringar." + +#: utils/misc/guc.c:1108 +msgid "Compresses full-page writes written in WAL file." +msgstr "Komprimerar skrivning av hela sidor som skrivs i WAL-fil." + +#: utils/misc/guc.c:1118 +msgid "Logs each checkpoint." +msgstr "Logga varje checkpoint." + +#: utils/misc/guc.c:1127 +msgid "Logs each successful connection." +msgstr "Logga varje lyckad anslutning." + +#: utils/misc/guc.c:1136 +msgid "Logs end of a session, including duration." +msgstr "Loggar slut på session, inklusive längden." + +#: utils/misc/guc.c:1145 +msgid "Logs each replication command." +msgstr "Loggar alla replikeringskommandon." + +#: utils/misc/guc.c:1154 +msgid "Shows whether the running server has assertion checks enabled." +msgstr "Visar om den körande servern har assert-kontroller påslagna." + +#: utils/misc/guc.c:1169 +msgid "Terminate session on any error." +msgstr "Avbryt sessionen vid fel." + +#: utils/misc/guc.c:1178 +msgid "Reinitialize server after backend crash." +msgstr "Återinitiera servern efter en backend-krasch." + +#: utils/misc/guc.c:1188 +msgid "Logs the duration of each completed SQL statement." +msgstr "Loggar tiden för varje avslutad SQL-sats." + +#: utils/misc/guc.c:1197 +msgid "Logs each query's parse tree." +msgstr "Loggar alla frågors parse-träd." + +#: utils/misc/guc.c:1206 +msgid "Logs each query's rewritten parse tree." +msgstr "Logga alla frågors omskrivet parse-träd." + +#: utils/misc/guc.c:1215 +msgid "Logs each query's execution plan." +msgstr "Logga alla frågors körningsplan." + +#: utils/misc/guc.c:1224 +msgid "Indents parse and plan tree displays." +msgstr "Indentera parse- och planeringsträdutskrifter." + +#: utils/misc/guc.c:1233 +msgid "Writes parser performance statistics to the server log." +msgstr "Skriver parserns prestandastatistik till serverloggen." + +#: utils/misc/guc.c:1242 +msgid "Writes planner performance statistics to the server log." +msgstr "Skriver planerarens prestandastatistik till serverloggen." + +#: utils/misc/guc.c:1251 +msgid "Writes executor performance statistics to the server log." +msgstr "Skriver exekverarens prestandastatistik till serverloggen." + +#: utils/misc/guc.c:1260 +msgid "Writes cumulative performance statistics to the server log." +msgstr "Skriver ackumulerad prestandastatistik till serverloggen." + +#: utils/misc/guc.c:1270 +msgid "Logs system resource usage statistics (memory and CPU) on various B-tree operations." +msgstr "Loggar statistik för användning av systemresurser (minne och CPU) för olika B-tree-operationer." + +#: utils/misc/guc.c:1282 +msgid "Collects information about executing commands." +msgstr "Samla information om körda kommandon." + +#: utils/misc/guc.c:1283 +msgid "Enables the collection of information on the currently executing command of each session, along with the time at which that command began execution." +msgstr "Slår på insamling av information om det nu körande kommandot för varje session, tillsammans med klockslaget när det kommandot började köra." + +#: utils/misc/guc.c:1293 +msgid "Collects statistics on database activity." +msgstr "Samla in statistik om databasaktivitet." + +#: utils/misc/guc.c:1302 +msgid "Collects timing statistics for database I/O activity." +msgstr "Samla in timingstatistik om databasens I/O-aktivitet." 
+ +#: utils/misc/guc.c:1312 +msgid "Updates the process title to show the active SQL command." +msgstr "Uppdaterar processtitel till att visa aktivt SQL-kommando." + +#: utils/misc/guc.c:1313 +msgid "Enables updating of the process title every time a new SQL command is received by the server." +msgstr "Slår på uppdatering av processtiteln varje gång ett nytt SQL-kommando tas emot av servern." + +#: utils/misc/guc.c:1326 +msgid "Starts the autovacuum subprocess." +msgstr "Starta autovacuum-barnprocess." + +#: utils/misc/guc.c:1336 +msgid "Generates debugging output for LISTEN and NOTIFY." +msgstr "Skapar debug-output för LISTEN och NOTIFY." + +#: utils/misc/guc.c:1348 +msgid "Emits information about lock usage." +msgstr "Visar information om låsanvändning." + +#: utils/misc/guc.c:1358 +msgid "Emits information about user lock usage." +msgstr "Visar information om användares låsanvändning." + +#: utils/misc/guc.c:1368 +msgid "Emits information about lightweight lock usage." +msgstr "Visar information om lättviktig låsanvändning." + +#: utils/misc/guc.c:1378 +msgid "Dumps information about all current locks when a deadlock timeout occurs." +msgstr "Dumpar information om alla aktuella lås när en deadlock-timeout sker." + +#: utils/misc/guc.c:1390 +msgid "Logs long lock waits." +msgstr "Loggar långa väntetider på lås." + +#: utils/misc/guc.c:1400 +msgid "Logs the host name in the connection logs." +msgstr "Loggar hostnamnet i anslutningsloggen." + +#: utils/misc/guc.c:1401 +msgid "By default, connection logs only show the IP address of the connecting host. If you want them to show the host name you can turn this on, but depending on your host name resolution setup it might impose a non-negligible performance penalty." +msgstr "Som standard visar anslutningsloggen bara IP-adressen för den anslutande värden. Om du vill att värdnamnet skall visas så kan du slå på detta men beroende på hur uppsättningen av namnuppslag är gjored så kan detta ha en markant prestandapåverkan." + +#: utils/misc/guc.c:1412 +msgid "Treats \"expr=NULL\" as \"expr IS NULL\"." +msgstr "Tolkar \"uttryck=NULL\" som \"uttryck IS NULL\"." + +#: utils/misc/guc.c:1413 +msgid "When turned on, expressions of the form expr = NULL (or NULL = expr) are treated as expr IS NULL, that is, they return true if expr evaluates to the null value, and false otherwise. The correct behavior of expr = NULL is to always return null (unknown)." +msgstr "Om påslagen så kommer uttryck på formen uttryck = NULL (eller NULL = uttryck) att behandlas som uttryck IS NULL, det vill säga returnera true om uttryck evalueras till värdet null eller evalueras till false annars. Det korrekta beteendet för uttryck = NULL är att alltid returnera null (okänt)." + +#: utils/misc/guc.c:1425 +msgid "Enables per-database user names." +msgstr "Aktiverar användarnamn per databas." + +#: utils/misc/guc.c:1434 +msgid "Sets the default read-only status of new transactions." +msgstr "Ställer in standard read-only-status för nya transaktioner." + +#: utils/misc/guc.c:1443 +msgid "Sets the current transaction's read-only status." +msgstr "Ställer in nuvarande transaktions read-only-status." + +#: utils/misc/guc.c:1453 +msgid "Sets the default deferrable status of new transactions." +msgstr "Ställer in standard deferrable-status för nya transaktioner." + +#: utils/misc/guc.c:1462 +msgid "Whether to defer a read-only serializable transaction until it can be executed with no possible serialization failures." 
+msgstr "Bestämmer om en serialiserbar transaktion för läsning kommer fördröjas tills den kan köras utan serialiseringsfel." + +#: utils/misc/guc.c:1472 +msgid "Enable row security." +msgstr "Aktiverar radsäkerhet." + +#: utils/misc/guc.c:1473 +msgid "When enabled, row security will be applied to all users." +msgstr "Om aktiv så kommer radsäkerhet användas för alla användare." + +#: utils/misc/guc.c:1481 +msgid "Check function bodies during CREATE FUNCTION." +msgstr "Kontrollera funktionskroppen vid CREATE FUNCTION." + +#: utils/misc/guc.c:1490 +msgid "Enable input of NULL elements in arrays." +msgstr "Aktiverar inmatning av NULL-element i arrayer." + +#: utils/misc/guc.c:1491 +msgid "When turned on, unquoted NULL in an array input value means a null value; otherwise it is taken literally." +msgstr "Om påslagen så kommer ej citerade NULL i indatavärden för en array betyda värdet null, annars tolkas det bokstavligt." + +#: utils/misc/guc.c:1501 +msgid "Create new tables with OIDs by default." +msgstr "Skapa nya tabeller med OID:er som standard." + +#: utils/misc/guc.c:1510 +msgid "Start a subprocess to capture stderr output and/or csvlogs into log files." +msgstr "Starta en subprocess för att fånga output från stderr och/eller csv-loggar till loggfiler." + +#: utils/misc/guc.c:1519 +msgid "Truncate existing log files of same name during log rotation." +msgstr "Trunkera existerande loggfiler med samma namn under loggrotering." + +#: utils/misc/guc.c:1530 +msgid "Emit information about resource usage in sorting." +msgstr "Skicka ut information om resursanvändning vid sortering." + +#: utils/misc/guc.c:1544 +msgid "Generate debugging output for synchronized scanning." +msgstr "Generera debug-output för synkroniserad skanning." + +#: utils/misc/guc.c:1559 +msgid "Enable bounded sorting using heap sort." +msgstr "Slår på begränsad sortering med heap-sort." + +#: utils/misc/guc.c:1572 +msgid "Emit WAL-related debugging output." +msgstr "Skicka ut WAL-relaterad debug-data." + +#: utils/misc/guc.c:1584 +msgid "Datetimes are integer based." +msgstr "Datetime är heltalsbaserad" + +#: utils/misc/guc.c:1595 +msgid "Sets whether Kerberos and GSSAPI user names should be treated as case-insensitive." +msgstr "Bestämmer om Kerberos- och GSSAPI-användarnamn skall tolkas skiftlägesokänsligt." + +#: utils/misc/guc.c:1605 +msgid "Warn about backslash escapes in ordinary string literals." +msgstr "Varna om backåtstreck-escape i vanliga stränglitteraler." + +#: utils/misc/guc.c:1615 +msgid "Causes '...' strings to treat backslashes literally." +msgstr "Gör att '...'-stängar tolkar bakåtstreck bokstavligt." + +#: utils/misc/guc.c:1626 +msgid "Enable synchronized sequential scans." +msgstr "Slå på synkroniserad sekvensiell skanning." + +#: utils/misc/guc.c:1636 +msgid "Allows connections and queries during recovery." +msgstr "Tillåt anslutningar och frågor under återställning." + +#: utils/misc/guc.c:1646 +msgid "Allows feedback from a hot standby to the primary that will avoid query conflicts." +msgstr "Tillåter feedback från en hot standby till primären för att undvika frågekonflikter." + +#: utils/misc/guc.c:1656 +msgid "Allows modifications of the structure of system tables." +msgstr "Tillåter strukturförändringar av systemtabeller." + +#: utils/misc/guc.c:1667 +msgid "Disables reading from system indexes." +msgstr "Stänger av läsning från systemindex." + +#: utils/misc/guc.c:1668 +msgid "It does not prevent updating the indexes, so it is safe to use. The worst consequence is slowness." 
+msgstr "Det förhindrar inte uppdatering av index så det är helt säkert att använda. Det värsta som kan hända är att det är långsamt." + +#: utils/misc/guc.c:1679 +msgid "Enables backward compatibility mode for privilege checks on large objects." +msgstr "Slår på bakåtkompabilitetsläge för rättighetskontroller på stora objekt." + +#: utils/misc/guc.c:1680 +msgid "Skips privilege checks when reading or modifying large objects, for compatibility with PostgreSQL releases prior to 9.0." +msgstr "Hoppar över rättighetskontroller vid läsning eller modifiering av stora objekt, för kompabilitet med PostgreSQL-releaser innan 9.0." + +#: utils/misc/guc.c:1690 +msgid "Emit a warning for constructs that changed meaning since PostgreSQL 9.4." +msgstr "Skicka ut varning för konstruktioner som ändrat semantik sedan PostgreSQL 9.4." + +#: utils/misc/guc.c:1700 +msgid "When generating SQL fragments, quote all identifiers." +msgstr "När SQL-fragment genereras så citera alla identifierare." + +#: utils/misc/guc.c:1710 +msgid "Shows whether data checksums are turned on for this cluster." +msgstr "Visar om datachecksummor är påslagna för detta kluster." + +#: utils/misc/guc.c:1721 +msgid "Add sequence number to syslog messages to avoid duplicate suppression." +msgstr "Lägg till sekvensnummer till syslog-meddelanden för att undvika att duplikat tas bort." + +#: utils/misc/guc.c:1731 +msgid "Split messages sent to syslog by lines and to fit into 1024 bytes." +msgstr "Dela meddelanden som skickas till syslog till egna rader och begränsa till 1024 byte." + +#: utils/misc/guc.c:1741 +msgid "Controls whether Gather and Gather Merge also run subplans." +msgstr "Bestämmer om \"Gather\" och \"Gather Merge\" också exekverar subplaner." + +#: utils/misc/guc.c:1742 +msgid "Should gather nodes also run subplans, or just gather tuples?" +msgstr "Skall gather-noder också exekvera subplaner eller bara samla in tupler?" + +#: utils/misc/guc.c:1751 +msgid "Allow JIT compilation." +msgstr "Tillåt JIT-kompilering." + +#: utils/misc/guc.c:1761 +msgid "Register JIT compiled function with debugger." +msgstr "Registrera JIT-kompilerad funktion hos debuggern." + +#: utils/misc/guc.c:1778 +msgid "Write out LLVM bitcode to facilitate JIT debugging." +msgstr "Skriv ut LLVM-bitkod för att möjliggöra JIT-debuggning." + +#: utils/misc/guc.c:1789 +msgid "Allow JIT compilation of expressions." +msgstr "Tillåt JIT-kompilering av uttryck." + +#: utils/misc/guc.c:1800 +msgid "Register JIT compiled function with perf profiler." +msgstr "Registrera JIT-kompilerad funktion med perf-profilerare." + +#: utils/misc/guc.c:1817 +msgid "Allow JIT compilation of tuple deforming." +msgstr "Tillåt JIT-kompilering av tupeluppdelning." + +#: utils/misc/guc.c:1837 +msgid "Forces a switch to the next WAL file if a new file has not been started within N seconds." +msgstr "Tvingar byte till nästa WAL-fil om en ny fil inte har startats inom N sekunder." + +#: utils/misc/guc.c:1848 +msgid "Waits N seconds on connection startup after authentication." +msgstr "Väntar N sekunder vid anslutningsstart efter authentisering." + +#: utils/misc/guc.c:1849 utils/misc/guc.c:2400 +msgid "This allows attaching a debugger to the process." +msgstr "Detta tillåter att man ansluter en debugger till processen." + +#: utils/misc/guc.c:1858 +msgid "Sets the default statistics target." +msgstr "Sätter standardstatistikmålet." + +#: utils/misc/guc.c:1859 +msgid "This applies to table columns that have not had a column-specific target set via ALTER TABLE SET STATISTICS." 
+msgstr "Detta gäller tabellkolumner som inte har ett kolumnspecifikt mål satt med ALTER TABLE SET STATISTICS." + +#: utils/misc/guc.c:1868 +msgid "Sets the FROM-list size beyond which subqueries are not collapsed." +msgstr "Sätter en övre gräns på FROM-listans storlek där subfrågor slås isär." + +#: utils/misc/guc.c:1870 +msgid "The planner will merge subqueries into upper queries if the resulting FROM list would have no more than this many items." +msgstr "Planeraren kommer slå samman subfrågor med yttre frågor om den resulterande FROM-listan inte har fler än så här många poster." + +#: utils/misc/guc.c:1880 +msgid "Sets the FROM-list size beyond which JOIN constructs are not flattened." +msgstr "Sätter en övre gräns på FROM-listans storlek där JOIN-konstruktioner plattas till." + +#: utils/misc/guc.c:1882 +msgid "The planner will flatten explicit JOIN constructs into lists of FROM items whenever a list of no more than this many items would result." +msgstr "Planeraren kommer platta till explicita JOIN-konstruktioner till listor av FROM-poster när resultatet blir en lista med max så här många poster." + +#: utils/misc/guc.c:1892 +msgid "Sets the threshold of FROM items beyond which GEQO is used." +msgstr "Sätter en undre gräns på antal FROM-poster när GEQO används." + +#: utils/misc/guc.c:1901 +msgid "GEQO: effort is used to set the default for other GEQO parameters." +msgstr "GEQO: effort används som standard för andra GEQO-parametrar." + +#: utils/misc/guc.c:1910 +msgid "GEQO: number of individuals in the population." +msgstr "GEQO: antal individer i populationen." + +#: utils/misc/guc.c:1911 utils/misc/guc.c:1920 +msgid "Zero selects a suitable default value." +msgstr "Noll väljer ett lämpligt standardvärde." + +#: utils/misc/guc.c:1919 +msgid "GEQO: number of iterations of the algorithm." +msgstr "GEQO: antal iterationer för algoritmen." + +#: utils/misc/guc.c:1930 +msgid "Sets the time to wait on a lock before checking for deadlock." +msgstr "Sätter tiden som väntas på ett lås innan kontroll av deadlock sker." + +#: utils/misc/guc.c:1941 +msgid "Sets the maximum delay before canceling queries when a hot standby server is processing archived WAL data." +msgstr "Sätter maximal fördröjning innan frågor avbryts när en \"hot standby\"-server processar arkiverad WAL-data." + +#: utils/misc/guc.c:1952 +msgid "Sets the maximum delay before canceling queries when a hot standby server is processing streamed WAL data." +msgstr "Sätter maximal fördröjning innan frågor avbryts när en \"hot stanby\"-server processar strömmad WAL-data." + +#: utils/misc/guc.c:1963 +msgid "Sets the maximum interval between WAL receiver status reports to the primary." +msgstr "Sätter maximalt intervall mellan statusrapporter till primären från WAL-mottagaren." + +#: utils/misc/guc.c:1974 +msgid "Sets the maximum wait time to receive data from the primary." +msgstr "Sätter maximal väntetid för att ta emot data från primären." + +#: utils/misc/guc.c:1985 +msgid "Sets the maximum number of concurrent connections." +msgstr "Sätter maximalt antal samtidiga anslutningar." + +#: utils/misc/guc.c:1996 +msgid "Sets the number of connection slots reserved for superusers." +msgstr "Sätter antalet anslutningsslottar som reserverats för superanvändare." + +#: utils/misc/guc.c:2010 +msgid "Sets the number of shared memory buffers used by the server." +msgstr "Sätter antalet delade minnesbuffrar som används av servern." + +#: utils/misc/guc.c:2021 +msgid "Sets the maximum number of temporary buffers used by each session." 
+msgstr "Sätter maximalt antal temporära buffertar som används per session." + +#: utils/misc/guc.c:2032 +msgid "Sets the TCP port the server listens on." +msgstr "Sätter TCP-porten som servern lyssnar på." + +#: utils/misc/guc.c:2042 +msgid "Sets the access permissions of the Unix-domain socket." +msgstr "Sätter accessrättigheter för Unix-domainuttag (socket)." + +#: utils/misc/guc.c:2043 +msgid "Unix-domain sockets use the usual Unix file system permission set. The parameter value is expected to be a numeric mode specification in the form accepted by the chmod and umask system calls. (To use the customary octal format the number must start with a 0 (zero).)" +msgstr "Unixdomänuttag (socket) använder unix vanliga filsystemsrättigheter. Parametervärdet förväntas vara en numerisk rättighetsangivelse så som accepteras av systemanropen chmod och umask. (För att använda det vanliga oktala formatet så måste numret börja med 0 (noll).)" + +#: utils/misc/guc.c:2057 +msgid "Sets the file permissions for log files." +msgstr "Sätter filrättigheter för loggfiler." + +#: utils/misc/guc.c:2058 +msgid "The parameter value is expected to be a numeric mode specification in the form accepted by the chmod and umask system calls. (To use the customary octal format the number must start with a 0 (zero).)" +msgstr "Parametervärdet förväntas vara en numerisk rättighetsangivelse så som accepteras av systemanropen chmod och umask. (För att använda det vanliga oktala formatet så måste numret börja med 0 (noll).)" + +#: utils/misc/guc.c:2072 +msgid "Mode of the data directory." +msgstr "Läge för datakatalog." + +#: utils/misc/guc.c:2073 +msgid "The parameter value is a numeric mode specification in the form accepted by the chmod and umask system calls. (To use the customary octal format the number must start with a 0 (zero).)" +msgstr "Parametervärdet är en numerisk rättighetsangivelse så som accepteras av systemanropen chmod och umask. (För att använda det vanliga oktala formatet så måste numret börja med 0 (noll).)" + +#: utils/misc/guc.c:2086 +msgid "Sets the maximum memory to be used for query workspaces." +msgstr "Sätter maximalt minne som används för frågors arbetsyta." + +#: utils/misc/guc.c:2087 +msgid "This much memory can be used by each internal sort operation and hash table before switching to temporary disk files." +msgstr "Så här mycket minne kan användas av varje intern sorteringsoperation resp. hash-tabell innan temporära filer på disk börjar användas." + +#: utils/misc/guc.c:2099 +msgid "Sets the maximum memory to be used for maintenance operations." +msgstr "Sätter det maximala minnet som får användas för underhållsoperationer." + +#: utils/misc/guc.c:2100 +msgid "This includes operations such as VACUUM and CREATE INDEX." +msgstr "Detta inkluderar operationer som VACUUM och CREATE INDEX." + +#: utils/misc/guc.c:2115 +msgid "Sets the maximum stack depth, in kilobytes." +msgstr "Sätter det maximala stackdjupet, i kilobyte." + +#: utils/misc/guc.c:2126 +msgid "Limits the total size of all temporary files used by each process." +msgstr "Begränsar den totala storleken för alla temporära filer som används i en process." + +#: utils/misc/guc.c:2127 +msgid "-1 means no limit." +msgstr "-1 betyder ingen gräns." + +#: utils/misc/guc.c:2137 +msgid "Vacuum cost for a page found in the buffer cache." +msgstr "Vacuum-kostnad för en sida som hittas i buffer-cache:n." + +#: utils/misc/guc.c:2147 +msgid "Vacuum cost for a page not found in the buffer cache." 
+msgstr "Vacuum-kostnad för en sida som inte hittas i buffer-cache:n." + +#: utils/misc/guc.c:2157 +msgid "Vacuum cost for a page dirtied by vacuum." +msgstr "Vacuum-kostnad för sidor som smutsats ner vid städning." + +#: utils/misc/guc.c:2167 +msgid "Vacuum cost amount available before napping." +msgstr "Vacuum-kostnad kvar innan pausande." + +#: utils/misc/guc.c:2177 +msgid "Vacuum cost delay in milliseconds." +msgstr "Städkostfördröjning i millisekunder." + +#: utils/misc/guc.c:2188 +msgid "Vacuum cost delay in milliseconds, for autovacuum." +msgstr "Städkostfördröjning i millisekunder, för autovacuum." + +#: utils/misc/guc.c:2199 +msgid "Vacuum cost amount available before napping, for autovacuum." +msgstr "Vacuum-kostnad kvar innan pausande, för autovacuum." + +#: utils/misc/guc.c:2209 +msgid "Sets the maximum number of simultaneously open files for each server process." +msgstr "Sätter det maximala antalet filer som en serverprocess kan ha öppna på en gång." + +#: utils/misc/guc.c:2222 +msgid "Sets the maximum number of simultaneously prepared transactions." +msgstr "Sätter det maximala antalet förberedda transaktioner man får ha på en gång." + +#: utils/misc/guc.c:2233 +msgid "Sets the minimum OID of tables for tracking locks." +msgstr "Sätter minsta tabell-OID för spårning av lås." + +#: utils/misc/guc.c:2234 +msgid "Is used to avoid output on system tables." +msgstr "Används för att undvika utdata för systemtabeller." + +#: utils/misc/guc.c:2243 +msgid "Sets the OID of the table with unconditionally lock tracing." +msgstr "Sätter OID för tabellen med ovillkorlig låsspårning." + +#: utils/misc/guc.c:2255 +msgid "Sets the maximum allowed duration of any statement." +msgstr "Sätter den maximala tiden som en sats får köra." + +#: utils/misc/guc.c:2256 utils/misc/guc.c:2267 utils/misc/guc.c:2278 +msgid "A value of 0 turns off the timeout." +msgstr "Värdet 0 stänger av timeout:en." + +#: utils/misc/guc.c:2266 +msgid "Sets the maximum allowed duration of any wait for a lock." +msgstr "Sätter den maximala tiden som man får vänta på ett lås." + +#: utils/misc/guc.c:2277 +msgid "Sets the maximum allowed duration of any idling transaction." +msgstr "Sätter den maximala tiden som en transaktion tillås vara \"idle\"." + +#: utils/misc/guc.c:2288 +msgid "Minimum age at which VACUUM should freeze a table row." +msgstr "Minimal ålder där VACUUM skall frysa en tabellrad." + +#: utils/misc/guc.c:2298 +msgid "Age at which VACUUM should scan whole table to freeze tuples." +msgstr "Ålder där VACUUM skall skanna hela tabellen för att frysa tupler." + +#: utils/misc/guc.c:2308 +msgid "Minimum age at which VACUUM should freeze a MultiXactId in a table row." +msgstr "Minsta ålder där VACUUM skall frysa en MultiXactId i en tabellrad." + +#: utils/misc/guc.c:2318 +msgid "Multixact age at which VACUUM should scan whole table to freeze tuples." +msgstr "Multixact-ålder där VACUUM skall skanna hela tabellen för att frysa tupler." + +#: utils/misc/guc.c:2328 +msgid "Number of transactions by which VACUUM and HOT cleanup should be deferred, if any." +msgstr "Antalet transaktioner som VACUUM och HOT-städning skall fördröjas (om någon)." + +#: utils/misc/guc.c:2341 +msgid "Sets the maximum number of locks per transaction." +msgstr "Sätter det maximala antalet lås per transaktion." + +#: utils/misc/guc.c:2342 +msgid "The shared lock table is sized on the assumption that at most max_locks_per_transaction * max_connections distinct objects will need to be locked at any one time." 
+msgstr "Den delade låstabellen har storlek efter antagandet att maximalt max_locks_per_transaction * max_connections olika objekt kommer behöva låsas vid en tidpunkt." + +#: utils/misc/guc.c:2353 +msgid "Sets the maximum number of predicate locks per transaction." +msgstr "Sätter det maximala antalet predikatlås per transaktion." + +#: utils/misc/guc.c:2354 +msgid "The shared predicate lock table is sized on the assumption that at most max_pred_locks_per_transaction * max_connections distinct objects will need to be locked at any one time." +msgstr "Den delade predikatlåstabellen har storlek efter antagandet att maximalt max_pred_locks_per_transaction * max_connections olika objekt kommer behöva låsas vid en tidpunkt." + +#: utils/misc/guc.c:2365 +msgid "Sets the maximum number of predicate-locked pages and tuples per relation." +msgstr "Sätter det maximala antalet predikatlåsta sidor och tupler per relation." + +#: utils/misc/guc.c:2366 +msgid "If more than this total of pages and tuples in the same relation are locked by a connection, those locks are replaced by a relation-level lock." +msgstr "Om fler än detta totala antal sidor och tupler för samma relation är låsta av en anslutning så ersätts dessa lås med ett lås på relationen." + +#: utils/misc/guc.c:2376 +msgid "Sets the maximum number of predicate-locked tuples per page." +msgstr "Sätter det maximala antalet predikatlåsta tupler per sida." + +#: utils/misc/guc.c:2377 +msgid "If more than this number of tuples on the same page are locked by a connection, those locks are replaced by a page-level lock." +msgstr "Om fler än detta antal tupler på samma sida är låsta av en anslutning så ersätts dessa lås med ett lås på sidan." + +#: utils/misc/guc.c:2387 +msgid "Sets the maximum allowed time to complete client authentication." +msgstr "Sätter maximalt tillåten tid att slutföra klientautentisering." + +#: utils/misc/guc.c:2399 +msgid "Waits N seconds on connection startup before authentication." +msgstr "Väntar N sekunder efter anslutning innan autentisering." + +#: utils/misc/guc.c:2410 +msgid "Sets the number of WAL files held for standby servers." +msgstr "Sätter antal WAL-filer som sparas för standby-servrar." + +#: utils/misc/guc.c:2420 +msgid "Sets the minimum size to shrink the WAL to." +msgstr "Sätter maximal storlek som WAL kan krympas till." + +#: utils/misc/guc.c:2432 +msgid "Sets the WAL size that triggers a checkpoint." +msgstr "Sätter WAL-storlek som utlöser en checkpoint." + +#: utils/misc/guc.c:2444 +msgid "Sets the maximum time between automatic WAL checkpoints." +msgstr "Sätter maximal tid mellan två automatiska WAL-checkpoint:er." + +#: utils/misc/guc.c:2455 +msgid "Enables warnings if checkpoint segments are filled more frequently than this." +msgstr "Slår på varning om checkpoint-segment fylls oftare än det här." + +#: utils/misc/guc.c:2457 +msgid "Write a message to the server log if checkpoints caused by the filling of checkpoint segment files happens more frequently than this number of seconds. Zero turns off the warning." +msgstr "Skriv ett meddelande i serverloggen om checkpoint:er som orsakas av fulla checkpoint-segmentfiler händer oftare än detta antal sekunder. Noll stänger av varningen." + +#: utils/misc/guc.c:2469 utils/misc/guc.c:2626 utils/misc/guc.c:2653 +msgid "Number of pages after which previously performed writes are flushed to disk." +msgstr "Antal sidor varefter tidigare skrivningar flush:as till disk." 
+ +#: utils/misc/guc.c:2480 +msgid "Sets the number of disk-page buffers in shared memory for WAL." +msgstr "Sätter antal buffrar för disksidor i delat minne för WAL." + +#: utils/misc/guc.c:2491 +msgid "Time between WAL flushes performed in the WAL writer." +msgstr "Tid mellan WAL-flush:ar utförda i WAL-skrivaren." + +#: utils/misc/guc.c:2502 +msgid "Amount of WAL written out by WAL writer that triggers a flush." +msgstr "Mängden WAL utskriven av WAL-skrivaren som utlöser en flush." + +#: utils/misc/guc.c:2514 +msgid "Sets the maximum number of simultaneously running WAL sender processes." +msgstr "Sätter maximalt antal samtidigt körande WAL-sändarprocesser." + +#: utils/misc/guc.c:2525 +msgid "Sets the maximum number of simultaneously defined replication slots." +msgstr "Sätter maximalt antal samtidigt definierade replikeringsslottar." + +#: utils/misc/guc.c:2535 +msgid "Sets the maximum time to wait for WAL replication." +msgstr "Sätter maximal tid att vänta på WAL-replikering." + +#: utils/misc/guc.c:2546 +msgid "Sets the delay in microseconds between transaction commit and flushing WAL to disk." +msgstr "Sätter fördröjning i mikrosekunder mellan transaktions-commit och flush:ning av WAL till disk." + +#: utils/misc/guc.c:2558 +msgid "Sets the minimum concurrent open transactions before performing commit_delay." +msgstr "Sätter minsta antal samtida öppna transaktioner innan vi utför en commit_delay." + +#: utils/misc/guc.c:2569 +msgid "Sets the number of digits displayed for floating-point values." +msgstr "Sätter antal siffror som visas för flyttalsvärden." + +#: utils/misc/guc.c:2570 +msgid "This affects real, double precision, and geometric data types. The parameter value is added to the standard number of digits (FLT_DIG or DBL_DIG as appropriate)." +msgstr "Detta påverkar real, double precision och geometriska datatyper. Parameterns värde läggs till standard antal siffror (FLT_DIG eller DBL_DIG respektive)." + +#: utils/misc/guc.c:2581 +msgid "Sets the minimum execution time above which statements will be logged." +msgstr "Sätter minimal körtid där långsammare satser kommer loggas." + +#: utils/misc/guc.c:2583 +msgid "Zero prints all queries. -1 turns this feature off." +msgstr "Noll skriver ut alla frågor. -1 stänger av denna finess." + +#: utils/misc/guc.c:2593 +msgid "Sets the minimum execution time above which autovacuum actions will be logged." +msgstr "Sätter minimal körtid där långsammare autovacuum-operationer kommer loggas." + +#: utils/misc/guc.c:2595 +msgid "Zero prints all actions. -1 turns autovacuum logging off." +msgstr "Noll skriver ut alla operationer. -1 stänger av autovacuum-loggning." + +#: utils/misc/guc.c:2605 +msgid "Background writer sleep time between rounds." +msgstr "Bakgrundsskrivarens sleep-tid mellan körningar." + +#: utils/misc/guc.c:2616 +msgid "Background writer maximum number of LRU pages to flush per round." +msgstr "Bakgrundsskrivarens maximala antal LRU-sidor som flush:as per omgång." + +#: utils/misc/guc.c:2639 +msgid "Number of simultaneous requests that can be handled efficiently by the disk subsystem." +msgstr "Antal samtidiga förfrågningar som effektivt kan hanteras av disksystemet." + +#: utils/misc/guc.c:2640 +msgid "For RAID arrays, this should be approximately the number of drive spindles in the array." +msgstr "För RAID-array:er så borde det vara ungefär så många som antalet spindlar i array:en." + +#: utils/misc/guc.c:2666 +msgid "Maximum number of concurrent worker processes." +msgstr "Maximalt antal samtidiga arbetsprocesser."
+ +#: utils/misc/guc.c:2678 +msgid "Maximum number of logical replication worker processes." +msgstr "Maximalt antal arbetsprocesser för logisk replikering." + +#: utils/misc/guc.c:2690 +msgid "Maximum number of table synchronization workers per subscription." +msgstr "Maximalt antal tabellsynkroniseringsarbetare per prenumeration." + +#: utils/misc/guc.c:2700 +msgid "Automatic log file rotation will occur after N minutes." +msgstr "Automatisk loggfilsrotering kommer ske efter N minuter." + +#: utils/misc/guc.c:2711 +msgid "Automatic log file rotation will occur after N kilobytes." +msgstr "Automatisk loggfilsrotering kommer ske efter N kilobyte." + +#: utils/misc/guc.c:2722 +msgid "Shows the maximum number of function arguments." +msgstr "Visar maximalt antal funktionsargument." + +#: utils/misc/guc.c:2733 +msgid "Shows the maximum number of index keys." +msgstr "Visar maximalt antal indexnycklar." + +#: utils/misc/guc.c:2744 +msgid "Shows the maximum identifier length." +msgstr "Visar den maximala identifierarlängden." + +#: utils/misc/guc.c:2755 +msgid "Shows the size of a disk block." +msgstr "Visar storleken på ett diskblock." + +#: utils/misc/guc.c:2766 +msgid "Shows the number of pages per disk file." +msgstr "Visar antal sidor per diskfil." + +#: utils/misc/guc.c:2777 +msgid "Shows the block size in the write ahead log." +msgstr "Visar blockstorleken i write-ahead-loggen." + +#: utils/misc/guc.c:2788 +msgid "Sets the time to wait before retrying to retrieve WAL after a failed attempt." +msgstr "Sätter väntetiden innan databasen försöker ta emot WAL efter ett misslyckat försök." + +#: utils/misc/guc.c:2800 +msgid "Shows the size of write ahead log segments." +msgstr "Visar storleken på write-ahead-log-segment." + +#: utils/misc/guc.c:2813 +msgid "Time to sleep between autovacuum runs." +msgstr "Tid att sova mellan körningar av autovacuum." + +#: utils/misc/guc.c:2823 +msgid "Minimum number of tuple updates or deletes prior to vacuum." +msgstr "Minst antal tupel-uppdateringar eller raderingar innan vacuum." + +#: utils/misc/guc.c:2832 +msgid "Minimum number of tuple inserts, updates, or deletes prior to analyze." +msgstr "Minsta antal tupel-insert, -update eller -delete innan analyze." + +#: utils/misc/guc.c:2842 +msgid "Age at which to autovacuum a table to prevent transaction ID wraparound." +msgstr "Ålder då autovacuum körs på en tabell för att förhindra wraparound på transaktions-ID." + +#: utils/misc/guc.c:2853 +msgid "Multixact age at which to autovacuum a table to prevent multixact wraparound." +msgstr "Ålder på multixact då autovacuum körs på en tabell för att förhindra wraparound på multixact." + +#: utils/misc/guc.c:2863 +msgid "Sets the maximum number of simultaneously running autovacuum worker processes." +msgstr "Sätter maximalt antal samtidigt körande arbetsprocesser för autovacuum." + +#: utils/misc/guc.c:2873 +msgid "Sets the maximum number of parallel processes per maintenance operation." +msgstr "Sätter maximalt antal parallella processer per underhållsoperation." + +#: utils/misc/guc.c:2883 +msgid "Sets the maximum number of parallel processes per executor node." +msgstr "Sätter maximalt antal parallella processer per exekveringsnod." + +#: utils/misc/guc.c:2893 +msgid "Sets the maximum number of parallel workers that can be active at one time." +msgstr "Sätter maximalt antal parallella arbetare som kan vara aktiva på en gång." + +#: utils/misc/guc.c:2903 +msgid "Sets the maximum memory to be used by each autovacuum worker process."
+msgstr "Sätter maximalt minne som kan användas av varje arbetsprocess för autovacuum." + +#: utils/misc/guc.c:2914 +msgid "Time before a snapshot is too old to read pages changed after the snapshot was taken." +msgstr "Tid innan ett snapshot är för gammalt för att läsa sidor som ändrats efter snapshot:en tagits." + +#: utils/misc/guc.c:2915 +msgid "A value of -1 disables this feature." +msgstr "Värdet -1 stänger av denna funktion." + +#: utils/misc/guc.c:2925 +msgid "Time between issuing TCP keepalives." +msgstr "Tid mellan skickande av TCP-keepalive." + +#: utils/misc/guc.c:2926 utils/misc/guc.c:2937 +msgid "A value of 0 uses the system default." +msgstr "Värdet 0 anger systemets standardvärde." + +#: utils/misc/guc.c:2936 +msgid "Time between TCP keepalive retransmits." +msgstr "Tid mellan omsändning av TCP-keepalive." + +#: utils/misc/guc.c:2947 +msgid "SSL renegotiation is no longer supported; this can only be 0." +msgstr "SSL-förhandling stöds inte längre; denna kan bara vara 0." + +#: utils/misc/guc.c:2958 +msgid "Maximum number of TCP keepalive retransmits." +msgstr "Maximalt antal omsändningar av TCP-keepalive." + +#: utils/misc/guc.c:2959 +msgid "This controls the number of consecutive keepalive retransmits that can be lost before a connection is considered dead. A value of 0 uses the system default." +msgstr "Detta bestämmer antalet keepalive-omsändingar i rad som kan försvinna innan en anslutning anses vara död. Värdet 0 betyder systemstandardvärdet." + +#: utils/misc/guc.c:2970 +msgid "Sets the maximum allowed result for exact search by GIN." +msgstr "Sätter maximalt tillåtna resultat för exakt sökning med GIN." + +#: utils/misc/guc.c:2981 +msgid "Sets the planner's assumption about the size of the disk cache." +msgstr "Sätter planerarens antagande om storleken på diskcachen." + +#: utils/misc/guc.c:2982 +msgid "That is, the portion of the kernel's disk cache that will be used for PostgreSQL data files. This is measured in disk pages, which are normally 8 kB each." +msgstr "Det är andelen av kärnans diskcache som kommer användas till PostgreSQLs datafiler. Det mäts i disksidor som normalt är 8 kb styck." + +#: utils/misc/guc.c:2994 +msgid "Sets the minimum amount of table data for a parallel scan." +msgstr "Sätter minsta mängd tabelldata för en parallell skanning." + +#: utils/misc/guc.c:2995 +msgid "If the planner estimates that it will read a number of table pages too small to reach this limit, a parallel scan will not be considered." +msgstr "Om planeraren beräknar att den kommer läsa för få tabellsidor för att nå denna gräns så kommer den inte försöka med en parallell skanning." + +#: utils/misc/guc.c:3005 +msgid "Sets the minimum amount of index data for a parallel scan." +msgstr "Anger minimala mängden indexdata för en parallell scan." + +#: utils/misc/guc.c:3006 +msgid "If the planner estimates that it will read a number of index pages too small to reach this limit, a parallel scan will not be considered." +msgstr "Om planeraren beräknar att den kommer läsa för få indexsidor för att nå denna gräns så kommer den inte försöka med en parallell skanning." + +#: utils/misc/guc.c:3017 +msgid "Shows the server version as an integer." +msgstr "Visar serverns version som ett heltal." + +#: utils/misc/guc.c:3028 +msgid "Log the use of temporary files larger than this number of kilobytes." +msgstr "Logga användning av temporära filer som är större än detta antal kilobyte." + +#: utils/misc/guc.c:3029 +msgid "Zero logs all files. The default is -1 (turning this feature off)." 
+msgstr "Noll loggar alla filer. Standard är -1 (stänger av denna finess)." + +#: utils/misc/guc.c:3039 +msgid "Sets the size reserved for pg_stat_activity.query, in bytes." +msgstr "Ställer in storleken reserverad för pg_stat_activity.query, i byte." + +#: utils/misc/guc.c:3050 +msgid "Sets the maximum size of the pending list for GIN index." +msgstr "Sätter maximal storlek på väntelistan för GIN-index." + +#: utils/misc/guc.c:3070 +msgid "Sets the planner's estimate of the cost of a sequentially fetched disk page." +msgstr "Ställer in planerarens estimat av kostnaden för att hämta en disksida sekvensiellt." + +#: utils/misc/guc.c:3080 +msgid "Sets the planner's estimate of the cost of a nonsequentially fetched disk page." +msgstr "Ställer in planerarens estimat av kostnaden för att hämta en disksida icke-sekvensiellt." + +#: utils/misc/guc.c:3090 +msgid "Sets the planner's estimate of the cost of processing each tuple (row)." +msgstr "Ställer in planerarens estimat av kostnaden för att processa varje tupel (rad)." + +#: utils/misc/guc.c:3100 +msgid "Sets the planner's estimate of the cost of processing each index entry during an index scan." +msgstr "Sätter planerarens kostnadsuppskattning för att processa varje indexpost under en indexskanning." + +#: utils/misc/guc.c:3110 +msgid "Sets the planner's estimate of the cost of processing each operator or function call." +msgstr "Sätter planerarens kostnadsuppskattning för att processa varje operator- eller funktions-anrop." + +#: utils/misc/guc.c:3120 +msgid "Sets the planner's estimate of the cost of passing each tuple (row) from worker to master backend." +msgstr "Sätter planerarens kostnadsuppskattning för att skicka varje tupel (rad) från en arbetare till huvud-backend:en. " + +#: utils/misc/guc.c:3130 +msgid "Sets the planner's estimate of the cost of starting up worker processes for parallel query." +msgstr "Sätter planerarens kostnadsuppskattning för att starta upp en arbetsprocess för en parallell fråga." + +#: utils/misc/guc.c:3141 +msgid "Perform JIT compilation if query is more expensive." +msgstr "Utför JIT-kompilering om frågan är dyrare." + +#: utils/misc/guc.c:3142 +msgid "-1 disables JIT compilation." +msgstr "-1 stänger av JIT-kompilering." + +#: utils/misc/guc.c:3151 +msgid "Optimize JITed functions if query is more expensive." +msgstr "Optimera JIT-funktioner om frågan är dyrare." + +#: utils/misc/guc.c:3152 +msgid "-1 disables optimization." +msgstr "-1 stänger av optimering." + +#: utils/misc/guc.c:3161 +msgid "Perform JIT inlining if query is more expensive." +msgstr "Utför JIT-\"inlining\" om frågan är dyrare." + +#: utils/misc/guc.c:3162 +msgid "-1 disables inlining." +msgstr "-1 stänger av \"inlining\"" + +#: utils/misc/guc.c:3171 +msgid "Sets the planner's estimate of the fraction of a cursor's rows that will be retrieved." +msgstr "Sätter planerarens uppskattning av hur stor del av markörens rader som kommer hämtas. " + +#: utils/misc/guc.c:3182 +msgid "GEQO: selective pressure within the population." +msgstr "GEQO: selektionstryck inom populationen." + +#: utils/misc/guc.c:3192 +msgid "GEQO: seed for random path selection." +msgstr "GEQO: slumptalsfrö för val av slumpad sökväg." + +#: utils/misc/guc.c:3202 +msgid "Multiple of the average buffer usage to free per round." +msgstr "Multipel av genomsnittlig bufferanvändning som frias per runda." + +#: utils/misc/guc.c:3212 +msgid "Sets the seed for random-number generation." +msgstr "Sätter fröet för slumptalsgeneratorn." 
+ +#: utils/misc/guc.c:3223 +msgid "Number of tuple updates or deletes prior to vacuum as a fraction of reltuples." +msgstr "Antalet tupeluppdateringar eller borttagningar innan vacuum relativt reltuples." + +#: utils/misc/guc.c:3232 +msgid "Number of tuple inserts, updates, or deletes prior to analyze as a fraction of reltuples." +msgstr "Antalet tupelinsättningar, uppdateringar eller borttagningar innan analyze relativt reltuples." + +#: utils/misc/guc.c:3242 +msgid "Time spent flushing dirty buffers during checkpoint, as fraction of checkpoint interval." +msgstr "Tid lagd på att flusha nedsmutsade buffrar vid checkpoint relativt checkpoint-intervallet." + +#: utils/misc/guc.c:3252 +msgid "Number of tuple inserts prior to index cleanup as a fraction of reltuples." +msgstr "Antal tupelinsättningar innan indexuppstädning relativt reltuples." + +#: utils/misc/guc.c:3271 +msgid "Sets the shell command that will be called to archive a WAL file." +msgstr "Sätter shell-kommandot som kommer anropas för att arkivera en WAL-fil." + +#: utils/misc/guc.c:3281 +msgid "Sets the client's character set encoding." +msgstr "Ställer in klientens teckenkodning." + +#: utils/misc/guc.c:3292 +msgid "Controls information prefixed to each log line." +msgstr "Styr information prefixat till varje loggrad." + +#: utils/misc/guc.c:3293 +msgid "If blank, no prefix is used." +msgstr "Om tom så används inget prefix." + +#: utils/misc/guc.c:3302 +msgid "Sets the time zone to use in log messages." +msgstr "Sätter tidszonen som används i loggmeddelanden." + +#: utils/misc/guc.c:3312 +msgid "Sets the display format for date and time values." +msgstr "Sätter displayformat för datum och tidvärden." + +#: utils/misc/guc.c:3313 +msgid "Also controls interpretation of ambiguous date inputs." +msgstr "Styr också tolkning av tvetydig datumindata." + +#: utils/misc/guc.c:3324 +msgid "Sets the default tablespace to create tables and indexes in." +msgstr "Ställer in standard tabellutrymme där tabeller och index skapas." + +#: utils/misc/guc.c:3325 +msgid "An empty string selects the database's default tablespace." +msgstr "En tom sträng väljer databasens standardtabellutrymme." + +#: utils/misc/guc.c:3335 +msgid "Sets the tablespace(s) to use for temporary tables and sort files." +msgstr "Ställer in tablespace för temporära tabeller och sorteringsfiler." + +#: utils/misc/guc.c:3346 +msgid "Sets the path for dynamically loadable modules." +msgstr "Sätter sökvägen till dynamiskt laddade moduler." + +#: utils/misc/guc.c:3347 +msgid "If a dynamically loadable module needs to be opened and the specified name does not have a directory component (i.e., the name does not contain a slash), the system will search this path for the specified file." +msgstr "Om en dynamiskt laddad modul behöver öppnas och det angivna namnet inte har en katalogkomponent (dvs, namnet inte innehåller snedstreck) så kommer systemet använda denna sökväg för filen." + +#: utils/misc/guc.c:3360 +msgid "Sets the location of the Kerberos server key file." +msgstr "Ställer in platsen för Kerberos servernyckelfil." + +#: utils/misc/guc.c:3371 +msgid "Sets the Bonjour service name." +msgstr "Sätter Bonjour-tjänstens namn." + +#: utils/misc/guc.c:3383 +msgid "Shows the collation order locale." +msgstr "Visar lokal för jämförelseordning." + +#: utils/misc/guc.c:3394 +msgid "Shows the character classification and case conversion locale." +msgstr "Visar lokal för teckenklassificering samt skiftlägeskonvertering." 
+ +#: utils/misc/guc.c:3405 +msgid "Sets the language in which messages are displayed." +msgstr "Sätter språket som meddelanden visas i." + +#: utils/misc/guc.c:3415 +msgid "Sets the locale for formatting monetary amounts." +msgstr "Sätter lokalen för att formatera monetära belopp." + +#: utils/misc/guc.c:3425 +msgid "Sets the locale for formatting numbers." +msgstr "Ställer in lokalen för att formatera nummer." + +#: utils/misc/guc.c:3435 +msgid "Sets the locale for formatting date and time values." +msgstr "Sätter lokalen för att formatera datum och tider." + +#: utils/misc/guc.c:3445 +msgid "Lists shared libraries to preload into each backend." +msgstr "Listar delade bibliotek som skall förladdas i varje backend." + +#: utils/misc/guc.c:3456 +msgid "Lists shared libraries to preload into server." +msgstr "Listar delade bibliotek som skall förladdas i servern." + +#: utils/misc/guc.c:3467 +msgid "Lists unprivileged shared libraries to preload into each backend." +msgstr "Listar ej privilegierade delade bibliotek som förladdas in i varje backend." + +#: utils/misc/guc.c:3478 +msgid "Sets the schema search order for names that are not schema-qualified." +msgstr "Sätter schemats sökordning för namn som inte är schema-prefixade." + +#: utils/misc/guc.c:3490 +msgid "Sets the server (database) character set encoding." +msgstr "Ställer in serverns (databasens) teckenkodning." + +#: utils/misc/guc.c:3502 +msgid "Shows the server version." +msgstr "Visar serverversionen." + +#: utils/misc/guc.c:3514 +msgid "Sets the current role." +msgstr "Ställer in den aktiva rollen." + +#: utils/misc/guc.c:3526 +msgid "Sets the session user name." +msgstr "Sätter sessionens användarnamn." + +#: utils/misc/guc.c:3537 +msgid "Sets the destination for server log output." +msgstr "Sätter serverloggens destination." + +#: utils/misc/guc.c:3538 +msgid "Valid values are combinations of \"stderr\", \"syslog\", \"csvlog\", and \"eventlog\", depending on the platform." +msgstr "Giltiga värden är kombinationer av \"stderr\", \"syslog\", \"csvlog\" och \"eventlog\", beroende på plattform." + +#: utils/misc/guc.c:3549 +msgid "Sets the destination directory for log files." +msgstr "Sätter destinationskatalogen för loggfiler." + +#: utils/misc/guc.c:3550 +msgid "Can be specified as relative to the data directory or as absolute path." +msgstr "Kan anges relativt datakatalogen eller som en absolut sökväg." + +#: utils/misc/guc.c:3560 +msgid "Sets the file name pattern for log files." +msgstr "Sätter filnamnsmallen för loggfiler." + +#: utils/misc/guc.c:3571 +msgid "Sets the program name used to identify PostgreSQL messages in syslog." +msgstr "Sätter programnamnet som används för att identifiera PostgreSQLs meddelanden i syslog." + +#: utils/misc/guc.c:3582 +msgid "Sets the application name used to identify PostgreSQL messages in the event log." +msgstr "Sätter applikationsnamnet som används för att identifiera PostgreSQLs meddelanden i händelseloggen." + +#: utils/misc/guc.c:3593 +msgid "Sets the time zone for displaying and interpreting time stamps." +msgstr "Ställer in tidszon för visande och tolkande av tidsstämplar." + +#: utils/misc/guc.c:3603 +msgid "Selects a file of time zone abbreviations." +msgstr "Väljer en fil för tidszonsförkortningar." + +#: utils/misc/guc.c:3613 +msgid "Sets the current transaction's isolation level." +msgstr "Sätter den aktuella transaktionsisolationsnivån." + +#: utils/misc/guc.c:3624 +msgid "Sets the owning group of the Unix-domain socket."
+msgstr "Sätter ägande grupp för Unix-domainuttaget (socket)." + +#: utils/misc/guc.c:3625 +msgid "The owning user of the socket is always the user that starts the server." +msgstr "Ägaren av uttaget (socker) är alltid användaren som startar servern." + +#: utils/misc/guc.c:3635 +msgid "Sets the directories where Unix-domain sockets will be created." +msgstr "Ställer in kataloger där Unix-domän-uttag (socket) kommer skapas." + +#: utils/misc/guc.c:3650 +msgid "Sets the host name or IP address(es) to listen to." +msgstr "Sätter värdnamn eller IP-adress(er) att lyssna på." + +#: utils/misc/guc.c:3665 +msgid "Sets the server's data directory." +msgstr "Ställer in serverns datakatalog." + +#: utils/misc/guc.c:3676 +msgid "Sets the server's main configuration file." +msgstr "Sätter serverns huvudkonfigurationsfil." + +#: utils/misc/guc.c:3687 +msgid "Sets the server's \"hba\" configuration file." +msgstr "Sätter serverns \"hba\"-konfigurationsfil." + +#: utils/misc/guc.c:3698 +msgid "Sets the server's \"ident\" configuration file." +msgstr "Sätter serverns \"ident\"-konfigurationsfil." + +#: utils/misc/guc.c:3709 +msgid "Writes the postmaster PID to the specified file." +msgstr "Skriver postmaster-PID till angiven fil." + +#: utils/misc/guc.c:3720 +msgid "Location of the SSL server certificate file." +msgstr "Plats för serverns SSL-certifikatfil." + +#: utils/misc/guc.c:3730 +msgid "Location of the SSL server private key file." +msgstr "Plats för serverns privata SSL-nyckelfil." + +#: utils/misc/guc.c:3740 +msgid "Location of the SSL certificate authority file." +msgstr "Plats för SSL-certifikats auktoritetsfil." + +#: utils/misc/guc.c:3750 +msgid "Location of the SSL certificate revocation list file." +msgstr "Plats för SSL-certifikats återkallningsfil." + +#: utils/misc/guc.c:3760 +msgid "Writes temporary statistics files to the specified directory." +msgstr "Skriver temporära statistikfiler till angiven katalog." + +#: utils/misc/guc.c:3771 +msgid "Number of synchronous standbys and list of names of potential synchronous ones." +msgstr "Antalet synkrona standby och en lista med namn på potentiellt synkrona sådana." + +#: utils/misc/guc.c:3782 +msgid "Sets default text search configuration." +msgstr "Ställer in standard textsökkonfiguration." + +#: utils/misc/guc.c:3792 +msgid "Sets the list of allowed SSL ciphers." +msgstr "Ställer in listan med tillåtna SSL-krypton." + +#: utils/misc/guc.c:3807 +msgid "Sets the curve to use for ECDH." +msgstr "Ställer in kurvan att använda för ECDH." + +#: utils/misc/guc.c:3822 +msgid "Location of the SSL DH parameters file." +msgstr "Plats för SSL DH-parameterfil." + +#: utils/misc/guc.c:3833 +msgid "Command to obtain passphrases for SSL." +msgstr "Kommando för att hämta lösenfraser för SSL." + +#: utils/misc/guc.c:3843 +msgid "Sets the application name to be reported in statistics and logs." +msgstr "Sätter applikationsnamn som rapporteras i statistik och loggar." + +#: utils/misc/guc.c:3854 +msgid "Sets the name of the cluster, which is included in the process title." +msgstr "Sätter namnet på klustret som inkluderas i processtiteln." + +#: utils/misc/guc.c:3865 +msgid "Sets the WAL resource managers for which WAL consistency checks are done." +msgstr "Sätter WAL-resurshanterare som WAL-konsistenskontoller görs med." + +#: utils/misc/guc.c:3866 +msgid "Full-page images will be logged for all data blocks and cross-checked against the results of WAL replay." 
+msgstr "Hela sidkopior kommer loggas för alla datablock och kontrolleras mot resultatet av en WAL-uppspelning." + +#: utils/misc/guc.c:3876 +msgid "JIT provider to use." +msgstr "JIT-leverantör som används." + +#: utils/misc/guc.c:3896 +msgid "Sets whether \"\\'\" is allowed in string literals." +msgstr "Ställer in hurvida \"\\'\" tillåts i sträng-literaler." + +#: utils/misc/guc.c:3906 +msgid "Sets the output format for bytea." +msgstr "Ställer in output-format för bytea." + +#: utils/misc/guc.c:3916 +msgid "Sets the message levels that are sent to the client." +msgstr "Ställer in meddelandenivåer som skickas till klienten." + +#: utils/misc/guc.c:3917 utils/misc/guc.c:3970 utils/misc/guc.c:3981 +#: utils/misc/guc.c:4047 +msgid "Each level includes all the levels that follow it. The later the level, the fewer messages are sent." +msgstr "Varje nivå inkluderar de efterföljande nivåerna. Ju senare nivå destå färre meddlanden skickas." + +#: utils/misc/guc.c:3927 +msgid "Enables the planner to use constraints to optimize queries." +msgstr "Slår på planerarens användning av integritetsvillkor för att optimera frågor." + +#: utils/misc/guc.c:3928 +msgid "Table scans will be skipped if their constraints guarantee that no rows match the query." +msgstr "Tabellskanningar kommer hoppas över om dess integritetsvillkor garanterar att inga rader komma matchas av frågan." + +#: utils/misc/guc.c:3938 +msgid "Sets the transaction isolation level of each new transaction." +msgstr "Ställer in isolationsnivån för nya transaktioner." + +#: utils/misc/guc.c:3948 +msgid "Sets the display format for interval values." +msgstr "Ställer in visningsformat för intervallvärden." + +#: utils/misc/guc.c:3959 +msgid "Sets the verbosity of logged messages." +msgstr "Ställer in pratighet för loggade meddelanden." + +#: utils/misc/guc.c:3969 +msgid "Sets the message levels that are logged." +msgstr "Ställer in meddelandenivåer som loggas." + +#: utils/misc/guc.c:3980 +msgid "Causes all statements generating error at or above this level to be logged." +msgstr "Gör att alla satser som genererar fel vid eller över denna nivå kommer loggas." + +#: utils/misc/guc.c:3991 +msgid "Sets the type of statements logged." +msgstr "Ställer in vilken sorts satser som loggas." + +#: utils/misc/guc.c:4001 +msgid "Sets the syslog \"facility\" to be used when syslog enabled." +msgstr "Ställer in syslog-\"facility\" som används när syslog är påslagen." + +#: utils/misc/guc.c:4016 +msgid "Sets the session's behavior for triggers and rewrite rules." +msgstr "Sätter sessionens beteende för utlösare och omskrivningsregler." + +#: utils/misc/guc.c:4026 +msgid "Sets the current transaction's synchronization level." +msgstr "Ställer in den nuvarande transaktionens synkroniseringsnivå." + +#: utils/misc/guc.c:4036 +msgid "Allows archiving of WAL files using archive_command." +msgstr "Tillåter arkivering av WAL-filer med hjälp av archive_command." + +#: utils/misc/guc.c:4046 +msgid "Enables logging of recovery-related debugging information." +msgstr "Slår på loggning av återställningsrelaterad debug-information." + +#: utils/misc/guc.c:4062 +msgid "Collects function-level statistics on database activity." +msgstr "Samlar in statistik på funktionsnivå över databasaktivitet." + +#: utils/misc/guc.c:4072 +msgid "Set the level of information written to the WAL." +msgstr "Ställer in mängden information som skrivs till WAL." + +#: utils/misc/guc.c:4082 +msgid "Selects the dynamic shared memory implementation used." 
+msgstr "Väljer implementation som används för dynamiskt delat minne." + +#: utils/misc/guc.c:4092 +msgid "Selects the method used for forcing WAL updates to disk." +msgstr "Väljer metod för att tvinga WAL-uppdateringar till disk." + +#: utils/misc/guc.c:4102 +msgid "Sets how binary values are to be encoded in XML." +msgstr "Ställer in hur binära värden kodas i XML." + +#: utils/misc/guc.c:4112 +msgid "Sets whether XML data in implicit parsing and serialization operations is to be considered as documents or content fragments." +msgstr "Sätter hurvida XML-data vid implicit parsning och serialiseringsoperationer ses som dokument eller innehållsfragment." + +#: utils/misc/guc.c:4123 +msgid "Use of huge pages on Linux or Windows." +msgstr "Använd stora sidor på Linux resp. Windows." + +#: utils/misc/guc.c:4133 +msgid "Forces use of parallel query facilities." +msgstr "Tvingar användning av parallella frågefinesser." + +#: utils/misc/guc.c:4134 +msgid "If possible, run query using a parallel worker and with parallel restrictions." +msgstr "Om det är möjligt så kör fråga med en parallell arbetare och med parallella begränsningar." + +#: utils/misc/guc.c:4143 +msgid "Encrypt passwords." +msgstr "Kryptera lösenord." + +#: utils/misc/guc.c:4144 +msgid "When a password is specified in CREATE USER or ALTER USER without writing either ENCRYPTED or UNENCRYPTED, this parameter determines whether the password is to be encrypted." +msgstr "När ett lösenord anges i CREATE USER eller ALTER USER utan man skrivit varken ENCRYPTED eller UNENCRYPTED så bestämmer denna parameter om lösenordet kommer krypteras." + +#: utils/misc/guc.c:4946 +#, c-format +msgid "%s: could not access directory \"%s\": %s\n" +msgstr "%s: kunde inte komma åt katalogen \"%s\": %s\n" + +#: utils/misc/guc.c:4951 +#, c-format +msgid "Run initdb or pg_basebackup to initialize a PostgreSQL data directory.\n" +msgstr "Kör initdb eller pg_basebackup för att initiera en PostgreSQL-datakatalog.\n" + +#: utils/misc/guc.c:4971 +#, c-format +msgid "" +"%s does not know where to find the server configuration file.\n" +"You must specify the --config-file or -D invocation option or set the PGDATA environment variable.\n" +msgstr "" +"%s vet inte var servens konfigurationsfil är.\n" +"Du måste ange flaggan --config-file eller -D alternativt sätta omgivningsvariabeln PGDATA.\n" + +#: utils/misc/guc.c:4990 +#, c-format +msgid "%s: could not access the server configuration file \"%s\": %s\n" +msgstr "%s: har inte åtkomst till serverns konfigureringsfil \"%s\": %s\n" + +#: utils/misc/guc.c:5016 +#, c-format +msgid "" +"%s does not know where to find the database system data.\n" +"This can be specified as \"data_directory\" in \"%s\", or by the -D invocation option, or by the PGDATA environment variable.\n" +msgstr "" +"%s vet inte var databasens systemdata är.\n" +"Det kan anges med \"data_directory\" i \"%s\" eller med flaggan -D alternativt genom att sätta omgivningsvariabeln PGDATA.\n" + +#: utils/misc/guc.c:5064 +#, c-format +msgid "" +"%s does not know where to find the \"hba\" configuration file.\n" +"This can be specified as \"hba_file\" in \"%s\", or by the -D invocation option, or by the PGDATA environment variable.\n" +msgstr "" +"%s vet inte var \"hba\"-konfigurationsfilen är.\n" +"Detta kan anges som \"hba_file\" i \"%s\" eller med flaggan -D alternativt genom att sätta omgivningsvariabeln PGDATA.\n" + +#: utils/misc/guc.c:5087 +#, c-format +msgid "" +"%s does not know where to find the \"ident\" configuration file.\n" +"This can be specified 
as \"ident_file\" in \"%s\", or by the -D invocation option, or by the PGDATA environment variable.\n" +msgstr "" +"%s vet inte var \"ident\"-konfigurationsfilen är.\n" +"Detta kan anges som \"ident_file\" i \"%s\" eller med flaggan -D alternativt genom att sätta omgivningsvariabeln PGDATA.\n" + +#: utils/misc/guc.c:5762 utils/misc/guc.c:5809 +msgid "Value exceeds integer range." +msgstr "Värde överskriver heltalsintervall." + +#: utils/misc/guc.c:6032 +#, c-format +msgid "parameter \"%s\" requires a numeric value" +msgstr "parameter \"%s\" kräver ett numeriskt värde" + +#: utils/misc/guc.c:6041 +#, c-format +msgid "%g is outside the valid range for parameter \"%s\" (%g .. %g)" +msgstr "%g är utanför giltigt intervall för parameter \"%s\" (%g .. %g)" + +#: utils/misc/guc.c:6194 utils/misc/guc.c:7564 +#, c-format +msgid "cannot set parameters during a parallel operation" +msgstr "kan inte sätta parametrar under en parallell operation" + +#: utils/misc/guc.c:6201 utils/misc/guc.c:6953 utils/misc/guc.c:7006 +#: utils/misc/guc.c:7057 utils/misc/guc.c:7393 utils/misc/guc.c:8160 +#: utils/misc/guc.c:8328 utils/misc/guc.c:10003 +#, c-format +msgid "unrecognized configuration parameter \"%s\"" +msgstr "okänd konfigurationsparameter \"%s\"" + +#: utils/misc/guc.c:6216 utils/misc/guc.c:7405 +#, c-format +msgid "parameter \"%s\" cannot be changed" +msgstr "parameter \"%s\" kan inte ändras" + +#: utils/misc/guc.c:6249 +#, c-format +msgid "parameter \"%s\" cannot be changed now" +msgstr "parameter \"%s\" kan inte ändras nu" + +#: utils/misc/guc.c:6267 utils/misc/guc.c:6314 utils/misc/guc.c:10019 +#, c-format +msgid "permission denied to set parameter \"%s\"" +msgstr "rättighet saknas för att sätta parameter \"%s\"" + +#: utils/misc/guc.c:6304 +#, c-format +msgid "parameter \"%s\" cannot be set after connection start" +msgstr "parameter \"%s\" kan inte ändras efter uppkopplingen startats" + +#: utils/misc/guc.c:6352 +#, c-format +msgid "cannot set parameter \"%s\" within security-definer function" +msgstr "kan inte sätta parameter \"%s\" inom en security-definer-funktion" + +#: utils/misc/guc.c:6961 utils/misc/guc.c:7011 utils/misc/guc.c:8335 +#, c-format +msgid "must be superuser or a member of pg_read_all_settings to examine \"%s\"" +msgstr "måste vara superanvändare eller medlem i pg_read_all_settings för att undersöka \"%s\"" + +#: utils/misc/guc.c:7102 +#, c-format +msgid "SET %s takes only one argument" +msgstr "SET %s tar bara ett argument" + +#: utils/misc/guc.c:7353 +#, c-format +msgid "must be superuser to execute ALTER SYSTEM command" +msgstr "måste vara superanvändare för att köra kommandot ALTER SYSTEM" + +#: utils/misc/guc.c:7438 +#, c-format +msgid "parameter value for ALTER SYSTEM must not contain a newline" +msgstr "parametervärde till ALTER SYSTEM kan inte innehålla nyradstecken" + +#: utils/misc/guc.c:7483 +#, c-format +msgid "could not parse contents of file \"%s\"" +msgstr "kunde inte parsa innehållet i fil \"%s\"" + +#: utils/misc/guc.c:7640 +#, c-format +msgid "SET LOCAL TRANSACTION SNAPSHOT is not implemented" +msgstr "SET LOCAL TRANSACTION SNAPSHOT är inte implementerat ännu" + +#: utils/misc/guc.c:7724 +#, c-format +msgid "SET requires parameter name" +msgstr "SET kräver ett parameternamn" + +#: utils/misc/guc.c:7857 +#, c-format +msgid "attempt to redefine parameter \"%s\"" +msgstr "försök att omdefiniera parameter \"%s\"" + +#: utils/misc/guc.c:9636 +#, c-format +msgid "parameter \"%s\" could not be set" +msgstr "parameter \"%s\" kunde inte sättas" + +#: utils/misc/guc.c:9723 
+#, c-format +msgid "could not parse setting for parameter \"%s\"" +msgstr "kunde inte tolka inställningen för parameter \"%s\"" + +#: utils/misc/guc.c:10081 utils/misc/guc.c:10115 +#, c-format +msgid "invalid value for parameter \"%s\": %d" +msgstr "ogiltigt värde för parameter \"%s\": %d" + +#: utils/misc/guc.c:10149 +#, c-format +msgid "invalid value for parameter \"%s\": %g" +msgstr "ogiltigt värde för parameter \"%s\": %g" + +#: utils/misc/guc.c:10419 +#, c-format +msgid "\"temp_buffers\" cannot be changed after any temporary tables have been accessed in the session." +msgstr "\"temp_buffers\" kan inte ändras efter att man använt temporära tabeller i sessionen." + +#: utils/misc/guc.c:10431 +#, c-format +msgid "Bonjour is not supported by this build" +msgstr "Bonjour stöds inte av detta bygge" + +#: utils/misc/guc.c:10444 +#, c-format +msgid "SSL is not supported by this build" +msgstr "SSL stöds inte av detta bygge" + +#: utils/misc/guc.c:10456 +#, c-format +msgid "Cannot enable parameter when \"log_statement_stats\" is true." +msgstr "Kan inte slå på parameter när \"log_statement_stats\" är satt." + +#: utils/misc/guc.c:10468 +#, c-format +msgid "Cannot enable \"log_statement_stats\" when \"log_parser_stats\", \"log_planner_stats\", or \"log_executor_stats\" is true." +msgstr "Kan inte slå på \"log_statement_stats\" när \"log_parser_stats\", \"log_planner_stats\" eller \"log_executor_stats\" är satta." + +#: utils/misc/help_config.c:131 +#, c-format +msgid "internal error: unrecognized run-time parameter type\n" +msgstr "internt fel: okänd parametertyp\n" + +#: utils/misc/pg_config.c:60 +#, c-format +msgid "query-specified return tuple and function return type are not compatible" +msgstr "fråge-angiven typ för retur-tupel och funktions returtyp är inte kompatibla" + +#: utils/misc/pg_controldata.c:59 utils/misc/pg_controldata.c:137 +#: utils/misc/pg_controldata.c:241 utils/misc/pg_controldata.c:308 +#, c-format +msgid "calculated CRC checksum does not match value stored in file" +msgstr "beräknad CRC-checksumma matchar inte värdet som är lagrat i fil" + +#: utils/misc/pg_rusage.c:64 +#, c-format +msgid "CPU: user: %d.%02d s, system: %d.%02d s, elapsed: %d.%02d s" +msgstr "CPU: användare: %d.%02d s, system: %d.%02d s, förflutit: %d.%02d s" + +#: utils/misc/rls.c:127 +#, c-format +msgid "query would be affected by row-level security policy for table \"%s\"" +msgstr "frågan påverkas av radsäkerhetspolicyn för tabell \"%s\"" + +#: utils/misc/rls.c:129 +#, c-format +msgid "To disable the policy for the table's owner, use ALTER TABLE NO FORCE ROW LEVEL SECURITY." +msgstr "För att slå av policyn för tabellens ägare, använd ALTER TABLE NO FORCE ROW LEVEL SECURITY." 
+ +#: utils/misc/timeout.c:388 +#, c-format +msgid "cannot add more timeout reasons" +msgstr "kan inte lägga till fler timeoutskäl" + +#: utils/misc/tzparser.c:61 +#, c-format +msgid "time zone abbreviation \"%s\" is too long (maximum %d characters) in time zone file \"%s\", line %d" +msgstr "tidszonförkortningen \"%s\" är för lång (max %d tecken) i tidszonfilen \"%s\", rad %d" + +#: utils/misc/tzparser.c:73 +#, c-format +msgid "time zone offset %d is out of range in time zone file \"%s\", line %d" +msgstr "tidszonoffset %d är utanför giltigt intervall i tidszonfilen \"%s\", rad %d" + +#: utils/misc/tzparser.c:112 +#, c-format +msgid "missing time zone abbreviation in time zone file \"%s\", line %d" +msgstr "tidszonförkortning saknas i tidszonfilen \"%s\", rad %d" + +#: utils/misc/tzparser.c:121 +#, c-format +msgid "missing time zone offset in time zone file \"%s\", line %d" +msgstr "tidszonoffset saknas i tidszonfilen \"%s\", rad %d" + +#: utils/misc/tzparser.c:133 +#, c-format +msgid "invalid number for time zone offset in time zone file \"%s\", line %d" +msgstr "felaktigt nummer för tidszonsoffset i tidszonfilen \"%s\", rad %d" + +#: utils/misc/tzparser.c:169 +#, c-format +msgid "invalid syntax in time zone file \"%s\", line %d" +msgstr "felaktig syntax i tidszonfilen \"%s\", rad %d" + +#: utils/misc/tzparser.c:237 +#, c-format +msgid "time zone abbreviation \"%s\" is multiply defined" +msgstr "tidszonförkortningen \"%s\" är definierad flera gånger" + +#: utils/misc/tzparser.c:239 +#, c-format +msgid "Entry in time zone file \"%s\", line %d, conflicts with entry in file \"%s\", line %d." +msgstr "Post i tidszonfilen \"%s\", rad %d, står i konflikt med post i filen \"%s\", rad %d." + +#: utils/misc/tzparser.c:301 +#, c-format +msgid "invalid time zone file name \"%s\"" +msgstr "ogiltigt tidszonfilnamn: \"%s\"" + +#: utils/misc/tzparser.c:314 +#, c-format +msgid "time zone file recursion limit exceeded in file \"%s\"" +msgstr "tidszonfilens rekursiva maxtak överskridet i filen \"%s\"" + +#: utils/misc/tzparser.c:353 utils/misc/tzparser.c:366 +#, c-format +msgid "could not read time zone file \"%s\": %m" +msgstr "kunde inte läsa tidszonfil \"%s\": %m" + +#: utils/misc/tzparser.c:376 +#, c-format +msgid "line is too long in time zone file \"%s\", line %d" +msgstr "raden är för lång i tidszonfil \"%s\", rad %d" + +#: utils/misc/tzparser.c:399 +#, c-format +msgid "@INCLUDE without file name in time zone file \"%s\", line %d" +msgstr "@INCLUDE utan filnamn i tidszonfil \"%s\", rad %d" + +#: utils/mmgr/aset.c:483 utils/mmgr/generation.c:250 utils/mmgr/slab.c:240 +#, c-format +msgid "Failed while creating memory context \"%s\"." +msgstr "Misslyckades vid skapande av minneskontext \"%s\"." + +#: utils/mmgr/dsa.c:518 utils/mmgr/dsa.c:1323 +#, c-format +msgid "could not attach to dynamic shared area" +msgstr "kunde inte ansluta till dynamisk delad area" + +#: utils/mmgr/mcxt.c:797 utils/mmgr/mcxt.c:833 utils/mmgr/mcxt.c:871 +#: utils/mmgr/mcxt.c:909 utils/mmgr/mcxt.c:945 utils/mmgr/mcxt.c:976 +#: utils/mmgr/mcxt.c:1012 utils/mmgr/mcxt.c:1064 utils/mmgr/mcxt.c:1099 +#: utils/mmgr/mcxt.c:1134 +#, c-format +msgid "Failed on request of size %zu in memory context \"%s\"." +msgstr "Misslyckades med förfrågan av storlek %zu i minneskontext \"%s\"." 
+ +#: utils/mmgr/portalmem.c:187 +#, c-format +msgid "cursor \"%s\" already exists" +msgstr "markör \"%s\" finns redan" + +#: utils/mmgr/portalmem.c:191 +#, c-format +msgid "closing existing cursor \"%s\"" +msgstr "stänger existerande markör \"%s\"" + +#: utils/mmgr/portalmem.c:398 +#, c-format +msgid "portal \"%s\" cannot be run" +msgstr "portal \"%s\" kan inte köras" + +#: utils/mmgr/portalmem.c:476 +#, c-format +msgid "cannot drop pinned portal \"%s\"" +msgstr "kan inte ta bort fastsatt portal \"%s\"" + +#: utils/mmgr/portalmem.c:484 +#, c-format +msgid "cannot drop active portal \"%s\"" +msgstr "kan inte ta bort aktiv portal \"%s\"" + +#: utils/mmgr/portalmem.c:719 +#, c-format +msgid "cannot PREPARE a transaction that has created a cursor WITH HOLD" +msgstr "kan inte göra PREPARE på en transaktion som skapat en markör med WITH HOLD" + +#: utils/mmgr/portalmem.c:1253 +#, c-format +msgid "cannot perform transaction commands inside a cursor loop that is not read-only" +msgstr "kan inte utföra transaktionskommandon i en markörloop som inte är read-only" + +#: utils/sort/logtape.c:276 +#, c-format +msgid "could not read block %ld of temporary file: %m" +msgstr "kunde inte läsa block %ld av temporärfil: %m" + +#: utils/sort/logtape.c:439 +#, c-format +msgid "could not determine size of temporary file \"%s\"" +msgstr "kunde inte bestämma storlek på temporär fil \"%s\"" + +#: utils/sort/sharedtuplestore.c:208 +#, c-format +msgid "could not write to temporary file: %m" +msgstr "kunde inte skriva till temporär fil: %m" + +#: utils/sort/sharedtuplestore.c:437 utils/sort/sharedtuplestore.c:446 +#: utils/sort/sharedtuplestore.c:469 utils/sort/sharedtuplestore.c:486 +#: utils/sort/sharedtuplestore.c:503 utils/sort/sharedtuplestore.c:575 +#: utils/sort/sharedtuplestore.c:581 +#, c-format +msgid "could not read from shared tuplestore temporary file" +msgstr "kunde inte läsa från delad temporär lagringsfil för tupler" + +#: utils/sort/sharedtuplestore.c:492 +#, c-format +msgid "unexpected chunk in shared tuplestore temporary file" +msgstr "oväntad chunk i delad temporär lagringsfil för tupler" + +#: utils/sort/tuplesort.c:2967 +#, c-format +msgid "cannot have more than %d runs for an external sort" +msgstr "kan inte ha mer än %d körningar för en extern sortering" + +#: utils/sort/tuplesort.c:4051 +#, c-format +msgid "could not create unique index \"%s\"" +msgstr "kunde inte skapa unikt index \"%s\"" + +#: utils/sort/tuplesort.c:4053 +#, c-format +msgid "Key %s is duplicated." +msgstr "Nyckeln %s är duplicerad." + +#: utils/sort/tuplesort.c:4054 +#, c-format +msgid "Duplicate keys exist." +msgstr "Duplicerade nycklar existerar." 
+ +#: utils/sort/tuplestore.c:518 utils/sort/tuplestore.c:528 +#: utils/sort/tuplestore.c:869 utils/sort/tuplestore.c:973 +#: utils/sort/tuplestore.c:1037 utils/sort/tuplestore.c:1054 +#: utils/sort/tuplestore.c:1256 utils/sort/tuplestore.c:1321 +#: utils/sort/tuplestore.c:1330 +#, c-format +msgid "could not seek in tuplestore temporary file: %m" +msgstr "kunde inte söka i temporär lagringsfil för tupler: %m" + +#: utils/sort/tuplestore.c:1477 utils/sort/tuplestore.c:1550 +#: utils/sort/tuplestore.c:1556 +#, c-format +msgid "could not read from tuplestore temporary file: %m" +msgstr "kunde inte läsa från temporär lagringsfil för tupler: %m" + +#: utils/sort/tuplestore.c:1518 utils/sort/tuplestore.c:1523 +#: utils/sort/tuplestore.c:1529 +#, c-format +msgid "could not write to tuplestore temporary file: %m" +msgstr "kunde inte skriva till temporär lagringsfil för tupler: %m" + +#: utils/time/snapmgr.c:622 +#, c-format +msgid "The source transaction is not running anymore." +msgstr "Källtransaktionen kör inte längre." + +#: utils/time/snapmgr.c:1200 +#, c-format +msgid "cannot export a snapshot from a subtransaction" +msgstr "kan inte exportera ett snapshot från en subtransaktion" + +#: utils/time/snapmgr.c:1359 utils/time/snapmgr.c:1364 +#: utils/time/snapmgr.c:1369 utils/time/snapmgr.c:1384 +#: utils/time/snapmgr.c:1389 utils/time/snapmgr.c:1394 +#: utils/time/snapmgr.c:1409 utils/time/snapmgr.c:1414 +#: utils/time/snapmgr.c:1419 utils/time/snapmgr.c:1519 +#: utils/time/snapmgr.c:1535 utils/time/snapmgr.c:1560 +#, c-format +msgid "invalid snapshot data in file \"%s\"" +msgstr "ogiltig snapshot-data i fil \"%s\"" + +#: utils/time/snapmgr.c:1456 +#, c-format +msgid "SET TRANSACTION SNAPSHOT must be called before any query" +msgstr "SET TRANSACTION SNAPSHOT måste anropas innan någon fråga" + +#: utils/time/snapmgr.c:1465 +#, c-format +msgid "a snapshot-importing transaction must have isolation level SERIALIZABLE or REPEATABLE READ" +msgstr "en snapshot-importerande transaktion måste ha isoleringsnivå SERIALIZABLE eller REPEATABLE READ" + +#: utils/time/snapmgr.c:1474 utils/time/snapmgr.c:1483 +#, c-format +msgid "invalid snapshot identifier: \"%s\"" +msgstr "ogiltig snapshot-identifierare: \"%s\"" + +#: utils/time/snapmgr.c:1573 +#, c-format +msgid "a serializable transaction cannot import a snapshot from a non-serializable transaction" +msgstr "en serialiserbar transaktion kan inte importera ett snapshot från en icke-serialiserbar transaktion" + +#: utils/time/snapmgr.c:1577 +#, c-format +msgid "a non-read-only serializable transaction cannot import a snapshot from a read-only transaction" +msgstr "en serialiserbar transaktion som inte är read-only kan inte importera en snapshot från en read-only-transaktion." + +#: utils/time/snapmgr.c:1592 +#, c-format +msgid "cannot import a snapshot from a different database" +msgstr "kan inte importera en snapshot från en annan databas" + +#~ msgid "\"%s\" is already an attribute of type %s" +#~ msgstr "\"%s\" är redan ett attribut med typ %s" diff --git a/src/backend/port/.gitignore b/src/backend/port/.gitignore index 9f4f1af5e9..4ef36b82c7 100644 --- a/src/backend/port/.gitignore +++ b/src/backend/port/.gitignore @@ -1,4 +1,3 @@ -/dynloader.c /pg_sema.c /pg_shmem.c /tas.s diff --git a/src/backend/port/Makefile b/src/backend/port/Makefile index aba1e92fe1..f4120bec55 100644 --- a/src/backend/port/Makefile +++ b/src/backend/port/Makefile @@ -21,7 +21,7 @@ subdir = src/backend/port top_builddir = ../../.. 
include $(top_builddir)/src/Makefile.global -OBJS = atomics.o dynloader.o pg_sema.o pg_shmem.o $(TAS) +OBJS = atomics.o pg_sema.o pg_shmem.o $(TAS) ifeq ($(PORTNAME), win32) SUBDIRS += win32 diff --git a/src/backend/port/atomics.c b/src/backend/port/atomics.c index c0c2b31270..caa84bf2b6 100644 --- a/src/backend/port/atomics.c +++ b/src/backend/port/atomics.c @@ -3,7 +3,7 @@ * atomics.c * Non-Inline parts of the atomics implementation * - * Portions Copyright (c) 2013-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 2013-2018, PostgreSQL Global Development Group * * * IDENTIFICATION @@ -68,18 +68,35 @@ pg_atomic_init_flag_impl(volatile pg_atomic_flag *ptr) #else SpinLockInit((slock_t *) &ptr->sema); #endif + + ptr->value = false; } bool pg_atomic_test_set_flag_impl(volatile pg_atomic_flag *ptr) { - return TAS((slock_t *) &ptr->sema); + uint32 oldval; + + SpinLockAcquire((slock_t *) &ptr->sema); + oldval = ptr->value; + ptr->value = true; + SpinLockRelease((slock_t *) &ptr->sema); + + return oldval == 0; } void pg_atomic_clear_flag_impl(volatile pg_atomic_flag *ptr) { - S_UNLOCK((slock_t *) &ptr->sema); + SpinLockAcquire((slock_t *) &ptr->sema); + ptr->value = false; + SpinLockRelease((slock_t *) &ptr->sema); +} + +bool +pg_atomic_unlocked_test_flag_impl(volatile pg_atomic_flag *ptr) +{ + return ptr->value == 0; } #endif /* PG_HAVE_ATOMIC_FLAG_SIMULATION */ diff --git a/src/backend/port/dynloader/aix.c b/src/backend/port/dynloader/aix.c deleted file mode 100644 index bf6ec257e7..0000000000 --- a/src/backend/port/dynloader/aix.c +++ /dev/null @@ -1,7 +0,0 @@ -/* - * src/backend/port/dynloader/aix.c - * - * Dummy file used for nothing at this point - * - * see aix.h - */ diff --git a/src/backend/port/dynloader/aix.h b/src/backend/port/dynloader/aix.h deleted file mode 100644 index 4b1bad6e45..0000000000 --- a/src/backend/port/dynloader/aix.h +++ /dev/null @@ -1,39 +0,0 @@ -/*------------------------------------------------------------------------- - * - * aix.h - * prototypes for AIX-specific routines - * - * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group - * Portions Copyright (c) 1994, Regents of the University of California - * - * src/backend/port/dynloader/aix.h - * - *------------------------------------------------------------------------- - */ - -#ifndef PORT_PROTOS_H -#define PORT_PROTOS_H - -#include -#include "utils/dynamic_loader.h" /* pgrminclude ignore */ - -/* - * In some older systems, the RTLD_NOW flag isn't defined and the mode - * argument to dlopen must always be 1. The RTLD_GLOBAL flag is wanted - * if available, but it doesn't exist everywhere. - * If it doesn't exist, set it to 0 so it has no effect. 
- */ -#ifndef RTLD_NOW -#define RTLD_NOW 1 -#endif -#ifndef RTLD_GLOBAL -#define RTLD_GLOBAL 0 -#endif - -#define pg_dlopen(f) dlopen((f), RTLD_NOW | RTLD_GLOBAL) -#define pg_dlsym(h, f) ((PGFunction) dlsym(h, f)) -#define pg_dlclose(h) dlclose(h) -#define pg_dlerror() dlerror() - -#endif /* PORT_PROTOS_H */ diff --git a/src/backend/port/dynloader/cygwin.c b/src/backend/port/dynloader/cygwin.c deleted file mode 100644 index 5c52bf6147..0000000000 --- a/src/backend/port/dynloader/cygwin.c +++ /dev/null @@ -1,3 +0,0 @@ -/* src/backend/port/dynloader/cygwin.c */ - -/* Dummy file used for nothing at this point; see cygwin.h */ diff --git a/src/backend/port/dynloader/cygwin.h b/src/backend/port/dynloader/cygwin.h deleted file mode 100644 index 5d819cfd7b..0000000000 --- a/src/backend/port/dynloader/cygwin.h +++ /dev/null @@ -1,36 +0,0 @@ -/*------------------------------------------------------------------------- - * - * Dynamic loader declarations for Cygwin - * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group - * Portions Copyright (c) 1994, Regents of the University of California - * - * src/backend/port/dynloader/cygwin.h - * - *------------------------------------------------------------------------- - */ -#ifndef PORT_PROTOS_H -#define PORT_PROTOS_H - -#include -#include "utils/dynamic_loader.h" /* pgrminclude ignore */ - -/* - * In some older systems, the RTLD_NOW flag isn't defined and the mode - * argument to dlopen must always be 1. The RTLD_GLOBAL flag is wanted - * if available, but it doesn't exist everywhere. - * If it doesn't exist, set it to 0 so it has no effect. - */ -#ifndef RTLD_NOW -#define RTLD_NOW 1 -#endif -#ifndef RTLD_GLOBAL -#define RTLD_GLOBAL 0 -#endif - -#define pg_dlopen(f) dlopen((f), RTLD_NOW | RTLD_GLOBAL) -#define pg_dlsym dlsym -#define pg_dlclose dlclose -#define pg_dlerror dlerror - -#endif /* PORT_PROTOS_H */ diff --git a/src/backend/port/dynloader/darwin.c b/src/backend/port/dynloader/darwin.c deleted file mode 100644 index f8fdeaf122..0000000000 --- a/src/backend/port/dynloader/darwin.c +++ /dev/null @@ -1,138 +0,0 @@ -/* - * Dynamic loading support for macOS (Darwin) - * - * If dlopen() is available (Darwin 10.3 and later), we just use it. - * Otherwise we emulate it with the older, now deprecated, NSLinkModule API. - * - * src/backend/port/dynloader/darwin.c - */ -#include "postgres.h" - -#ifdef HAVE_DLOPEN -#include -#else -#include -#endif - -#include "dynloader.h" - - -#ifdef HAVE_DLOPEN - -void * -pg_dlopen(char *filename) -{ - return dlopen(filename, RTLD_NOW | RTLD_GLOBAL); -} - -void -pg_dlclose(void *handle) -{ - dlclose(handle); -} - -PGFunction -pg_dlsym(void *handle, char *funcname) -{ - /* Do not prepend an underscore: see dlopen(3) */ - return dlsym(handle, funcname); -} - -char * -pg_dlerror(void) -{ - return dlerror(); -} -#else /* !HAVE_DLOPEN */ - -/* - * These routines were taken from the Apache source, but were made - * available with a PostgreSQL-compatible license. Kudos Wilfredo - * Sánchez . 
- */ - -static NSObjectFileImageReturnCode cofiff_result = NSObjectFileImageFailure; - -void * -pg_dlopen(char *filename) -{ - NSObjectFileImage image; - - cofiff_result = NSCreateObjectFileImageFromFile(filename, &image); - if (cofiff_result != NSObjectFileImageSuccess) - return NULL; - return NSLinkModule(image, filename, - NSLINKMODULE_OPTION_BINDNOW | - NSLINKMODULE_OPTION_RETURN_ON_ERROR); -} - -void -pg_dlclose(void *handle) -{ - NSUnLinkModule(handle, FALSE); -} - -PGFunction -pg_dlsym(void *handle, char *funcname) -{ - NSSymbol symbol; - char *symname = (char *) malloc(strlen(funcname) + 2); - - if (!symname) - return NULL; - - sprintf(symname, "_%s", funcname); - if (NSIsSymbolNameDefined(symname)) - { - symbol = NSLookupAndBindSymbol(symname); - - free(symname); - return (PGFunction) NSAddressOfSymbol(symbol); - } - else - { - free(symname); - return NULL; - } -} - -char * -pg_dlerror(void) -{ - NSLinkEditErrors c; - int errorNumber; - const char *fileName; - const char *errorString = NULL; - - switch (cofiff_result) - { - case NSObjectFileImageSuccess: - /* must have failed in NSLinkModule */ - NSLinkEditError(&c, &errorNumber, &fileName, &errorString); - if (errorString == NULL || *errorString == '\0') - errorString = "unknown link-edit failure"; - break; - case NSObjectFileImageFailure: - errorString = "failed to open object file"; - break; - case NSObjectFileImageInappropriateFile: - errorString = "inappropriate object file"; - break; - case NSObjectFileImageArch: - errorString = "object file is for wrong architecture"; - break; - case NSObjectFileImageFormat: - errorString = "object file has wrong format"; - break; - case NSObjectFileImageAccess: - errorString = "insufficient permissions for object file"; - break; - default: - errorString = "unknown failure to open object file"; - break; - } - - return (char *) errorString; -} - -#endif /* HAVE_DLOPEN */ diff --git a/src/backend/port/dynloader/darwin.h b/src/backend/port/dynloader/darwin.h deleted file mode 100644 index 44a3bd6b82..0000000000 --- a/src/backend/port/dynloader/darwin.h +++ /dev/null @@ -1,8 +0,0 @@ -/* src/backend/port/dynloader/darwin.h */ - -#include "fmgr.h" - -void *pg_dlopen(char *filename); -PGFunction pg_dlsym(void *handle, char *funcname); -void pg_dlclose(void *handle); -char *pg_dlerror(void); diff --git a/src/backend/port/dynloader/freebsd.c b/src/backend/port/dynloader/freebsd.c deleted file mode 100644 index 23547b06bb..0000000000 --- a/src/backend/port/dynloader/freebsd.c +++ /dev/null @@ -1,106 +0,0 @@ -/* - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group - * Portions Copyright (c) 1990 The Regents of the University of California. - * All rights reserved. - * - * src/backend/port/dynloader/freebsd.c - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * 3. Neither the name of the University nor the names of its contributors - * may be used to endorse or promote products derived from this software - * without specific prior written permission. 
- * - * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND - * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE - * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL - * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS - * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) - * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT - * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY - * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF - * SUCH DAMAGE. - */ - -#if defined(LIBC_SCCS) && !defined(lint) -static char sccsid[] = "@(#)dl.c 5.4 (Berkeley) 2/23/91"; -#endif /* LIBC_SCCS and not lint */ - -#include "postgres.h" - -#include -#include -#include - -#include "dynloader.h" - -static char error_message[BUFSIZ]; - -char * -BSD44_derived_dlerror(void) -{ - static char ret[BUFSIZ]; - - strcpy(ret, error_message); - error_message[0] = 0; - return (ret[0] == 0) ? NULL : ret; -} - -void * -BSD44_derived_dlopen(const char *file, int num) -{ -#if !defined(HAVE_DLOPEN) - snprintf(error_message, sizeof(error_message), - "dlopen (%s) not supported", file); - return NULL; -#else - void *vp; - - if ((vp = dlopen((char *) file, num)) == NULL) - snprintf(error_message, sizeof(error_message), - "dlopen (%s) failed: %s", file, dlerror()); - return vp; -#endif -} - -void * -BSD44_derived_dlsym(void *handle, const char *name) -{ -#if !defined(HAVE_DLOPEN) - snprintf(error_message, sizeof(error_message), - "dlsym (%s) failed", name); - return NULL; -#else - void *vp; - -#ifndef __ELF__ - char buf[BUFSIZ]; - - if (*name != '_') - { - snprintf(buf, sizeof(buf), "_%s", name); - name = buf; - } -#endif /* !__ELF__ */ - if ((vp = dlsym(handle, (char *) name)) == NULL) - snprintf(error_message, sizeof(error_message), - "dlsym (%s) failed", name); - return vp; -#endif -} - -void -BSD44_derived_dlclose(void *handle) -{ -#if defined(HAVE_DLOPEN) - dlclose(handle); -#endif -} diff --git a/src/backend/port/dynloader/freebsd.h b/src/backend/port/dynloader/freebsd.h deleted file mode 100644 index 6faf07f962..0000000000 --- a/src/backend/port/dynloader/freebsd.h +++ /dev/null @@ -1,58 +0,0 @@ -/*------------------------------------------------------------------------- - * - * freebsd.h - * port-specific prototypes for FreeBSD - * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group - * Portions Copyright (c) 1994, Regents of the University of California - * - * src/backend/port/dynloader/freebsd.h - * - *------------------------------------------------------------------------- - */ -#ifndef PORT_PROTOS_H -#define PORT_PROTOS_H - -#include -#include -#include - -#include "utils/dynamic_loader.h" /* pgrminclude ignore */ - -/* - * Dynamic Loader on NetBSD 1.0. - * - * this dynamic loader uses the system dynamic loading interface for shared - * libraries (ie. dlopen/dlsym/dlclose). The user must specify a shared - * library as the file to be dynamically loaded. - * - * agc - I know this is all a bit crufty, but it does work, is fairly - * portable, and works (the stipulation that the d.l. function must - * begin with an underscore is fairly tricky, and some versions of - * NetBSD (like 1.0, and 1.0A pre June 1995) have no dlerror.) 
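Every per-platform dynloader file removed above implements the same four entry points: pg_dlopen() loads a shared object (with RTLD_NOW | RTLD_GLOBAL where the platform defines them), pg_dlsym() resolves a function symbol, and pg_dlclose()/pg_dlerror() release the handle and report the last error. A minimal, standalone sketch of that contract on any dlopen()-capable system follows; the library and symbol names ("libm.so.6", "cos") are placeholders for illustration only, not anything referenced by this patch.

    /* Hypothetical standalone illustration of the pg_dl*() contract. */
    #include <dlfcn.h>
    #include <stdio.h>

    int
    main(void)
    {
        void       *handle;
        double      (*cosine) (double);

        handle = dlopen("libm.so.6", RTLD_NOW | RTLD_GLOBAL);  /* pg_dlopen() */
        if (handle == NULL)
        {
            fprintf(stderr, "dlopen failed: %s\n", dlerror()); /* pg_dlerror() */
            return 1;
        }

        cosine = (double (*) (double)) dlsym(handle, "cos");   /* pg_dlsym() */
        if (cosine == NULL)
            fprintf(stderr, "dlsym failed: %s\n", dlerror());
        else
            printf("cos(0.0) = %f\n", cosine(0.0));

        dlclose(handle);                                       /* pg_dlclose() */
        return 0;
    }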
- */ - -/* - * In some older systems, the RTLD_NOW flag isn't defined and the mode - * argument to dlopen must always be 1. The RTLD_GLOBAL flag is wanted - * if available, but it doesn't exist everywhere. - * If it doesn't exist, set it to 0 so it has no effect. - */ -#ifndef RTLD_NOW -#define RTLD_NOW 1 -#endif -#ifndef RTLD_GLOBAL -#define RTLD_GLOBAL 0 -#endif - -#define pg_dlopen(f) BSD44_derived_dlopen((f), RTLD_NOW | RTLD_GLOBAL) -#define pg_dlsym BSD44_derived_dlsym -#define pg_dlclose BSD44_derived_dlclose -#define pg_dlerror BSD44_derived_dlerror - -char *BSD44_derived_dlerror(void); -void *BSD44_derived_dlopen(const char *filename, int num); -void *BSD44_derived_dlsym(void *handle, const char *name); -void BSD44_derived_dlclose(void *handle); - -#endif /* PORT_PROTOS_H */ diff --git a/src/backend/port/dynloader/hpux.c b/src/backend/port/dynloader/hpux.c deleted file mode 100644 index 5a0e40146d..0000000000 --- a/src/backend/port/dynloader/hpux.c +++ /dev/null @@ -1,68 +0,0 @@ -/*------------------------------------------------------------------------- - * - * dynloader.c - * dynamic loader for HP-UX using the shared library mechanism - * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group - * Portions Copyright (c) 1994, Regents of the University of California - * - * - * IDENTIFICATION - * src/backend/port/dynloader/hpux.c - * - * NOTES - * all functions are defined here -- it's impossible to trace the - * shl_* routines from the bundled HP-UX debugger. - * - *------------------------------------------------------------------------- - */ -#include "postgres.h" - -/* System includes */ -#include -#include - -#include "dynloader.h" -#include "utils/dynamic_loader.h" - -void * -pg_dlopen(char *filename) -{ - /* - * Use BIND_IMMEDIATE so that undefined symbols cause a failure return - * from shl_load(), rather than an abort() later on when we attempt to - * call the library! - */ - shl_t handle = shl_load(filename, - BIND_IMMEDIATE | BIND_VERBOSE | DYNAMIC_PATH, - 0L); - - return (void *) handle; -} - -PGFunction -pg_dlsym(void *handle, char *funcname) -{ - PGFunction f; - - if (shl_findsym((shl_t *) & handle, funcname, TYPE_PROCEDURE, &f) == -1) - f = (PGFunction) NULL; - return f; -} - -void -pg_dlclose(void *handle) -{ - shl_unload((shl_t) handle); -} - -char * -pg_dlerror(void) -{ - static char errmsg[] = "shl_load failed"; - - if (errno) - return strerror(errno); - - return errmsg; -} diff --git a/src/backend/port/dynloader/hpux.h b/src/backend/port/dynloader/hpux.h deleted file mode 100644 index 0a17454f2b..0000000000 --- a/src/backend/port/dynloader/hpux.h +++ /dev/null @@ -1,25 +0,0 @@ -/*------------------------------------------------------------------------- - * - * dynloader.h - * dynamic loader for HP-UX using the shared library mechanism - * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group - * Portions Copyright (c) 1994, Regents of the University of California - * - * - * IDENTIFICATION - * src/backend/port/dynloader/hpux.h - * - * NOTES - * all functions are defined here -- it's impossible to trace the - * shl_* routines from the bundled HP-UX debugger. 
- * - *------------------------------------------------------------------------- - */ -/* System includes */ -#include "fmgr.h" - -extern void *pg_dlopen(char *filename); -extern PGFunction pg_dlsym(void *handle, char *funcname); -extern void pg_dlclose(void *handle); -extern char *pg_dlerror(void); diff --git a/src/backend/port/dynloader/linux.c b/src/backend/port/dynloader/linux.c deleted file mode 100644 index 38e19f7484..0000000000 --- a/src/backend/port/dynloader/linux.c +++ /dev/null @@ -1,133 +0,0 @@ -/*------------------------------------------------------------------------- - * - * linux.c - * Dynamic Loader for Postgres for Linux, generated from those for - * Ultrix. - * - * You need to install the dld library on your Linux system! - * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group - * Portions Copyright (c) 1994, Regents of the University of California - * - * - * IDENTIFICATION - * src/backend/port/dynloader/linux.c - * - *------------------------------------------------------------------------- - */ - -#include "postgres.h" - -#ifdef HAVE_DLD_H -#include -#endif - -#include "dynloader.h" -#include "miscadmin.h" - - -#ifndef HAVE_DLOPEN - -void * -pg_dlopen(char *filename) -{ -#ifndef HAVE_DLD_H - elog(ERROR, "dynamic load not supported"); - return NULL; -#else - static int dl_initialized = 0; - - /* - * initializes the dynamic loader with the executable's pathname. (only - * needs to do this the first time pg_dlopen is called.) - */ - if (!dl_initialized) - { - if (dld_init(dld_find_executable(my_exec_path))) - return NULL; - - /* - * if there are undefined symbols, we want dl to search from the - * following libraries also. - */ - dl_initialized = 1; - } - - /* - * link the file, then check for undefined symbols! - */ - if (dld_link(filename)) - return NULL; - - /* - * If undefined symbols: try to link with the C and math libraries! This - * could be smarter, if the dynamic linker was able to handle shared libs! 
- */ - if (dld_undefined_sym_count > 0) - { - if (dld_link("/usr/lib/libc.a")) - { - elog(WARNING, "could not link C library"); - return NULL; - } - if (dld_undefined_sym_count > 0) - { - if (dld_link("/usr/lib/libm.a")) - { - elog(WARNING, "could not link math library"); - return NULL; - } - if (dld_undefined_sym_count > 0) - { - int count = dld_undefined_sym_count; - char **list = dld_list_undefined_sym(); - - /* list the undefined symbols, if any */ - do - { - elog(WARNING, "\"%s\" is undefined", *list); - list++; - count--; - } while (count > 0); - - dld_unlink_by_file(filename, 1); - return NULL; - } - } - } - - return (void *) strdup(filename); -#endif -} - -PGFunction -pg_dlsym(void *handle, char *funcname) -{ -#ifndef HAVE_DLD_H - return NULL; -#else - return (PGFunction) dld_get_func((funcname)); -#endif -} - -void -pg_dlclose(void *handle) -{ -#ifndef HAVE_DLD_H -#else - dld_unlink_by_file(handle, 1); - free(handle); -#endif -} - -char * -pg_dlerror(void) -{ -#ifndef HAVE_DLD_H - return "dynaloader unsupported"; -#else - return dld_strerror(dld_errno); -#endif -} - -#endif /* !HAVE_DLOPEN */ diff --git a/src/backend/port/dynloader/linux.h b/src/backend/port/dynloader/linux.h deleted file mode 100644 index d2c25df033..0000000000 --- a/src/backend/port/dynloader/linux.h +++ /dev/null @@ -1,44 +0,0 @@ -/*------------------------------------------------------------------------- - * - * linux.h - * Port-specific prototypes for Linux - * - * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group - * Portions Copyright (c) 1994, Regents of the University of California - * - * src/backend/port/dynloader/linux.h - * - *------------------------------------------------------------------------- - */ -#ifndef PORT_PROTOS_H -#define PORT_PROTOS_H - -#include "utils/dynamic_loader.h" /* pgrminclude ignore */ -#ifdef HAVE_DLOPEN -#include -#endif - - -#ifdef HAVE_DLOPEN - -/* - * In some older systems, the RTLD_NOW flag isn't defined and the mode - * argument to dlopen must always be 1. The RTLD_GLOBAL flag is wanted - * if available, but it doesn't exist everywhere. - * If it doesn't exist, set it to 0 so it has no effect. - */ -#ifndef RTLD_NOW -#define RTLD_NOW 1 -#endif -#ifndef RTLD_GLOBAL -#define RTLD_GLOBAL 0 -#endif - -#define pg_dlopen(f) dlopen((f), RTLD_NOW | RTLD_GLOBAL) -#define pg_dlsym dlsym -#define pg_dlclose dlclose -#define pg_dlerror dlerror -#endif /* HAVE_DLOPEN */ - -#endif /* PORT_PROTOS_H */ diff --git a/src/backend/port/dynloader/netbsd.c b/src/backend/port/dynloader/netbsd.c deleted file mode 100644 index 475d746514..0000000000 --- a/src/backend/port/dynloader/netbsd.c +++ /dev/null @@ -1,106 +0,0 @@ -/* - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group - * Portions Copyright (c) 1990 The Regents of the University of California. - * All rights reserved. - * - * src/backend/port/dynloader/netbsd.c - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * 3. 
Neither the name of the University nor the names of its contributors - * may be used to endorse or promote products derived from this software - * without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND - * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE - * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL - * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS - * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) - * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT - * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY - * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF - * SUCH DAMAGE. - */ - -#if defined(LIBC_SCCS) && !defined(lint) -static char sccsid[] = "@(#)dl.c 5.4 (Berkeley) 2/23/91"; -#endif /* LIBC_SCCS and not lint */ - -#include "postgres.h" - -#include -#include -#include - -#include "dynloader.h" - -static char error_message[BUFSIZ]; - -char * -BSD44_derived_dlerror(void) -{ - static char ret[BUFSIZ]; - - strcpy(ret, error_message); - error_message[0] = 0; - return (ret[0] == 0) ? NULL : ret; -} - -void * -BSD44_derived_dlopen(const char *file, int num) -{ -#if !defined(HAVE_DLOPEN) - snprintf(error_message, sizeof(error_message), - "dlopen (%s) not supported", file); - return NULL; -#else - void *vp; - - if ((vp = dlopen((char *) file, num)) == NULL) - snprintf(error_message, sizeof(error_message), - "dlopen (%s) failed: %s", file, dlerror()); - return vp; -#endif -} - -void * -BSD44_derived_dlsym(void *handle, const char *name) -{ -#if !defined(HAVE_DLOPEN) - snprintf(error_message, sizeof(error_message), - "dlsym (%s) failed", name); - return NULL; -#else - void *vp; - -#ifndef __ELF__ - char buf[BUFSIZ]; - - if (*name != '_') - { - snprintf(buf, sizeof(buf), "_%s", name); - name = buf; - } -#endif /* !__ELF__ */ - if ((vp = dlsym(handle, (char *) name)) == NULL) - snprintf(error_message, sizeof(error_message), - "dlsym (%s) failed", name); - return vp; -#endif -} - -void -BSD44_derived_dlclose(void *handle) -{ -#if defined(HAVE_DLOPEN) - dlclose(handle); -#endif -} diff --git a/src/backend/port/dynloader/netbsd.h b/src/backend/port/dynloader/netbsd.h deleted file mode 100644 index 2ca332256b..0000000000 --- a/src/backend/port/dynloader/netbsd.h +++ /dev/null @@ -1,59 +0,0 @@ -/*------------------------------------------------------------------------- - * - * netbsd.h - * port-specific prototypes for NetBSD - * - * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group - * Portions Copyright (c) 1994, Regents of the University of California - * - * src/backend/port/dynloader/netbsd.h - * - *------------------------------------------------------------------------- - */ -#ifndef PORT_PROTOS_H -#define PORT_PROTOS_H - -#include -#include -#include - -#include "utils/dynamic_loader.h" /* pgrminclude ignore */ - -/* - * Dynamic Loader on NetBSD 1.0. - * - * this dynamic loader uses the system dynamic loading interface for shared - * libraries (ie. dlopen/dlsym/dlclose). The user must specify a shared - * library as the file to be dynamically loaded. - * - * agc - I know this is all a bit crufty, but it does work, is fairly - * portable, and works (the stipulation that the d.l. 
function must - * begin with an underscore is fairly tricky, and some versions of - * NetBSD (like 1.0, and 1.0A pre June 1995) have no dlerror.) - */ - -/* - * In some older systems, the RTLD_NOW flag isn't defined and the mode - * argument to dlopen must always be 1. The RTLD_GLOBAL flag is wanted - * if available, but it doesn't exist everywhere. - * If it doesn't exist, set it to 0 so it has no effect. - */ -#ifndef RTLD_NOW -#define RTLD_NOW 1 -#endif -#ifndef RTLD_GLOBAL -#define RTLD_GLOBAL 0 -#endif - -#define pg_dlopen(f) BSD44_derived_dlopen((f), RTLD_NOW | RTLD_GLOBAL) -#define pg_dlsym BSD44_derived_dlsym -#define pg_dlclose BSD44_derived_dlclose -#define pg_dlerror BSD44_derived_dlerror - -char *BSD44_derived_dlerror(void); -void *BSD44_derived_dlopen(const char *filename, int num); -void *BSD44_derived_dlsym(void *handle, const char *name); -void BSD44_derived_dlclose(void *handle); - -#endif /* PORT_PROTOS_H */ diff --git a/src/backend/port/dynloader/openbsd.c b/src/backend/port/dynloader/openbsd.c deleted file mode 100644 index 7b481b90d1..0000000000 --- a/src/backend/port/dynloader/openbsd.c +++ /dev/null @@ -1,106 +0,0 @@ -/* - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group - * Portions Copyright (c) 1990 The Regents of the University of California. - * All rights reserved. - * - * src/backend/port/dynloader/openbsd.c - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * 3. Neither the name of the University nor the names of its contributors - * may be used to endorse or promote products derived from this software - * without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND - * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE - * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL - * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS - * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) - * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT - * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY - * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF - * SUCH DAMAGE. - */ - -#if defined(LIBC_SCCS) && !defined(lint) -static char sccsid[] = "@(#)dl.c 5.4 (Berkeley) 2/23/91"; -#endif /* LIBC_SCCS and not lint */ - -#include "postgres.h" - -#include -#include -#include - -#include "dynloader.h" - -static char error_message[BUFSIZ]; - -char * -BSD44_derived_dlerror(void) -{ - static char ret[BUFSIZ]; - - strcpy(ret, error_message); - error_message[0] = 0; - return (ret[0] == 0) ? 
NULL : ret; -} - -void * -BSD44_derived_dlopen(const char *file, int num) -{ -#if !defined(HAVE_DLOPEN) - snprintf(error_message, sizeof(error_message), - "dlopen (%s) not supported", file); - return NULL; -#else - void *vp; - - if ((vp = dlopen((char *) file, num)) == NULL) - snprintf(error_message, sizeof(error_message), - "dlopen (%s) failed: %s", file, dlerror()); - return vp; -#endif -} - -void * -BSD44_derived_dlsym(void *handle, const char *name) -{ -#if !defined(HAVE_DLOPEN) - snprintf(error_message, sizeof(error_message), - "dlsym (%s) failed", name); - return NULL; -#else - void *vp; - -#ifndef __ELF__ - char buf[BUFSIZ]; - - if (*name != '_') - { - snprintf(buf, sizeof(buf), "_%s", name); - name = buf; - } -#endif /* !__ELF__ */ - if ((vp = dlsym(handle, (char *) name)) == NULL) - snprintf(error_message, sizeof(error_message), - "dlsym (%s) failed", name); - return vp; -#endif -} - -void -BSD44_derived_dlclose(void *handle) -{ -#if defined(HAVE_DLOPEN) - dlclose(handle); -#endif -} diff --git a/src/backend/port/dynloader/openbsd.h b/src/backend/port/dynloader/openbsd.h deleted file mode 100644 index 1130f39b41..0000000000 --- a/src/backend/port/dynloader/openbsd.h +++ /dev/null @@ -1,58 +0,0 @@ -/*------------------------------------------------------------------------- - * - * openbsd.h - * port-specific prototypes for OpenBSD - * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group - * Portions Copyright (c) 1994, Regents of the University of California - * - * src/backend/port/dynloader/openbsd.h - * - *------------------------------------------------------------------------- - */ -#ifndef PORT_PROTOS_H -#define PORT_PROTOS_H - -#include -#include -#include - -#include "utils/dynamic_loader.h" /* pgrminclude ignore */ - -/* - * Dynamic Loader on NetBSD 1.0. - * - * this dynamic loader uses the system dynamic loading interface for shared - * libraries (ie. dlopen/dlsym/dlclose). The user must specify a shared - * library as the file to be dynamically loaded. - * - * agc - I know this is all a bit crufty, but it does work, is fairly - * portable, and works (the stipulation that the d.l. function must - * begin with an underscore is fairly tricky, and some versions of - * NetBSD (like 1.0, and 1.0A pre June 1995) have no dlerror.) - */ - -/* - * In some older systems, the RTLD_NOW flag isn't defined and the mode - * argument to dlopen must always be 1. The RTLD_GLOBAL flag is wanted - * if available, but it doesn't exist everywhere. - * If it doesn't exist, set it to 0 so it has no effect. 
- */ -#ifndef RTLD_NOW -#define RTLD_NOW 1 -#endif -#ifndef RTLD_GLOBAL -#define RTLD_GLOBAL 0 -#endif - -#define pg_dlopen(f) BSD44_derived_dlopen((f), RTLD_NOW | RTLD_GLOBAL) -#define pg_dlsym BSD44_derived_dlsym -#define pg_dlclose BSD44_derived_dlclose -#define pg_dlerror BSD44_derived_dlerror - -char *BSD44_derived_dlerror(void); -void *BSD44_derived_dlopen(const char *filename, int num); -void *BSD44_derived_dlsym(void *handle, const char *name); -void BSD44_derived_dlclose(void *handle); - -#endif /* PORT_PROTOS_H */ diff --git a/src/backend/port/dynloader/solaris.c b/src/backend/port/dynloader/solaris.c deleted file mode 100644 index 19adcedc5e..0000000000 --- a/src/backend/port/dynloader/solaris.c +++ /dev/null @@ -1,7 +0,0 @@ -/* - * src/backend/port/dynloader/solaris.c - * - * Dummy file used for nothing at this point - * - * see solaris.h - */ diff --git a/src/backend/port/dynloader/solaris.h b/src/backend/port/dynloader/solaris.h deleted file mode 100644 index e7638ff0fc..0000000000 --- a/src/backend/port/dynloader/solaris.h +++ /dev/null @@ -1,38 +0,0 @@ -/*------------------------------------------------------------------------- - * - * solaris.h - * port-specific prototypes for Solaris - * - * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group - * Portions Copyright (c) 1994, Regents of the University of California - * - * src/backend/port/dynloader/solaris.h - * - *------------------------------------------------------------------------- - */ -#ifndef PORT_PROTOS_H -#define PORT_PROTOS_H - -#include -#include "utils/dynamic_loader.h" /* pgrminclude ignore */ - -/* - * In some older systems, the RTLD_NOW flag isn't defined and the mode - * argument to dlopen must always be 1. The RTLD_GLOBAL flag is wanted - * if available, but it doesn't exist everywhere. - * If it doesn't exist, set it to 0 so it has no effect. 
- */ -#ifndef RTLD_NOW -#define RTLD_NOW 1 -#endif -#ifndef RTLD_GLOBAL -#define RTLD_GLOBAL 0 -#endif - -#define pg_dlopen(f) dlopen((f), RTLD_NOW | RTLD_GLOBAL) -#define pg_dlsym dlsym -#define pg_dlclose dlclose -#define pg_dlerror dlerror - -#endif /* PORT_PROTOS_H */ diff --git a/src/backend/port/dynloader/win32.c b/src/backend/port/dynloader/win32.c deleted file mode 100644 index c59823e367..0000000000 --- a/src/backend/port/dynloader/win32.c +++ /dev/null @@ -1,85 +0,0 @@ -/* src/backend/port/dynloader/win32.c */ - -#include "postgres.h" - -char *dlerror(void); -int dlclose(void *handle); -void *dlsym(void *handle, const char *symbol); -void *dlopen(const char *path, int mode); - -static char last_dyn_error[512]; - -static void -set_dl_error(void) -{ - DWORD err = GetLastError(); - - if (FormatMessage(FORMAT_MESSAGE_IGNORE_INSERTS | - FORMAT_MESSAGE_FROM_SYSTEM, - NULL, - err, - MAKELANGID(LANG_ENGLISH, SUBLANG_DEFAULT), - last_dyn_error, - sizeof(last_dyn_error) - 1, - NULL) == 0) - { - snprintf(last_dyn_error, sizeof(last_dyn_error) - 1, - "unknown error %lu", err); - } -} - -char * -dlerror(void) -{ - if (last_dyn_error[0]) - return last_dyn_error; - else - return NULL; -} - -int -dlclose(void *handle) -{ - if (!FreeLibrary((HMODULE) handle)) - { - set_dl_error(); - return 1; - } - last_dyn_error[0] = 0; - return 0; -} - -void * -dlsym(void *handle, const char *symbol) -{ - void *ptr; - - ptr = GetProcAddress((HMODULE) handle, symbol); - if (!ptr) - { - set_dl_error(); - return NULL; - } - last_dyn_error[0] = 0; - return ptr; -} - -void * -dlopen(const char *path, int mode) -{ - HMODULE h; - int prevmode; - - /* Disable popup error messages when loading DLLs */ - prevmode = SetErrorMode(SEM_FAILCRITICALERRORS | SEM_NOOPENFILEERRORBOX); - h = LoadLibrary(path); - SetErrorMode(prevmode); - - if (!h) - { - set_dl_error(); - return NULL; - } - last_dyn_error[0] = 0; - return (void *) h; -} diff --git a/src/backend/port/dynloader/win32.h b/src/backend/port/dynloader/win32.h deleted file mode 100644 index ddbf866520..0000000000 --- a/src/backend/port/dynloader/win32.h +++ /dev/null @@ -1,19 +0,0 @@ -/* - * src/backend/port/dynloader/win32.h - */ -#ifndef PORT_PROTOS_H -#define PORT_PROTOS_H - -#include "utils/dynamic_loader.h" /* pgrminclude ignore */ - -#define pg_dlopen(f) dlopen((f), 1) -#define pg_dlsym dlsym -#define pg_dlclose dlclose -#define pg_dlerror dlerror - -char *dlerror(void); -int dlclose(void *handle); -void *dlsym(void *handle, const char *symbol); -void *dlopen(const char *path, int mode); - -#endif /* PORT_PROTOS_H */ diff --git a/src/backend/port/posix_sema.c b/src/backend/port/posix_sema.c index 5719caf9b5..5174550794 100644 --- a/src/backend/port/posix_sema.c +++ b/src/backend/port/posix_sema.c @@ -15,7 +15,7 @@ * forked backends, but they could not be accessed by exec'd backends. 
* * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * IDENTIFICATION @@ -41,13 +41,19 @@ #error cannot use named POSIX semaphores with EXEC_BACKEND #endif +typedef union SemTPadded +{ + sem_t pgsem; + char pad[PG_CACHE_LINE_SIZE]; +} SemTPadded; + /* typedef PGSemaphore is equivalent to pointer to sem_t */ typedef struct PGSemaphoreData { - sem_t pgsem; + SemTPadded sem_padded; } PGSemaphoreData; -#define PG_SEM_REF(x) (&(x)->pgsem) +#define PG_SEM_REF(x) (&(x)->sem_padded.pgsem) #define IPCProtection (0600) /* access/modify by user only */ diff --git a/src/backend/port/sysv_sema.c b/src/backend/port/sysv_sema.c index d4202feb56..1c178a7317 100644 --- a/src/backend/port/sysv_sema.c +++ b/src/backend/port/sysv_sema.c @@ -4,7 +4,7 @@ * Implement PGSemaphores using SysV semaphore facilities * * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * IDENTIFICATION diff --git a/src/backend/port/sysv_shmem.c b/src/backend/port/sysv_shmem.c index e8cf6d3e93..741c455ccb 100644 --- a/src/backend/port/sysv_shmem.c +++ b/src/backend/port/sysv_shmem.c @@ -9,7 +9,7 @@ * exist, though, because mmap'd shmem provides no way to find out how * many processes are attached, which we need for interlocking purposes. * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * IDENTIFICATION diff --git a/src/backend/port/tas/sunstudio_sparc.s b/src/backend/port/tas/sunstudio_sparc.s index 73ff315d1a..0db590ee46 100644 --- a/src/backend/port/tas/sunstudio_sparc.s +++ b/src/backend/port/tas/sunstudio_sparc.s @@ -3,7 +3,7 @@ ! sunstudio_sparc.s ! compare and swap for Sun Studio on Sparc ! -! Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group +! Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group ! Portions Copyright (c) 1994, Regents of the University of California ! ! IDENTIFICATION @@ -26,7 +26,7 @@ pg_atomic_cas: ! "cas" only works on sparcv9 and sparcv8plus chips, and - ! requies a compiler targeting these CPUs. It will fail + ! requires a compiler targeting these CPUs. It will fail ! on a compiler targeting sparcv8, and of course will not ! be understood by a sparcv8 CPU. gcc continues to use ! "ldstub" because it targets sparcv7. diff --git a/src/backend/port/tas/sunstudio_x86.s b/src/backend/port/tas/sunstudio_x86.s index 31934b01d4..af2b8c6a17 100644 --- a/src/backend/port/tas/sunstudio_x86.s +++ b/src/backend/port/tas/sunstudio_x86.s @@ -3,7 +3,7 @@ / sunstudio_x86.s / compare and swap for Sun Studio on x86 / -/ Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group +/ Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group / Portions Copyright (c) 1994, Regents of the University of California / / IDENTIFICATION diff --git a/src/backend/port/win32/crashdump.c b/src/backend/port/win32/crashdump.c index f06dfd1987..7b84d22679 100644 --- a/src/backend/port/win32/crashdump.c +++ b/src/backend/port/win32/crashdump.c @@ -28,7 +28,7 @@ * be added, though at the cost of a greater chance of the crash dump failing. 
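The SemTPadded union added to posix_sema.c above pads each sem_t out to (at least) one cache line, so semaphores allocated back to back in shared memory do not share a line and bounce it between CPUs. A self-contained sketch of the same idiom follows; MY_CACHE_LINE_SIZE, the array size, and the helper name are illustrative only (PostgreSQL takes the real value, PG_CACHE_LINE_SIZE, from pg_config_manual.h).

    #include <semaphore.h>

    #define MY_CACHE_LINE_SIZE 128          /* illustrative; cf. PG_CACHE_LINE_SIZE */

    typedef union PaddedSem
    {
        sem_t       sem;                    /* the payload */
        char        pad[MY_CACHE_LINE_SIZE];    /* forces sizeof() up to a line */
    } PaddedSem;

    /* Adjacent elements are now at least one cache line apart. */
    static PaddedSem sems[16];

    static inline sem_t *
    padded_sem_ref(int i)
    {
        /* same shape as the PG_SEM_REF() change above */
        return &sems[i].sem;
    }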
* * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * * IDENTIFICATION * src/backend/port/win32/crashdump.c diff --git a/src/backend/port/win32/mingwcompat.c b/src/backend/port/win32/mingwcompat.c index e02b41711e..3577d2538f 100644 --- a/src/backend/port/win32/mingwcompat.c +++ b/src/backend/port/win32/mingwcompat.c @@ -3,7 +3,7 @@ * mingwcompat.c * MinGW compatibility functions * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * * IDENTIFICATION * src/backend/port/win32/mingwcompat.c diff --git a/src/backend/port/win32/signal.c b/src/backend/port/win32/signal.c index 0fd993e3f3..f489cee8bd 100644 --- a/src/backend/port/win32/signal.c +++ b/src/backend/port/win32/signal.c @@ -3,7 +3,7 @@ * signal.c * Microsoft Windows Win32 Signal Emulation Functions * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * * IDENTIFICATION * src/backend/port/win32/signal.c diff --git a/src/backend/port/win32/socket.c b/src/backend/port/win32/socket.c index ba8b863d82..af35cfbbb3 100644 --- a/src/backend/port/win32/socket.c +++ b/src/backend/port/win32/socket.c @@ -3,7 +3,7 @@ * socket.c * Microsoft Windows Win32 Socket Functions * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * * IDENTIFICATION * src/backend/port/win32/socket.c @@ -690,39 +690,3 @@ pgwin32_select(int nfds, fd_set *readfds, fd_set *writefds, fd_set *exceptfds, c memcpy(writefds, &outwritefds, sizeof(fd_set)); return nummatches; } - - -/* - * Return win32 error string, since strerror can't - * handle winsock codes - */ -static char wserrbuf[256]; -const char * -pgwin32_socket_strerror(int err) -{ - static HANDLE handleDLL = INVALID_HANDLE_VALUE; - - if (handleDLL == INVALID_HANDLE_VALUE) - { - handleDLL = LoadLibraryEx("netmsg.dll", NULL, DONT_RESOLVE_DLL_REFERENCES | LOAD_LIBRARY_AS_DATAFILE); - if (handleDLL == NULL) - ereport(FATAL, - (errmsg_internal("could not load netmsg.dll: error code %lu", GetLastError()))); - } - - ZeroMemory(&wserrbuf, sizeof(wserrbuf)); - if (FormatMessage(FORMAT_MESSAGE_IGNORE_INSERTS | - FORMAT_MESSAGE_FROM_SYSTEM | - FORMAT_MESSAGE_FROM_HMODULE, - handleDLL, - err, - MAKELANGID(LANG_ENGLISH, SUBLANG_DEFAULT), - wserrbuf, - sizeof(wserrbuf) - 1, - NULL) == 0) - { - /* Failed to get id */ - sprintf(wserrbuf, "unrecognized winsock error %d", err); - } - return wserrbuf; -} diff --git a/src/backend/port/win32/timer.c b/src/backend/port/win32/timer.c index f0a45f4339..dac105acf5 100644 --- a/src/backend/port/win32/timer.c +++ b/src/backend/port/win32/timer.c @@ -8,7 +8,7 @@ * - Does not support interval timer (value->it_interval) * - Only supports ITIMER_REAL * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * * IDENTIFICATION * src/backend/port/win32/timer.c diff --git a/src/backend/port/win32_sema.c b/src/backend/port/win32_sema.c index a798510bbc..a924c59cdb 100644 --- a/src/backend/port/win32_sema.c +++ b/src/backend/port/win32_sema.c @@ -3,7 +3,7 @@ * win32_sema.c * Microsoft Windows Win32 Semaphores Emulation * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, 
PostgreSQL Global Development Group * * IDENTIFICATION * src/backend/port/win32_sema.c diff --git a/src/backend/port/win32_shmem.c b/src/backend/port/win32_shmem.c index 01f51f3158..f8ca52e1af 100644 --- a/src/backend/port/win32_shmem.c +++ b/src/backend/port/win32_shmem.c @@ -3,7 +3,7 @@ * win32_shmem.c * Implement shared memory using win32 facilities * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * * IDENTIFICATION * src/backend/port/win32_shmem.c @@ -21,6 +21,7 @@ HANDLE UsedShmemSegID = INVALID_HANDLE_VALUE; void *UsedShmemSegAddr = NULL; static Size UsedShmemSegSize = 0; +static bool EnableLockPagesPrivilege(int elevel); static void pgwin32_SharedMemoryDelete(int status, Datum shmId); /* @@ -103,6 +104,66 @@ PGSharedMemoryIsInUse(unsigned long id1, unsigned long id2) return true; } +/* + * EnableLockPagesPrivilege + * + * Try to acquire SeLockMemoryPrivilege so we can use large pages. + */ +static bool +EnableLockPagesPrivilege(int elevel) +{ + HANDLE hToken; + TOKEN_PRIVILEGES tp; + LUID luid; + + if (!OpenProcessToken(GetCurrentProcess(), TOKEN_ADJUST_PRIVILEGES | TOKEN_QUERY, &hToken)) + { + ereport(elevel, + (errmsg("could not enable Lock Pages in Memory user right: error code %lu", GetLastError()), + errdetail("Failed system call was %s.", "OpenProcessToken"))); + return FALSE; + } + + if (!LookupPrivilegeValue(NULL, SE_LOCK_MEMORY_NAME, &luid)) + { + ereport(elevel, + (errmsg("could not enable Lock Pages in Memory user right: error code %lu", GetLastError()), + errdetail("Failed system call was %s.", "LookupPrivilegeValue"))); + CloseHandle(hToken); + return FALSE; + } + tp.PrivilegeCount = 1; + tp.Privileges[0].Luid = luid; + tp.Privileges[0].Attributes = SE_PRIVILEGE_ENABLED; + + if (!AdjustTokenPrivileges(hToken, FALSE, &tp, 0, NULL, NULL)) + { + ereport(elevel, + (errmsg("could not enable Lock Pages in Memory user right: error code %lu", GetLastError()), + errdetail("Failed system call was %s.", "AdjustTokenPrivileges"))); + CloseHandle(hToken); + return FALSE; + } + + if (GetLastError() != ERROR_SUCCESS) + { + if (GetLastError() == ERROR_NOT_ALL_ASSIGNED) + ereport(elevel, + (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE), + errmsg("could not enable Lock Pages in Memory user right"), + errhint("Assign Lock Pages in Memory user right to the Windows user account which runs PostgreSQL."))); + else + ereport(elevel, + (errmsg("could not enable Lock Pages in Memory user right: error code %lu", GetLastError()), + errdetail("Failed system call was %s.", "AdjustTokenPrivileges"))); + CloseHandle(hToken); + return FALSE; + } + + CloseHandle(hToken); + + return TRUE; +} /* * PGSharedMemoryCreate @@ -127,11 +188,9 @@ PGSharedMemoryCreate(Size size, bool makePrivate, int port, int i; DWORD size_high; DWORD size_low; - - if (huge_pages == HUGE_PAGES_ON) - ereport(ERROR, - (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("huge pages not supported on this platform"))); + SIZE_T largePageSize = 0; + Size orig_size = size; + DWORD flProtect = PAGE_READWRITE; /* Room for a header? */ Assert(size > MAXALIGN(sizeof(PGShmemHeader))); @@ -140,6 +199,35 @@ PGSharedMemoryCreate(Size size, bool makePrivate, int port, UsedShmemSegAddr = NULL; + if (huge_pages == HUGE_PAGES_ON || huge_pages == HUGE_PAGES_TRY) + { + /* Does the processor support large pages? */ + largePageSize = GetLargePageMinimum(); + if (largePageSize == 0) + { + ereport(huge_pages == HUGE_PAGES_ON ? 
FATAL : DEBUG1, + (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), + errmsg("the processor does not support large pages"))); + ereport(DEBUG1, + (errmsg("disabling huge pages"))); + } + else if (!EnableLockPagesPrivilege(huge_pages == HUGE_PAGES_ON ? FATAL : DEBUG1)) + { + ereport(DEBUG1, + (errmsg("disabling huge pages"))); + } + else + { + /* Huge pages available and privilege enabled, so turn on */ + flProtect = PAGE_READWRITE | SEC_COMMIT | SEC_LARGE_PAGES; + + /* Round size up as appropriate. */ + if (size % largePageSize != 0) + size += largePageSize - (size % largePageSize); + } + } + +retry: #ifdef _WIN64 size_high = size >> 32; #else @@ -163,16 +251,35 @@ PGSharedMemoryCreate(Size size, bool makePrivate, int port, hmap = CreateFileMapping(INVALID_HANDLE_VALUE, /* Use the pagefile */ NULL, /* Default security attrs */ - PAGE_READWRITE, /* Memory is Read/Write */ + flProtect, size_high, /* Size Upper 32 Bits */ size_low, /* Size Lower 32 bits */ szShareMem); if (!hmap) - ereport(FATAL, - (errmsg("could not create shared memory segment: error code %lu", GetLastError()), - errdetail("Failed system call was CreateFileMapping(size=%zu, name=%s).", - size, szShareMem))); + { + if (GetLastError() == ERROR_NO_SYSTEM_RESOURCES && + huge_pages == HUGE_PAGES_TRY && + (flProtect & SEC_LARGE_PAGES) != 0) + { + elog(DEBUG1, "CreateFileMapping(%zu) with SEC_LARGE_PAGES failed, " + "huge pages disabled", + size); + + /* + * Use the original size, not the rounded-up value, when + * falling back to non-huge pages. + */ + size = orig_size; + flProtect = PAGE_READWRITE; + goto retry; + } + else + ereport(FATAL, + (errmsg("could not create shared memory segment: error code %lu", GetLastError()), + errdetail("Failed system call was CreateFileMapping(size=%zu, name=%s).", + size, szShareMem))); + } /* * If the segment already existed, CreateFileMapping() will return a diff --git a/src/backend/postmaster/autovacuum.c b/src/backend/postmaster/autovacuum.c index 776b1c0a9d..978089575b 100644 --- a/src/backend/postmaster/autovacuum.c +++ b/src/backend/postmaster/autovacuum.c @@ -50,7 +50,7 @@ * there is a window (caused by pgstat delay) on which a worker may choose a * table that was already vacuumed; this is a bug in the current design. * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * @@ -79,6 +79,7 @@ #include "lib/ilist.h" #include "libpq/pqsignal.h" #include "miscadmin.h" +#include "nodes/makefuncs.h" #include "pgstat.h" #include "postmaster/autovacuum.h" #include "postmaster/fork_process.h" @@ -93,7 +94,6 @@ #include "storage/sinvaladt.h" #include "storage/smgr.h" #include "tcop/tcopprot.h" -#include "utils/dsa.h" #include "utils/fmgroids.h" #include "utils/fmgrprotos.h" #include "utils/lsyscache.h" @@ -212,9 +212,9 @@ typedef struct autovac_table * wi_launchtime Time at which this worker was launched * wi_cost_* Vacuum cost-based delay parameters current in this worker * - * All fields are protected by AutovacuumLock, except for wi_tableoid which is - * protected by AutovacuumScheduleLock (which is read-only for everyone except - * that worker itself). + * All fields are protected by AutovacuumLock, except for wi_tableoid and + * wi_sharedrel which are protected by AutovacuumScheduleLock (note these + * two fields are read-only for everyone except that worker itself). 
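In the win32_shmem.c hunk above, when huge_pages is on or try, the requested segment size is rounded up to a multiple of GetLargePageMinimum() before CreateFileMapping() is called with SEC_LARGE_PAGES; if that mapping then fails with ERROR_NO_SYSTEM_RESOURCES under HUGE_PAGES_TRY, the code retries with the original size and plain PAGE_READWRITE. The rounding step reduces to the small helper sketched below (the function name is illustrative, not part of the patch).

    #include <stddef.h>

    /* Round size up to the next multiple of large_page_size (0 = no rounding). */
    static size_t
    round_up_to_large_page(size_t size, size_t large_page_size)
    {
        if (large_page_size != 0 && size % large_page_size != 0)
            size += large_page_size - (size % large_page_size);
        return size;
    }

For example, with a 2 MB minimum large page, a 141 MB request rounds up to 142 MB, while a request that is already a multiple is returned unchanged.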
*------------- */ typedef struct WorkerInfoData @@ -436,7 +436,7 @@ AutoVacLauncherMain(int argc, char *argv[]) am_autovacuum_launcher = true; /* Identify myself via ps */ - init_ps_display("autovacuum launcher process", "", "", ""); + init_ps_display(pgstat_get_backend_desc(B_AUTOVAC_LAUNCHER), "", "", ""); ereport(DEBUG1, (errmsg("autovacuum launcher started"))); @@ -477,7 +477,7 @@ AutoVacLauncherMain(int argc, char *argv[]) InitProcess(); #endif - InitPostgres(NULL, InvalidOid, NULL, InvalidOid, NULL); + InitPostgres(NULL, InvalidOid, NULL, InvalidOid, NULL, false); SetProcessingMode(NormalProcessing); @@ -522,16 +522,12 @@ AutoVacLauncherMain(int argc, char *argv[]) pgstat_report_wait_end(); AbortBufferIO(); UnlockBuffers(); - if (CurrentResourceOwner) - { - ResourceOwnerRelease(CurrentResourceOwner, - RESOURCE_RELEASE_BEFORE_LOCKS, - false, true); - /* we needn't bother with the other ResourceOwnerRelease phases */ - } + /* this is probably dead code, but let's be safe: */ + if (AuxProcessResourceOwner) + ReleaseAuxProcessResources(false); AtEOXact_Buffers(false); AtEOXact_SMgr(); - AtEOXact_Files(); + AtEOXact_Files(false); AtEOXact_HashTables(false); /* @@ -574,6 +570,12 @@ AutoVacLauncherMain(int argc, char *argv[]) /* must unblock signals before calling rebuild_database_list */ PG_SETMASK(&UnBlockSig); + /* + * Set always-secure search path. Launcher doesn't connect to a database, + * so this has no effect. + */ + SetConfigOption("search_path", "", PGC_SUSET, PGC_S_OVERRIDE); + /* * Force zero_damaged_pages OFF in the autovac process, even if it is set * in postgresql.conf. We don't really want such a dangerous option being @@ -1519,7 +1521,7 @@ AutoVacWorkerMain(int argc, char *argv[]) am_autovacuum_worker = true; /* Identify myself via ps */ - init_ps_display("autovacuum worker process", "", "", ""); + init_ps_display(pgstat_get_backend_desc(B_AUTOVAC_WORKER), "", "", ""); SetProcessingMode(InitProcessing); @@ -1584,6 +1586,14 @@ AutoVacWorkerMain(int argc, char *argv[]) PG_SETMASK(&UnBlockSig); + /* + * Set always-secure search path, so malicious users can't redirect user + * code (e.g. pg_index.indexprs). (That code runs in a + * SECURITY_RESTRICTED_OPERATION sandbox, so malicious users could not + * take control of the entire autovacuum worker in any case.) + */ + SetConfigOption("search_path", "", PGC_SUSET, PGC_S_OVERRIDE); + /* * Force zero_damaged_pages OFF in the autovac process, even if it is set * in postgresql.conf. We don't really want such a dangerous option being @@ -1679,7 +1689,7 @@ AutoVacWorkerMain(int argc, char *argv[]) * Note: if we have selected a just-deleted database (due to using * stale stats info), we'll fail and exit here. */ - InitPostgres(NULL, dbid, NULL, InvalidOid, dbname); + InitPostgres(NULL, dbid, NULL, InvalidOid, dbname, false); SetProcessingMode(NormalProcessing); set_ps_display(dbname, false); ereport(DEBUG1, @@ -2070,14 +2080,11 @@ do_autovacuum(void) */ if (classForm->relpersistence == RELPERSISTENCE_TEMP) { - int backendID; - - backendID = GetTempNamespaceBackendId(classForm->relnamespace); - - /* We just ignore it if the owning backend is still active */ - if (backendID != InvalidBackendId && - (backendID == MyBackendId || - BackendIdGetProc(backendID) == NULL)) + /* + * We just ignore it if the owning backend is still active and + * using the temporary schema. 
+ */ + if (!isTempNamespaceInUse(classForm->relnamespace)) { /* * The table seems to be orphaned -- although it might be that @@ -2205,7 +2212,6 @@ do_autovacuum(void) { Oid relid = lfirst_oid(cell); Form_pg_class classForm; - int backendID; ObjectAddress object; /* @@ -2247,10 +2253,8 @@ do_autovacuum(void) UnlockRelationOid(relid, AccessExclusiveLock); continue; } - backendID = GetTempNamespaceBackendId(classForm->relnamespace); - if (!(backendID != InvalidBackendId && - (backendID == MyBackendId || - BackendIdGetProc(backendID) == NULL))) + + if (isTempNamespaceInUse(classForm->relnamespace)) { UnlockRelationOid(relid, AccessExclusiveLock); continue; @@ -2303,7 +2307,9 @@ do_autovacuum(void) foreach(cell, table_oids) { Oid relid = lfirst_oid(cell); + HeapTuple classTup; autovac_table *tab; + bool isshared; bool skipit; int stdVacuumCostDelay; int stdVacuumCostLimit; @@ -2328,9 +2334,23 @@ do_autovacuum(void) } /* - * hold schedule lock from here until we're sure that this table still - * needs vacuuming. We also need the AutovacuumLock to walk the - * worker array, but we'll let go of that one quickly. + * Find out whether the table is shared or not. (It's slightly + * annoying to fetch the syscache entry just for this, but in typical + * cases it adds little cost because table_recheck_autovac would + * refetch the entry anyway. We could buy that back by copying the + * tuple here and passing it to table_recheck_autovac, but that + * increases the odds of that function working with stale data.) + */ + classTup = SearchSysCache1(RELOID, ObjectIdGetDatum(relid)); + if (!HeapTupleIsValid(classTup)) + continue; /* somebody deleted the rel, forget it */ + isshared = ((Form_pg_class) GETSTRUCT(classTup))->relisshared; + ReleaseSysCache(classTup); + + /* + * Hold schedule lock from here until we've claimed the table. We + * also need the AutovacuumLock to walk the worker array, but that one + * can just be a shared lock. */ LWLockAcquire(AutovacuumScheduleLock, LW_EXCLUSIVE); LWLockAcquire(AutovacuumLock, LW_SHARED); @@ -2366,6 +2386,16 @@ do_autovacuum(void) continue; } + /* + * Store the table's OID in shared memory before releasing the + * schedule lock, so that other workers don't try to vacuum it + * concurrently. (We claim it here so as not to hold + * AutovacuumScheduleLock while rechecking the stats.) + */ + MyWorkerInfo->wi_tableoid = relid; + MyWorkerInfo->wi_sharedrel = isshared; + LWLockRelease(AutovacuumScheduleLock); + /* * Check whether pgstat data still says we need to vacuum this table. * It could have changed if something else processed the table while @@ -2382,18 +2412,13 @@ do_autovacuum(void) if (tab == NULL) { /* someone else vacuumed the table, or it went away */ + LWLockAcquire(AutovacuumScheduleLock, LW_EXCLUSIVE); + MyWorkerInfo->wi_tableoid = InvalidOid; + MyWorkerInfo->wi_sharedrel = false; LWLockRelease(AutovacuumScheduleLock); continue; } - /* - * Ok, good to go. Store the table in shared memory before releasing - * the lock so that other workers don't vacuum it concurrently. - */ - MyWorkerInfo->wi_tableoid = relid; - MyWorkerInfo->wi_sharedrel = tab->at_sharedrel; - LWLockRelease(AutovacuumScheduleLock); - /* * Remember the prevailing values of the vacuum cost GUCs. 
We have to * restore these at the bottom of the loop, else we'll compute wrong @@ -2444,8 +2469,10 @@ do_autovacuum(void) */ PG_TRY(); { + /* Use PortalContext for any per-table allocations */ + MemoryContextSwitchTo(PortalContext); + /* have at it */ - MemoryContextSwitchTo(TopTransactionContext); autovacuum_do_vac_analyze(tab, bstrategy); /* @@ -2482,6 +2509,9 @@ do_autovacuum(void) } PG_END_TRY(); + /* Make sure we're back in AutovacMemCxt */ + MemoryContextSwitchTo(AutovacMemCxt); + did_vacuum = true; /* the PGXACT flags are reset at the next end of transaction */ @@ -2503,10 +2533,10 @@ do_autovacuum(void) * settings, so we don't want to give up our share of I/O for a very * short interval and thereby thrash the global balance. */ - LWLockAcquire(AutovacuumLock, LW_EXCLUSIVE); + LWLockAcquire(AutovacuumScheduleLock, LW_EXCLUSIVE); MyWorkerInfo->wi_tableoid = InvalidOid; MyWorkerInfo->wi_sharedrel = false; - LWLockRelease(AutovacuumLock); + LWLockRelease(AutovacuumScheduleLock); /* restore vacuum cost GUCs for the next iteration */ VacuumCostDelay = stdVacuumCostDelay; @@ -2525,6 +2555,8 @@ do_autovacuum(void) continue; if (workitem->avw_active) continue; + if (workitem->avw_database != MyDatabaseId) + continue; /* claim this one, and release lock while performing it */ workitem->avw_active = true; @@ -2533,8 +2565,7 @@ do_autovacuum(void) perform_work_item(workitem); /* - * Check for config changes before acquiring lock for further - * jobs. + * Check for config changes before acquiring lock for further jobs. */ CHECK_FOR_INTERRUPTS(); if (got_SIGHUP) @@ -2601,10 +2632,9 @@ perform_work_item(AutoVacuumWorkItem *workitem) /* * Save the relation name for a possible error message, to avoid a catalog * lookup in case of an error. If any of these return NULL, then the - * relation has been dropped since last we checked; skip it. Note: they - * must live in a long-lived memory context because we call vacuum and - * analyze in different transactions. + * relation has been dropped since last we checked; skip it. */ + Assert(CurrentMemoryContext == AutovacMemCxt); cur_relname = get_rel_name(workitem->avw_relation); cur_nspname = get_namespace_name(get_rel_namespace(workitem->avw_relation)); @@ -2614,6 +2644,9 @@ perform_work_item(AutoVacuumWorkItem *workitem) autovac_report_workitem(workitem, cur_nspname, cur_datname); + /* clean up memory before each work item */ + MemoryContextResetAndDeleteChildren(PortalContext); + /* * We will abort the current work item if something errors out, and * continue with the next one; in particular, this happens if we are @@ -2622,9 +2655,10 @@ perform_work_item(AutoVacuumWorkItem *workitem) */ PG_TRY(); { - /* have at it */ - MemoryContextSwitchTo(TopTransactionContext); + /* Use PortalContext for any per-work-item allocations */ + MemoryContextSwitchTo(PortalContext); + /* have at it */ switch (workitem->avw_type) { case AVW_BRINSummarizeRange: @@ -2668,6 +2702,9 @@ perform_work_item(AutoVacuumWorkItem *workitem) } PG_END_TRY(); + /* Make sure we're back in AutovacMemCxt */ + MemoryContextSwitchTo(AutovacMemCxt); + /* We intentionally do not set did_vacuum here */ /* be tidy */ @@ -2857,7 +2894,7 @@ table_recheck_autovac(Oid relid, HTAB *table_toast_map, tab->at_vacoptions = VACOPT_SKIPTOAST | (dovacuum ? VACOPT_VACUUM : 0) | (doanalyze ? VACOPT_ANALYZE : 0) | - (!wraparound ? VACOPT_NOWAIT : 0); + (!wraparound ? 
VACOPT_SKIP_LOCKED : 0); tab->at_params.freeze_min_age = freeze_min_age; tab->at_params.freeze_table_age = freeze_table_age; tab->at_params.multixact_freeze_min_age = multixact_freeze_min_age; @@ -3069,20 +3106,19 @@ relation_needs_vacanalyze(Oid relid, static void autovacuum_do_vac_analyze(autovac_table *tab, BufferAccessStrategy bstrategy) { - RangeVar rangevar; - - /* Set up command parameters --- use local variables instead of palloc */ - MemSet(&rangevar, 0, sizeof(rangevar)); - - rangevar.schemaname = tab->at_nspname; - rangevar.relname = tab->at_relname; - rangevar.location = -1; + RangeVar *rangevar; + VacuumRelation *rel; + List *rel_list; /* Let pgstat know what we're doing */ autovac_report_activity(tab); - vacuum(tab->at_vacoptions, &rangevar, tab->at_relid, &tab->at_params, NIL, - bstrategy, true); + /* Set up one VacuumRelation target, identified by OID, for vacuum() */ + rangevar = makeRangeVar(tab->at_nspname, tab->at_relname, -1); + rel = makeVacuumRelation(rangevar, tab->at_relid, NIL); + rel_list = list_make1(rel); + + vacuum(tab->at_vacoptions, rel_list, &tab->at_params, bstrategy, true); } /* @@ -3181,12 +3217,14 @@ AutoVacuumingActive(void) /* * Request one work item to the next autovacuum run processing our database. + * Return false if the request can't be recorded. */ -void +bool AutoVacuumRequestWork(AutoVacuumWorkItemType type, Oid relationId, BlockNumber blkno) { int i; + bool result = false; LWLockAcquire(AutovacuumLock, LW_EXCLUSIVE); @@ -3206,12 +3244,15 @@ AutoVacuumRequestWork(AutoVacuumWorkItemType type, Oid relationId, workitem->avw_database = MyDatabaseId; workitem->avw_relation = relationId; workitem->avw_blockNumber = blkno; + result = true; /* done */ break; } LWLockRelease(AutovacuumLock); + + return result; } /* diff --git a/src/backend/postmaster/bgworker.c b/src/backend/postmaster/bgworker.c index 28af6f0f07..d2b695e146 100644 --- a/src/backend/postmaster/bgworker.c +++ b/src/backend/postmaster/bgworker.c @@ -2,7 +2,7 @@ * bgworker.c * POSTGRES pluggable background workers implementation * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * * IDENTIFICATION * src/backend/postmaster/bgworker.c @@ -344,6 +344,8 @@ BackgroundWorkerStateChange(void) */ ascii_safe_strlcpy(rw->rw_worker.bgw_name, slot->worker.bgw_name, BGW_MAXLEN); + ascii_safe_strlcpy(rw->rw_worker.bgw_type, + slot->worker.bgw_type, BGW_MAXLEN); ascii_safe_strlcpy(rw->rw_worker.bgw_library_name, slot->worker.bgw_library_name, BGW_MAXLEN); ascii_safe_strlcpy(rw->rw_worker.bgw_function_name, @@ -630,34 +632,33 @@ SanityCheckBackgroundWorker(BackgroundWorker *worker, int elevel) return false; } + /* + * If bgw_type is not filled in, use bgw_name. + */ + if (strcmp(worker->bgw_type, "") == 0) + strcpy(worker->bgw_type, worker->bgw_name); + return true; } static void bgworker_quickdie(SIGNAL_ARGS) { - sigaddset(&BlockSig, SIGQUIT); /* prevent nested calls */ - PG_SETMASK(&BlockSig); - - /* - * We DO NOT want to run proc_exit() callbacks -- we're here because - * shared memory may be corrupted, so we don't want to try to clean up our - * transaction. Just nail the windows shut and get out of town. Now that - * there's an atexit callback to prevent third-party code from breaking - * things by calling exit() directly, we have to reset the callbacks - * explicitly to make this work as intended. - */ - on_exit_reset(); - /* - * Note we do exit(2) not exit(0). 
This is to force the postmaster into a - * system reset cycle if some idiot DBA sends a manual SIGQUIT to a random + * We DO NOT want to run proc_exit() or atexit() callbacks -- we're here + * because shared memory may be corrupted, so we don't want to try to + * clean up our transaction. Just nail the windows shut and get out of + * town. The callbacks wouldn't be safe to run from a signal handler, + * anyway. + * + * Note we do _exit(2) not _exit(0). This is to force the postmaster into + * a system reset cycle if someone sends a manual SIGQUIT to a random * backend. This is necessary precisely because we don't clean up our * shared memory state. (The "dead man switch" mechanism in pmsignal.c * should ensure the postmaster sees this as a crash, too, but no harm in * being doubly sure.) */ - exit(2); + _exit(2); } /* @@ -671,7 +672,7 @@ bgworker_die(SIGNAL_ARGS) ereport(FATAL, (errcode(ERRCODE_ADMIN_SHUTDOWN), errmsg("terminating background worker \"%s\" due to administrator command", - MyBgworkerEntry->bgw_name))); + MyBgworkerEntry->bgw_type))); } /* @@ -700,7 +701,6 @@ void StartBackgroundWorker(void) { sigjmp_buf local_sigjmp_buf; - char buf[MAXPGPATH]; BackgroundWorker *worker = MyBgworkerEntry; bgworker_main_type entrypt; @@ -710,8 +710,7 @@ StartBackgroundWorker(void) IsBackgroundWorker = true; /* Identify myself via ps */ - snprintf(buf, MAXPGPATH, "bgworker: %s", worker->bgw_name); - init_ps_display(buf, "", "", ""); + init_ps_display(worker->bgw_name, "", "", ""); /* * If we're not supposed to have shared memory access, then detach from @@ -1028,14 +1027,18 @@ RegisterDynamicBackgroundWorker(BackgroundWorker *worker, * Get the PID of a dynamically-registered background worker. * * If the worker is determined to be running, the return value will be - * BGWH_STARTED and *pidp will get the PID of the worker process. - * Otherwise, the return value will be BGWH_NOT_YET_STARTED if the worker - * hasn't been started yet, and BGWH_STOPPED if the worker was previously - * running but is no longer. + * BGWH_STARTED and *pidp will get the PID of the worker process. If the + * postmaster has not yet attempted to start the worker, the return value will + * be BGWH_NOT_YET_STARTED. Otherwise, the return value is BGWH_STOPPED. * - * In the latter case, the worker may be stopped temporarily (if it is - * configured for automatic restart and exited non-zero) or gone for - * good (if it exited with code 0 or if it is configured not to restart). + * BGWH_STOPPED can indicate either that the worker is temporarily stopped + * (because it is configured for automatic restart and exited non-zero), + * or that the worker is permanently stopped (because it exited with exit + * code 0, or was not configured for automatic restart), or even that the + * worker was unregistered without ever starting (either because startup + * failed and the worker is not configured for automatic restart, or because + * TerminateBackgroundWorker was used before the worker was successfully + * started). */ BgwHandleStatus GetBackgroundWorkerPid(BackgroundWorkerHandle *handle, pid_t *pidp) @@ -1060,8 +1063,11 @@ GetBackgroundWorkerPid(BackgroundWorkerHandle *handle, pid_t *pidp) * time, but we assume such changes are atomic. So the value we read * won't be garbage, but it might be out of date by the time the caller * examines it (but that's unavoidable anyway). + * + * The in_use flag could be in the process of changing from true to false, + * but if it is already false then it can't change further. 
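The bgworker.c changes above introduce bgw_type alongside bgw_name: bgw_name stays the human-readable, per-instance string used for the ps display, while bgw_type identifies the kind of worker (it is what bgworker_die now reports, and it defaults to bgw_name when left empty). A hedged sketch of how an extension would fill in both fields when registering a worker follows; the extension, library, and function names are placeholders.

    #include "postgres.h"
    #include "fmgr.h"
    #include "postmaster/bgworker.h"

    PG_MODULE_MAGIC;

    void        _PG_init(void);

    void
    _PG_init(void)
    {
        BackgroundWorker worker;

        memset(&worker, 0, sizeof(worker));
        worker.bgw_flags = BGWORKER_SHMEM_ACCESS;
        worker.bgw_start_time = BgWorkerStart_RecoveryFinished;
        worker.bgw_restart_time = BGW_NEVER_RESTART;
        snprintf(worker.bgw_name, BGW_MAXLEN, "example worker: main");
        snprintf(worker.bgw_type, BGW_MAXLEN, "example worker");
        snprintf(worker.bgw_library_name, BGW_MAXLEN, "example_worker");
        snprintf(worker.bgw_function_name, BGW_MAXLEN, "example_worker_main");

        RegisterBackgroundWorker(&worker);
    }

With bgw_type set, the "terminating background worker" message and the new GetBackgroundWorkerTypeByPid() lookup added just below both report "example worker" rather than the longer per-instance name.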
*/ - if (handle->generation != slot->generation) + if (handle->generation != slot->generation || !slot->in_use) pid = 0; else pid = slot->pid; @@ -1233,3 +1239,40 @@ LookupBackgroundWorkerFunction(const char *libraryname, const char *funcname) return (bgworker_main_type) load_external_function(libraryname, funcname, true, NULL); } + +/* + * Given a PID, get the bgw_type of the background worker. Returns NULL if + * not a valid background worker. + * + * The return value is in static memory belonging to this function, so it has + * to be used before calling this function again. This is so that the caller + * doesn't have to worry about the background worker locking protocol. + */ +const char * +GetBackgroundWorkerTypeByPid(pid_t pid) +{ + int slotno; + bool found = false; + static char result[BGW_MAXLEN]; + + LWLockAcquire(BackgroundWorkerLock, LW_SHARED); + + for (slotno = 0; slotno < BackgroundWorkerData->total_slots; slotno++) + { + BackgroundWorkerSlot *slot = &BackgroundWorkerData->slot[slotno]; + + if (slot->pid > 0 && slot->pid == pid) + { + strcpy(result, slot->worker.bgw_type); + found = true; + break; + } + } + + LWLockRelease(BackgroundWorkerLock); + + if (!found) + return NULL; + + return result; +} diff --git a/src/backend/postmaster/bgwriter.c b/src/backend/postmaster/bgwriter.c index 9ad74ee977..b1e9bb2c53 100644 --- a/src/backend/postmaster/bgwriter.c +++ b/src/backend/postmaster/bgwriter.c @@ -24,7 +24,7 @@ * should be killed by SIGQUIT and then a recovery cycle started. * * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * * * IDENTIFICATION @@ -141,12 +141,6 @@ BackgroundWriterMain(void) /* We allow SIGQUIT (quickdie) at all times */ sigdelset(&BlockSig, SIGQUIT); - /* - * Create a resource owner to keep track of our resources (currently only - * buffer pins). - */ - CurrentResourceOwner = ResourceOwnerCreate(NULL, "Background Writer"); - /* * We just started, assume there has been either a shutdown or * end-of-recovery snapshot. @@ -191,14 +185,10 @@ BackgroundWriterMain(void) ConditionVariableCancelSleep(); AbortBufferIO(); UnlockBuffers(); - /* buffer pins are released here: */ - ResourceOwnerRelease(CurrentResourceOwner, - RESOURCE_RELEASE_BEFORE_LOCKS, - false, true); - /* we needn't bother with the other ResourceOwnerRelease phases */ + ReleaseAuxProcessResources(false); AtEOXact_Buffers(false); AtEOXact_SMgr(); - AtEOXact_Files(); + AtEOXact_Files(false); AtEOXact_HashTables(false); /* @@ -409,27 +399,21 @@ BackgroundWriterMain(void) static void bg_quickdie(SIGNAL_ARGS) { - PG_SETMASK(&BlockSig); - /* - * We DO NOT want to run proc_exit() callbacks -- we're here because - * shared memory may be corrupted, so we don't want to try to clean up our - * transaction. Just nail the windows shut and get out of town. Now that - * there's an atexit callback to prevent third-party code from breaking - * things by calling exit() directly, we have to reset the callbacks - * explicitly to make this work as intended. - */ - on_exit_reset(); - - /* - * Note we do exit(2) not exit(0). This is to force the postmaster into a - * system reset cycle if some idiot DBA sends a manual SIGQUIT to a random + * We DO NOT want to run proc_exit() or atexit() callbacks -- we're here + * because shared memory may be corrupted, so we don't want to try to + * clean up our transaction. Just nail the windows shut and get out of + * town. 
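GetBackgroundWorkerTypeByPid() above hands back a pointer into a static buffer, so the result must be consumed before the next call. A minimal hypothetical use, assuming the new prototype is exposed through postmaster/bgworker.h:

#include "postgres.h"

#include "postmaster/bgworker.h"

/*
 * Hypothetical helper: label a child process for a log line, preferring the
 * worker's bgw_type when the PID belongs to a background worker.
 */
static const char *
describe_child(pid_t pid)
{
	const char *bgw_type = GetBackgroundWorkerTypeByPid(pid);

	/* The result points into static storage; use it before the next call. */
	return bgw_type != NULL ? bgw_type : "server process";
}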
The callbacks wouldn't be safe to run from a signal handler, + * anyway. + * + * Note we do _exit(2) not _exit(0). This is to force the postmaster into + * a system reset cycle if someone sends a manual SIGQUIT to a random * backend. This is necessary precisely because we don't clean up our * shared memory state. (The "dead man switch" mechanism in pmsignal.c * should ensure the postmaster sees this as a crash, too, but no harm in * being doubly sure.) */ - exit(2); + _exit(2); } /* SIGHUP: set flag to re-read config file at next convenient time */ diff --git a/src/backend/postmaster/checkpointer.c b/src/backend/postmaster/checkpointer.c index e48ebd557f..1a033093c5 100644 --- a/src/backend/postmaster/checkpointer.c +++ b/src/backend/postmaster/checkpointer.c @@ -26,7 +26,7 @@ * restart needs to be forced.) * * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * * * IDENTIFICATION @@ -231,12 +231,6 @@ CheckpointerMain(void) */ last_checkpoint_time = last_xlog_switch_time = (pg_time_t) time(NULL); - /* - * Create a resource owner to keep track of our resources (currently only - * buffer pins). - */ - CurrentResourceOwner = ResourceOwnerCreate(NULL, "Checkpointer"); - /* * Create a memory context that we will do all our work in. We do this so * that we can reset the context during error recovery and thereby avoid @@ -275,14 +269,10 @@ CheckpointerMain(void) pgstat_report_wait_end(); AbortBufferIO(); UnlockBuffers(); - /* buffer pins are released here: */ - ResourceOwnerRelease(CurrentResourceOwner, - RESOURCE_RELEASE_BEFORE_LOCKS, - false, true); - /* we needn't bother with the other ResourceOwnerRelease phases */ + ReleaseAuxProcessResources(false); AtEOXact_Buffers(false); AtEOXact_SMgr(); - AtEOXact_Files(); + AtEOXact_Files(false); AtEOXact_HashTables(false); /* Warn any waiting backends that the checkpoint failed. */ @@ -624,7 +614,7 @@ CheckArchiveTimeout(void) * If the returned pointer points exactly to a segment boundary, * assume nothing happened. */ - if ((switchpoint % XLogSegSize) != 0) + if (XLogSegmentOffset(switchpoint, wal_segment_size) != 0) elog(DEBUG1, "write-ahead log switch forced (archive_timeout=%d)", XLogArchiveTimeout); } @@ -782,7 +772,8 @@ IsCheckpointOnSchedule(double progress) recptr = GetXLogReplayRecPtr(NULL); else recptr = GetInsertRecPtr(); - elapsed_xlogs = (((double) (recptr - ckpt_start_recptr)) / XLogSegSize) / CheckPointSegments; + elapsed_xlogs = (((double) (recptr - ckpt_start_recptr)) / + wal_segment_size) / CheckPointSegments; if (progress < elapsed_xlogs) { @@ -822,27 +813,21 @@ IsCheckpointOnSchedule(double progress) static void chkpt_quickdie(SIGNAL_ARGS) { - PG_SETMASK(&BlockSig); - /* - * We DO NOT want to run proc_exit() callbacks -- we're here because - * shared memory may be corrupted, so we don't want to try to clean up our - * transaction. Just nail the windows shut and get out of town. Now that - * there's an atexit callback to prevent third-party code from breaking - * things by calling exit() directly, we have to reset the callbacks - * explicitly to make this work as intended. - */ - on_exit_reset(); - - /* - * Note we do exit(2) not exit(0). This is to force the postmaster into a - * system reset cycle if some idiot DBA sends a manual SIGQUIT to a random + * We DO NOT want to run proc_exit() or atexit() callbacks -- we're here + * because shared memory may be corrupted, so we don't want to try to + * clean up our transaction. 
Just nail the windows shut and get out of + * town. The callbacks wouldn't be safe to run from a signal handler, + * anyway. + * + * Note we do _exit(2) not _exit(0). This is to force the postmaster into + * a system reset cycle if someone sends a manual SIGQUIT to a random * backend. This is necessary precisely because we don't clean up our * shared memory state. (The "dead man switch" mechanism in pmsignal.c * should ensure the postmaster sees this as a crash, too, but no harm in * being doubly sure.) */ - exit(2); + _exit(2); } /* SIGHUP: set flag to re-read config file at next convenient time */ diff --git a/src/backend/postmaster/fork_process.c b/src/backend/postmaster/fork_process.c index 65145b1205..d3005d9f1d 100644 --- a/src/backend/postmaster/fork_process.c +++ b/src/backend/postmaster/fork_process.c @@ -4,7 +4,7 @@ * EXEC_BACKEND case; it might be extended to do so, but it would be * considerably more complex. * - * Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Copyright (c) 1996-2018, PostgreSQL Global Development Group * * IDENTIFICATION * src/backend/postmaster/fork_process.c diff --git a/src/backend/postmaster/pgarch.c b/src/backend/postmaster/pgarch.c index ddf9d698e0..885e85ad8a 100644 --- a/src/backend/postmaster/pgarch.c +++ b/src/backend/postmaster/pgarch.c @@ -14,7 +14,7 @@ * * Initial author: Simon Riggs simon@2ndquadrant.com * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * @@ -236,7 +236,7 @@ PgArchiverMain(int argc, char *argv[]) /* * Identify myself via ps */ - init_ps_display("archiver process", "", "", ""); + init_ps_display("archiver", "", "", ""); pgarch_MainLoop(); @@ -673,11 +673,6 @@ pgarch_readyXlog(char *xlog) snprintf(XLogArchiveStatusDir, MAXPGPATH, XLOGDIR "/archive_status"); rldir = AllocateDir(XLogArchiveStatusDir); - if (rldir == NULL) - ereport(ERROR, - (errcode_for_file_access(), - errmsg("could not open archive status directory \"%s\": %m", - XLogArchiveStatusDir))); while ((rlde = ReadDir(rldir, XLogArchiveStatusDir)) != NULL) { diff --git a/src/backend/postmaster/pgstat.c b/src/backend/postmaster/pgstat.c index 1f75e2e97d..42bccce0af 100644 --- a/src/backend/postmaster/pgstat.c +++ b/src/backend/postmaster/pgstat.c @@ -11,7 +11,7 @@ * - Add a pgstat config column to pg_database, so this * entire thing can be enabled/disabled on a per db basis. * - * Copyright (c) 2001-2017, PostgreSQL Global Development Group + * Copyright (c) 2001-2018, PostgreSQL Global Development Group * * src/backend/postmaster/pgstat.c * ---------- @@ -2650,7 +2650,7 @@ CreateSharedBackendStatus(void) } /* Create or attach to the shared appname buffer */ - size = mul_size(NAMEDATALEN, MaxBackends); + size = mul_size(NAMEDATALEN, NumBackendStatSlots); BackendAppnameBuffer = (char *) ShmemInitStruct("Backend Application Name Buffer", size, &found); @@ -2668,7 +2668,7 @@ CreateSharedBackendStatus(void) } /* Create or attach to the shared client hostname buffer */ - size = mul_size(NAMEDATALEN, MaxBackends); + size = mul_size(NAMEDATALEN, NumBackendStatSlots); BackendClientHostnameBuffer = (char *) ShmemInitStruct("Backend Client Host Name Buffer", size, &found); @@ -2695,13 +2695,13 @@ CreateSharedBackendStatus(void) if (!found) { - MemSet(BackendActivityBuffer, 0, size); + MemSet(BackendActivityBuffer, 0, BackendActivityBufferSize); /* Initialize st_activity pointers. 
*/ buffer = BackendActivityBuffer; for (i = 0; i < NumBackendStatSlots; i++) { - BackendStatusArray[i].st_activity = buffer; + BackendStatusArray[i].st_activity_raw = buffer; buffer += pgstat_track_activity_query_size; } } @@ -2784,21 +2784,13 @@ pgstat_initialize(void) void pgstat_bestart(void) { - TimestampTz proc_start_timestamp; SockAddr clientaddr; volatile PgBackendStatus *beentry; /* * To minimize the time spent modifying the PgBackendStatus entry, fetch * all the needed data first. - * - * If we have a MyProcPort, use its session start time (for consistency, - * and to save a kernel call). */ - if (MyProcPort) - proc_start_timestamp = MyProcPort->SessionStartTime; - else - proc_start_timestamp = GetCurrentTimestamp(); /* * We may not have a MyProcPort (eg, if this is the autovacuum process). @@ -2883,7 +2875,7 @@ pgstat_bestart(void) } while ((beentry->st_changecount & 1) == 0); beentry->st_procpid = MyProcPid; - beentry->st_proc_start_timestamp = proc_start_timestamp; + beentry->st_proc_start_timestamp = MyStartTimestamp; beentry->st_activity_start_timestamp = 0; beentry->st_state_start_timestamp = 0; beentry->st_xact_start_timestamp = 0; @@ -2909,8 +2901,8 @@ pgstat_bestart(void) beentry->st_ssl = true; beentry->st_sslstatus->ssl_bits = be_tls_get_cipher_bits(MyProcPort); beentry->st_sslstatus->ssl_compression = be_tls_get_compression(MyProcPort); - be_tls_get_version(MyProcPort, beentry->st_sslstatus->ssl_version, NAMEDATALEN); - be_tls_get_cipher(MyProcPort, beentry->st_sslstatus->ssl_cipher, NAMEDATALEN); + strlcpy(beentry->st_sslstatus->ssl_version, be_tls_get_version(MyProcPort), NAMEDATALEN); + strlcpy(beentry->st_sslstatus->ssl_cipher, be_tls_get_cipher(MyProcPort), NAMEDATALEN); be_tls_get_peerdn_name(MyProcPort, beentry->st_sslstatus->ssl_clientdn, NAMEDATALEN); } else @@ -2922,11 +2914,11 @@ pgstat_bestart(void) #endif beentry->st_state = STATE_UNDEFINED; beentry->st_appname[0] = '\0'; - beentry->st_activity[0] = '\0'; + beentry->st_activity_raw[0] = '\0'; /* Also make sure the last byte in each string area is always 0 */ beentry->st_clienthostname[NAMEDATALEN - 1] = '\0'; beentry->st_appname[NAMEDATALEN - 1] = '\0'; - beentry->st_activity[pgstat_track_activity_query_size - 1] = '\0'; + beentry->st_activity_raw[pgstat_track_activity_query_size - 1] = '\0'; beentry->st_progress_command = PROGRESS_COMMAND_INVALID; beentry->st_progress_command_target = InvalidOid; @@ -3017,7 +3009,7 @@ pgstat_report_activity(BackendState state, const char *cmd_str) pgstat_increment_changecount_before(beentry); beentry->st_state = STATE_DISABLED; beentry->st_state_start_timestamp = 0; - beentry->st_activity[0] = '\0'; + beentry->st_activity_raw[0] = '\0'; beentry->st_activity_start_timestamp = 0; /* st_xact_start_timestamp and wait_event_info are also disabled */ beentry->st_xact_start_timestamp = 0; @@ -3034,8 +3026,12 @@ pgstat_report_activity(BackendState state, const char *cmd_str) start_timestamp = GetCurrentStatementStartTimestamp(); if (cmd_str != NULL) { - len = pg_mbcliplen(cmd_str, strlen(cmd_str), - pgstat_track_activity_query_size - 1); + /* + * Compute length of to-be-stored string unaware of multi-byte + * characters. For speed reasons that'll get corrected on read, rather + * than computed every write. 
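The comment above establishes the write-cheap, read-careful convention for st_activity_raw: writers clip by bytes and readers repair any split multi-byte character. A condensed sketch of both halves, with buf_size standing in for pgstat_track_activity_query_size and both helper names invented for illustration:

#include "postgres.h"

#include "mb/pg_wchar.h"

static void
store_activity(char *slot, Size buf_size, const char *cmd_str)
{
	/* Byte-wise clip: cheap, but may cut a multi-byte character in half. */
	Size		len = Min(strlen(cmd_str), buf_size - 1);

	memcpy(slot, cmd_str, len);
	slot[len] = '\0';
}

static char *
read_activity(const char *slot, Size buf_size)
{
	char	   *copy = pnstrdup(slot, buf_size - 1);
	int			cliplen;

	/* Re-clip on a character boundary, as pgstat_clip_activity() does. */
	cliplen = pg_mbcliplen(copy, (int) strlen(copy), (int) (buf_size - 1));
	copy[cliplen] = '\0';

	return copy;
}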
+ */ + len = Min(strlen(cmd_str), pgstat_track_activity_query_size - 1); } current_timestamp = GetCurrentTimestamp(); @@ -3049,8 +3045,8 @@ pgstat_report_activity(BackendState state, const char *cmd_str) if (cmd_str != NULL) { - memcpy((char *) beentry->st_activity, cmd_str, len); - beentry->st_activity[len] = '\0'; + memcpy((char *) beentry->st_activity_raw, cmd_str, len); + beentry->st_activity_raw[len] = '\0'; beentry->st_activity_start_timestamp = start_timestamp; } @@ -3220,6 +3216,7 @@ pgstat_read_current_status(void) LocalPgBackendStatus *localtable; LocalPgBackendStatus *localentry; char *localappname, + *localclienthostname, *localactivity; #ifdef USE_SSL PgBackendSSLStatus *localsslstatus; @@ -3238,6 +3235,9 @@ pgstat_read_current_status(void) localappname = (char *) MemoryContextAlloc(pgStatLocalContext, NAMEDATALEN * NumBackendStatSlots); + localclienthostname = (char *) + MemoryContextAlloc(pgStatLocalContext, + NAMEDATALEN * NumBackendStatSlots); localactivity = (char *) MemoryContextAlloc(pgStatLocalContext, pgstat_track_activity_query_size * NumBackendStatSlots); @@ -3278,8 +3278,10 @@ pgstat_read_current_status(void) */ strcpy(localappname, (char *) beentry->st_appname); localentry->backendStatus.st_appname = localappname; - strcpy(localactivity, (char *) beentry->st_activity); - localentry->backendStatus.st_activity = localactivity; + strcpy(localclienthostname, (char *) beentry->st_clienthostname); + localentry->backendStatus.st_clienthostname = localclienthostname; + strcpy(localactivity, (char *) beentry->st_activity_raw); + localentry->backendStatus.st_activity_raw = localactivity; localentry->backendStatus.st_ssl = beentry->st_ssl; #ifdef USE_SSL if (beentry->st_ssl) @@ -3309,6 +3311,7 @@ pgstat_read_current_status(void) localentry++; localappname += NAMEDATALEN; + localclienthostname += NAMEDATALEN; localactivity += pgstat_track_activity_query_size; #ifdef USE_SSL localsslstatus++; @@ -3481,12 +3484,12 @@ pgstat_get_wait_activity(WaitEventActivity w) case WAIT_EVENT_CHECKPOINTER_MAIN: event_name = "CheckpointerMain"; break; - case WAIT_EVENT_LOGICAL_LAUNCHER_MAIN: - event_name = "LogicalLauncherMain"; - break; case WAIT_EVENT_LOGICAL_APPLY_MAIN: event_name = "LogicalApplyMain"; break; + case WAIT_EVENT_LOGICAL_LAUNCHER_MAIN: + event_name = "LogicalLauncherMain"; + break; case WAIT_EVENT_PGSTAT_MAIN: event_name = "PgStatMain"; break; @@ -3579,9 +3582,57 @@ pgstat_get_wait_ipc(WaitEventIPC w) case WAIT_EVENT_BTREE_PAGE: event_name = "BtreePage"; break; + case WAIT_EVENT_CLOG_GROUP_UPDATE: + event_name = "ClogGroupUpdate"; + break; case WAIT_EVENT_EXECUTE_GATHER: event_name = "ExecuteGather"; break; + case WAIT_EVENT_HASH_BATCH_ALLOCATING: + event_name = "Hash/Batch/Allocating"; + break; + case WAIT_EVENT_HASH_BATCH_ELECTING: + event_name = "Hash/Batch/Electing"; + break; + case WAIT_EVENT_HASH_BATCH_LOADING: + event_name = "Hash/Batch/Loading"; + break; + case WAIT_EVENT_HASH_BUILD_ALLOCATING: + event_name = "Hash/Build/Allocating"; + break; + case WAIT_EVENT_HASH_BUILD_ELECTING: + event_name = "Hash/Build/Electing"; + break; + case WAIT_EVENT_HASH_BUILD_HASHING_INNER: + event_name = "Hash/Build/HashingInner"; + break; + case WAIT_EVENT_HASH_BUILD_HASHING_OUTER: + event_name = "Hash/Build/HashingOuter"; + break; + case WAIT_EVENT_HASH_GROW_BATCHES_ALLOCATING: + event_name = "Hash/GrowBatches/Allocating"; + break; + case WAIT_EVENT_HASH_GROW_BATCHES_DECIDING: + event_name = "Hash/GrowBatches/Deciding"; + break; + case WAIT_EVENT_HASH_GROW_BATCHES_ELECTING: + event_name 
= "Hash/GrowBatches/Electing"; + break; + case WAIT_EVENT_HASH_GROW_BATCHES_FINISHING: + event_name = "Hash/GrowBatches/Finishing"; + break; + case WAIT_EVENT_HASH_GROW_BATCHES_REPARTITIONING: + event_name = "Hash/GrowBatches/Repartitioning"; + break; + case WAIT_EVENT_HASH_GROW_BUCKETS_ALLOCATING: + event_name = "Hash/GrowBuckets/Allocating"; + break; + case WAIT_EVENT_HASH_GROW_BUCKETS_ELECTING: + event_name = "Hash/GrowBuckets/Electing"; + break; + case WAIT_EVENT_HASH_GROW_BUCKETS_REINSERTING: + event_name = "Hash/GrowBuckets/Reinserting"; + break; case WAIT_EVENT_LOGICAL_SYNC_DATA: event_name = "LogicalSyncData"; break; @@ -3600,15 +3651,21 @@ pgstat_get_wait_ipc(WaitEventIPC w) case WAIT_EVENT_MQ_SEND: event_name = "MessageQueueSend"; break; - case WAIT_EVENT_PARALLEL_FINISH: - event_name = "ParallelFinish"; - break; case WAIT_EVENT_PARALLEL_BITMAP_SCAN: event_name = "ParallelBitmapScan"; break; + case WAIT_EVENT_PARALLEL_CREATE_INDEX_SCAN: + event_name = "ParallelCreateIndexScan"; + break; + case WAIT_EVENT_PARALLEL_FINISH: + event_name = "ParallelFinish"; + break; case WAIT_EVENT_PROCARRAY_GROUP_UPDATE: event_name = "ProcArrayGroupUpdate"; break; + case WAIT_EVENT_PROMOTE: + event_name = "Promote"; + break; case WAIT_EVENT_REPLICATION_ORIGIN_DROP: event_name = "ReplicationOriginDrop"; break; @@ -3863,6 +3920,9 @@ pgstat_get_wait_io(WaitEventIO w) case WAIT_EVENT_WAL_READ: event_name = "WALRead"; break; + case WAIT_EVENT_WAL_SYNC: + event_name = "WALSync"; + break; case WAIT_EVENT_WAL_SYNC_METHOD_ASSIGN: event_name = "WALSyncMethodAssign"; break; @@ -3942,10 +4002,13 @@ pgstat_get_backend_current_activity(int pid, bool checkUser) /* Now it is safe to use the non-volatile pointer */ if (checkUser && !superuser() && beentry->st_userid != GetUserId()) return ""; - else if (*(beentry->st_activity) == '\0') + else if (*(beentry->st_activity_raw) == '\0') return ""; else - return beentry->st_activity; + { + /* this'll leak a bit of memory, but that seems acceptable */ + return pgstat_clip_activity(beentry->st_activity_raw); + } } beentry++; @@ -3991,7 +4054,7 @@ pgstat_get_crashed_backend_activity(int pid, char *buffer, int buflen) if (beentry->st_procpid == pid) { /* Read pointer just once, so it can't change after validation */ - const char *activity = beentry->st_activity; + const char *activity = beentry->st_activity_raw; const char *activity_last; /* @@ -4014,7 +4077,8 @@ pgstat_get_crashed_backend_activity(int pid, char *buffer, int buflen) /* * Copy only ASCII-safe characters so we don't run into encoding * problems when reporting the message; and be sure not to run off - * the end of memory. + * the end of memory. As only ASCII characters are reported, it + * doesn't seem necessary to perform multibyte aware clipping. */ ascii_safe_strlcpy(buffer, activity, Min(buflen, pgstat_track_activity_query_size)); @@ -4213,7 +4277,7 @@ PgstatCollectorMain(int argc, char *argv[]) /* * Identify myself via ps */ - init_ps_display("stats collector process", "", "", ""); + init_ps_display("stats collector", "", "", ""); /* * Read in existing stats files or initialize the stats to zero. @@ -4741,7 +4805,7 @@ get_dbstat_filename(bool permanent, bool tempname, Oid databaseid, pgstat_stat_directory, databaseid, tempname ? 
"tmp" : "stat"); - if (printed > len) + if (printed >= len) elog(ERROR, "overlength pgstat path"); } @@ -5259,7 +5323,7 @@ pgstat_read_db_statsfile(Oid databaseid, HTAB *tabhash, HTAB *funchash, * pgstat_read_db_statsfile_timestamp() - * * Attempt to determine the timestamp of the last db statfile write. - * Returns TRUE if successful; the timestamp is stored in *ts. + * Returns true if successful; the timestamp is stored in *ts. * * This needs to be careful about handling databases for which no stats file * exists, such as databases without a stat entry or those not yet written: @@ -6267,3 +6331,47 @@ pgstat_db_requested(Oid databaseid) return false; } + +/* + * Convert a potentially unsafely truncated activity string (see + * PgBackendStatus.st_activity_raw's documentation) into a correctly truncated + * one. + * + * The returned string is allocated in the caller's memory context and may be + * freed. + */ +char * +pgstat_clip_activity(const char *raw_activity) +{ + char *activity; + int rawlen; + int cliplen; + + /* + * Some callers, like pgstat_get_backend_current_activity(), do not + * guarantee that the buffer isn't concurrently modified. We try to take + * care that the buffer is always terminated by a NUL byte regardless, but + * let's still be paranoid about the string's length. In those cases the + * underlying buffer is guaranteed to be pgstat_track_activity_query_size + * large. + */ + activity = pnstrdup(raw_activity, pgstat_track_activity_query_size - 1); + + /* now double-guaranteed to be NUL terminated */ + rawlen = strlen(activity); + + /* + * All supported server-encodings make it possible to determine the length + * of a multi-byte character from its first byte (this is not the case for + * client encodings, see GB18030). As st_activity is always stored using + * server encoding, this allows us to perform multi-byte aware truncation, + * even if the string earlier was truncated in the middle of a multi-byte + * character. + */ + cliplen = pg_mbcliplen(activity, rawlen, + pgstat_track_activity_query_size - 1); + + activity[cliplen] = '\0'; + + return activity; +} diff --git a/src/backend/postmaster/postmaster.c b/src/backend/postmaster/postmaster.c index 95180b2ef5..688f462e7d 100644 --- a/src/backend/postmaster/postmaster.c +++ b/src/backend/postmaster/postmaster.c @@ -32,7 +32,7 @@ * clients. 
* * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * @@ -74,8 +74,6 @@ #include #include #include -#include -#include #include #include @@ -99,14 +97,18 @@ #include "access/xlog.h" #include "bootstrap/bootstrap.h" #include "catalog/pg_control.h" +#include "common/file_perm.h" #include "common/ip.h" +#include "common/string.h" #include "lib/ilist.h" #include "libpq/auth.h" #include "libpq/libpq.h" +#include "libpq/pqformat.h" #include "libpq/pqsignal.h" #include "miscadmin.h" #include "pg_getopt.h" #include "pgstat.h" +#include "port/pg_bswap.h" #include "postmaster/autovacuum.h" #include "postmaster/bgworker_internals.h" #include "postmaster/fork_process.h" @@ -123,11 +125,11 @@ #include "tcop/tcopprot.h" #include "utils/builtins.h" #include "utils/datetime.h" -#include "utils/dynamic_loader.h" #include "utils/memutils.h" #include "utils/pidfile.h" #include "utils/ps_status.h" #include "utils/timeout.h" +#include "utils/timestamp.h" #include "utils/varlena.h" #ifdef EXEC_BACKEND @@ -202,9 +204,9 @@ char *ListenAddresses; /* * ReservedBackends is the number of backends reserved for superuser use. - * This number is taken out of the pool size given by MaxBackends so + * This number is taken out of the pool size given by MaxConnections so * number of backend slots available to non-superusers is - * (MaxBackends - ReservedBackends). Note what this really means is + * (MaxConnections - ReservedBackends). Note what this really means is * "if there are <= ReservedBackends connections available, only superusers * can make new connections" --- pre-existing superuser connections don't * count against the limit. @@ -390,7 +392,7 @@ static DNSServiceRef bonjour_sdref = NULL; static void CloseServerPorts(int status, Datum arg); static void unlink_external_pid_file(int status, Datum arg); static void getInstallationPaths(const char *argv0); -static void checkDataDir(void); +static void checkControlFile(void); static Port *ConnCreate(int serverFd); static void ConnFree(Port *port); static void reset_shared(int port); @@ -413,6 +415,7 @@ static void ExitPostmaster(int status) pg_attribute_noreturn(); static int ServerLoop(void); static int BackendStartup(Port *port); static int ProcessStartupPacket(Port *port, bool SSLdone); +static void SendNegotiateProtocolVersion(List *unrecognized_protocol_options); static void processCancelRequest(Port *port, void *pkt); static int initMasks(fd_set *rmask); static void report_fork_failure_to_client(Port *port, int errnum); @@ -579,26 +582,21 @@ PostmasterMain(int argc, char *argv[]) int i; char *output_config_variable = NULL; - MyProcPid = PostmasterPid = getpid(); + InitProcessGlobals(); - MyStartTime = time(NULL); + PostmasterPid = MyProcPid; IsPostmasterEnvironment = true; /* - * for security, no dir or file created can be group or other accessible - */ - umask(S_IRWXG | S_IRWXO); - - /* - * Initialize random(3) so we don't get the same values in every run. + * We should not be creating any files or directories before we check the + * data directory (see checkDataDir()), but just in case set the umask to + * the most restrictive (owner-only) permissions. * - * Note: the seed is pretty predictable from externally-visible facts such - * as postmaster start time, so avoid using random() for security-critical - * random values during postmaster startup. 
At the time of first - * connection, PostmasterRandom will select a hopefully-more-random seed. + * checkDataDir() will reset the umask based on the data directory + * permissions. */ - srandom((unsigned int) (MyProcPid ^ MyStartTime)); + umask(PG_MODE_MASK_OWNER); /* * By default, palloc() requests in the postmaster will be allocated in @@ -875,20 +873,20 @@ PostmasterMain(int argc, char *argv[]) /* Verify that DataDir looks reasonable */ checkDataDir(); + /* Check that pg_control exists */ + checkControlFile(); + /* And switch working directory into it */ ChangeToDataDir(); /* * Check for invalid combinations of GUC settings. */ - if (ReservedBackends >= MaxConnections) - { - write_stderr("%s: superuser_reserved_connections must be less than max_connections\n", progname); - ExitPostmaster(1); - } - if (max_wal_senders >= MaxConnections) + if (ReservedBackends + max_wal_senders >= MaxConnections) { - write_stderr("%s: max_wal_senders must be less than max_connections\n", progname); + write_stderr("%s: superuser_reserved_connections (%d) plus max_wal_senders (%d) must be less than max_connections (%d)\n", + progname, + ReservedBackends, max_wal_senders, MaxConnections); ExitPostmaster(1); } if (XLogArchiveMode > ARCHIVE_MODE_OFF && wal_level == WAL_LEVEL_MINIMAL) @@ -950,6 +948,17 @@ PostmasterMain(int argc, char *argv[]) */ CreateDataDirLockFile(true); + /* + * Read the control file (for error checking and config info). + * + * Since we verify the control file's CRC, this has a useful side effect + * on machines where we need a run-time test for CRC support instructions. + * The postmaster will do the test once at startup, and then its child + * processes will inherit the correct function pointer and not need to + * repeat the test. + */ + LocalProcessControlFile(false); + /* * Initialize SSL library, if specified. */ @@ -1069,7 +1078,7 @@ PostmasterMain(int argc, char *argv[]) "_postgresql._tcp.", NULL, NULL, - htons(PostPortNumber), + pg_hton16(PostPortNumber), 0, NULL, NULL, @@ -1250,6 +1259,9 @@ PostmasterMain(int argc, char *argv[]) */ RemovePromoteSignalFiles(); + /* Do the same for logrotate signal file */ + RemoveLogrotateSignalFiles(); + /* Remove any outdated file holding the current log filenames. */ if (unlink(LOG_METAINFO_DATAFILE) < 0 && errno != ENOENT) ereport(LOG, @@ -1467,82 +1479,17 @@ getInstallationPaths(const char *argv0) */ } - /* - * Validate the proposed data directory + * Check that pg_control exists in the correct location in the data directory. + * + * No attempt is made to validate the contents of pg_control here. This is + * just a sanity check to see if we are looking at a real data directory. */ static void -checkDataDir(void) +checkControlFile(void) { char path[MAXPGPATH]; FILE *fp; - struct stat stat_buf; - - Assert(DataDir); - - if (stat(DataDir, &stat_buf) != 0) - { - if (errno == ENOENT) - ereport(FATAL, - (errcode_for_file_access(), - errmsg("data directory \"%s\" does not exist", - DataDir))); - else - ereport(FATAL, - (errcode_for_file_access(), - errmsg("could not read permissions of directory \"%s\": %m", - DataDir))); - } - - /* eventual chdir would fail anyway, but let's test ... */ - if (!S_ISDIR(stat_buf.st_mode)) - ereport(FATAL, - (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE), - errmsg("specified data directory \"%s\" is not a directory", - DataDir))); - - /* - * Check that the directory belongs to my userid; if not, reject. 
- * - * This check is an essential part of the interlock that prevents two - * postmasters from starting in the same directory (see CreateLockFile()). - * Do not remove or weaken it. - * - * XXX can we safely enable this check on Windows? - */ -#if !defined(WIN32) && !defined(__CYGWIN__) - if (stat_buf.st_uid != geteuid()) - ereport(FATAL, - (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE), - errmsg("data directory \"%s\" has wrong ownership", - DataDir), - errhint("The server must be started by the user that owns the data directory."))); -#endif - - /* - * Check if the directory has group or world access. If so, reject. - * - * It would be possible to allow weaker constraints (for example, allow - * group access) but we cannot make a general assumption that that is - * okay; for example there are platforms where nearly all users - * customarily belong to the same group. Perhaps this test should be - * configurable. - * - * XXX temporarily suppress check when on Windows, because there may not - * be proper support for Unix-y file permissions. Need to think of a - * reasonable check to apply on Windows. - */ -#if !defined(WIN32) && !defined(__CYGWIN__) - if (stat_buf.st_mode & (S_IRWXG | S_IRWXO)) - ereport(FATAL, - (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE), - errmsg("data directory \"%s\" has group or world access", - DataDir), - errdetail("Permissions should be u=rwx (0700)."))); -#endif - - /* Look for PG_VERSION before looking for pg_control */ - ValidatePgVersion(DataDir); snprintf(path, sizeof(path), "%s/global/pg_control", DataDir); @@ -1963,7 +1910,7 @@ ProcessStartupPacket(Port *port, bool SSLdone) return STATUS_ERROR; } - len = ntohl(len); + len = pg_ntoh32(len); len -= 4; if (len < (int32) sizeof(ProtocolVersion) || @@ -1999,7 +1946,7 @@ ProcessStartupPacket(Port *port, bool SSLdone) * The first field is either a protocol version number or a special * request code. */ - port->proto = proto = ntohl(*((ProtocolVersion *) buf)); + port->proto = proto = pg_ntoh32(*((ProtocolVersion *) buf)); if (proto == CANCEL_REQUEST_CODE) { @@ -2050,12 +1997,9 @@ ProcessStartupPacket(Port *port, bool SSLdone) */ FrontendProtocol = proto; - /* Check we can handle the protocol the frontend is using. */ - + /* Check that the major protocol version is in range. */ if (PG_PROTOCOL_MAJOR(proto) < PG_PROTOCOL_MAJOR(PG_PROTOCOL_EARLIEST) || - PG_PROTOCOL_MAJOR(proto) > PG_PROTOCOL_MAJOR(PG_PROTOCOL_LATEST) || - (PG_PROTOCOL_MAJOR(proto) == PG_PROTOCOL_MAJOR(PG_PROTOCOL_LATEST) && - PG_PROTOCOL_MINOR(proto) > PG_PROTOCOL_MINOR(PG_PROTOCOL_LATEST))) + PG_PROTOCOL_MAJOR(proto) > PG_PROTOCOL_MAJOR(PG_PROTOCOL_LATEST)) ereport(FATAL, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), errmsg("unsupported frontend protocol %u.%u: server supports %u.0 to %u.%u", @@ -2077,6 +2021,7 @@ ProcessStartupPacket(Port *port, bool SSLdone) if (PG_PROTOCOL_MAJOR(proto) >= 3) { int32 offset = sizeof(ProtocolVersion); + List *unrecognized_protocol_options = NIL; /* * Scan packet body for name/option pairs. We can assume any string @@ -2126,6 +2071,16 @@ ProcessStartupPacket(Port *port, bool SSLdone) valptr), errhint("Valid values are: \"false\", 0, \"true\", 1, \"database\"."))); } + else if (strncmp(nameptr, "_pq_.", 5) == 0) + { + /* + * Any option beginning with _pq_. is reserved for use as a + * protocol-level option, but at present no such options are + * defined. 
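Since the scan above walks the v3 startup-packet body as NUL-terminated name/value pairs and reserves the "_pq_." prefix for protocol-level options, here is a simplified stand-alone sketch of that layout from the reading side (minimal bounds checking, and not taken from the server code):

#include <stdio.h>
#include <string.h>

static void
walk_startup_options(const char *body, size_t len)
{
	size_t		offset = 4;		/* skip ProtocolVersion; length already stripped */

	/* Pairs of NUL-terminated strings, ended by a single empty name. */
	while (offset < len && body[offset] != '\0')
	{
		const char *name = body + offset;
		const char *value = name + strlen(name) + 1;

		if (strncmp(name, "_pq_.", 5) == 0)
			printf("protocol option: %s = %s\n", name, value);
		else
			printf("GUC option: %s = %s\n", name, value);

		offset = (size_t) (value - body) + strlen(value) + 1;
	}
}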
+ */ + unrecognized_protocol_options = + lappend(unrecognized_protocol_options, pstrdup(nameptr)); + } else { /* Assume it's a generic GUC option */ @@ -2133,6 +2088,21 @@ ProcessStartupPacket(Port *port, bool SSLdone) pstrdup(nameptr)); port->guc_options = lappend(port->guc_options, pstrdup(valptr)); + + /* + * Copy application_name to port if we come across it. This + * is done so we can log the application_name in the + * connection authorization message. Note that the GUC would + * be used but we haven't gone through GUC setup yet. + */ + if (strcmp(nameptr, "application_name") == 0) + { + char *tmp_app_name = pstrdup(valptr); + + pg_clean_ascii(tmp_app_name); + + port->application_name = tmp_app_name; + } } offset = valoffset + strlen(valptr) + 1; } @@ -2145,6 +2115,16 @@ ProcessStartupPacket(Port *port, bool SSLdone) ereport(FATAL, (errcode(ERRCODE_PROTOCOL_VIOLATION), errmsg("invalid startup packet layout: expected terminator as last byte"))); + + /* + * If the client requested a newer protocol version or if the client + * requested any protocol options we didn't recognize, let them know + * the newest minor protocol version we do support and the names of + * any unrecognized options. + */ + if (PG_PROTOCOL_MINOR(proto) > PG_PROTOCOL_MINOR(PG_PROTOCOL_LATEST) || + unrecognized_protocol_options != NIL) + SendNegotiateProtocolVersion(unrecognized_protocol_options); } else { @@ -2258,6 +2238,34 @@ ProcessStartupPacket(Port *port, bool SSLdone) return STATUS_OK; } +/* + * Send a NegotiateProtocolVersion to the client. This lets the client know + * that they have requested a newer minor protocol version than we are able + * to speak. We'll speak the highest version we know about; the client can, + * of course, abandon the connection if that's a problem. + * + * We also include in the response a list of protocol options we didn't + * understand. This allows clients to include optional parameters that might + * be present either in newer protocol versions or third-party protocol + * extensions without fear of having to reconnect if those options are not + * understood, while at the same time making certain that the client is aware + * of which options were actually accepted. + */ +static void +SendNegotiateProtocolVersion(List *unrecognized_protocol_options) +{ + StringInfoData buf; + ListCell *lc; + + pq_beginmessage(&buf, 'v'); /* NegotiateProtocolVersion */ + pq_sendint32(&buf, PG_PROTOCOL_LATEST); + pq_sendint32(&buf, list_length(unrecognized_protocol_options)); + foreach(lc, unrecognized_protocol_options) + pq_sendstring(&buf, lfirst(lc)); + pq_endmessage(&buf); + + /* no need to flush, some other message will follow */ +} /* * The client has sent a cancel request packet, not a normal @@ -2278,8 +2286,8 @@ processCancelRequest(Port *port, void *pkt) int i; #endif - backendPID = (int) ntohl(canc->backendPID); - cancelAuthCode = (int32) ntohl(canc->cancelAuthCode); + backendPID = (int) pg_ntoh32(canc->backendPID); + cancelAuthCode = (int32) pg_ntoh32(canc->cancelAuthCode); /* * See if we have a matching backend. In the EXEC_BACKEND case, we can no @@ -2496,6 +2504,32 @@ ClosePostmasterPorts(bool am_syslogger) } +/* + * InitProcessGlobals -- set MyProcPid, MyStartTime[stamp], random seeds + * + * Called early in every backend. + */ +void +InitProcessGlobals(void) +{ + MyProcPid = getpid(); + MyStartTimestamp = GetCurrentTimestamp(); + MyStartTime = timestamptz_to_time_t(MyStartTimestamp); + + /* + * Don't want backend to be able to see the postmaster random number + * generator state. 
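For context on what SendNegotiateProtocolVersion() above puts on the wire, a rough client-side sketch of decoding the 'v' message body: a 4-byte newest-supported protocol version, a 4-byte count, then that many NUL-terminated option names. This is illustrative only and not libpq's implementation:

#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

static void
decode_negotiate_protocol_version(const char *body, size_t len)
{
	uint32_t	version, count;
	const char *p;

	if (len < 8)
		return;					/* malformed; real code would report this */

	memcpy(&version, body, 4);
	memcpy(&count, body + 4, 4);
	version = ntohl(version);
	count = ntohl(count);

	printf("server supports protocol %u.%u at most\n",
		   version >> 16, version & 0xffff);

	p = body + 8;
	while (count-- > 0 && p < body + len)
	{
		printf("server did not recognize option: %s\n", p);
		p += strlen(p) + 1;		/* skip the terminating NUL as well */
	}
}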
We have to clobber the static random_seed. + */ +#ifndef HAVE_STRONG_RANDOM + random_seed = 0; + random_start_time.tv_usec = 0; +#endif + + /* Set a different seed for random() in every backend. */ + srandom((unsigned int) MyProcPid ^ (unsigned int) MyStartTimestamp); +} + + /* * reset_shared -- reset shared memory and semaphores */ @@ -2686,7 +2720,7 @@ pmdie(SIGNAL_ARGS) signal_child(BgWriterPID, SIGTERM); if (WalReceiverPID != 0) signal_child(WalReceiverPID, SIGTERM); - if (pmState == PM_RECOVERY) + if (pmState == PM_STARTUP || pmState == PM_RECOVERY) { SignalSomeChildren(SIGTERM, BACKEND_TYPE_BGWORKER); @@ -3114,8 +3148,9 @@ CleanupBackgroundWorker(int pid, exitstatus = 0; #endif - snprintf(namebuf, MAXPGPATH, "%s: %s", _("worker process"), - rw->rw_worker.bgw_name); + snprintf(namebuf, MAXPGPATH, _("background worker \"%s\""), + rw->rw_worker.bgw_type); + if (!EXIT_STATUS_0(exitstatus)) { @@ -3826,6 +3861,10 @@ PostmasterStateMachine(void) ResetBackgroundWorkerCrashTimes(); shmem_exit(1); + + /* re-read control file into local memory */ + LocalProcessControlFile(true); + reset_shared(PostPortNumber); StartupPID = StartupDataBase(); @@ -4132,10 +4171,6 @@ BackendInitialize(Port *port) /* This flag will remain set until InitPostgres finishes authentication */ ClientAuthInProgress = true; /* limit visibility of log messages */ - /* save process start time */ - port->SessionStartTime = GetCurrentTimestamp(); - MyStartTime = timestamptz_to_time_t(port->SessionStartTime); - /* set these to empty in case they are needed before we set them up */ port->remote_host = ""; port->remote_port = ""; @@ -4259,14 +4294,14 @@ BackendInitialize(Port *port) * * For a walsender, the ps display is set in the following form: * - * postgres: wal sender process + * postgres: walsender * - * To achieve that, we pass "wal sender process" as username and username - * as dbname to init_ps_display(). XXX: should add a new variant of + * To achieve that, we pass "walsender" as username and username as dbname + * to init_ps_display(). XXX: should add a new variant of * init_ps_display() to avoid abusing the parameters like this. */ if (am_walsender) - init_ps_display("wal sender process", port->user_name, remote_ps_data, + init_ps_display(pgstat_get_backend_desc(B_WAL_SENDER), port->user_name, remote_ps_data, update_process_title ? "authentication" : ""); else init_ps_display(port->user_name, port->database_name, remote_ps_data, @@ -4293,23 +4328,8 @@ BackendRun(Port *port) char **av; int maxac; int ac; - long secs; - int usecs; int i; - /* - * Don't want backend to be able to see the postmaster random number - * generator state. We have to clobber the static random_seed *and* start - * a new random sequence in the random() library function. - */ -#ifndef HAVE_STRONG_RANDOM - random_seed = 0; - random_start_time.tv_usec = 0; -#endif - /* slightly hacky way to convert timestamptz into integers */ - TimestampDifference(0, port->SessionStartTime, &secs, &usecs); - srandom((unsigned int) (MyProcPid ^ (usecs << 12) ^ secs)); - /* * Now, build the argv vector that will be given to PostgresMain. * @@ -4438,9 +4458,9 @@ internal_forkexec(int argc, char *argv[], Port *port) { /* * As in OpenTemporaryFileInTablespace, try to make the temp-file - * directory + * directory, ignoring errors. 
*/ - mkdir(PG_TEMP_FILES_DIR, S_IRWXU); + (void) MakePGDirectory(PG_TEMP_FILES_DIR); fp = AllocateFile(tmpfilename, PG_BINARY_W); if (!fp) @@ -4805,6 +4825,20 @@ SubPostmasterMain(int argc, char *argv[]) /* Read in remaining GUC variables */ read_nondefault_variables(); + /* + * Check that the data directory looks valid, which will also check the + * privileges on the data directory and update our umask and file/group + * variables for creating files later. Note: this should really be done + * before we create any files or directories. + */ + checkDataDir(); + + /* + * (re-)read control file, as it contains config. The postmaster will + * already have read this, but this process doesn't know about that. + */ + LocalProcessControlFile(false); + /* * Reload any libraries that were preloaded by the postmaster. Since we * exec'd this process, those libraries didn't come along with us; but we @@ -5082,11 +5116,18 @@ sigusr1_handler(SIGNAL_ARGS) signal_child(PgArchPID, SIGUSR1); } - if (CheckPostmasterSignal(PMSIGNAL_ROTATE_LOGFILE) && - SysLoggerPID != 0) + /* Tell syslogger to rotate logfile if requested */ + if (SysLoggerPID != 0) { - /* Tell syslogger to rotate logfile */ - signal_child(SysLoggerPID, SIGUSR1); + if (CheckLogrotateSignal()) + { + signal_child(SysLoggerPID, SIGUSR1); + RemoveLogrotateSignalFiles(); + } + else if (CheckPostmasterSignal(PMSIGNAL_ROTATE_LOGFILE)) + { + signal_child(SysLoggerPID, SIGUSR1); + } } if (CheckPostmasterSignal(PMSIGNAL_START_AUTOVAC_LAUNCHER) && @@ -5524,7 +5565,7 @@ MaxLivePostmasterChildren(void) * Connect background worker to a database. */ void -BackgroundWorkerInitializeConnection(char *dbname, char *username) +BackgroundWorkerInitializeConnection(const char *dbname, const char *username, uint32 flags) { BackgroundWorker *worker = MyBgworkerEntry; @@ -5534,7 +5575,7 @@ BackgroundWorkerInitializeConnection(char *dbname, char *username) (errcode(ERRCODE_PROGRAM_LIMIT_EXCEEDED), errmsg("database connection requirement not indicated during registration"))); - InitPostgres(dbname, InvalidOid, username, InvalidOid, NULL); + InitPostgres(dbname, InvalidOid, username, InvalidOid, NULL, (flags & BGWORKER_BYPASS_ALLOWCONN) != 0); /* it had better not gotten out of "init" mode yet */ if (!IsInitProcessingMode()) @@ -5547,7 +5588,7 @@ BackgroundWorkerInitializeConnection(char *dbname, char *username) * Connect background worker to a database using OIDs. */ void -BackgroundWorkerInitializeConnectionByOid(Oid dboid, Oid useroid) +BackgroundWorkerInitializeConnectionByOid(Oid dboid, Oid useroid, uint32 flags) { BackgroundWorker *worker = MyBgworkerEntry; @@ -5557,7 +5598,7 @@ BackgroundWorkerInitializeConnectionByOid(Oid dboid, Oid useroid) (errcode(ERRCODE_PROGRAM_LIMIT_EXCEEDED), errmsg("database connection requirement not indicated during registration"))); - InitPostgres(NULL, dboid, NULL, useroid, NULL); + InitPostgres(NULL, dboid, NULL, useroid, NULL, (flags & BGWORKER_BYPASS_ALLOWCONN) != 0); /* it had better not gotten out of "init" mode yet */ if (!IsInitProcessingMode()) @@ -5848,7 +5889,16 @@ maybe_start_bgworkers(void) { if (rw->rw_worker.bgw_restart_time == BGW_NEVER_RESTART) { + int notify_pid; + + notify_pid = rw->rw_worker.bgw_notify_pid; + ForgetBackgroundWorker(&iter); + + /* Report worker is gone now. 
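BackgroundWorkerInitializeConnection() now takes a flags argument, and the hunk above shows BGWORKER_BYPASS_ALLOWCONN being honored. A hypothetical worker entry point using it (my_ext_main and the choice of template0 are only for illustration):

#include "postgres.h"

#include "postmaster/bgworker.h"

/*
 * Hypothetical worker entry point: the third argument added above lets a
 * worker connect even to a database whose datallowconn flag is off.
 */
void
my_ext_main(Datum main_arg)
{
	BackgroundWorkerUnblockSignals();

	BackgroundWorkerInitializeConnection("template0", NULL,
										 BGWORKER_BYPASS_ALLOWCONN);

	/* ... run queries, then fall off the end to exit with status 0 ... */
}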
*/ + if (notify_pid != 0) + kill(notify_pid, SIGUSR1); + continue; } diff --git a/src/backend/postmaster/startup.c b/src/backend/postmaster/startup.c index 4693a1bb86..2926211e35 100644 --- a/src/backend/postmaster/startup.c +++ b/src/backend/postmaster/startup.c @@ -9,7 +9,7 @@ * though.) * * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * * * IDENTIFICATION @@ -69,27 +69,21 @@ static void StartupProcSigHupHandler(SIGNAL_ARGS); static void startupproc_quickdie(SIGNAL_ARGS) { - PG_SETMASK(&BlockSig); - - /* - * We DO NOT want to run proc_exit() callbacks -- we're here because - * shared memory may be corrupted, so we don't want to try to clean up our - * transaction. Just nail the windows shut and get out of town. Now that - * there's an atexit callback to prevent third-party code from breaking - * things by calling exit() directly, we have to reset the callbacks - * explicitly to make this work as intended. - */ - on_exit_reset(); - /* - * Note we do exit(2) not exit(0). This is to force the postmaster into a - * system reset cycle if some idiot DBA sends a manual SIGQUIT to a random + * We DO NOT want to run proc_exit() or atexit() callbacks -- we're here + * because shared memory may be corrupted, so we don't want to try to + * clean up our transaction. Just nail the windows shut and get out of + * town. The callbacks wouldn't be safe to run from a signal handler, + * anyway. + * + * Note we do _exit(2) not _exit(0). This is to force the postmaster into + * a system reset cycle if someone sends a manual SIGQUIT to a random * backend. This is necessary precisely because we don't clean up our * shared memory state. (The "dead man switch" mechanism in pmsignal.c * should ensure the postmaster sees this as a crash, too, but no harm in * being doubly sure.) */ - exit(2); + _exit(2); } diff --git a/src/backend/postmaster/syslogger.c b/src/backend/postmaster/syslogger.c index 3255b42c7d..29bdcec895 100644 --- a/src/backend/postmaster/syslogger.c +++ b/src/backend/postmaster/syslogger.c @@ -13,7 +13,7 @@ * * Author: Andreas Pflug * - * Copyright (c) 2004-2017, PostgreSQL Global Development Group + * Copyright (c) 2004-2018, PostgreSQL Global Development Group * * * IDENTIFICATION @@ -41,9 +41,11 @@ #include "postmaster/postmaster.h" #include "postmaster/syslogger.h" #include "storage/dsm.h" +#include "storage/fd.h" #include "storage/ipc.h" #include "storage/latch.h" #include "storage/pg_shmem.h" +#include "tcop/tcopprot.h" #include "utils/guc.h" #include "utils/ps_status.h" #include "utils/timestamp.h" @@ -55,6 +57,9 @@ */ #define READ_BUF_SIZE (2 * PIPE_CHUNK_SIZE) +/* Log rotation signal file path, relative to $PGDATA */ +#define LOGROTATE_SIGNAL_FILE "logrotate" + /* * GUC parameters. 
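LOGROTATE_SIGNAL_FILE defined above pairs with the sigusr1_handler() change earlier in this patch: something creates $PGDATA/logrotate and signals the postmaster, which forwards the request to the syslogger and removes the file. A simplified sketch of the requesting side; this is not pg_ctl's actual implementation, and error handling is minimal:

#include <fcntl.h>
#include <signal.h>
#include <stdio.h>
#include <sys/types.h>
#include <unistd.h>

static int
request_logrotate(const char *datadir, pid_t postmaster_pid)
{
	char		path[1024];
	int			fd;

	/* Create the signal file in the data directory... */
	snprintf(path, sizeof(path), "%s/logrotate", datadir);
	fd = open(path, O_WRONLY | O_CREAT | O_TRUNC, 0600);
	if (fd < 0)
		return -1;
	close(fd);

	/* ... then wake the postmaster, which relays the request and cleans up. */
	return kill(postmaster_pid, SIGUSR1);
}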
Logging_collector cannot be changed after postmaster @@ -134,7 +139,6 @@ static void syslogger_parseArgs(int argc, char *argv[]); NON_EXEC_STATIC void SysLoggerMain(int argc, char *argv[]) pg_attribute_noreturn(); static void process_pipe_input(char *logbuffer, int *bytes_in_logbuffer); static void flush_pipe_input(char *logbuffer, int *bytes_in_logbuffer); -static void open_csvlogfile(void); static FILE *logfile_open(const char *filename, const char *mode, bool allow_errors); @@ -173,7 +177,7 @@ SysLoggerMain(int argc, char *argv[]) am_syslogger = true; - init_ps_display("logger process", "", "", ""); + init_ps_display("logger", "", "", ""); /* * If we restarted, our stderr is already redirected into our own input @@ -271,11 +275,13 @@ SysLoggerMain(int argc, char *argv[]) #endif /* WIN32 */ /* - * Remember active logfile's name. We recompute this from the reference + * Remember active logfiles' name(s). We recompute 'em from the reference * time because passing down just the pg_time_t is a lot cheaper than * passing a whole file path in the EXEC_BACKEND case. */ last_file_name = logfile_getname(first_syslogger_file_time, NULL); + if (csvlogFile != NULL) + last_csv_file_name = logfile_getname(first_syslogger_file_time, ".csv"); /* remember active logfile parameters */ currentLogDir = pstrdup(Log_directory); @@ -285,6 +291,13 @@ SysLoggerMain(int argc, char *argv[]) set_next_rotation_time(); update_metainfo_datafile(); + /* + * Reset whereToSendOutput, as the postmaster will do (but hasn't yet, at + * the point where we forked). This prevents duplicate output of messages + * from syslogger itself. + */ + whereToSendOutput = DestNone; + /* main worker loop */ for (;;) { @@ -322,7 +335,7 @@ SysLoggerMain(int argc, char *argv[]) /* * Also, create new directory if not present; ignore errors */ - mkdir(Log_directory, S_IRWXU); + (void) MakePGDirectory(Log_directory); } if (strcmp(Log_filename, currentLogFilename) != 0) { @@ -331,6 +344,14 @@ SysLoggerMain(int argc, char *argv[]) rotation_requested = true; } + /* + * Force a rotation if CSVLOG output was just turned on or off and + * we need to open or close csvlogFile accordingly. + */ + if (((Log_destination & LOG_DESTINATION_CSVLOG) != 0) != + (csvlogFile != NULL)) + rotation_requested = true; + /* * If rotation time parameter changed, reset next rotation time, * but don't immediately force a rotation. @@ -387,7 +408,7 @@ SysLoggerMain(int argc, char *argv[]) { /* * Force rotation when both values are zero. It means the request - * was sent by pg_rotate_logfile. + * was sent by pg_rotate_logfile() or "pg_ctl logrotate". */ if (!time_based_rotation && size_rotation_for == 0) size_rotation_for = LOG_DESTINATION_STDERR | LOG_DESTINATION_CSVLOG; @@ -564,7 +585,7 @@ SysLogger_Start(void) /* * Create log directory if not present; ignore errors */ - mkdir(Log_directory, S_IRWXU); + (void) MakePGDirectory(Log_directory); /* * The initial logfile is created right in the postmaster, to verify that @@ -579,12 +600,27 @@ SysLogger_Start(void) * a time-based rotation. */ first_syslogger_file_time = time(NULL); + filename = logfile_getname(first_syslogger_file_time, NULL); syslogFile = logfile_open(filename, "a", false); pfree(filename); + /* + * Likewise for the initial CSV log file, if that's enabled. (Note that + * we open syslogFile even when only CSV output is nominally enabled, + * since some code paths will write to syslogFile anyway.) 
+ */ + if (Log_destination & LOG_DESTINATION_CSVLOG) + { + filename = logfile_getname(first_syslogger_file_time, ".csv"); + + csvlogFile = logfile_open(filename, "a", false); + + pfree(filename); + } + #ifdef EXEC_BACKEND switch ((sysloggerPid = syslogger_forkexec())) #else @@ -674,9 +710,14 @@ SysLogger_Start(void) redirection_done = true; } - /* postmaster will never write the file; close it */ + /* postmaster will never write the file(s); close 'em */ fclose(syslogFile); syslogFile = NULL; + if (csvlogFile != NULL) + { + fclose(csvlogFile); + csvlogFile = NULL; + } return (int) sysloggerPid; } @@ -698,6 +739,7 @@ syslogger_forkexec(void) char *av[10]; int ac = 0; char filenobuf[32]; + char csvfilenobuf[32]; av[ac++] = "postgres"; av[ac++] = "--forklog"; @@ -719,6 +761,21 @@ syslogger_forkexec(void) #endif /* WIN32 */ av[ac++] = filenobuf; +#ifndef WIN32 + if (csvlogFile != NULL) + snprintf(csvfilenobuf, sizeof(csvfilenobuf), "%d", + fileno(csvlogFile)); + else + strcpy(csvfilenobuf, "-1"); +#else /* WIN32 */ + if (csvlogFile != NULL) + snprintf(csvfilenobuf, sizeof(csvfilenobuf), "%ld", + (long) _get_osfhandle(_fileno(csvlogFile))); + else + strcpy(csvfilenobuf, "0"); +#endif /* WIN32 */ + av[ac++] = csvfilenobuf; + av[ac] = NULL; Assert(ac < lengthof(av)); @@ -735,9 +792,16 @@ syslogger_parseArgs(int argc, char *argv[]) { int fd; - Assert(argc == 4); + Assert(argc == 5); argv += 3; + /* + * Re-open the error output files that were opened by SysLogger_Start(). + * + * We expect this will always succeed, which is too optimistic, but if it + * fails there's not a lot we can do to report the problem anyway. As + * coded, we'll just crash on a null pointer dereference after failure... + */ #ifndef WIN32 fd = atoi(*argv++); if (fd != -1) @@ -745,6 +809,12 @@ syslogger_parseArgs(int argc, char *argv[]) syslogFile = fdopen(fd, "a"); setvbuf(syslogFile, NULL, PG_IOLBF, 0); } + fd = atoi(*argv++); + if (fd != -1) + { + csvlogFile = fdopen(fd, "a"); + setvbuf(csvlogFile, NULL, PG_IOLBF, 0); + } #else /* WIN32 */ fd = atoi(*argv++); if (fd != 0) @@ -756,6 +826,16 @@ syslogger_parseArgs(int argc, char *argv[]) setvbuf(syslogFile, NULL, PG_IOLBF, 0); } } + fd = atoi(*argv++); + if (fd != 0) + { + fd = _open_osfhandle(fd, _O_APPEND | _O_TEXT); + if (fd > 0) + { + csvlogFile = fdopen(fd, "a"); + setvbuf(csvlogFile, NULL, PG_IOLBF, 0); + } + } #endif /* WIN32 */ } #endif /* EXEC_BACKEND */ @@ -997,13 +1077,29 @@ write_syslogger_file(const char *buffer, int count, int destination) int rc; FILE *logfile; - if (destination == LOG_DESTINATION_CSVLOG && csvlogFile == NULL) - open_csvlogfile(); + /* + * If we're told to write to csvlogFile, but it's not open, dump the data + * to syslogFile (which is always open) instead. This can happen if CSV + * output is enabled after postmaster start and we've been unable to open + * csvlogFile. There are also race conditions during a parameter change + * whereby backends might send us CSV output before we open csvlogFile or + * after we close it. Writing CSV-formatted output to the regular log + * file isn't great, but it beats dropping log output on the floor. + * + * Think not to improve this by trying to open csvlogFile on-the-fly. Any + * failure in that would lead to recursion. + */ + logfile = (destination == LOG_DESTINATION_CSVLOG && + csvlogFile != NULL) ? csvlogFile : syslogFile; - logfile = destination == LOG_DESTINATION_CSVLOG ? 
csvlogFile : syslogFile; rc = fwrite(buffer, 1, count, logfile); - /* can't use ereport here because of possible recursion */ + /* + * Try to report any failure. We mustn't use ereport because it would + * just recurse right back here, but write_stderr is OK: it will write + * either to the postmaster's original stderr, or to /dev/null, but never + * to our input pipe which would result in a different sort of looping. + */ if (rc != count) write_stderr("could not write to log file: %s\n", strerror(errno)); } @@ -1086,31 +1182,6 @@ pipeThread(void *arg) } #endif /* WIN32 */ -/* - * Open the csv log file - we do this opportunistically, because - * we don't know if CSV logging will be wanted. - * - * This is only used the first time we open the csv log in a given syslogger - * process, not during rotations. As with opening the main log file, we - * always append in this situation. - */ -static void -open_csvlogfile(void) -{ - char *filename; - - filename = logfile_getname(time(NULL), ".csv"); - - csvlogFile = logfile_open(filename, "a", false); - - if (last_csv_file_name != NULL) /* probably shouldn't happen */ - pfree(last_csv_file_name); - - last_csv_file_name = filename; - - update_metainfo_datafile(); -} - /* * Open a new logfile with proper permissions and buffering options. * @@ -1178,7 +1249,7 @@ logfile_rotate(bool time_based_rotation, int size_rotation_for) else fntime = time(NULL); filename = logfile_getname(fntime, NULL); - if (csvlogFile != NULL) + if (Log_destination & LOG_DESTINATION_CSVLOG) csvfilename = logfile_getname(fntime, ".csv"); /* @@ -1230,10 +1301,16 @@ logfile_rotate(bool time_based_rotation, int size_rotation_for) filename = NULL; } - /* Same as above, but for csv file. */ - - if (csvlogFile != NULL && - (time_based_rotation || (size_rotation_for & LOG_DESTINATION_CSVLOG))) + /* + * Same as above, but for csv file. Note that if LOG_DESTINATION_CSVLOG + * was just turned on, we might have to open csvlogFile here though it was + * not open before. In such a case we'll append not overwrite (since + * last_csv_file_name will be NULL); that is consistent with the normal + * rules since it's not a time-based rotation. + */ + if ((Log_destination & LOG_DESTINATION_CSVLOG) && + (csvlogFile == NULL || + time_based_rotation || (size_rotation_for & LOG_DESTINATION_CSVLOG))) { if (Log_truncate_on_rotation && time_based_rotation && last_csv_file_name != NULL && @@ -1264,7 +1341,8 @@ logfile_rotate(bool time_based_rotation, int size_rotation_for) return; } - fclose(csvlogFile); + if (csvlogFile != NULL) + fclose(csvlogFile); csvlogFile = fh; /* instead of pfree'ing filename, remember it for next time */ @@ -1273,6 +1351,16 @@ logfile_rotate(bool time_based_rotation, int size_rotation_for) last_csv_file_name = csvfilename; csvfilename = NULL; } + else if (!(Log_destination & LOG_DESTINATION_CSVLOG) && + csvlogFile != NULL) + { + /* CSVLOG was just turned off, so close the old file */ + fclose(csvlogFile); + csvlogFile = NULL; + if (last_csv_file_name != NULL) + pfree(last_csv_file_name); + last_csv_file_name = NULL; + } if (filename) pfree(filename); @@ -1421,6 +1509,30 @@ update_metainfo_datafile(void) * -------------------------------- */ +/* + * Check to see if a log rotation request has arrived. Should be + * called by postmaster after receiving SIGUSR1. + */ +bool +CheckLogrotateSignal(void) +{ + struct stat stat_buf; + + if (stat(LOGROTATE_SIGNAL_FILE, &stat_buf) == 0) + return true; + + return false; +} + +/* + * Remove the file signaling a log rotation request.
+ */ +void +RemoveLogrotateSignalFiles(void) +{ + unlink(LOGROTATE_SIGNAL_FILE); +} + /* SIGHUP: set flag to reload config file */ static void sigHupHandler(SIGNAL_ARGS) diff --git a/src/backend/postmaster/walwriter.c b/src/backend/postmaster/walwriter.c index 7b89e02428..fb66bceeed 100644 --- a/src/backend/postmaster/walwriter.c +++ b/src/backend/postmaster/walwriter.c @@ -31,7 +31,7 @@ * should be killed by SIGQUIT and then a recovery cycle started. * * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * * * IDENTIFICATION @@ -129,12 +129,6 @@ WalWriterMain(void) /* We allow SIGQUIT (quickdie) at all times */ sigdelset(&BlockSig, SIGQUIT); - /* - * Create a resource owner to keep track of our resources (not clear that - * we need this, but may as well have one). - */ - CurrentResourceOwner = ResourceOwnerCreate(NULL, "Wal Writer"); - /* * Create a memory context that we will do all our work in. We do this so * that we can reset the context during error recovery and thereby avoid @@ -172,14 +166,10 @@ WalWriterMain(void) pgstat_report_wait_end(); AbortBufferIO(); UnlockBuffers(); - /* buffer pins are released here: */ - ResourceOwnerRelease(CurrentResourceOwner, - RESOURCE_RELEASE_BEFORE_LOCKS, - false, true); - /* we needn't bother with the other ResourceOwnerRelease phases */ + ReleaseAuxProcessResources(false); AtEOXact_Buffers(false); AtEOXact_SMgr(); - AtEOXact_Files(); + AtEOXact_Files(false); AtEOXact_HashTables(false); /* @@ -319,27 +309,21 @@ WalWriterMain(void) static void wal_quickdie(SIGNAL_ARGS) { - PG_SETMASK(&BlockSig); - /* - * We DO NOT want to run proc_exit() callbacks -- we're here because - * shared memory may be corrupted, so we don't want to try to clean up our - * transaction. Just nail the windows shut and get out of town. Now that - * there's an atexit callback to prevent third-party code from breaking - * things by calling exit() directly, we have to reset the callbacks - * explicitly to make this work as intended. - */ - on_exit_reset(); - - /* - * Note we do exit(2) not exit(0). This is to force the postmaster into a - * system reset cycle if some idiot DBA sends a manual SIGQUIT to a random + * We DO NOT want to run proc_exit() or atexit() callbacks -- we're here + * because shared memory may be corrupted, so we don't want to try to + * clean up our transaction. Just nail the windows shut and get out of + * town. The callbacks wouldn't be safe to run from a signal handler, + * anyway. + * + * Note we do _exit(2) not _exit(0). This is to force the postmaster into + * a system reset cycle if someone sends a manual SIGQUIT to a random * backend. This is necessary precisely because we don't clean up our * shared memory state. (The "dead man switch" mechanism in pmsignal.c * should ensure the postmaster sees this as a crash, too, but no harm in * being doubly sure.) */ - exit(2); + _exit(2); } /* SIGHUP: set flag to re-read config file at next convenient time */ diff --git a/src/backend/regex/regc_lex.c b/src/backend/regex/regc_lex.c index 2c6551ca74..38617b79fd 100644 --- a/src/backend/regex/regc_lex.c +++ b/src/backend/regex/regc_lex.c @@ -875,6 +875,7 @@ lexescape(struct vars *v) /* oops, doesn't look like it's a backref after all... 
*/ v->now = save; /* and fall through into octal number */ + /* FALLTHROUGH */ case CHR('0'): NOTE(REG_UUNPORT); v->now--; /* put first digit back */ diff --git a/src/backend/regex/regc_pg_locale.c b/src/backend/regex/regc_pg_locale.c index 6982879688..acbed2eeed 100644 --- a/src/backend/regex/regc_pg_locale.c +++ b/src/backend/regex/regc_pg_locale.c @@ -6,7 +6,7 @@ * * This file is #included by regcomp.c; it's not meant to compile standalone. * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * IDENTIFICATION @@ -29,20 +29,20 @@ * * 2. In the "default" collation (which is supposed to obey LC_CTYPE): * - * 2a. When working in UTF8 encoding, we use the functions if - * available. This assumes that every platform uses Unicode codepoints - * directly as the wchar_t representation of Unicode. On some platforms + * 2a. When working in UTF8 encoding, we use the functions. + * This assumes that every platform uses Unicode codepoints directly + * as the wchar_t representation of Unicode. On some platforms * wchar_t is only 16 bits wide, so we have to punt for codepoints > 0xFFFF. * - * 2b. In all other encodings, or on machines that lack , we use - * the functions for pg_wchar values up to 255, and punt for values - * above that. This is only 100% correct in single-byte encodings such as - * LATINn. However, non-Unicode multibyte encodings are mostly Far Eastern - * character sets for which the properties being tested here aren't very - * relevant for higher code values anyway. The difficulty with using the - * functions with non-Unicode multibyte encodings is that we can - * have no certainty that the platform's wchar_t representation matches - * what we do in pg_wchar conversions. + * 2b. In all other encodings, we use the functions for pg_wchar + * values up to 255, and punt for values above that. This is 100% correct + * only in single-byte encodings such as LATINn. However, non-Unicode + * multibyte encodings are mostly Far Eastern character sets for which the + * properties being tested here aren't very relevant for higher code values + * anyway. The difficulty with using the functions with + * non-Unicode multibyte encodings is that we can have no certainty that + * the platform's wchar_t representation matches what we do in pg_wchar + * conversions. * * 3. Other collations are only supported on platforms that HAVE_LOCALE_T. 
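The pg_wc_is*() hunks below all rely on the same guard: use the wide-character classifiers only when either wchar_t is at least 32 bits wide or the code point fits in 16 bits, and otherwise fall back to the single-byte classifier. A small self-contained sketch of that dispatch, assuming a stand-in pg_wchar type (this is illustrative, not the server's function):

    #include <ctype.h>
    #include <limits.h>
    #include <stdbool.h>
    #include <stdio.h>
    #include <wchar.h>
    #include <wctype.h>

    typedef unsigned int demo_pg_wchar;     /* stand-in for pg_wchar */

    /*
     * Classify a code point as a digit, punting to the 1-byte classifier
     * when the platform's wchar_t cannot represent it faithfully.
     */
    static bool
    demo_wc_isdigit(demo_pg_wchar c)
    {
        if (sizeof(wchar_t) >= 4 || c <= (demo_pg_wchar) 0xFFFF)
            return iswdigit((wint_t) c) != 0;

        /* fallback: only fully correct for single-byte encodings */
        return c <= (demo_pg_wchar) UCHAR_MAX && isdigit((unsigned char) c);
    }

    int
    main(void)
    {
        /* second result depends on the current locale and platform */
        printf("%d %d\n", demo_wc_isdigit('7'), demo_wc_isdigit(0x0661));
        return 0;
    }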
* Here, we use the locale_t-extended forms of the and @@ -268,7 +268,6 @@ pg_set_regex_collation(Oid collation) pg_regex_strategy = PG_REGEX_LOCALE_ICU; else #endif -#ifdef USE_WIDE_UPPER_LOWER if (GetDatabaseEncoding() == PG_UTF8) { if (pg_regex_locale) @@ -277,7 +276,6 @@ pg_set_regex_collation(Oid collation) pg_regex_strategy = PG_REGEX_LOCALE_WIDE; } else -#endif /* USE_WIDE_UPPER_LOWER */ { if (pg_regex_locale) pg_regex_strategy = PG_REGEX_LOCALE_1BYTE_L; @@ -298,16 +296,14 @@ pg_wc_isdigit(pg_wchar c) return (c <= (pg_wchar) 127 && (pg_char_properties[c] & PG_ISDIGIT)); case PG_REGEX_LOCALE_WIDE: -#ifdef USE_WIDE_UPPER_LOWER if (sizeof(wchar_t) >= 4 || c <= (pg_wchar) 0xFFFF) return iswdigit((wint_t) c); -#endif /* FALL THRU */ case PG_REGEX_LOCALE_1BYTE: return (c <= (pg_wchar) UCHAR_MAX && isdigit((unsigned char) c)); case PG_REGEX_LOCALE_WIDE_L: -#if defined(HAVE_LOCALE_T) && defined(USE_WIDE_UPPER_LOWER) +#ifdef HAVE_LOCALE_T if (sizeof(wchar_t) >= 4 || c <= (pg_wchar) 0xFFFF) return iswdigit_l((wint_t) c, pg_regex_locale->info.lt); #endif @@ -336,16 +332,14 @@ pg_wc_isalpha(pg_wchar c) return (c <= (pg_wchar) 127 && (pg_char_properties[c] & PG_ISALPHA)); case PG_REGEX_LOCALE_WIDE: -#ifdef USE_WIDE_UPPER_LOWER if (sizeof(wchar_t) >= 4 || c <= (pg_wchar) 0xFFFF) return iswalpha((wint_t) c); -#endif /* FALL THRU */ case PG_REGEX_LOCALE_1BYTE: return (c <= (pg_wchar) UCHAR_MAX && isalpha((unsigned char) c)); case PG_REGEX_LOCALE_WIDE_L: -#if defined(HAVE_LOCALE_T) && defined(USE_WIDE_UPPER_LOWER) +#ifdef HAVE_LOCALE_T if (sizeof(wchar_t) >= 4 || c <= (pg_wchar) 0xFFFF) return iswalpha_l((wint_t) c, pg_regex_locale->info.lt); #endif @@ -374,16 +368,14 @@ pg_wc_isalnum(pg_wchar c) return (c <= (pg_wchar) 127 && (pg_char_properties[c] & PG_ISALNUM)); case PG_REGEX_LOCALE_WIDE: -#ifdef USE_WIDE_UPPER_LOWER if (sizeof(wchar_t) >= 4 || c <= (pg_wchar) 0xFFFF) return iswalnum((wint_t) c); -#endif /* FALL THRU */ case PG_REGEX_LOCALE_1BYTE: return (c <= (pg_wchar) UCHAR_MAX && isalnum((unsigned char) c)); case PG_REGEX_LOCALE_WIDE_L: -#if defined(HAVE_LOCALE_T) && defined(USE_WIDE_UPPER_LOWER) +#ifdef HAVE_LOCALE_T if (sizeof(wchar_t) >= 4 || c <= (pg_wchar) 0xFFFF) return iswalnum_l((wint_t) c, pg_regex_locale->info.lt); #endif @@ -412,16 +404,14 @@ pg_wc_isupper(pg_wchar c) return (c <= (pg_wchar) 127 && (pg_char_properties[c] & PG_ISUPPER)); case PG_REGEX_LOCALE_WIDE: -#ifdef USE_WIDE_UPPER_LOWER if (sizeof(wchar_t) >= 4 || c <= (pg_wchar) 0xFFFF) return iswupper((wint_t) c); -#endif /* FALL THRU */ case PG_REGEX_LOCALE_1BYTE: return (c <= (pg_wchar) UCHAR_MAX && isupper((unsigned char) c)); case PG_REGEX_LOCALE_WIDE_L: -#if defined(HAVE_LOCALE_T) && defined(USE_WIDE_UPPER_LOWER) +#ifdef HAVE_LOCALE_T if (sizeof(wchar_t) >= 4 || c <= (pg_wchar) 0xFFFF) return iswupper_l((wint_t) c, pg_regex_locale->info.lt); #endif @@ -450,16 +440,14 @@ pg_wc_islower(pg_wchar c) return (c <= (pg_wchar) 127 && (pg_char_properties[c] & PG_ISLOWER)); case PG_REGEX_LOCALE_WIDE: -#ifdef USE_WIDE_UPPER_LOWER if (sizeof(wchar_t) >= 4 || c <= (pg_wchar) 0xFFFF) return iswlower((wint_t) c); -#endif /* FALL THRU */ case PG_REGEX_LOCALE_1BYTE: return (c <= (pg_wchar) UCHAR_MAX && islower((unsigned char) c)); case PG_REGEX_LOCALE_WIDE_L: -#if defined(HAVE_LOCALE_T) && defined(USE_WIDE_UPPER_LOWER) +#ifdef HAVE_LOCALE_T if (sizeof(wchar_t) >= 4 || c <= (pg_wchar) 0xFFFF) return iswlower_l((wint_t) c, pg_regex_locale->info.lt); #endif @@ -488,16 +476,14 @@ pg_wc_isgraph(pg_wchar c) return (c <= (pg_wchar) 127 && 
(pg_char_properties[c] & PG_ISGRAPH)); case PG_REGEX_LOCALE_WIDE: -#ifdef USE_WIDE_UPPER_LOWER if (sizeof(wchar_t) >= 4 || c <= (pg_wchar) 0xFFFF) return iswgraph((wint_t) c); -#endif /* FALL THRU */ case PG_REGEX_LOCALE_1BYTE: return (c <= (pg_wchar) UCHAR_MAX && isgraph((unsigned char) c)); case PG_REGEX_LOCALE_WIDE_L: -#if defined(HAVE_LOCALE_T) && defined(USE_WIDE_UPPER_LOWER) +#ifdef HAVE_LOCALE_T if (sizeof(wchar_t) >= 4 || c <= (pg_wchar) 0xFFFF) return iswgraph_l((wint_t) c, pg_regex_locale->info.lt); #endif @@ -526,16 +512,14 @@ pg_wc_isprint(pg_wchar c) return (c <= (pg_wchar) 127 && (pg_char_properties[c] & PG_ISPRINT)); case PG_REGEX_LOCALE_WIDE: -#ifdef USE_WIDE_UPPER_LOWER if (sizeof(wchar_t) >= 4 || c <= (pg_wchar) 0xFFFF) return iswprint((wint_t) c); -#endif /* FALL THRU */ case PG_REGEX_LOCALE_1BYTE: return (c <= (pg_wchar) UCHAR_MAX && isprint((unsigned char) c)); case PG_REGEX_LOCALE_WIDE_L: -#if defined(HAVE_LOCALE_T) && defined(USE_WIDE_UPPER_LOWER) +#ifdef HAVE_LOCALE_T if (sizeof(wchar_t) >= 4 || c <= (pg_wchar) 0xFFFF) return iswprint_l((wint_t) c, pg_regex_locale->info.lt); #endif @@ -564,16 +548,14 @@ pg_wc_ispunct(pg_wchar c) return (c <= (pg_wchar) 127 && (pg_char_properties[c] & PG_ISPUNCT)); case PG_REGEX_LOCALE_WIDE: -#ifdef USE_WIDE_UPPER_LOWER if (sizeof(wchar_t) >= 4 || c <= (pg_wchar) 0xFFFF) return iswpunct((wint_t) c); -#endif /* FALL THRU */ case PG_REGEX_LOCALE_1BYTE: return (c <= (pg_wchar) UCHAR_MAX && ispunct((unsigned char) c)); case PG_REGEX_LOCALE_WIDE_L: -#if defined(HAVE_LOCALE_T) && defined(USE_WIDE_UPPER_LOWER) +#ifdef HAVE_LOCALE_T if (sizeof(wchar_t) >= 4 || c <= (pg_wchar) 0xFFFF) return iswpunct_l((wint_t) c, pg_regex_locale->info.lt); #endif @@ -602,16 +584,14 @@ pg_wc_isspace(pg_wchar c) return (c <= (pg_wchar) 127 && (pg_char_properties[c] & PG_ISSPACE)); case PG_REGEX_LOCALE_WIDE: -#ifdef USE_WIDE_UPPER_LOWER if (sizeof(wchar_t) >= 4 || c <= (pg_wchar) 0xFFFF) return iswspace((wint_t) c); -#endif /* FALL THRU */ case PG_REGEX_LOCALE_1BYTE: return (c <= (pg_wchar) UCHAR_MAX && isspace((unsigned char) c)); case PG_REGEX_LOCALE_WIDE_L: -#if defined(HAVE_LOCALE_T) && defined(USE_WIDE_UPPER_LOWER) +#ifdef HAVE_LOCALE_T if (sizeof(wchar_t) >= 4 || c <= (pg_wchar) 0xFFFF) return iswspace_l((wint_t) c, pg_regex_locale->info.lt); #endif @@ -644,10 +624,8 @@ pg_wc_toupper(pg_wchar c) /* force C behavior for ASCII characters, per comments above */ if (c <= (pg_wchar) 127) return pg_ascii_toupper((unsigned char) c); -#ifdef USE_WIDE_UPPER_LOWER if (sizeof(wchar_t) >= 4 || c <= (pg_wchar) 0xFFFF) return towupper((wint_t) c); -#endif /* FALL THRU */ case PG_REGEX_LOCALE_1BYTE: /* force C behavior for ASCII characters, per comments above */ @@ -657,7 +635,7 @@ pg_wc_toupper(pg_wchar c) return toupper((unsigned char) c); return c; case PG_REGEX_LOCALE_WIDE_L: -#if defined(HAVE_LOCALE_T) && defined(USE_WIDE_UPPER_LOWER) +#ifdef HAVE_LOCALE_T if (sizeof(wchar_t) >= 4 || c <= (pg_wchar) 0xFFFF) return towupper_l((wint_t) c, pg_regex_locale->info.lt); #endif @@ -690,10 +668,8 @@ pg_wc_tolower(pg_wchar c) /* force C behavior for ASCII characters, per comments above */ if (c <= (pg_wchar) 127) return pg_ascii_tolower((unsigned char) c); -#ifdef USE_WIDE_UPPER_LOWER if (sizeof(wchar_t) >= 4 || c <= (pg_wchar) 0xFFFF) return towlower((wint_t) c); -#endif /* FALL THRU */ case PG_REGEX_LOCALE_1BYTE: /* force C behavior for ASCII characters, per comments above */ @@ -703,7 +679,7 @@ pg_wc_tolower(pg_wchar c) return tolower((unsigned char) c); return c; case 
PG_REGEX_LOCALE_WIDE_L: -#if defined(HAVE_LOCALE_T) && defined(USE_WIDE_UPPER_LOWER) +#ifdef HAVE_LOCALE_T if (sizeof(wchar_t) >= 4 || c <= (pg_wchar) 0xFFFF) return towlower_l((wint_t) c, pg_regex_locale->info.lt); #endif diff --git a/src/backend/regex/regcomp.c b/src/backend/regex/regcomp.c index 51385509bb..eb1f3d57a8 100644 --- a/src/backend/regex/regcomp.c +++ b/src/backend/regex/regcomp.c @@ -909,7 +909,8 @@ parseqatom(struct vars *v, } /* legal in EREs due to specification botch */ NOTE(REG_UPBOTCH); - /* fallthrough into case PLAIN */ + /* fall through into case PLAIN */ + /* FALLTHROUGH */ case PLAIN: onechr(v, v->nextvalue, lp, rp); okcolors(v->nfa, v->cm); diff --git a/src/backend/regex/regexport.c b/src/backend/regex/regexport.c index febf2adaec..81b3bf1fc8 100644 --- a/src/backend/regex/regexport.c +++ b/src/backend/regex/regexport.c @@ -15,7 +15,7 @@ * allows the caller to decide how big is too big to bother with. * * - * Portions Copyright (c) 2013-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 2013-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1998, 1999 Henry Spencer * * IDENTIFICATION diff --git a/src/backend/regex/regprefix.c b/src/backend/regex/regprefix.c index 96ca0e1ed3..6bf7c77f98 100644 --- a/src/backend/regex/regprefix.c +++ b/src/backend/regex/regprefix.c @@ -4,7 +4,7 @@ * Extract a common prefix, if any, from a compiled regex. * * - * Portions Copyright (c) 2012-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 2012-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1998, 1999 Henry Spencer * * IDENTIFICATION diff --git a/src/backend/replication/basebackup.c b/src/backend/replication/basebackup.c index 9776858f03..b20f6c379c 100644 --- a/src/backend/replication/basebackup.c +++ b/src/backend/replication/basebackup.c @@ -3,7 +3,7 @@ * basebackup.c * code for taking a base backup and streaming it to a standby * - * Portions Copyright (c) 2010-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 2010-2018, PostgreSQL Global Development Group * * IDENTIFICATION * src/backend/replication/basebackup.c @@ -17,8 +17,8 @@ #include #include "access/xlog_internal.h" /* for pg_start/stop_backup */ -#include "catalog/catalog.h" #include "catalog/pg_type.h" +#include "common/file_perm.h" #include "lib/stringinfo.h" #include "libpq/libpq.h" #include "libpq/pqformat.h" @@ -26,16 +26,20 @@ #include "nodes/pg_list.h" #include "pgtar.h" #include "pgstat.h" +#include "port.h" #include "postmaster/syslogger.h" #include "replication/basebackup.h" #include "replication/walsender.h" #include "replication/walsender_private.h" +#include "storage/bufpage.h" +#include "storage/checksum.h" #include "storage/dsm_impl.h" #include "storage/fd.h" #include "storage/ipc.h" +#include "storage/reinit.h" #include "utils/builtins.h" -#include "utils/elog.h" #include "utils/ps_status.h" +#include "utils/relcache.h" #include "utils/timestamp.h" @@ -51,9 +55,9 @@ typedef struct } basebackup_options; -static int64 sendDir(char *path, int basepathlen, bool sizeonly, +static int64 sendDir(const char *path, int basepathlen, bool sizeonly, List *tablespaces, bool sendtblspclinks); -static bool sendFile(char *readfilename, char *tarfilename, +static bool sendFile(const char *readfilename, const char *tarfilename, struct stat *statbuf, bool missing_ok); static void sendFileWithContent(const char *filename, const char *content); static int64 _tarWriteHeader(const char *filename, const char *linktarget, @@ -63,11 +67,12 
@@ static int64 _tarWriteDir(const char *pathbuf, int basepathlen, struct stat *sta static void send_int8_string(StringInfoData *buf, int64 intval); static void SendBackupHeader(List *tablespaces); static void base_backup_cleanup(int code, Datum arg); -static void perform_base_backup(basebackup_options *opt, DIR *tblspcdir); +static void perform_base_backup(basebackup_options *opt); static void parse_basebackup_options(List *options, basebackup_options *opt); static void SendXlogRecPtrResult(XLogRecPtr ptr, TimeLineID tli); static int compareWalFileNames(const void *a, const void *b); static void throttle(size_t increment); +static bool is_checksummed_file(const char *fullpath, const char *filename); /* Was the backup currently in-progress initiated in recovery mode? */ static bool backup_started_in_recovery = false; @@ -97,10 +102,22 @@ static TimeOffset elapsed_min_unit; /* The last check of the transfer rate. */ static TimestampTz throttled_last; +/* The starting XLOG position of the base backup. */ +static XLogRecPtr startptr; + +/* Total number of checksum failures during base backup. */ +static int64 total_checksum_failures; + +/* Do not verify checksums. */ +static bool noverify_checksums = false; + /* * The contents of these directories are removed or recreated during server * start so they are not included in backups. The directories themselves are * kept and included as empty to preserve access permissions. + * + * Note: this list should be kept in sync with the filter lists in pg_rewind's + * filemap.c. */ static const char *excludeDirContents[] = { @@ -151,6 +168,9 @@ static const char *excludeFiles[] = /* Skip current log file temporary file */ LOG_METAINFO_DATAFILE_TMP, + /* Skip relation cache because it is rebuilt on startup */ + RELCACHE_INIT_FILENAME, + /* * If there's a backup_label or tablespace_map file, it belongs to a * backup started by the user with pg_start_backup(). It is *not* correct @@ -167,6 +187,18 @@ static const char *excludeFiles[] = NULL }; +/* + * List of files excluded from checksum validation. + */ +static const char *const noChecksumFiles[] = { + "pg_control", + "pg_filenode.map", + "pg_internal.init", + "PG_VERSION", + NULL, +}; + + /* * Called when ERROR or FATAL happens in perform_base_backup() after * we have started the backup - make sure we end it! @@ -184,9 +216,8 @@ base_backup_cleanup(int code, Datum arg) * clobbered by longjmp" from stupider versions of gcc. */ static void -perform_base_backup(basebackup_options *opt, DIR *tblspcdir) +perform_base_backup(basebackup_options *opt) { - XLogRecPtr startptr; TimeLineID starttli; XLogRecPtr endptr; TimeLineID endtli; @@ -202,15 +233,17 @@ perform_base_backup(basebackup_options *opt, DIR *tblspcdir) labelfile = makeStringInfo(); tblspc_map_file = makeStringInfo(); + total_checksum_failures = 0; + startptr = do_pg_start_backup(opt->label, opt->fastcheckpoint, &starttli, - labelfile, tblspcdir, &tablespaces, + labelfile, &tablespaces, tblspc_map_file, opt->progress, opt->sendtblspcmapfile); /* * Once do_pg_start_backup has been called, ensure that any failure causes * us to abort the backup so we don't "leak" a backup counter. For this - * reason, *all* functionality between do_pg_start_backup() and + * reason, *all* functionality between do_pg_start_backup() and the end of * do_pg_stop_backup() should be inside the error cleanup block! 
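The new noChecksumFiles array above is consulted by file name only, so the lookup is a linear scan over a NULL-terminated array of strings. A self-contained sketch of that lookup style (the entries below merely echo the patch's examples and are not meant as the authoritative list):

    #include <stdbool.h>
    #include <stdio.h>
    #include <string.h>

    /* Example skiplist, NULL-terminated like the one in the patch. */
    static const char *const demo_no_checksum_files[] = {
        "pg_control",
        "pg_filenode.map",
        "pg_internal.init",
        "PG_VERSION",
        NULL,
    };

    /* True if checksum verification should be skipped for this file name. */
    static bool
    demo_skip_checksum(const char *filename)
    {
        const char *const *f;

        for (f = demo_no_checksum_files; *f; f++)
            if (strcmp(*f, filename) == 0)
                return true;
        return false;
    }

    int
    main(void)
    {
        printf("%d %d\n", demo_skip_checksum("pg_control"),
               demo_skip_checksum("16384"));    /* 1 0 */
        return 0;
    }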
*/ @@ -274,7 +307,7 @@ perform_base_backup(basebackup_options *opt, DIR *tblspcdir) /* Send CopyOutResponse message */ pq_beginmessage(&buf, 'H'); pq_sendbyte(&buf, 0); /* overall format */ - pq_sendint(&buf, 0, 2); /* natts */ + pq_sendint16(&buf, 0); /* natts */ pq_endmessage(&buf); if (ti->path == NULL) @@ -300,7 +333,7 @@ perform_base_backup(basebackup_options *opt, DIR *tblspcdir) if (lstat(XLOG_CONTROL_FILE, &statbuf) != 0) ereport(ERROR, (errcode_for_file_access(), - errmsg("could not stat control file \"%s\": %m", + errmsg("could not stat file \"%s\": %m", XLOG_CONTROL_FILE))); sendFile(XLOG_CONTROL_FILE, XLOG_CONTROL_FILE, &statbuf, false); } @@ -320,10 +353,11 @@ perform_base_backup(basebackup_options *opt, DIR *tblspcdir) else pq_putemptymessage('c'); /* CopyDone */ } + + endptr = do_pg_stop_backup(labelfile->data, !opt->nowait, &endtli); } PG_END_ENSURE_ERROR_CLEANUP(base_backup_cleanup, (Datum) 0); - endptr = do_pg_stop_backup(labelfile->data, !opt->nowait, &endtli); if (opt->includewal) { @@ -357,15 +391,12 @@ perform_base_backup(basebackup_options *opt, DIR *tblspcdir) * shouldn't be such files, but if there are, there's little harm in * including them. */ - XLByteToSeg(startptr, startsegno); - XLogFileName(firstoff, ThisTimeLineID, startsegno); - XLByteToPrevSeg(endptr, endsegno); - XLogFileName(lastoff, ThisTimeLineID, endsegno); + XLByteToSeg(startptr, startsegno, wal_segment_size); + XLogFileName(firstoff, ThisTimeLineID, startsegno, wal_segment_size); + XLByteToPrevSeg(endptr, endsegno, wal_segment_size); + XLogFileName(lastoff, ThisTimeLineID, endsegno, wal_segment_size); dir = AllocateDir("pg_wal"); - if (!dir) - ereport(ERROR, - (errmsg("could not open directory \"%s\": %m", "pg_wal"))); while ((de = ReadDir(dir, "pg_wal")) != NULL) { /* Does it look like a WAL segment, and is it in the range? */ @@ -415,12 +446,13 @@ perform_base_backup(basebackup_options *opt, DIR *tblspcdir) * Sanity check: the first and last segment should cover startptr and * endptr, with no gaps in between. 
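The XLByteToSeg()/XLogFileName() calls in the hunks below now take wal_segment_size explicitly, because the segment size is a per-cluster setting rather than a compile-time constant. The underlying arithmetic is just integer division of the LSN by the segment size; here is a hedged sketch under that assumption (the 24-hex-digit name layout follows the usual convention, but these are illustrative helpers, not the server's macros):

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    typedef uint64_t demo_XLogRecPtr;
    typedef uint64_t demo_XLogSegNo;

    /* Segment number containing a given LSN, for a configurable segment size. */
    static demo_XLogSegNo
    demo_byte_to_seg(demo_XLogRecPtr lsn, int wal_segment_size)
    {
        return lsn / (demo_XLogSegNo) wal_segment_size;
    }

    int
    main(void)
    {
        int         wal_segment_size = 16 * 1024 * 1024;   /* 16MB default */
        demo_XLogRecPtr lsn = UINT64_C(0x000000010A0000F8);
        demo_XLogSegNo segno = demo_byte_to_seg(lsn, wal_segment_size);
        uint64_t    segs_per_id = UINT64_C(0x100000000) / wal_segment_size;

        /* conventional WAL file name: timeline, then segno split in two */
        printf("%08X%08X%08X\n", 1,
               (unsigned int) (segno / segs_per_id),
               (unsigned int) (segno % segs_per_id));
        return 0;
    }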
*/ - XLogFromFileName(walFiles[0], &tli, &segno); + XLogFromFileName(walFiles[0], &tli, &segno, wal_segment_size); if (segno != startsegno) { char startfname[MAXFNAMELEN]; - XLogFileName(startfname, ThisTimeLineID, startsegno); + XLogFileName(startfname, ThisTimeLineID, startsegno, + wal_segment_size); ereport(ERROR, (errmsg("could not find WAL file \"%s\"", startfname))); } @@ -429,12 +461,13 @@ perform_base_backup(basebackup_options *opt, DIR *tblspcdir) XLogSegNo currsegno = segno; XLogSegNo nextsegno = segno + 1; - XLogFromFileName(walFiles[i], &tli, &segno); + XLogFromFileName(walFiles[i], &tli, &segno, wal_segment_size); if (!(nextsegno == segno || currsegno == segno)) { char nextfname[MAXFNAMELEN]; - XLogFileName(nextfname, ThisTimeLineID, nextsegno); + XLogFileName(nextfname, ThisTimeLineID, nextsegno, + wal_segment_size); ereport(ERROR, (errmsg("could not find WAL file \"%s\"", nextfname))); } @@ -443,7 +476,7 @@ perform_base_backup(basebackup_options *opt, DIR *tblspcdir) { char endfname[MAXFNAMELEN]; - XLogFileName(endfname, ThisTimeLineID, endsegno); + XLogFileName(endfname, ThisTimeLineID, endsegno, wal_segment_size); ereport(ERROR, (errmsg("could not find WAL file \"%s\"", endfname))); } @@ -457,11 +490,13 @@ perform_base_backup(basebackup_options *opt, DIR *tblspcdir) pgoff_t len = 0; snprintf(pathbuf, MAXPGPATH, XLOGDIR "/%s", walFiles[i]); - XLogFromFileName(walFiles[i], &tli, &segno); + XLogFromFileName(walFiles[i], &tli, &segno, wal_segment_size); fp = AllocateFile(pathbuf, "rb"); if (fp == NULL) { + int save_errno = errno; + /* * Most likely reason for this is that the file was already * removed by a checkpoint, so check for that to get a better @@ -469,6 +504,7 @@ perform_base_backup(basebackup_options *opt, DIR *tblspcdir) */ CheckXLogRemoved(segno, tli); + errno = save_errno; ereport(ERROR, (errcode_for_file_access(), errmsg("could not open file \"%s\": %m", pathbuf))); @@ -479,7 +515,7 @@ perform_base_backup(basebackup_options *opt, DIR *tblspcdir) (errcode_for_file_access(), errmsg("could not stat file \"%s\": %m", pathbuf))); - if (statbuf.st_size != XLogSegSize) + if (statbuf.st_size != wal_segment_size) { CheckXLogRemoved(segno, tli); ereport(ERROR, @@ -490,7 +526,9 @@ perform_base_backup(basebackup_options *opt, DIR *tblspcdir) /* send the WAL file itself */ _tarWriteHeader(pathbuf, NULL, &statbuf, false); - while ((cnt = fread(buf, 1, Min(sizeof(buf), XLogSegSize - len), fp)) > 0) + while ((cnt = fread(buf, 1, + Min(sizeof(buf), wal_segment_size - len), + fp)) > 0) { CheckXLogRemoved(segno, tli); /* Send the chunk as a CopyData message */ @@ -501,11 +539,11 @@ perform_base_backup(basebackup_options *opt, DIR *tblspcdir) len += cnt; throttle(cnt); - if (len == XLogSegSize) + if (len == wal_segment_size) break; } - if (len != XLogSegSize) + if (len != wal_segment_size) { CheckXLogRemoved(segno, tli); ereport(ERROR, @@ -513,7 +551,7 @@ perform_base_backup(basebackup_options *opt, DIR *tblspcdir) errmsg("unexpected WAL file size \"%s\"", walFiles[i]))); } - /* XLogSegSize is a multiple of 512, so no need for padding */ + /* wal_segment_size is a multiple of 512, so no need for padding */ FreeFile(fp); @@ -558,6 +596,23 @@ perform_base_backup(basebackup_options *opt, DIR *tblspcdir) pq_putemptymessage('c'); } SendXlogRecPtrResult(endptr, endtli); + + if (total_checksum_failures) + { + if (total_checksum_failures > 1) + { + char buf[64]; + + snprintf(buf, sizeof(buf), INT64_FORMAT, total_checksum_failures); + + ereport(WARNING, + (errmsg("%s total checksum 
verification failures", buf))); + } + ereport(ERROR, + (errcode(ERRCODE_DATA_CORRUPTED), + errmsg("checksum verification failure during base backup"))); + } + } /* @@ -587,6 +642,7 @@ parse_basebackup_options(List *options, basebackup_options *opt) bool o_wal = false; bool o_maxrate = false; bool o_tablespace_map = false; + bool o_noverify_checksums = false; MemSet(opt, 0, sizeof(*opt)); foreach(lopt, options) @@ -666,6 +722,15 @@ parse_basebackup_options(List *options, basebackup_options *opt) opt->sendtblspcmapfile = true; o_tablespace_map = true; } + else if (strcmp(defel->defname, "noverify_checksums") == 0) + { + if (o_noverify_checksums) + ereport(ERROR, + (errcode(ERRCODE_SYNTAX_ERROR), + errmsg("duplicate option \"%s\"", defel->defname))); + noverify_checksums = true; + o_noverify_checksums = true; + } else elog(ERROR, "option \"%s\" not recognized", defel->defname); @@ -685,7 +750,6 @@ parse_basebackup_options(List *options, basebackup_options *opt) void SendBaseBackup(BaseBackupCmd *cmd) { - DIR *dir; basebackup_options opt; parse_basebackup_options(cmd->options, &opt); @@ -701,15 +765,7 @@ SendBaseBackup(BaseBackupCmd *cmd) set_ps_display(activitymsg, false); } - /* Make sure we can open the directory with tablespaces in it */ - dir = AllocateDir("pg_tblspc"); - if (!dir) - ereport(ERROR, - (errmsg("could not open directory \"%s\": %m", "pg_tblspc"))); - - perform_base_backup(&opt, dir); - - FreeDir(dir); + perform_base_backup(&opt); } static void @@ -718,7 +774,7 @@ send_int8_string(StringInfoData *buf, int64 intval) char is[32]; sprintf(is, INT64_FORMAT, intval); - pq_sendint(buf, strlen(is), 4); + pq_sendint32(buf, strlen(is)); pq_sendbytes(buf, is, strlen(is)); } @@ -730,34 +786,34 @@ SendBackupHeader(List *tablespaces) /* Construct and send the directory information */ pq_beginmessage(&buf, 'T'); /* RowDescription */ - pq_sendint(&buf, 3, 2); /* 3 fields */ + pq_sendint16(&buf, 3); /* 3 fields */ /* First field - spcoid */ pq_sendstring(&buf, "spcoid"); - pq_sendint(&buf, 0, 4); /* table oid */ - pq_sendint(&buf, 0, 2); /* attnum */ - pq_sendint(&buf, OIDOID, 4); /* type oid */ - pq_sendint(&buf, 4, 2); /* typlen */ - pq_sendint(&buf, 0, 4); /* typmod */ - pq_sendint(&buf, 0, 2); /* format code */ + pq_sendint32(&buf, 0); /* table oid */ + pq_sendint16(&buf, 0); /* attnum */ + pq_sendint32(&buf, OIDOID); /* type oid */ + pq_sendint16(&buf, 4); /* typlen */ + pq_sendint32(&buf, 0); /* typmod */ + pq_sendint16(&buf, 0); /* format code */ /* Second field - spcpath */ pq_sendstring(&buf, "spclocation"); - pq_sendint(&buf, 0, 4); - pq_sendint(&buf, 0, 2); - pq_sendint(&buf, TEXTOID, 4); - pq_sendint(&buf, -1, 2); - pq_sendint(&buf, 0, 4); - pq_sendint(&buf, 0, 2); + pq_sendint32(&buf, 0); + pq_sendint16(&buf, 0); + pq_sendint32(&buf, TEXTOID); + pq_sendint16(&buf, -1); + pq_sendint32(&buf, 0); + pq_sendint16(&buf, 0); /* Third field - size */ pq_sendstring(&buf, "size"); - pq_sendint(&buf, 0, 4); - pq_sendint(&buf, 0, 2); - pq_sendint(&buf, INT8OID, 4); - pq_sendint(&buf, 8, 2); - pq_sendint(&buf, 0, 4); - pq_sendint(&buf, 0, 2); + pq_sendint32(&buf, 0); + pq_sendint16(&buf, 0); + pq_sendint32(&buf, INT8OID); + pq_sendint16(&buf, 8); + pq_sendint32(&buf, 0); + pq_sendint16(&buf, 0); pq_endmessage(&buf); foreach(lc, tablespaces) @@ -766,28 +822,28 @@ SendBackupHeader(List *tablespaces) /* Send one datarow message */ pq_beginmessage(&buf, 'D'); - pq_sendint(&buf, 3, 2); /* number of columns */ + pq_sendint16(&buf, 3); /* number of columns */ if (ti->path == NULL) { - 
pq_sendint(&buf, -1, 4); /* Length = -1 ==> NULL */ - pq_sendint(&buf, -1, 4); + pq_sendint32(&buf, -1); /* Length = -1 ==> NULL */ + pq_sendint32(&buf, -1); } else { Size len; len = strlen(ti->oid); - pq_sendint(&buf, len, 4); + pq_sendint32(&buf, len); pq_sendbytes(&buf, ti->oid, len); len = strlen(ti->path); - pq_sendint(&buf, len, 4); + pq_sendint32(&buf, len); pq_sendbytes(&buf, ti->path, len); } if (ti->size >= 0) send_int8_string(&buf, ti->size / 1024); else - pq_sendint(&buf, -1, 4); /* NULL */ + pq_sendint32(&buf, -1); /* NULL */ pq_endmessage(&buf); } @@ -808,42 +864,42 @@ SendXlogRecPtrResult(XLogRecPtr ptr, TimeLineID tli) Size len; pq_beginmessage(&buf, 'T'); /* RowDescription */ - pq_sendint(&buf, 2, 2); /* 2 fields */ + pq_sendint16(&buf, 2); /* 2 fields */ /* Field headers */ pq_sendstring(&buf, "recptr"); - pq_sendint(&buf, 0, 4); /* table oid */ - pq_sendint(&buf, 0, 2); /* attnum */ - pq_sendint(&buf, TEXTOID, 4); /* type oid */ - pq_sendint(&buf, -1, 2); - pq_sendint(&buf, 0, 4); - pq_sendint(&buf, 0, 2); + pq_sendint32(&buf, 0); /* table oid */ + pq_sendint16(&buf, 0); /* attnum */ + pq_sendint32(&buf, TEXTOID); /* type oid */ + pq_sendint16(&buf, -1); + pq_sendint32(&buf, 0); + pq_sendint16(&buf, 0); pq_sendstring(&buf, "tli"); - pq_sendint(&buf, 0, 4); /* table oid */ - pq_sendint(&buf, 0, 2); /* attnum */ + pq_sendint32(&buf, 0); /* table oid */ + pq_sendint16(&buf, 0); /* attnum */ /* * int8 may seem like a surprising data type for this, but in theory int4 * would not be wide enough for this, as TimeLineID is unsigned. */ - pq_sendint(&buf, INT8OID, 4); /* type oid */ - pq_sendint(&buf, -1, 2); - pq_sendint(&buf, 0, 4); - pq_sendint(&buf, 0, 2); + pq_sendint32(&buf, INT8OID); /* type oid */ + pq_sendint16(&buf, -1); + pq_sendint32(&buf, 0); + pq_sendint16(&buf, 0); pq_endmessage(&buf); /* Data row */ pq_beginmessage(&buf, 'D'); - pq_sendint(&buf, 2, 2); /* number of columns */ + pq_sendint16(&buf, 2); /* number of columns */ len = snprintf(str, sizeof(str), "%X/%X", (uint32) (ptr >> 32), (uint32) ptr); - pq_sendint(&buf, len, 4); + pq_sendint32(&buf, len); pq_sendbytes(&buf, str, len); len = snprintf(str, sizeof(str), "%u", tli); - pq_sendint(&buf, len, 4); + pq_sendint32(&buf, len); pq_sendbytes(&buf, str, len); pq_endmessage(&buf); @@ -877,7 +933,7 @@ sendFileWithContent(const char *filename, const char *content) statbuf.st_gid = getegid(); #endif statbuf.st_mtime = time(NULL); - statbuf.st_mode = S_IRUSR | S_IWUSR; + statbuf.st_mode = pg_file_create_mode; statbuf.st_size = len; _tarWriteHeader(filename, NULL, &statbuf, false); @@ -954,7 +1010,7 @@ sendTablespace(char *path, bool sizeonly) * as it will be sent separately in the tablespace_map file. */ static int64 -sendDir(char *path, int basepathlen, bool sizeonly, List *tablespaces, +sendDir(const char *path, int basepathlen, bool sizeonly, List *tablespaces, bool sendtblspclinks) { DIR *dir; @@ -962,12 +1018,44 @@ sendDir(char *path, int basepathlen, bool sizeonly, List *tablespaces, char pathbuf[MAXPGPATH * 2]; struct stat statbuf; int64 size = 0; + const char *lastDir; /* Split last dir from parent path. */ + bool isDbDir = false; /* Does this directory contain relations? */ + + /* + * Determine if the current path is a database directory that can contain + * relations. + * + * Start by finding the location of the delimiter between the parent path + * and the current path. + */ + lastDir = last_dir_separator(path); + + /* Does this path look like a database path (i.e. all digits)? 
*/ + if (lastDir != NULL && + strspn(lastDir + 1, "0123456789") == strlen(lastDir + 1)) + { + /* Part of path that contains the parent directory. */ + int parentPathLen = lastDir - path; + + /* + * Mark path as a database directory if the parent path is either + * $PGDATA/base or a tablespace version path. + */ + if (strncmp(path, "./base", parentPathLen) == 0 || + (parentPathLen >= (sizeof(TABLESPACE_VERSION_DIRECTORY) - 1) && + strncmp(lastDir - (sizeof(TABLESPACE_VERSION_DIRECTORY) - 1), + TABLESPACE_VERSION_DIRECTORY, + sizeof(TABLESPACE_VERSION_DIRECTORY) - 1) == 0)) + isDbDir = true; + } dir = AllocateDir(path); while ((de = ReadDir(dir, path)) != NULL) { int excludeIdx; bool excludeFound; + ForkNumber relForkNum; /* Type of fork if file is a relation */ + int relOidChars; /* Chars in filename that are the rel oid */ /* Skip special stuff */ if (strcmp(de->d_name, ".") == 0 || strcmp(de->d_name, "..") == 0) @@ -1011,6 +1099,47 @@ sendDir(char *path, int basepathlen, bool sizeonly, List *tablespaces, if (excludeFound) continue; + /* Exclude all forks for unlogged tables except the init fork */ + if (isDbDir && + parse_filename_for_nontemp_relation(de->d_name, &relOidChars, + &relForkNum)) + { + /* Never exclude init forks */ + if (relForkNum != INIT_FORKNUM) + { + char initForkFile[MAXPGPATH]; + char relOid[OIDCHARS + 1]; + + /* + * If any other type of fork, check if there is an init fork + * with the same OID. If so, the file can be excluded. + */ + memcpy(relOid, de->d_name, relOidChars); + relOid[relOidChars] = '\0'; + snprintf(initForkFile, sizeof(initForkFile), "%s/%s_init", + path, relOid); + + if (lstat(initForkFile, &statbuf) == 0) + { + elog(DEBUG2, + "unlogged relation file \"%s\" excluded from backup", + de->d_name); + + continue; + } + } + } + + /* Exclude temporary relations */ + if (isDbDir && looks_like_temp_rel_name(de->d_name)) + { + elog(DEBUG2, + "temporary relation file \"%s\" excluded from backup", + de->d_name); + + continue; + } + snprintf(pathbuf, sizeof(pathbuf), "%s/%s", path, de->d_name); /* Skip pg_control here to back up it last */ @@ -1183,6 +1312,33 @@ sendDir(char *path, int basepathlen, bool sizeonly, List *tablespaces, return size; } +/* + * Check if a file should have its checksum validated. + * We validate checksums on files in regular tablespaces + * (including global and default) only, and in those there + * are some files that are explicitly excluded. + */ +static bool +is_checksummed_file(const char *fullpath, const char *filename) +{ + const char *const *f; + + /* Check that the file is in a tablespace */ + if (strncmp(fullpath, "./global/", 9) == 0 || + strncmp(fullpath, "./base/", 7) == 0 || + strncmp(fullpath, "/", 1) == 0) + { + /* Compare file against noChecksumFiles skiplist */ + for (f = noChecksumFiles; *f; f++) + if (strcmp(*f, filename) == 0) + return false; + + return true; + } + else + return false; +} + /***** * Functions for handling tar file format * @@ -1199,14 +1355,24 @@ sendDir(char *path, int basepathlen, bool sizeonly, List *tablespaces, * and the file did not exist. 
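sendDir() now decides from the path shape alone whether a directory can contain relation files: the last component must be all digits (a database OID) and the parent must be base/ or a tablespace version directory. The all-digits test is the strspn() idiom used above; a tiny standalone sketch of just that test (helper name invented, and strrchr stands in for the server's last_dir_separator, which also handles Windows separators):

    #include <stdbool.h>
    #include <stdio.h>
    #include <string.h>

    /* True if the last path component consists only of decimal digits. */
    static bool
    demo_last_component_is_oid(const char *path)
    {
        const char *last = strrchr(path, '/');

        if (last == NULL)
            return false;
        last++;                     /* skip the separator itself */
        return *last != '\0' && strspn(last, "0123456789") == strlen(last);
    }

    int
    main(void)
    {
        printf("%d\n", demo_last_component_is_oid("./base/16384"));    /* 1 */
        printf("%d\n", demo_last_component_is_oid("./pg_wal"));        /* 0 */
        return 0;
    }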
*/ static bool -sendFile(char *readfilename, char *tarfilename, struct stat *statbuf, +sendFile(const char *readfilename, const char *tarfilename, struct stat *statbuf, bool missing_ok) { FILE *fp; + BlockNumber blkno = 0; + bool block_retry = false; char buf[TAR_SEND_SIZE]; - size_t cnt; + uint16 checksum; + int checksum_failures = 0; + off_t cnt; + int i; pgoff_t len = 0; + char *page; size_t pad; + PageHeader phdr; + int segmentno = 0; + char *segmentpath; + bool verify_checksum = false; fp = AllocateFile(readfilename, "rb"); if (fp == NULL) @@ -1220,8 +1386,143 @@ sendFile(char *readfilename, char *tarfilename, struct stat *statbuf, _tarWriteHeader(tarfilename, NULL, statbuf, false); + if (!noverify_checksums && DataChecksumsEnabled()) + { + char *filename; + + /* + * Get the filename (excluding path). As last_dir_separator() + * includes the last directory separator, we chop that off by + * incrementing the pointer. + */ + filename = last_dir_separator(readfilename) + 1; + + if (is_checksummed_file(readfilename, filename)) + { + verify_checksum = true; + + /* + * Cut off at the segment boundary (".") to get the segment number + * in order to mix it into the checksum. + */ + segmentpath = strstr(filename, "."); + if (segmentpath != NULL) + { + segmentno = atoi(segmentpath + 1); + if (segmentno == 0) + ereport(ERROR, + (errmsg("invalid segment number %d in file \"%s\"", + segmentno, filename))); + } + } + } + while ((cnt = fread(buf, 1, Min(sizeof(buf), statbuf->st_size - len), fp)) > 0) { + /* + * The checksums are verified at block level, so we iterate over the + * buffer in chunks of BLCKSZ, after making sure that + * TAR_SEND_SIZE/buf is divisible by BLCKSZ and we read a multiple of + * BLCKSZ bytes. + */ + Assert(TAR_SEND_SIZE % BLCKSZ == 0); + + if (verify_checksum && (cnt % BLCKSZ != 0)) + { + ereport(WARNING, + (errmsg("cannot verify checksum in file \"%s\", block " + "%d: read buffer size %d and page size %d " + "differ", + readfilename, blkno, (int) cnt, BLCKSZ))); + verify_checksum = false; + } + + if (verify_checksum) + { + for (i = 0; i < cnt / BLCKSZ; i++) + { + page = buf + BLCKSZ * i; + + /* + * Only check pages which have not been modified since the + * start of the base backup. Otherwise, they might have been + * written only halfway and the checksum would not be valid. + * However, replaying WAL would reinstate the correct page in + * this case. We also skip completely new pages, since they + * don't have a checksum yet. + */ + if (!PageIsNew(page) && PageGetLSN(page) < startptr) + { + checksum = pg_checksum_page((char *) page, blkno + segmentno * RELSEG_SIZE); + phdr = (PageHeader) page; + if (phdr->pd_checksum != checksum) + { + /* + * Retry the block on the first failure. It's + * possible that we read the first 4K page of the + * block just before postgres updated the entire block + * so it ends up looking torn to us. We only need to + * retry once because the LSN should be updated to + * something we can ignore on the next pass. If the + * error happens again then it is a true validation + * failure. 
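The verification policy in this loop is worth restating: skip all-zero ("new") pages, skip pages whose LSN is at or past the backup start point (they may be torn, and WAL replay will repair them), and count the rest as failures when the stored checksum does not match. A self-contained sketch of that skip-and-count policy follows; the page header, the checksum routine, and every name are simplified stand-ins (the server uses pg_checksum_page() and the real PageHeader layout):

    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    #define DEMO_BLCKSZ 8192

    /* Minimal page header: only the fields the skip policy needs. */
    typedef struct
    {
        uint64_t    pd_lsn;         /* LSN of last change to the page */
        uint16_t    pd_checksum;    /* checksum stored at write time */
    } DemoPageHeader;

    /* Placeholder checksum; NOT the server's pg_checksum_page() algorithm. */
    static uint16_t
    demo_checksum(const char *page, uint32_t blkno)
    {
        uint32_t    sum = blkno;
        size_t      i;

        for (i = sizeof(DemoPageHeader); i < DEMO_BLCKSZ; i++)
            sum = sum * 31 + (unsigned char) page[i];
        return (uint16_t) sum;
    }

    /*
     * Count mismatches in a buffer of whole blocks, skipping all-zero (new)
     * pages and pages changed at or after start_lsn.
     */
    static int
    demo_verify_buffer(const char *buf, size_t cnt, uint32_t blkno,
                       uint64_t start_lsn)
    {
        static const char zero_page[DEMO_BLCKSZ];
        int         failures = 0;
        size_t      i;

        for (i = 0; i < cnt / DEMO_BLCKSZ; i++, blkno++)
        {
            const char *page = buf + DEMO_BLCKSZ * i;
            DemoPageHeader hdr;

            memcpy(&hdr, page, sizeof(hdr));    /* avoid alignment assumptions */

            if (memcmp(page, zero_page, DEMO_BLCKSZ) == 0)
                continue;           /* new page: no checksum yet */
            if (hdr.pd_lsn >= start_lsn)
                continue;           /* may be torn; WAL replay will fix it */
            if (demo_checksum(page, blkno) != hdr.pd_checksum)
                failures++;
        }
        return failures;
    }

    int
    main(void)
    {
        static char buf[2 * DEMO_BLCKSZ];   /* two all-zero "new" pages */

        printf("failures: %d\n", demo_verify_buffer(buf, sizeof(buf), 0, 100));
        return 0;
    }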
+ */ + if (block_retry == false) + { + /* Reread the failed block */ + if (fseek(fp, -(cnt - BLCKSZ * i), SEEK_CUR) == -1) + { + ereport(ERROR, + (errcode_for_file_access(), + errmsg("could not fseek in file \"%s\": %m", + readfilename))); + } + + if (fread(buf + BLCKSZ * i, 1, BLCKSZ, fp) != BLCKSZ) + { + ereport(ERROR, + (errcode_for_file_access(), + errmsg("could not reread block %d of file \"%s\": %m", + blkno, readfilename))); + } + + if (fseek(fp, cnt - BLCKSZ * i - BLCKSZ, SEEK_CUR) == -1) + { + ereport(ERROR, + (errcode_for_file_access(), + errmsg("could not fseek in file \"%s\": %m", + readfilename))); + } + + /* Set flag so we know a retry was attempted */ + block_retry = true; + + /* Reset loop to validate the block again */ + i--; + continue; + } + + checksum_failures++; + + if (checksum_failures <= 5) + ereport(WARNING, + (errmsg("checksum verification failed in " + "file \"%s\", block %d: calculated " + "%X but expected %X", + readfilename, blkno, checksum, + phdr->pd_checksum))); + if (checksum_failures == 5) + ereport(WARNING, + (errmsg("further checksum verification " + "failures in file \"%s\" will not " + "be reported", readfilename))); + } + } + block_retry = false; + blkno++; + } + } + /* Send the chunk as a CopyData message */ if (pq_putmessage('d', buf, cnt)) ereport(ERROR, @@ -1267,6 +1568,14 @@ sendFile(char *readfilename, char *tarfilename, struct stat *statbuf, FreeFile(fp); + if (checksum_failures > 1) + { + ereport(WARNING, + (errmsg("file \"%s\" has a total of %d checksum verification " + "failures", readfilename, checksum_failures))); + } + total_checksum_failures += checksum_failures; + return true; } @@ -1323,7 +1632,7 @@ _tarWriteDir(const char *pathbuf, int basepathlen, struct stat *statbuf, #else if (pgwin32_is_junction(pathbuf)) #endif - statbuf->st_mode = S_IFDIR | S_IRWXU; + statbuf->st_mode = S_IFDIR | pg_dir_create_mode; return _tarWriteHeader(pathbuf + basepathlen + 1, NULL, statbuf, sizeonly); } @@ -1336,10 +1645,7 @@ _tarWriteDir(const char *pathbuf, int basepathlen, struct stat *statbuf, static void throttle(size_t increment) { - TimeOffset elapsed, - elapsed_min, - sleep; - int wait_result; + TimeOffset elapsed_min; if (throttling_counter < 0) return; @@ -1348,14 +1654,28 @@ throttle(size_t increment) if (throttling_counter < throttling_sample) return; - /* Time elapsed since the last measurement (and possible wake up). */ - elapsed = GetCurrentTimestamp() - throttled_last; - /* How much should have elapsed at minimum? */ - elapsed_min = elapsed_min_unit * (throttling_counter / throttling_sample); - sleep = elapsed_min - elapsed; - /* Only sleep if the transfer is faster than it should be. */ - if (sleep > 0) + /* How much time should have elapsed at minimum? */ + elapsed_min = elapsed_min_unit * + (throttling_counter / throttling_sample); + + /* + * Since the latch could be set repeatedly because of concurrently WAL + * activity, sleep in a loop to ensure enough time has passed. + */ + for (;;) { + TimeOffset elapsed, + sleep; + int wait_result; + + /* Time elapsed since the last measurement (and possible wake up). */ + elapsed = GetCurrentTimestamp() - throttled_last; + + /* sleep if the transfer is faster than it should be */ + sleep = elapsed_min - elapsed; + if (sleep <= 0) + break; + ResetLatch(MyLatch); /* We're eating a potentially set latch, so check for interrupts */ @@ -1372,6 +1692,10 @@ throttle(size_t increment) if (wait_result & WL_LATCH_SET) CHECK_FOR_INTERRUPTS(); + + /* Done waiting? 
*/ + if (wait_result & WL_TIMEOUT) + break; } /* diff --git a/src/backend/replication/libpqwalreceiver/Makefile b/src/backend/replication/libpqwalreceiver/Makefile index a7a5fe1ed2..75b0e2b49f 100644 --- a/src/backend/replication/libpqwalreceiver/Makefile +++ b/src/backend/replication/libpqwalreceiver/Makefile @@ -15,7 +15,8 @@ include $(top_builddir)/src/Makefile.global override CPPFLAGS := -I$(srcdir) -I$(libpq_srcdir) $(CPPFLAGS) OBJS = libpqwalreceiver.o $(WIN32RES) -SHLIB_LINK = $(libpq) $(filter -lintl, $(LIBS)) +SHLIB_LINK_INTERNAL = $(libpq) +SHLIB_LINK = $(filter -lintl, $(LIBS)) SHLIB_PREREQS = submake-libpq PGFILEDESC = "libpqwalreceiver - receive WAL during streaming replication" NAME = libpqwalreceiver diff --git a/src/backend/replication/libpqwalreceiver/libpqwalreceiver.c b/src/backend/replication/libpqwalreceiver/libpqwalreceiver.c index 3957bd37fb..1e1695ef4f 100644 --- a/src/backend/replication/libpqwalreceiver/libpqwalreceiver.c +++ b/src/backend/replication/libpqwalreceiver/libpqwalreceiver.c @@ -6,7 +6,7 @@ * loaded as a dynamic module to avoid linking the main server binary with * libpq. * - * Portions Copyright (c) 2010-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 2010-2018, PostgreSQL Global Development Group * * * IDENTIFICATION @@ -53,6 +53,8 @@ static WalReceiverConn *libpqrcv_connect(const char *conninfo, char **err); static void libpqrcv_check_conninfo(const char *conninfo); static char *libpqrcv_get_conninfo(WalReceiverConn *conn); +static void libpqrcv_get_senderinfo(WalReceiverConn *conn, + char **sender_host, int *sender_port); static char *libpqrcv_identify_system(WalReceiverConn *conn, TimeLineID *primary_tli, int *server_version); @@ -82,6 +84,7 @@ static WalReceiverFunctionsType PQWalReceiverFunctions = { libpqrcv_connect, libpqrcv_check_conninfo, libpqrcv_get_conninfo, + libpqrcv_get_senderinfo, libpqrcv_identify_system, libpqrcv_readtimelinehistoryfile, libpqrcv_startstreaming, @@ -125,10 +128,7 @@ libpqrcv_connect(const char *conninfo, bool logical, const char *appname, /* * We use the expand_dbname parameter to process the connection string (or - * URI), and pass some extra options. The deliberately undocumented - * parameter "replication=true" makes it a replication connection. The - * database name is ignored by the server in replication mode, but specify - * "replication" for .pgpass lookup. + * URI), and pass some extra options. */ keys[i] = "dbname"; vals[i] = conninfo; @@ -136,6 +136,10 @@ libpqrcv_connect(const char *conninfo, bool logical, const char *appname, vals[i] = logical ? "database" : "true"; if (!logical) { + /* + * The database name is ignored by the server in replication mode, but + * specify "replication" for .pgpass lookup. + */ keys[++i] = "dbname"; vals[i] = "replication"; } @@ -282,6 +286,29 @@ libpqrcv_get_conninfo(WalReceiverConn *conn) return retval; } +/* + * Provides information of sender this WAL receiver is connected to. + */ +static void +libpqrcv_get_senderinfo(WalReceiverConn *conn, char **sender_host, + int *sender_port) +{ + char *ret = NULL; + + *sender_host = NULL; + *sender_port = 0; + + Assert(conn->streamConn != NULL); + + ret = PQhost(conn->streamConn); + if (ret && strlen(ret) != 0) + *sender_host = pstrdup(ret); + + ret = PQport(conn->streamConn); + if (ret && strlen(ret) != 0) + *sender_port = atoi(ret); +} + /* * Check that primary's system identifier matches ours, and fetch the current * timeline ID of the primary. 
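libpqrcv_get_senderinfo() below only wraps two libpq accessors; PQhost() and PQport() return strings owned by the connection object, so a caller that wants to keep the values must copy them (the server code uses pstrdup). A small hedged sketch against plain libpq, without any walreceiver plumbing; the connection string and helper name are illustrative:

    #include <libpq-fe.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    /* Copy out the sender's host and port from an established connection. */
    static void
    demo_get_senderinfo(PGconn *conn, char **host, int *port)
    {
        const char *val;

        *host = NULL;
        *port = 0;

        val = PQhost(conn);
        if (val && val[0] != '\0')
            *host = strdup(val);

        val = PQport(conn);
        if (val && val[0] != '\0')
            *port = atoi(val);
    }

    int
    main(void)
    {
        PGconn     *conn = PQconnectdb("host=localhost port=5432 dbname=postgres");
        char       *host;
        int         port;

        demo_get_senderinfo(conn, &host, &port);
        printf("sender %s:%d (status %d)\n",
               host ? host : "?", port, (int) PQstatus(conn));
        free(host);
        PQfinish(conn);
        return 0;
    }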
@@ -318,7 +345,7 @@ libpqrcv_identify_system(WalReceiverConn *conn, TimeLineID *primary_tli, ntuples, nfields, 3, 1))); } primary_sysid = pstrdup(PQgetvalue(res, 0, 0)); - *primary_tli = pg_atoi(PQgetvalue(res, 0, 1), 4, 0); + *primary_tli = pg_strtoint32(PQgetvalue(res, 0, 1)); PQclear(res); *server_version = PQserverVersion(conn->streamConn); @@ -453,7 +480,7 @@ libpqrcv_endstreaming(WalReceiverConn *conn, TimeLineID *next_tli) if (PQnfields(res) < 2 || PQntuples(res) != 1) ereport(ERROR, (errmsg("unexpected result set after end-of-streaming"))); - *next_tli = pg_atoi(PQgetvalue(res, 0, 0), sizeof(uint32), 0); + *next_tli = pg_strtoint32(PQgetvalue(res, 0, 0)); PQclear(res); /* the result set should be followed by CommandComplete */ diff --git a/src/backend/replication/logical/decode.c b/src/backend/replication/logical/decode.c index 486fd0c988..afb497227e 100644 --- a/src/backend/replication/logical/decode.c +++ b/src/backend/replication/logical/decode.c @@ -16,7 +16,7 @@ * contents of records in here except turning them into a more usable * format. * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * IDENTIFICATION @@ -65,6 +65,7 @@ static void DecodeLogicalMsgOp(LogicalDecodingContext *ctx, XLogRecordBuffer *bu static void DecodeInsert(LogicalDecodingContext *ctx, XLogRecordBuffer *buf); static void DecodeUpdate(LogicalDecodingContext *ctx, XLogRecordBuffer *buf); static void DecodeDelete(LogicalDecodingContext *ctx, XLogRecordBuffer *buf); +static void DecodeTruncate(LogicalDecodingContext *ctx, XLogRecordBuffer *buf); static void DecodeMultiInsert(LogicalDecodingContext *ctx, XLogRecordBuffer *buf); static void DecodeSpecConfirm(LogicalDecodingContext *ctx, XLogRecordBuffer *buf); @@ -88,6 +89,9 @@ static void DecodeXLogTuple(char *data, Size len, ReorderBufferTupleBuf *tup); * call ReorderBufferProcessXid for each record type by default, because * e.g. empty xacts can be handled more efficiently if there's no previous * state for them. + * + * We also support the ability to fast forward thru records, skipping some + * record types completely - see individual record types for details. */ void LogicalDecodingProcessRecord(LogicalDecodingContext *ctx, XLogReaderState *record) @@ -332,8 +336,10 @@ DecodeStandbyOp(LogicalDecodingContext *ctx, XLogRecordBuffer *buf) xl_invalidations *invalidations = (xl_invalidations *) XLogRecGetData(r); - ReorderBufferImmediateInvalidation( - ctx->reorder, invalidations->nmsgs, invalidations->msgs); + if (!ctx->fast_forward) + ReorderBufferImmediateInvalidation(ctx->reorder, + invalidations->nmsgs, + invalidations->msgs); } break; default: @@ -353,14 +359,19 @@ DecodeHeap2Op(LogicalDecodingContext *ctx, XLogRecordBuffer *buf) ReorderBufferProcessXid(ctx->reorder, xid, buf->origptr); - /* no point in doing anything yet */ - if (SnapBuildCurrentState(builder) < SNAPBUILD_FULL_SNAPSHOT) + /* + * If we don't have snapshot or we are just fast-forwarding, there is no + * point in decoding changes. 
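Several decode routines below gain the same two-part guard: bail out when no full snapshot has been assembled yet, and also when the context is only fast-forwarding and therefore has no output plugin to feed. A trivial sketch of that guard shape, with all types and names invented for illustration:

    #include <stdbool.h>
    #include <stdio.h>

    typedef enum
    {
        DEMO_SNAPBUILD_START,
        DEMO_SNAPBUILD_FULL_SNAPSHOT,
        DEMO_SNAPBUILD_CONSISTENT
    } DemoSnapState;

    typedef struct
    {
        DemoSnapState snap_state;
        bool        fast_forward;   /* advance position without decoding */
    } DemoContext;

    /* Decode one change record, or skip it when decoding would be pointless. */
    static void
    demo_decode_change(DemoContext *ctx, int change_id)
    {
        if (ctx->snap_state < DEMO_SNAPBUILD_FULL_SNAPSHOT || ctx->fast_forward)
            return;                 /* nothing to emit yet, or at all */

        printf("decoded change %d\n", change_id);
    }

    int
    main(void)
    {
        DemoContext ctx = {DEMO_SNAPBUILD_FULL_SNAPSHOT, true};

        demo_decode_change(&ctx, 1);    /* skipped: fast-forward */
        ctx.fast_forward = false;
        demo_decode_change(&ctx, 2);    /* decoded */
        return 0;
    }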
+ */ + if (SnapBuildCurrentState(builder) < SNAPBUILD_FULL_SNAPSHOT || + ctx->fast_forward) return; switch (info) { case XLOG_HEAP2_MULTI_INSERT: - if (SnapBuildProcessChange(builder, xid, buf->origptr)) + if (!ctx->fast_forward && + SnapBuildProcessChange(builder, xid, buf->origptr)) DecodeMultiInsert(ctx, buf); break; case XLOG_HEAP2_NEW_CID: @@ -408,8 +419,12 @@ DecodeHeapOp(LogicalDecodingContext *ctx, XLogRecordBuffer *buf) ReorderBufferProcessXid(ctx->reorder, xid, buf->origptr); - /* no point in doing anything yet */ - if (SnapBuildCurrentState(builder) < SNAPBUILD_FULL_SNAPSHOT) + /* + * If we don't have snapshot or we are just fast-forwarding, there is no + * point in decoding data changes. + */ + if (SnapBuildCurrentState(builder) < SNAPBUILD_FULL_SNAPSHOT || + ctx->fast_forward) return; switch (info) @@ -435,6 +450,11 @@ DecodeHeapOp(LogicalDecodingContext *ctx, XLogRecordBuffer *buf) DecodeDelete(ctx, buf); break; + case XLOG_HEAP_TRUNCATE: + if (SnapBuildProcessChange(builder, xid, buf->origptr)) + DecodeTruncate(ctx, buf); + break; + case XLOG_HEAP_INPLACE: /* @@ -501,8 +521,12 @@ DecodeLogicalMsgOp(LogicalDecodingContext *ctx, XLogRecordBuffer *buf) ReorderBufferProcessXid(ctx->reorder, XLogRecGetXid(r), buf->origptr); - /* No point in doing anything yet. */ - if (SnapBuildCurrentState(builder) < SNAPBUILD_FULL_SNAPSHOT) + /* + * If we don't have snapshot or we are just fast-forwarding, there is no + * point in decoding messages. + */ + if (SnapBuildCurrentState(builder) < SNAPBUILD_FULL_SNAPSHOT || + ctx->fast_forward) return; message = (xl_logical_message *) XLogRecGetData(r); @@ -554,8 +578,9 @@ DecodeCommit(LogicalDecodingContext *ctx, XLogRecordBuffer *buf, */ if (parsed->nmsgs > 0) { - ReorderBufferAddInvalidations(ctx->reorder, xid, buf->origptr, - parsed->nmsgs, parsed->msgs); + if (!ctx->fast_forward) + ReorderBufferAddInvalidations(ctx->reorder, xid, buf->origptr, + parsed->nmsgs, parsed->msgs); ReorderBufferXidSetCatalogChanges(ctx->reorder, xid, buf->origptr); } @@ -574,6 +599,7 @@ DecodeCommit(LogicalDecodingContext *ctx, XLogRecordBuffer *buf, * are restarting or if we haven't assembled a consistent snapshot yet. * 2) The transaction happened in another database. * 3) The output plugin is not interested in the origin. + * 4) We are doing fast-forwarding * * We can't just use ReorderBufferAbort() here, because we need to execute * the transaction's invalidations. 
This currently won't be needed if @@ -589,7 +615,7 @@ DecodeCommit(LogicalDecodingContext *ctx, XLogRecordBuffer *buf, */ if (SnapBuildXactNeedsSkip(ctx->snapshot_builder, buf->origptr) || (parsed->dbId != InvalidOid && parsed->dbId != ctx->slot->data.database) || - FilterByOrigin(ctx, origin_id)) + ctx->fast_forward || FilterByOrigin(ctx, origin_id)) { for (i = 0; i < parsed->nsubxacts; i++) { @@ -805,6 +831,42 @@ DecodeDelete(LogicalDecodingContext *ctx, XLogRecordBuffer *buf) ReorderBufferQueueChange(ctx->reorder, XLogRecGetXid(r), buf->origptr, change); } +/* + * Parse XLOG_HEAP_TRUNCATE from wal + */ +static void +DecodeTruncate(LogicalDecodingContext *ctx, XLogRecordBuffer *buf) +{ + XLogReaderState *r = buf->record; + xl_heap_truncate *xlrec; + ReorderBufferChange *change; + + xlrec = (xl_heap_truncate *) XLogRecGetData(r); + + /* only interested in our database */ + if (xlrec->dbId != ctx->slot->data.database) + return; + + /* output plugin doesn't look for this origin, no need to queue */ + if (FilterByOrigin(ctx, XLogRecGetOrigin(r))) + return; + + change = ReorderBufferGetChange(ctx->reorder); + change->action = REORDER_BUFFER_CHANGE_TRUNCATE; + change->origin_id = XLogRecGetOrigin(r); + if (xlrec->flags & XLH_TRUNCATE_CASCADE) + change->data.truncate.cascade = true; + if (xlrec->flags & XLH_TRUNCATE_RESTART_SEQS) + change->data.truncate.restart_seqs = true; + change->data.truncate.nrelids = xlrec->nrelids; + change->data.truncate.relids = ReorderBufferGetRelids(ctx->reorder, + xlrec->nrelids); + memcpy(change->data.truncate.relids, xlrec->relids, + xlrec->nrelids * sizeof(Oid)); + ReorderBufferQueueChange(ctx->reorder, XLogRecGetXid(r), + buf->origptr, change); +} + /* * Decode XLOG_HEAP2_MULTI_INSERT_insert record into multiple tuplebufs. * diff --git a/src/backend/replication/logical/launcher.c b/src/backend/replication/logical/launcher.c index 6c894421a3..ada16adb67 100644 --- a/src/backend/replication/logical/launcher.c +++ b/src/backend/replication/logical/launcher.c @@ -2,7 +2,7 @@ * launcher.c * PostgreSQL logical replication worker launcher process * - * Copyright (c) 2016-2017, PostgreSQL Global Development Group + * Copyright (c) 2016-2018, PostgreSQL Global Development Group * * IDENTIFICATION * src/backend/replication/logical/launcher.c @@ -79,7 +79,19 @@ typedef struct LogicalRepWorkerId Oid relid; } LogicalRepWorkerId; -static List *on_commit_stop_workers = NIL; +typedef struct StopWorkersData +{ + int nestDepth; /* Sub-transaction nest level */ + List *workers; /* List of LogicalRepWorkerId */ + struct StopWorkersData *parent; /* This need not be an immediate + * subtransaction parent */ +} StopWorkersData; + +/* + * Stack of StopWorkersData elements. Each stack element contains the workers + * to be stopped for that subtransaction. + */ +static StopWorkersData *on_commit_stop_workers = NULL; static void ApplyLauncherWakeup(void); static void logicalrep_launcher_onexit(int code, Datum arg); @@ -168,14 +180,11 @@ get_subscription_list(void) */ static void WaitForReplicationWorkerAttach(LogicalRepWorker *worker, + uint16 generation, BackgroundWorkerHandle *handle) { BgwHandleStatus status; int rc; - uint16 generation; - - /* Remember generation for future identification. */ - generation = worker->generation; for (;;) { @@ -282,7 +291,7 @@ logicalrep_workers_find(Oid subid, bool only_running) } /* - * Start new apply background worker. + * Start new apply background worker, if possible. 
*/ void logicalrep_worker_launch(Oid dbid, Oid subid, const char *subname, Oid userid, @@ -290,6 +299,7 @@ logicalrep_worker_launch(Oid dbid, Oid subid, const char *subname, Oid userid, { BackgroundWorker bgw; BackgroundWorkerHandle *bgw_handle; + uint16 generation; int i; int slot = 0; LogicalRepWorker *worker = NULL; @@ -406,6 +416,9 @@ logicalrep_worker_launch(Oid dbid, Oid subid, const char *subname, Oid userid, worker->reply_lsn = InvalidXLogRecPtr; TIMESTAMP_NOBEGIN(worker->reply_time); + /* Before releasing lock, remember generation for future identification. */ + generation = worker->generation; + LWLockRelease(LogicalRepWorkerLock); /* Register the new dynamic worker. */ @@ -421,6 +434,7 @@ logicalrep_worker_launch(Oid dbid, Oid subid, const char *subname, Oid userid, else snprintf(bgw.bgw_name, BGW_MAXLEN, "logical replication worker for subscription %u", subid); + snprintf(bgw.bgw_type, BGW_MAXLEN, "logical replication worker"); bgw.bgw_restart_time = BGW_NEVER_RESTART; bgw.bgw_notify_pid = MyProcPid; @@ -428,6 +442,12 @@ logicalrep_worker_launch(Oid dbid, Oid subid, const char *subname, Oid userid, if (!RegisterDynamicBackgroundWorker(&bgw, &bgw_handle)) { + /* Failed to start worker, so clean up the worker slot. */ + LWLockAcquire(LogicalRepWorkerLock, LW_EXCLUSIVE); + Assert(generation == worker->generation); + logicalrep_worker_cleanup(worker); + LWLockRelease(LogicalRepWorkerLock); + ereport(WARNING, (errcode(ERRCODE_CONFIGURATION_LIMIT_EXCEEDED), errmsg("out of background worker slots"), @@ -436,7 +456,7 @@ logicalrep_worker_launch(Oid dbid, Oid subid, const char *subname, Oid userid, } /* Now wait until it attaches. */ - WaitForReplicationWorkerAttach(worker, bgw_handle); + WaitForReplicationWorkerAttach(worker, generation, bgw_handle); } /* @@ -551,17 +571,41 @@ logicalrep_worker_stop(Oid subid, Oid relid) void logicalrep_worker_stop_at_commit(Oid subid, Oid relid) { + int nestDepth = GetCurrentTransactionNestLevel(); LogicalRepWorkerId *wid; MemoryContext oldctx; /* Make sure we store the info in context that survives until commit. */ oldctx = MemoryContextSwitchTo(TopTransactionContext); + /* Check that previous transactions were properly cleaned up. */ + Assert(on_commit_stop_workers == NULL || + nestDepth >= on_commit_stop_workers->nestDepth); + + /* + * Push a new stack element if we don't already have one for the current + * nestDepth. + */ + if (on_commit_stop_workers == NULL || + nestDepth > on_commit_stop_workers->nestDepth) + { + StopWorkersData *newdata = palloc(sizeof(StopWorkersData)); + + newdata->nestDepth = nestDepth; + newdata->workers = NIL; + newdata->parent = on_commit_stop_workers; + on_commit_stop_workers = newdata; + } + + /* + * Finally add a new worker into the worker list of the current + * subtransaction. 
+ */ wid = palloc(sizeof(LogicalRepWorkerId)); wid->subid = subid; wid->relid = relid; - - on_commit_stop_workers = lappend(on_commit_stop_workers, wid); + on_commit_stop_workers->workers = + lappend(on_commit_stop_workers->workers, wid); MemoryContextSwitchTo(oldctx); } @@ -768,6 +812,8 @@ ApplyLauncherRegister(void) snprintf(bgw.bgw_function_name, BGW_MAXLEN, "ApplyLauncherMain"); snprintf(bgw.bgw_name, BGW_MAXLEN, "logical replication launcher"); + snprintf(bgw.bgw_type, BGW_MAXLEN, + "logical replication launcher"); bgw.bgw_restart_time = 5; bgw.bgw_notify_pid = 0; bgw.bgw_main_arg = (Datum) 0; @@ -813,7 +859,7 @@ ApplyLauncherShmemInit(void) bool XactManipulatesLogicalReplicationWorkers(void) { - return (on_commit_stop_workers != NIL); + return (on_commit_stop_workers != NULL); } /* @@ -822,15 +868,25 @@ XactManipulatesLogicalReplicationWorkers(void) void AtEOXact_ApplyLauncher(bool isCommit) { + + Assert(on_commit_stop_workers == NULL || + (on_commit_stop_workers->nestDepth == 1 && + on_commit_stop_workers->parent == NULL)); + if (isCommit) { ListCell *lc; - foreach(lc, on_commit_stop_workers) + if (on_commit_stop_workers != NULL) { - LogicalRepWorkerId *wid = lfirst(lc); + List *workers = on_commit_stop_workers->workers; - logicalrep_worker_stop(wid->subid, wid->relid); + foreach(lc, workers) + { + LogicalRepWorkerId *wid = lfirst(lc); + + logicalrep_worker_stop(wid->subid, wid->relid); + } } if (on_commit_launcher_wakeup) @@ -841,10 +897,64 @@ AtEOXact_ApplyLauncher(bool isCommit) * No need to pfree on_commit_stop_workers. It was allocated in * transaction memory context, which is going to be cleaned soon. */ - on_commit_stop_workers = NIL; + on_commit_stop_workers = NULL; on_commit_launcher_wakeup = false; } +/* + * On commit, merge the current on_commit_stop_workers list into the + * immediate parent, if present. + * On rollback, discard the current on_commit_stop_workers list. + * Pop out the stack. + */ +void +AtEOSubXact_ApplyLauncher(bool isCommit, int nestDepth) +{ + StopWorkersData *parent; + + /* Exit immediately if there's no work to do at this level. */ + if (on_commit_stop_workers == NULL || + on_commit_stop_workers->nestDepth < nestDepth) + return; + + Assert(on_commit_stop_workers->nestDepth == nestDepth); + + parent = on_commit_stop_workers->parent; + + if (isCommit) + { + /* + * If the upper stack element is not an immediate parent + * subtransaction, just decrement the notional nesting depth without + * doing any real work. Else, we need to merge the current workers + * list into the parent. + */ + if (!parent || parent->nestDepth < nestDepth - 1) + { + on_commit_stop_workers->nestDepth--; + return; + } + + parent->workers = + list_concat(parent->workers, on_commit_stop_workers->workers); + } + else + { + /* + * Abandon everything that was done at this nesting level. Explicitly + * free memory to avoid a transaction-lifespan leak. + */ + list_free_deep(on_commit_stop_workers->workers); + } + + /* + * We have taken care of the current subtransaction workers list for both + * abort or commit. So we are ready to pop the stack. + */ + pfree(on_commit_stop_workers); + on_commit_stop_workers = parent; +} + /* * Request wakeup of the launcher on commit of the transaction. * @@ -891,7 +1001,7 @@ ApplyLauncherMain(Datum main_arg) * Establish connection to nailed catalogs (we only ever access * pg_subscription). 
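The launcher now keeps the to-be-stopped workers in a stack of frames keyed by subtransaction nesting depth: a subcommit merges its frame into the nearest enclosing one (or merely relabels it when there is no frame for the immediately enclosing level), while a subabort discards it. The following is a minimal sketch of that push/merge/discard discipline on a generic linked list; every name is invented, and this is not the server's StopWorkersData code:

    #include <stdbool.h>
    #include <stdio.h>
    #include <stdlib.h>

    typedef struct DemoNode
    {
        int         value;
        struct DemoNode *next;
    } DemoNode;

    typedef struct DemoFrame
    {
        int         nest_depth;     /* subtransaction nesting level */
        DemoNode   *items;          /* work queued at this level */
        struct DemoFrame *parent;   /* not necessarily depth - 1 */
    } DemoFrame;

    static DemoFrame *stack = NULL;

    /* Queue a value at the given nesting depth, pushing a frame if needed. */
    static void
    demo_queue(int value, int nest_depth)
    {
        DemoNode   *n;

        if (stack == NULL || nest_depth > stack->nest_depth)
        {
            DemoFrame  *f = malloc(sizeof(DemoFrame));

            f->nest_depth = nest_depth;
            f->items = NULL;
            f->parent = stack;
            stack = f;
        }
        n = malloc(sizeof(DemoNode));
        n->value = value;
        n->next = stack->items;
        stack->items = n;
    }

    /* At subtransaction end: merge into the parent on commit, drop on abort. */
    static void
    demo_at_subxact_end(bool is_commit, int nest_depth)
    {
        DemoFrame  *parent;

        if (stack == NULL || stack->nest_depth < nest_depth)
            return;                 /* nothing queued at this level */

        parent = stack->parent;
        if (is_commit)
        {
            /* no frame for the immediately enclosing level: just relabel */
            if (parent == NULL || parent->nest_depth < nest_depth - 1)
            {
                stack->nest_depth--;
                return;
            }
            /* otherwise splice this frame's items onto the parent frame */
            if (stack->items != NULL)
            {
                DemoNode   *tail = stack->items;

                while (tail->next != NULL)
                    tail = tail->next;
                tail->next = parent->items;
                parent->items = stack->items;
            }
        }
        else
        {
            while (stack->items != NULL)
            {
                DemoNode   *next = stack->items->next;

                free(stack->items);
                stack->items = next;
            }
        }
        free(stack);
        stack = parent;
    }

    int
    main(void)
    {
        demo_queue(1, 1);               /* top-level transaction */
        demo_queue(2, 2);               /* subtransaction */
        demo_at_subxact_end(true, 2);   /* merge level 2 into level 1 */
        for (DemoNode *n = stack->items; n; n = n->next)
            printf("%d\n", n->value);   /* prints 2 then 1 */
        return 0;
    }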
*/ - BackgroundWorkerInitializeConnection(NULL, NULL); + BackgroundWorkerInitializeConnection(NULL, NULL, 0); /* Enter main loop */ for (;;) @@ -915,9 +1025,7 @@ ApplyLauncherMain(Datum main_arg) /* Use temporary context for the database list and worker info. */ subctx = AllocSetContextCreate(TopMemoryContext, "Logical Replication Launcher sublist", - ALLOCSET_DEFAULT_MINSIZE, - ALLOCSET_DEFAULT_INITSIZE, - ALLOCSET_DEFAULT_MAXSIZE); + ALLOCSET_DEFAULT_SIZES); oldctx = MemoryContextSwitchTo(subctx); /* search for subscriptions to start or stop. */ diff --git a/src/backend/replication/logical/logical.c b/src/backend/replication/logical/logical.c index efb9785f25..9f99e4f049 100644 --- a/src/backend/replication/logical/logical.c +++ b/src/backend/replication/logical/logical.c @@ -2,7 +2,7 @@ * logical.c * PostgreSQL logical decoding coordination * - * Copyright (c) 2012-2017, PostgreSQL Global Development Group + * Copyright (c) 2012-2018, PostgreSQL Global Development Group * * IDENTIFICATION * src/backend/replication/logical/logical.c @@ -62,6 +62,8 @@ static void commit_cb_wrapper(ReorderBuffer *cache, ReorderBufferTXN *txn, XLogRecPtr commit_lsn); static void change_cb_wrapper(ReorderBuffer *cache, ReorderBufferTXN *txn, Relation relation, ReorderBufferChange *change); +static void truncate_cb_wrapper(ReorderBuffer *cache, ReorderBufferTXN *txn, + int nrelations, Relation relations[], ReorderBufferChange *change); static void message_cb_wrapper(ReorderBuffer *cache, ReorderBufferTXN *txn, XLogRecPtr message_lsn, bool transactional, const char *prefix, Size message_size, const char *message); @@ -77,6 +79,11 @@ CheckLogicalDecodingRequirements(void) { CheckSlotRequirements(); + /* + * NB: Adding a new requirement likely means that RestoreSlotFromDisk() + * needs the same check. + */ + if (wal_level < WAL_LEVEL_LOGICAL) ereport(ERROR, (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE), @@ -115,6 +122,7 @@ StartupDecodingContext(List *output_plugin_options, XLogRecPtr start_lsn, TransactionId xmin_horizon, bool need_full_snapshot, + bool fast_forward, XLogPageReadCB read_page, LogicalOutputPluginWriterPrepareWrite prepare_write, LogicalOutputPluginWriterWrite do_write, @@ -140,7 +148,8 @@ StartupDecodingContext(List *output_plugin_options, * (re-)load output plugins, so we detect a bad (removed) output plugin * now. 
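The launcher hunk above replaces the three explicit size arguments with the ALLOCSET_DEFAULT_SIZES convenience macro; the two spellings create equivalent contexts, as the small sketch below illustrates (the context name is arbitrary):

#include "postgres.h"
#include "utils/memutils.h"

static void
example_context_create(void)
{
	MemoryContext ctx;

	/* Old spelling: three explicit size arguments. */
	ctx = AllocSetContextCreate(TopMemoryContext, "example",
								ALLOCSET_DEFAULT_MINSIZE,
								ALLOCSET_DEFAULT_INITSIZE,
								ALLOCSET_DEFAULT_MAXSIZE);
	MemoryContextDelete(ctx);

	/* New spelling: the macro expands to the same triple. */
	ctx = AllocSetContextCreate(TopMemoryContext, "example",
								ALLOCSET_DEFAULT_SIZES);
	MemoryContextDelete(ctx);
}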
*/ - LoadOutputPlugin(&ctx->callbacks, NameStr(slot->data.plugin)); + if (!fast_forward) + LoadOutputPlugin(&ctx->callbacks, NameStr(slot->data.plugin)); /* * Now that the slot's xmin has been set, we can announce ourselves as a @@ -163,7 +172,7 @@ StartupDecodingContext(List *output_plugin_options, ctx->slot = slot; - ctx->reader = XLogReaderAllocate(read_page, ctx); + ctx->reader = XLogReaderAllocate(wal_segment_size, read_page, ctx); if (!ctx->reader) ereport(ERROR, (errcode(ERRCODE_OUT_OF_MEMORY), @@ -181,6 +190,7 @@ StartupDecodingContext(List *output_plugin_options, /* wrap output plugin callbacks, so we can add error context information */ ctx->reorder->begin = begin_cb_wrapper; ctx->reorder->apply_change = change_cb_wrapper; + ctx->reorder->apply_truncate = truncate_cb_wrapper; ctx->reorder->commit = commit_cb_wrapper; ctx->reorder->message = message_cb_wrapper; @@ -191,6 +201,8 @@ StartupDecodingContext(List *output_plugin_options, ctx->output_plugin_options = output_plugin_options; + ctx->fast_forward = fast_forward; + MemoryContextSwitchTo(old_context); return ctx; @@ -290,10 +302,12 @@ CreateInitDecodingContext(char *plugin, xmin_horizon = GetOldestSafeDecodingTransactionId(!need_full_snapshot); + SpinLockAcquire(&slot->mutex); slot->effective_catalog_xmin = xmin_horizon; slot->data.catalog_xmin = xmin_horizon; if (need_full_snapshot) slot->effective_xmin = xmin_horizon; + SpinLockRelease(&slot->mutex); ReplicationSlotsComputeRequiredXmin(true); @@ -303,8 +317,9 @@ CreateInitDecodingContext(char *plugin, ReplicationSlotSave(); ctx = StartupDecodingContext(NIL, InvalidXLogRecPtr, xmin_horizon, - need_full_snapshot, read_page, prepare_write, - do_write, update_progress); + need_full_snapshot, false, + read_page, prepare_write, do_write, + update_progress); /* call output plugin initialization callback */ old_context = MemoryContextSwitchTo(ctx->context); @@ -312,6 +327,8 @@ CreateInitDecodingContext(char *plugin, startup_cb_wrapper(ctx, &ctx->options, true); MemoryContextSwitchTo(old_context); + ctx->reorder->output_rewrites = ctx->options.receive_rewrites; + return ctx; } @@ -326,7 +343,10 @@ CreateInitDecodingContext(char *plugin, * that, see below). * * output_plugin_options - * contains options passed to the output plugin. + * options passed to the output plugin. + * + * fast_forward + * bypass the generation of logical changes. 
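With fast_forward set, a caller can move a slot's decoding position forward without loading the output plugin or emitting any change. A hedged sketch of such a caller, assuming a logical slot is already acquired; the function name and the NULL writer callbacks are illustrative, and error handling and confirmed-flush updates are omitted:

#include "postgres.h"
#include "miscadmin.h"
#include "access/xlogreader.h"
#include "replication/logical.h"
#include "replication/logicalfuncs.h"
#include "replication/slot.h"

static XLogRecPtr
example_fast_forward_slot(XLogRecPtr moveto)
{
	LogicalDecodingContext *ctx;
	XLogRecPtr	startlsn = MyReplicationSlot->data.confirmed_flush;
	XLogRecPtr	retlsn = startlsn;

	/* fast_forward = true: the plugin is never loaded, no callbacks fire */
	ctx = CreateDecodingContext(InvalidXLogRecPtr, NIL,
								true,	/* fast_forward */
								logical_read_local_xlog_page,
								NULL, NULL, NULL);

	for (;;)
	{
		XLogRecord *record;
		char	   *errm = NULL;

		if (ctx->reader->EndRecPtr != InvalidXLogRecPtr &&
			ctx->reader->EndRecPtr >= moveto)
			break;

		record = XLogReadRecord(ctx->reader, startlsn, &errm);
		if (errm)
			elog(ERROR, "%s", errm);
		if (record == NULL)
			break;				/* reached the end of available WAL */

		startlsn = InvalidXLogRecPtr;	/* continue from the last record */

		/* advances slot bookkeeping (xmin, restart_lsn), emits nothing */
		LogicalDecodingProcessRecord(ctx, ctx->reader);

		CHECK_FOR_INTERRUPTS();
	}

	if (ctx->reader->EndRecPtr != InvalidXLogRecPtr)
		retlsn = ctx->reader->EndRecPtr;

	FreeDecodingContext(ctx);
	return retlsn;
}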
* * read_page, prepare_write, do_write, update_progress * callbacks that have to be filled to perform the use-case dependent, @@ -342,6 +362,7 @@ CreateInitDecodingContext(char *plugin, LogicalDecodingContext * CreateDecodingContext(XLogRecPtr start_lsn, List *output_plugin_options, + bool fast_forward, XLogPageReadCB read_page, LogicalOutputPluginWriterPrepareWrite prepare_write, LogicalOutputPluginWriterWrite do_write, @@ -395,8 +416,8 @@ CreateDecodingContext(XLogRecPtr start_lsn, ctx = StartupDecodingContext(output_plugin_options, start_lsn, InvalidTransactionId, false, - read_page, prepare_write, do_write, - update_progress); + fast_forward, read_page, prepare_write, + do_write, update_progress); /* call output plugin initialization callback */ old_context = MemoryContextSwitchTo(ctx->context); @@ -404,10 +425,12 @@ CreateDecodingContext(XLogRecPtr start_lsn, startup_cb_wrapper(ctx, &ctx->options, false); MemoryContextSwitchTo(old_context); + ctx->reorder->output_rewrites = ctx->options.receive_rewrites; + ereport(LOG, (errmsg("starting logical decoding for slot \"%s\"", NameStr(slot->data.name)), - errdetail("streaming transactions committing after %X/%X, reading WAL from %X/%X", + errdetail("Streaming transactions committing after %X/%X, reading WAL from %X/%X.", (uint32) (slot->data.confirmed_flush >> 32), (uint32) slot->data.confirmed_flush, (uint32) (slot->data.restart_lsn >> 32), @@ -432,13 +455,14 @@ void DecodingContextFindStartpoint(LogicalDecodingContext *ctx) { XLogRecPtr startptr; + ReplicationSlot *slot = ctx->slot; /* Initialize from where to start reading WAL. */ - startptr = ctx->slot->data.restart_lsn; + startptr = slot->data.restart_lsn; elog(DEBUG1, "searching for logical decoding starting point, starting at %X/%X", - (uint32) (ctx->slot->data.restart_lsn >> 32), - (uint32) ctx->slot->data.restart_lsn); + (uint32) (slot->data.restart_lsn >> 32), + (uint32) slot->data.restart_lsn); /* Wait for a consistent starting point */ for (;;) @@ -464,7 +488,9 @@ DecodingContextFindStartpoint(LogicalDecodingContext *ctx) CHECK_FOR_INTERRUPTS(); } - ctx->slot->data.confirmed_flush = ctx->reader->EndRecPtr; + SpinLockAcquire(&slot->mutex); + slot->data.confirmed_flush = ctx->reader->EndRecPtr; + SpinLockRelease(&slot->mutex); } /* @@ -573,6 +599,8 @@ startup_cb_wrapper(LogicalDecodingContext *ctx, OutputPluginOptions *opt, bool i LogicalErrorCallbackState state; ErrorContextCallback errcallback; + Assert(!ctx->fast_forward); + /* Push callback + info on the error context stack */ state.ctx = ctx; state.callback_name = "startup"; @@ -598,6 +626,8 @@ shutdown_cb_wrapper(LogicalDecodingContext *ctx) LogicalErrorCallbackState state; ErrorContextCallback errcallback; + Assert(!ctx->fast_forward); + /* Push callback + info on the error context stack */ state.ctx = ctx; state.callback_name = "shutdown"; @@ -629,6 +659,8 @@ begin_cb_wrapper(ReorderBuffer *cache, ReorderBufferTXN *txn) LogicalErrorCallbackState state; ErrorContextCallback errcallback; + Assert(!ctx->fast_forward); + /* Push callback + info on the error context stack */ state.ctx = ctx; state.callback_name = "begin"; @@ -658,6 +690,8 @@ commit_cb_wrapper(ReorderBuffer *cache, ReorderBufferTXN *txn, LogicalErrorCallbackState state; ErrorContextCallback errcallback; + Assert(!ctx->fast_forward); + /* Push callback + info on the error context stack */ state.ctx = ctx; state.callback_name = "commit"; @@ -687,6 +721,8 @@ change_cb_wrapper(ReorderBuffer *cache, ReorderBufferTXN *txn, LogicalErrorCallbackState state; 
ErrorContextCallback errcallback; + Assert(!ctx->fast_forward); + /* Push callback + info on the error context stack */ state.ctx = ctx; state.callback_name = "change"; @@ -714,6 +750,46 @@ change_cb_wrapper(ReorderBuffer *cache, ReorderBufferTXN *txn, error_context_stack = errcallback.previous; } +static void +truncate_cb_wrapper(ReorderBuffer *cache, ReorderBufferTXN *txn, + int nrelations, Relation relations[], ReorderBufferChange *change) +{ + LogicalDecodingContext *ctx = cache->private_data; + LogicalErrorCallbackState state; + ErrorContextCallback errcallback; + + Assert(!ctx->fast_forward); + + if (!ctx->callbacks.truncate_cb) + return; + + /* Push callback + info on the error context stack */ + state.ctx = ctx; + state.callback_name = "truncate"; + state.report_location = change->lsn; + errcallback.callback = output_plugin_error_callback; + errcallback.arg = (void *) &state; + errcallback.previous = error_context_stack; + error_context_stack = &errcallback; + + /* set output state */ + ctx->accept_writes = true; + ctx->write_xid = txn->xid; + + /* + * report this change's lsn so replies from clients can give an up2date + * answer. This won't ever be enough (and shouldn't be!) to confirm + * receipt of this transaction, but it might allow another transaction's + * commit to be confirmed with one message. + */ + ctx->write_location = change->lsn; + + ctx->callbacks.truncate_cb(ctx, txn, nrelations, relations, change); + + /* Pop the error context stack */ + error_context_stack = errcallback.previous; +} + bool filter_by_origin_cb_wrapper(LogicalDecodingContext *ctx, RepOriginId origin_id) { @@ -721,6 +797,8 @@ filter_by_origin_cb_wrapper(LogicalDecodingContext *ctx, RepOriginId origin_id) ErrorContextCallback errcallback; bool ret; + Assert(!ctx->fast_forward); + /* Push callback + info on the error context stack */ state.ctx = ctx; state.callback_name = "filter_by_origin"; @@ -751,6 +829,8 @@ message_cb_wrapper(ReorderBuffer *cache, ReorderBufferTXN *txn, LogicalErrorCallbackState state; ErrorContextCallback errcallback; + Assert(!ctx->fast_forward); + if (ctx->callbacks.message_cb == NULL) return; @@ -838,7 +918,7 @@ LogicalIncreaseXminForSlot(XLogRecPtr current_lsn, TransactionId xmin) * Mark the minimal LSN (restart_lsn) we need to read to replay all * transactions that have not yet committed at current_lsn. * - * Just like IncreaseRestartDecodingForSlot this only takes effect when the + * Just like LogicalIncreaseXminForSlot this only takes effect when the * client has confirmed to have received current_lsn. */ void diff --git a/src/backend/replication/logical/logicalfuncs.c b/src/backend/replication/logical/logicalfuncs.c index a3ba2b1266..45aae71a49 100644 --- a/src/backend/replication/logical/logicalfuncs.c +++ b/src/backend/replication/logical/logicalfuncs.c @@ -6,7 +6,7 @@ * logical replication slots via SQL. 
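Because truncate_cb_wrapper above returns early when the callback is unset, existing output plugins keep working unchanged. A hedged sketch of how a plugin might opt in; the plugin and function names are placeholders, and the callback signature simply mirrors the wrapper's call:

#include "postgres.h"
#include "lib/stringinfo.h"
#include "replication/logical.h"
#include "replication/output_plugin.h"
#include "utils/rel.h"

/* Hypothetical plugin callback for decoded TRUNCATEs. */
static void
example_decode_truncate(LogicalDecodingContext *ctx, ReorderBufferTXN *txn,
						int nrelations, Relation relations[],
						ReorderBufferChange *change)
{
	int			i;

	OutputPluginPrepareWrite(ctx, true);
	appendStringInfoString(ctx->out, "TRUNCATE:");
	for (i = 0; i < nrelations; i++)
		appendStringInfo(ctx->out, " %s",
						 RelationGetRelationName(relations[i]));
	OutputPluginWrite(ctx, true);
}

void
_PG_output_plugin_init(OutputPluginCallbacks *cb)
{
	/* ... required callbacks (begin_cb, change_cb, commit_cb, ...) omitted ... */

	/* Plugins that predate this field leave it NULL and are skipped above. */
	cb->truncate_cb = example_decode_truncate;
}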
* * - * Copyright (c) 2012-2017, PostgreSQL Global Development Group + * Copyright (c) 2012-2018, PostgreSQL Global Development Group * * IDENTIFICATION * src/backend/replication/logicalfuncs.c @@ -251,6 +251,7 @@ pg_logical_slot_get_changes_guts(FunctionCallInfo fcinfo, bool confirm, bool bin /* restart at slot's confirmed_flush */ ctx = CreateDecodingContext(InvalidXLogRecPtr, options, + false, logical_read_local_xlog_page, LogicalOutputPrepareWrite, LogicalOutputWrite, NULL); @@ -278,8 +279,6 @@ pg_logical_slot_get_changes_guts(FunctionCallInfo fcinfo, bool confirm, bool bin */ startptr = MyReplicationSlot->data.restart_lsn; - CurrentResourceOwner = ResourceOwnerCreate(CurrentResourceOwner, "logical decoding"); - /* invalidate non-timetravel entries */ InvalidateSystemCaches(); @@ -319,6 +318,11 @@ pg_logical_slot_get_changes_guts(FunctionCallInfo fcinfo, bool confirm, bool bin tuplestore_donestoring(tupstore); + /* + * Logical decoding could have clobbered CurrentResourceOwner during + * transaction management, so restore the executor's value. (This is + * a kluge, but it's not worth cleaning up right now.) + */ CurrentResourceOwner = old_resowner; /* diff --git a/src/backend/replication/logical/message.c b/src/backend/replication/logical/message.c index ef7d6c5cde..0eba74c26a 100644 --- a/src/backend/replication/logical/message.c +++ b/src/backend/replication/logical/message.c @@ -3,7 +3,7 @@ * message.c * Generic logical messages. * - * Copyright (c) 2013-2017, PostgreSQL Global Development Group + * Copyright (c) 2013-2018, PostgreSQL Global Development Group * * IDENTIFICATION * src/backend/replication/logical/message.c diff --git a/src/backend/replication/logical/origin.c b/src/backend/replication/logical/origin.c index 14cb3d0bf2..bf97dcdee4 100644 --- a/src/backend/replication/logical/origin.c +++ b/src/backend/replication/logical/origin.c @@ -3,7 +3,7 @@ * origin.c * Logical replication progress tracking support. * - * Copyright (c) 2013-2017, PostgreSQL Global Development Group + * Copyright (c) 2013-2018, PostgreSQL Global Development Group * * IDENTIFICATION * src/backend/replication/logical/origin.c @@ -60,7 +60,7 @@ * all our platforms, but it also simplifies memory ordering concerns * between the remote and local lsn. We use a lwlock instead of a spinlock * so it's less harmful to hold the lock over a WAL write - * (c.f. AdvanceReplicationProgress). + * (cf. AdvanceReplicationProgress). * * --------------------------------------------------------------------------- */ @@ -225,8 +225,10 @@ replorigin_by_name(char *roname, bool missing_ok) ReleaseSysCache(tuple); } else if (!missing_ok) - elog(ERROR, "cache lookup failed for replication origin '%s'", - roname); + ereport(ERROR, + (errcode(ERRCODE_UNDEFINED_OBJECT), + errmsg("replication origin \"%s\" does not exist", + roname))); return roident; } @@ -337,20 +339,26 @@ replorigin_drop(RepOriginId roident, bool nowait) Assert(IsTransactionState()); + /* + * To interlock against concurrent drops, we hold ExclusiveLock on + * pg_replication_origin throughout this function. + */ rel = heap_open(ReplicationOriginRelationId, ExclusiveLock); + /* + * First, clean up the slot state info, if there is any matching slot. + */ restart: tuple = NULL; - /* cleanup the slot state info */ LWLockAcquire(ReplicationOriginLock, LW_EXCLUSIVE); for (i = 0; i < max_replication_slots; i++) { ReplicationState *state = &replication_states[i]; - /* found our slot */ if (state->roident == roident) { + /* found our slot, is it busy? 
*/ if (state->acquired_by != 0) { ConditionVariable *cv; @@ -361,16 +369,23 @@ replorigin_drop(RepOriginId roident, bool nowait) errmsg("could not drop replication origin with OID %d, in use by PID %d", state->roident, state->acquired_by))); + + /* + * We must wait and then retry. Since we don't know which CV + * to wait on until here, we can't readily use + * ConditionVariablePrepareToSleep (calling it here would be + * wrong, since we could miss the signal if we did so); just + * use ConditionVariableSleep directly. + */ cv = &state->origin_cv; LWLockRelease(ReplicationOriginLock); - ConditionVariablePrepareToSleep(cv); + ConditionVariableSleep(cv, WAIT_EVENT_REPLICATION_ORIGIN_DROP); - ConditionVariableCancelSleep(); goto restart; } - /* first WAL log */ + /* first make a WAL log entry */ { xl_replorigin_drop xlrec; @@ -380,7 +395,7 @@ replorigin_drop(RepOriginId roident, bool nowait) XLogInsert(RM_REPLORIGIN_ID, XLOG_REPLORIGIN_DROP); } - /* then reset the in-memory entry */ + /* then clear the in-memory slot */ state->roident = InvalidRepOriginId; state->remote_lsn = InvalidXLogRecPtr; state->local_lsn = InvalidXLogRecPtr; @@ -388,7 +403,11 @@ replorigin_drop(RepOriginId roident, bool nowait) } } LWLockRelease(ReplicationOriginLock); + ConditionVariableCancelSleep(); + /* + * Now, we can delete the catalog entry. + */ tuple = SearchSysCache1(REPLORIGIDENT, ObjectIdGetDatum(roident)); if (!HeapTupleIsValid(tuple)) elog(ERROR, "cache lookup failed for replication origin with oid %u", @@ -437,8 +456,10 @@ replorigin_by_oid(RepOriginId roident, bool missing_ok, char **roname) *roname = NULL; if (!missing_ok) - elog(ERROR, "cache lookup failed for replication origin with oid %u", - roident); + ereport(ERROR, + (errcode(ERRCODE_UNDEFINED_OBJECT), + errmsg("replication origin with OID %u does not exist", + roident))); return false; } @@ -546,9 +567,8 @@ CheckPointReplicationOrigin(void) * no other backend can perform this at the same time, we're protected by * CheckpointLock. */ - tmpfd = OpenTransientFile((char *) tmppath, - O_CREAT | O_EXCL | O_WRONLY | PG_BINARY, - S_IRUSR | S_IWUSR); + tmpfd = OpenTransientFile(tmppath, + O_CREAT | O_EXCL | O_WRONLY | PG_BINARY); if (tmpfd < 0) ereport(PANIC, (errcode_for_file_access(), @@ -556,9 +576,15 @@ CheckPointReplicationOrigin(void) tmppath))); /* write magic */ + errno = 0; if ((write(tmpfd, &magic, sizeof(magic))) != sizeof(magic)) { + int save_errno = errno; + CloseTransientFile(tmpfd); + + /* if write didn't set errno, assume problem is no disk space */ + errno = save_errno ? save_errno : ENOSPC; ereport(PANIC, (errcode_for_file_access(), errmsg("could not write to file \"%s\": %m", @@ -594,10 +620,16 @@ CheckPointReplicationOrigin(void) /* make sure we only write out a commit that's persistent */ XLogFlush(local_lsn); + errno = 0; if ((write(tmpfd, &disk_state, sizeof(disk_state))) != sizeof(disk_state)) { + int save_errno = errno; + CloseTransientFile(tmpfd); + + /* if write didn't set errno, assume problem is no disk space */ + errno = save_errno ? save_errno : ENOSPC; ereport(PANIC, (errcode_for_file_access(), errmsg("could not write to file \"%s\": %m", @@ -611,9 +643,15 @@ CheckPointReplicationOrigin(void) /* write out the CRC */ FIN_CRC32C(crc); + errno = 0; if ((write(tmpfd, &crc, sizeof(crc))) != sizeof(crc)) { + int save_errno = errno; + CloseTransientFile(tmpfd); + + /* if write didn't set errno, assume problem is no disk space */ + errno = save_errno ? 
save_errno : ENOSPC; ereport(PANIC, (errcode_for_file_access(), errmsg("could not write to file \"%s\": %m", @@ -660,7 +698,7 @@ StartupReplicationOrigin(void) elog(DEBUG2, "starting up replication origin progress state"); - fd = OpenTransientFile((char *) path, O_RDONLY | PG_BINARY, 0); + fd = OpenTransientFile(path, O_RDONLY | PG_BINARY); /* * might have had max_replication_slots == 0 last run, or we just brought @@ -677,9 +715,18 @@ StartupReplicationOrigin(void) /* verify magic, that is written even if nothing was active */ readBytes = read(fd, &magic, sizeof(magic)); if (readBytes != sizeof(magic)) - ereport(PANIC, - (errmsg("could not read file \"%s\": %m", - path))); + { + if (readBytes < 0) + ereport(PANIC, + (errcode_for_file_access(), + errmsg("could not read file \"%s\": %m", + path))); + else + ereport(PANIC, + (errcode(ERRCODE_DATA_CORRUPTED), + errmsg("could not read file \"%s\": read %d of %zu", + path, readBytes, sizeof(magic)))); + } COMP_CRC32C(crc, &magic, sizeof(magic)); if (magic != REPLICATION_STATE_MAGIC) @@ -1205,7 +1252,7 @@ pg_replication_origin_drop(PG_FUNCTION_ARGS) roident = replorigin_by_name(name, false); Assert(OidIsValid(roident)); - replorigin_drop(roident, false); + replorigin_drop(roident, true); pfree(name); @@ -1413,7 +1460,7 @@ pg_show_replication_origin_status(PG_FUNCTION_ARGS) int i; #define REPLICATION_ORIGIN_PROGRESS_COLS 4 - /* we we want to return 0 rows if slot is set to zero */ + /* we want to return 0 rows if slot is set to zero */ replorigin_check_prerequisites(false, true); if (rsinfo == NULL || !IsA(rsinfo, ReturnSetInfo)) diff --git a/src/backend/replication/logical/proto.c b/src/backend/replication/logical/proto.c index 94dfee0b24..19451714da 100644 --- a/src/backend/replication/logical/proto.c +++ b/src/backend/replication/logical/proto.c @@ -3,7 +3,7 @@ * proto.c * logical replication protocol functions * - * Copyright (c) 2015-2017, PostgreSQL Global Development Group + * Copyright (c) 2015-2018, PostgreSQL Global Development Group * * IDENTIFICATION * src/backend/replication/logical/proto.c @@ -26,6 +26,9 @@ */ #define LOGICALREP_IS_REPLICA_IDENTITY 1 +#define TRUNCATE_CASCADE (1<<0) +#define TRUNCATE_RESTART_SEQS (1<<1) + static void logicalrep_write_attrs(StringInfo out, Relation rel); static void logicalrep_write_tuple(StringInfo out, Relation rel, HeapTuple tuple); @@ -47,7 +50,7 @@ logicalrep_write_begin(StringInfo out, ReorderBufferTXN *txn) /* fixed fields */ pq_sendint64(out, txn->final_lsn); pq_sendint64(out, txn->commit_time); - pq_sendint(out, txn->xid, 4); + pq_sendint32(out, txn->xid); } /* @@ -145,7 +148,7 @@ logicalrep_write_insert(StringInfo out, Relation rel, HeapTuple newtuple) rel->rd_rel->relreplident == REPLICA_IDENTITY_INDEX); /* use Oid as relation identifier */ - pq_sendint(out, RelationGetRelid(rel), 4); + pq_sendint32(out, RelationGetRelid(rel)); pq_sendbyte(out, 'N'); /* new tuple follows */ logicalrep_write_tuple(out, rel, newtuple); @@ -189,7 +192,7 @@ logicalrep_write_update(StringInfo out, Relation rel, HeapTuple oldtuple, rel->rd_rel->relreplident == REPLICA_IDENTITY_INDEX); /* use Oid as relation identifier */ - pq_sendint(out, RelationGetRelid(rel), 4); + pq_sendint32(out, RelationGetRelid(rel)); if (oldtuple != NULL) { @@ -258,7 +261,7 @@ logicalrep_write_delete(StringInfo out, Relation rel, HeapTuple oldtuple) pq_sendbyte(out, 'D'); /* action DELETE */ /* use Oid as relation identifier */ - pq_sendint(out, RelationGetRelid(rel), 4); + pq_sendint32(out, RelationGetRelid(rel)); if 
(rel->rd_rel->relreplident == REPLICA_IDENTITY_FULL) pq_sendbyte(out, 'O'); /* old tuple follows */ @@ -292,6 +295,58 @@ logicalrep_read_delete(StringInfo in, LogicalRepTupleData *oldtup) return relid; } +/* + * Write TRUNCATE to the output stream. + */ +void +logicalrep_write_truncate(StringInfo out, + int nrelids, + Oid relids[], + bool cascade, bool restart_seqs) +{ + int i; + uint8 flags = 0; + + pq_sendbyte(out, 'T'); /* action TRUNCATE */ + + pq_sendint32(out, nrelids); + + /* encode and send truncate flags */ + if (cascade) + flags |= TRUNCATE_CASCADE; + if (restart_seqs) + flags |= TRUNCATE_RESTART_SEQS; + pq_sendint8(out, flags); + + for (i = 0; i < nrelids; i++) + pq_sendint32(out, relids[i]); +} + +/* + * Read TRUNCATE from stream. + */ +List * +logicalrep_read_truncate(StringInfo in, + bool *cascade, bool *restart_seqs) +{ + int i; + int nrelids; + List *relids = NIL; + uint8 flags; + + nrelids = pq_getmsgint(in, 4); + + /* read and decode truncate flags */ + flags = pq_getmsgint(in, 1); + *cascade = (flags & TRUNCATE_CASCADE) > 0; + *restart_seqs = (flags & TRUNCATE_RESTART_SEQS) > 0; + + for (i = 0; i < nrelids; i++) + relids = lappend_oid(relids, pq_getmsgint(in, 4)); + + return relids; +} + /* * Write relation description to the output stream. */ @@ -303,7 +358,7 @@ logicalrep_write_rel(StringInfo out, Relation rel) pq_sendbyte(out, 'R'); /* sending RELATION */ /* use Oid as relation identifier */ - pq_sendint(out, RelationGetRelid(rel), 4); + pq_sendint32(out, RelationGetRelid(rel)); /* send qualified relation name */ logicalrep_write_namespace(out, RelationGetNamespace(rel)); @@ -360,7 +415,7 @@ logicalrep_write_typ(StringInfo out, Oid typoid) typtup = (Form_pg_type) GETSTRUCT(tup); /* use Oid as relation identifier */ - pq_sendint(out, typoid, 4); + pq_sendint32(out, typoid); /* send qualified type name */ logicalrep_write_namespace(out, typtup->typnamespace); @@ -398,11 +453,11 @@ logicalrep_write_tuple(StringInfo out, Relation rel, HeapTuple tuple) for (i = 0; i < desc->natts; i++) { - if (desc->attrs[i]->attisdropped) + if (TupleDescAttr(desc, i)->attisdropped) continue; nliveatts++; } - pq_sendint(out, nliveatts, 2); + pq_sendint16(out, nliveatts); /* try to allocate enough memory from the get-go */ enlargeStringInfo(out, tuple->t_len + @@ -415,7 +470,7 @@ logicalrep_write_tuple(StringInfo out, Relation rel, HeapTuple tuple) { HeapTuple typtup; Form_pg_type typclass; - Form_pg_attribute att = desc->attrs[i]; + Form_pg_attribute att = TupleDescAttr(desc, i); char *outputstr; /* skip dropped columns */ @@ -518,11 +573,11 @@ logicalrep_write_attrs(StringInfo out, Relation rel) /* send number of live attributes */ for (i = 0; i < desc->natts; i++) { - if (desc->attrs[i]->attisdropped) + if (TupleDescAttr(desc, i)->attisdropped) continue; nliveatts++; } - pq_sendint(out, nliveatts, 2); + pq_sendint16(out, nliveatts); /* fetch bitmap of REPLICATION IDENTITY attributes */ replidentfull = (rel->rd_rel->relreplident == REPLICA_IDENTITY_FULL); @@ -533,7 +588,7 @@ logicalrep_write_attrs(StringInfo out, Relation rel) /* send the attributes */ for (i = 0; i < desc->natts; i++) { - Form_pg_attribute att = desc->attrs[i]; + Form_pg_attribute att = TupleDescAttr(desc, i); uint8 flags = 0; if (att->attisdropped) @@ -551,10 +606,10 @@ logicalrep_write_attrs(StringInfo out, Relation rel) pq_sendstring(out, NameStr(att->attname)); /* attribute type id */ - pq_sendint(out, (int) att->atttypid, sizeof(att->atttypid)); + pq_sendint32(out, (int) att->atttypid); /* attribute mode */ - 
pq_sendint(out, att->atttypmod, sizeof(att->atttypmod)); + pq_sendint32(out, att->atttypmod); } bms_free(idattrs); diff --git a/src/backend/replication/logical/relation.c b/src/backend/replication/logical/relation.c index a7ea16d714..1f20df5680 100644 --- a/src/backend/replication/logical/relation.c +++ b/src/backend/replication/logical/relation.c @@ -2,7 +2,7 @@ * relation.c * PostgreSQL logical replication * - * Copyright (c) 2016-2017, PostgreSQL Global Development Group + * Copyright (c) 2016-2018, PostgreSQL Global Development Group * * IDENTIFICATION * src/backend/replication/logical/relation.c @@ -35,8 +35,6 @@ static MemoryContext LogicalRepRelMapContext = NULL; static HTAB *LogicalRepRelMap = NULL; static HTAB *LogicalRepTypMap = NULL; -static void logicalrep_typmap_invalidate_cb(Datum arg, int cacheid, - uint32 hashvalue); /* * Relcache invalidation callback for our relation map cache. @@ -115,8 +113,6 @@ logicalrep_relmap_init(void) /* Watch for invalidation events. */ CacheRegisterRelcacheCallback(logicalrep_relmap_invalidate_cb, (Datum) 0); - CacheRegisterSyscacheCallback(TYPEOID, logicalrep_typmap_invalidate_cb, - (Datum) 0); } /* @@ -278,15 +274,16 @@ logicalrep_rel_open(LogicalRepRelId remoteid, LOCKMODE lockmode) for (i = 0; i < desc->natts; i++) { int attnum; + Form_pg_attribute attr = TupleDescAttr(desc, i); - if (desc->attrs[i]->attisdropped) + if (attr->attisdropped) { entry->attrmap[i] = -1; continue; } attnum = logicalrep_rel_att_by_name(remoterel, - NameStr(desc->attrs[i]->attname)); + NameStr(attr->attname)); entry->attrmap[i] = attnum; if (attnum >= 0) @@ -374,27 +371,6 @@ logicalrep_rel_close(LogicalRepRelMapEntry *rel, LOCKMODE lockmode) rel->localrel = NULL; } - -/* - * Type cache invalidation callback for our type map cache. - */ -static void -logicalrep_typmap_invalidate_cb(Datum arg, int cacheid, uint32 hashvalue) -{ - HASH_SEQ_STATUS status; - LogicalRepTyp *entry; - - /* Just to be sure. */ - if (LogicalRepTypMap == NULL) - return; - - /* invalidate all cache entries */ - hash_seq_init(&status, LogicalRepTypMap); - - while ((entry = (LogicalRepTyp *) hash_seq_search(&status)) != NULL) - entry->typoid = InvalidOid; -} - /* * Free the type map cache entry data. */ @@ -403,8 +379,6 @@ logicalrep_typmap_free_entry(LogicalRepTyp *entry) { pfree(entry->nspname); pfree(entry->typname); - - entry->typoid = InvalidOid; } /* @@ -435,59 +409,53 @@ logicalrep_typmap_update(LogicalRepTyp *remotetyp) entry->nspname = pstrdup(remotetyp->nspname); entry->typname = pstrdup(remotetyp->typname); MemoryContextSwitchTo(oldctx); - entry->typoid = InvalidOid; } /* - * Fetch type info from the cache. + * Fetch type name from the cache by remote type OID. + * + * Return a substitute value if we cannot find the data type; no message is + * sent to the log in that case, because this is used by error callback + * already. */ -Oid -logicalrep_typmap_getid(Oid remoteid) +char * +logicalrep_typmap_gettypname(Oid remoteid) { LogicalRepTyp *entry; bool found; - Oid nspoid; /* Internal types are mapped directly. */ if (remoteid < FirstNormalObjectId) { if (!get_typisdefined(remoteid)) - ereport(ERROR, - (errmsg("builtin type %u not found", remoteid), - errhint("This can be caused by having publisher with " - "higher major version than subscriber"))); - return remoteid; + { + /* + * This can be caused by having a publisher with a higher + * PostgreSQL major version than the subscriber. 
+ */ + return psprintf("unrecognized %u", remoteid); + } + + return format_type_be(remoteid); } if (LogicalRepTypMap == NULL) - logicalrep_relmap_init(); + { + /* + * If the typemap is not initialized yet, we cannot possibly attempt + * to search the hash table; but there's no way we know the type + * locally yet, since we haven't received a message about this type, + * so this is the best we can do. + */ + return psprintf("unrecognized %u", remoteid); + } - /* Try finding the mapping. */ + /* search the mapping */ entry = hash_search(LogicalRepTypMap, (void *) &remoteid, HASH_FIND, &found); - if (!found) - elog(ERROR, "no type map entry for remote type %u", - remoteid); - - /* Found and mapped, return the oid. */ - if (OidIsValid(entry->typoid)) - return entry->typoid; - - /* Otherwise, try to map to local type. */ - nspoid = LookupExplicitNamespace(entry->nspname, true); - if (OidIsValid(nspoid)) - entry->typoid = GetSysCacheOid2(TYPENAMENSP, - PointerGetDatum(entry->typname), - ObjectIdGetDatum(nspoid)); - else - entry->typoid = InvalidOid; - - if (!OidIsValid(entry->typoid)) - ereport(ERROR, - (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE), - errmsg("data type \"%s.%s\" required for logical replication does not exist", - entry->nspname, entry->typname))); + return psprintf("unrecognized %u", remoteid); - return entry->typoid; + Assert(OidIsValid(entry->remoteid)); + return psprintf("%s.%s", entry->nspname, entry->typname); } diff --git a/src/backend/replication/logical/reorderbuffer.c b/src/backend/replication/logical/reorderbuffer.c index 5567bee061..bed63c768e 100644 --- a/src/backend/replication/logical/reorderbuffer.c +++ b/src/backend/replication/logical/reorderbuffer.c @@ -4,7 +4,7 @@ * PostgreSQL logical replay/reorder buffer management * * - * Copyright (c) 2012-2017, PostgreSQL Global Development Group + * Copyright (c) 2012-2018, PostgreSQL Global Development Group * * * IDENTIFICATION @@ -15,7 +15,7 @@ * they are written to the WAL and is responsible to reassemble them into * toplevel transaction sized pieces. When a transaction is completely * reassembled - signalled by reading the transaction commit record - it - * will then call the output plugin (c.f. ReorderBufferCommit()) with the + * will then call the output plugin (cf. ReorderBufferCommit()) with the * individual changes. The output plugins rely on snapshots built by * snapbuild.c which hands them to us. * @@ -43,6 +43,12 @@ * transaction there will be no other data carrying records between a row's * toast chunks and the row data itself. See ReorderBufferToast* for * details. + * + * ReorderBuffer uses two special memory context types - SlabContext for + * allocations of fixed-length structures (changes and transactions), and + * GenerationContext for the variable-length transaction data (allocated + * and freed in groups with similar lifespan). + * * ------------------------------------------------------------------------- */ #include "postgres.h" @@ -150,15 +156,6 @@ typedef struct ReorderBufferDiskChange */ static const Size max_changes_in_memory = 4096; -/* - * We use a very simple form of a slab allocator for frequently allocated - * objects, simply keeping a fixed number in a linked list when unused, - * instead pfree()ing them. Without that in many workloads aset.c becomes a - * major bottleneck, especially when spilling to disk while decoding batch - * workloads. 
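Since logicalrep_typmap_gettypname() now always returns a printable name, substituting "unrecognized N" rather than raising an error or logging, it is safe to call from an error context callback. A hedged sketch of such a caller; the argument struct, the header location, and the message wording are assumptions for illustration only:

#include "postgres.h"
#include "replication/logicalrelation.h"

/* Hypothetical error-callback argument; not part of this patch. */
typedef struct ExampleErrCallbackArg
{
	const char *relname;
	Oid			remotetypid;
} ExampleErrCallbackArg;

/*
 * Error context callback: calling logicalrep_typmap_gettypname() here
 * cannot recurse into error processing, because it never throws.
 */
static void
example_error_callback(void *arg)
{
	ExampleErrCallbackArg *errarg = (ExampleErrCallbackArg *) arg;

	errcontext("processing remote data for relation \"%s\", remote type %s",
			   errarg->relname,
			   logicalrep_typmap_gettypname(errarg->remotetypid));
}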
- */ -static const Size max_cached_tuplebufs = 4096 * 2; /* ~8MB */ - /* --------------------------------------- * primary reorderbuffer support routines * --------------------------------------- @@ -168,6 +165,8 @@ static void ReorderBufferReturnTXN(ReorderBuffer *rb, ReorderBufferTXN *txn); static ReorderBufferTXN *ReorderBufferTXNByXid(ReorderBuffer *rb, TransactionId xid, bool create, bool *is_new, XLogRecPtr lsn, bool create_as_top); +static void ReorderBufferTransferSnapToParent(ReorderBufferTXN *txn, + ReorderBufferTXN *subtxn); static void AssertTXNLsnOrder(ReorderBuffer *rb); @@ -199,6 +198,9 @@ static Size ReorderBufferRestoreChanges(ReorderBuffer *rb, ReorderBufferTXN *txn static void ReorderBufferRestoreChange(ReorderBuffer *rb, ReorderBufferTXN *txn, char *change); static void ReorderBufferRestoreCleanup(ReorderBuffer *rb, ReorderBufferTXN *txn); +static void ReorderBufferCleanupSerializedTXNs(const char *slotname); +static void ReorderBufferSerializedPath(char *path, ReplicationSlot *slot, + TransactionId xid, XLogSegNo segno); static void ReorderBufferFreeSnap(ReorderBuffer *rb, Snapshot snap); static Snapshot ReorderBufferCopySnap(ReorderBuffer *rb, Snapshot orig_snap, @@ -217,7 +219,8 @@ static void ReorderBufferToastAppendChunk(ReorderBuffer *rb, ReorderBufferTXN *t /* - * Allocate a new ReorderBuffer + * Allocate a new ReorderBuffer and clean out any old serialized state from + * prior ReorderBuffer instances for the same slot. */ ReorderBuffer * ReorderBufferAllocate(void) @@ -226,6 +229,8 @@ ReorderBufferAllocate(void) HASHCTL hash_ctl; MemoryContext new_ctx; + Assert(MyReplicationSlot != NULL); + /* allocate memory in own context, to have better accountability */ new_ctx = AllocSetContextCreate(CurrentMemoryContext, "ReorderBuffer", @@ -248,6 +253,10 @@ ReorderBufferAllocate(void) SLAB_DEFAULT_BLOCK_SIZE, sizeof(ReorderBufferTXN)); + buffer->tup_context = GenerationContextCreate(new_ctx, + "Tuples", + SLAB_LARGE_BLOCK_SIZE); + hash_ctl.keysize = sizeof(TransactionId); hash_ctl.entrysize = sizeof(ReorderBufferTXNByIdEnt); hash_ctl.hcxt = buffer->context; @@ -258,15 +267,20 @@ ReorderBufferAllocate(void) buffer->by_txn_last_xid = InvalidTransactionId; buffer->by_txn_last_txn = NULL; - buffer->nr_cached_tuplebufs = 0; - buffer->outbuf = NULL; buffer->outbufsize = 0; buffer->current_restart_decoding_lsn = InvalidXLogRecPtr; dlist_init(&buffer->toplevel_by_lsn); - slist_init(&buffer->cached_tuplebufs); + dlist_init(&buffer->txns_by_base_snapshot_lsn); + + /* + * Ensure there's no stale data from prior uses of this slot, in case some + * prior exit avoided calling ReorderBufferFree. Failure to do this can + * produce duplicated txns, and it's very cheap if there's nothing there. + */ + ReorderBufferCleanupSerializedTXNs(NameStr(MyReplicationSlot->data.name)); return buffer; } @@ -284,6 +298,9 @@ ReorderBufferFree(ReorderBuffer *rb) * memory context. */ MemoryContextDelete(context); + + /* Free disk space used by unconsumed reorder buffers */ + ReorderBufferCleanupSerializedTXNs(NameStr(MyReplicationSlot->data.name)); } /* @@ -308,9 +325,6 @@ ReorderBufferGetTXN(ReorderBuffer *rb) /* * Free a ReorderBufferTXN. - * - * Deallocation might be delayed for efficiency purposes, for details check - * the comments above max_cached_changes's definition. */ static void ReorderBufferReturnTXN(ReorderBuffer *rb, ReorderBufferTXN *txn) @@ -340,7 +354,7 @@ ReorderBufferReturnTXN(ReorderBuffer *rb, ReorderBufferTXN *txn) } /* - * Get an unused, possibly preallocated, ReorderBufferChange. 
+ * Get an fresh ReorderBufferChange. */ ReorderBufferChange * ReorderBufferGetChange(ReorderBuffer *rb) @@ -356,9 +370,6 @@ ReorderBufferGetChange(ReorderBuffer *rb) /* * Free an ReorderBufferChange. - * - * Deallocation might be delayed for efficiency purposes, for details check - * the comments above max_cached_changes's definition. */ void ReorderBufferReturnChange(ReorderBuffer *rb, ReorderBufferChange *change) @@ -398,6 +409,13 @@ ReorderBufferReturnChange(ReorderBuffer *rb, ReorderBufferChange *change) } break; /* no data in addition to the struct itself */ + case REORDER_BUFFER_CHANGE_TRUNCATE: + if (change->data.truncate.relids != NULL) + { + ReorderBufferReturnRelids(rb, change->data.truncate.relids); + change->data.truncate.relids = NULL; + } + break; case REORDER_BUFFER_CHANGE_INTERNAL_SPEC_CONFIRM: case REORDER_BUFFER_CHANGE_INTERNAL_COMMAND_ID: case REORDER_BUFFER_CHANGE_INTERNAL_TUPLECID: @@ -408,8 +426,8 @@ ReorderBufferReturnChange(ReorderBuffer *rb, ReorderBufferChange *change) } /* - * Get an unused, possibly preallocated, ReorderBufferTupleBuf fitting at - * least a tuple of size tuple_len (excluding header overhead). + * Get a fresh ReorderBufferTupleBuf fitting at least a tuple of size + * tuple_len (excluding header overhead). */ ReorderBufferTupleBuf * ReorderBufferGetTupleBuf(ReorderBuffer *rb, Size tuple_len) @@ -419,70 +437,54 @@ ReorderBufferGetTupleBuf(ReorderBuffer *rb, Size tuple_len) alloc_len = tuple_len + SizeofHeapTupleHeader; - /* - * Most tuples are below MaxHeapTupleSize, so we use a slab allocator for - * those. Thus always allocate at least MaxHeapTupleSize. Note that tuples - * generated for oldtuples can be bigger, as they don't have out-of-line - * toast columns. - */ - if (alloc_len < MaxHeapTupleSize) - alloc_len = MaxHeapTupleSize; - - - /* if small enough, check the slab cache */ - if (alloc_len <= MaxHeapTupleSize && rb->nr_cached_tuplebufs) - { - rb->nr_cached_tuplebufs--; - tuple = slist_container(ReorderBufferTupleBuf, node, - slist_pop_head_node(&rb->cached_tuplebufs)); - Assert(tuple->alloc_tuple_size == MaxHeapTupleSize); -#ifdef USE_ASSERT_CHECKING - memset(&tuple->tuple, 0xa9, sizeof(HeapTupleData)); - VALGRIND_MAKE_MEM_UNDEFINED(&tuple->tuple, sizeof(HeapTupleData)); -#endif - tuple->tuple.t_data = ReorderBufferTupleBufData(tuple); -#ifdef USE_ASSERT_CHECKING - memset(tuple->tuple.t_data, 0xa8, tuple->alloc_tuple_size); - VALGRIND_MAKE_MEM_UNDEFINED(tuple->tuple.t_data, tuple->alloc_tuple_size); -#endif - } - else - { - tuple = (ReorderBufferTupleBuf *) - MemoryContextAlloc(rb->context, - sizeof(ReorderBufferTupleBuf) + - MAXIMUM_ALIGNOF + alloc_len); - tuple->alloc_tuple_size = alloc_len; - tuple->tuple.t_data = ReorderBufferTupleBufData(tuple); - } + tuple = (ReorderBufferTupleBuf *) + MemoryContextAlloc(rb->tup_context, + sizeof(ReorderBufferTupleBuf) + + MAXIMUM_ALIGNOF + alloc_len); + tuple->alloc_tuple_size = alloc_len; + tuple->tuple.t_data = ReorderBufferTupleBufData(tuple); return tuple; } /* * Free an ReorderBufferTupleBuf. - * - * Deallocation might be delayed for efficiency purposes, for details check - * the comments above max_cached_changes's definition. 
*/ void ReorderBufferReturnTupleBuf(ReorderBuffer *rb, ReorderBufferTupleBuf *tuple) { - /* check whether to put into the slab cache, oversized tuples never are */ - if (tuple->alloc_tuple_size == MaxHeapTupleSize && - rb->nr_cached_tuplebufs < max_cached_tuplebufs) - { - rb->nr_cached_tuplebufs++; - slist_push_head(&rb->cached_tuplebufs, &tuple->node); - VALGRIND_MAKE_MEM_UNDEFINED(tuple->tuple.t_data, tuple->alloc_tuple_size); - VALGRIND_MAKE_MEM_UNDEFINED(tuple, sizeof(ReorderBufferTupleBuf)); - VALGRIND_MAKE_MEM_DEFINED(&tuple->node, sizeof(tuple->node)); - VALGRIND_MAKE_MEM_DEFINED(&tuple->alloc_tuple_size, sizeof(tuple->alloc_tuple_size)); - } - else - { - pfree(tuple); - } + pfree(tuple); +} + +/* + * Get an array for relids of truncated relations. + * + * We use the global memory context (for the whole reorder buffer), because + * none of the existing ones seems like a good match (some are SLAB, so we + * can't use those, and tup_context is meant for tuple data, not relids). We + * could add yet another context, but it seems like an overkill - TRUNCATE is + * not particularly common operation, so it does not seem worth it. + */ +Oid * +ReorderBufferGetRelids(ReorderBuffer *rb, int nrelids) +{ + Oid *relids; + Size alloc_len; + + alloc_len = sizeof(Oid) * nrelids; + + relids = (Oid *) MemoryContextAlloc(rb->context, alloc_len); + + return relids; +} + +/* + * Free an array of relids. + */ +void +ReorderBufferReturnRelids(ReorderBuffer *rb, Oid *relids) +{ + pfree(relids); } /* @@ -500,7 +502,6 @@ ReorderBufferTXNByXid(ReorderBuffer *rb, TransactionId xid, bool create, bool found; Assert(TransactionIdIsValid(xid)); - Assert(!create || lsn != InvalidXLogRecPtr); /* * Check the one-entry lookup cache first @@ -544,6 +545,7 @@ ReorderBufferTXNByXid(ReorderBuffer *rb, TransactionId xid, bool create, { /* initialize the new entry, if creation was requested */ Assert(ent != NULL); + Assert(lsn != InvalidXLogRecPtr); ent->txn = ReorderBufferGetTXN(rb); ent->txn->xid = xid; @@ -645,43 +647,80 @@ ReorderBufferQueueMessage(ReorderBuffer *rb, TransactionId xid, } } - +/* + * AssertTXNLsnOrder + * Verify LSN ordering of transaction lists in the reorderbuffer + * + * Other LSN-related invariants are checked too. + * + * No-op if assertions are not in use. 
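A hedged sketch of how a decode-time caller might pair the new relid-array helpers with a REORDER_BUFFER_CHANGE_TRUNCATE change. The real caller lives in the WAL-decoding layer and is not shown in this patch excerpt; the function name is illustrative and the cascade/restart-sequences flags are omitted:

#include "postgres.h"
#include "replication/reorderbuffer.h"

static void
example_queue_truncate(ReorderBuffer *rb, TransactionId xid, XLogRecPtr lsn,
					   int nrelids, Oid *relids_src)
{
	ReorderBufferChange *change = ReorderBufferGetChange(rb);

	change->action = REORDER_BUFFER_CHANGE_TRUNCATE;
	change->data.truncate.nrelids = nrelids;

	/* array is allocated in the buffer's context and freed on return */
	change->data.truncate.relids = ReorderBufferGetRelids(rb, nrelids);
	memcpy(change->data.truncate.relids, relids_src, nrelids * sizeof(Oid));

	ReorderBufferQueueChange(rb, xid, lsn, change);
}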
+ */ static void AssertTXNLsnOrder(ReorderBuffer *rb) { #ifdef USE_ASSERT_CHECKING dlist_iter iter; XLogRecPtr prev_first_lsn = InvalidXLogRecPtr; + XLogRecPtr prev_base_snap_lsn = InvalidXLogRecPtr; dlist_foreach(iter, &rb->toplevel_by_lsn) { - ReorderBufferTXN *cur_txn; + ReorderBufferTXN *cur_txn = dlist_container(ReorderBufferTXN, node, + iter.cur); - cur_txn = dlist_container(ReorderBufferTXN, node, iter.cur); + /* start LSN must be set */ Assert(cur_txn->first_lsn != InvalidXLogRecPtr); + /* If there is an end LSN, it must be higher than start LSN */ if (cur_txn->end_lsn != InvalidXLogRecPtr) Assert(cur_txn->first_lsn <= cur_txn->end_lsn); + /* Current initial LSN must be strictly higher than previous */ if (prev_first_lsn != InvalidXLogRecPtr) Assert(prev_first_lsn < cur_txn->first_lsn); + /* known-as-subtxn txns must not be listed */ Assert(!cur_txn->is_known_as_subxact); + prev_first_lsn = cur_txn->first_lsn; } + + dlist_foreach(iter, &rb->txns_by_base_snapshot_lsn) + { + ReorderBufferTXN *cur_txn = dlist_container(ReorderBufferTXN, + base_snapshot_node, + iter.cur); + + /* base snapshot (and its LSN) must be set */ + Assert(cur_txn->base_snapshot != NULL); + Assert(cur_txn->base_snapshot_lsn != InvalidXLogRecPtr); + + /* current LSN must be strictly higher than previous */ + if (prev_base_snap_lsn != InvalidXLogRecPtr) + Assert(prev_base_snap_lsn < cur_txn->base_snapshot_lsn); + + /* known-as-subtxn txns must not be listed */ + Assert(!cur_txn->is_known_as_subxact); + + prev_base_snap_lsn = cur_txn->base_snapshot_lsn; + } #endif } +/* + * ReorderBufferGetOldestTXN + * Return oldest transaction in reorderbuffer + */ ReorderBufferTXN * ReorderBufferGetOldestTXN(ReorderBuffer *rb) { ReorderBufferTXN *txn; + AssertTXNLsnOrder(rb); + if (dlist_is_empty(&rb->toplevel_by_lsn)) return NULL; - AssertTXNLsnOrder(rb); - txn = dlist_head_element(ReorderBufferTXN, node, &rb->toplevel_by_lsn); Assert(!txn->is_known_as_subxact); @@ -689,12 +728,44 @@ ReorderBufferGetOldestTXN(ReorderBuffer *rb) return txn; } +/* + * ReorderBufferGetOldestXmin + * Return oldest Xmin in reorderbuffer + * + * Returns oldest possibly running Xid from the point of view of snapshots + * used in the transactions kept by reorderbuffer, or InvalidTransactionId if + * there are none. + * + * Since snapshots are assigned monotonically, this equals the Xmin of the + * base snapshot with minimal base_snapshot_lsn. + */ +TransactionId +ReorderBufferGetOldestXmin(ReorderBuffer *rb) +{ + ReorderBufferTXN *txn; + + AssertTXNLsnOrder(rb); + + if (dlist_is_empty(&rb->txns_by_base_snapshot_lsn)) + return InvalidTransactionId; + + txn = dlist_head_element(ReorderBufferTXN, base_snapshot_node, + &rb->txns_by_base_snapshot_lsn); + return txn->base_snapshot->xmin; +} + void ReorderBufferSetRestartPoint(ReorderBuffer *rb, XLogRecPtr ptr) { rb->current_restart_decoding_lsn = ptr; } +/* + * ReorderBufferAssignChild + * + * Make note that we know that subxid is a subtransaction of xid, seen as of + * the given lsn. 
+ */ void ReorderBufferAssignChild(ReorderBuffer *rb, TransactionId xid, TransactionId subxid, XLogRecPtr lsn) @@ -707,32 +778,107 @@ ReorderBufferAssignChild(ReorderBuffer *rb, TransactionId xid, txn = ReorderBufferTXNByXid(rb, xid, true, &new_top, lsn, true); subtxn = ReorderBufferTXNByXid(rb, subxid, true, &new_sub, lsn, false); - if (new_sub) + if (new_top && !new_sub) + elog(ERROR, "subtransaction logged without previous top-level txn record"); + + if (!new_sub) { - /* - * we assign subtransactions to top level transaction even if we don't - * have data for it yet, assignment records frequently reference xids - * that have not yet produced any records. Knowing those aren't top - * level xids allows us to make processing cheaper in some places. - */ - dlist_push_tail(&txn->subtxns, &subtxn->node); - txn->nsubtxns++; + if (subtxn->is_known_as_subxact) + { + /* already associated, nothing to do */ + return; + } + else + { + /* + * We already saw this transaction, but initially added it to the + * list of top-level txns. Now that we know it's not top-level, + * remove it from there. + */ + dlist_delete(&subtxn->node); + } } - else if (!subtxn->is_known_as_subxact) - { - subtxn->is_known_as_subxact = true; - Assert(subtxn->nsubtxns == 0); - /* remove from lsn order list of top-level transactions */ - dlist_delete(&subtxn->node); + subtxn->is_known_as_subxact = true; + subtxn->toplevel_xid = xid; + Assert(subtxn->nsubtxns == 0); - /* add to toplevel transaction */ - dlist_push_tail(&txn->subtxns, &subtxn->node); - txn->nsubtxns++; - } - else if (new_top) + /* add to subtransaction list */ + dlist_push_tail(&txn->subtxns, &subtxn->node); + txn->nsubtxns++; + + /* Possibly transfer the subtxn's snapshot to its top-level txn. */ + ReorderBufferTransferSnapToParent(txn, subtxn); + + /* Verify LSN-ordering invariant */ + AssertTXNLsnOrder(rb); +} + +/* + * ReorderBufferTransferSnapToParent + * Transfer base snapshot from subtxn to top-level txn, if needed + * + * This is done if the top-level txn doesn't have a base snapshot, or if the + * subtxn's base snapshot has an earlier LSN than the top-level txn's base + * snapshot's LSN. This can happen if there are no changes in the toplevel + * txn but there are some in the subtxn, or the first change in subtxn has + * earlier LSN than first change in the top-level txn and we learned about + * their kinship only now. + * + * The subtransaction's snapshot is cleared regardless of the transfer + * happening, since it's not needed anymore in either case. + * + * We do this as soon as we become aware of their kinship, to avoid queueing + * extra snapshots to txns known-as-subtxns -- only top-level txns will + * receive further snapshots. + */ +static void +ReorderBufferTransferSnapToParent(ReorderBufferTXN *txn, + ReorderBufferTXN *subtxn) +{ + Assert(subtxn->toplevel_xid == txn->xid); + + if (subtxn->base_snapshot != NULL) { - elog(ERROR, "existing subxact assigned to unknown toplevel xact"); + if (txn->base_snapshot == NULL || + subtxn->base_snapshot_lsn < txn->base_snapshot_lsn) + { + /* + * If the toplevel transaction already has a base snapshot but + * it's newer than the subxact's, purge it. + */ + if (txn->base_snapshot != NULL) + { + SnapBuildSnapDecRefcount(txn->base_snapshot); + dlist_delete(&txn->base_snapshot_node); + } + + /* + * The snapshot is now the top transaction's; transfer it, and + * adjust the list position of the top transaction in the list by + * moving it to where the subtransaction is. 
+ */ + txn->base_snapshot = subtxn->base_snapshot; + txn->base_snapshot_lsn = subtxn->base_snapshot_lsn; + dlist_insert_before(&subtxn->base_snapshot_node, + &txn->base_snapshot_node); + + /* + * The subtransaction doesn't have a snapshot anymore (so it + * mustn't be in the list.) + */ + subtxn->base_snapshot = NULL; + subtxn->base_snapshot_lsn = InvalidXLogRecPtr; + dlist_delete(&subtxn->base_snapshot_node); + } + else + { + /* Base snap of toplevel is fine, so subxact's is not needed */ + SnapBuildSnapDecRefcount(subtxn->base_snapshot); + dlist_delete(&subtxn->base_snapshot_node); + subtxn->base_snapshot = NULL; + subtxn->base_snapshot_lsn = InvalidXLogRecPtr; + } } } @@ -745,7 +891,6 @@ ReorderBufferCommitChild(ReorderBuffer *rb, TransactionId xid, TransactionId subxid, XLogRecPtr commit_lsn, XLogRecPtr end_lsn) { - ReorderBufferTXN *txn; ReorderBufferTXN *subtxn; subtxn = ReorderBufferTXNByXid(rb, subxid, false, NULL, @@ -757,42 +902,14 @@ ReorderBufferCommitChild(ReorderBuffer *rb, TransactionId xid, if (!subtxn) return; - txn = ReorderBufferTXNByXid(rb, xid, false, NULL, commit_lsn, true); - - if (txn == NULL) - elog(ERROR, "subxact logged without previous toplevel record"); - - /* - * Pass our base snapshot to the parent transaction if it doesn't have - * one, or ours is older. That can happen if there are no changes in the - * toplevel transaction but in one of the child transactions. This allows - * the parent to simply use its base snapshot initially. - */ - if (subtxn->base_snapshot != NULL && - (txn->base_snapshot == NULL || - txn->base_snapshot_lsn > subtxn->base_snapshot_lsn)) - { - txn->base_snapshot = subtxn->base_snapshot; - txn->base_snapshot_lsn = subtxn->base_snapshot_lsn; - subtxn->base_snapshot = NULL; - subtxn->base_snapshot_lsn = InvalidXLogRecPtr; - } - subtxn->final_lsn = commit_lsn; subtxn->end_lsn = end_lsn; - if (!subtxn->is_known_as_subxact) - { - subtxn->is_known_as_subxact = true; - Assert(subtxn->nsubtxns == 0); - - /* remove from lsn order list of top-level transactions */ - dlist_delete(&subtxn->node); - - /* add to subtransaction list */ - dlist_push_tail(&txn->subtxns, &subtxn->node); - txn->nsubtxns++; - } + /* + * Assign this subxact as a child of the toplevel xact (no-op if already + * done.) + */ + ReorderBufferAssignChild(rb, xid, subxid, InvalidXLogRecPtr); } @@ -1116,11 +1233,13 @@ ReorderBufferCleanupTXN(ReorderBuffer *rb, ReorderBufferTXN *txn) ReorderBufferReturnChange(rb, change); } + /* + * Cleanup the base snapshot, if set. + */ if (txn->base_snapshot != NULL) { SnapBuildSnapDecRefcount(txn->base_snapshot); - txn->base_snapshot = NULL; - txn->base_snapshot_lsn = InvalidXLogRecPtr; + dlist_delete(&txn->base_snapshot_node); } /* @@ -1295,17 +1414,17 @@ ReorderBufferFreeSnap(ReorderBuffer *rb, Snapshot snap) } /* - * Perform the replay of a transaction and it's non-aborted subtransactions. + * Perform the replay of a transaction and its non-aborted subtransactions. * * Subtransactions previously have to be processed by * ReorderBufferCommitChild(), even if previously assigned to the toplevel * transaction with ReorderBufferAssignChild. * - * We currently can only decode a transaction's contents in when their commit - * record is read because that's currently the only place where we know about - * cache invalidations. Thus, once a toplevel commit is read, we iterate over - * the top and subtransactions (using a k-way merge) and replay the changes in - * lsn order. 
* We currently can only decode a transaction's contents when its commit + * record is read because that's the only place where we know about cache + * invalidations. Thus, once a toplevel commit is read, we iterate over the top + * and subtransactions (using a k-way merge) and replay the changes in lsn + * order. */ void ReorderBufferCommit(ReorderBuffer *rb, TransactionId xid, @@ -1333,10 +1452,10 @@ ReorderBufferCommit(ReorderBuffer *rb, TransactionId xid, txn->origin_lsn = origin_lsn; /* - * If this transaction didn't have any real changes in our database, it's - * OK not to have a snapshot. Note that ReorderBufferCommitChild will have - * transferred its snapshot to this transaction if it had one and the - * toplevel tx didn't. + * If this transaction has no snapshot, it didn't make any changes to the + * database, so there's nothing to decode. Note that + * ReorderBufferCommitChild will have transferred any snapshots from + * subtransactions if there were any. */ if (txn->base_snapshot == NULL) { @@ -1392,6 +1511,8 @@ ReorderBufferCommit(ReorderBuffer *rb, TransactionId xid, * use as a normal record. It'll be cleaned up at the end * of INSERT processing. */ + if (specinsert == NULL) + elog(ERROR, "invalid ordering of speculative insertion changes"); Assert(specinsert->data.tp.oldtuple == NULL); change = specinsert; change->action = REORDER_BUFFER_CHANGE_INSERT; @@ -1406,8 +1527,16 @@ ReorderBufferCommit(ReorderBuffer *rb, TransactionId xid, change->data.tp.relnode.relNode); /* - * Catalog tuple without data, emitted while catalog was - * in the process of being rewritten. + * Mapped catalog tuple without data, emitted while + * catalog table was in the process of being rewritten. We + * can fail to look up the relfilenode, because the + * relmapper has no "historic" view, in contrast to + * the normal catalog during decoding. Thus repeated + * rewrites can cause a lookup failure. That's OK because + * we do not decode catalog changes anyway. Normally such + * tuples would be skipped over below, but we can't + * identify whether the table should be logically logged + * without mapping the relfilenode to the oid. */ if (reloid == InvalidOid && change->data.tp.newtuple == NULL && @@ -1429,6 +1558,13 @@ ReorderBufferCommit(ReorderBuffer *rb, TransactionId xid, if (!RelationIsLogicallyLogged(relation)) goto change_done; + /* + * Ignore temporary heaps created during DDL unless the + * plugin has asked for them. + */ + if (relation->rd_rel->relrewrite && !rb->output_rewrites) + goto change_done; + /* * For now ignore sequence changes entirely. Most of the * time they don't log changes using records we @@ -1462,10 +1598,17 @@ ReorderBufferCommit(ReorderBuffer *rb, TransactionId xid, * transaction's changes. Otherwise it will get * freed/reused while restoring spooled data from * disk. + * + * But skip doing so if there's no tuple-data. That + * happens if a non-mapped system catalog with a toast + * table is rewritten.
*/ - dlist_delete(&change->node); - ReorderBufferToastAppendChunk(rb, txn, relation, - change); + if (change->data.tp.newtuple != NULL) + { + dlist_delete(&change->node); + ReorderBufferToastAppendChunk(rb, txn, relation, + change); + } } change_done: @@ -1514,6 +1657,38 @@ ReorderBufferCommit(ReorderBuffer *rb, TransactionId xid, specinsert = change; break; + case REORDER_BUFFER_CHANGE_TRUNCATE: + { + int i; + int nrelids = change->data.truncate.nrelids; + int nrelations = 0; + Relation *relations; + + relations = palloc0(nrelids * sizeof(Relation)); + for (i = 0; i < nrelids; i++) + { + Oid relid = change->data.truncate.relids[i]; + Relation relation; + + relation = RelationIdGetRelation(relid); + + if (relation == NULL) + elog(ERROR, "could not open relation with OID %u", relid); + + if (!RelationIsLogicallyLogged(relation)) + continue; + + relations[nrelations++] = relation; + } + + rb->apply_truncate(rb, txn, nrelations, relations, change); + + for (i = 0; i < nrelations; i++) + RelationClose(relations[i]); + + break; + } + case REORDER_BUFFER_CHANGE_MESSAGE: rb->message(rb, txn, change->lsn, true, change->data.msg.prefix, @@ -1713,8 +1888,8 @@ ReorderBufferAbortOld(ReorderBuffer *rb, TransactionId oldestRunningXid) * Iterate through all (potential) toplevel TXNs and abort all that are * older than what possibly can be running. Once we've found the first * that is alive we stop, there might be some that acquired an xid earlier - * but started writing later, but it's unlikely and they will cleaned up - * in a later call to ReorderBufferAbortOld(). + * but started writing later, but it's unlikely and they will be cleaned + * up in a later call to this function. */ dlist_foreach_modify(it, &rb->toplevel_by_lsn) { @@ -1724,6 +1899,21 @@ ReorderBufferAbortOld(ReorderBuffer *rb, TransactionId oldestRunningXid) if (TransactionIdPrecedes(txn->xid, oldestRunningXid)) { + /* + * We set final_lsn on a transaction when we decode its commit or + * abort record, but we never see those records for crashed + * transactions. To ensure cleanup of these transactions, set + * final_lsn to that of their last change; this causes + * ReorderBufferRestoreCleanup to do the right thing. + */ + if (txn->serialized && txn->final_lsn == 0) + { + ReorderBufferChange *last = + dlist_tail_element(ReorderBufferChange, node, &txn->changes); + + txn->final_lsn = last->lsn; + } + elog(DEBUG2, "aborting old transaction %u", txn->xid); /* remove potential on-disk data, and deallocate this tx */ @@ -1780,7 +1970,7 @@ ReorderBufferForget(ReorderBuffer *rb, TransactionId xid, XLogRecPtr lsn) /* * Execute invalidations happening outside the context of a decoded * transaction. That currently happens either for xid-less commits - * (c.f. RecordTransactionCommit()) or for invalidations in uninteresting + * (cf. RecordTransactionCommit()) or for invalidations in uninteresting * transactions (via ReorderBufferForget()). */ void @@ -1845,12 +2035,10 @@ ReorderBufferAddSnapshot(ReorderBuffer *rb, TransactionId xid, } /* - * Setup the base snapshot of a transaction. The base snapshot is the snapshot - * that is used to decode all changes until either this transaction modifies - * the catalog or another catalog modifying transaction commits. + * Set up the transaction's base snapshot. * - * Needs to be called before any changes are added with - * ReorderBufferQueueChange(). + * If we know that xid is a subtransaction, set the base snapshot on the + * top-level transaction instead. 
*/ void ReorderBufferSetBaseSnapshot(ReorderBuffer *rb, TransactionId xid, @@ -1859,12 +2047,23 @@ ReorderBufferSetBaseSnapshot(ReorderBuffer *rb, TransactionId xid, ReorderBufferTXN *txn; bool is_new; + AssertArg(snap != NULL); + + /* + * Fetch the transaction to operate on. If we know it's a subtransaction, + * operate on its top-level transaction instead. + */ txn = ReorderBufferTXNByXid(rb, xid, true, &is_new, lsn, true); + if (txn->is_known_as_subxact) + txn = ReorderBufferTXNByXid(rb, txn->toplevel_xid, false, + NULL, InvalidXLogRecPtr, false); Assert(txn->base_snapshot == NULL); - Assert(snap != NULL); txn->base_snapshot = snap; txn->base_snapshot_lsn = lsn; + dlist_push_tail(&rb->txns_by_base_snapshot_lsn, &txn->base_snapshot_node); + + AssertTXNLsnOrder(rb); } /* @@ -1983,25 +2182,26 @@ ReorderBufferXidHasCatalogChanges(ReorderBuffer *rb, TransactionId xid) } /* - * Have we already added the first snapshot? + * ReorderBufferXidHasBaseSnapshot + * Have we already set the base snapshot for the given txn/subtxn? */ bool ReorderBufferXidHasBaseSnapshot(ReorderBuffer *rb, TransactionId xid) { ReorderBufferTXN *txn; - txn = ReorderBufferTXNByXid(rb, xid, false, NULL, InvalidXLogRecPtr, - false); + txn = ReorderBufferTXNByXid(rb, xid, false, + NULL, InvalidXLogRecPtr, false); /* transaction isn't known yet, ergo no snapshot */ if (txn == NULL) return false; - /* - * TODO: It would be a nice improvement if we would check the toplevel - * transaction in subtransactions, but we'd need to keep track of a bit - * more state. - */ + /* a known subtxn? operate on top-level txn instead */ + if (txn->is_known_as_subxact) + txn = ReorderBufferTXNByXid(rb, txn->toplevel_xid, false, + NULL, InvalidXLogRecPtr, false); + return txn->base_snapshot != NULL; } @@ -2058,7 +2258,6 @@ ReorderBufferSerializeTXN(ReorderBuffer *rb, ReorderBufferTXN *txn) int fd = -1; XLogSegNo curOpenSegNo = 0; Size spilled = 0; - char path[MAXPGPATH]; elog(DEBUG2, "spill %u changes in XID %u to disk", (uint32) txn->nentries_mem, txn->xid); @@ -2083,34 +2282,31 @@ ReorderBufferSerializeTXN(ReorderBuffer *rb, ReorderBufferTXN *txn) * store in segment in which it belongs by start lsn, don't split over * multiple segments tho */ - if (fd == -1 || !XLByteInSeg(change->lsn, curOpenSegNo)) + if (fd == -1 || + !XLByteInSeg(change->lsn, curOpenSegNo, wal_segment_size)) { - XLogRecPtr recptr; + char path[MAXPGPATH]; if (fd != -1) CloseTransientFile(fd); - XLByteToSeg(change->lsn, curOpenSegNo); - XLogSegNoOffsetToRecPtr(curOpenSegNo, 0, recptr); + XLByteToSeg(change->lsn, curOpenSegNo, wal_segment_size); /* * No need to care about TLIs here, only used during a single run, * so each LSN only maps to a specific WAL record. 
*/ - sprintf(path, "pg_replslot/%s/xid-%u-lsn-%X-%X.snap", - NameStr(MyReplicationSlot->data.name), txn->xid, - (uint32) (recptr >> 32), (uint32) recptr); + ReorderBufferSerializedPath(path, MyReplicationSlot, txn->xid, + curOpenSegNo); /* open segment, create it if necessary */ fd = OpenTransientFile(path, - O_CREAT | O_WRONLY | O_APPEND | PG_BINARY, - S_IRUSR | S_IWUSR); + O_CREAT | O_WRONLY | O_APPEND | PG_BINARY); if (fd < 0) ereport(ERROR, (errcode_for_file_access(), - errmsg("could not open file \"%s\": %m", - path))); + errmsg("could not open file \"%s\": %m", path))); } ReorderBufferSerializeChange(rb, txn, fd, change); @@ -2265,6 +2461,27 @@ ReorderBufferSerializeChange(ReorderBuffer *rb, ReorderBufferTXN *txn, sizeof(TransactionId) * snap->subxcnt); data += sizeof(TransactionId) * snap->subxcnt; } + break; + } + case REORDER_BUFFER_CHANGE_TRUNCATE: + { + Size size; + char *data; + + /* account for the OIDs of truncated relations */ + size = sizeof(Oid) * change->data.truncate.nrelids; + sz += size; + + /* make sure we have enough space */ + ReorderBufferSerializeReserve(rb, sz); + + data = ((char *) rb->outbuf) + sizeof(ReorderBufferDiskChange); + /* might have been reallocated above */ + ondisk = (ReorderBufferDiskChange *) rb->outbuf; + + memcpy(data, change->data.truncate.relids, size); + data += size; + break; } case REORDER_BUFFER_CHANGE_INTERNAL_SPEC_CONFIRM: @@ -2276,13 +2493,16 @@ ReorderBufferSerializeChange(ReorderBuffer *rb, ReorderBufferTXN *txn, ondisk->size = sz; + errno = 0; pgstat_report_wait_start(WAIT_EVENT_REORDER_BUFFER_WRITE); if (write(fd, rb->outbuf, ondisk->size) != ondisk->size) { int save_errno = errno; CloseTransientFile(fd); - errno = save_errno; + + /* if write didn't set errno, assume problem is no disk space */ + errno = save_errno ? save_errno : ENOSPC; ereport(ERROR, (errcode_for_file_access(), errmsg("could not write to data file for XID %u: %m", @@ -2319,7 +2539,7 @@ ReorderBufferRestoreChanges(ReorderBuffer *rb, ReorderBufferTXN *txn, txn->nentries_mem = 0; Assert(dlist_is_empty(&txn->changes)); - XLByteToSeg(txn->final_lsn, last_segno); + XLByteToSeg(txn->final_lsn, last_segno, wal_segment_size); while (restored < max_changes_in_memory && *segno <= last_segno) { @@ -2328,27 +2548,22 @@ ReorderBufferRestoreChanges(ReorderBuffer *rb, ReorderBufferTXN *txn, if (*fd == -1) { - XLogRecPtr recptr; char path[MAXPGPATH]; /* first time in */ if (*segno == 0) - { - XLByteToSeg(txn->first_lsn, *segno); - } + XLByteToSeg(txn->first_lsn, *segno, wal_segment_size); Assert(*segno != 0 || dlist_is_empty(&txn->changes)); - XLogSegNoOffsetToRecPtr(*segno, 0, recptr); /* * No need to care about TLIs here, only used during a single run, * so each LSN only maps to a specific WAL record. 
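Both the serialize and the restore path above name spill segments after the slot, the XID, and the start LSN of the WAL segment a change falls into (the helper ReorderBufferSerializedPath, added further below, does the formatting). A rough standalone illustration of that naming scheme, assuming a 16 MB WAL segment size; this is an editorial sketch, not the server code:

    #include <stdint.h>
    #include <stdio.h>

    /*
     * Build "pg_replslot/<slot>/xid-<xid>-lsn-<hi>-<lo>.spill" for the WAL
     * segment containing 'lsn'.
     */
    static void
    spill_path(char *path, size_t pathlen, const char *slot,
               uint32_t xid, uint64_t lsn, uint64_t segment_size)
    {
        uint64_t segstart = (lsn / segment_size) * segment_size;

        snprintf(path, pathlen, "pg_replslot/%s/xid-%u-lsn-%X-%X.spill",
                 slot, xid,
                 (uint32_t) (segstart >> 32), (uint32_t) segstart);
    }

    int
    main(void)
    {
        char path[1024];

        /* slot "myslot", xid 1234, a change at LSN 0/5ABCDEF, 16 MB segments */
        spill_path(path, sizeof(path), "myslot", 1234,
                   0x5ABCDEF, 16 * 1024 * 1024);
        puts(path);   /* prints pg_replslot/myslot/xid-1234-lsn-0-5000000.spill */
        return 0;
    }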
*/ - sprintf(path, "pg_replslot/%s/xid-%u-lsn-%X-%X.snap", - NameStr(MyReplicationSlot->data.name), txn->xid, - (uint32) (recptr >> 32), (uint32) recptr); + ReorderBufferSerializedPath(path, MyReplicationSlot, txn->xid, + *segno); - *fd = OpenTransientFile(path, O_RDONLY | PG_BINARY, 0); + *fd = OpenTransientFile(path, O_RDONLY | PG_BINARY); if (*fd < 0 && errno == ENOENT) { *fd = -1; @@ -2360,7 +2575,6 @@ ReorderBufferRestoreChanges(ReorderBuffer *rb, ReorderBufferTXN *txn, (errcode_for_file_access(), errmsg("could not open file \"%s\": %m", path))); - } /* @@ -2552,6 +2766,17 @@ ReorderBufferRestoreChange(ReorderBuffer *rb, ReorderBufferTXN *txn, break; } /* the base struct contains all the data, easy peasy */ + case REORDER_BUFFER_CHANGE_TRUNCATE: + { + Oid *relids; + + relids = ReorderBufferGetRelids(rb, + change->data.truncate.nrelids); + memcpy(relids, data, change->data.truncate.nrelids * sizeof(Oid)); + change->data.truncate.relids = relids; + + break; + } case REORDER_BUFFER_CHANGE_INTERNAL_SPEC_CONFIRM: case REORDER_BUFFER_CHANGE_INTERNAL_COMMAND_ID: case REORDER_BUFFER_CHANGE_INTERNAL_TUPLECID: @@ -2575,20 +2800,15 @@ ReorderBufferRestoreCleanup(ReorderBuffer *rb, ReorderBufferTXN *txn) Assert(txn->first_lsn != InvalidXLogRecPtr); Assert(txn->final_lsn != InvalidXLogRecPtr); - XLByteToSeg(txn->first_lsn, first); - XLByteToSeg(txn->final_lsn, last); + XLByteToSeg(txn->first_lsn, first, wal_segment_size); + XLByteToSeg(txn->final_lsn, last, wal_segment_size); /* iterate over all possible filenames, and delete them */ for (cur = first; cur <= last; cur++) { char path[MAXPGPATH]; - XLogRecPtr recptr; - XLogSegNoOffsetToRecPtr(cur, 0, recptr); - - sprintf(path, "pg_replslot/%s/xid-%u-lsn-%X-%X.snap", - NameStr(MyReplicationSlot->data.name), txn->xid, - (uint32) (recptr >> 32), (uint32) recptr); + ReorderBufferSerializedPath(path, MyReplicationSlot, txn->xid, cur); if (unlink(path) != 0 && errno != ENOENT) ereport(ERROR, (errcode_for_file_access(), @@ -2596,6 +2816,63 @@ ReorderBufferRestoreCleanup(ReorderBuffer *rb, ReorderBufferTXN *txn) } } +/* + * Remove any leftover serialized reorder buffers from a slot directory after a + * prior crash or decoding session exit. + */ +static void +ReorderBufferCleanupSerializedTXNs(const char *slotname) +{ + DIR *spill_dir; + struct dirent *spill_de; + struct stat statbuf; + char path[MAXPGPATH * 2 + 12]; + + sprintf(path, "pg_replslot/%s", slotname); + + /* we're only handling directories here, skip if it's not ours */ + if (lstat(path, &statbuf) == 0 && !S_ISDIR(statbuf.st_mode)) + return; + + spill_dir = AllocateDir(path); + while ((spill_de = ReadDirExtended(spill_dir, path, INFO)) != NULL) + { + /* only look at names that can be ours */ + if (strncmp(spill_de->d_name, "xid", 3) == 0) + { + snprintf(path, sizeof(path), + "pg_replslot/%s/%s", slotname, + spill_de->d_name); + + if (unlink(path) != 0) + ereport(ERROR, + (errcode_for_file_access(), + errmsg("could not remove file \"%s\" during removal of pg_replslot/%s/xid*: %m", + path, slotname))); + } + } + FreeDir(spill_dir); +} + +/* + * Given a replication slot, transaction ID and segment number, fill in the + * corresponding spill file into 'path', which is a caller-owned buffer of size + * at least MAXPGPATH. 
+ */ +static void +ReorderBufferSerializedPath(char *path, ReplicationSlot *slot, TransactionId xid, + XLogSegNo segno) +{ + XLogRecPtr recptr; + + XLogSegNoOffsetToRecPtr(segno, 0, wal_segment_size, recptr); + + snprintf(path, MAXPGPATH, "pg_replslot/%s/xid-%u-lsn-%X-%X.spill", + NameStr(MyReplicationSlot->data.name), + xid, + (uint32) (recptr >> 32), (uint32) recptr); +} + /* * Delete all data spilled to disk after we've restarted/crashed. It will be * recreated when the respective slots are reused. @@ -2606,15 +2883,9 @@ StartupReorderBuffer(void) DIR *logical_dir; struct dirent *logical_de; - DIR *spill_dir; - struct dirent *spill_de; - logical_dir = AllocateDir("pg_replslot"); while ((logical_de = ReadDir(logical_dir, "pg_replslot")) != NULL) { - struct stat statbuf; - char path[MAXPGPATH * 2 + 12]; - if (strcmp(logical_de->d_name, ".") == 0 || strcmp(logical_de->d_name, "..") == 0) continue; @@ -2627,33 +2898,7 @@ StartupReorderBuffer(void) * ok, has to be a surviving logical slot, iterate and delete * everything starting with xid-* */ - sprintf(path, "pg_replslot/%s", logical_de->d_name); - - /* we're only creating directories here, skip if it's not our's */ - if (lstat(path, &statbuf) == 0 && !S_ISDIR(statbuf.st_mode)) - continue; - - spill_dir = AllocateDir(path); - while ((spill_de = ReadDir(spill_dir, path)) != NULL) - { - if (strcmp(spill_de->d_name, ".") == 0 || - strcmp(spill_de->d_name, "..") == 0) - continue; - - /* only look at names that can be ours */ - if (strncmp(spill_de->d_name, "xid", 3) == 0) - { - sprintf(path, "pg_replslot/%s/%s", logical_de->d_name, - spill_de->d_name); - - if (unlink(path) != 0) - ereport(PANIC, - (errcode_for_file_access(), - errmsg("could not remove file \"%s\": %m", - path))); - } - } - FreeDir(spill_dir); + ReorderBufferCleanupSerializedTXNs(logical_de->d_name); } FreeDir(logical_dir); } @@ -2800,7 +3045,7 @@ ReorderBufferToastReplace(ReorderBuffer *rb, ReorderBufferTXN *txn, for (natt = 0; natt < desc->natts; natt++) { - Form_pg_attribute attr = desc->attrs[natt]; + Form_pg_attribute attr = TupleDescAttr(desc, natt); ReorderBufferToastEnt *ent; struct varlena *varlena; @@ -3037,7 +3282,7 @@ ApplyLogicalMappingFile(HTAB *tuplecid_data, Oid relid, const char *fname) LogicalRewriteMappingData map; sprintf(path, "pg_logical/mappings/%s", fname); - fd = OpenTransientFile(path, O_RDONLY | PG_BINARY, 0); + fd = OpenTransientFile(path, O_RDONLY | PG_BINARY); if (fd < 0) ereport(ERROR, (errcode_for_file_access(), @@ -3115,11 +3360,13 @@ ApplyLogicalMappingFile(HTAB *tuplecid_data, Oid relid, const char *fname) new_ent->combocid = ent->combocid; } } + + CloseTransientFile(fd); } /* - * Check whether the TransactionOId 'xid' is in the pre-sorted array 'xip'. + * Check whether the TransactionOid 'xid' is in the pre-sorted array 'xip'. */ static bool TransactionIdInArray(TransactionId xid, TransactionId *xip, Size num) diff --git a/src/backend/replication/logical/snapbuild.c b/src/backend/replication/logical/snapbuild.c index fba57a0470..a6cd6c67d1 100644 --- a/src/backend/replication/logical/snapbuild.c +++ b/src/backend/replication/logical/snapbuild.c @@ -42,7 +42,7 @@ * catalog in a transaction. During normal operation this is achieved by using * CommandIds/cmin/cmax. The problem with that however is that for space * efficiency reasons only one value of that is stored - * (c.f. combocid.c). Since ComboCids are only available in memory we log + * (cf. combocid.c). 
Since ComboCids are only available in memory we log * additional information which allows us to get the original (cmin, cmax) * pair during visibility checks. Check the reorderbuffer.c's comment above * ResolveCminCmaxDuringDecoding() for details. @@ -107,7 +107,7 @@ * is a convenient point to initialize replication from, which is why we * export a snapshot at that point, which *can* be used to read normal data. * - * Copyright (c) 2012-2017, PostgreSQL Global Development Group + * Copyright (c) 2012-2018, PostgreSQL Global Development Group * * IDENTIFICATION * src/backend/replication/snapbuild.c @@ -830,9 +830,9 @@ SnapBuildDistributeNewCatalogSnapshot(SnapBuild *builder, XLogRecPtr lsn) * all. We'll add a snapshot when the first change gets queued. * * NB: This works correctly even for subtransactions because - * ReorderBufferCommitChild() takes care to pass the parent the base - * snapshot, and while iterating the changequeue we'll get the change - * from the subtxn. + * ReorderBufferAssignChild() takes care to transfer the base snapshot + * to the top-level transaction, and while iterating the changequeue + * we'll get the change from the subtxn. */ if (!ReorderBufferXidHasBaseSnapshot(builder->reorder, txn->xid)) continue; @@ -1074,7 +1074,7 @@ SnapBuildCommitTxn(SnapBuild *builder, XLogRecPtr lsn, TransactionId xid, /* refcount of the snapshot builder for the new snapshot */ SnapBuildSnapIncRefcount(builder->snapshot); - /* add a new Snapshot to all currently running transactions */ + /* add a new catalog snapshot to all currently running transactions */ SnapBuildDistributeNewCatalogSnapshot(builder, lsn); } } @@ -1094,6 +1094,7 @@ void SnapBuildProcessRunningXacts(SnapBuild *builder, XLogRecPtr lsn, xl_running_xacts *running) { ReorderBufferTXN *txn; + TransactionId xmin; /* * If we're not consistent yet, inspect the record to see whether it @@ -1126,15 +1127,21 @@ SnapBuildProcessRunningXacts(SnapBuild *builder, XLogRecPtr lsn, xl_running_xact /* Remove transactions we don't need to keep track off anymore */ SnapBuildPurgeCommittedTxn(builder); - elog(DEBUG3, "xmin: %u, xmax: %u, oldestrunning: %u", - builder->xmin, builder->xmax, - running->oldestRunningXid); - /* - * Increase shared memory limits, so vacuum can work on tuples we - * prevented from being pruned till now. + * Advance the xmin limit for the current replication slot, to allow + * vacuum to clean up the tuples this slot has been protecting. + * + * The reorderbuffer might have an xmin among the currently running + * snapshots; use it if so. If not, we need only consider the snapshots + * we'll produce later, which can't be less than the oldest running xid in + * the record we're reading now. */ - LogicalIncreaseXminForSlot(lsn, running->oldestRunningXid); + xmin = ReorderBufferGetOldestXmin(builder->reorder); + if (xmin == InvalidTransactionId) + xmin = running->oldestRunningXid; + elog(DEBUG3, "xmin: %u, xmax: %u, oldest running: %u, oldest xmin: %u", + builder->xmin, builder->xmax, running->oldestRunningXid, xmin); + LogicalIncreaseXminForSlot(lsn, xmin); /* * Also tell the slot where we can restart decoding from. 
We don't want to @@ -1597,16 +1604,21 @@ SnapBuildSerialize(SnapBuild *builder, XLogRecPtr lsn) /* we have valid data now, open tempfile and write it there */ fd = OpenTransientFile(tmppath, - O_CREAT | O_EXCL | O_WRONLY | PG_BINARY, - S_IRUSR | S_IWUSR); + O_CREAT | O_EXCL | O_WRONLY | PG_BINARY); if (fd < 0) ereport(ERROR, (errmsg("could not open file \"%s\": %m", path))); + errno = 0; pgstat_report_wait_start(WAIT_EVENT_SNAPBUILD_WRITE); if ((write(fd, ondisk, needed_length)) != needed_length) { + int save_errno = errno; + CloseTransientFile(fd); + + /* if write didn't set errno, assume problem is no disk space */ + errno = save_errno ? save_errno : ENOSPC; ereport(ERROR, (errcode_for_file_access(), errmsg("could not write to file \"%s\": %m", tmppath))); @@ -1624,7 +1636,10 @@ SnapBuildSerialize(SnapBuild *builder, XLogRecPtr lsn) pgstat_report_wait_start(WAIT_EVENT_SNAPBUILD_SYNC); if (pg_fsync(fd) != 0) { + int save_errno = errno; + CloseTransientFile(fd); + errno = save_errno; ereport(ERROR, (errcode_for_file_access(), errmsg("could not fsync file \"%s\": %m", tmppath))); @@ -1682,7 +1697,7 @@ SnapBuildRestore(SnapBuild *builder, XLogRecPtr lsn) sprintf(path, "pg_logical/snapshots/%X-%X.snap", (uint32) (lsn >> 32), (uint32) lsn); - fd = OpenTransientFile(path, O_RDONLY | PG_BINARY, 0); + fd = OpenTransientFile(path, O_RDONLY | PG_BINARY); if (fd < 0 && errno == ENOENT) return false; @@ -1709,11 +1724,23 @@ SnapBuildRestore(SnapBuild *builder, XLogRecPtr lsn) pgstat_report_wait_end(); if (readBytes != SnapBuildOnDiskConstantSize) { + int save_errno = errno; + CloseTransientFile(fd); - ereport(ERROR, - (errcode_for_file_access(), - errmsg("could not read file \"%s\", read %d of %d: %m", - path, readBytes, (int) SnapBuildOnDiskConstantSize))); + + if (readBytes < 0) + { + errno = save_errno; + ereport(ERROR, + (errcode_for_file_access(), + errmsg("could not read file \"%s\": %m", path))); + } + else + ereport(ERROR, + (errcode(ERRCODE_DATA_CORRUPTED), + errmsg("could not read file \"%s\": read %d of %zu", + path, readBytes, + (Size) SnapBuildOnDiskConstantSize))); } if (ondisk.magic != SNAPBUILD_MAGIC) @@ -1737,11 +1764,22 @@ SnapBuildRestore(SnapBuild *builder, XLogRecPtr lsn) pgstat_report_wait_end(); if (readBytes != sizeof(SnapBuild)) { + int save_errno = errno; + CloseTransientFile(fd); - ereport(ERROR, - (errcode_for_file_access(), - errmsg("could not read file \"%s\", read %d of %d: %m", - path, readBytes, (int) sizeof(SnapBuild)))); + + if (readBytes < 0) + { + errno = save_errno; + ereport(ERROR, + (errcode_for_file_access(), + errmsg("could not read file \"%s\": %m", path))); + } + else + ereport(ERROR, + (errcode(ERRCODE_DATA_CORRUPTED), + errmsg("could not read file \"%s\": read %d of %zu", + path, readBytes, sizeof(SnapBuild)))); } COMP_CRC32C(checksum, &ondisk.builder, sizeof(SnapBuild)); @@ -1754,11 +1792,22 @@ SnapBuildRestore(SnapBuild *builder, XLogRecPtr lsn) pgstat_report_wait_end(); if (readBytes != sz) { + int save_errno = errno; + CloseTransientFile(fd); - ereport(ERROR, - (errcode_for_file_access(), - errmsg("could not read file \"%s\", read %d of %d: %m", - path, readBytes, (int) sz))); + + if (readBytes < 0) + { + errno = save_errno; + ereport(ERROR, + (errcode_for_file_access(), + errmsg("could not read file \"%s\": %m", path))); + } + else + ereport(ERROR, + (errcode(ERRCODE_DATA_CORRUPTED), + errmsg("could not read file \"%s\": read %d of %zu", + path, readBytes, sz))); } COMP_CRC32C(checksum, ondisk.builder.was_running.was_xip, sz); @@ -1770,11 +1819,22 @@ 
SnapBuildRestore(SnapBuild *builder, XLogRecPtr lsn) pgstat_report_wait_end(); if (readBytes != sz) { + int save_errno = errno; + CloseTransientFile(fd); - ereport(ERROR, - (errcode_for_file_access(), - errmsg("could not read file \"%s\", read %d of %d: %m", - path, readBytes, (int) sz))); + + if (readBytes < 0) + { + errno = save_errno; + ereport(ERROR, + (errcode_for_file_access(), + errmsg("could not read file \"%s\": %m", path))); + } + else + ereport(ERROR, + (errcode(ERRCODE_DATA_CORRUPTED), + errmsg("could not read file \"%s\": read %d of %zu", + path, readBytes, sz))); } COMP_CRC32C(checksum, ondisk.builder.committed.xip, sz); diff --git a/src/backend/replication/logical/tablesync.c b/src/backend/replication/logical/tablesync.c index 4cca0f1a85..6e420d893c 100644 --- a/src/backend/replication/logical/tablesync.c +++ b/src/backend/replication/logical/tablesync.c @@ -2,7 +2,7 @@ * tablesync.c * PostgreSQL logical replication * - * Copyright (c) 2012-2017, PostgreSQL Global Development Group + * Copyright (c) 2012-2018, PostgreSQL Global Development Group * * IDENTIFICATION * src/backend/replication/logical/tablesync.c @@ -298,11 +298,10 @@ process_syncing_tables_for_sync(XLogRecPtr current_lsn) SpinLockRelease(&MyLogicalRepWorker->relmutex); - SetSubscriptionRelState(MyLogicalRepWorker->subid, - MyLogicalRepWorker->relid, - MyLogicalRepWorker->relstate, - MyLogicalRepWorker->relstate_lsn, - true); + UpdateSubscriptionRelState(MyLogicalRepWorker->subid, + MyLogicalRepWorker->relid, + MyLogicalRepWorker->relstate, + MyLogicalRepWorker->relstate_lsn); walrcv_endstreaming(wrconn, &tli); finish_sync_worker(); @@ -427,9 +426,10 @@ process_syncing_tables_for_apply(XLogRecPtr current_lsn) StartTransactionCommand(); started_tx = true; } - SetSubscriptionRelState(MyLogicalRepWorker->subid, - rstate->relid, rstate->state, - rstate->lsn, true); + + UpdateSubscriptionRelState(MyLogicalRepWorker->subid, + rstate->relid, rstate->state, + rstate->lsn); } } else @@ -795,7 +795,8 @@ copy_table(Relation rel) copybuf = makeStringInfo(); pstate = make_parsestate(NULL); - addRangeTableEntryForRelation(pstate, rel, NULL, false, false); + addRangeTableEntryForRelation(pstate, rel, AccessShareLock, + NULL, false, false); attnamelist = make_copy_attnamelist(relmapentry); cstate = BeginCopyFrom(pstate, rel, NULL, false, copy_read_data, attnamelist, NIL); @@ -870,11 +871,10 @@ LogicalRepSyncTableStart(XLogRecPtr *origin_startpos) /* Update the state and make it visible to others. */ StartTransactionCommand(); - SetSubscriptionRelState(MyLogicalRepWorker->subid, - MyLogicalRepWorker->relid, - MyLogicalRepWorker->relstate, - MyLogicalRepWorker->relstate_lsn, - true); + UpdateSubscriptionRelState(MyLogicalRepWorker->subid, + MyLogicalRepWorker->relid, + MyLogicalRepWorker->relstate, + MyLogicalRepWorker->relstate_lsn); CommitTransactionCommand(); pgstat_report_stat(false); @@ -961,11 +961,10 @@ LogicalRepSyncTableStart(XLogRecPtr *origin_startpos) * Update the new state in catalog. No need to bother * with the shmem state as we are exiting for good. 
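The SnapBuildRestore hunks a little further up all switch to the same pattern for handling read() results: a negative return is a real I/O error reported with errno, while a short read is reported as data corruption and deliberately not formatted with %m. A generic standalone sketch of that classification, with the server's ereport calls replaced by fprintf for illustration:

    #include <errno.h>
    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>

    /* Read exactly 'expected' bytes; distinguish I/O errors from short reads. */
    static int
    read_exact(int fd, void *buf, size_t expected, const char *path)
    {
        ssize_t nread;

        errno = 0;
        nread = read(fd, buf, expected);

        if (nread < 0)
        {
            /* genuine I/O error: errno is meaningful */
            fprintf(stderr, "could not read file \"%s\": %s\n",
                    path, strerror(errno));
            return -1;
        }
        if ((size_t) nread != expected)
        {
            /* short read: the file is truncated or corrupt, errno says nothing */
            fprintf(stderr, "could not read file \"%s\": read %zd of %zu\n",
                    path, nread, expected);
            return -1;
        }
        return 0;
    }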
*/ - SetSubscriptionRelState(MyLogicalRepWorker->subid, - MyLogicalRepWorker->relid, - SUBREL_STATE_SYNCDONE, - *origin_startpos, - true); + UpdateSubscriptionRelState(MyLogicalRepWorker->subid, + MyLogicalRepWorker->relid, + SUBREL_STATE_SYNCDONE, + *origin_startpos); finish_sync_worker(); } break; diff --git a/src/backend/replication/logical/worker.c b/src/backend/replication/logical/worker.c index 7c2df57645..277da69fa6 100644 --- a/src/backend/replication/logical/worker.c +++ b/src/backend/replication/logical/worker.c @@ -2,7 +2,7 @@ * worker.c * PostgreSQL logical replication worker (apply) * - * Copyright (c) 2016-2017, PostgreSQL Global Development Group + * Copyright (c) 2016-2018, PostgreSQL Global Development Group * * IDENTIFICATION * src/backend/replication/logical/worker.c @@ -30,10 +30,12 @@ #include "access/xact.h" #include "access/xlog_internal.h" +#include "catalog/catalog.h" #include "catalog/namespace.h" #include "catalog/pg_subscription.h" #include "catalog/pg_subscription_rel.h" +#include "commands/tablecmds.h" #include "commands/trigger.h" #include "executor/executor.h" @@ -83,6 +85,7 @@ #include "utils/inval.h" #include "utils/lsyscache.h" #include "utils/memutils.h" +#include "utils/rel.h" #include "utils/timeout.h" #include "utils/tqual.h" #include "utils/syscache.h" @@ -100,8 +103,9 @@ static dlist_head lsn_mapping = DLIST_STATIC_INIT(lsn_mapping); typedef struct SlotErrCallbackArg { - LogicalRepRelation *rel; - int attnum; + LogicalRepRelMapEntry *rel; + int local_attnum; + int remote_attnum; } SlotErrCallbackArg; static MemoryContext ApplyMessageContext = NULL; @@ -195,7 +199,8 @@ create_estate_for_relation(LogicalRepRelMapEntry *rel) rte->rtekind = RTE_RELATION; rte->relid = RelationGetRelid(rel->localrel); rte->relkind = rel->localrel->rd_rel->relkind; - estate->es_range_table = list_make1(rte); + rte->rellockmode = AccessShareLock; + ExecInitRangeTable(estate, list_make1(rte)); resultRelInfo = makeNode(ResultRelInfo); InitResultRelInfo(resultRelInfo, rel->localrel, 1, NULL, 0); @@ -204,9 +209,11 @@ create_estate_for_relation(LogicalRepRelMapEntry *rel) estate->es_num_result_relations = 1; estate->es_result_relation_info = resultRelInfo; + estate->es_output_cid = GetCurrentCommandId(true); + /* Triggers might need a slot */ if (resultRelInfo->ri_TrigDesc) - estate->es_trig_tuple_slot = ExecInitExtraTupleSlot(estate); + estate->es_trig_tuple_slot = ExecInitExtraTupleSlot(estate, NULL); /* Prepare to catch AFTER triggers. 
*/ AfterTriggerBeginQuery(); @@ -247,7 +254,7 @@ slot_fill_defaults(LogicalRepRelMapEntry *rel, EState *estate, { Expr *defexpr; - if (desc->attrs[attnum]->attisdropped) + if (TupleDescAttr(desc, attnum)->attisdropped) continue; if (rel->attrmap[attnum] >= 0) @@ -280,19 +287,29 @@ static void slot_store_error_callback(void *arg) { SlotErrCallbackArg *errarg = (SlotErrCallbackArg *) arg; + LogicalRepRelMapEntry *rel; + char *remotetypname; Oid remotetypoid, localtypoid; - if (errarg->attnum < 0) + /* Nothing to do if remote attribute number is not set */ + if (errarg->remote_attnum < 0) return; - remotetypoid = errarg->rel->atttyps[errarg->attnum]; - localtypoid = logicalrep_typmap_getid(remotetypoid); + rel = errarg->rel; + remotetypoid = rel->remoterel.atttyps[errarg->remote_attnum]; + + /* Fetch remote type name from the LogicalRepTypMap cache */ + remotetypname = logicalrep_typmap_gettypname(remotetypoid); + + /* Fetch local type OID from the local sys cache */ + localtypoid = get_atttype(rel->localreloid, errarg->local_attnum + 1); + errcontext("processing remote data for replication target relation \"%s.%s\" column \"%s\", " "remote type %s, local type %s", - errarg->rel->nspname, errarg->rel->relname, - errarg->rel->attnames[errarg->attnum], - format_type_be(remotetypoid), + rel->remoterel.nspname, rel->remoterel.relname, + rel->remoterel.attnames[errarg->remote_attnum], + remotetypname, format_type_be(localtypoid)); } @@ -313,8 +330,9 @@ slot_store_cstrings(TupleTableSlot *slot, LogicalRepRelMapEntry *rel, ExecClearTuple(slot); /* Push callback + info on the error context stack */ - errarg.rel = &rel->remoterel; - errarg.attnum = -1; + errarg.rel = rel; + errarg.local_attnum = -1; + errarg.remote_attnum = -1; errcallback.callback = slot_store_error_callback; errcallback.arg = (void *) &errarg; errcallback.previous = error_context_stack; @@ -323,7 +341,7 @@ slot_store_cstrings(TupleTableSlot *slot, LogicalRepRelMapEntry *rel, /* Call the "in" function for each non-dropped attribute */ for (i = 0; i < natts; i++) { - Form_pg_attribute att = slot->tts_tupleDescriptor->attrs[i]; + Form_pg_attribute att = TupleDescAttr(slot->tts_tupleDescriptor, i); int remoteattnum = rel->attrmap[i]; if (!att->attisdropped && remoteattnum >= 0 && @@ -332,14 +350,17 @@ slot_store_cstrings(TupleTableSlot *slot, LogicalRepRelMapEntry *rel, Oid typinput; Oid typioparam; - errarg.attnum = remoteattnum; + errarg.local_attnum = i; + errarg.remote_attnum = remoteattnum; getTypeInputInfo(att->atttypid, &typinput, &typioparam); - slot->tts_values[i] = OidInputFunctionCall(typinput, - values[remoteattnum], - typioparam, - att->atttypmod); + slot->tts_values[i] = + OidInputFunctionCall(typinput, values[remoteattnum], + typioparam, att->atttypmod); slot->tts_isnull[i] = false; + + errarg.local_attnum = -1; + errarg.remote_attnum = -1; } else { @@ -378,8 +399,9 @@ slot_modify_cstrings(TupleTableSlot *slot, LogicalRepRelMapEntry *rel, ExecClearTuple(slot); /* Push callback + info on the error context stack */ - errarg.rel = &rel->remoterel; - errarg.attnum = -1; + errarg.rel = rel; + errarg.local_attnum = -1; + errarg.remote_attnum = -1; errcallback.callback = slot_store_error_callback; errcallback.arg = (void *) &errarg; errcallback.previous = error_context_stack; @@ -388,25 +410,31 @@ slot_modify_cstrings(TupleTableSlot *slot, LogicalRepRelMapEntry *rel, /* Call the "in" function for each replaced attribute */ for (i = 0; i < natts; i++) { - Form_pg_attribute att = slot->tts_tupleDescriptor->attrs[i]; + 
Form_pg_attribute att = TupleDescAttr(slot->tts_tupleDescriptor, i); int remoteattnum = rel->attrmap[i]; - if (remoteattnum >= 0 && !replaces[remoteattnum]) + if (remoteattnum < 0) continue; - if (remoteattnum >= 0 && values[remoteattnum] != NULL) + if (!replaces[remoteattnum]) + continue; + + if (values[remoteattnum] != NULL) { Oid typinput; Oid typioparam; - errarg.attnum = remoteattnum; + errarg.local_attnum = i; + errarg.remote_attnum = remoteattnum; getTypeInputInfo(att->atttypid, &typinput, &typioparam); - slot->tts_values[i] = OidInputFunctionCall(typinput, - values[remoteattnum], - typioparam, - att->atttypmod); + slot->tts_values[i] = + OidInputFunctionCall(typinput, values[remoteattnum], + typioparam, att->atttypmod); slot->tts_isnull[i] = false; + + errarg.local_attnum = -1; + errarg.remote_attnum = -1; } else { @@ -580,8 +608,11 @@ apply_handle_insert(StringInfo s) /* Initialize the executor state. */ estate = create_estate_for_relation(rel); - remoteslot = ExecInitExtraTupleSlot(estate); - ExecSetSlotDescriptor(remoteslot, RelationGetDescr(rel->localrel)); + remoteslot = ExecInitExtraTupleSlot(estate, + RelationGetDescr(rel->localrel)); + + /* Input functions may need an active snapshot, so get one */ + PushActiveSnapshot(GetTransactionSnapshot()); /* Process and store remote tuple in the slot */ oldctx = MemoryContextSwitchTo(GetPerTupleMemoryContext(estate)); @@ -589,7 +620,6 @@ apply_handle_insert(StringInfo s) slot_fill_defaults(rel, estate, remoteslot); MemoryContextSwitchTo(oldctx); - PushActiveSnapshot(GetTransactionSnapshot()); ExecOpenIndices(estate->es_result_relation_info, false); /* Do the insert. */ @@ -629,7 +659,7 @@ check_relation_updatable(LogicalRepRelMapEntry *rel) { ereport(ERROR, (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE), - errmsg("publisher does not send replica identity column " + errmsg("publisher did not send replica identity column " "expected by the logical replication target relation \"%s.%s\"", rel->remoterel.nspname, rel->remoterel.relname))); } @@ -684,10 +714,10 @@ apply_handle_update(StringInfo s) /* Initialize the executor state. */ estate = create_estate_for_relation(rel); - remoteslot = ExecInitExtraTupleSlot(estate); - ExecSetSlotDescriptor(remoteslot, RelationGetDescr(rel->localrel)); - localslot = ExecInitExtraTupleSlot(estate); - ExecSetSlotDescriptor(localslot, RelationGetDescr(rel->localrel)); + remoteslot = ExecInitExtraTupleSlot(estate, + RelationGetDescr(rel->localrel)); + localslot = ExecInitExtraTupleSlot(estate, + RelationGetDescr(rel->localrel)); EvalPlanQualInit(&epqstate, estate, NULL, NIL, -1); PushActiveSnapshot(GetTransactionSnapshot()); @@ -726,7 +756,7 @@ apply_handle_update(StringInfo s) { /* Process and store remote tuple in the slot */ oldctx = MemoryContextSwitchTo(GetPerTupleMemoryContext(estate)); - ExecStoreTuple(localslot->tts_tuple, remoteslot, InvalidBuffer, false); + ExecStoreHeapTuple(localslot->tts_tuple, remoteslot, false); slot_modify_cstrings(remoteslot, rel, newtup.values, newtup.changed); MemoryContextSwitchTo(oldctx); @@ -802,10 +832,10 @@ apply_handle_delete(StringInfo s) /* Initialize the executor state. 
*/ estate = create_estate_for_relation(rel); - remoteslot = ExecInitExtraTupleSlot(estate); - ExecSetSlotDescriptor(remoteslot, RelationGetDescr(rel->localrel)); - localslot = ExecInitExtraTupleSlot(estate); - ExecSetSlotDescriptor(localslot, RelationGetDescr(rel->localrel)); + remoteslot = ExecInitExtraTupleSlot(estate, + RelationGetDescr(rel->localrel)); + localslot = ExecInitExtraTupleSlot(estate, + RelationGetDescr(rel->localrel)); EvalPlanQualInit(&epqstate, estate, NULL, NIL, -1); PushActiveSnapshot(GetTransactionSnapshot()); @@ -842,10 +872,10 @@ apply_handle_delete(StringInfo s) else { /* The tuple to be deleted could not be found. */ - ereport(DEBUG1, - (errmsg("logical replication could not find row for delete " - "in replication target %s", - RelationGetRelationName(rel->localrel)))); + elog(DEBUG1, + "logical replication could not find row for delete " + "in replication target relation \"%s\"", + RelationGetRelationName(rel->localrel)); } /* Cleanup. */ @@ -864,6 +894,67 @@ apply_handle_delete(StringInfo s) CommandCounterIncrement(); } +/* + * Handle TRUNCATE message. + * + * TODO: FDW support + */ +static void +apply_handle_truncate(StringInfo s) +{ + bool cascade = false; + bool restart_seqs = false; + List *remote_relids = NIL; + List *remote_rels = NIL; + List *rels = NIL; + List *relids = NIL; + List *relids_logged = NIL; + ListCell *lc; + + ensure_transaction(); + + remote_relids = logicalrep_read_truncate(s, &cascade, &restart_seqs); + + foreach(lc, remote_relids) + { + LogicalRepRelId relid = lfirst_oid(lc); + LogicalRepRelMapEntry *rel; + + rel = logicalrep_rel_open(relid, RowExclusiveLock); + if (!should_apply_changes_for_rel(rel)) + { + /* + * The relation can't become interesting in the middle of the + * transaction so it's safe to unlock it. + */ + logicalrep_rel_close(rel, RowExclusiveLock); + continue; + } + + remote_rels = lappend(remote_rels, rel); + rels = lappend(rels, rel->localrel); + relids = lappend_oid(relids, rel->localreloid); + if (RelationIsLogicallyLogged(rel->localrel)) + relids_logged = lappend_oid(relids_logged, rel->localreloid); + } + + /* + * Even if we used CASCADE on the upstream master we explicitly default to + * replaying changes without further cascading. This might be later + * changeable with a user specified option. + */ + ExecuteTruncateGuts(rels, relids, relids_logged, DROP_RESTRICT, restart_seqs); + + foreach(lc, remote_rels) + { + LogicalRepRelMapEntry *rel = lfirst(lc); + + logicalrep_rel_close(rel, NoLock); + } + + CommandCounterIncrement(); +} + /* * Logical replication protocol message dispatcher. @@ -895,6 +986,10 @@ apply_dispatch(StringInfo s) case 'D': apply_handle_delete(s); break; + /* TRUNCATE */ + case 'T': + apply_handle_truncate(s); + break; /* RELATION */ case 'R': apply_handle_relation(s); @@ -910,7 +1005,7 @@ apply_dispatch(StringInfo s) default: ereport(ERROR, (errcode(ERRCODE_PROTOCOL_VIOLATION), - errmsg("invalid logical replication message type %c", action))); + errmsg("invalid logical replication message type \"%c\"", action))); } } @@ -1503,6 +1598,11 @@ ApplyWorkerMain(Datum main_arg) pqsignal(SIGTERM, die); BackgroundWorkerUnblockSignals(); + /* + * We don't currently need any ResourceOwner in a walreceiver process, but + * if we did, we could call CreateAuxProcessResourceOwner here. 
+ */ + /* Initialise stats to a sanish value */ MyLogicalRepWorker->last_send_time = MyLogicalRepWorker->last_recv_time = MyLogicalRepWorker->reply_time = GetCurrentTimestamp(); @@ -1510,17 +1610,14 @@ ApplyWorkerMain(Datum main_arg) /* Load the libpq-specific functions */ load_file("libpqwalreceiver", false); - Assert(CurrentResourceOwner == NULL); - CurrentResourceOwner = ResourceOwnerCreate(NULL, - "logical replication apply"); - /* Run as replica session replication role. */ SetConfigOption("session_replication_role", "replica", PGC_SUSET, PGC_S_OVERRIDE); /* Connect to our database. */ BackgroundWorkerInitializeConnectionByOid(MyLogicalRepWorker->dbid, - MyLogicalRepWorker->userid); + MyLogicalRepWorker->userid, + 0); /* Load the subscription into persistent memory context. */ ApplyContext = AllocSetContextCreate(TopMemoryContext, @@ -1528,14 +1625,20 @@ ApplyWorkerMain(Datum main_arg) ALLOCSET_DEFAULT_SIZES); StartTransactionCommand(); oldctx = MemoryContextSwitchTo(ApplyContext); - MySubscription = GetSubscription(MyLogicalRepWorker->subid, false); + + MySubscription = GetSubscription(MyLogicalRepWorker->subid, true); + if (!MySubscription) + { + ereport(LOG, + (errmsg("logical replication apply worker for subscription %u will not " + "start because the subscription was removed during startup", + MyLogicalRepWorker->subid))); + proc_exit(0); + } + MySubscriptionValid = true; MemoryContextSwitchTo(oldctx); - /* Setup synchronous commit according to the user's wishes */ - SetConfigOption("synchronous_commit", MySubscription->synccommit, - PGC_BACKEND, PGC_S_OVERRIDE); - if (!MySubscription->enabled) { ereport(LOG, @@ -1546,6 +1649,10 @@ ApplyWorkerMain(Datum main_arg) proc_exit(0); } + /* Setup synchronous commit according to the user's wishes */ + SetConfigOption("synchronous_commit", MySubscription->synccommit, + PGC_BACKEND, PGC_S_OVERRIDE); + /* Keep us informed about subscription changes. 
*/ CacheRegisterSyscacheCallback(SUBSCRIPTIONOID, subscription_change_cb, diff --git a/src/backend/replication/pgoutput/pgoutput.c b/src/backend/replication/pgoutput/pgoutput.c index 370b74f232..86e0951a70 100644 --- a/src/backend/replication/pgoutput/pgoutput.c +++ b/src/backend/replication/pgoutput/pgoutput.c @@ -3,7 +3,7 @@ * pgoutput.c * Logical Replication output plugin * - * Copyright (c) 2012-2017, PostgreSQL Global Development Group + * Copyright (c) 2012-2018, PostgreSQL Global Development Group * * IDENTIFICATION * src/backend/replication/pgoutput/pgoutput.c @@ -39,6 +39,9 @@ static void pgoutput_commit_txn(LogicalDecodingContext *ctx, static void pgoutput_change(LogicalDecodingContext *ctx, ReorderBufferTXN *txn, Relation rel, ReorderBufferChange *change); +static void pgoutput_truncate(LogicalDecodingContext *ctx, + ReorderBufferTXN *txn, int nrelations, Relation relations[], + ReorderBufferChange *change); static bool pgoutput_origin_filter(LogicalDecodingContext *ctx, RepOriginId origin_id); @@ -77,6 +80,7 @@ _PG_output_plugin_init(OutputPluginCallbacks *cb) cb->startup_cb = pgoutput_startup; cb->begin_cb = pgoutput_begin_txn; cb->change_cb = pgoutput_change; + cb->truncate_cb = pgoutput_truncate; cb->commit_cb = pgoutput_commit_txn; cb->filter_by_origin_cb = pgoutput_origin_filter; cb->shutdown_cb = pgoutput_shutdown; @@ -115,7 +119,7 @@ parse_output_parameters(List *options, uint32 *protocol_version, if (parsed > PG_UINT32_MAX || parsed < 0) ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), - errmsg("proto_verson \"%s\" out of range", + errmsg("proto_version \"%s\" out of range", strVal(defel->arg)))); *protocol_version = (uint32) parsed; @@ -151,9 +155,7 @@ pgoutput_startup(LogicalDecodingContext *ctx, OutputPluginOptions *opt, /* Create our memory context for private allocations. */ data->context = AllocSetContextCreate(ctx->context, "logical replication output context", - ALLOCSET_DEFAULT_MINSIZE, - ALLOCSET_DEFAULT_INITSIZE, - ALLOCSET_DEFAULT_MAXSIZE); + ALLOCSET_DEFAULT_SIZES); ctx->output_plugin_private = data; @@ -252,6 +254,46 @@ pgoutput_commit_txn(LogicalDecodingContext *ctx, ReorderBufferTXN *txn, OutputPluginWrite(ctx, true); } +/* + * Write the relation schema if the current schema hasn't been sent yet. + */ +static void +maybe_send_schema(LogicalDecodingContext *ctx, + Relation relation, RelationSyncEntry *relentry) +{ + if (!relentry->schema_sent) + { + TupleDesc desc; + int i; + + desc = RelationGetDescr(relation); + + /* + * Write out type info if needed. We do that only for user created + * types. + */ + for (i = 0; i < desc->natts; i++) + { + Form_pg_attribute att = TupleDescAttr(desc, i); + + if (att->attisdropped) + continue; + + if (att->atttypid < FirstNormalObjectId) + continue; + + OutputPluginPrepareWrite(ctx, false); + logicalrep_write_typ(ctx->out, att->atttypid); + OutputPluginWrite(ctx, false); + } + + OutputPluginPrepareWrite(ctx, false); + logicalrep_write_rel(ctx->out, relation); + OutputPluginWrite(ctx, false); + relentry->schema_sent = true; + } +} + /* * Sends the decoded DML over wire. 
*/ @@ -263,6 +305,9 @@ pgoutput_change(LogicalDecodingContext *ctx, ReorderBufferTXN *txn, MemoryContext old; RelationSyncEntry *relentry; + if (!is_publishable_relation(relation)) + return; + relentry = get_rel_sync_entry(data, RelationGetRelid(relation)); /* First check the table filter */ @@ -287,40 +332,7 @@ pgoutput_change(LogicalDecodingContext *ctx, ReorderBufferTXN *txn, /* Avoid leaking memory by using and resetting our own context */ old = MemoryContextSwitchTo(data->context); - /* - * Write the relation schema if the current schema haven't been sent yet. - */ - if (!relentry->schema_sent) - { - TupleDesc desc; - int i; - - desc = RelationGetDescr(relation); - - /* - * Write out type info if needed. We do that only for user created - * types. - */ - for (i = 0; i < desc->natts; i++) - { - Form_pg_attribute att = desc->attrs[i]; - - if (att->attisdropped) - continue; - - if (att->atttypid < FirstNormalObjectId) - continue; - - OutputPluginPrepareWrite(ctx, false); - logicalrep_write_typ(ctx->out, att->atttypid); - OutputPluginWrite(ctx, false); - } - - OutputPluginPrepareWrite(ctx, false); - logicalrep_write_rel(ctx->out, relation); - OutputPluginWrite(ctx, false); - relentry->schema_sent = true; - } + maybe_send_schema(ctx, relation, relentry); /* Send the data */ switch (change->action) @@ -362,6 +374,54 @@ pgoutput_change(LogicalDecodingContext *ctx, ReorderBufferTXN *txn, MemoryContextReset(data->context); } +static void +pgoutput_truncate(LogicalDecodingContext *ctx, ReorderBufferTXN *txn, + int nrelations, Relation relations[], ReorderBufferChange *change) +{ + PGOutputData *data = (PGOutputData *) ctx->output_plugin_private; + MemoryContext old; + RelationSyncEntry *relentry; + int i; + int nrelids; + Oid *relids; + + old = MemoryContextSwitchTo(data->context); + + relids = palloc0(nrelations * sizeof(Oid)); + nrelids = 0; + + for (i = 0; i < nrelations; i++) + { + Relation relation = relations[i]; + Oid relid = RelationGetRelid(relation); + + if (!is_publishable_relation(relation)) + continue; + + relentry = get_rel_sync_entry(data, relid); + + if (!relentry->pubactions.pubtruncate) + continue; + + relids[nrelids++] = relid; + maybe_send_schema(ctx, relation, relentry); + } + + if (nrelids > 0) + { + OutputPluginPrepareWrite(ctx, true); + logicalrep_write_truncate(ctx->out, + nrelids, + relids, + change->data.truncate.cascade, + change->data.truncate.restart_seqs); + OutputPluginWrite(ctx, true); + } + + MemoryContextSwitchTo(old); + MemoryContextReset(data->context); +} + /* * Currently we always forward. */ @@ -503,7 +563,7 @@ get_rel_sync_entry(PGOutputData *data, Oid relid) * we only need to consider ones that the subscriber requested. 
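The loop that follows this comment builds the effective action set by OR-ing the actions of every publication the relation belongs to, and bails out early once insert, update, delete and truncate are all enabled. A small standalone sketch of that accumulate-with-early-exit pattern, using simplified stand-in types rather than the real catalog structures:

    #include <stdbool.h>

    typedef struct PubActions
    {
        bool    pubinsert;
        bool    pubupdate;
        bool    pubdelete;
        bool    pubtruncate;
    } PubActions;

    /* Union of the actions published by every publication covering a relation. */
    static PubActions
    accumulate_pubactions(const PubActions *pubs, int npubs)
    {
        PubActions acc = {false, false, false, false};

        for (int i = 0; i < npubs; i++)
        {
            acc.pubinsert   |= pubs[i].pubinsert;
            acc.pubupdate   |= pubs[i].pubupdate;
            acc.pubdelete   |= pubs[i].pubdelete;
            acc.pubtruncate |= pubs[i].pubtruncate;

            /* nothing further can change once every action is enabled */
            if (acc.pubinsert && acc.pubupdate &&
                acc.pubdelete && acc.pubtruncate)
                break;
        }
        return acc;
    }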
*/ entry->pubactions.pubinsert = entry->pubactions.pubupdate = - entry->pubactions.pubdelete = false; + entry->pubactions.pubdelete = entry->pubactions.pubtruncate = false; foreach(lc, data->publications) { @@ -514,10 +574,11 @@ get_rel_sync_entry(PGOutputData *data, Oid relid) entry->pubactions.pubinsert |= pub->pubactions.pubinsert; entry->pubactions.pubupdate |= pub->pubactions.pubupdate; entry->pubactions.pubdelete |= pub->pubactions.pubdelete; + entry->pubactions.pubtruncate |= pub->pubactions.pubtruncate; } if (entry->pubactions.pubinsert && entry->pubactions.pubupdate && - entry->pubactions.pubdelete) + entry->pubactions.pubdelete && entry->pubactions.pubtruncate) break; } diff --git a/src/backend/replication/repl_gram.y b/src/backend/replication/repl_gram.y index ec047c827c..843a878ff3 100644 --- a/src/backend/replication/repl_gram.y +++ b/src/backend/replication/repl_gram.y @@ -3,7 +3,7 @@ * * repl_gram.y - Parser for the replication commands * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * @@ -72,10 +72,12 @@ static SQLCmd *make_sqlcmd(void); %token K_LABEL %token K_PROGRESS %token K_FAST +%token K_WAIT %token K_NOWAIT %token K_MAX_RATE %token K_WAL %token K_TABLESPACE_MAP +%token K_NOVERIFY_CHECKSUMS %token K_TIMELINE %token K_PHYSICAL %token K_LOGICAL @@ -153,7 +155,7 @@ var_name: IDENT { $$ = $1; } /* * BASE_BACKUP [LABEL '
tag attributes, or unset if none\n" msgstr " \\T [TEXT] HTML
-Tag-Attribute setzen oder löschen\n" -#: help.c:273 +#: help.c:283 #, c-format msgid " \\x [on|off|auto] toggle expanded output (currently %s)\n" msgstr " \\x [on|off|auto] erweiterte Ausgabe umschalten (gegenwärtig %s)\n" -#: help.c:277 +#: help.c:287 #, c-format msgid "Connection\n" msgstr "Verbindung\n" -#: help.c:279 +#: help.c:289 #, c-format msgid "" " \\c[onnect] {[DBNAME|- USER|- HOST|- PORT|-] | conninfo}\n" @@ -2732,7 +2904,7 @@ msgstr "" " \\c[onnect] {[DBNAME|- BENUTZER|- HOST|- PORT|-] | conninfo}\n" " mit neuer Datenbank verbinden (aktuell »%s«)\n" -#: help.c:283 +#: help.c:293 #, c-format msgid "" " \\c[onnect] {[DBNAME|- USER|- HOST|- PORT|-] | conninfo}\n" @@ -2741,74 +2913,74 @@ msgstr "" " \\c[onnect] {[DBNAME|- BENUTZER|- HOST|- PORT|-] | conninfo}\n" " mit neuer Datenbank verbinden (aktuell keine Verbindung)\n" -#: help.c:285 +#: help.c:295 +#, c-format +msgid " \\conninfo display information about current connection\n" +msgstr " \\conninfo Informationen über aktuelle Verbindung anzeigen\n" + +#: help.c:296 #, c-format msgid " \\encoding [ENCODING] show or set client encoding\n" msgstr " \\encoding [KODIERUNG] Client-Kodierung zeigen oder setzen\n" -#: help.c:286 +#: help.c:297 #, c-format msgid " \\password [USERNAME] securely change the password for a user\n" msgstr "" " \\password [BENUTZERNAME]\n" " sicheres Ändern eines Benutzerpasswortes\n" -#: help.c:287 -#, c-format -msgid " \\conninfo display information about current connection\n" -msgstr " \\conninfo Informationen über aktuelle Verbindung anzeigen\n" - -#: help.c:290 +#: help.c:300 #, c-format msgid "Operating System\n" msgstr "Betriebssystem\n" -#: help.c:291 +#: help.c:301 #, c-format msgid " \\cd [DIR] change the current working directory\n" msgstr " \\cd [VERZ] Arbeitsverzeichnis wechseln\n" -#: help.c:292 +#: help.c:302 #, c-format msgid " \\setenv NAME [VALUE] set or unset environment variable\n" msgstr " \\setenv NAME [WERT] Umgebungsvariable setzen oder löschen\n" -#: help.c:293 +#: help.c:303 #, c-format msgid " \\timing [on|off] toggle timing of commands (currently %s)\n" msgstr " \\timing [on|off] Zeitmessung umschalten (gegenwärtig %s)\n" -#: help.c:295 +#: help.c:305 #, c-format msgid " \\! [COMMAND] execute command in shell or start interactive shell\n" msgstr " \\! 
[BEFEHL] Befehl in Shell ausführen oder interaktive Shell starten\n" -#: help.c:298 +#: help.c:308 #, c-format msgid "Variables\n" msgstr "Variablen\n" -#: help.c:299 +#: help.c:309 #, c-format msgid " \\prompt [TEXT] NAME prompt user to set internal variable\n" msgstr " \\prompt [TEXT] NAME interne Variable vom Benutzer abfragen\n" -#: help.c:300 +#: help.c:310 #, c-format msgid " \\set [NAME [VALUE]] set internal variable, or list all if no parameters\n" msgstr " \\set [NAME [WERT]] interne Variable setzen, oder alle anzeigen\n" -#: help.c:301 +#: help.c:311 #, c-format msgid " \\unset NAME unset (delete) internal variable\n" msgstr " \\unset NAME interne Variable löschen\n" -#: help.c:304 +#: help.c:314 #, c-format msgid "Large Objects\n" msgstr "Large Objects\n" -#: help.c:305 +#: help.c:315 #, c-format msgid "" " \\lo_export LOBOID FILE\n" @@ -2821,7 +2993,7 @@ msgstr "" " \\lo_list\n" " \\lo_unlink LOBOID Large-Object-Operationen\n" -#: help.c:332 +#: help.c:342 #, c-format msgid "" "List of specially treated variables\n" @@ -2830,12 +3002,12 @@ msgstr "" "Liste besonderer Variablen\n" "\n" -#: help.c:334 +#: help.c:344 #, c-format msgid "psql variables:\n" msgstr "psql-Variablen:\n" -#: help.c:336 +#: help.c:346 #, c-format msgid "" " psql --set=NAME=VALUE\n" @@ -2846,165 +3018,288 @@ msgstr "" " oder \\set NAME WERT innerhalb von psql\n" "\n" -#: help.c:338 -#, c-format -msgid " AUTOCOMMIT if set, successful SQL commands are automatically committed\n" +#: help.c:348 +#, fuzzy, c-format +#| msgid " AUTOCOMMIT if set, successful SQL commands are automatically committed\n" +msgid "" +" AUTOCOMMIT\n" +" if set, successful SQL commands are automatically committed\n" msgstr "" " AUTOCOMMIT wenn gesetzt werden alle erfolgreichen SQL-Befehle\n" " automatisch committet\n" -#: help.c:339 -#, c-format +#: help.c:350 +#, fuzzy, c-format +#| msgid "" +#| " COMP_KEYWORD_CASE determines the case used to complete SQL key words\n" +#| " [lower, upper, preserve-lower, preserve-upper]\n" msgid "" -" COMP_KEYWORD_CASE determines the case used to complete SQL key words\n" -" [lower, upper, preserve-lower, preserve-upper]\n" +" COMP_KEYWORD_CASE\n" +" determines the case used to complete SQL key words\n" +" [lower, upper, preserve-lower, preserve-upper]\n" msgstr "" " COMP_KEYWORD_CASE bestimmt, ob SQL-Schlüsselwörter in Groß- oder Klein-\n" " schreibung vervollständigt werden\n" " [lower, upper, preserve-lower, preserve-upper]\n" -#: help.c:341 -#, c-format -msgid " DBNAME the currently connected database name\n" +#: help.c:353 +#, fuzzy, c-format +#| msgid " DBNAME the currently connected database name\n" +msgid "" +" DBNAME\n" +" the currently connected database name\n" msgstr " DBNAME Name der aktuellen Datenbank\n" -#: help.c:342 -#, c-format +#: help.c:355 +#, fuzzy, c-format +#| msgid "" +#| " ECHO controls what input is written to standard output\n" +#| " [all, errors, none, queries]\n" msgid "" -" ECHO controls what input is written to standard output\n" -" [all, errors, none, queries]\n" +" ECHO\n" +" controls what input is written to standard output\n" +" [all, errors, none, queries]\n" msgstr "" " ECHO kontrolliert, welche Eingaben auf die Standardausgabe\n" " geschrieben werden [all, errors, none, queries]\n" -#: help.c:344 -#, c-format +#: help.c:358 +#, fuzzy, c-format +#| msgid "" +#| " ECHO_HIDDEN if set, display internal queries executed by backslash commands;\n" +#| " if set to \"noexec\", just show without execution\n" msgid "" -" ECHO_HIDDEN if set, display internal queries executed 
by backslash commands;\n" -" if set to \"noexec\", just show without execution\n" +" ECHO_HIDDEN\n" +" if set, display internal queries executed by backslash commands;\n" +" if set to \"noexec\", just show them without execution\n" msgstr "" " ECHO_HIDDEN wenn gesetzt, interne Anfragen, die von Backslash-Befehlen\n" " ausgeführt werden, anzeigen; wenn auf »noexec« gesetzt, nur\n" " anzeigen, nicht ausführen\n" -#: help.c:346 -#, c-format -msgid " ENCODING current client character set encoding\n" +#: help.c:361 +#, fuzzy, c-format +#| msgid " ENCODING current client character set encoding\n" +msgid "" +" ENCODING\n" +" current client character set encoding\n" msgstr " ENCODING aktuelle Zeichensatzkodierung des Clients\n" -#: help.c:347 +#: help.c:363 #, c-format msgid "" -" FETCH_COUNT the number of result rows to fetch and display at a time\n" -" (default: 0=unlimited)\n" +" ERROR\n" +" true if last query failed, else false\n" +msgstr "" + +#: help.c:365 +#, fuzzy, c-format +#| msgid "" +#| " FETCH_COUNT the number of result rows to fetch and display at a time\n" +#| " (default: 0=unlimited)\n" +msgid "" +" FETCH_COUNT\n" +" the number of result rows to fetch and display at a time (0 = unlimited)\n" msgstr "" " FETCH_COUNT Anzahl auf einmal zu holender und anzuzeigender Zeilen\n" " (Standard: 0=unbegrenzt)\n" -#: help.c:349 -#, c-format -msgid " HISTCONTROL controls command history [ignorespace, ignoredups, ignoreboth]\n" +#: help.c:367 +#, fuzzy, c-format +#| msgid " HISTCONTROL controls command history [ignorespace, ignoredups, ignoreboth]\n" +msgid "" +" HISTCONTROL\n" +" controls command history [ignorespace, ignoredups, ignoreboth]\n" msgstr "" " HISTCONTROL kontrolliert Befehlsgeschichte\n" " [ignorespace, ignoredups, ignoreboth]\n" -#: help.c:350 -#, c-format -msgid " HISTFILE file name used to store the command history\n" +#: help.c:369 +#, fuzzy, c-format +#| msgid " HISTFILE file name used to store the command history\n" +msgid "" +" HISTFILE\n" +" file name used to store the command history\n" msgstr " HISTFILE Dateiname für die Befehlsgeschichte\n" -#: help.c:351 -#, c-format -msgid " HISTSIZE max number of commands to store in the command history\n" +#: help.c:371 +#, fuzzy, c-format +#| msgid " HISTSIZE max number of commands to store in the command history\n" +msgid "" +" HISTSIZE\n" +" max number of commands to store in the command history\n" msgstr " HISTSIZE max. 
Anzahl der in der Befehlsgeschichte zu speichernden Befehle\n" -#: help.c:352 -#, c-format -msgid " HOST the currently connected database server host\n" +#: help.c:373 +#, fuzzy, c-format +#| msgid " HOST the currently connected database server host\n" +msgid "" +" HOST\n" +" the currently connected database server host\n" msgstr " HOST der aktuell verbundene Datenbankserverhost\n" -#: help.c:353 +#: help.c:375 #, fuzzy, c-format -#| msgid " IGNOREEOF if unset, sending an EOF to interactive session terminates application\n" -msgid " IGNOREEOF number of EOFs needed to terminate an interactive session\n" -msgstr "" -" IGNOREEOF wenn nicht gesetzt beendet ein EOF in einer interaktiven\n" -" Sitzung das Programm\n" +#| msgid " IGNOREEOF number of EOFs needed to terminate an interactive session\n" +msgid "" +" IGNOREEOF\n" +" number of EOFs needed to terminate an interactive session\n" +msgstr " IGNOREEOF Anzahl benötigter EOFs um eine interaktive Sitzung zu beenden\n" -#: help.c:354 -#, c-format -msgid " LASTOID value of the last affected OID\n" +#: help.c:377 +#, fuzzy, c-format +#| msgid " LASTOID value of the last affected OID\n" +msgid "" +" LASTOID\n" +" value of the last affected OID\n" msgstr " LASTOID Wert der zuletzt beinträchtigten OID\n" -#: help.c:355 +#: help.c:379 #, c-format -msgid " ON_ERROR_ROLLBACK if set, an error doesn't stop a transaction (uses implicit savepoints)\n" +msgid "" +" LAST_ERROR_MESSAGE\n" +" LAST_ERROR_SQLSTATE\n" +" message and SQLSTATE of last error, or empty string and \"00000\" if none\n" +msgstr "" + +#: help.c:382 +#, fuzzy, c-format +#| msgid " ON_ERROR_ROLLBACK if set, an error doesn't stop a transaction (uses implicit savepoints)\n" +msgid "" +" ON_ERROR_ROLLBACK\n" +" if set, an error doesn't stop a transaction (uses implicit savepoints)\n" msgstr "" " ON_ERROR_ROLLBACK wenn gesetzt beendet ein Fehler die Transaktion nicht\n" " (verwendet implizite Sicherungspunkte)\n" -#: help.c:356 -#, c-format -msgid " ON_ERROR_STOP stop batch execution after error\n" +#: help.c:384 +#, fuzzy, c-format +#| msgid " ON_ERROR_STOP stop batch execution after error\n" +msgid "" +" ON_ERROR_STOP\n" +" stop batch execution after error\n" msgstr " ON_ERROR_STOP Skriptausführung bei Fehler beenden\n" -#: help.c:357 -#, c-format -msgid " PORT server port of the current connection\n" +#: help.c:386 +#, fuzzy, c-format +#| msgid " PORT server port of the current connection\n" +msgid "" +" PORT\n" +" server port of the current connection\n" msgstr " PORT Serverport der aktuellen Verbindung\n" -#: help.c:358 -#, c-format -msgid " PROMPT1 specifies the standard psql prompt\n" +#: help.c:388 +#, fuzzy, c-format +#| msgid " PROMPT1 specifies the standard psql prompt\n" +msgid "" +" PROMPT1\n" +" specifies the standard psql prompt\n" msgstr " PROMPT1 der normale psql-Prompt\n" -#: help.c:359 -#, c-format -msgid " PROMPT2 specifies the prompt used when a statement continues from a previous line\n" +#: help.c:390 +#, fuzzy, c-format +#| msgid " PROMPT2 specifies the prompt used when a statement continues from a previous line\n" +msgid "" +" PROMPT2\n" +" specifies the prompt used when a statement continues from a previous line\n" msgstr "" " PROMPT2 der Prompt, wenn eine Anweisung von der vorherigen Zeile\n" " fortgesetzt wird\n" -#: help.c:360 -#, c-format -msgid " PROMPT3 specifies the prompt used during COPY ... FROM STDIN\n" +#: help.c:392 +#, fuzzy, c-format +#| msgid " PROMPT3 specifies the prompt used during COPY ... 
FROM STDIN\n" +msgid "" +" PROMPT3\n" +" specifies the prompt used during COPY ... FROM STDIN\n" msgstr " PROMPT3 der Prompt während COPY ... FROM STDIN\n" -#: help.c:361 -#, c-format -msgid " QUIET run quietly (same as -q option)\n" +#: help.c:394 +#, fuzzy, c-format +#| msgid " QUIET run quietly (same as -q option)\n" +msgid "" +" QUIET\n" +" run quietly (same as -q option)\n" msgstr " QUIET stille Ausführung (wie Option -q)\n" -#: help.c:362 +#: help.c:396 #, c-format -msgid " SHOW_CONTEXT controls display of message context fields [never, errors, always]\n" +msgid "" +" ROW_COUNT\n" +" number of rows returned or affected by last query, or 0\n" +msgstr "" + +#: help.c:398 +#, fuzzy, c-format +#| msgid " SERVER_VERSION_NUM server's version (numeric format)\n" +msgid "" +" SERVER_VERSION_NAME\n" +" SERVER_VERSION_NUM\n" +" server's version (in short string or numeric format)\n" +msgstr " SERVER_VERSION_NUM Serverversion (numerisches Format)\n" + +#: help.c:401 +#, fuzzy, c-format +#| msgid " SHOW_CONTEXT controls display of message context fields [never, errors, always]\n" +msgid "" +" SHOW_CONTEXT\n" +" controls display of message context fields [never, errors, always]\n" msgstr "" " SHOW_CONTEXT kontrolliert die Anzeige von Kontextinformationen in\n" " Meldungen [never, errors, always]\n" -#: help.c:363 -#, c-format -msgid " SINGLELINE end of line terminates SQL command mode (same as -S option)\n" +#: help.c:403 +#, fuzzy, c-format +#| msgid " SINGLELINE end of line terminates SQL command mode (same as -S option)\n" +msgid "" +" SINGLELINE\n" +" if set, end of line terminates SQL commands (same as -S option)\n" msgstr " SINGLELINE Zeilenende beendet SQL-Anweisung (wie Option -S)\n" -#: help.c:364 -#, c-format -msgid " SINGLESTEP single-step mode (same as -s option)\n" +#: help.c:405 +#, fuzzy, c-format +#| msgid " SINGLESTEP single-step mode (same as -s option)\n" +msgid "" +" SINGLESTEP\n" +" single-step mode (same as -s option)\n" msgstr " SINGLESTEP Einzelschrittmodus (wie Option -s)\n" -#: help.c:365 +#: help.c:407 #, c-format -msgid " USER the currently connected database user\n" +msgid "" +" SQLSTATE\n" +" SQLSTATE of last query, or \"00000\" if no error\n" +msgstr "" + +#: help.c:409 +#, fuzzy, c-format +#| msgid " USER the currently connected database user\n" +msgid "" +" USER\n" +" the currently connected database user\n" msgstr " USER der aktuell verbundene Datenbankbenutzer\n" -#: help.c:366 -#, c-format -msgid " VERBOSITY controls verbosity of error reports [default, verbose, terse]\n" +#: help.c:411 +#, fuzzy, c-format +#| msgid " VERBOSITY controls verbosity of error reports [default, verbose, terse]\n" +msgid "" +" VERBOSITY\n" +" controls verbosity of error reports [default, verbose, terse]\n" msgstr "" " VERBOSITY kontrolliert wieviele Details in Fehlermeldungen enthalten\n" " sind [default, verbose, terse]\n" -#: help.c:368 +#: help.c:413 +#, fuzzy, c-format +#| msgid " SERVER_VERSION_NUM server's version (numeric format)\n" +msgid "" +" VERSION\n" +" VERSION_NAME\n" +" VERSION_NUM\n" +" psql's version (in verbose string, short string, or numeric format)\n" +msgstr " SERVER_VERSION_NUM Serverversion (numerisches Format)\n" + +#: help.c:418 #, c-format msgid "" "\n" @@ -3013,7 +3308,7 @@ msgstr "" "\n" "Anzeigeeinstellungen:\n" -#: help.c:370 +#: help.c:420 #, c-format msgid "" " psql --pset=NAME[=VALUE]\n" @@ -3024,121 +3319,175 @@ msgstr "" " oder \\pset NAME [WERT] innerhalb von psql\n" "\n" -#: help.c:372 -#, c-format -msgid " border border style (number)\n" +#: 
help.c:422
+#, fuzzy, c-format
+#| msgid "  border             border style (number)\n"
+msgid ""
+"  border\n"
+"    border style (number)\n"
 msgstr "  border             Rahmenstil (Zahl)\n"
 
-#: help.c:373
-#, c-format
-msgid "  columns            target width for the wrapped format\n"
+#: help.c:424
+#, fuzzy, c-format
+#| msgid "  columns            target width for the wrapped format\n"
+msgid ""
+"  columns\n"
+"    target width for the wrapped format\n"
 msgstr "  columns            Zielbreite für das Format »wrapped«\n"
 
-#: help.c:374
-#, c-format
-msgid "  expanded (or x)    expanded output [on, off, auto]\n"
+#: help.c:426
+#, fuzzy, c-format
+#| msgid "  expanded (or x)    expanded output [on, off, auto]\n"
+msgid ""
+"  expanded (or x)\n"
+"    expanded output [on, off, auto]\n"
 msgstr "  expanded (oder x)  erweiterte Ausgabe [on, off, auto]\n"
 
-#: help.c:375
-#, c-format
-msgid "  fieldsep           field separator for unaligned output (default \"%s\")\n"
+#: help.c:428
+#, fuzzy, c-format
+#| msgid "  fieldsep           field separator for unaligned output (default \"%s\")\n"
+msgid ""
+"  fieldsep\n"
+"    field separator for unaligned output (default \"%s\")\n"
 msgstr ""
 "  fieldsep           Feldtrennzeichen für unausgerichteten Ausgabemodus\n"
 "                     (Standard »%s«)\n"
 
-#: help.c:376
-#, c-format
-msgid "  fieldsep_zero      set field separator for unaligned output to zero byte\n"
+#: help.c:431
+#, fuzzy, c-format
+#| msgid "  fieldsep_zero      set field separator for unaligned output to zero byte\n"
+msgid ""
+"  fieldsep_zero\n"
+"    set field separator for unaligned output to a zero byte\n"
 msgstr ""
 "  fieldsep_zero      Feldtrennzeichen für unausgerichteten Ausgabemodus auf\n"
 "                     Null-Byte setzen\n"
 
-#: help.c:377
-#, c-format
-msgid "  footer             enable or disable display of the table footer [on, off]\n"
+#: help.c:433
+#, fuzzy, c-format
+#| msgid "  footer             enable or disable display of the table footer [on, off]\n"
+msgid ""
+"  footer\n"
+"    enable or disable display of the table footer [on, off]\n"
 msgstr "  footer             Tabellenfußzeile ein- oder ausschalten [on, off]\n"
 
-#: help.c:378
-#, c-format
-msgid "  format             set output format [unaligned, aligned, wrapped, html, asciidoc, ...]\n"
+#: help.c:435
+#, fuzzy, c-format
+#| msgid "  format             set output format [unaligned, aligned, wrapped, html, asciidoc, ...]\n"
+msgid ""
+"  format\n"
+"    set output format [unaligned, aligned, wrapped, html, asciidoc, ...]\n"
 msgstr "  format             Ausgabeformat setzen [unaligned, aligned, wrapped, html, asciidoc, ...]\n"
 
-#: help.c:379
-#, c-format
-msgid "  linestyle          set the border line drawing style [ascii, old-ascii, unicode]\n"
+#: help.c:437
+#, fuzzy, c-format
+#| msgid "  linestyle          set the border line drawing style [ascii, old-ascii, unicode]\n"
+msgid ""
+"  linestyle\n"
+"    set the border line drawing style [ascii, old-ascii, unicode]\n"
 msgstr "  linestyle          Rahmenlinienstil setzen [ascii, old-ascii, unicode]\n"
 
-#: help.c:380
-#, c-format
-msgid "  null               set the string to be printed in place of a null value\n"
+#: help.c:439
+#, fuzzy, c-format
+#| msgid "  null               set the string to be printed in place of a null value\n"
+msgid ""
+"  null\n"
+"    set the string to be printed in place of a null value\n"
 msgstr ""
 "  null               setzt die Zeichenkette, die anstelle eines NULL-Wertes\n"
 "                     ausgegeben wird\n"
 
-#: help.c:381
-#, c-format
+#: help.c:441
+#, fuzzy, c-format
+#| msgid ""
+#| "  numericlocale      enable or disable display of a locale-specific character to separate\n"
+#| "                     groups of digits [on, off]\n"
 msgid ""
-"  numericlocale      enable or disable display of a locale-specific character to separate\n"
-"                     groups of digits [on, off]\n"
+"  numericlocale\n"
+"    enable display of a locale-specific character to separate groups of digits\n"
 msgstr ""
 "  numericlocale      Verwendung eines Locale-spezifischen Zeichens zur Trennung\n"
 "                     von Zifferngruppen ein- oder ausschalten [on, off]\n"
 
-#: help.c:383
-#, c-format
-msgid "  pager              control when an external pager is used [yes, no, always]\n"
+#: help.c:443
+#, fuzzy, c-format
+#| msgid "  pager              control when an external pager is used [yes, no, always]\n"
+msgid ""
+"  pager\n"
+"    control when an external pager is used [yes, no, always]\n"
 msgstr ""
 "  pager              kontrolliert Verwendung eines externen Pager-Programms\n"
 "                     [yes, no, always]\n"
 
-#: help.c:384
-#, c-format
-msgid "  recordsep          record (line) separator for unaligned output\n"
+#: help.c:445
+#, fuzzy, c-format
+#| msgid "  recordsep          record (line) separator for unaligned output\n"
+msgid ""
+"  recordsep\n"
+"    record (line) separator for unaligned output\n"
 msgstr "  recordsep          Satztrennzeichen für unausgerichteten Ausgabemodus\n"
 
-#: help.c:385
-#, c-format
-msgid "  recordsep_zero     set record separator for unaligned output to zero byte\n"
+#: help.c:447
+#, fuzzy, c-format
+#| msgid "  recordsep_zero     set record separator for unaligned output to zero byte\n"
+msgid ""
+"  recordsep_zero\n"
+"    set record separator for unaligned output to a zero byte\n"
 msgstr ""
 "  recordsep_zero     Satztrennzeichen für unausgerichteten Ausgabemodus auf\n"
 "                     Null-Byte setzen\n"
 
-#: help.c:386
-#, c-format
+#: help.c:449
+#, fuzzy, c-format
+#| msgid ""
+#| "  tableattr (or T)   specify attributes for table tag in html format or proportional\n"
+#| "                     column widths for left-aligned data types in latex-longtable format\n"
 msgid ""
-"  tableattr (or T)   specify attributes for table tag in html format or proportional\n"
-"                     column widths for left-aligned data types in latex-longtable format\n"
+"  tableattr (or T)\n"
+"    specify attributes for table tag in html format, or proportional\n"
+"    column widths for left-aligned data types in latex-longtable format\n"
 msgstr ""
 "  tableattr (or T)   Attribute für das »table«-Tag im Format »html« oder\n"
 "                     proportionale Spaltenbreite für links ausgerichtete Datentypen\n"
 "                     im Format »latex-longtable«\n"
 
-#: help.c:388
-#, c-format
-msgid "  title              set the table title for any subsequently printed tables\n"
+#: help.c:452
+#, fuzzy, c-format
+#| msgid "  title              set the table title for any subsequently printed tables\n"
+msgid ""
+"  title\n"
+"    set the table title for subsequently printed tables\n"
 msgstr "  title              setzt den Titel darauffolgend ausgegebener Tabellen\n"
 
-#: help.c:389
-#, c-format
-msgid "  tuples_only        if set, only actual table data is shown\n"
+#: help.c:454
+#, fuzzy, c-format
+#| msgid "  tuples_only        if set, only actual table data is shown\n"
+msgid ""
+"  tuples_only\n"
+"    if set, only actual table data is shown\n"
 msgstr ""
 "  tuples_only        wenn gesetzt werden nur die eigentlichen Tabellendaten\n"
 "                     gezeigt\n"
 
-#: help.c:390
-#, c-format
+#: help.c:456
+#, fuzzy, c-format
+#| msgid ""
+#| "  unicode_border_linestyle\n"
+#| "  unicode_column_linestyle\n"
+#| "  unicode_header_linestyle\n"
+#| "                     set the style of Unicode line drawing [single, double]\n"
 msgid ""
 "  unicode_border_linestyle\n"
 "  unicode_column_linestyle\n"
 "  unicode_header_linestyle\n"
-"                     set the style of Unicode line drawing [single, double]\n"
+"    set the style of Unicode line drawing [single, double]\n"
 msgstr ""
 "  unicode_border_linestyle\n"
 "  unicode_column_linestyle\n"
 "  unicode_header_linestyle\n"
 "                     setzt den Stil für Unicode-Linien [single, double]\n"
 
-#: help.c:395
+#: help.c:461
 #, 
c-format msgid "" "\n" @@ -3147,7 +3496,7 @@ msgstr "" "\n" "Umgebungsvariablen:\n" -#: help.c:399 +#: help.c:465 #, c-format msgid "" " NAME=VALUE [NAME=VALUE] psql ...\n" @@ -3158,7 +3507,7 @@ msgstr "" " oder \\setenv NAME [WERT] innerhalb von psql\n" "\n" -#: help.c:401 +#: help.c:467 #, c-format msgid "" " set NAME=VALUE\n" @@ -3171,94 +3520,139 @@ msgstr "" " oder \\setenv NAME [WERT] innerhalb von psql\n" "\n" -#: help.c:404 -#, c-format -msgid " COLUMNS number of columns for wrapped format\n" +#: help.c:470 +#, fuzzy, c-format +#| msgid " COLUMNS number of columns for wrapped format\n" +msgid "" +" COLUMNS\n" +" number of columns for wrapped format\n" msgstr " COLUMNS Anzahl Spalten im Format »wrapped«\n" -#: help.c:405 -#, c-format -msgid " PAGER name of external pager program\n" -msgstr " PAGER Name des externen Pager-Programms\n" - -#: help.c:406 -#, c-format -msgid " PGAPPNAME same as the application_name connection parameter\n" +#: help.c:472 +#, fuzzy, c-format +#| msgid " PGAPPNAME same as the application_name connection parameter\n" +msgid "" +" PGAPPNAME\n" +" same as the application_name connection parameter\n" msgstr " PGAPPNAME wie Verbindungsparameter »application_name«\n" -#: help.c:407 -#, c-format -msgid " PGDATABASE same as the dbname connection parameter\n" +#: help.c:474 +#, fuzzy, c-format +#| msgid " PGDATABASE same as the dbname connection parameter\n" +msgid "" +" PGDATABASE\n" +" same as the dbname connection parameter\n" msgstr " PGDATABASE wie Verbindungsparameter »dbname«\n" -#: help.c:408 -#, c-format -msgid " PGHOST same as the host connection parameter\n" +#: help.c:476 +#, fuzzy, c-format +#| msgid " PGHOST same as the host connection parameter\n" +msgid "" +" PGHOST\n" +" same as the host connection parameter\n" msgstr " PGHOST wie Verbindungsparameter »host«\n" -#: help.c:409 -#, c-format -msgid " PGPORT same as the port connection parameter\n" -msgstr " PGPORT wie Verbindungsparameter »port«\n" - -#: help.c:410 -#, c-format -msgid " PGUSER same as the user connection parameter\n" -msgstr " PGUSER wie Verbindungsparameter »user«\n" - -#: help.c:411 -#, c-format -msgid " PGPASSWORD connection password (not recommended)\n" +#: help.c:478 +#, fuzzy, c-format +#| msgid " PGPASSWORD connection password (not recommended)\n" +msgid "" +" PGPASSWORD\n" +" connection password (not recommended)\n" msgstr " PGPASSWORD Verbindungspasswort (nicht empfohlen)\n" -#: help.c:412 -#, c-format -msgid " PGPASSFILE password file name\n" +#: help.c:480 +#, fuzzy, c-format +#| msgid " PGPASSFILE password file name\n" +msgid "" +" PGPASSFILE\n" +" password file name\n" msgstr " PGPASSFILE Name der Passwortdatei\n" -#: help.c:413 -#, c-format +#: help.c:482 +#, fuzzy, c-format +#| msgid " PGPORT same as the port connection parameter\n" +msgid "" +" PGPORT\n" +" same as the port connection parameter\n" +msgstr " PGPORT wie Verbindungsparameter »port«\n" + +#: help.c:484 +#, fuzzy, c-format +#| msgid " PGUSER same as the user connection parameter\n" +msgid "" +" PGUSER\n" +" same as the user connection parameter\n" +msgstr " PGUSER wie Verbindungsparameter »user«\n" + +#: help.c:486 +#, fuzzy, c-format +#| msgid "" +#| " PSQL_EDITOR, EDITOR, VISUAL\n" +#| " editor used by the \\e, \\ef, and \\ev commands\n" msgid "" " PSQL_EDITOR, EDITOR, VISUAL\n" -" editor used by the \\e, \\ef, and \\ev commands\n" +" editor used by the \\e, \\ef, and \\ev commands\n" msgstr "" " PSQL_EDITOR, EDITOR, VISUAL\n" " Editor für Befehle \\e, \\ef und \\ev\n" -#: help.c:415 -#, c-format +#: 
help.c:488 +#, fuzzy, c-format +#| msgid "" +#| " PSQL_EDITOR_LINENUMBER_ARG\n" +#| " how to specify a line number when invoking the editor\n" msgid "" " PSQL_EDITOR_LINENUMBER_ARG\n" -" how to specify a line number when invoking the editor\n" +" how to specify a line number when invoking the editor\n" msgstr "" " PSQL_EDITOR_LINENUMBER_ARG\n" " wie die Zeilennummer beim Aufruf des Editors angegeben wird\n" -#: help.c:417 -#, c-format -msgid " PSQL_HISTORY alternative location for the command history file\n" +#: help.c:490 +#, fuzzy, c-format +#| msgid " PSQL_HISTORY alternative location for the command history file\n" +msgid "" +" PSQL_HISTORY\n" +" alternative location for the command history file\n" msgstr " PSQL_HISTORY alternativer Pfad für History-Datei\n" -#: help.c:418 -#, c-format -msgid " PSQLRC alternative location for the user's .psqlrc file\n" +#: help.c:492 +#, fuzzy, c-format +#| msgid " PAGER name of external pager program\n" +msgid "" +" PSQL_PAGER, PAGER\n" +" name of external pager program\n" +msgstr " PAGER Name des externen Pager-Programms\n" + +#: help.c:494 +#, fuzzy, c-format +#| msgid " PSQLRC alternative location for the user's .psqlrc file\n" +msgid "" +" PSQLRC\n" +" alternative location for the user's .psqlrc file\n" msgstr " PSQLRC alternativer Pfad für .psqlrc-Datei des Benutzers\n" -#: help.c:419 -#, c-format -msgid " SHELL shell used by the \\! command\n" +#: help.c:496 +#, fuzzy, c-format +#| msgid " SHELL shell used by the \\! command\n" +msgid "" +" SHELL\n" +" shell used by the \\! command\n" msgstr " SHELL Shell für den Befehl \\!\n" -#: help.c:420 -#, c-format -msgid " TMPDIR directory for temporary files\n" +#: help.c:498 +#, fuzzy, c-format +#| msgid " TMPDIR directory for temporary files\n" +msgid "" +" TMPDIR\n" +" directory for temporary files\n" msgstr " TMPDIR Verzeichnis für temporäre Dateien\n" -#: help.c:463 +#: help.c:542 msgid "Available help:\n" msgstr "Verfügbare Hilfe:\n" -#: help.c:547 +#: help.c:626 #, c-format msgid "" "Command: %s\n" @@ -3273,7 +3667,7 @@ msgstr "" "%s\n" "\n" -#: help.c:563 +#: help.c:642 #, c-format msgid "" "No help available for \"%s\".\n" @@ -3320,12 +3714,17 @@ msgstr "ID" msgid "Large objects" msgstr "Large Objects" -#: mainloop.c:168 +#: mainloop.c:136 +#, c-format +msgid "\\if: escaped\n" +msgstr "\\if: abgebrochen\n" + +#: mainloop.c:183 #, c-format msgid "Use \"\\q\" to leave %s.\n" msgstr "Verwenden Sie »\\q«, um %s zu verlassen.\n" -#: mainloop.c:190 +#: mainloop.c:205 msgid "" "The input is a PostgreSQL custom-format dump.\n" "Use the pg_restore command-line client to restore this dump to a database.\n" @@ -3334,11 +3733,25 @@ msgstr "" "Verwenden Sie den Kommandozeilen-Client pg_restore, um diesen Dump in die\n" "Datenbank zurückzuspielen.\n" -#: mainloop.c:210 +#: mainloop.c:282 +msgid "Use \\? for help or press control-C to clear the input buffer." +msgstr "" + +#: mainloop.c:284 +#, fuzzy +#| msgid "" +#| "Type \"help\" for help.\n" +#| "\n" +msgid "Use \\? for help." +msgstr "" +"Geben Sie »help« für Hilfe ein.\n" +"\n" + +#: mainloop.c:288 msgid "You are using psql, the command-line interface to PostgreSQL." msgstr "Dies ist psql, die Kommandozeilenschnittstelle für PostgreSQL." -#: mainloop.c:211 +#: mainloop.c:289 #, c-format msgid "" "Type: \\copyright for distribution terms\n" @@ -3353,2118 +3766,2281 @@ msgstr "" " \\g oder Semikolon, um eine Anfrage auszuführen\n" " \\q um zu beenden\n" -#: psqlscanslash.l:585 +#: mainloop.c:313 +msgid "Use \\q to quit." 
+msgstr "" + +#: mainloop.c:316 mainloop.c:340 +msgid "Use control-D to quit." +msgstr "" + +#: mainloop.c:318 mainloop.c:342 +msgid "Use control-C to quit." +msgstr "" + +#: mainloop.c:449 mainloop.c:591 +#, c-format +msgid "query ignored; use \\endif or Ctrl-C to exit current \\if block\n" +msgstr "Anfrage ignoriert; verwenden Sie \\endif oder Strg-C um den aktuellen \\if-Block zu beenden\n" + +#: mainloop.c:609 +#, c-format +msgid "reached EOF without finding closing \\endif(s)\n" +msgstr "Dateiende erreicht, aber schließendes \\endif fehlt\n" + +#: psqlscanslash.l:637 #, c-format msgid "unterminated quoted string\n" msgstr "Zeichenkette in Anführungszeichen nicht abgeschlossen\n" -#: psqlscanslash.l:739 +#: psqlscanslash.l:810 #, c-format msgid "%s: out of memory\n" msgstr "%s: Speicher aufgebraucht\n" -#: sql_help.c:36 sql_help.c:39 sql_help.c:42 sql_help.c:64 sql_help.c:66 -#: sql_help.c:68 sql_help.c:79 sql_help.c:81 sql_help.c:83 sql_help.c:109 -#: sql_help.c:115 sql_help.c:117 sql_help.c:119 sql_help.c:121 sql_help.c:124 -#: sql_help.c:126 sql_help.c:128 sql_help.c:221 sql_help.c:223 sql_help.c:224 -#: sql_help.c:226 sql_help.c:228 sql_help.c:231 sql_help.c:233 sql_help.c:235 -#: sql_help.c:237 sql_help.c:249 sql_help.c:250 sql_help.c:251 sql_help.c:253 -#: sql_help.c:300 sql_help.c:302 sql_help.c:304 sql_help.c:306 sql_help.c:367 -#: sql_help.c:372 sql_help.c:374 sql_help.c:417 sql_help.c:419 sql_help.c:422 -#: sql_help.c:424 sql_help.c:491 sql_help.c:496 sql_help.c:501 sql_help.c:506 -#: sql_help.c:511 sql_help.c:560 sql_help.c:562 sql_help.c:564 sql_help.c:566 -#: sql_help.c:569 sql_help.c:571 sql_help.c:582 sql_help.c:584 sql_help.c:625 -#: sql_help.c:627 sql_help.c:629 sql_help.c:632 sql_help.c:634 sql_help.c:636 -#: sql_help.c:669 sql_help.c:673 sql_help.c:677 sql_help.c:696 sql_help.c:699 -#: sql_help.c:702 sql_help.c:731 sql_help.c:743 sql_help.c:751 sql_help.c:754 -#: sql_help.c:757 sql_help.c:772 sql_help.c:775 sql_help.c:798 sql_help.c:801 -#: sql_help.c:803 sql_help.c:805 sql_help.c:807 sql_help.c:848 sql_help.c:871 -#: sql_help.c:882 sql_help.c:884 sql_help.c:903 sql_help.c:913 sql_help.c:915 -#: sql_help.c:917 sql_help.c:929 sql_help.c:933 sql_help.c:935 sql_help.c:954 -#: sql_help.c:957 sql_help.c:959 sql_help.c:960 sql_help.c:961 sql_help.c:962 -#: sql_help.c:1049 sql_help.c:1051 sql_help.c:1054 sql_help.c:1057 -#: sql_help.c:1059 sql_help.c:1061 sql_help.c:1064 sql_help.c:1067 -#: sql_help.c:1127 sql_help.c:1129 sql_help.c:1131 sql_help.c:1134 -#: sql_help.c:1155 sql_help.c:1158 sql_help.c:1161 sql_help.c:1164 -#: sql_help.c:1168 sql_help.c:1170 sql_help.c:1172 sql_help.c:1174 -#: sql_help.c:1188 sql_help.c:1191 sql_help.c:1193 sql_help.c:1195 -#: sql_help.c:1205 sql_help.c:1207 sql_help.c:1217 sql_help.c:1219 -#: sql_help.c:1229 sql_help.c:1232 sql_help.c:1254 sql_help.c:1256 -#: sql_help.c:1258 sql_help.c:1261 sql_help.c:1263 sql_help.c:1265 -#: sql_help.c:1268 sql_help.c:1318 sql_help.c:1356 sql_help.c:1359 -#: sql_help.c:1361 sql_help.c:1363 sql_help.c:1365 sql_help.c:1367 -#: sql_help.c:1370 sql_help.c:1410 sql_help.c:1615 sql_help.c:1679 -#: sql_help.c:1698 sql_help.c:1711 sql_help.c:1765 sql_help.c:1769 -#: sql_help.c:1779 sql_help.c:1799 sql_help.c:1824 sql_help.c:1842 -#: sql_help.c:1871 sql_help.c:1964 sql_help.c:2006 sql_help.c:2028 -#: sql_help.c:2048 sql_help.c:2049 sql_help.c:2084 sql_help.c:2104 -#: sql_help.c:2126 sql_help.c:2140 sql_help.c:2161 sql_help.c:2191 -#: sql_help.c:2216 sql_help.c:2262 sql_help.c:2508 sql_help.c:2521 -#: sql_help.c:2538 
sql_help.c:2554 sql_help.c:2594 sql_help.c:2646 -#: sql_help.c:2650 sql_help.c:2652 sql_help.c:2658 sql_help.c:2676 -#: sql_help.c:2703 sql_help.c:2738 sql_help.c:2750 sql_help.c:2759 -#: sql_help.c:2803 sql_help.c:2817 sql_help.c:2845 sql_help.c:2853 -#: sql_help.c:2861 sql_help.c:2869 sql_help.c:2877 sql_help.c:2885 -#: sql_help.c:2893 sql_help.c:2901 sql_help.c:2910 sql_help.c:2921 -#: sql_help.c:2929 sql_help.c:2937 sql_help.c:2945 sql_help.c:2953 -#: sql_help.c:2963 sql_help.c:2972 sql_help.c:2981 sql_help.c:2989 -#: sql_help.c:2998 sql_help.c:3006 sql_help.c:3014 sql_help.c:3023 -#: sql_help.c:3031 sql_help.c:3039 sql_help.c:3047 sql_help.c:3057 -#: sql_help.c:3065 sql_help.c:3073 sql_help.c:3081 sql_help.c:3089 -#: sql_help.c:3097 sql_help.c:3114 sql_help.c:3123 sql_help.c:3131 -#: sql_help.c:3148 sql_help.c:3163 sql_help.c:3430 sql_help.c:3481 -#: sql_help.c:3510 sql_help.c:3518 sql_help.c:3937 sql_help.c:3985 -#: sql_help.c:4126 +#: sql_help.c:35 sql_help.c:38 sql_help.c:41 sql_help.c:65 sql_help.c:66 +#: sql_help.c:68 sql_help.c:70 sql_help.c:81 sql_help.c:83 sql_help.c:85 +#: sql_help.c:111 sql_help.c:117 sql_help.c:119 sql_help.c:121 sql_help.c:123 +#: sql_help.c:126 sql_help.c:128 sql_help.c:130 sql_help.c:235 sql_help.c:237 +#: sql_help.c:238 sql_help.c:240 sql_help.c:242 sql_help.c:245 sql_help.c:247 +#: sql_help.c:249 sql_help.c:251 sql_help.c:263 sql_help.c:264 sql_help.c:265 +#: sql_help.c:267 sql_help.c:316 sql_help.c:318 sql_help.c:320 sql_help.c:322 +#: sql_help.c:391 sql_help.c:396 sql_help.c:398 sql_help.c:441 sql_help.c:443 +#: sql_help.c:446 sql_help.c:448 sql_help.c:515 sql_help.c:520 sql_help.c:525 +#: sql_help.c:530 sql_help.c:535 sql_help.c:587 sql_help.c:589 sql_help.c:591 +#: sql_help.c:593 sql_help.c:595 sql_help.c:598 sql_help.c:600 sql_help.c:603 +#: sql_help.c:614 sql_help.c:616 sql_help.c:657 sql_help.c:659 sql_help.c:661 +#: sql_help.c:664 sql_help.c:666 sql_help.c:668 sql_help.c:701 sql_help.c:705 +#: sql_help.c:709 sql_help.c:728 sql_help.c:731 sql_help.c:734 sql_help.c:763 +#: sql_help.c:775 sql_help.c:783 sql_help.c:786 sql_help.c:789 sql_help.c:804 +#: sql_help.c:807 sql_help.c:836 sql_help.c:841 sql_help.c:846 sql_help.c:851 +#: sql_help.c:856 sql_help.c:878 sql_help.c:880 sql_help.c:882 sql_help.c:884 +#: sql_help.c:887 sql_help.c:889 sql_help.c:930 sql_help.c:974 sql_help.c:979 +#: sql_help.c:984 sql_help.c:989 sql_help.c:994 sql_help.c:1013 sql_help.c:1024 +#: sql_help.c:1026 sql_help.c:1045 sql_help.c:1055 sql_help.c:1057 +#: sql_help.c:1059 sql_help.c:1071 sql_help.c:1075 sql_help.c:1077 +#: sql_help.c:1088 sql_help.c:1090 sql_help.c:1092 sql_help.c:1108 +#: sql_help.c:1110 sql_help.c:1114 sql_help.c:1117 sql_help.c:1118 +#: sql_help.c:1119 sql_help.c:1122 sql_help.c:1124 sql_help.c:1257 +#: sql_help.c:1259 sql_help.c:1262 sql_help.c:1265 sql_help.c:1267 +#: sql_help.c:1269 sql_help.c:1272 sql_help.c:1275 sql_help.c:1387 +#: sql_help.c:1389 sql_help.c:1391 sql_help.c:1394 sql_help.c:1415 +#: sql_help.c:1418 sql_help.c:1421 sql_help.c:1424 sql_help.c:1428 +#: sql_help.c:1430 sql_help.c:1432 sql_help.c:1434 sql_help.c:1448 +#: sql_help.c:1451 sql_help.c:1453 sql_help.c:1455 sql_help.c:1465 +#: sql_help.c:1467 sql_help.c:1477 sql_help.c:1479 sql_help.c:1489 +#: sql_help.c:1492 sql_help.c:1514 sql_help.c:1516 sql_help.c:1518 +#: sql_help.c:1521 sql_help.c:1523 sql_help.c:1525 sql_help.c:1528 +#: sql_help.c:1578 sql_help.c:1620 sql_help.c:1623 sql_help.c:1625 +#: sql_help.c:1627 sql_help.c:1629 sql_help.c:1631 sql_help.c:1634 +#: sql_help.c:1681 
sql_help.c:1697 sql_help.c:1918 sql_help.c:1987 +#: sql_help.c:2006 sql_help.c:2019 sql_help.c:2075 sql_help.c:2081 +#: sql_help.c:2091 sql_help.c:2111 sql_help.c:2136 sql_help.c:2154 +#: sql_help.c:2183 sql_help.c:2275 sql_help.c:2316 sql_help.c:2339 +#: sql_help.c:2360 sql_help.c:2361 sql_help.c:2396 sql_help.c:2416 +#: sql_help.c:2438 sql_help.c:2452 sql_help.c:2472 sql_help.c:2495 +#: sql_help.c:2525 sql_help.c:2550 sql_help.c:2596 sql_help.c:2867 +#: sql_help.c:2880 sql_help.c:2897 sql_help.c:2913 sql_help.c:2953 +#: sql_help.c:3005 sql_help.c:3009 sql_help.c:3011 sql_help.c:3017 +#: sql_help.c:3035 sql_help.c:3062 sql_help.c:3097 sql_help.c:3109 +#: sql_help.c:3118 sql_help.c:3162 sql_help.c:3176 sql_help.c:3204 +#: sql_help.c:3212 sql_help.c:3220 sql_help.c:3228 sql_help.c:3236 +#: sql_help.c:3244 sql_help.c:3252 sql_help.c:3260 sql_help.c:3269 +#: sql_help.c:3280 sql_help.c:3288 sql_help.c:3296 sql_help.c:3304 +#: sql_help.c:3312 sql_help.c:3322 sql_help.c:3331 sql_help.c:3340 +#: sql_help.c:3348 sql_help.c:3358 sql_help.c:3369 sql_help.c:3377 +#: sql_help.c:3386 sql_help.c:3397 sql_help.c:3406 sql_help.c:3414 +#: sql_help.c:3422 sql_help.c:3430 sql_help.c:3438 sql_help.c:3446 +#: sql_help.c:3454 sql_help.c:3462 sql_help.c:3470 sql_help.c:3478 +#: sql_help.c:3486 sql_help.c:3503 sql_help.c:3512 sql_help.c:3520 +#: sql_help.c:3537 sql_help.c:3552 sql_help.c:3820 sql_help.c:3871 +#: sql_help.c:3900 sql_help.c:3908 sql_help.c:4341 sql_help.c:4389 +#: sql_help.c:4530 msgid "name" msgstr "Name" -#: sql_help.c:37 sql_help.c:40 sql_help.c:43 sql_help.c:311 sql_help.c:1476 -#: sql_help.c:2818 sql_help.c:3733 +#: sql_help.c:36 sql_help.c:39 sql_help.c:42 sql_help.c:327 sql_help.c:1768 +#: sql_help.c:3177 sql_help.c:4127 msgid "aggregate_signature" msgstr "Aggregatsignatur" -#: sql_help.c:38 sql_help.c:65 sql_help.c:80 sql_help.c:116 sql_help.c:236 -#: sql_help.c:254 sql_help.c:375 sql_help.c:423 sql_help.c:500 sql_help.c:546 -#: sql_help.c:561 sql_help.c:583 sql_help.c:633 sql_help.c:698 sql_help.c:753 -#: sql_help.c:774 sql_help.c:849 sql_help.c:873 sql_help.c:883 sql_help.c:916 -#: sql_help.c:936 sql_help.c:1058 sql_help.c:1128 sql_help.c:1171 -#: sql_help.c:1192 sql_help.c:1206 sql_help.c:1218 sql_help.c:1231 -#: sql_help.c:1262 sql_help.c:1319 sql_help.c:1364 +#: sql_help.c:37 sql_help.c:67 sql_help.c:82 sql_help.c:118 sql_help.c:250 +#: sql_help.c:268 sql_help.c:399 sql_help.c:447 sql_help.c:524 sql_help.c:570 +#: sql_help.c:588 sql_help.c:615 sql_help.c:665 sql_help.c:730 sql_help.c:785 +#: sql_help.c:806 sql_help.c:845 sql_help.c:890 sql_help.c:931 sql_help.c:983 +#: sql_help.c:1015 sql_help.c:1025 sql_help.c:1058 sql_help.c:1078 +#: sql_help.c:1091 sql_help.c:1125 sql_help.c:1266 sql_help.c:1388 +#: sql_help.c:1431 sql_help.c:1452 sql_help.c:1466 sql_help.c:1478 +#: sql_help.c:1491 sql_help.c:1522 sql_help.c:1579 sql_help.c:1628 msgid "new_name" msgstr "neuer_Name" -#: sql_help.c:41 sql_help.c:67 sql_help.c:82 sql_help.c:118 sql_help.c:234 -#: sql_help.c:252 sql_help.c:373 sql_help.c:459 sql_help.c:505 sql_help.c:585 -#: sql_help.c:594 sql_help.c:652 sql_help.c:672 sql_help.c:701 sql_help.c:756 -#: sql_help.c:802 sql_help.c:885 sql_help.c:914 sql_help.c:934 sql_help.c:958 -#: sql_help.c:1112 sql_help.c:1130 sql_help.c:1173 sql_help.c:1194 -#: sql_help.c:1257 sql_help.c:1362 sql_help.c:2494 +#: sql_help.c:40 sql_help.c:69 sql_help.c:84 sql_help.c:120 sql_help.c:248 +#: sql_help.c:266 sql_help.c:397 sql_help.c:483 sql_help.c:529 sql_help.c:617 +#: sql_help.c:626 sql_help.c:684 
sql_help.c:704 sql_help.c:733 sql_help.c:788 +#: sql_help.c:850 sql_help.c:888 sql_help.c:988 sql_help.c:1027 sql_help.c:1056 +#: sql_help.c:1076 sql_help.c:1089 sql_help.c:1123 sql_help.c:1326 +#: sql_help.c:1390 sql_help.c:1433 sql_help.c:1454 sql_help.c:1517 +#: sql_help.c:1626 sql_help.c:2853 msgid "new_owner" msgstr "neuer_Eigentümer" -#: sql_help.c:44 sql_help.c:69 sql_help.c:84 sql_help.c:238 sql_help.c:303 -#: sql_help.c:425 sql_help.c:510 sql_help.c:635 sql_help.c:676 sql_help.c:704 -#: sql_help.c:759 sql_help.c:918 sql_help.c:1060 sql_help.c:1175 -#: sql_help.c:1196 sql_help.c:1208 sql_help.c:1220 sql_help.c:1264 -#: sql_help.c:1366 +#: sql_help.c:43 sql_help.c:71 sql_help.c:86 sql_help.c:252 sql_help.c:319 +#: sql_help.c:449 sql_help.c:534 sql_help.c:667 sql_help.c:708 sql_help.c:736 +#: sql_help.c:791 sql_help.c:855 sql_help.c:993 sql_help.c:1060 sql_help.c:1093 +#: sql_help.c:1268 sql_help.c:1435 sql_help.c:1456 sql_help.c:1468 +#: sql_help.c:1480 sql_help.c:1524 sql_help.c:1630 msgid "new_schema" msgstr "neues_Schema" -#: sql_help.c:45 sql_help.c:1529 sql_help.c:2819 sql_help.c:3752 +#: sql_help.c:44 sql_help.c:1832 sql_help.c:3178 sql_help.c:4156 msgid "where aggregate_signature is:" msgstr "wobei Aggregatsignatur Folgendes ist:" -#: sql_help.c:46 sql_help.c:49 sql_help.c:52 sql_help.c:321 sql_help.c:346 -#: sql_help.c:349 sql_help.c:352 sql_help.c:492 sql_help.c:497 sql_help.c:502 -#: sql_help.c:507 sql_help.c:512 sql_help.c:1494 sql_help.c:1530 -#: sql_help.c:1533 sql_help.c:1536 sql_help.c:1680 sql_help.c:1699 -#: sql_help.c:1702 sql_help.c:1965 sql_help.c:2820 sql_help.c:2823 -#: sql_help.c:2826 sql_help.c:2911 sql_help.c:3316 sql_help.c:3648 -#: sql_help.c:3739 sql_help.c:3753 sql_help.c:3756 sql_help.c:3759 +#: sql_help.c:45 sql_help.c:48 sql_help.c:51 sql_help.c:337 sql_help.c:350 +#: sql_help.c:354 sql_help.c:370 sql_help.c:373 sql_help.c:376 sql_help.c:516 +#: sql_help.c:521 sql_help.c:526 sql_help.c:531 sql_help.c:536 sql_help.c:837 +#: sql_help.c:842 sql_help.c:847 sql_help.c:852 sql_help.c:857 sql_help.c:975 +#: sql_help.c:980 sql_help.c:985 sql_help.c:990 sql_help.c:995 sql_help.c:1786 +#: sql_help.c:1803 sql_help.c:1809 sql_help.c:1833 sql_help.c:1836 +#: sql_help.c:1839 sql_help.c:1988 sql_help.c:2007 sql_help.c:2010 +#: sql_help.c:2276 sql_help.c:2473 sql_help.c:3179 sql_help.c:3182 +#: sql_help.c:3185 sql_help.c:3270 sql_help.c:3359 sql_help.c:3387 +#: sql_help.c:3705 sql_help.c:4038 sql_help.c:4133 sql_help.c:4140 +#: sql_help.c:4146 sql_help.c:4157 sql_help.c:4160 sql_help.c:4163 msgid "argmode" msgstr "Argmodus" -#: sql_help.c:47 sql_help.c:50 sql_help.c:53 sql_help.c:322 sql_help.c:347 -#: sql_help.c:350 sql_help.c:353 sql_help.c:493 sql_help.c:498 sql_help.c:503 -#: sql_help.c:508 sql_help.c:513 sql_help.c:1495 sql_help.c:1531 -#: sql_help.c:1534 sql_help.c:1537 sql_help.c:1681 sql_help.c:1700 -#: sql_help.c:1703 sql_help.c:1966 sql_help.c:2821 sql_help.c:2824 -#: sql_help.c:2827 sql_help.c:2912 sql_help.c:3740 sql_help.c:3754 -#: sql_help.c:3757 sql_help.c:3760 +#: sql_help.c:46 sql_help.c:49 sql_help.c:52 sql_help.c:338 sql_help.c:351 +#: sql_help.c:355 sql_help.c:371 sql_help.c:374 sql_help.c:377 sql_help.c:517 +#: sql_help.c:522 sql_help.c:527 sql_help.c:532 sql_help.c:537 sql_help.c:838 +#: sql_help.c:843 sql_help.c:848 sql_help.c:853 sql_help.c:858 sql_help.c:976 +#: sql_help.c:981 sql_help.c:986 sql_help.c:991 sql_help.c:996 sql_help.c:1787 +#: sql_help.c:1804 sql_help.c:1810 sql_help.c:1834 sql_help.c:1837 +#: sql_help.c:1840 sql_help.c:1989 
sql_help.c:2008 sql_help.c:2011 +#: sql_help.c:2277 sql_help.c:2474 sql_help.c:3180 sql_help.c:3183 +#: sql_help.c:3186 sql_help.c:3271 sql_help.c:3360 sql_help.c:3388 +#: sql_help.c:4134 sql_help.c:4141 sql_help.c:4147 sql_help.c:4158 +#: sql_help.c:4161 sql_help.c:4164 msgid "argname" msgstr "Argname" -#: sql_help.c:48 sql_help.c:51 sql_help.c:54 sql_help.c:323 sql_help.c:348 -#: sql_help.c:351 sql_help.c:354 sql_help.c:494 sql_help.c:499 sql_help.c:504 -#: sql_help.c:509 sql_help.c:514 sql_help.c:1496 sql_help.c:1532 -#: sql_help.c:1535 sql_help.c:1538 sql_help.c:1967 sql_help.c:2822 -#: sql_help.c:2825 sql_help.c:2828 sql_help.c:2913 sql_help.c:3741 -#: sql_help.c:3755 sql_help.c:3758 sql_help.c:3761 +#: sql_help.c:47 sql_help.c:50 sql_help.c:53 sql_help.c:339 sql_help.c:352 +#: sql_help.c:356 sql_help.c:372 sql_help.c:375 sql_help.c:378 sql_help.c:518 +#: sql_help.c:523 sql_help.c:528 sql_help.c:533 sql_help.c:538 sql_help.c:839 +#: sql_help.c:844 sql_help.c:849 sql_help.c:854 sql_help.c:859 sql_help.c:977 +#: sql_help.c:982 sql_help.c:987 sql_help.c:992 sql_help.c:997 sql_help.c:1788 +#: sql_help.c:1805 sql_help.c:1811 sql_help.c:1835 sql_help.c:1838 +#: sql_help.c:1841 sql_help.c:2278 sql_help.c:2475 sql_help.c:3181 +#: sql_help.c:3184 sql_help.c:3187 sql_help.c:3272 sql_help.c:3361 +#: sql_help.c:3389 sql_help.c:4135 sql_help.c:4142 sql_help.c:4148 +#: sql_help.c:4159 sql_help.c:4162 sql_help.c:4165 msgid "argtype" msgstr "Argtyp" -#: sql_help.c:110 sql_help.c:370 sql_help.c:448 sql_help.c:460 sql_help.c:799 -#: sql_help.c:843 sql_help.c:931 sql_help.c:955 sql_help.c:1189 -#: sql_help.c:1313 sql_help.c:1341 sql_help.c:1586 sql_help.c:1592 -#: sql_help.c:1874 sql_help.c:1915 sql_help.c:1922 sql_help.c:1931 -#: sql_help.c:2007 sql_help.c:2163 sql_help.c:2192 sql_help.c:2284 -#: sql_help.c:2300 sql_help.c:2523 sql_help.c:2704 sql_help.c:2726 -#: sql_help.c:3183 sql_help.c:3350 +#: sql_help.c:112 sql_help.c:394 sql_help.c:472 sql_help.c:484 sql_help.c:925 +#: sql_help.c:1073 sql_help.c:1449 sql_help.c:1573 sql_help.c:1605 +#: sql_help.c:1652 sql_help.c:1889 sql_help.c:1895 sql_help.c:2186 +#: sql_help.c:2227 sql_help.c:2234 sql_help.c:2243 sql_help.c:2317 +#: sql_help.c:2526 sql_help.c:2618 sql_help.c:2882 sql_help.c:3063 +#: sql_help.c:3085 sql_help.c:3572 sql_help.c:3739 sql_help.c:4588 msgid "option" msgstr "Option" -#: sql_help.c:111 sql_help.c:800 sql_help.c:844 sql_help.c:956 sql_help.c:1314 -#: sql_help.c:2008 sql_help.c:2164 sql_help.c:2193 sql_help.c:2301 -#: sql_help.c:2705 +#: sql_help.c:113 sql_help.c:926 sql_help.c:1574 sql_help.c:2318 +#: sql_help.c:2527 sql_help.c:3064 msgid "where option can be:" msgstr "wobei Option Folgendes sein kann:" -#: sql_help.c:112 sql_help.c:1806 +#: sql_help.c:114 sql_help.c:2118 msgid "allowconn" msgstr "allowconn" -#: sql_help.c:113 sql_help.c:845 sql_help.c:1315 sql_help.c:1807 -#: sql_help.c:2194 sql_help.c:2706 +#: sql_help.c:115 sql_help.c:927 sql_help.c:1575 sql_help.c:2119 +#: sql_help.c:2528 sql_help.c:3065 msgid "connlimit" msgstr "Verbindungslimit" -#: sql_help.c:114 sql_help.c:1808 +#: sql_help.c:116 sql_help.c:2120 msgid "istemplate" msgstr "istemplate" -#: sql_help.c:120 sql_help.c:573 sql_help.c:638 sql_help.c:1063 -#: sql_help.c:1105 +#: sql_help.c:122 sql_help.c:605 sql_help.c:670 sql_help.c:1271 sql_help.c:1319 msgid "new_tablespace" msgstr "neuer_Tablespace" -#: sql_help.c:122 sql_help.c:125 sql_help.c:127 sql_help.c:519 sql_help.c:521 -#: sql_help.c:522 sql_help.c:852 sql_help.c:856 sql_help.c:859 sql_help.c:973 -#: 
sql_help.c:976 sql_help.c:1321 sql_help.c:1324 sql_help.c:1326 -#: sql_help.c:1976 sql_help.c:3535 sql_help.c:3926 +#: sql_help.c:124 sql_help.c:127 sql_help.c:129 sql_help.c:543 sql_help.c:545 +#: sql_help.c:546 sql_help.c:862 sql_help.c:864 sql_help.c:865 sql_help.c:934 +#: sql_help.c:938 sql_help.c:941 sql_help.c:1002 sql_help.c:1004 +#: sql_help.c:1005 sql_help.c:1136 sql_help.c:1139 sql_help.c:1582 +#: sql_help.c:1586 sql_help.c:1589 sql_help.c:2287 sql_help.c:2479 +#: sql_help.c:3925 sql_help.c:4330 msgid "configuration_parameter" msgstr "Konfigurationsparameter" -#: sql_help.c:123 sql_help.c:371 sql_help.c:443 sql_help.c:449 sql_help.c:461 -#: sql_help.c:520 sql_help.c:568 sql_help.c:644 sql_help.c:650 sql_help.c:853 -#: sql_help.c:932 sql_help.c:974 sql_help.c:975 sql_help.c:1087 -#: sql_help.c:1107 sql_help.c:1133 sql_help.c:1190 sql_help.c:1322 -#: sql_help.c:1342 sql_help.c:1875 sql_help.c:1916 sql_help.c:1923 -#: sql_help.c:1932 sql_help.c:1977 sql_help.c:1978 sql_help.c:2036 -#: sql_help.c:2068 sql_help.c:2285 sql_help.c:2397 sql_help.c:2409 -#: sql_help.c:2422 sql_help.c:2458 sql_help.c:2480 sql_help.c:2497 -#: sql_help.c:2524 sql_help.c:2727 sql_help.c:3351 sql_help.c:3927 -#: sql_help.c:3928 +#: sql_help.c:125 sql_help.c:395 sql_help.c:467 sql_help.c:473 sql_help.c:485 +#: sql_help.c:544 sql_help.c:597 sql_help.c:676 sql_help.c:682 sql_help.c:863 +#: sql_help.c:886 sql_help.c:935 sql_help.c:1003 sql_help.c:1074 +#: sql_help.c:1113 sql_help.c:1116 sql_help.c:1121 sql_help.c:1137 +#: sql_help.c:1138 sql_help.c:1301 sql_help.c:1321 sql_help.c:1371 +#: sql_help.c:1393 sql_help.c:1450 sql_help.c:1583 sql_help.c:1606 +#: sql_help.c:2187 sql_help.c:2228 sql_help.c:2235 sql_help.c:2244 +#: sql_help.c:2288 sql_help.c:2289 sql_help.c:2348 sql_help.c:2380 +#: sql_help.c:2480 sql_help.c:2481 sql_help.c:2498 sql_help.c:2619 +#: sql_help.c:2649 sql_help.c:2749 sql_help.c:2761 sql_help.c:2774 +#: sql_help.c:2817 sql_help.c:2839 sql_help.c:2856 sql_help.c:2883 +#: sql_help.c:3086 sql_help.c:3740 sql_help.c:4331 sql_help.c:4332 msgid "value" msgstr "Wert" -#: sql_help.c:185 +#: sql_help.c:197 msgid "target_role" msgstr "Zielrolle" -#: sql_help.c:186 sql_help.c:1858 sql_help.c:2240 sql_help.c:2245 -#: sql_help.c:3298 sql_help.c:3305 sql_help.c:3319 sql_help.c:3325 -#: sql_help.c:3630 sql_help.c:3637 sql_help.c:3651 sql_help.c:3657 +#: sql_help.c:198 sql_help.c:2170 sql_help.c:2574 sql_help.c:2579 +#: sql_help.c:3687 sql_help.c:3694 sql_help.c:3708 sql_help.c:3714 +#: sql_help.c:4020 sql_help.c:4027 sql_help.c:4041 sql_help.c:4047 msgid "schema_name" msgstr "Schemaname" -#: sql_help.c:187 +#: sql_help.c:199 msgid "abbreviated_grant_or_revoke" msgstr "abgekürztes_Grant_oder_Revoke" -#: sql_help.c:188 +#: sql_help.c:200 msgid "where abbreviated_grant_or_revoke is one of:" msgstr "wobei abgekürztes_Grant_oder_Revoke Folgendes sein kann:" -#: sql_help.c:189 sql_help.c:190 sql_help.c:191 sql_help.c:192 sql_help.c:193 -#: sql_help.c:194 sql_help.c:195 sql_help.c:196 sql_help.c:544 sql_help.c:572 -#: sql_help.c:637 sql_help.c:777 sql_help.c:863 sql_help.c:1062 -#: sql_help.c:1329 sql_help.c:2011 sql_help.c:2012 sql_help.c:2013 -#: sql_help.c:2014 sql_help.c:2015 sql_help.c:2142 sql_help.c:2197 -#: sql_help.c:2198 sql_help.c:2199 sql_help.c:2200 sql_help.c:2201 -#: sql_help.c:2709 sql_help.c:2710 sql_help.c:2711 sql_help.c:2712 -#: sql_help.c:2713 sql_help.c:3332 sql_help.c:3333 sql_help.c:3334 -#: sql_help.c:3631 sql_help.c:3635 sql_help.c:3638 sql_help.c:3640 -#: sql_help.c:3642 sql_help.c:3644 
sql_help.c:3646 sql_help.c:3652 -#: sql_help.c:3654 sql_help.c:3656 sql_help.c:3658 sql_help.c:3660 -#: sql_help.c:3662 sql_help.c:3663 sql_help.c:3664 sql_help.c:3947 +#: sql_help.c:201 sql_help.c:202 sql_help.c:203 sql_help.c:204 sql_help.c:205 +#: sql_help.c:206 sql_help.c:207 sql_help.c:208 sql_help.c:209 sql_help.c:210 +#: sql_help.c:568 sql_help.c:604 sql_help.c:669 sql_help.c:809 sql_help.c:945 +#: sql_help.c:1270 sql_help.c:1593 sql_help.c:2321 sql_help.c:2322 +#: sql_help.c:2323 sql_help.c:2324 sql_help.c:2325 sql_help.c:2454 +#: sql_help.c:2531 sql_help.c:2532 sql_help.c:2533 sql_help.c:2534 +#: sql_help.c:2535 sql_help.c:3068 sql_help.c:3069 sql_help.c:3070 +#: sql_help.c:3071 sql_help.c:3072 sql_help.c:3721 sql_help.c:3722 +#: sql_help.c:3723 sql_help.c:4021 sql_help.c:4025 sql_help.c:4028 +#: sql_help.c:4030 sql_help.c:4032 sql_help.c:4034 sql_help.c:4036 +#: sql_help.c:4042 sql_help.c:4044 sql_help.c:4046 sql_help.c:4048 +#: sql_help.c:4050 sql_help.c:4052 sql_help.c:4053 sql_help.c:4054 +#: sql_help.c:4351 msgid "role_name" msgstr "Rollenname" -#: sql_help.c:222 sql_help.c:436 sql_help.c:1078 sql_help.c:1080 -#: sql_help.c:1358 sql_help.c:1827 sql_help.c:1831 sql_help.c:1935 -#: sql_help.c:1939 sql_help.c:2032 sql_help.c:2393 sql_help.c:2405 -#: sql_help.c:2418 sql_help.c:2426 sql_help.c:2436 sql_help.c:2462 -#: sql_help.c:3381 sql_help.c:3396 sql_help.c:3398 sql_help.c:3812 -#: sql_help.c:3813 sql_help.c:3822 sql_help.c:3863 sql_help.c:3864 -#: sql_help.c:3865 sql_help.c:3866 sql_help.c:3867 sql_help.c:3868 -#: sql_help.c:3901 sql_help.c:3902 sql_help.c:3907 sql_help.c:3912 -#: sql_help.c:4051 sql_help.c:4052 sql_help.c:4061 sql_help.c:4102 -#: sql_help.c:4103 sql_help.c:4104 sql_help.c:4105 sql_help.c:4106 -#: sql_help.c:4107 sql_help.c:4154 sql_help.c:4156 sql_help.c:4189 -#: sql_help.c:4245 sql_help.c:4246 sql_help.c:4255 sql_help.c:4296 -#: sql_help.c:4297 sql_help.c:4298 sql_help.c:4299 sql_help.c:4300 -#: sql_help.c:4301 +#: sql_help.c:236 sql_help.c:460 sql_help.c:1286 sql_help.c:1288 +#: sql_help.c:1339 sql_help.c:1350 sql_help.c:1375 sql_help.c:1622 +#: sql_help.c:2139 sql_help.c:2143 sql_help.c:2247 sql_help.c:2251 +#: sql_help.c:2343 sql_help.c:2745 sql_help.c:2757 sql_help.c:2770 +#: sql_help.c:2778 sql_help.c:2789 sql_help.c:2821 sql_help.c:3771 +#: sql_help.c:3786 sql_help.c:3788 sql_help.c:4216 sql_help.c:4217 +#: sql_help.c:4226 sql_help.c:4267 sql_help.c:4268 sql_help.c:4269 +#: sql_help.c:4270 sql_help.c:4271 sql_help.c:4272 sql_help.c:4305 +#: sql_help.c:4306 sql_help.c:4311 sql_help.c:4316 sql_help.c:4455 +#: sql_help.c:4456 sql_help.c:4465 sql_help.c:4506 sql_help.c:4507 +#: sql_help.c:4508 sql_help.c:4509 sql_help.c:4510 sql_help.c:4511 +#: sql_help.c:4558 sql_help.c:4560 sql_help.c:4606 sql_help.c:4662 +#: sql_help.c:4663 sql_help.c:4672 sql_help.c:4713 sql_help.c:4714 +#: sql_help.c:4715 sql_help.c:4716 sql_help.c:4717 sql_help.c:4718 msgid "expression" msgstr "Ausdruck" -#: sql_help.c:225 +#: sql_help.c:239 msgid "domain_constraint" msgstr "Domänen-Constraint" -#: sql_help.c:227 sql_help.c:229 sql_help.c:232 sql_help.c:451 sql_help.c:452 -#: sql_help.c:1055 sql_help.c:1093 sql_help.c:1094 sql_help.c:1095 -#: sql_help.c:1115 sql_help.c:1482 sql_help.c:1484 sql_help.c:1830 -#: sql_help.c:1934 sql_help.c:1938 sql_help.c:2425 sql_help.c:2435 -#: sql_help.c:3393 +#: sql_help.c:241 sql_help.c:243 sql_help.c:246 sql_help.c:475 sql_help.c:476 +#: sql_help.c:1263 sql_help.c:1307 sql_help.c:1308 sql_help.c:1309 +#: sql_help.c:1338 sql_help.c:1349 
sql_help.c:1366 sql_help.c:1774 +#: sql_help.c:1776 sql_help.c:2142 sql_help.c:2246 sql_help.c:2250 +#: sql_help.c:2777 sql_help.c:2788 sql_help.c:3783 msgid "constraint_name" msgstr "Constraint-Name" -#: sql_help.c:230 sql_help.c:1056 +#: sql_help.c:244 sql_help.c:1264 msgid "new_constraint_name" msgstr "neuer_Constraint-Name" -#: sql_help.c:301 sql_help.c:930 +#: sql_help.c:317 sql_help.c:1072 msgid "new_version" msgstr "neue_Version" -#: sql_help.c:305 sql_help.c:307 +#: sql_help.c:321 sql_help.c:323 msgid "member_object" msgstr "Elementobjekt" -#: sql_help.c:308 +#: sql_help.c:324 msgid "where member_object is:" msgstr "wobei Elementobjekt Folgendes ist:" -#: sql_help.c:309 sql_help.c:314 sql_help.c:315 sql_help.c:316 sql_help.c:317 -#: sql_help.c:318 sql_help.c:319 sql_help.c:324 sql_help.c:328 sql_help.c:330 -#: sql_help.c:332 sql_help.c:333 sql_help.c:334 sql_help.c:335 sql_help.c:336 -#: sql_help.c:337 sql_help.c:338 sql_help.c:339 sql_help.c:340 sql_help.c:343 -#: sql_help.c:344 sql_help.c:1474 sql_help.c:1479 sql_help.c:1486 -#: sql_help.c:1487 sql_help.c:1488 sql_help.c:1489 sql_help.c:1490 -#: sql_help.c:1491 sql_help.c:1492 sql_help.c:1497 sql_help.c:1499 -#: sql_help.c:1503 sql_help.c:1505 sql_help.c:1509 sql_help.c:1510 -#: sql_help.c:1513 sql_help.c:1514 sql_help.c:1515 sql_help.c:1516 -#: sql_help.c:1517 sql_help.c:1518 sql_help.c:1519 sql_help.c:1520 -#: sql_help.c:1521 sql_help.c:1526 sql_help.c:1527 sql_help.c:3729 -#: sql_help.c:3734 sql_help.c:3735 sql_help.c:3736 sql_help.c:3737 -#: sql_help.c:3743 sql_help.c:3744 sql_help.c:3745 sql_help.c:3746 -#: sql_help.c:3747 sql_help.c:3748 sql_help.c:3749 sql_help.c:3750 +#: sql_help.c:325 sql_help.c:330 sql_help.c:331 sql_help.c:332 sql_help.c:333 +#: sql_help.c:334 sql_help.c:335 sql_help.c:340 sql_help.c:344 sql_help.c:346 +#: sql_help.c:348 sql_help.c:357 sql_help.c:358 sql_help.c:359 sql_help.c:360 +#: sql_help.c:361 sql_help.c:362 sql_help.c:363 sql_help.c:364 sql_help.c:367 +#: sql_help.c:368 sql_help.c:1766 sql_help.c:1771 sql_help.c:1778 +#: sql_help.c:1779 sql_help.c:1780 sql_help.c:1781 sql_help.c:1782 +#: sql_help.c:1783 sql_help.c:1784 sql_help.c:1789 sql_help.c:1791 +#: sql_help.c:1795 sql_help.c:1797 sql_help.c:1801 sql_help.c:1806 +#: sql_help.c:1807 sql_help.c:1814 sql_help.c:1815 sql_help.c:1816 +#: sql_help.c:1817 sql_help.c:1818 sql_help.c:1819 sql_help.c:1820 +#: sql_help.c:1821 sql_help.c:1822 sql_help.c:1823 sql_help.c:1824 +#: sql_help.c:1829 sql_help.c:1830 sql_help.c:4123 sql_help.c:4128 +#: sql_help.c:4129 sql_help.c:4130 sql_help.c:4131 sql_help.c:4137 +#: sql_help.c:4138 sql_help.c:4143 sql_help.c:4144 sql_help.c:4149 +#: sql_help.c:4150 sql_help.c:4151 sql_help.c:4152 sql_help.c:4153 +#: sql_help.c:4154 msgid "object_name" msgstr "Objektname" -#: sql_help.c:310 sql_help.c:1475 sql_help.c:3732 +#: sql_help.c:326 sql_help.c:1767 sql_help.c:4126 msgid "aggregate_name" msgstr "Aggregatname" -#: sql_help.c:312 sql_help.c:1477 sql_help.c:1745 sql_help.c:1749 -#: sql_help.c:1751 sql_help.c:2836 +#: sql_help.c:328 sql_help.c:1769 sql_help.c:2053 sql_help.c:2057 +#: sql_help.c:2059 sql_help.c:3195 msgid "source_type" msgstr "Quelltyp" -#: sql_help.c:313 sql_help.c:1478 sql_help.c:1746 sql_help.c:1750 -#: sql_help.c:1752 sql_help.c:2837 +#: sql_help.c:329 sql_help.c:1770 sql_help.c:2054 sql_help.c:2058 +#: sql_help.c:2060 sql_help.c:3196 msgid "target_type" msgstr "Zieltyp" -#: sql_help.c:320 sql_help.c:741 sql_help.c:1493 sql_help.c:1747 -#: sql_help.c:1782 sql_help.c:1845 sql_help.c:2085 sql_help.c:2116 
-#: sql_help.c:2600 sql_help.c:3315 sql_help.c:3647 sql_help.c:3738 -#: sql_help.c:3841 sql_help.c:3845 sql_help.c:3849 sql_help.c:3852 -#: sql_help.c:4080 sql_help.c:4084 sql_help.c:4088 sql_help.c:4091 -#: sql_help.c:4274 sql_help.c:4278 sql_help.c:4282 sql_help.c:4285 +#: sql_help.c:336 sql_help.c:773 sql_help.c:1785 sql_help.c:2055 +#: sql_help.c:2094 sql_help.c:2157 sql_help.c:2397 sql_help.c:2428 +#: sql_help.c:2959 sql_help.c:4037 sql_help.c:4132 sql_help.c:4245 +#: sql_help.c:4249 sql_help.c:4253 sql_help.c:4256 sql_help.c:4484 +#: sql_help.c:4488 sql_help.c:4492 sql_help.c:4495 sql_help.c:4691 +#: sql_help.c:4695 sql_help.c:4699 sql_help.c:4702 msgid "function_name" msgstr "Funktionsname" -#: sql_help.c:325 sql_help.c:734 sql_help.c:1500 sql_help.c:2109 +#: sql_help.c:341 sql_help.c:766 sql_help.c:1792 sql_help.c:2421 msgid "operator_name" msgstr "Operatorname" -#: sql_help.c:326 sql_help.c:670 sql_help.c:674 sql_help.c:678 sql_help.c:1501 -#: sql_help.c:2086 sql_help.c:2954 +#: sql_help.c:342 sql_help.c:702 sql_help.c:706 sql_help.c:710 sql_help.c:1793 +#: sql_help.c:2398 sql_help.c:3313 msgid "left_type" msgstr "linker_Typ" -#: sql_help.c:327 sql_help.c:671 sql_help.c:675 sql_help.c:679 sql_help.c:1502 -#: sql_help.c:2087 sql_help.c:2955 +#: sql_help.c:343 sql_help.c:703 sql_help.c:707 sql_help.c:711 sql_help.c:1794 +#: sql_help.c:2399 sql_help.c:3314 msgid "right_type" msgstr "rechter_Typ" -#: sql_help.c:329 sql_help.c:331 sql_help.c:697 sql_help.c:700 sql_help.c:703 -#: sql_help.c:732 sql_help.c:744 sql_help.c:752 sql_help.c:755 sql_help.c:758 -#: sql_help.c:1504 sql_help.c:1506 sql_help.c:2106 sql_help.c:2127 -#: sql_help.c:2441 sql_help.c:2964 sql_help.c:2973 +#: sql_help.c:345 sql_help.c:347 sql_help.c:729 sql_help.c:732 sql_help.c:735 +#: sql_help.c:764 sql_help.c:776 sql_help.c:784 sql_help.c:787 sql_help.c:790 +#: sql_help.c:1355 sql_help.c:1796 sql_help.c:1798 sql_help.c:2418 +#: sql_help.c:2439 sql_help.c:2794 sql_help.c:3323 sql_help.c:3332 msgid "index_method" msgstr "Indexmethode" -#: sql_help.c:341 sql_help.c:1111 sql_help.c:1522 sql_help.c:1973 -#: sql_help.c:2400 sql_help.c:2567 sql_help.c:3105 sql_help.c:3329 -#: sql_help.c:3661 +#: sql_help.c:349 sql_help.c:1802 sql_help.c:4139 +msgid "procedure_name" +msgstr "Prozedurname" + +#: sql_help.c:353 sql_help.c:1808 sql_help.c:3704 sql_help.c:4145 +msgid "routine_name" +msgstr "Routinenname" + +#: sql_help.c:365 sql_help.c:1325 sql_help.c:1825 sql_help.c:2284 +#: sql_help.c:2478 sql_help.c:2752 sql_help.c:2926 sql_help.c:3494 +#: sql_help.c:3718 sql_help.c:4051 msgid "type_name" msgstr "Typname" -#: sql_help.c:342 sql_help.c:1523 sql_help.c:1972 sql_help.c:2568 -#: sql_help.c:2794 sql_help.c:3106 sql_help.c:3321 sql_help.c:3653 +#: sql_help.c:366 sql_help.c:1826 sql_help.c:2283 sql_help.c:2477 +#: sql_help.c:2927 sql_help.c:3153 sql_help.c:3495 sql_help.c:3710 +#: sql_help.c:4043 msgid "lang_name" msgstr "Sprachname" -#: sql_help.c:345 +#: sql_help.c:369 msgid "and aggregate_signature is:" msgstr "und Aggregatsignatur Folgendes ist:" -#: sql_help.c:368 sql_help.c:1617 sql_help.c:1872 +#: sql_help.c:392 sql_help.c:1920 sql_help.c:2184 msgid "handler_function" msgstr "Handler-Funktion" -#: sql_help.c:369 sql_help.c:1873 +#: sql_help.c:393 sql_help.c:2185 msgid "validator_function" msgstr "Validator-Funktion" -#: sql_help.c:418 sql_help.c:495 sql_help.c:626 sql_help.c:1050 -#: sql_help.c:1255 sql_help.c:2432 sql_help.c:2433 sql_help.c:2449 -#: sql_help.c:2450 +#: sql_help.c:442 sql_help.c:519 sql_help.c:658 
sql_help.c:840 sql_help.c:978 +#: sql_help.c:1258 sql_help.c:1346 sql_help.c:1347 sql_help.c:1363 +#: sql_help.c:1364 sql_help.c:1515 sql_help.c:2785 sql_help.c:2786 +#: sql_help.c:2802 sql_help.c:2803 msgid "action" msgstr "Aktion" -#: sql_help.c:420 sql_help.c:427 sql_help.c:431 sql_help.c:432 sql_help.c:435 -#: sql_help.c:437 sql_help.c:438 sql_help.c:439 sql_help.c:441 sql_help.c:444 -#: sql_help.c:446 sql_help.c:447 sql_help.c:630 sql_help.c:640 sql_help.c:642 -#: sql_help.c:645 sql_help.c:647 sql_help.c:912 sql_help.c:1052 -#: sql_help.c:1070 sql_help.c:1074 sql_help.c:1075 sql_help.c:1079 -#: sql_help.c:1081 sql_help.c:1082 sql_help.c:1083 sql_help.c:1085 -#: sql_help.c:1088 sql_help.c:1090 sql_help.c:1357 sql_help.c:1360 -#: sql_help.c:1380 sql_help.c:1481 sql_help.c:1583 sql_help.c:1588 -#: sql_help.c:1602 sql_help.c:1603 sql_help.c:1604 sql_help.c:1913 -#: sql_help.c:1926 sql_help.c:1970 sql_help.c:2031 sql_help.c:2066 -#: sql_help.c:2270 sql_help.c:2384 sql_help.c:2392 sql_help.c:2401 -#: sql_help.c:2404 sql_help.c:2413 sql_help.c:2417 sql_help.c:2437 -#: sql_help.c:2439 sql_help.c:2446 sql_help.c:2461 sql_help.c:2478 -#: sql_help.c:2603 sql_help.c:2739 sql_help.c:3300 sql_help.c:3301 -#: sql_help.c:3380 sql_help.c:3395 sql_help.c:3397 sql_help.c:3399 -#: sql_help.c:3632 sql_help.c:3633 sql_help.c:3731 sql_help.c:3872 -#: sql_help.c:4111 sql_help.c:4153 sql_help.c:4155 sql_help.c:4157 -#: sql_help.c:4174 sql_help.c:4177 sql_help.c:4305 +#: sql_help.c:444 sql_help.c:451 sql_help.c:455 sql_help.c:456 sql_help.c:459 +#: sql_help.c:461 sql_help.c:462 sql_help.c:463 sql_help.c:465 sql_help.c:468 +#: sql_help.c:470 sql_help.c:471 sql_help.c:662 sql_help.c:672 sql_help.c:674 +#: sql_help.c:677 sql_help.c:679 sql_help.c:1054 sql_help.c:1260 +#: sql_help.c:1278 sql_help.c:1282 sql_help.c:1283 sql_help.c:1287 +#: sql_help.c:1289 sql_help.c:1290 sql_help.c:1291 sql_help.c:1293 +#: sql_help.c:1296 sql_help.c:1297 sql_help.c:1299 sql_help.c:1302 +#: sql_help.c:1304 sql_help.c:1351 sql_help.c:1353 sql_help.c:1360 +#: sql_help.c:1369 sql_help.c:1374 sql_help.c:1621 sql_help.c:1624 +#: sql_help.c:1658 sql_help.c:1773 sql_help.c:1886 sql_help.c:1891 +#: sql_help.c:1905 sql_help.c:1906 sql_help.c:1907 sql_help.c:2225 +#: sql_help.c:2238 sql_help.c:2281 sql_help.c:2342 sql_help.c:2346 +#: sql_help.c:2378 sql_help.c:2604 sql_help.c:2632 sql_help.c:2633 +#: sql_help.c:2736 sql_help.c:2744 sql_help.c:2753 sql_help.c:2756 +#: sql_help.c:2765 sql_help.c:2769 sql_help.c:2790 sql_help.c:2792 +#: sql_help.c:2799 sql_help.c:2815 sql_help.c:2820 sql_help.c:2837 +#: sql_help.c:2962 sql_help.c:3098 sql_help.c:3689 sql_help.c:3690 +#: sql_help.c:3770 sql_help.c:3785 sql_help.c:3787 sql_help.c:3789 +#: sql_help.c:4022 sql_help.c:4023 sql_help.c:4125 sql_help.c:4276 +#: sql_help.c:4515 sql_help.c:4557 sql_help.c:4559 sql_help.c:4561 +#: sql_help.c:4594 sql_help.c:4722 msgid "column_name" msgstr "Spaltenname" -#: sql_help.c:421 sql_help.c:631 sql_help.c:1053 +#: sql_help.c:445 sql_help.c:663 sql_help.c:1261 msgid "new_column_name" msgstr "neuer_Spaltenname" -#: sql_help.c:426 sql_help.c:516 sql_help.c:639 sql_help.c:1069 -#: sql_help.c:1271 +#: sql_help.c:450 sql_help.c:540 sql_help.c:671 sql_help.c:861 sql_help.c:999 +#: sql_help.c:1277 sql_help.c:1531 msgid "where action is one of:" msgstr "wobei Aktion Folgendes sein kann:" -#: sql_help.c:428 sql_help.c:433 sql_help.c:904 sql_help.c:1071 -#: sql_help.c:1076 sql_help.c:1273 sql_help.c:1277 sql_help.c:1825 -#: sql_help.c:1914 sql_help.c:2105 sql_help.c:2263 
sql_help.c:2385 -#: sql_help.c:2648 sql_help.c:3482 +#: sql_help.c:452 sql_help.c:457 sql_help.c:1046 sql_help.c:1279 +#: sql_help.c:1284 sql_help.c:1533 sql_help.c:1537 sql_help.c:2137 +#: sql_help.c:2226 sql_help.c:2417 sql_help.c:2597 sql_help.c:2737 +#: sql_help.c:3007 sql_help.c:3872 msgid "data_type" msgstr "Datentyp" -#: sql_help.c:429 sql_help.c:434 sql_help.c:1072 sql_help.c:1077 -#: sql_help.c:1274 sql_help.c:1278 sql_help.c:1826 sql_help.c:1917 -#: sql_help.c:2033 sql_help.c:2386 sql_help.c:2394 sql_help.c:2406 -#: sql_help.c:2419 sql_help.c:2649 sql_help.c:2655 sql_help.c:3390 +#: sql_help.c:453 sql_help.c:458 sql_help.c:1280 sql_help.c:1285 +#: sql_help.c:1534 sql_help.c:1538 sql_help.c:2138 sql_help.c:2229 +#: sql_help.c:2344 sql_help.c:2738 sql_help.c:2746 sql_help.c:2758 +#: sql_help.c:2771 sql_help.c:3008 sql_help.c:3014 sql_help.c:3780 msgid "collation" msgstr "Sortierfolge" -#: sql_help.c:430 sql_help.c:1073 sql_help.c:1918 sql_help.c:1927 -#: sql_help.c:2387 sql_help.c:2402 sql_help.c:2414 +#: sql_help.c:454 sql_help.c:1281 sql_help.c:2230 sql_help.c:2239 +#: sql_help.c:2739 sql_help.c:2754 sql_help.c:2766 msgid "column_constraint" msgstr "Spalten-Constraint" -#: sql_help.c:440 sql_help.c:641 sql_help.c:1084 +#: sql_help.c:464 sql_help.c:602 sql_help.c:673 sql_help.c:1298 msgid "integer" msgstr "ganze_Zahl" -#: sql_help.c:442 sql_help.c:445 sql_help.c:643 sql_help.c:646 sql_help.c:1086 -#: sql_help.c:1089 +#: sql_help.c:466 sql_help.c:469 sql_help.c:675 sql_help.c:678 sql_help.c:1300 +#: sql_help.c:1303 msgid "attribute_option" msgstr "Attributoption" -#: sql_help.c:450 sql_help.c:1091 sql_help.c:1919 sql_help.c:1928 -#: sql_help.c:2388 sql_help.c:2403 sql_help.c:2415 +#: sql_help.c:474 sql_help.c:1305 sql_help.c:2231 sql_help.c:2240 +#: sql_help.c:2740 sql_help.c:2755 sql_help.c:2767 msgid "table_constraint" msgstr "Tabellen-Constraint" -#: sql_help.c:453 sql_help.c:454 sql_help.c:455 sql_help.c:456 sql_help.c:1096 -#: sql_help.c:1097 sql_help.c:1098 sql_help.c:1099 sql_help.c:1524 +#: sql_help.c:477 sql_help.c:478 sql_help.c:479 sql_help.c:480 sql_help.c:1310 +#: sql_help.c:1311 sql_help.c:1312 sql_help.c:1313 sql_help.c:1827 msgid "trigger_name" msgstr "Triggername" -#: sql_help.c:457 sql_help.c:458 sql_help.c:1109 sql_help.c:1110 -#: sql_help.c:1920 sql_help.c:1925 sql_help.c:2391 sql_help.c:2412 +#: sql_help.c:481 sql_help.c:482 sql_help.c:1323 sql_help.c:1324 +#: sql_help.c:2232 sql_help.c:2237 sql_help.c:2743 sql_help.c:2764 msgid "parent_table" msgstr "Elterntabelle" -#: sql_help.c:515 sql_help.c:565 sql_help.c:628 sql_help.c:1234 -#: sql_help.c:1857 +#: sql_help.c:539 sql_help.c:594 sql_help.c:660 sql_help.c:860 sql_help.c:998 +#: sql_help.c:1494 sql_help.c:2169 msgid "extension_name" msgstr "Erweiterungsname" -#: sql_help.c:517 sql_help.c:1974 +#: sql_help.c:541 sql_help.c:1000 sql_help.c:2285 msgid "execution_cost" msgstr "Ausführungskosten" -#: sql_help.c:518 sql_help.c:1975 +#: sql_help.c:542 sql_help.c:1001 sql_help.c:2286 msgid "result_rows" msgstr "Ergebniszeilen" -#: sql_help.c:539 sql_help.c:541 sql_help.c:842 sql_help.c:850 sql_help.c:854 -#: sql_help.c:857 sql_help.c:860 sql_help.c:1312 sql_help.c:1320 -#: sql_help.c:1323 sql_help.c:1325 sql_help.c:1327 sql_help.c:2241 -#: sql_help.c:2243 sql_help.c:2246 sql_help.c:2247 sql_help.c:3299 -#: sql_help.c:3303 sql_help.c:3306 sql_help.c:3308 sql_help.c:3310 -#: sql_help.c:3312 sql_help.c:3314 sql_help.c:3320 sql_help.c:3322 -#: sql_help.c:3324 sql_help.c:3326 sql_help.c:3328 sql_help.c:3330 +#: 
sql_help.c:563 sql_help.c:565 sql_help.c:924 sql_help.c:932 sql_help.c:936 +#: sql_help.c:939 sql_help.c:942 sql_help.c:1572 sql_help.c:1580 +#: sql_help.c:1584 sql_help.c:1587 sql_help.c:1590 sql_help.c:2575 +#: sql_help.c:2577 sql_help.c:2580 sql_help.c:2581 sql_help.c:3688 +#: sql_help.c:3692 sql_help.c:3695 sql_help.c:3697 sql_help.c:3699 +#: sql_help.c:3701 sql_help.c:3703 sql_help.c:3709 sql_help.c:3711 +#: sql_help.c:3713 sql_help.c:3715 sql_help.c:3717 sql_help.c:3719 msgid "role_specification" msgstr "Rollenangabe" -#: sql_help.c:540 sql_help.c:542 sql_help.c:1339 sql_help.c:1800 -#: sql_help.c:2249 sql_help.c:2724 sql_help.c:3139 sql_help.c:3957 +#: sql_help.c:564 sql_help.c:566 sql_help.c:1603 sql_help.c:2112 +#: sql_help.c:2583 sql_help.c:3083 sql_help.c:3528 sql_help.c:4361 msgid "user_name" msgstr "Benutzername" -#: sql_help.c:543 sql_help.c:862 sql_help.c:1328 sql_help.c:2248 -#: sql_help.c:3331 +#: sql_help.c:567 sql_help.c:944 sql_help.c:1592 sql_help.c:2582 +#: sql_help.c:3720 msgid "where role_specification can be:" msgstr "wobei Rollenangabe Folgendes sein kann:" -#: sql_help.c:545 +#: sql_help.c:569 msgid "group_name" msgstr "Gruppenname" -#: sql_help.c:563 sql_help.c:1805 sql_help.c:2037 sql_help.c:2069 -#: sql_help.c:2398 sql_help.c:2410 sql_help.c:2423 sql_help.c:2459 -#: sql_help.c:2481 sql_help.c:2493 sql_help.c:3327 sql_help.c:3659 +#: sql_help.c:590 sql_help.c:1372 sql_help.c:2117 sql_help.c:2349 +#: sql_help.c:2381 sql_help.c:2750 sql_help.c:2762 sql_help.c:2775 +#: sql_help.c:2818 sql_help.c:2840 sql_help.c:2852 sql_help.c:3716 +#: sql_help.c:4049 msgid "tablespace_name" msgstr "Tablespace-Name" -#: sql_help.c:567 sql_help.c:570 sql_help.c:649 sql_help.c:651 sql_help.c:1106 -#: sql_help.c:1108 sql_help.c:2035 sql_help.c:2067 sql_help.c:2396 -#: sql_help.c:2408 sql_help.c:2421 sql_help.c:2457 sql_help.c:2479 +#: sql_help.c:592 sql_help.c:680 sql_help.c:1318 sql_help.c:1327 +#: sql_help.c:1367 sql_help.c:1707 +msgid "index_name" +msgstr "Indexname" + +#: sql_help.c:596 sql_help.c:599 sql_help.c:681 sql_help.c:683 sql_help.c:1320 +#: sql_help.c:1322 sql_help.c:1370 sql_help.c:2347 sql_help.c:2379 +#: sql_help.c:2748 sql_help.c:2760 sql_help.c:2773 sql_help.c:2816 +#: sql_help.c:2838 msgid "storage_parameter" msgstr "Storage-Parameter" -#: sql_help.c:593 sql_help.c:1498 sql_help.c:3742 +#: sql_help.c:601 +msgid "column_number" +msgstr "Spaltennummer" + +#: sql_help.c:625 sql_help.c:1790 sql_help.c:4136 msgid "large_object_oid" msgstr "Large-Object-OID" -#: sql_help.c:648 sql_help.c:1104 sql_help.c:1113 sql_help.c:1116 -#: sql_help.c:1420 -msgid "index_name" -msgstr "Indexname" - -#: sql_help.c:680 sql_help.c:2090 +#: sql_help.c:712 sql_help.c:2402 msgid "res_proc" msgstr "Res-Funktion" -#: sql_help.c:681 sql_help.c:2091 +#: sql_help.c:713 sql_help.c:2403 msgid "join_proc" msgstr "Join-Funktion" -#: sql_help.c:733 sql_help.c:745 sql_help.c:2108 +#: sql_help.c:765 sql_help.c:777 sql_help.c:2420 msgid "strategy_number" msgstr "Strategienummer" -#: sql_help.c:735 sql_help.c:736 sql_help.c:739 sql_help.c:740 sql_help.c:746 -#: sql_help.c:747 sql_help.c:749 sql_help.c:750 sql_help.c:2110 -#: sql_help.c:2111 sql_help.c:2114 sql_help.c:2115 +#: sql_help.c:767 sql_help.c:768 sql_help.c:771 sql_help.c:772 sql_help.c:778 +#: sql_help.c:779 sql_help.c:781 sql_help.c:782 sql_help.c:2422 sql_help.c:2423 +#: sql_help.c:2426 sql_help.c:2427 msgid "op_type" msgstr "Optyp" -#: sql_help.c:737 sql_help.c:2112 +#: sql_help.c:769 sql_help.c:2424 msgid "sort_family_name" msgstr 
"Sortierfamilienname" -#: sql_help.c:738 sql_help.c:748 sql_help.c:2113 +#: sql_help.c:770 sql_help.c:780 sql_help.c:2425 msgid "support_number" msgstr "Unterst-Nummer" -#: sql_help.c:742 sql_help.c:1748 sql_help.c:2117 sql_help.c:2570 -#: sql_help.c:2572 +#: sql_help.c:774 sql_help.c:2056 sql_help.c:2429 sql_help.c:2929 +#: sql_help.c:2931 msgid "argument_type" msgstr "Argumenttyp" -#: sql_help.c:773 sql_help.c:776 sql_help.c:804 sql_help.c:806 sql_help.c:808 -#: sql_help.c:872 sql_help.c:911 sql_help.c:1230 sql_help.c:1233 -#: sql_help.c:1379 sql_help.c:1419 sql_help.c:1483 sql_help.c:1508 -#: sql_help.c:1512 sql_help.c:1525 sql_help.c:1582 sql_help.c:1587 -#: sql_help.c:1912 sql_help.c:1924 sql_help.c:2029 sql_help.c:2065 -#: sql_help.c:2141 sql_help.c:2162 sql_help.c:2218 sql_help.c:2269 -#: sql_help.c:2383 sql_help.c:2399 sql_help.c:2411 sql_help.c:2477 -#: sql_help.c:2596 sql_help.c:2773 sql_help.c:2990 sql_help.c:3015 -#: sql_help.c:3115 sql_help.c:3297 sql_help.c:3302 sql_help.c:3347 -#: sql_help.c:3378 sql_help.c:3629 sql_help.c:3634 sql_help.c:3730 -#: sql_help.c:3827 sql_help.c:3829 sql_help.c:3878 sql_help.c:3917 -#: sql_help.c:4066 sql_help.c:4068 sql_help.c:4117 sql_help.c:4151 -#: sql_help.c:4173 sql_help.c:4175 sql_help.c:4176 sql_help.c:4260 -#: sql_help.c:4262 sql_help.c:4311 +#: sql_help.c:805 sql_help.c:808 sql_help.c:879 sql_help.c:881 sql_help.c:883 +#: sql_help.c:1014 sql_help.c:1053 sql_help.c:1490 sql_help.c:1493 +#: sql_help.c:1657 sql_help.c:1706 sql_help.c:1775 sql_help.c:1800 +#: sql_help.c:1813 sql_help.c:1828 sql_help.c:1885 sql_help.c:1890 +#: sql_help.c:2224 sql_help.c:2236 sql_help.c:2340 sql_help.c:2377 +#: sql_help.c:2453 sql_help.c:2496 sql_help.c:2552 sql_help.c:2603 +#: sql_help.c:2634 sql_help.c:2735 sql_help.c:2751 sql_help.c:2763 +#: sql_help.c:2836 sql_help.c:2955 sql_help.c:3132 sql_help.c:3349 +#: sql_help.c:3398 sql_help.c:3504 sql_help.c:3686 sql_help.c:3691 +#: sql_help.c:3736 sql_help.c:3768 sql_help.c:4019 sql_help.c:4024 +#: sql_help.c:4124 sql_help.c:4231 sql_help.c:4233 sql_help.c:4282 +#: sql_help.c:4321 sql_help.c:4470 sql_help.c:4472 sql_help.c:4521 +#: sql_help.c:4555 sql_help.c:4593 sql_help.c:4677 sql_help.c:4679 +#: sql_help.c:4728 msgid "table_name" msgstr "Tabellenname" -#: sql_help.c:778 sql_help.c:2143 +#: sql_help.c:810 sql_help.c:2455 msgid "using_expression" msgstr "Using-Ausdruck" -#: sql_help.c:779 sql_help.c:2144 +#: sql_help.c:811 sql_help.c:2456 msgid "check_expression" msgstr "Check-Ausdruck" -#: sql_help.c:846 sql_help.c:1316 sql_help.c:2009 sql_help.c:2195 -#: sql_help.c:2707 +#: sql_help.c:885 sql_help.c:2497 +msgid "publication_parameter" +msgstr "Publikationsparameter" + +#: sql_help.c:928 sql_help.c:1576 sql_help.c:2319 sql_help.c:2529 +#: sql_help.c:3066 msgid "password" msgstr "Passwort" -#: sql_help.c:847 sql_help.c:1317 sql_help.c:2010 sql_help.c:2196 -#: sql_help.c:2708 +#: sql_help.c:929 sql_help.c:1577 sql_help.c:2320 sql_help.c:2530 +#: sql_help.c:3067 msgid "timestamp" msgstr "Zeit" -#: sql_help.c:851 sql_help.c:855 sql_help.c:858 sql_help.c:861 sql_help.c:3307 -#: sql_help.c:3639 +#: sql_help.c:933 sql_help.c:937 sql_help.c:940 sql_help.c:943 sql_help.c:1581 +#: sql_help.c:1585 sql_help.c:1588 sql_help.c:1591 sql_help.c:3696 +#: sql_help.c:4029 msgid "database_name" msgstr "Datenbankname" -#: sql_help.c:905 sql_help.c:2264 +#: sql_help.c:1047 sql_help.c:2598 msgid "increment" msgstr "Inkrement" -#: sql_help.c:906 sql_help.c:2265 +#: sql_help.c:1048 sql_help.c:2599 msgid "minvalue" msgstr "Minwert" -#: 
sql_help.c:907 sql_help.c:2266 +#: sql_help.c:1049 sql_help.c:2600 msgid "maxvalue" msgstr "Maxwert" -#: sql_help.c:908 sql_help.c:2267 sql_help.c:3825 sql_help.c:3915 -#: sql_help.c:4064 sql_help.c:4193 sql_help.c:4258 +#: sql_help.c:1050 sql_help.c:2601 sql_help.c:4229 sql_help.c:4319 +#: sql_help.c:4468 sql_help.c:4610 sql_help.c:4675 msgid "start" msgstr "Start" -#: sql_help.c:909 +#: sql_help.c:1051 sql_help.c:1295 msgid "restart" msgstr "Restart" -#: sql_help.c:910 sql_help.c:2268 +#: sql_help.c:1052 sql_help.c:2602 msgid "cache" msgstr "Cache" -#: sql_help.c:1065 sql_help.c:1068 +#: sql_help.c:1109 sql_help.c:2646 +msgid "conninfo" +msgstr "Verbindungsinfo" + +#: sql_help.c:1111 sql_help.c:2647 +msgid "publication_name" +msgstr "Publikationsname" + +#: sql_help.c:1112 +msgid "set_publication_option" +msgstr "SET-Publikationsoption" + +#: sql_help.c:1115 +msgid "refresh_option" +msgstr "Refresh-Option" + +#: sql_help.c:1120 sql_help.c:2648 +msgid "subscription_parameter" +msgstr "Subskriptionsparameter" + +#: sql_help.c:1273 sql_help.c:1276 msgid "partition_name" msgstr "Partitionsname" -#: sql_help.c:1066 sql_help.c:1929 sql_help.c:2416 +#: sql_help.c:1274 sql_help.c:2241 sql_help.c:2768 msgid "partition_bound_spec" -msgstr "" +msgstr "Partitionsbegrenzungsangabe" + +#: sql_help.c:1292 sql_help.c:1341 sql_help.c:2780 +msgid "sequence_options" +msgstr "Sequenzoptionen" + +#: sql_help.c:1294 +msgid "sequence_option" +msgstr "Sequenzoption" -#: sql_help.c:1092 +#: sql_help.c:1306 msgid "table_constraint_using_index" msgstr "Tabellen-Constraint-für-Index" -#: sql_help.c:1100 sql_help.c:1101 sql_help.c:1102 sql_help.c:1103 +#: sql_help.c:1314 sql_help.c:1315 sql_help.c:1316 sql_help.c:1317 msgid "rewrite_rule_name" msgstr "Regelname" -#: sql_help.c:1114 +#: sql_help.c:1328 sql_help.c:2805 +msgid "and partition_bound_spec is:" +msgstr "und Partitionsbegrenzungsangabe Folgendes ist:" + +#: sql_help.c:1329 sql_help.c:1331 sql_help.c:1333 sql_help.c:1335 +#: sql_help.c:1336 sql_help.c:2806 sql_help.c:2808 sql_help.c:2810 +#: sql_help.c:2812 sql_help.c:2813 +msgid "numeric_literal" +msgstr "numerische_Konstante" + +#: sql_help.c:1330 sql_help.c:1332 sql_help.c:1334 sql_help.c:2807 +#: sql_help.c:2809 sql_help.c:2811 +msgid "string_literal" +msgstr "Zeichenkettenkonstante" + +#: sql_help.c:1337 +#, fuzzy +#| msgid "where column_constraint is:" +msgid "and column_constraint is:" +msgstr "wobei Spalten-Constraint Folgendes ist:" + +#: sql_help.c:1340 sql_help.c:2248 sql_help.c:2279 sql_help.c:2476 +#: sql_help.c:2779 +msgid "default_expr" +msgstr "Vorgabeausdruck" + +#: sql_help.c:1342 sql_help.c:1343 sql_help.c:1352 sql_help.c:1354 +#: sql_help.c:1358 sql_help.c:2781 sql_help.c:2782 sql_help.c:2791 +#: sql_help.c:2793 sql_help.c:2797 +msgid "index_parameters" +msgstr "Indexparameter" + +#: sql_help.c:1344 sql_help.c:1361 sql_help.c:2783 sql_help.c:2800 +msgid "reftable" +msgstr "Reftabelle" + +#: sql_help.c:1345 sql_help.c:1362 sql_help.c:2784 sql_help.c:2801 +msgid "refcolumn" +msgstr "Refspalte" + +#: sql_help.c:1348 sql_help.c:2249 sql_help.c:2787 +msgid "and table_constraint is:" +msgstr "und Tabellen-Constraint Folgendes ist:" + +#: sql_help.c:1356 sql_help.c:2795 +msgid "exclude_element" +msgstr "Exclude-Element" + +#: sql_help.c:1357 sql_help.c:2796 sql_help.c:4227 sql_help.c:4317 +#: sql_help.c:4466 sql_help.c:4608 sql_help.c:4673 +msgid "operator" +msgstr "Operator" + +#: sql_help.c:1359 sql_help.c:2350 sql_help.c:2798 +msgid "predicate" +msgstr "Prädikat" + +#: sql_help.c:1365 msgid 
"and table_constraint_using_index is:" msgstr "und Tabellen-Constraint-für-Index Folgendes ist:" -#: sql_help.c:1132 sql_help.c:1135 sql_help.c:2496 +#: sql_help.c:1368 sql_help.c:2814 +msgid "index_parameters in UNIQUE, PRIMARY KEY, and EXCLUDE constraints are:" +msgstr "Indexparameter bei UNIQUE-, PRIMARY KEY- und EXCLUDE-Constraints sind:" + +#: sql_help.c:1373 sql_help.c:2819 +msgid "exclude_element in an EXCLUDE constraint is:" +msgstr "Exclude-Element in einem EXCLUDE-Constraint ist:" + +#: sql_help.c:1376 sql_help.c:2345 sql_help.c:2747 sql_help.c:2759 +#: sql_help.c:2772 sql_help.c:2822 sql_help.c:3781 +msgid "opclass" +msgstr "Opklasse" + +#: sql_help.c:1392 sql_help.c:1395 sql_help.c:2855 msgid "tablespace_option" msgstr "Tablespace-Option" -#: sql_help.c:1156 sql_help.c:1159 sql_help.c:1165 sql_help.c:1169 +#: sql_help.c:1416 sql_help.c:1419 sql_help.c:1425 sql_help.c:1429 msgid "token_type" msgstr "Tokentyp" -#: sql_help.c:1157 sql_help.c:1160 +#: sql_help.c:1417 sql_help.c:1420 msgid "dictionary_name" msgstr "Wörterbuchname" -#: sql_help.c:1162 sql_help.c:1166 +#: sql_help.c:1422 sql_help.c:1426 msgid "old_dictionary" msgstr "altes_Wörterbuch" -#: sql_help.c:1163 sql_help.c:1167 +#: sql_help.c:1423 sql_help.c:1427 msgid "new_dictionary" msgstr "neues_Wörterbuch" -#: sql_help.c:1259 sql_help.c:1272 sql_help.c:1275 sql_help.c:1276 -#: sql_help.c:2647 +#: sql_help.c:1519 sql_help.c:1532 sql_help.c:1535 sql_help.c:1536 +#: sql_help.c:3006 msgid "attribute_name" msgstr "Attributname" -#: sql_help.c:1260 +#: sql_help.c:1520 msgid "new_attribute_name" msgstr "neuer_Attributname" -#: sql_help.c:1266 sql_help.c:1270 +#: sql_help.c:1526 sql_help.c:1530 msgid "new_enum_value" msgstr "neuer_Enum-Wert" -#: sql_help.c:1267 -#, fuzzy -#| msgid "new_enum_value" +#: sql_help.c:1527 msgid "neighbor_enum_value" -msgstr "neuer_Enum-Wert" +msgstr "Nachbar-Enum-Wert" -#: sql_help.c:1269 +#: sql_help.c:1529 msgid "existing_enum_value" msgstr "existierender_Enum-Wert" -#: sql_help.c:1340 sql_help.c:1921 sql_help.c:1930 sql_help.c:2280 -#: sql_help.c:2725 sql_help.c:3140 sql_help.c:3313 sql_help.c:3348 -#: sql_help.c:3645 +#: sql_help.c:1604 sql_help.c:2233 sql_help.c:2242 sql_help.c:2614 +#: sql_help.c:3084 sql_help.c:3529 sql_help.c:3702 sql_help.c:3737 +#: sql_help.c:4035 msgid "server_name" msgstr "Servername" -#: sql_help.c:1368 sql_help.c:1371 sql_help.c:2740 +#: sql_help.c:1632 sql_help.c:1635 sql_help.c:3099 msgid "view_option_name" msgstr "Sichtoptionsname" -#: sql_help.c:1369 sql_help.c:2741 +#: sql_help.c:1633 sql_help.c:3100 msgid "view_option_value" msgstr "Sichtoptionswert" -#: sql_help.c:1394 sql_help.c:3973 sql_help.c:3975 sql_help.c:3999 +#: sql_help.c:1653 sql_help.c:1654 sql_help.c:4589 sql_help.c:4590 +#, fuzzy +#| msgid "table_constraint" +msgid "table_and_columns" +msgstr "Tabellen-Constraint" + +#: sql_help.c:1655 sql_help.c:1896 sql_help.c:3575 sql_help.c:4591 +msgid "where option can be one of:" +msgstr "wobei Option eine der folgenden sein kann:" + +#: sql_help.c:1656 sql_help.c:4592 +#, fuzzy +#| msgid "and table_constraint is:" +msgid "and table_and_columns is:" +msgstr "und Tabellen-Constraint Folgendes ist:" + +#: sql_help.c:1672 sql_help.c:4377 sql_help.c:4379 sql_help.c:4403 msgid "transaction_mode" msgstr "Transaktionsmodus" -#: sql_help.c:1395 sql_help.c:3976 sql_help.c:4000 +#: sql_help.c:1673 sql_help.c:4380 sql_help.c:4404 msgid "where transaction_mode is one of:" msgstr "wobei Transaktionsmodus Folgendes sein kann:" -#: sql_help.c:1480 +#: sql_help.c:1682 
sql_help.c:4237 sql_help.c:4246 sql_help.c:4250 +#: sql_help.c:4254 sql_help.c:4257 sql_help.c:4476 sql_help.c:4485 +#: sql_help.c:4489 sql_help.c:4493 sql_help.c:4496 sql_help.c:4683 +#: sql_help.c:4692 sql_help.c:4696 sql_help.c:4700 sql_help.c:4703 +msgid "argument" +msgstr "Argument" + +#: sql_help.c:1772 msgid "relation_name" msgstr "Relationsname" -#: sql_help.c:1485 sql_help.c:3309 sql_help.c:3641 +#: sql_help.c:1777 sql_help.c:3698 sql_help.c:4031 msgid "domain_name" msgstr "Domänenname" -#: sql_help.c:1507 +#: sql_help.c:1799 msgid "policy_name" msgstr "Policy-Name" -#: sql_help.c:1511 +#: sql_help.c:1812 msgid "rule_name" msgstr "Regelname" -#: sql_help.c:1528 +#: sql_help.c:1831 msgid "text" msgstr "Text" -#: sql_help.c:1553 sql_help.c:3491 sql_help.c:3679 +#: sql_help.c:1856 sql_help.c:3881 sql_help.c:4069 msgid "transaction_id" msgstr "Transaktions-ID" -#: sql_help.c:1584 sql_help.c:1590 sql_help.c:3417 +#: sql_help.c:1887 sql_help.c:1893 sql_help.c:3807 msgid "filename" msgstr "Dateiname" -#: sql_help.c:1585 sql_help.c:1591 sql_help.c:2220 sql_help.c:2221 -#: sql_help.c:2222 +#: sql_help.c:1888 sql_help.c:1894 sql_help.c:2554 sql_help.c:2555 +#: sql_help.c:2556 msgid "command" msgstr "Befehl" -#: sql_help.c:1589 sql_help.c:2070 sql_help.c:2482 sql_help.c:2742 -#: sql_help.c:2760 sql_help.c:3382 +#: sql_help.c:1892 sql_help.c:2382 sql_help.c:2841 sql_help.c:3101 +#: sql_help.c:3119 sql_help.c:3772 msgid "query" msgstr "Anfrage" -#: sql_help.c:1593 sql_help.c:3186 -msgid "where option can be one of:" -msgstr "wobei Option eine der folgenden sein kann:" - -#: sql_help.c:1594 +#: sql_help.c:1897 msgid "format_name" msgstr "Formatname" -#: sql_help.c:1595 sql_help.c:1596 sql_help.c:1599 sql_help.c:3187 -#: sql_help.c:3188 sql_help.c:3189 sql_help.c:3190 sql_help.c:3191 -#: sql_help.c:3192 +#: sql_help.c:1898 sql_help.c:1899 sql_help.c:1902 sql_help.c:3576 +#: sql_help.c:3577 sql_help.c:3578 sql_help.c:3579 sql_help.c:3580 +#: sql_help.c:3581 msgid "boolean" msgstr "boolean" -#: sql_help.c:1597 +#: sql_help.c:1900 msgid "delimiter_character" msgstr "Trennzeichen" -#: sql_help.c:1598 +#: sql_help.c:1901 msgid "null_string" msgstr "Null-Zeichenkette" -#: sql_help.c:1600 +#: sql_help.c:1903 msgid "quote_character" msgstr "Quote-Zeichen" -#: sql_help.c:1601 +#: sql_help.c:1904 msgid "escape_character" msgstr "Escape-Zeichen" -#: sql_help.c:1605 +#: sql_help.c:1908 msgid "encoding_name" msgstr "Kodierungsname" -#: sql_help.c:1616 +#: sql_help.c:1919 msgid "access_method_type" msgstr "Zugriffsmethodentyp" -#: sql_help.c:1682 sql_help.c:1701 sql_help.c:1704 +#: sql_help.c:1990 sql_help.c:2009 sql_help.c:2012 msgid "arg_data_type" msgstr "Arg-Datentyp" -#: sql_help.c:1683 sql_help.c:1705 sql_help.c:1713 +#: sql_help.c:1991 sql_help.c:2013 sql_help.c:2021 msgid "sfunc" msgstr "Übergangsfunktion" -#: sql_help.c:1684 sql_help.c:1706 sql_help.c:1714 +#: sql_help.c:1992 sql_help.c:2014 sql_help.c:2022 msgid "state_data_type" msgstr "Zustandsdatentyp" -#: sql_help.c:1685 sql_help.c:1707 sql_help.c:1715 +#: sql_help.c:1993 sql_help.c:2015 sql_help.c:2023 msgid "state_data_size" msgstr "Zustandsdatengröße" -#: sql_help.c:1686 sql_help.c:1708 sql_help.c:1716 +#: sql_help.c:1994 sql_help.c:2016 sql_help.c:2024 msgid "ffunc" msgstr "Abschlussfunktion" -#: sql_help.c:1687 sql_help.c:1717 +#: sql_help.c:1995 sql_help.c:2025 msgid "combinefunc" msgstr "Combine-Funktion" -#: sql_help.c:1688 sql_help.c:1718 +#: sql_help.c:1996 sql_help.c:2026 msgid "serialfunc" msgstr "Serialisierungsfunktion" -#: 
sql_help.c:1689 sql_help.c:1719 +#: sql_help.c:1997 sql_help.c:2027 msgid "deserialfunc" msgstr "Deserialisierungsfunktion" -#: sql_help.c:1690 sql_help.c:1709 sql_help.c:1720 +#: sql_help.c:1998 sql_help.c:2017 sql_help.c:2028 msgid "initial_condition" msgstr "Anfangswert" -#: sql_help.c:1691 sql_help.c:1721 +#: sql_help.c:1999 sql_help.c:2029 msgid "msfunc" msgstr "Moving-Übergangsfunktion" -#: sql_help.c:1692 sql_help.c:1722 +#: sql_help.c:2000 sql_help.c:2030 msgid "minvfunc" msgstr "Moving-Inversfunktion" -#: sql_help.c:1693 sql_help.c:1723 +#: sql_help.c:2001 sql_help.c:2031 msgid "mstate_data_type" msgstr "Moving-Zustandsdatentyp" -#: sql_help.c:1694 sql_help.c:1724 +#: sql_help.c:2002 sql_help.c:2032 msgid "mstate_data_size" msgstr "Moving-Zustandsdatengröße" -#: sql_help.c:1695 sql_help.c:1725 +#: sql_help.c:2003 sql_help.c:2033 msgid "mffunc" msgstr "Moving-Abschlussfunktion" -#: sql_help.c:1696 sql_help.c:1726 +#: sql_help.c:2004 sql_help.c:2034 msgid "minitial_condition" msgstr "Moving-Anfangswert" -#: sql_help.c:1697 sql_help.c:1727 +#: sql_help.c:2005 sql_help.c:2035 msgid "sort_operator" msgstr "Sortieroperator" -#: sql_help.c:1710 +#: sql_help.c:2018 msgid "or the old syntax" msgstr "oder die alte Syntax" -#: sql_help.c:1712 +#: sql_help.c:2020 msgid "base_type" msgstr "Basistyp" -#: sql_help.c:1766 +#: sql_help.c:2076 msgid "locale" msgstr "Locale" -#: sql_help.c:1767 sql_help.c:1803 +#: sql_help.c:2077 sql_help.c:2115 msgid "lc_collate" msgstr "lc_collate" -#: sql_help.c:1768 sql_help.c:1804 +#: sql_help.c:2078 sql_help.c:2116 msgid "lc_ctype" msgstr "lc_ctype" -#: sql_help.c:1770 +#: sql_help.c:2079 sql_help.c:4122 +msgid "provider" +msgstr "Provider" + +#: sql_help.c:2080 sql_help.c:2171 +msgid "version" +msgstr "Version" + +#: sql_help.c:2082 msgid "existing_collation" msgstr "existierende_Sortierfolge" -#: sql_help.c:1780 +#: sql_help.c:2092 msgid "source_encoding" msgstr "Quellkodierung" -#: sql_help.c:1781 +#: sql_help.c:2093 msgid "dest_encoding" msgstr "Zielkodierung" -#: sql_help.c:1801 sql_help.c:2522 +#: sql_help.c:2113 sql_help.c:2881 msgid "template" msgstr "Vorlage" -#: sql_help.c:1802 +#: sql_help.c:2114 msgid "encoding" msgstr "Kodierung" -#: sql_help.c:1828 +#: sql_help.c:2140 msgid "constraint" msgstr "Constraint" -#: sql_help.c:1829 +#: sql_help.c:2141 msgid "where constraint is:" msgstr "wobei Constraint Folgendes ist:" -#: sql_help.c:1843 sql_help.c:2217 sql_help.c:2595 +#: sql_help.c:2155 sql_help.c:2551 sql_help.c:2954 msgid "event" msgstr "Ereignis" -#: sql_help.c:1844 +#: sql_help.c:2156 msgid "filter_variable" msgstr "Filtervariable" -#: sql_help.c:1859 -msgid "version" -msgstr "Version" - -#: sql_help.c:1860 +#: sql_help.c:2172 msgid "old_version" msgstr "alte_Version" -#: sql_help.c:1933 sql_help.c:2424 +#: sql_help.c:2245 sql_help.c:2776 msgid "where column_constraint is:" msgstr "wobei Spalten-Constraint Folgendes ist:" -#: sql_help.c:1936 sql_help.c:1968 sql_help.c:2427 -msgid "default_expr" -msgstr "Vorgabeausdruck" - -#: sql_help.c:1937 sql_help.c:2434 -msgid "and table_constraint is:" -msgstr "und Tabellen-Constraint Folgendes ist:" - -#: sql_help.c:1969 +#: sql_help.c:2280 msgid "rettype" msgstr "Rückgabetyp" -#: sql_help.c:1971 +#: sql_help.c:2282 msgid "column_type" msgstr "Spaltentyp" -#: sql_help.c:1979 +#: sql_help.c:2290 sql_help.c:2482 msgid "definition" msgstr "Definition" -#: sql_help.c:1980 +#: sql_help.c:2291 sql_help.c:2483 msgid "obj_file" msgstr "Objektdatei" -#: sql_help.c:1981 +#: sql_help.c:2292 sql_help.c:2484 msgid 
"link_symbol" msgstr "Linksymbol" -#: sql_help.c:1982 -msgid "attribute" -msgstr "Attribut" - -#: sql_help.c:2016 sql_help.c:2202 sql_help.c:2714 +#: sql_help.c:2326 sql_help.c:2536 sql_help.c:3073 msgid "uid" msgstr "Uid" -#: sql_help.c:2030 +#: sql_help.c:2341 msgid "method" msgstr "Methode" -#: sql_help.c:2034 sql_help.c:2395 sql_help.c:2407 sql_help.c:2420 -#: sql_help.c:2463 sql_help.c:3391 -msgid "opclass" -msgstr "Opklasse" - -#: sql_help.c:2038 sql_help.c:2445 -msgid "predicate" -msgstr "Prädikat" - -#: sql_help.c:2050 +#: sql_help.c:2362 msgid "call_handler" msgstr "Handler" -#: sql_help.c:2051 +#: sql_help.c:2363 msgid "inline_handler" msgstr "Inline-Handler" -#: sql_help.c:2052 +#: sql_help.c:2364 msgid "valfunction" msgstr "Valfunktion" -#: sql_help.c:2088 +#: sql_help.c:2400 msgid "com_op" msgstr "Kommutator-Op" -#: sql_help.c:2089 +#: sql_help.c:2401 msgid "neg_op" msgstr "Umkehrungs-Op" -#: sql_help.c:2107 +#: sql_help.c:2419 msgid "family_name" msgstr "Familienname" -#: sql_help.c:2118 +#: sql_help.c:2430 msgid "storage_type" msgstr "Storage-Typ" -#: sql_help.c:2219 sql_help.c:2599 sql_help.c:2776 sql_help.c:3401 -#: sql_help.c:3816 sql_help.c:3818 sql_help.c:3906 sql_help.c:3908 -#: sql_help.c:4055 sql_help.c:4057 sql_help.c:4160 sql_help.c:4249 -#: sql_help.c:4251 +#: sql_help.c:2553 sql_help.c:2958 sql_help.c:3135 sql_help.c:3791 +#: sql_help.c:4220 sql_help.c:4222 sql_help.c:4310 sql_help.c:4312 +#: sql_help.c:4459 sql_help.c:4461 sql_help.c:4564 sql_help.c:4666 +#: sql_help.c:4668 msgid "condition" msgstr "Bedingung" -#: sql_help.c:2223 sql_help.c:2602 +#: sql_help.c:2557 sql_help.c:2961 msgid "where event can be one of:" msgstr "wobei Ereignis eins der folgenden sein kann:" -#: sql_help.c:2242 sql_help.c:2244 +#: sql_help.c:2576 sql_help.c:2578 msgid "schema_element" msgstr "Schemaelement" -#: sql_help.c:2281 +#: sql_help.c:2615 msgid "server_type" msgstr "Servertyp" -#: sql_help.c:2282 +#: sql_help.c:2616 msgid "server_version" msgstr "Serverversion" -#: sql_help.c:2283 sql_help.c:3311 sql_help.c:3643 +#: sql_help.c:2617 sql_help.c:3700 sql_help.c:4033 msgid "fdw_name" msgstr "FDW-Name" -#: sql_help.c:2299 +#: sql_help.c:2630 +msgid "statistics_name" +msgstr "Statistikname" + +#: sql_help.c:2631 +msgid "statistics_kind" +msgstr "Statistikart" + +#: sql_help.c:2645 msgid "subscription_name" msgstr "Subskriptionsname" -#: sql_help.c:2389 +#: sql_help.c:2741 msgid "source_table" msgstr "Quelltabelle" -#: sql_help.c:2390 +#: sql_help.c:2742 msgid "like_option" msgstr "Like-Option" -#: sql_help.c:2428 sql_help.c:2429 sql_help.c:2438 sql_help.c:2440 -#: sql_help.c:2444 -msgid "index_parameters" -msgstr "Indexparameter" - -#: sql_help.c:2430 sql_help.c:2447 -msgid "reftable" -msgstr "Reftabelle" - -#: sql_help.c:2431 sql_help.c:2448 -msgid "refcolumn" -msgstr "Refspalte" - -#: sql_help.c:2442 -msgid "exclude_element" -msgstr "Exclude-Element" - -#: sql_help.c:2443 sql_help.c:3823 sql_help.c:3913 sql_help.c:4062 -#: sql_help.c:4191 sql_help.c:4256 -msgid "operator" -msgstr "Operator" - -#: sql_help.c:2451 +#: sql_help.c:2804 msgid "and like_option is:" msgstr "und Like-Option Folgendes ist:" -#: sql_help.c:2452 -msgid "and partition_bound_spec is:" -msgstr "" - -#: sql_help.c:2453 sql_help.c:2454 sql_help.c:2455 -msgid "bound_literal" -msgstr "" - -#: sql_help.c:2456 -msgid "index_parameters in UNIQUE, PRIMARY KEY, and EXCLUDE constraints are:" -msgstr "Indexparameter bei UNIQUE-, PRIMARY KEY- und EXCLUDE-Constraints sind:" - -#: sql_help.c:2460 -msgid "exclude_element in an 
EXCLUDE constraint is:" -msgstr "Exclude-Element in einem EXCLUDE-Constraint ist:" - -#: sql_help.c:2495 +#: sql_help.c:2854 msgid "directory" msgstr "Verzeichnis" -#: sql_help.c:2509 +#: sql_help.c:2868 msgid "parser_name" msgstr "Parser-Name" -#: sql_help.c:2510 +#: sql_help.c:2869 msgid "source_config" msgstr "Quellkonfig" -#: sql_help.c:2539 +#: sql_help.c:2898 msgid "start_function" msgstr "Startfunktion" -#: sql_help.c:2540 +#: sql_help.c:2899 msgid "gettoken_function" msgstr "Gettext-Funktion" -#: sql_help.c:2541 +#: sql_help.c:2900 msgid "end_function" msgstr "Endfunktion" -#: sql_help.c:2542 +#: sql_help.c:2901 msgid "lextypes_function" msgstr "Lextypenfunktion" -#: sql_help.c:2543 +#: sql_help.c:2902 msgid "headline_function" msgstr "Headline-Funktion" -#: sql_help.c:2555 +#: sql_help.c:2914 msgid "init_function" msgstr "Init-Funktion" -#: sql_help.c:2556 +#: sql_help.c:2915 msgid "lexize_function" msgstr "Lexize-Funktion" -#: sql_help.c:2569 +#: sql_help.c:2928 msgid "from_sql_function_name" msgstr "From-SQL-Funktionsname" -#: sql_help.c:2571 +#: sql_help.c:2930 msgid "to_sql_function_name" msgstr "To-SQL-Funktionsname" -#: sql_help.c:2597 +#: sql_help.c:2956 msgid "referenced_table_name" msgstr "verwiesener_Tabellenname" -#: sql_help.c:2598 -#, fuzzy -#| msgid "relation_name" +#: sql_help.c:2957 msgid "transition_relation_name" -msgstr "Relationsname" +msgstr "Übergangsrelationsname" -#: sql_help.c:2601 +#: sql_help.c:2960 msgid "arguments" msgstr "Argumente" -#: sql_help.c:2651 sql_help.c:3751 +#: sql_help.c:3010 sql_help.c:4155 msgid "label" msgstr "Label" -#: sql_help.c:2653 +#: sql_help.c:3012 msgid "subtype" msgstr "Untertyp" -#: sql_help.c:2654 +#: sql_help.c:3013 msgid "subtype_operator_class" msgstr "Untertyp-Operatorklasse" -#: sql_help.c:2656 +#: sql_help.c:3015 msgid "canonical_function" msgstr "Canonical-Funktion" -#: sql_help.c:2657 +#: sql_help.c:3016 msgid "subtype_diff_function" msgstr "Untertyp-Diff-Funktion" -#: sql_help.c:2659 +#: sql_help.c:3018 msgid "input_function" msgstr "Eingabefunktion" -#: sql_help.c:2660 +#: sql_help.c:3019 msgid "output_function" msgstr "Ausgabefunktion" -#: sql_help.c:2661 +#: sql_help.c:3020 msgid "receive_function" msgstr "Empfangsfunktion" -#: sql_help.c:2662 +#: sql_help.c:3021 msgid "send_function" msgstr "Sendefunktion" -#: sql_help.c:2663 +#: sql_help.c:3022 msgid "type_modifier_input_function" msgstr "Typmod-Eingabefunktion" -#: sql_help.c:2664 +#: sql_help.c:3023 msgid "type_modifier_output_function" msgstr "Typmod-Ausgabefunktion" -#: sql_help.c:2665 +#: sql_help.c:3024 msgid "analyze_function" msgstr "Analyze-Funktion" -#: sql_help.c:2666 +#: sql_help.c:3025 msgid "internallength" msgstr "interne_Länge" -#: sql_help.c:2667 +#: sql_help.c:3026 msgid "alignment" msgstr "Ausrichtung" -#: sql_help.c:2668 +#: sql_help.c:3027 msgid "storage" msgstr "Speicherung" -#: sql_help.c:2669 +#: sql_help.c:3028 msgid "like_type" msgstr "wie_Typ" -#: sql_help.c:2670 +#: sql_help.c:3029 msgid "category" msgstr "Kategorie" -#: sql_help.c:2671 +#: sql_help.c:3030 msgid "preferred" msgstr "bevorzugt" -#: sql_help.c:2672 +#: sql_help.c:3031 msgid "default" msgstr "Vorgabewert" -#: sql_help.c:2673 +#: sql_help.c:3032 msgid "element" msgstr "Element" -#: sql_help.c:2674 +#: sql_help.c:3033 msgid "delimiter" msgstr "Trennzeichen" -#: sql_help.c:2675 +#: sql_help.c:3034 msgid "collatable" msgstr "sortierbar" -#: sql_help.c:2772 sql_help.c:3377 sql_help.c:3811 sql_help.c:3900 -#: sql_help.c:4050 sql_help.c:4150 sql_help.c:4244 +#: sql_help.c:3131 
sql_help.c:3767 sql_help.c:4215 sql_help.c:4304 +#: sql_help.c:4454 sql_help.c:4554 sql_help.c:4661 msgid "with_query" msgstr "With-Anfrage" -#: sql_help.c:2774 sql_help.c:3379 sql_help.c:3830 sql_help.c:3836 -#: sql_help.c:3839 sql_help.c:3843 sql_help.c:3847 sql_help.c:3855 -#: sql_help.c:4069 sql_help.c:4075 sql_help.c:4078 sql_help.c:4082 -#: sql_help.c:4086 sql_help.c:4094 sql_help.c:4152 sql_help.c:4263 -#: sql_help.c:4269 sql_help.c:4272 sql_help.c:4276 sql_help.c:4280 -#: sql_help.c:4288 +#: sql_help.c:3133 sql_help.c:3769 sql_help.c:4234 sql_help.c:4240 +#: sql_help.c:4243 sql_help.c:4247 sql_help.c:4251 sql_help.c:4259 +#: sql_help.c:4473 sql_help.c:4479 sql_help.c:4482 sql_help.c:4486 +#: sql_help.c:4490 sql_help.c:4498 sql_help.c:4556 sql_help.c:4680 +#: sql_help.c:4686 sql_help.c:4689 sql_help.c:4693 sql_help.c:4697 +#: sql_help.c:4705 msgid "alias" msgstr "Alias" -#: sql_help.c:2775 +#: sql_help.c:3134 msgid "using_list" msgstr "Using-Liste" -#: sql_help.c:2777 sql_help.c:3218 sql_help.c:3458 sql_help.c:4161 +#: sql_help.c:3136 sql_help.c:3607 sql_help.c:3848 sql_help.c:4565 msgid "cursor_name" msgstr "Cursor-Name" -#: sql_help.c:2778 sql_help.c:3385 sql_help.c:4162 +#: sql_help.c:3137 sql_help.c:3775 sql_help.c:4566 msgid "output_expression" msgstr "Ausgabeausdruck" -#: sql_help.c:2779 sql_help.c:3386 sql_help.c:3814 sql_help.c:3903 -#: sql_help.c:4053 sql_help.c:4163 sql_help.c:4247 +#: sql_help.c:3138 sql_help.c:3776 sql_help.c:4218 sql_help.c:4307 +#: sql_help.c:4457 sql_help.c:4567 sql_help.c:4664 msgid "output_name" msgstr "Ausgabename" -#: sql_help.c:2795 +#: sql_help.c:3154 msgid "code" msgstr "Code" -#: sql_help.c:3164 +#: sql_help.c:3553 msgid "parameter" msgstr "Parameter" -#: sql_help.c:3184 sql_help.c:3185 sql_help.c:3483 +#: sql_help.c:3573 sql_help.c:3574 sql_help.c:3873 msgid "statement" msgstr "Anweisung" -#: sql_help.c:3217 sql_help.c:3457 +#: sql_help.c:3606 sql_help.c:3847 msgid "direction" msgstr "Richtung" -#: sql_help.c:3219 sql_help.c:3459 +#: sql_help.c:3608 sql_help.c:3849 msgid "where direction can be empty or one of:" msgstr "wobei Richtung leer sein kann oder Folgendes:" -#: sql_help.c:3220 sql_help.c:3221 sql_help.c:3222 sql_help.c:3223 -#: sql_help.c:3224 sql_help.c:3460 sql_help.c:3461 sql_help.c:3462 -#: sql_help.c:3463 sql_help.c:3464 sql_help.c:3824 sql_help.c:3826 -#: sql_help.c:3914 sql_help.c:3916 sql_help.c:4063 sql_help.c:4065 -#: sql_help.c:4192 sql_help.c:4194 sql_help.c:4257 sql_help.c:4259 +#: sql_help.c:3609 sql_help.c:3610 sql_help.c:3611 sql_help.c:3612 +#: sql_help.c:3613 sql_help.c:3850 sql_help.c:3851 sql_help.c:3852 +#: sql_help.c:3853 sql_help.c:3854 sql_help.c:4228 sql_help.c:4230 +#: sql_help.c:4318 sql_help.c:4320 sql_help.c:4467 sql_help.c:4469 +#: sql_help.c:4609 sql_help.c:4611 sql_help.c:4674 sql_help.c:4676 msgid "count" msgstr "Anzahl" -#: sql_help.c:3304 sql_help.c:3636 +#: sql_help.c:3693 sql_help.c:4026 msgid "sequence_name" msgstr "Sequenzname" -#: sql_help.c:3317 sql_help.c:3649 +#: sql_help.c:3706 sql_help.c:4039 msgid "arg_name" msgstr "Argname" -#: sql_help.c:3318 sql_help.c:3650 +#: sql_help.c:3707 sql_help.c:4040 msgid "arg_type" msgstr "Argtyp" -#: sql_help.c:3323 sql_help.c:3655 +#: sql_help.c:3712 sql_help.c:4045 msgid "loid" msgstr "Large-Object-OID" -#: sql_help.c:3346 +#: sql_help.c:3735 msgid "remote_schema" msgstr "fernes_Schema" -#: sql_help.c:3349 +#: sql_help.c:3738 msgid "local_schema" msgstr "lokales_Schema" -#: sql_help.c:3383 +#: sql_help.c:3773 msgid "conflict_target" msgstr "Konfliktziel" 
-#: sql_help.c:3384 +#: sql_help.c:3774 msgid "conflict_action" msgstr "Konfliktaktion" -#: sql_help.c:3387 +#: sql_help.c:3777 msgid "where conflict_target can be one of:" msgstr "wobei Konfliktziel Folgendes sein kann:" -#: sql_help.c:3388 +#: sql_help.c:3778 msgid "index_column_name" msgstr "Indexspaltenname" -#: sql_help.c:3389 +#: sql_help.c:3779 msgid "index_expression" msgstr "Indexausdruck" -#: sql_help.c:3392 +#: sql_help.c:3782 msgid "index_predicate" msgstr "Indexprädikat" -#: sql_help.c:3394 +#: sql_help.c:3784 msgid "and conflict_action is one of:" msgstr "und Konfliktaktion Folgendes sein kann:" -#: sql_help.c:3400 sql_help.c:4158 +#: sql_help.c:3790 sql_help.c:4562 msgid "sub-SELECT" msgstr "Sub-SELECT" -#: sql_help.c:3409 sql_help.c:3472 sql_help.c:4134 +#: sql_help.c:3799 sql_help.c:3862 sql_help.c:4538 msgid "channel" msgstr "Kanal" -#: sql_help.c:3431 +#: sql_help.c:3821 msgid "lockmode" msgstr "Sperrmodus" -#: sql_help.c:3432 +#: sql_help.c:3822 msgid "where lockmode is one of:" msgstr "wobei Sperrmodus Folgendes sein kann:" -#: sql_help.c:3473 +#: sql_help.c:3863 msgid "payload" msgstr "Payload" -#: sql_help.c:3500 +#: sql_help.c:3890 msgid "old_role" msgstr "alte_Rolle" -#: sql_help.c:3501 +#: sql_help.c:3891 msgid "new_role" msgstr "neue_Rolle" -#: sql_help.c:3526 sql_help.c:3687 sql_help.c:3695 +#: sql_help.c:3916 sql_help.c:4077 sql_help.c:4085 msgid "savepoint_name" msgstr "Sicherungspunktsname" -#: sql_help.c:3728 -msgid "provider" -msgstr "Provider" - -#: sql_help.c:3815 sql_help.c:3857 sql_help.c:3859 sql_help.c:3905 -#: sql_help.c:4054 sql_help.c:4096 sql_help.c:4098 sql_help.c:4248 -#: sql_help.c:4290 sql_help.c:4292 +#: sql_help.c:4219 sql_help.c:4261 sql_help.c:4263 sql_help.c:4309 +#: sql_help.c:4458 sql_help.c:4500 sql_help.c:4502 sql_help.c:4665 +#: sql_help.c:4707 sql_help.c:4709 msgid "from_item" msgstr "From-Element" -#: sql_help.c:3817 sql_help.c:3869 sql_help.c:4056 sql_help.c:4108 -#: sql_help.c:4250 sql_help.c:4302 +#: sql_help.c:4221 sql_help.c:4273 sql_help.c:4460 sql_help.c:4512 +#: sql_help.c:4667 sql_help.c:4719 msgid "grouping_element" msgstr "Gruppierelement" -#: sql_help.c:3819 sql_help.c:3909 sql_help.c:4058 sql_help.c:4252 +#: sql_help.c:4223 sql_help.c:4313 sql_help.c:4462 sql_help.c:4669 msgid "window_name" msgstr "Fenstername" -#: sql_help.c:3820 sql_help.c:3910 sql_help.c:4059 sql_help.c:4253 +#: sql_help.c:4224 sql_help.c:4314 sql_help.c:4463 sql_help.c:4670 msgid "window_definition" msgstr "Fensterdefinition" -#: sql_help.c:3821 sql_help.c:3835 sql_help.c:3873 sql_help.c:3911 -#: sql_help.c:4060 sql_help.c:4074 sql_help.c:4112 sql_help.c:4254 -#: sql_help.c:4268 sql_help.c:4306 +#: sql_help.c:4225 sql_help.c:4239 sql_help.c:4277 sql_help.c:4315 +#: sql_help.c:4464 sql_help.c:4478 sql_help.c:4516 sql_help.c:4671 +#: sql_help.c:4685 sql_help.c:4723 msgid "select" msgstr "Select" -#: sql_help.c:3828 sql_help.c:4067 sql_help.c:4261 +#: sql_help.c:4232 sql_help.c:4471 sql_help.c:4678 msgid "where from_item can be one of:" msgstr "wobei From-Element Folgendes sein kann:" -#: sql_help.c:3831 sql_help.c:3837 sql_help.c:3840 sql_help.c:3844 -#: sql_help.c:3856 sql_help.c:4070 sql_help.c:4076 sql_help.c:4079 -#: sql_help.c:4083 sql_help.c:4095 sql_help.c:4264 sql_help.c:4270 -#: sql_help.c:4273 sql_help.c:4277 sql_help.c:4289 +#: sql_help.c:4235 sql_help.c:4241 sql_help.c:4244 sql_help.c:4248 +#: sql_help.c:4260 sql_help.c:4474 sql_help.c:4480 sql_help.c:4483 +#: sql_help.c:4487 sql_help.c:4499 sql_help.c:4681 sql_help.c:4687 +#: 
sql_help.c:4690 sql_help.c:4694 sql_help.c:4706 msgid "column_alias" msgstr "Spaltenalias" -#: sql_help.c:3832 sql_help.c:4071 sql_help.c:4265 +#: sql_help.c:4236 sql_help.c:4475 sql_help.c:4682 msgid "sampling_method" msgstr "Stichprobenmethode" -#: sql_help.c:3833 sql_help.c:3842 sql_help.c:3846 sql_help.c:3850 -#: sql_help.c:3853 sql_help.c:4072 sql_help.c:4081 sql_help.c:4085 -#: sql_help.c:4089 sql_help.c:4092 sql_help.c:4266 sql_help.c:4275 -#: sql_help.c:4279 sql_help.c:4283 sql_help.c:4286 -msgid "argument" -msgstr "Argument" - -#: sql_help.c:3834 sql_help.c:4073 sql_help.c:4267 +#: sql_help.c:4238 sql_help.c:4477 sql_help.c:4684 msgid "seed" msgstr "Startwert" -#: sql_help.c:3838 sql_help.c:3871 sql_help.c:4077 sql_help.c:4110 -#: sql_help.c:4271 sql_help.c:4304 +#: sql_help.c:4242 sql_help.c:4275 sql_help.c:4481 sql_help.c:4514 +#: sql_help.c:4688 sql_help.c:4721 msgid "with_query_name" msgstr "With-Anfragename" -#: sql_help.c:3848 sql_help.c:3851 sql_help.c:3854 sql_help.c:4087 -#: sql_help.c:4090 sql_help.c:4093 sql_help.c:4281 sql_help.c:4284 -#: sql_help.c:4287 +#: sql_help.c:4252 sql_help.c:4255 sql_help.c:4258 sql_help.c:4491 +#: sql_help.c:4494 sql_help.c:4497 sql_help.c:4698 sql_help.c:4701 +#: sql_help.c:4704 msgid "column_definition" msgstr "Spaltendefinition" -#: sql_help.c:3858 sql_help.c:4097 sql_help.c:4291 +#: sql_help.c:4262 sql_help.c:4501 sql_help.c:4708 msgid "join_type" msgstr "Verbundtyp" -#: sql_help.c:3860 sql_help.c:4099 sql_help.c:4293 +#: sql_help.c:4264 sql_help.c:4503 sql_help.c:4710 msgid "join_condition" msgstr "Verbundbedingung" -#: sql_help.c:3861 sql_help.c:4100 sql_help.c:4294 +#: sql_help.c:4265 sql_help.c:4504 sql_help.c:4711 msgid "join_column" msgstr "Verbundspalte" -#: sql_help.c:3862 sql_help.c:4101 sql_help.c:4295 +#: sql_help.c:4266 sql_help.c:4505 sql_help.c:4712 msgid "and grouping_element can be one of:" msgstr "und Gruppierelement eins der folgenden sein kann:" -#: sql_help.c:3870 sql_help.c:4109 sql_help.c:4303 +#: sql_help.c:4274 sql_help.c:4513 sql_help.c:4720 msgid "and with_query is:" msgstr "und With-Anfrage ist:" -#: sql_help.c:3874 sql_help.c:4113 sql_help.c:4307 +#: sql_help.c:4278 sql_help.c:4517 sql_help.c:4724 msgid "values" msgstr "values" -#: sql_help.c:3875 sql_help.c:4114 sql_help.c:4308 +#: sql_help.c:4279 sql_help.c:4518 sql_help.c:4725 msgid "insert" msgstr "insert" -#: sql_help.c:3876 sql_help.c:4115 sql_help.c:4309 +#: sql_help.c:4280 sql_help.c:4519 sql_help.c:4726 msgid "update" msgstr "update" -#: sql_help.c:3877 sql_help.c:4116 sql_help.c:4310 +#: sql_help.c:4281 sql_help.c:4520 sql_help.c:4727 msgid "delete" msgstr "delete" -#: sql_help.c:3904 +#: sql_help.c:4308 msgid "new_table" msgstr "neue_Tabelle" -#: sql_help.c:3929 +#: sql_help.c:4333 msgid "timezone" msgstr "Zeitzone" -#: sql_help.c:3974 +#: sql_help.c:4378 msgid "snapshot_id" msgstr "Snapshot-ID" -#: sql_help.c:4159 +#: sql_help.c:4563 msgid "from_list" msgstr "From-Liste" -#: sql_help.c:4190 +#: sql_help.c:4607 msgid "sort_expression" msgstr "Sortierausdruck" -#: sql_help.c:4317 sql_help.c:5087 +#: sql_help.c:4734 sql_help.c:5549 msgid "abort the current transaction" msgstr "bricht die aktuelle Transaktion ab" -#: sql_help.c:4322 +#: sql_help.c:4739 msgid "change the definition of an aggregate function" msgstr "ändert die Definition einer Aggregatfunktion" -#: sql_help.c:4327 +#: sql_help.c:4744 msgid "change the definition of a collation" msgstr "ändert die Definition einer Sortierfolge" -#: sql_help.c:4332 +#: sql_help.c:4749 msgid "change the 
definition of a conversion" msgstr "ändert die Definition einer Zeichensatzkonversion" -#: sql_help.c:4337 +#: sql_help.c:4754 msgid "change a database" msgstr "ändert eine Datenbank" -#: sql_help.c:4342 +#: sql_help.c:4759 msgid "define default access privileges" msgstr "definiert vorgegebene Zugriffsprivilegien" -#: sql_help.c:4347 +#: sql_help.c:4764 msgid "change the definition of a domain" msgstr "ändert die Definition einer Domäne" -#: sql_help.c:4352 +#: sql_help.c:4769 msgid "change the definition of an event trigger" msgstr "ändert die Definition eines Ereignistriggers" -#: sql_help.c:4357 +#: sql_help.c:4774 msgid "change the definition of an extension" msgstr "ändert die Definition einer Erweiterung" -#: sql_help.c:4362 +#: sql_help.c:4779 msgid "change the definition of a foreign-data wrapper" msgstr "ändert die Definition eines Fremddaten-Wrappers" -#: sql_help.c:4367 +#: sql_help.c:4784 msgid "change the definition of a foreign table" msgstr "ändert die Definition einer Fremdtabelle" -#: sql_help.c:4372 +#: sql_help.c:4789 msgid "change the definition of a function" msgstr "ändert die Definition einer Funktion" -#: sql_help.c:4377 +#: sql_help.c:4794 msgid "change role name or membership" msgstr "ändert Rollenname oder -mitglieder" -#: sql_help.c:4382 +#: sql_help.c:4799 msgid "change the definition of an index" msgstr "ändert die Definition eines Index" -#: sql_help.c:4387 +#: sql_help.c:4804 msgid "change the definition of a procedural language" msgstr "ändert die Definition einer prozeduralen Sprache" -#: sql_help.c:4392 +#: sql_help.c:4809 msgid "change the definition of a large object" msgstr "ändert die Definition eines Large Object" -#: sql_help.c:4397 +#: sql_help.c:4814 msgid "change the definition of a materialized view" msgstr "ändert die Definition einer materialisierten Sicht" -#: sql_help.c:4402 +#: sql_help.c:4819 msgid "change the definition of an operator" msgstr "ändert die Definition eines Operators" -#: sql_help.c:4407 +#: sql_help.c:4824 msgid "change the definition of an operator class" msgstr "ändert die Definition einer Operatorklasse" -#: sql_help.c:4412 +#: sql_help.c:4829 msgid "change the definition of an operator family" msgstr "ändert die Definition einer Operatorfamilie" -#: sql_help.c:4417 +#: sql_help.c:4834 msgid "change the definition of a row level security policy" msgstr "ändert die Definition einer Policy für Sicherheit auf Zeilenebene" -#: sql_help.c:4422 +#: sql_help.c:4839 +msgid "change the definition of a procedure" +msgstr "ändert die Definition einer Prozedur" + +#: sql_help.c:4844 msgid "change the definition of a publication" msgstr "ändert die Definition einer Publikation" -#: sql_help.c:4427 sql_help.c:4502 +#: sql_help.c:4849 sql_help.c:4934 msgid "change a database role" msgstr "ändert eine Datenbankrolle" -#: sql_help.c:4432 +#: sql_help.c:4854 +msgid "change the definition of a routine" +msgstr "ändert die Definition einer Routine" + +#: sql_help.c:4859 msgid "change the definition of a rule" msgstr "ändert die Definition einer Regel" -#: sql_help.c:4437 +#: sql_help.c:4864 msgid "change the definition of a schema" msgstr "ändert die Definition eines Schemas" -#: sql_help.c:4442 +#: sql_help.c:4869 msgid "change the definition of a sequence generator" msgstr "ändert die Definition eines Sequenzgenerators" -#: sql_help.c:4447 +#: sql_help.c:4874 msgid "change the definition of a foreign server" msgstr "ändert die Definition eines Fremdservers" -#: sql_help.c:4452 +#: sql_help.c:4879 +msgid "change the definition of an 
extended statistics object" +msgstr "ändert die Definition eines erweiterten Statistikobjekts" + +#: sql_help.c:4884 msgid "change the definition of a subscription" msgstr "ändert die Definition einer Subskription" -#: sql_help.c:4457 +#: sql_help.c:4889 msgid "change a server configuration parameter" msgstr "ändert einen Server-Konfigurationsparameter" -#: sql_help.c:4462 +#: sql_help.c:4894 msgid "change the definition of a table" msgstr "ändert die Definition einer Tabelle" -#: sql_help.c:4467 +#: sql_help.c:4899 msgid "change the definition of a tablespace" msgstr "ändert die Definition eines Tablespace" -#: sql_help.c:4472 +#: sql_help.c:4904 msgid "change the definition of a text search configuration" msgstr "ändert die Definition einer Textsuchekonfiguration" -#: sql_help.c:4477 +#: sql_help.c:4909 msgid "change the definition of a text search dictionary" msgstr "ändert die Definition eines Textsuchewörterbuchs" -#: sql_help.c:4482 +#: sql_help.c:4914 msgid "change the definition of a text search parser" msgstr "ändert die Definition eines Textsucheparsers" -#: sql_help.c:4487 +#: sql_help.c:4919 msgid "change the definition of a text search template" msgstr "ändert die Definition einer Textsuchevorlage" -#: sql_help.c:4492 +#: sql_help.c:4924 msgid "change the definition of a trigger" msgstr "ändert die Definition eines Triggers" -#: sql_help.c:4497 +#: sql_help.c:4929 msgid "change the definition of a type" msgstr "ändert die Definition eines Typs" -#: sql_help.c:4507 +#: sql_help.c:4939 msgid "change the definition of a user mapping" msgstr "ändert die Definition einer Benutzerabbildung" -#: sql_help.c:4512 +#: sql_help.c:4944 msgid "change the definition of a view" msgstr "ändert die Definition einer Sicht" -#: sql_help.c:4517 +#: sql_help.c:4949 msgid "collect statistics about a database" msgstr "sammelt Statistiken über eine Datenbank" -#: sql_help.c:4522 sql_help.c:5152 +#: sql_help.c:4954 sql_help.c:5614 msgid "start a transaction block" msgstr "startet einen Transaktionsblock" -#: sql_help.c:4527 -msgid "force a transaction log checkpoint" -msgstr "erzwingt einen Checkpoint im Transaktionslog" +#: sql_help.c:4959 +#, fuzzy +#| msgid "remove a procedural language" +msgid "invoke a procedure" +msgstr "entfernt eine prozedurale Sprache" + +#: sql_help.c:4964 +msgid "force a write-ahead log checkpoint" +msgstr "erzwingt einen Checkpoint im Write-Ahead-Log" -#: sql_help.c:4532 +#: sql_help.c:4969 msgid "close a cursor" msgstr "schließt einen Cursor" -#: sql_help.c:4537 +#: sql_help.c:4974 msgid "cluster a table according to an index" msgstr "clustert eine Tabelle nach einem Index" -#: sql_help.c:4542 +#: sql_help.c:4979 msgid "define or change the comment of an object" msgstr "definiert oder ändert den Kommentar eines Objektes" -#: sql_help.c:4547 sql_help.c:4987 +#: sql_help.c:4984 sql_help.c:5449 msgid "commit the current transaction" msgstr "schließt die aktuelle Transaktion ab" -#: sql_help.c:4552 +#: sql_help.c:4989 msgid "commit a transaction that was earlier prepared for two-phase commit" msgstr "schließt eine Transaktion ab, die vorher für Two-Phase-Commit vorbereitet worden war" -#: sql_help.c:4557 +#: sql_help.c:4994 msgid "copy data between a file and a table" msgstr "kopiert Daten zwischen einer Datei und einer Tabelle" -#: sql_help.c:4562 +#: sql_help.c:4999 msgid "define a new access method" msgstr "definiert eine neue Zugriffsmethode" -#: sql_help.c:4567 +#: sql_help.c:5004 msgid "define a new aggregate function" msgstr "definiert eine neue Aggregatfunktion" -#: 
sql_help.c:4572 +#: sql_help.c:5009 msgid "define a new cast" msgstr "definiert eine neue Typumwandlung" -#: sql_help.c:4577 +#: sql_help.c:5014 msgid "define a new collation" msgstr "definiert eine neue Sortierfolge" -#: sql_help.c:4582 +#: sql_help.c:5019 msgid "define a new encoding conversion" msgstr "definiert eine neue Kodierungskonversion" -#: sql_help.c:4587 +#: sql_help.c:5024 msgid "create a new database" msgstr "erzeugt eine neue Datenbank" -#: sql_help.c:4592 +#: sql_help.c:5029 msgid "define a new domain" msgstr "definiert eine neue Domäne" -#: sql_help.c:4597 +#: sql_help.c:5034 msgid "define a new event trigger" msgstr "definiert einen neuen Ereignistrigger" -#: sql_help.c:4602 +#: sql_help.c:5039 msgid "install an extension" msgstr "installiert eine Erweiterung" -#: sql_help.c:4607 +#: sql_help.c:5044 msgid "define a new foreign-data wrapper" msgstr "definiert einen neuen Fremddaten-Wrapper" -#: sql_help.c:4612 +#: sql_help.c:5049 msgid "define a new foreign table" msgstr "definiert eine neue Fremdtabelle" -#: sql_help.c:4617 +#: sql_help.c:5054 msgid "define a new function" msgstr "definiert eine neue Funktion" -#: sql_help.c:4622 sql_help.c:4667 sql_help.c:4747 +#: sql_help.c:5059 sql_help.c:5109 sql_help.c:5194 msgid "define a new database role" msgstr "definiert eine neue Datenbankrolle" -#: sql_help.c:4627 +#: sql_help.c:5064 msgid "define a new index" msgstr "definiert einen neuen Index" -#: sql_help.c:4632 +#: sql_help.c:5069 msgid "define a new procedural language" msgstr "definiert eine neue prozedurale Sprache" -#: sql_help.c:4637 +#: sql_help.c:5074 msgid "define a new materialized view" msgstr "definiert eine neue materialisierte Sicht" -#: sql_help.c:4642 +#: sql_help.c:5079 msgid "define a new operator" msgstr "definiert einen neuen Operator" -#: sql_help.c:4647 +#: sql_help.c:5084 msgid "define a new operator class" msgstr "definiert eine neue Operatorklasse" -#: sql_help.c:4652 +#: sql_help.c:5089 msgid "define a new operator family" msgstr "definiert eine neue Operatorfamilie" -#: sql_help.c:4657 +#: sql_help.c:5094 msgid "define a new row level security policy for a table" msgstr "definiert eine neue Policy für Sicherheit auf Zeilenebene für eine Tabelle" -#: sql_help.c:4662 +#: sql_help.c:5099 +msgid "define a new procedure" +msgstr "definiert eine neue Prozedur" + +#: sql_help.c:5104 msgid "define a new publication" msgstr "definiert eine neue Publikation" -#: sql_help.c:4672 +#: sql_help.c:5114 msgid "define a new rewrite rule" msgstr "definiert eine neue Umschreiberegel" -#: sql_help.c:4677 +#: sql_help.c:5119 msgid "define a new schema" msgstr "definiert ein neues Schema" -#: sql_help.c:4682 +#: sql_help.c:5124 msgid "define a new sequence generator" msgstr "definiert einen neuen Sequenzgenerator" -#: sql_help.c:4687 +#: sql_help.c:5129 msgid "define a new foreign server" msgstr "definiert einen neuen Fremdserver" -#: sql_help.c:4692 +#: sql_help.c:5134 +msgid "define extended statistics" +msgstr "definiert erweiterte Statistiken" + +#: sql_help.c:5139 msgid "define a new subscription" msgstr "definiert eine neue Subskription" -#: sql_help.c:4697 +#: sql_help.c:5144 msgid "define a new table" msgstr "definiert eine neue Tabelle" -#: sql_help.c:4702 sql_help.c:5117 +#: sql_help.c:5149 sql_help.c:5579 msgid "define a new table from the results of a query" msgstr "definiert eine neue Tabelle aus den Ergebnissen einer Anfrage" -#: sql_help.c:4707 +#: sql_help.c:5154 msgid "define a new tablespace" msgstr "definiert einen neuen Tablespace" -#: 
sql_help.c:4712 +#: sql_help.c:5159 msgid "define a new text search configuration" msgstr "definiert eine neue Textsuchekonfiguration" -#: sql_help.c:4717 +#: sql_help.c:5164 msgid "define a new text search dictionary" msgstr "definiert ein neues Textsuchewörterbuch" -#: sql_help.c:4722 +#: sql_help.c:5169 msgid "define a new text search parser" msgstr "definiert einen neuen Textsucheparser" -#: sql_help.c:4727 +#: sql_help.c:5174 msgid "define a new text search template" msgstr "definiert eine neue Textsuchevorlage" -#: sql_help.c:4732 +#: sql_help.c:5179 msgid "define a new transform" msgstr "definiert eine neue Transformation" -#: sql_help.c:4737 +#: sql_help.c:5184 msgid "define a new trigger" msgstr "definiert einen neuen Trigger" -#: sql_help.c:4742 +#: sql_help.c:5189 msgid "define a new data type" msgstr "definiert einen neuen Datentyp" -#: sql_help.c:4752 +#: sql_help.c:5199 msgid "define a new mapping of a user to a foreign server" msgstr "definiert eine neue Abbildung eines Benutzers auf einen Fremdserver" -#: sql_help.c:4757 +#: sql_help.c:5204 msgid "define a new view" msgstr "definiert eine neue Sicht" -#: sql_help.c:4762 +#: sql_help.c:5209 msgid "deallocate a prepared statement" msgstr "gibt einen vorbereiteten Befehl frei" -#: sql_help.c:4767 +#: sql_help.c:5214 msgid "define a cursor" msgstr "definiert einen Cursor" -#: sql_help.c:4772 +#: sql_help.c:5219 msgid "delete rows of a table" msgstr "löscht Zeilen einer Tabelle" -#: sql_help.c:4777 +#: sql_help.c:5224 msgid "discard session state" msgstr "verwirft den Sitzungszustand" -#: sql_help.c:4782 +#: sql_help.c:5229 msgid "execute an anonymous code block" msgstr "führt einen anonymen Codeblock aus" -#: sql_help.c:4787 +#: sql_help.c:5234 msgid "remove an access method" msgstr "entfernt eine Zugriffsmethode" -#: sql_help.c:4792 +#: sql_help.c:5239 msgid "remove an aggregate function" msgstr "entfernt eine Aggregatfunktion" -#: sql_help.c:4797 +#: sql_help.c:5244 msgid "remove a cast" msgstr "entfernt eine Typumwandlung" -#: sql_help.c:4802 +#: sql_help.c:5249 msgid "remove a collation" msgstr "entfernt eine Sortierfolge" -#: sql_help.c:4807 +#: sql_help.c:5254 msgid "remove a conversion" msgstr "entfernt eine Zeichensatzkonversion" -#: sql_help.c:4812 +#: sql_help.c:5259 msgid "remove a database" msgstr "entfernt eine Datenbank" -#: sql_help.c:4817 +#: sql_help.c:5264 msgid "remove a domain" msgstr "entfernt eine Domäne" -#: sql_help.c:4822 +#: sql_help.c:5269 msgid "remove an event trigger" msgstr "entfernt einen Ereignistrigger" -#: sql_help.c:4827 +#: sql_help.c:5274 msgid "remove an extension" msgstr "entfernt eine Erweiterung" -#: sql_help.c:4832 +#: sql_help.c:5279 msgid "remove a foreign-data wrapper" msgstr "entfernt einen Fremddaten-Wrapper" -#: sql_help.c:4837 +#: sql_help.c:5284 msgid "remove a foreign table" msgstr "entfernt eine Fremdtabelle" -#: sql_help.c:4842 +#: sql_help.c:5289 msgid "remove a function" msgstr "entfernt eine Funktion" -#: sql_help.c:4847 sql_help.c:4897 sql_help.c:4972 +#: sql_help.c:5294 sql_help.c:5349 sql_help.c:5434 msgid "remove a database role" msgstr "entfernt eine Datenbankrolle" -#: sql_help.c:4852 +#: sql_help.c:5299 msgid "remove an index" msgstr "entfernt einen Index" -#: sql_help.c:4857 +#: sql_help.c:5304 msgid "remove a procedural language" msgstr "entfernt eine prozedurale Sprache" -#: sql_help.c:4862 +#: sql_help.c:5309 msgid "remove a materialized view" msgstr "entfernt eine materialisierte Sicht" -#: sql_help.c:4867 +#: sql_help.c:5314 msgid "remove an operator" msgstr 
"entfernt einen Operator" -#: sql_help.c:4872 +#: sql_help.c:5319 msgid "remove an operator class" msgstr "entfernt eine Operatorklasse" -#: sql_help.c:4877 +#: sql_help.c:5324 msgid "remove an operator family" msgstr "entfernt eine Operatorfamilie" -#: sql_help.c:4882 +#: sql_help.c:5329 msgid "remove database objects owned by a database role" msgstr "entfernt die einer Datenbankrolle gehörenden Datenbankobjekte" -#: sql_help.c:4887 +#: sql_help.c:5334 msgid "remove a row level security policy from a table" msgstr "entfernt eine Policy für Sicherheit auf Zeilenebene von einer Tabelle" -#: sql_help.c:4892 +#: sql_help.c:5339 +msgid "remove a procedure" +msgstr "entfernt eine Prozedur" + +#: sql_help.c:5344 msgid "remove a publication" msgstr "entfernt eine Publikation" -#: sql_help.c:4902 +#: sql_help.c:5354 +msgid "remove a routine" +msgstr "entfernt eine Routine" + +#: sql_help.c:5359 msgid "remove a rewrite rule" msgstr "entfernt eine Umschreiberegel" -#: sql_help.c:4907 +#: sql_help.c:5364 msgid "remove a schema" msgstr "entfernt ein Schema" -#: sql_help.c:4912 +#: sql_help.c:5369 msgid "remove a sequence" msgstr "entfernt eine Sequenz" -#: sql_help.c:4917 +#: sql_help.c:5374 msgid "remove a foreign server descriptor" msgstr "entfernt einen Fremdserverdeskriptor" -#: sql_help.c:4922 +#: sql_help.c:5379 +msgid "remove extended statistics" +msgstr "entfernt erweiterte Statistiken" + +#: sql_help.c:5384 msgid "remove a subscription" msgstr "entfernt eine Subskription" -#: sql_help.c:4927 +#: sql_help.c:5389 msgid "remove a table" msgstr "entfernt eine Tabelle" -#: sql_help.c:4932 +#: sql_help.c:5394 msgid "remove a tablespace" msgstr "entfernt einen Tablespace" -#: sql_help.c:4937 +#: sql_help.c:5399 msgid "remove a text search configuration" msgstr "entfernt eine Textsuchekonfiguration" -#: sql_help.c:4942 +#: sql_help.c:5404 msgid "remove a text search dictionary" msgstr "entfernt ein Textsuchewörterbuch" -#: sql_help.c:4947 +#: sql_help.c:5409 msgid "remove a text search parser" msgstr "entfernt einen Textsucheparser" -#: sql_help.c:4952 +#: sql_help.c:5414 msgid "remove a text search template" msgstr "entfernt eine Textsuchevorlage" -#: sql_help.c:4957 +#: sql_help.c:5419 msgid "remove a transform" msgstr "entfernt eine Transformation" -#: sql_help.c:4962 +#: sql_help.c:5424 msgid "remove a trigger" msgstr "entfernt einen Trigger" -#: sql_help.c:4967 +#: sql_help.c:5429 msgid "remove a data type" msgstr "entfernt einen Datentyp" -#: sql_help.c:4977 +#: sql_help.c:5439 msgid "remove a user mapping for a foreign server" msgstr "entfernt eine Benutzerabbildung für einen Fremdserver" -#: sql_help.c:4982 +#: sql_help.c:5444 msgid "remove a view" msgstr "entfernt eine Sicht" -#: sql_help.c:4992 +#: sql_help.c:5454 msgid "execute a prepared statement" msgstr "führt einen vorbereiteten Befehl aus" -#: sql_help.c:4997 +#: sql_help.c:5459 msgid "show the execution plan of a statement" msgstr "zeigt den Ausführungsplan eines Befehls" -#: sql_help.c:5002 +#: sql_help.c:5464 msgid "retrieve rows from a query using a cursor" msgstr "liest Zeilen aus einer Anfrage mit einem Cursor" -#: sql_help.c:5007 +#: sql_help.c:5469 msgid "define access privileges" msgstr "definiert Zugriffsprivilegien" -#: sql_help.c:5012 +#: sql_help.c:5474 msgid "import table definitions from a foreign server" msgstr "importiert Tabellendefinitionen von einem Fremdserver" -#: sql_help.c:5017 +#: sql_help.c:5479 msgid "create new rows in a table" msgstr "erzeugt neue Zeilen in einer Tabelle" -#: sql_help.c:5022 +#: 
sql_help.c:5484 msgid "listen for a notification" msgstr "hört auf eine Benachrichtigung" -#: sql_help.c:5027 +#: sql_help.c:5489 msgid "load a shared library file" msgstr "lädt eine dynamische Bibliotheksdatei" -#: sql_help.c:5032 +#: sql_help.c:5494 msgid "lock a table" msgstr "sperrt eine Tabelle" -#: sql_help.c:5037 +#: sql_help.c:5499 msgid "position a cursor" msgstr "positioniert einen Cursor" -#: sql_help.c:5042 +#: sql_help.c:5504 msgid "generate a notification" msgstr "erzeugt eine Benachrichtigung" -#: sql_help.c:5047 +#: sql_help.c:5509 msgid "prepare a statement for execution" msgstr "bereitet einen Befehl zur Ausführung vor" -#: sql_help.c:5052 +#: sql_help.c:5514 msgid "prepare the current transaction for two-phase commit" msgstr "bereitet die aktuelle Transaktion für Two-Phase-Commit vor" -#: sql_help.c:5057 +#: sql_help.c:5519 msgid "change the ownership of database objects owned by a database role" msgstr "ändert den Eigentümer der der Rolle gehörenden Datenbankobjekte" -#: sql_help.c:5062 +#: sql_help.c:5524 msgid "replace the contents of a materialized view" msgstr "ersetzt den Inhalt einer materialisierten Sicht" -#: sql_help.c:5067 +#: sql_help.c:5529 msgid "rebuild indexes" msgstr "baut Indexe neu" -#: sql_help.c:5072 +#: sql_help.c:5534 msgid "destroy a previously defined savepoint" msgstr "gibt einen zuvor definierten Sicherungspunkt frei" -#: sql_help.c:5077 +#: sql_help.c:5539 msgid "restore the value of a run-time parameter to the default value" msgstr "setzt einen Konfigurationsparameter auf die Voreinstellung zurück" -#: sql_help.c:5082 +#: sql_help.c:5544 msgid "remove access privileges" msgstr "entfernt Zugriffsprivilegien" -#: sql_help.c:5092 +#: sql_help.c:5554 msgid "cancel a transaction that was earlier prepared for two-phase commit" msgstr "storniert eine Transaktion, die vorher für Two-Phase-Commit vorbereitet worden war" -#: sql_help.c:5097 +#: sql_help.c:5559 msgid "roll back to a savepoint" msgstr "rollt eine Transaktion bis zu einem Sicherungspunkt zurück" -#: sql_help.c:5102 +#: sql_help.c:5564 msgid "define a new savepoint within the current transaction" msgstr "definiert einen neuen Sicherungspunkt in der aktuellen Transaktion" -#: sql_help.c:5107 +#: sql_help.c:5569 msgid "define or change a security label applied to an object" msgstr "definiert oder ändert ein Security-Label eines Objektes" -#: sql_help.c:5112 sql_help.c:5157 sql_help.c:5187 +#: sql_help.c:5574 sql_help.c:5619 sql_help.c:5649 msgid "retrieve rows from a table or view" msgstr "liest Zeilen aus einer Tabelle oder Sicht" -#: sql_help.c:5122 +#: sql_help.c:5584 msgid "change a run-time parameter" msgstr "ändert einen Konfigurationsparameter" -#: sql_help.c:5127 +#: sql_help.c:5589 msgid "set constraint check timing for the current transaction" msgstr "setzt die Zeitsteuerung für Check-Constraints in der aktuellen Transaktion" -#: sql_help.c:5132 +#: sql_help.c:5594 msgid "set the current user identifier of the current session" msgstr "setzt den aktuellen Benutzernamen der aktuellen Sitzung" -#: sql_help.c:5137 +#: sql_help.c:5599 msgid "set the session user identifier and the current user identifier of the current session" msgstr "setzt den Sitzungsbenutzernamen und den aktuellen Benutzernamen der aktuellen Sitzung" -#: sql_help.c:5142 +#: sql_help.c:5604 msgid "set the characteristics of the current transaction" msgstr "setzt die Charakteristika der aktuellen Transaktion" -#: sql_help.c:5147 +#: sql_help.c:5609 msgid "show the value of a run-time parameter" msgstr "zeigt den Wert 
eines Konfigurationsparameters" -#: sql_help.c:5162 +#: sql_help.c:5624 msgid "empty a table or set of tables" msgstr "leert eine oder mehrere Tabellen" -#: sql_help.c:5167 +#: sql_help.c:5629 msgid "stop listening for a notification" msgstr "beendet das Hören auf eine Benachrichtigung" -#: sql_help.c:5172 +#: sql_help.c:5634 msgid "update rows of a table" msgstr "aktualisiert Zeilen einer Tabelle" -#: sql_help.c:5177 +#: sql_help.c:5639 msgid "garbage-collect and optionally analyze a database" msgstr "säubert und analysiert eine Datenbank" -#: sql_help.c:5182 +#: sql_help.c:5644 msgid "compute a set of rows" msgstr "berechnet eine Zeilenmenge" -#: startup.c:184 +#: startup.c:190 #, c-format msgid "%s: -1 can only be used in non-interactive mode\n" msgstr "%s: -1 kann nur im nicht interaktiven Modus verwendet werden\n" -#: startup.c:287 +#: startup.c:305 #, c-format msgid "%s: could not open log file \"%s\": %s\n" msgstr "%s: konnte Logdatei »%s« nicht öffnen: %s\n" -#: startup.c:387 +#: startup.c:412 #, c-format msgid "" "Type \"help\" for help.\n" @@ -5473,27 +6049,27 @@ msgstr "" "Geben Sie »help« für Hilfe ein.\n" "\n" -#: startup.c:536 +#: startup.c:561 #, c-format msgid "%s: could not set printing parameter \"%s\"\n" msgstr "%s: konnte Ausgabeparameter »%s« nicht setzen\n" -#: startup.c:638 +#: startup.c:663 #, c-format msgid "Try \"%s --help\" for more information.\n" msgstr "Versuchen Sie »%s --help« für weitere Informationen.\n" -#: startup.c:655 +#: startup.c:680 #, c-format msgid "%s: warning: extra command-line argument \"%s\" ignored\n" msgstr "%s: Warnung: überflüssiges Kommandozeilenargument »%s« ignoriert\n" -#: startup.c:704 +#: startup.c:729 #, c-format msgid "%s: could not find own program executable\n" msgstr "%s: konnte eigene Programmdatei nicht finden\n" -#: tab-complete.c:4029 +#: tab-complete.c:4478 #, c-format msgid "" "tab completion query failed: %s\n" @@ -5505,36 +6081,43 @@ msgstr "" "%s\n" #: variables.c:139 -#, fuzzy, c-format -#| msgid "unrecognized value \"%s\" for \"%s\"; assuming \"%s\"\n" -msgid "unrecognized value \"%s\" for \"%s\": boolean expected\n" -msgstr "unbekannter Wert »%s« für »%s«; »%s« wird angenommen\n" +#, c-format +msgid "unrecognized value \"%s\" for \"%s\": Boolean expected\n" +msgstr "unbekannter Wert »%s« für »%s«: Boole'scher Wert erwartet\n" #: variables.c:176 -#, fuzzy, c-format -#| msgid "invalid value \"%s\" for \"%s\"" +#, c-format msgid "invalid value \"%s\" for \"%s\": integer expected\n" -msgstr "ungültiger Wert »%s« für »%s«" +msgstr "ungültiger Wert »%s« für »%s«: ganze Zahl erwartet\n" #: variables.c:224 -#, fuzzy, c-format -#| msgid "invalid locale name: \"%s\"" +#, c-format msgid "invalid variable name: \"%s\"\n" -msgstr "ungültiger Locale-Name: »%s«" +msgstr "ungültiger Variablenname: »%s«\n" #: variables.c:393 -#, fuzzy, c-format -#| msgid "unrecognized value \"%s\" for \"%s\"; assuming \"%s\"\n" +#, c-format msgid "" "unrecognized value \"%s\" for \"%s\"\n" "Available values are: %s.\n" -msgstr "unbekannter Wert »%s« für »%s«; »%s« wird angenommen\n" +msgstr "" +"unbekannter Wert »%s« für »%s«\n" +"Verfügbare Werte sind: %s.\n" + +#~ msgid "attribute" +#~ msgstr "Attribut" + +#~ msgid " VERSION_NUM psql's version (numeric format)\n" +#~ msgstr " VERSION_NUM Version von psql (numerisches Format)\n" + +#~ msgid " VERSION_NAME psql's version (short string)\n" +#~ msgstr " VERSION_NAME Version von psql (kurze Zeichenkette)\n" -#~ msgid "%s: could not set variable \"%s\"\n" -#~ msgstr "%s: konnte Variable »%s« nicht 
setzen\n" +#~ msgid " VERSION psql's version (verbose string)\n" +#~ msgstr " VERSION Version von psql (lange Zeichenkette)\n" -#~ msgid "could not set variable \"%s\"\n" -#~ msgstr "konnte Variable »%s« nicht setzen\n" +#~ msgid " SERVER_VERSION_NAME server's version (short string)\n" +#~ msgstr " SERVER_VERSION_NAME Serverversion (kurze Zeichenkette)\n" -#~ msgid "\\%s: error while setting variable\n" -#~ msgstr "\\%s: Fehler beim Setzen der Variable\n" +#~ msgid "normal" +#~ msgstr "normal" diff --git a/src/bin/psql/po/fr.po b/src/bin/psql/po/fr.po index 2b5ecb32b7..d00bb8ce95 100644 --- a/src/bin/psql/po/fr.po +++ b/src/bin/psql/po/fr.po @@ -9,8 +9,8 @@ msgid "" msgstr "" "Project-Id-Version: PostgreSQL 9.6\n" "Report-Msgid-Bugs-To: pgsql-bugs@postgresql.org\n" -"POT-Creation-Date: 2017-07-28 18:44+0000\n" -"PO-Revision-Date: 2017-07-29 09:12+0200\n" +"POT-Creation-Date: 2017-11-06 09:44+0000\n" +"PO-Revision-Date: 2017-11-11 14:22+0100\n" "Last-Translator: Guillaume Lelarge \n" "Language-Team: French \n" "Language: fr\n" @@ -18,7 +18,7 @@ msgstr "" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" "Plural-Forms: nplurals=2; plural=(n > 1);\n" -"X-Generator: Poedit 2.0.2\n" +"X-Generator: Poedit 2.0.3\n" #: ../../common/exec.c:127 ../../common/exec.c:241 ../../common/exec.c:284 #, c-format @@ -202,7 +202,7 @@ msgstr "Vous êtes connecté à la base de données « %s » en tant qu'utilisat msgid "no query buffer\n" msgstr "aucun tampon de requête\n" -#: command.c:948 command.c:4765 +#: command.c:948 command.c:4784 #, c-format msgid "invalid line number: %s\n" msgstr "numéro de ligne invalide : %s\n" @@ -226,7 +226,7 @@ msgstr "Le serveur (version %s) ne supporte pas l'édition des définitions de v msgid "%s: invalid encoding name or conversion procedure not found\n" msgstr "%s : nom d'encodage invalide ou procédure de conversion introuvable\n" -#: command.c:1266 command.c:1888 command.c:3169 command.c:4867 common.c:173 +#: command.c:1266 command.c:1888 command.c:3169 command.c:4886 common.c:173 #: common.c:244 common.c:541 common.c:1288 common.c:1316 common.c:1417 #: copy.c:489 copy.c:708 large_obj.c:156 large_obj.c:191 large_obj.c:253 #, c-format @@ -336,20 +336,20 @@ msgstr "Chronométrage activé." msgid "Timing is off." msgstr "Chronométrage désactivé." 
-#: command.c:2549 command.c:2577 command.c:3518 command.c:3521 command.c:3524 -#: command.c:3530 command.c:3532 command.c:3540 command.c:3550 command.c:3559 -#: command.c:3573 command.c:3590 command.c:3648 common.c:69 copy.c:332 +#: command.c:2549 command.c:2577 command.c:3537 command.c:3540 command.c:3543 +#: command.c:3549 command.c:3551 command.c:3559 command.c:3569 command.c:3578 +#: command.c:3592 command.c:3609 command.c:3667 common.c:69 copy.c:332 #: copy.c:392 copy.c:405 psqlscanslash.l:760 psqlscanslash.l:771 #: psqlscanslash.l:781 #, c-format msgid "%s: %s\n" msgstr "%s : %s\n" -#: command.c:2961 startup.c:202 +#: command.c:2961 startup.c:205 msgid "Password: " msgstr "Mot de passe : " -#: command.c:2966 startup.c:204 +#: command.c:2966 startup.c:207 #, c-format msgid "Password for user %s: " msgstr "Mot de passe pour l'utilisateur %s : " @@ -429,243 +429,243 @@ msgstr "" " Voir la section « Notes aux utilisateurs de Windows » de la page\n" " référence de psql pour les détails.\n" -#: command.c:3407 +#: command.c:3426 #, c-format msgid "environment variable PSQL_EDITOR_LINENUMBER_ARG must be set to specify a line number\n" msgstr "" "la variable d'environnement EDITOR_LINENUMBER_SWITCH doit être configurée\n" "pour spécifier un numéro de ligne\n" -#: command.c:3436 +#: command.c:3455 #, c-format msgid "could not start editor \"%s\"\n" msgstr "n'a pas pu exécuter l'éditeur « %s »\n" -#: command.c:3438 +#: command.c:3457 #, c-format msgid "could not start /bin/sh\n" msgstr "n'a pas pu exécuter /bin/sh\n" -#: command.c:3476 +#: command.c:3495 #, c-format msgid "could not locate temporary directory: %s\n" msgstr "n'a pas pu localiser le répertoire temporaire : %s\n" -#: command.c:3503 +#: command.c:3522 #, c-format msgid "could not open temporary file \"%s\": %s\n" msgstr "n'a pas pu ouvrir le fichier temporaire « %s » : %s\n" -#: command.c:3777 +#: command.c:3796 #, c-format msgid "\\pset: allowed formats are unaligned, aligned, wrapped, html, asciidoc, latex, latex-longtable, troff-ms\n" msgstr "" "\\pset : les formats autorisés sont unaligned, aligned, wrapped, html, asciidoc, latex,\n" "latex-longtable, troff-ms\n" -#: command.c:3795 +#: command.c:3814 #, c-format msgid "\\pset: allowed line styles are ascii, old-ascii, unicode\n" msgstr "\\pset: les styles de lignes autorisés sont ascii, old-ascii, unicode\n" -#: command.c:3810 +#: command.c:3829 #, c-format msgid "\\pset: allowed Unicode border line styles are single, double\n" msgstr "\\pset : les styles autorisés de ligne de bordure Unicode sont single, double\n" -#: command.c:3825 +#: command.c:3844 #, c-format msgid "\\pset: allowed Unicode column line styles are single, double\n" msgstr "\\pset : les styles autorisés pour la ligne de colonne Unicode sont single, double\n" -#: command.c:3840 +#: command.c:3859 #, c-format msgid "\\pset: allowed Unicode header line styles are single, double\n" msgstr "\\pset : les styles autorisés pour la ligne d'en-tête Unicode sont single, double\n" -#: command.c:4005 command.c:4184 +#: command.c:4024 command.c:4203 #, c-format msgid "\\pset: unknown option: %s\n" msgstr "\\pset : option inconnue : %s\n" -#: command.c:4023 +#: command.c:4042 #, c-format msgid "Border style is %d.\n" msgstr "Le style de bordure est %d.\n" -#: command.c:4029 +#: command.c:4048 #, c-format msgid "Target width is unset.\n" msgstr "La largeur cible n'est pas configuré.\n" -#: command.c:4031 +#: command.c:4050 #, c-format msgid "Target width is %d.\n" msgstr "La largeur cible est %d.\n" -#: command.c:4038 +#: 
command.c:4057 #, c-format msgid "Expanded display is on.\n" msgstr "Affichage étendu activé.\n" -#: command.c:4040 +#: command.c:4059 #, c-format msgid "Expanded display is used automatically.\n" msgstr "L'affichage étendu est utilisé automatiquement.\n" -#: command.c:4042 +#: command.c:4061 #, c-format msgid "Expanded display is off.\n" msgstr "Affichage étendu désactivé.\n" -#: command.c:4049 command.c:4057 +#: command.c:4068 command.c:4076 #, c-format msgid "Field separator is zero byte.\n" msgstr "Le séparateur de champs est l'octet zéro.\n" -#: command.c:4051 +#: command.c:4070 #, c-format msgid "Field separator is \"%s\".\n" msgstr "Le séparateur de champs est « %s ».\n" -#: command.c:4064 +#: command.c:4083 #, c-format msgid "Default footer is on.\n" msgstr "Le bas de page pas défaut est activé.\n" -#: command.c:4066 +#: command.c:4085 #, c-format msgid "Default footer is off.\n" msgstr "Le bas de page par défaut est désactivé.\n" -#: command.c:4072 +#: command.c:4091 #, c-format msgid "Output format is %s.\n" msgstr "Le format de sortie est %s.\n" -#: command.c:4078 +#: command.c:4097 #, c-format msgid "Line style is %s.\n" msgstr "Le style de ligne est %s.\n" -#: command.c:4085 +#: command.c:4104 #, c-format msgid "Null display is \"%s\".\n" msgstr "L'affichage de null est « %s ».\n" -#: command.c:4093 +#: command.c:4112 #, c-format msgid "Locale-adjusted numeric output is on.\n" msgstr "L'affichage de la sortie numérique adaptée à la locale est activé.\n" -#: command.c:4095 +#: command.c:4114 #, c-format msgid "Locale-adjusted numeric output is off.\n" msgstr "L'affichage de la sortie numérique adaptée à la locale est désactivé.\n" -#: command.c:4102 +#: command.c:4121 #, c-format msgid "Pager is used for long output.\n" msgstr "Le paginateur est utilisé pour les affichages longs.\n" -#: command.c:4104 +#: command.c:4123 #, c-format msgid "Pager is always used.\n" msgstr "Le paginateur est toujours utilisé.\n" -#: command.c:4106 +#: command.c:4125 #, c-format msgid "Pager usage is off.\n" msgstr "L'utilisation du paginateur est désactivé.\n" -#: command.c:4112 +#: command.c:4131 #, c-format msgid "Pager won't be used for less than %d line.\n" msgid_plural "Pager won't be used for less than %d lines.\n" msgstr[0] "Le paginateur ne sera pas utilisé pour moins que %d ligne.\n" msgstr[1] "Le paginateur ne sera pas utilisé pour moins que %d lignes.\n" -#: command.c:4122 command.c:4132 +#: command.c:4141 command.c:4151 #, c-format msgid "Record separator is zero byte.\n" msgstr "Le séparateur d'enregistrements est l'octet zéro.\n" -#: command.c:4124 +#: command.c:4143 #, c-format msgid "Record separator is .\n" msgstr "Le séparateur d'enregistrement est .\n" -#: command.c:4126 +#: command.c:4145 #, c-format msgid "Record separator is \"%s\".\n" msgstr "Le séparateur d'enregistrements est « %s ».\n" -#: command.c:4139 +#: command.c:4158 #, c-format msgid "Table attributes are \"%s\".\n" msgstr "Les attributs de la table sont « %s ».\n" -#: command.c:4142 +#: command.c:4161 #, c-format msgid "Table attributes unset.\n" msgstr "Les attributs de la table ne sont pas définis.\n" -#: command.c:4149 +#: command.c:4168 #, c-format msgid "Title is \"%s\".\n" msgstr "Le titre est « %s ».\n" -#: command.c:4151 +#: command.c:4170 #, c-format msgid "Title is unset.\n" msgstr "Le titre n'est pas défini.\n" -#: command.c:4158 +#: command.c:4177 #, c-format msgid "Tuples only is on.\n" msgstr "L'affichage des tuples seuls est activé.\n" -#: command.c:4160 +#: command.c:4179 #, c-format msgid "Tuples 
only is off.\n" msgstr "L'affichage des tuples seuls est désactivé.\n" -#: command.c:4166 +#: command.c:4185 #, c-format msgid "Unicode border line style is \"%s\".\n" msgstr "Le style de bordure Unicode est « %s ».\n" -#: command.c:4172 +#: command.c:4191 #, c-format msgid "Unicode column line style is \"%s\".\n" msgstr "Le style de ligne Unicode est « %s ».\n" -#: command.c:4178 +#: command.c:4197 #, c-format msgid "Unicode header line style is \"%s\".\n" msgstr "Le style d'en-tête Unicode est « %s ».\n" -#: command.c:4338 +#: command.c:4357 #, c-format msgid "\\!: failed\n" msgstr "\\! : échec\n" -#: command.c:4363 common.c:754 +#: command.c:4382 common.c:754 #, c-format msgid "\\watch cannot be used with an empty query\n" msgstr "\\watch ne peut pas être utilisé avec une requête vide\n" -#: command.c:4404 +#: command.c:4423 #, c-format msgid "%s\t%s (every %gs)\n" msgstr "%s\t%s (chaque %gs)\n" -#: command.c:4407 +#: command.c:4426 #, c-format msgid "%s (every %gs)\n" msgstr "%s (chaque %gs)\n" -#: command.c:4461 command.c:4468 common.c:654 common.c:661 common.c:1271 +#: command.c:4480 command.c:4487 common.c:654 common.c:661 common.c:1271 #, c-format msgid "" "********* QUERY **********\n" @@ -678,12 +678,12 @@ msgstr "" "**************************\n" "\n" -#: command.c:4660 +#: command.c:4679 #, c-format msgid "\"%s.%s\" is not a view\n" msgstr "« %s.%s » n'est pas une vue\n" -#: command.c:4676 +#: command.c:4695 #, c-format msgid "could not parse reloptions array\n" msgstr "n'a pas pu analyser le tableau reloptions\n" @@ -918,20 +918,20 @@ msgid "\\crosstabview: column name not found: \"%s\"\n" msgstr "\\crosstabview : nom de colonne non trouvé : « %s »\n" #: describe.c:74 describe.c:346 describe.c:603 describe.c:735 describe.c:879 -#: describe.c:1040 describe.c:1112 describe.c:3342 describe.c:3554 -#: describe.c:3645 describe.c:3893 describe.c:4038 describe.c:4279 -#: describe.c:4354 describe.c:4365 describe.c:4427 describe.c:4852 -#: describe.c:4935 +#: describe.c:1040 describe.c:1112 describe.c:3371 describe.c:3583 +#: describe.c:3674 describe.c:3922 describe.c:4067 describe.c:4308 +#: describe.c:4383 describe.c:4394 describe.c:4456 describe.c:4881 +#: describe.c:4964 msgid "Schema" msgstr "Schéma" #: describe.c:75 describe.c:164 describe.c:231 describe.c:239 describe.c:347 #: describe.c:604 describe.c:736 describe.c:797 describe.c:880 describe.c:1113 -#: describe.c:3343 describe.c:3477 describe.c:3555 describe.c:3646 -#: describe.c:3725 describe.c:3894 describe.c:3963 describe.c:4039 -#: describe.c:4280 describe.c:4355 describe.c:4366 describe.c:4428 -#: describe.c:4625 describe.c:4709 describe.c:4933 describe.c:5105 -#: describe.c:5309 +#: describe.c:3372 describe.c:3506 describe.c:3584 describe.c:3675 +#: describe.c:3754 describe.c:3923 describe.c:3992 describe.c:4068 +#: describe.c:4309 describe.c:4384 describe.c:4395 describe.c:4457 +#: describe.c:4654 describe.c:4738 describe.c:4962 describe.c:5134 +#: describe.c:5341 msgid "Name" msgstr "Nom" @@ -945,12 +945,12 @@ msgid "Argument data types" msgstr "Type de données des paramètres" #: describe.c:108 describe.c:174 describe.c:262 describe.c:468 describe.c:652 -#: describe.c:751 describe.c:822 describe.c:1115 describe.c:1756 -#: describe.c:3132 describe.c:3377 describe.c:3508 describe.c:3582 -#: describe.c:3655 describe.c:3738 describe.c:3806 describe.c:3906 -#: describe.c:3972 describe.c:4040 describe.c:4181 describe.c:4223 -#: describe.c:4296 describe.c:4358 describe.c:4367 describe.c:4429 -#: describe.c:4651 
describe.c:4731 describe.c:4866 describe.c:4936 +#: describe.c:751 describe.c:822 describe.c:1115 describe.c:1845 +#: describe.c:3161 describe.c:3406 describe.c:3537 describe.c:3611 +#: describe.c:3684 describe.c:3767 describe.c:3835 describe.c:3935 +#: describe.c:4001 describe.c:4069 describe.c:4210 describe.c:4252 +#: describe.c:4325 describe.c:4387 describe.c:4396 describe.c:4458 +#: describe.c:4680 describe.c:4760 describe.c:4895 describe.c:4965 #: large_obj.c:289 large_obj.c:299 msgid "Description" msgstr "Description" @@ -969,12 +969,12 @@ msgid "Index" msgstr "Index" #: describe.c:166 describe.c:366 describe.c:411 describe.c:428 describe.c:887 -#: describe.c:1051 describe.c:1716 describe.c:3352 describe.c:3556 -#: describe.c:4728 +#: describe.c:1051 describe.c:1582 describe.c:1606 describe.c:1808 +#: describe.c:3381 describe.c:3585 describe.c:4757 msgid "Type" msgstr "Type" -#: describe.c:173 describe.c:4630 +#: describe.c:173 describe.c:4659 msgid "Handler" msgstr "Gestionnaire" @@ -988,9 +988,9 @@ msgid "The server (version %s) does not support tablespaces.\n" msgstr "Le serveur (version %s) ne supporte pas les tablespaces.\n" #: describe.c:232 describe.c:240 describe.c:456 describe.c:642 describe.c:798 -#: describe.c:1039 describe.c:3353 describe.c:3481 describe.c:3727 -#: describe.c:3964 describe.c:4626 describe.c:4710 describe.c:5106 -#: describe.c:5310 large_obj.c:288 +#: describe.c:1039 describe.c:3382 describe.c:3510 describe.c:3756 +#: describe.c:3993 describe.c:4655 describe.c:4739 describe.c:5135 +#: describe.c:5247 describe.c:5342 large_obj.c:288 msgid "Owner" msgstr "Propriétaire" @@ -998,11 +998,11 @@ msgstr "Propriétaire" msgid "Location" msgstr "Emplacement" -#: describe.c:252 describe.c:2944 +#: describe.c:252 describe.c:2980 msgid "Options" msgstr "Options" -#: describe.c:257 describe.c:615 describe.c:814 describe.c:3369 describe.c:3373 +#: describe.c:257 describe.c:615 describe.c:814 describe.c:3398 describe.c:3402 msgid "Size" msgstr "Taille" @@ -1117,7 +1117,7 @@ msgstr "Type de l'arg. droit" msgid "Result type" msgstr "Type du résultat" -#: describe.c:744 describe.c:3797 describe.c:4180 +#: describe.c:744 describe.c:3826 describe.c:4209 msgid "Function" msgstr "Fonction" @@ -1129,11 +1129,11 @@ msgstr "Liste des opérateurs" msgid "Encoding" msgstr "Encodage" -#: describe.c:804 describe.c:3895 +#: describe.c:804 describe.c:3924 msgid "Collate" msgstr "Collationnement" -#: describe.c:805 describe.c:3896 +#: describe.c:805 describe.c:3925 msgid "Ctype" msgstr "Type caract." 
@@ -1145,24 +1145,24 @@ msgstr "Tablespace" msgid "List of databases" msgstr "Liste des bases de données" -#: describe.c:881 describe.c:886 describe.c:1042 describe.c:3344 -#: describe.c:3351 +#: describe.c:881 describe.c:886 describe.c:1042 describe.c:3373 +#: describe.c:3380 msgid "table" msgstr "table" -#: describe.c:882 describe.c:3345 +#: describe.c:882 describe.c:3374 msgid "view" msgstr "vue" -#: describe.c:883 describe.c:3346 +#: describe.c:883 describe.c:3375 msgid "materialized view" msgstr "vue matérialisée" -#: describe.c:884 describe.c:1044 describe.c:3348 +#: describe.c:884 describe.c:1044 describe.c:3377 msgid "sequence" msgstr "séquence" -#: describe.c:885 describe.c:3350 +#: describe.c:885 describe.c:3379 msgid "foreign table" msgstr "table distante" @@ -1174,7 +1174,7 @@ msgstr "Droits d'accès à la colonne" msgid "Policies" msgstr "Politiques" -#: describe.c:995 describe.c:5366 describe.c:5370 +#: describe.c:995 describe.c:5398 describe.c:5402 msgid "Access privileges" msgstr "Droits d'accès" @@ -1227,690 +1227,710 @@ msgstr "règle" msgid "Object descriptions" msgstr "Descriptions des objets" -#: describe.c:1327 describe.c:3440 +#: describe.c:1327 describe.c:3469 #, c-format msgid "Did not find any relation named \"%s\".\n" msgstr "Aucune relation nommée « %s » n'a été trouvée.\n" -#: describe.c:1330 describe.c:3443 +#: describe.c:1330 describe.c:3472 #, c-format msgid "Did not find any relations.\n" msgstr "N'a trouvé aucune relation.\n" -#: describe.c:1539 +#: describe.c:1537 #, c-format msgid "Did not find any relation with OID %s.\n" msgstr "Aucune relation avec l'OID « %s » n'a été trouvée.\n" -#: describe.c:1652 describe.c:1701 +#: describe.c:1583 describe.c:1607 +msgid "Start" +msgstr "Début" + +#: describe.c:1584 describe.c:1608 +msgid "Minimum" +msgstr "Minimum" + +#: describe.c:1585 describe.c:1609 +msgid "Maximum" +msgstr "Maximum" + +#: describe.c:1586 describe.c:1610 +msgid "Increment" +msgstr "Incrément" + +#: describe.c:1587 describe.c:1611 describe.c:3678 describe.c:3829 +msgid "yes" +msgstr "oui" + +#: describe.c:1588 describe.c:1612 describe.c:3678 describe.c:3827 +msgid "no" +msgstr "non" + +#: describe.c:1589 describe.c:1613 +msgid "Cycles?" +msgstr "Cycles ?" 
+ +#: describe.c:1590 describe.c:1614 +msgid "Cache" +msgstr "Cache" + +#: describe.c:1657 +#, c-format +msgid "Owned by: %s" +msgstr "Propriétaire : %s" + +#: describe.c:1661 +#, c-format +msgid "Sequence for identity column: %s" +msgstr "Séquence pour la colonne d'identité : %s" + +#: describe.c:1668 +#, c-format +msgid "Sequence \"%s.%s\"" +msgstr "Séquence « %s.%s »" + +#: describe.c:1748 describe.c:1793 #, c-format msgid "Unlogged table \"%s.%s\"" msgstr "Table non tracée « %s.%s »" -#: describe.c:1655 describe.c:1704 +#: describe.c:1751 describe.c:1796 #, c-format msgid "Table \"%s.%s\"" msgstr "Table « %s.%s »" -#: describe.c:1659 +#: describe.c:1755 #, c-format msgid "View \"%s.%s\"" msgstr "Vue « %s.%s »" -#: describe.c:1664 +#: describe.c:1760 #, c-format msgid "Unlogged materialized view \"%s.%s\"" msgstr "Vue matérialisée non journalisée « %s.%s »" -#: describe.c:1667 +#: describe.c:1763 #, c-format msgid "Materialized view \"%s.%s\"" msgstr "Vue matérialisée « %s.%s »" -#: describe.c:1671 -#, c-format -msgid "Sequence \"%s.%s\"" -msgstr "Séquence « %s.%s »" - -#: describe.c:1676 +#: describe.c:1768 #, c-format msgid "Unlogged index \"%s.%s\"" msgstr "Index non tracé « %s.%s »" -#: describe.c:1679 +#: describe.c:1771 #, c-format msgid "Index \"%s.%s\"" msgstr "Index « %s.%s »" -#: describe.c:1684 +#: describe.c:1776 #, c-format msgid "Special relation \"%s.%s\"" msgstr "Relation spéciale « %s.%s »" -#: describe.c:1688 +#: describe.c:1780 #, c-format msgid "TOAST table \"%s.%s\"" msgstr "Table TOAST « %s.%s »" -#: describe.c:1692 +#: describe.c:1784 #, c-format msgid "Composite type \"%s.%s\"" msgstr "Type composé « %s.%s »" -#: describe.c:1696 +#: describe.c:1788 #, c-format msgid "Foreign table \"%s.%s\"" msgstr "Table distante « %s.%s »" -#: describe.c:1715 +#: describe.c:1807 msgid "Column" msgstr "Colonne" -#: describe.c:1726 describe.c:3562 +#: describe.c:1818 describe.c:3591 msgid "Collation" msgstr "Collationnement" -#: describe.c:1727 describe.c:3569 +#: describe.c:1819 describe.c:3598 msgid "Nullable" msgstr "NULL-able" -#: describe.c:1728 describe.c:3570 +#: describe.c:1820 describe.c:3599 msgid "Default" msgstr "Par défaut" -#: describe.c:1733 -msgid "Value" -msgstr "Valeur" - -#: describe.c:1736 +#: describe.c:1825 msgid "Definition" msgstr "Définition" -#: describe.c:1739 describe.c:4646 describe.c:4730 describe.c:4801 -#: describe.c:4865 +#: describe.c:1828 describe.c:4675 describe.c:4759 describe.c:4830 +#: describe.c:4894 msgid "FDW options" msgstr "Options FDW" -#: describe.c:1743 +#: describe.c:1832 msgid "Storage" msgstr "Stockage" -#: describe.c:1748 +#: describe.c:1837 msgid "Stats target" msgstr "Cible de statistiques" -#: describe.c:1897 +#: describe.c:1982 #, c-format msgid "Partition of: %s %s" msgstr "Partition de : %s %s" -#: describe.c:1903 +#: describe.c:1988 #, c-format msgid "Partition constraint: %s" msgstr "Contrainte de partition : %s" -#: describe.c:1926 +#: describe.c:2011 #, c-format msgid "Partition key: %s" msgstr "Clé de partition : %s" -#: describe.c:1994 +#: describe.c:2079 msgid "primary key, " msgstr "clé primaire, " -#: describe.c:1996 +#: describe.c:2081 msgid "unique, " msgstr "unique, " -#: describe.c:2002 +#: describe.c:2087 #, c-format msgid "for table \"%s.%s\"" msgstr "pour la table « %s.%s »" -#: describe.c:2006 +#: describe.c:2091 #, c-format msgid ", predicate (%s)" msgstr ", prédicat (%s)" -#: describe.c:2009 +#: describe.c:2094 msgid ", clustered" msgstr ", en cluster" -#: describe.c:2012 +#: describe.c:2097 msgid ", 
invalid" msgstr ", invalide" -#: describe.c:2015 +#: describe.c:2100 msgid ", deferrable" msgstr ", déferrable" -#: describe.c:2018 +#: describe.c:2103 msgid ", initially deferred" msgstr ", initialement déferré" -#: describe.c:2021 +#: describe.c:2106 msgid ", replica identity" msgstr ", identité réplica" -#: describe.c:2060 -#, c-format -msgid "Owned by: %s" -msgstr "Propriétaire : %s" - -#: describe.c:2065 -#, c-format -msgid "Sequence for identity column: %s" -msgstr "Séquence pour la colonne d'identité : %s" - -#: describe.c:2129 +#: describe.c:2165 msgid "Indexes:" msgstr "Index :" -#: describe.c:2213 +#: describe.c:2249 msgid "Check constraints:" msgstr "Contraintes de vérification :" -#: describe.c:2244 +#: describe.c:2280 msgid "Foreign-key constraints:" msgstr "Contraintes de clés étrangères :" -#: describe.c:2275 +#: describe.c:2311 msgid "Referenced by:" msgstr "Référencé par :" -#: describe.c:2325 +#: describe.c:2361 msgid "Policies:" msgstr "Politiques :" -#: describe.c:2328 +#: describe.c:2364 msgid "Policies (forced row security enabled):" msgstr "Politiques (mode sécurité de ligne activé en forcé) :" -#: describe.c:2331 +#: describe.c:2367 msgid "Policies (row security enabled): (none)" msgstr "Politiques (mode sécurité de ligne activé) : (aucune)" -#: describe.c:2334 +#: describe.c:2370 msgid "Policies (forced row security enabled): (none)" msgstr "Politiques (mode sécurité de ligne activé en forcé) : (aucune)" -#: describe.c:2337 +#: describe.c:2373 msgid "Policies (row security disabled):" msgstr "Politiques (mode sécurité de ligne désactivé) :" -#: describe.c:2399 +#: describe.c:2435 msgid "Statistics objects:" msgstr "Objets statistiques :" -#: describe.c:2502 describe.c:2587 +#: describe.c:2538 describe.c:2623 msgid "Rules:" msgstr "Règles :" -#: describe.c:2505 +#: describe.c:2541 msgid "Disabled rules:" msgstr "Règles désactivées :" -#: describe.c:2508 +#: describe.c:2544 msgid "Rules firing always:" msgstr "Règles toujous activées :" -#: describe.c:2511 +#: describe.c:2547 msgid "Rules firing on replica only:" msgstr "Règles activées uniquement sur le réplica :" -#: describe.c:2551 +#: describe.c:2587 msgid "Publications:" msgstr "Publications :" -#: describe.c:2570 +#: describe.c:2606 msgid "View definition:" msgstr "Définition de la vue :" -#: describe.c:2705 +#: describe.c:2741 msgid "Triggers:" msgstr "Triggers :" -#: describe.c:2709 +#: describe.c:2745 msgid "Disabled user triggers:" msgstr "Triggers utilisateurs désactivés :" -#: describe.c:2711 +#: describe.c:2747 msgid "Disabled triggers:" msgstr "Triggers désactivés :" -#: describe.c:2714 +#: describe.c:2750 msgid "Disabled internal triggers:" msgstr "Triggers internes désactivés :" -#: describe.c:2717 +#: describe.c:2753 msgid "Triggers firing always:" msgstr "Triggers toujours activés :" -#: describe.c:2720 +#: describe.c:2756 msgid "Triggers firing on replica only:" msgstr "Triggers activés uniquement sur le réplica :" -#: describe.c:2779 +#: describe.c:2815 #, c-format msgid "Server: %s" msgstr "Serveur : %s" -#: describe.c:2787 +#: describe.c:2823 #, c-format msgid "FDW options: (%s)" msgstr "Options FDW : (%s)" -#: describe.c:2806 +#: describe.c:2842 msgid "Inherits" msgstr "Hérite de" -#: describe.c:2860 +#: describe.c:2896 #, c-format msgid "Number of child tables: %d (Use \\d+ to list them.)" msgstr "Nombre de tables enfants : %d (utilisez \\d+ pour les lister)" -#: describe.c:2862 +#: describe.c:2898 #, c-format msgid "Number of partitions: %d (Use \\d+ to list them.)" msgstr "Nombre de 
partitions : %d (utilisez \\d+ pour les lister)" -#: describe.c:2870 +#: describe.c:2906 msgid "Child tables" msgstr "Tables enfant :" -#: describe.c:2870 +#: describe.c:2906 msgid "Partitions" msgstr "Partitions" -#: describe.c:2904 +#: describe.c:2940 #, c-format msgid "Typed table of type: %s" msgstr "Table de type : %s" -#: describe.c:2920 +#: describe.c:2956 msgid "Replica Identity" msgstr "Identité de réplicat" -#: describe.c:2933 +#: describe.c:2969 msgid "Has OIDs: yes" msgstr "Contient des OID : oui" -#: describe.c:3020 +#: describe.c:3049 #, c-format msgid "Tablespace: \"%s\"" msgstr "Tablespace : « %s »" #. translator: before this string there's an index description like #. '"foo_pkey" PRIMARY KEY, btree (a)' -#: describe.c:3032 +#: describe.c:3061 #, c-format msgid ", tablespace \"%s\"" msgstr ", tablespace « %s »" -#: describe.c:3125 +#: describe.c:3154 msgid "List of roles" msgstr "Liste des rôles" -#: describe.c:3127 +#: describe.c:3156 msgid "Role name" msgstr "Nom du rôle" -#: describe.c:3128 +#: describe.c:3157 msgid "Attributes" msgstr "Attributs" -#: describe.c:3129 +#: describe.c:3158 msgid "Member of" msgstr "Membre de" -#: describe.c:3140 +#: describe.c:3169 msgid "Superuser" msgstr "Superutilisateur" -#: describe.c:3143 +#: describe.c:3172 msgid "No inheritance" msgstr "Pas d'héritage" -#: describe.c:3146 +#: describe.c:3175 msgid "Create role" msgstr "Créer un rôle" -#: describe.c:3149 +#: describe.c:3178 msgid "Create DB" msgstr "Créer une base" -#: describe.c:3152 +#: describe.c:3181 msgid "Cannot login" msgstr "Ne peut pas se connecter" -#: describe.c:3156 +#: describe.c:3185 msgid "Replication" msgstr "Réplication" -#: describe.c:3160 +#: describe.c:3189 msgid "Bypass RLS" msgstr "Contournement RLS" -#: describe.c:3169 +#: describe.c:3198 msgid "No connections" msgstr "Sans connexions" -#: describe.c:3171 +#: describe.c:3200 #, c-format msgid "%d connection" msgid_plural "%d connections" msgstr[0] "%d connexion" msgstr[1] "%d connexions" -#: describe.c:3181 +#: describe.c:3210 msgid "Password valid until " msgstr "Mot de passe valide jusqu'à " -#: describe.c:3231 +#: describe.c:3260 #, c-format msgid "The server (version %s) does not support per-database role settings.\n" msgstr "Le serveur (version %s) ne supporte pas les paramètres de rôles par bases de données.\n" -#: describe.c:3244 +#: describe.c:3273 msgid "Role" msgstr "Rôle" -#: describe.c:3245 +#: describe.c:3274 msgid "Database" msgstr "Base de données" -#: describe.c:3246 +#: describe.c:3275 msgid "Settings" msgstr "Réglages" -#: describe.c:3267 +#: describe.c:3296 #, c-format msgid "Did not find any settings for role \"%s\" and database \"%s\".\n" msgstr "N'a trouvé aucune configuration pour le rôle « %s » et la base de données « %s ».\n" -#: describe.c:3270 +#: describe.c:3299 #, c-format msgid "Did not find any settings for role \"%s\".\n" msgstr "N'a trouvé aucune configuration pour le rôle « %s ».\n" -#: describe.c:3273 +#: describe.c:3302 #, c-format msgid "Did not find any settings.\n" msgstr "N'a trouvé aucune configuration.\n" -#: describe.c:3278 +#: describe.c:3307 msgid "List of settings" msgstr "Liste des paramètres" -#: describe.c:3347 +#: describe.c:3376 msgid "index" msgstr "index" -#: describe.c:3349 +#: describe.c:3378 msgid "special" msgstr "spécial" -#: describe.c:3358 describe.c:4853 +#: describe.c:3387 describe.c:4882 msgid "Table" msgstr "Table" -#: describe.c:3448 +#: describe.c:3477 msgid "List of relations" msgstr "Liste des relations" -#: describe.c:3485 +#: describe.c:3514 
msgid "Trusted" msgstr "De confiance" -#: describe.c:3493 +#: describe.c:3522 msgid "Internal language" msgstr "Langage interne" -#: describe.c:3494 +#: describe.c:3523 msgid "Call handler" msgstr "Gestionnaire d'appel" -#: describe.c:3495 describe.c:4633 +#: describe.c:3524 describe.c:4662 msgid "Validator" msgstr "Validateur" -#: describe.c:3498 +#: describe.c:3527 msgid "Inline handler" msgstr "Gestionnaire en ligne" -#: describe.c:3526 +#: describe.c:3555 msgid "List of languages" msgstr "Liste des langages" -#: describe.c:3571 +#: describe.c:3600 msgid "Check" msgstr "Vérification" -#: describe.c:3613 +#: describe.c:3642 msgid "List of domains" msgstr "Liste des domaines" -#: describe.c:3647 +#: describe.c:3676 msgid "Source" msgstr "Source" -#: describe.c:3648 +#: describe.c:3677 msgid "Destination" msgstr "Destination" -#: describe.c:3649 describe.c:3798 -msgid "no" -msgstr "non" - -#: describe.c:3649 describe.c:3800 -msgid "yes" -msgstr "oui" - -#: describe.c:3650 +#: describe.c:3679 msgid "Default?" msgstr "Par défaut ?" -#: describe.c:3687 +#: describe.c:3716 msgid "List of conversions" msgstr "Liste des conversions" -#: describe.c:3726 +#: describe.c:3755 msgid "Event" msgstr "Événement" -#: describe.c:3728 +#: describe.c:3757 msgid "enabled" msgstr "activé" -#: describe.c:3729 +#: describe.c:3758 msgid "replica" msgstr "réplicat" -#: describe.c:3730 +#: describe.c:3759 msgid "always" msgstr "toujours" -#: describe.c:3731 +#: describe.c:3760 msgid "disabled" msgstr "désactivé" -#: describe.c:3732 describe.c:5311 +#: describe.c:3761 describe.c:5343 msgid "Enabled" msgstr "Activé" -#: describe.c:3733 +#: describe.c:3762 msgid "Procedure" msgstr "Procédure" -#: describe.c:3734 +#: describe.c:3763 msgid "Tags" msgstr "Tags" -#: describe.c:3753 +#: describe.c:3782 msgid "List of event triggers" msgstr "Liste des triggers sur évènement" -#: describe.c:3795 +#: describe.c:3824 msgid "Source type" msgstr "Type source" -#: describe.c:3796 +#: describe.c:3825 msgid "Target type" msgstr "Type cible" -#: describe.c:3799 +#: describe.c:3828 msgid "in assignment" msgstr "assigné" -#: describe.c:3801 +#: describe.c:3830 msgid "Implicit?" msgstr "Implicite ?" 
-#: describe.c:3852 +#: describe.c:3881 msgid "List of casts" msgstr "Liste des conversions explicites" -#: describe.c:3880 +#: describe.c:3909 #, c-format msgid "The server (version %s) does not support collations.\n" msgstr "Le serveur (version %s) ne supporte pas les collationnements.\n" -#: describe.c:3901 +#: describe.c:3930 msgid "Provider" msgstr "Fournisseur" -#: describe.c:3936 +#: describe.c:3965 msgid "List of collations" msgstr "Liste des collationnements" -#: describe.c:3995 +#: describe.c:4024 msgid "List of schemas" msgstr "Liste des schémas" -#: describe.c:4020 describe.c:4267 describe.c:4338 describe.c:4409 +#: describe.c:4049 describe.c:4296 describe.c:4367 describe.c:4438 #, c-format msgid "The server (version %s) does not support full text search.\n" msgstr "Le serveur (version %s) ne supporte pas la recherche plein texte.\n" -#: describe.c:4055 +#: describe.c:4084 msgid "List of text search parsers" msgstr "Liste des analyseurs de la recherche de texte" -#: describe.c:4100 +#: describe.c:4129 #, c-format msgid "Did not find any text search parser named \"%s\".\n" msgstr "Aucun analyseur de la recherche de texte nommé « %s » n'a été trouvé.\n" -#: describe.c:4103 +#: describe.c:4132 #, c-format msgid "Did not find any text search parsers.\n" msgstr "N'a trouvé aucun analyseur de recherche plein texte.\n" -#: describe.c:4178 +#: describe.c:4207 msgid "Start parse" msgstr "Début de l'analyse" -#: describe.c:4179 +#: describe.c:4208 msgid "Method" msgstr "Méthode" -#: describe.c:4183 +#: describe.c:4212 msgid "Get next token" msgstr "Obtenir le prochain jeton" -#: describe.c:4185 +#: describe.c:4214 msgid "End parse" msgstr "Fin de l'analyse" -#: describe.c:4187 +#: describe.c:4216 msgid "Get headline" msgstr "Obtenir l'en-tête" -#: describe.c:4189 +#: describe.c:4218 msgid "Get token types" msgstr "Obtenir les types de jeton" -#: describe.c:4200 +#: describe.c:4229 #, c-format msgid "Text search parser \"%s.%s\"" msgstr "Analyseur « %s.%s » de la recherche de texte" -#: describe.c:4203 +#: describe.c:4232 #, c-format msgid "Text search parser \"%s\"" msgstr "Analyseur « %s » de la recherche de texte" -#: describe.c:4222 +#: describe.c:4251 msgid "Token name" msgstr "Nom du jeton" -#: describe.c:4233 +#: describe.c:4262 #, c-format msgid "Token types for parser \"%s.%s\"" msgstr "Types de jeton pour l'analyseur « %s.%s »" -#: describe.c:4236 +#: describe.c:4265 #, c-format msgid "Token types for parser \"%s\"" msgstr "Types de jeton pour l'analyseur « %s »" -#: describe.c:4290 +#: describe.c:4319 msgid "Template" msgstr "Modèle" -#: describe.c:4291 +#: describe.c:4320 msgid "Init options" msgstr "Options d'initialisation :" -#: describe.c:4313 +#: describe.c:4342 msgid "List of text search dictionaries" msgstr "Liste des dictionnaires de la recherche de texte" -#: describe.c:4356 +#: describe.c:4385 msgid "Init" msgstr "Initialisation" -#: describe.c:4357 +#: describe.c:4386 msgid "Lexize" msgstr "Lexize" -#: describe.c:4384 +#: describe.c:4413 msgid "List of text search templates" msgstr "Liste des modèles de la recherche de texte" -#: describe.c:4444 +#: describe.c:4473 msgid "List of text search configurations" msgstr "Liste des configurations de la recherche de texte" -#: describe.c:4490 +#: describe.c:4519 #, c-format msgid "Did not find any text search configuration named \"%s\".\n" msgstr "Aucune configuration de la recherche de texte nommée « %s » n'a été trouvée.\n" -#: describe.c:4493 +#: describe.c:4522 #, c-format msgid "Did not find any text search 
configurations.\n" msgstr "N'a trouvé aucune configuration de recherche plein texte.\n" -#: describe.c:4559 +#: describe.c:4588 msgid "Token" msgstr "Jeton" -#: describe.c:4560 +#: describe.c:4589 msgid "Dictionaries" msgstr "Dictionnaires" -#: describe.c:4571 +#: describe.c:4600 #, c-format msgid "Text search configuration \"%s.%s\"" msgstr "Configuration « %s.%s » de la recherche de texte" -#: describe.c:4574 +#: describe.c:4603 #, c-format msgid "Text search configuration \"%s\"" msgstr "Configuration « %s » de la recherche de texte" -#: describe.c:4578 +#: describe.c:4607 #, c-format msgid "" "\n" @@ -1919,7 +1939,7 @@ msgstr "" "\n" "Analyseur : « %s.%s »" -#: describe.c:4581 +#: describe.c:4610 #, c-format msgid "" "\n" @@ -1928,148 +1948,148 @@ msgstr "" "\n" "Analyseur : « %s »" -#: describe.c:4615 +#: describe.c:4644 #, c-format msgid "The server (version %s) does not support foreign-data wrappers.\n" msgstr "Le serveur (version %s) ne supporte pas les wrappers de données distantes.\n" -#: describe.c:4673 +#: describe.c:4702 msgid "List of foreign-data wrappers" msgstr "Liste des wrappers de données distantes" -#: describe.c:4698 +#: describe.c:4727 #, c-format msgid "The server (version %s) does not support foreign servers.\n" msgstr "Le serveur (version %s) ne supporte pas les serveurs distants.\n" -#: describe.c:4711 +#: describe.c:4740 msgid "Foreign-data wrapper" msgstr "Wrapper des données distantes" -#: describe.c:4729 describe.c:4934 +#: describe.c:4758 describe.c:4963 msgid "Version" msgstr "Version" -#: describe.c:4755 +#: describe.c:4784 msgid "List of foreign servers" msgstr "Liste des serveurs distants" -#: describe.c:4780 +#: describe.c:4809 #, c-format msgid "The server (version %s) does not support user mappings.\n" msgstr "Le serveur (version %s) ne supporte pas les correspondances d'utilisateurs.\n" -#: describe.c:4790 describe.c:4854 +#: describe.c:4819 describe.c:4883 msgid "Server" msgstr "Serveur" -#: describe.c:4791 +#: describe.c:4820 msgid "User name" msgstr "Nom de l'utilisateur" -#: describe.c:4816 +#: describe.c:4845 msgid "List of user mappings" msgstr "Liste des correspondances utilisateurs" -#: describe.c:4841 +#: describe.c:4870 #, c-format msgid "The server (version %s) does not support foreign tables.\n" msgstr "Le serveur (version %s) ne supporte pas les tables distantes.\n" -#: describe.c:4894 +#: describe.c:4923 msgid "List of foreign tables" msgstr "Liste des tables distantes" -#: describe.c:4919 describe.c:4976 +#: describe.c:4948 describe.c:5005 #, c-format msgid "The server (version %s) does not support extensions.\n" msgstr "Le serveur (version %s) ne supporte pas les extensions.\n" -#: describe.c:4951 +#: describe.c:4980 msgid "List of installed extensions" msgstr "Liste des extensions installées" -#: describe.c:5004 +#: describe.c:5033 #, c-format msgid "Did not find any extension named \"%s\".\n" msgstr "N'a trouvé aucune extension nommée « %s ».\n" -#: describe.c:5007 +#: describe.c:5036 #, c-format msgid "Did not find any extensions.\n" msgstr "N'a trouvé aucune extension.\n" -#: describe.c:5051 +#: describe.c:5080 msgid "Object description" msgstr "Description d'objet" -#: describe.c:5061 +#: describe.c:5090 #, c-format msgid "Objects in extension \"%s\"" msgstr "Objets dans l'extension « %s »" -#: describe.c:5090 describe.c:5156 +#: describe.c:5119 describe.c:5185 #, c-format msgid "The server (version %s) does not support publications.\n" msgstr "Le serveur (version %s) ne supporte pas les publications.\n" -#: describe.c:5107 
describe.c:5217 +#: describe.c:5136 describe.c:5248 msgid "All tables" msgstr "Toutes les tables" -#: describe.c:5108 describe.c:5218 +#: describe.c:5137 describe.c:5249 msgid "Inserts" msgstr "Insertions" -#: describe.c:5109 describe.c:5219 +#: describe.c:5138 describe.c:5250 msgid "Updates" msgstr "Mises à jour" -#: describe.c:5110 describe.c:5220 +#: describe.c:5139 describe.c:5251 msgid "Deletes" msgstr "Suppressions" -#: describe.c:5127 +#: describe.c:5156 msgid "List of publications" msgstr "Liste des publications" -#: describe.c:5187 +#: describe.c:5217 #, c-format msgid "Did not find any publication named \"%s\".\n" msgstr "N'a trouvé aucune publication nommée « %s ».\n" -#: describe.c:5190 +#: describe.c:5220 #, c-format msgid "Did not find any publications.\n" msgstr "N'a trouvé aucune publication.\n" -#: describe.c:5214 +#: describe.c:5244 #, c-format msgid "Publication %s" msgstr "Publication %s" -#: describe.c:5252 +#: describe.c:5284 msgid "Tables:" msgstr "Tables :" -#: describe.c:5296 +#: describe.c:5328 #, c-format msgid "The server (version %s) does not support subscriptions.\n" msgstr "Le serveur (version %s) ne supporte pas les souscriptions.\n" -#: describe.c:5312 +#: describe.c:5344 msgid "Publication" msgstr "Publication" -#: describe.c:5319 +#: describe.c:5351 msgid "Synchronous commit" msgstr "Validation synchrone" -#: describe.c:5320 +#: describe.c:5352 msgid "Conninfo" msgstr "Informations de connexion" -#: describe.c:5342 +#: describe.c:5374 msgid "List of subscriptions" msgstr "Liste des souscriptions" @@ -2087,7 +2107,7 @@ msgstr "" "psql est l'interface interactive de PostgreSQL.\n" "\n" -#: help.c:74 help.c:344 help.c:378 help.c:405 +#: help.c:74 help.c:344 help.c:383 help.c:410 #, c-format msgid "Usage:\n" msgstr "Usage :\n" @@ -3180,30 +3200,55 @@ msgstr " QUIET s'exécute en silence (identique à l'option -q)\n" #: help.c:371 #, c-format +msgid " SERVER_VERSION_NAME server's version (short string)\n" +msgstr " SERVER_VERSION_NAME version du serveur (chaîne courte)\n" + +#: help.c:372 +#, c-format +msgid " SERVER_VERSION_NUM server's version (numeric format)\n" +msgstr " SERVER_VERSION_NUM version du serveur (format numérique)\n" + +#: help.c:373 +#, c-format msgid " SHOW_CONTEXT controls display of message context fields [never, errors, always]\n" msgstr " SHOW_CONTEXT contrôle l'affichage des champs de contexte du message [never, errors, always]\n" -#: help.c:372 +#: help.c:374 #, c-format msgid " SINGLELINE end of line terminates SQL command mode (same as -S option)\n" msgstr " SINGLELINE une fin de ligne termine le mode de commande SQL (identique à l'option -S)\n" -#: help.c:373 +#: help.c:375 #, c-format msgid " SINGLESTEP single-step mode (same as -s option)\n" msgstr " SINGLESTEP mode pas à pas (identique à l'option -s)\n" -#: help.c:374 +#: help.c:376 #, c-format msgid " USER the currently connected database user\n" msgstr " USER l'utilisateur actuellement connecté\n" -#: help.c:375 +#: help.c:377 #, c-format msgid " VERBOSITY controls verbosity of error reports [default, verbose, terse]\n" msgstr " VERBOSITY contrôle la verbosité des rapports d'erreurs [default, verbose, terse]\n" -#: help.c:377 +#: help.c:378 +#, c-format +msgid " VERSION psql's version (verbose string)\n" +msgstr " VERSION version de psql (chaîne verbeuse)\n" + +#: help.c:379 +#, c-format +msgid " VERSION_NAME psql's version (short string)\n" +msgstr " VERSION_NAME version de psql (chaîne courte)\n" + +#: help.c:380 +#, c-format +msgid " VERSION_NUM psql's version (numeric 
format)\n" +msgstr " VERSION_NUM version de psql (format numérique)\n" + +#: help.c:382 #, c-format msgid "" "\n" @@ -3212,7 +3257,7 @@ msgstr "" "\n" "Paramètres d'affichage :\n" -#: help.c:379 +#: help.c:384 #, c-format msgid "" " psql --pset=NAME[=VALUE]\n" @@ -3223,54 +3268,54 @@ msgstr "" " ou \\pset NOM [VALEUR] dans psql\n" "\n" -#: help.c:381 +#: help.c:386 #, c-format msgid " border border style (number)\n" msgstr " border style de bordure (nombre)\n" -#: help.c:382 +#: help.c:387 #, c-format msgid " columns target width for the wrapped format\n" msgstr " columns largeur cible pour le format encadré\n" -#: help.c:383 +#: help.c:388 #, c-format msgid " expanded (or x) expanded output [on, off, auto]\n" msgstr " expanded (or x) sortie étendue [on, off, auto]\n" -#: help.c:384 +#: help.c:389 #, c-format msgid " fieldsep field separator for unaligned output (default \"%s\")\n" msgstr " fieldsep champ séparateur pour l'affichage non aligné (par défaut « %s »)\n" -#: help.c:385 +#: help.c:390 #, c-format msgid " fieldsep_zero set field separator for unaligned output to zero byte\n" msgstr "" " fieldsep_zero configure le séparateur de champ pour l'affichage non\\n\n" " aligné à l'octet zéro\n" -#: help.c:386 +#: help.c:391 #, c-format msgid " footer enable or disable display of the table footer [on, off]\n" msgstr " footer active ou désactive l'affiche du bas de tableau [on, off]\n" -#: help.c:387 +#: help.c:392 #, c-format msgid " format set output format [unaligned, aligned, wrapped, html, asciidoc, ...]\n" msgstr " format active le format de sortie [unaligned, aligned, wrapped, html, asciidoc, ...]\n" -#: help.c:388 +#: help.c:393 #, c-format msgid " linestyle set the border line drawing style [ascii, old-ascii, unicode]\n" msgstr " linestyle configure l'affichage des lignes de bordure [ascii, old-ascii, unicode]\n" -#: help.c:389 +#: help.c:394 #, c-format msgid " null set the string to be printed in place of a null value\n" msgstr " null configure la chaîne à afficher à la place d'une valeur NULL\n" -#: help.c:390 +#: help.c:395 #, c-format msgid "" " numericlocale enable or disable display of a locale-specific character to separate\n" @@ -3279,17 +3324,17 @@ msgstr "" " numericlocale active ou désactive l'affichage d'un caractère spécigique à la locale pour séparer\n" " des groupes de chiffres [on, off]\n" -#: help.c:392 +#: help.c:397 #, c-format msgid " pager control when an external pager is used [yes, no, always]\n" msgstr " pager contrôle quand un paginateur externe est utilisé [yes, no, always]\n" -#: help.c:393 +#: help.c:398 #, c-format msgid " recordsep record (line) separator for unaligned output\n" msgstr " recordsep enregistre le séparateur de ligne pour les affichages non alignés\n" -#: help.c:394 +#: help.c:399 #, c-format msgid " recordsep_zero set record separator for unaligned output to zero byte\n" msgstr "" @@ -3297,7 +3342,7 @@ msgstr "" " non aligné à l'octet zéro\n" "\n" -#: help.c:395 +#: help.c:400 #, c-format msgid "" " tableattr (or T) specify attributes for table tag in html format or proportional\n" @@ -3306,17 +3351,17 @@ msgstr "" " tableattr (or T) indique les attributs pour la balise de table dans le format html ou les largeurs\n" " proportionnelles de colonnes pour les types de données alignés à gauche dans le format latex-longtable\n" -#: help.c:397 +#: help.c:402 #, c-format msgid " title set the table title for any subsequently printed tables\n" msgstr " title configure le titre de la table pour toute table affichée\n" -#: help.c:398 +#: 
help.c:403 #, c-format msgid " tuples_only if set, only actual table data is shown\n" msgstr " tuples_only si activé, seules les données de la table sont affichées\n" -#: help.c:399 +#: help.c:404 #, c-format msgid "" " unicode_border_linestyle\n" @@ -3329,7 +3374,7 @@ msgstr "" " unicode_header_linestyle\n" " configure le style d'affichage de ligne Unicode [single, double]\n" -#: help.c:404 +#: help.c:409 #, c-format msgid "" "\n" @@ -3338,7 +3383,7 @@ msgstr "" "\n" "Variables d'environnement :\n" -#: help.c:408 +#: help.c:413 #, c-format msgid "" " NAME=VALUE [NAME=VALUE] psql ...\n" @@ -3349,7 +3394,7 @@ msgstr "" " ou \\setenv NOM [VALEUR] dans psql\n" "\n" -#: help.c:410 +#: help.c:415 #, c-format msgid "" " set NAME=VALUE\n" @@ -3362,52 +3407,52 @@ msgstr "" " ou \\setenv NOM [VALEUR] dans psql\n" "\n" -#: help.c:413 +#: help.c:418 #, c-format msgid " COLUMNS number of columns for wrapped format\n" msgstr " COLUMNS nombre de colonnes pour le format encadré\n" -#: help.c:414 +#: help.c:419 #, c-format msgid " PAGER name of external pager program\n" msgstr " PAGER nom du paginateur externe\n" -#: help.c:415 +#: help.c:420 #, c-format msgid " PGAPPNAME same as the application_name connection parameter\n" msgstr " PGAPPNAME identique au paramètre de connexion application_name\n" -#: help.c:416 +#: help.c:421 #, c-format msgid " PGDATABASE same as the dbname connection parameter\n" msgstr " PGDATABASE identique au paramètre de connexion dbname\n" -#: help.c:417 +#: help.c:422 #, c-format msgid " PGHOST same as the host connection parameter\n" msgstr " PGHOST identique au paramètre de connexion host\n" -#: help.c:418 +#: help.c:423 #, c-format msgid " PGPASSWORD connection password (not recommended)\n" msgstr " PGPASSWORD mot de passe de connexion (non recommendé)\n" -#: help.c:419 +#: help.c:424 #, c-format msgid " PGPASSFILE password file name\n" msgstr " PGPASSFILE nom du fichier de mot de passe\n" -#: help.c:420 +#: help.c:425 #, c-format msgid " PGPORT same as the port connection parameter\n" msgstr " PGPORT identique au paramètre de connexion port\n" -#: help.c:421 +#: help.c:426 #, c-format msgid " PGUSER same as the user connection parameter\n" msgstr " PGUSER identique au paramètre de connexion user\n" -#: help.c:422 +#: help.c:427 #, c-format msgid "" " PSQL_EDITOR, EDITOR, VISUAL\n" @@ -3416,7 +3461,7 @@ msgstr "" " PSQL_EDITOR, EDITOR, VISUAL\n" " éditeur utilisé par les commandes \\e, \\ef et \\ev\n" -#: help.c:424 +#: help.c:429 #, c-format msgid "" " PSQL_EDITOR_LINENUMBER_ARG\n" @@ -3425,31 +3470,31 @@ msgstr "" " PSQL_EDITOR_LINENUMBER_ARG\n" " comment spécifier un numéro de ligne lors de l'appel de l'éditeur\n" -#: help.c:426 +#: help.c:431 #, c-format msgid " PSQL_HISTORY alternative location for the command history file\n" msgstr " PSQL_HISTORY autre emplacement pour le fichier d'historique des commandes\n" -#: help.c:427 +#: help.c:432 #, c-format msgid " PSQLRC alternative location for the user's .psqlrc file\n" msgstr " PSQLRC autre emplacement pour le fichier .psqlrc de l'utilisateur\n" -#: help.c:428 +#: help.c:433 #, c-format msgid " SHELL shell used by the \\! 
command\n" msgstr " SHELL shell utilisé par la commande \\!\n" -#: help.c:429 +#: help.c:434 #, c-format msgid " TMPDIR directory for temporary files\n" msgstr " TMPDIR répertoire pour les fichiers temporaires\n" -#: help.c:472 +#: help.c:477 msgid "Available help:\n" msgstr "Aide-mémoire disponible :\n" -#: help.c:556 +#: help.c:561 #, c-format msgid "" "Command: %s\n" @@ -3464,7 +3509,7 @@ msgstr "" "%s\n" "\n" -#: help.c:572 +#: help.c:577 #, c-format msgid "" "No help available for \"%s\".\n" @@ -3599,34 +3644,34 @@ msgstr "%s : mémoire épuisée\n" #: sql_help.c:1258 sql_help.c:1260 sql_help.c:1270 sql_help.c:1273 #: sql_help.c:1295 sql_help.c:1297 sql_help.c:1299 sql_help.c:1302 #: sql_help.c:1304 sql_help.c:1306 sql_help.c:1309 sql_help.c:1359 -#: sql_help.c:1397 sql_help.c:1400 sql_help.c:1402 sql_help.c:1404 -#: sql_help.c:1406 sql_help.c:1408 sql_help.c:1411 sql_help.c:1451 -#: sql_help.c:1662 sql_help.c:1726 sql_help.c:1745 sql_help.c:1758 -#: sql_help.c:1814 sql_help.c:1820 sql_help.c:1830 sql_help.c:1850 -#: sql_help.c:1875 sql_help.c:1893 sql_help.c:1922 sql_help.c:2015 -#: sql_help.c:2057 sql_help.c:2079 sql_help.c:2099 sql_help.c:2100 -#: sql_help.c:2135 sql_help.c:2155 sql_help.c:2177 sql_help.c:2191 -#: sql_help.c:2206 sql_help.c:2236 sql_help.c:2261 sql_help.c:2307 -#: sql_help.c:2573 sql_help.c:2586 sql_help.c:2603 sql_help.c:2619 -#: sql_help.c:2659 sql_help.c:2711 sql_help.c:2715 sql_help.c:2717 -#: sql_help.c:2723 sql_help.c:2741 sql_help.c:2768 sql_help.c:2803 -#: sql_help.c:2815 sql_help.c:2824 sql_help.c:2868 sql_help.c:2882 -#: sql_help.c:2910 sql_help.c:2918 sql_help.c:2926 sql_help.c:2934 -#: sql_help.c:2942 sql_help.c:2950 sql_help.c:2958 sql_help.c:2966 -#: sql_help.c:2975 sql_help.c:2986 sql_help.c:2994 sql_help.c:3002 -#: sql_help.c:3010 sql_help.c:3018 sql_help.c:3028 sql_help.c:3037 -#: sql_help.c:3046 sql_help.c:3054 sql_help.c:3063 sql_help.c:3071 -#: sql_help.c:3079 sql_help.c:3088 sql_help.c:3096 sql_help.c:3104 -#: sql_help.c:3112 sql_help.c:3120 sql_help.c:3128 sql_help.c:3136 -#: sql_help.c:3144 sql_help.c:3152 sql_help.c:3160 sql_help.c:3168 -#: sql_help.c:3185 sql_help.c:3194 sql_help.c:3202 sql_help.c:3219 -#: sql_help.c:3234 sql_help.c:3502 sql_help.c:3553 sql_help.c:3582 -#: sql_help.c:3590 sql_help.c:4013 sql_help.c:4061 sql_help.c:4202 +#: sql_help.c:1401 sql_help.c:1404 sql_help.c:1406 sql_help.c:1408 +#: sql_help.c:1410 sql_help.c:1412 sql_help.c:1415 sql_help.c:1455 +#: sql_help.c:1666 sql_help.c:1730 sql_help.c:1749 sql_help.c:1762 +#: sql_help.c:1818 sql_help.c:1824 sql_help.c:1834 sql_help.c:1854 +#: sql_help.c:1879 sql_help.c:1897 sql_help.c:1926 sql_help.c:2019 +#: sql_help.c:2061 sql_help.c:2083 sql_help.c:2103 sql_help.c:2104 +#: sql_help.c:2139 sql_help.c:2159 sql_help.c:2181 sql_help.c:2195 +#: sql_help.c:2210 sql_help.c:2240 sql_help.c:2265 sql_help.c:2311 +#: sql_help.c:2577 sql_help.c:2590 sql_help.c:2607 sql_help.c:2623 +#: sql_help.c:2663 sql_help.c:2715 sql_help.c:2719 sql_help.c:2721 +#: sql_help.c:2727 sql_help.c:2745 sql_help.c:2772 sql_help.c:2807 +#: sql_help.c:2819 sql_help.c:2828 sql_help.c:2872 sql_help.c:2886 +#: sql_help.c:2914 sql_help.c:2922 sql_help.c:2930 sql_help.c:2938 +#: sql_help.c:2946 sql_help.c:2954 sql_help.c:2962 sql_help.c:2970 +#: sql_help.c:2979 sql_help.c:2990 sql_help.c:2998 sql_help.c:3006 +#: sql_help.c:3014 sql_help.c:3022 sql_help.c:3032 sql_help.c:3041 +#: sql_help.c:3050 sql_help.c:3058 sql_help.c:3067 sql_help.c:3075 +#: sql_help.c:3083 sql_help.c:3092 sql_help.c:3100 sql_help.c:3108 +#: 
sql_help.c:3116 sql_help.c:3124 sql_help.c:3132 sql_help.c:3140 +#: sql_help.c:3148 sql_help.c:3156 sql_help.c:3164 sql_help.c:3172 +#: sql_help.c:3189 sql_help.c:3198 sql_help.c:3206 sql_help.c:3223 +#: sql_help.c:3238 sql_help.c:3506 sql_help.c:3557 sql_help.c:3586 +#: sql_help.c:3594 sql_help.c:4017 sql_help.c:4065 sql_help.c:4206 msgid "name" msgstr "nom" -#: sql_help.c:37 sql_help.c:40 sql_help.c:43 sql_help.c:326 sql_help.c:1520 -#: sql_help.c:2883 sql_help.c:3807 +#: sql_help.c:37 sql_help.c:40 sql_help.c:43 sql_help.c:326 sql_help.c:1524 +#: sql_help.c:2887 sql_help.c:3811 msgid "aggregate_signature" msgstr "signature_agrégat" @@ -3637,7 +3682,7 @@ msgstr "signature_agrégat" #: sql_help.c:927 sql_help.c:947 sql_help.c:960 sql_help.c:994 sql_help.c:1093 #: sql_help.c:1169 sql_help.c:1212 sql_help.c:1233 sql_help.c:1247 #: sql_help.c:1259 sql_help.c:1272 sql_help.c:1303 sql_help.c:1360 -#: sql_help.c:1405 +#: sql_help.c:1409 msgid "new_name" msgstr "nouveau_nom" @@ -3646,7 +3691,7 @@ msgstr "nouveau_nom" #: sql_help.c:609 sql_help.c:667 sql_help.c:687 sql_help.c:716 sql_help.c:771 #: sql_help.c:817 sql_help.c:896 sql_help.c:925 sql_help.c:945 sql_help.c:958 #: sql_help.c:992 sql_help.c:1153 sql_help.c:1171 sql_help.c:1214 -#: sql_help.c:1235 sql_help.c:1298 sql_help.c:1403 sql_help.c:2559 +#: sql_help.c:1235 sql_help.c:1298 sql_help.c:1407 sql_help.c:2563 msgid "new_owner" msgstr "nouveau_propriétaire" @@ -3654,67 +3699,67 @@ msgstr "nouveau_propriétaire" #: sql_help.c:440 sql_help.c:525 sql_help.c:650 sql_help.c:691 sql_help.c:719 #: sql_help.c:774 sql_help.c:929 sql_help.c:962 sql_help.c:1095 sql_help.c:1216 #: sql_help.c:1237 sql_help.c:1249 sql_help.c:1261 sql_help.c:1305 -#: sql_help.c:1407 +#: sql_help.c:1411 msgid "new_schema" msgstr "nouveau_schéma" -#: sql_help.c:45 sql_help.c:1576 sql_help.c:2884 sql_help.c:3828 +#: sql_help.c:45 sql_help.c:1580 sql_help.c:2888 sql_help.c:3832 msgid "where aggregate_signature is:" msgstr "où signature_agrégat est :" #: sql_help.c:46 sql_help.c:49 sql_help.c:52 sql_help.c:336 sql_help.c:361 #: sql_help.c:364 sql_help.c:367 sql_help.c:507 sql_help.c:512 sql_help.c:517 -#: sql_help.c:522 sql_help.c:527 sql_help.c:1538 sql_help.c:1577 -#: sql_help.c:1580 sql_help.c:1583 sql_help.c:1727 sql_help.c:1746 -#: sql_help.c:1749 sql_help.c:2016 sql_help.c:2885 sql_help.c:2888 -#: sql_help.c:2891 sql_help.c:2976 sql_help.c:3387 sql_help.c:3720 -#: sql_help.c:3813 sql_help.c:3829 sql_help.c:3832 sql_help.c:3835 +#: sql_help.c:522 sql_help.c:527 sql_help.c:1542 sql_help.c:1581 +#: sql_help.c:1584 sql_help.c:1587 sql_help.c:1731 sql_help.c:1750 +#: sql_help.c:1753 sql_help.c:2020 sql_help.c:2889 sql_help.c:2892 +#: sql_help.c:2895 sql_help.c:2980 sql_help.c:3391 sql_help.c:3724 +#: sql_help.c:3817 sql_help.c:3833 sql_help.c:3836 sql_help.c:3839 msgid "argmode" msgstr "mode_argument" #: sql_help.c:47 sql_help.c:50 sql_help.c:53 sql_help.c:337 sql_help.c:362 #: sql_help.c:365 sql_help.c:368 sql_help.c:508 sql_help.c:513 sql_help.c:518 -#: sql_help.c:523 sql_help.c:528 sql_help.c:1539 sql_help.c:1578 -#: sql_help.c:1581 sql_help.c:1584 sql_help.c:1728 sql_help.c:1747 -#: sql_help.c:1750 sql_help.c:2017 sql_help.c:2886 sql_help.c:2889 -#: sql_help.c:2892 sql_help.c:2977 sql_help.c:3814 sql_help.c:3830 -#: sql_help.c:3833 sql_help.c:3836 +#: sql_help.c:523 sql_help.c:528 sql_help.c:1543 sql_help.c:1582 +#: sql_help.c:1585 sql_help.c:1588 sql_help.c:1732 sql_help.c:1751 +#: sql_help.c:1754 sql_help.c:2021 sql_help.c:2890 sql_help.c:2893 +#: sql_help.c:2896 
sql_help.c:2981 sql_help.c:3818 sql_help.c:3834 +#: sql_help.c:3837 sql_help.c:3840 msgid "argname" msgstr "nom_agrégat" #: sql_help.c:48 sql_help.c:51 sql_help.c:54 sql_help.c:338 sql_help.c:363 #: sql_help.c:366 sql_help.c:369 sql_help.c:509 sql_help.c:514 sql_help.c:519 -#: sql_help.c:524 sql_help.c:529 sql_help.c:1540 sql_help.c:1579 -#: sql_help.c:1582 sql_help.c:1585 sql_help.c:2018 sql_help.c:2887 -#: sql_help.c:2890 sql_help.c:2893 sql_help.c:2978 sql_help.c:3815 -#: sql_help.c:3831 sql_help.c:3834 sql_help.c:3837 +#: sql_help.c:524 sql_help.c:529 sql_help.c:1544 sql_help.c:1583 +#: sql_help.c:1586 sql_help.c:1589 sql_help.c:2022 sql_help.c:2891 +#: sql_help.c:2894 sql_help.c:2897 sql_help.c:2982 sql_help.c:3819 +#: sql_help.c:3835 sql_help.c:3838 sql_help.c:3841 msgid "argtype" msgstr "type_argument" #: sql_help.c:113 sql_help.c:385 sql_help.c:463 sql_help.c:475 sql_help.c:854 -#: sql_help.c:942 sql_help.c:1230 sql_help.c:1354 sql_help.c:1382 -#: sql_help.c:1633 sql_help.c:1639 sql_help.c:1925 sql_help.c:1966 -#: sql_help.c:1973 sql_help.c:1982 sql_help.c:2058 sql_help.c:2237 -#: sql_help.c:2329 sql_help.c:2588 sql_help.c:2769 sql_help.c:2791 -#: sql_help.c:3254 sql_help.c:3421 +#: sql_help.c:942 sql_help.c:1230 sql_help.c:1354 sql_help.c:1386 +#: sql_help.c:1637 sql_help.c:1643 sql_help.c:1929 sql_help.c:1970 +#: sql_help.c:1977 sql_help.c:1986 sql_help.c:2062 sql_help.c:2241 +#: sql_help.c:2333 sql_help.c:2592 sql_help.c:2773 sql_help.c:2795 +#: sql_help.c:3258 sql_help.c:3425 msgid "option" msgstr "option" -#: sql_help.c:114 sql_help.c:855 sql_help.c:1355 sql_help.c:2059 -#: sql_help.c:2238 sql_help.c:2770 +#: sql_help.c:114 sql_help.c:855 sql_help.c:1355 sql_help.c:2063 +#: sql_help.c:2242 sql_help.c:2774 msgid "where option can be:" msgstr "où option peut être :" -#: sql_help.c:115 sql_help.c:1857 +#: sql_help.c:115 sql_help.c:1861 msgid "allowconn" msgstr "allowconn" -#: sql_help.c:116 sql_help.c:856 sql_help.c:1356 sql_help.c:1858 -#: sql_help.c:2239 sql_help.c:2771 +#: sql_help.c:116 sql_help.c:856 sql_help.c:1356 sql_help.c:1862 +#: sql_help.c:2243 sql_help.c:2775 msgid "connlimit" msgstr "limite_de_connexion" -#: sql_help.c:117 sql_help.c:1859 +#: sql_help.c:117 sql_help.c:1863 msgid "istemplate" msgstr "istemplate" @@ -3724,8 +3769,8 @@ msgstr "nouveau_tablespace" #: sql_help.c:125 sql_help.c:128 sql_help.c:130 sql_help.c:534 sql_help.c:536 #: sql_help.c:537 sql_help.c:863 sql_help.c:867 sql_help.c:870 sql_help.c:1005 -#: sql_help.c:1008 sql_help.c:1362 sql_help.c:1365 sql_help.c:1367 -#: sql_help.c:2027 sql_help.c:3607 sql_help.c:4002 +#: sql_help.c:1008 sql_help.c:1363 sql_help.c:1367 sql_help.c:1370 +#: sql_help.c:2031 sql_help.c:3611 sql_help.c:4006 msgid "configuration_parameter" msgstr "paramètre_configuration" @@ -3733,13 +3778,13 @@ msgstr "paramètre_configuration" #: sql_help.c:535 sql_help.c:583 sql_help.c:659 sql_help.c:665 sql_help.c:815 #: sql_help.c:864 sql_help.c:943 sql_help.c:982 sql_help.c:985 sql_help.c:990 #: sql_help.c:1006 sql_help.c:1007 sql_help.c:1128 sql_help.c:1148 -#: sql_help.c:1174 sql_help.c:1231 sql_help.c:1363 sql_help.c:1383 -#: sql_help.c:1926 sql_help.c:1967 sql_help.c:1974 sql_help.c:1983 -#: sql_help.c:2028 sql_help.c:2029 sql_help.c:2087 sql_help.c:2119 -#: sql_help.c:2209 sql_help.c:2330 sql_help.c:2360 sql_help.c:2458 -#: sql_help.c:2470 sql_help.c:2483 sql_help.c:2523 sql_help.c:2545 -#: sql_help.c:2562 sql_help.c:2589 sql_help.c:2792 sql_help.c:3422 -#: sql_help.c:4003 sql_help.c:4004 +#: sql_help.c:1174 sql_help.c:1231 
sql_help.c:1364 sql_help.c:1387 +#: sql_help.c:1930 sql_help.c:1971 sql_help.c:1978 sql_help.c:1987 +#: sql_help.c:2032 sql_help.c:2033 sql_help.c:2091 sql_help.c:2123 +#: sql_help.c:2213 sql_help.c:2334 sql_help.c:2364 sql_help.c:2462 +#: sql_help.c:2474 sql_help.c:2487 sql_help.c:2527 sql_help.c:2549 +#: sql_help.c:2566 sql_help.c:2593 sql_help.c:2796 sql_help.c:3426 +#: sql_help.c:4007 sql_help.c:4008 msgid "value" msgstr "valeur" @@ -3747,9 +3792,9 @@ msgstr "valeur" msgid "target_role" msgstr "rôle_cible" -#: sql_help.c:199 sql_help.c:1909 sql_help.c:2285 sql_help.c:2290 -#: sql_help.c:3369 sql_help.c:3376 sql_help.c:3390 sql_help.c:3396 -#: sql_help.c:3702 sql_help.c:3709 sql_help.c:3723 sql_help.c:3729 +#: sql_help.c:199 sql_help.c:1913 sql_help.c:2289 sql_help.c:2294 +#: sql_help.c:3373 sql_help.c:3380 sql_help.c:3394 sql_help.c:3400 +#: sql_help.c:3706 sql_help.c:3713 sql_help.c:3727 sql_help.c:3733 msgid "schema_name" msgstr "nom_schéma" @@ -3764,33 +3809,33 @@ msgstr "où abbreviated_grant_or_revoke fait partie de :" #: sql_help.c:202 sql_help.c:203 sql_help.c:204 sql_help.c:205 sql_help.c:206 #: sql_help.c:207 sql_help.c:208 sql_help.c:209 sql_help.c:210 sql_help.c:211 #: sql_help.c:559 sql_help.c:587 sql_help.c:652 sql_help.c:792 sql_help.c:874 -#: sql_help.c:1097 sql_help.c:1370 sql_help.c:2062 sql_help.c:2063 -#: sql_help.c:2064 sql_help.c:2065 sql_help.c:2066 sql_help.c:2193 -#: sql_help.c:2242 sql_help.c:2243 sql_help.c:2244 sql_help.c:2245 -#: sql_help.c:2246 sql_help.c:2774 sql_help.c:2775 sql_help.c:2776 -#: sql_help.c:2777 sql_help.c:2778 sql_help.c:3403 sql_help.c:3404 -#: sql_help.c:3405 sql_help.c:3703 sql_help.c:3707 sql_help.c:3710 -#: sql_help.c:3712 sql_help.c:3714 sql_help.c:3716 sql_help.c:3718 -#: sql_help.c:3724 sql_help.c:3726 sql_help.c:3728 sql_help.c:3730 -#: sql_help.c:3732 sql_help.c:3734 sql_help.c:3735 sql_help.c:3736 -#: sql_help.c:4023 +#: sql_help.c:1097 sql_help.c:1374 sql_help.c:2066 sql_help.c:2067 +#: sql_help.c:2068 sql_help.c:2069 sql_help.c:2070 sql_help.c:2197 +#: sql_help.c:2246 sql_help.c:2247 sql_help.c:2248 sql_help.c:2249 +#: sql_help.c:2250 sql_help.c:2778 sql_help.c:2779 sql_help.c:2780 +#: sql_help.c:2781 sql_help.c:2782 sql_help.c:3407 sql_help.c:3408 +#: sql_help.c:3409 sql_help.c:3707 sql_help.c:3711 sql_help.c:3714 +#: sql_help.c:3716 sql_help.c:3718 sql_help.c:3720 sql_help.c:3722 +#: sql_help.c:3728 sql_help.c:3730 sql_help.c:3732 sql_help.c:3734 +#: sql_help.c:3736 sql_help.c:3738 sql_help.c:3739 sql_help.c:3740 +#: sql_help.c:4027 msgid "role_name" msgstr "nom_rôle" #: sql_help.c:237 sql_help.c:451 sql_help.c:1113 sql_help.c:1115 -#: sql_help.c:1399 sql_help.c:1878 sql_help.c:1882 sql_help.c:1986 -#: sql_help.c:1990 sql_help.c:2083 sql_help.c:2454 sql_help.c:2466 -#: sql_help.c:2479 sql_help.c:2487 sql_help.c:2498 sql_help.c:2527 -#: sql_help.c:3453 sql_help.c:3468 sql_help.c:3470 sql_help.c:3888 -#: sql_help.c:3889 sql_help.c:3898 sql_help.c:3939 sql_help.c:3940 -#: sql_help.c:3941 sql_help.c:3942 sql_help.c:3943 sql_help.c:3944 -#: sql_help.c:3977 sql_help.c:3978 sql_help.c:3983 sql_help.c:3988 -#: sql_help.c:4127 sql_help.c:4128 sql_help.c:4137 sql_help.c:4178 -#: sql_help.c:4179 sql_help.c:4180 sql_help.c:4181 sql_help.c:4182 -#: sql_help.c:4183 sql_help.c:4230 sql_help.c:4232 sql_help.c:4265 -#: sql_help.c:4321 sql_help.c:4322 sql_help.c:4331 sql_help.c:4372 -#: sql_help.c:4373 sql_help.c:4374 sql_help.c:4375 sql_help.c:4376 -#: sql_help.c:4377 +#: sql_help.c:1403 sql_help.c:1882 sql_help.c:1886 sql_help.c:1990 +#: 
sql_help.c:1994 sql_help.c:2087 sql_help.c:2458 sql_help.c:2470 +#: sql_help.c:2483 sql_help.c:2491 sql_help.c:2502 sql_help.c:2531 +#: sql_help.c:3457 sql_help.c:3472 sql_help.c:3474 sql_help.c:3892 +#: sql_help.c:3893 sql_help.c:3902 sql_help.c:3943 sql_help.c:3944 +#: sql_help.c:3945 sql_help.c:3946 sql_help.c:3947 sql_help.c:3948 +#: sql_help.c:3981 sql_help.c:3982 sql_help.c:3987 sql_help.c:3992 +#: sql_help.c:4131 sql_help.c:4132 sql_help.c:4141 sql_help.c:4182 +#: sql_help.c:4183 sql_help.c:4184 sql_help.c:4185 sql_help.c:4186 +#: sql_help.c:4187 sql_help.c:4234 sql_help.c:4236 sql_help.c:4269 +#: sql_help.c:4325 sql_help.c:4326 sql_help.c:4335 sql_help.c:4376 +#: sql_help.c:4377 sql_help.c:4378 sql_help.c:4379 sql_help.c:4380 +#: sql_help.c:4381 msgid "expression" msgstr "expression" @@ -3800,9 +3845,9 @@ msgstr "contrainte_domaine" #: sql_help.c:242 sql_help.c:244 sql_help.c:247 sql_help.c:466 sql_help.c:467 #: sql_help.c:1090 sql_help.c:1134 sql_help.c:1135 sql_help.c:1136 -#: sql_help.c:1156 sql_help.c:1526 sql_help.c:1528 sql_help.c:1881 -#: sql_help.c:1985 sql_help.c:1989 sql_help.c:2486 sql_help.c:2497 -#: sql_help.c:3465 +#: sql_help.c:1156 sql_help.c:1530 sql_help.c:1532 sql_help.c:1885 +#: sql_help.c:1989 sql_help.c:1993 sql_help.c:2490 sql_help.c:2501 +#: sql_help.c:3469 msgid "constraint_name" msgstr "nom_contrainte" @@ -3826,73 +3871,73 @@ msgstr "où objet_membre fait partie de :" #: sql_help.c:333 sql_help.c:334 sql_help.c:339 sql_help.c:343 sql_help.c:345 #: sql_help.c:347 sql_help.c:348 sql_help.c:349 sql_help.c:350 sql_help.c:351 #: sql_help.c:352 sql_help.c:353 sql_help.c:354 sql_help.c:355 sql_help.c:358 -#: sql_help.c:359 sql_help.c:1518 sql_help.c:1523 sql_help.c:1530 -#: sql_help.c:1531 sql_help.c:1532 sql_help.c:1533 sql_help.c:1534 -#: sql_help.c:1535 sql_help.c:1536 sql_help.c:1541 sql_help.c:1543 -#: sql_help.c:1547 sql_help.c:1549 sql_help.c:1553 sql_help.c:1554 -#: sql_help.c:1555 sql_help.c:1558 sql_help.c:1559 sql_help.c:1560 -#: sql_help.c:1561 sql_help.c:1562 sql_help.c:1563 sql_help.c:1564 +#: sql_help.c:359 sql_help.c:1522 sql_help.c:1527 sql_help.c:1534 +#: sql_help.c:1535 sql_help.c:1536 sql_help.c:1537 sql_help.c:1538 +#: sql_help.c:1539 sql_help.c:1540 sql_help.c:1545 sql_help.c:1547 +#: sql_help.c:1551 sql_help.c:1553 sql_help.c:1557 sql_help.c:1558 +#: sql_help.c:1559 sql_help.c:1562 sql_help.c:1563 sql_help.c:1564 #: sql_help.c:1565 sql_help.c:1566 sql_help.c:1567 sql_help.c:1568 -#: sql_help.c:1573 sql_help.c:1574 sql_help.c:3803 sql_help.c:3808 -#: sql_help.c:3809 sql_help.c:3810 sql_help.c:3811 sql_help.c:3817 -#: sql_help.c:3818 sql_help.c:3819 sql_help.c:3820 sql_help.c:3821 +#: sql_help.c:1569 sql_help.c:1570 sql_help.c:1571 sql_help.c:1572 +#: sql_help.c:1577 sql_help.c:1578 sql_help.c:3807 sql_help.c:3812 +#: sql_help.c:3813 sql_help.c:3814 sql_help.c:3815 sql_help.c:3821 #: sql_help.c:3822 sql_help.c:3823 sql_help.c:3824 sql_help.c:3825 -#: sql_help.c:3826 +#: sql_help.c:3826 sql_help.c:3827 sql_help.c:3828 sql_help.c:3829 +#: sql_help.c:3830 msgid "object_name" msgstr "nom_objet" -#: sql_help.c:325 sql_help.c:1519 sql_help.c:3806 +#: sql_help.c:325 sql_help.c:1523 sql_help.c:3810 msgid "aggregate_name" msgstr "nom_agrégat" -#: sql_help.c:327 sql_help.c:1521 sql_help.c:1792 sql_help.c:1796 -#: sql_help.c:1798 sql_help.c:2901 +#: sql_help.c:327 sql_help.c:1525 sql_help.c:1796 sql_help.c:1800 +#: sql_help.c:1802 sql_help.c:2905 msgid "source_type" msgstr "type_source" -#: sql_help.c:328 sql_help.c:1522 sql_help.c:1793 sql_help.c:1797 
-#: sql_help.c:1799 sql_help.c:2902 +#: sql_help.c:328 sql_help.c:1526 sql_help.c:1797 sql_help.c:1801 +#: sql_help.c:1803 sql_help.c:2906 msgid "target_type" msgstr "type_cible" -#: sql_help.c:335 sql_help.c:756 sql_help.c:1537 sql_help.c:1794 -#: sql_help.c:1833 sql_help.c:1896 sql_help.c:2136 sql_help.c:2167 -#: sql_help.c:2665 sql_help.c:3386 sql_help.c:3719 sql_help.c:3812 -#: sql_help.c:3917 sql_help.c:3921 sql_help.c:3925 sql_help.c:3928 -#: sql_help.c:4156 sql_help.c:4160 sql_help.c:4164 sql_help.c:4167 -#: sql_help.c:4350 sql_help.c:4354 sql_help.c:4358 sql_help.c:4361 +#: sql_help.c:335 sql_help.c:756 sql_help.c:1541 sql_help.c:1798 +#: sql_help.c:1837 sql_help.c:1900 sql_help.c:2140 sql_help.c:2171 +#: sql_help.c:2669 sql_help.c:3390 sql_help.c:3723 sql_help.c:3816 +#: sql_help.c:3921 sql_help.c:3925 sql_help.c:3929 sql_help.c:3932 +#: sql_help.c:4160 sql_help.c:4164 sql_help.c:4168 sql_help.c:4171 +#: sql_help.c:4354 sql_help.c:4358 sql_help.c:4362 sql_help.c:4365 msgid "function_name" msgstr "nom_fonction" -#: sql_help.c:340 sql_help.c:749 sql_help.c:1544 sql_help.c:2160 +#: sql_help.c:340 sql_help.c:749 sql_help.c:1548 sql_help.c:2164 msgid "operator_name" msgstr "nom_opérateur" -#: sql_help.c:341 sql_help.c:685 sql_help.c:689 sql_help.c:693 sql_help.c:1545 -#: sql_help.c:2137 sql_help.c:3019 +#: sql_help.c:341 sql_help.c:685 sql_help.c:689 sql_help.c:693 sql_help.c:1549 +#: sql_help.c:2141 sql_help.c:3023 msgid "left_type" msgstr "type_argument_gauche" -#: sql_help.c:342 sql_help.c:686 sql_help.c:690 sql_help.c:694 sql_help.c:1546 -#: sql_help.c:2138 sql_help.c:3020 +#: sql_help.c:342 sql_help.c:686 sql_help.c:690 sql_help.c:694 sql_help.c:1550 +#: sql_help.c:2142 sql_help.c:3024 msgid "right_type" msgstr "type_argument_droit" #: sql_help.c:344 sql_help.c:346 sql_help.c:712 sql_help.c:715 sql_help.c:718 #: sql_help.c:747 sql_help.c:759 sql_help.c:767 sql_help.c:770 sql_help.c:773 -#: sql_help.c:1548 sql_help.c:1550 sql_help.c:2157 sql_help.c:2178 -#: sql_help.c:2503 sql_help.c:3029 sql_help.c:3038 +#: sql_help.c:1552 sql_help.c:1554 sql_help.c:2161 sql_help.c:2182 +#: sql_help.c:2507 sql_help.c:3033 sql_help.c:3042 msgid "index_method" msgstr "méthode_indexage" -#: sql_help.c:356 sql_help.c:1152 sql_help.c:1569 sql_help.c:2024 -#: sql_help.c:2461 sql_help.c:2632 sql_help.c:3176 sql_help.c:3400 -#: sql_help.c:3733 +#: sql_help.c:356 sql_help.c:1152 sql_help.c:1573 sql_help.c:2028 +#: sql_help.c:2465 sql_help.c:2636 sql_help.c:3180 sql_help.c:3404 +#: sql_help.c:3737 msgid "type_name" msgstr "nom_type" -#: sql_help.c:357 sql_help.c:1570 sql_help.c:2023 sql_help.c:2633 -#: sql_help.c:2859 sql_help.c:3177 sql_help.c:3392 sql_help.c:3725 +#: sql_help.c:357 sql_help.c:1574 sql_help.c:2027 sql_help.c:2637 +#: sql_help.c:2863 sql_help.c:3181 sql_help.c:3396 sql_help.c:3729 msgid "lang_name" msgstr "nom_langage" @@ -3900,16 +3945,16 @@ msgstr "nom_langage" msgid "and aggregate_signature is:" msgstr "et signature_agrégat est :" -#: sql_help.c:383 sql_help.c:1664 sql_help.c:1923 +#: sql_help.c:383 sql_help.c:1668 sql_help.c:1927 msgid "handler_function" msgstr "fonction_gestionnaire" -#: sql_help.c:384 sql_help.c:1924 +#: sql_help.c:384 sql_help.c:1928 msgid "validator_function" msgstr "fonction_validateur" #: sql_help.c:433 sql_help.c:510 sql_help.c:641 sql_help.c:1085 sql_help.c:1296 -#: sql_help.c:2494 sql_help.c:2495 sql_help.c:2511 sql_help.c:2512 +#: sql_help.c:2498 sql_help.c:2499 sql_help.c:2515 sql_help.c:2516 msgid "action" msgstr "action" @@ -3920,18 +3965,18 @@ msgstr 
"action" #: sql_help.c:1109 sql_help.c:1110 sql_help.c:1114 sql_help.c:1116 #: sql_help.c:1117 sql_help.c:1118 sql_help.c:1120 sql_help.c:1123 #: sql_help.c:1124 sql_help.c:1126 sql_help.c:1129 sql_help.c:1131 -#: sql_help.c:1398 sql_help.c:1401 sql_help.c:1421 sql_help.c:1525 -#: sql_help.c:1630 sql_help.c:1635 sql_help.c:1649 sql_help.c:1650 -#: sql_help.c:1651 sql_help.c:1964 sql_help.c:1977 sql_help.c:2021 -#: sql_help.c:2082 sql_help.c:2117 sql_help.c:2315 sql_help.c:2343 -#: sql_help.c:2344 sql_help.c:2445 sql_help.c:2453 sql_help.c:2462 -#: sql_help.c:2465 sql_help.c:2474 sql_help.c:2478 sql_help.c:2499 -#: sql_help.c:2501 sql_help.c:2508 sql_help.c:2526 sql_help.c:2543 -#: sql_help.c:2668 sql_help.c:2804 sql_help.c:3371 sql_help.c:3372 -#: sql_help.c:3452 sql_help.c:3467 sql_help.c:3469 sql_help.c:3471 -#: sql_help.c:3704 sql_help.c:3705 sql_help.c:3805 sql_help.c:3948 -#: sql_help.c:4187 sql_help.c:4229 sql_help.c:4231 sql_help.c:4233 -#: sql_help.c:4250 sql_help.c:4253 sql_help.c:4381 +#: sql_help.c:1402 sql_help.c:1405 sql_help.c:1425 sql_help.c:1529 +#: sql_help.c:1634 sql_help.c:1639 sql_help.c:1653 sql_help.c:1654 +#: sql_help.c:1655 sql_help.c:1968 sql_help.c:1981 sql_help.c:2025 +#: sql_help.c:2086 sql_help.c:2121 sql_help.c:2319 sql_help.c:2347 +#: sql_help.c:2348 sql_help.c:2449 sql_help.c:2457 sql_help.c:2466 +#: sql_help.c:2469 sql_help.c:2478 sql_help.c:2482 sql_help.c:2503 +#: sql_help.c:2505 sql_help.c:2512 sql_help.c:2530 sql_help.c:2547 +#: sql_help.c:2672 sql_help.c:2808 sql_help.c:3375 sql_help.c:3376 +#: sql_help.c:3456 sql_help.c:3471 sql_help.c:3473 sql_help.c:3475 +#: sql_help.c:3708 sql_help.c:3709 sql_help.c:3809 sql_help.c:3952 +#: sql_help.c:4191 sql_help.c:4233 sql_help.c:4235 sql_help.c:4237 +#: sql_help.c:4254 sql_help.c:4257 sql_help.c:4385 msgid "column_name" msgstr "nom_colonne" @@ -3944,21 +3989,21 @@ msgid "where action is one of:" msgstr "où action fait partie de :" #: sql_help.c:443 sql_help.c:448 sql_help.c:915 sql_help.c:1106 sql_help.c:1111 -#: sql_help.c:1314 sql_help.c:1318 sql_help.c:1876 sql_help.c:1965 -#: sql_help.c:2156 sql_help.c:2308 sql_help.c:2446 sql_help.c:2713 -#: sql_help.c:3554 +#: sql_help.c:1314 sql_help.c:1318 sql_help.c:1880 sql_help.c:1969 +#: sql_help.c:2160 sql_help.c:2312 sql_help.c:2450 sql_help.c:2717 +#: sql_help.c:3558 msgid "data_type" msgstr "type_données" #: sql_help.c:444 sql_help.c:449 sql_help.c:1107 sql_help.c:1112 -#: sql_help.c:1315 sql_help.c:1319 sql_help.c:1877 sql_help.c:1968 -#: sql_help.c:2084 sql_help.c:2447 sql_help.c:2455 sql_help.c:2467 -#: sql_help.c:2480 sql_help.c:2714 sql_help.c:2720 sql_help.c:3462 +#: sql_help.c:1315 sql_help.c:1319 sql_help.c:1881 sql_help.c:1972 +#: sql_help.c:2088 sql_help.c:2451 sql_help.c:2459 sql_help.c:2471 +#: sql_help.c:2484 sql_help.c:2718 sql_help.c:2724 sql_help.c:3466 msgid "collation" msgstr "collationnement" -#: sql_help.c:445 sql_help.c:1108 sql_help.c:1969 sql_help.c:1978 -#: sql_help.c:2448 sql_help.c:2463 sql_help.c:2475 +#: sql_help.c:445 sql_help.c:1108 sql_help.c:1973 sql_help.c:1982 +#: sql_help.c:2452 sql_help.c:2467 sql_help.c:2479 msgid "column_constraint" msgstr "contrainte_colonne" @@ -3971,50 +4016,50 @@ msgstr "entier" msgid "attribute_option" msgstr "option_attribut" -#: sql_help.c:465 sql_help.c:1132 sql_help.c:1970 sql_help.c:1979 -#: sql_help.c:2449 sql_help.c:2464 sql_help.c:2476 +#: sql_help.c:465 sql_help.c:1132 sql_help.c:1974 sql_help.c:1983 +#: sql_help.c:2453 sql_help.c:2468 sql_help.c:2480 msgid "table_constraint" msgstr 
"contrainte_table" #: sql_help.c:468 sql_help.c:469 sql_help.c:470 sql_help.c:471 sql_help.c:1137 -#: sql_help.c:1138 sql_help.c:1139 sql_help.c:1140 sql_help.c:1571 +#: sql_help.c:1138 sql_help.c:1139 sql_help.c:1140 sql_help.c:1575 msgid "trigger_name" msgstr "nom_trigger" #: sql_help.c:472 sql_help.c:473 sql_help.c:1150 sql_help.c:1151 -#: sql_help.c:1971 sql_help.c:1976 sql_help.c:2452 sql_help.c:2473 +#: sql_help.c:1975 sql_help.c:1980 sql_help.c:2456 sql_help.c:2477 msgid "parent_table" msgstr "table_parent" -#: sql_help.c:530 sql_help.c:580 sql_help.c:643 sql_help.c:1275 sql_help.c:1908 +#: sql_help.c:530 sql_help.c:580 sql_help.c:643 sql_help.c:1275 sql_help.c:1912 msgid "extension_name" msgstr "nom_extension" -#: sql_help.c:532 sql_help.c:2025 +#: sql_help.c:532 sql_help.c:2029 msgid "execution_cost" msgstr "coût_exécution" -#: sql_help.c:533 sql_help.c:2026 +#: sql_help.c:533 sql_help.c:2030 msgid "result_rows" msgstr "lignes_de_résultat" #: sql_help.c:554 sql_help.c:556 sql_help.c:853 sql_help.c:861 sql_help.c:865 #: sql_help.c:868 sql_help.c:871 sql_help.c:1353 sql_help.c:1361 -#: sql_help.c:1364 sql_help.c:1366 sql_help.c:1368 sql_help.c:2286 -#: sql_help.c:2288 sql_help.c:2291 sql_help.c:2292 sql_help.c:3370 -#: sql_help.c:3374 sql_help.c:3377 sql_help.c:3379 sql_help.c:3381 -#: sql_help.c:3383 sql_help.c:3385 sql_help.c:3391 sql_help.c:3393 -#: sql_help.c:3395 sql_help.c:3397 sql_help.c:3399 sql_help.c:3401 +#: sql_help.c:1365 sql_help.c:1368 sql_help.c:1371 sql_help.c:2290 +#: sql_help.c:2292 sql_help.c:2295 sql_help.c:2296 sql_help.c:3374 +#: sql_help.c:3378 sql_help.c:3381 sql_help.c:3383 sql_help.c:3385 +#: sql_help.c:3387 sql_help.c:3389 sql_help.c:3395 sql_help.c:3397 +#: sql_help.c:3399 sql_help.c:3401 sql_help.c:3403 sql_help.c:3405 msgid "role_specification" msgstr "specification_role" -#: sql_help.c:555 sql_help.c:557 sql_help.c:1380 sql_help.c:1851 -#: sql_help.c:2294 sql_help.c:2789 sql_help.c:3210 sql_help.c:4033 +#: sql_help.c:555 sql_help.c:557 sql_help.c:1384 sql_help.c:1855 +#: sql_help.c:2298 sql_help.c:2793 sql_help.c:3214 sql_help.c:4037 msgid "user_name" msgstr "nom_utilisateur" -#: sql_help.c:558 sql_help.c:873 sql_help.c:1369 sql_help.c:2293 -#: sql_help.c:3402 +#: sql_help.c:558 sql_help.c:873 sql_help.c:1373 sql_help.c:2297 +#: sql_help.c:3406 msgid "where role_specification can be:" msgstr "où specification_role peut être :" @@ -4022,116 +4067,117 @@ msgstr "où specification_role peut être :" msgid "group_name" msgstr "nom_groupe" -#: sql_help.c:578 sql_help.c:1856 sql_help.c:2088 sql_help.c:2120 -#: sql_help.c:2459 sql_help.c:2471 sql_help.c:2484 sql_help.c:2524 -#: sql_help.c:2546 sql_help.c:2558 sql_help.c:3398 sql_help.c:3731 +#: sql_help.c:578 sql_help.c:1860 sql_help.c:2092 sql_help.c:2124 +#: sql_help.c:2463 sql_help.c:2475 sql_help.c:2488 sql_help.c:2528 +#: sql_help.c:2550 sql_help.c:2562 sql_help.c:3402 sql_help.c:3735 msgid "tablespace_name" msgstr "nom_tablespace" #: sql_help.c:582 sql_help.c:585 sql_help.c:664 sql_help.c:666 sql_help.c:1147 -#: sql_help.c:1149 sql_help.c:2086 sql_help.c:2118 sql_help.c:2457 -#: sql_help.c:2469 sql_help.c:2482 sql_help.c:2522 sql_help.c:2544 +#: sql_help.c:1149 sql_help.c:2090 sql_help.c:2122 sql_help.c:2461 +#: sql_help.c:2473 sql_help.c:2486 sql_help.c:2526 sql_help.c:2548 msgid "storage_parameter" msgstr "paramètre_stockage" -#: sql_help.c:608 sql_help.c:1542 sql_help.c:3816 +#: sql_help.c:608 sql_help.c:1546 sql_help.c:3820 msgid "large_object_oid" msgstr "oid_large_object" #: sql_help.c:663 
sql_help.c:1145 sql_help.c:1154 sql_help.c:1157 -#: sql_help.c:1461 +#: sql_help.c:1465 msgid "index_name" msgstr "nom_index" -#: sql_help.c:695 sql_help.c:2141 +#: sql_help.c:695 sql_help.c:2145 msgid "res_proc" msgstr "res_proc" -#: sql_help.c:696 sql_help.c:2142 +#: sql_help.c:696 sql_help.c:2146 msgid "join_proc" msgstr "join_proc" -#: sql_help.c:748 sql_help.c:760 sql_help.c:2159 +#: sql_help.c:748 sql_help.c:760 sql_help.c:2163 msgid "strategy_number" msgstr "numéro_de_stratégie" #: sql_help.c:750 sql_help.c:751 sql_help.c:754 sql_help.c:755 sql_help.c:761 -#: sql_help.c:762 sql_help.c:764 sql_help.c:765 sql_help.c:2161 sql_help.c:2162 -#: sql_help.c:2165 sql_help.c:2166 +#: sql_help.c:762 sql_help.c:764 sql_help.c:765 sql_help.c:2165 sql_help.c:2166 +#: sql_help.c:2169 sql_help.c:2170 msgid "op_type" msgstr "type_op" -#: sql_help.c:752 sql_help.c:2163 +#: sql_help.c:752 sql_help.c:2167 msgid "sort_family_name" msgstr "nom_famille_tri" -#: sql_help.c:753 sql_help.c:763 sql_help.c:2164 +#: sql_help.c:753 sql_help.c:763 sql_help.c:2168 msgid "support_number" msgstr "numéro_de_support" -#: sql_help.c:757 sql_help.c:1795 sql_help.c:2168 sql_help.c:2635 -#: sql_help.c:2637 +#: sql_help.c:757 sql_help.c:1799 sql_help.c:2172 sql_help.c:2639 +#: sql_help.c:2641 msgid "argument_type" msgstr "type_argument" #: sql_help.c:788 sql_help.c:791 sql_help.c:808 sql_help.c:810 sql_help.c:812 #: sql_help.c:883 sql_help.c:922 sql_help.c:1271 sql_help.c:1274 -#: sql_help.c:1420 sql_help.c:1460 sql_help.c:1527 sql_help.c:1552 -#: sql_help.c:1557 sql_help.c:1572 sql_help.c:1629 sql_help.c:1634 -#: sql_help.c:1963 sql_help.c:1975 sql_help.c:2080 sql_help.c:2116 -#: sql_help.c:2192 sql_help.c:2207 sql_help.c:2263 sql_help.c:2314 -#: sql_help.c:2345 sql_help.c:2444 sql_help.c:2460 sql_help.c:2472 -#: sql_help.c:2542 sql_help.c:2661 sql_help.c:2838 sql_help.c:3055 -#: sql_help.c:3080 sql_help.c:3186 sql_help.c:3368 sql_help.c:3373 -#: sql_help.c:3418 sql_help.c:3450 sql_help.c:3701 sql_help.c:3706 -#: sql_help.c:3804 sql_help.c:3903 sql_help.c:3905 sql_help.c:3954 -#: sql_help.c:3993 sql_help.c:4142 sql_help.c:4144 sql_help.c:4193 -#: sql_help.c:4227 sql_help.c:4249 sql_help.c:4251 sql_help.c:4252 -#: sql_help.c:4336 sql_help.c:4338 sql_help.c:4387 +#: sql_help.c:1424 sql_help.c:1464 sql_help.c:1531 sql_help.c:1556 +#: sql_help.c:1561 sql_help.c:1576 sql_help.c:1633 sql_help.c:1638 +#: sql_help.c:1967 sql_help.c:1979 sql_help.c:2084 sql_help.c:2120 +#: sql_help.c:2196 sql_help.c:2211 sql_help.c:2267 sql_help.c:2318 +#: sql_help.c:2349 sql_help.c:2448 sql_help.c:2464 sql_help.c:2476 +#: sql_help.c:2546 sql_help.c:2665 sql_help.c:2842 sql_help.c:3059 +#: sql_help.c:3084 sql_help.c:3190 sql_help.c:3372 sql_help.c:3377 +#: sql_help.c:3422 sql_help.c:3454 sql_help.c:3705 sql_help.c:3710 +#: sql_help.c:3808 sql_help.c:3907 sql_help.c:3909 sql_help.c:3958 +#: sql_help.c:3997 sql_help.c:4146 sql_help.c:4148 sql_help.c:4197 +#: sql_help.c:4231 sql_help.c:4253 sql_help.c:4255 sql_help.c:4256 +#: sql_help.c:4340 sql_help.c:4342 sql_help.c:4391 msgid "table_name" msgstr "nom_table" -#: sql_help.c:793 sql_help.c:2194 +#: sql_help.c:793 sql_help.c:2198 msgid "using_expression" msgstr "expression_using" -#: sql_help.c:794 sql_help.c:2195 +#: sql_help.c:794 sql_help.c:2199 msgid "check_expression" msgstr "expression_check" -#: sql_help.c:814 sql_help.c:2208 +#: sql_help.c:814 sql_help.c:2212 msgid "publication_parameter" msgstr "paramètre_publication" -#: sql_help.c:857 sql_help.c:1357 sql_help.c:2060 sql_help.c:2240 -#: 
sql_help.c:2772 +#: sql_help.c:857 sql_help.c:1357 sql_help.c:2064 sql_help.c:2244 +#: sql_help.c:2776 msgid "password" msgstr "mot_de_passe" -#: sql_help.c:858 sql_help.c:1358 sql_help.c:2061 sql_help.c:2241 -#: sql_help.c:2773 +#: sql_help.c:858 sql_help.c:1358 sql_help.c:2065 sql_help.c:2245 +#: sql_help.c:2777 msgid "timestamp" msgstr "horodatage" -#: sql_help.c:862 sql_help.c:866 sql_help.c:869 sql_help.c:872 sql_help.c:3378 -#: sql_help.c:3711 +#: sql_help.c:862 sql_help.c:866 sql_help.c:869 sql_help.c:872 sql_help.c:1362 +#: sql_help.c:1366 sql_help.c:1369 sql_help.c:1372 sql_help.c:3382 +#: sql_help.c:3715 msgid "database_name" msgstr "nom_base_de_donnée" -#: sql_help.c:916 sql_help.c:2309 +#: sql_help.c:916 sql_help.c:2313 msgid "increment" msgstr "incrément" -#: sql_help.c:917 sql_help.c:2310 +#: sql_help.c:917 sql_help.c:2314 msgid "minvalue" msgstr "valeur_min" -#: sql_help.c:918 sql_help.c:2311 +#: sql_help.c:918 sql_help.c:2315 msgid "maxvalue" msgstr "valeur_max" -#: sql_help.c:919 sql_help.c:2312 sql_help.c:3901 sql_help.c:3991 -#: sql_help.c:4140 sql_help.c:4269 sql_help.c:4334 +#: sql_help.c:919 sql_help.c:2316 sql_help.c:3905 sql_help.c:3995 +#: sql_help.c:4144 sql_help.c:4273 sql_help.c:4338 msgid "start" msgstr "début" @@ -4139,15 +4185,15 @@ msgstr "début" msgid "restart" msgstr "nouveau_début" -#: sql_help.c:921 sql_help.c:2313 +#: sql_help.c:921 sql_help.c:2317 msgid "cache" msgstr "cache" -#: sql_help.c:978 sql_help.c:2357 +#: sql_help.c:978 sql_help.c:2361 msgid "conninfo" msgstr "conninfo" -#: sql_help.c:980 sql_help.c:2358 +#: sql_help.c:980 sql_help.c:2362 msgid "publication_name" msgstr "nom_publication" @@ -4159,7 +4205,7 @@ msgstr "option_ensemble_publication" msgid "refresh_option" msgstr "option_rafraichissement" -#: sql_help.c:989 sql_help.c:2359 +#: sql_help.c:989 sql_help.c:2363 msgid "subscription_parameter" msgstr "paramètre_souscription" @@ -4167,11 +4213,11 @@ msgstr "paramètre_souscription" msgid "partition_name" msgstr "nom_partition" -#: sql_help.c:1101 sql_help.c:1980 sql_help.c:2477 +#: sql_help.c:1101 sql_help.c:1984 sql_help.c:2481 msgid "partition_bound_spec" msgstr "partition_bound_spec" -#: sql_help.c:1119 sql_help.c:2489 +#: sql_help.c:1119 sql_help.c:2493 msgid "sequence_options" msgstr "options_séquence" @@ -4191,7 +4237,7 @@ msgstr "nom_règle_réécriture" msgid "and table_constraint_using_index is:" msgstr "et contrainte_table_utilisant_index est :" -#: sql_help.c:1173 sql_help.c:1176 sql_help.c:2561 +#: sql_help.c:1173 sql_help.c:1176 sql_help.c:2565 msgid "tablespace_option" msgstr "option_tablespace" @@ -4212,7 +4258,7 @@ msgid "new_dictionary" msgstr "nouveau_dictionnaire" #: sql_help.c:1300 sql_help.c:1313 sql_help.c:1316 sql_help.c:1317 -#: sql_help.c:2712 +#: sql_help.c:2716 msgid "attribute_name" msgstr "nom_attribut" @@ -4232,1504 +4278,1504 @@ msgstr "valeur_enum_voisine" msgid "existing_enum_value" msgstr "valeur_enum_existante" -#: sql_help.c:1381 sql_help.c:1972 sql_help.c:1981 sql_help.c:2325 -#: sql_help.c:2790 sql_help.c:3211 sql_help.c:3384 sql_help.c:3419 -#: sql_help.c:3717 +#: sql_help.c:1385 sql_help.c:1976 sql_help.c:1985 sql_help.c:2329 +#: sql_help.c:2794 sql_help.c:3215 sql_help.c:3388 sql_help.c:3423 +#: sql_help.c:3721 msgid "server_name" msgstr "nom_serveur" -#: sql_help.c:1409 sql_help.c:1412 sql_help.c:2805 +#: sql_help.c:1413 sql_help.c:1416 sql_help.c:2809 msgid "view_option_name" msgstr "nom_option_vue" -#: sql_help.c:1410 sql_help.c:2806 +#: sql_help.c:1414 sql_help.c:2810 msgid "view_option_value" 
msgstr "valeur_option_vue" -#: sql_help.c:1435 sql_help.c:4049 sql_help.c:4051 sql_help.c:4075 +#: sql_help.c:1439 sql_help.c:4053 sql_help.c:4055 sql_help.c:4079 msgid "transaction_mode" msgstr "mode_transaction" -#: sql_help.c:1436 sql_help.c:4052 sql_help.c:4076 +#: sql_help.c:1440 sql_help.c:4056 sql_help.c:4080 msgid "where transaction_mode is one of:" msgstr "où mode_transaction fait partie de :" -#: sql_help.c:1524 +#: sql_help.c:1528 msgid "relation_name" msgstr "nom_relation" -#: sql_help.c:1529 sql_help.c:3380 sql_help.c:3713 +#: sql_help.c:1533 sql_help.c:3384 sql_help.c:3717 msgid "domain_name" msgstr "nom_domaine" -#: sql_help.c:1551 +#: sql_help.c:1555 msgid "policy_name" msgstr "nom_politique" -#: sql_help.c:1556 +#: sql_help.c:1560 msgid "rule_name" msgstr "nom_règle" -#: sql_help.c:1575 +#: sql_help.c:1579 msgid "text" msgstr "texte" -#: sql_help.c:1600 sql_help.c:3563 sql_help.c:3751 +#: sql_help.c:1604 sql_help.c:3567 sql_help.c:3755 msgid "transaction_id" msgstr "id_transaction" -#: sql_help.c:1631 sql_help.c:1637 sql_help.c:3489 +#: sql_help.c:1635 sql_help.c:1641 sql_help.c:3493 msgid "filename" msgstr "nom_fichier" -#: sql_help.c:1632 sql_help.c:1638 sql_help.c:2265 sql_help.c:2266 -#: sql_help.c:2267 +#: sql_help.c:1636 sql_help.c:1642 sql_help.c:2269 sql_help.c:2270 +#: sql_help.c:2271 msgid "command" msgstr "commande" -#: sql_help.c:1636 sql_help.c:2121 sql_help.c:2547 sql_help.c:2807 -#: sql_help.c:2825 sql_help.c:3454 +#: sql_help.c:1640 sql_help.c:2125 sql_help.c:2551 sql_help.c:2811 +#: sql_help.c:2829 sql_help.c:3458 msgid "query" msgstr "requête" -#: sql_help.c:1640 sql_help.c:3257 +#: sql_help.c:1644 sql_help.c:3261 msgid "where option can be one of:" msgstr "où option fait partie de :" -#: sql_help.c:1641 +#: sql_help.c:1645 msgid "format_name" msgstr "nom_format" -#: sql_help.c:1642 sql_help.c:1643 sql_help.c:1646 sql_help.c:3258 -#: sql_help.c:3259 sql_help.c:3260 sql_help.c:3261 sql_help.c:3262 -#: sql_help.c:3263 +#: sql_help.c:1646 sql_help.c:1647 sql_help.c:1650 sql_help.c:3262 +#: sql_help.c:3263 sql_help.c:3264 sql_help.c:3265 sql_help.c:3266 +#: sql_help.c:3267 msgid "boolean" msgstr "boolean" -#: sql_help.c:1644 +#: sql_help.c:1648 msgid "delimiter_character" msgstr "caractère_délimiteur" -#: sql_help.c:1645 +#: sql_help.c:1649 msgid "null_string" msgstr "chaîne_null" -#: sql_help.c:1647 +#: sql_help.c:1651 msgid "quote_character" msgstr "caractère_guillemet" -#: sql_help.c:1648 +#: sql_help.c:1652 msgid "escape_character" msgstr "chaîne_d_échappement" -#: sql_help.c:1652 +#: sql_help.c:1656 msgid "encoding_name" msgstr "nom_encodage" -#: sql_help.c:1663 +#: sql_help.c:1667 msgid "access_method_type" msgstr "access_method_type" -#: sql_help.c:1729 sql_help.c:1748 sql_help.c:1751 +#: sql_help.c:1733 sql_help.c:1752 sql_help.c:1755 msgid "arg_data_type" msgstr "type_données_arg" -#: sql_help.c:1730 sql_help.c:1752 sql_help.c:1760 +#: sql_help.c:1734 sql_help.c:1756 sql_help.c:1764 msgid "sfunc" msgstr "sfunc" -#: sql_help.c:1731 sql_help.c:1753 sql_help.c:1761 +#: sql_help.c:1735 sql_help.c:1757 sql_help.c:1765 msgid "state_data_type" msgstr "type_de_données_statut" -#: sql_help.c:1732 sql_help.c:1754 sql_help.c:1762 +#: sql_help.c:1736 sql_help.c:1758 sql_help.c:1766 msgid "state_data_size" msgstr "taille_de_données_statut" -#: sql_help.c:1733 sql_help.c:1755 sql_help.c:1763 +#: sql_help.c:1737 sql_help.c:1759 sql_help.c:1767 msgid "ffunc" msgstr "ffunc" -#: sql_help.c:1734 sql_help.c:1764 +#: sql_help.c:1738 sql_help.c:1768 msgid "combinefunc" 
msgstr "combinefunc" -#: sql_help.c:1735 sql_help.c:1765 +#: sql_help.c:1739 sql_help.c:1769 msgid "serialfunc" msgstr "serialfunc" -#: sql_help.c:1736 sql_help.c:1766 +#: sql_help.c:1740 sql_help.c:1770 msgid "deserialfunc" msgstr "deserialfunc" -#: sql_help.c:1737 sql_help.c:1756 sql_help.c:1767 +#: sql_help.c:1741 sql_help.c:1760 sql_help.c:1771 msgid "initial_condition" msgstr "condition_initiale" -#: sql_help.c:1738 sql_help.c:1768 +#: sql_help.c:1742 sql_help.c:1772 msgid "msfunc" msgstr "msfunc" -#: sql_help.c:1739 sql_help.c:1769 +#: sql_help.c:1743 sql_help.c:1773 msgid "minvfunc" msgstr "minvfunc" -#: sql_help.c:1740 sql_help.c:1770 +#: sql_help.c:1744 sql_help.c:1774 msgid "mstate_data_type" msgstr "m_type_de_données_statut" -#: sql_help.c:1741 sql_help.c:1771 +#: sql_help.c:1745 sql_help.c:1775 msgid "mstate_data_size" msgstr "m_taille_de_données_statut" -#: sql_help.c:1742 sql_help.c:1772 +#: sql_help.c:1746 sql_help.c:1776 msgid "mffunc" msgstr "mffunc" -#: sql_help.c:1743 sql_help.c:1773 +#: sql_help.c:1747 sql_help.c:1777 msgid "minitial_condition" msgstr "m_condition_initiale" -#: sql_help.c:1744 sql_help.c:1774 +#: sql_help.c:1748 sql_help.c:1778 msgid "sort_operator" msgstr "opérateur_de_tri" -#: sql_help.c:1757 +#: sql_help.c:1761 msgid "or the old syntax" msgstr "ou l'ancienne syntaxe" -#: sql_help.c:1759 +#: sql_help.c:1763 msgid "base_type" msgstr "type_base" -#: sql_help.c:1815 +#: sql_help.c:1819 msgid "locale" msgstr "locale" -#: sql_help.c:1816 sql_help.c:1854 +#: sql_help.c:1820 sql_help.c:1858 msgid "lc_collate" msgstr "lc_collate" -#: sql_help.c:1817 sql_help.c:1855 +#: sql_help.c:1821 sql_help.c:1859 msgid "lc_ctype" msgstr "lc_ctype" -#: sql_help.c:1818 sql_help.c:3802 +#: sql_help.c:1822 sql_help.c:3806 msgid "provider" msgstr "fournisseur" -#: sql_help.c:1819 sql_help.c:1910 +#: sql_help.c:1823 sql_help.c:1914 msgid "version" msgstr "version" -#: sql_help.c:1821 +#: sql_help.c:1825 msgid "existing_collation" msgstr "collationnement_existant" -#: sql_help.c:1831 +#: sql_help.c:1835 msgid "source_encoding" msgstr "encodage_source" -#: sql_help.c:1832 +#: sql_help.c:1836 msgid "dest_encoding" msgstr "encodage_destination" -#: sql_help.c:1852 sql_help.c:2587 +#: sql_help.c:1856 sql_help.c:2591 msgid "template" msgstr "modèle" -#: sql_help.c:1853 +#: sql_help.c:1857 msgid "encoding" msgstr "encodage" -#: sql_help.c:1879 +#: sql_help.c:1883 msgid "constraint" msgstr "contrainte" -#: sql_help.c:1880 +#: sql_help.c:1884 msgid "where constraint is:" msgstr "où la contrainte est :" -#: sql_help.c:1894 sql_help.c:2262 sql_help.c:2660 +#: sql_help.c:1898 sql_help.c:2266 sql_help.c:2664 msgid "event" msgstr "événement" -#: sql_help.c:1895 +#: sql_help.c:1899 msgid "filter_variable" msgstr "filter_variable" -#: sql_help.c:1911 +#: sql_help.c:1915 msgid "old_version" msgstr "ancienne_version" -#: sql_help.c:1984 sql_help.c:2485 +#: sql_help.c:1988 sql_help.c:2489 msgid "where column_constraint is:" msgstr "où contrainte_colonne est :" -#: sql_help.c:1987 sql_help.c:2019 sql_help.c:2488 +#: sql_help.c:1991 sql_help.c:2023 sql_help.c:2492 msgid "default_expr" msgstr "expression_par_défaut" -#: sql_help.c:1988 sql_help.c:2496 +#: sql_help.c:1992 sql_help.c:2500 msgid "and table_constraint is:" msgstr "et contrainte_table est :" -#: sql_help.c:2020 +#: sql_help.c:2024 msgid "rettype" msgstr "type_en_retour" -#: sql_help.c:2022 +#: sql_help.c:2026 msgid "column_type" msgstr "type_colonne" -#: sql_help.c:2030 +#: sql_help.c:2034 msgid "definition" msgstr "définition" -#: 
sql_help.c:2031 +#: sql_help.c:2035 msgid "obj_file" msgstr "fichier_objet" -#: sql_help.c:2032 +#: sql_help.c:2036 msgid "link_symbol" msgstr "symbole_link" -#: sql_help.c:2033 +#: sql_help.c:2037 msgid "attribute" msgstr "attribut" -#: sql_help.c:2067 sql_help.c:2247 sql_help.c:2779 +#: sql_help.c:2071 sql_help.c:2251 sql_help.c:2783 msgid "uid" msgstr "uid" -#: sql_help.c:2081 +#: sql_help.c:2085 msgid "method" msgstr "méthode" -#: sql_help.c:2085 sql_help.c:2456 sql_help.c:2468 sql_help.c:2481 -#: sql_help.c:2528 sql_help.c:3463 +#: sql_help.c:2089 sql_help.c:2460 sql_help.c:2472 sql_help.c:2485 +#: sql_help.c:2532 sql_help.c:3467 msgid "opclass" msgstr "classe_d_opérateur" -#: sql_help.c:2089 sql_help.c:2507 +#: sql_help.c:2093 sql_help.c:2511 msgid "predicate" msgstr "prédicat" -#: sql_help.c:2101 +#: sql_help.c:2105 msgid "call_handler" msgstr "gestionnaire_d_appel" -#: sql_help.c:2102 +#: sql_help.c:2106 msgid "inline_handler" msgstr "gestionnaire_en_ligne" -#: sql_help.c:2103 +#: sql_help.c:2107 msgid "valfunction" msgstr "fonction_val" -#: sql_help.c:2139 +#: sql_help.c:2143 msgid "com_op" msgstr "com_op" -#: sql_help.c:2140 +#: sql_help.c:2144 msgid "neg_op" msgstr "neg_op" -#: sql_help.c:2158 +#: sql_help.c:2162 msgid "family_name" msgstr "nom_famille" -#: sql_help.c:2169 +#: sql_help.c:2173 msgid "storage_type" msgstr "type_stockage" -#: sql_help.c:2264 sql_help.c:2664 sql_help.c:2841 sql_help.c:3473 -#: sql_help.c:3892 sql_help.c:3894 sql_help.c:3982 sql_help.c:3984 -#: sql_help.c:4131 sql_help.c:4133 sql_help.c:4236 sql_help.c:4325 -#: sql_help.c:4327 +#: sql_help.c:2268 sql_help.c:2668 sql_help.c:2845 sql_help.c:3477 +#: sql_help.c:3896 sql_help.c:3898 sql_help.c:3986 sql_help.c:3988 +#: sql_help.c:4135 sql_help.c:4137 sql_help.c:4240 sql_help.c:4329 +#: sql_help.c:4331 msgid "condition" msgstr "condition" -#: sql_help.c:2268 sql_help.c:2667 +#: sql_help.c:2272 sql_help.c:2671 msgid "where event can be one of:" msgstr "où événement fait partie de :" -#: sql_help.c:2287 sql_help.c:2289 +#: sql_help.c:2291 sql_help.c:2293 msgid "schema_element" msgstr "élément_schéma" -#: sql_help.c:2326 +#: sql_help.c:2330 msgid "server_type" msgstr "type_serveur" -#: sql_help.c:2327 +#: sql_help.c:2331 msgid "server_version" msgstr "version_serveur" -#: sql_help.c:2328 sql_help.c:3382 sql_help.c:3715 +#: sql_help.c:2332 sql_help.c:3386 sql_help.c:3719 msgid "fdw_name" msgstr "nom_fdw" -#: sql_help.c:2341 +#: sql_help.c:2345 msgid "statistics_name" msgstr "nom_statistique" -#: sql_help.c:2342 -msgid "statistic_type" -msgstr "type_statistique" +#: sql_help.c:2346 +msgid "statistics_kind" +msgstr "statistics_kind" -#: sql_help.c:2356 +#: sql_help.c:2360 msgid "subscription_name" msgstr "nom_souscription" -#: sql_help.c:2450 +#: sql_help.c:2454 msgid "source_table" msgstr "table_source" -#: sql_help.c:2451 +#: sql_help.c:2455 msgid "like_option" msgstr "option_like" -#: sql_help.c:2490 sql_help.c:2491 sql_help.c:2500 sql_help.c:2502 -#: sql_help.c:2506 +#: sql_help.c:2494 sql_help.c:2495 sql_help.c:2504 sql_help.c:2506 +#: sql_help.c:2510 msgid "index_parameters" msgstr "paramètres_index" -#: sql_help.c:2492 sql_help.c:2509 +#: sql_help.c:2496 sql_help.c:2513 msgid "reftable" msgstr "table_référence" -#: sql_help.c:2493 sql_help.c:2510 +#: sql_help.c:2497 sql_help.c:2514 msgid "refcolumn" msgstr "colonne_référence" -#: sql_help.c:2504 +#: sql_help.c:2508 msgid "exclude_element" msgstr "élément_exclusion" -#: sql_help.c:2505 sql_help.c:3899 sql_help.c:3989 sql_help.c:4138 -#: sql_help.c:4267 
sql_help.c:4332 +#: sql_help.c:2509 sql_help.c:3903 sql_help.c:3993 sql_help.c:4142 +#: sql_help.c:4271 sql_help.c:4336 msgid "operator" msgstr "opérateur" -#: sql_help.c:2513 +#: sql_help.c:2517 msgid "and like_option is:" msgstr "et option_like est :" -#: sql_help.c:2514 +#: sql_help.c:2518 msgid "and partition_bound_spec is:" msgstr "et partition_bound_spec est :" -#: sql_help.c:2515 sql_help.c:2517 sql_help.c:2519 +#: sql_help.c:2519 sql_help.c:2521 sql_help.c:2523 msgid "numeric_literal" msgstr "numeric_literal" -#: sql_help.c:2516 sql_help.c:2518 sql_help.c:2520 +#: sql_help.c:2520 sql_help.c:2522 sql_help.c:2524 msgid "string_literal" msgstr "littéral_chaîne" -#: sql_help.c:2521 +#: sql_help.c:2525 msgid "index_parameters in UNIQUE, PRIMARY KEY, and EXCLUDE constraints are:" msgstr "dans les contraintes UNIQUE, PRIMARY KEY et EXCLUDE, les paramètres_index sont :" -#: sql_help.c:2525 +#: sql_help.c:2529 msgid "exclude_element in an EXCLUDE constraint is:" msgstr "élément_exclusion dans une contrainte EXCLUDE est :" -#: sql_help.c:2560 +#: sql_help.c:2564 msgid "directory" msgstr "répertoire" -#: sql_help.c:2574 +#: sql_help.c:2578 msgid "parser_name" msgstr "nom_analyseur" -#: sql_help.c:2575 +#: sql_help.c:2579 msgid "source_config" msgstr "configuration_source" -#: sql_help.c:2604 +#: sql_help.c:2608 msgid "start_function" msgstr "fonction_start" -#: sql_help.c:2605 +#: sql_help.c:2609 msgid "gettoken_function" msgstr "fonction_gettoken" -#: sql_help.c:2606 +#: sql_help.c:2610 msgid "end_function" msgstr "fonction_end" -#: sql_help.c:2607 +#: sql_help.c:2611 msgid "lextypes_function" msgstr "fonction_lextypes" -#: sql_help.c:2608 +#: sql_help.c:2612 msgid "headline_function" msgstr "fonction_headline" -#: sql_help.c:2620 +#: sql_help.c:2624 msgid "init_function" msgstr "fonction_init" -#: sql_help.c:2621 +#: sql_help.c:2625 msgid "lexize_function" msgstr "fonction_lexize" -#: sql_help.c:2634 +#: sql_help.c:2638 msgid "from_sql_function_name" msgstr "nom_fonction_from_sql" -#: sql_help.c:2636 +#: sql_help.c:2640 msgid "to_sql_function_name" msgstr "nom_fonction_to_sql" -#: sql_help.c:2662 +#: sql_help.c:2666 msgid "referenced_table_name" msgstr "nom_table_référencée" -#: sql_help.c:2663 +#: sql_help.c:2667 msgid "transition_relation_name" msgstr "nom_relation_transition" -#: sql_help.c:2666 +#: sql_help.c:2670 msgid "arguments" msgstr "arguments" -#: sql_help.c:2716 sql_help.c:3827 +#: sql_help.c:2720 sql_help.c:3831 msgid "label" msgstr "label" -#: sql_help.c:2718 +#: sql_help.c:2722 msgid "subtype" msgstr "sous_type" -#: sql_help.c:2719 +#: sql_help.c:2723 msgid "subtype_operator_class" msgstr "classe_opérateur_sous_type" -#: sql_help.c:2721 +#: sql_help.c:2725 msgid "canonical_function" msgstr "fonction_canonique" -#: sql_help.c:2722 +#: sql_help.c:2726 msgid "subtype_diff_function" msgstr "fonction_diff_sous_type" -#: sql_help.c:2724 +#: sql_help.c:2728 msgid "input_function" msgstr "fonction_en_sortie" -#: sql_help.c:2725 +#: sql_help.c:2729 msgid "output_function" msgstr "fonction_en_sortie" -#: sql_help.c:2726 +#: sql_help.c:2730 msgid "receive_function" msgstr "fonction_receive" -#: sql_help.c:2727 +#: sql_help.c:2731 msgid "send_function" msgstr "fonction_send" -#: sql_help.c:2728 +#: sql_help.c:2732 msgid "type_modifier_input_function" msgstr "fonction_en_entrée_modificateur_type" -#: sql_help.c:2729 +#: sql_help.c:2733 msgid "type_modifier_output_function" msgstr "fonction_en_sortie_modificateur_type" -#: sql_help.c:2730 +#: sql_help.c:2734 msgid "analyze_function" msgstr 
"fonction_analyze" -#: sql_help.c:2731 +#: sql_help.c:2735 msgid "internallength" msgstr "longueur_interne" -#: sql_help.c:2732 +#: sql_help.c:2736 msgid "alignment" msgstr "alignement" -#: sql_help.c:2733 +#: sql_help.c:2737 msgid "storage" msgstr "stockage" -#: sql_help.c:2734 +#: sql_help.c:2738 msgid "like_type" msgstr "type_like" -#: sql_help.c:2735 +#: sql_help.c:2739 msgid "category" msgstr "catégorie" -#: sql_help.c:2736 +#: sql_help.c:2740 msgid "preferred" msgstr "préféré" -#: sql_help.c:2737 +#: sql_help.c:2741 msgid "default" msgstr "par défaut" -#: sql_help.c:2738 +#: sql_help.c:2742 msgid "element" msgstr "élément" -#: sql_help.c:2739 +#: sql_help.c:2743 msgid "delimiter" msgstr "délimiteur" -#: sql_help.c:2740 +#: sql_help.c:2744 msgid "collatable" msgstr "collationnable" -#: sql_help.c:2837 sql_help.c:3449 sql_help.c:3887 sql_help.c:3976 -#: sql_help.c:4126 sql_help.c:4226 sql_help.c:4320 +#: sql_help.c:2841 sql_help.c:3453 sql_help.c:3891 sql_help.c:3980 +#: sql_help.c:4130 sql_help.c:4230 sql_help.c:4324 msgid "with_query" msgstr "requête_with" -#: sql_help.c:2839 sql_help.c:3451 sql_help.c:3906 sql_help.c:3912 -#: sql_help.c:3915 sql_help.c:3919 sql_help.c:3923 sql_help.c:3931 -#: sql_help.c:4145 sql_help.c:4151 sql_help.c:4154 sql_help.c:4158 -#: sql_help.c:4162 sql_help.c:4170 sql_help.c:4228 sql_help.c:4339 -#: sql_help.c:4345 sql_help.c:4348 sql_help.c:4352 sql_help.c:4356 -#: sql_help.c:4364 +#: sql_help.c:2843 sql_help.c:3455 sql_help.c:3910 sql_help.c:3916 +#: sql_help.c:3919 sql_help.c:3923 sql_help.c:3927 sql_help.c:3935 +#: sql_help.c:4149 sql_help.c:4155 sql_help.c:4158 sql_help.c:4162 +#: sql_help.c:4166 sql_help.c:4174 sql_help.c:4232 sql_help.c:4343 +#: sql_help.c:4349 sql_help.c:4352 sql_help.c:4356 sql_help.c:4360 +#: sql_help.c:4368 msgid "alias" msgstr "alias" -#: sql_help.c:2840 +#: sql_help.c:2844 msgid "using_list" msgstr "liste_using" -#: sql_help.c:2842 sql_help.c:3289 sql_help.c:3530 sql_help.c:4237 +#: sql_help.c:2846 sql_help.c:3293 sql_help.c:3534 sql_help.c:4241 msgid "cursor_name" msgstr "nom_curseur" -#: sql_help.c:2843 sql_help.c:3457 sql_help.c:4238 +#: sql_help.c:2847 sql_help.c:3461 sql_help.c:4242 msgid "output_expression" msgstr "expression_en_sortie" -#: sql_help.c:2844 sql_help.c:3458 sql_help.c:3890 sql_help.c:3979 -#: sql_help.c:4129 sql_help.c:4239 sql_help.c:4323 +#: sql_help.c:2848 sql_help.c:3462 sql_help.c:3894 sql_help.c:3983 +#: sql_help.c:4133 sql_help.c:4243 sql_help.c:4327 msgid "output_name" msgstr "nom_en_sortie" -#: sql_help.c:2860 +#: sql_help.c:2864 msgid "code" msgstr "code" -#: sql_help.c:3235 +#: sql_help.c:3239 msgid "parameter" msgstr "paramètre" -#: sql_help.c:3255 sql_help.c:3256 sql_help.c:3555 +#: sql_help.c:3259 sql_help.c:3260 sql_help.c:3559 msgid "statement" msgstr "instruction" -#: sql_help.c:3288 sql_help.c:3529 +#: sql_help.c:3292 sql_help.c:3533 msgid "direction" msgstr "direction" -#: sql_help.c:3290 sql_help.c:3531 +#: sql_help.c:3294 sql_help.c:3535 msgid "where direction can be empty or one of:" msgstr "où direction peut être vide ou faire partie de :" -#: sql_help.c:3291 sql_help.c:3292 sql_help.c:3293 sql_help.c:3294 -#: sql_help.c:3295 sql_help.c:3532 sql_help.c:3533 sql_help.c:3534 -#: sql_help.c:3535 sql_help.c:3536 sql_help.c:3900 sql_help.c:3902 -#: sql_help.c:3990 sql_help.c:3992 sql_help.c:4139 sql_help.c:4141 -#: sql_help.c:4268 sql_help.c:4270 sql_help.c:4333 sql_help.c:4335 +#: sql_help.c:3295 sql_help.c:3296 sql_help.c:3297 sql_help.c:3298 +#: sql_help.c:3299 sql_help.c:3536 
sql_help.c:3537 sql_help.c:3538 +#: sql_help.c:3539 sql_help.c:3540 sql_help.c:3904 sql_help.c:3906 +#: sql_help.c:3994 sql_help.c:3996 sql_help.c:4143 sql_help.c:4145 +#: sql_help.c:4272 sql_help.c:4274 sql_help.c:4337 sql_help.c:4339 msgid "count" msgstr "nombre" -#: sql_help.c:3375 sql_help.c:3708 +#: sql_help.c:3379 sql_help.c:3712 msgid "sequence_name" msgstr "nom_séquence" -#: sql_help.c:3388 sql_help.c:3721 +#: sql_help.c:3392 sql_help.c:3725 msgid "arg_name" msgstr "nom_argument" -#: sql_help.c:3389 sql_help.c:3722 +#: sql_help.c:3393 sql_help.c:3726 msgid "arg_type" msgstr "type_arg" -#: sql_help.c:3394 sql_help.c:3727 +#: sql_help.c:3398 sql_help.c:3731 msgid "loid" msgstr "loid" -#: sql_help.c:3417 +#: sql_help.c:3421 msgid "remote_schema" msgstr "schema_distant" -#: sql_help.c:3420 +#: sql_help.c:3424 msgid "local_schema" msgstr "schéma_local" -#: sql_help.c:3455 +#: sql_help.c:3459 msgid "conflict_target" msgstr "cible_conflit" -#: sql_help.c:3456 +#: sql_help.c:3460 msgid "conflict_action" msgstr "action_conflit" -#: sql_help.c:3459 +#: sql_help.c:3463 msgid "where conflict_target can be one of:" msgstr "où cible_conflit fait partie de :" -#: sql_help.c:3460 +#: sql_help.c:3464 msgid "index_column_name" msgstr "index_nom_colonne" -#: sql_help.c:3461 +#: sql_help.c:3465 msgid "index_expression" msgstr "index_expression" -#: sql_help.c:3464 +#: sql_help.c:3468 msgid "index_predicate" msgstr "index_prédicat" -#: sql_help.c:3466 +#: sql_help.c:3470 msgid "and conflict_action is one of:" msgstr "où action_conflit fait partie de :" -#: sql_help.c:3472 sql_help.c:4234 +#: sql_help.c:3476 sql_help.c:4238 msgid "sub-SELECT" msgstr "sous-SELECT" -#: sql_help.c:3481 sql_help.c:3544 sql_help.c:4210 +#: sql_help.c:3485 sql_help.c:3548 sql_help.c:4214 msgid "channel" msgstr "canal" -#: sql_help.c:3503 +#: sql_help.c:3507 msgid "lockmode" msgstr "mode_de_verrou" -#: sql_help.c:3504 +#: sql_help.c:3508 msgid "where lockmode is one of:" msgstr "où mode_de_verrou fait partie de :" -#: sql_help.c:3545 +#: sql_help.c:3549 msgid "payload" msgstr "contenu" -#: sql_help.c:3572 +#: sql_help.c:3576 msgid "old_role" msgstr "ancien_rôle" -#: sql_help.c:3573 +#: sql_help.c:3577 msgid "new_role" msgstr "nouveau_rôle" -#: sql_help.c:3598 sql_help.c:3759 sql_help.c:3767 +#: sql_help.c:3602 sql_help.c:3763 sql_help.c:3771 msgid "savepoint_name" msgstr "nom_savepoint" -#: sql_help.c:3891 sql_help.c:3933 sql_help.c:3935 sql_help.c:3981 -#: sql_help.c:4130 sql_help.c:4172 sql_help.c:4174 sql_help.c:4324 -#: sql_help.c:4366 sql_help.c:4368 +#: sql_help.c:3895 sql_help.c:3937 sql_help.c:3939 sql_help.c:3985 +#: sql_help.c:4134 sql_help.c:4176 sql_help.c:4178 sql_help.c:4328 +#: sql_help.c:4370 sql_help.c:4372 msgid "from_item" msgstr "élément_from" -#: sql_help.c:3893 sql_help.c:3945 sql_help.c:4132 sql_help.c:4184 -#: sql_help.c:4326 sql_help.c:4378 +#: sql_help.c:3897 sql_help.c:3949 sql_help.c:4136 sql_help.c:4188 +#: sql_help.c:4330 sql_help.c:4382 msgid "grouping_element" msgstr "element_regroupement" -#: sql_help.c:3895 sql_help.c:3985 sql_help.c:4134 sql_help.c:4328 +#: sql_help.c:3899 sql_help.c:3989 sql_help.c:4138 sql_help.c:4332 msgid "window_name" msgstr "nom_window" -#: sql_help.c:3896 sql_help.c:3986 sql_help.c:4135 sql_help.c:4329 +#: sql_help.c:3900 sql_help.c:3990 sql_help.c:4139 sql_help.c:4333 msgid "window_definition" msgstr "définition_window" -#: sql_help.c:3897 sql_help.c:3911 sql_help.c:3949 sql_help.c:3987 -#: sql_help.c:4136 sql_help.c:4150 sql_help.c:4188 sql_help.c:4330 -#: 
sql_help.c:4344 sql_help.c:4382 +#: sql_help.c:3901 sql_help.c:3915 sql_help.c:3953 sql_help.c:3991 +#: sql_help.c:4140 sql_help.c:4154 sql_help.c:4192 sql_help.c:4334 +#: sql_help.c:4348 sql_help.c:4386 msgid "select" msgstr "sélection" -#: sql_help.c:3904 sql_help.c:4143 sql_help.c:4337 +#: sql_help.c:3908 sql_help.c:4147 sql_help.c:4341 msgid "where from_item can be one of:" msgstr "où élément_from fait partie de :" -#: sql_help.c:3907 sql_help.c:3913 sql_help.c:3916 sql_help.c:3920 -#: sql_help.c:3932 sql_help.c:4146 sql_help.c:4152 sql_help.c:4155 -#: sql_help.c:4159 sql_help.c:4171 sql_help.c:4340 sql_help.c:4346 -#: sql_help.c:4349 sql_help.c:4353 sql_help.c:4365 +#: sql_help.c:3911 sql_help.c:3917 sql_help.c:3920 sql_help.c:3924 +#: sql_help.c:3936 sql_help.c:4150 sql_help.c:4156 sql_help.c:4159 +#: sql_help.c:4163 sql_help.c:4175 sql_help.c:4344 sql_help.c:4350 +#: sql_help.c:4353 sql_help.c:4357 sql_help.c:4369 msgid "column_alias" msgstr "alias_colonne" -#: sql_help.c:3908 sql_help.c:4147 sql_help.c:4341 +#: sql_help.c:3912 sql_help.c:4151 sql_help.c:4345 msgid "sampling_method" msgstr "méthode_echantillonnage" -#: sql_help.c:3909 sql_help.c:3918 sql_help.c:3922 sql_help.c:3926 -#: sql_help.c:3929 sql_help.c:4148 sql_help.c:4157 sql_help.c:4161 -#: sql_help.c:4165 sql_help.c:4168 sql_help.c:4342 sql_help.c:4351 -#: sql_help.c:4355 sql_help.c:4359 sql_help.c:4362 +#: sql_help.c:3913 sql_help.c:3922 sql_help.c:3926 sql_help.c:3930 +#: sql_help.c:3933 sql_help.c:4152 sql_help.c:4161 sql_help.c:4165 +#: sql_help.c:4169 sql_help.c:4172 sql_help.c:4346 sql_help.c:4355 +#: sql_help.c:4359 sql_help.c:4363 sql_help.c:4366 msgid "argument" msgstr "argument" -#: sql_help.c:3910 sql_help.c:4149 sql_help.c:4343 +#: sql_help.c:3914 sql_help.c:4153 sql_help.c:4347 msgid "seed" msgstr "graine" -#: sql_help.c:3914 sql_help.c:3947 sql_help.c:4153 sql_help.c:4186 -#: sql_help.c:4347 sql_help.c:4380 +#: sql_help.c:3918 sql_help.c:3951 sql_help.c:4157 sql_help.c:4190 +#: sql_help.c:4351 sql_help.c:4384 msgid "with_query_name" msgstr "nom_requête_with" -#: sql_help.c:3924 sql_help.c:3927 sql_help.c:3930 sql_help.c:4163 -#: sql_help.c:4166 sql_help.c:4169 sql_help.c:4357 sql_help.c:4360 -#: sql_help.c:4363 +#: sql_help.c:3928 sql_help.c:3931 sql_help.c:3934 sql_help.c:4167 +#: sql_help.c:4170 sql_help.c:4173 sql_help.c:4361 sql_help.c:4364 +#: sql_help.c:4367 msgid "column_definition" msgstr "définition_colonne" -#: sql_help.c:3934 sql_help.c:4173 sql_help.c:4367 +#: sql_help.c:3938 sql_help.c:4177 sql_help.c:4371 msgid "join_type" msgstr "type_de_jointure" -#: sql_help.c:3936 sql_help.c:4175 sql_help.c:4369 +#: sql_help.c:3940 sql_help.c:4179 sql_help.c:4373 msgid "join_condition" msgstr "condition_de_jointure" -#: sql_help.c:3937 sql_help.c:4176 sql_help.c:4370 +#: sql_help.c:3941 sql_help.c:4180 sql_help.c:4374 msgid "join_column" msgstr "colonne_de_jointure" -#: sql_help.c:3938 sql_help.c:4177 sql_help.c:4371 +#: sql_help.c:3942 sql_help.c:4181 sql_help.c:4375 msgid "and grouping_element can be one of:" msgstr "où element_regroupement fait partie de :" -#: sql_help.c:3946 sql_help.c:4185 sql_help.c:4379 +#: sql_help.c:3950 sql_help.c:4189 sql_help.c:4383 msgid "and with_query is:" msgstr "et requête_with est :" -#: sql_help.c:3950 sql_help.c:4189 sql_help.c:4383 +#: sql_help.c:3954 sql_help.c:4193 sql_help.c:4387 msgid "values" msgstr "valeurs" -#: sql_help.c:3951 sql_help.c:4190 sql_help.c:4384 +#: sql_help.c:3955 sql_help.c:4194 sql_help.c:4388 msgid "insert" msgstr "insert" -#: sql_help.c:3952 
sql_help.c:4191 sql_help.c:4385 +#: sql_help.c:3956 sql_help.c:4195 sql_help.c:4389 msgid "update" msgstr "update" -#: sql_help.c:3953 sql_help.c:4192 sql_help.c:4386 +#: sql_help.c:3957 sql_help.c:4196 sql_help.c:4390 msgid "delete" msgstr "delete" -#: sql_help.c:3980 +#: sql_help.c:3984 msgid "new_table" msgstr "nouvelle_table" -#: sql_help.c:4005 +#: sql_help.c:4009 msgid "timezone" msgstr "fuseau_horaire" -#: sql_help.c:4050 +#: sql_help.c:4054 msgid "snapshot_id" msgstr "id_snapshot" -#: sql_help.c:4235 +#: sql_help.c:4239 msgid "from_list" msgstr "liste_from" -#: sql_help.c:4266 +#: sql_help.c:4270 msgid "sort_expression" msgstr "expression_de_tri" -#: sql_help.c:4393 sql_help.c:5178 +#: sql_help.c:4397 sql_help.c:5182 msgid "abort the current transaction" msgstr "abandonner la transaction en cours" -#: sql_help.c:4398 +#: sql_help.c:4402 msgid "change the definition of an aggregate function" msgstr "modifier la définition d'une fonction d'agrégation" -#: sql_help.c:4403 +#: sql_help.c:4407 msgid "change the definition of a collation" msgstr "modifier la définition d'un collationnement" -#: sql_help.c:4408 +#: sql_help.c:4412 msgid "change the definition of a conversion" msgstr "modifier la définition d'une conversion" -#: sql_help.c:4413 +#: sql_help.c:4417 msgid "change a database" msgstr "modifier une base de données" -#: sql_help.c:4418 +#: sql_help.c:4422 msgid "define default access privileges" msgstr "définir les droits d'accès par défaut" -#: sql_help.c:4423 +#: sql_help.c:4427 msgid "change the definition of a domain" msgstr "modifier la définition d'un domaine" -#: sql_help.c:4428 +#: sql_help.c:4432 msgid "change the definition of an event trigger" msgstr "modifier la définition d'un trigger sur évènement" -#: sql_help.c:4433 +#: sql_help.c:4437 msgid "change the definition of an extension" msgstr "modifier la définition d'une extension" -#: sql_help.c:4438 +#: sql_help.c:4442 msgid "change the definition of a foreign-data wrapper" msgstr "modifier la définition d'un wrapper de données distantes" -#: sql_help.c:4443 +#: sql_help.c:4447 msgid "change the definition of a foreign table" msgstr "modifier la définition d'une table distante" -#: sql_help.c:4448 +#: sql_help.c:4452 msgid "change the definition of a function" msgstr "modifier la définition d'une fonction" -#: sql_help.c:4453 +#: sql_help.c:4457 msgid "change role name or membership" msgstr "modifier le nom d'un groupe ou la liste des ses membres" -#: sql_help.c:4458 +#: sql_help.c:4462 msgid "change the definition of an index" msgstr "modifier la définition d'un index" -#: sql_help.c:4463 +#: sql_help.c:4467 msgid "change the definition of a procedural language" msgstr "modifier la définition d'un langage procédural" -#: sql_help.c:4468 +#: sql_help.c:4472 msgid "change the definition of a large object" msgstr "modifier la définition d'un « Large Object »" -#: sql_help.c:4473 +#: sql_help.c:4477 msgid "change the definition of a materialized view" msgstr "modifier la définition d'une vue matérialisée" -#: sql_help.c:4478 +#: sql_help.c:4482 msgid "change the definition of an operator" msgstr "modifier la définition d'un opérateur" -#: sql_help.c:4483 +#: sql_help.c:4487 msgid "change the definition of an operator class" msgstr "modifier la définition d'une classe d'opérateurs" -#: sql_help.c:4488 +#: sql_help.c:4492 msgid "change the definition of an operator family" msgstr "modifier la définition d'une famille d'opérateur" -#: sql_help.c:4493 +#: sql_help.c:4497 msgid "change the definition of a row level 
security policy" msgstr "modifier la définition d'une politique de sécurité au niveau ligne" -#: sql_help.c:4498 +#: sql_help.c:4502 msgid "change the definition of a publication" msgstr "modifier la définition d'une publication" -#: sql_help.c:4503 sql_help.c:4583 +#: sql_help.c:4507 sql_help.c:4587 msgid "change a database role" msgstr "modifier un rôle" -#: sql_help.c:4508 +#: sql_help.c:4512 msgid "change the definition of a rule" msgstr "modifier la définition d'une règle" -#: sql_help.c:4513 +#: sql_help.c:4517 msgid "change the definition of a schema" msgstr "modifier la définition d'un schéma" -#: sql_help.c:4518 +#: sql_help.c:4522 msgid "change the definition of a sequence generator" msgstr "modifier la définition d'un générateur de séquence" -#: sql_help.c:4523 +#: sql_help.c:4527 msgid "change the definition of a foreign server" msgstr "modifier la définition d'un serveur distant" -#: sql_help.c:4528 +#: sql_help.c:4532 msgid "change the definition of an extended statistics object" msgstr "modifier la définition d'un objet de statistiques étendues" -#: sql_help.c:4533 +#: sql_help.c:4537 msgid "change the definition of a subscription" msgstr "modifier la définition d'une souscription" -#: sql_help.c:4538 +#: sql_help.c:4542 msgid "change a server configuration parameter" msgstr "modifie un paramètre de configuration du serveur" -#: sql_help.c:4543 +#: sql_help.c:4547 msgid "change the definition of a table" msgstr "modifier la définition d'une table" -#: sql_help.c:4548 +#: sql_help.c:4552 msgid "change the definition of a tablespace" msgstr "modifier la définition d'un tablespace" -#: sql_help.c:4553 +#: sql_help.c:4557 msgid "change the definition of a text search configuration" msgstr "modifier la définition d'une configuration de la recherche de texte" -#: sql_help.c:4558 +#: sql_help.c:4562 msgid "change the definition of a text search dictionary" msgstr "modifier la définition d'un dictionnaire de la recherche de texte" -#: sql_help.c:4563 +#: sql_help.c:4567 msgid "change the definition of a text search parser" msgstr "modifier la définition d'un analyseur de la recherche de texte" -#: sql_help.c:4568 +#: sql_help.c:4572 msgid "change the definition of a text search template" msgstr "modifier la définition d'un modèle de la recherche de texte" -#: sql_help.c:4573 +#: sql_help.c:4577 msgid "change the definition of a trigger" msgstr "modifier la définition d'un trigger" -#: sql_help.c:4578 +#: sql_help.c:4582 msgid "change the definition of a type" msgstr "modifier la définition d'un type" -#: sql_help.c:4588 +#: sql_help.c:4592 msgid "change the definition of a user mapping" msgstr "modifier la définition d'une correspondance d'utilisateur" -#: sql_help.c:4593 +#: sql_help.c:4597 msgid "change the definition of a view" msgstr "modifier la définition d'une vue" -#: sql_help.c:4598 +#: sql_help.c:4602 msgid "collect statistics about a database" msgstr "acquérir des statistiques concernant la base de données" -#: sql_help.c:4603 sql_help.c:5243 +#: sql_help.c:4607 sql_help.c:5247 msgid "start a transaction block" msgstr "débuter un bloc de transaction" -#: sql_help.c:4608 +#: sql_help.c:4612 msgid "force a write-ahead log checkpoint" msgstr "forcer un point de vérification des journaux de transactions" -#: sql_help.c:4613 +#: sql_help.c:4617 msgid "close a cursor" msgstr "fermer un curseur" -#: sql_help.c:4618 +#: sql_help.c:4622 msgid "cluster a table according to an index" msgstr "réorganiser (cluster) une table en fonction d'un index" -#: sql_help.c:4623 +#: 
sql_help.c:4627 msgid "define or change the comment of an object" msgstr "définir ou modifier les commentaires d'un objet" -#: sql_help.c:4628 sql_help.c:5078 +#: sql_help.c:4632 sql_help.c:5082 msgid "commit the current transaction" msgstr "valider la transaction en cours" -#: sql_help.c:4633 +#: sql_help.c:4637 msgid "commit a transaction that was earlier prepared for two-phase commit" msgstr "" "valider une transaction précédemment préparée pour une validation en deux\n" "phases" -#: sql_help.c:4638 +#: sql_help.c:4642 msgid "copy data between a file and a table" msgstr "copier des données entre un fichier et une table" -#: sql_help.c:4643 +#: sql_help.c:4647 msgid "define a new access method" msgstr "définir une nouvelle méthode d'accès" -#: sql_help.c:4648 +#: sql_help.c:4652 msgid "define a new aggregate function" msgstr "définir une nouvelle fonction d'agrégation" -#: sql_help.c:4653 +#: sql_help.c:4657 msgid "define a new cast" msgstr "définir un nouveau transtypage" -#: sql_help.c:4658 +#: sql_help.c:4662 msgid "define a new collation" msgstr "définir un nouveau collationnement" -#: sql_help.c:4663 +#: sql_help.c:4667 msgid "define a new encoding conversion" msgstr "définir une nouvelle conversion d'encodage" -#: sql_help.c:4668 +#: sql_help.c:4672 msgid "create a new database" msgstr "créer une nouvelle base de données" -#: sql_help.c:4673 +#: sql_help.c:4677 msgid "define a new domain" msgstr "définir un nouveau domaine" -#: sql_help.c:4678 +#: sql_help.c:4682 msgid "define a new event trigger" msgstr "définir un nouveau trigger sur évènement" -#: sql_help.c:4683 +#: sql_help.c:4687 msgid "install an extension" msgstr "installer une extension" -#: sql_help.c:4688 +#: sql_help.c:4692 msgid "define a new foreign-data wrapper" msgstr "définir un nouveau wrapper de données distantes" -#: sql_help.c:4693 +#: sql_help.c:4697 msgid "define a new foreign table" msgstr "définir une nouvelle table distante" -#: sql_help.c:4698 +#: sql_help.c:4702 msgid "define a new function" msgstr "définir une nouvelle fonction" -#: sql_help.c:4703 sql_help.c:4748 sql_help.c:4833 +#: sql_help.c:4707 sql_help.c:4752 sql_help.c:4837 msgid "define a new database role" msgstr "définir un nouveau rôle" -#: sql_help.c:4708 +#: sql_help.c:4712 msgid "define a new index" msgstr "définir un nouvel index" -#: sql_help.c:4713 +#: sql_help.c:4717 msgid "define a new procedural language" msgstr "définir un nouveau langage de procédures" -#: sql_help.c:4718 +#: sql_help.c:4722 msgid "define a new materialized view" msgstr "définir une nouvelle vue matérialisée" -#: sql_help.c:4723 +#: sql_help.c:4727 msgid "define a new operator" msgstr "définir un nouvel opérateur" -#: sql_help.c:4728 +#: sql_help.c:4732 msgid "define a new operator class" msgstr "définir une nouvelle classe d'opérateur" -#: sql_help.c:4733 +#: sql_help.c:4737 msgid "define a new operator family" msgstr "définir une nouvelle famille d'opérateur" -#: sql_help.c:4738 +#: sql_help.c:4742 msgid "define a new row level security policy for a table" msgstr "définir une nouvelle politique de sécurité au niveau ligne pour une table" -#: sql_help.c:4743 +#: sql_help.c:4747 msgid "define a new publication" msgstr "définir une nouvelle publication" -#: sql_help.c:4753 +#: sql_help.c:4757 msgid "define a new rewrite rule" msgstr "définir une nouvelle règle de réécriture" -#: sql_help.c:4758 +#: sql_help.c:4762 msgid "define a new schema" msgstr "définir un nouveau schéma" -#: sql_help.c:4763 +#: sql_help.c:4767 msgid "define a new sequence generator" msgstr 
"définir un nouveau générateur de séquence" -#: sql_help.c:4768 +#: sql_help.c:4772 msgid "define a new foreign server" msgstr "définir un nouveau serveur distant" -#: sql_help.c:4773 +#: sql_help.c:4777 msgid "define extended statistics" msgstr "définir des statistiques étendues" -#: sql_help.c:4778 +#: sql_help.c:4782 msgid "define a new subscription" msgstr "définir une nouvelle souscription" -#: sql_help.c:4783 +#: sql_help.c:4787 msgid "define a new table" msgstr "définir une nouvelle table" -#: sql_help.c:4788 sql_help.c:5208 +#: sql_help.c:4792 sql_help.c:5212 msgid "define a new table from the results of a query" msgstr "définir une nouvelle table à partir des résultats d'une requête" -#: sql_help.c:4793 +#: sql_help.c:4797 msgid "define a new tablespace" msgstr "définir un nouveau tablespace" -#: sql_help.c:4798 +#: sql_help.c:4802 msgid "define a new text search configuration" msgstr "définir une nouvelle configuration de la recherche de texte" -#: sql_help.c:4803 +#: sql_help.c:4807 msgid "define a new text search dictionary" msgstr "définir un nouveau dictionnaire de la recherche de texte" -#: sql_help.c:4808 +#: sql_help.c:4812 msgid "define a new text search parser" msgstr "définir un nouvel analyseur de la recherche de texte" -#: sql_help.c:4813 +#: sql_help.c:4817 msgid "define a new text search template" msgstr "définir un nouveau modèle de la recherche de texte" -#: sql_help.c:4818 +#: sql_help.c:4822 msgid "define a new transform" msgstr "définir une nouvelle transformation" -#: sql_help.c:4823 +#: sql_help.c:4827 msgid "define a new trigger" msgstr "définir un nouveau trigger" -#: sql_help.c:4828 +#: sql_help.c:4832 msgid "define a new data type" msgstr "définir un nouveau type de données" -#: sql_help.c:4838 +#: sql_help.c:4842 msgid "define a new mapping of a user to a foreign server" msgstr "définit une nouvelle correspondance d'un utilisateur vers un serveur distant" -#: sql_help.c:4843 +#: sql_help.c:4847 msgid "define a new view" msgstr "définir une nouvelle vue" -#: sql_help.c:4848 +#: sql_help.c:4852 msgid "deallocate a prepared statement" msgstr "désallouer une instruction préparée" -#: sql_help.c:4853 +#: sql_help.c:4857 msgid "define a cursor" msgstr "définir un curseur" -#: sql_help.c:4858 +#: sql_help.c:4862 msgid "delete rows of a table" msgstr "supprimer des lignes d'une table" -#: sql_help.c:4863 +#: sql_help.c:4867 msgid "discard session state" msgstr "annuler l'état de la session" -#: sql_help.c:4868 +#: sql_help.c:4872 msgid "execute an anonymous code block" msgstr "exécute un bloc de code anonyme" -#: sql_help.c:4873 +#: sql_help.c:4877 msgid "remove an access method" msgstr "supprimer une méthode d'accès" -#: sql_help.c:4878 +#: sql_help.c:4882 msgid "remove an aggregate function" msgstr "supprimer une fonction d'agrégation" -#: sql_help.c:4883 +#: sql_help.c:4887 msgid "remove a cast" msgstr "supprimer un transtypage" -#: sql_help.c:4888 +#: sql_help.c:4892 msgid "remove a collation" msgstr "supprimer un collationnement" -#: sql_help.c:4893 +#: sql_help.c:4897 msgid "remove a conversion" msgstr "supprimer une conversion" -#: sql_help.c:4898 +#: sql_help.c:4902 msgid "remove a database" msgstr "supprimer une base de données" -#: sql_help.c:4903 +#: sql_help.c:4907 msgid "remove a domain" msgstr "supprimer un domaine" -#: sql_help.c:4908 +#: sql_help.c:4912 msgid "remove an event trigger" msgstr "supprimer un trigger sur évènement" -#: sql_help.c:4913 +#: sql_help.c:4917 msgid "remove an extension" msgstr "supprimer une extension" -#: sql_help.c:4918 
+#: sql_help.c:4922 msgid "remove a foreign-data wrapper" msgstr "supprimer un wrapper de données distantes" -#: sql_help.c:4923 +#: sql_help.c:4927 msgid "remove a foreign table" msgstr "supprimer une table distante" -#: sql_help.c:4928 +#: sql_help.c:4932 msgid "remove a function" msgstr "supprimer une fonction" -#: sql_help.c:4933 sql_help.c:4983 sql_help.c:5063 +#: sql_help.c:4937 sql_help.c:4987 sql_help.c:5067 msgid "remove a database role" msgstr "supprimer un rôle de la base de données" -#: sql_help.c:4938 +#: sql_help.c:4942 msgid "remove an index" msgstr "supprimer un index" -#: sql_help.c:4943 +#: sql_help.c:4947 msgid "remove a procedural language" msgstr "supprimer un langage procédural" -#: sql_help.c:4948 +#: sql_help.c:4952 msgid "remove a materialized view" msgstr "supprimer une vue matérialisée" -#: sql_help.c:4953 +#: sql_help.c:4957 msgid "remove an operator" msgstr "supprimer un opérateur" -#: sql_help.c:4958 +#: sql_help.c:4962 msgid "remove an operator class" msgstr "supprimer une classe d'opérateur" -#: sql_help.c:4963 +#: sql_help.c:4967 msgid "remove an operator family" msgstr "supprimer une famille d'opérateur" -#: sql_help.c:4968 +#: sql_help.c:4972 msgid "remove database objects owned by a database role" msgstr "supprimer les objets appartenant à un rôle" -#: sql_help.c:4973 +#: sql_help.c:4977 msgid "remove a row level security policy from a table" msgstr "supprimer une politique de sécurité au niveau ligne pour une table" -#: sql_help.c:4978 +#: sql_help.c:4982 msgid "remove a publication" msgstr "supprimer une publication" -#: sql_help.c:4988 +#: sql_help.c:4992 msgid "remove a rewrite rule" msgstr "supprimer une règle de réécriture" -#: sql_help.c:4993 +#: sql_help.c:4997 msgid "remove a schema" msgstr "supprimer un schéma" -#: sql_help.c:4998 +#: sql_help.c:5002 msgid "remove a sequence" msgstr "supprimer une séquence" -#: sql_help.c:5003 +#: sql_help.c:5007 msgid "remove a foreign server descriptor" msgstr "supprimer un descripteur de serveur distant" -#: sql_help.c:5008 +#: sql_help.c:5012 msgid "remove extended statistics" msgstr "supprimer des statistiques étendues" -#: sql_help.c:5013 +#: sql_help.c:5017 msgid "remove a subscription" msgstr "supprimer une souscription" -#: sql_help.c:5018 +#: sql_help.c:5022 msgid "remove a table" msgstr "supprimer une table" -#: sql_help.c:5023 +#: sql_help.c:5027 msgid "remove a tablespace" msgstr "supprimer un tablespace" -#: sql_help.c:5028 +#: sql_help.c:5032 msgid "remove a text search configuration" msgstr "supprimer une configuration de la recherche de texte" -#: sql_help.c:5033 +#: sql_help.c:5037 msgid "remove a text search dictionary" msgstr "supprimer un dictionnaire de la recherche de texte" -#: sql_help.c:5038 +#: sql_help.c:5042 msgid "remove a text search parser" msgstr "supprimer un analyseur de la recherche de texte" -#: sql_help.c:5043 +#: sql_help.c:5047 msgid "remove a text search template" msgstr "supprimer un modèle de la recherche de texte" -#: sql_help.c:5048 +#: sql_help.c:5052 msgid "remove a transform" msgstr "supprimer une transformation" -#: sql_help.c:5053 +#: sql_help.c:5057 msgid "remove a trigger" msgstr "supprimer un trigger" -#: sql_help.c:5058 +#: sql_help.c:5062 msgid "remove a data type" msgstr "supprimer un type de données" -#: sql_help.c:5068 +#: sql_help.c:5072 msgid "remove a user mapping for a foreign server" msgstr "supprimer une correspondance utilisateur pour un serveur distant" -#: sql_help.c:5073 +#: sql_help.c:5077 msgid "remove a view" msgstr "supprimer une 
vue" -#: sql_help.c:5083 +#: sql_help.c:5087 msgid "execute a prepared statement" msgstr "exécuter une instruction préparée" -#: sql_help.c:5088 +#: sql_help.c:5092 msgid "show the execution plan of a statement" msgstr "afficher le plan d'exécution d'une instruction" -#: sql_help.c:5093 +#: sql_help.c:5097 msgid "retrieve rows from a query using a cursor" msgstr "extraire certaines lignes d'une requête à l'aide d'un curseur" -#: sql_help.c:5098 +#: sql_help.c:5102 msgid "define access privileges" msgstr "définir des privilèges d'accès" -#: sql_help.c:5103 +#: sql_help.c:5107 msgid "import table definitions from a foreign server" msgstr "importer la définition d'une table à partir d'un serveur distant" -#: sql_help.c:5108 +#: sql_help.c:5112 msgid "create new rows in a table" msgstr "créer de nouvelles lignes dans une table" -#: sql_help.c:5113 +#: sql_help.c:5117 msgid "listen for a notification" msgstr "se mettre à l'écoute d'une notification" -#: sql_help.c:5118 +#: sql_help.c:5122 msgid "load a shared library file" msgstr "charger un fichier de bibliothèque partagée" -#: sql_help.c:5123 +#: sql_help.c:5127 msgid "lock a table" msgstr "verrouiller une table" -#: sql_help.c:5128 +#: sql_help.c:5132 msgid "position a cursor" msgstr "positionner un curseur" -#: sql_help.c:5133 +#: sql_help.c:5137 msgid "generate a notification" msgstr "engendrer une notification" -#: sql_help.c:5138 +#: sql_help.c:5142 msgid "prepare a statement for execution" msgstr "préparer une instruction pour exécution" -#: sql_help.c:5143 +#: sql_help.c:5147 msgid "prepare the current transaction for two-phase commit" msgstr "préparer la transaction en cours pour une validation en deux phases" -#: sql_help.c:5148 +#: sql_help.c:5152 msgid "change the ownership of database objects owned by a database role" msgstr "changer le propriétaire des objets d'un rôle" -#: sql_help.c:5153 +#: sql_help.c:5157 msgid "replace the contents of a materialized view" msgstr "remplacer le contenu d'une vue matérialisée" -#: sql_help.c:5158 +#: sql_help.c:5162 msgid "rebuild indexes" msgstr "reconstruire des index" -#: sql_help.c:5163 +#: sql_help.c:5167 msgid "destroy a previously defined savepoint" msgstr "détruire un point de retournement précédemment défini" -#: sql_help.c:5168 +#: sql_help.c:5172 msgid "restore the value of a run-time parameter to the default value" msgstr "réinitialiser un paramètre d'exécution à sa valeur par défaut" -#: sql_help.c:5173 +#: sql_help.c:5177 msgid "remove access privileges" msgstr "supprimer des privilèges d'accès" -#: sql_help.c:5183 +#: sql_help.c:5187 msgid "cancel a transaction that was earlier prepared for two-phase commit" msgstr "" "annuler une transaction précédemment préparée pour une validation en deux\n" "phases" -#: sql_help.c:5188 +#: sql_help.c:5192 msgid "roll back to a savepoint" msgstr "annuler jusqu'au point de retournement" -#: sql_help.c:5193 +#: sql_help.c:5197 msgid "define a new savepoint within the current transaction" msgstr "définir un nouveau point de retournement pour la transaction en cours" -#: sql_help.c:5198 +#: sql_help.c:5202 msgid "define or change a security label applied to an object" msgstr "définir ou modifier un label de sécurité à un objet" -#: sql_help.c:5203 sql_help.c:5248 sql_help.c:5278 +#: sql_help.c:5207 sql_help.c:5252 sql_help.c:5282 msgid "retrieve rows from a table or view" msgstr "extraire des lignes d'une table ou d'une vue" -#: sql_help.c:5213 +#: sql_help.c:5217 msgid "change a run-time parameter" msgstr "modifier un paramètre d'exécution" -#: 
sql_help.c:5218 +#: sql_help.c:5222 msgid "set constraint check timing for the current transaction" msgstr "définir le moment de la vérification des contraintes pour la transaction en cours" -#: sql_help.c:5223 +#: sql_help.c:5227 msgid "set the current user identifier of the current session" msgstr "définir l'identifiant actuel de l'utilisateur de la session courante" -#: sql_help.c:5228 +#: sql_help.c:5232 msgid "set the session user identifier and the current user identifier of the current session" msgstr "" "définir l'identifiant de l'utilisateur de session et l'identifiant actuel de\n" "l'utilisateur de la session courante" -#: sql_help.c:5233 +#: sql_help.c:5237 msgid "set the characteristics of the current transaction" msgstr "définir les caractéristiques de la transaction en cours" -#: sql_help.c:5238 +#: sql_help.c:5242 msgid "show the value of a run-time parameter" msgstr "afficher la valeur d'un paramètre d'exécution" -#: sql_help.c:5253 +#: sql_help.c:5257 msgid "empty a table or set of tables" msgstr "vider une table ou un ensemble de tables" -#: sql_help.c:5258 +#: sql_help.c:5262 msgid "stop listening for a notification" msgstr "arrêter l'écoute d'une notification" -#: sql_help.c:5263 +#: sql_help.c:5267 msgid "update rows of a table" msgstr "actualiser les lignes d'une table" -#: sql_help.c:5268 +#: sql_help.c:5272 msgid "garbage-collect and optionally analyze a database" msgstr "compacter et optionnellement analyser une base de données" -#: sql_help.c:5273 +#: sql_help.c:5277 msgid "compute a set of rows" msgstr "calculer un ensemble de lignes" -#: startup.c:184 +#: startup.c:187 #, c-format msgid "%s: -1 can only be used in non-interactive mode\n" msgstr "%s : -1 peut seulement être utilisé dans un mode non interactif\n" -#: startup.c:287 +#: startup.c:290 #, c-format msgid "%s: could not open log file \"%s\": %s\n" msgstr "%s : n'a pas pu ouvrir le journal applicatif « %s » : %s\n" -#: startup.c:394 +#: startup.c:397 #, c-format msgid "" "Type \"help\" for help.\n" @@ -5738,27 +5784,27 @@ msgstr "" "Saisissez « help » pour l'aide.\n" "\n" -#: startup.c:543 +#: startup.c:546 #, c-format msgid "%s: could not set printing parameter \"%s\"\n" msgstr "%s : n'a pas pu configurer le paramètre d'impression « %s »\n" -#: startup.c:645 +#: startup.c:648 #, c-format msgid "Try \"%s --help\" for more information.\n" msgstr "Essayez « %s --help » pour plus d'informations.\n" -#: startup.c:662 +#: startup.c:665 #, c-format msgid "%s: warning: extra command-line argument \"%s\" ignored\n" msgstr "%s : attention : option supplémentaire « %s » ignorée\n" -#: startup.c:711 +#: startup.c:714 #, c-format msgid "%s: could not find own program executable\n" msgstr "%s : n'a pas pu trouver son propre exécutable\n" -#: tab-complete.c:4184 +#: tab-complete.c:4186 #, c-format msgid "" "tab completion query failed: %s\n" @@ -5771,8 +5817,8 @@ msgstr "" #: variables.c:139 #, c-format -msgid "unrecognized value \"%s\" for \"%s\": boolean expected\n" -msgstr "valeur « %s » non reconnue pour « %s » ; booléen attendu\n" +msgid "unrecognized value \"%s\" for \"%s\": Boolean expected\n" +msgstr "valeur « %s » non reconnue pour « %s » : booléen attendu\n" #: variables.c:176 #, c-format @@ -5793,802 +5839,954 @@ msgstr "" "valeur « %s » non reconnue pour « %s »\n" "Les valeurs disponibles sont : %s.\n" -#~ msgid "serialtype" -#~ msgstr "serialtype" +#~ msgid "No per-database role settings support in this server version.\n" +#~ msgstr "Pas de support des paramètres rôle par base de données pour la version 
de ce serveur.\n" -#~ msgid "SSL connection (unknown cipher)\n" -#~ msgstr "Connexion SSL (chiffrement inconnu)\n" +#~ msgid "No matching settings found.\n" +#~ msgstr "Aucun paramètre correspondant trouvé.\n" -#~ msgid " -?, --help show this help, then exit\n" -#~ msgstr " -?, --help affiche cette aide puis quitte\n" +#~ msgid "No settings found.\n" +#~ msgstr "Aucun paramètre trouvé.\n" -#~ msgid "(No rows)\n" -#~ msgstr "(Aucune ligne)\n" +#~ msgid "No matching relations found.\n" +#~ msgstr "Aucune relation correspondante trouvée.\n" -#~ msgid "ALTER VIEW name RENAME TO newname" -#~ msgstr "ALTER VIEW nom RENAME TO nouveau_nom" +#~ msgid "No relations found.\n" +#~ msgstr "Aucune relation trouvée.\n" -#~ msgid " \"%s\"" -#~ msgstr " « %s »" +#~ msgid "Password encryption failed.\n" +#~ msgstr "Échec du chiffrement du mot de passe.\n" -#~ msgid "?%c? \"%s.%s\"" -#~ msgstr "?%c? « %s.%s »" +#~ msgid "\\%s: error while setting variable\n" +#~ msgstr "\\%s : erreur lors de l'initialisation de la variable\n" -#~ msgid "Access privileges for database \"%s\"" -#~ msgstr "Droits d'accès pour la base de données « %s »" +#~ msgid "+ opt(%d) = |%s|\n" +#~ msgstr "+ opt(%d) = |%s|\n" -#~ msgid "" -#~ "WARNING: You are connected to a server with major version %d.%d,\n" -#~ "but your %s client is major version %d.%d. Some backslash commands,\n" -#~ "such as \\d, might not work properly.\n" -#~ "\n" -#~ msgstr "" -#~ "ATTENTION : vous êtes connecté sur un serveur dont la version majeure est\n" -#~ "%d.%d alors que votre client %s est en version majeure %d.%d. Certaines\n" -#~ "commandes avec antislashs, comme \\d, peuvent ne pas fonctionner\n" -#~ "correctement.\n" -#~ "\n" +#~ msgid "could not set variable \"%s\"\n" +#~ msgstr "n'a pas pu initialiser la variable « %s »\n" -#~ msgid "" -#~ "Welcome to %s %s, the PostgreSQL interactive terminal.\n" -#~ "\n" -#~ msgstr "" -#~ "Bienvenue dans %s %s, l'interface interactive de PostgreSQL.\n" -#~ "\n" +#~ msgid "Modifiers" +#~ msgstr "Modificateurs" -#~ msgid "" -#~ "Welcome to %s %s (server %s), the PostgreSQL interactive terminal.\n" -#~ "\n" -#~ msgstr "" -#~ "Bienvenue dans %s %s (serveur %s), l'interface interactive de PostgreSQL.\n" -#~ "\n" +#~ msgid "collate %s" +#~ msgstr "collationnement %s" -#~ msgid "Copy, Large Object\n" -#~ msgstr "Copie, « Large Object »\n" +#~ msgid "not null" +#~ msgstr "non NULL" -#~ msgid " \\z [PATTERN] list table, view, and sequence access privileges (same as \\dp)\n" -#~ msgstr "" -#~ " \\z [MODÈLE] affiche la liste des privilèges d'accès aux tables,\n" -#~ " vues et séquences (identique à \\dp)\n" +#~ msgid "default %s" +#~ msgstr "Par défaut, %s" -#~ msgid " \\l list all databases (add \"+\" for more detail)\n" -#~ msgstr "" -#~ " \\l affiche la liste des bases de données (ajouter « + »\n" -#~ " pour plus de détails)\n" +#~ msgid "Modifier" +#~ msgstr "Modificateur" -#~ msgid " \\dT [PATTERN] list data types (add \"+\" for more detail)\n" -#~ msgstr "" -#~ " \\dT [MODÈLE] affiche la liste des types de données (ajouter « + »\n" -#~ " pour plus de détails)\n" +#~ msgid "Object Description" +#~ msgstr "Description d'un objet" -#~ msgid " \\dn [PATTERN] list schemas (add \"+\" for more detail)\n" -#~ msgstr "" -#~ " \\dn [MODÈLE] affiche la liste des schémas (ajouter « + » pour\n" -#~ " plus de détails)\n" +#~ msgid "%s: could not set variable \"%s\"\n" +#~ msgstr "%s : n'a pas pu initialiser la variable « %s »\n" -#~ msgid " \\dFp [PATTERN] list text search parsers (add \"+\" for more detail)\n" -#~ msgstr "" -#~ " 
\\dFp [MODÈLE] affiche la liste des analyseurs de la recherche de\n" -#~ " texte (ajouter « + » pour plus de détails)\n" +#~ msgid "Watch every %lds\t%s" +#~ msgstr "Vérifier chaque %lds\t%s" -#~ msgid " \\dFd [PATTERN] list text search dictionaries (add \"+\" for more detail)\n" -#~ msgstr "" -#~ " \\dFd [MODÈLE] affiche la liste des dictionnaires de la recherche\n" -#~ " de texte (ajouter « + » pour plus de détails)\n" +#~ msgid "Showing locale-adjusted numeric output." +#~ msgstr "Affichage de la sortie numérique adaptée à la locale." -#~ msgid " \\df [PATTERN] list functions (add \"+\" for more detail)\n" -#~ msgstr "" -#~ " \\df [MODÈLE] affiche la liste des fonctions (ajouter « + » pour\n" -#~ " plus de détails)\n" +#~ msgid "Showing only tuples." +#~ msgstr "Affichage des tuples seuls." -#~ msgid " \\db [PATTERN] list tablespaces (add \"+\" for more detail)\n" -#~ msgstr "" -#~ " \\db [MODÈLE] affiche la liste des tablespaces (ajouter « + » pour\n" -#~ " plus de détails)\n" +#~ msgid "could not get current user name: %s\n" +#~ msgstr "n'a pas pu obtenir le nom d'utilisateur courant : %s\n" -#~ msgid "" -#~ " \\d{t|i|s|v|S} [PATTERN] (add \"+\" for more detail)\n" -#~ " list tables/indexes/sequences/views/system tables\n" -#~ msgstr "" -#~ " \\d{t|i|s|v|S} [MODÈLE] (ajouter « + » pour plus de détails)\n" -#~ " affiche la liste des\n" -#~ " tables/index/séquences/vues/tables système\n" +#~ msgid "agg_name" +#~ msgstr "nom_d_agrégat" -#~ msgid "(1 row)" -#~ msgid_plural "(%lu rows)" -#~ msgstr[0] "(1 ligne)" -#~ msgstr[1] "(%lu lignes)" +#~ msgid "agg_type" +#~ msgstr "type_aggrégat" -#~ msgid " \"%s\" IN %s %s" -#~ msgstr " \"%s\" DANS %s %s" +#~ msgid "input_data_type" +#~ msgstr "type_de_données_en_entrée" -#~ msgid "" -#~ "VALUES ( expression [, ...] ) [, ...]\n" -#~ " [ ORDER BY sort_expression [ ASC | DESC | USING operator ] [, ...] ]\n" -#~ " [ LIMIT { count | ALL } ]\n" -#~ " [ OFFSET start [ ROW | ROWS ] ]\n" -#~ " [ FETCH { FIRST | NEXT } [ count ] { ROW | ROWS } ONLY ]" -#~ msgstr "" -#~ "VALUES ( expression [, ...] ) [, ...]\n" -#~ " [ ORDER BY expression_tri [ ASC | DESC | USING opérateur ] [, ...] ]\n" -#~ " [ LIMIT { total | ALL } ]\n" -#~ " [ OFFSET début [ ROW | ROWS ] ]\n" -#~ " [ FETCH { FIRST | NEXT } [ total ] { ROW | ROWS } ONLY ]" +#~ msgid "could not change directory to \"%s\"" +#~ msgstr "n'a pas pu accéder au répertoire « %s »" -#~ msgid "" -#~ "VACUUM [ FULL ] [ FREEZE ] [ VERBOSE ] [ table ]\n" -#~ "VACUUM [ FULL ] [ FREEZE ] [ VERBOSE ] ANALYZE [ table [ (column [, ...] ) ] ]" -#~ msgstr "" -#~ "VACUUM [ FULL ] [ FREEZE ] [ VERBOSE ] [ table ]\n" -#~ "VACUUM [ FULL ] [ FREEZE ] [ VERBOSE ] ANALYZE [ table [ (colonne [, ...] 
) ] ]" +#~ msgid "%s: pg_strdup: cannot duplicate null pointer (internal error)\n" +#~ msgstr "%s : pg_strdup : ne peut pas dupliquer le pointeur null (erreur interne)\n" + +#~ msgid " \\l[+] list all databases\n" +#~ msgstr " \\l[+] affiche la liste des bases de données\n" + +#~ msgid "\\%s: error\n" +#~ msgstr "\\%s : erreur\n" + +#~ msgid "\\copy: %s" +#~ msgstr "\\copy : %s" + +#~ msgid "\\copy: unexpected response (%d)\n" +#~ msgstr "\\copy : réponse inattendue (%d)\n" + +#~ msgid " --help show this help, then exit\n" +#~ msgstr " --help affiche cette aide, puis quitte\n" + +#~ msgid " --version output version information, then exit\n" +#~ msgstr " --version affiche la version, puis quitte\n" + +#~ msgid "contains support for command-line editing" +#~ msgstr "contient une gestion avancée de la ligne de commande" + +#~ msgid "data type" +#~ msgstr "type de données" + +#~ msgid "column" +#~ msgstr "colonne" + +#~ msgid "new_column" +#~ msgstr "nouvelle_colonne" + +#~ msgid "tablespace" +#~ msgstr "tablespace" + +#~ msgid " on host \"%s\"" +#~ msgstr " sur l'hôte « %s »" + +#~ msgid " at port \"%s\"" +#~ msgstr " sur le port « %s »" + +#~ msgid " as user \"%s\"" +#~ msgstr " comme utilisateur « %s »" + +#~ msgid "define a new constraint trigger" +#~ msgstr "définir une nouvelle contrainte de déclenchement" + +#~ msgid "Exclusion constraints:" +#~ msgstr "Contraintes d'exclusion :" + +#~ msgid "rolename" +#~ msgstr "nom_rôle" + +#~ msgid "number" +#~ msgstr "numéro" + +#~ msgid "ABORT [ WORK | TRANSACTION ]" +#~ msgstr "ABORT [ WORK | TRANSACTION ]" #~ msgid "" -#~ "UPDATE [ ONLY ] table [ [ AS ] alias ]\n" -#~ " SET { column = { expression | DEFAULT } |\n" -#~ " ( column [, ...] ) = ( { expression | DEFAULT } [, ...] ) } [, ...]\n" -#~ " [ FROM fromlist ]\n" -#~ " [ WHERE condition | WHERE CURRENT OF cursor_name ]\n" -#~ " [ RETURNING * | output_expression [ [ AS ] output_name ] [, ...] ]" +#~ "ALTER AGGREGATE name ( type [ , ... ] ) RENAME TO new_name\n" +#~ "ALTER AGGREGATE name ( type [ , ... ] ) OWNER TO new_owner\n" +#~ "ALTER AGGREGATE name ( type [ , ... ] ) SET SCHEMA new_schema" #~ msgstr "" -#~ "UPDATE [ ONLY ] table [ [ AS ] alias ]\n" -#~ " SET { colonne = { expression | DEFAULT } |\n" -#~ " ( colonne [, ...] ) = ( { expression | DEFAULT } [, ...] ) } [, ...]\n" -#~ " [ FROM liste_from ]\n" -#~ " [ WHERE condition | WHERE CURRENT OF nom_curseur ]\n" -#~ " [ RETURNING * | expression_sortie [ [ AS ] nom_sortie ] [, ...] ]" - -#~ msgid "UNLISTEN { name | * }" -#~ msgstr "UNLISTEN { nom | * }" +#~ "ALTER AGGREGATE nom ( type [ , ... ] ) RENAME TO nouveau_nom\n" +#~ "ALTER AGGREGATE nom ( type [ , ... ] ) OWNER TO nouveau_propriétaire\n" +#~ "ALTER AGGREGATE nom ( type [ , ... ] ) SET SCHEMA nouveau_schéma" #~ msgid "" -#~ "TRUNCATE [ TABLE ] [ ONLY ] name [, ... ]\n" -#~ " [ RESTART IDENTITY | CONTINUE IDENTITY ] [ CASCADE | RESTRICT ]" +#~ "ALTER CONVERSION name RENAME TO newname\n" +#~ "ALTER CONVERSION name OWNER TO newowner" #~ msgstr "" -#~ "TRUNCATE [ TABLE ] [ ONLY ] nom [, ... ]\n" -#~ " [ RESTART IDENTITY | CONTINUE IDENTITY ] [ CASCADE | RESTRICT ]" +#~ "ALTER CONVERSION nom RENAME TO nouveau_nom\n" +#~ "ALTER CONVERSION nom OWNER TO nouveau_propriétaire" #~ msgid "" -#~ "START TRANSACTION [ transaction_mode [, ...] ]\n" +#~ "ALTER DATABASE name [ [ WITH ] option [ ... 
] ]\n" #~ "\n" -#~ "where transaction_mode is one of:\n" +#~ "where option can be:\n" #~ "\n" -#~ " ISOLATION LEVEL { SERIALIZABLE | REPEATABLE READ | READ COMMITTED | READ UNCOMMITTED }\n" -#~ " READ WRITE | READ ONLY" -#~ msgstr "" -#~ "START TRANSACTION [ mode_transaction [, ...] ]\n" +#~ " CONNECTION LIMIT connlimit\n" #~ "\n" -#~ "où mode_transaction peut être :\n" +#~ "ALTER DATABASE name RENAME TO newname\n" #~ "\n" -#~ " ISOLATION LEVEL { SERIALIZABLE | REPEATABLE READ |\n" -#~ " READ COMMITTED | READ UNCOMMITTED }\n" -#~ " READ WRITE | READ ONLY" - -#~ msgid "" -#~ "SHOW name\n" -#~ "SHOW ALL" -#~ msgstr "" -#~ "SHOW nom\n" -#~ "SHOW ALL" - -#~ msgid "" -#~ "SET TRANSACTION transaction_mode [, ...]\n" -#~ "SET SESSION CHARACTERISTICS AS TRANSACTION transaction_mode [, ...]\n" +#~ "ALTER DATABASE name OWNER TO new_owner\n" #~ "\n" -#~ "where transaction_mode is one of:\n" +#~ "ALTER DATABASE name SET TABLESPACE new_tablespace\n" #~ "\n" -#~ " ISOLATION LEVEL { SERIALIZABLE | REPEATABLE READ | READ COMMITTED | READ UNCOMMITTED }\n" -#~ " READ WRITE | READ ONLY" +#~ "ALTER DATABASE name SET configuration_parameter { TO | = } { value | DEFAULT }\n" +#~ "ALTER DATABASE name SET configuration_parameter FROM CURRENT\n" +#~ "ALTER DATABASE name RESET configuration_parameter\n" +#~ "ALTER DATABASE name RESET ALL" #~ msgstr "" -#~ "SET TRANSACTION mode_transaction [, ...]\n" -#~ "SET SESSION CHARACTERISTICS AS TRANSACTION mode_transaction [, ...]\n" +#~ "ALTER DATABASE nom [ [ WITH ] option [ ... ] ]\n" #~ "\n" -#~ "où mode_transaction peut être :\n" +#~ "où option peut être:\n" #~ "\n" -#~ " ISOLATION LEVEL { SERIALIZABLE | REPEATABLE READ |\n" -#~ " READ COMMITTED | READ UNCOMMITTED }\n" -#~ " READ WRITE | READ ONLY" - -#~ msgid "" -#~ "SET [ SESSION | LOCAL ] SESSION AUTHORIZATION username\n" -#~ "SET [ SESSION | LOCAL ] SESSION AUTHORIZATION DEFAULT\n" -#~ "RESET SESSION AUTHORIZATION" -#~ msgstr "" -#~ "SET [ SESSION | LOCAL ] SESSION AUTHORIZATION nom_utilisateur\n" -#~ "SET [ SESSION | LOCAL ] SESSION AUTHORIZATION DEFAULT\n" -#~ "RESET SESSION AUTHORIZATION" +#~ " CONNECTION LIMIT limite_connexion\n" +#~ "\n" +#~ "ALTER DATABASE nom RENAME TO nouveau_nom\n" +#~ "\n" +#~ "ALTER DATABASE nom OWNER TO nouveau_propriétaire\n" +#~ "\n" +#~ "ALTER DATABASE nom SET TABLESPACE nouveau_tablespace\n" +#~ "\n" +#~ "ALTER DATABASE nom SET paramètre_configuration { TO | = } { valeur | DEFAULT }\n" +#~ "ALTER DATABASE nom SET paramètre_configuration FROM CURRENT\n" +#~ "ALTER DATABASE nom RESET paramètre_configuration\n" +#~ "ALTER DATABASE nom RESET ALL" #~ msgid "" -#~ "SET [ SESSION | LOCAL ] ROLE rolename\n" -#~ "SET [ SESSION | LOCAL ] ROLE NONE\n" -#~ "RESET ROLE" -#~ msgstr "" -#~ "SET [ SESSION | LOCAL ] ROLE nom_rôle\n" -#~ "SET [ SESSION | LOCAL ] ROLE NONE\n" -#~ "RESET ROLE" - -#~ msgid "SET CONSTRAINTS { ALL | name [, ...] } { DEFERRED | IMMEDIATE }" -#~ msgstr "SET CONSTRAINTS { ALL | nom [, ...] 
} { DEFERRED | IMMEDIATE }" - -#~ msgid "" -#~ "SET [ SESSION | LOCAL ] configuration_parameter { TO | = } { value | 'value' | DEFAULT }\n" -#~ "SET [ SESSION | LOCAL ] TIME ZONE { timezone | LOCAL | DEFAULT }" +#~ "ALTER DOMAIN name\n" +#~ " { SET DEFAULT expression | DROP DEFAULT }\n" +#~ "ALTER DOMAIN name\n" +#~ " { SET | DROP } NOT NULL\n" +#~ "ALTER DOMAIN name\n" +#~ " ADD domain_constraint\n" +#~ "ALTER DOMAIN name\n" +#~ " DROP CONSTRAINT constraint_name [ RESTRICT | CASCADE ]\n" +#~ "ALTER DOMAIN name\n" +#~ " OWNER TO new_owner \n" +#~ "ALTER DOMAIN name\n" +#~ " SET SCHEMA new_schema" #~ msgstr "" -#~ "SET [ SESSION | LOCAL ] paramètre { TO | = } { valeur | 'valeur' | DEFAULT }\n" -#~ "SET [ SESSION | LOCAL ] TIME ZONE { zone_horaire | LOCAL | DEFAULT }" +#~ "ALTER DOMAIN nom\n" +#~ " { SET DEFAULT expression | DROP DEFAULT }\n" +#~ "ALTER DOMAIN nom\n" +#~ " { SET | DROP } NOT NULL\n" +#~ "ALTER DOMAIN nom\n" +#~ " ADD contrainte_domaine\n" +#~ "ALTER DOMAIN nom\n" +#~ " DROP CONSTRAINT nom_contrainte [ RESTRICT | CASCADE ]\n" +#~ "ALTER DOMAIN nom\n" +#~ " OWNER TO nouveau_propriétaire \n" +#~ "ALTER DOMAIN nom\n" +#~ " SET SCHEMA nouveau_schéma" #~ msgid "" -#~ "[ WITH [ RECURSIVE ] with_query [, ...] ]\n" -#~ "SELECT [ ALL | DISTINCT [ ON ( expression [, ...] ) ] ]\n" -#~ " * | expression [ [ AS ] output_name ] [, ...]\n" -#~ " INTO [ TEMPORARY | TEMP ] [ TABLE ] new_table\n" -#~ " [ FROM from_item [, ...] ]\n" -#~ " [ WHERE condition ]\n" -#~ " [ GROUP BY expression [, ...] ]\n" -#~ " [ HAVING condition [, ...] ]\n" -#~ " [ WINDOW window_name AS ( window_definition ) [, ...] ]\n" -#~ " [ { UNION | INTERSECT | EXCEPT } [ ALL ] select ]\n" -#~ " [ ORDER BY expression [ ASC | DESC | USING operator ] [ NULLS { FIRST | LAST } ] [, ...] ]\n" -#~ " [ LIMIT { count | ALL } ]\n" -#~ " [ OFFSET start [ ROW | ROWS ] ]\n" -#~ " [ FETCH { FIRST | NEXT } [ count ] { ROW | ROWS } ONLY ]\n" -#~ " [ FOR { UPDATE | SHARE } [ OF table_name [, ...] ] [ NOWAIT ] [...] ]" +#~ "ALTER FOREIGN DATA WRAPPER name\n" +#~ " [ VALIDATOR valfunction | NO VALIDATOR ]\n" +#~ " [ OPTIONS ( [ ADD | SET | DROP ] option ['value'] [, ... ]) ]\n" +#~ "ALTER FOREIGN DATA WRAPPER name OWNER TO new_owner" #~ msgstr "" -#~ "[ WITH [ RECURSIVE ] requête_with [, ...] ]\n" -#~ "SELECT [ ALL | DISTINCT [ ON ( expression [, ...] ) ] ]\n" -#~ " * | expression [ [ AS ] nom_sortie ] [, ...]\n" -#~ " INTO [ TEMPORARY | TEMP ] [ TABLE ] nouvelle_table\n" -#~ " [ FROM élément_from [, ...] ]\n" -#~ " [ WHERE condition ]\n" -#~ " [ GROUP BY expression [, ...] ]\n" -#~ " [ HAVING condition [, ...] ]\n" -#~ " [ WINDOW nom_window AS ( définition_window ) [, ...] ]\n" -#~ " [ { UNION | INTERSECT | EXCEPT } [ ALL ] select ]\n" -#~ " [ ORDER BY expression [ ASC | DESC | USING opérateur ] [ NULLS { FIRST | LAST } ] [, ...] ]\n" -#~ " [ LIMIT { total | ALL } ]\n" -#~ " [ OFFSET début [ ROW | ROWS ] ]\n" -#~ " [ FETCH { FIRST | NEXT } [ total ] { ROW | ROWS } ONLY ]\n" -#~ " [ FOR { UPDATE | SHARE } [ OF nom_table [, ...] ] [ NOWAIT ] [...] ]" +#~ "ALTER FOREIGN DATA WRAPPER nom\n" +#~ " [ VALIDATOR fonction_validation | NO VALIDATOR ]\n" +#~ " [ OPTIONS ( [ ADD | SET | DROP ] option ['valeur'] [, ... ]) ]\n" +#~ "ALTER FOREIGN DATA WRAPPER nom OWNER TO nouveau_propriétaire" #~ msgid "" -#~ "[ WITH [ RECURSIVE ] with_query [, ...] ]\n" -#~ "SELECT [ ALL | DISTINCT [ ON ( expression [, ...] ) ] ]\n" -#~ " * | expression [ [ AS ] output_name ] [, ...]\n" -#~ " [ FROM from_item [, ...] 
]\n" -#~ " [ WHERE condition ]\n" -#~ " [ GROUP BY expression [, ...] ]\n" -#~ " [ HAVING condition [, ...] ]\n" -#~ " [ WINDOW window_name AS ( window_definition ) [, ...] ]\n" -#~ " [ { UNION | INTERSECT | EXCEPT } [ ALL ] select ]\n" -#~ " [ ORDER BY expression [ ASC | DESC | USING operator ] [ NULLS { FIRST | LAST } ] [, ...] ]\n" -#~ " [ LIMIT { count | ALL } ]\n" -#~ " [ OFFSET start [ ROW | ROWS ] ]\n" -#~ " [ FETCH { FIRST | NEXT } [ count ] { ROW | ROWS } ONLY ]\n" -#~ " [ FOR { UPDATE | SHARE } [ OF table_name [, ...] ] [ NOWAIT ] [...] ]\n" -#~ "\n" -#~ "where from_item can be one of:\n" -#~ "\n" -#~ " [ ONLY ] table_name [ * ] [ [ AS ] alias [ ( column_alias [, ...] ) ] ]\n" -#~ " ( select ) [ AS ] alias [ ( column_alias [, ...] ) ]\n" -#~ " with_query_name [ [ AS ] alias [ ( column_alias [, ...] ) ] ]\n" -#~ " function_name ( [ argument [, ...] ] ) [ AS ] alias [ ( column_alias [, ...] | column_definition [, ...] ) ]\n" -#~ " function_name ( [ argument [, ...] ] ) AS ( column_definition [, ...] )\n" -#~ " from_item [ NATURAL ] join_type from_item [ ON join_condition | USING ( join_column [, ...] ) ]\n" -#~ "\n" -#~ "and with_query is:\n" +#~ "ALTER FUNCTION name ( [ [ argmode ] [ argname ] argtype [, ...] ] )\n" +#~ " action [ ... ] [ RESTRICT ]\n" +#~ "ALTER FUNCTION name ( [ [ argmode ] [ argname ] argtype [, ...] ] )\n" +#~ " RENAME TO new_name\n" +#~ "ALTER FUNCTION name ( [ [ argmode ] [ argname ] argtype [, ...] ] )\n" +#~ " OWNER TO new_owner\n" +#~ "ALTER FUNCTION name ( [ [ argmode ] [ argname ] argtype [, ...] ] )\n" +#~ " SET SCHEMA new_schema\n" #~ "\n" -#~ " with_query_name [ ( column_name [, ...] ) ] AS ( select )\n" +#~ "where action is one of:\n" #~ "\n" -#~ "TABLE { [ ONLY ] table_name [ * ] | with_query_name }" +#~ " CALLED ON NULL INPUT | RETURNS NULL ON NULL INPUT | STRICT\n" +#~ " IMMUTABLE | STABLE | VOLATILE\n" +#~ " [ EXTERNAL ] SECURITY INVOKER | [ EXTERNAL ] SECURITY DEFINER\n" +#~ " COST execution_cost\n" +#~ " ROWS result_rows\n" +#~ " SET configuration_parameter { TO | = } { value | DEFAULT }\n" +#~ " SET configuration_parameter FROM CURRENT\n" +#~ " RESET configuration_parameter\n" +#~ " RESET ALL" #~ msgstr "" -#~ "[ WITH [ RECURSIVE ] requête_with [, ...] ]\n" -#~ "SELECT [ ALL | DISTINCT [ ON ( expression [, ...] ) ] ]\n" -#~ " * | expression [ [ AS ] nom_sortie ] [, ...]\n" -#~ " [ FROM élément_from [, ...] ]\n" -#~ " [ WHERE condition ]\n" -#~ " [ GROUP BY expression [, ...] ]\n" -#~ " [ HAVING condition [, ...] ]\n" -#~ " [ WINDOW nom_window AS ( définition_window ) [, ...] ]\n" -#~ " [ { UNION | INTERSECT | EXCEPT } [ ALL ] select ]\n" -#~ " [ ORDER BY expression [ ASC | DESC | USING opérateur ] [ NULLS { FIRST | LAST } ] [, ...] ]\n" -#~ " [ LIMIT { total | ALL } ]\n" -#~ " [ OFFSET début [ ROW | ROWS ] ]\n" -#~ " [ FETCH { FIRST | NEXT } [ total ] { ROW | ROWS } ONLY ]\n" -#~ " [ FOR { UPDATE | SHARE } [ OF nom_table [, ...] ] [ NOWAIT ] [...] ]\n" -#~ "\n" -#~ "avec élément_from faisant parti de :\n" +#~ "ALTER FUNCTION nom ( [ [ mode_arg ] [ nom_arg ] type_arg [, ...] ] )\n" +#~ " action [, ... ] [ RESTRICT ]\n" +#~ "ALTER FUNCTION nom ( [ [ mode_arg ] [ nom_arg ] type_arg [, ...] ] )\n" +#~ " RENAME TO nouveau_nom\n" +#~ "ALTER FUNCTION nom ( [ [ mode_arg ] [ nom_arg ] type_arg [, ...] ] )\n" +#~ " OWNER TO nouveau_proprietaire\n" +#~ "ALTER FUNCTION nom ( [ [ mode_arg ] [ nom_arg ] type_arg [, ...] ] )\n" +#~ " SET SCHEMA nouveau_schema\n" #~ "\n" -#~ " [ ONLY ] nom_table [ * ] [ [ AS ] alias [ ( alias_colonne [, ...] 
) ] ]\n" -#~ " ( select ) [ AS ] alias [ ( alias_colonne [, ...] ) ]\n" -#~ " nom_requête_with [ [ AS ] alias [ ( alias_colonne [, ...] ) ] ]\n" -#~ " nom_fonction ( [ argument [, ...] ] ) [ AS ] alias [ ( alias_colonne [, ...] | définition_colonne [, ...] ) ]\n" -#~ " nom_fonction ( [ argument [, ...] ] ) AS ( définition_colonne [, ...] )\n" -#~ " élément_from [ NATURAL ] type_jointure élément_from [ ON condition_jointure | USING ( colonne_jointure [, ...] ) ]\n" +#~ "où action peut être :\n" #~ "\n" -#~ "et requête_with est:\n" +#~ " CALLED ON NULL INPUT | RETURNS NULL ON NULL INPUT | STRICT\n" +#~ " IMMUTABLE | STABLE | VOLATILE\n" +#~ " [ EXTERNAL ] SECURITY INVOKER | [ EXTERNAL ] SECURITY DEFINER\n" +#~ " COST cout_execution\n" +#~ " ROWS lignes_resultats\n" +#~ " SET paramètre { TO | = } { valeur | DEFAULT }\n" +#~ " SET paramètre FROM CURRENT\n" +#~ " RESET paramètre\n" +#~ " RESET ALL" + +#~ msgid "" +#~ "ALTER GROUP groupname ADD USER username [, ... ]\n" +#~ "ALTER GROUP groupname DROP USER username [, ... ]\n" #~ "\n" -#~ " nom_requête_with [ ( nom_colonne [, ...] ) ] AS ( select )\n" +#~ "ALTER GROUP groupname RENAME TO newname" +#~ msgstr "" +#~ "ALTER GROUP nom_groupe ADD USER nom_utilisateur [, ... ]\n" +#~ "ALTER GROUP nom_groupe DROP USER nom_utilisateur [, ... ]\n" #~ "\n" -#~ "TABLE { [ ONLY ] nom_table [ * ] | nom_requête_with }" +#~ "ALTER GROUP nom_groupe RENAME TO nouveau_nom" -#~ msgid "ROLLBACK [ WORK | TRANSACTION ] TO [ SAVEPOINT ] savepoint_name" -#~ msgstr "ROLLBACK [ WORK | TRANSACTION ] TO [ SAVEPOINT ] nom_retour" +#~ msgid "" +#~ "ALTER INDEX name RENAME TO new_name\n" +#~ "ALTER INDEX name SET TABLESPACE tablespace_name\n" +#~ "ALTER INDEX name SET ( storage_parameter = value [, ... ] )\n" +#~ "ALTER INDEX name RESET ( storage_parameter [, ... ] )" +#~ msgstr "" +#~ "ALTER INDEX nom RENAME TO nouveau_nom\n" +#~ "ALTER INDEX nom SET TABLESPACE nom_tablespace\n" +#~ "ALTER INDEX nom SET ( paramètre_stockage = valeur [, ... ] )\n" +#~ "ALTER INDEX nom RESET ( paramètre_stockage [, ... ] )" -#~ msgid "ROLLBACK PREPARED transaction_id" -#~ msgstr "ROLLBACK PREPARED id_transaction" +#~ msgid "" +#~ "ALTER [ PROCEDURAL ] LANGUAGE name RENAME TO newname\n" +#~ "ALTER [ PROCEDURAL ] LANGUAGE name OWNER TO new_owner" +#~ msgstr "" +#~ "ALTER [ PROCEDURAL ] LANGUAGE nom RENAME TO nouveau_nom\n" +#~ "ALTER [ PROCEDURAL ] LANGUAGE nom OWNER TO nouveau_propriétaire" -#~ msgid "ROLLBACK [ WORK | TRANSACTION ]" -#~ msgstr "ROLLBACK [ WORK | TRANSACTION ]" +#~ msgid "ALTER OPERATOR name ( { lefttype | NONE } , { righttype | NONE } ) OWNER TO newowner" +#~ msgstr "" +#~ "ALTER OPERATOR nom ( { lefttype | NONE } , { righttype | NONE } )\n" +#~ " OWNER TO nouveau_propriétaire" #~ msgid "" -#~ "REVOKE [ GRANT OPTION FOR ]\n" -#~ " { { SELECT | INSERT | UPDATE | DELETE | TRUNCATE | REFERENCES | TRIGGER }\n" -#~ " [,...] | ALL [ PRIVILEGES ] }\n" -#~ " ON [ TABLE ] tablename [, ...]\n" -#~ " FROM { [ GROUP ] rolename | PUBLIC } [, ...]\n" -#~ " [ CASCADE | RESTRICT ]\n" -#~ "\n" -#~ "REVOKE [ GRANT OPTION FOR ]\n" -#~ " { { SELECT | INSERT | UPDATE | REFERENCES } ( column [, ...] )\n" -#~ " [,...] | ALL [ PRIVILEGES ] ( column [, ...] ) }\n" -#~ " ON [ TABLE ] tablename [, ...]\n" -#~ " FROM { [ GROUP ] rolename | PUBLIC } [, ...]\n" -#~ " [ CASCADE | RESTRICT ]\n" -#~ "\n" -#~ "REVOKE [ GRANT OPTION FOR ]\n" -#~ " { { USAGE | SELECT | UPDATE }\n" -#~ " [,...] 
| ALL [ PRIVILEGES ] }\n" -#~ " ON SEQUENCE sequencename [, ...]\n" -#~ " FROM { [ GROUP ] rolename | PUBLIC } [, ...]\n" -#~ " [ CASCADE | RESTRICT ]\n" +#~ "ALTER OPERATOR CLASS name USING index_method RENAME TO newname\n" +#~ "ALTER OPERATOR CLASS name USING index_method OWNER TO newowner" +#~ msgstr "" +#~ "ALTER OPERATOR CLASS nom USING méthode_indexation\n" +#~ " RENAME TO nouveau_nom\n" +#~ "ALTER OPERATOR CLASS nom USING méthode_indexation\n" +#~ " OWNER TO nouveau_propriétaire" + +#~ msgid "" +#~ "ALTER OPERATOR FAMILY name USING index_method ADD\n" +#~ " { OPERATOR strategy_number operator_name ( op_type, op_type )\n" +#~ " | FUNCTION support_number [ ( op_type [ , op_type ] ) ] funcname ( argument_type [, ...] )\n" +#~ " } [, ... ]\n" +#~ "ALTER OPERATOR FAMILY name USING index_method DROP\n" +#~ " { OPERATOR strategy_number ( op_type [ , op_type ] )\n" +#~ " | FUNCTION support_number ( op_type [ , op_type ] )\n" +#~ " } [, ... ]\n" +#~ "ALTER OPERATOR FAMILY name USING index_method RENAME TO newname\n" +#~ "ALTER OPERATOR FAMILY name USING index_method OWNER TO newowner" +#~ msgstr "" +#~ "ALTER OPERATOR FAMILY nom USING méthode_indexage ADD\n" +#~ " { OPERATOR numéro_stratégie nom_opérateur ( type_op, type_op ) \n" +#~ " | FUNCTION numéro_support [ ( type_op [ , type_op ] ) ]\n" +#~ " nom_fonction ( type_argument [, ...] )\n" +#~ " } [, ... ]\n" +#~ "ALTER OPERATOR FAMILY nom USING méthode_indexage DROP\n" +#~ " { OPERATOR numéro_stratégie ( type_op [ , type_op ] )\n" +#~ " | FUNCTION numéro_support ( type_op [ , type_op ] )\n" +#~ " } [, ... ]\n" +#~ "ALTER OPERATOR FAMILY nom USING méthode_indexage\n" +#~ " RENAME TO nouveau_nom\n" +#~ "ALTER OPERATOR FAMILY nom USING méthode_indexage\n" +#~ " OWNER TO nouveau_propriétaire" + +#~ msgid "" +#~ "ALTER ROLE name [ [ WITH ] option [ ... ] ]\n" #~ "\n" -#~ "REVOKE [ GRANT OPTION FOR ]\n" -#~ " { { CREATE | CONNECT | TEMPORARY | TEMP } [,...] | ALL [ PRIVILEGES ] }\n" -#~ " ON DATABASE dbname [, ...]\n" -#~ " FROM { [ GROUP ] rolename | PUBLIC } [, ...]\n" -#~ " [ CASCADE | RESTRICT ]\n" -#~ "\n" -#~ "REVOKE [ GRANT OPTION FOR ]\n" -#~ " { USAGE | ALL [ PRIVILEGES ] }\n" -#~ " ON FOREIGN DATA WRAPPER fdwname [, ...]\n" -#~ " FROM { [ GROUP ] rolename | PUBLIC } [, ...]\n" -#~ " [ CASCADE | RESTRICT ]\n" -#~ "\n" -#~ "REVOKE [ GRANT OPTION FOR ]\n" -#~ " { USAGE | ALL [ PRIVILEGES ] }\n" -#~ " ON FOREIGN SERVER servername [, ...]\n" -#~ " FROM { [ GROUP ] rolename | PUBLIC } [, ...]\n" -#~ " [ CASCADE | RESTRICT ]\n" -#~ "\n" -#~ "REVOKE [ GRANT OPTION FOR ]\n" -#~ " { EXECUTE | ALL [ PRIVILEGES ] }\n" -#~ " ON FUNCTION funcname ( [ [ argmode ] [ argname ] argtype [, ...] ] ) [, ...]\n" -#~ " FROM { [ GROUP ] rolename | PUBLIC } [, ...]\n" -#~ " [ CASCADE | RESTRICT ]\n" -#~ "\n" -#~ "REVOKE [ GRANT OPTION FOR ]\n" -#~ " { USAGE | ALL [ PRIVILEGES ] }\n" -#~ " ON LANGUAGE langname [, ...]\n" -#~ " FROM { [ GROUP ] rolename | PUBLIC } [, ...]\n" -#~ " [ CASCADE | RESTRICT ]\n" -#~ "\n" -#~ "REVOKE [ GRANT OPTION FOR ]\n" -#~ " { { CREATE | USAGE } [,...] 
| ALL [ PRIVILEGES ] }\n" -#~ " ON SCHEMA schemaname [, ...]\n" -#~ " FROM { [ GROUP ] rolename | PUBLIC } [, ...]\n" -#~ " [ CASCADE | RESTRICT ]\n" +#~ "where option can be:\n" +#~ " \n" +#~ " SUPERUSER | NOSUPERUSER\n" +#~ " | CREATEDB | NOCREATEDB\n" +#~ " | CREATEROLE | NOCREATEROLE\n" +#~ " | CREATEUSER | NOCREATEUSER\n" +#~ " | INHERIT | NOINHERIT\n" +#~ " | LOGIN | NOLOGIN\n" +#~ " | CONNECTION LIMIT connlimit\n" +#~ " | [ ENCRYPTED | UNENCRYPTED ] PASSWORD 'password'\n" +#~ " | VALID UNTIL 'timestamp' \n" #~ "\n" -#~ "REVOKE [ GRANT OPTION FOR ]\n" -#~ " { CREATE | ALL [ PRIVILEGES ] }\n" -#~ " ON TABLESPACE tablespacename [, ...]\n" -#~ " FROM { [ GROUP ] rolename | PUBLIC } [, ...]\n" -#~ " [ CASCADE | RESTRICT ]\n" +#~ "ALTER ROLE name RENAME TO newname\n" #~ "\n" -#~ "REVOKE [ ADMIN OPTION FOR ]\n" -#~ " role [, ...] FROM rolename [, ...]\n" -#~ " [ CASCADE | RESTRICT ]" +#~ "ALTER ROLE name SET configuration_parameter { TO | = } { value | DEFAULT }\n" +#~ "ALTER ROLE name SET configuration_parameter FROM CURRENT\n" +#~ "ALTER ROLE name RESET configuration_parameter\n" +#~ "ALTER ROLE name RESET ALL" #~ msgstr "" -#~ "REVOKE [ GRANT OPTION FOR ]\n" -#~ " { { SELECT | INSERT | UPDATE | DELETE | TRUNCATE | REFERENCES | TRIGGER }\n" -#~ " [,...] | ALL [ PRIVILEGES ] }\n" -#~ " ON [ TABLE ] nom_table [, ...]\n" -#~ " FROM { [ GROUP ] nom_rôle | PUBLIC } [, ...]\n" -#~ " [ CASCADE | RESTRICT ]\n" -#~ "\n" -#~ "REVOKE [ GRANT OPTION FOR ]\n" -#~ " { { SELECT | INSERT | UPDATE | REFERENCES } ( colonne [, ...] )\n" -#~ " [,...] | ALL [ PRIVILEGES ] ( colonne [, ...] ) }\n" -#~ " ON [ TABLE ] nom_table [, ...]\n" -#~ " FROM { [ GROUP ] nom_rôle | PUBLIC } [, ...]\n" -#~ " [ CASCADE | RESTRICT ]\n" -#~ "\n" -#~ "REVOKE [ GRANT OPTION FOR ]\n" -#~ " { { USAGE | SELECT | UPDATE }\n" -#~ " [,...] | ALL [ PRIVILEGES ] }\n" -#~ " ON SEQUENCE nom_séquence [, ...]\n" -#~ " FROM { [ GROUP ] nom_rôle | PUBLIC } [, ...]\n" -#~ " [ CASCADE | RESTRICT ]\n" -#~ "\n" -#~ "REVOKE [ GRANT OPTION FOR ]\n" -#~ " { { CREATE | CONNECT | TEMPORARY | TEMP } [,...] | ALL [ PRIVILEGES ] }\n" -#~ " ON DATABASE nom_base [, ...]\n" -#~ " FROM { [ GROUP ] nom_rôle | PUBLIC } [, ...]\n" -#~ " [ CASCADE | RESTRICT ]\n" -#~ "\n" -#~ "REVOKE [ GRANT OPTION FOR ]\n" -#~ " { USAGE | ALL [ PRIVILEGES ] }\n" -#~ " ON FOREIGN DATA WRAPPER nom_fdw [, ...]\n" -#~ " FROM { [ GROUP ] nom_rôle | PUBLIC } [, ...]\n" -#~ " [ CASCADE | RESTRICT ]\n" -#~ "\n" -#~ "REVOKE [ GRANT OPTION FOR ]\n" -#~ " { USAGE | ALL [ PRIVILEGES ] }\n" -#~ " ON FOREIGN SERVER nom_serveur [, ...]\n" -#~ " FROM { [ GROUP ] nom_rôle | PUBLIC } [, ...]\n" -#~ " [ CASCADE | RESTRICT ]\n" -#~ "\n" -#~ "REVOKE [ GRANT OPTION FOR ]\n" -#~ " { EXECUTE | ALL [ PRIVILEGES ] }\n" -#~ " ON FUNCTION nom_fonction ( [ [ mode_arg ] [ nom_arg ] type_arg [, ...] ] ) [, ...]\n" -#~ " FROM { [ GROUP ] nom_rôle | PUBLIC } [, ...]\n" -#~ " [ CASCADE | RESTRICT ]\n" -#~ "\n" -#~ "REVOKE [ GRANT OPTION FOR ]\n" -#~ " { USAGE | ALL [ PRIVILEGES ] }\n" -#~ " ON LANGUAGE nom_langage [, ...]\n" -#~ " FROM { [ GROUP ] nom_rôle | PUBLIC } [, ...]\n" -#~ " [ CASCADE | RESTRICT ]\n" +#~ "ALTER ROLE nom [ [ WITH ] option [ ... ] ]\n" #~ "\n" -#~ "REVOKE [ GRANT OPTION FOR ]\n" -#~ " { { CREATE | USAGE } [,...] 
| ALL [ PRIVILEGES ] }\n" -#~ " ON SCHEMA nom_schéma [, ...]\n" -#~ " FROM { [ GROUP ] nom_rôle | PUBLIC } [, ...]\n" -#~ " [ CASCADE | RESTRICT ]\n" +#~ "où option peut être :\n" +#~ " \n" +#~ " SUPERUSER | NOSUPERUSER\n" +#~ " | CREATEDB | NOCREATEDB\n" +#~ " | CREATEROLE | NOCREATEROLE\n" +#~ " | CREATEUSER | NOCREATEUSER\n" +#~ " | INHERIT | NOINHERIT\n" +#~ " | LOGIN | NOLOGIN\n" +#~ " | CONNECTION LIMIT limite_connexions\n" +#~ " | [ ENCRYPTED | UNENCRYPTED ] PASSWORD 'mot de passe'\n" +#~ " | VALID UNTIL 'timestamp' \n" #~ "\n" -#~ "REVOKE [ GRANT OPTION FOR ]\n" -#~ " { CREATE | ALL [ PRIVILEGES ] }\n" -#~ " ON TABLESPACE nom_tablespace [, ...]\n" -#~ " FROM { [ GROUP ] nom_rôle | PUBLIC } [, ...]\n" -#~ " [ CASCADE | RESTRICT ]\n" +#~ "ALTER ROLE nom RENAME TO nouveau_nom\n" #~ "\n" -#~ "REVOKE [ ADMIN OPTION FOR ]\n" -#~ " role [, ...] FROM nom_rôle [, ...]\n" -#~ " [ CASCADE | RESTRICT ]" - -#~ msgid "RELEASE [ SAVEPOINT ] savepoint_name" -#~ msgstr "RELEASE [ SAVEPOINT ] nom_retour" - -#~ msgid "REINDEX { INDEX | TABLE | DATABASE | SYSTEM } name [ FORCE ]" -#~ msgstr "REINDEX { INDEX | TABLE | DATABASE | SYSTEM } nom [ FORCE ]" - -#~ msgid "REASSIGN OWNED BY old_role [, ...] TO new_role" -#~ msgstr "REASSIGN OWNED BY ancien_role [, ...] TO nouveau_role" - -#~ msgid "PREPARE TRANSACTION transaction_id" -#~ msgstr "PREPARE TRANSACTION id_transaction" - -#~ msgid "PREPARE name [ ( datatype [, ...] ) ] AS statement" -#~ msgstr "PREPARE nom_plan [ ( type_données [, ...] ) ] AS instruction" - -#~ msgid "NOTIFY name" -#~ msgstr "NOTIFY nom" - -#~ msgid "MOVE [ direction { FROM | IN } ] cursorname" -#~ msgstr "MOVE [ direction { FROM | IN } ] nom_de_curseur" +#~ "ALTER ROLE nom SET paramètre { TO | = } { valeur | DEFAULT }\n" +#~ "ALTER ROLE name SET paramètre FROM CURRENT\n" +#~ "ALTER ROLE nom RESET paramètre\n" +#~ "ALTER ROLE name RESET ALL" #~ msgid "" -#~ "LOCK [ TABLE ] [ ONLY ] name [, ...] [ IN lockmode MODE ] [ NOWAIT ]\n" -#~ "\n" -#~ "where lockmode is one of:\n" -#~ "\n" -#~ " ACCESS SHARE | ROW SHARE | ROW EXCLUSIVE | SHARE UPDATE EXCLUSIVE\n" -#~ " | SHARE | SHARE ROW EXCLUSIVE | EXCLUSIVE | ACCESS EXCLUSIVE" +#~ "ALTER SCHEMA name RENAME TO newname\n" +#~ "ALTER SCHEMA name OWNER TO newowner" #~ msgstr "" -#~ "LOCK [ TABLE ] [ ONLY ] nom [, ...] 
[ IN mode_verrouillage MODE ] [ NOWAIT ]\n" -#~ "\n" -#~ "avec mode_verrouillage parmi :\n" -#~ "\n" -#~ " ACCESS SHARE | ROW SHARE | ROW EXCLUSIVE | SHARE UPDATE EXCLUSIVE\n" -#~ " | SHARE | SHARE ROW EXCLUSIVE | EXCLUSIVE | ACCESS EXCLUSIVE" - -#~ msgid "LOAD 'filename'" -#~ msgstr "LOAD 'nom_de_fichier'" +#~ "ALTER SCHEMA nom RENAME TO nouveau_nom\n" +#~ "ALTER SCHEMA nom OWNER TO nouveau_propriétaire" -#~ msgid "LISTEN name" -#~ msgstr "LISTEN nom" +#~ msgid "" +#~ "ALTER SEQUENCE name [ INCREMENT [ BY ] increment ]\n" +#~ " [ MINVALUE minvalue | NO MINVALUE ] [ MAXVALUE maxvalue | NO MAXVALUE ]\n" +#~ " [ START [ WITH ] start ]\n" +#~ " [ RESTART [ [ WITH ] restart ] ]\n" +#~ " [ CACHE cache ] [ [ NO ] CYCLE ]\n" +#~ " [ OWNED BY { table.column | NONE } ]\n" +#~ "ALTER SEQUENCE name OWNER TO new_owner\n" +#~ "ALTER SEQUENCE name RENAME TO new_name\n" +#~ "ALTER SEQUENCE name SET SCHEMA new_schema" +#~ msgstr "" +#~ "ALTER SEQUENCE nom [ INCREMENT [ BY ] incrément ]\n" +#~ " [ MINVALUE valeur_min | NO MINVALUE ] [ MAXVALUE valeur_max | NO MAXVALUE ]\n" +#~ " [ START [ WITH ] valeur_début ]\n" +#~ " [ RESTART [ [ WITH ] valeur_redémarrage ] ]\n" +#~ " [ CACHE cache ] [ [ NO ] CYCLE ]\n" +#~ " [ OWNED BY { table.colonne | NONE } ]\n" +#~ "ALTER SEQUENCE nom OWNER TO new_propriétaire\n" +#~ "ALTER SEQUENCE nom RENAME TO new_nom\n" +#~ "ALTER SEQUENCE nom SET SCHEMA new_schéma" #~ msgid "" -#~ "INSERT INTO table [ ( column [, ...] ) ]\n" -#~ " { DEFAULT VALUES | VALUES ( { expression | DEFAULT } [, ...] ) [, ...] | query }\n" -#~ " [ RETURNING * | output_expression [ [ AS ] output_name ] [, ...] ]" +#~ "ALTER SERVER servername [ VERSION 'newversion' ]\n" +#~ " [ OPTIONS ( [ ADD | SET | DROP ] option ['value'] [, ... ] ) ]\n" +#~ "ALTER SERVER servername OWNER TO new_owner" #~ msgstr "" -#~ "INSERT INTO table [ ( colonne [, ...] ) ]\n" -#~ " { DEFAULT VALUES | VALUES ( { expression | DEFAULT } [, ...] ) [, ...] | requête }\n" -#~ " [ RETURNING * | expression_sortie [ [ AS ] nom_sortie ] [, ...] ]" +#~ "ALTER SERVER nom [ VERSION 'nouvelleversion' ]\n" +#~ " [ OPTIONS ( [ ADD | SET | DROP ] option ['valeur'] [, ... ] ) ]\n" +#~ "ALTER SERVER nom OWNER TO nouveau_propriétaire" #~ msgid "" -#~ "GRANT { { SELECT | INSERT | UPDATE | DELETE | TRUNCATE | REFERENCES | TRIGGER }\n" -#~ " [,...] | ALL [ PRIVILEGES ] }\n" -#~ " ON [ TABLE ] tablename [, ...]\n" -#~ " TO { [ GROUP ] rolename | PUBLIC } [, ...] [ WITH GRANT OPTION ]\n" -#~ "\n" -#~ "GRANT { { SELECT | INSERT | UPDATE | REFERENCES } ( column [, ...] )\n" -#~ " [,...] | ALL [ PRIVILEGES ] ( column [, ...] ) }\n" -#~ " ON [ TABLE ] tablename [, ...]\n" -#~ " TO { [ GROUP ] rolename | PUBLIC } [, ...] [ WITH GRANT OPTION ]\n" -#~ "\n" -#~ "GRANT { { USAGE | SELECT | UPDATE }\n" -#~ " [,...] | ALL [ PRIVILEGES ] }\n" -#~ " ON SEQUENCE sequencename [, ...]\n" -#~ " TO { [ GROUP ] rolename | PUBLIC } [, ...] [ WITH GRANT OPTION ]\n" -#~ "\n" -#~ "GRANT { { CREATE | CONNECT | TEMPORARY | TEMP } [,...] | ALL [ PRIVILEGES ] }\n" -#~ " ON DATABASE dbname [, ...]\n" -#~ " TO { [ GROUP ] rolename | PUBLIC } [, ...] [ WITH GRANT OPTION ]\n" -#~ "\n" -#~ "GRANT { USAGE | ALL [ PRIVILEGES ] }\n" -#~ " ON FOREIGN DATA WRAPPER fdwname [, ...]\n" -#~ " TO { [ GROUP ] rolename | PUBLIC } [, ...] [ WITH GRANT OPTION ]\n" -#~ "\n" -#~ "GRANT { USAGE | ALL [ PRIVILEGES ] }\n" -#~ " ON FOREIGN SERVER servername [, ...]\n" -#~ " TO { [ GROUP ] rolename | PUBLIC } [, ...] 
[ WITH GRANT OPTION ]\n" -#~ "\n" -#~ "GRANT { EXECUTE | ALL [ PRIVILEGES ] }\n" -#~ " ON FUNCTION funcname ( [ [ argmode ] [ argname ] argtype [, ...] ] ) [, ...]\n" -#~ " TO { [ GROUP ] rolename | PUBLIC } [, ...] [ WITH GRANT OPTION ]\n" +#~ "ALTER TABLE [ ONLY ] name [ * ]\n" +#~ " action [, ... ]\n" +#~ "ALTER TABLE [ ONLY ] name [ * ]\n" +#~ " RENAME [ COLUMN ] column TO new_column\n" +#~ "ALTER TABLE name\n" +#~ " RENAME TO new_name\n" +#~ "ALTER TABLE name\n" +#~ " SET SCHEMA new_schema\n" #~ "\n" -#~ "GRANT { USAGE | ALL [ PRIVILEGES ] }\n" -#~ " ON LANGUAGE langname [, ...]\n" -#~ " TO { [ GROUP ] rolename | PUBLIC } [, ...] [ WITH GRANT OPTION ]\n" +#~ "where action is one of:\n" #~ "\n" -#~ "GRANT { { CREATE | USAGE } [,...] | ALL [ PRIVILEGES ] }\n" -#~ " ON SCHEMA schemaname [, ...]\n" -#~ " TO { [ GROUP ] rolename | PUBLIC } [, ...] [ WITH GRANT OPTION ]\n" +#~ " ADD [ COLUMN ] column type [ column_constraint [ ... ] ]\n" +#~ " DROP [ COLUMN ] column [ RESTRICT | CASCADE ]\n" +#~ " ALTER [ COLUMN ] column [ SET DATA ] TYPE type [ USING expression ]\n" +#~ " ALTER [ COLUMN ] column SET DEFAULT expression\n" +#~ " ALTER [ COLUMN ] column DROP DEFAULT\n" +#~ " ALTER [ COLUMN ] column { SET | DROP } NOT NULL\n" +#~ " ALTER [ COLUMN ] column SET STATISTICS integer\n" +#~ " ALTER [ COLUMN ] column SET STORAGE { PLAIN | EXTERNAL | EXTENDED | MAIN }\n" +#~ " ADD table_constraint\n" +#~ " DROP CONSTRAINT constraint_name [ RESTRICT | CASCADE ]\n" +#~ " DISABLE TRIGGER [ trigger_name | ALL | USER ]\n" +#~ " ENABLE TRIGGER [ trigger_name | ALL | USER ]\n" +#~ " ENABLE REPLICA TRIGGER trigger_name\n" +#~ " ENABLE ALWAYS TRIGGER trigger_name\n" +#~ " DISABLE RULE rewrite_rule_name\n" +#~ " ENABLE RULE rewrite_rule_name\n" +#~ " ENABLE REPLICA RULE rewrite_rule_name\n" +#~ " ENABLE ALWAYS RULE rewrite_rule_name\n" +#~ " CLUSTER ON index_name\n" +#~ " SET WITHOUT CLUSTER\n" +#~ " SET WITH OIDS\n" +#~ " SET WITHOUT OIDS\n" +#~ " SET ( storage_parameter = value [, ... ] )\n" +#~ " RESET ( storage_parameter [, ... ] )\n" +#~ " INHERIT parent_table\n" +#~ " NO INHERIT parent_table\n" +#~ " OWNER TO new_owner\n" +#~ " SET TABLESPACE new_tablespace" +#~ msgstr "" +#~ "ALTER TABLE [ ONLY ] nom [ * ]\n" +#~ " action [, ... ]\n" +#~ "ALTER TABLE [ ONLY ] nom [ * ]\n" +#~ " RENAME [ COLUMN ] colonne TO nouvelle_colonne\n" +#~ "ALTER TABLE nom\n" +#~ " RENAME TO nouveau_nom\n" +#~ "ALTER TABLE nom\n" +#~ " SET SCHEMA nouveau_schema\n" #~ "\n" -#~ "GRANT { CREATE | ALL [ PRIVILEGES ] }\n" -#~ " ON TABLESPACE tablespacename [, ...]\n" -#~ " TO { [ GROUP ] rolename | PUBLIC } [, ...] [ WITH GRANT OPTION ]\n" +#~ "où action peut être :\n" #~ "\n" -#~ "GRANT role [, ...] TO rolename [, ...] [ WITH ADMIN OPTION ]" +#~ " ADD [ COLUMN ] colonne type [ contrainte_colonne [ ... 
] ]\n" +#~ " DROP [ COLUMN ] colonne [ RESTRICT | CASCADE ]\n" +#~ " ALTER [ COLUMN ] colonne [ SET DATA ] TYPE type [ USING expression ]\n" +#~ " ALTER [ COLUMN ] colonne SET DEFAULT expression\n" +#~ " ALTER [ COLUMN ] colonne DROP DEFAULT\n" +#~ " ALTER [ COLUMN ] colonne { SET | DROP } NOT NULL\n" +#~ " ALTER [ COLUMN ] colonne SET STATISTICS entier\n" +#~ " ALTER [ COLUMN ] colonne SET STORAGE\n" +#~ " { PLAIN | EXTERNAL | EXTENDED | MAIN }\n" +#~ " ADD contrainte_table\n" +#~ " DROP CONSTRAINT nom_contrainte [ RESTRICT | CASCADE ]\n" +#~ " DISABLE TRIGGER [ nom_trigger | ALL | USER ]\n" +#~ " ENABLE TRIGGER [ nom_trigger | ALL | USER ]\n" +#~ " ENABLE REPLICA TRIGGER nom_trigger\n" +#~ " ENABLE ALWAYS TRIGGER nom_trigger\n" +#~ " DISABLE RULE nom_règle_réécriture\n" +#~ " ENABLE RULE nom_règle_réécriture\n" +#~ " ENABLE REPLICA RULE nom_règle_réécriture\n" +#~ " ENABLE ALWAYS RULE nom_règle_réécriture\n" +#~ " CLUSTER ON nom_index\n" +#~ " SET WITHOUT CLUSTER\n" +#~ " SET WITH OIDS\n" +#~ " SET WITHOUT OIDS\n" +#~ " SET ( paramètre_stockage = valeur [, ... ] )\n" +#~ " RESET ( paramètre_stockage [, ... ] )\n" +#~ " INHERIT table_parent\n" +#~ " NO INHERIT table_parent\n" +#~ " OWNER TO nouveau_propriétaire\n" +#~ " SET TABLESPACE nouveau_tablespace" + +#~ msgid "" +#~ "ALTER TABLESPACE name RENAME TO newname\n" +#~ "ALTER TABLESPACE name OWNER TO newowner" #~ msgstr "" -#~ "GRANT { { SELECT | INSERT | UPDATE | DELETE | TRUNCATE | REFERENCES | TRIGGER }\n" -#~ " [,...] | ALL [ PRIVILEGES ] }\n" -#~ " ON [ TABLE ] nom_table [, ...]\n" -#~ " TO { [ GROUP ] nom_rôle | PUBLIC } [, ...] [ WITH GRANT OPTION ]\n" +#~ "ALTER TABLESPACE nom RENAME TO nouveau_nom\n" +#~ "ALTER TABLESPACE nom OWNER TO nouveau_propriétaire" + +#~ msgid "" +#~ "ALTER TEXT SEARCH CONFIGURATION name\n" +#~ " ADD MAPPING FOR token_type [, ... ] WITH dictionary_name [, ... ]\n" +#~ "ALTER TEXT SEARCH CONFIGURATION name\n" +#~ " ALTER MAPPING FOR token_type [, ... ] WITH dictionary_name [, ... ]\n" +#~ "ALTER TEXT SEARCH CONFIGURATION name\n" +#~ " ALTER MAPPING REPLACE old_dictionary WITH new_dictionary\n" +#~ "ALTER TEXT SEARCH CONFIGURATION name\n" +#~ " ALTER MAPPING FOR token_type [, ... ] REPLACE old_dictionary WITH new_dictionary\n" +#~ "ALTER TEXT SEARCH CONFIGURATION name\n" +#~ " DROP MAPPING [ IF EXISTS ] FOR token_type [, ... ]\n" +#~ "ALTER TEXT SEARCH CONFIGURATION name RENAME TO newname\n" +#~ "ALTER TEXT SEARCH CONFIGURATION name OWNER TO newowner" +#~ msgstr "" +#~ "ALTER TEXT SEARCH CONFIGURATION nom\n" +#~ " ADD MAPPING FOR type_jeton [, ... ] WITH nom_dictionnaire [, ... ]\n" +#~ "ALTER TEXT SEARCH CONFIGURATION nom\n" +#~ " ALTER MAPPING FOR type_jeton [, ... ] WITH nom_dictionnaire [, ... ]\n" +#~ "ALTER TEXT SEARCH CONFIGURATION nom\n" +#~ " ALTER MAPPING REPLACE ancien_dictionnaire WITH nouveau_dictionnaire\n" +#~ "ALTER TEXT SEARCH CONFIGURATION nom\n" +#~ " ALTER MAPPING FOR type_jeton [, ... ]\n" +#~ " REPLACE ancien_dictionnaire WITH nouveau_dictionnaire\n" +#~ "ALTER TEXT SEARCH CONFIGURATION nom\n" +#~ " DROP MAPPING [ IF EXISTS ] FOR type_jeton [, ... ]\n" +#~ "ALTER TEXT SEARCH CONFIGURATION nom RENAME TO nouveau_nom\n" +#~ "ALTER TEXT SEARCH CONFIGURATION nom OWNER TO nouveau_propriétaire" + +#~ msgid "" +#~ "ALTER TEXT SEARCH DICTIONARY name (\n" +#~ " option [ = value ] [, ... 
]\n" +#~ ")\n" +#~ "ALTER TEXT SEARCH DICTIONARY name RENAME TO newname\n" +#~ "ALTER TEXT SEARCH DICTIONARY name OWNER TO newowner" +#~ msgstr "" +#~ "ALTER TEXT SEARCH DICTIONARY nom (\n" +#~ " option [ = valeur ] [, ... ]\n" +#~ ")\n" +#~ "ALTER TEXT SEARCH DICTIONARY nom RENAME TO nouveau_nom\n" +#~ "ALTER TEXT SEARCH DICTIONARY nom OWNER TO nouveau_propriétaire" + +#~ msgid "ALTER TEXT SEARCH PARSER name RENAME TO newname" +#~ msgstr "ALTER TEXT SEARCH PARSER nom RENAME TO nouveau_nom" + +#~ msgid "ALTER TEXT SEARCH TEMPLATE name RENAME TO newname" +#~ msgstr "ALTER TEXT SEARCH TEMPLATE nom RENAME TO nouveau_nom" + +#~ msgid "ALTER TRIGGER name ON table RENAME TO newname" +#~ msgstr "ALTER TRIGGER nom ON table RENAME TO nouveau_nom" + +#~ msgid "" +#~ "ALTER TYPE name RENAME TO new_name\n" +#~ "ALTER TYPE name OWNER TO new_owner \n" +#~ "ALTER TYPE name SET SCHEMA new_schema" +#~ msgstr "" +#~ "ALTER TYPE nom RENAME TO nouveau_nom\n" +#~ "ALTER TYPE nom OWNER TO nouveau_propriétaire\n" +#~ "ALTER TYPE nom SET SCHEMA nouveau_schéma" + +#~ msgid "" +#~ "ALTER USER name [ [ WITH ] option [ ... ] ]\n" #~ "\n" -#~ "GRANT { { SELECT | INSERT | UPDATE | REFERENCES } ( colonne [, ...] )\n" -#~ " [,...] | ALL [ PRIVILEGES ] ( colonne [, ...] ) }\n" -#~ " ON [ TABLE ] nom_table [, ...]\n" -#~ " TO { [ GROUP ] nom_rôle | PUBLIC } [, ...] [ WITH GRANT OPTION ]\n" +#~ "where option can be:\n" +#~ " \n" +#~ " SUPERUSER | NOSUPERUSER\n" +#~ " | CREATEDB | NOCREATEDB\n" +#~ " | CREATEROLE | NOCREATEROLE\n" +#~ " | CREATEUSER | NOCREATEUSER\n" +#~ " | INHERIT | NOINHERIT\n" +#~ " | LOGIN | NOLOGIN\n" +#~ " | CONNECTION LIMIT connlimit\n" +#~ " | [ ENCRYPTED | UNENCRYPTED ] PASSWORD 'password'\n" +#~ " | VALID UNTIL 'timestamp' \n" #~ "\n" -#~ "GRANT { { USAGE | SELECT | UPDATE }\n" -#~ " [,...] | ALL [ PRIVILEGES ] }\n" -#~ " ON SEQUENCE nom_séquence [, ...]\n" -#~ " TO { [ GROUP ] nom_rôle | PUBLIC } [, ...] [ WITH GRANT OPTION ]\n" +#~ "ALTER USER name RENAME TO newname\n" #~ "\n" -#~ "GRANT { { CREATE | CONNECT | TEMPORARY | TEMP } [,...] | ALL [ PRIVILEGES ] }\n" -#~ " ON DATABASE nom_base [, ...]\n" -#~ " TO { [ GROUP ] nom_rôle | PUBLIC } [, ...] [ WITH GRANT OPTION ]\n" +#~ "ALTER USER name SET configuration_parameter { TO | = } { value | DEFAULT }\n" +#~ "ALTER USER name SET configuration_parameter FROM CURRENT\n" +#~ "ALTER USER name RESET configuration_parameter\n" +#~ "ALTER USER name RESET ALL" +#~ msgstr "" +#~ "ALTER USER nom [ [ WITH ] option [ ... ] ]\n" #~ "\n" -#~ "GRANT { USAGE | ALL [ PRIVILEGES ] }\n" -#~ " ON FOREIGN DATA WRAPPER nomfdw [, ...]\n" -#~ " TO { [ GROUP ] nom_rôle | PUBLIC } [, ...] [ WITH GRANT OPTION ]\n" +#~ "où option peut être :\n" +#~ " \n" +#~ " SUPERUSER | NOSUPERUSER\n" +#~ " | CREATEDB | NOCREATEDB\n" +#~ " | CREATEROLE | NOCREATEROLE\n" +#~ " | CREATEUSER | NOCREATEUSER\n" +#~ " | INHERIT | NOINHERIT\n" +#~ " | LOGIN | NOLOGIN\n" +#~ " | CONNECTION LIMIT limite_connexion\n" +#~ " | [ ENCRYPTED | UNENCRYPTED ] PASSWORD 'motdepasse'\n" +#~ " | VALID UNTIL 'timestamp' \n" #~ "\n" -#~ "GRANT { USAGE | ALL [ PRIVILEGES ] }\n" -#~ " ON FOREIGN SERVER nom_serveur [, ...]\n" -#~ " TO { [ GROUP ] nom_rôle | PUBLIC } [, ...] [ WITH GRANT OPTION ]\n" +#~ "ALTER USER nom RENAME TO nouveau_nom\n" #~ "\n" -#~ "GRANT { EXECUTE | ALL [ PRIVILEGES ] }\n" -#~ " ON FUNCTION nom_fonction ( [ [ mode_arg ] [ nom_arg ] type_arg [, ...] ] ) [, ...]\n" -#~ " TO { [ GROUP ] nom_rôle | PUBLIC } [, ...] 
[ WITH GRANT OPTION ]\n" +#~ "ALTER USER nom SET paramètre { TO | = } { valeur | DEFAULT }\n" +#~ "ALTER USER name SET paramètre FROM CURRENT\n" +#~ "ALTER USER nom RESET paramètre\n" +#~ "ALTER USER name RESET ALL" + +#~ msgid "" +#~ "ALTER USER MAPPING FOR { username | USER | CURRENT_USER | PUBLIC }\n" +#~ " SERVER servername\n" +#~ " OPTIONS ( [ ADD | SET | DROP ] option ['value'] [, ... ] )" +#~ msgstr "" +#~ "ALTER USER MAPPING FOR { nom_utilisateur | USER | CURRENT_USER | PUBLIC }\n" +#~ " SERVER nom_serveur\n" +#~ " OPTIONS ( [ ADD | SET | DROP ] option ['valeur'] [, ... ] )" + +#~ msgid "" +#~ "ALTER VIEW name ALTER [ COLUMN ] column SET DEFAULT expression\n" +#~ "ALTER VIEW name ALTER [ COLUMN ] column DROP DEFAULT\n" +#~ "ALTER VIEW name OWNER TO new_owner\n" +#~ "ALTER VIEW name RENAME TO new_name\n" +#~ "ALTER VIEW name SET SCHEMA new_schema" +#~ msgstr "" +#~ "ALTER VIEW nom ALTER [ COLUMN ] colonne SET DEFAULT expression\n" +#~ "ALTER VIEW nom ALTER [ COLUMN ] colonne DROP DEFAULT\n" +#~ "ALTER VIEW nom OWNER TO nouveau_propriétaire\n" +#~ "ALTER VIEW nom RENAME TO nouveau_nom\n" +#~ "ALTER VIEW nom SET SCHEMA nouveau_schéma" + +#~ msgid "ANALYZE [ VERBOSE ] [ table [ ( column [, ...] ) ] ]" +#~ msgstr "ANALYZE [ VERBOSE ] [ table [ ( colonne [, ...] ) ] ]" + +#~ msgid "" +#~ "BEGIN [ WORK | TRANSACTION ] [ transaction_mode [, ...] ]\n" #~ "\n" -#~ "GRANT { USAGE | ALL [ PRIVILEGES ] }\n" -#~ " ON LANGUAGE nom_langage [, ...]\n" -#~ " TO { [ GROUP ] nom_rôle | PUBLIC } [, ...] [ WITH GRANT OPTION ]\n" +#~ "where transaction_mode is one of:\n" #~ "\n" -#~ "GRANT { { CREATE | USAGE } [,...] | ALL [ PRIVILEGES ] }\n" -#~ " ON SCHEMA nom_schéma [, ...]\n" -#~ " TO { [ GROUP ] nom_rôle | PUBLIC } [, ...] [ WITH GRANT OPTION ]\n" +#~ " ISOLATION LEVEL { SERIALIZABLE | REPEATABLE READ | READ COMMITTED | READ UNCOMMITTED }\n" +#~ " READ WRITE | READ ONLY" +#~ msgstr "" +#~ "BEGIN [ WORK | TRANSACTION ] [ transaction_mode [, ...] ]\n" #~ "\n" -#~ "GRANT { CREATE | ALL [ PRIVILEGES ] }\n" -#~ " ON TABLESPACE nom_tablespace [, ...]\n" -#~ " TO { [ GROUP ] nom_rôle | PUBLIC } [, ...] [ WITH GRANT OPTION ]\n" +#~ "où transaction_mode peut être :\n" #~ "\n" -#~ "GRANT rôle [, ...] TO nom_rôle [, ...] 
[ WITH ADMIN OPTION ]" +#~ " ISOLATION LEVEL { SERIALIZABLE | REPEATABLE READ |\n" +#~ " READ COMMITTED | READ UNCOMMITTED }\n" +#~ " READ WRITE | READ ONLY" + +#~ msgid "CHECKPOINT" +#~ msgstr "CHECKPOINT" + +#~ msgid "CLOSE { name | ALL }" +#~ msgstr "CLOSE { nom | ALL }" #~ msgid "" -#~ "FETCH [ direction { FROM | IN } ] cursorname\n" -#~ "\n" -#~ "where direction can be empty or one of:\n" -#~ "\n" -#~ " NEXT\n" -#~ " PRIOR\n" -#~ " FIRST\n" -#~ " LAST\n" -#~ " ABSOLUTE count\n" -#~ " RELATIVE count\n" -#~ " count\n" -#~ " ALL\n" -#~ " FORWARD\n" -#~ " FORWARD count\n" -#~ " FORWARD ALL\n" -#~ " BACKWARD\n" -#~ " BACKWARD count\n" -#~ " BACKWARD ALL" +#~ "CLUSTER [VERBOSE] tablename [ USING indexname ]\n" +#~ "CLUSTER [VERBOSE]" #~ msgstr "" -#~ "FETCH [ direction { FROM | IN } ] nom_curseur\n" -#~ "\n" -#~ "sans préciser de direction ou en choissant une des directions suivantes :\n" -#~ "\n" -#~ " NEXT\n" -#~ " PRIOR\n" -#~ " FIRST\n" -#~ " LAST\n" -#~ " ABSOLUTE nombre\n" -#~ " RELATIVE nombre\n" -#~ " count\n" -#~ " ALL\n" -#~ " FORWARD\n" -#~ " FORWARD nombre\n" -#~ " FORWARD ALL\n" -#~ " BACKWARD\n" -#~ " BACKWARD nombre\n" -#~ " BACKWARD ALL" - -#~ msgid "EXPLAIN [ ANALYZE ] [ VERBOSE ] statement" -#~ msgstr "EXPLAIN [ ANALYZE ] [ VERBOSE ] instruction" - -#~ msgid "EXECUTE name [ ( parameter [, ...] ) ]" -#~ msgstr "EXECUTE nom_plan [ ( paramètre [, ...] ) ]" - -#~ msgid "END [ WORK | TRANSACTION ]" -#~ msgstr "END [ WORK | TRANSACTION ]" - -#~ msgid "DROP VIEW [ IF EXISTS ] name [, ...] [ CASCADE | RESTRICT ]" -#~ msgstr "DROP VIEW [IF EXISTS ] nom [, ...] [ CASCADE | RESTRICT ]" - -#~ msgid "DROP USER MAPPING [ IF EXISTS ] FOR { username | USER | CURRENT_USER | PUBLIC } SERVER servername" -#~ msgstr "DROP USER MAPPING [ IF EXISTS ] FOR { nomutilisateur | USER | CURRENT_USER | PUBLIC } SERVER nomserveur" - -#~ msgid "DROP USER [ IF EXISTS ] name [, ...]" -#~ msgstr "DROP USER [IF EXISTS ] nom [, ...]" - -#~ msgid "DROP TYPE [ IF EXISTS ] name [, ...] [ CASCADE | RESTRICT ]" -#~ msgstr "DROP TYPE [IF EXISTS ] nom [, ...] [ CASCADE | RESTRICT ]" - -#~ msgid "DROP TRIGGER [ IF EXISTS ] name ON table [ CASCADE | RESTRICT ]" -#~ msgstr "DROP TRIGGER [IF EXISTS ] nom ON table [ CASCADE | RESTRICT ]" - -#~ msgid "DROP TEXT SEARCH TEMPLATE [ IF EXISTS ] name [ CASCADE | RESTRICT ]" -#~ msgstr "DROP TEXT SEARCH TEMPLATE [ IF EXISTS ] nom [ CASCADE | RESTRICT ]" - -#~ msgid "DROP TEXT SEARCH PARSER [ IF EXISTS ] name [ CASCADE | RESTRICT ]" -#~ msgstr "DROP TEXT SEARCH PARSER [ IF EXISTS ] nom [ CASCADE | RESTRICT ]" - -#~ msgid "DROP TEXT SEARCH DICTIONARY [ IF EXISTS ] name [ CASCADE | RESTRICT ]" -#~ msgstr "DROP TEXT SEARCH DICTIONARY [ IF EXISTS ] nom [ CASCADE | RESTRICT ]" - -#~ msgid "DROP TEXT SEARCH CONFIGURATION [ IF EXISTS ] name [ CASCADE | RESTRICT ]" -#~ msgstr "DROP TEXT SEARCH CONFIGURATION [ IF EXISTS ] nom [ CASCADE | RESTRICT ]" - -#~ msgid "DROP TABLESPACE [ IF EXISTS ] tablespacename" -#~ msgstr "DROP TABLESPACE [IF EXISTS ] nom_tablespace" - -#~ msgid "DROP TABLE [ IF EXISTS ] name [, ...] [ CASCADE | RESTRICT ]" -#~ msgstr "DROP TABLE [IF EXISTS ] nom [, ...] [ CASCADE | RESTRICT ]" - -#~ msgid "DROP SERVER [ IF EXISTS ] servername [ CASCADE | RESTRICT ]" -#~ msgstr "DROP SERVER [ IF EXISTS ] nom [ CASCADE | RESTRICT ]" - -#~ msgid "DROP SEQUENCE [ IF EXISTS ] name [, ...] [ CASCADE | RESTRICT ]" -#~ msgstr "DROP SEQUENCE [IF EXISTS ] nom [, ...] [ CASCADE | RESTRICT ]" - -#~ msgid "DROP SCHEMA [ IF EXISTS ] name [, ...] 
[ CASCADE | RESTRICT ]" -#~ msgstr "DROP SCHEMA [IF EXISTS ] nom [, ...] [ CASCADE | RESTRICT ]" +#~ "CLUSTER [VERBOSE] nom_table [ USING nom_index ]\n" +#~ "CLUSTER [VERBOSE]" -#~ msgid "DROP RULE [ IF EXISTS ] name ON relation [ CASCADE | RESTRICT ]" -#~ msgstr "DROP RULE [IF EXISTS ] nom ON relation [ CASCADE | RESTRICT ]" +#~ msgid "" +#~ "COMMENT ON\n" +#~ "{\n" +#~ " TABLE object_name |\n" +#~ " COLUMN table_name.column_name |\n" +#~ " AGGREGATE agg_name (agg_type [, ...] ) |\n" +#~ " CAST (sourcetype AS targettype) |\n" +#~ " CONSTRAINT constraint_name ON table_name |\n" +#~ " CONVERSION object_name |\n" +#~ " DATABASE object_name |\n" +#~ " DOMAIN object_name |\n" +#~ " FUNCTION func_name ( [ [ argmode ] [ argname ] argtype [, ...] ] ) |\n" +#~ " INDEX object_name |\n" +#~ " LARGE OBJECT large_object_oid |\n" +#~ " OPERATOR op (leftoperand_type, rightoperand_type) |\n" +#~ " OPERATOR CLASS object_name USING index_method |\n" +#~ " OPERATOR FAMILY object_name USING index_method |\n" +#~ " [ PROCEDURAL ] LANGUAGE object_name |\n" +#~ " ROLE object_name |\n" +#~ " RULE rule_name ON table_name |\n" +#~ " SCHEMA object_name |\n" +#~ " SEQUENCE object_name |\n" +#~ " TABLESPACE object_name |\n" +#~ " TEXT SEARCH CONFIGURATION object_name |\n" +#~ " TEXT SEARCH DICTIONARY object_name |\n" +#~ " TEXT SEARCH PARSER object_name |\n" +#~ " TEXT SEARCH TEMPLATE object_name |\n" +#~ " TRIGGER trigger_name ON table_name |\n" +#~ " TYPE object_name |\n" +#~ " VIEW object_name\n" +#~ "} IS 'text'" +#~ msgstr "" +#~ "COMMENT ON\n" +#~ "{\n" +#~ " TABLE nom_objet |\n" +#~ " COLUMN nom_table.nom_colonne |\n" +#~ " AGGREGATE nom_agg (type_agg [, ...] ) |\n" +#~ " CAST (type_source AS type_cible) |\n" +#~ " CONSTRAINT nom_contrainte ON nom_table |\n" +#~ " CONVERSION nom_objet |\n" +#~ " DATABASE nom_objet |\n" +#~ " DOMAIN nom_objet |\n" +#~ " FUNCTION nom_fonction ( [ [ mode_arg ] [ nom_arg ] type_arg [, ...] ] ) |\n" +#~ " INDEX nom_objet |\n" +#~ " LARGE OBJECT oid_LO |\n" +#~ " OPERATOR op (type_operande_gauche, type_operande_droit) |\n" +#~ " OPERATOR CLASS nom_objet USING methode_indexage |\n" +#~ " OPERATOR FAMILY nom_objet USING methode_indexage |\n" +#~ " [ PROCEDURAL ] LANGUAGE nom_objet |\n" +#~ " ROLE nom_objet |\n" +#~ " RULE nom_regle ON nom_table |\n" +#~ " SCHEMA nom_objet |\n" +#~ " SEQUENCE nom_objet |\n" +#~ " TABLESPACE nom_objet |\n" +#~ " TEXT SEARCH CONFIGURATION nom_objet |\n" +#~ " TEXT SEARCH DICTIONARY nom_objet |\n" +#~ " TEXT SEARCH PARSER nom_objet |\n" +#~ " TEXT SEARCH TEMPLATE nom_objet |\n" +#~ " TRIGGER nom_trigger ON nom_objet |\n" +#~ " TYPE nom_objet |\n" +#~ " VIEW nom_objet\n" +#~ "} IS 'text'" -#~ msgid "DROP ROLE [ IF EXISTS ] name [, ...]" -#~ msgstr "DROP ROLE [IF EXISTS ] nom [, ...]" +#~ msgid "COMMIT [ WORK | TRANSACTION ]" +#~ msgstr "COMMIT [ WORK | TRANSACTION ]" -#~ msgid "DROP OWNED BY name [, ...] [ CASCADE | RESTRICT ]" -#~ msgstr "DROP OWNED BY nom [, ...] [ CASCADE | RESTRICT ]" +#~ msgid "COMMIT PREPARED transaction_id" +#~ msgstr "COMMIT PREPARED id_transaction" -#~ msgid "DROP OPERATOR FAMILY [ IF EXISTS ] name USING index_method [ CASCADE | RESTRICT ]" +#~ msgid "" +#~ "COPY tablename [ ( column [, ...] ) ]\n" +#~ " FROM { 'filename' | STDIN }\n" +#~ " [ [ WITH ] \n" +#~ " [ BINARY ]\n" +#~ " [ OIDS ]\n" +#~ " [ DELIMITER [ AS ] 'delimiter' ]\n" +#~ " [ NULL [ AS ] 'null string' ]\n" +#~ " [ CSV [ HEADER ]\n" +#~ " [ QUOTE [ AS ] 'quote' ] \n" +#~ " [ ESCAPE [ AS ] 'escape' ]\n" +#~ " [ FORCE NOT NULL column [, ...] 
]\n" +#~ "\n" +#~ "COPY { tablename [ ( column [, ...] ) ] | ( query ) }\n" +#~ " TO { 'filename' | STDOUT }\n" +#~ " [ [ WITH ] \n" +#~ " [ BINARY ]\n" +#~ " [ OIDS ]\n" +#~ " [ DELIMITER [ AS ] 'delimiter' ]\n" +#~ " [ NULL [ AS ] 'null string' ]\n" +#~ " [ CSV [ HEADER ]\n" +#~ " [ QUOTE [ AS ] 'quote' ] \n" +#~ " [ ESCAPE [ AS ] 'escape' ]\n" +#~ " [ FORCE QUOTE column [, ...] ]" #~ msgstr "" -#~ "DROP OPERATOR FAMILY [IF EXISTS ] nom\n" -#~ " USING méthode_indexage [ CASCADE | RESTRICT ]" +#~ "COPY nom_table [ ( colonne [, ...] ) ]\n" +#~ " FROM { 'nom_fichier' | STDIN }\n" +#~ " [ [ WITH ] \n" +#~ " [ BINARY ]\n" +#~ " [ OIDS ]\n" +#~ " [ DELIMITER [ AS ] 'délimiteur' ]\n" +#~ " [ NULL [ AS ] 'chaîne null' ]\n" +#~ " [ CSV [ HEADER ]\n" +#~ " [ QUOTE [ AS ] 'guillemet' ] \n" +#~ " [ ESCAPE [ AS ] 'échappement' ]\n" +#~ " [ FORCE NOT NULL colonne [, ...] ]\n" +#~ "\n" +#~ "COPY { nom_table [ ( colonne [, ...] ) ] | ( requête ) }\n" +#~ " TO { 'nom_fichier' | STDOUT }\n" +#~ " [ [ WITH ] \n" +#~ " [ BINARY ]\n" +#~ " [ OIDS ]\n" +#~ " [ DELIMITER [ AS ] 'délimiteur' ]\n" +#~ " [ NULL [ AS ] 'chaîne null' ]\n" +#~ " [ CSV [ HEADER ]\n" +#~ " [ QUOTE [ AS ] 'guillemet' ] \n" +#~ " [ ESCAPE [ AS ] 'échappement' ]\n" +#~ " [ FORCE QUOTE colonne [, ...] ]" -#~ msgid "DROP OPERATOR CLASS [ IF EXISTS ] name USING index_method [ CASCADE | RESTRICT ]" +#~ msgid "" +#~ "CREATE AGGREGATE name ( input_data_type [ , ... ] ) (\n" +#~ " SFUNC = sfunc,\n" +#~ " STYPE = state_data_type\n" +#~ " [ , FINALFUNC = ffunc ]\n" +#~ " [ , INITCOND = initial_condition ]\n" +#~ " [ , SORTOP = sort_operator ]\n" +#~ ")\n" +#~ "\n" +#~ "or the old syntax\n" +#~ "\n" +#~ "CREATE AGGREGATE name (\n" +#~ " BASETYPE = base_type,\n" +#~ " SFUNC = sfunc,\n" +#~ " STYPE = state_data_type\n" +#~ " [ , FINALFUNC = ffunc ]\n" +#~ " [ , INITCOND = initial_condition ]\n" +#~ " [ , SORTOP = sort_operator ]\n" +#~ ")" #~ msgstr "" -#~ "DROP OPERATOR CLASS [IF EXISTS ] nom\n" -#~ " USING méthode_indexage [ CASCADE | RESTRICT ]" +#~ "CREATE AGGREGATE nom ( type_données_en_entrée [ , ... ] ) (\n" +#~ " SFUNC = sfonction,\n" +#~ " STYPE = type_données_état\n" +#~ " [ , FINALFUNC = fonction_f ]\n" +#~ " [ , INITCOND = condition_initiale ]\n" +#~ " [ , SORTOP = opérateur_tri ]\n" +#~ ")\n" +#~ "\n" +#~ "ou l'ancienne syntaxe\n" +#~ "\n" +#~ "CREATE AGGREGATE nom (\n" +#~ " BASETYPE = type_base,\n" +#~ " SFUNC = fonction_s,\n" +#~ " STYPE = type_données_état\n" +#~ " [ , FINALFUNC = fonction_f ]\n" +#~ " [ , INITCOND = condition_initiale ]\n" +#~ " [ , SORTOP = opérateur_tri ]\n" +#~ ")" -#~ msgid "DROP OPERATOR [ IF EXISTS ] name ( { lefttype | NONE } , { righttype | NONE } ) [ CASCADE | RESTRICT ]" +#~ msgid "" +#~ "CREATE CAST (sourcetype AS targettype)\n" +#~ " WITH FUNCTION funcname (argtypes)\n" +#~ " [ AS ASSIGNMENT | AS IMPLICIT ]\n" +#~ "\n" +#~ "CREATE CAST (sourcetype AS targettype)\n" +#~ " WITHOUT FUNCTION\n" +#~ " [ AS ASSIGNMENT | AS IMPLICIT ]\n" +#~ "\n" +#~ "CREATE CAST (sourcetype AS targettype)\n" +#~ " WITH INOUT\n" +#~ " [ AS ASSIGNMENT | AS IMPLICIT ]" #~ msgstr "" -#~ "DROP OPERATOR [IF EXISTS ] nom\n" -#~ " ( { type_gauche | NONE } , { type_droit | NONE } )\n" -#~ " [ CASCADE | RESTRICT ]" - -#~ msgid "DROP [ PROCEDURAL ] LANGUAGE [ IF EXISTS ] name [ CASCADE | RESTRICT ]" -#~ msgstr "DROP [ PROCEDURAL ] LANGUAGE [IF EXISTS ] nom [ CASCADE | RESTRICT ]" - -#~ msgid "DROP INDEX [ IF EXISTS ] name [, ...] [ CASCADE | RESTRICT ]" -#~ msgstr "DROP INDEX [IF EXISTS ] nom [, ...] 
[ CASCADE | RESTRICT ]" - -#~ msgid "DROP GROUP [ IF EXISTS ] name [, ...]" -#~ msgstr "DROP GROUP [IF EXISTS ] nom [, ...]" +#~ "CREATE CAST (type_source AS type_cible)\n" +#~ " WITH FUNCTION nom_fonction (type_argument)\n" +#~ " [ AS ASSIGNMENT | AS IMPLICIT ]\n" +#~ "\n" +#~ "CREATE CAST (type_source AS type_cible)\n" +#~ " WITHOUT FUNCTION\n" +#~ " [ AS ASSIGNMENT | AS IMPLICIT ]\n" +#~ "\n" +#~ "CREATE CAST (type_source AS type_cible)\n" +#~ " WITH INOUT\n" +#~ " [ AS ASSIGNMENT | AS IMPLICIT ]" #~ msgid "" -#~ "DROP FUNCTION [ IF EXISTS ] name ( [ [ argmode ] [ argname ] argtype [, ...] ] )\n" -#~ " [ CASCADE | RESTRICT ]" +#~ "CREATE CONSTRAINT TRIGGER name\n" +#~ " AFTER event [ OR ... ]\n" +#~ " ON table_name\n" +#~ " [ FROM referenced_table_name ]\n" +#~ " { NOT DEFERRABLE | [ DEFERRABLE ] { INITIALLY IMMEDIATE | INITIALLY DEFERRED } }\n" +#~ " FOR EACH ROW\n" +#~ " EXECUTE PROCEDURE funcname ( arguments )" #~ msgstr "" -#~ "DROP FUNCTION [IF EXISTS ] nom\n" -#~ " ( [ [ mode_arg ] [ nom_arg ] type_arg [, ...] ] )\n" -#~ " [ CASCADE | RESTRICT ]" - -#~ msgid "DROP FOREIGN DATA WRAPPER [ IF EXISTS ] name [ CASCADE | RESTRICT ]" -#~ msgstr "DROP FOREIGN DATA WRAPPER [ IF EXISTS ] nom [ CASCADE | RESTRICT ]" - -#~ msgid "DROP DOMAIN [ IF EXISTS ] name [, ...] [ CASCADE | RESTRICT ]" -#~ msgstr "DROP DOMAIN [ IF EXISTS ] nom [, ...] [ CASCADE | RESTRICT ]" - -#~ msgid "DROP DATABASE [ IF EXISTS ] name" -#~ msgstr "DROP DATABASE [ IF EXISTS ] nom" - -#~ msgid "DROP CONVERSION [ IF EXISTS ] name [ CASCADE | RESTRICT ]" -#~ msgstr "DROP CONVERSION [ IF EXISTS ] nom [ CASCADE | RESTRICT ]" - -#~ msgid "DROP CAST [ IF EXISTS ] (sourcetype AS targettype) [ CASCADE | RESTRICT ]" -#~ msgstr "DROP CAST [ IF EXISTS ] (type_source AS type_cible) [ CASCADE | RESTRICT ]" - -#~ msgid "DROP AGGREGATE [ IF EXISTS ] name ( type [ , ... ] ) [ CASCADE | RESTRICT ]" -#~ msgstr "DROP AGGREGATE [ IF EXISTS ] nom ( type [ , ... ] ) [ CASCADE | RESTRICT ]" +#~ "CREATE CONSTRAINT TRIGGER nom\n" +#~ " AFTER événement [ OR ... ]\n" +#~ " ON table\n" +#~ " [ FROM table_référencée ]\n" +#~ " { NOT DEFERRABLE | [ DEFERRABLE ] { INITIALLY IMMEDIATE | INITIALLY DEFERRED } }\n" +#~ " FOR EACH ROW\n" +#~ " EXECUTE PROCEDURE nom_fonction ( arguments )" -#~ msgid "DISCARD { ALL | PLANS | TEMPORARY | TEMP }" -#~ msgstr "DISCARD { ALL | PLANS | TEMPORARY | TEMP }" +#~ msgid "" +#~ "CREATE [ DEFAULT ] CONVERSION name\n" +#~ " FOR source_encoding TO dest_encoding FROM funcname" +#~ msgstr "" +#~ "CREATE [DEFAULT] CONVERSION nom\n" +#~ " FOR codage_source TO codage_cible FROM nom_fonction" #~ msgid "" -#~ "DELETE FROM [ ONLY ] table [ [ AS ] alias ]\n" -#~ " [ USING usinglist ]\n" -#~ " [ WHERE condition | WHERE CURRENT OF cursor_name ]\n" -#~ " [ RETURNING * | output_expression [ [ AS ] output_name ] [, ...] ]" +#~ "CREATE DATABASE name\n" +#~ " [ [ WITH ] [ OWNER [=] dbowner ]\n" +#~ " [ TEMPLATE [=] template ]\n" +#~ " [ ENCODING [=] encoding ]\n" +#~ " [ LC_COLLATE [=] lc_collate ]\n" +#~ " [ LC_CTYPE [=] lc_ctype ]\n" +#~ " [ TABLESPACE [=] tablespace ]\n" +#~ " [ CONNECTION LIMIT [=] connlimit ] ]" #~ msgstr "" -#~ "DELETE FROM [ ONLY ] table [ [ AS ] alias ]\n" -#~ " [ USING liste_using ]\n" -#~ " [ WHERE condition | WHERE CURRENT OF nom_curseur ]\n" -#~ " [ RETURNING * | expression_sortie [ [ AS ] nom_sortie ] [, ...] 
]" +#~ "CREATE DATABASE nom\n" +#~ " [ [ WITH ] [ OWNER [=] nom_propriétaire ]\n" +#~ " [ TEMPLATE [=] modèle ]\n" +#~ " [ ENCODING [=] encodage ]\n" +#~ " [ LC_COLLATE [=] tri_caract ]\n" +#~ " [ LC_CTYPE [=] type_caract ]\n" +#~ " [ TABLESPACE [=] tablespace ]\n" +#~ " [ CONNECTION LIMIT [=] limite_connexion ] ]" #~ msgid "" -#~ "DECLARE name [ BINARY ] [ INSENSITIVE ] [ [ NO ] SCROLL ]\n" -#~ " CURSOR [ { WITH | WITHOUT } HOLD ] FOR query" +#~ "CREATE DOMAIN name [ AS ] data_type\n" +#~ " [ DEFAULT expression ]\n" +#~ " [ constraint [ ... ] ]\n" +#~ "\n" +#~ "where constraint is:\n" +#~ "\n" +#~ "[ CONSTRAINT constraint_name ]\n" +#~ "{ NOT NULL | NULL | CHECK (expression) }" #~ msgstr "" -#~ "DECLARE nom [ BINARY ] [ INSENSITIVE ] [ [ NO ] SCROLL ]\n" -#~ " CURSOR [ { WITH | WITHOUT } HOLD ] FOR requête" - -#~ msgid "DEALLOCATE [ PREPARE ] { name | ALL }" -#~ msgstr "DEALLOCATE [ PREPARE ] { nom_plan | ALL }" +#~ "CREATE DOMAIN nom [AS] type_données\n" +#~ " [ DEFAULT expression ]\n" +#~ " [ contrainte [ ... ] ]\n" +#~ "\n" +#~ "avec comme contrainte :\n" +#~ "\n" +#~ "[ CONSTRAINT nom_contrainte ]\n" +#~ "{ NOT NULL | NULL | CHECK (expression) }" #~ msgid "" -#~ "CREATE [ OR REPLACE ] [ TEMP | TEMPORARY ] VIEW name [ ( column_name [, ...] ) ]\n" -#~ " AS query" +#~ "CREATE FOREIGN DATA WRAPPER name\n" +#~ " [ VALIDATOR valfunction | NO VALIDATOR ]\n" +#~ " [ OPTIONS ( option 'value' [, ... ] ) ]" #~ msgstr "" -#~ "CREATE [ OR REPLACE ] [ TEMP | TEMPORARY ] VIEW nom\n" -#~ " [ ( nom_colonne [, ...] ) ]\n" -#~ " AS requête" +#~ "CREATE FOREIGN DATA WRAPPER nom\n" +#~ " [ VALIDATOR fonction_validation | NO VALIDATOR ]\n" +#~ " [ OPTIONS ( option 'valeur' [, ... ] ) ]" #~ msgid "" -#~ "CREATE USER MAPPING FOR { username | USER | CURRENT_USER | PUBLIC }\n" -#~ " SERVER servername\n" -#~ " [ OPTIONS ( option 'value' [ , ... ] ) ]" +#~ "CREATE [ OR REPLACE ] FUNCTION\n" +#~ " name ( [ [ argmode ] [ argname ] argtype [ { DEFAULT | = } defexpr ] [, ...] ] )\n" +#~ " [ RETURNS rettype\n" +#~ " | RETURNS TABLE ( colname coltype [, ...] ) ]\n" +#~ " { LANGUAGE langname\n" +#~ " | WINDOW\n" +#~ " | IMMUTABLE | STABLE | VOLATILE\n" +#~ " | CALLED ON NULL INPUT | RETURNS NULL ON NULL INPUT | STRICT\n" +#~ " | [ EXTERNAL ] SECURITY INVOKER | [ EXTERNAL ] SECURITY DEFINER\n" +#~ " | COST execution_cost\n" +#~ " | ROWS result_rows\n" +#~ " | SET configuration_parameter { TO value | = value | FROM CURRENT }\n" +#~ " | AS 'definition'\n" +#~ " | AS 'obj_file', 'link_symbol'\n" +#~ " } ...\n" +#~ " [ WITH ( attribute [, ...] ) ]" #~ msgstr "" -#~ "CREATE USER MAPPING FOR { nomutilisateur | USER | CURRENT_USER | PUBLIC }\n" -#~ " SERVER nomserveur\n" -#~ " [ OPTIONS ( option 'valeur' [ , ... ] ) ]" +#~ "CREATE [ OR REPLACE ] FUNCTION\n" +#~ " nom ( [ [ mode_arg ] [ nom_arg ] type_arg [ { DEFAULT | = } expr_par_défaut ] [, ...] ] )\n" +#~ " [ RETURNS type_ret\n" +#~ " | RETURNS TABLE ( nom_colonne type_colonne [, ...] ) ]\n" +#~ " { LANGUAGE nom_lang\n" +#~ " | WINDOW\n" +#~ " | IMMUTABLE | STABLE | VOLATILE\n" +#~ " | CALLED ON NULL INPUT | RETURNS NULL ON NULL INPUT | STRICT\n" +#~ " | [ EXTERNAL ] SECURITY INVOKER | [ EXTERNAL ] SECURITY DEFINER\n" +#~ " | COST coût_exécution\n" +#~ " | ROWS lignes_résultats\n" +#~ " | SET paramètre_configuration { TO valeur | = valeur | FROM CURRENT }\n" +#~ " | AS 'définition'\n" +#~ " | AS 'fichier_obj', 'symbôle_lien'\n" +#~ " } ...\n" +#~ " [ WITH ( attribut [, ...] ) ]" #~ msgid "" -#~ "CREATE USER name [ [ WITH ] option [ ... 
] ]\n" +#~ "CREATE GROUP name [ [ WITH ] option [ ... ] ]\n" #~ "\n" #~ "where option can be:\n" #~ " \n" @@ -6598,7 +6796,6 @@ msgstr "" #~ " | CREATEUSER | NOCREATEUSER\n" #~ " | INHERIT | NOINHERIT\n" #~ " | LOGIN | NOLOGIN\n" -#~ " | CONNECTION LIMIT connlimit\n" #~ " | [ ENCRYPTED | UNENCRYPTED ] PASSWORD 'password'\n" #~ " | VALID UNTIL 'timestamp' \n" #~ " | IN ROLE rolename [, ...]\n" @@ -6608,7 +6805,7 @@ msgstr "" #~ " | USER rolename [, ...]\n" #~ " | SYSID uid" #~ msgstr "" -#~ "CREATE USER nom [ [ WITH ] option [ ... ] ]\n" +#~ "CREATE GROUP nom [ [ WITH ] option [ ... ] ]\n" #~ "\n" #~ "où option peut être :\n" #~ " \n" @@ -6618,7 +6815,6 @@ msgstr "" #~ " | CREATEUSER | NOCREATEUSER\n" #~ " | INHERIT | NOINHERIT\n" #~ " | LOGIN | NOLOGIN\n" -#~ " | CONNECTION LIMIT limite_connexion\n" #~ " | [ ENCRYPTED | UNENCRYPTED ] PASSWORD 'motdepasse'\n" #~ " | VALID UNTIL 'timestamp' \n" #~ " | IN ROLE nom_rôle [, ...]\n" @@ -6629,141 +6825,145 @@ msgstr "" #~ " | SYSID uid" #~ msgid "" -#~ "CREATE TYPE name AS\n" -#~ " ( attribute_name data_type [, ... ] )\n" -#~ "\n" -#~ "CREATE TYPE name AS ENUM\n" -#~ " ( 'label' [, ... ] )\n" -#~ "\n" -#~ "CREATE TYPE name (\n" -#~ " INPUT = input_function,\n" -#~ " OUTPUT = output_function\n" -#~ " [ , RECEIVE = receive_function ]\n" -#~ " [ , SEND = send_function ]\n" -#~ " [ , TYPMOD_IN = type_modifier_input_function ]\n" -#~ " [ , TYPMOD_OUT = type_modifier_output_function ]\n" -#~ " [ , ANALYZE = analyze_function ]\n" -#~ " [ , INTERNALLENGTH = { internallength | VARIABLE } ]\n" -#~ " [ , PASSEDBYVALUE ]\n" -#~ " [ , ALIGNMENT = alignment ]\n" -#~ " [ , STORAGE = storage ]\n" -#~ " [ , LIKE = like_type ]\n" -#~ " [ , CATEGORY = category ]\n" -#~ " [ , PREFERRED = preferred ]\n" -#~ " [ , DEFAULT = default ]\n" -#~ " [ , ELEMENT = element ]\n" -#~ " [ , DELIMITER = delimiter ]\n" -#~ ")\n" -#~ "\n" -#~ "CREATE TYPE name" +#~ "CREATE [ UNIQUE ] INDEX [ CONCURRENTLY ] name ON table [ USING method ]\n" +#~ " ( { column | ( expression ) } [ opclass ] [ ASC | DESC ] [ NULLS { FIRST | LAST } ] [, ...] )\n" +#~ " [ WITH ( storage_parameter = value [, ... ] ) ]\n" +#~ " [ TABLESPACE tablespace ]\n" +#~ " [ WHERE predicate ]" #~ msgstr "" -#~ "CREATE TYPE nom AS\n" -#~ " ( nom_attribut type_donnee [, ... ] )\n" -#~ "\n" -#~ "CREATE TYPE nom AS ENUM\n" -#~ " ( 'label' [, ... ] )\n" -#~ "\n" -#~ "CREATE TYPE nom (\n" -#~ " INPUT = fonction_entrée,\n" -#~ " OUTPUT = fonction_sortie\n" -#~ " [ , RECEIVE = fonction_réception ]\n" -#~ " [ , SEND = fonction_envoi ]\n" -#~ " [ , TYPMOD_IN = fonction_entrée_modif_type ]\n" -#~ " [ , TYPMOD_OUT = fonction_sortie_modif_type ]\n" -#~ " [ , ANALYZE = fonction_analyse ]\n" -#~ " [ , INTERNALLENGTH = { longueur_interne | VARIABLE } ]\n" -#~ " [ , PASSEDBYVALUE ]\n" -#~ " [ , ALIGNMENT = alignement ]\n" -#~ " [ , STORAGE = stockage ]\n" -#~ " [ , LIKE = type_like ]\n" -#~ " [ , CATEGORY = catégorie ]\n" -#~ " [ , PREFERRED = préféré ]\n" -#~ " [ , DEFAULT = valeur_par_défaut ]\n" -#~ " [ , ELEMENT = élément ]\n" -#~ " [ , DELIMITER = délimiteur ]\n" -#~ ")\n" -#~ "\n" -#~ "CREATE TYPE nom" +#~ "CREATE [ UNIQUE ] INDEX [ CONCURRENTLY ] nom ON table [ USING methode ]\n" +#~ " ( { colonne | ( expression ) } [ classe_operateur ]\n" +#~ " [ ASC | DESC ]\n" +#~ " [ NULLS { FIRST | LAST } ] [, ...] )\n" +#~ " [ WITH ( parametre_stockage = valeur [, ... ] ) ]\n" +#~ " [ TABLESPACE tablespace ]\n" +#~ " [ WHERE predicat ]" #~ msgid "" -#~ "CREATE TRIGGER name { BEFORE | AFTER } { event [ OR ... 
] }\n" -#~ " ON table [ FOR [ EACH ] { ROW | STATEMENT } ]\n" -#~ " EXECUTE PROCEDURE funcname ( arguments )" +#~ "CREATE [ PROCEDURAL ] LANGUAGE name\n" +#~ "CREATE [ TRUSTED ] [ PROCEDURAL ] LANGUAGE name\n" +#~ " HANDLER call_handler [ VALIDATOR valfunction ]" #~ msgstr "" -#~ "CREATE TRIGGER nom { BEFORE | AFTER } { événement [ OR ... ] }\n" -#~ " ON table [ FOR [ EACH ] { ROW | STATEMENT } ]\n" -#~ " EXECUTE PROCEDURE nom_fonction ( arguments )" +#~ "CREATE [ PROCEDURAL ] LANGUAGE nom\n" +#~ "CREATE [ TRUSTED ] [ PROCEDURAL ] LANGUAGE nom\n" +#~ " HANDLER gestionnaire_appels [ VALIDATOR fonction_val ]" #~ msgid "" -#~ "CREATE TEXT SEARCH TEMPLATE name (\n" -#~ " [ INIT = init_function , ]\n" -#~ " LEXIZE = lexize_function\n" +#~ "CREATE OPERATOR name (\n" +#~ " PROCEDURE = funcname\n" +#~ " [, LEFTARG = lefttype ] [, RIGHTARG = righttype ]\n" +#~ " [, COMMUTATOR = com_op ] [, NEGATOR = neg_op ]\n" +#~ " [, RESTRICT = res_proc ] [, JOIN = join_proc ]\n" +#~ " [, HASHES ] [, MERGES ]\n" #~ ")" #~ msgstr "" -#~ "CREATE TEXT SEARCH TEMPLATE nom (\n" -#~ " [ INIT = fonction_init , ]\n" -#~ " LEXIZE = fonction_lexize\n" +#~ "CREATE OPERATOR nom (\n" +#~ " PROCEDURE = nom_fonction\n" +#~ " [, LEFTARG = type_gauche ] [, RIGHTARG = type_droit ]\n" +#~ " [, COMMUTATOR = op_com ] [, NEGATOR = op_neg ]\n" +#~ " [, RESTRICT = proc_res ] [, JOIN = proc_join ]\n" +#~ " [, HASHES ] [, MERGES ]\n" #~ ")" #~ msgid "" -#~ "CREATE TEXT SEARCH PARSER name (\n" -#~ " START = start_function ,\n" -#~ " GETTOKEN = gettoken_function ,\n" -#~ " END = end_function ,\n" -#~ " LEXTYPES = lextypes_function\n" -#~ " [, HEADLINE = headline_function ]\n" -#~ ")" +#~ "CREATE OPERATOR CLASS name [ DEFAULT ] FOR TYPE data_type\n" +#~ " USING index_method [ FAMILY family_name ] AS\n" +#~ " { OPERATOR strategy_number operator_name [ ( op_type, op_type ) ]\n" +#~ " | FUNCTION support_number [ ( op_type [ , op_type ] ) ] funcname ( argument_type [, ...] )\n" +#~ " | STORAGE storage_type\n" +#~ " } [, ... ]" #~ msgstr "" -#~ "CREATE TEXT SEARCH PARSER nom (\n" -#~ " START = fonction_debut ,\n" -#~ " GETTOKEN = fonction_jeton ,\n" -#~ " END = fonction_fin ,\n" -#~ " LEXTYPES = fonction_typeslexem\n" -#~ " [, HEADLINE = fonction_entete ]\n" -#~ ")" +#~ "CREATE OPERATOR CLASS nom [ DEFAULT ] FOR TYPE type_donnée\n" +#~ " USING méthode_indexage [ FAMILY nom_famille ] AS\n" +#~ " { OPERATOR numéro_stratégie nom_operateur [ ( op_type, op_type ) ]\n" +#~ " | FUNCTION numéro_support [ ( type_op [ , type_op ] ) ]\n" +#~ " nom_fonction ( type_argument [, ...] )\n" +#~ " | STORAGE type_stockage\n" +#~ " } [, ... ]" + +#~ msgid "CREATE OPERATOR FAMILY name USING index_method" +#~ msgstr "CREATE OPERATOR FAMILY nom USING methode_indexage" + +#~ msgid "" +#~ "CREATE ROLE name [ [ WITH ] option [ ... ] ]\n" +#~ "\n" +#~ "where option can be:\n" +#~ " \n" +#~ " SUPERUSER | NOSUPERUSER\n" +#~ " | CREATEDB | NOCREATEDB\n" +#~ " | CREATEROLE | NOCREATEROLE\n" +#~ " | CREATEUSER | NOCREATEUSER\n" +#~ " | INHERIT | NOINHERIT\n" +#~ " | LOGIN | NOLOGIN\n" +#~ " | CONNECTION LIMIT connlimit\n" +#~ " | [ ENCRYPTED | UNENCRYPTED ] PASSWORD 'password'\n" +#~ " | VALID UNTIL 'timestamp' \n" +#~ " | IN ROLE rolename [, ...]\n" +#~ " | IN GROUP rolename [, ...]\n" +#~ " | ROLE rolename [, ...]\n" +#~ " | ADMIN rolename [, ...]\n" +#~ " | USER rolename [, ...]\n" +#~ " | SYSID uid" +#~ msgstr "" +#~ "CREATE ROLE nom [ [ WITH ] option [ ... 
] ]\n" +#~ "\n" +#~ "où option peut être :\n" +#~ " \n" +#~ " SUPERUSER | NOSUPERUSER\n" +#~ " | CREATEDB | NOCREATEDB\n" +#~ " | CREATEROLE | NOCREATEROLE\n" +#~ " | CREATEUSER | NOCREATEUSER\n" +#~ " | INHERIT | NOINHERIT\n" +#~ " | LOGIN | NOLOGIN\n" +#~ " | CONNECTION LIMIT limite_connexion\n" +#~ " | [ ENCRYPTED | UNENCRYPTED ] PASSWORD 'motdepasse'\n" +#~ " | VALID UNTIL 'timestamp' \n" +#~ " | IN ROLE nom_rôle [, ...]\n" +#~ " | IN GROUP nom_rôle [, ...]\n" +#~ " | ROLE nom_rôle [, ...]\n" +#~ " | ADMIN nom_rôle [, ...]\n" +#~ " | USER nom_rôle [, ...]\n" +#~ " | SYSID uid" #~ msgid "" -#~ "CREATE TEXT SEARCH DICTIONARY name (\n" -#~ " TEMPLATE = template\n" -#~ " [, option = value [, ... ]]\n" -#~ ")" +#~ "CREATE [ OR REPLACE ] RULE name AS ON event\n" +#~ " TO table [ WHERE condition ]\n" +#~ " DO [ ALSO | INSTEAD ] { NOTHING | command | ( command ; command ... ) }" #~ msgstr "" -#~ "CREATE TEXT SEARCH DICTIONARY nom (\n" -#~ " TEMPLATE = modèle\n" -#~ " [, option = valeur [, ... ]]\n" -#~ ")" +#~ "CREATE [ OR REPLACE ] RULE nom AS ON événement\n" +#~ " TO table [ WHERE condition ]\n" +#~ " DO [ ALSO | INSTEAD ] { NOTHING | commande | ( commande ; commande ... ) }" #~ msgid "" -#~ "CREATE TEXT SEARCH CONFIGURATION name (\n" -#~ " PARSER = parser_name |\n" -#~ " COPY = source_config\n" -#~ ")" +#~ "CREATE SCHEMA schemaname [ AUTHORIZATION username ] [ schema_element [ ... ] ]\n" +#~ "CREATE SCHEMA AUTHORIZATION username [ schema_element [ ... ] ]" #~ msgstr "" -#~ "CREATE TEXT SEARCH CONFIGURATION nom (\n" -#~ " PARSER = nom_analyseur |\n" -#~ " COPY = config_source\n" -#~ ")" +#~ "CREATE SCHEMA nom_schema [ AUTHORIZATION nom_utilisateur ]\n" +#~ " [ element_schema [ ... ] ]\n" +#~ "CREATE SCHEMA AUTHORIZATION nom_utilisateur [ element_schema [ ... ] ]" -#~ msgid "CREATE TABLESPACE tablespacename [ OWNER username ] LOCATION 'directory'" +#~ msgid "" +#~ "CREATE [ TEMPORARY | TEMP ] SEQUENCE name [ INCREMENT [ BY ] increment ]\n" +#~ " [ MINVALUE minvalue | NO MINVALUE ] [ MAXVALUE maxvalue | NO MAXVALUE ]\n" +#~ " [ START [ WITH ] start ] [ CACHE cache ] [ [ NO ] CYCLE ]\n" +#~ " [ OWNED BY { table.column | NONE } ]" #~ msgstr "" -#~ "CREATE TABLESPACE nom_tablespace [ OWNER nom_utilisateur ]\n" -#~ " LOCATION 'répertoire'" +#~ "CREATE [ TEMPORARY | TEMP ] SEQUENCE nom [ INCREMENT [ BY ] incrémentation ]\n" +#~ " [ MINVALUE valeur_mini | NO MINVALUE ]\n" +#~ " [ MAXVALUE valeur_maxi | NO MAXVALUE ]\n" +#~ " [ START [ WITH ] valeur_départ ]\n" +#~ " [ CACHE en_cache ]\n" +#~ " [ [ NO ] CYCLE ]\n" +#~ " [ OWNED BY { table.colonne | NONE } ]" #~ msgid "" -#~ "CREATE [ [ GLOBAL | LOCAL ] { TEMPORARY | TEMP } ] TABLE table_name\n" -#~ " [ (column_name [, ...] ) ]\n" -#~ " [ WITH ( storage_parameter [= value] [, ... ] ) | WITH OIDS | WITHOUT OIDS ]\n" -#~ " [ ON COMMIT { PRESERVE ROWS | DELETE ROWS | DROP } ]\n" -#~ " [ TABLESPACE tablespace ]\n" -#~ " AS query\n" -#~ " [ WITH [ NO ] DATA ]" +#~ "CREATE SERVER servername [ TYPE 'servertype' ] [ VERSION 'serverversion' ]\n" +#~ " FOREIGN DATA WRAPPER fdwname\n" +#~ " [ OPTIONS ( option 'value' [, ... ] ) ]" #~ msgstr "" -#~ "CREATE [ [ GLOBAL | LOCAL ] { TEMPORARY | TEMP } ] TABLE nom_table\n" -#~ " [ (nom_colonne [, ...] ) ]\n" -#~ " [ WITH ( paramètre_stockage [= valeur] [, ... 
] ) | WITH OIDS | WITHOUT OIDS ]\n" -#~ " [ ON COMMIT { PRESERVE ROWS | DELETE ROWS | DROP } ]\n" -#~ " [ TABLESPACE tablespace ]\n" -#~ " AS requête [ WITH [ NO ] DATA ]" +#~ "CREATE SERVER nom [ TYPE 'typeserveur' ] [ VERSION 'versionserveur' ]\n" +#~ " FOREIGN DATA WRAPPER nomfdw\n" +#~ " [ OPTIONS ( option 'valeur' [, ... ] ) ]" #~ msgid "" #~ "CREATE [ [ GLOBAL | LOCAL ] { TEMPORARY | TEMP } ] TABLE table_name ( [\n" @@ -6850,148 +7050,144 @@ msgstr "" #~ "[ USING INDEX TABLESPACE espace_logique ]" #~ msgid "" -#~ "CREATE SERVER servername [ TYPE 'servertype' ] [ VERSION 'serverversion' ]\n" -#~ " FOREIGN DATA WRAPPER fdwname\n" -#~ " [ OPTIONS ( option 'value' [, ... ] ) ]" -#~ msgstr "" -#~ "CREATE SERVER nom [ TYPE 'typeserveur' ] [ VERSION 'versionserveur' ]\n" -#~ " FOREIGN DATA WRAPPER nomfdw\n" -#~ " [ OPTIONS ( option 'valeur' [, ... ] ) ]" - -#~ msgid "" -#~ "CREATE [ TEMPORARY | TEMP ] SEQUENCE name [ INCREMENT [ BY ] increment ]\n" -#~ " [ MINVALUE minvalue | NO MINVALUE ] [ MAXVALUE maxvalue | NO MAXVALUE ]\n" -#~ " [ START [ WITH ] start ] [ CACHE cache ] [ [ NO ] CYCLE ]\n" -#~ " [ OWNED BY { table.column | NONE } ]" +#~ "CREATE [ [ GLOBAL | LOCAL ] { TEMPORARY | TEMP } ] TABLE table_name\n" +#~ " [ (column_name [, ...] ) ]\n" +#~ " [ WITH ( storage_parameter [= value] [, ... ] ) | WITH OIDS | WITHOUT OIDS ]\n" +#~ " [ ON COMMIT { PRESERVE ROWS | DELETE ROWS | DROP } ]\n" +#~ " [ TABLESPACE tablespace ]\n" +#~ " AS query\n" +#~ " [ WITH [ NO ] DATA ]" #~ msgstr "" -#~ "CREATE [ TEMPORARY | TEMP ] SEQUENCE nom [ INCREMENT [ BY ] incrémentation ]\n" -#~ " [ MINVALUE valeur_mini | NO MINVALUE ]\n" -#~ " [ MAXVALUE valeur_maxi | NO MAXVALUE ]\n" -#~ " [ START [ WITH ] valeur_départ ]\n" -#~ " [ CACHE en_cache ]\n" -#~ " [ [ NO ] CYCLE ]\n" -#~ " [ OWNED BY { table.colonne | NONE } ]" +#~ "CREATE [ [ GLOBAL | LOCAL ] { TEMPORARY | TEMP } ] TABLE nom_table\n" +#~ " [ (nom_colonne [, ...] ) ]\n" +#~ " [ WITH ( paramètre_stockage [= valeur] [, ... ] ) | WITH OIDS | WITHOUT OIDS ]\n" +#~ " [ ON COMMIT { PRESERVE ROWS | DELETE ROWS | DROP } ]\n" +#~ " [ TABLESPACE tablespace ]\n" +#~ " AS requête [ WITH [ NO ] DATA ]" -#~ msgid "" -#~ "CREATE SCHEMA schemaname [ AUTHORIZATION username ] [ schema_element [ ... ] ]\n" -#~ "CREATE SCHEMA AUTHORIZATION username [ schema_element [ ... ] ]" +#~ msgid "CREATE TABLESPACE tablespacename [ OWNER username ] LOCATION 'directory'" #~ msgstr "" -#~ "CREATE SCHEMA nom_schema [ AUTHORIZATION nom_utilisateur ]\n" -#~ " [ element_schema [ ... ] ]\n" -#~ "CREATE SCHEMA AUTHORIZATION nom_utilisateur [ element_schema [ ... ] ]" +#~ "CREATE TABLESPACE nom_tablespace [ OWNER nom_utilisateur ]\n" +#~ " LOCATION 'répertoire'" #~ msgid "" -#~ "CREATE [ OR REPLACE ] RULE name AS ON event\n" -#~ " TO table [ WHERE condition ]\n" -#~ " DO [ ALSO | INSTEAD ] { NOTHING | command | ( command ; command ... ) }" +#~ "CREATE TEXT SEARCH CONFIGURATION name (\n" +#~ " PARSER = parser_name |\n" +#~ " COPY = source_config\n" +#~ ")" #~ msgstr "" -#~ "CREATE [ OR REPLACE ] RULE nom AS ON événement\n" -#~ " TO table [ WHERE condition ]\n" -#~ " DO [ ALSO | INSTEAD ] { NOTHING | commande | ( commande ; commande ... ) }" +#~ "CREATE TEXT SEARCH CONFIGURATION nom (\n" +#~ " PARSER = nom_analyseur |\n" +#~ " COPY = config_source\n" +#~ ")" #~ msgid "" -#~ "CREATE ROLE name [ [ WITH ] option [ ... 
] ]\n" -#~ "\n" -#~ "where option can be:\n" -#~ " \n" -#~ " SUPERUSER | NOSUPERUSER\n" -#~ " | CREATEDB | NOCREATEDB\n" -#~ " | CREATEROLE | NOCREATEROLE\n" -#~ " | CREATEUSER | NOCREATEUSER\n" -#~ " | INHERIT | NOINHERIT\n" -#~ " | LOGIN | NOLOGIN\n" -#~ " | CONNECTION LIMIT connlimit\n" -#~ " | [ ENCRYPTED | UNENCRYPTED ] PASSWORD 'password'\n" -#~ " | VALID UNTIL 'timestamp' \n" -#~ " | IN ROLE rolename [, ...]\n" -#~ " | IN GROUP rolename [, ...]\n" -#~ " | ROLE rolename [, ...]\n" -#~ " | ADMIN rolename [, ...]\n" -#~ " | USER rolename [, ...]\n" -#~ " | SYSID uid" +#~ "CREATE TEXT SEARCH DICTIONARY name (\n" +#~ " TEMPLATE = template\n" +#~ " [, option = value [, ... ]]\n" +#~ ")" #~ msgstr "" -#~ "CREATE ROLE nom [ [ WITH ] option [ ... ] ]\n" -#~ "\n" -#~ "où option peut être :\n" -#~ " \n" -#~ " SUPERUSER | NOSUPERUSER\n" -#~ " | CREATEDB | NOCREATEDB\n" -#~ " | CREATEROLE | NOCREATEROLE\n" -#~ " | CREATEUSER | NOCREATEUSER\n" -#~ " | INHERIT | NOINHERIT\n" -#~ " | LOGIN | NOLOGIN\n" -#~ " | CONNECTION LIMIT limite_connexion\n" -#~ " | [ ENCRYPTED | UNENCRYPTED ] PASSWORD 'motdepasse'\n" -#~ " | VALID UNTIL 'timestamp' \n" -#~ " | IN ROLE nom_rôle [, ...]\n" -#~ " | IN GROUP nom_rôle [, ...]\n" -#~ " | ROLE nom_rôle [, ...]\n" -#~ " | ADMIN nom_rôle [, ...]\n" -#~ " | USER nom_rôle [, ...]\n" -#~ " | SYSID uid" - -#~ msgid "CREATE OPERATOR FAMILY name USING index_method" -#~ msgstr "CREATE OPERATOR FAMILY nom USING methode_indexage" +#~ "CREATE TEXT SEARCH DICTIONARY nom (\n" +#~ " TEMPLATE = modèle\n" +#~ " [, option = valeur [, ... ]]\n" +#~ ")" #~ msgid "" -#~ "CREATE OPERATOR CLASS name [ DEFAULT ] FOR TYPE data_type\n" -#~ " USING index_method [ FAMILY family_name ] AS\n" -#~ " { OPERATOR strategy_number operator_name [ ( op_type, op_type ) ]\n" -#~ " | FUNCTION support_number [ ( op_type [ , op_type ] ) ] funcname ( argument_type [, ...] )\n" -#~ " | STORAGE storage_type\n" -#~ " } [, ... ]" +#~ "CREATE TEXT SEARCH PARSER name (\n" +#~ " START = start_function ,\n" +#~ " GETTOKEN = gettoken_function ,\n" +#~ " END = end_function ,\n" +#~ " LEXTYPES = lextypes_function\n" +#~ " [, HEADLINE = headline_function ]\n" +#~ ")" #~ msgstr "" -#~ "CREATE OPERATOR CLASS nom [ DEFAULT ] FOR TYPE type_donnée\n" -#~ " USING méthode_indexage [ FAMILY nom_famille ] AS\n" -#~ " { OPERATOR numéro_stratégie nom_operateur [ ( op_type, op_type ) ]\n" -#~ " | FUNCTION numéro_support [ ( type_op [ , type_op ] ) ]\n" -#~ " nom_fonction ( type_argument [, ...] )\n" -#~ " | STORAGE type_stockage\n" -#~ " } [, ... 
]" +#~ "CREATE TEXT SEARCH PARSER nom (\n" +#~ " START = fonction_debut ,\n" +#~ " GETTOKEN = fonction_jeton ,\n" +#~ " END = fonction_fin ,\n" +#~ " LEXTYPES = fonction_typeslexem\n" +#~ " [, HEADLINE = fonction_entete ]\n" +#~ ")" #~ msgid "" -#~ "CREATE OPERATOR name (\n" -#~ " PROCEDURE = funcname\n" -#~ " [, LEFTARG = lefttype ] [, RIGHTARG = righttype ]\n" -#~ " [, COMMUTATOR = com_op ] [, NEGATOR = neg_op ]\n" -#~ " [, RESTRICT = res_proc ] [, JOIN = join_proc ]\n" -#~ " [, HASHES ] [, MERGES ]\n" +#~ "CREATE TEXT SEARCH TEMPLATE name (\n" +#~ " [ INIT = init_function , ]\n" +#~ " LEXIZE = lexize_function\n" #~ ")" #~ msgstr "" -#~ "CREATE OPERATOR nom (\n" -#~ " PROCEDURE = nom_fonction\n" -#~ " [, LEFTARG = type_gauche ] [, RIGHTARG = type_droit ]\n" -#~ " [, COMMUTATOR = op_com ] [, NEGATOR = op_neg ]\n" -#~ " [, RESTRICT = proc_res ] [, JOIN = proc_join ]\n" -#~ " [, HASHES ] [, MERGES ]\n" +#~ "CREATE TEXT SEARCH TEMPLATE nom (\n" +#~ " [ INIT = fonction_init , ]\n" +#~ " LEXIZE = fonction_lexize\n" #~ ")" #~ msgid "" -#~ "CREATE [ PROCEDURAL ] LANGUAGE name\n" -#~ "CREATE [ TRUSTED ] [ PROCEDURAL ] LANGUAGE name\n" -#~ " HANDLER call_handler [ VALIDATOR valfunction ]" +#~ "CREATE TRIGGER name { BEFORE | AFTER } { event [ OR ... ] }\n" +#~ " ON table [ FOR [ EACH ] { ROW | STATEMENT } ]\n" +#~ " EXECUTE PROCEDURE funcname ( arguments )" #~ msgstr "" -#~ "CREATE [ PROCEDURAL ] LANGUAGE nom\n" -#~ "CREATE [ TRUSTED ] [ PROCEDURAL ] LANGUAGE nom\n" -#~ " HANDLER gestionnaire_appels [ VALIDATOR fonction_val ]" +#~ "CREATE TRIGGER nom { BEFORE | AFTER } { événement [ OR ... ] }\n" +#~ " ON table [ FOR [ EACH ] { ROW | STATEMENT } ]\n" +#~ " EXECUTE PROCEDURE nom_fonction ( arguments )" #~ msgid "" -#~ "CREATE [ UNIQUE ] INDEX [ CONCURRENTLY ] name ON table [ USING method ]\n" -#~ " ( { column | ( expression ) } [ opclass ] [ ASC | DESC ] [ NULLS { FIRST | LAST } ] [, ...] )\n" -#~ " [ WITH ( storage_parameter = value [, ... ] ) ]\n" -#~ " [ TABLESPACE tablespace ]\n" -#~ " [ WHERE predicate ]" +#~ "CREATE TYPE name AS\n" +#~ " ( attribute_name data_type [, ... ] )\n" +#~ "\n" +#~ "CREATE TYPE name AS ENUM\n" +#~ " ( 'label' [, ... ] )\n" +#~ "\n" +#~ "CREATE TYPE name (\n" +#~ " INPUT = input_function,\n" +#~ " OUTPUT = output_function\n" +#~ " [ , RECEIVE = receive_function ]\n" +#~ " [ , SEND = send_function ]\n" +#~ " [ , TYPMOD_IN = type_modifier_input_function ]\n" +#~ " [ , TYPMOD_OUT = type_modifier_output_function ]\n" +#~ " [ , ANALYZE = analyze_function ]\n" +#~ " [ , INTERNALLENGTH = { internallength | VARIABLE } ]\n" +#~ " [ , PASSEDBYVALUE ]\n" +#~ " [ , ALIGNMENT = alignment ]\n" +#~ " [ , STORAGE = storage ]\n" +#~ " [ , LIKE = like_type ]\n" +#~ " [ , CATEGORY = category ]\n" +#~ " [ , PREFERRED = preferred ]\n" +#~ " [ , DEFAULT = default ]\n" +#~ " [ , ELEMENT = element ]\n" +#~ " [ , DELIMITER = delimiter ]\n" +#~ ")\n" +#~ "\n" +#~ "CREATE TYPE name" #~ msgstr "" -#~ "CREATE [ UNIQUE ] INDEX [ CONCURRENTLY ] nom ON table [ USING methode ]\n" -#~ " ( { colonne | ( expression ) } [ classe_operateur ]\n" -#~ " [ ASC | DESC ]\n" -#~ " [ NULLS { FIRST | LAST } ] [, ...] )\n" -#~ " [ WITH ( parametre_stockage = valeur [, ... ] ) ]\n" -#~ " [ TABLESPACE tablespace ]\n" -#~ " [ WHERE predicat ]" +#~ "CREATE TYPE nom AS\n" +#~ " ( nom_attribut type_donnee [, ... ] )\n" +#~ "\n" +#~ "CREATE TYPE nom AS ENUM\n" +#~ " ( 'label' [, ... 
] )\n" +#~ "\n" +#~ "CREATE TYPE nom (\n" +#~ " INPUT = fonction_entrée,\n" +#~ " OUTPUT = fonction_sortie\n" +#~ " [ , RECEIVE = fonction_réception ]\n" +#~ " [ , SEND = fonction_envoi ]\n" +#~ " [ , TYPMOD_IN = fonction_entrée_modif_type ]\n" +#~ " [ , TYPMOD_OUT = fonction_sortie_modif_type ]\n" +#~ " [ , ANALYZE = fonction_analyse ]\n" +#~ " [ , INTERNALLENGTH = { longueur_interne | VARIABLE } ]\n" +#~ " [ , PASSEDBYVALUE ]\n" +#~ " [ , ALIGNMENT = alignement ]\n" +#~ " [ , STORAGE = stockage ]\n" +#~ " [ , LIKE = type_like ]\n" +#~ " [ , CATEGORY = catégorie ]\n" +#~ " [ , PREFERRED = préféré ]\n" +#~ " [ , DEFAULT = valeur_par_défaut ]\n" +#~ " [ , ELEMENT = élément ]\n" +#~ " [ , DELIMITER = délimiteur ]\n" +#~ ")\n" +#~ "\n" +#~ "CREATE TYPE nom" #~ msgid "" -#~ "CREATE GROUP name [ [ WITH ] option [ ... ] ]\n" +#~ "CREATE USER name [ [ WITH ] option [ ... ] ]\n" #~ "\n" #~ "where option can be:\n" #~ " \n" @@ -7001,6 +7197,7 @@ msgstr "" #~ " | CREATEUSER | NOCREATEUSER\n" #~ " | INHERIT | NOINHERIT\n" #~ " | LOGIN | NOLOGIN\n" +#~ " | CONNECTION LIMIT connlimit\n" #~ " | [ ENCRYPTED | UNENCRYPTED ] PASSWORD 'password'\n" #~ " | VALID UNTIL 'timestamp' \n" #~ " | IN ROLE rolename [, ...]\n" @@ -7010,7 +7207,7 @@ msgstr "" #~ " | USER rolename [, ...]\n" #~ " | SYSID uid" #~ msgstr "" -#~ "CREATE GROUP nom [ [ WITH ] option [ ... ] ]\n" +#~ "CREATE USER nom [ [ WITH ] option [ ... ] ]\n" #~ "\n" #~ "où option peut être :\n" #~ " \n" @@ -7020,6 +7217,7 @@ msgstr "" #~ " | CREATEUSER | NOCREATEUSER\n" #~ " | INHERIT | NOINHERIT\n" #~ " | LOGIN | NOLOGIN\n" +#~ " | CONNECTION LIMIT limite_connexion\n" #~ " | [ ENCRYPTED | UNENCRYPTED ] PASSWORD 'motdepasse'\n" #~ " | VALID UNTIL 'timestamp' \n" #~ " | IN ROLE nom_rôle [, ...]\n" @@ -7030,947 +7228,801 @@ msgstr "" #~ " | SYSID uid" #~ msgid "" -#~ "CREATE [ OR REPLACE ] FUNCTION\n" -#~ " name ( [ [ argmode ] [ argname ] argtype [ { DEFAULT | = } defexpr ] [, ...] ] )\n" -#~ " [ RETURNS rettype\n" -#~ " | RETURNS TABLE ( colname coltype [, ...] ) ]\n" -#~ " { LANGUAGE langname\n" -#~ " | WINDOW\n" -#~ " | IMMUTABLE | STABLE | VOLATILE\n" -#~ " | CALLED ON NULL INPUT | RETURNS NULL ON NULL INPUT | STRICT\n" -#~ " | [ EXTERNAL ] SECURITY INVOKER | [ EXTERNAL ] SECURITY DEFINER\n" -#~ " | COST execution_cost\n" -#~ " | ROWS result_rows\n" -#~ " | SET configuration_parameter { TO value | = value | FROM CURRENT }\n" -#~ " | AS 'definition'\n" -#~ " | AS 'obj_file', 'link_symbol'\n" -#~ " } ...\n" -#~ " [ WITH ( attribute [, ...] ) ]" +#~ "CREATE USER MAPPING FOR { username | USER | CURRENT_USER | PUBLIC }\n" +#~ " SERVER servername\n" +#~ " [ OPTIONS ( option 'value' [ , ... ] ) ]" #~ msgstr "" -#~ "CREATE [ OR REPLACE ] FUNCTION\n" -#~ " nom ( [ [ mode_arg ] [ nom_arg ] type_arg [ { DEFAULT | = } expr_par_défaut ] [, ...] ] )\n" -#~ " [ RETURNS type_ret\n" -#~ " | RETURNS TABLE ( nom_colonne type_colonne [, ...] ) ]\n" -#~ " { LANGUAGE nom_lang\n" -#~ " | WINDOW\n" -#~ " | IMMUTABLE | STABLE | VOLATILE\n" -#~ " | CALLED ON NULL INPUT | RETURNS NULL ON NULL INPUT | STRICT\n" -#~ " | [ EXTERNAL ] SECURITY INVOKER | [ EXTERNAL ] SECURITY DEFINER\n" -#~ " | COST coût_exécution\n" -#~ " | ROWS lignes_résultats\n" -#~ " | SET paramètre_configuration { TO valeur | = valeur | FROM CURRENT }\n" -#~ " | AS 'définition'\n" -#~ " | AS 'fichier_obj', 'symbôle_lien'\n" -#~ " } ...\n" -#~ " [ WITH ( attribut [, ...] 
) ]" +#~ "CREATE USER MAPPING FOR { nomutilisateur | USER | CURRENT_USER | PUBLIC }\n" +#~ " SERVER nomserveur\n" +#~ " [ OPTIONS ( option 'valeur' [ , ... ] ) ]" #~ msgid "" -#~ "CREATE FOREIGN DATA WRAPPER name\n" -#~ " [ VALIDATOR valfunction | NO VALIDATOR ]\n" -#~ " [ OPTIONS ( option 'value' [, ... ] ) ]" +#~ "CREATE [ OR REPLACE ] [ TEMP | TEMPORARY ] VIEW name [ ( column_name [, ...] ) ]\n" +#~ " AS query" #~ msgstr "" -#~ "CREATE FOREIGN DATA WRAPPER nom\n" -#~ " [ VALIDATOR fonction_validation | NO VALIDATOR ]\n" -#~ " [ OPTIONS ( option 'valeur' [, ... ] ) ]" +#~ "CREATE [ OR REPLACE ] [ TEMP | TEMPORARY ] VIEW nom\n" +#~ " [ ( nom_colonne [, ...] ) ]\n" +#~ " AS requête" + +#~ msgid "DEALLOCATE [ PREPARE ] { name | ALL }" +#~ msgstr "DEALLOCATE [ PREPARE ] { nom_plan | ALL }" #~ msgid "" -#~ "CREATE DOMAIN name [ AS ] data_type\n" -#~ " [ DEFAULT expression ]\n" -#~ " [ constraint [ ... ] ]\n" -#~ "\n" -#~ "where constraint is:\n" -#~ "\n" -#~ "[ CONSTRAINT constraint_name ]\n" -#~ "{ NOT NULL | NULL | CHECK (expression) }" +#~ "DECLARE name [ BINARY ] [ INSENSITIVE ] [ [ NO ] SCROLL ]\n" +#~ " CURSOR [ { WITH | WITHOUT } HOLD ] FOR query" #~ msgstr "" -#~ "CREATE DOMAIN nom [AS] type_données\n" -#~ " [ DEFAULT expression ]\n" -#~ " [ contrainte [ ... ] ]\n" -#~ "\n" -#~ "avec comme contrainte :\n" -#~ "\n" -#~ "[ CONSTRAINT nom_contrainte ]\n" -#~ "{ NOT NULL | NULL | CHECK (expression) }" +#~ "DECLARE nom [ BINARY ] [ INSENSITIVE ] [ [ NO ] SCROLL ]\n" +#~ " CURSOR [ { WITH | WITHOUT } HOLD ] FOR requête" #~ msgid "" -#~ "CREATE DATABASE name\n" -#~ " [ [ WITH ] [ OWNER [=] dbowner ]\n" -#~ " [ TEMPLATE [=] template ]\n" -#~ " [ ENCODING [=] encoding ]\n" -#~ " [ LC_COLLATE [=] lc_collate ]\n" -#~ " [ LC_CTYPE [=] lc_ctype ]\n" -#~ " [ TABLESPACE [=] tablespace ]\n" -#~ " [ CONNECTION LIMIT [=] connlimit ] ]" +#~ "DELETE FROM [ ONLY ] table [ [ AS ] alias ]\n" +#~ " [ USING usinglist ]\n" +#~ " [ WHERE condition | WHERE CURRENT OF cursor_name ]\n" +#~ " [ RETURNING * | output_expression [ [ AS ] output_name ] [, ...] ]" #~ msgstr "" -#~ "CREATE DATABASE nom\n" -#~ " [ [ WITH ] [ OWNER [=] nom_propriétaire ]\n" -#~ " [ TEMPLATE [=] modèle ]\n" -#~ " [ ENCODING [=] encodage ]\n" -#~ " [ LC_COLLATE [=] tri_caract ]\n" -#~ " [ LC_CTYPE [=] type_caract ]\n" -#~ " [ TABLESPACE [=] tablespace ]\n" -#~ " [ CONNECTION LIMIT [=] limite_connexion ] ]" +#~ "DELETE FROM [ ONLY ] table [ [ AS ] alias ]\n" +#~ " [ USING liste_using ]\n" +#~ " [ WHERE condition | WHERE CURRENT OF nom_curseur ]\n" +#~ " [ RETURNING * | expression_sortie [ [ AS ] nom_sortie ] [, ...] ]" + +#~ msgid "DISCARD { ALL | PLANS | TEMPORARY | TEMP }" +#~ msgstr "DISCARD { ALL | PLANS | TEMPORARY | TEMP }" + +#~ msgid "DROP AGGREGATE [ IF EXISTS ] name ( type [ , ... ] ) [ CASCADE | RESTRICT ]" +#~ msgstr "DROP AGGREGATE [ IF EXISTS ] nom ( type [ , ... ] ) [ CASCADE | RESTRICT ]" + +#~ msgid "DROP CAST [ IF EXISTS ] (sourcetype AS targettype) [ CASCADE | RESTRICT ]" +#~ msgstr "DROP CAST [ IF EXISTS ] (type_source AS type_cible) [ CASCADE | RESTRICT ]" + +#~ msgid "DROP CONVERSION [ IF EXISTS ] name [ CASCADE | RESTRICT ]" +#~ msgstr "DROP CONVERSION [ IF EXISTS ] nom [ CASCADE | RESTRICT ]" + +#~ msgid "DROP DATABASE [ IF EXISTS ] name" +#~ msgstr "DROP DATABASE [ IF EXISTS ] nom" + +#~ msgid "DROP DOMAIN [ IF EXISTS ] name [, ...] [ CASCADE | RESTRICT ]" +#~ msgstr "DROP DOMAIN [ IF EXISTS ] nom [, ...] 
[ CASCADE | RESTRICT ]" + +#~ msgid "DROP FOREIGN DATA WRAPPER [ IF EXISTS ] name [ CASCADE | RESTRICT ]" +#~ msgstr "DROP FOREIGN DATA WRAPPER [ IF EXISTS ] nom [ CASCADE | RESTRICT ]" + +#~ msgid "" +#~ "DROP FUNCTION [ IF EXISTS ] name ( [ [ argmode ] [ argname ] argtype [, ...] ] )\n" +#~ " [ CASCADE | RESTRICT ]" +#~ msgstr "" +#~ "DROP FUNCTION [IF EXISTS ] nom\n" +#~ " ( [ [ mode_arg ] [ nom_arg ] type_arg [, ...] ] )\n" +#~ " [ CASCADE | RESTRICT ]" + +#~ msgid "DROP GROUP [ IF EXISTS ] name [, ...]" +#~ msgstr "DROP GROUP [IF EXISTS ] nom [, ...]" + +#~ msgid "DROP INDEX [ IF EXISTS ] name [, ...] [ CASCADE | RESTRICT ]" +#~ msgstr "DROP INDEX [IF EXISTS ] nom [, ...] [ CASCADE | RESTRICT ]" + +#~ msgid "DROP [ PROCEDURAL ] LANGUAGE [ IF EXISTS ] name [ CASCADE | RESTRICT ]" +#~ msgstr "DROP [ PROCEDURAL ] LANGUAGE [IF EXISTS ] nom [ CASCADE | RESTRICT ]" + +#~ msgid "DROP OPERATOR [ IF EXISTS ] name ( { lefttype | NONE } , { righttype | NONE } ) [ CASCADE | RESTRICT ]" +#~ msgstr "" +#~ "DROP OPERATOR [IF EXISTS ] nom\n" +#~ " ( { type_gauche | NONE } , { type_droit | NONE } )\n" +#~ " [ CASCADE | RESTRICT ]" + +#~ msgid "DROP OPERATOR CLASS [ IF EXISTS ] name USING index_method [ CASCADE | RESTRICT ]" +#~ msgstr "" +#~ "DROP OPERATOR CLASS [IF EXISTS ] nom\n" +#~ " USING méthode_indexage [ CASCADE | RESTRICT ]" + +#~ msgid "DROP OPERATOR FAMILY [ IF EXISTS ] name USING index_method [ CASCADE | RESTRICT ]" +#~ msgstr "" +#~ "DROP OPERATOR FAMILY [IF EXISTS ] nom\n" +#~ " USING méthode_indexage [ CASCADE | RESTRICT ]" + +#~ msgid "DROP OWNED BY name [, ...] [ CASCADE | RESTRICT ]" +#~ msgstr "DROP OWNED BY nom [, ...] [ CASCADE | RESTRICT ]" + +#~ msgid "DROP ROLE [ IF EXISTS ] name [, ...]" +#~ msgstr "DROP ROLE [IF EXISTS ] nom [, ...]" + +#~ msgid "DROP RULE [ IF EXISTS ] name ON relation [ CASCADE | RESTRICT ]" +#~ msgstr "DROP RULE [IF EXISTS ] nom ON relation [ CASCADE | RESTRICT ]" + +#~ msgid "DROP SCHEMA [ IF EXISTS ] name [, ...] [ CASCADE | RESTRICT ]" +#~ msgstr "DROP SCHEMA [IF EXISTS ] nom [, ...] [ CASCADE | RESTRICT ]" + +#~ msgid "DROP SEQUENCE [ IF EXISTS ] name [, ...] [ CASCADE | RESTRICT ]" +#~ msgstr "DROP SEQUENCE [IF EXISTS ] nom [, ...] [ CASCADE | RESTRICT ]" + +#~ msgid "DROP SERVER [ IF EXISTS ] servername [ CASCADE | RESTRICT ]" +#~ msgstr "DROP SERVER [ IF EXISTS ] nom [ CASCADE | RESTRICT ]" + +#~ msgid "DROP TABLE [ IF EXISTS ] name [, ...] [ CASCADE | RESTRICT ]" +#~ msgstr "DROP TABLE [IF EXISTS ] nom [, ...] [ CASCADE | RESTRICT ]" + +#~ msgid "DROP TABLESPACE [ IF EXISTS ] tablespacename" +#~ msgstr "DROP TABLESPACE [IF EXISTS ] nom_tablespace" + +#~ msgid "DROP TEXT SEARCH CONFIGURATION [ IF EXISTS ] name [ CASCADE | RESTRICT ]" +#~ msgstr "DROP TEXT SEARCH CONFIGURATION [ IF EXISTS ] nom [ CASCADE | RESTRICT ]" + +#~ msgid "DROP TEXT SEARCH DICTIONARY [ IF EXISTS ] name [ CASCADE | RESTRICT ]" +#~ msgstr "DROP TEXT SEARCH DICTIONARY [ IF EXISTS ] nom [ CASCADE | RESTRICT ]" + +#~ msgid "DROP TEXT SEARCH PARSER [ IF EXISTS ] name [ CASCADE | RESTRICT ]" +#~ msgstr "DROP TEXT SEARCH PARSER [ IF EXISTS ] nom [ CASCADE | RESTRICT ]" + +#~ msgid "DROP TEXT SEARCH TEMPLATE [ IF EXISTS ] name [ CASCADE | RESTRICT ]" +#~ msgstr "DROP TEXT SEARCH TEMPLATE [ IF EXISTS ] nom [ CASCADE | RESTRICT ]" + +#~ msgid "DROP TRIGGER [ IF EXISTS ] name ON table [ CASCADE | RESTRICT ]" +#~ msgstr "DROP TRIGGER [IF EXISTS ] nom ON table [ CASCADE | RESTRICT ]" + +#~ msgid "DROP TYPE [ IF EXISTS ] name [, ...] 
[ CASCADE | RESTRICT ]" +#~ msgstr "DROP TYPE [IF EXISTS ] nom [, ...] [ CASCADE | RESTRICT ]" + +#~ msgid "DROP USER [ IF EXISTS ] name [, ...]" +#~ msgstr "DROP USER [IF EXISTS ] nom [, ...]" + +#~ msgid "DROP USER MAPPING [ IF EXISTS ] FOR { username | USER | CURRENT_USER | PUBLIC } SERVER servername" +#~ msgstr "DROP USER MAPPING [ IF EXISTS ] FOR { nomutilisateur | USER | CURRENT_USER | PUBLIC } SERVER nomserveur" + +#~ msgid "DROP VIEW [ IF EXISTS ] name [, ...] [ CASCADE | RESTRICT ]" +#~ msgstr "DROP VIEW [IF EXISTS ] nom [, ...] [ CASCADE | RESTRICT ]" + +#~ msgid "END [ WORK | TRANSACTION ]" +#~ msgstr "END [ WORK | TRANSACTION ]" -#~ msgid "" -#~ "CREATE [ DEFAULT ] CONVERSION name\n" -#~ " FOR source_encoding TO dest_encoding FROM funcname" -#~ msgstr "" -#~ "CREATE [DEFAULT] CONVERSION nom\n" -#~ " FOR codage_source TO codage_cible FROM nom_fonction" +#~ msgid "EXECUTE name [ ( parameter [, ...] ) ]" +#~ msgstr "EXECUTE nom_plan [ ( paramètre [, ...] ) ]" -#~ msgid "" -#~ "CREATE CONSTRAINT TRIGGER name\n" -#~ " AFTER event [ OR ... ]\n" -#~ " ON table_name\n" -#~ " [ FROM referenced_table_name ]\n" -#~ " { NOT DEFERRABLE | [ DEFERRABLE ] { INITIALLY IMMEDIATE | INITIALLY DEFERRED } }\n" -#~ " FOR EACH ROW\n" -#~ " EXECUTE PROCEDURE funcname ( arguments )" -#~ msgstr "" -#~ "CREATE CONSTRAINT TRIGGER nom\n" -#~ " AFTER événement [ OR ... ]\n" -#~ " ON table\n" -#~ " [ FROM table_référencée ]\n" -#~ " { NOT DEFERRABLE | [ DEFERRABLE ] { INITIALLY IMMEDIATE | INITIALLY DEFERRED } }\n" -#~ " FOR EACH ROW\n" -#~ " EXECUTE PROCEDURE nom_fonction ( arguments )" +#~ msgid "EXPLAIN [ ANALYZE ] [ VERBOSE ] statement" +#~ msgstr "EXPLAIN [ ANALYZE ] [ VERBOSE ] instruction" #~ msgid "" -#~ "CREATE CAST (sourcetype AS targettype)\n" -#~ " WITH FUNCTION funcname (argtypes)\n" -#~ " [ AS ASSIGNMENT | AS IMPLICIT ]\n" +#~ "FETCH [ direction { FROM | IN } ] cursorname\n" #~ "\n" -#~ "CREATE CAST (sourcetype AS targettype)\n" -#~ " WITHOUT FUNCTION\n" -#~ " [ AS ASSIGNMENT | AS IMPLICIT ]\n" +#~ "where direction can be empty or one of:\n" #~ "\n" -#~ "CREATE CAST (sourcetype AS targettype)\n" -#~ " WITH INOUT\n" -#~ " [ AS ASSIGNMENT | AS IMPLICIT ]" +#~ " NEXT\n" +#~ " PRIOR\n" +#~ " FIRST\n" +#~ " LAST\n" +#~ " ABSOLUTE count\n" +#~ " RELATIVE count\n" +#~ " count\n" +#~ " ALL\n" +#~ " FORWARD\n" +#~ " FORWARD count\n" +#~ " FORWARD ALL\n" +#~ " BACKWARD\n" +#~ " BACKWARD count\n" +#~ " BACKWARD ALL" #~ msgstr "" -#~ "CREATE CAST (type_source AS type_cible)\n" -#~ " WITH FUNCTION nom_fonction (type_argument)\n" -#~ " [ AS ASSIGNMENT | AS IMPLICIT ]\n" +#~ "FETCH [ direction { FROM | IN } ] nom_curseur\n" #~ "\n" -#~ "CREATE CAST (type_source AS type_cible)\n" -#~ " WITHOUT FUNCTION\n" -#~ " [ AS ASSIGNMENT | AS IMPLICIT ]\n" +#~ "sans préciser de direction ou en choissant une des directions suivantes :\n" #~ "\n" -#~ "CREATE CAST (type_source AS type_cible)\n" -#~ " WITH INOUT\n" -#~ " [ AS ASSIGNMENT | AS IMPLICIT ]" +#~ " NEXT\n" +#~ " PRIOR\n" +#~ " FIRST\n" +#~ " LAST\n" +#~ " ABSOLUTE nombre\n" +#~ " RELATIVE nombre\n" +#~ " count\n" +#~ " ALL\n" +#~ " FORWARD\n" +#~ " FORWARD nombre\n" +#~ " FORWARD ALL\n" +#~ " BACKWARD\n" +#~ " BACKWARD nombre\n" +#~ " BACKWARD ALL" #~ msgid "" -#~ "CREATE AGGREGATE name ( input_data_type [ , ... 
] ) (\n" -#~ " SFUNC = sfunc,\n" -#~ " STYPE = state_data_type\n" -#~ " [ , FINALFUNC = ffunc ]\n" -#~ " [ , INITCOND = initial_condition ]\n" -#~ " [ , SORTOP = sort_operator ]\n" -#~ ")\n" +#~ "GRANT { { SELECT | INSERT | UPDATE | DELETE | TRUNCATE | REFERENCES | TRIGGER }\n" +#~ " [,...] | ALL [ PRIVILEGES ] }\n" +#~ " ON [ TABLE ] tablename [, ...]\n" +#~ " TO { [ GROUP ] rolename | PUBLIC } [, ...] [ WITH GRANT OPTION ]\n" #~ "\n" -#~ "or the old syntax\n" +#~ "GRANT { { SELECT | INSERT | UPDATE | REFERENCES } ( column [, ...] )\n" +#~ " [,...] | ALL [ PRIVILEGES ] ( column [, ...] ) }\n" +#~ " ON [ TABLE ] tablename [, ...]\n" +#~ " TO { [ GROUP ] rolename | PUBLIC } [, ...] [ WITH GRANT OPTION ]\n" #~ "\n" -#~ "CREATE AGGREGATE name (\n" -#~ " BASETYPE = base_type,\n" -#~ " SFUNC = sfunc,\n" -#~ " STYPE = state_data_type\n" -#~ " [ , FINALFUNC = ffunc ]\n" -#~ " [ , INITCOND = initial_condition ]\n" -#~ " [ , SORTOP = sort_operator ]\n" -#~ ")" -#~ msgstr "" -#~ "CREATE AGGREGATE nom ( type_données_en_entrée [ , ... ] ) (\n" -#~ " SFUNC = sfonction,\n" -#~ " STYPE = type_données_état\n" -#~ " [ , FINALFUNC = fonction_f ]\n" -#~ " [ , INITCOND = condition_initiale ]\n" -#~ " [ , SORTOP = opérateur_tri ]\n" -#~ ")\n" +#~ "GRANT { { USAGE | SELECT | UPDATE }\n" +#~ " [,...] | ALL [ PRIVILEGES ] }\n" +#~ " ON SEQUENCE sequencename [, ...]\n" +#~ " TO { [ GROUP ] rolename | PUBLIC } [, ...] [ WITH GRANT OPTION ]\n" #~ "\n" -#~ "ou l'ancienne syntaxe\n" +#~ "GRANT { { CREATE | CONNECT | TEMPORARY | TEMP } [,...] | ALL [ PRIVILEGES ] }\n" +#~ " ON DATABASE dbname [, ...]\n" +#~ " TO { [ GROUP ] rolename | PUBLIC } [, ...] [ WITH GRANT OPTION ]\n" #~ "\n" -#~ "CREATE AGGREGATE nom (\n" -#~ " BASETYPE = type_base,\n" -#~ " SFUNC = fonction_s,\n" -#~ " STYPE = type_données_état\n" -#~ " [ , FINALFUNC = fonction_f ]\n" -#~ " [ , INITCOND = condition_initiale ]\n" -#~ " [ , SORTOP = opérateur_tri ]\n" -#~ ")" - -#~ msgid "" -#~ "COPY tablename [ ( column [, ...] ) ]\n" -#~ " FROM { 'filename' | STDIN }\n" -#~ " [ [ WITH ] \n" -#~ " [ BINARY ]\n" -#~ " [ OIDS ]\n" -#~ " [ DELIMITER [ AS ] 'delimiter' ]\n" -#~ " [ NULL [ AS ] 'null string' ]\n" -#~ " [ CSV [ HEADER ]\n" -#~ " [ QUOTE [ AS ] 'quote' ] \n" -#~ " [ ESCAPE [ AS ] 'escape' ]\n" -#~ " [ FORCE NOT NULL column [, ...] ]\n" +#~ "GRANT { USAGE | ALL [ PRIVILEGES ] }\n" +#~ " ON FOREIGN DATA WRAPPER fdwname [, ...]\n" +#~ " TO { [ GROUP ] rolename | PUBLIC } [, ...] [ WITH GRANT OPTION ]\n" #~ "\n" -#~ "COPY { tablename [ ( column [, ...] ) ] | ( query ) }\n" -#~ " TO { 'filename' | STDOUT }\n" -#~ " [ [ WITH ] \n" -#~ " [ BINARY ]\n" -#~ " [ OIDS ]\n" -#~ " [ DELIMITER [ AS ] 'delimiter' ]\n" -#~ " [ NULL [ AS ] 'null string' ]\n" -#~ " [ CSV [ HEADER ]\n" -#~ " [ QUOTE [ AS ] 'quote' ] \n" -#~ " [ ESCAPE [ AS ] 'escape' ]\n" -#~ " [ FORCE QUOTE column [, ...] ]" -#~ msgstr "" -#~ "COPY nom_table [ ( colonne [, ...] ) ]\n" -#~ " FROM { 'nom_fichier' | STDIN }\n" -#~ " [ [ WITH ] \n" -#~ " [ BINARY ]\n" -#~ " [ OIDS ]\n" -#~ " [ DELIMITER [ AS ] 'délimiteur' ]\n" -#~ " [ NULL [ AS ] 'chaîne null' ]\n" -#~ " [ CSV [ HEADER ]\n" -#~ " [ QUOTE [ AS ] 'guillemet' ] \n" -#~ " [ ESCAPE [ AS ] 'échappement' ]\n" -#~ " [ FORCE NOT NULL colonne [, ...] ]\n" +#~ "GRANT { USAGE | ALL [ PRIVILEGES ] }\n" +#~ " ON FOREIGN SERVER servername [, ...]\n" +#~ " TO { [ GROUP ] rolename | PUBLIC } [, ...] [ WITH GRANT OPTION ]\n" #~ "\n" -#~ "COPY { nom_table [ ( colonne [, ...] 
) ] | ( requête ) }\n" -#~ " TO { 'nom_fichier' | STDOUT }\n" -#~ " [ [ WITH ] \n" -#~ " [ BINARY ]\n" -#~ " [ OIDS ]\n" -#~ " [ DELIMITER [ AS ] 'délimiteur' ]\n" -#~ " [ NULL [ AS ] 'chaîne null' ]\n" -#~ " [ CSV [ HEADER ]\n" -#~ " [ QUOTE [ AS ] 'guillemet' ] \n" -#~ " [ ESCAPE [ AS ] 'échappement' ]\n" -#~ " [ FORCE QUOTE colonne [, ...] ]" - -#~ msgid "COMMIT PREPARED transaction_id" -#~ msgstr "COMMIT PREPARED id_transaction" - -#~ msgid "COMMIT [ WORK | TRANSACTION ]" -#~ msgstr "COMMIT [ WORK | TRANSACTION ]" - -#~ msgid "" -#~ "COMMENT ON\n" -#~ "{\n" -#~ " TABLE object_name |\n" -#~ " COLUMN table_name.column_name |\n" -#~ " AGGREGATE agg_name (agg_type [, ...] ) |\n" -#~ " CAST (sourcetype AS targettype) |\n" -#~ " CONSTRAINT constraint_name ON table_name |\n" -#~ " CONVERSION object_name |\n" -#~ " DATABASE object_name |\n" -#~ " DOMAIN object_name |\n" -#~ " FUNCTION func_name ( [ [ argmode ] [ argname ] argtype [, ...] ] ) |\n" -#~ " INDEX object_name |\n" -#~ " LARGE OBJECT large_object_oid |\n" -#~ " OPERATOR op (leftoperand_type, rightoperand_type) |\n" -#~ " OPERATOR CLASS object_name USING index_method |\n" -#~ " OPERATOR FAMILY object_name USING index_method |\n" -#~ " [ PROCEDURAL ] LANGUAGE object_name |\n" -#~ " ROLE object_name |\n" -#~ " RULE rule_name ON table_name |\n" -#~ " SCHEMA object_name |\n" -#~ " SEQUENCE object_name |\n" -#~ " TABLESPACE object_name |\n" -#~ " TEXT SEARCH CONFIGURATION object_name |\n" -#~ " TEXT SEARCH DICTIONARY object_name |\n" -#~ " TEXT SEARCH PARSER object_name |\n" -#~ " TEXT SEARCH TEMPLATE object_name |\n" -#~ " TRIGGER trigger_name ON table_name |\n" -#~ " TYPE object_name |\n" -#~ " VIEW object_name\n" -#~ "} IS 'text'" -#~ msgstr "" -#~ "COMMENT ON\n" -#~ "{\n" -#~ " TABLE nom_objet |\n" -#~ " COLUMN nom_table.nom_colonne |\n" -#~ " AGGREGATE nom_agg (type_agg [, ...] ) |\n" -#~ " CAST (type_source AS type_cible) |\n" -#~ " CONSTRAINT nom_contrainte ON nom_table |\n" -#~ " CONVERSION nom_objet |\n" -#~ " DATABASE nom_objet |\n" -#~ " DOMAIN nom_objet |\n" -#~ " FUNCTION nom_fonction ( [ [ mode_arg ] [ nom_arg ] type_arg [, ...] ] ) |\n" -#~ " INDEX nom_objet |\n" -#~ " LARGE OBJECT oid_LO |\n" -#~ " OPERATOR op (type_operande_gauche, type_operande_droit) |\n" -#~ " OPERATOR CLASS nom_objet USING methode_indexage |\n" -#~ " OPERATOR FAMILY nom_objet USING methode_indexage |\n" -#~ " [ PROCEDURAL ] LANGUAGE nom_objet |\n" -#~ " ROLE nom_objet |\n" -#~ " RULE nom_regle ON nom_table |\n" -#~ " SCHEMA nom_objet |\n" -#~ " SEQUENCE nom_objet |\n" -#~ " TABLESPACE nom_objet |\n" -#~ " TEXT SEARCH CONFIGURATION nom_objet |\n" -#~ " TEXT SEARCH DICTIONARY nom_objet |\n" -#~ " TEXT SEARCH PARSER nom_objet |\n" -#~ " TEXT SEARCH TEMPLATE nom_objet |\n" -#~ " TRIGGER nom_trigger ON nom_objet |\n" -#~ " TYPE nom_objet |\n" -#~ " VIEW nom_objet\n" -#~ "} IS 'text'" +#~ "GRANT { EXECUTE | ALL [ PRIVILEGES ] }\n" +#~ " ON FUNCTION funcname ( [ [ argmode ] [ argname ] argtype [, ...] ] ) [, ...]\n" +#~ " TO { [ GROUP ] rolename | PUBLIC } [, ...] [ WITH GRANT OPTION ]\n" +#~ "\n" +#~ "GRANT { USAGE | ALL [ PRIVILEGES ] }\n" +#~ " ON LANGUAGE langname [, ...]\n" +#~ " TO { [ GROUP ] rolename | PUBLIC } [, ...] [ WITH GRANT OPTION ]\n" +#~ "\n" +#~ "GRANT { { CREATE | USAGE } [,...] | ALL [ PRIVILEGES ] }\n" +#~ " ON SCHEMA schemaname [, ...]\n" +#~ " TO { [ GROUP ] rolename | PUBLIC } [, ...] 
[ WITH GRANT OPTION ]\n" +#~ "\n" +#~ "GRANT { CREATE | ALL [ PRIVILEGES ] }\n" +#~ " ON TABLESPACE tablespacename [, ...]\n" +#~ " TO { [ GROUP ] rolename | PUBLIC } [, ...] [ WITH GRANT OPTION ]\n" +#~ "\n" +#~ "GRANT role [, ...] TO rolename [, ...] [ WITH ADMIN OPTION ]" +#~ msgstr "" +#~ "GRANT { { SELECT | INSERT | UPDATE | DELETE | TRUNCATE | REFERENCES | TRIGGER }\n" +#~ " [,...] | ALL [ PRIVILEGES ] }\n" +#~ " ON [ TABLE ] nom_table [, ...]\n" +#~ " TO { [ GROUP ] nom_rôle | PUBLIC } [, ...] [ WITH GRANT OPTION ]\n" +#~ "\n" +#~ "GRANT { { SELECT | INSERT | UPDATE | REFERENCES } ( colonne [, ...] )\n" +#~ " [,...] | ALL [ PRIVILEGES ] ( colonne [, ...] ) }\n" +#~ " ON [ TABLE ] nom_table [, ...]\n" +#~ " TO { [ GROUP ] nom_rôle | PUBLIC } [, ...] [ WITH GRANT OPTION ]\n" +#~ "\n" +#~ "GRANT { { USAGE | SELECT | UPDATE }\n" +#~ " [,...] | ALL [ PRIVILEGES ] }\n" +#~ " ON SEQUENCE nom_séquence [, ...]\n" +#~ " TO { [ GROUP ] nom_rôle | PUBLIC } [, ...] [ WITH GRANT OPTION ]\n" +#~ "\n" +#~ "GRANT { { CREATE | CONNECT | TEMPORARY | TEMP } [,...] | ALL [ PRIVILEGES ] }\n" +#~ " ON DATABASE nom_base [, ...]\n" +#~ " TO { [ GROUP ] nom_rôle | PUBLIC } [, ...] [ WITH GRANT OPTION ]\n" +#~ "\n" +#~ "GRANT { USAGE | ALL [ PRIVILEGES ] }\n" +#~ " ON FOREIGN DATA WRAPPER nomfdw [, ...]\n" +#~ " TO { [ GROUP ] nom_rôle | PUBLIC } [, ...] [ WITH GRANT OPTION ]\n" +#~ "\n" +#~ "GRANT { USAGE | ALL [ PRIVILEGES ] }\n" +#~ " ON FOREIGN SERVER nom_serveur [, ...]\n" +#~ " TO { [ GROUP ] nom_rôle | PUBLIC } [, ...] [ WITH GRANT OPTION ]\n" +#~ "\n" +#~ "GRANT { EXECUTE | ALL [ PRIVILEGES ] }\n" +#~ " ON FUNCTION nom_fonction ( [ [ mode_arg ] [ nom_arg ] type_arg [, ...] ] ) [, ...]\n" +#~ " TO { [ GROUP ] nom_rôle | PUBLIC } [, ...] [ WITH GRANT OPTION ]\n" +#~ "\n" +#~ "GRANT { USAGE | ALL [ PRIVILEGES ] }\n" +#~ " ON LANGUAGE nom_langage [, ...]\n" +#~ " TO { [ GROUP ] nom_rôle | PUBLIC } [, ...] [ WITH GRANT OPTION ]\n" +#~ "\n" +#~ "GRANT { { CREATE | USAGE } [,...] | ALL [ PRIVILEGES ] }\n" +#~ " ON SCHEMA nom_schéma [, ...]\n" +#~ " TO { [ GROUP ] nom_rôle | PUBLIC } [, ...] [ WITH GRANT OPTION ]\n" +#~ "\n" +#~ "GRANT { CREATE | ALL [ PRIVILEGES ] }\n" +#~ " ON TABLESPACE nom_tablespace [, ...]\n" +#~ " TO { [ GROUP ] nom_rôle | PUBLIC } [, ...] [ WITH GRANT OPTION ]\n" +#~ "\n" +#~ "GRANT rôle [, ...] TO nom_rôle [, ...] [ WITH ADMIN OPTION ]" #~ msgid "" -#~ "CLUSTER [VERBOSE] tablename [ USING indexname ]\n" -#~ "CLUSTER [VERBOSE]" +#~ "INSERT INTO table [ ( column [, ...] ) ]\n" +#~ " { DEFAULT VALUES | VALUES ( { expression | DEFAULT } [, ...] ) [, ...] | query }\n" +#~ " [ RETURNING * | output_expression [ [ AS ] output_name ] [, ...] ]" #~ msgstr "" -#~ "CLUSTER [VERBOSE] nom_table [ USING nom_index ]\n" -#~ "CLUSTER [VERBOSE]" +#~ "INSERT INTO table [ ( colonne [, ...] ) ]\n" +#~ " { DEFAULT VALUES | VALUES ( { expression | DEFAULT } [, ...] ) [, ...] | requête }\n" +#~ " [ RETURNING * | expression_sortie [ [ AS ] nom_sortie ] [, ...] ]" -#~ msgid "CLOSE { name | ALL }" -#~ msgstr "CLOSE { nom | ALL }" +#~ msgid "LISTEN name" +#~ msgstr "LISTEN nom" -#~ msgid "CHECKPOINT" -#~ msgstr "CHECKPOINT" +#~ msgid "LOAD 'filename'" +#~ msgstr "LOAD 'nom_de_fichier'" #~ msgid "" -#~ "BEGIN [ WORK | TRANSACTION ] [ transaction_mode [, ...] ]\n" +#~ "LOCK [ TABLE ] [ ONLY ] name [, ...] 
[ IN lockmode MODE ] [ NOWAIT ]\n" #~ "\n" -#~ "where transaction_mode is one of:\n" +#~ "where lockmode is one of:\n" #~ "\n" -#~ " ISOLATION LEVEL { SERIALIZABLE | REPEATABLE READ | READ COMMITTED | READ UNCOMMITTED }\n" -#~ " READ WRITE | READ ONLY" +#~ " ACCESS SHARE | ROW SHARE | ROW EXCLUSIVE | SHARE UPDATE EXCLUSIVE\n" +#~ " | SHARE | SHARE ROW EXCLUSIVE | EXCLUSIVE | ACCESS EXCLUSIVE" #~ msgstr "" -#~ "BEGIN [ WORK | TRANSACTION ] [ transaction_mode [, ...] ]\n" +#~ "LOCK [ TABLE ] [ ONLY ] nom [, ...] [ IN mode_verrouillage MODE ] [ NOWAIT ]\n" #~ "\n" -#~ "où transaction_mode peut être :\n" +#~ "avec mode_verrouillage parmi :\n" #~ "\n" -#~ " ISOLATION LEVEL { SERIALIZABLE | REPEATABLE READ |\n" -#~ " READ COMMITTED | READ UNCOMMITTED }\n" -#~ " READ WRITE | READ ONLY" +#~ " ACCESS SHARE | ROW SHARE | ROW EXCLUSIVE | SHARE UPDATE EXCLUSIVE\n" +#~ " | SHARE | SHARE ROW EXCLUSIVE | EXCLUSIVE | ACCESS EXCLUSIVE" -#~ msgid "ANALYZE [ VERBOSE ] [ table [ ( column [, ...] ) ] ]" -#~ msgstr "ANALYZE [ VERBOSE ] [ table [ ( colonne [, ...] ) ] ]" +#~ msgid "MOVE [ direction { FROM | IN } ] cursorname" +#~ msgstr "MOVE [ direction { FROM | IN } ] nom_de_curseur" -#~ msgid "" -#~ "ALTER VIEW name ALTER [ COLUMN ] column SET DEFAULT expression\n" -#~ "ALTER VIEW name ALTER [ COLUMN ] column DROP DEFAULT\n" -#~ "ALTER VIEW name OWNER TO new_owner\n" -#~ "ALTER VIEW name RENAME TO new_name\n" -#~ "ALTER VIEW name SET SCHEMA new_schema" -#~ msgstr "" -#~ "ALTER VIEW nom ALTER [ COLUMN ] colonne SET DEFAULT expression\n" -#~ "ALTER VIEW nom ALTER [ COLUMN ] colonne DROP DEFAULT\n" -#~ "ALTER VIEW nom OWNER TO nouveau_propriétaire\n" -#~ "ALTER VIEW nom RENAME TO nouveau_nom\n" -#~ "ALTER VIEW nom SET SCHEMA nouveau_schéma" +#~ msgid "NOTIFY name" +#~ msgstr "NOTIFY nom" -#~ msgid "" -#~ "ALTER USER MAPPING FOR { username | USER | CURRENT_USER | PUBLIC }\n" -#~ " SERVER servername\n" -#~ " OPTIONS ( [ ADD | SET | DROP ] option ['value'] [, ... ] )" -#~ msgstr "" -#~ "ALTER USER MAPPING FOR { nom_utilisateur | USER | CURRENT_USER | PUBLIC }\n" -#~ " SERVER nom_serveur\n" -#~ " OPTIONS ( [ ADD | SET | DROP ] option ['valeur'] [, ... ] )" +#~ msgid "PREPARE name [ ( datatype [, ...] ) ] AS statement" +#~ msgstr "PREPARE nom_plan [ ( type_données [, ...] ) ] AS instruction" + +#~ msgid "PREPARE TRANSACTION transaction_id" +#~ msgstr "PREPARE TRANSACTION id_transaction" + +#~ msgid "REASSIGN OWNED BY old_role [, ...] TO new_role" +#~ msgstr "REASSIGN OWNED BY ancien_role [, ...] TO nouveau_role" + +#~ msgid "REINDEX { INDEX | TABLE | DATABASE | SYSTEM } name [ FORCE ]" +#~ msgstr "REINDEX { INDEX | TABLE | DATABASE | SYSTEM } nom [ FORCE ]" + +#~ msgid "RELEASE [ SAVEPOINT ] savepoint_name" +#~ msgstr "RELEASE [ SAVEPOINT ] nom_retour" #~ msgid "" -#~ "ALTER USER name [ [ WITH ] option [ ... ] ]\n" +#~ "REVOKE [ GRANT OPTION FOR ]\n" +#~ " { { SELECT | INSERT | UPDATE | DELETE | TRUNCATE | REFERENCES | TRIGGER }\n" +#~ " [,...] 
| ALL [ PRIVILEGES ] }\n" +#~ " ON [ TABLE ] tablename [, ...]\n" +#~ " FROM { [ GROUP ] rolename | PUBLIC } [, ...]\n" +#~ " [ CASCADE | RESTRICT ]\n" #~ "\n" -#~ "where option can be:\n" -#~ " \n" -#~ " SUPERUSER | NOSUPERUSER\n" -#~ " | CREATEDB | NOCREATEDB\n" -#~ " | CREATEROLE | NOCREATEROLE\n" -#~ " | CREATEUSER | NOCREATEUSER\n" -#~ " | INHERIT | NOINHERIT\n" -#~ " | LOGIN | NOLOGIN\n" -#~ " | CONNECTION LIMIT connlimit\n" -#~ " | [ ENCRYPTED | UNENCRYPTED ] PASSWORD 'password'\n" -#~ " | VALID UNTIL 'timestamp' \n" +#~ "REVOKE [ GRANT OPTION FOR ]\n" +#~ " { { SELECT | INSERT | UPDATE | REFERENCES } ( column [, ...] )\n" +#~ " [,...] | ALL [ PRIVILEGES ] ( column [, ...] ) }\n" +#~ " ON [ TABLE ] tablename [, ...]\n" +#~ " FROM { [ GROUP ] rolename | PUBLIC } [, ...]\n" +#~ " [ CASCADE | RESTRICT ]\n" #~ "\n" -#~ "ALTER USER name RENAME TO newname\n" +#~ "REVOKE [ GRANT OPTION FOR ]\n" +#~ " { { USAGE | SELECT | UPDATE }\n" +#~ " [,...] | ALL [ PRIVILEGES ] }\n" +#~ " ON SEQUENCE sequencename [, ...]\n" +#~ " FROM { [ GROUP ] rolename | PUBLIC } [, ...]\n" +#~ " [ CASCADE | RESTRICT ]\n" #~ "\n" -#~ "ALTER USER name SET configuration_parameter { TO | = } { value | DEFAULT }\n" -#~ "ALTER USER name SET configuration_parameter FROM CURRENT\n" -#~ "ALTER USER name RESET configuration_parameter\n" -#~ "ALTER USER name RESET ALL" +#~ "REVOKE [ GRANT OPTION FOR ]\n" +#~ " { { CREATE | CONNECT | TEMPORARY | TEMP } [,...] | ALL [ PRIVILEGES ] }\n" +#~ " ON DATABASE dbname [, ...]\n" +#~ " FROM { [ GROUP ] rolename | PUBLIC } [, ...]\n" +#~ " [ CASCADE | RESTRICT ]\n" +#~ "\n" +#~ "REVOKE [ GRANT OPTION FOR ]\n" +#~ " { USAGE | ALL [ PRIVILEGES ] }\n" +#~ " ON FOREIGN DATA WRAPPER fdwname [, ...]\n" +#~ " FROM { [ GROUP ] rolename | PUBLIC } [, ...]\n" +#~ " [ CASCADE | RESTRICT ]\n" +#~ "\n" +#~ "REVOKE [ GRANT OPTION FOR ]\n" +#~ " { USAGE | ALL [ PRIVILEGES ] }\n" +#~ " ON FOREIGN SERVER servername [, ...]\n" +#~ " FROM { [ GROUP ] rolename | PUBLIC } [, ...]\n" +#~ " [ CASCADE | RESTRICT ]\n" +#~ "\n" +#~ "REVOKE [ GRANT OPTION FOR ]\n" +#~ " { EXECUTE | ALL [ PRIVILEGES ] }\n" +#~ " ON FUNCTION funcname ( [ [ argmode ] [ argname ] argtype [, ...] ] ) [, ...]\n" +#~ " FROM { [ GROUP ] rolename | PUBLIC } [, ...]\n" +#~ " [ CASCADE | RESTRICT ]\n" +#~ "\n" +#~ "REVOKE [ GRANT OPTION FOR ]\n" +#~ " { USAGE | ALL [ PRIVILEGES ] }\n" +#~ " ON LANGUAGE langname [, ...]\n" +#~ " FROM { [ GROUP ] rolename | PUBLIC } [, ...]\n" +#~ " [ CASCADE | RESTRICT ]\n" +#~ "\n" +#~ "REVOKE [ GRANT OPTION FOR ]\n" +#~ " { { CREATE | USAGE } [,...] | ALL [ PRIVILEGES ] }\n" +#~ " ON SCHEMA schemaname [, ...]\n" +#~ " FROM { [ GROUP ] rolename | PUBLIC } [, ...]\n" +#~ " [ CASCADE | RESTRICT ]\n" +#~ "\n" +#~ "REVOKE [ GRANT OPTION FOR ]\n" +#~ " { CREATE | ALL [ PRIVILEGES ] }\n" +#~ " ON TABLESPACE tablespacename [, ...]\n" +#~ " FROM { [ GROUP ] rolename | PUBLIC } [, ...]\n" +#~ " [ CASCADE | RESTRICT ]\n" +#~ "\n" +#~ "REVOKE [ ADMIN OPTION FOR ]\n" +#~ " role [, ...] FROM rolename [, ...]\n" +#~ " [ CASCADE | RESTRICT ]" #~ msgstr "" -#~ "ALTER USER nom [ [ WITH ] option [ ... ] ]\n" +#~ "REVOKE [ GRANT OPTION FOR ]\n" +#~ " { { SELECT | INSERT | UPDATE | DELETE | TRUNCATE | REFERENCES | TRIGGER }\n" +#~ " [,...] | ALL [ PRIVILEGES ] }\n" +#~ " ON [ TABLE ] nom_table [, ...]\n" +#~ " FROM { [ GROUP ] nom_rôle | PUBLIC } [, ...]\n" +#~ " [ CASCADE | RESTRICT ]\n" +#~ "\n" +#~ "REVOKE [ GRANT OPTION FOR ]\n" +#~ " { { SELECT | INSERT | UPDATE | REFERENCES } ( colonne [, ...] )\n" +#~ " [,...] 
| ALL [ PRIVILEGES ] ( colonne [, ...] ) }\n" +#~ " ON [ TABLE ] nom_table [, ...]\n" +#~ " FROM { [ GROUP ] nom_rôle | PUBLIC } [, ...]\n" +#~ " [ CASCADE | RESTRICT ]\n" +#~ "\n" +#~ "REVOKE [ GRANT OPTION FOR ]\n" +#~ " { { USAGE | SELECT | UPDATE }\n" +#~ " [,...] | ALL [ PRIVILEGES ] }\n" +#~ " ON SEQUENCE nom_séquence [, ...]\n" +#~ " FROM { [ GROUP ] nom_rôle | PUBLIC } [, ...]\n" +#~ " [ CASCADE | RESTRICT ]\n" +#~ "\n" +#~ "REVOKE [ GRANT OPTION FOR ]\n" +#~ " { { CREATE | CONNECT | TEMPORARY | TEMP } [,...] | ALL [ PRIVILEGES ] }\n" +#~ " ON DATABASE nom_base [, ...]\n" +#~ " FROM { [ GROUP ] nom_rôle | PUBLIC } [, ...]\n" +#~ " [ CASCADE | RESTRICT ]\n" +#~ "\n" +#~ "REVOKE [ GRANT OPTION FOR ]\n" +#~ " { USAGE | ALL [ PRIVILEGES ] }\n" +#~ " ON FOREIGN DATA WRAPPER nom_fdw [, ...]\n" +#~ " FROM { [ GROUP ] nom_rôle | PUBLIC } [, ...]\n" +#~ " [ CASCADE | RESTRICT ]\n" +#~ "\n" +#~ "REVOKE [ GRANT OPTION FOR ]\n" +#~ " { USAGE | ALL [ PRIVILEGES ] }\n" +#~ " ON FOREIGN SERVER nom_serveur [, ...]\n" +#~ " FROM { [ GROUP ] nom_rôle | PUBLIC } [, ...]\n" +#~ " [ CASCADE | RESTRICT ]\n" +#~ "\n" +#~ "REVOKE [ GRANT OPTION FOR ]\n" +#~ " { EXECUTE | ALL [ PRIVILEGES ] }\n" +#~ " ON FUNCTION nom_fonction ( [ [ mode_arg ] [ nom_arg ] type_arg [, ...] ] ) [, ...]\n" +#~ " FROM { [ GROUP ] nom_rôle | PUBLIC } [, ...]\n" +#~ " [ CASCADE | RESTRICT ]\n" +#~ "\n" +#~ "REVOKE [ GRANT OPTION FOR ]\n" +#~ " { USAGE | ALL [ PRIVILEGES ] }\n" +#~ " ON LANGUAGE nom_langage [, ...]\n" +#~ " FROM { [ GROUP ] nom_rôle | PUBLIC } [, ...]\n" +#~ " [ CASCADE | RESTRICT ]\n" #~ "\n" -#~ "où option peut être :\n" -#~ " \n" -#~ " SUPERUSER | NOSUPERUSER\n" -#~ " | CREATEDB | NOCREATEDB\n" -#~ " | CREATEROLE | NOCREATEROLE\n" -#~ " | CREATEUSER | NOCREATEUSER\n" -#~ " | INHERIT | NOINHERIT\n" -#~ " | LOGIN | NOLOGIN\n" -#~ " | CONNECTION LIMIT limite_connexion\n" -#~ " | [ ENCRYPTED | UNENCRYPTED ] PASSWORD 'motdepasse'\n" -#~ " | VALID UNTIL 'timestamp' \n" +#~ "REVOKE [ GRANT OPTION FOR ]\n" +#~ " { { CREATE | USAGE } [,...] | ALL [ PRIVILEGES ] }\n" +#~ " ON SCHEMA nom_schéma [, ...]\n" +#~ " FROM { [ GROUP ] nom_rôle | PUBLIC } [, ...]\n" +#~ " [ CASCADE | RESTRICT ]\n" #~ "\n" -#~ "ALTER USER nom RENAME TO nouveau_nom\n" +#~ "REVOKE [ GRANT OPTION FOR ]\n" +#~ " { CREATE | ALL [ PRIVILEGES ] }\n" +#~ " ON TABLESPACE nom_tablespace [, ...]\n" +#~ " FROM { [ GROUP ] nom_rôle | PUBLIC } [, ...]\n" +#~ " [ CASCADE | RESTRICT ]\n" #~ "\n" -#~ "ALTER USER nom SET paramètre { TO | = } { valeur | DEFAULT }\n" -#~ "ALTER USER name SET paramètre FROM CURRENT\n" -#~ "ALTER USER nom RESET paramètre\n" -#~ "ALTER USER name RESET ALL" - -#~ msgid "" -#~ "ALTER TYPE name RENAME TO new_name\n" -#~ "ALTER TYPE name OWNER TO new_owner \n" -#~ "ALTER TYPE name SET SCHEMA new_schema" -#~ msgstr "" -#~ "ALTER TYPE nom RENAME TO nouveau_nom\n" -#~ "ALTER TYPE nom OWNER TO nouveau_propriétaire\n" -#~ "ALTER TYPE nom SET SCHEMA nouveau_schéma" - -#~ msgid "ALTER TRIGGER name ON table RENAME TO newname" -#~ msgstr "ALTER TRIGGER nom ON table RENAME TO nouveau_nom" - -#~ msgid "ALTER TEXT SEARCH TEMPLATE name RENAME TO newname" -#~ msgstr "ALTER TEXT SEARCH TEMPLATE nom RENAME TO nouveau_nom" - -#~ msgid "ALTER TEXT SEARCH PARSER name RENAME TO newname" -#~ msgstr "ALTER TEXT SEARCH PARSER nom RENAME TO nouveau_nom" +#~ "REVOKE [ ADMIN OPTION FOR ]\n" +#~ " role [, ...] FROM nom_rôle [, ...]\n" +#~ " [ CASCADE | RESTRICT ]" -#~ msgid "" -#~ "ALTER TEXT SEARCH DICTIONARY name (\n" -#~ " option [ = value ] [, ... 
]\n" -#~ ")\n" -#~ "ALTER TEXT SEARCH DICTIONARY name RENAME TO newname\n" -#~ "ALTER TEXT SEARCH DICTIONARY name OWNER TO newowner" -#~ msgstr "" -#~ "ALTER TEXT SEARCH DICTIONARY nom (\n" -#~ " option [ = valeur ] [, ... ]\n" -#~ ")\n" -#~ "ALTER TEXT SEARCH DICTIONARY nom RENAME TO nouveau_nom\n" -#~ "ALTER TEXT SEARCH DICTIONARY nom OWNER TO nouveau_propriétaire" +#~ msgid "ROLLBACK [ WORK | TRANSACTION ]" +#~ msgstr "ROLLBACK [ WORK | TRANSACTION ]" -#~ msgid "" -#~ "ALTER TEXT SEARCH CONFIGURATION name\n" -#~ " ADD MAPPING FOR token_type [, ... ] WITH dictionary_name [, ... ]\n" -#~ "ALTER TEXT SEARCH CONFIGURATION name\n" -#~ " ALTER MAPPING FOR token_type [, ... ] WITH dictionary_name [, ... ]\n" -#~ "ALTER TEXT SEARCH CONFIGURATION name\n" -#~ " ALTER MAPPING REPLACE old_dictionary WITH new_dictionary\n" -#~ "ALTER TEXT SEARCH CONFIGURATION name\n" -#~ " ALTER MAPPING FOR token_type [, ... ] REPLACE old_dictionary WITH new_dictionary\n" -#~ "ALTER TEXT SEARCH CONFIGURATION name\n" -#~ " DROP MAPPING [ IF EXISTS ] FOR token_type [, ... ]\n" -#~ "ALTER TEXT SEARCH CONFIGURATION name RENAME TO newname\n" -#~ "ALTER TEXT SEARCH CONFIGURATION name OWNER TO newowner" -#~ msgstr "" -#~ "ALTER TEXT SEARCH CONFIGURATION nom\n" -#~ " ADD MAPPING FOR type_jeton [, ... ] WITH nom_dictionnaire [, ... ]\n" -#~ "ALTER TEXT SEARCH CONFIGURATION nom\n" -#~ " ALTER MAPPING FOR type_jeton [, ... ] WITH nom_dictionnaire [, ... ]\n" -#~ "ALTER TEXT SEARCH CONFIGURATION nom\n" -#~ " ALTER MAPPING REPLACE ancien_dictionnaire WITH nouveau_dictionnaire\n" -#~ "ALTER TEXT SEARCH CONFIGURATION nom\n" -#~ " ALTER MAPPING FOR type_jeton [, ... ]\n" -#~ " REPLACE ancien_dictionnaire WITH nouveau_dictionnaire\n" -#~ "ALTER TEXT SEARCH CONFIGURATION nom\n" -#~ " DROP MAPPING [ IF EXISTS ] FOR type_jeton [, ... ]\n" -#~ "ALTER TEXT SEARCH CONFIGURATION nom RENAME TO nouveau_nom\n" -#~ "ALTER TEXT SEARCH CONFIGURATION nom OWNER TO nouveau_propriétaire" +#~ msgid "ROLLBACK PREPARED transaction_id" +#~ msgstr "ROLLBACK PREPARED id_transaction" -#~ msgid "" -#~ "ALTER TABLESPACE name RENAME TO newname\n" -#~ "ALTER TABLESPACE name OWNER TO newowner" -#~ msgstr "" -#~ "ALTER TABLESPACE nom RENAME TO nouveau_nom\n" -#~ "ALTER TABLESPACE nom OWNER TO nouveau_propriétaire" +#~ msgid "ROLLBACK [ WORK | TRANSACTION ] TO [ SAVEPOINT ] savepoint_name" +#~ msgstr "ROLLBACK [ WORK | TRANSACTION ] TO [ SAVEPOINT ] nom_retour" #~ msgid "" -#~ "ALTER TABLE [ ONLY ] name [ * ]\n" -#~ " action [, ... ]\n" -#~ "ALTER TABLE [ ONLY ] name [ * ]\n" -#~ " RENAME [ COLUMN ] column TO new_column\n" -#~ "ALTER TABLE name\n" -#~ " RENAME TO new_name\n" -#~ "ALTER TABLE name\n" -#~ " SET SCHEMA new_schema\n" +#~ "[ WITH [ RECURSIVE ] with_query [, ...] ]\n" +#~ "SELECT [ ALL | DISTINCT [ ON ( expression [, ...] ) ] ]\n" +#~ " * | expression [ [ AS ] output_name ] [, ...]\n" +#~ " [ FROM from_item [, ...] ]\n" +#~ " [ WHERE condition ]\n" +#~ " [ GROUP BY expression [, ...] ]\n" +#~ " [ HAVING condition [, ...] ]\n" +#~ " [ WINDOW window_name AS ( window_definition ) [, ...] ]\n" +#~ " [ { UNION | INTERSECT | EXCEPT } [ ALL ] select ]\n" +#~ " [ ORDER BY expression [ ASC | DESC | USING operator ] [ NULLS { FIRST | LAST } ] [, ...] ]\n" +#~ " [ LIMIT { count | ALL } ]\n" +#~ " [ OFFSET start [ ROW | ROWS ] ]\n" +#~ " [ FETCH { FIRST | NEXT } [ count ] { ROW | ROWS } ONLY ]\n" +#~ " [ FOR { UPDATE | SHARE } [ OF table_name [, ...] ] [ NOWAIT ] [...] 
]\n" #~ "\n" -#~ "where action is one of:\n" +#~ "where from_item can be one of:\n" #~ "\n" -#~ " ADD [ COLUMN ] column type [ column_constraint [ ... ] ]\n" -#~ " DROP [ COLUMN ] column [ RESTRICT | CASCADE ]\n" -#~ " ALTER [ COLUMN ] column [ SET DATA ] TYPE type [ USING expression ]\n" -#~ " ALTER [ COLUMN ] column SET DEFAULT expression\n" -#~ " ALTER [ COLUMN ] column DROP DEFAULT\n" -#~ " ALTER [ COLUMN ] column { SET | DROP } NOT NULL\n" -#~ " ALTER [ COLUMN ] column SET STATISTICS integer\n" -#~ " ALTER [ COLUMN ] column SET STORAGE { PLAIN | EXTERNAL | EXTENDED | MAIN }\n" -#~ " ADD table_constraint\n" -#~ " DROP CONSTRAINT constraint_name [ RESTRICT | CASCADE ]\n" -#~ " DISABLE TRIGGER [ trigger_name | ALL | USER ]\n" -#~ " ENABLE TRIGGER [ trigger_name | ALL | USER ]\n" -#~ " ENABLE REPLICA TRIGGER trigger_name\n" -#~ " ENABLE ALWAYS TRIGGER trigger_name\n" -#~ " DISABLE RULE rewrite_rule_name\n" -#~ " ENABLE RULE rewrite_rule_name\n" -#~ " ENABLE REPLICA RULE rewrite_rule_name\n" -#~ " ENABLE ALWAYS RULE rewrite_rule_name\n" -#~ " CLUSTER ON index_name\n" -#~ " SET WITHOUT CLUSTER\n" -#~ " SET WITH OIDS\n" -#~ " SET WITHOUT OIDS\n" -#~ " SET ( storage_parameter = value [, ... ] )\n" -#~ " RESET ( storage_parameter [, ... ] )\n" -#~ " INHERIT parent_table\n" -#~ " NO INHERIT parent_table\n" -#~ " OWNER TO new_owner\n" -#~ " SET TABLESPACE new_tablespace" -#~ msgstr "" -#~ "ALTER TABLE [ ONLY ] nom [ * ]\n" -#~ " action [, ... ]\n" -#~ "ALTER TABLE [ ONLY ] nom [ * ]\n" -#~ " RENAME [ COLUMN ] colonne TO nouvelle_colonne\n" -#~ "ALTER TABLE nom\n" -#~ " RENAME TO nouveau_nom\n" -#~ "ALTER TABLE nom\n" -#~ " SET SCHEMA nouveau_schema\n" +#~ " [ ONLY ] table_name [ * ] [ [ AS ] alias [ ( column_alias [, ...] ) ] ]\n" +#~ " ( select ) [ AS ] alias [ ( column_alias [, ...] ) ]\n" +#~ " with_query_name [ [ AS ] alias [ ( column_alias [, ...] ) ] ]\n" +#~ " function_name ( [ argument [, ...] ] ) [ AS ] alias [ ( column_alias [, ...] | column_definition [, ...] ) ]\n" +#~ " function_name ( [ argument [, ...] ] ) AS ( column_definition [, ...] )\n" +#~ " from_item [ NATURAL ] join_type from_item [ ON join_condition | USING ( join_column [, ...] ) ]\n" #~ "\n" -#~ "où action peut être :\n" +#~ "and with_query is:\n" #~ "\n" -#~ " ADD [ COLUMN ] colonne type [ contrainte_colonne [ ... ] ]\n" -#~ " DROP [ COLUMN ] colonne [ RESTRICT | CASCADE ]\n" -#~ " ALTER [ COLUMN ] colonne [ SET DATA ] TYPE type [ USING expression ]\n" -#~ " ALTER [ COLUMN ] colonne SET DEFAULT expression\n" -#~ " ALTER [ COLUMN ] colonne DROP DEFAULT\n" -#~ " ALTER [ COLUMN ] colonne { SET | DROP } NOT NULL\n" -#~ " ALTER [ COLUMN ] colonne SET STATISTICS entier\n" -#~ " ALTER [ COLUMN ] colonne SET STORAGE\n" -#~ " { PLAIN | EXTERNAL | EXTENDED | MAIN }\n" -#~ " ADD contrainte_table\n" -#~ " DROP CONSTRAINT nom_contrainte [ RESTRICT | CASCADE ]\n" -#~ " DISABLE TRIGGER [ nom_trigger | ALL | USER ]\n" -#~ " ENABLE TRIGGER [ nom_trigger | ALL | USER ]\n" -#~ " ENABLE REPLICA TRIGGER nom_trigger\n" -#~ " ENABLE ALWAYS TRIGGER nom_trigger\n" -#~ " DISABLE RULE nom_règle_réécriture\n" -#~ " ENABLE RULE nom_règle_réécriture\n" -#~ " ENABLE REPLICA RULE nom_règle_réécriture\n" -#~ " ENABLE ALWAYS RULE nom_règle_réécriture\n" -#~ " CLUSTER ON nom_index\n" -#~ " SET WITHOUT CLUSTER\n" -#~ " SET WITH OIDS\n" -#~ " SET WITHOUT OIDS\n" -#~ " SET ( paramètre_stockage = valeur [, ... ] )\n" -#~ " RESET ( paramètre_stockage [, ... 
] )\n" -#~ " INHERIT table_parent\n" -#~ " NO INHERIT table_parent\n" -#~ " OWNER TO nouveau_propriétaire\n" -#~ " SET TABLESPACE nouveau_tablespace" - -#~ msgid "" -#~ "ALTER SERVER servername [ VERSION 'newversion' ]\n" -#~ " [ OPTIONS ( [ ADD | SET | DROP ] option ['value'] [, ... ] ) ]\n" -#~ "ALTER SERVER servername OWNER TO new_owner" -#~ msgstr "" -#~ "ALTER SERVER nom [ VERSION 'nouvelleversion' ]\n" -#~ " [ OPTIONS ( [ ADD | SET | DROP ] option ['valeur'] [, ... ] ) ]\n" -#~ "ALTER SERVER nom OWNER TO nouveau_propriétaire" - -#~ msgid "" -#~ "ALTER SEQUENCE name [ INCREMENT [ BY ] increment ]\n" -#~ " [ MINVALUE minvalue | NO MINVALUE ] [ MAXVALUE maxvalue | NO MAXVALUE ]\n" -#~ " [ START [ WITH ] start ]\n" -#~ " [ RESTART [ [ WITH ] restart ] ]\n" -#~ " [ CACHE cache ] [ [ NO ] CYCLE ]\n" -#~ " [ OWNED BY { table.column | NONE } ]\n" -#~ "ALTER SEQUENCE name OWNER TO new_owner\n" -#~ "ALTER SEQUENCE name RENAME TO new_name\n" -#~ "ALTER SEQUENCE name SET SCHEMA new_schema" -#~ msgstr "" -#~ "ALTER SEQUENCE nom [ INCREMENT [ BY ] incrément ]\n" -#~ " [ MINVALUE valeur_min | NO MINVALUE ] [ MAXVALUE valeur_max | NO MAXVALUE ]\n" -#~ " [ START [ WITH ] valeur_début ]\n" -#~ " [ RESTART [ [ WITH ] valeur_redémarrage ] ]\n" -#~ " [ CACHE cache ] [ [ NO ] CYCLE ]\n" -#~ " [ OWNED BY { table.colonne | NONE } ]\n" -#~ "ALTER SEQUENCE nom OWNER TO new_propriétaire\n" -#~ "ALTER SEQUENCE nom RENAME TO new_nom\n" -#~ "ALTER SEQUENCE nom SET SCHEMA new_schéma" - -#~ msgid "" -#~ "ALTER SCHEMA name RENAME TO newname\n" -#~ "ALTER SCHEMA name OWNER TO newowner" -#~ msgstr "" -#~ "ALTER SCHEMA nom RENAME TO nouveau_nom\n" -#~ "ALTER SCHEMA nom OWNER TO nouveau_propriétaire" - -#~ msgid "" -#~ "ALTER ROLE name [ [ WITH ] option [ ... ] ]\n" +#~ " with_query_name [ ( column_name [, ...] ) ] AS ( select )\n" #~ "\n" -#~ "where option can be:\n" -#~ " \n" -#~ " SUPERUSER | NOSUPERUSER\n" -#~ " | CREATEDB | NOCREATEDB\n" -#~ " | CREATEROLE | NOCREATEROLE\n" -#~ " | CREATEUSER | NOCREATEUSER\n" -#~ " | INHERIT | NOINHERIT\n" -#~ " | LOGIN | NOLOGIN\n" -#~ " | CONNECTION LIMIT connlimit\n" -#~ " | [ ENCRYPTED | UNENCRYPTED ] PASSWORD 'password'\n" -#~ " | VALID UNTIL 'timestamp' \n" +#~ "TABLE { [ ONLY ] table_name [ * ] | with_query_name }" +#~ msgstr "" +#~ "[ WITH [ RECURSIVE ] requête_with [, ...] ]\n" +#~ "SELECT [ ALL | DISTINCT [ ON ( expression [, ...] ) ] ]\n" +#~ " * | expression [ [ AS ] nom_sortie ] [, ...]\n" +#~ " [ FROM élément_from [, ...] ]\n" +#~ " [ WHERE condition ]\n" +#~ " [ GROUP BY expression [, ...] ]\n" +#~ " [ HAVING condition [, ...] ]\n" +#~ " [ WINDOW nom_window AS ( définition_window ) [, ...] ]\n" +#~ " [ { UNION | INTERSECT | EXCEPT } [ ALL ] select ]\n" +#~ " [ ORDER BY expression [ ASC | DESC | USING opérateur ] [ NULLS { FIRST | LAST } ] [, ...] ]\n" +#~ " [ LIMIT { total | ALL } ]\n" +#~ " [ OFFSET début [ ROW | ROWS ] ]\n" +#~ " [ FETCH { FIRST | NEXT } [ total ] { ROW | ROWS } ONLY ]\n" +#~ " [ FOR { UPDATE | SHARE } [ OF nom_table [, ...] ] [ NOWAIT ] [...] ]\n" #~ "\n" -#~ "ALTER ROLE name RENAME TO newname\n" +#~ "avec élément_from faisant parti de :\n" #~ "\n" -#~ "ALTER ROLE name SET configuration_parameter { TO | = } { value | DEFAULT }\n" -#~ "ALTER ROLE name SET configuration_parameter FROM CURRENT\n" -#~ "ALTER ROLE name RESET configuration_parameter\n" -#~ "ALTER ROLE name RESET ALL" -#~ msgstr "" -#~ "ALTER ROLE nom [ [ WITH ] option [ ... ] ]\n" +#~ " [ ONLY ] nom_table [ * ] [ [ AS ] alias [ ( alias_colonne [, ...] 
) ] ]\n" +#~ " ( select ) [ AS ] alias [ ( alias_colonne [, ...] ) ]\n" +#~ " nom_requête_with [ [ AS ] alias [ ( alias_colonne [, ...] ) ] ]\n" +#~ " nom_fonction ( [ argument [, ...] ] ) [ AS ] alias [ ( alias_colonne [, ...] | définition_colonne [, ...] ) ]\n" +#~ " nom_fonction ( [ argument [, ...] ] ) AS ( définition_colonne [, ...] )\n" +#~ " élément_from [ NATURAL ] type_jointure élément_from [ ON condition_jointure | USING ( colonne_jointure [, ...] ) ]\n" #~ "\n" -#~ "où option peut être :\n" -#~ " \n" -#~ " SUPERUSER | NOSUPERUSER\n" -#~ " | CREATEDB | NOCREATEDB\n" -#~ " | CREATEROLE | NOCREATEROLE\n" -#~ " | CREATEUSER | NOCREATEUSER\n" -#~ " | INHERIT | NOINHERIT\n" -#~ " | LOGIN | NOLOGIN\n" -#~ " | CONNECTION LIMIT limite_connexions\n" -#~ " | [ ENCRYPTED | UNENCRYPTED ] PASSWORD 'mot de passe'\n" -#~ " | VALID UNTIL 'timestamp' \n" +#~ "et requête_with est:\n" #~ "\n" -#~ "ALTER ROLE nom RENAME TO nouveau_nom\n" +#~ " nom_requête_with [ ( nom_colonne [, ...] ) ] AS ( select )\n" #~ "\n" -#~ "ALTER ROLE nom SET paramètre { TO | = } { valeur | DEFAULT }\n" -#~ "ALTER ROLE name SET paramètre FROM CURRENT\n" -#~ "ALTER ROLE nom RESET paramètre\n" -#~ "ALTER ROLE name RESET ALL" +#~ "TABLE { [ ONLY ] nom_table [ * ] | nom_requête_with }" #~ msgid "" -#~ "ALTER OPERATOR FAMILY name USING index_method ADD\n" -#~ " { OPERATOR strategy_number operator_name ( op_type, op_type )\n" -#~ " | FUNCTION support_number [ ( op_type [ , op_type ] ) ] funcname ( argument_type [, ...] )\n" -#~ " } [, ... ]\n" -#~ "ALTER OPERATOR FAMILY name USING index_method DROP\n" -#~ " { OPERATOR strategy_number ( op_type [ , op_type ] )\n" -#~ " | FUNCTION support_number ( op_type [ , op_type ] )\n" -#~ " } [, ... ]\n" -#~ "ALTER OPERATOR FAMILY name USING index_method RENAME TO newname\n" -#~ "ALTER OPERATOR FAMILY name USING index_method OWNER TO newowner" +#~ "[ WITH [ RECURSIVE ] with_query [, ...] ]\n" +#~ "SELECT [ ALL | DISTINCT [ ON ( expression [, ...] ) ] ]\n" +#~ " * | expression [ [ AS ] output_name ] [, ...]\n" +#~ " INTO [ TEMPORARY | TEMP ] [ TABLE ] new_table\n" +#~ " [ FROM from_item [, ...] ]\n" +#~ " [ WHERE condition ]\n" +#~ " [ GROUP BY expression [, ...] ]\n" +#~ " [ HAVING condition [, ...] ]\n" +#~ " [ WINDOW window_name AS ( window_definition ) [, ...] ]\n" +#~ " [ { UNION | INTERSECT | EXCEPT } [ ALL ] select ]\n" +#~ " [ ORDER BY expression [ ASC | DESC | USING operator ] [ NULLS { FIRST | LAST } ] [, ...] ]\n" +#~ " [ LIMIT { count | ALL } ]\n" +#~ " [ OFFSET start [ ROW | ROWS ] ]\n" +#~ " [ FETCH { FIRST | NEXT } [ count ] { ROW | ROWS } ONLY ]\n" +#~ " [ FOR { UPDATE | SHARE } [ OF table_name [, ...] ] [ NOWAIT ] [...] ]" #~ msgstr "" -#~ "ALTER OPERATOR FAMILY nom USING méthode_indexage ADD\n" -#~ " { OPERATOR numéro_stratégie nom_opérateur ( type_op, type_op ) \n" -#~ " | FUNCTION numéro_support [ ( type_op [ , type_op ] ) ]\n" -#~ " nom_fonction ( type_argument [, ...] )\n" -#~ " } [, ... ]\n" -#~ "ALTER OPERATOR FAMILY nom USING méthode_indexage DROP\n" -#~ " { OPERATOR numéro_stratégie ( type_op [ , type_op ] )\n" -#~ " | FUNCTION numéro_support ( type_op [ , type_op ] )\n" -#~ " } [, ... ]\n" -#~ "ALTER OPERATOR FAMILY nom USING méthode_indexage\n" -#~ " RENAME TO nouveau_nom\n" -#~ "ALTER OPERATOR FAMILY nom USING méthode_indexage\n" -#~ " OWNER TO nouveau_propriétaire" +#~ "[ WITH [ RECURSIVE ] requête_with [, ...] ]\n" +#~ "SELECT [ ALL | DISTINCT [ ON ( expression [, ...] 
) ] ]\n" +#~ " * | expression [ [ AS ] nom_sortie ] [, ...]\n" +#~ " INTO [ TEMPORARY | TEMP ] [ TABLE ] nouvelle_table\n" +#~ " [ FROM élément_from [, ...] ]\n" +#~ " [ WHERE condition ]\n" +#~ " [ GROUP BY expression [, ...] ]\n" +#~ " [ HAVING condition [, ...] ]\n" +#~ " [ WINDOW nom_window AS ( définition_window ) [, ...] ]\n" +#~ " [ { UNION | INTERSECT | EXCEPT } [ ALL ] select ]\n" +#~ " [ ORDER BY expression [ ASC | DESC | USING opérateur ] [ NULLS { FIRST | LAST } ] [, ...] ]\n" +#~ " [ LIMIT { total | ALL } ]\n" +#~ " [ OFFSET début [ ROW | ROWS ] ]\n" +#~ " [ FETCH { FIRST | NEXT } [ total ] { ROW | ROWS } ONLY ]\n" +#~ " [ FOR { UPDATE | SHARE } [ OF nom_table [, ...] ] [ NOWAIT ] [...] ]" #~ msgid "" -#~ "ALTER OPERATOR CLASS name USING index_method RENAME TO newname\n" -#~ "ALTER OPERATOR CLASS name USING index_method OWNER TO newowner" -#~ msgstr "" -#~ "ALTER OPERATOR CLASS nom USING méthode_indexation\n" -#~ " RENAME TO nouveau_nom\n" -#~ "ALTER OPERATOR CLASS nom USING méthode_indexation\n" -#~ " OWNER TO nouveau_propriétaire" - -#~ msgid "ALTER OPERATOR name ( { lefttype | NONE } , { righttype | NONE } ) OWNER TO newowner" +#~ "SET [ SESSION | LOCAL ] configuration_parameter { TO | = } { value | 'value' | DEFAULT }\n" +#~ "SET [ SESSION | LOCAL ] TIME ZONE { timezone | LOCAL | DEFAULT }" #~ msgstr "" -#~ "ALTER OPERATOR nom ( { lefttype | NONE } , { righttype | NONE } )\n" -#~ " OWNER TO nouveau_propriétaire" +#~ "SET [ SESSION | LOCAL ] paramètre { TO | = } { valeur | 'valeur' | DEFAULT }\n" +#~ "SET [ SESSION | LOCAL ] TIME ZONE { zone_horaire | LOCAL | DEFAULT }" -#~ msgid "" -#~ "ALTER [ PROCEDURAL ] LANGUAGE name RENAME TO newname\n" -#~ "ALTER [ PROCEDURAL ] LANGUAGE name OWNER TO new_owner" -#~ msgstr "" -#~ "ALTER [ PROCEDURAL ] LANGUAGE nom RENAME TO nouveau_nom\n" -#~ "ALTER [ PROCEDURAL ] LANGUAGE nom OWNER TO nouveau_propriétaire" +#~ msgid "SET CONSTRAINTS { ALL | name [, ...] } { DEFERRED | IMMEDIATE }" +#~ msgstr "SET CONSTRAINTS { ALL | nom [, ...] } { DEFERRED | IMMEDIATE }" #~ msgid "" -#~ "ALTER INDEX name RENAME TO new_name\n" -#~ "ALTER INDEX name SET TABLESPACE tablespace_name\n" -#~ "ALTER INDEX name SET ( storage_parameter = value [, ... ] )\n" -#~ "ALTER INDEX name RESET ( storage_parameter [, ... ] )" +#~ "SET [ SESSION | LOCAL ] ROLE rolename\n" +#~ "SET [ SESSION | LOCAL ] ROLE NONE\n" +#~ "RESET ROLE" #~ msgstr "" -#~ "ALTER INDEX nom RENAME TO nouveau_nom\n" -#~ "ALTER INDEX nom SET TABLESPACE nom_tablespace\n" -#~ "ALTER INDEX nom SET ( paramètre_stockage = valeur [, ... ] )\n" -#~ "ALTER INDEX nom RESET ( paramètre_stockage [, ... ] )" +#~ "SET [ SESSION | LOCAL ] ROLE nom_rôle\n" +#~ "SET [ SESSION | LOCAL ] ROLE NONE\n" +#~ "RESET ROLE" #~ msgid "" -#~ "ALTER GROUP groupname ADD USER username [, ... ]\n" -#~ "ALTER GROUP groupname DROP USER username [, ... ]\n" -#~ "\n" -#~ "ALTER GROUP groupname RENAME TO newname" +#~ "SET [ SESSION | LOCAL ] SESSION AUTHORIZATION username\n" +#~ "SET [ SESSION | LOCAL ] SESSION AUTHORIZATION DEFAULT\n" +#~ "RESET SESSION AUTHORIZATION" #~ msgstr "" -#~ "ALTER GROUP nom_groupe ADD USER nom_utilisateur [, ... ]\n" -#~ "ALTER GROUP nom_groupe DROP USER nom_utilisateur [, ... ]\n" -#~ "\n" -#~ "ALTER GROUP nom_groupe RENAME TO nouveau_nom" +#~ "SET [ SESSION | LOCAL ] SESSION AUTHORIZATION nom_utilisateur\n" +#~ "SET [ SESSION | LOCAL ] SESSION AUTHORIZATION DEFAULT\n" +#~ "RESET SESSION AUTHORIZATION" #~ msgid "" -#~ "ALTER FUNCTION name ( [ [ argmode ] [ argname ] argtype [, ...] 
] )\n" -#~ " action [ ... ] [ RESTRICT ]\n" -#~ "ALTER FUNCTION name ( [ [ argmode ] [ argname ] argtype [, ...] ] )\n" -#~ " RENAME TO new_name\n" -#~ "ALTER FUNCTION name ( [ [ argmode ] [ argname ] argtype [, ...] ] )\n" -#~ " OWNER TO new_owner\n" -#~ "ALTER FUNCTION name ( [ [ argmode ] [ argname ] argtype [, ...] ] )\n" -#~ " SET SCHEMA new_schema\n" +#~ "SET TRANSACTION transaction_mode [, ...]\n" +#~ "SET SESSION CHARACTERISTICS AS TRANSACTION transaction_mode [, ...]\n" #~ "\n" -#~ "where action is one of:\n" +#~ "where transaction_mode is one of:\n" #~ "\n" -#~ " CALLED ON NULL INPUT | RETURNS NULL ON NULL INPUT | STRICT\n" -#~ " IMMUTABLE | STABLE | VOLATILE\n" -#~ " [ EXTERNAL ] SECURITY INVOKER | [ EXTERNAL ] SECURITY DEFINER\n" -#~ " COST execution_cost\n" -#~ " ROWS result_rows\n" -#~ " SET configuration_parameter { TO | = } { value | DEFAULT }\n" -#~ " SET configuration_parameter FROM CURRENT\n" -#~ " RESET configuration_parameter\n" -#~ " RESET ALL" +#~ " ISOLATION LEVEL { SERIALIZABLE | REPEATABLE READ | READ COMMITTED | READ UNCOMMITTED }\n" +#~ " READ WRITE | READ ONLY" #~ msgstr "" -#~ "ALTER FUNCTION nom ( [ [ mode_arg ] [ nom_arg ] type_arg [, ...] ] )\n" -#~ " action [, ... ] [ RESTRICT ]\n" -#~ "ALTER FUNCTION nom ( [ [ mode_arg ] [ nom_arg ] type_arg [, ...] ] )\n" -#~ " RENAME TO nouveau_nom\n" -#~ "ALTER FUNCTION nom ( [ [ mode_arg ] [ nom_arg ] type_arg [, ...] ] )\n" -#~ " OWNER TO nouveau_proprietaire\n" -#~ "ALTER FUNCTION nom ( [ [ mode_arg ] [ nom_arg ] type_arg [, ...] ] )\n" -#~ " SET SCHEMA nouveau_schema\n" +#~ "SET TRANSACTION mode_transaction [, ...]\n" +#~ "SET SESSION CHARACTERISTICS AS TRANSACTION mode_transaction [, ...]\n" #~ "\n" -#~ "où action peut être :\n" +#~ "où mode_transaction peut être :\n" #~ "\n" -#~ " CALLED ON NULL INPUT | RETURNS NULL ON NULL INPUT | STRICT\n" -#~ " IMMUTABLE | STABLE | VOLATILE\n" -#~ " [ EXTERNAL ] SECURITY INVOKER | [ EXTERNAL ] SECURITY DEFINER\n" -#~ " COST cout_execution\n" -#~ " ROWS lignes_resultats\n" -#~ " SET paramètre { TO | = } { valeur | DEFAULT }\n" -#~ " SET paramètre FROM CURRENT\n" -#~ " RESET paramètre\n" -#~ " RESET ALL" - -#~ msgid "" -#~ "ALTER FOREIGN DATA WRAPPER name\n" -#~ " [ VALIDATOR valfunction | NO VALIDATOR ]\n" -#~ " [ OPTIONS ( [ ADD | SET | DROP ] option ['value'] [, ... ]) ]\n" -#~ "ALTER FOREIGN DATA WRAPPER name OWNER TO new_owner" -#~ msgstr "" -#~ "ALTER FOREIGN DATA WRAPPER nom\n" -#~ " [ VALIDATOR fonction_validation | NO VALIDATOR ]\n" -#~ " [ OPTIONS ( [ ADD | SET | DROP ] option ['valeur'] [, ... 
]) ]\n" -#~ "ALTER FOREIGN DATA WRAPPER nom OWNER TO nouveau_propriétaire" +#~ " ISOLATION LEVEL { SERIALIZABLE | REPEATABLE READ |\n" +#~ " READ COMMITTED | READ UNCOMMITTED }\n" +#~ " READ WRITE | READ ONLY" #~ msgid "" -#~ "ALTER DOMAIN name\n" -#~ " { SET DEFAULT expression | DROP DEFAULT }\n" -#~ "ALTER DOMAIN name\n" -#~ " { SET | DROP } NOT NULL\n" -#~ "ALTER DOMAIN name\n" -#~ " ADD domain_constraint\n" -#~ "ALTER DOMAIN name\n" -#~ " DROP CONSTRAINT constraint_name [ RESTRICT | CASCADE ]\n" -#~ "ALTER DOMAIN name\n" -#~ " OWNER TO new_owner \n" -#~ "ALTER DOMAIN name\n" -#~ " SET SCHEMA new_schema" +#~ "SHOW name\n" +#~ "SHOW ALL" #~ msgstr "" -#~ "ALTER DOMAIN nom\n" -#~ " { SET DEFAULT expression | DROP DEFAULT }\n" -#~ "ALTER DOMAIN nom\n" -#~ " { SET | DROP } NOT NULL\n" -#~ "ALTER DOMAIN nom\n" -#~ " ADD contrainte_domaine\n" -#~ "ALTER DOMAIN nom\n" -#~ " DROP CONSTRAINT nom_contrainte [ RESTRICT | CASCADE ]\n" -#~ "ALTER DOMAIN nom\n" -#~ " OWNER TO nouveau_propriétaire \n" -#~ "ALTER DOMAIN nom\n" -#~ " SET SCHEMA nouveau_schéma" +#~ "SHOW nom\n" +#~ "SHOW ALL" -#~ msgid "" -#~ "ALTER DATABASE name [ [ WITH ] option [ ... ] ]\n" -#~ "\n" -#~ "where option can be:\n" -#~ "\n" -#~ " CONNECTION LIMIT connlimit\n" -#~ "\n" -#~ "ALTER DATABASE name RENAME TO newname\n" -#~ "\n" -#~ "ALTER DATABASE name OWNER TO new_owner\n" -#~ "\n" -#~ "ALTER DATABASE name SET TABLESPACE new_tablespace\n" -#~ "\n" -#~ "ALTER DATABASE name SET configuration_parameter { TO | = } { value | DEFAULT }\n" -#~ "ALTER DATABASE name SET configuration_parameter FROM CURRENT\n" -#~ "ALTER DATABASE name RESET configuration_parameter\n" -#~ "ALTER DATABASE name RESET ALL" -#~ msgstr "" -#~ "ALTER DATABASE nom [ [ WITH ] option [ ... ] ]\n" -#~ "\n" -#~ "où option peut être:\n" -#~ "\n" -#~ " CONNECTION LIMIT limite_connexion\n" +#~ msgid "" +#~ "START TRANSACTION [ transaction_mode [, ...] ]\n" #~ "\n" -#~ "ALTER DATABASE nom RENAME TO nouveau_nom\n" +#~ "where transaction_mode is one of:\n" #~ "\n" -#~ "ALTER DATABASE nom OWNER TO nouveau_propriétaire\n" +#~ " ISOLATION LEVEL { SERIALIZABLE | REPEATABLE READ | READ COMMITTED | READ UNCOMMITTED }\n" +#~ " READ WRITE | READ ONLY" +#~ msgstr "" +#~ "START TRANSACTION [ mode_transaction [, ...] ]\n" #~ "\n" -#~ "ALTER DATABASE nom SET TABLESPACE nouveau_tablespace\n" +#~ "où mode_transaction peut être :\n" #~ "\n" -#~ "ALTER DATABASE nom SET paramètre_configuration { TO | = } { valeur | DEFAULT }\n" -#~ "ALTER DATABASE nom SET paramètre_configuration FROM CURRENT\n" -#~ "ALTER DATABASE nom RESET paramètre_configuration\n" -#~ "ALTER DATABASE nom RESET ALL" +#~ " ISOLATION LEVEL { SERIALIZABLE | REPEATABLE READ |\n" +#~ " READ COMMITTED | READ UNCOMMITTED }\n" +#~ " READ WRITE | READ ONLY" #~ msgid "" -#~ "ALTER CONVERSION name RENAME TO newname\n" -#~ "ALTER CONVERSION name OWNER TO newowner" +#~ "TRUNCATE [ TABLE ] [ ONLY ] name [, ... ]\n" +#~ " [ RESTART IDENTITY | CONTINUE IDENTITY ] [ CASCADE | RESTRICT ]" #~ msgstr "" -#~ "ALTER CONVERSION nom RENAME TO nouveau_nom\n" -#~ "ALTER CONVERSION nom OWNER TO nouveau_propriétaire" +#~ "TRUNCATE [ TABLE ] [ ONLY ] nom [, ... ]\n" +#~ " [ RESTART IDENTITY | CONTINUE IDENTITY ] [ CASCADE | RESTRICT ]" + +#~ msgid "UNLISTEN { name | * }" +#~ msgstr "UNLISTEN { nom | * }" #~ msgid "" -#~ "ALTER AGGREGATE name ( type [ , ... ] ) RENAME TO new_name\n" -#~ "ALTER AGGREGATE name ( type [ , ... ] ) OWNER TO new_owner\n" -#~ "ALTER AGGREGATE name ( type [ , ... 
] ) SET SCHEMA new_schema" +#~ "UPDATE [ ONLY ] table [ [ AS ] alias ]\n" +#~ " SET { column = { expression | DEFAULT } |\n" +#~ " ( column [, ...] ) = ( { expression | DEFAULT } [, ...] ) } [, ...]\n" +#~ " [ FROM fromlist ]\n" +#~ " [ WHERE condition | WHERE CURRENT OF cursor_name ]\n" +#~ " [ RETURNING * | output_expression [ [ AS ] output_name ] [, ...] ]" #~ msgstr "" -#~ "ALTER AGGREGATE nom ( type [ , ... ] ) RENAME TO nouveau_nom\n" -#~ "ALTER AGGREGATE nom ( type [ , ... ] ) OWNER TO nouveau_propriétaire\n" -#~ "ALTER AGGREGATE nom ( type [ , ... ] ) SET SCHEMA nouveau_schéma" - -#~ msgid "ABORT [ WORK | TRANSACTION ]" -#~ msgstr "ABORT [ WORK | TRANSACTION ]" - -#~ msgid "number" -#~ msgstr "numéro" - -#~ msgid "rolename" -#~ msgstr "nom_rôle" - -#~ msgid "Exclusion constraints:" -#~ msgstr "Contraintes d'exclusion :" - -#~ msgid "define a new constraint trigger" -#~ msgstr "définir une nouvelle contrainte de déclenchement" - -#~ msgid " as user \"%s\"" -#~ msgstr " comme utilisateur « %s »" - -#~ msgid " at port \"%s\"" -#~ msgstr " sur le port « %s »" - -#~ msgid " on host \"%s\"" -#~ msgstr " sur l'hôte « %s »" - -#~ msgid "tablespace" -#~ msgstr "tablespace" - -#~ msgid "new_column" -#~ msgstr "nouvelle_colonne" - -#~ msgid "column" -#~ msgstr "colonne" - -#~ msgid "data type" -#~ msgstr "type de données" - -#~ msgid "contains support for command-line editing" -#~ msgstr "contient une gestion avancée de la ligne de commande" - -#~ msgid " --version output version information, then exit\n" -#~ msgstr " --version affiche la version, puis quitte\n" - -#~ msgid " --help show this help, then exit\n" -#~ msgstr " --help affiche cette aide, puis quitte\n" - -#~ msgid "\\copy: unexpected response (%d)\n" -#~ msgstr "\\copy : réponse inattendue (%d)\n" - -#~ msgid "\\copy: %s" -#~ msgstr "\\copy : %s" +#~ "UPDATE [ ONLY ] table [ [ AS ] alias ]\n" +#~ " SET { colonne = { expression | DEFAULT } |\n" +#~ " ( colonne [, ...] ) = ( { expression | DEFAULT } [, ...] ) } [, ...]\n" +#~ " [ FROM liste_from ]\n" +#~ " [ WHERE condition | WHERE CURRENT OF nom_curseur ]\n" +#~ " [ RETURNING * | expression_sortie [ [ AS ] nom_sortie ] [, ...] ]" -#~ msgid "\\%s: error\n" -#~ msgstr "\\%s : erreur\n" +#~ msgid "" +#~ "VACUUM [ FULL ] [ FREEZE ] [ VERBOSE ] [ table ]\n" +#~ "VACUUM [ FULL ] [ FREEZE ] [ VERBOSE ] ANALYZE [ table [ (column [, ...] ) ] ]" +#~ msgstr "" +#~ "VACUUM [ FULL ] [ FREEZE ] [ VERBOSE ] [ table ]\n" +#~ "VACUUM [ FULL ] [ FREEZE ] [ VERBOSE ] ANALYZE [ table [ (colonne [, ...] ) ] ]" -#~ msgid " \\l[+] list all databases\n" -#~ msgstr " \\l[+] affiche la liste des bases de données\n" +#~ msgid "" +#~ "VALUES ( expression [, ...] ) [, ...]\n" +#~ " [ ORDER BY sort_expression [ ASC | DESC | USING operator ] [, ...] ]\n" +#~ " [ LIMIT { count | ALL } ]\n" +#~ " [ OFFSET start [ ROW | ROWS ] ]\n" +#~ " [ FETCH { FIRST | NEXT } [ count ] { ROW | ROWS } ONLY ]" +#~ msgstr "" +#~ "VALUES ( expression [, ...] ) [, ...]\n" +#~ " [ ORDER BY expression_tri [ ASC | DESC | USING opérateur ] [, ...] 
]\n" +#~ " [ LIMIT { total | ALL } ]\n" +#~ " [ OFFSET début [ ROW | ROWS ] ]\n" +#~ " [ FETCH { FIRST | NEXT } [ total ] { ROW | ROWS } ONLY ]" -#~ msgid "%s: pg_strdup: cannot duplicate null pointer (internal error)\n" -#~ msgstr "%s : pg_strdup : ne peut pas dupliquer le pointeur null (erreur interne)\n" +#~ msgid " \"%s\" IN %s %s" +#~ msgstr " \"%s\" DANS %s %s" -#~ msgid "could not change directory to \"%s\"" -#~ msgstr "n'a pas pu accéder au répertoire « %s »" +#~ msgid "(1 row)" +#~ msgid_plural "(%lu rows)" +#~ msgstr[0] "(1 ligne)" +#~ msgstr[1] "(%lu lignes)" -#~ msgid "input_data_type" -#~ msgstr "type_de_données_en_entrée" +#~ msgid "" +#~ " \\d{t|i|s|v|S} [PATTERN] (add \"+\" for more detail)\n" +#~ " list tables/indexes/sequences/views/system tables\n" +#~ msgstr "" +#~ " \\d{t|i|s|v|S} [MODÈLE] (ajouter « + » pour plus de détails)\n" +#~ " affiche la liste des\n" +#~ " tables/index/séquences/vues/tables système\n" -#~ msgid "agg_type" -#~ msgstr "type_aggrégat" +#~ msgid " \\db [PATTERN] list tablespaces (add \"+\" for more detail)\n" +#~ msgstr "" +#~ " \\db [MODÈLE] affiche la liste des tablespaces (ajouter « + » pour\n" +#~ " plus de détails)\n" -#~ msgid "agg_name" -#~ msgstr "nom_d_agrégat" +#~ msgid " \\df [PATTERN] list functions (add \"+\" for more detail)\n" +#~ msgstr "" +#~ " \\df [MODÈLE] affiche la liste des fonctions (ajouter « + » pour\n" +#~ " plus de détails)\n" -#~ msgid "could not get current user name: %s\n" -#~ msgstr "n'a pas pu obtenir le nom d'utilisateur courant : %s\n" +#~ msgid " \\dFd [PATTERN] list text search dictionaries (add \"+\" for more detail)\n" +#~ msgstr "" +#~ " \\dFd [MODÈLE] affiche la liste des dictionnaires de la recherche\n" +#~ " de texte (ajouter « + » pour plus de détails)\n" -#~ msgid "Showing only tuples." -#~ msgstr "Affichage des tuples seuls." +#~ msgid " \\dFp [PATTERN] list text search parsers (add \"+\" for more detail)\n" +#~ msgstr "" +#~ " \\dFp [MODÈLE] affiche la liste des analyseurs de la recherche de\n" +#~ " texte (ajouter « + » pour plus de détails)\n" -#~ msgid "Showing locale-adjusted numeric output." -#~ msgstr "Affichage de la sortie numérique adaptée à la locale." 
+#~ msgid " \\dn [PATTERN] list schemas (add \"+\" for more detail)\n" +#~ msgstr "" +#~ " \\dn [MODÈLE] affiche la liste des schémas (ajouter « + » pour\n" +#~ " plus de détails)\n" -#~ msgid "Watch every %lds\t%s" -#~ msgstr "Vérifier chaque %lds\t%s" +#~ msgid " \\dT [PATTERN] list data types (add \"+\" for more detail)\n" +#~ msgstr "" +#~ " \\dT [MODÈLE] affiche la liste des types de données (ajouter « + »\n" +#~ " pour plus de détails)\n" -#~ msgid "%s: could not set variable \"%s\"\n" -#~ msgstr "%s : n'a pas pu initialiser la variable « %s »\n" +#~ msgid " \\l list all databases (add \"+\" for more detail)\n" +#~ msgstr "" +#~ " \\l affiche la liste des bases de données (ajouter « + »\n" +#~ " pour plus de détails)\n" -#~ msgid "Object Description" -#~ msgstr "Description d'un objet" +#~ msgid " \\z [PATTERN] list table, view, and sequence access privileges (same as \\dp)\n" +#~ msgstr "" +#~ " \\z [MODÈLE] affiche la liste des privilèges d'accès aux tables,\n" +#~ " vues et séquences (identique à \\dp)\n" -#~ msgid "Modifier" -#~ msgstr "Modificateur" +#~ msgid "Copy, Large Object\n" +#~ msgstr "Copie, « Large Object »\n" -#~ msgid "default %s" -#~ msgstr "Par défaut, %s" +#~ msgid "" +#~ "Welcome to %s %s (server %s), the PostgreSQL interactive terminal.\n" +#~ "\n" +#~ msgstr "" +#~ "Bienvenue dans %s %s (serveur %s), l'interface interactive de PostgreSQL.\n" +#~ "\n" -#~ msgid "not null" -#~ msgstr "non NULL" +#~ msgid "" +#~ "Welcome to %s %s, the PostgreSQL interactive terminal.\n" +#~ "\n" +#~ msgstr "" +#~ "Bienvenue dans %s %s, l'interface interactive de PostgreSQL.\n" +#~ "\n" -#~ msgid "collate %s" -#~ msgstr "collationnement %s" +#~ msgid "" +#~ "WARNING: You are connected to a server with major version %d.%d,\n" +#~ "but your %s client is major version %d.%d. Some backslash commands,\n" +#~ "such as \\d, might not work properly.\n" +#~ "\n" +#~ msgstr "" +#~ "ATTENTION : vous êtes connecté sur un serveur dont la version majeure est\n" +#~ "%d.%d alors que votre client %s est en version majeure %d.%d. Certaines\n" +#~ "commandes avec antislashs, comme \\d, peuvent ne pas fonctionner\n" +#~ "correctement.\n" +#~ "\n" -#~ msgid "Modifiers" -#~ msgstr "Modificateurs" +#~ msgid "Access privileges for database \"%s\"" +#~ msgstr "Droits d'accès pour la base de données « %s »" -#~ msgid "could not set variable \"%s\"\n" -#~ msgstr "n'a pas pu initialiser la variable « %s »\n" +#~ msgid "?%c? \"%s.%s\"" +#~ msgstr "?%c? 
« %s.%s »" -#~ msgid "+ opt(%d) = |%s|\n" -#~ msgstr "+ opt(%d) = |%s|\n" +#~ msgid " \"%s\"" +#~ msgstr " « %s »" -#~ msgid "\\%s: error while setting variable\n" -#~ msgstr "\\%s : erreur lors de l'initialisation de la variable\n" +#~ msgid "ALTER VIEW name RENAME TO newname" +#~ msgstr "ALTER VIEW nom RENAME TO nouveau_nom" -#~ msgid "Password encryption failed.\n" -#~ msgstr "Échec du chiffrement du mot de passe.\n" +#~ msgid "(No rows)\n" +#~ msgstr "(Aucune ligne)\n" -#~ msgid "No relations found.\n" -#~ msgstr "Aucune relation trouvée.\n" +#~ msgid " -?, --help show this help, then exit\n" +#~ msgstr " -?, --help affiche cette aide puis quitte\n" -#~ msgid "No matching relations found.\n" -#~ msgstr "Aucune relation correspondante trouvée.\n" +#~ msgid "SSL connection (unknown cipher)\n" +#~ msgstr "Connexion SSL (chiffrement inconnu)\n" -#~ msgid "No settings found.\n" -#~ msgstr "Aucun paramètre trouvé.\n" +#~ msgid "serialtype" +#~ msgstr "serialtype" -#~ msgid "No matching settings found.\n" -#~ msgstr "Aucun paramètre correspondant trouvé.\n" +#~ msgid "statistic_type" +#~ msgstr "type_statistique" -#~ msgid "No per-database role settings support in this server version.\n" -#~ msgstr "Pas de supprot des paramètres rôle par base de données pour la version de ce serveur.\n" +#~ msgid "Value" +#~ msgstr "Valeur" diff --git a/src/bin/psql/po/it.po b/src/bin/psql/po/it.po index e935f8af50..b56513fd89 100644 --- a/src/bin/psql/po/it.po +++ b/src/bin/psql/po/it.po @@ -1,40 +1,36 @@ # -# Translation of psql to Italian -# PostgreSQL Project +# psql.po +# Italian message translation file for psql # -# Associazione Culturale ITPUG - Italian PostgreSQL Users Group -# http://www.itpug.org/ - info@itpug.org +# For development and bug report please use: +# https://github.com/dvarrazzo/postgresql-it # -# Traduttori: -# * Cosimo D'Arcangelo -# * Massimo Mangoni -# * Daniele Varrazzo +# Copyright (C) 2012-2017 PostgreSQL Global Development Group +# Copyright (C) 2010, Associazione Culturale ITPUG # -# Revisori -# * Emanuele Zamprogno +# Daniele Varrazzo , 2012-2017. +# Cosimo D'Arcangelo +# Massimo Mangoni +# Mirko Tebaldi +# Gabriele Bartolini # -# Traduttori precedenti: -# * Mirko Tebaldi -# * Gabriele Bartolini -# -# Copyright (c) 2010, Associazione Culturale ITPUG -# Distributed under the same license of the PostgreSQL project +# This file is distributed under the same license as the PostgreSQL package. 
# msgid "" msgstr "" "Project-Id-Version: psql (PostgreSQL) 10\n" "Report-Msgid-Bugs-To: pgsql-bugs@postgresql.org\n" -"POT-Creation-Date: 2017-05-22 07:45+0000\n" -"PO-Revision-Date: 2017-05-29 22:06+0100\n" +"POT-Creation-Date: 2017-10-23 23:14+0000\n" +"PO-Revision-Date: 2017-10-24 08:36+0100\n" "Last-Translator: Daniele Varrazzo \n" -"Language-Team: Gruppo traduzioni ITPUG \n" +"Language-Team: https://github.com/dvarrazzo/postgresql-it\n" "Language: it\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" "X-Poedit-SourceCharset: utf-8\n" "Plural-Forms: nplurals=2; plural=n != 1;\n" -"X-Generator: Poedit 1.8.7.1\n" +"X-Generator: Poedit 1.5.4\n" #: ../../common/exec.c:127 ../../common/exec.c:241 ../../common/exec.c:284 #, c-format @@ -209,199 +205,199 @@ msgstr "Sei collegato al database \"%s\" con nome utente \"%s\" tramite il socke msgid "You are connected to database \"%s\" as user \"%s\" on host \"%s\" at port \"%s\".\n" msgstr "Sei collegato al database \"%s\" con nome utente \"%s\" sull'host \"%s\" porta \"%s\".\n" -#: command.c:912 command.c:1002 command.c:1111 command.c:2520 +#: command.c:915 command.c:1005 command.c:1114 command.c:2523 #, c-format msgid "no query buffer\n" msgstr "Nessun buffer query\n" -#: command.c:945 command.c:4757 +#: command.c:948 command.c:4784 #, c-format msgid "invalid line number: %s\n" msgstr "numero di riga non valido: \"%s\"\n" -#: command.c:995 +#: command.c:998 #, c-format msgid "The server (version %s) does not support editing function source.\n" msgstr "Il server (versione %s) non supporta la modifica dei sorgenti delle funzioni.\n" -#: command.c:1070 command.c:1151 +#: command.c:1073 command.c:1154 msgid "No changes" msgstr "Nessuna modifica" -#: command.c:1104 +#: command.c:1107 #, c-format msgid "The server (version %s) does not support editing view definitions.\n" msgstr "Il server (versione %s) non supporta la modifica della definizione delle viste.\n" -#: command.c:1228 +#: command.c:1231 #, c-format msgid "%s: invalid encoding name or conversion procedure not found\n" msgstr "%s: nome codifica errato oppure non esiste una procedura di conversione\n" -#: command.c:1263 command.c:1885 command.c:3161 command.c:4859 common.c:173 +#: command.c:1266 command.c:1888 command.c:3169 command.c:4886 common.c:173 #: common.c:244 common.c:541 common.c:1288 common.c:1316 common.c:1417 -#: copy.c:489 copy.c:709 large_obj.c:156 large_obj.c:191 large_obj.c:253 +#: copy.c:489 copy.c:708 large_obj.c:156 large_obj.c:191 large_obj.c:253 #, c-format msgid "%s" msgstr "%s" -#: command.c:1267 +#: command.c:1270 msgid "out of memory" msgstr "memoria esaurita" -#: command.c:1270 +#: command.c:1273 msgid "There is no previous error." msgstr "Non c'è un errore precedente." 
-#: command.c:1441 command.c:1746 command.c:1760 command.c:1777 command.c:1937 -#: command.c:2174 command.c:2487 command.c:2527 +#: command.c:1444 command.c:1749 command.c:1763 command.c:1780 command.c:1940 +#: command.c:2177 command.c:2490 command.c:2530 #, c-format msgid "\\%s: missing required argument\n" msgstr "\\%s: parametro richiesto mancante\n" -#: command.c:1572 +#: command.c:1575 #, c-format msgid "\\elif: cannot occur after \\else\n" msgstr "\\elif: non può apparire dopo \\else\n" -#: command.c:1577 +#: command.c:1580 #, c-format msgid "\\elif: no matching \\if\n" msgstr "\\elif: \\if corrispondente non trovato\n" -#: command.c:1641 +#: command.c:1644 #, c-format msgid "\\else: cannot occur after \\else\n" msgstr "\\else: non può apparire dopo \\else\n" -#: command.c:1646 +#: command.c:1649 #, c-format msgid "\\else: no matching \\if\n" msgstr "\\else: \\if corrispondente non trovato\n" -#: command.c:1686 +#: command.c:1689 #, c-format msgid "\\endif: no matching \\if\n" msgstr "\\endif: \\if corrispondente non trovato\n" -#: command.c:1841 +#: command.c:1844 msgid "Query buffer is empty." msgstr "Il buffer query è vuoto." -#: command.c:1863 +#: command.c:1866 msgid "Enter new password: " msgstr "Inserire la nuova password: " -#: command.c:1864 +#: command.c:1867 msgid "Enter it again: " msgstr "Conferma password: " -#: command.c:1868 +#: command.c:1871 #, c-format msgid "Passwords didn't match.\n" msgstr "Le password non corrispondono.\n" -#: command.c:1967 +#: command.c:1970 #, c-format msgid "\\%s: could not read value for variable\n" msgstr "\\%s: errore nella lettura del valore della variabile\n" -#: command.c:2070 +#: command.c:2073 msgid "Query buffer reset (cleared)." msgstr "Buffer query resettato (svuotato)." -#: command.c:2092 +#: command.c:2095 #, c-format msgid "Wrote history to file \"%s\".\n" msgstr "Storia scritta nel file \"%s\".\n" -#: command.c:2179 +#: command.c:2182 #, c-format msgid "\\%s: environment variable name must not contain \"=\"\n" msgstr "\\%s: il nome della variabile d'ambiente non deve contenere \"=\"\n" -#: command.c:2235 +#: command.c:2238 #, c-format msgid "The server (version %s) does not support showing function source.\n" msgstr "Il server (versione %s) non supporta la visualizzazione dei sorgenti delle funzioni.\n" -#: command.c:2242 +#: command.c:2245 #, c-format msgid "function name is required\n" msgstr "il nome della funzione è richiesto\n" -#: command.c:2329 +#: command.c:2332 #, c-format msgid "The server (version %s) does not support showing view definitions.\n" msgstr "-\"Il server (versione %s) non supporta la visualizzazione della definizione delle viste.\n" -#: command.c:2336 +#: command.c:2339 #, c-format msgid "view name is required\n" msgstr "il nome della vista è richiesto\n" -#: command.c:2459 +#: command.c:2462 msgid "Timing is on." msgstr "Controllo tempo attivato" -#: command.c:2461 +#: command.c:2464 msgid "Timing is off." msgstr "Controllo tempo disattivato." 
-#: command.c:2546 command.c:2574 command.c:3510 command.c:3513 command.c:3516 -#: command.c:3522 command.c:3524 command.c:3532 command.c:3542 command.c:3551 -#: command.c:3565 command.c:3582 command.c:3640 common.c:69 copy.c:332 +#: command.c:2549 command.c:2577 command.c:3537 command.c:3540 command.c:3543 +#: command.c:3549 command.c:3551 command.c:3559 command.c:3569 command.c:3578 +#: command.c:3592 command.c:3609 command.c:3667 common.c:69 copy.c:332 #: copy.c:392 copy.c:405 psqlscanslash.l:760 psqlscanslash.l:771 #: psqlscanslash.l:781 #, c-format msgid "%s: %s\n" msgstr "%s: %s\n" -#: command.c:2953 startup.c:202 +#: command.c:2961 startup.c:205 msgid "Password: " msgstr "Password: " -#: command.c:2958 startup.c:204 +#: command.c:2966 startup.c:207 #, c-format msgid "Password for user %s: " msgstr "Inserisci la password per l'utente %s: " -#: command.c:3008 +#: command.c:3016 #, c-format msgid "All connection parameters must be supplied because no database connection exists\n" msgstr "Tutti i parametri di connessione devono essere forniti perché non esiste alcuna connessione di database\n" -#: command.c:3165 +#: command.c:3173 #, c-format msgid "Previous connection kept\n" msgstr "Connessione precedente mantenuta\n" -#: command.c:3169 +#: command.c:3177 #, c-format msgid "\\connect: %s" msgstr "\\connect: %s" -#: command.c:3205 +#: command.c:3213 #, c-format msgid "You are now connected to database \"%s\" as user \"%s\" via socket in \"%s\" at port \"%s\".\n" msgstr "Adesso sei collegato al database \"%s\" con nome utente \"%s\" tramite socket \"%s\" porta \"%s\".\n" -#: command.c:3208 +#: command.c:3216 #, c-format msgid "You are now connected to database \"%s\" as user \"%s\" on host \"%s\" at port \"%s\".\n" msgstr "Adesso sei collegato al database \"%s\" con nome utente \"%s\" sull'host \"%s\" porta \"%s\".\n" -#: command.c:3212 +#: command.c:3220 #, c-format msgid "You are now connected to database \"%s\" as user \"%s\".\n" msgstr "Sei collegato al database \"%s\" con nome utente \"%s\".\n" -#: command.c:3245 +#: command.c:3253 #, c-format msgid "%s (%s, server %s)\n" msgstr "%s (%s, server %s)\n" -#: command.c:3253 +#: command.c:3261 #, c-format msgid "" "WARNING: %s major version %s, server major version %s.\n" @@ -410,24 +406,24 @@ msgstr "" "ATTENZIONE: versione maggiore %s %s, versione maggiore server %s.\n" " Alcune caratteristiche di psql potrebbero non funzionare.\n" -#: command.c:3290 +#: command.c:3298 #, c-format msgid "SSL connection (protocol: %s, cipher: %s, bits: %s, compression: %s)\n" msgstr "connessione SSL (protocollo: %s, cifrario: %s, bit: %s, compressione: %s)\n" -#: command.c:3291 command.c:3292 command.c:3293 +#: command.c:3299 command.c:3300 command.c:3301 msgid "unknown" msgstr "sconosciuto" -#: command.c:3294 help.c:45 +#: command.c:3302 help.c:45 msgid "off" msgstr "disattivato" -#: command.c:3294 help.c:45 +#: command.c:3302 help.c:45 msgid "on" msgstr "attivato" -#: command.c:3314 +#: command.c:3322 #, c-format msgid "" "WARNING: Console code page (%u) differs from Windows code page (%u)\n" @@ -439,239 +435,239 @@ msgstr "" " funzionare correttamente. 
Vedi le pagine di riferimento\n" " psql \"Note per utenti Windows\" per i dettagli.\n" -#: command.c:3399 +#: command.c:3426 #, c-format msgid "environment variable PSQL_EDITOR_LINENUMBER_ARG must be set to specify a line number\n" msgstr "la variabile di ambiente PSQL_EDITOR_LINENUMBER_ARG deve specificare un numero di riga\n" -#: command.c:3428 +#: command.c:3455 #, c-format msgid "could not start editor \"%s\"\n" msgstr "avvio dell'editor \"%s\" fallito\n" -#: command.c:3430 +#: command.c:3457 #, c-format msgid "could not start /bin/sh\n" msgstr "avvio di /bin/sh fallito\n" -#: command.c:3468 +#: command.c:3495 #, c-format msgid "could not locate temporary directory: %s\n" msgstr "directory temporanea non trovata: %s\n" -#: command.c:3495 +#: command.c:3522 #, c-format msgid "could not open temporary file \"%s\": %s\n" msgstr "apertura del file temporaneo \"%s\" fallita: %s\n" -#: command.c:3769 +#: command.c:3796 #, c-format msgid "\\pset: allowed formats are unaligned, aligned, wrapped, html, asciidoc, latex, latex-longtable, troff-ms\n" msgstr "\\pset: i formati consentiti sono unaligned, aligned, wrapped, html, asciidoc, latex, latex-longtable, troff-ms\n" -#: command.c:3787 +#: command.c:3814 #, c-format msgid "\\pset: allowed line styles are ascii, old-ascii, unicode\n" msgstr "\\pset: gli stili di linea permessi sono ascii, old-ascii, unicode\n" -#: command.c:3802 +#: command.c:3829 #, c-format msgid "\\pset: allowed Unicode border line styles are single, double\n" msgstr "\\pset: gli stili riga Unicode dei bordi consentiti sono single, double\n" -#: command.c:3817 +#: command.c:3844 #, c-format msgid "\\pset: allowed Unicode column line styles are single, double\n" msgstr "\\pset: gli stili riga Unicode delle colonne consentiti sono single, double\n" -#: command.c:3832 +#: command.c:3859 #, c-format msgid "\\pset: allowed Unicode header line styles are single, double\n" msgstr "\\pset: gli stili riga Unicode delle intestazioni consentiti sono single, double\n" -#: command.c:3997 command.c:4176 +#: command.c:4024 command.c:4203 #, c-format msgid "\\pset: unknown option: %s\n" msgstr "\\pset: opzione sconosciuta: %s\n" -#: command.c:4015 +#: command.c:4042 #, c-format msgid "Border style is %d.\n" msgstr "Lo stile del bordo è %d.\n" -#: command.c:4021 +#: command.c:4048 #, c-format msgid "Target width is unset.\n" msgstr "La lunghezza di destinazione non è impostata.\n" -#: command.c:4023 +#: command.c:4050 #, c-format msgid "Target width is %d.\n" msgstr "La larghezza di destinazione è %d.\n" -#: command.c:4030 +#: command.c:4057 #, c-format msgid "Expanded display is on.\n" msgstr "La visualizzazione espansa è attiva.\n" -#: command.c:4032 +#: command.c:4059 #, c-format msgid "Expanded display is used automatically.\n" msgstr "La visualizzazione espansa è usata automaticamente.\n" -#: command.c:4034 +#: command.c:4061 #, c-format msgid "Expanded display is off.\n" msgstr "La visualizzazione espansa è disattivata.\n" -#: command.c:4041 command.c:4049 +#: command.c:4068 command.c:4076 #, c-format msgid "Field separator is zero byte.\n" msgstr "Il separatore di campo è il byte zero.\n" -#: command.c:4043 +#: command.c:4070 #, c-format msgid "Field separator is \"%s\".\n" msgstr "Il separatore di campo è \"%s\".\n" -#: command.c:4056 +#: command.c:4083 #, c-format msgid "Default footer is on.\n" msgstr "Il piè di pagina di default è attivo.\n" -#: command.c:4058 +#: command.c:4085 #, c-format msgid "Default footer is off.\n" msgstr "Il piè di pagina di default è disattivato.\n" -#: 
command.c:4064 +#: command.c:4091 #, c-format msgid "Output format is %s.\n" msgstr "Il formato di output è %s.\n" -#: command.c:4070 +#: command.c:4097 #, c-format msgid "Line style is %s.\n" msgstr "Lo stile della linea è %s.\n" -#: command.c:4077 +#: command.c:4104 #, c-format msgid "Null display is \"%s\".\n" msgstr "La visualizzazione dei null è \"%s\".\n" -#: command.c:4085 +#: command.c:4112 #, c-format msgid "Locale-adjusted numeric output is on.\n" msgstr "La correzione dell'output numerico secondo il locale è attiva.\n" -#: command.c:4087 +#: command.c:4114 #, c-format msgid "Locale-adjusted numeric output is off.\n" msgstr "La correzione dell'output numerico secondo il locale è disattivata.\n" -#: command.c:4094 +#: command.c:4121 #, c-format msgid "Pager is used for long output.\n" msgstr "Usa la paginazione per risultati estesi.\n" -#: command.c:4096 +#: command.c:4123 #, c-format msgid "Pager is always used.\n" msgstr "Paginazione sempre attiva.\n" -#: command.c:4098 +#: command.c:4125 #, c-format msgid "Pager usage is off.\n" msgstr "Paginazione disattivata.\n" -#: command.c:4104 +#: command.c:4131 #, c-format msgid "Pager won't be used for less than %d line.\n" msgid_plural "Pager won't be used for less than %d lines.\n" msgstr[0] "La paginazione non verrà usata per meno di %d riga.\n" msgstr[1] "La paginazione non verrà usata per meno di %d righe.\n" -#: command.c:4114 command.c:4124 +#: command.c:4141 command.c:4151 #, c-format msgid "Record separator is zero byte.\n" msgstr "Il separatore di record è il byte zero.\n" -#: command.c:4116 +#: command.c:4143 #, c-format msgid "Record separator is <newline>.\n" msgstr "Il separatore di record è <newline>.\n" -#: command.c:4118 +#: command.c:4145 #, c-format msgid "Record separator is \"%s\".\n" msgstr "Il separatore di record è \"%s\".\n" -#: command.c:4131 +#: command.c:4158 #, c-format msgid "Table attributes are \"%s\".\n" msgstr "Gli attributi di tabella sono \"%s\".\n" -#: command.c:4134 +#: command.c:4161 #, c-format msgid "Table attributes unset.\n" msgstr "Gli attributi di tabella non sono specificati.\n" -#: command.c:4141 +#: command.c:4168 #, c-format msgid "Title is \"%s\".\n" msgstr "Il titolo è \"%s\".\n" -#: command.c:4143 +#: command.c:4170 #, c-format msgid "Title is unset.\n" msgstr "Il titolo non è assegnato.\n" -#: command.c:4150 +#: command.c:4177 #, c-format msgid "Tuples only is on.\n" msgstr "La visualizzazione dei soli dati è attiva.\n" -#: command.c:4152 +#: command.c:4179 #, c-format msgid "Tuples only is off.\n" msgstr "La visualizzazione dei soli dati è disattivata.\n" -#: command.c:4158 +#: command.c:4185 #, c-format msgid "Unicode border line style is \"%s\".\n" msgstr "Lo stile riga Unicode dei bordi è \"%s\".\n" -#: command.c:4164 +#: command.c:4191 #, c-format msgid "Unicode column line style is \"%s\".\n" msgstr "Lo stile riga Unicode delle colonne è \"%s\".\n" -#: command.c:4170 +#: command.c:4197 #, c-format msgid "Unicode header line style is \"%s\".\n" msgstr "Lo stile riga Unicode delle intestazioni è \"%s\".\n" -#: command.c:4330 +#: command.c:4357 #, c-format msgid "\\!: failed\n" msgstr "\\!: fallita\n" -#: command.c:4355 common.c:754 +#: command.c:4382 common.c:754 #, c-format msgid "\\watch cannot be used with an empty query\n" msgstr "\\watch non può essere usato con una query vuota\n" -#: command.c:4396 +#: command.c:4423 #, c-format msgid "%s\t%s (every %gs)\n" msgstr "%s\t%s (ogni %gs)\n" -#: command.c:4399 +#: command.c:4426 #, c-format msgid "%s (every %gs)\n" msgstr "%s (ogni %gs)\n" -#:
command.c:4453 command.c:4460 common.c:654 common.c:661 common.c:1271 +#: command.c:4480 command.c:4487 common.c:654 common.c:661 common.c:1271 #, c-format msgid "" "********* QUERY **********\n" @@ -684,12 +680,12 @@ msgstr "" "**************************\n" "\n" -#: command.c:4652 +#: command.c:4679 #, c-format msgid "\"%s.%s\" is not a view\n" msgstr "\"%s.%s\" non è una vista\n" -#: command.c:4668 +#: command.c:4695 #, c-format msgid "could not parse reloptions array\n" msgstr "interpretazione dell'array reloptions fallita\n" @@ -862,11 +858,11 @@ msgstr "" "Inserire i dati da copiare seguiti da un \"a capo\".\n" "Terminare con un backslash ed un punto su una singola riga, o un segnale EOF." -#: copy.c:671 +#: copy.c:670 msgid "aborted because of read failure" msgstr "interrotto a causa di lettura non riuscita" -#: copy.c:705 +#: copy.c:704 msgid "trying to exit copy mode" msgstr "tentativo di uscita dalla modalità copy" @@ -915,990 +911,1021 @@ msgstr "\\crosstabview: nome di colonna ambiguo: \"%s\"\n" msgid "\\crosstabview: column name not found: \"%s\"\n" msgstr "\\crosstabview: colonna non trovata: \"%s\"\n" -#: describe.c:73 describe.c:342 describe.c:599 describe.c:730 describe.c:874 -#: describe.c:1035 describe.c:1107 describe.c:3332 describe.c:3538 -#: describe.c:3629 describe.c:3877 describe.c:4022 describe.c:4254 -#: describe.c:4329 describe.c:4340 describe.c:4402 describe.c:4822 -#: describe.c:4905 +#: describe.c:74 describe.c:346 describe.c:603 describe.c:735 describe.c:879 +#: describe.c:1040 describe.c:1112 describe.c:3371 describe.c:3583 +#: describe.c:3674 describe.c:3922 describe.c:4067 describe.c:4308 +#: describe.c:4383 describe.c:4394 describe.c:4456 describe.c:4881 +#: describe.c:4964 msgid "Schema" msgstr "Schema" -#: describe.c:74 describe.c:162 describe.c:228 describe.c:236 describe.c:343 -#: describe.c:600 describe.c:731 describe.c:792 describe.c:875 describe.c:1108 -#: describe.c:3333 describe.c:3461 describe.c:3539 describe.c:3630 -#: describe.c:3709 describe.c:3878 describe.c:3947 describe.c:4023 -#: describe.c:4255 describe.c:4330 describe.c:4341 describe.c:4403 -#: describe.c:4595 describe.c:4679 describe.c:4903 describe.c:5071 -#: describe.c:5262 +#: describe.c:75 describe.c:164 describe.c:231 describe.c:239 describe.c:347 +#: describe.c:604 describe.c:736 describe.c:797 describe.c:880 describe.c:1113 +#: describe.c:3372 describe.c:3506 describe.c:3584 describe.c:3675 +#: describe.c:3754 describe.c:3923 describe.c:3992 describe.c:4068 +#: describe.c:4309 describe.c:4384 describe.c:4395 describe.c:4457 +#: describe.c:4654 describe.c:4738 describe.c:4962 describe.c:5134 +#: describe.c:5341 msgid "Name" msgstr "Nome" -#: describe.c:75 describe.c:355 describe.c:401 describe.c:418 +#: describe.c:76 describe.c:359 describe.c:405 describe.c:422 msgid "Result data type" msgstr "Tipo dato del risultato" -#: describe.c:83 describe.c:96 describe.c:100 describe.c:356 describe.c:402 -#: describe.c:419 +#: describe.c:84 describe.c:97 describe.c:101 describe.c:360 describe.c:406 +#: describe.c:423 msgid "Argument data types" msgstr "Tipo dato dei parametri" -#: describe.c:107 describe.c:172 describe.c:259 describe.c:464 describe.c:648 -#: describe.c:746 describe.c:817 describe.c:1110 describe.c:1746 -#: describe.c:3132 describe.c:3367 describe.c:3492 describe.c:3566 -#: describe.c:3639 describe.c:3722 describe.c:3790 describe.c:3890 -#: describe.c:3956 describe.c:4024 describe.c:4160 describe.c:4200 -#: describe.c:4271 describe.c:4333 describe.c:4342 describe.c:4404 -#: 
describe.c:4621 describe.c:4701 describe.c:4836 describe.c:4906 +#: describe.c:108 describe.c:174 describe.c:262 describe.c:468 describe.c:652 +#: describe.c:751 describe.c:822 describe.c:1115 describe.c:1845 +#: describe.c:3161 describe.c:3406 describe.c:3537 describe.c:3611 +#: describe.c:3684 describe.c:3767 describe.c:3835 describe.c:3935 +#: describe.c:4001 describe.c:4069 describe.c:4210 describe.c:4252 +#: describe.c:4325 describe.c:4387 describe.c:4396 describe.c:4458 +#: describe.c:4680 describe.c:4760 describe.c:4895 describe.c:4965 #: large_obj.c:289 large_obj.c:299 msgid "Description" msgstr "Descrizione" -#: describe.c:125 +#: describe.c:126 msgid "List of aggregate functions" msgstr "Lista delle funzione aggregate" -#: describe.c:149 +#: describe.c:151 #, c-format msgid "The server (version %s) does not support access methods.\n" msgstr "Il server (versione %s) non supporta metodi di accesso.\n" -#: describe.c:163 +#: describe.c:165 msgid "Index" msgstr "Indice" -#: describe.c:164 describe.c:362 describe.c:407 describe.c:424 describe.c:882 -#: describe.c:1046 describe.c:1706 describe.c:3342 describe.c:3540 -#: describe.c:4698 +#: describe.c:166 describe.c:366 describe.c:411 describe.c:428 describe.c:887 +#: describe.c:1051 describe.c:1582 describe.c:1606 describe.c:1808 +#: describe.c:3381 describe.c:3585 describe.c:4757 msgid "Type" msgstr "Tipo" -#: describe.c:171 describe.c:4600 +#: describe.c:173 describe.c:4659 msgid "Handler" msgstr "Handler" -#: describe.c:190 +#: describe.c:192 msgid "List of access methods" msgstr "Lista dei metodi di accesso" -#: describe.c:215 +#: describe.c:218 #, c-format msgid "The server (version %s) does not support tablespaces.\n" msgstr "Il server (versione %s) non supporta i tablespace.\n" -#: describe.c:229 describe.c:237 describe.c:452 describe.c:638 describe.c:793 -#: describe.c:1034 describe.c:3343 describe.c:3465 describe.c:3711 -#: describe.c:3948 describe.c:4596 describe.c:4680 describe.c:5072 -#: describe.c:5263 large_obj.c:288 +#: describe.c:232 describe.c:240 describe.c:456 describe.c:642 describe.c:798 +#: describe.c:1039 describe.c:3382 describe.c:3510 describe.c:3756 +#: describe.c:3993 describe.c:4655 describe.c:4739 describe.c:5135 +#: describe.c:5247 describe.c:5342 large_obj.c:288 msgid "Owner" msgstr "Proprietario" -#: describe.c:230 describe.c:238 +#: describe.c:233 describe.c:241 msgid "Location" msgstr "Posizione" -#: describe.c:249 describe.c:2947 +#: describe.c:252 describe.c:2980 msgid "Options" msgstr "Opzioni" -#: describe.c:254 describe.c:611 describe.c:809 describe.c:3359 describe.c:3363 +#: describe.c:257 describe.c:615 describe.c:814 describe.c:3398 +#: describe.c:3402 msgid "Size" msgstr "Dimensione" -#: describe.c:276 +#: describe.c:279 msgid "List of tablespaces" msgstr "Lista dei tablespace" -#: describe.c:316 +#: describe.c:320 #, c-format msgid "\\df only takes [antwS+] as options\n" msgstr "\\df accetta come opzione solo [antwS+]\n" -#: describe.c:324 +#: describe.c:328 #, c-format msgid "\\df does not take a \"w\" option with server version %s\n" msgstr "\\df non accetta un'opzione \"w\" con il server in versione %s\n" #. 
translator: "agg" is short for "aggregate" -#: describe.c:358 describe.c:404 describe.c:421 +#: describe.c:362 describe.c:408 describe.c:425 msgid "agg" msgstr "aggr" -#: describe.c:359 +#: describe.c:363 msgid "window" msgstr "finestra" -#: describe.c:360 describe.c:405 describe.c:422 describe.c:1244 +#: describe.c:364 describe.c:409 describe.c:426 describe.c:1249 msgid "trigger" msgstr "trigger" -#: describe.c:361 describe.c:406 describe.c:423 +#: describe.c:365 describe.c:410 describe.c:427 msgid "normal" msgstr "normale" -#: describe.c:434 +#: describe.c:438 msgid "immutable" msgstr "immutabile" -#: describe.c:435 +#: describe.c:439 msgid "stable" msgstr "stabile" -#: describe.c:436 +#: describe.c:440 msgid "volatile" msgstr "volatile" -#: describe.c:437 +#: describe.c:441 msgid "Volatility" msgstr "Volatilità" -#: describe.c:445 +#: describe.c:449 msgid "restricted" msgstr "ristretta" -#: describe.c:446 +#: describe.c:450 msgid "safe" msgstr "sicura" -#: describe.c:447 +#: describe.c:451 msgid "unsafe" msgstr "non sicura" -#: describe.c:448 +#: describe.c:452 msgid "Parallel" msgstr "Parallela" -#: describe.c:453 +#: describe.c:457 msgid "definer" msgstr "definitore" -#: describe.c:454 +#: describe.c:458 msgid "invoker" msgstr "invocatore" -#: describe.c:455 +#: describe.c:459 msgid "Security" msgstr "Sicurezza" -#: describe.c:462 +#: describe.c:466 msgid "Language" msgstr "Linguaggio" -#: describe.c:463 +#: describe.c:467 msgid "Source code" msgstr "Codice sorgente" -#: describe.c:562 +#: describe.c:566 msgid "List of functions" msgstr "Lista delle funzioni" -#: describe.c:610 +#: describe.c:614 msgid "Internal name" msgstr "Nome interno" -#: describe.c:632 +#: describe.c:636 msgid "Elements" msgstr "Elementi" -#: describe.c:689 +#: describe.c:693 msgid "List of data types" msgstr "Lista dei tipi di dati" -#: describe.c:732 +#: describe.c:737 msgid "Left arg type" msgstr "Argomento sinistro" -#: describe.c:733 +#: describe.c:738 msgid "Right arg type" msgstr "Argomento destro" -#: describe.c:734 +#: describe.c:739 msgid "Result type" msgstr "Tipo di risultato" -#: describe.c:739 describe.c:3781 describe.c:4159 +#: describe.c:744 describe.c:3826 describe.c:4209 msgid "Function" msgstr "Funzione" -#: describe.c:764 +#: describe.c:769 msgid "List of operators" msgstr "Lista degli operatori" -#: describe.c:794 +#: describe.c:799 msgid "Encoding" msgstr "Codifica" -#: describe.c:799 describe.c:3879 +#: describe.c:804 describe.c:3924 msgid "Collate" msgstr "Ordinamento" -#: describe.c:800 describe.c:3880 +#: describe.c:805 describe.c:3925 msgid "Ctype" msgstr "Ctype" -#: describe.c:813 +#: describe.c:818 msgid "Tablespace" msgstr "Tablespace" -#: describe.c:835 +#: describe.c:840 msgid "List of databases" msgstr "Lista dei database" -#: describe.c:876 describe.c:881 describe.c:1037 describe.c:3334 -#: describe.c:3341 +#: describe.c:881 describe.c:886 describe.c:1042 describe.c:3373 +#: describe.c:3380 msgid "table" msgstr "tabella" -#: describe.c:877 describe.c:3335 +#: describe.c:882 describe.c:3374 msgid "view" msgstr "vista" -#: describe.c:878 describe.c:3336 +#: describe.c:883 describe.c:3375 msgid "materialized view" msgstr "vista materializzata" -#: describe.c:879 describe.c:1039 describe.c:3338 +#: describe.c:884 describe.c:1044 describe.c:3377 msgid "sequence" msgstr "sequenza" -#: describe.c:880 describe.c:3340 +#: describe.c:885 describe.c:3379 msgid "foreign table" msgstr "tabella esterna" -#: describe.c:893 +#: describe.c:898 msgid "Column privileges" msgstr "Privilegi di 
colonna" -#: describe.c:924 describe.c:958 +#: describe.c:929 describe.c:963 msgid "Policies" msgstr "Regole di sicurezza" -#: describe.c:990 describe.c:5319 describe.c:5323 +#: describe.c:995 describe.c:5398 describe.c:5402 msgid "Access privileges" msgstr "Privilegi di accesso" -#: describe.c:1021 +#: describe.c:1026 #, c-format msgid "The server (version %s) does not support altering default privileges.\n" msgstr "Il server (versione %s) non supporta la modifica dei privilegi di default.\n" -#: describe.c:1041 +#: describe.c:1046 msgid "function" msgstr "funzione" -#: describe.c:1043 +#: describe.c:1048 msgid "type" msgstr "tipo" -#: describe.c:1045 +#: describe.c:1050 msgid "schema" msgstr "schema" -#: describe.c:1069 +#: describe.c:1074 msgid "Default access privileges" msgstr "Privilegi di accesso di default" -#: describe.c:1109 +#: describe.c:1114 msgid "Object" msgstr "Oggetto" -#: describe.c:1123 +#: describe.c:1128 msgid "table constraint" msgstr "vincolo di tabella" -#: describe.c:1145 +#: describe.c:1150 msgid "domain constraint" msgstr "vincolo di dominio" -#: describe.c:1173 +#: describe.c:1178 msgid "operator class" msgstr "classe operatori" -#: describe.c:1202 +#: describe.c:1207 msgid "operator family" msgstr "famiglia operatori" -#: describe.c:1224 +#: describe.c:1229 msgid "rule" msgstr "regola" -#: describe.c:1266 +#: describe.c:1271 msgid "Object descriptions" msgstr "Descrizioni oggetti" -#: describe.c:1320 +#: describe.c:1327 describe.c:3469 #, c-format msgid "Did not find any relation named \"%s\".\n" -msgstr "Non ho trovato alcuna relazione di nome \"%s\".\n" +msgstr "Non è stata trovata nessuna relazione chiamata \"%s\".\n" + +#: describe.c:1330 describe.c:3472 +#, c-format +msgid "Did not find any relations.\n" +msgstr "Non è stata trovata nessuna relazione.\n" -#: describe.c:1529 +#: describe.c:1537 #, c-format msgid "Did not find any relation with OID %s.\n" -msgstr "Non ho trovato nessuna relazione con OID %s.\n" +msgstr "Non è stata trovata nessuna relazione con OID %s.\n" + +#: describe.c:1583 describe.c:1607 +msgid "Start" +msgstr "Inizio" + +#: describe.c:1584 describe.c:1608 +msgid "Minimum" +msgstr "Minimo" -#: describe.c:1642 describe.c:1691 +#: describe.c:1585 describe.c:1609 +msgid "Maximum" +msgstr "Massimo" + +#: describe.c:1586 describe.c:1610 +msgid "Increment" +msgstr "Incremento" + +#: describe.c:1587 describe.c:1611 describe.c:3678 describe.c:3829 +msgid "yes" +msgstr "sì" + +#: describe.c:1588 describe.c:1612 describe.c:3678 describe.c:3827 +msgid "no" +msgstr "no" + +#: describe.c:1589 describe.c:1613 +msgid "Cycles?" +msgstr "Riparte?" 
+ +#: describe.c:1590 describe.c:1614 +msgid "Cache" +msgstr "Cache" + +#: describe.c:1657 +#, c-format +msgid "Owned by: %s" +msgstr "Proprietario: %s" + +#: describe.c:1661 +#, c-format +msgid "Sequence for identity column: %s" +msgstr "Sequenza per la colonna identità: %s" + +#: describe.c:1668 +#, c-format +msgid "Sequence \"%s.%s\"" +msgstr "Sequenza \"%s.%s\"" + +#: describe.c:1748 describe.c:1793 #, c-format msgid "Unlogged table \"%s.%s\"" msgstr "Tabella non loggata \"%s.%s\"" -#: describe.c:1645 describe.c:1694 +#: describe.c:1751 describe.c:1796 #, c-format msgid "Table \"%s.%s\"" msgstr "Tabella \"%s.%s\"" -#: describe.c:1649 +#: describe.c:1755 #, c-format msgid "View \"%s.%s\"" msgstr "Vista \"%s.%s\"" -#: describe.c:1654 +#: describe.c:1760 #, c-format msgid "Unlogged materialized view \"%s.%s\"" msgstr "Vista materializzata non loggata \"%s.%s\"" -#: describe.c:1657 +#: describe.c:1763 #, c-format msgid "Materialized view \"%s.%s\"" msgstr "Vista materializzata \"%s.%s\"" -#: describe.c:1661 -#, c-format -msgid "Sequence \"%s.%s\"" -msgstr "Sequenza \"%s.%s\"" - -#: describe.c:1666 +#: describe.c:1768 #, c-format msgid "Unlogged index \"%s.%s\"" msgstr "Indice non loggato \"%s.%s\"" -#: describe.c:1669 +#: describe.c:1771 #, c-format msgid "Index \"%s.%s\"" msgstr "Indice \"%s.%s\"" -#: describe.c:1674 +#: describe.c:1776 #, c-format msgid "Special relation \"%s.%s\"" msgstr "relazione speciale \"%s.%s\"" -#: describe.c:1678 +#: describe.c:1780 #, c-format msgid "TOAST table \"%s.%s\"" msgstr "Tabella TOAST \"%s.%s\"" -#: describe.c:1682 +#: describe.c:1784 #, c-format msgid "Composite type \"%s.%s\"" msgstr "Tipo composito \"%s.%s\"" -#: describe.c:1686 +#: describe.c:1788 #, c-format msgid "Foreign table \"%s.%s\"" msgstr "Tabella esterna \"%s.%s\"" -#: describe.c:1705 +#: describe.c:1807 msgid "Column" msgstr "Colonna" -#: describe.c:1716 describe.c:3546 +#: describe.c:1818 describe.c:3591 msgid "Collation" msgstr "Ordinamento" -#: describe.c:1717 describe.c:3553 +#: describe.c:1819 describe.c:3598 msgid "Nullable" msgstr "Può essere null" -#: describe.c:1718 describe.c:3554 +#: describe.c:1820 describe.c:3599 msgid "Default" msgstr "Default" -#: describe.c:1723 -msgid "Value" -msgstr "Valore" - -#: describe.c:1726 +#: describe.c:1825 msgid "Definition" msgstr "Definizione" -#: describe.c:1729 describe.c:4616 describe.c:4700 describe.c:4771 -#: describe.c:4835 -msgid "FDW Options" +#: describe.c:1828 describe.c:4675 describe.c:4759 describe.c:4830 +#: describe.c:4894 +msgid "FDW options" msgstr "Opzioni FDW" -#: describe.c:1733 +#: describe.c:1832 msgid "Storage" msgstr "Memorizzazione" -#: describe.c:1738 +#: describe.c:1837 msgid "Stats target" msgstr "Dest. stat." 
-#: describe.c:1893 +#: describe.c:1982 #, c-format msgid "Partition of: %s %s" msgstr "Partizione di: %s %s" -#: describe.c:1899 +#: describe.c:1988 #, c-format msgid "Partition constraint: %s" msgstr "Vincolo di partizione: %s" -#: describe.c:1922 +#: describe.c:2011 #, c-format msgid "Partition key: %s" msgstr "Chiave di partizione: %s" -#: describe.c:1990 +#: describe.c:2079 msgid "primary key, " msgstr "chiave primaria, " -#: describe.c:1992 +#: describe.c:2081 msgid "unique, " msgstr "univoco, " -#: describe.c:1998 +#: describe.c:2087 #, c-format msgid "for table \"%s.%s\"" msgstr "per la tabella \"%s.%s\"" -#: describe.c:2002 +#: describe.c:2091 #, c-format msgid ", predicate (%s)" msgstr ", predicato (%s)" -#: describe.c:2005 +#: describe.c:2094 msgid ", clustered" msgstr ", raggruppato" -#: describe.c:2008 +#: describe.c:2097 msgid ", invalid" msgstr ", non valido" -#: describe.c:2011 +#: describe.c:2100 msgid ", deferrable" msgstr ", deferibile" -#: describe.c:2014 +#: describe.c:2103 msgid ", initially deferred" msgstr ", inizialmente deferito" -#: describe.c:2017 +#: describe.c:2106 msgid ", replica identity" msgstr ", identità di replica" -#: describe.c:2056 -#, c-format -msgid "Owned by: %s" -msgstr "Proprietario: %s" - -#: describe.c:2061 -#, c-format -msgid "Sequence for identity column: %s" -msgstr "Sequenza per la colonna identità: %s" - -#: describe.c:2125 +#: describe.c:2165 msgid "Indexes:" msgstr "Indici:" -#: describe.c:2209 +#: describe.c:2249 msgid "Check constraints:" msgstr "Vincoli di controllo:" -#: describe.c:2240 +#: describe.c:2280 msgid "Foreign-key constraints:" msgstr "Vincoli di integrità referenziale" -#: describe.c:2271 +#: describe.c:2311 msgid "Referenced by:" msgstr "Referenziato da:" -#: describe.c:2331 +#: describe.c:2361 msgid "Policies:" msgstr "Regole di sicurezza:" -#: describe.c:2334 +#: describe.c:2364 msgid "Policies (forced row security enabled):" msgstr "Regole (sicurezza per riga forzata abilitata):" -#: describe.c:2337 +#: describe.c:2367 msgid "Policies (row security enabled): (none)" msgstr "Regole (sicurezza per riga abilitata): (nessuna)" -#: describe.c:2340 +#: describe.c:2370 msgid "Policies (forced row security enabled): (none)" msgstr "Regole (sicurezza per riga forzata abilitata): (nessuna)" -#: describe.c:2343 +#: describe.c:2373 msgid "Policies (row security disabled):" msgstr "Regole (sicurezza per riga disabilitata):" -#: describe.c:2405 +#: describe.c:2435 msgid "Statistics objects:" msgstr "Oggetti statistiche:" -#: describe.c:2508 describe.c:2590 +#: describe.c:2538 describe.c:2623 msgid "Rules:" msgstr "Regole:" -#: describe.c:2511 +#: describe.c:2541 msgid "Disabled rules:" msgstr "Regole disabilitate:" -#: describe.c:2514 +#: describe.c:2544 msgid "Rules firing always:" msgstr "Regole sempre abilitate:" -#: describe.c:2517 +#: describe.c:2547 msgid "Rules firing on replica only:" msgstr "Regole abilitate solo su replica:" -#: describe.c:2554 +#: describe.c:2587 msgid "Publications:" msgstr "Pubblicazioni:" -#: describe.c:2573 +#: describe.c:2606 msgid "View definition:" msgstr "Definizione vista:" -#: describe.c:2708 +#: describe.c:2741 msgid "Triggers:" msgstr "Trigger:" -#: describe.c:2712 +#: describe.c:2745 msgid "Disabled user triggers:" msgstr "Trigger utente disabilitati:" -#: describe.c:2714 +#: describe.c:2747 msgid "Disabled triggers:" msgstr "Trigger disabilitati:" -#: describe.c:2717 +#: describe.c:2750 msgid "Disabled internal triggers:" msgstr "Trigger interni disabilitati:" -#: describe.c:2720 +#: 
describe.c:2753 msgid "Triggers firing always:" msgstr "Trigger sempre abilitati:" -#: describe.c:2723 +#: describe.c:2756 msgid "Triggers firing on replica only:" msgstr "Trigger abilitati solo su replica." -#: describe.c:2782 +#: describe.c:2815 #, c-format msgid "Server: %s" msgstr "Server: %s" -#: describe.c:2790 +#: describe.c:2823 #, c-format -msgid "FDW Options: (%s)" -msgstr "Opzioni FDW: (%s)" +msgid "FDW options: (%s)" +msgstr "Opzioni FDW (%s)" -#: describe.c:2809 +#: describe.c:2842 msgid "Inherits" msgstr "Eredita" -#: describe.c:2863 +#: describe.c:2896 #, c-format msgid "Number of child tables: %d (Use \\d+ to list them.)" msgstr "Numero di tabelle figlio: %d (Usa \\d+ per elencarle.)" -#: describe.c:2865 +#: describe.c:2898 #, c-format msgid "Number of partitions: %d (Use \\d+ to list them.)" msgstr "Numero di partizioni: %d (Usa \\d+ per elencarle.)" -#: describe.c:2873 +#: describe.c:2906 msgid "Child tables" msgstr "Tabelle figlio" -#: describe.c:2873 +#: describe.c:2906 msgid "Partitions" msgstr "Partizioni" -#: describe.c:2907 +#: describe.c:2940 #, c-format msgid "Typed table of type: %s" msgstr "Tabella di tipo: %s" -#: describe.c:2923 +#: describe.c:2956 msgid "Replica Identity" msgstr "Identità di replica" -#: describe.c:2936 +#: describe.c:2969 msgid "Has OIDs: yes" msgstr "Ha OID: sì" -#: describe.c:3020 +#: describe.c:3049 #, c-format msgid "Tablespace: \"%s\"" msgstr "Tablespace: \"%s\"" #. translator: before this string there's an index description like #. '"foo_pkey" PRIMARY KEY, btree (a)' -#: describe.c:3032 +#: describe.c:3061 #, c-format msgid ", tablespace \"%s\"" msgstr ", tablespace \"%s\"" -#: describe.c:3125 +#: describe.c:3154 msgid "List of roles" msgstr "Lista dei ruoli" -#: describe.c:3127 +#: describe.c:3156 msgid "Role name" msgstr "Nome ruolo" -#: describe.c:3128 +#: describe.c:3157 msgid "Attributes" msgstr "Attributi" -#: describe.c:3129 +#: describe.c:3158 msgid "Member of" msgstr "Membro di" -#: describe.c:3140 +#: describe.c:3169 msgid "Superuser" msgstr "Superutente" -#: describe.c:3143 +#: describe.c:3172 msgid "No inheritance" msgstr "Nessuna ereditarietà" -#: describe.c:3146 +#: describe.c:3175 msgid "Create role" msgstr "Crea ruoli" -#: describe.c:3149 +#: describe.c:3178 msgid "Create DB" msgstr "Crea DB" -#: describe.c:3152 +#: describe.c:3181 msgid "Cannot login" msgstr "Login non possibile" -#: describe.c:3156 +#: describe.c:3185 msgid "Replication" msgstr "Replica" -#: describe.c:3160 +#: describe.c:3189 msgid "Bypass RLS" msgstr "Scavalca RLS" -#: describe.c:3169 +#: describe.c:3198 msgid "No connections" msgstr "Niente connessioni" -#: describe.c:3171 +#: describe.c:3200 #, c-format msgid "%d connection" msgid_plural "%d connections" msgstr[0] "%d connessione" msgstr[1] "%d connessioni" -#: describe.c:3181 +#: describe.c:3210 msgid "Password valid until " msgstr "Password valida fino a " -#: describe.c:3237 +#: describe.c:3260 +#, c-format +msgid "The server (version %s) does not support per-database role settings.\n" +msgstr "Il server (versione %s) non supporta l'impostazione dei ruoli per database.\n" + +#: describe.c:3273 msgid "Role" msgstr "Ruolo" -#: describe.c:3238 +#: describe.c:3274 msgid "Database" msgstr "Database" -#: describe.c:3239 +#: describe.c:3275 msgid "Settings" msgstr "Impostazioni" -#: describe.c:3249 +#: describe.c:3296 #, c-format -msgid "No per-database role settings support in this server version.\n" -msgstr "Questa versione del server non supporta l'impostazione dei ruoli per database.\n" +msgid 
"Did not find any settings for role \"%s\" and database \"%s\".\n" +msgstr "Non è stata trovata nessuna impostazione per il ruolo \"%s\" e il database \"%s\".\n" -#: describe.c:3260 +#: describe.c:3299 #, c-format -msgid "No matching settings found.\n" -msgstr "Nessuna impostazione corrispondente trovata.\n" +msgid "Did not find any settings for role \"%s\".\n" +msgstr "Non è stata trovata nessuna impostazione per il ruolo \"%s\".\n" -#: describe.c:3262 +#: describe.c:3302 #, c-format -msgid "No settings found.\n" -msgstr "Nessuna impostazione trovata.\n" +msgid "Did not find any settings.\n" +msgstr "Non è stata trovata nessuna impostazione.\n" -#: describe.c:3267 +#: describe.c:3307 msgid "List of settings" msgstr "Lista delle impostazioni" -#: describe.c:3337 +#: describe.c:3376 msgid "index" msgstr "indice" -#: describe.c:3339 +#: describe.c:3378 msgid "special" msgstr "speciale" -#: describe.c:3348 describe.c:4823 +#: describe.c:3387 describe.c:4882 msgid "Table" msgstr "Tabella" -#: describe.c:3425 -#, c-format -msgid "No matching relations found.\n" -msgstr "Nessuna relazione corrispondente trovata.\n" - -#: describe.c:3427 -#, c-format -msgid "No relations found.\n" -msgstr "Nessuna relazione trovata.\n" - -#: describe.c:3432 +#: describe.c:3477 msgid "List of relations" msgstr "Lista delle relazioni" -#: describe.c:3469 +#: describe.c:3514 msgid "Trusted" msgstr "Fidato" -#: describe.c:3477 -msgid "Internal Language" +#: describe.c:3522 +msgid "Internal language" msgstr "Linguaggio interno" -#: describe.c:3478 -msgid "Call Handler" -msgstr "Gestore Chiamate" +#: describe.c:3523 +msgid "Call handler" +msgstr "Handler di chiamata" -#: describe.c:3479 describe.c:4603 +#: describe.c:3524 describe.c:4662 msgid "Validator" msgstr "Validatore" -#: describe.c:3482 -msgid "Inline Handler" -msgstr "Handler Inline" +#: describe.c:3527 +msgid "Inline handler" +msgstr "Handler inline" -#: describe.c:3510 +#: describe.c:3555 msgid "List of languages" msgstr "Lista dei linguaggi" -#: describe.c:3555 +#: describe.c:3600 msgid "Check" msgstr "Controllo" -#: describe.c:3597 +#: describe.c:3642 msgid "List of domains" msgstr "Lista dei domini" -#: describe.c:3631 +#: describe.c:3676 msgid "Source" msgstr "Sorgente" -#: describe.c:3632 +#: describe.c:3677 msgid "Destination" msgstr "Destinazione" -#: describe.c:3633 describe.c:3782 -msgid "no" -msgstr "no" - -#: describe.c:3633 describe.c:3784 -msgid "yes" -msgstr "sì" - -#: describe.c:3634 +#: describe.c:3679 msgid "Default?" msgstr "Predefinito?" 
-#: describe.c:3671 +#: describe.c:3716 msgid "List of conversions" msgstr "Lista delle conversioni" -#: describe.c:3710 +#: describe.c:3755 msgid "Event" msgstr "Evento" -#: describe.c:3712 +#: describe.c:3757 msgid "enabled" msgstr "abilitato" -#: describe.c:3713 +#: describe.c:3758 msgid "replica" msgstr "replica" -#: describe.c:3714 +#: describe.c:3759 msgid "always" msgstr "sempre" -#: describe.c:3715 +#: describe.c:3760 msgid "disabled" msgstr "disabilitato" -#: describe.c:3716 describe.c:5264 +#: describe.c:3761 describe.c:5343 msgid "Enabled" msgstr "Abilitato" -#: describe.c:3717 +#: describe.c:3762 msgid "Procedure" msgstr "Procedura" -#: describe.c:3718 +#: describe.c:3763 msgid "Tags" msgstr "Tag" -#: describe.c:3737 +#: describe.c:3782 msgid "List of event triggers" msgstr "Lista di trigger di evento" -#: describe.c:3779 +#: describe.c:3824 msgid "Source type" msgstr "Tipo di partenza" -#: describe.c:3780 +#: describe.c:3825 msgid "Target type" msgstr "Tipo di arrivo" -#: describe.c:3783 +#: describe.c:3828 msgid "in assignment" msgstr "in assegnazione" -#: describe.c:3785 +#: describe.c:3830 msgid "Implicit?" msgstr "Implicito?" -#: describe.c:3836 +#: describe.c:3881 msgid "List of casts" msgstr "Lista delle conversioni di tipo" -#: describe.c:3864 +#: describe.c:3909 #, c-format msgid "The server (version %s) does not support collations.\n" msgstr "Il server (versione %s) non supporta gli ordinamenti.\n" -#: describe.c:3885 +#: describe.c:3930 msgid "Provider" msgstr "Provider" -#: describe.c:3920 +#: describe.c:3965 msgid "List of collations" msgstr "Lista degli ordinamenti" -#: describe.c:3979 +#: describe.c:4024 msgid "List of schemas" msgstr "Lista degli schemi" -#: describe.c:4004 describe.c:4242 describe.c:4313 describe.c:4384 +#: describe.c:4049 describe.c:4296 describe.c:4367 describe.c:4438 #, c-format msgid "The server (version %s) does not support full text search.\n" msgstr "Il server (versione %s) non supporta la ricerca full text.\n" -#: describe.c:4039 +#: describe.c:4084 msgid "List of text search parsers" msgstr "Lista degli analizzatori di ricerca resto" -#: describe.c:4082 +#: describe.c:4129 #, c-format msgid "Did not find any text search parser named \"%s\".\n" -msgstr "Non ho trovato alcun analizzatore di ricerca testo chiamato \"%s\".\n" +msgstr "Non è stato trovato nessun analizzatore di ricerca testo chiamato \"%s\".\n" -#: describe.c:4157 +#: describe.c:4132 +#, c-format +msgid "Did not find any text search parsers.\n" +msgstr "Non è stato trovato nessun analizzatore di ricerca testo.\n" + +#: describe.c:4207 msgid "Start parse" msgstr "Inizio analisi" -#: describe.c:4158 +#: describe.c:4208 msgid "Method" msgstr "Metodo" -#: describe.c:4162 +#: describe.c:4212 msgid "Get next token" msgstr "Ottiene il token successivo" -#: describe.c:4164 +#: describe.c:4214 msgid "End parse" msgstr "Fine analisi" -#: describe.c:4166 +#: describe.c:4216 msgid "Get headline" msgstr "Ottiene intestazione" -#: describe.c:4168 +#: describe.c:4218 msgid "Get token types" msgstr "Ottieni i tipi token" -#: describe.c:4178 +#: describe.c:4229 #, c-format msgid "Text search parser \"%s.%s\"" msgstr "Analizzatore di ricerca teso \"%s.%s\"" -#: describe.c:4180 +#: describe.c:4232 #, c-format msgid "Text search parser \"%s\"" msgstr "Analizzatore di ricerca testo \"%s\"" -#: describe.c:4199 +#: describe.c:4251 msgid "Token name" msgstr "Nome token" -#: describe.c:4210 +#: describe.c:4262 #, c-format msgid "Token types for parser \"%s.%s\"" msgstr "Tipi token per 
l'analizzatore \"%s.%s\"" -#: describe.c:4212 +#: describe.c:4265 #, c-format msgid "Token types for parser \"%s\"" msgstr "Tipi token per l'analizzatore \"%s\"" -#: describe.c:4265 +#: describe.c:4319 msgid "Template" msgstr "Modello" -#: describe.c:4266 +#: describe.c:4320 msgid "Init options" msgstr "Opzioni iniziali:" -#: describe.c:4288 +#: describe.c:4342 msgid "List of text search dictionaries" msgstr "Lista dei dizionari di ricerca testo" -#: describe.c:4331 +#: describe.c:4385 msgid "Init" msgstr "Init" -#: describe.c:4332 +#: describe.c:4386 msgid "Lexize" msgstr "Lexize" -#: describe.c:4359 +#: describe.c:4413 msgid "List of text search templates" msgstr "Lista dei modelli di ricerca testo" -#: describe.c:4419 +#: describe.c:4473 msgid "List of text search configurations" msgstr "Lista delle configurazioni di ricerca testo" -#: describe.c:4463 +#: describe.c:4519 #, c-format msgid "Did not find any text search configuration named \"%s\".\n" -msgstr "Non trovata alcuna configurazione di ricerca testo chiamata \"%s\".\n" +msgstr "Non è stata trovata nessuna configurazione di ricerca testo chiamata \"%s\".\n" -#: describe.c:4529 +#: describe.c:4522 +#, c-format +msgid "Did not find any text search configurations.\n" +msgstr "Non è stata trovata nessuna configurazione di ricerca testo.\n" + +#: describe.c:4588 msgid "Token" msgstr "Token" -#: describe.c:4530 +#: describe.c:4589 msgid "Dictionaries" msgstr "Dizionari" -#: describe.c:4541 +#: describe.c:4600 #, c-format msgid "Text search configuration \"%s.%s\"" msgstr "Configurazione di ricerca testo \"%s.%s\"" -#: describe.c:4544 +#: describe.c:4603 #, c-format msgid "Text search configuration \"%s\"" msgstr "Configurazione di ricerca testo \"%s\"" -#: describe.c:4548 +#: describe.c:4607 #, c-format msgid "" "\n" @@ -1907,7 +1934,7 @@ msgstr "" "\n" "Analizzatore \"%s.%s\"" -#: describe.c:4551 +#: describe.c:4610 #, c-format msgid "" "\n" @@ -1916,134 +1943,148 @@ msgstr "" "\n" "Analizzatore: \"%s\"" -#: describe.c:4585 +#: describe.c:4644 #, c-format msgid "The server (version %s) does not support foreign-data wrappers.\n" msgstr "Il server (versione %s) non supporta i wrapper di dati esterni.\n" -#: describe.c:4643 +#: describe.c:4702 msgid "List of foreign-data wrappers" msgstr "Lista dei wrapper di dati esterni" -#: describe.c:4668 +#: describe.c:4727 #, c-format msgid "The server (version %s) does not support foreign servers.\n" msgstr "Il server (versione %s) non supporta server esterni.\n" -#: describe.c:4681 +#: describe.c:4740 msgid "Foreign-data wrapper" msgstr "Wrapper per dati esterni" -#: describe.c:4699 describe.c:4904 +#: describe.c:4758 describe.c:4963 msgid "Version" msgstr "Versione" -#: describe.c:4725 +#: describe.c:4784 msgid "List of foreign servers" msgstr "Lista dei server esterni" -#: describe.c:4750 +#: describe.c:4809 #, c-format msgid "The server (version %s) does not support user mappings.\n" msgstr "Il server (versione %s) non supporta la mappatura di utenti.\n" -#: describe.c:4760 describe.c:4824 +#: describe.c:4819 describe.c:4883 msgid "Server" msgstr "Server" -#: describe.c:4761 +#: describe.c:4820 msgid "User name" msgstr "Nome utente" -#: describe.c:4786 +#: describe.c:4845 msgid "List of user mappings" msgstr "Lista delle mappature degli utenti" -#: describe.c:4811 +#: describe.c:4870 #, c-format msgid "The server (version %s) does not support foreign tables.\n" msgstr "Il server (versione %s) non supporta tabelle esterne.\n" -#: describe.c:4864 +#: describe.c:4923 msgid "List of foreign 
tables" msgstr "Lista delle tabelle esterne" -#: describe.c:4889 describe.c:4946 +#: describe.c:4948 describe.c:5005 #, c-format msgid "The server (version %s) does not support extensions.\n" msgstr "Il server (versione %s) non supporta le estensioni.\n" -#: describe.c:4921 +#: describe.c:4980 msgid "List of installed extensions" msgstr "Lista delle estensioni installate" -#: describe.c:4974 +#: describe.c:5033 #, c-format msgid "Did not find any extension named \"%s\".\n" -msgstr "Non ho trovato alcuna estensione nominata \"%s\".\n" +msgstr "Non è stata trovata nessuna estensione chiamata \"%s\".\n" -#: describe.c:4977 +#: describe.c:5036 #, c-format msgid "Did not find any extensions.\n" -msgstr "Non ho trovato alcuna estensione.\n" +msgstr "Non è stata trovata nessuna estensione.\n" -#: describe.c:5021 -msgid "Object Description" -msgstr "Descrizione Oggetto" +#: describe.c:5080 +msgid "Object description" +msgstr "Descrizione dell'oggetto" -#: describe.c:5030 +#: describe.c:5090 #, c-format msgid "Objects in extension \"%s\"" msgstr "Oggetti nell'estensione \"%s\"" -#: describe.c:5057 describe.c:5120 +#: describe.c:5119 describe.c:5185 #, c-format msgid "The server (version %s) does not support publications.\n" msgstr "Il server (versione %s) non supporta pubblicazioni.\n" -#: describe.c:5073 describe.c:5165 +#: describe.c:5136 describe.c:5248 +msgid "All tables" +msgstr "Tutte le tabelle" + +#: describe.c:5137 describe.c:5249 msgid "Inserts" msgstr "Inserimenti" -#: describe.c:5074 describe.c:5166 +#: describe.c:5138 describe.c:5250 msgid "Updates" msgstr "Modifiche" -#: describe.c:5075 describe.c:5167 +#: describe.c:5139 describe.c:5251 msgid "Deletes" msgstr "Cancellazioni" -#: describe.c:5092 +#: describe.c:5156 msgid "List of publications" msgstr "Lista delle pubblicazioni" -#: describe.c:5162 +#: describe.c:5217 +#, c-format +msgid "Did not find any publication named \"%s\".\n" +msgstr "Non è stata trovata nessuna pubblicazione chiamata \"%s\".\n" + +#: describe.c:5220 +#, c-format +msgid "Did not find any publications.\n" +msgstr "Non è stata trovata nessuna pubblicazione.\n" + +#: describe.c:5244 #, c-format msgid "Publication %s" msgstr "Pubblicazione %s" -#: describe.c:5207 +#: describe.c:5284 msgid "Tables:" msgstr "Tabelle:" -#: describe.c:5249 +#: describe.c:5328 #, c-format msgid "The server (version %s) does not support subscriptions.\n" msgstr "Il server (versione %s) non supporta sottoscrizioni.\n" -#: describe.c:5265 +#: describe.c:5344 msgid "Publication" msgstr "Pubblicazione" -#: describe.c:5272 +#: describe.c:5351 msgid "Synchronous commit" msgstr "Commit sincrono" -#: describe.c:5273 +#: describe.c:5352 msgid "Conninfo" msgstr "Conninfo" -#: describe.c:5295 +#: describe.c:5374 msgid "List of subscriptions" msgstr "Lista di sottoscrizioni" @@ -2061,7 +2102,7 @@ msgstr "" "psql è il terminale interattivo per PostgreSQL.\n" "\n" -#: help.c:74 help.c:342 help.c:376 help.c:403 +#: help.c:74 help.c:344 help.c:383 help.c:410 #, c-format msgid "Usage:\n" msgstr "Utilizzo:\n" @@ -2373,21 +2414,21 @@ msgstr " \\copyright mostra i termini di uso e distribuzione di Pos #: help.c:174 #, c-format +msgid " \\crosstabview [COLUMNS] execute query and display results in crosstab\n" +msgstr " \\crosstabview [COLONNE] esegui la query e mostra il risultato in crosstab\n" + +#: help.c:175 +#, c-format msgid " \\errverbose show most recent error message at maximum verbosity\n" msgstr " \\errverbose mostra il messaggio di errore più recente alla massima loquacità\n" -#: help.c:175 +#: 
help.c:176 #, c-format msgid " \\g [FILE] or ; execute query (and send results to file or |pipe)\n" msgstr "" " \\g [FILE] o ; esegui la query (ed invia i risultati ad un file o\n" " ad una |pipe)\n" -#: help.c:176 -#, c-format -msgid " \\gx [FILE] as \\g, but forces expanded output mode\n" -msgstr " \\gx [FILE] come \\g, ma forza un modo di output espanso\n" - #: help.c:177 #, c-format msgid " \\gexec execute query, then execute each value in its result\n" @@ -2400,13 +2441,13 @@ msgstr " \\gset [PREFIX] esegui la query e salva il risultato in una va #: help.c:179 #, c-format -msgid " \\q quit psql\n" -msgstr " \\q esci da psql\n" +msgid " \\gx [FILE] as \\g, but forces expanded output mode\n" +msgstr " \\gx [FILE] come \\g, ma forza un modo di output espanso\n" #: help.c:180 #, c-format -msgid " \\crosstabview [COLUMNS] execute query and display results in crosstab\n" -msgstr " \\crosstabview [COLONNE] esegui la query e mostra il risultato in crosstab\n" +msgid " \\q quit psql\n" +msgstr " \\q esci da psql\n" #: help.c:181 #, c-format @@ -2602,151 +2643,151 @@ msgstr " \\dd[S] [MODELLO] mostra la descrizione di oggetti non elencati #: help.c:230 #, c-format -msgid " \\ddp [PATTERN] list default privileges\n" -msgstr " \\ddp [MODELLO] elenca i privilegi predefiniti\n" +msgid " \\dD[S+] [PATTERN] list domains\n" +msgstr " \\dD[S+] [MODELLO] elenca i domini\n" #: help.c:231 #, c-format -msgid " \\dD[S+] [PATTERN] list domains\n" -msgstr " \\dD[S+] [MODELLO] elenca i domini\n" +msgid " \\ddp [PATTERN] list default privileges\n" +msgstr " \\ddp [MODELLO] elenca i privilegi predefiniti\n" #: help.c:232 #, c-format +msgid " \\dE[S+] [PATTERN] list foreign tables\n" +msgstr " \\dE[S+] [MODELLO] elenca le tabelle esterne\n" + +#: help.c:233 +#, c-format msgid " \\det[+] [PATTERN] list foreign tables\n" msgstr " \\det[+] [MODELLO] elenca le tabelle esterne\n" -#: help.c:233 +#: help.c:234 #, c-format msgid " \\des[+] [PATTERN] list foreign servers\n" msgstr " \\des[+] [MODELLO] elenca i server esterni\n" -#: help.c:234 +#: help.c:235 #, c-format msgid " \\deu[+] [PATTERN] list user mappings\n" msgstr " \\deu[+] [MODELLO] elenca le mappature degli utenti\n" -#: help.c:235 +#: help.c:236 #, c-format msgid " \\dew[+] [PATTERN] list foreign-data wrappers\n" msgstr " \\dew[+] [MODELLO] elenca i wrapper di dati esterni\n" -#: help.c:236 +#: help.c:237 #, c-format msgid " \\df[antw][S+] [PATRN] list [only agg/normal/trigger/window] functions\n" msgstr " \\df[antw][S+] [MOD] elenca le funzioni [solo aggr/normali/trigger/finestra]\n" -#: help.c:237 +#: help.c:238 #, c-format msgid " \\dF[+] [PATTERN] list text search configurations\n" msgstr " \\dF[+] [MODELLO] elenca le configurazioni di ricerca testo\n" -#: help.c:238 +#: help.c:239 #, c-format msgid " \\dFd[+] [PATTERN] list text search dictionaries\n" msgstr " \\dFd[+] [MODELLO] elenca i dizionari di ricerca testo\n" -#: help.c:239 +#: help.c:240 #, c-format msgid " \\dFp[+] [PATTERN] list text search parsers\n" msgstr " \\dFp[+] [MODELLO] elenca gli analizzatori di ricerca testo\n" -#: help.c:240 +#: help.c:241 #, c-format msgid " \\dFt[+] [PATTERN] list text search templates\n" msgstr " \\dFt[+] [MODELLO] elenca i modelli di ricerca di testo\n" -#: help.c:241 +#: help.c:242 #, c-format msgid " \\dg[S+] [PATTERN] list roles\n" msgstr " \\dg[S+] [MODELLO] elenca i ruoli\n" -#: help.c:242 +#: help.c:243 #, c-format msgid " \\di[S+] [PATTERN] list indexes\n" msgstr " \\di[S+] [MODELLO] elenca gli indici\n" -#: help.c:243 +#: help.c:244 #, c-format msgid 
" \\dl list large objects, same as \\lo_list\n" msgstr " \\dl elenca i large object, stesso risultato di \\lo_list\n" -#: help.c:244 +#: help.c:245 #, c-format msgid " \\dL[S+] [PATTERN] list procedural languages\n" msgstr " \\dL[S+] [MODELLO] elenca i linguaggi procedurali\n" -#: help.c:245 +#: help.c:246 #, c-format msgid " \\dm[S+] [PATTERN] list materialized views\n" msgstr " \\dm[S+] [PATTERN] elenca le viste materializzate\n" -#: help.c:246 +#: help.c:247 #, c-format msgid " \\dn[S+] [PATTERN] list schemas\n" msgstr " \\dn[S+] [MODELLO] elenca gli schemi\n" -#: help.c:247 +#: help.c:248 #, c-format msgid " \\do[S] [PATTERN] list operators\n" msgstr " \\do[S] [MODELLO] elenca gli operatori\n" -#: help.c:248 +#: help.c:249 #, c-format msgid " \\dO[S+] [PATTERN] list collations\n" msgstr " \\dO[S+] [MODELLO] elenca gli ordinamenti\n" -#: help.c:249 +#: help.c:250 #, c-format msgid " \\dp [PATTERN] list table, view, and sequence access privileges\n" msgstr "" " \\dp [MODELLO] elenca i permessi di accesso alla tabella, vista\n" " o sequenza\n" -#: help.c:250 +#: help.c:251 #, c-format msgid " \\drds [PATRN1 [PATRN2]] list per-database role settings\n" msgstr " \\drds [MOD1 [MOD2]] elenca le impostazioni dei ruoli per database\n" -#: help.c:251 +#: help.c:252 #, c-format msgid " \\dRp[+] [PATTERN] list replication publications\n" msgstr " \\dRp[+] [MODELLO] elenca le pubblicazioni di replica\n" -#: help.c:252 +#: help.c:253 #, c-format msgid " \\dRs[+] [PATTERN] list replication subscriptions\n" msgstr " \\dRs[+] [MODELLO] elenca le sottoscrizioni di replica\n" -#: help.c:253 +#: help.c:254 #, c-format msgid " \\ds[S+] [PATTERN] list sequences\n" msgstr " \\ds[S+] [MODELLO] elenca le sequenze\n" -#: help.c:254 +#: help.c:255 #, c-format msgid " \\dt[S+] [PATTERN] list tables\n" msgstr " \\dt[S+] [MODELLO] elenca le tabelle\n" -#: help.c:255 +#: help.c:256 #, c-format msgid " \\dT[S+] [PATTERN] list data types\n" msgstr " \\dT[S+] [MODELLO] elenca i tipi di dato\n" -#: help.c:256 +#: help.c:257 #, c-format msgid " \\du[S+] [PATTERN] list roles\n" msgstr " \\du[S+] [MODELLO] elenca i ruoli\n" -#: help.c:257 +#: help.c:258 #, c-format msgid " \\dv[S+] [PATTERN] list views\n" msgstr " \\dv[S+] [MODELLO] elenca le viste\n" -#: help.c:258 -#, c-format -msgid " \\dE[S+] [PATTERN] list foreign tables\n" -msgstr " \\dE[S+] [MODELLO] elenca le tabelle esterne\n" - #: help.c:259 #, c-format msgid " \\dx[+] [PATTERN] list extensions\n" @@ -2810,40 +2851,44 @@ msgstr " \\H cambia modalità HTML (attualmente %s)\n" #, c-format msgid "" " \\pset [NAME [VALUE]] set table output option\n" -" (NAME := {format|border|expanded|fieldsep|fieldsep_zero|footer|null|\n" -" numericlocale|recordsep|recordsep_zero|tuples_only|title|tableattr|pager|\n" -" unicode_border_linestyle|unicode_column_linestyle|unicode_header_linestyle})\n" +" (NAME := {border|columns|expanded|fieldsep|fieldsep_zero|\n" +" footer|format|linestyle|null|numericlocale|pager|\n" +" pager_min_lines|recordsep|recordsep_zero|tableattr|title|\n" +" tuples_only|unicode_border_linestyle|\n" +" unicode_column_linestyle|unicode_header_linestyle})\n" msgstr "" " \\pset [NOME [VALORE]] imposta opzioni di output tabella\n" -" (NOME := {format|border|expanded|fieldsep|fieldsep_zero|footer|null|\n" -" numericlocale|recordsep|recordsep_zero|tuples_only|title|tableattr|pager|\n" -" unicode_border_linestyle|unicode_column_linestyle|unicode_header_linestyle})\n" +" (NOME := {border|columns|expanded|fieldsep|fieldsep_zero|\n" +" 
footer|format|linestyle|null|numericlocale|pager|\n" +" pager_min_lines|recordsep|recordsep_zero|tableattr|title|\n" +" tuples_only|unicode_border_linestyle|\n" +" unicode_column_linestyle|unicode_header_linestyle})\n" -#: help.c:277 +#: help.c:279 #, c-format msgid " \\t [on|off] show only rows (currently %s)\n" msgstr " \\t [on|off] mostra solo le righe (attualmente %s)\n" -#: help.c:279 +#: help.c:281 #, c-format msgid " \\T [STRING] set HTML
<table> tag attributes, or unset if none\n" msgstr "" " \\T [STRINGA] imposta gli attributi HTML di <table>
, se non\n" " specificato allora annullali\n" -#: help.c:280 +#: help.c:282 #, c-format msgid " \\x [on|off|auto] toggle expanded output (currently %s)\n" msgstr "" " \\x [on|off|auto] cambia modalità output espansa\n" " (attualmente %s)\n" -#: help.c:284 +#: help.c:286 #, c-format msgid "Connection\n" msgstr "Connessione\n" -#: help.c:286 +#: help.c:288 #, c-format msgid "" " \\c[onnect] {[DBNAME|- USER|- HOST|- PORT|-] | conninfo}\n" @@ -2852,7 +2897,7 @@ msgstr "" " \\c[onnect] {[DBNAME|- USER|- HOST|- PORT|-] | conninfo}\n" " connetti ad un nuovo database (attualmente \"%s\")\n" -#: help.c:290 +#: help.c:292 #, c-format msgid "" " \\c[onnect] {[DBNAME|- USER|- HOST|- PORT|-] | conninfo}\n" @@ -2861,78 +2906,78 @@ msgstr "" " \\c[onnect] {[DBNAME|- USER|- HOST|- PORT|-] | conninfo}\n" " connetti ad un nuovo database (nessuna connessione attiva)\n" -#: help.c:292 +#: help.c:294 +#, c-format +msgid " \\conninfo display information about current connection\n" +msgstr " \\conninfo mostra le informazioni su la connessione corrente\n" + +#: help.c:295 #, c-format msgid " \\encoding [ENCODING] show or set client encoding\n" msgstr " \\encoding [CODIFICA] mostra o imposta la codifica del client\n" -#: help.c:293 +#: help.c:296 #, c-format msgid " \\password [USERNAME] securely change the password for a user\n" msgstr " \\password [UTENTE] cambia la password per un utente in sicurezza\n" -#: help.c:294 -#, c-format -msgid " \\conninfo display information about current connection\n" -msgstr " \\conninfo mostra le informazioni su la connessione corrente\n" - -#: help.c:297 +#: help.c:299 #, c-format msgid "Operating System\n" msgstr "Sistema operativo\n" -#: help.c:298 +#: help.c:300 #, c-format msgid " \\cd [DIR] change the current working directory\n" msgstr " \\cd [DIRECTORY] cambia la directory di lavoro\n" -#: help.c:299 +#: help.c:301 #, c-format msgid " \\setenv NAME [VALUE] set or unset environment variable\n" msgstr " \\setenv NOME [VALORE] imposta o elimina una variabile d'ambiente\n" -#: help.c:300 +#: help.c:302 #, c-format msgid " \\timing [on|off] toggle timing of commands (currently %s)\n" msgstr "" " \\timing [on|off] imposta cronometro dei comandi\n" " (attualmente %s)\n" -#: help.c:302 +#: help.c:304 #, c-format msgid " \\! [COMMAND] execute command in shell or start interactive shell\n" msgstr "" " \\! 
[COMANDO] esegui un comando in una shell oppure avvia una shell\n" " interattiva\n" -#: help.c:305 +#: help.c:307 #, c-format msgid "Variables\n" msgstr "Variabili\n" -#: help.c:306 +#: help.c:308 #, c-format msgid " \\prompt [TEXT] NAME prompt user to set internal variable\n" msgstr " \\prompt [TESTO] NOME richiedi all'utente di impostare una variabile interna\n" -#: help.c:307 +#: help.c:309 #, c-format msgid " \\set [NAME [VALUE]] set internal variable, or list all if no parameters\n" msgstr "" " \\set [NOME [VALORE]] imposta una variabile interna, oppure mostrale tutte\n" " se non sono specificati parametri\n" -#: help.c:308 +#: help.c:310 #, c-format msgid " \\unset NAME unset (delete) internal variable\n" msgstr " \\unset NOME cancella una variabile interna\n" -#: help.c:311 +#: help.c:313 #, c-format msgid "Large Objects\n" msgstr "Large Object\n" -#: help.c:312 +#: help.c:314 #, c-format msgid "" " \\lo_export LOBOID FILE\n" @@ -2945,7 +2990,7 @@ msgstr "" " \\lo_list\n" " \\lo_unlink LOBOID operazioni sui large object\n" -#: help.c:339 +#: help.c:341 #, c-format msgid "" "List of specially treated variables\n" @@ -2954,12 +2999,12 @@ msgstr "" "Lista delle variabili speciali\n" "\n" -#: help.c:341 +#: help.c:343 #, c-format msgid "psql variables:\n" msgstr "variabili psql:\n" -#: help.c:343 +#: help.c:345 #, c-format msgid "" " psql --set=NAME=VALUE\n" @@ -2970,12 +3015,12 @@ msgstr "" " oppure \\set NOME VALORE dentro psql\n" "\n" -#: help.c:345 +#: help.c:347 #, c-format msgid " AUTOCOMMIT if set, successful SQL commands are automatically committed\n" msgstr " AUTOCOMMIT se impostato, i comandi SQL riusciti sono salvati automaticamente\n" -#: help.c:346 +#: help.c:348 #, c-format msgid "" " COMP_KEYWORD_CASE determines the case used to complete SQL key words\n" @@ -2984,12 +3029,12 @@ msgstr "" " COMP_KEYWORD_CASE determina il caso usato per completare le parole chiave SQL\n" " [lower, upper, preserve-lower, preserve-upper]\n" -#: help.c:348 +#: help.c:350 #, c-format msgid " DBNAME the currently connected database name\n" msgstr " DBNAME il nome del database attualmente connesso\n" -#: help.c:349 +#: help.c:351 #, c-format msgid "" " ECHO controls what input is written to standard output\n" @@ -2998,7 +3043,7 @@ msgstr "" " ECHO controlla quale input è scritto su stardard output\n" " [all, errors, none, queries]\n" -#: help.c:351 +#: help.c:353 #, c-format msgid "" " ECHO_HIDDEN if set, display internal queries executed by backslash commands;\n" @@ -3007,12 +3052,12 @@ msgstr "" " ECHO_HIDDEN se impostato, mostra le query interne dei comandi backslash;\n" " se impostato a \"noexec\", mostrale solo senza eseguirle\n" -#: help.c:353 +#: help.c:355 #, c-format msgid " ENCODING current client character set encoding\n" msgstr " ENCODING codifica del set di caratteri del client corrente\n" -#: help.c:354 +#: help.c:356 #, c-format msgid "" " FETCH_COUNT the number of result rows to fetch and display at a time\n" @@ -3021,97 +3066,122 @@ msgstr "" " FETCH_COUNT il numero di righe del risultato da leggere e mostrare pr volta\n" " (default: 0=tutte)\n" -#: help.c:356 +#: help.c:358 #, c-format msgid " HISTCONTROL controls command history [ignorespace, ignoredups, ignoreboth]\n" msgstr " HISTCONTROL controlla la storia dei comandi [ignorespace, ignoredups, ignoreboth]\n" -#: help.c:357 +#: help.c:359 #, c-format msgid " HISTFILE file name used to store the command history\n" msgstr " HISTFILE nome del file usato per memorizzare la storia dei comandi\n" -#: help.c:358 +#: help.c:360 #, 
c-format msgid " HISTSIZE max number of commands to store in the command history\n" msgstr " HISTSIZE numero massimo di comandi da salvare nella storia dei comandi\n" -#: help.c:359 +#: help.c:361 #, c-format msgid " HOST the currently connected database server host\n" msgstr " HOST l'host del server del database attualmente connesso\n" -#: help.c:360 +#: help.c:362 #, c-format msgid " IGNOREEOF number of EOFs needed to terminate an interactive session\n" msgstr " IGNOREEOF numero di EOF richiesti per terminare una sessione interattiva\n" -#: help.c:361 +#: help.c:363 #, c-format msgid " LASTOID value of the last affected OID\n" msgstr " LASTOID valore dell'ultimo OID interessato\n" -#: help.c:362 +#: help.c:364 #, c-format msgid " ON_ERROR_ROLLBACK if set, an error doesn't stop a transaction (uses implicit savepoints)\n" msgstr " ON_ERROR_ROLLBACK se impostato, un errore non termina una transazione (usa punti di salvataggio impliciti)\n" -#: help.c:363 +#: help.c:365 #, c-format msgid " ON_ERROR_STOP stop batch execution after error\n" msgstr " ON_ERROR_STOP termina l'esecuzione batch dopo un errore\n" -#: help.c:364 +#: help.c:366 #, c-format msgid " PORT server port of the current connection\n" msgstr " PORT porta del server attualmente connesso\n" -#: help.c:365 +#: help.c:367 #, c-format msgid " PROMPT1 specifies the standard psql prompt\n" msgstr " PROMPT1 specifica il prompt psql standard\n" -#: help.c:366 +#: help.c:368 #, c-format msgid " PROMPT2 specifies the prompt used when a statement continues from a previous line\n" msgstr " PROMPT2 specifica il prompt usato quando un'istruzione continua da una riga precedente\n" -#: help.c:367 +#: help.c:369 #, c-format msgid " PROMPT3 specifies the prompt used during COPY ... FROM STDIN\n" msgstr " PROMPT3 specifica il prompt usato in COPY ... 
FROM STDIN\n" -#: help.c:368 +#: help.c:370 #, c-format msgid " QUIET run quietly (same as -q option)\n" msgstr " QUIET esegui silenziosamente (come con l'opzione -q)\n" -#: help.c:369 +#: help.c:371 +#, c-format +msgid " SERVER_VERSION_NAME server's version (short string)\n" +msgstr " SERVER_VERSION_NAME versione del server (stringa breve)\n" + +#: help.c:372 +#, c-format +msgid " SERVER_VERSION_NUM server's version (numeric format)\n" +msgstr " SERVER_VERSION_NUM versione del server (formato numerico)\n" + +#: help.c:373 #, c-format msgid " SHOW_CONTEXT controls display of message context fields [never, errors, always]\n" msgstr " SHOW_CONTEXT controlla la visualizzazione dei campi di contesto dei messaggi [never, errors, always]\n" -#: help.c:370 +#: help.c:374 #, c-format msgid " SINGLELINE end of line terminates SQL command mode (same as -S option)\n" msgstr " SINGLELINE la fine riga termina i comandi SQL (come con l'opzione -S)\n" -#: help.c:371 +#: help.c:375 #, c-format msgid " SINGLESTEP single-step mode (same as -s option)\n" msgstr " SINGLESTEP modalità passo singolo (come con l'opzione -s)\n" -#: help.c:372 +#: help.c:376 #, c-format msgid " USER the currently connected database user\n" msgstr " USER l'utente database attualmente connesso\n" -#: help.c:373 +#: help.c:377 #, c-format msgid " VERBOSITY controls verbosity of error reports [default, verbose, terse]\n" msgstr " VERBOSITY controlla la loquacità della visualizzazione degli errori [default, verbose, terse]\n" -#: help.c:375 +#: help.c:378 +#, c-format +msgid " VERSION psql's version (verbose string)\n" +msgstr " VERSION versione di psql (stringa estesa)\n" + +#: help.c:379 +#, c-format +msgid " VERSION_NAME psql's version (short string)\n" +msgstr " VERSION_NAME versione di psql (strina breve)\n" + +#: help.c:380 +#, c-format +msgid " VERSION_NUM psql's version (numeric format)\n" +msgstr " VERSION_NUM versione di psql (formato numerico)\n" + +#: help.c:382 #, c-format msgid "" "\n" @@ -3120,7 +3190,7 @@ msgstr "" "\n" "Impostazioni di visualizzazione:\n" -#: help.c:377 +#: help.c:384 #, c-format msgid "" " psql --pset=NAME[=VALUE]\n" @@ -3131,52 +3201,52 @@ msgstr "" " oppure \\pset NOME [VALORE] dentro psql\n" "\n" -#: help.c:379 +#: help.c:386 #, c-format msgid " border border style (number)\n" msgstr " border stile bordo (numero)\n" -#: help.c:380 +#: help.c:387 #, c-format msgid " columns target width for the wrapped format\n" msgstr " columns larghezza destinazione per il formato wrapped\n" -#: help.c:381 +#: help.c:388 #, c-format msgid " expanded (or x) expanded output [on, off, auto]\n" msgstr " expanded (o x) output espanso [on, off, auto]\n" -#: help.c:382 +#: help.c:389 #, c-format msgid " fieldsep field separator for unaligned output (default \"%s\")\n" msgstr " fieldsep separatore di campo per l'output non allineato (default \"%s\")\n" -#: help.c:383 +#: help.c:390 #, c-format msgid " fieldsep_zero set field separator for unaligned output to zero byte\n" msgstr " fieldsep_zero imposta il separatore di campo per l'output non allineato al byte zero\n" -#: help.c:384 +#: help.c:391 #, c-format msgid " footer enable or disable display of the table footer [on, off]\n" msgstr " footer abilita o disabilita la visualizzazione del piè di pagina [on, off]\n" -#: help.c:385 +#: help.c:392 #, c-format msgid " format set output format [unaligned, aligned, wrapped, html, asciidoc, ...]\n" msgstr " format imposta il formato di output [unaligned, aligned, wrapped, html, asciidoc, ...]\n" -#: help.c:386 +#: help.c:393 #, 
c-format msgid " linestyle set the border line drawing style [ascii, old-ascii, unicode]\n" msgstr " linestyle imposta lo stile di disegno delle righe dei bordi [ascii, old-ascii, unicode]\n" -#: help.c:387 +#: help.c:394 #, c-format msgid " null set the string to be printed in place of a null value\n" msgstr " null imposta la stringa da visualizzare al posto dei valori null\n" -#: help.c:388 +#: help.c:395 #, c-format msgid "" " numericlocale enable or disable display of a locale-specific character to separate\n" @@ -3185,22 +3255,22 @@ msgstr "" " numericlocale abilita o disabilita i caratteri specifici per il locale per separare\n" " i gruppi di cifre [on, off]\n" -#: help.c:390 +#: help.c:397 #, c-format msgid " pager control when an external pager is used [yes, no, always]\n" msgstr " pager controlla quando usare la paginazione esterna [yes, no, always]\n" -#: help.c:391 +#: help.c:398 #, c-format msgid " recordsep record (line) separator for unaligned output\n" msgstr " recordsep separatore di record (riga) per l'output non allineato\n" -#: help.c:392 +#: help.c:399 #, c-format msgid " recordsep_zero set record separator for unaligned output to zero byte\n" msgstr " recordsep_zero imposta il separatore di campo per l'output non allineato al byte zero\n" -#: help.c:393 +#: help.c:400 #, c-format msgid "" " tableattr (or T) specify attributes for table tag in html format or proportional\n" @@ -3210,17 +3280,17 @@ msgstr "" " larghezza colonna proporzionale dei dati allineati a sinistra\n" " in formato latex-longtable\n" -#: help.c:395 +#: help.c:402 #, c-format msgid " title set the table title for any subsequently printed tables\n" msgstr " title imposta il titolo della tabella per ogni tabella stampata in seguto\n" -#: help.c:396 +#: help.c:403 #, c-format msgid " tuples_only if set, only actual table data is shown\n" msgstr " tuples_only se impostato, mostra solo i dati della tabella\n" -#: help.c:397 +#: help.c:404 #, c-format msgid "" " unicode_border_linestyle\n" @@ -3233,7 +3303,7 @@ msgstr "" " unicode_header_linestyle\n" " imposta lo stile di disegno delle righe Unicode [single, double]\n" -#: help.c:402 +#: help.c:409 #, c-format msgid "" "\n" @@ -3242,7 +3312,7 @@ msgstr "" "\n" "Variabili d'ambiente:\n" -#: help.c:406 +#: help.c:413 #, c-format msgid "" " NAME=VALUE [NAME=VALUE] psql ...\n" @@ -3253,7 +3323,7 @@ msgstr "" " oppure \\setenv NOME [VALORE] dentro psql\n" "\n" -#: help.c:408 +#: help.c:415 #, c-format msgid "" " set NAME=VALUE\n" @@ -3266,52 +3336,52 @@ msgstr "" " oppure \\setenv NOME [VALORE] dentro psql\n" "\n" -#: help.c:411 +#: help.c:418 #, c-format msgid " COLUMNS number of columns for wrapped format\n" msgstr " COLUMNS numero di colonne per il formato wrapped\n" -#: help.c:412 +#: help.c:419 #, c-format msgid " PAGER name of external pager program\n" msgstr " PAGER nome del programma di paginazione esterno\n" -#: help.c:413 +#: help.c:420 #, c-format msgid " PGAPPNAME same as the application_name connection parameter\n" msgstr " PGAPPNAME come il parametro di connessione application_name\n" -#: help.c:414 +#: help.c:421 #, c-format msgid " PGDATABASE same as the dbname connection parameter\n" msgstr " PGDATABASE come il parametro di connessione dbname\n" -#: help.c:415 +#: help.c:422 #, c-format msgid " PGHOST same as the host connection parameter\n" msgstr " PGHOST come il parametro di connessione host\n" -#: help.c:416 -#, c-format -msgid " PGPORT same as the port connection parameter\n" -msgstr " PGPORT come il parametro di connessione port\n" - -#: 
help.c:417 -#, c-format -msgid " PGUSER same as the user connection parameter\n" -msgstr " PGUSER come il parametro di connessione user\n" - -#: help.c:418 +#: help.c:423 #, c-format msgid " PGPASSWORD connection password (not recommended)\n" msgstr " PGPASSWORD password di connessione (uso non raccomandato)\n" -#: help.c:419 +#: help.c:424 #, c-format msgid " PGPASSFILE password file name\n" msgstr " PGPASSFILE nome del file delle password\n" -#: help.c:420 +#: help.c:425 +#, c-format +msgid " PGPORT same as the port connection parameter\n" +msgstr " PGPORT come il parametro di connessione port\n" + +#: help.c:426 +#, c-format +msgid " PGUSER same as the user connection parameter\n" +msgstr " PGUSER come il parametro di connessione user\n" + +#: help.c:427 #, c-format msgid "" " PSQL_EDITOR, EDITOR, VISUAL\n" @@ -3320,7 +3390,7 @@ msgstr "" " PSQL_EDITOR, EDITOR, VISUAL\n" " editor usato dai comandi \\e, \\ef, \\ev\n" -#: help.c:422 +#: help.c:429 #, c-format msgid "" " PSQL_EDITOR_LINENUMBER_ARG\n" @@ -3329,31 +3399,31 @@ msgstr "" " PSQL_EDITOR_LINENUMBER_ARG\n" " come specificare un numero di riga quando si invoca l'editor\n" -#: help.c:424 +#: help.c:431 #, c-format msgid " PSQL_HISTORY alternative location for the command history file\n" msgstr " PSQL_HISTORY posizione alternativa del file della storia dei comandi\n" -#: help.c:425 +#: help.c:432 #, c-format msgid " PSQLRC alternative location for the user's .psqlrc file\n" msgstr " PSQLRC posizione alternativa del file .psqlrc dell'utente\n" -#: help.c:426 +#: help.c:433 #, c-format msgid " SHELL shell used by the \\! command\n" msgstr " SHELL shell usata dal comando \\!\n" -#: help.c:427 +#: help.c:434 #, c-format msgid " TMPDIR directory for temporary files\n" msgstr " TMPDIR directory per i file temporanei\n" -#: help.c:470 +#: help.c:477 msgid "Available help:\n" msgstr "Aiuti disponibili:\n" -#: help.c:554 +#: help.c:561 #, c-format msgid "" "Command: %s\n" @@ -3368,7 +3438,7 @@ msgstr "" "%s\n" "\n" -#: help.c:570 +#: help.c:577 #, c-format msgid "" "No help available for \"%s\".\n" @@ -3502,34 +3572,34 @@ msgstr "%s: memoria esaurita\n" #: sql_help.c:1258 sql_help.c:1260 sql_help.c:1270 sql_help.c:1273 #: sql_help.c:1295 sql_help.c:1297 sql_help.c:1299 sql_help.c:1302 #: sql_help.c:1304 sql_help.c:1306 sql_help.c:1309 sql_help.c:1359 -#: sql_help.c:1397 sql_help.c:1400 sql_help.c:1402 sql_help.c:1404 -#: sql_help.c:1406 sql_help.c:1408 sql_help.c:1411 sql_help.c:1451 -#: sql_help.c:1662 sql_help.c:1726 sql_help.c:1745 sql_help.c:1758 -#: sql_help.c:1814 sql_help.c:1820 sql_help.c:1830 sql_help.c:1850 -#: sql_help.c:1875 sql_help.c:1893 sql_help.c:1922 sql_help.c:2015 -#: sql_help.c:2057 sql_help.c:2079 sql_help.c:2099 sql_help.c:2100 -#: sql_help.c:2135 sql_help.c:2155 sql_help.c:2177 sql_help.c:2191 -#: sql_help.c:2206 sql_help.c:2236 sql_help.c:2261 sql_help.c:2307 -#: sql_help.c:2569 sql_help.c:2582 sql_help.c:2599 sql_help.c:2615 -#: sql_help.c:2655 sql_help.c:2707 sql_help.c:2711 sql_help.c:2713 -#: sql_help.c:2719 sql_help.c:2737 sql_help.c:2764 sql_help.c:2799 -#: sql_help.c:2811 sql_help.c:2820 sql_help.c:2864 sql_help.c:2878 -#: sql_help.c:2906 sql_help.c:2914 sql_help.c:2922 sql_help.c:2930 -#: sql_help.c:2938 sql_help.c:2946 sql_help.c:2954 sql_help.c:2962 -#: sql_help.c:2971 sql_help.c:2982 sql_help.c:2990 sql_help.c:2998 -#: sql_help.c:3006 sql_help.c:3014 sql_help.c:3024 sql_help.c:3033 -#: sql_help.c:3042 sql_help.c:3050 sql_help.c:3059 sql_help.c:3067 -#: sql_help.c:3075 sql_help.c:3084 sql_help.c:3092 
sql_help.c:3100 -#: sql_help.c:3108 sql_help.c:3116 sql_help.c:3124 sql_help.c:3132 -#: sql_help.c:3140 sql_help.c:3148 sql_help.c:3156 sql_help.c:3164 -#: sql_help.c:3181 sql_help.c:3190 sql_help.c:3198 sql_help.c:3215 -#: sql_help.c:3230 sql_help.c:3498 sql_help.c:3549 sql_help.c:3578 -#: sql_help.c:3586 sql_help.c:4009 sql_help.c:4057 sql_help.c:4198 +#: sql_help.c:1401 sql_help.c:1404 sql_help.c:1406 sql_help.c:1408 +#: sql_help.c:1410 sql_help.c:1412 sql_help.c:1415 sql_help.c:1455 +#: sql_help.c:1666 sql_help.c:1730 sql_help.c:1749 sql_help.c:1762 +#: sql_help.c:1818 sql_help.c:1824 sql_help.c:1834 sql_help.c:1854 +#: sql_help.c:1879 sql_help.c:1897 sql_help.c:1926 sql_help.c:2019 +#: sql_help.c:2061 sql_help.c:2083 sql_help.c:2103 sql_help.c:2104 +#: sql_help.c:2139 sql_help.c:2159 sql_help.c:2181 sql_help.c:2195 +#: sql_help.c:2210 sql_help.c:2240 sql_help.c:2265 sql_help.c:2311 +#: sql_help.c:2577 sql_help.c:2590 sql_help.c:2607 sql_help.c:2623 +#: sql_help.c:2663 sql_help.c:2715 sql_help.c:2719 sql_help.c:2721 +#: sql_help.c:2727 sql_help.c:2745 sql_help.c:2772 sql_help.c:2807 +#: sql_help.c:2819 sql_help.c:2828 sql_help.c:2872 sql_help.c:2886 +#: sql_help.c:2914 sql_help.c:2922 sql_help.c:2930 sql_help.c:2938 +#: sql_help.c:2946 sql_help.c:2954 sql_help.c:2962 sql_help.c:2970 +#: sql_help.c:2979 sql_help.c:2990 sql_help.c:2998 sql_help.c:3006 +#: sql_help.c:3014 sql_help.c:3022 sql_help.c:3032 sql_help.c:3041 +#: sql_help.c:3050 sql_help.c:3058 sql_help.c:3067 sql_help.c:3075 +#: sql_help.c:3083 sql_help.c:3092 sql_help.c:3100 sql_help.c:3108 +#: sql_help.c:3116 sql_help.c:3124 sql_help.c:3132 sql_help.c:3140 +#: sql_help.c:3148 sql_help.c:3156 sql_help.c:3164 sql_help.c:3172 +#: sql_help.c:3189 sql_help.c:3198 sql_help.c:3206 sql_help.c:3223 +#: sql_help.c:3238 sql_help.c:3506 sql_help.c:3557 sql_help.c:3586 +#: sql_help.c:3594 sql_help.c:4017 sql_help.c:4065 sql_help.c:4206 msgid "name" msgstr "nome" -#: sql_help.c:37 sql_help.c:40 sql_help.c:43 sql_help.c:326 sql_help.c:1520 -#: sql_help.c:2879 sql_help.c:3803 +#: sql_help.c:37 sql_help.c:40 sql_help.c:43 sql_help.c:326 sql_help.c:1524 +#: sql_help.c:2887 sql_help.c:3811 msgid "aggregate_signature" msgstr "signature_aggregato" @@ -3540,7 +3610,7 @@ msgstr "signature_aggregato" #: sql_help.c:927 sql_help.c:947 sql_help.c:960 sql_help.c:994 sql_help.c:1093 #: sql_help.c:1169 sql_help.c:1212 sql_help.c:1233 sql_help.c:1247 #: sql_help.c:1259 sql_help.c:1272 sql_help.c:1303 sql_help.c:1360 -#: sql_help.c:1405 +#: sql_help.c:1409 msgid "new_name" msgstr "nuovo_nome" @@ -3549,86 +3619,87 @@ msgstr "nuovo_nome" #: sql_help.c:609 sql_help.c:667 sql_help.c:687 sql_help.c:716 sql_help.c:771 #: sql_help.c:817 sql_help.c:896 sql_help.c:925 sql_help.c:945 sql_help.c:958 #: sql_help.c:992 sql_help.c:1153 sql_help.c:1171 sql_help.c:1214 -#: sql_help.c:1235 sql_help.c:1298 sql_help.c:1403 sql_help.c:2555 +#: sql_help.c:1235 sql_help.c:1298 sql_help.c:1407 sql_help.c:2563 msgid "new_owner" msgstr "nuovo_proprietario" #: sql_help.c:44 sql_help.c:72 sql_help.c:87 sql_help.c:253 sql_help.c:318 #: sql_help.c:440 sql_help.c:525 sql_help.c:650 sql_help.c:691 sql_help.c:719 -#: sql_help.c:774 sql_help.c:929 sql_help.c:962 sql_help.c:1095 sql_help.c:1216 -#: sql_help.c:1237 sql_help.c:1249 sql_help.c:1261 sql_help.c:1305 -#: sql_help.c:1407 +#: sql_help.c:774 sql_help.c:929 sql_help.c:962 sql_help.c:1095 +#: sql_help.c:1216 sql_help.c:1237 sql_help.c:1249 sql_help.c:1261 +#: sql_help.c:1305 sql_help.c:1411 msgid "new_schema" msgstr "nuovo_schema" -#: 
sql_help.c:45 sql_help.c:1576 sql_help.c:2880 sql_help.c:3824 +#: sql_help.c:45 sql_help.c:1580 sql_help.c:2888 sql_help.c:3832 msgid "where aggregate_signature is:" msgstr "dove signature_aggregato è:" #: sql_help.c:46 sql_help.c:49 sql_help.c:52 sql_help.c:336 sql_help.c:361 #: sql_help.c:364 sql_help.c:367 sql_help.c:507 sql_help.c:512 sql_help.c:517 -#: sql_help.c:522 sql_help.c:527 sql_help.c:1538 sql_help.c:1577 -#: sql_help.c:1580 sql_help.c:1583 sql_help.c:1727 sql_help.c:1746 -#: sql_help.c:1749 sql_help.c:2016 sql_help.c:2881 sql_help.c:2884 -#: sql_help.c:2887 sql_help.c:2972 sql_help.c:3383 sql_help.c:3716 -#: sql_help.c:3809 sql_help.c:3825 sql_help.c:3828 sql_help.c:3831 +#: sql_help.c:522 sql_help.c:527 sql_help.c:1542 sql_help.c:1581 +#: sql_help.c:1584 sql_help.c:1587 sql_help.c:1731 sql_help.c:1750 +#: sql_help.c:1753 sql_help.c:2020 sql_help.c:2889 sql_help.c:2892 +#: sql_help.c:2895 sql_help.c:2980 sql_help.c:3391 sql_help.c:3724 +#: sql_help.c:3817 sql_help.c:3833 sql_help.c:3836 sql_help.c:3839 msgid "argmode" msgstr "modo_arg" #: sql_help.c:47 sql_help.c:50 sql_help.c:53 sql_help.c:337 sql_help.c:362 #: sql_help.c:365 sql_help.c:368 sql_help.c:508 sql_help.c:513 sql_help.c:518 -#: sql_help.c:523 sql_help.c:528 sql_help.c:1539 sql_help.c:1578 -#: sql_help.c:1581 sql_help.c:1584 sql_help.c:1728 sql_help.c:1747 -#: sql_help.c:1750 sql_help.c:2017 sql_help.c:2882 sql_help.c:2885 -#: sql_help.c:2888 sql_help.c:2973 sql_help.c:3810 sql_help.c:3826 -#: sql_help.c:3829 sql_help.c:3832 +#: sql_help.c:523 sql_help.c:528 sql_help.c:1543 sql_help.c:1582 +#: sql_help.c:1585 sql_help.c:1588 sql_help.c:1732 sql_help.c:1751 +#: sql_help.c:1754 sql_help.c:2021 sql_help.c:2890 sql_help.c:2893 +#: sql_help.c:2896 sql_help.c:2981 sql_help.c:3818 sql_help.c:3834 +#: sql_help.c:3837 sql_help.c:3840 msgid "argname" msgstr "nome_arg" #: sql_help.c:48 sql_help.c:51 sql_help.c:54 sql_help.c:338 sql_help.c:363 #: sql_help.c:366 sql_help.c:369 sql_help.c:509 sql_help.c:514 sql_help.c:519 -#: sql_help.c:524 sql_help.c:529 sql_help.c:1540 sql_help.c:1579 -#: sql_help.c:1582 sql_help.c:1585 sql_help.c:2018 sql_help.c:2883 -#: sql_help.c:2886 sql_help.c:2889 sql_help.c:2974 sql_help.c:3811 -#: sql_help.c:3827 sql_help.c:3830 sql_help.c:3833 +#: sql_help.c:524 sql_help.c:529 sql_help.c:1544 sql_help.c:1583 +#: sql_help.c:1586 sql_help.c:1589 sql_help.c:2022 sql_help.c:2891 +#: sql_help.c:2894 sql_help.c:2897 sql_help.c:2982 sql_help.c:3819 +#: sql_help.c:3835 sql_help.c:3838 sql_help.c:3841 msgid "argtype" msgstr "tipo_arg" #: sql_help.c:113 sql_help.c:385 sql_help.c:463 sql_help.c:475 sql_help.c:854 -#: sql_help.c:942 sql_help.c:1230 sql_help.c:1354 sql_help.c:1382 -#: sql_help.c:1633 sql_help.c:1639 sql_help.c:1925 sql_help.c:1966 -#: sql_help.c:1973 sql_help.c:1982 sql_help.c:2058 sql_help.c:2237 -#: sql_help.c:2329 sql_help.c:2584 sql_help.c:2765 sql_help.c:2787 -#: sql_help.c:3250 sql_help.c:3417 +#: sql_help.c:942 sql_help.c:1230 sql_help.c:1354 sql_help.c:1386 +#: sql_help.c:1637 sql_help.c:1643 sql_help.c:1929 sql_help.c:1970 +#: sql_help.c:1977 sql_help.c:1986 sql_help.c:2062 sql_help.c:2241 +#: sql_help.c:2333 sql_help.c:2592 sql_help.c:2773 sql_help.c:2795 +#: sql_help.c:3258 sql_help.c:3425 msgid "option" msgstr "opzione" -#: sql_help.c:114 sql_help.c:855 sql_help.c:1355 sql_help.c:2059 -#: sql_help.c:2238 sql_help.c:2766 +#: sql_help.c:114 sql_help.c:855 sql_help.c:1355 sql_help.c:2063 +#: sql_help.c:2242 sql_help.c:2774 msgid "where option can be:" msgstr "dove opzione può essere:" -#: 
sql_help.c:115 sql_help.c:1857 +#: sql_help.c:115 sql_help.c:1861 msgid "allowconn" msgstr "permetti_conn" -#: sql_help.c:116 sql_help.c:856 sql_help.c:1356 sql_help.c:1858 -#: sql_help.c:2239 sql_help.c:2767 +#: sql_help.c:116 sql_help.c:856 sql_help.c:1356 sql_help.c:1862 +#: sql_help.c:2243 sql_help.c:2775 msgid "connlimit" msgstr "limite_conn" -#: sql_help.c:117 sql_help.c:1859 +#: sql_help.c:117 sql_help.c:1863 msgid "istemplate" msgstr "è_template" -#: sql_help.c:123 sql_help.c:588 sql_help.c:653 sql_help.c:1098 sql_help.c:1146 +#: sql_help.c:123 sql_help.c:588 sql_help.c:653 sql_help.c:1098 +#: sql_help.c:1146 msgid "new_tablespace" msgstr "nuovo_tablespace" #: sql_help.c:125 sql_help.c:128 sql_help.c:130 sql_help.c:534 sql_help.c:536 #: sql_help.c:537 sql_help.c:863 sql_help.c:867 sql_help.c:870 sql_help.c:1005 -#: sql_help.c:1008 sql_help.c:1362 sql_help.c:1365 sql_help.c:1367 -#: sql_help.c:2027 sql_help.c:3603 sql_help.c:3998 +#: sql_help.c:1008 sql_help.c:1363 sql_help.c:1367 sql_help.c:1370 +#: sql_help.c:2031 sql_help.c:3611 sql_help.c:4006 msgid "configuration_parameter" msgstr "parametro_config" @@ -3636,13 +3707,13 @@ msgstr "parametro_config" #: sql_help.c:535 sql_help.c:583 sql_help.c:659 sql_help.c:665 sql_help.c:815 #: sql_help.c:864 sql_help.c:943 sql_help.c:982 sql_help.c:985 sql_help.c:990 #: sql_help.c:1006 sql_help.c:1007 sql_help.c:1128 sql_help.c:1148 -#: sql_help.c:1174 sql_help.c:1231 sql_help.c:1363 sql_help.c:1383 -#: sql_help.c:1926 sql_help.c:1967 sql_help.c:1974 sql_help.c:1983 -#: sql_help.c:2028 sql_help.c:2029 sql_help.c:2087 sql_help.c:2119 -#: sql_help.c:2209 sql_help.c:2330 sql_help.c:2360 sql_help.c:2457 -#: sql_help.c:2469 sql_help.c:2482 sql_help.c:2519 sql_help.c:2541 -#: sql_help.c:2558 sql_help.c:2585 sql_help.c:2788 sql_help.c:3418 -#: sql_help.c:3999 sql_help.c:4000 +#: sql_help.c:1174 sql_help.c:1231 sql_help.c:1364 sql_help.c:1387 +#: sql_help.c:1930 sql_help.c:1971 sql_help.c:1978 sql_help.c:1987 +#: sql_help.c:2032 sql_help.c:2033 sql_help.c:2091 sql_help.c:2123 +#: sql_help.c:2213 sql_help.c:2334 sql_help.c:2364 sql_help.c:2462 +#: sql_help.c:2474 sql_help.c:2487 sql_help.c:2527 sql_help.c:2549 +#: sql_help.c:2566 sql_help.c:2593 sql_help.c:2796 sql_help.c:3426 +#: sql_help.c:4007 sql_help.c:4008 msgid "value" msgstr "valore" @@ -3650,9 +3721,9 @@ msgstr "valore" msgid "target_role" msgstr "ruolo_destinazione" -#: sql_help.c:199 sql_help.c:1909 sql_help.c:2285 sql_help.c:2290 -#: sql_help.c:3365 sql_help.c:3372 sql_help.c:3386 sql_help.c:3392 -#: sql_help.c:3698 sql_help.c:3705 sql_help.c:3719 sql_help.c:3725 +#: sql_help.c:199 sql_help.c:1913 sql_help.c:2289 sql_help.c:2294 +#: sql_help.c:3373 sql_help.c:3380 sql_help.c:3394 sql_help.c:3400 +#: sql_help.c:3706 sql_help.c:3713 sql_help.c:3727 sql_help.c:3733 msgid "schema_name" msgstr "nome_schema" @@ -3667,33 +3738,33 @@ msgstr "dove grant_o_revoke_abbreviato è uno di:" #: sql_help.c:202 sql_help.c:203 sql_help.c:204 sql_help.c:205 sql_help.c:206 #: sql_help.c:207 sql_help.c:208 sql_help.c:209 sql_help.c:210 sql_help.c:211 #: sql_help.c:559 sql_help.c:587 sql_help.c:652 sql_help.c:792 sql_help.c:874 -#: sql_help.c:1097 sql_help.c:1370 sql_help.c:2062 sql_help.c:2063 -#: sql_help.c:2064 sql_help.c:2065 sql_help.c:2066 sql_help.c:2193 -#: sql_help.c:2242 sql_help.c:2243 sql_help.c:2244 sql_help.c:2245 -#: sql_help.c:2246 sql_help.c:2770 sql_help.c:2771 sql_help.c:2772 -#: sql_help.c:2773 sql_help.c:2774 sql_help.c:3399 sql_help.c:3400 -#: sql_help.c:3401 sql_help.c:3699 sql_help.c:3703 
sql_help.c:3706 -#: sql_help.c:3708 sql_help.c:3710 sql_help.c:3712 sql_help.c:3714 -#: sql_help.c:3720 sql_help.c:3722 sql_help.c:3724 sql_help.c:3726 -#: sql_help.c:3728 sql_help.c:3730 sql_help.c:3731 sql_help.c:3732 -#: sql_help.c:4019 +#: sql_help.c:1097 sql_help.c:1374 sql_help.c:2066 sql_help.c:2067 +#: sql_help.c:2068 sql_help.c:2069 sql_help.c:2070 sql_help.c:2197 +#: sql_help.c:2246 sql_help.c:2247 sql_help.c:2248 sql_help.c:2249 +#: sql_help.c:2250 sql_help.c:2778 sql_help.c:2779 sql_help.c:2780 +#: sql_help.c:2781 sql_help.c:2782 sql_help.c:3407 sql_help.c:3408 +#: sql_help.c:3409 sql_help.c:3707 sql_help.c:3711 sql_help.c:3714 +#: sql_help.c:3716 sql_help.c:3718 sql_help.c:3720 sql_help.c:3722 +#: sql_help.c:3728 sql_help.c:3730 sql_help.c:3732 sql_help.c:3734 +#: sql_help.c:3736 sql_help.c:3738 sql_help.c:3739 sql_help.c:3740 +#: sql_help.c:4027 msgid "role_name" msgstr "nome_ruolo" #: sql_help.c:237 sql_help.c:451 sql_help.c:1113 sql_help.c:1115 -#: sql_help.c:1399 sql_help.c:1878 sql_help.c:1882 sql_help.c:1986 -#: sql_help.c:1990 sql_help.c:2083 sql_help.c:2453 sql_help.c:2465 -#: sql_help.c:2478 sql_help.c:2486 sql_help.c:2497 sql_help.c:2523 -#: sql_help.c:3449 sql_help.c:3464 sql_help.c:3466 sql_help.c:3884 -#: sql_help.c:3885 sql_help.c:3894 sql_help.c:3935 sql_help.c:3936 -#: sql_help.c:3937 sql_help.c:3938 sql_help.c:3939 sql_help.c:3940 -#: sql_help.c:3973 sql_help.c:3974 sql_help.c:3979 sql_help.c:3984 -#: sql_help.c:4123 sql_help.c:4124 sql_help.c:4133 sql_help.c:4174 -#: sql_help.c:4175 sql_help.c:4176 sql_help.c:4177 sql_help.c:4178 -#: sql_help.c:4179 sql_help.c:4226 sql_help.c:4228 sql_help.c:4261 -#: sql_help.c:4317 sql_help.c:4318 sql_help.c:4327 sql_help.c:4368 -#: sql_help.c:4369 sql_help.c:4370 sql_help.c:4371 sql_help.c:4372 -#: sql_help.c:4373 +#: sql_help.c:1403 sql_help.c:1882 sql_help.c:1886 sql_help.c:1990 +#: sql_help.c:1994 sql_help.c:2087 sql_help.c:2458 sql_help.c:2470 +#: sql_help.c:2483 sql_help.c:2491 sql_help.c:2502 sql_help.c:2531 +#: sql_help.c:3457 sql_help.c:3472 sql_help.c:3474 sql_help.c:3892 +#: sql_help.c:3893 sql_help.c:3902 sql_help.c:3943 sql_help.c:3944 +#: sql_help.c:3945 sql_help.c:3946 sql_help.c:3947 sql_help.c:3948 +#: sql_help.c:3981 sql_help.c:3982 sql_help.c:3987 sql_help.c:3992 +#: sql_help.c:4131 sql_help.c:4132 sql_help.c:4141 sql_help.c:4182 +#: sql_help.c:4183 sql_help.c:4184 sql_help.c:4185 sql_help.c:4186 +#: sql_help.c:4187 sql_help.c:4234 sql_help.c:4236 sql_help.c:4269 +#: sql_help.c:4325 sql_help.c:4326 sql_help.c:4335 sql_help.c:4376 +#: sql_help.c:4377 sql_help.c:4378 sql_help.c:4379 sql_help.c:4380 +#: sql_help.c:4381 msgid "expression" msgstr "espressione" @@ -3703,9 +3774,9 @@ msgstr "vincolo_di_dominio" #: sql_help.c:242 sql_help.c:244 sql_help.c:247 sql_help.c:466 sql_help.c:467 #: sql_help.c:1090 sql_help.c:1134 sql_help.c:1135 sql_help.c:1136 -#: sql_help.c:1156 sql_help.c:1526 sql_help.c:1528 sql_help.c:1881 -#: sql_help.c:1985 sql_help.c:1989 sql_help.c:2485 sql_help.c:2496 -#: sql_help.c:3461 +#: sql_help.c:1156 sql_help.c:1530 sql_help.c:1532 sql_help.c:1885 +#: sql_help.c:1989 sql_help.c:1993 sql_help.c:2490 sql_help.c:2501 +#: sql_help.c:3469 msgid "constraint_name" msgstr "nome_vincolo" @@ -3729,73 +3800,73 @@ msgstr "dove oggetto_membro è:" #: sql_help.c:333 sql_help.c:334 sql_help.c:339 sql_help.c:343 sql_help.c:345 #: sql_help.c:347 sql_help.c:348 sql_help.c:349 sql_help.c:350 sql_help.c:351 #: sql_help.c:352 sql_help.c:353 sql_help.c:354 sql_help.c:355 sql_help.c:358 -#: sql_help.c:359 
sql_help.c:1518 sql_help.c:1523 sql_help.c:1530 -#: sql_help.c:1531 sql_help.c:1532 sql_help.c:1533 sql_help.c:1534 -#: sql_help.c:1535 sql_help.c:1536 sql_help.c:1541 sql_help.c:1543 -#: sql_help.c:1547 sql_help.c:1549 sql_help.c:1553 sql_help.c:1554 -#: sql_help.c:1555 sql_help.c:1558 sql_help.c:1559 sql_help.c:1560 -#: sql_help.c:1561 sql_help.c:1562 sql_help.c:1563 sql_help.c:1564 +#: sql_help.c:359 sql_help.c:1522 sql_help.c:1527 sql_help.c:1534 +#: sql_help.c:1535 sql_help.c:1536 sql_help.c:1537 sql_help.c:1538 +#: sql_help.c:1539 sql_help.c:1540 sql_help.c:1545 sql_help.c:1547 +#: sql_help.c:1551 sql_help.c:1553 sql_help.c:1557 sql_help.c:1558 +#: sql_help.c:1559 sql_help.c:1562 sql_help.c:1563 sql_help.c:1564 #: sql_help.c:1565 sql_help.c:1566 sql_help.c:1567 sql_help.c:1568 -#: sql_help.c:1573 sql_help.c:1574 sql_help.c:3799 sql_help.c:3804 -#: sql_help.c:3805 sql_help.c:3806 sql_help.c:3807 sql_help.c:3813 -#: sql_help.c:3814 sql_help.c:3815 sql_help.c:3816 sql_help.c:3817 -#: sql_help.c:3818 sql_help.c:3819 sql_help.c:3820 sql_help.c:3821 -#: sql_help.c:3822 +#: sql_help.c:1569 sql_help.c:1570 sql_help.c:1571 sql_help.c:1572 +#: sql_help.c:1577 sql_help.c:1578 sql_help.c:3807 sql_help.c:3812 +#: sql_help.c:3813 sql_help.c:3814 sql_help.c:3815 sql_help.c:3821 +#: sql_help.c:3822 sql_help.c:3823 sql_help.c:3824 sql_help.c:3825 +#: sql_help.c:3826 sql_help.c:3827 sql_help.c:3828 sql_help.c:3829 +#: sql_help.c:3830 msgid "object_name" msgstr "nome_oggetto" -#: sql_help.c:325 sql_help.c:1519 sql_help.c:3802 +#: sql_help.c:325 sql_help.c:1523 sql_help.c:3810 msgid "aggregate_name" msgstr "nome_aggregato" -#: sql_help.c:327 sql_help.c:1521 sql_help.c:1792 sql_help.c:1796 -#: sql_help.c:1798 sql_help.c:2897 +#: sql_help.c:327 sql_help.c:1525 sql_help.c:1796 sql_help.c:1800 +#: sql_help.c:1802 sql_help.c:2905 msgid "source_type" msgstr "tipo_sorgente" -#: sql_help.c:328 sql_help.c:1522 sql_help.c:1793 sql_help.c:1797 -#: sql_help.c:1799 sql_help.c:2898 +#: sql_help.c:328 sql_help.c:1526 sql_help.c:1797 sql_help.c:1801 +#: sql_help.c:1803 sql_help.c:2906 msgid "target_type" msgstr "tipo_destinazione" -#: sql_help.c:335 sql_help.c:756 sql_help.c:1537 sql_help.c:1794 -#: sql_help.c:1833 sql_help.c:1896 sql_help.c:2136 sql_help.c:2167 -#: sql_help.c:2661 sql_help.c:3382 sql_help.c:3715 sql_help.c:3808 -#: sql_help.c:3913 sql_help.c:3917 sql_help.c:3921 sql_help.c:3924 -#: sql_help.c:4152 sql_help.c:4156 sql_help.c:4160 sql_help.c:4163 -#: sql_help.c:4346 sql_help.c:4350 sql_help.c:4354 sql_help.c:4357 +#: sql_help.c:335 sql_help.c:756 sql_help.c:1541 sql_help.c:1798 +#: sql_help.c:1837 sql_help.c:1900 sql_help.c:2140 sql_help.c:2171 +#: sql_help.c:2669 sql_help.c:3390 sql_help.c:3723 sql_help.c:3816 +#: sql_help.c:3921 sql_help.c:3925 sql_help.c:3929 sql_help.c:3932 +#: sql_help.c:4160 sql_help.c:4164 sql_help.c:4168 sql_help.c:4171 +#: sql_help.c:4354 sql_help.c:4358 sql_help.c:4362 sql_help.c:4365 msgid "function_name" msgstr "nome_funzione" -#: sql_help.c:340 sql_help.c:749 sql_help.c:1544 sql_help.c:2160 +#: sql_help.c:340 sql_help.c:749 sql_help.c:1548 sql_help.c:2164 msgid "operator_name" msgstr "nome_operatore" -#: sql_help.c:341 sql_help.c:685 sql_help.c:689 sql_help.c:693 sql_help.c:1545 -#: sql_help.c:2137 sql_help.c:3015 +#: sql_help.c:341 sql_help.c:685 sql_help.c:689 sql_help.c:693 sql_help.c:1549 +#: sql_help.c:2141 sql_help.c:3023 msgid "left_type" msgstr "tipo_sx" -#: sql_help.c:342 sql_help.c:686 sql_help.c:690 sql_help.c:694 sql_help.c:1546 -#: sql_help.c:2138 
sql_help.c:3016 +#: sql_help.c:342 sql_help.c:686 sql_help.c:690 sql_help.c:694 sql_help.c:1550 +#: sql_help.c:2142 sql_help.c:3024 msgid "right_type" msgstr "tipo_dx" #: sql_help.c:344 sql_help.c:346 sql_help.c:712 sql_help.c:715 sql_help.c:718 #: sql_help.c:747 sql_help.c:759 sql_help.c:767 sql_help.c:770 sql_help.c:773 -#: sql_help.c:1548 sql_help.c:1550 sql_help.c:2157 sql_help.c:2178 -#: sql_help.c:2502 sql_help.c:3025 sql_help.c:3034 +#: sql_help.c:1552 sql_help.c:1554 sql_help.c:2161 sql_help.c:2182 +#: sql_help.c:2507 sql_help.c:3033 sql_help.c:3042 msgid "index_method" msgstr "metodo_indice" -#: sql_help.c:356 sql_help.c:1152 sql_help.c:1569 sql_help.c:2024 -#: sql_help.c:2460 sql_help.c:2628 sql_help.c:3172 sql_help.c:3396 -#: sql_help.c:3729 +#: sql_help.c:356 sql_help.c:1152 sql_help.c:1573 sql_help.c:2028 +#: sql_help.c:2465 sql_help.c:2636 sql_help.c:3180 sql_help.c:3404 +#: sql_help.c:3737 msgid "type_name" msgstr "nome_di_tipo" -#: sql_help.c:357 sql_help.c:1570 sql_help.c:2023 sql_help.c:2629 -#: sql_help.c:2855 sql_help.c:3173 sql_help.c:3388 sql_help.c:3721 +#: sql_help.c:357 sql_help.c:1574 sql_help.c:2027 sql_help.c:2637 +#: sql_help.c:2863 sql_help.c:3181 sql_help.c:3396 sql_help.c:3729 msgid "lang_name" msgstr "nome_linguaggio" @@ -3803,38 +3874,39 @@ msgstr "nome_linguaggio" msgid "and aggregate_signature is:" msgstr "e signature_aggregato è:" -#: sql_help.c:383 sql_help.c:1664 sql_help.c:1923 +#: sql_help.c:383 sql_help.c:1668 sql_help.c:1927 msgid "handler_function" msgstr "funzione_handler" -#: sql_help.c:384 sql_help.c:1924 +#: sql_help.c:384 sql_help.c:1928 msgid "validator_function" msgstr "funzione_validazione" -#: sql_help.c:433 sql_help.c:510 sql_help.c:641 sql_help.c:1085 sql_help.c:1296 -#: sql_help.c:2493 sql_help.c:2494 sql_help.c:2510 sql_help.c:2511 +#: sql_help.c:433 sql_help.c:510 sql_help.c:641 sql_help.c:1085 +#: sql_help.c:1296 sql_help.c:2498 sql_help.c:2499 sql_help.c:2515 +#: sql_help.c:2516 msgid "action" msgstr "azione" #: sql_help.c:435 sql_help.c:442 sql_help.c:446 sql_help.c:447 sql_help.c:450 #: sql_help.c:452 sql_help.c:453 sql_help.c:454 sql_help.c:456 sql_help.c:459 #: sql_help.c:461 sql_help.c:462 sql_help.c:645 sql_help.c:655 sql_help.c:657 -#: sql_help.c:660 sql_help.c:662 sql_help.c:923 sql_help.c:1087 sql_help.c:1105 -#: sql_help.c:1109 sql_help.c:1110 sql_help.c:1114 sql_help.c:1116 -#: sql_help.c:1117 sql_help.c:1118 sql_help.c:1120 sql_help.c:1123 -#: sql_help.c:1124 sql_help.c:1126 sql_help.c:1129 sql_help.c:1131 -#: sql_help.c:1398 sql_help.c:1401 sql_help.c:1421 sql_help.c:1525 -#: sql_help.c:1630 sql_help.c:1635 sql_help.c:1649 sql_help.c:1650 -#: sql_help.c:1651 sql_help.c:1964 sql_help.c:1977 sql_help.c:2021 -#: sql_help.c:2082 sql_help.c:2117 sql_help.c:2315 sql_help.c:2343 -#: sql_help.c:2344 sql_help.c:2444 sql_help.c:2452 sql_help.c:2461 -#: sql_help.c:2464 sql_help.c:2473 sql_help.c:2477 sql_help.c:2498 -#: sql_help.c:2500 sql_help.c:2507 sql_help.c:2522 sql_help.c:2539 -#: sql_help.c:2664 sql_help.c:2800 sql_help.c:3367 sql_help.c:3368 -#: sql_help.c:3448 sql_help.c:3463 sql_help.c:3465 sql_help.c:3467 -#: sql_help.c:3700 sql_help.c:3701 sql_help.c:3801 sql_help.c:3944 -#: sql_help.c:4183 sql_help.c:4225 sql_help.c:4227 sql_help.c:4229 -#: sql_help.c:4246 sql_help.c:4249 sql_help.c:4377 +#: sql_help.c:660 sql_help.c:662 sql_help.c:923 sql_help.c:1087 +#: sql_help.c:1105 sql_help.c:1109 sql_help.c:1110 sql_help.c:1114 +#: sql_help.c:1116 sql_help.c:1117 sql_help.c:1118 sql_help.c:1120 +#: sql_help.c:1123 
sql_help.c:1124 sql_help.c:1126 sql_help.c:1129 +#: sql_help.c:1131 sql_help.c:1402 sql_help.c:1405 sql_help.c:1425 +#: sql_help.c:1529 sql_help.c:1634 sql_help.c:1639 sql_help.c:1653 +#: sql_help.c:1654 sql_help.c:1655 sql_help.c:1968 sql_help.c:1981 +#: sql_help.c:2025 sql_help.c:2086 sql_help.c:2121 sql_help.c:2319 +#: sql_help.c:2347 sql_help.c:2348 sql_help.c:2449 sql_help.c:2457 +#: sql_help.c:2466 sql_help.c:2469 sql_help.c:2478 sql_help.c:2482 +#: sql_help.c:2503 sql_help.c:2505 sql_help.c:2512 sql_help.c:2530 +#: sql_help.c:2547 sql_help.c:2672 sql_help.c:2808 sql_help.c:3375 +#: sql_help.c:3376 sql_help.c:3456 sql_help.c:3471 sql_help.c:3473 +#: sql_help.c:3475 sql_help.c:3708 sql_help.c:3709 sql_help.c:3809 +#: sql_help.c:3952 sql_help.c:4191 sql_help.c:4233 sql_help.c:4235 +#: sql_help.c:4237 sql_help.c:4254 sql_help.c:4257 sql_help.c:4385 msgid "column_name" msgstr "nome_colonna" @@ -3842,26 +3914,27 @@ msgstr "nome_colonna" msgid "new_column_name" msgstr "nuovo_nome_colonna" -#: sql_help.c:441 sql_help.c:531 sql_help.c:654 sql_help.c:1104 sql_help.c:1312 +#: sql_help.c:441 sql_help.c:531 sql_help.c:654 sql_help.c:1104 +#: sql_help.c:1312 msgid "where action is one of:" msgstr "dove azione è una di:" -#: sql_help.c:443 sql_help.c:448 sql_help.c:915 sql_help.c:1106 sql_help.c:1111 -#: sql_help.c:1314 sql_help.c:1318 sql_help.c:1876 sql_help.c:1965 -#: sql_help.c:2156 sql_help.c:2308 sql_help.c:2445 sql_help.c:2709 -#: sql_help.c:3550 +#: sql_help.c:443 sql_help.c:448 sql_help.c:915 sql_help.c:1106 +#: sql_help.c:1111 sql_help.c:1314 sql_help.c:1318 sql_help.c:1880 +#: sql_help.c:1969 sql_help.c:2160 sql_help.c:2312 sql_help.c:2450 +#: sql_help.c:2717 sql_help.c:3558 msgid "data_type" msgstr "tipo_di_dato" #: sql_help.c:444 sql_help.c:449 sql_help.c:1107 sql_help.c:1112 -#: sql_help.c:1315 sql_help.c:1319 sql_help.c:1877 sql_help.c:1968 -#: sql_help.c:2084 sql_help.c:2446 sql_help.c:2454 sql_help.c:2466 -#: sql_help.c:2479 sql_help.c:2710 sql_help.c:2716 sql_help.c:3458 +#: sql_help.c:1315 sql_help.c:1319 sql_help.c:1881 sql_help.c:1972 +#: sql_help.c:2088 sql_help.c:2451 sql_help.c:2459 sql_help.c:2471 +#: sql_help.c:2484 sql_help.c:2718 sql_help.c:2724 sql_help.c:3466 msgid "collation" msgstr "ordinamento" -#: sql_help.c:445 sql_help.c:1108 sql_help.c:1969 sql_help.c:1978 -#: sql_help.c:2447 sql_help.c:2462 sql_help.c:2474 +#: sql_help.c:445 sql_help.c:1108 sql_help.c:1973 sql_help.c:1982 +#: sql_help.c:2452 sql_help.c:2467 sql_help.c:2479 msgid "column_constraint" msgstr "vincolo_di_colonna" @@ -3874,50 +3947,51 @@ msgstr "intero" msgid "attribute_option" msgstr "opzione_attributo" -#: sql_help.c:465 sql_help.c:1132 sql_help.c:1970 sql_help.c:1979 -#: sql_help.c:2448 sql_help.c:2463 sql_help.c:2475 +#: sql_help.c:465 sql_help.c:1132 sql_help.c:1974 sql_help.c:1983 +#: sql_help.c:2453 sql_help.c:2468 sql_help.c:2480 msgid "table_constraint" msgstr "vincoli_di_tabella" #: sql_help.c:468 sql_help.c:469 sql_help.c:470 sql_help.c:471 sql_help.c:1137 -#: sql_help.c:1138 sql_help.c:1139 sql_help.c:1140 sql_help.c:1571 +#: sql_help.c:1138 sql_help.c:1139 sql_help.c:1140 sql_help.c:1575 msgid "trigger_name" msgstr "nome_trigger" #: sql_help.c:472 sql_help.c:473 sql_help.c:1150 sql_help.c:1151 -#: sql_help.c:1971 sql_help.c:1976 sql_help.c:2451 sql_help.c:2472 +#: sql_help.c:1975 sql_help.c:1980 sql_help.c:2456 sql_help.c:2477 msgid "parent_table" msgstr "tabella_padre" -#: sql_help.c:530 sql_help.c:580 sql_help.c:643 sql_help.c:1275 sql_help.c:1908 +#: sql_help.c:530 sql_help.c:580 
sql_help.c:643 sql_help.c:1275 +#: sql_help.c:1912 msgid "extension_name" msgstr "nome_estensione" -#: sql_help.c:532 sql_help.c:2025 +#: sql_help.c:532 sql_help.c:2029 msgid "execution_cost" msgstr "costo_di_esecuzione" -#: sql_help.c:533 sql_help.c:2026 +#: sql_help.c:533 sql_help.c:2030 msgid "result_rows" msgstr "righe_risultato" #: sql_help.c:554 sql_help.c:556 sql_help.c:853 sql_help.c:861 sql_help.c:865 #: sql_help.c:868 sql_help.c:871 sql_help.c:1353 sql_help.c:1361 -#: sql_help.c:1364 sql_help.c:1366 sql_help.c:1368 sql_help.c:2286 -#: sql_help.c:2288 sql_help.c:2291 sql_help.c:2292 sql_help.c:3366 -#: sql_help.c:3370 sql_help.c:3373 sql_help.c:3375 sql_help.c:3377 -#: sql_help.c:3379 sql_help.c:3381 sql_help.c:3387 sql_help.c:3389 -#: sql_help.c:3391 sql_help.c:3393 sql_help.c:3395 sql_help.c:3397 +#: sql_help.c:1365 sql_help.c:1368 sql_help.c:1371 sql_help.c:2290 +#: sql_help.c:2292 sql_help.c:2295 sql_help.c:2296 sql_help.c:3374 +#: sql_help.c:3378 sql_help.c:3381 sql_help.c:3383 sql_help.c:3385 +#: sql_help.c:3387 sql_help.c:3389 sql_help.c:3395 sql_help.c:3397 +#: sql_help.c:3399 sql_help.c:3401 sql_help.c:3403 sql_help.c:3405 msgid "role_specification" msgstr "specifica_ruolo" -#: sql_help.c:555 sql_help.c:557 sql_help.c:1380 sql_help.c:1851 -#: sql_help.c:2294 sql_help.c:2785 sql_help.c:3206 sql_help.c:4029 +#: sql_help.c:555 sql_help.c:557 sql_help.c:1384 sql_help.c:1855 +#: sql_help.c:2298 sql_help.c:2793 sql_help.c:3214 sql_help.c:4037 msgid "user_name" msgstr "nome_utente" -#: sql_help.c:558 sql_help.c:873 sql_help.c:1369 sql_help.c:2293 -#: sql_help.c:3398 +#: sql_help.c:558 sql_help.c:873 sql_help.c:1373 sql_help.c:2297 +#: sql_help.c:3406 msgid "where role_specification can be:" msgstr "dove specifica_ruolo può essere:" @@ -3925,116 +3999,117 @@ msgstr "dove specifica_ruolo può essere:" msgid "group_name" msgstr "nome_gruppo" -#: sql_help.c:578 sql_help.c:1856 sql_help.c:2088 sql_help.c:2120 -#: sql_help.c:2458 sql_help.c:2470 sql_help.c:2483 sql_help.c:2520 -#: sql_help.c:2542 sql_help.c:2554 sql_help.c:3394 sql_help.c:3727 +#: sql_help.c:578 sql_help.c:1860 sql_help.c:2092 sql_help.c:2124 +#: sql_help.c:2463 sql_help.c:2475 sql_help.c:2488 sql_help.c:2528 +#: sql_help.c:2550 sql_help.c:2562 sql_help.c:3402 sql_help.c:3735 msgid "tablespace_name" msgstr "nome_tablespace" #: sql_help.c:582 sql_help.c:585 sql_help.c:664 sql_help.c:666 sql_help.c:1147 -#: sql_help.c:1149 sql_help.c:2086 sql_help.c:2118 sql_help.c:2456 -#: sql_help.c:2468 sql_help.c:2481 sql_help.c:2518 sql_help.c:2540 +#: sql_help.c:1149 sql_help.c:2090 sql_help.c:2122 sql_help.c:2461 +#: sql_help.c:2473 sql_help.c:2486 sql_help.c:2526 sql_help.c:2548 msgid "storage_parameter" msgstr "parametro_di_memorizzazione" -#: sql_help.c:608 sql_help.c:1542 sql_help.c:3812 +#: sql_help.c:608 sql_help.c:1546 sql_help.c:3820 msgid "large_object_oid" msgstr "oid_large_object" #: sql_help.c:663 sql_help.c:1145 sql_help.c:1154 sql_help.c:1157 -#: sql_help.c:1461 +#: sql_help.c:1465 msgid "index_name" msgstr "nome_indice" -#: sql_help.c:695 sql_help.c:2141 +#: sql_help.c:695 sql_help.c:2145 msgid "res_proc" msgstr "res_proc" -#: sql_help.c:696 sql_help.c:2142 +#: sql_help.c:696 sql_help.c:2146 msgid "join_proc" msgstr "proc_join" -#: sql_help.c:748 sql_help.c:760 sql_help.c:2159 +#: sql_help.c:748 sql_help.c:760 sql_help.c:2163 msgid "strategy_number" msgstr "strategia_num" #: sql_help.c:750 sql_help.c:751 sql_help.c:754 sql_help.c:755 sql_help.c:761 -#: sql_help.c:762 sql_help.c:764 sql_help.c:765 sql_help.c:2161 
sql_help.c:2162 -#: sql_help.c:2165 sql_help.c:2166 +#: sql_help.c:762 sql_help.c:764 sql_help.c:765 sql_help.c:2165 +#: sql_help.c:2166 sql_help.c:2169 sql_help.c:2170 msgid "op_type" msgstr "tipo_op" -#: sql_help.c:752 sql_help.c:2163 +#: sql_help.c:752 sql_help.c:2167 msgid "sort_family_name" msgstr "nome_famiglia_sort" -#: sql_help.c:753 sql_help.c:763 sql_help.c:2164 +#: sql_help.c:753 sql_help.c:763 sql_help.c:2168 msgid "support_number" msgstr "num_supporto" -#: sql_help.c:757 sql_help.c:1795 sql_help.c:2168 sql_help.c:2631 -#: sql_help.c:2633 +#: sql_help.c:757 sql_help.c:1799 sql_help.c:2172 sql_help.c:2639 +#: sql_help.c:2641 msgid "argument_type" msgstr "tipo_argomento" #: sql_help.c:788 sql_help.c:791 sql_help.c:808 sql_help.c:810 sql_help.c:812 #: sql_help.c:883 sql_help.c:922 sql_help.c:1271 sql_help.c:1274 -#: sql_help.c:1420 sql_help.c:1460 sql_help.c:1527 sql_help.c:1552 -#: sql_help.c:1557 sql_help.c:1572 sql_help.c:1629 sql_help.c:1634 -#: sql_help.c:1963 sql_help.c:1975 sql_help.c:2080 sql_help.c:2116 -#: sql_help.c:2192 sql_help.c:2207 sql_help.c:2263 sql_help.c:2314 -#: sql_help.c:2345 sql_help.c:2443 sql_help.c:2459 sql_help.c:2471 -#: sql_help.c:2538 sql_help.c:2657 sql_help.c:2834 sql_help.c:3051 -#: sql_help.c:3076 sql_help.c:3182 sql_help.c:3364 sql_help.c:3369 -#: sql_help.c:3414 sql_help.c:3446 sql_help.c:3697 sql_help.c:3702 -#: sql_help.c:3800 sql_help.c:3899 sql_help.c:3901 sql_help.c:3950 -#: sql_help.c:3989 sql_help.c:4138 sql_help.c:4140 sql_help.c:4189 -#: sql_help.c:4223 sql_help.c:4245 sql_help.c:4247 sql_help.c:4248 -#: sql_help.c:4332 sql_help.c:4334 sql_help.c:4383 +#: sql_help.c:1424 sql_help.c:1464 sql_help.c:1531 sql_help.c:1556 +#: sql_help.c:1561 sql_help.c:1576 sql_help.c:1633 sql_help.c:1638 +#: sql_help.c:1967 sql_help.c:1979 sql_help.c:2084 sql_help.c:2120 +#: sql_help.c:2196 sql_help.c:2211 sql_help.c:2267 sql_help.c:2318 +#: sql_help.c:2349 sql_help.c:2448 sql_help.c:2464 sql_help.c:2476 +#: sql_help.c:2546 sql_help.c:2665 sql_help.c:2842 sql_help.c:3059 +#: sql_help.c:3084 sql_help.c:3190 sql_help.c:3372 sql_help.c:3377 +#: sql_help.c:3422 sql_help.c:3454 sql_help.c:3705 sql_help.c:3710 +#: sql_help.c:3808 sql_help.c:3907 sql_help.c:3909 sql_help.c:3958 +#: sql_help.c:3997 sql_help.c:4146 sql_help.c:4148 sql_help.c:4197 +#: sql_help.c:4231 sql_help.c:4253 sql_help.c:4255 sql_help.c:4256 +#: sql_help.c:4340 sql_help.c:4342 sql_help.c:4391 msgid "table_name" msgstr "nome_tabella" -#: sql_help.c:793 sql_help.c:2194 +#: sql_help.c:793 sql_help.c:2198 msgid "using_expression" msgstr "espressione_using" -#: sql_help.c:794 sql_help.c:2195 +#: sql_help.c:794 sql_help.c:2199 msgid "check_expression" msgstr "espressione_check" -#: sql_help.c:814 sql_help.c:2208 +#: sql_help.c:814 sql_help.c:2212 msgid "publication_parameter" msgstr "parametro_pubblicazione" -#: sql_help.c:857 sql_help.c:1357 sql_help.c:2060 sql_help.c:2240 -#: sql_help.c:2768 +#: sql_help.c:857 sql_help.c:1357 sql_help.c:2064 sql_help.c:2244 +#: sql_help.c:2776 msgid "password" msgstr "password" -#: sql_help.c:858 sql_help.c:1358 sql_help.c:2061 sql_help.c:2241 -#: sql_help.c:2769 +#: sql_help.c:858 sql_help.c:1358 sql_help.c:2065 sql_help.c:2245 +#: sql_help.c:2777 msgid "timestamp" msgstr "timestamp" -#: sql_help.c:862 sql_help.c:866 sql_help.c:869 sql_help.c:872 sql_help.c:3374 -#: sql_help.c:3707 +#: sql_help.c:862 sql_help.c:866 sql_help.c:869 sql_help.c:872 sql_help.c:1362 +#: sql_help.c:1366 sql_help.c:1369 sql_help.c:1372 sql_help.c:3382 +#: sql_help.c:3715 msgid 
"database_name" msgstr "nome_database" -#: sql_help.c:916 sql_help.c:2309 +#: sql_help.c:916 sql_help.c:2313 msgid "increment" msgstr "incremento" -#: sql_help.c:917 sql_help.c:2310 +#: sql_help.c:917 sql_help.c:2314 msgid "minvalue" msgstr "valoremin" -#: sql_help.c:918 sql_help.c:2311 +#: sql_help.c:918 sql_help.c:2315 msgid "maxvalue" msgstr "valoremax" -#: sql_help.c:919 sql_help.c:2312 sql_help.c:3897 sql_help.c:3987 -#: sql_help.c:4136 sql_help.c:4265 sql_help.c:4330 +#: sql_help.c:919 sql_help.c:2316 sql_help.c:3905 sql_help.c:3995 +#: sql_help.c:4144 sql_help.c:4273 sql_help.c:4338 msgid "start" msgstr "inizio" @@ -4042,23 +4117,27 @@ msgstr "inizio" msgid "restart" msgstr "riavvio" -#: sql_help.c:921 sql_help.c:2313 +#: sql_help.c:921 sql_help.c:2317 msgid "cache" msgstr "cache" -#: sql_help.c:978 sql_help.c:2357 +#: sql_help.c:978 sql_help.c:2361 msgid "conninfo" msgstr "conninfo" -#: sql_help.c:980 sql_help.c:2358 +#: sql_help.c:980 sql_help.c:2362 msgid "publication_name" msgstr "nome_pubblicazione" -#: sql_help.c:981 sql_help.c:984 +#: sql_help.c:981 +msgid "set_publication_option" +msgstr "opzione_set_publication" + +#: sql_help.c:984 msgid "refresh_option" msgstr "opzione_refresh" -#: sql_help.c:989 sql_help.c:2359 +#: sql_help.c:989 sql_help.c:2363 msgid "subscription_parameter" msgstr "parametro_sottoscrizione" @@ -4066,11 +4145,11 @@ msgstr "parametro_sottoscrizione" msgid "partition_name" msgstr "nome_partizione" -#: sql_help.c:1101 sql_help.c:1980 sql_help.c:2476 +#: sql_help.c:1101 sql_help.c:1984 sql_help.c:2481 msgid "partition_bound_spec" msgstr "specifica_margine_partizione" -#: sql_help.c:1119 sql_help.c:2488 +#: sql_help.c:1119 sql_help.c:2493 msgid "sequence_options" msgstr "opzioni_sequenza" @@ -4090,7 +4169,7 @@ msgstr "nome_regola_di_riscrittura" msgid "and table_constraint_using_index is:" msgstr "e vincolo_di_tabella_con_indice è:" -#: sql_help.c:1173 sql_help.c:1176 sql_help.c:2557 +#: sql_help.c:1173 sql_help.c:1176 sql_help.c:2565 msgid "tablespace_option" msgstr "opzione_tablespace" @@ -4111,7 +4190,7 @@ msgid "new_dictionary" msgstr "nuovo_dizionario" #: sql_help.c:1300 sql_help.c:1313 sql_help.c:1316 sql_help.c:1317 -#: sql_help.c:2708 +#: sql_help.c:2716 msgid "attribute_name" msgstr "nome_attributo" @@ -4131,1494 +4210,1498 @@ msgstr "valore_enum_vicino" msgid "existing_enum_value" msgstr "valore_enum_esistente" -#: sql_help.c:1381 sql_help.c:1972 sql_help.c:1981 sql_help.c:2325 -#: sql_help.c:2786 sql_help.c:3207 sql_help.c:3380 sql_help.c:3415 -#: sql_help.c:3713 +#: sql_help.c:1385 sql_help.c:1976 sql_help.c:1985 sql_help.c:2329 +#: sql_help.c:2794 sql_help.c:3215 sql_help.c:3388 sql_help.c:3423 +#: sql_help.c:3721 msgid "server_name" msgstr "nome_server" -#: sql_help.c:1409 sql_help.c:1412 sql_help.c:2801 +#: sql_help.c:1413 sql_help.c:1416 sql_help.c:2809 msgid "view_option_name" msgstr "nome_opzione_vista" -#: sql_help.c:1410 sql_help.c:2802 +#: sql_help.c:1414 sql_help.c:2810 msgid "view_option_value" msgstr "valore_opzione_vista" -#: sql_help.c:1435 sql_help.c:4045 sql_help.c:4047 sql_help.c:4071 +#: sql_help.c:1439 sql_help.c:4053 sql_help.c:4055 sql_help.c:4079 msgid "transaction_mode" msgstr "modalità_transazione" -#: sql_help.c:1436 sql_help.c:4048 sql_help.c:4072 +#: sql_help.c:1440 sql_help.c:4056 sql_help.c:4080 msgid "where transaction_mode is one of:" msgstr "dove modalità_transazione è una di:" -#: sql_help.c:1524 +#: sql_help.c:1528 msgid "relation_name" msgstr "nome_relazione" -#: sql_help.c:1529 sql_help.c:3376 sql_help.c:3709 
+#: sql_help.c:1533 sql_help.c:3384 sql_help.c:3717 msgid "domain_name" msgstr "nome_dominio" -#: sql_help.c:1551 +#: sql_help.c:1555 msgid "policy_name" msgstr "nome_regola" -#: sql_help.c:1556 +#: sql_help.c:1560 msgid "rule_name" msgstr "nome_ruolo" -#: sql_help.c:1575 +#: sql_help.c:1579 msgid "text" msgstr "testo" -#: sql_help.c:1600 sql_help.c:3559 sql_help.c:3747 +#: sql_help.c:1604 sql_help.c:3567 sql_help.c:3755 msgid "transaction_id" msgstr "id_transazione" -#: sql_help.c:1631 sql_help.c:1637 sql_help.c:3485 +#: sql_help.c:1635 sql_help.c:1641 sql_help.c:3493 msgid "filename" msgstr "nome_file" -#: sql_help.c:1632 sql_help.c:1638 sql_help.c:2265 sql_help.c:2266 -#: sql_help.c:2267 +#: sql_help.c:1636 sql_help.c:1642 sql_help.c:2269 sql_help.c:2270 +#: sql_help.c:2271 msgid "command" msgstr "comando" -#: sql_help.c:1636 sql_help.c:2121 sql_help.c:2543 sql_help.c:2803 -#: sql_help.c:2821 sql_help.c:3450 +#: sql_help.c:1640 sql_help.c:2125 sql_help.c:2551 sql_help.c:2811 +#: sql_help.c:2829 sql_help.c:3458 msgid "query" msgstr "query" -#: sql_help.c:1640 sql_help.c:3253 +#: sql_help.c:1644 sql_help.c:3261 msgid "where option can be one of:" msgstr "dove opzione può essere una di:" -#: sql_help.c:1641 +#: sql_help.c:1645 msgid "format_name" msgstr "nome_formato" -#: sql_help.c:1642 sql_help.c:1643 sql_help.c:1646 sql_help.c:3254 -#: sql_help.c:3255 sql_help.c:3256 sql_help.c:3257 sql_help.c:3258 -#: sql_help.c:3259 +#: sql_help.c:1646 sql_help.c:1647 sql_help.c:1650 sql_help.c:3262 +#: sql_help.c:3263 sql_help.c:3264 sql_help.c:3265 sql_help.c:3266 +#: sql_help.c:3267 msgid "boolean" msgstr "booleano" -#: sql_help.c:1644 +#: sql_help.c:1648 msgid "delimiter_character" msgstr "carattere_delimitatore" -#: sql_help.c:1645 +#: sql_help.c:1649 msgid "null_string" msgstr "stringa_nulla" -#: sql_help.c:1647 +#: sql_help.c:1651 msgid "quote_character" msgstr "carattere_virgolette" -#: sql_help.c:1648 +#: sql_help.c:1652 msgid "escape_character" msgstr "carattere_di_escape" -#: sql_help.c:1652 +#: sql_help.c:1656 msgid "encoding_name" msgstr "nome_codifica" -#: sql_help.c:1663 +#: sql_help.c:1667 msgid "access_method_type" msgstr "tipo_metodo_accesso" -#: sql_help.c:1729 sql_help.c:1748 sql_help.c:1751 +#: sql_help.c:1733 sql_help.c:1752 sql_help.c:1755 msgid "arg_data_type" msgstr "topo_dato_argomento" -#: sql_help.c:1730 sql_help.c:1752 sql_help.c:1760 +#: sql_help.c:1734 sql_help.c:1756 sql_help.c:1764 msgid "sfunc" msgstr "sfunz" -#: sql_help.c:1731 sql_help.c:1753 sql_help.c:1761 +#: sql_help.c:1735 sql_help.c:1757 sql_help.c:1765 msgid "state_data_type" msgstr "tipo_dato_stato" -#: sql_help.c:1732 sql_help.c:1754 sql_help.c:1762 +#: sql_help.c:1736 sql_help.c:1758 sql_help.c:1766 msgid "state_data_size" msgstr "dimensione_dato_stato" -#: sql_help.c:1733 sql_help.c:1755 sql_help.c:1763 +#: sql_help.c:1737 sql_help.c:1759 sql_help.c:1767 msgid "ffunc" msgstr "ffunz" -#: sql_help.c:1734 sql_help.c:1764 +#: sql_help.c:1738 sql_help.c:1768 msgid "combinefunc" msgstr "funz_combine" -#: sql_help.c:1735 sql_help.c:1765 +#: sql_help.c:1739 sql_help.c:1769 msgid "serialfunc" msgstr "funz_serial" -#: sql_help.c:1736 sql_help.c:1766 +#: sql_help.c:1740 sql_help.c:1770 msgid "deserialfunc" msgstr "funz_deserial" -#: sql_help.c:1737 sql_help.c:1756 sql_help.c:1767 +#: sql_help.c:1741 sql_help.c:1760 sql_help.c:1771 msgid "initial_condition" msgstr "condizione_iniziale" -#: sql_help.c:1738 sql_help.c:1768 +#: sql_help.c:1742 sql_help.c:1772 msgid "msfunc" msgstr "msfunz" -#: sql_help.c:1739 
sql_help.c:1769 +#: sql_help.c:1743 sql_help.c:1773 msgid "minvfunc" msgstr "minvfunz" -#: sql_help.c:1740 sql_help.c:1770 +#: sql_help.c:1744 sql_help.c:1774 msgid "mstate_data_type" msgstr "tipo_dato_mstato" -#: sql_help.c:1741 sql_help.c:1771 +#: sql_help.c:1745 sql_help.c:1775 msgid "mstate_data_size" msgstr "tipo_dato_mstato" -#: sql_help.c:1742 sql_help.c:1772 +#: sql_help.c:1746 sql_help.c:1776 msgid "mffunc" msgstr "mffunz" -#: sql_help.c:1743 sql_help.c:1773 +#: sql_help.c:1747 sql_help.c:1777 msgid "minitial_condition" msgstr "condizione_minima" -#: sql_help.c:1744 sql_help.c:1774 +#: sql_help.c:1748 sql_help.c:1778 msgid "sort_operator" msgstr "operatore_di_ordinamento" -#: sql_help.c:1757 +#: sql_help.c:1761 msgid "or the old syntax" msgstr "o la vecchia sintassi" -#: sql_help.c:1759 +#: sql_help.c:1763 msgid "base_type" msgstr "tipo_base" -#: sql_help.c:1815 +#: sql_help.c:1819 msgid "locale" msgstr "locale" -#: sql_help.c:1816 sql_help.c:1854 +#: sql_help.c:1820 sql_help.c:1858 msgid "lc_collate" msgstr "lc_collate" -#: sql_help.c:1817 sql_help.c:1855 +#: sql_help.c:1821 sql_help.c:1859 msgid "lc_ctype" msgstr "lc_ctype" -#: sql_help.c:1818 sql_help.c:3798 +#: sql_help.c:1822 sql_help.c:3806 msgid "provider" msgstr "provider" -#: sql_help.c:1819 sql_help.c:1910 +#: sql_help.c:1823 sql_help.c:1914 msgid "version" msgstr "versione" -#: sql_help.c:1821 +#: sql_help.c:1825 msgid "existing_collation" msgstr "ordinamento_esistente" -#: sql_help.c:1831 +#: sql_help.c:1835 msgid "source_encoding" msgstr "codifica_origine" -#: sql_help.c:1832 +#: sql_help.c:1836 msgid "dest_encoding" msgstr "codifica_destinazione" -#: sql_help.c:1852 sql_help.c:2583 +#: sql_help.c:1856 sql_help.c:2591 msgid "template" msgstr "template" -#: sql_help.c:1853 +#: sql_help.c:1857 msgid "encoding" msgstr "codifica" -#: sql_help.c:1879 +#: sql_help.c:1883 msgid "constraint" msgstr "vincolo" -#: sql_help.c:1880 +#: sql_help.c:1884 msgid "where constraint is:" msgstr "dove vincolo di è:" -#: sql_help.c:1894 sql_help.c:2262 sql_help.c:2656 +#: sql_help.c:1898 sql_help.c:2266 sql_help.c:2664 msgid "event" msgstr "evento" -#: sql_help.c:1895 +#: sql_help.c:1899 msgid "filter_variable" msgstr "valiabile_filtro" -#: sql_help.c:1911 +#: sql_help.c:1915 msgid "old_version" msgstr "vecchia_versione" -#: sql_help.c:1984 sql_help.c:2484 +#: sql_help.c:1988 sql_help.c:2489 msgid "where column_constraint is:" msgstr "dove vincolo_di_colonna è:" -#: sql_help.c:1987 sql_help.c:2019 sql_help.c:2487 +#: sql_help.c:1991 sql_help.c:2023 sql_help.c:2492 msgid "default_expr" msgstr "expr_default" -#: sql_help.c:1988 sql_help.c:2495 +#: sql_help.c:1992 sql_help.c:2500 msgid "and table_constraint is:" msgstr "e vincolo_di_tabella è:" -#: sql_help.c:2020 +#: sql_help.c:2024 msgid "rettype" msgstr "tipo_ritorno" -#: sql_help.c:2022 +#: sql_help.c:2026 msgid "column_type" msgstr "tipo_colonna" -#: sql_help.c:2030 +#: sql_help.c:2034 msgid "definition" msgstr "definizione" -#: sql_help.c:2031 +#: sql_help.c:2035 msgid "obj_file" msgstr "file_obj" -#: sql_help.c:2032 +#: sql_help.c:2036 msgid "link_symbol" msgstr "simbolo_link" -#: sql_help.c:2033 +#: sql_help.c:2037 msgid "attribute" msgstr "attributo" -#: sql_help.c:2067 sql_help.c:2247 sql_help.c:2775 +#: sql_help.c:2071 sql_help.c:2251 sql_help.c:2783 msgid "uid" msgstr "uid" -#: sql_help.c:2081 +#: sql_help.c:2085 msgid "method" msgstr "metodo" -#: sql_help.c:2085 sql_help.c:2455 sql_help.c:2467 sql_help.c:2480 -#: sql_help.c:2524 sql_help.c:3459 +#: sql_help.c:2089 sql_help.c:2460 
sql_help.c:2472 sql_help.c:2485 +#: sql_help.c:2532 sql_help.c:3467 msgid "opclass" msgstr "classe_op" -#: sql_help.c:2089 sql_help.c:2506 +#: sql_help.c:2093 sql_help.c:2511 msgid "predicate" msgstr "predicato" -#: sql_help.c:2101 +#: sql_help.c:2105 msgid "call_handler" msgstr "handler_chiamata" -#: sql_help.c:2102 +#: sql_help.c:2106 msgid "inline_handler" msgstr "handler_inline" -#: sql_help.c:2103 +#: sql_help.c:2107 msgid "valfunction" msgstr "funzione_valid" -#: sql_help.c:2139 +#: sql_help.c:2143 msgid "com_op" msgstr "com_op" -#: sql_help.c:2140 +#: sql_help.c:2144 msgid "neg_op" msgstr "neg_op" -#: sql_help.c:2158 +#: sql_help.c:2162 msgid "family_name" msgstr "nome_famiglia" -#: sql_help.c:2169 +#: sql_help.c:2173 msgid "storage_type" msgstr "tipo_memorizzazione" -#: sql_help.c:2264 sql_help.c:2660 sql_help.c:2837 sql_help.c:3469 -#: sql_help.c:3888 sql_help.c:3890 sql_help.c:3978 sql_help.c:3980 -#: sql_help.c:4127 sql_help.c:4129 sql_help.c:4232 sql_help.c:4321 -#: sql_help.c:4323 +#: sql_help.c:2268 sql_help.c:2668 sql_help.c:2845 sql_help.c:3477 +#: sql_help.c:3896 sql_help.c:3898 sql_help.c:3986 sql_help.c:3988 +#: sql_help.c:4135 sql_help.c:4137 sql_help.c:4240 sql_help.c:4329 +#: sql_help.c:4331 msgid "condition" msgstr "condizione" -#: sql_help.c:2268 sql_help.c:2663 +#: sql_help.c:2272 sql_help.c:2671 msgid "where event can be one of:" msgstr "dove evento può essere uno di:" -#: sql_help.c:2287 sql_help.c:2289 +#: sql_help.c:2291 sql_help.c:2293 msgid "schema_element" msgstr "elemento_di_schema" -#: sql_help.c:2326 +#: sql_help.c:2330 msgid "server_type" msgstr "tipo_di_server" -#: sql_help.c:2327 +#: sql_help.c:2331 msgid "server_version" msgstr "versione_server" -#: sql_help.c:2328 sql_help.c:3378 sql_help.c:3711 +#: sql_help.c:2332 sql_help.c:3386 sql_help.c:3719 msgid "fdw_name" msgstr "nome_fdw" -#: sql_help.c:2341 +#: sql_help.c:2345 msgid "statistics_name" msgstr "nome_statistica" -#: sql_help.c:2342 -msgid "statistic_type" +#: sql_help.c:2346 +msgid "statistics_kind" msgstr "tipo_statistica" -#: sql_help.c:2356 +#: sql_help.c:2360 msgid "subscription_name" msgstr "nome_sottoscrizione" -#: sql_help.c:2449 +#: sql_help.c:2454 msgid "source_table" msgstr "tabella_origine" -#: sql_help.c:2450 +#: sql_help.c:2455 msgid "like_option" msgstr "opzioni_di_like" -#: sql_help.c:2489 sql_help.c:2490 sql_help.c:2499 sql_help.c:2501 -#: sql_help.c:2505 +#: sql_help.c:2494 sql_help.c:2495 sql_help.c:2504 sql_help.c:2506 +#: sql_help.c:2510 msgid "index_parameters" msgstr "parametri_di_indice" -#: sql_help.c:2491 sql_help.c:2508 +#: sql_help.c:2496 sql_help.c:2513 msgid "reftable" msgstr "tabella_ref" -#: sql_help.c:2492 sql_help.c:2509 +#: sql_help.c:2497 sql_help.c:2514 msgid "refcolumn" msgstr "colonna_ref" -#: sql_help.c:2503 +#: sql_help.c:2508 msgid "exclude_element" msgstr "elemento_di_esclusione" -#: sql_help.c:2504 sql_help.c:3895 sql_help.c:3985 sql_help.c:4134 -#: sql_help.c:4263 sql_help.c:4328 +#: sql_help.c:2509 sql_help.c:3903 sql_help.c:3993 sql_help.c:4142 +#: sql_help.c:4271 sql_help.c:4336 msgid "operator" msgstr "operatore" -#: sql_help.c:2512 +#: sql_help.c:2517 msgid "and like_option is:" msgstr "e opzione_like è:" -#: sql_help.c:2513 +#: sql_help.c:2518 msgid "and partition_bound_spec is:" msgstr "e specifica_margine_partizione è:" -#: sql_help.c:2514 sql_help.c:2515 sql_help.c:2516 -msgid "bound_literal" -msgstr "letterale_limite" +#: sql_help.c:2519 sql_help.c:2521 sql_help.c:2523 +msgid "numeric_literal" +msgstr "letterale_numerico" -#: sql_help.c:2517 
+#: sql_help.c:2520 sql_help.c:2522 sql_help.c:2524 +msgid "string_literal" +msgstr "letterale_stringa" + +#: sql_help.c:2525 msgid "index_parameters in UNIQUE, PRIMARY KEY, and EXCLUDE constraints are:" msgstr "parametri_di_indice nei vincoli UNIQUE, PRIMARY KEY e EXCLUDE sono:" -#: sql_help.c:2521 +#: sql_help.c:2529 msgid "exclude_element in an EXCLUDE constraint is:" msgstr "elemento_di_esclusione in un vincolo EXCLUDE è:" -#: sql_help.c:2556 +#: sql_help.c:2564 msgid "directory" msgstr "directory" -#: sql_help.c:2570 +#: sql_help.c:2578 msgid "parser_name" msgstr "nome_parser" -#: sql_help.c:2571 +#: sql_help.c:2579 msgid "source_config" msgstr "config_origine" -#: sql_help.c:2600 +#: sql_help.c:2608 msgid "start_function" msgstr "funzione_inizio" -#: sql_help.c:2601 +#: sql_help.c:2609 msgid "gettoken_function" msgstr "funzione_gettoken" -#: sql_help.c:2602 +#: sql_help.c:2610 msgid "end_function" msgstr "funzione_fine" -#: sql_help.c:2603 +#: sql_help.c:2611 msgid "lextypes_function" msgstr "funzione_lextypes" -#: sql_help.c:2604 +#: sql_help.c:2612 msgid "headline_function" msgstr "funzione_headline" -#: sql_help.c:2616 +#: sql_help.c:2624 msgid "init_function" msgstr "funzione_init" -#: sql_help.c:2617 +#: sql_help.c:2625 msgid "lexize_function" msgstr "funzione_lexize" -#: sql_help.c:2630 +#: sql_help.c:2638 msgid "from_sql_function_name" msgstr "nome_funzione_from_sql" -#: sql_help.c:2632 +#: sql_help.c:2640 msgid "to_sql_function_name" msgstr "nome_funzione_to_sql" -#: sql_help.c:2658 +#: sql_help.c:2666 msgid "referenced_table_name" msgstr "nome_tabella_referenziata" -#: sql_help.c:2659 +#: sql_help.c:2667 msgid "transition_relation_name" msgstr "nome_tabella_transizione" -#: sql_help.c:2662 +#: sql_help.c:2670 msgid "arguments" msgstr "argomenti" -#: sql_help.c:2712 sql_help.c:3823 +#: sql_help.c:2720 sql_help.c:3831 msgid "label" msgstr "etichetta" -#: sql_help.c:2714 +#: sql_help.c:2722 msgid "subtype" msgstr "sottotipo" -#: sql_help.c:2715 +#: sql_help.c:2723 msgid "subtype_operator_class" msgstr "classe_operatore_sottotipo" -#: sql_help.c:2717 +#: sql_help.c:2725 msgid "canonical_function" msgstr "funzione_canonica" -#: sql_help.c:2718 +#: sql_help.c:2726 msgid "subtype_diff_function" msgstr "funzione_diff_sottotipo" -#: sql_help.c:2720 +#: sql_help.c:2728 msgid "input_function" msgstr "funzione_input" -#: sql_help.c:2721 +#: sql_help.c:2729 msgid "output_function" msgstr "funzione_output" -#: sql_help.c:2722 +#: sql_help.c:2730 msgid "receive_function" msgstr "funzione_receive" -#: sql_help.c:2723 +#: sql_help.c:2731 msgid "send_function" msgstr "funzione_send" -#: sql_help.c:2724 +#: sql_help.c:2732 msgid "type_modifier_input_function" msgstr "funzione_input_modificatore_tipo" -#: sql_help.c:2725 +#: sql_help.c:2733 msgid "type_modifier_output_function" msgstr "funzione_output_modificatore_tipo" -#: sql_help.c:2726 +#: sql_help.c:2734 msgid "analyze_function" msgstr "funzione_analyze" -#: sql_help.c:2727 +#: sql_help.c:2735 msgid "internallength" msgstr "lunghezza_interna" -#: sql_help.c:2728 +#: sql_help.c:2736 msgid "alignment" msgstr "allineamento" -#: sql_help.c:2729 +#: sql_help.c:2737 msgid "storage" msgstr "memorizzazione" -#: sql_help.c:2730 +#: sql_help.c:2738 msgid "like_type" msgstr "tipo_like" -#: sql_help.c:2731 +#: sql_help.c:2739 msgid "category" msgstr "categoria" -#: sql_help.c:2732 +#: sql_help.c:2740 msgid "preferred" msgstr "preferito" -#: sql_help.c:2733 +#: sql_help.c:2741 msgid "default" msgstr "predefinito" -#: sql_help.c:2734 +#: sql_help.c:2742 
msgid "element" msgstr "elemento" -#: sql_help.c:2735 +#: sql_help.c:2743 msgid "delimiter" msgstr "delimitatore" -#: sql_help.c:2736 +#: sql_help.c:2744 msgid "collatable" msgstr "ordinabile" -#: sql_help.c:2833 sql_help.c:3445 sql_help.c:3883 sql_help.c:3972 -#: sql_help.c:4122 sql_help.c:4222 sql_help.c:4316 +#: sql_help.c:2841 sql_help.c:3453 sql_help.c:3891 sql_help.c:3980 +#: sql_help.c:4130 sql_help.c:4230 sql_help.c:4324 msgid "with_query" msgstr "query_with" -#: sql_help.c:2835 sql_help.c:3447 sql_help.c:3902 sql_help.c:3908 -#: sql_help.c:3911 sql_help.c:3915 sql_help.c:3919 sql_help.c:3927 -#: sql_help.c:4141 sql_help.c:4147 sql_help.c:4150 sql_help.c:4154 -#: sql_help.c:4158 sql_help.c:4166 sql_help.c:4224 sql_help.c:4335 -#: sql_help.c:4341 sql_help.c:4344 sql_help.c:4348 sql_help.c:4352 -#: sql_help.c:4360 +#: sql_help.c:2843 sql_help.c:3455 sql_help.c:3910 sql_help.c:3916 +#: sql_help.c:3919 sql_help.c:3923 sql_help.c:3927 sql_help.c:3935 +#: sql_help.c:4149 sql_help.c:4155 sql_help.c:4158 sql_help.c:4162 +#: sql_help.c:4166 sql_help.c:4174 sql_help.c:4232 sql_help.c:4343 +#: sql_help.c:4349 sql_help.c:4352 sql_help.c:4356 sql_help.c:4360 +#: sql_help.c:4368 msgid "alias" msgstr "alias" -#: sql_help.c:2836 +#: sql_help.c:2844 msgid "using_list" msgstr "lista_using" -#: sql_help.c:2838 sql_help.c:3285 sql_help.c:3526 sql_help.c:4233 +#: sql_help.c:2846 sql_help.c:3293 sql_help.c:3534 sql_help.c:4241 msgid "cursor_name" msgstr "nome_cursore" -#: sql_help.c:2839 sql_help.c:3453 sql_help.c:4234 +#: sql_help.c:2847 sql_help.c:3461 sql_help.c:4242 msgid "output_expression" msgstr "espressione_output" -#: sql_help.c:2840 sql_help.c:3454 sql_help.c:3886 sql_help.c:3975 -#: sql_help.c:4125 sql_help.c:4235 sql_help.c:4319 +#: sql_help.c:2848 sql_help.c:3462 sql_help.c:3894 sql_help.c:3983 +#: sql_help.c:4133 sql_help.c:4243 sql_help.c:4327 msgid "output_name" msgstr "nome_output" -#: sql_help.c:2856 +#: sql_help.c:2864 msgid "code" msgstr "codice" -#: sql_help.c:3231 +#: sql_help.c:3239 msgid "parameter" msgstr "parametro" -#: sql_help.c:3251 sql_help.c:3252 sql_help.c:3551 +#: sql_help.c:3259 sql_help.c:3260 sql_help.c:3559 msgid "statement" msgstr "istruzione" -#: sql_help.c:3284 sql_help.c:3525 +#: sql_help.c:3292 sql_help.c:3533 msgid "direction" msgstr "direzione" -#: sql_help.c:3286 sql_help.c:3527 +#: sql_help.c:3294 sql_help.c:3535 msgid "where direction can be empty or one of:" msgstr "dove direzione può essere vuota o una di:" -#: sql_help.c:3287 sql_help.c:3288 sql_help.c:3289 sql_help.c:3290 -#: sql_help.c:3291 sql_help.c:3528 sql_help.c:3529 sql_help.c:3530 -#: sql_help.c:3531 sql_help.c:3532 sql_help.c:3896 sql_help.c:3898 -#: sql_help.c:3986 sql_help.c:3988 sql_help.c:4135 sql_help.c:4137 -#: sql_help.c:4264 sql_help.c:4266 sql_help.c:4329 sql_help.c:4331 +#: sql_help.c:3295 sql_help.c:3296 sql_help.c:3297 sql_help.c:3298 +#: sql_help.c:3299 sql_help.c:3536 sql_help.c:3537 sql_help.c:3538 +#: sql_help.c:3539 sql_help.c:3540 sql_help.c:3904 sql_help.c:3906 +#: sql_help.c:3994 sql_help.c:3996 sql_help.c:4143 sql_help.c:4145 +#: sql_help.c:4272 sql_help.c:4274 sql_help.c:4337 sql_help.c:4339 msgid "count" msgstr "conteggio" -#: sql_help.c:3371 sql_help.c:3704 +#: sql_help.c:3379 sql_help.c:3712 msgid "sequence_name" msgstr "nome_sequenza" -#: sql_help.c:3384 sql_help.c:3717 +#: sql_help.c:3392 sql_help.c:3725 msgid "arg_name" msgstr "nome_arg" -#: sql_help.c:3385 sql_help.c:3718 +#: sql_help.c:3393 sql_help.c:3726 msgid "arg_type" msgstr "tipo_arg" -#: sql_help.c:3390 
sql_help.c:3723 +#: sql_help.c:3398 sql_help.c:3731 msgid "loid" msgstr "loid" -#: sql_help.c:3413 +#: sql_help.c:3421 msgid "remote_schema" msgstr "schema_remoto" -#: sql_help.c:3416 +#: sql_help.c:3424 msgid "local_schema" msgstr "schema_locale" -#: sql_help.c:3451 +#: sql_help.c:3459 msgid "conflict_target" msgstr "target_conflitto" -#: sql_help.c:3452 +#: sql_help.c:3460 msgid "conflict_action" msgstr "azione_conflitto" -#: sql_help.c:3455 +#: sql_help.c:3463 msgid "where conflict_target can be one of:" msgstr "dove target_conflitto può essere uno di:" -#: sql_help.c:3456 +#: sql_help.c:3464 msgid "index_column_name" msgstr "nome_colonna_indice" -#: sql_help.c:3457 +#: sql_help.c:3465 msgid "index_expression" msgstr "espressione_indice" -#: sql_help.c:3460 +#: sql_help.c:3468 msgid "index_predicate" msgstr "indice_predicato" -#: sql_help.c:3462 +#: sql_help.c:3470 msgid "and conflict_action is one of:" msgstr "e azione_conflitto è una di:" -#: sql_help.c:3468 sql_help.c:4230 +#: sql_help.c:3476 sql_help.c:4238 msgid "sub-SELECT" msgstr "sub-SELECT" -#: sql_help.c:3477 sql_help.c:3540 sql_help.c:4206 +#: sql_help.c:3485 sql_help.c:3548 sql_help.c:4214 msgid "channel" msgstr "canale" -#: sql_help.c:3499 +#: sql_help.c:3507 msgid "lockmode" msgstr "modalità_lock" -#: sql_help.c:3500 +#: sql_help.c:3508 msgid "where lockmode is one of:" msgstr "dove modalità_lock è una di:" -#: sql_help.c:3541 +#: sql_help.c:3549 msgid "payload" msgstr "payload" -#: sql_help.c:3568 +#: sql_help.c:3576 msgid "old_role" msgstr "vecchio_ruolo" -#: sql_help.c:3569 +#: sql_help.c:3577 msgid "new_role" msgstr "nuovo_ruolo" -#: sql_help.c:3594 sql_help.c:3755 sql_help.c:3763 +#: sql_help.c:3602 sql_help.c:3763 sql_help.c:3771 msgid "savepoint_name" msgstr "nome_punto_salvataggio" -#: sql_help.c:3887 sql_help.c:3929 sql_help.c:3931 sql_help.c:3977 -#: sql_help.c:4126 sql_help.c:4168 sql_help.c:4170 sql_help.c:4320 -#: sql_help.c:4362 sql_help.c:4364 +#: sql_help.c:3895 sql_help.c:3937 sql_help.c:3939 sql_help.c:3985 +#: sql_help.c:4134 sql_help.c:4176 sql_help.c:4178 sql_help.c:4328 +#: sql_help.c:4370 sql_help.c:4372 msgid "from_item" msgstr "elemento_from" -#: sql_help.c:3889 sql_help.c:3941 sql_help.c:4128 sql_help.c:4180 -#: sql_help.c:4322 sql_help.c:4374 +#: sql_help.c:3897 sql_help.c:3949 sql_help.c:4136 sql_help.c:4188 +#: sql_help.c:4330 sql_help.c:4382 msgid "grouping_element" msgstr "elemento_raggruppante" -#: sql_help.c:3891 sql_help.c:3981 sql_help.c:4130 sql_help.c:4324 +#: sql_help.c:3899 sql_help.c:3989 sql_help.c:4138 sql_help.c:4332 msgid "window_name" msgstr "nome_finestra" -#: sql_help.c:3892 sql_help.c:3982 sql_help.c:4131 sql_help.c:4325 +#: sql_help.c:3900 sql_help.c:3990 sql_help.c:4139 sql_help.c:4333 msgid "window_definition" msgstr "definizione_finestra" -#: sql_help.c:3893 sql_help.c:3907 sql_help.c:3945 sql_help.c:3983 -#: sql_help.c:4132 sql_help.c:4146 sql_help.c:4184 sql_help.c:4326 -#: sql_help.c:4340 sql_help.c:4378 +#: sql_help.c:3901 sql_help.c:3915 sql_help.c:3953 sql_help.c:3991 +#: sql_help.c:4140 sql_help.c:4154 sql_help.c:4192 sql_help.c:4334 +#: sql_help.c:4348 sql_help.c:4386 msgid "select" msgstr "select" -#: sql_help.c:3900 sql_help.c:4139 sql_help.c:4333 +#: sql_help.c:3908 sql_help.c:4147 sql_help.c:4341 msgid "where from_item can be one of:" msgstr "dove from_item può essere uno di:" -#: sql_help.c:3903 sql_help.c:3909 sql_help.c:3912 sql_help.c:3916 -#: sql_help.c:3928 sql_help.c:4142 sql_help.c:4148 sql_help.c:4151 -#: sql_help.c:4155 sql_help.c:4167 
sql_help.c:4336 sql_help.c:4342 -#: sql_help.c:4345 sql_help.c:4349 sql_help.c:4361 +#: sql_help.c:3911 sql_help.c:3917 sql_help.c:3920 sql_help.c:3924 +#: sql_help.c:3936 sql_help.c:4150 sql_help.c:4156 sql_help.c:4159 +#: sql_help.c:4163 sql_help.c:4175 sql_help.c:4344 sql_help.c:4350 +#: sql_help.c:4353 sql_help.c:4357 sql_help.c:4369 msgid "column_alias" msgstr "alias_colonna" -#: sql_help.c:3904 sql_help.c:4143 sql_help.c:4337 +#: sql_help.c:3912 sql_help.c:4151 sql_help.c:4345 msgid "sampling_method" msgstr "metodo_di_campionamento" -#: sql_help.c:3905 sql_help.c:3914 sql_help.c:3918 sql_help.c:3922 -#: sql_help.c:3925 sql_help.c:4144 sql_help.c:4153 sql_help.c:4157 -#: sql_help.c:4161 sql_help.c:4164 sql_help.c:4338 sql_help.c:4347 -#: sql_help.c:4351 sql_help.c:4355 sql_help.c:4358 +#: sql_help.c:3913 sql_help.c:3922 sql_help.c:3926 sql_help.c:3930 +#: sql_help.c:3933 sql_help.c:4152 sql_help.c:4161 sql_help.c:4165 +#: sql_help.c:4169 sql_help.c:4172 sql_help.c:4346 sql_help.c:4355 +#: sql_help.c:4359 sql_help.c:4363 sql_help.c:4366 msgid "argument" msgstr "argomento" -#: sql_help.c:3906 sql_help.c:4145 sql_help.c:4339 +#: sql_help.c:3914 sql_help.c:4153 sql_help.c:4347 msgid "seed" msgstr "seme" -#: sql_help.c:3910 sql_help.c:3943 sql_help.c:4149 sql_help.c:4182 -#: sql_help.c:4343 sql_help.c:4376 +#: sql_help.c:3918 sql_help.c:3951 sql_help.c:4157 sql_help.c:4190 +#: sql_help.c:4351 sql_help.c:4384 msgid "with_query_name" msgstr "nome_query_with" -#: sql_help.c:3920 sql_help.c:3923 sql_help.c:3926 sql_help.c:4159 -#: sql_help.c:4162 sql_help.c:4165 sql_help.c:4353 sql_help.c:4356 -#: sql_help.c:4359 +#: sql_help.c:3928 sql_help.c:3931 sql_help.c:3934 sql_help.c:4167 +#: sql_help.c:4170 sql_help.c:4173 sql_help.c:4361 sql_help.c:4364 +#: sql_help.c:4367 msgid "column_definition" msgstr "definizione_colonna" -#: sql_help.c:3930 sql_help.c:4169 sql_help.c:4363 +#: sql_help.c:3938 sql_help.c:4177 sql_help.c:4371 msgid "join_type" msgstr "tipo_join" -#: sql_help.c:3932 sql_help.c:4171 sql_help.c:4365 +#: sql_help.c:3940 sql_help.c:4179 sql_help.c:4373 msgid "join_condition" msgstr "condizione_join" -#: sql_help.c:3933 sql_help.c:4172 sql_help.c:4366 +#: sql_help.c:3941 sql_help.c:4180 sql_help.c:4374 msgid "join_column" msgstr "colonna_join" -#: sql_help.c:3934 sql_help.c:4173 sql_help.c:4367 +#: sql_help.c:3942 sql_help.c:4181 sql_help.c:4375 msgid "and grouping_element can be one of:" msgstr "e elemento_raggruppante può essere uno di:" -#: sql_help.c:3942 sql_help.c:4181 sql_help.c:4375 +#: sql_help.c:3950 sql_help.c:4189 sql_help.c:4383 msgid "and with_query is:" msgstr "e with_query è:" -#: sql_help.c:3946 sql_help.c:4185 sql_help.c:4379 +#: sql_help.c:3954 sql_help.c:4193 sql_help.c:4387 msgid "values" msgstr "valori" -#: sql_help.c:3947 sql_help.c:4186 sql_help.c:4380 +#: sql_help.c:3955 sql_help.c:4194 sql_help.c:4388 msgid "insert" msgstr "insert" -#: sql_help.c:3948 sql_help.c:4187 sql_help.c:4381 +#: sql_help.c:3956 sql_help.c:4195 sql_help.c:4389 msgid "update" msgstr "update" -#: sql_help.c:3949 sql_help.c:4188 sql_help.c:4382 +#: sql_help.c:3957 sql_help.c:4196 sql_help.c:4390 msgid "delete" msgstr "delete" -#: sql_help.c:3976 +#: sql_help.c:3984 msgid "new_table" msgstr "nuova_tabella" -#: sql_help.c:4001 +#: sql_help.c:4009 msgid "timezone" msgstr "timezone" -#: sql_help.c:4046 +#: sql_help.c:4054 msgid "snapshot_id" msgstr "id_snapshot" -#: sql_help.c:4231 +#: sql_help.c:4239 msgid "from_list" msgstr "lista_from" -#: sql_help.c:4262 +#: sql_help.c:4270 msgid 
"sort_expression" msgstr "espressione_ordinamento" -#: sql_help.c:4389 sql_help.c:5174 +#: sql_help.c:4397 sql_help.c:5182 msgid "abort the current transaction" msgstr "annulla la transazione corrente" -#: sql_help.c:4394 +#: sql_help.c:4402 msgid "change the definition of an aggregate function" msgstr "cambia la definizione di una funzione di aggregazione" -#: sql_help.c:4399 +#: sql_help.c:4407 msgid "change the definition of a collation" msgstr "cambia la definizione di un ordinamento" -#: sql_help.c:4404 +#: sql_help.c:4412 msgid "change the definition of a conversion" msgstr "cambia la definizione di una conversione" -#: sql_help.c:4409 +#: sql_help.c:4417 msgid "change a database" msgstr "cambia un database" -#: sql_help.c:4414 +#: sql_help.c:4422 msgid "define default access privileges" msgstr "definisci i privilegi di accesso di default" -#: sql_help.c:4419 +#: sql_help.c:4427 msgid "change the definition of a domain" msgstr "cambia la definizione di un dominio" -#: sql_help.c:4424 +#: sql_help.c:4432 msgid "change the definition of an event trigger" msgstr "cambia la definizione di un trigger di evento" -#: sql_help.c:4429 +#: sql_help.c:4437 msgid "change the definition of an extension" msgstr "cambia la definizione di una estensione" -#: sql_help.c:4434 +#: sql_help.c:4442 msgid "change the definition of a foreign-data wrapper" msgstr "cambia la definizione di un wrapper di dati esterni" -#: sql_help.c:4439 +#: sql_help.c:4447 msgid "change the definition of a foreign table" msgstr "cambia la definizione di una tabella esterna" -#: sql_help.c:4444 +#: sql_help.c:4452 msgid "change the definition of a function" msgstr "cambia la definizione di una funzione" -#: sql_help.c:4449 +#: sql_help.c:4457 msgid "change role name or membership" msgstr "cambia il nome del ruolo o l'appartenenza" -#: sql_help.c:4454 +#: sql_help.c:4462 msgid "change the definition of an index" msgstr "cambia la definizione di un indice" -#: sql_help.c:4459 +#: sql_help.c:4467 msgid "change the definition of a procedural language" msgstr "cambia la definizione di un linguaggio procedurale" -#: sql_help.c:4464 +#: sql_help.c:4472 msgid "change the definition of a large object" msgstr "cambia la definizione di un large object" -#: sql_help.c:4469 +#: sql_help.c:4477 msgid "change the definition of a materialized view" msgstr "cambia la definizione di una vista materializzata" -#: sql_help.c:4474 +#: sql_help.c:4482 msgid "change the definition of an operator" msgstr "cambia la definizione di un operatore" -#: sql_help.c:4479 +#: sql_help.c:4487 msgid "change the definition of an operator class" msgstr "cambia la definizione di una classe di operatori" -#: sql_help.c:4484 +#: sql_help.c:4492 msgid "change the definition of an operator family" msgstr "cambia la definizione di una famiglia di operatori" -#: sql_help.c:4489 +#: sql_help.c:4497 msgid "change the definition of a row level security policy" msgstr "cambia la definizione di una regola di sicurezza per riga" -#: sql_help.c:4494 +#: sql_help.c:4502 msgid "change the definition of a publication" msgstr "cambia la definizione di una pubblicazione" -#: sql_help.c:4499 sql_help.c:4579 +#: sql_help.c:4507 sql_help.c:4587 msgid "change a database role" msgstr "cambia un ruolo di database" -#: sql_help.c:4504 +#: sql_help.c:4512 msgid "change the definition of a rule" msgstr "cambia la definizione di una regola" -#: sql_help.c:4509 +#: sql_help.c:4517 msgid "change the definition of a schema" msgstr "cambia la definizione di uno schema" -#: sql_help.c:4514 +#: 
sql_help.c:4522 msgid "change the definition of a sequence generator" msgstr "cambia la definizione di un generatore di sequenza" -#: sql_help.c:4519 +#: sql_help.c:4527 msgid "change the definition of a foreign server" msgstr "cambia la definizione di un server esterno" -#: sql_help.c:4524 +#: sql_help.c:4532 msgid "change the definition of an extended statistics object" msgstr "cambia la definizione di una statistica estesa" -#: sql_help.c:4529 +#: sql_help.c:4537 msgid "change the definition of a subscription" msgstr "cambia la definizione di una sottoscrizione" -#: sql_help.c:4534 +#: sql_help.c:4542 msgid "change a server configuration parameter" msgstr "cambia un parametro di configurazione del server" -#: sql_help.c:4539 +#: sql_help.c:4547 msgid "change the definition of a table" msgstr "cambia la definizione di una tabella" -#: sql_help.c:4544 +#: sql_help.c:4552 msgid "change the definition of a tablespace" msgstr "cambia la definizione di un tablespace" -#: sql_help.c:4549 +#: sql_help.c:4557 msgid "change the definition of a text search configuration" msgstr "cambia la definizione di una configurazione di ricerca testo" -#: sql_help.c:4554 +#: sql_help.c:4562 msgid "change the definition of a text search dictionary" msgstr "cambia la definizione di un dizionario di ricerca testo" -#: sql_help.c:4559 +#: sql_help.c:4567 msgid "change the definition of a text search parser" msgstr "cambia la definizione di un analizzatore di ricerca testo" -#: sql_help.c:4564 +#: sql_help.c:4572 msgid "change the definition of a text search template" msgstr "cambia la definizione di un modello di ricerca testo" -#: sql_help.c:4569 +#: sql_help.c:4577 msgid "change the definition of a trigger" msgstr "cambia la definizione di un trigger" -#: sql_help.c:4574 +#: sql_help.c:4582 msgid "change the definition of a type" msgstr "cambia la definizione di un tipo di dato" -#: sql_help.c:4584 +#: sql_help.c:4592 msgid "change the definition of a user mapping" msgstr "cambia la definizione di una mappatura degli" -#: sql_help.c:4589 +#: sql_help.c:4597 msgid "change the definition of a view" msgstr "cambia la definizione di una vista" -#: sql_help.c:4594 +#: sql_help.c:4602 msgid "collect statistics about a database" msgstr "raccogli statistiche sul database" -#: sql_help.c:4599 sql_help.c:5239 +#: sql_help.c:4607 sql_help.c:5247 msgid "start a transaction block" msgstr "avvia un blocco di transazione" -#: sql_help.c:4604 +#: sql_help.c:4612 msgid "force a write-ahead log checkpoint" msgstr "forza un checkpoint del write-ahead log" -#: sql_help.c:4609 +#: sql_help.c:4617 msgid "close a cursor" msgstr "chiudi un cursore" -#: sql_help.c:4614 +#: sql_help.c:4622 msgid "cluster a table according to an index" msgstr "raggruppa una tabella in base ad un indice" -#: sql_help.c:4619 +#: sql_help.c:4627 msgid "define or change the comment of an object" msgstr "definisci o modifica il commento di un oggetto" -#: sql_help.c:4624 sql_help.c:5074 +#: sql_help.c:4632 sql_help.c:5082 msgid "commit the current transaction" msgstr "rendi persistente la transazione corrente" -#: sql_help.c:4629 +#: sql_help.c:4637 msgid "commit a transaction that was earlier prepared for two-phase commit" msgstr "concludi transazione che è stata precedentemente preparata per un commit a due fasi" -#: sql_help.c:4634 +#: sql_help.c:4642 msgid "copy data between a file and a table" msgstr "copia i dati tra un file ed una tabella" -#: sql_help.c:4639 +#: sql_help.c:4647 msgid "define a new access method" msgstr "definisci un nuovo metodo di 
accesso" -#: sql_help.c:4644 +#: sql_help.c:4652 msgid "define a new aggregate function" msgstr "definisci una nuova funzione aggregata" -#: sql_help.c:4649 +#: sql_help.c:4657 msgid "define a new cast" msgstr "definisci una nuova conversione di tipi" -#: sql_help.c:4654 +#: sql_help.c:4662 msgid "define a new collation" msgstr "definisci un nuovo ordinamento" -#: sql_help.c:4659 +#: sql_help.c:4667 msgid "define a new encoding conversion" msgstr "definisci una nuova conversione di codifica" -#: sql_help.c:4664 +#: sql_help.c:4672 msgid "create a new database" msgstr "crea un nuovo database" -#: sql_help.c:4669 +#: sql_help.c:4677 msgid "define a new domain" msgstr "definisci un nuovo dominio" -#: sql_help.c:4674 +#: sql_help.c:4682 msgid "define a new event trigger" msgstr "definisci un nuovo trigger di evento" -#: sql_help.c:4679 +#: sql_help.c:4687 msgid "install an extension" msgstr "installa un'estensione" -#: sql_help.c:4684 +#: sql_help.c:4692 msgid "define a new foreign-data wrapper" msgstr "definisci un nuovo wrapper di dati esterni" -#: sql_help.c:4689 +#: sql_help.c:4697 msgid "define a new foreign table" msgstr "definisci una nuova tabella esterna" -#: sql_help.c:4694 +#: sql_help.c:4702 msgid "define a new function" msgstr "definisci una nuova funzione" -#: sql_help.c:4699 sql_help.c:4744 sql_help.c:4829 +#: sql_help.c:4707 sql_help.c:4752 sql_help.c:4837 msgid "define a new database role" msgstr "definisci un nuovo ruolo database" -#: sql_help.c:4704 +#: sql_help.c:4712 msgid "define a new index" msgstr "crea un nuovo indice" -#: sql_help.c:4709 +#: sql_help.c:4717 msgid "define a new procedural language" msgstr "definisci un nuovo linguaggio procedurale" -#: sql_help.c:4714 +#: sql_help.c:4722 msgid "define a new materialized view" msgstr "definisci una nuova vista materializzata" -#: sql_help.c:4719 +#: sql_help.c:4727 msgid "define a new operator" msgstr "definisci un nuovo operatore" -#: sql_help.c:4724 +#: sql_help.c:4732 msgid "define a new operator class" msgstr "definisci una nuova classe di operatori" -#: sql_help.c:4729 +#: sql_help.c:4737 msgid "define a new operator family" msgstr "definisci una nuova famiglia operatore" -#: sql_help.c:4734 +#: sql_help.c:4742 msgid "define a new row level security policy for a table" msgstr "definisci una nuova regola di sicurezza per riga per una tabella" -#: sql_help.c:4739 +#: sql_help.c:4747 msgid "define a new publication" msgstr "definisci una nuova pubblicazione" -#: sql_help.c:4749 +#: sql_help.c:4757 msgid "define a new rewrite rule" msgstr "definisci una nuova regola di riscrittura" -#: sql_help.c:4754 +#: sql_help.c:4762 msgid "define a new schema" msgstr "crea un nuovo schema" -#: sql_help.c:4759 +#: sql_help.c:4767 msgid "define a new sequence generator" msgstr "definisci un nuovo generatore di sequenze" -#: sql_help.c:4764 +#: sql_help.c:4772 msgid "define a new foreign server" msgstr "definisci un nuovo server esterno" -#: sql_help.c:4769 +#: sql_help.c:4777 msgid "define extended statistics" msgstr "definisci una statistica estesa" -#: sql_help.c:4774 +#: sql_help.c:4782 msgid "define a new subscription" msgstr "definisci una nuova sottoscrizione" -#: sql_help.c:4779 +#: sql_help.c:4787 msgid "define a new table" msgstr "crea una nuova tabella" -#: sql_help.c:4784 sql_help.c:5204 +#: sql_help.c:4792 sql_help.c:5212 msgid "define a new table from the results of a query" msgstr "crea una nuova tabella dai risultati di una query" -#: sql_help.c:4789 +#: sql_help.c:4797 msgid "define a new tablespace" msgstr "crea un 
nuovo tablespace" -#: sql_help.c:4794 +#: sql_help.c:4802 msgid "define a new text search configuration" msgstr "definisci una nuova configurazione di ricerca testo" -#: sql_help.c:4799 +#: sql_help.c:4807 msgid "define a new text search dictionary" msgstr "definisci un nuovo dizionario di ricerca testo" -#: sql_help.c:4804 +#: sql_help.c:4812 msgid "define a new text search parser" msgstr "definisci un nuovo analizzatore di ricerca testo" -#: sql_help.c:4809 +#: sql_help.c:4817 msgid "define a new text search template" msgstr "definisci un nuovo modello di ricerca testo" -#: sql_help.c:4814 +#: sql_help.c:4822 msgid "define a new transform" msgstr "definisci una nuova trasformazione" -#: sql_help.c:4819 +#: sql_help.c:4827 msgid "define a new trigger" msgstr "definisci un nuovo trigger" -#: sql_help.c:4824 +#: sql_help.c:4832 msgid "define a new data type" msgstr "definisci un nuovo tipo di dato" -#: sql_help.c:4834 +#: sql_help.c:4842 msgid "define a new mapping of a user to a foreign server" msgstr "definisci una nuova mappatura di un utente ad un server esterno" -#: sql_help.c:4839 +#: sql_help.c:4847 msgid "define a new view" msgstr "definisci una nuova vista" -#: sql_help.c:4844 +#: sql_help.c:4852 msgid "deallocate a prepared statement" msgstr "dealloca una istruzione preparata" -#: sql_help.c:4849 +#: sql_help.c:4857 msgid "define a cursor" msgstr "definisci un cursore" -#: sql_help.c:4854 +#: sql_help.c:4862 msgid "delete rows of a table" msgstr "elimina le righe di una tabella" -#: sql_help.c:4859 +#: sql_help.c:4867 msgid "discard session state" msgstr "cancella lo stato della sessione" -#: sql_help.c:4864 +#: sql_help.c:4872 msgid "execute an anonymous code block" msgstr "esegui un blocco di codice anonimo" -#: sql_help.c:4869 +#: sql_help.c:4877 msgid "remove an access method" msgstr "rimuovi un metodo di accesso" -#: sql_help.c:4874 +#: sql_help.c:4882 msgid "remove an aggregate function" msgstr "elimina una funzione aggregata" -#: sql_help.c:4879 +#: sql_help.c:4887 msgid "remove a cast" msgstr "elimina una conversione di tipi" -#: sql_help.c:4884 +#: sql_help.c:4892 msgid "remove a collation" msgstr "elimina un ordinamento" -#: sql_help.c:4889 +#: sql_help.c:4897 msgid "remove a conversion" msgstr "elimina una conversione" -#: sql_help.c:4894 +#: sql_help.c:4902 msgid "remove a database" msgstr "elimina un database" -#: sql_help.c:4899 +#: sql_help.c:4907 msgid "remove a domain" msgstr "elimina un dominio" -#: sql_help.c:4904 +#: sql_help.c:4912 msgid "remove an event trigger" msgstr "elimina un trigger di evento" -#: sql_help.c:4909 +#: sql_help.c:4917 msgid "remove an extension" msgstr "elimina una estensione" -#: sql_help.c:4914 +#: sql_help.c:4922 msgid "remove a foreign-data wrapper" msgstr "elimina un wrapper di dati esterni" -#: sql_help.c:4919 +#: sql_help.c:4927 msgid "remove a foreign table" msgstr "elimina una tabella esterna" -#: sql_help.c:4924 +#: sql_help.c:4932 msgid "remove a function" msgstr "elimina una funzione" -#: sql_help.c:4929 sql_help.c:4979 sql_help.c:5059 +#: sql_help.c:4937 sql_help.c:4987 sql_help.c:5067 msgid "remove a database role" msgstr "elimina un ruolo di database" -#: sql_help.c:4934 +#: sql_help.c:4942 msgid "remove an index" msgstr "elimina un indice" -#: sql_help.c:4939 +#: sql_help.c:4947 msgid "remove a procedural language" msgstr "elimina un linguaggio procedurale" -#: sql_help.c:4944 +#: sql_help.c:4952 msgid "remove a materialized view" msgstr "elimina una vista materializzata" -#: sql_help.c:4949 +#: sql_help.c:4957 msgid 
"remove an operator" msgstr "elimina un operatore" -#: sql_help.c:4954 +#: sql_help.c:4962 msgid "remove an operator class" msgstr "elimina una classe di operatori" -#: sql_help.c:4959 +#: sql_help.c:4967 msgid "remove an operator family" msgstr "elimina una famiglia operatore" -#: sql_help.c:4964 +#: sql_help.c:4972 msgid "remove database objects owned by a database role" msgstr "elimina gli oggetti database di proprietà di un ruolo di database" -#: sql_help.c:4969 +#: sql_help.c:4977 msgid "remove a row level security policy from a table" msgstr "rimuovi una regola di sicurezza per riga da una tabella" -#: sql_help.c:4974 +#: sql_help.c:4982 msgid "remove a publication" msgstr "rimuovi una pubblicazione" -#: sql_help.c:4984 +#: sql_help.c:4992 msgid "remove a rewrite rule" msgstr "elimina una regola di riscrittura" -#: sql_help.c:4989 +#: sql_help.c:4997 msgid "remove a schema" msgstr "elimina uno schema" -#: sql_help.c:4994 +#: sql_help.c:5002 msgid "remove a sequence" msgstr "elimina una sequenza" -#: sql_help.c:4999 +#: sql_help.c:5007 msgid "remove a foreign server descriptor" msgstr "elimina una descrizione server esterno" -#: sql_help.c:5004 +#: sql_help.c:5012 msgid "remove extended statistics" msgstr "rimuovi una statistica estesa" -#: sql_help.c:5009 +#: sql_help.c:5017 msgid "remove a subscription" msgstr "rimuovi una sottoscrizione" -#: sql_help.c:5014 +#: sql_help.c:5022 msgid "remove a table" msgstr "elimina una tabella" -#: sql_help.c:5019 +#: sql_help.c:5027 msgid "remove a tablespace" msgstr "elimina un tablespace" -#: sql_help.c:5024 +#: sql_help.c:5032 msgid "remove a text search configuration" msgstr "elimina una configurazione di ricerca testo" -#: sql_help.c:5029 +#: sql_help.c:5037 msgid "remove a text search dictionary" msgstr "elimina un dizionario di ricerca testo" -#: sql_help.c:5034 +#: sql_help.c:5042 msgid "remove a text search parser" msgstr "elimina un analizzatore di ricerca testo" -#: sql_help.c:5039 +#: sql_help.c:5047 msgid "remove a text search template" msgstr "elimina un modello di ricerca testo" -#: sql_help.c:5044 +#: sql_help.c:5052 msgid "remove a transform" msgstr "elimina una trasformazione" -#: sql_help.c:5049 +#: sql_help.c:5057 msgid "remove a trigger" msgstr "elimina un trigger" -#: sql_help.c:5054 +#: sql_help.c:5062 msgid "remove a data type" msgstr "elimina un tipo di dato" -#: sql_help.c:5064 +#: sql_help.c:5072 msgid "remove a user mapping for a foreign server" msgstr "elimina la mappatura degli utenti per un server esterno" -#: sql_help.c:5069 +#: sql_help.c:5077 msgid "remove a view" msgstr "elimina una vista" -#: sql_help.c:5079 +#: sql_help.c:5087 msgid "execute a prepared statement" msgstr "esegui una istruzione preparata" -#: sql_help.c:5084 +#: sql_help.c:5092 msgid "show the execution plan of a statement" msgstr "mostra il piano di esecuzione di una istruzione" -#: sql_help.c:5089 +#: sql_help.c:5097 msgid "retrieve rows from a query using a cursor" msgstr "estrai delle righe da una query utilizzando un cursore" -#: sql_help.c:5094 +#: sql_help.c:5102 msgid "define access privileges" msgstr "definisci i privilegi di accesso" -#: sql_help.c:5099 +#: sql_help.c:5107 msgid "import table definitions from a foreign server" msgstr "importa le definizioni di tabella da un server remoto" -#: sql_help.c:5104 +#: sql_help.c:5112 msgid "create new rows in a table" msgstr "crea nuove righe in una tabella" -#: sql_help.c:5109 +#: sql_help.c:5117 msgid "listen for a notification" msgstr "attendi l'arrivo di notifiche" -#: sql_help.c:5114 +#: 
sql_help.c:5122 msgid "load a shared library file" msgstr "carica un file di libreria condivisa" -#: sql_help.c:5119 +#: sql_help.c:5127 msgid "lock a table" msgstr "blocca una tabella" -#: sql_help.c:5124 +#: sql_help.c:5132 msgid "position a cursor" msgstr "posiziona un cursore" -#: sql_help.c:5129 +#: sql_help.c:5137 msgid "generate a notification" msgstr "genera una notifica" -#: sql_help.c:5134 +#: sql_help.c:5142 msgid "prepare a statement for execution" msgstr "prepara una istruzione per l'esecuzione" -#: sql_help.c:5139 +#: sql_help.c:5147 msgid "prepare the current transaction for two-phase commit" msgstr "prepara la transazione corrente per un commit a due fasi" -#: sql_help.c:5144 +#: sql_help.c:5152 msgid "change the ownership of database objects owned by a database role" msgstr "cambia il proprietario degli oggetti del database posseduti da un ruolo" -#: sql_help.c:5149 +#: sql_help.c:5157 msgid "replace the contents of a materialized view" msgstr "sostituisci il contenuto di una vista materializzata" -#: sql_help.c:5154 +#: sql_help.c:5162 msgid "rebuild indexes" msgstr "ricostruisci indici" -#: sql_help.c:5159 +#: sql_help.c:5167 msgid "destroy a previously defined savepoint" msgstr "distruggi un punto di salvataggio precedentemente definito" -#: sql_help.c:5164 +#: sql_help.c:5172 msgid "restore the value of a run-time parameter to the default value" msgstr "ripristina un parametro di esecuzione al suo valore di predefinito" -#: sql_help.c:5169 +#: sql_help.c:5177 msgid "remove access privileges" msgstr "elimina i privilegi di accesso" -#: sql_help.c:5179 +#: sql_help.c:5187 msgid "cancel a transaction that was earlier prepared for two-phase commit" msgstr "annulla una transazione che era stata preparata per un commit a due fasi" -#: sql_help.c:5184 +#: sql_help.c:5192 msgid "roll back to a savepoint" msgstr "annulla le modifiche fino a un punto di salvataggio" -#: sql_help.c:5189 +#: sql_help.c:5197 msgid "define a new savepoint within the current transaction" msgstr "definisci un nuovo punto di salvataggio per la transazione corrente" -#: sql_help.c:5194 +#: sql_help.c:5202 msgid "define or change a security label applied to an object" msgstr "definisci o modifica un'etichetta di sicurezza applicata a un oggetto" -#: sql_help.c:5199 sql_help.c:5244 sql_help.c:5274 +#: sql_help.c:5207 sql_help.c:5252 sql_help.c:5282 msgid "retrieve rows from a table or view" msgstr "estrai righe da una tabella o una vista" -#: sql_help.c:5209 +#: sql_help.c:5217 msgid "change a run-time parameter" msgstr "modifica un parametro di esecuzione" -#: sql_help.c:5214 +#: sql_help.c:5222 msgid "set constraint check timing for the current transaction" msgstr "imposta il momento del controllo dei vincoli per la transazione corrente" -#: sql_help.c:5219 +#: sql_help.c:5227 msgid "set the current user identifier of the current session" msgstr "imposta l'identificativo utente della sessione corrente" -#: sql_help.c:5224 +#: sql_help.c:5232 msgid "set the session user identifier and the current user identifier of the current session" msgstr "imposta l'identificazione utente della sessione e l'identificazione utente corrente della sessione corrente" -#: sql_help.c:5229 +#: sql_help.c:5237 msgid "set the characteristics of the current transaction" msgstr "imposta le caratteristiche della transazione corrente" -#: sql_help.c:5234 +#: sql_help.c:5242 msgid "show the value of a run-time parameter" msgstr "mostra il valore di un parametro di esecuzione" -#: sql_help.c:5249 +#: sql_help.c:5257 msgid "empty a 
table or set of tables" msgstr "svuota una tabella o una lista di tabelle" -#: sql_help.c:5254 +#: sql_help.c:5262 msgid "stop listening for a notification" msgstr "termina l'attesa di notifiche" -#: sql_help.c:5259 +#: sql_help.c:5267 msgid "update rows of a table" msgstr "modifica le righe di una tabella" -#: sql_help.c:5264 +#: sql_help.c:5272 msgid "garbage-collect and optionally analyze a database" msgstr "pulisci ed eventualmente analizza il database" -#: sql_help.c:5269 +#: sql_help.c:5277 msgid "compute a set of rows" msgstr "genera una sequenza di righe" -#: startup.c:184 +#: startup.c:187 #, c-format msgid "%s: -1 can only be used in non-interactive mode\n" msgstr "%s: -1 può essere usato solo in modalità non interattiva\n" -#: startup.c:287 +#: startup.c:290 #, c-format msgid "%s: could not open log file \"%s\": %s\n" msgstr "%s: apertura del file di log \"%s\" fallita: %s\n" -#: startup.c:394 +#: startup.c:397 #, c-format msgid "" "Type \"help\" for help.\n" @@ -5627,27 +5710,27 @@ msgstr "" "Digita \"help\" per avere un aiuto.\n" "\n" -#: startup.c:543 +#: startup.c:546 #, c-format msgid "%s: could not set printing parameter \"%s\"\n" msgstr "%s: impostazione del parametro di stampa \"%s\" fallito\n" -#: startup.c:645 +#: startup.c:648 #, c-format msgid "Try \"%s --help\" for more information.\n" msgstr "Prova \"%s --help\" per maggiori informazioni.\n" -#: startup.c:662 +#: startup.c:665 #, c-format msgid "%s: warning: extra command-line argument \"%s\" ignored\n" msgstr "%s: attenzione: parametro in eccesso \"%s\" nella riga di comando ignorato\n" -#: startup.c:711 +#: startup.c:714 #, c-format msgid "%s: could not find own program executable\n" msgstr "%s: il proprio programma eseguibile non è stato trovato\n" -#: tab-complete.c:4182 +#: tab-complete.c:4186 #, c-format msgid "" "tab completion query failed: %s\n" @@ -5660,7 +5743,7 @@ msgstr "" #: variables.c:139 #, c-format -msgid "unrecognized value \"%s\" for \"%s\": boolean expected\n" +msgid "unrecognized value \"%s\" for \"%s\": Boolean expected\n" msgstr "valore \"%s\" non valido per \"%s\": è necessario un booleano\n" #: variables.c:176 diff --git a/src/bin/psql/po/ja.po b/src/bin/psql/po/ja.po index 309fbbdf7a..34bd8a4e25 100644 --- a/src/bin/psql/po/ja.po +++ b/src/bin/psql/po/ja.po @@ -1,1681 +1,2080 @@ -# translation of psql. -# HOTTA Michihide , 2010. +# LANGUAGE message translation file for psql +# Copyright (C) 2018 PostgreSQL Global Development Group +# This file is distributed under the same license as the psql (PostgreSQL) package. +# Michihide Hotta , 2010. 
# msgid "" msgstr "" -"Project-Id-Version: PostgreSQL 9.4.2\n" +"Project-Id-Version: psql (PostgreSQL) 10\n" "Report-Msgid-Bugs-To: pgsql-bugs@postgresql.org\n" -"POT-Creation-Date: 2015-05-23 22:59+0900\n" -"PO-Revision-Date: 2015-05-27 18:55+0900\n" -"Last-Translator: KOIZUMI Satoru \n" -"Language-Team: jpug-doc \n" +"POT-Creation-Date: 2018-01-23 23:44+0000\n" +"PO-Revision-Date: 2018-02-13 09:40+0900\n" "Language: ja\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" -"Plural-Forms: nplurals=2; plural=n != 1;\n" +"Plural-Forms: nplurals=1; plural=0;\n" +"Last-Translator: Michihide Hotta \n" +"Language-Team: \n" +"X-Generator: Poedit 2.0.6\n" #: ../../common/exec.c:127 ../../common/exec.c:241 ../../common/exec.c:284 #, c-format msgid "could not identify current directory: %s" -msgstr "カレントディレクトリを識別できませんでした。: %s" +msgstr "現在のディレクトリを識別できませんでした: %s" #: ../../common/exec.c:146 #, c-format msgid "invalid binary \"%s\"" -msgstr "\"%s\" は有効なバイナリファイルではありません。" +msgstr "無効なバイナリ \"%s\"" #: ../../common/exec.c:195 #, c-format msgid "could not read binary \"%s\"" -msgstr "バイナリファイル \"%s\" を読み込めませんでした。" +msgstr "バイナリ \"%s\" を読み取ることができませんでした" #: ../../common/exec.c:202 #, c-format msgid "could not find a \"%s\" to execute" -msgstr "実行に必要な \"%s\" が見つかりません。" +msgstr "実行対象の \"%s\" が見つかりませんでした" #: ../../common/exec.c:257 ../../common/exec.c:293 #, c-format msgid "could not change directory to \"%s\": %s" -msgstr "ディレクトリ\"%s\"に移動できませんでした: %s" +msgstr "ディレクトリ \"%s\" に移動できませんでした: %s" #: ../../common/exec.c:272 #, c-format msgid "could not read symbolic link \"%s\"" -msgstr "シンボリックリンク \"%s\" を読み込めませんでした。" +msgstr "シンボリックリンク \"%s\" を読み取ることができませんでした" #: ../../common/exec.c:523 #, c-format msgid "pclose failed: %s" msgstr "pcloseが失敗しました: %s" -#: ../../common/fe_memutils.c:33 ../../common/fe_memutils.c:60 -#: ../../common/fe_memutils.c:83 command.c:321 input.c:205 mainloop.c:72 -#: mainloop.c:234 +#: ../../common/fe_memutils.c:35 ../../common/fe_memutils.c:75 +#: ../../common/fe_memutils.c:98 command.c:608 input.c:227 mainloop.c:82 +#: mainloop.c:276 #, c-format msgid "out of memory\n" msgstr "メモリ不足です\n" -#: ../../common/fe_memutils.c:77 +#: ../../common/fe_memutils.c:92 #, c-format msgid "cannot duplicate null pointer (internal error)\n" -msgstr "null ポインタを複製できません(内部エラー)。\n" +msgstr "null ポインターを複製することはできません(内部エラー) \n" -#: ../../common/username.c:45 +#: ../../common/username.c:43 #, c-format msgid "could not look up effective user ID %ld: %s" msgstr "実効ユーザID %ld が見つかりませんでした: %s" -#: ../../common/username.c:47 command.c:276 +#: ../../common/username.c:45 command.c:555 msgid "user does not exist" msgstr "ユーザが存在しません" -#: ../../common/username.c:62 +#: ../../common/username.c:60 #, c-format msgid "user name lookup failure: error code %lu" -msgstr "ユーザ名の検索に失敗: エラーコード %lu" +msgstr "ユーザー名の検索に失敗: エラー コード %lu" -#: ../../common/wait_error.c:47 +#: ../../common/wait_error.c:45 #, c-format msgid "command not executable" -msgstr "コマンドは実行形式ではありません" +msgstr "コマンドが実行形式ではありません" -#: ../../common/wait_error.c:51 +#: ../../common/wait_error.c:49 #, c-format msgid "command not found" msgstr "コマンドが見つかりません" -#: ../../common/wait_error.c:56 +#: ../../common/wait_error.c:54 #, c-format msgid "child process exited with exit code %d" -msgstr "子プロセスが終了コード %d で終了しました。" +msgstr "子プロセスが終了コード %d で終了しました" -#: ../../common/wait_error.c:63 +#: ../../common/wait_error.c:61 #, c-format msgid "child process was terminated by exception 0x%X" -msgstr "子プロセスが例外 0x%X で終了させられました。" +msgstr "子プロセスが例外 0x%X 
で強制終了しました" -#: ../../common/wait_error.c:73 +#: ../../common/wait_error.c:71 #, c-format msgid "child process was terminated by signal %s" -msgstr "子プロセスがシグナル %s で終了させられました。" +msgstr "子プロセスがシグナル %s で強制終了しました" -#: ../../common/wait_error.c:77 +#: ../../common/wait_error.c:75 #, c-format msgid "child process was terminated by signal %d" -msgstr "子プロセスがシグナル %d で終了させられました。" +msgstr "子プロセスがシグナル %d で強制終了しました" -#: ../../common/wait_error.c:82 +#: ../../common/wait_error.c:80 #, c-format msgid "child process exited with unrecognized status %d" -msgstr "子プロセスが不明な状態%dにより終了しました。" +msgstr "子プロセスは認識できないステータス %d で終了しました" -#: command.c:117 +#: ../../fe_utils/print.c:353 +#, c-format +msgid "(%lu row)" +msgid_plural "(%lu rows)" +msgstr[0] "(%lu 行)" + +#: ../../fe_utils/print.c:2913 +#, c-format +msgid "Interrupted\n" +msgstr "割り込み\n" + +#: ../../fe_utils/print.c:2977 +#, c-format +msgid "Cannot add header to table content: column count of %d exceeded.\n" +msgstr "テーブルの内容にヘッダーを追加できません: 列数 %d が制限値を超えています。\n" + +#: ../../fe_utils/print.c:3017 +#, c-format +msgid "Cannot add cell to table content: total cell count of %d exceeded.\n" +msgstr "テーブルの内容にセルを追加できません: セルの合計数 %d が制限値を超えています。\n" + +#: ../../fe_utils/print.c:3266 +#, c-format +msgid "invalid output format (internal error): %d" +msgstr "出力フォーマットが無効(内部エラー):%d" + +#: ../../fe_utils/psqlscan.l:713 +#, c-format +msgid "skipping recursive expansion of variable \"%s\"\n" +msgstr "変数 \"%s\" の再帰展開をスキップしています\n" + +#: command.c:223 #, c-format msgid "Invalid command \\%s. Try \\? for help.\n" -msgstr "\\%sコマンドは無効です。\\? でヘルプを参照してください。\n" +msgstr "\\%s は無効なコマンドです。\\? でヘルプを参照してください。\n" -#: command.c:119 +#: command.c:225 #, c-format msgid "invalid command \\%s\n" -msgstr "\\%sは無効なコマンドです\n" +msgstr "\\%s は無効なコマンドです\n" -#: command.c:130 +#: command.c:243 #, c-format msgid "\\%s: extra argument \"%s\" ignored\n" -msgstr "\\%s: 余分な引数 \"%s\" は無視されました。\n" +msgstr "\\%s: 余分な引数 \"%s\" は無視されました\n" + +#: command.c:295 +#, c-format +msgid "\\%s command ignored; use \\endif or Ctrl-C to exit current \\if block\n" +msgstr "\\%s コマンドは無視されます。現在の \\if ブロックを抜けるには \\endif または Ctrl-C を使用します。\n" -#: command.c:274 +#: command.c:553 #, c-format msgid "could not get home directory for user ID %ld: %s\n" -msgstr "ユーザID %ld のホームディレクトリを特定できません: %s\n" +msgstr "ユーザー ID %ld のホームディレクトリを取得できませんでした : %s\n" -#: command.c:292 +#: command.c:571 #, c-format msgid "\\%s: could not change directory to \"%s\": %s\n" -msgstr "\\%s: ディレクトリを \"%s\" に変更できません:%s\n" +msgstr "\\%s: ディレクトリを \"%s\" に変更できませんでした: %s\n" -#: command.c:307 common.c:446 common.c:886 +#: command.c:596 common.c:648 common.c:706 common.c:1242 #, c-format msgid "You are currently not connected to a database.\n" -msgstr "現在データベースには接続していません。\n" +msgstr "現在データベースに接続していません。\n" -#: command.c:334 +#: command.c:621 #, c-format msgid "You are connected to database \"%s\" as user \"%s\" via socket in \"%s\" at port \"%s\".\n" -msgstr "データベース\"%s\"にユーザ\"%s\"でソケット\"%s\"経由のポート\"%s\"で接続しています。\n" +msgstr "データベース \"%s\" にユーザ \"%s\" として、ソケット \"%s\" のポート \"%s\" を介して接続しています。\n" -#: command.c:337 +#: command.c:624 #, c-format msgid "You are connected to database \"%s\" as user \"%s\" on host \"%s\" at port \"%s\".\n" -msgstr "ホスト\"%3$s\"上のポート\"%4$s\"のデータベース\"%1$s\"にユーザ\"%2$s\"で接続しています\n" +msgstr "データベース \"%s\" にユーザ \"%s\" として、ホスト \"%s\" 上のポート \"%s\" を介して接続しています。\n" -#: command.c:538 command.c:608 command.c:1403 +#: command.c:915 command.c:1005 command.c:1114 command.c:2523 #, c-format msgid "no query buffer\n" -msgstr "問い合わせバッファがありません。\n" +msgstr 
"問い合わせバッファがありません\n" -#: command.c:571 command.c:3035 +#: command.c:948 command.c:4784 #, c-format msgid "invalid line number: %s\n" msgstr "無効な行番号です: %s\n" -#: command.c:602 +#: command.c:998 #, c-format -msgid "The server (version %d.%d) does not support editing function source.\n" -msgstr "このサーバーのバージョン (%d.%d) は関数のソース編集をサポートしていません\n" +msgid "The server (version %s) does not support editing function source.\n" +msgstr "このサーバー (バージョン %s) は関数ソースコードの編集をサポートしていません。\n" -#: command.c:682 +#: command.c:1073 command.c:1154 msgid "No changes" -msgstr "変更なし" +msgstr "変更されていません" + +#: command.c:1107 +#, c-format +msgid "The server (version %s) does not support editing view definitions.\n" +msgstr "このサーバー (バージョン %s) はビュー定義の編集をサポートしていません。\n" -#: command.c:736 +#: command.c:1231 #, c-format msgid "%s: invalid encoding name or conversion procedure not found\n" -msgstr "%s: 符号化方式名が無効、または変換用プロシージャが見つかりません。\n" +msgstr "%s: エンコーディング名が無効か、または変換プロシージャが見つかりません。\n" + +#: command.c:1266 command.c:1888 command.c:3169 command.c:4886 common.c:173 +#: common.c:244 common.c:541 common.c:1288 common.c:1316 common.c:1417 +#: copy.c:489 copy.c:708 large_obj.c:156 large_obj.c:191 large_obj.c:253 +#, c-format +msgid "%s" +msgstr "%s" + +#: command.c:1270 +msgid "out of memory" +msgstr "メモリ不足です" -#: command.c:833 command.c:883 command.c:897 command.c:914 command.c:1021 -#: command.c:1180 command.c:1383 command.c:1414 +#: command.c:1273 +msgid "There is no previous error." +msgstr "直前のエラーはありません。" + +#: command.c:1444 command.c:1749 command.c:1763 command.c:1780 command.c:1940 +#: command.c:2177 command.c:2490 command.c:2530 #, c-format msgid "\\%s: missing required argument\n" msgstr "\\%s: 必要な引数がありません\n" -#: command.c:946 +#: command.c:1575 +#, c-format +msgid "\\elif: cannot occur after \\else\n" +msgstr "\\elif: \\else の後には置けません\n" + +#: command.c:1580 +#, c-format +msgid "\\elif: no matching \\if\n" +msgstr "\\elif: これに対応する \\if がありません\n" + +#: command.c:1644 +#, c-format +msgid "\\else: cannot occur after \\else\n" +msgstr "\\else: \\else の後には置けません\n" + +#: command.c:1649 +#, c-format +msgid "\\else: no matching \\if\n" +msgstr "\\else: これに対応する \\if がありません\n" + +#: command.c:1689 +#, c-format +msgid "\\endif: no matching \\if\n" +msgstr "\\endif: これに対応する \\if がありません\n" + +#: command.c:1844 msgid "Query buffer is empty." msgstr "問い合わせバッファは空です。" -#: command.c:956 +#: command.c:1866 msgid "Enter new password: " -msgstr "新しいパスワード: " +msgstr "新しいパスワードを入力してください: " -#: command.c:957 +#: command.c:1867 msgid "Enter it again: " -msgstr "もう一度入力してください:" +msgstr "もう一度入力してください: " -#: command.c:961 +#: command.c:1871 #, c-format msgid "Passwords didn't match.\n" -msgstr "パスワードが一致しません。\n" - -#: command.c:979 -#, c-format -msgid "Password encryption failed.\n" -msgstr "パスワードの暗号化に失敗しました。\n" +msgstr "パスワードが一致しませんでした。\n" -#: command.c:1050 command.c:1161 command.c:1388 +#: command.c:1970 #, c-format -msgid "\\%s: error while setting variable\n" -msgstr "\\%s: 変数を設定している時にエラー\n" +msgid "\\%s: could not read value for variable\n" +msgstr "\\%s: 変数の値を読み取ることができませんでした\n" -#: command.c:1108 +#: command.c:2073 msgid "Query buffer reset (cleared)." 
msgstr "問い合わせバッファがリセット(クリア)されました。" -#: command.c:1120 +#: command.c:2095 #, c-format msgid "Wrote history to file \"%s\".\n" -msgstr "ファイル\"%s\"に履歴を出力しました。\n" +msgstr "ファイル \"%s\" にヒストリーを出力しました。\n" -#: command.c:1185 +#: command.c:2182 #, c-format msgid "\\%s: environment variable name must not contain \"=\"\n" -msgstr "\\%s: 環境変数の名前には\"=\"を含められません\n" +msgstr "\\%s: 環境変数名に \"=\" を含めることはできません\n" -#: command.c:1227 +#: command.c:2238 #, c-format -msgid "The server (version %d.%d) does not support showing function source.\n" -msgstr "このサーバ(バージョン%d.%d)は関数ソースの表示をサポートしていません。\n" +msgid "The server (version %s) does not support showing function source.\n" +msgstr "このサーバー (バージョン %s) は関数ソースの表示をサポートしていません。\n" -#: command.c:1233 +#: command.c:2245 #, c-format msgid "function name is required\n" -msgstr "関数名が必要です\n" +msgstr "関数名が必要です。\n" + +#: command.c:2332 +#, c-format +msgid "The server (version %s) does not support showing view definitions.\n" +msgstr "このサーバー (バージョン %s) はビュー定義の表示をサポートしていません。\n" + +#: command.c:2339 +#, c-format +msgid "view name is required\n" +msgstr "ビュー名が必要です\n" -#: command.c:1368 +#: command.c:2462 msgid "Timing is on." msgstr "タイミングは on です。" -#: command.c:1370 +#: command.c:2464 msgid "Timing is off." msgstr "タイミングは off です。" -#: command.c:1431 command.c:1451 command.c:2072 command.c:2075 command.c:2078 -#: command.c:2084 command.c:2086 command.c:2094 command.c:2104 command.c:2113 -#: command.c:2127 command.c:2144 command.c:2203 common.c:74 copy.c:333 -#: copy.c:393 copy.c:408 psqlscan.l:1677 psqlscan.l:1688 psqlscan.l:1698 +#: command.c:2549 command.c:2577 command.c:3537 command.c:3540 command.c:3543 +#: command.c:3549 command.c:3551 command.c:3559 command.c:3569 command.c:3578 +#: command.c:3592 command.c:3609 command.c:3667 common.c:69 copy.c:332 +#: copy.c:392 copy.c:405 psqlscanslash.l:761 psqlscanslash.l:772 +#: psqlscanslash.l:782 #, c-format msgid "%s: %s\n" msgstr "%s: %s\n" -#: command.c:1530 -#, c-format -msgid "+ opt(%d) = |%s|\n" -msgstr "+ opt(%d) = |%s|\n" - -#: command.c:1556 startup.c:184 +#: command.c:2961 startup.c:205 msgid "Password: " -msgstr "パスワード: " +msgstr "パスワード: " -#: command.c:1561 startup.c:186 +#: command.c:2966 startup.c:207 #, c-format msgid "Password for user %s: " -msgstr "ユーザ %s のパスワード: " +msgstr "ユーザー %s のパスワード: " -#: command.c:1608 +#: command.c:3016 #, c-format msgid "All connection parameters must be supplied because no database connection exists\n" -msgstr "データベース接続がありませんのですべての接続パラメータを指定しなければなりません\n" - -#: command.c:1725 command.c:3069 common.c:120 common.c:413 common.c:478 -#: common.c:929 common.c:954 common.c:1051 copy.c:492 copy.c:695 -#: large_obj.c:158 large_obj.c:193 large_obj.c:255 psqlscan.l:1949 -#, c-format -msgid "%s" -msgstr "%s" +msgstr "データベース接続がないため、すべての接続パラメータを指定しなければなりません\n" -#: command.c:1729 +#: command.c:3173 #, c-format msgid "Previous connection kept\n" msgstr "以前の接続は保持されています。\n" -#: command.c:1733 +#: command.c:3177 #, c-format msgid "\\connect: %s" msgstr "\\connect: %s" -#: command.c:1766 +#: command.c:3213 #, c-format msgid "You are now connected to database \"%s\" as user \"%s\" via socket in \"%s\" at port \"%s\".\n" -msgstr "ポート\"%4$s\"のソケット\"%3$s\"経由でデータベース\"%1$s\"にユーザ\"%2$s\"として接続しました。\n" +msgstr "データベース \"%s\" にユーザ \"%s\" として、ソケット \"%s\" のポート \"%s\" を介して接続しました。\n" -#: command.c:1769 +#: command.c:3216 #, c-format msgid "You are now connected to database \"%s\" as user \"%s\" on host \"%s\" at port \"%s\".\n" -msgstr "ホスト\"%3$s\"上のポート\"%4$s\"でデータベース\"%1$s\"にユーザ\"%2$s\"として接続しました。\n" +msgstr "データベース \"%s\" 
にユーザ \"%s\" として、ホスト \"%s\" のポート \"%s\" を介して接続しました。\n" -#: command.c:1773 +#: command.c:3220 #, c-format msgid "You are now connected to database \"%s\" as user \"%s\".\n" -msgstr "データベース \"%s\" にユーザ\"%s\"として接続しました。\n" +msgstr "データベース \"%s\" にユーザ \"%s\" として接続しました。\n" -#: command.c:1807 +#: command.c:3253 #, c-format msgid "%s (%s, server %s)\n" -msgstr "%s (%s, サーバー %s)\n" +msgstr "%s (%s、サーバー %s)\n" -#: command.c:1815 +#: command.c:3261 #, c-format msgid "" -"WARNING: %s major version %d.%d, server major version %d.%d.\n" +"WARNING: %s major version %s, server major version %s.\n" " Some psql features might not work.\n" msgstr "" -"注意: %s メジャーバージョン %d.%d, サーバーバージョン %d.%d.\n" +"警告: %s のメジャーバージョンは %s ですが、サーバーのメジャーバージョンは %s です。\n" " psql の機能の中で、動作しないものがあるかもしれません。\n" -#: command.c:1845 +#: command.c:3298 #, c-format -msgid "SSL connection (protocol: %s, cipher: %s, bits: %d, compression: %s)\n" -msgstr "SSL接続(プロトコル: %s, 暗号化方式: %s, ビット長: %d, 圧縮: %s)\n" +msgid "SSL connection (protocol: %s, cipher: %s, bits: %s, compression: %s)\n" +msgstr "SSL 接続 (プロトコル: %s、暗号化方式: %s、ビット長: %s、圧縮: %s)\n" + +#: command.c:3299 command.c:3300 command.c:3301 +msgid "unknown" +msgstr "不明" -#: command.c:1847 help.c:46 +#: command.c:3302 help.c:45 msgid "off" msgstr "オフ" -#: command.c:1847 help.c:46 +#: command.c:3302 help.c:45 msgid "on" msgstr "オン" -#: command.c:1856 -#, c-format -msgid "SSL connection (unknown cipher)\n" -msgstr "SSL 接続 (未定義の暗号化方式)\n" - -#: command.c:1877 +#: command.c:3322 #, c-format msgid "" "WARNING: Console code page (%u) differs from Windows code page (%u)\n" " 8-bit characters might not work correctly. See psql reference\n" " page \"Notes for Windows users\" for details.\n" msgstr "" -"警告:コンソールのコードページ (%u) が Windows のコードページ (%u) と\n" -" 異なるため、8 ビット文字列が正しく動作しない可能性があります。\n" -" 詳細は psql リファレンスマニュアルの \"Notes for Windows users\"\n" -" (ウィンドウズユーザのために)を参照してください。\n" -"\n" +"警告:コンソールのコードページ (%u) が Windows のコードページ (%u) と異なるため、\n" +" 8 ビット文字列が正しく動作しない可能性があります。詳細は psql リファレンスマニュアルの\n" +" \"Notes for Windows users\"(Windowsユーザ向けの注意)を参照してください。\n" -#: command.c:1961 +#: command.c:3426 #, c-format msgid "environment variable PSQL_EDITOR_LINENUMBER_ARG must be set to specify a line number\n" -msgstr "行番号を指定するためにはPSQL_EDITOR_LINENUMBER_ARG変数を設定しなければなりません\n" +msgstr "環境変数 PSQL_EDITOR_LINENUMBER_ARG で行番号を指定する必要があります。\n" -#: command.c:1990 +#: command.c:3455 #, c-format msgid "could not start editor \"%s\"\n" msgstr "エディタ \"%s\" を起動できませんでした。\n" -#: command.c:1992 +#: command.c:3457 #, c-format msgid "could not start /bin/sh\n" msgstr "/bin/sh を起動できませんでした。\n" -#: command.c:2030 +#: command.c:3495 #, c-format msgid "could not locate temporary directory: %s\n" -msgstr "一時ディレクトリに移動できません: %s\n" +msgstr "一時ディレクトリが見つかりませんでした: %s\n" -#: command.c:2057 +#: command.c:3522 #, c-format msgid "could not open temporary file \"%s\": %s\n" -msgstr "一時ファイル \"%s\" を開けません: %s\n" +msgstr "一時ファイル \"%s\" を開けませんでした: %s\n" -#: command.c:2325 +#: command.c:3796 #, c-format -msgid "\\pset: allowed formats are unaligned, aligned, wrapped, html, latex, troff-ms\n" -msgstr "\\pset: 有効なフォーマットは unaligned, aligned, wrapped, html, latex, troff-ms です。\n" +msgid "\\pset: allowed formats are unaligned, aligned, wrapped, html, asciidoc, latex, latex-longtable, troff-ms\n" +msgstr "\\pset: 有効なフォーマットは unaligned, aligned, wrapped, html, asciidoc, latex, latex-longtable, troff-ms です。\n" -#: command.c:2344 +#: command.c:3814 #, c-format msgid "\\pset: allowed line styles are ascii, old-ascii, unicode\n" -msgstr "\\pset: 有効な行スタイルは ascii, old-ascii, 
unicode です。\n" +msgstr "\\pset: 有効な線のスタイルは ascii, old-ascii, unicode です。\n" + +#: command.c:3829 +#, c-format +msgid "\\pset: allowed Unicode border line styles are single, double\n" +msgstr "\\pset: 有効な Unicode 罫線のスタイルは single, double です。\n" + +#: command.c:3844 +#, c-format +msgid "\\pset: allowed Unicode column line styles are single, double\n" +msgstr "\\pset: 有効な Unicode 列罫線のスタイルは single, double です。\n" + +#: command.c:3859 +#, c-format +msgid "\\pset: allowed Unicode header line styles are single, double\n" +msgstr "\\pset: 有効な Unicode ヘッダー罫線のスタイルは single, double です。\n" -#: command.c:2490 command.c:2641 +#: command.c:4024 command.c:4203 #, c-format msgid "\\pset: unknown option: %s\n" msgstr "\\pset: 未定義のオプション:%s\n" -#: command.c:2508 +#: command.c:4042 #, c-format msgid "Border style is %d.\n" -msgstr "境界線のスタイルは %d です。\n" +msgstr "罫線スタイルは %d です。\n" -#: command.c:2514 +#: command.c:4048 #, c-format msgid "Target width is unset.\n" -msgstr "対象幅はセットされていません。\n" +msgstr "ターゲットの幅が設定されていません。\n" -#: command.c:2516 +#: command.c:4050 #, c-format msgid "Target width is %d.\n" -msgstr "対象幅は%dです。\n" +msgstr "ターゲットの幅は %d です。\n" -#: command.c:2523 +#: command.c:4057 #, c-format msgid "Expanded display is on.\n" msgstr "拡張表示は on です。\n" -#: command.c:2525 +#: command.c:4059 #, c-format msgid "Expanded display is used automatically.\n" -msgstr "拡張表示が自動的に使用されます\n" +msgstr "拡張表示が自動的に使われます。\n" -#: command.c:2527 +#: command.c:4061 #, c-format msgid "Expanded display is off.\n" msgstr "拡張表示は off です。\n" -#: command.c:2534 command.c:2542 +#: command.c:4068 command.c:4076 #, c-format msgid "Field separator is zero byte.\n" msgstr "フィールド区切り文字はゼロバイトです。\n" -#: command.c:2536 +#: command.c:4070 #, c-format msgid "Field separator is \"%s\".\n" msgstr "フィールド区切り文字は \"%s\" です。\n" -#: command.c:2549 +#: command.c:4083 #, c-format msgid "Default footer is on.\n" -msgstr "デフォルトのフッタは on です。\n" +msgstr "デフォルトフッター(行数の表示)は on です。\n" -#: command.c:2551 +#: command.c:4085 #, c-format msgid "Default footer is off.\n" -msgstr "デフォルトのフッタは off です。\n" +msgstr "デフォルトフッター(行数の表示)は off です。\n" -#: command.c:2557 +#: command.c:4091 #, c-format msgid "Output format is %s.\n" -msgstr "出力フォーマットは %s です。\n" +msgstr "出力形式は %s です。\n" -#: command.c:2563 +#: command.c:4097 #, c-format msgid "Line style is %s.\n" -msgstr "境界線のスタイルは %s です。\n" +msgstr "線のスタイルは %s です。\n" -#: command.c:2570 +#: command.c:4104 #, c-format msgid "Null display is \"%s\".\n" msgstr "Null 表示は \"%s\" です。\n" -#: command.c:2578 +#: command.c:4112 #, c-format msgid "Locale-adjusted numeric output is on.\n" -msgstr "「数値出力のロケール調整」は on です。\n" +msgstr "『数値出力時のロケール調整』は on です。\n" -#: command.c:2580 +#: command.c:4114 #, c-format msgid "Locale-adjusted numeric output is off.\n" -msgstr "「数値出力のロケール調整」は off です。\n" +msgstr "『数値出力時のロケール調整』は off です。\n" -#: command.c:2587 +#: command.c:4121 #, c-format msgid "Pager is used for long output.\n" -msgstr "出力が長い場合はページャが使われます。\n" +msgstr "表示が縦に長くなる場合はページャーを使います。\n" -#: command.c:2589 +#: command.c:4123 #, c-format msgid "Pager is always used.\n" -msgstr "常にページャが使われます。\n" +msgstr "常にページャーを使います。\n" -#: command.c:2591 +#: command.c:4125 #, c-format msgid "Pager usage is off.\n" -msgstr "「ページャを使う」は off です。\n" +msgstr "「ページャーを使う」は off です。\n" + +#: command.c:4131 +#, c-format +msgid "Pager won't be used for less than %d line.\n" +msgid_plural "Pager won't be used for less than %d lines.\n" +msgstr[0] "%d 行未満の場合、ページャーは使われません。\n" -#: command.c:2598 command.c:2608 +#: command.c:4141 command.c:4151 #, c-format msgid "Record separator is zero byte.\n" 
-msgstr "レコード区切り文字はゼロバイトです。\n" +msgstr "レコードの区切り文字はゼロバイトです\n" -#: command.c:2600 +#: command.c:4143 #, c-format msgid "Record separator is .\n" msgstr "レコード区切り文字は です。\n" -#: command.c:2602 +#: command.c:4145 #, c-format msgid "Record separator is \"%s\".\n" -msgstr "レコード区切り文字は \"%s\" です。\n" +msgstr "レコード区切り記号は \"%s\"です。\n" -#: command.c:2615 +#: command.c:4158 #, c-format msgid "Table attributes are \"%s\".\n" -msgstr "テーブル属性は \"%s\" です。\n" +msgstr "テーブル属性は \"%s\"です。\n" -#: command.c:2618 +#: command.c:4161 #, c-format msgid "Table attributes unset.\n" -msgstr "テーブル属性はセットされていません。\n" +msgstr "テーブル属性は設定されていません。\n" -#: command.c:2625 +#: command.c:4168 #, c-format msgid "Title is \"%s\".\n" msgstr "タイトルは \"%s\" です。\n" -#: command.c:2627 +#: command.c:4170 #, c-format msgid "Title is unset.\n" -msgstr "タイトルはセットされていません。\n" +msgstr "タイトルは設定されていません。\n" -#: command.c:2634 +#: command.c:4177 #, c-format msgid "Tuples only is on.\n" msgstr "「タプルのみ表示」は on です。\n" -#: command.c:2636 +#: command.c:4179 #, c-format msgid "Tuples only is off.\n" msgstr "「タプルのみ表示」は off です。\n" -#: command.c:2787 +#: command.c:4185 +#, c-format +msgid "Unicode border line style is \"%s\".\n" +msgstr "Unicode の罫線スタイルは \"%s\" です。\n" + +#: command.c:4191 +#, c-format +msgid "Unicode column line style is \"%s\".\n" +msgstr "Unicode 行罫線のスタイルは \"%s\" です。\n" + +#: command.c:4197 +#, c-format +msgid "Unicode header line style is \"%s\".\n" +msgstr "Unicode ヘッダー行のスタイルは \"%s\" です。\n" + +#: command.c:4357 #, c-format msgid "\\!: failed\n" msgstr "\\!: 失敗\n" -#: command.c:2807 command.c:2865 +#: command.c:4382 common.c:754 #, c-format msgid "\\watch cannot be used with an empty query\n" -msgstr "\\watchを空の問い合わせで使用することができません\n" +msgstr "\\watch は空の問い合わせでは使えません\n" -#: command.c:2828 +#: command.c:4423 #, c-format -msgid "Watch every %lds\t%s" -msgstr "%ld秒毎に監視します\t%s" +msgid "%s\t%s (every %gs)\n" +msgstr "%s\t%s (%g 秒毎)\n" -#: command.c:2872 +#: command.c:4426 #, c-format -msgid "\\watch cannot be used with COPY\n" -msgstr "\\watchではCOPYを使用することができません\n" +msgid "%s (every %gs)\n" +msgstr "%s (%g 秒毎)\n" -#: command.c:2878 +#: command.c:4480 command.c:4487 common.c:654 common.c:661 common.c:1271 #, c-format -msgid "unexpected result status for \\watch\n" -msgstr "\\watchに対する想定外の結果状態\n" +msgid "" +"********* QUERY **********\n" +"%s\n" +"**************************\n" +"\n" +msgstr "" +"******** 問い合わせ ******\n" +"%s\n" +"**************************\n" +"\n" + +#: command.c:4679 +#, c-format +msgid "\"%s.%s\" is not a view\n" +msgstr "\"%s.%s\" はビューではありません\n" + +#: command.c:4695 +#, c-format +msgid "could not parse reloptions array\n" +msgstr "reloptions 配列を解析できませんでした。\n" + +#: common.c:158 +#, c-format +msgid "cannot escape without active connection\n" +msgstr "有効な接続がないのでエスケープできません。\n" -#: common.c:287 +#: common.c:199 +#, c-format +msgid "shell command argument contains a newline or carriage return: \"%s\"\n" +msgstr "シェルコマンドの引数に改行(LF)またはキャリッジリターン(CR)が含まれています: \"%s\"\n" + +#: common.c:415 #, c-format msgid "connection to server was lost\n" -msgstr "サーバーへの接続が切れました。\n" +msgstr "サーバへの接続が失われました。\n" -#: common.c:291 +#: common.c:419 #, c-format msgid "The connection to the server was lost. 
Attempting reset: " -msgstr "サーバーへの接続が切れました。リセットしています: " +msgstr "サーバーへの接続が失われました。リセットしています: " -#: common.c:296 +#: common.c:424 #, c-format msgid "Failed.\n" msgstr "失敗。\n" -#: common.c:303 +#: common.c:431 #, c-format msgid "Succeeded.\n" msgstr "成功。\n" -#: common.c:403 common.c:683 common.c:851 +#: common.c:531 common.c:1034 common.c:1206 #, c-format msgid "unexpected PQresultStatus: %d\n" -msgstr "想定外のPQresultStatus: %d\n" +msgstr "予期しない PQresultStatus: %d\n" -#: common.c:452 common.c:459 common.c:912 +#: common.c:593 #, c-format -msgid "" -"********* QUERY **********\n" -"%s\n" -"**************************\n" -"\n" -msgstr "" -"********* 問い合わせ ********\n" -"%s\n" -"*****************************\n" -"\n" +msgid "Time: %.3f ms\n" +msgstr "時間: %.3f ミリ秒\n" + +#: common.c:608 +#, c-format +msgid "Time: %.3f ms (%02d:%06.3f)\n" +msgstr "時間: %.3f ミリ秒(%02d:%06.3f)\n" + +#: common.c:617 +#, c-format +msgid "Time: %.3f ms (%02d:%02d:%06.3f)\n" +msgstr "時間: %.3f ミリ秒 (%02d:%02d:%06.3f)\n" + +#: common.c:624 +#, c-format +msgid "Time: %.3f ms (%.0f d %02d:%02d:%06.3f)\n" +msgstr "時間: %.3f ミリ秒 (%.0f 日 %02d:%02d:%06.3f)\n" + +#: common.c:761 +#, c-format +msgid "\\watch cannot be used with COPY\n" +msgstr "\\watch は COPY と一緒には使えません\n" -#: common.c:513 +#: common.c:766 +#, c-format +msgid "unexpected result status for \\watch\n" +msgstr "\\watch で予期しない結果のステータス\n" + +#: common.c:795 #, c-format msgid "Asynchronous notification \"%s\" with payload \"%s\" received from server process with PID %d.\n" -msgstr "PID %3$d を持つサーバープロセスから、ペイロード \"%2$s\" を持つ非同期通知 \"%1$s\" を受信しました。\n" +msgstr "PID %3$d のサーバープロセスから、ペイロード \"%2$s\" を持つ非同期通知 \"%1$s\" を受信しました。\n" -#: common.c:516 +#: common.c:798 #, c-format msgid "Asynchronous notification \"%s\" received from server process with PID %d.\n" -msgstr "PID %2$d を持つサーバープロセスから非同期通知 \"%1$s\" を受信しました。\n" +msgstr "PID %2$d のサーバープロセスから非同期通知 \"%1$s\" を受信しました。\n" -#: common.c:578 +#: common.c:860 #, c-format msgid "no rows returned for \\gset\n" -msgstr "\\gsetに対して行が返されませんでした\n" +msgstr "\\gset に対して返すべき行がありません\n" -#: common.c:583 +#: common.c:865 #, c-format msgid "more than one row returned for \\gset\n" -msgstr "\\gsetに対して複数の行が返されました\n" - -#: common.c:609 -#, c-format -msgid "could not set variable \"%s\"\n" -msgstr "変数 \"%s\" をセットできませんでした\n" +msgstr "\\gset に対して複数の行が返されました\n" -#: common.c:894 +#: common.c:1251 #, c-format msgid "" "***(Single step mode: verify command)*******************************************\n" "%s\n" "***(press return to proceed or enter x and return to cancel)********************\n" msgstr "" -"***(シングルステップモード: 問い合わせを検査してください)********\n" +"***(シングルステップモード: コマンドを確認してください)********\n" "%s\n" "***([Enter] を押して進むか、x [Enter] でキャンセル)**************\n" -#: common.c:945 +#: common.c:1306 #, c-format -msgid "The server (version %d.%d) does not support savepoints for ON_ERROR_ROLLBACK.\n" -msgstr "このサーバー(バージョン%d.%d)では、ON_ERROR_ROLLBACK用のセーブポイントをサポートしていません。\n" +msgid "The server (version %s) does not support savepoints for ON_ERROR_ROLLBACK.\n" +msgstr "このサーバー (バージョン %s) は ON_ERROR_ROLLBACK 用のセーブポイントをサポートしていません。\n" -#: common.c:1039 +#: common.c:1362 #, c-format -msgid "unexpected transaction status (%d)\n" -msgstr "想定外のトランザクション状態 (%d)\n" +msgid "STATEMENT: %s\n" +msgstr "ステートメント: %s\n" -#: common.c:1067 +#: common.c:1405 #, c-format -msgid "Time: %.3f ms\n" -msgstr "時間: %.3f ms\n" +msgid "unexpected transaction status (%d)\n" +msgstr "予期しないトランザクションのステータス (%d)\n" -#: copy.c:98 +#: copy.c:99 #, c-format msgid "\\copy: arguments required\n" -msgstr "\\copy: 
引数がありません。\n" +msgstr "\\copy: 引数が必要です\n" -#: copy.c:253 +#: copy.c:254 #, c-format msgid "\\copy: parse error at \"%s\"\n" -msgstr "\\copy: \"%s\" でパースエラー発生\n" +msgstr "\\copy: \"%s\" で構文解析エラー\n" -#: copy.c:255 +#: copy.c:256 #, c-format msgid "\\copy: parse error at end of line\n" -msgstr "\\copy: 行末でパースエラー発生\n" +msgstr "\\copy: 行の末尾で構文解析エラー\n" -#: copy.c:330 +#: copy.c:329 #, c-format msgid "could not execute command \"%s\": %s\n" -msgstr "コマンド\"%s\"を実行できませんでした: %s\n" +msgstr "コマンド \"%s\" を実行できませんでした: %s\n" -#: copy.c:346 +#: copy.c:345 #, c-format msgid "could not stat file \"%s\": %s\n" -msgstr "ファイル\"%s\"のstatができませんでした: %s\n" +msgstr "ファイル \"%s\" を stat できませんでした: %s\n" -#: copy.c:350 +#: copy.c:349 #, c-format msgid "%s: cannot copy from/to a directory\n" msgstr "%s: ディレクトリから/ディレクトリへのコピーはできません。\n" -#: copy.c:387 +#: copy.c:386 #, c-format msgid "could not close pipe to external command: %s\n" -msgstr "外部コマンドに対するパイプをクローズできませんでした: %s\n" +msgstr "外部コマンドへのパイプを閉じることができませんでした: %s\n" -#: copy.c:455 copy.c:466 +#: copy.c:452 copy.c:463 #, c-format msgid "could not write COPY data: %s\n" -msgstr "COPY 対象データを書き込めませんでした:%s\n" +msgstr "COPY データを書き込めませんでした: %s\n" -#: copy.c:473 +#: copy.c:470 #, c-format msgid "COPY data transfer failed: %s" -msgstr "COPY 対象データの転送に失敗しました:%s" +msgstr "COPY データの転送に失敗しました: %s" -#: copy.c:534 +#: copy.c:531 msgid "canceled by user" -msgstr "ユーザによりキャンセルされました" +msgstr "ユーザーによってキャンセルされました" -#: copy.c:544 +#: copy.c:542 msgid "" "Enter data to be copied followed by a newline.\n" -"End with a backslash and a period on a line by itself." +"End with a backslash and a period on a line by itself, or an EOF signal." msgstr "" -"コピーするデータに続いて改行を入力します。\n" -"バックスラッシュ(\\)とピリオドだけの行で終了します。" +"コピーするデータに続いて改行を入力してください。\n" +"バックスラッシュとピリオドだけの行、もしくは EOF シグナルで終了します。" -#: copy.c:667 +#: copy.c:670 msgid "aborted because of read failure" -msgstr "読み込みに失敗したため異常終了しました" +msgstr "読み取りエラーのため中止" -#: copy.c:691 +#: copy.c:704 msgid "trying to exit copy mode" msgstr "コピーモードを終了しようとしています。" -#: describe.c:71 describe.c:259 describe.c:491 describe.c:615 describe.c:758 -#: describe.c:844 describe.c:914 describe.c:2759 describe.c:2964 -#: describe.c:3054 describe.c:3299 describe.c:3436 describe.c:3665 -#: describe.c:3737 describe.c:3748 describe.c:3807 describe.c:4215 -#: describe.c:4294 +#: crosstabview.c:123 +#, c-format +msgid "\\crosstabview: statement did not return a result set\n" +msgstr "\\crosstabview: ステートメントは結果セットを返しませんでした。\n" + +#: crosstabview.c:129 +#, c-format +msgid "\\crosstabview: query must return at least three columns\n" +msgstr "\\crosstabview: 問い合わせは、少なくとも3つの列を返す必要があります。\n" + +#: crosstabview.c:156 +#, c-format +msgid "\\crosstabview: vertical and horizontal headers must be different columns\n" +msgstr "\\crosstabview: 垂直方向と水平方向のヘッダーは異なった列にする必要があります。\n" + +#: crosstabview.c:172 +#, c-format +msgid "\\crosstabview: data column must be specified when query returns more than three columns\n" +msgstr "\\crosstabview: 問い合わせが 4 つ以上の列を返す場合、データ列を指定する必要があります。\n" + +#: crosstabview.c:228 +#, c-format +msgid "\\crosstabview: maximum number of columns (%d) exceeded\n" +msgstr "列数が制限値 (%d) を超えています。\n" + +#: crosstabview.c:397 +#, c-format +msgid "\\crosstabview: query result contains multiple data values for row \"%s\", column \"%s\"\n" +msgstr "\\crosstabview: 問い合わせ結果の中の \"%s\" 行 \"%s\" 列に複数のデータ値が含まれています。\n" + +#: crosstabview.c:645 +#, c-format +msgid "\\crosstabview: column number %d is out of range 1..%d\n" +msgstr "\\crosstabview: 列番号 %d が範囲外です(1..%d)\n" + +#: crosstabview.c:670 +#, c-format 
+msgid "\\crosstabview: ambiguous column name: \"%s\"\n" +msgstr "\\crosstabview: 列名を一意に特定できません: \"%s\"\n" + +#: crosstabview.c:678 +#, c-format +msgid "\\crosstabview: column name not found: \"%s\"\n" +msgstr "\\crosstabview: 列名が見つかりませんでした: \"%s\"\n" + +#: describe.c:74 describe.c:346 describe.c:603 describe.c:735 describe.c:879 +#: describe.c:1040 describe.c:1112 describe.c:3371 describe.c:3583 +#: describe.c:3674 describe.c:3922 describe.c:4067 describe.c:4308 +#: describe.c:4383 describe.c:4394 describe.c:4456 describe.c:4881 +#: describe.c:4964 msgid "Schema" msgstr "スキーマ" -#: describe.c:72 describe.c:156 describe.c:164 describe.c:260 describe.c:492 -#: describe.c:616 describe.c:677 describe.c:759 describe.c:915 describe.c:2760 -#: describe.c:2886 describe.c:2965 describe.c:3055 describe.c:3134 -#: describe.c:3300 describe.c:3364 describe.c:3437 describe.c:3666 -#: describe.c:3738 describe.c:3749 describe.c:3808 describe.c:3997 -#: describe.c:4078 describe.c:4292 +#: describe.c:75 describe.c:164 describe.c:231 describe.c:239 describe.c:347 +#: describe.c:604 describe.c:736 describe.c:797 describe.c:880 describe.c:1113 +#: describe.c:3372 describe.c:3506 describe.c:3584 describe.c:3675 +#: describe.c:3754 describe.c:3923 describe.c:3992 describe.c:4068 +#: describe.c:4309 describe.c:4384 describe.c:4395 describe.c:4457 +#: describe.c:4654 describe.c:4738 describe.c:4962 describe.c:5134 +#: describe.c:5341 msgid "Name" msgstr "名前" -#: describe.c:73 describe.c:272 describe.c:318 describe.c:335 +#: describe.c:76 describe.c:359 describe.c:405 describe.c:422 msgid "Result data type" msgstr "結果のデータ型" -#: describe.c:81 describe.c:94 describe.c:98 describe.c:273 describe.c:319 -#: describe.c:336 +#: describe.c:84 describe.c:97 describe.c:101 describe.c:360 describe.c:406 +#: describe.c:423 msgid "Argument data types" msgstr "引数のデータ型" -#: describe.c:105 describe.c:182 describe.c:365 describe.c:534 describe.c:631 -#: describe.c:702 describe.c:917 describe.c:1486 describe.c:2564 -#: describe.c:2793 describe.c:2917 describe.c:2991 describe.c:3064 -#: describe.c:3147 describe.c:3215 describe.c:3307 describe.c:3373 -#: describe.c:3438 describe.c:3574 describe.c:3614 describe.c:3682 -#: describe.c:3741 describe.c:3750 describe.c:3809 describe.c:4023 -#: describe.c:4100 describe.c:4229 describe.c:4295 large_obj.c:291 -#: large_obj.c:301 +#: describe.c:108 describe.c:174 describe.c:262 describe.c:468 describe.c:652 +#: describe.c:751 describe.c:822 describe.c:1115 describe.c:1845 +#: describe.c:3161 describe.c:3406 describe.c:3537 describe.c:3611 +#: describe.c:3684 describe.c:3767 describe.c:3835 describe.c:3935 +#: describe.c:4001 describe.c:4069 describe.c:4210 describe.c:4252 +#: describe.c:4325 describe.c:4387 describe.c:4396 describe.c:4458 +#: describe.c:4680 describe.c:4760 describe.c:4895 describe.c:4965 +#: large_obj.c:289 large_obj.c:299 msgid "Description" msgstr "説明" -#: describe.c:123 +#: describe.c:126 msgid "List of aggregate functions" msgstr "集約関数一覧" -#: describe.c:144 +#: describe.c:151 +#, c-format +msgid "The server (version %s) does not support access methods.\n" +msgstr "このサーバー (バージョン %s) はアクセスメソッドをサポートしていません。\n" + +#: describe.c:165 +msgid "Index" +msgstr "インデックス" + +#: describe.c:166 describe.c:366 describe.c:411 describe.c:428 describe.c:887 +#: describe.c:1051 describe.c:1582 describe.c:1606 describe.c:1808 +#: describe.c:3381 describe.c:3585 describe.c:4757 +msgid "Type" +msgstr "型" + +#: describe.c:173 describe.c:4659 +msgid "Handler" +msgstr "ハンドラー" + +#: describe.c:192 
+msgid "List of access methods" +msgstr "アクセスメソッド一覧" + +#: describe.c:218 #, c-format -msgid "The server (version %d.%d) does not support tablespaces.\n" -msgstr "このサーバーのバージョン (%d.%d) はテーブルスペースをサポートしていません。\n" +msgid "The server (version %s) does not support tablespaces.\n" +msgstr "このサーバー (バージョン %s) はテーブル空間をサポートしていません。\n" -#: describe.c:157 describe.c:165 describe.c:362 describe.c:678 describe.c:843 -#: describe.c:2769 describe.c:2890 describe.c:3136 describe.c:3365 -#: describe.c:3998 describe.c:4079 large_obj.c:290 +#: describe.c:232 describe.c:240 describe.c:456 describe.c:642 describe.c:798 +#: describe.c:1039 describe.c:3382 describe.c:3510 describe.c:3756 +#: describe.c:3993 describe.c:4655 describe.c:4739 describe.c:5135 +#: describe.c:5247 describe.c:5342 large_obj.c:288 msgid "Owner" msgstr "所有者" -#: describe.c:158 describe.c:166 +#: describe.c:233 describe.c:241 msgid "Location" msgstr "場所" -#: describe.c:177 describe.c:2382 +#: describe.c:252 describe.c:2980 msgid "Options" msgstr "オプション" -#: describe.c:199 +#: describe.c:257 describe.c:615 describe.c:814 describe.c:3398 describe.c:3402 +msgid "Size" +msgstr "サイズ" + +#: describe.c:279 msgid "List of tablespaces" -msgstr "テーブルスペース一覧" +msgstr "テーブル空間一覧" -#: describe.c:236 +#: describe.c:320 #, c-format msgid "\\df only takes [antwS+] as options\n" -msgstr "\\dfはオプションとして[antwS+]のみを取ることができます\n" +msgstr "\\df で指定できるオプションは [antwS+] のみです。\n" -#: describe.c:242 +#: describe.c:328 #, c-format -msgid "\\df does not take a \"w\" option with server version %d.%d\n" -msgstr "サーバーバージョン%d.%dでは\\dfは\"w\"オプションを受け付けません\n" +msgid "\\df does not take a \"w\" option with server version %s\n" +msgstr "サーバーバージョン %s の \\df では \"w\" オプションは指定できません。\n" #. translator: "agg" is short for "aggregate" -#: describe.c:275 describe.c:321 describe.c:338 +#: describe.c:362 describe.c:408 describe.c:425 msgid "agg" -msgstr "agg(集約)" +msgstr "集約" -#: describe.c:276 +#: describe.c:363 msgid "window" -msgstr "window(ウィンドウ)" +msgstr "ウィンドウ" -#: describe.c:277 describe.c:322 describe.c:339 describe.c:1028 +#: describe.c:364 describe.c:409 describe.c:426 describe.c:1249 msgid "trigger" -msgstr "trigger(トリガ)" +msgstr "トリガー" -#: describe.c:278 describe.c:323 describe.c:340 +#: describe.c:365 describe.c:410 describe.c:427 msgid "normal" -msgstr "normal(通常)" - -#: describe.c:279 describe.c:324 describe.c:341 describe.c:765 describe.c:853 -#: describe.c:1455 describe.c:2768 describe.c:2966 describe.c:4097 -msgid "Type" -msgstr "型" +msgstr "通常" -#: describe.c:355 -msgid "definer" -msgstr "定義元" - -#: describe.c:356 -msgid "invoker" -msgstr "呼び出し元" - -#: describe.c:357 -msgid "Security" -msgstr "セキュリティ" - -#: describe.c:358 +#: describe.c:438 msgid "immutable" -msgstr "不変" +msgstr "IMMUTABLE" -#: describe.c:359 +#: describe.c:439 msgid "stable" -msgstr "安定" +msgstr "STABLE" -#: describe.c:360 +#: describe.c:440 msgid "volatile" -msgstr "揮発性" +msgstr "VOLATILE" -#: describe.c:361 +#: describe.c:441 msgid "Volatility" -msgstr "揮発性" +msgstr "関数の変動性分類" -#: describe.c:363 +#: describe.c:449 +msgid "restricted" +msgstr "制限付き" + +#: describe.c:450 +msgid "safe" +msgstr "安全" + +#: describe.c:451 +msgid "unsafe" +msgstr "危険" + +#: describe.c:452 +msgid "Parallel" +msgstr "並列実行" + +#: describe.c:457 +msgid "definer" +msgstr "定義ロール" + +#: describe.c:458 +msgid "invoker" +msgstr "起動ロール" + +#: describe.c:459 +msgid "Security" +msgstr "セキュリティ" + +#: describe.c:466 msgid "Language" -msgstr "言語" +msgstr "手続き言語" -#: describe.c:364 +#: describe.c:467 msgid "Source code" msgstr "ソースコード" -#: 
describe.c:462 +#: describe.c:566 msgid "List of functions" msgstr "関数一覧" -#: describe.c:502 +#: describe.c:614 msgid "Internal name" msgstr "内部名" -#: describe.c:503 describe.c:694 describe.c:2785 describe.c:2789 -msgid "Size" -msgstr "サイズ" - -#: describe.c:524 +#: describe.c:636 msgid "Elements" -msgstr "要素" +msgstr "構成要素" -#: describe.c:574 +#: describe.c:693 msgid "List of data types" msgstr "データ型一覧" -#: describe.c:617 +#: describe.c:737 msgid "Left arg type" msgstr "左辺の型" -#: describe.c:618 +#: describe.c:738 msgid "Right arg type" msgstr "右辺の型" -#: describe.c:619 +#: describe.c:739 msgid "Result type" msgstr "結果の型" -#: describe.c:624 describe.c:3206 describe.c:3573 +#: describe.c:744 describe.c:3826 describe.c:4209 msgid "Function" msgstr "関数" -#: describe.c:649 +#: describe.c:769 msgid "List of operators" msgstr "演算子一覧" -#: describe.c:679 +#: describe.c:799 msgid "Encoding" msgstr "エンコーディング" -#: describe.c:684 describe.c:3301 +#: describe.c:804 describe.c:3924 msgid "Collate" msgstr "照合順序" -#: describe.c:685 describe.c:3302 +#: describe.c:805 describe.c:3925 msgid "Ctype" msgstr "Ctype(変換演算子)" -#: describe.c:698 +#: describe.c:818 msgid "Tablespace" -msgstr "テーブルスペース" +msgstr "テーブル空間" -#: describe.c:720 +#: describe.c:840 msgid "List of databases" msgstr "データベース一覧" -#: describe.c:760 describe.c:846 describe.c:2761 +#: describe.c:881 describe.c:886 describe.c:1042 describe.c:3373 +#: describe.c:3380 msgid "table" msgstr "テーブル" -#: describe.c:761 describe.c:2762 +#: describe.c:882 describe.c:3374 msgid "view" msgstr "ビュー" -#: describe.c:762 describe.c:2763 +#: describe.c:883 describe.c:3375 msgid "materialized view" msgstr "マテリアライズドビュー" -#: describe.c:763 describe.c:848 describe.c:2765 +#: describe.c:884 describe.c:1044 describe.c:3377 msgid "sequence" msgstr "シーケンス" -#: describe.c:764 describe.c:2767 +#: describe.c:885 describe.c:3379 msgid "foreign table" msgstr "外部テーブル" -#: describe.c:776 -msgid "Column access privileges" -msgstr "列のアクセス権限" +#: describe.c:898 +msgid "Column privileges" +msgstr "列の権限" -#: describe.c:802 describe.c:4439 describe.c:4443 +#: describe.c:929 describe.c:963 +msgid "Policies" +msgstr "ポリシー" + +#: describe.c:995 describe.c:5398 describe.c:5402 msgid "Access privileges" -msgstr "アクセス権" +msgstr "アクセス権限" -#: describe.c:831 +#: describe.c:1026 #, c-format -msgid "The server (version %d.%d) does not support altering default privileges.\n" -msgstr "このサーバー(バージョン%d.%d)は代替のデフォルト権限をサポートしていません。\n" +msgid "The server (version %s) does not support altering default privileges.\n" +msgstr "このサーバー (バージョン %s) は代替のデフォルト権限をサポートしていません。\n" -#: describe.c:850 +#: describe.c:1046 msgid "function" msgstr "関数" -#: describe.c:852 +#: describe.c:1048 msgid "type" msgstr "型" -#: describe.c:876 +#: describe.c:1050 +msgid "schema" +msgstr "スキーマ" + +#: describe.c:1074 msgid "Default access privileges" msgstr "デフォルトのアクセス権限" -#: describe.c:916 +#: describe.c:1114 msgid "Object" msgstr "オブジェクト" -#: describe.c:930 sql_help.c:1595 -msgid "constraint" -msgstr "制約" +#: describe.c:1128 +msgid "table constraint" +msgstr "テーブル制約" -#: describe.c:957 +#: describe.c:1150 +msgid "domain constraint" +msgstr "ドメイン制約" + +#: describe.c:1178 msgid "operator class" msgstr "演算子クラス" -#: describe.c:986 +#: describe.c:1207 msgid "operator family" msgstr "演算子族" -#: describe.c:1008 +#: describe.c:1229 msgid "rule" msgstr "ルール" -#: describe.c:1050 +#: describe.c:1271 msgid "Object descriptions" msgstr "オブジェクトの説明" -#: describe.c:1104 +#: describe.c:1327 describe.c:3469 #, c-format msgid "Did not find any relation 
named \"%s\".\n" -msgstr "\"%s\" という名前のリレーションが見つかりません。\n" +msgstr "\"%s\" という名前のリレーションは見つかりませんでした。\n" + +#: describe.c:1330 describe.c:3472 +#, c-format +msgid "Did not find any relations.\n" +msgstr "リレーションが見つかりませんでした。\n" -#: describe.c:1295 +#: describe.c:1537 #, c-format msgid "Did not find any relation with OID %s.\n" -msgstr "OID %s を持つリレーションが見つかりません。\n" +msgstr "OID %s を持つリレーションが見つかりませんでした。\n" + +#: describe.c:1583 describe.c:1607 +msgid "Start" +msgstr "開始" + +#: describe.c:1584 describe.c:1608 +msgid "Minimum" +msgstr "最小" + +#: describe.c:1585 describe.c:1609 +msgid "Maximum" +msgstr "最大" + +#: describe.c:1586 describe.c:1610 +msgid "Increment" +msgstr "増分" + +#: describe.c:1587 describe.c:1611 describe.c:3678 describe.c:3829 +msgid "yes" +msgstr "はい" -#: describe.c:1399 +#: describe.c:1588 describe.c:1612 describe.c:3678 describe.c:3827 +msgid "no" +msgstr "いいえ" + +#: describe.c:1589 describe.c:1613 +msgid "Cycles?" +msgstr "循環?" + +#: describe.c:1590 describe.c:1614 +msgid "Cache" +msgstr "キャッシュ" + +#: describe.c:1657 +#, c-format +msgid "Owned by: %s" +msgstr "所有者: %s" + +#: describe.c:1661 +#, c-format +msgid "Sequence for identity column: %s" +msgstr "識別列のシーケンス: %s" + +#: describe.c:1668 +#, c-format +msgid "Sequence \"%s.%s\"" +msgstr "シーケンス \"%s.%s\"" + +#: describe.c:1748 describe.c:1793 #, c-format msgid "Unlogged table \"%s.%s\"" msgstr "ログを取らないテーブル \"%s.%s\"" -#: describe.c:1402 +#: describe.c:1751 describe.c:1796 #, c-format msgid "Table \"%s.%s\"" msgstr "テーブル \"%s.%s\"" -#: describe.c:1406 +#: describe.c:1755 #, c-format msgid "View \"%s.%s\"" msgstr "ビュー \"%s.%s\"" -#: describe.c:1411 +#: describe.c:1760 #, c-format msgid "Unlogged materialized view \"%s.%s\"" msgstr "ログを取らないマテリアライズドビュー \"%s.%s\"" -#: describe.c:1414 +#: describe.c:1763 #, c-format msgid "Materialized view \"%s.%s\"" msgstr "マテリアライズドビュー \"%s.%s\"" -#: describe.c:1418 -#, c-format -msgid "Sequence \"%s.%s\"" -msgstr "シーケンス \"%s.%s\"" - -#: describe.c:1423 +#: describe.c:1768 #, c-format msgid "Unlogged index \"%s.%s\"" msgstr "ログを取らないインデックス \"%s.%s\"" -#: describe.c:1426 +#: describe.c:1771 #, c-format msgid "Index \"%s.%s\"" msgstr "インデックス \"%s.%s\"" -#: describe.c:1431 +#: describe.c:1776 #, c-format msgid "Special relation \"%s.%s\"" msgstr "特殊なリレーション \"%s.%s\"" -#: describe.c:1435 +#: describe.c:1780 #, c-format msgid "TOAST table \"%s.%s\"" msgstr "TOAST テーブル \"%s.%s\"" -#: describe.c:1439 +#: describe.c:1784 #, c-format msgid "Composite type \"%s.%s\"" msgstr "複合型 \"%s.%s\"" -#: describe.c:1443 +#: describe.c:1788 #, c-format msgid "Foreign table \"%s.%s\"" msgstr "外部テーブル \"%s.%s\"" -#: describe.c:1454 +#: describe.c:1807 msgid "Column" msgstr "列" -#: describe.c:1463 -msgid "Modifiers" -msgstr "修飾語" +#: describe.c:1818 describe.c:3591 +msgid "Collation" +msgstr "照合順序" + +#: describe.c:1819 describe.c:3598 +msgid "Nullable" +msgstr "Null 値を許容" -#: describe.c:1468 -msgid "Value" -msgstr "値" +#: describe.c:1820 describe.c:3599 +msgid "Default" +msgstr "デフォルト" -#: describe.c:1471 +#: describe.c:1825 msgid "Definition" msgstr "定義" -#: describe.c:1474 describe.c:4018 describe.c:4099 describe.c:4167 -#: describe.c:4228 -msgid "FDW Options" -msgstr "FDWオプション" +#: describe.c:1828 describe.c:4675 describe.c:4759 describe.c:4830 +#: describe.c:4894 +msgid "FDW options" +msgstr "FDW オプション" -#: describe.c:1478 +#: describe.c:1832 msgid "Storage" msgstr "ストレージ" -#: describe.c:1481 +#: describe.c:1837 msgid "Stats target" -msgstr "対象統計情報" +msgstr "統計の対象" -#: describe.c:1531 +#: describe.c:1982 #, c-format 
-msgid "collate %s" -msgstr "照合順序 %s" +msgid "Partition of: %s %s" +msgstr "パーティション: %s %s" -#: describe.c:1539 -msgid "not null" -msgstr "not null" +#: describe.c:1988 +#, c-format +msgid "Partition constraint: %s" +msgstr "パーティションの制約: %s" -#. translator: default values of column definitions -#: describe.c:1549 +#: describe.c:2011 #, c-format -msgid "default %s" -msgstr "default %s" +msgid "Partition key: %s" +msgstr "パーティションキー: %s" -#: describe.c:1664 +#: describe.c:2079 msgid "primary key, " msgstr "プライマリキー, " -#: describe.c:1666 +#: describe.c:2081 msgid "unique, " -msgstr "ユニーク, " +msgstr "ユニーク," -#: describe.c:1672 +#: describe.c:2087 #, c-format msgid "for table \"%s.%s\"" msgstr "テーブル \"%s.%s\" 用" -#: describe.c:1676 +#: describe.c:2091 #, c-format msgid ", predicate (%s)" -msgstr ", 述語 (%s)" +msgstr "、述語 (%s)" -#: describe.c:1679 +#: describe.c:2094 msgid ", clustered" -msgstr ", クラスタ化済み" +msgstr "、クラスター化" -#: describe.c:1682 +#: describe.c:2097 msgid ", invalid" -msgstr ", 無効" +msgstr "無効" -#: describe.c:1685 +#: describe.c:2100 msgid ", deferrable" -msgstr ", 遅延可能" +msgstr "、遅延可能" -#: describe.c:1688 +#: describe.c:2103 msgid ", initially deferred" -msgstr ", 最初から遅延されている" +msgstr "、最初から遅延中" -#: describe.c:1691 +#: describe.c:2106 msgid ", replica identity" -msgstr "レプリカ特性" - -#: describe.c:1726 -#, c-format -msgid "Owned by: %s" -msgstr "所有者: %s" +msgstr "、レプリカの id" -#: describe.c:1786 +#: describe.c:2165 msgid "Indexes:" msgstr "インデックス:" -#: describe.c:1870 +#: describe.c:2249 msgid "Check constraints:" -msgstr "検査制約:" +msgstr "Check 制約:" -#: describe.c:1901 +#: describe.c:2280 msgid "Foreign-key constraints:" msgstr "外部キー制約:" -#: describe.c:1932 +#: describe.c:2311 msgid "Referenced by:" -msgstr "参照元:" +msgstr "参照元:" + +#: describe.c:2361 +msgid "Policies:" +msgstr "ポリシー:" + +#: describe.c:2364 +msgid "Policies (forced row security enabled):" +msgstr "ポリシー(行セキュリティを強制的に有効化):" + +#: describe.c:2367 +msgid "Policies (row security enabled): (none)" +msgstr "ポリシー(行セキュリティ有効化): (なし)" -#: describe.c:2014 describe.c:2064 +#: describe.c:2370 +msgid "Policies (forced row security enabled): (none)" +msgstr "ポリシー(行セキュリティを強制的に有効化): (なし)" + +#: describe.c:2373 +msgid "Policies (row security disabled):" +msgstr "ポリシー(行セキュリティを無効化):" + +#: describe.c:2435 +msgid "Statistics objects:" +msgstr "統計オブジェクト:" + +#: describe.c:2538 describe.c:2623 msgid "Rules:" msgstr "ルール:" -#: describe.c:2017 +#: describe.c:2541 msgid "Disabled rules:" -msgstr "無効にされたルール:" +msgstr "無効化されたルール:" -#: describe.c:2020 +#: describe.c:2544 msgid "Rules firing always:" -msgstr "常に無視されるルール" +msgstr "常に適用するルール:" -#: describe.c:2023 +#: describe.c:2547 msgid "Rules firing on replica only:" -msgstr "レプリカでのみ無視されるルール" +msgstr "レプリカ上でのみ適用するルール:" + +#: describe.c:2587 +msgid "Publications:" +msgstr "パブリケーション:" -#: describe.c:2047 +#: describe.c:2606 msgid "View definition:" -msgstr "ビュー定義:" +msgstr "ビューの定義:" -#: describe.c:2182 +#: describe.c:2741 msgid "Triggers:" -msgstr "トリガ:" +msgstr "トリガー:" -#: describe.c:2186 +#: describe.c:2745 msgid "Disabled user triggers:" -msgstr "無効にされたユーザトリガ:" +msgstr "無効化されたユーザートリガー:" -#: describe.c:2188 +#: describe.c:2747 msgid "Disabled triggers:" -msgstr "無効にされたトリガ:" +msgstr "無効化されたトリガー:" -#: describe.c:2191 +#: describe.c:2750 msgid "Disabled internal triggers:" -msgstr "無効にされた内部トリガ:" +msgstr "無効化された内部トリガー:" -#: describe.c:2194 +#: describe.c:2753 msgid "Triggers firing always:" -msgstr "常に無視されるトリガ" +msgstr "常に適用するするトリガー:" -#: describe.c:2197 +#: describe.c:2756 msgid "Triggers firing on replica 
only:" -msgstr "レプリカでのみ無視されるトリガ" +msgstr "レプリカ上でのみ適用するトリガー:" + +#: describe.c:2815 +#, c-format +msgid "Server: %s" +msgstr "サーバー: %s" + +#: describe.c:2823 +#, c-format +msgid "FDW options: (%s)" +msgstr "FDW オプション: (%s)" -#: describe.c:2276 +#: describe.c:2842 msgid "Inherits" -msgstr "継承" +msgstr "継承元" -#: describe.c:2315 +#: describe.c:2896 #, c-format msgid "Number of child tables: %d (Use \\d+ to list them.)" -msgstr "子テーブルの数:%d(\\d+ で一覧表示)" +msgstr "子テーブル数: %d (\\+d で一覧を表示)" -#: describe.c:2322 +#: describe.c:2898 +#, c-format +msgid "Number of partitions: %d (Use \\d+ to list them.)" +msgstr "パーティション数: %d (\\+d で一覧を表示)。" + +#: describe.c:2906 msgid "Child tables" msgstr "子テーブル" -#: describe.c:2344 +#: describe.c:2906 +msgid "Partitions" +msgstr "パーティション" + +#: describe.c:2940 #, c-format msgid "Typed table of type: %s" -msgstr "型付けされたテーブルの型:%s" +msgstr "%s 型の型付きテーブル" -#: describe.c:2358 +#: describe.c:2956 msgid "Replica Identity" -msgstr "レプリカ特性" +msgstr "レプリカ識別" -#: describe.c:2371 +#: describe.c:2969 msgid "Has OIDs: yes" -msgstr "OID を持つ: はい" +msgstr "OID あり: はい" -#: describe.c:2460 +#: describe.c:3049 #, c-format msgid "Tablespace: \"%s\"" -msgstr "テーブルスペース \"%s\"" +msgstr "テーブル空間: \"%s\"" #. translator: before this string there's an index description like #. '"foo_pkey" PRIMARY KEY, btree (a)' -#: describe.c:2472 +#: describe.c:3061 #, c-format msgid ", tablespace \"%s\"" -msgstr "テーブルスペース \"%s\"" +msgstr "、テーブル空間 \"%s\"" -#: describe.c:2557 +#: describe.c:3154 msgid "List of roles" msgstr "ロール一覧" -#: describe.c:2559 +#: describe.c:3156 msgid "Role name" msgstr "ロール名" -#: describe.c:2560 +#: describe.c:3157 msgid "Attributes" msgstr "属性" -#: describe.c:2561 +#: describe.c:3158 msgid "Member of" -msgstr "メンバー" +msgstr "所属グループ" -#: describe.c:2572 +#: describe.c:3169 msgid "Superuser" -msgstr "スーパーユーザ" +msgstr "スーパーユーザー" -#: describe.c:2575 +#: describe.c:3172 msgid "No inheritance" msgstr "継承なし" -#: describe.c:2578 +#: describe.c:3175 msgid "Create role" -msgstr "ロールを作成できる" +msgstr "ロール作成可" -#: describe.c:2581 +#: describe.c:3178 msgid "Create DB" -msgstr "DBを作成できる" +msgstr "DB作成可" -#: describe.c:2584 +#: describe.c:3181 msgid "Cannot login" -msgstr "ログインできない" +msgstr "ログインできません" -#: describe.c:2588 +#: describe.c:3185 msgid "Replication" -msgstr "レプリケーション" +msgstr "レプリケーション可" + +#: describe.c:3189 +msgid "Bypass RLS" +msgstr "RLS のバイパス" -#: describe.c:2597 +#: describe.c:3198 msgid "No connections" msgstr "接続なし" -#: describe.c:2599 +#: describe.c:3200 #, c-format msgid "%d connection" msgid_plural "%d connections" msgstr[0] "%d 個の接続" -msgstr[1] "%d 個の接続" -#: describe.c:2609 +#: describe.c:3210 msgid "Password valid until " -msgstr "パスワード有効期限" +msgstr "パスワードの有効期限 " + +#: describe.c:3260 +#, c-format +msgid "The server (version %s) does not support per-database role settings.\n" +msgstr "このサーバー (バージョン %s) はデータベースごとのロール設定をサポートしていません\n" -#: describe.c:2665 +#: describe.c:3273 msgid "Role" msgstr "ロール" -#: describe.c:2666 +#: describe.c:3274 msgid "Database" msgstr "データベース" -#: describe.c:2667 +#: describe.c:3275 msgid "Settings" msgstr "設定" -#: describe.c:2677 +#: describe.c:3296 #, c-format -msgid "No per-database role settings support in this server version.\n" -msgstr "このバージョンのサーバーでは、データベース毎のロール設定をサポートしていません。\n" +msgid "Did not find any settings for role \"%s\" and database \"%s\".\n" +msgstr "ロール \"%s\" とデータベース \"%s\" の設定が見つかりませんでした。\n" -#: describe.c:2688 +#: describe.c:3299 #, c-format -msgid "No matching settings found.\n" -msgstr "マッチする設定が見つかりません\n" +msgid "Did not find 
any settings for role \"%s\".\n" +msgstr "ロール \"%s\" の設定が見つかりませんでした。\n" -#: describe.c:2690 +#: describe.c:3302 #, c-format -msgid "No settings found.\n" -msgstr "設定がありません。\n" +msgid "Did not find any settings.\n" +msgstr "設定が見つかりませんでした。\n" -#: describe.c:2695 +#: describe.c:3307 msgid "List of settings" -msgstr "設定の一覧" +msgstr "設定一覧" -#: describe.c:2764 +#: describe.c:3376 msgid "index" msgstr "インデックス" -#: describe.c:2766 +#: describe.c:3378 msgid "special" msgstr "特殊" -#: describe.c:2774 describe.c:4216 +#: describe.c:3387 describe.c:4882 msgid "Table" msgstr "テーブル" -#: describe.c:2850 -#, c-format -msgid "No matching relations found.\n" -msgstr "マッチするリレーションが見つかりません\n" - -#: describe.c:2852 -#, c-format -msgid "No relations found.\n" -msgstr "リレーションがありません。\n" - -#: describe.c:2857 +#: describe.c:3477 msgid "List of relations" -msgstr "リレーションの一覧" +msgstr "リレーション一覧" -#: describe.c:2894 +#: describe.c:3514 msgid "Trusted" -msgstr "信頼?" +msgstr "信頼済み" -#: describe.c:2902 -msgid "Internal Language" +#: describe.c:3522 +msgid "Internal language" msgstr "内部言語" -#: describe.c:2903 -msgid "Call Handler" +#: describe.c:3523 +msgid "Call handler" msgstr "呼び出しハンドラー" -#: describe.c:2904 describe.c:4005 +#: describe.c:3524 describe.c:4662 msgid "Validator" -msgstr "バリデータ" +msgstr "バリデーター" -#: describe.c:2907 -msgid "Inline Handler" +#: describe.c:3527 +msgid "Inline handler" msgstr "インラインハンドラー" -#: describe.c:2935 +#: describe.c:3555 msgid "List of languages" -msgstr "言語一覧" - -#: describe.c:2979 -msgid "Modifier" -msgstr "修飾語" +msgstr "手続き言語一覧" -#: describe.c:2980 +#: describe.c:3600 msgid "Check" -msgstr "チェック" +msgstr "CHECK制約" -#: describe.c:3022 +#: describe.c:3642 msgid "List of domains" msgstr "ドメイン一覧" -#: describe.c:3056 +#: describe.c:3676 msgid "Source" -msgstr "ソース" +msgstr "変換元" -#: describe.c:3057 +#: describe.c:3677 msgid "Destination" -msgstr "宛先" - -#: describe.c:3058 describe.c:3207 -msgid "no" -msgstr "no" +msgstr "変換先" -#: describe.c:3058 describe.c:3209 -msgid "yes" -msgstr "yes" - -#: describe.c:3059 +#: describe.c:3679 msgid "Default?" -msgstr "デフォルト?" +msgstr "デフォルト?" -#: describe.c:3096 +#: describe.c:3716 msgid "List of conversions" -msgstr "変換ルール一覧" +msgstr "符号化方式一覧" -#: describe.c:3135 +#: describe.c:3755 msgid "Event" msgstr "イベント" -#: describe.c:3137 +#: describe.c:3757 msgid "enabled" msgstr "有効" -#: describe.c:3138 +#: describe.c:3758 msgid "replica" msgstr "レプリカ" -#: describe.c:3139 +#: describe.c:3759 msgid "always" -msgstr "常に" +msgstr "常時" -#: describe.c:3140 +#: describe.c:3760 msgid "disabled" msgstr "無効" -#: describe.c:3141 +#: describe.c:3761 describe.c:5343 msgid "Enabled" -msgstr "有効" +msgstr "有効状態" -#: describe.c:3142 +#: describe.c:3762 msgid "Procedure" -msgstr "プロシージャ" +msgstr "プロシージャー名" -#: describe.c:3143 +#: describe.c:3763 msgid "Tags" msgstr "タグ" -#: describe.c:3162 +#: describe.c:3782 msgid "List of event triggers" -msgstr "イベントトリガの一覧" +msgstr "イベントトリガー一覧" -#: describe.c:3204 +#: describe.c:3824 msgid "Source type" -msgstr "ソースの型" +msgstr "変換元の型" -#: describe.c:3205 +#: describe.c:3825 msgid "Target type" -msgstr "ターゲットの型" +msgstr "変換先の型" -#: describe.c:3208 +#: describe.c:3828 msgid "in assignment" -msgstr "代入" +msgstr "代入時のみ" -#: describe.c:3210 +#: describe.c:3830 msgid "Implicit?" -msgstr "暗黙?" +msgstr "暗黙的に適用 ?" 
-#: describe.c:3261 +#: describe.c:3881 msgid "List of casts" msgstr "キャスト一覧" -#: describe.c:3287 +#: describe.c:3909 #, c-format -msgid "The server (version %d.%d) does not support collations.\n" -msgstr "このサーバーのバージョン (%d.%d) は照合順序をサポートしていません。\n" +msgid "The server (version %s) does not support collations.\n" +msgstr "このサーバー (バージョン %s) は照合順序をサポートしていません。\n" + +#: describe.c:3930 +msgid "Provider" +msgstr "プロバイダー" -#: describe.c:3337 +#: describe.c:3965 msgid "List of collations" msgstr "照合順序一覧" -#: describe.c:3396 +#: describe.c:4024 msgid "List of schemas" msgstr "スキーマ一覧" -#: describe.c:3419 describe.c:3654 describe.c:3722 describe.c:3790 +#: describe.c:4049 describe.c:4296 describe.c:4367 describe.c:4438 #, c-format -msgid "The server (version %d.%d) does not support full text search.\n" -msgstr "このバージョン (%d.%d) のサーバーは全文検索をサポートしていません。\n" +msgid "The server (version %s) does not support full text search.\n" +msgstr "このサーバー (バージョン %s) は全文検索をサポートしていません。\n" -#: describe.c:3453 +#: describe.c:4084 msgid "List of text search parsers" msgstr "テキスト検索用パーサ一覧" -#: describe.c:3496 +#: describe.c:4129 #, c-format msgid "Did not find any text search parser named \"%s\".\n" -msgstr "テキスト検索用パーサ \"%s\" が見つかりません。\n" +msgstr "テキスト検索用パーサ \"%s\" が見つかりませんでした。\n" + +#: describe.c:4132 +#, c-format +msgid "Did not find any text search parsers.\n" +msgstr "テキスト検索パーサが見つかりませんでした。\n" -#: describe.c:3571 +#: describe.c:4207 msgid "Start parse" -msgstr "パース起動" +msgstr "パース開始" -#: describe.c:3572 +#: describe.c:4208 msgid "Method" msgstr "メソッド" -#: describe.c:3576 +#: describe.c:4212 msgid "Get next token" msgstr "次のトークンを取得" -#: describe.c:3578 +#: describe.c:4214 msgid "End parse" msgstr "パース終了" -#: describe.c:3580 +#: describe.c:4216 msgid "Get headline" -msgstr "見出しの取得" +msgstr "見出しを取得" -#: describe.c:3582 +#: describe.c:4218 msgid "Get token types" -msgstr "トークンタイプの取得" +msgstr "トークンタイプを取得" -#: describe.c:3592 +#: describe.c:4229 #, c-format msgid "Text search parser \"%s.%s\"" -msgstr "テキスト検索用パーサ \"%s.%s\"" +msgstr "テキスト検索パーサ \"%s.%s\"" -#: describe.c:3594 +#: describe.c:4232 #, c-format msgid "Text search parser \"%s\"" -msgstr "テキスト検索用パーサ \"%s\"" +msgstr "テキスト検索パーサ \"%s\"" -#: describe.c:3613 +#: describe.c:4251 msgid "Token name" msgstr "トークン名" -#: describe.c:3624 +#: describe.c:4262 #, c-format msgid "Token types for parser \"%s.%s\"" msgstr "パーサ \"%s.%s\" のトークンタイプ" -#: describe.c:3626 +#: describe.c:4265 #, c-format msgid "Token types for parser \"%s\"" msgstr "パーサ \"%s\" のトークンタイプ" -#: describe.c:3676 +#: describe.c:4319 msgid "Template" msgstr "テンプレート" -#: describe.c:3677 +#: describe.c:4320 msgid "Init options" -msgstr "初期化オプション:" +msgstr "初期化オプション" -#: describe.c:3699 +#: describe.c:4342 msgid "List of text search dictionaries" -msgstr "テキスト検索用辞書の一覧" +msgstr "テキスト検索用辞書一覧" -#: describe.c:3739 +#: describe.c:4385 msgid "Init" msgstr "初期化" -#: describe.c:3740 +#: describe.c:4386 msgid "Lexize" msgstr "Lex 処理" -#: describe.c:3767 +#: describe.c:4413 msgid "List of text search templates" -msgstr "テキスト検索用テンプレート一覧" +msgstr "テキスト検索テンプレート一覧" -#: describe.c:3824 +#: describe.c:4473 msgid "List of text search configurations" -msgstr "テキスト検索用設定一覧" +msgstr "テキスト検索設定一覧" -#: describe.c:3868 +#: describe.c:4519 #, c-format msgid "Did not find any text search configuration named \"%s\".\n" -msgstr "テキスト検索用設定 \"%s\" が見つかりません。\n" +msgstr "テキスト検索用設定 \"%s\" が見つかりませんでした。\n" -#: describe.c:3934 +#: describe.c:4522 +#, c-format +msgid "Did not find any text search configurations.\n" +msgstr "テキスト検索設定が見つかりませんでした。\n" + +#: 
describe.c:4588 msgid "Token" msgstr "トークン" -#: describe.c:3935 +#: describe.c:4589 msgid "Dictionaries" msgstr "辞書" -#: describe.c:3946 +#: describe.c:4600 #, c-format msgid "Text search configuration \"%s.%s\"" -msgstr "テキスト検索用設定 \"%s.%s\"" +msgstr "テキスト検索設定 \"%s.%s\"" -#: describe.c:3949 +#: describe.c:4603 #, c-format msgid "Text search configuration \"%s\"" -msgstr "テキスト検索用設定 \"%s\"" +msgstr "テキスト検索設定 \"%s\"" -#: describe.c:3953 +#: describe.c:4607 #, c-format msgid "" "\n" "Parser: \"%s.%s\"" msgstr "" "\n" -"パーサ: \"%s.%s\"" +"パーサ: \"%s.%s\"" -#: describe.c:3956 +#: describe.c:4610 #, c-format msgid "" "\n" "Parser: \"%s\"" msgstr "" "\n" -"パーサ:\"%s\"" +"パーサ: \"%s\"" -#: describe.c:3988 +#: describe.c:4644 #, c-format -msgid "The server (version %d.%d) does not support foreign-data wrappers.\n" -msgstr "このバージョン (%d.%d) のサーバーは外部データラッパーをサポートしていません。\n" - -#: describe.c:4002 -msgid "Handler" -msgstr "ハンドラー" +msgid "The server (version %s) does not support foreign-data wrappers.\n" +msgstr "このサーバー (バージョン %s) は外部データラッパをサポートしていません。\n" -#: describe.c:4045 +#: describe.c:4702 msgid "List of foreign-data wrappers" -msgstr "外部データラッパーの一覧" +msgstr "外部データラッパ一覧" -#: describe.c:4068 +#: describe.c:4727 #, c-format -msgid "The server (version %d.%d) does not support foreign servers.\n" -msgstr "このサーバー(バージョン%d.%d)は外部サーバーをサポートしていません。\n" +msgid "The server (version %s) does not support foreign servers.\n" +msgstr "このサーバー (バージョン %s) は外部サーバをサポートしていません。\n" -#: describe.c:4080 +#: describe.c:4740 msgid "Foreign-data wrapper" -msgstr "外部データラッパー" +msgstr "外部データラッパ" -#: describe.c:4098 describe.c:4293 +#: describe.c:4758 describe.c:4963 msgid "Version" msgstr "バージョン" -#: describe.c:4124 +#: describe.c:4784 msgid "List of foreign servers" msgstr "外部サーバー一覧" -#: describe.c:4147 +#: describe.c:4809 #, c-format -msgid "The server (version %d.%d) does not support user mappings.\n" -msgstr "このサーバー(バージョン%d.%d)はユーザマップをサポートしていません。\n" +msgid "The server (version %s) does not support user mappings.\n" +msgstr "このサーバー (バージョン %s) はユーザーマッピングをサポートしていません。\n" -#: describe.c:4156 describe.c:4217 +#: describe.c:4819 describe.c:4883 msgid "Server" msgstr "サーバー" -#: describe.c:4157 +#: describe.c:4820 msgid "User name" -msgstr "ユーザ名" +msgstr "ユーザー名" -#: describe.c:4182 +#: describe.c:4845 msgid "List of user mappings" -msgstr "ユーザマッピングの一覧" +msgstr "ユーザーマッピング一覧" -#: describe.c:4205 +#: describe.c:4870 #, c-format -msgid "The server (version %d.%d) does not support foreign tables.\n" -msgstr "このサーバー(バージョン%d.%d)は外部テーブルをサポートしていません。\n" +msgid "The server (version %s) does not support foreign tables.\n" +msgstr "このサーバー (バージョン %s) は外部テーブルをサポートしていません。\n" -#: describe.c:4256 +#: describe.c:4923 msgid "List of foreign tables" msgstr "外部テーブル一覧" -#: describe.c:4279 describe.c:4333 +#: describe.c:4948 describe.c:5005 #, c-format -msgid "The server (version %d.%d) does not support extensions.\n" -msgstr "このサーバーのバージョン (%d.%d) は拡張をサポートしていません。\n" +msgid "The server (version %s) does not support extensions.\n" +msgstr "このサーバー (バージョン %s) は拡張をサポートしていません。\n" -#: describe.c:4310 +#: describe.c:4980 msgid "List of installed extensions" -msgstr "インストール済みの拡張の一覧" +msgstr "インストール済みの拡張一覧" -#: describe.c:4360 +#: describe.c:5033 #, c-format msgid "Did not find any extension named \"%s\".\n" -msgstr "\"%s\" という名前の拡張が見つかりません。\n" +msgstr "\"%s\" という名前の拡張が見つかりませんでした。\n" -#: describe.c:4363 +#: describe.c:5036 #, c-format msgid "Did not find any extensions.\n" -msgstr "拡張がまったく見つかりません。\n" +msgstr "拡張が見つかりませんでした。\n" -#: describe.c:4407 -msgid "Object Description" +#: 
describe.c:5080 +msgid "Object description" msgstr "オブジェクトの説明" -#: describe.c:4416 +#: describe.c:5090 #, c-format msgid "Objects in extension \"%s\"" -msgstr "拡張\"%s\"内のオブジェクト" +msgstr "拡張 \"%s\" 内のオブジェクト" + +#: describe.c:5119 describe.c:5185 +#, c-format +msgid "The server (version %s) does not support publications.\n" +msgstr "このサーバー (バージョン %s) はパブリケーションをサポートしていません。\n" + +#: describe.c:5136 describe.c:5248 +msgid "All tables" +msgstr "全テーブル" + +#: describe.c:5137 describe.c:5249 +msgid "Inserts" +msgstr "Insert文" + +#: describe.c:5138 describe.c:5250 +msgid "Updates" +msgstr "Update文" + +#: describe.c:5139 describe.c:5251 +msgid "Deletes" +msgstr "Delete文" + +#: describe.c:5156 +msgid "List of publications" +msgstr "パブリケーション一覧" + +#: describe.c:5217 +#, c-format +msgid "Did not find any publication named \"%s\".\n" +msgstr "\"%s\" という名前のパブリケーションが見つかりませんでした。\n" + +#: describe.c:5220 +#, c-format +msgid "Did not find any publications.\n" +msgstr "パブリケーションが見つかりませんでした。\n" + +#: describe.c:5244 +#, c-format +msgid "Publication %s" +msgstr "パブリケーション %s" + +#: describe.c:5284 +msgid "Tables:" +msgstr "テーブル:" + +#: describe.c:5328 +#, c-format +msgid "The server (version %s) does not support subscriptions.\n" +msgstr "このサーバー (バージョン %s) はサブスクリプションをサポートしていません。\n" + +#: describe.c:5344 +msgid "Publication" +msgstr "パブリケーション" + +#: describe.c:5351 +msgid "Synchronous commit" +msgstr "同期コミット" + +#: describe.c:5352 +msgid "Conninfo" +msgstr "接続情報" + +#: describe.c:5374 +msgid "List of subscriptions" +msgstr "サブスクリプション一覧" #: help.c:62 #, c-format msgid "%s\n" msgstr "%s\n" -#: help.c:67 +#: help.c:73 #, c-format msgid "" "psql is the PostgreSQL interactive terminal.\n" @@ -1684,12 +2083,12 @@ msgstr "" "psql は PostgreSQL の会話型ターミナルです。\n" "\n" -#: help.c:68 +#: help.c:74 help.c:344 help.c:383 help.c:410 #, c-format msgid "Usage:\n" -msgstr "使用方法:\n" +msgstr "使い方:\n" -#: help.c:69 +#: help.c:75 #, c-format msgid "" " psql [OPTION]... [DBNAME [USERNAME]]\n" @@ -1698,185 +2097,202 @@ msgstr "" " psql [オプション]... 
[データベース名 [ユーザ名]]\n" "\n" -#: help.c:71 +#: help.c:77 #, c-format msgid "General options:\n" -msgstr "一般的なオプション:\n" +msgstr "一般的なオプション:\n" -#: help.c:76 +#: help.c:82 #, c-format msgid " -c, --command=COMMAND run only single command (SQL or internal) and exit\n" -msgstr " -c, --command=コマンド (SQLまたは内部の)単一コマンドを一つだけ実行して終了\n" +msgstr " -c, --command=コマンド 単一の(SQLまたは内部)コマンドを一つだけ実行して終了\n" -#: help.c:77 +#: help.c:83 #, c-format msgid " -d, --dbname=DBNAME database name to connect to (default: \"%s\")\n" -msgstr " -d, --dbname=DB名 接続するデータベース名を指定(デフォルト: \"%s\")\n" +msgstr " -d, --dbname=DB名 接続するデータベース名(デフォルト: \"%s\")\n" -#: help.c:78 +#: help.c:84 #, c-format msgid " -f, --file=FILENAME execute commands from file, then exit\n" -msgstr " -f, --file=ファイル名 ファイルからコマンドを読み込んで実行後、終了\n" +msgstr " -f, --file=ファイル名 ファイルからコマンドを読み込んで実行後、終了します。\n" -#: help.c:79 +#: help.c:85 #, c-format msgid " -l, --list list available databases, then exit\n" -msgstr " -l(エル), --list 使用可能なデータベース一覧を表示して終了\n" +msgstr " -l(エル), --list 使用可能なデータベース一覧を表示して終了します。\n" -#: help.c:80 +#: help.c:86 #, c-format msgid "" " -v, --set=, --variable=NAME=VALUE\n" " set psql variable NAME to VALUE\n" +" (e.g., -v ON_ERROR_STOP=1)\n" msgstr "" " -v, --set=, --variable=名前=値\n" -" psql 変数 '名前' に '値' をセット\n" +" psql 変数 '名前' に '値' をセットします。\n" +" (例: -v ON_ERROR_STOP=1)\n" -#: help.c:82 +#: help.c:89 #, c-format msgid " -V, --version output version information, then exit\n" -msgstr " -V, --version バージョン情報を表示し、終了します\n" +msgstr " -V, --version バージョン情報を表示して終了します。\n" -#: help.c:83 +#: help.c:90 #, c-format msgid " -X, --no-psqlrc do not read startup file (~/.psqlrc)\n" -msgstr " -X, --no-psqlrc 初期化ファイル (~/.psqlrc) を読みこまない\n" +msgstr " -X, --no-psqlrc 初期化ファイル (~/.psqlrc) を読み込みません。\n" -#: help.c:84 +#: help.c:91 #, c-format msgid "" " -1 (\"one\"), --single-transaction\n" " execute as a single transaction (if non-interactive)\n" msgstr "" -" -1(数字の1), --single-transaction\n" -" 単一のトランザクションとして実行(対話式でない場合)\n" +" -1 (数字の1), --single-transaction\n" +" (対話形式でない場合)単一のトランザクションとして実行します。\n" -#: help.c:86 +#: help.c:93 #, c-format -msgid " -?, --help show this help, then exit\n" -msgstr " -?, --help このヘルプを表示し、終了します\n" +msgid " -?, --help[=options] show this help, then exit\n" +msgstr " -?, --help[=オプション] このヘルプを表示して終了します。\n" -#: help.c:88 +#: help.c:94 +#, c-format +msgid " --help=commands list backslash commands, then exit\n" +msgstr " --help=コマンド バックスラッシュコマンドの一覧を表示して終了します。\n" + +#: help.c:95 +#, c-format +msgid " --help=variables list special variables, then exit\n" +msgstr " --help=変数名 特殊変数の一覧を表示して終了します。\n" + +#: help.c:97 #, c-format msgid "" "\n" "Input and output options:\n" msgstr "" "\n" -"入出力オプション:\n" +"入出力オプション:\n" -#: help.c:89 +#: help.c:98 #, c-format msgid " -a, --echo-all echo all input from script\n" -msgstr " -a, --echo-all スクリプトからのすべての入力を表示\n" +msgstr " -a, --echo-all スクリプトから読み込んだ入力をすべて表示します。\n" -#: help.c:90 +#: help.c:99 +#, c-format +msgid " -b, --echo-errors echo failed commands\n" +msgstr " -b, --echo-errors 失敗したコマンドを表示します。\n" + +#: help.c:100 #, c-format msgid " -e, --echo-queries echo commands sent to server\n" -msgstr " -e, --echo-queries サーバーへ送信したコマンドを表示\n" +msgstr " -e, --echo-queries サーバーへ送信したコマンドを表示します。\n" -#: help.c:91 +#: help.c:101 #, c-format msgid " -E, --echo-hidden display queries that internal commands generate\n" -msgstr " -E, --echo-hidden 内部コマンドが生成した問い合わせを表示\n" +msgstr " -E, --echo-hidden 内部コマンドが生成した問い合わせを表示します。\n" -#: help.c:92 +#: help.c:102 #, c-format msgid " -L, --log-file=FILENAME send session log to file\n" -msgstr " -L, 
--log-file=ファイル名 セッションログをファイルに書き込む\n" +msgstr " -L, --log-file=ファイル名 セッションログをファイルに書き込みます。\n" -#: help.c:93 +#: help.c:103 #, c-format msgid " -n, --no-readline disable enhanced command line editing (readline)\n" -msgstr " -n, --no-readline 拡張コマンドライン編集機能(readline)を無効にする\n" +msgstr " -n, --no-readline 拡張コマンドライン編集機能(readline)を無効にします。\n" -#: help.c:94 +#: help.c:104 #, c-format msgid " -o, --output=FILENAME send query results to file (or |pipe)\n" -msgstr " -o, --output=ファイル名 問い合わせ結果をファイル(または |パイプ)に送る\n" +msgstr " -o, --output=ファイル名 問い合わせの結果をファイル(または |パイプ)に送ります。\n" -#: help.c:95 +#: help.c:105 #, c-format msgid " -q, --quiet run quietly (no messages, only query output)\n" -msgstr " -q, --quiet 静かに実行(メッセージなしで、問い合わせの出力のみ)\n" +msgstr " -q, --quiet 静かに実行します(メッセージなしで、問い合わせの出力のみを表示)\n" -#: help.c:96 +#: help.c:106 #, c-format msgid " -s, --single-step single-step mode (confirm each query)\n" -msgstr " -s, --single-step シングルステップモード(各問い合わせごとに確認)\n" +msgstr " -s, --single-step シングルステップモード(各問い合わせごとに確認)\n" -#: help.c:97 +#: help.c:107 #, c-format msgid " -S, --single-line single-line mode (end of line terminates SQL command)\n" -msgstr " -S, --single-line 単一行モード(行末を SQL コマンドの終了とみなす)\n" +msgstr " -S, --single-line 単一行モード(行末で SQL コマンドを終端する)\n" -#: help.c:99 +#: help.c:109 #, c-format msgid "" "\n" "Output format options:\n" msgstr "" "\n" -"出力フォーマットオプション:\n" +"出力フォーマットのオプション\n" -#: help.c:100 +#: help.c:110 #, c-format msgid " -A, --no-align unaligned table output mode\n" msgstr " -A, --no-align 桁揃えなしのテーブル出力モード\n" -#: help.c:101 +#: help.c:111 #, c-format msgid "" " -F, --field-separator=STRING\n" " field separator for unaligned output (default: \"%s\")\n" msgstr "" " -F, --field-separator=文字列\n" -" 桁揃えなしの出力でのフィールド区切り文字(デフォルト: \"%s\")\n" +" 桁揃えなし出力時のフィールド区切り文字(デフォルト: \"%s\")\n" -#: help.c:104 +#: help.c:114 #, c-format msgid " -H, --html HTML table output mode\n" msgstr " -H, --html HTML テーブル出力モード\n" -#: help.c:105 +#: help.c:115 #, c-format msgid " -P, --pset=VAR[=ARG] set printing option VAR to ARG (see \\pset command)\n" -msgstr " -P, --pset=変数[=値] 表示オプション '変数' を '値' にセット (\\pset コマンドを参照)\n" +msgstr " -P, --pset=変数[=値] 表示オプション '変数' を '値' にセット(\\pset コマンドを参照)\n" -#: help.c:106 +#: help.c:116 #, c-format msgid "" " -R, --record-separator=STRING\n" " record separator for unaligned output (default: newline)\n" msgstr "" " -R, --record-separator=文字列\n" -" 桁揃えなしの出力でのレコード区切り文字(デフォルト:改行)\n" +" 桁揃えなし出力におけるレコード区切り文字(デフォルト:改行)\n" -#: help.c:108 +#: help.c:118 #, c-format msgid " -t, --tuples-only print rows only\n" msgstr " -t, --tuples-only 行のみを表示\n" -#: help.c:109 +#: help.c:119 #, c-format msgid " -T, --table-attr=TEXT set HTML table tag attributes (e.g., width, border)\n" -msgstr " -T, --table-attr=TEXT HTMLテーブルのタグ属性をセット(width, border等)\n" +msgstr " -T, --table-attr=TEXT HTMLテーブルのタグ属性をセット(width, border等)\n" -#: help.c:110 +#: help.c:120 #, c-format msgid " -x, --expanded turn on expanded table output\n" -msgstr " -x, --expanded 拡張テーブル出力を有効にする\n" +msgstr " -x, --expanded 拡張テーブル出力に切り替える\n" -#: help.c:111 +#: help.c:121 #, c-format msgid "" " -z, --field-separator-zero\n" " set field separator for unaligned output to zero byte\n" msgstr "" " -z, --field-separator-zero\n" -" 桁揃えなしの出力でのフィールド区切り文字をゼロバイトに設定\n" +" 桁揃えなし出力時のフィールド区切り文字をゼロバイトに設定\n" -#: help.c:113 +#: help.c:123 #, c-format msgid "" " -0, --record-separator-zero\n" @@ -1885,7 +2301,7 @@ msgstr "" " -0, --record-separator-zero\n" " 桁揃えなしの出力でのレコード区切り文字をゼロバイトに設定\n" -#: help.c:116 +#: help.c:126 #, c-format msgid "" "\n" @@ -1894,38 +2310,36 @@ msgstr "" "\n" 
"接続オプション:\n" -#: help.c:119 +#: help.c:129 #, c-format msgid " -h, --host=HOSTNAME database server host or socket directory (default: \"%s\")\n" -msgstr " -h, --host=ホスト名 データベースサーバーのホストまたはソケットのディレクトリ(デフォルト: \"%s\")\n" +msgstr " -h, --host=ホスト名 データベースサーバーのホストまたはソケットのディレクトリ(デフォルト: \"%s\")\n" -#: help.c:120 +#: help.c:130 msgid "local socket" msgstr "ローカルソケット" -#: help.c:123 +#: help.c:133 #, c-format msgid " -p, --port=PORT database server port (default: \"%s\")\n" -msgstr " -p, --port=ポート番号 データベースサーバーのポート番号(デフォルト: \"%s\")\n" +msgstr " -p, --port=ポート番号 データベースサーバーのポート番号(デフォルト: \"%s\")\n" -#: help.c:129 +#: help.c:139 #, c-format msgid " -U, --username=USERNAME database user name (default: \"%s\")\n" -msgstr " -U, --username=ユーザー名 データベースのユーザ名(デフォルト: \"%s\")\n" +msgstr " -U, --username=ユーザー名 データベースのユーザ名(デフォルト: \"%s\")\n" -#: help.c:130 +#: help.c:140 #, c-format msgid " -w, --no-password never prompt for password\n" msgstr " -w, --no-password パスワード入力を要求しない\n" -#: help.c:131 +#: help.c:141 #, c-format msgid " -W, --password force password prompt (should happen automatically)\n" -msgstr "" -" -W, --password パスワードプロンプトを強制表示する\n" -" (本来は自動的に表示されるはずです)\n" +msgstr " -W, --password パスワードプロンプトの強制表示(本来は自動的に表示されるはず)\n" -#: help.c:133 +#: help.c:143 #, c-format msgid "" "\n" @@ -1935,487 +2349,1006 @@ msgid "" "\n" msgstr "" "\n" -"詳細は psql の中で \"\\?\" (内部コマンドの場合) または \"\\help\"\n" -"(SQL コマンドの場合) をタイプするか、PostgreSQL ドキュメントの psql の\n" +"詳細は psql の中で \"\\?\" (内部コマンドの場合)または \"\\help\"\n" +"(SQL コマンドの場合)をタイプするか、または PostgreSQL ドキュメント中の psql の\n" "セクションを参照のこと。\n" "\n" -#: help.c:136 +#: help.c:146 #, c-format msgid "Report bugs to .\n" -msgstr "不具合はまで報告してください。\n" +msgstr "不具合を見つけた場合、まで報告してください。\n" -#: help.c:157 +#: help.c:172 #, c-format msgid "General\n" msgstr "一般\n" -#: help.c:158 +#: help.c:173 #, c-format msgid " \\copyright show PostgreSQL usage and distribution terms\n" -msgstr " \\copyright PostgreSQL の使い方と配布条件を表示\n" +msgstr " \\copyright PostgreSQL の使い方と配布条件を表示します。\n" + +#: help.c:174 +#, c-format +msgid " \\crosstabview [COLUMNS] execute query and display results in crosstab\n" +msgstr " \\crosstabview [列数] 問い合わせを実行し、結果をクロスタブに表示します。\n" -#: help.c:159 +#: help.c:175 +#, c-format +msgid " \\errverbose show most recent error message at maximum verbosity\n" +msgstr " \\errverbose 最後に発生したエラーメッセージを冗長性最大で表示します。\n" + +#: help.c:176 #, c-format msgid " \\g [FILE] or ; execute query (and send results to file or |pipe)\n" -msgstr " \\g [ファイル] または ';' 問い合わせを実行(し、結果をファイルまたは |パイプ へ書き出す)\n" +msgstr " \\g [ファイル] または ; 問い合わせを実行(し、結果をファイルまたは |パイプ へ出力)します。\n" + +#: help.c:177 +#, c-format +msgid " \\gexec execute query, then execute each value in its result\n" +msgstr " \\gexec 問い合わせを実行し、結果の中の個々の値を実行します。\n" -#: help.c:160 +#: help.c:178 #, c-format msgid " \\gset [PREFIX] execute query and store results in psql variables\n" -msgstr "\\gset [PREFIX] 問い合わせを実行し結果をpsql変数に格納\n" +msgstr " \\gset [PREFIX] 問い合わせを実行して結果を psql 変数に格納します。\n" -#: help.c:161 +#: help.c:179 #, c-format -msgid " \\h [NAME] help on syntax of SQL commands, * for all commands\n" -msgstr " \\h [名前] SQL コマンドの文法ヘルプ、* で全コマンド\n" +msgid " \\gx [FILE] as \\g, but forces expanded output mode\n" +msgstr " \\gx [ファイル名] \\g と同じですが、拡張出力モードで実行します。\n" -#: help.c:162 +#: help.c:180 #, c-format msgid " \\q quit psql\n" -msgstr " \\q psql を終了する\n" +msgstr " \\q psql を終了します。\n" -#: help.c:163 +#: help.c:181 #, c-format msgid " \\watch [SEC] execute query every SEC seconds\n" -msgstr " \\watch [SEC] SEC秒毎に問い合わせを実行する\n" +msgstr " \\watch [秒数] 指定した秒数ごとに問い合わせを実行します。\n" -#: 
help.c:166 +#: help.c:184 +#, c-format +msgid "Help\n" +msgstr "ヘルプ\n" + +#: help.c:186 +#, c-format +msgid " \\? [commands] show help on backslash commands\n" +msgstr " \\? [コマンド] バックスラッシュコマンドのヘルプを表示します。\n" + +#: help.c:187 +#, c-format +msgid " \\? options show help on psql command-line options\n" +msgstr " \\? オプション psql のコマンドライン・オプションのヘルプを表示します。\n" + +#: help.c:188 +#, c-format +msgid " \\? variables show help on special variables\n" +msgstr " \\? 変数名 特殊変数のヘルプを表示します。\n" + +#: help.c:189 +#, c-format +msgid " \\h [NAME] help on syntax of SQL commands, * for all commands\n" +msgstr " \\h [名前] SQL コマンドの文法ヘルプの表示。* で全コマンドを表示します。\n" + +#: help.c:192 #, c-format msgid "Query Buffer\n" msgstr "問い合わせバッファ\n" -#: help.c:167 +#: help.c:193 #, c-format msgid " \\e [FILE] [LINE] edit the query buffer (or file) with external editor\n" -msgstr " \\e [ファイル] [行番号] 現在の問い合わせバッファ(やファイル)を外部エディタで編集する\n" +msgstr " \\e [ファイル] [行番号] 現在の問い合わせバッファ(やファイル)を外部エディタで編集します。\n" -#: help.c:168 +#: help.c:194 #, c-format msgid " \\ef [FUNCNAME [LINE]] edit function definition with external editor\n" -msgstr " \\ef [関数名 [行番号]] 関数定義を外部エディタで編集する\n" +msgstr " \\ef [関数名 [行番号]] 関数定義を外部エディタで編集します。\n" + +#: help.c:195 +#, c-format +msgid " \\ev [VIEWNAME [LINE]] edit view definition with external editor\n" +msgstr " \\ef [ビュー名 [行番号]] ビュー定義を外部エディタで編集します。\n" -#: help.c:169 +#: help.c:196 #, c-format msgid " \\p show the contents of the query buffer\n" -msgstr " \\p 問い合わせバッファの内容を表示する\n" +msgstr " \\p 問い合わせバッファの中身を表示します。\n" -#: help.c:170 +#: help.c:197 #, c-format msgid " \\r reset (clear) the query buffer\n" -msgstr " \\r 問い合わせバッファをリセット(クリア)する\n" +msgstr " \\r 問い合わせバッファをリセット(クリア)します。\n" -#: help.c:172 +#: help.c:199 #, c-format msgid " \\s [FILE] display history or save it to file\n" -msgstr " \\s [ファイル] ヒストリを表示またはファイルに保存する\n" +msgstr " \\s [ファイル] ヒストリを表示またはファイルに保存します。\n" -#: help.c:174 +#: help.c:201 #, c-format msgid " \\w FILE write query buffer to file\n" -msgstr " \\w ファイル 問い合わせバッファの内容をファイルに書き出す\n" +msgstr " \\w ファイル 問い合わせの中身をファイルに保存します。\n" -#: help.c:177 +#: help.c:204 #, c-format msgid "Input/Output\n" msgstr "入出力\n" -#: help.c:178 +#: help.c:205 #, c-format msgid " \\copy ... perform SQL COPY with data stream to the client host\n" -msgstr " \\copy ... クライアントホストに対し、データストリームを使ってSQLコピーを行う\n" +msgstr " \\copy ... 
クライアントホストに対し、データストリームを使って SQL コピーを行います。\n" -#: help.c:179 +#: help.c:206 #, c-format msgid " \\echo [STRING] write string to standard output\n" -msgstr " \\echo [文字列] 文字列を標準出力に書き出す\n" +msgstr " \\echo [文字列] 文字列を標準出力に書き出します。\n" -#: help.c:180 +#: help.c:207 #, c-format msgid " \\i FILE execute commands from file\n" -msgstr " \\i ファイル ファイルからコマンドを読み込んで実行する\n" +msgstr " \\i ファイル ファイルからコマンドを読み込んで実行します。\n" -#: help.c:181 +#: help.c:208 #, c-format msgid " \\ir FILE as \\i, but relative to location of current script\n" -msgstr " \\ir ファイル \\iと同じ。ただし現在のスクリプトの場所からの相対パス\n" +msgstr " \\ir ファイル \\i と同じ。ただし現在のスクリプト位置からの相対パスで指定します。\n" -#: help.c:182 +#: help.c:209 #, c-format msgid " \\o [FILE] send all query results to file or |pipe\n" -msgstr " \\o [ファイル] すべての問い合わせの結果をファイルまたは |パイプ へ送る\n" +msgstr " \\o [ファイル] 問い合わせ結果をすべてファイルまたは |パイプ へ送ります。\n" -#: help.c:183 +#: help.c:210 #, c-format msgid " \\qecho [STRING] write string to query output stream (see \\o)\n" -msgstr " \\qecho [文字列] 文字列を問い合わせ出力ストリームに出力(\\o を参照)\n" +msgstr " \\qecho [文字列] 文字列を問い合わせ出力ストリームに出力(\\o を参照)します。\n" -#: help.c:186 +#: help.c:213 +#, c-format +msgid "Conditional\n" +msgstr "条件分岐\n" + +#: help.c:214 +#, c-format +msgid " \\if EXPR begin conditional block\n" +msgstr " \\if EXPR 条件ブロックの開始\n" + +#: help.c:215 +#, c-format +msgid " \\elif EXPR alternative within current conditional block\n" +msgstr " \\elif EXPR 現在の条件ブロック内の代替条件\n" + +#: help.c:216 +#, c-format +msgid " \\else final alternative within current conditional block\n" +msgstr " \\else 現在の条件ブロックにおける最後の選択肢\n" + +#: help.c:217 +#, c-format +msgid " \\endif end conditional block\n" +msgstr " \\endif 条件ブロックの終了\n" + +#: help.c:220 #, c-format msgid "Informational\n" -msgstr "情報\n" +msgstr "情報表示\n" -#: help.c:187 +#: help.c:221 #, c-format msgid " (options: S = show system objects, + = additional detail)\n" -msgstr " (修飾子: S = システムオブジェクトを表示 + = 付加情報)\n" +msgstr " (オプション:S = システムオブジェクトの表示, + = 詳細表示)\n" -#: help.c:188 +#: help.c:222 #, c-format msgid " \\d[S+] list tables, views, and sequences\n" -msgstr " \\d[S+] テーブル、ビュー、シーケンスの一覧を表示する\n" +msgstr " \\d[S+] テーブル、ビュー、シーケンスの一覧を表示します。\n" -#: help.c:189 +#: help.c:223 #, c-format msgid " \\d[S+] NAME describe table, view, sequence, or index\n" -msgstr " \\d[S+] 名前 テーブル、ビュー、シーケンス、インデックスの説明を表示する\n" +msgstr " \\d[S+] 名前 テーブル、ビュー、シーケンス、インデックスの説明を表示します。\n" -#: help.c:190 +#: help.c:224 #, c-format msgid " \\da[S] [PATTERN] list aggregates\n" -msgstr " \\da[S] [パターン] 集約関数の一覧を表示する\n" +msgstr " \\da[S] [パターン] 集約関数一覧を表示します。\n" -#: help.c:191 +#: help.c:225 +#, c-format +msgid " \\dA[+] [PATTERN] list access methods\n" +msgstr " \\dA[+] [パターン] アクセスメソッド一覧を表示します。\n" + +#: help.c:226 #, c-format msgid " \\db[+] [PATTERN] list tablespaces\n" -msgstr " \\db[+] [パターン] テーブルスペースの一覧を表示する\n" +msgstr " \\db[+] [パターン] テーブル空間一覧を表示します。\n" -#: help.c:192 +#: help.c:227 #, c-format msgid " \\dc[S+] [PATTERN] list conversions\n" -msgstr " \\dc[S+] [パターン] 変換ルールの一覧を表示する\n" +msgstr " \\dc[S+] [パターン] 符号化方式一覧を表示します。\n" -#: help.c:193 +#: help.c:228 #, c-format msgid " \\dC[+] [PATTERN] list casts\n" -msgstr " \\dC[+] [パターン] キャストの一覧を表示する\n" +msgstr " \\dC[+] [パターン] キャスト一覧を表示します。\n" -#: help.c:194 +#: help.c:229 #, c-format msgid " \\dd[S] [PATTERN] show object descriptions not displayed elsewhere\n" -msgstr " \\dd[S] [パターン] 他では表示されないオブジェクトの説明を表示する\n" +msgstr " \\dd[S] [パターン] 他では表示されないオブジェクトの説明を表示します。\n" -#: help.c:195 +#: help.c:230 +#, c-format +msgid " \\dD[S+] [PATTERN] list domains\n" +msgstr " \\dD[S+] [パターン] ドメイン一覧を表示します。\n" + +#: help.c:231 #, c-format msgid " 
\\ddp [PATTERN] list default privileges\n" -msgstr " \\ddp [パターン] デフォルト権限の一覧を表示する\n" +msgstr " \\ddp [パターン] デフォルト権限一覧を表示します。\n" -#: help.c:196 +#: help.c:232 #, c-format -msgid " \\dD[S+] [PATTERN] list domains\n" -msgstr " \\dD[S+] [パターン] ドメインの一覧を表示する\n" +msgid " \\dE[S+] [PATTERN] list foreign tables\n" +msgstr " \\dE[S+] [パターン] 外部テーブル一覧を表示します。\n" -#: help.c:197 +#: help.c:233 #, c-format msgid " \\det[+] [PATTERN] list foreign tables\n" -msgstr " \\det[+] [パターン] 外部テーブルの一覧を表示する\n" +msgstr " \\det[+] [パターン] 外部テーブル一覧を表示します。\n" -#: help.c:198 +#: help.c:234 #, c-format msgid " \\des[+] [PATTERN] list foreign servers\n" -msgstr " \\des[+] [パターン] 外部サーバーの一覧を表示する\n" +msgstr " \\des[+] [パターン] 外部サーバー一覧を表示します。\n" -#: help.c:199 +#: help.c:235 #, c-format msgid " \\deu[+] [PATTERN] list user mappings\n" -msgstr " \\deu[+] [パターン] ユーザマッピングの一覧を表示する\n" +msgstr " \\deu[+] [パターン] ユーザマッピング一覧を表示します。\n" -#: help.c:200 +#: help.c:236 #, c-format msgid " \\dew[+] [PATTERN] list foreign-data wrappers\n" -msgstr " \\dew[+] [パターン] 外部データラッパーの一覧を表示する\n" +msgstr " \\dew[+] [パターン] 外部データラッパ一覧を表示します。\n" -#: help.c:201 +#: help.c:237 #, c-format msgid " \\df[antw][S+] [PATRN] list [only agg/normal/trigger/window] functions\n" -msgstr " \\df[antw][S+] [パターン] 関数(集約/通常/トリガー/ウィンドウのみ)の一覧を表示する\n" +msgstr " \\df[antw][S+] [パターン] (集約/通常/トリガー/ウィンドウ)関数(のみ)の一覧を表示します。\n" -#: help.c:202 +#: help.c:238 #, c-format msgid " \\dF[+] [PATTERN] list text search configurations\n" -msgstr " \\dF[+] [パターン] テキスト検索設定の一覧を表示する\n" +msgstr " \\dF[+] [パターン] テキスト検索設定一覧を表示します。\n" -#: help.c:203 +#: help.c:239 #, c-format msgid " \\dFd[+] [PATTERN] list text search dictionaries\n" -msgstr " \\dFd[+] [パターン] テキスト検索用辞書の一覧を表示する\n" +msgstr " \\dFd[+] [パターン] テキスト検索用辞書一覧を表示します。\n" -#: help.c:204 +#: help.c:240 #, c-format msgid " \\dFp[+] [PATTERN] list text search parsers\n" -msgstr " \\dFp[+] [パターン] テキスト検索用パーサーの一覧を表示する\n" +msgstr " \\dFp[+] [パターン] テキスト検索用パーサ一覧を表示します。\n" -#: help.c:205 +#: help.c:241 #, c-format msgid " \\dFt[+] [PATTERN] list text search templates\n" -msgstr " \\dFt[+] [パターン] テキスト検索用テンプレートの一覧を表示する\n" +msgstr " \\dFt[+] [パターン] テキスト検索用テンプレート一覧を表示します。\n" -#: help.c:206 +#: help.c:242 #, c-format -msgid " \\dg[+] [PATTERN] list roles\n" -msgstr " \\dg[+] [パターン] ロールの一覧を表示する\n" +msgid " \\dg[S+] [PATTERN] list roles\n" +msgstr " \\dg[S+] [パターン] ロール一覧を表示します。\n" -#: help.c:207 +#: help.c:243 #, c-format msgid " \\di[S+] [PATTERN] list indexes\n" -msgstr " \\di[S+] [パターン] インデックスの一覧を表示する\n" +msgstr " \\di[S+] [パターン] インデックス一覧を表示します。\n" -#: help.c:208 +#: help.c:244 #, c-format msgid " \\dl list large objects, same as \\lo_list\n" -msgstr " \\dl ラージオブジェクトの一覧を表示する。\\lo_list と同じ。\n" +msgstr " \\dl ラージオブジェクト一覧を表示します。\\lo_list と同じです。\n" -#: help.c:209 +#: help.c:245 #, c-format msgid " \\dL[S+] [PATTERN] list procedural languages\n" -msgstr " \\dL[S+] [パターン] 手続き言語の一覧を表示する\n" +msgstr " \\dL[S+] [パターン] 手続き言語一覧を表示します。\n" -#: help.c:210 +#: help.c:246 #, c-format msgid " \\dm[S+] [PATTERN] list materialized views\n" -msgstr " \\dm[S+] [パターン] マテリアライズドビューの一覧を表示する\n" +msgstr " \\dm[S+] [パターン] マテリアライズドビューの一覧を表示します。\n" -#: help.c:211 +#: help.c:247 #, c-format msgid " \\dn[S+] [PATTERN] list schemas\n" -msgstr " \\dn[S+] [パターン] スキーマの一覧を表示する\n" +msgstr " \\dn[S+] [パターン] スキーマ一覧を表示します。\n" -#: help.c:212 +#: help.c:248 #, c-format msgid " \\do[S] [PATTERN] list operators\n" -msgstr " \\do[S] [名前] 演算子の一覧を表示する\n" +msgstr " \\do[S] [名前] 演算子一覧を表示します。\n" -#: help.c:213 +#: help.c:249 #, c-format msgid " \\dO[S+] [PATTERN] list collations\n" -msgstr " \\dO[S+] [パターン] 
照合順序の一覧を表示する\n" +msgstr " \\dO[S+] [パターン] 照合順序一覧を表示します。\n" -#: help.c:214 +#: help.c:250 #, c-format msgid " \\dp [PATTERN] list table, view, and sequence access privileges\n" -msgstr " \\dp [パターン] テーブル、ビュー、シーケンスのアクセス権一覧を表示する\n" +msgstr " \\dp [パターン] テーブル、ビュー、シーケンスのアクセス権一覧を表示します。\n" -#: help.c:215 +#: help.c:251 #, c-format msgid " \\drds [PATRN1 [PATRN2]] list per-database role settings\n" -msgstr " \\drds [パターン1 [パターン2]] データベース毎のロール(ユーザー)設定の一覧を表示する\n" +msgstr " \\drds [パターン1 [パターン2]] データベース毎のロール設定一覧を表示します。\n" -#: help.c:216 +#: help.c:252 +#, c-format +msgid " \\dRp[+] [PATTERN] list replication publications\n" +msgstr " \\dRp[+] [パターン] レプリケーションのパブリケーション一覧を表示します。\n" + +#: help.c:253 +#, c-format +msgid " \\dRs[+] [PATTERN] list replication subscriptions\n" +msgstr " \\dRs[+] [パターン] レプリケーションのサブスクリプション一覧を表示します。\n" + +#: help.c:254 #, c-format msgid " \\ds[S+] [PATTERN] list sequences\n" -msgstr " \\ds[S+] [パターン] シーケンスの一覧を表示する\n" +msgstr " \\dc[S+] [パターン] 変換シーケンス一覧を表示します。\n" -#: help.c:217 +#: help.c:255 #, c-format msgid " \\dt[S+] [PATTERN] list tables\n" -msgstr " \\dt[S+] [パターン] テーブルの一覧を表示する\n" +msgstr " \\ds[S+] [パターン] テーブル一覧を表示します。\n" -#: help.c:218 +#: help.c:256 #, c-format msgid " \\dT[S+] [PATTERN] list data types\n" -msgstr " \\dT[S+] [パターン] データ型の一覧を表示する\n" +msgstr " \\dt[S+] [パターン] データ型一覧を表示します。\n" -#: help.c:219 +#: help.c:257 #, c-format -msgid " \\du[+] [PATTERN] list roles\n" -msgstr " \\du[+] [パターン] ロールの一覧を表示する\n" +msgid " \\du[S+] [PATTERN] list roles\n" +msgstr " \\du[S+] [パターン] ロール一覧を表示します。\n" -#: help.c:220 +#: help.c:258 #, c-format msgid " \\dv[S+] [PATTERN] list views\n" -msgstr " \\dv[S+] [パターン] ビューの一覧を表示する\n" - -#: help.c:221 -#, c-format -msgid " \\dE[S+] [PATTERN] list foreign tables\n" -msgstr " \\dE[S+] [パターン] 外部テーブルの一覧を表示する\n" +msgstr " \\dv[S+] [パターン] ビュー一覧を表示します。\n" -#: help.c:222 +#: help.c:259 #, c-format msgid " \\dx[+] [PATTERN] list extensions\n" -msgstr " \\dx[+] [パターン] 拡張の一覧を表示する\n" +msgstr " \\dx[+] [パターン] 拡張一覧を表示します。\n" -#: help.c:223 +#: help.c:260 #, c-format msgid " \\dy [PATTERN] list event triggers\n" -msgstr " \\dy [パターン] イベントトリガの一覧を表示する\n" +msgstr " \\dy [パターン] イベントトリガー一覧を表示します。\n" -#: help.c:224 +#: help.c:261 #, c-format msgid " \\l[+] [PATTERN] list databases\n" -msgstr " \\l[+] [パターン] データベースの一覧を表示する\n" +msgstr " \\l[+] [パターン] データベース一覧を表示します。\n" -#: help.c:225 +#: help.c:262 #, c-format -msgid " \\sf[+] FUNCNAME show a function's definition\n" -msgstr " \\sf[+] 関数名 関数定義を表示する\n" +msgid " \\sf[+] FUNCNAME show a function's definition\n" +msgstr " \\sf[+] 関数名 関数定義を表示します。\n" -#: help.c:226 +#: help.c:263 +#, c-format +msgid " \\sv[+] VIEWNAME show a view's definition\n" +msgstr " \\sv[+] ビュー名 ビュー定義を表示します。\n" + +#: help.c:264 #, c-format msgid " \\z [PATTERN] same as \\dp\n" -msgstr " \\z [パターン] \\dp と同じ\n" +msgstr " \\z [パターン] \\dp と同じです。\n" -#: help.c:229 +#: help.c:267 #, c-format msgid "Formatting\n" msgstr "書式設定\n" -#: help.c:230 +#: help.c:268 #, c-format msgid " \\a toggle between unaligned and aligned output mode\n" -msgstr " \\a 出力モードの 'unaligned' / 'aligned' を切り替える\n" +msgstr " \\a 出力モード(unaligned / aligned)を切り替えます。\n" -#: help.c:231 +#: help.c:269 #, c-format msgid " \\C [STRING] set table title, or unset if none\n" -msgstr " \\C タイトル テーブルのタイトルを設定する。指定がなければ解除\n" +msgstr " \\C [文字列] テーブルのタイトル設定。指定がなければ解除します。\n" -#: help.c:232 +#: help.c:270 #, c-format msgid " \\f [STRING] show or set field separator for unaligned query output\n" -msgstr " \\f [文字列] 桁揃えを行わない(unaligned)問い合わせ出力におけるフィールド区切り文字を表示または設定\n" +msgstr " \\f [文字列] 
桁揃えなしの問い合わせ出力で使われるフィールド区切り文字を表示または設定します。\n" -#: help.c:233 +#: help.c:271 #, c-format msgid " \\H toggle HTML output mode (currently %s)\n" -msgstr " \\H HTML の出力モードを切り替える(現在: %s)\n" +msgstr " \\H HTML の出力モードを切り替えます(現在: %s)\n" -#: help.c:235 +#: help.c:273 #, c-format msgid "" " \\pset [NAME [VALUE]] set table output option\n" -" (NAME := {format|border|expanded|fieldsep|fieldsep_zero|footer|null|\n" -" numericlocale|recordsep|recordsep_zero|tuples_only|title|tableattr|pager})\n" +" (NAME := {border|columns|expanded|fieldsep|fieldsep_zero|\n" +" footer|format|linestyle|null|numericlocale|pager|\n" +" pager_min_lines|recordsep|recordsep_zero|tableattr|title|\n" +" tuples_only|unicode_border_linestyle|\n" +" unicode_column_linestyle|unicode_header_linestyle})\n" msgstr "" -" \\pset [名前 [値]] テーブル出力のオプションを設定する\n" -" (名前 := {format|border|expanded|fieldsep|fieldsep_zero|footer|null|\n" -" numericlocale|recordsep|recordsep_zero|tuples_only|title|tableattr|pager})\n" +" \\pset [名前 [値]] テーブル出力のオプション設定\n" +" (名前 := {border|columns|expanded|fieldsep|fieldsep_zero|\n" +" footer|format|linestyle|null|numericlocale|pager|\n" +" pager_min_lines|recordsep|recordsep_zero|tableattr|title|\n" +" tuples_only|unicode_border_linestyle|\n" +" unicode_column_linestyle|unicode_header_linestyle})\n" -#: help.c:238 +#: help.c:279 #, c-format msgid " \\t [on|off] show only rows (currently %s)\n" -msgstr " \\t [on|off] 行のみを表示するか? (現在: %s)\n" +msgstr " \\t [on|off] 行のみ表示モード(現在: %s)\n" -#: help.c:240 +#: help.c:281 #, c-format msgid " \\T [STRING] set HTML
<table> tag attributes, or unset if none\n" -msgstr " \\T [文字列] HTML の <table> タグの属性をセット。引数がなければ解除\n" +msgstr " \\T [文字列] HTML の <table>
タグ属性のセット。引数がなければ解除します。\n" -#: help.c:241 +#: help.c:282 #, c-format msgid " \\x [on|off|auto] toggle expanded output (currently %s)\n" -msgstr " \\x [on|off|auto] 拡張出力の切り替え(現在: %s)\n" +msgstr " \\x [on|off|auto] 拡張出力の切り替え(現在: %s)\n" -#: help.c:245 +#: help.c:286 +#, c-format +msgid "Connection\n" +msgstr "接続関連\n" + +#: help.c:288 +#, c-format +msgid "" +" \\c[onnect] {[DBNAME|- USER|- HOST|- PORT|-] | conninfo}\n" +" connect to new database (currently \"%s\")\n" +msgstr "" +" \\c[onnect] {[DB名|- ユーザ名|- ホスト名|- ポート番号|-] | 接続文字列}\n" +" 新しいデータベースに接続します(現在: \"%s\")\n" + +#: help.c:292 +#, c-format +msgid "" +" \\c[onnect] {[DBNAME|- USER|- HOST|- PORT|-] | conninfo}\n" +" connect to new database (currently no connection)\n" +msgstr "" +" \\c[onnect] {[DB名|- ユーザ名|- ホスト名|- ポート番号|-] | 接続文字列}\n" +" 新しいデータベースに接続します(現在: 未接続)\n" + +#: help.c:294 +#, c-format +msgid " \\conninfo display information about current connection\n" +msgstr " \\conninfo 現在の接続に関する情報を表示します。\n" + +#: help.c:295 +#, c-format +msgid " \\encoding [ENCODING] show or set client encoding\n" +msgstr " \\encoding [エンコーディング] クライアントのエンコーディングを表示または設定します。\n" + +#: help.c:296 +#, c-format +msgid " \\password [USERNAME] securely change the password for a user\n" +msgstr " \\password [ユーザ名] ユーザのパスワードを安全に変更します。\n" + +#: help.c:299 +#, c-format +msgid "Operating System\n" +msgstr "オペレーティングシステム\n" + +#: help.c:300 +#, c-format +msgid " \\cd [DIR] change the current working directory\n" +msgstr " \\cd [DIR] カレントディレクトリを変更します。\n" + +#: help.c:301 +#, c-format +msgid " \\setenv NAME [VALUE] set or unset environment variable\n" +msgstr " \\setenv 名前 [値] 環境変数を設定または解除します。\n" + +#: help.c:302 +#, c-format +msgid " \\timing [on|off] toggle timing of commands (currently %s)\n" +msgstr " \\timing [on|off] コマンドのタイミングを切り替えます(現在: %s)\n" + +#: help.c:304 +#, c-format +msgid " \\! [COMMAND] execute command in shell or start interactive shell\n" +msgstr " \\! 
[コマンド] シェルでコマンドを実行するか、もしくは会話型シェルを起動します。\n" + +#: help.c:307 +#, c-format +msgid "Variables\n" +msgstr "変数\n" + +#: help.c:308 +#, c-format +msgid " \\prompt [TEXT] NAME prompt user to set internal variable\n" +msgstr " \\prompt [テキスト] 変数名 ユーザに対して内部変数のセットを促します。\n" + +#: help.c:309 +#, c-format +msgid " \\set [NAME [VALUE]] set internal variable, or list all if no parameters\n" +msgstr " \\set [変数名 [値]] 内部変数の値を設定します。引数がない場合は一覧を表示します。\n" + +#: help.c:310 +#, c-format +msgid " \\unset NAME unset (delete) internal variable\n" +msgstr " \\unset 変数名 内部変数を削除します。\n" + +#: help.c:313 +#, c-format +msgid "Large Objects\n" +msgstr "ラージ オブジェクト\n" + +#: help.c:314 +#, c-format +msgid "" +" \\lo_export LOBOID FILE\n" +" \\lo_import FILE [COMMENT]\n" +" \\lo_list\n" +" \\lo_unlink LOBOID large object operations\n" +msgstr "" +" \\lo_export LOBOID ファイル名\n" +" \\lo_import ファイル名 [コメント]\n" +" \\lo_list\n" +" \\lo_unlink LOBOID ラージオブジェクトの操作\n" + +#: help.c:341 +#, c-format +msgid "" +"List of specially treated variables\n" +"\n" +msgstr "" +"特殊な扱いをする変数の一覧\n" +"\n" + +#: help.c:343 +#, c-format +msgid "psql variables:\n" +msgstr "psql 変数:\n" + +#: help.c:345 +#, c-format +msgid "" +" psql --set=NAME=VALUE\n" +" or \\set NAME VALUE inside psql\n" +"\n" +msgstr "" +" psql --set=名前=値\n" +" または psql に入ってから \\set 名前 値\n" +"\n" + +#: help.c:347 +#, c-format +msgid " AUTOCOMMIT if set, successful SQL commands are automatically committed\n" +msgstr " AUTOCOMMIT セットされている場合、SQL コマンドが成功したら自動的にコミットします。\n" + +#: help.c:348 +#, c-format +msgid "" +" COMP_KEYWORD_CASE determines the case used to complete SQL key words\n" +" [lower, upper, preserve-lower, preserve-upper]\n" +msgstr "" +" COMP_KEYWORD_CASE 有効な SQL キーワードとみなす大文字小文字のルール\n" +" [lower, upper, preserve-lower, preserve-upper]\n" + +#: help.c:350 +#, c-format +msgid " DBNAME the currently connected database name\n" +msgstr " DBNAME 現在接続中のデータベース名\n" + +#: help.c:351 +#, c-format +msgid "" +" ECHO controls what input is written to standard output\n" +" [all, errors, none, queries]\n" +msgstr "" +" ECHO 標準出力への出力対象とする入力のタイプを設定します。\n" +" [all, errors, none, queries]\n" + +#: help.c:353 +#, c-format +msgid "" +" ECHO_HIDDEN if set, display internal queries executed by backslash commands;\n" +" if set to \"noexec\", just show without execution\n" +msgstr "" +" ECHO_HIDDEN セットされている場合、バックスラッシュコマンドにより実行される内部問い合わせを表示します;\n" +" \"noexec\" にセットした場合、これらを単に表示するだけで、実際には実行しません。\n" + +#: help.c:355 +#, c-format +msgid " ENCODING current client character set encoding\n" +msgstr " ENCODING 現在のクライアント側の文字セットのエンコーディング\n" + +#: help.c:356 +#, c-format +msgid "" +" FETCH_COUNT the number of result rows to fetch and display at a time\n" +" (default: 0=unlimited)\n" +msgstr "" +" FETCH_COUNT 取得および表示する結果セットの一回分の行数\n" +" (デフォルト:0 = 無制限)\n" + +#: help.c:358 +#, c-format +msgid " HISTCONTROL controls command history [ignorespace, ignoredups, ignoreboth]\n" +msgstr " HISTCONTROL コマンドヒストリーのコントロール [ignorespace, ignoredups, ignoreboth]\n" + +#: help.c:359 +#, c-format +msgid " HISTFILE file name used to store the command history\n" +msgstr " HISTFILE コマンドヒストリーを保存するのに使うファイル名\n" + +#: help.c:360 +#, c-format +msgid " HISTSIZE max number of commands to store in the command history\n" +msgstr " HISTSIZE コマンドヒストリーに保存するコマンドの最大数\n" + +#: help.c:361 +#, c-format +msgid " HOST the currently connected database server host\n" +msgstr " HOST 現在接続中のデータベースサーバーホスト\n" + +#: help.c:362 +#, c-format +msgid " IGNOREEOF number of EOFs needed to terminate an interactive session\n" +msgstr " IGNOREEOF 会話形セッションを終わらせるのに必要な EOF 
の数\n" + +#: help.c:363 +#, c-format +msgid " LASTOID value of the last affected OID\n" +msgstr " LASTOID 最後に変更の影響を受けた OID の値\n" + +#: help.c:364 +#, c-format +msgid " ON_ERROR_ROLLBACK if set, an error doesn't stop a transaction (uses implicit savepoints)\n" +msgstr " ON_ERROR_ROLLBACK セットされている場合、エラー発生時でもトランザクションを停止しません(暗黙のセーブポイントを使用します)\n" + +#: help.c:365 +#, c-format +msgid " ON_ERROR_STOP stop batch execution after error\n" +msgstr " ON_ERROR_STOP エラー発生後にバッチの実行を停止します。\n" + +#: help.c:366 +#, c-format +msgid " PORT server port of the current connection\n" +msgstr " PORT 現在の接続のサーバーポート\n" + +#: help.c:367 +#, c-format +msgid " PROMPT1 specifies the standard psql prompt\n" +msgstr " PROMPT1 psql の標準のプロンプトを指定します。\n" + +#: help.c:368 +#, c-format +msgid " PROMPT2 specifies the prompt used when a statement continues from a previous line\n" +msgstr " PROMPT2 ステートメントが前行から継続する場合のプロンプトを指定します。\n" + +#: help.c:369 +#, c-format +msgid " PROMPT3 specifies the prompt used during COPY ... FROM STDIN\n" +msgstr " PROMPT3 COPY ... FROM STDIN の最中に使われるプロンプトを指定します。\n" + +#: help.c:370 +#, c-format +msgid " QUIET run quietly (same as -q option)\n" +msgstr " QUIET メッセージを表示しません(-q オプションと同じ)\n" + +#: help.c:371 +#, c-format +msgid " SERVER_VERSION_NAME server's version (short string)\n" +msgstr " SERVER_VERSION_NAME サーバのバージョン名 (短い文字列)\n" + +#: help.c:372 +#, c-format +msgid " SERVER_VERSION_NUM server's version (numeric format)\n" +msgstr " SERVER_VERSION_NUM サーバのバージョン (数値書式)\n" + +#: help.c:373 +#, c-format +msgid " SHOW_CONTEXT controls display of message context fields [never, errors, always]\n" +msgstr " SHOW_CONTEXT メッセージコンテキストフィールドの表示をコントロール [never, errors, always]\n" + +#: help.c:374 +#, c-format +msgid " SINGLELINE end of line terminates SQL command mode (same as -S option)\n" +msgstr " SINGLELINE 行末で SQL コマンドを終端する(-S オプションと同じ)\n" + +#: help.c:375 +#, c-format +msgid " SINGLESTEP single-step mode (same as -s option)\n" +msgstr " SINGLESTEP シングルステップモード(-s オプションと同じ)\n" + +#: help.c:376 +#, c-format +msgid " USER the currently connected database user\n" +msgstr " USER 現在接続中のデータベース ユーザー\n" + +#: help.c:377 +#, c-format +msgid " VERBOSITY controls verbosity of error reports [default, verbose, terse]\n" +msgstr " VERBOSITY エラー報告を冗長性をコントロール [default, verbose, terse]\n" + +#: help.c:378 +#, c-format +msgid " VERSION psql's version (verbose string)\n" +msgstr " VERSION psql のバージョン (詳細な文字列)\n" + +#: help.c:379 +#, c-format +msgid " VERSION_NAME psql's version (short string)\n" +msgstr " VERSION_NAME psql のバージョン (短い文字列)\n" + +#: help.c:380 +#, c-format +msgid " VERSION_NUM psql's version (numeric format)\n" +msgstr " VERSION_NUM psql のバージョン (数値フォーマット)\n" + +#: help.c:382 +#, c-format +msgid "" +"\n" +"Display settings:\n" +msgstr "" +"\n" +"表示設定:\n" + +#: help.c:384 +#, c-format +msgid "" +" psql --pset=NAME[=VALUE]\n" +" or \\pset NAME [VALUE] inside psql\n" +"\n" +msgstr "" +" psql --pset=名前[=値]\n" +" または psql に入ってから \\pset 名前 [値]\n" +"\n" + +#: help.c:386 +#, c-format +msgid " border border style (number)\n" +msgstr " border 境界線のスタイル(数字)\n" + +#: help.c:387 +#, c-format +msgid " columns target width for the wrapped format\n" +msgstr " columns 折り返し表示をする際の横幅\n" + +#: help.c:388 +#, c-format +msgid " expanded (or x) expanded output [on, off, auto]\n" +msgstr " expanded (or x) 拡張出力 [on, off, auto]\n" + +#: help.c:389 +#, c-format +msgid " fieldsep field separator for unaligned output (default \"%s\")\n" +msgstr " fieldsep 桁揃えなし出力時のフィールド区切り文字(デフォルトは \"%s\")\n" + +#: help.c:390 +#, c-format +msgid " fieldsep_zero set 
field separator for unaligned output to zero byte\n" +msgstr " fieldsep_zero 桁揃えなし出力時のフィールド区切り文字をゼロバイトに設定\n" + +#: help.c:391 +#, c-format +msgid " footer enable or disable display of the table footer [on, off]\n" +msgstr " footer テーブルフッター出力の要否を設定 [on, off]\n" + +#: help.c:392 +#, c-format +msgid " format set output format [unaligned, aligned, wrapped, html, asciidoc, ...]\n" +msgstr " format 出力フォーマットを設定 [unaligned, aligned, wrapped, html, asciidoc, ...]\n" + +#: help.c:393 +#, c-format +msgid " linestyle set the border line drawing style [ascii, old-ascii, unicode]\n" +msgstr " linestyle 境界線の描画スタイルを設定 [ascii, old-ascii, unicode]\n" + +#: help.c:394 +#, c-format +msgid " null set the string to be printed in place of a null value\n" +msgstr " null null 値の所に表示する文字列を設定\n" + +#: help.c:395 +#, c-format +msgid "" +" numericlocale enable or disable display of a locale-specific character to separate\n" +" groups of digits [on, off]\n" +msgstr "" +" numericlocale つながった数字を区切るためにロケール固有文字を表示するかどうか [on, off]\n" + +#: help.c:397 +#, c-format +msgid " pager control when an external pager is used [yes, no, always]\n" +msgstr " pager 外部ページャーを使うかどうか [yes, no, always]\n" + +#: help.c:398 +#, c-format +msgid " recordsep record (line) separator for unaligned output\n" +msgstr " recordsep 桁揃えなし出力時のレコード(行)区切り文字\n" + +#: help.c:399 +#, c-format +msgid " recordsep_zero set record separator for unaligned output to zero byte\n" +msgstr " recordsep_zero 桁揃えなし出力時のレコード区切り文字をゼロバイトに設定\n" + +#: help.c:400 +#, c-format +msgid "" +" tableattr (or T) specify attributes for table tag in html format or proportional\n" +" column widths for left-aligned data types in latex-longtable format\n" +msgstr "" +" tableattr (or T) HTML フォーマット時の table タグの属性、もしくは latex-longtable\n" +" フォーマット時に左寄せするデータ型のプロポーショナル表示用のカラム幅を指定\n" + +#: help.c:402 +#, c-format +msgid " title set the table title for any subsequently printed tables\n" +msgstr " title これ以降に表示される表の table タイトルを設定\n" + +#: help.c:403 +#, c-format +msgid " tuples_only if set, only actual table data is shown\n" +msgstr " tuples_only セットされている場合、実際のテーブルデータのみを表示する\n" + +#: help.c:404 #, c-format -msgid "Connection\n" -msgstr "接続\n" +msgid "" +" unicode_border_linestyle\n" +" unicode_column_linestyle\n" +" unicode_header_linestyle\n" +" set the style of Unicode line drawing [single, double]\n" +msgstr "" +" unicode_border_linestyle\n" +" unicode_column_linestyle\n" +" unicode_header_linestyle\n" +" Unicode による線描画時のスタイルを設定 [single, double]\n" -#: help.c:247 +#: help.c:409 #, c-format msgid "" -" \\c[onnect] {[DBNAME|- USER|- HOST|- PORT|-] | conninfo}\n" -" connect to new database (currently \"%s\")\n" +"\n" +"Environment variables:\n" msgstr "" -" \\c[onnect] {[DB名|- ユーザ名|- ホスト名|- ポート番号|-] | conninfo}\n" -" 新しいデータベースに接続する (現在: \"%s\")\n" +"\n" +"環境変数:\n" -#: help.c:251 +#: help.c:413 #, c-format msgid "" -" \\c[onnect] {[DBNAME|- USER|- HOST|- PORT|-] | conninfo}\n" -" connect to new database (currently no connection)\n" +" NAME=VALUE [NAME=VALUE] psql ...\n" +" or \\setenv NAME [VALUE] inside psql\n" +"\n" msgstr "" -" \\c[onnect] {[DB名|- ユーザ名|- ホスト名|- ポート番号|-] | conninfo}\n" -" 新しいデータベースに接続する (現在: 接続無し)\n" +" 名前=値 [名前=値] psql ...\n" +" または、psql に入ってから \\setenv 名前 [値]\n" +"\n" -#: help.c:253 +#: help.c:415 #, c-format -msgid " \\encoding [ENCODING] show or set client encoding\n" +msgid "" +" set NAME=VALUE\n" +" psql ...\n" +" or \\setenv NAME [VALUE] inside psql\n" +"\n" msgstr "" -" \\encoding [エンコーディング]\n" -" クライアントのエンコーディングを表示またはセット\n" +" set 名前=値\n" +" psql ...\n" +" または、psql に入ってから 
\\setenv 名前 [値]\n" +"\n" -#: help.c:254 +#: help.c:418 #, c-format -msgid " \\password [USERNAME] securely change the password for a user\n" -msgstr " \\password [ユーザ名] ユーザのパスワードを安全に変更する\n" +msgid " COLUMNS number of columns for wrapped format\n" +msgstr " COLUMNS 折り返し書式におけるカラム数\n" -#: help.c:255 +#: help.c:419 #, c-format -msgid " \\conninfo display information about current connection\n" -msgstr " \\conninfo 現在の接続に関する情報を表示する\n" +msgid " PAGER name of external pager program\n" +msgstr " PAGER 外部ページャープログラムの名前\n" -#: help.c:258 +#: help.c:420 #, c-format -msgid "Operating System\n" -msgstr "オペレーティングシステム\n" +msgid " PGAPPNAME same as the application_name connection parameter\n" +msgstr " PGAPPNAME application_name 接続パラメーターと同じ\n" -#: help.c:259 +#: help.c:421 #, c-format -msgid " \\cd [DIR] change the current working directory\n" -msgstr " \\cd [DIR] カレントディレクトリを変更\n" +msgid " PGDATABASE same as the dbname connection parameter\n" +msgstr " PGDATABASE dbname 接続パラメーターと同じ\n" -#: help.c:260 +#: help.c:422 #, c-format -msgid " \\setenv NAME [VALUE] set or unset environment variable\n" -msgstr " \\setenv NAME [VALUE] 環境変数の設定、設定解除を行う\n" +msgid " PGHOST same as the host connection parameter\n" +msgstr " PGHOST host 接続パラメーターと同じ\n" -#: help.c:261 +#: help.c:423 #, c-format -msgid " \\timing [on|off] toggle timing of commands (currently %s)\n" -msgstr " \\timing [on|off] コマンドのタイミングを切り替える(現在: %s)\n" +msgid " PGPASSWORD connection password (not recommended)\n" +msgstr " PGPASSWORD 接続用パスワード(推奨されません)\n" -#: help.c:263 +#: help.c:424 #, c-format -msgid " \\! [COMMAND] execute command in shell or start interactive shell\n" -msgstr " \\! [コマンド] シェルでコマンドを実行、もしくは会話型シェルを起動\n" +msgid " PGPASSFILE password file name\n" +msgstr " PGPASSFILE パスワードファイルのパスワード ファイル名\n" -#: help.c:266 +#: help.c:425 #, c-format -msgid "Variables\n" -msgstr "変数\n" +msgid " PGPORT same as the port connection parameter\n" +msgstr " PGPORT port 接続パラメーターと同じ\n" -#: help.c:267 +#: help.c:426 #, c-format -msgid " \\prompt [TEXT] NAME prompt user to set internal variable\n" -msgstr " \\prompt [テキスト] 変数名 ユーザに内部変数をセットするよう促す\n" +msgid " PGUSER same as the user connection parameter\n" +msgstr " PGUSER user 接続パラメーターと同じ\n" -#: help.c:268 +#: help.c:427 #, c-format -msgid " \\set [NAME [VALUE]] set internal variable, or list all if no parameters\n" +msgid "" +" PSQL_EDITOR, EDITOR, VISUAL\n" +" editor used by the \\e, \\ef, and \\ev commands\n" msgstr "" -" \\set [変数名 [値]]\n" -" 内部変数の値をセット。引数がない場合は一覧表示。\n" +" PSQL_EDITOR, EDITOR, VISUAL\n" +" \\e, \\ef, \\ev コマンドで使われるエディター\n" -#: help.c:269 +#: help.c:429 #, c-format -msgid " \\unset NAME unset (delete) internal variable\n" -msgstr " \\unset 変数名 内部変数を削除する\n" +msgid "" +" PSQL_EDITOR_LINENUMBER_ARG\n" +" how to specify a line number when invoking the editor\n" +msgstr "" +" PSQL_EDITOR_LINENUMBER_ARG\n" +" エディターの起動時に行番号を指定する方法\n" -#: help.c:272 +#: help.c:431 #, c-format -msgid "Large Objects\n" -msgstr "ラージオブジェクト\n" +msgid " PSQL_HISTORY alternative location for the command history file\n" +msgstr " PSQL_HISTORY コマンドラインヒストリー保存用ファイルの場所\n" -#: help.c:273 +#: help.c:432 #, c-format -msgid "" -" \\lo_export LOBOID FILE\n" -" \\lo_import FILE [COMMENT]\n" -" \\lo_list\n" -" \\lo_unlink LOBOID large object operations\n" -msgstr "" -" \\lo_export LOBOID ファイル名\n" -" \\lo_import ファイル名 [コメント]\n" -" \\lo_list\n" -" \\lo_unlink LOBOID ラージオブジェクトの操作\n" +msgid " PSQLRC alternative location for the user's .psqlrc file\n" +msgstr " PSQLRC ユーザーの .psqlrc ファイルの場所\n" -#: help.c:320 +#: help.c:433 +#, c-format +msgid " SHELL 
shell used by the \\! command\n" +msgstr " SHELL \\! コマンドで使われるシェル\n" + +#: help.c:434 +#, c-format +msgid " TMPDIR directory for temporary files\n" +msgstr " TMPDIR テンポラリファイル用ディレクトリ\n" + +#: help.c:477 msgid "Available help:\n" msgstr "利用可能なヘルプ:\n" -#: help.c:404 +#: help.c:561 #, c-format msgid "" "Command: %s\n" @@ -2424,69 +3357,82 @@ msgid "" "%s\n" "\n" msgstr "" -"コマンド: %s\n" -"説明: %s\n" +"コマンド: %s\n" +"説明: %s\n" "書式:\n" "%s\n" "\n" -#: help.c:420 +#: help.c:577 #, c-format msgid "" "No help available for \"%s\".\n" "Try \\h with no arguments to see available help.\n" msgstr "" -"\"%s\" についてはヘルプ情報がありません。\n" +"\"%s\" のヘルプがありません。\n" "引数なしで \\h とタイプすると、ヘルプの一覧が表示されます。\n" -#: input.c:194 +#: input.c:216 #, c-format msgid "could not read from input file: %s\n" msgstr "入力ファイルから読み込めませんでした: %s\n" -#: input.c:446 input.c:485 +#: input.c:471 input.c:510 #, c-format msgid "could not save history to file \"%s\": %s\n" -msgstr "ファイル \"%s\" にヒストリを保存できませんでした: %s\n" +msgstr "ファイル \"%s\" にヒストリーを保存できませんでした: %s\n" -#: input.c:505 +#: input.c:530 #, c-format msgid "history is not supported by this installation\n" -msgstr "この環境ではヒストリ機能をサポートしていません。\n" +msgstr "この環境ではヒストリー機能がサポートされていません。\n" -#: large_obj.c:66 +#: large_obj.c:64 #, c-format msgid "%s: not connected to a database\n" -msgstr "%s: どのデータベースにも接続されていません\n" +msgstr "%s: データベースに接続していません。\n" -#: large_obj.c:85 +#: large_obj.c:83 #, c-format msgid "%s: current transaction is aborted\n" -msgstr "%s: トランザクションを中断しました\n" +msgstr "%s: 現在のトランザクションは中止されます。\n" -#: large_obj.c:88 +#: large_obj.c:86 #, c-format msgid "%s: unknown transaction status\n" -msgstr "%s: トランザクションの状態が不明です。\n" +msgstr "%s: 不明なトランザクションの状態\n" -#: large_obj.c:289 large_obj.c:300 +#: large_obj.c:287 large_obj.c:298 msgid "ID" msgstr "ID" -#: large_obj.c:310 +#: large_obj.c:308 msgid "Large objects" -msgstr "ラージオブジェクト" +msgstr "ラージ オブジェクト" + +#: mainloop.c:136 +#, c-format +msgid "\\if: escaped\n" +msgstr "\\if: ブロックを抜けました。\n" -#: mainloop.c:159 +#: mainloop.c:183 #, c-format msgid "Use \"\\q\" to leave %s.\n" msgstr "\"\\q\" で %s を抜けます。\n" -#: mainloop.c:189 +#: mainloop.c:205 +msgid "" +"The input is a PostgreSQL custom-format dump.\n" +"Use the pg_restore command-line client to restore this dump to a database.\n" +msgstr "" +"この入力データは PostgreSQL のカスタムフォーマットのダンプです。\n" +"このダンプをデータベースにリストアするには pg_restore コマンドを使ってください。\n" + +#: mainloop.c:225 msgid "You are using psql, the command-line interface to PostgreSQL." -msgstr "PostgreSQL へのコマンドライン・インターフェース、psql へようこそ。" +msgstr "PostgreSQL へのコマンド ライン インターフェイス、psql を使用しています。" -#: mainloop.c:190 +#: mainloop.c:226 #, c-format msgid "" "Type: \\copyright for distribution terms\n" @@ -2495,1968 +3441,2247 @@ msgid "" " \\g or terminate with semicolon to execute query\n" " \\q to quit\n" msgstr "" -" \\copyright とタイプすると、配布条件を表示します。\n" +"ヒント: \\copyright とタイプすると、配布条件を表示します。\n" " \\h とタイプすると、SQL コマンドのヘルプを表示します。\n" " \\? 
とタイプすると、psql コマンドのヘルプを表示します。\n" " \\g と打つかセミコロンで閉じると、問い合わせを実行します。\n" " \\q で終了します。\n" -#: print.c:272 -#, c-format -msgid "(%lu row)" -msgid_plural "(%lu rows)" -msgstr[0] "(%lu 行)" -msgstr[1] "(%lu 行)" - -#: print.c:1174 -#, c-format -msgid "(No rows)\n" -msgstr "(行がありません)\n" - -#: print.c:2238 -#, c-format -msgid "Interrupted\n" -msgstr "中断されました\n" - -#: print.c:2304 -#, c-format -msgid "Cannot add header to table content: column count of %d exceeded.\n" -msgstr "テーブルの内容に見出しを追加できませんでした:列数 %d が制限を越えています。\n" - -#: print.c:2344 +#: mainloop.c:339 mainloop.c:476 #, c-format -msgid "Cannot add cell to table content: total cell count of %d exceeded.\n" -msgstr "テーブルの内容にセルを追加できませんでした:全セル数 %d が制限を越えています。\n" - -#: print.c:2570 -#, c-format -msgid "invalid output format (internal error): %d" -msgstr "出力フォーマットが無効(内部エラー):%d" +msgid "query ignored; use \\endif or Ctrl-C to exit current \\if block\n" +msgstr "問い合わせは無視されました。\\endif か Ctrl-C で現在の \\if ブロックを抜けてください\n" -#: psqlscan.l:727 +#: mainloop.c:494 #, c-format -msgid "skipping recursive expansion of variable \"%s\"\n" -msgstr "変数\"%s\"の再帰展開をスキップしています\n" +msgid "reached EOF without finding closing \\endif(s)\n" +msgstr "ブロックを閉じる \\endif を検索中に、ファイルの終端(EOF)に達しました。\n" -#: psqlscan.l:1604 +#: psqlscanslash.l:615 #, c-format msgid "unterminated quoted string\n" -msgstr "文字列の引用符が閉じていません\n" +msgstr "文字列の引用符が閉じていません。\n" -#: psqlscan.l:1704 +#: psqlscanslash.l:788 #, c-format msgid "%s: out of memory\n" -msgstr "%s: メモリ不足です\n" - -#: psqlscan.l:1933 -#, c-format -msgid "can't escape without active connection\n" -msgstr "有効な接続なしではエスケープできません\n" - -#: sql_help.c:32 sql_help.c:35 sql_help.c:38 sql_help.c:60 sql_help.c:62 -#: sql_help.c:64 sql_help.c:75 sql_help.c:77 sql_help.c:79 sql_help.c:103 -#: sql_help.c:107 sql_help.c:109 sql_help.c:111 sql_help.c:113 sql_help.c:116 -#: sql_help.c:118 sql_help.c:120 sql_help.c:213 sql_help.c:215 sql_help.c:216 -#: sql_help.c:218 sql_help.c:220 sql_help.c:223 sql_help.c:225 sql_help.c:227 -#: sql_help.c:229 sql_help.c:241 sql_help.c:242 sql_help.c:243 sql_help.c:245 -#: sql_help.c:290 sql_help.c:292 sql_help.c:294 sql_help.c:296 sql_help.c:354 -#: sql_help.c:359 sql_help.c:361 sql_help.c:396 sql_help.c:398 sql_help.c:401 -#: sql_help.c:403 sql_help.c:460 sql_help.c:465 sql_help.c:470 sql_help.c:475 -#: sql_help.c:515 sql_help.c:517 sql_help.c:519 sql_help.c:522 sql_help.c:524 -#: sql_help.c:535 sql_help.c:537 sql_help.c:577 sql_help.c:579 sql_help.c:582 -#: sql_help.c:584 sql_help.c:586 sql_help.c:612 sql_help.c:616 sql_help.c:629 -#: sql_help.c:632 sql_help.c:635 sql_help.c:655 sql_help.c:667 sql_help.c:675 -#: sql_help.c:678 sql_help.c:681 sql_help.c:711 sql_help.c:717 sql_help.c:719 -#: sql_help.c:723 sql_help.c:726 sql_help.c:729 sql_help.c:738 sql_help.c:749 -#: sql_help.c:751 sql_help.c:768 sql_help.c:777 sql_help.c:779 sql_help.c:781 -#: sql_help.c:793 sql_help.c:797 sql_help.c:799 sql_help.c:878 sql_help.c:880 -#: sql_help.c:883 sql_help.c:886 sql_help.c:888 sql_help.c:890 sql_help.c:951 -#: sql_help.c:953 sql_help.c:955 sql_help.c:958 sql_help.c:979 sql_help.c:982 -#: sql_help.c:985 sql_help.c:988 sql_help.c:992 sql_help.c:994 sql_help.c:996 -#: sql_help.c:998 sql_help.c:1012 sql_help.c:1015 sql_help.c:1017 -#: sql_help.c:1019 sql_help.c:1029 sql_help.c:1031 sql_help.c:1041 -#: sql_help.c:1043 sql_help.c:1052 sql_help.c:1073 sql_help.c:1075 -#: sql_help.c:1077 sql_help.c:1080 sql_help.c:1082 sql_help.c:1084 -#: sql_help.c:1122 sql_help.c:1128 sql_help.c:1130 sql_help.c:1133 -#: sql_help.c:1135 sql_help.c:1137 
sql_help.c:1164 sql_help.c:1167 -#: sql_help.c:1169 sql_help.c:1171 sql_help.c:1173 sql_help.c:1175 -#: sql_help.c:1178 sql_help.c:1218 sql_help.c:1456 sql_help.c:1472 -#: sql_help.c:1485 sql_help.c:1536 sql_help.c:1540 sql_help.c:1550 -#: sql_help.c:1568 sql_help.c:1591 sql_help.c:1609 sql_help.c:1637 -#: sql_help.c:1696 sql_help.c:1738 sql_help.c:1760 sql_help.c:1780 -#: sql_help.c:1781 sql_help.c:1816 sql_help.c:1836 sql_help.c:1858 -#: sql_help.c:1886 sql_help.c:1911 sql_help.c:1947 sql_help.c:2133 -#: sql_help.c:2146 sql_help.c:2163 sql_help.c:2179 sql_help.c:2202 -#: sql_help.c:2253 sql_help.c:2257 sql_help.c:2259 sql_help.c:2265 -#: sql_help.c:2283 sql_help.c:2310 sql_help.c:2345 sql_help.c:2357 -#: sql_help.c:2366 sql_help.c:2416 sql_help.c:2444 sql_help.c:2452 -#: sql_help.c:2460 sql_help.c:2468 sql_help.c:2476 sql_help.c:2484 -#: sql_help.c:2492 sql_help.c:2500 sql_help.c:2509 sql_help.c:2520 -#: sql_help.c:2528 sql_help.c:2536 sql_help.c:2544 sql_help.c:2552 -#: sql_help.c:2562 sql_help.c:2571 sql_help.c:2580 sql_help.c:2588 -#: sql_help.c:2596 sql_help.c:2605 sql_help.c:2613 sql_help.c:2621 -#: sql_help.c:2629 sql_help.c:2637 sql_help.c:2645 sql_help.c:2653 -#: sql_help.c:2661 sql_help.c:2669 sql_help.c:2677 sql_help.c:2686 -#: sql_help.c:2694 sql_help.c:2711 sql_help.c:2726 sql_help.c:2932 -#: sql_help.c:2983 sql_help.c:3011 sql_help.c:3019 sql_help.c:3417 -#: sql_help.c:3465 sql_help.c:3585 +msgstr "%s: メモリ不足です。\n" + +#: sql_help.c:36 sql_help.c:39 sql_help.c:42 sql_help.c:66 sql_help.c:67 +#: sql_help.c:69 sql_help.c:71 sql_help.c:82 sql_help.c:84 sql_help.c:86 +#: sql_help.c:112 sql_help.c:118 sql_help.c:120 sql_help.c:122 sql_help.c:124 +#: sql_help.c:127 sql_help.c:129 sql_help.c:131 sql_help.c:236 sql_help.c:238 +#: sql_help.c:239 sql_help.c:241 sql_help.c:243 sql_help.c:246 sql_help.c:248 +#: sql_help.c:250 sql_help.c:252 sql_help.c:264 sql_help.c:265 sql_help.c:266 +#: sql_help.c:268 sql_help.c:315 sql_help.c:317 sql_help.c:319 sql_help.c:321 +#: sql_help.c:382 sql_help.c:387 sql_help.c:389 sql_help.c:432 sql_help.c:434 +#: sql_help.c:437 sql_help.c:439 sql_help.c:506 sql_help.c:511 sql_help.c:516 +#: sql_help.c:521 sql_help.c:526 sql_help.c:575 sql_help.c:577 sql_help.c:579 +#: sql_help.c:581 sql_help.c:584 sql_help.c:586 sql_help.c:597 sql_help.c:599 +#: sql_help.c:640 sql_help.c:642 sql_help.c:644 sql_help.c:647 sql_help.c:649 +#: sql_help.c:651 sql_help.c:684 sql_help.c:688 sql_help.c:692 sql_help.c:711 +#: sql_help.c:714 sql_help.c:717 sql_help.c:746 sql_help.c:758 sql_help.c:766 +#: sql_help.c:769 sql_help.c:772 sql_help.c:787 sql_help.c:790 sql_help.c:807 +#: sql_help.c:809 sql_help.c:811 sql_help.c:813 sql_help.c:816 sql_help.c:818 +#: sql_help.c:859 sql_help.c:882 sql_help.c:893 sql_help.c:895 sql_help.c:914 +#: sql_help.c:924 sql_help.c:926 sql_help.c:928 sql_help.c:940 sql_help.c:944 +#: sql_help.c:946 sql_help.c:957 sql_help.c:959 sql_help.c:961 sql_help.c:977 +#: sql_help.c:979 sql_help.c:983 sql_help.c:986 sql_help.c:987 sql_help.c:988 +#: sql_help.c:991 sql_help.c:993 sql_help.c:1084 sql_help.c:1086 +#: sql_help.c:1089 sql_help.c:1092 sql_help.c:1094 sql_help.c:1096 +#: sql_help.c:1099 sql_help.c:1102 sql_help.c:1168 sql_help.c:1170 +#: sql_help.c:1172 sql_help.c:1175 sql_help.c:1196 sql_help.c:1199 +#: sql_help.c:1202 sql_help.c:1205 sql_help.c:1209 sql_help.c:1211 +#: sql_help.c:1213 sql_help.c:1215 sql_help.c:1229 sql_help.c:1232 +#: sql_help.c:1234 sql_help.c:1236 sql_help.c:1246 sql_help.c:1248 +#: sql_help.c:1258 sql_help.c:1260 sql_help.c:1270 
sql_help.c:1273 +#: sql_help.c:1295 sql_help.c:1297 sql_help.c:1299 sql_help.c:1302 +#: sql_help.c:1304 sql_help.c:1306 sql_help.c:1309 sql_help.c:1359 +#: sql_help.c:1401 sql_help.c:1404 sql_help.c:1406 sql_help.c:1408 +#: sql_help.c:1410 sql_help.c:1412 sql_help.c:1415 sql_help.c:1455 +#: sql_help.c:1666 sql_help.c:1730 sql_help.c:1749 sql_help.c:1762 +#: sql_help.c:1818 sql_help.c:1824 sql_help.c:1834 sql_help.c:1854 +#: sql_help.c:1879 sql_help.c:1897 sql_help.c:1926 sql_help.c:2019 +#: sql_help.c:2061 sql_help.c:2083 sql_help.c:2103 sql_help.c:2104 +#: sql_help.c:2139 sql_help.c:2159 sql_help.c:2181 sql_help.c:2195 +#: sql_help.c:2210 sql_help.c:2240 sql_help.c:2265 sql_help.c:2311 +#: sql_help.c:2577 sql_help.c:2590 sql_help.c:2607 sql_help.c:2623 +#: sql_help.c:2663 sql_help.c:2715 sql_help.c:2719 sql_help.c:2721 +#: sql_help.c:2727 sql_help.c:2745 sql_help.c:2772 sql_help.c:2807 +#: sql_help.c:2819 sql_help.c:2828 sql_help.c:2872 sql_help.c:2886 +#: sql_help.c:2914 sql_help.c:2922 sql_help.c:2930 sql_help.c:2938 +#: sql_help.c:2946 sql_help.c:2954 sql_help.c:2962 sql_help.c:2970 +#: sql_help.c:2979 sql_help.c:2990 sql_help.c:2998 sql_help.c:3006 +#: sql_help.c:3014 sql_help.c:3022 sql_help.c:3032 sql_help.c:3041 +#: sql_help.c:3050 sql_help.c:3058 sql_help.c:3067 sql_help.c:3075 +#: sql_help.c:3083 sql_help.c:3092 sql_help.c:3100 sql_help.c:3108 +#: sql_help.c:3116 sql_help.c:3124 sql_help.c:3132 sql_help.c:3140 +#: sql_help.c:3148 sql_help.c:3156 sql_help.c:3164 sql_help.c:3172 +#: sql_help.c:3189 sql_help.c:3198 sql_help.c:3206 sql_help.c:3223 +#: sql_help.c:3238 sql_help.c:3506 sql_help.c:3557 sql_help.c:3586 +#: sql_help.c:3594 sql_help.c:4017 sql_help.c:4065 sql_help.c:4206 msgid "name" msgstr "名前" -#: sql_help.c:33 sql_help.c:36 sql_help.c:39 sql_help.c:300 sql_help.c:1279 -#: sql_help.c:2417 sql_help.c:3234 +#: sql_help.c:37 sql_help.c:40 sql_help.c:43 sql_help.c:326 sql_help.c:1524 +#: sql_help.c:2887 sql_help.c:3811 msgid "aggregate_signature" -msgstr "集約関数の呼出し情報" - -#: sql_help.c:34 sql_help.c:61 sql_help.c:76 sql_help.c:108 sql_help.c:228 -#: sql_help.c:246 sql_help.c:362 sql_help.c:402 sql_help.c:469 sql_help.c:502 -#: sql_help.c:516 sql_help.c:536 sql_help.c:583 sql_help.c:631 sql_help.c:677 -#: sql_help.c:718 sql_help.c:740 sql_help.c:750 sql_help.c:780 sql_help.c:800 -#: sql_help.c:887 sql_help.c:952 sql_help.c:995 sql_help.c:1016 -#: sql_help.c:1030 sql_help.c:1042 sql_help.c:1054 sql_help.c:1081 -#: sql_help.c:1129 sql_help.c:1172 +msgstr "集約関数のシグニチャー" + +#: sql_help.c:38 sql_help.c:68 sql_help.c:83 sql_help.c:119 sql_help.c:251 +#: sql_help.c:269 sql_help.c:390 sql_help.c:438 sql_help.c:515 sql_help.c:561 +#: sql_help.c:576 sql_help.c:598 sql_help.c:648 sql_help.c:713 sql_help.c:768 +#: sql_help.c:789 sql_help.c:819 sql_help.c:860 sql_help.c:884 sql_help.c:894 +#: sql_help.c:927 sql_help.c:947 sql_help.c:960 sql_help.c:994 sql_help.c:1093 +#: sql_help.c:1169 sql_help.c:1212 sql_help.c:1233 sql_help.c:1247 +#: sql_help.c:1259 sql_help.c:1272 sql_help.c:1303 sql_help.c:1360 +#: sql_help.c:1409 msgid "new_name" msgstr "新しい名前" -#: sql_help.c:37 sql_help.c:63 sql_help.c:78 sql_help.c:110 sql_help.c:226 -#: sql_help.c:244 sql_help.c:360 sql_help.c:431 sql_help.c:474 sql_help.c:538 -#: sql_help.c:547 sql_help.c:602 sql_help.c:615 sql_help.c:634 sql_help.c:680 -#: sql_help.c:752 sql_help.c:778 sql_help.c:798 sql_help.c:935 sql_help.c:954 -#: sql_help.c:997 sql_help.c:1018 sql_help.c:1076 sql_help.c:1170 +#: sql_help.c:41 sql_help.c:70 sql_help.c:85 sql_help.c:121 
sql_help.c:249 +#: sql_help.c:267 sql_help.c:388 sql_help.c:474 sql_help.c:520 sql_help.c:600 +#: sql_help.c:609 sql_help.c:667 sql_help.c:687 sql_help.c:716 sql_help.c:771 +#: sql_help.c:817 sql_help.c:896 sql_help.c:925 sql_help.c:945 sql_help.c:958 +#: sql_help.c:992 sql_help.c:1153 sql_help.c:1171 sql_help.c:1214 +#: sql_help.c:1235 sql_help.c:1298 sql_help.c:1407 sql_help.c:2563 msgid "new_owner" msgstr "新しい所有者" -#: sql_help.c:40 sql_help.c:65 sql_help.c:80 sql_help.c:230 sql_help.c:293 -#: sql_help.c:404 sql_help.c:479 sql_help.c:585 sql_help.c:619 sql_help.c:637 -#: sql_help.c:683 sql_help.c:782 sql_help.c:889 sql_help.c:999 sql_help.c:1020 -#: sql_help.c:1032 sql_help.c:1044 sql_help.c:1083 sql_help.c:1174 +#: sql_help.c:44 sql_help.c:72 sql_help.c:87 sql_help.c:253 sql_help.c:318 +#: sql_help.c:440 sql_help.c:525 sql_help.c:650 sql_help.c:691 sql_help.c:719 +#: sql_help.c:774 sql_help.c:929 sql_help.c:962 sql_help.c:1095 sql_help.c:1216 +#: sql_help.c:1237 sql_help.c:1249 sql_help.c:1261 sql_help.c:1305 +#: sql_help.c:1411 msgid "new_schema" msgstr "新しいスキーマ" -#: sql_help.c:41 sql_help.c:1326 sql_help.c:2418 sql_help.c:3253 +#: sql_help.c:45 sql_help.c:1580 sql_help.c:2888 sql_help.c:3832 msgid "where aggregate_signature is:" -msgstr "集約関数の呼出し情報は以下の通り:" - -#: sql_help.c:42 sql_help.c:45 sql_help.c:48 sql_help.c:310 sql_help.c:333 -#: sql_help.c:336 sql_help.c:339 sql_help.c:461 sql_help.c:466 sql_help.c:471 -#: sql_help.c:476 sql_help.c:1295 sql_help.c:1327 sql_help.c:1330 -#: sql_help.c:1333 sql_help.c:1457 sql_help.c:1473 sql_help.c:1476 -#: sql_help.c:1697 sql_help.c:2419 sql_help.c:2422 sql_help.c:2425 -#: sql_help.c:2510 sql_help.c:2870 sql_help.c:3149 sql_help.c:3240 -#: sql_help.c:3254 sql_help.c:3257 sql_help.c:3260 +msgstr "集約関数のシグニチャーには以下のものがあります:" + +#: sql_help.c:46 sql_help.c:49 sql_help.c:52 sql_help.c:336 sql_help.c:361 +#: sql_help.c:364 sql_help.c:367 sql_help.c:507 sql_help.c:512 sql_help.c:517 +#: sql_help.c:522 sql_help.c:527 sql_help.c:1542 sql_help.c:1581 +#: sql_help.c:1584 sql_help.c:1587 sql_help.c:1731 sql_help.c:1750 +#: sql_help.c:1753 sql_help.c:2020 sql_help.c:2889 sql_help.c:2892 +#: sql_help.c:2895 sql_help.c:2980 sql_help.c:3391 sql_help.c:3724 +#: sql_help.c:3817 sql_help.c:3833 sql_help.c:3836 sql_help.c:3839 msgid "argmode" msgstr "引数のモード" -#: sql_help.c:43 sql_help.c:46 sql_help.c:49 sql_help.c:311 sql_help.c:334 -#: sql_help.c:337 sql_help.c:340 sql_help.c:462 sql_help.c:467 sql_help.c:472 -#: sql_help.c:477 sql_help.c:1296 sql_help.c:1328 sql_help.c:1331 -#: sql_help.c:1334 sql_help.c:1458 sql_help.c:1474 sql_help.c:1477 -#: sql_help.c:1698 sql_help.c:2420 sql_help.c:2423 sql_help.c:2426 -#: sql_help.c:2511 sql_help.c:3241 sql_help.c:3255 sql_help.c:3258 -#: sql_help.c:3261 +#: sql_help.c:47 sql_help.c:50 sql_help.c:53 sql_help.c:337 sql_help.c:362 +#: sql_help.c:365 sql_help.c:368 sql_help.c:508 sql_help.c:513 sql_help.c:518 +#: sql_help.c:523 sql_help.c:528 sql_help.c:1543 sql_help.c:1582 +#: sql_help.c:1585 sql_help.c:1588 sql_help.c:1732 sql_help.c:1751 +#: sql_help.c:1754 sql_help.c:2021 sql_help.c:2890 sql_help.c:2893 +#: sql_help.c:2896 sql_help.c:2981 sql_help.c:3818 sql_help.c:3834 +#: sql_help.c:3837 sql_help.c:3840 msgid "argname" -msgstr "引数名" - -#: sql_help.c:44 sql_help.c:47 sql_help.c:50 sql_help.c:312 sql_help.c:335 -#: sql_help.c:338 sql_help.c:341 sql_help.c:463 sql_help.c:468 sql_help.c:473 -#: sql_help.c:478 sql_help.c:1297 sql_help.c:1329 sql_help.c:1332 -#: sql_help.c:1335 sql_help.c:1699 sql_help.c:2421 sql_help.c:2424 
-#: sql_help.c:2427 sql_help.c:2512 sql_help.c:3242 sql_help.c:3256 -#: sql_help.c:3259 sql_help.c:3262 +msgstr "引数の名前" + +#: sql_help.c:48 sql_help.c:51 sql_help.c:54 sql_help.c:338 sql_help.c:363 +#: sql_help.c:366 sql_help.c:369 sql_help.c:509 sql_help.c:514 sql_help.c:519 +#: sql_help.c:524 sql_help.c:529 sql_help.c:1544 sql_help.c:1583 +#: sql_help.c:1586 sql_help.c:1589 sql_help.c:2022 sql_help.c:2891 +#: sql_help.c:2894 sql_help.c:2897 sql_help.c:2982 sql_help.c:3819 +#: sql_help.c:3835 sql_help.c:3838 sql_help.c:3841 msgid "argtype" msgstr "引数の型" -#: sql_help.c:104 sql_help.c:357 sql_help.c:425 sql_help.c:432 sql_help.c:712 -#: sql_help.c:795 sql_help.c:1013 sql_help.c:1123 sql_help.c:1149 -#: sql_help.c:1383 sql_help.c:1389 sql_help.c:1640 sql_help.c:1664 -#: sql_help.c:1669 sql_help.c:1739 sql_help.c:1887 sql_help.c:1968 -#: sql_help.c:2148 sql_help.c:2311 sql_help.c:2333 sql_help.c:2745 +#: sql_help.c:113 sql_help.c:385 sql_help.c:463 sql_help.c:475 sql_help.c:854 +#: sql_help.c:942 sql_help.c:1230 sql_help.c:1354 sql_help.c:1386 +#: sql_help.c:1637 sql_help.c:1643 sql_help.c:1929 sql_help.c:1970 +#: sql_help.c:1977 sql_help.c:1986 sql_help.c:2062 sql_help.c:2241 +#: sql_help.c:2333 sql_help.c:2592 sql_help.c:2773 sql_help.c:2795 +#: sql_help.c:3258 sql_help.c:3425 msgid "option" msgstr "オプション" -#: sql_help.c:105 sql_help.c:713 sql_help.c:1124 sql_help.c:1740 -#: sql_help.c:1888 sql_help.c:2312 +#: sql_help.c:114 sql_help.c:855 sql_help.c:1355 sql_help.c:2063 +#: sql_help.c:2242 sql_help.c:2774 msgid "where option can be:" -msgstr "オプションは以下の通り:" +msgstr "オプションには以下のものがあります:" -#: sql_help.c:106 sql_help.c:714 sql_help.c:1125 sql_help.c:1575 -#: sql_help.c:1889 sql_help.c:2313 +#: sql_help.c:115 sql_help.c:1861 +msgid "allowconn" +msgstr "接続の可否(真偽値)" + +#: sql_help.c:116 sql_help.c:856 sql_help.c:1356 sql_help.c:1862 +#: sql_help.c:2243 sql_help.c:2775 msgid "connlimit" -msgstr "最大接続数" +msgstr "最大同時接続数" + +#: sql_help.c:117 sql_help.c:1863 +msgid "istemplate" +msgstr "テンプレートかどうか(真偽値)" -#: sql_help.c:112 sql_help.c:526 sql_help.c:588 sql_help.c:603 sql_help.c:892 -#: sql_help.c:936 +#: sql_help.c:123 sql_help.c:588 sql_help.c:653 sql_help.c:1098 sql_help.c:1146 msgid "new_tablespace" -msgstr "テーブルスペース" +msgstr "新しいテーブル空間名" -#: sql_help.c:114 sql_help.c:117 sql_help.c:119 sql_help.c:483 sql_help.c:485 -#: sql_help.c:486 sql_help.c:721 sql_help.c:725 sql_help.c:728 sql_help.c:811 -#: sql_help.c:814 sql_help.c:1131 sql_help.c:1134 sql_help.c:1136 -#: sql_help.c:1707 sql_help.c:3036 sql_help.c:3406 +#: sql_help.c:125 sql_help.c:128 sql_help.c:130 sql_help.c:534 sql_help.c:536 +#: sql_help.c:537 sql_help.c:863 sql_help.c:867 sql_help.c:870 sql_help.c:1005 +#: sql_help.c:1008 sql_help.c:1363 sql_help.c:1367 sql_help.c:1370 +#: sql_help.c:2031 sql_help.c:3611 sql_help.c:4006 msgid "configuration_parameter" -msgstr "設定パラメータ" - -#: sql_help.c:115 sql_help.c:358 sql_help.c:421 sql_help.c:426 sql_help.c:433 -#: sql_help.c:484 sql_help.c:521 sql_help.c:594 sql_help.c:600 sql_help.c:722 -#: sql_help.c:796 sql_help.c:812 sql_help.c:813 sql_help.c:911 sql_help.c:930 -#: sql_help.c:957 sql_help.c:1014 sql_help.c:1132 sql_help.c:1150 -#: sql_help.c:1641 sql_help.c:1665 sql_help.c:1670 sql_help.c:1708 -#: sql_help.c:1709 sql_help.c:1768 sql_help.c:1800 sql_help.c:1969 -#: sql_help.c:2043 sql_help.c:2051 sql_help.c:2083 sql_help.c:2105 -#: sql_help.c:2122 sql_help.c:2149 sql_help.c:2334 sql_help.c:3407 -#: sql_help.c:3408 +msgstr "設定パラメーター" + +#: sql_help.c:126 sql_help.c:386 sql_help.c:458 
sql_help.c:464 sql_help.c:476 +#: sql_help.c:535 sql_help.c:583 sql_help.c:659 sql_help.c:665 sql_help.c:815 +#: sql_help.c:864 sql_help.c:943 sql_help.c:982 sql_help.c:985 sql_help.c:990 +#: sql_help.c:1006 sql_help.c:1007 sql_help.c:1128 sql_help.c:1148 +#: sql_help.c:1174 sql_help.c:1231 sql_help.c:1364 sql_help.c:1387 +#: sql_help.c:1930 sql_help.c:1971 sql_help.c:1978 sql_help.c:1987 +#: sql_help.c:2032 sql_help.c:2033 sql_help.c:2091 sql_help.c:2123 +#: sql_help.c:2213 sql_help.c:2334 sql_help.c:2364 sql_help.c:2462 +#: sql_help.c:2474 sql_help.c:2487 sql_help.c:2527 sql_help.c:2549 +#: sql_help.c:2566 sql_help.c:2593 sql_help.c:2796 sql_help.c:3426 +#: sql_help.c:4007 sql_help.c:4008 msgid "value" msgstr "値" -#: sql_help.c:177 +#: sql_help.c:198 msgid "target_role" msgstr "対象のロール" -#: sql_help.c:178 sql_help.c:1624 sql_help.c:1929 sql_help.c:1934 -#: sql_help.c:2852 sql_help.c:2859 sql_help.c:2873 sql_help.c:2879 -#: sql_help.c:3131 sql_help.c:3138 sql_help.c:3152 sql_help.c:3158 +#: sql_help.c:199 sql_help.c:1913 sql_help.c:2289 sql_help.c:2294 +#: sql_help.c:3373 sql_help.c:3380 sql_help.c:3394 sql_help.c:3400 +#: sql_help.c:3706 sql_help.c:3713 sql_help.c:3727 sql_help.c:3733 msgid "schema_name" msgstr "スキーマ名" -#: sql_help.c:179 +#: sql_help.c:200 msgid "abbreviated_grant_or_revoke" -msgstr "権限付与/剥奪の省略形" +msgstr "GRANT/REVOKEの省略形" -#: sql_help.c:180 +#: sql_help.c:201 msgid "where abbreviated_grant_or_revoke is one of:" -msgstr "権限付与/剥奪の省略形は以下のいずれか:" - -#: sql_help.c:181 sql_help.c:182 sql_help.c:183 sql_help.c:184 sql_help.c:185 -#: sql_help.c:186 sql_help.c:187 sql_help.c:188 sql_help.c:525 sql_help.c:587 -#: sql_help.c:891 sql_help.c:1743 sql_help.c:1744 sql_help.c:1745 -#: sql_help.c:1746 sql_help.c:1747 sql_help.c:1892 sql_help.c:1893 -#: sql_help.c:1894 sql_help.c:1895 sql_help.c:1896 sql_help.c:2316 -#: sql_help.c:2317 sql_help.c:2318 sql_help.c:2319 sql_help.c:2320 -#: sql_help.c:2853 sql_help.c:2857 sql_help.c:2860 sql_help.c:2862 -#: sql_help.c:2864 sql_help.c:2866 sql_help.c:2868 sql_help.c:2874 -#: sql_help.c:2876 sql_help.c:2878 sql_help.c:2880 sql_help.c:2882 -#: sql_help.c:2884 sql_help.c:2885 sql_help.c:2886 sql_help.c:3132 -#: sql_help.c:3136 sql_help.c:3139 sql_help.c:3141 sql_help.c:3143 -#: sql_help.c:3145 sql_help.c:3147 sql_help.c:3153 sql_help.c:3155 -#: sql_help.c:3157 sql_help.c:3159 sql_help.c:3161 sql_help.c:3163 -#: sql_help.c:3164 sql_help.c:3165 sql_help.c:3427 +msgstr "GRANT/REVOKEの省略形は以下のいずれかです:" + +#: sql_help.c:202 sql_help.c:203 sql_help.c:204 sql_help.c:205 sql_help.c:206 +#: sql_help.c:207 sql_help.c:208 sql_help.c:209 sql_help.c:210 sql_help.c:211 +#: sql_help.c:559 sql_help.c:587 sql_help.c:652 sql_help.c:792 sql_help.c:874 +#: sql_help.c:1097 sql_help.c:1374 sql_help.c:2066 sql_help.c:2067 +#: sql_help.c:2068 sql_help.c:2069 sql_help.c:2070 sql_help.c:2197 +#: sql_help.c:2246 sql_help.c:2247 sql_help.c:2248 sql_help.c:2249 +#: sql_help.c:2250 sql_help.c:2778 sql_help.c:2779 sql_help.c:2780 +#: sql_help.c:2781 sql_help.c:2782 sql_help.c:3407 sql_help.c:3408 +#: sql_help.c:3409 sql_help.c:3707 sql_help.c:3711 sql_help.c:3714 +#: sql_help.c:3716 sql_help.c:3718 sql_help.c:3720 sql_help.c:3722 +#: sql_help.c:3728 sql_help.c:3730 sql_help.c:3732 sql_help.c:3734 +#: sql_help.c:3736 sql_help.c:3738 sql_help.c:3739 sql_help.c:3740 +#: sql_help.c:4027 msgid "role_name" msgstr "ロール名" -#: sql_help.c:214 sql_help.c:414 sql_help.c:902 sql_help.c:904 sql_help.c:1166 -#: sql_help.c:1594 sql_help.c:1598 sql_help.c:1764 sql_help.c:2055 -#: sql_help.c:2065 
sql_help.c:2087 sql_help.c:2900 sql_help.c:3303 -#: sql_help.c:3304 sql_help.c:3308 sql_help.c:3313 sql_help.c:3381 -#: sql_help.c:3382 sql_help.c:3387 sql_help.c:3392 sql_help.c:3521 -#: sql_help.c:3522 sql_help.c:3526 sql_help.c:3531 sql_help.c:3611 -#: sql_help.c:3613 sql_help.c:3644 sql_help.c:3690 sql_help.c:3691 -#: sql_help.c:3695 sql_help.c:3700 +#: sql_help.c:237 sql_help.c:451 sql_help.c:1113 sql_help.c:1115 +#: sql_help.c:1403 sql_help.c:1882 sql_help.c:1886 sql_help.c:1990 +#: sql_help.c:1994 sql_help.c:2087 sql_help.c:2458 sql_help.c:2470 +#: sql_help.c:2483 sql_help.c:2491 sql_help.c:2502 sql_help.c:2531 +#: sql_help.c:3457 sql_help.c:3472 sql_help.c:3474 sql_help.c:3892 +#: sql_help.c:3893 sql_help.c:3902 sql_help.c:3943 sql_help.c:3944 +#: sql_help.c:3945 sql_help.c:3946 sql_help.c:3947 sql_help.c:3948 +#: sql_help.c:3981 sql_help.c:3982 sql_help.c:3987 sql_help.c:3992 +#: sql_help.c:4131 sql_help.c:4132 sql_help.c:4141 sql_help.c:4182 +#: sql_help.c:4183 sql_help.c:4184 sql_help.c:4185 sql_help.c:4186 +#: sql_help.c:4187 sql_help.c:4234 sql_help.c:4236 sql_help.c:4269 +#: sql_help.c:4325 sql_help.c:4326 sql_help.c:4335 sql_help.c:4376 +#: sql_help.c:4377 sql_help.c:4378 sql_help.c:4379 sql_help.c:4380 +#: sql_help.c:4381 msgid "expression" msgstr "評価式" -#: sql_help.c:217 +#: sql_help.c:240 msgid "domain_constraint" msgstr "ドメイン制約" -#: sql_help.c:219 sql_help.c:221 sql_help.c:224 sql_help.c:884 sql_help.c:917 -#: sql_help.c:918 sql_help.c:919 sql_help.c:939 sql_help.c:1285 -#: sql_help.c:1597 sql_help.c:1672 sql_help.c:2054 sql_help.c:2064 +#: sql_help.c:242 sql_help.c:244 sql_help.c:247 sql_help.c:466 sql_help.c:467 +#: sql_help.c:1090 sql_help.c:1134 sql_help.c:1135 sql_help.c:1136 +#: sql_help.c:1156 sql_help.c:1530 sql_help.c:1532 sql_help.c:1885 +#: sql_help.c:1989 sql_help.c:1993 sql_help.c:2490 sql_help.c:2501 +#: sql_help.c:3469 msgid "constraint_name" msgstr "制約名" -#: sql_help.c:222 sql_help.c:885 +#: sql_help.c:245 sql_help.c:1091 msgid "new_constraint_name" msgstr "新しい制約名" -#: sql_help.c:291 sql_help.c:794 +#: sql_help.c:316 sql_help.c:941 msgid "new_version" msgstr "新しいバージョン" -#: sql_help.c:295 sql_help.c:297 +#: sql_help.c:320 sql_help.c:322 msgid "member_object" -msgstr "メンバオブジェクト" +msgstr "メンバーオブジェクト" -#: sql_help.c:298 +#: sql_help.c:323 msgid "where member_object is:" -msgstr "メンバオブジェクトは以下の通り:" +msgstr "メンバーオブジェクトは以下の通りです:" + +#: sql_help.c:324 sql_help.c:329 sql_help.c:330 sql_help.c:331 sql_help.c:332 +#: sql_help.c:333 sql_help.c:334 sql_help.c:339 sql_help.c:343 sql_help.c:345 +#: sql_help.c:347 sql_help.c:348 sql_help.c:349 sql_help.c:350 sql_help.c:351 +#: sql_help.c:352 sql_help.c:353 sql_help.c:354 sql_help.c:355 sql_help.c:358 +#: sql_help.c:359 sql_help.c:1522 sql_help.c:1527 sql_help.c:1534 +#: sql_help.c:1535 sql_help.c:1536 sql_help.c:1537 sql_help.c:1538 +#: sql_help.c:1539 sql_help.c:1540 sql_help.c:1545 sql_help.c:1547 +#: sql_help.c:1551 sql_help.c:1553 sql_help.c:1557 sql_help.c:1558 +#: sql_help.c:1559 sql_help.c:1562 sql_help.c:1563 sql_help.c:1564 +#: sql_help.c:1565 sql_help.c:1566 sql_help.c:1567 sql_help.c:1568 +#: sql_help.c:1569 sql_help.c:1570 sql_help.c:1571 sql_help.c:1572 +#: sql_help.c:1577 sql_help.c:1578 sql_help.c:3807 sql_help.c:3812 +#: sql_help.c:3813 sql_help.c:3814 sql_help.c:3815 sql_help.c:3821 +#: sql_help.c:3822 sql_help.c:3823 sql_help.c:3824 sql_help.c:3825 +#: sql_help.c:3826 sql_help.c:3827 sql_help.c:3828 sql_help.c:3829 +#: sql_help.c:3830 +msgid "object_name" +msgstr "オブジェクト名" -#: sql_help.c:299 
sql_help.c:1278 sql_help.c:3233 +#: sql_help.c:325 sql_help.c:1523 sql_help.c:3810 msgid "aggregate_name" -msgstr "集約関数の名前" +msgstr "集約関数名" -#: sql_help.c:301 sql_help.c:1280 sql_help.c:1516 sql_help.c:1520 -#: sql_help.c:1522 sql_help.c:2435 +#: sql_help.c:327 sql_help.c:1525 sql_help.c:1796 sql_help.c:1800 +#: sql_help.c:1802 sql_help.c:2905 msgid "source_type" -msgstr "ソースの型" +msgstr "変換前の型" -#: sql_help.c:302 sql_help.c:1281 sql_help.c:1517 sql_help.c:1521 -#: sql_help.c:1523 sql_help.c:2436 +#: sql_help.c:328 sql_help.c:1526 sql_help.c:1797 sql_help.c:1801 +#: sql_help.c:1803 sql_help.c:2906 msgid "target_type" -msgstr "ターゲットの型" - -#: sql_help.c:303 sql_help.c:304 sql_help.c:305 sql_help.c:306 sql_help.c:307 -#: sql_help.c:308 sql_help.c:313 sql_help.c:317 sql_help.c:319 sql_help.c:321 -#: sql_help.c:322 sql_help.c:323 sql_help.c:324 sql_help.c:325 sql_help.c:326 -#: sql_help.c:327 sql_help.c:328 sql_help.c:329 sql_help.c:330 sql_help.c:331 -#: sql_help.c:1282 sql_help.c:1287 sql_help.c:1288 sql_help.c:1289 -#: sql_help.c:1290 sql_help.c:1291 sql_help.c:1292 sql_help.c:1293 -#: sql_help.c:1298 sql_help.c:1300 sql_help.c:1304 sql_help.c:1306 -#: sql_help.c:1308 sql_help.c:1309 sql_help.c:1312 sql_help.c:1313 -#: sql_help.c:1314 sql_help.c:1315 sql_help.c:1316 sql_help.c:1317 -#: sql_help.c:1318 sql_help.c:1319 sql_help.c:1320 sql_help.c:1323 -#: sql_help.c:1324 sql_help.c:3230 sql_help.c:3235 sql_help.c:3236 -#: sql_help.c:3237 sql_help.c:3238 sql_help.c:3244 sql_help.c:3245 -#: sql_help.c:3246 sql_help.c:3247 sql_help.c:3248 sql_help.c:3249 -#: sql_help.c:3250 sql_help.c:3251 -msgid "object_name" -msgstr "オブジェクト名" - -#: sql_help.c:309 sql_help.c:665 sql_help.c:1294 sql_help.c:1518 -#: sql_help.c:1553 sql_help.c:1612 sql_help.c:1817 sql_help.c:1848 -#: sql_help.c:2207 sql_help.c:2869 sql_help.c:3148 sql_help.c:3239 -#: sql_help.c:3329 sql_help.c:3333 sql_help.c:3337 sql_help.c:3340 -#: sql_help.c:3547 sql_help.c:3551 sql_help.c:3555 sql_help.c:3558 -#: sql_help.c:3716 sql_help.c:3720 sql_help.c:3724 sql_help.c:3727 +msgstr "変換後の型" + +#: sql_help.c:335 sql_help.c:756 sql_help.c:1541 sql_help.c:1798 +#: sql_help.c:1837 sql_help.c:1900 sql_help.c:2140 sql_help.c:2171 +#: sql_help.c:2669 sql_help.c:3390 sql_help.c:3723 sql_help.c:3816 +#: sql_help.c:3921 sql_help.c:3925 sql_help.c:3929 sql_help.c:3932 +#: sql_help.c:4160 sql_help.c:4164 sql_help.c:4168 sql_help.c:4171 +#: sql_help.c:4354 sql_help.c:4358 sql_help.c:4362 sql_help.c:4365 msgid "function_name" msgstr "関数名" -#: sql_help.c:314 sql_help.c:658 sql_help.c:1301 sql_help.c:1841 +#: sql_help.c:340 sql_help.c:749 sql_help.c:1548 sql_help.c:2164 msgid "operator_name" msgstr "演算子名" -#: sql_help.c:315 sql_help.c:613 sql_help.c:617 sql_help.c:1302 -#: sql_help.c:1818 sql_help.c:2553 +#: sql_help.c:341 sql_help.c:685 sql_help.c:689 sql_help.c:693 sql_help.c:1549 +#: sql_help.c:2141 sql_help.c:3023 msgid "left_type" msgstr "左辺の型" -#: sql_help.c:316 sql_help.c:614 sql_help.c:618 sql_help.c:1303 -#: sql_help.c:1819 sql_help.c:2554 +#: sql_help.c:342 sql_help.c:686 sql_help.c:690 sql_help.c:694 sql_help.c:1550 +#: sql_help.c:2142 sql_help.c:3024 msgid "right_type" msgstr "右辺の型" -#: sql_help.c:318 sql_help.c:320 sql_help.c:630 sql_help.c:633 sql_help.c:636 -#: sql_help.c:656 sql_help.c:668 sql_help.c:676 sql_help.c:679 sql_help.c:682 -#: sql_help.c:1305 sql_help.c:1307 sql_help.c:1838 sql_help.c:1859 -#: sql_help.c:2070 sql_help.c:2563 sql_help.c:2572 +#: sql_help.c:344 sql_help.c:346 sql_help.c:712 sql_help.c:715 sql_help.c:718 +#: 
sql_help.c:747 sql_help.c:759 sql_help.c:767 sql_help.c:770 sql_help.c:773 +#: sql_help.c:1552 sql_help.c:1554 sql_help.c:2161 sql_help.c:2182 +#: sql_help.c:2507 sql_help.c:3033 sql_help.c:3042 msgid "index_method" msgstr "インデックスメソッド" -#: sql_help.c:332 +#: sql_help.c:356 sql_help.c:1152 sql_help.c:1573 sql_help.c:2028 +#: sql_help.c:2465 sql_help.c:2636 sql_help.c:3180 sql_help.c:3404 +#: sql_help.c:3737 +msgid "type_name" +msgstr "型名" + +#: sql_help.c:357 sql_help.c:1574 sql_help.c:2027 sql_help.c:2637 +#: sql_help.c:2863 sql_help.c:3181 sql_help.c:3396 sql_help.c:3729 +msgid "lang_name" +msgstr "言語名" + +#: sql_help.c:360 msgid "and aggregate_signature is:" -msgstr "集約関数の呼出し情報は以下の通り" +msgstr "集約関数のシグニチャーは以下の通りです:" -#: sql_help.c:355 sql_help.c:1638 +#: sql_help.c:383 sql_help.c:1668 sql_help.c:1927 msgid "handler_function" -msgstr "ハンドラ関数" +msgstr "ハンドラー関数" -#: sql_help.c:356 sql_help.c:1639 +#: sql_help.c:384 sql_help.c:1928 msgid "validator_function" -msgstr "バリデータ関数" +msgstr "バリデーター関数" -#: sql_help.c:397 sql_help.c:464 sql_help.c:578 sql_help.c:879 sql_help.c:1074 -#: sql_help.c:2061 sql_help.c:2062 sql_help.c:2078 sql_help.c:2079 +#: sql_help.c:433 sql_help.c:510 sql_help.c:641 sql_help.c:1085 sql_help.c:1296 +#: sql_help.c:2498 sql_help.c:2499 sql_help.c:2515 sql_help.c:2516 msgid "action" msgstr "アクション" -#: sql_help.c:399 sql_help.c:406 sql_help.c:410 sql_help.c:411 sql_help.c:413 -#: sql_help.c:415 sql_help.c:416 sql_help.c:417 sql_help.c:419 sql_help.c:422 -#: sql_help.c:424 sql_help.c:580 sql_help.c:590 sql_help.c:592 sql_help.c:595 -#: sql_help.c:597 sql_help.c:776 sql_help.c:881 sql_help.c:894 sql_help.c:898 -#: sql_help.c:899 sql_help.c:903 sql_help.c:905 sql_help.c:906 sql_help.c:907 -#: sql_help.c:909 sql_help.c:912 sql_help.c:914 sql_help.c:1165 -#: sql_help.c:1168 sql_help.c:1188 sql_help.c:1284 sql_help.c:1380 -#: sql_help.c:1385 sql_help.c:1399 sql_help.c:1400 sql_help.c:1401 -#: sql_help.c:1662 sql_help.c:1702 sql_help.c:1763 sql_help.c:1798 -#: sql_help.c:1954 sql_help.c:2034 sql_help.c:2047 sql_help.c:2066 -#: sql_help.c:2068 sql_help.c:2075 sql_help.c:2086 sql_help.c:2103 -#: sql_help.c:2210 sql_help.c:2346 sql_help.c:2854 sql_help.c:2855 -#: sql_help.c:2899 sql_help.c:3133 sql_help.c:3134 sql_help.c:3232 -#: sql_help.c:3352 sql_help.c:3570 sql_help.c:3610 sql_help.c:3612 -#: sql_help.c:3629 sql_help.c:3632 sql_help.c:3739 +#: sql_help.c:435 sql_help.c:442 sql_help.c:446 sql_help.c:447 sql_help.c:450 +#: sql_help.c:452 sql_help.c:453 sql_help.c:454 sql_help.c:456 sql_help.c:459 +#: sql_help.c:461 sql_help.c:462 sql_help.c:645 sql_help.c:655 sql_help.c:657 +#: sql_help.c:660 sql_help.c:662 sql_help.c:923 sql_help.c:1087 sql_help.c:1105 +#: sql_help.c:1109 sql_help.c:1110 sql_help.c:1114 sql_help.c:1116 +#: sql_help.c:1117 sql_help.c:1118 sql_help.c:1120 sql_help.c:1123 +#: sql_help.c:1124 sql_help.c:1126 sql_help.c:1129 sql_help.c:1131 +#: sql_help.c:1402 sql_help.c:1405 sql_help.c:1425 sql_help.c:1529 +#: sql_help.c:1634 sql_help.c:1639 sql_help.c:1653 sql_help.c:1654 +#: sql_help.c:1655 sql_help.c:1968 sql_help.c:1981 sql_help.c:2025 +#: sql_help.c:2086 sql_help.c:2121 sql_help.c:2319 sql_help.c:2347 +#: sql_help.c:2348 sql_help.c:2449 sql_help.c:2457 sql_help.c:2466 +#: sql_help.c:2469 sql_help.c:2478 sql_help.c:2482 sql_help.c:2503 +#: sql_help.c:2505 sql_help.c:2512 sql_help.c:2530 sql_help.c:2547 +#: sql_help.c:2672 sql_help.c:2808 sql_help.c:3375 sql_help.c:3376 +#: sql_help.c:3456 sql_help.c:3471 sql_help.c:3473 sql_help.c:3475 +#: sql_help.c:3708 
sql_help.c:3709 sql_help.c:3809 sql_help.c:3952 +#: sql_help.c:4191 sql_help.c:4233 sql_help.c:4235 sql_help.c:4237 +#: sql_help.c:4254 sql_help.c:4257 sql_help.c:4385 msgid "column_name" msgstr "列名" -#: sql_help.c:400 sql_help.c:581 sql_help.c:882 +#: sql_help.c:436 sql_help.c:646 sql_help.c:1088 msgid "new_column_name" msgstr "新しい列名" -#: sql_help.c:405 sql_help.c:480 sql_help.c:589 sql_help.c:893 sql_help.c:1087 +#: sql_help.c:441 sql_help.c:531 sql_help.c:654 sql_help.c:1104 sql_help.c:1312 msgid "where action is one of:" -msgstr "アクションは以下のいずれか:" +msgstr "アクションは以下のいずれかです:" -#: sql_help.c:407 sql_help.c:412 sql_help.c:895 sql_help.c:900 sql_help.c:1089 -#: sql_help.c:1093 sql_help.c:1592 sql_help.c:1663 sql_help.c:1837 -#: sql_help.c:2035 sql_help.c:2255 sql_help.c:2984 +#: sql_help.c:443 sql_help.c:448 sql_help.c:915 sql_help.c:1106 sql_help.c:1111 +#: sql_help.c:1314 sql_help.c:1318 sql_help.c:1880 sql_help.c:1969 +#: sql_help.c:2160 sql_help.c:2312 sql_help.c:2450 sql_help.c:2717 +#: sql_help.c:3558 msgid "data_type" msgstr "データ型" -#: sql_help.c:408 sql_help.c:896 sql_help.c:901 sql_help.c:1090 -#: sql_help.c:1094 sql_help.c:1593 sql_help.c:1666 sql_help.c:1765 -#: sql_help.c:2036 sql_help.c:2256 sql_help.c:2262 +#: sql_help.c:444 sql_help.c:449 sql_help.c:1107 sql_help.c:1112 +#: sql_help.c:1315 sql_help.c:1319 sql_help.c:1881 sql_help.c:1972 +#: sql_help.c:2088 sql_help.c:2451 sql_help.c:2459 sql_help.c:2471 +#: sql_help.c:2484 sql_help.c:2718 sql_help.c:2724 sql_help.c:3466 msgid "collation" msgstr "照合順序" -#: sql_help.c:409 sql_help.c:897 sql_help.c:1667 sql_help.c:2037 -#: sql_help.c:2048 +#: sql_help.c:445 sql_help.c:1108 sql_help.c:1973 sql_help.c:1982 +#: sql_help.c:2452 sql_help.c:2467 sql_help.c:2479 msgid "column_constraint" -msgstr "列制約" +msgstr "カラム制約" -#: sql_help.c:418 sql_help.c:591 sql_help.c:908 +#: sql_help.c:455 sql_help.c:656 sql_help.c:1125 msgid "integer" msgstr "整数" -#: sql_help.c:420 sql_help.c:423 sql_help.c:593 sql_help.c:596 sql_help.c:910 -#: sql_help.c:913 +#: sql_help.c:457 sql_help.c:460 sql_help.c:658 sql_help.c:661 sql_help.c:1127 +#: sql_help.c:1130 msgid "attribute_option" msgstr "属性オプション" -#: sql_help.c:427 sql_help.c:428 sql_help.c:429 sql_help.c:430 sql_help.c:920 -#: sql_help.c:921 sql_help.c:922 sql_help.c:923 sql_help.c:1321 +#: sql_help.c:465 sql_help.c:1132 sql_help.c:1974 sql_help.c:1983 +#: sql_help.c:2453 sql_help.c:2468 sql_help.c:2480 +msgid "table_constraint" +msgstr "テーブル制約" + +#: sql_help.c:468 sql_help.c:469 sql_help.c:470 sql_help.c:471 sql_help.c:1137 +#: sql_help.c:1138 sql_help.c:1139 sql_help.c:1140 sql_help.c:1575 msgid "trigger_name" msgstr "トリガー名" -#: sql_help.c:481 sql_help.c:1705 +#: sql_help.c:472 sql_help.c:473 sql_help.c:1150 sql_help.c:1151 +#: sql_help.c:1975 sql_help.c:1980 sql_help.c:2456 sql_help.c:2477 +msgid "parent_table" +msgstr "親テーブル" + +#: sql_help.c:530 sql_help.c:580 sql_help.c:643 sql_help.c:1275 sql_help.c:1912 +msgid "extension_name" +msgstr "拡張名" + +#: sql_help.c:532 sql_help.c:2029 msgid "execution_cost" msgstr "実行コスト" -#: sql_help.c:482 sql_help.c:1706 +#: sql_help.c:533 sql_help.c:2030 msgid "result_rows" msgstr "結果の行数" -#: sql_help.c:497 sql_help.c:499 sql_help.c:501 +#: sql_help.c:554 sql_help.c:556 sql_help.c:853 sql_help.c:861 sql_help.c:865 +#: sql_help.c:868 sql_help.c:871 sql_help.c:1353 sql_help.c:1361 +#: sql_help.c:1365 sql_help.c:1368 sql_help.c:1371 sql_help.c:2290 +#: sql_help.c:2292 sql_help.c:2295 sql_help.c:2296 sql_help.c:3374 +#: sql_help.c:3378 sql_help.c:3381 sql_help.c:3383 
sql_help.c:3385 +#: sql_help.c:3387 sql_help.c:3389 sql_help.c:3395 sql_help.c:3397 +#: sql_help.c:3399 sql_help.c:3401 sql_help.c:3403 sql_help.c:3405 +msgid "role_specification" +msgstr "ロールの指定" + +#: sql_help.c:555 sql_help.c:557 sql_help.c:1384 sql_help.c:1855 +#: sql_help.c:2298 sql_help.c:2793 sql_help.c:3214 sql_help.c:4037 +msgid "user_name" +msgstr "ユーザー名" + +#: sql_help.c:558 sql_help.c:873 sql_help.c:1373 sql_help.c:2297 +#: sql_help.c:3406 +msgid "where role_specification can be:" +msgstr "ロール指定は以下の通りです:" + +#: sql_help.c:560 msgid "group_name" msgstr "グループ名" -#: sql_help.c:498 sql_help.c:500 sql_help.c:1147 sql_help.c:1569 -#: sql_help.c:1930 sql_help.c:1932 sql_help.c:1935 sql_help.c:1936 -#: sql_help.c:2119 sql_help.c:2331 sql_help.c:2702 sql_help.c:3437 -msgid "user_name" -msgstr "ユーザ名" - -#: sql_help.c:518 sql_help.c:1574 sql_help.c:1769 sql_help.c:1801 -#: sql_help.c:2044 sql_help.c:2052 sql_help.c:2084 sql_help.c:2106 -#: sql_help.c:2118 sql_help.c:2881 sql_help.c:3160 +#: sql_help.c:578 sql_help.c:1860 sql_help.c:2092 sql_help.c:2124 +#: sql_help.c:2463 sql_help.c:2475 sql_help.c:2488 sql_help.c:2528 +#: sql_help.c:2550 sql_help.c:2562 sql_help.c:3402 sql_help.c:3735 msgid "tablespace_name" -msgstr "テーブルスペース名" +msgstr "テーブル空間名" -#: sql_help.c:520 sql_help.c:523 sql_help.c:599 sql_help.c:601 sql_help.c:929 -#: sql_help.c:931 sql_help.c:1767 sql_help.c:1799 sql_help.c:2042 -#: sql_help.c:2050 sql_help.c:2082 sql_help.c:2104 +#: sql_help.c:582 sql_help.c:585 sql_help.c:664 sql_help.c:666 sql_help.c:1147 +#: sql_help.c:1149 sql_help.c:2090 sql_help.c:2122 sql_help.c:2461 +#: sql_help.c:2473 sql_help.c:2486 sql_help.c:2526 sql_help.c:2548 msgid "storage_parameter" msgstr "ストレージパラメーター" -#: sql_help.c:546 sql_help.c:1299 sql_help.c:3243 +#: sql_help.c:608 sql_help.c:1546 sql_help.c:3820 msgid "large_object_oid" -msgstr "ラージオブジェクトのoid" +msgstr "ラージオブジェクトのOID" -#: sql_help.c:598 sql_help.c:928 sql_help.c:937 sql_help.c:940 sql_help.c:1228 +#: sql_help.c:663 sql_help.c:1145 sql_help.c:1154 sql_help.c:1157 +#: sql_help.c:1465 msgid "index_name" msgstr "インデックス名" -#: sql_help.c:657 sql_help.c:669 sql_help.c:1840 +#: sql_help.c:695 sql_help.c:2145 +msgid "res_proc" +msgstr "制約選択評価関数" + +#: sql_help.c:696 sql_help.c:2146 +msgid "join_proc" +msgstr "結合選択評価関数" + +#: sql_help.c:748 sql_help.c:760 sql_help.c:2163 msgid "strategy_number" -msgstr "ストラテジー番号" +msgstr "戦略番号" -#: sql_help.c:659 sql_help.c:660 sql_help.c:663 sql_help.c:664 sql_help.c:670 -#: sql_help.c:671 sql_help.c:673 sql_help.c:674 sql_help.c:1842 -#: sql_help.c:1843 sql_help.c:1846 sql_help.c:1847 +#: sql_help.c:750 sql_help.c:751 sql_help.c:754 sql_help.c:755 sql_help.c:761 +#: sql_help.c:762 sql_help.c:764 sql_help.c:765 sql_help.c:2165 sql_help.c:2166 +#: sql_help.c:2169 sql_help.c:2170 msgid "op_type" msgstr "演算子の型" -#: sql_help.c:661 sql_help.c:1844 +#: sql_help.c:752 sql_help.c:2167 msgid "sort_family_name" msgstr "ソートファミリー名" -#: sql_help.c:662 sql_help.c:672 sql_help.c:1845 +#: sql_help.c:753 sql_help.c:763 sql_help.c:2168 msgid "support_number" msgstr "サポート番号" -#: sql_help.c:666 sql_help.c:1519 sql_help.c:1849 +#: sql_help.c:757 sql_help.c:1799 sql_help.c:2172 sql_help.c:2639 +#: sql_help.c:2641 msgid "argument_type" msgstr "引数の型" -#: sql_help.c:715 sql_help.c:1126 sql_help.c:1741 sql_help.c:1890 -#: sql_help.c:2314 +#: sql_help.c:788 sql_help.c:791 sql_help.c:808 sql_help.c:810 sql_help.c:812 +#: sql_help.c:883 sql_help.c:922 sql_help.c:1271 sql_help.c:1274 +#: sql_help.c:1424 sql_help.c:1464 sql_help.c:1531 
sql_help.c:1556 +#: sql_help.c:1561 sql_help.c:1576 sql_help.c:1633 sql_help.c:1638 +#: sql_help.c:1967 sql_help.c:1979 sql_help.c:2084 sql_help.c:2120 +#: sql_help.c:2196 sql_help.c:2211 sql_help.c:2267 sql_help.c:2318 +#: sql_help.c:2349 sql_help.c:2448 sql_help.c:2464 sql_help.c:2476 +#: sql_help.c:2546 sql_help.c:2665 sql_help.c:2842 sql_help.c:3059 +#: sql_help.c:3084 sql_help.c:3190 sql_help.c:3372 sql_help.c:3377 +#: sql_help.c:3422 sql_help.c:3454 sql_help.c:3705 sql_help.c:3710 +#: sql_help.c:3808 sql_help.c:3907 sql_help.c:3909 sql_help.c:3958 +#: sql_help.c:3997 sql_help.c:4146 sql_help.c:4148 sql_help.c:4197 +#: sql_help.c:4231 sql_help.c:4253 sql_help.c:4255 sql_help.c:4256 +#: sql_help.c:4340 sql_help.c:4342 sql_help.c:4391 +msgid "table_name" +msgstr "テーブル名" + +#: sql_help.c:793 sql_help.c:2198 +msgid "using_expression" +msgstr "USING表現" + +#: sql_help.c:794 sql_help.c:2199 +msgid "check_expression" +msgstr "CHECK表現" + +#: sql_help.c:814 sql_help.c:2212 +msgid "publication_parameter" +msgstr "パブリケーションパラメーター" + +#: sql_help.c:857 sql_help.c:1357 sql_help.c:2064 sql_help.c:2244 +#: sql_help.c:2776 msgid "password" msgstr "パスワード" -#: sql_help.c:716 sql_help.c:1127 sql_help.c:1742 sql_help.c:1891 -#: sql_help.c:2315 +#: sql_help.c:858 sql_help.c:1358 sql_help.c:2065 sql_help.c:2245 +#: sql_help.c:2777 msgid "timestamp" msgstr "タイムスタンプ" -#: sql_help.c:720 sql_help.c:724 sql_help.c:727 sql_help.c:730 sql_help.c:2861 -#: sql_help.c:3140 +#: sql_help.c:862 sql_help.c:866 sql_help.c:869 sql_help.c:872 sql_help.c:1362 +#: sql_help.c:1366 sql_help.c:1369 sql_help.c:1372 sql_help.c:3382 +#: sql_help.c:3715 msgid "database_name" msgstr "データベース名" -#: sql_help.c:739 sql_help.c:775 sql_help.c:1053 sql_help.c:1187 -#: sql_help.c:1227 sql_help.c:1286 sql_help.c:1311 sql_help.c:1322 -#: sql_help.c:1379 sql_help.c:1384 sql_help.c:1661 sql_help.c:1761 -#: sql_help.c:1797 sql_help.c:1913 sql_help.c:1953 sql_help.c:2033 -#: sql_help.c:2045 sql_help.c:2102 sql_help.c:2204 sql_help.c:2380 -#: sql_help.c:2597 sql_help.c:2678 sql_help.c:2851 sql_help.c:2856 -#: sql_help.c:2898 sql_help.c:3130 sql_help.c:3135 sql_help.c:3231 -#: sql_help.c:3318 sql_help.c:3320 sql_help.c:3358 sql_help.c:3397 -#: sql_help.c:3536 sql_help.c:3538 sql_help.c:3576 sql_help.c:3608 -#: sql_help.c:3628 sql_help.c:3630 sql_help.c:3631 sql_help.c:3705 -#: sql_help.c:3707 sql_help.c:3745 -msgid "table_name" -msgstr "テーブル名" - -#: sql_help.c:769 sql_help.c:1948 +#: sql_help.c:916 sql_help.c:2313 msgid "increment" -msgstr "増分" +msgstr "増分値" -#: sql_help.c:770 sql_help.c:1949 +#: sql_help.c:917 sql_help.c:2314 msgid "minvalue" msgstr "最小値" -#: sql_help.c:771 sql_help.c:1950 +#: sql_help.c:918 sql_help.c:2315 msgid "maxvalue" msgstr "最大値" -#: sql_help.c:772 sql_help.c:1951 sql_help.c:3316 sql_help.c:3395 -#: sql_help.c:3534 sql_help.c:3648 sql_help.c:3703 +#: sql_help.c:919 sql_help.c:2316 sql_help.c:3905 sql_help.c:3995 +#: sql_help.c:4144 sql_help.c:4273 sql_help.c:4338 msgid "start" -msgstr "開始値" +msgstr "開始番号" -#: sql_help.c:773 +#: sql_help.c:920 sql_help.c:1122 msgid "restart" -msgstr "再開始値" +msgstr "再開始番号" -#: sql_help.c:774 sql_help.c:1952 +#: sql_help.c:921 sql_help.c:2317 msgid "cache" -msgstr "キャッシュ" +msgstr "キャッシュ割り当て数" -#: sql_help.c:915 sql_help.c:2038 sql_help.c:2049 -msgid "table_constraint" -msgstr "テーブル制約" +#: sql_help.c:978 sql_help.c:2361 +msgid "conninfo" +msgstr "接続文字列" + +#: sql_help.c:980 sql_help.c:2362 +msgid "publication_name" +msgstr "パブリケーション名" + +#: sql_help.c:981 +msgid "set_publication_option" +msgstr 
"{SET PUBLICATION の追加オプション}" + +#: sql_help.c:984 +msgid "refresh_option" +msgstr "{REFRESH PUBLICATION の追加オプション}" + +#: sql_help.c:989 sql_help.c:2363 +msgid "subscription_parameter" +msgstr "{SUBSCRIPTION パラメーター名}" + +#: sql_help.c:1100 sql_help.c:1103 +msgid "partition_name" +msgstr "パーティション名" -#: sql_help.c:916 +#: sql_help.c:1101 sql_help.c:1984 sql_help.c:2481 +msgid "partition_bound_spec" +msgstr "パーティション境界の仕様" + +#: sql_help.c:1119 sql_help.c:2493 +msgid "sequence_options" +msgstr "シーケンスオプション" + +#: sql_help.c:1121 +msgid "sequence_option" +msgstr "シーケンスオプション" + +#: sql_help.c:1133 msgid "table_constraint_using_index" -msgstr "インデックスを使用するテーブル制約" +msgstr "インデックスを使うテーブルの制約" -#: sql_help.c:924 sql_help.c:925 sql_help.c:926 sql_help.c:927 +#: sql_help.c:1141 sql_help.c:1142 sql_help.c:1143 sql_help.c:1144 msgid "rewrite_rule_name" msgstr "書き換えルール名" -#: sql_help.c:932 sql_help.c:933 sql_help.c:2041 -msgid "parent_table" -msgstr "親テーブル" - -#: sql_help.c:934 sql_help.c:2046 sql_help.c:2883 sql_help.c:3162 -msgid "type_name" -msgstr "型名" - -#: sql_help.c:938 +#: sql_help.c:1155 msgid "and table_constraint_using_index is:" -msgstr "またインデックスを使用するテーブルの制約条件は以下の通り:" +msgstr "テーブル制約は以下の通りです:" -#: sql_help.c:956 sql_help.c:959 sql_help.c:2121 +#: sql_help.c:1173 sql_help.c:1176 sql_help.c:2565 msgid "tablespace_option" -msgstr "テーブルスペース・オプション" +msgstr "テーブル空間のオプション" -#: sql_help.c:980 sql_help.c:983 sql_help.c:989 sql_help.c:993 +#: sql_help.c:1197 sql_help.c:1200 sql_help.c:1206 sql_help.c:1210 msgid "token_type" msgstr "トークンの型" -#: sql_help.c:981 sql_help.c:984 +#: sql_help.c:1198 sql_help.c:1201 msgid "dictionary_name" msgstr "辞書名" -#: sql_help.c:986 sql_help.c:990 +#: sql_help.c:1203 sql_help.c:1207 msgid "old_dictionary" msgstr "元の辞書" -#: sql_help.c:987 sql_help.c:991 +#: sql_help.c:1204 sql_help.c:1208 msgid "new_dictionary" msgstr "新しい辞書" -#: sql_help.c:1078 sql_help.c:1088 sql_help.c:1091 sql_help.c:1092 -#: sql_help.c:2254 +#: sql_help.c:1300 sql_help.c:1313 sql_help.c:1316 sql_help.c:1317 +#: sql_help.c:2716 msgid "attribute_name" msgstr "属性名" -#: sql_help.c:1079 +#: sql_help.c:1301 msgid "new_attribute_name" msgstr "新しい属性名" -#: sql_help.c:1085 +#: sql_help.c:1307 sql_help.c:1311 msgid "new_enum_value" msgstr "新しい列挙値" -#: sql_help.c:1086 +#: sql_help.c:1308 +msgid "neighbor_enum_value" +msgstr "隣接した列挙値" + +#: sql_help.c:1310 msgid "existing_enum_value" msgstr "既存の列挙値" -#: sql_help.c:1148 sql_help.c:1668 sql_help.c:1964 sql_help.c:2332 -#: sql_help.c:2703 sql_help.c:2867 sql_help.c:3146 +#: sql_help.c:1385 sql_help.c:1976 sql_help.c:1985 sql_help.c:2329 +#: sql_help.c:2794 sql_help.c:3215 sql_help.c:3388 sql_help.c:3423 +#: sql_help.c:3721 msgid "server_name" msgstr "サーバー名" -#: sql_help.c:1176 sql_help.c:1179 sql_help.c:2347 +#: sql_help.c:1413 sql_help.c:1416 sql_help.c:2809 msgid "view_option_name" msgstr "ビューのオプション名" -#: sql_help.c:1177 sql_help.c:2348 +#: sql_help.c:1414 sql_help.c:2810 msgid "view_option_value" -msgstr "ビューのオプション値" +msgstr "ビューオプションの値" -#: sql_help.c:1202 sql_help.c:3453 sql_help.c:3455 sql_help.c:3479 +#: sql_help.c:1439 sql_help.c:4053 sql_help.c:4055 sql_help.c:4079 msgid "transaction_mode" msgstr "トランザクションのモード" -#: sql_help.c:1203 sql_help.c:3456 sql_help.c:3480 +#: sql_help.c:1440 sql_help.c:4056 sql_help.c:4080 msgid "where transaction_mode is one of:" -msgstr "トランザクションのモードは以下のいずれか:" +msgstr "トランザクションのモードは以下の通りです:" -#: sql_help.c:1283 +#: sql_help.c:1528 msgid "relation_name" -msgstr "拡張名" +msgstr "リレーション名" -#: sql_help.c:1310 +#: sql_help.c:1533 
sql_help.c:3384 sql_help.c:3717 +msgid "domain_name" +msgstr "ドメイン名" + +#: sql_help.c:1555 +msgid "policy_name" +msgstr "ポリシー名" + +#: sql_help.c:1560 msgid "rule_name" -msgstr "ロール名" +msgstr "ルール名" -#: sql_help.c:1325 +#: sql_help.c:1579 msgid "text" -msgstr "テキスト" +msgstr "コメント文字列" -#: sql_help.c:1350 sql_help.c:2993 sql_help.c:3180 +#: sql_help.c:1604 sql_help.c:3567 sql_help.c:3755 msgid "transaction_id" -msgstr "トランザクション ID" +msgstr "トランザクションID" -#: sql_help.c:1381 sql_help.c:1387 sql_help.c:2919 +#: sql_help.c:1635 sql_help.c:1641 sql_help.c:3493 msgid "filename" msgstr "ファイル名" -#: sql_help.c:1382 sql_help.c:1388 sql_help.c:1915 sql_help.c:1916 -#: sql_help.c:1917 +#: sql_help.c:1636 sql_help.c:1642 sql_help.c:2269 sql_help.c:2270 +#: sql_help.c:2271 msgid "command" msgstr "コマンド" -#: sql_help.c:1386 sql_help.c:1802 sql_help.c:2107 sql_help.c:2349 -#: sql_help.c:2367 sql_help.c:2901 +#: sql_help.c:1640 sql_help.c:2125 sql_help.c:2551 sql_help.c:2811 +#: sql_help.c:2829 sql_help.c:3458 msgid "query" msgstr "問い合わせ" -#: sql_help.c:1390 sql_help.c:2748 +#: sql_help.c:1644 sql_help.c:3261 msgid "where option can be one of:" -msgstr "オプションは以下のいずれか:" +msgstr "オプションには以下のうちのいずれかを指定します:" -#: sql_help.c:1391 +#: sql_help.c:1645 msgid "format_name" msgstr "フォーマット名" -#: sql_help.c:1392 sql_help.c:1393 sql_help.c:1396 sql_help.c:2749 -#: sql_help.c:2750 sql_help.c:2751 sql_help.c:2752 sql_help.c:2753 +#: sql_help.c:1646 sql_help.c:1647 sql_help.c:1650 sql_help.c:3262 +#: sql_help.c:3263 sql_help.c:3264 sql_help.c:3265 sql_help.c:3266 +#: sql_help.c:3267 msgid "boolean" -msgstr "ブール値" +msgstr "真偽値" -#: sql_help.c:1394 +#: sql_help.c:1648 msgid "delimiter_character" msgstr "区切り文字" -#: sql_help.c:1395 +#: sql_help.c:1649 msgid "null_string" -msgstr "null文字列" +msgstr "NULL文字列" -#: sql_help.c:1397 +#: sql_help.c:1651 msgid "quote_character" msgstr "引用符文字" -#: sql_help.c:1398 +#: sql_help.c:1652 msgid "escape_character" msgstr "エスケープ文字" -#: sql_help.c:1402 +#: sql_help.c:1656 msgid "encoding_name" msgstr "エンコーディング名" -#: sql_help.c:1459 sql_help.c:1475 sql_help.c:1478 +#: sql_help.c:1667 +msgid "access_method_type" +msgstr "アクセスメソッドの型" + +#: sql_help.c:1733 sql_help.c:1752 sql_help.c:1755 msgid "arg_data_type" msgstr "入力データ型" -#: sql_help.c:1460 sql_help.c:1479 sql_help.c:1487 +#: sql_help.c:1734 sql_help.c:1756 sql_help.c:1764 msgid "sfunc" msgstr "状態遷移関数" -#: sql_help.c:1461 sql_help.c:1480 sql_help.c:1488 +#: sql_help.c:1735 sql_help.c:1757 sql_help.c:1765 msgid "state_data_type" msgstr "状態データの型" -#: sql_help.c:1462 sql_help.c:1481 sql_help.c:1489 +#: sql_help.c:1736 sql_help.c:1758 sql_help.c:1766 msgid "state_data_size" -msgstr "状態データの大きさ" +msgstr "状態データのサイズ" -#: sql_help.c:1463 sql_help.c:1482 sql_help.c:1490 +#: sql_help.c:1737 sql_help.c:1759 sql_help.c:1767 msgid "ffunc" msgstr "終了関数" -#: sql_help.c:1464 sql_help.c:1483 sql_help.c:1491 +#: sql_help.c:1738 sql_help.c:1768 +msgid "combinefunc" +msgstr "結合関数" + +#: sql_help.c:1739 sql_help.c:1769 +msgid "serialfunc" +msgstr "シリアライズ関数" + +#: sql_help.c:1740 sql_help.c:1770 +msgid "deserialfunc" +msgstr "デシリアライズ関数" + +#: sql_help.c:1741 sql_help.c:1760 sql_help.c:1771 msgid "initial_condition" msgstr "初期条件" -#: sql_help.c:1465 sql_help.c:1492 +#: sql_help.c:1742 sql_help.c:1772 msgid "msfunc" msgstr "前方状態遷移関数" -#: sql_help.c:1466 sql_help.c:1493 +#: sql_help.c:1743 sql_help.c:1773 msgid "minvfunc" msgstr "逆状態遷移関数" -#: sql_help.c:1467 sql_help.c:1494 +#: sql_help.c:1744 sql_help.c:1774 msgid "mstate_data_type" -msgstr "移動集約モードでの状態データの型" +msgstr 
"移動集約モード時の状態値のデータ型" -#: sql_help.c:1468 sql_help.c:1495 +#: sql_help.c:1745 sql_help.c:1775 msgid "mstate_data_size" -msgstr "移動集約モードでの状態データの大きさ" +msgstr "移動集約モード時の状態値のデータサイズ" -#: sql_help.c:1469 sql_help.c:1496 +#: sql_help.c:1746 sql_help.c:1776 msgid "mffunc" -msgstr "移動集約モードでの終了関数" +msgstr "移動集約モード時の終了関数" -#: sql_help.c:1470 sql_help.c:1497 +#: sql_help.c:1747 sql_help.c:1777 msgid "minitial_condition" -msgstr "移動集約モードでの初期条件" +msgstr "移動集約モード時の初期条件" -#: sql_help.c:1471 sql_help.c:1498 +#: sql_help.c:1748 sql_help.c:1778 msgid "sort_operator" msgstr "ソート演算子" -#: sql_help.c:1484 +#: sql_help.c:1761 msgid "or the old syntax" msgstr "または古い構文" -#: sql_help.c:1486 +#: sql_help.c:1763 msgid "base_type" msgstr "基本の型" -#: sql_help.c:1537 +#: sql_help.c:1819 msgid "locale" msgstr "ロケール" -#: sql_help.c:1538 sql_help.c:1572 +#: sql_help.c:1820 sql_help.c:1858 msgid "lc_collate" msgstr "照合順序" -#: sql_help.c:1539 sql_help.c:1573 +#: sql_help.c:1821 sql_help.c:1859 msgid "lc_ctype" msgstr "Ctype(変換演算子)" -#: sql_help.c:1541 +#: sql_help.c:1822 sql_help.c:3806 +msgid "provider" +msgstr "プロバイダ" + +#: sql_help.c:1823 sql_help.c:1914 +msgid "version" +msgstr "バージョン" + +#: sql_help.c:1825 msgid "existing_collation" msgstr "既存の照合順序" -#: sql_help.c:1551 +#: sql_help.c:1835 msgid "source_encoding" msgstr "変換元のエンコーディング" -#: sql_help.c:1552 +#: sql_help.c:1836 msgid "dest_encoding" msgstr "変換先のエンコーディング" -#: sql_help.c:1570 sql_help.c:2147 +#: sql_help.c:1856 sql_help.c:2591 msgid "template" msgstr "テンプレート" -#: sql_help.c:1571 +#: sql_help.c:1857 msgid "encoding" -msgstr "エンコーディング" +msgstr "エンコード" + +#: sql_help.c:1883 +msgid "constraint" +msgstr "制約条件" -#: sql_help.c:1596 +#: sql_help.c:1884 msgid "where constraint is:" -msgstr "制約条件:" +msgstr "制約条件は以下の通りです:" -#: sql_help.c:1610 sql_help.c:1912 sql_help.c:2203 +#: sql_help.c:1898 sql_help.c:2266 sql_help.c:2664 msgid "event" msgstr "イベント" -#: sql_help.c:1611 +#: sql_help.c:1899 msgid "filter_variable" -msgstr "フィルタ変数" +msgstr "フィルター変数" -#: sql_help.c:1623 -msgid "extension_name" -msgstr "拡張名" - -#: sql_help.c:1625 -msgid "version" -msgstr "バージョン" - -#: sql_help.c:1626 +#: sql_help.c:1915 msgid "old_version" -msgstr "古いバージョン" +msgstr "旧バージョン" -#: sql_help.c:1671 sql_help.c:2053 +#: sql_help.c:1988 sql_help.c:2489 msgid "where column_constraint is:" -msgstr "列制約:" +msgstr "カラム制約は以下の通りです:" -#: sql_help.c:1673 sql_help.c:1700 sql_help.c:2056 +#: sql_help.c:1991 sql_help.c:2023 sql_help.c:2492 msgid "default_expr" -msgstr "デフォルトの評価式" +msgstr "デフォルト表現" + +#: sql_help.c:1992 sql_help.c:2500 +msgid "and table_constraint is:" +msgstr "テーブル制約は以下の通りです:" -#: sql_help.c:1701 +#: sql_help.c:2024 msgid "rettype" msgstr "戻り値の型" -#: sql_help.c:1703 +#: sql_help.c:2026 msgid "column_type" msgstr "列の型" -#: sql_help.c:1704 sql_help.c:2401 sql_help.c:2875 sql_help.c:3154 -msgid "lang_name" -msgstr "言語" - -#: sql_help.c:1710 +#: sql_help.c:2034 msgid "definition" msgstr "定義" -#: sql_help.c:1711 +#: sql_help.c:2035 msgid "obj_file" msgstr "オブジェクトファイル名" -#: sql_help.c:1712 +#: sql_help.c:2036 msgid "link_symbol" msgstr "リンクシンボル" -#: sql_help.c:1713 +#: sql_help.c:2037 msgid "attribute" msgstr "属性" -#: sql_help.c:1748 sql_help.c:1897 sql_help.c:2321 +#: sql_help.c:2071 sql_help.c:2251 sql_help.c:2783 msgid "uid" -msgstr "ユーザーID" +msgstr "UID" -#: sql_help.c:1762 +#: sql_help.c:2085 msgid "method" -msgstr "メソッド" +msgstr "インデックスメソッド" -#: sql_help.c:1766 sql_help.c:2088 +#: sql_help.c:2089 sql_help.c:2460 sql_help.c:2472 sql_help.c:2485 +#: sql_help.c:2532 sql_help.c:3467 msgid "opclass" 
msgstr "演算子クラス" -#: sql_help.c:1770 sql_help.c:2074 +#: sql_help.c:2093 sql_help.c:2511 msgid "predicate" -msgstr "述語" +msgstr "インデックスの述語" -#: sql_help.c:1782 +#: sql_help.c:2105 msgid "call_handler" msgstr "呼び出しハンドラー" -#: sql_help.c:1783 +#: sql_help.c:2106 msgid "inline_handler" msgstr "インラインハンドラー" -#: sql_help.c:1784 +#: sql_help.c:2107 msgid "valfunction" -msgstr "バリデータ関数" +msgstr "バリデーション関数" -#: sql_help.c:1820 +#: sql_help.c:2143 msgid "com_op" -msgstr "交換用演算子" +msgstr "交代演算子" -#: sql_help.c:1821 +#: sql_help.c:2144 msgid "neg_op" -msgstr "否定用演算子" - -#: sql_help.c:1822 -msgid "res_proc" -msgstr "制約手続き" - -#: sql_help.c:1823 -msgid "join_proc" -msgstr "JOIN手続き" +msgstr "否定演算子" -#: sql_help.c:1839 +#: sql_help.c:2162 msgid "family_name" -msgstr "ファミリー名" +msgstr "演算子族の名前" -#: sql_help.c:1850 +#: sql_help.c:2173 msgid "storage_type" -msgstr "ストレージの型" +msgstr "ストレージタイプ" -#: sql_help.c:1914 sql_help.c:2206 sql_help.c:2383 sql_help.c:3307 -#: sql_help.c:3309 sql_help.c:3386 sql_help.c:3388 sql_help.c:3525 -#: sql_help.c:3527 sql_help.c:3615 sql_help.c:3694 sql_help.c:3696 +#: sql_help.c:2268 sql_help.c:2668 sql_help.c:2845 sql_help.c:3477 +#: sql_help.c:3896 sql_help.c:3898 sql_help.c:3986 sql_help.c:3988 +#: sql_help.c:4135 sql_help.c:4137 sql_help.c:4240 sql_help.c:4329 +#: sql_help.c:4331 msgid "condition" msgstr "条件" -#: sql_help.c:1918 sql_help.c:2209 +#: sql_help.c:2272 sql_help.c:2671 msgid "where event can be one of:" -msgstr "イベントは以下のいずれか:" +msgstr "イベントは以下のいずれかです:" -#: sql_help.c:1931 sql_help.c:1933 +#: sql_help.c:2291 sql_help.c:2293 msgid "schema_element" msgstr "スキーマ要素" -#: sql_help.c:1965 +#: sql_help.c:2330 msgid "server_type" msgstr "サーバーのタイプ" -#: sql_help.c:1966 +#: sql_help.c:2331 msgid "server_version" msgstr "サーバーのバージョン" -#: sql_help.c:1967 sql_help.c:2865 sql_help.c:3144 +#: sql_help.c:2332 sql_help.c:3386 sql_help.c:3719 msgid "fdw_name" -msgstr "外部データラッパー" +msgstr "外部データラッパ名" + +#: sql_help.c:2345 +msgid "statistics_name" +msgstr "統計オブジェクト名" + +#: sql_help.c:2346 +msgid "statistics_kind" +msgstr "統計種別" + +#: sql_help.c:2360 +msgid "subscription_name" +msgstr "サブスクリプション名" -#: sql_help.c:2039 +#: sql_help.c:2454 msgid "source_table" -msgstr "ソースのテーブル" +msgstr "コピー元のテーブル" -#: sql_help.c:2040 +#: sql_help.c:2455 msgid "like_option" -msgstr "LIKE オプション:" +msgstr "LIKEオプション" -#: sql_help.c:2057 sql_help.c:2058 sql_help.c:2067 sql_help.c:2069 -#: sql_help.c:2073 +#: sql_help.c:2494 sql_help.c:2495 sql_help.c:2504 sql_help.c:2506 +#: sql_help.c:2510 msgid "index_parameters" -msgstr "インデックスのパラメーター" +msgstr "インデックスパラメーター" -#: sql_help.c:2059 sql_help.c:2076 +#: sql_help.c:2496 sql_help.c:2513 msgid "reftable" msgstr "参照テーブル" -#: sql_help.c:2060 sql_help.c:2077 +#: sql_help.c:2497 sql_help.c:2514 msgid "refcolumn" msgstr "参照列" -#: sql_help.c:2063 -msgid "and table_constraint is:" -msgstr "テーブル制約:" - -#: sql_help.c:2071 +#: sql_help.c:2508 msgid "exclude_element" -msgstr "排他要素" +msgstr "除外対象要素" -#: sql_help.c:2072 sql_help.c:3314 sql_help.c:3393 sql_help.c:3532 -#: sql_help.c:3646 sql_help.c:3701 +#: sql_help.c:2509 sql_help.c:3903 sql_help.c:3993 sql_help.c:4142 +#: sql_help.c:4271 sql_help.c:4336 msgid "operator" msgstr "演算子" -#: sql_help.c:2080 +#: sql_help.c:2517 msgid "and like_option is:" -msgstr "LIKE オプション:" +msgstr "LIKE オプションは以下の通りです:" + +#: sql_help.c:2518 +msgid "and partition_bound_spec is:" +msgstr "パーティション境界の仕様は以下の通りです:" + +#: sql_help.c:2519 sql_help.c:2521 sql_help.c:2523 +msgid "numeric_literal" +msgstr "数値定数" -#: sql_help.c:2081 +#: sql_help.c:2520 
sql_help.c:2522 sql_help.c:2524 +msgid "string_literal" +msgstr "文字列定数" + +#: sql_help.c:2525 msgid "index_parameters in UNIQUE, PRIMARY KEY, and EXCLUDE constraints are:" -msgstr "UNIQUE, PRIMARY KEY, EXCLUDE におけるインデックスパラメーターの制約条件:" +msgstr "UNIQUE, PRIMARY KEY, EXCLUDE 制約のインデックスパラメーターは以下の通りです:" -#: sql_help.c:2085 +#: sql_help.c:2529 msgid "exclude_element in an EXCLUDE constraint is:" -msgstr "EXCLUDE における排他要素の制約条件:" +msgstr "EXCLUDE 制約の除外対象要素は以下の通りです:" -#: sql_help.c:2120 +#: sql_help.c:2564 msgid "directory" -msgstr "ディレクトリー" +msgstr "ディレクトリ" -#: sql_help.c:2134 +#: sql_help.c:2578 msgid "parser_name" -msgstr "パーサー名" +msgstr "パーサ名" -#: sql_help.c:2135 +#: sql_help.c:2579 msgid "source_config" -msgstr "ソース設定" +msgstr "複製元の設定" -#: sql_help.c:2164 +#: sql_help.c:2608 msgid "start_function" msgstr "開始関数" -#: sql_help.c:2165 +#: sql_help.c:2609 msgid "gettoken_function" -msgstr "トークン取得用関数" +msgstr "トークン取得関数" -#: sql_help.c:2166 +#: sql_help.c:2610 msgid "end_function" msgstr "終了関数" -#: sql_help.c:2167 +#: sql_help.c:2611 msgid "lextypes_function" -msgstr "LEX 型の関数" +msgstr "LEXTYPE関数" -#: sql_help.c:2168 +#: sql_help.c:2612 msgid "headline_function" msgstr "見出し関数" -#: sql_help.c:2180 +#: sql_help.c:2624 msgid "init_function" msgstr "初期処理関数" -#: sql_help.c:2181 +#: sql_help.c:2625 msgid "lexize_function" -msgstr "LEX 処理関数" +msgstr "LEXIZE関数" + +#: sql_help.c:2638 +msgid "from_sql_function_name" +msgstr "{FROM SQL 関数名}" + +#: sql_help.c:2640 +msgid "to_sql_function_name" +msgstr "{TO SQL 関数名}" -#: sql_help.c:2205 +#: sql_help.c:2666 msgid "referenced_table_name" -msgstr "非参照テーブル名" +msgstr "被参照テーブル名" -#: sql_help.c:2208 +#: sql_help.c:2667 +msgid "transition_relation_name" +msgstr "移行用リレーション名" + +#: sql_help.c:2670 msgid "arguments" msgstr "引数" -#: sql_help.c:2258 sql_help.c:3252 +#: sql_help.c:2720 sql_help.c:3831 msgid "label" msgstr "ラベル" -#: sql_help.c:2260 +#: sql_help.c:2722 msgid "subtype" -msgstr "派生元型" +msgstr "当該範囲のデータ型" -#: sql_help.c:2261 +#: sql_help.c:2723 msgid "subtype_operator_class" -msgstr "派生元型の演算子クラス" +msgstr "当該範囲のデータ型の演算子クラス" -#: sql_help.c:2263 +#: sql_help.c:2725 msgid "canonical_function" msgstr "正規化関数" -#: sql_help.c:2264 +#: sql_help.c:2726 msgid "subtype_diff_function" -msgstr "派生元型差異関数" +msgstr "当該範囲のデータ型の差分抽出関数" -#: sql_help.c:2266 +#: sql_help.c:2728 msgid "input_function" msgstr "入力関数" -#: sql_help.c:2267 +#: sql_help.c:2729 msgid "output_function" msgstr "出力関数" -#: sql_help.c:2268 +#: sql_help.c:2730 msgid "receive_function" msgstr "受信関数" -#: sql_help.c:2269 +#: sql_help.c:2731 msgid "send_function" msgstr "送信関数" -#: sql_help.c:2270 +#: sql_help.c:2732 msgid "type_modifier_input_function" msgstr "型修飾子の入力関数" -#: sql_help.c:2271 +#: sql_help.c:2733 msgid "type_modifier_output_function" msgstr "型修飾子の出力関数" -#: sql_help.c:2272 +#: sql_help.c:2734 msgid "analyze_function" msgstr "分析関数" -#: sql_help.c:2273 +#: sql_help.c:2735 msgid "internallength" msgstr "内部長" -#: sql_help.c:2274 +#: sql_help.c:2736 msgid "alignment" -msgstr "アラインメント" +msgstr "バイト境界" -#: sql_help.c:2275 +#: sql_help.c:2737 msgid "storage" msgstr "ストレージ" -#: sql_help.c:2276 +#: sql_help.c:2738 msgid "like_type" msgstr "LIKEの型" -#: sql_help.c:2277 +#: sql_help.c:2739 msgid "category" msgstr "カテゴリー" -#: sql_help.c:2278 +#: sql_help.c:2740 msgid "preferred" -msgstr "推奨" +msgstr "優先データ型かどうか(真偽値)" -#: sql_help.c:2279 +#: sql_help.c:2741 msgid "default" msgstr "デフォルト" -#: sql_help.c:2280 +#: sql_help.c:2742 msgid "element" -msgstr "要素" +msgstr "要素のデータ型" -#: sql_help.c:2281 +#: sql_help.c:2743 msgid 
"delimiter" -msgstr "デリミタ" +msgstr "区切り記号" -#: sql_help.c:2282 +#: sql_help.c:2744 msgid "collatable" -msgstr "照合順序" +msgstr "照合可能" -#: sql_help.c:2379 sql_help.c:2897 sql_help.c:3302 sql_help.c:3380 -#: sql_help.c:3520 sql_help.c:3607 sql_help.c:3689 +#: sql_help.c:2841 sql_help.c:3453 sql_help.c:3891 sql_help.c:3980 +#: sql_help.c:4130 sql_help.c:4230 sql_help.c:4324 msgid "with_query" msgstr "WITH問い合わせ" -#: sql_help.c:2381 sql_help.c:3321 sql_help.c:3324 sql_help.c:3327 -#: sql_help.c:3331 sql_help.c:3335 sql_help.c:3343 sql_help.c:3539 -#: sql_help.c:3542 sql_help.c:3545 sql_help.c:3549 sql_help.c:3553 -#: sql_help.c:3561 sql_help.c:3609 sql_help.c:3708 sql_help.c:3711 -#: sql_help.c:3714 sql_help.c:3718 sql_help.c:3722 sql_help.c:3730 +#: sql_help.c:2843 sql_help.c:3455 sql_help.c:3910 sql_help.c:3916 +#: sql_help.c:3919 sql_help.c:3923 sql_help.c:3927 sql_help.c:3935 +#: sql_help.c:4149 sql_help.c:4155 sql_help.c:4158 sql_help.c:4162 +#: sql_help.c:4166 sql_help.c:4174 sql_help.c:4232 sql_help.c:4343 +#: sql_help.c:4349 sql_help.c:4352 sql_help.c:4356 sql_help.c:4360 +#: sql_help.c:4368 msgid "alias" -msgstr "別名" +msgstr "エイリアス" -#: sql_help.c:2382 +#: sql_help.c:2844 msgid "using_list" -msgstr "USING リスト" +msgstr "USINGリスト" -#: sql_help.c:2384 sql_help.c:2779 sql_help.c:2960 sql_help.c:3616 +#: sql_help.c:2846 sql_help.c:3293 sql_help.c:3534 sql_help.c:4241 msgid "cursor_name" msgstr "カーソル名" -#: sql_help.c:2385 sql_help.c:2902 sql_help.c:3617 +#: sql_help.c:2847 sql_help.c:3461 sql_help.c:4242 msgid "output_expression" msgstr "出力表現" -#: sql_help.c:2386 sql_help.c:2903 sql_help.c:3305 sql_help.c:3383 -#: sql_help.c:3523 sql_help.c:3618 sql_help.c:3692 +#: sql_help.c:2848 sql_help.c:3462 sql_help.c:3894 sql_help.c:3983 +#: sql_help.c:4133 sql_help.c:4243 sql_help.c:4327 msgid "output_name" msgstr "出力名" -#: sql_help.c:2402 +#: sql_help.c:2864 msgid "code" -msgstr "コード" +msgstr "コードブロック" -#: sql_help.c:2727 +#: sql_help.c:3239 msgid "parameter" msgstr "パラメータ" -#: sql_help.c:2746 sql_help.c:2747 sql_help.c:2985 +#: sql_help.c:3259 sql_help.c:3260 sql_help.c:3559 msgid "statement" msgstr "ステートメント" -#: sql_help.c:2778 sql_help.c:2959 +#: sql_help.c:3292 sql_help.c:3533 msgid "direction" -msgstr "方向" +msgstr "取り出す方向と行数" -#: sql_help.c:2780 sql_help.c:2961 +#: sql_help.c:3294 sql_help.c:3535 msgid "where direction can be empty or one of:" -msgstr "方向は無指定もしくは以下のいずれか:" +msgstr "取り出す方向と行数は無指定もしくは以下のいずれかです:" -#: sql_help.c:2781 sql_help.c:2782 sql_help.c:2783 sql_help.c:2784 -#: sql_help.c:2785 sql_help.c:2962 sql_help.c:2963 sql_help.c:2964 -#: sql_help.c:2965 sql_help.c:2966 sql_help.c:3315 sql_help.c:3317 -#: sql_help.c:3394 sql_help.c:3396 sql_help.c:3533 sql_help.c:3535 -#: sql_help.c:3647 sql_help.c:3649 sql_help.c:3702 sql_help.c:3704 +#: sql_help.c:3295 sql_help.c:3296 sql_help.c:3297 sql_help.c:3298 +#: sql_help.c:3299 sql_help.c:3536 sql_help.c:3537 sql_help.c:3538 +#: sql_help.c:3539 sql_help.c:3540 sql_help.c:3904 sql_help.c:3906 +#: sql_help.c:3994 sql_help.c:3996 sql_help.c:4143 sql_help.c:4145 +#: sql_help.c:4272 sql_help.c:4274 sql_help.c:4337 sql_help.c:4339 msgid "count" -msgstr "カウント" +msgstr "取り出す位置や行数" -#: sql_help.c:2858 sql_help.c:3137 +#: sql_help.c:3379 sql_help.c:3712 msgid "sequence_name" msgstr "シーケンス名" -#: sql_help.c:2863 sql_help.c:3142 -msgid "domain_name" -msgstr "ドメイン名" - -#: sql_help.c:2871 sql_help.c:3150 +#: sql_help.c:3392 sql_help.c:3725 msgid "arg_name" msgstr "引数名" -#: sql_help.c:2872 sql_help.c:3151 +#: sql_help.c:3393 sql_help.c:3726 msgid "arg_type" 
msgstr "引数の型" -#: sql_help.c:2877 sql_help.c:3156 +#: sql_help.c:3398 sql_help.c:3731 msgid "loid" msgstr "ラージオブジェクトid" -#: sql_help.c:2911 sql_help.c:2974 sql_help.c:3593 +#: sql_help.c:3421 +msgid "remote_schema" +msgstr "リモートスキーマ" + +#: sql_help.c:3424 +msgid "local_schema" +msgstr "ローカルスキーマ" + +#: sql_help.c:3459 +msgid "conflict_target" +msgstr "競合ターゲット" + +#: sql_help.c:3460 +msgid "conflict_action" +msgstr "競合時アクション" + +#: sql_help.c:3463 +msgid "where conflict_target can be one of:" +msgstr "競合ターゲットは以下のいずれかです:" + +#: sql_help.c:3464 +msgid "index_column_name" +msgstr "インデックスのカラム名" + +#: sql_help.c:3465 +msgid "index_expression" +msgstr "インデックス表現" + +#: sql_help.c:3468 +msgid "index_predicate" +msgstr "インデックスの述語" + +#: sql_help.c:3470 +msgid "and conflict_action is one of:" +msgstr "競合時アクションは以下のいずれかです:" + +#: sql_help.c:3476 sql_help.c:4238 +msgid "sub-SELECT" +msgstr "副問い合わせ句" + +#: sql_help.c:3485 sql_help.c:3548 sql_help.c:4214 msgid "channel" msgstr "チャネル" -#: sql_help.c:2933 +#: sql_help.c:3507 msgid "lockmode" msgstr "ロックモード" -#: sql_help.c:2934 +#: sql_help.c:3508 msgid "where lockmode is one of:" -msgstr "ロックモードは以下のいずれか:" +msgstr "ロックモードは以下のいずれかです:" -#: sql_help.c:2975 +#: sql_help.c:3549 msgid "payload" msgstr "ペイロード" -#: sql_help.c:3001 +#: sql_help.c:3576 msgid "old_role" msgstr "元のロール" -#: sql_help.c:3002 +#: sql_help.c:3577 msgid "new_role" msgstr "新しいロール" -#: sql_help.c:3027 sql_help.c:3188 sql_help.c:3196 +#: sql_help.c:3602 sql_help.c:3763 sql_help.c:3771 msgid "savepoint_name" msgstr "セーブポイント名" -#: sql_help.c:3229 -msgid "provider" -msgstr "プロバイダ" - -#: sql_help.c:3306 sql_help.c:3345 sql_help.c:3347 sql_help.c:3385 -#: sql_help.c:3524 sql_help.c:3563 sql_help.c:3565 sql_help.c:3693 -#: sql_help.c:3732 sql_help.c:3734 +#: sql_help.c:3895 sql_help.c:3937 sql_help.c:3939 sql_help.c:3985 +#: sql_help.c:4134 sql_help.c:4176 sql_help.c:4178 sql_help.c:4328 +#: sql_help.c:4370 sql_help.c:4372 msgid "from_item" -msgstr "FROM 項目" +msgstr "FROM項目" + +#: sql_help.c:3897 sql_help.c:3949 sql_help.c:4136 sql_help.c:4188 +#: sql_help.c:4330 sql_help.c:4382 +msgid "grouping_element" +msgstr "グルーピング要素" -#: sql_help.c:3310 sql_help.c:3389 sql_help.c:3528 sql_help.c:3697 +#: sql_help.c:3899 sql_help.c:3989 sql_help.c:4138 sql_help.c:4332 msgid "window_name" msgstr "ウィンドウ名" -#: sql_help.c:3311 sql_help.c:3390 sql_help.c:3529 sql_help.c:3698 +#: sql_help.c:3900 sql_help.c:3990 sql_help.c:4139 sql_help.c:4333 msgid "window_definition" msgstr "ウィンドウ定義" -#: sql_help.c:3312 sql_help.c:3323 sql_help.c:3353 sql_help.c:3391 -#: sql_help.c:3530 sql_help.c:3541 sql_help.c:3571 sql_help.c:3699 -#: sql_help.c:3710 sql_help.c:3740 +#: sql_help.c:3901 sql_help.c:3915 sql_help.c:3953 sql_help.c:3991 +#: sql_help.c:4140 sql_help.c:4154 sql_help.c:4192 sql_help.c:4334 +#: sql_help.c:4348 sql_help.c:4386 msgid "select" msgstr "SELECT句" -#: sql_help.c:3319 sql_help.c:3537 sql_help.c:3706 +#: sql_help.c:3908 sql_help.c:4147 sql_help.c:4341 msgid "where from_item can be one of:" -msgstr "FROM項目は以下のいずれか:" +msgstr "FROM項目は以下のいずれかです:" -#: sql_help.c:3322 sql_help.c:3325 sql_help.c:3328 sql_help.c:3332 -#: sql_help.c:3344 sql_help.c:3540 sql_help.c:3543 sql_help.c:3546 -#: sql_help.c:3550 sql_help.c:3562 sql_help.c:3709 sql_help.c:3712 -#: sql_help.c:3715 sql_help.c:3719 sql_help.c:3731 +#: sql_help.c:3911 sql_help.c:3917 sql_help.c:3920 sql_help.c:3924 +#: sql_help.c:3936 sql_help.c:4150 sql_help.c:4156 sql_help.c:4159 +#: sql_help.c:4163 sql_help.c:4175 sql_help.c:4344 sql_help.c:4350 +#: sql_help.c:4353 
sql_help.c:4357 sql_help.c:4369 msgid "column_alias" -msgstr "列の別名" +msgstr "行エイリアス" -#: sql_help.c:3326 sql_help.c:3351 sql_help.c:3544 sql_help.c:3569 -#: sql_help.c:3713 sql_help.c:3738 -msgid "with_query_name" -msgstr "WITH問い合わせ名" +#: sql_help.c:3912 sql_help.c:4151 sql_help.c:4345 +msgid "sampling_method" +msgstr "サンプリングメソッド" -#: sql_help.c:3330 sql_help.c:3334 sql_help.c:3338 sql_help.c:3341 -#: sql_help.c:3548 sql_help.c:3552 sql_help.c:3556 sql_help.c:3559 -#: sql_help.c:3717 sql_help.c:3721 sql_help.c:3725 sql_help.c:3728 +#: sql_help.c:3913 sql_help.c:3922 sql_help.c:3926 sql_help.c:3930 +#: sql_help.c:3933 sql_help.c:4152 sql_help.c:4161 sql_help.c:4165 +#: sql_help.c:4169 sql_help.c:4172 sql_help.c:4346 sql_help.c:4355 +#: sql_help.c:4359 sql_help.c:4363 sql_help.c:4366 msgid "argument" msgstr "引数" -#: sql_help.c:3336 sql_help.c:3339 sql_help.c:3342 sql_help.c:3554 -#: sql_help.c:3557 sql_help.c:3560 sql_help.c:3723 sql_help.c:3726 -#: sql_help.c:3729 +#: sql_help.c:3914 sql_help.c:4153 sql_help.c:4347 +msgid "seed" +msgstr "乱数シード" + +#: sql_help.c:3918 sql_help.c:3951 sql_help.c:4157 sql_help.c:4190 +#: sql_help.c:4351 sql_help.c:4384 +msgid "with_query_name" +msgstr "WITH問い合わせ名" + +#: sql_help.c:3928 sql_help.c:3931 sql_help.c:3934 sql_help.c:4167 +#: sql_help.c:4170 sql_help.c:4173 sql_help.c:4361 sql_help.c:4364 +#: sql_help.c:4367 msgid "column_definition" -msgstr "列定義" +msgstr "カラム定義" -#: sql_help.c:3346 sql_help.c:3564 sql_help.c:3733 +#: sql_help.c:3938 sql_help.c:4177 sql_help.c:4371 msgid "join_type" -msgstr "結合種類" +msgstr "JOINタイプ" -#: sql_help.c:3348 sql_help.c:3566 sql_help.c:3735 +#: sql_help.c:3940 sql_help.c:4179 sql_help.c:4373 msgid "join_condition" -msgstr "結合条件" +msgstr "JOIN条件" -#: sql_help.c:3349 sql_help.c:3567 sql_help.c:3736 +#: sql_help.c:3941 sql_help.c:4180 sql_help.c:4374 msgid "join_column" -msgstr "結合列" +msgstr "JOINカラム" -#: sql_help.c:3350 sql_help.c:3568 sql_help.c:3737 +#: sql_help.c:3942 sql_help.c:4181 sql_help.c:4375 +msgid "and grouping_element can be one of:" +msgstr "グルーピング要素は以下のいずれかです:" + +#: sql_help.c:3950 sql_help.c:4189 sql_help.c:4383 msgid "and with_query is:" -msgstr "WITH問い合わせ:" +msgstr "WITH問い合わせは以下のいずれかです:" -#: sql_help.c:3354 sql_help.c:3572 sql_help.c:3741 +#: sql_help.c:3954 sql_help.c:4193 sql_help.c:4387 msgid "values" msgstr "VALUES句" -#: sql_help.c:3355 sql_help.c:3573 sql_help.c:3742 +#: sql_help.c:3955 sql_help.c:4194 sql_help.c:4388 msgid "insert" msgstr "INSERT句" -#: sql_help.c:3356 sql_help.c:3574 sql_help.c:3743 +#: sql_help.c:3956 sql_help.c:4195 sql_help.c:4389 msgid "update" msgstr "UPDATE句" -#: sql_help.c:3357 sql_help.c:3575 sql_help.c:3744 +#: sql_help.c:3957 sql_help.c:4196 sql_help.c:4390 msgid "delete" msgstr "DELETE句" -#: sql_help.c:3384 +#: sql_help.c:3984 msgid "new_table" msgstr "新しいテーブル" -#: sql_help.c:3409 +#: sql_help.c:4009 msgid "timezone" msgstr "タイムゾーン" -#: sql_help.c:3454 +#: sql_help.c:4054 msgid "snapshot_id" msgstr "スナップショットID" -#: sql_help.c:3614 +#: sql_help.c:4239 msgid "from_list" -msgstr "FROM リスト" +msgstr "FROMリスト" -#: sql_help.c:3645 +#: sql_help.c:4270 msgid "sort_expression" msgstr "ソート表現" -#: sql_help.h:191 sql_help.h:891 +#: sql_help.c:4397 sql_help.c:5182 msgid "abort the current transaction" -msgstr "現在のトランザクションを中断する" +msgstr "現在のトランザクションを中止します" -#: sql_help.h:196 +#: sql_help.c:4402 msgid "change the definition of an aggregate function" -msgstr "集約関数の定義を変更する" +msgstr "集約関数の定義を変更します。" -#: sql_help.h:201 +#: sql_help.c:4407 msgid "change the definition of a collation" -msgstr 
"照合順序の定義を変更する" +msgstr "照合順序の定義を変更します。" -#: sql_help.h:206 +#: sql_help.c:4412 msgid "change the definition of a conversion" -msgstr "エンコーディング変換ルールの定義を変更する" +msgstr "エンコーディング変換ルールの定義を変更します。" -#: sql_help.h:211 +#: sql_help.c:4417 msgid "change a database" -msgstr "データベースを変更する" +msgstr "データベースを変更します。" -#: sql_help.h:216 +#: sql_help.c:4422 msgid "define default access privileges" -msgstr "デフォルトのアクセス権限を定義する" +msgstr "デフォルトのアクセス権限を定義します。" -#: sql_help.h:221 +#: sql_help.c:4427 msgid "change the definition of a domain" -msgstr "ドメインの定義を変更する" +msgstr "ドメインの定義を変更します。" -#: sql_help.h:226 +#: sql_help.c:4432 msgid "change the definition of an event trigger" -msgstr "イベントトリガの定義を変更する" +msgstr "イベントトリガーの定義を変更します。" -#: sql_help.h:231 +#: sql_help.c:4437 msgid "change the definition of an extension" -msgstr "拡張の定義を変更する" +msgstr "拡張の定義を変更します。" -#: sql_help.h:236 +#: sql_help.c:4442 msgid "change the definition of a foreign-data wrapper" -msgstr "外部データラッパーの定義を変更する" +msgstr "外部データラッパの定義を変更します。" -#: sql_help.h:241 +#: sql_help.c:4447 msgid "change the definition of a foreign table" -msgstr "外部テーブルの定義を変更する" +msgstr "外部テーブルの定義を変更します。" -#: sql_help.h:246 +#: sql_help.c:4452 msgid "change the definition of a function" -msgstr "関数の定義を変更する" +msgstr "関数の定義を変更します。" -#: sql_help.h:251 +#: sql_help.c:4457 msgid "change role name or membership" -msgstr "ロールの名前またはメンバーシップを変更する" +msgstr "ロール名またはメンバーシップを変更します。" -#: sql_help.h:256 +#: sql_help.c:4462 msgid "change the definition of an index" -msgstr "インデックスの定義を変更する" +msgstr "インデックスの定義を変更します。" -#: sql_help.h:261 +#: sql_help.c:4467 msgid "change the definition of a procedural language" -msgstr "手続き言語の定義を変更する" +msgstr "手続き言語の定義を変更します。" -#: sql_help.h:266 +#: sql_help.c:4472 msgid "change the definition of a large object" -msgstr "ラージオブジェクトの定義を変更する" +msgstr "ラージオブジェクトの定義を変更します。" -#: sql_help.h:271 +#: sql_help.c:4477 msgid "change the definition of a materialized view" -msgstr "マテリアライズドビューの定義を変更する" +msgstr "マテリアライズドビューの定義を変更します。" -#: sql_help.h:276 +#: sql_help.c:4482 msgid "change the definition of an operator" -msgstr "演算子の定義を変更する" +msgstr "演算子の定義を変更します。" -#: sql_help.h:281 +#: sql_help.c:4487 msgid "change the definition of an operator class" -msgstr "演算子クラスの定義を変更する" +msgstr "演算子クラスの定義を変更します。" -#: sql_help.h:286 +#: sql_help.c:4492 msgid "change the definition of an operator family" -msgstr "演算子ファミリの定義を変更する" +msgstr "演算子族の定義を変更します。" + +#: sql_help.c:4497 +msgid "change the definition of a row level security policy" +msgstr "行レベルのセキュリティ ポリシーの定義を変更します。" -#: sql_help.h:291 sql_help.h:361 +#: sql_help.c:4502 +msgid "change the definition of a publication" +msgstr "パブリケーションの定義を変更します。" + +#: sql_help.c:4507 sql_help.c:4587 msgid "change a database role" -msgstr "データベースのロールを変更する" +msgstr "データベースロールを変更します。" -#: sql_help.h:296 +#: sql_help.c:4512 msgid "change the definition of a rule" -msgstr "ルールの定義を変更する" +msgstr "ルールの定義を変更します。" -#: sql_help.h:301 +#: sql_help.c:4517 msgid "change the definition of a schema" -msgstr "スキーマの定義を変更する" +msgstr "スキーマの定義を変更します。" -#: sql_help.h:306 +#: sql_help.c:4522 msgid "change the definition of a sequence generator" -msgstr "シーケンスジェネレーターの定義を変更する" +msgstr "シーケンスジェネレーターの定義を変更します。" -#: sql_help.h:311 +#: sql_help.c:4527 msgid "change the definition of a foreign server" -msgstr "外部サーバーの定義を変更する" +msgstr "外部サーバーの定義を変更します。" + +#: sql_help.c:4532 +msgid "change the definition of an extended statistics object" +msgstr "拡張統計情報オブジェクトの定義を変更します。" -#: sql_help.h:316 +#: sql_help.c:4537 +msgid "change the definition of a subscription" +msgstr 
"サブスクリプションの定義を変更します。" + +#: sql_help.c:4542 msgid "change a server configuration parameter" -msgstr "サーバ設定パラメータを変更する" +msgstr "サーバーの構成パラメーターを変更します。" -#: sql_help.h:321 +#: sql_help.c:4547 msgid "change the definition of a table" -msgstr "テーブルの定義を変更する" +msgstr "テーブルの定義を変更します。" -#: sql_help.h:326 +#: sql_help.c:4552 msgid "change the definition of a tablespace" -msgstr "テーブルスペースの定義を変更する" +msgstr "テーブル空間の定義を変更します。" -#: sql_help.h:331 +#: sql_help.c:4557 msgid "change the definition of a text search configuration" -msgstr "テキスト検索設定の定義を変更する" +msgstr "テキスト検索設定の定義を変更します。" -#: sql_help.h:336 +#: sql_help.c:4562 msgid "change the definition of a text search dictionary" -msgstr "テキスト検索辞書の定義を変更する" +msgstr "テキスト検索辞書の定義を変更します。" -#: sql_help.h:341 +#: sql_help.c:4567 msgid "change the definition of a text search parser" -msgstr "テキスト検索パーサの定義を変更する" +msgstr "テキスト検索パーサの定義を変更します。" -#: sql_help.h:346 +#: sql_help.c:4572 msgid "change the definition of a text search template" -msgstr "テキスト検索テンプレートの定義を変更する" +msgstr "テキスト検索テンプレートの定義を変更します。" -#: sql_help.h:351 +#: sql_help.c:4577 msgid "change the definition of a trigger" -msgstr "トリガの定義を変更する" +msgstr "トリガーの定義を変更します。" -#: sql_help.h:356 +#: sql_help.c:4582 msgid "change the definition of a type" -msgstr "型の定義を変更する" +msgstr "型の定義を変更します。" -#: sql_help.h:366 +#: sql_help.c:4592 msgid "change the definition of a user mapping" -msgstr "ユーザマッピングの定義を変更する" +msgstr "ユーザーマッピングの定義を変更します。" -#: sql_help.h:371 +#: sql_help.c:4597 msgid "change the definition of a view" -msgstr "ビューの定義を変更する" +msgstr "ビューの定義を変更します。" -#: sql_help.h:376 +#: sql_help.c:4602 msgid "collect statistics about a database" -msgstr "データベースの統計情報を収集する" +msgstr "データベースの統計情報を収集します。" -#: sql_help.h:381 sql_help.h:956 +#: sql_help.c:4607 sql_help.c:5247 msgid "start a transaction block" -msgstr "トランザクションブロックを開始する" +msgstr "トランザクションブロックを開始します。" -#: sql_help.h:386 -msgid "force a transaction log checkpoint" -msgstr "トランザクションログのチェックポイントを強制設定する" +#: sql_help.c:4612 +msgid "force a write-ahead log checkpoint" +msgstr "先行書き込みログのチェックポイントを強制的に実行します。" -#: sql_help.h:391 +#: sql_help.c:4617 msgid "close a cursor" -msgstr "カーソルを閉じる" +msgstr "カーソルを閉じます。" -#: sql_help.h:396 +#: sql_help.c:4622 msgid "cluster a table according to an index" -msgstr "インデックスに従ってテーブルをクラスタ化する" +msgstr "インデックスに従ってテーブルをクラスタ化します。" -#: sql_help.h:401 +#: sql_help.c:4627 msgid "define or change the comment of an object" -msgstr "オブジェクトのコメントを定義または変更する" +msgstr "オブジェクトのコメントを定義または変更します。" -#: sql_help.h:406 sql_help.h:796 +#: sql_help.c:4632 sql_help.c:5082 msgid "commit the current transaction" -msgstr "現在のトランザクションをコミットする" +msgstr "現在のトランザクションをコミットします。" -#: sql_help.h:411 +#: sql_help.c:4637 msgid "commit a transaction that was earlier prepared for two-phase commit" -msgstr "2フェーズコミットのために事前に準備されたトランザクションをコミットする" +msgstr "二相コミットのために事前に準備されたトランザクションをコミットします。" -#: sql_help.h:416 +#: sql_help.c:4642 msgid "copy data between a file and a table" -msgstr "ファイルとテーブル間でデータをコピーする" +msgstr "ファイルとテーブル間でデータをコピーします。" + +#: sql_help.c:4647 +msgid "define a new access method" +msgstr "新しいアクセスメソッドを定義します。" -#: sql_help.h:421 +#: sql_help.c:4652 msgid "define a new aggregate function" -msgstr "新しい集約関数を定義する" +msgstr "新しい集約関数を定義します。" -#: sql_help.h:426 +#: sql_help.c:4657 msgid "define a new cast" -msgstr "新しいキャストを定義する" +msgstr "新しいキャストを定義します。" -#: sql_help.h:431 +#: sql_help.c:4662 msgid "define a new collation" -msgstr "新しい照合順序を定義する" +msgstr "新しい照合順序を定義します。" -#: sql_help.h:436 +#: sql_help.c:4667 msgid "define a new encoding conversion" -msgstr "新しいエンコーディングの変換ルールを定義する" 
+msgstr "新しいエンコーディングの変換ルールを定義します。" -#: sql_help.h:441 +#: sql_help.c:4672 msgid "create a new database" -msgstr "新しいデータベースを作成する" +msgstr "新しいデータベースを作成します。" -#: sql_help.h:446 +#: sql_help.c:4677 msgid "define a new domain" -msgstr "新しいドメインを定義する" +msgstr "新しいドメインを定義します。" -#: sql_help.h:451 +#: sql_help.c:4682 msgid "define a new event trigger" -msgstr "新しいイベントトリガを定義する" +msgstr "新しいイベントトリガーを定義します。" -#: sql_help.h:456 +#: sql_help.c:4687 msgid "install an extension" -msgstr "拡張をインストールする" +msgstr "拡張をインストールします。" -#: sql_help.h:461 +#: sql_help.c:4692 msgid "define a new foreign-data wrapper" -msgstr "新しい外部データラッパーを定義する" +msgstr "新しい外部データラッパを定義します。" -#: sql_help.h:466 +#: sql_help.c:4697 msgid "define a new foreign table" -msgstr "新しい外部テーブルを定義する" +msgstr "新しい外部テーブルを定義します。" -#: sql_help.h:471 +#: sql_help.c:4702 msgid "define a new function" -msgstr "新しい関数を定義する" +msgstr "新しい関数を定義します。" -#: sql_help.h:476 sql_help.h:511 sql_help.h:581 +#: sql_help.c:4707 sql_help.c:4752 sql_help.c:4837 msgid "define a new database role" -msgstr "データベースの新しいロールを定義する" +msgstr "新しいデータベースロールを定義します。" -#: sql_help.h:481 +#: sql_help.c:4712 msgid "define a new index" -msgstr "新しいインデックスを定義する" +msgstr "新しいインデックスを定義します。" -#: sql_help.h:486 +#: sql_help.c:4717 msgid "define a new procedural language" -msgstr "新しい手続き言語を定義する" +msgstr "新しい手続き言語を定義します。" -#: sql_help.h:491 +#: sql_help.c:4722 msgid "define a new materialized view" -msgstr "新しいマテリアライズドビューを定義する" +msgstr "新しいマテリアライズドビューを定義します。" -#: sql_help.h:496 +#: sql_help.c:4727 msgid "define a new operator" -msgstr "新しい演算子を定義する" +msgstr "新しい演算子を定義します。" -#: sql_help.h:501 +#: sql_help.c:4732 msgid "define a new operator class" -msgstr "新しい演算子クラスを定義する" +msgstr "新しい演算子クラスを定義します。" -#: sql_help.h:506 +#: sql_help.c:4737 msgid "define a new operator family" -msgstr "新しい演算子ファミリを定義する" +msgstr "新しい演算子族を定義します。" -#: sql_help.h:516 +#: sql_help.c:4742 +msgid "define a new row level security policy for a table" +msgstr "テーブルに対して新しい行レベルのセキュリティポリシーを定義します。" + +#: sql_help.c:4747 +msgid "define a new publication" +msgstr "新しいパブリケーションを定義します。" + +#: sql_help.c:4757 msgid "define a new rewrite rule" -msgstr "新しい書き換えルールを定義する" +msgstr "新しい書き換えルールを定義します。" -#: sql_help.h:521 +#: sql_help.c:4762 msgid "define a new schema" -msgstr "新しいスキーマを定義する" +msgstr "新しいスキーマを定義します。" -#: sql_help.h:526 +#: sql_help.c:4767 msgid "define a new sequence generator" -msgstr "新しいシーケンスジェネレーターを定義する" +msgstr "新しいシーケンスジェネレーターを定義します。" -#: sql_help.h:531 +#: sql_help.c:4772 msgid "define a new foreign server" -msgstr "新しい外部サーバーを定義する" +msgstr "新しい外部サーバーを定義します。" + +#: sql_help.c:4777 +msgid "define extended statistics" +msgstr "拡張統計情報を定義します。" + +#: sql_help.c:4782 +msgid "define a new subscription" +msgstr "新しいサブスクリプションを定義します。" -#: sql_help.h:536 +#: sql_help.c:4787 msgid "define a new table" -msgstr "新しいテーブルを定義する" +msgstr "新しいテーブルを定義します。" -#: sql_help.h:541 sql_help.h:921 +#: sql_help.c:4792 sql_help.c:5212 msgid "define a new table from the results of a query" -msgstr "問い合わせ結果から新しいテーブルを生成する" +msgstr "問い合わせの結果から新しいテーブルを定義します。" -#: sql_help.h:546 +#: sql_help.c:4797 msgid "define a new tablespace" -msgstr "新しいテーブルスペースを定義する" +msgstr "新しいテーブル空間を定義します。" -#: sql_help.h:551 +#: sql_help.c:4802 msgid "define a new text search configuration" -msgstr "新しいテキスト検索設定を定義する" +msgstr "新しいテキスト検索設定を定義します。" -#: sql_help.h:556 +#: sql_help.c:4807 msgid "define a new text search dictionary" -msgstr "新しいテキスト検索用辞書を定義する" +msgstr "新しいテキスト検索辞書を定義します。" -#: sql_help.h:561 +#: sql_help.c:4812 msgid "define a new text search parser" -msgstr 
"新しいテキスト検索用パーサを定義する" +msgstr "新しいテキスト検索パーサを定義します。" -#: sql_help.h:566 +#: sql_help.c:4817 msgid "define a new text search template" -msgstr "新しいテキスト検索テンプレートを定義する" +msgstr "新しいテキスト検索テンプレートを定義します。" -#: sql_help.h:571 +#: sql_help.c:4822 +msgid "define a new transform" +msgstr "新しい自動変換ルールを定義します。" + +#: sql_help.c:4827 msgid "define a new trigger" -msgstr "新しいトリガを定義する" +msgstr "新しいトリガーを定義します。" -#: sql_help.h:576 +#: sql_help.c:4832 msgid "define a new data type" -msgstr "新しいデータ型を定義する" +msgstr "新しいデータ型を定義します。" -#: sql_help.h:586 +#: sql_help.c:4842 msgid "define a new mapping of a user to a foreign server" -msgstr "外部サーバーに対してユーザの新しいマッピングを定義する" +msgstr "外部サーバに対するユーザーの新しいマッピングを定義します。" -#: sql_help.h:591 +#: sql_help.c:4847 msgid "define a new view" -msgstr "新しいビューを定義する" +msgstr "新しいビューを定義します。" -#: sql_help.h:596 +#: sql_help.c:4852 msgid "deallocate a prepared statement" -msgstr "プリペアドステートメントを開放する" +msgstr "プリペアドステートメントを開放します。" -#: sql_help.h:601 +#: sql_help.c:4857 msgid "define a cursor" -msgstr "カーソルを定義する" +msgstr "カーソルを定義します。" -#: sql_help.h:606 +#: sql_help.c:4862 msgid "delete rows of a table" -msgstr "テーブルの行を削除する" +msgstr "テーブルの行を削除します。" -#: sql_help.h:611 +#: sql_help.c:4867 msgid "discard session state" -msgstr "セッションの状態を破棄する" +msgstr "セッション状態を破棄します。" -#: sql_help.h:616 +#: sql_help.c:4872 msgid "execute an anonymous code block" -msgstr "無名コードブロックを実行する" +msgstr "無名コードブロックを実行します。" + +#: sql_help.c:4877 +msgid "remove an access method" +msgstr "アクセスメソッドを削除します。" -#: sql_help.h:621 +#: sql_help.c:4882 msgid "remove an aggregate function" -msgstr "集約関数を削除する" +msgstr "集約関数を削除します。" -#: sql_help.h:626 +#: sql_help.c:4887 msgid "remove a cast" -msgstr "キャストを削除する" +msgstr "キャストを削除します。" -#: sql_help.h:631 +#: sql_help.c:4892 msgid "remove a collation" -msgstr "照合順序を削除する" +msgstr "照合順序を削除します。" -#: sql_help.h:636 +#: sql_help.c:4897 msgid "remove a conversion" -msgstr "エンコーディング変換ルールを削除する" +msgstr "符号化方式変換を削除します。" -#: sql_help.h:641 +#: sql_help.c:4902 msgid "remove a database" -msgstr "データベースを削除する" +msgstr "データベースを削除します。" -#: sql_help.h:646 +#: sql_help.c:4907 msgid "remove a domain" -msgstr "ドメインを削除する" +msgstr "ドメインを削除します。" -#: sql_help.h:651 +#: sql_help.c:4912 msgid "remove an event trigger" -msgstr "イベントトリガを削除する" +msgstr "イベントトリガーを削除します。" -#: sql_help.h:656 +#: sql_help.c:4917 msgid "remove an extension" -msgstr "拡張を削除する" +msgstr "拡張を削除します。" -#: sql_help.h:661 +#: sql_help.c:4922 msgid "remove a foreign-data wrapper" -msgstr "外部データラッパーを削除する" +msgstr "外部データラッパを削除します。" -#: sql_help.h:666 +#: sql_help.c:4927 msgid "remove a foreign table" -msgstr "外部テーブルを削除する" +msgstr "外部テーブルを削除します。" -#: sql_help.h:671 +#: sql_help.c:4932 msgid "remove a function" -msgstr "関数を削除する" +msgstr "関数を削除します。" -#: sql_help.h:676 sql_help.h:716 sql_help.h:781 +#: sql_help.c:4937 sql_help.c:4987 sql_help.c:5067 msgid "remove a database role" -msgstr "データベースのロールを削除する" +msgstr "データベースロールを削除します。" -#: sql_help.h:681 +#: sql_help.c:4942 msgid "remove an index" -msgstr "インデックスを削除する" +msgstr "インデックスを削除します。" -#: sql_help.h:686 +#: sql_help.c:4947 msgid "remove a procedural language" -msgstr "手続き言語を削除する" +msgstr "手続き言語を削除します。" -#: sql_help.h:691 +#: sql_help.c:4952 msgid "remove a materialized view" -msgstr "マテリアライズドビューを削除する" +msgstr "マテリアライズドビューを削除します。" -#: sql_help.h:696 +#: sql_help.c:4957 msgid "remove an operator" -msgstr "演算子を削除する" +msgstr "演算子を削除します。" -#: sql_help.h:701 +#: sql_help.c:4962 msgid "remove an operator class" -msgstr "演算子クラスを削除する" +msgstr "演算子クラスを削除します。" -#: sql_help.h:706 +#: sql_help.c:4967 msgid "remove an 
operator family" -msgstr "演算子ファミリを削除する" +msgstr "演算子族を削除します。" -#: sql_help.h:711 +#: sql_help.c:4972 msgid "remove database objects owned by a database role" -msgstr "特定のデータベースロールが所有するデータベースオブジェクトを削除する" +msgstr "データベースロールが所有するデータベースオブジェクトを削除します。" + +#: sql_help.c:4977 +msgid "remove a row level security policy from a table" +msgstr "テーブルから行レベルのセキュリティポリシーを削除します。" + +#: sql_help.c:4982 +msgid "remove a publication" +msgstr "パブリケーションを削除します。" -#: sql_help.h:721 +#: sql_help.c:4992 msgid "remove a rewrite rule" -msgstr "書き換えルールを削除する" +msgstr "書き換えルールを削除します。" -#: sql_help.h:726 +#: sql_help.c:4997 msgid "remove a schema" -msgstr "スキーマを削除する" +msgstr "スキーマを削除します。" -#: sql_help.h:731 +#: sql_help.c:5002 msgid "remove a sequence" -msgstr "シーケンスを削除する" +msgstr "シーケンスを削除します。" -#: sql_help.h:736 +#: sql_help.c:5007 msgid "remove a foreign server descriptor" -msgstr "外部サーバー識別子を削除する" +msgstr "外部サーバ記述子を削除します。" -#: sql_help.h:741 +#: sql_help.c:5012 +msgid "remove extended statistics" +msgstr "拡張統計情報を削除します。" + +#: sql_help.c:5017 +msgid "remove a subscription" +msgstr "サブスクリプションを削除します。" + +#: sql_help.c:5022 msgid "remove a table" -msgstr "テーブルを削除する" +msgstr "テーブルを削除します。" -#: sql_help.h:746 +#: sql_help.c:5027 msgid "remove a tablespace" -msgstr "テーブルスペースを削除する" +msgstr "テーブル空間を削除します。" -#: sql_help.h:751 +#: sql_help.c:5032 msgid "remove a text search configuration" -msgstr "テキスト検索設定を削除する" +msgstr "テキスト検索設定を削除します。" -#: sql_help.h:756 +#: sql_help.c:5037 msgid "remove a text search dictionary" -msgstr "テキスト検索用辞書を削除する" +msgstr "テキスト検索辞書を削除します。" -#: sql_help.h:761 +#: sql_help.c:5042 msgid "remove a text search parser" -msgstr "テキスト検索用パーサを削除する" +msgstr "テキスト検索パーサを削除します。" -#: sql_help.h:766 +#: sql_help.c:5047 msgid "remove a text search template" -msgstr "テキスト検索用テンプレートを削除する" +msgstr "テキスト検索テンプレートを削除します。" -#: sql_help.h:771 +#: sql_help.c:5052 +msgid "remove a transform" +msgstr "自動変換ルールを削除します。" + +#: sql_help.c:5057 msgid "remove a trigger" -msgstr "トリガを削除する" +msgstr "トリガーを削除します。" -#: sql_help.h:776 +#: sql_help.c:5062 msgid "remove a data type" -msgstr "データ型を削除する" +msgstr "データ型を削除します。" -#: sql_help.h:786 +#: sql_help.c:5072 msgid "remove a user mapping for a foreign server" -msgstr "外部サーバーのユーザマッピングを削除" +msgstr "外部サーバのユーザーマッピングを削除します。" -#: sql_help.h:791 +#: sql_help.c:5077 msgid "remove a view" -msgstr "ビューを削除する" +msgstr "ビューを削除します。" -#: sql_help.h:801 +#: sql_help.c:5087 msgid "execute a prepared statement" -msgstr "プリペアドステートメントを実行する" +msgstr "プリペアドステートメントを実行します。" -#: sql_help.h:806 +#: sql_help.c:5092 msgid "show the execution plan of a statement" -msgstr "ステートメントの実行プランを表示する" +msgstr "ステートメントの実行計画を表示します。" -#: sql_help.h:811 +#: sql_help.c:5097 msgid "retrieve rows from a query using a cursor" -msgstr "カーソルを使って問い合わせから行を取り出す" +msgstr "カーソルを使って問い合わせから行を取り出します。" -#: sql_help.h:816 +#: sql_help.c:5102 msgid "define access privileges" -msgstr "アクセス権限を定義する" +msgstr "アクセス権限を定義します。" + +#: sql_help.c:5107 +msgid "import table definitions from a foreign server" +msgstr "外部サーバーからテーブル定義をインポートします。" -#: sql_help.h:821 +#: sql_help.c:5112 msgid "create new rows in a table" -msgstr "テーブルに新しい行を作成する" +msgstr "テーブルに新しい行を作成します。" -#: sql_help.h:826 +#: sql_help.c:5117 msgid "listen for a notification" -msgstr "通知メッセージを監視する" +msgstr "通知メッセージを監視します。" -#: sql_help.h:831 +#: sql_help.c:5122 msgid "load a shared library file" -msgstr "共有ライブラリファイルをロードする" +msgstr "共有ライブラリファイルをロードします。" -#: sql_help.h:836 +#: sql_help.c:5127 msgid "lock a table" -msgstr "テーブルをロックする" +msgstr "テーブルをロックします。" -#: sql_help.h:841 +#: sql_help.c:5132 msgid 
"position a cursor" -msgstr "カーソルを位置付ける" +msgstr "カーソルを位置づけます。" -#: sql_help.h:846 +#: sql_help.c:5137 msgid "generate a notification" -msgstr "通知メッセージを生成する" +msgstr "通知を生成します。" -#: sql_help.h:851 +#: sql_help.c:5142 msgid "prepare a statement for execution" -msgstr "実行に先立ってステートメントを準備する" +msgstr "実行に備えてステートメントを準備します。" -#: sql_help.h:856 +#: sql_help.c:5147 msgid "prepare the current transaction for two-phase commit" -msgstr "2フェーズコミット用に現在のトランザクションを準備する" +msgstr "二相コミットに備えて現在のトランザクションを準備します。" -#: sql_help.h:861 +#: sql_help.c:5152 msgid "change the ownership of database objects owned by a database role" -msgstr "あるデータベースロールが所有するデータベースオブジェクトの所有者を変更する" +msgstr "データベースロールが所有するデータベースオブジェクトの所有権を変更します。" -#: sql_help.h:866 +#: sql_help.c:5157 msgid "replace the contents of a materialized view" -msgstr "マテリアライズドビューの内容を置き換える" +msgstr "マテリアライズドビューの内容を置き換えます。" -#: sql_help.h:871 +#: sql_help.c:5162 msgid "rebuild indexes" -msgstr "インデックスを再構築する" +msgstr "インデックスを再構築します。" -#: sql_help.h:876 +#: sql_help.c:5167 msgid "destroy a previously defined savepoint" -msgstr "前回定義したセーブポイントを削除する" +msgstr "以前に定義されたセーブポイントを破棄します。" -#: sql_help.h:881 +#: sql_help.c:5172 msgid "restore the value of a run-time parameter to the default value" -msgstr "実行時パラメータの値をデフォルト値に戻す" +msgstr "実行時パラメーターの値をデフォルト値に戻します。" -#: sql_help.h:886 +#: sql_help.c:5177 msgid "remove access privileges" -msgstr "アクセス権限を剥奪する" +msgstr "アクセス特権を削除します。" -#: sql_help.h:896 +#: sql_help.c:5187 msgid "cancel a transaction that was earlier prepared for two-phase commit" -msgstr "2フェーズコミット用に事前準備されたトランザクションをキャンセルする" +msgstr "二相コミットのために事前に準備されたトランザクションをキャンセルします。" -#: sql_help.h:901 +#: sql_help.c:5192 msgid "roll back to a savepoint" -msgstr "セーブポイントまでロールバックする" +msgstr "セーブポイントまでロールバックします。" -#: sql_help.h:906 +#: sql_help.c:5197 msgid "define a new savepoint within the current transaction" -msgstr "現在のトランザクションに対して新しいセーブポイントを定義する" +msgstr "現在のトランザクション内で新しいセーブポイントを定義します。" -#: sql_help.h:911 +#: sql_help.c:5202 msgid "define or change a security label applied to an object" -msgstr "オブジェクトに適用されるセキュリティラベルを定義または変更する" +msgstr "オブジェクトに適用されるセキュリティラベルを定義または変更します。" -#: sql_help.h:916 sql_help.h:961 sql_help.h:991 +#: sql_help.c:5207 sql_help.c:5252 sql_help.c:5282 msgid "retrieve rows from a table or view" -msgstr "テーブルもしくはビューから行を取り出す" +msgstr "テーブルまたはビューから行を取得します。" -#: sql_help.h:926 +#: sql_help.c:5217 msgid "change a run-time parameter" -msgstr "実行時パラメータを変更する" +msgstr "実行時のパラメーターを変更します。" -#: sql_help.h:931 +#: sql_help.c:5222 msgid "set constraint check timing for the current transaction" -msgstr "現在のトランザクションに対して制約検査のタイミングを設定する" +msgstr "現在のトランザクションについて、制約チェックのタイミングを設定します。" -#: sql_help.h:936 +#: sql_help.c:5227 msgid "set the current user identifier of the current session" -msgstr "現在のセッションにおける現在のユーザ識別を設定する" +msgstr "現在のセッションの現在のユーザー識別子を設定します。" -#: sql_help.h:941 +#: sql_help.c:5232 msgid "set the session user identifier and the current user identifier of the current session" -msgstr "セッションのユーザ識別、および現在のセッションにおける現在のユーザ識別を設定する" +msgstr "セッションのユーザ識別子および現在のセッションの現在のユーザー識別子を設定します。" -#: sql_help.h:946 +#: sql_help.c:5237 msgid "set the characteristics of the current transaction" -msgstr "現在のトランザクションの特性を設定します" +msgstr "現在のトランザクションの特性を設定します。" -#: sql_help.h:951 +#: sql_help.c:5242 msgid "show the value of a run-time parameter" -msgstr "実行時パラメータの値を表示する" +msgstr "実行時パラメーターの値を表示します。" -#: sql_help.h:966 +#: sql_help.c:5257 msgid "empty a table or set of tables" -msgstr "テーブルもしくはテーブルのセットを 0 件に切り詰める" +msgstr "テーブルもしくはテーブルセットを0件に切り詰めます。" -#: sql_help.h:971 +#: sql_help.c:5262 
msgid "stop listening for a notification" -msgstr "通知メッセージの監視を中止する" +msgstr "通知メッセージの監視を中止します。" -#: sql_help.h:976 +#: sql_help.c:5267 msgid "update rows of a table" -msgstr "テーブルの行を更新する" +msgstr "テーブルの行を更新します。" -#: sql_help.h:981 +#: sql_help.c:5272 msgid "garbage-collect and optionally analyze a database" -msgstr "ガーベジコレクションを行い、オプションでデータベースの分析をします" +msgstr "ガーベッジコレクションを行い、また必要に応じてデータベースを分析します。" -#: sql_help.h:986 +#: sql_help.c:5277 msgid "compute a set of rows" -msgstr "行セットを計算します" +msgstr "行セットを計算します。" -#: startup.c:166 +#: startup.c:187 #, c-format msgid "%s: -1 can only be used in non-interactive mode\n" -msgstr "%s: -1は対話式モード以外でのみ使用できます\n" +msgstr "%s: -1 が使えるのは非対話型モード時だけです\n" -#: startup.c:266 +#: startup.c:290 #, c-format msgid "%s: could not open log file \"%s\": %s\n" -msgstr "%s: ログファイル \"%s\" をオープンできません: %s\n" +msgstr "%s: ログファイル \"%s\" を開くことができませんでした: %s\n" -#: startup.c:328 +#: startup.c:397 #, c-format msgid "" "Type \"help\" for help.\n" "\n" msgstr "" -"\"help\" でヘルプを表示します.\n" +"\"help\" でヘルプを表示します。\n" "\n" -#: startup.c:471 +#: startup.c:546 #, c-format msgid "%s: could not set printing parameter \"%s\"\n" -msgstr "%s: 表示用パラメータ \"%s\" をセットできませんでした\n" +msgstr "%s: 印刷パラメーター \"%s\" を設定できませんでした\n" -#: startup.c:511 -#, c-format -msgid "%s: could not delete variable \"%s\"\n" -msgstr "%s: 変数 \"%s\" を削除できませんでした\n" - -#: startup.c:521 -#, c-format -msgid "%s: could not set variable \"%s\"\n" -msgstr "%s: 変数 \"%s\" をセットできませんでした\n" - -#: startup.c:564 startup.c:570 +#: startup.c:648 #, c-format msgid "Try \"%s --help\" for more information.\n" -msgstr "詳細は '%s --help' をごらんください\n" +msgstr "詳細は \"%s --help\" をごらんください。\n" -#: startup.c:587 +#: startup.c:665 #, c-format msgid "%s: warning: extra command-line argument \"%s\" ignored\n" -msgstr "%s: 警告:余分なコマンドラインオプション \"%s\" は無視されます\n" +msgstr "%s: 警告: 余分なコマンドライン引数 \"%s\" は無視されました。\n" -#: startup.c:609 +#: startup.c:714 #, c-format msgid "%s: could not find own program executable\n" -msgstr "%s: 実行ファイル自体がありませんでした\n" +msgstr "%s: 実行可能プログラムファイルが見つかりませんでした。\n" -#: startup.c:729 startup.c:776 startup.c:797 startup.c:834 variables.c:121 -#, c-format -msgid "unrecognized value \"%s\" for \"%s\"; assuming \"%s\"\n" -msgstr "\"%2$s\" の不明な値 \"%1$s\"。\"%3$s\"と仮定します\n" - -#: tab-complete.c:4098 +#: tab-complete.c:4186 #, c-format msgid "" "tab completion query failed: %s\n" "Query was:\n" "%s\n" msgstr "" -"問い合わせのタブ補完に失敗: %s\n" -"問い合わせは\n" +"タブ補完の問い合わせに失敗しました: %s\n" +"実行した問い合わせ:\n" "%s\n" + +#: variables.c:139 +#, c-format +msgid "unrecognized value \"%s\" for \"%s\": Boolean expected\n" +msgstr "\"%2$s\" の値 \"%1$s\" が認識できません:真偽値を指定してください。\n" + +#: variables.c:176 +#, c-format +msgid "invalid value \"%s\" for \"%s\": integer expected\n" +msgstr "\"%2$s\" の値 \"%1$s\" が無効です: 整数を指定してください。\n" + +#: variables.c:224 +#, c-format +msgid "invalid variable name: \"%s\"\n" +msgstr "変数名が無効です: \"%s\"\n" + +#: variables.c:393 +#, c-format +msgid "" +"unrecognized value \"%s\" for \"%s\"\n" +"Available values are: %s.\n" +msgstr "" +"\"%2$s\" の値 \"%1$s\" が認識できません。\n" +"有効な値は %3$s です。\n" diff --git a/src/bin/psql/po/ko.po b/src/bin/psql/po/ko.po index b355b663f6..88a1cbfbfd 100644 --- a/src/bin/psql/po/ko.po +++ b/src/bin/psql/po/ko.po @@ -3,10 +3,10 @@ # msgid "" msgstr "" -"Project-Id-Version: PostgreSQL 9.6\n" +"Project-Id-Version: psql (PostgreSQL) 10\n" "Report-Msgid-Bugs-To: pgsql-bugs@postgresql.org\n" -"POT-Creation-Date: 2016-09-26 14:02+0900\n" -"PO-Revision-Date: 2016-09-27 01:42+0900\n" +"POT-Creation-Date: 2017-09-19 09:51+0900\n" 
+"PO-Revision-Date: 2017-09-19 10:26+0900\n" "Last-Translator: Ioseph Kim \n" "Language-Team: Korean \n" "Language: ko\n" @@ -51,8 +51,8 @@ msgid "pclose failed: %s" msgstr "pclose 실패: %s" #: ../../common/fe_memutils.c:35 ../../common/fe_memutils.c:75 -#: ../../common/fe_memutils.c:98 command.c:342 input.c:227 mainloop.c:80 -#: mainloop.c:261 +#: ../../common/fe_memutils.c:98 command.c:608 input.c:227 mainloop.c:82 +#: mainloop.c:276 #, c-format msgid "out of memory\n" msgstr "메모리 부족\n" @@ -62,112 +62,125 @@ msgstr "메모리 부족\n" msgid "cannot duplicate null pointer (internal error)\n" msgstr "null 포인터를 복제할 수 없음(내부 오류)\n" -#: ../../common/username.c:45 +#: ../../common/username.c:43 #, c-format msgid "could not look up effective user ID %ld: %s" msgstr "UID %ld 해당하는 사용자를 찾을 수 없음: %s" -#: ../../common/username.c:47 command.c:299 +#: ../../common/username.c:45 command.c:555 msgid "user does not exist" msgstr "사용자 없음" -#: ../../common/username.c:62 +#: ../../common/username.c:60 #, c-format msgid "user name lookup failure: error code %lu" msgstr "사용자 이름 찾기 실패: 오류번호 %lu" -#: ../../common/wait_error.c:47 +#: ../../common/wait_error.c:45 #, c-format msgid "command not executable" msgstr "명령을 실행할 수 없음" -#: ../../common/wait_error.c:51 +#: ../../common/wait_error.c:49 #, c-format msgid "command not found" msgstr "명령어를 찾을 수 없음" -#: ../../common/wait_error.c:56 +#: ../../common/wait_error.c:54 #, c-format msgid "child process exited with exit code %d" msgstr "하위 프로세스가 %d 코드로 종료했음" -#: ../../common/wait_error.c:63 +#: ../../common/wait_error.c:61 #, c-format msgid "child process was terminated by exception 0x%X" msgstr "0x%X 예외처리에 의해 하위 프로세스가 종료되었음" -#: ../../common/wait_error.c:73 +#: ../../common/wait_error.c:71 #, c-format msgid "child process was terminated by signal %s" msgstr "%s 시그널 감지로 하위 프로세스가 종료되었음" -#: ../../common/wait_error.c:77 +#: ../../common/wait_error.c:75 #, c-format msgid "child process was terminated by signal %d" msgstr "하위 프로세스가 %d 신호를 받고 종료되었음" -#: ../../common/wait_error.c:82 +#: ../../common/wait_error.c:80 #, c-format msgid "child process exited with unrecognized status %d" msgstr "하위 프로세스가 알 수 없는 상태(%d)로 종료되었음" -#: ../../fe_utils/print.c:354 +#: ../../fe_utils/print.c:353 #, c-format msgid "(%lu row)" msgid_plural "(%lu rows)" msgstr[0] "(%lu개 행)" -#: ../../fe_utils/print.c:2906 +#: ../../fe_utils/print.c:2913 #, c-format msgid "Interrupted\n" msgstr "인트럽트발생\n" -#: ../../fe_utils/print.c:2970 +#: ../../fe_utils/print.c:2977 #, c-format msgid "Cannot add header to table content: column count of %d exceeded.\n" msgstr "테이블 내용에 헤더를 추가할 수 없음: 열 수가 %d개를 초과했습니다.\n" -#: ../../fe_utils/print.c:3010 +#: ../../fe_utils/print.c:3017 #, c-format msgid "Cannot add cell to table content: total cell count of %d exceeded.\n" msgstr "테이블 내용에 셀을 추가할 수 없음: 총 셀 수가 %d개를 초과했습니다.\n" -#: ../../fe_utils/print.c:3259 +#: ../../fe_utils/print.c:3266 #, c-format msgid "invalid output format (internal error): %d" msgstr "잘못된 출력 형식 (내부 오류): %d" -#: command.c:129 +#: ../../fe_utils/psqlscan.l:713 +#, c-format +msgid "skipping recursive expansion of variable \"%s\"\n" +msgstr "\"%s\" 변수의 재귀적 확장을 건너뛰는 중\n" + +#: command.c:223 #, c-format msgid "Invalid command \\%s. Try \\? for help.\n" msgstr "잘못된 명령: \\%s. 
도움말은 \\?.\n" -#: command.c:131 +#: command.c:225 #, c-format msgid "invalid command \\%s\n" msgstr "잘못된 명령: \\%s\n" -#: command.c:142 +#: command.c:243 #, c-format msgid "\\%s: extra argument \"%s\" ignored\n" msgstr "\\%s: \"%s\" 추가 인수가 무시되었음\n" -#: command.c:297 +#: command.c:295 +#, c-format +msgid "" +"\\%s command ignored; use \\endif or Ctrl-C to exit current \\if block\n" +msgstr "" +"\\%s 명령은 무시함; 현재 \\if 블록을 중지하려면, \\endif 명령이나 Ctrl-C 키" +"를 사용하세요.\n" + +#: command.c:553 #, c-format msgid "could not get home directory for user ID %ld: %s\n" msgstr "UID %ld 사용자의 홈 디렉터리를 찾을 수 없음: %s\n" -#: command.c:315 +#: command.c:571 #, c-format msgid "\\%s: could not change directory to \"%s\": %s\n" msgstr "\\%s: \"%s\" 디렉터리로 이동할 수 없음: %s\n" -#: command.c:330 common.c:553 common.c:611 common.c:1144 +#: command.c:596 common.c:648 common.c:706 common.c:1242 #, c-format msgid "You are currently not connected to a database.\n" msgstr "현재 데이터베이스에 연결되어있지 않습니다.\n" -#: command.c:355 +#: command.c:621 #, c-format msgid "" "You are connected to database \"%s\" as user \"%s\" via socket in \"%s\" at " @@ -175,7 +188,7 @@ msgid "" msgstr "" "접속정보: 데이터베이스=\"%s\", 사용자=\"%s\", 소켓=\"%s\", 포트=\"%s\".\n" -#: command.c:358 +#: command.c:624 #, c-format msgid "" "You are connected to database \"%s\" as user \"%s\" on host \"%s\" at port " @@ -183,149 +196,164 @@ msgid "" msgstr "" "접속정보: 데이터베이스=\"%s\", 사용자=\"%s\", 호스트=\"%s\", 포트=\"%s\".\n" -#: command.c:574 command.c:647 command.c:746 command.c:1584 +#: command.c:915 command.c:1005 command.c:1114 command.c:2523 #, c-format msgid "no query buffer\n" msgstr "쿼리 버퍼가 없음\n" -#: command.c:607 command.c:3547 +#: command.c:948 command.c:4784 #, c-format msgid "invalid line number: %s\n" msgstr "잘못된 줄 번호: %s\n" -#: command.c:640 +#: command.c:998 #, c-format msgid "The server (version %s) does not support editing function source.\n" msgstr "이 서버(%s 버전)는 함수 소스 편집 기능을 제공하지 않습니다.\n" -#: command.c:721 command.c:792 +#: command.c:1073 command.c:1154 msgid "No changes" msgstr "변경 내용 없음" -#: command.c:739 +#: command.c:1107 #, c-format msgid "The server (version %s) does not support editing view definitions.\n" msgstr "이 서버(%s 버전)는 뷰 정의 편집 기능을 제공하지 않습니다.\n" -#: command.c:846 +#: command.c:1231 #, c-format msgid "%s: invalid encoding name or conversion procedure not found\n" msgstr "%s: 타당치 못한 인코딩 이름 또는 문자셋 변환 프로시저 없음\n" -#: command.c:871 command.c:1962 command.c:3649 common.c:153 common.c:200 -#: common.c:497 common.c:1190 common.c:1218 common.c:1319 copy.c:489 -#: copy.c:699 large_obj.c:156 large_obj.c:191 large_obj.c:253 +#: command.c:1266 command.c:1888 command.c:3169 command.c:4886 common.c:173 +#: common.c:244 common.c:541 common.c:1288 common.c:1316 common.c:1417 +#: copy.c:489 copy.c:708 large_obj.c:156 large_obj.c:191 large_obj.c:253 #, c-format msgid "%s" msgstr "%s" -#: command.c:875 +#: command.c:1270 msgid "out of memory" msgstr "메모리 부족" -#: command.c:878 +#: command.c:1273 msgid "There is no previous error." msgstr "이전 오류가 없습니다." 
-#: command.c:972 command.c:1022 command.c:1036 command.c:1053 command.c:1160 -#: command.c:1324 command.c:1564 command.c:1595 +#: command.c:1444 command.c:1749 command.c:1763 command.c:1780 command.c:1940 +#: command.c:2177 command.c:2490 command.c:2530 #, c-format msgid "\\%s: missing required argument\n" msgstr "\\%s: 필요한 인수가 빠졌음\n" -#: command.c:1085 +#: command.c:1575 +#, c-format +msgid "\\elif: cannot occur after \\else\n" +msgstr "\\elif: \\else 구문 뒤에 올 수 없음\n" + +#: command.c:1580 +#, c-format +msgid "\\elif: no matching \\if\n" +msgstr "\\elif: \\if 명령과 짝이 안맞음\n" + +#: command.c:1644 +#, c-format +msgid "\\else: cannot occur after \\else\n" +msgstr "\\else: \\else 명령 뒤에 올 수 없음\n" + +#: command.c:1649 +#, c-format +msgid "\\else: no matching \\if\n" +msgstr "\\else: \\if 명령과 짝이 안맞음\n" + +#: command.c:1689 +#, c-format +msgid "\\endif: no matching \\if\n" +msgstr "\\endif: \\if 명령과 짝이 안맞음\n" + +#: command.c:1844 msgid "Query buffer is empty." msgstr "쿼리 버퍼가 비었음." -#: command.c:1095 +#: command.c:1866 msgid "Enter new password: " msgstr "새 암호를 입력하세요:" -#: command.c:1096 +#: command.c:1867 msgid "Enter it again: " msgstr "다시 입력해 주세요:" -#: command.c:1100 +#: command.c:1871 #, c-format msgid "Passwords didn't match.\n" msgstr "암호가 서로 틀립니다.\n" -#: command.c:1118 -#, c-format -msgid "Password encryption failed.\n" -msgstr "암호 암호화 실패\n" - -#: command.c:1189 command.c:1305 command.c:1569 +#: command.c:1970 #, c-format -msgid "\\%s: error while setting variable\n" -msgstr "\\%s: 변수 지정 실패\n" +msgid "\\%s: could not read value for variable\n" +msgstr "\\%s: 변수 값을 읽을 수 없음\n" -#: command.c:1252 +#: command.c:2073 msgid "Query buffer reset (cleared)." msgstr "쿼리 버퍼 초기화 (비웠음)." -#: command.c:1264 +#: command.c:2095 #, c-format msgid "Wrote history to file \"%s\".\n" msgstr "명령내역(history)을 \"%s\" 파일에 기록했습니다.\n" -#: command.c:1329 +#: command.c:2182 #, c-format msgid "\\%s: environment variable name must not contain \"=\"\n" msgstr "\\%s: OS 환경 변수 이름에는 \"=\" 문자가 없어야 함\n" -#: command.c:1373 +#: command.c:2238 #, c-format msgid "The server (version %s) does not support showing function source.\n" msgstr "이 서버(%s 버전)는 함수 소스 보기 기능을 제공하지 않습니다.\n" -#: command.c:1380 +#: command.c:2245 #, c-format msgid "function name is required\n" msgstr "함수 이름이 필요합니다\n" -#: command.c:1455 +#: command.c:2332 #, c-format msgid "The server (version %s) does not support showing view definitions.\n" msgstr "이 서버(%s 버전)는 뷰 정의 보기 기능을 제공하지 않습니다.\n" -#: command.c:1462 +#: command.c:2339 #, c-format msgid "view name is required\n" msgstr "뷰 이름이 필요합니다\n" -#: command.c:1549 +#: command.c:2462 msgid "Timing is on." msgstr "작업수행시간 보임" -#: command.c:1551 +#: command.c:2464 msgid "Timing is off." 
msgstr "작업수행시간 숨김" -#: command.c:1613 command.c:1633 command.c:2311 command.c:2314 command.c:2317 -#: command.c:2323 command.c:2325 command.c:2333 command.c:2343 command.c:2352 -#: command.c:2366 command.c:2383 command.c:2441 common.c:68 copy.c:332 -#: copy.c:392 copy.c:405 psqlscanslash.l:711 psqlscanslash.l:722 -#: psqlscanslash.l:732 +#: command.c:2549 command.c:2577 command.c:3537 command.c:3540 command.c:3543 +#: command.c:3549 command.c:3551 command.c:3559 command.c:3569 command.c:3578 +#: command.c:3592 command.c:3609 command.c:3667 common.c:69 copy.c:332 +#: copy.c:392 copy.c:405 psqlscanslash.l:761 psqlscanslash.l:772 +#: psqlscanslash.l:782 #, c-format msgid "%s: %s\n" msgstr "%s: %s\n" -#: command.c:1727 -#, c-format -msgid "+ opt(%d) = |%s|\n" -msgstr "+ opt(%d) = |%s|\n" - -#: command.c:1753 startup.c:207 +#: command.c:2961 startup.c:205 msgid "Password: " msgstr "암호: " -#: command.c:1758 startup.c:209 +#: command.c:2966 startup.c:207 #, c-format msgid "Password for user %s: " msgstr "%s 사용자의 암호: " -#: command.c:1809 +#: command.c:3016 #, c-format msgid "" "All connection parameters must be supplied because no database connection " @@ -333,17 +361,17 @@ msgid "" msgstr "" "현재 접속 정보가 없습니다. 접속을 위한 연결 관련 매개변수를 지정하세요\n" -#: command.c:1966 +#: command.c:3173 #, c-format msgid "Previous connection kept\n" msgstr "이전 연결이 유지되었음\n" -#: command.c:1970 +#: command.c:3177 #, c-format msgid "\\connect: %s" msgstr "\\연결: %s" -#: command.c:2006 +#: command.c:3213 #, c-format msgid "" "You are now connected to database \"%s\" as user \"%s\" via socket in \"%s\" " @@ -351,7 +379,7 @@ msgid "" msgstr "" "접속정보: 데이터베이스=\"%s\", 사용자=\"%s\", 소켓=\"%s\", 포트=\"%s\".\n" -#: command.c:2009 +#: command.c:3216 #, c-format msgid "" "You are now connected to database \"%s\" as user \"%s\" on host \"%s\" at " @@ -359,17 +387,17 @@ msgid "" msgstr "" "접속정보: 데이터베이스=\"%s\", 사용자=\"%s\", 호스트=\"%s\", 포트=\"%s\".\n" -#: command.c:2013 +#: command.c:3220 #, c-format msgid "You are now connected to database \"%s\" as user \"%s\".\n" msgstr "접속정보: 데이터베이스=\"%s\", 사용자=\"%s\".\n" -#: command.c:2046 +#: command.c:3253 #, c-format msgid "%s (%s, server %s)\n" msgstr "%s(%s, %s 서버)\n" -#: command.c:2054 +#: command.c:3261 #, c-format msgid "" "WARNING: %s major version %s, server major version %s.\n" @@ -378,24 +406,24 @@ msgstr "" "경고: %s 메이저 버전 %s, 서버 메이저 버전 %s.\n" " 일부 psql 기능이 작동하지 않을 수도 있습니다.\n" -#: command.c:2091 +#: command.c:3298 #, c-format msgid "SSL connection (protocol: %s, cipher: %s, bits: %s, compression: %s)\n" msgstr "SSL 연결정보 (프로토콜: %s, 암호화기법: %s, 비트: %s, 압축: %s)\n" -#: command.c:2092 command.c:2093 command.c:2094 +#: command.c:3299 command.c:3300 command.c:3301 msgid "unknown" msgstr "알수없음" -#: command.c:2095 help.c:46 +#: command.c:3302 help.c:45 msgid "off" msgstr "off" -#: command.c:2095 help.c:46 +#: command.c:3302 help.c:45 msgid "on" msgstr "on" -#: command.c:2115 +#: command.c:3322 #, c-format msgid "" "WARNING: Console code page (%u) differs from Windows code page (%u)\n" @@ -407,36 +435,36 @@ msgstr "" "참조\n" " 페이지 \"Notes for Windows users\"를 참조하십시오.\n" -#: command.c:2200 +#: command.c:3426 #, c-format msgid "" "environment variable PSQL_EDITOR_LINENUMBER_ARG must be set to specify a " "line number\n" msgstr "" -"지정한 줄번호를 사용하기 위해서는 PSQL_EDITOR_LINENUMBER_ARG 이름의 OS 환경" -"변수가 설정되어 있어야 합니다.\n" +"지정한 줄번호를 사용하기 위해서는 PSQL_EDITOR_LINENUMBER_ARG 이름의 OS 환경변" +"수가 설정되어 있어야 합니다.\n" -#: command.c:2229 +#: command.c:3455 #, c-format msgid "could not start editor \"%s\"\n" msgstr "\"%s\" 문서 편집기를 실행시킬 수 없음\n" 
-#: command.c:2231 +#: command.c:3457 #, c-format msgid "could not start /bin/sh\n" msgstr "/bin/sh 명령을 실행할 수 없음\n" -#: command.c:2269 +#: command.c:3495 #, c-format msgid "could not locate temporary directory: %s\n" msgstr "임시 디렉터리 경로를 알 수 없음: %s\n" -#: command.c:2296 +#: command.c:3522 #, c-format msgid "could not open temporary file \"%s\": %s\n" msgstr "\"%s\" 임시 파일을 열 수 없음: %s\n" -#: command.c:2570 +#: command.c:3796 #, c-format msgid "" "\\pset: allowed formats are unaligned, aligned, wrapped, html, asciidoc, " @@ -445,208 +473,208 @@ msgstr "" "\\pset: 허용되는 출력 형식: unaligned, aligned, wrapped, html, asciidoc, " "latex, latex-longtable, troff-ms\n" -#: command.c:2589 +#: command.c:3814 #, c-format msgid "\\pset: allowed line styles are ascii, old-ascii, unicode\n" msgstr "\\pset: 사용할 수 있는 선 모양은 ascii, old-ascii, unicode\n" -#: command.c:2605 +#: command.c:3829 #, c-format msgid "\\pset: allowed Unicode border line styles are single, double\n" msgstr "\\pset: 사용할 수 있는 유니코드 테두리 모양은 single, double\n" -#: command.c:2620 +#: command.c:3844 #, c-format msgid "\\pset: allowed Unicode column line styles are single, double\n" msgstr "\\pset: 사용할 수 있는 유니코드 칼럼 선 모양은 single, double\n" -#: command.c:2635 +#: command.c:3859 #, c-format msgid "\\pset: allowed Unicode header line styles are single, double\n" msgstr "\\pset: 사용할 수 있는 유니코드 헤더 선 모양은 single, double\n" -#: command.c:2787 command.c:2966 +#: command.c:4024 command.c:4203 #, c-format msgid "\\pset: unknown option: %s\n" msgstr "\\pset: 알 수 없는 옵션: %s\n" -#: command.c:2805 +#: command.c:4042 #, c-format msgid "Border style is %d.\n" msgstr "html 테이블의 테두리를 %d로 지정했습니다.\n" -#: command.c:2811 +#: command.c:4048 #, c-format msgid "Target width is unset.\n" msgstr "대상 너비 미지정.\n" -#: command.c:2813 +#: command.c:4050 #, c-format msgid "Target width is %d.\n" msgstr "대상 너비는 %d입니다.\n" -#: command.c:2820 +#: command.c:4057 #, c-format msgid "Expanded display is on.\n" msgstr "칼럼 단위 보기 기능 켬.\n" -#: command.c:2822 +#: command.c:4059 #, c-format msgid "Expanded display is used automatically.\n" msgstr "칼럼 단위 보기 기능을 자동으로 지정 함.\n" -#: command.c:2824 +#: command.c:4061 #, c-format msgid "Expanded display is off.\n" msgstr "칼럼 단위 보기 기능 끔.\n" -#: command.c:2831 command.c:2839 +#: command.c:4068 command.c:4076 #, c-format msgid "Field separator is zero byte.\n" msgstr "필드 구분자가 0 바이트입니다.\n" -#: command.c:2833 +#: command.c:4070 #, c-format msgid "Field separator is \"%s\".\n" msgstr "필드 구분자 \"%s\".\n" -#: command.c:2846 +#: command.c:4083 #, c-format msgid "Default footer is on.\n" msgstr "기본 꼬릿말 보기 기능 켬.\n" -#: command.c:2848 +#: command.c:4085 #, c-format msgid "Default footer is off.\n" msgstr "기본 꼬릿말 보기 기능 끔.\n" -#: command.c:2854 +#: command.c:4091 #, c-format msgid "Output format is %s.\n" msgstr "현재 출력 형식: %s.\n" -#: command.c:2860 +#: command.c:4097 #, c-format msgid "Line style is %s.\n" msgstr "선 모양: %s.\n" -#: command.c:2867 +#: command.c:4104 #, c-format msgid "Null display is \"%s\".\n" msgstr "Null 값은 \"%s\" 문자로 보여짐.\n" -#: command.c:2875 +#: command.c:4112 #, c-format msgid "Locale-adjusted numeric output is on.\n" msgstr "로케일 맞춤 숫자 표기 기능 켬.\n" -#: command.c:2877 +#: command.c:4114 #, c-format msgid "Locale-adjusted numeric output is off.\n" msgstr "로케일 맞춤 숫자 표기 기능 끔.\n" -#: command.c:2884 +#: command.c:4121 #, c-format msgid "Pager is used for long output.\n" msgstr "긴 출력을 위해 페이저가 사용됨.\n" -#: command.c:2886 +#: command.c:4123 #, c-format msgid "Pager is always used.\n" msgstr "항상 페이저가 사용됨.\n" -#: command.c:2888 +#: command.c:4125 #, c-format msgid 
"Pager usage is off.\n" msgstr "화면단위 보기 기능 끔(전체 자료 모두 보여줌).\n" -#: command.c:2894 +#: command.c:4131 #, c-format msgid "Pager won't be used for less than %d line.\n" msgid_plural "Pager won't be used for less than %d lines.\n" msgstr[0] "%d 줄보다 적은 경우는 페이지 단위 보기가 사용되지 않음\n" -#: command.c:2904 command.c:2914 +#: command.c:4141 command.c:4151 #, c-format msgid "Record separator is zero byte.\n" msgstr "레코드 구분자가 0 바이트임.\n" -#: command.c:2906 +#: command.c:4143 #, c-format msgid "Record separator is .\n" msgstr "레코드 구분자는 줄바꿈 문자입니다.\n" -#: command.c:2908 +#: command.c:4145 #, c-format msgid "Record separator is \"%s\".\n" msgstr "레코드 구분자 \"%s\".\n" -#: command.c:2921 +#: command.c:4158 #, c-format msgid "Table attributes are \"%s\".\n" msgstr "테이블 속성: \"%s\".\n" -#: command.c:2924 +#: command.c:4161 #, c-format msgid "Table attributes unset.\n" msgstr "테이블 속성 모두 지움.\n" -#: command.c:2931 +#: command.c:4168 #, c-format msgid "Title is \"%s\".\n" msgstr "출력 테이블의 제목: \"%s\"\n" -#: command.c:2933 +#: command.c:4170 #, c-format msgid "Title is unset.\n" msgstr "출력 테이블의 제목을 지정하지 않았습니다.\n" -#: command.c:2940 +#: command.c:4177 #, c-format msgid "Tuples only is on.\n" msgstr "자료만 보기 기능 켬.\n" -#: command.c:2942 +#: command.c:4179 #, c-format msgid "Tuples only is off.\n" msgstr "자료만 보기 기능 끔.\n" -#: command.c:2948 +#: command.c:4185 #, c-format msgid "Unicode border line style is \"%s\".\n" msgstr "유니코드 테두리 선문자: \"%s\".\n" -#: command.c:2954 +#: command.c:4191 #, c-format msgid "Unicode column line style is \"%s\".\n" msgstr "유니코드 칼럼 선문자: \"%s\".\n" -#: command.c:2960 +#: command.c:4197 #, c-format msgid "Unicode header line style is \"%s\".\n" msgstr "유니코드 헤더 선문자: \"%s\".\n" -#: command.c:3120 +#: command.c:4357 #, c-format msgid "\\!: failed\n" msgstr "\\!: 실패\n" -#: command.c:3145 common.c:659 +#: command.c:4382 common.c:754 #, c-format msgid "\\watch cannot be used with an empty query\n" msgstr "\\watch 명령으로 수행할 쿼리가 없습니다.\n" -#: command.c:3186 +#: command.c:4423 #, c-format msgid "%s\t%s (every %gs)\n" msgstr "%s\t%s (%g초 간격)\n" -#: command.c:3189 +#: command.c:4426 #, c-format msgid "%s (every %gs)\n" msgstr "%s (%g초 간격)\n" -#: command.c:3243 command.c:3250 common.c:559 common.c:566 common.c:1173 +#: command.c:4480 command.c:4487 common.c:654 common.c:661 common.c:1271 #, c-format msgid "" "********* QUERY **********\n" @@ -659,91 +687,105 @@ msgstr "" "**************************\n" "\n" -#: command.c:3442 +#: command.c:4679 #, c-format msgid "\"%s.%s\" is not a view\n" msgstr "\"%s.%s\" 뷰(view)가 아님\n" -#: command.c:3458 +#: command.c:4695 #, c-format msgid "could not parse reloptions array\n" msgstr "reloptions 배열을 분석할 수 없음\n" -#: common.c:138 +#: common.c:158 #, c-format msgid "cannot escape without active connection\n" msgstr "현재 접속한 연결 없이는 특수문자처리를 할 수 없음\n" -#: common.c:371 +#: common.c:199 +#, c-format +msgid "shell command argument contains a newline or carriage return: \"%s\"\n" +msgstr "쉘 명령의 인자에 줄바꿈 문자가 있음: \"%s\"\n" + +#: common.c:415 #, c-format msgid "connection to server was lost\n" msgstr "서버로부터 연결이 끊어졌습니다.\n" -#: common.c:375 +#: common.c:419 #, c-format msgid "The connection to the server was lost. Attempting reset: " msgstr "서버로부터 연결이 끊어졌습니다. 
다시 연결을 시도합니다: " -#: common.c:380 +#: common.c:424 #, c-format msgid "Failed.\n" msgstr "실패.\n" -#: common.c:387 +#: common.c:431 #, c-format msgid "Succeeded.\n" msgstr "성공.\n" -#: common.c:487 common.c:936 common.c:1108 +#: common.c:531 common.c:1034 common.c:1206 #, c-format msgid "unexpected PQresultStatus: %d\n" msgstr "PQresultStatus 반환값이 잘못됨: %d\n" -#: common.c:666 +#: common.c:593 +#, c-format +msgid "Time: %.3f ms\n" +msgstr "작업시간: %.3f ms\n" + +#: common.c:608 +#, c-format +msgid "Time: %.3f ms (%02d:%06.3f)\n" +msgstr "작업시간: %.3f ms (%02d:%06.3f)\n" + +#: common.c:617 +#, c-format +msgid "Time: %.3f ms (%02d:%02d:%06.3f)\n" +msgstr "작업시간: %.3f ms (%02d:%02d:%06.3f)\n" + +#: common.c:624 +#, c-format +msgid "Time: %.3f ms (%.0f d %02d:%02d:%06.3f)\n" +msgstr "작업시간: %.3f ms (%.0f d %02d:%02d:%06.3f)\n" + +#: common.c:761 #, c-format msgid "\\watch cannot be used with COPY\n" msgstr "\\watch 작업으로 COPY 명령은 사용할 수 없음\n" -#: common.c:671 +#: common.c:766 #, c-format msgid "unexpected result status for \\watch\n" msgstr "\\watch 쿼리 결과가 비정상적입니다.\n" -#: common.c:682 common.c:1335 -#, c-format -msgid "Time: %.3f ms\n" -msgstr "작업시간: %.3f ms\n" - -#: common.c:700 +#: common.c:795 #, c-format msgid "" "Asynchronous notification \"%s\" with payload \"%s\" received from server " "process with PID %d.\n" -msgstr "" -"\"%s\" 비동기 통지를 받음, 부가정보: \"%s\", 보낸 프로세스: %d.\n" +msgstr "\"%s\" 비동기 통지를 받음, 부가정보: \"%s\", 보낸 프로세스: %d.\n" -#: common.c:703 +#: common.c:798 #, c-format msgid "" "Asynchronous notification \"%s\" received from server process with PID %d.\n" msgstr "동기화 신호 \"%s\" 받음, 해당 서버 프로세스 PID %d.\n" -#: common.c:761 +#: common.c:860 #, c-format msgid "no rows returned for \\gset\n" msgstr "\\gset 해당 자료 없음\n" -#: common.c:766 +#: common.c:865 #, c-format msgid "more than one row returned for \\gset\n" msgstr "\\gset 실행 결과가 단일 자료가 아님\n" -#: common.c:792 -#, c-format -msgid "could not set variable \"%s\"\n" -msgstr "\"%s\" 변수를 지정할 수 없음\n" - -#: common.c:1153 +#: common.c:1251 #, c-format msgid "" "***(Single step mode: verify " @@ -756,20 +798,19 @@ msgstr "" "%s\n" "***(Enter: 계속 진행, x Enter: 중지)********************\n" -#: common.c:1208 +#: common.c:1306 #, c-format msgid "" "The server (version %s) does not support savepoints for ON_ERROR_ROLLBACK.\n" msgstr "" -"서버(%s 버전)에서 ON_ERROR_ROLLBACK에 사용할 savepoint를 지원하지 않습니" -"다.\n" +"서버(%s 버전)에서 ON_ERROR_ROLLBACK에 사용할 savepoint를 지원하지 않습니다.\n" -#: common.c:1264 +#: common.c:1362 #, c-format msgid "STATEMENT: %s\n" msgstr "명령 구문: %s\n" -#: common.c:1307 +#: common.c:1405 #, c-format msgid "unexpected transaction status (%d)\n" msgstr "알 수 없는 트랜잭션 상태 (%d)\n" @@ -826,50 +867,51 @@ msgstr "사용자에 의해서 취소됨" #: copy.c:542 msgid "" "Enter data to be copied followed by a newline.\n" -"End with a backslash and a period on a line by itself." +"End with a backslash and a period on a line by itself, or an EOF signal." msgstr "" "한 줄에 한 레코드씩 데이터를 입력하고\n" -"자료입력이 끝나면 backslash 점 (\\.) 마지막 줄 처음에 입력합니다." +"자료입력이 끝나면 backslash 점 (\\.) 마지막 줄 처음에 입력하는 EOF 시그널을 " +"보내세요." 
-#: copy.c:671 +#: copy.c:670 msgid "aborted because of read failure" msgstr "읽기 실패로 중지됨" -#: copy.c:695 +#: copy.c:704 msgid "trying to exit copy mode" msgstr "복사 모드를 종료하는 중" -#: crosstabview.c:125 +#: crosstabview.c:123 #, c-format msgid "\\crosstabview: statement did not return a result set\n" msgstr "\\crosstabview: 구문 결과가 집합을 반환하지 않았음\n" -#: crosstabview.c:131 +#: crosstabview.c:129 #, c-format msgid "\\crosstabview: query must return at least three columns\n" msgstr "\\crosstabview: 쿼리 결과는 적어도 세 개의 칼럼은 반환 해야 함\n" -#: crosstabview.c:158 +#: crosstabview.c:156 #, c-format msgid "" "\\crosstabview: vertical and horizontal headers must be different columns\n" -msgstr "" -"\\crosstabview: 행과 열의 칼럼이 각각 다른 칼럼이어야 함\n" +msgstr "\\crosstabview: 행과 열의 칼럼이 각각 다른 칼럼이어야 함\n" -#: crosstabview.c:174 +#: crosstabview.c:172 #, c-format msgid "" "\\crosstabview: data column must be specified when query returns more than " "three columns\n" msgstr "" -"\\crosstabview: 처리할 칼럼이 세개보다 많을 때는 자료로 사용할 칼럼을 지정해야 함\n" +"\\crosstabview: 처리할 칼럼이 세개보다 많을 때는 자료로 사용할 칼럼을 지정해" +"야 함\n" -#: crosstabview.c:230 +#: crosstabview.c:228 #, c-format msgid "\\crosstabview: maximum number of columns (%d) exceeded\n" msgstr "\\crosstabview: 최대 칼럼 수 (%d) 초과\n" -#: crosstabview.c:398 +#: crosstabview.c:397 #, c-format msgid "" "\\crosstabview: query result contains multiple data values for row \"%s\", " @@ -877,958 +919,1015 @@ msgid "" msgstr "" "\\crosstabview: \"%s\" 로우, \"%s\" 칼럼에 대해 쿼리 결과는 다중값이어야 함\n" -#: crosstabview.c:646 +#: crosstabview.c:645 #, c-format msgid "\\crosstabview: column number %d is out of range 1..%d\n" msgstr "\\crosstabview: %d 번째 열은 0..%d 범위를 벗어났음\n" -#: crosstabview.c:671 +#: crosstabview.c:670 #, c-format msgid "\\crosstabview: ambiguous column name: \"%s\"\n" msgstr "\\crosstabview: 칼럼 이름이 중복되었음: \"%s\"\n" -#: crosstabview.c:679 +#: crosstabview.c:678 #, c-format msgid "\\crosstabview: column name not found: \"%s\"\n" msgstr "\\crosstabview: 칼럼 이름 없음: \"%s\"\n" -#: describe.c:71 describe.c:340 describe.c:597 describe.c:727 describe.c:870 -#: describe.c:990 describe.c:1060 describe.c:3035 describe.c:3240 -#: describe.c:3330 describe.c:3578 describe.c:3718 describe.c:3950 -#: describe.c:4025 describe.c:4036 describe.c:4098 describe.c:4518 -#: describe.c:4601 +#: describe.c:74 describe.c:346 describe.c:603 describe.c:735 describe.c:879 +#: describe.c:1040 describe.c:1112 describe.c:3342 describe.c:3554 +#: describe.c:3645 describe.c:3893 describe.c:4038 describe.c:4279 +#: describe.c:4354 describe.c:4365 describe.c:4427 describe.c:4852 +#: describe.c:4935 msgid "Schema" msgstr "스키마" -#: describe.c:72 describe.c:160 describe.c:226 describe.c:234 describe.c:341 -#: describe.c:598 describe.c:728 describe.c:789 describe.c:871 describe.c:1061 -#: describe.c:3036 describe.c:3162 describe.c:3241 describe.c:3331 -#: describe.c:3410 describe.c:3579 describe.c:3643 describe.c:3719 -#: describe.c:3951 describe.c:4026 describe.c:4037 describe.c:4099 -#: describe.c:4291 describe.c:4375 describe.c:4599 +#: describe.c:75 describe.c:164 describe.c:231 describe.c:239 describe.c:347 +#: describe.c:604 describe.c:736 describe.c:797 describe.c:880 describe.c:1113 +#: describe.c:3343 describe.c:3477 describe.c:3555 describe.c:3646 +#: describe.c:3725 describe.c:3894 describe.c:3963 describe.c:4039 +#: describe.c:4280 describe.c:4355 describe.c:4366 describe.c:4428 +#: describe.c:4625 describe.c:4709 describe.c:4933 describe.c:5105 +#: describe.c:5312 msgid "Name" msgstr "이름" -#: describe.c:73 describe.c:353 describe.c:399 
describe.c:416 +#: describe.c:76 describe.c:359 describe.c:405 describe.c:422 msgid "Result data type" msgstr "반환 자료형" -#: describe.c:81 describe.c:94 describe.c:98 describe.c:354 describe.c:400 -#: describe.c:417 +#: describe.c:84 describe.c:97 describe.c:101 describe.c:360 describe.c:406 +#: describe.c:423 msgid "Argument data types" msgstr "인자 자료형" -#: describe.c:105 describe.c:170 describe.c:257 describe.c:462 describe.c:646 -#: describe.c:743 describe.c:814 describe.c:1063 describe.c:1676 -#: describe.c:2836 describe.c:3069 describe.c:3193 describe.c:3267 -#: describe.c:3340 describe.c:3423 describe.c:3491 describe.c:3586 -#: describe.c:3652 describe.c:3720 describe.c:3856 describe.c:3896 -#: describe.c:3967 describe.c:4029 describe.c:4038 describe.c:4100 -#: describe.c:4317 describe.c:4397 describe.c:4532 describe.c:4602 +#: describe.c:108 describe.c:174 describe.c:262 describe.c:468 describe.c:652 +#: describe.c:751 describe.c:822 describe.c:1115 describe.c:1756 +#: describe.c:3132 describe.c:3377 describe.c:3508 describe.c:3582 +#: describe.c:3655 describe.c:3738 describe.c:3806 describe.c:3906 +#: describe.c:3972 describe.c:4040 describe.c:4181 describe.c:4223 +#: describe.c:4296 describe.c:4358 describe.c:4367 describe.c:4429 +#: describe.c:4651 describe.c:4731 describe.c:4866 describe.c:4936 #: large_obj.c:289 large_obj.c:299 msgid "Description" msgstr "설명" -#: describe.c:123 +#: describe.c:126 msgid "List of aggregate functions" msgstr "통계 함수 목록" -#: describe.c:147 +#: describe.c:151 #, c-format msgid "The server (version %s) does not support access methods.\n" msgstr "서버(%s 버전)에서 접근 방법을 지원하지 않습니다.\n" -#: describe.c:161 +#: describe.c:165 msgid "Index" msgstr "인덱스" -#: describe.c:162 describe.c:360 describe.c:405 describe.c:422 describe.c:877 -#: describe.c:999 describe.c:1645 describe.c:3044 describe.c:3242 -#: describe.c:4394 +#: describe.c:166 describe.c:366 describe.c:411 describe.c:428 describe.c:887 +#: describe.c:1051 describe.c:1716 describe.c:3352 describe.c:3556 +#: describe.c:4728 msgid "Type" msgstr "종류" -#: describe.c:169 describe.c:4296 +#: describe.c:173 describe.c:4630 msgid "Handler" msgstr "핸들러" -#: describe.c:188 +#: describe.c:192 msgid "List of access methods" msgstr "접근 방법 목록" -#: describe.c:213 +#: describe.c:218 #, c-format msgid "The server (version %s) does not support tablespaces.\n" msgstr "서버(%s 버전)에서 테이블스페이스를 지원하지 않습니다.\n" -#: describe.c:227 describe.c:235 describe.c:450 describe.c:636 describe.c:790 -#: describe.c:989 describe.c:3045 describe.c:3166 describe.c:3412 -#: describe.c:3644 describe.c:4292 describe.c:4376 large_obj.c:288 +#: describe.c:232 describe.c:240 describe.c:456 describe.c:642 describe.c:798 +#: describe.c:1039 describe.c:3353 describe.c:3481 describe.c:3727 +#: describe.c:3964 describe.c:4626 describe.c:4710 describe.c:5106 +#: describe.c:5218 describe.c:5313 large_obj.c:288 msgid "Owner" msgstr "소유주" -#: describe.c:228 describe.c:236 +#: describe.c:233 describe.c:241 msgid "Location" msgstr "위치" -#: describe.c:247 describe.c:2647 +#: describe.c:252 describe.c:2944 msgid "Options" msgstr "옵션" -#: describe.c:252 describe.c:609 describe.c:806 describe.c:3061 -#: describe.c:3065 +#: describe.c:257 describe.c:615 describe.c:814 describe.c:3369 +#: describe.c:3373 msgid "Size" msgstr "크기" -#: describe.c:274 +#: describe.c:279 msgid "List of tablespaces" msgstr "테이블스페이스 목록" -#: describe.c:314 +#: describe.c:320 #, c-format msgid "\\df only takes [antwS+] as options\n" msgstr "\\df는 [antwS+]만 옵션으로 사용함\n" -#: describe.c:322 +#: 
describe.c:328 #, c-format msgid "\\df does not take a \"w\" option with server version %s\n" msgstr "\\df 명령은 %s 버전 서버에서는 \"w\" 옵션을 사용하지 않음\n" #. translator: "agg" is short for "aggregate" -#: describe.c:356 describe.c:402 describe.c:419 +#: describe.c:362 describe.c:408 describe.c:425 msgid "agg" msgstr "집계" -#: describe.c:357 +#: describe.c:363 msgid "window" msgstr "창" -#: describe.c:358 describe.c:403 describe.c:420 describe.c:1197 +#: describe.c:364 describe.c:409 describe.c:426 describe.c:1249 msgid "trigger" msgstr "트리거" -#: describe.c:359 describe.c:404 describe.c:421 +#: describe.c:365 describe.c:410 describe.c:427 msgid "normal" msgstr "일반" -#: describe.c:432 +#: describe.c:438 msgid "immutable" msgstr "immutable" -#: describe.c:433 +#: describe.c:439 msgid "stable" msgstr "stable" -#: describe.c:434 +#: describe.c:440 msgid "volatile" msgstr "volatile" -#: describe.c:435 +#: describe.c:441 msgid "Volatility" msgstr "휘발성" -#: describe.c:443 +#: describe.c:449 msgid "restricted" msgstr "엄격함" -#: describe.c:444 +#: describe.c:450 msgid "safe" msgstr "safe" -#: describe.c:445 +#: describe.c:451 msgid "unsafe" msgstr "unsafe" -#: describe.c:446 +#: describe.c:452 msgid "Parallel" msgstr "병렬처리" -#: describe.c:451 +#: describe.c:457 msgid "definer" msgstr "definer" -#: describe.c:452 +#: describe.c:458 msgid "invoker" msgstr "invoker" -#: describe.c:453 +#: describe.c:459 msgid "Security" msgstr "보안" -#: describe.c:460 +#: describe.c:466 msgid "Language" msgstr "언어" -#: describe.c:461 +#: describe.c:467 msgid "Source code" msgstr "소스 코드" -#: describe.c:560 +#: describe.c:566 msgid "List of functions" msgstr "함수 목록" -#: describe.c:608 +#: describe.c:614 msgid "Internal name" msgstr "내부 이름" -#: describe.c:630 +#: describe.c:636 msgid "Elements" msgstr "요소" -#: describe.c:686 +#: describe.c:693 msgid "List of data types" msgstr "자료형 목록" -#: describe.c:729 +#: describe.c:737 msgid "Left arg type" msgstr "왼쪽 인수 자료형" -#: describe.c:730 +#: describe.c:738 msgid "Right arg type" msgstr "오른쪽 인수 자료형" -#: describe.c:731 +#: describe.c:739 msgid "Result type" msgstr "반환 자료형" -#: describe.c:736 describe.c:3482 describe.c:3855 +#: describe.c:744 describe.c:3797 describe.c:4180 msgid "Function" msgstr "함수" -#: describe.c:761 +#: describe.c:769 msgid "List of operators" msgstr "연산자 목록" -#: describe.c:791 +#: describe.c:799 msgid "Encoding" msgstr "인코딩" -#: describe.c:796 describe.c:3580 +#: describe.c:804 describe.c:3895 msgid "Collate" msgstr "Collate" -#: describe.c:797 describe.c:3581 +#: describe.c:805 describe.c:3896 msgid "Ctype" msgstr "Ctype" -#: describe.c:810 +#: describe.c:818 msgid "Tablespace" msgstr "테이블스페이스" -#: describe.c:832 +#: describe.c:840 msgid "List of databases" msgstr "데이터베이스 목록" -#: describe.c:872 describe.c:992 describe.c:3037 +#: describe.c:881 describe.c:886 describe.c:1042 describe.c:3344 +#: describe.c:3351 msgid "table" msgstr "테이블" -#: describe.c:873 describe.c:3038 +#: describe.c:882 describe.c:3345 msgid "view" msgstr "뷰(view)" -#: describe.c:874 describe.c:3039 +#: describe.c:883 describe.c:3346 msgid "materialized view" msgstr "구체화된 뷰" -#: describe.c:875 describe.c:994 describe.c:3041 +#: describe.c:884 describe.c:1044 describe.c:3348 msgid "sequence" msgstr "시퀀스" -#: describe.c:876 describe.c:3043 +#: describe.c:885 describe.c:3350 msgid "foreign table" msgstr "외부 테이블" -#: describe.c:888 +#: describe.c:898 msgid "Column privileges" msgstr "칼럼 접근권한" -#: describe.c:919 +#: describe.c:929 describe.c:963 msgid "Policies" msgstr "정책" -#: describe.c:945 describe.c:4749 
describe.c:4753 +#: describe.c:995 describe.c:5369 describe.c:5373 msgid "Access privileges" msgstr "액세스 권한" -#: describe.c:976 +#: describe.c:1026 #, c-format msgid "The server (version %s) does not support altering default privileges.\n" msgstr "이 서버(%s 버전)는 기본 접근권한 변경 기능을 제공하지 않습니다.\n" -#: describe.c:996 +#: describe.c:1046 msgid "function" msgstr "함수" -#: describe.c:998 +#: describe.c:1048 msgid "type" msgstr "type" -#: describe.c:1022 +#: describe.c:1050 +msgid "schema" +msgstr "스키마" + +#: describe.c:1074 msgid "Default access privileges" msgstr "기본 접근권한" -#: describe.c:1062 +#: describe.c:1114 msgid "Object" msgstr "개체" -#: describe.c:1076 +#: describe.c:1128 msgid "table constraint" msgstr "테이블 제약 조건" -#: describe.c:1098 +#: describe.c:1150 msgid "domain constraint" msgstr "도메인 제약조건" -#: describe.c:1126 +#: describe.c:1178 msgid "operator class" msgstr "연산자 클래스" -#: describe.c:1155 +#: describe.c:1207 msgid "operator family" msgstr "연산자 부류" -#: describe.c:1177 +#: describe.c:1229 msgid "rule" msgstr "룰(rule)" -#: describe.c:1219 +#: describe.c:1271 msgid "Object descriptions" msgstr "개체 설명" -#: describe.c:1273 +#: describe.c:1327 describe.c:3440 #, c-format msgid "Did not find any relation named \"%s\".\n" msgstr "\"%s\" 이름을 릴레이션(relation) 없음.\n" -#: describe.c:1483 +#: describe.c:1330 describe.c:3443 +#, c-format +msgid "Did not find any relations.\n" +msgstr "관련 릴레이션 찾을 수 없음.\n" + +#: describe.c:1539 #, c-format msgid "Did not find any relation with OID %s.\n" msgstr "%s oid의 어떤 릴레이션(relation)도 찾을 수 없음.\n" -#: describe.c:1589 +#: describe.c:1652 describe.c:1701 #, c-format msgid "Unlogged table \"%s.%s\"" msgstr "로그 미사용 테이블 \"%s.%s\"" -#: describe.c:1592 +#: describe.c:1655 describe.c:1704 #, c-format msgid "Table \"%s.%s\"" msgstr "\"%s.%s\" 테이블" -#: describe.c:1596 +#: describe.c:1659 #, c-format msgid "View \"%s.%s\"" msgstr "\"%s.%s\" 뷰(view)" -#: describe.c:1601 +#: describe.c:1664 #, c-format msgid "Unlogged materialized view \"%s.%s\"" msgstr "트랜잭션 로그를 남기지 않은 구체화된 뷰 \"%s.%s\"" -#: describe.c:1604 +#: describe.c:1667 #, c-format msgid "Materialized view \"%s.%s\"" msgstr "Materialized 뷰 \"%s.%s\"" -#: describe.c:1608 +#: describe.c:1671 #, c-format msgid "Sequence \"%s.%s\"" msgstr "\"%s.%s\" 시퀀스" -#: describe.c:1613 +#: describe.c:1676 #, c-format msgid "Unlogged index \"%s.%s\"" msgstr "\"%s.%s\" 로그 미사용 인덱스" -#: describe.c:1616 +#: describe.c:1679 #, c-format msgid "Index \"%s.%s\"" msgstr "\"%s.%s\" 인덱스" -#: describe.c:1621 +#: describe.c:1684 #, c-format msgid "Special relation \"%s.%s\"" msgstr "\"%s.%s\" 특수 릴레이션(relation)" -#: describe.c:1625 +#: describe.c:1688 #, c-format msgid "TOAST table \"%s.%s\"" msgstr "\"%s.%s\" TOAST 테이블" -#: describe.c:1629 +#: describe.c:1692 #, c-format msgid "Composite type \"%s.%s\"" msgstr "\"%s.%s\" 복합자료형" -#: describe.c:1633 +#: describe.c:1696 #, c-format msgid "Foreign table \"%s.%s\"" msgstr "\"%s.%s\" 외부 테이블" -#: describe.c:1644 +#: describe.c:1715 msgid "Column" msgstr "필드명" -#: describe.c:1653 -msgid "Modifiers" -msgstr "기타 조건" +#: describe.c:1726 describe.c:3562 +msgid "Collation" +msgstr "Collation" + +#: describe.c:1727 describe.c:3569 +msgid "Nullable" +msgstr "NULL허용" -#: describe.c:1658 +#: describe.c:1728 describe.c:3570 +msgid "Default" +msgstr "초기값" + +#: describe.c:1733 msgid "Value" msgstr "값" -#: describe.c:1661 +#: describe.c:1736 msgid "Definition" msgstr "정의" -#: describe.c:1664 describe.c:4312 describe.c:4396 describe.c:4467 -#: describe.c:4531 -msgid "FDW Options" +#: describe.c:1739 describe.c:4646 
describe.c:4730 describe.c:4801 +#: describe.c:4865 +msgid "FDW options" msgstr "FDW 옵션" -#: describe.c:1668 +#: describe.c:1743 msgid "Storage" msgstr "스토리지" -#: describe.c:1671 +#: describe.c:1748 msgid "Stats target" msgstr "통계수집량" -#: describe.c:1721 +#: describe.c:1897 #, c-format -msgid "collate %s" -msgstr "collate %s" +msgid "Partition of: %s %s" +msgstr "소속 파티션: %s %s" -#: describe.c:1729 -msgid "not null" -msgstr "Null 아님" +#: describe.c:1903 +#, c-format +msgid "Partition constraint: %s" +msgstr "파티션 제약조건: %s" -#. translator: default values of column definitions -#: describe.c:1739 +#: describe.c:1926 #, c-format -msgid "default %s" -msgstr "초기값 %s" +msgid "Partition key: %s" +msgstr "파티션 키: %s" -#: describe.c:1854 +#: describe.c:1994 msgid "primary key, " msgstr "기본키, " -#: describe.c:1856 +#: describe.c:1996 msgid "unique, " msgstr "고유, " -#: describe.c:1862 +#: describe.c:2002 #, c-format msgid "for table \"%s.%s\"" msgstr "적용테이블: \"%s.%s\"" -#: describe.c:1866 +#: describe.c:2006 #, c-format msgid ", predicate (%s)" msgstr ", predicate (%s)" -#: describe.c:1869 +#: describe.c:2009 msgid ", clustered" msgstr ", 클러스됨" -#: describe.c:1872 +#: describe.c:2012 msgid ", invalid" msgstr ", 잘못됨" -#: describe.c:1875 +#: describe.c:2015 msgid ", deferrable" -msgstr "" +msgstr ", 지연가능" -#: describe.c:1878 +#: describe.c:2018 msgid ", initially deferred" -msgstr "" +msgstr ", 트랜잭션단위지연" -#: describe.c:1881 +#: describe.c:2021 msgid ", replica identity" -msgstr "" +msgstr ", 복제 식별자" -#: describe.c:1916 +#: describe.c:2060 #, c-format msgid "Owned by: %s" msgstr "소유주: %s" -#: describe.c:1976 +#: describe.c:2065 +#, c-format +msgid "Sequence for identity column: %s" +msgstr "식별 칼럼용 시퀀스: %s" + +#: describe.c:2129 msgid "Indexes:" msgstr "인덱스들:" -#: describe.c:2060 +#: describe.c:2213 msgid "Check constraints:" msgstr "체크 제약 조건:" -#: describe.c:2091 +#: describe.c:2244 msgid "Foreign-key constraints:" msgstr "참조키 제약 조건:" -#: describe.c:2122 +#: describe.c:2275 msgid "Referenced by:" msgstr "다음에서 참조됨:" -#: describe.c:2167 +#: describe.c:2325 msgid "Policies:" -msgstr "" +msgstr "정책:" -#: describe.c:2170 +#: describe.c:2328 msgid "Policies (forced row security enabled):" -msgstr "" +msgstr "정책 (로우단위 보안정책 강제 활성화):" -#: describe.c:2173 +#: describe.c:2331 msgid "Policies (row security enabled): (none)" -msgstr "" +msgstr "정책 (로우단위 보안정책 활성화): (없음)" -#: describe.c:2176 +#: describe.c:2334 msgid "Policies (forced row security enabled): (none)" -msgstr "" +msgstr "정책 (로우단위 보안정책 강제 활성화): (없음)" -#: describe.c:2179 +#: describe.c:2337 msgid "Policies (row security disabled):" -msgstr "" +msgstr "정책 (로우단위 보안정책 비활성화):" + +#: describe.c:2399 +msgid "Statistics objects:" +msgstr "통계정보 객체:" -#: describe.c:2279 describe.c:2329 +#: describe.c:2502 describe.c:2587 msgid "Rules:" msgstr "룰(rule)들:" -#: describe.c:2282 +#: describe.c:2505 msgid "Disabled rules:" msgstr "사용중지된 규칙:" -#: describe.c:2285 +#: describe.c:2508 msgid "Rules firing always:" msgstr "항상 발생하는 규칙:" -#: describe.c:2288 +#: describe.c:2511 msgid "Rules firing on replica only:" msgstr "복제본에서만 발생하는 규칙:" -#: describe.c:2312 +#: describe.c:2551 +msgid "Publications:" +msgstr "발행자:" + +#: describe.c:2570 msgid "View definition:" msgstr "뷰 정의:" -#: describe.c:2447 +#: describe.c:2705 msgid "Triggers:" msgstr "트리거들:" -#: describe.c:2451 +#: describe.c:2709 msgid "Disabled user triggers:" msgstr "사용중지된 사용자 트리거:" -#: describe.c:2453 +#: describe.c:2711 msgid "Disabled triggers:" msgstr "사용중지된 트리거:" -#: describe.c:2456 +#: describe.c:2714 msgid "Disabled 
internal triggers:" msgstr "사용중지된 내부 트리거:" -#: describe.c:2459 +#: describe.c:2717 msgid "Triggers firing always:" msgstr "항상 발생하는 트리거:" -#: describe.c:2462 +#: describe.c:2720 msgid "Triggers firing on replica only:" msgstr "복제본에서만 발생하는 트리거:" -#: describe.c:2541 +#: describe.c:2779 +#, c-format +msgid "Server: %s" +msgstr "서버: %s" + +#: describe.c:2787 +#, c-format +msgid "FDW options: (%s)" +msgstr "FDW 옵션들: (%s)" + +#: describe.c:2806 msgid "Inherits" msgstr "상속" -#: describe.c:2580 +#: describe.c:2860 #, c-format msgid "Number of child tables: %d (Use \\d+ to list them.)" msgstr "하위 테이블 수: %d (\\d+ 명령으로 볼 수 있음)" -#: describe.c:2587 +#: describe.c:2862 +#, c-format +msgid "Number of partitions: %d (Use \\d+ to list them.)" +msgstr "파티션 테이블 수: %d (\\d+ 명령으로 볼 수 있음)" + +#: describe.c:2870 msgid "Child tables" msgstr "하위 테이블" -#: describe.c:2609 +#: describe.c:2870 +msgid "Partitions" +msgstr "파티션들" + +#: describe.c:2904 #, c-format msgid "Typed table of type: %s" -msgstr "" +msgstr "자료형의 typed 테이블: %s" -#: describe.c:2623 +#: describe.c:2920 msgid "Replica Identity" -msgstr "" +msgstr "복제 식별자" -#: describe.c:2636 +#: describe.c:2933 msgid "Has OIDs: yes" msgstr "OID 사용: yes" -#: describe.c:2724 +#: describe.c:3020 #, c-format msgid "Tablespace: \"%s\"" msgstr "테이블스페이스: \"%s\"" #. translator: before this string there's an index description like #. '"foo_pkey" PRIMARY KEY, btree (a)' -#: describe.c:2736 +#: describe.c:3032 #, c-format msgid ", tablespace \"%s\"" msgstr ", \"%s\" 테이블스페이스" -#: describe.c:2829 +#: describe.c:3125 msgid "List of roles" msgstr "롤 목록" -#: describe.c:2831 +#: describe.c:3127 msgid "Role name" msgstr "롤 이름" -#: describe.c:2832 +#: describe.c:3128 msgid "Attributes" msgstr "속성" -#: describe.c:2833 +#: describe.c:3129 msgid "Member of" msgstr "소속 그룹:" -#: describe.c:2844 +#: describe.c:3140 msgid "Superuser" msgstr "슈퍼유저" -#: describe.c:2847 +#: describe.c:3143 msgid "No inheritance" msgstr "상속 없음" -#: describe.c:2850 +#: describe.c:3146 msgid "Create role" msgstr "롤 만들기" -#: describe.c:2853 +#: describe.c:3149 msgid "Create DB" msgstr "DB 만들기" -#: describe.c:2856 +#: describe.c:3152 msgid "Cannot login" msgstr "로그인할 수 없음" -#: describe.c:2860 +#: describe.c:3156 msgid "Replication" msgstr "복제" -#: describe.c:2864 +#: describe.c:3160 msgid "Bypass RLS" -msgstr "" +msgstr "RLS 통과" -#: describe.c:2873 +#: describe.c:3169 msgid "No connections" msgstr "연결 없음" -#: describe.c:2875 +#: describe.c:3171 #, c-format msgid "%d connection" msgid_plural "%d connections" msgstr[0] "%d개 연결" -#: describe.c:2885 +#: describe.c:3181 msgid "Password valid until " -msgstr "" +msgstr "비밀번호 만료기한: " -#: describe.c:2941 +#: describe.c:3231 +#, c-format +msgid "The server (version %s) does not support per-database role settings.\n" +msgstr "이 서버(%s 버전)는 데이터베이스 개별 롤 설정을 지원하지 않습니다.\n" + +#: describe.c:3244 msgid "Role" msgstr "롤" -#: describe.c:2942 +#: describe.c:3245 msgid "Database" msgstr "데이터베이스" -#: describe.c:2943 +#: describe.c:3246 msgid "Settings" msgstr "설정" -#: describe.c:2953 +#: describe.c:3267 #, c-format -msgid "No per-database role settings support in this server version.\n" -msgstr "이 버전의 서버는 데이터베이스 단위 롤 설정이 안됩니다.\n" +msgid "Did not find any settings for role \"%s\" and database \"%s\".\n" +msgstr "\"%s\" 롤과 \"%s\" 데이터베이스에 대한 특정 설정이 없습니다.\n" -#: describe.c:2964 +#: describe.c:3270 #, c-format -msgid "No matching settings found.\n" -msgstr "찾는 설정이 없습니다.\n" +msgid "Did not find any settings for role \"%s\".\n" +msgstr "\"%s\" 롤용 특정 설정이 없음.\n" -#: describe.c:2966 +#: 
describe.c:3273 #, c-format -msgid "No settings found.\n" -msgstr "설정 없음.\n" +msgid "Did not find any settings.\n" +msgstr "추가 설정 없음.\n" -#: describe.c:2971 +#: describe.c:3278 msgid "List of settings" msgstr "설정 목록" -#: describe.c:3040 +#: describe.c:3347 msgid "index" msgstr "인덱스" -#: describe.c:3042 +#: describe.c:3349 msgid "special" msgstr "특수" -#: describe.c:3050 describe.c:4519 +#: describe.c:3358 describe.c:4853 msgid "Table" msgstr "테이블" -#: describe.c:3126 -#, c-format -msgid "No matching relations found.\n" -msgstr "검색조건에 일치하는 릴레이션(relation) 없음.\n" - -#: describe.c:3128 -#, c-format -msgid "No relations found.\n" -msgstr "릴레이션(relation) 없음.\n" - -#: describe.c:3133 +#: describe.c:3448 msgid "List of relations" msgstr "릴레이션(relation) 목록" -#: describe.c:3170 +#: describe.c:3485 msgid "Trusted" msgstr "신뢰됨" -#: describe.c:3178 -msgid "Internal Language" +#: describe.c:3493 +msgid "Internal language" msgstr "내부 언어" -#: describe.c:3179 -msgid "Call Handler" +#: describe.c:3494 +msgid "Call handler" msgstr "호출 핸들러" -#: describe.c:3180 describe.c:4299 +#: describe.c:3495 describe.c:4633 msgid "Validator" msgstr "유효성 검사기" -#: describe.c:3183 -msgid "Inline Handler" +#: describe.c:3498 +msgid "Inline handler" msgstr "인라인 핸들러" -#: describe.c:3211 +#: describe.c:3526 msgid "List of languages" msgstr "언어 목록" -#: describe.c:3255 -msgid "Modifier" -msgstr "기타 조건" - -#: describe.c:3256 +#: describe.c:3571 msgid "Check" msgstr "체크" -#: describe.c:3298 +#: describe.c:3613 msgid "List of domains" msgstr "도메인(domain) 목록" -#: describe.c:3332 +#: describe.c:3647 msgid "Source" msgstr "소스" -#: describe.c:3333 +#: describe.c:3648 msgid "Destination" msgstr "설명" -#: describe.c:3334 describe.c:3483 +#: describe.c:3649 describe.c:3798 msgid "no" msgstr "아니오" -#: describe.c:3334 describe.c:3485 +#: describe.c:3649 describe.c:3800 msgid "yes" msgstr "예" -#: describe.c:3335 +#: describe.c:3650 msgid "Default?" msgstr "초기값?" -#: describe.c:3372 +#: describe.c:3687 msgid "List of conversions" msgstr "문자코드변환규칙(conversion) 목록" -#: describe.c:3411 +#: describe.c:3726 msgid "Event" msgstr "이벤트" -#: describe.c:3413 +#: describe.c:3728 msgid "enabled" msgstr "활성화" -#: describe.c:3414 +#: describe.c:3729 msgid "replica" -msgstr "" +msgstr "replica" -#: describe.c:3415 +#: describe.c:3730 msgid "always" -msgstr "" +msgstr "항상" -#: describe.c:3416 +#: describe.c:3731 msgid "disabled" msgstr "비활성화" -#: describe.c:3417 +#: describe.c:3732 describe.c:5314 msgid "Enabled" msgstr "활성화" -#: describe.c:3418 +#: describe.c:3733 msgid "Procedure" msgstr "프로시져" -#: describe.c:3419 +#: describe.c:3734 msgid "Tags" -msgstr "" +msgstr "태그" -#: describe.c:3438 +#: describe.c:3753 msgid "List of event triggers" msgstr "이벤트 트리거 목록" -#: describe.c:3480 +#: describe.c:3795 msgid "Source type" msgstr "Source 자료형" -#: describe.c:3481 +#: describe.c:3796 msgid "Target type" msgstr "Target 자료형" -#: describe.c:3484 +#: describe.c:3799 msgid "in assignment" msgstr "in assignment" -#: describe.c:3486 +#: describe.c:3801 msgid "Implicit?" msgstr "Implicit?" 
-#: describe.c:3537 +#: describe.c:3852 msgid "List of casts" msgstr "형변환자 목록" -#: describe.c:3565 +#: describe.c:3880 #, c-format msgid "The server (version %s) does not support collations.\n" msgstr "이 서버(%s 버전)는 문자 정렬(collation) 기능을 지원하지 않습니다.\n" -#: describe.c:3616 +#: describe.c:3901 +msgid "Provider" +msgstr "제공자" + +#: describe.c:3936 msgid "List of collations" msgstr "문자 정렬 목록" -#: describe.c:3675 +#: describe.c:3995 msgid "List of schemas" msgstr "스키마(schema) 목록" -#: describe.c:3700 describe.c:3938 describe.c:4009 describe.c:4080 +#: describe.c:4020 describe.c:4267 describe.c:4338 describe.c:4409 #, c-format msgid "The server (version %s) does not support full text search.\n" msgstr "이 서버(%s 버전)에서 전문 검색을 지원하지 않습니다.\n" -#: describe.c:3735 +#: describe.c:4055 msgid "List of text search parsers" msgstr "텍스트 검색 파서 목록" -#: describe.c:3778 +#: describe.c:4100 #, c-format msgid "Did not find any text search parser named \"%s\".\n" msgstr "\"%s\"(이)라는 텍스트 검색 파서를 찾지 못했습니다.\n" -#: describe.c:3853 +#: describe.c:4103 +#, c-format +msgid "Did not find any text search parsers.\n" +msgstr "특정 텍스트 검색 파서를 찾지 못했습니다.\n" + +#: describe.c:4178 msgid "Start parse" msgstr "구문 분석 시작" -#: describe.c:3854 +#: describe.c:4179 msgid "Method" msgstr "방법" -#: describe.c:3858 +#: describe.c:4183 msgid "Get next token" msgstr "다음 토큰 가져오기" -#: describe.c:3860 +#: describe.c:4185 msgid "End parse" msgstr "구문 분석 종료" -#: describe.c:3862 +#: describe.c:4187 msgid "Get headline" msgstr "헤드라인 가져오기" -#: describe.c:3864 +#: describe.c:4189 msgid "Get token types" msgstr "토큰 형식 가져오기" -#: describe.c:3874 +#: describe.c:4200 #, c-format msgid "Text search parser \"%s.%s\"" msgstr "\"%s.%s\" 텍스트 검색 파서" -#: describe.c:3876 +#: describe.c:4203 #, c-format msgid "Text search parser \"%s\"" msgstr "\"%s\" 텍스트 검색 파서" -#: describe.c:3895 +#: describe.c:4222 msgid "Token name" msgstr "토큰 이름" -#: describe.c:3906 +#: describe.c:4233 #, c-format msgid "Token types for parser \"%s.%s\"" msgstr "\"%s.%s\" 파서의 토큰 형식" -#: describe.c:3908 +#: describe.c:4236 #, c-format msgid "Token types for parser \"%s\"" msgstr "\"%s\" 파서의 토큰 형식" -#: describe.c:3961 +#: describe.c:4290 msgid "Template" msgstr "템플릿" -#: describe.c:3962 +#: describe.c:4291 msgid "Init options" msgstr "초기화 옵션" -#: describe.c:3984 +#: describe.c:4313 msgid "List of text search dictionaries" msgstr "텍스트 검색 사전 목록" -#: describe.c:4027 +#: describe.c:4356 msgid "Init" msgstr "초기화" -#: describe.c:4028 +#: describe.c:4357 msgid "Lexize" msgstr "Lexize" -#: describe.c:4055 +#: describe.c:4384 msgid "List of text search templates" msgstr "텍스트 검색 템플릿 목록" -#: describe.c:4115 +#: describe.c:4444 msgid "List of text search configurations" msgstr "텍스트 검색 구성 목록" -#: describe.c:4159 +#: describe.c:4490 #, c-format msgid "Did not find any text search configuration named \"%s\".\n" msgstr "\"%s\"(이)라는 텍스트 검색 구성을 찾지 못했습니다.\n" -#: describe.c:4225 +#: describe.c:4493 +#, c-format +msgid "Did not find any text search configurations.\n" +msgstr "특정 텍스트 검색 구성을 찾지 못했습니다.\n" + +#: describe.c:4559 msgid "Token" msgstr "토큰" -#: describe.c:4226 +#: describe.c:4560 msgid "Dictionaries" msgstr "사전" -#: describe.c:4237 +#: describe.c:4571 #, c-format msgid "Text search configuration \"%s.%s\"" msgstr "텍스트 검색 구성 \"%s.%s\"" -#: describe.c:4240 +#: describe.c:4574 #, c-format msgid "Text search configuration \"%s\"" msgstr "텍스트 검색 구성 \"%s\"" -#: describe.c:4244 +#: describe.c:4578 #, c-format msgid "" "\n" @@ -1837,7 +1936,7 @@ msgstr "" "\n" "파서: \"%s.%s\"" -#: describe.c:4247 +#: describe.c:4581 #, 
c-format msgid "" "\n" @@ -1846,92 +1945,157 @@ msgstr "" "\n" "파서: \"%s\"" -#: describe.c:4281 +#: describe.c:4615 #, c-format msgid "The server (version %s) does not support foreign-data wrappers.\n" msgstr "이 서버(%s 버전)에서 외부 데이터 래퍼를 지원하지 않습니다.\n" -#: describe.c:4339 +#: describe.c:4673 msgid "List of foreign-data wrappers" msgstr "외부 데이터 래퍼 목록" -#: describe.c:4364 +#: describe.c:4698 #, c-format msgid "The server (version %s) does not support foreign servers.\n" msgstr "이 서버(%s 버전)에서 외부 서버를 지원하지 않습니다.\n" -#: describe.c:4377 +#: describe.c:4711 msgid "Foreign-data wrapper" msgstr "외부 데이터 래퍼" -#: describe.c:4395 describe.c:4600 +#: describe.c:4729 describe.c:4934 msgid "Version" msgstr "버전" -#: describe.c:4421 +#: describe.c:4755 msgid "List of foreign servers" msgstr "외부 서버 목록" -#: describe.c:4446 +#: describe.c:4780 #, c-format msgid "The server (version %s) does not support user mappings.\n" msgstr "이 서버(%s 버전)에서 사용자 매핑을 지원하지 않습니다.\n" -#: describe.c:4456 describe.c:4520 +#: describe.c:4790 describe.c:4854 msgid "Server" msgstr "서버" -#: describe.c:4457 +#: describe.c:4791 msgid "User name" msgstr "사용자 이름" -#: describe.c:4482 +#: describe.c:4816 msgid "List of user mappings" msgstr "사용자 매핑 목록" -#: describe.c:4507 +#: describe.c:4841 #, c-format msgid "The server (version %s) does not support foreign tables.\n" msgstr "이 서버(%s 버전)에서 외부 테이블을 지원하지 않습니다.\n" -#: describe.c:4560 +#: describe.c:4894 msgid "List of foreign tables" msgstr "외부 테이블 목록" -#: describe.c:4585 describe.c:4642 +#: describe.c:4919 describe.c:4976 #, c-format msgid "The server (version %s) does not support extensions.\n" msgstr "이 서버(%s 버전)에서 확장기능을 지원하지 않습니다.\n" -#: describe.c:4617 +#: describe.c:4951 msgid "List of installed extensions" msgstr "설치된 확장기능 목록" -#: describe.c:4670 +#: describe.c:5004 #, c-format msgid "Did not find any extension named \"%s\".\n" msgstr "\"%s\" 이름의 확장 기능 모듈을 찾을 수 없습니다.\n" -#: describe.c:4673 +#: describe.c:5007 #, c-format msgid "Did not find any extensions.\n" msgstr "추가할 확장 기능 모듈이 없음.\n" -#: describe.c:4717 -msgid "Object Description" -msgstr "객체 설명" +#: describe.c:5051 +msgid "Object description" +msgstr "개체 설명" -#: describe.c:4726 +#: describe.c:5061 #, c-format msgid "Objects in extension \"%s\"" msgstr "\"%s\" 확장 기능 안에 포함된 객체들" -#: help.c:63 +#: describe.c:5090 describe.c:5156 +#, c-format +msgid "The server (version %s) does not support publications.\n" +msgstr "이 서버(%s 버전)는 논리 복제 발행 기능을 지원하지 않습니다.\n" + +#: describe.c:5107 describe.c:5219 +msgid "All tables" +msgstr "모든 테이블" + +#: describe.c:5108 describe.c:5220 +msgid "Inserts" +msgstr "Inserts" + +#: describe.c:5109 describe.c:5221 +msgid "Updates" +msgstr "Updates" + +#: describe.c:5110 describe.c:5222 +msgid "Deletes" +msgstr "Deletes" + +#: describe.c:5127 +msgid "List of publications" +msgstr "발행 목록" + +#: describe.c:5188 +#, c-format +msgid "Did not find any publication named \"%s\".\n" +msgstr "\"%s\" 이름의 발행 없음.\n" + +#: describe.c:5191 +#, c-format +msgid "Did not find any publications.\n" +msgstr "발행 없음.\n" + +#: describe.c:5215 +#, c-format +msgid "Publication %s" +msgstr "%s 발행" + +#: describe.c:5255 +msgid "Tables:" +msgstr "테이블" + +#: describe.c:5299 +#, c-format +msgid "The server (version %s) does not support subscriptions.\n" +msgstr "이 서버(%s 버전)는 구독 기능을 지원하지 않습니다.\n" + +#: describe.c:5315 +msgid "Publication" +msgstr "발행" + +#: describe.c:5322 +msgid "Synchronous commit" +msgstr "동기식 커밋" + +#: describe.c:5323 +msgid "Conninfo" +msgstr "연결정보" + +#: describe.c:5345 +msgid "List of subscriptions" +msgstr "구독 목록" + +#: 
help.c:62 #, c-format msgid "%s\n" msgstr "%s\n" -#: help.c:74 +#: help.c:73 #, c-format msgid "" "psql is the PostgreSQL interactive terminal.\n" @@ -1940,12 +2104,12 @@ msgstr "" "psql은 PostgreSQL 대화식 터미널입니다.\n" "\n" -#: help.c:75 help.c:333 help.c:367 help.c:394 +#: help.c:74 help.c:344 help.c:383 help.c:410 #, c-format msgid "Usage:\n" msgstr "사용법:\n" -#: help.c:76 +#: help.c:75 #, c-format msgid "" " psql [OPTION]... [DBNAME [USERNAME]]\n" @@ -1954,12 +2118,12 @@ msgstr "" " psql [OPTION]... [DBNAME [USERNAME]]\n" "\n" -#: help.c:78 +#: help.c:77 #, c-format msgid "General options:\n" msgstr "일반 옵션:\n" -#: help.c:83 +#: help.c:82 #, c-format msgid "" " -c, --command=COMMAND run only single command (SQL or internal) and " @@ -1967,24 +2131,24 @@ msgid "" msgstr "" " -c, --command=COMMAND 하나의 명령(SQL 또는 내부 명령)만 실행하고 끝냄\n" -#: help.c:84 +#: help.c:83 #, c-format msgid "" " -d, --dbname=DBNAME database name to connect to (default: \"%s\")\n" msgstr " -d, --dbname=DBNAME 연결할 데이터베이스 이름(기본 값: \"%s\")\n" -#: help.c:85 +#: help.c:84 #, c-format msgid " -f, --file=FILENAME execute commands from file, then exit\n" msgstr " -f, --file=FILENAME 파일 안에 지정한 명령을 실행하고 끝냄\n" -#: help.c:86 +#: help.c:85 #, c-format msgid " -l, --list list available databases, then exit\n" msgstr "" " -l, --list 사용 가능한 데이터베이스 목록을 표시하고 끝냄\n" -#: help.c:87 +#: help.c:86 #, c-format msgid "" " -v, --set=, --variable=NAME=VALUE\n" @@ -1995,17 +2159,17 @@ msgstr "" " psql 변수 NAME을 VALUE로 설정\n" " (예, -v ON_ERROR_STOP=1)\n" -#: help.c:90 +#: help.c:89 #, c-format msgid " -V, --version output version information, then exit\n" msgstr " -V, --version 버전 정보를 보여주고 마침\n" -#: help.c:91 +#: help.c:90 #, c-format msgid " -X, --no-psqlrc do not read startup file (~/.psqlrc)\n" msgstr " -X, --no-psqlrc 시작 파일(~/.psqlrc)을 읽지 않음\n" -#: help.c:92 +#: help.c:91 #, c-format msgid "" " -1 (\"one\"), --single-transaction\n" @@ -2015,25 +2179,24 @@ msgstr "" " -1 (\"one\"), --single-transaction\n" " 명령 파일을 하나의 트랜잭션으로 실행\n" -#: help.c:94 +#: help.c:93 #, c-format msgid " -?, --help[=options] show this help, then exit\n" msgstr " -?, --help[=options] 이 도움말을 표시하고 종료\n" -#: help.c:95 +#: help.c:94 #, c-format msgid " --help=commands list backslash commands, then exit\n" msgstr "" " --help=commands psql 내장명령어(\\문자로 시작하는)를 표시하고 종" "료\n" -#: help.c:96 +#: help.c:95 #, c-format msgid " --help=variables list special variables, then exit\n" -msgstr "" -" --help=variables 특별 변수들 보여주고, 종료\n" +msgstr " --help=variables 특별 변수들 보여주고, 종료\n" -#: help.c:98 +#: help.c:97 #, c-format msgid "" "\n" @@ -2042,63 +2205,63 @@ msgstr "" "\n" "입출력 옵션:\n" -#: help.c:99 +#: help.c:98 #, c-format msgid " -a, --echo-all echo all input from script\n" msgstr " -a, --echo-all 스크립트의 모든 입력 표시\n" -#: help.c:100 +#: help.c:99 #, c-format msgid " -b, --echo-errors echo failed commands\n" msgstr " -b, --echo-errors 실패한 명령들 출력\n" -#: help.c:101 +#: help.c:100 #, c-format msgid " -e, --echo-queries echo commands sent to server\n" msgstr " -e, --echo-queries 서버로 보낸 명령 표시\n" -#: help.c:102 +#: help.c:101 #, c-format msgid "" " -E, --echo-hidden display queries that internal commands generate\n" msgstr " -E, --echo-hidden 내부 명령이 생성하는 쿼리 표시\n" -#: help.c:103 +#: help.c:102 #, c-format msgid " -L, --log-file=FILENAME send session log to file\n" msgstr " -L, --log-file=FILENAME 세션 로그를 파일로 보냄\n" -#: help.c:104 +#: help.c:103 #, c-format msgid "" " -n, --no-readline disable enhanced command line editing (readline)\n" msgstr "" " -n, --no-readline 확장된 명령행 편집 기능을 사용중지함(readline)\n" -#: help.c:105 +#: 
help.c:104 #, c-format msgid " -o, --output=FILENAME send query results to file (or |pipe)\n" msgstr " -o, --output=FILENAME 쿼리 결과를 파일(또는 |파이프)로 보냄\n" -#: help.c:106 +#: help.c:105 #, c-format msgid "" " -q, --quiet run quietly (no messages, only query output)\n" msgstr " -q, --quiet 자동 실행(메시지 없이 쿼리 결과만 표시)\n" -#: help.c:107 +#: help.c:106 #, c-format msgid " -s, --single-step single-step mode (confirm each query)\n" msgstr " -s, --single-step 단독 순차 모드(각 쿼리 확인)\n" -#: help.c:108 +#: help.c:107 #, c-format msgid "" " -S, --single-line single-line mode (end of line terminates SQL " "command)\n" msgstr " -S, --single-line 한 줄 모드(줄 끝에서 SQL 명령이 종료됨)\n" -#: help.c:110 +#: help.c:109 #, c-format msgid "" "\n" @@ -2107,12 +2270,12 @@ msgstr "" "\n" "출력 형식 옵션:\n" -#: help.c:111 +#: help.c:110 #, c-format msgid " -A, --no-align unaligned table output mode\n" msgstr " -A, --no-align 정렬되지 않은 표 형태의 출력 모드\n" -#: help.c:112 +#: help.c:111 #, c-format msgid "" " -F, --field-separator=STRING\n" @@ -2123,12 +2286,12 @@ msgstr "" " unaligned 출력용 필드 구분자 설정(기본 값: \"%s" "\")\n" -#: help.c:115 +#: help.c:114 #, c-format msgid " -H, --html HTML table output mode\n" msgstr " -H, --html HTML 표 형태 출력 모드\n" -#: help.c:116 +#: help.c:115 #, c-format msgid "" " -P, --pset=VAR[=ARG] set printing option VAR to ARG (see \\pset " @@ -2136,7 +2299,7 @@ msgid "" msgstr "" " -P, --pset=VAR[=ARG] 인쇄 옵션 VAR을 ARG로 설정(\\pset 명령 참조)\n" -#: help.c:117 +#: help.c:116 #, c-format msgid "" " -R, --record-separator=STRING\n" @@ -2147,12 +2310,12 @@ msgstr "" " unaligned 출력용 레코드 구분자 설정\n" " (기본 값: 줄바꿈 문자)\n" -#: help.c:119 +#: help.c:118 #, c-format msgid " -t, --tuples-only print rows only\n" msgstr " -t, --tuples-only 행만 인쇄\n" -#: help.c:120 +#: help.c:119 #, c-format msgid "" " -T, --table-attr=TEXT set HTML table tag attributes (e.g., width, " @@ -2160,12 +2323,12 @@ msgid "" msgstr "" " -T, --table-attr=TEXT HTML table 태그 속성 설정(예: width, border)\n" -#: help.c:121 +#: help.c:120 #, c-format msgid " -x, --expanded turn on expanded table output\n" msgstr " -x, --expanded 확장된 표 형태로 출력\n" -#: help.c:122 +#: help.c:121 #, c-format msgid "" " -z, --field-separator-zero\n" @@ -2175,7 +2338,7 @@ msgstr "" " -z, --field-separator-zero\n" " unaligned 출력용 필드 구분자를 0 바이트로 지정\n" -#: help.c:124 +#: help.c:123 #, c-format msgid "" " -0, --record-separator-zero\n" @@ -2185,7 +2348,7 @@ msgstr "" " -0, --record-separator-zero\n" " unaligned 출력용 레코드 구분자를 0 바이트로 지정\n" -#: help.c:127 +#: help.c:126 #, c-format msgid "" "\n" @@ -2194,7 +2357,7 @@ msgstr "" "\n" "연결 옵션들:\n" -#: help.c:130 +#: help.c:129 #, c-format msgid "" " -h, --host=HOSTNAME database server host or socket directory " @@ -2203,33 +2366,33 @@ msgstr "" " -h, --host=HOSTNAME 데이터베이스 서버 호스트 또는 소켓 디렉터리\n" " (기본값: \"%s\")\n" -#: help.c:131 +#: help.c:130 msgid "local socket" msgstr "로컬 소켓" -#: help.c:134 +#: help.c:133 #, c-format msgid " -p, --port=PORT database server port (default: \"%s\")\n" msgstr " -p, --port=PORT 데이터베이스 서버 포트(기본 값: \"%s\")\n" -#: help.c:140 +#: help.c:139 #, c-format msgid " -U, --username=USERNAME database user name (default: \"%s\")\n" msgstr " -U, --username=USERNAME 데이터베이스 사용자 이름(기본 값: \"%s\")\n" -#: help.c:141 +#: help.c:140 #, c-format msgid " -w, --no-password never prompt for password\n" msgstr " -w, --no-password 암호 프롬프트 표시 안 함\n" -#: help.c:142 +#: help.c:141 #, c-format msgid "" " -W, --password force password prompt (should happen " "automatically)\n" msgstr " -W, --password 암호 입력 프롬프트 보임(자동으로 처리함)\n" -#: help.c:144 +#: help.c:143 #, c-format msgid "" "\n" 
@@ -2245,22 +2408,29 @@ msgstr "" "설명서에서 psql 섹션을 참조하십시오.\n" "\n" -#: help.c:147 +#: help.c:146 #, c-format msgid "Report bugs to .\n" msgstr "오류보고: .\n" -#: help.c:173 +#: help.c:172 #, c-format msgid "General\n" msgstr "일반\n" -#: help.c:174 +#: help.c:173 #, c-format msgid "" " \\copyright show PostgreSQL usage and distribution terms\n" msgstr " \\copyright PostgreSQL 사용법 및 저작권 정보 표시\n" +#: help.c:174 +#, c-format +msgid "" +" \\crosstabview [COLUMNS] execute query and display results in crosstab\n" +msgstr "" +" \\crosstabview [칼럼들] 쿼리를 실행하고, 피봇 테이블 형태로 자료를 보여줌\n" + #: help.c:175 #, c-format msgid "" @@ -2281,8 +2451,7 @@ msgstr "" msgid "" " \\gexec execute query, then execute each value in its " "result\n" -msgstr "" -" \\gexec 쿼리를 실행하고, 그 결과를 각각 실행 함\n" +msgstr " \\gexec 쿼리를 실행하고, 그 결과를 각각 실행 함\n" #: help.c:178 #, c-format @@ -2292,15 +2461,14 @@ msgstr " \\gset [PREFIX] 쿼리 실행 뒤 그 결과를 psql 변수로 #: help.c:179 #, c-format -msgid " \\q quit psql\n" -msgstr " \\q psql 종료\n" +msgid " \\gx [FILE] as \\g, but forces expanded output mode\n" +msgstr "" +" \\gx [FILE] \\g 명령과 같으나, 출력을 확장 모드로 강제함\n" #: help.c:180 #, c-format -msgid "" -" \\crosstabview [COLUMNS] execute query and display results in crosstab\n" -msgstr "" -" \\crosstabview [칼럼들] 쿼리를 실행하고, 피봇 테이블 형태로 자료를 보여줌\n" +msgid " \\q quit psql\n" +msgstr " \\q psql 종료\n" #: help.c:181 #, c-format @@ -2424,242 +2592,280 @@ msgstr " \\qecho [STRING] 문자열을 쿼리 출력 스트림에 기록 #: help.c:213 #, c-format +msgid "Conditional\n" +msgstr "조건문\n" + +#: help.c:214 +#, c-format +msgid " \\if EXPR begin conditional block\n" +msgstr " \\if EXPR 조건문 시작\n" + +#: help.c:215 +#, c-format +msgid "" +" \\elif EXPR alternative within current conditional block\n" +msgstr " \\elif EXPR else if 구문 시작\n" + +#: help.c:216 +#, c-format +msgid "" +" \\else final alternative within current conditional " +"block\n" +msgstr " \\else 조건문의 그 외 조건\n" + +#: help.c:217 +#, c-format +msgid " \\endif end conditional block\n" +msgstr " \\endif 조건문 끝\n" + +#: help.c:220 +#, c-format msgid "Informational\n" msgstr "정보보기\n" -#: help.c:214 +#: help.c:221 #, c-format msgid " (options: S = show system objects, + = additional detail)\n" msgstr " (옵션: S = 시스템 개체 표시, + = 추가 상세 정보)\n" -#: help.c:215 +#: help.c:222 #, c-format msgid " \\d[S+] list tables, views, and sequences\n" msgstr " \\d[S+] 테이블, 뷰 및 시퀀스 목록\n" -#: help.c:216 +#: help.c:223 #, c-format msgid " \\d[S+] NAME describe table, view, sequence, or index\n" msgstr " \\d[S+] NAME 테이블, 뷰, 시퀀스 또는 인덱스 설명\n" -#: help.c:217 +#: help.c:224 #, c-format msgid " \\da[S] [PATTERN] list aggregates\n" msgstr " \\da[S] [PATTERN] 집계 함수 목록\n" -#: help.c:218 +#: help.c:225 #, c-format msgid " \\dA[+] [PATTERN] list access methods\n" msgstr " \\dA[+] [PATTERN] 접근 방법 목록\n" -#: help.c:219 +#: help.c:226 #, c-format msgid " \\db[+] [PATTERN] list tablespaces\n" msgstr " \\db[+] [PATTERN] 테이블스페이스 목록\n" -#: help.c:220 +#: help.c:227 #, c-format msgid " \\dc[S+] [PATTERN] list conversions\n" msgstr " \\dc[S+] [PATTERN] 문자셋 변환자 목록\n" -#: help.c:221 +#: help.c:228 #, c-format msgid " \\dC[+] [PATTERN] list casts\n" msgstr " \\dC[+] [PATTERN] 자료형 변환자 목록\n" -#: help.c:222 +#: help.c:229 #, c-format msgid "" " \\dd[S] [PATTERN] show object descriptions not displayed elsewhere\n" msgstr "" " \\dd[S] [PATTERN] 다른 곳에서는 볼 수 없는 객체 설명을 보여줌\n" -#: help.c:223 +#: help.c:230 +#, c-format +msgid " \\dD[S+] [PATTERN] list domains\n" +msgstr " \\dD[S+] [PATTERN] 도메인 목록\n" + +#: help.c:231 #, c-format msgid " \\ddp [PATTERN] list default privileges\n" msgstr " \\ddp [PATTERN] 
기본 접근권한 목록\n" -#: help.c:224 +#: help.c:232 #, c-format -msgid " \\dD[S+] [PATTERN] list domains\n" -msgstr " \\dD[S+] [PATTERN] 도메인 목록\n" +msgid " \\dE[S+] [PATTERN] list foreign tables\n" +msgstr " \\dE[S+] [PATTERN] 외부 테이블 목록\n" -#: help.c:225 +#: help.c:233 #, c-format msgid " \\det[+] [PATTERN] list foreign tables\n" msgstr " \\det[+] [PATTERN] 외부 테이블 목록\n" -#: help.c:226 +#: help.c:234 #, c-format msgid " \\des[+] [PATTERN] list foreign servers\n" msgstr " \\des[+] [PATTERN] 외부 서버 목록\n" -#: help.c:227 +#: help.c:235 #, c-format msgid " \\deu[+] [PATTERN] list user mappings\n" msgstr " \\deu[+] [PATTERN] 사용자 매핑 목록\n" -#: help.c:228 +#: help.c:236 #, c-format msgid " \\dew[+] [PATTERN] list foreign-data wrappers\n" msgstr " \\dew[+] [PATTERN] 외부 데이터 래퍼 목록\n" -#: help.c:229 +#: help.c:237 #, c-format msgid "" " \\df[antw][S+] [PATRN] list [only agg/normal/trigger/window] functions\n" msgstr " \\df[antw][S+] [PATRN] [only agg/normal/trigger/window] 함수 목록\n" -#: help.c:230 +#: help.c:238 #, c-format msgid " \\dF[+] [PATTERN] list text search configurations\n" msgstr " \\dF[+] [PATTERN] 텍스트 검색 구성 목록\n" -#: help.c:231 +#: help.c:239 #, c-format msgid " \\dFd[+] [PATTERN] list text search dictionaries\n" msgstr " \\dFd[+] [PATTERN] 텍스트 검색 사전 목록\n" -#: help.c:232 +#: help.c:240 #, c-format msgid " \\dFp[+] [PATTERN] list text search parsers\n" msgstr " \\dFp[+] [PATTERN] 텍스트 검색 파서 목록\n" -#: help.c:233 +#: help.c:241 #, c-format msgid " \\dFt[+] [PATTERN] list text search templates\n" msgstr " \\dFt[+] [PATTERN] 텍스트 검색 템플릿 목록\n" -#: help.c:234 +#: help.c:242 #, c-format msgid " \\dg[S+] [PATTERN] list roles\n" msgstr " \\dg[S+] [PATTERN] 롤 목록\n" -#: help.c:235 +#: help.c:243 #, c-format msgid " \\di[S+] [PATTERN] list indexes\n" msgstr " \\di[S+] [PATTERN] 인덱스 목록\n" -#: help.c:236 +#: help.c:244 #, c-format msgid " \\dl list large objects, same as \\lo_list\n" msgstr " \\dl 큰 개체 목록, \\lo_list 명령과 같음\n" -#: help.c:237 +#: help.c:245 #, c-format msgid " \\dL[S+] [PATTERN] list procedural languages\n" msgstr " \\dL[S+] [PATTERN] 프로시져 언어 목록\n" -#: help.c:238 +#: help.c:246 #, c-format msgid " \\dm[S+] [PATTERN] list materialized views\n" msgstr " \\dm[S+] [PATTERN] materialized 뷰 목록\n" -#: help.c:239 +#: help.c:247 #, c-format msgid " \\dn[S+] [PATTERN] list schemas\n" msgstr " \\dn[S+] [PATTERN] 스키마 목록\n" -#: help.c:240 +#: help.c:248 #, c-format msgid " \\do[S] [PATTERN] list operators\n" msgstr " \\do[S] [PATTERN] 연산자 목록\n" -#: help.c:241 +#: help.c:249 #, c-format msgid " \\dO[S+] [PATTERN] list collations\n" msgstr " \\dO[S+] [PATTERN] collation 목록\n" -#: help.c:242 +#: help.c:250 #, c-format msgid "" " \\dp [PATTERN] list table, view, and sequence access privileges\n" msgstr " \\dp [PATTERN] 테이블, 뷰 및 시퀀스 액세스 권한 목록\n" -#: help.c:243 +#: help.c:251 #, c-format msgid " \\drds [PATRN1 [PATRN2]] list per-database role settings\n" msgstr " \\drds [PATRN1 [PATRN2]] 데이터베이스별 롤 설정 목록\n" -#: help.c:244 +#: help.c:252 +#, c-format +msgid " \\dRp[+] [PATTERN] list replication publications\n" +msgstr " \\dRp[+] [PATTERN] 복제 발행 목록\n" + +#: help.c:253 +#, c-format +msgid " \\dRs[+] [PATTERN] list replication subscriptions\n" +msgstr " \\dRs[+] [PATTERN] 복제 구독 목록\n" + +#: help.c:254 #, c-format msgid " \\ds[S+] [PATTERN] list sequences\n" msgstr " \\ds[S+] [PATTERN] 시퀀스 목록\n" -#: help.c:245 +#: help.c:255 #, c-format msgid " \\dt[S+] [PATTERN] list tables\n" msgstr " \\dt[S+] [PATTERN] 테이블 목록\n" -#: help.c:246 +#: help.c:256 #, c-format msgid " \\dT[S+] [PATTERN] list data types\n" msgstr " \\dT[S+] 
[PATTERN] 데이터 형식 목록\n" -#: help.c:247 +#: help.c:257 #, c-format msgid " \\du[S+] [PATTERN] list roles\n" msgstr " \\du[S+] [PATTERN] 롤 목록\n" -#: help.c:248 +#: help.c:258 #, c-format msgid " \\dv[S+] [PATTERN] list views\n" msgstr " \\dv[S+] [PATTERN] 뷰 목록\n" -#: help.c:249 -#, c-format -msgid " \\dE[S+] [PATTERN] list foreign tables\n" -msgstr " \\dE[S+] [PATTERN] 외부 테이블 목록\n" - -#: help.c:250 +#: help.c:259 #, c-format msgid " \\dx[+] [PATTERN] list extensions\n" msgstr " \\dx[+] [PATTERN] 확장 모듈 목록\n" -#: help.c:251 +#: help.c:260 #, c-format msgid " \\dy [PATTERN] list event triggers\n" msgstr " \\dy [PATTERN] 이벤트 트리거 목록\n" -#: help.c:252 +#: help.c:261 #, c-format msgid " \\l[+] [PATTERN] list databases\n" msgstr " \\l[+] [PATTERN] 데이터베이스 목록\n" -#: help.c:253 +#: help.c:262 #, c-format msgid " \\sf[+] FUNCNAME show a function's definition\n" msgstr " \\sf[+] 함수이름 함수 정의 보기\n" -#: help.c:254 +#: help.c:263 #, c-format msgid " \\sv[+] VIEWNAME show a view's definition\n" msgstr " \\sv[+] 뷰이름 뷰 정의 보기\n" -#: help.c:255 +#: help.c:264 #, c-format msgid " \\z [PATTERN] same as \\dp\n" msgstr " \\z [PATTERN] \\dp와 같음\n" -#: help.c:258 +#: help.c:267 #, c-format msgid "Formatting\n" msgstr "출력 형식\n" -#: help.c:259 +#: help.c:268 #, c-format msgid "" " \\a toggle between unaligned and aligned output mode\n" msgstr "" " \\a 정렬되지 않은 출력 모드와 정렬된 출력 모드 전환\n" -#: help.c:260 +#: help.c:269 #, c-format msgid " \\C [STRING] set table title, or unset if none\n" msgstr "" " \\C [STRING] 테이블 제목 설정 또는 값이 없는 경우 설정 안 함\n" -#: help.c:261 +#: help.c:270 #, c-format msgid "" " \\f [STRING] show or set field separator for unaligned query " @@ -2667,35 +2873,40 @@ msgid "" msgstr "" " \\f [STRING] unaligned 출력에 대해 필드 구분자 표시 또는 설정\n" -#: help.c:262 +#: help.c:271 #, c-format msgid " \\H toggle HTML output mode (currently %s)\n" msgstr " \\H HTML 출력 모드 전환(현재 %s)\n" -#: help.c:264 +#: help.c:273 #, c-format msgid "" " \\pset [NAME [VALUE]] set table output option\n" -" (NAME := {format|border|expanded|fieldsep|" -"fieldsep_zero|footer|null|\n" -" numericlocale|recordsep|recordsep_zero|tuples_only|" -"title|tableattr|pager|\n" -" unicode_border_linestyle|unicode_column_linestyle|" +" (NAME := {border|columns|expanded|fieldsep|" +"fieldsep_zero|\n" +" footer|format|linestyle|null|numericlocale|pager|\n" +" pager_min_lines|recordsep|recordsep_zero|tableattr|" +"title|\n" +" tuples_only|unicode_border_linestyle|\n" +" unicode_column_linestyle|" "unicode_header_linestyle})\n" msgstr "" -" \\pset [NAME [VALUE]] 표 출력 옵션\n" -" (NAME := {format|border|expanded|fieldsep|\n" -" fieldsep_zero|footer|null|numericlocale|recordsep|\n" -" recordsep_zero|tuples_only|title|tableattr|pager|\n" -" unicode_border_linestyle|unicode_column_linestyle|\n" -" unicode_header_linestyle})\n" +" \\pset [이름 [값]] 테이블 출력 옵션 설정\n" +" (이름 := {border|columns|expanded|fieldsep|" +"fieldsep_zero|\n" +" footer|format|linestyle|null|numericlocale|pager|\n" +" pager_min_lines|recordsep|recordsep_zero|tableattr|" +"title|\n" +" tuples_only|unicode_border_linestyle|\n" +" unicode_column_linestyle|" +"unicode_header_linestyle})\n" -#: help.c:268 +#: help.c:279 #, c-format msgid " \\t [on|off] show only rows (currently %s)\n" msgstr " \\t [on|off] 행만 표시(현재 %s)\n" -#: help.c:270 +#: help.c:281 #, c-format msgid "" " \\T [STRING] set HTML
<table> tag attributes, or unset if none\n" @@ -2703,17 +2914,17 @@ msgstr "" " \\T [STRING] HTML <table>
태그 속성 설정 또는 비었는 경우 설정 " "안 함\n" -#: help.c:271 +#: help.c:282 #, c-format msgid " \\x [on|off|auto] toggle expanded output (currently %s)\n" msgstr " \\x [on|off|auto] 확장된 출력 전환 (현재 %s)\n" -#: help.c:275 +#: help.c:286 #, c-format msgid "Connection\n" msgstr "연결\n" -#: help.c:277 +#: help.c:288 #, c-format msgid "" " \\c[onnect] {[DBNAME|- USER|- HOST|- PORT|-] | conninfo}\n" @@ -2722,7 +2933,7 @@ msgstr "" " \\c[onnect] {[DBNAME|- USER|- HOST|- PORT|-] | conninfo}\n" " 새 데이터베이스에 접속 (현재 \"%s\")\n" -#: help.c:281 +#: help.c:292 #, c-format msgid "" " \\c[onnect] {[DBNAME|- USER|- HOST|- PORT|-] | conninfo}\n" @@ -2731,61 +2942,61 @@ msgstr "" " \\c[onnect] {[DBNAME|- USER|- HOST|- PORT|-] | conninfo}\n" " 새 데이터베이스에 접속 (현재 접속해 있지 않음)\n" -#: help.c:283 +#: help.c:294 +#, c-format +msgid "" +" \\conninfo display information about current connection\n" +msgstr " \\conninfo 현재 데이터베이스 접속 정보 보기\n" + +#: help.c:295 #, c-format msgid " \\encoding [ENCODING] show or set client encoding\n" msgstr " \\encoding [ENCODING] 클라이언트 인코딩 표시 또는 설정\n" -#: help.c:284 +#: help.c:296 #, c-format msgid " \\password [USERNAME] securely change the password for a user\n" msgstr " \\password [USERNAME] 사용자 암호를 안전하게 변경\n" -#: help.c:285 -#, c-format -msgid "" -" \\conninfo display information about current connection\n" -msgstr " \\conninfo 현재 데이터베이스 접속 정보 보기\n" - -#: help.c:288 +#: help.c:299 #, c-format msgid "Operating System\n" msgstr "운영 체제\n" -#: help.c:289 +#: help.c:300 #, c-format msgid " \\cd [DIR] change the current working directory\n" msgstr " \\cd [DIR] 현재 작업 디렉터리 변경\n" -#: help.c:290 +#: help.c:301 #, c-format msgid " \\setenv NAME [VALUE] set or unset environment variable\n" msgstr " \\setenv NAME [VALUE] 환경 변수 지정 및 해제\n" -#: help.c:291 +#: help.c:302 #, c-format msgid " \\timing [on|off] toggle timing of commands (currently %s)\n" msgstr " \\timing [on|off] 명령 실행 시간 전환(현재 %s)\n" -#: help.c:293 +#: help.c:304 #, c-format msgid "" " \\! [COMMAND] execute command in shell or start interactive " "shell\n" msgstr " \\! 
[COMMAND] 셸 명령 실행 또는 대화식 셸 시작\n" -#: help.c:296 +#: help.c:307 #, c-format msgid "Variables\n" msgstr "변수\n" -#: help.c:297 +#: help.c:308 #, c-format msgid " \\prompt [TEXT] NAME prompt user to set internal variable\n" msgstr "" " \\prompt [TEXT] NAME 사용자에게 내부 변수를 설정하라는 메시지 표시\n" -#: help.c:298 +#: help.c:309 #, c-format msgid "" " \\set [NAME [VALUE]] set internal variable, or list all if no " @@ -2794,17 +3005,17 @@ msgstr "" " \\set [NAME [VALUE]] 내부 변수 설정 또는 미지정 경우 모든 변수 목록 표" "시\n" -#: help.c:299 +#: help.c:310 #, c-format msgid " \\unset NAME unset (delete) internal variable\n" msgstr " \\unset NAME 내부 변수 설정 해제(삭제)\n" -#: help.c:302 +#: help.c:313 #, c-format msgid "Large Objects\n" msgstr "큰 개체\n" -#: help.c:303 +#: help.c:314 #, c-format msgid "" " \\lo_export LOBOID FILE\n" @@ -2817,19 +3028,19 @@ msgstr "" " \\lo_list\n" " \\lo_unlink LOBOID 큰 개체 작업\n" -#: help.c:330 +#: help.c:341 #, c-format msgid "" "List of specially treated variables\n" "\n" msgstr "특별한 기능 설정 변수 목록\n" -#: help.c:332 +#: help.c:343 #, c-format msgid "psql variables:\n" msgstr "psql 변수들:\n" -#: help.c:334 +#: help.c:345 #, c-format msgid "" " psql --set=NAME=VALUE\n" @@ -2840,167 +3051,211 @@ msgstr "" " 또는 psql 명령 모드에서는 \\set NAME VALUE\n" "\n" -#: help.c:336 +#: help.c:347 #, c-format msgid "" " AUTOCOMMIT if set, successful SQL commands are automatically " "committed\n" msgstr " AUTOCOMMIT 지정 하면 SQL 명령이 성공하면 자동으로 커밋\n" -#: help.c:337 +#: help.c:348 #, c-format msgid "" " COMP_KEYWORD_CASE determines the case used to complete SQL key words\n" " [lower, upper, preserve-lower, preserve-upper]\n" msgstr "" +" COMP_KEYWORD_CASE SQL 키워드 자동완성에서 대소문자 처리\n" +" [lower, upper, preserve-lower, preserve-upper]\n" -#: help.c:339 +#: help.c:350 #, c-format msgid " DBNAME the currently connected database name\n" msgstr " DBNAME 현재 접속한 데이터베이스 이름\n" -#: help.c:340 +#: help.c:351 #, c-format msgid "" " ECHO controls what input is written to standard output\n" " [all, errors, none, queries]\n" msgstr "" +" ECHO 입력을 표준 출력으로 보낼 종류\n" +" [all, errors, none, queries]\n" -#: help.c:342 +#: help.c:353 #, c-format msgid "" " ECHO_HIDDEN if set, display internal queries executed by backslash " "commands;\n" " if set to \"noexec\", just show without execution\n" msgstr "" +" ECHO_HIDDEN 지정 되면 psql 내장 명령어의 내부 쿼리를 출력함;\n" +" \"noexec\" 값으로 설정하면, 실행되지 않고 쿼리만 보여" +"줌\n" -#: help.c:344 +#: help.c:355 #, c-format msgid " ENCODING current client character set encoding\n" -msgstr " ENCODING 현재 클라이언트 인코딩\n" +msgstr " ENCODING 현재 클라이언트 인코딩 지정\n" -#: help.c:345 +#: help.c:356 #, c-format msgid "" " FETCH_COUNT the number of result rows to fetch and display at a " "time\n" " (default: 0=unlimited)\n" msgstr "" +" FETCH_COUNT 쿼리 결과에 대해서 출력할 최대 로우 개수\n" +" (기본값: 0=제한없음)\n" -#: help.c:347 +#: help.c:358 #, c-format msgid "" " HISTCONTROL controls command history [ignorespace, ignoredups, " "ignoreboth]\n" msgstr "" +" HISTCONTROL 명령 내역 처리 방법 [ignorespace, ignoredups, " +"ignoreboth]\n" -#: help.c:348 +#: help.c:359 #, c-format msgid " HISTFILE file name used to store the command history\n" -msgstr "" +msgstr " HISTFILE 명령 내역을 저장할 파일 이름\n" -#: help.c:349 +#: help.c:360 #, c-format msgid "" -" HISTSIZE the number of commands to store in the command history\n" -msgstr "" +" HISTSIZE max number of commands to store in the command history\n" +msgstr " HISTSIZE 명령 내역 최대 보관 개수\n" -#: help.c:350 +#: help.c:361 #, c-format msgid " HOST the currently connected database server host\n" msgstr " HOST 현재 접속한 데이터베이스 서버\n" -#: help.c:351 +#: help.c:362 #, c-format 
msgid "" -" IGNOREEOF if unset, sending an EOF to interactive session " -"terminates application\n" -msgstr "" +" IGNOREEOF number of EOFs needed to terminate an interactive " +"session\n" +msgstr " IGNOREEOF 대화형 세션 종료를 위한 EOF 개수\n" -#: help.c:352 +#: help.c:363 #, c-format msgid " LASTOID value of the last affected OID\n" msgstr " LASTOID 마지막 영향 받은 OID\n" -#: help.c:353 +#: help.c:364 #, c-format msgid "" " ON_ERROR_ROLLBACK if set, an error doesn't stop a transaction (uses " "implicit savepoints)\n" msgstr "" +" ON_ERROR_ROLLBACK 설정하면 오류 발생시에도 트랜잭션 중지 안함 (savepoint\n" +" 암묵적 사용)\n" -#: help.c:354 +#: help.c:365 #, c-format msgid " ON_ERROR_STOP stop batch execution after error\n" -msgstr "" +msgstr " ON_ERROR_STOP 배치 작업 시 오류가 발생하면 중지함\n" -#: help.c:355 +#: help.c:366 #, c-format msgid " PORT server port of the current connection\n" msgstr " PORT 현재 접속한 서버 포트\n" -#: help.c:356 +#: help.c:367 #, c-format msgid " PROMPT1 specifies the standard psql prompt\n" -msgstr "" +msgstr " PROMPT1 기본 psql 프롬프트 정의\n" -#: help.c:357 +#: help.c:368 #, c-format msgid "" " PROMPT2 specifies the prompt used when a statement continues " "from a previous line\n" -msgstr "" +msgstr " PROMPT2 아직 구문이 덜 끝난 명령행의 프롬프트\n" -#: help.c:358 +#: help.c:369 #, c-format msgid "" " PROMPT3 specifies the prompt used during COPY ... FROM STDIN\n" -msgstr "" +msgstr " PROMPT3 COPY ... FROM STDIN 작업시 보일 프롬프트\n" -#: help.c:359 +#: help.c:370 #, c-format msgid " QUIET run quietly (same as -q option)\n" msgstr " QUIET 조용히 실행 (-q 옵션과 같음)\n" -#: help.c:360 +#: help.c:371 +#, c-format +msgid " SERVER_VERSION_NAME server's version (short string)\n" +msgstr "" + +#: help.c:372 +#, c-format +msgid " SERVER_VERSION_NUM server's version (numeric format)\n" +msgstr "" + +#: help.c:373 #, c-format msgid "" " SHOW_CONTEXT controls display of message context fields [never, " "errors, always]\n" msgstr "" +" SHOW_CONTEXT 상황별 자세한 메시지 내용 출력 제어 [never, errors,\n" +" always]\n" -#: help.c:361 +#: help.c:374 #, c-format msgid "" " SINGLELINE end of line terminates SQL command mode (same as -S " "option)\n" -msgstr "" +msgstr " SINGLELINE 한 줄에 하나의 SQL 명령 실행 (-S 옵션과 같음)\n" -#: help.c:362 +#: help.c:375 #, c-format msgid " SINGLESTEP single-step mode (same as -s option)\n" -msgstr "" +msgstr " SINGLESTEP 각 명령을 확인하며 실행 (-s 옵션과 같음)\n" -#: help.c:363 +#: help.c:376 #, c-format msgid " USER the currently connected database user\n" msgstr " USER 현재 접속한 데이터베이스 사용자\n" -#: help.c:364 +#: help.c:377 #, c-format msgid "" " VERBOSITY controls verbosity of error reports [default, verbose, " "terse]\n" msgstr "" +" VERBOSITY 오류 출력시 자세히 볼 내용 범위 [default, verbose, " +"terse]\n" + +#: help.c:378 +#, c-format +msgid " VERSION psql's version (verbose string)\n" +msgstr "" -#: help.c:366 +#: help.c:379 +#, c-format +msgid " VERSION_NAME psql's version (short string)\n" +msgstr "" + +#: help.c:380 +#, c-format +msgid " VERSION_NUM psql's version (numeric format)\n" +msgstr "" + +#: help.c:382 #, c-format msgid "" "\n" "Display settings:\n" -msgstr "\n출력 설정들:\n" +msgstr "" +"\n" +"출력 설정들:\n" -#: help.c:368 +#: help.c:384 #, c-format msgid "" " psql --pset=NAME[=VALUE]\n" @@ -3011,43 +3266,41 @@ msgstr "" " 또는 psql 명령 모드에서는 \\pset NAME [VALUE]\n" "\n" -#: help.c:370 +#: help.c:386 #, c-format msgid " border border style (number)\n" msgstr " border 테두리 모양 (숫자)\n" -#: help.c:371 +#: help.c:387 #, c-format msgid " columns target width for the wrapped format\n" msgstr " columns 줄바꿈을 위한 너비 지정\n" -#: help.c:372 +#: help.c:388 #, c-format msgid " expanded (or x) expanded output 
[on, off, auto]\n" msgstr " expanded (또는 x) 확장된 출력 전환 [on, off, auto]\n" -#: help.c:373 +#: help.c:389 #, c-format msgid "" " fieldsep field separator for unaligned output (default \"%s\")\n" -msgstr "" -" fieldsep unaligned 출력용 필드 구분자 (초기값 \"%s\"')\n" +msgstr " fieldsep unaligned 출력용 필드 구분자 (초기값 \"%s\"')\n" -#: help.c:374 +#: help.c:390 #, c-format msgid "" " fieldsep_zero set field separator for unaligned output to zero byte\n" -msgstr "" -" fieldsep_zero unaligned 출력용 필드 구분자를 0 바이트로 지정\n" +msgstr " fieldsep_zero unaligned 출력용 필드 구분자를 0 바이트로 지정\n" -#: help.c:375 +#: help.c:391 #, c-format msgid "" " footer enable or disable display of the table footer [on, " "off]\n" msgstr " footer 테이블 꼬리말 보이기 전환 [on, off]\n" -#: help.c:376 +#: help.c:392 #, c-format msgid "" " format set output format [unaligned, aligned, wrapped, html, " @@ -3056,47 +3309,50 @@ msgstr "" " format 출력 양식 지정 [unaligned, aligned, wrapped, html, " "asciidoc, ...]\n" -#: help.c:377 +#: help.c:393 #, c-format msgid "" " linestyle set the border line drawing style [ascii, old-ascii, " "unicode]\n" -msgstr "" +msgstr " linestyle 테두리 선 모양 지정 [ascii, old-ascii, unicode]\n" -#: help.c:378 +#: help.c:394 #, c-format msgid "" " null set the string to be printed in place of a null value\n" -msgstr "" +msgstr " null null 값 출력 방법\n" -#: help.c:379 +#: help.c:395 #, c-format msgid "" " numericlocale enable or disable display of a locale-specific " "character to separate\n" " groups of digits [on, off]\n" msgstr "" +" numericlocale 숫자 출력에서 로케일 기반 천자리 분리 문자 활성화\n" +" [on, off]\n" -#: help.c:381 +#: help.c:397 #, c-format msgid "" " pager control when an external pager is used [yes, no, " "always]\n" msgstr "" +" pager 외부 페이지 단위 보기 도구 사용 여부 [yes, no, always]\n" -#: help.c:382 +#: help.c:398 #, c-format msgid " recordsep record (line) separator for unaligned output\n" msgstr " recordsep unaligned 출력용 레코드(줄) 구분자\n" -#: help.c:383 +#: help.c:399 #, c-format msgid "" " recordsep_zero set record separator for unaligned output to zero byte\n" msgstr "" " recordsep_zero unaligned 출력용 레코드 구분자를 0 바이트로 지정\n" -#: help.c:384 +#: help.c:400 #, c-format msgid "" " tableattr (or T) specify attributes for table tag in html format or " @@ -3104,20 +3360,23 @@ msgid "" " column widths for left-aligned data types in latex-" "longtable format\n" msgstr "" +" tableattr (또는 T) html 테이블 태그에 대한 속성이나,\n" +" latex-longtable 양식에서 왼쪽 정렬 자료용 칼럼 넓이 지" +"정\n" -#: help.c:386 +#: help.c:402 #, c-format msgid "" " title set the table title for any subsequently printed " "tables\n" msgstr " title 테이블 제목 지정\n" -#: help.c:387 +#: help.c:403 #, c-format msgid " tuples_only if set, only actual table data is shown\n" msgstr " tuples_only 지정되면, 자료만 보임\n" -#: help.c:388 +#: help.c:404 #, c-format msgid "" " unicode_border_linestyle\n" @@ -3130,7 +3389,7 @@ msgstr "" " unicode_header_linestyle\n" " 유니코드 선 종류 [single, double]\n" -#: help.c:393 +#: help.c:409 #, c-format msgid "" "\n" @@ -3139,7 +3398,7 @@ msgstr "" "\n" "OS 환경 변수들:\n" -#: help.c:397 +#: help.c:413 #, c-format msgid "" " NAME=VALUE [NAME=VALUE] psql ...\n" @@ -3150,7 +3409,7 @@ msgstr "" " 또는 psql 명령 모드에서는 \\setenv NAME [VALUE]\n" "\n" -#: help.c:399 +#: help.c:415 #, c-format msgid "" " set NAME=VALUE\n" @@ -3163,53 +3422,53 @@ msgstr "" " 또는 psql 명령 모드에서는 \\setenv NAME [VALUE]\n" "\n" -#: help.c:402 +#: help.c:418 #, c-format msgid " COLUMNS number of columns for wrapped format\n" msgstr " COLUMNS 다음 줄로 넘어갈 칼럼 수\n" -#: help.c:403 +#: help.c:419 #, c-format msgid " PAGER name of external pager program\n" msgstr " 
PAGER 페이지 단위 보기에서 사용할 프로그램\n" -#: help.c:404 +#: help.c:420 #, c-format msgid "" " PGAPPNAME same as the application_name connection parameter\n" msgstr " PGAPPNAME application_name 변수값으로 사용됨\n" -#: help.c:405 +#: help.c:421 #, c-format msgid " PGDATABASE same as the dbname connection parameter\n" msgstr " PGDATABASE 접속할 데이터베이스 이름\n" -#: help.c:406 +#: help.c:422 #, c-format msgid " PGHOST same as the host connection parameter\n" msgstr " PGHOST 서버 접속용 호스트 이름\n" -#: help.c:407 -#, c-format -msgid " PGPORT same as the port connection parameter\n" -msgstr " PGPORT 서버 접속용 포트\n" - -#: help.c:408 -#, c-format -msgid " PGUSER same as the user connection parameter\n" -msgstr " PGUSER 서버 접속용 데이터베이스 사용자 이름\n" - -#: help.c:409 +#: help.c:423 #, c-format msgid " PGPASSWORD connection password (not recommended)\n" msgstr " PGPASSWORD 서버 접속 비밀번호 (보안에 취약함)\n" -#: help.c:410 +#: help.c:424 #, c-format msgid " PGPASSFILE password file name\n" msgstr " PGPASSFILE 서버 접속용 비밀번호가 저장된 파일 이름\n" -#: help.c:411 +#: help.c:425 +#, c-format +msgid " PGPORT same as the port connection parameter\n" +msgstr " PGPORT 서버 접속용 포트\n" + +#: help.c:426 +#, c-format +msgid " PGUSER same as the user connection parameter\n" +msgstr " PGUSER 서버 접속용 데이터베이스 사용자 이름\n" + +#: help.c:427 #, c-format msgid "" " PSQL_EDITOR, EDITOR, VISUAL\n" @@ -3218,7 +3477,7 @@ msgstr "" " PSQL_EDITOR, EDITOR, VISUAL\n" " \\e, \\ef, \\ev 명령에서 사용할 외부 편집기 경로\n" -#: help.c:413 +#: help.c:429 #, c-format msgid "" " PSQL_EDITOR_LINENUMBER_ARG\n" @@ -3227,32 +3486,32 @@ msgstr "" " PSQL_EDITOR_LINENUMBER_ARG\n" " 외부 편집기 호출 시 사용할 줄번호 선택 옵션\n" -#: help.c:415 +#: help.c:431 #, c-format msgid "" " PSQL_HISTORY alternative location for the command history file\n" msgstr " PSQL_HISTORY 사용자 .psql_history 파일 임의 지정\n" -#: help.c:416 +#: help.c:432 #, c-format msgid " PSQLRC alternative location for the user's .psqlrc file\n" msgstr " PSQLRC 사용자 .psqlrc 파일의 임의 지정\n" -#: help.c:417 +#: help.c:433 #, c-format msgid " SHELL shell used by the \\! command\n" msgstr " SHELL \\! 명령에서 사용할 쉘\n" -#: help.c:418 +#: help.c:434 #, c-format msgid " TMPDIR directory for temporary files\n" msgstr " TMPDIR 임시 파일을 사용할 디렉터리\n" -#: help.c:461 +#: help.c:477 msgid "Available help:\n" msgstr "사용 가능한 도움말:\n" -#: help.c:545 +#: help.c:561 #, c-format msgid "" "Command: %s\n" @@ -3267,7 +3526,7 @@ msgstr "" "%s\n" "\n" -#: help.c:561 +#: help.c:577 #, c-format msgid "" "No help available for \"%s\".\n" @@ -3312,24 +3571,32 @@ msgstr "ID" #: large_obj.c:308 msgid "Large objects" -msgstr "Large objects" +msgstr "대형 객체들" + +#: mainloop.c:136 +#, c-format +msgid "\\if: escaped\n" +msgstr "\\if: escaped\n" -#: mainloop.c:168 +#: mainloop.c:183 #, c-format msgid "Use \"\\q\" to leave %s.\n" msgstr "마치려면 \"\\q\"를 입력하세요: %s\n" -#: mainloop.c:190 +#: mainloop.c:205 msgid "" "The input is a PostgreSQL custom-format dump.\n" "Use the pg_restore command-line client to restore this dump to a database.\n" msgstr "" +"이 입력은 PostgreSQL 사용자양식 덤프 내용입니다.\n" +"이 덤프 내용을 데이터베이스에 반영하려면,\n" +"pg_restore 명령행 클라이언트를 사용하세요.\n" -#: mainloop.c:210 +#: mainloop.c:225 msgid "You are using psql, the command-line interface to PostgreSQL." msgstr "PostgreSQL에 대한 명령행 인터페이스인 psql을 사용하고 있습니다." 
-#: mainloop.c:211 +#: mainloop.c:226 #, c-format msgid "" "Type: \\copyright for distribution terms\n" @@ -3344,2053 +3611,2189 @@ msgstr "" " \\g 또는 명령 끝에 세미콜론(;) 쿼리 실행\n" " \\q 종료\n" -#: psqlscan.l:713 +#: mainloop.c:339 mainloop.c:476 #, c-format -msgid "skipping recursive expansion of variable \"%s\"\n" -msgstr "\"%s\" 변수의 재귀적 확장을 건너뛰는 중\n" +msgid "query ignored; use \\endif or Ctrl-C to exit current \\if block\n" +msgstr "" +"쿼리 무시됨; 현재 \\if 블록을 끝내려면 \\endif 또는 Ctrl-C 키를 사용하세요.\n" + +#: mainloop.c:494 +#, c-format +msgid "reached EOF without finding closing \\endif(s)\n" +msgstr "\\endif 없이 EOF 도달\n" -#: psqlscanslash.l:584 +#: psqlscanslash.l:615 #, c-format msgid "unterminated quoted string\n" msgstr "인용 부호 짝 맞지 않음\n" -#: psqlscanslash.l:738 +#: psqlscanslash.l:788 #, c-format msgid "%s: out of memory\n" msgstr "%s: 메모리 부족\n" -#: sql_help.c:36 sql_help.c:39 sql_help.c:42 sql_help.c:64 sql_help.c:66 -#: sql_help.c:68 sql_help.c:79 sql_help.c:81 sql_help.c:83 sql_help.c:109 -#: sql_help.c:115 sql_help.c:117 sql_help.c:119 sql_help.c:121 sql_help.c:124 -#: sql_help.c:126 sql_help.c:128 sql_help.c:221 sql_help.c:223 sql_help.c:224 -#: sql_help.c:226 sql_help.c:228 sql_help.c:231 sql_help.c:233 sql_help.c:235 -#: sql_help.c:237 sql_help.c:249 sql_help.c:250 sql_help.c:251 sql_help.c:253 -#: sql_help.c:299 sql_help.c:301 sql_help.c:303 sql_help.c:305 sql_help.c:365 -#: sql_help.c:370 sql_help.c:372 sql_help.c:415 sql_help.c:417 sql_help.c:420 -#: sql_help.c:422 sql_help.c:489 sql_help.c:494 sql_help.c:499 sql_help.c:504 -#: sql_help.c:509 sql_help.c:558 sql_help.c:560 sql_help.c:562 sql_help.c:564 -#: sql_help.c:567 sql_help.c:569 sql_help.c:580 sql_help.c:582 sql_help.c:624 -#: sql_help.c:626 sql_help.c:628 sql_help.c:631 sql_help.c:633 sql_help.c:635 -#: sql_help.c:669 sql_help.c:673 sql_help.c:677 sql_help.c:696 sql_help.c:699 -#: sql_help.c:702 sql_help.c:731 sql_help.c:743 sql_help.c:751 sql_help.c:754 -#: sql_help.c:757 sql_help.c:772 sql_help.c:775 sql_help.c:819 sql_help.c:842 -#: sql_help.c:853 sql_help.c:855 sql_help.c:872 sql_help.c:881 sql_help.c:883 -#: sql_help.c:885 sql_help.c:897 sql_help.c:901 sql_help.c:903 sql_help.c:987 -#: sql_help.c:989 sql_help.c:992 sql_help.c:995 sql_help.c:997 sql_help.c:999 -#: sql_help.c:1060 sql_help.c:1062 sql_help.c:1064 sql_help.c:1067 -#: sql_help.c:1088 sql_help.c:1091 sql_help.c:1094 sql_help.c:1097 -#: sql_help.c:1101 sql_help.c:1103 sql_help.c:1105 sql_help.c:1107 -#: sql_help.c:1121 sql_help.c:1124 sql_help.c:1126 sql_help.c:1128 -#: sql_help.c:1138 sql_help.c:1140 sql_help.c:1150 sql_help.c:1152 -#: sql_help.c:1162 sql_help.c:1165 sql_help.c:1186 sql_help.c:1188 -#: sql_help.c:1190 sql_help.c:1193 sql_help.c:1195 sql_help.c:1197 -#: sql_help.c:1247 sql_help.c:1285 sql_help.c:1288 sql_help.c:1290 -#: sql_help.c:1292 sql_help.c:1294 sql_help.c:1296 sql_help.c:1299 -#: sql_help.c:1339 sql_help.c:1544 sql_help.c:1608 sql_help.c:1627 -#: sql_help.c:1640 sql_help.c:1694 sql_help.c:1698 sql_help.c:1708 -#: sql_help.c:1728 sql_help.c:1753 sql_help.c:1771 sql_help.c:1800 -#: sql_help.c:1875 sql_help.c:1917 sql_help.c:1939 sql_help.c:1959 -#: sql_help.c:1960 sql_help.c:1995 sql_help.c:2015 sql_help.c:2037 -#: sql_help.c:2050 sql_help.c:2081 sql_help.c:2106 sql_help.c:2150 -#: sql_help.c:2336 sql_help.c:2349 sql_help.c:2366 sql_help.c:2382 -#: sql_help.c:2421 sql_help.c:2472 sql_help.c:2476 sql_help.c:2478 -#: sql_help.c:2484 sql_help.c:2502 sql_help.c:2529 sql_help.c:2564 -#: sql_help.c:2576 sql_help.c:2585 sql_help.c:2629 sql_help.c:2643 -#: 
sql_help.c:2671 sql_help.c:2679 sql_help.c:2687 sql_help.c:2695 -#: sql_help.c:2703 sql_help.c:2711 sql_help.c:2719 sql_help.c:2727 -#: sql_help.c:2736 sql_help.c:2747 sql_help.c:2755 sql_help.c:2763 -#: sql_help.c:2771 sql_help.c:2779 sql_help.c:2789 sql_help.c:2798 -#: sql_help.c:2807 sql_help.c:2815 sql_help.c:2824 sql_help.c:2832 -#: sql_help.c:2841 sql_help.c:2849 sql_help.c:2857 sql_help.c:2865 -#: sql_help.c:2873 sql_help.c:2881 sql_help.c:2889 sql_help.c:2897 -#: sql_help.c:2905 sql_help.c:2922 sql_help.c:2931 sql_help.c:2939 -#: sql_help.c:2956 sql_help.c:2971 sql_help.c:3236 sql_help.c:3287 -#: sql_help.c:3316 sql_help.c:3324 sql_help.c:3743 sql_help.c:3791 -#: sql_help.c:3932 +#: sql_help.c:36 sql_help.c:39 sql_help.c:42 sql_help.c:66 sql_help.c:67 +#: sql_help.c:69 sql_help.c:71 sql_help.c:82 sql_help.c:84 sql_help.c:86 +#: sql_help.c:112 sql_help.c:118 sql_help.c:120 sql_help.c:122 sql_help.c:124 +#: sql_help.c:127 sql_help.c:129 sql_help.c:131 sql_help.c:236 sql_help.c:238 +#: sql_help.c:239 sql_help.c:241 sql_help.c:243 sql_help.c:246 sql_help.c:248 +#: sql_help.c:250 sql_help.c:252 sql_help.c:264 sql_help.c:265 sql_help.c:266 +#: sql_help.c:268 sql_help.c:315 sql_help.c:317 sql_help.c:319 sql_help.c:321 +#: sql_help.c:382 sql_help.c:387 sql_help.c:389 sql_help.c:432 sql_help.c:434 +#: sql_help.c:437 sql_help.c:439 sql_help.c:506 sql_help.c:511 sql_help.c:516 +#: sql_help.c:521 sql_help.c:526 sql_help.c:575 sql_help.c:577 sql_help.c:579 +#: sql_help.c:581 sql_help.c:584 sql_help.c:586 sql_help.c:597 sql_help.c:599 +#: sql_help.c:640 sql_help.c:642 sql_help.c:644 sql_help.c:647 sql_help.c:649 +#: sql_help.c:651 sql_help.c:684 sql_help.c:688 sql_help.c:692 sql_help.c:711 +#: sql_help.c:714 sql_help.c:717 sql_help.c:746 sql_help.c:758 sql_help.c:766 +#: sql_help.c:769 sql_help.c:772 sql_help.c:787 sql_help.c:790 sql_help.c:807 +#: sql_help.c:809 sql_help.c:811 sql_help.c:813 sql_help.c:816 sql_help.c:818 +#: sql_help.c:859 sql_help.c:882 sql_help.c:893 sql_help.c:895 sql_help.c:914 +#: sql_help.c:924 sql_help.c:926 sql_help.c:928 sql_help.c:940 sql_help.c:944 +#: sql_help.c:946 sql_help.c:957 sql_help.c:959 sql_help.c:961 sql_help.c:977 +#: sql_help.c:979 sql_help.c:983 sql_help.c:986 sql_help.c:987 sql_help.c:988 +#: sql_help.c:991 sql_help.c:993 sql_help.c:1084 sql_help.c:1086 +#: sql_help.c:1089 sql_help.c:1092 sql_help.c:1094 sql_help.c:1096 +#: sql_help.c:1099 sql_help.c:1102 sql_help.c:1168 sql_help.c:1170 +#: sql_help.c:1172 sql_help.c:1175 sql_help.c:1196 sql_help.c:1199 +#: sql_help.c:1202 sql_help.c:1205 sql_help.c:1209 sql_help.c:1211 +#: sql_help.c:1213 sql_help.c:1215 sql_help.c:1229 sql_help.c:1232 +#: sql_help.c:1234 sql_help.c:1236 sql_help.c:1246 sql_help.c:1248 +#: sql_help.c:1258 sql_help.c:1260 sql_help.c:1270 sql_help.c:1273 +#: sql_help.c:1295 sql_help.c:1297 sql_help.c:1299 sql_help.c:1302 +#: sql_help.c:1304 sql_help.c:1306 sql_help.c:1309 sql_help.c:1359 +#: sql_help.c:1401 sql_help.c:1404 sql_help.c:1406 sql_help.c:1408 +#: sql_help.c:1410 sql_help.c:1412 sql_help.c:1415 sql_help.c:1455 +#: sql_help.c:1666 sql_help.c:1730 sql_help.c:1749 sql_help.c:1762 +#: sql_help.c:1818 sql_help.c:1824 sql_help.c:1834 sql_help.c:1854 +#: sql_help.c:1879 sql_help.c:1897 sql_help.c:1926 sql_help.c:2019 +#: sql_help.c:2061 sql_help.c:2083 sql_help.c:2103 sql_help.c:2104 +#: sql_help.c:2139 sql_help.c:2159 sql_help.c:2181 sql_help.c:2195 +#: sql_help.c:2210 sql_help.c:2240 sql_help.c:2265 sql_help.c:2311 +#: sql_help.c:2577 sql_help.c:2590 sql_help.c:2607 
sql_help.c:2623 +#: sql_help.c:2663 sql_help.c:2715 sql_help.c:2719 sql_help.c:2721 +#: sql_help.c:2727 sql_help.c:2745 sql_help.c:2772 sql_help.c:2807 +#: sql_help.c:2819 sql_help.c:2828 sql_help.c:2872 sql_help.c:2886 +#: sql_help.c:2914 sql_help.c:2922 sql_help.c:2930 sql_help.c:2938 +#: sql_help.c:2946 sql_help.c:2954 sql_help.c:2962 sql_help.c:2970 +#: sql_help.c:2979 sql_help.c:2990 sql_help.c:2998 sql_help.c:3006 +#: sql_help.c:3014 sql_help.c:3022 sql_help.c:3032 sql_help.c:3041 +#: sql_help.c:3050 sql_help.c:3058 sql_help.c:3067 sql_help.c:3075 +#: sql_help.c:3083 sql_help.c:3092 sql_help.c:3100 sql_help.c:3108 +#: sql_help.c:3116 sql_help.c:3124 sql_help.c:3132 sql_help.c:3140 +#: sql_help.c:3148 sql_help.c:3156 sql_help.c:3164 sql_help.c:3172 +#: sql_help.c:3189 sql_help.c:3198 sql_help.c:3206 sql_help.c:3223 +#: sql_help.c:3238 sql_help.c:3506 sql_help.c:3557 sql_help.c:3586 +#: sql_help.c:3594 sql_help.c:4017 sql_help.c:4065 sql_help.c:4206 msgid "name" msgstr "이름" -#: sql_help.c:37 sql_help.c:40 sql_help.c:43 sql_help.c:309 sql_help.c:1405 -#: sql_help.c:2644 sql_help.c:3539 +#: sql_help.c:37 sql_help.c:40 sql_help.c:43 sql_help.c:326 sql_help.c:1524 +#: sql_help.c:2887 sql_help.c:3811 msgid "aggregate_signature" msgstr "집계함수_식별구문" -#: sql_help.c:38 sql_help.c:65 sql_help.c:80 sql_help.c:116 sql_help.c:236 -#: sql_help.c:254 sql_help.c:373 sql_help.c:421 sql_help.c:498 sql_help.c:544 -#: sql_help.c:559 sql_help.c:581 sql_help.c:632 sql_help.c:698 sql_help.c:753 -#: sql_help.c:774 sql_help.c:820 sql_help.c:844 sql_help.c:854 sql_help.c:884 -#: sql_help.c:904 sql_help.c:996 sql_help.c:1061 sql_help.c:1104 -#: sql_help.c:1125 sql_help.c:1139 sql_help.c:1151 sql_help.c:1164 -#: sql_help.c:1194 sql_help.c:1248 sql_help.c:1293 +#: sql_help.c:38 sql_help.c:68 sql_help.c:83 sql_help.c:119 sql_help.c:251 +#: sql_help.c:269 sql_help.c:390 sql_help.c:438 sql_help.c:515 sql_help.c:561 +#: sql_help.c:576 sql_help.c:598 sql_help.c:648 sql_help.c:713 sql_help.c:768 +#: sql_help.c:789 sql_help.c:819 sql_help.c:860 sql_help.c:884 sql_help.c:894 +#: sql_help.c:927 sql_help.c:947 sql_help.c:960 sql_help.c:994 sql_help.c:1093 +#: sql_help.c:1169 sql_help.c:1212 sql_help.c:1233 sql_help.c:1247 +#: sql_help.c:1259 sql_help.c:1272 sql_help.c:1303 sql_help.c:1360 +#: sql_help.c:1409 msgid "new_name" msgstr "새이름" -#: sql_help.c:41 sql_help.c:67 sql_help.c:82 sql_help.c:118 sql_help.c:234 -#: sql_help.c:252 sql_help.c:371 sql_help.c:457 sql_help.c:503 sql_help.c:583 -#: sql_help.c:592 sql_help.c:651 sql_help.c:672 sql_help.c:701 sql_help.c:756 -#: sql_help.c:856 sql_help.c:882 sql_help.c:902 sql_help.c:1045 -#: sql_help.c:1063 sql_help.c:1106 sql_help.c:1127 sql_help.c:1189 -#: sql_help.c:1291 sql_help.c:2322 +#: sql_help.c:41 sql_help.c:70 sql_help.c:85 sql_help.c:121 sql_help.c:249 +#: sql_help.c:267 sql_help.c:388 sql_help.c:474 sql_help.c:520 sql_help.c:600 +#: sql_help.c:609 sql_help.c:667 sql_help.c:687 sql_help.c:716 sql_help.c:771 +#: sql_help.c:817 sql_help.c:896 sql_help.c:925 sql_help.c:945 sql_help.c:958 +#: sql_help.c:992 sql_help.c:1153 sql_help.c:1171 sql_help.c:1214 +#: sql_help.c:1235 sql_help.c:1298 sql_help.c:1407 sql_help.c:2563 msgid "new_owner" msgstr "새사용자" -#: sql_help.c:44 sql_help.c:69 sql_help.c:84 sql_help.c:238 sql_help.c:302 -#: sql_help.c:423 sql_help.c:508 sql_help.c:634 sql_help.c:676 sql_help.c:704 -#: sql_help.c:759 sql_help.c:886 sql_help.c:998 sql_help.c:1108 -#: sql_help.c:1129 sql_help.c:1141 sql_help.c:1153 sql_help.c:1196 -#: sql_help.c:1295 +#: sql_help.c:44 
sql_help.c:72 sql_help.c:87 sql_help.c:253 sql_help.c:318 +#: sql_help.c:440 sql_help.c:525 sql_help.c:650 sql_help.c:691 sql_help.c:719 +#: sql_help.c:774 sql_help.c:929 sql_help.c:962 sql_help.c:1095 +#: sql_help.c:1216 sql_help.c:1237 sql_help.c:1249 sql_help.c:1261 +#: sql_help.c:1305 sql_help.c:1411 msgid "new_schema" msgstr "새스키마" -#: sql_help.c:45 sql_help.c:1458 sql_help.c:2645 sql_help.c:3558 +#: sql_help.c:45 sql_help.c:1580 sql_help.c:2888 sql_help.c:3832 msgid "where aggregate_signature is:" msgstr "집계함수_식별구문 사용법:" -#: sql_help.c:46 sql_help.c:49 sql_help.c:52 sql_help.c:319 sql_help.c:344 -#: sql_help.c:347 sql_help.c:350 sql_help.c:490 sql_help.c:495 sql_help.c:500 -#: sql_help.c:505 sql_help.c:510 sql_help.c:1423 sql_help.c:1459 -#: sql_help.c:1462 sql_help.c:1465 sql_help.c:1609 sql_help.c:1628 -#: sql_help.c:1631 sql_help.c:1876 sql_help.c:2646 sql_help.c:2649 -#: sql_help.c:2652 sql_help.c:2737 sql_help.c:3122 sql_help.c:3454 -#: sql_help.c:3545 sql_help.c:3559 sql_help.c:3562 sql_help.c:3565 +#: sql_help.c:46 sql_help.c:49 sql_help.c:52 sql_help.c:336 sql_help.c:361 +#: sql_help.c:364 sql_help.c:367 sql_help.c:507 sql_help.c:512 sql_help.c:517 +#: sql_help.c:522 sql_help.c:527 sql_help.c:1542 sql_help.c:1581 +#: sql_help.c:1584 sql_help.c:1587 sql_help.c:1731 sql_help.c:1750 +#: sql_help.c:1753 sql_help.c:2020 sql_help.c:2889 sql_help.c:2892 +#: sql_help.c:2895 sql_help.c:2980 sql_help.c:3391 sql_help.c:3724 +#: sql_help.c:3817 sql_help.c:3833 sql_help.c:3836 sql_help.c:3839 msgid "argmode" msgstr "인자모드" -#: sql_help.c:47 sql_help.c:50 sql_help.c:53 sql_help.c:320 sql_help.c:345 -#: sql_help.c:348 sql_help.c:351 sql_help.c:491 sql_help.c:496 sql_help.c:501 -#: sql_help.c:506 sql_help.c:511 sql_help.c:1424 sql_help.c:1460 -#: sql_help.c:1463 sql_help.c:1466 sql_help.c:1610 sql_help.c:1629 -#: sql_help.c:1632 sql_help.c:1877 sql_help.c:2647 sql_help.c:2650 -#: sql_help.c:2653 sql_help.c:2738 sql_help.c:3546 sql_help.c:3560 -#: sql_help.c:3563 sql_help.c:3566 +#: sql_help.c:47 sql_help.c:50 sql_help.c:53 sql_help.c:337 sql_help.c:362 +#: sql_help.c:365 sql_help.c:368 sql_help.c:508 sql_help.c:513 sql_help.c:518 +#: sql_help.c:523 sql_help.c:528 sql_help.c:1543 sql_help.c:1582 +#: sql_help.c:1585 sql_help.c:1588 sql_help.c:1732 sql_help.c:1751 +#: sql_help.c:1754 sql_help.c:2021 sql_help.c:2890 sql_help.c:2893 +#: sql_help.c:2896 sql_help.c:2981 sql_help.c:3818 sql_help.c:3834 +#: sql_help.c:3837 sql_help.c:3840 msgid "argname" msgstr "인자이름" -#: sql_help.c:48 sql_help.c:51 sql_help.c:54 sql_help.c:321 sql_help.c:346 -#: sql_help.c:349 sql_help.c:352 sql_help.c:492 sql_help.c:497 sql_help.c:502 -#: sql_help.c:507 sql_help.c:512 sql_help.c:1425 sql_help.c:1461 -#: sql_help.c:1464 sql_help.c:1467 sql_help.c:1878 sql_help.c:2648 -#: sql_help.c:2651 sql_help.c:2654 sql_help.c:2739 sql_help.c:3547 -#: sql_help.c:3561 sql_help.c:3564 sql_help.c:3567 +#: sql_help.c:48 sql_help.c:51 sql_help.c:54 sql_help.c:338 sql_help.c:363 +#: sql_help.c:366 sql_help.c:369 sql_help.c:509 sql_help.c:514 sql_help.c:519 +#: sql_help.c:524 sql_help.c:529 sql_help.c:1544 sql_help.c:1583 +#: sql_help.c:1586 sql_help.c:1589 sql_help.c:2022 sql_help.c:2891 +#: sql_help.c:2894 sql_help.c:2897 sql_help.c:2982 sql_help.c:3819 +#: sql_help.c:3835 sql_help.c:3838 sql_help.c:3841 msgid "argtype" msgstr "인자자료형" -#: sql_help.c:110 sql_help.c:368 sql_help.c:446 sql_help.c:458 sql_help.c:814 -#: sql_help.c:899 sql_help.c:1122 sql_help.c:1242 sql_help.c:1270 -#: sql_help.c:1515 sql_help.c:1521 sql_help.c:1803 
sql_help.c:1835 -#: sql_help.c:1842 sql_help.c:1918 sql_help.c:2082 sql_help.c:2171 -#: sql_help.c:2351 sql_help.c:2530 sql_help.c:2552 sql_help.c:2990 -#: sql_help.c:3156 +#: sql_help.c:113 sql_help.c:385 sql_help.c:463 sql_help.c:475 sql_help.c:854 +#: sql_help.c:942 sql_help.c:1230 sql_help.c:1354 sql_help.c:1386 +#: sql_help.c:1637 sql_help.c:1643 sql_help.c:1929 sql_help.c:1970 +#: sql_help.c:1977 sql_help.c:1986 sql_help.c:2062 sql_help.c:2241 +#: sql_help.c:2333 sql_help.c:2592 sql_help.c:2773 sql_help.c:2795 +#: sql_help.c:3258 sql_help.c:3425 msgid "option" msgstr "옵션" -#: sql_help.c:111 sql_help.c:815 sql_help.c:1243 sql_help.c:1919 -#: sql_help.c:2083 sql_help.c:2531 +#: sql_help.c:114 sql_help.c:855 sql_help.c:1355 sql_help.c:2063 +#: sql_help.c:2242 sql_help.c:2774 msgid "where option can be:" msgstr "옵션 사용법:" -#: sql_help.c:112 sql_help.c:1735 +#: sql_help.c:115 sql_help.c:1861 msgid "allowconn" -msgstr "" +msgstr "접속허용" -#: sql_help.c:113 sql_help.c:816 sql_help.c:1244 sql_help.c:1736 -#: sql_help.c:2084 sql_help.c:2532 +#: sql_help.c:116 sql_help.c:856 sql_help.c:1356 sql_help.c:1862 +#: sql_help.c:2243 sql_help.c:2775 msgid "connlimit" -msgstr "" +msgstr "접속제한" -#: sql_help.c:114 sql_help.c:1737 +#: sql_help.c:117 sql_help.c:1863 msgid "istemplate" -msgstr "true|false" +msgstr "템플릿?" -#: sql_help.c:120 sql_help.c:571 sql_help.c:637 sql_help.c:652 sql_help.c:1001 -#: sql_help.c:1038 +#: sql_help.c:123 sql_help.c:588 sql_help.c:653 sql_help.c:1098 +#: sql_help.c:1146 msgid "new_tablespace" msgstr "새테이블스페이스" -#: sql_help.c:122 sql_help.c:125 sql_help.c:127 sql_help.c:517 sql_help.c:519 -#: sql_help.c:520 sql_help.c:823 sql_help.c:827 sql_help.c:830 sql_help.c:915 -#: sql_help.c:918 sql_help.c:1250 sql_help.c:1253 sql_help.c:1255 -#: sql_help.c:1887 sql_help.c:3341 sql_help.c:3732 +#: sql_help.c:125 sql_help.c:128 sql_help.c:130 sql_help.c:534 sql_help.c:536 +#: sql_help.c:537 sql_help.c:863 sql_help.c:867 sql_help.c:870 sql_help.c:1005 +#: sql_help.c:1008 sql_help.c:1363 sql_help.c:1367 sql_help.c:1370 +#: sql_help.c:2031 sql_help.c:3611 sql_help.c:4006 msgid "configuration_parameter" msgstr "환경설정_매개변수" -#: sql_help.c:123 sql_help.c:369 sql_help.c:441 sql_help.c:447 sql_help.c:459 -#: sql_help.c:518 sql_help.c:566 sql_help.c:643 sql_help.c:649 sql_help.c:824 -#: sql_help.c:900 sql_help.c:916 sql_help.c:917 sql_help.c:1020 -#: sql_help.c:1040 sql_help.c:1066 sql_help.c:1123 sql_help.c:1251 -#: sql_help.c:1271 sql_help.c:1804 sql_help.c:1836 sql_help.c:1843 -#: sql_help.c:1888 sql_help.c:1889 sql_help.c:1947 sql_help.c:1979 -#: sql_help.c:2172 sql_help.c:2246 sql_help.c:2254 sql_help.c:2286 -#: sql_help.c:2308 sql_help.c:2325 sql_help.c:2352 sql_help.c:2553 -#: sql_help.c:3157 sql_help.c:3733 sql_help.c:3734 +#: sql_help.c:126 sql_help.c:386 sql_help.c:458 sql_help.c:464 sql_help.c:476 +#: sql_help.c:535 sql_help.c:583 sql_help.c:659 sql_help.c:665 sql_help.c:815 +#: sql_help.c:864 sql_help.c:943 sql_help.c:982 sql_help.c:985 sql_help.c:990 +#: sql_help.c:1006 sql_help.c:1007 sql_help.c:1128 sql_help.c:1148 +#: sql_help.c:1174 sql_help.c:1231 sql_help.c:1364 sql_help.c:1387 +#: sql_help.c:1930 sql_help.c:1971 sql_help.c:1978 sql_help.c:1987 +#: sql_help.c:2032 sql_help.c:2033 sql_help.c:2091 sql_help.c:2123 +#: sql_help.c:2213 sql_help.c:2334 sql_help.c:2364 sql_help.c:2462 +#: sql_help.c:2474 sql_help.c:2487 sql_help.c:2527 sql_help.c:2549 +#: sql_help.c:2566 sql_help.c:2593 sql_help.c:2796 sql_help.c:3426 +#: sql_help.c:4007 sql_help.c:4008 msgid "value" msgstr "값" -#: 
sql_help.c:185 +#: sql_help.c:198 msgid "target_role" msgstr "대상롤" -#: sql_help.c:186 sql_help.c:1787 sql_help.c:2130 sql_help.c:2135 -#: sql_help.c:3104 sql_help.c:3111 sql_help.c:3125 sql_help.c:3131 -#: sql_help.c:3436 sql_help.c:3443 sql_help.c:3457 sql_help.c:3463 +#: sql_help.c:199 sql_help.c:1913 sql_help.c:2289 sql_help.c:2294 +#: sql_help.c:3373 sql_help.c:3380 sql_help.c:3394 sql_help.c:3400 +#: sql_help.c:3706 sql_help.c:3713 sql_help.c:3727 sql_help.c:3733 msgid "schema_name" msgstr "스키마이름" -#: sql_help.c:187 +#: sql_help.c:200 msgid "abbreviated_grant_or_revoke" -msgstr "" +msgstr "grant_또는_revoke_내용" -#: sql_help.c:188 +#: sql_help.c:201 msgid "where abbreviated_grant_or_revoke is one of:" -msgstr "" - -#: sql_help.c:189 sql_help.c:190 sql_help.c:191 sql_help.c:192 sql_help.c:193 -#: sql_help.c:194 sql_help.c:195 sql_help.c:196 sql_help.c:542 sql_help.c:570 -#: sql_help.c:636 sql_help.c:777 sql_help.c:834 sql_help.c:1000 -#: sql_help.c:1258 sql_help.c:1922 sql_help.c:1923 sql_help.c:1924 -#: sql_help.c:1925 sql_help.c:1926 sql_help.c:2052 sql_help.c:2087 -#: sql_help.c:2088 sql_help.c:2089 sql_help.c:2090 sql_help.c:2091 -#: sql_help.c:2535 sql_help.c:2536 sql_help.c:2537 sql_help.c:2538 -#: sql_help.c:2539 sql_help.c:3138 sql_help.c:3139 sql_help.c:3140 -#: sql_help.c:3437 sql_help.c:3441 sql_help.c:3444 sql_help.c:3446 -#: sql_help.c:3448 sql_help.c:3450 sql_help.c:3452 sql_help.c:3458 -#: sql_help.c:3460 sql_help.c:3462 sql_help.c:3464 sql_help.c:3466 -#: sql_help.c:3468 sql_help.c:3469 sql_help.c:3470 sql_help.c:3753 +msgstr "grant_또는_revoke_내용에 사용되는 구문:" + +#: sql_help.c:202 sql_help.c:203 sql_help.c:204 sql_help.c:205 sql_help.c:206 +#: sql_help.c:207 sql_help.c:208 sql_help.c:209 sql_help.c:210 sql_help.c:211 +#: sql_help.c:559 sql_help.c:587 sql_help.c:652 sql_help.c:792 sql_help.c:874 +#: sql_help.c:1097 sql_help.c:1374 sql_help.c:2066 sql_help.c:2067 +#: sql_help.c:2068 sql_help.c:2069 sql_help.c:2070 sql_help.c:2197 +#: sql_help.c:2246 sql_help.c:2247 sql_help.c:2248 sql_help.c:2249 +#: sql_help.c:2250 sql_help.c:2778 sql_help.c:2779 sql_help.c:2780 +#: sql_help.c:2781 sql_help.c:2782 sql_help.c:3407 sql_help.c:3408 +#: sql_help.c:3409 sql_help.c:3707 sql_help.c:3711 sql_help.c:3714 +#: sql_help.c:3716 sql_help.c:3718 sql_help.c:3720 sql_help.c:3722 +#: sql_help.c:3728 sql_help.c:3730 sql_help.c:3732 sql_help.c:3734 +#: sql_help.c:3736 sql_help.c:3738 sql_help.c:3739 sql_help.c:3740 +#: sql_help.c:4027 msgid "role_name" msgstr "롤이름" -#: sql_help.c:222 sql_help.c:434 sql_help.c:1011 sql_help.c:1013 -#: sql_help.c:1287 sql_help.c:1756 sql_help.c:1760 sql_help.c:1846 -#: sql_help.c:1850 sql_help.c:1943 sql_help.c:2258 sql_help.c:2268 -#: sql_help.c:2290 sql_help.c:3187 sql_help.c:3202 sql_help.c:3204 -#: sql_help.c:3618 sql_help.c:3619 sql_help.c:3628 sql_help.c:3669 -#: sql_help.c:3670 sql_help.c:3671 sql_help.c:3672 sql_help.c:3673 -#: sql_help.c:3674 sql_help.c:3707 sql_help.c:3708 sql_help.c:3713 -#: sql_help.c:3718 sql_help.c:3857 sql_help.c:3858 sql_help.c:3867 -#: sql_help.c:3908 sql_help.c:3909 sql_help.c:3910 sql_help.c:3911 -#: sql_help.c:3912 sql_help.c:3913 sql_help.c:3960 sql_help.c:3962 -#: sql_help.c:3995 sql_help.c:4051 sql_help.c:4052 sql_help.c:4061 -#: sql_help.c:4102 sql_help.c:4103 sql_help.c:4104 sql_help.c:4105 -#: sql_help.c:4106 sql_help.c:4107 +#: sql_help.c:237 sql_help.c:451 sql_help.c:1113 sql_help.c:1115 +#: sql_help.c:1403 sql_help.c:1882 sql_help.c:1886 sql_help.c:1990 +#: sql_help.c:1994 sql_help.c:2087 sql_help.c:2458 sql_help.c:2470 
+#: sql_help.c:2483 sql_help.c:2491 sql_help.c:2502 sql_help.c:2531 +#: sql_help.c:3457 sql_help.c:3472 sql_help.c:3474 sql_help.c:3892 +#: sql_help.c:3893 sql_help.c:3902 sql_help.c:3943 sql_help.c:3944 +#: sql_help.c:3945 sql_help.c:3946 sql_help.c:3947 sql_help.c:3948 +#: sql_help.c:3981 sql_help.c:3982 sql_help.c:3987 sql_help.c:3992 +#: sql_help.c:4131 sql_help.c:4132 sql_help.c:4141 sql_help.c:4182 +#: sql_help.c:4183 sql_help.c:4184 sql_help.c:4185 sql_help.c:4186 +#: sql_help.c:4187 sql_help.c:4234 sql_help.c:4236 sql_help.c:4269 +#: sql_help.c:4325 sql_help.c:4326 sql_help.c:4335 sql_help.c:4376 +#: sql_help.c:4377 sql_help.c:4378 sql_help.c:4379 sql_help.c:4380 +#: sql_help.c:4381 msgid "expression" msgstr "표현식" -#: sql_help.c:225 +#: sql_help.c:240 msgid "domain_constraint" msgstr "도메인_제약조건" -#: sql_help.c:227 sql_help.c:229 sql_help.c:232 sql_help.c:449 sql_help.c:450 -#: sql_help.c:993 sql_help.c:1026 sql_help.c:1027 sql_help.c:1028 -#: sql_help.c:1048 sql_help.c:1411 sql_help.c:1413 sql_help.c:1759 -#: sql_help.c:1845 sql_help.c:1849 sql_help.c:2257 sql_help.c:2267 -#: sql_help.c:3199 +#: sql_help.c:242 sql_help.c:244 sql_help.c:247 sql_help.c:466 sql_help.c:467 +#: sql_help.c:1090 sql_help.c:1134 sql_help.c:1135 sql_help.c:1136 +#: sql_help.c:1156 sql_help.c:1530 sql_help.c:1532 sql_help.c:1885 +#: sql_help.c:1989 sql_help.c:1993 sql_help.c:2490 sql_help.c:2501 +#: sql_help.c:3469 msgid "constraint_name" msgstr "제약조건_이름" -#: sql_help.c:230 sql_help.c:994 +#: sql_help.c:245 sql_help.c:1091 msgid "new_constraint_name" msgstr "새제약조건_이름" -#: sql_help.c:300 sql_help.c:898 +#: sql_help.c:316 sql_help.c:941 msgid "new_version" msgstr "새버전" -#: sql_help.c:304 sql_help.c:306 +#: sql_help.c:320 sql_help.c:322 msgid "member_object" -msgstr "" +msgstr "맴버_객체" -#: sql_help.c:307 +#: sql_help.c:323 msgid "where member_object is:" -msgstr "" +msgstr "맴버_객체 사용법:" + +#: sql_help.c:324 sql_help.c:329 sql_help.c:330 sql_help.c:331 sql_help.c:332 +#: sql_help.c:333 sql_help.c:334 sql_help.c:339 sql_help.c:343 sql_help.c:345 +#: sql_help.c:347 sql_help.c:348 sql_help.c:349 sql_help.c:350 sql_help.c:351 +#: sql_help.c:352 sql_help.c:353 sql_help.c:354 sql_help.c:355 sql_help.c:358 +#: sql_help.c:359 sql_help.c:1522 sql_help.c:1527 sql_help.c:1534 +#: sql_help.c:1535 sql_help.c:1536 sql_help.c:1537 sql_help.c:1538 +#: sql_help.c:1539 sql_help.c:1540 sql_help.c:1545 sql_help.c:1547 +#: sql_help.c:1551 sql_help.c:1553 sql_help.c:1557 sql_help.c:1558 +#: sql_help.c:1559 sql_help.c:1562 sql_help.c:1563 sql_help.c:1564 +#: sql_help.c:1565 sql_help.c:1566 sql_help.c:1567 sql_help.c:1568 +#: sql_help.c:1569 sql_help.c:1570 sql_help.c:1571 sql_help.c:1572 +#: sql_help.c:1577 sql_help.c:1578 sql_help.c:3807 sql_help.c:3812 +#: sql_help.c:3813 sql_help.c:3814 sql_help.c:3815 sql_help.c:3821 +#: sql_help.c:3822 sql_help.c:3823 sql_help.c:3824 sql_help.c:3825 +#: sql_help.c:3826 sql_help.c:3827 sql_help.c:3828 sql_help.c:3829 +#: sql_help.c:3830 +msgid "object_name" +msgstr "객체이름" -#: sql_help.c:308 sql_help.c:1404 sql_help.c:3538 +#: sql_help.c:325 sql_help.c:1523 sql_help.c:3810 msgid "aggregate_name" msgstr "집계함수이름" -#: sql_help.c:310 sql_help.c:1406 sql_help.c:1674 sql_help.c:1678 -#: sql_help.c:1680 sql_help.c:2662 +#: sql_help.c:327 sql_help.c:1525 sql_help.c:1796 sql_help.c:1800 +#: sql_help.c:1802 sql_help.c:2905 msgid "source_type" msgstr "기존자료형" -#: sql_help.c:311 sql_help.c:1407 sql_help.c:1675 sql_help.c:1679 -#: sql_help.c:1681 sql_help.c:2663 +#: sql_help.c:328 sql_help.c:1526 sql_help.c:1797 
sql_help.c:1801 +#: sql_help.c:1803 sql_help.c:2906 msgid "target_type" msgstr "대상자료형" -#: sql_help.c:312 sql_help.c:313 sql_help.c:314 sql_help.c:315 sql_help.c:316 -#: sql_help.c:317 sql_help.c:322 sql_help.c:326 sql_help.c:328 sql_help.c:330 -#: sql_help.c:331 sql_help.c:332 sql_help.c:333 sql_help.c:334 sql_help.c:335 -#: sql_help.c:336 sql_help.c:337 sql_help.c:338 sql_help.c:341 sql_help.c:342 -#: sql_help.c:1403 sql_help.c:1408 sql_help.c:1415 sql_help.c:1416 -#: sql_help.c:1417 sql_help.c:1418 sql_help.c:1419 sql_help.c:1420 -#: sql_help.c:1421 sql_help.c:1426 sql_help.c:1428 sql_help.c:1432 -#: sql_help.c:1434 sql_help.c:1438 sql_help.c:1439 sql_help.c:1442 -#: sql_help.c:1443 sql_help.c:1444 sql_help.c:1445 sql_help.c:1446 -#: sql_help.c:1447 sql_help.c:1448 sql_help.c:1449 sql_help.c:1450 -#: sql_help.c:1455 sql_help.c:1456 sql_help.c:3535 sql_help.c:3540 -#: sql_help.c:3541 sql_help.c:3542 sql_help.c:3543 sql_help.c:3549 -#: sql_help.c:3550 sql_help.c:3551 sql_help.c:3552 sql_help.c:3553 -#: sql_help.c:3554 sql_help.c:3555 sql_help.c:3556 -msgid "object_name" -msgstr "객체이름" - -#: sql_help.c:318 sql_help.c:741 sql_help.c:1422 sql_help.c:1676 -#: sql_help.c:1711 sql_help.c:1774 sql_help.c:1996 sql_help.c:2027 -#: sql_help.c:2426 sql_help.c:3121 sql_help.c:3453 sql_help.c:3544 -#: sql_help.c:3647 sql_help.c:3651 sql_help.c:3655 sql_help.c:3658 -#: sql_help.c:3886 sql_help.c:3890 sql_help.c:3894 sql_help.c:3897 -#: sql_help.c:4080 sql_help.c:4084 sql_help.c:4088 sql_help.c:4091 +#: sql_help.c:335 sql_help.c:756 sql_help.c:1541 sql_help.c:1798 +#: sql_help.c:1837 sql_help.c:1900 sql_help.c:2140 sql_help.c:2171 +#: sql_help.c:2669 sql_help.c:3390 sql_help.c:3723 sql_help.c:3816 +#: sql_help.c:3921 sql_help.c:3925 sql_help.c:3929 sql_help.c:3932 +#: sql_help.c:4160 sql_help.c:4164 sql_help.c:4168 sql_help.c:4171 +#: sql_help.c:4354 sql_help.c:4358 sql_help.c:4362 sql_help.c:4365 msgid "function_name" msgstr "함수이름" -#: sql_help.c:323 sql_help.c:734 sql_help.c:1429 sql_help.c:2020 +#: sql_help.c:340 sql_help.c:749 sql_help.c:1548 sql_help.c:2164 msgid "operator_name" msgstr "연산자이름" -#: sql_help.c:324 sql_help.c:670 sql_help.c:674 sql_help.c:678 sql_help.c:1430 -#: sql_help.c:1997 sql_help.c:2780 +#: sql_help.c:341 sql_help.c:685 sql_help.c:689 sql_help.c:693 sql_help.c:1549 +#: sql_help.c:2141 sql_help.c:3023 msgid "left_type" msgstr "왼쪽인자_자료형" -#: sql_help.c:325 sql_help.c:671 sql_help.c:675 sql_help.c:679 sql_help.c:1431 -#: sql_help.c:1998 sql_help.c:2781 +#: sql_help.c:342 sql_help.c:686 sql_help.c:690 sql_help.c:694 sql_help.c:1550 +#: sql_help.c:2142 sql_help.c:3024 msgid "right_type" msgstr "오른쪽인자_자료형" -#: sql_help.c:327 sql_help.c:329 sql_help.c:697 sql_help.c:700 sql_help.c:703 -#: sql_help.c:732 sql_help.c:744 sql_help.c:752 sql_help.c:755 sql_help.c:758 -#: sql_help.c:1433 sql_help.c:1435 sql_help.c:2017 sql_help.c:2038 -#: sql_help.c:2273 sql_help.c:2790 sql_help.c:2799 +#: sql_help.c:344 sql_help.c:346 sql_help.c:712 sql_help.c:715 sql_help.c:718 +#: sql_help.c:747 sql_help.c:759 sql_help.c:767 sql_help.c:770 sql_help.c:773 +#: sql_help.c:1552 sql_help.c:1554 sql_help.c:2161 sql_help.c:2182 +#: sql_help.c:2507 sql_help.c:3033 sql_help.c:3042 msgid "index_method" msgstr "색인방법" -#: sql_help.c:339 sql_help.c:1044 sql_help.c:1451 sql_help.c:1884 -#: sql_help.c:2249 sql_help.c:2395 sql_help.c:2913 sql_help.c:3135 -#: sql_help.c:3467 +#: sql_help.c:356 sql_help.c:1152 sql_help.c:1573 sql_help.c:2028 +#: sql_help.c:2465 sql_help.c:2636 sql_help.c:3180 sql_help.c:3404 +#: 
sql_help.c:3737 msgid "type_name" msgstr "자료형이름" -#: sql_help.c:340 sql_help.c:1452 sql_help.c:1883 sql_help.c:2396 -#: sql_help.c:2620 sql_help.c:2914 sql_help.c:3127 sql_help.c:3459 +#: sql_help.c:357 sql_help.c:1574 sql_help.c:2027 sql_help.c:2637 +#: sql_help.c:2863 sql_help.c:3181 sql_help.c:3396 sql_help.c:3729 msgid "lang_name" -msgstr "" +msgstr "언어_이름" -#: sql_help.c:343 +#: sql_help.c:360 msgid "and aggregate_signature is:" -msgstr "" +msgstr "집계함수_식별구문 사용법:" -#: sql_help.c:366 sql_help.c:1546 sql_help.c:1801 +#: sql_help.c:383 sql_help.c:1668 sql_help.c:1927 msgid "handler_function" msgstr "핸들러_함수" -#: sql_help.c:367 sql_help.c:1802 +#: sql_help.c:384 sql_help.c:1928 msgid "validator_function" msgstr "유효성검사_함수" -#: sql_help.c:416 sql_help.c:493 sql_help.c:625 sql_help.c:988 sql_help.c:1187 -#: sql_help.c:2264 sql_help.c:2265 sql_help.c:2281 sql_help.c:2282 +#: sql_help.c:433 sql_help.c:510 sql_help.c:641 sql_help.c:1085 +#: sql_help.c:1296 sql_help.c:2498 sql_help.c:2499 sql_help.c:2515 +#: sql_help.c:2516 msgid "action" msgstr "동작" -#: sql_help.c:418 sql_help.c:425 sql_help.c:429 sql_help.c:430 sql_help.c:433 -#: sql_help.c:435 sql_help.c:436 sql_help.c:437 sql_help.c:439 sql_help.c:442 -#: sql_help.c:444 sql_help.c:445 sql_help.c:629 sql_help.c:639 sql_help.c:641 -#: sql_help.c:644 sql_help.c:646 sql_help.c:880 sql_help.c:990 sql_help.c:1003 -#: sql_help.c:1007 sql_help.c:1008 sql_help.c:1012 sql_help.c:1014 -#: sql_help.c:1015 sql_help.c:1016 sql_help.c:1018 sql_help.c:1021 -#: sql_help.c:1023 sql_help.c:1286 sql_help.c:1289 sql_help.c:1309 -#: sql_help.c:1410 sql_help.c:1512 sql_help.c:1517 sql_help.c:1531 -#: sql_help.c:1532 sql_help.c:1533 sql_help.c:1833 sql_help.c:1881 -#: sql_help.c:1942 sql_help.c:1977 sql_help.c:2157 sql_help.c:2237 -#: sql_help.c:2250 sql_help.c:2269 sql_help.c:2271 sql_help.c:2278 -#: sql_help.c:2289 sql_help.c:2306 sql_help.c:2429 sql_help.c:2565 -#: sql_help.c:3106 sql_help.c:3107 sql_help.c:3186 sql_help.c:3201 -#: sql_help.c:3203 sql_help.c:3205 sql_help.c:3438 sql_help.c:3439 -#: sql_help.c:3537 sql_help.c:3678 sql_help.c:3917 sql_help.c:3959 -#: sql_help.c:3961 sql_help.c:3963 sql_help.c:3980 sql_help.c:3983 -#: sql_help.c:4111 +#: sql_help.c:435 sql_help.c:442 sql_help.c:446 sql_help.c:447 sql_help.c:450 +#: sql_help.c:452 sql_help.c:453 sql_help.c:454 sql_help.c:456 sql_help.c:459 +#: sql_help.c:461 sql_help.c:462 sql_help.c:645 sql_help.c:655 sql_help.c:657 +#: sql_help.c:660 sql_help.c:662 sql_help.c:923 sql_help.c:1087 +#: sql_help.c:1105 sql_help.c:1109 sql_help.c:1110 sql_help.c:1114 +#: sql_help.c:1116 sql_help.c:1117 sql_help.c:1118 sql_help.c:1120 +#: sql_help.c:1123 sql_help.c:1124 sql_help.c:1126 sql_help.c:1129 +#: sql_help.c:1131 sql_help.c:1402 sql_help.c:1405 sql_help.c:1425 +#: sql_help.c:1529 sql_help.c:1634 sql_help.c:1639 sql_help.c:1653 +#: sql_help.c:1654 sql_help.c:1655 sql_help.c:1968 sql_help.c:1981 +#: sql_help.c:2025 sql_help.c:2086 sql_help.c:2121 sql_help.c:2319 +#: sql_help.c:2347 sql_help.c:2348 sql_help.c:2449 sql_help.c:2457 +#: sql_help.c:2466 sql_help.c:2469 sql_help.c:2478 sql_help.c:2482 +#: sql_help.c:2503 sql_help.c:2505 sql_help.c:2512 sql_help.c:2530 +#: sql_help.c:2547 sql_help.c:2672 sql_help.c:2808 sql_help.c:3375 +#: sql_help.c:3376 sql_help.c:3456 sql_help.c:3471 sql_help.c:3473 +#: sql_help.c:3475 sql_help.c:3708 sql_help.c:3709 sql_help.c:3809 +#: sql_help.c:3952 sql_help.c:4191 sql_help.c:4233 sql_help.c:4235 +#: sql_help.c:4237 sql_help.c:4254 sql_help.c:4257 sql_help.c:4385 msgid "column_name" 
msgstr "칼럼이름" -#: sql_help.c:419 sql_help.c:630 sql_help.c:991 +#: sql_help.c:436 sql_help.c:646 sql_help.c:1088 msgid "new_column_name" msgstr "새칼럼이름" -#: sql_help.c:424 sql_help.c:514 sql_help.c:638 sql_help.c:1002 -#: sql_help.c:1200 +#: sql_help.c:441 sql_help.c:531 sql_help.c:654 sql_help.c:1104 +#: sql_help.c:1312 msgid "where action is one of:" msgstr "동작 사용법:" -#: sql_help.c:426 sql_help.c:431 sql_help.c:1004 sql_help.c:1009 -#: sql_help.c:1202 sql_help.c:1206 sql_help.c:1754 sql_help.c:1834 -#: sql_help.c:2016 sql_help.c:2238 sql_help.c:2474 sql_help.c:3288 +#: sql_help.c:443 sql_help.c:448 sql_help.c:915 sql_help.c:1106 +#: sql_help.c:1111 sql_help.c:1314 sql_help.c:1318 sql_help.c:1880 +#: sql_help.c:1969 sql_help.c:2160 sql_help.c:2312 sql_help.c:2450 +#: sql_help.c:2717 sql_help.c:3558 msgid "data_type" msgstr "자료형" -#: sql_help.c:427 sql_help.c:432 sql_help.c:1005 sql_help.c:1010 -#: sql_help.c:1203 sql_help.c:1207 sql_help.c:1755 sql_help.c:1837 -#: sql_help.c:1944 sql_help.c:2239 sql_help.c:2475 sql_help.c:2481 -#: sql_help.c:3196 +#: sql_help.c:444 sql_help.c:449 sql_help.c:1107 sql_help.c:1112 +#: sql_help.c:1315 sql_help.c:1319 sql_help.c:1881 sql_help.c:1972 +#: sql_help.c:2088 sql_help.c:2451 sql_help.c:2459 sql_help.c:2471 +#: sql_help.c:2484 sql_help.c:2718 sql_help.c:2724 sql_help.c:3466 msgid "collation" msgstr "collation" -#: sql_help.c:428 sql_help.c:1006 sql_help.c:1838 sql_help.c:2240 -#: sql_help.c:2251 +#: sql_help.c:445 sql_help.c:1108 sql_help.c:1973 sql_help.c:1982 +#: sql_help.c:2452 sql_help.c:2467 sql_help.c:2479 msgid "column_constraint" msgstr "칼럼_제약조건" -#: sql_help.c:438 sql_help.c:640 sql_help.c:1017 +#: sql_help.c:455 sql_help.c:656 sql_help.c:1125 msgid "integer" msgstr "정수" -#: sql_help.c:440 sql_help.c:443 sql_help.c:642 sql_help.c:645 sql_help.c:1019 -#: sql_help.c:1022 +#: sql_help.c:457 sql_help.c:460 sql_help.c:658 sql_help.c:661 sql_help.c:1127 +#: sql_help.c:1130 msgid "attribute_option" msgstr "속성_옵션" -#: sql_help.c:448 sql_help.c:1024 sql_help.c:1839 sql_help.c:2241 -#: sql_help.c:2252 +#: sql_help.c:465 sql_help.c:1132 sql_help.c:1974 sql_help.c:1983 +#: sql_help.c:2453 sql_help.c:2468 sql_help.c:2480 msgid "table_constraint" msgstr "테이블_제약조건" -#: sql_help.c:451 sql_help.c:452 sql_help.c:453 sql_help.c:454 sql_help.c:1029 -#: sql_help.c:1030 sql_help.c:1031 sql_help.c:1032 sql_help.c:1453 +#: sql_help.c:468 sql_help.c:469 sql_help.c:470 sql_help.c:471 sql_help.c:1137 +#: sql_help.c:1138 sql_help.c:1139 sql_help.c:1140 sql_help.c:1575 msgid "trigger_name" msgstr "트리거이름" -#: sql_help.c:455 sql_help.c:456 sql_help.c:1042 sql_help.c:1043 -#: sql_help.c:1840 sql_help.c:2244 +#: sql_help.c:472 sql_help.c:473 sql_help.c:1150 sql_help.c:1151 +#: sql_help.c:1975 sql_help.c:1980 sql_help.c:2456 sql_help.c:2477 msgid "parent_table" msgstr "상위_테이블" -#: sql_help.c:513 sql_help.c:563 sql_help.c:627 sql_help.c:1167 -#: sql_help.c:1786 +#: sql_help.c:530 sql_help.c:580 sql_help.c:643 sql_help.c:1275 +#: sql_help.c:1912 msgid "extension_name" msgstr "확장모듈이름" -#: sql_help.c:515 sql_help.c:1885 +#: sql_help.c:532 sql_help.c:2029 msgid "execution_cost" msgstr "실행비용" -#: sql_help.c:516 sql_help.c:1886 +#: sql_help.c:533 sql_help.c:2030 msgid "result_rows" -msgstr "" - -#: sql_help.c:537 sql_help.c:539 sql_help.c:813 sql_help.c:821 sql_help.c:825 -#: sql_help.c:828 sql_help.c:831 sql_help.c:1241 sql_help.c:1249 -#: sql_help.c:1252 sql_help.c:1254 sql_help.c:1256 sql_help.c:2131 -#: sql_help.c:2133 sql_help.c:2136 sql_help.c:2137 sql_help.c:3105 -#: 
sql_help.c:3109 sql_help.c:3112 sql_help.c:3114 sql_help.c:3116 -#: sql_help.c:3118 sql_help.c:3120 sql_help.c:3126 sql_help.c:3128 -#: sql_help.c:3130 sql_help.c:3132 sql_help.c:3134 sql_help.c:3136 +msgstr "반환자료수" + +#: sql_help.c:554 sql_help.c:556 sql_help.c:853 sql_help.c:861 sql_help.c:865 +#: sql_help.c:868 sql_help.c:871 sql_help.c:1353 sql_help.c:1361 +#: sql_help.c:1365 sql_help.c:1368 sql_help.c:1371 sql_help.c:2290 +#: sql_help.c:2292 sql_help.c:2295 sql_help.c:2296 sql_help.c:3374 +#: sql_help.c:3378 sql_help.c:3381 sql_help.c:3383 sql_help.c:3385 +#: sql_help.c:3387 sql_help.c:3389 sql_help.c:3395 sql_help.c:3397 +#: sql_help.c:3399 sql_help.c:3401 sql_help.c:3403 sql_help.c:3405 msgid "role_specification" msgstr "롤_명세" -#: sql_help.c:538 sql_help.c:540 sql_help.c:1268 sql_help.c:1729 -#: sql_help.c:2139 sql_help.c:2550 sql_help.c:2947 sql_help.c:3763 +#: sql_help.c:555 sql_help.c:557 sql_help.c:1384 sql_help.c:1855 +#: sql_help.c:2298 sql_help.c:2793 sql_help.c:3214 sql_help.c:4037 msgid "user_name" msgstr "사용자이름" -#: sql_help.c:541 sql_help.c:833 sql_help.c:1257 sql_help.c:2138 -#: sql_help.c:3137 +#: sql_help.c:558 sql_help.c:873 sql_help.c:1373 sql_help.c:2297 +#: sql_help.c:3406 msgid "where role_specification can be:" msgstr "롤_명세 사용법:" -#: sql_help.c:543 +#: sql_help.c:560 msgid "group_name" msgstr "그룹이름" -#: sql_help.c:561 sql_help.c:1734 sql_help.c:1948 sql_help.c:1980 -#: sql_help.c:2247 sql_help.c:2255 sql_help.c:2287 sql_help.c:2309 -#: sql_help.c:2321 sql_help.c:3133 sql_help.c:3465 +#: sql_help.c:578 sql_help.c:1860 sql_help.c:2092 sql_help.c:2124 +#: sql_help.c:2463 sql_help.c:2475 sql_help.c:2488 sql_help.c:2528 +#: sql_help.c:2550 sql_help.c:2562 sql_help.c:3402 sql_help.c:3735 msgid "tablespace_name" msgstr "테이블스페이스이름" -#: sql_help.c:565 sql_help.c:568 sql_help.c:648 sql_help.c:650 sql_help.c:1039 -#: sql_help.c:1041 sql_help.c:1946 sql_help.c:1978 sql_help.c:2245 -#: sql_help.c:2253 sql_help.c:2285 sql_help.c:2307 +#: sql_help.c:582 sql_help.c:585 sql_help.c:664 sql_help.c:666 sql_help.c:1147 +#: sql_help.c:1149 sql_help.c:2090 sql_help.c:2122 sql_help.c:2461 +#: sql_help.c:2473 sql_help.c:2486 sql_help.c:2526 sql_help.c:2548 msgid "storage_parameter" -msgstr "" +msgstr "스토리지_매개변수" -#: sql_help.c:591 sql_help.c:1427 sql_help.c:3548 +#: sql_help.c:608 sql_help.c:1546 sql_help.c:3820 msgid "large_object_oid" msgstr "대형_객체_oid" -#: sql_help.c:647 sql_help.c:1037 sql_help.c:1046 sql_help.c:1049 -#: sql_help.c:1349 +#: sql_help.c:663 sql_help.c:1145 sql_help.c:1154 sql_help.c:1157 +#: sql_help.c:1465 msgid "index_name" msgstr "인덱스이름" -#: sql_help.c:680 sql_help.c:2001 +#: sql_help.c:695 sql_help.c:2145 msgid "res_proc" msgstr "" -#: sql_help.c:681 sql_help.c:2002 +#: sql_help.c:696 sql_help.c:2146 msgid "join_proc" msgstr "" -#: sql_help.c:733 sql_help.c:745 sql_help.c:2019 +#: sql_help.c:748 sql_help.c:760 sql_help.c:2163 msgid "strategy_number" -msgstr "" +msgstr "전략_번호" -#: sql_help.c:735 sql_help.c:736 sql_help.c:739 sql_help.c:740 sql_help.c:746 -#: sql_help.c:747 sql_help.c:749 sql_help.c:750 sql_help.c:2021 -#: sql_help.c:2022 sql_help.c:2025 sql_help.c:2026 +#: sql_help.c:750 sql_help.c:751 sql_help.c:754 sql_help.c:755 sql_help.c:761 +#: sql_help.c:762 sql_help.c:764 sql_help.c:765 sql_help.c:2165 +#: sql_help.c:2166 sql_help.c:2169 sql_help.c:2170 msgid "op_type" msgstr "연산자자료형" -#: sql_help.c:737 sql_help.c:2023 +#: sql_help.c:752 sql_help.c:2167 msgid "sort_family_name" msgstr "" -#: sql_help.c:738 sql_help.c:748 sql_help.c:2024 +#: sql_help.c:753 
sql_help.c:763 sql_help.c:2168 msgid "support_number" msgstr "" -#: sql_help.c:742 sql_help.c:1677 sql_help.c:2028 sql_help.c:2398 -#: sql_help.c:2400 +#: sql_help.c:757 sql_help.c:1799 sql_help.c:2172 sql_help.c:2639 +#: sql_help.c:2641 msgid "argument_type" msgstr "인자자료형" -#: sql_help.c:773 sql_help.c:776 sql_help.c:843 sql_help.c:879 sql_help.c:1163 -#: sql_help.c:1166 sql_help.c:1308 sql_help.c:1348 sql_help.c:1412 -#: sql_help.c:1437 sql_help.c:1441 sql_help.c:1454 sql_help.c:1511 -#: sql_help.c:1516 sql_help.c:1832 sql_help.c:1940 sql_help.c:1976 -#: sql_help.c:2051 sql_help.c:2108 sql_help.c:2156 sql_help.c:2236 -#: sql_help.c:2248 sql_help.c:2305 sql_help.c:2423 sql_help.c:2599 -#: sql_help.c:2816 sql_help.c:2833 sql_help.c:2923 sql_help.c:3103 -#: sql_help.c:3108 sql_help.c:3153 sql_help.c:3184 sql_help.c:3435 -#: sql_help.c:3440 sql_help.c:3536 sql_help.c:3633 sql_help.c:3635 -#: sql_help.c:3684 sql_help.c:3723 sql_help.c:3872 sql_help.c:3874 -#: sql_help.c:3923 sql_help.c:3957 sql_help.c:3979 sql_help.c:3981 -#: sql_help.c:3982 sql_help.c:4066 sql_help.c:4068 sql_help.c:4117 +#: sql_help.c:788 sql_help.c:791 sql_help.c:808 sql_help.c:810 sql_help.c:812 +#: sql_help.c:883 sql_help.c:922 sql_help.c:1271 sql_help.c:1274 +#: sql_help.c:1424 sql_help.c:1464 sql_help.c:1531 sql_help.c:1556 +#: sql_help.c:1561 sql_help.c:1576 sql_help.c:1633 sql_help.c:1638 +#: sql_help.c:1967 sql_help.c:1979 sql_help.c:2084 sql_help.c:2120 +#: sql_help.c:2196 sql_help.c:2211 sql_help.c:2267 sql_help.c:2318 +#: sql_help.c:2349 sql_help.c:2448 sql_help.c:2464 sql_help.c:2476 +#: sql_help.c:2546 sql_help.c:2665 sql_help.c:2842 sql_help.c:3059 +#: sql_help.c:3084 sql_help.c:3190 sql_help.c:3372 sql_help.c:3377 +#: sql_help.c:3422 sql_help.c:3454 sql_help.c:3705 sql_help.c:3710 +#: sql_help.c:3808 sql_help.c:3907 sql_help.c:3909 sql_help.c:3958 +#: sql_help.c:3997 sql_help.c:4146 sql_help.c:4148 sql_help.c:4197 +#: sql_help.c:4231 sql_help.c:4253 sql_help.c:4255 sql_help.c:4256 +#: sql_help.c:4340 sql_help.c:4342 sql_help.c:4391 msgid "table_name" msgstr "테이블이름" -#: sql_help.c:778 sql_help.c:2053 +#: sql_help.c:793 sql_help.c:2198 msgid "using_expression" msgstr "" -#: sql_help.c:779 sql_help.c:2054 +#: sql_help.c:794 sql_help.c:2199 msgid "check_expression" -msgstr "" +msgstr "체크_표현식" + +#: sql_help.c:814 sql_help.c:2212 +msgid "publication_parameter" +msgstr "발행_매개변수" -#: sql_help.c:817 sql_help.c:1245 sql_help.c:1920 sql_help.c:2085 -#: sql_help.c:2533 +#: sql_help.c:857 sql_help.c:1357 sql_help.c:2064 sql_help.c:2244 +#: sql_help.c:2776 msgid "password" msgstr "암호" -#: sql_help.c:818 sql_help.c:1246 sql_help.c:1921 sql_help.c:2086 -#: sql_help.c:2534 +#: sql_help.c:858 sql_help.c:1358 sql_help.c:2065 sql_help.c:2245 +#: sql_help.c:2777 msgid "timestamp" msgstr "" -#: sql_help.c:822 sql_help.c:826 sql_help.c:829 sql_help.c:832 sql_help.c:3113 -#: sql_help.c:3445 +#: sql_help.c:862 sql_help.c:866 sql_help.c:869 sql_help.c:872 sql_help.c:1362 +#: sql_help.c:1366 sql_help.c:1369 sql_help.c:1372 sql_help.c:3382 +#: sql_help.c:3715 msgid "database_name" msgstr "데이터베이스이름" -#: sql_help.c:873 sql_help.c:2151 +#: sql_help.c:916 sql_help.c:2313 msgid "increment" msgstr "" -#: sql_help.c:874 sql_help.c:2152 +#: sql_help.c:917 sql_help.c:2314 msgid "minvalue" msgstr "최소값" -#: sql_help.c:875 sql_help.c:2153 +#: sql_help.c:918 sql_help.c:2315 msgid "maxvalue" msgstr "최대값" -#: sql_help.c:876 sql_help.c:2154 sql_help.c:3631 sql_help.c:3721 -#: sql_help.c:3870 sql_help.c:3999 sql_help.c:4064 +#: sql_help.c:919 
sql_help.c:2316 sql_help.c:3905 sql_help.c:3995 +#: sql_help.c:4144 sql_help.c:4273 sql_help.c:4338 msgid "start" msgstr "시작" -#: sql_help.c:877 +#: sql_help.c:920 sql_help.c:1122 msgid "restart" msgstr "재시작" -#: sql_help.c:878 sql_help.c:2155 +#: sql_help.c:921 sql_help.c:2317 msgid "cache" msgstr "캐쉬" -#: sql_help.c:1025 +#: sql_help.c:978 sql_help.c:2361 +msgid "conninfo" +msgstr "접속정보" + +#: sql_help.c:980 sql_help.c:2362 +msgid "publication_name" +msgstr "발행_이름" + +#: sql_help.c:981 +msgid "set_publication_option" +msgstr "발행_옵션_설정" + +#: sql_help.c:984 +msgid "refresh_option" +msgstr "새로고침_옵션" + +#: sql_help.c:989 sql_help.c:2363 +msgid "subscription_parameter" +msgstr "구독_매개변수" + +#: sql_help.c:1100 sql_help.c:1103 +msgid "partition_name" +msgstr "파티션_이름" + +#: sql_help.c:1101 sql_help.c:1984 sql_help.c:2481 +msgid "partition_bound_spec" +msgstr "파티션_범위_정의" + +#: sql_help.c:1119 sql_help.c:2493 +msgid "sequence_options" +msgstr "시퀀스_옵션" + +#: sql_help.c:1121 +msgid "sequence_option" +msgstr "시퀀스_옵션" + +#: sql_help.c:1133 msgid "table_constraint_using_index" msgstr "" -#: sql_help.c:1033 sql_help.c:1034 sql_help.c:1035 sql_help.c:1036 +#: sql_help.c:1141 sql_help.c:1142 sql_help.c:1143 sql_help.c:1144 msgid "rewrite_rule_name" msgstr "" -#: sql_help.c:1047 +#: sql_help.c:1155 msgid "and table_constraint_using_index is:" msgstr "" -#: sql_help.c:1065 sql_help.c:1068 sql_help.c:2324 +#: sql_help.c:1173 sql_help.c:1176 sql_help.c:2565 msgid "tablespace_option" msgstr "테이블스페이스_옵션" -#: sql_help.c:1089 sql_help.c:1092 sql_help.c:1098 sql_help.c:1102 +#: sql_help.c:1197 sql_help.c:1200 sql_help.c:1206 sql_help.c:1210 msgid "token_type" msgstr "토큰_종류" -#: sql_help.c:1090 sql_help.c:1093 +#: sql_help.c:1198 sql_help.c:1201 msgid "dictionary_name" msgstr "사전이름" -#: sql_help.c:1095 sql_help.c:1099 +#: sql_help.c:1203 sql_help.c:1207 msgid "old_dictionary" msgstr "옛사전" -#: sql_help.c:1096 sql_help.c:1100 +#: sql_help.c:1204 sql_help.c:1208 msgid "new_dictionary" msgstr "새사전" -#: sql_help.c:1191 sql_help.c:1201 sql_help.c:1204 sql_help.c:1205 -#: sql_help.c:2473 +#: sql_help.c:1300 sql_help.c:1313 sql_help.c:1316 sql_help.c:1317 +#: sql_help.c:2716 msgid "attribute_name" msgstr "속성이름" -#: sql_help.c:1192 +#: sql_help.c:1301 msgid "new_attribute_name" msgstr "새속성이름" -#: sql_help.c:1198 +#: sql_help.c:1307 sql_help.c:1311 msgid "new_enum_value" msgstr "" -#: sql_help.c:1199 +#: sql_help.c:1308 +msgid "neighbor_enum_value" +msgstr "" + +#: sql_help.c:1310 msgid "existing_enum_value" msgstr "" -#: sql_help.c:1269 sql_help.c:1841 sql_help.c:2167 sql_help.c:2551 -#: sql_help.c:2948 sql_help.c:3119 sql_help.c:3154 sql_help.c:3451 +#: sql_help.c:1385 sql_help.c:1976 sql_help.c:1985 sql_help.c:2329 +#: sql_help.c:2794 sql_help.c:3215 sql_help.c:3388 sql_help.c:3423 +#: sql_help.c:3721 msgid "server_name" msgstr "서버이름" -#: sql_help.c:1297 sql_help.c:1300 sql_help.c:2566 +#: sql_help.c:1413 sql_help.c:1416 sql_help.c:2809 msgid "view_option_name" msgstr "뷰_옵션이름" -#: sql_help.c:1298 sql_help.c:2567 +#: sql_help.c:1414 sql_help.c:2810 msgid "view_option_value" msgstr "" -#: sql_help.c:1323 sql_help.c:3779 sql_help.c:3781 sql_help.c:3805 +#: sql_help.c:1439 sql_help.c:4053 sql_help.c:4055 sql_help.c:4079 msgid "transaction_mode" msgstr "트랜잭션모드" -#: sql_help.c:1324 sql_help.c:3782 sql_help.c:3806 +#: sql_help.c:1440 sql_help.c:4056 sql_help.c:4080 msgid "where transaction_mode is one of:" msgstr "트랜잭션모드 사용법:" -#: sql_help.c:1409 +#: sql_help.c:1528 msgid "relation_name" msgstr "릴레이션이름" -#: sql_help.c:1414 
sql_help.c:3115 sql_help.c:3447 +#: sql_help.c:1533 sql_help.c:3384 sql_help.c:3717 msgid "domain_name" msgstr "도메인이름" -#: sql_help.c:1436 +#: sql_help.c:1555 msgid "policy_name" msgstr "정책이름" -#: sql_help.c:1440 +#: sql_help.c:1560 msgid "rule_name" msgstr "룰이름" -#: sql_help.c:1457 +#: sql_help.c:1579 msgid "text" msgstr "" -#: sql_help.c:1482 sql_help.c:3297 sql_help.c:3485 +#: sql_help.c:1604 sql_help.c:3567 sql_help.c:3755 msgid "transaction_id" -msgstr "" +msgstr "트랜잭션_id" -#: sql_help.c:1513 sql_help.c:1519 sql_help.c:3223 +#: sql_help.c:1635 sql_help.c:1641 sql_help.c:3493 msgid "filename" msgstr "파일이름" -#: sql_help.c:1514 sql_help.c:1520 sql_help.c:2110 sql_help.c:2111 -#: sql_help.c:2112 +#: sql_help.c:1636 sql_help.c:1642 sql_help.c:2269 sql_help.c:2270 +#: sql_help.c:2271 msgid "command" msgstr "명령어" -#: sql_help.c:1518 sql_help.c:1981 sql_help.c:2310 sql_help.c:2568 -#: sql_help.c:2586 sql_help.c:3188 +#: sql_help.c:1640 sql_help.c:2125 sql_help.c:2551 sql_help.c:2811 +#: sql_help.c:2829 sql_help.c:3458 msgid "query" msgstr "쿼리문" -#: sql_help.c:1522 sql_help.c:2993 +#: sql_help.c:1644 sql_help.c:3261 msgid "where option can be one of:" msgstr "옵션 사용법:" -#: sql_help.c:1523 +#: sql_help.c:1645 msgid "format_name" msgstr "입출력양식이름" -#: sql_help.c:1524 sql_help.c:1525 sql_help.c:1528 sql_help.c:2994 -#: sql_help.c:2995 sql_help.c:2996 sql_help.c:2997 sql_help.c:2998 +#: sql_help.c:1646 sql_help.c:1647 sql_help.c:1650 sql_help.c:3262 +#: sql_help.c:3263 sql_help.c:3264 sql_help.c:3265 sql_help.c:3266 +#: sql_help.c:3267 msgid "boolean" msgstr "" -#: sql_help.c:1526 +#: sql_help.c:1648 msgid "delimiter_character" msgstr "구분문자" -#: sql_help.c:1527 +#: sql_help.c:1649 msgid "null_string" msgstr "널문자열" -#: sql_help.c:1529 +#: sql_help.c:1651 msgid "quote_character" msgstr "인용부호" -#: sql_help.c:1530 +#: sql_help.c:1652 msgid "escape_character" msgstr "이스케이프 문자" -#: sql_help.c:1534 +#: sql_help.c:1656 msgid "encoding_name" msgstr "인코딩이름" -#: sql_help.c:1545 +#: sql_help.c:1667 msgid "access_method_type" msgstr "" -#: sql_help.c:1611 sql_help.c:1630 sql_help.c:1633 +#: sql_help.c:1733 sql_help.c:1752 sql_help.c:1755 msgid "arg_data_type" msgstr "인자자료형" -#: sql_help.c:1612 sql_help.c:1634 sql_help.c:1642 +#: sql_help.c:1734 sql_help.c:1756 sql_help.c:1764 msgid "sfunc" msgstr "" -#: sql_help.c:1613 sql_help.c:1635 sql_help.c:1643 +#: sql_help.c:1735 sql_help.c:1757 sql_help.c:1765 msgid "state_data_type" msgstr "" -#: sql_help.c:1614 sql_help.c:1636 sql_help.c:1644 +#: sql_help.c:1736 sql_help.c:1758 sql_help.c:1766 msgid "state_data_size" msgstr "" -#: sql_help.c:1615 sql_help.c:1637 sql_help.c:1645 +#: sql_help.c:1737 sql_help.c:1759 sql_help.c:1767 msgid "ffunc" msgstr "" -#: sql_help.c:1616 sql_help.c:1646 +#: sql_help.c:1738 sql_help.c:1768 msgid "combinefunc" msgstr "" -#: sql_help.c:1617 sql_help.c:1647 +#: sql_help.c:1739 sql_help.c:1769 msgid "serialfunc" msgstr "" -#: sql_help.c:1618 sql_help.c:1648 +#: sql_help.c:1740 sql_help.c:1770 msgid "deserialfunc" msgstr "" -#: sql_help.c:1619 sql_help.c:1638 sql_help.c:1649 +#: sql_help.c:1741 sql_help.c:1760 sql_help.c:1771 msgid "initial_condition" msgstr "" -#: sql_help.c:1620 sql_help.c:1650 +#: sql_help.c:1742 sql_help.c:1772 msgid "msfunc" msgstr "" -#: sql_help.c:1621 sql_help.c:1651 +#: sql_help.c:1743 sql_help.c:1773 msgid "minvfunc" msgstr "" -#: sql_help.c:1622 sql_help.c:1652 +#: sql_help.c:1744 sql_help.c:1774 msgid "mstate_data_type" msgstr "" -#: sql_help.c:1623 sql_help.c:1653 +#: sql_help.c:1745 sql_help.c:1775 msgid 
"mstate_data_size" msgstr "" -#: sql_help.c:1624 sql_help.c:1654 +#: sql_help.c:1746 sql_help.c:1776 msgid "mffunc" msgstr "" -#: sql_help.c:1625 sql_help.c:1655 +#: sql_help.c:1747 sql_help.c:1777 msgid "minitial_condition" msgstr "" -#: sql_help.c:1626 sql_help.c:1656 +#: sql_help.c:1748 sql_help.c:1778 msgid "sort_operator" msgstr "정렬연산자" -#: sql_help.c:1639 +#: sql_help.c:1761 msgid "or the old syntax" -msgstr "" +msgstr "또는 옛날 구문" -#: sql_help.c:1641 +#: sql_help.c:1763 msgid "base_type" msgstr "기본자료형" -#: sql_help.c:1695 +#: sql_help.c:1819 msgid "locale" msgstr "로케일" -#: sql_help.c:1696 sql_help.c:1732 +#: sql_help.c:1820 sql_help.c:1858 msgid "lc_collate" -msgstr "" +msgstr "lc_collate" -#: sql_help.c:1697 sql_help.c:1733 +#: sql_help.c:1821 sql_help.c:1859 msgid "lc_ctype" msgstr "lc_ctype" -#: sql_help.c:1699 +#: sql_help.c:1822 sql_help.c:3806 +msgid "provider" +msgstr "제공자" + +#: sql_help.c:1823 sql_help.c:1914 +msgid "version" +msgstr "버전" + +#: sql_help.c:1825 msgid "existing_collation" msgstr "" -#: sql_help.c:1709 +#: sql_help.c:1835 msgid "source_encoding" msgstr "원래인코딩" -#: sql_help.c:1710 +#: sql_help.c:1836 msgid "dest_encoding" msgstr "대상인코딩" -#: sql_help.c:1730 sql_help.c:2350 +#: sql_help.c:1856 sql_help.c:2591 msgid "template" msgstr "템플릿" -#: sql_help.c:1731 +#: sql_help.c:1857 msgid "encoding" msgstr "인코딩" -#: sql_help.c:1757 +#: sql_help.c:1883 msgid "constraint" msgstr "제약조건" -#: sql_help.c:1758 +#: sql_help.c:1884 msgid "where constraint is:" msgstr "제약조건 사용법:" -#: sql_help.c:1772 sql_help.c:2107 sql_help.c:2422 +#: sql_help.c:1898 sql_help.c:2266 sql_help.c:2664 msgid "event" msgstr "이벤트" -#: sql_help.c:1773 +#: sql_help.c:1899 msgid "filter_variable" msgstr "" -#: sql_help.c:1788 -msgid "version" -msgstr "버전" - -#: sql_help.c:1789 +#: sql_help.c:1915 msgid "old_version" msgstr "옛버전" -#: sql_help.c:1844 sql_help.c:2256 +#: sql_help.c:1988 sql_help.c:2489 msgid "where column_constraint is:" msgstr "칼럼_제약조건 사용법:" -#: sql_help.c:1847 sql_help.c:1879 sql_help.c:2259 +#: sql_help.c:1991 sql_help.c:2023 sql_help.c:2492 msgid "default_expr" msgstr "초기값_표현식" -#: sql_help.c:1848 sql_help.c:2266 +#: sql_help.c:1992 sql_help.c:2500 msgid "and table_constraint is:" msgstr "테이블_제약조건 사용법:" -#: sql_help.c:1880 +#: sql_help.c:2024 msgid "rettype" msgstr "" -#: sql_help.c:1882 +#: sql_help.c:2026 msgid "column_type" msgstr "" -#: sql_help.c:1890 +#: sql_help.c:2034 msgid "definition" msgstr "함수정의" -#: sql_help.c:1891 +#: sql_help.c:2035 msgid "obj_file" msgstr "오브젝트파일" -#: sql_help.c:1892 +#: sql_help.c:2036 msgid "link_symbol" msgstr "연결할_함수명" -#: sql_help.c:1893 +#: sql_help.c:2037 msgid "attribute" msgstr "속성" -#: sql_help.c:1927 sql_help.c:2092 sql_help.c:2540 +#: sql_help.c:2071 sql_help.c:2251 sql_help.c:2783 msgid "uid" msgstr "" -#: sql_help.c:1941 +#: sql_help.c:2085 msgid "method" msgstr "색인방법" -#: sql_help.c:1945 sql_help.c:2291 sql_help.c:3197 +#: sql_help.c:2089 sql_help.c:2460 sql_help.c:2472 sql_help.c:2485 +#: sql_help.c:2532 sql_help.c:3467 msgid "opclass" -msgstr "" +msgstr "연산자클래스" -#: sql_help.c:1949 sql_help.c:2277 +#: sql_help.c:2093 sql_help.c:2511 msgid "predicate" msgstr "범위한정구문" -#: sql_help.c:1961 +#: sql_help.c:2105 msgid "call_handler" msgstr "" -#: sql_help.c:1962 +#: sql_help.c:2106 msgid "inline_handler" msgstr "" -#: sql_help.c:1963 +#: sql_help.c:2107 msgid "valfunction" msgstr "구문검사함수" -#: sql_help.c:1999 +#: sql_help.c:2143 msgid "com_op" msgstr "" -#: sql_help.c:2000 +#: sql_help.c:2144 msgid "neg_op" msgstr "" -#: sql_help.c:2018 +#: 
sql_help.c:2162 msgid "family_name" msgstr "" -#: sql_help.c:2029 +#: sql_help.c:2173 msgid "storage_type" msgstr "스토리지_유형" -#: sql_help.c:2109 sql_help.c:2425 sql_help.c:2602 sql_help.c:3207 -#: sql_help.c:3622 sql_help.c:3624 sql_help.c:3712 sql_help.c:3714 -#: sql_help.c:3861 sql_help.c:3863 sql_help.c:3966 sql_help.c:4055 -#: sql_help.c:4057 +#: sql_help.c:2268 sql_help.c:2668 sql_help.c:2845 sql_help.c:3477 +#: sql_help.c:3896 sql_help.c:3898 sql_help.c:3986 sql_help.c:3988 +#: sql_help.c:4135 sql_help.c:4137 sql_help.c:4240 sql_help.c:4329 +#: sql_help.c:4331 msgid "condition" msgstr "조건" -#: sql_help.c:2113 sql_help.c:2428 +#: sql_help.c:2272 sql_help.c:2671 msgid "where event can be one of:" msgstr "이벤트 사용법:" -#: sql_help.c:2132 sql_help.c:2134 +#: sql_help.c:2291 sql_help.c:2293 msgid "schema_element" msgstr "" -#: sql_help.c:2168 +#: sql_help.c:2330 msgid "server_type" msgstr "서버_종류" -#: sql_help.c:2169 +#: sql_help.c:2331 msgid "server_version" msgstr "서버_버전" -#: sql_help.c:2170 sql_help.c:3117 sql_help.c:3449 +#: sql_help.c:2332 sql_help.c:3386 sql_help.c:3719 msgid "fdw_name" -msgstr "" +msgstr "fdw_이름" + +#: sql_help.c:2345 +msgid "statistics_name" +msgstr "통계정보_이름" -#: sql_help.c:2242 +#: sql_help.c:2346 +msgid "statistics_kind" +msgstr "통계정보_종류" + +#: sql_help.c:2360 +msgid "subscription_name" +msgstr "구독_이름" + +#: sql_help.c:2454 msgid "source_table" msgstr "원본테이블" -#: sql_help.c:2243 +#: sql_help.c:2455 msgid "like_option" msgstr "LIKE구문옵션" -#: sql_help.c:2260 sql_help.c:2261 sql_help.c:2270 sql_help.c:2272 -#: sql_help.c:2276 +#: sql_help.c:2494 sql_help.c:2495 sql_help.c:2504 sql_help.c:2506 +#: sql_help.c:2510 msgid "index_parameters" msgstr "색인매개변수" -#: sql_help.c:2262 sql_help.c:2279 +#: sql_help.c:2496 sql_help.c:2513 msgid "reftable" msgstr "참조테이블" -#: sql_help.c:2263 sql_help.c:2280 +#: sql_help.c:2497 sql_help.c:2514 msgid "refcolumn" msgstr "참조칼럼" -#: sql_help.c:2274 +#: sql_help.c:2508 msgid "exclude_element" msgstr "" -#: sql_help.c:2275 sql_help.c:3629 sql_help.c:3719 sql_help.c:3868 -#: sql_help.c:3997 sql_help.c:4062 +#: sql_help.c:2509 sql_help.c:3903 sql_help.c:3993 sql_help.c:4142 +#: sql_help.c:4271 sql_help.c:4336 msgid "operator" msgstr "연산자" -#: sql_help.c:2283 +#: sql_help.c:2517 msgid "and like_option is:" msgstr "" -#: sql_help.c:2284 +#: sql_help.c:2518 +msgid "and partition_bound_spec is:" +msgstr "" + +#: sql_help.c:2519 sql_help.c:2521 sql_help.c:2523 +msgid "numeric_literal" +msgstr "" + +#: sql_help.c:2520 sql_help.c:2522 sql_help.c:2524 +msgid "string_literal" +msgstr "" + +#: sql_help.c:2525 msgid "index_parameters in UNIQUE, PRIMARY KEY, and EXCLUDE constraints are:" msgstr "" -#: sql_help.c:2288 +#: sql_help.c:2529 msgid "exclude_element in an EXCLUDE constraint is:" msgstr "" -#: sql_help.c:2323 +#: sql_help.c:2564 msgid "directory" msgstr "디렉터리" -#: sql_help.c:2337 +#: sql_help.c:2578 msgid "parser_name" msgstr "구문분석기_이름" -#: sql_help.c:2338 +#: sql_help.c:2579 msgid "source_config" msgstr "원본_설정" -#: sql_help.c:2367 +#: sql_help.c:2608 msgid "start_function" msgstr "시작_함수" -#: sql_help.c:2368 +#: sql_help.c:2609 msgid "gettoken_function" msgstr "gettoken함수" -#: sql_help.c:2369 +#: sql_help.c:2610 msgid "end_function" msgstr "종료_함수" -#: sql_help.c:2370 +#: sql_help.c:2611 msgid "lextypes_function" msgstr "lextypes함수" -#: sql_help.c:2371 +#: sql_help.c:2612 msgid "headline_function" msgstr "headline함수" -#: sql_help.c:2383 +#: sql_help.c:2624 msgid "init_function" msgstr "init함수" -#: sql_help.c:2384 +#: sql_help.c:2625 msgid 
"lexize_function" msgstr "lexize함수" -#: sql_help.c:2397 +#: sql_help.c:2638 msgid "from_sql_function_name" msgstr "" -#: sql_help.c:2399 +#: sql_help.c:2640 msgid "to_sql_function_name" msgstr "" -#: sql_help.c:2424 +#: sql_help.c:2666 msgid "referenced_table_name" msgstr "" -#: sql_help.c:2427 +#: sql_help.c:2667 +msgid "transition_relation_name" +msgstr "전달_릴레이션이름" + +#: sql_help.c:2670 msgid "arguments" msgstr "인자들" -#: sql_help.c:2477 sql_help.c:3557 +#: sql_help.c:2720 sql_help.c:3831 msgid "label" msgstr "" -#: sql_help.c:2479 +#: sql_help.c:2722 msgid "subtype" msgstr "" -#: sql_help.c:2480 +#: sql_help.c:2723 msgid "subtype_operator_class" msgstr "" -#: sql_help.c:2482 +#: sql_help.c:2725 msgid "canonical_function" msgstr "" -#: sql_help.c:2483 +#: sql_help.c:2726 msgid "subtype_diff_function" msgstr "" -#: sql_help.c:2485 +#: sql_help.c:2728 msgid "input_function" msgstr "입력함수" -#: sql_help.c:2486 +#: sql_help.c:2729 msgid "output_function" msgstr "출력함수" -#: sql_help.c:2487 +#: sql_help.c:2730 msgid "receive_function" msgstr "받는함수" -#: sql_help.c:2488 +#: sql_help.c:2731 msgid "send_function" msgstr "주는함수" -#: sql_help.c:2489 +#: sql_help.c:2732 msgid "type_modifier_input_function" msgstr "" -#: sql_help.c:2490 +#: sql_help.c:2733 msgid "type_modifier_output_function" msgstr "" -#: sql_help.c:2491 +#: sql_help.c:2734 msgid "analyze_function" msgstr "분석함수" -#: sql_help.c:2492 +#: sql_help.c:2735 msgid "internallength" msgstr "" -#: sql_help.c:2493 +#: sql_help.c:2736 msgid "alignment" msgstr "정렬" -#: sql_help.c:2494 +#: sql_help.c:2737 msgid "storage" msgstr "스토리지" -#: sql_help.c:2495 +#: sql_help.c:2738 msgid "like_type" msgstr "" -#: sql_help.c:2496 +#: sql_help.c:2739 msgid "category" msgstr "" -#: sql_help.c:2497 +#: sql_help.c:2740 msgid "preferred" msgstr "" -#: sql_help.c:2498 +#: sql_help.c:2741 msgid "default" msgstr "기본값" -#: sql_help.c:2499 +#: sql_help.c:2742 msgid "element" msgstr "요소" -#: sql_help.c:2500 +#: sql_help.c:2743 msgid "delimiter" msgstr "구분자" -#: sql_help.c:2501 +#: sql_help.c:2744 msgid "collatable" msgstr "" -#: sql_help.c:2598 sql_help.c:3183 sql_help.c:3617 sql_help.c:3706 -#: sql_help.c:3856 sql_help.c:3956 sql_help.c:4050 +#: sql_help.c:2841 sql_help.c:3453 sql_help.c:3891 sql_help.c:3980 +#: sql_help.c:4130 sql_help.c:4230 sql_help.c:4324 msgid "with_query" msgstr "" -#: sql_help.c:2600 sql_help.c:3185 sql_help.c:3636 sql_help.c:3642 -#: sql_help.c:3645 sql_help.c:3649 sql_help.c:3653 sql_help.c:3661 -#: sql_help.c:3875 sql_help.c:3881 sql_help.c:3884 sql_help.c:3888 -#: sql_help.c:3892 sql_help.c:3900 sql_help.c:3958 sql_help.c:4069 -#: sql_help.c:4075 sql_help.c:4078 sql_help.c:4082 sql_help.c:4086 -#: sql_help.c:4094 +#: sql_help.c:2843 sql_help.c:3455 sql_help.c:3910 sql_help.c:3916 +#: sql_help.c:3919 sql_help.c:3923 sql_help.c:3927 sql_help.c:3935 +#: sql_help.c:4149 sql_help.c:4155 sql_help.c:4158 sql_help.c:4162 +#: sql_help.c:4166 sql_help.c:4174 sql_help.c:4232 sql_help.c:4343 +#: sql_help.c:4349 sql_help.c:4352 sql_help.c:4356 sql_help.c:4360 +#: sql_help.c:4368 msgid "alias" -msgstr "" +msgstr "별칭" -#: sql_help.c:2601 +#: sql_help.c:2844 msgid "using_list" msgstr "" -#: sql_help.c:2603 sql_help.c:3024 sql_help.c:3264 sql_help.c:3967 +#: sql_help.c:2846 sql_help.c:3293 sql_help.c:3534 sql_help.c:4241 msgid "cursor_name" msgstr "커서이름" -#: sql_help.c:2604 sql_help.c:3191 sql_help.c:3968 +#: sql_help.c:2847 sql_help.c:3461 sql_help.c:4242 msgid "output_expression" msgstr "출력표현식" -#: sql_help.c:2605 sql_help.c:3192 sql_help.c:3620 
sql_help.c:3709 -#: sql_help.c:3859 sql_help.c:3969 sql_help.c:4053 +#: sql_help.c:2848 sql_help.c:3462 sql_help.c:3894 sql_help.c:3983 +#: sql_help.c:4133 sql_help.c:4243 sql_help.c:4327 msgid "output_name" msgstr "" -#: sql_help.c:2621 +#: sql_help.c:2864 msgid "code" msgstr "" -#: sql_help.c:2972 +#: sql_help.c:3239 msgid "parameter" msgstr "매개변수" -#: sql_help.c:2991 sql_help.c:2992 sql_help.c:3289 +#: sql_help.c:3259 sql_help.c:3260 sql_help.c:3559 msgid "statement" msgstr "명령구문" -#: sql_help.c:3023 sql_help.c:3263 +#: sql_help.c:3292 sql_help.c:3533 msgid "direction" msgstr "방향" -#: sql_help.c:3025 sql_help.c:3265 +#: sql_help.c:3294 sql_help.c:3535 msgid "where direction can be empty or one of:" msgstr "방향 자리는 비워두거나 다음 중 하나:" -#: sql_help.c:3026 sql_help.c:3027 sql_help.c:3028 sql_help.c:3029 -#: sql_help.c:3030 sql_help.c:3266 sql_help.c:3267 sql_help.c:3268 -#: sql_help.c:3269 sql_help.c:3270 sql_help.c:3630 sql_help.c:3632 -#: sql_help.c:3720 sql_help.c:3722 sql_help.c:3869 sql_help.c:3871 -#: sql_help.c:3998 sql_help.c:4000 sql_help.c:4063 sql_help.c:4065 +#: sql_help.c:3295 sql_help.c:3296 sql_help.c:3297 sql_help.c:3298 +#: sql_help.c:3299 sql_help.c:3536 sql_help.c:3537 sql_help.c:3538 +#: sql_help.c:3539 sql_help.c:3540 sql_help.c:3904 sql_help.c:3906 +#: sql_help.c:3994 sql_help.c:3996 sql_help.c:4143 sql_help.c:4145 +#: sql_help.c:4272 sql_help.c:4274 sql_help.c:4337 sql_help.c:4339 msgid "count" msgstr "출력개수" -#: sql_help.c:3110 sql_help.c:3442 +#: sql_help.c:3379 sql_help.c:3712 msgid "sequence_name" msgstr "시퀀스이름" -#: sql_help.c:3123 sql_help.c:3455 +#: sql_help.c:3392 sql_help.c:3725 msgid "arg_name" msgstr "인자이름" -#: sql_help.c:3124 sql_help.c:3456 +#: sql_help.c:3393 sql_help.c:3726 msgid "arg_type" msgstr "인자자료형" -#: sql_help.c:3129 sql_help.c:3461 +#: sql_help.c:3398 sql_help.c:3731 msgid "loid" msgstr "" -#: sql_help.c:3152 +#: sql_help.c:3421 msgid "remote_schema" msgstr "원격_스키마" -#: sql_help.c:3155 +#: sql_help.c:3424 msgid "local_schema" msgstr "로컬_스키마" -#: sql_help.c:3189 +#: sql_help.c:3459 msgid "conflict_target" msgstr "" -#: sql_help.c:3190 +#: sql_help.c:3460 msgid "conflict_action" msgstr "" -#: sql_help.c:3193 +#: sql_help.c:3463 msgid "where conflict_target can be one of:" msgstr "conflict_target 사용법:" -#: sql_help.c:3194 +#: sql_help.c:3464 msgid "index_column_name" msgstr "인덱스칼럼이름" -#: sql_help.c:3195 +#: sql_help.c:3465 msgid "index_expression" msgstr "인덱스표현식" -#: sql_help.c:3198 +#: sql_help.c:3468 msgid "index_predicate" msgstr "" -#: sql_help.c:3200 +#: sql_help.c:3470 msgid "and conflict_action is one of:" msgstr "conflict_action 사용법:" -#: sql_help.c:3206 sql_help.c:3964 +#: sql_help.c:3476 sql_help.c:4238 msgid "sub-SELECT" msgstr "" -#: sql_help.c:3215 sql_help.c:3278 sql_help.c:3940 +#: sql_help.c:3485 sql_help.c:3548 sql_help.c:4214 msgid "channel" msgstr "" -#: sql_help.c:3237 +#: sql_help.c:3507 msgid "lockmode" msgstr "" -#: sql_help.c:3238 +#: sql_help.c:3508 msgid "where lockmode is one of:" msgstr "lockmode 사용법:" -#: sql_help.c:3279 +#: sql_help.c:3549 msgid "payload" msgstr "" -#: sql_help.c:3306 +#: sql_help.c:3576 msgid "old_role" msgstr "기존롤" -#: sql_help.c:3307 +#: sql_help.c:3577 msgid "new_role" msgstr "새롤" -#: sql_help.c:3332 sql_help.c:3493 sql_help.c:3501 +#: sql_help.c:3602 sql_help.c:3763 sql_help.c:3771 msgid "savepoint_name" msgstr "savepoint_name" -#: sql_help.c:3534 -msgid "provider" -msgstr "" - -#: sql_help.c:3621 sql_help.c:3663 sql_help.c:3665 sql_help.c:3711 -#: sql_help.c:3860 sql_help.c:3902 sql_help.c:3904 
sql_help.c:4054 -#: sql_help.c:4096 sql_help.c:4098 +#: sql_help.c:3895 sql_help.c:3937 sql_help.c:3939 sql_help.c:3985 +#: sql_help.c:4134 sql_help.c:4176 sql_help.c:4178 sql_help.c:4328 +#: sql_help.c:4370 sql_help.c:4372 msgid "from_item" msgstr "" -#: sql_help.c:3623 sql_help.c:3675 sql_help.c:3862 sql_help.c:3914 -#: sql_help.c:4056 sql_help.c:4108 +#: sql_help.c:3897 sql_help.c:3949 sql_help.c:4136 sql_help.c:4188 +#: sql_help.c:4330 sql_help.c:4382 msgid "grouping_element" msgstr "" -#: sql_help.c:3625 sql_help.c:3715 sql_help.c:3864 sql_help.c:4058 +#: sql_help.c:3899 sql_help.c:3989 sql_help.c:4138 sql_help.c:4332 msgid "window_name" msgstr "윈도우이름" -#: sql_help.c:3626 sql_help.c:3716 sql_help.c:3865 sql_help.c:4059 +#: sql_help.c:3900 sql_help.c:3990 sql_help.c:4139 sql_help.c:4333 msgid "window_definition" msgstr "원도우정의" -#: sql_help.c:3627 sql_help.c:3641 sql_help.c:3679 sql_help.c:3717 -#: sql_help.c:3866 sql_help.c:3880 sql_help.c:3918 sql_help.c:4060 -#: sql_help.c:4074 sql_help.c:4112 +#: sql_help.c:3901 sql_help.c:3915 sql_help.c:3953 sql_help.c:3991 +#: sql_help.c:4140 sql_help.c:4154 sql_help.c:4192 sql_help.c:4334 +#: sql_help.c:4348 sql_help.c:4386 msgid "select" msgstr "" -#: sql_help.c:3634 sql_help.c:3873 sql_help.c:4067 +#: sql_help.c:3908 sql_help.c:4147 sql_help.c:4341 msgid "where from_item can be one of:" msgstr "" -#: sql_help.c:3637 sql_help.c:3643 sql_help.c:3646 sql_help.c:3650 -#: sql_help.c:3662 sql_help.c:3876 sql_help.c:3882 sql_help.c:3885 -#: sql_help.c:3889 sql_help.c:3901 sql_help.c:4070 sql_help.c:4076 -#: sql_help.c:4079 sql_help.c:4083 sql_help.c:4095 +#: sql_help.c:3911 sql_help.c:3917 sql_help.c:3920 sql_help.c:3924 +#: sql_help.c:3936 sql_help.c:4150 sql_help.c:4156 sql_help.c:4159 +#: sql_help.c:4163 sql_help.c:4175 sql_help.c:4344 sql_help.c:4350 +#: sql_help.c:4353 sql_help.c:4357 sql_help.c:4369 msgid "column_alias" msgstr "칼럼별칭" -#: sql_help.c:3638 sql_help.c:3877 sql_help.c:4071 +#: sql_help.c:3912 sql_help.c:4151 sql_help.c:4345 msgid "sampling_method" msgstr "표본추출방법" -#: sql_help.c:3639 sql_help.c:3648 sql_help.c:3652 sql_help.c:3656 -#: sql_help.c:3659 sql_help.c:3878 sql_help.c:3887 sql_help.c:3891 -#: sql_help.c:3895 sql_help.c:3898 sql_help.c:4072 sql_help.c:4081 -#: sql_help.c:4085 sql_help.c:4089 sql_help.c:4092 +#: sql_help.c:3913 sql_help.c:3922 sql_help.c:3926 sql_help.c:3930 +#: sql_help.c:3933 sql_help.c:4152 sql_help.c:4161 sql_help.c:4165 +#: sql_help.c:4169 sql_help.c:4172 sql_help.c:4346 sql_help.c:4355 +#: sql_help.c:4359 sql_help.c:4363 sql_help.c:4366 msgid "argument" msgstr "인자" -#: sql_help.c:3640 sql_help.c:3879 sql_help.c:4073 +#: sql_help.c:3914 sql_help.c:4153 sql_help.c:4347 msgid "seed" msgstr "" -#: sql_help.c:3644 sql_help.c:3677 sql_help.c:3883 sql_help.c:3916 -#: sql_help.c:4077 sql_help.c:4110 +#: sql_help.c:3918 sql_help.c:3951 sql_help.c:4157 sql_help.c:4190 +#: sql_help.c:4351 sql_help.c:4384 msgid "with_query_name" msgstr "" -#: sql_help.c:3654 sql_help.c:3657 sql_help.c:3660 sql_help.c:3893 -#: sql_help.c:3896 sql_help.c:3899 sql_help.c:4087 sql_help.c:4090 -#: sql_help.c:4093 +#: sql_help.c:3928 sql_help.c:3931 sql_help.c:3934 sql_help.c:4167 +#: sql_help.c:4170 sql_help.c:4173 sql_help.c:4361 sql_help.c:4364 +#: sql_help.c:4367 msgid "column_definition" msgstr "칼럼정의" -#: sql_help.c:3664 sql_help.c:3903 sql_help.c:4097 +#: sql_help.c:3938 sql_help.c:4177 sql_help.c:4371 msgid "join_type" msgstr "" -#: sql_help.c:3666 sql_help.c:3905 sql_help.c:4099 +#: sql_help.c:3940 sql_help.c:4179 sql_help.c:4373 
msgid "join_condition" msgstr "" -#: sql_help.c:3667 sql_help.c:3906 sql_help.c:4100 +#: sql_help.c:3941 sql_help.c:4180 sql_help.c:4374 msgid "join_column" msgstr "" -#: sql_help.c:3668 sql_help.c:3907 sql_help.c:4101 +#: sql_help.c:3942 sql_help.c:4181 sql_help.c:4375 msgid "and grouping_element can be one of:" msgstr "" -#: sql_help.c:3676 sql_help.c:3915 sql_help.c:4109 +#: sql_help.c:3950 sql_help.c:4189 sql_help.c:4383 msgid "and with_query is:" msgstr "" -#: sql_help.c:3680 sql_help.c:3919 sql_help.c:4113 +#: sql_help.c:3954 sql_help.c:4193 sql_help.c:4387 msgid "values" msgstr "값" -#: sql_help.c:3681 sql_help.c:3920 sql_help.c:4114 +#: sql_help.c:3955 sql_help.c:4194 sql_help.c:4388 msgid "insert" msgstr "" -#: sql_help.c:3682 sql_help.c:3921 sql_help.c:4115 +#: sql_help.c:3956 sql_help.c:4195 sql_help.c:4389 msgid "update" msgstr "" -#: sql_help.c:3683 sql_help.c:3922 sql_help.c:4116 +#: sql_help.c:3957 sql_help.c:4196 sql_help.c:4390 msgid "delete" msgstr "" -#: sql_help.c:3710 +#: sql_help.c:3984 msgid "new_table" msgstr "새테이블" -#: sql_help.c:3735 +#: sql_help.c:4009 msgid "timezone" msgstr "" -#: sql_help.c:3780 +#: sql_help.c:4054 msgid "snapshot_id" msgstr "" -#: sql_help.c:3965 +#: sql_help.c:4239 msgid "from_list" msgstr "" -#: sql_help.c:3996 +#: sql_help.c:4270 msgid "sort_expression" msgstr "" -#: sql_help.c:4123 sql_help.c:4863 +#: sql_help.c:4397 sql_help.c:5182 msgid "abort the current transaction" msgstr "현재 트랜잭션 중지함" -#: sql_help.c:4128 +#: sql_help.c:4402 msgid "change the definition of an aggregate function" msgstr "집계함수 정보 바꾸기" -#: sql_help.c:4133 +#: sql_help.c:4407 msgid "change the definition of a collation" msgstr "collation 정의 바꾸기" -#: sql_help.c:4138 +#: sql_help.c:4412 msgid "change the definition of a conversion" msgstr "문자코드 변환규칙(conversion) 정보 바꾸기" -#: sql_help.c:4143 +#: sql_help.c:4417 msgid "change a database" msgstr "데이터베이스 변경" -#: sql_help.c:4148 +#: sql_help.c:4422 msgid "define default access privileges" msgstr "기본 접근 권한 정의" -#: sql_help.c:4153 +#: sql_help.c:4427 msgid "change the definition of a domain" msgstr "도메인 정보 바꾸기" -#: sql_help.c:4158 +#: sql_help.c:4432 msgid "change the definition of an event trigger" msgstr "트리거 정보 바꾸기" -#: sql_help.c:4163 +#: sql_help.c:4437 msgid "change the definition of an extension" msgstr "확장모듈 정의 바꾸기" -#: sql_help.c:4168 +#: sql_help.c:4442 msgid "change the definition of a foreign-data wrapper" msgstr "외부 데이터 래퍼 정의 바꾸기" -#: sql_help.c:4173 +#: sql_help.c:4447 msgid "change the definition of a foreign table" msgstr "외부 테이블 정의 바꾸기" -#: sql_help.c:4178 +#: sql_help.c:4452 msgid "change the definition of a function" msgstr "함수 정보 바꾸기" -#: sql_help.c:4183 +#: sql_help.c:4457 msgid "change role name or membership" msgstr "롤 이름이나 맴버쉽 바꾸기" -#: sql_help.c:4188 +#: sql_help.c:4462 msgid "change the definition of an index" msgstr "인덱스 정의 바꾸기" -#: sql_help.c:4193 +#: sql_help.c:4467 msgid "change the definition of a procedural language" msgstr "procedural language 정보 바꾸기" -#: sql_help.c:4198 +#: sql_help.c:4472 msgid "change the definition of a large object" msgstr "대형 객체 정의 바꾸기" -#: sql_help.c:4203 +#: sql_help.c:4477 msgid "change the definition of a materialized view" msgstr "materialized 뷰 정의 바꾸기" -#: sql_help.c:4208 +#: sql_help.c:4482 msgid "change the definition of an operator" msgstr "연산자 정의 바꾸기" -#: sql_help.c:4213 +#: sql_help.c:4487 msgid "change the definition of an operator class" msgstr "연산자 클래스 정보 바꾸기" -#: sql_help.c:4218 +#: sql_help.c:4492 msgid "change the definition of an operator family" msgstr "연산자 
부류의 정의 바꾸기" -#: sql_help.c:4223 +#: sql_help.c:4497 msgid "change the definition of a row level security policy" msgstr "로우 단위 보안 정책의 정의 바꾸기" -#: sql_help.c:4228 sql_help.c:4298 +#: sql_help.c:4502 +msgid "change the definition of a publication" +msgstr "발행 정보 바꾸기" + +#: sql_help.c:4507 sql_help.c:4587 msgid "change a database role" msgstr "데이터베이스 롤 변경" -#: sql_help.c:4233 +#: sql_help.c:4512 msgid "change the definition of a rule" msgstr "룰 정의 바꾸기" -#: sql_help.c:4238 +#: sql_help.c:4517 msgid "change the definition of a schema" msgstr "스키마 이름 바꾸기" -#: sql_help.c:4243 +#: sql_help.c:4522 msgid "change the definition of a sequence generator" msgstr "시퀀스 정보 바꾸기" -#: sql_help.c:4248 +#: sql_help.c:4527 msgid "change the definition of a foreign server" msgstr "외부 서버 정의 바꾸기" -#: sql_help.c:4253 +#: sql_help.c:4532 +msgid "change the definition of an extended statistics object" +msgstr "확장 통계정보 객체 정의 바꾸기" + +#: sql_help.c:4537 +msgid "change the definition of a subscription" +msgstr "구독 정보 바꾸기" + +#: sql_help.c:4542 msgid "change a server configuration parameter" msgstr "서버 환경 설정 매개 변수 바꾸기" -#: sql_help.c:4258 +#: sql_help.c:4547 msgid "change the definition of a table" msgstr "테이블 정보 바꾸기" -#: sql_help.c:4263 +#: sql_help.c:4552 msgid "change the definition of a tablespace" msgstr "테이블스페이스 정의 바꾸기" -#: sql_help.c:4268 +#: sql_help.c:4557 msgid "change the definition of a text search configuration" msgstr "텍스트 검색 구성 정의 바꾸기" -#: sql_help.c:4273 +#: sql_help.c:4562 msgid "change the definition of a text search dictionary" msgstr "텍스트 검색 사전 정의 바꾸기" -#: sql_help.c:4278 +#: sql_help.c:4567 msgid "change the definition of a text search parser" msgstr "텍스트 검색 파서 정의 바꾸기" -#: sql_help.c:4283 +#: sql_help.c:4572 msgid "change the definition of a text search template" msgstr "텍스트 검색 템플릿 정의 바꾸기" -#: sql_help.c:4288 +#: sql_help.c:4577 msgid "change the definition of a trigger" msgstr "트리거 정보 바꾸기" -#: sql_help.c:4293 +#: sql_help.c:4582 msgid "change the definition of a type" msgstr "자료형 정의 바꾸기" -#: sql_help.c:4303 +#: sql_help.c:4592 msgid "change the definition of a user mapping" msgstr "사용자 매핑 정의 바꾸기" -#: sql_help.c:4308 +#: sql_help.c:4597 msgid "change the definition of a view" msgstr "뷰 정의 바꾸기" -#: sql_help.c:4313 +#: sql_help.c:4602 msgid "collect statistics about a database" msgstr "데이터베이스 사용 통계 정보를 갱신함" -#: sql_help.c:4318 sql_help.c:4928 +#: sql_help.c:4607 sql_help.c:5247 msgid "start a transaction block" msgstr "트랜잭션 블럭을 시작함" -#: sql_help.c:4323 -msgid "force a transaction log checkpoint" -msgstr "트랜잭션 로그를 강제로 checkpoint함" +#: sql_help.c:4612 +msgid "force a write-ahead log checkpoint" +msgstr "트랜잭션 로그를 강제로 체크포인트 함" -#: sql_help.c:4328 +#: sql_help.c:4617 msgid "close a cursor" msgstr "커서 닫기" -#: sql_help.c:4333 +#: sql_help.c:4622 msgid "cluster a table according to an index" msgstr "지정한 인덱스 기준으로 테이블 자료를 다시 저장함" -#: sql_help.c:4338 +#: sql_help.c:4627 msgid "define or change the comment of an object" msgstr "해당 개체의 코멘트를 지정하거나 수정함" -#: sql_help.c:4343 sql_help.c:4763 +#: sql_help.c:4632 sql_help.c:5082 msgid "commit the current transaction" msgstr "현재 트랜잭션 commit" -#: sql_help.c:4348 +#: sql_help.c:4637 msgid "commit a transaction that was earlier prepared for two-phase commit" msgstr "two-phase 커밋을 위해 먼저 준비된 트랜잭션을 커밋하세요." 
-#: sql_help.c:4353 +#: sql_help.c:4642 msgid "copy data between a file and a table" msgstr "테이블과 파일 사이 자료를 복사함" -#: sql_help.c:4358 +#: sql_help.c:4647 msgid "define a new access method" msgstr "새 접속 방법 정의" -#: sql_help.c:4363 +#: sql_help.c:4652 msgid "define a new aggregate function" msgstr "새 집계합수 만들기" -#: sql_help.c:4368 +#: sql_help.c:4657 msgid "define a new cast" msgstr "새 형변환자 만들기" -#: sql_help.c:4373 +#: sql_help.c:4662 msgid "define a new collation" msgstr "새 collation 만들기" -#: sql_help.c:4378 +#: sql_help.c:4667 msgid "define a new encoding conversion" msgstr "새 문자코드변환규칙(conversion) 만들기" -#: sql_help.c:4383 +#: sql_help.c:4672 msgid "create a new database" msgstr "데이터베이스 생성" -#: sql_help.c:4388 +#: sql_help.c:4677 msgid "define a new domain" msgstr "새 도메인 만들기" -#: sql_help.c:4393 +#: sql_help.c:4682 msgid "define a new event trigger" msgstr "새 이벤트 트리거 만들기" -#: sql_help.c:4398 +#: sql_help.c:4687 msgid "install an extension" -msgstr "" +msgstr "확장 모듈 설치" -#: sql_help.c:4403 +#: sql_help.c:4692 msgid "define a new foreign-data wrapper" msgstr "새 외부 데이터 래퍼 정의" -#: sql_help.c:4408 +#: sql_help.c:4697 msgid "define a new foreign table" msgstr "새 외부 테이블 정의" -#: sql_help.c:4413 +#: sql_help.c:4702 msgid "define a new function" msgstr "새 함수 만들기" -#: sql_help.c:4418 sql_help.c:4458 sql_help.c:4533 +#: sql_help.c:4707 sql_help.c:4752 sql_help.c:4837 msgid "define a new database role" msgstr "새 데이터베이스 롤 만들기" -#: sql_help.c:4423 +#: sql_help.c:4712 msgid "define a new index" msgstr "새 인덱스 만들기" -#: sql_help.c:4428 +#: sql_help.c:4717 msgid "define a new procedural language" msgstr "새 프로시주얼 언어 만들기" -#: sql_help.c:4433 +#: sql_help.c:4722 msgid "define a new materialized view" msgstr "새 materialized 뷰 만들기" -#: sql_help.c:4438 +#: sql_help.c:4727 msgid "define a new operator" msgstr "새 연산자 만들기" -#: sql_help.c:4443 +#: sql_help.c:4732 msgid "define a new operator class" msgstr "새 연잔자 클래스 만들기" -#: sql_help.c:4448 +#: sql_help.c:4737 msgid "define a new operator family" msgstr "새 연산자 부류 만들기" -#: sql_help.c:4453 +#: sql_help.c:4742 msgid "define a new row level security policy for a table" msgstr "특정 테이블에 로우 단위 보안 정책 정의" -#: sql_help.c:4463 +#: sql_help.c:4747 +msgid "define a new publication" +msgstr "새 발행 만들기" + +#: sql_help.c:4757 msgid "define a new rewrite rule" msgstr "새 룰(rule) 만들기" -#: sql_help.c:4468 +#: sql_help.c:4762 msgid "define a new schema" msgstr "새 스키마(schema) 만들기" -#: sql_help.c:4473 +#: sql_help.c:4767 msgid "define a new sequence generator" msgstr "새 시퀀스 만들기" -#: sql_help.c:4478 +#: sql_help.c:4772 msgid "define a new foreign server" msgstr "새 외부 서버 정의" -#: sql_help.c:4483 +#: sql_help.c:4777 +msgid "define extended statistics" +msgstr "새 확장 통계정보 만들기" + +#: sql_help.c:4782 +msgid "define a new subscription" +msgstr "새 구독 만들기" + +#: sql_help.c:4787 msgid "define a new table" msgstr "새 테이블 만들기" -#: sql_help.c:4488 sql_help.c:4893 +#: sql_help.c:4792 sql_help.c:5212 msgid "define a new table from the results of a query" msgstr "쿼리 결과를 새 테이블로 만들기" -#: sql_help.c:4493 +#: sql_help.c:4797 msgid "define a new tablespace" msgstr "새 테이블스페이스 만들기" -#: sql_help.c:4498 +#: sql_help.c:4802 msgid "define a new text search configuration" msgstr "새 텍스트 검색 구성 정의" -#: sql_help.c:4503 +#: sql_help.c:4807 msgid "define a new text search dictionary" msgstr "새 텍스트 검색 사전 정의" -#: sql_help.c:4508 +#: sql_help.c:4812 msgid "define a new text search parser" msgstr "새 텍스트 검색 파서 정의" -#: sql_help.c:4513 +#: sql_help.c:4817 msgid "define a new text search template" msgstr "새 텍스트 검색 템플릿 정의" -#: 
sql_help.c:4518 +#: sql_help.c:4822 msgid "define a new transform" msgstr "새 transform 만들기" -#: sql_help.c:4523 +#: sql_help.c:4827 msgid "define a new trigger" msgstr "새 트리거 만들기" -#: sql_help.c:4528 +#: sql_help.c:4832 msgid "define a new data type" msgstr "새 자료형 만들기" -#: sql_help.c:4538 +#: sql_help.c:4842 msgid "define a new mapping of a user to a foreign server" msgstr "사용자와 외부 서버 간의 새 매핑 정의" -#: sql_help.c:4543 +#: sql_help.c:4847 msgid "define a new view" msgstr "새 view 만들기" -#: sql_help.c:4548 +#: sql_help.c:4852 msgid "deallocate a prepared statement" msgstr "준비된 구문(prepared statement) 정의" -#: sql_help.c:4553 +#: sql_help.c:4857 msgid "define a cursor" msgstr "커서 지정" -#: sql_help.c:4558 +#: sql_help.c:4862 msgid "delete rows of a table" msgstr "테이블의 자료 삭제" -#: sql_help.c:4563 +#: sql_help.c:4867 msgid "discard session state" msgstr "세션 상태 삭제" -#: sql_help.c:4568 +#: sql_help.c:4872 msgid "execute an anonymous code block" -msgstr "" +msgstr "임의 코드 블록 실행" -#: sql_help.c:4573 +#: sql_help.c:4877 msgid "remove an access method" msgstr "접근 방법 삭제" -#: sql_help.c:4578 +#: sql_help.c:4882 msgid "remove an aggregate function" msgstr "집계 함수 삭제" -#: sql_help.c:4583 +#: sql_help.c:4887 msgid "remove a cast" msgstr "형변환자 삭제" -#: sql_help.c:4588 +#: sql_help.c:4892 msgid "remove a collation" msgstr "collation 삭제" -#: sql_help.c:4593 +#: sql_help.c:4897 msgid "remove a conversion" msgstr "문자코드 변환규칙(conversion) 삭제" -#: sql_help.c:4598 +#: sql_help.c:4902 msgid "remove a database" msgstr "데이터베이스 삭제" -#: sql_help.c:4603 +#: sql_help.c:4907 msgid "remove a domain" msgstr "도메인 삭제" -#: sql_help.c:4608 +#: sql_help.c:4912 msgid "remove an event trigger" msgstr "이벤트 트리거 삭제" -#: sql_help.c:4613 +#: sql_help.c:4917 msgid "remove an extension" msgstr "확장 모듈 삭제" -#: sql_help.c:4618 +#: sql_help.c:4922 msgid "remove a foreign-data wrapper" msgstr "외부 데이터 래퍼 제거" -#: sql_help.c:4623 +#: sql_help.c:4927 msgid "remove a foreign table" msgstr "외부 테이블 삭제" -#: sql_help.c:4628 +#: sql_help.c:4932 msgid "remove a function" msgstr "함수 삭제" -#: sql_help.c:4633 sql_help.c:4678 sql_help.c:4748 +#: sql_help.c:4937 sql_help.c:4987 sql_help.c:5067 msgid "remove a database role" msgstr "데이터베이스 롤 삭제" -#: sql_help.c:4638 +#: sql_help.c:4942 msgid "remove an index" msgstr "인덱스 삭제" -#: sql_help.c:4643 +#: sql_help.c:4947 msgid "remove a procedural language" msgstr "프로시주얼 언어 삭제" -#: sql_help.c:4648 +#: sql_help.c:4952 msgid "remove a materialized view" msgstr "materialized 뷰 삭제" -#: sql_help.c:4653 +#: sql_help.c:4957 msgid "remove an operator" msgstr "연산자 삭제" -#: sql_help.c:4658 +#: sql_help.c:4962 msgid "remove an operator class" msgstr "연산자 클래스 삭제" -#: sql_help.c:4663 +#: sql_help.c:4967 msgid "remove an operator family" msgstr "연산자 부류 삭제" -#: sql_help.c:4668 +#: sql_help.c:4972 msgid "remove database objects owned by a database role" msgstr "데이터베이스 롤로 권한이 부여된 데이터베이스 개체들을 삭제하세요" -#: sql_help.c:4673 +#: sql_help.c:4977 msgid "remove a row level security policy from a table" msgstr "특정 테이블에 정의된 로우 단위 보안 정책 삭제" -#: sql_help.c:4683 +#: sql_help.c:4982 +msgid "remove a publication" +msgstr "발행 삭제" + +#: sql_help.c:4992 msgid "remove a rewrite rule" msgstr "룰(rule) 삭제" -#: sql_help.c:4688 +#: sql_help.c:4997 msgid "remove a schema" msgstr "스키마(schema) 삭제" -#: sql_help.c:4693 +#: sql_help.c:5002 msgid "remove a sequence" msgstr "시퀀스 삭제" -#: sql_help.c:4698 +#: sql_help.c:5007 msgid "remove a foreign server descriptor" msgstr "외부 서버 설명자 제거" -#: sql_help.c:4703 +#: sql_help.c:5012 +msgid "remove extended statistics" +msgstr "확장 통계정보 삭제" 
+ +#: sql_help.c:5017 +msgid "remove a subscription" +msgstr "구독 삭제" + +#: sql_help.c:5022 msgid "remove a table" msgstr "테이블 삭제" -#: sql_help.c:4708 +#: sql_help.c:5027 msgid "remove a tablespace" msgstr "테이블스페이스 삭제" -#: sql_help.c:4713 +#: sql_help.c:5032 msgid "remove a text search configuration" msgstr "텍스트 검색 구성 제거" -#: sql_help.c:4718 +#: sql_help.c:5037 msgid "remove a text search dictionary" msgstr "텍스트 검색 사전 제거" -#: sql_help.c:4723 +#: sql_help.c:5042 msgid "remove a text search parser" msgstr "텍스트 검색 파서 제거" -#: sql_help.c:4728 +#: sql_help.c:5047 msgid "remove a text search template" msgstr "텍스트 검색 템플릿 제거" -#: sql_help.c:4733 +#: sql_help.c:5052 msgid "remove a transform" msgstr "transform 삭제" -#: sql_help.c:4738 +#: sql_help.c:5057 msgid "remove a trigger" msgstr "트리거 삭제" -#: sql_help.c:4743 +#: sql_help.c:5062 msgid "remove a data type" msgstr "자료형 삭제" -#: sql_help.c:4753 +#: sql_help.c:5072 msgid "remove a user mapping for a foreign server" msgstr "외부 서버에 대한 사용자 매핑 제거" -#: sql_help.c:4758 +#: sql_help.c:5077 msgid "remove a view" msgstr "뷰(view) 삭제" -#: sql_help.c:4768 +#: sql_help.c:5087 msgid "execute a prepared statement" msgstr "준비된 구문(prepared statement) 실행" -#: sql_help.c:4773 +#: sql_help.c:5092 msgid "show the execution plan of a statement" msgstr "쿼리 실행계획 보기" -#: sql_help.c:4778 +#: sql_help.c:5097 msgid "retrieve rows from a query using a cursor" msgstr "해당 커서에서 자료 뽑기" -#: sql_help.c:4783 +#: sql_help.c:5102 msgid "define access privileges" msgstr "액세스 권한 지정하기" -#: sql_help.c:4788 +#: sql_help.c:5107 msgid "import table definitions from a foreign server" msgstr "외부 서버로부터 테이블 정의 가져오기" -#: sql_help.c:4793 +#: sql_help.c:5112 msgid "create new rows in a table" msgstr "테이블 자료 삽입" -#: sql_help.c:4798 +#: sql_help.c:5117 msgid "listen for a notification" msgstr "특정 서버 메시지 수신함" -#: sql_help.c:4803 +#: sql_help.c:5122 msgid "load a shared library file" msgstr "공유 라이브러리 파일 로드" -#: sql_help.c:4808 +#: sql_help.c:5127 msgid "lock a table" msgstr "테이블 잠금" -#: sql_help.c:4813 +#: sql_help.c:5132 msgid "position a cursor" msgstr "커서 위치 옮기기" -#: sql_help.c:4818 +#: sql_help.c:5137 msgid "generate a notification" msgstr "특정 서버 메시지 발생" -#: sql_help.c:4823 +#: sql_help.c:5142 msgid "prepare a statement for execution" msgstr "준비된 구문(prepared statement) 만들기" -#: sql_help.c:4828 +#: sql_help.c:5147 msgid "prepare the current transaction for two-phase commit" msgstr "two-phase 커밋을 위해 현재 트랜잭션을 준비함" -#: sql_help.c:4833 +#: sql_help.c:5152 msgid "change the ownership of database objects owned by a database role" msgstr "데이터베이스 롤로 권한이 부여된 데이터베이스 개체들의 소유주 바꾸기" -#: sql_help.c:4838 +#: sql_help.c:5157 msgid "replace the contents of a materialized view" -msgstr "" +msgstr "구체화된 뷰의 내용 수정" -#: sql_help.c:4843 +#: sql_help.c:5162 msgid "rebuild indexes" msgstr "인덱스 다시 만들기" -#: sql_help.c:4848 +#: sql_help.c:5167 msgid "destroy a previously defined savepoint" msgstr "이전 정의된 savepoint를 파기함" -#: sql_help.c:4853 +#: sql_help.c:5172 msgid "restore the value of a run-time parameter to the default value" msgstr "실시간 환경 변수값을 초기값으로 다시 지정" -#: sql_help.c:4858 +#: sql_help.c:5177 msgid "remove access privileges" msgstr "액세스 권한 해제하기" -#: sql_help.c:4868 +#: sql_help.c:5187 msgid "cancel a transaction that was earlier prepared for two-phase commit" msgstr "two-phase 커밋을 위해 먼저 준비되었던 트랜잭션 실행취소하기" -#: sql_help.c:4873 +#: sql_help.c:5192 msgid "roll back to a savepoint" msgstr "savepoint 파기하기" -#: sql_help.c:4878 +#: sql_help.c:5197 msgid "define a new savepoint within the current transaction" msgstr "현재 트랜잭션에서 새로운 
savepoint 만들기" -#: sql_help.c:4883 +#: sql_help.c:5202 msgid "define or change a security label applied to an object" msgstr "해당 개체에 보안 라벨을 정의하거나 변경" -#: sql_help.c:4888 sql_help.c:4933 sql_help.c:4963 +#: sql_help.c:5207 sql_help.c:5252 sql_help.c:5282 msgid "retrieve rows from a table or view" msgstr "테이블이나 뷰의 자료를 출력" -#: sql_help.c:4898 +#: sql_help.c:5217 msgid "change a run-time parameter" msgstr "실시간 환경 변수값 바꾸기" -#: sql_help.c:4903 +#: sql_help.c:5222 msgid "set constraint check timing for the current transaction" msgstr "현재 트랜잭션에서 제약조건 설정" -#: sql_help.c:4908 +#: sql_help.c:5227 msgid "set the current user identifier of the current session" msgstr "현재 세션의 현재 사용자 식별자를 지정" -#: sql_help.c:4913 +#: sql_help.c:5232 msgid "" "set the session user identifier and the current user identifier of the " "current session" msgstr "현재 세션의 사용자 인증을 지정함 - 사용자 지정" -#: sql_help.c:4918 +#: sql_help.c:5237 msgid "set the characteristics of the current transaction" msgstr "현재 트랜잭션의 성질을 지정함" -#: sql_help.c:4923 +#: sql_help.c:5242 msgid "show the value of a run-time parameter" msgstr "실시간 환경 변수값들을 보여줌" -#: sql_help.c:4938 +#: sql_help.c:5257 msgid "empty a table or set of tables" msgstr "하나 또는 지정한 여러개의 테이블에서 모든 자료 지움" -#: sql_help.c:4943 +#: sql_help.c:5262 msgid "stop listening for a notification" msgstr "특정 서버 메시지 수신 기능 끔" -#: sql_help.c:4948 +#: sql_help.c:5267 msgid "update rows of a table" msgstr "테이블 자료 갱신" -#: sql_help.c:4953 +#: sql_help.c:5272 msgid "garbage-collect and optionally analyze a database" msgstr "물리적인 자료 정리 작업 - 쓰레기값 청소" -#: sql_help.c:4958 +#: sql_help.c:5277 msgid "compute a set of rows" msgstr "compute a set of rows" -#: startup.c:189 +#: startup.c:187 #, c-format msgid "%s: -1 can only be used in non-interactive mode\n" msgstr "%s: -1 옵션은 비대화형 모드에서만 사용할 수 있음\n" -#: startup.c:289 +#: startup.c:290 #, c-format msgid "%s: could not open log file \"%s\": %s\n" msgstr "%s: \"%s\" 로그 파일을 열 수 없음: %s\n" -#: startup.c:389 +#: startup.c:397 #, c-format msgid "" "Type \"help\" for help.\n" @@ -5399,21 +5802,11 @@ msgstr "" "도움말을 보려면 \"help\"를 입력하십시오.\n" "\n" -#: startup.c:538 +#: startup.c:546 #, c-format msgid "%s: could not set printing parameter \"%s\"\n" msgstr "%s: 출력 매개 변수 \"%s\" 지정할 수 없음\n" -#: startup.c:578 -#, c-format -msgid "%s: could not delete variable \"%s\"\n" -msgstr "%s: \"%s\" 변수를 지울 수 없음\n" - -#: startup.c:588 -#, c-format -msgid "%s: could not set variable \"%s\"\n" -msgstr "%s: \"%s\" 변수를 지정할 수 없음\n" - #: startup.c:648 #, c-format msgid "Try \"%s --help\" for more information.\n" @@ -5429,13 +5822,7 @@ msgstr "%s: 경고: 추가 명령행 인수 \"%s\" 무시됨\n" msgid "%s: could not find own program executable\n" msgstr "%s: 실행 가능한 프로그램을 찾을 수 없습니다\n" -#: startup.c:836 startup.c:883 startup.c:904 startup.c:941 startup.c:963 -#: variables.c:121 -#, c-format -msgid "unrecognized value \"%s\" for \"%s\"; assuming \"%s\"\n" -msgstr "\"%s\" 값은 \"%s\" 변수값으로 사용할 수 없음; \"%s\" 값을 사용함\n" - -#: tab-complete.c:3704 +#: tab-complete.c:4186 #, c-format msgid "" "tab completion query failed: %s\n" @@ -5445,3 +5832,30 @@ msgstr "" "탭 자동완성용 쿼리 실패: %s\n" "사용한 쿼리:\n" "%s\n" + +#: variables.c:139 +#, c-format +msgid "unrecognized value \"%s\" for \"%s\": Boolean expected\n" +msgstr "잘못된 \"%s\" 값을 \"%s\" 변수값으로 사용함: 불린형이어야 함\n" + +#: variables.c:176 +#, c-format +msgid "invalid value \"%s\" for \"%s\": integer expected\n" +msgstr "\"%s\" 값은 \"%s\" 변수값으로 사용할 수 없음; 정수형이어야 함\n" + +#: variables.c:224 +#, c-format +msgid "invalid variable name: \"%s\"\n" +msgstr "잘못된 변수 이름: %s\n" + +#: variables.c:393 +#, c-format 
+msgid "" +"unrecognized value \"%s\" for \"%s\"\n" +"Available values are: %s.\n" +msgstr "" +"\"%s\" 값은 \"%s\" 변수값으로 사용할 수 없음\n" +"사용할 수 있는 변수값: %s\n" + +#~ msgid "statistic_type" +#~ msgstr "통계정보_종류" diff --git a/src/bin/psql/po/ru.po b/src/bin/psql/po/ru.po index 5a9a8d7a95..08875db134 100644 --- a/src/bin/psql/po/ru.po +++ b/src/bin/psql/po/ru.po @@ -5,13 +5,13 @@ # Oleg Bartunov , 2004-2005. # Sergey Burladyan , 2012. # Alexander Lakhin , 2012-2017. -# msgid "" msgstr "" "Project-Id-Version: psql (PostgreSQL current)\n" "Report-Msgid-Bugs-To: pgsql-bugs@postgresql.org\n" -"POT-Creation-Date: 2017-04-02 23:45+0000\n" -"PO-Revision-Date: 2017-04-03 10:54+0300\n" +"POT-Creation-Date: 2017-10-09 14:14+0300\n" +"PO-Revision-Date: 2017-11-21 15:42+0300\n" +"Last-Translator: Alexander Lakhin \n" "Language-Team: Russian \n" "Language: ru\n" "MIME-Version: 1.0\n" @@ -19,7 +19,6 @@ msgstr "" "Content-Transfer-Encoding: 8bit\n" "Plural-Forms: nplurals=3; plural=(n%10==1 && n%100!=11 ? 0 : n%10>=2 && n" "%10<=4 && (n%100<10 || n%100>=20) ? 1 : 2);\n" -"Last-Translator: Alexander Lakhin \n" #: ../../common/exec.c:127 ../../common/exec.c:241 ../../common/exec.c:284 #, c-format @@ -57,7 +56,7 @@ msgid "pclose failed: %s" msgstr "ошибка pclose: %s" #: ../../common/fe_memutils.c:35 ../../common/fe_memutils.c:75 -#: ../../common/fe_memutils.c:98 command.c:612 input.c:227 mainloop.c:82 +#: ../../common/fe_memutils.c:98 command.c:608 input.c:227 mainloop.c:82 #: mainloop.c:276 #, c-format msgid "out of memory\n" @@ -73,7 +72,7 @@ msgstr "попытка дублирования нулевого указате msgid "could not look up effective user ID %ld: %s" msgstr "выяснить эффективный идентификатор пользователя (%ld) не удалось: %s" -#: ../../common/username.c:45 command.c:559 +#: ../../common/username.c:45 command.c:555 msgid "user does not exist" msgstr "пользователь не существует" @@ -152,22 +151,22 @@ msgstr "неверный формат вывода (внутренняя оши msgid "skipping recursive expansion of variable \"%s\"\n" msgstr "рекурсивное расширение переменной \"%s\" пропускается\n" -#: command.c:227 +#: command.c:223 #, c-format msgid "Invalid command \\%s. Try \\? for help.\n" msgstr "Неверная команда \\%s. Справка по командам: \\?\n" -#: command.c:229 +#: command.c:225 #, c-format msgid "invalid command \\%s\n" msgstr "неверная команда \\%s\n" -#: command.c:247 +#: command.c:243 #, c-format msgid "\\%s: extra argument \"%s\" ignored\n" msgstr "\\%s: лишний аргумент \"%s\" пропущен\n" -#: command.c:299 +#: command.c:295 #, c-format msgid "" "\\%s command ignored; use \\endif or Ctrl-C to exit current \\if block\n" @@ -175,22 +174,22 @@ msgstr "" "команда \\%s игнорируется; добавьте \\endif или нажмите Ctrl-C для " "завершения текущего блока \\if\n" -#: command.c:557 +#: command.c:553 #, c-format msgid "could not get home directory for user ID %ld: %s\n" msgstr "не удалось получить домашний каталог пользователя c ид. 
%ld: %s\n" -#: command.c:575 +#: command.c:571 #, c-format msgid "\\%s: could not change directory to \"%s\": %s\n" msgstr "\\%s: не удалось перейти в каталог \"%s\": %s\n" -#: command.c:600 common.c:648 common.c:706 common.c:1242 +#: command.c:596 common.c:648 common.c:706 common.c:1242 #, c-format msgid "You are currently not connected to a database.\n" msgstr "В данный момент вы не подключены к базе данных.\n" -#: command.c:625 +#: command.c:621 #, c-format msgid "" "You are connected to database \"%s\" as user \"%s\" via socket in \"%s\" at " @@ -199,7 +198,7 @@ msgstr "" "Вы подключены к базе данных \"%s\" как пользователь \"%s\" через сокет в \"%s" "\", порт \"%s\".\n" -#: command.c:628 +#: command.c:624 #, c-format msgid "" "You are connected to database \"%s\" as user \"%s\" on host \"%s\" at port " @@ -208,174 +207,169 @@ msgstr "" "Вы подключены к базе данных \"%s\" как пользователь \"%s\" (сервер \"%s\", " "порт \"%s\").\n" -#: command.c:916 command.c:1006 command.c:1115 command.c:2524 +#: command.c:915 command.c:1005 command.c:1114 command.c:2523 #, c-format msgid "no query buffer\n" msgstr "нет буфера запросов\n" -#: command.c:949 command.c:4761 +#: command.c:948 command.c:4784 #, c-format msgid "invalid line number: %s\n" msgstr "неверный номер строки: %s\n" -#: command.c:999 +#: command.c:998 #, c-format msgid "The server (version %s) does not support editing function source.\n" msgstr "" "Сервер (версия %s) не поддерживает редактирование исходного кода функции.\n" -#: command.c:1074 command.c:1155 +#: command.c:1073 command.c:1154 msgid "No changes" msgstr "Изменений нет" -#: command.c:1108 +#: command.c:1107 #, c-format msgid "The server (version %s) does not support editing view definitions.\n" msgstr "" "Сервер (версия %s) не поддерживает редактирование определения " "представления.\n" -#: command.c:1232 +#: command.c:1231 #, c-format msgid "%s: invalid encoding name or conversion procedure not found\n" msgstr "" "%s: неверное название кодировки символов или не найдена процедура " "перекодировки\n" -#: command.c:1267 command.c:3165 command.c:4863 common.c:173 common.c:244 -#: common.c:541 common.c:1288 common.c:1316 common.c:1417 copy.c:489 copy.c:699 -#: large_obj.c:156 large_obj.c:191 large_obj.c:253 +#: command.c:1266 command.c:1888 command.c:3169 command.c:4886 common.c:173 +#: common.c:244 common.c:541 common.c:1288 common.c:1316 common.c:1417 +#: copy.c:489 copy.c:708 large_obj.c:156 large_obj.c:191 large_obj.c:253 #, c-format msgid "%s" msgstr "%s" -#: command.c:1271 +#: command.c:1270 msgid "out of memory" msgstr "нехватка памяти" -#: command.c:1274 +#: command.c:1273 msgid "There is no previous error." msgstr "Ошибки не было." 
-#: command.c:1445 command.c:1750 command.c:1764 command.c:1781 command.c:1941 -#: command.c:2178 command.c:2491 command.c:2531 +#: command.c:1444 command.c:1749 command.c:1763 command.c:1780 command.c:1940 +#: command.c:2177 command.c:2490 command.c:2530 #, c-format msgid "\\%s: missing required argument\n" msgstr "отсутствует необходимый аргумент \\%s\n" -#: command.c:1576 +#: command.c:1575 #, c-format msgid "\\elif: cannot occur after \\else\n" msgstr "\\elif не может находиться после \\else\n" -#: command.c:1581 +#: command.c:1580 #, c-format msgid "\\elif: no matching \\if\n" msgstr "\\elif без соответствующего \\if\n" -#: command.c:1645 +#: command.c:1644 #, c-format msgid "\\else: cannot occur after \\else\n" msgstr "\\else не может находиться после \\else\n" -#: command.c:1650 +#: command.c:1649 #, c-format msgid "\\else: no matching \\if\n" msgstr "\\else без соответствующего \\if\n" -#: command.c:1690 +#: command.c:1689 #, c-format msgid "\\endif: no matching \\if\n" msgstr "\\endif без соответствующего \\if\n" -#: command.c:1845 +#: command.c:1844 msgid "Query buffer is empty." msgstr "Буфер запроса пуст." -#: command.c:1867 +#: command.c:1866 msgid "Enter new password: " msgstr "Введите новый пароль: " -#: command.c:1868 +#: command.c:1867 msgid "Enter it again: " msgstr "Повторите его: " -#: command.c:1872 +#: command.c:1871 #, c-format msgid "Passwords didn't match.\n" msgstr "Пароли не совпадают.\n" -#: command.c:1889 -#, c-format -msgid "Password encryption failed.\n" -msgstr "Ошибка при шифровании пароля.\n" - -#: command.c:1971 +#: command.c:1970 #, c-format msgid "\\%s: could not read value for variable\n" msgstr "\\%s: не удалось прочитать значение переменной\n" -#: command.c:2074 +#: command.c:2073 msgid "Query buffer reset (cleared)." msgstr "Буфер запроса сброшен (очищен)." -#: command.c:2096 +#: command.c:2095 #, c-format msgid "Wrote history to file \"%s\".\n" msgstr "История записана в файл \"%s\".\n" -#: command.c:2183 +#: command.c:2182 #, c-format msgid "\\%s: environment variable name must not contain \"=\"\n" msgstr "\\%s: имя переменной окружения не может содержать знак \"=\"\n" -#: command.c:2239 +#: command.c:2238 #, c-format msgid "The server (version %s) does not support showing function source.\n" msgstr "Сервер (версия %s) не поддерживает вывод исходного кода функции.\n" -#: command.c:2246 +#: command.c:2245 #, c-format msgid "function name is required\n" msgstr "требуется имя функции\n" -#: command.c:2333 +#: command.c:2332 #, c-format msgid "The server (version %s) does not support showing view definitions.\n" msgstr "Сервер (версия %s) не поддерживает вывод определения представлений.\n" -#: command.c:2340 +#: command.c:2339 #, c-format msgid "view name is required\n" msgstr "требуется имя представления\n" -#: command.c:2463 +#: command.c:2462 msgid "Timing is on." msgstr "Секундомер включён." -#: command.c:2465 +#: command.c:2464 msgid "Timing is off." msgstr "Секундомер выключен." 
-#: command.c:2550 command.c:2578 command.c:3514 command.c:3517 command.c:3520 -#: command.c:3526 command.c:3528 command.c:3536 command.c:3546 command.c:3555 -#: command.c:3569 command.c:3586 command.c:3644 common.c:69 copy.c:332 -#: copy.c:392 copy.c:405 psqlscanslash.l:760 psqlscanslash.l:771 -#: psqlscanslash.l:781 +#: command.c:2549 command.c:2577 command.c:3537 command.c:3540 command.c:3543 +#: command.c:3549 command.c:3551 command.c:3559 command.c:3569 command.c:3578 +#: command.c:3592 command.c:3609 command.c:3667 common.c:69 copy.c:332 +#: copy.c:392 copy.c:405 psqlscanslash.l:761 psqlscanslash.l:772 +#: psqlscanslash.l:782 #, c-format msgid "%s: %s\n" msgstr "%s: %s\n" -#: command.c:2957 startup.c:202 +#: command.c:2961 startup.c:205 msgid "Password: " msgstr "Пароль: " -#: command.c:2962 startup.c:204 +#: command.c:2966 startup.c:207 #, c-format msgid "Password for user %s: " msgstr "Пароль пользователя %s: " -#: command.c:3012 +#: command.c:3016 #, c-format msgid "" "All connection parameters must be supplied because no database connection " @@ -384,17 +378,17 @@ msgstr "" "Без подключения к базе данных необходимо указывать все параметры " "подключения\n" -#: command.c:3169 +#: command.c:3173 #, c-format msgid "Previous connection kept\n" msgstr "Сохранено предыдущее подключение\n" -#: command.c:3173 +#: command.c:3177 #, c-format msgid "\\connect: %s" msgstr "\\connect: %s" -#: command.c:3209 +#: command.c:3213 #, c-format msgid "" "You are now connected to database \"%s\" as user \"%s\" via socket in \"%s\" " @@ -403,7 +397,7 @@ msgstr "" "Вы подключены к базе данных \"%s\" как пользователь \"%s\" через сокет в \"%s" "\", порт \"%s\".\n" -#: command.c:3212 +#: command.c:3216 #, c-format msgid "" "You are now connected to database \"%s\" as user \"%s\" on host \"%s\" at " @@ -412,17 +406,17 @@ msgstr "" "Вы подключены к базе данных \"%s\" как пользователь \"%s\" (сервер \"%s\", " "порт \"%s\") .\n" -#: command.c:3216 +#: command.c:3220 #, c-format msgid "You are now connected to database \"%s\" as user \"%s\".\n" msgstr "Вы подключены к базе данных \"%s\" как пользователь \"%s\".\n" -#: command.c:3249 +#: command.c:3253 #, c-format msgid "%s (%s, server %s)\n" msgstr "%s (%s, сервер %s)\n" -#: command.c:3257 +#: command.c:3261 #, c-format msgid "" "WARNING: %s major version %s, server major version %s.\n" @@ -431,24 +425,24 @@ msgstr "" "ПРЕДУПРЕЖДЕНИЕ: %s имеет базовую версию %s, а сервер - %s.\n" " Часть функций psql может не работать.\n" -#: command.c:3294 +#: command.c:3298 #, c-format msgid "SSL connection (protocol: %s, cipher: %s, bits: %s, compression: %s)\n" msgstr "SSL-соединение (протокол: %s, шифр: %s, бит: %s, сжатие: %s)\n" -#: command.c:3295 command.c:3296 command.c:3297 +#: command.c:3299 command.c:3300 command.c:3301 msgid "unknown" msgstr "неизвестно" -#: command.c:3298 help.c:45 +#: command.c:3302 help.c:45 msgid "off" msgstr "выкл." -#: command.c:3298 help.c:45 +#: command.c:3302 help.c:45 msgid "on" msgstr "вкл." 
-#: command.c:3318 +#: command.c:3322 #, c-format msgid "" "WARNING: Console code page (%u) differs from Windows code page (%u)\n" @@ -461,7 +455,7 @@ msgstr "" " Подробнее об этом смотрите документацию psql, раздел\n" " \"Notes for Windows users\".\n" -#: command.c:3403 +#: command.c:3426 #, c-format msgid "" "environment variable PSQL_EDITOR_LINENUMBER_ARG must be set to specify a " @@ -470,27 +464,27 @@ msgstr "" "в переменной окружения PSQL_EDITOR_LINENUMBER_ARG должен быть указан номер " "строки\n" -#: command.c:3432 +#: command.c:3455 #, c-format msgid "could not start editor \"%s\"\n" msgstr "не удалось запустить редактор \"%s\"\n" -#: command.c:3434 +#: command.c:3457 #, c-format msgid "could not start /bin/sh\n" msgstr "не удалось запустить /bin/sh\n" -#: command.c:3472 +#: command.c:3495 #, c-format msgid "could not locate temporary directory: %s\n" msgstr "не удалось найти временный каталог: %s\n" -#: command.c:3499 +#: command.c:3522 #, c-format msgid "could not open temporary file \"%s\": %s\n" msgstr "не удалось открыть временный файл \"%s\": %s\n" -#: command.c:3773 +#: command.c:3796 #, c-format msgid "" "\\pset: allowed formats are unaligned, aligned, wrapped, html, asciidoc, " @@ -499,122 +493,122 @@ msgstr "" "допустимые форматы \\pset: unaligned, aligned, wrapped, html, asciidoc, " "latex, latex-longtable, troff-ms\n" -#: command.c:3791 +#: command.c:3814 #, c-format msgid "\\pset: allowed line styles are ascii, old-ascii, unicode\n" msgstr "допустимые стили линий для \\pset: ascii, old-ascii, unicode\n" -#: command.c:3806 +#: command.c:3829 #, c-format msgid "\\pset: allowed Unicode border line styles are single, double\n" msgstr "допустимые стили Unicode-линий границ для \\pset: single, double\n" -#: command.c:3821 +#: command.c:3844 #, c-format msgid "\\pset: allowed Unicode column line styles are single, double\n" msgstr "допустимые стили Unicode-линий столбцов для \\pset: single, double\n" -#: command.c:3836 +#: command.c:3859 #, c-format msgid "\\pset: allowed Unicode header line styles are single, double\n" msgstr "допустимые стили Unicode-линий заголовков для \\pset: single, double\n" -#: command.c:4001 command.c:4180 +#: command.c:4024 command.c:4203 #, c-format msgid "\\pset: unknown option: %s\n" msgstr "неизвестный параметр \\pset: %s\n" -#: command.c:4019 +#: command.c:4042 #, c-format msgid "Border style is %d.\n" msgstr "Стиль границ: %d.\n" -#: command.c:4025 +#: command.c:4048 #, c-format msgid "Target width is unset.\n" msgstr "Ширина вывода сброшена.\n" -#: command.c:4027 +#: command.c:4050 #, c-format msgid "Target width is %d.\n" msgstr "Ширина вывода: %d.\n" -#: command.c:4034 +#: command.c:4057 #, c-format msgid "Expanded display is on.\n" msgstr "Расширенный вывод включён.\n" -#: command.c:4036 +#: command.c:4059 #, c-format msgid "Expanded display is used automatically.\n" msgstr "Расширенный вывод применяется автоматически.\n" -#: command.c:4038 +#: command.c:4061 #, c-format msgid "Expanded display is off.\n" msgstr "Расширенный вывод выключен.\n" -#: command.c:4045 command.c:4053 +#: command.c:4068 command.c:4076 #, c-format msgid "Field separator is zero byte.\n" msgstr "Разделитель полей - нулевой байт.\n" -#: command.c:4047 +#: command.c:4070 #, c-format msgid "Field separator is \"%s\".\n" msgstr "Разделитель полей: \"%s\".\n" -#: command.c:4060 +#: command.c:4083 #, c-format msgid "Default footer is on.\n" msgstr "Строка итогов включена.\n" -#: command.c:4062 +#: command.c:4085 #, c-format msgid "Default footer is off.\n" msgstr "Строка 
итогов выключена.\n" -#: command.c:4068 +#: command.c:4091 #, c-format msgid "Output format is %s.\n" msgstr "Формат вывода: %s.\n" -#: command.c:4074 +#: command.c:4097 #, c-format msgid "Line style is %s.\n" msgstr "Установлен стиль линий: %s.\n" -#: command.c:4081 +#: command.c:4104 #, c-format msgid "Null display is \"%s\".\n" msgstr "Null выводится как: \"%s\".\n" -#: command.c:4089 +#: command.c:4112 #, c-format msgid "Locale-adjusted numeric output is on.\n" msgstr "Локализованный вывод чисел включён.\n" -#: command.c:4091 +#: command.c:4114 #, c-format msgid "Locale-adjusted numeric output is off.\n" msgstr "Локализованный вывод чисел выключен.\n" -#: command.c:4098 +#: command.c:4121 #, c-format msgid "Pager is used for long output.\n" msgstr "Постраничник используется для вывода длинного текста.\n" -#: command.c:4100 +#: command.c:4123 #, c-format msgid "Pager is always used.\n" msgstr "Постраничник используется всегда.\n" -#: command.c:4102 +#: command.c:4125 #, c-format msgid "Pager usage is off.\n" msgstr "Постраничник выключен.\n" -#: command.c:4108 +#: command.c:4131 #, c-format msgid "Pager won't be used for less than %d line.\n" msgid_plural "Pager won't be used for less than %d lines.\n" @@ -622,87 +616,87 @@ msgstr[0] "Постраничник не будет использоваться msgstr[1] "Постраничник не будет использоваться, если строк меньше %d\n" msgstr[2] "Постраничник не будет использоваться, если строк меньше %d\n" -#: command.c:4118 command.c:4128 +#: command.c:4141 command.c:4151 #, c-format msgid "Record separator is zero byte.\n" msgstr "Разделитель записей - нулевой байт.\n" -#: command.c:4120 +#: command.c:4143 #, c-format msgid "Record separator is .\n" msgstr "Разделитель записей: <новая строка>.\n" -#: command.c:4122 +#: command.c:4145 #, c-format msgid "Record separator is \"%s\".\n" msgstr "Разделитель записей: \"%s\".\n" -#: command.c:4135 +#: command.c:4158 #, c-format msgid "Table attributes are \"%s\".\n" msgstr "Атрибуты HTML-таблицы: \"%s\".\n" -#: command.c:4138 +#: command.c:4161 #, c-format msgid "Table attributes unset.\n" msgstr "Атрибуты HTML-таблицы не заданы.\n" -#: command.c:4145 +#: command.c:4168 #, c-format msgid "Title is \"%s\".\n" msgstr "Заголовок: \"%s\".\n" -#: command.c:4147 +#: command.c:4170 #, c-format msgid "Title is unset.\n" msgstr "Заголовок не задан.\n" -#: command.c:4154 +#: command.c:4177 #, c-format msgid "Tuples only is on.\n" msgstr "Режим вывода только кортежей включён.\n" -#: command.c:4156 +#: command.c:4179 #, c-format msgid "Tuples only is off.\n" msgstr "Режим вывода только кортежей выключен.\n" -#: command.c:4162 +#: command.c:4185 #, c-format msgid "Unicode border line style is \"%s\".\n" msgstr "Стиль Unicode-линий границ: \"%s\".\n" -#: command.c:4168 +#: command.c:4191 #, c-format msgid "Unicode column line style is \"%s\".\n" msgstr "Стиль Unicode-линий столбцов: \"%s\".\n" -#: command.c:4174 +#: command.c:4197 #, c-format msgid "Unicode header line style is \"%s\".\n" msgstr "Стиль Unicode-линий границ: \"%s\".\n" -#: command.c:4334 +#: command.c:4357 #, c-format msgid "\\!: failed\n" msgstr "\\!: ошибка\n" -#: command.c:4359 common.c:754 +#: command.c:4382 common.c:754 #, c-format msgid "\\watch cannot be used with an empty query\n" msgstr "\\watch нельзя использовать с пустым запросом\n" -#: command.c:4400 +#: command.c:4423 #, c-format msgid "%s\t%s (every %gs)\n" msgstr "%s\t%s (обновление: %g с)\n" -#: command.c:4403 +#: command.c:4426 #, c-format msgid "%s (every %gs)\n" msgstr "%s (обновление: %g с)\n" -#: 
command.c:4457 command.c:4464 common.c:654 common.c:661 common.c:1271 +#: command.c:4480 command.c:4487 common.c:654 common.c:661 common.c:1271 #, c-format msgid "" "********* QUERY **********\n" @@ -715,12 +709,12 @@ msgstr "" "**************************\n" "\n" -#: command.c:4656 +#: command.c:4679 #, c-format msgid "\"%s.%s\" is not a view\n" msgstr "\"%s.%s\" - не представление\n" -#: command.c:4672 +#: command.c:4695 #, c-format msgid "could not parse reloptions array\n" msgstr "не удалось разобрать массив reloptions\n" @@ -901,16 +895,16 @@ msgstr "отменено пользователем" #: copy.c:542 msgid "" "Enter data to be copied followed by a newline.\n" -"End with a backslash and a period on a line by itself." +"End with a backslash and a period on a line by itself, or an EOF signal." msgstr "" "Вводите данные для копирования, разделяя строки переводом строки.\n" -"Закончите ввод строкой '\\.'." +"Закончите ввод строкой '\\.' или сигналом EOF." -#: copy.c:671 +#: copy.c:670 msgid "aborted because of read failure" msgstr "прерывание из-за ошибки чтения" -#: copy.c:695 +#: copy.c:704 msgid "trying to exit copy mode" msgstr "попытка выйти из режима копирования" @@ -970,663 +964,717 @@ msgstr "\\crosstabview: неоднозначное имя столбца: \"%s\" msgid "\\crosstabview: column name not found: \"%s\"\n" msgstr "\\crosstabview: имя столбца не найдено: \"%s\"\n" -#: describe.c:72 describe.c:341 describe.c:598 describe.c:729 describe.c:873 -#: describe.c:1034 describe.c:1106 describe.c:3268 describe.c:3474 -#: describe.c:3565 describe.c:3813 describe.c:3958 describe.c:4190 -#: describe.c:4265 describe.c:4276 describe.c:4338 describe.c:4758 -#: describe.c:4841 +#: describe.c:74 describe.c:346 describe.c:603 describe.c:735 describe.c:879 +#: describe.c:1040 describe.c:1112 describe.c:3371 describe.c:3583 +#: describe.c:3674 describe.c:3922 describe.c:4067 describe.c:4308 +#: describe.c:4383 describe.c:4394 describe.c:4456 describe.c:4881 +#: describe.c:4964 msgid "Schema" msgstr "Схема" -#: describe.c:73 describe.c:161 describe.c:227 describe.c:235 describe.c:342 -#: describe.c:599 describe.c:730 describe.c:791 describe.c:874 describe.c:1107 -#: describe.c:3269 describe.c:3397 describe.c:3475 describe.c:3566 -#: describe.c:3645 describe.c:3814 describe.c:3883 describe.c:3959 -#: describe.c:4191 describe.c:4266 describe.c:4277 describe.c:4339 -#: describe.c:4531 describe.c:4615 describe.c:4839 describe.c:5006 -#: describe.c:5194 +#: describe.c:75 describe.c:164 describe.c:231 describe.c:239 describe.c:347 +#: describe.c:604 describe.c:736 describe.c:797 describe.c:880 describe.c:1113 +#: describe.c:3372 describe.c:3506 describe.c:3584 describe.c:3675 +#: describe.c:3754 describe.c:3923 describe.c:3992 describe.c:4068 +#: describe.c:4309 describe.c:4384 describe.c:4395 describe.c:4457 +#: describe.c:4654 describe.c:4738 describe.c:4962 describe.c:5134 +#: describe.c:5341 msgid "Name" msgstr "Имя" -#: describe.c:74 describe.c:354 describe.c:400 describe.c:417 +#: describe.c:76 describe.c:359 describe.c:405 describe.c:422 msgid "Result data type" msgstr "Тип данных результата" -#: describe.c:82 describe.c:95 describe.c:99 describe.c:355 describe.c:401 -#: describe.c:418 +#: describe.c:84 describe.c:97 describe.c:101 describe.c:360 describe.c:406 +#: describe.c:423 msgid "Argument data types" msgstr "Типы данных аргументов" -#: describe.c:106 describe.c:171 describe.c:258 describe.c:463 describe.c:647 -#: describe.c:745 describe.c:816 describe.c:1109 describe.c:1741 -#: describe.c:3068 describe.c:3303 
describe.c:3428 describe.c:3502 -#: describe.c:3575 describe.c:3658 describe.c:3726 describe.c:3826 -#: describe.c:3892 describe.c:3960 describe.c:4096 describe.c:4136 -#: describe.c:4207 describe.c:4269 describe.c:4278 describe.c:4340 -#: describe.c:4557 describe.c:4637 describe.c:4772 describe.c:4842 +#: describe.c:108 describe.c:174 describe.c:262 describe.c:468 describe.c:652 +#: describe.c:751 describe.c:822 describe.c:1115 describe.c:1845 +#: describe.c:3161 describe.c:3406 describe.c:3537 describe.c:3611 +#: describe.c:3684 describe.c:3767 describe.c:3835 describe.c:3935 +#: describe.c:4001 describe.c:4069 describe.c:4210 describe.c:4252 +#: describe.c:4325 describe.c:4387 describe.c:4396 describe.c:4458 +#: describe.c:4680 describe.c:4760 describe.c:4895 describe.c:4965 #: large_obj.c:289 large_obj.c:299 msgid "Description" msgstr "Описание" -#: describe.c:124 +#: describe.c:126 msgid "List of aggregate functions" msgstr "Список агрегатных функций" -#: describe.c:148 +#: describe.c:151 #, c-format msgid "The server (version %s) does not support access methods.\n" msgstr "Сервер (версия %s) не поддерживает методы доступа.\n" -#: describe.c:162 +#: describe.c:165 msgid "Index" msgstr "Индекс" -#: describe.c:163 describe.c:361 describe.c:406 describe.c:423 describe.c:881 -#: describe.c:1045 describe.c:1701 describe.c:3278 describe.c:3476 -#: describe.c:4634 +#: describe.c:166 describe.c:366 describe.c:411 describe.c:428 describe.c:887 +#: describe.c:1051 describe.c:1582 describe.c:1606 describe.c:1808 +#: describe.c:3381 describe.c:3585 describe.c:4757 msgid "Type" msgstr "Тип" -#: describe.c:170 describe.c:4536 +#: describe.c:173 describe.c:4659 msgid "Handler" msgstr "Обработчик" -#: describe.c:189 +#: describe.c:192 msgid "List of access methods" msgstr "Список методов доступа" -#: describe.c:214 +#: describe.c:218 #, c-format msgid "The server (version %s) does not support tablespaces.\n" msgstr "Сервер (версия %s) не поддерживает табличные пространства.\n" -#: describe.c:228 describe.c:236 describe.c:451 describe.c:637 describe.c:792 -#: describe.c:1033 describe.c:3279 describe.c:3401 describe.c:3647 -#: describe.c:3884 describe.c:4532 describe.c:4616 describe.c:5007 -#: describe.c:5195 large_obj.c:288 +#: describe.c:232 describe.c:240 describe.c:456 describe.c:642 describe.c:798 +#: describe.c:1039 describe.c:3382 describe.c:3510 describe.c:3756 +#: describe.c:3993 describe.c:4655 describe.c:4739 describe.c:5135 +#: describe.c:5247 describe.c:5342 large_obj.c:288 msgid "Owner" msgstr "Владелец" -#: describe.c:229 describe.c:237 +#: describe.c:233 describe.c:241 msgid "Location" msgstr "Расположение" -#: describe.c:248 describe.c:2883 +#: describe.c:252 describe.c:2980 msgid "Options" msgstr "Параметры" -#: describe.c:253 describe.c:610 describe.c:808 describe.c:3295 describe.c:3299 +#: describe.c:257 describe.c:615 describe.c:814 describe.c:3398 describe.c:3402 msgid "Size" msgstr "Размер" -#: describe.c:275 +#: describe.c:279 msgid "List of tablespaces" msgstr "Список табличных пространств" -#: describe.c:315 +#: describe.c:320 #, c-format msgid "\\df only takes [antwS+] as options\n" msgstr "\\df принимает в качестве параметров только [antwS+]\n" -#: describe.c:323 +#: describe.c:328 #, c-format msgid "\\df does not take a \"w\" option with server version %s\n" msgstr "\\df не поддерживает параметр \"w\" с сервером версии %s\n" # well-spelled: агр #. 
translator: "agg" is short for "aggregate" -#: describe.c:357 describe.c:403 describe.c:420 +#: describe.c:362 describe.c:408 describe.c:425 msgid "agg" msgstr "агр." -#: describe.c:358 +#: describe.c:363 msgid "window" msgstr "оконная" -#: describe.c:359 describe.c:404 describe.c:421 describe.c:1243 +#: describe.c:364 describe.c:409 describe.c:426 describe.c:1249 msgid "trigger" msgstr "триггерная" -#: describe.c:360 describe.c:405 describe.c:422 +#: describe.c:365 describe.c:410 describe.c:427 msgid "normal" msgstr "обычная" -#: describe.c:433 +#: describe.c:438 msgid "immutable" msgstr "постоянная" -#: describe.c:434 +#: describe.c:439 msgid "stable" msgstr "стабильная" -#: describe.c:435 +#: describe.c:440 msgid "volatile" msgstr "изменчивая" -#: describe.c:436 +#: describe.c:441 msgid "Volatility" msgstr "Изменчивость" -#: describe.c:444 +#: describe.c:449 msgid "restricted" msgstr "ограниченная" -#: describe.c:445 +#: describe.c:450 msgid "safe" msgstr "безопасная" -#: describe.c:446 +#: describe.c:451 msgid "unsafe" msgstr "небезопасная" -#: describe.c:447 +#: describe.c:452 msgid "Parallel" msgstr "Параллельность" -#: describe.c:452 +#: describe.c:457 msgid "definer" msgstr "определившего" -#: describe.c:453 +#: describe.c:458 msgid "invoker" msgstr "вызывающего" -#: describe.c:454 +#: describe.c:459 msgid "Security" msgstr "Безопасность" -#: describe.c:461 +#: describe.c:466 msgid "Language" msgstr "Язык" -#: describe.c:462 +#: describe.c:467 msgid "Source code" msgstr "Исходный код" -#: describe.c:561 +#: describe.c:566 msgid "List of functions" msgstr "Список функций" -#: describe.c:609 +#: describe.c:614 msgid "Internal name" msgstr "Внутреннее имя" -#: describe.c:631 +#: describe.c:636 msgid "Elements" msgstr "Элементы" -#: describe.c:688 +#: describe.c:693 msgid "List of data types" msgstr "Список типов данных" -#: describe.c:731 +#: describe.c:737 msgid "Left arg type" msgstr "Тип левого аргумента" -#: describe.c:732 +#: describe.c:738 msgid "Right arg type" msgstr "Тип правого аргумента" -#: describe.c:733 +#: describe.c:739 msgid "Result type" msgstr "Результирующий тип" -#: describe.c:738 describe.c:3717 describe.c:4095 +#: describe.c:744 describe.c:3826 describe.c:4209 msgid "Function" msgstr "Функция" -#: describe.c:763 +#: describe.c:769 msgid "List of operators" msgstr "Список операторов" -#: describe.c:793 +#: describe.c:799 msgid "Encoding" msgstr "Кодировка" -#: describe.c:798 describe.c:3815 +#: describe.c:804 describe.c:3924 msgid "Collate" msgstr "LC_COLLATE" -#: describe.c:799 describe.c:3816 +#: describe.c:805 describe.c:3925 msgid "Ctype" msgstr "LC_CTYPE" -#: describe.c:812 +#: describe.c:818 msgid "Tablespace" msgstr "Табл. 
пространство" -#: describe.c:834 +#: describe.c:840 msgid "List of databases" msgstr "Список баз данных" -#: describe.c:875 describe.c:880 describe.c:1036 describe.c:3270 -#: describe.c:3277 +#: describe.c:881 describe.c:886 describe.c:1042 describe.c:3373 +#: describe.c:3380 msgid "table" msgstr "таблица" -#: describe.c:876 describe.c:3271 +#: describe.c:882 describe.c:3374 msgid "view" msgstr "представление" -#: describe.c:877 describe.c:3272 +#: describe.c:883 describe.c:3375 msgid "materialized view" msgstr "материализованное представление" -#: describe.c:878 describe.c:1038 describe.c:3274 +#: describe.c:884 describe.c:1044 describe.c:3377 msgid "sequence" msgstr "последовательность" -#: describe.c:879 describe.c:3276 +#: describe.c:885 describe.c:3379 msgid "foreign table" msgstr "сторонняя таблица" -#: describe.c:892 +#: describe.c:898 msgid "Column privileges" msgstr "Права для столбцов" -#: describe.c:923 describe.c:957 +#: describe.c:929 describe.c:963 msgid "Policies" msgstr "Политики" -#: describe.c:989 describe.c:5249 describe.c:5253 +#: describe.c:995 describe.c:5398 describe.c:5402 msgid "Access privileges" msgstr "Права доступа" -#: describe.c:1020 +#: describe.c:1026 #, c-format msgid "The server (version %s) does not support altering default privileges.\n" msgstr "Сервер (версия %s) не поддерживает изменение прав по умолчанию.\n" -#: describe.c:1040 +#: describe.c:1046 msgid "function" msgstr "функция" -#: describe.c:1042 +#: describe.c:1048 msgid "type" msgstr "тип" -#: describe.c:1044 +#: describe.c:1050 msgid "schema" msgstr "схема" -#: describe.c:1068 +#: describe.c:1074 msgid "Default access privileges" msgstr "Права доступа по умолчанию" -#: describe.c:1108 +#: describe.c:1114 msgid "Object" msgstr "Объект" -#: describe.c:1122 +#: describe.c:1128 msgid "table constraint" msgstr "ограничение таблицы" -#: describe.c:1144 +#: describe.c:1150 msgid "domain constraint" msgstr "ограничение домена" -#: describe.c:1172 +#: describe.c:1178 msgid "operator class" msgstr "класс операторов" -#: describe.c:1201 +#: describe.c:1207 msgid "operator family" msgstr "семейство операторов" -#: describe.c:1223 +#: describe.c:1229 msgid "rule" msgstr "правило" -#: describe.c:1265 +#: describe.c:1271 msgid "Object descriptions" msgstr "Описание объекта" -#: describe.c:1319 +#: describe.c:1327 describe.c:3469 #, c-format msgid "Did not find any relation named \"%s\".\n" msgstr "Отношение \"%s\" не найдено.\n" -#: describe.c:1528 +#: describe.c:1330 describe.c:3472 +#, c-format +msgid "Did not find any relations.\n" +msgstr "Отношения не найдены.\n" + +#: describe.c:1537 #, c-format msgid "Did not find any relation with OID %s.\n" msgstr "Отношение с OID %s не найдено.\n" -#: describe.c:1637 describe.c:1686 +#: describe.c:1583 describe.c:1607 +msgid "Start" +msgstr "Начальное_значение" + +#: describe.c:1584 describe.c:1608 +msgid "Minimum" +msgstr "Минимум" + +#: describe.c:1585 describe.c:1609 +msgid "Maximum" +msgstr "Максимум" + +#: describe.c:1586 describe.c:1610 +msgid "Increment" +msgstr "Шаг" + +#: describe.c:1587 describe.c:1611 describe.c:3678 describe.c:3829 +msgid "yes" +msgstr "да" + +#: describe.c:1588 describe.c:1612 describe.c:3678 describe.c:3827 +msgid "no" +msgstr "нет" + +#: describe.c:1589 describe.c:1613 +msgid "Cycles?" +msgstr "Зацикливается?" 
+ +#: describe.c:1590 describe.c:1614 +msgid "Cache" +msgstr "Кешируется" + +#: describe.c:1657 +#, c-format +msgid "Owned by: %s" +msgstr "Владелец: %s" + +#: describe.c:1661 +#, c-format +msgid "Sequence for identity column: %s" +msgstr "Последовательность для столбца идентификации: %s" + +#: describe.c:1668 +#, c-format +msgid "Sequence \"%s.%s\"" +msgstr "Последовательность \"%s.%s\"" + +#: describe.c:1748 describe.c:1793 #, c-format msgid "Unlogged table \"%s.%s\"" msgstr "Нежурналируемая таблица \"%s.%s\"" -#: describe.c:1640 describe.c:1689 +#: describe.c:1751 describe.c:1796 #, c-format msgid "Table \"%s.%s\"" msgstr "Таблица \"%s.%s\"" -#: describe.c:1644 +#: describe.c:1755 #, c-format msgid "View \"%s.%s\"" msgstr "Представление \"%s.%s\"" -#: describe.c:1649 +#: describe.c:1760 #, c-format msgid "Unlogged materialized view \"%s.%s\"" msgstr "Нежурналируемое материализованное представление \"%s.%s\"" -#: describe.c:1652 +#: describe.c:1763 #, c-format msgid "Materialized view \"%s.%s\"" msgstr "Материализованное представление \"%s.%s\"" -#: describe.c:1656 -#, c-format -msgid "Sequence \"%s.%s\"" -msgstr "Последовательность \"%s.%s\"" - -#: describe.c:1661 +#: describe.c:1768 #, c-format msgid "Unlogged index \"%s.%s\"" msgstr "Нежурналируемый индекс \"%s.%s\"" -#: describe.c:1664 +#: describe.c:1771 #, c-format msgid "Index \"%s.%s\"" msgstr "Индекс \"%s.%s\"" -#: describe.c:1669 +#: describe.c:1776 #, c-format msgid "Special relation \"%s.%s\"" msgstr "Специальное отношение \"%s.%s\"" -#: describe.c:1673 +#: describe.c:1780 #, c-format msgid "TOAST table \"%s.%s\"" msgstr "TOAST-таблица \"%s.%s\"" -#: describe.c:1677 +#: describe.c:1784 #, c-format msgid "Composite type \"%s.%s\"" msgstr "Составной тип \"%s.%s\"" -#: describe.c:1681 +#: describe.c:1788 #, c-format msgid "Foreign table \"%s.%s\"" msgstr "Сторонняя таблица \"%s.%s\"" -#: describe.c:1700 +#: describe.c:1807 msgid "Column" msgstr "Столбец" -#: describe.c:1711 describe.c:3482 +#: describe.c:1818 describe.c:3591 msgid "Collation" msgstr "Правило сортировки" -#: describe.c:1712 describe.c:3489 +#: describe.c:1819 describe.c:3598 msgid "Nullable" msgstr "Допустимость NULL" -#: describe.c:1713 describe.c:3490 +#: describe.c:1820 describe.c:3599 msgid "Default" msgstr "По умолчанию" -#: describe.c:1718 -msgid "Value" -msgstr "Значение" - -#: describe.c:1721 +#: describe.c:1825 msgid "Definition" msgstr "Определение" # well-spelled: ОСД -#: describe.c:1724 describe.c:4552 describe.c:4636 describe.c:4707 -#: describe.c:4771 -msgid "FDW Options" +#: describe.c:1828 describe.c:4675 describe.c:4759 describe.c:4830 +#: describe.c:4894 +msgid "FDW options" msgstr "Параметры ОСД" -#: describe.c:1728 +#: describe.c:1832 msgid "Storage" msgstr "Хранилище" -#: describe.c:1733 +#: describe.c:1837 msgid "Stats target" msgstr "Цель для статистики" -#: describe.c:1859 +#: describe.c:1982 #, c-format msgid "Partition of: %s %s" msgstr "Секция из: %s %s" -#: describe.c:1880 +#: describe.c:1988 +#, c-format +msgid "Partition constraint: %s" +msgstr "Ограничение секции: %s" + +#: describe.c:2011 #, c-format msgid "Partition key: %s" msgstr "Ключ разбиения: %s" -#: describe.c:1948 +#: describe.c:2079 msgid "primary key, " msgstr "первичный ключ, " -#: describe.c:1950 +#: describe.c:2081 msgid "unique, " msgstr "уникальный, " -#: describe.c:1956 +#: describe.c:2087 #, c-format msgid "for table \"%s.%s\"" msgstr "для таблицы \"%s.%s\"" -#: describe.c:1960 +#: describe.c:2091 #, c-format msgid ", predicate (%s)" msgstr ", предикат (%s)" -#: 
describe.c:1963 +#: describe.c:2094 msgid ", clustered" msgstr ", кластеризованный" -#: describe.c:1966 +#: describe.c:2097 msgid ", invalid" msgstr ", нерабочий" -#: describe.c:1969 +#: describe.c:2100 msgid ", deferrable" msgstr ", откладываемый" -#: describe.c:1972 +#: describe.c:2103 msgid ", initially deferred" msgstr ", изначально отложенный" -#: describe.c:1975 +#: describe.c:2106 msgid ", replica identity" msgstr ", репликационный" -#: describe.c:2010 -#, c-format -msgid "Owned by: %s" -msgstr "Владелец: %s" - -#: describe.c:2072 +#: describe.c:2165 msgid "Indexes:" msgstr "Индексы:" -#: describe.c:2156 +#: describe.c:2249 msgid "Check constraints:" msgstr "Ограничения-проверки:" # TO REWVIEW -#: describe.c:2187 +#: describe.c:2280 msgid "Foreign-key constraints:" msgstr "Ограничения внешнего ключа:" -#: describe.c:2218 +#: describe.c:2311 msgid "Referenced by:" msgstr "Ссылки извне:" -#: describe.c:2278 +#: describe.c:2361 msgid "Policies:" msgstr "Политики:" -#: describe.c:2281 +#: describe.c:2364 msgid "Policies (forced row security enabled):" msgstr "Политики (усиленная защита строк включена):" -#: describe.c:2284 +#: describe.c:2367 msgid "Policies (row security enabled): (none)" msgstr "Политики (защита строк включена): (Нет)" -#: describe.c:2287 +#: describe.c:2370 msgid "Policies (forced row security enabled): (none)" msgstr "Политики (усиленная защита строк включена): (Нет)" -#: describe.c:2290 +#: describe.c:2373 msgid "Policies (row security disabled):" msgstr "Политики (защита строк выключена):" -#: describe.c:2347 -msgid "Statistics:" -msgstr "Статистика:" +#: describe.c:2435 +msgid "Statistics objects:" +msgstr "Объекты статистики:" -#: describe.c:2444 describe.c:2526 +#: describe.c:2538 describe.c:2623 msgid "Rules:" msgstr "Правила:" -#: describe.c:2447 +#: describe.c:2541 msgid "Disabled rules:" msgstr "Отключённые правила:" -#: describe.c:2450 +#: describe.c:2544 msgid "Rules firing always:" msgstr "Правила, срабатывающие всегда:" -#: describe.c:2453 +#: describe.c:2547 msgid "Rules firing on replica only:" msgstr "Правила, срабатывающие только в реплике:" -#: describe.c:2490 +#: describe.c:2587 msgid "Publications:" msgstr "Публикации:" -#: describe.c:2509 +#: describe.c:2606 msgid "View definition:" msgstr "Определение представления:" -#: describe.c:2644 +#: describe.c:2741 msgid "Triggers:" msgstr "Триггеры:" -#: describe.c:2648 +#: describe.c:2745 msgid "Disabled user triggers:" msgstr "Отключённые пользовательские триггеры:" -#: describe.c:2650 +#: describe.c:2747 msgid "Disabled triggers:" msgstr "Отключённые триггеры:" -#: describe.c:2653 +#: describe.c:2750 msgid "Disabled internal triggers:" msgstr "Отключённые внутренние триггеры:" -#: describe.c:2656 +#: describe.c:2753 msgid "Triggers firing always:" msgstr "Триггеры, срабатывающие всегда:" -#: describe.c:2659 +#: describe.c:2756 msgid "Triggers firing on replica only:" msgstr "Триггеры, срабатывающие только в реплике:" -#: describe.c:2745 +#: describe.c:2815 +#, c-format +msgid "Server: %s" +msgstr "Сервер: %s" + +# well-spelled: ОСД +#: describe.c:2823 +#, c-format +msgid "FDW options: (%s)" +msgstr "Параметр ОСД: (%s)" + +#: describe.c:2842 msgid "Inherits" msgstr "Наследует" -#: describe.c:2799 +#: describe.c:2896 #, c-format msgid "Number of child tables: %d (Use \\d+ to list them.)" msgstr "Дочерних таблиц: %d (чтобы просмотреть и их, воспользуйтесь \\d+)" -#: describe.c:2801 +#: describe.c:2898 #, c-format msgid "Number of partitions: %d (Use \\d+ to list them.)" msgstr "Число секций: %d (чтобы 
просмотреть их, введите \\d+)" -#: describe.c:2809 +#: describe.c:2906 msgid "Child tables" msgstr "Дочерние таблицы" -#: describe.c:2809 +#: describe.c:2906 msgid "Partitions" msgstr "Секции" -#: describe.c:2843 +#: describe.c:2940 #, c-format msgid "Typed table of type: %s" msgstr "Типизированная таблица типа: %s" -#: describe.c:2859 +#: describe.c:2956 msgid "Replica Identity" msgstr "Идентификация реплики" -#: describe.c:2872 +#: describe.c:2969 msgid "Has OIDs: yes" msgstr "Содержит OID: да" -#: describe.c:2956 +#: describe.c:3049 #, c-format msgid "Tablespace: \"%s\"" msgstr "Табличное пространство: \"%s\"" #. translator: before this string there's an index description like #. '"foo_pkey" PRIMARY KEY, btree (a)' -#: describe.c:2968 +#: describe.c:3061 #, c-format msgid ", tablespace \"%s\"" msgstr ", табл. пространство \"%s\"" -#: describe.c:3061 +#: describe.c:3154 msgid "List of roles" msgstr "Список ролей" -#: describe.c:3063 +#: describe.c:3156 msgid "Role name" msgstr "Имя роли" -#: describe.c:3064 +#: describe.c:3157 msgid "Attributes" msgstr "Атрибуты" -#: describe.c:3065 +#: describe.c:3158 msgid "Member of" msgstr "Член ролей" -#: describe.c:3076 +#: describe.c:3169 msgid "Superuser" msgstr "Суперпользователь" -#: describe.c:3079 +#: describe.c:3172 msgid "No inheritance" msgstr "Не наследуется" -#: describe.c:3082 +#: describe.c:3175 msgid "Create role" msgstr "Создаёт роли" -#: describe.c:3085 +#: describe.c:3178 msgid "Create DB" msgstr "Создаёт БД" -#: describe.c:3088 +#: describe.c:3181 msgid "Cannot login" msgstr "Вход запрещён" -#: describe.c:3092 +#: describe.c:3185 msgid "Replication" msgstr "Репликация" -#: describe.c:3096 +#: describe.c:3189 msgid "Bypass RLS" msgstr "Пропускать RLS" -#: describe.c:3105 +#: describe.c:3198 msgid "No connections" msgstr "Нет подключений" -#: describe.c:3107 +#: describe.c:3200 #, c-format msgid "%d connection" msgid_plural "%d connections" @@ -1634,312 +1682,310 @@ msgstr[0] "%d подключение" msgstr[1] "%d подключения" msgstr[2] "%d подключений" -#: describe.c:3117 +#: describe.c:3210 msgid "Password valid until " msgstr "Пароль действует до " -#: describe.c:3173 +#: describe.c:3260 +#, c-format +msgid "The server (version %s) does not support per-database role settings.\n" +msgstr "" +"Сервер (версия %s) не поддерживает назначение параметров ролей для баз " +"данных.\n" + +#: describe.c:3273 msgid "Role" msgstr "Роль" -#: describe.c:3174 +#: describe.c:3274 msgid "Database" msgstr "БД" -#: describe.c:3175 +#: describe.c:3275 msgid "Settings" msgstr "Параметры" -#: describe.c:3185 +#: describe.c:3296 #, c-format -msgid "No per-database role settings support in this server version.\n" -msgstr "" -"Это версия сервера не поддерживает параметры ролей на уровне базы данных.\n" +msgid "Did not find any settings for role \"%s\" and database \"%s\".\n" +msgstr "Параметры для роли \"%s\" и базы данных \"%s\" не найдены.\n" -#: describe.c:3196 +#: describe.c:3299 #, c-format -msgid "No matching settings found.\n" -msgstr "Соответствующие параметры не найдены.\n" +msgid "Did not find any settings for role \"%s\".\n" +msgstr "Параметры для роли \"%s\" не найдены.\n" -#: describe.c:3198 +#: describe.c:3302 #, c-format -msgid "No settings found.\n" -msgstr "Параметры не найдены.\n" +msgid "Did not find any settings.\n" +msgstr "Никакие параметры не найдены.\n" -#: describe.c:3203 +#: describe.c:3307 msgid "List of settings" msgstr "Список параметров" -#: describe.c:3273 +#: describe.c:3376 msgid "index" msgstr "индекс" # skip-rule: 
capital-letter-first -#: describe.c:3275 +#: describe.c:3378 msgid "special" msgstr "спец. отношение" -#: describe.c:3284 describe.c:4759 +#: describe.c:3387 describe.c:4882 msgid "Table" msgstr "Таблица" -#: describe.c:3361 -#, c-format -msgid "No matching relations found.\n" -msgstr "Соответствующие отношения не найдены.\n" - -#: describe.c:3363 -#, c-format -msgid "No relations found.\n" -msgstr "Отношения не найдены.\n" - -#: describe.c:3368 +#: describe.c:3477 msgid "List of relations" msgstr "Список отношений" -#: describe.c:3405 +#: describe.c:3514 msgid "Trusted" msgstr "Доверенный" -#: describe.c:3413 -msgid "Internal Language" +#: describe.c:3522 +msgid "Internal language" msgstr "Внутренний язык" -#: describe.c:3414 -msgid "Call Handler" +#: describe.c:3523 +msgid "Call handler" msgstr "Обработчик вызова" -#: describe.c:3415 describe.c:4539 +#: describe.c:3524 describe.c:4662 msgid "Validator" msgstr "Функция проверки" -#: describe.c:3418 -msgid "Inline Handler" +#: describe.c:3527 +msgid "Inline handler" msgstr "Обработчик внедрённого кода" -#: describe.c:3446 +#: describe.c:3555 msgid "List of languages" msgstr "Список языков" -#: describe.c:3491 +#: describe.c:3600 msgid "Check" msgstr "Проверка" -#: describe.c:3533 +#: describe.c:3642 msgid "List of domains" msgstr "Список доменов" -#: describe.c:3567 +#: describe.c:3676 msgid "Source" msgstr "Источник" -#: describe.c:3568 +#: describe.c:3677 msgid "Destination" msgstr "Назначение" -#: describe.c:3569 describe.c:3718 -msgid "no" -msgstr "нет" - -#: describe.c:3569 describe.c:3720 -msgid "yes" -msgstr "да" - -#: describe.c:3570 +#: describe.c:3679 msgid "Default?" msgstr "По умолчанию?" -#: describe.c:3607 +#: describe.c:3716 msgid "List of conversions" msgstr "Список преобразований" -#: describe.c:3646 +#: describe.c:3755 msgid "Event" msgstr "Событие" -#: describe.c:3648 +#: describe.c:3757 msgid "enabled" msgstr "включён" -#: describe.c:3649 +#: describe.c:3758 msgid "replica" msgstr "реплика" -#: describe.c:3650 +#: describe.c:3759 msgid "always" msgstr "всегда" -#: describe.c:3651 +#: describe.c:3760 msgid "disabled" msgstr "отключён" -#: describe.c:3652 describe.c:5196 +#: describe.c:3761 describe.c:5343 msgid "Enabled" msgstr "Включён" -#: describe.c:3653 +#: describe.c:3762 msgid "Procedure" msgstr "Процедура" -#: describe.c:3654 +#: describe.c:3763 msgid "Tags" msgstr "Теги" -#: describe.c:3673 +#: describe.c:3782 msgid "List of event triggers" msgstr "Список событийных триггеров" -#: describe.c:3715 +#: describe.c:3824 msgid "Source type" msgstr "Исходный тип" -#: describe.c:3716 +#: describe.c:3825 msgid "Target type" msgstr "Целевой тип" -#: describe.c:3719 +#: describe.c:3828 msgid "in assignment" msgstr "в присваивании" -#: describe.c:3721 +#: describe.c:3830 msgid "Implicit?" msgstr "Неявное?" 
-#: describe.c:3772 +#: describe.c:3881 msgid "List of casts" msgstr "Список приведений типов" -#: describe.c:3800 +#: describe.c:3909 #, c-format msgid "The server (version %s) does not support collations.\n" msgstr "Сервер (версия %s) не поддерживает правила сравнения.\n" -#: describe.c:3821 +#: describe.c:3930 msgid "Provider" msgstr "Поставщик" -#: describe.c:3856 +#: describe.c:3965 msgid "List of collations" msgstr "Список правил сортировки" -#: describe.c:3915 +#: describe.c:4024 msgid "List of schemas" msgstr "Список схем" -#: describe.c:3940 describe.c:4178 describe.c:4249 describe.c:4320 +#: describe.c:4049 describe.c:4296 describe.c:4367 describe.c:4438 #, c-format msgid "The server (version %s) does not support full text search.\n" msgstr "Сервер (версия %s) не поддерживает полнотекстовый поиск.\n" -#: describe.c:3975 +#: describe.c:4084 msgid "List of text search parsers" msgstr "Список анализаторов текстового поиска" -#: describe.c:4018 +#: describe.c:4129 #, c-format msgid "Did not find any text search parser named \"%s\".\n" msgstr "Анализатор текстового поиска \"%s\" не найден.\n" -#: describe.c:4093 +#: describe.c:4132 +#, c-format +msgid "Did not find any text search parsers.\n" +msgstr "Никакие анализаторы текстового поиска не найдены.\n" + +#: describe.c:4207 msgid "Start parse" msgstr "Начало разбора" -#: describe.c:4094 +#: describe.c:4208 msgid "Method" msgstr "Метод" -#: describe.c:4098 +#: describe.c:4212 msgid "Get next token" msgstr "Получение следующего фрагмента" -#: describe.c:4100 +#: describe.c:4214 msgid "End parse" msgstr "Окончание разбора" -#: describe.c:4102 +#: describe.c:4216 msgid "Get headline" msgstr "Получение выдержки" -#: describe.c:4104 +#: describe.c:4218 msgid "Get token types" msgstr "Получение типов фрагментов" -#: describe.c:4114 +#: describe.c:4229 #, c-format msgid "Text search parser \"%s.%s\"" msgstr "Анализатор текстового поиска \"%s.%s\"" -#: describe.c:4116 +#: describe.c:4232 #, c-format msgid "Text search parser \"%s\"" msgstr "Анализатор текстового поиска \"%s\"" -#: describe.c:4135 +#: describe.c:4251 msgid "Token name" msgstr "Имя фрагмента" -#: describe.c:4146 +#: describe.c:4262 #, c-format msgid "Token types for parser \"%s.%s\"" msgstr "Типы фрагментов для анализатора \"%s.%s\"" -#: describe.c:4148 +#: describe.c:4265 #, c-format msgid "Token types for parser \"%s\"" msgstr "Типы фрагментов для анализатора \"%s\"" -#: describe.c:4201 +#: describe.c:4319 msgid "Template" msgstr "Шаблон" -#: describe.c:4202 +#: describe.c:4320 msgid "Init options" msgstr "Параметры инициализации" -#: describe.c:4224 +#: describe.c:4342 msgid "List of text search dictionaries" msgstr "Список словарей текстового поиска" -#: describe.c:4267 +#: describe.c:4385 msgid "Init" msgstr "Инициализация" -#: describe.c:4268 +#: describe.c:4386 msgid "Lexize" msgstr "Выделение лексем" -#: describe.c:4295 +#: describe.c:4413 msgid "List of text search templates" msgstr "Список шаблонов текстового поиска" -#: describe.c:4355 +#: describe.c:4473 msgid "List of text search configurations" msgstr "Список конфигураций текстового поиска" -#: describe.c:4399 +#: describe.c:4519 #, c-format msgid "Did not find any text search configuration named \"%s\".\n" msgstr "Конфигурация текстового поиска \"%s\" не найдена.\n" -#: describe.c:4465 +#: describe.c:4522 +#, c-format +msgid "Did not find any text search configurations.\n" +msgstr "Никакие конфигурации текстового поиска не найдены.\n" + +#: describe.c:4588 msgid "Token" msgstr "Фрагмент" -#: describe.c:4466 +#: 
describe.c:4589 msgid "Dictionaries" msgstr "Словари" -#: describe.c:4477 +#: describe.c:4600 #, c-format msgid "Text search configuration \"%s.%s\"" msgstr "Конфигурация текстового поиска \"%s.%s\"" -#: describe.c:4480 +#: describe.c:4603 #, c-format msgid "Text search configuration \"%s\"" msgstr "Конфигурация текстового поиска \"%s\"" -#: describe.c:4484 +#: describe.c:4607 #, c-format msgid "" "\n" @@ -1948,7 +1994,7 @@ msgstr "" "\n" "Анализатор: \"%s.%s\"" -#: describe.c:4487 +#: describe.c:4610 #, c-format msgid "" "\n" @@ -1957,130 +2003,148 @@ msgstr "" "\n" "Анализатор: \"%s\"" -#: describe.c:4521 +#: describe.c:4644 #, c-format msgid "The server (version %s) does not support foreign-data wrappers.\n" msgstr "Сервер (версия %s) не поддерживает обёртки сторонних данных.\n" -#: describe.c:4579 +#: describe.c:4702 msgid "List of foreign-data wrappers" msgstr "Список обёрток сторонних данных" -#: describe.c:4604 +#: describe.c:4727 #, c-format msgid "The server (version %s) does not support foreign servers.\n" msgstr "Сервер (версия %s) не поддерживает сторонние серверы.\n" -#: describe.c:4617 +#: describe.c:4740 msgid "Foreign-data wrapper" msgstr "Обёртка сторонних данных" -#: describe.c:4635 describe.c:4840 +#: describe.c:4758 describe.c:4963 msgid "Version" msgstr "Версия" -#: describe.c:4661 +#: describe.c:4784 msgid "List of foreign servers" msgstr "Список сторонних серверов" -#: describe.c:4686 +#: describe.c:4809 #, c-format msgid "The server (version %s) does not support user mappings.\n" msgstr "Сервер (версия %s) не поддерживает сопоставления пользователей.\n" -#: describe.c:4696 describe.c:4760 +#: describe.c:4819 describe.c:4883 msgid "Server" msgstr "Сервер" -#: describe.c:4697 +#: describe.c:4820 msgid "User name" msgstr "Имя пользователя" -#: describe.c:4722 +#: describe.c:4845 msgid "List of user mappings" msgstr "Список сопоставлений пользователей" -#: describe.c:4747 +#: describe.c:4870 #, c-format msgid "The server (version %s) does not support foreign tables.\n" msgstr "Сервер (версия %s) не поддерживает сторонние таблицы.\n" -#: describe.c:4800 +#: describe.c:4923 msgid "List of foreign tables" msgstr "Список сторонних таблиц" -#: describe.c:4825 describe.c:4882 +#: describe.c:4948 describe.c:5005 #, c-format msgid "The server (version %s) does not support extensions.\n" msgstr "Сервер (версия %s) не поддерживает расширения.\n" -#: describe.c:4857 +#: describe.c:4980 msgid "List of installed extensions" msgstr "Список установленных расширений" -#: describe.c:4910 +#: describe.c:5033 #, c-format msgid "Did not find any extension named \"%s\".\n" msgstr "Расширение \"%s\" не найдено.\n" -#: describe.c:4913 +#: describe.c:5036 #, c-format msgid "Did not find any extensions.\n" msgstr "Расширения не найдены.\n" -#: describe.c:4957 -msgid "Object Description" +#: describe.c:5080 +msgid "Object description" msgstr "Описание объекта" -#: describe.c:4966 +#: describe.c:5090 #, c-format msgid "Objects in extension \"%s\"" msgstr "Объекты в расширении \"%s\"" -#: describe.c:4992 describe.c:5054 +#: describe.c:5119 describe.c:5185 #, c-format msgid "The server (version %s) does not support publications.\n" msgstr "Сервер (версия %s) не поддерживает публикации.\n" -#: describe.c:5008 describe.c:5099 +#: describe.c:5136 describe.c:5248 +msgid "All tables" +msgstr "Все таблицы" + +#: describe.c:5137 describe.c:5249 msgid "Inserts" msgstr "Добавления" -#: describe.c:5009 describe.c:5100 +#: describe.c:5138 describe.c:5250 msgid "Updates" msgstr "Изменения" -#: describe.c:5010 
describe.c:5101 +#: describe.c:5139 describe.c:5251 msgid "Deletes" msgstr "Удаления" -#: describe.c:5027 +#: describe.c:5156 msgid "List of publications" msgstr "Список публикаций" -#: describe.c:5096 +#: describe.c:5217 +#, c-format +msgid "Did not find any publication named \"%s\".\n" +msgstr "Публикация \"%s\" не найдена.\n" + +#: describe.c:5220 +#, c-format +msgid "Did not find any publications.\n" +msgstr "Никакие публикации не найдены.\n" + +#: describe.c:5244 #, c-format msgid "Publication %s" msgstr "Публикация %s" -#: describe.c:5141 +#: describe.c:5284 msgid "Tables:" msgstr "Таблицы:" -#: describe.c:5181 +#: describe.c:5328 #, c-format msgid "The server (version %s) does not support subscriptions.\n" msgstr "Сервер (версия %s) не поддерживает подписки.\n" -#: describe.c:5197 +#: describe.c:5344 msgid "Publication" msgstr "Публикация" -#: describe.c:5203 +#: describe.c:5351 +msgid "Synchronous commit" +msgstr "Синхронная фиксация" + +#: describe.c:5352 msgid "Conninfo" msgstr "Строка подключения" -#: describe.c:5225 +#: describe.c:5374 msgid "List of subscriptions" msgstr "Список подписок" @@ -2098,7 +2162,7 @@ msgstr "" "psql - это интерактивный терминал PostgreSQL.\n" "\n" -#: help.c:74 help.c:342 help.c:376 help.c:403 +#: help.c:74 help.c:344 help.c:383 help.c:410 #, c-format msgid "Usage:\n" msgstr "Использование:\n" @@ -2443,13 +2507,21 @@ msgstr "" #: help.c:174 #, c-format msgid "" +" \\crosstabview [COLUMNS] execute query and display results in crosstab\n" +msgstr "" +" \\crosstabview [СТОЛБЦЫ] выполнить запрос и вывести результат в " +"перекрёстном виде\n" + +#: help.c:175 +#, c-format +msgid "" " \\errverbose show most recent error message at maximum " "verbosity\n" msgstr "" " \\errverbose вывести максимально подробное сообщение о " "последней ошибке\n" -#: help.c:175 +#: help.c:176 #, c-format msgid "" " \\g [FILE] or ; execute query (and send results to file or |pipe)\n" @@ -2457,12 +2529,6 @@ msgstr "" " \\g [ФАЙЛ] или ; выполнить запрос\n" " (и направить результаты в файл или канал |)\n" -#: help.c:176 -#, c-format -msgid " \\gx [FILE] as \\g, but forces expanded output mode\n" -msgstr "" -" \\gx [ФАЙЛ] то же, что и \\g, но в режиме развёрнутого вывода\n" - #: help.c:177 #, c-format msgid "" @@ -2483,16 +2549,14 @@ msgstr "" #: help.c:179 #, c-format -msgid " \\q quit psql\n" -msgstr " \\q выйти из psql\n" +msgid " \\gx [FILE] as \\g, but forces expanded output mode\n" +msgstr "" +" \\gx [ФАЙЛ] то же, что и \\g, но в режиме развёрнутого вывода\n" #: help.c:180 #, c-format -msgid "" -" \\crosstabview [COLUMNS] execute query and display results in crosstab\n" -msgstr "" -" \\crosstabview [СТОЛБЦЫ] выполнить запрос и вывести результат в " -"перекрёстном виде\n" +msgid " \\q quit psql\n" +msgstr " \\q выйти из psql\n" #: help.c:181 #, c-format @@ -2715,35 +2779,40 @@ msgstr "" #: help.c:230 #, c-format -msgid " \\ddp [PATTERN] list default privileges\n" -msgstr " \\ddp [МАСКА] список прав по умолчанию\n" +msgid " \\dD[S+] [PATTERN] list domains\n" +msgstr " \\dD[S+] [МАСКА] список доменов\n" #: help.c:231 #, c-format -msgid " \\dD[S+] [PATTERN] list domains\n" -msgstr " \\dD[S+] [МАСКА] список доменов\n" +msgid " \\ddp [PATTERN] list default privileges\n" +msgstr " \\ddp [МАСКА] список прав по умолчанию\n" #: help.c:232 #, c-format +msgid " \\dE[S+] [PATTERN] list foreign tables\n" +msgstr " \\dE[S+] [МАСКА] список сторонних таблиц\n" + +#: help.c:233 +#, c-format msgid " \\det[+] [PATTERN] list foreign tables\n" msgstr " \\det[+] [МАСКА] список сторонних таблиц\n" 
-#: help.c:233 +#: help.c:234 #, c-format msgid " \\des[+] [PATTERN] list foreign servers\n" msgstr " \\des[+] [МАСКА] список сторонних серверов\n" -#: help.c:234 +#: help.c:235 #, c-format msgid " \\deu[+] [PATTERN] list user mappings\n" msgstr " \\deu[+] [МАСКА] список сопоставлений пользователей\n" -#: help.c:235 +#: help.c:236 #, c-format msgid " \\dew[+] [PATTERN] list foreign-data wrappers\n" msgstr " \\dew[+] [МАСКА] список обёрток сторонних данных\n" -#: help.c:236 +#: help.c:237 #, c-format msgid "" " \\df[antw][S+] [PATRN] list [only agg/normal/trigger/window] functions\n" @@ -2751,68 +2820,68 @@ msgstr "" " \\df[antw][S+] [МАСКА] список [агрегатных/нормальных/триггерных/оконных]\n" " функций соответственно\n" -#: help.c:237 +#: help.c:238 #, c-format msgid " \\dF[+] [PATTERN] list text search configurations\n" msgstr " \\dF[+] [МАСКА] список конфигураций текстового поиска\n" -#: help.c:238 +#: help.c:239 #, c-format msgid " \\dFd[+] [PATTERN] list text search dictionaries\n" msgstr " \\dFd[+] [МАСКА] список словарей текстового поиска\n" -#: help.c:239 +#: help.c:240 #, c-format msgid " \\dFp[+] [PATTERN] list text search parsers\n" msgstr " \\dFp[+] [МАСКА] список анализаторов текстового поиска\n" -#: help.c:240 +#: help.c:241 #, c-format msgid " \\dFt[+] [PATTERN] list text search templates\n" msgstr " \\dFt[+] [МАСКА] список шаблонов текстового поиска\n" -#: help.c:241 +#: help.c:242 #, c-format msgid " \\dg[S+] [PATTERN] list roles\n" msgstr " \\dg[S+] [МАСКА] список ролей\n" -#: help.c:242 +#: help.c:243 #, c-format msgid " \\di[S+] [PATTERN] list indexes\n" msgstr " \\di[S+] [МАСКА] список индексов\n" -#: help.c:243 +#: help.c:244 #, c-format msgid " \\dl list large objects, same as \\lo_list\n" msgstr "" " \\dl список больших объектов (то же, что и \\lo_list)\n" -#: help.c:244 +#: help.c:245 #, c-format msgid " \\dL[S+] [PATTERN] list procedural languages\n" msgstr " \\dL[S+] [МАСКА] список языков процедур\n" -#: help.c:245 +#: help.c:246 #, c-format msgid " \\dm[S+] [PATTERN] list materialized views\n" msgstr " \\dm[S+] [МАСКА] список материализованных представлений\n" -#: help.c:246 +#: help.c:247 #, c-format msgid " \\dn[S+] [PATTERN] list schemas\n" msgstr " \\dn[S+] [МАСКА] список схем\n" -#: help.c:247 +#: help.c:248 #, c-format msgid " \\do[S] [PATTERN] list operators\n" msgstr " \\do[S] [МАСКА] список операторов\n" -#: help.c:248 +#: help.c:249 #, c-format msgid " \\dO[S+] [PATTERN] list collations\n" msgstr " \\dO[S+] [МАСКА] список правил сортировки\n" -#: help.c:249 +#: help.c:250 #, c-format msgid "" " \\dp [PATTERN] list table, view, and sequence access privileges\n" @@ -2821,51 +2890,46 @@ msgstr "" " последовательностям\n" # well-spelled: МАСК -#: help.c:250 +#: help.c:251 #, c-format msgid " \\drds [PATRN1 [PATRN2]] list per-database role settings\n" msgstr " \\drds [МАСК1 [МАСК2]] список параметров роли на уровне БД\n" -#: help.c:251 +#: help.c:252 #, c-format msgid " \\dRp[+] [PATTERN] list replication publications\n" msgstr " \\dRp[+] [МАСКА] список публикаций для репликации\n" -#: help.c:252 +#: help.c:253 #, c-format msgid " \\dRs[+] [PATTERN] list replication subscriptions\n" msgstr " \\dRs[+] [МАСКА] список подписок на репликацию\n" -#: help.c:253 +#: help.c:254 #, c-format msgid " \\ds[S+] [PATTERN] list sequences\n" msgstr " \\ds[S+] [МАСКА] список последовательностей\n" -#: help.c:254 +#: help.c:255 #, c-format msgid " \\dt[S+] [PATTERN] list tables\n" msgstr " \\dt[S+] [МАСКА] список таблиц\n" -#: help.c:255 +#: help.c:256 #, c-format msgid " 
\\dT[S+] [PATTERN] list data types\n" msgstr " \\dT[S+] [МАСКА] список типов данных\n" -#: help.c:256 +#: help.c:257 #, c-format msgid " \\du[S+] [PATTERN] list roles\n" msgstr " \\du[S+] [МАСКА] список ролей\n" -#: help.c:257 +#: help.c:258 #, c-format msgid " \\dv[S+] [PATTERN] list views\n" msgstr " \\dv[S+] [МАСКА] список представлений\n" -#: help.c:258 -#, c-format -msgid " \\dE[S+] [PATTERN] list foreign tables\n" -msgstr " \\dE[S+] [МАСКА] список сторонних таблиц\n" - #: help.c:259 #, c-format msgid " \\dx[+] [PATTERN] list extensions\n" @@ -2936,27 +3000,31 @@ msgstr "" #, c-format msgid "" " \\pset [NAME [VALUE]] set table output option\n" -" (NAME := {format|border|expanded|fieldsep|" -"fieldsep_zero|footer|null|\n" -" numericlocale|recordsep|recordsep_zero|tuples_only|" -"title|tableattr|pager|\n" -" unicode_border_linestyle|unicode_column_linestyle|" +" (NAME := {border|columns|expanded|fieldsep|" +"fieldsep_zero|\n" +" footer|format|linestyle|null|numericlocale|pager|\n" +" pager_min_lines|recordsep|recordsep_zero|tableattr|" +"title|\n" +" tuples_only|unicode_border_linestyle|\n" +" unicode_column_linestyle|" "unicode_header_linestyle})\n" msgstr "" " \\pset [ИМЯ [ЗНАЧЕНИЕ]] установить параметр вывода таблицы, где\n" -" ИМЯ := {format|border|expanded|fieldsep|" -"fieldsep_zero|footer|null|\n" -" numericlocale|recordsep|recordsep_zero|tuples_only|" -"title|tableattr|pager|\n" -" unicode_border_linestyle|unicode_column_linestyle|" +" (ИМЯ := {border|columns|expanded|fieldsep|" +"fieldsep_zero|\n" +" footer|format|linestyle|null|numericlocale|pager|\n" +" pager_min_lines|recordsep|recordsep_zero|tableattr|" +"title|\n" +" tuples_only|unicode_border_linestyle|\n" +" unicode_column_linestyle|" "unicode_header_linestyle})\n" -#: help.c:277 +#: help.c:279 #, c-format msgid " \\t [on|off] show only rows (currently %s)\n" msgstr " \\t [on|off] режим вывода только строк (сейчас: %s)\n" -#: help.c:279 +#: help.c:281 #, c-format msgid "" " \\T [STRING] set HTML
<table> tag attributes, or unset if none\n" @@ -2964,19 +3032,19 @@ msgstr "" " \\T [СТРОКА] задать атрибуты для <table>
или убрать, если не " "заданы\n" -#: help.c:280 +#: help.c:282 #, c-format msgid " \\x [on|off|auto] toggle expanded output (currently %s)\n" msgstr "" " \\x [on|off|auto] переключить режим расширенного вывода (сейчас: " "%s)\n" -#: help.c:284 +#: help.c:286 #, c-format msgid "Connection\n" msgstr "Соединение\n" -#: help.c:286 +#: help.c:288 #, c-format msgid "" " \\c[onnect] {[DBNAME|- USER|- HOST|- PORT|-] | conninfo}\n" @@ -2986,7 +3054,7 @@ msgstr "" " подключиться к другой базе данных\n" " (текущая: \"%s\")\n" -#: help.c:290 +#: help.c:292 #, c-format msgid "" " \\c[onnect] {[DBNAME|- USER|- HOST|- PORT|-] | conninfo}\n" @@ -2996,44 +3064,44 @@ msgstr "" " подключиться к другой базе данных\n" " (сейчас подключения нет)\n" -#: help.c:292 +#: help.c:294 +#, c-format +msgid "" +" \\conninfo display information about current connection\n" +msgstr " \\conninfo информация о текущем соединении\n" + +#: help.c:295 #, c-format msgid " \\encoding [ENCODING] show or set client encoding\n" msgstr " \\encoding [КОДИРОВКА] показать/установить клиентскую кодировку\n" -#: help.c:293 +#: help.c:296 #, c-format msgid " \\password [USERNAME] securely change the password for a user\n" msgstr " \\password [ИМЯ] безопасно сменить пароль пользователя\n" -#: help.c:294 -#, c-format -msgid "" -" \\conninfo display information about current connection\n" -msgstr " \\conninfo информация о текущем соединении\n" - -#: help.c:297 +#: help.c:299 #, c-format msgid "Operating System\n" msgstr "Операционная система\n" -#: help.c:298 +#: help.c:300 #, c-format msgid " \\cd [DIR] change the current working directory\n" msgstr " \\cd [ПУТЬ] сменить текущий каталог\n" -#: help.c:299 +#: help.c:301 #, c-format msgid " \\setenv NAME [VALUE] set or unset environment variable\n" msgstr "" " \\setenv ИМЯ [ЗНАЧЕНИЕ] установить или сбросить переменную окружения\n" -#: help.c:300 +#: help.c:302 #, c-format msgid " \\timing [on|off] toggle timing of commands (currently %s)\n" msgstr " \\timing [on|off] включить/выключить секундомер (сейчас: %s)\n" -#: help.c:302 +#: help.c:304 #, c-format msgid "" " \\! [COMMAND] execute command in shell or start interactive " @@ -3042,19 +3110,19 @@ msgstr "" " \\! 
[КОМАНДА] выполнить команду в командной оболочке\n" " или запустить интерактивную оболочку\n" -#: help.c:305 +#: help.c:307 #, c-format msgid "Variables\n" msgstr "Переменные\n" -#: help.c:306 +#: help.c:308 #, c-format msgid " \\prompt [TEXT] NAME prompt user to set internal variable\n" msgstr "" " \\prompt [ТЕКСТ] ИМЯ предложить пользователю задать внутреннюю " "переменную\n" -#: help.c:307 +#: help.c:309 #, c-format msgid "" " \\set [NAME [VALUE]] set internal variable, or list all if no " @@ -3063,17 +3131,17 @@ msgstr "" " \\set [ИМЯ [ЗНАЧЕНИЕ]] установить внутреннюю переменную или вывести все,\n" " если имя не задано\n" -#: help.c:308 +#: help.c:310 #, c-format msgid " \\unset NAME unset (delete) internal variable\n" msgstr " \\unset ИМЯ сбросить (удалить) внутреннюю переменную\n" -#: help.c:311 +#: help.c:313 #, c-format msgid "Large Objects\n" msgstr "Большие объекты\n" -#: help.c:312 +#: help.c:314 #, c-format msgid "" " \\lo_export LOBOID FILE\n" @@ -3086,7 +3154,7 @@ msgstr "" " \\lo_list\n" " \\lo_unlink LOBOID операции с большими объектами\n" -#: help.c:339 +#: help.c:341 #, c-format msgid "" "List of specially treated variables\n" @@ -3095,12 +3163,12 @@ msgstr "" "Список специальных переменных\n" "\n" -#: help.c:341 +#: help.c:343 #, c-format msgid "psql variables:\n" msgstr "Переменные psql:\n" -#: help.c:343 +#: help.c:345 #, c-format msgid "" " psql --set=NAME=VALUE\n" @@ -3111,7 +3179,7 @@ msgstr "" " или \\set ИМЯ ЗНАЧЕНИЕ в приглашении psql\n" "\n" -#: help.c:345 +#: help.c:347 #, c-format msgid "" " AUTOCOMMIT if set, successful SQL commands are automatically " @@ -3120,7 +3188,7 @@ msgstr "" " AUTOCOMMIT если установлен, успешные SQL-команды фиксируются " "автоматически\n" -#: help.c:346 +#: help.c:348 #, c-format msgid "" " COMP_KEYWORD_CASE determines the case used to complete SQL key words\n" @@ -3132,12 +3200,12 @@ msgstr "" " preserve-lower (сохранять нижний),\n" " preserve-upper (сохранять верхний)]\n" -#: help.c:348 +#: help.c:350 #, c-format msgid " DBNAME the currently connected database name\n" msgstr " DBNAME имя текущей подключённой базы данных\n" -#: help.c:349 +#: help.c:351 #, c-format msgid "" " ECHO controls what input is written to standard output\n" @@ -3147,7 +3215,7 @@ msgstr "" " [all (всё), errors (ошибки), none (ничего),\n" " queries (запросы)]\n" -#: help.c:351 +#: help.c:353 #, c-format msgid "" " ECHO_HIDDEN if set, display internal queries executed by backslash " @@ -3159,12 +3227,12 @@ msgstr "" " если установлено значение \"noexec\", они выводятся, но " "не выполняются\n" -#: help.c:353 +#: help.c:355 #, c-format msgid " ENCODING current client character set encoding\n" msgstr " ENCODING текущая кодировка клиентского набора символов\n" -#: help.c:354 +#: help.c:356 #, c-format msgid "" " FETCH_COUNT the number of result rows to fetch and display at a " @@ -3175,7 +3243,7 @@ msgstr "" "за раз\n" " (по умолчанию: 0=без ограничений)\n" -#: help.c:356 +#: help.c:358 #, c-format msgid "" " HISTCONTROL controls command history [ignorespace, ignoredups, " @@ -3186,25 +3254,25 @@ msgstr "" " ignoredups (игнорировать дубли), ignoreboth (и то, и " "другое)]\n" -#: help.c:357 +#: help.c:359 #, c-format msgid " HISTFILE file name used to store the command history\n" msgstr "" " HISTFILE имя файла, в котором будет сохраняться история команд\n" -#: help.c:358 +#: help.c:360 #, c-format msgid "" " HISTSIZE max number of commands to store in the command history\n" msgstr " HISTSIZE макс. 
число команд, сохраняемых в истории\n" -#: help.c:359 +#: help.c:361 #, c-format msgid " HOST the currently connected database server host\n" msgstr "" " HOST сервер баз данных, к которому установлено подключение\n" -#: help.c:360 +#: help.c:362 #, c-format msgid "" " IGNOREEOF number of EOFs needed to terminate an interactive " @@ -3212,12 +3280,12 @@ msgid "" msgstr "" " IGNOREEOF количество EOF для завершения интерактивного сеанса\n" -#: help.c:361 +#: help.c:363 #, c-format msgid " LASTOID value of the last affected OID\n" msgstr " LASTOID значение последнего задействованного OID\n" -#: help.c:362 +#: help.c:364 #, c-format msgid "" " ON_ERROR_ROLLBACK if set, an error doesn't stop a transaction (uses " @@ -3226,23 +3294,23 @@ msgstr "" " ON_ERROR_ROLLBACK если установлено, транзакция не прекращается при ошибке " "(используются неявные точки сохранения)\n" -#: help.c:363 +#: help.c:365 #, c-format msgid " ON_ERROR_STOP stop batch execution after error\n" msgstr "" " ON_ERROR_STOP останавливать выполнение пакета команд после ошибки\n" -#: help.c:364 +#: help.c:366 #, c-format msgid " PORT server port of the current connection\n" msgstr " PORT порт сервера для текущего соединения\n" -#: help.c:365 +#: help.c:367 #, c-format msgid " PROMPT1 specifies the standard psql prompt\n" msgstr " PROMPT1 устанавливает стандартное приглашение psql\n" -#: help.c:366 +#: help.c:368 #, c-format msgid "" " PROMPT2 specifies the prompt used when a statement continues " @@ -3251,7 +3319,7 @@ msgstr "" " PROMPT2 устанавливает приглашение, которое выводится при " "переносе оператора на новую строку\n" -#: help.c:367 +#: help.c:369 #, c-format msgid "" " PROMPT3 specifies the prompt used during COPY ... FROM STDIN\n" @@ -3259,13 +3327,23 @@ msgstr "" " PROMPT3 устанавливает приглашения для выполнения COPY ... 
FROM " "STDIN\n" -#: help.c:368 +#: help.c:370 #, c-format msgid " QUIET run quietly (same as -q option)\n" msgstr "" " QUIET выводить минимум сообщений (как и с параметром -q)\n" -#: help.c:369 +#: help.c:371 +#, c-format +msgid " SERVER_VERSION_NAME server's version (short string)\n" +msgstr " SERVER_VERSION_NAME версия сервера (короткая строка)\n" + +#: help.c:372 +#, c-format +msgid " SERVER_VERSION_NUM server's version (numeric format)\n" +msgstr " SERVER_VERSION_NUM версия сервера (в числовом формате)\n" + +#: help.c:373 #, c-format msgid "" " SHOW_CONTEXT controls display of message context fields [never, " @@ -3274,7 +3352,7 @@ msgstr "" " SHOW_CONTEXT управляет отображением полей контекста сообщений " "[never, errors, always]\n" -#: help.c:370 +#: help.c:374 #, c-format msgid "" " SINGLELINE end of line terminates SQL command mode (same as -S " @@ -3283,17 +3361,17 @@ msgstr "" " SINGLELINE конец строки завершает режим ввода SQL-команды (как и с " "параметром -S)\n" -#: help.c:371 +#: help.c:375 #, c-format msgid " SINGLESTEP single-step mode (same as -s option)\n" msgstr " SINGLESTEP пошаговый режим (как и с параметром -s)\n" -#: help.c:372 +#: help.c:376 #, c-format msgid " USER the currently connected database user\n" msgstr " USER текущий пользователь, подключённый к БД\n" -#: help.c:373 +#: help.c:377 #, c-format msgid "" " VERBOSITY controls verbosity of error reports [default, verbose, " @@ -3302,7 +3380,22 @@ msgstr "" " VERBOSITY управляет детализацией отчётов об ошибке [default (по " "умолчанию), verbose (подробно), terse (кратко)]\n" -#: help.c:375 +#: help.c:378 +#, c-format +msgid " VERSION psql's version (verbose string)\n" +msgstr " VERSION версия psql (развёрнутая строка)\n" + +#: help.c:379 +#, c-format +msgid " VERSION_NAME psql's version (short string)\n" +msgstr " VERSION_NAME версия psql (короткая строка)\n" + +#: help.c:380 +#, c-format +msgid " VERSION_NUM psql's version (numeric format)\n" +msgstr " VERSION_NUM версия psql (в числовом формате)\n" + +#: help.c:382 #, c-format msgid "" "\n" @@ -3311,7 +3404,7 @@ msgstr "" "\n" "Параметры отображения:\n" -#: help.c:377 +#: help.c:384 #, c-format msgid "" " psql --pset=NAME[=VALUE]\n" @@ -3322,24 +3415,24 @@ msgstr "" " или \\pset ИМЯ [ЗНАЧЕНИЕ] в приглашении psql\n" "\n" -#: help.c:379 +#: help.c:386 #, c-format msgid " border border style (number)\n" msgstr " border стиль границы (число)\n" -#: help.c:380 +#: help.c:387 #, c-format msgid " columns target width for the wrapped format\n" msgstr " columns целевая ширина для формата с переносом\n" -#: help.c:381 +#: help.c:388 #, c-format msgid " expanded (or x) expanded output [on, off, auto]\n" msgstr "" " expanded (или x) расширенный вывод [on (вкл.), off (выкл.), auto " "(авто)]\n" -#: help.c:382 +#: help.c:389 #, c-format msgid "" " fieldsep field separator for unaligned output (default \"%s\")\n" @@ -3347,7 +3440,7 @@ msgstr "" " fieldsep разделитель полей для неформатированного вывода (по " "умолчанию \"%s\")\n" -#: help.c:383 +#: help.c:390 #, c-format msgid "" " fieldsep_zero set field separator for unaligned output to zero byte\n" @@ -3355,7 +3448,7 @@ msgstr "" " fieldsep_zero устанавливает ноль разделителем полей при " "неформатированном выводе\n" -#: help.c:384 +#: help.c:391 #, c-format msgid "" " footer enable or disable display of the table footer [on, " @@ -3364,7 +3457,7 @@ msgstr "" " footer включает или выключает вывод подписей таблицы [on " "(вкл.), off (выкл.)]\n" -#: help.c:385 +#: help.c:392 #, c-format msgid "" " format set output format 
[unaligned, aligned, wrapped, html, " @@ -3374,7 +3467,7 @@ msgstr "" "(неформатированный), aligned (выровненный), wrapped (с переносом), html, " "asciidoc, ...]\n" -#: help.c:386 +#: help.c:393 #, c-format msgid "" " linestyle set the border line drawing style [ascii, old-ascii, " @@ -3383,14 +3476,14 @@ msgstr "" " linestyle задаёт стиль рисования линий границы [ascii, old-ascii, " "unicode]\n" -#: help.c:387 +#: help.c:394 #, c-format msgid "" " null set the string to be printed in place of a null value\n" msgstr "" " null устанавливает строку, выводимую вместо значения NULL\n" -#: help.c:388 +#: help.c:395 #, c-format msgid "" " numericlocale enable or disable display of a locale-specific " @@ -3401,7 +3494,7 @@ msgstr "" "разделителя\n" " группы цифр [on (вкл.), off (выкл.)]\n" -#: help.c:390 +#: help.c:397 #, c-format msgid "" " pager control when an external pager is used [yes, no, " @@ -3410,14 +3503,14 @@ msgstr "" " pager определяет, используется ли внешний " "постраничник [yes (да), no (нет), always (всегда)]\n" -#: help.c:391 +#: help.c:398 #, c-format msgid " recordsep record (line) separator for unaligned output\n" msgstr "" " recordsep разделитель записей (строк) при неформатированном " "выводе\n" -#: help.c:392 +#: help.c:399 #, c-format msgid "" " recordsep_zero set record separator for unaligned output to zero byte\n" @@ -3425,7 +3518,7 @@ msgstr "" " recordsep_zero устанавливает ноль разделителем записей при " "неформатированном выводе\n" -#: help.c:393 +#: help.c:400 #, c-format msgid "" " tableattr (or T) specify attributes for table tag in html format or " @@ -3438,7 +3531,7 @@ msgstr "" " ширины столбцов для выровненных влево данных, в формате " "latex-longtable\n" -#: help.c:395 +#: help.c:402 #, c-format msgid "" " title set the table title for any subsequently printed " @@ -3447,14 +3540,14 @@ msgstr "" " title задаёт заголовок таблицы для последовательно печатаемых " "таблиц\n" -#: help.c:396 +#: help.c:403 #, c-format msgid " tuples_only if set, only actual table data is shown\n" msgstr "" " tuples_only если установлено, выводятся только непосредственно " "табличные данные\n" -#: help.c:397 +#: help.c:404 #, c-format msgid "" " unicode_border_linestyle\n" @@ -3468,7 +3561,7 @@ msgstr "" " задаёт стиль рисуемых линий Unicode [single " "(одинарные), double (двойные)]\n" -#: help.c:402 +#: help.c:409 #, c-format msgid "" "\n" @@ -3477,7 +3570,7 @@ msgstr "" "\n" "Переменные окружения:\n" -#: help.c:406 +#: help.c:413 #, c-format msgid "" " NAME=VALUE [NAME=VALUE] psql ...\n" @@ -3488,7 +3581,7 @@ msgstr "" " или \\setenv ИМЯ [ЗНАЧЕНИЕ] в приглашении psql\n" "\n" -#: help.c:408 +#: help.c:415 #, c-format msgid "" " set NAME=VALUE\n" @@ -3501,54 +3594,54 @@ msgstr "" " или \\setenv ИМЯ ЗНАЧЕНИЕ в приглашении psql\n" "\n" -#: help.c:411 +#: help.c:418 #, c-format msgid " COLUMNS number of columns for wrapped format\n" msgstr " COLUMNS число столбцов для форматирования с переносом\n" -#: help.c:412 +#: help.c:419 #, c-format msgid " PAGER name of external pager program\n" msgstr " PAGER имя программы внешнего постраничника\n" -#: help.c:413 +#: help.c:420 #, c-format msgid "" " PGAPPNAME same as the application_name connection parameter\n" msgstr " PGAPPNAME синоним параметра подключения application_name\n" -#: help.c:414 +#: help.c:421 #, c-format msgid " PGDATABASE same as the dbname connection parameter\n" msgstr " PGDATABASE синоним параметра подключения dbname\n" -#: help.c:415 +#: help.c:422 #, c-format msgid " PGHOST same as the host connection parameter\n" 
msgstr " PGHOST синоним параметра подключения host\n" -#: help.c:416 -#, c-format -msgid " PGPORT same as the port connection parameter\n" -msgstr " PGPORT синоним параметра подключения port\n" - -#: help.c:417 -#, c-format -msgid " PGUSER same as the user connection parameter\n" -msgstr " PGUSER синоним параметра подключения user\n" - -#: help.c:418 +#: help.c:423 #, c-format msgid " PGPASSWORD connection password (not recommended)\n" msgstr "" " PGPASSWORD пароль для подключения (использовать не рекомендуется)\n" -#: help.c:419 +#: help.c:424 #, c-format msgid " PGPASSFILE password file name\n" msgstr " PGPASSFILE имя файла с паролем\n" -#: help.c:420 +#: help.c:425 +#, c-format +msgid " PGPORT same as the port connection parameter\n" +msgstr " PGPORT синоним параметра подключения port\n" + +#: help.c:426 +#, c-format +msgid " PGUSER same as the user connection parameter\n" +msgstr " PGUSER синоним параметра подключения user\n" + +#: help.c:427 #, c-format msgid "" " PSQL_EDITOR, EDITOR, VISUAL\n" @@ -3557,7 +3650,7 @@ msgstr "" " PSQL_EDITOR, EDITOR, VISUAL\n" " редактор, вызываемый командами \\e, \\ef и \\ev\n" -#: help.c:422 +#: help.c:429 #, c-format msgid "" " PSQL_EDITOR_LINENUMBER_ARG\n" @@ -3567,35 +3660,35 @@ msgstr "" " определяет способ передачи номера строки при вызове " "редактора\n" -#: help.c:424 +#: help.c:431 #, c-format msgid "" " PSQL_HISTORY alternative location for the command history file\n" msgstr "" " PSQL_HISTORY альтернативное размещение файла с историей команд\n" -#: help.c:425 +#: help.c:432 #, c-format msgid " PSQLRC alternative location for the user's .psqlrc file\n" msgstr "" " PSQLRC альтернативное размещения пользовательского файла ." "psqlrc\n" -#: help.c:426 +#: help.c:433 #, c-format msgid " SHELL shell used by the \\! 
command\n" msgstr " SHELL оболочка, вызываемая командой \\!\n" -#: help.c:427 +#: help.c:434 #, c-format msgid " TMPDIR directory for temporary files\n" msgstr " TMPDIR каталог для временных файлов\n" -#: help.c:470 +#: help.c:477 msgid "Available help:\n" msgstr "Имеющаяся справка:\n" -#: help.c:554 +#: help.c:561 #, c-format msgid "" "Command: %s\n" @@ -3610,7 +3703,7 @@ msgstr "" "%s\n" "\n" -#: help.c:570 +#: help.c:577 #, c-format msgid "" "No help available for \"%s\".\n" @@ -3708,12 +3801,12 @@ msgstr "" msgid "reached EOF without finding closing \\endif(s)\n" msgstr "в закончившемся потоке команд не хватает \\endif\n" -#: psqlscanslash.l:614 +#: psqlscanslash.l:615 #, c-format msgid "unterminated quoted string\n" msgstr "незавершённая строка в кавычках\n" -#: psqlscanslash.l:787 +#: psqlscanslash.l:788 #, c-format msgid "%s: out of memory\n" msgstr "%s: нехватка памяти\n" @@ -3732,162 +3825,163 @@ msgstr "%s: нехватка памяти\n" #: sql_help.c:640 sql_help.c:642 sql_help.c:644 sql_help.c:647 sql_help.c:649 #: sql_help.c:651 sql_help.c:684 sql_help.c:688 sql_help.c:692 sql_help.c:711 #: sql_help.c:714 sql_help.c:717 sql_help.c:746 sql_help.c:758 sql_help.c:766 -#: sql_help.c:769 sql_help.c:772 sql_help.c:787 sql_help.c:790 sql_help.c:813 -#: sql_help.c:816 sql_help.c:818 sql_help.c:820 sql_help.c:822 sql_help.c:863 -#: sql_help.c:886 sql_help.c:897 sql_help.c:899 sql_help.c:918 sql_help.c:928 -#: sql_help.c:930 sql_help.c:932 sql_help.c:944 sql_help.c:948 sql_help.c:950 -#: sql_help.c:961 sql_help.c:963 sql_help.c:965 sql_help.c:990 sql_help.c:994 -#: sql_help.c:997 sql_help.c:1000 sql_help.c:1002 sql_help.c:1004 -#: sql_help.c:1005 sql_help.c:1092 sql_help.c:1094 sql_help.c:1097 -#: sql_help.c:1100 sql_help.c:1102 sql_help.c:1104 sql_help.c:1107 -#: sql_help.c:1110 sql_help.c:1170 sql_help.c:1172 sql_help.c:1174 -#: sql_help.c:1177 sql_help.c:1198 sql_help.c:1201 sql_help.c:1204 -#: sql_help.c:1207 sql_help.c:1211 sql_help.c:1213 sql_help.c:1215 -#: sql_help.c:1217 sql_help.c:1231 sql_help.c:1234 sql_help.c:1236 -#: sql_help.c:1238 sql_help.c:1248 sql_help.c:1250 sql_help.c:1260 -#: sql_help.c:1262 sql_help.c:1272 sql_help.c:1275 sql_help.c:1297 -#: sql_help.c:1299 sql_help.c:1301 sql_help.c:1304 sql_help.c:1306 -#: sql_help.c:1308 sql_help.c:1311 sql_help.c:1361 sql_help.c:1399 -#: sql_help.c:1402 sql_help.c:1404 sql_help.c:1406 sql_help.c:1408 -#: sql_help.c:1410 sql_help.c:1413 sql_help.c:1453 sql_help.c:1664 -#: sql_help.c:1728 sql_help.c:1747 sql_help.c:1760 sql_help.c:1816 -#: sql_help.c:1822 sql_help.c:1832 sql_help.c:1852 sql_help.c:1877 -#: sql_help.c:1895 sql_help.c:1924 sql_help.c:2017 sql_help.c:2059 -#: sql_help.c:2081 sql_help.c:2101 sql_help.c:2102 sql_help.c:2137 -#: sql_help.c:2157 sql_help.c:2179 sql_help.c:2193 sql_help.c:2214 -#: sql_help.c:2244 sql_help.c:2269 sql_help.c:2315 sql_help.c:2582 -#: sql_help.c:2595 sql_help.c:2612 sql_help.c:2628 sql_help.c:2668 -#: sql_help.c:2720 sql_help.c:2724 sql_help.c:2726 sql_help.c:2732 -#: sql_help.c:2750 sql_help.c:2777 sql_help.c:2812 sql_help.c:2824 -#: sql_help.c:2833 sql_help.c:2877 sql_help.c:2891 sql_help.c:2919 -#: sql_help.c:2927 sql_help.c:2935 sql_help.c:2943 sql_help.c:2951 -#: sql_help.c:2959 sql_help.c:2967 sql_help.c:2975 sql_help.c:2984 -#: sql_help.c:2995 sql_help.c:3003 sql_help.c:3011 sql_help.c:3019 -#: sql_help.c:3027 sql_help.c:3037 sql_help.c:3046 sql_help.c:3055 -#: sql_help.c:3063 sql_help.c:3072 sql_help.c:3080 sql_help.c:3088 -#: sql_help.c:3097 sql_help.c:3105 sql_help.c:3113 sql_help.c:3121 -#: 
sql_help.c:3129 sql_help.c:3137 sql_help.c:3145 sql_help.c:3153 -#: sql_help.c:3161 sql_help.c:3169 sql_help.c:3177 sql_help.c:3194 -#: sql_help.c:3203 sql_help.c:3211 sql_help.c:3228 sql_help.c:3243 -#: sql_help.c:3510 sql_help.c:3561 sql_help.c:3590 sql_help.c:3598 -#: sql_help.c:4021 sql_help.c:4069 sql_help.c:4210 +#: sql_help.c:769 sql_help.c:772 sql_help.c:787 sql_help.c:790 sql_help.c:807 +#: sql_help.c:809 sql_help.c:811 sql_help.c:813 sql_help.c:816 sql_help.c:818 +#: sql_help.c:859 sql_help.c:882 sql_help.c:893 sql_help.c:895 sql_help.c:914 +#: sql_help.c:924 sql_help.c:926 sql_help.c:928 sql_help.c:940 sql_help.c:944 +#: sql_help.c:946 sql_help.c:957 sql_help.c:959 sql_help.c:961 sql_help.c:977 +#: sql_help.c:979 sql_help.c:983 sql_help.c:986 sql_help.c:987 sql_help.c:988 +#: sql_help.c:991 sql_help.c:993 sql_help.c:1084 sql_help.c:1086 +#: sql_help.c:1089 sql_help.c:1092 sql_help.c:1094 sql_help.c:1096 +#: sql_help.c:1099 sql_help.c:1102 sql_help.c:1168 sql_help.c:1170 +#: sql_help.c:1172 sql_help.c:1175 sql_help.c:1196 sql_help.c:1199 +#: sql_help.c:1202 sql_help.c:1205 sql_help.c:1209 sql_help.c:1211 +#: sql_help.c:1213 sql_help.c:1215 sql_help.c:1229 sql_help.c:1232 +#: sql_help.c:1234 sql_help.c:1236 sql_help.c:1246 sql_help.c:1248 +#: sql_help.c:1258 sql_help.c:1260 sql_help.c:1270 sql_help.c:1273 +#: sql_help.c:1295 sql_help.c:1297 sql_help.c:1299 sql_help.c:1302 +#: sql_help.c:1304 sql_help.c:1306 sql_help.c:1309 sql_help.c:1359 +#: sql_help.c:1401 sql_help.c:1404 sql_help.c:1406 sql_help.c:1408 +#: sql_help.c:1410 sql_help.c:1412 sql_help.c:1415 sql_help.c:1455 +#: sql_help.c:1666 sql_help.c:1730 sql_help.c:1749 sql_help.c:1762 +#: sql_help.c:1818 sql_help.c:1824 sql_help.c:1834 sql_help.c:1854 +#: sql_help.c:1879 sql_help.c:1897 sql_help.c:1926 sql_help.c:2019 +#: sql_help.c:2061 sql_help.c:2083 sql_help.c:2103 sql_help.c:2104 +#: sql_help.c:2139 sql_help.c:2159 sql_help.c:2181 sql_help.c:2195 +#: sql_help.c:2210 sql_help.c:2240 sql_help.c:2265 sql_help.c:2311 +#: sql_help.c:2577 sql_help.c:2590 sql_help.c:2607 sql_help.c:2623 +#: sql_help.c:2663 sql_help.c:2715 sql_help.c:2719 sql_help.c:2721 +#: sql_help.c:2727 sql_help.c:2745 sql_help.c:2772 sql_help.c:2807 +#: sql_help.c:2819 sql_help.c:2828 sql_help.c:2872 sql_help.c:2886 +#: sql_help.c:2914 sql_help.c:2922 sql_help.c:2930 sql_help.c:2938 +#: sql_help.c:2946 sql_help.c:2954 sql_help.c:2962 sql_help.c:2970 +#: sql_help.c:2979 sql_help.c:2990 sql_help.c:2998 sql_help.c:3006 +#: sql_help.c:3014 sql_help.c:3022 sql_help.c:3032 sql_help.c:3041 +#: sql_help.c:3050 sql_help.c:3058 sql_help.c:3067 sql_help.c:3075 +#: sql_help.c:3083 sql_help.c:3092 sql_help.c:3100 sql_help.c:3108 +#: sql_help.c:3116 sql_help.c:3124 sql_help.c:3132 sql_help.c:3140 +#: sql_help.c:3148 sql_help.c:3156 sql_help.c:3164 sql_help.c:3172 +#: sql_help.c:3189 sql_help.c:3198 sql_help.c:3206 sql_help.c:3223 +#: sql_help.c:3238 sql_help.c:3506 sql_help.c:3557 sql_help.c:3586 +#: sql_help.c:3594 sql_help.c:4017 sql_help.c:4065 sql_help.c:4206 msgid "name" msgstr "имя" -#: sql_help.c:37 sql_help.c:40 sql_help.c:43 sql_help.c:326 sql_help.c:1522 -#: sql_help.c:2892 sql_help.c:3815 +#: sql_help.c:37 sql_help.c:40 sql_help.c:43 sql_help.c:326 sql_help.c:1524 +#: sql_help.c:2887 sql_help.c:3811 msgid "aggregate_signature" msgstr "сигнатура_агр_функции" #: sql_help.c:38 sql_help.c:68 sql_help.c:83 sql_help.c:119 sql_help.c:251 #: sql_help.c:269 sql_help.c:390 sql_help.c:438 sql_help.c:515 sql_help.c:561 #: sql_help.c:576 sql_help.c:598 sql_help.c:648 
sql_help.c:713 sql_help.c:768 -#: sql_help.c:789 sql_help.c:864 sql_help.c:888 sql_help.c:898 sql_help.c:931 -#: sql_help.c:951 sql_help.c:964 sql_help.c:1101 sql_help.c:1171 -#: sql_help.c:1214 sql_help.c:1235 sql_help.c:1249 sql_help.c:1261 -#: sql_help.c:1274 sql_help.c:1305 sql_help.c:1362 sql_help.c:1407 +#: sql_help.c:789 sql_help.c:819 sql_help.c:860 sql_help.c:884 sql_help.c:894 +#: sql_help.c:927 sql_help.c:947 sql_help.c:960 sql_help.c:994 sql_help.c:1093 +#: sql_help.c:1169 sql_help.c:1212 sql_help.c:1233 sql_help.c:1247 +#: sql_help.c:1259 sql_help.c:1272 sql_help.c:1303 sql_help.c:1360 +#: sql_help.c:1409 msgid "new_name" msgstr "новое_имя" #: sql_help.c:41 sql_help.c:70 sql_help.c:85 sql_help.c:121 sql_help.c:249 #: sql_help.c:267 sql_help.c:388 sql_help.c:474 sql_help.c:520 sql_help.c:600 #: sql_help.c:609 sql_help.c:667 sql_help.c:687 sql_help.c:716 sql_help.c:771 -#: sql_help.c:817 sql_help.c:900 sql_help.c:929 sql_help.c:949 sql_help.c:962 -#: sql_help.c:1001 sql_help.c:1155 sql_help.c:1173 sql_help.c:1216 -#: sql_help.c:1237 sql_help.c:1300 sql_help.c:1405 sql_help.c:2568 +#: sql_help.c:817 sql_help.c:896 sql_help.c:925 sql_help.c:945 sql_help.c:958 +#: sql_help.c:992 sql_help.c:1153 sql_help.c:1171 sql_help.c:1214 +#: sql_help.c:1235 sql_help.c:1298 sql_help.c:1407 sql_help.c:2563 msgid "new_owner" msgstr "новый_владелец" #: sql_help.c:44 sql_help.c:72 sql_help.c:87 sql_help.c:253 sql_help.c:318 #: sql_help.c:440 sql_help.c:525 sql_help.c:650 sql_help.c:691 sql_help.c:719 -#: sql_help.c:774 sql_help.c:933 sql_help.c:966 sql_help.c:1103 sql_help.c:1218 -#: sql_help.c:1239 sql_help.c:1251 sql_help.c:1263 sql_help.c:1307 -#: sql_help.c:1409 +#: sql_help.c:774 sql_help.c:929 sql_help.c:962 sql_help.c:1095 sql_help.c:1216 +#: sql_help.c:1237 sql_help.c:1249 sql_help.c:1261 sql_help.c:1305 +#: sql_help.c:1411 msgid "new_schema" msgstr "новая_схема" -#: sql_help.c:45 sql_help.c:1578 sql_help.c:2893 sql_help.c:3836 +#: sql_help.c:45 sql_help.c:1580 sql_help.c:2888 sql_help.c:3832 msgid "where aggregate_signature is:" msgstr "где сигнатура_агр_функции:" #: sql_help.c:46 sql_help.c:49 sql_help.c:52 sql_help.c:336 sql_help.c:361 #: sql_help.c:364 sql_help.c:367 sql_help.c:507 sql_help.c:512 sql_help.c:517 -#: sql_help.c:522 sql_help.c:527 sql_help.c:1540 sql_help.c:1579 -#: sql_help.c:1582 sql_help.c:1585 sql_help.c:1729 sql_help.c:1748 -#: sql_help.c:1751 sql_help.c:2018 sql_help.c:2894 sql_help.c:2897 -#: sql_help.c:2900 sql_help.c:2985 sql_help.c:3396 sql_help.c:3728 -#: sql_help.c:3821 sql_help.c:3837 sql_help.c:3840 sql_help.c:3843 +#: sql_help.c:522 sql_help.c:527 sql_help.c:1542 sql_help.c:1581 +#: sql_help.c:1584 sql_help.c:1587 sql_help.c:1731 sql_help.c:1750 +#: sql_help.c:1753 sql_help.c:2020 sql_help.c:2889 sql_help.c:2892 +#: sql_help.c:2895 sql_help.c:2980 sql_help.c:3391 sql_help.c:3724 +#: sql_help.c:3817 sql_help.c:3833 sql_help.c:3836 sql_help.c:3839 msgid "argmode" msgstr "режим_аргумента" #: sql_help.c:47 sql_help.c:50 sql_help.c:53 sql_help.c:337 sql_help.c:362 #: sql_help.c:365 sql_help.c:368 sql_help.c:508 sql_help.c:513 sql_help.c:518 -#: sql_help.c:523 sql_help.c:528 sql_help.c:1541 sql_help.c:1580 -#: sql_help.c:1583 sql_help.c:1586 sql_help.c:1730 sql_help.c:1749 -#: sql_help.c:1752 sql_help.c:2019 sql_help.c:2895 sql_help.c:2898 -#: sql_help.c:2901 sql_help.c:2986 sql_help.c:3822 sql_help.c:3838 -#: sql_help.c:3841 sql_help.c:3844 +#: sql_help.c:523 sql_help.c:528 sql_help.c:1543 sql_help.c:1582 +#: sql_help.c:1585 sql_help.c:1588 sql_help.c:1732 
sql_help.c:1751 +#: sql_help.c:1754 sql_help.c:2021 sql_help.c:2890 sql_help.c:2893 +#: sql_help.c:2896 sql_help.c:2981 sql_help.c:3818 sql_help.c:3834 +#: sql_help.c:3837 sql_help.c:3840 msgid "argname" msgstr "имя_аргумента" #: sql_help.c:48 sql_help.c:51 sql_help.c:54 sql_help.c:338 sql_help.c:363 #: sql_help.c:366 sql_help.c:369 sql_help.c:509 sql_help.c:514 sql_help.c:519 -#: sql_help.c:524 sql_help.c:529 sql_help.c:1542 sql_help.c:1581 -#: sql_help.c:1584 sql_help.c:1587 sql_help.c:2020 sql_help.c:2896 -#: sql_help.c:2899 sql_help.c:2902 sql_help.c:2987 sql_help.c:3823 -#: sql_help.c:3839 sql_help.c:3842 sql_help.c:3845 +#: sql_help.c:524 sql_help.c:529 sql_help.c:1544 sql_help.c:1583 +#: sql_help.c:1586 sql_help.c:1589 sql_help.c:2022 sql_help.c:2891 +#: sql_help.c:2894 sql_help.c:2897 sql_help.c:2982 sql_help.c:3819 +#: sql_help.c:3835 sql_help.c:3838 sql_help.c:3841 msgid "argtype" msgstr "тип_аргумента" -#: sql_help.c:113 sql_help.c:385 sql_help.c:463 sql_help.c:475 sql_help.c:814 -#: sql_help.c:858 sql_help.c:946 sql_help.c:1232 sql_help.c:1356 -#: sql_help.c:1384 sql_help.c:1635 sql_help.c:1641 sql_help.c:1927 -#: sql_help.c:1968 sql_help.c:1975 sql_help.c:1984 sql_help.c:2060 -#: sql_help.c:2216 sql_help.c:2245 sql_help.c:2337 sql_help.c:2373 -#: sql_help.c:2597 sql_help.c:2778 sql_help.c:2800 sql_help.c:3263 -#: sql_help.c:3430 +#: sql_help.c:113 sql_help.c:385 sql_help.c:463 sql_help.c:475 sql_help.c:854 +#: sql_help.c:942 sql_help.c:1230 sql_help.c:1354 sql_help.c:1386 +#: sql_help.c:1637 sql_help.c:1643 sql_help.c:1929 sql_help.c:1970 +#: sql_help.c:1977 sql_help.c:1986 sql_help.c:2062 sql_help.c:2241 +#: sql_help.c:2333 sql_help.c:2592 sql_help.c:2773 sql_help.c:2795 +#: sql_help.c:3258 sql_help.c:3425 msgid "option" msgstr "параметр" -#: sql_help.c:114 sql_help.c:815 sql_help.c:859 sql_help.c:1357 sql_help.c:2061 -#: sql_help.c:2217 sql_help.c:2246 sql_help.c:2374 sql_help.c:2779 +#: sql_help.c:114 sql_help.c:855 sql_help.c:1355 sql_help.c:2063 +#: sql_help.c:2242 sql_help.c:2774 msgid "where option can be:" msgstr "где допустимые параметры:" -#: sql_help.c:115 sql_help.c:1859 +#: sql_help.c:115 sql_help.c:1861 msgid "allowconn" msgstr "разр_подключения" -#: sql_help.c:116 sql_help.c:860 sql_help.c:1358 sql_help.c:1860 -#: sql_help.c:2247 sql_help.c:2780 +#: sql_help.c:116 sql_help.c:856 sql_help.c:1356 sql_help.c:1862 +#: sql_help.c:2243 sql_help.c:2775 msgid "connlimit" msgstr "предел_подключений" -#: sql_help.c:117 sql_help.c:1861 +#: sql_help.c:117 sql_help.c:1863 msgid "istemplate" msgstr "это_шаблон" -#: sql_help.c:123 sql_help.c:588 sql_help.c:653 sql_help.c:1106 sql_help.c:1148 +#: sql_help.c:123 sql_help.c:588 sql_help.c:653 sql_help.c:1098 sql_help.c:1146 msgid "new_tablespace" msgstr "новое_табл_пространство" #: sql_help.c:125 sql_help.c:128 sql_help.c:130 sql_help.c:534 sql_help.c:536 -#: sql_help.c:537 sql_help.c:867 sql_help.c:871 sql_help.c:874 sql_help.c:1016 -#: sql_help.c:1019 sql_help.c:1364 sql_help.c:1367 sql_help.c:1369 -#: sql_help.c:2029 sql_help.c:3615 sql_help.c:4010 +#: sql_help.c:537 sql_help.c:863 sql_help.c:867 sql_help.c:870 sql_help.c:1005 +#: sql_help.c:1008 sql_help.c:1363 sql_help.c:1367 sql_help.c:1370 +#: sql_help.c:2031 sql_help.c:3611 sql_help.c:4006 msgid "configuration_parameter" msgstr "параметр_конфигурации" #: sql_help.c:126 sql_help.c:386 sql_help.c:458 sql_help.c:464 sql_help.c:476 -#: sql_help.c:535 sql_help.c:583 sql_help.c:659 sql_help.c:665 sql_help.c:868 -#: sql_help.c:947 sql_help.c:1017 sql_help.c:1018 sql_help.c:1130 
-#: sql_help.c:1150 sql_help.c:1176 sql_help.c:1233 sql_help.c:1365 -#: sql_help.c:1385 sql_help.c:1928 sql_help.c:1969 sql_help.c:1976 -#: sql_help.c:1985 sql_help.c:2030 sql_help.c:2031 sql_help.c:2089 -#: sql_help.c:2121 sql_help.c:2338 sql_help.c:2471 sql_help.c:2483 -#: sql_help.c:2496 sql_help.c:2532 sql_help.c:2554 sql_help.c:2571 -#: sql_help.c:2598 sql_help.c:2801 sql_help.c:3431 sql_help.c:4011 -#: sql_help.c:4012 +#: sql_help.c:535 sql_help.c:583 sql_help.c:659 sql_help.c:665 sql_help.c:815 +#: sql_help.c:864 sql_help.c:943 sql_help.c:982 sql_help.c:985 sql_help.c:990 +#: sql_help.c:1006 sql_help.c:1007 sql_help.c:1128 sql_help.c:1148 +#: sql_help.c:1174 sql_help.c:1231 sql_help.c:1364 sql_help.c:1387 +#: sql_help.c:1930 sql_help.c:1971 sql_help.c:1978 sql_help.c:1987 +#: sql_help.c:2032 sql_help.c:2033 sql_help.c:2091 sql_help.c:2123 +#: sql_help.c:2213 sql_help.c:2334 sql_help.c:2364 sql_help.c:2462 +#: sql_help.c:2474 sql_help.c:2487 sql_help.c:2527 sql_help.c:2549 +#: sql_help.c:2566 sql_help.c:2593 sql_help.c:2796 sql_help.c:3426 +#: sql_help.c:4007 sql_help.c:4008 msgid "value" msgstr "значение" @@ -3895,9 +3989,9 @@ msgstr "значение" msgid "target_role" msgstr "целевая_роль" -#: sql_help.c:199 sql_help.c:1911 sql_help.c:2293 sql_help.c:2298 -#: sql_help.c:3378 sql_help.c:3385 sql_help.c:3399 sql_help.c:3405 -#: sql_help.c:3710 sql_help.c:3717 sql_help.c:3731 sql_help.c:3737 +#: sql_help.c:199 sql_help.c:1913 sql_help.c:2289 sql_help.c:2294 +#: sql_help.c:3373 sql_help.c:3380 sql_help.c:3394 sql_help.c:3400 +#: sql_help.c:3706 sql_help.c:3713 sql_help.c:3727 sql_help.c:3733 msgid "schema_name" msgstr "имя_схемы" @@ -3911,34 +4005,34 @@ msgstr "где допустимое предложение_GRANT_или_REVOKE:" #: sql_help.c:202 sql_help.c:203 sql_help.c:204 sql_help.c:205 sql_help.c:206 #: sql_help.c:207 sql_help.c:208 sql_help.c:209 sql_help.c:210 sql_help.c:211 -#: sql_help.c:559 sql_help.c:587 sql_help.c:652 sql_help.c:792 sql_help.c:878 -#: sql_help.c:1105 sql_help.c:1372 sql_help.c:2064 sql_help.c:2065 -#: sql_help.c:2066 sql_help.c:2067 sql_help.c:2068 sql_help.c:2195 -#: sql_help.c:2250 sql_help.c:2251 sql_help.c:2252 sql_help.c:2253 -#: sql_help.c:2254 sql_help.c:2783 sql_help.c:2784 sql_help.c:2785 -#: sql_help.c:2786 sql_help.c:2787 sql_help.c:3412 sql_help.c:3413 -#: sql_help.c:3414 sql_help.c:3711 sql_help.c:3715 sql_help.c:3718 -#: sql_help.c:3720 sql_help.c:3722 sql_help.c:3724 sql_help.c:3726 -#: sql_help.c:3732 sql_help.c:3734 sql_help.c:3736 sql_help.c:3738 -#: sql_help.c:3740 sql_help.c:3742 sql_help.c:3743 sql_help.c:3744 -#: sql_help.c:4031 +#: sql_help.c:559 sql_help.c:587 sql_help.c:652 sql_help.c:792 sql_help.c:874 +#: sql_help.c:1097 sql_help.c:1374 sql_help.c:2066 sql_help.c:2067 +#: sql_help.c:2068 sql_help.c:2069 sql_help.c:2070 sql_help.c:2197 +#: sql_help.c:2246 sql_help.c:2247 sql_help.c:2248 sql_help.c:2249 +#: sql_help.c:2250 sql_help.c:2778 sql_help.c:2779 sql_help.c:2780 +#: sql_help.c:2781 sql_help.c:2782 sql_help.c:3407 sql_help.c:3408 +#: sql_help.c:3409 sql_help.c:3707 sql_help.c:3711 sql_help.c:3714 +#: sql_help.c:3716 sql_help.c:3718 sql_help.c:3720 sql_help.c:3722 +#: sql_help.c:3728 sql_help.c:3730 sql_help.c:3732 sql_help.c:3734 +#: sql_help.c:3736 sql_help.c:3738 sql_help.c:3739 sql_help.c:3740 +#: sql_help.c:4027 msgid "role_name" msgstr "имя_роли" -#: sql_help.c:237 sql_help.c:451 sql_help.c:1121 sql_help.c:1123 -#: sql_help.c:1401 sql_help.c:1880 sql_help.c:1884 sql_help.c:1988 -#: sql_help.c:1992 sql_help.c:2085 sql_help.c:2467 sql_help.c:2479 -#: 
sql_help.c:2492 sql_help.c:2500 sql_help.c:2510 sql_help.c:2536 -#: sql_help.c:3461 sql_help.c:3476 sql_help.c:3478 sql_help.c:3896 -#: sql_help.c:3897 sql_help.c:3906 sql_help.c:3947 sql_help.c:3948 -#: sql_help.c:3949 sql_help.c:3950 sql_help.c:3951 sql_help.c:3952 -#: sql_help.c:3985 sql_help.c:3986 sql_help.c:3991 sql_help.c:3996 -#: sql_help.c:4135 sql_help.c:4136 sql_help.c:4145 sql_help.c:4186 -#: sql_help.c:4187 sql_help.c:4188 sql_help.c:4189 sql_help.c:4190 -#: sql_help.c:4191 sql_help.c:4238 sql_help.c:4240 sql_help.c:4273 -#: sql_help.c:4329 sql_help.c:4330 sql_help.c:4339 sql_help.c:4380 -#: sql_help.c:4381 sql_help.c:4382 sql_help.c:4383 sql_help.c:4384 -#: sql_help.c:4385 +#: sql_help.c:237 sql_help.c:451 sql_help.c:1113 sql_help.c:1115 +#: sql_help.c:1403 sql_help.c:1882 sql_help.c:1886 sql_help.c:1990 +#: sql_help.c:1994 sql_help.c:2087 sql_help.c:2458 sql_help.c:2470 +#: sql_help.c:2483 sql_help.c:2491 sql_help.c:2502 sql_help.c:2531 +#: sql_help.c:3457 sql_help.c:3472 sql_help.c:3474 sql_help.c:3892 +#: sql_help.c:3893 sql_help.c:3902 sql_help.c:3943 sql_help.c:3944 +#: sql_help.c:3945 sql_help.c:3946 sql_help.c:3947 sql_help.c:3948 +#: sql_help.c:3981 sql_help.c:3982 sql_help.c:3987 sql_help.c:3992 +#: sql_help.c:4131 sql_help.c:4132 sql_help.c:4141 sql_help.c:4182 +#: sql_help.c:4183 sql_help.c:4184 sql_help.c:4185 sql_help.c:4186 +#: sql_help.c:4187 sql_help.c:4234 sql_help.c:4236 sql_help.c:4269 +#: sql_help.c:4325 sql_help.c:4326 sql_help.c:4335 sql_help.c:4376 +#: sql_help.c:4377 sql_help.c:4378 sql_help.c:4379 sql_help.c:4380 +#: sql_help.c:4381 msgid "expression" msgstr "выражение" @@ -3947,18 +4041,18 @@ msgid "domain_constraint" msgstr "ограничение_домена" #: sql_help.c:242 sql_help.c:244 sql_help.c:247 sql_help.c:466 sql_help.c:467 -#: sql_help.c:1098 sql_help.c:1136 sql_help.c:1137 sql_help.c:1138 -#: sql_help.c:1158 sql_help.c:1528 sql_help.c:1530 sql_help.c:1883 -#: sql_help.c:1987 sql_help.c:1991 sql_help.c:2499 sql_help.c:2509 -#: sql_help.c:3473 +#: sql_help.c:1090 sql_help.c:1134 sql_help.c:1135 sql_help.c:1136 +#: sql_help.c:1156 sql_help.c:1530 sql_help.c:1532 sql_help.c:1885 +#: sql_help.c:1989 sql_help.c:1993 sql_help.c:2490 sql_help.c:2501 +#: sql_help.c:3469 msgid "constraint_name" msgstr "имя_ограничения" -#: sql_help.c:245 sql_help.c:1099 +#: sql_help.c:245 sql_help.c:1091 msgid "new_constraint_name" msgstr "имя_нового_ограничения" -#: sql_help.c:316 sql_help.c:945 +#: sql_help.c:316 sql_help.c:941 msgid "new_version" msgstr "новая_версия" @@ -3974,74 +4068,74 @@ msgstr "где элемент_объект:" #: sql_help.c:333 sql_help.c:334 sql_help.c:339 sql_help.c:343 sql_help.c:345 #: sql_help.c:347 sql_help.c:348 sql_help.c:349 sql_help.c:350 sql_help.c:351 #: sql_help.c:352 sql_help.c:353 sql_help.c:354 sql_help.c:355 sql_help.c:358 -#: sql_help.c:359 sql_help.c:1520 sql_help.c:1525 sql_help.c:1532 -#: sql_help.c:1533 sql_help.c:1534 sql_help.c:1535 sql_help.c:1536 -#: sql_help.c:1537 sql_help.c:1538 sql_help.c:1543 sql_help.c:1545 -#: sql_help.c:1549 sql_help.c:1551 sql_help.c:1555 sql_help.c:1556 -#: sql_help.c:1557 sql_help.c:1560 sql_help.c:1561 sql_help.c:1562 -#: sql_help.c:1563 sql_help.c:1564 sql_help.c:1565 sql_help.c:1566 -#: sql_help.c:1567 sql_help.c:1568 sql_help.c:1569 sql_help.c:1570 -#: sql_help.c:1575 sql_help.c:1576 sql_help.c:3811 sql_help.c:3816 -#: sql_help.c:3817 sql_help.c:3818 sql_help.c:3819 sql_help.c:3825 +#: sql_help.c:359 sql_help.c:1522 sql_help.c:1527 sql_help.c:1534 +#: sql_help.c:1535 sql_help.c:1536 sql_help.c:1537 
sql_help.c:1538 +#: sql_help.c:1539 sql_help.c:1540 sql_help.c:1545 sql_help.c:1547 +#: sql_help.c:1551 sql_help.c:1553 sql_help.c:1557 sql_help.c:1558 +#: sql_help.c:1559 sql_help.c:1562 sql_help.c:1563 sql_help.c:1564 +#: sql_help.c:1565 sql_help.c:1566 sql_help.c:1567 sql_help.c:1568 +#: sql_help.c:1569 sql_help.c:1570 sql_help.c:1571 sql_help.c:1572 +#: sql_help.c:1577 sql_help.c:1578 sql_help.c:3807 sql_help.c:3812 +#: sql_help.c:3813 sql_help.c:3814 sql_help.c:3815 sql_help.c:3821 +#: sql_help.c:3822 sql_help.c:3823 sql_help.c:3824 sql_help.c:3825 #: sql_help.c:3826 sql_help.c:3827 sql_help.c:3828 sql_help.c:3829 -#: sql_help.c:3830 sql_help.c:3831 sql_help.c:3832 sql_help.c:3833 -#: sql_help.c:3834 +#: sql_help.c:3830 msgid "object_name" msgstr "имя_объекта" # well-spelled: агр -#: sql_help.c:325 sql_help.c:1521 sql_help.c:3814 +#: sql_help.c:325 sql_help.c:1523 sql_help.c:3810 msgid "aggregate_name" msgstr "имя_агр_функции" -#: sql_help.c:327 sql_help.c:1523 sql_help.c:1794 sql_help.c:1798 -#: sql_help.c:1800 sql_help.c:2910 +#: sql_help.c:327 sql_help.c:1525 sql_help.c:1796 sql_help.c:1800 +#: sql_help.c:1802 sql_help.c:2905 msgid "source_type" msgstr "исходный_тип" -#: sql_help.c:328 sql_help.c:1524 sql_help.c:1795 sql_help.c:1799 -#: sql_help.c:1801 sql_help.c:2911 +#: sql_help.c:328 sql_help.c:1526 sql_help.c:1797 sql_help.c:1801 +#: sql_help.c:1803 sql_help.c:2906 msgid "target_type" msgstr "целевой_тип" -#: sql_help.c:335 sql_help.c:756 sql_help.c:1539 sql_help.c:1796 -#: sql_help.c:1835 sql_help.c:1898 sql_help.c:2138 sql_help.c:2169 -#: sql_help.c:2674 sql_help.c:3395 sql_help.c:3727 sql_help.c:3820 -#: sql_help.c:3925 sql_help.c:3929 sql_help.c:3933 sql_help.c:3936 -#: sql_help.c:4164 sql_help.c:4168 sql_help.c:4172 sql_help.c:4175 -#: sql_help.c:4358 sql_help.c:4362 sql_help.c:4366 sql_help.c:4369 +#: sql_help.c:335 sql_help.c:756 sql_help.c:1541 sql_help.c:1798 +#: sql_help.c:1837 sql_help.c:1900 sql_help.c:2140 sql_help.c:2171 +#: sql_help.c:2669 sql_help.c:3390 sql_help.c:3723 sql_help.c:3816 +#: sql_help.c:3921 sql_help.c:3925 sql_help.c:3929 sql_help.c:3932 +#: sql_help.c:4160 sql_help.c:4164 sql_help.c:4168 sql_help.c:4171 +#: sql_help.c:4354 sql_help.c:4358 sql_help.c:4362 sql_help.c:4365 msgid "function_name" msgstr "имя_функции" -#: sql_help.c:340 sql_help.c:749 sql_help.c:1546 sql_help.c:2162 +#: sql_help.c:340 sql_help.c:749 sql_help.c:1548 sql_help.c:2164 msgid "operator_name" msgstr "имя_оператора" -#: sql_help.c:341 sql_help.c:685 sql_help.c:689 sql_help.c:693 sql_help.c:1547 -#: sql_help.c:2139 sql_help.c:3028 +#: sql_help.c:341 sql_help.c:685 sql_help.c:689 sql_help.c:693 sql_help.c:1549 +#: sql_help.c:2141 sql_help.c:3023 msgid "left_type" msgstr "тип_слева" -#: sql_help.c:342 sql_help.c:686 sql_help.c:690 sql_help.c:694 sql_help.c:1548 -#: sql_help.c:2140 sql_help.c:3029 +#: sql_help.c:342 sql_help.c:686 sql_help.c:690 sql_help.c:694 sql_help.c:1550 +#: sql_help.c:2142 sql_help.c:3024 msgid "right_type" msgstr "тип_справа" #: sql_help.c:344 sql_help.c:346 sql_help.c:712 sql_help.c:715 sql_help.c:718 #: sql_help.c:747 sql_help.c:759 sql_help.c:767 sql_help.c:770 sql_help.c:773 -#: sql_help.c:1550 sql_help.c:1552 sql_help.c:2159 sql_help.c:2180 -#: sql_help.c:2515 sql_help.c:3038 sql_help.c:3047 +#: sql_help.c:1552 sql_help.c:1554 sql_help.c:2161 sql_help.c:2182 +#: sql_help.c:2507 sql_help.c:3033 sql_help.c:3042 msgid "index_method" msgstr "метод_индекса" -#: sql_help.c:356 sql_help.c:1154 sql_help.c:1571 sql_help.c:2026 -#: sql_help.c:2474 
sql_help.c:2641 sql_help.c:3185 sql_help.c:3409 -#: sql_help.c:3741 +#: sql_help.c:356 sql_help.c:1152 sql_help.c:1573 sql_help.c:2028 +#: sql_help.c:2465 sql_help.c:2636 sql_help.c:3180 sql_help.c:3404 +#: sql_help.c:3737 msgid "type_name" msgstr "имя_типа" -#: sql_help.c:357 sql_help.c:1572 sql_help.c:2025 sql_help.c:2642 -#: sql_help.c:2868 sql_help.c:3186 sql_help.c:3401 sql_help.c:3733 +#: sql_help.c:357 sql_help.c:1574 sql_help.c:2027 sql_help.c:2637 +#: sql_help.c:2863 sql_help.c:3181 sql_help.c:3396 sql_help.c:3729 msgid "lang_name" msgstr "имя_языка" @@ -4049,120 +4143,121 @@ msgstr "имя_языка" msgid "and aggregate_signature is:" msgstr "и сигнатура_агр_функции:" -#: sql_help.c:383 sql_help.c:1666 sql_help.c:1925 +#: sql_help.c:383 sql_help.c:1668 sql_help.c:1927 msgid "handler_function" msgstr "функция_обработчик" -#: sql_help.c:384 sql_help.c:1926 +#: sql_help.c:384 sql_help.c:1928 msgid "validator_function" msgstr "функция_проверки" -#: sql_help.c:433 sql_help.c:510 sql_help.c:641 sql_help.c:1093 sql_help.c:1298 -#: sql_help.c:2506 sql_help.c:2507 sql_help.c:2523 sql_help.c:2524 +#: sql_help.c:433 sql_help.c:510 sql_help.c:641 sql_help.c:1085 sql_help.c:1296 +#: sql_help.c:2498 sql_help.c:2499 sql_help.c:2515 sql_help.c:2516 msgid "action" msgstr "действие" #: sql_help.c:435 sql_help.c:442 sql_help.c:446 sql_help.c:447 sql_help.c:450 #: sql_help.c:452 sql_help.c:453 sql_help.c:454 sql_help.c:456 sql_help.c:459 #: sql_help.c:461 sql_help.c:462 sql_help.c:645 sql_help.c:655 sql_help.c:657 -#: sql_help.c:660 sql_help.c:662 sql_help.c:927 sql_help.c:1095 sql_help.c:1113 -#: sql_help.c:1117 sql_help.c:1118 sql_help.c:1122 sql_help.c:1124 -#: sql_help.c:1125 sql_help.c:1126 sql_help.c:1128 sql_help.c:1131 -#: sql_help.c:1133 sql_help.c:1400 sql_help.c:1403 sql_help.c:1423 -#: sql_help.c:1527 sql_help.c:1632 sql_help.c:1637 sql_help.c:1651 -#: sql_help.c:1652 sql_help.c:1653 sql_help.c:1966 sql_help.c:1979 -#: sql_help.c:2023 sql_help.c:2084 sql_help.c:2119 sql_help.c:2323 -#: sql_help.c:2349 sql_help.c:2350 sql_help.c:2458 sql_help.c:2466 -#: sql_help.c:2475 sql_help.c:2478 sql_help.c:2487 sql_help.c:2491 -#: sql_help.c:2511 sql_help.c:2513 sql_help.c:2520 sql_help.c:2535 -#: sql_help.c:2552 sql_help.c:2677 sql_help.c:2813 sql_help.c:3380 -#: sql_help.c:3381 sql_help.c:3460 sql_help.c:3475 sql_help.c:3477 -#: sql_help.c:3479 sql_help.c:3712 sql_help.c:3713 sql_help.c:3813 -#: sql_help.c:3956 sql_help.c:4195 sql_help.c:4237 sql_help.c:4239 -#: sql_help.c:4241 sql_help.c:4258 sql_help.c:4261 sql_help.c:4389 +#: sql_help.c:660 sql_help.c:662 sql_help.c:923 sql_help.c:1087 sql_help.c:1105 +#: sql_help.c:1109 sql_help.c:1110 sql_help.c:1114 sql_help.c:1116 +#: sql_help.c:1117 sql_help.c:1118 sql_help.c:1120 sql_help.c:1123 +#: sql_help.c:1124 sql_help.c:1126 sql_help.c:1129 sql_help.c:1131 +#: sql_help.c:1402 sql_help.c:1405 sql_help.c:1425 sql_help.c:1529 +#: sql_help.c:1634 sql_help.c:1639 sql_help.c:1653 sql_help.c:1654 +#: sql_help.c:1655 sql_help.c:1968 sql_help.c:1981 sql_help.c:2025 +#: sql_help.c:2086 sql_help.c:2121 sql_help.c:2319 sql_help.c:2347 +#: sql_help.c:2348 sql_help.c:2449 sql_help.c:2457 sql_help.c:2466 +#: sql_help.c:2469 sql_help.c:2478 sql_help.c:2482 sql_help.c:2503 +#: sql_help.c:2505 sql_help.c:2512 sql_help.c:2530 sql_help.c:2547 +#: sql_help.c:2672 sql_help.c:2808 sql_help.c:3375 sql_help.c:3376 +#: sql_help.c:3456 sql_help.c:3471 sql_help.c:3473 sql_help.c:3475 +#: sql_help.c:3708 sql_help.c:3709 sql_help.c:3809 sql_help.c:3952 +#: sql_help.c:4191 
sql_help.c:4233 sql_help.c:4235 sql_help.c:4237 +#: sql_help.c:4254 sql_help.c:4257 sql_help.c:4385 msgid "column_name" msgstr "имя_столбца" -#: sql_help.c:436 sql_help.c:646 sql_help.c:1096 +#: sql_help.c:436 sql_help.c:646 sql_help.c:1088 msgid "new_column_name" msgstr "новое_имя_столбца" -#: sql_help.c:441 sql_help.c:531 sql_help.c:654 sql_help.c:1112 sql_help.c:1314 +#: sql_help.c:441 sql_help.c:531 sql_help.c:654 sql_help.c:1104 sql_help.c:1312 msgid "where action is one of:" msgstr "где допустимое действие:" -#: sql_help.c:443 sql_help.c:448 sql_help.c:919 sql_help.c:1114 sql_help.c:1119 -#: sql_help.c:1316 sql_help.c:1320 sql_help.c:1878 sql_help.c:1967 -#: sql_help.c:2158 sql_help.c:2316 sql_help.c:2459 sql_help.c:2722 -#: sql_help.c:3562 +#: sql_help.c:443 sql_help.c:448 sql_help.c:915 sql_help.c:1106 sql_help.c:1111 +#: sql_help.c:1314 sql_help.c:1318 sql_help.c:1880 sql_help.c:1969 +#: sql_help.c:2160 sql_help.c:2312 sql_help.c:2450 sql_help.c:2717 +#: sql_help.c:3558 msgid "data_type" msgstr "тип_данных" -#: sql_help.c:444 sql_help.c:449 sql_help.c:1115 sql_help.c:1120 -#: sql_help.c:1317 sql_help.c:1321 sql_help.c:1879 sql_help.c:1970 -#: sql_help.c:2086 sql_help.c:2460 sql_help.c:2468 sql_help.c:2480 -#: sql_help.c:2493 sql_help.c:2723 sql_help.c:2729 sql_help.c:3470 +#: sql_help.c:444 sql_help.c:449 sql_help.c:1107 sql_help.c:1112 +#: sql_help.c:1315 sql_help.c:1319 sql_help.c:1881 sql_help.c:1972 +#: sql_help.c:2088 sql_help.c:2451 sql_help.c:2459 sql_help.c:2471 +#: sql_help.c:2484 sql_help.c:2718 sql_help.c:2724 sql_help.c:3466 msgid "collation" msgstr "правило_сортировки" -#: sql_help.c:445 sql_help.c:1116 sql_help.c:1971 sql_help.c:1980 -#: sql_help.c:2461 sql_help.c:2476 sql_help.c:2488 +#: sql_help.c:445 sql_help.c:1108 sql_help.c:1973 sql_help.c:1982 +#: sql_help.c:2452 sql_help.c:2467 sql_help.c:2479 msgid "column_constraint" msgstr "ограничение_столбца" -#: sql_help.c:455 sql_help.c:656 sql_help.c:1127 +#: sql_help.c:455 sql_help.c:656 sql_help.c:1125 msgid "integer" msgstr "целое" -#: sql_help.c:457 sql_help.c:460 sql_help.c:658 sql_help.c:661 sql_help.c:1129 -#: sql_help.c:1132 +#: sql_help.c:457 sql_help.c:460 sql_help.c:658 sql_help.c:661 sql_help.c:1127 +#: sql_help.c:1130 msgid "attribute_option" msgstr "атрибут" -#: sql_help.c:465 sql_help.c:1134 sql_help.c:1972 sql_help.c:1981 -#: sql_help.c:2462 sql_help.c:2477 sql_help.c:2489 +#: sql_help.c:465 sql_help.c:1132 sql_help.c:1974 sql_help.c:1983 +#: sql_help.c:2453 sql_help.c:2468 sql_help.c:2480 msgid "table_constraint" msgstr "ограничение_таблицы" -#: sql_help.c:468 sql_help.c:469 sql_help.c:470 sql_help.c:471 sql_help.c:1139 -#: sql_help.c:1140 sql_help.c:1141 sql_help.c:1142 sql_help.c:1573 +#: sql_help.c:468 sql_help.c:469 sql_help.c:470 sql_help.c:471 sql_help.c:1137 +#: sql_help.c:1138 sql_help.c:1139 sql_help.c:1140 sql_help.c:1575 msgid "trigger_name" msgstr "имя_триггера" -#: sql_help.c:472 sql_help.c:473 sql_help.c:1152 sql_help.c:1153 -#: sql_help.c:1973 sql_help.c:1978 sql_help.c:2465 sql_help.c:2486 +#: sql_help.c:472 sql_help.c:473 sql_help.c:1150 sql_help.c:1151 +#: sql_help.c:1975 sql_help.c:1980 sql_help.c:2456 sql_help.c:2477 msgid "parent_table" msgstr "таблица_родитель" -#: sql_help.c:530 sql_help.c:580 sql_help.c:643 sql_help.c:1277 sql_help.c:1910 +#: sql_help.c:530 sql_help.c:580 sql_help.c:643 sql_help.c:1275 sql_help.c:1912 msgid "extension_name" msgstr "имя_расширения" -#: sql_help.c:532 sql_help.c:2027 +#: sql_help.c:532 sql_help.c:2029 msgid "execution_cost" msgstr 
"стоимость_выполнения" -#: sql_help.c:533 sql_help.c:2028 +#: sql_help.c:533 sql_help.c:2030 msgid "result_rows" msgstr "строк_в_результате" -#: sql_help.c:554 sql_help.c:556 sql_help.c:857 sql_help.c:865 sql_help.c:869 -#: sql_help.c:872 sql_help.c:875 sql_help.c:1355 sql_help.c:1363 -#: sql_help.c:1366 sql_help.c:1368 sql_help.c:1370 sql_help.c:2294 -#: sql_help.c:2296 sql_help.c:2299 sql_help.c:2300 sql_help.c:3379 -#: sql_help.c:3383 sql_help.c:3386 sql_help.c:3388 sql_help.c:3390 -#: sql_help.c:3392 sql_help.c:3394 sql_help.c:3400 sql_help.c:3402 -#: sql_help.c:3404 sql_help.c:3406 sql_help.c:3408 sql_help.c:3410 +#: sql_help.c:554 sql_help.c:556 sql_help.c:853 sql_help.c:861 sql_help.c:865 +#: sql_help.c:868 sql_help.c:871 sql_help.c:1353 sql_help.c:1361 +#: sql_help.c:1365 sql_help.c:1368 sql_help.c:1371 sql_help.c:2290 +#: sql_help.c:2292 sql_help.c:2295 sql_help.c:2296 sql_help.c:3374 +#: sql_help.c:3378 sql_help.c:3381 sql_help.c:3383 sql_help.c:3385 +#: sql_help.c:3387 sql_help.c:3389 sql_help.c:3395 sql_help.c:3397 +#: sql_help.c:3399 sql_help.c:3401 sql_help.c:3403 sql_help.c:3405 msgid "role_specification" msgstr "указание_роли" -#: sql_help.c:555 sql_help.c:557 sql_help.c:1382 sql_help.c:1853 -#: sql_help.c:2302 sql_help.c:2798 sql_help.c:3219 sql_help.c:4041 +#: sql_help.c:555 sql_help.c:557 sql_help.c:1384 sql_help.c:1855 +#: sql_help.c:2298 sql_help.c:2793 sql_help.c:3214 sql_help.c:4037 msgid "user_name" msgstr "имя_пользователя" -#: sql_help.c:558 sql_help.c:877 sql_help.c:1371 sql_help.c:2301 -#: sql_help.c:3411 +#: sql_help.c:558 sql_help.c:873 sql_help.c:1373 sql_help.c:2297 +#: sql_help.c:3406 msgid "where role_specification can be:" msgstr "где допустимое указание_роли:" @@ -4170,1654 +4265,1667 @@ msgstr "где допустимое указание_роли:" msgid "group_name" msgstr "имя_группы" -#: sql_help.c:578 sql_help.c:1858 sql_help.c:2090 sql_help.c:2122 -#: sql_help.c:2472 sql_help.c:2484 sql_help.c:2497 sql_help.c:2533 -#: sql_help.c:2555 sql_help.c:2567 sql_help.c:3407 sql_help.c:3739 +#: sql_help.c:578 sql_help.c:1860 sql_help.c:2092 sql_help.c:2124 +#: sql_help.c:2463 sql_help.c:2475 sql_help.c:2488 sql_help.c:2528 +#: sql_help.c:2550 sql_help.c:2562 sql_help.c:3402 sql_help.c:3735 msgid "tablespace_name" msgstr "табл_пространство" -#: sql_help.c:582 sql_help.c:585 sql_help.c:664 sql_help.c:666 sql_help.c:1149 -#: sql_help.c:1151 sql_help.c:2088 sql_help.c:2120 sql_help.c:2470 -#: sql_help.c:2482 sql_help.c:2495 sql_help.c:2531 sql_help.c:2553 +#: sql_help.c:582 sql_help.c:585 sql_help.c:664 sql_help.c:666 sql_help.c:1147 +#: sql_help.c:1149 sql_help.c:2090 sql_help.c:2122 sql_help.c:2461 +#: sql_help.c:2473 sql_help.c:2486 sql_help.c:2526 sql_help.c:2548 msgid "storage_parameter" msgstr "параметр_хранения" -#: sql_help.c:608 sql_help.c:1544 sql_help.c:3824 +#: sql_help.c:608 sql_help.c:1546 sql_help.c:3820 msgid "large_object_oid" msgstr "oid_большого_объекта" -#: sql_help.c:663 sql_help.c:1147 sql_help.c:1156 sql_help.c:1159 -#: sql_help.c:1463 +#: sql_help.c:663 sql_help.c:1145 sql_help.c:1154 sql_help.c:1157 +#: sql_help.c:1465 msgid "index_name" msgstr "имя_индекса" -#: sql_help.c:695 sql_help.c:2143 +#: sql_help.c:695 sql_help.c:2145 msgid "res_proc" msgstr "процедура_ограничения" -#: sql_help.c:696 sql_help.c:2144 +#: sql_help.c:696 sql_help.c:2146 msgid "join_proc" msgstr "процедура_соединения" -#: sql_help.c:748 sql_help.c:760 sql_help.c:2161 +#: sql_help.c:748 sql_help.c:760 sql_help.c:2163 msgid "strategy_number" msgstr "номер_стратегии" #: sql_help.c:750 
sql_help.c:751 sql_help.c:754 sql_help.c:755 sql_help.c:761 -#: sql_help.c:762 sql_help.c:764 sql_help.c:765 sql_help.c:2163 sql_help.c:2164 -#: sql_help.c:2167 sql_help.c:2168 +#: sql_help.c:762 sql_help.c:764 sql_help.c:765 sql_help.c:2165 sql_help.c:2166 +#: sql_help.c:2169 sql_help.c:2170 msgid "op_type" msgstr "тип_операции" -#: sql_help.c:752 sql_help.c:2165 +#: sql_help.c:752 sql_help.c:2167 msgid "sort_family_name" msgstr "семейство_сортировки" -#: sql_help.c:753 sql_help.c:763 sql_help.c:2166 +#: sql_help.c:753 sql_help.c:763 sql_help.c:2168 msgid "support_number" msgstr "номер_опорной_процедуры" -#: sql_help.c:757 sql_help.c:1797 sql_help.c:2170 sql_help.c:2644 -#: sql_help.c:2646 +#: sql_help.c:757 sql_help.c:1799 sql_help.c:2172 sql_help.c:2639 +#: sql_help.c:2641 msgid "argument_type" msgstr "тип_аргумента" -#: sql_help.c:788 sql_help.c:791 sql_help.c:819 sql_help.c:821 sql_help.c:823 -#: sql_help.c:887 sql_help.c:926 sql_help.c:1273 sql_help.c:1276 -#: sql_help.c:1422 sql_help.c:1462 sql_help.c:1529 sql_help.c:1554 -#: sql_help.c:1559 sql_help.c:1574 sql_help.c:1631 sql_help.c:1636 -#: sql_help.c:1965 sql_help.c:1977 sql_help.c:2082 sql_help.c:2118 -#: sql_help.c:2194 sql_help.c:2215 sql_help.c:2271 sql_help.c:2322 -#: sql_help.c:2351 sql_help.c:2457 sql_help.c:2473 sql_help.c:2485 -#: sql_help.c:2551 sql_help.c:2670 sql_help.c:2847 sql_help.c:3064 -#: sql_help.c:3089 sql_help.c:3195 sql_help.c:3377 sql_help.c:3382 -#: sql_help.c:3427 sql_help.c:3458 sql_help.c:3709 sql_help.c:3714 -#: sql_help.c:3812 sql_help.c:3911 sql_help.c:3913 sql_help.c:3962 -#: sql_help.c:4001 sql_help.c:4150 sql_help.c:4152 sql_help.c:4201 -#: sql_help.c:4235 sql_help.c:4257 sql_help.c:4259 sql_help.c:4260 -#: sql_help.c:4344 sql_help.c:4346 sql_help.c:4395 +#: sql_help.c:788 sql_help.c:791 sql_help.c:808 sql_help.c:810 sql_help.c:812 +#: sql_help.c:883 sql_help.c:922 sql_help.c:1271 sql_help.c:1274 +#: sql_help.c:1424 sql_help.c:1464 sql_help.c:1531 sql_help.c:1556 +#: sql_help.c:1561 sql_help.c:1576 sql_help.c:1633 sql_help.c:1638 +#: sql_help.c:1967 sql_help.c:1979 sql_help.c:2084 sql_help.c:2120 +#: sql_help.c:2196 sql_help.c:2211 sql_help.c:2267 sql_help.c:2318 +#: sql_help.c:2349 sql_help.c:2448 sql_help.c:2464 sql_help.c:2476 +#: sql_help.c:2546 sql_help.c:2665 sql_help.c:2842 sql_help.c:3059 +#: sql_help.c:3084 sql_help.c:3190 sql_help.c:3372 sql_help.c:3377 +#: sql_help.c:3422 sql_help.c:3454 sql_help.c:3705 sql_help.c:3710 +#: sql_help.c:3808 sql_help.c:3907 sql_help.c:3909 sql_help.c:3958 +#: sql_help.c:3997 sql_help.c:4146 sql_help.c:4148 sql_help.c:4197 +#: sql_help.c:4231 sql_help.c:4253 sql_help.c:4255 sql_help.c:4256 +#: sql_help.c:4340 sql_help.c:4342 sql_help.c:4391 msgid "table_name" msgstr "имя_таблицы" -#: sql_help.c:793 sql_help.c:2196 +#: sql_help.c:793 sql_help.c:2198 msgid "using_expression" msgstr "выражение_использования" -#: sql_help.c:794 sql_help.c:2197 +#: sql_help.c:794 sql_help.c:2199 msgid "check_expression" msgstr "выражение_проверки" -#: sql_help.c:861 sql_help.c:1359 sql_help.c:2062 sql_help.c:2248 -#: sql_help.c:2781 +#: sql_help.c:814 sql_help.c:2212 +msgid "publication_parameter" +msgstr "параметр_публикации" + +#: sql_help.c:857 sql_help.c:1357 sql_help.c:2064 sql_help.c:2244 +#: sql_help.c:2776 msgid "password" msgstr "пароль" -#: sql_help.c:862 sql_help.c:1360 sql_help.c:2063 sql_help.c:2249 -#: sql_help.c:2782 +#: sql_help.c:858 sql_help.c:1358 sql_help.c:2065 sql_help.c:2245 +#: sql_help.c:2777 msgid "timestamp" msgstr "timestamp" -#: sql_help.c:866 
sql_help.c:870 sql_help.c:873 sql_help.c:876 sql_help.c:3387 -#: sql_help.c:3719 +#: sql_help.c:862 sql_help.c:866 sql_help.c:869 sql_help.c:872 sql_help.c:1362 +#: sql_help.c:1366 sql_help.c:1369 sql_help.c:1372 sql_help.c:3382 +#: sql_help.c:3715 msgid "database_name" msgstr "имя_БД" -#: sql_help.c:920 sql_help.c:2317 +#: sql_help.c:916 sql_help.c:2313 msgid "increment" msgstr "шаг" -#: sql_help.c:921 sql_help.c:2318 +#: sql_help.c:917 sql_help.c:2314 msgid "minvalue" msgstr "мин_значение" -#: sql_help.c:922 sql_help.c:2319 +#: sql_help.c:918 sql_help.c:2315 msgid "maxvalue" msgstr "макс_значение" -#: sql_help.c:923 sql_help.c:2320 sql_help.c:3909 sql_help.c:3999 -#: sql_help.c:4148 sql_help.c:4277 sql_help.c:4342 +#: sql_help.c:919 sql_help.c:2316 sql_help.c:3905 sql_help.c:3995 +#: sql_help.c:4144 sql_help.c:4273 sql_help.c:4338 msgid "start" msgstr "начальное_значение" -#: sql_help.c:924 +#: sql_help.c:920 sql_help.c:1122 msgid "restart" msgstr "значение_перезапуска" -#: sql_help.c:925 sql_help.c:2321 +#: sql_help.c:921 sql_help.c:2317 msgid "cache" msgstr "кеш" -#: sql_help.c:991 -msgid "suboption" -msgstr "подпараметр" - -#: sql_help.c:992 -msgid "where suboption can be:" -msgstr "где допустимые подпараметры:" - -#: sql_help.c:993 sql_help.c:2375 -msgid "slot_name" -msgstr "имя_слота" +#: sql_help.c:978 sql_help.c:2361 +msgid "conninfo" +msgstr "строка_подключения" -#: sql_help.c:995 sql_help.c:2372 +#: sql_help.c:980 sql_help.c:2362 msgid "publication_name" msgstr "имя_публикации" -#: sql_help.c:996 sql_help.c:998 -msgid "puboption" -msgstr "параметр_публикации" +#: sql_help.c:981 +msgid "set_publication_option" +msgstr "параметр_set_publication" -#: sql_help.c:999 -msgid "where puboption can be:" -msgstr "где допустимый параметр_публикации:" +#: sql_help.c:984 +msgid "refresh_option" +msgstr "параметр_обновления" -#: sql_help.c:1003 sql_help.c:2371 -msgid "conninfo" -msgstr "строка_подключения" +#: sql_help.c:989 sql_help.c:2363 +msgid "subscription_parameter" +msgstr "параметр_подписки" -#: sql_help.c:1108 sql_help.c:1111 +#: sql_help.c:1100 sql_help.c:1103 msgid "partition_name" msgstr "имя_секции" -#: sql_help.c:1109 sql_help.c:1982 sql_help.c:2490 +#: sql_help.c:1101 sql_help.c:1984 sql_help.c:2481 msgid "partition_bound_spec" msgstr "указание_границ_секции" -#: sql_help.c:1135 +#: sql_help.c:1119 sql_help.c:2493 +msgid "sequence_options" +msgstr "параметры_последовательности" + +#: sql_help.c:1121 +msgid "sequence_option" +msgstr "параметр_последовательности" + +#: sql_help.c:1133 msgid "table_constraint_using_index" msgstr "ограничение_таблицы_с_индексом" -#: sql_help.c:1143 sql_help.c:1144 sql_help.c:1145 sql_help.c:1146 +#: sql_help.c:1141 sql_help.c:1142 sql_help.c:1143 sql_help.c:1144 msgid "rewrite_rule_name" msgstr "имя_правила_перезаписи" -#: sql_help.c:1157 +#: sql_help.c:1155 msgid "and table_constraint_using_index is:" msgstr "и ограничение_таблицы_с_индексом:" -#: sql_help.c:1175 sql_help.c:1178 sql_help.c:2570 +#: sql_help.c:1173 sql_help.c:1176 sql_help.c:2565 msgid "tablespace_option" msgstr "параметр_табл_пространства" -#: sql_help.c:1199 sql_help.c:1202 sql_help.c:1208 sql_help.c:1212 +#: sql_help.c:1197 sql_help.c:1200 sql_help.c:1206 sql_help.c:1210 msgid "token_type" msgstr "тип_фрагмента" -#: sql_help.c:1200 sql_help.c:1203 +#: sql_help.c:1198 sql_help.c:1201 msgid "dictionary_name" msgstr "имя_словаря" -#: sql_help.c:1205 sql_help.c:1209 +#: sql_help.c:1203 sql_help.c:1207 msgid "old_dictionary" msgstr "старый_словарь" -#: sql_help.c:1206 sql_help.c:1210 
+#: sql_help.c:1204 sql_help.c:1208 msgid "new_dictionary" msgstr "новый_словарь" -#: sql_help.c:1302 sql_help.c:1315 sql_help.c:1318 sql_help.c:1319 -#: sql_help.c:2721 +#: sql_help.c:1300 sql_help.c:1313 sql_help.c:1316 sql_help.c:1317 +#: sql_help.c:2716 msgid "attribute_name" msgstr "имя_атрибута" -#: sql_help.c:1303 +#: sql_help.c:1301 msgid "new_attribute_name" msgstr "новое_имя_атрибута" -#: sql_help.c:1309 sql_help.c:1313 +#: sql_help.c:1307 sql_help.c:1311 msgid "new_enum_value" msgstr "новое_значение_перечисления" -#: sql_help.c:1310 +#: sql_help.c:1308 msgid "neighbor_enum_value" msgstr "соседнее_значение_перечисления" -#: sql_help.c:1312 +#: sql_help.c:1310 msgid "existing_enum_value" msgstr "существующее_значение_перечисления" -#: sql_help.c:1383 sql_help.c:1974 sql_help.c:1983 sql_help.c:2333 -#: sql_help.c:2799 sql_help.c:3220 sql_help.c:3393 sql_help.c:3428 -#: sql_help.c:3725 +#: sql_help.c:1385 sql_help.c:1976 sql_help.c:1985 sql_help.c:2329 +#: sql_help.c:2794 sql_help.c:3215 sql_help.c:3388 sql_help.c:3423 +#: sql_help.c:3721 msgid "server_name" msgstr "имя_сервера" -#: sql_help.c:1411 sql_help.c:1414 sql_help.c:2814 +#: sql_help.c:1413 sql_help.c:1416 sql_help.c:2809 msgid "view_option_name" msgstr "имя_параметра_представления" -#: sql_help.c:1412 sql_help.c:2815 +#: sql_help.c:1414 sql_help.c:2810 msgid "view_option_value" msgstr "значение_параметра_представления" -#: sql_help.c:1437 sql_help.c:4057 sql_help.c:4059 sql_help.c:4083 +#: sql_help.c:1439 sql_help.c:4053 sql_help.c:4055 sql_help.c:4079 msgid "transaction_mode" msgstr "режим_транзакции" -#: sql_help.c:1438 sql_help.c:4060 sql_help.c:4084 +#: sql_help.c:1440 sql_help.c:4056 sql_help.c:4080 msgid "where transaction_mode is one of:" msgstr "где допустимый режим_транзакции:" -#: sql_help.c:1526 +#: sql_help.c:1528 msgid "relation_name" msgstr "имя_отношения" -#: sql_help.c:1531 sql_help.c:3389 sql_help.c:3721 +#: sql_help.c:1533 sql_help.c:3384 sql_help.c:3717 msgid "domain_name" msgstr "имя_домена" -#: sql_help.c:1553 +#: sql_help.c:1555 msgid "policy_name" msgstr "имя_политики" -#: sql_help.c:1558 +#: sql_help.c:1560 msgid "rule_name" msgstr "имя_правила" -#: sql_help.c:1577 +#: sql_help.c:1579 msgid "text" msgstr "текст" -#: sql_help.c:1602 sql_help.c:3571 sql_help.c:3759 +#: sql_help.c:1604 sql_help.c:3567 sql_help.c:3755 msgid "transaction_id" msgstr "код_транзакции" -#: sql_help.c:1633 sql_help.c:1639 sql_help.c:3497 +#: sql_help.c:1635 sql_help.c:1641 sql_help.c:3493 msgid "filename" msgstr "имя_файла" -#: sql_help.c:1634 sql_help.c:1640 sql_help.c:2273 sql_help.c:2274 -#: sql_help.c:2275 +#: sql_help.c:1636 sql_help.c:1642 sql_help.c:2269 sql_help.c:2270 +#: sql_help.c:2271 msgid "command" msgstr "команда" -#: sql_help.c:1638 sql_help.c:2123 sql_help.c:2556 sql_help.c:2816 -#: sql_help.c:2834 sql_help.c:3462 +#: sql_help.c:1640 sql_help.c:2125 sql_help.c:2551 sql_help.c:2811 +#: sql_help.c:2829 sql_help.c:3458 msgid "query" msgstr "запрос" -#: sql_help.c:1642 sql_help.c:3266 +#: sql_help.c:1644 sql_help.c:3261 msgid "where option can be one of:" msgstr "где допустимый параметр:" -#: sql_help.c:1643 +#: sql_help.c:1645 msgid "format_name" msgstr "имя_формата" -#: sql_help.c:1644 sql_help.c:1645 sql_help.c:1648 sql_help.c:3267 -#: sql_help.c:3268 sql_help.c:3269 sql_help.c:3270 sql_help.c:3271 -#: sql_help.c:3272 +#: sql_help.c:1646 sql_help.c:1647 sql_help.c:1650 sql_help.c:3262 +#: sql_help.c:3263 sql_help.c:3264 sql_help.c:3265 sql_help.c:3266 +#: sql_help.c:3267 msgid "boolean" msgstr 
"логическое_значение" -#: sql_help.c:1646 +#: sql_help.c:1648 msgid "delimiter_character" msgstr "символ_разделитель" -#: sql_help.c:1647 +#: sql_help.c:1649 msgid "null_string" msgstr "представление_NULL" -#: sql_help.c:1649 +#: sql_help.c:1651 msgid "quote_character" msgstr "символ_кавычек" -#: sql_help.c:1650 +#: sql_help.c:1652 msgid "escape_character" msgstr "спецсимвол" -#: sql_help.c:1654 +#: sql_help.c:1656 msgid "encoding_name" msgstr "имя_кодировки" -#: sql_help.c:1665 +#: sql_help.c:1667 msgid "access_method_type" msgstr "тип_метода_доступа" -#: sql_help.c:1731 sql_help.c:1750 sql_help.c:1753 +#: sql_help.c:1733 sql_help.c:1752 sql_help.c:1755 msgid "arg_data_type" msgstr "тип_данных_аргумента" -#: sql_help.c:1732 sql_help.c:1754 sql_help.c:1762 +#: sql_help.c:1734 sql_help.c:1756 sql_help.c:1764 msgid "sfunc" msgstr "функция_состояния" -#: sql_help.c:1733 sql_help.c:1755 sql_help.c:1763 +#: sql_help.c:1735 sql_help.c:1757 sql_help.c:1765 msgid "state_data_type" msgstr "тип_данных_состояния" -#: sql_help.c:1734 sql_help.c:1756 sql_help.c:1764 +#: sql_help.c:1736 sql_help.c:1758 sql_help.c:1766 msgid "state_data_size" msgstr "размер_данных_состояния" -#: sql_help.c:1735 sql_help.c:1757 sql_help.c:1765 +#: sql_help.c:1737 sql_help.c:1759 sql_help.c:1767 msgid "ffunc" msgstr "функция_завершения" -#: sql_help.c:1736 sql_help.c:1766 +#: sql_help.c:1738 sql_help.c:1768 msgid "combinefunc" msgstr "комбинирующая_функция" -#: sql_help.c:1737 sql_help.c:1767 +#: sql_help.c:1739 sql_help.c:1769 msgid "serialfunc" msgstr "функция_сериализации" -#: sql_help.c:1738 sql_help.c:1768 +#: sql_help.c:1740 sql_help.c:1770 msgid "deserialfunc" msgstr "функция_десериализации" -#: sql_help.c:1739 sql_help.c:1758 sql_help.c:1769 +#: sql_help.c:1741 sql_help.c:1760 sql_help.c:1771 msgid "initial_condition" msgstr "начальное_условие" -#: sql_help.c:1740 sql_help.c:1770 +#: sql_help.c:1742 sql_help.c:1772 msgid "msfunc" msgstr "функция_состояния_движ" -#: sql_help.c:1741 sql_help.c:1771 +#: sql_help.c:1743 sql_help.c:1773 msgid "minvfunc" msgstr "обратная_функция_движ" -#: sql_help.c:1742 sql_help.c:1772 +#: sql_help.c:1744 sql_help.c:1774 msgid "mstate_data_type" msgstr "тип_данных_состояния_движ" -#: sql_help.c:1743 sql_help.c:1773 +#: sql_help.c:1745 sql_help.c:1775 msgid "mstate_data_size" msgstr "размер_данных_состояния_движ" -#: sql_help.c:1744 sql_help.c:1774 +#: sql_help.c:1746 sql_help.c:1776 msgid "mffunc" msgstr "функция_завершения_движ" -#: sql_help.c:1745 sql_help.c:1775 +#: sql_help.c:1747 sql_help.c:1777 msgid "minitial_condition" msgstr "начальное_условие_движ" -#: sql_help.c:1746 sql_help.c:1776 +#: sql_help.c:1748 sql_help.c:1778 msgid "sort_operator" msgstr "оператор_сортировки" -#: sql_help.c:1759 +#: sql_help.c:1761 msgid "or the old syntax" msgstr "или старый синтаксис" -#: sql_help.c:1761 +#: sql_help.c:1763 msgid "base_type" msgstr "базовый_тип" -#: sql_help.c:1817 +#: sql_help.c:1819 msgid "locale" msgstr "код_локали" -#: sql_help.c:1818 sql_help.c:1856 +#: sql_help.c:1820 sql_help.c:1858 msgid "lc_collate" msgstr "код_правила_сортировки" -#: sql_help.c:1819 sql_help.c:1857 +#: sql_help.c:1821 sql_help.c:1859 msgid "lc_ctype" msgstr "код_классификации_символов" -#: sql_help.c:1820 sql_help.c:3810 +#: sql_help.c:1822 sql_help.c:3806 msgid "provider" msgstr "поставщик" -#: sql_help.c:1821 sql_help.c:1912 +#: sql_help.c:1823 sql_help.c:1914 msgid "version" msgstr "версия" -#: sql_help.c:1823 +#: sql_help.c:1825 msgid "existing_collation" msgstr "существующее_правило_сортировки" -#: 
sql_help.c:1833 +#: sql_help.c:1835 msgid "source_encoding" msgstr "исходная_кодировка" -#: sql_help.c:1834 +#: sql_help.c:1836 msgid "dest_encoding" msgstr "целевая_кодировка" -#: sql_help.c:1854 sql_help.c:2596 +#: sql_help.c:1856 sql_help.c:2591 msgid "template" msgstr "шаблон" -#: sql_help.c:1855 +#: sql_help.c:1857 msgid "encoding" msgstr "кодировка" -#: sql_help.c:1881 +#: sql_help.c:1883 msgid "constraint" msgstr "ограничение" -#: sql_help.c:1882 +#: sql_help.c:1884 msgid "where constraint is:" msgstr "где ограничение:" -#: sql_help.c:1896 sql_help.c:2270 sql_help.c:2669 +#: sql_help.c:1898 sql_help.c:2266 sql_help.c:2664 msgid "event" msgstr "событие" -#: sql_help.c:1897 +#: sql_help.c:1899 msgid "filter_variable" msgstr "переменная_фильтра" -#: sql_help.c:1913 +#: sql_help.c:1915 msgid "old_version" msgstr "старая_версия" -#: sql_help.c:1986 sql_help.c:2498 +#: sql_help.c:1988 sql_help.c:2489 msgid "where column_constraint is:" msgstr "где ограничение_столбца:" -#: sql_help.c:1989 sql_help.c:2021 sql_help.c:2501 +#: sql_help.c:1991 sql_help.c:2023 sql_help.c:2492 msgid "default_expr" msgstr "выражение_по_умолчанию" -#: sql_help.c:1990 sql_help.c:2508 +#: sql_help.c:1992 sql_help.c:2500 msgid "and table_constraint is:" msgstr "и ограничение_таблицы:" -#: sql_help.c:2022 +#: sql_help.c:2024 msgid "rettype" msgstr "тип_возврата" -#: sql_help.c:2024 +#: sql_help.c:2026 msgid "column_type" msgstr "тип_столбца" -#: sql_help.c:2032 +#: sql_help.c:2034 msgid "definition" msgstr "определение" -#: sql_help.c:2033 +#: sql_help.c:2035 msgid "obj_file" msgstr "объектный_файл" -#: sql_help.c:2034 +#: sql_help.c:2036 msgid "link_symbol" msgstr "символ_в_экспорте" -#: sql_help.c:2035 +#: sql_help.c:2037 msgid "attribute" msgstr "атрибут" -#: sql_help.c:2069 sql_help.c:2255 sql_help.c:2788 +#: sql_help.c:2071 sql_help.c:2251 sql_help.c:2783 msgid "uid" msgstr "uid" -#: sql_help.c:2083 +#: sql_help.c:2085 msgid "method" msgstr "метод" -#: sql_help.c:2087 sql_help.c:2469 sql_help.c:2481 sql_help.c:2494 -#: sql_help.c:2537 sql_help.c:3471 +#: sql_help.c:2089 sql_help.c:2460 sql_help.c:2472 sql_help.c:2485 +#: sql_help.c:2532 sql_help.c:3467 msgid "opclass" msgstr "класс_оператора" -#: sql_help.c:2091 sql_help.c:2519 +#: sql_help.c:2093 sql_help.c:2511 msgid "predicate" msgstr "предикат" -#: sql_help.c:2103 +#: sql_help.c:2105 msgid "call_handler" msgstr "обработчик_вызова" -#: sql_help.c:2104 +#: sql_help.c:2106 msgid "inline_handler" msgstr "обработчик_внедрённого_кода" -#: sql_help.c:2105 +#: sql_help.c:2107 msgid "valfunction" msgstr "функция_проверки" -#: sql_help.c:2141 +#: sql_help.c:2143 msgid "com_op" msgstr "коммут_оператор" -#: sql_help.c:2142 +#: sql_help.c:2144 msgid "neg_op" msgstr "обратный_оператор" -#: sql_help.c:2160 +#: sql_help.c:2162 msgid "family_name" msgstr "имя_семейства" -#: sql_help.c:2171 +#: sql_help.c:2173 msgid "storage_type" msgstr "тип_хранения" -#: sql_help.c:2272 sql_help.c:2673 sql_help.c:2850 sql_help.c:3481 -#: sql_help.c:3900 sql_help.c:3902 sql_help.c:3990 sql_help.c:3992 -#: sql_help.c:4139 sql_help.c:4141 sql_help.c:4244 sql_help.c:4333 -#: sql_help.c:4335 +#: sql_help.c:2268 sql_help.c:2668 sql_help.c:2845 sql_help.c:3477 +#: sql_help.c:3896 sql_help.c:3898 sql_help.c:3986 sql_help.c:3988 +#: sql_help.c:4135 sql_help.c:4137 sql_help.c:4240 sql_help.c:4329 +#: sql_help.c:4331 msgid "condition" msgstr "условие" -#: sql_help.c:2276 sql_help.c:2676 +#: sql_help.c:2272 sql_help.c:2671 msgid "where event can be one of:" msgstr "где допустимое событие:" -#: 
sql_help.c:2295 sql_help.c:2297 +#: sql_help.c:2291 sql_help.c:2293 msgid "schema_element" msgstr "элемент_схемы" -#: sql_help.c:2334 +#: sql_help.c:2330 msgid "server_type" msgstr "тип_сервера" -#: sql_help.c:2335 +#: sql_help.c:2331 msgid "server_version" msgstr "версия_сервера" -#: sql_help.c:2336 sql_help.c:3391 sql_help.c:3723 +#: sql_help.c:2332 sql_help.c:3386 sql_help.c:3719 msgid "fdw_name" msgstr "имя_обёртки_сторонних_данных" -#: sql_help.c:2348 +#: sql_help.c:2345 msgid "statistics_name" msgstr "имя_статистики" -#: sql_help.c:2370 +#: sql_help.c:2346 +msgid "statistics_kind" +msgstr "вид_статистики" + +#: sql_help.c:2360 msgid "subscription_name" msgstr "имя_подписки" -#: sql_help.c:2463 +#: sql_help.c:2454 msgid "source_table" msgstr "исходная_таблица" -#: sql_help.c:2464 +#: sql_help.c:2455 msgid "like_option" msgstr "параметр_порождения" -#: sql_help.c:2502 sql_help.c:2503 sql_help.c:2512 sql_help.c:2514 -#: sql_help.c:2518 +#: sql_help.c:2494 sql_help.c:2495 sql_help.c:2504 sql_help.c:2506 +#: sql_help.c:2510 msgid "index_parameters" msgstr "параметры_индекса" -#: sql_help.c:2504 sql_help.c:2521 +#: sql_help.c:2496 sql_help.c:2513 msgid "reftable" msgstr "целевая_таблица" -#: sql_help.c:2505 sql_help.c:2522 +#: sql_help.c:2497 sql_help.c:2514 msgid "refcolumn" msgstr "целевой_столбец" -#: sql_help.c:2516 +#: sql_help.c:2508 msgid "exclude_element" msgstr "объект_исключения" -#: sql_help.c:2517 sql_help.c:3907 sql_help.c:3997 sql_help.c:4146 -#: sql_help.c:4275 sql_help.c:4340 +#: sql_help.c:2509 sql_help.c:3903 sql_help.c:3993 sql_help.c:4142 +#: sql_help.c:4271 sql_help.c:4336 msgid "operator" msgstr "оператор" -#: sql_help.c:2525 +#: sql_help.c:2517 msgid "and like_option is:" msgstr "и параметр_порождения:" -#: sql_help.c:2526 +#: sql_help.c:2518 msgid "and partition_bound_spec is:" msgstr "и указание_границ_секции:" -#: sql_help.c:2527 sql_help.c:2528 sql_help.c:2529 -msgid "bound_literal" -msgstr "константа_границы" +#: sql_help.c:2519 sql_help.c:2521 sql_help.c:2523 +msgid "numeric_literal" +msgstr "числовая_константа" -#: sql_help.c:2530 +#: sql_help.c:2520 sql_help.c:2522 sql_help.c:2524 +msgid "string_literal" +msgstr "строковая_константа" + +#: sql_help.c:2525 msgid "index_parameters in UNIQUE, PRIMARY KEY, and EXCLUDE constraints are:" msgstr "параметры_индекса в ограничениях UNIQUE, PRIMARY KEY и EXCLUDE:" -#: sql_help.c:2534 +#: sql_help.c:2529 msgid "exclude_element in an EXCLUDE constraint is:" msgstr "объект_исключения в ограничении EXCLUDE:" -#: sql_help.c:2569 +#: sql_help.c:2564 msgid "directory" msgstr "каталог" -#: sql_help.c:2583 +#: sql_help.c:2578 msgid "parser_name" msgstr "имя_анализатора" -#: sql_help.c:2584 +#: sql_help.c:2579 msgid "source_config" msgstr "исходная_конфигурация" -#: sql_help.c:2613 +#: sql_help.c:2608 msgid "start_function" msgstr "функция_начала" -#: sql_help.c:2614 +#: sql_help.c:2609 msgid "gettoken_function" msgstr "функция_выдачи_фрагмента" -#: sql_help.c:2615 +#: sql_help.c:2610 msgid "end_function" msgstr "функция_окончания" -#: sql_help.c:2616 +#: sql_help.c:2611 msgid "lextypes_function" msgstr "функция_лекс_типов" -#: sql_help.c:2617 +#: sql_help.c:2612 msgid "headline_function" msgstr "функция_создания_выдержек" -#: sql_help.c:2629 +#: sql_help.c:2624 msgid "init_function" msgstr "функция_инициализации" -#: sql_help.c:2630 +#: sql_help.c:2625 msgid "lexize_function" msgstr "функция_выделения_лексем" -#: sql_help.c:2643 +#: sql_help.c:2638 msgid "from_sql_function_name" msgstr "имя_функции_из_sql" -#: sql_help.c:2645 
+#: sql_help.c:2640 msgid "to_sql_function_name" msgstr "имя_функции_в_sql" -#: sql_help.c:2671 +#: sql_help.c:2666 msgid "referenced_table_name" msgstr "ссылающаяся_таблица" -#: sql_help.c:2672 +#: sql_help.c:2667 msgid "transition_relation_name" msgstr "имя_переходного_отношения" -#: sql_help.c:2675 +#: sql_help.c:2670 msgid "arguments" msgstr "аргументы" -#: sql_help.c:2725 sql_help.c:3835 +#: sql_help.c:2720 sql_help.c:3831 msgid "label" msgstr "метка" -#: sql_help.c:2727 +#: sql_help.c:2722 msgid "subtype" msgstr "подтип" -#: sql_help.c:2728 +#: sql_help.c:2723 msgid "subtype_operator_class" msgstr "класс_оператора_подтипа" -#: sql_help.c:2730 +#: sql_help.c:2725 msgid "canonical_function" msgstr "каноническая_функция" -#: sql_help.c:2731 +#: sql_help.c:2726 msgid "subtype_diff_function" msgstr "функция_различий_подтипа" -#: sql_help.c:2733 +#: sql_help.c:2728 msgid "input_function" msgstr "функция_ввода" -#: sql_help.c:2734 +#: sql_help.c:2729 msgid "output_function" msgstr "функция_вывода" -#: sql_help.c:2735 +#: sql_help.c:2730 msgid "receive_function" msgstr "функция_получения" -#: sql_help.c:2736 +#: sql_help.c:2731 msgid "send_function" msgstr "функция_отправки" -#: sql_help.c:2737 +#: sql_help.c:2732 msgid "type_modifier_input_function" msgstr "функция_ввода_модификатора_типа" -#: sql_help.c:2738 +#: sql_help.c:2733 msgid "type_modifier_output_function" msgstr "функция_вывода_модификатора_типа" -#: sql_help.c:2739 +#: sql_help.c:2734 msgid "analyze_function" msgstr "функция_анализа" -#: sql_help.c:2740 +#: sql_help.c:2735 msgid "internallength" msgstr "внутр_длина" -#: sql_help.c:2741 +#: sql_help.c:2736 msgid "alignment" msgstr "выравнивание" -#: sql_help.c:2742 +#: sql_help.c:2737 msgid "storage" msgstr "хранение" -#: sql_help.c:2743 +#: sql_help.c:2738 msgid "like_type" msgstr "тип_образец" -#: sql_help.c:2744 +#: sql_help.c:2739 msgid "category" msgstr "категория" -#: sql_help.c:2745 +#: sql_help.c:2740 msgid "preferred" msgstr "предпочитаемый" -#: sql_help.c:2746 +#: sql_help.c:2741 msgid "default" msgstr "по_умолчанию" -#: sql_help.c:2747 +#: sql_help.c:2742 msgid "element" msgstr "элемент" -#: sql_help.c:2748 +#: sql_help.c:2743 msgid "delimiter" msgstr "разделитель" -#: sql_help.c:2749 +#: sql_help.c:2744 msgid "collatable" msgstr "сортируемый" -#: sql_help.c:2846 sql_help.c:3457 sql_help.c:3895 sql_help.c:3984 -#: sql_help.c:4134 sql_help.c:4234 sql_help.c:4328 +#: sql_help.c:2841 sql_help.c:3453 sql_help.c:3891 sql_help.c:3980 +#: sql_help.c:4130 sql_help.c:4230 sql_help.c:4324 msgid "with_query" msgstr "запрос_WITH" -#: sql_help.c:2848 sql_help.c:3459 sql_help.c:3914 sql_help.c:3920 -#: sql_help.c:3923 sql_help.c:3927 sql_help.c:3931 sql_help.c:3939 -#: sql_help.c:4153 sql_help.c:4159 sql_help.c:4162 sql_help.c:4166 -#: sql_help.c:4170 sql_help.c:4178 sql_help.c:4236 sql_help.c:4347 -#: sql_help.c:4353 sql_help.c:4356 sql_help.c:4360 sql_help.c:4364 -#: sql_help.c:4372 +#: sql_help.c:2843 sql_help.c:3455 sql_help.c:3910 sql_help.c:3916 +#: sql_help.c:3919 sql_help.c:3923 sql_help.c:3927 sql_help.c:3935 +#: sql_help.c:4149 sql_help.c:4155 sql_help.c:4158 sql_help.c:4162 +#: sql_help.c:4166 sql_help.c:4174 sql_help.c:4232 sql_help.c:4343 +#: sql_help.c:4349 sql_help.c:4352 sql_help.c:4356 sql_help.c:4360 +#: sql_help.c:4368 msgid "alias" msgstr "псевдоним" -#: sql_help.c:2849 +#: sql_help.c:2844 msgid "using_list" msgstr "список_USING" -#: sql_help.c:2851 sql_help.c:3298 sql_help.c:3538 sql_help.c:4245 +#: sql_help.c:2846 sql_help.c:3293 sql_help.c:3534 sql_help.c:4241 
msgid "cursor_name" msgstr "имя_курсора" -#: sql_help.c:2852 sql_help.c:3465 sql_help.c:4246 +#: sql_help.c:2847 sql_help.c:3461 sql_help.c:4242 msgid "output_expression" msgstr "выражение_результата" -#: sql_help.c:2853 sql_help.c:3466 sql_help.c:3898 sql_help.c:3987 -#: sql_help.c:4137 sql_help.c:4247 sql_help.c:4331 +#: sql_help.c:2848 sql_help.c:3462 sql_help.c:3894 sql_help.c:3983 +#: sql_help.c:4133 sql_help.c:4243 sql_help.c:4327 msgid "output_name" msgstr "имя_результата" -#: sql_help.c:2869 +#: sql_help.c:2864 msgid "code" msgstr "внедрённый_код" -#: sql_help.c:3244 +#: sql_help.c:3239 msgid "parameter" msgstr "параметр" -#: sql_help.c:3264 sql_help.c:3265 sql_help.c:3563 +#: sql_help.c:3259 sql_help.c:3260 sql_help.c:3559 msgid "statement" msgstr "оператор" -#: sql_help.c:3297 sql_help.c:3537 +#: sql_help.c:3292 sql_help.c:3533 msgid "direction" msgstr "направление" -#: sql_help.c:3299 sql_help.c:3539 +#: sql_help.c:3294 sql_help.c:3535 msgid "where direction can be empty or one of:" msgstr "где допустимое направление пустое или:" -#: sql_help.c:3300 sql_help.c:3301 sql_help.c:3302 sql_help.c:3303 -#: sql_help.c:3304 sql_help.c:3540 sql_help.c:3541 sql_help.c:3542 -#: sql_help.c:3543 sql_help.c:3544 sql_help.c:3908 sql_help.c:3910 -#: sql_help.c:3998 sql_help.c:4000 sql_help.c:4147 sql_help.c:4149 -#: sql_help.c:4276 sql_help.c:4278 sql_help.c:4341 sql_help.c:4343 +#: sql_help.c:3295 sql_help.c:3296 sql_help.c:3297 sql_help.c:3298 +#: sql_help.c:3299 sql_help.c:3536 sql_help.c:3537 sql_help.c:3538 +#: sql_help.c:3539 sql_help.c:3540 sql_help.c:3904 sql_help.c:3906 +#: sql_help.c:3994 sql_help.c:3996 sql_help.c:4143 sql_help.c:4145 +#: sql_help.c:4272 sql_help.c:4274 sql_help.c:4337 sql_help.c:4339 msgid "count" msgstr "число" -#: sql_help.c:3384 sql_help.c:3716 +#: sql_help.c:3379 sql_help.c:3712 msgid "sequence_name" msgstr "имя_последовательности" -#: sql_help.c:3397 sql_help.c:3729 +#: sql_help.c:3392 sql_help.c:3725 msgid "arg_name" msgstr "имя_аргумента" -#: sql_help.c:3398 sql_help.c:3730 +#: sql_help.c:3393 sql_help.c:3726 msgid "arg_type" msgstr "тип_аргумента" -#: sql_help.c:3403 sql_help.c:3735 +#: sql_help.c:3398 sql_help.c:3731 msgid "loid" msgstr "код_БО" -#: sql_help.c:3426 +#: sql_help.c:3421 msgid "remote_schema" msgstr "удалённая_схема" -#: sql_help.c:3429 +#: sql_help.c:3424 msgid "local_schema" msgstr "локальная_схема" -#: sql_help.c:3463 +#: sql_help.c:3459 msgid "conflict_target" msgstr "объект_конфликта" -#: sql_help.c:3464 +#: sql_help.c:3460 msgid "conflict_action" msgstr "действие_при_конфликте" -#: sql_help.c:3467 +#: sql_help.c:3463 msgid "where conflict_target can be one of:" msgstr "где допустимый объект_конфликта:" -#: sql_help.c:3468 +#: sql_help.c:3464 msgid "index_column_name" msgstr "имя_столбца_индекса" -#: sql_help.c:3469 +#: sql_help.c:3465 msgid "index_expression" msgstr "выражение_индекса" -#: sql_help.c:3472 +#: sql_help.c:3468 msgid "index_predicate" msgstr "предикат_индекса" -#: sql_help.c:3474 +#: sql_help.c:3470 msgid "and conflict_action is one of:" msgstr "а допустимое действие_при_конфликте:" -#: sql_help.c:3480 sql_help.c:4242 +#: sql_help.c:3476 sql_help.c:4238 msgid "sub-SELECT" msgstr "вложенный_SELECT" -#: sql_help.c:3489 sql_help.c:3552 sql_help.c:4218 +#: sql_help.c:3485 sql_help.c:3548 sql_help.c:4214 msgid "channel" msgstr "канал" -#: sql_help.c:3511 +#: sql_help.c:3507 msgid "lockmode" msgstr "режим_блокировки" -#: sql_help.c:3512 +#: sql_help.c:3508 msgid "where lockmode is one of:" msgstr "где допустимый 
режим_блокировки:" -#: sql_help.c:3553 +#: sql_help.c:3549 msgid "payload" msgstr "сообщение_нагрузка" -#: sql_help.c:3580 +#: sql_help.c:3576 msgid "old_role" msgstr "старая_роль" -#: sql_help.c:3581 +#: sql_help.c:3577 msgid "new_role" msgstr "новая_роль" -#: sql_help.c:3606 sql_help.c:3767 sql_help.c:3775 +#: sql_help.c:3602 sql_help.c:3763 sql_help.c:3771 msgid "savepoint_name" msgstr "имя_точки_сохранения" -#: sql_help.c:3899 sql_help.c:3941 sql_help.c:3943 sql_help.c:3989 -#: sql_help.c:4138 sql_help.c:4180 sql_help.c:4182 sql_help.c:4332 -#: sql_help.c:4374 sql_help.c:4376 +#: sql_help.c:3895 sql_help.c:3937 sql_help.c:3939 sql_help.c:3985 +#: sql_help.c:4134 sql_help.c:4176 sql_help.c:4178 sql_help.c:4328 +#: sql_help.c:4370 sql_help.c:4372 msgid "from_item" msgstr "источник_данных" -#: sql_help.c:3901 sql_help.c:3953 sql_help.c:4140 sql_help.c:4192 -#: sql_help.c:4334 sql_help.c:4386 +#: sql_help.c:3897 sql_help.c:3949 sql_help.c:4136 sql_help.c:4188 +#: sql_help.c:4330 sql_help.c:4382 msgid "grouping_element" msgstr "элемент_группирования" -#: sql_help.c:3903 sql_help.c:3993 sql_help.c:4142 sql_help.c:4336 +#: sql_help.c:3899 sql_help.c:3989 sql_help.c:4138 sql_help.c:4332 msgid "window_name" msgstr "имя_окна" -#: sql_help.c:3904 sql_help.c:3994 sql_help.c:4143 sql_help.c:4337 +#: sql_help.c:3900 sql_help.c:3990 sql_help.c:4139 sql_help.c:4333 msgid "window_definition" msgstr "определение_окна" -#: sql_help.c:3905 sql_help.c:3919 sql_help.c:3957 sql_help.c:3995 -#: sql_help.c:4144 sql_help.c:4158 sql_help.c:4196 sql_help.c:4338 -#: sql_help.c:4352 sql_help.c:4390 +#: sql_help.c:3901 sql_help.c:3915 sql_help.c:3953 sql_help.c:3991 +#: sql_help.c:4140 sql_help.c:4154 sql_help.c:4192 sql_help.c:4334 +#: sql_help.c:4348 sql_help.c:4386 msgid "select" msgstr "select" -#: sql_help.c:3912 sql_help.c:4151 sql_help.c:4345 +#: sql_help.c:3908 sql_help.c:4147 sql_help.c:4341 msgid "where from_item can be one of:" msgstr "где допустимый источник_данных:" -#: sql_help.c:3915 sql_help.c:3921 sql_help.c:3924 sql_help.c:3928 -#: sql_help.c:3940 sql_help.c:4154 sql_help.c:4160 sql_help.c:4163 -#: sql_help.c:4167 sql_help.c:4179 sql_help.c:4348 sql_help.c:4354 -#: sql_help.c:4357 sql_help.c:4361 sql_help.c:4373 +#: sql_help.c:3911 sql_help.c:3917 sql_help.c:3920 sql_help.c:3924 +#: sql_help.c:3936 sql_help.c:4150 sql_help.c:4156 sql_help.c:4159 +#: sql_help.c:4163 sql_help.c:4175 sql_help.c:4344 sql_help.c:4350 +#: sql_help.c:4353 sql_help.c:4357 sql_help.c:4369 msgid "column_alias" msgstr "псевдоним_столбца" -#: sql_help.c:3916 sql_help.c:4155 sql_help.c:4349 +#: sql_help.c:3912 sql_help.c:4151 sql_help.c:4345 msgid "sampling_method" msgstr "метод_выборки" -#: sql_help.c:3917 sql_help.c:3926 sql_help.c:3930 sql_help.c:3934 -#: sql_help.c:3937 sql_help.c:4156 sql_help.c:4165 sql_help.c:4169 -#: sql_help.c:4173 sql_help.c:4176 sql_help.c:4350 sql_help.c:4359 -#: sql_help.c:4363 sql_help.c:4367 sql_help.c:4370 +#: sql_help.c:3913 sql_help.c:3922 sql_help.c:3926 sql_help.c:3930 +#: sql_help.c:3933 sql_help.c:4152 sql_help.c:4161 sql_help.c:4165 +#: sql_help.c:4169 sql_help.c:4172 sql_help.c:4346 sql_help.c:4355 +#: sql_help.c:4359 sql_help.c:4363 sql_help.c:4366 msgid "argument" msgstr "аргумент" -#: sql_help.c:3918 sql_help.c:4157 sql_help.c:4351 +#: sql_help.c:3914 sql_help.c:4153 sql_help.c:4347 msgid "seed" msgstr "начальное_число" -#: sql_help.c:3922 sql_help.c:3955 sql_help.c:4161 sql_help.c:4194 -#: sql_help.c:4355 sql_help.c:4388 +#: sql_help.c:3918 sql_help.c:3951 sql_help.c:4157 
sql_help.c:4190 +#: sql_help.c:4351 sql_help.c:4384 msgid "with_query_name" msgstr "имя_запроса_WITH" -#: sql_help.c:3932 sql_help.c:3935 sql_help.c:3938 sql_help.c:4171 -#: sql_help.c:4174 sql_help.c:4177 sql_help.c:4365 sql_help.c:4368 -#: sql_help.c:4371 +#: sql_help.c:3928 sql_help.c:3931 sql_help.c:3934 sql_help.c:4167 +#: sql_help.c:4170 sql_help.c:4173 sql_help.c:4361 sql_help.c:4364 +#: sql_help.c:4367 msgid "column_definition" msgstr "определение_столбца" -#: sql_help.c:3942 sql_help.c:4181 sql_help.c:4375 +#: sql_help.c:3938 sql_help.c:4177 sql_help.c:4371 msgid "join_type" msgstr "тип_соединения" -#: sql_help.c:3944 sql_help.c:4183 sql_help.c:4377 +#: sql_help.c:3940 sql_help.c:4179 sql_help.c:4373 msgid "join_condition" msgstr "условие_соединения" -#: sql_help.c:3945 sql_help.c:4184 sql_help.c:4378 +#: sql_help.c:3941 sql_help.c:4180 sql_help.c:4374 msgid "join_column" msgstr "столбец_соединения" -#: sql_help.c:3946 sql_help.c:4185 sql_help.c:4379 +#: sql_help.c:3942 sql_help.c:4181 sql_help.c:4375 msgid "and grouping_element can be one of:" msgstr "где допустимый элемент_группирования:" -#: sql_help.c:3954 sql_help.c:4193 sql_help.c:4387 +#: sql_help.c:3950 sql_help.c:4189 sql_help.c:4383 msgid "and with_query is:" msgstr "и запрос_WITH:" -#: sql_help.c:3958 sql_help.c:4197 sql_help.c:4391 +#: sql_help.c:3954 sql_help.c:4193 sql_help.c:4387 msgid "values" msgstr "значения" -#: sql_help.c:3959 sql_help.c:4198 sql_help.c:4392 +#: sql_help.c:3955 sql_help.c:4194 sql_help.c:4388 msgid "insert" msgstr "insert" -#: sql_help.c:3960 sql_help.c:4199 sql_help.c:4393 +#: sql_help.c:3956 sql_help.c:4195 sql_help.c:4389 msgid "update" msgstr "update" -#: sql_help.c:3961 sql_help.c:4200 sql_help.c:4394 +#: sql_help.c:3957 sql_help.c:4196 sql_help.c:4390 msgid "delete" msgstr "delete" -#: sql_help.c:3988 +#: sql_help.c:3984 msgid "new_table" msgstr "новая_таблица" -#: sql_help.c:4013 +#: sql_help.c:4009 msgid "timezone" msgstr "часовой_пояс" -#: sql_help.c:4058 +#: sql_help.c:4054 msgid "snapshot_id" msgstr "код_снимка" -#: sql_help.c:4243 +#: sql_help.c:4239 msgid "from_list" msgstr "список_FROM" -#: sql_help.c:4274 +#: sql_help.c:4270 msgid "sort_expression" msgstr "выражение_сортировки" -#: sql_help.c:4401 sql_help.c:5186 +#: sql_help.c:4397 sql_help.c:5182 msgid "abort the current transaction" msgstr "прервать текущую транзакцию" -#: sql_help.c:4406 +#: sql_help.c:4402 msgid "change the definition of an aggregate function" msgstr "изменить определение агрегатной функции" -#: sql_help.c:4411 +#: sql_help.c:4407 msgid "change the definition of a collation" msgstr "изменить определение правила сортировки" -#: sql_help.c:4416 +#: sql_help.c:4412 msgid "change the definition of a conversion" msgstr "изменить определение преобразования" -#: sql_help.c:4421 +#: sql_help.c:4417 msgid "change a database" msgstr "изменить атрибуты базы данных" -#: sql_help.c:4426 +#: sql_help.c:4422 msgid "define default access privileges" msgstr "определить права доступа по умолчанию" -#: sql_help.c:4431 +#: sql_help.c:4427 msgid "change the definition of a domain" msgstr "изменить определение домена" -#: sql_help.c:4436 +#: sql_help.c:4432 msgid "change the definition of an event trigger" msgstr "изменить определение событийного триггера" -#: sql_help.c:4441 +#: sql_help.c:4437 msgid "change the definition of an extension" msgstr "изменить определение расширения" -#: sql_help.c:4446 +#: sql_help.c:4442 msgid "change the definition of a foreign-data wrapper" msgstr "изменить определение обёртки сторонних данных" 
-#: sql_help.c:4451 +#: sql_help.c:4447 msgid "change the definition of a foreign table" msgstr "изменить определение сторонней таблицы" -#: sql_help.c:4456 +#: sql_help.c:4452 msgid "change the definition of a function" msgstr "изменить определение функции" -#: sql_help.c:4461 +#: sql_help.c:4457 msgid "change role name or membership" msgstr "изменить имя роли или членство" -#: sql_help.c:4466 +#: sql_help.c:4462 msgid "change the definition of an index" msgstr "изменить определение индекса" -#: sql_help.c:4471 +#: sql_help.c:4467 msgid "change the definition of a procedural language" msgstr "изменить определение процедурного языка" -#: sql_help.c:4476 +#: sql_help.c:4472 msgid "change the definition of a large object" msgstr "изменить определение большого объекта" -#: sql_help.c:4481 +#: sql_help.c:4477 msgid "change the definition of a materialized view" msgstr "изменить определение материализованного представления" -#: sql_help.c:4486 +#: sql_help.c:4482 msgid "change the definition of an operator" msgstr "изменить определение оператора" -#: sql_help.c:4491 +#: sql_help.c:4487 msgid "change the definition of an operator class" msgstr "изменить определение класса операторов" -#: sql_help.c:4496 +#: sql_help.c:4492 msgid "change the definition of an operator family" msgstr "изменить определение семейства операторов" -#: sql_help.c:4501 +#: sql_help.c:4497 msgid "change the definition of a row level security policy" msgstr "изменить определение политики безопасности на уровне строк" -#: sql_help.c:4506 +#: sql_help.c:4502 msgid "change the definition of a publication" msgstr "изменить определение публикации" -#: sql_help.c:4511 sql_help.c:4591 +#: sql_help.c:4507 sql_help.c:4587 msgid "change a database role" msgstr "изменить роль пользователя БД" -#: sql_help.c:4516 +#: sql_help.c:4512 msgid "change the definition of a rule" msgstr "изменить определение правила" -#: sql_help.c:4521 +#: sql_help.c:4517 msgid "change the definition of a schema" msgstr "изменить определение схемы" -#: sql_help.c:4526 +#: sql_help.c:4522 msgid "change the definition of a sequence generator" msgstr "изменить определение генератора последовательности" -#: sql_help.c:4531 +#: sql_help.c:4527 msgid "change the definition of a foreign server" msgstr "изменить определение стороннего сервера" -#: sql_help.c:4536 -msgid "change the definition of a extended statistics" -msgstr "изменить определение расширенной статистики" +#: sql_help.c:4532 +msgid "change the definition of an extended statistics object" +msgstr "изменить определение объекта расширенной статистики" -#: sql_help.c:4541 +#: sql_help.c:4537 msgid "change the definition of a subscription" msgstr "изменить определение подписки" -#: sql_help.c:4546 +#: sql_help.c:4542 msgid "change a server configuration parameter" msgstr "изменить параметр конфигурации сервера" -#: sql_help.c:4551 +#: sql_help.c:4547 msgid "change the definition of a table" msgstr "изменить определение таблицы" -#: sql_help.c:4556 +#: sql_help.c:4552 msgid "change the definition of a tablespace" msgstr "изменить определение табличного пространства" -#: sql_help.c:4561 +#: sql_help.c:4557 msgid "change the definition of a text search configuration" msgstr "изменить определение конфигурации текстового поиска" -#: sql_help.c:4566 +#: sql_help.c:4562 msgid "change the definition of a text search dictionary" msgstr "изменить определение словаря текстового поиска" -#: sql_help.c:4571 +#: sql_help.c:4567 msgid "change the definition of a text search parser" msgstr "изменить определение 
анализатора текстового поиска" -#: sql_help.c:4576 +#: sql_help.c:4572 msgid "change the definition of a text search template" msgstr "изменить определение шаблона текстового поиска" -#: sql_help.c:4581 +#: sql_help.c:4577 msgid "change the definition of a trigger" msgstr "изменить определение триггера" -#: sql_help.c:4586 +#: sql_help.c:4582 msgid "change the definition of a type" msgstr "изменить определение типа" -#: sql_help.c:4596 +#: sql_help.c:4592 msgid "change the definition of a user mapping" msgstr "изменить сопоставление пользователей" -#: sql_help.c:4601 +#: sql_help.c:4597 msgid "change the definition of a view" msgstr "изменить определение представления" -#: sql_help.c:4606 +#: sql_help.c:4602 msgid "collect statistics about a database" msgstr "собрать статистику о базе данных" -#: sql_help.c:4611 sql_help.c:5251 +#: sql_help.c:4607 sql_help.c:5247 msgid "start a transaction block" msgstr "начать транзакцию" -#: sql_help.c:4616 -msgid "force a transaction log checkpoint" -msgstr "отметить контрольную точку в журнале транзакций" +#: sql_help.c:4612 +msgid "force a write-ahead log checkpoint" +msgstr "произвести контрольную точку в журнале предзаписи" -#: sql_help.c:4621 +#: sql_help.c:4617 msgid "close a cursor" msgstr "закрыть курсор" -#: sql_help.c:4626 +#: sql_help.c:4622 msgid "cluster a table according to an index" msgstr "перегруппировать таблицу по индексу" -#: sql_help.c:4631 +#: sql_help.c:4627 msgid "define or change the comment of an object" msgstr "задать или изменить комментарий объекта" -#: sql_help.c:4636 sql_help.c:5086 +#: sql_help.c:4632 sql_help.c:5082 msgid "commit the current transaction" msgstr "зафиксировать текущую транзакцию" -#: sql_help.c:4641 +#: sql_help.c:4637 msgid "commit a transaction that was earlier prepared for two-phase commit" msgstr "зафиксировать транзакцию, ранее подготовленную для двухфазной фиксации" -#: sql_help.c:4646 +#: sql_help.c:4642 msgid "copy data between a file and a table" msgstr "импорт/экспорт данных в файл" -#: sql_help.c:4651 +#: sql_help.c:4647 msgid "define a new access method" msgstr "создать новый метод доступа" -#: sql_help.c:4656 +#: sql_help.c:4652 msgid "define a new aggregate function" msgstr "создать агрегатную функцию" -#: sql_help.c:4661 +#: sql_help.c:4657 msgid "define a new cast" msgstr "создать приведение типов" -#: sql_help.c:4666 +#: sql_help.c:4662 msgid "define a new collation" msgstr "создать правило сортировки" -#: sql_help.c:4671 +#: sql_help.c:4667 msgid "define a new encoding conversion" msgstr "создать преобразование кодировки" -#: sql_help.c:4676 +#: sql_help.c:4672 msgid "create a new database" msgstr "создать базу данных" -#: sql_help.c:4681 +#: sql_help.c:4677 msgid "define a new domain" msgstr "создать домен" -#: sql_help.c:4686 +#: sql_help.c:4682 msgid "define a new event trigger" msgstr "создать событийный триггер" -#: sql_help.c:4691 +#: sql_help.c:4687 msgid "install an extension" msgstr "установить расширение" -#: sql_help.c:4696 +#: sql_help.c:4692 msgid "define a new foreign-data wrapper" msgstr "создать обёртку сторонних данных" -#: sql_help.c:4701 +#: sql_help.c:4697 msgid "define a new foreign table" msgstr "создать стороннюю таблицу" -#: sql_help.c:4706 +#: sql_help.c:4702 msgid "define a new function" msgstr "создать функцию" -#: sql_help.c:4711 sql_help.c:4756 sql_help.c:4841 +#: sql_help.c:4707 sql_help.c:4752 sql_help.c:4837 msgid "define a new database role" msgstr "создать роль пользователя БД" -#: sql_help.c:4716 +#: sql_help.c:4712 msgid "define a new index" msgstr 
"создать индекс" -#: sql_help.c:4721 +#: sql_help.c:4717 msgid "define a new procedural language" msgstr "создать процедурный язык" -#: sql_help.c:4726 +#: sql_help.c:4722 msgid "define a new materialized view" msgstr "создать материализованное представление" -#: sql_help.c:4731 +#: sql_help.c:4727 msgid "define a new operator" msgstr "создать оператор" -#: sql_help.c:4736 +#: sql_help.c:4732 msgid "define a new operator class" msgstr "создать класс операторов" -#: sql_help.c:4741 +#: sql_help.c:4737 msgid "define a new operator family" msgstr "создать семейство операторов" -#: sql_help.c:4746 +#: sql_help.c:4742 msgid "define a new row level security policy for a table" msgstr "создать новую политику безопасности на уровне строк для таблицы" -#: sql_help.c:4751 +#: sql_help.c:4747 msgid "define a new publication" msgstr "создать публикацию" -#: sql_help.c:4761 +#: sql_help.c:4757 msgid "define a new rewrite rule" msgstr "создать правило перезаписи" -#: sql_help.c:4766 +#: sql_help.c:4762 msgid "define a new schema" msgstr "создать схему" -#: sql_help.c:4771 +#: sql_help.c:4767 msgid "define a new sequence generator" msgstr "создать генератор последовательностей" -#: sql_help.c:4776 +#: sql_help.c:4772 msgid "define a new foreign server" msgstr "создать сторонний сервер" -#: sql_help.c:4781 +#: sql_help.c:4777 msgid "define extended statistics" msgstr "создать расширенную статистику" -#: sql_help.c:4786 +#: sql_help.c:4782 msgid "define a new subscription" msgstr "создать подписку" -#: sql_help.c:4791 +#: sql_help.c:4787 msgid "define a new table" msgstr "создать таблицу" -#: sql_help.c:4796 sql_help.c:5216 +#: sql_help.c:4792 sql_help.c:5212 msgid "define a new table from the results of a query" msgstr "создать таблицу из результатов запроса" -#: sql_help.c:4801 +#: sql_help.c:4797 msgid "define a new tablespace" msgstr "создать табличное пространство" -#: sql_help.c:4806 +#: sql_help.c:4802 msgid "define a new text search configuration" msgstr "создать конфигурацию текстового поиска" -#: sql_help.c:4811 +#: sql_help.c:4807 msgid "define a new text search dictionary" msgstr "создать словарь текстового поиска" -#: sql_help.c:4816 +#: sql_help.c:4812 msgid "define a new text search parser" msgstr "создать анализатор текстового поиска" -#: sql_help.c:4821 +#: sql_help.c:4817 msgid "define a new text search template" msgstr "создать шаблон текстового поиска" -#: sql_help.c:4826 +#: sql_help.c:4822 msgid "define a new transform" msgstr "создать преобразование" -#: sql_help.c:4831 +#: sql_help.c:4827 msgid "define a new trigger" msgstr "создать триггер" -#: sql_help.c:4836 +#: sql_help.c:4832 msgid "define a new data type" msgstr "создать тип данных" -#: sql_help.c:4846 +#: sql_help.c:4842 msgid "define a new mapping of a user to a foreign server" msgstr "создать сопоставление пользователя для стороннего сервера" -#: sql_help.c:4851 +#: sql_help.c:4847 msgid "define a new view" msgstr "создать представление" -#: sql_help.c:4856 +#: sql_help.c:4852 msgid "deallocate a prepared statement" msgstr "освободить подготовленный оператор" -#: sql_help.c:4861 +#: sql_help.c:4857 msgid "define a cursor" msgstr "создать курсор" -#: sql_help.c:4866 +#: sql_help.c:4862 msgid "delete rows of a table" msgstr "удалить записи таблицы" -#: sql_help.c:4871 +#: sql_help.c:4867 msgid "discard session state" msgstr "очистить состояние сеанса" -#: sql_help.c:4876 +#: sql_help.c:4872 msgid "execute an anonymous code block" msgstr "выполнить анонимный блок кода" -#: sql_help.c:4881 +#: sql_help.c:4877 msgid "remove an 
access method" msgstr "удалить метод доступа" -#: sql_help.c:4886 +#: sql_help.c:4882 msgid "remove an aggregate function" msgstr "удалить агрегатную функцию" -#: sql_help.c:4891 +#: sql_help.c:4887 msgid "remove a cast" msgstr "удалить приведение типа" -#: sql_help.c:4896 +#: sql_help.c:4892 msgid "remove a collation" msgstr "удалить правило сортировки" -#: sql_help.c:4901 +#: sql_help.c:4897 msgid "remove a conversion" msgstr "удалить преобразование" -#: sql_help.c:4906 +#: sql_help.c:4902 msgid "remove a database" msgstr "удалить базу данных" -#: sql_help.c:4911 +#: sql_help.c:4907 msgid "remove a domain" msgstr "удалить домен" -#: sql_help.c:4916 +#: sql_help.c:4912 msgid "remove an event trigger" msgstr "удалить событийный триггер" -#: sql_help.c:4921 +#: sql_help.c:4917 msgid "remove an extension" msgstr "удалить расширение" -#: sql_help.c:4926 +#: sql_help.c:4922 msgid "remove a foreign-data wrapper" msgstr "удалить обёртку сторонних данных" -#: sql_help.c:4931 +#: sql_help.c:4927 msgid "remove a foreign table" msgstr "удалить стороннюю таблицу" -#: sql_help.c:4936 +#: sql_help.c:4932 msgid "remove a function" msgstr "удалить функцию" -#: sql_help.c:4941 sql_help.c:4991 sql_help.c:5071 +#: sql_help.c:4937 sql_help.c:4987 sql_help.c:5067 msgid "remove a database role" msgstr "удалить роль пользователя БД" -#: sql_help.c:4946 +#: sql_help.c:4942 msgid "remove an index" msgstr "удалить индекс" -#: sql_help.c:4951 +#: sql_help.c:4947 msgid "remove a procedural language" msgstr "удалить процедурный язык" -#: sql_help.c:4956 +#: sql_help.c:4952 msgid "remove a materialized view" msgstr "удалить материализованное представление" -#: sql_help.c:4961 +#: sql_help.c:4957 msgid "remove an operator" msgstr "удалить оператор" -#: sql_help.c:4966 +#: sql_help.c:4962 msgid "remove an operator class" msgstr "удалить класс операторов" -#: sql_help.c:4971 +#: sql_help.c:4967 msgid "remove an operator family" msgstr "удалить семейство операторов" -#: sql_help.c:4976 +#: sql_help.c:4972 msgid "remove database objects owned by a database role" msgstr "удалить объекты базы данных, принадлежащие роли" -#: sql_help.c:4981 +#: sql_help.c:4977 msgid "remove a row level security policy from a table" msgstr "удалить политику безопасности на уровне строк из таблицы" -#: sql_help.c:4986 +#: sql_help.c:4982 msgid "remove a publication" msgstr "удалить публикацию" -#: sql_help.c:4996 +#: sql_help.c:4992 msgid "remove a rewrite rule" msgstr "удалить правило перезаписи" -#: sql_help.c:5001 +#: sql_help.c:4997 msgid "remove a schema" msgstr "удалить схему" -#: sql_help.c:5006 +#: sql_help.c:5002 msgid "remove a sequence" msgstr "удалить последовательность" -#: sql_help.c:5011 +#: sql_help.c:5007 msgid "remove a foreign server descriptor" msgstr "удалить описание стороннего сервера" -#: sql_help.c:5016 +#: sql_help.c:5012 msgid "remove extended statistics" msgstr "удалить расширенную статистику" -#: sql_help.c:5021 +#: sql_help.c:5017 msgid "remove a subscription" msgstr "удалить подписку" -#: sql_help.c:5026 +#: sql_help.c:5022 msgid "remove a table" msgstr "удалить таблицу" -#: sql_help.c:5031 +#: sql_help.c:5027 msgid "remove a tablespace" msgstr "удалить табличное пространство" -#: sql_help.c:5036 +#: sql_help.c:5032 msgid "remove a text search configuration" msgstr "удалить конфигурацию текстового поиска" -#: sql_help.c:5041 +#: sql_help.c:5037 msgid "remove a text search dictionary" msgstr "удалить словарь текстового поиска" -#: sql_help.c:5046 +#: sql_help.c:5042 msgid "remove a text search parser" msgstr 
"удалить анализатор текстового поиска" -#: sql_help.c:5051 +#: sql_help.c:5047 msgid "remove a text search template" msgstr "удалить шаблон текстового поиска" -#: sql_help.c:5056 +#: sql_help.c:5052 msgid "remove a transform" msgstr "удалить преобразование" -#: sql_help.c:5061 +#: sql_help.c:5057 msgid "remove a trigger" msgstr "удалить триггер" -#: sql_help.c:5066 +#: sql_help.c:5062 msgid "remove a data type" msgstr "удалить тип данных" -#: sql_help.c:5076 +#: sql_help.c:5072 msgid "remove a user mapping for a foreign server" msgstr "удалить сопоставление пользователя для стороннего сервера" -#: sql_help.c:5081 +#: sql_help.c:5077 msgid "remove a view" msgstr "удалить представление" -#: sql_help.c:5091 +#: sql_help.c:5087 msgid "execute a prepared statement" msgstr "выполнить подготовленный оператор" -#: sql_help.c:5096 +#: sql_help.c:5092 msgid "show the execution plan of a statement" msgstr "показать план выполнения оператора" -#: sql_help.c:5101 +#: sql_help.c:5097 msgid "retrieve rows from a query using a cursor" msgstr "получить результат запроса через курсор" -#: sql_help.c:5106 +#: sql_help.c:5102 msgid "define access privileges" msgstr "определить права доступа" -#: sql_help.c:5111 +#: sql_help.c:5107 msgid "import table definitions from a foreign server" msgstr "импортировать определения таблиц со стороннего сервера" -#: sql_help.c:5116 +#: sql_help.c:5112 msgid "create new rows in a table" msgstr "добавить строки в таблицу" -#: sql_help.c:5121 +#: sql_help.c:5117 msgid "listen for a notification" msgstr "ожидать уведомления" -#: sql_help.c:5126 +#: sql_help.c:5122 msgid "load a shared library file" msgstr "загрузить файл разделяемой библиотеки" -#: sql_help.c:5131 +#: sql_help.c:5127 msgid "lock a table" msgstr "заблокировать таблицу" -#: sql_help.c:5136 +#: sql_help.c:5132 msgid "position a cursor" msgstr "установить курсор" -#: sql_help.c:5141 +#: sql_help.c:5137 msgid "generate a notification" msgstr "сгенерировать уведомление" -#: sql_help.c:5146 +#: sql_help.c:5142 msgid "prepare a statement for execution" msgstr "подготовить оператор для выполнения" -#: sql_help.c:5151 +#: sql_help.c:5147 msgid "prepare the current transaction for two-phase commit" msgstr "подготовить текущую транзакцию для двухфазной фиксации" -#: sql_help.c:5156 +#: sql_help.c:5152 msgid "change the ownership of database objects owned by a database role" msgstr "изменить владельца объектов БД, принадлежащих заданной роли" -#: sql_help.c:5161 +#: sql_help.c:5157 msgid "replace the contents of a materialized view" msgstr "заменить содержимое материализованного представления" -#: sql_help.c:5166 +#: sql_help.c:5162 msgid "rebuild indexes" msgstr "перестроить индексы" -#: sql_help.c:5171 +#: sql_help.c:5167 msgid "destroy a previously defined savepoint" msgstr "удалить ранее определённую точку сохранения" -#: sql_help.c:5176 +#: sql_help.c:5172 msgid "restore the value of a run-time parameter to the default value" msgstr "восстановить исходное значение параметра выполнения" -#: sql_help.c:5181 +#: sql_help.c:5177 msgid "remove access privileges" msgstr "удалить права доступа" -#: sql_help.c:5191 +#: sql_help.c:5187 msgid "cancel a transaction that was earlier prepared for two-phase commit" msgstr "отменить транзакцию, подготовленную ранее для двухфазной фиксации" -#: sql_help.c:5196 +#: sql_help.c:5192 msgid "roll back to a savepoint" msgstr "откатиться к точке сохранения" -#: sql_help.c:5201 +#: sql_help.c:5197 msgid "define a new savepoint within the current transaction" msgstr "определить новую точку 
сохранения в текущей транзакции" -#: sql_help.c:5206 +#: sql_help.c:5202 msgid "define or change a security label applied to an object" msgstr "задать или изменить метку безопасности, применённую к объекту" -#: sql_help.c:5211 sql_help.c:5256 sql_help.c:5286 +#: sql_help.c:5207 sql_help.c:5252 sql_help.c:5282 msgid "retrieve rows from a table or view" msgstr "выбрать строки из таблицы или представления" -#: sql_help.c:5221 +#: sql_help.c:5217 msgid "change a run-time parameter" msgstr "изменить параметр выполнения" -#: sql_help.c:5226 +#: sql_help.c:5222 msgid "set constraint check timing for the current transaction" msgstr "установить время проверки ограничений для текущей транзакции" -#: sql_help.c:5231 +#: sql_help.c:5227 msgid "set the current user identifier of the current session" msgstr "задать идентификатор текущего пользователя в текущем сеансе" -#: sql_help.c:5236 +#: sql_help.c:5232 msgid "" "set the session user identifier and the current user identifier of the " "current session" @@ -5825,45 +5933,45 @@ msgstr "" "задать идентификатор пользователя сеанса и идентификатор текущего " "пользователя в текущем сеансе" -#: sql_help.c:5241 +#: sql_help.c:5237 msgid "set the characteristics of the current transaction" msgstr "задать свойства текущей транзакции" -#: sql_help.c:5246 +#: sql_help.c:5242 msgid "show the value of a run-time parameter" msgstr "показать значение параметра выполнения" -#: sql_help.c:5261 +#: sql_help.c:5257 msgid "empty a table or set of tables" msgstr "опустошить таблицу или набор таблиц" -#: sql_help.c:5266 +#: sql_help.c:5262 msgid "stop listening for a notification" msgstr "прекратить ожидание уведомлений" -#: sql_help.c:5271 +#: sql_help.c:5267 msgid "update rows of a table" msgstr "изменить строки таблицы" -#: sql_help.c:5276 +#: sql_help.c:5272 msgid "garbage-collect and optionally analyze a database" msgstr "произвести сборку мусора и проанализировать базу данных" -#: sql_help.c:5281 +#: sql_help.c:5277 msgid "compute a set of rows" msgstr "получить набор строк" -#: startup.c:184 +#: startup.c:187 #, c-format msgid "%s: -1 can only be used in non-interactive mode\n" msgstr "%s: -1 можно использовать только в неинтерактивном режиме\n" -#: startup.c:287 +#: startup.c:290 #, c-format msgid "%s: could not open log file \"%s\": %s\n" msgstr "%s: не удалось открыть файл протокола \"%s\": %s\n" -#: startup.c:394 +#: startup.c:397 #, c-format msgid "" "Type \"help\" for help.\n" @@ -5872,27 +5980,27 @@ msgstr "" "Введите \"help\", чтобы получить справку.\n" "\n" -#: startup.c:543 +#: startup.c:546 #, c-format msgid "%s: could not set printing parameter \"%s\"\n" msgstr "%s: не удалось установить параметр печати \"%s\"\n" -#: startup.c:645 +#: startup.c:648 #, c-format msgid "Try \"%s --help\" for more information.\n" msgstr "Для дополнительной информации попробуйте \"%s --help\".\n" -#: startup.c:662 +#: startup.c:665 #, c-format msgid "%s: warning: extra command-line argument \"%s\" ignored\n" msgstr "%s: предупреждение: лишний аргумент \"%s\" проигнорирован\n" -#: startup.c:711 +#: startup.c:714 #, c-format msgid "%s: could not find own program executable\n" msgstr "%s: не удалось найти свой исполняемый файл\n" -#: tab-complete.c:4084 +#: tab-complete.c:4186 #, c-format msgid "" "tab completion query failed: %s\n" @@ -5905,7 +6013,7 @@ msgstr "" #: variables.c:139 #, c-format -msgid "unrecognized value \"%s\" for \"%s\": boolean expected\n" +msgid "unrecognized value \"%s\" for \"%s\": Boolean expected\n" msgstr "" "нераспознанное значение \"%s\" для \"%s\": 
ожидалось булевское значение\n" @@ -5928,6 +6036,50 @@ msgstr "" "нераспознанное значение \"%s\" для \"%s\"\n" "Допустимые значения: %s.\n" +#~ msgid "Value" +#~ msgstr "Значение" + +#~ msgid "statistic_type" +#~ msgstr "тип_статистики" + +#~ msgid "No per-database role settings support in this server version.\n" +#~ msgstr "" +#~ "Это версия сервера не поддерживает параметры ролей на уровне базы " +#~ "данных.\n" + +#~ msgid "No matching settings found.\n" +#~ msgstr "Соответствующие параметры не найдены.\n" + +#~ msgid "No settings found.\n" +#~ msgstr "Параметры не найдены.\n" + +#~ msgid "No matching relations found.\n" +#~ msgstr "Соответствующие отношения не найдены.\n" + +#~ msgid "No relations found.\n" +#~ msgstr "Отношения не найдены.\n" + +#~ msgid "Object Description" +#~ msgstr "Описание объекта" + +#~ msgid "Password encryption failed.\n" +#~ msgstr "Ошибка при шифровании пароля.\n" + +#~ msgid "suboption" +#~ msgstr "подпараметр" + +#~ msgid "where suboption can be:" +#~ msgstr "где допустимые подпараметры:" + +#~ msgid "slot_name" +#~ msgstr "имя_слота" + +#~ msgid "puboption" +#~ msgstr "параметр_публикации" + +#~ msgid "where puboption can be:" +#~ msgstr "где допустимый параметр_публикации:" + #~ msgid "+ opt(%d) = |%s|\n" #~ msgstr "+ opt(%d) = |%s|\n" diff --git a/src/bin/psql/po/sv.po b/src/bin/psql/po/sv.po index 2f9fac0852..d071a97a1a 100644 --- a/src/bin/psql/po/sv.po +++ b/src/bin/psql/po/sv.po @@ -1,6 +1,6 @@ # Swedish message translation file for psql # Peter Eisentraut , 2001, 2009, 2010. -# Dennis Björklund , 2002, 2003, 2004, 2005, 2006, 2017. +# Dennis Björklund , 2002, 2003, 2004, 2005, 2006, 2017, 2018. # # Use these quotes: "%s" # @@ -8,8 +8,8 @@ msgid "" msgstr "" "Project-Id-Version: PostgreSQL 10\n" "Report-Msgid-Bugs-To: pgsql-bugs@postgresql.org\n" -"POT-Creation-Date: 2017-08-02 18:14+0000\n" -"PO-Revision-Date: 2017-08-06 08:09+0200\n" +"POT-Creation-Date: 2018-04-29 20:15+0000\n" +"PO-Revision-Date: 2018-06-25 08:42+0200\n" "Last-Translator: Dennis Björklund \n" "Language-Team: Swedish \n" "Language: sv\n" @@ -54,8 +54,8 @@ msgid "pclose failed: %s" msgstr "pclose misslyckades: %s" #: ../../common/fe_memutils.c:35 ../../common/fe_memutils.c:75 -#: ../../common/fe_memutils.c:98 command.c:608 input.c:227 mainloop.c:82 -#: mainloop.c:276 +#: ../../common/fe_memutils.c:98 command.c:607 input.c:227 mainloop.c:82 +#: mainloop.c:386 #, c-format msgid "out of memory\n" msgstr "slut på minne\n" @@ -70,7 +70,7 @@ msgstr "kan inte duplicera null-pekare (internt fel)\n" msgid "could not look up effective user ID %ld: %s" msgstr "kunde inte slå upp effektivt användar-id %ld: %s" -#: ../../common/username.c:45 command.c:555 +#: ../../common/username.c:45 command.c:554 msgid "user does not exist" msgstr "användaren finns inte" @@ -121,269 +121,270 @@ msgid_plural "(%lu rows)" msgstr[0] "(%lu rad)" msgstr[1] "(%lu rader)" -#: ../../fe_utils/print.c:2913 +#: ../../fe_utils/print.c:2915 #, c-format msgid "Interrupted\n" msgstr "Avbruten\n" -#: ../../fe_utils/print.c:2977 +#: ../../fe_utils/print.c:2979 #, c-format msgid "Cannot add header to table content: column count of %d exceeded.\n" msgstr "Kan inte lägga till rubrik till tabellinnehåll: antal kolumner (%d) överskridet.\n" -#: ../../fe_utils/print.c:3017 +#: ../../fe_utils/print.c:3019 #, c-format msgid "Cannot add cell to table content: total cell count of %d exceeded.\n" msgstr "Kan inte lägga till cell till tabellinnehåll: totala cellantalet (%d) överskridet.\n" -#: ../../fe_utils/print.c:3266 +#: 
../../fe_utils/print.c:3268 #, c-format msgid "invalid output format (internal error): %d" msgstr "ogiltigt utdataformat (internt fel): %d" -#: ../../fe_utils/psqlscan.l:713 +#: ../../fe_utils/psqlscan.l:715 #, c-format msgid "skipping recursive expansion of variable \"%s\"\n" msgstr "hoppar över rekursiv expandering av variabeln \"%s\"\n" -#: command.c:223 +#: command.c:220 #, c-format msgid "Invalid command \\%s. Try \\? for help.\n" msgstr "Ogiltigt kommando \\%s. Försök med \\? för hjälp.\n" -#: command.c:225 +#: command.c:222 #, c-format msgid "invalid command \\%s\n" msgstr "ogiltigt kommando \\%s\n" -#: command.c:243 +#: command.c:240 #, c-format msgid "\\%s: extra argument \"%s\" ignored\n" msgstr "\\%s: extra argument \"%s\" ignorerat\n" -#: command.c:295 +#: command.c:292 #, c-format msgid "\\%s command ignored; use \\endif or Ctrl-C to exit current \\if block\n" msgstr "kommandot \\%s ignorerat; använd \\endif eller Ctrl-C för att avsluta nuvarande \\if-block\n" -#: command.c:553 +#: command.c:552 #, c-format msgid "could not get home directory for user ID %ld: %s\n" msgstr "kunde inte hämta hemkatalog för användar-ID %ld: %s\n" -#: command.c:571 +#: command.c:570 #, c-format msgid "\\%s: could not change directory to \"%s\": %s\n" msgstr "\\%s: kunde inte byta katalog till \"%s\": %s\n" -#: command.c:596 common.c:648 common.c:706 common.c:1242 +#: command.c:595 common.c:696 common.c:754 common.c:1292 #, c-format msgid "You are currently not connected to a database.\n" msgstr "Du är för närvarande inte uppkopplad mot en databas.\n" -#: command.c:621 +#: command.c:620 #, c-format msgid "You are connected to database \"%s\" as user \"%s\" via socket in \"%s\" at port \"%s\".\n" msgstr "Du är uppkopplad mot databas \"%s\" som användare \"%s\" via uttag i \"%s\" vid port \"%s\".\n" -#: command.c:624 +#: command.c:623 #, c-format msgid "You are connected to database \"%s\" as user \"%s\" on host \"%s\" at port \"%s\".\n" msgstr "Du är uppkopplad upp mot databas \"%s\" som användare \"%s\" på värd \"%s\" via port \"%s\".\n" -#: command.c:915 command.c:1005 command.c:1114 command.c:2523 +#: command.c:914 command.c:1010 command.c:2395 #, c-format msgid "no query buffer\n" msgstr "ingen frågebuffert\n" -#: command.c:948 command.c:4765 +#: command.c:947 command.c:4667 #, c-format msgid "invalid line number: %s\n" msgstr "ogiltigt radnummer: %s\n" -#: command.c:998 +#: command.c:1001 #, c-format msgid "The server (version %s) does not support editing function source.\n" msgstr "Servern (version %s) stöder inte redigering av funktionskällkod.\n" -#: command.c:1073 command.c:1154 -msgid "No changes" -msgstr "Inga ändringar" - -#: command.c:1107 +#: command.c:1004 #, c-format msgid "The server (version %s) does not support editing view definitions.\n" msgstr "Servern (version %s) stöder inte redigering av vydefinitioner.\n" -#: command.c:1231 +#: command.c:1086 +msgid "No changes" +msgstr "Inga ändringar" + +#: command.c:1163 #, c-format msgid "%s: invalid encoding name or conversion procedure not found\n" msgstr "%s: ogiltigt kodningsnamn eller konverteringsprocedur hittades inte\n" -#: command.c:1266 command.c:1888 command.c:3169 command.c:4867 common.c:173 -#: common.c:244 common.c:541 common.c:1288 common.c:1316 common.c:1417 -#: copy.c:489 copy.c:708 large_obj.c:156 large_obj.c:191 large_obj.c:253 +#: command.c:1198 command.c:1837 command.c:3052 command.c:4769 common.c:174 +#: common.c:245 common.c:542 common.c:1338 common.c:1366 common.c:1474 +#: common.c:1577 common.c:1615 copy.c:489 
copy.c:708 large_obj.c:156 +#: large_obj.c:191 large_obj.c:253 #, c-format msgid "%s" msgstr "%s" -#: command.c:1270 +#: command.c:1202 msgid "out of memory" msgstr "slut på minne" -#: command.c:1273 +#: command.c:1205 msgid "There is no previous error." msgstr "Det finns inget tidigare fel." -#: command.c:1444 command.c:1749 command.c:1763 command.c:1780 command.c:1940 -#: command.c:2177 command.c:2490 command.c:2530 +#: command.c:1393 command.c:1698 command.c:1712 command.c:1729 command.c:1889 +#: command.c:2126 command.c:2362 command.c:2402 #, c-format msgid "\\%s: missing required argument\n" msgstr "\\%s: obligatoriskt argument saknas\n" -#: command.c:1575 +#: command.c:1524 #, c-format msgid "\\elif: cannot occur after \\else\n" msgstr "\\elif: kan inte komma efter \\else\n" -#: command.c:1580 +#: command.c:1529 #, c-format msgid "\\elif: no matching \\if\n" msgstr "\\elif: ingen matchande \\if\n" -#: command.c:1644 +#: command.c:1593 #, c-format msgid "\\else: cannot occur after \\else\n" msgstr "\\else: kan inte komma efter \\else\n" -#: command.c:1649 +#: command.c:1598 #, c-format msgid "\\else: no matching \\if\n" msgstr "\\else: ingen matchande \\if\n" -#: command.c:1689 +#: command.c:1638 #, c-format msgid "\\endif: no matching \\if\n" msgstr "\\endif: ingen matchande \\if\n" -#: command.c:1844 +#: command.c:1793 msgid "Query buffer is empty." msgstr "Frågebufferten är tom." -#: command.c:1866 +#: command.c:1815 msgid "Enter new password: " msgstr "Mata in nytt lösenord: " -#: command.c:1867 +#: command.c:1816 msgid "Enter it again: " msgstr "Mata in det igen: " -#: command.c:1871 +#: command.c:1820 #, c-format msgid "Passwords didn't match.\n" msgstr "Lösenorden stämde inte överens.\n" -#: command.c:1970 +#: command.c:1919 #, c-format msgid "\\%s: could not read value for variable\n" msgstr "\\%s: kunde inte läsa värde på varibeln\n" -#: command.c:2073 +#: command.c:2022 msgid "Query buffer reset (cleared)." msgstr "Frågebufferten har blivit borttagen." -#: command.c:2095 +#: command.c:2044 #, c-format msgid "Wrote history to file \"%s\".\n" msgstr "Skrev historiken till fil \"%s\".\n" -#: command.c:2182 +#: command.c:2131 #, c-format msgid "\\%s: environment variable name must not contain \"=\"\n" msgstr "\\%s: omgivningsvariabelnamn får ej innehålla \"=\"\n" -#: command.c:2238 +#: command.c:2192 #, c-format msgid "The server (version %s) does not support showing function source.\n" msgstr "Servern (version %s) stöder inte visning av funktionskällkod.\n" -#: command.c:2245 -#, c-format -msgid "function name is required\n" -msgstr "funktionsnamn krävs\n" - -#: command.c:2332 +#: command.c:2195 #, c-format msgid "The server (version %s) does not support showing view definitions.\n" msgstr "Servern (version %s) stöder inte visning av vydefinitioner.\n" -#: command.c:2339 +#: command.c:2202 +#, c-format +msgid "function name is required\n" +msgstr "funktionsnamn krävs\n" + +#: command.c:2204 #, c-format msgid "view name is required\n" msgstr "vynamn krävs\n" -#: command.c:2462 +#: command.c:2334 msgid "Timing is on." msgstr "Tidtagning är på." -#: command.c:2464 +#: command.c:2336 msgid "Timing is off." msgstr "Tidtagning är av." 
-#: command.c:2549 command.c:2577 command.c:3518 command.c:3521 command.c:3524 -#: command.c:3530 command.c:3532 command.c:3540 command.c:3550 command.c:3559 -#: command.c:3573 command.c:3590 command.c:3648 common.c:69 copy.c:332 -#: copy.c:392 copy.c:405 psqlscanslash.l:760 psqlscanslash.l:771 -#: psqlscanslash.l:781 +#: command.c:2421 command.c:2449 command.c:3420 command.c:3423 command.c:3426 +#: command.c:3432 command.c:3434 command.c:3442 command.c:3452 command.c:3461 +#: command.c:3475 command.c:3492 command.c:3550 common.c:70 copy.c:332 +#: copy.c:392 copy.c:405 psqlscanslash.l:783 psqlscanslash.l:794 +#: psqlscanslash.l:804 #, c-format msgid "%s: %s\n" msgstr "%s: %s\n" -#: command.c:2961 startup.c:202 +#: command.c:2833 startup.c:214 startup.c:265 msgid "Password: " msgstr "Lösenord: " -#: command.c:2966 startup.c:204 +#: command.c:2838 startup.c:262 #, c-format msgid "Password for user %s: " msgstr "Lösenord för användare %s: " -#: command.c:3016 +#: command.c:2888 #, c-format msgid "All connection parameters must be supplied because no database connection exists\n" msgstr "Alla anslutningsparametrar måste anges då ingen databasuppkoppling är gjord\n" -#: command.c:3173 +#: command.c:3056 #, c-format msgid "Previous connection kept\n" msgstr "Föregående förbindelse bevarad\n" -#: command.c:3177 +#: command.c:3060 #, c-format msgid "\\connect: %s" msgstr "\\connect: %s" -#: command.c:3213 +#: command.c:3096 #, c-format msgid "You are now connected to database \"%s\" as user \"%s\" via socket in \"%s\" at port \"%s\".\n" msgstr "Du är nu uppkopplad mot databasen \"%s\" som användare \"%s\" via uttag i \"%s\" vid port \"%s\".\n" -#: command.c:3216 +#: command.c:3099 #, c-format msgid "You are now connected to database \"%s\" as user \"%s\" on host \"%s\" at port \"%s\".\n" msgstr "Du är nu uppkopplad mot databasen \"%s\" som användare \"%s\" på värd \"%s\" vid port \"%s\".\n" -#: command.c:3220 +#: command.c:3103 #, c-format msgid "You are now connected to database \"%s\" as user \"%s\".\n" msgstr "Du är nu uppkopplad mot databasen \"%s\" som användare \"%s\".\n" -#: command.c:3253 +#: command.c:3136 #, c-format msgid "%s (%s, server %s)\n" msgstr "%s (%s, server %s)\n" -#: command.c:3261 +#: command.c:3144 #, c-format msgid "" "WARNING: %s major version %s, server major version %s.\n" @@ -392,24 +393,24 @@ msgstr "" "VARNING: %s huvudversion %s, server huvudversion %s.\n" " En del psql-finesser kommer kanske inte fungera.\n" -#: command.c:3298 +#: command.c:3181 #, c-format msgid "SSL connection (protocol: %s, cipher: %s, bits: %s, compression: %s)\n" msgstr "SSL-förbindelse (protokoll: %s, krypto: %s, bitar: %s, komprimering: %s)\n" -#: command.c:3299 command.c:3300 command.c:3301 +#: command.c:3182 command.c:3183 command.c:3184 msgid "unknown" msgstr "okänd" -#: command.c:3302 help.c:45 +#: command.c:3185 help.c:45 msgid "off" msgstr "av" -#: command.c:3302 help.c:45 +#: command.c:3185 help.c:45 msgid "on" msgstr "på" -#: command.c:3322 +#: command.c:3205 #, c-format msgid "" "WARNING: Console code page (%u) differs from Windows code page (%u)\n" @@ -420,239 +421,239 @@ msgstr "" " 8-bitars tecken kommer troligen inte fungera korrekt. 
Se psql:s\n" " referensmanual i sektionen \"Notes for Windows users\" för mer detaljer.\n" -#: command.c:3407 +#: command.c:3309 #, c-format msgid "environment variable PSQL_EDITOR_LINENUMBER_ARG must be set to specify a line number\n" msgstr "omgivningsvariabeln PSQL_EDITOR_LINENUMBER_ARG måste ange ett radnummer\n" -#: command.c:3436 +#: command.c:3338 #, c-format msgid "could not start editor \"%s\"\n" msgstr "kunde inte starta editorn \"%s\"\n" -#: command.c:3438 +#: command.c:3340 #, c-format msgid "could not start /bin/sh\n" msgstr "kunde inte starta /bin/sh\n" -#: command.c:3476 +#: command.c:3378 #, c-format msgid "could not locate temporary directory: %s\n" msgstr "kunde inte hitta temp-katalog: %s\n" -#: command.c:3503 +#: command.c:3405 #, c-format msgid "could not open temporary file \"%s\": %s\n" msgstr "kunde inte öppna temporär fil \"%s\": %s\n" -#: command.c:3777 +#: command.c:3679 #, c-format msgid "\\pset: allowed formats are unaligned, aligned, wrapped, html, asciidoc, latex, latex-longtable, troff-ms\n" msgstr "\\pset: tillåtna format är unaligned, aligned, wrapped, html, asciidoc, latex, latex-longtable, troff-ms\n" -#: command.c:3795 +#: command.c:3697 #, c-format msgid "\\pset: allowed line styles are ascii, old-ascii, unicode\n" msgstr "\\pset: tillåtna linjestilar är ascii, old-ascii, unicode\n" -#: command.c:3810 +#: command.c:3712 #, c-format msgid "\\pset: allowed Unicode border line styles are single, double\n" msgstr "\\pset: tillåtna Unicode-ramstilar är single, double\n" -#: command.c:3825 +#: command.c:3727 #, c-format msgid "\\pset: allowed Unicode column line styles are single, double\n" msgstr "\\pset: tillåtna Unicode-kolumnlinjestilar ärsingle, double\n" -#: command.c:3840 +#: command.c:3742 #, c-format msgid "\\pset: allowed Unicode header line styles are single, double\n" msgstr "\\pset: tillåtna Unicode-rubriklinjestilar är single, double\n" -#: command.c:4005 command.c:4184 +#: command.c:3907 command.c:4086 #, c-format msgid "\\pset: unknown option: %s\n" msgstr "\\pset: okänd parameter: %s\n" -#: command.c:4023 +#: command.c:3925 #, c-format msgid "Border style is %d.\n" msgstr "Ramstil är %d.\n" -#: command.c:4029 +#: command.c:3931 #, c-format msgid "Target width is unset.\n" msgstr "Målvidd är inte satt.\n" -#: command.c:4031 +#: command.c:3933 #, c-format msgid "Target width is %d.\n" msgstr "Målvidd är %d.\n" -#: command.c:4038 +#: command.c:3940 #, c-format msgid "Expanded display is on.\n" msgstr "Utökad visning är på.\n" -#: command.c:4040 +#: command.c:3942 #, c-format msgid "Expanded display is used automatically.\n" msgstr "Utökad visning används automatiskt.\n" -#: command.c:4042 +#: command.c:3944 #, c-format msgid "Expanded display is off.\n" msgstr "Utökad visning är av.\n" -#: command.c:4049 command.c:4057 +#: command.c:3951 command.c:3959 #, c-format msgid "Field separator is zero byte.\n" msgstr "Fältseparatorn är noll-byte.\n" -#: command.c:4051 +#: command.c:3953 #, c-format msgid "Field separator is \"%s\".\n" msgstr "Fältseparatorn är \"%s\".\n" -#: command.c:4064 +#: command.c:3966 #, c-format msgid "Default footer is on.\n" msgstr "Standard sidfot är på.\n" -#: command.c:4066 +#: command.c:3968 #, c-format msgid "Default footer is off.\n" msgstr "Standard sidfot är av.\n" -#: command.c:4072 +#: command.c:3974 #, c-format msgid "Output format is %s.\n" msgstr "Utdataformatet är \"%s\".\n" -#: command.c:4078 +#: command.c:3980 #, c-format msgid "Line style is %s.\n" msgstr "Linjestil är %s.\n" -#: command.c:4085 +#: 
command.c:3987 #, c-format msgid "Null display is \"%s\".\n" msgstr "Null-visare är \"%s\".\n" -#: command.c:4093 +#: command.c:3995 #, c-format msgid "Locale-adjusted numeric output is on.\n" msgstr "Lokal-anpassad numerisk utdata är på.\n" -#: command.c:4095 +#: command.c:3997 #, c-format msgid "Locale-adjusted numeric output is off.\n" msgstr "Lokal-anpassad numerisk utdata är av.\n" -#: command.c:4102 +#: command.c:4004 #, c-format msgid "Pager is used for long output.\n" msgstr "Siduppdelare är på för lång utdata.\n" -#: command.c:4104 +#: command.c:4006 #, c-format msgid "Pager is always used.\n" msgstr "Siduppdelare används alltid.\n" -#: command.c:4106 +#: command.c:4008 #, c-format msgid "Pager usage is off.\n" msgstr "Siduppdelare är av.\n" -#: command.c:4112 +#: command.c:4014 #, c-format msgid "Pager won't be used for less than %d line.\n" msgid_plural "Pager won't be used for less than %d lines.\n" msgstr[0] "Siduppdelare kommer inte användas för färre än %d linje.\n" msgstr[1] "Siduppdelare kommer inte användas för färre än %d linjer.\n" -#: command.c:4122 command.c:4132 +#: command.c:4024 command.c:4034 #, c-format msgid "Record separator is zero byte.\n" msgstr "Postseparatorn är noll-byte.\n" -#: command.c:4124 +#: command.c:4026 #, c-format msgid "Record separator is .\n" msgstr "Postseparatorn är .\n" -#: command.c:4126 +#: command.c:4028 #, c-format msgid "Record separator is \"%s\".\n" msgstr "Postseparatorn är \"%s\".\n" -#: command.c:4139 +#: command.c:4041 #, c-format msgid "Table attributes are \"%s\".\n" msgstr "Tabellattributen är \"%s\".\n" -#: command.c:4142 +#: command.c:4044 #, c-format msgid "Table attributes unset.\n" msgstr "Tabellattributen är ej satta.\n" -#: command.c:4149 +#: command.c:4051 #, c-format msgid "Title is \"%s\".\n" msgstr "Titeln är \"%s\".\n" -#: command.c:4151 +#: command.c:4053 #, c-format msgid "Title is unset.\n" msgstr "Titeln är inte satt.\n" -#: command.c:4158 +#: command.c:4060 #, c-format msgid "Tuples only is on.\n" msgstr "Visa bara tupler är på.\n" -#: command.c:4160 +#: command.c:4062 #, c-format msgid "Tuples only is off.\n" msgstr "Visa bara tupler är av.\n" -#: command.c:4166 +#: command.c:4068 #, c-format msgid "Unicode border line style is \"%s\".\n" msgstr "Unicode-ramstil är \"%s\".\n" -#: command.c:4172 +#: command.c:4074 #, c-format msgid "Unicode column line style is \"%s\".\n" msgstr "Unicode-kolumnLinjestil är \"%s\".\n" -#: command.c:4178 +#: command.c:4080 #, c-format msgid "Unicode header line style is \"%s\".\n" msgstr "Unicode-rubriklinjestil är \"%s\".\n" -#: command.c:4338 +#: command.c:4240 #, c-format msgid "\\!: failed\n" msgstr "\\!: misslyckades\n" -#: command.c:4363 common.c:754 +#: command.c:4265 common.c:802 #, c-format msgid "\\watch cannot be used with an empty query\n" msgstr "\\watch kan inte användas på en tom fråga\n" -#: command.c:4404 +#: command.c:4306 #, c-format msgid "%s\t%s (every %gs)\n" msgstr "%s\t%s (varje %gs)\n" -#: command.c:4407 +#: command.c:4309 #, c-format msgid "%s (every %gs)\n" msgstr "%s (varje %gs)\n" -#: command.c:4461 command.c:4468 common.c:654 common.c:661 common.c:1271 +#: command.c:4363 command.c:4370 common.c:702 common.c:709 common.c:1321 #, c-format msgid "" "********* QUERY **********\n" @@ -665,102 +666,102 @@ msgstr "" "**************************\n" "\n" -#: command.c:4660 +#: command.c:4562 #, c-format msgid "\"%s.%s\" is not a view\n" msgstr "\"%s.%s\" är inte en vy\n" -#: command.c:4676 +#: command.c:4578 #, c-format msgid "could not parse reloptions 
array\n" msgstr "kunde inte parsa arrayen reloptions\n" -#: common.c:158 +#: common.c:159 #, c-format msgid "cannot escape without active connection\n" msgstr "kan inte escape:a utan en aktiv upppkoppling\n" -#: common.c:199 +#: common.c:200 #, c-format msgid "shell command argument contains a newline or carriage return: \"%s\"\n" msgstr "shell-kommandots argument innehåller nyrad eller vagnretur: \"%s\"\n" -#: common.c:415 +#: common.c:416 #, c-format msgid "connection to server was lost\n" msgstr "förbindelsen till servern har brutits\n" -#: common.c:419 +#: common.c:420 #, c-format msgid "The connection to the server was lost. Attempting reset: " msgstr "Förbindelsen till servern har brutits. Försöker starta om: " -#: common.c:424 +#: common.c:425 #, c-format msgid "Failed.\n" msgstr "Misslyckades.\n" -#: common.c:431 +#: common.c:432 #, c-format msgid "Succeeded.\n" msgstr "Lyckades.\n" -#: common.c:531 common.c:1034 common.c:1206 +#: common.c:532 common.c:1082 common.c:1256 #, c-format msgid "unexpected PQresultStatus: %d\n" msgstr "oväntad PQresultStatus: %d\n" -#: common.c:593 +#: common.c:641 #, c-format msgid "Time: %.3f ms\n" msgstr "Tid: %.3f ms\n" -#: common.c:608 +#: common.c:656 #, c-format msgid "Time: %.3f ms (%02d:%06.3f)\n" msgstr "Tid: %.3f ms (%02d:%06.3f)\n" -#: common.c:617 +#: common.c:665 #, c-format msgid "Time: %.3f ms (%02d:%02d:%06.3f)\n" msgstr "Tid: %.3f ms (%02d:%02d:%06.3f)\n" -#: common.c:624 +#: common.c:672 #, c-format msgid "Time: %.3f ms (%.0f d %02d:%02d:%06.3f)\n" msgstr "Tid: %.3f ms (%.0f d %02d:%02d:%06.3f)\n" -#: common.c:761 +#: common.c:809 #, c-format msgid "\\watch cannot be used with COPY\n" msgstr "\\watch kan inte användas med COPY\n" -#: common.c:766 +#: common.c:814 #, c-format msgid "unexpected result status for \\watch\n" msgstr "oväntat resultatstatus för \\watch\n" -#: common.c:795 +#: common.c:843 #, c-format msgid "Asynchronous notification \"%s\" with payload \"%s\" received from server process with PID %d.\n" msgstr "Asynkron notificering \"%s\" mottagen med innehåll \"%s\" från serverprocess med PID %d.\n" -#: common.c:798 +#: common.c:846 #, c-format msgid "Asynchronous notification \"%s\" received from server process with PID %d.\n" msgstr "Asynkron notificering \"%s\" mottagen från serverprocess med PID %d.\n" -#: common.c:860 +#: common.c:908 #, c-format msgid "no rows returned for \\gset\n" msgstr "inga rader returnerades för \\gset\n" -#: common.c:865 +#: common.c:913 #, c-format msgid "more than one row returned for \\gset\n" msgstr "mer än en rad returnerades för \\gset\n" -#: common.c:1251 +#: common.c:1301 #, c-format msgid "" "***(Single step mode: verify command)*******************************************\n" @@ -771,21 +772,37 @@ msgstr "" "%s\n" "***(tryck return för att fortsätta eller skriv x och return för att avbryta)*****\n" -#: common.c:1306 +#: common.c:1356 #, c-format msgid "The server (version %s) does not support savepoints for ON_ERROR_ROLLBACK.\n" msgstr "Servern (version %s) stöder inte sparpunkter för ON_ERROR_ROLLBACK.\n" -#: common.c:1362 +#: common.c:1419 #, c-format msgid "STATEMENT: %s\n" msgstr "SATS: %s\n" -#: common.c:1405 +#: common.c:1462 #, c-format msgid "unexpected transaction status (%d)\n" msgstr "oväntad transaktionsstatus (%d)\n" +#: common.c:1599 describe.c:1847 +msgid "Column" +msgstr "Kolumn" + +#: common.c:1600 describe.c:174 describe.c:374 describe.c:392 describe.c:437 +#: describe.c:454 describe.c:925 describe.c:1089 describe.c:1620 +#: describe.c:1644 describe.c:1848 
describe.c:3457 describe.c:3662 +#: describe.c:4834 +msgid "Type" +msgstr "Typ" + +#: common.c:1649 +#, c-format +msgid "The command has no result, or the result has no columns.\n" +msgstr "Kommandot hade inget resultat eller så hade resultatet inga kolumner.\n" + #: copy.c:99 #, c-format msgid "\\copy: arguments required\n" @@ -896,1001 +913,1023 @@ msgstr "\\crosstabview: tvetydigt kolumnnamn: \"%s\"\n" msgid "\\crosstabview: column name not found: \"%s\"\n" msgstr "\\crosstabview: hittar ej kolumnnamn: \"%s\"\n" -#: describe.c:74 describe.c:346 describe.c:603 describe.c:735 describe.c:879 -#: describe.c:1040 describe.c:1112 describe.c:3342 describe.c:3554 -#: describe.c:3645 describe.c:3893 describe.c:4038 describe.c:4279 -#: describe.c:4354 describe.c:4365 describe.c:4427 describe.c:4852 -#: describe.c:4935 +#: describe.c:74 describe.c:354 describe.c:641 describe.c:773 describe.c:917 +#: describe.c:1078 describe.c:1150 describe.c:3446 describe.c:3660 +#: describe.c:3751 describe.c:3999 describe.c:4144 describe.c:4385 +#: describe.c:4460 describe.c:4471 describe.c:4533 describe.c:4958 +#: describe.c:5041 msgid "Schema" msgstr "Schema" -#: describe.c:75 describe.c:164 describe.c:231 describe.c:239 describe.c:347 -#: describe.c:604 describe.c:736 describe.c:797 describe.c:880 describe.c:1113 -#: describe.c:3343 describe.c:3477 describe.c:3555 describe.c:3646 -#: describe.c:3725 describe.c:3894 describe.c:3963 describe.c:4039 -#: describe.c:4280 describe.c:4355 describe.c:4366 describe.c:4428 -#: describe.c:4625 describe.c:4709 describe.c:4933 describe.c:5105 -#: describe.c:5312 +#: describe.c:75 describe.c:172 describe.c:239 describe.c:247 describe.c:355 +#: describe.c:642 describe.c:774 describe.c:835 describe.c:918 describe.c:1151 +#: describe.c:3447 describe.c:3583 describe.c:3661 describe.c:3752 +#: describe.c:3831 describe.c:4000 describe.c:4069 describe.c:4145 +#: describe.c:4386 describe.c:4461 describe.c:4472 describe.c:4534 +#: describe.c:4731 describe.c:4815 describe.c:5039 describe.c:5211 +#: describe.c:5436 msgid "Name" msgstr "Namn" -#: describe.c:76 describe.c:359 describe.c:405 describe.c:422 +#: describe.c:76 describe.c:367 describe.c:385 describe.c:431 describe.c:448 msgid "Result data type" msgstr "Resultatdatatyp" -#: describe.c:84 describe.c:97 describe.c:101 describe.c:360 describe.c:406 -#: describe.c:423 +#: describe.c:84 describe.c:97 describe.c:101 describe.c:368 describe.c:386 +#: describe.c:432 describe.c:449 msgid "Argument data types" msgstr "Argumentdatatyp" -#: describe.c:108 describe.c:174 describe.c:262 describe.c:468 describe.c:652 -#: describe.c:751 describe.c:822 describe.c:1115 describe.c:1756 -#: describe.c:3132 describe.c:3377 describe.c:3508 describe.c:3582 -#: describe.c:3655 describe.c:3738 describe.c:3806 describe.c:3906 -#: describe.c:3972 describe.c:4040 describe.c:4181 describe.c:4223 -#: describe.c:4296 describe.c:4358 describe.c:4367 describe.c:4429 -#: describe.c:4651 describe.c:4731 describe.c:4866 describe.c:4936 +#: describe.c:109 describe.c:116 describe.c:182 describe.c:270 describe.c:494 +#: describe.c:690 describe.c:789 describe.c:860 describe.c:1153 describe.c:1888 +#: describe.c:3235 describe.c:3482 describe.c:3614 describe.c:3688 +#: describe.c:3761 describe.c:3844 describe.c:3912 describe.c:4012 +#: describe.c:4078 describe.c:4146 describe.c:4287 describe.c:4329 +#: describe.c:4402 describe.c:4464 describe.c:4473 describe.c:4535 +#: describe.c:4757 describe.c:4837 describe.c:4972 describe.c:5042 #: large_obj.c:289 large_obj.c:299 
msgid "Description" msgstr "Beskrivning" -#: describe.c:126 +#: describe.c:134 msgid "List of aggregate functions" msgstr "Lista med aggregatfunktioner" -#: describe.c:151 +#: describe.c:159 #, c-format msgid "The server (version %s) does not support access methods.\n" msgstr "Servern (version %s) stöder inte accessmetoder.\n" -#: describe.c:165 +#: describe.c:173 msgid "Index" msgstr "Index" -#: describe.c:166 describe.c:366 describe.c:411 describe.c:428 describe.c:887 -#: describe.c:1051 describe.c:1716 describe.c:3352 describe.c:3556 -#: describe.c:4728 -msgid "Type" -msgstr "Typ" - -#: describe.c:173 describe.c:4630 +#: describe.c:181 describe.c:4736 msgid "Handler" msgstr "Hanterare" -#: describe.c:192 +#: describe.c:200 msgid "List of access methods" msgstr "Lista med accessmetoder" -#: describe.c:218 +#: describe.c:226 #, c-format msgid "The server (version %s) does not support tablespaces.\n" msgstr "Servern (version %s) stöder inte tabellutrymmen.\n" -#: describe.c:232 describe.c:240 describe.c:456 describe.c:642 describe.c:798 -#: describe.c:1039 describe.c:3353 describe.c:3481 describe.c:3727 -#: describe.c:3964 describe.c:4626 describe.c:4710 describe.c:5106 -#: describe.c:5218 describe.c:5313 large_obj.c:288 +#: describe.c:240 describe.c:248 describe.c:482 describe.c:680 describe.c:836 +#: describe.c:1077 describe.c:3458 describe.c:3587 describe.c:3833 +#: describe.c:4070 describe.c:4732 describe.c:4816 describe.c:5212 +#: describe.c:5338 describe.c:5437 large_obj.c:288 msgid "Owner" msgstr "Ägare" -#: describe.c:233 describe.c:241 +#: describe.c:241 describe.c:249 msgid "Location" msgstr "Plats" -#: describe.c:252 describe.c:2944 +#: describe.c:260 describe.c:3054 msgid "Options" msgstr "Alternativ" -#: describe.c:257 describe.c:615 describe.c:814 describe.c:3369 -#: describe.c:3373 +#: describe.c:265 describe.c:653 describe.c:852 describe.c:3474 describe.c:3478 msgid "Size" msgstr "Storlek" -#: describe.c:279 +#: describe.c:287 msgid "List of tablespaces" msgstr "Lista med tabellutrymmen" -#: describe.c:320 +#: describe.c:328 #, c-format msgid "\\df only takes [antwS+] as options\n" msgstr "\\df tar bara [antwS+] som flaggor\n" -#: describe.c:328 +#: describe.c:336 #, c-format msgid "\\df does not take a \"w\" option with server version %s\n" msgstr "\\df tar inte en \"w\"-flagga med serverversion %s\n" #. 
translator: "agg" is short for "aggregate" -#: describe.c:362 describe.c:408 describe.c:425 +#: describe.c:370 describe.c:388 describe.c:434 describe.c:451 msgid "agg" msgstr "agg" -#: describe.c:363 +#: describe.c:371 describe.c:389 msgid "window" msgstr "fönster" -#: describe.c:364 describe.c:409 describe.c:426 describe.c:1249 +#: describe.c:372 +msgid "proc" +msgstr "proc" + +#: describe.c:373 describe.c:391 describe.c:436 describe.c:453 +msgid "func" +msgstr "funk" + +#: describe.c:390 describe.c:435 describe.c:452 describe.c:1287 msgid "trigger" msgstr "utlösare" -#: describe.c:365 describe.c:410 describe.c:427 -msgid "normal" -msgstr "normal" - -#: describe.c:438 +#: describe.c:464 msgid "immutable" msgstr "oföränderlig" -#: describe.c:439 +#: describe.c:465 msgid "stable" msgstr "stabil" -#: describe.c:440 +#: describe.c:466 msgid "volatile" msgstr "flyktig" -#: describe.c:441 +#: describe.c:467 msgid "Volatility" msgstr "Flyktighet" -#: describe.c:449 +#: describe.c:475 msgid "restricted" msgstr "begränsad" -#: describe.c:450 +#: describe.c:476 msgid "safe" msgstr "säker" -#: describe.c:451 +#: describe.c:477 msgid "unsafe" msgstr "osäker" -#: describe.c:452 +#: describe.c:478 msgid "Parallel" msgstr "Parallell" -#: describe.c:457 +#: describe.c:483 msgid "definer" msgstr "definierare" -#: describe.c:458 +#: describe.c:484 msgid "invoker" msgstr "anropare" -#: describe.c:459 +#: describe.c:485 msgid "Security" msgstr "Säkerhet" -#: describe.c:466 +#: describe.c:492 msgid "Language" msgstr "Språk" -#: describe.c:467 +#: describe.c:493 msgid "Source code" msgstr "Källkod" -#: describe.c:566 +#: describe.c:604 msgid "List of functions" msgstr "Lista med funktioner" -#: describe.c:614 +#: describe.c:652 msgid "Internal name" msgstr "Internt namn" -#: describe.c:636 +#: describe.c:674 msgid "Elements" msgstr "Element" -#: describe.c:693 +#: describe.c:731 msgid "List of data types" msgstr "Lista med datatyper" -#: describe.c:737 +#: describe.c:775 msgid "Left arg type" msgstr "Vänster argumenttyp" -#: describe.c:738 +#: describe.c:776 msgid "Right arg type" msgstr "Höger argumenttyp" -#: describe.c:739 +#: describe.c:777 msgid "Result type" msgstr "Resultattyp" -#: describe.c:744 describe.c:3797 describe.c:4180 +#: describe.c:782 describe.c:3903 describe.c:4286 msgid "Function" msgstr "Funktion" -#: describe.c:769 +#: describe.c:807 msgid "List of operators" msgstr "Lista med operatorer" -#: describe.c:799 +#: describe.c:837 msgid "Encoding" msgstr "Kodning" -#: describe.c:804 describe.c:3895 +#: describe.c:842 describe.c:4001 msgid "Collate" msgstr "Jämförelse" -#: describe.c:805 describe.c:3896 +#: describe.c:843 describe.c:4002 msgid "Ctype" msgstr "Ctype" -#: describe.c:818 +#: describe.c:856 msgid "Tablespace" msgstr "Tabellutrymme" -#: describe.c:840 +#: describe.c:878 msgid "List of databases" msgstr "Lista med databaser" -#: describe.c:881 describe.c:886 describe.c:1042 describe.c:3344 -#: describe.c:3351 +#: describe.c:919 describe.c:924 describe.c:1080 describe.c:3448 +#: describe.c:3455 msgid "table" msgstr "tabell" -#: describe.c:882 describe.c:3345 +#: describe.c:920 describe.c:3449 msgid "view" msgstr "vy" -#: describe.c:883 describe.c:3346 +#: describe.c:921 describe.c:3450 msgid "materialized view" msgstr "materialiserad vy" -#: describe.c:884 describe.c:1044 describe.c:3348 +#: describe.c:922 describe.c:1082 describe.c:3452 msgid "sequence" msgstr "sekvens" -#: describe.c:885 describe.c:3350 +#: describe.c:923 describe.c:3454 msgid "foreign table" msgstr "främmande 
tabell" -#: describe.c:898 +#: describe.c:936 msgid "Column privileges" msgstr "Kolumnrättigheter" -#: describe.c:929 describe.c:963 +#: describe.c:967 describe.c:1001 msgid "Policies" msgstr "Policys" -#: describe.c:995 describe.c:5369 describe.c:5373 +#: describe.c:1033 describe.c:5493 describe.c:5497 msgid "Access privileges" msgstr "Åtkomsträttigheter" -#: describe.c:1026 +#: describe.c:1064 #, c-format msgid "The server (version %s) does not support altering default privileges.\n" msgstr "Servern (version %s) stöder inte ändring av standardrättigheter.\n" -#: describe.c:1046 +#: describe.c:1084 msgid "function" msgstr "funktion" -#: describe.c:1048 +#: describe.c:1086 msgid "type" msgstr "typ" -#: describe.c:1050 +#: describe.c:1088 msgid "schema" msgstr "schema" -#: describe.c:1074 +#: describe.c:1112 msgid "Default access privileges" msgstr "Standard accessrättigheter" -#: describe.c:1114 +#: describe.c:1152 msgid "Object" msgstr "Objekt" -#: describe.c:1128 +#: describe.c:1166 msgid "table constraint" msgstr "tabellvillkor" -#: describe.c:1150 +#: describe.c:1188 msgid "domain constraint" msgstr "domänvillkor" -#: describe.c:1178 +#: describe.c:1216 msgid "operator class" msgstr "operatorklass" -#: describe.c:1207 +#: describe.c:1245 msgid "operator family" msgstr "operatorfamilj" -#: describe.c:1229 +#: describe.c:1267 msgid "rule" msgstr "rule" -#: describe.c:1271 +#: describe.c:1309 msgid "Object descriptions" msgstr "Objektbeskrivningar" -#: describe.c:1327 describe.c:3440 +#: describe.c:1365 describe.c:3546 #, c-format msgid "Did not find any relation named \"%s\".\n" msgstr "Kunde inte hitta en relation med namn \"%s\".\n" -#: describe.c:1330 describe.c:3443 +#: describe.c:1368 describe.c:3549 #, c-format msgid "Did not find any relations.\n" msgstr "Kunde inte hitta några relationer.\n" -#: describe.c:1539 +#: describe.c:1575 #, c-format msgid "Did not find any relation with OID %s.\n" msgstr "Kunde inte hitta en relation med OID %s.\n" -#: describe.c:1652 describe.c:1701 +#: describe.c:1621 describe.c:1645 +msgid "Start" +msgstr "Start" + +#: describe.c:1622 describe.c:1646 +msgid "Minimum" +msgstr "Minimum" + +#: describe.c:1623 describe.c:1647 +msgid "Maximum" +msgstr "Maximum" + +#: describe.c:1624 describe.c:1648 +msgid "Increment" +msgstr "Ökning" + +#: describe.c:1625 describe.c:1649 describe.c:3755 describe.c:3906 +msgid "yes" +msgstr "ja" + +#: describe.c:1626 describe.c:1650 describe.c:3755 describe.c:3904 +msgid "no" +msgstr "nej" + +#: describe.c:1627 describe.c:1651 +msgid "Cycles?" +msgstr "Cyklisk?" 
+ +#: describe.c:1628 describe.c:1652 +msgid "Cache" +msgstr "Cache" + +#: describe.c:1695 +#, c-format +msgid "Owned by: %s" +msgstr "Ägd av: %s" + +#: describe.c:1699 +#, c-format +msgid "Sequence for identity column: %s" +msgstr "Sekvens för identitetskolumn: %s" + +#: describe.c:1706 +#, c-format +msgid "Sequence \"%s.%s\"" +msgstr "Sekvens \"%s.%s\"" + +#: describe.c:1787 describe.c:1833 #, c-format msgid "Unlogged table \"%s.%s\"" msgstr "Ologgad tabell \"%s.%s\"" -#: describe.c:1655 describe.c:1704 +#: describe.c:1790 describe.c:1836 #, c-format msgid "Table \"%s.%s\"" msgstr "Tabell \"%s.%s\"" -#: describe.c:1659 +#: describe.c:1794 #, c-format msgid "View \"%s.%s\"" msgstr "Vy \"%s.%s\"" -#: describe.c:1664 +#: describe.c:1799 #, c-format msgid "Unlogged materialized view \"%s.%s\"" msgstr "Ologgad materialiserad vy \"%s.%s\"" -#: describe.c:1667 +#: describe.c:1802 #, c-format msgid "Materialized view \"%s.%s\"" msgstr "Materialiserad vy \"%s.%s\"" -#: describe.c:1671 -#, c-format -msgid "Sequence \"%s.%s\"" -msgstr "Sekvens \"%s.%s\"" - -#: describe.c:1676 +#: describe.c:1808 #, c-format msgid "Unlogged index \"%s.%s\"" msgstr "Ologgat index \"%s.%s\"" -#: describe.c:1679 +#: describe.c:1811 #, c-format msgid "Index \"%s.%s\"" msgstr "Index \"%s.%s\"" -#: describe.c:1684 +#: describe.c:1816 #, c-format msgid "Special relation \"%s.%s\"" msgstr "Särskild relation \"%s.%s\"" -#: describe.c:1688 +#: describe.c:1820 #, c-format msgid "TOAST table \"%s.%s\"" msgstr "TOAST-tabell \"%s.%s\"" -#: describe.c:1692 +#: describe.c:1824 #, c-format msgid "Composite type \"%s.%s\"" msgstr "Sammansatt typ \"%s.%s\"" -#: describe.c:1696 +#: describe.c:1828 #, c-format msgid "Foreign table \"%s.%s\"" msgstr "Främmande tabell \"%s.%s\"" -#: describe.c:1715 -msgid "Column" -msgstr "Kolumn" - -#: describe.c:1726 describe.c:3562 +#: describe.c:1858 describe.c:3668 msgid "Collation" -msgstr "Collation" +msgstr "Jämförelse" -#: describe.c:1727 describe.c:3569 +#: describe.c:1859 describe.c:3675 msgid "Nullable" msgstr "Nullbar" -#: describe.c:1728 describe.c:3570 +#: describe.c:1860 describe.c:3676 msgid "Default" msgstr "Standard" -#: describe.c:1733 -msgid "Value" -msgstr "Värde" - -#: describe.c:1736 +#: describe.c:1866 msgid "Definition" msgstr "Definition" -#: describe.c:1739 describe.c:4646 describe.c:4730 describe.c:4801 -#: describe.c:4865 +#: describe.c:1869 describe.c:4752 describe.c:4836 describe.c:4907 +#: describe.c:4971 msgid "FDW options" msgstr "FDW-alternativ" -#: describe.c:1743 +#: describe.c:1873 msgid "Storage" msgstr "Lagring" -#: describe.c:1748 +#: describe.c:1880 msgid "Stats target" msgstr "Statistikmål" -#: describe.c:1897 +#: describe.c:2028 #, c-format msgid "Partition of: %s %s" msgstr "Partition av: %s %s" -#: describe.c:1903 +#: describe.c:2036 +msgid "No partition constraint" +msgstr "Inget partitioneringsvillkor" + +#: describe.c:2038 #, c-format msgid "Partition constraint: %s" msgstr "Partitioneringsvillkor: %s" -#: describe.c:1926 +#: describe.c:2061 #, c-format msgid "Partition key: %s" msgstr "Partioneringsnyckel: %s" -#: describe.c:1994 +#: describe.c:2130 msgid "primary key, " msgstr "primärnyckel, " -#: describe.c:1996 +#: describe.c:2132 msgid "unique, " msgstr "unik, " -#: describe.c:2002 +#: describe.c:2138 #, c-format msgid "for table \"%s.%s\"" msgstr "för tabell \"%s.%s\"" -#: describe.c:2006 +#: describe.c:2142 #, c-format msgid ", predicate (%s)" msgstr ", predikat (%s)" -#: describe.c:2009 +#: describe.c:2145 msgid ", clustered" msgstr ", klustrad" 
-#: describe.c:2012 +#: describe.c:2148 msgid ", invalid" msgstr ", ogiltig" -#: describe.c:2015 +#: describe.c:2151 msgid ", deferrable" msgstr ", uppskjutbar" -#: describe.c:2018 +#: describe.c:2154 msgid ", initially deferred" msgstr ", initialt uppskjuten" -#: describe.c:2021 +#: describe.c:2157 msgid ", replica identity" msgstr ", replikaidentitet" -#: describe.c:2060 -#, c-format -msgid "Owned by: %s" -msgstr "Ägd av: %s" - -#: describe.c:2065 -#, c-format -msgid "Sequence for identity column: %s" -msgstr "Sekvens för identitetskolumn: %s" - -#: describe.c:2129 +#: describe.c:2216 msgid "Indexes:" msgstr "Index:" -#: describe.c:2213 +#: describe.c:2300 msgid "Check constraints:" msgstr "Kontrollvillkor:" -#: describe.c:2244 +#: describe.c:2331 msgid "Foreign-key constraints:" msgstr "Främmande nyckel-villkor:" -#: describe.c:2275 +#: describe.c:2362 msgid "Referenced by:" msgstr "Refererad av:" -#: describe.c:2325 +#: describe.c:2412 msgid "Policies:" msgstr "Policys:" -#: describe.c:2328 +#: describe.c:2415 msgid "Policies (forced row security enabled):" msgstr "Policys (tvingad radsäkerhet påslagen):" -#: describe.c:2331 +#: describe.c:2418 msgid "Policies (row security enabled): (none)" msgstr "Policys (radsäkerhet påslagna): (ingen)" -#: describe.c:2334 +#: describe.c:2421 msgid "Policies (forced row security enabled): (none)" msgstr "Policys (tvingad radsäkerhet påslagen): (ingen)" -#: describe.c:2337 +#: describe.c:2424 msgid "Policies (row security disabled):" msgstr "Policys (radsäkerhet avstängd):" -#: describe.c:2399 +#: describe.c:2486 msgid "Statistics objects:" msgstr "Statistikobjekt:" -#: describe.c:2502 describe.c:2587 +#: describe.c:2589 describe.c:2674 msgid "Rules:" msgstr "Regler:" -#: describe.c:2505 +#: describe.c:2592 msgid "Disabled rules:" msgstr "Avstängda regler:" -#: describe.c:2508 +#: describe.c:2595 msgid "Rules firing always:" msgstr "Regler som alltid utförs:" -#: describe.c:2511 +#: describe.c:2598 msgid "Rules firing on replica only:" msgstr "Regler som utförs enbart på replika:" -#: describe.c:2551 +#: describe.c:2638 msgid "Publications:" msgstr "Publiceringar:" -#: describe.c:2570 +#: describe.c:2657 msgid "View definition:" msgstr "Vydefinition:" -#: describe.c:2705 +#: describe.c:2792 msgid "Triggers:" msgstr "Utlösare:" -#: describe.c:2709 +#: describe.c:2796 msgid "Disabled user triggers:" msgstr "Avstängda användarutlösare:" -#: describe.c:2711 +#: describe.c:2798 msgid "Disabled triggers:" msgstr "Avstängda utlösare:" -#: describe.c:2714 +#: describe.c:2801 msgid "Disabled internal triggers:" msgstr "Avstängda interna utlösare:" -#: describe.c:2717 +#: describe.c:2804 msgid "Triggers firing always:" msgstr "Utlösare som alltid aktiveras:" -#: describe.c:2720 +#: describe.c:2807 msgid "Triggers firing on replica only:" msgstr "Utlösare som aktiveras enbart på replika:" -#: describe.c:2779 +#: describe.c:2866 #, c-format msgid "Server: %s" msgstr "Server: %s" -#: describe.c:2787 +#: describe.c:2874 #, c-format msgid "FDW options: (%s)" msgstr "FDW-alternativ: (%s)" -#: describe.c:2806 +#: describe.c:2893 msgid "Inherits" msgstr "Ärver" -#: describe.c:2860 +#: describe.c:2952 +#, c-format +msgid "Number of partitions: %d" +msgstr "Antal partitioner: %d" + +#: describe.c:2961 #, c-format msgid "Number of child tables: %d (Use \\d+ to list them.)" msgstr "Antal barntabeller: %d (Använd \\d+ för att lista dem.)" -#: describe.c:2862 +#: describe.c:2963 #, c-format msgid "Number of partitions: %d (Use \\d+ to list them.)" msgstr "Antal partitioner: 
%d (Använd \\d+ för att lista dem.)" -#: describe.c:2870 +#: describe.c:2971 msgid "Child tables" msgstr "Barntabeller" -#: describe.c:2870 +#: describe.c:2971 msgid "Partitions" msgstr "Partitioner" -#: describe.c:2904 +#: describe.c:3014 #, c-format msgid "Typed table of type: %s" msgstr "Typad tabell av typ: %s" -#: describe.c:2920 +#: describe.c:3030 msgid "Replica Identity" msgstr "Replikaidentitet" -#: describe.c:2933 +#: describe.c:3043 msgid "Has OIDs: yes" msgstr "Har OID:er: ja" -#: describe.c:3020 +#: describe.c:3123 #, c-format msgid "Tablespace: \"%s\"" msgstr "Tabellutrymme: \"%s\"" #. translator: before this string there's an index description like #. '"foo_pkey" PRIMARY KEY, btree (a)' -#: describe.c:3032 +#: describe.c:3135 #, c-format msgid ", tablespace \"%s\"" msgstr ", tabellutrymme: \"%s\"" -#: describe.c:3125 +#: describe.c:3228 msgid "List of roles" msgstr "Lista med roller" -#: describe.c:3127 +#: describe.c:3230 msgid "Role name" msgstr "Rollnamn" -#: describe.c:3128 +#: describe.c:3231 msgid "Attributes" msgstr "Attribut" -#: describe.c:3129 +#: describe.c:3232 msgid "Member of" msgstr "Medlem av" -#: describe.c:3140 +#: describe.c:3243 msgid "Superuser" msgstr "Superanvändare" -#: describe.c:3143 +#: describe.c:3246 msgid "No inheritance" msgstr "Inget arv" -#: describe.c:3146 +#: describe.c:3249 msgid "Create role" msgstr "Skapa roll" -#: describe.c:3149 +#: describe.c:3252 msgid "Create DB" msgstr "Skapa DB" -#: describe.c:3152 +#: describe.c:3255 msgid "Cannot login" msgstr "Kan inte logga in" -#: describe.c:3156 +#: describe.c:3259 msgid "Replication" msgstr "Replikering" -#: describe.c:3160 +#: describe.c:3263 msgid "Bypass RLS" msgstr "Hopp över RLS" -#: describe.c:3169 +#: describe.c:3272 msgid "No connections" msgstr "Inga uppkopplingar" -#: describe.c:3171 +#: describe.c:3274 #, c-format msgid "%d connection" msgid_plural "%d connections" msgstr[0] "%d uppkoppling" msgstr[1] "%d uppkopplingar" -#: describe.c:3181 +#: describe.c:3284 msgid "Password valid until " msgstr "Lösenord giltigt till " -#: describe.c:3231 +#: describe.c:3334 #, c-format msgid "The server (version %s) does not support per-database role settings.\n" msgstr "Servern (version %s) stöder inte rollinställningar per databas.\n" -#: describe.c:3244 +#: describe.c:3347 msgid "Role" msgstr "Roll" -#: describe.c:3245 +#: describe.c:3348 msgid "Database" msgstr "Databas" -#: describe.c:3246 +#: describe.c:3349 msgid "Settings" msgstr "Inställningar" -#: describe.c:3267 +#: describe.c:3370 #, c-format msgid "Did not find any settings for role \"%s\" and database \"%s\".\n" msgstr "Kunde inte hitta några inställningar för roll \"%s\" och databas \"%s\".\n" -#: describe.c:3270 +#: describe.c:3373 #, c-format msgid "Did not find any settings for role \"%s\".\n" msgstr "Kunde inte hitta några inställningar för roll \"%s\".\n" -#: describe.c:3273 +#: describe.c:3376 #, c-format msgid "Did not find any settings.\n" msgstr "Kunde inte hitta några inställningar.\n" -#: describe.c:3278 +#: describe.c:3381 msgid "List of settings" msgstr "Lista med inställningar" -#: describe.c:3347 +#: describe.c:3451 describe.c:3456 msgid "index" msgstr "index" -#: describe.c:3349 +#: describe.c:3453 msgid "special" msgstr "särskild" -#: describe.c:3358 describe.c:4853 +#: describe.c:3463 describe.c:4959 msgid "Table" msgstr "Tabell" -#: describe.c:3448 +#: describe.c:3554 msgid "List of relations" msgstr "Lista med relationer" -#: describe.c:3485 +#: describe.c:3591 msgid "Trusted" msgstr "Tillförlitlig" -#: 
describe.c:3493 +#: describe.c:3599 msgid "Internal language" msgstr "Internt språk" -#: describe.c:3494 +#: describe.c:3600 msgid "Call handler" msgstr "Anropshanterare" -#: describe.c:3495 describe.c:4633 +#: describe.c:3601 describe.c:4739 msgid "Validator" msgstr "Validerare" -#: describe.c:3498 +#: describe.c:3604 msgid "Inline handler" msgstr "Inline-hanterare" -#: describe.c:3526 +#: describe.c:3632 msgid "List of languages" msgstr "Lista med språk" -#: describe.c:3571 +#: describe.c:3677 msgid "Check" msgstr "Check" -#: describe.c:3613 +#: describe.c:3719 msgid "List of domains" msgstr "Lista med domäner" -#: describe.c:3647 +#: describe.c:3753 msgid "Source" msgstr "Källa" -#: describe.c:3648 +#: describe.c:3754 msgid "Destination" msgstr "Mål" -#: describe.c:3649 describe.c:3798 -msgid "no" -msgstr "nej" - -#: describe.c:3649 describe.c:3800 -msgid "yes" -msgstr "ja" - -#: describe.c:3650 +#: describe.c:3756 msgid "Default?" msgstr "Standard?" -#: describe.c:3687 +#: describe.c:3793 msgid "List of conversions" msgstr "Lista med konverteringar" -#: describe.c:3726 +#: describe.c:3832 msgid "Event" msgstr "Händelse" -#: describe.c:3728 +#: describe.c:3834 msgid "enabled" msgstr "påslagen" -#: describe.c:3729 +#: describe.c:3835 msgid "replica" msgstr "replika" -#: describe.c:3730 +#: describe.c:3836 msgid "always" msgstr "alltid" -#: describe.c:3731 +#: describe.c:3837 msgid "disabled" msgstr "avstängd" -#: describe.c:3732 describe.c:5314 +#: describe.c:3838 describe.c:5438 msgid "Enabled" msgstr "Påslagen" -#: describe.c:3733 +#: describe.c:3839 msgid "Procedure" msgstr "Procedur" -#: describe.c:3734 +#: describe.c:3840 msgid "Tags" msgstr "Etiketter" -#: describe.c:3753 +#: describe.c:3859 msgid "List of event triggers" msgstr "Lista med händelseutlösare" -#: describe.c:3795 +#: describe.c:3901 msgid "Source type" msgstr "Källtyp" -#: describe.c:3796 +#: describe.c:3902 msgid "Target type" msgstr "Måltyp" -#: describe.c:3799 +#: describe.c:3905 msgid "in assignment" msgstr "i tilldelning" -#: describe.c:3801 +#: describe.c:3907 msgid "Implicit?" msgstr "Implicit?" 
-#: describe.c:3852 +#: describe.c:3958 msgid "List of casts" msgstr "Lista med typomvandlingar" -#: describe.c:3880 +#: describe.c:3986 #, c-format msgid "The server (version %s) does not support collations.\n" msgstr "Servern (version %s) stöder inte jämförelser (collations).\n" -#: describe.c:3901 +#: describe.c:4007 msgid "Provider" msgstr "Leverantör" -#: describe.c:3936 +#: describe.c:4042 msgid "List of collations" msgstr "Lista med jämförelser (collations)" -#: describe.c:3995 +#: describe.c:4101 msgid "List of schemas" msgstr "Lista med scheman" -#: describe.c:4020 describe.c:4267 describe.c:4338 describe.c:4409 +#: describe.c:4126 describe.c:4373 describe.c:4444 describe.c:4515 #, c-format msgid "The server (version %s) does not support full text search.\n" msgstr "Servern (version %s) stöder inte fulltextsökning.\n" -#: describe.c:4055 +#: describe.c:4161 msgid "List of text search parsers" msgstr "Lista med textsökparsrar" -#: describe.c:4100 +#: describe.c:4206 #, c-format msgid "Did not find any text search parser named \"%s\".\n" msgstr "Kunde inte hitta en textsökparser med namn \"%s\".\n" -#: describe.c:4103 +#: describe.c:4209 #, c-format msgid "Did not find any text search parsers.\n" msgstr "Kunde inte hitta några textsökparsrar.\n" -#: describe.c:4178 +#: describe.c:4284 msgid "Start parse" msgstr "Starta parsning" -#: describe.c:4179 +#: describe.c:4285 msgid "Method" msgstr "Metod" -#: describe.c:4183 +#: describe.c:4289 msgid "Get next token" msgstr "Hämta nästa symbol" -#: describe.c:4185 +#: describe.c:4291 msgid "End parse" msgstr "Avsluta parsning" -#: describe.c:4187 +#: describe.c:4293 msgid "Get headline" msgstr "Hämta rubrik" -#: describe.c:4189 +#: describe.c:4295 msgid "Get token types" msgstr "Hämta symboltyper" -#: describe.c:4200 +#: describe.c:4306 #, c-format msgid "Text search parser \"%s.%s\"" msgstr "Textsökparser \"%s.%s\"" -#: describe.c:4203 +#: describe.c:4309 #, c-format msgid "Text search parser \"%s\"" msgstr "Textsökparser \"%s\"" -#: describe.c:4222 +#: describe.c:4328 msgid "Token name" msgstr "Symbolnamn" -#: describe.c:4233 +#: describe.c:4339 #, c-format msgid "Token types for parser \"%s.%s\"" msgstr "Symboltyper för parser \"%s.%s\"" -#: describe.c:4236 +#: describe.c:4342 #, c-format msgid "Token types for parser \"%s\"" msgstr "Symboltyper för parser \"%s\"" -#: describe.c:4290 +#: describe.c:4396 msgid "Template" msgstr "Mall" -#: describe.c:4291 +#: describe.c:4397 msgid "Init options" msgstr "Initieringsalternativ" -#: describe.c:4313 +#: describe.c:4419 msgid "List of text search dictionaries" msgstr "Lista med textsökordlistor" -#: describe.c:4356 +#: describe.c:4462 msgid "Init" msgstr "Init" -#: describe.c:4357 +#: describe.c:4463 msgid "Lexize" msgstr "Symboluppdelning" -#: describe.c:4384 +#: describe.c:4490 msgid "List of text search templates" msgstr "Lista med textsökmallar" -#: describe.c:4444 +#: describe.c:4550 msgid "List of text search configurations" msgstr "Lista med textsökkonfigurationer" -#: describe.c:4490 +#: describe.c:4596 #, c-format msgid "Did not find any text search configuration named \"%s\".\n" msgstr "Kunde inte hitta en textsökkonfiguration med namn \"%s\".\n" -#: describe.c:4493 +#: describe.c:4599 #, c-format msgid "Did not find any text search configurations.\n" msgstr "Kunde inte hitta några textsökkonfigurationer.\n" -#: describe.c:4559 +#: describe.c:4665 msgid "Token" msgstr "Symbol" -#: describe.c:4560 +#: describe.c:4666 msgid "Dictionaries" msgstr "Ordlistor" -#: describe.c:4571 +#: 
describe.c:4677 #, c-format msgid "Text search configuration \"%s.%s\"" msgstr "Textsökkonfiguration \"%s.%s\"" -#: describe.c:4574 +#: describe.c:4680 #, c-format msgid "Text search configuration \"%s\"" msgstr "Textsökkonfiguration \"%s\"" -#: describe.c:4578 +#: describe.c:4684 #, c-format msgid "" "\n" @@ -1899,7 +1938,7 @@ msgstr "" "\n" "Parser: \"%s.%s\"" -#: describe.c:4581 +#: describe.c:4687 #, c-format msgid "" "\n" @@ -1908,148 +1947,152 @@ msgstr "" "\n" "Parser: \"%s\"" -#: describe.c:4615 +#: describe.c:4721 #, c-format msgid "The server (version %s) does not support foreign-data wrappers.\n" msgstr "Servern (version %s) stöder inte främmande data-omvandlare.\n" -#: describe.c:4673 +#: describe.c:4779 msgid "List of foreign-data wrappers" msgstr "Lista med främmande data-omvandlare" -#: describe.c:4698 +#: describe.c:4804 #, c-format msgid "The server (version %s) does not support foreign servers.\n" msgstr "Servern (version %s) stöder inte främmande servrar.\n" -#: describe.c:4711 +#: describe.c:4817 msgid "Foreign-data wrapper" msgstr "Främmande data-omvandlare" -#: describe.c:4729 describe.c:4934 +#: describe.c:4835 describe.c:5040 msgid "Version" msgstr "Version" -#: describe.c:4755 +#: describe.c:4861 msgid "List of foreign servers" msgstr "Lista med främmande servrar" -#: describe.c:4780 +#: describe.c:4886 #, c-format msgid "The server (version %s) does not support user mappings.\n" msgstr "Servern (version %s) stöder inte användarmappningar.\n" -#: describe.c:4790 describe.c:4854 +#: describe.c:4896 describe.c:4960 msgid "Server" msgstr "Server" -#: describe.c:4791 +#: describe.c:4897 msgid "User name" msgstr "Användarnamn" -#: describe.c:4816 +#: describe.c:4922 msgid "List of user mappings" msgstr "Lista av användarmappningar" -#: describe.c:4841 +#: describe.c:4947 #, c-format msgid "The server (version %s) does not support foreign tables.\n" msgstr "Servern (version %s) stöder inte främmande tabeller.\n" -#: describe.c:4894 +#: describe.c:5000 msgid "List of foreign tables" msgstr "Lista med främmande tabeller" -#: describe.c:4919 describe.c:4976 +#: describe.c:5025 describe.c:5082 #, c-format msgid "The server (version %s) does not support extensions.\n" msgstr "Servern (version %s) stöder inte utökningar.\n" -#: describe.c:4951 +#: describe.c:5057 msgid "List of installed extensions" msgstr "Lista med installerade utökningar" -#: describe.c:5004 +#: describe.c:5110 #, c-format msgid "Did not find any extension named \"%s\".\n" msgstr "Kunde inte hitta en utökning med namn \"%s\".\n" -#: describe.c:5007 +#: describe.c:5113 #, c-format msgid "Did not find any extensions.\n" msgstr "Kunde inte hitta några utökningar.\n" -#: describe.c:5051 +#: describe.c:5157 msgid "Object description" msgstr "Objektbeskrivning" -#: describe.c:5061 +#: describe.c:5167 #, c-format msgid "Objects in extension \"%s\"" msgstr "Objekt i utökning \"%s\"" -#: describe.c:5090 describe.c:5156 +#: describe.c:5196 describe.c:5267 #, c-format msgid "The server (version %s) does not support publications.\n" msgstr "Servern (version %s) stöder inte publiceringar.\n" -#: describe.c:5107 describe.c:5219 +#: describe.c:5213 describe.c:5339 msgid "All tables" msgstr "Alla tabeller" -#: describe.c:5108 describe.c:5220 +#: describe.c:5214 describe.c:5340 msgid "Inserts" msgstr "Insättningar" -#: describe.c:5109 describe.c:5221 +#: describe.c:5215 describe.c:5341 msgid "Updates" msgstr "Uppdateringar" -#: describe.c:5110 describe.c:5222 +#: describe.c:5216 describe.c:5342 msgid "Deletes" msgstr 
"Borttagningar" -#: describe.c:5127 +#: describe.c:5220 describe.c:5344 +msgid "Truncates" +msgstr "Trunkerar" + +#: describe.c:5237 msgid "List of publications" msgstr "Lista med publiceringar" -#: describe.c:5188 +#: describe.c:5305 #, c-format msgid "Did not find any publication named \"%s\".\n" msgstr "Kunde inte hitta någon publicering med namn \"%s\".\n" -#: describe.c:5191 +#: describe.c:5308 #, c-format msgid "Did not find any publications.\n" msgstr "Kunde inte hitta några publiceringar.\n" -#: describe.c:5215 +#: describe.c:5335 #, c-format msgid "Publication %s" msgstr "Publicering %s" -#: describe.c:5255 +#: describe.c:5379 msgid "Tables:" msgstr "Tabeller:" -#: describe.c:5299 +#: describe.c:5423 #, c-format msgid "The server (version %s) does not support subscriptions.\n" msgstr "Denna server (version %s) stöder inte prenumerationer.\n" -#: describe.c:5315 +#: describe.c:5439 msgid "Publication" msgstr "Publicering" -#: describe.c:5322 +#: describe.c:5446 msgid "Synchronous commit" msgstr "Synkron commit" -#: describe.c:5323 +#: describe.c:5447 msgid "Conninfo" msgstr "Förbindelseinfo" -#: describe.c:5345 +#: describe.c:5469 msgid "List of subscriptions" msgstr "Lista med prenumerationer" @@ -2067,7 +2110,7 @@ msgstr "" "psql är den interaktiva PostgreSQL-terminalen.\n" "\n" -#: help.c:74 help.c:344 help.c:378 help.c:405 +#: help.c:74 help.c:345 help.c:419 help.c:462 #, c-format msgid "Usage:\n" msgstr "Användning:\n" @@ -2372,405 +2415,410 @@ msgstr " \\g [FILNAMN] eller ; kör frågan (och skriv resultatet till fil el #: help.c:177 #, c-format +msgid " \\gdesc describe result of query, without executing it\n" +msgstr " \\gdesc beskriv resultatet av fråga utan att köra den\n" + +#: help.c:178 +#, c-format msgid " \\gexec execute query, then execute each value in its result\n" msgstr " \\gexec kör fråga, kör sen varje värde i resultatet\n" -#: help.c:178 +#: help.c:179 #, c-format msgid " \\gset [PREFIX] execute query and store results in psql variables\n" msgstr " \\gset [PREFIX] kör frågan och spara resultatet i psql-variabler\n" -#: help.c:179 +#: help.c:180 #, c-format msgid " \\gx [FILE] as \\g, but forces expanded output mode\n" msgstr " \\gx [FIL] som \\g, men tvinga expanderat utmatningsläge\n" -#: help.c:180 +#: help.c:181 #, c-format msgid " \\q quit psql\n" msgstr " \\q avsluta psql\n" -#: help.c:181 +#: help.c:182 #, c-format msgid " \\watch [SEC] execute query every SEC seconds\n" msgstr " \\watch [SEK] kör fråga var SEK sekund\n" -#: help.c:184 +#: help.c:185 #, c-format msgid "Help\n" msgstr "Hjälp\n" -#: help.c:186 +#: help.c:187 #, c-format msgid " \\? [commands] show help on backslash commands\n" msgstr " \\? [kommandon] visa hjälp om backstreckkommandon\n" -#: help.c:187 +#: help.c:188 #, c-format msgid " \\? options show help on psql command-line options\n" msgstr " \\? options visa hjälp för psqls kommandoradflaggor\n" -#: help.c:188 +#: help.c:189 #, c-format msgid " \\? variables show help on special variables\n" msgstr " \\? 
variables visa hjälp om speciella variabler\n" -#: help.c:189 +#: help.c:190 #, c-format msgid " \\h [NAME] help on syntax of SQL commands, * for all commands\n" msgstr " \\h [NAMN] hjälp med syntaxen för SQL-kommandon, * för alla kommandon\n" -#: help.c:192 +#: help.c:193 #, c-format msgid "Query Buffer\n" msgstr "Frågebuffert\n" -#: help.c:193 +#: help.c:194 #, c-format msgid " \\e [FILE] [LINE] edit the query buffer (or file) with external editor\n" msgstr " \\e [FIL] [RAD] redigera frågebufferten (eller filen) med extern redigerare\n" -#: help.c:194 +#: help.c:195 #, c-format msgid " \\ef [FUNCNAME [LINE]] edit function definition with external editor\n" msgstr " \\ef [FUNKNAMN [RAD]] redigera funktionsdefinition med extern redigerare\n" -#: help.c:195 +#: help.c:196 #, c-format msgid " \\ev [VIEWNAME [LINE]] edit view definition with external editor\n" msgstr " \\ev [FUNKNAMN [RAD]] redigera vydefinition med extern redigerare\n" -#: help.c:196 +#: help.c:197 #, c-format msgid " \\p show the contents of the query buffer\n" msgstr " \\p visa innehållet i frågebufferten\n" -#: help.c:197 +#: help.c:198 #, c-format msgid " \\r reset (clear) the query buffer\n" msgstr " \\r nollställ (radera) frågebufferten\n" -#: help.c:199 +#: help.c:200 #, c-format msgid " \\s [FILE] display history or save it to file\n" msgstr " \\s [FILNAMN] visa kommandohistorien eller spara den i fil\n" -#: help.c:201 +#: help.c:202 #, c-format msgid " \\w FILE write query buffer to file\n" msgstr " \\w FILNAMN skriv frågebuffert till fil\n" -#: help.c:204 +#: help.c:205 #, c-format msgid "Input/Output\n" msgstr "In-/Utmatning\n" -#: help.c:205 +#: help.c:206 #, c-format msgid " \\copy ... perform SQL COPY with data stream to the client host\n" msgstr " \\copy ... utför SQL COPY med dataström till klientvärden\n" -#: help.c:206 +#: help.c:207 #, c-format msgid " \\echo [STRING] write string to standard output\n" msgstr " \\echo [TEXT] skriv text till standard ut\n" -#: help.c:207 +#: help.c:208 #, c-format msgid " \\i FILE execute commands from file\n" msgstr " \\i FILNAMN kör kommandon från fil\n" -#: help.c:208 +#: help.c:209 #, c-format msgid " \\ir FILE as \\i, but relative to location of current script\n" msgstr " \\ir FIL som \\i, men relativt platsen för aktuellt script\n" -#: help.c:209 +#: help.c:210 #, c-format msgid " \\o [FILE] send all query results to file or |pipe\n" msgstr " \\o [FIL] skicka frågeresultat till fil eller |rör\n" -#: help.c:210 +#: help.c:211 #, c-format msgid " \\qecho [STRING] write string to query output stream (see \\o)\n" msgstr " \\qecho [TEXT] skriv text till frågeutdataströmmen (se \\o)\n" -#: help.c:213 +#: help.c:214 #, c-format msgid "Conditional\n" msgstr "Villkor\n" -#: help.c:214 +#: help.c:215 #, c-format msgid " \\if EXPR begin conditional block\n" msgstr " \\if EXPR starta villkorsblock\n" -#: help.c:215 +#: help.c:216 #, c-format msgid " \\elif EXPR alternative within current conditional block\n" msgstr " \\elif EXPR alternativ inom aktuellt villkorsblock\n" -#: help.c:216 +#: help.c:217 #, c-format msgid " \\else final alternative within current conditional block\n" msgstr " \\else avslutningsalternativ inom aktuellt villkorsblock\n" -#: help.c:217 +#: help.c:218 #, c-format msgid " \\endif end conditional block\n" msgstr " \\endif avsluta villkorsblock\n" -#: help.c:220 +#: help.c:221 #, c-format msgid "Informational\n" msgstr "Informationer\n" -#: help.c:221 +#: help.c:222 #, c-format msgid " (options: S = show system objects, + = additional detail)\n" msgstr " 
(flaggor: S = lista systemobjekt, + = mer detaljer)\n" -#: help.c:222 +#: help.c:223 #, c-format msgid " \\d[S+] list tables, views, and sequences\n" msgstr " \\d[S+] lista tabeller, vyer och sekvenser\n" -#: help.c:223 +#: help.c:224 #, c-format msgid " \\d[S+] NAME describe table, view, sequence, or index\n" msgstr " \\d[S+] NAMN beskriv tabell, vy, sekvens eller index\n" -#: help.c:224 +#: help.c:225 #, c-format msgid " \\da[S] [PATTERN] list aggregates\n" msgstr " \\da[S] [MALL] lista aggregatfunktioner\n" -#: help.c:225 +#: help.c:226 #, c-format msgid " \\dA[+] [PATTERN] list access methods\n" msgstr " \\dA[+] [MALL] lista accessmetoder\n" -#: help.c:226 +#: help.c:227 #, c-format msgid " \\db[+] [PATTERN] list tablespaces\n" msgstr " \\db[+] [MALL] lista tabellutrymmen\n" -#: help.c:227 +#: help.c:228 #, c-format msgid " \\dc[S+] [PATTERN] list conversions\n" msgstr " \\dc[S+] [MALL] lista konverteringar\n" -#: help.c:228 +#: help.c:229 #, c-format msgid " \\dC[+] [PATTERN] list casts\n" msgstr " \\dC[+] [MALL] lista typomvandlingar\n" -#: help.c:229 +#: help.c:230 #, c-format msgid " \\dd[S] [PATTERN] show object descriptions not displayed elsewhere\n" msgstr " \\dd[S] [MALL] visa objektbeskrivning som inte visas på andra ställen\n" -#: help.c:230 +#: help.c:231 #, c-format msgid " \\dD[S+] [PATTERN] list domains\n" msgstr " \\dD[S+] [MALL] lista domäner\n" -#: help.c:231 +#: help.c:232 #, c-format msgid " \\ddp [PATTERN] list default privileges\n" msgstr " \\ddp [MALL] lista standardrättigheter\n" -#: help.c:232 +#: help.c:233 #, c-format msgid " \\dE[S+] [PATTERN] list foreign tables\n" msgstr " \\dE[S+] [MALL] lista främmande tabeller\n" -#: help.c:233 +#: help.c:234 #, c-format msgid " \\det[+] [PATTERN] list foreign tables\n" msgstr " \\det[+] [MALL] lista främmande tabeller\n" -#: help.c:234 +#: help.c:235 #, c-format msgid " \\des[+] [PATTERN] list foreign servers\n" msgstr " \\des[+] [MALL] lista främmande servrar\n" -#: help.c:235 +#: help.c:236 #, c-format msgid " \\deu[+] [PATTERN] list user mappings\n" msgstr " \\deu[+] [MALL] lista användarmappning\n" -#: help.c:236 +#: help.c:237 #, c-format msgid " \\dew[+] [PATTERN] list foreign-data wrappers\n" msgstr " \\dew[+] [MALL] lista främmande data-omvandlare\n" -#: help.c:237 +#: help.c:238 #, c-format msgid " \\df[antw][S+] [PATRN] list [only agg/normal/trigger/window] functions\n" msgstr " \\df[antw][S+] [MALL] lista [endast agg/normala/utlösar/window] funktioner\n" -#: help.c:238 +#: help.c:239 #, c-format msgid " \\dF[+] [PATTERN] list text search configurations\n" msgstr " \\dF[+] [MALL] lista textsökkonfigurationer\n" -#: help.c:239 +#: help.c:240 #, c-format msgid " \\dFd[+] [PATTERN] list text search dictionaries\n" msgstr " \\dFd[+] [MALL] lista textsökordlistor\n" -#: help.c:240 +#: help.c:241 #, c-format msgid " \\dFp[+] [PATTERN] list text search parsers\n" msgstr " \\dFp[+] [MALL] lista textsökparsrar\n" -#: help.c:241 +#: help.c:242 #, c-format msgid " \\dFt[+] [PATTERN] list text search templates\n" msgstr " \\dFt[+] [MALL] lista textsökmallar\n" -#: help.c:242 +#: help.c:243 #, c-format msgid " \\dg[S+] [PATTERN] list roles\n" msgstr " \\dg[S+] [MALL] lista roller\n" -#: help.c:243 +#: help.c:244 #, c-format msgid " \\di[S+] [PATTERN] list indexes\n" msgstr " \\di[S+] [MALL] lista index\n" -#: help.c:244 +#: help.c:245 #, c-format msgid " \\dl list large objects, same as \\lo_list\n" msgstr " \\dl lista stora objekt, samma som \\lo_list\n" -#: help.c:245 +#: help.c:246 #, c-format msgid " \\dL[S+] [PATTERN] 
list procedural languages\n" msgstr " \\dL[S+] [MALL] lista procedurspråk\n" -#: help.c:246 +#: help.c:247 #, c-format msgid " \\dm[S+] [PATTERN] list materialized views\n" msgstr " \\dm[S+] [MALL] lista materialiserade vyer\n" -#: help.c:247 +#: help.c:248 #, c-format msgid " \\dn[S+] [PATTERN] list schemas\n" msgstr " \\dn[S+] [MALL] lista scheman\n" -#: help.c:248 +#: help.c:249 #, c-format msgid " \\do[S] [PATTERN] list operators\n" msgstr " \\do[S] [MALL] lista operatorer\n" -#: help.c:249 +#: help.c:250 #, c-format msgid " \\dO[S+] [PATTERN] list collations\n" -msgstr " \\dO[S+] [MALL] lista sorteringar (collation)\n" +msgstr " \\dO[S+] [MALL] lista jämförelser (collation)\n" -#: help.c:250 +#: help.c:251 #, c-format msgid " \\dp [PATTERN] list table, view, and sequence access privileges\n" msgstr " \\dp [MALL] lista åtkomsträttigheter för tabeller, vyer och sekvenser\n" -#: help.c:251 +#: help.c:252 #, c-format msgid " \\drds [PATRN1 [PATRN2]] list per-database role settings\n" msgstr " \\drds [MALL1 [MALL2]] lista rollinställningar per databas\n" -#: help.c:252 +#: help.c:253 #, c-format msgid " \\dRp[+] [PATTERN] list replication publications\n" msgstr " \\dRp[+] [MALL] lista replikeringspubliceringar\n" -#: help.c:253 +#: help.c:254 #, c-format msgid " \\dRs[+] [PATTERN] list replication subscriptions\n" msgstr " \\dRs[+] [MALL] lista replikeringsprenumerationer\n" -#: help.c:254 +#: help.c:255 #, c-format msgid " \\ds[S+] [PATTERN] list sequences\n" msgstr " \\ds[S+] [MALL] lista sekvenser\n" -#: help.c:255 +#: help.c:256 #, c-format msgid " \\dt[S+] [PATTERN] list tables\n" msgstr " \\dt[S+] [MALL] lista tabeller\n" -#: help.c:256 +#: help.c:257 #, c-format msgid " \\dT[S+] [PATTERN] list data types\n" msgstr " \\dT[S+] [MALL] lista datatyper\n" -#: help.c:257 +#: help.c:258 #, c-format msgid " \\du[S+] [PATTERN] list roles\n" msgstr " \\du[S+] [MALL] lista roller\n" -#: help.c:258 +#: help.c:259 #, c-format msgid " \\dv[S+] [PATTERN] list views\n" msgstr " \\dv[S+] [MALL] lista vyer\n" -#: help.c:259 +#: help.c:260 #, c-format msgid " \\dx[+] [PATTERN] list extensions\n" msgstr " \\dx[+] [MALL] lista utökningar\n" -#: help.c:260 +#: help.c:261 #, c-format msgid " \\dy [PATTERN] list event triggers\n" msgstr " \\dy [MALL] lista händelseutlösare\n" -#: help.c:261 +#: help.c:262 #, c-format msgid " \\l[+] [PATTERN] list databases\n" msgstr " \\l[+] [MALL] lista databaser\n" -#: help.c:262 +#: help.c:263 #, c-format msgid " \\sf[+] FUNCNAME show a function's definition\n" msgstr " \\sf[+] FUNKNAMN visa en funktions definition\n" -#: help.c:263 +#: help.c:264 #, c-format msgid " \\sv[+] VIEWNAME show a view's definition\n" msgstr " \\sv[+] VYNAMN visa en vys definition\n" -#: help.c:264 +#: help.c:265 #, c-format msgid " \\z [PATTERN] same as \\dp\n" msgstr " \\z [MALL] samma som \\dp\n" -#: help.c:267 +#: help.c:268 #, c-format msgid "Formatting\n" msgstr "Formatering\n" -#: help.c:268 +#: help.c:269 #, c-format msgid " \\a toggle between unaligned and aligned output mode\n" msgstr " \\a byt mellan ojusterat och justerat utdataformat\n" -#: help.c:269 +#: help.c:270 #, c-format msgid " \\C [STRING] set table title, or unset if none\n" msgstr " \\C [TEXT] sätt tabelltitel, eller nollställ\n" -#: help.c:270 +#: help.c:271 #, c-format msgid " \\f [STRING] show or set field separator for unaligned query output\n" msgstr " \\f [TEXT] visa eller sätt fältseparatorn för ojusterad utmatning\n" -#: help.c:271 +#: help.c:272 #, c-format msgid " \\H toggle HTML output mode (currently %s)\n" 
msgstr " \\H slå på/av HTML-utskriftsläge (för närvarande: %s)\n" -#: help.c:273 +#: help.c:274 #, c-format msgid "" " \\pset [NAME [VALUE]] set table output option\n" @@ -2787,27 +2835,27 @@ msgstr "" " tuples_only|unicode_border_linestyle|\n" " unicode_column_linestyle|unicode_header_linestyle})\n" -#: help.c:279 +#: help.c:280 #, c-format msgid " \\t [on|off] show only rows (currently %s)\n" msgstr " \\t [on|off] visa endast rader (för närvarande: %s)\n" -#: help.c:281 +#: help.c:282 #, c-format msgid " \\T [STRING] set HTML
<table> tag attributes, or unset if none\n" msgstr " \\T [TEXT] sätt HTML-tabellens <table>
-attribut, eller nollställ\n" -#: help.c:282 +#: help.c:283 #, c-format msgid " \\x [on|off|auto] toggle expanded output (currently %s)\n" msgstr " \\x [on|off|auto] slå på/av utökad utskrift (för närvarande: %s)\n" -#: help.c:286 +#: help.c:287 #, c-format msgid "Connection\n" msgstr "Förbindelse\n" -#: help.c:288 +#: help.c:289 #, c-format msgid "" " \\c[onnect] {[DBNAME|- USER|- HOST|- PORT|-] | conninfo}\n" @@ -2816,7 +2864,7 @@ msgstr "" " \\c[onnect] {[DBNAMN|- ANVÄNDARE|- VÄRD|- PORT|-] | conninfo}\n" " koppla upp mot ny databas (för närvarande \"%s\")\n" -#: help.c:292 +#: help.c:293 #, c-format msgid "" " \\c[onnect] {[DBNAME|- USER|- HOST|- PORT|-] | conninfo}\n" @@ -2825,72 +2873,72 @@ msgstr "" " \\c[onnect] {[DBNAMN|- ANVÄNDARE|- VÄRD|- PORT|-] | conninfo}\n" " koppla upp mot ny databas (för närvarande ingen uppkoppling)\n" -#: help.c:294 +#: help.c:295 #, c-format msgid " \\conninfo display information about current connection\n" msgstr " \\conninfo visa information om aktuell uppkoppling\n" -#: help.c:295 +#: help.c:296 #, c-format msgid " \\encoding [ENCODING] show or set client encoding\n" msgstr " \\encoding [KODNING] visa eller sätt klientens teckenkodning\n" -#: help.c:296 +#: help.c:297 #, c-format msgid " \\password [USERNAME] securely change the password for a user\n" msgstr " \\password [ANVÄNDARNAMN] byt användares lösenord på ett säkert sätt\n" -#: help.c:299 +#: help.c:300 #, c-format msgid "Operating System\n" msgstr "Operativsystem\n" -#: help.c:300 +#: help.c:301 #, c-format msgid " \\cd [DIR] change the current working directory\n" msgstr " \\cd [KATALOG] byt den aktuella katalogen\n" -#: help.c:301 +#: help.c:302 #, c-format msgid " \\setenv NAME [VALUE] set or unset environment variable\n" msgstr " \\setenv NAMN [VÄRDE] sätt eller nollställ omgivningsvariabel\n" -#: help.c:302 +#: help.c:303 #, c-format msgid " \\timing [on|off] toggle timing of commands (currently %s)\n" msgstr " \\timing [on|off] slå på/av tidstagning av kommandon (för närvarande: %s)\n" -#: help.c:304 +#: help.c:305 #, c-format msgid " \\! [COMMAND] execute command in shell or start interactive shell\n" msgstr " \\! 
[KOMMANDO] kör kommando i skal eller starta interaktivt skal\n" -#: help.c:307 +#: help.c:308 #, c-format msgid "Variables\n" msgstr "Variabler\n" -#: help.c:308 +#: help.c:309 #, c-format msgid " \\prompt [TEXT] NAME prompt user to set internal variable\n" msgstr " \\prompt [TEXT] NAMN be användaren att sätta en intern variabel\n" -#: help.c:309 +#: help.c:310 #, c-format msgid " \\set [NAME [VALUE]] set internal variable, or list all if no parameters\n" msgstr " \\set [NAMN [VÄRDE]] sätt intern variabel, eller lista alla om ingen param\n" -#: help.c:310 +#: help.c:311 #, c-format msgid " \\unset NAME unset (delete) internal variable\n" msgstr " \\unset NAME ta bort intern variabel\n" -#: help.c:313 +#: help.c:314 #, c-format msgid "Large Objects\n" msgstr "Stora objekt\n" -#: help.c:314 +#: help.c:315 #, c-format msgid "" " \\lo_export LOBOID FILE\n" @@ -2903,19 +2951,19 @@ msgstr "" " \\lo_list\n" " \\lo_unlink LOBOID operationer på stora objekt\n" -#: help.c:341 +#: help.c:342 #, c-format msgid "" "List of specially treated variables\n" "\n" msgstr "Lista av variabler som hanteras speciellt\n" -#: help.c:343 +#: help.c:344 #, c-format msgid "psql variables:\n" msgstr "psql-variabler:\n" -#: help.c:345 +#: help.c:346 #, c-format msgid "" " psql --set=NAME=VALUE\n" @@ -2926,148 +2974,300 @@ msgstr "" " eller \\set NAMN VÄRDE inne i psql\n" "\n" -#: help.c:347 -#, c-format -msgid " AUTOCOMMIT if set, successful SQL commands are automatically committed\n" -msgstr " AUTOCOMMIT om satt, efterföljande SQL-kommandon commit:as automatiskt\n" - #: help.c:348 #, c-format msgid "" -" COMP_KEYWORD_CASE determines the case used to complete SQL key words\n" -" [lower, upper, preserve-lower, preserve-upper]\n" +" AUTOCOMMIT\n" +" if set, successful SQL commands are automatically committed\n" msgstr "" -" COMP_KEYWORD_CASE bestämmer skiftläge för att komplettera SQL-nyckelord\n" -" [lower, upper, preserve-lower, preserve-upper]\n" +" AUTOCOMMIT\n" +" om satt så kommer efterföljande SQL-kommandon commit:as automatiskt\n" #: help.c:350 #, c-format -msgid " DBNAME the currently connected database name\n" -msgstr " DBNAME den uppkopplade databasens namn\n" - -#: help.c:351 -#, c-format msgid "" -" ECHO controls what input is written to standard output\n" -" [all, errors, none, queries]\n" +" COMP_KEYWORD_CASE\n" +" determines the case used to complete SQL key words\n" +" [lower, upper, preserve-lower, preserve-upper]\n" msgstr "" -" ECHO bestämmer vilken indata som skrivs till standard ut\n" -" [all, errors, none, queries]\n" +" COMP_KEYWORD_CASE\n" +" bestämmer skiftläge för att komplettera SQL-nyckelord\n" +" [lower, upper, preserve-lower, preserve-upper]\n" #: help.c:353 #, c-format msgid "" -" ECHO_HIDDEN if set, display internal queries executed by backslash commands;\n" -" if set to \"noexec\", just show without execution\n" +" DBNAME\n" +" the currently connected database name\n" msgstr "" -" ECHO_HIDDEN om satt, visa interna frågor som körs av backåtstreckkommandon:\n" -" om satt till \"noexec\", bara visa dem utan att köra\n" +" DBNAME\n" +" den uppkopplade databasens namn\n" #: help.c:355 #, c-format -msgid " ENCODING current client character set encoding\n" -msgstr " ENCODING aktuell teckenkodning för klient\n" +msgid "" +" ECHO\n" +" controls what input is written to standard output\n" +" [all, errors, none, queries]\n" +msgstr "" +" ECHO\n" +" bestämmer vilken indata som skrivs till standard ut\n" +" [all, errors, none, queries]\n" -#: help.c:356 +#: help.c:358 #, c-format msgid "" -" 
FETCH_COUNT the number of result rows to fetch and display at a time\n" -" (default: 0=unlimited)\n" +" ECHO_HIDDEN\n" +" if set, display internal queries executed by backslash commands;\n" +" if set to \"noexec\", just show them without execution\n" msgstr "" -" FETCH_COUNT antal resultatrader som hämtas och visas åt gången\n" -" (standard: 0=obegränsat)\n" +" ECHO_HIDDEN\n" +" om satt, visa interna frågor som körs av backåtstreckkommandon:\n" +" om satt till \"noexec\", bara visa dem utan att köra\n" -#: help.c:358 +#: help.c:361 #, c-format -msgid " HISTCONTROL controls command history [ignorespace, ignoredups, ignoreboth]\n" -msgstr " HISTCONTROL styr kommandohistoriken [ignorespace, ignoredups, ignoreboth]\n" +msgid "" +" ENCODING\n" +" current client character set encoding\n" +msgstr "" +" ENCODING\n" +" aktuell teckenkodning för klient\n" -#: help.c:359 +#: help.c:363 #, c-format -msgid " HISTFILE file name used to store the command history\n" -msgstr " HISTFILE filnamn för att spara kommandohistoriken i\n" +msgid "" +" ERROR\n" +" true if last query failed, else false\n" +msgstr "" +" ERROR\n" +" sant om sista frågan misslyckades, falskt annars\n" -#: help.c:360 +#: help.c:365 #, c-format -msgid " HISTSIZE max number of commands to store in the command history\n" -msgstr " HISTSIZE max antal kommandon som sparas i kommandohistoriken\n" +msgid "" +" FETCH_COUNT\n" +" the number of result rows to fetch and display at a time (0 = unlimited)\n" +msgstr "" +" FETCH_COUNT\n" +" antal resultatrader som hämtas och visas åt gången (0=obegränsat)\n" -#: help.c:361 +#: help.c:367 #, c-format -msgid " HOST the currently connected database server host\n" -msgstr " HOST den uppkopplade databasens värd\n" +msgid "" +" HISTCONTROL\n" +" controls command history [ignorespace, ignoredups, ignoreboth]\n" +msgstr "" +" HISTCONTROL\n" +" styr kommandohistoriken [ignorespace, ignoredups, ignoreboth]\n" -#: help.c:362 +#: help.c:369 #, c-format -msgid " IGNOREEOF number of EOFs needed to terminate an interactive session\n" -msgstr " IGNOREEOF antal EOF som behövs för att avsluta en interaktiv session\n" +msgid "" +" HISTFILE\n" +" file name used to store the command history\n" +msgstr "" +" HISTFILE\n" +" filnamn för att spara kommandohistoriken i\n" -#: help.c:363 +#: help.c:371 #, c-format -msgid " LASTOID value of the last affected OID\n" -msgstr " LASTOID värdet av den senast påverkade OID\n" +msgid "" +" HISTSIZE\n" +" max number of commands to store in the command history\n" +msgstr "" +" HISTSIZE\n" +" max antal kommandon som sparas i kommandohistoriken\n" -#: help.c:364 +#: help.c:373 #, c-format -msgid " ON_ERROR_ROLLBACK if set, an error doesn't stop a transaction (uses implicit savepoints)\n" -msgstr " ON_ERROR_ROLLBACK om satt, ett fel stoppar inte en transaktion (använder implicita sparpunkter)\n" +msgid "" +" HOST\n" +" the currently connected database server host\n" +msgstr "" +" HOST\n" +" den uppkopplade databasens värd\n" -#: help.c:365 +#: help.c:375 +#, c-format +msgid "" +" IGNOREEOF\n" +" number of EOFs needed to terminate an interactive session\n" +msgstr "" +" IGNOREEOF\n" +" antal EOF som behövs för att avsluta en interaktiv session\n" + +#: help.c:377 #, c-format -msgid " ON_ERROR_STOP stop batch execution after error\n" -msgstr " ON_ERROR_STOP avsluta batchkörning vid fel\n" +msgid "" +" LASTOID\n" +" value of the last affected OID\n" +msgstr "" +" LASTOID\n" +" värdet av den senast påverkade OID:en\n" -#: help.c:366 +#: help.c:379 #, c-format -msgid " PORT server port of the 
current connection\n" -msgstr " PORT värdport för den aktuella uppkopplingen\n" +msgid "" +" LAST_ERROR_MESSAGE\n" +" LAST_ERROR_SQLSTATE\n" +" message and SQLSTATE of last error, or empty string and \"00000\" if none\n" +msgstr "" +" LAST_ERROR_MESSAGE\n" +" LAST_ERROR_SQLSTATE\n" +" meddelande och SQLSTATE för sista felet eller en tom sträng och \"00000\" om det inte varit fel\n" -#: help.c:367 +#: help.c:382 #, c-format -msgid " PROMPT1 specifies the standard psql prompt\n" -msgstr " PROMPT1 anger standardprompten för psql\n" +msgid "" +" ON_ERROR_ROLLBACK\n" +" if set, an error doesn't stop a transaction (uses implicit savepoints)\n" +msgstr "" +" ON_ERROR_ROLLBACK\n" +" om satt, ett fel stoppar inte en transaktion (använder implicita sparpunkter)\n" -#: help.c:368 +#: help.c:384 #, c-format -msgid " PROMPT2 specifies the prompt used when a statement continues from a previous line\n" -msgstr " PROMPT2 anger den prompt som används om en sats forsätter på efterföljande rad\n" +msgid "" +" ON_ERROR_STOP\n" +" stop batch execution after error\n" +msgstr "" +" ON_ERROR_STOP\n" +" avsluta batchkörning vid fel\n" -#: help.c:369 +#: help.c:386 #, c-format -msgid " PROMPT3 specifies the prompt used during COPY ... FROM STDIN\n" -msgstr " PROMPT3 anger den prompt som används för COPY ... FROM STDIN\n" +msgid "" +" PORT\n" +" server port of the current connection\n" +msgstr "" +" PORT\n" +" värdport för den aktuella uppkopplingen\n" -#: help.c:370 +#: help.c:388 #, c-format -msgid " QUIET run quietly (same as -q option)\n" -msgstr " QUIET kör tyst (samma som flaggan -q)\n" +msgid "" +" PROMPT1\n" +" specifies the standard psql prompt\n" +msgstr "" +" PROMPT1\n" +" anger standardprompten för psql\n" -#: help.c:371 +#: help.c:390 #, c-format -msgid " SHOW_CONTEXT controls display of message context fields [never, errors, always]\n" -msgstr " SHOW_CONTEXT styr visning av meddelandekontextfält [never, errors, always]\n" +msgid "" +" PROMPT2\n" +" specifies the prompt used when a statement continues from a previous line\n" +msgstr "" +" PROMPT2\n" +" anger den prompt som används om en sats forsätter på efterföljande rad\n" -#: help.c:372 +#: help.c:392 #, c-format -msgid " SINGLELINE end of line terminates SQL command mode (same as -S option)\n" -msgstr " SINGLELINE läge där slut på raden avslutar SQL-kommandon (samma som flaggan -S )\n" +msgid "" +" PROMPT3\n" +" specifies the prompt used during COPY ... FROM STDIN\n" +msgstr "" +" PROMPT3\n" +" anger den prompt som används för COPY ... 
FROM STDIN\n" -#: help.c:373 +#: help.c:394 +#, c-format +msgid "" +" QUIET\n" +" run quietly (same as -q option)\n" +msgstr "" +" QUIET\n" +" kör tyst (samma som flaggan -q)\n" + +#: help.c:396 +#, c-format +msgid "" +" ROW_COUNT\n" +" number of rows returned or affected by last query, or 0\n" +msgstr "" +" ROW_COUNT\n" +" antal rader som returnerades eller påverkades av senaste frågan alternativt 0\n" + +#: help.c:398 #, c-format -msgid " SINGLESTEP single-step mode (same as -s option)\n" -msgstr " SINGLESTEP stegningsläge (samma som flaggan -s)\n" +msgid "" +" SERVER_VERSION_NAME\n" +" SERVER_VERSION_NUM\n" +" server's version (in short string or numeric format)\n" +msgstr "" +" SERVER_VERSION_NUM\n" +" SERVER_VERSION_NAME\n" +" serverns version (i kort sträng eller numeriskt format)\n" -#: help.c:374 +#: help.c:401 #, c-format -msgid " USER the currently connected database user\n" -msgstr " USER den uppkopplade databasanvändaren\n" +msgid "" +" SHOW_CONTEXT\n" +" controls display of message context fields [never, errors, always]\n" +msgstr "" +" SHOW_CONTEXT\n" +" styr visning av meddelandekontextfält [never, errors, always]\n" -#: help.c:375 +#: help.c:403 #, c-format -msgid " VERBOSITY controls verbosity of error reports [default, verbose, terse]\n" -msgstr " VERBOSITY styr verbositet för felrapporter [default, verbose, terse]\n" +msgid "" +" SINGLELINE\n" +" if set, end of line terminates SQL commands (same as -S option)\n" +msgstr "" +" SINGLELINE\n" +" om satt, slut på raden avslutar SQL-kommandon (samma som flaggan -S )\n" -#: help.c:377 +#: help.c:405 +#, c-format +msgid "" +" SINGLESTEP\n" +" single-step mode (same as -s option)\n" +msgstr "" +" SINGLESTEP\n" +" stegningsläge (samma som flaggan -s)\n" + +#: help.c:407 +#, c-format +msgid "" +" SQLSTATE\n" +" SQLSTATE of last query, or \"00000\" if no error\n" +msgstr "" +" SQLSTATE\n" +" SQLSTATE för sista frågan eller \"00000\" om det inte varit fel\n" + +#: help.c:409 +#, c-format +msgid "" +" USER\n" +" the currently connected database user\n" +msgstr "" +" USER\n" +" den uppkopplade databasanvändaren\n" + +#: help.c:411 +#, c-format +msgid "" +" VERBOSITY\n" +" controls verbosity of error reports [default, verbose, terse]\n" +msgstr "" +" VERBOSITY\n" +" styr verbositet för felrapporter [default, verbose, terse]\n" + +#: help.c:413 +#, c-format +msgid "" +" VERSION\n" +" VERSION_NAME\n" +" VERSION_NUM\n" +" psql's version (in verbose string, short string, or numeric format)\n" +msgstr "" +" VERSION\n" +" VERSION_NAME\n" +" VERSION_NUM\n" +" psql:s version (i lång sträng, kort sträng eller numeriskt format)\n" + +#: help.c:418 #, c-format msgid "" "\n" @@ -3076,7 +3276,7 @@ msgstr "" "\n" "Visningsinställningar:\n" -#: help.c:379 +#: help.c:420 #, c-format msgid "" " psql --pset=NAME[=VALUE]\n" @@ -3087,108 +3287,166 @@ msgstr "" " eller \\pset NAMN [VÄRDE] inne i psql\n" "\n" -#: help.c:381 +#: help.c:422 #, c-format -msgid " border border style (number)\n" -msgstr " border ramstil (nummer)\n" +msgid "" +" border\n" +" border style (number)\n" +msgstr "" +" border\n" +" ramstil (nummer)\n" -#: help.c:382 +#: help.c:424 #, c-format -msgid " columns target width for the wrapped format\n" -msgstr " columns målvidd för wrappade format\n" +msgid "" +" columns\n" +" target width for the wrapped format\n" +msgstr "" +" columns\n" +" målvidd för wrappade format\n" -#: help.c:383 +#: help.c:426 #, c-format -msgid " expanded (or x) expanded output [on, off, auto]\n" -msgstr " expanded (eller x) expanderad utdata [on, off, auto]\n" 
+msgid "" +" expanded (or x)\n" +" expanded output [on, off, auto]\n" +msgstr "" +" expanded (eller x)\n" +" expanderad utdata [on, off, auto]\n" -#: help.c:384 +#: help.c:428 #, c-format -msgid " fieldsep field separator for unaligned output (default \"%s\")\n" -msgstr " fieldsep fältseparator för ej justeras utdata (standard \"%s\")\n" +msgid "" +" fieldsep\n" +" field separator for unaligned output (default \"%s\")\n" +msgstr "" +" fieldsep\n" +" fältseparator för ej justerad utdata (standard \"%s\")\n" -#: help.c:385 +#: help.c:431 #, c-format -msgid " fieldsep_zero set field separator for unaligned output to zero byte\n" -msgstr " fieldsep_zero sätt fältseparator för ej justerad utdata till noll-byte\n" +msgid "" +" fieldsep_zero\n" +" set field separator for unaligned output to a zero byte\n" +msgstr "" +" fieldsep_zero\n" +" sätt fältseparator för ej justerad utdata till noll-byte\n" -#: help.c:386 +#: help.c:433 #, c-format -msgid " footer enable or disable display of the table footer [on, off]\n" -msgstr " footer slå på/av visning av tabellfot [on, off]\n" +msgid "" +" footer\n" +" enable or disable display of the table footer [on, off]\n" +msgstr "" +" footer\n" +" slå på/av visning av tabellfot [on, off]\n" -#: help.c:387 +#: help.c:435 #, c-format -msgid " format set output format [unaligned, aligned, wrapped, html, asciidoc, ...]\n" -msgstr " format sätt utdataformat [unaligned, aligned, wrapped, html, asciidoc, ...]\n" +msgid "" +" format\n" +" set output format [unaligned, aligned, wrapped, html, asciidoc, ...]\n" +msgstr "" +" format\n" +" sätt utdataformat [unaligned, aligned, wrapped, html, asciidoc, ...]\n" -#: help.c:388 +#: help.c:437 #, c-format -msgid " linestyle set the border line drawing style [ascii, old-ascii, unicode]\n" -msgstr " linestyle sätt ramlinjestil [ascii, old-ascii, unicode]\n" +msgid "" +" linestyle\n" +" set the border line drawing style [ascii, old-ascii, unicode]\n" +msgstr "" +" linestyle\n" +" sätt ramlinjestil [ascii, old-ascii, unicode]\n" -#: help.c:389 +#: help.c:439 #, c-format -msgid " null set the string to be printed in place of a null value\n" -msgstr " null sätt sträng som visas istället för null-värden\n" +msgid "" +" null\n" +" set the string to be printed in place of a null value\n" +msgstr "" +" null\n" +" sätt sträng som visas istället för null-värden\n" -#: help.c:390 +#: help.c:441 #, c-format msgid "" -" numericlocale enable or disable display of a locale-specific character to separate\n" -" groups of digits [on, off]\n" +" numericlocale\n" +" enable display of a locale-specific character to separate groups of digits\n" msgstr "" -" numericlocale slå på/av visning av lokalspecifika tecken för gruppering\n" -" av siffror [on, off]\n" +" numericlocale\n" +" slå på visning av lokalspecifika tecken för gruppering av siffror\n" -#: help.c:392 +#: help.c:443 #, c-format -msgid " pager control when an external pager is used [yes, no, always]\n" -msgstr " pager styr när en extern pagenerare används [yes, no, always]\n" +msgid "" +" pager\n" +" control when an external pager is used [yes, no, always]\n" +msgstr "" +" pager\n" +" styr när en extern pagenerare används [yes, no, always]\n" -#: help.c:393 +#: help.c:445 #, c-format -msgid " recordsep record (line) separator for unaligned output\n" -msgstr " recordsep post (rad) separator för ej justerad utdata\n" +msgid "" +" recordsep\n" +" record (line) separator for unaligned output\n" +msgstr "" +" recordsep\n" +" post (rad) separator för ej justerad utdata\n" -#: help.c:394 +#: 
help.c:447 #, c-format -msgid " recordsep_zero set record separator for unaligned output to zero byte\n" -msgstr " recordsep_zero sätt postseparator för ej justerad utdata till noll-byte\n" +msgid "" +" recordsep_zero\n" +" set record separator for unaligned output to a zero byte\n" +msgstr "" +" recordsep_zero\n" +" sätt postseparator för ej justerad utdata till noll-byte\n" -#: help.c:395 +#: help.c:449 #, c-format msgid "" -" tableattr (or T) specify attributes for table tag in html format or proportional\n" -" column widths for left-aligned data types in latex-longtable format\n" +" tableattr (or T)\n" +" specify attributes for table tag in html format, or proportional\n" +" column widths for left-aligned data types in latex-longtable format\n" msgstr "" -" tableattr (el. T) ange attribut för tabelltaggen i html-format eller proportionella\n" -" kolumnvidder för vänsterjusterade datatypet i latex-longtable-format\n" +" tableattr (el. T)\n" +" ange attribut för tabelltaggen i html-format eller proportionella\n" +" kolumnvidder för vänsterjusterade datatypet i latex-longtable-format\n" -#: help.c:397 +#: help.c:452 #, c-format -msgid " title set the table title for any subsequently printed tables\n" -msgstr " title sätt tabelltitel för efterkommande tabellutskrifter\n" +msgid "" +" title\n" +" set the table title for subsequently printed tables\n" +msgstr "" +" title\n" +" sätt tabelltitel för efterkommande tabellutskrifter\n" -#: help.c:398 +#: help.c:454 #, c-format -msgid " tuples_only if set, only actual table data is shown\n" -msgstr " tuples_only om satt, bara tabelldatan visas\n" +msgid "" +" tuples_only\n" +" if set, only actual table data is shown\n" +msgstr "" +" tuples_only\n" +" om satt, bara tabelldatan visas\n" -#: help.c:399 +#: help.c:456 #, c-format msgid "" " unicode_border_linestyle\n" " unicode_column_linestyle\n" " unicode_header_linestyle\n" -" set the style of Unicode line drawing [single, double]\n" +" set the style of Unicode line drawing [single, double]\n" msgstr "" " unicode_border_linestyle\n" " unicode_column_linestyle\n" " unicode_header_linestyle\n" -" sätter stilen på Unicode-linjer [single, double]\n" +" sätter stilen på Unicode-linjer [single, double]\n" -#: help.c:404 +#: help.c:461 #, c-format msgid "" "\n" @@ -3197,7 +3455,7 @@ msgstr "" "\n" "Omgivningsvariabler:\n" -#: help.c:408 +#: help.c:465 #, c-format msgid "" " NAME=VALUE [NAME=VALUE] psql ...\n" @@ -3208,7 +3466,7 @@ msgstr "" " eller \\setenv NAMN [VÄRDE] inne psql\n" "\n" -#: help.c:410 +#: help.c:467 #, c-format msgid "" " set NAME=VALUE\n" @@ -3221,94 +3479,146 @@ msgstr "" " eller \\setenv NAMN [VÄRDE] inne i psql\n" "\n" -#: help.c:413 -#, c-format -msgid " COLUMNS number of columns for wrapped format\n" -msgstr " COLUMNS antal kolumner i wrappade format\n" - -#: help.c:414 +#: help.c:470 #, c-format -msgid " PAGER name of external pager program\n" -msgstr " PAGER namnet på den externa pageneraren\n" +msgid "" +" COLUMNS\n" +" number of columns for wrapped format\n" +msgstr "" +" COLUMNS\n" +" antal kolumner i wrappade format\n" -#: help.c:415 +#: help.c:472 #, c-format -msgid " PGAPPNAME same as the application_name connection parameter\n" -msgstr " PGAPPNAME samma som anslutningsparametern \"application_name\"\n" +msgid "" +" PGAPPNAME\n" +" same as the application_name connection parameter\n" +msgstr "" +" PGAPPNAME\n" +" samma som anslutningsparametern \"application_name\"\n" -#: help.c:416 +#: help.c:474 #, c-format -msgid " PGDATABASE same as the dbname connection parameter\n" 
-msgstr " PGDATABASE samma som anslutningsparametern \"dbname\"\n" +msgid "" +" PGDATABASE\n" +" same as the dbname connection parameter\n" +msgstr "" +" PGDATABASE\n" +" samma som anslutningsparametern \"dbname\"\n" -#: help.c:417 +#: help.c:476 #, c-format -msgid " PGHOST same as the host connection parameter\n" -msgstr " PGHOST samma som anslutningsparametern \"host\"\n" +msgid "" +" PGHOST\n" +" same as the host connection parameter\n" +msgstr "" +" PGHOST\n" +" samma som anslutningsparametern \"host\"\n" -#: help.c:418 +#: help.c:478 #, c-format -msgid " PGPASSWORD connection password (not recommended)\n" -msgstr " PGPASSWORD uppkoppingens lösenord (rekommenderas inte)\n" +msgid "" +" PGPASSWORD\n" +" connection password (not recommended)\n" +msgstr "" +" PGPASSWORD\n" +" uppkoppingens lösenord (rekommenderas inte)\n" -#: help.c:419 +#: help.c:480 #, c-format -msgid " PGPASSFILE password file name\n" -msgstr " PGPASSFILE lösenordsfilnamn\n" +msgid "" +" PGPASSFILE\n" +" password file name\n" +msgstr "" +" PGPASSFILE\n" +" lösenordsfilnamn\n" -#: help.c:420 +#: help.c:482 #, c-format -msgid " PGPORT same as the port connection parameter\n" -msgstr " PGPORT samma som anslutingsparametern \"port\"\n" +msgid "" +" PGPORT\n" +" same as the port connection parameter\n" +msgstr "" +" PGPORT\n" +" samma som anslutingsparametern \"port\"\n" -#: help.c:421 +#: help.c:484 #, c-format -msgid " PGUSER same as the user connection parameter\n" -msgstr " PGUSER samma som anslutningsparametern \"user\"\n" +msgid "" +" PGUSER\n" +" same as the user connection parameter\n" +msgstr "" +" PGUSER\n" +" samma som anslutningsparametern \"user\"\n" -#: help.c:422 +#: help.c:486 #, c-format msgid "" " PSQL_EDITOR, EDITOR, VISUAL\n" -" editor used by the \\e, \\ef, and \\ev commands\n" +" editor used by the \\e, \\ef, and \\ev commands\n" msgstr "" " PSQL_EDITOR, EDITOR, VISUAL\n" -" redigerare som används av kommanona \\e, \\ef och \\ev\n" +" redigerare som används av kommanona \\e, \\ef och \\ev\n" -#: help.c:424 +#: help.c:488 #, c-format msgid "" " PSQL_EDITOR_LINENUMBER_ARG\n" -" how to specify a line number when invoking the editor\n" +" how to specify a line number when invoking the editor\n" msgstr "" " PSQL_EDITOR_LINENUMBER_ARG\n" -" hur radnummer anges när redigerare startas\n" +" hur radnummer anges när redigerare startas\n" -#: help.c:426 +#: help.c:490 #, c-format -msgid " PSQL_HISTORY alternative location for the command history file\n" -msgstr " PSQL_HISTORY alternativ plats för kommandohistorikfilen\n" +msgid "" +" PSQL_HISTORY\n" +" alternative location for the command history file\n" +msgstr "" +" PSQL_HISTORY\n" +" alternativ plats för kommandohistorikfilen\n" -#: help.c:427 +#: help.c:492 #, c-format -msgid " PSQLRC alternative location for the user's .psqlrc file\n" -msgstr " PSQLRC alternativ plats för användarens \".psqlrc\"-fil\n" +msgid "" +" PSQL_PAGER, PAGER\n" +" name of external pager program\n" +msgstr "" +" PAGER\n" +" namnet på den externa pageneraren\n" -#: help.c:428 +#: help.c:494 +#, c-format +msgid "" +" PSQLRC\n" +" alternative location for the user's .psqlrc file\n" +msgstr "" +" PSQLRC\n" +" alternativ plats för användarens \".psqlrc\"-fil\n" + +#: help.c:496 #, c-format -msgid " SHELL shell used by the \\! command\n" -msgstr " SHELL skalet som används av kommandot \\!\n" +msgid "" +" SHELL\n" +" shell used by the \\! 
command\n" +msgstr "" +" SHELL\n" +" skalet som används av kommandot \\!\n" -#: help.c:429 +#: help.c:498 #, c-format -msgid " TMPDIR directory for temporary files\n" -msgstr " TMPDIR katalog för temporärfiler\n" +msgid "" +" TMPDIR\n" +" directory for temporary files\n" +msgstr "" +" TMPDIR\n" +" katalog för temporärfiler\n" -#: help.c:472 +#: help.c:542 msgid "Available help:\n" msgstr "Tillgänglig hjälp:\n" -#: help.c:556 +#: help.c:626 #, c-format msgid "" "Command: %s\n" @@ -3323,7 +3633,7 @@ msgstr "" "%s\n" "\n" -#: help.c:572 +#: help.c:642 #, c-format msgid "" "No help available for \"%s\".\n" @@ -3388,11 +3698,19 @@ msgstr "" "Indatan är en PostgreSQL-specifik dump.\n" "Använd kommandoradsprogrammet pg_restore för att läsa in denna dump till databasen.\n" -#: mainloop.c:225 +#: mainloop.c:282 +msgid "Use \\? for help or press control-C to clear the input buffer." +msgstr "Använd \\? för hjälp eller tryck control-C för att nollställa inmatningsbufferten." + +#: mainloop.c:284 +msgid "Use \\? for help." +msgstr "Använd \\? för hjälp." + +#: mainloop.c:288 msgid "You are using psql, the command-line interface to PostgreSQL." msgstr "Du använder psql, den interaktiva PostgreSQL-terminalen." -#: mainloop.c:226 +#: mainloop.c:289 #, c-format msgid "" "Type: \\copyright for distribution terms\n" @@ -3407,2185 +3725,2273 @@ msgstr "" " \\g eller avsluta med semikolon för att köra en fråga\n" " \\q för att avsluta\n" -#: mainloop.c:339 mainloop.c:476 +#: mainloop.c:313 +msgid "Use \\q to quit." +msgstr "Använd \\q för att avsluta." + +#: mainloop.c:316 mainloop.c:340 +msgid "Use control-D to quit." +msgstr "Använd control-D för att avsluta." + +#: mainloop.c:318 mainloop.c:342 +msgid "Use control-C to quit." +msgstr "Använd control-C för att avsluta." 
+ +#: mainloop.c:449 mainloop.c:591 #, c-format msgid "query ignored; use \\endif or Ctrl-C to exit current \\if block\n" msgstr "fråga ignorerat; använd \\endif eller Ctrl-C för att avsluta aktuellt \\if-block\n" -#: mainloop.c:494 +#: mainloop.c:609 #, c-format msgid "reached EOF without finding closing \\endif(s)\n" msgstr "kom till EOF utan att hitta avslutande \\endif\n" -#: psqlscanslash.l:614 +#: psqlscanslash.l:637 #, c-format msgid "unterminated quoted string\n" msgstr "icketerminerad citerat sträng\n" -#: psqlscanslash.l:787 +#: psqlscanslash.l:810 #, c-format msgid "%s: out of memory\n" msgstr "%s: slut på minne\n" -#: sql_help.c:36 sql_help.c:39 sql_help.c:42 sql_help.c:66 sql_help.c:67 -#: sql_help.c:69 sql_help.c:71 sql_help.c:82 sql_help.c:84 sql_help.c:86 -#: sql_help.c:112 sql_help.c:118 sql_help.c:120 sql_help.c:122 sql_help.c:124 -#: sql_help.c:127 sql_help.c:129 sql_help.c:131 sql_help.c:236 sql_help.c:238 -#: sql_help.c:239 sql_help.c:241 sql_help.c:243 sql_help.c:246 sql_help.c:248 -#: sql_help.c:250 sql_help.c:252 sql_help.c:264 sql_help.c:265 sql_help.c:266 -#: sql_help.c:268 sql_help.c:315 sql_help.c:317 sql_help.c:319 sql_help.c:321 -#: sql_help.c:382 sql_help.c:387 sql_help.c:389 sql_help.c:432 sql_help.c:434 -#: sql_help.c:437 sql_help.c:439 sql_help.c:506 sql_help.c:511 sql_help.c:516 -#: sql_help.c:521 sql_help.c:526 sql_help.c:575 sql_help.c:577 sql_help.c:579 -#: sql_help.c:581 sql_help.c:584 sql_help.c:586 sql_help.c:597 sql_help.c:599 -#: sql_help.c:640 sql_help.c:642 sql_help.c:644 sql_help.c:647 sql_help.c:649 -#: sql_help.c:651 sql_help.c:684 sql_help.c:688 sql_help.c:692 sql_help.c:711 -#: sql_help.c:714 sql_help.c:717 sql_help.c:746 sql_help.c:758 sql_help.c:766 -#: sql_help.c:769 sql_help.c:772 sql_help.c:787 sql_help.c:790 sql_help.c:807 -#: sql_help.c:809 sql_help.c:811 sql_help.c:813 sql_help.c:816 sql_help.c:818 -#: sql_help.c:859 sql_help.c:882 sql_help.c:893 sql_help.c:895 sql_help.c:914 -#: sql_help.c:924 sql_help.c:926 sql_help.c:928 sql_help.c:940 sql_help.c:944 -#: sql_help.c:946 sql_help.c:957 sql_help.c:959 sql_help.c:961 sql_help.c:977 -#: sql_help.c:979 sql_help.c:983 sql_help.c:986 sql_help.c:987 sql_help.c:988 -#: sql_help.c:991 sql_help.c:993 sql_help.c:1084 sql_help.c:1086 -#: sql_help.c:1089 sql_help.c:1092 sql_help.c:1094 sql_help.c:1096 -#: sql_help.c:1099 sql_help.c:1102 sql_help.c:1168 sql_help.c:1170 -#: sql_help.c:1172 sql_help.c:1175 sql_help.c:1196 sql_help.c:1199 -#: sql_help.c:1202 sql_help.c:1205 sql_help.c:1209 sql_help.c:1211 -#: sql_help.c:1213 sql_help.c:1215 sql_help.c:1229 sql_help.c:1232 -#: sql_help.c:1234 sql_help.c:1236 sql_help.c:1246 sql_help.c:1248 -#: sql_help.c:1258 sql_help.c:1260 sql_help.c:1270 sql_help.c:1273 -#: sql_help.c:1295 sql_help.c:1297 sql_help.c:1299 sql_help.c:1302 -#: sql_help.c:1304 sql_help.c:1306 sql_help.c:1309 sql_help.c:1359 -#: sql_help.c:1397 sql_help.c:1400 sql_help.c:1402 sql_help.c:1404 -#: sql_help.c:1406 sql_help.c:1408 sql_help.c:1411 sql_help.c:1451 -#: sql_help.c:1662 sql_help.c:1726 sql_help.c:1745 sql_help.c:1758 -#: sql_help.c:1814 sql_help.c:1820 sql_help.c:1830 sql_help.c:1850 -#: sql_help.c:1875 sql_help.c:1893 sql_help.c:1922 sql_help.c:2015 -#: sql_help.c:2057 sql_help.c:2079 sql_help.c:2099 sql_help.c:2100 -#: sql_help.c:2135 sql_help.c:2155 sql_help.c:2177 sql_help.c:2191 -#: sql_help.c:2206 sql_help.c:2236 sql_help.c:2261 sql_help.c:2307 -#: sql_help.c:2573 sql_help.c:2586 sql_help.c:2603 sql_help.c:2619 -#: sql_help.c:2659 sql_help.c:2711 sql_help.c:2715 
sql_help.c:2717 -#: sql_help.c:2723 sql_help.c:2741 sql_help.c:2768 sql_help.c:2803 -#: sql_help.c:2815 sql_help.c:2824 sql_help.c:2868 sql_help.c:2882 -#: sql_help.c:2910 sql_help.c:2918 sql_help.c:2926 sql_help.c:2934 -#: sql_help.c:2942 sql_help.c:2950 sql_help.c:2958 sql_help.c:2966 -#: sql_help.c:2975 sql_help.c:2986 sql_help.c:2994 sql_help.c:3002 -#: sql_help.c:3010 sql_help.c:3018 sql_help.c:3028 sql_help.c:3037 -#: sql_help.c:3046 sql_help.c:3054 sql_help.c:3063 sql_help.c:3071 -#: sql_help.c:3079 sql_help.c:3088 sql_help.c:3096 sql_help.c:3104 -#: sql_help.c:3112 sql_help.c:3120 sql_help.c:3128 sql_help.c:3136 -#: sql_help.c:3144 sql_help.c:3152 sql_help.c:3160 sql_help.c:3168 -#: sql_help.c:3185 sql_help.c:3194 sql_help.c:3202 sql_help.c:3219 -#: sql_help.c:3234 sql_help.c:3502 sql_help.c:3553 sql_help.c:3582 -#: sql_help.c:3590 sql_help.c:4013 sql_help.c:4061 sql_help.c:4202 +#: sql_help.c:35 sql_help.c:38 sql_help.c:41 sql_help.c:65 sql_help.c:66 +#: sql_help.c:68 sql_help.c:70 sql_help.c:81 sql_help.c:83 sql_help.c:85 +#: sql_help.c:111 sql_help.c:117 sql_help.c:119 sql_help.c:121 sql_help.c:123 +#: sql_help.c:126 sql_help.c:128 sql_help.c:130 sql_help.c:235 sql_help.c:237 +#: sql_help.c:238 sql_help.c:240 sql_help.c:242 sql_help.c:245 sql_help.c:247 +#: sql_help.c:249 sql_help.c:251 sql_help.c:263 sql_help.c:264 sql_help.c:265 +#: sql_help.c:267 sql_help.c:316 sql_help.c:318 sql_help.c:320 sql_help.c:322 +#: sql_help.c:391 sql_help.c:396 sql_help.c:398 sql_help.c:441 sql_help.c:443 +#: sql_help.c:446 sql_help.c:448 sql_help.c:515 sql_help.c:520 sql_help.c:525 +#: sql_help.c:530 sql_help.c:535 sql_help.c:587 sql_help.c:589 sql_help.c:591 +#: sql_help.c:593 sql_help.c:595 sql_help.c:598 sql_help.c:600 sql_help.c:603 +#: sql_help.c:614 sql_help.c:616 sql_help.c:657 sql_help.c:659 sql_help.c:661 +#: sql_help.c:664 sql_help.c:666 sql_help.c:668 sql_help.c:701 sql_help.c:705 +#: sql_help.c:709 sql_help.c:728 sql_help.c:731 sql_help.c:734 sql_help.c:763 +#: sql_help.c:775 sql_help.c:783 sql_help.c:786 sql_help.c:789 sql_help.c:804 +#: sql_help.c:807 sql_help.c:836 sql_help.c:841 sql_help.c:846 sql_help.c:851 +#: sql_help.c:856 sql_help.c:878 sql_help.c:880 sql_help.c:882 sql_help.c:884 +#: sql_help.c:887 sql_help.c:889 sql_help.c:930 sql_help.c:974 sql_help.c:979 +#: sql_help.c:984 sql_help.c:989 sql_help.c:994 sql_help.c:1013 sql_help.c:1024 +#: sql_help.c:1026 sql_help.c:1045 sql_help.c:1055 sql_help.c:1057 +#: sql_help.c:1059 sql_help.c:1071 sql_help.c:1075 sql_help.c:1077 +#: sql_help.c:1088 sql_help.c:1090 sql_help.c:1092 sql_help.c:1108 +#: sql_help.c:1110 sql_help.c:1114 sql_help.c:1117 sql_help.c:1118 +#: sql_help.c:1119 sql_help.c:1122 sql_help.c:1124 sql_help.c:1257 +#: sql_help.c:1259 sql_help.c:1262 sql_help.c:1265 sql_help.c:1267 +#: sql_help.c:1269 sql_help.c:1272 sql_help.c:1275 sql_help.c:1387 +#: sql_help.c:1389 sql_help.c:1391 sql_help.c:1394 sql_help.c:1415 +#: sql_help.c:1418 sql_help.c:1421 sql_help.c:1424 sql_help.c:1428 +#: sql_help.c:1430 sql_help.c:1432 sql_help.c:1434 sql_help.c:1448 +#: sql_help.c:1451 sql_help.c:1453 sql_help.c:1455 sql_help.c:1465 +#: sql_help.c:1467 sql_help.c:1477 sql_help.c:1479 sql_help.c:1489 +#: sql_help.c:1492 sql_help.c:1514 sql_help.c:1516 sql_help.c:1518 +#: sql_help.c:1521 sql_help.c:1523 sql_help.c:1525 sql_help.c:1528 +#: sql_help.c:1578 sql_help.c:1620 sql_help.c:1623 sql_help.c:1625 +#: sql_help.c:1627 sql_help.c:1629 sql_help.c:1631 sql_help.c:1634 +#: sql_help.c:1681 sql_help.c:1697 sql_help.c:1918 sql_help.c:1987 +#: 
sql_help.c:2006 sql_help.c:2019 sql_help.c:2075 sql_help.c:2081 +#: sql_help.c:2091 sql_help.c:2111 sql_help.c:2136 sql_help.c:2154 +#: sql_help.c:2183 sql_help.c:2275 sql_help.c:2316 sql_help.c:2339 +#: sql_help.c:2360 sql_help.c:2361 sql_help.c:2396 sql_help.c:2416 +#: sql_help.c:2438 sql_help.c:2452 sql_help.c:2472 sql_help.c:2495 +#: sql_help.c:2525 sql_help.c:2550 sql_help.c:2596 sql_help.c:2867 +#: sql_help.c:2880 sql_help.c:2897 sql_help.c:2913 sql_help.c:2953 +#: sql_help.c:3005 sql_help.c:3009 sql_help.c:3011 sql_help.c:3017 +#: sql_help.c:3035 sql_help.c:3062 sql_help.c:3097 sql_help.c:3109 +#: sql_help.c:3118 sql_help.c:3162 sql_help.c:3176 sql_help.c:3204 +#: sql_help.c:3212 sql_help.c:3220 sql_help.c:3228 sql_help.c:3236 +#: sql_help.c:3244 sql_help.c:3252 sql_help.c:3260 sql_help.c:3269 +#: sql_help.c:3280 sql_help.c:3288 sql_help.c:3296 sql_help.c:3304 +#: sql_help.c:3312 sql_help.c:3322 sql_help.c:3331 sql_help.c:3340 +#: sql_help.c:3348 sql_help.c:3358 sql_help.c:3369 sql_help.c:3377 +#: sql_help.c:3386 sql_help.c:3397 sql_help.c:3406 sql_help.c:3414 +#: sql_help.c:3422 sql_help.c:3430 sql_help.c:3438 sql_help.c:3446 +#: sql_help.c:3454 sql_help.c:3462 sql_help.c:3470 sql_help.c:3478 +#: sql_help.c:3486 sql_help.c:3503 sql_help.c:3512 sql_help.c:3520 +#: sql_help.c:3537 sql_help.c:3552 sql_help.c:3820 sql_help.c:3871 +#: sql_help.c:3900 sql_help.c:3908 sql_help.c:4341 sql_help.c:4389 +#: sql_help.c:4530 msgid "name" msgstr "namn" -#: sql_help.c:37 sql_help.c:40 sql_help.c:43 sql_help.c:326 sql_help.c:1520 -#: sql_help.c:2883 sql_help.c:3807 +#: sql_help.c:36 sql_help.c:39 sql_help.c:42 sql_help.c:327 sql_help.c:1768 +#: sql_help.c:3177 sql_help.c:4127 msgid "aggregate_signature" msgstr "aggregatsignatur" -#: sql_help.c:38 sql_help.c:68 sql_help.c:83 sql_help.c:119 sql_help.c:251 -#: sql_help.c:269 sql_help.c:390 sql_help.c:438 sql_help.c:515 sql_help.c:561 -#: sql_help.c:576 sql_help.c:598 sql_help.c:648 sql_help.c:713 sql_help.c:768 -#: sql_help.c:789 sql_help.c:819 sql_help.c:860 sql_help.c:884 sql_help.c:894 -#: sql_help.c:927 sql_help.c:947 sql_help.c:960 sql_help.c:994 sql_help.c:1093 -#: sql_help.c:1169 sql_help.c:1212 sql_help.c:1233 sql_help.c:1247 -#: sql_help.c:1259 sql_help.c:1272 sql_help.c:1303 sql_help.c:1360 -#: sql_help.c:1405 +#: sql_help.c:37 sql_help.c:67 sql_help.c:82 sql_help.c:118 sql_help.c:250 +#: sql_help.c:268 sql_help.c:399 sql_help.c:447 sql_help.c:524 sql_help.c:570 +#: sql_help.c:588 sql_help.c:615 sql_help.c:665 sql_help.c:730 sql_help.c:785 +#: sql_help.c:806 sql_help.c:845 sql_help.c:890 sql_help.c:931 sql_help.c:983 +#: sql_help.c:1015 sql_help.c:1025 sql_help.c:1058 sql_help.c:1078 +#: sql_help.c:1091 sql_help.c:1125 sql_help.c:1266 sql_help.c:1388 +#: sql_help.c:1431 sql_help.c:1452 sql_help.c:1466 sql_help.c:1478 +#: sql_help.c:1491 sql_help.c:1522 sql_help.c:1579 sql_help.c:1628 msgid "new_name" msgstr "nytt_namn" -#: sql_help.c:41 sql_help.c:70 sql_help.c:85 sql_help.c:121 sql_help.c:249 -#: sql_help.c:267 sql_help.c:388 sql_help.c:474 sql_help.c:520 sql_help.c:600 -#: sql_help.c:609 sql_help.c:667 sql_help.c:687 sql_help.c:716 sql_help.c:771 -#: sql_help.c:817 sql_help.c:896 sql_help.c:925 sql_help.c:945 sql_help.c:958 -#: sql_help.c:992 sql_help.c:1153 sql_help.c:1171 sql_help.c:1214 -#: sql_help.c:1235 sql_help.c:1298 sql_help.c:1403 sql_help.c:2559 +#: sql_help.c:40 sql_help.c:69 sql_help.c:84 sql_help.c:120 sql_help.c:248 +#: sql_help.c:266 sql_help.c:397 sql_help.c:483 sql_help.c:529 sql_help.c:617 +#: sql_help.c:626 
sql_help.c:684 sql_help.c:704 sql_help.c:733 sql_help.c:788 +#: sql_help.c:850 sql_help.c:888 sql_help.c:988 sql_help.c:1027 sql_help.c:1056 +#: sql_help.c:1076 sql_help.c:1089 sql_help.c:1123 sql_help.c:1326 +#: sql_help.c:1390 sql_help.c:1433 sql_help.c:1454 sql_help.c:1517 +#: sql_help.c:1626 sql_help.c:2853 msgid "new_owner" msgstr "ny_ägare" -#: sql_help.c:44 sql_help.c:72 sql_help.c:87 sql_help.c:253 sql_help.c:318 -#: sql_help.c:440 sql_help.c:525 sql_help.c:650 sql_help.c:691 sql_help.c:719 -#: sql_help.c:774 sql_help.c:929 sql_help.c:962 sql_help.c:1095 -#: sql_help.c:1216 sql_help.c:1237 sql_help.c:1249 sql_help.c:1261 -#: sql_help.c:1305 sql_help.c:1407 +#: sql_help.c:43 sql_help.c:71 sql_help.c:86 sql_help.c:252 sql_help.c:319 +#: sql_help.c:449 sql_help.c:534 sql_help.c:667 sql_help.c:708 sql_help.c:736 +#: sql_help.c:791 sql_help.c:855 sql_help.c:993 sql_help.c:1060 sql_help.c:1093 +#: sql_help.c:1268 sql_help.c:1435 sql_help.c:1456 sql_help.c:1468 +#: sql_help.c:1480 sql_help.c:1524 sql_help.c:1630 msgid "new_schema" msgstr "nytt_schema" -#: sql_help.c:45 sql_help.c:1576 sql_help.c:2884 sql_help.c:3828 +#: sql_help.c:44 sql_help.c:1832 sql_help.c:3178 sql_help.c:4156 msgid "where aggregate_signature is:" msgstr "där aggregatsignatur är:" -#: sql_help.c:46 sql_help.c:49 sql_help.c:52 sql_help.c:336 sql_help.c:361 -#: sql_help.c:364 sql_help.c:367 sql_help.c:507 sql_help.c:512 sql_help.c:517 -#: sql_help.c:522 sql_help.c:527 sql_help.c:1538 sql_help.c:1577 -#: sql_help.c:1580 sql_help.c:1583 sql_help.c:1727 sql_help.c:1746 -#: sql_help.c:1749 sql_help.c:2016 sql_help.c:2885 sql_help.c:2888 -#: sql_help.c:2891 sql_help.c:2976 sql_help.c:3387 sql_help.c:3720 -#: sql_help.c:3813 sql_help.c:3829 sql_help.c:3832 sql_help.c:3835 +#: sql_help.c:45 sql_help.c:48 sql_help.c:51 sql_help.c:337 sql_help.c:350 +#: sql_help.c:354 sql_help.c:370 sql_help.c:373 sql_help.c:376 sql_help.c:516 +#: sql_help.c:521 sql_help.c:526 sql_help.c:531 sql_help.c:536 sql_help.c:837 +#: sql_help.c:842 sql_help.c:847 sql_help.c:852 sql_help.c:857 sql_help.c:975 +#: sql_help.c:980 sql_help.c:985 sql_help.c:990 sql_help.c:995 sql_help.c:1786 +#: sql_help.c:1803 sql_help.c:1809 sql_help.c:1833 sql_help.c:1836 +#: sql_help.c:1839 sql_help.c:1988 sql_help.c:2007 sql_help.c:2010 +#: sql_help.c:2276 sql_help.c:2473 sql_help.c:3179 sql_help.c:3182 +#: sql_help.c:3185 sql_help.c:3270 sql_help.c:3359 sql_help.c:3387 +#: sql_help.c:3705 sql_help.c:4038 sql_help.c:4133 sql_help.c:4140 +#: sql_help.c:4146 sql_help.c:4157 sql_help.c:4160 sql_help.c:4163 msgid "argmode" msgstr "arg_läge" -#: sql_help.c:47 sql_help.c:50 sql_help.c:53 sql_help.c:337 sql_help.c:362 -#: sql_help.c:365 sql_help.c:368 sql_help.c:508 sql_help.c:513 sql_help.c:518 -#: sql_help.c:523 sql_help.c:528 sql_help.c:1539 sql_help.c:1578 -#: sql_help.c:1581 sql_help.c:1584 sql_help.c:1728 sql_help.c:1747 -#: sql_help.c:1750 sql_help.c:2017 sql_help.c:2886 sql_help.c:2889 -#: sql_help.c:2892 sql_help.c:2977 sql_help.c:3814 sql_help.c:3830 -#: sql_help.c:3833 sql_help.c:3836 +#: sql_help.c:46 sql_help.c:49 sql_help.c:52 sql_help.c:338 sql_help.c:351 +#: sql_help.c:355 sql_help.c:371 sql_help.c:374 sql_help.c:377 sql_help.c:517 +#: sql_help.c:522 sql_help.c:527 sql_help.c:532 sql_help.c:537 sql_help.c:838 +#: sql_help.c:843 sql_help.c:848 sql_help.c:853 sql_help.c:858 sql_help.c:976 +#: sql_help.c:981 sql_help.c:986 sql_help.c:991 sql_help.c:996 sql_help.c:1787 +#: sql_help.c:1804 sql_help.c:1810 sql_help.c:1834 sql_help.c:1837 +#: sql_help.c:1840 
sql_help.c:1989 sql_help.c:2008 sql_help.c:2011 +#: sql_help.c:2277 sql_help.c:2474 sql_help.c:3180 sql_help.c:3183 +#: sql_help.c:3186 sql_help.c:3271 sql_help.c:3360 sql_help.c:3388 +#: sql_help.c:4134 sql_help.c:4141 sql_help.c:4147 sql_help.c:4158 +#: sql_help.c:4161 sql_help.c:4164 msgid "argname" msgstr "arg_namn" -#: sql_help.c:48 sql_help.c:51 sql_help.c:54 sql_help.c:338 sql_help.c:363 -#: sql_help.c:366 sql_help.c:369 sql_help.c:509 sql_help.c:514 sql_help.c:519 -#: sql_help.c:524 sql_help.c:529 sql_help.c:1540 sql_help.c:1579 -#: sql_help.c:1582 sql_help.c:1585 sql_help.c:2018 sql_help.c:2887 -#: sql_help.c:2890 sql_help.c:2893 sql_help.c:2978 sql_help.c:3815 -#: sql_help.c:3831 sql_help.c:3834 sql_help.c:3837 +#: sql_help.c:47 sql_help.c:50 sql_help.c:53 sql_help.c:339 sql_help.c:352 +#: sql_help.c:356 sql_help.c:372 sql_help.c:375 sql_help.c:378 sql_help.c:518 +#: sql_help.c:523 sql_help.c:528 sql_help.c:533 sql_help.c:538 sql_help.c:839 +#: sql_help.c:844 sql_help.c:849 sql_help.c:854 sql_help.c:859 sql_help.c:977 +#: sql_help.c:982 sql_help.c:987 sql_help.c:992 sql_help.c:997 sql_help.c:1788 +#: sql_help.c:1805 sql_help.c:1811 sql_help.c:1835 sql_help.c:1838 +#: sql_help.c:1841 sql_help.c:2278 sql_help.c:2475 sql_help.c:3181 +#: sql_help.c:3184 sql_help.c:3187 sql_help.c:3272 sql_help.c:3361 +#: sql_help.c:3389 sql_help.c:4135 sql_help.c:4142 sql_help.c:4148 +#: sql_help.c:4159 sql_help.c:4162 sql_help.c:4165 msgid "argtype" msgstr "arg_typ" -#: sql_help.c:113 sql_help.c:385 sql_help.c:463 sql_help.c:475 sql_help.c:854 -#: sql_help.c:942 sql_help.c:1230 sql_help.c:1354 sql_help.c:1382 -#: sql_help.c:1633 sql_help.c:1639 sql_help.c:1925 sql_help.c:1966 -#: sql_help.c:1973 sql_help.c:1982 sql_help.c:2058 sql_help.c:2237 -#: sql_help.c:2329 sql_help.c:2588 sql_help.c:2769 sql_help.c:2791 -#: sql_help.c:3254 sql_help.c:3421 +#: sql_help.c:112 sql_help.c:394 sql_help.c:472 sql_help.c:484 sql_help.c:925 +#: sql_help.c:1073 sql_help.c:1449 sql_help.c:1573 sql_help.c:1605 +#: sql_help.c:1652 sql_help.c:1889 sql_help.c:1895 sql_help.c:2186 +#: sql_help.c:2227 sql_help.c:2234 sql_help.c:2243 sql_help.c:2317 +#: sql_help.c:2526 sql_help.c:2618 sql_help.c:2882 sql_help.c:3063 +#: sql_help.c:3085 sql_help.c:3572 sql_help.c:3739 sql_help.c:4588 msgid "option" msgstr "flaggor" -#: sql_help.c:114 sql_help.c:855 sql_help.c:1355 sql_help.c:2059 -#: sql_help.c:2238 sql_help.c:2770 +#: sql_help.c:113 sql_help.c:926 sql_help.c:1574 sql_help.c:2318 +#: sql_help.c:2527 sql_help.c:3064 msgid "where option can be:" msgstr "där flaggor kan vara:" -#: sql_help.c:115 sql_help.c:1857 +#: sql_help.c:114 sql_help.c:2118 msgid "allowconn" msgstr "tillåtansl" -#: sql_help.c:116 sql_help.c:856 sql_help.c:1356 sql_help.c:1858 -#: sql_help.c:2239 sql_help.c:2771 +#: sql_help.c:115 sql_help.c:927 sql_help.c:1575 sql_help.c:2119 +#: sql_help.c:2528 sql_help.c:3065 msgid "connlimit" msgstr "anslutningstak" -#: sql_help.c:117 sql_help.c:1859 +#: sql_help.c:116 sql_help.c:2120 msgid "istemplate" msgstr "ärmall" -#: sql_help.c:123 sql_help.c:588 sql_help.c:653 sql_help.c:1098 -#: sql_help.c:1146 +#: sql_help.c:122 sql_help.c:605 sql_help.c:670 sql_help.c:1271 sql_help.c:1319 msgid "new_tablespace" msgstr "nytt_tabellutrymme" -#: sql_help.c:125 sql_help.c:128 sql_help.c:130 sql_help.c:534 sql_help.c:536 -#: sql_help.c:537 sql_help.c:863 sql_help.c:867 sql_help.c:870 sql_help.c:1005 -#: sql_help.c:1008 sql_help.c:1362 sql_help.c:1365 sql_help.c:1367 -#: sql_help.c:2027 sql_help.c:3607 sql_help.c:4002 +#: 
sql_help.c:124 sql_help.c:127 sql_help.c:129 sql_help.c:543 sql_help.c:545 +#: sql_help.c:546 sql_help.c:862 sql_help.c:864 sql_help.c:865 sql_help.c:934 +#: sql_help.c:938 sql_help.c:941 sql_help.c:1002 sql_help.c:1004 +#: sql_help.c:1005 sql_help.c:1136 sql_help.c:1139 sql_help.c:1582 +#: sql_help.c:1586 sql_help.c:1589 sql_help.c:2287 sql_help.c:2479 +#: sql_help.c:3925 sql_help.c:4330 msgid "configuration_parameter" msgstr "konfigurationsparameter" -#: sql_help.c:126 sql_help.c:386 sql_help.c:458 sql_help.c:464 sql_help.c:476 -#: sql_help.c:535 sql_help.c:583 sql_help.c:659 sql_help.c:665 sql_help.c:815 -#: sql_help.c:864 sql_help.c:943 sql_help.c:982 sql_help.c:985 sql_help.c:990 -#: sql_help.c:1006 sql_help.c:1007 sql_help.c:1128 sql_help.c:1148 -#: sql_help.c:1174 sql_help.c:1231 sql_help.c:1363 sql_help.c:1383 -#: sql_help.c:1926 sql_help.c:1967 sql_help.c:1974 sql_help.c:1983 -#: sql_help.c:2028 sql_help.c:2029 sql_help.c:2087 sql_help.c:2119 -#: sql_help.c:2209 sql_help.c:2330 sql_help.c:2360 sql_help.c:2458 -#: sql_help.c:2470 sql_help.c:2483 sql_help.c:2523 sql_help.c:2545 -#: sql_help.c:2562 sql_help.c:2589 sql_help.c:2792 sql_help.c:3422 -#: sql_help.c:4003 sql_help.c:4004 +#: sql_help.c:125 sql_help.c:395 sql_help.c:467 sql_help.c:473 sql_help.c:485 +#: sql_help.c:544 sql_help.c:597 sql_help.c:676 sql_help.c:682 sql_help.c:863 +#: sql_help.c:886 sql_help.c:935 sql_help.c:1003 sql_help.c:1074 +#: sql_help.c:1113 sql_help.c:1116 sql_help.c:1121 sql_help.c:1137 +#: sql_help.c:1138 sql_help.c:1301 sql_help.c:1321 sql_help.c:1371 +#: sql_help.c:1393 sql_help.c:1450 sql_help.c:1583 sql_help.c:1606 +#: sql_help.c:2187 sql_help.c:2228 sql_help.c:2235 sql_help.c:2244 +#: sql_help.c:2288 sql_help.c:2289 sql_help.c:2348 sql_help.c:2380 +#: sql_help.c:2480 sql_help.c:2481 sql_help.c:2498 sql_help.c:2619 +#: sql_help.c:2649 sql_help.c:2749 sql_help.c:2761 sql_help.c:2774 +#: sql_help.c:2817 sql_help.c:2839 sql_help.c:2856 sql_help.c:2883 +#: sql_help.c:3086 sql_help.c:3740 sql_help.c:4331 sql_help.c:4332 msgid "value" msgstr "värde" -#: sql_help.c:198 +#: sql_help.c:197 msgid "target_role" msgstr "målroll" -#: sql_help.c:199 sql_help.c:1909 sql_help.c:2285 sql_help.c:2290 -#: sql_help.c:3369 sql_help.c:3376 sql_help.c:3390 sql_help.c:3396 -#: sql_help.c:3702 sql_help.c:3709 sql_help.c:3723 sql_help.c:3729 +#: sql_help.c:198 sql_help.c:2170 sql_help.c:2574 sql_help.c:2579 +#: sql_help.c:3687 sql_help.c:3694 sql_help.c:3708 sql_help.c:3714 +#: sql_help.c:4020 sql_help.c:4027 sql_help.c:4041 sql_help.c:4047 msgid "schema_name" msgstr "schemanamn" -#: sql_help.c:200 +#: sql_help.c:199 msgid "abbreviated_grant_or_revoke" msgstr "förkortad_grant_eller_revoke" -#: sql_help.c:201 +#: sql_help.c:200 msgid "where abbreviated_grant_or_revoke is one of:" msgstr "där förkortad_grant_eller_revok är en av:" -#: sql_help.c:202 sql_help.c:203 sql_help.c:204 sql_help.c:205 sql_help.c:206 -#: sql_help.c:207 sql_help.c:208 sql_help.c:209 sql_help.c:210 sql_help.c:211 -#: sql_help.c:559 sql_help.c:587 sql_help.c:652 sql_help.c:792 sql_help.c:874 -#: sql_help.c:1097 sql_help.c:1370 sql_help.c:2062 sql_help.c:2063 -#: sql_help.c:2064 sql_help.c:2065 sql_help.c:2066 sql_help.c:2193 -#: sql_help.c:2242 sql_help.c:2243 sql_help.c:2244 sql_help.c:2245 -#: sql_help.c:2246 sql_help.c:2774 sql_help.c:2775 sql_help.c:2776 -#: sql_help.c:2777 sql_help.c:2778 sql_help.c:3403 sql_help.c:3404 -#: sql_help.c:3405 sql_help.c:3703 sql_help.c:3707 sql_help.c:3710 -#: sql_help.c:3712 sql_help.c:3714 sql_help.c:3716 
sql_help.c:3718 -#: sql_help.c:3724 sql_help.c:3726 sql_help.c:3728 sql_help.c:3730 -#: sql_help.c:3732 sql_help.c:3734 sql_help.c:3735 sql_help.c:3736 -#: sql_help.c:4023 +#: sql_help.c:201 sql_help.c:202 sql_help.c:203 sql_help.c:204 sql_help.c:205 +#: sql_help.c:206 sql_help.c:207 sql_help.c:208 sql_help.c:209 sql_help.c:210 +#: sql_help.c:568 sql_help.c:604 sql_help.c:669 sql_help.c:809 sql_help.c:945 +#: sql_help.c:1270 sql_help.c:1593 sql_help.c:2321 sql_help.c:2322 +#: sql_help.c:2323 sql_help.c:2324 sql_help.c:2325 sql_help.c:2454 +#: sql_help.c:2531 sql_help.c:2532 sql_help.c:2533 sql_help.c:2534 +#: sql_help.c:2535 sql_help.c:3068 sql_help.c:3069 sql_help.c:3070 +#: sql_help.c:3071 sql_help.c:3072 sql_help.c:3721 sql_help.c:3722 +#: sql_help.c:3723 sql_help.c:4021 sql_help.c:4025 sql_help.c:4028 +#: sql_help.c:4030 sql_help.c:4032 sql_help.c:4034 sql_help.c:4036 +#: sql_help.c:4042 sql_help.c:4044 sql_help.c:4046 sql_help.c:4048 +#: sql_help.c:4050 sql_help.c:4052 sql_help.c:4053 sql_help.c:4054 +#: sql_help.c:4351 msgid "role_name" msgstr "rollnamn" -#: sql_help.c:237 sql_help.c:451 sql_help.c:1113 sql_help.c:1115 -#: sql_help.c:1399 sql_help.c:1878 sql_help.c:1882 sql_help.c:1986 -#: sql_help.c:1990 sql_help.c:2083 sql_help.c:2454 sql_help.c:2466 -#: sql_help.c:2479 sql_help.c:2487 sql_help.c:2498 sql_help.c:2527 -#: sql_help.c:3453 sql_help.c:3468 sql_help.c:3470 sql_help.c:3888 -#: sql_help.c:3889 sql_help.c:3898 sql_help.c:3939 sql_help.c:3940 -#: sql_help.c:3941 sql_help.c:3942 sql_help.c:3943 sql_help.c:3944 -#: sql_help.c:3977 sql_help.c:3978 sql_help.c:3983 sql_help.c:3988 -#: sql_help.c:4127 sql_help.c:4128 sql_help.c:4137 sql_help.c:4178 -#: sql_help.c:4179 sql_help.c:4180 sql_help.c:4181 sql_help.c:4182 -#: sql_help.c:4183 sql_help.c:4230 sql_help.c:4232 sql_help.c:4265 -#: sql_help.c:4321 sql_help.c:4322 sql_help.c:4331 sql_help.c:4372 -#: sql_help.c:4373 sql_help.c:4374 sql_help.c:4375 sql_help.c:4376 -#: sql_help.c:4377 +#: sql_help.c:236 sql_help.c:460 sql_help.c:1286 sql_help.c:1288 +#: sql_help.c:1339 sql_help.c:1350 sql_help.c:1375 sql_help.c:1622 +#: sql_help.c:2139 sql_help.c:2143 sql_help.c:2247 sql_help.c:2251 +#: sql_help.c:2343 sql_help.c:2745 sql_help.c:2757 sql_help.c:2770 +#: sql_help.c:2778 sql_help.c:2789 sql_help.c:2821 sql_help.c:3771 +#: sql_help.c:3786 sql_help.c:3788 sql_help.c:4216 sql_help.c:4217 +#: sql_help.c:4226 sql_help.c:4267 sql_help.c:4268 sql_help.c:4269 +#: sql_help.c:4270 sql_help.c:4271 sql_help.c:4272 sql_help.c:4305 +#: sql_help.c:4306 sql_help.c:4311 sql_help.c:4316 sql_help.c:4455 +#: sql_help.c:4456 sql_help.c:4465 sql_help.c:4506 sql_help.c:4507 +#: sql_help.c:4508 sql_help.c:4509 sql_help.c:4510 sql_help.c:4511 +#: sql_help.c:4558 sql_help.c:4560 sql_help.c:4606 sql_help.c:4662 +#: sql_help.c:4663 sql_help.c:4672 sql_help.c:4713 sql_help.c:4714 +#: sql_help.c:4715 sql_help.c:4716 sql_help.c:4717 sql_help.c:4718 msgid "expression" msgstr "uttryck" -#: sql_help.c:240 +#: sql_help.c:239 msgid "domain_constraint" msgstr "domain_villkor" -#: sql_help.c:242 sql_help.c:244 sql_help.c:247 sql_help.c:466 sql_help.c:467 -#: sql_help.c:1090 sql_help.c:1134 sql_help.c:1135 sql_help.c:1136 -#: sql_help.c:1156 sql_help.c:1526 sql_help.c:1528 sql_help.c:1881 -#: sql_help.c:1985 sql_help.c:1989 sql_help.c:2486 sql_help.c:2497 -#: sql_help.c:3465 +#: sql_help.c:241 sql_help.c:243 sql_help.c:246 sql_help.c:475 sql_help.c:476 +#: sql_help.c:1263 sql_help.c:1307 sql_help.c:1308 sql_help.c:1309 +#: sql_help.c:1338 sql_help.c:1349 sql_help.c:1366 
sql_help.c:1774 +#: sql_help.c:1776 sql_help.c:2142 sql_help.c:2246 sql_help.c:2250 +#: sql_help.c:2777 sql_help.c:2788 sql_help.c:3783 msgid "constraint_name" msgstr "villkorsnamn" -#: sql_help.c:245 sql_help.c:1091 +#: sql_help.c:244 sql_help.c:1264 msgid "new_constraint_name" msgstr "nyy_villkorsnamn" -#: sql_help.c:316 sql_help.c:941 +#: sql_help.c:317 sql_help.c:1072 msgid "new_version" msgstr "ny_version" -#: sql_help.c:320 sql_help.c:322 +#: sql_help.c:321 sql_help.c:323 msgid "member_object" msgstr "medlemsobjekt" -#: sql_help.c:323 +#: sql_help.c:324 msgid "where member_object is:" msgstr "där medlemsobjekt är:" -#: sql_help.c:324 sql_help.c:329 sql_help.c:330 sql_help.c:331 sql_help.c:332 -#: sql_help.c:333 sql_help.c:334 sql_help.c:339 sql_help.c:343 sql_help.c:345 -#: sql_help.c:347 sql_help.c:348 sql_help.c:349 sql_help.c:350 sql_help.c:351 -#: sql_help.c:352 sql_help.c:353 sql_help.c:354 sql_help.c:355 sql_help.c:358 -#: sql_help.c:359 sql_help.c:1518 sql_help.c:1523 sql_help.c:1530 -#: sql_help.c:1531 sql_help.c:1532 sql_help.c:1533 sql_help.c:1534 -#: sql_help.c:1535 sql_help.c:1536 sql_help.c:1541 sql_help.c:1543 -#: sql_help.c:1547 sql_help.c:1549 sql_help.c:1553 sql_help.c:1554 -#: sql_help.c:1555 sql_help.c:1558 sql_help.c:1559 sql_help.c:1560 -#: sql_help.c:1561 sql_help.c:1562 sql_help.c:1563 sql_help.c:1564 -#: sql_help.c:1565 sql_help.c:1566 sql_help.c:1567 sql_help.c:1568 -#: sql_help.c:1573 sql_help.c:1574 sql_help.c:3803 sql_help.c:3808 -#: sql_help.c:3809 sql_help.c:3810 sql_help.c:3811 sql_help.c:3817 -#: sql_help.c:3818 sql_help.c:3819 sql_help.c:3820 sql_help.c:3821 -#: sql_help.c:3822 sql_help.c:3823 sql_help.c:3824 sql_help.c:3825 -#: sql_help.c:3826 +#: sql_help.c:325 sql_help.c:330 sql_help.c:331 sql_help.c:332 sql_help.c:333 +#: sql_help.c:334 sql_help.c:335 sql_help.c:340 sql_help.c:344 sql_help.c:346 +#: sql_help.c:348 sql_help.c:357 sql_help.c:358 sql_help.c:359 sql_help.c:360 +#: sql_help.c:361 sql_help.c:362 sql_help.c:363 sql_help.c:364 sql_help.c:367 +#: sql_help.c:368 sql_help.c:1766 sql_help.c:1771 sql_help.c:1778 +#: sql_help.c:1779 sql_help.c:1780 sql_help.c:1781 sql_help.c:1782 +#: sql_help.c:1783 sql_help.c:1784 sql_help.c:1789 sql_help.c:1791 +#: sql_help.c:1795 sql_help.c:1797 sql_help.c:1801 sql_help.c:1806 +#: sql_help.c:1807 sql_help.c:1814 sql_help.c:1815 sql_help.c:1816 +#: sql_help.c:1817 sql_help.c:1818 sql_help.c:1819 sql_help.c:1820 +#: sql_help.c:1821 sql_help.c:1822 sql_help.c:1823 sql_help.c:1824 +#: sql_help.c:1829 sql_help.c:1830 sql_help.c:4123 sql_help.c:4128 +#: sql_help.c:4129 sql_help.c:4130 sql_help.c:4131 sql_help.c:4137 +#: sql_help.c:4138 sql_help.c:4143 sql_help.c:4144 sql_help.c:4149 +#: sql_help.c:4150 sql_help.c:4151 sql_help.c:4152 sql_help.c:4153 +#: sql_help.c:4154 msgid "object_name" msgstr "objektnamn" -#: sql_help.c:325 sql_help.c:1519 sql_help.c:3806 +#: sql_help.c:326 sql_help.c:1767 sql_help.c:4126 msgid "aggregate_name" msgstr "aggregatnamn" -#: sql_help.c:327 sql_help.c:1521 sql_help.c:1792 sql_help.c:1796 -#: sql_help.c:1798 sql_help.c:2901 +#: sql_help.c:328 sql_help.c:1769 sql_help.c:2053 sql_help.c:2057 +#: sql_help.c:2059 sql_help.c:3195 msgid "source_type" msgstr "källtyp" -#: sql_help.c:328 sql_help.c:1522 sql_help.c:1793 sql_help.c:1797 -#: sql_help.c:1799 sql_help.c:2902 +#: sql_help.c:329 sql_help.c:1770 sql_help.c:2054 sql_help.c:2058 +#: sql_help.c:2060 sql_help.c:3196 msgid "target_type" msgstr "måltyp" -#: sql_help.c:335 sql_help.c:756 sql_help.c:1537 sql_help.c:1794 -#: sql_help.c:1833 
sql_help.c:1896 sql_help.c:2136 sql_help.c:2167 -#: sql_help.c:2665 sql_help.c:3386 sql_help.c:3719 sql_help.c:3812 -#: sql_help.c:3917 sql_help.c:3921 sql_help.c:3925 sql_help.c:3928 -#: sql_help.c:4156 sql_help.c:4160 sql_help.c:4164 sql_help.c:4167 -#: sql_help.c:4350 sql_help.c:4354 sql_help.c:4358 sql_help.c:4361 +#: sql_help.c:336 sql_help.c:773 sql_help.c:1785 sql_help.c:2055 +#: sql_help.c:2094 sql_help.c:2157 sql_help.c:2397 sql_help.c:2428 +#: sql_help.c:2959 sql_help.c:4037 sql_help.c:4132 sql_help.c:4245 +#: sql_help.c:4249 sql_help.c:4253 sql_help.c:4256 sql_help.c:4484 +#: sql_help.c:4488 sql_help.c:4492 sql_help.c:4495 sql_help.c:4691 +#: sql_help.c:4695 sql_help.c:4699 sql_help.c:4702 msgid "function_name" msgstr "funktionsnamn" -#: sql_help.c:340 sql_help.c:749 sql_help.c:1544 sql_help.c:2160 +#: sql_help.c:341 sql_help.c:766 sql_help.c:1792 sql_help.c:2421 msgid "operator_name" msgstr "operatornamn" -#: sql_help.c:341 sql_help.c:685 sql_help.c:689 sql_help.c:693 sql_help.c:1545 -#: sql_help.c:2137 sql_help.c:3019 +#: sql_help.c:342 sql_help.c:702 sql_help.c:706 sql_help.c:710 sql_help.c:1793 +#: sql_help.c:2398 sql_help.c:3313 msgid "left_type" msgstr "vänster_typ" -#: sql_help.c:342 sql_help.c:686 sql_help.c:690 sql_help.c:694 sql_help.c:1546 -#: sql_help.c:2138 sql_help.c:3020 +#: sql_help.c:343 sql_help.c:703 sql_help.c:707 sql_help.c:711 sql_help.c:1794 +#: sql_help.c:2399 sql_help.c:3314 msgid "right_type" msgstr "höger_typ" -#: sql_help.c:344 sql_help.c:346 sql_help.c:712 sql_help.c:715 sql_help.c:718 -#: sql_help.c:747 sql_help.c:759 sql_help.c:767 sql_help.c:770 sql_help.c:773 -#: sql_help.c:1548 sql_help.c:1550 sql_help.c:2157 sql_help.c:2178 -#: sql_help.c:2503 sql_help.c:3029 sql_help.c:3038 +#: sql_help.c:345 sql_help.c:347 sql_help.c:729 sql_help.c:732 sql_help.c:735 +#: sql_help.c:764 sql_help.c:776 sql_help.c:784 sql_help.c:787 sql_help.c:790 +#: sql_help.c:1355 sql_help.c:1796 sql_help.c:1798 sql_help.c:2418 +#: sql_help.c:2439 sql_help.c:2794 sql_help.c:3323 sql_help.c:3332 msgid "index_method" msgstr "indexmetod" -#: sql_help.c:356 sql_help.c:1152 sql_help.c:1569 sql_help.c:2024 -#: sql_help.c:2461 sql_help.c:2632 sql_help.c:3176 sql_help.c:3400 -#: sql_help.c:3733 +#: sql_help.c:349 sql_help.c:1802 sql_help.c:4139 +msgid "procedure_name" +msgstr "procedurnamn" + +#: sql_help.c:353 sql_help.c:1808 sql_help.c:3704 sql_help.c:4145 +msgid "routine_name" +msgstr "rutinnamn" + +#: sql_help.c:365 sql_help.c:1325 sql_help.c:1825 sql_help.c:2284 +#: sql_help.c:2478 sql_help.c:2752 sql_help.c:2926 sql_help.c:3494 +#: sql_help.c:3718 sql_help.c:4051 msgid "type_name" msgstr "typnamn" -#: sql_help.c:357 sql_help.c:1570 sql_help.c:2023 sql_help.c:2633 -#: sql_help.c:2859 sql_help.c:3177 sql_help.c:3392 sql_help.c:3725 +#: sql_help.c:366 sql_help.c:1826 sql_help.c:2283 sql_help.c:2477 +#: sql_help.c:2927 sql_help.c:3153 sql_help.c:3495 sql_help.c:3710 +#: sql_help.c:4043 msgid "lang_name" msgstr "språknamn" -#: sql_help.c:360 +#: sql_help.c:369 msgid "and aggregate_signature is:" msgstr "och aggregatsignatur är:" -#: sql_help.c:383 sql_help.c:1664 sql_help.c:1923 +#: sql_help.c:392 sql_help.c:1920 sql_help.c:2184 msgid "handler_function" msgstr "hanterarfunktion" -#: sql_help.c:384 sql_help.c:1924 +#: sql_help.c:393 sql_help.c:2185 msgid "validator_function" msgstr "valideringsfunktion" -#: sql_help.c:433 sql_help.c:510 sql_help.c:641 sql_help.c:1085 -#: sql_help.c:1296 sql_help.c:2494 sql_help.c:2495 sql_help.c:2511 -#: sql_help.c:2512 +#: sql_help.c:442 
sql_help.c:519 sql_help.c:658 sql_help.c:840 sql_help.c:978 +#: sql_help.c:1258 sql_help.c:1346 sql_help.c:1347 sql_help.c:1363 +#: sql_help.c:1364 sql_help.c:1515 sql_help.c:2785 sql_help.c:2786 +#: sql_help.c:2802 sql_help.c:2803 msgid "action" msgstr "aktion" -#: sql_help.c:435 sql_help.c:442 sql_help.c:446 sql_help.c:447 sql_help.c:450 -#: sql_help.c:452 sql_help.c:453 sql_help.c:454 sql_help.c:456 sql_help.c:459 -#: sql_help.c:461 sql_help.c:462 sql_help.c:645 sql_help.c:655 sql_help.c:657 -#: sql_help.c:660 sql_help.c:662 sql_help.c:923 sql_help.c:1087 -#: sql_help.c:1105 sql_help.c:1109 sql_help.c:1110 sql_help.c:1114 -#: sql_help.c:1116 sql_help.c:1117 sql_help.c:1118 sql_help.c:1120 -#: sql_help.c:1123 sql_help.c:1124 sql_help.c:1126 sql_help.c:1129 -#: sql_help.c:1131 sql_help.c:1398 sql_help.c:1401 sql_help.c:1421 -#: sql_help.c:1525 sql_help.c:1630 sql_help.c:1635 sql_help.c:1649 -#: sql_help.c:1650 sql_help.c:1651 sql_help.c:1964 sql_help.c:1977 -#: sql_help.c:2021 sql_help.c:2082 sql_help.c:2117 sql_help.c:2315 -#: sql_help.c:2343 sql_help.c:2344 sql_help.c:2445 sql_help.c:2453 -#: sql_help.c:2462 sql_help.c:2465 sql_help.c:2474 sql_help.c:2478 -#: sql_help.c:2499 sql_help.c:2501 sql_help.c:2508 sql_help.c:2526 -#: sql_help.c:2543 sql_help.c:2668 sql_help.c:2804 sql_help.c:3371 -#: sql_help.c:3372 sql_help.c:3452 sql_help.c:3467 sql_help.c:3469 -#: sql_help.c:3471 sql_help.c:3704 sql_help.c:3705 sql_help.c:3805 -#: sql_help.c:3948 sql_help.c:4187 sql_help.c:4229 sql_help.c:4231 -#: sql_help.c:4233 sql_help.c:4250 sql_help.c:4253 sql_help.c:4381 +#: sql_help.c:444 sql_help.c:451 sql_help.c:455 sql_help.c:456 sql_help.c:459 +#: sql_help.c:461 sql_help.c:462 sql_help.c:463 sql_help.c:465 sql_help.c:468 +#: sql_help.c:470 sql_help.c:471 sql_help.c:662 sql_help.c:672 sql_help.c:674 +#: sql_help.c:677 sql_help.c:679 sql_help.c:1054 sql_help.c:1260 +#: sql_help.c:1278 sql_help.c:1282 sql_help.c:1283 sql_help.c:1287 +#: sql_help.c:1289 sql_help.c:1290 sql_help.c:1291 sql_help.c:1293 +#: sql_help.c:1296 sql_help.c:1297 sql_help.c:1299 sql_help.c:1302 +#: sql_help.c:1304 sql_help.c:1351 sql_help.c:1353 sql_help.c:1360 +#: sql_help.c:1369 sql_help.c:1374 sql_help.c:1621 sql_help.c:1624 +#: sql_help.c:1658 sql_help.c:1773 sql_help.c:1886 sql_help.c:1891 +#: sql_help.c:1905 sql_help.c:1906 sql_help.c:1907 sql_help.c:2225 +#: sql_help.c:2238 sql_help.c:2281 sql_help.c:2342 sql_help.c:2346 +#: sql_help.c:2378 sql_help.c:2604 sql_help.c:2632 sql_help.c:2633 +#: sql_help.c:2736 sql_help.c:2744 sql_help.c:2753 sql_help.c:2756 +#: sql_help.c:2765 sql_help.c:2769 sql_help.c:2790 sql_help.c:2792 +#: sql_help.c:2799 sql_help.c:2815 sql_help.c:2820 sql_help.c:2837 +#: sql_help.c:2962 sql_help.c:3098 sql_help.c:3689 sql_help.c:3690 +#: sql_help.c:3770 sql_help.c:3785 sql_help.c:3787 sql_help.c:3789 +#: sql_help.c:4022 sql_help.c:4023 sql_help.c:4125 sql_help.c:4276 +#: sql_help.c:4515 sql_help.c:4557 sql_help.c:4559 sql_help.c:4561 +#: sql_help.c:4594 sql_help.c:4722 msgid "column_name" msgstr "kolumnnamn" -#: sql_help.c:436 sql_help.c:646 sql_help.c:1088 +#: sql_help.c:445 sql_help.c:663 sql_help.c:1261 msgid "new_column_name" msgstr "nytt_kolumnnamn" -#: sql_help.c:441 sql_help.c:531 sql_help.c:654 sql_help.c:1104 -#: sql_help.c:1312 +#: sql_help.c:450 sql_help.c:540 sql_help.c:671 sql_help.c:861 sql_help.c:999 +#: sql_help.c:1277 sql_help.c:1531 msgid "where action is one of:" msgstr "där aktion är en av:" -#: sql_help.c:443 sql_help.c:448 sql_help.c:915 sql_help.c:1106 -#: sql_help.c:1111 
sql_help.c:1314 sql_help.c:1318 sql_help.c:1876 -#: sql_help.c:1965 sql_help.c:2156 sql_help.c:2308 sql_help.c:2446 -#: sql_help.c:2713 sql_help.c:3554 +#: sql_help.c:452 sql_help.c:457 sql_help.c:1046 sql_help.c:1279 +#: sql_help.c:1284 sql_help.c:1533 sql_help.c:1537 sql_help.c:2137 +#: sql_help.c:2226 sql_help.c:2417 sql_help.c:2597 sql_help.c:2737 +#: sql_help.c:3007 sql_help.c:3872 msgid "data_type" msgstr "datatyp" -#: sql_help.c:444 sql_help.c:449 sql_help.c:1107 sql_help.c:1112 -#: sql_help.c:1315 sql_help.c:1319 sql_help.c:1877 sql_help.c:1968 -#: sql_help.c:2084 sql_help.c:2447 sql_help.c:2455 sql_help.c:2467 -#: sql_help.c:2480 sql_help.c:2714 sql_help.c:2720 sql_help.c:3462 +#: sql_help.c:453 sql_help.c:458 sql_help.c:1280 sql_help.c:1285 +#: sql_help.c:1534 sql_help.c:1538 sql_help.c:2138 sql_help.c:2229 +#: sql_help.c:2344 sql_help.c:2738 sql_help.c:2746 sql_help.c:2758 +#: sql_help.c:2771 sql_help.c:3008 sql_help.c:3014 sql_help.c:3780 msgid "collation" -msgstr "sortering" +msgstr "jämförelse" -#: sql_help.c:445 sql_help.c:1108 sql_help.c:1969 sql_help.c:1978 -#: sql_help.c:2448 sql_help.c:2463 sql_help.c:2475 +#: sql_help.c:454 sql_help.c:1281 sql_help.c:2230 sql_help.c:2239 +#: sql_help.c:2739 sql_help.c:2754 sql_help.c:2766 msgid "column_constraint" msgstr "kolumnvillkor" -#: sql_help.c:455 sql_help.c:656 sql_help.c:1125 +#: sql_help.c:464 sql_help.c:602 sql_help.c:673 sql_help.c:1298 msgid "integer" msgstr "heltal" -#: sql_help.c:457 sql_help.c:460 sql_help.c:658 sql_help.c:661 sql_help.c:1127 -#: sql_help.c:1130 +#: sql_help.c:466 sql_help.c:469 sql_help.c:675 sql_help.c:678 sql_help.c:1300 +#: sql_help.c:1303 msgid "attribute_option" msgstr "attributalternativ" -#: sql_help.c:465 sql_help.c:1132 sql_help.c:1970 sql_help.c:1979 -#: sql_help.c:2449 sql_help.c:2464 sql_help.c:2476 +#: sql_help.c:474 sql_help.c:1305 sql_help.c:2231 sql_help.c:2240 +#: sql_help.c:2740 sql_help.c:2755 sql_help.c:2767 msgid "table_constraint" msgstr "tabellvillkor" -#: sql_help.c:468 sql_help.c:469 sql_help.c:470 sql_help.c:471 sql_help.c:1137 -#: sql_help.c:1138 sql_help.c:1139 sql_help.c:1140 sql_help.c:1571 +#: sql_help.c:477 sql_help.c:478 sql_help.c:479 sql_help.c:480 sql_help.c:1310 +#: sql_help.c:1311 sql_help.c:1312 sql_help.c:1313 sql_help.c:1827 msgid "trigger_name" msgstr "utlösarnamn" -#: sql_help.c:472 sql_help.c:473 sql_help.c:1150 sql_help.c:1151 -#: sql_help.c:1971 sql_help.c:1976 sql_help.c:2452 sql_help.c:2473 +#: sql_help.c:481 sql_help.c:482 sql_help.c:1323 sql_help.c:1324 +#: sql_help.c:2232 sql_help.c:2237 sql_help.c:2743 sql_help.c:2764 msgid "parent_table" msgstr "föräldertabell" -#: sql_help.c:530 sql_help.c:580 sql_help.c:643 sql_help.c:1275 -#: sql_help.c:1908 +#: sql_help.c:539 sql_help.c:594 sql_help.c:660 sql_help.c:860 sql_help.c:998 +#: sql_help.c:1494 sql_help.c:2169 msgid "extension_name" msgstr "utökningsnamn" -#: sql_help.c:532 sql_help.c:2025 +#: sql_help.c:541 sql_help.c:1000 sql_help.c:2285 msgid "execution_cost" msgstr "körkostnad" -#: sql_help.c:533 sql_help.c:2026 +#: sql_help.c:542 sql_help.c:1001 sql_help.c:2286 msgid "result_rows" msgstr "resultatrader" -#: sql_help.c:554 sql_help.c:556 sql_help.c:853 sql_help.c:861 sql_help.c:865 -#: sql_help.c:868 sql_help.c:871 sql_help.c:1353 sql_help.c:1361 -#: sql_help.c:1364 sql_help.c:1366 sql_help.c:1368 sql_help.c:2286 -#: sql_help.c:2288 sql_help.c:2291 sql_help.c:2292 sql_help.c:3370 -#: sql_help.c:3374 sql_help.c:3377 sql_help.c:3379 sql_help.c:3381 -#: sql_help.c:3383 sql_help.c:3385 sql_help.c:3391 
sql_help.c:3393 -#: sql_help.c:3395 sql_help.c:3397 sql_help.c:3399 sql_help.c:3401 +#: sql_help.c:563 sql_help.c:565 sql_help.c:924 sql_help.c:932 sql_help.c:936 +#: sql_help.c:939 sql_help.c:942 sql_help.c:1572 sql_help.c:1580 +#: sql_help.c:1584 sql_help.c:1587 sql_help.c:1590 sql_help.c:2575 +#: sql_help.c:2577 sql_help.c:2580 sql_help.c:2581 sql_help.c:3688 +#: sql_help.c:3692 sql_help.c:3695 sql_help.c:3697 sql_help.c:3699 +#: sql_help.c:3701 sql_help.c:3703 sql_help.c:3709 sql_help.c:3711 +#: sql_help.c:3713 sql_help.c:3715 sql_help.c:3717 sql_help.c:3719 msgid "role_specification" msgstr "rollspecifikation" -#: sql_help.c:555 sql_help.c:557 sql_help.c:1380 sql_help.c:1851 -#: sql_help.c:2294 sql_help.c:2789 sql_help.c:3210 sql_help.c:4033 +#: sql_help.c:564 sql_help.c:566 sql_help.c:1603 sql_help.c:2112 +#: sql_help.c:2583 sql_help.c:3083 sql_help.c:3528 sql_help.c:4361 msgid "user_name" msgstr "användarnamn" -#: sql_help.c:558 sql_help.c:873 sql_help.c:1369 sql_help.c:2293 -#: sql_help.c:3402 +#: sql_help.c:567 sql_help.c:944 sql_help.c:1592 sql_help.c:2582 +#: sql_help.c:3720 msgid "where role_specification can be:" msgstr "där rollspecifikation kan vara:" -#: sql_help.c:560 +#: sql_help.c:569 msgid "group_name" msgstr "gruppnamn" -#: sql_help.c:578 sql_help.c:1856 sql_help.c:2088 sql_help.c:2120 -#: sql_help.c:2459 sql_help.c:2471 sql_help.c:2484 sql_help.c:2524 -#: sql_help.c:2546 sql_help.c:2558 sql_help.c:3398 sql_help.c:3731 +#: sql_help.c:590 sql_help.c:1372 sql_help.c:2117 sql_help.c:2349 +#: sql_help.c:2381 sql_help.c:2750 sql_help.c:2762 sql_help.c:2775 +#: sql_help.c:2818 sql_help.c:2840 sql_help.c:2852 sql_help.c:3716 +#: sql_help.c:4049 msgid "tablespace_name" msgstr "tabellutrymmesnamn" -#: sql_help.c:582 sql_help.c:585 sql_help.c:664 sql_help.c:666 sql_help.c:1147 -#: sql_help.c:1149 sql_help.c:2086 sql_help.c:2118 sql_help.c:2457 -#: sql_help.c:2469 sql_help.c:2482 sql_help.c:2522 sql_help.c:2544 +#: sql_help.c:592 sql_help.c:680 sql_help.c:1318 sql_help.c:1327 +#: sql_help.c:1367 sql_help.c:1707 +msgid "index_name" +msgstr "indexnamn" + +#: sql_help.c:596 sql_help.c:599 sql_help.c:681 sql_help.c:683 sql_help.c:1320 +#: sql_help.c:1322 sql_help.c:1370 sql_help.c:2347 sql_help.c:2379 +#: sql_help.c:2748 sql_help.c:2760 sql_help.c:2773 sql_help.c:2816 +#: sql_help.c:2838 msgid "storage_parameter" msgstr "lagringsparameter" -#: sql_help.c:608 sql_help.c:1542 sql_help.c:3816 +#: sql_help.c:601 +msgid "column_number" +msgstr "kolumnnummer" + +#: sql_help.c:625 sql_help.c:1790 sql_help.c:4136 msgid "large_object_oid" msgstr "stort_objekt_oid" -#: sql_help.c:663 sql_help.c:1145 sql_help.c:1154 sql_help.c:1157 -#: sql_help.c:1461 -msgid "index_name" -msgstr "indexnamn" - -#: sql_help.c:695 sql_help.c:2141 +#: sql_help.c:712 sql_help.c:2402 msgid "res_proc" msgstr "res_proc" -#: sql_help.c:696 sql_help.c:2142 +#: sql_help.c:713 sql_help.c:2403 msgid "join_proc" msgstr "join_proc" -#: sql_help.c:748 sql_help.c:760 sql_help.c:2159 +#: sql_help.c:765 sql_help.c:777 sql_help.c:2420 msgid "strategy_number" msgstr "strateginummer" -#: sql_help.c:750 sql_help.c:751 sql_help.c:754 sql_help.c:755 sql_help.c:761 -#: sql_help.c:762 sql_help.c:764 sql_help.c:765 sql_help.c:2161 -#: sql_help.c:2162 sql_help.c:2165 sql_help.c:2166 +#: sql_help.c:767 sql_help.c:768 sql_help.c:771 sql_help.c:772 sql_help.c:778 +#: sql_help.c:779 sql_help.c:781 sql_help.c:782 sql_help.c:2422 sql_help.c:2423 +#: sql_help.c:2426 sql_help.c:2427 msgid "op_type" msgstr "op_typ" -#: sql_help.c:752 
sql_help.c:2163 +#: sql_help.c:769 sql_help.c:2424 msgid "sort_family_name" msgstr "sorteringsfamiljnamn" -#: sql_help.c:753 sql_help.c:763 sql_help.c:2164 +#: sql_help.c:770 sql_help.c:780 sql_help.c:2425 msgid "support_number" msgstr "supportnummer" -#: sql_help.c:757 sql_help.c:1795 sql_help.c:2168 sql_help.c:2635 -#: sql_help.c:2637 +#: sql_help.c:774 sql_help.c:2056 sql_help.c:2429 sql_help.c:2929 +#: sql_help.c:2931 msgid "argument_type" msgstr "argumenttyp" -#: sql_help.c:788 sql_help.c:791 sql_help.c:808 sql_help.c:810 sql_help.c:812 -#: sql_help.c:883 sql_help.c:922 sql_help.c:1271 sql_help.c:1274 -#: sql_help.c:1420 sql_help.c:1460 sql_help.c:1527 sql_help.c:1552 -#: sql_help.c:1557 sql_help.c:1572 sql_help.c:1629 sql_help.c:1634 -#: sql_help.c:1963 sql_help.c:1975 sql_help.c:2080 sql_help.c:2116 -#: sql_help.c:2192 sql_help.c:2207 sql_help.c:2263 sql_help.c:2314 -#: sql_help.c:2345 sql_help.c:2444 sql_help.c:2460 sql_help.c:2472 -#: sql_help.c:2542 sql_help.c:2661 sql_help.c:2838 sql_help.c:3055 -#: sql_help.c:3080 sql_help.c:3186 sql_help.c:3368 sql_help.c:3373 -#: sql_help.c:3418 sql_help.c:3450 sql_help.c:3701 sql_help.c:3706 -#: sql_help.c:3804 sql_help.c:3903 sql_help.c:3905 sql_help.c:3954 -#: sql_help.c:3993 sql_help.c:4142 sql_help.c:4144 sql_help.c:4193 -#: sql_help.c:4227 sql_help.c:4249 sql_help.c:4251 sql_help.c:4252 -#: sql_help.c:4336 sql_help.c:4338 sql_help.c:4387 +#: sql_help.c:805 sql_help.c:808 sql_help.c:879 sql_help.c:881 sql_help.c:883 +#: sql_help.c:1014 sql_help.c:1053 sql_help.c:1490 sql_help.c:1493 +#: sql_help.c:1657 sql_help.c:1706 sql_help.c:1775 sql_help.c:1800 +#: sql_help.c:1813 sql_help.c:1828 sql_help.c:1885 sql_help.c:1890 +#: sql_help.c:2224 sql_help.c:2236 sql_help.c:2340 sql_help.c:2377 +#: sql_help.c:2453 sql_help.c:2496 sql_help.c:2552 sql_help.c:2603 +#: sql_help.c:2634 sql_help.c:2735 sql_help.c:2751 sql_help.c:2763 +#: sql_help.c:2836 sql_help.c:2955 sql_help.c:3132 sql_help.c:3349 +#: sql_help.c:3398 sql_help.c:3504 sql_help.c:3686 sql_help.c:3691 +#: sql_help.c:3736 sql_help.c:3768 sql_help.c:4019 sql_help.c:4024 +#: sql_help.c:4124 sql_help.c:4231 sql_help.c:4233 sql_help.c:4282 +#: sql_help.c:4321 sql_help.c:4470 sql_help.c:4472 sql_help.c:4521 +#: sql_help.c:4555 sql_help.c:4593 sql_help.c:4677 sql_help.c:4679 +#: sql_help.c:4728 msgid "table_name" msgstr "tabellnamn" -#: sql_help.c:793 sql_help.c:2194 +#: sql_help.c:810 sql_help.c:2455 msgid "using_expression" msgstr "using-uttryck" -#: sql_help.c:794 sql_help.c:2195 +#: sql_help.c:811 sql_help.c:2456 msgid "check_expression" msgstr "check-uttryck" -#: sql_help.c:814 sql_help.c:2208 +#: sql_help.c:885 sql_help.c:2497 msgid "publication_parameter" msgstr "publiceringsparameter" -#: sql_help.c:857 sql_help.c:1357 sql_help.c:2060 sql_help.c:2240 -#: sql_help.c:2772 +#: sql_help.c:928 sql_help.c:1576 sql_help.c:2319 sql_help.c:2529 +#: sql_help.c:3066 msgid "password" msgstr "lösenord" -#: sql_help.c:858 sql_help.c:1358 sql_help.c:2061 sql_help.c:2241 -#: sql_help.c:2773 +#: sql_help.c:929 sql_help.c:1577 sql_help.c:2320 sql_help.c:2530 +#: sql_help.c:3067 msgid "timestamp" msgstr "tidsstämpel" -#: sql_help.c:862 sql_help.c:866 sql_help.c:869 sql_help.c:872 sql_help.c:3378 -#: sql_help.c:3711 +#: sql_help.c:933 sql_help.c:937 sql_help.c:940 sql_help.c:943 sql_help.c:1581 +#: sql_help.c:1585 sql_help.c:1588 sql_help.c:1591 sql_help.c:3696 +#: sql_help.c:4029 msgid "database_name" msgstr "databasnamn" -#: sql_help.c:916 sql_help.c:2309 +#: sql_help.c:1047 sql_help.c:2598 msgid 
"increment" msgstr "ökningsvärde" -#: sql_help.c:917 sql_help.c:2310 +#: sql_help.c:1048 sql_help.c:2599 msgid "minvalue" msgstr "minvärde" -#: sql_help.c:918 sql_help.c:2311 +#: sql_help.c:1049 sql_help.c:2600 msgid "maxvalue" msgstr "maxvärde" -#: sql_help.c:919 sql_help.c:2312 sql_help.c:3901 sql_help.c:3991 -#: sql_help.c:4140 sql_help.c:4269 sql_help.c:4334 +#: sql_help.c:1050 sql_help.c:2601 sql_help.c:4229 sql_help.c:4319 +#: sql_help.c:4468 sql_help.c:4610 sql_help.c:4675 msgid "start" msgstr "start" -#: sql_help.c:920 sql_help.c:1122 +#: sql_help.c:1051 sql_help.c:1295 msgid "restart" msgstr "starta om" -#: sql_help.c:921 sql_help.c:2313 +#: sql_help.c:1052 sql_help.c:2602 msgid "cache" msgstr "cache" -#: sql_help.c:978 sql_help.c:2357 +#: sql_help.c:1109 sql_help.c:2646 msgid "conninfo" msgstr "anslinfo" -#: sql_help.c:980 sql_help.c:2358 +#: sql_help.c:1111 sql_help.c:2647 msgid "publication_name" msgstr "publiceringsnamn" -#: sql_help.c:981 +#: sql_help.c:1112 msgid "set_publication_option" msgstr "sätt_publicerings_alternativ" -#: sql_help.c:984 +#: sql_help.c:1115 msgid "refresh_option" msgstr "refresh_alternativ" -#: sql_help.c:989 sql_help.c:2359 +#: sql_help.c:1120 sql_help.c:2648 msgid "subscription_parameter" msgstr "prenumerationsparameter" -#: sql_help.c:1100 sql_help.c:1103 +#: sql_help.c:1273 sql_help.c:1276 msgid "partition_name" msgstr "paritionsnamn" -#: sql_help.c:1101 sql_help.c:1980 sql_help.c:2477 +#: sql_help.c:1274 sql_help.c:2241 sql_help.c:2768 msgid "partition_bound_spec" msgstr "partionerings_spec" -#: sql_help.c:1119 sql_help.c:2489 +#: sql_help.c:1292 sql_help.c:1341 sql_help.c:2780 msgid "sequence_options" msgstr "sekvensalternativ" -#: sql_help.c:1121 +#: sql_help.c:1294 msgid "sequence_option" msgstr "sekvensalternativ" -#: sql_help.c:1133 +#: sql_help.c:1306 msgid "table_constraint_using_index" msgstr "tabellvillkor_för_index" -#: sql_help.c:1141 sql_help.c:1142 sql_help.c:1143 sql_help.c:1144 +#: sql_help.c:1314 sql_help.c:1315 sql_help.c:1316 sql_help.c:1317 msgid "rewrite_rule_name" msgstr "omskrivningsregelnamn" -#: sql_help.c:1155 -msgid "and table_constraint_using_index is:" -msgstr "och tabellvillkor_för_index är:" +#: sql_help.c:1328 sql_help.c:2805 +msgid "and partition_bound_spec is:" +msgstr "och partionerings_spec är:" -#: sql_help.c:1173 sql_help.c:1176 sql_help.c:2561 -msgid "tablespace_option" -msgstr "tabellutrymmesalternativ" +#: sql_help.c:1329 sql_help.c:1331 sql_help.c:1333 sql_help.c:1335 +#: sql_help.c:1336 sql_help.c:2806 sql_help.c:2808 sql_help.c:2810 +#: sql_help.c:2812 sql_help.c:2813 +msgid "numeric_literal" +msgstr "numerisk_literal" -#: sql_help.c:1197 sql_help.c:1200 sql_help.c:1206 sql_help.c:1210 -msgid "token_type" -msgstr "symboltyp" +#: sql_help.c:1330 sql_help.c:1332 sql_help.c:1334 sql_help.c:2807 +#: sql_help.c:2809 sql_help.c:2811 +msgid "string_literal" +msgstr "strängliteral" -#: sql_help.c:1198 sql_help.c:1201 -msgid "dictionary_name" -msgstr "ordlistnamn" +#: sql_help.c:1337 +msgid "and column_constraint is:" +msgstr "och kolumnvillkor är:" -#: sql_help.c:1203 sql_help.c:1207 -msgid "old_dictionary" -msgstr "gammal_ordlista" +#: sql_help.c:1340 sql_help.c:2248 sql_help.c:2279 sql_help.c:2476 +#: sql_help.c:2779 +msgid "default_expr" +msgstr "default_uttryck" -#: sql_help.c:1204 sql_help.c:1208 -msgid "new_dictionary" -msgstr "ny_ordlista" +#: sql_help.c:1342 sql_help.c:1343 sql_help.c:1352 sql_help.c:1354 +#: sql_help.c:1358 sql_help.c:2781 sql_help.c:2782 sql_help.c:2791 +#: sql_help.c:2793 
sql_help.c:2797 +msgid "index_parameters" +msgstr "indexparametrar" -#: sql_help.c:1300 sql_help.c:1313 sql_help.c:1316 sql_help.c:1317 -#: sql_help.c:2712 -msgid "attribute_name" -msgstr "attributnamn" +#: sql_help.c:1344 sql_help.c:1361 sql_help.c:2783 sql_help.c:2800 +msgid "reftable" +msgstr "reftabell" -#: sql_help.c:1301 -msgid "new_attribute_name" -msgstr "nytt_attributnamn" +#: sql_help.c:1345 sql_help.c:1362 sql_help.c:2784 sql_help.c:2801 +msgid "refcolumn" +msgstr "refkolumn" -#: sql_help.c:1307 sql_help.c:1311 -msgid "new_enum_value" -msgstr "nytt_enumvärde" +#: sql_help.c:1348 sql_help.c:2249 sql_help.c:2787 +msgid "and table_constraint is:" +msgstr "och tabellvillkor är:" -#: sql_help.c:1308 -msgid "neighbor_enum_value" -msgstr "närliggande_enumvärde" +#: sql_help.c:1356 sql_help.c:2795 +msgid "exclude_element" +msgstr "uteslutelement" -#: sql_help.c:1310 -msgid "existing_enum_value" +#: sql_help.c:1357 sql_help.c:2796 sql_help.c:4227 sql_help.c:4317 +#: sql_help.c:4466 sql_help.c:4608 sql_help.c:4673 +msgid "operator" +msgstr "operator" + +#: sql_help.c:1359 sql_help.c:2350 sql_help.c:2798 +msgid "predicate" +msgstr "predikat" + +#: sql_help.c:1365 +msgid "and table_constraint_using_index is:" +msgstr "och tabellvillkor_för_index är:" + +#: sql_help.c:1368 sql_help.c:2814 +msgid "index_parameters in UNIQUE, PRIMARY KEY, and EXCLUDE constraints are:" +msgstr "indexparametrar i UNIQUE-, PRIMARY KEY- och EXCLUDE-villkor är:" + +#: sql_help.c:1373 sql_help.c:2819 +msgid "exclude_element in an EXCLUDE constraint is:" +msgstr "uteslutelement i ett EXCLUDE-villkort är:" + +#: sql_help.c:1376 sql_help.c:2345 sql_help.c:2747 sql_help.c:2759 +#: sql_help.c:2772 sql_help.c:2822 sql_help.c:3781 +msgid "opclass" +msgstr "op-klass" + +#: sql_help.c:1392 sql_help.c:1395 sql_help.c:2855 +msgid "tablespace_option" +msgstr "tabellutrymmesalternativ" + +#: sql_help.c:1416 sql_help.c:1419 sql_help.c:1425 sql_help.c:1429 +msgid "token_type" +msgstr "symboltyp" + +#: sql_help.c:1417 sql_help.c:1420 +msgid "dictionary_name" +msgstr "ordlistnamn" + +#: sql_help.c:1422 sql_help.c:1426 +msgid "old_dictionary" +msgstr "gammal_ordlista" + +#: sql_help.c:1423 sql_help.c:1427 +msgid "new_dictionary" +msgstr "ny_ordlista" + +#: sql_help.c:1519 sql_help.c:1532 sql_help.c:1535 sql_help.c:1536 +#: sql_help.c:3006 +msgid "attribute_name" +msgstr "attributnamn" + +#: sql_help.c:1520 +msgid "new_attribute_name" +msgstr "nytt_attributnamn" + +#: sql_help.c:1526 sql_help.c:1530 +msgid "new_enum_value" +msgstr "nytt_enumvärde" + +#: sql_help.c:1527 +msgid "neighbor_enum_value" +msgstr "närliggande_enumvärde" + +#: sql_help.c:1529 +msgid "existing_enum_value" msgstr "existerande_enumvärde" -#: sql_help.c:1381 sql_help.c:1972 sql_help.c:1981 sql_help.c:2325 -#: sql_help.c:2790 sql_help.c:3211 sql_help.c:3384 sql_help.c:3419 -#: sql_help.c:3717 +#: sql_help.c:1604 sql_help.c:2233 sql_help.c:2242 sql_help.c:2614 +#: sql_help.c:3084 sql_help.c:3529 sql_help.c:3702 sql_help.c:3737 +#: sql_help.c:4035 msgid "server_name" msgstr "servernamn" -#: sql_help.c:1409 sql_help.c:1412 sql_help.c:2805 +#: sql_help.c:1632 sql_help.c:1635 sql_help.c:3099 msgid "view_option_name" msgstr "visningsalternativnamn" -#: sql_help.c:1410 sql_help.c:2806 +#: sql_help.c:1633 sql_help.c:3100 msgid "view_option_value" msgstr "visningsalternativvärde" -#: sql_help.c:1435 sql_help.c:4049 sql_help.c:4051 sql_help.c:4075 +#: sql_help.c:1653 sql_help.c:1654 sql_help.c:4589 sql_help.c:4590 +msgid "table_and_columns" +msgstr "tabell_och_kolumner" + 
+#: sql_help.c:1655 sql_help.c:1896 sql_help.c:3575 sql_help.c:4591 +msgid "where option can be one of:" +msgstr "där flaggor kan vara en av:" + +#: sql_help.c:1656 sql_help.c:4592 +msgid "and table_and_columns is:" +msgstr "och tabell_och_kolumner är:" + +#: sql_help.c:1672 sql_help.c:4377 sql_help.c:4379 sql_help.c:4403 msgid "transaction_mode" msgstr "transaktionsläge" -#: sql_help.c:1436 sql_help.c:4052 sql_help.c:4076 +#: sql_help.c:1673 sql_help.c:4380 sql_help.c:4404 msgid "where transaction_mode is one of:" msgstr "där transaktionsläge är en av:" -#: sql_help.c:1524 +#: sql_help.c:1682 sql_help.c:4237 sql_help.c:4246 sql_help.c:4250 +#: sql_help.c:4254 sql_help.c:4257 sql_help.c:4476 sql_help.c:4485 +#: sql_help.c:4489 sql_help.c:4493 sql_help.c:4496 sql_help.c:4683 +#: sql_help.c:4692 sql_help.c:4696 sql_help.c:4700 sql_help.c:4703 +msgid "argument" +msgstr "argument" + +#: sql_help.c:1772 msgid "relation_name" msgstr "relationsnamn" -#: sql_help.c:1529 sql_help.c:3380 sql_help.c:3713 +#: sql_help.c:1777 sql_help.c:3698 sql_help.c:4031 msgid "domain_name" msgstr "domännamn" -#: sql_help.c:1551 +#: sql_help.c:1799 msgid "policy_name" msgstr "policynamn" -#: sql_help.c:1556 +#: sql_help.c:1812 msgid "rule_name" msgstr "regelnamn" -#: sql_help.c:1575 +#: sql_help.c:1831 msgid "text" msgstr "text" -#: sql_help.c:1600 sql_help.c:3563 sql_help.c:3751 +#: sql_help.c:1856 sql_help.c:3881 sql_help.c:4069 msgid "transaction_id" msgstr "transaktions-id" -#: sql_help.c:1631 sql_help.c:1637 sql_help.c:3489 +#: sql_help.c:1887 sql_help.c:1893 sql_help.c:3807 msgid "filename" msgstr "filnamn" -#: sql_help.c:1632 sql_help.c:1638 sql_help.c:2265 sql_help.c:2266 -#: sql_help.c:2267 +#: sql_help.c:1888 sql_help.c:1894 sql_help.c:2554 sql_help.c:2555 +#: sql_help.c:2556 msgid "command" msgstr "kommando" -#: sql_help.c:1636 sql_help.c:2121 sql_help.c:2547 sql_help.c:2807 -#: sql_help.c:2825 sql_help.c:3454 +#: sql_help.c:1892 sql_help.c:2382 sql_help.c:2841 sql_help.c:3101 +#: sql_help.c:3119 sql_help.c:3772 msgid "query" msgstr "fråga" -#: sql_help.c:1640 sql_help.c:3257 -msgid "where option can be one of:" -msgstr "där flaggor kan vara en av:" - -#: sql_help.c:1641 +#: sql_help.c:1897 msgid "format_name" msgstr "formatnamn" -#: sql_help.c:1642 sql_help.c:1643 sql_help.c:1646 sql_help.c:3258 -#: sql_help.c:3259 sql_help.c:3260 sql_help.c:3261 sql_help.c:3262 -#: sql_help.c:3263 +#: sql_help.c:1898 sql_help.c:1899 sql_help.c:1902 sql_help.c:3576 +#: sql_help.c:3577 sql_help.c:3578 sql_help.c:3579 sql_help.c:3580 +#: sql_help.c:3581 msgid "boolean" msgstr "boolean" -#: sql_help.c:1644 +#: sql_help.c:1900 msgid "delimiter_character" msgstr "avdelartecken" -#: sql_help.c:1645 +#: sql_help.c:1901 msgid "null_string" msgstr "null-sträng" -#: sql_help.c:1647 +#: sql_help.c:1903 msgid "quote_character" msgstr "citattecken" -#: sql_help.c:1648 +#: sql_help.c:1904 msgid "escape_character" msgstr "escape-tecken" -#: sql_help.c:1652 +#: sql_help.c:1908 msgid "encoding_name" msgstr "kodningsnamn" -#: sql_help.c:1663 +#: sql_help.c:1919 msgid "access_method_type" msgstr "accessmetodtyp" -#: sql_help.c:1729 sql_help.c:1748 sql_help.c:1751 +#: sql_help.c:1990 sql_help.c:2009 sql_help.c:2012 msgid "arg_data_type" msgstr "arg_datatyp" -#: sql_help.c:1730 sql_help.c:1752 sql_help.c:1760 +#: sql_help.c:1991 sql_help.c:2013 sql_help.c:2021 msgid "sfunc" msgstr "sfunc" -#: sql_help.c:1731 sql_help.c:1753 sql_help.c:1761 +#: sql_help.c:1992 sql_help.c:2014 sql_help.c:2022 msgid "state_data_type" msgstr "tillståndsdatatyp" 
-#: sql_help.c:1732 sql_help.c:1754 sql_help.c:1762 +#: sql_help.c:1993 sql_help.c:2015 sql_help.c:2023 msgid "state_data_size" msgstr "tillståndsdatastorlek" -#: sql_help.c:1733 sql_help.c:1755 sql_help.c:1763 +#: sql_help.c:1994 sql_help.c:2016 sql_help.c:2024 msgid "ffunc" msgstr "ffunc" -#: sql_help.c:1734 sql_help.c:1764 +#: sql_help.c:1995 sql_help.c:2025 msgid "combinefunc" msgstr "kombinerafunk" -#: sql_help.c:1735 sql_help.c:1765 +#: sql_help.c:1996 sql_help.c:2026 msgid "serialfunc" msgstr "serialiseringsfunk" -#: sql_help.c:1736 sql_help.c:1766 +#: sql_help.c:1997 sql_help.c:2027 msgid "deserialfunc" msgstr "deserialiseringsfunk" -#: sql_help.c:1737 sql_help.c:1756 sql_help.c:1767 +#: sql_help.c:1998 sql_help.c:2017 sql_help.c:2028 msgid "initial_condition" msgstr "startvärde" -#: sql_help.c:1738 sql_help.c:1768 +#: sql_help.c:1999 sql_help.c:2029 msgid "msfunc" msgstr "msfunk" -#: sql_help.c:1739 sql_help.c:1769 +#: sql_help.c:2000 sql_help.c:2030 msgid "minvfunc" msgstr "minvfunk" -#: sql_help.c:1740 sql_help.c:1770 +#: sql_help.c:2001 sql_help.c:2031 msgid "mstate_data_type" msgstr "mtillståndsdatatyp" -#: sql_help.c:1741 sql_help.c:1771 +#: sql_help.c:2002 sql_help.c:2032 msgid "mstate_data_size" msgstr "ntillståndsstorlek" -#: sql_help.c:1742 sql_help.c:1772 +#: sql_help.c:2003 sql_help.c:2033 msgid "mffunc" msgstr "mffunk" -#: sql_help.c:1743 sql_help.c:1773 +#: sql_help.c:2004 sql_help.c:2034 msgid "minitial_condition" msgstr "mstartvärde" -#: sql_help.c:1744 sql_help.c:1774 +#: sql_help.c:2005 sql_help.c:2035 msgid "sort_operator" msgstr "sorteringsoperator" -#: sql_help.c:1757 +#: sql_help.c:2018 msgid "or the old syntax" msgstr "eller gamla syntaxen" -#: sql_help.c:1759 +#: sql_help.c:2020 msgid "base_type" msgstr "bastyp" -#: sql_help.c:1815 +#: sql_help.c:2076 msgid "locale" msgstr "lokal" -#: sql_help.c:1816 sql_help.c:1854 +#: sql_help.c:2077 sql_help.c:2115 msgid "lc_collate" msgstr "lc_collate" -#: sql_help.c:1817 sql_help.c:1855 +#: sql_help.c:2078 sql_help.c:2116 msgid "lc_ctype" msgstr "lc_ctype" -#: sql_help.c:1818 sql_help.c:3802 +#: sql_help.c:2079 sql_help.c:4122 msgid "provider" msgstr "leverantör" -#: sql_help.c:1819 sql_help.c:1910 +#: sql_help.c:2080 sql_help.c:2171 msgid "version" msgstr "version" -#: sql_help.c:1821 +#: sql_help.c:2082 msgid "existing_collation" -msgstr "existerande_sortering" +msgstr "existerande_jämförelse" -#: sql_help.c:1831 +#: sql_help.c:2092 msgid "source_encoding" msgstr "källkodning" -#: sql_help.c:1832 +#: sql_help.c:2093 msgid "dest_encoding" msgstr "målkodning" -#: sql_help.c:1852 sql_help.c:2587 +#: sql_help.c:2113 sql_help.c:2881 msgid "template" msgstr "mall" -#: sql_help.c:1853 +#: sql_help.c:2114 msgid "encoding" msgstr "kodning" -#: sql_help.c:1879 +#: sql_help.c:2140 msgid "constraint" msgstr "villkor" -#: sql_help.c:1880 +#: sql_help.c:2141 msgid "where constraint is:" msgstr "där villkor är:" -#: sql_help.c:1894 sql_help.c:2262 sql_help.c:2660 +#: sql_help.c:2155 sql_help.c:2551 sql_help.c:2954 msgid "event" msgstr "händelse" -#: sql_help.c:1895 +#: sql_help.c:2156 msgid "filter_variable" msgstr "filtervariabel" -#: sql_help.c:1911 +#: sql_help.c:2172 msgid "old_version" msgstr "gammal_version" -#: sql_help.c:1984 sql_help.c:2485 +#: sql_help.c:2245 sql_help.c:2776 msgid "where column_constraint is:" msgstr "där kolumnvillkor är:" -#: sql_help.c:1987 sql_help.c:2019 sql_help.c:2488 -msgid "default_expr" -msgstr "default_uttryck" - -#: sql_help.c:1988 sql_help.c:2496 -msgid "and table_constraint is:" -msgstr "och 
tabellvillkor är:" - -#: sql_help.c:2020 +#: sql_help.c:2280 msgid "rettype" msgstr "rettyp" -#: sql_help.c:2022 +#: sql_help.c:2282 msgid "column_type" msgstr "kolumntyp" -#: sql_help.c:2030 +#: sql_help.c:2290 sql_help.c:2482 msgid "definition" msgstr "definition" -#: sql_help.c:2031 +#: sql_help.c:2291 sql_help.c:2483 msgid "obj_file" msgstr "obj-fil" -#: sql_help.c:2032 +#: sql_help.c:2292 sql_help.c:2484 msgid "link_symbol" msgstr "linksymbol" -#: sql_help.c:2033 -msgid "attribute" -msgstr "attribut" - -#: sql_help.c:2067 sql_help.c:2247 sql_help.c:2779 +#: sql_help.c:2326 sql_help.c:2536 sql_help.c:3073 msgid "uid" msgstr "uid" -#: sql_help.c:2081 +#: sql_help.c:2341 msgid "method" msgstr "metod" -#: sql_help.c:2085 sql_help.c:2456 sql_help.c:2468 sql_help.c:2481 -#: sql_help.c:2528 sql_help.c:3463 -msgid "opclass" -msgstr "op-klass" - -#: sql_help.c:2089 sql_help.c:2507 -msgid "predicate" -msgstr "predikat" - -#: sql_help.c:2101 +#: sql_help.c:2362 msgid "call_handler" msgstr "anropshanterare" -#: sql_help.c:2102 +#: sql_help.c:2363 msgid "inline_handler" msgstr "inline-hanterare" -#: sql_help.c:2103 +#: sql_help.c:2364 msgid "valfunction" msgstr "val-funktion" -#: sql_help.c:2139 +#: sql_help.c:2400 msgid "com_op" msgstr "com_op" -#: sql_help.c:2140 +#: sql_help.c:2401 msgid "neg_op" msgstr "neg_op" -#: sql_help.c:2158 +#: sql_help.c:2419 msgid "family_name" msgstr "familjenamn" -#: sql_help.c:2169 +#: sql_help.c:2430 msgid "storage_type" msgstr "lagringstyp" -#: sql_help.c:2264 sql_help.c:2664 sql_help.c:2841 sql_help.c:3473 -#: sql_help.c:3892 sql_help.c:3894 sql_help.c:3982 sql_help.c:3984 -#: sql_help.c:4131 sql_help.c:4133 sql_help.c:4236 sql_help.c:4325 -#: sql_help.c:4327 +#: sql_help.c:2553 sql_help.c:2958 sql_help.c:3135 sql_help.c:3791 +#: sql_help.c:4220 sql_help.c:4222 sql_help.c:4310 sql_help.c:4312 +#: sql_help.c:4459 sql_help.c:4461 sql_help.c:4564 sql_help.c:4666 +#: sql_help.c:4668 msgid "condition" msgstr "villkor" -#: sql_help.c:2268 sql_help.c:2667 +#: sql_help.c:2557 sql_help.c:2961 msgid "where event can be one of:" msgstr "där händelse kan vara en av:" -#: sql_help.c:2287 sql_help.c:2289 +#: sql_help.c:2576 sql_help.c:2578 msgid "schema_element" msgstr "schema-element" -#: sql_help.c:2326 +#: sql_help.c:2615 msgid "server_type" msgstr "servertyp" -#: sql_help.c:2327 +#: sql_help.c:2616 msgid "server_version" msgstr "serverversion" -#: sql_help.c:2328 sql_help.c:3382 sql_help.c:3715 +#: sql_help.c:2617 sql_help.c:3700 sql_help.c:4033 msgid "fdw_name" msgstr "fdw-namn" -#: sql_help.c:2341 +#: sql_help.c:2630 msgid "statistics_name" msgstr "statistiknamn" -#: sql_help.c:2342 -msgid "statistic_type" -msgstr "statistiktyp" +#: sql_help.c:2631 +msgid "statistics_kind" +msgstr "statistiksort" -#: sql_help.c:2356 +#: sql_help.c:2645 msgid "subscription_name" msgstr "prenumerationsnamn" -#: sql_help.c:2450 +#: sql_help.c:2741 msgid "source_table" msgstr "källtabell" -#: sql_help.c:2451 +#: sql_help.c:2742 msgid "like_option" msgstr "like_alternativ" -#: sql_help.c:2490 sql_help.c:2491 sql_help.c:2500 sql_help.c:2502 -#: sql_help.c:2506 -msgid "index_parameters" -msgstr "indexparametrar" - -#: sql_help.c:2492 sql_help.c:2509 -msgid "reftable" -msgstr "reftabell" - -#: sql_help.c:2493 sql_help.c:2510 -msgid "refcolumn" -msgstr "refkolumn" - -#: sql_help.c:2504 -msgid "exclude_element" -msgstr "uteslutelement" - -#: sql_help.c:2505 sql_help.c:3899 sql_help.c:3989 sql_help.c:4138 -#: sql_help.c:4267 sql_help.c:4332 -msgid "operator" -msgstr "operator" - -#: 
sql_help.c:2513 +#: sql_help.c:2804 msgid "and like_option is:" msgstr "och likealternativ är:" -#: sql_help.c:2514 -msgid "and partition_bound_spec is:" -msgstr "och partionerings_spec är:" - -#: sql_help.c:2515 sql_help.c:2517 sql_help.c:2519 -msgid "numeric_literal" -msgstr "numerisk_literal" - -#: sql_help.c:2516 sql_help.c:2518 sql_help.c:2520 -msgid "string_literal" -msgstr "strängliteral" - -#: sql_help.c:2521 -msgid "index_parameters in UNIQUE, PRIMARY KEY, and EXCLUDE constraints are:" -msgstr "indexparametrar i UNIQUE-, PRIMARY KEY- och EXCLUDE-villkor är:" - -#: sql_help.c:2525 -msgid "exclude_element in an EXCLUDE constraint is:" -msgstr "uteslutelement i ett EXCLUDE-villkort är:" - -#: sql_help.c:2560 +#: sql_help.c:2854 msgid "directory" msgstr "katalog" -#: sql_help.c:2574 +#: sql_help.c:2868 msgid "parser_name" msgstr "parsernamn" -#: sql_help.c:2575 +#: sql_help.c:2869 msgid "source_config" msgstr "källkonfig" -#: sql_help.c:2604 +#: sql_help.c:2898 msgid "start_function" msgstr "startfunktion" -#: sql_help.c:2605 +#: sql_help.c:2899 msgid "gettoken_function" msgstr "gettoken_funktion" -#: sql_help.c:2606 +#: sql_help.c:2900 msgid "end_function" msgstr "slutfunktion" -#: sql_help.c:2607 +#: sql_help.c:2901 msgid "lextypes_function" msgstr "symboltypfunktion" -#: sql_help.c:2608 +#: sql_help.c:2902 msgid "headline_function" msgstr "rubrikfunktion" -#: sql_help.c:2620 +#: sql_help.c:2914 msgid "init_function" msgstr "init_funktion" -#: sql_help.c:2621 +#: sql_help.c:2915 msgid "lexize_function" msgstr "symboluppdelningsfunktion" -#: sql_help.c:2634 +#: sql_help.c:2928 msgid "from_sql_function_name" msgstr "från_sql_funktionsnamn" -#: sql_help.c:2636 +#: sql_help.c:2930 msgid "to_sql_function_name" msgstr "till_sql_funktionsnamn" -#: sql_help.c:2662 +#: sql_help.c:2956 msgid "referenced_table_name" msgstr "refererat_tabellnamn" -#: sql_help.c:2663 +#: sql_help.c:2957 msgid "transition_relation_name" msgstr "övergångsrelationsnamn" -#: sql_help.c:2666 +#: sql_help.c:2960 msgid "arguments" msgstr "argument" -#: sql_help.c:2716 sql_help.c:3827 +#: sql_help.c:3010 sql_help.c:4155 msgid "label" msgstr "etikett" -#: sql_help.c:2718 +#: sql_help.c:3012 msgid "subtype" msgstr "subtyp" -#: sql_help.c:2719 +#: sql_help.c:3013 msgid "subtype_operator_class" msgstr "subtypoperatorklass" -#: sql_help.c:2721 +#: sql_help.c:3015 msgid "canonical_function" msgstr "kanonisk_funktion" -#: sql_help.c:2722 +#: sql_help.c:3016 msgid "subtype_diff_function" msgstr "subtyp_diff_funktion" -#: sql_help.c:2724 +#: sql_help.c:3018 msgid "input_function" msgstr "inmatningsfunktion" -#: sql_help.c:2725 +#: sql_help.c:3019 msgid "output_function" msgstr "utmatningsfunktion" -#: sql_help.c:2726 +#: sql_help.c:3020 msgid "receive_function" msgstr "mottagarfunktion" -#: sql_help.c:2727 +#: sql_help.c:3021 msgid "send_function" msgstr "sändfunktion" -#: sql_help.c:2728 +#: sql_help.c:3022 msgid "type_modifier_input_function" msgstr "typmodifiering_indatafunktion" -#: sql_help.c:2729 +#: sql_help.c:3023 msgid "type_modifier_output_function" msgstr "typmodifiering_utdatafunktion" -#: sql_help.c:2730 +#: sql_help.c:3024 msgid "analyze_function" msgstr "analysfunktion" -#: sql_help.c:2731 +#: sql_help.c:3025 msgid "internallength" msgstr "internlängd" -#: sql_help.c:2732 +#: sql_help.c:3026 msgid "alignment" msgstr "justering" -#: sql_help.c:2733 +#: sql_help.c:3027 msgid "storage" msgstr "lagring" -#: sql_help.c:2734 +#: sql_help.c:3028 msgid "like_type" msgstr "liketyp" -#: sql_help.c:2735 +#: sql_help.c:3029 
msgid "category" msgstr "kategori" -#: sql_help.c:2736 +#: sql_help.c:3030 msgid "preferred" msgstr "föredragen" -#: sql_help.c:2737 +#: sql_help.c:3031 msgid "default" msgstr "standard" -#: sql_help.c:2738 +#: sql_help.c:3032 msgid "element" msgstr "element" -#: sql_help.c:2739 +#: sql_help.c:3033 msgid "delimiter" msgstr "avskiljare" -#: sql_help.c:2740 +#: sql_help.c:3034 msgid "collatable" msgstr "sorterbar" -#: sql_help.c:2837 sql_help.c:3449 sql_help.c:3887 sql_help.c:3976 -#: sql_help.c:4126 sql_help.c:4226 sql_help.c:4320 +#: sql_help.c:3131 sql_help.c:3767 sql_help.c:4215 sql_help.c:4304 +#: sql_help.c:4454 sql_help.c:4554 sql_help.c:4661 msgid "with_query" msgstr "with_fråga" -#: sql_help.c:2839 sql_help.c:3451 sql_help.c:3906 sql_help.c:3912 -#: sql_help.c:3915 sql_help.c:3919 sql_help.c:3923 sql_help.c:3931 -#: sql_help.c:4145 sql_help.c:4151 sql_help.c:4154 sql_help.c:4158 -#: sql_help.c:4162 sql_help.c:4170 sql_help.c:4228 sql_help.c:4339 -#: sql_help.c:4345 sql_help.c:4348 sql_help.c:4352 sql_help.c:4356 -#: sql_help.c:4364 +#: sql_help.c:3133 sql_help.c:3769 sql_help.c:4234 sql_help.c:4240 +#: sql_help.c:4243 sql_help.c:4247 sql_help.c:4251 sql_help.c:4259 +#: sql_help.c:4473 sql_help.c:4479 sql_help.c:4482 sql_help.c:4486 +#: sql_help.c:4490 sql_help.c:4498 sql_help.c:4556 sql_help.c:4680 +#: sql_help.c:4686 sql_help.c:4689 sql_help.c:4693 sql_help.c:4697 +#: sql_help.c:4705 msgid "alias" msgstr "alias" -#: sql_help.c:2840 +#: sql_help.c:3134 msgid "using_list" msgstr "using_lista" -#: sql_help.c:2842 sql_help.c:3289 sql_help.c:3530 sql_help.c:4237 +#: sql_help.c:3136 sql_help.c:3607 sql_help.c:3848 sql_help.c:4565 msgid "cursor_name" msgstr "markörnamn" -#: sql_help.c:2843 sql_help.c:3457 sql_help.c:4238 +#: sql_help.c:3137 sql_help.c:3775 sql_help.c:4566 msgid "output_expression" msgstr "utdatauttryck" -#: sql_help.c:2844 sql_help.c:3458 sql_help.c:3890 sql_help.c:3979 -#: sql_help.c:4129 sql_help.c:4239 sql_help.c:4323 +#: sql_help.c:3138 sql_help.c:3776 sql_help.c:4218 sql_help.c:4307 +#: sql_help.c:4457 sql_help.c:4567 sql_help.c:4664 msgid "output_name" msgstr "utdatanamn" -#: sql_help.c:2860 +#: sql_help.c:3154 msgid "code" msgstr "kod" -#: sql_help.c:3235 +#: sql_help.c:3553 msgid "parameter" msgstr "parameter" -#: sql_help.c:3255 sql_help.c:3256 sql_help.c:3555 +#: sql_help.c:3573 sql_help.c:3574 sql_help.c:3873 msgid "statement" msgstr "sats" -#: sql_help.c:3288 sql_help.c:3529 +#: sql_help.c:3606 sql_help.c:3847 msgid "direction" msgstr "riktning" -#: sql_help.c:3290 sql_help.c:3531 +#: sql_help.c:3608 sql_help.c:3849 msgid "where direction can be empty or one of:" msgstr "där riktning kan vara tom eller en av:" -#: sql_help.c:3291 sql_help.c:3292 sql_help.c:3293 sql_help.c:3294 -#: sql_help.c:3295 sql_help.c:3532 sql_help.c:3533 sql_help.c:3534 -#: sql_help.c:3535 sql_help.c:3536 sql_help.c:3900 sql_help.c:3902 -#: sql_help.c:3990 sql_help.c:3992 sql_help.c:4139 sql_help.c:4141 -#: sql_help.c:4268 sql_help.c:4270 sql_help.c:4333 sql_help.c:4335 +#: sql_help.c:3609 sql_help.c:3610 sql_help.c:3611 sql_help.c:3612 +#: sql_help.c:3613 sql_help.c:3850 sql_help.c:3851 sql_help.c:3852 +#: sql_help.c:3853 sql_help.c:3854 sql_help.c:4228 sql_help.c:4230 +#: sql_help.c:4318 sql_help.c:4320 sql_help.c:4467 sql_help.c:4469 +#: sql_help.c:4609 sql_help.c:4611 sql_help.c:4674 sql_help.c:4676 msgid "count" msgstr "antal" -#: sql_help.c:3375 sql_help.c:3708 +#: sql_help.c:3693 sql_help.c:4026 msgid "sequence_name" msgstr "sekvensnamn" -#: sql_help.c:3388 sql_help.c:3721 +#: 
sql_help.c:3706 sql_help.c:4039 msgid "arg_name" msgstr "arg_namn" -#: sql_help.c:3389 sql_help.c:3722 +#: sql_help.c:3707 sql_help.c:4040 msgid "arg_type" msgstr "arg_typ" -#: sql_help.c:3394 sql_help.c:3727 +#: sql_help.c:3712 sql_help.c:4045 msgid "loid" msgstr "loid" -#: sql_help.c:3417 +#: sql_help.c:3735 msgid "remote_schema" msgstr "externt_schema" -#: sql_help.c:3420 +#: sql_help.c:3738 msgid "local_schema" msgstr "lokalt_schema" -#: sql_help.c:3455 +#: sql_help.c:3773 msgid "conflict_target" msgstr "konfliktmål" -#: sql_help.c:3456 +#: sql_help.c:3774 msgid "conflict_action" msgstr "konfliktaktion" -#: sql_help.c:3459 +#: sql_help.c:3777 msgid "where conflict_target can be one of:" msgstr "där konfliktmål kan vara en av:" -#: sql_help.c:3460 +#: sql_help.c:3778 msgid "index_column_name" msgstr "indexkolumnnamn" -#: sql_help.c:3461 +#: sql_help.c:3779 msgid "index_expression" msgstr "indexuttryck" -#: sql_help.c:3464 +#: sql_help.c:3782 msgid "index_predicate" msgstr "indexpredikat" -#: sql_help.c:3466 +#: sql_help.c:3784 msgid "and conflict_action is one of:" msgstr "och konfliktaktion är en av:" -#: sql_help.c:3472 sql_help.c:4234 +#: sql_help.c:3790 sql_help.c:4562 msgid "sub-SELECT" msgstr "sub-SELECT" -#: sql_help.c:3481 sql_help.c:3544 sql_help.c:4210 +#: sql_help.c:3799 sql_help.c:3862 sql_help.c:4538 msgid "channel" msgstr "kanal" -#: sql_help.c:3503 +#: sql_help.c:3821 msgid "lockmode" msgstr "låsläge" -#: sql_help.c:3504 +#: sql_help.c:3822 msgid "where lockmode is one of:" msgstr "där låsläge är en av:" -#: sql_help.c:3545 +#: sql_help.c:3863 msgid "payload" msgstr "innehåll" -#: sql_help.c:3572 +#: sql_help.c:3890 msgid "old_role" msgstr "gammal_roll" -#: sql_help.c:3573 +#: sql_help.c:3891 msgid "new_role" msgstr "ny_roll" -#: sql_help.c:3598 sql_help.c:3759 sql_help.c:3767 +#: sql_help.c:3916 sql_help.c:4077 sql_help.c:4085 msgid "savepoint_name" msgstr "sparpunktnamn" -#: sql_help.c:3891 sql_help.c:3933 sql_help.c:3935 sql_help.c:3981 -#: sql_help.c:4130 sql_help.c:4172 sql_help.c:4174 sql_help.c:4324 -#: sql_help.c:4366 sql_help.c:4368 +#: sql_help.c:4219 sql_help.c:4261 sql_help.c:4263 sql_help.c:4309 +#: sql_help.c:4458 sql_help.c:4500 sql_help.c:4502 sql_help.c:4665 +#: sql_help.c:4707 sql_help.c:4709 msgid "from_item" msgstr "frånval" -#: sql_help.c:3893 sql_help.c:3945 sql_help.c:4132 sql_help.c:4184 -#: sql_help.c:4326 sql_help.c:4378 +#: sql_help.c:4221 sql_help.c:4273 sql_help.c:4460 sql_help.c:4512 +#: sql_help.c:4667 sql_help.c:4719 msgid "grouping_element" msgstr "gruperingselement" -#: sql_help.c:3895 sql_help.c:3985 sql_help.c:4134 sql_help.c:4328 +#: sql_help.c:4223 sql_help.c:4313 sql_help.c:4462 sql_help.c:4669 msgid "window_name" msgstr "fönsternamn" -#: sql_help.c:3896 sql_help.c:3986 sql_help.c:4135 sql_help.c:4329 +#: sql_help.c:4224 sql_help.c:4314 sql_help.c:4463 sql_help.c:4670 msgid "window_definition" msgstr "fönsterdefinition" -#: sql_help.c:3897 sql_help.c:3911 sql_help.c:3949 sql_help.c:3987 -#: sql_help.c:4136 sql_help.c:4150 sql_help.c:4188 sql_help.c:4330 -#: sql_help.c:4344 sql_help.c:4382 +#: sql_help.c:4225 sql_help.c:4239 sql_help.c:4277 sql_help.c:4315 +#: sql_help.c:4464 sql_help.c:4478 sql_help.c:4516 sql_help.c:4671 +#: sql_help.c:4685 sql_help.c:4723 msgid "select" msgstr "select" -#: sql_help.c:3904 sql_help.c:4143 sql_help.c:4337 +#: sql_help.c:4232 sql_help.c:4471 sql_help.c:4678 msgid "where from_item can be one of:" msgstr "där frånval kan vara en av:" -#: sql_help.c:3907 sql_help.c:3913 sql_help.c:3916 sql_help.c:3920 
-#: sql_help.c:3932 sql_help.c:4146 sql_help.c:4152 sql_help.c:4155 -#: sql_help.c:4159 sql_help.c:4171 sql_help.c:4340 sql_help.c:4346 -#: sql_help.c:4349 sql_help.c:4353 sql_help.c:4365 +#: sql_help.c:4235 sql_help.c:4241 sql_help.c:4244 sql_help.c:4248 +#: sql_help.c:4260 sql_help.c:4474 sql_help.c:4480 sql_help.c:4483 +#: sql_help.c:4487 sql_help.c:4499 sql_help.c:4681 sql_help.c:4687 +#: sql_help.c:4690 sql_help.c:4694 sql_help.c:4706 msgid "column_alias" msgstr "kolumnalias" -#: sql_help.c:3908 sql_help.c:4147 sql_help.c:4341 +#: sql_help.c:4236 sql_help.c:4475 sql_help.c:4682 msgid "sampling_method" msgstr "samplingsmetod" -#: sql_help.c:3909 sql_help.c:3918 sql_help.c:3922 sql_help.c:3926 -#: sql_help.c:3929 sql_help.c:4148 sql_help.c:4157 sql_help.c:4161 -#: sql_help.c:4165 sql_help.c:4168 sql_help.c:4342 sql_help.c:4351 -#: sql_help.c:4355 sql_help.c:4359 sql_help.c:4362 -msgid "argument" -msgstr "argument" - -#: sql_help.c:3910 sql_help.c:4149 sql_help.c:4343 +#: sql_help.c:4238 sql_help.c:4477 sql_help.c:4684 msgid "seed" msgstr "frö" -#: sql_help.c:3914 sql_help.c:3947 sql_help.c:4153 sql_help.c:4186 -#: sql_help.c:4347 sql_help.c:4380 +#: sql_help.c:4242 sql_help.c:4275 sql_help.c:4481 sql_help.c:4514 +#: sql_help.c:4688 sql_help.c:4721 msgid "with_query_name" msgstr "with_frågenamn" -#: sql_help.c:3924 sql_help.c:3927 sql_help.c:3930 sql_help.c:4163 -#: sql_help.c:4166 sql_help.c:4169 sql_help.c:4357 sql_help.c:4360 -#: sql_help.c:4363 +#: sql_help.c:4252 sql_help.c:4255 sql_help.c:4258 sql_help.c:4491 +#: sql_help.c:4494 sql_help.c:4497 sql_help.c:4698 sql_help.c:4701 +#: sql_help.c:4704 msgid "column_definition" msgstr "kolumndefinition" -#: sql_help.c:3934 sql_help.c:4173 sql_help.c:4367 +#: sql_help.c:4262 sql_help.c:4501 sql_help.c:4708 msgid "join_type" msgstr "join-typ" -#: sql_help.c:3936 sql_help.c:4175 sql_help.c:4369 +#: sql_help.c:4264 sql_help.c:4503 sql_help.c:4710 msgid "join_condition" msgstr "join-villkor" -#: sql_help.c:3937 sql_help.c:4176 sql_help.c:4370 +#: sql_help.c:4265 sql_help.c:4504 sql_help.c:4711 msgid "join_column" msgstr "join-kolumn" -#: sql_help.c:3938 sql_help.c:4177 sql_help.c:4371 +#: sql_help.c:4266 sql_help.c:4505 sql_help.c:4712 msgid "and grouping_element can be one of:" msgstr "och grupperingselement kan vara en av:" -#: sql_help.c:3946 sql_help.c:4185 sql_help.c:4379 +#: sql_help.c:4274 sql_help.c:4513 sql_help.c:4720 msgid "and with_query is:" msgstr "och with_fråga är:" -#: sql_help.c:3950 sql_help.c:4189 sql_help.c:4383 +#: sql_help.c:4278 sql_help.c:4517 sql_help.c:4724 msgid "values" msgstr "värden" -#: sql_help.c:3951 sql_help.c:4190 sql_help.c:4384 +#: sql_help.c:4279 sql_help.c:4518 sql_help.c:4725 msgid "insert" msgstr "insert" -#: sql_help.c:3952 sql_help.c:4191 sql_help.c:4385 +#: sql_help.c:4280 sql_help.c:4519 sql_help.c:4726 msgid "update" msgstr "update" -#: sql_help.c:3953 sql_help.c:4192 sql_help.c:4386 +#: sql_help.c:4281 sql_help.c:4520 sql_help.c:4727 msgid "delete" msgstr "delete" -#: sql_help.c:3980 +#: sql_help.c:4308 msgid "new_table" msgstr "ny_tabell" -#: sql_help.c:4005 +#: sql_help.c:4333 msgid "timezone" msgstr "tidszon" -#: sql_help.c:4050 +#: sql_help.c:4378 msgid "snapshot_id" msgstr "snapshot_id" -#: sql_help.c:4235 +#: sql_help.c:4563 msgid "from_list" msgstr "frånlista" -#: sql_help.c:4266 +#: sql_help.c:4607 msgid "sort_expression" msgstr "sorteringsuttryck" -#: sql_help.c:4393 sql_help.c:5178 +#: sql_help.c:4734 sql_help.c:5549 msgid "abort the current transaction" msgstr "avbryt aktuell 
transaktion" -#: sql_help.c:4398 +#: sql_help.c:4739 msgid "change the definition of an aggregate function" msgstr "ändra definitionen av en aggregatfunktion" -#: sql_help.c:4403 +#: sql_help.c:4744 msgid "change the definition of a collation" -msgstr "ändra definitionen av en sortering" +msgstr "ändra definitionen av en jämförelse" -#: sql_help.c:4408 +#: sql_help.c:4749 msgid "change the definition of a conversion" msgstr "ändra definitionen av en konvertering" -#: sql_help.c:4413 +#: sql_help.c:4754 msgid "change a database" msgstr "ändra en databas" -#: sql_help.c:4418 +#: sql_help.c:4759 msgid "define default access privileges" msgstr "definiera standardaccessrättigheter" -#: sql_help.c:4423 +#: sql_help.c:4764 msgid "change the definition of a domain" msgstr "ändra definitionen av en domän" -#: sql_help.c:4428 +#: sql_help.c:4769 msgid "change the definition of an event trigger" msgstr "ändra definitionen av en händelseutlösare" -#: sql_help.c:4433 +#: sql_help.c:4774 msgid "change the definition of an extension" msgstr "ändra definitionen av en utökning" -#: sql_help.c:4438 +#: sql_help.c:4779 msgid "change the definition of a foreign-data wrapper" msgstr "ändra definitionen av en främmande data-omvandlare" -#: sql_help.c:4443 +#: sql_help.c:4784 msgid "change the definition of a foreign table" msgstr "ändra definitionen av en främmande tabell" -#: sql_help.c:4448 +#: sql_help.c:4789 msgid "change the definition of a function" msgstr "ändra definitionen av en funktion" -#: sql_help.c:4453 +#: sql_help.c:4794 msgid "change role name or membership" msgstr "ändra rollnamn eller medlemskap" -#: sql_help.c:4458 +#: sql_help.c:4799 msgid "change the definition of an index" msgstr "ändra definitionen av ett index" -#: sql_help.c:4463 +#: sql_help.c:4804 msgid "change the definition of a procedural language" msgstr "ändra definitionen av ett procedur-språk" -#: sql_help.c:4468 +#: sql_help.c:4809 msgid "change the definition of a large object" msgstr "ändra definitionen av ett stort objekt" -#: sql_help.c:4473 +#: sql_help.c:4814 msgid "change the definition of a materialized view" msgstr "ändra definitionen av en materialiserad vy" -#: sql_help.c:4478 +#: sql_help.c:4819 msgid "change the definition of an operator" msgstr "ändra definitionen av en operator" -#: sql_help.c:4483 +#: sql_help.c:4824 msgid "change the definition of an operator class" msgstr "ändra definitionen av en operatorklass" -#: sql_help.c:4488 +#: sql_help.c:4829 msgid "change the definition of an operator family" msgstr "ändra definitionen av en operatorfamilj" -#: sql_help.c:4493 +#: sql_help.c:4834 msgid "change the definition of a row level security policy" msgstr "ändra definitionen av en säkerhetspolicy på radnivå" -#: sql_help.c:4498 +#: sql_help.c:4839 +msgid "change the definition of a procedure" +msgstr "ändra definitionen av en procedur" + +#: sql_help.c:4844 msgid "change the definition of a publication" msgstr "ändra definitionen av en publicering" -#: sql_help.c:4503 sql_help.c:4583 +#: sql_help.c:4849 sql_help.c:4934 msgid "change a database role" msgstr "ändra databasroll" -#: sql_help.c:4508 +#: sql_help.c:4854 +msgid "change the definition of a routine" +msgstr "ändra definitionen av en rutin" + +#: sql_help.c:4859 msgid "change the definition of a rule" msgstr "ändra definitionen av en regel" -#: sql_help.c:4513 +#: sql_help.c:4864 msgid "change the definition of a schema" msgstr "ändra definitionen av ett schema" -#: sql_help.c:4518 +#: sql_help.c:4869 msgid "change the definition of a sequence 
generator" msgstr "ändra definitionen av en sekvensgenerator" -#: sql_help.c:4523 +#: sql_help.c:4874 msgid "change the definition of a foreign server" msgstr "ändra definitionen av en främmande server" -#: sql_help.c:4528 +#: sql_help.c:4879 msgid "change the definition of an extended statistics object" msgstr "ändra definitionen av ett utökat statistikobjekt" -#: sql_help.c:4533 +#: sql_help.c:4884 msgid "change the definition of a subscription" msgstr "ändra definitionen av en prenumerering" -#: sql_help.c:4538 +#: sql_help.c:4889 msgid "change a server configuration parameter" msgstr "ändra en servers konfigurationsparameter" -#: sql_help.c:4543 +#: sql_help.c:4894 msgid "change the definition of a table" msgstr "ändra definitionen av en tabell" -#: sql_help.c:4548 +#: sql_help.c:4899 msgid "change the definition of a tablespace" msgstr "ändra definitionen av ett tabellutrymme" -#: sql_help.c:4553 +#: sql_help.c:4904 msgid "change the definition of a text search configuration" msgstr "ändra definitionen av en textsökkonfiguration" -#: sql_help.c:4558 +#: sql_help.c:4909 msgid "change the definition of a text search dictionary" msgstr "ändra definitionen av en textsökordlista" -#: sql_help.c:4563 +#: sql_help.c:4914 msgid "change the definition of a text search parser" msgstr "ändra definitionen av en textsökparser" -#: sql_help.c:4568 +#: sql_help.c:4919 msgid "change the definition of a text search template" msgstr "ändra definitionen av en textsökmall" -#: sql_help.c:4573 +#: sql_help.c:4924 msgid "change the definition of a trigger" msgstr "ändra definitionen av en utlösare" -#: sql_help.c:4578 +#: sql_help.c:4929 msgid "change the definition of a type" msgstr "ändra definitionen av en typ" -#: sql_help.c:4588 +#: sql_help.c:4939 msgid "change the definition of a user mapping" msgstr "ändra definitionen av en användarmappning" -#: sql_help.c:4593 +#: sql_help.c:4944 msgid "change the definition of a view" msgstr "ändra definitionen av en vy" -#: sql_help.c:4598 +#: sql_help.c:4949 msgid "collect statistics about a database" msgstr "samla in statistik om en databas" -#: sql_help.c:4603 sql_help.c:5243 +#: sql_help.c:4954 sql_help.c:5614 msgid "start a transaction block" msgstr "starta ett transaktionsblock" -#: sql_help.c:4608 +#: sql_help.c:4959 +msgid "invoke a procedure" +msgstr "anropa en procedur" + +#: sql_help.c:4964 msgid "force a write-ahead log checkpoint" msgstr "tvinga checkpoint i transaktionsloggen" -#: sql_help.c:4613 +#: sql_help.c:4969 msgid "close a cursor" msgstr "stäng en markör" -#: sql_help.c:4618 +#: sql_help.c:4974 msgid "cluster a table according to an index" msgstr "klustra en tabell efter ett index" -#: sql_help.c:4623 +#: sql_help.c:4979 msgid "define or change the comment of an object" msgstr "definiera eller ändra en kommentar på ett objekt" -#: sql_help.c:4628 sql_help.c:5078 +#: sql_help.c:4984 sql_help.c:5449 msgid "commit the current transaction" msgstr "utför den aktuella transaktionen" -#: sql_help.c:4633 +#: sql_help.c:4989 msgid "commit a transaction that was earlier prepared for two-phase commit" msgstr "utför commit på en transaktion som tidigare förberetts för två-fas-commit" -#: sql_help.c:4638 +#: sql_help.c:4994 msgid "copy data between a file and a table" msgstr "kopiera data mellan en fil och en tabell" -#: sql_help.c:4643 +#: sql_help.c:4999 msgid "define a new access method" msgstr "definiera en ny accessmetod" -#: sql_help.c:4648 +#: sql_help.c:5004 msgid "define a new aggregate function" msgstr "definiera en ny aggregatfunktion" -#: 
sql_help.c:4653 +#: sql_help.c:5009 msgid "define a new cast" msgstr "definiera en ny typomvandling" -#: sql_help.c:4658 +#: sql_help.c:5014 msgid "define a new collation" -msgstr "definiera en ny sortering" +msgstr "definiera en ny jämförelse" -#: sql_help.c:4663 +#: sql_help.c:5019 msgid "define a new encoding conversion" msgstr "definiera en ny teckenkodningskonvertering" -#: sql_help.c:4668 +#: sql_help.c:5024 msgid "create a new database" msgstr "skapa en ny databas" -#: sql_help.c:4673 +#: sql_help.c:5029 msgid "define a new domain" msgstr "definiera en ny domän" -#: sql_help.c:4678 +#: sql_help.c:5034 msgid "define a new event trigger" msgstr "definiera en ny händelseutlösare" -#: sql_help.c:4683 +#: sql_help.c:5039 msgid "install an extension" msgstr "installera en utökning" -#: sql_help.c:4688 +#: sql_help.c:5044 msgid "define a new foreign-data wrapper" msgstr "definiera en ny främmande data-omvandlare" -#: sql_help.c:4693 +#: sql_help.c:5049 msgid "define a new foreign table" msgstr "definiera en ny främmande tabell" -#: sql_help.c:4698 +#: sql_help.c:5054 msgid "define a new function" msgstr "definiera en ny funktion" -#: sql_help.c:4703 sql_help.c:4748 sql_help.c:4833 +#: sql_help.c:5059 sql_help.c:5109 sql_help.c:5194 msgid "define a new database role" msgstr "definiera en ny databasroll" -#: sql_help.c:4708 +#: sql_help.c:5064 msgid "define a new index" msgstr "skapa ett nytt index" -#: sql_help.c:4713 +#: sql_help.c:5069 msgid "define a new procedural language" msgstr "definiera ett nytt procedur-språk" -#: sql_help.c:4718 +#: sql_help.c:5074 msgid "define a new materialized view" msgstr "definiera en ny materialiserad vy" -#: sql_help.c:4723 +#: sql_help.c:5079 msgid "define a new operator" msgstr "definiera en ny operator" -#: sql_help.c:4728 +#: sql_help.c:5084 msgid "define a new operator class" msgstr "definiera en ny operatorklass" -#: sql_help.c:4733 +#: sql_help.c:5089 msgid "define a new operator family" msgstr "definiera en ny operatorfamilj" -#: sql_help.c:4738 +#: sql_help.c:5094 msgid "define a new row level security policy for a table" msgstr "definiera en ny säkerhetspolicy på radnivå för en tabell" -#: sql_help.c:4743 +#: sql_help.c:5099 +msgid "define a new procedure" +msgstr "definiera en ny procedur" + +#: sql_help.c:5104 msgid "define a new publication" msgstr "definiera en ny publicering" -#: sql_help.c:4753 +#: sql_help.c:5114 msgid "define a new rewrite rule" msgstr "definiera en ny omskrivningsregel" -#: sql_help.c:4758 +#: sql_help.c:5119 msgid "define a new schema" msgstr "definiera ett nytt schema" -#: sql_help.c:4763 +#: sql_help.c:5124 msgid "define a new sequence generator" msgstr "definiera en ny sekvensgenerator" -#: sql_help.c:4768 +#: sql_help.c:5129 msgid "define a new foreign server" msgstr "definiera en ny främmande server" -#: sql_help.c:4773 +#: sql_help.c:5134 msgid "define extended statistics" msgstr "definiera utökad statistik" -#: sql_help.c:4778 +#: sql_help.c:5139 msgid "define a new subscription" msgstr "definiera en ny prenumeration" -#: sql_help.c:4783 +#: sql_help.c:5144 msgid "define a new table" msgstr "definiera en ny tabell" -#: sql_help.c:4788 sql_help.c:5208 +#: sql_help.c:5149 sql_help.c:5579 msgid "define a new table from the results of a query" msgstr "definiera en ny tabell utifrån resultatet av en fråga" -#: sql_help.c:4793 +#: sql_help.c:5154 msgid "define a new tablespace" msgstr "definiera ett nytt tabellutrymme" -#: sql_help.c:4798 +#: sql_help.c:5159 msgid "define a new text search configuration" msgstr
"definiera en ny textsökkonfiguration" -#: sql_help.c:4803 +#: sql_help.c:5164 msgid "define a new text search dictionary" msgstr "definiera en ny textsökordlista" -#: sql_help.c:4808 +#: sql_help.c:5169 msgid "define a new text search parser" msgstr "definiera en ny textsökparser" -#: sql_help.c:4813 +#: sql_help.c:5174 msgid "define a new text search template" msgstr "definiera en ny textsökmall" -#: sql_help.c:4818 +#: sql_help.c:5179 msgid "define a new transform" msgstr "definiera en ny transform" -#: sql_help.c:4823 +#: sql_help.c:5184 msgid "define a new trigger" msgstr "definiera en ny utlösare" -#: sql_help.c:4828 +#: sql_help.c:5189 msgid "define a new data type" msgstr "definiera en ny datatyp" -#: sql_help.c:4838 +#: sql_help.c:5199 msgid "define a new mapping of a user to a foreign server" msgstr "definiera en ny mappning av en användare till en främmande server" -#: sql_help.c:4843 +#: sql_help.c:5204 msgid "define a new view" msgstr "definiera en ny vy" -#: sql_help.c:4848 +#: sql_help.c:5209 msgid "deallocate a prepared statement" msgstr "deallokera en förberedd sats" -#: sql_help.c:4853 +#: sql_help.c:5214 msgid "define a cursor" msgstr "definiera en markör" -#: sql_help.c:4858 +#: sql_help.c:5219 msgid "delete rows of a table" msgstr "radera rader i en tabell" -#: sql_help.c:4863 +#: sql_help.c:5224 msgid "discard session state" msgstr "släng sessionstillstånd" -#: sql_help.c:4868 +#: sql_help.c:5229 msgid "execute an anonymous code block" msgstr "kör ett annonymt kodblock" -#: sql_help.c:4873 +#: sql_help.c:5234 msgid "remove an access method" msgstr "ta bort en accessmetod" -#: sql_help.c:4878 +#: sql_help.c:5239 msgid "remove an aggregate function" msgstr "ta bort en aggregatfunktioner" -#: sql_help.c:4883 +#: sql_help.c:5244 msgid "remove a cast" msgstr "ta bort en typomvandling" -#: sql_help.c:4888 +#: sql_help.c:5249 msgid "remove a collation" -msgstr "ta bort en sortering" +msgstr "ta bort en jämförelse" -#: sql_help.c:4893 +#: sql_help.c:5254 msgid "remove a conversion" msgstr "ta bort en konvertering" -#: sql_help.c:4898 +#: sql_help.c:5259 msgid "remove a database" msgstr "ta bort en databas" -#: sql_help.c:4903 +#: sql_help.c:5264 msgid "remove a domain" msgstr "ta bort en domän" -#: sql_help.c:4908 +#: sql_help.c:5269 msgid "remove an event trigger" msgstr "ta bort en händelseutlösare" -#: sql_help.c:4913 +#: sql_help.c:5274 msgid "remove an extension" msgstr "ta bort en utökning" -#: sql_help.c:4918 +#: sql_help.c:5279 msgid "remove a foreign-data wrapper" msgstr "ta bort en frammande data-omvandlare" -#: sql_help.c:4923 +#: sql_help.c:5284 msgid "remove a foreign table" msgstr "ta bort en främmande tabell" -#: sql_help.c:4928 +#: sql_help.c:5289 msgid "remove a function" msgstr "ta bort en funktion" -#: sql_help.c:4933 sql_help.c:4983 sql_help.c:5063 +#: sql_help.c:5294 sql_help.c:5349 sql_help.c:5434 msgid "remove a database role" msgstr "ta bort en databasroll" -#: sql_help.c:4938 +#: sql_help.c:5299 msgid "remove an index" msgstr "ta bort ett index" -#: sql_help.c:4943 +#: sql_help.c:5304 msgid "remove a procedural language" msgstr "ta bort ett procedur-språk" -#: sql_help.c:4948 +#: sql_help.c:5309 msgid "remove a materialized view" msgstr "ta bort en materialiserad vy" -#: sql_help.c:4953 +#: sql_help.c:5314 msgid "remove an operator" msgstr "ta bort en operator" -#: sql_help.c:4958 +#: sql_help.c:5319 msgid "remove an operator class" msgstr "ta bort en operatorklass" -#: sql_help.c:4963 +#: sql_help.c:5324 msgid "remove an operator family" msgstr "ta 
bort en operatorfamilj" -#: sql_help.c:4968 +#: sql_help.c:5329 msgid "remove database objects owned by a database role" msgstr "ta bort databasobjekt som ägs av databasroll" -#: sql_help.c:4973 +#: sql_help.c:5334 msgid "remove a row level security policy from a table" msgstr "ta bort en säkerhetspolicy på radnivå från en tabell" -#: sql_help.c:4978 +#: sql_help.c:5339 +msgid "remove a procedure" +msgstr "ta bort en procedur" + +#: sql_help.c:5344 msgid "remove a publication" msgstr "ta bort en publicering" -#: sql_help.c:4988 +#: sql_help.c:5354 +msgid "remove a routine" +msgstr "ta bort en rutin" + +#: sql_help.c:5359 msgid "remove a rewrite rule" msgstr "ta bort en omskrivningsregel" -#: sql_help.c:4993 +#: sql_help.c:5364 msgid "remove a schema" msgstr "ta bort ett schema" -#: sql_help.c:4998 +#: sql_help.c:5369 msgid "remove a sequence" msgstr "ta bort en sekvens" -#: sql_help.c:5003 +#: sql_help.c:5374 msgid "remove a foreign server descriptor" msgstr "ta bort en främmande server-deskriptor" -#: sql_help.c:5008 +#: sql_help.c:5379 msgid "remove extended statistics" msgstr "ta bort utökad statistik" -#: sql_help.c:5013 +#: sql_help.c:5384 msgid "remove a subscription" msgstr "ta bort en prenumeration" -#: sql_help.c:5018 +#: sql_help.c:5389 msgid "remove a table" msgstr "ta bort en tabell" -#: sql_help.c:5023 +#: sql_help.c:5394 msgid "remove a tablespace" msgstr "ta bort ett tabellutrymme" -#: sql_help.c:5028 +#: sql_help.c:5399 msgid "remove a text search configuration" msgstr "ta bort en textsökkonfiguration" -#: sql_help.c:5033 +#: sql_help.c:5404 msgid "remove a text search dictionary" msgstr "ta bort en textsökordlista" -#: sql_help.c:5038 +#: sql_help.c:5409 msgid "remove a text search parser" msgstr "ta bort en textsökparser" -#: sql_help.c:5043 +#: sql_help.c:5414 msgid "remove a text search template" msgstr "ta bort en textsökmall" -#: sql_help.c:5048 +#: sql_help.c:5419 msgid "remove a transform" msgstr "ta bort en transform" -#: sql_help.c:5053 +#: sql_help.c:5424 msgid "remove a trigger" msgstr "ta bort en utlösare" -#: sql_help.c:5058 +#: sql_help.c:5429 msgid "remove a data type" msgstr "ta bort en datatyp" -#: sql_help.c:5068 +#: sql_help.c:5439 msgid "remove a user mapping for a foreign server" msgstr "ta bort en användarmappning för en främmande server" -#: sql_help.c:5073 +#: sql_help.c:5444 msgid "remove a view" msgstr "ta bort en vy" -#: sql_help.c:5083 +#: sql_help.c:5454 msgid "execute a prepared statement" msgstr "utför en förberedd sats" -#: sql_help.c:5088 +#: sql_help.c:5459 msgid "show the execution plan of a statement" msgstr "visa körningsplanen för en sats" -#: sql_help.c:5093 +#: sql_help.c:5464 msgid "retrieve rows from a query using a cursor" msgstr "hämta rader från en fråga med hjälp av en markör" -#: sql_help.c:5098 +#: sql_help.c:5469 msgid "define access privileges" msgstr "definera åtkomsträttigheter" -#: sql_help.c:5103 +#: sql_help.c:5474 msgid "import table definitions from a foreign server" msgstr "importera tabelldefinitioner från en främmande server" -#: sql_help.c:5108 +#: sql_help.c:5479 msgid "create new rows in a table" msgstr "skapa nya rader i en tabell" -#: sql_help.c:5113 +#: sql_help.c:5484 msgid "listen for a notification" msgstr "lyssna efter notifiering" -#: sql_help.c:5118 +#: sql_help.c:5489 msgid "load a shared library file" msgstr "ladda en delad biblioteksfil (shared library)" -#: sql_help.c:5123 +#: sql_help.c:5494 msgid "lock a table" msgstr "lås en tabell" -#: sql_help.c:5128 +#: sql_help.c:5499 msgid "position a 
cursor" msgstr "flytta en markör" -#: sql_help.c:5133 +#: sql_help.c:5504 msgid "generate a notification" msgstr "generera en notifiering" -#: sql_help.c:5138 +#: sql_help.c:5509 msgid "prepare a statement for execution" msgstr "förbered en sats för körning" -#: sql_help.c:5143 +#: sql_help.c:5514 msgid "prepare the current transaction for two-phase commit" msgstr "avbryt aktuell transaktion för två-fas-commit" -#: sql_help.c:5148 +#: sql_help.c:5519 msgid "change the ownership of database objects owned by a database role" msgstr "byt ägare på databasobjekt som ägs av en databasroll" -#: sql_help.c:5153 +#: sql_help.c:5524 msgid "replace the contents of a materialized view" msgstr "ersätt innehållet av en materialiserad vy" -#: sql_help.c:5158 +#: sql_help.c:5529 msgid "rebuild indexes" msgstr "återskapa index" -#: sql_help.c:5163 +#: sql_help.c:5534 msgid "destroy a previously defined savepoint" msgstr "ta bort en tidigare definierad sparpunkt" -#: sql_help.c:5168 +#: sql_help.c:5539 msgid "restore the value of a run-time parameter to the default value" msgstr "återställ värde av körningsparameter till standardvärdet" -#: sql_help.c:5173 +#: sql_help.c:5544 msgid "remove access privileges" msgstr "ta bort åtkomsträttigheter" -#: sql_help.c:5183 +#: sql_help.c:5554 msgid "cancel a transaction that was earlier prepared for two-phase commit" msgstr "avbryt en transaktion som tidigare förberetts för två-fas-commit" -#: sql_help.c:5188 +#: sql_help.c:5559 msgid "roll back to a savepoint" msgstr "rulla tillbaka till sparpunkt" -#: sql_help.c:5193 +#: sql_help.c:5564 msgid "define a new savepoint within the current transaction" msgstr "definera en ny sparpunkt i den aktuella transaktionen" -#: sql_help.c:5198 +#: sql_help.c:5569 msgid "define or change a security label applied to an object" msgstr "definiera eller ändra en säkerhetsetikett på ett objekt" -#: sql_help.c:5203 sql_help.c:5248 sql_help.c:5278 +#: sql_help.c:5574 sql_help.c:5619 sql_help.c:5649 msgid "retrieve rows from a table or view" msgstr "hämta rader från en tabell eller vy" -#: sql_help.c:5213 +#: sql_help.c:5584 msgid "change a run-time parameter" -msgstr "ändra en körningsparamter" +msgstr "ändra en körningsparameter" -#: sql_help.c:5218 +#: sql_help.c:5589 msgid "set constraint check timing for the current transaction" msgstr "sätt integritetsvillkorstiming för nuvarande transaktion" -#: sql_help.c:5223 +#: sql_help.c:5594 msgid "set the current user identifier of the current session" msgstr "sätt användare för den aktiva sessionen" -#: sql_help.c:5228 +#: sql_help.c:5599 msgid "set the session user identifier and the current user identifier of the current session" msgstr "sätt sessionsanvändaridentifierare och nuvarande användaridentifierare för den aktiva sessionen" -#: sql_help.c:5233 +#: sql_help.c:5604 msgid "set the characteristics of the current transaction" msgstr "sätt inställningar för nuvarande transaktionen" -#: sql_help.c:5238 +#: sql_help.c:5609 msgid "show the value of a run-time parameter" msgstr "visa värde på en körningsparameter" -#: sql_help.c:5253 +#: sql_help.c:5624 msgid "empty a table or set of tables" msgstr "töm en eller flera tabeller" -#: sql_help.c:5258 +#: sql_help.c:5629 msgid "stop listening for a notification" msgstr "sluta att lyssna efter notifiering" -#: sql_help.c:5263 +#: sql_help.c:5634 msgid "update rows of a table" msgstr "uppdatera rader i en tabell" -#: sql_help.c:5268 +#: sql_help.c:5639 msgid "garbage-collect and optionally analyze a database" msgstr "skräpsamla och eventuellt 
analysera en databas" -#: sql_help.c:5273 +#: sql_help.c:5644 msgid "compute a set of rows" msgstr "beräkna en mängd rader" -#: startup.c:184 +#: startup.c:190 #, c-format msgid "%s: -1 can only be used in non-interactive mode\n" msgstr "%s: -1 kan bara användas i icke-interaktivt läge\n" -#: startup.c:287 +#: startup.c:305 #, c-format msgid "%s: could not open log file \"%s\": %s\n" msgstr "%s: kunde inte öppna logg-fil \"%s\": %s\n" -#: startup.c:394 +#: startup.c:412 #, c-format msgid "" "Type \"help\" for help.\n" @@ -5594,27 +6000,27 @@ msgstr "" "Skriv \"help\" för hjälp.\n" "\n" -#: startup.c:543 +#: startup.c:561 #, c-format msgid "%s: could not set printing parameter \"%s\"\n" msgstr "%s: kunde inte sätta utskriftsparameter \"%s\"\n" -#: startup.c:645 +#: startup.c:663 #, c-format msgid "Try \"%s --help\" for more information.\n" msgstr "Försök med \"%s --help\" för mer information.\n" -#: startup.c:662 +#: startup.c:680 #, c-format msgid "%s: warning: extra command-line argument \"%s\" ignored\n" msgstr "%s: varning: extra kommandoradsargument \"%s\" ignorerad\n" -#: startup.c:711 +#: startup.c:729 #, c-format msgid "%s: could not find own program executable\n" msgstr "%s: kunde inte hitta det egna programmets körbara fil\n" -#: tab-complete.c:4184 +#: tab-complete.c:4478 #, c-format msgid "" "tab completion query failed: %s\n" @@ -5627,8 +6033,8 @@ msgstr "" #: variables.c:139 #, c-format -msgid "unrecognized value \"%s\" for \"%s\": boolean expected\n" -msgstr "okänt värde \"%s\" för \"%s\": förväntade sig en boolean\n" +msgid "unrecognized value \"%s\" for \"%s\": Boolean expected\n" +msgstr "okänt värde \"%s\" för \"%s\": förväntade sig en Boolean\n" #: variables.c:176 #, c-format @@ -5649,643 +6055,928 @@ msgstr "" "okänt värde \"%s\" för \"%s\"\n" "Tillgängliga värden är: %s.\n" -#~ msgid "" -#~ "WARNING: You are connected to a server with major version %d.%d,\n" -#~ "but your %s client is major version %d.%d. Some backslash commands,\n" -#~ "such as \\d, might not work properly.\n" -#~ "\n" -#~ msgstr "" -#~ "VARNING: Du är uppkopplad mot en server med version %d.%d,\n" -#~ "men din klient %s är version %d.%d. En del snedstreckkommandon\n" -#~ "så som \\d kommer eventuellt inte att fungera som de skall.\n" -#~ "\n" +#~ msgid "normal" +#~ msgstr "normal" -#~ msgid "" -#~ "VALUES ( expression [, ...] ) [, ...]\n" -#~ " [ ORDER BY sort_expression [ ASC | DESC | USING operator ] [, ...] ]\n" -#~ " [ LIMIT { count | ALL } ]\n" -#~ " [ OFFSET start [ ROW | ROWS ] ]\n" -#~ " [ FETCH { FIRST | NEXT } [ count ] { ROW | ROWS } ONLY ]" -#~ msgstr "" -#~ "VALUES ( uttryck [, ...] ) [, ...]\n" -#~ " [ ORDER BY sorteringsuttryck [ ASC | DESC | USING operator ] [, ...] ]\n" -#~ " [ LIMIT { antal | ALL } ]\n" -#~ " [ OFFSET start [ ROW | ROWS ] ]\n" -#~ " [ FETCH { FIRST | NEXT } [ antal ] { ROW | ROWS } ONLY ]" +#~ msgid " SERVER_VERSION_NAME server's version (short string)\n" +#~ msgstr " SERVER_VERSION_NAME serverns version (kort sträng)\n" -#~ msgid "" -#~ "VACUUM [ FULL ] [ FREEZE ] [ VERBOSE ] [ table ]\n" -#~ "VACUUM [ FULL ] [ FREEZE ] [ VERBOSE ] ANALYZE [ table [ (column [, ...] ) ] ]" -#~ msgstr "" -#~ "VACUUM [ FULL ] [ FREEZE ] [ VERBOSE ] [ tabell ]\n" -#~ "VACUUM [ FULL ] [ FREEZE ] [ VERBOSE ] ANALYZE [ tabell [ (kolumn [, ...] ) ] ]" +#~ msgid " VERSION psql's version (verbose string)\n" +#~ msgstr " VERSION psql:s version (lång sträng)\n" -#~ msgid "" -#~ "UPDATE [ ONLY ] table [ [ AS ] alias ]\n" -#~ " SET { column = { expression | DEFAULT } |\n" -#~ " ( column [, ...] 
) = ( { expression | DEFAULT } [, ...] ) } [, ...]\n" -#~ " [ FROM fromlist ]\n" -#~ " [ WHERE condition | WHERE CURRENT OF cursor_name ]\n" -#~ " [ RETURNING * | output_expression [ [ AS ] output_name ] [, ...] ]" -#~ msgstr "" -#~ "UPDATE [ ONLY ] tabell [ [ AS ] alias ]\n" -#~ " SET { kolumn = { uttryck | DEFAULT } |\n" -#~ " ( kolumn [, ...] ) = ( { uttryck | DEFAULT } [, ...] ) } [, ...]\n" -#~ " [ FROM frånlista ]\n" -#~ " [ WHERE villkor | WHERE CURRENT OF markörnamn ]\n" -#~ " [ RETURNING * | utdatauttryck [ [ AS ] utdatanamn ] [, ...] ]" +#~ msgid " VERSION_NAME psql's version (short string)\n" +#~ msgstr " VERSION_NAME psql:s version (kort sträng)\n" -#~ msgid "UNLISTEN { name | * }" -#~ msgstr "UNLISTEN { namn | * }" +#~ msgid " VERSION_NUM psql's version (numeric format)\n" +#~ msgstr " VERSION_NUM psql:s version (numeriskt format)\n" -#~ msgid "" -#~ "TRUNCATE [ TABLE ] [ ONLY ] name [, ... ]\n" -#~ " [ RESTART IDENTITY | CONTINUE IDENTITY ] [ CASCADE | RESTRICT ]" -#~ msgstr "" -#~ "TRUNCATE [ TABLE ] [ ONLY ] namn [, ... ]\n" -#~ " [ RESTART IDENTITY | CONTINUE IDENTITY ] [ CASCADE | RESTRICT ]" +#~ msgid "attribute" +#~ msgstr "attribut" -#~ msgid "" -#~ "START TRANSACTION [ transaction_mode [, ...] ]\n" -#~ "\n" -#~ "where transaction_mode is one of:\n" -#~ "\n" -#~ " ISOLATION LEVEL { SERIALIZABLE | REPEATABLE READ | READ COMMITTED | READ UNCOMMITTED }\n" -#~ " READ WRITE | READ ONLY" -#~ msgstr "" -#~ "START TRANSACTION [ transaktionsläge [, ...] ]\n" -#~ "\n" -#~ "där transaktionsläge är en av:\n" -#~ "\n" -#~ " ISOLATION LEVEL { SERIALIZABLE | REPEATABLE READ | READ COMMITTED | READ UNCOMMITTED }\n" -#~ " READ WRITE | READ ONLY" +#~ msgid "statistic_type" +#~ msgstr "statistiktyp" -#~ msgid "" -#~ "SHOW name\n" -#~ "SHOW ALL" -#~ msgstr "" -#~ "SHOW namn\n" -#~ "SHOW ALL" +#~ msgid "No per-database role settings support in this server version.\n" +#~ msgstr "Inga rollinställningar per databas stöds i denna serverversion.\n" -#~ msgid "" -#~ "SET TRANSACTION transaction_mode [, ...]\n" -#~ "SET SESSION CHARACTERISTICS AS TRANSACTION transaction_mode [, ...]\n" -#~ "\n" -#~ "where transaction_mode is one of:\n" -#~ "\n" -#~ " ISOLATION LEVEL { SERIALIZABLE | REPEATABLE READ | READ COMMITTED | READ UNCOMMITTED }\n" -#~ " READ WRITE | READ ONLY" -#~ msgstr "" -#~ "SET TRANSACTION transaktionsläge [, ...]\n" -#~ "SET SESSION CHARACTERISTICS AS TRANSACTION transaktionsläge [, ...]\n" -#~ "\n" -#~ "där transaktionsläge är en av:\n" -#~ "\n" -#~ " ISOLATION LEVEL { SERIALIZABLE | REPEATABLE READ | READ COMMITTED | READ UNCOMMITTED }\n" -#~ " READ WRITE | READ ONLY" +#~ msgid "No matching settings found.\n" +#~ msgstr "Inga matchande inställningar funna.\n" -#~ msgid "" -#~ "SET [ SESSION | LOCAL ] SESSION AUTHORIZATION username\n" -#~ "SET [ SESSION | LOCAL ] SESSION AUTHORIZATION DEFAULT\n" -#~ "RESET SESSION AUTHORIZATION" -#~ msgstr "" -#~ "SET [ SESSION | LOCAL ] SESSION AUTHORIZATION användarnamn\n" -#~ "SET [ SESSION | LOCAL ] SESSION AUTHORIZATION DEFAULT\n" -#~ "RESET SESSION AUTHORIZATION" +#~ msgid "No settings found.\n" +#~ msgstr "Inga inställningar funna.\n" -#~ msgid "" -#~ "SET [ SESSION | LOCAL ] ROLE rolename\n" -#~ "SET [ SESSION | LOCAL ] ROLE NONE\n" -#~ "RESET ROLE" -#~ msgstr "" -#~ "SET [ SESSION | LOCAL ] ROLE rollnamn\n" -#~ "SET [ SESSION | LOCAL ] ROLE NONE\n" -#~ "RESET ROLE" +#~ msgid "No matching relations found.\n" +#~ msgstr "Inga matchande relationer funna.\n" -#~ msgid "SET CONSTRAINTS { ALL | name [, ...] 
} { DEFERRED | IMMEDIATE }" -#~ msgstr "SET CONSTRAINTS { ALL | namn [, ...] } { DEFERRED | IMMEDIATE }" +#~ msgid "No relations found.\n" +#~ msgstr "Inga relationer funna.\n" -#~ msgid "" -#~ "SET [ SESSION | LOCAL ] configuration_parameter { TO | = } { value | 'value' | DEFAULT }\n" -#~ "SET [ SESSION | LOCAL ] TIME ZONE { timezone | LOCAL | DEFAULT }" -#~ msgstr "" -#~ "SET [ SESSION | LOCAL ] konfigurationsparameter { TO | = } { värde | 'värde' | DEFAULT }\n" -#~ "SET [ SESSION | LOCAL ] TIME ZONE { tidszon | LOCAL | DEFAULT }" +#~ msgid "Password encryption failed.\n" +#~ msgstr "Lösenordskryptering misslyckades.\n" -#, fuzzy -#~ msgid "" -#~ "[ WITH [ RECURSIVE ] with_query [, ...] ]\n" -#~ "SELECT [ ALL | DISTINCT [ ON ( expression [, ...] ) ] ]\n" -#~ " * | expression [ [ AS ] output_name ] [, ...]\n" -#~ " INTO [ TEMPORARY | TEMP ] [ TABLE ] new_table\n" -#~ " [ FROM from_item [, ...] ]\n" -#~ " [ WHERE condition ]\n" -#~ " [ GROUP BY expression [, ...] ]\n" -#~ " [ HAVING condition [, ...] ]\n" -#~ " [ WINDOW window_name AS ( window_definition ) [, ...] ]\n" -#~ " [ { UNION | INTERSECT | EXCEPT } [ ALL ] select ]\n" -#~ " [ ORDER BY expression [ ASC | DESC | USING operator ] [ NULLS { FIRST | LAST } ] [, ...] ]\n" -#~ " [ LIMIT { count | ALL } ]\n" -#~ " [ OFFSET start [ ROW | ROWS ] ]\n" -#~ " [ FETCH { FIRST | NEXT } [ count ] { ROW | ROWS } ONLY ]\n" -#~ " [ FOR { UPDATE | SHARE } [ OF table_name [, ...] ] [ NOWAIT ] [...] ]" -#~ msgstr "" -#~ "SELECT [ ALL | DISTINCT [ ON ( uttryck [, ...] ) ] ]\n" -#~ " * | uttryck [ AS utnamn ] [, ...]\n" -#~ " INTO [ TEMPORARY | TEMP ] [ TABLE ] ny_tabell\n" -#~ " [ FROM frånval [, ...] ]\n" -#~ " [ WHERE villkor ]\n" -#~ " [ GROUP BY uttryck [, ...] ]\n" -#~ " [ HAVING villkor [, ...] ]\n" -#~ " [ { UNION | INTERSECT | EXCEPT } [ ALL ] select ]\n" -#~ " [ ORDER BY uttryck [ ASC | DESC | USING operator ] [, ...] ]\n" -#~ " [ LIMIT { antal | ALL } ]\n" -#~ " [ OFFSET start ]\n" -#~ " [ FOR { UPDATE | SHARE } [ OF tabellnamn [, ...] ] [ NOWAIT ] [...] ]" +#~ msgid "\\%s: error\n" +#~ msgstr "\\%s: fel\n" + +#~ msgid " on host \"%s\"" +#~ msgstr " på värd \"%s\"" + +#~ msgid " at port \"%s\"" +#~ msgstr " port \"%s\"" + +#~ msgid " as user \"%s\"" +#~ msgstr " som användare \"%s\"" + +#~ msgid "SSL connection (unknown cipher)\n" +#~ msgstr "SSL-förbindelse (okänt krypto)\n" + +#~ msgid "Showing locale-adjusted numeric output." +#~ msgstr "Visar lokal-anpassad numerisk utdata." + +#~ msgid "Showing only tuples." +#~ msgstr "Visar bara tupler." 
+ +#~ msgid "%s: pg_strdup: cannot duplicate null pointer (internal error)\n" +#~ msgstr "%s: pg_strdup: kan inte duplicera null-pekare (internt fel)\n" + +#~ msgid "\\copy: %s" +#~ msgstr "\\copy: %s" + +#~ msgid "\\copy: unexpected response (%d)\n" +#~ msgstr "\\copy: oväntat svar (%d)\n" + +#~ msgid "could not get current user name: %s\n" +#~ msgstr "kunde inte hämta det aktuella användarnamnet: %s\n" + +#~ msgid " --help show this help, then exit\n" +#~ msgstr " --help visa denna hjälp och avsluta sedan\n" + +#~ msgid " --version output version information, then exit\n" +#~ msgstr " --version visa versionsinformation och avsluta sedan\n" + +#~ msgid " \\dg[+] [PATTERN] list roles (groups)\n" +#~ msgstr " \\dg[+] [MALL] lista roller (grupper)\n" + +#~ msgid " \\du[+] [PATTERN] list roles (users)\n" +#~ msgstr " \\du[+] [MALL] lista roller (användare)\n" + +#~ msgid " \\l[+] list all databases\n" +#~ msgstr " \\l[+] lista alla databaser\n" -#, fuzzy #~ msgid "" -#~ "[ WITH [ RECURSIVE ] with_query [, ...] ]\n" -#~ "SELECT [ ALL | DISTINCT [ ON ( expression [, ...] ) ] ]\n" -#~ " * | expression [ [ AS ] output_name ] [, ...]\n" -#~ " [ FROM from_item [, ...] ]\n" -#~ " [ WHERE condition ]\n" -#~ " [ GROUP BY expression [, ...] ]\n" -#~ " [ HAVING condition [, ...] ]\n" -#~ " [ WINDOW window_name AS ( window_definition ) [, ...] ]\n" -#~ " [ { UNION | INTERSECT | EXCEPT } [ ALL ] select ]\n" -#~ " [ ORDER BY expression [ ASC | DESC | USING operator ] [ NULLS { FIRST | LAST } ] [, ...] ]\n" -#~ " [ LIMIT { count | ALL } ]\n" -#~ " [ OFFSET start [ ROW | ROWS ] ]\n" -#~ " [ FETCH { FIRST | NEXT } [ count ] { ROW | ROWS } ONLY ]\n" -#~ " [ FOR { UPDATE | SHARE } [ OF table_name [, ...] ] [ NOWAIT ] [...] ]\n" -#~ "\n" -#~ "where from_item can be one of:\n" -#~ "\n" -#~ " [ ONLY ] table_name [ * ] [ [ AS ] alias [ ( column_alias [, ...] ) ] ]\n" -#~ " ( select ) [ AS ] alias [ ( column_alias [, ...] ) ]\n" -#~ " with_query_name [ [ AS ] alias [ ( column_alias [, ...] ) ] ]\n" -#~ " function_name ( [ argument [, ...] ] ) [ AS ] alias [ ( column_alias [, ...] | column_definition [, ...] ) ]\n" -#~ " function_name ( [ argument [, ...] ] ) AS ( column_definition [, ...] )\n" -#~ " from_item [ NATURAL ] join_type from_item [ ON join_condition | USING ( join_column [, ...] ) ]\n" -#~ "\n" -#~ "and with_query is:\n" -#~ "\n" -#~ " with_query_name [ ( column_name [, ...] ) ] AS ( select )\n" -#~ "\n" -#~ "TABLE { [ ONLY ] table_name [ * ] | with_query_name }" +#~ " \\pset NAME [VALUE] set table output option\n" +#~ " (NAME := {format|border|expanded|fieldsep|footer|null|\n" +#~ " numericlocale|recordsep|tuples_only|title|tableattr|pager})\n" #~ msgstr "" -#~ "SELECT [ ALL | DISTINCT [ ON ( uttryck [, ...] ) ] ]\n" -#~ " * | uttryck [ AS utnamn ] [, ...]\n" -#~ " [ FROM frånval [, ...] ]\n" -#~ " [ WHERE villkor ]\n" -#~ " [ GROUP BY uttryck [, ...] ]\n" -#~ " [ HAVING villkor [, ...] ]\n" -#~ " [ { UNION | INTERSECT | EXCEPT } [ ALL ] select ]\n" -#~ " [ ORDER BY uttryck [ ASC | DESC | USING operator ] [, ...] ]\n" -#~ " [ LIMIT { antal | ALL } ]\n" -#~ " [ OFFSET start ]\n" -#~ " [ FOR { UPDATE | SHARE } [ OF tabellnamn [, ...] ] [ NOWAIT ] [...] ]\n" -#~ "\n" -#~ "där frånval kan vara en av:\n" -#~ "\n" -#~ " [ ONLY ] tabellnamn [ * ] [ [ AS ] alias [ ( kolumnalias [, ...] ) ] ]\n" -#~ " ( select ) [ AS ] alias [ ( kolumnalias [, ...] ) ]\n" -#~ " funktionsnamn ( [ argument [, ...] ] ) [ AS ] alias [ ( kolumnalias [, ...] | kolumndefinition [, ...] ) ]\n" -#~ " funktionsnamn ( [ argument [, ...] 
] ) AS ( kolumndefinition [, ...] )\n" -#~ " frånval [ NATURAL ] join-typ frånval [ ON join-villkor | USING ( join-kolumn [, ...] ) ]" +#~ " \\pset NAMN [VÄRDE] sätt tabellutskriftsval\n" +#~ " (NAMN := {format|border|expanded|fieldsep|footer|null|\n" +#~ " numericlocale|recordsep|tuples_only|title|tableattr|pager})\n" -#~ msgid "ROLLBACK [ WORK | TRANSACTION ] TO [ SAVEPOINT ] savepoint_name" -#~ msgstr "ROLLBACK [ WORK | TRANSACTION ] TO [ SAVEPOINT ] sparpunktsnamn" +#~ msgid "(No rows)\n" +#~ msgstr "(Inga rader)\n" -#~ msgid "ROLLBACK PREPARED transaction_id" -#~ msgstr "ROLLBACK PREPARED transaktions_id" +#~ msgid "%s: could not set variable \"%s\"\n" +#~ msgstr "%s: kunde inte sätta variabeln \"%s\"\n" -#~ msgid "ROLLBACK [ WORK | TRANSACTION ]" -#~ msgstr "ROLLBACK [ WORK | TRANSACTION ]" +#~ msgid "contains support for command-line editing" +#~ msgstr "innehåller stöd för kommandoradsredigering" + +#~ msgid "data type" +#~ msgstr "datatyp" + +#~ msgid "Modifiers" +#~ msgstr "Modifierare" + +#~ msgid "not null" +#~ msgstr "inte null" + +#~ msgid "default %s" +#~ msgstr "default %s" + +#~ msgid "Modifier" +#~ msgstr "Modifierare" + +#~ msgid "define a new constraint trigger" +#~ msgstr "definiera en ny villkorsutlösare" + +#~ msgid "column" +#~ msgstr "kolumn" + +#~ msgid "new_column" +#~ msgstr "ny_kolumn" + +#~ msgid "agg_name" +#~ msgstr "agg_namn" + +#~ msgid "agg_type" +#~ msgstr "agg_typ" + +#~ msgid "input_data_type" +#~ msgstr "indatatyp" + +#~ msgid "tablespace" +#~ msgstr "tabellutrymme" + +#~ msgid "could not change directory to \"%s\"" +#~ msgstr "kunde inte byta katalog till \"%s\"" + +#~ msgid "ABORT [ WORK | TRANSACTION ]" +#~ msgstr "ABORT [ WORK | TRANSACTION ]" + +#~ msgid "" +#~ "ALTER AGGREGATE name ( type [ , ... ] ) RENAME TO new_name\n" +#~ "ALTER AGGREGATE name ( type [ , ... ] ) OWNER TO new_owner\n" +#~ "ALTER AGGREGATE name ( type [ , ... ] ) SET SCHEMA new_schema" +#~ msgstr "" +#~ "ALTER AGGREGATE namn ( typ [ , ... ] ) RENAME TO nytt_namn\n" +#~ "ALTER AGGREGATE name ( typ [ , ... ] ) OWNER TO ny_ägare\n" +#~ "ALTER AGGREGATE namn ( typ [ , ... ] ) SET SCHEMA nytt_schema" + +#~ msgid "" +#~ "ALTER CONVERSION name RENAME TO newname\n" +#~ "ALTER CONVERSION name OWNER TO newowner" +#~ msgstr "" +#~ "ALTER CONVERSION namn RENAME TO nytt_namn\n" +#~ "ALTER CONVERSION namn OWNER TO ny_ägare" #, fuzzy #~ msgid "" -#~ "REVOKE [ GRANT OPTION FOR ]\n" -#~ " { { SELECT | INSERT | UPDATE | DELETE | TRUNCATE | REFERENCES | TRIGGER }\n" -#~ " [,...] | ALL [ PRIVILEGES ] }\n" -#~ " ON [ TABLE ] tablename [, ...]\n" -#~ " FROM { [ GROUP ] rolename | PUBLIC } [, ...]\n" -#~ " [ CASCADE | RESTRICT ]\n" +#~ "ALTER DATABASE name [ [ WITH ] option [ ... ] ]\n" #~ "\n" -#~ "REVOKE [ GRANT OPTION FOR ]\n" -#~ " { { SELECT | INSERT | UPDATE | REFERENCES } ( column [, ...] )\n" -#~ " [,...] | ALL [ PRIVILEGES ] ( column [, ...] ) }\n" -#~ " ON [ TABLE ] tablename [, ...]\n" -#~ " FROM { [ GROUP ] rolename | PUBLIC } [, ...]\n" -#~ " [ CASCADE | RESTRICT ]\n" +#~ "where option can be:\n" #~ "\n" -#~ "REVOKE [ GRANT OPTION FOR ]\n" -#~ " { { USAGE | SELECT | UPDATE }\n" -#~ " [,...] | ALL [ PRIVILEGES ] }\n" -#~ " ON SEQUENCE sequencename [, ...]\n" -#~ " FROM { [ GROUP ] rolename | PUBLIC } [, ...]\n" -#~ " [ CASCADE | RESTRICT ]\n" +#~ " CONNECTION LIMIT connlimit\n" #~ "\n" -#~ "REVOKE [ GRANT OPTION FOR ]\n" -#~ " { { CREATE | CONNECT | TEMPORARY | TEMP } [,...] 
| ALL [ PRIVILEGES ] }\n" -#~ " ON DATABASE dbname [, ...]\n" -#~ " FROM { [ GROUP ] rolename | PUBLIC } [, ...]\n" -#~ " [ CASCADE | RESTRICT ]\n" +#~ "ALTER DATABASE name RENAME TO newname\n" #~ "\n" -#~ "REVOKE [ GRANT OPTION FOR ]\n" -#~ " { USAGE | ALL [ PRIVILEGES ] }\n" -#~ " ON FOREIGN DATA WRAPPER fdwname [, ...]\n" -#~ " FROM { [ GROUP ] rolename | PUBLIC } [, ...]\n" -#~ " [ CASCADE | RESTRICT ]\n" +#~ "ALTER DATABASE name OWNER TO new_owner\n" #~ "\n" -#~ "REVOKE [ GRANT OPTION FOR ]\n" -#~ " { USAGE | ALL [ PRIVILEGES ] }\n" -#~ " ON FOREIGN SERVER servername [, ...]\n" -#~ " FROM { [ GROUP ] rolename | PUBLIC } [, ...]\n" -#~ " [ CASCADE | RESTRICT ]\n" +#~ "ALTER DATABASE name SET TABLESPACE new_tablespace\n" #~ "\n" -#~ "REVOKE [ GRANT OPTION FOR ]\n" -#~ " { EXECUTE | ALL [ PRIVILEGES ] }\n" -#~ " ON FUNCTION funcname ( [ [ argmode ] [ argname ] argtype [, ...] ] ) [, ...]\n" -#~ " FROM { [ GROUP ] rolename | PUBLIC } [, ...]\n" -#~ " [ CASCADE | RESTRICT ]\n" +#~ "ALTER DATABASE name SET configuration_parameter { TO | = } { value | DEFAULT }\n" +#~ "ALTER DATABASE name SET configuration_parameter FROM CURRENT\n" +#~ "ALTER DATABASE name RESET configuration_parameter\n" +#~ "ALTER DATABASE name RESET ALL" +#~ msgstr "" +#~ "ALTER DATABASE namn [ [ WITH ] alternativ [ ... ] ]\n" #~ "\n" -#~ "REVOKE [ GRANT OPTION FOR ]\n" -#~ " { USAGE | ALL [ PRIVILEGES ] }\n" -#~ " ON LANGUAGE langname [, ...]\n" -#~ " FROM { [ GROUP ] rolename | PUBLIC } [, ...]\n" -#~ " [ CASCADE | RESTRICT ]\n" +#~ "där alternativ kan vara:\n" #~ "\n" -#~ "REVOKE [ GRANT OPTION FOR ]\n" -#~ " { { CREATE | USAGE } [,...] | ALL [ PRIVILEGES ] }\n" -#~ " ON SCHEMA schemaname [, ...]\n" -#~ " FROM { [ GROUP ] rolename | PUBLIC } [, ...]\n" -#~ " [ CASCADE | RESTRICT ]\n" +#~ " CONNECTION LIMIT anslutningstak\n" #~ "\n" -#~ "REVOKE [ GRANT OPTION FOR ]\n" -#~ " { CREATE | ALL [ PRIVILEGES ] }\n" -#~ " ON TABLESPACE tablespacename [, ...]\n" -#~ " FROM { [ GROUP ] rolename | PUBLIC } [, ...]\n" -#~ " [ CASCADE | RESTRICT ]\n" +#~ "ALTER DATABASE namn SET parameter { TO | = } { värde | DEFAULT }\n" +#~ "ALTER DATABASE namn RESET parameter\n" #~ "\n" -#~ "REVOKE [ ADMIN OPTION FOR ]\n" -#~ " role [, ...] FROM rolename [, ...]\n" -#~ " [ CASCADE | RESTRICT ]" -#~ msgstr "" -#~ "REVOKE [ GRANT OPTION FOR ]\n" -#~ " { { SELECT | INSERT | UPDATE | DELETE | REFERENCES | TRIGGER }\n" -#~ " [,...] | ALL [ PRIVILEGES ] }\n" -#~ " ON [ TABLE ] tabellnamn [, ...]\n" -#~ " FROM { användarnamn | GROUP gruppnamn | PUBLIC } [, ...]\n" -#~ " [ CASCADE | RESTRICT ]\n" +#~ "ALTER DATABASE namn RENAME TO nyttnamn\n" #~ "\n" -#~ "REVOKE [ GRANT OPTION FOR ]\n" -#~ " { { USAGE | SELECT | UPDATE }\n" -#~ " [,...] 
| ALL [ PRIVILEGES ] }\n" -#~ " ON SEQUENCE sekvensnamn [, ...]\n" -#~ " FROM { användarnamn | GROUP gruppnamn | PUBLIC } [, ...]\n" -#~ " [ CASCADE | RESTRICT ]\n" +#~ "ALTER DATABASE namn OWNER TO ny_ägare" + +#~ msgid "" +#~ "ALTER DOMAIN name\n" +#~ " { SET DEFAULT expression | DROP DEFAULT }\n" +#~ "ALTER DOMAIN name\n" +#~ " { SET | DROP } NOT NULL\n" +#~ "ALTER DOMAIN name\n" +#~ " ADD domain_constraint\n" +#~ "ALTER DOMAIN name\n" +#~ " DROP CONSTRAINT constraint_name [ RESTRICT | CASCADE ]\n" +#~ "ALTER DOMAIN name\n" +#~ " OWNER TO new_owner \n" +#~ "ALTER DOMAIN name\n" +#~ " SET SCHEMA new_schema" +#~ msgstr "" +#~ "ALTER DOMAIN namn\n" +#~ " { SET DEFAULT uttryck | DROP DEFAULT }\n" +#~ "ALTER DOMAIN namn\n" +#~ " { SET | DROP } NOT NULL\n" +#~ "ALTER DOMAIN namn\n" +#~ " ADD domain_villkor (constraint)\n" +#~ "ALTER DOMAIN namn\n" +#~ " DROP CONSTRAINT villkorsnamn [ RESTRICT | CASCADE ]\n" +#~ "ALTER DOMAIN namn\n" +#~ " OWNER TO ny_ägare\n" +#~ "ALTER DOMAIN namn\n" +#~ " SET SCHEMA nytt_schema" + +#, fuzzy +#~ msgid "" +#~ "ALTER FUNCTION name ( [ [ argmode ] [ argname ] argtype [, ...] ] )\n" +#~ " action [ ... ] [ RESTRICT ]\n" +#~ "ALTER FUNCTION name ( [ [ argmode ] [ argname ] argtype [, ...] ] )\n" +#~ " RENAME TO new_name\n" +#~ "ALTER FUNCTION name ( [ [ argmode ] [ argname ] argtype [, ...] ] )\n" +#~ " OWNER TO new_owner\n" +#~ "ALTER FUNCTION name ( [ [ argmode ] [ argname ] argtype [, ...] ] )\n" +#~ " SET SCHEMA new_schema\n" #~ "\n" -#~ "REVOKE [ GRANT OPTION FOR ]\n" -#~ " { { CREATE | TEMPORARY | TEMP } [,...] | ALL [ PRIVILEGES ] }\n" -#~ " ON DATABASE dbnamn [, ...]\n" -#~ " FROM { användarnamn | GROUP gruppnamn | PUBLIC } [, ...]\n" -#~ " [ CASCADE | RESTRICT ]\n" +#~ "where action is one of:\n" #~ "\n" -#~ "REVOKE [ GRANT OPTION FOR ]\n" -#~ " { EXECUTE | ALL [ PRIVILEGES ] }\n" -#~ " ON FUNCTION funknamn ( [ [ arg_läge ] [ arg_namn ] arg_typ [, ...] ] ) [, ...]\n" -#~ " FROM { användarnamn | GROUP gruppnamn | PUBLIC } [, ...]\n" -#~ " [ CASCADE | RESTRICT ]\n" +#~ " CALLED ON NULL INPUT | RETURNS NULL ON NULL INPUT | STRICT\n" +#~ " IMMUTABLE | STABLE | VOLATILE\n" +#~ " [ EXTERNAL ] SECURITY INVOKER | [ EXTERNAL ] SECURITY DEFINER\n" +#~ " COST execution_cost\n" +#~ " ROWS result_rows\n" +#~ " SET configuration_parameter { TO | = } { value | DEFAULT }\n" +#~ " SET configuration_parameter FROM CURRENT\n" +#~ " RESET configuration_parameter\n" +#~ " RESET ALL" +#~ msgstr "" +#~ "ALTER FUNCTION namn ( [ [ arg_läge ] [ arg_namn ] arg_typ [, ...] ] )\n" +#~ " aktion [, ... ] [ RESTRICT ]\n" +#~ "ALTER FUNCTION namn ( [ [ arg_läge ] [ arg_namn ] arg_typ [, ...] ] )\n" +#~ " RENAME TO nytt_namn\n" +#~ "ALTER FUNCTION namn ( [ [ arg_läge ] [ arg_namn ] arg_typ [, ...] ] )\n" +#~ " OWNER TO ny_ägare\n" +#~ "ALTER FUNCTION namn ( [ [ arg_läge ] [ arg_namn ] arg_typ [, ...] ] )\n" +#~ " SET SCHEMA nytt_schema\n" #~ "\n" -#~ "REVOKE [ GRANT OPTION FOR ]\n" -#~ " { USAGE | ALL [ PRIVILEGES ] }\n" -#~ " ON LANGUAGE språknamn [, ...]\n" -#~ " FROM { användarnamn | GROUP gruppnamn | PUBLIC } [, ...]\n" -#~ " [ CASCADE | RESTRICT ]\n" +#~ "där aktion är en av:\n" #~ "\n" -#~ "REVOKE [ GRANT OPTION FOR ]\n" -#~ " { { CREATE | USAGE } [,...] 
| ALL [ PRIVILEGES ] }\n" -#~ " ON SCHEMA schemanamn [, ...]\n" -#~ " FROM { användarnamn | GROUP gruppnamn | PUBLIC } [, ...]\n" -#~ " [ CASCADE | RESTRICT ]\n" +#~ " CALLED ON NULL INPUT | RETURNS NULL ON NULL INPUT | STRICT\n" +#~ " IMMUTABLE | STABLE | VOLATILE\n" +#~ " [ EXTERNAL ] SECURITY INVOKER | [ EXTERNAL ] SECURITY DEFINER" + +#~ msgid "" +#~ "ALTER GROUP groupname ADD USER username [, ... ]\n" +#~ "ALTER GROUP groupname DROP USER username [, ... ]\n" #~ "\n" -#~ "REVOKE [ GRANT OPTION FOR ]\n" -#~ " { CREATE | ALL [ PRIVILEGES ] }\n" -#~ " ON TABLESPACE tabellutrymmesnamn [, ...]\n" -#~ " FROM { användarnamn | GROUP gruppnamn | PUBLIC } [, ...]\n" -#~ " [ CASCADE | RESTRICT ]\n" +#~ "ALTER GROUP groupname RENAME TO newname" +#~ msgstr "" +#~ "ALTER GROUP gruppnamn ADD USER användarnamn [, ... ]\n" +#~ "ALTER GROUP gruppnamn DROP USER användarnamn [, ... ]\n" #~ "\n" -#~ "REVOKE [ ADMIN OPTION FOR ]\n" -#~ " rolk [, ...]\n" -#~ " FROM { användarnamn | GROUP gruppnamn | PUBLIC } [, ...]\n" -#~ " [ CASCADE | RESTRICT ]" - -#~ msgid "RELEASE [ SAVEPOINT ] savepoint_name" -#~ msgstr "RELEASE [ SAVEPOINT ] sparpunktsnamn" - -#~ msgid "REINDEX { INDEX | TABLE | DATABASE | SYSTEM } name [ FORCE ]" -#~ msgstr "REINDEX { INDEX | TABLE | DATABASE | SYSTEM } namn [ FORCE ]" - -#~ msgid "REASSIGN OWNED BY old_role [, ...] TO new_role" -#~ msgstr "REASSIGN OWNED BY gammal_roll [, ...] TO ny_roll" +#~ "ALTER GROUP gruppnamn RENAME TO nyttnamn" -#~ msgid "PREPARE TRANSACTION transaction_id" -#~ msgstr "PREPARE TRANSACTION transaktions_id" +#~ msgid "" +#~ "ALTER INDEX name RENAME TO new_name\n" +#~ "ALTER INDEX name SET TABLESPACE tablespace_name\n" +#~ "ALTER INDEX name SET ( storage_parameter = value [, ... ] )\n" +#~ "ALTER INDEX name RESET ( storage_parameter [, ... ] )" +#~ msgstr "" +#~ "ALTER INDEX namn RENAME TO nytt_namn\n" +#~ "ALTER INDEX namn SET TABLESPACE tabellutrymmesnamn\n" +#~ "ALTER INDEX namn SET ( lagringsparameter = värde [, ... ] )\n" +#~ "ALTER INDEX namn RESET ( lagringsparameter [, ... ] )" -#~ msgid "PREPARE name [ ( datatype [, ...] ) ] AS statement" -#~ msgstr "PREPARE namn [ ( datatyp [, ...] ) ] AS sats" +#, fuzzy +#~ msgid "" +#~ "ALTER [ PROCEDURAL ] LANGUAGE name RENAME TO newname\n" +#~ "ALTER [ PROCEDURAL ] LANGUAGE name OWNER TO new_owner" +#~ msgstr "" +#~ "ALTER SCHEMA namn RENAME TO nytt_namn\n" +#~ "ALTER SCHEMA namn OWNER TO ny_ägare" -#~ msgid "NOTIFY name" -#~ msgstr "NOTIFY namn" +#~ msgid "ALTER OPERATOR name ( { lefttype | NONE } , { righttype | NONE } ) OWNER TO newowner" +#~ msgstr "ALTER OPERATOR namn ( { vänster_typ | NONE }, { höger_typ | NONE } ) OWNER TO ny_ägare" -#~ msgid "MOVE [ direction { FROM | IN } ] cursorname" -#~ msgstr "MOVE [ riktning { FROM | IN } ] markörnamn" +#~ msgid "" +#~ "ALTER OPERATOR CLASS name USING index_method RENAME TO newname\n" +#~ "ALTER OPERATOR CLASS name USING index_method OWNER TO newowner" +#~ msgstr "" +#~ "ALTER OPERATOR CLASS namn USING indexmetod RENAME TO nytt_namn\n" +#~ "ALTER OPERATOR CLASS namn USING indexmetod OWNER TO ny_ägare" +#, fuzzy #~ msgid "" -#~ "LOCK [ TABLE ] [ ONLY ] name [, ...] [ IN lockmode MODE ] [ NOWAIT ]\n" +#~ "ALTER ROLE name [ [ WITH ] option [ ... 
] ]\n" #~ "\n" -#~ "where lockmode is one of:\n" +#~ "where option can be:\n" +#~ " \n" +#~ " SUPERUSER | NOSUPERUSER\n" +#~ " | CREATEDB | NOCREATEDB\n" +#~ " | CREATEROLE | NOCREATEROLE\n" +#~ " | CREATEUSER | NOCREATEUSER\n" +#~ " | INHERIT | NOINHERIT\n" +#~ " | LOGIN | NOLOGIN\n" +#~ " | CONNECTION LIMIT connlimit\n" +#~ " | [ ENCRYPTED | UNENCRYPTED ] PASSWORD 'password'\n" +#~ " | VALID UNTIL 'timestamp' \n" #~ "\n" -#~ " ACCESS SHARE | ROW SHARE | ROW EXCLUSIVE | SHARE UPDATE EXCLUSIVE\n" -#~ " | SHARE | SHARE ROW EXCLUSIVE | EXCLUSIVE | ACCESS EXCLUSIVE" +#~ "ALTER ROLE name RENAME TO newname\n" +#~ "\n" +#~ "ALTER ROLE name SET configuration_parameter { TO | = } { value | DEFAULT }\n" +#~ "ALTER ROLE name SET configuration_parameter FROM CURRENT\n" +#~ "ALTER ROLE name RESET configuration_parameter\n" +#~ "ALTER ROLE name RESET ALL" #~ msgstr "" -#~ "LOCK [ TABLE ] [ ONLY ] namn [, ...] [ IN låsläge MODE ] [ NOWAIT ]\n" +#~ "ALTER ROLE namn [ [ WITH ] alternativ [ ... ] ]\n" #~ "\n" -#~ "där låsläge är en av:\n" +#~ "där alternativ kan vara:\n" +#~ " \n" +#~ " SUPERUSER | NOSUPERUSER\n" +#~ " | CREATEDB | NOCREATEDB\n" +#~ " | CREATEROLE | NOCREATEROLE\n" +#~ " | CREATEUSER | NOCREATEUSER\n" +#~ " | INHERIT | NOINHERIT\n" +#~ " | LOGIN | NOLOGIN\n" +#~ " | CONNECTION LIMIT anslutningstak\n" +#~ " | [ ENCRYPTED | UNENCRYPTED ] PASSWORD 'lösenord'\n" +#~ " | VALID UNTIL 'tidsstämpel' \n" #~ "\n" -#~ " ACCESS SHARE | ROW SHARE | ROW EXCLUSIVE | SHARE UPDATE EXCLUSIVE\n" -#~ " | SHARE | SHARE ROW EXCLUSIVE | EXCLUSIVE | ACCESS EXCLUSIVE" - -#~ msgid "LOAD 'filename'" -#~ msgstr "LOAD 'filnamn'" +#~ "ALTER ROLE namn RENAME TO nytt_namn\n" +#~ "\n" +#~ "ALTER ROLE namn SET konfigurationsparameter { TO | = } { värde | DEFAULT }\n" +#~ "ALTER ROLE namn RESET konfigurationsparameter" -#~ msgid "LISTEN name" -#~ msgstr "LISTEN namn" +#~ msgid "" +#~ "ALTER SCHEMA name RENAME TO newname\n" +#~ "ALTER SCHEMA name OWNER TO newowner" +#~ msgstr "" +#~ "ALTER SCHEMA namn RENAME TO nytt_namn\n" +#~ "ALTER SCHEMA namn OWNER TO ny_ägare" +#, fuzzy #~ msgid "" -#~ "INSERT INTO table [ ( column [, ...] ) ]\n" -#~ " { DEFAULT VALUES | VALUES ( { expression | DEFAULT } [, ...] ) [, ...] | query }\n" -#~ " [ RETURNING * | output_expression [ [ AS ] output_name ] [, ...] ]" +#~ "ALTER SEQUENCE name [ INCREMENT [ BY ] increment ]\n" +#~ " [ MINVALUE minvalue | NO MINVALUE ] [ MAXVALUE maxvalue | NO MAXVALUE ]\n" +#~ " [ START [ WITH ] start ]\n" +#~ " [ RESTART [ [ WITH ] restart ] ]\n" +#~ " [ CACHE cache ] [ [ NO ] CYCLE ]\n" +#~ " [ OWNED BY { table.column | NONE } ]\n" +#~ "ALTER SEQUENCE name OWNER TO new_owner\n" +#~ "ALTER SEQUENCE name RENAME TO new_name\n" +#~ "ALTER SEQUENCE name SET SCHEMA new_schema" #~ msgstr "" -#~ "INSERT INTO tabell [ ( kolumn [, ...] ) ]\n" -#~ " { DEFAULT VALUES | VALUES ( { uttryck | DEFAULT } [, ...] ) [, ...] | fråga }\n" -#~ " [ RETURNING * | utdatauttryck [ [ AS ] utdatanamn ] [, ...] ]" +#~ "ALTER SEQUENCE namn [ INCREMENT [ BY ] ökningsvärde ]\n" +#~ " [ MINVALUE minvärde | NO MINVALUE ] [ MAXVALUE maxvärde | NO MAXVALUE ]\n" +#~ " [ RESTART [ WITH ] start ] [ CACHE cache ] [ [ NO ] CYCLE ]\n" +#~ " [ OWNED BY { tabell.kolumn | NONE } ]\n" +#~ "ALTER SEQUENCE namn SET SCHEMA nytt_schema" #, fuzzy #~ msgid "" -#~ "GRANT { { SELECT | INSERT | UPDATE | DELETE | TRUNCATE | REFERENCES | TRIGGER }\n" -#~ " [,...] | ALL [ PRIVILEGES ] }\n" -#~ " ON [ TABLE ] tablename [, ...]\n" -#~ " TO { [ GROUP ] rolename | PUBLIC } [, ...] 
[ WITH GRANT OPTION ]\n" +#~ "ALTER TABLE [ ONLY ] name [ * ]\n" +#~ " action [, ... ]\n" +#~ "ALTER TABLE [ ONLY ] name [ * ]\n" +#~ " RENAME [ COLUMN ] column TO new_column\n" +#~ "ALTER TABLE name\n" +#~ " RENAME TO new_name\n" +#~ "ALTER TABLE name\n" +#~ " SET SCHEMA new_schema\n" #~ "\n" -#~ "GRANT { { SELECT | INSERT | UPDATE | REFERENCES } ( column [, ...] )\n" -#~ " [,...] | ALL [ PRIVILEGES ] ( column [, ...] ) }\n" -#~ " ON [ TABLE ] tablename [, ...]\n" -#~ " TO { [ GROUP ] rolename | PUBLIC } [, ...] [ WITH GRANT OPTION ]\n" +#~ "where action is one of:\n" #~ "\n" -#~ "GRANT { { USAGE | SELECT | UPDATE }\n" -#~ " [,...] | ALL [ PRIVILEGES ] }\n" -#~ " ON SEQUENCE sequencename [, ...]\n" -#~ " TO { [ GROUP ] rolename | PUBLIC } [, ...] [ WITH GRANT OPTION ]\n" -#~ "\n" -#~ "GRANT { { CREATE | CONNECT | TEMPORARY | TEMP } [,...] | ALL [ PRIVILEGES ] }\n" -#~ " ON DATABASE dbname [, ...]\n" -#~ " TO { [ GROUP ] rolename | PUBLIC } [, ...] [ WITH GRANT OPTION ]\n" -#~ "\n" -#~ "GRANT { USAGE | ALL [ PRIVILEGES ] }\n" -#~ " ON FOREIGN DATA WRAPPER fdwname [, ...]\n" -#~ " TO { [ GROUP ] rolename | PUBLIC } [, ...] [ WITH GRANT OPTION ]\n" -#~ "\n" -#~ "GRANT { USAGE | ALL [ PRIVILEGES ] }\n" -#~ " ON FOREIGN SERVER servername [, ...]\n" -#~ " TO { [ GROUP ] rolename | PUBLIC } [, ...] [ WITH GRANT OPTION ]\n" -#~ "\n" -#~ "GRANT { EXECUTE | ALL [ PRIVILEGES ] }\n" -#~ " ON FUNCTION funcname ( [ [ argmode ] [ argname ] argtype [, ...] ] ) [, ...]\n" -#~ " TO { [ GROUP ] rolename | PUBLIC } [, ...] [ WITH GRANT OPTION ]\n" -#~ "\n" -#~ "GRANT { USAGE | ALL [ PRIVILEGES ] }\n" -#~ " ON LANGUAGE langname [, ...]\n" -#~ " TO { [ GROUP ] rolename | PUBLIC } [, ...] [ WITH GRANT OPTION ]\n" -#~ "\n" -#~ "GRANT { { CREATE | USAGE } [,...] | ALL [ PRIVILEGES ] }\n" -#~ " ON SCHEMA schemaname [, ...]\n" -#~ " TO { [ GROUP ] rolename | PUBLIC } [, ...] [ WITH GRANT OPTION ]\n" +#~ " ADD [ COLUMN ] column type [ column_constraint [ ... ] ]\n" +#~ " DROP [ COLUMN ] column [ RESTRICT | CASCADE ]\n" +#~ " ALTER [ COLUMN ] column [ SET DATA ] TYPE type [ USING expression ]\n" +#~ " ALTER [ COLUMN ] column SET DEFAULT expression\n" +#~ " ALTER [ COLUMN ] column DROP DEFAULT\n" +#~ " ALTER [ COLUMN ] column { SET | DROP } NOT NULL\n" +#~ " ALTER [ COLUMN ] column SET STATISTICS integer\n" +#~ " ALTER [ COLUMN ] column SET STORAGE { PLAIN | EXTERNAL | EXTENDED | MAIN }\n" +#~ " ADD table_constraint\n" +#~ " DROP CONSTRAINT constraint_name [ RESTRICT | CASCADE ]\n" +#~ " DISABLE TRIGGER [ trigger_name | ALL | USER ]\n" +#~ " ENABLE TRIGGER [ trigger_name | ALL | USER ]\n" +#~ " ENABLE REPLICA TRIGGER trigger_name\n" +#~ " ENABLE ALWAYS TRIGGER trigger_name\n" +#~ " DISABLE RULE rewrite_rule_name\n" +#~ " ENABLE RULE rewrite_rule_name\n" +#~ " ENABLE REPLICA RULE rewrite_rule_name\n" +#~ " ENABLE ALWAYS RULE rewrite_rule_name\n" +#~ " CLUSTER ON index_name\n" +#~ " SET WITHOUT CLUSTER\n" +#~ " SET WITH OIDS\n" +#~ " SET WITHOUT OIDS\n" +#~ " SET ( storage_parameter = value [, ... ] )\n" +#~ " RESET ( storage_parameter [, ... ] )\n" +#~ " INHERIT parent_table\n" +#~ " NO INHERIT parent_table\n" +#~ " OWNER TO new_owner\n" +#~ " SET TABLESPACE new_tablespace" +#~ msgstr "" +#~ "ALTER TABLE [ ONLY ] namn [ * ]\n" +#~ " aktion [, ... 
]\n" +#~ "ALTER TABLE [ ONLY ] namn [ * ]\n" +#~ " RENAME [ COLUMN ] kolumn TO ny_kolumn\n" +#~ "ALTER TABLE namn\n" +#~ " RENAME TO nytt_namn\n" +#~ "ALTER TABLE namn\n" +#~ " SET SCHEMA nytt_schema\n" #~ "\n" -#~ "GRANT { CREATE | ALL [ PRIVILEGES ] }\n" -#~ " ON TABLESPACE tablespacename [, ...]\n" -#~ " TO { [ GROUP ] rolename | PUBLIC } [, ...] [ WITH GRANT OPTION ]\n" +#~ "där aktion är en av:\n" #~ "\n" -#~ "GRANT role [, ...] TO rolename [, ...] [ WITH ADMIN OPTION ]" +#~ " ADD [ COLUMN ] kolumn type [ kolumnvillkor [ ... ] ]\n" +#~ " DROP [ COLUMN ] kolumn [ RESTRICT | CASCADE ]\n" +#~ " ALTER [ COLUMN ] kolumn TYPE type [ USING uttryck ]\n" +#~ " ALTER [ COLUMN ] kolumn SET DEFAULT uttryck\n" +#~ " ALTER [ COLUMN ] kolumn DROP DEFAULT\n" +#~ " ALTER [ COLUMN ] kolumn { SET | DROP } NOT NULL\n" +#~ " ALTER [ COLUMN ] kolumn SET STATISTICS heltal\n" +#~ " ALTER [ COLUMN ] kolumn SET STORAGE { PLAIN | EXTERNAL | EXTENDED | MAIN }\n" +#~ " ADD tabellvillkor\n" +#~ " DROP CONSTRAINT villkorsnamn [ RESTRICT | CASCADE ]\n" +#~ " DISABLE TRIGGER [ utlösarnamn | ALL | USER ]\n" +#~ " ENABLE TRIGGER [ utlösarnamn | ALL | USER ]\n" +#~ " CLUSTER ON indexnamn\n" +#~ " SET WITHOUT CLUSTER\n" +#~ " SET WITHOUT OIDS\n" +#~ " SET ( lagringsparameter = värde [, ... ] )\n" +#~ " RESET ( lagringsparameter [, ... ] )\n" +#~ " INHERIT föräldertabell\n" +#~ " NO INHERIT föräldertabell\n" +#~ " OWNER TO ny_ägare\n" +#~ " SET TABLESPACE tabellutrymme" + +#~ msgid "" +#~ "ALTER TABLESPACE name RENAME TO newname\n" +#~ "ALTER TABLESPACE name OWNER TO newowner" #~ msgstr "" -#~ "GRANT { { SELECT | INSERT | UPDATE | DELETE | REFERENCES | TRIGGER }\n" -#~ " [,...] | ALL [ PRIVILEGES ] }\n" -#~ " ON [ TABLE ] tabellnamn [, ...]\n" -#~ " TO { användarnamn | GROUP gruppnamn | PUBLIC } [, ...] [ WITH GRANT OPTION ]\n" +#~ "ALTER TABLESPACE namn RENAME TO nytt_namn\n" +#~ "ALTER TABLESPACE namn OWNER TO ny_ägare" + +#~ msgid "ALTER TEXT SEARCH PARSER name RENAME TO newname" +#~ msgstr "ALTER TEXT SEARCH PARSER namn RENAME TO nyttnamn" + +#~ msgid "ALTER TEXT SEARCH TEMPLATE name RENAME TO newname" +#~ msgstr "ALTER TEXT SEARCH TEMPLATE namn RENAME TO nyttnamn" + +#~ msgid "ALTER TRIGGER name ON table RENAME TO newname" +#~ msgstr "ALTER TRIGGER namb ON tabell RENAME TO nyttnamn" + +#~ msgid "" +#~ "ALTER TYPE name RENAME TO new_name\n" +#~ "ALTER TYPE name OWNER TO new_owner \n" +#~ "ALTER TYPE name SET SCHEMA new_schema" +#~ msgstr "" +#~ "ALTER TYPE namn RENAME TO nytt_namn\n" +#~ "ALTER TYPE namn OWNER TO ny_ägare \n" +#~ "ALTER TYPE namn SET SCHEMA nytt_schema" + +#, fuzzy +#~ msgid "" +#~ "ALTER USER name [ [ WITH ] option [ ... ] ]\n" #~ "\n" -#~ "GRANT { { USAGE | SELECT | UPDATE }\n" -#~ " [,...] | ALL [ PRIVILEGES ] }\n" -#~ " ON SEQUENCE sekvensnamn [, ...]\n" -#~ " TO { användarnamn | GROUP gruppnamn | PUBLIC } [, ...] [ WITH GRANT OPTION ]\n" +#~ "where option can be:\n" +#~ " \n" +#~ " SUPERUSER | NOSUPERUSER\n" +#~ " | CREATEDB | NOCREATEDB\n" +#~ " | CREATEROLE | NOCREATEROLE\n" +#~ " | CREATEUSER | NOCREATEUSER\n" +#~ " | INHERIT | NOINHERIT\n" +#~ " | LOGIN | NOLOGIN\n" +#~ " | CONNECTION LIMIT connlimit\n" +#~ " | [ ENCRYPTED | UNENCRYPTED ] PASSWORD 'password'\n" +#~ " | VALID UNTIL 'timestamp' \n" #~ "\n" -#~ "GRANT { { CREATE | CONNECT | TEMPORARY | TEMP } [,...] | ALL [ PRIVILEGES ] }\n" -#~ " ON DATABASE dbnamn [, ...]\n" -#~ " TO { användarnamn | GROUP gruppnamn | PUBLIC } [, ...] 
[ WITH GRANT OPTION ]\n" +#~ "ALTER USER name RENAME TO newname\n" #~ "\n" -#~ "GRANT { EXECUTE | ALL [ PRIVILEGES ] }\n" -#~ " ON FUNCTION funkname ( [ [ arg_läge ] [ arg_namn ] arg_typ [, ...] ] ) [, ...]\n" -#~ " TO { användarnamn | GROUP gruppnamn | PUBLIC } [, ...] [ WITH GRANT OPTION ]\n" +#~ "ALTER USER name SET configuration_parameter { TO | = } { value | DEFAULT }\n" +#~ "ALTER USER name SET configuration_parameter FROM CURRENT\n" +#~ "ALTER USER name RESET configuration_parameter\n" +#~ "ALTER USER name RESET ALL" +#~ msgstr "" +#~ "ALTER USER namn [ [ WITH ] alternativ [ ... ] ]\n" #~ "\n" -#~ "GRANT { USAGE | ALL [ PRIVILEGES ] }\n" -#~ " ON LANGUAGE språknamn [, ...]\n" -#~ " TO { användarnamn | GROUP gruppnamn | PUBLIC } [, ...] [ WITH GRANT OPTION ]\n" +#~ "där alternativ kan vara:\n" #~ "\n" -#~ "GRANT { { CREATE | USAGE } [,...] | ALL [ PRIVILEGES ] }\n" -#~ " ON SCHEMA schemanamn [, ...]\n" -#~ " TO { användarnamn | GROUP gruppnamn | PUBLIC } [, ...] [ WITH GRANT OPTION ]\n" +#~ " SUPERUSER | NOSUPERUSER\n" +#~ " | CREATEDB | NOCREATEDB\n" +#~ " | CREATEROLE | NOCREATEROLE\n" +#~ " | CREATEUSER | NOCREATEUSER\n" +#~ " | INHERIT | NOINHERIT\n" +#~ " | LOGIN | NOLOGIN\n" +#~ " | CONNECTION LIMIT anslutningstak\n" +#~ " | [ ENCRYPTED | UNENCRYPTED ] PASSWORD 'lösenord' \n" +#~ " | VALID UNTIL 'tidsstämpel'\n" #~ "\n" -#~ "GRANT { CREATE | ALL [ PRIVILEGES ] }\n" -#~ " ON TABLESPACE tabellutrymmesnamn [, ...]\n" -#~ " TO { användarnamn | GROUP gruppnamn | PUBLIC } [, ...] [ WITH GRANT OPTION ]\n" +#~ "ALTER USER namn RENAME TO nytt_namn\n" #~ "\n" -#~ "GRANT roll [, ...] TO användarnamn [, ...] [ WITH ADMIN OPTION ]" +#~ "ALTER USER namn SET konfigurationsparameter { TO | = } { värde | DEFAULT }\n" +#~ "ALTER USER namn RESET konfigurationsparameter" #~ msgid "" -#~ "FETCH [ direction { FROM | IN } ] cursorname\n" +#~ "ALTER VIEW name ALTER [ COLUMN ] column SET DEFAULT expression\n" +#~ "ALTER VIEW name ALTER [ COLUMN ] column DROP DEFAULT\n" +#~ "ALTER VIEW name OWNER TO new_owner\n" +#~ "ALTER VIEW name RENAME TO new_name\n" +#~ "ALTER VIEW name SET SCHEMA new_schema" +#~ msgstr "" +#~ "ALTER VIEW namn ALTER [ COLUMN ] kolumn SET DEFAULT uttryck\n" +#~ "ALTER VIEW namn ALTER [ COLUMN ] kolumn DROP DEFAULT\n" +#~ "ALTER VIEW namn OWNER TO ny_ägare\n" +#~ "ALTER VIEW namn RENAME TO nytt_namn\n" +#~ "ALTER VIEW namn SET SCHEMA nytt_schema" + +#~ msgid "ANALYZE [ VERBOSE ] [ table [ ( column [, ...] ) ] ]" +#~ msgstr "ANALYZE [ VERBOSE ] [ tabell [ ( kolumn [, ...] ) ] ]" + +#~ msgid "" +#~ "BEGIN [ WORK | TRANSACTION ] [ transaction_mode [, ...] ]\n" #~ "\n" -#~ "where direction can be empty or one of:\n" +#~ "where transaction_mode is one of:\n" #~ "\n" -#~ " NEXT\n" -#~ " PRIOR\n" -#~ " FIRST\n" -#~ " LAST\n" -#~ " ABSOLUTE count\n" -#~ " RELATIVE count\n" -#~ " count\n" -#~ " ALL\n" -#~ " FORWARD\n" -#~ " FORWARD count\n" -#~ " FORWARD ALL\n" -#~ " BACKWARD\n" -#~ " BACKWARD count\n" -#~ " BACKWARD ALL" +#~ " ISOLATION LEVEL { SERIALIZABLE | REPEATABLE READ | READ COMMITTED | READ UNCOMMITTED }\n" +#~ " READ WRITE | READ ONLY" #~ msgstr "" -#~ "FETCH [ riktning { FROM | IN } ] markörsnamn\n" +#~ "BEGIN [ WORK | TRANSACTION ] [ transaktionsläge [, ...] 
]\n" #~ "\n" -#~ "där riktning kan vara tom eller en av:\n" +#~ "där transaktionsläge är en av:\n" #~ "\n" -#~ " NEXT\n" -#~ " PRIOR\n" -#~ " FIRST\n" -#~ " LAST\n" -#~ " ABSOLUTE antal\n" -#~ " RELATIVE antal\n" -#~ " antal\n" -#~ " ALL\n" -#~ " FORWARD\n" -#~ " FORWARD antal\n" -#~ " FORWARD ALL\n" -#~ " BACKWARD\n" -#~ " BACKWARD antal\n" -#~ " BACKWARD ALL" - -#~ msgid "EXPLAIN [ ANALYZE ] [ VERBOSE ] statement" -#~ msgstr "EXPLAIN [ ANALYZE ] [ VERBOSE ] sats" - -#~ msgid "EXECUTE name [ ( parameter [, ...] ) ]" -#~ msgstr "EXECUTE namn [ ( parameter [, ...] ) ]" - -#~ msgid "END [ WORK | TRANSACTION ]" -#~ msgstr "END [ WORK | TRANSACTION ]" +#~ " ISOLATION LEVEL { SERIALIZABLE | REPEATABLE READ | READ COMMITTED | READ UNCOMMITTED }\n" +#~ " READ WRITE | READ ONLY" -#~ msgid "DROP VIEW [ IF EXISTS ] name [, ...] [ CASCADE | RESTRICT ]" -#~ msgstr "DROP VIEW [ IF EXISTS ] namn [, ...] [ CASCADE | RESTRICT ]" +#~ msgid "CHECKPOINT" +#~ msgstr "CHECKPOINT" -#~ msgid "DROP USER [ IF EXISTS ] name [, ...]" -#~ msgstr "DROP USER [ IF EXISTS ] namn [, ...]" +#~ msgid "CLOSE { name | ALL }" +#~ msgstr "CLOSE { namn | ALL }" -#~ msgid "DROP TYPE [ IF EXISTS ] name [, ...] [ CASCADE | RESTRICT ]" -#~ msgstr "DROP TYPE [ IF EXISTS ] namn [, ...] [ CASCADE | RESTRICT ]" - -#~ msgid "DROP TRIGGER [ IF EXISTS ] name ON table [ CASCADE | RESTRICT ]" -#~ msgstr "DROP TRIGGER [ IF EXISTS ] namn ON tabell [ CASCADE | RESTRICT ]" - -#~ msgid "DROP TEXT SEARCH TEMPLATE [ IF EXISTS ] name [ CASCADE | RESTRICT ]" -#~ msgstr "DROP TEXT SEARCH TEMPLATE [ IF EXISTS ] namn [ CASCADE | RESTRICT ]" - -#~ msgid "DROP TEXT SEARCH PARSER [ IF EXISTS ] name [ CASCADE | RESTRICT ]" -#~ msgstr "DROP TEXT SEARCH PARSER [ IF EXISTS ] namn [ CASCADE | RESTRICT ]" - -#~ msgid "DROP TEXT SEARCH DICTIONARY [ IF EXISTS ] name [ CASCADE | RESTRICT ]" -#~ msgstr "DROP TEXT SEARCH DICTIONARY [ IF EXISTS ] namn [ CASCADE | RESTRICT ]" - -#~ msgid "DROP TEXT SEARCH CONFIGURATION [ IF EXISTS ] name [ CASCADE | RESTRICT ]" -#~ msgstr "DROP TEXT SEARCH CONFIGURATION [ IF EXISTS ] namn [ CASCADE | RESTRICT ]" - -#~ msgid "DROP TABLESPACE [ IF EXISTS ] tablespacename" -#~ msgstr "DROP TABLESPACE [ IF EXISTS ] tabellutrymmesnamn" - -#~ msgid "DROP TABLE [ IF EXISTS ] name [, ...] [ CASCADE | RESTRICT ]" -#~ msgstr "DROP TABLE [ IF EXISTS ] namn [, ...] [ CASCADE | RESTRICT ]" +#~ msgid "" +#~ "CLUSTER [VERBOSE] tablename [ USING indexname ]\n" +#~ "CLUSTER [VERBOSE]" +#~ msgstr "" +#~ "CLUSTER [VERBOSE] tabellnamn [ USING indexnamn ]\n" +#~ "CLUSTER [VERBOSE]" #, fuzzy -#~ msgid "DROP SERVER [ IF EXISTS ] servername [ CASCADE | RESTRICT ]" -#~ msgstr "DROP CONVERSION [ IF EXISTS ] namn [ CASCADE | RESTRICT ]" - -#~ msgid "DROP SEQUENCE [ IF EXISTS ] name [, ...] [ CASCADE | RESTRICT ]" -#~ msgstr "DROP SEQUENCE [ IF EXISTS ] namn [, ...] [ CASCADE | RESTRICT ]" - -#~ msgid "DROP SCHEMA [ IF EXISTS ] name [, ...] [ CASCADE | RESTRICT ]" -#~ msgstr "DROP SCHEMA [ IF EXISTS ] namn [, ...] [ CASCADE | RESTRICT ]" - -#~ msgid "DROP RULE [ IF EXISTS ] name ON relation [ CASCADE | RESTRICT ]" -#~ msgstr "DROP RULE [ IF EXISTS ] namn ON relation [ CASCADE | RESTRICT ]" - -#~ msgid "DROP ROLE [ IF EXISTS ] name [, ...]" -#~ msgstr "DROP ROLE [ IF EXISTS ] namn [, ...]" - -#~ msgid "DROP OWNED BY name [, ...] [ CASCADE | RESTRICT ]" -#~ msgstr "DROP OWNED BY namn [, ...] 
[ CASCADE | RESTRICT ]" +#~ msgid "" +#~ "COMMENT ON\n" +#~ "{\n" +#~ " TABLE object_name |\n" +#~ " COLUMN table_name.column_name |\n" +#~ " AGGREGATE agg_name (agg_type [, ...] ) |\n" +#~ " CAST (sourcetype AS targettype) |\n" +#~ " CONSTRAINT constraint_name ON table_name |\n" +#~ " CONVERSION object_name |\n" +#~ " DATABASE object_name |\n" +#~ " DOMAIN object_name |\n" +#~ " FUNCTION func_name ( [ [ argmode ] [ argname ] argtype [, ...] ] ) |\n" +#~ " INDEX object_name |\n" +#~ " LARGE OBJECT large_object_oid |\n" +#~ " OPERATOR op (leftoperand_type, rightoperand_type) |\n" +#~ " OPERATOR CLASS object_name USING index_method |\n" +#~ " OPERATOR FAMILY object_name USING index_method |\n" +#~ " [ PROCEDURAL ] LANGUAGE object_name |\n" +#~ " ROLE object_name |\n" +#~ " RULE rule_name ON table_name |\n" +#~ " SCHEMA object_name |\n" +#~ " SEQUENCE object_name |\n" +#~ " TABLESPACE object_name |\n" +#~ " TEXT SEARCH CONFIGURATION object_name |\n" +#~ " TEXT SEARCH DICTIONARY object_name |\n" +#~ " TEXT SEARCH PARSER object_name |\n" +#~ " TEXT SEARCH TEMPLATE object_name |\n" +#~ " TRIGGER trigger_name ON table_name |\n" +#~ " TYPE object_name |\n" +#~ " VIEW object_name\n" +#~ "} IS 'text'" +#~ msgstr "" +#~ "COMMENT ON\n" +#~ "{\n" +#~ " TABLE objektname |\n" +#~ " COLUMN tabellnamn.kolumnnamn |\n" +#~ " AGGREGATE agg_namn (agg_typ) |\n" +#~ " CAST (källtyp AS måltyp) |\n" +#~ " CONSTRAINT villkorsnamn ON tabellnamn |\n" +#~ " CONVERSION objektnamn |\n" +#~ " DATABASE objektnamn |\n" +#~ " DOMAIN objektnamn |\n" +#~ " FUNCTION funk_namn ( [ [ arg_läge ] [ arg_namn ] arg_typ [, ...] ] ) |\n" +#~ " INDEX objektnamn |\n" +#~ " LARGE OBJECT stort_objekt_oid |\n" +#~ " OPERATOR op (vänster operandstyp, höger operandstyp) |\n" +#~ " OPERATOR CLASS objektnamn USING indexmetod |\n" +#~ " [ PROCEDURAL ] LANGUAGE objektnamn |\n" +#~ " ROLE objektnamn |\n" +#~ " RULE regelnamn ON tabellnamn |\n" +#~ " SCHEMA objektnamn |\n" +#~ " SEQUENCE objektnamn |\n" +#~ " TRIGGER utlösarnamn ON tabellnamn |\n" +#~ " TYPE objektnamn |\n" +#~ " VIEW objektnamn\n" +#~ "} IS 'text'" -#~ msgid "DROP OPERATOR FAMILY [ IF EXISTS ] name USING index_method [ CASCADE | RESTRICT ]" -#~ msgstr "DROP OPERATOR FAMILY [ IF EXISTS ] namn USING indexmetod [ CASCADE | RESTRICT ]" +#~ msgid "COMMIT [ WORK | TRANSACTION ]" +#~ msgstr "COMMIT [ WORK | TRANSACTION ]" -#~ msgid "DROP OPERATOR CLASS [ IF EXISTS ] name USING index_method [ CASCADE | RESTRICT ]" -#~ msgstr "DROP OPERATOR CLASS [ IF EXISTS ] namn USING indexmetod [ CASCADE | RESTRICT ]" +#~ msgid "COMMIT PREPARED transaction_id" +#~ msgstr "COMMIT PREPARED transaktions-id" -#~ msgid "DROP OPERATOR [ IF EXISTS ] name ( { lefttype | NONE } , { righttype | NONE } ) [ CASCADE | RESTRICT ]" -#~ msgstr "DROP OPERATOR [ IF EXISTS ] namn ( { vänster_typ | NONE } , { höger_typ | NONE } ) [ CASCADE | RESTRICT ]" +#, fuzzy +#~ msgid "" +#~ "COPY tablename [ ( column [, ...] ) ]\n" +#~ " FROM { 'filename' | STDIN }\n" +#~ " [ [ WITH ] \n" +#~ " [ BINARY ]\n" +#~ " [ OIDS ]\n" +#~ " [ DELIMITER [ AS ] 'delimiter' ]\n" +#~ " [ NULL [ AS ] 'null string' ]\n" +#~ " [ CSV [ HEADER ]\n" +#~ " [ QUOTE [ AS ] 'quote' ] \n" +#~ " [ ESCAPE [ AS ] 'escape' ]\n" +#~ " [ FORCE NOT NULL column [, ...] ]\n" +#~ "\n" +#~ "COPY { tablename [ ( column [, ...] 
) ] | ( query ) }\n" +#~ " TO { 'filename' | STDOUT }\n" +#~ " [ [ WITH ] \n" +#~ " [ BINARY ]\n" +#~ " [ OIDS ]\n" +#~ " [ DELIMITER [ AS ] 'delimiter' ]\n" +#~ " [ NULL [ AS ] 'null string' ]\n" +#~ " [ CSV [ HEADER ]\n" +#~ " [ QUOTE [ AS ] 'quote' ] \n" +#~ " [ ESCAPE [ AS ] 'escape' ]\n" +#~ " [ FORCE QUOTE column [, ...] ]" +#~ msgstr "" +#~ "COPY tabellnamn [ ( kolumn [, ...] ) ]\n" +#~ " FROM { 'filnamn' | STDIN }\n" +#~ " [ [ WITH ] \n" +#~ " [ BINARY ] \n" +#~ " [ OIDS ]\n" +#~ " [ DELIMITER [ AS ] 'avdelare' ]\n" +#~ " [ NULL [ AS ] 'null-sträng' ] ]\n" +#~ " [ CSV [ HEADER ]\n" +#~ " [ QUOTE [ AS ] 'citat' ]\n" +#~ " [ ESCAPE [ AS ] 'escape' ]\n" +#~ " [ FORCE NOT NULL kolumn [, ...] ]\n" +#~ "\n" +#~ "COPY { tabellnamn [ ( kolumn [, ...] ) ] | ( fråga ) }\n" +#~ " TO { 'filnamn' | STDOUT }\n" +#~ " [ [ WITH ] \n" +#~ " [ BINARY ]\n" +#~ " [ HEADER ]\n" +#~ " [ OIDS ]\n" +#~ " [ DELIMITER [ AS ] 'avdelare' ]\n" +#~ " [ NULL [ AS ] 'null-sträng' ] ]\n" +#~ " [ CSV [ HEADER ]\n" +#~ " [ QUOTE [ AS ] 'citat' ]\n" +#~ " [ ESCAPE [ AS ] 'escape' ]\n" +#~ " [ FORCE QUOTE kolumn [, ...] ]" -#~ msgid "DROP [ PROCEDURAL ] LANGUAGE [ IF EXISTS ] name [ CASCADE | RESTRICT ]" -#~ msgstr "DROP [ PROCEDURAL ] LANGUAGE [ IF EXISTS ] namn [ CASCADE | RESTRICT ]" +#~ msgid "" +#~ "CREATE AGGREGATE name ( input_data_type [ , ... ] ) (\n" +#~ " SFUNC = sfunc,\n" +#~ " STYPE = state_data_type\n" +#~ " [ , FINALFUNC = ffunc ]\n" +#~ " [ , INITCOND = initial_condition ]\n" +#~ " [ , SORTOP = sort_operator ]\n" +#~ ")\n" +#~ "\n" +#~ "or the old syntax\n" +#~ "\n" +#~ "CREATE AGGREGATE name (\n" +#~ " BASETYPE = base_type,\n" +#~ " SFUNC = sfunc,\n" +#~ " STYPE = state_data_type\n" +#~ " [ , FINALFUNC = ffunc ]\n" +#~ " [ , INITCOND = initial_condition ]\n" +#~ " [ , SORTOP = sort_operator ]\n" +#~ ")" +#~ msgstr "" +#~ "CREATE AGGREGATE namn ( indatatyp [ , ... ] ) (\n" +#~ " SFUNC = sfunc,\n" +#~ " STYPE = tillståndsdatatyp\n" +#~ " [ , FINALFUNC = ffunc ]\n" +#~ " [ , INITCOND = startvärde ]\n" +#~ " [ , SORTOP = sorteringsoperator ]\n" +#~ ")\n" +#~ "\n" +#~ "eller den gamla syntaxen\n" +#~ "\n" +#~ "CREATE AGGREGATE namn (\n" +#~ " BASETYPE = indatatyp\n" +#~ " SFUNC = sfunc,\n" +#~ " STYPE = tillståndsdatatyp\n" +#~ " [ , FINALFUNC = ffunc ]\n" +#~ " [ , INITCOND = startvärde ]\n" +#~ " [ , SORTOP = sorteringsoperator ]\n" +#~ ")" -#~ msgid "DROP INDEX [ IF EXISTS ] name [, ...] [ CASCADE | RESTRICT ]" -#~ msgstr "DROP INDEX [ IF EXISTS ] namn [, ...] [ CASCADE | RESTRICT ]" +#~ msgid "" +#~ "CREATE CAST (sourcetype AS targettype)\n" +#~ " WITH FUNCTION funcname (argtypes)\n" +#~ " [ AS ASSIGNMENT | AS IMPLICIT ]\n" +#~ "\n" +#~ "CREATE CAST (sourcetype AS targettype)\n" +#~ " WITHOUT FUNCTION\n" +#~ " [ AS ASSIGNMENT | AS IMPLICIT ]\n" +#~ "\n" +#~ "CREATE CAST (sourcetype AS targettype)\n" +#~ " WITH INOUT\n" +#~ " [ AS ASSIGNMENT | AS IMPLICIT ]" +#~ msgstr "" +#~ "CREATE CAST (källtyp AS måltyp)\n" +#~ " WITH FUNCTION funknamn (argtyper)\n" +#~ " [ AS ASSIGNMENT | AS IMPLICIT ]\n" +#~ "\n" +#~ "CREATE CAST (källtyp AS måltyp)\n" +#~ " WITHOUT FUNCTION\n" +#~ " [ AS ASSIGNMENT | AS IMPLICIT ]\n" +#~ "\n" +#~ "CREATE CAST (källtyp AS måltyp)\n" +#~ " WITH INOUT\n" +#~ " [ AS ASSIGNMENT | AS IMPLICIT ]" -#~ msgid "DROP GROUP [ IF EXISTS ] name [, ...]" -#~ msgstr "DROP GROUP [ IF EXISTS ] namn [, ...]" +#~ msgid "" +#~ "CREATE CONSTRAINT TRIGGER name\n" +#~ " AFTER event [ OR ... 
]\n" +#~ " ON table_name\n" +#~ " [ FROM referenced_table_name ]\n" +#~ " { NOT DEFERRABLE | [ DEFERRABLE ] { INITIALLY IMMEDIATE | INITIALLY DEFERRED } }\n" +#~ " FOR EACH ROW\n" +#~ " EXECUTE PROCEDURE funcname ( arguments )" +#~ msgstr "" +#~ "CREATE CONSTRAINT TRIGGER namn \n" +#~ " AFTER händelse [ OR ... ]\n" +#~ " ON tabellnamn\n" +#~ " [ FROM refererat_tabellnamn ]\n" +#~ " { NOT DEFERRABLE | [ DEFERRABLE ] { INITIALLY IMMEDIATE | INITIALLY DEFERRED } }\n" +#~ " FOR EACH ROW\n" +#~ " EXECUTE PROCEDURE funktionsnamn ( argument )" #~ msgid "" -#~ "DROP FUNCTION [ IF EXISTS ] name ( [ [ argmode ] [ argname ] argtype [, ...] ] )\n" -#~ " [ CASCADE | RESTRICT ]" +#~ "CREATE [ DEFAULT ] CONVERSION name\n" +#~ " FOR source_encoding TO dest_encoding FROM funcname" #~ msgstr "" -#~ "DROP FUNCTION [ IF EXISTS ] namn ( [ [ arg_läge ] [ arg_namn ] arg_typ [, ...] ] )\n" -#~ " [ CASCADE | RESTRICT ]" +#~ "CREATE [ DEFAULT ] CONVERSION namn\n" +#~ " FOR källkodning TO målkodning FROM funknamn" #, fuzzy -#~ msgid "DROP FOREIGN DATA WRAPPER [ IF EXISTS ] name [ CASCADE | RESTRICT ]" -#~ msgstr "DROP CONVERSION [ IF EXISTS ] namn [ CASCADE | RESTRICT ]" - -#~ msgid "DROP DOMAIN [ IF EXISTS ] name [, ...] [ CASCADE | RESTRICT ]" -#~ msgstr "DROP DOMAIN [ IF EXISTS ] namn [, ...] [ CASCADE | RESTRICT ]" +#~ msgid "" +#~ "CREATE DATABASE name\n" +#~ " [ [ WITH ] [ OWNER [=] dbowner ]\n" +#~ " [ TEMPLATE [=] template ]\n" +#~ " [ ENCODING [=] encoding ]\n" +#~ " [ LC_COLLATE [=] lc_collate ]\n" +#~ " [ LC_CTYPE [=] lc_ctype ]\n" +#~ " [ TABLESPACE [=] tablespace ]\n" +#~ " [ CONNECTION LIMIT [=] connlimit ] ]" +#~ msgstr "" +#~ "CREATE DATABASE namn\n" +#~ " [ [ WITH ] [ OWNER [=] db-ägare ]\n" +#~ " [ TEMPLATE [=] mall ]\n" +#~ " [ ENCODING [=] kodning ]\n" +#~ " [ TABLESPACE [=] tabellutrymme ] ]\n" +#~ " [ CONNECTION LIMIT [=] anslutningstak ] ]" -#~ msgid "DROP DATABASE [ IF EXISTS ] name" -#~ msgstr "DROP DATABASE [ IF EXISTS ] namn" +#~ msgid "" +#~ "CREATE DOMAIN name [ AS ] data_type\n" +#~ " [ DEFAULT expression ]\n" +#~ " [ constraint [ ... ] ]\n" +#~ "\n" +#~ "where constraint is:\n" +#~ "\n" +#~ "[ CONSTRAINT constraint_name ]\n" +#~ "{ NOT NULL | NULL | CHECK (expression) }" +#~ msgstr "" +#~ "CREATE DOMAIN namn [ AS ] datatyp\n" +#~ " [ DEFAULT uttryck ]\n" +#~ " [ villkor [ ... ] ]\n" +#~ "\n" +#~ "där villkor är:\n" +#~ "\n" +#~ "[ CONSTRAINT villkorsnamn ]\n" +#~ "{ NOT NULL | NULL | CHECK (uttryck) }" -#~ msgid "DROP CONVERSION [ IF EXISTS ] name [ CASCADE | RESTRICT ]" -#~ msgstr "DROP CONVERSION [ IF EXISTS ] namn [ CASCADE | RESTRICT ]" +#, fuzzy +#~ msgid "" +#~ "CREATE [ OR REPLACE ] FUNCTION\n" +#~ " name ( [ [ argmode ] [ argname ] argtype [ { DEFAULT | = } defexpr ] [, ...] ] )\n" +#~ " [ RETURNS rettype\n" +#~ " | RETURNS TABLE ( colname coltype [, ...] ) ]\n" +#~ " { LANGUAGE langname\n" +#~ " | WINDOW\n" +#~ " | IMMUTABLE | STABLE | VOLATILE\n" +#~ " | CALLED ON NULL INPUT | RETURNS NULL ON NULL INPUT | STRICT\n" +#~ " | [ EXTERNAL ] SECURITY INVOKER | [ EXTERNAL ] SECURITY DEFINER\n" +#~ " | COST execution_cost\n" +#~ " | ROWS result_rows\n" +#~ " | SET configuration_parameter { TO value | = value | FROM CURRENT }\n" +#~ " | AS 'definition'\n" +#~ " | AS 'obj_file', 'link_symbol'\n" +#~ " } ...\n" +#~ " [ WITH ( attribute [, ...] ) ]" +#~ msgstr "" +#~ "CREATE [ OR REPLACE ] FUNCTION\n" +#~ " namn ( [ [ arg_läge ] [ arg_namn ] arg_typ [, ...] 
] )\n" +#~ " [ RETURNS rettyp ]\n" +#~ " { LANGUAGE språknamn\n" +#~ " | IMMUTABLE | STABLE | VOLATILE\n" +#~ " | CALLED ON NULL INPUT | RETURNS NULL ON NULL INPUT | STRICT\n" +#~ " | [EXTERNAL] SECURITY INVOKER | [EXTERNAL] SECURITY DEFINER\n" +#~ " | AS 'definition'\n" +#~ " | AS 'obj-fil', 'länksymbol'\n" +#~ " } ...\n" +#~ " [ WITH ( attribut [, ...] ) ]" -#~ msgid "DROP CAST [ IF EXISTS ] (sourcetype AS targettype) [ CASCADE | RESTRICT ]" -#~ msgstr "DROP CAST [ IF EXISTS ] (källtyp AS måltyp) [ CASCADE | RESTRICT ]" +#~ msgid "" +#~ "CREATE GROUP name [ [ WITH ] option [ ... ] ]\n" +#~ "\n" +#~ "where option can be:\n" +#~ " \n" +#~ " SUPERUSER | NOSUPERUSER\n" +#~ " | CREATEDB | NOCREATEDB\n" +#~ " | CREATEROLE | NOCREATEROLE\n" +#~ " | CREATEUSER | NOCREATEUSER\n" +#~ " | INHERIT | NOINHERIT\n" +#~ " | LOGIN | NOLOGIN\n" +#~ " | [ ENCRYPTED | UNENCRYPTED ] PASSWORD 'password'\n" +#~ " | VALID UNTIL 'timestamp' \n" +#~ " | IN ROLE rolename [, ...]\n" +#~ " | IN GROUP rolename [, ...]\n" +#~ " | ROLE rolename [, ...]\n" +#~ " | ADMIN rolename [, ...]\n" +#~ " | USER rolename [, ...]\n" +#~ " | SYSID uid" +#~ msgstr "" +#~ "CREATE GROUP namn [ [ WITH ] alternativ [ ... ] ]\n" +#~ "\n" +#~ "där alternativ kan vara:\n" +#~ " \n" +#~ " SUPERUSER | NOSUPERUSER\n" +#~ " | CREATEDB | NOCREATEDB\n" +#~ " | CREATEROLE | NOCREATEROLE\n" +#~ " | CREATEUSER | NOCREATEUSER\n" +#~ " | INHERIT | NOINHERIT\n" +#~ " | LOGIN | NOLOGIN\n" +#~ " | [ ENCRYPTED | UNENCRYPTED ] PASSWORD 'lösenord'\n" +#~ " | VALID UNTIL 'tidsstämpel' \n" +#~ " | IN ROLE rollnamn [, ...]\n" +#~ " | IN GROUP rollnamn [, ...]\n" +#~ " | ROLE rollnamn [, ...]\n" +#~ " | ADMIN rollnamn [, ...]\n" +#~ " | USER rollnamn [, ...]\n" +#~ " | SYSID uid" -#~ msgid "DROP AGGREGATE [ IF EXISTS ] name ( type [ , ... ] ) [ CASCADE | RESTRICT ]" -#~ msgstr "DROP AGGREGATE [ IF EXISTS ] namn ( typ [ , ... ] ) [ CASCADE | RESTRICT ]" +#~ msgid "" +#~ "CREATE [ UNIQUE ] INDEX [ CONCURRENTLY ] name ON table [ USING method ]\n" +#~ " ( { column | ( expression ) } [ opclass ] [ ASC | DESC ] [ NULLS { FIRST | LAST } ] [, ...] )\n" +#~ " [ WITH ( storage_parameter = value [, ... ] ) ]\n" +#~ " [ TABLESPACE tablespace ]\n" +#~ " [ WHERE predicate ]" +#~ msgstr "" +#~ "CREATE [ UNIQUE ] INDEX [ CONCURRENTLY ] namn ON tabell [ USING metod ]\n" +#~ " ( { kolumn | ( uttryck ) } [ op-klass ] [ ASC | DESC ] [ NULLS { FIRST | LAST } ] [, ...] )\n" +#~ " [ WITH ( lagringsparameter = värde [, ... ] ) ]\n" +#~ " [ TABLESPACE tabellutrymme ]\n" +#~ " [ WHERE predikat ]" -#~ msgid "DISCARD { ALL | PLANS | TEMPORARY | TEMP }" -#~ msgstr "DISCARD { ALL | PLANS | TEMPORARY | TEMP }" +#~ msgid "" +#~ "CREATE [ PROCEDURAL ] LANGUAGE name\n" +#~ "CREATE [ TRUSTED ] [ PROCEDURAL ] LANGUAGE name\n" +#~ " HANDLER call_handler [ VALIDATOR valfunction ]" +#~ msgstr "" +#~ "CREATE [ PROCEDURAL ] LANGUAGE namn\n" +#~ "CREATE [ TRUSTED ] [ PROCEDURAL ] LANGUAGE namn\n" +#~ " HANDLER anropshanterare [ VALIDATOR val-funktion ]" #~ msgid "" -#~ "DELETE FROM [ ONLY ] table [ [ AS ] alias ]\n" -#~ " [ USING usinglist ]\n" -#~ " [ WHERE condition | WHERE CURRENT OF cursor_name ]\n" -#~ " [ RETURNING * | output_expression [ [ AS ] output_name ] [, ...] 
]" +#~ "CREATE OPERATOR name (\n" +#~ " PROCEDURE = funcname\n" +#~ " [, LEFTARG = lefttype ] [, RIGHTARG = righttype ]\n" +#~ " [, COMMUTATOR = com_op ] [, NEGATOR = neg_op ]\n" +#~ " [, RESTRICT = res_proc ] [, JOIN = join_proc ]\n" +#~ " [, HASHES ] [, MERGES ]\n" +#~ ")" #~ msgstr "" -#~ "DELETE FROM [ ONLY ] tabell [ [ AS ] alias ]\n" -#~ " [ USING using-lista ]\n" -#~ " [ WHERE villkor | WHERE CURRENT OF märkörnamn ]\n" -#~ " [ RETURNING * | utdatauttryck [ [ AS ] utdatanamn ] [, ...] ]" +#~ "CREATE OPERATOR namn (\n" +#~ " PROCEDURE = funknamn\n" +#~ " [, LEFTARG = vänster-typ ] [, RIGHTARG = höger-typ ]\n" +#~ " [, COMMUTATOR = com_op ] [, NEGATOR = neg_op ]\n" +#~ " [, RESTRICT = res_proc ] [, JOIN = join_proc ]\n" +#~ " [, HASHES ] [, MERGES ]\n" +#~ ")" +#, fuzzy #~ msgid "" -#~ "DECLARE name [ BINARY ] [ INSENSITIVE ] [ [ NO ] SCROLL ]\n" -#~ " CURSOR [ { WITH | WITHOUT } HOLD ] FOR query" +#~ "CREATE OPERATOR CLASS name [ DEFAULT ] FOR TYPE data_type\n" +#~ " USING index_method [ FAMILY family_name ] AS\n" +#~ " { OPERATOR strategy_number operator_name [ ( op_type, op_type ) ]\n" +#~ " | FUNCTION support_number [ ( op_type [ , op_type ] ) ] funcname ( argument_type [, ...] )\n" +#~ " | STORAGE storage_type\n" +#~ " } [, ... ]" #~ msgstr "" -#~ "DECLARE namn [ BINARY ] [ INSENSITIVE ] [ [ NO ] SCROLL ]\n" -#~ " CURSOR [ { WITH | WITHOUT } HOLD ] FOR fråga" - -#~ msgid "DEALLOCATE [ PREPARE ] { name | ALL }" -#~ msgstr "DEALLOCATE [ PREPARE ] { namn | ALL }" +#~ "CREATE OPERATOR CLASS namn [ DEFAULT ] FOR TYPE datatyp USING indexmetod AS\n" +#~ " { OPERATOR strateginummer operatornamn [ ( op_typ, op_typ ) ] [ RECHECK ]\n" +#~ " | FUNCTION supportnummer funknamn ( argumenttyp [, ...] )\n" +#~ " | STORAGE lagringstyp\n" +#~ " } [, ... ]" -#~ msgid "" -#~ "CREATE [ OR REPLACE ] [ TEMP | TEMPORARY ] VIEW name [ ( column_name [, ...] ) ]\n" -#~ " AS query" -#~ msgstr "" -#~ "CREATE [ OR REPLACE ] [ TEMP | TEMPORARY ] VIEW namn [ ( kolumnnamn [, ...] ) ]\n" -#~ " AS fråga" +#~ msgid "CREATE OPERATOR FAMILY name USING index_method" +#~ msgstr "CREATE OPERATOR FAMILY namn USING indexmetod" #~ msgid "" -#~ "CREATE USER name [ [ WITH ] option [ ... ] ]\n" +#~ "CREATE ROLE name [ [ WITH ] option [ ... ] ]\n" #~ "\n" #~ "where option can be:\n" #~ " \n" @@ -6305,7 +6996,7 @@ msgstr "" #~ " | USER rolename [, ...]\n" #~ " | SYSID uid" #~ msgstr "" -#~ "CREATE USER namn [ [ WITH ] alternativ [ ... ] ]\n" +#~ "CREATE ROLE namn [ [ WITH ] alternativ [ ... ] ]\n" #~ "\n" #~ "där alternativ kan vara:\n" #~ " \n" @@ -6325,84 +7016,32 @@ msgstr "" #~ " | USER rollnamn [, ...]\n" #~ " | SYSID uid" -#, fuzzy #~ msgid "" -#~ "CREATE TYPE name AS\n" -#~ " ( attribute_name data_type [, ... ] )\n" -#~ "\n" -#~ "CREATE TYPE name AS ENUM\n" -#~ " ( 'label' [, ... 
] )\n" -#~ "\n" -#~ "CREATE TYPE name (\n" -#~ " INPUT = input_function,\n" -#~ " OUTPUT = output_function\n" -#~ " [ , RECEIVE = receive_function ]\n" -#~ " [ , SEND = send_function ]\n" -#~ " [ , TYPMOD_IN = type_modifier_input_function ]\n" -#~ " [ , TYPMOD_OUT = type_modifier_output_function ]\n" -#~ " [ , ANALYZE = analyze_function ]\n" -#~ " [ , INTERNALLENGTH = { internallength | VARIABLE } ]\n" -#~ " [ , PASSEDBYVALUE ]\n" -#~ " [ , ALIGNMENT = alignment ]\n" -#~ " [ , STORAGE = storage ]\n" -#~ " [ , LIKE = like_type ]\n" -#~ " [ , CATEGORY = category ]\n" -#~ " [ , PREFERRED = preferred ]\n" -#~ " [ , DEFAULT = default ]\n" -#~ " [ , ELEMENT = element ]\n" -#~ " [ , DELIMITER = delimiter ]\n" -#~ ")\n" -#~ "\n" -#~ "CREATE TYPE name" +#~ "CREATE [ OR REPLACE ] RULE name AS ON event\n" +#~ " TO table [ WHERE condition ]\n" +#~ " DO [ ALSO | INSTEAD ] { NOTHING | command | ( command ; command ... ) }" #~ msgstr "" -#~ "CREATE TYPE namn AS\n" -#~ " ( attributnamn datatyp [, ... ] )\n" -#~ "\n" -#~ "CREATE TYPE namn (\n" -#~ " INPUT = inmatningsfunktion,\n" -#~ " OUTPUT = utmatningsfunktion\n" -#~ " [ , RECEIVE = mottagarfunktion ]\n" -#~ " [ , SEND = sändfunktion ]\n" -#~ " [ , ANALYZE = analysfunktion ]\n" -#~ " [ , INTERNALLENGTH = { internlängd | VARIABLE } ]\n" -#~ " [ , PASSEDBYVALUE ]\n" -#~ " [ , ALIGNMENT = justering ]\n" -#~ " [ , STORAGE = lagring ]\n" -#~ " [ , DEFAULT = standard ]\n" -#~ " [ , ELEMENT = element ]\n" -#~ " [ , DELIMITER = avskiljare ]\n" -#~ ")\n" -#~ "\n" -#~ "CREATE TYPE namn" +#~ "CREATE [ OR REPLACE ] RULE namn AS ON händelse\n" +#~ " TO tabell [ WHERE villkor ]\n" +#~ " DO [ ALSO | INSTEAD ] { NOTHING | kommando | ( kommando ; kommando ... ) }" #~ msgid "" -#~ "CREATE TRIGGER name { BEFORE | AFTER } { event [ OR ... ] }\n" -#~ " ON table [ FOR [ EACH ] { ROW | STATEMENT } ]\n" -#~ " EXECUTE PROCEDURE funcname ( arguments )" +#~ "CREATE SCHEMA schemaname [ AUTHORIZATION username ] [ schema_element [ ... ] ]\n" +#~ "CREATE SCHEMA AUTHORIZATION username [ schema_element [ ... ] ]" #~ msgstr "" -#~ "CREATE TRIGGER namn { BEFORE | AFTER } { händelse [ OR ... ] }\n" -#~ " ON tabell [ FOR [ EACH ] { ROW | STATEMENT } ]\n" -#~ " EXECUTE PROCEDURE funknamn ( argument )" - -#~ msgid "CREATE TABLESPACE tablespacename [ OWNER username ] LOCATION 'directory'" -#~ msgstr "CREATE TABLESPACE tabellutrymmesnamn [ OWNER användarnamn ] LOCATION 'katalog'" +#~ "CREATE SCHEMA schema-namn [ AUTHORIZATION användarnamn ] [ schema-element [ ... ] ]\n" +#~ "CREATE SCHEMA AUTHORIZATION användarnamn [ schema-element [ ... ] ]" #~ msgid "" -#~ "CREATE [ [ GLOBAL | LOCAL ] { TEMPORARY | TEMP } ] TABLE table_name\n" -#~ " [ (column_name [, ...] ) ]\n" -#~ " [ WITH ( storage_parameter [= value] [, ... ] ) | WITH OIDS | WITHOUT OIDS ]\n" -#~ " [ ON COMMIT { PRESERVE ROWS | DELETE ROWS | DROP } ]\n" -#~ " [ TABLESPACE tablespace ]\n" -#~ " AS query\n" -#~ " [ WITH [ NO ] DATA ]" +#~ "CREATE [ TEMPORARY | TEMP ] SEQUENCE name [ INCREMENT [ BY ] increment ]\n" +#~ " [ MINVALUE minvalue | NO MINVALUE ] [ MAXVALUE maxvalue | NO MAXVALUE ]\n" +#~ " [ START [ WITH ] start ] [ CACHE cache ] [ [ NO ] CYCLE ]\n" +#~ " [ OWNED BY { table.column | NONE } ]" #~ msgstr "" -#~ "CREATE [ [ GLOBAL | LOCAL ] { TEMPORARY | TEMP } ] TABLE tabellnamn\n" -#~ " [ (kolumnnamn [, ...] ) ]\n" -#~ " [ WITH ( lagringsparameter [= värde] [, ... 
] ) | WITH OIDS | WITHOUT OIDS ]\n" -#~ " [ ON COMMIT { PRESERVE ROWS | DELETE ROWS | DROP } ]\n" -#~ " [ TABLESPACE tabellutrymme ]\n" -#~ " AS fråga\n" -#~ " [ WITH [ NO ] DATA ]" +#~ "CREATE [ TEMPORARY | TEMP ] SEQUENCE namn [ INCREMENT [ BY ] ökningsvärde ]\n" +#~ " [ MINVALUE minvärde | NO MINVALUE ] [ MAXVALUE maxvärde | NO MAXVALUE ]\n" +#~ " [ START [ WITH ] start ] [ CACHE cache ] [ [ NO ] CYCLE ]\n" +#~ " [ OWNED BY { tabell.kolumn | NONE } ]" #, fuzzy #~ msgid "" @@ -6483,132 +7122,86 @@ msgstr "" #~ "[ USING INDEX TABLESPACE tabellutrymme ]" #~ msgid "" -#~ "CREATE [ TEMPORARY | TEMP ] SEQUENCE name [ INCREMENT [ BY ] increment ]\n" -#~ " [ MINVALUE minvalue | NO MINVALUE ] [ MAXVALUE maxvalue | NO MAXVALUE ]\n" -#~ " [ START [ WITH ] start ] [ CACHE cache ] [ [ NO ] CYCLE ]\n" -#~ " [ OWNED BY { table.column | NONE } ]" -#~ msgstr "" -#~ "CREATE [ TEMPORARY | TEMP ] SEQUENCE namn [ INCREMENT [ BY ] ökningsvärde ]\n" -#~ " [ MINVALUE minvärde | NO MINVALUE ] [ MAXVALUE maxvärde | NO MAXVALUE ]\n" -#~ " [ START [ WITH ] start ] [ CACHE cache ] [ [ NO ] CYCLE ]\n" -#~ " [ OWNED BY { tabell.kolumn | NONE } ]" - -#~ msgid "" -#~ "CREATE SCHEMA schemaname [ AUTHORIZATION username ] [ schema_element [ ... ] ]\n" -#~ "CREATE SCHEMA AUTHORIZATION username [ schema_element [ ... ] ]" +#~ "CREATE [ [ GLOBAL | LOCAL ] { TEMPORARY | TEMP } ] TABLE table_name\n" +#~ " [ (column_name [, ...] ) ]\n" +#~ " [ WITH ( storage_parameter [= value] [, ... ] ) | WITH OIDS | WITHOUT OIDS ]\n" +#~ " [ ON COMMIT { PRESERVE ROWS | DELETE ROWS | DROP } ]\n" +#~ " [ TABLESPACE tablespace ]\n" +#~ " AS query\n" +#~ " [ WITH [ NO ] DATA ]" #~ msgstr "" -#~ "CREATE SCHEMA schema-namn [ AUTHORIZATION användarnamn ] [ schema-element [ ... ] ]\n" -#~ "CREATE SCHEMA AUTHORIZATION användarnamn [ schema-element [ ... ] ]" +#~ "CREATE [ [ GLOBAL | LOCAL ] { TEMPORARY | TEMP } ] TABLE tabellnamn\n" +#~ " [ (kolumnnamn [, ...] ) ]\n" +#~ " [ WITH ( lagringsparameter [= värde] [, ... ] ) | WITH OIDS | WITHOUT OIDS ]\n" +#~ " [ ON COMMIT { PRESERVE ROWS | DELETE ROWS | DROP } ]\n" +#~ " [ TABLESPACE tabellutrymme ]\n" +#~ " AS fråga\n" +#~ " [ WITH [ NO ] DATA ]" -#~ msgid "" -#~ "CREATE [ OR REPLACE ] RULE name AS ON event\n" -#~ " TO table [ WHERE condition ]\n" -#~ " DO [ ALSO | INSTEAD ] { NOTHING | command | ( command ; command ... ) }" -#~ msgstr "" -#~ "CREATE [ OR REPLACE ] RULE namn AS ON händelse\n" -#~ " TO tabell [ WHERE villkor ]\n" -#~ " DO [ ALSO | INSTEAD ] { NOTHING | kommando | ( kommando ; kommando ... ) }" +#~ msgid "CREATE TABLESPACE tablespacename [ OWNER username ] LOCATION 'directory'" +#~ msgstr "CREATE TABLESPACE tabellutrymmesnamn [ OWNER användarnamn ] LOCATION 'katalog'" #~ msgid "" -#~ "CREATE ROLE name [ [ WITH ] option [ ... ] ]\n" -#~ "\n" -#~ "where option can be:\n" -#~ " \n" -#~ " SUPERUSER | NOSUPERUSER\n" -#~ " | CREATEDB | NOCREATEDB\n" -#~ " | CREATEROLE | NOCREATEROLE\n" -#~ " | CREATEUSER | NOCREATEUSER\n" -#~ " | INHERIT | NOINHERIT\n" -#~ " | LOGIN | NOLOGIN\n" -#~ " | CONNECTION LIMIT connlimit\n" -#~ " | [ ENCRYPTED | UNENCRYPTED ] PASSWORD 'password'\n" -#~ " | VALID UNTIL 'timestamp' \n" -#~ " | IN ROLE rolename [, ...]\n" -#~ " | IN GROUP rolename [, ...]\n" -#~ " | ROLE rolename [, ...]\n" -#~ " | ADMIN rolename [, ...]\n" -#~ " | USER rolename [, ...]\n" -#~ " | SYSID uid" +#~ "CREATE TRIGGER name { BEFORE | AFTER } { event [ OR ... 
] }\n" +#~ " ON table [ FOR [ EACH ] { ROW | STATEMENT } ]\n" +#~ " EXECUTE PROCEDURE funcname ( arguments )" #~ msgstr "" -#~ "CREATE ROLE namn [ [ WITH ] alternativ [ ... ] ]\n" -#~ "\n" -#~ "där alternativ kan vara:\n" -#~ " \n" -#~ " SUPERUSER | NOSUPERUSER\n" -#~ " | CREATEDB | NOCREATEDB\n" -#~ " | CREATEROLE | NOCREATEROLE\n" -#~ " | CREATEUSER | NOCREATEUSER\n" -#~ " | INHERIT | NOINHERIT\n" -#~ " | LOGIN | NOLOGIN\n" -#~ " | CONNECTION LIMIT anslutningstak\n" -#~ " | [ ENCRYPTED | UNENCRYPTED ] PASSWORD 'lösenord'\n" -#~ " | VALID UNTIL 'tidsstämpel' \n" -#~ " | IN ROLE rollnamn [, ...]\n" -#~ " | IN GROUP rollnamn [, ...]\n" -#~ " | ROLE rollnamn [, ...]\n" -#~ " | ADMIN rollnamn [, ...]\n" -#~ " | USER rollnamn [, ...]\n" -#~ " | SYSID uid" - -#~ msgid "CREATE OPERATOR FAMILY name USING index_method" -#~ msgstr "CREATE OPERATOR FAMILY namn USING indexmetod" +#~ "CREATE TRIGGER namn { BEFORE | AFTER } { händelse [ OR ... ] }\n" +#~ " ON tabell [ FOR [ EACH ] { ROW | STATEMENT } ]\n" +#~ " EXECUTE PROCEDURE funknamn ( argument )" #, fuzzy #~ msgid "" -#~ "CREATE OPERATOR CLASS name [ DEFAULT ] FOR TYPE data_type\n" -#~ " USING index_method [ FAMILY family_name ] AS\n" -#~ " { OPERATOR strategy_number operator_name [ ( op_type, op_type ) ]\n" -#~ " | FUNCTION support_number [ ( op_type [ , op_type ] ) ] funcname ( argument_type [, ...] )\n" -#~ " | STORAGE storage_type\n" -#~ " } [, ... ]" -#~ msgstr "" -#~ "CREATE OPERATOR CLASS namn [ DEFAULT ] FOR TYPE datatyp USING indexmetod AS\n" -#~ " { OPERATOR strateginummer operatornamn [ ( op_typ, op_typ ) ] [ RECHECK ]\n" -#~ " | FUNCTION supportnummer funknamn ( argumenttyp [, ...] )\n" -#~ " | STORAGE lagringstyp\n" -#~ " } [, ... ]" - -#~ msgid "" -#~ "CREATE OPERATOR name (\n" -#~ " PROCEDURE = funcname\n" -#~ " [, LEFTARG = lefttype ] [, RIGHTARG = righttype ]\n" -#~ " [, COMMUTATOR = com_op ] [, NEGATOR = neg_op ]\n" -#~ " [, RESTRICT = res_proc ] [, JOIN = join_proc ]\n" -#~ " [, HASHES ] [, MERGES ]\n" -#~ ")" -#~ msgstr "" -#~ "CREATE OPERATOR namn (\n" -#~ " PROCEDURE = funknamn\n" -#~ " [, LEFTARG = vänster-typ ] [, RIGHTARG = höger-typ ]\n" -#~ " [, COMMUTATOR = com_op ] [, NEGATOR = neg_op ]\n" -#~ " [, RESTRICT = res_proc ] [, JOIN = join_proc ]\n" -#~ " [, HASHES ] [, MERGES ]\n" -#~ ")" - -#~ msgid "" -#~ "CREATE [ PROCEDURAL ] LANGUAGE name\n" -#~ "CREATE [ TRUSTED ] [ PROCEDURAL ] LANGUAGE name\n" -#~ " HANDLER call_handler [ VALIDATOR valfunction ]" -#~ msgstr "" -#~ "CREATE [ PROCEDURAL ] LANGUAGE namn\n" -#~ "CREATE [ TRUSTED ] [ PROCEDURAL ] LANGUAGE namn\n" -#~ " HANDLER anropshanterare [ VALIDATOR val-funktion ]" - -#~ msgid "" -#~ "CREATE [ UNIQUE ] INDEX [ CONCURRENTLY ] name ON table [ USING method ]\n" -#~ " ( { column | ( expression ) } [ opclass ] [ ASC | DESC ] [ NULLS { FIRST | LAST } ] [, ...] )\n" -#~ " [ WITH ( storage_parameter = value [, ... ] ) ]\n" -#~ " [ TABLESPACE tablespace ]\n" -#~ " [ WHERE predicate ]" +#~ "CREATE TYPE name AS\n" +#~ " ( attribute_name data_type [, ... ] )\n" +#~ "\n" +#~ "CREATE TYPE name AS ENUM\n" +#~ " ( 'label' [, ... 
] )\n" +#~ "\n" +#~ "CREATE TYPE name (\n" +#~ " INPUT = input_function,\n" +#~ " OUTPUT = output_function\n" +#~ " [ , RECEIVE = receive_function ]\n" +#~ " [ , SEND = send_function ]\n" +#~ " [ , TYPMOD_IN = type_modifier_input_function ]\n" +#~ " [ , TYPMOD_OUT = type_modifier_output_function ]\n" +#~ " [ , ANALYZE = analyze_function ]\n" +#~ " [ , INTERNALLENGTH = { internallength | VARIABLE } ]\n" +#~ " [ , PASSEDBYVALUE ]\n" +#~ " [ , ALIGNMENT = alignment ]\n" +#~ " [ , STORAGE = storage ]\n" +#~ " [ , LIKE = like_type ]\n" +#~ " [ , CATEGORY = category ]\n" +#~ " [ , PREFERRED = preferred ]\n" +#~ " [ , DEFAULT = default ]\n" +#~ " [ , ELEMENT = element ]\n" +#~ " [ , DELIMITER = delimiter ]\n" +#~ ")\n" +#~ "\n" +#~ "CREATE TYPE name" #~ msgstr "" -#~ "CREATE [ UNIQUE ] INDEX [ CONCURRENTLY ] namn ON tabell [ USING metod ]\n" -#~ " ( { kolumn | ( uttryck ) } [ op-klass ] [ ASC | DESC ] [ NULLS { FIRST | LAST } ] [, ...] )\n" -#~ " [ WITH ( lagringsparameter = värde [, ... ] ) ]\n" -#~ " [ TABLESPACE tabellutrymme ]\n" -#~ " [ WHERE predikat ]" +#~ "CREATE TYPE namn AS\n" +#~ " ( attributnamn datatyp [, ... ] )\n" +#~ "\n" +#~ "CREATE TYPE namn (\n" +#~ " INPUT = inmatningsfunktion,\n" +#~ " OUTPUT = utmatningsfunktion\n" +#~ " [ , RECEIVE = mottagarfunktion ]\n" +#~ " [ , SEND = sändfunktion ]\n" +#~ " [ , ANALYZE = analysfunktion ]\n" +#~ " [ , INTERNALLENGTH = { internlängd | VARIABLE } ]\n" +#~ " [ , PASSEDBYVALUE ]\n" +#~ " [ , ALIGNMENT = justering ]\n" +#~ " [ , STORAGE = lagring ]\n" +#~ " [ , DEFAULT = standard ]\n" +#~ " [ , ELEMENT = element ]\n" +#~ " [ , DELIMITER = avskiljare ]\n" +#~ ")\n" +#~ "\n" +#~ "CREATE TYPE namn" #~ msgid "" -#~ "CREATE GROUP name [ [ WITH ] option [ ... ] ]\n" +#~ "CREATE USER name [ [ WITH ] option [ ... ] ]\n" #~ "\n" #~ "where option can be:\n" #~ " \n" @@ -6618,6 +7211,7 @@ msgstr "" #~ " | CREATEUSER | NOCREATEUSER\n" #~ " | INHERIT | NOINHERIT\n" #~ " | LOGIN | NOLOGIN\n" +#~ " | CONNECTION LIMIT connlimit\n" #~ " | [ ENCRYPTED | UNENCRYPTED ] PASSWORD 'password'\n" #~ " | VALID UNTIL 'timestamp' \n" #~ " | IN ROLE rolename [, ...]\n" @@ -6627,7 +7221,7 @@ msgstr "" #~ " | USER rolename [, ...]\n" #~ " | SYSID uid" #~ msgstr "" -#~ "CREATE GROUP namn [ [ WITH ] alternativ [ ... ] ]\n" +#~ "CREATE USER namn [ [ WITH ] alternativ [ ... ] ]\n" #~ "\n" #~ "där alternativ kan vara:\n" #~ " \n" @@ -6637,6 +7231,7 @@ msgstr "" #~ " | CREATEUSER | NOCREATEUSER\n" #~ " | INHERIT | NOINHERIT\n" #~ " | LOGIN | NOLOGIN\n" +#~ " | CONNECTION LIMIT anslutningstak\n" #~ " | [ ENCRYPTED | UNENCRYPTED ] PASSWORD 'lösenord'\n" #~ " | VALID UNTIL 'tidsstämpel' \n" #~ " | IN ROLE rollnamn [, ...]\n" @@ -6646,805 +7241,640 @@ msgstr "" #~ " | USER rollnamn [, ...]\n" #~ " | SYSID uid" -#, fuzzy #~ msgid "" -#~ "CREATE [ OR REPLACE ] FUNCTION\n" -#~ " name ( [ [ argmode ] [ argname ] argtype [ { DEFAULT | = } defexpr ] [, ...] ] )\n" -#~ " [ RETURNS rettype\n" -#~ " | RETURNS TABLE ( colname coltype [, ...] ) ]\n" -#~ " { LANGUAGE langname\n" -#~ " | WINDOW\n" -#~ " | IMMUTABLE | STABLE | VOLATILE\n" -#~ " | CALLED ON NULL INPUT | RETURNS NULL ON NULL INPUT | STRICT\n" -#~ " | [ EXTERNAL ] SECURITY INVOKER | [ EXTERNAL ] SECURITY DEFINER\n" -#~ " | COST execution_cost\n" -#~ " | ROWS result_rows\n" -#~ " | SET configuration_parameter { TO value | = value | FROM CURRENT }\n" -#~ " | AS 'definition'\n" -#~ " | AS 'obj_file', 'link_symbol'\n" -#~ " } ...\n" -#~ " [ WITH ( attribute [, ...] 
) ]" +#~ "CREATE [ OR REPLACE ] [ TEMP | TEMPORARY ] VIEW name [ ( column_name [, ...] ) ]\n" +#~ " AS query" #~ msgstr "" -#~ "CREATE [ OR REPLACE ] FUNCTION\n" -#~ " namn ( [ [ arg_läge ] [ arg_namn ] arg_typ [, ...] ] )\n" -#~ " [ RETURNS rettyp ]\n" -#~ " { LANGUAGE språknamn\n" -#~ " | IMMUTABLE | STABLE | VOLATILE\n" -#~ " | CALLED ON NULL INPUT | RETURNS NULL ON NULL INPUT | STRICT\n" -#~ " | [EXTERNAL] SECURITY INVOKER | [EXTERNAL] SECURITY DEFINER\n" -#~ " | AS 'definition'\n" -#~ " | AS 'obj-fil', 'länksymbol'\n" -#~ " } ...\n" -#~ " [ WITH ( attribut [, ...] ) ]" +#~ "CREATE [ OR REPLACE ] [ TEMP | TEMPORARY ] VIEW namn [ ( kolumnnamn [, ...] ) ]\n" +#~ " AS fråga" -#~ msgid "" -#~ "CREATE DOMAIN name [ AS ] data_type\n" -#~ " [ DEFAULT expression ]\n" -#~ " [ constraint [ ... ] ]\n" -#~ "\n" -#~ "where constraint is:\n" -#~ "\n" -#~ "[ CONSTRAINT constraint_name ]\n" -#~ "{ NOT NULL | NULL | CHECK (expression) }" -#~ msgstr "" -#~ "CREATE DOMAIN namn [ AS ] datatyp\n" -#~ " [ DEFAULT uttryck ]\n" -#~ " [ villkor [ ... ] ]\n" -#~ "\n" -#~ "där villkor är:\n" -#~ "\n" -#~ "[ CONSTRAINT villkorsnamn ]\n" -#~ "{ NOT NULL | NULL | CHECK (uttryck) }" +#~ msgid "DEALLOCATE [ PREPARE ] { name | ALL }" +#~ msgstr "DEALLOCATE [ PREPARE ] { namn | ALL }" -#, fuzzy #~ msgid "" -#~ "CREATE DATABASE name\n" -#~ " [ [ WITH ] [ OWNER [=] dbowner ]\n" -#~ " [ TEMPLATE [=] template ]\n" -#~ " [ ENCODING [=] encoding ]\n" -#~ " [ LC_COLLATE [=] lc_collate ]\n" -#~ " [ LC_CTYPE [=] lc_ctype ]\n" -#~ " [ TABLESPACE [=] tablespace ]\n" -#~ " [ CONNECTION LIMIT [=] connlimit ] ]" +#~ "DECLARE name [ BINARY ] [ INSENSITIVE ] [ [ NO ] SCROLL ]\n" +#~ " CURSOR [ { WITH | WITHOUT } HOLD ] FOR query" #~ msgstr "" -#~ "CREATE DATABASE namn\n" -#~ " [ [ WITH ] [ OWNER [=] db-ägare ]\n" -#~ " [ TEMPLATE [=] mall ]\n" -#~ " [ ENCODING [=] kodning ]\n" -#~ " [ TABLESPACE [=] tabellutrymme ] ]\n" -#~ " [ CONNECTION LIMIT [=] anslutningstak ] ]" +#~ "DECLARE namn [ BINARY ] [ INSENSITIVE ] [ [ NO ] SCROLL ]\n" +#~ " CURSOR [ { WITH | WITHOUT } HOLD ] FOR fråga" #~ msgid "" -#~ "CREATE [ DEFAULT ] CONVERSION name\n" -#~ " FOR source_encoding TO dest_encoding FROM funcname" +#~ "DELETE FROM [ ONLY ] table [ [ AS ] alias ]\n" +#~ " [ USING usinglist ]\n" +#~ " [ WHERE condition | WHERE CURRENT OF cursor_name ]\n" +#~ " [ RETURNING * | output_expression [ [ AS ] output_name ] [, ...] ]" #~ msgstr "" -#~ "CREATE [ DEFAULT ] CONVERSION namn\n" -#~ " FOR källkodning TO målkodning FROM funknamn" +#~ "DELETE FROM [ ONLY ] tabell [ [ AS ] alias ]\n" +#~ " [ USING using-lista ]\n" +#~ " [ WHERE villkor | WHERE CURRENT OF märkörnamn ]\n" +#~ " [ RETURNING * | utdatauttryck [ [ AS ] utdatanamn ] [, ...] ]" + +#~ msgid "DISCARD { ALL | PLANS | TEMPORARY | TEMP }" +#~ msgstr "DISCARD { ALL | PLANS | TEMPORARY | TEMP }" + +#~ msgid "DROP AGGREGATE [ IF EXISTS ] name ( type [ , ... ] ) [ CASCADE | RESTRICT ]" +#~ msgstr "DROP AGGREGATE [ IF EXISTS ] namn ( typ [ , ... ] ) [ CASCADE | RESTRICT ]" + +#~ msgid "DROP CAST [ IF EXISTS ] (sourcetype AS targettype) [ CASCADE | RESTRICT ]" +#~ msgstr "DROP CAST [ IF EXISTS ] (källtyp AS måltyp) [ CASCADE | RESTRICT ]" + +#~ msgid "DROP CONVERSION [ IF EXISTS ] name [ CASCADE | RESTRICT ]" +#~ msgstr "DROP CONVERSION [ IF EXISTS ] namn [ CASCADE | RESTRICT ]" + +#~ msgid "DROP DATABASE [ IF EXISTS ] name" +#~ msgstr "DROP DATABASE [ IF EXISTS ] namn" + +#~ msgid "DROP DOMAIN [ IF EXISTS ] name [, ...] [ CASCADE | RESTRICT ]" +#~ msgstr "DROP DOMAIN [ IF EXISTS ] namn [, ...] 
[ CASCADE | RESTRICT ]" + +#, fuzzy +#~ msgid "DROP FOREIGN DATA WRAPPER [ IF EXISTS ] name [ CASCADE | RESTRICT ]" +#~ msgstr "DROP CONVERSION [ IF EXISTS ] namn [ CASCADE | RESTRICT ]" #~ msgid "" -#~ "CREATE CONSTRAINT TRIGGER name\n" -#~ " AFTER event [ OR ... ]\n" -#~ " ON table_name\n" -#~ " [ FROM referenced_table_name ]\n" -#~ " { NOT DEFERRABLE | [ DEFERRABLE ] { INITIALLY IMMEDIATE | INITIALLY DEFERRED } }\n" -#~ " FOR EACH ROW\n" -#~ " EXECUTE PROCEDURE funcname ( arguments )" +#~ "DROP FUNCTION [ IF EXISTS ] name ( [ [ argmode ] [ argname ] argtype [, ...] ] )\n" +#~ " [ CASCADE | RESTRICT ]" #~ msgstr "" -#~ "CREATE CONSTRAINT TRIGGER namn \n" -#~ " AFTER händelse [ OR ... ]\n" -#~ " ON tabellnamn\n" -#~ " [ FROM refererat_tabellnamn ]\n" -#~ " { NOT DEFERRABLE | [ DEFERRABLE ] { INITIALLY IMMEDIATE | INITIALLY DEFERRED } }\n" -#~ " FOR EACH ROW\n" -#~ " EXECUTE PROCEDURE funktionsnamn ( argument )" +#~ "DROP FUNCTION [ IF EXISTS ] namn ( [ [ arg_läge ] [ arg_namn ] arg_typ [, ...] ] )\n" +#~ " [ CASCADE | RESTRICT ]" + +#~ msgid "DROP GROUP [ IF EXISTS ] name [, ...]" +#~ msgstr "DROP GROUP [ IF EXISTS ] namn [, ...]" + +#~ msgid "DROP INDEX [ IF EXISTS ] name [, ...] [ CASCADE | RESTRICT ]" +#~ msgstr "DROP INDEX [ IF EXISTS ] namn [, ...] [ CASCADE | RESTRICT ]" + +#~ msgid "DROP [ PROCEDURAL ] LANGUAGE [ IF EXISTS ] name [ CASCADE | RESTRICT ]" +#~ msgstr "DROP [ PROCEDURAL ] LANGUAGE [ IF EXISTS ] namn [ CASCADE | RESTRICT ]" + +#~ msgid "DROP OPERATOR [ IF EXISTS ] name ( { lefttype | NONE } , { righttype | NONE } ) [ CASCADE | RESTRICT ]" +#~ msgstr "DROP OPERATOR [ IF EXISTS ] namn ( { vänster_typ | NONE } , { höger_typ | NONE } ) [ CASCADE | RESTRICT ]" + +#~ msgid "DROP OPERATOR CLASS [ IF EXISTS ] name USING index_method [ CASCADE | RESTRICT ]" +#~ msgstr "DROP OPERATOR CLASS [ IF EXISTS ] namn USING indexmetod [ CASCADE | RESTRICT ]" + +#~ msgid "DROP OPERATOR FAMILY [ IF EXISTS ] name USING index_method [ CASCADE | RESTRICT ]" +#~ msgstr "DROP OPERATOR FAMILY [ IF EXISTS ] namn USING indexmetod [ CASCADE | RESTRICT ]" + +#~ msgid "DROP OWNED BY name [, ...] [ CASCADE | RESTRICT ]" +#~ msgstr "DROP OWNED BY namn [, ...] [ CASCADE | RESTRICT ]" + +#~ msgid "DROP ROLE [ IF EXISTS ] name [, ...]" +#~ msgstr "DROP ROLE [ IF EXISTS ] namn [, ...]" + +#~ msgid "DROP RULE [ IF EXISTS ] name ON relation [ CASCADE | RESTRICT ]" +#~ msgstr "DROP RULE [ IF EXISTS ] namn ON relation [ CASCADE | RESTRICT ]" + +#~ msgid "DROP SCHEMA [ IF EXISTS ] name [, ...] [ CASCADE | RESTRICT ]" +#~ msgstr "DROP SCHEMA [ IF EXISTS ] namn [, ...] [ CASCADE | RESTRICT ]" + +#~ msgid "DROP SEQUENCE [ IF EXISTS ] name [, ...] [ CASCADE | RESTRICT ]" +#~ msgstr "DROP SEQUENCE [ IF EXISTS ] namn [, ...] [ CASCADE | RESTRICT ]" + +#, fuzzy +#~ msgid "DROP SERVER [ IF EXISTS ] servername [ CASCADE | RESTRICT ]" +#~ msgstr "DROP CONVERSION [ IF EXISTS ] namn [ CASCADE | RESTRICT ]" + +#~ msgid "DROP TABLE [ IF EXISTS ] name [, ...] [ CASCADE | RESTRICT ]" +#~ msgstr "DROP TABLE [ IF EXISTS ] namn [, ...] 
[ CASCADE | RESTRICT ]" + +#~ msgid "DROP TABLESPACE [ IF EXISTS ] tablespacename" +#~ msgstr "DROP TABLESPACE [ IF EXISTS ] tabellutrymmesnamn" + +#~ msgid "DROP TEXT SEARCH CONFIGURATION [ IF EXISTS ] name [ CASCADE | RESTRICT ]" +#~ msgstr "DROP TEXT SEARCH CONFIGURATION [ IF EXISTS ] namn [ CASCADE | RESTRICT ]" + +#~ msgid "DROP TEXT SEARCH DICTIONARY [ IF EXISTS ] name [ CASCADE | RESTRICT ]" +#~ msgstr "DROP TEXT SEARCH DICTIONARY [ IF EXISTS ] namn [ CASCADE | RESTRICT ]" + +#~ msgid "DROP TEXT SEARCH PARSER [ IF EXISTS ] name [ CASCADE | RESTRICT ]" +#~ msgstr "DROP TEXT SEARCH PARSER [ IF EXISTS ] namn [ CASCADE | RESTRICT ]" + +#~ msgid "DROP TEXT SEARCH TEMPLATE [ IF EXISTS ] name [ CASCADE | RESTRICT ]" +#~ msgstr "DROP TEXT SEARCH TEMPLATE [ IF EXISTS ] namn [ CASCADE | RESTRICT ]" + +#~ msgid "DROP TRIGGER [ IF EXISTS ] name ON table [ CASCADE | RESTRICT ]" +#~ msgstr "DROP TRIGGER [ IF EXISTS ] namn ON tabell [ CASCADE | RESTRICT ]" + +#~ msgid "DROP TYPE [ IF EXISTS ] name [, ...] [ CASCADE | RESTRICT ]" +#~ msgstr "DROP TYPE [ IF EXISTS ] namn [, ...] [ CASCADE | RESTRICT ]" + +#~ msgid "DROP USER [ IF EXISTS ] name [, ...]" +#~ msgstr "DROP USER [ IF EXISTS ] namn [, ...]" + +#~ msgid "DROP VIEW [ IF EXISTS ] name [, ...] [ CASCADE | RESTRICT ]" +#~ msgstr "DROP VIEW [ IF EXISTS ] namn [, ...] [ CASCADE | RESTRICT ]" + +#~ msgid "END [ WORK | TRANSACTION ]" +#~ msgstr "END [ WORK | TRANSACTION ]" + +#~ msgid "EXECUTE name [ ( parameter [, ...] ) ]" +#~ msgstr "EXECUTE namn [ ( parameter [, ...] ) ]" + +#~ msgid "EXPLAIN [ ANALYZE ] [ VERBOSE ] statement" +#~ msgstr "EXPLAIN [ ANALYZE ] [ VERBOSE ] sats" #~ msgid "" -#~ "CREATE CAST (sourcetype AS targettype)\n" -#~ " WITH FUNCTION funcname (argtypes)\n" -#~ " [ AS ASSIGNMENT | AS IMPLICIT ]\n" +#~ "FETCH [ direction { FROM | IN } ] cursorname\n" #~ "\n" -#~ "CREATE CAST (sourcetype AS targettype)\n" -#~ " WITHOUT FUNCTION\n" -#~ " [ AS ASSIGNMENT | AS IMPLICIT ]\n" +#~ "where direction can be empty or one of:\n" #~ "\n" -#~ "CREATE CAST (sourcetype AS targettype)\n" -#~ " WITH INOUT\n" -#~ " [ AS ASSIGNMENT | AS IMPLICIT ]" +#~ " NEXT\n" +#~ " PRIOR\n" +#~ " FIRST\n" +#~ " LAST\n" +#~ " ABSOLUTE count\n" +#~ " RELATIVE count\n" +#~ " count\n" +#~ " ALL\n" +#~ " FORWARD\n" +#~ " FORWARD count\n" +#~ " FORWARD ALL\n" +#~ " BACKWARD\n" +#~ " BACKWARD count\n" +#~ " BACKWARD ALL" #~ msgstr "" -#~ "CREATE CAST (källtyp AS måltyp)\n" -#~ " WITH FUNCTION funknamn (argtyper)\n" -#~ " [ AS ASSIGNMENT | AS IMPLICIT ]\n" +#~ "FETCH [ riktning { FROM | IN } ] markörsnamn\n" #~ "\n" -#~ "CREATE CAST (källtyp AS måltyp)\n" -#~ " WITHOUT FUNCTION\n" -#~ " [ AS ASSIGNMENT | AS IMPLICIT ]\n" +#~ "där riktning kan vara tom eller en av:\n" #~ "\n" -#~ "CREATE CAST (källtyp AS måltyp)\n" -#~ " WITH INOUT\n" -#~ " [ AS ASSIGNMENT | AS IMPLICIT ]" +#~ " NEXT\n" +#~ " PRIOR\n" +#~ " FIRST\n" +#~ " LAST\n" +#~ " ABSOLUTE antal\n" +#~ " RELATIVE antal\n" +#~ " antal\n" +#~ " ALL\n" +#~ " FORWARD\n" +#~ " FORWARD antal\n" +#~ " FORWARD ALL\n" +#~ " BACKWARD\n" +#~ " BACKWARD antal\n" +#~ " BACKWARD ALL" +#, fuzzy #~ msgid "" -#~ "CREATE AGGREGATE name ( input_data_type [ , ... ] ) (\n" -#~ " SFUNC = sfunc,\n" -#~ " STYPE = state_data_type\n" -#~ " [ , FINALFUNC = ffunc ]\n" -#~ " [ , INITCOND = initial_condition ]\n" -#~ " [ , SORTOP = sort_operator ]\n" -#~ ")\n" +#~ "GRANT { { SELECT | INSERT | UPDATE | DELETE | TRUNCATE | REFERENCES | TRIGGER }\n" +#~ " [,...] 
| ALL [ PRIVILEGES ] }\n" +#~ " ON [ TABLE ] tablename [, ...]\n" +#~ " TO { [ GROUP ] rolename | PUBLIC } [, ...] [ WITH GRANT OPTION ]\n" #~ "\n" -#~ "or the old syntax\n" +#~ "GRANT { { SELECT | INSERT | UPDATE | REFERENCES } ( column [, ...] )\n" +#~ " [,...] | ALL [ PRIVILEGES ] ( column [, ...] ) }\n" +#~ " ON [ TABLE ] tablename [, ...]\n" +#~ " TO { [ GROUP ] rolename | PUBLIC } [, ...] [ WITH GRANT OPTION ]\n" #~ "\n" -#~ "CREATE AGGREGATE name (\n" -#~ " BASETYPE = base_type,\n" -#~ " SFUNC = sfunc,\n" -#~ " STYPE = state_data_type\n" -#~ " [ , FINALFUNC = ffunc ]\n" -#~ " [ , INITCOND = initial_condition ]\n" -#~ " [ , SORTOP = sort_operator ]\n" -#~ ")" -#~ msgstr "" -#~ "CREATE AGGREGATE namn ( indatatyp [ , ... ] ) (\n" -#~ " SFUNC = sfunc,\n" -#~ " STYPE = tillståndsdatatyp\n" -#~ " [ , FINALFUNC = ffunc ]\n" -#~ " [ , INITCOND = startvärde ]\n" -#~ " [ , SORTOP = sorteringsoperator ]\n" -#~ ")\n" +#~ "GRANT { { USAGE | SELECT | UPDATE }\n" +#~ " [,...] | ALL [ PRIVILEGES ] }\n" +#~ " ON SEQUENCE sequencename [, ...]\n" +#~ " TO { [ GROUP ] rolename | PUBLIC } [, ...] [ WITH GRANT OPTION ]\n" #~ "\n" -#~ "eller den gamla syntaxen\n" +#~ "GRANT { { CREATE | CONNECT | TEMPORARY | TEMP } [,...] | ALL [ PRIVILEGES ] }\n" +#~ " ON DATABASE dbname [, ...]\n" +#~ " TO { [ GROUP ] rolename | PUBLIC } [, ...] [ WITH GRANT OPTION ]\n" #~ "\n" -#~ "CREATE AGGREGATE namn (\n" -#~ " BASETYPE = indatatyp\n" -#~ " SFUNC = sfunc,\n" -#~ " STYPE = tillståndsdatatyp\n" -#~ " [ , FINALFUNC = ffunc ]\n" -#~ " [ , INITCOND = startvärde ]\n" -#~ " [ , SORTOP = sorteringsoperator ]\n" -#~ ")" - -#, fuzzy -#~ msgid "" -#~ "COPY tablename [ ( column [, ...] ) ]\n" -#~ " FROM { 'filename' | STDIN }\n" -#~ " [ [ WITH ] \n" -#~ " [ BINARY ]\n" -#~ " [ OIDS ]\n" -#~ " [ DELIMITER [ AS ] 'delimiter' ]\n" -#~ " [ NULL [ AS ] 'null string' ]\n" -#~ " [ CSV [ HEADER ]\n" -#~ " [ QUOTE [ AS ] 'quote' ] \n" -#~ " [ ESCAPE [ AS ] 'escape' ]\n" -#~ " [ FORCE NOT NULL column [, ...] ]\n" +#~ "GRANT { USAGE | ALL [ PRIVILEGES ] }\n" +#~ " ON FOREIGN DATA WRAPPER fdwname [, ...]\n" +#~ " TO { [ GROUP ] rolename | PUBLIC } [, ...] [ WITH GRANT OPTION ]\n" #~ "\n" -#~ "COPY { tablename [ ( column [, ...] ) ] | ( query ) }\n" -#~ " TO { 'filename' | STDOUT }\n" -#~ " [ [ WITH ] \n" -#~ " [ BINARY ]\n" -#~ " [ OIDS ]\n" -#~ " [ DELIMITER [ AS ] 'delimiter' ]\n" -#~ " [ NULL [ AS ] 'null string' ]\n" -#~ " [ CSV [ HEADER ]\n" -#~ " [ QUOTE [ AS ] 'quote' ] \n" -#~ " [ ESCAPE [ AS ] 'escape' ]\n" -#~ " [ FORCE QUOTE column [, ...] ]" -#~ msgstr "" -#~ "COPY tabellnamn [ ( kolumn [, ...] ) ]\n" -#~ " FROM { 'filnamn' | STDIN }\n" -#~ " [ [ WITH ] \n" -#~ " [ BINARY ] \n" -#~ " [ OIDS ]\n" -#~ " [ DELIMITER [ AS ] 'avdelare' ]\n" -#~ " [ NULL [ AS ] 'null-sträng' ] ]\n" -#~ " [ CSV [ HEADER ]\n" -#~ " [ QUOTE [ AS ] 'citat' ]\n" -#~ " [ ESCAPE [ AS ] 'escape' ]\n" -#~ " [ FORCE NOT NULL kolumn [, ...] ]\n" +#~ "GRANT { USAGE | ALL [ PRIVILEGES ] }\n" +#~ " ON FOREIGN SERVER servername [, ...]\n" +#~ " TO { [ GROUP ] rolename | PUBLIC } [, ...] [ WITH GRANT OPTION ]\n" #~ "\n" -#~ "COPY { tabellnamn [ ( kolumn [, ...] ) ] | ( fråga ) }\n" -#~ " TO { 'filnamn' | STDOUT }\n" -#~ " [ [ WITH ] \n" -#~ " [ BINARY ]\n" -#~ " [ HEADER ]\n" -#~ " [ OIDS ]\n" -#~ " [ DELIMITER [ AS ] 'avdelare' ]\n" -#~ " [ NULL [ AS ] 'null-sträng' ] ]\n" -#~ " [ CSV [ HEADER ]\n" -#~ " [ QUOTE [ AS ] 'citat' ]\n" -#~ " [ ESCAPE [ AS ] 'escape' ]\n" -#~ " [ FORCE QUOTE kolumn [, ...] 
]" - -#~ msgid "COMMIT PREPARED transaction_id" -#~ msgstr "COMMIT PREPARED transaktions-id" - -#~ msgid "COMMIT [ WORK | TRANSACTION ]" -#~ msgstr "COMMIT [ WORK | TRANSACTION ]" - -#, fuzzy -#~ msgid "" -#~ "COMMENT ON\n" -#~ "{\n" -#~ " TABLE object_name |\n" -#~ " COLUMN table_name.column_name |\n" -#~ " AGGREGATE agg_name (agg_type [, ...] ) |\n" -#~ " CAST (sourcetype AS targettype) |\n" -#~ " CONSTRAINT constraint_name ON table_name |\n" -#~ " CONVERSION object_name |\n" -#~ " DATABASE object_name |\n" -#~ " DOMAIN object_name |\n" -#~ " FUNCTION func_name ( [ [ argmode ] [ argname ] argtype [, ...] ] ) |\n" -#~ " INDEX object_name |\n" -#~ " LARGE OBJECT large_object_oid |\n" -#~ " OPERATOR op (leftoperand_type, rightoperand_type) |\n" -#~ " OPERATOR CLASS object_name USING index_method |\n" -#~ " OPERATOR FAMILY object_name USING index_method |\n" -#~ " [ PROCEDURAL ] LANGUAGE object_name |\n" -#~ " ROLE object_name |\n" -#~ " RULE rule_name ON table_name |\n" -#~ " SCHEMA object_name |\n" -#~ " SEQUENCE object_name |\n" -#~ " TABLESPACE object_name |\n" -#~ " TEXT SEARCH CONFIGURATION object_name |\n" -#~ " TEXT SEARCH DICTIONARY object_name |\n" -#~ " TEXT SEARCH PARSER object_name |\n" -#~ " TEXT SEARCH TEMPLATE object_name |\n" -#~ " TRIGGER trigger_name ON table_name |\n" -#~ " TYPE object_name |\n" -#~ " VIEW object_name\n" -#~ "} IS 'text'" +#~ "GRANT { EXECUTE | ALL [ PRIVILEGES ] }\n" +#~ " ON FUNCTION funcname ( [ [ argmode ] [ argname ] argtype [, ...] ] ) [, ...]\n" +#~ " TO { [ GROUP ] rolename | PUBLIC } [, ...] [ WITH GRANT OPTION ]\n" +#~ "\n" +#~ "GRANT { USAGE | ALL [ PRIVILEGES ] }\n" +#~ " ON LANGUAGE langname [, ...]\n" +#~ " TO { [ GROUP ] rolename | PUBLIC } [, ...] [ WITH GRANT OPTION ]\n" +#~ "\n" +#~ "GRANT { { CREATE | USAGE } [,...] | ALL [ PRIVILEGES ] }\n" +#~ " ON SCHEMA schemaname [, ...]\n" +#~ " TO { [ GROUP ] rolename | PUBLIC } [, ...] [ WITH GRANT OPTION ]\n" +#~ "\n" +#~ "GRANT { CREATE | ALL [ PRIVILEGES ] }\n" +#~ " ON TABLESPACE tablespacename [, ...]\n" +#~ " TO { [ GROUP ] rolename | PUBLIC } [, ...] [ WITH GRANT OPTION ]\n" +#~ "\n" +#~ "GRANT role [, ...] TO rolename [, ...] [ WITH ADMIN OPTION ]" #~ msgstr "" -#~ "COMMENT ON\n" -#~ "{\n" -#~ " TABLE objektname |\n" -#~ " COLUMN tabellnamn.kolumnnamn |\n" -#~ " AGGREGATE agg_namn (agg_typ) |\n" -#~ " CAST (källtyp AS måltyp) |\n" -#~ " CONSTRAINT villkorsnamn ON tabellnamn |\n" -#~ " CONVERSION objektnamn |\n" -#~ " DATABASE objektnamn |\n" -#~ " DOMAIN objektnamn |\n" -#~ " FUNCTION funk_namn ( [ [ arg_läge ] [ arg_namn ] arg_typ [, ...] ] ) |\n" -#~ " INDEX objektnamn |\n" -#~ " LARGE OBJECT stort_objekt_oid |\n" -#~ " OPERATOR op (vänster operandstyp, höger operandstyp) |\n" -#~ " OPERATOR CLASS objektnamn USING indexmetod |\n" -#~ " [ PROCEDURAL ] LANGUAGE objektnamn |\n" -#~ " ROLE objektnamn |\n" -#~ " RULE regelnamn ON tabellnamn |\n" -#~ " SCHEMA objektnamn |\n" -#~ " SEQUENCE objektnamn |\n" -#~ " TRIGGER utlösarnamn ON tabellnamn |\n" -#~ " TYPE objektnamn |\n" -#~ " VIEW objektnamn\n" -#~ "} IS 'text'" +#~ "GRANT { { SELECT | INSERT | UPDATE | DELETE | REFERENCES | TRIGGER }\n" +#~ " [,...] | ALL [ PRIVILEGES ] }\n" +#~ " ON [ TABLE ] tabellnamn [, ...]\n" +#~ " TO { användarnamn | GROUP gruppnamn | PUBLIC } [, ...] [ WITH GRANT OPTION ]\n" +#~ "\n" +#~ "GRANT { { USAGE | SELECT | UPDATE }\n" +#~ " [,...] | ALL [ PRIVILEGES ] }\n" +#~ " ON SEQUENCE sekvensnamn [, ...]\n" +#~ " TO { användarnamn | GROUP gruppnamn | PUBLIC } [, ...] 
[ WITH GRANT OPTION ]\n" +#~ "\n" +#~ "GRANT { { CREATE | CONNECT | TEMPORARY | TEMP } [,...] | ALL [ PRIVILEGES ] }\n" +#~ " ON DATABASE dbnamn [, ...]\n" +#~ " TO { användarnamn | GROUP gruppnamn | PUBLIC } [, ...] [ WITH GRANT OPTION ]\n" +#~ "\n" +#~ "GRANT { EXECUTE | ALL [ PRIVILEGES ] }\n" +#~ " ON FUNCTION funkname ( [ [ arg_läge ] [ arg_namn ] arg_typ [, ...] ] ) [, ...]\n" +#~ " TO { användarnamn | GROUP gruppnamn | PUBLIC } [, ...] [ WITH GRANT OPTION ]\n" +#~ "\n" +#~ "GRANT { USAGE | ALL [ PRIVILEGES ] }\n" +#~ " ON LANGUAGE språknamn [, ...]\n" +#~ " TO { användarnamn | GROUP gruppnamn | PUBLIC } [, ...] [ WITH GRANT OPTION ]\n" +#~ "\n" +#~ "GRANT { { CREATE | USAGE } [,...] | ALL [ PRIVILEGES ] }\n" +#~ " ON SCHEMA schemanamn [, ...]\n" +#~ " TO { användarnamn | GROUP gruppnamn | PUBLIC } [, ...] [ WITH GRANT OPTION ]\n" +#~ "\n" +#~ "GRANT { CREATE | ALL [ PRIVILEGES ] }\n" +#~ " ON TABLESPACE tabellutrymmesnamn [, ...]\n" +#~ " TO { användarnamn | GROUP gruppnamn | PUBLIC } [, ...] [ WITH GRANT OPTION ]\n" +#~ "\n" +#~ "GRANT roll [, ...] TO användarnamn [, ...] [ WITH ADMIN OPTION ]" #~ msgid "" -#~ "CLUSTER [VERBOSE] tablename [ USING indexname ]\n" -#~ "CLUSTER [VERBOSE]" +#~ "INSERT INTO table [ ( column [, ...] ) ]\n" +#~ " { DEFAULT VALUES | VALUES ( { expression | DEFAULT } [, ...] ) [, ...] | query }\n" +#~ " [ RETURNING * | output_expression [ [ AS ] output_name ] [, ...] ]" #~ msgstr "" -#~ "CLUSTER [VERBOSE] tabellnamn [ USING indexnamn ]\n" -#~ "CLUSTER [VERBOSE]" +#~ "INSERT INTO tabell [ ( kolumn [, ...] ) ]\n" +#~ " { DEFAULT VALUES | VALUES ( { uttryck | DEFAULT } [, ...] ) [, ...] | fråga }\n" +#~ " [ RETURNING * | utdatauttryck [ [ AS ] utdatanamn ] [, ...] ]" -#~ msgid "CLOSE { name | ALL }" -#~ msgstr "CLOSE { namn | ALL }" +#~ msgid "LISTEN name" +#~ msgstr "LISTEN namn" -#~ msgid "CHECKPOINT" -#~ msgstr "CHECKPOINT" +#~ msgid "LOAD 'filename'" +#~ msgstr "LOAD 'filnamn'" #~ msgid "" -#~ "BEGIN [ WORK | TRANSACTION ] [ transaction_mode [, ...] ]\n" +#~ "LOCK [ TABLE ] [ ONLY ] name [, ...] [ IN lockmode MODE ] [ NOWAIT ]\n" #~ "\n" -#~ "where transaction_mode is one of:\n" +#~ "where lockmode is one of:\n" #~ "\n" -#~ " ISOLATION LEVEL { SERIALIZABLE | REPEATABLE READ | READ COMMITTED | READ UNCOMMITTED }\n" -#~ " READ WRITE | READ ONLY" +#~ " ACCESS SHARE | ROW SHARE | ROW EXCLUSIVE | SHARE UPDATE EXCLUSIVE\n" +#~ " | SHARE | SHARE ROW EXCLUSIVE | EXCLUSIVE | ACCESS EXCLUSIVE" #~ msgstr "" -#~ "BEGIN [ WORK | TRANSACTION ] [ transaktionsläge [, ...] ]\n" +#~ "LOCK [ TABLE ] [ ONLY ] namn [, ...] [ IN låsläge MODE ] [ NOWAIT ]\n" #~ "\n" -#~ "där transaktionsläge är en av:\n" +#~ "där låsläge är en av:\n" #~ "\n" -#~ " ISOLATION LEVEL { SERIALIZABLE | REPEATABLE READ | READ COMMITTED | READ UNCOMMITTED }\n" -#~ " READ WRITE | READ ONLY" +#~ " ACCESS SHARE | ROW SHARE | ROW EXCLUSIVE | SHARE UPDATE EXCLUSIVE\n" +#~ " | SHARE | SHARE ROW EXCLUSIVE | EXCLUSIVE | ACCESS EXCLUSIVE" -#~ msgid "ANALYZE [ VERBOSE ] [ table [ ( column [, ...] ) ] ]" -#~ msgstr "ANALYZE [ VERBOSE ] [ tabell [ ( kolumn [, ...] 
) ] ]" +#~ msgid "MOVE [ direction { FROM | IN } ] cursorname" +#~ msgstr "MOVE [ riktning { FROM | IN } ] markörnamn" -#~ msgid "" -#~ "ALTER VIEW name ALTER [ COLUMN ] column SET DEFAULT expression\n" -#~ "ALTER VIEW name ALTER [ COLUMN ] column DROP DEFAULT\n" -#~ "ALTER VIEW name OWNER TO new_owner\n" -#~ "ALTER VIEW name RENAME TO new_name\n" -#~ "ALTER VIEW name SET SCHEMA new_schema" -#~ msgstr "" -#~ "ALTER VIEW namn ALTER [ COLUMN ] kolumn SET DEFAULT uttryck\n" -#~ "ALTER VIEW namn ALTER [ COLUMN ] kolumn DROP DEFAULT\n" -#~ "ALTER VIEW namn OWNER TO ny_ägare\n" -#~ "ALTER VIEW namn RENAME TO nytt_namn\n" -#~ "ALTER VIEW namn SET SCHEMA nytt_schema" +#~ msgid "NOTIFY name" +#~ msgstr "NOTIFY namn" + +#~ msgid "PREPARE name [ ( datatype [, ...] ) ] AS statement" +#~ msgstr "PREPARE namn [ ( datatyp [, ...] ) ] AS sats" + +#~ msgid "PREPARE TRANSACTION transaction_id" +#~ msgstr "PREPARE TRANSACTION transaktions_id" + +#~ msgid "REASSIGN OWNED BY old_role [, ...] TO new_role" +#~ msgstr "REASSIGN OWNED BY gammal_roll [, ...] TO ny_roll" + +#~ msgid "REINDEX { INDEX | TABLE | DATABASE | SYSTEM } name [ FORCE ]" +#~ msgstr "REINDEX { INDEX | TABLE | DATABASE | SYSTEM } namn [ FORCE ]" + +#~ msgid "RELEASE [ SAVEPOINT ] savepoint_name" +#~ msgstr "RELEASE [ SAVEPOINT ] sparpunktsnamn" #, fuzzy #~ msgid "" -#~ "ALTER USER name [ [ WITH ] option [ ... ] ]\n" +#~ "REVOKE [ GRANT OPTION FOR ]\n" +#~ " { { SELECT | INSERT | UPDATE | DELETE | TRUNCATE | REFERENCES | TRIGGER }\n" +#~ " [,...] | ALL [ PRIVILEGES ] }\n" +#~ " ON [ TABLE ] tablename [, ...]\n" +#~ " FROM { [ GROUP ] rolename | PUBLIC } [, ...]\n" +#~ " [ CASCADE | RESTRICT ]\n" #~ "\n" -#~ "where option can be:\n" -#~ " \n" -#~ " SUPERUSER | NOSUPERUSER\n" -#~ " | CREATEDB | NOCREATEDB\n" -#~ " | CREATEROLE | NOCREATEROLE\n" -#~ " | CREATEUSER | NOCREATEUSER\n" -#~ " | INHERIT | NOINHERIT\n" -#~ " | LOGIN | NOLOGIN\n" -#~ " | CONNECTION LIMIT connlimit\n" -#~ " | [ ENCRYPTED | UNENCRYPTED ] PASSWORD 'password'\n" -#~ " | VALID UNTIL 'timestamp' \n" +#~ "REVOKE [ GRANT OPTION FOR ]\n" +#~ " { { SELECT | INSERT | UPDATE | REFERENCES } ( column [, ...] )\n" +#~ " [,...] | ALL [ PRIVILEGES ] ( column [, ...] ) }\n" +#~ " ON [ TABLE ] tablename [, ...]\n" +#~ " FROM { [ GROUP ] rolename | PUBLIC } [, ...]\n" +#~ " [ CASCADE | RESTRICT ]\n" #~ "\n" -#~ "ALTER USER name RENAME TO newname\n" +#~ "REVOKE [ GRANT OPTION FOR ]\n" +#~ " { { USAGE | SELECT | UPDATE }\n" +#~ " [,...] | ALL [ PRIVILEGES ] }\n" +#~ " ON SEQUENCE sequencename [, ...]\n" +#~ " FROM { [ GROUP ] rolename | PUBLIC } [, ...]\n" +#~ " [ CASCADE | RESTRICT ]\n" #~ "\n" -#~ "ALTER USER name SET configuration_parameter { TO | = } { value | DEFAULT }\n" -#~ "ALTER USER name SET configuration_parameter FROM CURRENT\n" -#~ "ALTER USER name RESET configuration_parameter\n" -#~ "ALTER USER name RESET ALL" -#~ msgstr "" -#~ "ALTER USER namn [ [ WITH ] alternativ [ ... ] ]\n" +#~ "REVOKE [ GRANT OPTION FOR ]\n" +#~ " { { CREATE | CONNECT | TEMPORARY | TEMP } [,...] 
| ALL [ PRIVILEGES ] }\n" +#~ " ON DATABASE dbname [, ...]\n" +#~ " FROM { [ GROUP ] rolename | PUBLIC } [, ...]\n" +#~ " [ CASCADE | RESTRICT ]\n" #~ "\n" -#~ "där alternativ kan vara:\n" +#~ "REVOKE [ GRANT OPTION FOR ]\n" +#~ " { USAGE | ALL [ PRIVILEGES ] }\n" +#~ " ON FOREIGN DATA WRAPPER fdwname [, ...]\n" +#~ " FROM { [ GROUP ] rolename | PUBLIC } [, ...]\n" +#~ " [ CASCADE | RESTRICT ]\n" #~ "\n" -#~ " SUPERUSER | NOSUPERUSER\n" -#~ " | CREATEDB | NOCREATEDB\n" -#~ " | CREATEROLE | NOCREATEROLE\n" -#~ " | CREATEUSER | NOCREATEUSER\n" -#~ " | INHERIT | NOINHERIT\n" -#~ " | LOGIN | NOLOGIN\n" -#~ " | CONNECTION LIMIT anslutningstak\n" -#~ " | [ ENCRYPTED | UNENCRYPTED ] PASSWORD 'lösenord' \n" -#~ " | VALID UNTIL 'tidsstämpel'\n" +#~ "REVOKE [ GRANT OPTION FOR ]\n" +#~ " { USAGE | ALL [ PRIVILEGES ] }\n" +#~ " ON FOREIGN SERVER servername [, ...]\n" +#~ " FROM { [ GROUP ] rolename | PUBLIC } [, ...]\n" +#~ " [ CASCADE | RESTRICT ]\n" #~ "\n" -#~ "ALTER USER namn RENAME TO nytt_namn\n" +#~ "REVOKE [ GRANT OPTION FOR ]\n" +#~ " { EXECUTE | ALL [ PRIVILEGES ] }\n" +#~ " ON FUNCTION funcname ( [ [ argmode ] [ argname ] argtype [, ...] ] ) [, ...]\n" +#~ " FROM { [ GROUP ] rolename | PUBLIC } [, ...]\n" +#~ " [ CASCADE | RESTRICT ]\n" #~ "\n" -#~ "ALTER USER namn SET konfigurationsparameter { TO | = } { värde | DEFAULT }\n" -#~ "ALTER USER namn RESET konfigurationsparameter" - -#~ msgid "" -#~ "ALTER TYPE name RENAME TO new_name\n" -#~ "ALTER TYPE name OWNER TO new_owner \n" -#~ "ALTER TYPE name SET SCHEMA new_schema" -#~ msgstr "" -#~ "ALTER TYPE namn RENAME TO nytt_namn\n" -#~ "ALTER TYPE namn OWNER TO ny_ägare \n" -#~ "ALTER TYPE namn SET SCHEMA nytt_schema" - -#~ msgid "ALTER TRIGGER name ON table RENAME TO newname" -#~ msgstr "ALTER TRIGGER namb ON tabell RENAME TO nyttnamn" - -#~ msgid "ALTER TEXT SEARCH TEMPLATE name RENAME TO newname" -#~ msgstr "ALTER TEXT SEARCH TEMPLATE namn RENAME TO nyttnamn" - -#~ msgid "ALTER TEXT SEARCH PARSER name RENAME TO newname" -#~ msgstr "ALTER TEXT SEARCH PARSER namn RENAME TO nyttnamn" - -#~ msgid "" -#~ "ALTER TABLESPACE name RENAME TO newname\n" -#~ "ALTER TABLESPACE name OWNER TO newowner" -#~ msgstr "" -#~ "ALTER TABLESPACE namn RENAME TO nytt_namn\n" -#~ "ALTER TABLESPACE namn OWNER TO ny_ägare" - -#, fuzzy -#~ msgid "" -#~ "ALTER TABLE [ ONLY ] name [ * ]\n" -#~ " action [, ... ]\n" -#~ "ALTER TABLE [ ONLY ] name [ * ]\n" -#~ " RENAME [ COLUMN ] column TO new_column\n" -#~ "ALTER TABLE name\n" -#~ " RENAME TO new_name\n" -#~ "ALTER TABLE name\n" -#~ " SET SCHEMA new_schema\n" +#~ "REVOKE [ GRANT OPTION FOR ]\n" +#~ " { USAGE | ALL [ PRIVILEGES ] }\n" +#~ " ON LANGUAGE langname [, ...]\n" +#~ " FROM { [ GROUP ] rolename | PUBLIC } [, ...]\n" +#~ " [ CASCADE | RESTRICT ]\n" #~ "\n" -#~ "where action is one of:\n" +#~ "REVOKE [ GRANT OPTION FOR ]\n" +#~ " { { CREATE | USAGE } [,...] | ALL [ PRIVILEGES ] }\n" +#~ " ON SCHEMA schemaname [, ...]\n" +#~ " FROM { [ GROUP ] rolename | PUBLIC } [, ...]\n" +#~ " [ CASCADE | RESTRICT ]\n" #~ "\n" -#~ " ADD [ COLUMN ] column type [ column_constraint [ ... 
] ]\n" -#~ " DROP [ COLUMN ] column [ RESTRICT | CASCADE ]\n" -#~ " ALTER [ COLUMN ] column [ SET DATA ] TYPE type [ USING expression ]\n" -#~ " ALTER [ COLUMN ] column SET DEFAULT expression\n" -#~ " ALTER [ COLUMN ] column DROP DEFAULT\n" -#~ " ALTER [ COLUMN ] column { SET | DROP } NOT NULL\n" -#~ " ALTER [ COLUMN ] column SET STATISTICS integer\n" -#~ " ALTER [ COLUMN ] column SET STORAGE { PLAIN | EXTERNAL | EXTENDED | MAIN }\n" -#~ " ADD table_constraint\n" -#~ " DROP CONSTRAINT constraint_name [ RESTRICT | CASCADE ]\n" -#~ " DISABLE TRIGGER [ trigger_name | ALL | USER ]\n" -#~ " ENABLE TRIGGER [ trigger_name | ALL | USER ]\n" -#~ " ENABLE REPLICA TRIGGER trigger_name\n" -#~ " ENABLE ALWAYS TRIGGER trigger_name\n" -#~ " DISABLE RULE rewrite_rule_name\n" -#~ " ENABLE RULE rewrite_rule_name\n" -#~ " ENABLE REPLICA RULE rewrite_rule_name\n" -#~ " ENABLE ALWAYS RULE rewrite_rule_name\n" -#~ " CLUSTER ON index_name\n" -#~ " SET WITHOUT CLUSTER\n" -#~ " SET WITH OIDS\n" -#~ " SET WITHOUT OIDS\n" -#~ " SET ( storage_parameter = value [, ... ] )\n" -#~ " RESET ( storage_parameter [, ... ] )\n" -#~ " INHERIT parent_table\n" -#~ " NO INHERIT parent_table\n" -#~ " OWNER TO new_owner\n" -#~ " SET TABLESPACE new_tablespace" +#~ "REVOKE [ GRANT OPTION FOR ]\n" +#~ " { CREATE | ALL [ PRIVILEGES ] }\n" +#~ " ON TABLESPACE tablespacename [, ...]\n" +#~ " FROM { [ GROUP ] rolename | PUBLIC } [, ...]\n" +#~ " [ CASCADE | RESTRICT ]\n" +#~ "\n" +#~ "REVOKE [ ADMIN OPTION FOR ]\n" +#~ " role [, ...] FROM rolename [, ...]\n" +#~ " [ CASCADE | RESTRICT ]" #~ msgstr "" -#~ "ALTER TABLE [ ONLY ] namn [ * ]\n" -#~ " aktion [, ... ]\n" -#~ "ALTER TABLE [ ONLY ] namn [ * ]\n" -#~ " RENAME [ COLUMN ] kolumn TO ny_kolumn\n" -#~ "ALTER TABLE namn\n" -#~ " RENAME TO nytt_namn\n" -#~ "ALTER TABLE namn\n" -#~ " SET SCHEMA nytt_schema\n" +#~ "REVOKE [ GRANT OPTION FOR ]\n" +#~ " { { SELECT | INSERT | UPDATE | DELETE | REFERENCES | TRIGGER }\n" +#~ " [,...] | ALL [ PRIVILEGES ] }\n" +#~ " ON [ TABLE ] tabellnamn [, ...]\n" +#~ " FROM { användarnamn | GROUP gruppnamn | PUBLIC } [, ...]\n" +#~ " [ CASCADE | RESTRICT ]\n" +#~ "\n" +#~ "REVOKE [ GRANT OPTION FOR ]\n" +#~ " { { USAGE | SELECT | UPDATE }\n" +#~ " [,...] | ALL [ PRIVILEGES ] }\n" +#~ " ON SEQUENCE sekvensnamn [, ...]\n" +#~ " FROM { användarnamn | GROUP gruppnamn | PUBLIC } [, ...]\n" +#~ " [ CASCADE | RESTRICT ]\n" +#~ "\n" +#~ "REVOKE [ GRANT OPTION FOR ]\n" +#~ " { { CREATE | TEMPORARY | TEMP } [,...] | ALL [ PRIVILEGES ] }\n" +#~ " ON DATABASE dbnamn [, ...]\n" +#~ " FROM { användarnamn | GROUP gruppnamn | PUBLIC } [, ...]\n" +#~ " [ CASCADE | RESTRICT ]\n" +#~ "\n" +#~ "REVOKE [ GRANT OPTION FOR ]\n" +#~ " { EXECUTE | ALL [ PRIVILEGES ] }\n" +#~ " ON FUNCTION funknamn ( [ [ arg_läge ] [ arg_namn ] arg_typ [, ...] ] ) [, ...]\n" +#~ " FROM { användarnamn | GROUP gruppnamn | PUBLIC } [, ...]\n" +#~ " [ CASCADE | RESTRICT ]\n" #~ "\n" -#~ "där aktion är en av:\n" +#~ "REVOKE [ GRANT OPTION FOR ]\n" +#~ " { USAGE | ALL [ PRIVILEGES ] }\n" +#~ " ON LANGUAGE språknamn [, ...]\n" +#~ " FROM { användarnamn | GROUP gruppnamn | PUBLIC } [, ...]\n" +#~ " [ CASCADE | RESTRICT ]\n" #~ "\n" -#~ " ADD [ COLUMN ] kolumn type [ kolumnvillkor [ ... 
] ]\n" -#~ " DROP [ COLUMN ] kolumn [ RESTRICT | CASCADE ]\n" -#~ " ALTER [ COLUMN ] kolumn TYPE type [ USING uttryck ]\n" -#~ " ALTER [ COLUMN ] kolumn SET DEFAULT uttryck\n" -#~ " ALTER [ COLUMN ] kolumn DROP DEFAULT\n" -#~ " ALTER [ COLUMN ] kolumn { SET | DROP } NOT NULL\n" -#~ " ALTER [ COLUMN ] kolumn SET STATISTICS heltal\n" -#~ " ALTER [ COLUMN ] kolumn SET STORAGE { PLAIN | EXTERNAL | EXTENDED | MAIN }\n" -#~ " ADD tabellvillkor\n" -#~ " DROP CONSTRAINT villkorsnamn [ RESTRICT | CASCADE ]\n" -#~ " DISABLE TRIGGER [ utlösarnamn | ALL | USER ]\n" -#~ " ENABLE TRIGGER [ utlösarnamn | ALL | USER ]\n" -#~ " CLUSTER ON indexnamn\n" -#~ " SET WITHOUT CLUSTER\n" -#~ " SET WITHOUT OIDS\n" -#~ " SET ( lagringsparameter = värde [, ... ] )\n" -#~ " RESET ( lagringsparameter [, ... ] )\n" -#~ " INHERIT föräldertabell\n" -#~ " NO INHERIT föräldertabell\n" -#~ " OWNER TO ny_ägare\n" -#~ " SET TABLESPACE tabellutrymme" +#~ "REVOKE [ GRANT OPTION FOR ]\n" +#~ " { { CREATE | USAGE } [,...] | ALL [ PRIVILEGES ] }\n" +#~ " ON SCHEMA schemanamn [, ...]\n" +#~ " FROM { användarnamn | GROUP gruppnamn | PUBLIC } [, ...]\n" +#~ " [ CASCADE | RESTRICT ]\n" +#~ "\n" +#~ "REVOKE [ GRANT OPTION FOR ]\n" +#~ " { CREATE | ALL [ PRIVILEGES ] }\n" +#~ " ON TABLESPACE tabellutrymmesnamn [, ...]\n" +#~ " FROM { användarnamn | GROUP gruppnamn | PUBLIC } [, ...]\n" +#~ " [ CASCADE | RESTRICT ]\n" +#~ "\n" +#~ "REVOKE [ ADMIN OPTION FOR ]\n" +#~ " rolk [, ...]\n" +#~ " FROM { användarnamn | GROUP gruppnamn | PUBLIC } [, ...]\n" +#~ " [ CASCADE | RESTRICT ]" -#, fuzzy -#~ msgid "" -#~ "ALTER SEQUENCE name [ INCREMENT [ BY ] increment ]\n" -#~ " [ MINVALUE minvalue | NO MINVALUE ] [ MAXVALUE maxvalue | NO MAXVALUE ]\n" -#~ " [ START [ WITH ] start ]\n" -#~ " [ RESTART [ [ WITH ] restart ] ]\n" -#~ " [ CACHE cache ] [ [ NO ] CYCLE ]\n" -#~ " [ OWNED BY { table.column | NONE } ]\n" -#~ "ALTER SEQUENCE name OWNER TO new_owner\n" -#~ "ALTER SEQUENCE name RENAME TO new_name\n" -#~ "ALTER SEQUENCE name SET SCHEMA new_schema" -#~ msgstr "" -#~ "ALTER SEQUENCE namn [ INCREMENT [ BY ] ökningsvärde ]\n" -#~ " [ MINVALUE minvärde | NO MINVALUE ] [ MAXVALUE maxvärde | NO MAXVALUE ]\n" -#~ " [ RESTART [ WITH ] start ] [ CACHE cache ] [ [ NO ] CYCLE ]\n" -#~ " [ OWNED BY { tabell.kolumn | NONE } ]\n" -#~ "ALTER SEQUENCE namn SET SCHEMA nytt_schema" +#~ msgid "ROLLBACK [ WORK | TRANSACTION ]" +#~ msgstr "ROLLBACK [ WORK | TRANSACTION ]" -#~ msgid "" -#~ "ALTER SCHEMA name RENAME TO newname\n" -#~ "ALTER SCHEMA name OWNER TO newowner" -#~ msgstr "" -#~ "ALTER SCHEMA namn RENAME TO nytt_namn\n" -#~ "ALTER SCHEMA namn OWNER TO ny_ägare" +#~ msgid "ROLLBACK PREPARED transaction_id" +#~ msgstr "ROLLBACK PREPARED transaktions_id" + +#~ msgid "ROLLBACK [ WORK | TRANSACTION ] TO [ SAVEPOINT ] savepoint_name" +#~ msgstr "ROLLBACK [ WORK | TRANSACTION ] TO [ SAVEPOINT ] sparpunktsnamn" #, fuzzy #~ msgid "" -#~ "ALTER ROLE name [ [ WITH ] option [ ... ] ]\n" -#~ "\n" -#~ "where option can be:\n" -#~ " \n" -#~ " SUPERUSER | NOSUPERUSER\n" -#~ " | CREATEDB | NOCREATEDB\n" -#~ " | CREATEROLE | NOCREATEROLE\n" -#~ " | CREATEUSER | NOCREATEUSER\n" -#~ " | INHERIT | NOINHERIT\n" -#~ " | LOGIN | NOLOGIN\n" -#~ " | CONNECTION LIMIT connlimit\n" -#~ " | [ ENCRYPTED | UNENCRYPTED ] PASSWORD 'password'\n" -#~ " | VALID UNTIL 'timestamp' \n" +#~ "[ WITH [ RECURSIVE ] with_query [, ...] ]\n" +#~ "SELECT [ ALL | DISTINCT [ ON ( expression [, ...] ) ] ]\n" +#~ " * | expression [ [ AS ] output_name ] [, ...]\n" +#~ " [ FROM from_item [, ...] 
]\n" +#~ " [ WHERE condition ]\n" +#~ " [ GROUP BY expression [, ...] ]\n" +#~ " [ HAVING condition [, ...] ]\n" +#~ " [ WINDOW window_name AS ( window_definition ) [, ...] ]\n" +#~ " [ { UNION | INTERSECT | EXCEPT } [ ALL ] select ]\n" +#~ " [ ORDER BY expression [ ASC | DESC | USING operator ] [ NULLS { FIRST | LAST } ] [, ...] ]\n" +#~ " [ LIMIT { count | ALL } ]\n" +#~ " [ OFFSET start [ ROW | ROWS ] ]\n" +#~ " [ FETCH { FIRST | NEXT } [ count ] { ROW | ROWS } ONLY ]\n" +#~ " [ FOR { UPDATE | SHARE } [ OF table_name [, ...] ] [ NOWAIT ] [...] ]\n" #~ "\n" -#~ "ALTER ROLE name RENAME TO newname\n" +#~ "where from_item can be one of:\n" #~ "\n" -#~ "ALTER ROLE name SET configuration_parameter { TO | = } { value | DEFAULT }\n" -#~ "ALTER ROLE name SET configuration_parameter FROM CURRENT\n" -#~ "ALTER ROLE name RESET configuration_parameter\n" -#~ "ALTER ROLE name RESET ALL" -#~ msgstr "" -#~ "ALTER ROLE namn [ [ WITH ] alternativ [ ... ] ]\n" +#~ " [ ONLY ] table_name [ * ] [ [ AS ] alias [ ( column_alias [, ...] ) ] ]\n" +#~ " ( select ) [ AS ] alias [ ( column_alias [, ...] ) ]\n" +#~ " with_query_name [ [ AS ] alias [ ( column_alias [, ...] ) ] ]\n" +#~ " function_name ( [ argument [, ...] ] ) [ AS ] alias [ ( column_alias [, ...] | column_definition [, ...] ) ]\n" +#~ " function_name ( [ argument [, ...] ] ) AS ( column_definition [, ...] )\n" +#~ " from_item [ NATURAL ] join_type from_item [ ON join_condition | USING ( join_column [, ...] ) ]\n" #~ "\n" -#~ "där alternativ kan vara:\n" -#~ " \n" -#~ " SUPERUSER | NOSUPERUSER\n" -#~ " | CREATEDB | NOCREATEDB\n" -#~ " | CREATEROLE | NOCREATEROLE\n" -#~ " | CREATEUSER | NOCREATEUSER\n" -#~ " | INHERIT | NOINHERIT\n" -#~ " | LOGIN | NOLOGIN\n" -#~ " | CONNECTION LIMIT anslutningstak\n" -#~ " | [ ENCRYPTED | UNENCRYPTED ] PASSWORD 'lösenord'\n" -#~ " | VALID UNTIL 'tidsstämpel' \n" +#~ "and with_query is:\n" #~ "\n" -#~ "ALTER ROLE namn RENAME TO nytt_namn\n" +#~ " with_query_name [ ( column_name [, ...] ) ] AS ( select )\n" #~ "\n" -#~ "ALTER ROLE namn SET konfigurationsparameter { TO | = } { värde | DEFAULT }\n" -#~ "ALTER ROLE namn RESET konfigurationsparameter" - -#~ msgid "" -#~ "ALTER OPERATOR CLASS name USING index_method RENAME TO newname\n" -#~ "ALTER OPERATOR CLASS name USING index_method OWNER TO newowner" +#~ "TABLE { [ ONLY ] table_name [ * ] | with_query_name }" #~ msgstr "" -#~ "ALTER OPERATOR CLASS namn USING indexmetod RENAME TO nytt_namn\n" -#~ "ALTER OPERATOR CLASS namn USING indexmetod OWNER TO ny_ägare" - -#~ msgid "ALTER OPERATOR name ( { lefttype | NONE } , { righttype | NONE } ) OWNER TO newowner" -#~ msgstr "ALTER OPERATOR namn ( { vänster_typ | NONE }, { höger_typ | NONE } ) OWNER TO ny_ägare" +#~ "SELECT [ ALL | DISTINCT [ ON ( uttryck [, ...] ) ] ]\n" +#~ " * | uttryck [ AS utnamn ] [, ...]\n" +#~ " [ FROM frånval [, ...] ]\n" +#~ " [ WHERE villkor ]\n" +#~ " [ GROUP BY uttryck [, ...] ]\n" +#~ " [ HAVING villkor [, ...] ]\n" +#~ " [ { UNION | INTERSECT | EXCEPT } [ ALL ] select ]\n" +#~ " [ ORDER BY uttryck [ ASC | DESC | USING operator ] [, ...] ]\n" +#~ " [ LIMIT { antal | ALL } ]\n" +#~ " [ OFFSET start ]\n" +#~ " [ FOR { UPDATE | SHARE } [ OF tabellnamn [, ...] ] [ NOWAIT ] [...] ]\n" +#~ "\n" +#~ "där frånval kan vara en av:\n" +#~ "\n" +#~ " [ ONLY ] tabellnamn [ * ] [ [ AS ] alias [ ( kolumnalias [, ...] ) ] ]\n" +#~ " ( select ) [ AS ] alias [ ( kolumnalias [, ...] ) ]\n" +#~ " funktionsnamn ( [ argument [, ...] ] ) [ AS ] alias [ ( kolumnalias [, ...] | kolumndefinition [, ...] 
) ]\n" +#~ " funktionsnamn ( [ argument [, ...] ] ) AS ( kolumndefinition [, ...] )\n" +#~ " frånval [ NATURAL ] join-typ frånval [ ON join-villkor | USING ( join-kolumn [, ...] ) ]" #, fuzzy #~ msgid "" -#~ "ALTER [ PROCEDURAL ] LANGUAGE name RENAME TO newname\n" -#~ "ALTER [ PROCEDURAL ] LANGUAGE name OWNER TO new_owner" +#~ "[ WITH [ RECURSIVE ] with_query [, ...] ]\n" +#~ "SELECT [ ALL | DISTINCT [ ON ( expression [, ...] ) ] ]\n" +#~ " * | expression [ [ AS ] output_name ] [, ...]\n" +#~ " INTO [ TEMPORARY | TEMP ] [ TABLE ] new_table\n" +#~ " [ FROM from_item [, ...] ]\n" +#~ " [ WHERE condition ]\n" +#~ " [ GROUP BY expression [, ...] ]\n" +#~ " [ HAVING condition [, ...] ]\n" +#~ " [ WINDOW window_name AS ( window_definition ) [, ...] ]\n" +#~ " [ { UNION | INTERSECT | EXCEPT } [ ALL ] select ]\n" +#~ " [ ORDER BY expression [ ASC | DESC | USING operator ] [ NULLS { FIRST | LAST } ] [, ...] ]\n" +#~ " [ LIMIT { count | ALL } ]\n" +#~ " [ OFFSET start [ ROW | ROWS ] ]\n" +#~ " [ FETCH { FIRST | NEXT } [ count ] { ROW | ROWS } ONLY ]\n" +#~ " [ FOR { UPDATE | SHARE } [ OF table_name [, ...] ] [ NOWAIT ] [...] ]" #~ msgstr "" -#~ "ALTER SCHEMA namn RENAME TO nytt_namn\n" -#~ "ALTER SCHEMA namn OWNER TO ny_ägare" +#~ "SELECT [ ALL | DISTINCT [ ON ( uttryck [, ...] ) ] ]\n" +#~ " * | uttryck [ AS utnamn ] [, ...]\n" +#~ " INTO [ TEMPORARY | TEMP ] [ TABLE ] ny_tabell\n" +#~ " [ FROM frånval [, ...] ]\n" +#~ " [ WHERE villkor ]\n" +#~ " [ GROUP BY uttryck [, ...] ]\n" +#~ " [ HAVING villkor [, ...] ]\n" +#~ " [ { UNION | INTERSECT | EXCEPT } [ ALL ] select ]\n" +#~ " [ ORDER BY uttryck [ ASC | DESC | USING operator ] [, ...] ]\n" +#~ " [ LIMIT { antal | ALL } ]\n" +#~ " [ OFFSET start ]\n" +#~ " [ FOR { UPDATE | SHARE } [ OF tabellnamn [, ...] ] [ NOWAIT ] [...] ]" #~ msgid "" -#~ "ALTER INDEX name RENAME TO new_name\n" -#~ "ALTER INDEX name SET TABLESPACE tablespace_name\n" -#~ "ALTER INDEX name SET ( storage_parameter = value [, ... ] )\n" -#~ "ALTER INDEX name RESET ( storage_parameter [, ... ] )" +#~ "SET [ SESSION | LOCAL ] configuration_parameter { TO | = } { value | 'value' | DEFAULT }\n" +#~ "SET [ SESSION | LOCAL ] TIME ZONE { timezone | LOCAL | DEFAULT }" #~ msgstr "" -#~ "ALTER INDEX namn RENAME TO nytt_namn\n" -#~ "ALTER INDEX namn SET TABLESPACE tabellutrymmesnamn\n" -#~ "ALTER INDEX namn SET ( lagringsparameter = värde [, ... ] )\n" -#~ "ALTER INDEX namn RESET ( lagringsparameter [, ... ] )" +#~ "SET [ SESSION | LOCAL ] konfigurationsparameter { TO | = } { värde | 'värde' | DEFAULT }\n" +#~ "SET [ SESSION | LOCAL ] TIME ZONE { tidszon | LOCAL | DEFAULT }" -#~ msgid "" -#~ "ALTER GROUP groupname ADD USER username [, ... ]\n" -#~ "ALTER GROUP groupname DROP USER username [, ... ]\n" -#~ "\n" -#~ "ALTER GROUP groupname RENAME TO newname" -#~ msgstr "" -#~ "ALTER GROUP gruppnamn ADD USER användarnamn [, ... ]\n" -#~ "ALTER GROUP gruppnamn DROP USER användarnamn [, ... ]\n" -#~ "\n" -#~ "ALTER GROUP gruppnamn RENAME TO nyttnamn" +#~ msgid "SET CONSTRAINTS { ALL | name [, ...] } { DEFERRED | IMMEDIATE }" +#~ msgstr "SET CONSTRAINTS { ALL | namn [, ...] } { DEFERRED | IMMEDIATE }" -#, fuzzy #~ msgid "" -#~ "ALTER FUNCTION name ( [ [ argmode ] [ argname ] argtype [, ...] ] )\n" -#~ " action [ ... ] [ RESTRICT ]\n" -#~ "ALTER FUNCTION name ( [ [ argmode ] [ argname ] argtype [, ...] ] )\n" -#~ " RENAME TO new_name\n" -#~ "ALTER FUNCTION name ( [ [ argmode ] [ argname ] argtype [, ...] 
] )\n" -#~ " OWNER TO new_owner\n" -#~ "ALTER FUNCTION name ( [ [ argmode ] [ argname ] argtype [, ...] ] )\n" -#~ " SET SCHEMA new_schema\n" -#~ "\n" -#~ "where action is one of:\n" -#~ "\n" -#~ " CALLED ON NULL INPUT | RETURNS NULL ON NULL INPUT | STRICT\n" -#~ " IMMUTABLE | STABLE | VOLATILE\n" -#~ " [ EXTERNAL ] SECURITY INVOKER | [ EXTERNAL ] SECURITY DEFINER\n" -#~ " COST execution_cost\n" -#~ " ROWS result_rows\n" -#~ " SET configuration_parameter { TO | = } { value | DEFAULT }\n" -#~ " SET configuration_parameter FROM CURRENT\n" -#~ " RESET configuration_parameter\n" -#~ " RESET ALL" +#~ "SET [ SESSION | LOCAL ] ROLE rolename\n" +#~ "SET [ SESSION | LOCAL ] ROLE NONE\n" +#~ "RESET ROLE" #~ msgstr "" -#~ "ALTER FUNCTION namn ( [ [ arg_läge ] [ arg_namn ] arg_typ [, ...] ] )\n" -#~ " aktion [, ... ] [ RESTRICT ]\n" -#~ "ALTER FUNCTION namn ( [ [ arg_läge ] [ arg_namn ] arg_typ [, ...] ] )\n" -#~ " RENAME TO nytt_namn\n" -#~ "ALTER FUNCTION namn ( [ [ arg_läge ] [ arg_namn ] arg_typ [, ...] ] )\n" -#~ " OWNER TO ny_ägare\n" -#~ "ALTER FUNCTION namn ( [ [ arg_läge ] [ arg_namn ] arg_typ [, ...] ] )\n" -#~ " SET SCHEMA nytt_schema\n" -#~ "\n" -#~ "där aktion är en av:\n" -#~ "\n" -#~ " CALLED ON NULL INPUT | RETURNS NULL ON NULL INPUT | STRICT\n" -#~ " IMMUTABLE | STABLE | VOLATILE\n" -#~ " [ EXTERNAL ] SECURITY INVOKER | [ EXTERNAL ] SECURITY DEFINER" +#~ "SET [ SESSION | LOCAL ] ROLE rollnamn\n" +#~ "SET [ SESSION | LOCAL ] ROLE NONE\n" +#~ "RESET ROLE" #~ msgid "" -#~ "ALTER DOMAIN name\n" -#~ " { SET DEFAULT expression | DROP DEFAULT }\n" -#~ "ALTER DOMAIN name\n" -#~ " { SET | DROP } NOT NULL\n" -#~ "ALTER DOMAIN name\n" -#~ " ADD domain_constraint\n" -#~ "ALTER DOMAIN name\n" -#~ " DROP CONSTRAINT constraint_name [ RESTRICT | CASCADE ]\n" -#~ "ALTER DOMAIN name\n" -#~ " OWNER TO new_owner \n" -#~ "ALTER DOMAIN name\n" -#~ " SET SCHEMA new_schema" +#~ "SET [ SESSION | LOCAL ] SESSION AUTHORIZATION username\n" +#~ "SET [ SESSION | LOCAL ] SESSION AUTHORIZATION DEFAULT\n" +#~ "RESET SESSION AUTHORIZATION" #~ msgstr "" -#~ "ALTER DOMAIN namn\n" -#~ " { SET DEFAULT uttryck | DROP DEFAULT }\n" -#~ "ALTER DOMAIN namn\n" -#~ " { SET | DROP } NOT NULL\n" -#~ "ALTER DOMAIN namn\n" -#~ " ADD domain_villkor (constraint)\n" -#~ "ALTER DOMAIN namn\n" -#~ " DROP CONSTRAINT villkorsnamn [ RESTRICT | CASCADE ]\n" -#~ "ALTER DOMAIN namn\n" -#~ " OWNER TO ny_ägare\n" -#~ "ALTER DOMAIN namn\n" -#~ " SET SCHEMA nytt_schema" +#~ "SET [ SESSION | LOCAL ] SESSION AUTHORIZATION användarnamn\n" +#~ "SET [ SESSION | LOCAL ] SESSION AUTHORIZATION DEFAULT\n" +#~ "RESET SESSION AUTHORIZATION" -#, fuzzy #~ msgid "" -#~ "ALTER DATABASE name [ [ WITH ] option [ ... 
] ]\n" -#~ "\n" -#~ "where option can be:\n" -#~ "\n" -#~ " CONNECTION LIMIT connlimit\n" +#~ "SET TRANSACTION transaction_mode [, ...]\n" +#~ "SET SESSION CHARACTERISTICS AS TRANSACTION transaction_mode [, ...]\n" #~ "\n" -#~ "ALTER DATABASE name RENAME TO newname\n" +#~ "where transaction_mode is one of:\n" #~ "\n" -#~ "ALTER DATABASE name OWNER TO new_owner\n" +#~ " ISOLATION LEVEL { SERIALIZABLE | REPEATABLE READ | READ COMMITTED | READ UNCOMMITTED }\n" +#~ " READ WRITE | READ ONLY" +#~ msgstr "" +#~ "SET TRANSACTION transaktionsläge [, ...]\n" +#~ "SET SESSION CHARACTERISTICS AS TRANSACTION transaktionsläge [, ...]\n" #~ "\n" -#~ "ALTER DATABASE name SET TABLESPACE new_tablespace\n" +#~ "där transaktionsläge är en av:\n" #~ "\n" -#~ "ALTER DATABASE name SET configuration_parameter { TO | = } { value | DEFAULT }\n" -#~ "ALTER DATABASE name SET configuration_parameter FROM CURRENT\n" -#~ "ALTER DATABASE name RESET configuration_parameter\n" -#~ "ALTER DATABASE name RESET ALL" +#~ " ISOLATION LEVEL { SERIALIZABLE | REPEATABLE READ | READ COMMITTED | READ UNCOMMITTED }\n" +#~ " READ WRITE | READ ONLY" + +#~ msgid "" +#~ "SHOW name\n" +#~ "SHOW ALL" #~ msgstr "" -#~ "ALTER DATABASE namn [ [ WITH ] alternativ [ ... ] ]\n" -#~ "\n" -#~ "där alternativ kan vara:\n" +#~ "SHOW namn\n" +#~ "SHOW ALL" + +#~ msgid "" +#~ "START TRANSACTION [ transaction_mode [, ...] ]\n" #~ "\n" -#~ " CONNECTION LIMIT anslutningstak\n" +#~ "where transaction_mode is one of:\n" #~ "\n" -#~ "ALTER DATABASE namn SET parameter { TO | = } { värde | DEFAULT }\n" -#~ "ALTER DATABASE namn RESET parameter\n" +#~ " ISOLATION LEVEL { SERIALIZABLE | REPEATABLE READ | READ COMMITTED | READ UNCOMMITTED }\n" +#~ " READ WRITE | READ ONLY" +#~ msgstr "" +#~ "START TRANSACTION [ transaktionsläge [, ...] ]\n" #~ "\n" -#~ "ALTER DATABASE namn RENAME TO nyttnamn\n" +#~ "där transaktionsläge är en av:\n" #~ "\n" -#~ "ALTER DATABASE namn OWNER TO ny_ägare" +#~ " ISOLATION LEVEL { SERIALIZABLE | REPEATABLE READ | READ COMMITTED | READ UNCOMMITTED }\n" +#~ " READ WRITE | READ ONLY" #~ msgid "" -#~ "ALTER CONVERSION name RENAME TO newname\n" -#~ "ALTER CONVERSION name OWNER TO newowner" +#~ "TRUNCATE [ TABLE ] [ ONLY ] name [, ... ]\n" +#~ " [ RESTART IDENTITY | CONTINUE IDENTITY ] [ CASCADE | RESTRICT ]" #~ msgstr "" -#~ "ALTER CONVERSION namn RENAME TO nytt_namn\n" -#~ "ALTER CONVERSION namn OWNER TO ny_ägare" +#~ "TRUNCATE [ TABLE ] [ ONLY ] namn [, ... ]\n" +#~ " [ RESTART IDENTITY | CONTINUE IDENTITY ] [ CASCADE | RESTRICT ]" + +#~ msgid "UNLISTEN { name | * }" +#~ msgstr "UNLISTEN { namn | * }" #~ msgid "" -#~ "ALTER AGGREGATE name ( type [ , ... ] ) RENAME TO new_name\n" -#~ "ALTER AGGREGATE name ( type [ , ... ] ) OWNER TO new_owner\n" -#~ "ALTER AGGREGATE name ( type [ , ... ] ) SET SCHEMA new_schema" +#~ "UPDATE [ ONLY ] table [ [ AS ] alias ]\n" +#~ " SET { column = { expression | DEFAULT } |\n" +#~ " ( column [, ...] ) = ( { expression | DEFAULT } [, ...] ) } [, ...]\n" +#~ " [ FROM fromlist ]\n" +#~ " [ WHERE condition | WHERE CURRENT OF cursor_name ]\n" +#~ " [ RETURNING * | output_expression [ [ AS ] output_name ] [, ...] ]" #~ msgstr "" -#~ "ALTER AGGREGATE namn ( typ [ , ... ] ) RENAME TO nytt_namn\n" -#~ "ALTER AGGREGATE name ( typ [ , ... ] ) OWNER TO ny_ägare\n" -#~ "ALTER AGGREGATE namn ( typ [ , ... 
] ) SET SCHEMA nytt_schema" - -#~ msgid "ABORT [ WORK | TRANSACTION ]" -#~ msgstr "ABORT [ WORK | TRANSACTION ]" - -#~ msgid "could not change directory to \"%s\"" -#~ msgstr "kunde inte byta katalog till \"%s\"" - -#~ msgid "tablespace" -#~ msgstr "tabellutrymme" - -#~ msgid "input_data_type" -#~ msgstr "indatatyp" - -#~ msgid "agg_type" -#~ msgstr "agg_typ" - -#~ msgid "agg_name" -#~ msgstr "agg_namn" - -#~ msgid "new_column" -#~ msgstr "ny_kolumn" - -#~ msgid "column" -#~ msgstr "kolumn" - -#~ msgid "define a new constraint trigger" -#~ msgstr "definiera en ny villkorsutlösare" - -#~ msgid "Modifier" -#~ msgstr "Modifierare" - -#~ msgid "default %s" -#~ msgstr "default %s" - -#~ msgid "not null" -#~ msgstr "inte null" - -#~ msgid "Modifiers" -#~ msgstr "Modifierare" - -#~ msgid "data type" -#~ msgstr "datatyp" - -#~ msgid "contains support for command-line editing" -#~ msgstr "innehåller stöd för kommandoradsredigering" - -#~ msgid "%s: could not set variable \"%s\"\n" -#~ msgstr "%s: kunde inte sätta variabeln \"%s\"\n" - -#~ msgid "(No rows)\n" -#~ msgstr "(Inga rader)\n" +#~ "UPDATE [ ONLY ] tabell [ [ AS ] alias ]\n" +#~ " SET { kolumn = { uttryck | DEFAULT } |\n" +#~ " ( kolumn [, ...] ) = ( { uttryck | DEFAULT } [, ...] ) } [, ...]\n" +#~ " [ FROM frånlista ]\n" +#~ " [ WHERE villkor | WHERE CURRENT OF markörnamn ]\n" +#~ " [ RETURNING * | utdatauttryck [ [ AS ] utdatanamn ] [, ...] ]" #~ msgid "" -#~ " \\pset NAME [VALUE] set table output option\n" -#~ " (NAME := {format|border|expanded|fieldsep|footer|null|\n" -#~ " numericlocale|recordsep|tuples_only|title|tableattr|pager})\n" +#~ "VACUUM [ FULL ] [ FREEZE ] [ VERBOSE ] [ table ]\n" +#~ "VACUUM [ FULL ] [ FREEZE ] [ VERBOSE ] ANALYZE [ table [ (column [, ...] ) ] ]" #~ msgstr "" -#~ " \\pset NAMN [VÄRDE] sätt tabellutskriftsval\n" -#~ " (NAMN := {format|border|expanded|fieldsep|footer|null|\n" -#~ " numericlocale|recordsep|tuples_only|title|tableattr|pager})\n" - -#~ msgid " \\l[+] list all databases\n" -#~ msgstr " \\l[+] lista alla databaser\n" - -#~ msgid " \\du[+] [PATTERN] list roles (users)\n" -#~ msgstr " \\du[+] [MALL] lista roller (användare)\n" - -#~ msgid " \\dg[+] [PATTERN] list roles (groups)\n" -#~ msgstr " \\dg[+] [MALL] lista roller (grupper)\n" - -#~ msgid " --version output version information, then exit\n" -#~ msgstr " --version visa versionsinformation och avsluta sedan\n" - -#~ msgid " --help show this help, then exit\n" -#~ msgstr " --help visa denna hjälp och avsluta sedan\n" - -#~ msgid "could not get current user name: %s\n" -#~ msgstr "kunde inte hämta det aktuella användarnamnet: %s\n" - -#~ msgid "\\copy: unexpected response (%d)\n" -#~ msgstr "\\copy: oväntat svar (%d)\n" - -#~ msgid "\\copy: %s" -#~ msgstr "\\copy: %s" - -#~ msgid "%s: pg_strdup: cannot duplicate null pointer (internal error)\n" -#~ msgstr "%s: pg_strdup: kan inte duplicera null-pekare (internt fel)\n" - -#~ msgid "Showing only tuples." -#~ msgstr "Visar bara tupler." - -#~ msgid "Showing locale-adjusted numeric output." -#~ msgstr "Visar lokal-anpassad numerisk utdata." 
- -#~ msgid "SSL connection (unknown cipher)\n" -#~ msgstr "SSL-förbindelse (okänt krypto)\n" - -#~ msgid " as user \"%s\"" -#~ msgstr " som användare \"%s\"" - -#~ msgid " at port \"%s\"" -#~ msgstr " port \"%s\"" - -#~ msgid " on host \"%s\"" -#~ msgstr " på värd \"%s\"" - -#~ msgid "\\%s: error\n" -#~ msgstr "\\%s: fel\n" - -#~ msgid "Password encryption failed.\n" -#~ msgstr "Lösenordskryptering misslyckades.\n" - -#~ msgid "No relations found.\n" -#~ msgstr "Inga relationer funna.\n" - -#~ msgid "No matching relations found.\n" -#~ msgstr "Inga matchande relationer funna.\n" +#~ "VACUUM [ FULL ] [ FREEZE ] [ VERBOSE ] [ tabell ]\n" +#~ "VACUUM [ FULL ] [ FREEZE ] [ VERBOSE ] ANALYZE [ tabell [ (kolumn [, ...] ) ] ]" -#~ msgid "No settings found.\n" -#~ msgstr "Inga inställningar funna.\n" +#~ msgid "" +#~ "VALUES ( expression [, ...] ) [, ...]\n" +#~ " [ ORDER BY sort_expression [ ASC | DESC | USING operator ] [, ...] ]\n" +#~ " [ LIMIT { count | ALL } ]\n" +#~ " [ OFFSET start [ ROW | ROWS ] ]\n" +#~ " [ FETCH { FIRST | NEXT } [ count ] { ROW | ROWS } ONLY ]" +#~ msgstr "" +#~ "VALUES ( uttryck [, ...] ) [, ...]\n" +#~ " [ ORDER BY sorteringsuttryck [ ASC | DESC | USING operator ] [, ...] ]\n" +#~ " [ LIMIT { antal | ALL } ]\n" +#~ " [ OFFSET start [ ROW | ROWS ] ]\n" +#~ " [ FETCH { FIRST | NEXT } [ antal ] { ROW | ROWS } ONLY ]" -#~ msgid "No matching settings found.\n" -#~ msgstr "Inga matchande inställningar funna.\n" +#~ msgid "" +#~ "WARNING: You are connected to a server with major version %d.%d,\n" +#~ "but your %s client is major version %d.%d. Some backslash commands,\n" +#~ "such as \\d, might not work properly.\n" +#~ "\n" +#~ msgstr "" +#~ "VARNING: Du är uppkopplad mot en server med version %d.%d,\n" +#~ "men din klient %s är version %d.%d. 
En del snedstreckkommandon\n" +#~ "så som \\d kommer eventuellt inte att fungera som de skall.\n" +#~ "\n" -#~ msgid "No per-database role settings support in this server version.\n" -#~ msgstr "Inga rollinställningar per databas stöds i denna serverversion.\n" +#~ msgid "Value" +#~ msgstr "Värde" diff --git a/src/bin/psql/prompt.c b/src/bin/psql/prompt.c index 913b23e4cd..b176972884 100644 --- a/src/bin/psql/prompt.c +++ b/src/bin/psql/prompt.c @@ -1,7 +1,7 @@ /* * psql - the PostgreSQL interactive terminal * - * Copyright (c) 2000-2017, PostgreSQL Global Development Group + * Copyright (c) 2000-2018, PostgreSQL Global Development Group * * src/bin/psql/prompt.c */ diff --git a/src/bin/psql/prompt.h b/src/bin/psql/prompt.h index a7a95effb4..3a84565e4b 100644 --- a/src/bin/psql/prompt.h +++ b/src/bin/psql/prompt.h @@ -1,7 +1,7 @@ /* * psql - the PostgreSQL interactive terminal * - * Copyright (c) 2000-2017, PostgreSQL Global Development Group + * Copyright (c) 2000-2018, PostgreSQL Global Development Group * * src/bin/psql/prompt.h */ @@ -10,7 +10,7 @@ /* enum promptStatus_t is now defined by psqlscan.h */ #include "fe_utils/psqlscan.h" -#include "conditional.h" +#include "fe_utils/conditional.h" char *get_prompt(promptStatus_t status, ConditionalStack cstack); diff --git a/src/bin/psql/psqlscanslash.h b/src/bin/psql/psqlscanslash.h index db76061332..8e8efb2f0b 100644 --- a/src/bin/psql/psqlscanslash.h +++ b/src/bin/psql/psqlscanslash.h @@ -1,7 +1,7 @@ /* * psql - the PostgreSQL interactive terminal * - * Copyright (c) 2000-2017, PostgreSQL Global Development Group + * Copyright (c) 2000-2018, PostgreSQL Global Development Group * * src/bin/psql/psqlscanslash.h */ diff --git a/src/bin/psql/psqlscanslash.l b/src/bin/psql/psqlscanslash.l index db7a1b9eea..34df35e5f4 100644 --- a/src/bin/psql/psqlscanslash.l +++ b/src/bin/psql/psqlscanslash.l @@ -8,7 +8,7 @@ * * See fe_utils/psqlscan_int.h for additional commentary. * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * IDENTIFICATION @@ -19,7 +19,7 @@ #include "postgres_fe.h" #include "psqlscanslash.h" -#include "conditional.h" +#include "fe_utils/conditional.h" #include "libpq-fe.h" } @@ -67,6 +67,8 @@ static void evaluate_backtick(PsqlScanState state); extern int slash_yyget_column(yyscan_t yyscanner); extern void slash_yyset_column(int column_no, yyscan_t yyscanner); +/* LCOV_EXCL_START */ + %} /* Except for the prefix, these options should match psqlscan.l */ @@ -281,6 +283,10 @@ other . unquoted_option_chars = 0; } +:\{\?{variable_char}+\} { + psqlscan_test_variable(cur_state, yytext, yyleng); + } + :'{variable_char}* { /* Throw back everything but the colon */ yyless(1); @@ -295,6 +301,20 @@ other . ECHO; } +:\{\?{variable_char}* { + /* Throw back everything but the colon */ + yyless(1); + unquoted_option_chars++; + ECHO; + } + +:\{ { + /* Throw back everything but the colon */ + yyless(1); + unquoted_option_chars++; + ECHO; + } + {other} { unquoted_option_chars++; ECHO; @@ -450,6 +470,8 @@ other . %% +/* LCOV_EXCL_STOP */ + /* * Scan the command name of a psql backslash command. This should be called * after psql_scan() returns PSCAN_BACKSLASH. 
It is assumed that the input diff --git a/src/bin/psql/settings.h b/src/bin/psql/settings.h index b78f151acd..69e617e6b5 100644 --- a/src/bin/psql/settings.h +++ b/src/bin/psql/settings.h @@ -1,7 +1,7 @@ /* * psql - the PostgreSQL interactive terminal * - * Copyright (c) 2000-2017, PostgreSQL Global Development Group + * Copyright (c) 2000-2018, PostgreSQL Global Development Group * * src/bin/psql/settings.h */ @@ -93,7 +93,8 @@ typedef struct _psqlSettings char *gfname; /* one-shot file output argument for \g */ bool g_expanded; /* one-shot expanded output requested via \gx */ char *gset_prefix; /* one-shot prefix argument for \gset */ - bool gexec_flag; /* one-shot flag to execute query's results */ + bool gdesc_flag; /* one-shot request to describe query results */ + bool gexec_flag; /* one-shot request to execute query results */ bool crosstab_flag; /* one-shot request to crosstab results */ char *ctv_args[4]; /* \crosstabview arguments */ diff --git a/src/bin/psql/startup.c b/src/bin/psql/startup.c index 7f767976a5..be57574cd3 100644 --- a/src/bin/psql/startup.c +++ b/src/bin/psql/startup.c @@ -1,7 +1,7 @@ /* * psql - the PostgreSQL interactive terminal * - * Copyright (c) 2000-2017, PostgreSQL Global Development Group + * Copyright (c) 2000-2018, PostgreSQL Global Development Group * * src/bin/psql/startup.c */ @@ -101,7 +101,6 @@ main(int argc, char *argv[]) int successResult; bool have_password = false; char password[100]; - char *password_prompt = NULL; bool new_pass; set_pglocale_pgservice(argv[0], PG_TEXTDOMAIN("psql")); @@ -160,7 +159,14 @@ main(int argc, char *argv[]) EstablishVariableSpace(); + /* Create variables showing psql version number */ SetVariable(pset.vars, "VERSION", PG_VERSION_STR); + SetVariable(pset.vars, "VERSION_NAME", PG_VERSION); + SetVariable(pset.vars, "VERSION_NUM", CppAsString2(PG_VERSION_NUM)); + + /* Initialize variables for last error */ + SetVariable(pset.vars, "LAST_ERROR_MESSAGE", ""); + SetVariable(pset.vars, "LAST_ERROR_SQLSTATE", "00000"); /* Default values for variables (that don't match the result of \unset) */ SetVariableBool(pset.vars, "AUTOCOMMIT"); @@ -198,15 +204,14 @@ main(int argc, char *argv[]) pset.popt.topt.recordSep.separator_zero = false; } - if (options.username == NULL) - password_prompt = pg_strdup(_("Password: ")); - else - password_prompt = psprintf(_("Password for user %s: "), - options.username); - if (pset.getPassword == TRI_YES) { - simple_prompt(password_prompt, password, sizeof(password), false); + /* + * We can't be sure yet of the username that will be used, so don't + * offer a potentially wrong one. Typical uses of this option are + * noninteractive anyway. + */ + simple_prompt("Password: ", password, sizeof(password), false); have_password = true; } @@ -245,15 +250,28 @@ main(int argc, char *argv[]) !have_password && pset.getPassword != TRI_NO) { + /* + * Before closing the old PGconn, extract the user name that was + * actually connected with --- it might've come out of a URI or + * connstring "database name" rather than options.username. 
+ */ + const char *realusername = PQuser(pset.db); + char *password_prompt; + + if (realusername && realusername[0]) + password_prompt = psprintf(_("Password for user %s: "), + realusername); + else + password_prompt = pg_strdup(_("Password: ")); PQfinish(pset.db); + simple_prompt(password_prompt, password, sizeof(password), false); + free(password_prompt); have_password = true; new_pass = true; } } while (new_pass); - free(password_prompt); - if (PQstatus(pset.db) == CONNECTION_BAD) { fprintf(stderr, "%s: %s", pset.progname, PQerrorMessage(pset.db)); diff --git a/src/bin/psql/stringutils.c b/src/bin/psql/stringutils.c index 959381d085..29b9c9c7f0 100644 --- a/src/bin/psql/stringutils.c +++ b/src/bin/psql/stringutils.c @@ -1,7 +1,7 @@ /* * psql - the PostgreSQL interactive terminal * - * Copyright (c) 2000-2017, PostgreSQL Global Development Group + * Copyright (c) 2000-2018, PostgreSQL Global Development Group * * src/bin/psql/stringutils.c */ @@ -27,8 +27,8 @@ * delim - set of non-whitespace separator characters (or NULL) * quote - set of characters that can quote a token (NULL if none) * escape - character that can quote quotes (0 if none) - * e_strings - if TRUE, treat E'...' syntax as a valid token - * del_quotes - if TRUE, strip quotes from the returned token, else return + * e_strings - if true, treat E'...' syntax as a valid token + * del_quotes - if true, strip quotes from the returned token, else return * it exactly as found in the string * encoding - the active character-set encoding * @@ -39,7 +39,7 @@ * a single quote character in the data. If escape isn't 0, then escape * followed by anything (except \0) is a data character too. * - * The combination of e_strings and del_quotes both TRUE is not currently + * The combination of e_strings and del_quotes both true is not currently * handled. This could be fixed but it's not needed anywhere at the moment. * * Note that the string s is _not_ overwritten in this implementation. diff --git a/src/bin/psql/stringutils.h b/src/bin/psql/stringutils.h index 213473f919..d843d7119b 100644 --- a/src/bin/psql/stringutils.h +++ b/src/bin/psql/stringutils.h @@ -1,7 +1,7 @@ /* * psql - the PostgreSQL interactive terminal * - * Copyright (c) 2000-2017, PostgreSQL Global Development Group + * Copyright (c) 2000-2018, PostgreSQL Global Development Group * * src/bin/psql/stringutils.h */ diff --git a/src/bin/psql/tab-complete.c b/src/bin/psql/tab-complete.c index 1583cfa998..7294824948 100644 --- a/src/bin/psql/tab-complete.c +++ b/src/bin/psql/tab-complete.c @@ -1,7 +1,7 @@ /* * psql - the PostgreSQL interactive terminal * - * Copyright (c) 2000-2017, PostgreSQL Global Development Group + * Copyright (c) 2000-2018, PostgreSQL Global Development Group * * src/bin/psql/tab-complete.c */ @@ -41,7 +41,7 @@ #include -#include "catalog/pg_class.h" +#include "catalog/pg_class_d.h" #include "libpq-fe.h" #include "pqexpbuffer.h" @@ -70,15 +70,43 @@ extern char *filename_completion_function(); */ PQExpBuffer tab_completion_query_buf = NULL; +/* + * In some situations, the query to find out what names are available to + * complete with must vary depending on server version. We handle this by + * storing a list of queries, each tagged with the minimum server version + * it will work for. Each list must be stored in descending server version + * order, so that the first satisfactory query is the one to use. + * + * When the query string is otherwise constant, an array of VersionedQuery + * suffices. 
Terminate the array with an entry having min_server_version = 0. + * That entry's query string can be a query that works in all supported older + * server versions, or NULL to give up and do no completion. + */ +typedef struct VersionedQuery +{ + int min_server_version; + const char *query; +} VersionedQuery; + /* * This struct is used to define "schema queries", which are custom-built * to obtain possibly-schema-qualified names of database objects. There is * enough similarity in the structure that we don't want to repeat it each * time. So we put the components of each query into this struct and * assemble them with the common boilerplate in _complete_from_query(). + * + * As with VersionedQuery, we can use an array of these if the query details + * must vary across versions. */ typedef struct SchemaQuery { + /* + * If not zero, minimum server version this struct applies to. If not + * zero, there should be a following struct with a smaller minimum server + * version; use catname == NULL in the last entry if we should do nothing. + */ + int min_server_version; + /* * Name of catalog or catalogs to be queried, with alias, eg. * "pg_catalog.pg_class c". Note that "pg_namespace n" will be added. @@ -133,6 +161,7 @@ static const char *completion_charp; /* to pass a string */ static const char *const *completion_charpp; /* to pass a list of strings */ static const char *completion_info_charp; /* to pass a second string */ static const char *completion_info_charp2; /* to pass a third string */ +static const VersionedQuery *completion_vquery; /* to pass a VersionedQuery */ static const SchemaQuery *completion_squery; /* to pass a SchemaQuery */ static bool completion_case_sensitive; /* completion is case sensitive */ @@ -140,12 +169,14 @@ static bool completion_case_sensitive; /* completion is case sensitive */ * A few macros to ease typing. You can use these to complete the given * string with * 1) The results from a query you pass it. (Perhaps one of those below?) + * We support both simple and versioned queries. * 2) The results from a schema query you pass it. + * We support both simple and versioned schema queries. * 3) The items from a null-pointer-terminated list (with or without - * case-sensitive comparison; see also COMPLETE_WITH_LISTn, below). - * 4) A string constant. - * 5) The list of attributes of the given table (possibly schema-qualified). - * 6/ The list of arguments to the given function (possibly schema-qualified). + * case-sensitive comparison); if the list is constant you can build it + * with COMPLETE_WITH() or COMPLETE_WITH_CS(). + * 4) The list of attributes of the given table (possibly schema-qualified). + * 5) The list of arguments to the given function (possibly schema-qualified). 
*/ #define COMPLETE_WITH_QUERY(query) \ do { \ @@ -153,6 +184,12 @@ do { \ matches = completion_matches(text, complete_from_query); \ } while (0) +#define COMPLETE_WITH_VERSIONED_QUERY(query) \ +do { \ + completion_vquery = query; \ + matches = completion_matches(text, complete_from_versioned_query); \ +} while (0) + #define COMPLETE_WITH_SCHEMA_QUERY(query, addon) \ do { \ completion_squery = &(query); \ @@ -160,25 +197,41 @@ do { \ matches = completion_matches(text, complete_from_schema_query); \ } while (0) -#define COMPLETE_WITH_LIST_CS(list) \ +#define COMPLETE_WITH_VERSIONED_SCHEMA_QUERY(query, addon) \ do { \ - completion_charpp = list; \ - completion_case_sensitive = true; \ - matches = completion_matches(text, complete_from_list); \ + completion_squery = query; \ + completion_vquery = addon; \ + matches = completion_matches(text, complete_from_versioned_schema_query); \ +} while (0) + +#define COMPLETE_WITH_LIST_INT(cs, list) \ +do { \ + completion_case_sensitive = (cs); \ + if (!(list)[1]) \ + { \ + completion_charp = (list)[0]; \ + matches = completion_matches(text, complete_from_const); \ + } \ + else \ + { \ + completion_charpp = (list); \ + matches = completion_matches(text, complete_from_list); \ + } \ } while (0) -#define COMPLETE_WITH_LIST(list) \ +#define COMPLETE_WITH_LIST(list) COMPLETE_WITH_LIST_INT(false, list) +#define COMPLETE_WITH_LIST_CS(list) COMPLETE_WITH_LIST_INT(true, list) + +#define COMPLETE_WITH(...) \ do { \ - completion_charpp = list; \ - completion_case_sensitive = false; \ - matches = completion_matches(text, complete_from_list); \ + static const char *const list[] = { __VA_ARGS__, NULL }; \ + COMPLETE_WITH_LIST(list); \ } while (0) -#define COMPLETE_WITH_CONST(string) \ +#define COMPLETE_WITH_CS(...) \ do { \ - completion_charp = string; \ - completion_case_sensitive = false; \ - matches = completion_matches(text, complete_from_const); \ + static const char *const list[] = { __VA_ARGS__, NULL }; \ + COMPLETE_WITH_LIST_CS(list); \ } while (0) #define COMPLETE_WITH_ATTR(relation, addon) \ @@ -256,379 +309,240 @@ do { \ matches = completion_matches(text, complete_from_query); \ } while (0) -/* - * These macros simplify use of COMPLETE_WITH_LIST for short, fixed lists. - * There is no COMPLETE_WITH_LIST1; use COMPLETE_WITH_CONST for that case. 
- */ -#define COMPLETE_WITH_LIST2(s1, s2) \ -do { \ - static const char *const list[] = { s1, s2, NULL }; \ - COMPLETE_WITH_LIST(list); \ -} while (0) - -#define COMPLETE_WITH_LIST3(s1, s2, s3) \ -do { \ - static const char *const list[] = { s1, s2, s3, NULL }; \ - COMPLETE_WITH_LIST(list); \ -} while (0) - -#define COMPLETE_WITH_LIST4(s1, s2, s3, s4) \ -do { \ - static const char *const list[] = { s1, s2, s3, s4, NULL }; \ - COMPLETE_WITH_LIST(list); \ -} while (0) - -#define COMPLETE_WITH_LIST5(s1, s2, s3, s4, s5) \ -do { \ - static const char *const list[] = { s1, s2, s3, s4, s5, NULL }; \ - COMPLETE_WITH_LIST(list); \ -} while (0) - -#define COMPLETE_WITH_LIST6(s1, s2, s3, s4, s5, s6) \ -do { \ - static const char *const list[] = { s1, s2, s3, s4, s5, s6, NULL }; \ - COMPLETE_WITH_LIST(list); \ -} while (0) - -#define COMPLETE_WITH_LIST7(s1, s2, s3, s4, s5, s6, s7) \ -do { \ - static const char *const list[] = { s1, s2, s3, s4, s5, s6, s7, NULL }; \ - COMPLETE_WITH_LIST(list); \ -} while (0) - -#define COMPLETE_WITH_LIST8(s1, s2, s3, s4, s5, s6, s7, s8) \ -do { \ - static const char *const list[] = { s1, s2, s3, s4, s5, s6, s7, s8, NULL }; \ - COMPLETE_WITH_LIST(list); \ -} while (0) - -#define COMPLETE_WITH_LIST9(s1, s2, s3, s4, s5, s6, s7, s8, s9) \ -do { \ - static const char *const list[] = { s1, s2, s3, s4, s5, s6, s7, s8, s9, NULL }; \ - COMPLETE_WITH_LIST(list); \ -} while (0) - -#define COMPLETE_WITH_LIST10(s1, s2, s3, s4, s5, s6, s7, s8, s9, s10) \ -do { \ - static const char *const list[] = { s1, s2, s3, s4, s5, s6, s7, s8, s9, s10, NULL }; \ - COMPLETE_WITH_LIST(list); \ -} while (0) - -/* - * Likewise for COMPLETE_WITH_LIST_CS. - */ -#define COMPLETE_WITH_LIST_CS2(s1, s2) \ -do { \ - static const char *const list[] = { s1, s2, NULL }; \ - COMPLETE_WITH_LIST_CS(list); \ -} while (0) - -#define COMPLETE_WITH_LIST_CS3(s1, s2, s3) \ -do { \ - static const char *const list[] = { s1, s2, s3, NULL }; \ - COMPLETE_WITH_LIST_CS(list); \ -} while (0) - -#define COMPLETE_WITH_LIST_CS4(s1, s2, s3, s4) \ -do { \ - static const char *const list[] = { s1, s2, s3, s4, NULL }; \ - COMPLETE_WITH_LIST_CS(list); \ -} while (0) - -#define COMPLETE_WITH_LIST_CS5(s1, s2, s3, s4, s5) \ -do { \ - static const char *const list[] = { s1, s2, s3, s4, s5, NULL }; \ - COMPLETE_WITH_LIST_CS(list); \ -} while (0) - /* * Assembly instructions for schema queries */ -static const SchemaQuery Query_for_list_of_aggregates = { - /* catname */ - "pg_catalog.pg_proc p", - /* selcondition */ - "p.proisagg", - /* viscondition */ - "pg_catalog.pg_function_is_visible(p.oid)", - /* namespace */ - "p.pronamespace", - /* result */ - "pg_catalog.quote_ident(p.proname)", - /* qualresult */ - NULL +static const SchemaQuery Query_for_list_of_aggregates[] = { + { + .min_server_version = 110000, + .catname = "pg_catalog.pg_proc p", + .selcondition = "p.prokind = 'a'", + .viscondition = "pg_catalog.pg_function_is_visible(p.oid)", + .namespace = "p.pronamespace", + .result = "pg_catalog.quote_ident(p.proname)", + }, + { + .catname = "pg_catalog.pg_proc p", + .selcondition = "p.proisagg", + .viscondition = "pg_catalog.pg_function_is_visible(p.oid)", + .namespace = "p.pronamespace", + .result = "pg_catalog.quote_ident(p.proname)", + } }; static const SchemaQuery Query_for_list_of_datatypes = { - /* catname */ - "pg_catalog.pg_type t", + .catname = "pg_catalog.pg_type t", /* selcondition --- ignore table rowtypes and array types */ - "(t.typrelid = 0 " + .selcondition = "(t.typrelid = 0 " " OR (SELECT c.relkind = " 
CppAsString2(RELKIND_COMPOSITE_TYPE) " FROM pg_catalog.pg_class c WHERE c.oid = t.typrelid)) " "AND t.typname !~ '^_'", - /* viscondition */ - "pg_catalog.pg_type_is_visible(t.oid)", - /* namespace */ - "t.typnamespace", - /* result */ - "pg_catalog.format_type(t.oid, NULL)", - /* qualresult */ - "pg_catalog.quote_ident(t.typname)" + .viscondition = "pg_catalog.pg_type_is_visible(t.oid)", + .namespace = "t.typnamespace", + .result = "pg_catalog.format_type(t.oid, NULL)", + .qualresult = "pg_catalog.quote_ident(t.typname)", }; static const SchemaQuery Query_for_list_of_domains = { - /* catname */ - "pg_catalog.pg_type t", - /* selcondition */ - "t.typtype = 'd'", - /* viscondition */ - "pg_catalog.pg_type_is_visible(t.oid)", - /* namespace */ - "t.typnamespace", - /* result */ - "pg_catalog.quote_ident(t.typname)", - /* qualresult */ - NULL + .catname = "pg_catalog.pg_type t", + .selcondition = "t.typtype = 'd'", + .viscondition = "pg_catalog.pg_type_is_visible(t.oid)", + .namespace = "t.typnamespace", + .result = "pg_catalog.quote_ident(t.typname)", }; -static const SchemaQuery Query_for_list_of_functions = { - /* catname */ - "pg_catalog.pg_proc p", - /* selcondition */ - NULL, - /* viscondition */ - "pg_catalog.pg_function_is_visible(p.oid)", - /* namespace */ - "p.pronamespace", - /* result */ - "pg_catalog.quote_ident(p.proname)", - /* qualresult */ - NULL +/* Note: this intentionally accepts aggregates as well as plain functions */ +static const SchemaQuery Query_for_list_of_functions[] = { + { + .min_server_version = 110000, + .catname = "pg_catalog.pg_proc p", + .selcondition = "p.prokind != 'p'", + .viscondition = "pg_catalog.pg_function_is_visible(p.oid)", + .namespace = "p.pronamespace", + .result = "pg_catalog.quote_ident(p.proname)", + }, + { + .catname = "pg_catalog.pg_proc p", + .viscondition = "pg_catalog.pg_function_is_visible(p.oid)", + .namespace = "p.pronamespace", + .result = "pg_catalog.quote_ident(p.proname)", + } }; -static const SchemaQuery Query_for_list_of_indexes = { - /* catname */ - "pg_catalog.pg_class c", - /* selcondition */ - "c.relkind IN (" CppAsString2(RELKIND_INDEX) ")", - /* viscondition */ - "pg_catalog.pg_table_is_visible(c.oid)", - /* namespace */ - "c.relnamespace", - /* result */ - "pg_catalog.quote_ident(c.relname)", - /* qualresult */ - NULL +static const SchemaQuery Query_for_list_of_procedures[] = { + { + .min_server_version = 110000, + .catname = "pg_catalog.pg_proc p", + .selcondition = "p.prokind = 'p'", + .viscondition = "pg_catalog.pg_function_is_visible(p.oid)", + .namespace = "p.pronamespace", + .result = "pg_catalog.quote_ident(p.proname)", + }, + { + /* not supported in older versions */ + .catname = NULL, + } +}; + +static const SchemaQuery Query_for_list_of_routines = { + .catname = "pg_catalog.pg_proc p", + .viscondition = "pg_catalog.pg_function_is_visible(p.oid)", + .namespace = "p.pronamespace", + .result = "pg_catalog.quote_ident(p.proname)", }; static const SchemaQuery Query_for_list_of_sequences = { - /* catname */ - "pg_catalog.pg_class c", - /* selcondition */ - "c.relkind IN (" CppAsString2(RELKIND_SEQUENCE) ")", - /* viscondition */ - "pg_catalog.pg_table_is_visible(c.oid)", - /* namespace */ - "c.relnamespace", - /* result */ - "pg_catalog.quote_ident(c.relname)", - /* qualresult */ - NULL + .catname = "pg_catalog.pg_class c", + .selcondition = "c.relkind IN (" CppAsString2(RELKIND_SEQUENCE) ")", + .viscondition = "pg_catalog.pg_table_is_visible(c.oid)", + .namespace = "c.relnamespace", + .result = 
"pg_catalog.quote_ident(c.relname)", }; static const SchemaQuery Query_for_list_of_foreign_tables = { - /* catname */ - "pg_catalog.pg_class c", - /* selcondition */ - "c.relkind IN (" CppAsString2(RELKIND_FOREIGN_TABLE) ")", - /* viscondition */ - "pg_catalog.pg_table_is_visible(c.oid)", - /* namespace */ - "c.relnamespace", - /* result */ - "pg_catalog.quote_ident(c.relname)", - /* qualresult */ - NULL + .catname = "pg_catalog.pg_class c", + .selcondition = "c.relkind IN (" CppAsString2(RELKIND_FOREIGN_TABLE) ")", + .viscondition = "pg_catalog.pg_table_is_visible(c.oid)", + .namespace = "c.relnamespace", + .result = "pg_catalog.quote_ident(c.relname)", }; static const SchemaQuery Query_for_list_of_tables = { - /* catname */ - "pg_catalog.pg_class c", - /* selcondition */ + .catname = "pg_catalog.pg_class c", + .selcondition = "c.relkind IN (" CppAsString2(RELKIND_RELATION) ", " CppAsString2(RELKIND_PARTITIONED_TABLE) ")", - /* viscondition */ - "pg_catalog.pg_table_is_visible(c.oid)", - /* namespace */ - "c.relnamespace", - /* result */ - "pg_catalog.quote_ident(c.relname)", - /* qualresult */ - NULL + .viscondition = "pg_catalog.pg_table_is_visible(c.oid)", + .namespace = "c.relnamespace", + .result = "pg_catalog.quote_ident(c.relname)", }; static const SchemaQuery Query_for_list_of_partitioned_tables = { - /* catname */ - "pg_catalog.pg_class c", - /* selcondition */ - "c.relkind IN (" CppAsString2(RELKIND_PARTITIONED_TABLE) ")", - /* viscondition */ - "pg_catalog.pg_table_is_visible(c.oid)", - /* namespace */ - "c.relnamespace", - /* result */ - "pg_catalog.quote_ident(c.relname)", - /* qualresult */ - NULL + .catname = "pg_catalog.pg_class c", + .selcondition = "c.relkind IN (" CppAsString2(RELKIND_PARTITIONED_TABLE) ")", + .viscondition = "pg_catalog.pg_table_is_visible(c.oid)", + .namespace = "c.relnamespace", + .result = "pg_catalog.quote_ident(c.relname)", }; -static const SchemaQuery Query_for_list_of_constraints_with_schema = { - /* catname */ - "pg_catalog.pg_constraint c", - /* selcondition */ - "c.conrelid <> 0", - /* viscondition */ - "true", /* there is no pg_constraint_is_visible */ - /* namespace */ - "c.connamespace", - /* result */ - "pg_catalog.quote_ident(c.conname)", - /* qualresult */ - NULL +static const SchemaQuery Query_for_list_of_views = { + .catname = "pg_catalog.pg_class c", + .selcondition = "c.relkind IN (" CppAsString2(RELKIND_VIEW) ")", + .viscondition = "pg_catalog.pg_table_is_visible(c.oid)", + .namespace = "c.relnamespace", + .result = "pg_catalog.quote_ident(c.relname)", +}; + +static const SchemaQuery Query_for_list_of_matviews = { + .catname = "pg_catalog.pg_class c", + .selcondition = "c.relkind IN (" CppAsString2(RELKIND_MATVIEW) ")", + .viscondition = "pg_catalog.pg_table_is_visible(c.oid)", + .namespace = "c.relnamespace", + .result = "pg_catalog.quote_ident(c.relname)", +}; + +static const SchemaQuery Query_for_list_of_indexes = { + .catname = "pg_catalog.pg_class c", + .selcondition = + "c.relkind IN (" CppAsString2(RELKIND_INDEX) ", " + CppAsString2(RELKIND_PARTITIONED_INDEX) ")", + .viscondition = "pg_catalog.pg_table_is_visible(c.oid)", + .namespace = "c.relnamespace", + .result = "pg_catalog.quote_ident(c.relname)", +}; + +/* All relations */ +static const SchemaQuery Query_for_list_of_relations = { + .catname = "pg_catalog.pg_class c", + .viscondition = "pg_catalog.pg_table_is_visible(c.oid)", + .namespace = "c.relnamespace", + .result = "pg_catalog.quote_ident(c.relname)", }; /* Relations supporting INSERT, UPDATE or DELETE */ static 
const SchemaQuery Query_for_list_of_updatables = { - /* catname */ - "pg_catalog.pg_class c", - /* selcondition */ + .catname = "pg_catalog.pg_class c", + .selcondition = "c.relkind IN (" CppAsString2(RELKIND_RELATION) ", " CppAsString2(RELKIND_FOREIGN_TABLE) ", " CppAsString2(RELKIND_VIEW) ", " CppAsString2(RELKIND_PARTITIONED_TABLE) ")", - /* viscondition */ - "pg_catalog.pg_table_is_visible(c.oid)", - /* namespace */ - "c.relnamespace", - /* result */ - "pg_catalog.quote_ident(c.relname)", - /* qualresult */ - NULL + .viscondition = "pg_catalog.pg_table_is_visible(c.oid)", + .namespace = "c.relnamespace", + .result = "pg_catalog.quote_ident(c.relname)", }; -static const SchemaQuery Query_for_list_of_relations = { - /* catname */ - "pg_catalog.pg_class c", - /* selcondition */ - NULL, - /* viscondition */ - "pg_catalog.pg_table_is_visible(c.oid)", - /* namespace */ - "c.relnamespace", - /* result */ - "pg_catalog.quote_ident(c.relname)", - /* qualresult */ - NULL -}; - -static const SchemaQuery Query_for_list_of_tsvmf = { - /* catname */ - "pg_catalog.pg_class c", - /* selcondition */ +/* Relations supporting SELECT */ +static const SchemaQuery Query_for_list_of_selectables = { + .catname = "pg_catalog.pg_class c", + .selcondition = "c.relkind IN (" CppAsString2(RELKIND_RELATION) ", " CppAsString2(RELKIND_SEQUENCE) ", " CppAsString2(RELKIND_VIEW) ", " CppAsString2(RELKIND_MATVIEW) ", " CppAsString2(RELKIND_FOREIGN_TABLE) ", " CppAsString2(RELKIND_PARTITIONED_TABLE) ")", - /* viscondition */ - "pg_catalog.pg_table_is_visible(c.oid)", - /* namespace */ - "c.relnamespace", - /* result */ - "pg_catalog.quote_ident(c.relname)", - /* qualresult */ - NULL + .viscondition = "pg_catalog.pg_table_is_visible(c.oid)", + .namespace = "c.relnamespace", + .result = "pg_catalog.quote_ident(c.relname)", }; -static const SchemaQuery Query_for_list_of_tmf = { - /* catname */ - "pg_catalog.pg_class c", - /* selcondition */ +/* Relations supporting GRANT are currently same as those supporting SELECT */ +#define Query_for_list_of_grantables Query_for_list_of_selectables + +/* Relations supporting ANALYZE */ +static const SchemaQuery Query_for_list_of_analyzables = { + .catname = "pg_catalog.pg_class c", + .selcondition = "c.relkind IN (" CppAsString2(RELKIND_RELATION) ", " + CppAsString2(RELKIND_PARTITIONED_TABLE) ", " CppAsString2(RELKIND_MATVIEW) ", " CppAsString2(RELKIND_FOREIGN_TABLE) ")", - /* viscondition */ - "pg_catalog.pg_table_is_visible(c.oid)", - /* namespace */ - "c.relnamespace", - /* result */ - "pg_catalog.quote_ident(c.relname)", - /* qualresult */ - NULL + .viscondition = "pg_catalog.pg_table_is_visible(c.oid)", + .namespace = "c.relnamespace", + .result = "pg_catalog.quote_ident(c.relname)", }; -static const SchemaQuery Query_for_list_of_tm = { - /* catname */ - "pg_catalog.pg_class c", - /* selcondition */ +/* Relations supporting index creation */ +static const SchemaQuery Query_for_list_of_indexables = { + .catname = "pg_catalog.pg_class c", + .selcondition = "c.relkind IN (" CppAsString2(RELKIND_RELATION) ", " + CppAsString2(RELKIND_PARTITIONED_TABLE) ", " CppAsString2(RELKIND_MATVIEW) ")", - /* viscondition */ - "pg_catalog.pg_table_is_visible(c.oid)", - /* namespace */ - "c.relnamespace", - /* result */ - "pg_catalog.quote_ident(c.relname)", - /* qualresult */ - NULL + .viscondition = "pg_catalog.pg_table_is_visible(c.oid)", + .namespace = "c.relnamespace", + .result = "pg_catalog.quote_ident(c.relname)", }; -static const SchemaQuery Query_for_list_of_views = { - /* catname */ - 
"pg_catalog.pg_class c", - /* selcondition */ - "c.relkind IN (" CppAsString2(RELKIND_VIEW) ")", - /* viscondition */ - "pg_catalog.pg_table_is_visible(c.oid)", - /* namespace */ - "c.relnamespace", - /* result */ - "pg_catalog.quote_ident(c.relname)", - /* qualresult */ - NULL +/* Relations supporting VACUUM */ +static const SchemaQuery Query_for_list_of_vacuumables = { + .catname = "pg_catalog.pg_class c", + .selcondition = + "c.relkind IN (" CppAsString2(RELKIND_RELATION) ", " + CppAsString2(RELKIND_MATVIEW) ")", + .viscondition = "pg_catalog.pg_table_is_visible(c.oid)", + .namespace = "c.relnamespace", + .result = "pg_catalog.quote_ident(c.relname)", }; -static const SchemaQuery Query_for_list_of_matviews = { - /* catname */ - "pg_catalog.pg_class c", - /* selcondition */ - "c.relkind IN (" CppAsString2(RELKIND_MATVIEW) ")", - /* viscondition */ - "pg_catalog.pg_table_is_visible(c.oid)", - /* namespace */ - "c.relnamespace", - /* result */ - "pg_catalog.quote_ident(c.relname)", - /* qualresult */ - NULL +/* Relations supporting CLUSTER are currently same as those supporting VACUUM */ +#define Query_for_list_of_clusterables Query_for_list_of_vacuumables + +static const SchemaQuery Query_for_list_of_constraints_with_schema = { + .catname = "pg_catalog.pg_constraint c", + .selcondition = "c.conrelid <> 0", + .viscondition = "true", /* there is no pg_constraint_is_visible */ + .namespace = "c.connamespace", + .result = "pg_catalog.quote_ident(c.conname)", }; static const SchemaQuery Query_for_list_of_statistics = { - /* catname */ - "pg_catalog.pg_statistic_ext s", - /* selcondition */ - NULL, - /* viscondition */ - "pg_catalog.pg_statistics_obj_is_visible(s.oid)", - /* namespace */ - "s.stxnamespace", - /* result */ - "pg_catalog.quote_ident(s.stxname)", - /* qualresult */ - NULL + .catname = "pg_catalog.pg_statistic_ext s", + .viscondition = "pg_catalog.pg_statistics_obj_is_visible(s.oid)", + .namespace = "s.stxnamespace", + .result = "pg_catalog.quote_ident(s.stxname)", }; @@ -877,18 +791,6 @@ static const SchemaQuery Query_for_list_of_statistics = { " FROM pg_catalog.pg_am "\ " WHERE substring(pg_catalog.quote_ident(amname),1,%d)='%s'" -#define Query_for_list_of_publications \ -" SELECT pg_catalog.quote_ident(pubname) "\ -" FROM pg_catalog.pg_publication "\ -" WHERE substring(pg_catalog.quote_ident(pubname),1,%d)='%s'" - -#define Query_for_list_of_subscriptions \ -" SELECT pg_catalog.quote_ident(s.subname) "\ -" FROM pg_catalog.pg_subscription s, pg_catalog.pg_database d "\ -" WHERE substring(pg_catalog.quote_ident(s.subname),1,%d)='%s' "\ -" AND d.datname = pg_catalog.current_database() "\ -" AND s.subdbid = d.oid" - /* the silly-looking length condition is just to eat up the current word */ #define Query_for_list_of_arguments \ "SELECT pg_catalog.oidvectortypes(proargtypes)||')' "\ @@ -982,6 +884,32 @@ static const SchemaQuery Query_for_list_of_statistics = { " and pg_catalog.pg_table_is_visible(c2.oid)"\ " and c2.relispartition = 'true'" +/* + * These object types were introduced later than our support cutoff of + * server version 7.4. We use the VersionedQuery infrastructure so that + * we don't send certain-to-fail queries to older servers. 
+ */ + +static const VersionedQuery Query_for_list_of_publications[] = { + {100000, + " SELECT pg_catalog.quote_ident(pubname) " + " FROM pg_catalog.pg_publication " + " WHERE substring(pg_catalog.quote_ident(pubname),1,%d)='%s'" + }, + {0, NULL} +}; + +static const VersionedQuery Query_for_list_of_subscriptions[] = { + {100000, + " SELECT pg_catalog.quote_ident(s.subname) " + " FROM pg_catalog.pg_subscription s, pg_catalog.pg_database d " + " WHERE substring(pg_catalog.quote_ident(s.subname),1,%d)='%s' " + " AND d.datname = pg_catalog.current_database() " + " AND s.subdbid = d.oid" + }, + {0, NULL} +}; + /* * This is a list of all "things" in Pgsql, which can show up after CREATE or * DROP; and there is also a query to get a list of them. @@ -991,6 +919,7 @@ typedef struct { const char *name; const char *query; /* simple query, or NULL */ + const VersionedQuery *vquery; /* versioned query, or NULL */ const SchemaQuery *squery; /* schema query, or NULL */ const bits32 flags; /* visibility flags, see below */ } pgsql_thing_t; @@ -1001,9 +930,9 @@ typedef struct #define THING_NO_SHOW (THING_NO_CREATE | THING_NO_DROP | THING_NO_ALTER) static const pgsql_thing_t words_after_create[] = { - {"ACCESS METHOD", NULL, NULL, THING_NO_ALTER}, - {"AGGREGATE", NULL, &Query_for_list_of_aggregates}, - {"CAST", NULL, NULL}, /* Casts have complex structures for names, so + {"ACCESS METHOD", NULL, NULL, NULL, THING_NO_ALTER}, + {"AGGREGATE", NULL, NULL, Query_for_list_of_aggregates}, + {"CAST", NULL, NULL, NULL}, /* Casts have complex structures for names, so * skip it */ {"COLLATION", "SELECT pg_catalog.quote_ident(collname) FROM pg_catalog.pg_collation WHERE collencoding IN (-1, pg_catalog.pg_char_to_encoding(pg_catalog.getdatabaseencoding())) AND substring(pg_catalog.quote_ident(collname),1,%d)='%s'"}, @@ -1011,54 +940,56 @@ static const pgsql_thing_t words_after_create[] = { * CREATE CONSTRAINT TRIGGER is not supported here because it is designed * to be used only by pg_dump. */ - {"CONFIGURATION", Query_for_list_of_ts_configurations, NULL, THING_NO_SHOW}, + {"CONFIGURATION", Query_for_list_of_ts_configurations, NULL, NULL, THING_NO_SHOW}, {"CONVERSION", "SELECT pg_catalog.quote_ident(conname) FROM pg_catalog.pg_conversion WHERE substring(pg_catalog.quote_ident(conname),1,%d)='%s'"}, {"DATABASE", Query_for_list_of_databases}, - {"DEFAULT PRIVILEGES", NULL, NULL, THING_NO_CREATE | THING_NO_DROP}, - {"DICTIONARY", Query_for_list_of_ts_dictionaries, NULL, THING_NO_SHOW}, - {"DOMAIN", NULL, &Query_for_list_of_domains}, - {"EVENT TRIGGER", NULL, NULL}, + {"DEFAULT PRIVILEGES", NULL, NULL, NULL, THING_NO_CREATE | THING_NO_DROP}, + {"DICTIONARY", Query_for_list_of_ts_dictionaries, NULL, NULL, THING_NO_SHOW}, + {"DOMAIN", NULL, NULL, &Query_for_list_of_domains}, + {"EVENT TRIGGER", NULL, NULL, NULL}, {"EXTENSION", Query_for_list_of_extensions}, - {"FOREIGN DATA WRAPPER", NULL, NULL}, - {"FOREIGN TABLE", NULL, NULL}, - {"FUNCTION", NULL, &Query_for_list_of_functions}, + {"FOREIGN DATA WRAPPER", NULL, NULL, NULL}, + {"FOREIGN TABLE", NULL, NULL, NULL}, + {"FUNCTION", NULL, NULL, Query_for_list_of_functions}, {"GROUP", Query_for_list_of_roles}, - {"INDEX", NULL, &Query_for_list_of_indexes}, + {"INDEX", NULL, NULL, &Query_for_list_of_indexes}, {"LANGUAGE", Query_for_list_of_languages}, - {"LARGE OBJECT", NULL, NULL, THING_NO_CREATE | THING_NO_DROP}, - {"MATERIALIZED VIEW", NULL, &Query_for_list_of_matviews}, - {"OPERATOR", NULL, NULL}, /* Querying for this is probably not such a - * good idea. 
*/ - {"OWNED", NULL, NULL, THING_NO_CREATE | THING_NO_ALTER}, /* for DROP OWNED BY ... */ - {"PARSER", Query_for_list_of_ts_parsers, NULL, THING_NO_SHOW}, - {"POLICY", NULL, NULL}, - {"PUBLICATION", Query_for_list_of_publications}, + {"LARGE OBJECT", NULL, NULL, NULL, THING_NO_CREATE | THING_NO_DROP}, + {"MATERIALIZED VIEW", NULL, NULL, &Query_for_list_of_matviews}, + {"OPERATOR", NULL, NULL, NULL}, /* Querying for this is probably not such + * a good idea. */ + {"OWNED", NULL, NULL, NULL, THING_NO_CREATE | THING_NO_ALTER}, /* for DROP OWNED BY ... */ + {"PARSER", Query_for_list_of_ts_parsers, NULL, NULL, THING_NO_SHOW}, + {"POLICY", NULL, NULL, NULL}, + {"PROCEDURE", NULL, NULL, Query_for_list_of_procedures}, + {"PUBLICATION", NULL, Query_for_list_of_publications}, {"ROLE", Query_for_list_of_roles}, + {"ROUTINE", NULL, NULL, &Query_for_list_of_routines, THING_NO_CREATE}, {"RULE", "SELECT pg_catalog.quote_ident(rulename) FROM pg_catalog.pg_rules WHERE substring(pg_catalog.quote_ident(rulename),1,%d)='%s'"}, {"SCHEMA", Query_for_list_of_schemas}, - {"SEQUENCE", NULL, &Query_for_list_of_sequences}, + {"SEQUENCE", NULL, NULL, &Query_for_list_of_sequences}, {"SERVER", Query_for_list_of_servers}, - {"STATISTICS", NULL, &Query_for_list_of_statistics}, - {"SUBSCRIPTION", Query_for_list_of_subscriptions}, - {"SYSTEM", NULL, NULL, THING_NO_CREATE | THING_NO_DROP}, - {"TABLE", NULL, &Query_for_list_of_tables}, + {"STATISTICS", NULL, NULL, &Query_for_list_of_statistics}, + {"SUBSCRIPTION", NULL, Query_for_list_of_subscriptions}, + {"SYSTEM", NULL, NULL, NULL, THING_NO_CREATE | THING_NO_DROP}, + {"TABLE", NULL, NULL, &Query_for_list_of_tables}, {"TABLESPACE", Query_for_list_of_tablespaces}, - {"TEMP", NULL, NULL, THING_NO_DROP | THING_NO_ALTER}, /* for CREATE TEMP TABLE - * ... */ - {"TEMPLATE", Query_for_list_of_ts_templates, NULL, THING_NO_SHOW}, - {"TEMPORARY", NULL, NULL, THING_NO_DROP | THING_NO_ALTER}, /* for CREATE TEMPORARY - * TABLE ... */ - {"TEXT SEARCH", NULL, NULL}, - {"TRANSFORM", NULL, NULL}, + {"TEMP", NULL, NULL, NULL, THING_NO_DROP | THING_NO_ALTER}, /* for CREATE TEMP TABLE + * ... */ + {"TEMPLATE", Query_for_list_of_ts_templates, NULL, NULL, THING_NO_SHOW}, + {"TEMPORARY", NULL, NULL, NULL, THING_NO_DROP | THING_NO_ALTER}, /* for CREATE TEMPORARY + * TABLE ... */ + {"TEXT SEARCH", NULL, NULL, NULL}, + {"TRANSFORM", NULL, NULL, NULL}, {"TRIGGER", "SELECT pg_catalog.quote_ident(tgname) FROM pg_catalog.pg_trigger WHERE substring(pg_catalog.quote_ident(tgname),1,%d)='%s' AND NOT tgisinternal"}, - {"TYPE", NULL, &Query_for_list_of_datatypes}, - {"UNIQUE", NULL, NULL, THING_NO_DROP | THING_NO_ALTER}, /* for CREATE UNIQUE - * INDEX ... */ - {"UNLOGGED", NULL, NULL, THING_NO_DROP | THING_NO_ALTER}, /* for CREATE UNLOGGED - * TABLE ... */ + {"TYPE", NULL, NULL, &Query_for_list_of_datatypes}, + {"UNIQUE", NULL, NULL, NULL, THING_NO_DROP | THING_NO_ALTER}, /* for CREATE UNIQUE + * INDEX ... */ + {"UNLOGGED", NULL, NULL, NULL, THING_NO_DROP | THING_NO_ALTER}, /* for CREATE UNLOGGED + * TABLE ... 
*/ {"USER", Query_for_list_of_roles " UNION SELECT 'MAPPING FOR'"}, - {"USER MAPPING FOR", NULL, NULL}, - {"VIEW", NULL, &Query_for_list_of_views}, + {"USER MAPPING FOR", NULL, NULL, NULL}, + {"VIEW", NULL, NULL, &Query_for_list_of_views}, {NULL} /* end of list */ }; @@ -1069,8 +1000,11 @@ static char *create_command_generator(const char *text, int state); static char *drop_command_generator(const char *text, int state); static char *alter_command_generator(const char *text, int state); static char *complete_from_query(const char *text, int state); +static char *complete_from_versioned_query(const char *text, int state); static char *complete_from_schema_query(const char *text, int state); -static char *_complete_from_query(int is_schema_query, +static char *complete_from_versioned_schema_query(const char *text, int state); +static char *_complete_from_query(const char *simple_query, + const SchemaQuery *schema_query, const char *text, int state); static char *complete_from_list(const char *text, int state); static char *complete_from_const(const char *text, int state); @@ -1121,9 +1055,8 @@ initialize_readline(void) * If pattern is NULL, it's a wild card that matches any word. * If pattern begins with '!', the result is negated, ie we check that 'word' * does *not* match any alternative appearing in the rest of 'pattern'. - * Any alternative can end with '*' which is a wild card, i.e., it means - * match any word that matches the characters so far. (We do not currently - * support '*' elsewhere than the end of an alternative.) + * Any alternative can contain '*' which is a wild card, i.e., it can match + * any substring; however, we allow at most one '*' per alternative. * * For readability, callers should use the macros MatchAny and MatchAnyExcept * to invoke those two special cases for 'pattern'. (But '|' and '*' must @@ -1133,12 +1066,14 @@ initialize_readline(void) #define MatchAnyExcept(pattern) ("!" pattern) static bool -word_matches_internal(const char *pattern, - const char *word, - bool case_sensitive) +word_matches(const char *pattern, + const char *word, + bool case_sensitive) { - size_t wordlen, - patternlen; + size_t wordlen; + +#define cimatch(s1, s2, n) \ + (case_sensitive ? strncmp(s1, s2, n) == 0 : pg_strncasecmp(s1, s2, n) == 0) /* NULL pattern matches anything. */ if (pattern == NULL) @@ -1146,37 +1081,40 @@ word_matches_internal(const char *pattern, /* Handle negated patterns from the MatchAnyExcept macro. */ if (*pattern == '!') - return !word_matches_internal(pattern + 1, word, case_sensitive); + return !word_matches(pattern + 1, word, case_sensitive); /* Else consider each alternative in the pattern. */ wordlen = strlen(word); for (;;) { + const char *star = NULL; const char *c; - /* Find end of current alternative. */ + /* Find end of current alternative, and locate any wild card. */ c = pattern; while (*c != '\0' && *c != '|') + { + if (*c == '*') + star = c; c++; - /* Was there a wild card? (Assumes first alternative is not empty) */ - if (c[-1] == '*') + } + /* Was there a wild card? */ + if (star) { /* Yes, wildcard match? */ - patternlen = c - pattern - 1; - if (wordlen >= patternlen && - (case_sensitive ? - strncmp(word, pattern, patternlen) == 0 : - pg_strncasecmp(word, pattern, patternlen) == 0)) + size_t beforelen = star - pattern, + afterlen = c - star - 1; + + if (wordlen >= (beforelen + afterlen) && + cimatch(word, pattern, beforelen) && + cimatch(word + wordlen - afterlen, star + 1, afterlen)) return true; } else { /* No, plain match? 
*/ - patternlen = c - pattern; - if (wordlen == patternlen && - (case_sensitive ? - strncmp(word, pattern, wordlen) == 0 : - pg_strncasecmp(word, pattern, wordlen) == 0)) + if (wordlen == (c - pattern) && + cimatch(word, pattern, wordlen)) return true; } /* Out of alternatives? */ @@ -1190,24 +1128,105 @@ word_matches_internal(const char *pattern, } /* - * There are enough matching calls below that it seems worth having these two - * interface routines rather than including a third parameter in every call. + * Implementation of TailMatches and TailMatchesCS macros: do the last N words + * in previous_words match the variadic arguments? * - * word_matches --- match case-insensitively. + * The array indexing might look backwards, but remember that + * previous_words[0] contains the *last* word on the line, not the first. */ static bool -word_matches(const char *pattern, const char *word) +TailMatchesImpl(bool case_sensitive, + int previous_words_count, char **previous_words, + int narg,...) { - return word_matches_internal(pattern, word, false); + va_list args; + + if (previous_words_count < narg) + return false; + + va_start(args, narg); + + for (int argno = 0; argno < narg; argno++) + { + const char *arg = va_arg(args, const char *); + + if (!word_matches(arg, previous_words[narg - argno - 1], + case_sensitive)) + { + va_end(args); + return false; + } + } + + va_end(args); + + return true; } /* - * word_matches_cs --- match case-sensitively. + * Implementation of Matches and MatchesCS macros: do all of the words + * in previous_words match the variadic arguments? */ static bool -word_matches_cs(const char *pattern, const char *word) +MatchesImpl(bool case_sensitive, + int previous_words_count, char **previous_words, + int narg,...) { - return word_matches_internal(pattern, word, true); + va_list args; + + if (previous_words_count != narg) + return false; + + va_start(args, narg); + + for (int argno = 0; argno < narg; argno++) + { + const char *arg = va_arg(args, const char *); + + if (!word_matches(arg, previous_words[narg - argno - 1], + case_sensitive)) + { + va_end(args); + return false; + } + } + + va_end(args); + + return true; +} + +/* + * Implementation of HeadMatches and HeadMatchesCS macros: do the first N + * words in previous_words match the variadic arguments? + */ +static bool +HeadMatchesImpl(bool case_sensitive, + int previous_words_count, char **previous_words, + int narg,...) +{ + va_list args; + + if (previous_words_count < narg) + return false; + + va_start(args, narg); + + for (int argno = 0; argno < narg; argno++) + { + const char *arg = va_arg(args, const char *); + + if (!word_matches(arg, previous_words[previous_words_count - argno - 1], + case_sensitive)) + { + va_end(args); + return false; + } + } + + va_end(args); + + return true; } /* @@ -1261,153 +1280,39 @@ psql_completion(const char *text, int start, int end) #define prev8_wd (previous_words[7]) #define prev9_wd (previous_words[8]) - /* Macros for matching the last N words before point, case-insensitively. 
*/ -#define TailMatches1(p1) \ - (previous_words_count >= 1 && \ - word_matches(p1, prev_wd)) - -#define TailMatches2(p2, p1) \ - (previous_words_count >= 2 && \ - word_matches(p1, prev_wd) && \ - word_matches(p2, prev2_wd)) - -#define TailMatches3(p3, p2, p1) \ - (previous_words_count >= 3 && \ - word_matches(p1, prev_wd) && \ - word_matches(p2, prev2_wd) && \ - word_matches(p3, prev3_wd)) - -#define TailMatches4(p4, p3, p2, p1) \ - (previous_words_count >= 4 && \ - word_matches(p1, prev_wd) && \ - word_matches(p2, prev2_wd) && \ - word_matches(p3, prev3_wd) && \ - word_matches(p4, prev4_wd)) - -#define TailMatches5(p5, p4, p3, p2, p1) \ - (previous_words_count >= 5 && \ - word_matches(p1, prev_wd) && \ - word_matches(p2, prev2_wd) && \ - word_matches(p3, prev3_wd) && \ - word_matches(p4, prev4_wd) && \ - word_matches(p5, prev5_wd)) - -#define TailMatches6(p6, p5, p4, p3, p2, p1) \ - (previous_words_count >= 6 && \ - word_matches(p1, prev_wd) && \ - word_matches(p2, prev2_wd) && \ - word_matches(p3, prev3_wd) && \ - word_matches(p4, prev4_wd) && \ - word_matches(p5, prev5_wd) && \ - word_matches(p6, prev6_wd)) - -#define TailMatches7(p7, p6, p5, p4, p3, p2, p1) \ - (previous_words_count >= 7 && \ - word_matches(p1, prev_wd) && \ - word_matches(p2, prev2_wd) && \ - word_matches(p3, prev3_wd) && \ - word_matches(p4, prev4_wd) && \ - word_matches(p5, prev5_wd) && \ - word_matches(p6, prev6_wd) && \ - word_matches(p7, prev7_wd)) - -#define TailMatches8(p8, p7, p6, p5, p4, p3, p2, p1) \ - (previous_words_count >= 8 && \ - word_matches(p1, prev_wd) && \ - word_matches(p2, prev2_wd) && \ - word_matches(p3, prev3_wd) && \ - word_matches(p4, prev4_wd) && \ - word_matches(p5, prev5_wd) && \ - word_matches(p6, prev6_wd) && \ - word_matches(p7, prev7_wd) && \ - word_matches(p8, prev8_wd)) - -#define TailMatches9(p9, p8, p7, p6, p5, p4, p3, p2, p1) \ - (previous_words_count >= 9 && \ - word_matches(p1, prev_wd) && \ - word_matches(p2, prev2_wd) && \ - word_matches(p3, prev3_wd) && \ - word_matches(p4, prev4_wd) && \ - word_matches(p5, prev5_wd) && \ - word_matches(p6, prev6_wd) && \ - word_matches(p7, prev7_wd) && \ - word_matches(p8, prev8_wd) && \ - word_matches(p9, prev9_wd)) - - /* Macros for matching the last N words before point, case-sensitively. */ -#define TailMatchesCS1(p1) \ - (previous_words_count >= 1 && \ - word_matches_cs(p1, prev_wd)) -#define TailMatchesCS2(p2, p1) \ - (previous_words_count >= 2 && \ - word_matches_cs(p1, prev_wd) && \ - word_matches_cs(p2, prev2_wd)) -#define TailMatchesCS3(p3, p2, p1) \ - (previous_words_count >= 3 && \ - word_matches_cs(p1, prev_wd) && \ - word_matches_cs(p2, prev2_wd) && \ - word_matches_cs(p3, prev3_wd)) -#define TailMatchesCS4(p4, p3, p2, p1) \ - (previous_words_count >= 4 && \ - word_matches_cs(p1, prev_wd) && \ - word_matches_cs(p2, prev2_wd) && \ - word_matches_cs(p3, prev3_wd) && \ - word_matches_cs(p4, prev4_wd)) + /* Match the last N words before point, case-insensitively. */ +#define TailMatches(...) \ + TailMatchesImpl(false, previous_words_count, previous_words, \ + VA_ARGS_NARGS(__VA_ARGS__), __VA_ARGS__) - /* - * Macros for matching N words beginning at the start of the line, - * case-insensitively. 
- */ -#define Matches1(p1) \ - (previous_words_count == 1 && \ - TailMatches1(p1)) -#define Matches2(p1, p2) \ - (previous_words_count == 2 && \ - TailMatches2(p1, p2)) -#define Matches3(p1, p2, p3) \ - (previous_words_count == 3 && \ - TailMatches3(p1, p2, p3)) -#define Matches4(p1, p2, p3, p4) \ - (previous_words_count == 4 && \ - TailMatches4(p1, p2, p3, p4)) -#define Matches5(p1, p2, p3, p4, p5) \ - (previous_words_count == 5 && \ - TailMatches5(p1, p2, p3, p4, p5)) -#define Matches6(p1, p2, p3, p4, p5, p6) \ - (previous_words_count == 6 && \ - TailMatches6(p1, p2, p3, p4, p5, p6)) -#define Matches7(p1, p2, p3, p4, p5, p6, p7) \ - (previous_words_count == 7 && \ - TailMatches7(p1, p2, p3, p4, p5, p6, p7)) -#define Matches8(p1, p2, p3, p4, p5, p6, p7, p8) \ - (previous_words_count == 8 && \ - TailMatches8(p1, p2, p3, p4, p5, p6, p7, p8)) -#define Matches9(p1, p2, p3, p4, p5, p6, p7, p8, p9) \ - (previous_words_count == 9 && \ - TailMatches9(p1, p2, p3, p4, p5, p6, p7, p8, p9)) + /* Match the last N words before point, case-sensitively. */ +#define TailMatchesCS(...) \ + TailMatchesImpl(true, previous_words_count, previous_words, \ + VA_ARGS_NARGS(__VA_ARGS__), __VA_ARGS__) - /* - * Macros for matching N words at the start of the line, regardless of - * what is after them, case-insensitively. - */ -#define HeadMatches1(p1) \ - (previous_words_count >= 1 && \ - word_matches(p1, previous_words[previous_words_count - 1])) + /* Match N words representing all of the line, case-insensitively. */ +#define Matches(...) \ + MatchesImpl(false, previous_words_count, previous_words, \ + VA_ARGS_NARGS(__VA_ARGS__), __VA_ARGS__) -#define HeadMatches2(p1, p2) \ - (previous_words_count >= 2 && \ - word_matches(p1, previous_words[previous_words_count - 1]) && \ - word_matches(p2, previous_words[previous_words_count - 2])) + /* Match N words representing all of the line, case-sensitively. */ +#define MatchesCS(...) \ + MatchesImpl(true, previous_words_count, previous_words, \ + VA_ARGS_NARGS(__VA_ARGS__), __VA_ARGS__) -#define HeadMatches3(p1, p2, p3) \ - (previous_words_count >= 3 && \ - word_matches(p1, previous_words[previous_words_count - 1]) && \ - word_matches(p2, previous_words[previous_words_count - 2]) && \ - word_matches(p3, previous_words[previous_words_count - 3])) + /* Match the first N words on the line, case-insensitively. */ +#define HeadMatches(...) \ + HeadMatchesImpl(false, previous_words_count, previous_words, \ + VA_ARGS_NARGS(__VA_ARGS__), __VA_ARGS__) + + /* Match the first N words on the line, case-sensitively. */ +#define HeadMatchesCS(...) \ + HeadMatchesImpl(true, previous_words_count, previous_words, \ + VA_ARGS_NARGS(__VA_ARGS__), __VA_ARGS__) /* Known command-starting keywords. 
*/ static const char *const sql_commands[] = { - "ABORT", "ALTER", "ANALYZE", "BEGIN", "CHECKPOINT", "CLOSE", "CLUSTER", + "ABORT", "ALTER", "ANALYZE", "BEGIN", "CALL", "CHECKPOINT", "CLOSE", "CLUSTER", "COMMENT", "COMMIT", "COPY", "CREATE", "DEALLOCATE", "DECLARE", "DELETE FROM", "DISCARD", "DO", "DROP", "END", "EXECUTE", "EXPLAIN", "FETCH", "GRANT", "IMPORT", "INSERT", "LISTEN", "LOAD", "LOCK", @@ -1433,7 +1338,7 @@ psql_completion(const char *text, int start, int end) "\\e", "\\echo", "\\ef", "\\elif", "\\else", "\\encoding", "\\endif", "\\errverbose", "\\ev", "\\f", - "\\g", "\\gexec", "\\gset", "\\gx", + "\\g", "\\gdesc", "\\gexec", "\\gset", "\\gx", "\\h", "\\help", "\\H", "\\i", "\\if", "\\ir", "\\l", "\\lo_import", "\\lo_export", "\\lo_list", "\\lo_unlink", @@ -1493,349 +1398,350 @@ psql_completion(const char *text, int start, int end) /* CREATE */ /* complete with something you can create */ - else if (TailMatches1("CREATE")) + else if (TailMatches("CREATE")) matches = completion_matches(text, create_command_generator); /* DROP, but not DROP embedded in other commands */ /* complete with something you can drop */ - else if (Matches1("DROP")) + else if (Matches("DROP")) matches = completion_matches(text, drop_command_generator); /* ALTER */ /* ALTER TABLE */ - else if (Matches2("ALTER", "TABLE")) + else if (Matches("ALTER", "TABLE")) COMPLETE_WITH_SCHEMA_QUERY(Query_for_list_of_tables, "UNION SELECT 'ALL IN TABLESPACE'"); /* ALTER something */ - else if (Matches1("ALTER")) + else if (Matches("ALTER")) matches = completion_matches(text, alter_command_generator); /* ALTER TABLE,INDEX,MATERIALIZED VIEW ALL IN TABLESPACE xxx */ - else if (TailMatches4("ALL", "IN", "TABLESPACE", MatchAny)) - COMPLETE_WITH_LIST2("SET TABLESPACE", "OWNED BY"); + else if (TailMatches("ALL", "IN", "TABLESPACE", MatchAny)) + COMPLETE_WITH("SET TABLESPACE", "OWNED BY"); /* ALTER TABLE,INDEX,MATERIALIZED VIEW ALL IN TABLESPACE xxx OWNED BY */ - else if (TailMatches6("ALL", "IN", "TABLESPACE", MatchAny, "OWNED", "BY")) + else if (TailMatches("ALL", "IN", "TABLESPACE", MatchAny, "OWNED", "BY")) COMPLETE_WITH_QUERY(Query_for_list_of_roles); /* ALTER TABLE,INDEX,MATERIALIZED VIEW ALL IN TABLESPACE xxx OWNED BY xxx */ - else if (TailMatches7("ALL", "IN", "TABLESPACE", MatchAny, "OWNED", "BY", MatchAny)) - COMPLETE_WITH_CONST("SET TABLESPACE"); - /* ALTER AGGREGATE,FUNCTION */ - else if (Matches3("ALTER", "AGGREGATE|FUNCTION", MatchAny)) - COMPLETE_WITH_CONST("("); - /* ALTER AGGREGATE,FUNCTION (...) */ - else if (Matches4("ALTER", "AGGREGATE|FUNCTION", MatchAny, MatchAny)) + else if (TailMatches("ALL", "IN", "TABLESPACE", MatchAny, "OWNED", "BY", MatchAny)) + COMPLETE_WITH("SET TABLESPACE"); + /* ALTER AGGREGATE,FUNCTION,PROCEDURE,ROUTINE */ + else if (Matches("ALTER", "AGGREGATE|FUNCTION|PROCEDURE|ROUTINE", MatchAny)) + COMPLETE_WITH("("); + /* ALTER AGGREGATE,FUNCTION,PROCEDURE,ROUTINE (...) 
*/ + else if (Matches("ALTER", "AGGREGATE|FUNCTION|PROCEDURE|ROUTINE", MatchAny, MatchAny)) { if (ends_with(prev_wd, ')')) - COMPLETE_WITH_LIST3("OWNER TO", "RENAME TO", "SET SCHEMA"); + COMPLETE_WITH("OWNER TO", "RENAME TO", "SET SCHEMA"); else COMPLETE_WITH_FUNCTION_ARG(prev2_wd); } /* ALTER PUBLICATION */ - else if (Matches3("ALTER", "PUBLICATION", MatchAny)) - COMPLETE_WITH_LIST5("ADD TABLE", "DROP TABLE", "OWNER TO", "RENAME TO", "SET"); + else if (Matches("ALTER", "PUBLICATION", MatchAny)) + COMPLETE_WITH("ADD TABLE", "DROP TABLE", "OWNER TO", "RENAME TO", "SET"); /* ALTER PUBLICATION SET */ - else if (Matches4("ALTER", "PUBLICATION", MatchAny, "SET")) - COMPLETE_WITH_LIST2("(", "TABLE"); + else if (Matches("ALTER", "PUBLICATION", MatchAny, "SET")) + COMPLETE_WITH("(", "TABLE"); /* ALTER PUBLICATION SET ( */ - else if (HeadMatches3("ALTER", "PUBLICATION", MatchAny) && TailMatches2("SET", "(")) - COMPLETE_WITH_CONST("publish"); + else if (HeadMatches("ALTER", "PUBLICATION", MatchAny) && TailMatches("SET", "(")) + COMPLETE_WITH("publish"); /* ALTER SUBSCRIPTION */ - else if (Matches3("ALTER", "SUBSCRIPTION", MatchAny)) - COMPLETE_WITH_LIST7("CONNECTION", "ENABLE", "DISABLE", "OWNER TO", - "RENAME TO", "REFRESH PUBLICATION", "SET"); + else if (Matches("ALTER", "SUBSCRIPTION", MatchAny)) + COMPLETE_WITH("CONNECTION", "ENABLE", "DISABLE", "OWNER TO", + "RENAME TO", "REFRESH PUBLICATION", "SET"); /* ALTER SUBSCRIPTION REFRESH PUBLICATION */ - else if (HeadMatches3("ALTER", "SUBSCRIPTION", MatchAny) && - TailMatches2("REFRESH", "PUBLICATION")) - COMPLETE_WITH_CONST("WITH ("); + else if (HeadMatches("ALTER", "SUBSCRIPTION", MatchAny) && + TailMatches("REFRESH", "PUBLICATION")) + COMPLETE_WITH("WITH ("); /* ALTER SUBSCRIPTION REFRESH PUBLICATION WITH ( */ - else if (HeadMatches3("ALTER", "SUBSCRIPTION", MatchAny) && - TailMatches4("REFRESH", "PUBLICATION", "WITH", "(")) - COMPLETE_WITH_CONST("copy_data"); + else if (HeadMatches("ALTER", "SUBSCRIPTION", MatchAny) && + TailMatches("REFRESH", "PUBLICATION", "WITH", "(")) + COMPLETE_WITH("copy_data"); /* ALTER SUBSCRIPTION SET */ - else if (Matches4("ALTER", "SUBSCRIPTION", MatchAny, "SET")) - COMPLETE_WITH_LIST2("(", "PUBLICATION"); + else if (Matches("ALTER", "SUBSCRIPTION", MatchAny, "SET")) + COMPLETE_WITH("(", "PUBLICATION"); /* ALTER SUBSCRIPTION SET ( */ - else if (HeadMatches3("ALTER", "SUBSCRIPTION", MatchAny) && TailMatches2("SET", "(")) - COMPLETE_WITH_LIST2("slot_name", "synchronous_commit"); + else if (HeadMatches("ALTER", "SUBSCRIPTION", MatchAny) && TailMatches("SET", "(")) + COMPLETE_WITH("slot_name", "synchronous_commit"); /* ALTER SUBSCRIPTION SET PUBLICATION */ - else if (HeadMatches3("ALTER", "SUBSCRIPTION", MatchAny) && TailMatches2("SET", "PUBLICATION")) + else if (HeadMatches("ALTER", "SUBSCRIPTION", MatchAny) && TailMatches("SET", "PUBLICATION")) { /* complete with nothing here as this refers to remote publications */ } /* ALTER SUBSCRIPTION SET PUBLICATION */ - else if (HeadMatches3("ALTER", "SUBSCRIPTION", MatchAny) && - TailMatches3("SET", "PUBLICATION", MatchAny)) - COMPLETE_WITH_CONST("WITH ("); + else if (HeadMatches("ALTER", "SUBSCRIPTION", MatchAny) && + TailMatches("SET", "PUBLICATION", MatchAny)) + COMPLETE_WITH("WITH ("); /* ALTER SUBSCRIPTION SET PUBLICATION WITH ( */ - else if (HeadMatches3("ALTER", "SUBSCRIPTION", MatchAny) && - TailMatches5("SET", "PUBLICATION", MatchAny, "WITH", "(")) - COMPLETE_WITH_LIST2("copy_data", "refresh"); + else if (HeadMatches("ALTER", "SUBSCRIPTION", MatchAny) && + 
TailMatches("SET", "PUBLICATION", MatchAny, "WITH", "(")) + COMPLETE_WITH("copy_data", "refresh"); /* ALTER SCHEMA */ - else if (Matches3("ALTER", "SCHEMA", MatchAny)) - COMPLETE_WITH_LIST2("OWNER TO", "RENAME TO"); + else if (Matches("ALTER", "SCHEMA", MatchAny)) + COMPLETE_WITH("OWNER TO", "RENAME TO"); /* ALTER COLLATION */ - else if (Matches3("ALTER", "COLLATION", MatchAny)) - COMPLETE_WITH_LIST3("OWNER TO", "RENAME TO", "SET SCHEMA"); + else if (Matches("ALTER", "COLLATION", MatchAny)) + COMPLETE_WITH("OWNER TO", "RENAME TO", "SET SCHEMA"); /* ALTER CONVERSION */ - else if (Matches3("ALTER", "CONVERSION", MatchAny)) - COMPLETE_WITH_LIST3("OWNER TO", "RENAME TO", "SET SCHEMA"); + else if (Matches("ALTER", "CONVERSION", MatchAny)) + COMPLETE_WITH("OWNER TO", "RENAME TO", "SET SCHEMA"); /* ALTER DATABASE */ - else if (Matches3("ALTER", "DATABASE", MatchAny)) - COMPLETE_WITH_LIST7("RESET", "SET", "OWNER TO", "RENAME TO", - "IS_TEMPLATE", "ALLOW_CONNECTIONS", - "CONNECTION LIMIT"); + else if (Matches("ALTER", "DATABASE", MatchAny)) + COMPLETE_WITH("RESET", "SET", "OWNER TO", "RENAME TO", + "IS_TEMPLATE", "ALLOW_CONNECTIONS", + "CONNECTION LIMIT"); + + /* ALTER DATABASE SET TABLESPACE */ + else if (Matches("ALTER", "DATABASE", MatchAny, "SET", "TABLESPACE")) + COMPLETE_WITH_QUERY(Query_for_list_of_tablespaces); /* ALTER EVENT TRIGGER */ - else if (Matches3("ALTER", "EVENT", "TRIGGER")) + else if (Matches("ALTER", "EVENT", "TRIGGER")) COMPLETE_WITH_QUERY(Query_for_list_of_event_triggers); /* ALTER EVENT TRIGGER */ - else if (Matches4("ALTER", "EVENT", "TRIGGER", MatchAny)) - COMPLETE_WITH_LIST4("DISABLE", "ENABLE", "OWNER TO", "RENAME TO"); + else if (Matches("ALTER", "EVENT", "TRIGGER", MatchAny)) + COMPLETE_WITH("DISABLE", "ENABLE", "OWNER TO", "RENAME TO"); /* ALTER EVENT TRIGGER ENABLE */ - else if (Matches5("ALTER", "EVENT", "TRIGGER", MatchAny, "ENABLE")) - COMPLETE_WITH_LIST2("REPLICA", "ALWAYS"); + else if (Matches("ALTER", "EVENT", "TRIGGER", MatchAny, "ENABLE")) + COMPLETE_WITH("REPLICA", "ALWAYS"); /* ALTER EXTENSION */ - else if (Matches3("ALTER", "EXTENSION", MatchAny)) - COMPLETE_WITH_LIST4("ADD", "DROP", "UPDATE", "SET SCHEMA"); + else if (Matches("ALTER", "EXTENSION", MatchAny)) + COMPLETE_WITH("ADD", "DROP", "UPDATE", "SET SCHEMA"); /* ALTER EXTENSION UPDATE */ - else if (Matches4("ALTER", "EXTENSION", MatchAny, "UPDATE")) + else if (Matches("ALTER", "EXTENSION", MatchAny, "UPDATE")) { completion_info_charp = prev2_wd; COMPLETE_WITH_QUERY(Query_for_list_of_available_extension_versions_with_TO); } /* ALTER EXTENSION UPDATE TO */ - else if (Matches5("ALTER", "EXTENSION", MatchAny, "UPDATE", "TO")) + else if (Matches("ALTER", "EXTENSION", MatchAny, "UPDATE", "TO")) { completion_info_charp = prev3_wd; COMPLETE_WITH_QUERY(Query_for_list_of_available_extension_versions); } /* ALTER FOREIGN */ - else if (Matches2("ALTER", "FOREIGN")) - COMPLETE_WITH_LIST2("DATA WRAPPER", "TABLE"); + else if (Matches("ALTER", "FOREIGN")) + COMPLETE_WITH("DATA WRAPPER", "TABLE"); /* ALTER FOREIGN DATA WRAPPER */ - else if (Matches5("ALTER", "FOREIGN", "DATA", "WRAPPER", MatchAny)) - COMPLETE_WITH_LIST5("HANDLER", "VALIDATOR", "OPTIONS", "OWNER TO", "RENAME TO"); + else if (Matches("ALTER", "FOREIGN", "DATA", "WRAPPER", MatchAny)) + COMPLETE_WITH("HANDLER", "VALIDATOR", "OPTIONS", "OWNER TO", "RENAME TO"); /* ALTER FOREIGN TABLE */ - else if (Matches4("ALTER", "FOREIGN", "TABLE", MatchAny)) - { - static const char *const list_ALTER_FOREIGN_TABLE[] = - {"ADD", "ALTER", "DISABLE TRIGGER", "DROP", 
"ENABLE", "INHERIT", - "NO INHERIT", "OPTIONS", "OWNER TO", "RENAME", "SET", - "VALIDATE CONSTRAINT", NULL}; - - COMPLETE_WITH_LIST(list_ALTER_FOREIGN_TABLE); - } + else if (Matches("ALTER", "FOREIGN", "TABLE", MatchAny)) + COMPLETE_WITH("ADD", "ALTER", "DISABLE TRIGGER", "DROP", "ENABLE", + "INHERIT", "NO INHERIT", "OPTIONS", "OWNER TO", + "RENAME", "SET", "VALIDATE CONSTRAINT"); /* ALTER INDEX */ - else if (Matches2("ALTER", "INDEX")) + else if (Matches("ALTER", "INDEX")) COMPLETE_WITH_SCHEMA_QUERY(Query_for_list_of_indexes, "UNION SELECT 'ALL IN TABLESPACE'"); /* ALTER INDEX */ - else if (Matches3("ALTER", "INDEX", MatchAny)) - COMPLETE_WITH_LIST4("OWNER TO", "RENAME TO", "SET", "RESET"); + else if (Matches("ALTER", "INDEX", MatchAny)) + COMPLETE_WITH("ALTER COLUMN", "OWNER TO", "RENAME TO", "SET", + "RESET", "ATTACH PARTITION"); + else if (Matches("ALTER", "INDEX", MatchAny, "ATTACH")) + COMPLETE_WITH("PARTITION"); + else if (Matches("ALTER", "INDEX", MatchAny, "ATTACH", "PARTITION")) + COMPLETE_WITH_SCHEMA_QUERY(Query_for_list_of_indexes, NULL); + /* ALTER INDEX ALTER COLUMN */ + else if (Matches("ALTER", "INDEX", MatchAny, "ALTER", "COLUMN", MatchAny)) + COMPLETE_WITH("SET STATISTICS"); /* ALTER INDEX SET */ - else if (Matches4("ALTER", "INDEX", MatchAny, "SET")) - COMPLETE_WITH_LIST2("(", "TABLESPACE"); + else if (Matches("ALTER", "INDEX", MatchAny, "SET")) + COMPLETE_WITH("(", "TABLESPACE"); /* ALTER INDEX RESET */ - else if (Matches4("ALTER", "INDEX", MatchAny, "RESET")) - COMPLETE_WITH_CONST("("); + else if (Matches("ALTER", "INDEX", MatchAny, "RESET")) + COMPLETE_WITH("("); /* ALTER INDEX SET|RESET ( */ - else if (Matches5("ALTER", "INDEX", MatchAny, "RESET", "(")) - COMPLETE_WITH_LIST3("fillfactor", "fastupdate", - "gin_pending_list_limit"); - else if (Matches5("ALTER", "INDEX", MatchAny, "SET", "(")) - COMPLETE_WITH_LIST3("fillfactor =", "fastupdate =", - "gin_pending_list_limit ="); + else if (Matches("ALTER", "INDEX", MatchAny, "RESET", "(")) + COMPLETE_WITH("fillfactor", "recheck_on_update", + "vacuum_cleanup_index_scale_factor", /* BTREE */ + "fastupdate", "gin_pending_list_limit", /* GIN */ + "buffering", /* GiST */ + "pages_per_range", "autosummarize" /* BRIN */ + ); + else if (Matches("ALTER", "INDEX", MatchAny, "SET", "(")) + COMPLETE_WITH("fillfactor =", "recheck_on_update =", + "vacuum_cleanup_index_scale_factor =", /* BTREE */ + "fastupdate =", "gin_pending_list_limit =", /* GIN */ + "buffering =", /* GiST */ + "pages_per_range =", "autosummarize =" /* BRIN */ + ); /* ALTER LANGUAGE */ - else if (Matches3("ALTER", "LANGUAGE", MatchAny)) - COMPLETE_WITH_LIST2("OWNER_TO", "RENAME TO"); + else if (Matches("ALTER", "LANGUAGE", MatchAny)) + COMPLETE_WITH("OWNER_TO", "RENAME TO"); /* ALTER LARGE OBJECT */ - else if (Matches4("ALTER", "LARGE", "OBJECT", MatchAny)) - COMPLETE_WITH_CONST("OWNER TO"); + else if (Matches("ALTER", "LARGE", "OBJECT", MatchAny)) + COMPLETE_WITH("OWNER TO"); /* ALTER MATERIALIZED VIEW */ - else if (Matches3("ALTER", "MATERIALIZED", "VIEW")) + else if (Matches("ALTER", "MATERIALIZED", "VIEW")) COMPLETE_WITH_SCHEMA_QUERY(Query_for_list_of_matviews, "UNION SELECT 'ALL IN TABLESPACE'"); /* ALTER USER,ROLE */ - else if (Matches3("ALTER", "USER|ROLE", MatchAny) && - !TailMatches2("USER", "MAPPING")) - { - static const char *const list_ALTERUSER[] = - {"BYPASSRLS", "CONNECTION LIMIT", "CREATEDB", "CREATEROLE", - "ENCRYPTED PASSWORD", "INHERIT", "LOGIN", "NOBYPASSRLS", - "NOCREATEDB", "NOCREATEROLE", "NOINHERIT", - "NOLOGIN", "NOREPLICATION", 
"NOSUPERUSER", "PASSWORD", "RENAME TO", - "REPLICATION", "RESET", "SET", "SUPERUSER", - "VALID UNTIL", "WITH", NULL}; - - COMPLETE_WITH_LIST(list_ALTERUSER); - } + else if (Matches("ALTER", "USER|ROLE", MatchAny) && + !TailMatches("USER", "MAPPING")) + COMPLETE_WITH("BYPASSRLS", "CONNECTION LIMIT", "CREATEDB", "CREATEROLE", + "ENCRYPTED PASSWORD", "INHERIT", "LOGIN", "NOBYPASSRLS", + "NOCREATEDB", "NOCREATEROLE", "NOINHERIT", + "NOLOGIN", "NOREPLICATION", "NOSUPERUSER", "PASSWORD", + "RENAME TO", "REPLICATION", "RESET", "SET", "SUPERUSER", + "VALID UNTIL", "WITH"); /* ALTER USER,ROLE WITH */ - else if (Matches4("ALTER", "USER|ROLE", MatchAny, "WITH")) - { + else if (Matches("ALTER", "USER|ROLE", MatchAny, "WITH")) /* Similar to the above, but don't complete "WITH" again. */ - static const char *const list_ALTERUSER_WITH[] = - {"BYPASSRLS", "CONNECTION LIMIT", "CREATEDB", "CREATEROLE", - "ENCRYPTED PASSWORD", "INHERIT", "LOGIN", "NOBYPASSRLS", - "NOCREATEDB", "NOCREATEROLE", "NOINHERIT", - "NOLOGIN", "NOREPLICATION", "NOSUPERUSER", "PASSWORD", "RENAME TO", - "REPLICATION", "RESET", "SET", "SUPERUSER", - "VALID UNTIL", NULL}; - - COMPLETE_WITH_LIST(list_ALTERUSER_WITH); - } + COMPLETE_WITH("BYPASSRLS", "CONNECTION LIMIT", "CREATEDB", "CREATEROLE", + "ENCRYPTED PASSWORD", "INHERIT", "LOGIN", "NOBYPASSRLS", + "NOCREATEDB", "NOCREATEROLE", "NOINHERIT", + "NOLOGIN", "NOREPLICATION", "NOSUPERUSER", "PASSWORD", + "RENAME TO", "REPLICATION", "RESET", "SET", "SUPERUSER", + "VALID UNTIL"); /* ALTER DEFAULT PRIVILEGES */ - else if (Matches3("ALTER", "DEFAULT", "PRIVILEGES")) - COMPLETE_WITH_LIST2("FOR ROLE", "IN SCHEMA"); + else if (Matches("ALTER", "DEFAULT", "PRIVILEGES")) + COMPLETE_WITH("FOR ROLE", "IN SCHEMA"); /* ALTER DEFAULT PRIVILEGES FOR */ - else if (Matches4("ALTER", "DEFAULT", "PRIVILEGES", "FOR")) - COMPLETE_WITH_CONST("ROLE"); + else if (Matches("ALTER", "DEFAULT", "PRIVILEGES", "FOR")) + COMPLETE_WITH("ROLE"); /* ALTER DEFAULT PRIVILEGES IN */ - else if (Matches4("ALTER", "DEFAULT", "PRIVILEGES", "IN")) - COMPLETE_WITH_CONST("SCHEMA"); + else if (Matches("ALTER", "DEFAULT", "PRIVILEGES", "IN")) + COMPLETE_WITH("SCHEMA"); /* ALTER DEFAULT PRIVILEGES FOR ROLE|USER ... */ - else if (Matches6("ALTER", "DEFAULT", "PRIVILEGES", "FOR", "ROLE|USER", - MatchAny)) - COMPLETE_WITH_LIST3("GRANT", "REVOKE", "IN SCHEMA"); + else if (Matches("ALTER", "DEFAULT", "PRIVILEGES", "FOR", "ROLE|USER", + MatchAny)) + COMPLETE_WITH("GRANT", "REVOKE", "IN SCHEMA"); /* ALTER DEFAULT PRIVILEGES IN SCHEMA ... */ - else if (Matches6("ALTER", "DEFAULT", "PRIVILEGES", "IN", "SCHEMA", - MatchAny)) - COMPLETE_WITH_LIST3("GRANT", "REVOKE", "FOR ROLE"); + else if (Matches("ALTER", "DEFAULT", "PRIVILEGES", "IN", "SCHEMA", + MatchAny)) + COMPLETE_WITH("GRANT", "REVOKE", "FOR ROLE"); /* ALTER DEFAULT PRIVILEGES IN SCHEMA ... FOR */ - else if (Matches7("ALTER", "DEFAULT", "PRIVILEGES", "IN", "SCHEMA", - MatchAny, "FOR")) - COMPLETE_WITH_CONST("ROLE"); + else if (Matches("ALTER", "DEFAULT", "PRIVILEGES", "IN", "SCHEMA", + MatchAny, "FOR")) + COMPLETE_WITH("ROLE"); /* ALTER DEFAULT PRIVILEGES FOR ROLE|USER ... IN SCHEMA ... */ /* ALTER DEFAULT PRIVILEGES IN SCHEMA ... FOR ROLE|USER ... 
*/ - else if (Matches9("ALTER", "DEFAULT", "PRIVILEGES", "FOR", "ROLE|USER", - MatchAny, "IN", "SCHEMA", MatchAny) || - Matches9("ALTER", "DEFAULT", "PRIVILEGES", "IN", "SCHEMA", - MatchAny, "FOR", "ROLE|USER", MatchAny)) - COMPLETE_WITH_LIST2("GRANT", "REVOKE"); + else if (Matches("ALTER", "DEFAULT", "PRIVILEGES", "FOR", "ROLE|USER", + MatchAny, "IN", "SCHEMA", MatchAny) || + Matches("ALTER", "DEFAULT", "PRIVILEGES", "IN", "SCHEMA", + MatchAny, "FOR", "ROLE|USER", MatchAny)) + COMPLETE_WITH("GRANT", "REVOKE"); /* ALTER DOMAIN */ - else if (Matches3("ALTER", "DOMAIN", MatchAny)) - COMPLETE_WITH_LIST6("ADD", "DROP", "OWNER TO", "RENAME", "SET", - "VALIDATE CONSTRAINT"); + else if (Matches("ALTER", "DOMAIN", MatchAny)) + COMPLETE_WITH("ADD", "DROP", "OWNER TO", "RENAME", "SET", + "VALIDATE CONSTRAINT"); /* ALTER DOMAIN DROP */ - else if (Matches4("ALTER", "DOMAIN", MatchAny, "DROP")) - COMPLETE_WITH_LIST3("CONSTRAINT", "DEFAULT", "NOT NULL"); + else if (Matches("ALTER", "DOMAIN", MatchAny, "DROP")) + COMPLETE_WITH("CONSTRAINT", "DEFAULT", "NOT NULL"); /* ALTER DOMAIN DROP|RENAME|VALIDATE CONSTRAINT */ - else if (Matches5("ALTER", "DOMAIN", MatchAny, "DROP|RENAME|VALIDATE", "CONSTRAINT")) + else if (Matches("ALTER", "DOMAIN", MatchAny, "DROP|RENAME|VALIDATE", "CONSTRAINT")) { completion_info_charp = prev3_wd; COMPLETE_WITH_QUERY(Query_for_constraint_of_type); } /* ALTER DOMAIN RENAME */ - else if (Matches4("ALTER", "DOMAIN", MatchAny, "RENAME")) - COMPLETE_WITH_LIST2("CONSTRAINT", "TO"); + else if (Matches("ALTER", "DOMAIN", MatchAny, "RENAME")) + COMPLETE_WITH("CONSTRAINT", "TO"); /* ALTER DOMAIN RENAME CONSTRAINT */ - else if (Matches6("ALTER", "DOMAIN", MatchAny, "RENAME", "CONSTRAINT", MatchAny)) - COMPLETE_WITH_CONST("TO"); + else if (Matches("ALTER", "DOMAIN", MatchAny, "RENAME", "CONSTRAINT", MatchAny)) + COMPLETE_WITH("TO"); /* ALTER DOMAIN SET */ - else if (Matches4("ALTER", "DOMAIN", MatchAny, "SET")) - COMPLETE_WITH_LIST3("DEFAULT", "NOT NULL", "SCHEMA"); + else if (Matches("ALTER", "DOMAIN", MatchAny, "SET")) + COMPLETE_WITH("DEFAULT", "NOT NULL", "SCHEMA"); /* ALTER SEQUENCE */ - else if (Matches3("ALTER", "SEQUENCE", MatchAny)) - { - static const char *const list_ALTERSEQUENCE[] = - {"INCREMENT", "MINVALUE", "MAXVALUE", "RESTART", "NO", "CACHE", "CYCLE", - "SET SCHEMA", "OWNED BY", "OWNER TO", "RENAME TO", NULL}; - - COMPLETE_WITH_LIST(list_ALTERSEQUENCE); - } + else if (Matches("ALTER", "SEQUENCE", MatchAny)) + COMPLETE_WITH("INCREMENT", "MINVALUE", "MAXVALUE", "RESTART", "NO", + "CACHE", "CYCLE", "SET SCHEMA", "OWNED BY", "OWNER TO", + "RENAME TO"); /* ALTER SEQUENCE NO */ - else if (Matches4("ALTER", "SEQUENCE", MatchAny, "NO")) - COMPLETE_WITH_LIST3("MINVALUE", "MAXVALUE", "CYCLE"); + else if (Matches("ALTER", "SEQUENCE", MatchAny, "NO")) + COMPLETE_WITH("MINVALUE", "MAXVALUE", "CYCLE"); /* ALTER SERVER */ - else if (Matches3("ALTER", "SERVER", MatchAny)) - COMPLETE_WITH_LIST4("VERSION", "OPTIONS", "OWNER TO", "RENAME TO"); + else if (Matches("ALTER", "SERVER", MatchAny)) + COMPLETE_WITH("VERSION", "OPTIONS", "OWNER TO", "RENAME TO"); /* ALTER SERVER VERSION */ - else if (Matches5("ALTER", "SERVER", MatchAny, "VERSION", MatchAny)) - COMPLETE_WITH_CONST("OPTIONS"); + else if (Matches("ALTER", "SERVER", MatchAny, "VERSION", MatchAny)) + COMPLETE_WITH("OPTIONS"); /* ALTER SYSTEM SET, RESET, RESET ALL */ - else if (Matches2("ALTER", "SYSTEM")) - COMPLETE_WITH_LIST2("SET", "RESET"); - else if (Matches3("ALTER", "SYSTEM", "SET|RESET")) + else if (Matches("ALTER", "SYSTEM")) + 
COMPLETE_WITH("SET", "RESET"); + else if (Matches("ALTER", "SYSTEM", "SET|RESET")) COMPLETE_WITH_QUERY(Query_for_list_of_alter_system_set_vars); - else if (Matches4("ALTER", "SYSTEM", "SET", MatchAny)) - COMPLETE_WITH_CONST("TO"); + else if (Matches("ALTER", "SYSTEM", "SET", MatchAny)) + COMPLETE_WITH("TO"); /* ALTER VIEW */ - else if (Matches3("ALTER", "VIEW", MatchAny)) - COMPLETE_WITH_LIST4("ALTER COLUMN", "OWNER TO", "RENAME TO", - "SET SCHEMA"); + else if (Matches("ALTER", "VIEW", MatchAny)) + COMPLETE_WITH("ALTER COLUMN", "OWNER TO", "RENAME TO", + "SET SCHEMA"); /* ALTER MATERIALIZED VIEW */ - else if (Matches4("ALTER", "MATERIALIZED", "VIEW", MatchAny)) - COMPLETE_WITH_LIST4("ALTER COLUMN", "OWNER TO", "RENAME TO", - "SET SCHEMA"); + else if (Matches("ALTER", "MATERIALIZED", "VIEW", MatchAny)) + COMPLETE_WITH("ALTER COLUMN", "OWNER TO", "RENAME TO", + "SET SCHEMA"); /* ALTER POLICY */ - else if (Matches2("ALTER", "POLICY")) + else if (Matches("ALTER", "POLICY")) COMPLETE_WITH_QUERY(Query_for_list_of_policies); /* ALTER POLICY ON */ - else if (Matches3("ALTER", "POLICY", MatchAny)) - COMPLETE_WITH_CONST("ON"); + else if (Matches("ALTER", "POLICY", MatchAny)) + COMPLETE_WITH("ON"); /* ALTER POLICY ON
*/ - else if (Matches4("ALTER", "POLICY", MatchAny, "ON")) + else if (Matches("ALTER", "POLICY", MatchAny, "ON")) { completion_info_charp = prev2_wd; COMPLETE_WITH_QUERY(Query_for_list_of_tables_for_policy); } /* ALTER POLICY ON
- show options */ - else if (Matches5("ALTER", "POLICY", MatchAny, "ON", MatchAny)) - COMPLETE_WITH_LIST4("RENAME TO", "TO", "USING (", "WITH CHECK ("); + else if (Matches("ALTER", "POLICY", MatchAny, "ON", MatchAny)) + COMPLETE_WITH("RENAME TO", "TO", "USING (", "WITH CHECK ("); /* ALTER POLICY ON
TO */ - else if (Matches6("ALTER", "POLICY", MatchAny, "ON", MatchAny, "TO")) + else if (Matches("ALTER", "POLICY", MatchAny, "ON", MatchAny, "TO")) COMPLETE_WITH_QUERY(Query_for_list_of_grant_roles); /* ALTER POLICY ON
USING ( */ - else if (Matches6("ALTER", "POLICY", MatchAny, "ON", MatchAny, "USING")) - COMPLETE_WITH_CONST("("); + else if (Matches("ALTER", "POLICY", MatchAny, "ON", MatchAny, "USING")) + COMPLETE_WITH("("); /* ALTER POLICY ON
WITH CHECK ( */ - else if (Matches7("ALTER", "POLICY", MatchAny, "ON", MatchAny, "WITH", "CHECK")) - COMPLETE_WITH_CONST("("); + else if (Matches("ALTER", "POLICY", MatchAny, "ON", MatchAny, "WITH", "CHECK")) + COMPLETE_WITH("("); /* ALTER RULE , add ON */ - else if (Matches3("ALTER", "RULE", MatchAny)) - COMPLETE_WITH_CONST("ON"); + else if (Matches("ALTER", "RULE", MatchAny)) + COMPLETE_WITH("ON"); /* If we have ALTER RULE ON, then add the correct tablename */ - else if (Matches4("ALTER", "RULE", MatchAny, "ON")) + else if (Matches("ALTER", "RULE", MatchAny, "ON")) { completion_info_charp = prev2_wd; COMPLETE_WITH_QUERY(Query_for_list_of_tables_for_rule); } /* ALTER RULE ON */ - else if (Matches5("ALTER", "RULE", MatchAny, "ON", MatchAny)) - COMPLETE_WITH_CONST("RENAME TO"); + else if (Matches("ALTER", "RULE", MatchAny, "ON", MatchAny)) + COMPLETE_WITH("RENAME TO"); /* ALTER STATISTICS */ - else if (Matches3("ALTER", "STATISTICS", MatchAny)) - COMPLETE_WITH_LIST3("OWNER TO", "RENAME TO", "SET SCHEMA"); + else if (Matches("ALTER", "STATISTICS", MatchAny)) + COMPLETE_WITH("OWNER TO", "RENAME TO", "SET SCHEMA"); /* ALTER TRIGGER , add ON */ - else if (Matches3("ALTER", "TRIGGER", MatchAny)) - COMPLETE_WITH_CONST("ON"); + else if (Matches("ALTER", "TRIGGER", MatchAny)) + COMPLETE_WITH("ON"); - else if (Matches4("ALTER", "TRIGGER", MatchAny, MatchAny)) + else if (Matches("ALTER", "TRIGGER", MatchAny, MatchAny)) { completion_info_charp = prev2_wd; COMPLETE_WITH_QUERY(Query_for_list_of_tables_for_trigger); @@ -1844,155 +1750,151 @@ psql_completion(const char *text, int start, int end) /* * If we have ALTER TRIGGER ON, then add the correct tablename */ - else if (Matches4("ALTER", "TRIGGER", MatchAny, "ON")) + else if (Matches("ALTER", "TRIGGER", MatchAny, "ON")) COMPLETE_WITH_SCHEMA_QUERY(Query_for_list_of_tables, NULL); /* ALTER TRIGGER ON */ - else if (Matches5("ALTER", "TRIGGER", MatchAny, "ON", MatchAny)) - COMPLETE_WITH_CONST("RENAME TO"); + else if (Matches("ALTER", "TRIGGER", MatchAny, "ON", MatchAny)) + COMPLETE_WITH("RENAME TO"); /* * If we detect ALTER TABLE , suggest sub commands */ - else if (Matches3("ALTER", "TABLE", MatchAny)) - { - static const char *const list_ALTER2[] = - {"ADD", "ALTER", "CLUSTER ON", "DISABLE", "DROP", "ENABLE", "INHERIT", - "NO INHERIT", "RENAME", "RESET", "OWNER TO", "SET", - "VALIDATE CONSTRAINT", "REPLICA IDENTITY", "ATTACH PARTITION", - "DETACH PARTITION", NULL}; - - COMPLETE_WITH_LIST(list_ALTER2); - } + else if (Matches("ALTER", "TABLE", MatchAny)) + COMPLETE_WITH("ADD", "ALTER", "CLUSTER ON", "DISABLE", "DROP", + "ENABLE", "INHERIT", "NO INHERIT", "RENAME", "RESET", + "OWNER TO", "SET", "VALIDATE CONSTRAINT", + "REPLICA IDENTITY", "ATTACH PARTITION", + "DETACH PARTITION"); /* ALTER TABLE xxx ENABLE */ - else if (Matches4("ALTER", "TABLE", MatchAny, "ENABLE")) - COMPLETE_WITH_LIST5("ALWAYS", "REPLICA", "ROW LEVEL SECURITY", "RULE", - "TRIGGER"); - else if (Matches5("ALTER", "TABLE", MatchAny, "ENABLE", "REPLICA|ALWAYS")) - COMPLETE_WITH_LIST2("RULE", "TRIGGER"); - else if (Matches5("ALTER", "TABLE", MatchAny, "ENABLE", "RULE")) + else if (Matches("ALTER", "TABLE", MatchAny, "ENABLE")) + COMPLETE_WITH("ALWAYS", "REPLICA", "ROW LEVEL SECURITY", "RULE", + "TRIGGER"); + else if (Matches("ALTER", "TABLE", MatchAny, "ENABLE", "REPLICA|ALWAYS")) + COMPLETE_WITH("RULE", "TRIGGER"); + else if (Matches("ALTER", "TABLE", MatchAny, "ENABLE", "RULE")) { completion_info_charp = prev3_wd; COMPLETE_WITH_QUERY(Query_for_rule_of_table); } - else if (Matches6("ALTER", 
"TABLE", MatchAny, "ENABLE", MatchAny, "RULE")) + else if (Matches("ALTER", "TABLE", MatchAny, "ENABLE", MatchAny, "RULE")) { completion_info_charp = prev4_wd; COMPLETE_WITH_QUERY(Query_for_rule_of_table); } - else if (Matches5("ALTER", "TABLE", MatchAny, "ENABLE", "TRIGGER")) + else if (Matches("ALTER", "TABLE", MatchAny, "ENABLE", "TRIGGER")) { completion_info_charp = prev3_wd; COMPLETE_WITH_QUERY(Query_for_trigger_of_table); } - else if (Matches6("ALTER", "TABLE", MatchAny, "ENABLE", MatchAny, "TRIGGER")) + else if (Matches("ALTER", "TABLE", MatchAny, "ENABLE", MatchAny, "TRIGGER")) { completion_info_charp = prev4_wd; COMPLETE_WITH_QUERY(Query_for_trigger_of_table); } /* ALTER TABLE xxx INHERIT */ - else if (Matches4("ALTER", "TABLE", MatchAny, "INHERIT")) + else if (Matches("ALTER", "TABLE", MatchAny, "INHERIT")) COMPLETE_WITH_SCHEMA_QUERY(Query_for_list_of_tables, ""); /* ALTER TABLE xxx NO INHERIT */ - else if (Matches5("ALTER", "TABLE", MatchAny, "NO", "INHERIT")) + else if (Matches("ALTER", "TABLE", MatchAny, "NO", "INHERIT")) COMPLETE_WITH_SCHEMA_QUERY(Query_for_list_of_tables, ""); /* ALTER TABLE xxx DISABLE */ - else if (Matches4("ALTER", "TABLE", MatchAny, "DISABLE")) - COMPLETE_WITH_LIST3("ROW LEVEL SECURITY", "RULE", "TRIGGER"); - else if (Matches5("ALTER", "TABLE", MatchAny, "DISABLE", "RULE")) + else if (Matches("ALTER", "TABLE", MatchAny, "DISABLE")) + COMPLETE_WITH("ROW LEVEL SECURITY", "RULE", "TRIGGER"); + else if (Matches("ALTER", "TABLE", MatchAny, "DISABLE", "RULE")) { completion_info_charp = prev3_wd; COMPLETE_WITH_QUERY(Query_for_rule_of_table); } - else if (Matches5("ALTER", "TABLE", MatchAny, "DISABLE", "TRIGGER")) + else if (Matches("ALTER", "TABLE", MatchAny, "DISABLE", "TRIGGER")) { completion_info_charp = prev3_wd; COMPLETE_WITH_QUERY(Query_for_trigger_of_table); } /* ALTER TABLE xxx ALTER */ - else if (Matches4("ALTER", "TABLE", MatchAny, "ALTER")) + else if (Matches("ALTER", "TABLE", MatchAny, "ALTER")) COMPLETE_WITH_ATTR(prev2_wd, " UNION SELECT 'COLUMN' UNION SELECT 'CONSTRAINT'"); /* ALTER TABLE xxx RENAME */ - else if (Matches4("ALTER", "TABLE", MatchAny, "RENAME")) + else if (Matches("ALTER", "TABLE", MatchAny, "RENAME")) COMPLETE_WITH_ATTR(prev2_wd, " UNION SELECT 'COLUMN' UNION SELECT 'CONSTRAINT' UNION SELECT 'TO'"); - else if (Matches5("ALTER", "TABLE", MatchAny, "ALTER|RENAME", "COLUMN")) + else if (Matches("ALTER", "TABLE", MatchAny, "ALTER|RENAME", "COLUMN")) COMPLETE_WITH_ATTR(prev3_wd, ""); /* ALTER TABLE xxx RENAME yyy */ - else if (Matches5("ALTER", "TABLE", MatchAny, "RENAME", MatchAnyExcept("CONSTRAINT|TO"))) - COMPLETE_WITH_CONST("TO"); + else if (Matches("ALTER", "TABLE", MatchAny, "RENAME", MatchAnyExcept("CONSTRAINT|TO"))) + COMPLETE_WITH("TO"); /* ALTER TABLE xxx RENAME COLUMN/CONSTRAINT yyy */ - else if (Matches6("ALTER", "TABLE", MatchAny, "RENAME", "COLUMN|CONSTRAINT", MatchAnyExcept("TO"))) - COMPLETE_WITH_CONST("TO"); + else if (Matches("ALTER", "TABLE", MatchAny, "RENAME", "COLUMN|CONSTRAINT", MatchAnyExcept("TO"))) + COMPLETE_WITH("TO"); /* If we have ALTER TABLE DROP, provide COLUMN or CONSTRAINT */ - else if (Matches4("ALTER", "TABLE", MatchAny, "DROP")) - COMPLETE_WITH_LIST2("COLUMN", "CONSTRAINT"); + else if (Matches("ALTER", "TABLE", MatchAny, "DROP")) + COMPLETE_WITH("COLUMN", "CONSTRAINT"); /* If we have ALTER TABLE DROP COLUMN, provide list of columns */ - else if (Matches5("ALTER", "TABLE", MatchAny, "DROP", "COLUMN")) + else if (Matches("ALTER", "TABLE", MatchAny, "DROP", "COLUMN")) COMPLETE_WITH_ATTR(prev3_wd, ""); 
/* * If we have ALTER TABLE ALTER|DROP|RENAME|VALIDATE CONSTRAINT, * provide list of constraints */ - else if (Matches5("ALTER", "TABLE", MatchAny, "ALTER|DROP|RENAME|VALIDATE", "CONSTRAINT")) + else if (Matches("ALTER", "TABLE", MatchAny, "ALTER|DROP|RENAME|VALIDATE", "CONSTRAINT")) { completion_info_charp = prev3_wd; COMPLETE_WITH_QUERY(Query_for_constraint_of_table); } /* ALTER TABLE ALTER [COLUMN] */ - else if (Matches6("ALTER", "TABLE", MatchAny, "ALTER", "COLUMN", MatchAny) || - Matches5("ALTER", "TABLE", MatchAny, "ALTER", MatchAny)) - COMPLETE_WITH_LIST6("TYPE", "SET", "RESET", "RESTART", "ADD", "DROP"); + else if (Matches("ALTER", "TABLE", MatchAny, "ALTER", "COLUMN", MatchAny) || + Matches("ALTER", "TABLE", MatchAny, "ALTER", MatchAny)) + COMPLETE_WITH("TYPE", "SET", "RESET", "RESTART", "ADD", "DROP"); /* ALTER TABLE ALTER [COLUMN] SET */ - else if (Matches7("ALTER", "TABLE", MatchAny, "ALTER", "COLUMN", MatchAny, "SET") || - Matches6("ALTER", "TABLE", MatchAny, "ALTER", MatchAny, "SET")) - COMPLETE_WITH_LIST5("(", "DEFAULT", "NOT NULL", "STATISTICS", "STORAGE"); + else if (Matches("ALTER", "TABLE", MatchAny, "ALTER", "COLUMN", MatchAny, "SET") || + Matches("ALTER", "TABLE", MatchAny, "ALTER", MatchAny, "SET")) + COMPLETE_WITH("(", "DEFAULT", "NOT NULL", "STATISTICS", "STORAGE"); /* ALTER TABLE ALTER [COLUMN] SET ( */ - else if (Matches8("ALTER", "TABLE", MatchAny, "ALTER", "COLUMN", MatchAny, "SET", "(") || - Matches7("ALTER", "TABLE", MatchAny, "ALTER", MatchAny, "SET", "(")) - COMPLETE_WITH_LIST2("n_distinct", "n_distinct_inherited"); + else if (Matches("ALTER", "TABLE", MatchAny, "ALTER", "COLUMN", MatchAny, "SET", "(") || + Matches("ALTER", "TABLE", MatchAny, "ALTER", MatchAny, "SET", "(")) + COMPLETE_WITH("n_distinct", "n_distinct_inherited"); /* ALTER TABLE ALTER [COLUMN] SET STORAGE */ - else if (Matches8("ALTER", "TABLE", MatchAny, "ALTER", "COLUMN", MatchAny, "SET", "STORAGE") || - Matches7("ALTER", "TABLE", MatchAny, "ALTER", MatchAny, "SET", "STORAGE")) - COMPLETE_WITH_LIST4("PLAIN", "EXTERNAL", "EXTENDED", "MAIN"); + else if (Matches("ALTER", "TABLE", MatchAny, "ALTER", "COLUMN", MatchAny, "SET", "STORAGE") || + Matches("ALTER", "TABLE", MatchAny, "ALTER", MatchAny, "SET", "STORAGE")) + COMPLETE_WITH("PLAIN", "EXTERNAL", "EXTENDED", "MAIN"); /* ALTER TABLE ALTER [COLUMN] DROP */ - else if (Matches7("ALTER", "TABLE", MatchAny, "ALTER", "COLUMN", MatchAny, "DROP") || - Matches6("ALTER", "TABLE", MatchAny, "ALTER", MatchAny, "DROP")) - COMPLETE_WITH_LIST3("DEFAULT", "IDENTITY", "NOT NULL"); - else if (Matches4("ALTER", "TABLE", MatchAny, "CLUSTER")) - COMPLETE_WITH_CONST("ON"); - else if (Matches5("ALTER", "TABLE", MatchAny, "CLUSTER", "ON")) + else if (Matches("ALTER", "TABLE", MatchAny, "ALTER", "COLUMN", MatchAny, "DROP") || + Matches("ALTER", "TABLE", MatchAny, "ALTER", MatchAny, "DROP")) + COMPLETE_WITH("DEFAULT", "IDENTITY", "NOT NULL"); + else if (Matches("ALTER", "TABLE", MatchAny, "CLUSTER")) + COMPLETE_WITH("ON"); + else if (Matches("ALTER", "TABLE", MatchAny, "CLUSTER", "ON")) { completion_info_charp = prev3_wd; COMPLETE_WITH_QUERY(Query_for_index_of_table); } /* If we have ALTER TABLE SET, provide list of attributes and '(' */ - else if (Matches4("ALTER", "TABLE", MatchAny, "SET")) - COMPLETE_WITH_LIST7("(", "LOGGED", "SCHEMA", "TABLESPACE", "UNLOGGED", - "WITH", "WITHOUT"); + else if (Matches("ALTER", "TABLE", MatchAny, "SET")) + COMPLETE_WITH("(", "LOGGED", "SCHEMA", "TABLESPACE", "UNLOGGED", + "WITH", "WITHOUT"); /* * If we have ALTER TABLE SET TABLESPACE 
provide a list of * tablespaces */ - else if (Matches5("ALTER", "TABLE", MatchAny, "SET", "TABLESPACE")) + else if (Matches("ALTER", "TABLE", MatchAny, "SET", "TABLESPACE")) COMPLETE_WITH_QUERY(Query_for_list_of_tablespaces); /* If we have ALTER TABLE SET WITH provide OIDS */ - else if (Matches5("ALTER", "TABLE", MatchAny, "SET", "WITH")) - COMPLETE_WITH_CONST("OIDS"); + else if (Matches("ALTER", "TABLE", MatchAny, "SET", "WITH")) + COMPLETE_WITH("OIDS"); /* If we have ALTER TABLE SET WITHOUT provide CLUSTER or OIDS */ - else if (Matches5("ALTER", "TABLE", MatchAny, "SET", "WITHOUT")) - COMPLETE_WITH_LIST2("CLUSTER", "OIDS"); + else if (Matches("ALTER", "TABLE", MatchAny, "SET", "WITHOUT")) + COMPLETE_WITH("CLUSTER", "OIDS"); /* ALTER TABLE RESET */ - else if (Matches4("ALTER", "TABLE", MatchAny, "RESET")) - COMPLETE_WITH_CONST("("); + else if (Matches("ALTER", "TABLE", MatchAny, "RESET")) + COMPLETE_WITH("("); /* ALTER TABLE SET|RESET ( */ - else if (Matches5("ALTER", "TABLE", MatchAny, "SET|RESET", "(")) + else if (Matches("ALTER", "TABLE", MatchAny, "SET|RESET", "(")) { static const char *const list_TABLEOPTIONS[] = { @@ -2012,6 +1914,7 @@ psql_completion(const char *text, int start, int end) "fillfactor", "parallel_workers", "log_autovacuum_min_duration", + "toast_tuple_target", "toast.autovacuum_enabled", "toast.autovacuum_freeze_max_age", "toast.autovacuum_freeze_min_age", @@ -2030,177 +1933,197 @@ psql_completion(const char *text, int start, int end) COMPLETE_WITH_LIST(list_TABLEOPTIONS); } - else if (Matches7("ALTER", "TABLE", MatchAny, "REPLICA", "IDENTITY", "USING", "INDEX")) + else if (Matches("ALTER", "TABLE", MatchAny, "REPLICA", "IDENTITY", "USING", "INDEX")) { completion_info_charp = prev5_wd; COMPLETE_WITH_QUERY(Query_for_index_of_table); } - else if (Matches6("ALTER", "TABLE", MatchAny, "REPLICA", "IDENTITY", "USING")) - COMPLETE_WITH_CONST("INDEX"); - else if (Matches5("ALTER", "TABLE", MatchAny, "REPLICA", "IDENTITY")) - COMPLETE_WITH_LIST4("FULL", "NOTHING", "DEFAULT", "USING"); - else if (Matches4("ALTER", "TABLE", MatchAny, "REPLICA")) - COMPLETE_WITH_CONST("IDENTITY"); + else if (Matches("ALTER", "TABLE", MatchAny, "REPLICA", "IDENTITY", "USING")) + COMPLETE_WITH("INDEX"); + else if (Matches("ALTER", "TABLE", MatchAny, "REPLICA", "IDENTITY")) + COMPLETE_WITH("FULL", "NOTHING", "DEFAULT", "USING"); + else if (Matches("ALTER", "TABLE", MatchAny, "REPLICA")) + COMPLETE_WITH("IDENTITY"); /* * If we have ALTER TABLE ATTACH PARTITION, provide a list of * tables. */ - else if (Matches5("ALTER", "TABLE", MatchAny, "ATTACH", "PARTITION")) + else if (Matches("ALTER", "TABLE", MatchAny, "ATTACH", "PARTITION")) COMPLETE_WITH_SCHEMA_QUERY(Query_for_list_of_tables, ""); /* Limited completion support for partition bound specification */ - else if (TailMatches3("ATTACH", "PARTITION", MatchAny)) - COMPLETE_WITH_CONST("FOR VALUES"); - else if (TailMatches2("FOR", "VALUES")) - COMPLETE_WITH_LIST2("FROM (", "IN ("); + else if (TailMatches("ATTACH", "PARTITION", MatchAny)) + COMPLETE_WITH("FOR VALUES", "DEFAULT"); + else if (TailMatches("FOR", "VALUES")) + COMPLETE_WITH("FROM (", "IN (", "WITH ("); /* * If we have ALTER TABLE DETACH PARTITION, provide a list of * partitions of . 
*/ - else if (Matches5("ALTER", "TABLE", MatchAny, "DETACH", "PARTITION")) + else if (Matches("ALTER", "TABLE", MatchAny, "DETACH", "PARTITION")) { completion_info_charp = prev3_wd; COMPLETE_WITH_QUERY(Query_for_partition_of_table); } /* ALTER TABLESPACE with RENAME TO, OWNER TO, SET, RESET */ - else if (Matches3("ALTER", "TABLESPACE", MatchAny)) - COMPLETE_WITH_LIST4("RENAME TO", "OWNER TO", "SET", "RESET"); + else if (Matches("ALTER", "TABLESPACE", MatchAny)) + COMPLETE_WITH("RENAME TO", "OWNER TO", "SET", "RESET"); /* ALTER TABLESPACE SET|RESET */ - else if (Matches4("ALTER", "TABLESPACE", MatchAny, "SET|RESET")) - COMPLETE_WITH_CONST("("); + else if (Matches("ALTER", "TABLESPACE", MatchAny, "SET|RESET")) + COMPLETE_WITH("("); /* ALTER TABLESPACE SET|RESET ( */ - else if (Matches5("ALTER", "TABLESPACE", MatchAny, "SET|RESET", "(")) - COMPLETE_WITH_LIST3("seq_page_cost", "random_page_cost", - "effective_io_concurrency"); + else if (Matches("ALTER", "TABLESPACE", MatchAny, "SET|RESET", "(")) + COMPLETE_WITH("seq_page_cost", "random_page_cost", + "effective_io_concurrency"); /* ALTER TEXT SEARCH */ - else if (Matches3("ALTER", "TEXT", "SEARCH")) - COMPLETE_WITH_LIST4("CONFIGURATION", "DICTIONARY", "PARSER", "TEMPLATE"); - else if (Matches5("ALTER", "TEXT", "SEARCH", "TEMPLATE|PARSER", MatchAny)) - COMPLETE_WITH_LIST2("RENAME TO", "SET SCHEMA"); - else if (Matches5("ALTER", "TEXT", "SEARCH", "DICTIONARY", MatchAny)) - COMPLETE_WITH_LIST3("OWNER TO", "RENAME TO", "SET SCHEMA"); - else if (Matches5("ALTER", "TEXT", "SEARCH", "CONFIGURATION", MatchAny)) - COMPLETE_WITH_LIST6("ADD MAPPING FOR", "ALTER MAPPING", - "DROP MAPPING FOR", - "OWNER TO", "RENAME TO", "SET SCHEMA"); + else if (Matches("ALTER", "TEXT", "SEARCH")) + COMPLETE_WITH("CONFIGURATION", "DICTIONARY", "PARSER", "TEMPLATE"); + else if (Matches("ALTER", "TEXT", "SEARCH", "TEMPLATE|PARSER", MatchAny)) + COMPLETE_WITH("RENAME TO", "SET SCHEMA"); + else if (Matches("ALTER", "TEXT", "SEARCH", "DICTIONARY", MatchAny)) + COMPLETE_WITH("OWNER TO", "RENAME TO", "SET SCHEMA"); + else if (Matches("ALTER", "TEXT", "SEARCH", "CONFIGURATION", MatchAny)) + COMPLETE_WITH("ADD MAPPING FOR", "ALTER MAPPING", + "DROP MAPPING FOR", + "OWNER TO", "RENAME TO", "SET SCHEMA"); /* complete ALTER TYPE with actions */ - else if (Matches3("ALTER", "TYPE", MatchAny)) - COMPLETE_WITH_LIST7("ADD ATTRIBUTE", "ADD VALUE", "ALTER ATTRIBUTE", - "DROP ATTRIBUTE", - "OWNER TO", "RENAME", "SET SCHEMA"); + else if (Matches("ALTER", "TYPE", MatchAny)) + COMPLETE_WITH("ADD ATTRIBUTE", "ADD VALUE", "ALTER ATTRIBUTE", + "DROP ATTRIBUTE", + "OWNER TO", "RENAME", "SET SCHEMA"); /* complete ALTER TYPE ADD with actions */ - else if (Matches4("ALTER", "TYPE", MatchAny, "ADD")) - COMPLETE_WITH_LIST2("ATTRIBUTE", "VALUE"); + else if (Matches("ALTER", "TYPE", MatchAny, "ADD")) + COMPLETE_WITH("ATTRIBUTE", "VALUE"); /* ALTER TYPE RENAME */ - else if (Matches4("ALTER", "TYPE", MatchAny, "RENAME")) - COMPLETE_WITH_LIST3("ATTRIBUTE", "TO", "VALUE"); + else if (Matches("ALTER", "TYPE", MatchAny, "RENAME")) + COMPLETE_WITH("ATTRIBUTE", "TO", "VALUE"); /* ALTER TYPE xxx RENAME (ATTRIBUTE|VALUE) yyy */ - else if (Matches6("ALTER", "TYPE", MatchAny, "RENAME", "ATTRIBUTE|VALUE", MatchAny)) - COMPLETE_WITH_CONST("TO"); + else if (Matches("ALTER", "TYPE", MatchAny, "RENAME", "ATTRIBUTE|VALUE", MatchAny)) + COMPLETE_WITH("TO"); /* * If we have ALTER TYPE ALTER/DROP/RENAME ATTRIBUTE, provide list * of attributes */ - else if (Matches5("ALTER", "TYPE", MatchAny, "ALTER|DROP|RENAME", 
"ATTRIBUTE")) + else if (Matches("ALTER", "TYPE", MatchAny, "ALTER|DROP|RENAME", "ATTRIBUTE")) COMPLETE_WITH_ATTR(prev3_wd, ""); /* ALTER TYPE ALTER ATTRIBUTE */ - else if (Matches6("ALTER", "TYPE", MatchAny, "ALTER", "ATTRIBUTE", MatchAny)) - COMPLETE_WITH_CONST("TYPE"); + else if (Matches("ALTER", "TYPE", MatchAny, "ALTER", "ATTRIBUTE", MatchAny)) + COMPLETE_WITH("TYPE"); /* complete ALTER GROUP */ - else if (Matches3("ALTER", "GROUP", MatchAny)) - COMPLETE_WITH_LIST3("ADD USER", "DROP USER", "RENAME TO"); + else if (Matches("ALTER", "GROUP", MatchAny)) + COMPLETE_WITH("ADD USER", "DROP USER", "RENAME TO"); /* complete ALTER GROUP ADD|DROP with USER */ - else if (Matches4("ALTER", "GROUP", MatchAny, "ADD|DROP")) - COMPLETE_WITH_CONST("USER"); + else if (Matches("ALTER", "GROUP", MatchAny, "ADD|DROP")) + COMPLETE_WITH("USER"); /* complete ALTER GROUP ADD|DROP USER with a user name */ - else if (Matches5("ALTER", "GROUP", MatchAny, "ADD|DROP", "USER")) + else if (Matches("ALTER", "GROUP", MatchAny, "ADD|DROP", "USER")) COMPLETE_WITH_QUERY(Query_for_list_of_roles); /* * If we have ALTER TYPE RENAME VALUE, provide list of enum values */ - else if (Matches5("ALTER", "TYPE", MatchAny, "RENAME", "VALUE")) + else if (Matches("ALTER", "TYPE", MatchAny, "RENAME", "VALUE")) COMPLETE_WITH_ENUM_VALUE(prev3_wd); +/* + * ANALYZE [ ( option [, ...] ) ] [ table_and_columns [, ...] ] + * ANALYZE [ VERBOSE ] [ table_and_columns [, ...] ] + * + * Currently the only allowed option is VERBOSE, so we can be skimpier on + * the option processing than VACUUM has to be. + */ + else if (Matches("ANALYZE")) + COMPLETE_WITH_SCHEMA_QUERY(Query_for_list_of_analyzables, + " UNION SELECT 'VERBOSE'"); + else if (Matches("ANALYZE", "(")) + COMPLETE_WITH("VERBOSE)"); + else if (HeadMatches("ANALYZE") && TailMatches("(")) + /* "ANALYZE (" should be caught above, so assume we want columns */ + COMPLETE_WITH_ATTR(prev2_wd, ""); + else if (HeadMatches("ANALYZE")) + COMPLETE_WITH_SCHEMA_QUERY(Query_for_list_of_analyzables, NULL); + /* BEGIN */ - else if (Matches1("BEGIN")) - COMPLETE_WITH_LIST6("WORK", "TRANSACTION", "ISOLATION LEVEL", "READ", "DEFERRABLE", "NOT DEFERRABLE"); + else if (Matches("BEGIN")) + COMPLETE_WITH("WORK", "TRANSACTION", "ISOLATION LEVEL", "READ", "DEFERRABLE", "NOT DEFERRABLE"); /* END, ABORT */ - else if (Matches1("END|ABORT")) - COMPLETE_WITH_LIST2("WORK", "TRANSACTION"); + else if (Matches("END|ABORT")) + COMPLETE_WITH("WORK", "TRANSACTION"); /* COMMIT */ - else if (Matches1("COMMIT")) - COMPLETE_WITH_LIST3("WORK", "TRANSACTION", "PREPARED"); + else if (Matches("COMMIT")) + COMPLETE_WITH("WORK", "TRANSACTION", "PREPARED"); /* RELEASE SAVEPOINT */ - else if (Matches1("RELEASE")) - COMPLETE_WITH_CONST("SAVEPOINT"); + else if (Matches("RELEASE")) + COMPLETE_WITH("SAVEPOINT"); /* ROLLBACK */ - else if (Matches1("ROLLBACK")) - COMPLETE_WITH_LIST4("WORK", "TRANSACTION", "TO SAVEPOINT", "PREPARED"); + else if (Matches("ROLLBACK")) + COMPLETE_WITH("WORK", "TRANSACTION", "TO SAVEPOINT", "PREPARED"); +/* CALL */ + else if (Matches("CALL")) + COMPLETE_WITH_VERSIONED_SCHEMA_QUERY(Query_for_list_of_procedures, NULL); + else if (Matches("CALL", MatchAny)) + COMPLETE_WITH("("); /* CLUSTER */ - else if (Matches1("CLUSTER")) - COMPLETE_WITH_SCHEMA_QUERY(Query_for_list_of_tm, "UNION SELECT 'VERBOSE'"); - else if (Matches2("CLUSTER", "VERBOSE")) - COMPLETE_WITH_SCHEMA_QUERY(Query_for_list_of_tm, NULL); + else if (Matches("CLUSTER")) + COMPLETE_WITH_SCHEMA_QUERY(Query_for_list_of_clusterables, "UNION SELECT 'VERBOSE'"); + 
else if (Matches("CLUSTER", "VERBOSE")) + COMPLETE_WITH_SCHEMA_QUERY(Query_for_list_of_clusterables, NULL); /* If we have CLUSTER , then add "USING" */ - else if (Matches2("CLUSTER", MatchAnyExcept("VERBOSE|ON"))) - COMPLETE_WITH_CONST("USING"); + else if (Matches("CLUSTER", MatchAnyExcept("VERBOSE|ON"))) + COMPLETE_WITH("USING"); /* If we have CLUSTER VERBOSE , then add "USING" */ - else if (Matches3("CLUSTER", "VERBOSE", MatchAny)) - COMPLETE_WITH_CONST("USING"); + else if (Matches("CLUSTER", "VERBOSE", MatchAny)) + COMPLETE_WITH("USING"); /* If we have CLUSTER USING, then add the index as well */ - else if (Matches3("CLUSTER", MatchAny, "USING") || - Matches4("CLUSTER", "VERBOSE", MatchAny, "USING")) + else if (Matches("CLUSTER", MatchAny, "USING") || + Matches("CLUSTER", "VERBOSE", MatchAny, "USING")) { completion_info_charp = prev2_wd; COMPLETE_WITH_QUERY(Query_for_index_of_table); } /* COMMENT */ - else if (Matches1("COMMENT")) - COMPLETE_WITH_CONST("ON"); - else if (Matches2("COMMENT", "ON")) - { - static const char *const list_COMMENT[] = - {"ACCESS METHOD", "CAST", "COLLATION", "CONVERSION", "DATABASE", - "EVENT TRIGGER", "EXTENSION", - "FOREIGN DATA WRAPPER", "FOREIGN TABLE", - "SERVER", "INDEX", "LANGUAGE", "POLICY", "PUBLICATION", "RULE", - "SCHEMA", "SEQUENCE", "STATISTICS", "SUBSCRIPTION", - "TABLE", "TYPE", "VIEW", "MATERIALIZED VIEW", "COLUMN", "AGGREGATE", "FUNCTION", - "OPERATOR", "TRIGGER", "CONSTRAINT", "DOMAIN", "LARGE OBJECT", - "TABLESPACE", "TEXT SEARCH", "ROLE", NULL}; - - COMPLETE_WITH_LIST(list_COMMENT); - } - else if (Matches4("COMMENT", "ON", "ACCESS", "METHOD")) + else if (Matches("COMMENT")) + COMPLETE_WITH("ON"); + else if (Matches("COMMENT", "ON")) + COMPLETE_WITH("ACCESS METHOD", "CAST", "COLLATION", "CONVERSION", + "DATABASE", "EVENT TRIGGER", "EXTENSION", + "FOREIGN DATA WRAPPER", "FOREIGN TABLE", "SERVER", + "INDEX", "LANGUAGE", "POLICY", "PUBLICATION", "RULE", + "SCHEMA", "SEQUENCE", "STATISTICS", "SUBSCRIPTION", + "TABLE", "TYPE", "VIEW", "MATERIALIZED VIEW", + "COLUMN", "AGGREGATE", "FUNCTION", + "PROCEDURE", "ROUTINE", + "OPERATOR", "TRIGGER", "CONSTRAINT", "DOMAIN", + "LARGE OBJECT", "TABLESPACE", "TEXT SEARCH", "ROLE"); + else if (Matches("COMMENT", "ON", "ACCESS", "METHOD")) COMPLETE_WITH_QUERY(Query_for_list_of_access_methods); - else if (Matches3("COMMENT", "ON", "FOREIGN")) - COMPLETE_WITH_LIST2("DATA WRAPPER", "TABLE"); - else if (Matches4("COMMENT", "ON", "TEXT", "SEARCH")) - COMPLETE_WITH_LIST4("CONFIGURATION", "DICTIONARY", "PARSER", "TEMPLATE"); - else if (Matches3("COMMENT", "ON", "CONSTRAINT")) + else if (Matches("COMMENT", "ON", "FOREIGN")) + COMPLETE_WITH("DATA WRAPPER", "TABLE"); + else if (Matches("COMMENT", "ON", "TEXT", "SEARCH")) + COMPLETE_WITH("CONFIGURATION", "DICTIONARY", "PARSER", "TEMPLATE"); + else if (Matches("COMMENT", "ON", "CONSTRAINT")) COMPLETE_WITH_QUERY(Query_for_all_table_constraints); - else if (Matches4("COMMENT", "ON", "CONSTRAINT", MatchAny)) - COMPLETE_WITH_CONST("ON"); - else if (Matches5("COMMENT", "ON", "CONSTRAINT", MatchAny, "ON")) + else if (Matches("COMMENT", "ON", "CONSTRAINT", MatchAny)) + COMPLETE_WITH("ON"); + else if (Matches("COMMENT", "ON", "CONSTRAINT", MatchAny, "ON")) { completion_info_charp = prev2_wd; COMPLETE_WITH_QUERY(Query_for_list_of_tables_for_constraint); } - else if (Matches4("COMMENT", "ON", "MATERIALIZED", "VIEW")) + else if (Matches("COMMENT", "ON", "MATERIALIZED", "VIEW")) COMPLETE_WITH_SCHEMA_QUERY(Query_for_list_of_matviews, NULL); - else if (Matches4("COMMENT", "ON", "EVENT", 
"TRIGGER")) + else if (Matches("COMMENT", "ON", "EVENT", "TRIGGER")) COMPLETE_WITH_QUERY(Query_for_list_of_event_triggers); - else if (Matches4("COMMENT", "ON", MatchAny, MatchAnyExcept("IS")) || - Matches5("COMMENT", "ON", MatchAny, MatchAny, MatchAnyExcept("IS")) || - Matches6("COMMENT", "ON", MatchAny, MatchAny, MatchAny, MatchAnyExcept("IS"))) - COMPLETE_WITH_CONST("IS"); + else if (Matches("COMMENT", "ON", MatchAny, MatchAnyExcept("IS")) || + Matches("COMMENT", "ON", MatchAny, MatchAny, MatchAnyExcept("IS")) || + Matches("COMMENT", "ON", MatchAny, MatchAny, MatchAny, MatchAnyExcept("IS"))) + COMPLETE_WITH("IS"); /* COPY */ @@ -2208,622 +2131,702 @@ psql_completion(const char *text, int start, int end) * If we have COPY, offer list of tables or "(" (Also cover the analogous * backslash command). */ - else if (Matches1("COPY|\\copy")) + else if (Matches("COPY|\\copy")) COMPLETE_WITH_SCHEMA_QUERY(Query_for_list_of_tables, " UNION ALL SELECT '('"); /* If we have COPY BINARY, complete with list of tables */ - else if (Matches2("COPY", "BINARY")) + else if (Matches("COPY", "BINARY")) COMPLETE_WITH_SCHEMA_QUERY(Query_for_list_of_tables, NULL); /* If we have COPY (, complete it with legal commands */ - else if (Matches2("COPY|\\copy", "(")) - COMPLETE_WITH_LIST7("SELECT", "TABLE", "VALUES", "INSERT", "UPDATE", "DELETE", "WITH"); + else if (Matches("COPY|\\copy", "(")) + COMPLETE_WITH("SELECT", "TABLE", "VALUES", "INSERT", "UPDATE", "DELETE", "WITH"); /* If we have COPY [BINARY] , complete it with "TO" or "FROM" */ - else if (Matches2("COPY|\\copy", MatchAny) || - Matches3("COPY", "BINARY", MatchAny)) - COMPLETE_WITH_LIST2("FROM", "TO"); + else if (Matches("COPY|\\copy", MatchAny) || + Matches("COPY", "BINARY", MatchAny)) + COMPLETE_WITH("FROM", "TO"); /* If we have COPY [BINARY] FROM|TO, complete with filename */ - else if (Matches3("COPY|\\copy", MatchAny, "FROM|TO") || - Matches4("COPY", "BINARY", MatchAny, "FROM|TO")) + else if (Matches("COPY|\\copy", MatchAny, "FROM|TO") || + Matches("COPY", "BINARY", MatchAny, "FROM|TO")) { completion_charp = ""; matches = completion_matches(text, complete_from_files); } /* Handle COPY [BINARY] FROM|TO filename */ - else if (Matches4("COPY|\\copy", MatchAny, "FROM|TO", MatchAny) || - Matches5("COPY", "BINARY", MatchAny, "FROM|TO", MatchAny)) - COMPLETE_WITH_LIST6("BINARY", "OIDS", "DELIMITER", "NULL", "CSV", - "ENCODING"); + else if (Matches("COPY|\\copy", MatchAny, "FROM|TO", MatchAny) || + Matches("COPY", "BINARY", MatchAny, "FROM|TO", MatchAny)) + COMPLETE_WITH("BINARY", "OIDS", "DELIMITER", "NULL", "CSV", + "ENCODING"); /* Handle COPY [BINARY] FROM|TO filename CSV */ - else if (Matches5("COPY|\\copy", MatchAny, "FROM|TO", MatchAny, "CSV") || - Matches6("COPY", "BINARY", MatchAny, "FROM|TO", MatchAny, "CSV")) - COMPLETE_WITH_LIST5("HEADER", "QUOTE", "ESCAPE", "FORCE QUOTE", - "FORCE NOT NULL"); + else if (Matches("COPY|\\copy", MatchAny, "FROM|TO", MatchAny, "CSV") || + Matches("COPY", "BINARY", MatchAny, "FROM|TO", MatchAny, "CSV")) + COMPLETE_WITH("HEADER", "QUOTE", "ESCAPE", "FORCE QUOTE", + "FORCE NOT NULL"); /* CREATE ACCESS METHOD */ /* Complete "CREATE ACCESS METHOD " */ - else if (Matches4("CREATE", "ACCESS", "METHOD", MatchAny)) - COMPLETE_WITH_CONST("TYPE"); + else if (Matches("CREATE", "ACCESS", "METHOD", MatchAny)) + COMPLETE_WITH("TYPE"); /* Complete "CREATE ACCESS METHOD TYPE" */ - else if (Matches5("CREATE", "ACCESS", "METHOD", MatchAny, "TYPE")) - COMPLETE_WITH_CONST("INDEX"); + else if (Matches("CREATE", "ACCESS", "METHOD", MatchAny, 
"TYPE")) + COMPLETE_WITH("INDEX"); /* Complete "CREATE ACCESS METHOD TYPE " */ - else if (Matches6("CREATE", "ACCESS", "METHOD", MatchAny, "TYPE", MatchAny)) - COMPLETE_WITH_CONST("HANDLER"); + else if (Matches("CREATE", "ACCESS", "METHOD", MatchAny, "TYPE", MatchAny)) + COMPLETE_WITH("HANDLER"); /* CREATE DATABASE */ - else if (Matches3("CREATE", "DATABASE", MatchAny)) - COMPLETE_WITH_LIST9("OWNER", "TEMPLATE", "ENCODING", "TABLESPACE", - "IS_TEMPLATE", - "ALLOW_CONNECTIONS", "CONNECTION LIMIT", - "LC_COLLATE", "LC_CTYPE"); + else if (Matches("CREATE", "DATABASE", MatchAny)) + COMPLETE_WITH("OWNER", "TEMPLATE", "ENCODING", "TABLESPACE", + "IS_TEMPLATE", + "ALLOW_CONNECTIONS", "CONNECTION LIMIT", + "LC_COLLATE", "LC_CTYPE"); - else if (Matches4("CREATE", "DATABASE", MatchAny, "TEMPLATE")) + else if (Matches("CREATE", "DATABASE", MatchAny, "TEMPLATE")) COMPLETE_WITH_QUERY(Query_for_list_of_template_databases); /* CREATE EXTENSION */ /* Complete with available extensions rather than installed ones. */ - else if (Matches2("CREATE", "EXTENSION")) + else if (Matches("CREATE", "EXTENSION")) COMPLETE_WITH_QUERY(Query_for_list_of_available_extensions); /* CREATE EXTENSION */ - else if (Matches3("CREATE", "EXTENSION", MatchAny)) - COMPLETE_WITH_LIST3("WITH SCHEMA", "CASCADE", "VERSION"); + else if (Matches("CREATE", "EXTENSION", MatchAny)) + COMPLETE_WITH("WITH SCHEMA", "CASCADE", "VERSION"); /* CREATE EXTENSION VERSION */ - else if (Matches4("CREATE", "EXTENSION", MatchAny, "VERSION")) + else if (Matches("CREATE", "EXTENSION", MatchAny, "VERSION")) { completion_info_charp = prev2_wd; COMPLETE_WITH_QUERY(Query_for_list_of_available_extension_versions); } /* CREATE FOREIGN */ - else if (Matches2("CREATE", "FOREIGN")) - COMPLETE_WITH_LIST2("DATA WRAPPER", "TABLE"); + else if (Matches("CREATE", "FOREIGN")) + COMPLETE_WITH("DATA WRAPPER", "TABLE"); /* CREATE FOREIGN DATA WRAPPER */ - else if (Matches5("CREATE", "FOREIGN", "DATA", "WRAPPER", MatchAny)) - COMPLETE_WITH_LIST3("HANDLER", "VALIDATOR", "OPTIONS"); + else if (Matches("CREATE", "FOREIGN", "DATA", "WRAPPER", MatchAny)) + COMPLETE_WITH("HANDLER", "VALIDATOR", "OPTIONS"); /* CREATE INDEX --- is allowed inside CREATE SCHEMA, so use TailMatches */ /* First off we complete CREATE UNIQUE with "INDEX" */ - else if (TailMatches2("CREATE", "UNIQUE")) - COMPLETE_WITH_CONST("INDEX"); + else if (TailMatches("CREATE", "UNIQUE")) + COMPLETE_WITH("INDEX"); /* * If we have CREATE|UNIQUE INDEX, then add "ON", "CONCURRENTLY", and * existing indexes */ - else if (TailMatches2("CREATE|UNIQUE", "INDEX")) + else if (TailMatches("CREATE|UNIQUE", "INDEX")) COMPLETE_WITH_SCHEMA_QUERY(Query_for_list_of_indexes, " UNION SELECT 'ON'" " UNION SELECT 'CONCURRENTLY'"); - /* Complete ... INDEX|CONCURRENTLY [] ON with a list of tables */ - else if (TailMatches3("INDEX|CONCURRENTLY", MatchAny, "ON") || - TailMatches2("INDEX|CONCURRENTLY", "ON")) - COMPLETE_WITH_SCHEMA_QUERY(Query_for_list_of_tm, NULL); + + /* + * Complete ... 
INDEX|CONCURRENTLY [<name>] ON with a list of relations
+ * that indexes can be created on
+ */
+ else if (TailMatches("INDEX|CONCURRENTLY", MatchAny, "ON") ||
+ TailMatches("INDEX|CONCURRENTLY", "ON"))
+ COMPLETE_WITH_SCHEMA_QUERY(Query_for_list_of_indexables, NULL);
 /*
 * Complete CREATE|UNIQUE INDEX CONCURRENTLY with "ON" and existing
 * indexes
 */
- else if (TailMatches3("CREATE|UNIQUE", "INDEX", "CONCURRENTLY"))
+ else if (TailMatches("CREATE|UNIQUE", "INDEX", "CONCURRENTLY"))
 COMPLETE_WITH_SCHEMA_QUERY(Query_for_list_of_indexes,
 " UNION SELECT 'ON'");
 /* Complete CREATE|UNIQUE INDEX [CONCURRENTLY] with "ON" */
- else if (TailMatches3("CREATE|UNIQUE", "INDEX", MatchAny) ||
- TailMatches4("CREATE|UNIQUE", "INDEX", "CONCURRENTLY", MatchAny))
- COMPLETE_WITH_CONST("ON");
+ else if (TailMatches("CREATE|UNIQUE", "INDEX", MatchAny) ||
+ TailMatches("CREATE|UNIQUE", "INDEX", "CONCURRENTLY", MatchAny))
+ COMPLETE_WITH("ON");
 /*
 * Complete INDEX <name> ON <table> with a list of table columns (which
 * should really be in parens)
 */
- else if (TailMatches4("INDEX", MatchAny, "ON", MatchAny) ||
- TailMatches3("INDEX|CONCURRENTLY", "ON", MatchAny))
- COMPLETE_WITH_LIST2("(", "USING");
- else if (TailMatches5("INDEX", MatchAny, "ON", MatchAny, "(") ||
- TailMatches4("INDEX|CONCURRENTLY", "ON", MatchAny, "("))
+ else if (TailMatches("INDEX", MatchAny, "ON", MatchAny) ||
+ TailMatches("INDEX|CONCURRENTLY", "ON", MatchAny))
+ COMPLETE_WITH("(", "USING");
+ else if (TailMatches("INDEX", MatchAny, "ON", MatchAny, "(") ||
+ TailMatches("INDEX|CONCURRENTLY", "ON", MatchAny, "("))
 COMPLETE_WITH_ATTR(prev2_wd, "");
 /* same if you put in USING */
- else if (TailMatches5("ON", MatchAny, "USING", MatchAny, "("))
+ else if (TailMatches("ON", MatchAny, "USING", MatchAny, "("))
 COMPLETE_WITH_ATTR(prev4_wd, "");
 /* Complete USING with an index method */
- else if (TailMatches6("INDEX", MatchAny, MatchAny, "ON", MatchAny, "USING") ||
- TailMatches5("INDEX", MatchAny, "ON", MatchAny, "USING") ||
- TailMatches4("INDEX", "ON", MatchAny, "USING"))
+ else if (TailMatches("INDEX", MatchAny, MatchAny, "ON", MatchAny, "USING") ||
+ TailMatches("INDEX", MatchAny, "ON", MatchAny, "USING") ||
+ TailMatches("INDEX", "ON", MatchAny, "USING"))
 COMPLETE_WITH_QUERY(Query_for_list_of_access_methods);
- else if (TailMatches4("ON", MatchAny, "USING", MatchAny) &&
- !TailMatches6("POLICY", MatchAny, MatchAny, MatchAny, MatchAny, MatchAny) &&
- !TailMatches4("FOR", MatchAny, MatchAny, MatchAny))
- COMPLETE_WITH_CONST("(");
+ else if (TailMatches("ON", MatchAny, "USING", MatchAny) &&
+ !TailMatches("POLICY", MatchAny, MatchAny, MatchAny, MatchAny, MatchAny) &&
+ !TailMatches("FOR", MatchAny, MatchAny, MatchAny))
+ COMPLETE_WITH("(");
/* CREATE POLICY */
 /* Complete "CREATE POLICY <name> ON" */
- else if (Matches3("CREATE", "POLICY", MatchAny))
- COMPLETE_WITH_CONST("ON");
+ else if (Matches("CREATE", "POLICY", MatchAny))
+ COMPLETE_WITH("ON");
 /* Complete "CREATE POLICY <name> ON
" */ - else if (Matches4("CREATE", "POLICY", MatchAny, "ON")) + else if (Matches("CREATE", "POLICY", MatchAny, "ON")) COMPLETE_WITH_SCHEMA_QUERY(Query_for_list_of_tables, NULL); /* Complete "CREATE POLICY ON
AS|FOR|TO|USING|WITH CHECK" */ - else if (Matches5("CREATE", "POLICY", MatchAny, "ON", MatchAny)) - COMPLETE_WITH_LIST5("AS", "FOR", "TO", "USING (", "WITH CHECK ("); + else if (Matches("CREATE", "POLICY", MatchAny, "ON", MatchAny)) + COMPLETE_WITH("AS", "FOR", "TO", "USING (", "WITH CHECK ("); /* CREATE POLICY ON
AS PERMISSIVE|RESTRICTIVE */ - else if (Matches6("CREATE", "POLICY", MatchAny, "ON", MatchAny, "AS")) - COMPLETE_WITH_LIST2("PERMISSIVE", "RESTRICTIVE"); + else if (Matches("CREATE", "POLICY", MatchAny, "ON", MatchAny, "AS")) + COMPLETE_WITH("PERMISSIVE", "RESTRICTIVE"); /* * CREATE POLICY ON
AS PERMISSIVE|RESTRICTIVE * FOR|TO|USING|WITH CHECK */ - else if (Matches7("CREATE", "POLICY", MatchAny, "ON", MatchAny, "AS", MatchAny)) - COMPLETE_WITH_LIST4("FOR", "TO", "USING", "WITH CHECK"); + else if (Matches("CREATE", "POLICY", MatchAny, "ON", MatchAny, "AS", MatchAny)) + COMPLETE_WITH("FOR", "TO", "USING", "WITH CHECK"); /* CREATE POLICY ON
FOR ALL|SELECT|INSERT|UPDATE|DELETE */ - else if (Matches6("CREATE", "POLICY", MatchAny, "ON", MatchAny, "FOR")) - COMPLETE_WITH_LIST5("ALL", "SELECT", "INSERT", "UPDATE", "DELETE"); + else if (Matches("CREATE", "POLICY", MatchAny, "ON", MatchAny, "FOR")) + COMPLETE_WITH("ALL", "SELECT", "INSERT", "UPDATE", "DELETE"); /* Complete "CREATE POLICY ON
FOR INSERT TO|WITH CHECK" */ - else if (Matches7("CREATE", "POLICY", MatchAny, "ON", MatchAny, "FOR", "INSERT")) - COMPLETE_WITH_LIST2("TO", "WITH CHECK ("); + else if (Matches("CREATE", "POLICY", MatchAny, "ON", MatchAny, "FOR", "INSERT")) + COMPLETE_WITH("TO", "WITH CHECK ("); /* Complete "CREATE POLICY ON
FOR SELECT|DELETE TO|USING" */ - else if (Matches7("CREATE", "POLICY", MatchAny, "ON", MatchAny, "FOR", "SELECT|DELETE")) - COMPLETE_WITH_LIST2("TO", "USING ("); + else if (Matches("CREATE", "POLICY", MatchAny, "ON", MatchAny, "FOR", "SELECT|DELETE")) + COMPLETE_WITH("TO", "USING ("); /* CREATE POLICY ON
FOR ALL|UPDATE TO|USING|WITH CHECK */ - else if (Matches7("CREATE", "POLICY", MatchAny, "ON", MatchAny, "FOR", "ALL|UPDATE")) - COMPLETE_WITH_LIST3("TO", "USING (", "WITH CHECK ("); + else if (Matches("CREATE", "POLICY", MatchAny, "ON", MatchAny, "FOR", "ALL|UPDATE")) + COMPLETE_WITH("TO", "USING (", "WITH CHECK ("); /* Complete "CREATE POLICY ON
TO " */ - else if (Matches6("CREATE", "POLICY", MatchAny, "ON", MatchAny, "TO")) + else if (Matches("CREATE", "POLICY", MatchAny, "ON", MatchAny, "TO")) COMPLETE_WITH_QUERY(Query_for_list_of_grant_roles); /* Complete "CREATE POLICY ON
USING (" */ - else if (Matches6("CREATE", "POLICY", MatchAny, "ON", MatchAny, "USING")) - COMPLETE_WITH_CONST("("); + else if (Matches("CREATE", "POLICY", MatchAny, "ON", MatchAny, "USING")) + COMPLETE_WITH("("); /* * CREATE POLICY ON
AS PERMISSIVE|RESTRICTIVE FOR * ALL|SELECT|INSERT|UPDATE|DELETE */ - else if (Matches8("CREATE", "POLICY", MatchAny, "ON", MatchAny, "AS", MatchAny, "FOR")) - COMPLETE_WITH_LIST5("ALL", "SELECT", "INSERT", "UPDATE", "DELETE"); + else if (Matches("CREATE", "POLICY", MatchAny, "ON", MatchAny, "AS", MatchAny, "FOR")) + COMPLETE_WITH("ALL", "SELECT", "INSERT", "UPDATE", "DELETE"); /* * Complete "CREATE POLICY ON
AS PERMISSIVE|RESTRICTIVE FOR * INSERT TO|WITH CHECK" */ - else if (Matches9("CREATE", "POLICY", MatchAny, "ON", MatchAny, "AS", MatchAny, "FOR", "INSERT")) - COMPLETE_WITH_LIST2("TO", "WITH CHECK ("); + else if (Matches("CREATE", "POLICY", MatchAny, "ON", MatchAny, "AS", MatchAny, "FOR", "INSERT")) + COMPLETE_WITH("TO", "WITH CHECK ("); /* * Complete "CREATE POLICY ON
AS PERMISSIVE|RESTRICTIVE FOR * SELECT|DELETE TO|USING" */ - else if (Matches9("CREATE", "POLICY", MatchAny, "ON", MatchAny, "AS", MatchAny, "FOR", "SELECT|DELETE")) - COMPLETE_WITH_LIST2("TO", "USING ("); + else if (Matches("CREATE", "POLICY", MatchAny, "ON", MatchAny, "AS", MatchAny, "FOR", "SELECT|DELETE")) + COMPLETE_WITH("TO", "USING ("); /* * CREATE POLICY ON
AS PERMISSIVE|RESTRICTIVE FOR * ALL|UPDATE TO|USING|WITH CHECK */ - else if (Matches9("CREATE", "POLICY", MatchAny, "ON", MatchAny, "AS", MatchAny, "FOR", "ALL|UPDATE")) - COMPLETE_WITH_LIST3("TO", "USING (", "WITH CHECK ("); + else if (Matches("CREATE", "POLICY", MatchAny, "ON", MatchAny, "AS", MatchAny, "FOR", "ALL|UPDATE")) + COMPLETE_WITH("TO", "USING (", "WITH CHECK ("); /* * Complete "CREATE POLICY ON
AS PERMISSIVE|RESTRICTIVE TO * " */ - else if (Matches8("CREATE", "POLICY", MatchAny, "ON", MatchAny, "AS", MatchAny, "TO")) + else if (Matches("CREATE", "POLICY", MatchAny, "ON", MatchAny, "AS", MatchAny, "TO")) COMPLETE_WITH_QUERY(Query_for_list_of_grant_roles); /* * Complete "CREATE POLICY ON
AS PERMISSIVE|RESTRICTIVE * USING (" */ - else if (Matches8("CREATE", "POLICY", MatchAny, "ON", MatchAny, "AS", MatchAny, "USING")) - COMPLETE_WITH_CONST("("); + else if (Matches("CREATE", "POLICY", MatchAny, "ON", MatchAny, "AS", MatchAny, "USING")) + COMPLETE_WITH("("); /* CREATE PUBLICATION */ - else if (Matches3("CREATE", "PUBLICATION", MatchAny)) - COMPLETE_WITH_LIST3("FOR TABLE", "FOR ALL TABLES", "WITH ("); - else if (Matches4("CREATE", "PUBLICATION", MatchAny, "FOR")) - COMPLETE_WITH_LIST2("TABLE", "ALL TABLES"); - /* Complete "CREATE PUBLICATION FOR TABLE
" */ - else if (Matches4("CREATE", "PUBLICATION", MatchAny, "FOR TABLE")) + else if (Matches("CREATE", "PUBLICATION", MatchAny)) + COMPLETE_WITH("FOR TABLE", "FOR ALL TABLES", "WITH ("); + else if (Matches("CREATE", "PUBLICATION", MatchAny, "FOR")) + COMPLETE_WITH("TABLE", "ALL TABLES"); + /* Complete "CREATE PUBLICATION FOR TABLE
, ..." */ + else if (HeadMatches("CREATE", "PUBLICATION", MatchAny, "FOR", "TABLE")) COMPLETE_WITH_SCHEMA_QUERY(Query_for_list_of_tables, NULL); /* Complete "CREATE PUBLICATION [...] WITH" */ - else if (HeadMatches2("CREATE", "PUBLICATION") && TailMatches2("WITH", "(")) - COMPLETE_WITH_CONST("publish"); + else if (HeadMatches("CREATE", "PUBLICATION") && TailMatches("WITH", "(")) + COMPLETE_WITH("publish"); /* CREATE RULE */ /* Complete "CREATE RULE " with "AS ON" */ - else if (Matches3("CREATE", "RULE", MatchAny)) - COMPLETE_WITH_CONST("AS ON"); + else if (Matches("CREATE", "RULE", MatchAny)) + COMPLETE_WITH("AS ON"); /* Complete "CREATE RULE AS" with "ON" */ - else if (Matches4("CREATE", "RULE", MatchAny, "AS")) - COMPLETE_WITH_CONST("ON"); + else if (Matches("CREATE", "RULE", MatchAny, "AS")) + COMPLETE_WITH("ON"); /* Complete "CREATE RULE AS ON" with SELECT|UPDATE|INSERT|DELETE */ - else if (Matches5("CREATE", "RULE", MatchAny, "AS", "ON")) - COMPLETE_WITH_LIST4("SELECT", "UPDATE", "INSERT", "DELETE"); + else if (Matches("CREATE", "RULE", MatchAny, "AS", "ON")) + COMPLETE_WITH("SELECT", "UPDATE", "INSERT", "DELETE"); /* Complete "AS ON SELECT|UPDATE|INSERT|DELETE" with a "TO" */ - else if (TailMatches3("AS", "ON", "SELECT|UPDATE|INSERT|DELETE")) - COMPLETE_WITH_CONST("TO"); + else if (TailMatches("AS", "ON", "SELECT|UPDATE|INSERT|DELETE")) + COMPLETE_WITH("TO"); /* Complete "AS ON TO" with a table name */ - else if (TailMatches4("AS", "ON", "SELECT|UPDATE|INSERT|DELETE", "TO")) + else if (TailMatches("AS", "ON", "SELECT|UPDATE|INSERT|DELETE", "TO")) COMPLETE_WITH_SCHEMA_QUERY(Query_for_list_of_tables, NULL); /* CREATE SEQUENCE --- is allowed inside CREATE SCHEMA, so use TailMatches */ - else if (TailMatches3("CREATE", "SEQUENCE", MatchAny) || - TailMatches4("CREATE", "TEMP|TEMPORARY", "SEQUENCE", MatchAny)) - COMPLETE_WITH_LIST8("INCREMENT BY", "MINVALUE", "MAXVALUE", "NO", "CACHE", - "CYCLE", "OWNED BY", "START WITH"); - else if (TailMatches4("CREATE", "SEQUENCE", MatchAny, "NO") || - TailMatches5("CREATE", "TEMP|TEMPORARY", "SEQUENCE", MatchAny, "NO")) - COMPLETE_WITH_LIST3("MINVALUE", "MAXVALUE", "CYCLE"); + else if (TailMatches("CREATE", "SEQUENCE", MatchAny) || + TailMatches("CREATE", "TEMP|TEMPORARY", "SEQUENCE", MatchAny)) + COMPLETE_WITH("INCREMENT BY", "MINVALUE", "MAXVALUE", "NO", "CACHE", + "CYCLE", "OWNED BY", "START WITH"); + else if (TailMatches("CREATE", "SEQUENCE", MatchAny, "NO") || + TailMatches("CREATE", "TEMP|TEMPORARY", "SEQUENCE", MatchAny, "NO")) + COMPLETE_WITH("MINVALUE", "MAXVALUE", "CYCLE"); /* CREATE SERVER */ - else if (Matches3("CREATE", "SERVER", MatchAny)) - COMPLETE_WITH_LIST3("TYPE", "VERSION", "FOREIGN DATA WRAPPER"); + else if (Matches("CREATE", "SERVER", MatchAny)) + COMPLETE_WITH("TYPE", "VERSION", "FOREIGN DATA WRAPPER"); /* CREATE STATISTICS */ - else if (Matches3("CREATE", "STATISTICS", MatchAny)) - COMPLETE_WITH_LIST2("(", "ON"); - else if (Matches4("CREATE", "STATISTICS", MatchAny, "(")) - COMPLETE_WITH_LIST2("ndistinct", "dependencies"); - else if (HeadMatches3("CREATE", "STATISTICS", MatchAny) && + else if (Matches("CREATE", "STATISTICS", MatchAny)) + COMPLETE_WITH("(", "ON"); + else if (Matches("CREATE", "STATISTICS", MatchAny, "(")) + COMPLETE_WITH("ndistinct", "dependencies"); + else if (HeadMatches("CREATE", "STATISTICS", MatchAny) && previous_words[0][0] == '(' && previous_words[0][strlen(previous_words[0]) - 1] == ')') - COMPLETE_WITH_CONST("ON"); - else if (HeadMatches3("CREATE", "STATISTICS", MatchAny) && - TailMatches1("FROM")) + 
COMPLETE_WITH("ON"); + else if (HeadMatches("CREATE", "STATISTICS", MatchAny) && + TailMatches("FROM")) COMPLETE_WITH_SCHEMA_QUERY(Query_for_list_of_tables, NULL); /* CREATE TABLE --- is allowed inside CREATE SCHEMA, so use TailMatches */ /* Complete "CREATE TEMP/TEMPORARY" with the possible temp objects */ - else if (TailMatches2("CREATE", "TEMP|TEMPORARY")) - COMPLETE_WITH_LIST3("SEQUENCE", "TABLE", "VIEW"); + else if (TailMatches("CREATE", "TEMP|TEMPORARY")) + COMPLETE_WITH("SEQUENCE", "TABLE", "VIEW"); /* Complete "CREATE UNLOGGED" with TABLE or MATVIEW */ - else if (TailMatches2("CREATE", "UNLOGGED")) - COMPLETE_WITH_LIST2("TABLE", "MATERIALIZED VIEW"); + else if (TailMatches("CREATE", "UNLOGGED")) + COMPLETE_WITH("TABLE", "MATERIALIZED VIEW"); /* Complete PARTITION BY with RANGE ( or LIST ( or ... */ - else if (TailMatches2("PARTITION", "BY")) - COMPLETE_WITH_LIST2("RANGE (", "LIST ("); + else if (TailMatches("PARTITION", "BY")) + COMPLETE_WITH("RANGE (", "LIST (", "HASH ("); /* If we have xxx PARTITION OF, provide a list of partitioned tables */ - else if (TailMatches2("PARTITION", "OF")) + else if (TailMatches("PARTITION", "OF")) COMPLETE_WITH_SCHEMA_QUERY(Query_for_list_of_partitioned_tables, ""); /* Limited completion support for partition bound specification */ - else if (TailMatches3("PARTITION", "OF", MatchAny)) - COMPLETE_WITH_CONST("FOR VALUES"); + else if (TailMatches("PARTITION", "OF", MatchAny)) + COMPLETE_WITH("FOR VALUES", "DEFAULT"); /* CREATE TABLESPACE */ - else if (Matches3("CREATE", "TABLESPACE", MatchAny)) - COMPLETE_WITH_LIST2("OWNER", "LOCATION"); + else if (Matches("CREATE", "TABLESPACE", MatchAny)) + COMPLETE_WITH("OWNER", "LOCATION"); /* Complete CREATE TABLESPACE name OWNER name with "LOCATION" */ - else if (Matches5("CREATE", "TABLESPACE", MatchAny, "OWNER", MatchAny)) - COMPLETE_WITH_CONST("LOCATION"); + else if (Matches("CREATE", "TABLESPACE", MatchAny, "OWNER", MatchAny)) + COMPLETE_WITH("LOCATION"); /* CREATE TEXT SEARCH */ - else if (Matches3("CREATE", "TEXT", "SEARCH")) - COMPLETE_WITH_LIST4("CONFIGURATION", "DICTIONARY", "PARSER", "TEMPLATE"); - else if (Matches5("CREATE", "TEXT", "SEARCH", "CONFIGURATION", MatchAny)) - COMPLETE_WITH_CONST("("); + else if (Matches("CREATE", "TEXT", "SEARCH")) + COMPLETE_WITH("CONFIGURATION", "DICTIONARY", "PARSER", "TEMPLATE"); + else if (Matches("CREATE", "TEXT", "SEARCH", "CONFIGURATION", MatchAny)) + COMPLETE_WITH("("); /* CREATE SUBSCRIPTION */ - else if (Matches3("CREATE", "SUBSCRIPTION", MatchAny)) - COMPLETE_WITH_CONST("CONNECTION"); - else if (Matches5("CREATE", "SUBSCRIPTION", MatchAny, "CONNECTION", MatchAny)) - COMPLETE_WITH_CONST("PUBLICATION"); - else if (Matches6("CREATE", "SUBSCRIPTION", MatchAny, "CONNECTION", - MatchAny, "PUBLICATION")) + else if (Matches("CREATE", "SUBSCRIPTION", MatchAny)) + COMPLETE_WITH("CONNECTION"); + else if (Matches("CREATE", "SUBSCRIPTION", MatchAny, "CONNECTION", MatchAny)) + COMPLETE_WITH("PUBLICATION"); + else if (Matches("CREATE", "SUBSCRIPTION", MatchAny, "CONNECTION", + MatchAny, "PUBLICATION")) { /* complete with nothing here as this refers to remote publications */ } - else if (HeadMatches2("CREATE", "SUBSCRIPTION") && TailMatches2("PUBLICATION", MatchAny)) - COMPLETE_WITH_CONST("WITH ("); + else if (HeadMatches("CREATE", "SUBSCRIPTION") && TailMatches("PUBLICATION", MatchAny)) + COMPLETE_WITH("WITH ("); /* Complete "CREATE SUBSCRIPTION ... 
WITH ( " */ - else if (HeadMatches2("CREATE", "SUBSCRIPTION") && TailMatches2("WITH", "(")) - COMPLETE_WITH_LIST6("copy_data", "connect", "create_slot", "enabled", - "slot_name", "synchronous_commit"); + else if (HeadMatches("CREATE", "SUBSCRIPTION") && TailMatches("WITH", "(")) + COMPLETE_WITH("copy_data", "connect", "create_slot", "enabled", + "slot_name", "synchronous_commit"); /* CREATE TRIGGER --- is allowed inside CREATE SCHEMA, so use TailMatches */ /* complete CREATE TRIGGER with BEFORE,AFTER,INSTEAD OF */ - else if (TailMatches3("CREATE", "TRIGGER", MatchAny)) - COMPLETE_WITH_LIST3("BEFORE", "AFTER", "INSTEAD OF"); + else if (TailMatches("CREATE", "TRIGGER", MatchAny)) + COMPLETE_WITH("BEFORE", "AFTER", "INSTEAD OF"); /* complete CREATE TRIGGER BEFORE,AFTER with an event */ - else if (TailMatches4("CREATE", "TRIGGER", MatchAny, "BEFORE|AFTER")) - COMPLETE_WITH_LIST4("INSERT", "DELETE", "UPDATE", "TRUNCATE"); + else if (TailMatches("CREATE", "TRIGGER", MatchAny, "BEFORE|AFTER")) + COMPLETE_WITH("INSERT", "DELETE", "UPDATE", "TRUNCATE"); /* complete CREATE TRIGGER INSTEAD OF with an event */ - else if (TailMatches5("CREATE", "TRIGGER", MatchAny, "INSTEAD", "OF")) - COMPLETE_WITH_LIST3("INSERT", "DELETE", "UPDATE"); + else if (TailMatches("CREATE", "TRIGGER", MatchAny, "INSTEAD", "OF")) + COMPLETE_WITH("INSERT", "DELETE", "UPDATE"); /* complete CREATE TRIGGER BEFORE,AFTER sth with OR,ON */ - else if (TailMatches5("CREATE", "TRIGGER", MatchAny, "BEFORE|AFTER", MatchAny) || - TailMatches6("CREATE", "TRIGGER", MatchAny, "INSTEAD", "OF", MatchAny)) - COMPLETE_WITH_LIST2("ON", "OR"); + else if (TailMatches("CREATE", "TRIGGER", MatchAny, "BEFORE|AFTER", MatchAny) || + TailMatches("CREATE", "TRIGGER", MatchAny, "INSTEAD", "OF", MatchAny)) + COMPLETE_WITH("ON", "OR"); /* * complete CREATE TRIGGER BEFORE,AFTER event ON with a list of - * tables + * tables. EXECUTE FUNCTION is the recommended grammar instead of EXECUTE + * PROCEDURE in version 11 and upwards. */ - else if (TailMatches6("CREATE", "TRIGGER", MatchAny, "BEFORE|AFTER", MatchAny, "ON")) + else if (TailMatches("CREATE", "TRIGGER", MatchAny, "BEFORE|AFTER", MatchAny, "ON")) COMPLETE_WITH_SCHEMA_QUERY(Query_for_list_of_tables, NULL); /* complete CREATE TRIGGER ... 
INSTEAD OF event ON with a list of views */ - else if (TailMatches7("CREATE", "TRIGGER", MatchAny, "INSTEAD", "OF", MatchAny, "ON")) + else if (TailMatches("CREATE", "TRIGGER", MatchAny, "INSTEAD", "OF", MatchAny, "ON")) COMPLETE_WITH_SCHEMA_QUERY(Query_for_list_of_views, NULL); - else if (HeadMatches2("CREATE", "TRIGGER") && TailMatches2("ON", MatchAny)) - COMPLETE_WITH_LIST7("NOT DEFERRABLE", "DEFERRABLE", "INITIALLY", - "REFERENCING", "FOR", "WHEN (", "EXECUTE PROCEDURE"); - else if (HeadMatches2("CREATE", "TRIGGER") && - (TailMatches1("DEFERRABLE") || TailMatches2("INITIALLY", "IMMEDIATE|DEFERRED"))) - COMPLETE_WITH_LIST4("REFERENCING", "FOR", "WHEN (", "EXECUTE PROCEDURE"); - else if (HeadMatches2("CREATE", "TRIGGER") && TailMatches1("REFERENCING")) - COMPLETE_WITH_LIST2("OLD TABLE", "NEW TABLE"); - else if (HeadMatches2("CREATE", "TRIGGER") && TailMatches2("OLD|NEW", "TABLE")) - COMPLETE_WITH_CONST("AS"); - else if (HeadMatches2("CREATE", "TRIGGER") && - (TailMatches5("REFERENCING", "OLD", "TABLE", "AS", MatchAny) || - TailMatches4("REFERENCING", "OLD", "TABLE", MatchAny))) - COMPLETE_WITH_LIST4("NEW TABLE", "FOR", "WHEN (", "EXECUTE PROCEDURE"); - else if (HeadMatches2("CREATE", "TRIGGER") && - (TailMatches5("REFERENCING", "NEW", "TABLE", "AS", MatchAny) || - TailMatches4("REFERENCING", "NEW", "TABLE", MatchAny))) - COMPLETE_WITH_LIST4("OLD TABLE", "FOR", "WHEN (", "EXECUTE PROCEDURE"); - else if (HeadMatches2("CREATE", "TRIGGER") && - (TailMatches9("REFERENCING", "OLD|NEW", "TABLE", "AS", MatchAny, "OLD|NEW", "TABLE", "AS", MatchAny) || - TailMatches8("REFERENCING", "OLD|NEW", "TABLE", MatchAny, "OLD|NEW", "TABLE", "AS", MatchAny) || - TailMatches8("REFERENCING", "OLD|NEW", "TABLE", "AS", MatchAny, "OLD|NEW", "TABLE", MatchAny) || - TailMatches7("REFERENCING", "OLD|NEW", "TABLE", MatchAny, "OLD|NEW", "TABLE", MatchAny))) - COMPLETE_WITH_LIST3("FOR", "WHEN (", "EXECUTE PROCEDURE"); - else if (HeadMatches2("CREATE", "TRIGGER") && TailMatches1("FOR")) - COMPLETE_WITH_LIST3("EACH", "ROW", "STATEMENT"); - else if (HeadMatches2("CREATE", "TRIGGER") && TailMatches2("FOR", "EACH")) - COMPLETE_WITH_LIST2("ROW", "STATEMENT"); - else if (HeadMatches2("CREATE", "TRIGGER") && - (TailMatches3("FOR", "EACH", "ROW|STATEMENT") || - TailMatches2("FOR", "ROW|STATEMENT"))) - COMPLETE_WITH_LIST2("WHEN (", "EXECUTE PROCEDURE"); - /* complete CREATE TRIGGER ... 
EXECUTE with PROCEDURE */ - else if (HeadMatches2("CREATE", "TRIGGER") && TailMatches1("EXECUTE")) - COMPLETE_WITH_CONST("PROCEDURE"); - else if (HeadMatches2("CREATE", "TRIGGER") && TailMatches2("EXECUTE", "PROCEDURE")) - COMPLETE_WITH_SCHEMA_QUERY(Query_for_list_of_functions, NULL); - -/* CREATE ROLE,USER,GROUP */ - else if (Matches3("CREATE", "ROLE|GROUP|USER", MatchAny) && - !TailMatches2("USER", "MAPPING")) + else if (HeadMatches("CREATE", "TRIGGER") && TailMatches("ON", MatchAny)) { - static const char *const list_CREATEROLE[] = - {"ADMIN", "BYPASSRLS", "CONNECTION LIMIT", "CREATEDB", "CREATEROLE", - "ENCRYPTED PASSWORD", "IN", "INHERIT", "LOGIN", "NOBYPASSRLS", - "NOCREATEDB", "NOCREATEROLE", "NOINHERIT", - "NOLOGIN", "NOREPLICATION", "NOSUPERUSER", "PASSWORD", - "REPLICATION", "ROLE", "SUPERUSER", "SYSID", - "VALID UNTIL", "WITH", NULL}; - - COMPLETE_WITH_LIST(list_CREATEROLE); + if (pset.sversion >= 110000) + COMPLETE_WITH("NOT DEFERRABLE", "DEFERRABLE", "INITIALLY", + "REFERENCING", "FOR", "WHEN (", "EXECUTE FUNCTION"); + else + COMPLETE_WITH("NOT DEFERRABLE", "DEFERRABLE", "INITIALLY", + "REFERENCING", "FOR", "WHEN (", "EXECUTE PROCEDURE"); } + else if (HeadMatches("CREATE", "TRIGGER") && + (TailMatches("DEFERRABLE") || TailMatches("INITIALLY", "IMMEDIATE|DEFERRED"))) + { + if (pset.sversion >= 110000) + COMPLETE_WITH("REFERENCING", "FOR", "WHEN (", "EXECUTE FUNCTION"); + else + COMPLETE_WITH("REFERENCING", "FOR", "WHEN (", "EXECUTE PROCEDURE"); + } + else if (HeadMatches("CREATE", "TRIGGER") && TailMatches("REFERENCING")) + COMPLETE_WITH("OLD TABLE", "NEW TABLE"); + else if (HeadMatches("CREATE", "TRIGGER") && TailMatches("OLD|NEW", "TABLE")) + COMPLETE_WITH("AS"); + else if (HeadMatches("CREATE", "TRIGGER") && + (TailMatches("REFERENCING", "OLD", "TABLE", "AS", MatchAny) || + TailMatches("REFERENCING", "OLD", "TABLE", MatchAny))) + { + if (pset.sversion >= 110000) + COMPLETE_WITH("NEW TABLE", "FOR", "WHEN (", "EXECUTE FUNCTION"); + else + COMPLETE_WITH("NEW TABLE", "FOR", "WHEN (", "EXECUTE PROCEDURE"); + } + else if (HeadMatches("CREATE", "TRIGGER") && + (TailMatches("REFERENCING", "NEW", "TABLE", "AS", MatchAny) || + TailMatches("REFERENCING", "NEW", "TABLE", MatchAny))) + { + if (pset.sversion >= 110000) + COMPLETE_WITH("OLD TABLE", "FOR", "WHEN (", "EXECUTE FUNCTION"); + else + COMPLETE_WITH("OLD TABLE", "FOR", "WHEN (", "EXECUTE PROCEDURE"); + } + else if (HeadMatches("CREATE", "TRIGGER") && + (TailMatches("REFERENCING", "OLD|NEW", "TABLE", "AS", MatchAny, "OLD|NEW", "TABLE", "AS", MatchAny) || + TailMatches("REFERENCING", "OLD|NEW", "TABLE", MatchAny, "OLD|NEW", "TABLE", "AS", MatchAny) || + TailMatches("REFERENCING", "OLD|NEW", "TABLE", "AS", MatchAny, "OLD|NEW", "TABLE", MatchAny) || + TailMatches("REFERENCING", "OLD|NEW", "TABLE", MatchAny, "OLD|NEW", "TABLE", MatchAny))) + { + if (pset.sversion >= 110000) + COMPLETE_WITH("FOR", "WHEN (", "EXECUTE FUNCTION"); + else + COMPLETE_WITH("FOR", "WHEN (", "EXECUTE PROCEDURE"); + } + else if (HeadMatches("CREATE", "TRIGGER") && TailMatches("FOR")) + COMPLETE_WITH("EACH", "ROW", "STATEMENT"); + else if (HeadMatches("CREATE", "TRIGGER") && TailMatches("FOR", "EACH")) + COMPLETE_WITH("ROW", "STATEMENT"); + else if (HeadMatches("CREATE", "TRIGGER") && + (TailMatches("FOR", "EACH", "ROW|STATEMENT") || + TailMatches("FOR", "ROW|STATEMENT"))) + { + if (pset.sversion >= 110000) + COMPLETE_WITH("WHEN (", "EXECUTE FUNCTION"); + else + COMPLETE_WITH("WHEN (", "EXECUTE PROCEDURE"); + } + else if (HeadMatches("CREATE", "TRIGGER") && 
TailMatches("WHEN", "(*)")) + { + if (pset.sversion >= 110000) + COMPLETE_WITH("EXECUTE FUNCTION"); + else + COMPLETE_WITH("EXECUTE PROCEDURE"); + } + /* complete CREATE TRIGGER ... EXECUTE with PROCEDURE|FUNCTION */ + else if (HeadMatches("CREATE", "TRIGGER") && TailMatches("EXECUTE")) + { + if (pset.sversion >= 110000) + COMPLETE_WITH("FUNCTION"); + else + COMPLETE_WITH("PROCEDURE"); + } + else if (HeadMatches("CREATE", "TRIGGER") && + TailMatches("EXECUTE", "FUNCTION|PROCEDURE")) + COMPLETE_WITH_VERSIONED_SCHEMA_QUERY(Query_for_list_of_functions, NULL); + +/* CREATE ROLE,USER,GROUP */ + else if (Matches("CREATE", "ROLE|GROUP|USER", MatchAny) && + !TailMatches("USER", "MAPPING")) + COMPLETE_WITH("ADMIN", "BYPASSRLS", "CONNECTION LIMIT", "CREATEDB", + "CREATEROLE", "ENCRYPTED PASSWORD", "IN", "INHERIT", + "LOGIN", "NOBYPASSRLS", + "NOCREATEDB", "NOCREATEROLE", "NOINHERIT", + "NOLOGIN", "NOREPLICATION", "NOSUPERUSER", "PASSWORD", + "REPLICATION", "ROLE", "SUPERUSER", "SYSID", + "VALID UNTIL", "WITH"); /* CREATE ROLE,USER,GROUP WITH */ - else if (Matches4("CREATE", "ROLE|GROUP|USER", MatchAny, "WITH")) - { + else if (Matches("CREATE", "ROLE|GROUP|USER", MatchAny, "WITH")) /* Similar to the above, but don't complete "WITH" again. */ - static const char *const list_CREATEROLE_WITH[] = - {"ADMIN", "BYPASSRLS", "CONNECTION LIMIT", "CREATEDB", "CREATEROLE", - "ENCRYPTED PASSWORD", "IN", "INHERIT", "LOGIN", "NOBYPASSRLS", - "NOCREATEDB", "NOCREATEROLE", "NOINHERIT", - "NOLOGIN", "NOREPLICATION", "NOSUPERUSER", "PASSWORD", - "REPLICATION", "ROLE", "SUPERUSER", "SYSID", - "VALID UNTIL", NULL}; - - COMPLETE_WITH_LIST(list_CREATEROLE_WITH); - } + COMPLETE_WITH("ADMIN", "BYPASSRLS", "CONNECTION LIMIT", "CREATEDB", + "CREATEROLE", "ENCRYPTED PASSWORD", "IN", "INHERIT", + "LOGIN", "NOBYPASSRLS", + "NOCREATEDB", "NOCREATEROLE", "NOINHERIT", + "NOLOGIN", "NOREPLICATION", "NOSUPERUSER", "PASSWORD", + "REPLICATION", "ROLE", "SUPERUSER", "SYSID", + "VALID UNTIL"); /* complete CREATE ROLE,USER,GROUP IN with ROLE,GROUP */ - else if (Matches4("CREATE", "ROLE|USER|GROUP", MatchAny, "IN")) - COMPLETE_WITH_LIST2("GROUP", "ROLE"); + else if (Matches("CREATE", "ROLE|USER|GROUP", MatchAny, "IN")) + COMPLETE_WITH("GROUP", "ROLE"); /* CREATE VIEW --- is allowed inside CREATE SCHEMA, so use TailMatches */ /* Complete CREATE VIEW with AS */ - else if (TailMatches3("CREATE", "VIEW", MatchAny)) - COMPLETE_WITH_CONST("AS"); + else if (TailMatches("CREATE", "VIEW", MatchAny)) + COMPLETE_WITH("AS"); /* Complete "CREATE VIEW AS with "SELECT" */ - else if (TailMatches4("CREATE", "VIEW", MatchAny, "AS")) - COMPLETE_WITH_CONST("SELECT"); + else if (TailMatches("CREATE", "VIEW", MatchAny, "AS")) + COMPLETE_WITH("SELECT"); /* CREATE MATERIALIZED VIEW */ - else if (Matches2("CREATE", "MATERIALIZED")) - COMPLETE_WITH_CONST("VIEW"); + else if (Matches("CREATE", "MATERIALIZED")) + COMPLETE_WITH("VIEW"); /* Complete CREATE MATERIALIZED VIEW with AS */ - else if (Matches4("CREATE", "MATERIALIZED", "VIEW", MatchAny)) - COMPLETE_WITH_CONST("AS"); + else if (Matches("CREATE", "MATERIALIZED", "VIEW", MatchAny)) + COMPLETE_WITH("AS"); /* Complete "CREATE MATERIALIZED VIEW AS with "SELECT" */ - else if (Matches5("CREATE", "MATERIALIZED", "VIEW", MatchAny, "AS")) - COMPLETE_WITH_CONST("SELECT"); + else if (Matches("CREATE", "MATERIALIZED", "VIEW", MatchAny, "AS")) + COMPLETE_WITH("SELECT"); /* CREATE EVENT TRIGGER */ - else if (Matches2("CREATE", "EVENT")) - COMPLETE_WITH_CONST("TRIGGER"); + else if (Matches("CREATE", "EVENT")) + 
COMPLETE_WITH("TRIGGER"); /* Complete CREATE EVENT TRIGGER with ON */ - else if (Matches4("CREATE", "EVENT", "TRIGGER", MatchAny)) - COMPLETE_WITH_CONST("ON"); + else if (Matches("CREATE", "EVENT", "TRIGGER", MatchAny)) + COMPLETE_WITH("ON"); /* Complete CREATE EVENT TRIGGER ON with event_type */ - else if (Matches5("CREATE", "EVENT", "TRIGGER", MatchAny, "ON")) - COMPLETE_WITH_LIST3("ddl_command_start", "ddl_command_end", "sql_drop"); + else if (Matches("CREATE", "EVENT", "TRIGGER", MatchAny, "ON")) + COMPLETE_WITH("ddl_command_start", "ddl_command_end", "sql_drop"); + /* + * Complete CREATE EVENT TRIGGER ON . EXECUTE FUNCTION + * is the recommended grammar instead of EXECUTE PROCEDURE in version 11 + * and upwards. + */ + else if (Matches("CREATE", "EVENT", "TRIGGER", MatchAny, "ON", MatchAny)) + { + if (pset.sversion >= 110000) + COMPLETE_WITH("WHEN TAG IN (", "EXECUTE FUNCTION"); + else + COMPLETE_WITH("WHEN TAG IN (", "EXECUTE PROCEDURE"); + } + else if (HeadMatches("CREATE", "EVENT", "TRIGGER") && + TailMatches("WHEN|AND", MatchAny, "IN", "(*)")) + { + if (pset.sversion >= 110000) + COMPLETE_WITH("EXECUTE FUNCTION"); + else + COMPLETE_WITH("EXECUTE PROCEDURE"); + } + else if (HeadMatches("CREATE", "EVENT", "TRIGGER") && + TailMatches("EXECUTE", "FUNCTION|PROCEDURE")) + COMPLETE_WITH_VERSIONED_SCHEMA_QUERY(Query_for_list_of_functions, NULL); /* DEALLOCATE */ - else if (Matches1("DEALLOCATE")) + else if (Matches("DEALLOCATE")) COMPLETE_WITH_QUERY(Query_for_list_of_prepared_statements); /* DECLARE */ - else if (Matches2("DECLARE", MatchAny)) - COMPLETE_WITH_LIST5("BINARY", "INSENSITIVE", "SCROLL", "NO SCROLL", - "CURSOR"); - else if (HeadMatches1("DECLARE") && TailMatches1("CURSOR")) - COMPLETE_WITH_LIST3("WITH HOLD", "WITHOUT HOLD", "FOR"); + else if (Matches("DECLARE", MatchAny)) + COMPLETE_WITH("BINARY", "INSENSITIVE", "SCROLL", "NO SCROLL", + "CURSOR"); + else if (HeadMatches("DECLARE") && TailMatches("CURSOR")) + COMPLETE_WITH("WITH HOLD", "WITHOUT HOLD", "FOR"); /* DELETE --- can be inside EXPLAIN, RULE, etc */ /* ... despite which, only complete DELETE with FROM at start of line */ - else if (Matches1("DELETE")) - COMPLETE_WITH_CONST("FROM"); + else if (Matches("DELETE")) + COMPLETE_WITH("FROM"); /* Complete DELETE FROM with a list of tables */ - else if (TailMatches2("DELETE", "FROM")) + else if (TailMatches("DELETE", "FROM")) COMPLETE_WITH_SCHEMA_QUERY(Query_for_list_of_updatables, NULL); /* Complete DELETE FROM
*/ - else if (TailMatches3("DELETE", "FROM", MatchAny)) - COMPLETE_WITH_LIST2("USING", "WHERE"); + else if (TailMatches("DELETE", "FROM", MatchAny)) + COMPLETE_WITH("USING", "WHERE"); /* XXX: implement tab completion for DELETE ... USING */ /* DISCARD */ - else if (Matches1("DISCARD")) - COMPLETE_WITH_LIST4("ALL", "PLANS", "SEQUENCES", "TEMP"); + else if (Matches("DISCARD")) + COMPLETE_WITH("ALL", "PLANS", "SEQUENCES", "TEMP"); /* DO */ - else if (Matches1("DO")) - COMPLETE_WITH_CONST("LANGUAGE"); + else if (Matches("DO")) + COMPLETE_WITH("LANGUAGE"); /* DROP */ /* Complete DROP object with CASCADE / RESTRICT */ - else if (Matches3("DROP", - "COLLATION|CONVERSION|DOMAIN|EXTENSION|LANGUAGE|PUBLICATION|SCHEMA|SEQUENCE|SERVER|SUBSCRIPTION|STATISTICS|TABLE|TYPE|VIEW", - MatchAny) || - Matches4("DROP", "ACCESS", "METHOD", MatchAny) || - (Matches4("DROP", "AGGREGATE|FUNCTION", MatchAny, MatchAny) && + else if (Matches("DROP", + "COLLATION|CONVERSION|DOMAIN|EXTENSION|LANGUAGE|PUBLICATION|SCHEMA|SEQUENCE|SERVER|SUBSCRIPTION|STATISTICS|TABLE|TYPE|VIEW", + MatchAny) || + Matches("DROP", "ACCESS", "METHOD", MatchAny) || + (Matches("DROP", "AGGREGATE|FUNCTION|PROCEDURE|ROUTINE", MatchAny, MatchAny) && ends_with(prev_wd, ')')) || - Matches4("DROP", "EVENT", "TRIGGER", MatchAny) || - Matches5("DROP", "FOREIGN", "DATA", "WRAPPER", MatchAny) || - Matches4("DROP", "FOREIGN", "TABLE", MatchAny) || - Matches5("DROP", "TEXT", "SEARCH", "CONFIGURATION|DICTIONARY|PARSER|TEMPLATE", MatchAny)) - COMPLETE_WITH_LIST2("CASCADE", "RESTRICT"); + Matches("DROP", "EVENT", "TRIGGER", MatchAny) || + Matches("DROP", "FOREIGN", "DATA", "WRAPPER", MatchAny) || + Matches("DROP", "FOREIGN", "TABLE", MatchAny) || + Matches("DROP", "TEXT", "SEARCH", "CONFIGURATION|DICTIONARY|PARSER|TEMPLATE", MatchAny)) + COMPLETE_WITH("CASCADE", "RESTRICT"); /* help completing some of the variants */ - else if (Matches3("DROP", "AGGREGATE|FUNCTION", MatchAny)) - COMPLETE_WITH_CONST("("); - else if (Matches4("DROP", "AGGREGATE|FUNCTION", MatchAny, "(")) + else if (Matches("DROP", "AGGREGATE|FUNCTION|PROCEDURE|ROUTINE", MatchAny)) + COMPLETE_WITH("("); + else if (Matches("DROP", "AGGREGATE|FUNCTION|PROCEDURE|ROUTINE", MatchAny, "(")) COMPLETE_WITH_FUNCTION_ARG(prev2_wd); - else if (Matches2("DROP", "FOREIGN")) - COMPLETE_WITH_LIST2("DATA WRAPPER", "TABLE"); + else if (Matches("DROP", "FOREIGN")) + COMPLETE_WITH("DATA WRAPPER", "TABLE"); /* DROP INDEX */ - else if (Matches2("DROP", "INDEX")) + else if (Matches("DROP", "INDEX")) COMPLETE_WITH_SCHEMA_QUERY(Query_for_list_of_indexes, " UNION SELECT 'CONCURRENTLY'"); - else if (Matches3("DROP", "INDEX", "CONCURRENTLY")) + else if (Matches("DROP", "INDEX", "CONCURRENTLY")) COMPLETE_WITH_SCHEMA_QUERY(Query_for_list_of_indexes, NULL); - else if (Matches3("DROP", "INDEX", MatchAny)) - COMPLETE_WITH_LIST2("CASCADE", "RESTRICT"); - else if (Matches4("DROP", "INDEX", "CONCURRENTLY", MatchAny)) - COMPLETE_WITH_LIST2("CASCADE", "RESTRICT"); + else if (Matches("DROP", "INDEX", MatchAny)) + COMPLETE_WITH("CASCADE", "RESTRICT"); + else if (Matches("DROP", "INDEX", "CONCURRENTLY", MatchAny)) + COMPLETE_WITH("CASCADE", "RESTRICT"); /* DROP MATERIALIZED VIEW */ - else if (Matches2("DROP", "MATERIALIZED")) - COMPLETE_WITH_CONST("VIEW"); - else if (Matches3("DROP", "MATERIALIZED", "VIEW")) + else if (Matches("DROP", "MATERIALIZED")) + COMPLETE_WITH("VIEW"); + else if (Matches("DROP", "MATERIALIZED", "VIEW")) COMPLETE_WITH_SCHEMA_QUERY(Query_for_list_of_matviews, NULL); /* DROP OWNED BY */ - else if (Matches2("DROP", 
"OWNED")) - COMPLETE_WITH_CONST("BY"); - else if (Matches3("DROP", "OWNED", "BY")) + else if (Matches("DROP", "OWNED")) + COMPLETE_WITH("BY"); + else if (Matches("DROP", "OWNED", "BY")) COMPLETE_WITH_QUERY(Query_for_list_of_roles); /* DROP TEXT SEARCH */ - else if (Matches3("DROP", "TEXT", "SEARCH")) - COMPLETE_WITH_LIST4("CONFIGURATION", "DICTIONARY", "PARSER", "TEMPLATE"); + else if (Matches("DROP", "TEXT", "SEARCH")) + COMPLETE_WITH("CONFIGURATION", "DICTIONARY", "PARSER", "TEMPLATE"); /* DROP TRIGGER */ - else if (Matches3("DROP", "TRIGGER", MatchAny)) - COMPLETE_WITH_CONST("ON"); - else if (Matches4("DROP", "TRIGGER", MatchAny, "ON")) + else if (Matches("DROP", "TRIGGER", MatchAny)) + COMPLETE_WITH("ON"); + else if (Matches("DROP", "TRIGGER", MatchAny, "ON")) { completion_info_charp = prev2_wd; COMPLETE_WITH_QUERY(Query_for_list_of_tables_for_trigger); } - else if (Matches5("DROP", "TRIGGER", MatchAny, "ON", MatchAny)) - COMPLETE_WITH_LIST2("CASCADE", "RESTRICT"); + else if (Matches("DROP", "TRIGGER", MatchAny, "ON", MatchAny)) + COMPLETE_WITH("CASCADE", "RESTRICT"); /* DROP ACCESS METHOD */ - else if (Matches2("DROP", "ACCESS")) - COMPLETE_WITH_CONST("METHOD"); - else if (Matches3("DROP", "ACCESS", "METHOD")) + else if (Matches("DROP", "ACCESS")) + COMPLETE_WITH("METHOD"); + else if (Matches("DROP", "ACCESS", "METHOD")) COMPLETE_WITH_QUERY(Query_for_list_of_access_methods); /* DROP EVENT TRIGGER */ - else if (Matches2("DROP", "EVENT")) - COMPLETE_WITH_CONST("TRIGGER"); - else if (Matches3("DROP", "EVENT", "TRIGGER")) + else if (Matches("DROP", "EVENT")) + COMPLETE_WITH("TRIGGER"); + else if (Matches("DROP", "EVENT", "TRIGGER")) COMPLETE_WITH_QUERY(Query_for_list_of_event_triggers); /* DROP POLICY */ - else if (Matches2("DROP", "POLICY")) + else if (Matches("DROP", "POLICY")) COMPLETE_WITH_QUERY(Query_for_list_of_policies); /* DROP POLICY ON */ - else if (Matches3("DROP", "POLICY", MatchAny)) - COMPLETE_WITH_CONST("ON"); + else if (Matches("DROP", "POLICY", MatchAny)) + COMPLETE_WITH("ON"); /* DROP POLICY ON
*/ - else if (Matches4("DROP", "POLICY", MatchAny, "ON")) + else if (Matches("DROP", "POLICY", MatchAny, "ON")) { completion_info_charp = prev2_wd; COMPLETE_WITH_QUERY(Query_for_list_of_tables_for_policy); } /* DROP RULE */ - else if (Matches3("DROP", "RULE", MatchAny)) - COMPLETE_WITH_CONST("ON"); - else if (Matches4("DROP", "RULE", MatchAny, "ON")) + else if (Matches("DROP", "RULE", MatchAny)) + COMPLETE_WITH("ON"); + else if (Matches("DROP", "RULE", MatchAny, "ON")) { completion_info_charp = prev2_wd; COMPLETE_WITH_QUERY(Query_for_list_of_tables_for_rule); } - else if (Matches5("DROP", "RULE", MatchAny, "ON", MatchAny)) - COMPLETE_WITH_LIST2("CASCADE", "RESTRICT"); + else if (Matches("DROP", "RULE", MatchAny, "ON", MatchAny)) + COMPLETE_WITH("CASCADE", "RESTRICT"); /* EXECUTE */ - else if (Matches1("EXECUTE")) + else if (Matches("EXECUTE")) COMPLETE_WITH_QUERY(Query_for_list_of_prepared_statements); -/* EXPLAIN */ - - /* - * Complete EXPLAIN [ANALYZE] [VERBOSE] with list of EXPLAIN-able commands - */ - else if (Matches1("EXPLAIN")) - COMPLETE_WITH_LIST7("SELECT", "INSERT", "DELETE", "UPDATE", "DECLARE", - "ANALYZE", "VERBOSE"); - else if (Matches2("EXPLAIN", "ANALYZE")) - COMPLETE_WITH_LIST6("SELECT", "INSERT", "DELETE", "UPDATE", "DECLARE", - "VERBOSE"); - else if (Matches2("EXPLAIN", "VERBOSE") || - Matches3("EXPLAIN", "ANALYZE", "VERBOSE")) - COMPLETE_WITH_LIST5("SELECT", "INSERT", "DELETE", "UPDATE", "DECLARE"); +/* + * EXPLAIN [ ( option [, ...] ) ] statement + * EXPLAIN [ ANALYZE ] [ VERBOSE ] statement + */ + else if (Matches("EXPLAIN")) + COMPLETE_WITH("SELECT", "INSERT", "DELETE", "UPDATE", "DECLARE", + "ANALYZE", "VERBOSE"); + else if (HeadMatches("EXPLAIN", "(*") && + !HeadMatches("EXPLAIN", "(*)")) + { + /* + * This fires if we're in an unfinished parenthesized option list. + * get_previous_words treats a completed parenthesized option list as + * one word, so the above test is correct. + */ + if (ends_with(prev_wd, '(') || ends_with(prev_wd, ',')) + COMPLETE_WITH("ANALYZE", "VERBOSE", "COSTS", "BUFFERS", + "TIMING", "SUMMARY", "FORMAT"); + else if (TailMatches("ANALYZE|VERBOSE|COSTS|BUFFERS|TIMING|SUMMARY")) + COMPLETE_WITH("ON", "OFF"); + else if (TailMatches("FORMAT")) + COMPLETE_WITH("TEXT", "XML", "JSON", "YAML"); + } + else if (Matches("EXPLAIN", "ANALYZE")) + COMPLETE_WITH("SELECT", "INSERT", "DELETE", "UPDATE", "DECLARE", + "VERBOSE"); + else if (Matches("EXPLAIN", "(*)") || + Matches("EXPLAIN", "VERBOSE") || + Matches("EXPLAIN", "ANALYZE", "VERBOSE")) + COMPLETE_WITH("SELECT", "INSERT", "DELETE", "UPDATE", "DECLARE"); /* FETCH && MOVE */ /* Complete FETCH with one of FORWARD, BACKWARD, RELATIVE */ - else if (Matches1("FETCH|MOVE")) - COMPLETE_WITH_LIST4("ABSOLUTE", "BACKWARD", "FORWARD", "RELATIVE"); + else if (Matches("FETCH|MOVE")) + COMPLETE_WITH("ABSOLUTE", "BACKWARD", "FORWARD", "RELATIVE"); /* Complete FETCH with one of ALL, NEXT, PRIOR */ - else if (Matches2("FETCH|MOVE", MatchAny)) - COMPLETE_WITH_LIST3("ALL", "NEXT", "PRIOR"); + else if (Matches("FETCH|MOVE", MatchAny)) + COMPLETE_WITH("ALL", "NEXT", "PRIOR"); /* * Complete FETCH with "FROM" or "IN". These are equivalent, * but we may as well tab-complete both: perhaps some users prefer one * variant or the other. 
*/ - else if (Matches3("FETCH|MOVE", MatchAny, MatchAny)) - COMPLETE_WITH_LIST2("FROM", "IN"); + else if (Matches("FETCH|MOVE", MatchAny, MatchAny)) + COMPLETE_WITH("FROM", "IN"); /* FOREIGN DATA WRAPPER */ /* applies in ALTER/DROP FDW and in CREATE SERVER */ - else if (TailMatches3("FOREIGN", "DATA", "WRAPPER") && - !TailMatches4("CREATE", MatchAny, MatchAny, MatchAny)) + else if (TailMatches("FOREIGN", "DATA", "WRAPPER") && + !TailMatches("CREATE", MatchAny, MatchAny, MatchAny)) COMPLETE_WITH_QUERY(Query_for_list_of_fdws); /* applies in CREATE SERVER */ - else if (TailMatches4("FOREIGN", "DATA", "WRAPPER", MatchAny) && - HeadMatches2("CREATE", "SERVER")) - COMPLETE_WITH_CONST("OPTIONS"); + else if (TailMatches("FOREIGN", "DATA", "WRAPPER", MatchAny) && + HeadMatches("CREATE", "SERVER")) + COMPLETE_WITH("OPTIONS"); /* FOREIGN TABLE */ - else if (TailMatches2("FOREIGN", "TABLE") && - !TailMatches3("CREATE", MatchAny, MatchAny)) + else if (TailMatches("FOREIGN", "TABLE") && + !TailMatches("CREATE", MatchAny, MatchAny)) COMPLETE_WITH_SCHEMA_QUERY(Query_for_list_of_foreign_tables, NULL); /* FOREIGN SERVER */ - else if (TailMatches2("FOREIGN", "SERVER")) + else if (TailMatches("FOREIGN", "SERVER")) COMPLETE_WITH_QUERY(Query_for_list_of_servers); /* @@ -2831,16 +2834,16 @@ psql_completion(const char *text, int start, int end) * ALTER DEFAULT PRIVILEGES, so use TailMatches */ /* Complete GRANT/REVOKE with a list of roles and privileges */ - else if (TailMatches1("GRANT|REVOKE")) + else if (TailMatches("GRANT|REVOKE")) { /* * With ALTER DEFAULT PRIVILEGES, restrict completion to grantable * privileges (can't grant roles) */ - if (HeadMatches3("ALTER", "DEFAULT", "PRIVILEGES")) - COMPLETE_WITH_LIST10("SELECT", "INSERT", "UPDATE", - "DELETE", "TRUNCATE", "REFERENCES", "TRIGGER", - "EXECUTE", "USAGE", "ALL"); + if (HeadMatches("ALTER", "DEFAULT", "PRIVILEGES")) + COMPLETE_WITH("SELECT", "INSERT", "UPDATE", + "DELETE", "TRUNCATE", "REFERENCES", "TRIGGER", + "EXECUTE", "USAGE", "ALL"); else COMPLETE_WITH_QUERY(Query_for_list_of_roles " UNION SELECT 'SELECT'" @@ -2862,19 +2865,18 @@ psql_completion(const char *text, int start, int end) * Complete GRANT/REVOKE with "ON", GRANT/REVOKE with * TO/FROM */ - else if (TailMatches2("GRANT|REVOKE", MatchAny)) + else if (TailMatches("GRANT|REVOKE", MatchAny)) { - if (TailMatches1("SELECT|INSERT|UPDATE|DELETE|TRUNCATE|REFERENCES|TRIGGER|CREATE|CONNECT|TEMPORARY|TEMP|EXECUTE|USAGE|ALL")) - COMPLETE_WITH_CONST("ON"); - else if (TailMatches2("GRANT", MatchAny)) - COMPLETE_WITH_CONST("TO"); + if (TailMatches("SELECT|INSERT|UPDATE|DELETE|TRUNCATE|REFERENCES|TRIGGER|CREATE|CONNECT|TEMPORARY|TEMP|EXECUTE|USAGE|ALL")) + COMPLETE_WITH("ON"); + else if (TailMatches("GRANT", MatchAny)) + COMPLETE_WITH("TO"); else - COMPLETE_WITH_CONST("FROM"); + COMPLETE_WITH("FROM"); } /* - * Complete GRANT/REVOKE ON with a list of tables, views, and - * sequences. + * Complete GRANT/REVOKE ON with a list of appropriate relations. * * Keywords like DATABASE, FUNCTION, LANGUAGE and SCHEMA added to query * result via UNION; seems to work intuitively. @@ -2883,17 +2885,19 @@ psql_completion(const char *text, int start, int end) * here will only work if the privilege list contains exactly one * privilege. */ - else if (TailMatches3("GRANT|REVOKE", MatchAny, "ON")) + else if (TailMatches("GRANT|REVOKE", MatchAny, "ON")) { /* * With ALTER DEFAULT PRIVILEGES, restrict completion to the kinds of * objects supported. 
*/ - if (HeadMatches3("ALTER", "DEFAULT", "PRIVILEGES")) - COMPLETE_WITH_LIST5("TABLES", "SEQUENCES", "FUNCTIONS", "TYPES", "SCHEMAS"); + if (HeadMatches("ALTER", "DEFAULT", "PRIVILEGES")) + COMPLETE_WITH("TABLES", "SEQUENCES", "FUNCTIONS", "PROCEDURES", "ROUTINES", "TYPES", "SCHEMAS"); else - COMPLETE_WITH_SCHEMA_QUERY(Query_for_list_of_tsvmf, + COMPLETE_WITH_SCHEMA_QUERY(Query_for_list_of_grantables, " UNION SELECT 'ALL FUNCTIONS IN SCHEMA'" + " UNION SELECT 'ALL PROCEDURES IN SCHEMA'" + " UNION SELECT 'ALL ROUTINES IN SCHEMA'" " UNION SELECT 'ALL SEQUENCES IN SCHEMA'" " UNION SELECT 'ALL TABLES IN SCHEMA'" " UNION SELECT 'DATABASE'" @@ -2903,17 +2907,22 @@ psql_completion(const char *text, int start, int end) " UNION SELECT 'FUNCTION'" " UNION SELECT 'LANGUAGE'" " UNION SELECT 'LARGE OBJECT'" + " UNION SELECT 'PROCEDURE'" + " UNION SELECT 'ROUTINE'" " UNION SELECT 'SCHEMA'" " UNION SELECT 'SEQUENCE'" " UNION SELECT 'TABLE'" " UNION SELECT 'TABLESPACE'" " UNION SELECT 'TYPE'"); } - else if (TailMatches4("GRANT|REVOKE", MatchAny, "ON", "ALL")) - COMPLETE_WITH_LIST3("FUNCTIONS IN SCHEMA", "SEQUENCES IN SCHEMA", - "TABLES IN SCHEMA"); - else if (TailMatches4("GRANT|REVOKE", MatchAny, "ON", "FOREIGN")) - COMPLETE_WITH_LIST2("DATA WRAPPER", "SERVER"); + else if (TailMatches("GRANT|REVOKE", MatchAny, "ON", "ALL")) + COMPLETE_WITH("FUNCTIONS IN SCHEMA", + "PROCEDURES IN SCHEMA", + "ROUTINES IN SCHEMA", + "SEQUENCES IN SCHEMA", + "TABLES IN SCHEMA"); + else if (TailMatches("GRANT|REVOKE", MatchAny, "ON", "FOREIGN")) + COMPLETE_WITH("DATA WRAPPER", "SERVER"); /* * Complete "GRANT/REVOKE * ON DATABASE/DOMAIN/..." with a list of @@ -2921,179 +2930,183 @@ psql_completion(const char *text, int start, int end) * * Complete "GRANT/REVOKE * ON *" with "TO/FROM". 
*/ - else if (TailMatches4("GRANT|REVOKE", MatchAny, "ON", MatchAny)) + else if (TailMatches("GRANT|REVOKE", MatchAny, "ON", MatchAny)) { - if (TailMatches1("DATABASE")) + if (TailMatches("DATABASE")) COMPLETE_WITH_QUERY(Query_for_list_of_databases); - else if (TailMatches1("DOMAIN")) + else if (TailMatches("DOMAIN")) COMPLETE_WITH_SCHEMA_QUERY(Query_for_list_of_domains, NULL); - else if (TailMatches1("FUNCTION")) - COMPLETE_WITH_SCHEMA_QUERY(Query_for_list_of_functions, NULL); - else if (TailMatches1("LANGUAGE")) + else if (TailMatches("FUNCTION")) + COMPLETE_WITH_VERSIONED_SCHEMA_QUERY(Query_for_list_of_functions, NULL); + else if (TailMatches("LANGUAGE")) COMPLETE_WITH_QUERY(Query_for_list_of_languages); - else if (TailMatches1("SCHEMA")) + else if (TailMatches("PROCEDURE")) + COMPLETE_WITH_VERSIONED_SCHEMA_QUERY(Query_for_list_of_procedures, NULL); + else if (TailMatches("ROUTINE")) + COMPLETE_WITH_SCHEMA_QUERY(Query_for_list_of_routines, NULL); + else if (TailMatches("SCHEMA")) COMPLETE_WITH_QUERY(Query_for_list_of_schemas); - else if (TailMatches1("SEQUENCE")) + else if (TailMatches("SEQUENCE")) COMPLETE_WITH_SCHEMA_QUERY(Query_for_list_of_sequences, NULL); - else if (TailMatches1("TABLE")) - COMPLETE_WITH_SCHEMA_QUERY(Query_for_list_of_tsvmf, NULL); - else if (TailMatches1("TABLESPACE")) + else if (TailMatches("TABLE")) + COMPLETE_WITH_SCHEMA_QUERY(Query_for_list_of_grantables, NULL); + else if (TailMatches("TABLESPACE")) COMPLETE_WITH_QUERY(Query_for_list_of_tablespaces); - else if (TailMatches1("TYPE")) + else if (TailMatches("TYPE")) COMPLETE_WITH_SCHEMA_QUERY(Query_for_list_of_datatypes, NULL); - else if (TailMatches4("GRANT", MatchAny, MatchAny, MatchAny)) - COMPLETE_WITH_CONST("TO"); + else if (TailMatches("GRANT", MatchAny, MatchAny, MatchAny)) + COMPLETE_WITH("TO"); else - COMPLETE_WITH_CONST("FROM"); + COMPLETE_WITH("FROM"); } /* * Complete "GRANT/REVOKE ... TO/FROM" with username, PUBLIC, * CURRENT_USER, or SESSION_USER. */ - else if ((HeadMatches1("GRANT") && TailMatches1("TO")) || - (HeadMatches1("REVOKE") && TailMatches1("FROM"))) + else if ((HeadMatches("GRANT") && TailMatches("TO")) || + (HeadMatches("REVOKE") && TailMatches("FROM"))) COMPLETE_WITH_QUERY(Query_for_list_of_grant_roles); /* Complete "ALTER DEFAULT PRIVILEGES ... GRANT/REVOKE ... TO/FROM */ - else if (HeadMatches3("ALTER", "DEFAULT", "PRIVILEGES") && TailMatches1("TO|FROM")) + else if (HeadMatches("ALTER", "DEFAULT", "PRIVILEGES") && TailMatches("TO|FROM")) COMPLETE_WITH_QUERY(Query_for_list_of_grant_roles); /* Complete "GRANT/REVOKE ... 
ON * *" with TO/FROM */ - else if (HeadMatches1("GRANT") && TailMatches3("ON", MatchAny, MatchAny)) - COMPLETE_WITH_CONST("TO"); - else if (HeadMatches1("REVOKE") && TailMatches3("ON", MatchAny, MatchAny)) - COMPLETE_WITH_CONST("FROM"); + else if (HeadMatches("GRANT") && TailMatches("ON", MatchAny, MatchAny)) + COMPLETE_WITH("TO"); + else if (HeadMatches("REVOKE") && TailMatches("ON", MatchAny, MatchAny)) + COMPLETE_WITH("FROM"); /* Complete "GRANT/REVOKE * ON ALL * IN SCHEMA *" with TO/FROM */ - else if (TailMatches8("GRANT|REVOKE", MatchAny, "ON", "ALL", MatchAny, "IN", "SCHEMA", MatchAny)) + else if (TailMatches("GRANT|REVOKE", MatchAny, "ON", "ALL", MatchAny, "IN", "SCHEMA", MatchAny)) { - if (TailMatches8("GRANT", MatchAny, MatchAny, MatchAny, MatchAny, MatchAny, MatchAny, MatchAny)) - COMPLETE_WITH_CONST("TO"); + if (TailMatches("GRANT", MatchAny, MatchAny, MatchAny, MatchAny, MatchAny, MatchAny, MatchAny)) + COMPLETE_WITH("TO"); else - COMPLETE_WITH_CONST("FROM"); + COMPLETE_WITH("FROM"); } /* Complete "GRANT/REVOKE * ON FOREIGN DATA WRAPPER *" with TO/FROM */ - else if (TailMatches7("GRANT|REVOKE", MatchAny, "ON", "FOREIGN", "DATA", "WRAPPER", MatchAny)) + else if (TailMatches("GRANT|REVOKE", MatchAny, "ON", "FOREIGN", "DATA", "WRAPPER", MatchAny)) { - if (TailMatches7("GRANT", MatchAny, MatchAny, MatchAny, MatchAny, MatchAny, MatchAny)) - COMPLETE_WITH_CONST("TO"); + if (TailMatches("GRANT", MatchAny, MatchAny, MatchAny, MatchAny, MatchAny, MatchAny)) + COMPLETE_WITH("TO"); else - COMPLETE_WITH_CONST("FROM"); + COMPLETE_WITH("FROM"); } /* Complete "GRANT/REVOKE * ON FOREIGN SERVER *" with TO/FROM */ - else if (TailMatches6("GRANT|REVOKE", MatchAny, "ON", "FOREIGN", "SERVER", MatchAny)) + else if (TailMatches("GRANT|REVOKE", MatchAny, "ON", "FOREIGN", "SERVER", MatchAny)) { - if (TailMatches6("GRANT", MatchAny, MatchAny, MatchAny, MatchAny, MatchAny)) - COMPLETE_WITH_CONST("TO"); + if (TailMatches("GRANT", MatchAny, MatchAny, MatchAny, MatchAny, MatchAny)) + COMPLETE_WITH("TO"); else - COMPLETE_WITH_CONST("FROM"); + COMPLETE_WITH("FROM"); } /* GROUP BY */ - else if (TailMatches3("FROM", MatchAny, "GROUP")) - COMPLETE_WITH_CONST("BY"); + else if (TailMatches("FROM", MatchAny, "GROUP")) + COMPLETE_WITH("BY"); /* IMPORT FOREIGN SCHEMA */ - else if (Matches1("IMPORT")) - COMPLETE_WITH_CONST("FOREIGN SCHEMA"); - else if (Matches2("IMPORT", "FOREIGN")) - COMPLETE_WITH_CONST("SCHEMA"); + else if (Matches("IMPORT")) + COMPLETE_WITH("FOREIGN SCHEMA"); + else if (Matches("IMPORT", "FOREIGN")) + COMPLETE_WITH("SCHEMA"); /* INSERT --- can be inside EXPLAIN, RULE, etc */ /* Complete INSERT with "INTO" */ - else if (TailMatches1("INSERT")) - COMPLETE_WITH_CONST("INTO"); + else if (TailMatches("INSERT")) + COMPLETE_WITH("INTO"); /* Complete INSERT INTO with table names */ - else if (TailMatches2("INSERT", "INTO")) + else if (TailMatches("INSERT", "INTO")) COMPLETE_WITH_SCHEMA_QUERY(Query_for_list_of_updatables, NULL); /* Complete "INSERT INTO
(" with attribute names */ - else if (TailMatches4("INSERT", "INTO", MatchAny, "(")) + else if (TailMatches("INSERT", "INTO", MatchAny, "(")) COMPLETE_WITH_ATTR(prev2_wd, ""); /* * Complete INSERT INTO
with "(" or "VALUES" or "SELECT" or * "TABLE" or "DEFAULT VALUES" or "OVERRIDING" */ - else if (TailMatches3("INSERT", "INTO", MatchAny)) - COMPLETE_WITH_LIST6("(", "DEFAULT VALUES", "SELECT", "TABLE", "VALUES", "OVERRIDING"); + else if (TailMatches("INSERT", "INTO", MatchAny)) + COMPLETE_WITH("(", "DEFAULT VALUES", "SELECT", "TABLE", "VALUES", "OVERRIDING"); /* * Complete INSERT INTO
(attribs) with "VALUES" or "SELECT" or * "TABLE" or "OVERRIDING" */ - else if (TailMatches4("INSERT", "INTO", MatchAny, MatchAny) && + else if (TailMatches("INSERT", "INTO", MatchAny, MatchAny) && ends_with(prev_wd, ')')) - COMPLETE_WITH_LIST4("SELECT", "TABLE", "VALUES", "OVERRIDING"); + COMPLETE_WITH("SELECT", "TABLE", "VALUES", "OVERRIDING"); /* Complete OVERRIDING */ - else if (TailMatches1("OVERRIDING")) - COMPLETE_WITH_LIST2("SYSTEM VALUE", "USER VALUE"); + else if (TailMatches("OVERRIDING")) + COMPLETE_WITH("SYSTEM VALUE", "USER VALUE"); /* Complete after OVERRIDING clause */ - else if (TailMatches3("OVERRIDING", MatchAny, "VALUE")) - COMPLETE_WITH_LIST3("SELECT", "TABLE", "VALUES"); + else if (TailMatches("OVERRIDING", MatchAny, "VALUE")) + COMPLETE_WITH("SELECT", "TABLE", "VALUES"); /* Insert an open parenthesis after "VALUES" */ - else if (TailMatches1("VALUES") && !TailMatches2("DEFAULT", "VALUES")) - COMPLETE_WITH_CONST("("); + else if (TailMatches("VALUES") && !TailMatches("DEFAULT", "VALUES")) + COMPLETE_WITH("("); /* LOCK */ /* Complete LOCK [TABLE] with a list of tables */ - else if (Matches1("LOCK")) + else if (Matches("LOCK")) COMPLETE_WITH_SCHEMA_QUERY(Query_for_list_of_tables, " UNION SELECT 'TABLE'"); - else if (Matches2("LOCK", "TABLE")) + else if (Matches("LOCK", "TABLE")) COMPLETE_WITH_SCHEMA_QUERY(Query_for_list_of_tables, ""); /* For the following, handle the case of a single table only for now */ /* Complete LOCK [TABLE]
with "IN" */ - else if (Matches2("LOCK", MatchAnyExcept("TABLE")) || - Matches3("LOCK", "TABLE", MatchAny)) - COMPLETE_WITH_CONST("IN"); + else if (Matches("LOCK", MatchAnyExcept("TABLE")) || + Matches("LOCK", "TABLE", MatchAny)) + COMPLETE_WITH("IN"); /* Complete LOCK [TABLE]
IN with a lock mode */ - else if (Matches3("LOCK", MatchAny, "IN") || - Matches4("LOCK", "TABLE", MatchAny, "IN")) - COMPLETE_WITH_LIST8("ACCESS SHARE MODE", - "ROW SHARE MODE", "ROW EXCLUSIVE MODE", - "SHARE UPDATE EXCLUSIVE MODE", "SHARE MODE", - "SHARE ROW EXCLUSIVE MODE", - "EXCLUSIVE MODE", "ACCESS EXCLUSIVE MODE"); + else if (Matches("LOCK", MatchAny, "IN") || + Matches("LOCK", "TABLE", MatchAny, "IN")) + COMPLETE_WITH("ACCESS SHARE MODE", + "ROW SHARE MODE", "ROW EXCLUSIVE MODE", + "SHARE UPDATE EXCLUSIVE MODE", "SHARE MODE", + "SHARE ROW EXCLUSIVE MODE", + "EXCLUSIVE MODE", "ACCESS EXCLUSIVE MODE"); /* Complete LOCK [TABLE]
IN ACCESS|ROW with rest of lock mode */ - else if (Matches4("LOCK", MatchAny, "IN", "ACCESS|ROW") || - Matches5("LOCK", "TABLE", MatchAny, "IN", "ACCESS|ROW")) - COMPLETE_WITH_LIST2("EXCLUSIVE MODE", "SHARE MODE"); + else if (Matches("LOCK", MatchAny, "IN", "ACCESS|ROW") || + Matches("LOCK", "TABLE", MatchAny, "IN", "ACCESS|ROW")) + COMPLETE_WITH("EXCLUSIVE MODE", "SHARE MODE"); /* Complete LOCK [TABLE]
IN SHARE with rest of lock mode */ - else if (Matches4("LOCK", MatchAny, "IN", "SHARE") || - Matches5("LOCK", "TABLE", MatchAny, "IN", "SHARE")) - COMPLETE_WITH_LIST3("MODE", "ROW EXCLUSIVE MODE", - "UPDATE EXCLUSIVE MODE"); + else if (Matches("LOCK", MatchAny, "IN", "SHARE") || + Matches("LOCK", "TABLE", MatchAny, "IN", "SHARE")) + COMPLETE_WITH("MODE", "ROW EXCLUSIVE MODE", + "UPDATE EXCLUSIVE MODE"); /* NOTIFY --- can be inside EXPLAIN, RULE, etc */ - else if (TailMatches1("NOTIFY")) + else if (TailMatches("NOTIFY")) COMPLETE_WITH_QUERY("SELECT pg_catalog.quote_ident(channel) FROM pg_catalog.pg_listening_channels() AS channel WHERE substring(pg_catalog.quote_ident(channel),1,%d)='%s'"); /* OPTIONS */ - else if (TailMatches1("OPTIONS")) - COMPLETE_WITH_CONST("("); + else if (TailMatches("OPTIONS")) + COMPLETE_WITH("("); /* OWNER TO - complete with available roles */ - else if (TailMatches2("OWNER", "TO")) + else if (TailMatches("OWNER", "TO")) COMPLETE_WITH_QUERY(Query_for_list_of_roles); /* ORDER BY */ - else if (TailMatches3("FROM", MatchAny, "ORDER")) - COMPLETE_WITH_CONST("BY"); - else if (TailMatches4("FROM", MatchAny, "ORDER", "BY")) + else if (TailMatches("FROM", MatchAny, "ORDER")) + COMPLETE_WITH("BY"); + else if (TailMatches("FROM", MatchAny, "ORDER", "BY")) COMPLETE_WITH_ATTR(prev3_wd, ""); /* PREPARE xx AS */ - else if (Matches3("PREPARE", MatchAny, "AS")) - COMPLETE_WITH_LIST4("SELECT", "UPDATE", "INSERT", "DELETE FROM"); + else if (Matches("PREPARE", MatchAny, "AS")) + COMPLETE_WITH("SELECT", "UPDATE", "INSERT", "DELETE FROM"); /* * PREPARE TRANSACTION is missing on purpose. It's intended for transaction @@ -3101,154 +3114,149 @@ psql_completion(const char *text, int start, int end) */ /* REASSIGN OWNED BY xxx TO yyy */ - else if (Matches1("REASSIGN")) - COMPLETE_WITH_CONST("OWNED BY"); - else if (Matches2("REASSIGN", "OWNED")) - COMPLETE_WITH_CONST("BY"); - else if (Matches3("REASSIGN", "OWNED", "BY")) + else if (Matches("REASSIGN")) + COMPLETE_WITH("OWNED BY"); + else if (Matches("REASSIGN", "OWNED")) + COMPLETE_WITH("BY"); + else if (Matches("REASSIGN", "OWNED", "BY")) COMPLETE_WITH_QUERY(Query_for_list_of_roles); - else if (Matches4("REASSIGN", "OWNED", "BY", MatchAny)) - COMPLETE_WITH_CONST("TO"); - else if (Matches5("REASSIGN", "OWNED", "BY", MatchAny, "TO")) + else if (Matches("REASSIGN", "OWNED", "BY", MatchAny)) + COMPLETE_WITH("TO"); + else if (Matches("REASSIGN", "OWNED", "BY", MatchAny, "TO")) COMPLETE_WITH_QUERY(Query_for_list_of_roles); /* REFRESH MATERIALIZED VIEW */ - else if (Matches1("REFRESH")) - COMPLETE_WITH_CONST("MATERIALIZED VIEW"); - else if (Matches2("REFRESH", "MATERIALIZED")) - COMPLETE_WITH_CONST("VIEW"); - else if (Matches3("REFRESH", "MATERIALIZED", "VIEW")) + else if (Matches("REFRESH")) + COMPLETE_WITH("MATERIALIZED VIEW"); + else if (Matches("REFRESH", "MATERIALIZED")) + COMPLETE_WITH("VIEW"); + else if (Matches("REFRESH", "MATERIALIZED", "VIEW")) COMPLETE_WITH_SCHEMA_QUERY(Query_for_list_of_matviews, " UNION SELECT 'CONCURRENTLY'"); - else if (Matches4("REFRESH", "MATERIALIZED", "VIEW", "CONCURRENTLY")) + else if (Matches("REFRESH", "MATERIALIZED", "VIEW", "CONCURRENTLY")) COMPLETE_WITH_SCHEMA_QUERY(Query_for_list_of_matviews, NULL); - else if (Matches4("REFRESH", "MATERIALIZED", "VIEW", MatchAny)) - COMPLETE_WITH_CONST("WITH"); - else if (Matches5("REFRESH", "MATERIALIZED", "VIEW", "CONCURRENTLY", MatchAny)) - COMPLETE_WITH_CONST("WITH"); - else if (Matches5("REFRESH", "MATERIALIZED", "VIEW", MatchAny, "WITH")) - COMPLETE_WITH_LIST2("NO 
DATA", "DATA"); - else if (Matches6("REFRESH", "MATERIALIZED", "VIEW", "CONCURRENTLY", MatchAny, "WITH")) - COMPLETE_WITH_LIST2("NO DATA", "DATA"); - else if (Matches6("REFRESH", "MATERIALIZED", "VIEW", MatchAny, "WITH", "NO")) - COMPLETE_WITH_CONST("DATA"); - else if (Matches7("REFRESH", "MATERIALIZED", "VIEW", "CONCURRENTLY", MatchAny, "WITH", "NO")) - COMPLETE_WITH_CONST("DATA"); + else if (Matches("REFRESH", "MATERIALIZED", "VIEW", MatchAny)) + COMPLETE_WITH("WITH"); + else if (Matches("REFRESH", "MATERIALIZED", "VIEW", "CONCURRENTLY", MatchAny)) + COMPLETE_WITH("WITH"); + else if (Matches("REFRESH", "MATERIALIZED", "VIEW", MatchAny, "WITH")) + COMPLETE_WITH("NO DATA", "DATA"); + else if (Matches("REFRESH", "MATERIALIZED", "VIEW", "CONCURRENTLY", MatchAny, "WITH")) + COMPLETE_WITH("NO DATA", "DATA"); + else if (Matches("REFRESH", "MATERIALIZED", "VIEW", MatchAny, "WITH", "NO")) + COMPLETE_WITH("DATA"); + else if (Matches("REFRESH", "MATERIALIZED", "VIEW", "CONCURRENTLY", MatchAny, "WITH", "NO")) + COMPLETE_WITH("DATA"); /* REINDEX */ - else if (Matches1("REINDEX")) - COMPLETE_WITH_LIST5("TABLE", "INDEX", "SYSTEM", "SCHEMA", "DATABASE"); - else if (Matches2("REINDEX", "TABLE")) - COMPLETE_WITH_SCHEMA_QUERY(Query_for_list_of_tm, NULL); - else if (Matches2("REINDEX", "INDEX")) + else if (Matches("REINDEX")) + COMPLETE_WITH("TABLE", "INDEX", "SYSTEM", "SCHEMA", "DATABASE"); + else if (Matches("REINDEX", "TABLE")) + COMPLETE_WITH_SCHEMA_QUERY(Query_for_list_of_indexables, NULL); + else if (Matches("REINDEX", "INDEX")) COMPLETE_WITH_SCHEMA_QUERY(Query_for_list_of_indexes, NULL); - else if (Matches2("REINDEX", "SCHEMA")) + else if (Matches("REINDEX", "SCHEMA")) COMPLETE_WITH_QUERY(Query_for_list_of_schemas); - else if (Matches2("REINDEX", "SYSTEM|DATABASE")) + else if (Matches("REINDEX", "SYSTEM|DATABASE")) COMPLETE_WITH_QUERY(Query_for_list_of_databases); /* SECURITY LABEL */ - else if (Matches1("SECURITY")) - COMPLETE_WITH_CONST("LABEL"); - else if (Matches2("SECURITY", "LABEL")) - COMPLETE_WITH_LIST2("ON", "FOR"); - else if (Matches4("SECURITY", "LABEL", "FOR", MatchAny)) - COMPLETE_WITH_CONST("ON"); - else if (Matches3("SECURITY", "LABEL", "ON") || - Matches5("SECURITY", "LABEL", "FOR", MatchAny, "ON")) - { - static const char *const list_SECURITY_LABEL[] = - {"TABLE", "COLUMN", "AGGREGATE", "DATABASE", "DOMAIN", - "EVENT TRIGGER", "FOREIGN TABLE", "FUNCTION", "LARGE OBJECT", - "MATERIALIZED VIEW", "LANGUAGE", "PUBLICATION", "ROLE", "SCHEMA", - "SEQUENCE", "SUBSCRIPTION", "TABLESPACE", "TYPE", "VIEW", NULL}; - - COMPLETE_WITH_LIST(list_SECURITY_LABEL); - } - else if (Matches5("SECURITY", "LABEL", "ON", MatchAny, MatchAny)) - COMPLETE_WITH_CONST("IS"); + else if (Matches("SECURITY")) + COMPLETE_WITH("LABEL"); + else if (Matches("SECURITY", "LABEL")) + COMPLETE_WITH("ON", "FOR"); + else if (Matches("SECURITY", "LABEL", "FOR", MatchAny)) + COMPLETE_WITH("ON"); + else if (Matches("SECURITY", "LABEL", "ON") || + Matches("SECURITY", "LABEL", "FOR", MatchAny, "ON")) + COMPLETE_WITH("TABLE", "COLUMN", "AGGREGATE", "DATABASE", "DOMAIN", + "EVENT TRIGGER", "FOREIGN TABLE", "FUNCTION", + "LARGE OBJECT", "MATERIALIZED VIEW", "LANGUAGE", + "PUBLICATION", "PROCEDURE", "ROLE", "ROUTINE", "SCHEMA", + "SEQUENCE", "SUBSCRIPTION", "TABLESPACE", "TYPE", "VIEW"); + else if (Matches("SECURITY", "LABEL", "ON", MatchAny, MatchAny)) + COMPLETE_WITH("IS"); /* SELECT */ /* naah . . . 
*/ /* SET, RESET, SHOW */ /* Complete with a variable name */ - else if (TailMatches1("SET|RESET") && !TailMatches3("UPDATE", MatchAny, "SET")) + else if (TailMatches("SET|RESET") && !TailMatches("UPDATE", MatchAny, "SET")) COMPLETE_WITH_QUERY(Query_for_list_of_set_vars); - else if (Matches1("SHOW")) + else if (Matches("SHOW")) COMPLETE_WITH_QUERY(Query_for_list_of_show_vars); /* Complete "SET TRANSACTION" */ - else if (Matches2("SET", "TRANSACTION")) - COMPLETE_WITH_LIST5("SNAPSHOT", "ISOLATION LEVEL", "READ", "DEFERRABLE", "NOT DEFERRABLE"); - else if (Matches2("BEGIN|START", "TRANSACTION") || - Matches2("BEGIN", "WORK") || - Matches1("BEGIN") || - Matches5("SET", "SESSION", "CHARACTERISTICS", "AS", "TRANSACTION")) - COMPLETE_WITH_LIST4("ISOLATION LEVEL", "READ", "DEFERRABLE", "NOT DEFERRABLE"); - else if (Matches3("SET|BEGIN|START", "TRANSACTION|WORK", "NOT") || - Matches2("BEGIN", "NOT") || - Matches6("SET", "SESSION", "CHARACTERISTICS", "AS", "TRANSACTION", "NOT")) - COMPLETE_WITH_CONST("DEFERRABLE"); - else if (Matches3("SET|BEGIN|START", "TRANSACTION|WORK", "ISOLATION") || - Matches2("BEGIN", "ISOLATION") || - Matches6("SET", "SESSION", "CHARACTERISTICS", "AS", "TRANSACTION", "ISOLATION")) - COMPLETE_WITH_CONST("LEVEL"); - else if (Matches4("SET|BEGIN|START", "TRANSACTION|WORK", "ISOLATION", "LEVEL") || - Matches3("BEGIN", "ISOLATION", "LEVEL") || - Matches7("SET", "SESSION", "CHARACTERISTICS", "AS", "TRANSACTION", "ISOLATION", "LEVEL")) - COMPLETE_WITH_LIST3("READ", "REPEATABLE READ", "SERIALIZABLE"); - else if (Matches5("SET|BEGIN|START", "TRANSACTION|WORK", "ISOLATION", "LEVEL", "READ") || - Matches4("BEGIN", "ISOLATION", "LEVEL", "READ") || - Matches8("SET", "SESSION", "CHARACTERISTICS", "AS", "TRANSACTION", "ISOLATION", "LEVEL", "READ")) - COMPLETE_WITH_LIST2("UNCOMMITTED", "COMMITTED"); - else if (Matches5("SET|BEGIN|START", "TRANSACTION|WORK", "ISOLATION", "LEVEL", "REPEATABLE") || - Matches4("BEGIN", "ISOLATION", "LEVEL", "REPEATABLE") || - Matches8("SET", "SESSION", "CHARACTERISTICS", "AS", "TRANSACTION", "ISOLATION", "LEVEL", "REPEATABLE")) - COMPLETE_WITH_CONST("READ"); - else if (Matches3("SET|BEGIN|START", "TRANSACTION|WORK", "READ") || - Matches2("BEGIN", "READ") || - Matches6("SET", "SESSION", "CHARACTERISTICS", "AS", "TRANSACTION", "READ")) - COMPLETE_WITH_LIST2("ONLY", "WRITE"); + else if (Matches("SET", "TRANSACTION")) + COMPLETE_WITH("SNAPSHOT", "ISOLATION LEVEL", "READ", "DEFERRABLE", "NOT DEFERRABLE"); + else if (Matches("BEGIN|START", "TRANSACTION") || + Matches("BEGIN", "WORK") || + Matches("BEGIN") || + Matches("SET", "SESSION", "CHARACTERISTICS", "AS", "TRANSACTION")) + COMPLETE_WITH("ISOLATION LEVEL", "READ", "DEFERRABLE", "NOT DEFERRABLE"); + else if (Matches("SET|BEGIN|START", "TRANSACTION|WORK", "NOT") || + Matches("BEGIN", "NOT") || + Matches("SET", "SESSION", "CHARACTERISTICS", "AS", "TRANSACTION", "NOT")) + COMPLETE_WITH("DEFERRABLE"); + else if (Matches("SET|BEGIN|START", "TRANSACTION|WORK", "ISOLATION") || + Matches("BEGIN", "ISOLATION") || + Matches("SET", "SESSION", "CHARACTERISTICS", "AS", "TRANSACTION", "ISOLATION")) + COMPLETE_WITH("LEVEL"); + else if (Matches("SET|BEGIN|START", "TRANSACTION|WORK", "ISOLATION", "LEVEL") || + Matches("BEGIN", "ISOLATION", "LEVEL") || + Matches("SET", "SESSION", "CHARACTERISTICS", "AS", "TRANSACTION", "ISOLATION", "LEVEL")) + COMPLETE_WITH("READ", "REPEATABLE READ", "SERIALIZABLE"); + else if (Matches("SET|BEGIN|START", "TRANSACTION|WORK", "ISOLATION", "LEVEL", "READ") || + Matches("BEGIN", "ISOLATION", "LEVEL", 
"READ") || + Matches("SET", "SESSION", "CHARACTERISTICS", "AS", "TRANSACTION", "ISOLATION", "LEVEL", "READ")) + COMPLETE_WITH("UNCOMMITTED", "COMMITTED"); + else if (Matches("SET|BEGIN|START", "TRANSACTION|WORK", "ISOLATION", "LEVEL", "REPEATABLE") || + Matches("BEGIN", "ISOLATION", "LEVEL", "REPEATABLE") || + Matches("SET", "SESSION", "CHARACTERISTICS", "AS", "TRANSACTION", "ISOLATION", "LEVEL", "REPEATABLE")) + COMPLETE_WITH("READ"); + else if (Matches("SET|BEGIN|START", "TRANSACTION|WORK", "READ") || + Matches("BEGIN", "READ") || + Matches("SET", "SESSION", "CHARACTERISTICS", "AS", "TRANSACTION", "READ")) + COMPLETE_WITH("ONLY", "WRITE"); /* SET CONSTRAINTS */ - else if (Matches2("SET", "CONSTRAINTS")) + else if (Matches("SET", "CONSTRAINTS")) COMPLETE_WITH_SCHEMA_QUERY(Query_for_list_of_constraints_with_schema, "UNION SELECT 'ALL'"); /* Complete SET CONSTRAINTS with DEFERRED|IMMEDIATE */ - else if (Matches3("SET", "CONSTRAINTS", MatchAny)) - COMPLETE_WITH_LIST2("DEFERRED", "IMMEDIATE"); + else if (Matches("SET", "CONSTRAINTS", MatchAny)) + COMPLETE_WITH("DEFERRED", "IMMEDIATE"); /* Complete SET ROLE */ - else if (Matches2("SET", "ROLE")) + else if (Matches("SET", "ROLE")) COMPLETE_WITH_QUERY(Query_for_list_of_roles); /* Complete SET SESSION with AUTHORIZATION or CHARACTERISTICS... */ - else if (Matches2("SET", "SESSION")) - COMPLETE_WITH_LIST2("AUTHORIZATION", "CHARACTERISTICS AS TRANSACTION"); + else if (Matches("SET", "SESSION")) + COMPLETE_WITH("AUTHORIZATION", "CHARACTERISTICS AS TRANSACTION"); /* Complete SET SESSION AUTHORIZATION with username */ - else if (Matches3("SET", "SESSION", "AUTHORIZATION")) + else if (Matches("SET", "SESSION", "AUTHORIZATION")) COMPLETE_WITH_QUERY(Query_for_list_of_roles " UNION SELECT 'DEFAULT'"); /* Complete RESET SESSION with AUTHORIZATION */ - else if (Matches2("RESET", "SESSION")) - COMPLETE_WITH_CONST("AUTHORIZATION"); + else if (Matches("RESET", "SESSION")) + COMPLETE_WITH("AUTHORIZATION"); /* Complete SET with "TO" */ - else if (Matches2("SET", MatchAny)) - COMPLETE_WITH_CONST("TO"); - /* Complete ALTER DATABASE|FUNCTION|ROLE|USER ... SET */ - else if (HeadMatches2("ALTER", "DATABASE|FUNCTION|ROLE|USER") && - TailMatches2("SET", MatchAny)) - COMPLETE_WITH_LIST2("FROM CURRENT", "TO"); + else if (Matches("SET", MatchAny)) + COMPLETE_WITH("TO"); + + /* + * Complete ALTER DATABASE|FUNCTION||PROCEDURE|ROLE|ROUTINE|USER ... 
SET + * + */ + else if (HeadMatches("ALTER", "DATABASE|FUNCTION|PROCEDURE|ROLE|ROUTINE|USER") && + TailMatches("SET", MatchAny)) + COMPLETE_WITH("FROM CURRENT", "TO"); /* Suggest possible variable values */ - else if (TailMatches3("SET", MatchAny, "TO|=")) + else if (TailMatches("SET", MatchAny, "TO|=")) { /* special cased code for individual GUCs */ - if (TailMatches2("DateStyle", "TO|=")) - { - static const char *const my_list[] = - {"ISO", "SQL", "Postgres", "German", - "YMD", "DMY", "MDY", - "US", "European", "NonEuropean", - "DEFAULT", NULL}; - - COMPLETE_WITH_LIST(my_list); - } - else if (TailMatches2("search_path", "TO|=")) + if (TailMatches("DateStyle", "TO|=")) + COMPLETE_WITH("ISO", "SQL", "Postgres", "German", + "YMD", "DMY", "MDY", + "US", "European", "NonEuropean", + "DEFAULT"); + else if (TailMatches("search_path", "TO|=")) COMPLETE_WITH_QUERY(Query_for_list_of_schemas " AND nspname not like 'pg\\_toast%%' " " AND nspname not like 'pg\\_temp%%' " @@ -3266,10 +3274,10 @@ psql_completion(const char *text, int start, int end) COMPLETE_WITH_QUERY(querybuf); } else if (guctype && strcmp(guctype, "bool") == 0) - COMPLETE_WITH_LIST9("on", "off", "true", "false", "yes", "no", - "1", "0", "DEFAULT"); + COMPLETE_WITH("on", "off", "true", "false", "yes", "no", + "1", "0", "DEFAULT"); else - COMPLETE_WITH_CONST("DEFAULT"); + COMPLETE_WITH("DEFAULT"); if (guctype) free(guctype); @@ -3277,84 +3285,98 @@ psql_completion(const char *text, int start, int end) } /* START TRANSACTION */ - else if (Matches1("START")) - COMPLETE_WITH_CONST("TRANSACTION"); + else if (Matches("START")) + COMPLETE_WITH("TRANSACTION"); /* TABLE, but not TABLE embedded in other commands */ - else if (Matches1("TABLE")) - COMPLETE_WITH_SCHEMA_QUERY(Query_for_list_of_relations, NULL); + else if (Matches("TABLE")) + COMPLETE_WITH_SCHEMA_QUERY(Query_for_list_of_selectables, NULL); /* TABLESAMPLE */ - else if (TailMatches1("TABLESAMPLE")) + else if (TailMatches("TABLESAMPLE")) COMPLETE_WITH_QUERY(Query_for_list_of_tablesample_methods); - else if (TailMatches2("TABLESAMPLE", MatchAny)) - COMPLETE_WITH_CONST("("); + else if (TailMatches("TABLESAMPLE", MatchAny)) + COMPLETE_WITH("("); /* TRUNCATE */ - else if (Matches1("TRUNCATE")) + else if (Matches("TRUNCATE")) COMPLETE_WITH_SCHEMA_QUERY(Query_for_list_of_tables, NULL); /* UNLISTEN */ - else if (Matches1("UNLISTEN")) + else if (Matches("UNLISTEN")) COMPLETE_WITH_QUERY("SELECT pg_catalog.quote_ident(channel) FROM pg_catalog.pg_listening_channels() AS channel WHERE substring(pg_catalog.quote_ident(channel),1,%d)='%s' UNION SELECT '*'"); /* UPDATE --- can be inside EXPLAIN, RULE, etc */ /* If prev. word is UPDATE suggest a list of tables */ - else if (TailMatches1("UPDATE")) + else if (TailMatches("UPDATE")) COMPLETE_WITH_SCHEMA_QUERY(Query_for_list_of_updatables, NULL); /* Complete UPDATE
with "SET" */ - else if (TailMatches2("UPDATE", MatchAny)) - COMPLETE_WITH_CONST("SET"); + else if (TailMatches("UPDATE", MatchAny)) + COMPLETE_WITH("SET"); /* Complete UPDATE
SET with list of attributes */ - else if (TailMatches3("UPDATE", MatchAny, "SET")) + else if (TailMatches("UPDATE", MatchAny, "SET")) COMPLETE_WITH_ATTR(prev2_wd, ""); /* UPDATE
SET = */ - else if (TailMatches4("UPDATE", MatchAny, "SET", MatchAny)) - COMPLETE_WITH_CONST("="); + else if (TailMatches("UPDATE", MatchAny, "SET", MatchAny)) + COMPLETE_WITH("="); /* USER MAPPING */ - else if (Matches3("ALTER|CREATE|DROP", "USER", "MAPPING")) - COMPLETE_WITH_CONST("FOR"); - else if (Matches4("CREATE", "USER", "MAPPING", "FOR")) + else if (Matches("ALTER|CREATE|DROP", "USER", "MAPPING")) + COMPLETE_WITH("FOR"); + else if (Matches("CREATE", "USER", "MAPPING", "FOR")) COMPLETE_WITH_QUERY(Query_for_list_of_roles " UNION SELECT 'CURRENT_USER'" " UNION SELECT 'PUBLIC'" " UNION SELECT 'USER'"); - else if (Matches4("ALTER|DROP", "USER", "MAPPING", "FOR")) + else if (Matches("ALTER|DROP", "USER", "MAPPING", "FOR")) COMPLETE_WITH_QUERY(Query_for_list_of_user_mappings); - else if (Matches5("CREATE|ALTER|DROP", "USER", "MAPPING", "FOR", MatchAny)) - COMPLETE_WITH_CONST("SERVER"); - else if (Matches7("CREATE|ALTER", "USER", "MAPPING", "FOR", MatchAny, "SERVER", MatchAny)) - COMPLETE_WITH_CONST("OPTIONS"); + else if (Matches("CREATE|ALTER|DROP", "USER", "MAPPING", "FOR", MatchAny)) + COMPLETE_WITH("SERVER"); + else if (Matches("CREATE|ALTER", "USER", "MAPPING", "FOR", MatchAny, "SERVER", MatchAny)) + COMPLETE_WITH("OPTIONS"); /* - * VACUUM [ FULL | FREEZE ] [ VERBOSE ] [ table ] - * VACUUM [ FULL | FREEZE ] [ VERBOSE ] ANALYZE [ table [ (column [, ...] ) ] ] + * VACUUM [ ( option [, ...] ) ] [ table_and_columns [, ...] ] + * VACUUM [ FULL ] [ FREEZE ] [ VERBOSE ] [ ANALYZE ] [ table_and_columns [, ...] ] */ - else if (Matches1("VACUUM")) - COMPLETE_WITH_SCHEMA_QUERY(Query_for_list_of_tm, + else if (Matches("VACUUM")) + COMPLETE_WITH_SCHEMA_QUERY(Query_for_list_of_vacuumables, " UNION SELECT 'FULL'" " UNION SELECT 'FREEZE'" " UNION SELECT 'ANALYZE'" " UNION SELECT 'VERBOSE'"); - else if (Matches2("VACUUM", "FULL|FREEZE")) - COMPLETE_WITH_SCHEMA_QUERY(Query_for_list_of_tm, + else if (Matches("VACUUM", "FULL")) + COMPLETE_WITH_SCHEMA_QUERY(Query_for_list_of_vacuumables, + " UNION SELECT 'FREEZE'" " UNION SELECT 'ANALYZE'" " UNION SELECT 'VERBOSE'"); - else if (Matches3("VACUUM", "FULL|FREEZE", "ANALYZE")) - COMPLETE_WITH_SCHEMA_QUERY(Query_for_list_of_tm, - " UNION SELECT 'VERBOSE'"); - else if (Matches3("VACUUM", "FULL|FREEZE", "VERBOSE")) - COMPLETE_WITH_SCHEMA_QUERY(Query_for_list_of_tm, + else if (Matches("VACUUM", "FREEZE") || + Matches("VACUUM", "FULL", "FREEZE")) + COMPLETE_WITH_SCHEMA_QUERY(Query_for_list_of_vacuumables, + " UNION SELECT 'VERBOSE'" " UNION SELECT 'ANALYZE'"); - else if (Matches2("VACUUM", "VERBOSE")) - COMPLETE_WITH_SCHEMA_QUERY(Query_for_list_of_tm, + else if (Matches("VACUUM", "VERBOSE") || + Matches("VACUUM", "FULL|FREEZE", "VERBOSE") || + Matches("VACUUM", "FULL", "FREEZE", "VERBOSE")) + COMPLETE_WITH_SCHEMA_QUERY(Query_for_list_of_vacuumables, " UNION SELECT 'ANALYZE'"); - else if (Matches2("VACUUM", "ANALYZE")) - COMPLETE_WITH_SCHEMA_QUERY(Query_for_list_of_tm, - " UNION SELECT 'VERBOSE'"); - else if (HeadMatches1("VACUUM")) - COMPLETE_WITH_SCHEMA_QUERY(Query_for_list_of_tm, NULL); + else if (HeadMatches("VACUUM", "(*") && + !HeadMatches("VACUUM", "(*)")) + { + /* + * This fires if we're in an unfinished parenthesized option list. + * get_previous_words treats a completed parenthesized option list as + * one word, so the above test is correct. 
+ */ + if (ends_with(prev_wd, '(') || ends_with(prev_wd, ',')) + COMPLETE_WITH("FULL", "FREEZE", "ANALYZE", "VERBOSE", + "DISABLE_PAGE_SKIPPING"); + } + else if (HeadMatches("VACUUM") && TailMatches("(")) + /* "VACUUM (" should be caught above, so assume we want columns */ + COMPLETE_WITH_ATTR(prev2_wd, ""); + else if (HeadMatches("VACUUM")) + COMPLETE_WITH_SCHEMA_QUERY(Query_for_list_of_vacuumables, NULL); /* WITH [RECURSIVE] */ @@ -3362,114 +3384,109 @@ psql_completion(const char *text, int start, int end) * Only match when WITH is the first word, as WITH may appear in many * other contexts. */ - else if (Matches1("WITH")) - COMPLETE_WITH_CONST("RECURSIVE"); - -/* ANALYZE */ - /* Complete with list of tables */ - else if (Matches1("ANALYZE")) - COMPLETE_WITH_SCHEMA_QUERY(Query_for_list_of_tmf, NULL); + else if (Matches("WITH")) + COMPLETE_WITH("RECURSIVE"); /* WHERE */ /* Simple case of the word before the where being the table name */ - else if (TailMatches2(MatchAny, "WHERE")) + else if (TailMatches(MatchAny, "WHERE")) COMPLETE_WITH_ATTR(prev2_wd, ""); /* ... FROM ... */ /* TODO: also include SRF ? */ - else if (TailMatches1("FROM") && !Matches3("COPY|\\copy", MatchAny, "FROM")) - COMPLETE_WITH_SCHEMA_QUERY(Query_for_list_of_tsvmf, NULL); + else if (TailMatches("FROM") && !Matches("COPY|\\copy", MatchAny, "FROM")) + COMPLETE_WITH_SCHEMA_QUERY(Query_for_list_of_selectables, NULL); /* ... JOIN ... */ - else if (TailMatches1("JOIN")) - COMPLETE_WITH_SCHEMA_QUERY(Query_for_list_of_tsvmf, NULL); + else if (TailMatches("JOIN")) + COMPLETE_WITH_SCHEMA_QUERY(Query_for_list_of_selectables, NULL); /* Backslash commands */ /* TODO: \dc \dd \dl */ - else if (TailMatchesCS1("\\?")) - COMPLETE_WITH_LIST_CS3("commands", "options", "variables"); - else if (TailMatchesCS1("\\connect|\\c")) + else if (TailMatchesCS("\\?")) + COMPLETE_WITH_CS("commands", "options", "variables"); + else if (TailMatchesCS("\\connect|\\c")) { if (!recognized_connection_string(text)) COMPLETE_WITH_QUERY(Query_for_list_of_databases); } - else if (TailMatchesCS2("\\connect|\\c", MatchAny)) + else if (TailMatchesCS("\\connect|\\c", MatchAny)) { if (!recognized_connection_string(prev_wd)) COMPLETE_WITH_QUERY(Query_for_list_of_roles); } - else if (TailMatchesCS1("\\da*")) - COMPLETE_WITH_SCHEMA_QUERY(Query_for_list_of_aggregates, NULL); - else if (TailMatchesCS1("\\dA*")) + else if (TailMatchesCS("\\da*")) + COMPLETE_WITH_VERSIONED_SCHEMA_QUERY(Query_for_list_of_aggregates, NULL); + else if (TailMatchesCS("\\dA*")) COMPLETE_WITH_QUERY(Query_for_list_of_access_methods); - else if (TailMatchesCS1("\\db*")) + else if (TailMatchesCS("\\db*")) COMPLETE_WITH_QUERY(Query_for_list_of_tablespaces); - else if (TailMatchesCS1("\\dD*")) + else if (TailMatchesCS("\\dD*")) COMPLETE_WITH_SCHEMA_QUERY(Query_for_list_of_domains, NULL); - else if (TailMatchesCS1("\\des*")) + else if (TailMatchesCS("\\des*")) COMPLETE_WITH_QUERY(Query_for_list_of_servers); - else if (TailMatchesCS1("\\deu*")) + else if (TailMatchesCS("\\deu*")) COMPLETE_WITH_QUERY(Query_for_list_of_user_mappings); - else if (TailMatchesCS1("\\dew*")) + else if (TailMatchesCS("\\dew*")) COMPLETE_WITH_QUERY(Query_for_list_of_fdws); - else if (TailMatchesCS1("\\df*")) - COMPLETE_WITH_SCHEMA_QUERY(Query_for_list_of_functions, NULL); + else if (TailMatchesCS("\\df*")) + COMPLETE_WITH_VERSIONED_SCHEMA_QUERY(Query_for_list_of_functions, NULL); - else if (TailMatchesCS1("\\dFd*")) + else if (TailMatchesCS("\\dFd*")) COMPLETE_WITH_QUERY(Query_for_list_of_ts_dictionaries); - else if 
(TailMatchesCS1("\\dFp*")) + else if (TailMatchesCS("\\dFp*")) COMPLETE_WITH_QUERY(Query_for_list_of_ts_parsers); - else if (TailMatchesCS1("\\dFt*")) + else if (TailMatchesCS("\\dFt*")) COMPLETE_WITH_QUERY(Query_for_list_of_ts_templates); /* must be at end of \dF alternatives: */ - else if (TailMatchesCS1("\\dF*")) + else if (TailMatchesCS("\\dF*")) COMPLETE_WITH_QUERY(Query_for_list_of_ts_configurations); - else if (TailMatchesCS1("\\di*")) + else if (TailMatchesCS("\\di*")) COMPLETE_WITH_SCHEMA_QUERY(Query_for_list_of_indexes, NULL); - else if (TailMatchesCS1("\\dL*")) + else if (TailMatchesCS("\\dL*")) COMPLETE_WITH_QUERY(Query_for_list_of_languages); - else if (TailMatchesCS1("\\dn*")) + else if (TailMatchesCS("\\dn*")) COMPLETE_WITH_QUERY(Query_for_list_of_schemas); - else if (TailMatchesCS1("\\dp") || TailMatchesCS1("\\z")) - COMPLETE_WITH_SCHEMA_QUERY(Query_for_list_of_tsvmf, NULL); - else if (TailMatchesCS1("\\ds*")) + else if (TailMatchesCS("\\dp") || TailMatchesCS("\\z")) + COMPLETE_WITH_SCHEMA_QUERY(Query_for_list_of_grantables, NULL); + else if (TailMatchesCS("\\ds*")) COMPLETE_WITH_SCHEMA_QUERY(Query_for_list_of_sequences, NULL); - else if (TailMatchesCS1("\\dt*")) + else if (TailMatchesCS("\\dt*")) COMPLETE_WITH_SCHEMA_QUERY(Query_for_list_of_tables, NULL); - else if (TailMatchesCS1("\\dT*")) + else if (TailMatchesCS("\\dT*")) COMPLETE_WITH_SCHEMA_QUERY(Query_for_list_of_datatypes, NULL); - else if (TailMatchesCS1("\\du*") || TailMatchesCS1("\\dg*")) + else if (TailMatchesCS("\\du*") || TailMatchesCS("\\dg*")) COMPLETE_WITH_QUERY(Query_for_list_of_roles); - else if (TailMatchesCS1("\\dv*")) + else if (TailMatchesCS("\\dv*")) COMPLETE_WITH_SCHEMA_QUERY(Query_for_list_of_views, NULL); - else if (TailMatchesCS1("\\dx*")) + else if (TailMatchesCS("\\dx*")) COMPLETE_WITH_QUERY(Query_for_list_of_extensions); - else if (TailMatchesCS1("\\dm*")) + else if (TailMatchesCS("\\dm*")) COMPLETE_WITH_SCHEMA_QUERY(Query_for_list_of_matviews, NULL); - else if (TailMatchesCS1("\\dE*")) + else if (TailMatchesCS("\\dE*")) COMPLETE_WITH_SCHEMA_QUERY(Query_for_list_of_foreign_tables, NULL); - else if (TailMatchesCS1("\\dy*")) + else if (TailMatchesCS("\\dy*")) COMPLETE_WITH_QUERY(Query_for_list_of_event_triggers); /* must be at end of \d alternatives: */ - else if (TailMatchesCS1("\\d*")) + else if (TailMatchesCS("\\d*")) COMPLETE_WITH_SCHEMA_QUERY(Query_for_list_of_relations, NULL); - else if (TailMatchesCS1("\\ef")) - COMPLETE_WITH_SCHEMA_QUERY(Query_for_list_of_functions, NULL); - else if (TailMatchesCS1("\\ev")) + else if (TailMatchesCS("\\ef")) + COMPLETE_WITH_SCHEMA_QUERY(Query_for_list_of_routines, NULL); + else if (TailMatchesCS("\\ev")) COMPLETE_WITH_SCHEMA_QUERY(Query_for_list_of_views, NULL); - else if (TailMatchesCS1("\\encoding")) + else if (TailMatchesCS("\\encoding")) COMPLETE_WITH_QUERY(Query_for_list_of_encodings); - else if (TailMatchesCS1("\\h|\\help")) + else if (TailMatchesCS("\\h|\\help")) COMPLETE_WITH_LIST(sql_commands); - else if (TailMatchesCS2("\\h|\\help", MatchAny)) + else if (TailMatchesCS("\\h|\\help", MatchAny)) { - if (TailMatches1("DROP")) + if (TailMatches("DROP")) matches = completion_matches(text, drop_command_generator); - else if (TailMatches1("ALTER")) + else if (TailMatches("ALTER")) matches = completion_matches(text, alter_command_generator); /* @@ -3477,101 +3494,96 @@ psql_completion(const char *text, int start, int end) * repeated here */ } - else if (TailMatchesCS3("\\h|\\help", MatchAny, MatchAny)) + else if (TailMatchesCS("\\h|\\help", MatchAny, 
MatchAny)) { - if (TailMatches2("CREATE|DROP", "ACCESS")) - COMPLETE_WITH_CONST("METHOD"); - else if (TailMatches2("ALTER", "DEFAULT")) - COMPLETE_WITH_CONST("PRIVILEGES"); - else if (TailMatches2("CREATE|ALTER|DROP", "EVENT")) - COMPLETE_WITH_CONST("TRIGGER"); - else if (TailMatches2("CREATE|ALTER|DROP", "FOREIGN")) - COMPLETE_WITH_LIST2("DATA WRAPPER", "TABLE"); - else if (TailMatches2("ALTER", "LARGE")) - COMPLETE_WITH_CONST("OBJECT"); - else if (TailMatches2("CREATE|ALTER|DROP", "MATERIALIZED")) - COMPLETE_WITH_CONST("VIEW"); - else if (TailMatches2("CREATE|ALTER|DROP", "TEXT")) - COMPLETE_WITH_CONST("SEARCH"); - else if (TailMatches2("CREATE|ALTER|DROP", "USER")) - COMPLETE_WITH_CONST("MAPPING FOR"); + if (TailMatches("CREATE|DROP", "ACCESS")) + COMPLETE_WITH("METHOD"); + else if (TailMatches("ALTER", "DEFAULT")) + COMPLETE_WITH("PRIVILEGES"); + else if (TailMatches("CREATE|ALTER|DROP", "EVENT")) + COMPLETE_WITH("TRIGGER"); + else if (TailMatches("CREATE|ALTER|DROP", "FOREIGN")) + COMPLETE_WITH("DATA WRAPPER", "TABLE"); + else if (TailMatches("ALTER", "LARGE")) + COMPLETE_WITH("OBJECT"); + else if (TailMatches("CREATE|ALTER|DROP", "MATERIALIZED")) + COMPLETE_WITH("VIEW"); + else if (TailMatches("CREATE|ALTER|DROP", "TEXT")) + COMPLETE_WITH("SEARCH"); + else if (TailMatches("CREATE|ALTER|DROP", "USER")) + COMPLETE_WITH("MAPPING FOR"); } - else if (TailMatchesCS4("\\h|\\help", MatchAny, MatchAny, MatchAny)) + else if (TailMatchesCS("\\h|\\help", MatchAny, MatchAny, MatchAny)) { - if (TailMatches3("CREATE|ALTER|DROP", "FOREIGN", "DATA")) - COMPLETE_WITH_CONST("WRAPPER"); - else if (TailMatches3("CREATE|ALTER|DROP", "TEXT", "SEARCH")) - COMPLETE_WITH_LIST4("CONFIGURATION", "DICTIONARY", "PARSER", "TEMPLATE"); - else if (TailMatches3("CREATE|ALTER|DROP", "USER", "MAPPING")) - COMPLETE_WITH_CONST("FOR"); + if (TailMatches("CREATE|ALTER|DROP", "FOREIGN", "DATA")) + COMPLETE_WITH("WRAPPER"); + else if (TailMatches("CREATE|ALTER|DROP", "TEXT", "SEARCH")) + COMPLETE_WITH("CONFIGURATION", "DICTIONARY", "PARSER", "TEMPLATE"); + else if (TailMatches("CREATE|ALTER|DROP", "USER", "MAPPING")) + COMPLETE_WITH("FOR"); } - else if (TailMatchesCS1("\\l*") && !TailMatchesCS1("\\lo*")) + else if (TailMatchesCS("\\l*") && !TailMatchesCS("\\lo*")) COMPLETE_WITH_QUERY(Query_for_list_of_databases); - else if (TailMatchesCS1("\\password")) + else if (TailMatchesCS("\\password")) COMPLETE_WITH_QUERY(Query_for_list_of_roles); - else if (TailMatchesCS1("\\pset")) + else if (TailMatchesCS("\\pset")) + COMPLETE_WITH_CS("border", "columns", "expanded", + "fieldsep", "fieldsep_zero", "footer", "format", + "linestyle", "null", "numericlocale", + "pager", "pager_min_lines", + "recordsep", "recordsep_zero", + "tableattr", "title", "tuples_only", + "unicode_border_linestyle", + "unicode_column_linestyle", + "unicode_header_linestyle"); + else if (TailMatchesCS("\\pset", MatchAny)) { - static const char *const my_list[] = - {"border", "columns", "expanded", "fieldsep", "fieldsep_zero", - "footer", "format", "linestyle", "null", "numericlocale", - "pager", "pager_min_lines", "recordsep", "recordsep_zero", - "tableattr", "title", "tuples_only", "unicode_border_linestyle", - "unicode_column_linestyle", "unicode_header_linestyle", NULL}; - - COMPLETE_WITH_LIST_CS(my_list); + if (TailMatchesCS("format")) + COMPLETE_WITH_CS("aligned", "asciidoc", "html", "latex", + "latex-longtable", "troff-ms", "unaligned", + "wrapped"); + else if (TailMatchesCS("linestyle")) + COMPLETE_WITH_CS("ascii", "old-ascii", "unicode"); + else if 
(TailMatchesCS("pager")) + COMPLETE_WITH_CS("on", "off", "always"); + else if (TailMatchesCS("unicode_border_linestyle|" + "unicode_column_linestyle|" + "unicode_header_linestyle")) + COMPLETE_WITH_CS("single", "double"); } - else if (TailMatchesCS2("\\pset", MatchAny)) - { - if (TailMatchesCS1("format")) - { - static const char *const my_list[] = - {"unaligned", "aligned", "wrapped", "html", "asciidoc", - "latex", "latex-longtable", "troff-ms", NULL}; - - COMPLETE_WITH_LIST_CS(my_list); - } - else if (TailMatchesCS1("linestyle")) - COMPLETE_WITH_LIST_CS3("ascii", "old-ascii", "unicode"); - else if (TailMatchesCS1("pager")) - COMPLETE_WITH_LIST_CS3("on", "off", "always"); - else if (TailMatchesCS1("unicode_border_linestyle|" - "unicode_column_linestyle|" - "unicode_header_linestyle")) - COMPLETE_WITH_LIST_CS2("single", "double"); - } - else if (TailMatchesCS1("\\unset")) + else if (TailMatchesCS("\\unset")) matches = complete_from_variables(text, "", "", true); - else if (TailMatchesCS1("\\set")) + else if (TailMatchesCS("\\set")) matches = complete_from_variables(text, "", "", false); - else if (TailMatchesCS2("\\set", MatchAny)) + else if (TailMatchesCS("\\set", MatchAny)) { - if (TailMatchesCS1("AUTOCOMMIT|ON_ERROR_STOP|QUIET|" - "SINGLELINE|SINGLESTEP")) - COMPLETE_WITH_LIST_CS2("on", "off"); - else if (TailMatchesCS1("COMP_KEYWORD_CASE")) - COMPLETE_WITH_LIST_CS4("lower", "upper", - "preserve-lower", "preserve-upper"); - else if (TailMatchesCS1("ECHO")) - COMPLETE_WITH_LIST_CS4("errors", "queries", "all", "none"); - else if (TailMatchesCS1("ECHO_HIDDEN")) - COMPLETE_WITH_LIST_CS3("noexec", "off", "on"); - else if (TailMatchesCS1("HISTCONTROL")) - COMPLETE_WITH_LIST_CS4("ignorespace", "ignoredups", - "ignoreboth", "none"); - else if (TailMatchesCS1("ON_ERROR_ROLLBACK")) - COMPLETE_WITH_LIST_CS3("on", "off", "interactive"); - else if (TailMatchesCS1("SHOW_CONTEXT")) - COMPLETE_WITH_LIST_CS3("never", "errors", "always"); - else if (TailMatchesCS1("VERBOSITY")) - COMPLETE_WITH_LIST_CS3("default", "verbose", "terse"); + if (TailMatchesCS("AUTOCOMMIT|ON_ERROR_STOP|QUIET|" + "SINGLELINE|SINGLESTEP")) + COMPLETE_WITH_CS("on", "off"); + else if (TailMatchesCS("COMP_KEYWORD_CASE")) + COMPLETE_WITH_CS("lower", "upper", + "preserve-lower", "preserve-upper"); + else if (TailMatchesCS("ECHO")) + COMPLETE_WITH_CS("errors", "queries", "all", "none"); + else if (TailMatchesCS("ECHO_HIDDEN")) + COMPLETE_WITH_CS("noexec", "off", "on"); + else if (TailMatchesCS("HISTCONTROL")) + COMPLETE_WITH_CS("ignorespace", "ignoredups", + "ignoreboth", "none"); + else if (TailMatchesCS("ON_ERROR_ROLLBACK")) + COMPLETE_WITH_CS("on", "off", "interactive"); + else if (TailMatchesCS("SHOW_CONTEXT")) + COMPLETE_WITH_CS("never", "errors", "always"); + else if (TailMatchesCS("VERBOSITY")) + COMPLETE_WITH_CS("default", "verbose", "terse"); } - else if (TailMatchesCS1("\\sf*")) - COMPLETE_WITH_SCHEMA_QUERY(Query_for_list_of_functions, NULL); - else if (TailMatchesCS1("\\sv*")) + else if (TailMatchesCS("\\sf*")) + COMPLETE_WITH_SCHEMA_QUERY(Query_for_list_of_routines, NULL); + else if (TailMatchesCS("\\sv*")) COMPLETE_WITH_SCHEMA_QUERY(Query_for_list_of_views, NULL); - else if (TailMatchesCS1("\\cd|\\e|\\edit|\\g|\\i|\\include|" - "\\ir|\\include_relative|\\o|\\out|" - "\\s|\\w|\\write|\\lo_import")) + else if (TailMatchesCS("\\cd|\\e|\\edit|\\g|\\i|\\include|" + "\\ir|\\include_relative|\\o|\\out|" + "\\s|\\w|\\write|\\lo_import")) { completion_charp = "\\"; matches = completion_matches(text, complete_from_files); @@ -3592,9 
+3604,11 @@ psql_completion(const char *text, int start, int end) { if (words_after_create[i].query) COMPLETE_WITH_QUERY(words_after_create[i].query); + else if (words_after_create[i].vquery) + COMPLETE_WITH_VERSIONED_QUERY(words_after_create[i].vquery); else if (words_after_create[i].squery) - COMPLETE_WITH_SCHEMA_QUERY(*words_after_create[i].squery, - NULL); + COMPLETE_WITH_VERSIONED_SCHEMA_QUERY(words_after_create[i].squery, + NULL); break; } } @@ -3607,7 +3621,7 @@ psql_completion(const char *text, int start, int end) */ if (matches == NULL) { - COMPLETE_WITH_CONST(""); + COMPLETE_WITH(""); #ifdef HAVE_RL_COMPLETION_APPEND_CHARACTER rl_completion_append_character = '\0'; #endif @@ -3693,24 +3707,73 @@ alter_command_generator(const char *text, int state) return create_or_drop_command_generator(text, state, THING_NO_ALTER); } -/* The following two functions are wrappers for _complete_from_query */ +/* + * These functions generate lists using server queries. + * They are all wrappers for _complete_from_query. + */ static char * complete_from_query(const char *text, int state) { - return _complete_from_query(0, text, state); + /* query is assumed to work for any server version */ + return _complete_from_query(completion_charp, NULL, text, state); +} + +static char * +complete_from_versioned_query(const char *text, int state) +{ + const VersionedQuery *vquery = completion_vquery; + + /* Find appropriate array element */ + while (pset.sversion < vquery->min_server_version) + vquery++; + /* Fail completion if server is too old */ + if (vquery->query == NULL) + return NULL; + + return _complete_from_query(vquery->query, NULL, text, state); } static char * complete_from_schema_query(const char *text, int state) { - return _complete_from_query(1, text, state); + /* query is assumed to work for any server version */ + return _complete_from_query(completion_charp, completion_squery, + text, state); +} + +static char * +complete_from_versioned_schema_query(const char *text, int state) +{ + const SchemaQuery *squery = completion_squery; + const VersionedQuery *vquery = completion_vquery; + + /* Find appropriate array element */ + while (pset.sversion < squery->min_server_version) + squery++; + /* Fail completion if server is too old */ + if (squery->catname == NULL) + return NULL; + + /* Likewise for the add-on text, if any */ + if (vquery) + { + while (pset.sversion < vquery->min_server_version) + vquery++; + if (vquery->query == NULL) + return NULL; + } + + return _complete_from_query(vquery ? vquery->query : NULL, + squery, text, state); } /* - * This creates a list of matching things, according to a query pointed to - * by completion_charp. + * This creates a list of matching things, according to a query described by + * the initial arguments. The caller has already done any work needed to + * select the appropriate query for the server's version. + * * The query can be one of two kinds: * * 1. A simple query which must contain a %d and a %s, which will be replaced @@ -3724,13 +3787,20 @@ complete_from_schema_query(const char *text, int state) * %d %s %d %s %d %s %s %d %s * where %d is the string length of the text and %s the text itself. * + * If both simple_query and schema_query are non-NULL, then we construct + * a schema query and append the (uninterpreted) string simple_query to it. + * * It is assumed that strings should be escaped to become SQL literals * (that is, what is in the query is actually ... '%s' ...) * * See top of file for examples of both kinds of query. 
+ * + * "text" and "state" are supplied by readline. */ static char * -_complete_from_query(int is_schema_query, const char *text, int state) +_complete_from_query(const char *simple_query, + const SchemaQuery *schema_query, + const char *text, int state) { static int list_index, byte_length; @@ -3781,26 +3851,26 @@ _complete_from_query(int is_schema_query, const char *text, int state) initPQExpBuffer(&query_buffer); - if (is_schema_query) + if (schema_query) { - /* completion_squery gives us the pieces to assemble */ - const char *qualresult = completion_squery->qualresult; + /* schema_query gives us the pieces to assemble */ + const char *qualresult = schema_query->qualresult; if (qualresult == NULL) - qualresult = completion_squery->result; + qualresult = schema_query->result; /* Get unqualified names matching the input-so-far */ appendPQExpBuffer(&query_buffer, "SELECT %s FROM %s WHERE ", - completion_squery->result, - completion_squery->catname); - if (completion_squery->selcondition) + schema_query->result, + schema_query->catname); + if (schema_query->selcondition) appendPQExpBuffer(&query_buffer, "%s AND ", - completion_squery->selcondition); + schema_query->selcondition); appendPQExpBuffer(&query_buffer, "substring(%s,1,%d)='%s'", - completion_squery->result, + schema_query->result, char_length, e_text); appendPQExpBuffer(&query_buffer, " AND %s", - completion_squery->viscondition); + schema_query->viscondition); /* * When fetching relation names, suppress system catalogs unless @@ -3808,7 +3878,7 @@ _complete_from_query(int is_schema_query, const char *text, int state) * between not offering system catalogs for completion at all, and * having them swamp the result when the input is just "p". */ - if (strcmp(completion_squery->catname, + if (strcmp(schema_query->catname, "pg_catalog.pg_class c") == 0 && strncmp(text, "pg_", 3) !=0) { @@ -3842,11 +3912,11 @@ _complete_from_query(int is_schema_query, const char *text, int state) "FROM %s, pg_catalog.pg_namespace n " "WHERE %s = n.oid AND ", qualresult, - completion_squery->catname, - completion_squery->namespace); - if (completion_squery->selcondition) + schema_query->catname, + schema_query->namespace); + if (schema_query->selcondition) appendPQExpBuffer(&query_buffer, "%s AND ", - completion_squery->selcondition); + schema_query->selcondition); appendPQExpBuffer(&query_buffer, "substring(pg_catalog.quote_ident(n.nspname) || '.' 
|| %s,1,%d)='%s'", qualresult, char_length, e_text); @@ -3867,13 +3937,14 @@ _complete_from_query(int is_schema_query, const char *text, int state) char_length, e_text); /* If an addon query was provided, use it */ - if (completion_charp) - appendPQExpBuffer(&query_buffer, "\n%s", completion_charp); + if (simple_query) + appendPQExpBuffer(&query_buffer, "\n%s", simple_query); } else { - /* completion_charp is an sprintf-style format string */ - appendPQExpBuffer(&query_buffer, completion_charp, + Assert(simple_query); + /* simple_query is an sprintf-style format string */ + appendPQExpBuffer(&query_buffer, simple_query, char_length, e_text, e_info_charp, e_info_charp, e_info_charp2, e_info_charp2); diff --git a/src/bin/psql/tab-complete.h b/src/bin/psql/tab-complete.h index 1a42ef1c66..544318c36d 100644 --- a/src/bin/psql/tab-complete.h +++ b/src/bin/psql/tab-complete.h @@ -1,7 +1,7 @@ /* * psql - the PostgreSQL interactive terminal * - * Copyright (c) 2000-2017, PostgreSQL Global Development Group + * Copyright (c) 2000-2018, PostgreSQL Global Development Group * * src/bin/psql/tab-complete.h */ diff --git a/src/bin/psql/variables.c b/src/bin/psql/variables.c index 806d39bfbe..f093442644 100644 --- a/src/bin/psql/variables.c +++ b/src/bin/psql/variables.c @@ -1,7 +1,7 @@ /* * psql - the PostgreSQL interactive terminal * - * Copyright (c) 2000-2017, PostgreSQL Global Development Group + * Copyright (c) 2000-2018, PostgreSQL Global Development Group * * src/bin/psql/variables.c */ @@ -136,7 +136,7 @@ ParseVariableBool(const char *value, const char *name, bool *result) { /* string is not recognized; don't clobber *result */ if (name) - psql_error("unrecognized value \"%s\" for \"%s\": boolean expected\n", + psql_error("unrecognized value \"%s\" for \"%s\": Boolean expected\n", value, name); valid = false; } @@ -246,10 +246,10 @@ SetVariable(VariableSpace space, const char *name, const char *value) bool confirmed; if (current->substitute_hook) - new_value = (*current->substitute_hook) (new_value); + new_value = current->substitute_hook(new_value); if (current->assign_hook) - confirmed = (*current->assign_hook) (new_value); + confirmed = current->assign_hook(new_value); else confirmed = true; diff --git a/src/bin/psql/variables.h b/src/bin/psql/variables.h index 02d85b1bc2..03af11197c 100644 --- a/src/bin/psql/variables.h +++ b/src/bin/psql/variables.h @@ -1,7 +1,7 @@ /* * psql - the PostgreSQL interactive terminal * - * Copyright (c) 2000-2017, PostgreSQL Global Development Group + * Copyright (c) 2000-2018, PostgreSQL Global Development Group * * This implements a sort of variable repository. One could also think of it * as a cheap version of an associative array. 
Each variable has a string diff --git a/src/bin/scripts/Makefile b/src/bin/scripts/Makefile index a9c24a9f83..4c6e4b9395 100644 --- a/src/bin/scripts/Makefile +++ b/src/bin/scripts/Makefile @@ -2,7 +2,7 @@ # # Makefile for src/bin/scripts # -# Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group +# Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group # Portions Copyright (c) 1994, Regents of the University of California # # src/bin/scripts/Makefile @@ -19,7 +19,7 @@ include $(top_builddir)/src/Makefile.global PROGRAMS = createdb createuser dropdb dropuser clusterdb vacuumdb reindexdb pg_isready override CPPFLAGS := -I$(libpq_srcdir) $(CPPFLAGS) -override LDFLAGS := -L$(top_builddir)/src/fe_utils -lpgfeutils $(libpq_pgport) $(LDFLAGS) +LDFLAGS_INTERNAL += -L$(top_builddir)/src/fe_utils -lpgfeutils $(libpq_pgport) all: $(PROGRAMS) diff --git a/src/bin/scripts/clusterdb.c b/src/bin/scripts/clusterdb.c index a6640aa57b..650d2ae261 100644 --- a/src/bin/scripts/clusterdb.c +++ b/src/bin/scripts/clusterdb.c @@ -2,7 +2,7 @@ * * clusterdb * - * Portions Copyright (c) 2002-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 2002-2018, PostgreSQL Global Development Group * * src/bin/scripts/clusterdb.c * @@ -195,17 +195,21 @@ cluster_one_database(const char *dbname, bool verbose, const char *table, PGconn *conn; + conn = connectDatabase(dbname, host, port, username, prompt_password, + progname, echo, false, false); + initPQExpBuffer(&sql); appendPQExpBufferStr(&sql, "CLUSTER"); if (verbose) appendPQExpBufferStr(&sql, " VERBOSE"); if (table) - appendPQExpBuffer(&sql, " %s", table); + { + appendPQExpBufferChar(&sql, ' '); + appendQualifiedRelation(&sql, table, conn, progname, echo); + } appendPQExpBufferChar(&sql, ';'); - conn = connectDatabase(dbname, host, port, username, prompt_password, - progname, false, false); if (!executeMaintenanceCommand(conn, sql.data, echo)) { if (table) @@ -234,7 +238,7 @@ cluster_all_databases(bool verbose, const char *maintenance_db, int i; conn = connectMaintenanceDatabase(maintenance_db, host, port, username, - prompt_password, progname); + prompt_password, progname, echo); result = executeQuery(conn, "SELECT datname FROM pg_database WHERE datallowconn ORDER BY 1;", progname, echo); PQfinish(conn); diff --git a/src/bin/scripts/common.c b/src/bin/scripts/common.c index 7394bf293e..ba6120706d 100644 --- a/src/bin/scripts/common.c +++ b/src/bin/scripts/common.c @@ -4,7 +4,7 @@ * Common support routines for bin/scripts/ * * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/bin/scripts/common.c @@ -18,6 +18,8 @@ #include #include "common.h" +#include "fe_utils/connect.h" +#include "fe_utils/string_utils.h" static PGcancel *volatile cancelConn = NULL; @@ -63,9 +65,10 @@ handle_help_version_opts(int argc, char *argv[], * as before, else we might create password exposure hazards.) 
*/ PGconn * -connectDatabase(const char *dbname, const char *pghost, const char *pgport, - const char *pguser, enum trivalue prompt_password, - const char *progname, bool fail_ok, bool allow_password_reuse) +connectDatabase(const char *dbname, const char *pghost, + const char *pgport, const char *pguser, + enum trivalue prompt_password, const char *progname, + bool echo, bool fail_ok, bool allow_password_reuse) { PGconn *conn; bool new_pass; @@ -142,6 +145,10 @@ connectDatabase(const char *dbname, const char *pghost, const char *pgport, exit(1); } + if (PQserverVersion(conn) >= 70300) + PQclear(executeQuery(conn, ALWAYS_SECURE_SEARCH_PATH_SQL, + progname, echo)); + return conn; } @@ -149,24 +156,24 @@ connectDatabase(const char *dbname, const char *pghost, const char *pgport, * Try to connect to the appropriate maintenance database. */ PGconn * -connectMaintenanceDatabase(const char *maintenance_db, const char *pghost, - const char *pgport, const char *pguser, - enum trivalue prompt_password, - const char *progname) +connectMaintenanceDatabase(const char *maintenance_db, + const char *pghost, const char *pgport, + const char *pguser, enum trivalue prompt_password, + const char *progname, bool echo) { PGconn *conn; /* If a maintenance database name was specified, just connect to it. */ if (maintenance_db) return connectDatabase(maintenance_db, pghost, pgport, pguser, - prompt_password, progname, false, false); + prompt_password, progname, echo, false, false); /* Otherwise, try postgres first and then template1. */ conn = connectDatabase("postgres", pghost, pgport, pguser, prompt_password, - progname, true, false); + progname, echo, true, false); if (!conn) conn = connectDatabase("template1", pghost, pgport, pguser, - prompt_password, progname, false, false); + prompt_password, progname, echo, false, false); return conn; } @@ -252,6 +259,115 @@ executeMaintenanceCommand(PGconn *conn, const char *query, bool echo) return r; } + +/* + * Split TABLE[(COLUMNS)] into TABLE and [(COLUMNS)] portions. When you + * finish using them, pg_free(*table). *columns is a pointer into "spec", + * possibly to its NUL terminator. + */ +static void +split_table_columns_spec(const char *spec, int encoding, + char **table, const char **columns) +{ + bool inquotes = false; + const char *cp = spec; + + /* + * Find the first '(' not identifier-quoted. Based on + * dequote_downcase_identifier(). + */ + while (*cp && (*cp != '(' || inquotes)) + { + if (*cp == '"') + { + if (inquotes && cp[1] == '"') + cp++; /* pair does not affect quoting */ + else + inquotes = !inquotes; + cp++; + } + else + cp += PQmblen(cp, encoding); + } + *table = pg_strdup(spec); + (*table)[cp - spec] = '\0'; /* no strndup */ + *columns = cp; +} + +/* + * Break apart TABLE[(COLUMNS)] of "spec". With the reset_val of search_path + * in effect, have regclassin() interpret the TABLE portion. Append to "buf" + * the qualified name of TABLE, followed by any (COLUMNS). Exit on failure. + * We use this to interpret --table=foo under the search path psql would get, + * in advance of "ANALYZE public.foo" under the always-secure search path. + */ +void +appendQualifiedRelation(PQExpBuffer buf, const char *spec, + PGconn *conn, const char *progname, bool echo) +{ + char *table; + const char *columns; + PQExpBufferData sql; + PGresult *res; + int ntups; + + /* Before 7.3, the concept of qualifying a name did not exist. 
*/ + if (PQserverVersion(conn) < 70300) + { + appendPQExpBufferStr(&sql, spec); + return; + } + + split_table_columns_spec(spec, PQclientEncoding(conn), &table, &columns); + + /* + * Query must remain ABSOLUTELY devoid of unqualified names. This would + * be unnecessary given a regclassin() variant taking a search_path + * argument. + */ + initPQExpBuffer(&sql); + appendPQExpBufferStr(&sql, + "SELECT c.relname, ns.nspname\n" + " FROM pg_catalog.pg_class c," + " pg_catalog.pg_namespace ns\n" + " WHERE c.relnamespace OPERATOR(pg_catalog.=) ns.oid\n" + " AND c.oid OPERATOR(pg_catalog.=) "); + appendStringLiteralConn(&sql, table, conn); + appendPQExpBufferStr(&sql, "::pg_catalog.regclass;"); + + executeCommand(conn, "RESET search_path;", progname, echo); + + /* + * One row is a typical result, as is a nonexistent relation ERROR. + * regclassin() unconditionally accepts all-digits input as an OID; if no + * relation has that OID; this query returns no rows. Catalog corruption + * might elicit other row counts. + */ + res = executeQuery(conn, sql.data, progname, echo); + ntups = PQntuples(res); + if (ntups != 1) + { + fprintf(stderr, + ngettext("%s: query returned %d row instead of one: %s\n", + "%s: query returned %d rows instead of one: %s\n", + ntups), + progname, ntups, sql.data); + PQfinish(conn); + exit(1); + } + appendPQExpBufferStr(buf, + fmtQualifiedId(PQgetvalue(res, 0, 1), + PQgetvalue(res, 0, 0))); + appendPQExpBufferStr(buf, columns); + PQclear(res); + termPQExpBuffer(&sql); + pg_free(table); + + PQclear(executeQuery(conn, ALWAYS_SECURE_SEARCH_PATH_SQL, + progname, echo)); +} + + /* * Check yes/no answer in a localized way. 1=yes, 0=no, -1=neither. */ diff --git a/src/bin/scripts/common.h b/src/bin/scripts/common.h index 18d8d12d15..30a39a6247 100644 --- a/src/bin/scripts/common.h +++ b/src/bin/scripts/common.h @@ -2,7 +2,7 @@ * common.h * Common support routines for bin/scripts/ * - * Copyright (c) 2003-2017, PostgreSQL Global Development Group + * Copyright (c) 2003-2018, PostgreSQL Global Development Group * * src/bin/scripts/common.h */ @@ -32,11 +32,12 @@ extern void handle_help_version_opts(int argc, char *argv[], extern PGconn *connectDatabase(const char *dbname, const char *pghost, const char *pgport, const char *pguser, enum trivalue prompt_password, const char *progname, - bool fail_ok, bool allow_password_reuse); + bool echo, bool fail_ok, bool allow_password_reuse); extern PGconn *connectMaintenanceDatabase(const char *maintenance_db, - const char *pghost, const char *pgport, const char *pguser, - enum trivalue prompt_password, const char *progname); + const char *pghost, const char *pgport, + const char *pguser, enum trivalue prompt_password, + const char *progname, bool echo); extern PGresult *executeQuery(PGconn *conn, const char *query, const char *progname, bool echo); @@ -47,6 +48,9 @@ extern void executeCommand(PGconn *conn, const char *query, extern bool executeMaintenanceCommand(PGconn *conn, const char *query, bool echo); +extern void appendQualifiedRelation(PQExpBuffer buf, const char *name, + PGconn *conn, const char *progname, bool echo); + extern bool yesno_prompt(const char *question); extern void setup_cancel_handler(void); diff --git a/src/bin/scripts/createdb.c b/src/bin/scripts/createdb.c index 88ea401e39..fc108882e4 100644 --- a/src/bin/scripts/createdb.c +++ b/src/bin/scripts/createdb.c @@ -2,7 +2,7 @@ * * createdb * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global 
Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/bin/scripts/createdb.c @@ -202,7 +202,7 @@ main(int argc, char *argv[]) maintenance_db = "template1"; conn = connectMaintenanceDatabase(maintenance_db, host, port, username, - prompt_password, progname); + prompt_password, progname, echo); if (echo) printf("%s\n", sql.data); diff --git a/src/bin/scripts/createuser.c b/src/bin/scripts/createuser.c index 0e36edcc5d..3420e62fdd 100644 --- a/src/bin/scripts/createuser.c +++ b/src/bin/scripts/createuser.c @@ -2,7 +2,7 @@ * * createuser * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/bin/scripts/createuser.c @@ -252,7 +252,7 @@ main(int argc, char *argv[]) login = TRI_YES; conn = connectDatabase("postgres", host, port, username, prompt_password, - progname, false, false); + progname, echo, false, false); initPQExpBuffer(&sql); diff --git a/src/bin/scripts/dropdb.c b/src/bin/scripts/dropdb.c index 5dc8558e8e..ba0038891d 100644 --- a/src/bin/scripts/dropdb.c +++ b/src/bin/scripts/dropdb.c @@ -2,7 +2,7 @@ * * dropdb * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/bin/scripts/dropdb.c @@ -129,7 +129,8 @@ main(int argc, char *argv[]) maintenance_db = "template1"; conn = connectMaintenanceDatabase(maintenance_db, - host, port, username, prompt_password, progname); + host, port, username, prompt_password, + progname, echo); if (echo) printf("%s\n", sql.data); diff --git a/src/bin/scripts/dropuser.c b/src/bin/scripts/dropuser.c index 095c0a39ff..d9e7f7b036 100644 --- a/src/bin/scripts/dropuser.c +++ b/src/bin/scripts/dropuser.c @@ -2,7 +2,7 @@ * * dropuser * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/bin/scripts/dropuser.c @@ -134,7 +134,7 @@ main(int argc, char *argv[]) (if_exists ? 
"IF EXISTS " : ""), fmtId(dropuser)); conn = connectDatabase("postgres", host, port, username, prompt_password, - progname, false, false); + progname, echo, false, false); if (echo) printf("%s\n", sql.data); diff --git a/src/bin/scripts/nls.mk b/src/bin/scripts/nls.mk index 63f4b0b7eb..4038cdb3b6 100644 --- a/src/bin/scripts/nls.mk +++ b/src/bin/scripts/nls.mk @@ -1,6 +1,6 @@ # src/bin/scripts/nls.mk CATALOG_NAME = pgscripts -AVAIL_LANGUAGES = cs de es fr he it ja ko pl pt_BR ru sv zh_CN +AVAIL_LANGUAGES = cs de es fr he it ja ko pl pt_BR ru sv tr zh_CN GETTEXT_FILES = createdb.c createuser.c \ dropdb.c dropuser.c \ clusterdb.c vacuumdb.c reindexdb.c \ diff --git a/src/bin/scripts/pg_isready.c b/src/bin/scripts/pg_isready.c index c7c06cc6ff..f7ad7b40f0 100644 --- a/src/bin/scripts/pg_isready.c +++ b/src/bin/scripts/pg_isready.c @@ -2,7 +2,7 @@ * * pg_isready --- checks the status of the PostgreSQL server * - * Copyright (c) 2013-2017, PostgreSQL Global Development Group + * Copyright (c) 2013-2018, PostgreSQL Global Development Group * * src/bin/scripts/pg_isready.c * diff --git a/src/bin/scripts/po/de.po b/src/bin/scripts/po/de.po index b9c8c3b6ea..1122661074 100644 --- a/src/bin/scripts/po/de.po +++ b/src/bin/scripts/po/de.po @@ -1,14 +1,14 @@ # German message translation file for "scripts". -# Peter Eisentraut , 2003 - 2017. +# Peter Eisentraut , 2003 - 2018. # # Use these quotes: »%s« # msgid "" msgstr "" -"Project-Id-Version: PostgreSQL 10\n" +"Project-Id-Version: PostgreSQL 11\n" "Report-Msgid-Bugs-To: pgsql-bugs@postgresql.org\n" -"POT-Creation-Date: 2017-05-12 23:46+0000\n" -"PO-Revision-Date: 2017-05-13 09:24-0400\n" +"POT-Creation-Date: 2018-05-07 00:46+0000\n" +"PO-Revision-Date: 2018-05-06 21:20-0400\n" "Last-Translator: Peter Eisentraut \n" "Language-Team: German \n" "Language: de\n" @@ -49,37 +49,36 @@ msgid_plural "(%lu rows)" msgstr[0] "(%lu Zeile)" msgstr[1] "(%lu Zeilen)" -#: ../../fe_utils/print.c:2913 +#: ../../fe_utils/print.c:2915 #, c-format msgid "Interrupted\n" msgstr "Unterbrochen\n" -#: ../../fe_utils/print.c:2977 +#: ../../fe_utils/print.c:2979 #, c-format msgid "Cannot add header to table content: column count of %d exceeded.\n" msgstr "Kann keinen weiteren Spaltenkopf zur Tabelle hinzufügen: Spaltenzahl %d überschritten.\n" -#: ../../fe_utils/print.c:3017 +#: ../../fe_utils/print.c:3019 #, c-format msgid "Cannot add cell to table content: total cell count of %d exceeded.\n" msgstr "Cann keine weitere Zelle zur Tabelle hinzufügen: Zellengesamtzahl %d überschritten.\n" -#: ../../fe_utils/print.c:3266 +#: ../../fe_utils/print.c:3268 #, c-format msgid "invalid output format (internal error): %d" msgstr "ungültiges Ausgabeformat (interner Fehler): %d" #: clusterdb.c:111 clusterdb.c:130 createdb.c:119 createdb.c:138 #: createuser.c:166 createuser.c:181 dropdb.c:94 dropdb.c:103 dropdb.c:111 -#: dropuser.c:90 dropuser.c:105 dropuser.c:120 pg_isready.c:93 -#: pg_isready.c:107 reindexdb.c:131 reindexdb.c:150 vacuumdb.c:213 -#: vacuumdb.c:232 +#: dropuser.c:90 dropuser.c:105 dropuser.c:120 pg_isready.c:93 pg_isready.c:107 +#: reindexdb.c:131 reindexdb.c:150 vacuumdb.c:217 vacuumdb.c:236 #, c-format msgid "Try \"%s --help\" for more information.\n" msgstr "Versuchen Sie »%s --help« für weitere Informationen.\n" #: clusterdb.c:128 createdb.c:136 createuser.c:179 dropdb.c:109 dropuser.c:103 -#: pg_isready.c:105 reindexdb.c:148 vacuumdb.c:230 +#: pg_isready.c:105 reindexdb.c:148 vacuumdb.c:234 #, c-format msgid "%s: too many command-line arguments (first is \"%s\")\n" msgstr 
"%s: zu viele Kommandozeilenargumente (das erste ist »%s«)\n" @@ -94,22 +93,22 @@ msgstr "%s: kann nicht alle Datenbanken und eine bestimmte gleichzeitig clustern msgid "%s: cannot cluster specific table(s) in all databases\n" msgstr "%s: kann nicht bestimmte Tabelle(n) in allen Datenbanken clustern\n" -#: clusterdb.c:212 +#: clusterdb.c:216 #, c-format msgid "%s: clustering of table \"%s\" in database \"%s\" failed: %s" msgstr "%s: Clustern der Tabelle »%s« in Datenbank »%s« fehlgeschlagen: %s" -#: clusterdb.c:215 +#: clusterdb.c:219 #, c-format msgid "%s: clustering of database \"%s\" failed: %s" msgstr "%s: Clustern der Datenbank »%s« fehlgeschlagen: %s" -#: clusterdb.c:248 +#: clusterdb.c:252 #, c-format msgid "%s: clustering database \"%s\"\n" msgstr "%s: clustere Datenbank »%s«\n" -#: clusterdb.c:269 +#: clusterdb.c:273 #, c-format msgid "" "%s clusters all previously clustered tables in a database.\n" @@ -118,19 +117,19 @@ msgstr "" "%s clustert alle vorher geclusterten Tabellen in einer Datenbank.\n" "\n" -#: clusterdb.c:270 createdb.c:252 createuser.c:343 dropdb.c:155 dropuser.c:161 -#: pg_isready.c:222 reindexdb.c:401 vacuumdb.c:952 +#: clusterdb.c:274 createdb.c:252 createuser.c:343 dropdb.c:156 dropuser.c:161 +#: pg_isready.c:222 reindexdb.c:402 vacuumdb.c:999 #, c-format msgid "Usage:\n" msgstr "Aufruf:\n" -#: clusterdb.c:271 reindexdb.c:402 vacuumdb.c:953 +#: clusterdb.c:275 reindexdb.c:403 vacuumdb.c:1000 #, c-format msgid " %s [OPTION]... [DBNAME]\n" msgstr " %s [OPTION]... [DBNAME]\n" -#: clusterdb.c:272 createdb.c:254 createuser.c:345 dropdb.c:157 dropuser.c:163 -#: pg_isready.c:225 reindexdb.c:403 vacuumdb.c:954 +#: clusterdb.c:276 createdb.c:254 createuser.c:345 dropdb.c:158 dropuser.c:163 +#: pg_isready.c:225 reindexdb.c:404 vacuumdb.c:1001 #, c-format msgid "" "\n" @@ -139,53 +138,50 @@ msgstr "" "\n" "Optionen:\n" -#: clusterdb.c:273 +#: clusterdb.c:277 #, c-format msgid " -a, --all cluster all databases\n" msgstr " -a, --all clustere alle Datenbanken\n" -#: clusterdb.c:274 +#: clusterdb.c:278 #, c-format msgid " -d, --dbname=DBNAME database to cluster\n" msgstr " -d, --dbname=DBNAME zu clusternde Datenbank\n" -#: clusterdb.c:275 createuser.c:349 dropdb.c:158 dropuser.c:164 -#: reindexdb.c:406 +#: clusterdb.c:279 createuser.c:349 dropdb.c:159 dropuser.c:164 reindexdb.c:407 #, c-format msgid " -e, --echo show the commands being sent to the server\n" msgstr "" " -e, --echo zeige die Befehle, die an den Server\n" " gesendet werden\n" -#: clusterdb.c:276 reindexdb.c:408 +#: clusterdb.c:280 reindexdb.c:409 #, c-format msgid " -q, --quiet don't write any messages\n" msgstr " -q, --quiet unterdrücke alle Mitteilungen\n" -#: clusterdb.c:277 +#: clusterdb.c:281 #, c-format msgid " -t, --table=TABLE cluster specific table(s) only\n" msgstr " -t, --table=TABELLE clustere nur bestimmte Tabelle(n)\n" -#: clusterdb.c:278 reindexdb.c:412 +#: clusterdb.c:282 reindexdb.c:413 #, c-format msgid " -v, --verbose write a lot of output\n" msgstr " -v, --verbose erzeuge viele Meldungen\n" -#: clusterdb.c:279 createuser.c:361 dropdb.c:160 dropuser.c:167 -#: reindexdb.c:413 +#: clusterdb.c:283 createuser.c:361 dropdb.c:161 dropuser.c:167 reindexdb.c:414 #, c-format msgid " -V, --version output version information, then exit\n" msgstr " -V, --version Versionsinformationen anzeigen, dann beenden\n" -#: clusterdb.c:280 createuser.c:366 dropdb.c:162 dropuser.c:169 -#: reindexdb.c:414 +#: clusterdb.c:284 createuser.c:366 dropdb.c:163 dropuser.c:169 reindexdb.c:415 #, c-format msgid " -?, --help 
show this help, then exit\n" msgstr " -?, --help diese Hilfe anzeigen, dann beenden\n" -#: clusterdb.c:281 createdb.c:265 createuser.c:367 dropdb.c:163 dropuser.c:170 -#: pg_isready.c:231 reindexdb.c:415 vacuumdb.c:970 +#: clusterdb.c:285 createdb.c:265 createuser.c:367 dropdb.c:164 dropuser.c:170 +#: pg_isready.c:231 reindexdb.c:416 vacuumdb.c:1017 #, c-format msgid "" "\n" @@ -194,41 +190,41 @@ msgstr "" "\n" "Verbindungsoptionen:\n" -#: clusterdb.c:282 createuser.c:368 dropdb.c:164 dropuser.c:171 -#: reindexdb.c:416 vacuumdb.c:971 +#: clusterdb.c:286 createuser.c:368 dropdb.c:165 dropuser.c:171 reindexdb.c:417 +#: vacuumdb.c:1018 #, c-format msgid " -h, --host=HOSTNAME database server host or socket directory\n" msgstr " -h, --host=HOSTNAME Name des Datenbankservers oder Socket-Verzeichnis\n" -#: clusterdb.c:283 createuser.c:369 dropdb.c:165 dropuser.c:172 -#: reindexdb.c:417 vacuumdb.c:972 +#: clusterdb.c:287 createuser.c:369 dropdb.c:166 dropuser.c:172 reindexdb.c:418 +#: vacuumdb.c:1019 #, c-format msgid " -p, --port=PORT database server port\n" msgstr " -p, --port=PORT Port des Datenbankservers\n" -#: clusterdb.c:284 dropdb.c:166 reindexdb.c:418 vacuumdb.c:973 +#: clusterdb.c:288 dropdb.c:167 reindexdb.c:419 vacuumdb.c:1020 #, c-format msgid " -U, --username=USERNAME user name to connect as\n" msgstr " -U, --username=NAME Datenbankbenutzername\n" -#: clusterdb.c:285 createuser.c:371 dropdb.c:167 dropuser.c:174 -#: reindexdb.c:419 vacuumdb.c:974 +#: clusterdb.c:289 createuser.c:371 dropdb.c:168 dropuser.c:174 reindexdb.c:420 +#: vacuumdb.c:1021 #, c-format msgid " -w, --no-password never prompt for password\n" msgstr " -w, --no-password niemals nach Passwort fragen\n" -#: clusterdb.c:286 createuser.c:372 dropdb.c:168 dropuser.c:175 -#: reindexdb.c:420 vacuumdb.c:975 +#: clusterdb.c:290 createuser.c:372 dropdb.c:169 dropuser.c:175 reindexdb.c:421 +#: vacuumdb.c:1022 #, c-format msgid " -W, --password force password prompt\n" msgstr " -W, --password Passwortfrage erzwingen\n" -#: clusterdb.c:287 dropdb.c:169 reindexdb.c:421 vacuumdb.c:976 +#: clusterdb.c:291 dropdb.c:170 reindexdb.c:422 vacuumdb.c:1023 #, c-format msgid " --maintenance-db=DBNAME alternate maintenance database\n" msgstr " --maintenance-db=DBNAME alternative Wartungsdatenbank\n" -#: clusterdb.c:288 +#: clusterdb.c:292 #, c-format msgid "" "\n" @@ -238,8 +234,8 @@ msgstr "" "Für weitere Informationen lesen Sie bitte die Beschreibung des\n" "SQL-Befehls CLUSTER.\n" -#: clusterdb.c:289 createdb.c:273 createuser.c:373 dropdb.c:170 dropuser.c:176 -#: pg_isready.c:236 reindexdb.c:423 vacuumdb.c:978 +#: clusterdb.c:293 createdb.c:273 createuser.c:373 dropdb.c:171 dropuser.c:176 +#: pg_isready.c:236 reindexdb.c:424 vacuumdb.c:1025 #, c-format msgid "" "\n" @@ -248,58 +244,65 @@ msgstr "" "\n" "Berichten Sie Fehler an .\n" -#: common.c:80 common.c:126 +#: common.c:83 common.c:129 msgid "Password: " msgstr "Passwort: " -#: common.c:113 +#: common.c:116 #, c-format msgid "%s: could not connect to database %s: out of memory\n" msgstr "%s: konnte nicht mit Datenbank %s verbinden: Speicher aufgebraucht\n" -#: common.c:140 +#: common.c:143 #, c-format msgid "%s: could not connect to database %s: %s" msgstr "%s: konnte nicht mit Datenbank %s verbinden: %s" -#: common.c:189 common.c:217 +#: common.c:196 common.c:224 #, c-format msgid "%s: query failed: %s" msgstr "%s: Anfrage fehlgeschlagen: %s" -#: common.c:191 common.c:219 +#: common.c:198 common.c:226 #, c-format msgid "%s: query was: %s\n" msgstr "%s: Anfrage war: %s\n" +#: 
common.c:351 +#, c-format +msgid "%s: query returned %d row instead of one: %s\n" +msgid_plural "%s: query returned %d rows instead of one: %s\n" +msgstr[0] "%s: Anfrage ergab %d Zeile anstatt einer: %s\n" +msgstr[1] "%s: Anfrage ergab %d Zeilen anstatt einer: %s\n" + #. translator: abbreviation for "yes" -#: common.c:260 +#: common.c:377 msgid "y" msgstr "j" #. translator: abbreviation for "no" -#: common.c:262 +#: common.c:379 msgid "n" msgstr "n" #. translator: This is a question followed by the translated options for #. "yes" and "no". -#: common.c:272 +#: common.c:389 #, c-format msgid "%s (%s/%s) " msgstr "%s (%s/%s) " -#: common.c:286 +#: common.c:403 #, c-format msgid "Please answer \"%s\" or \"%s\".\n" msgstr "Bitte antworten Sie »%s« oder »%s«.\n" -#: common.c:365 common.c:402 +#: common.c:482 common.c:519 #, c-format msgid "Cancel request sent\n" msgstr "Abbruchsanforderung gesendet\n" -#: common.c:368 common.c:406 +#: common.c:485 common.c:523 #, c-format msgid "Could not send cancel request: %s" msgstr "Konnte Abbruchsanforderung nicht senden: %s" @@ -599,12 +602,12 @@ msgstr "Datenbank »%s« wird unwiderruflich gelöscht werden.\n" msgid "Are you sure?" msgstr "Sind Sie sich sicher?" -#: dropdb.c:139 +#: dropdb.c:140 #, c-format msgid "%s: database removal failed: %s" msgstr "%s: Löschen der Datenbank fehlgeschlagen: %s" -#: dropdb.c:154 +#: dropdb.c:155 #, c-format msgid "" "%s removes a PostgreSQL database.\n" @@ -613,17 +616,17 @@ msgstr "" "%s löscht eine PostgreSQL-Datenbank.\n" "\n" -#: dropdb.c:156 +#: dropdb.c:157 #, c-format msgid " %s [OPTION]... DBNAME\n" msgstr " %s [OPTION]... DBNAME\n" -#: dropdb.c:159 +#: dropdb.c:160 #, c-format msgid " -i, --interactive prompt before deleting anything\n" msgstr " -i, --interactive frage nach, bevor irgendetwas gelöscht wird\n" -#: dropdb.c:161 +#: dropdb.c:162 #, c-format msgid " --if-exists don't report error if database doesn't exist\n" msgstr " --if-exists keinen Fehler ausgeben, wenn Datenbank nicht existiert\n" @@ -806,37 +809,37 @@ msgstr "%s: kann nicht bestimmte Tabelle(n) und Systemkataloge gleichzeitig rein msgid "%s: cannot reindex specific index(es) and system catalogs at the same time\n" msgstr "%s: kann nicht bestimmte Index und Systemkataloge gleichzeitig reindizieren\n" -#: reindexdb.c:307 +#: reindexdb.c:308 #, c-format msgid "%s: reindexing of table \"%s\" in database \"%s\" failed: %s" msgstr "%s: Reindizieren der Tabelle »%s« in Datenbank »%s« fehlgeschlagen: %s" -#: reindexdb.c:310 +#: reindexdb.c:311 #, c-format msgid "%s: reindexing of index \"%s\" in database \"%s\" failed: %s" msgstr "%s: Reindizieren des Index »%s« in Datenbank »%s« fehlgeschlagen: %s" -#: reindexdb.c:313 +#: reindexdb.c:314 #, c-format msgid "%s: reindexing of schema \"%s\" in database \"%s\" failed: %s" msgstr "%s: Reindizieren des Schemas »%s« in Datenbank »%s« fehlgeschlagen: %s" -#: reindexdb.c:316 +#: reindexdb.c:317 #, c-format msgid "%s: reindexing of database \"%s\" failed: %s" msgstr "%s: Reindizieren der Datenbank »%s« fehlgeschlagen: %s" -#: reindexdb.c:349 +#: reindexdb.c:350 #, c-format msgid "%s: reindexing database \"%s\"\n" msgstr "%s: reindiziere Datenbank »%s«\n" -#: reindexdb.c:388 +#: reindexdb.c:389 #, c-format msgid "%s: reindexing of system catalogs failed: %s" msgstr "%s: Reindizieren der Systemkataloge fehlgeschlagen: %s" -#: reindexdb.c:400 +#: reindexdb.c:401 #, c-format msgid "" "%s reindexes a PostgreSQL database.\n" @@ -845,37 +848,37 @@ msgstr "" "%s reindiziert eine PostgreSQL-Datenbank.\n" "\n" -#: 
reindexdb.c:404 +#: reindexdb.c:405 #, c-format msgid " -a, --all reindex all databases\n" msgstr " -a, --all alle Datenbanken reindizieren\n" -#: reindexdb.c:405 +#: reindexdb.c:406 #, c-format msgid " -d, --dbname=DBNAME database to reindex\n" msgstr " -d, --dbname=DBNAME zu reindizierende Datenbank\n" -#: reindexdb.c:407 +#: reindexdb.c:408 #, c-format msgid " -i, --index=INDEX recreate specific index(es) only\n" msgstr " -i, --index=INDEX nur bestimmte(n) Index(e) erneuern\n" -#: reindexdb.c:409 +#: reindexdb.c:410 #, c-format msgid " -s, --system reindex system catalogs\n" msgstr " -s, --system Systemkataloge reindizieren\n" -#: reindexdb.c:410 +#: reindexdb.c:411 #, c-format msgid " -S, --schema=SCHEMA reindex specific schema(s) only\n" msgstr " -S, --schema=SCHEMA nur bestimmte(s) Schema(s) reindizieren\n" -#: reindexdb.c:411 +#: reindexdb.c:412 #, c-format msgid " -t, --table=TABLE reindex specific table(s) only\n" msgstr " -t, --table=TABELLE nur bestimmte Tabelle(n) reindizieren\n" -#: reindexdb.c:422 +#: reindexdb.c:423 #, c-format msgid "" "\n" @@ -885,69 +888,64 @@ msgstr "" "Für weitere Informationen lesen Sie bitte die Beschreibung des\n" "SQL-Befehls REINDEX.\n" -#: vacuumdb.c:195 +#: vacuumdb.c:199 #, c-format msgid "%s: number of parallel jobs must be at least 1\n" msgstr "%s: Anzahl paralleler Jobs muss mindestens 1 sein\n" -#: vacuumdb.c:201 +#: vacuumdb.c:205 #, c-format msgid "%s: too many parallel jobs requested (maximum: %d)\n" msgstr "%s: zu viele parallele Jobs angefordert (Maximum: %d)\n" -#: vacuumdb.c:240 vacuumdb.c:246 +#: vacuumdb.c:244 vacuumdb.c:250 #, c-format msgid "%s: cannot use the \"%s\" option when performing only analyze\n" msgstr "%s: kann Option »%s« nicht verwenden, wenn nur Analyze durchgeführt wird\n" -#: vacuumdb.c:263 +#: vacuumdb.c:267 #, c-format msgid "%s: cannot vacuum all databases and a specific one at the same time\n" msgstr "%s: kann nicht alle Datenbanken und eine bestimmte gleichzeitig vacuumen\n" -#: vacuumdb.c:269 +#: vacuumdb.c:273 #, c-format msgid "%s: cannot vacuum specific table(s) in all databases\n" msgstr "%s: kann nicht bestimmte Tabelle(n) in allen Datenbanken vacuumen\n" -#: vacuumdb.c:355 +#: vacuumdb.c:359 msgid "Generating minimal optimizer statistics (1 target)" msgstr "Erzeuge minimale Optimierer-Statistiken (1 Ziel)" -#: vacuumdb.c:356 +#: vacuumdb.c:360 msgid "Generating medium optimizer statistics (10 targets)" msgstr "Erzeuge mittlere Optimierer-Statistiken (10 Ziele)" -#: vacuumdb.c:357 +#: vacuumdb.c:361 msgid "Generating default (full) optimizer statistics" msgstr "Erzeuge volle Optimierer-Statistiken" -#: vacuumdb.c:369 +#: vacuumdb.c:373 #, c-format msgid "%s: processing database \"%s\": %s\n" msgstr "%s: bearbeite Datenbank »%s«: %s\n" -#: vacuumdb.c:372 +#: vacuumdb.c:376 #, c-format msgid "%s: vacuuming database \"%s\"\n" msgstr "%s: führe Vacuum in Datenbank »%s« aus\n" -#: vacuumdb.c:708 +#: vacuumdb.c:725 #, c-format msgid "%s: vacuuming of table \"%s\" in database \"%s\" failed: %s" msgstr "%s: Vacuum der Tabelle »%s« in Datenbank »%s« fehlgeschlagen: %s" -#: vacuumdb.c:711 vacuumdb.c:828 +#: vacuumdb.c:728 vacuumdb.c:863 #, c-format msgid "%s: vacuuming of database \"%s\" failed: %s" msgstr "%s: Vacuum der Datenbank »%s« fehlgeschlagen: %s" -#: vacuumdb.c:942 -#, c-format -msgid "%s: invalid socket: %s" -msgstr "%s: ungültiges Socket: %s" - -#: vacuumdb.c:951 +#: vacuumdb.c:998 #, c-format msgid "" "%s cleans and analyzes a PostgreSQL database.\n" @@ -956,75 +954,75 @@ msgstr "" "%s säubert und 
analysiert eine PostgreSQL-Datenbank.\n" "\n" -#: vacuumdb.c:955 +#: vacuumdb.c:1002 #, c-format msgid " -a, --all vacuum all databases\n" msgstr " -a, --all führe Vacuum in allen Datenbanken aus\n" -#: vacuumdb.c:956 +#: vacuumdb.c:1003 #, c-format msgid " -d, --dbname=DBNAME database to vacuum\n" msgstr " -d, --dbname=DBNAME führe Vacuum in dieser Datenbank aus\n" -#: vacuumdb.c:957 +#: vacuumdb.c:1004 #, c-format msgid " -e, --echo show the commands being sent to the server\n" msgstr "" " -e, --echo zeige die Befehle, die an den Server\n" " gesendet werden\n" -#: vacuumdb.c:958 +#: vacuumdb.c:1005 #, c-format msgid " -f, --full do full vacuuming\n" msgstr " -f, --full führe volles Vacuum durch\n" -#: vacuumdb.c:959 +#: vacuumdb.c:1006 #, c-format msgid " -F, --freeze freeze row transaction information\n" msgstr " -F, --freeze Zeilentransaktionsinformationen einfrieren\n" -#: vacuumdb.c:960 +#: vacuumdb.c:1007 #, c-format msgid " -j, --jobs=NUM use this many concurrent connections to vacuum\n" msgstr "" " -j, --jobs=NUM so viele parallele Verbindungen zum Vacuum\n" " verwenden\n" -#: vacuumdb.c:961 +#: vacuumdb.c:1008 #, c-format msgid " -q, --quiet don't write any messages\n" msgstr " -q, --quiet unterdrücke alle Mitteilungen\n" -#: vacuumdb.c:962 +#: vacuumdb.c:1009 #, c-format msgid " -t, --table='TABLE[(COLUMNS)]' vacuum specific table(s) only\n" msgstr "" " -t, --table='TABELLE[(SPALTEN)]'\n" " führe Vacuum für bestimmte Tabelle(n) aus\n" -#: vacuumdb.c:963 +#: vacuumdb.c:1010 #, c-format msgid " -v, --verbose write a lot of output\n" msgstr " -v, --verbose erzeuge viele Meldungen\n" -#: vacuumdb.c:964 +#: vacuumdb.c:1011 #, c-format msgid " -V, --version output version information, then exit\n" msgstr " -V, --version Versionsinformationen anzeigen, dann beenden\n" -#: vacuumdb.c:965 +#: vacuumdb.c:1012 #, c-format msgid " -z, --analyze update optimizer statistics\n" msgstr " -z, --analyze aktualisiere Statistiken für den Optimierer\n" -#: vacuumdb.c:966 +#: vacuumdb.c:1013 #, c-format msgid " -Z, --analyze-only only update optimizer statistics; no vacuum\n" msgstr "" " -Z, --analyze-only aktualisiere nur Statistiken für den Optimierer;\n" " kein Vacuum\n" -#: vacuumdb.c:967 +#: vacuumdb.c:1014 #, c-format msgid "" " --analyze-in-stages only update optimizer statistics, in multiple\n" @@ -1034,12 +1032,12 @@ msgstr "" " in mehreren Phasen für schnellere Ergebnisse;\n" " kein Vacuum\n" -#: vacuumdb.c:969 +#: vacuumdb.c:1016 #, c-format msgid " -?, --help show this help, then exit\n" msgstr " -?, --help diese Hilfe anzeigen, dann beenden\n" -#: vacuumdb.c:977 +#: vacuumdb.c:1024 #, c-format msgid "" "\n" diff --git a/src/bin/scripts/po/it.po b/src/bin/scripts/po/it.po index 5aab660ebf..6edbb27303 100644 --- a/src/bin/scripts/po/it.po +++ b/src/bin/scripts/po/it.po @@ -1,28 +1,20 @@ # -# Translation of pgscripts to Italian -# PostgreSQL Project +# pgscripts.po +# Italian message translation file for pgscripts # -# Associazione Culturale ITPUG - Italian PostgreSQL Users Group -# http://www.itpug.org/ - info@itpug.org +# For development and bug report please use: +# https://github.com/dvarrazzo/postgresql-it # -# Traduttori: -# * Emanuele Zamprogno -# * Daniele Varrazzo +# Copyright (C) 2012-2017 PostgreSQL Global Development Group +# Copyright (C) 2010, Associazione Culturale ITPUG # -# Revisori: -# * Diego Cinelli +# Daniele Varrazzo , 2012-2017. +# Emanuele Zamprogno , 2009. +# Mirko Tebaldi , 2004. +# Fabrizio Mazzoni , 2003. 
# -# Traduttori precedenti: -# * Primo traduttore: Fabrizio Mazzoni , 2003. -# * Secondo traduttore: Mirko Tebaldi , 2004. +# This file is distributed under the same license as the PostgreSQL package. # -# Copyright (c) 2010, Associazione Culturale ITPUG -# Distributed under the same license of the PostgreSQL project -# -# Italian message translation file for pgscripts -# Primo traduttore: Fabrizio Mazzoni , 2003. -# Secondo traduttore: Mirko Tebaldi , 2004. -# Attuale traduttore: Emanuele Zamprogno , 2009. msgid "" msgstr "" "Project-Id-Version: pgscripts (PostgreSQL) 10\n" @@ -30,7 +22,7 @@ msgstr "" "POT-Creation-Date: 2017-05-22 07:46+0000\n" "PO-Revision-Date: 2017-05-29 17:28+0100\n" "Last-Translator: Daniele Varrazzo \n" -"Language-Team: Gruppo traduzioni ITPUG \n" +"Language-Team: https://github.com/dvarrazzo/postgresql-it\n" "Language: it\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" @@ -93,8 +85,9 @@ msgstr "formato di output non valido (errore interno): %d" #: clusterdb.c:111 clusterdb.c:130 createdb.c:119 createdb.c:138 #: createuser.c:166 createuser.c:181 dropdb.c:94 dropdb.c:103 dropdb.c:111 -#: dropuser.c:90 dropuser.c:105 dropuser.c:120 pg_isready.c:93 pg_isready.c:107 -#: reindexdb.c:131 reindexdb.c:150 vacuumdb.c:213 vacuumdb.c:232 +#: dropuser.c:90 dropuser.c:105 dropuser.c:120 pg_isready.c:93 +#: pg_isready.c:107 reindexdb.c:131 reindexdb.c:150 vacuumdb.c:213 +#: vacuumdb.c:232 #, c-format msgid "Try \"%s --help\" for more information.\n" msgstr "Prova \"%s --help\" per maggiori informazioni.\n" @@ -170,7 +163,8 @@ msgstr " -a, --all raggruppa tutti i database\n" msgid " -d, --dbname=DBNAME database to cluster\n" msgstr " -d, --dbname=NOMEDB database da raggruppare\n" -#: clusterdb.c:275 createuser.c:349 dropdb.c:158 dropuser.c:164 reindexdb.c:406 +#: clusterdb.c:275 createuser.c:349 dropdb.c:158 dropuser.c:164 +#: reindexdb.c:406 #, c-format msgid " -e, --echo show the commands being sent to the server\n" msgstr " -e, --echo mostra i comandi inviati al server\n" @@ -190,12 +184,14 @@ msgstr " -t, --table=TABELLA raggruppa solo le tabelle specificate\n" msgid " -v, --verbose write a lot of output\n" msgstr " -v, --verbose mostra un output completo\n" -#: clusterdb.c:279 createuser.c:361 dropdb.c:160 dropuser.c:167 reindexdb.c:413 +#: clusterdb.c:279 createuser.c:361 dropdb.c:160 dropuser.c:167 +#: reindexdb.c:413 #, c-format msgid " -V, --version output version information, then exit\n" msgstr " -V, --version mostra informazioni sulla versione ed esci\n" -#: clusterdb.c:280 createuser.c:366 dropdb.c:162 dropuser.c:169 reindexdb.c:414 +#: clusterdb.c:280 createuser.c:366 dropdb.c:162 dropuser.c:169 +#: reindexdb.c:414 #, c-format msgid " -?, --help show this help, then exit\n" msgstr " -?, --help mostra questo aiuto ed esci\n" @@ -210,14 +206,14 @@ msgstr "" "\n" "Opzioni di connessione:\n" -#: clusterdb.c:282 createuser.c:368 dropdb.c:164 dropuser.c:171 reindexdb.c:416 -#: vacuumdb.c:971 +#: clusterdb.c:282 createuser.c:368 dropdb.c:164 dropuser.c:171 +#: reindexdb.c:416 vacuumdb.c:971 #, c-format msgid " -h, --host=HOSTNAME database server host or socket directory\n" msgstr " -h, --host=HOSTNAME host del server database o directory socket\n" -#: clusterdb.c:283 createuser.c:369 dropdb.c:165 dropuser.c:172 reindexdb.c:417 -#: vacuumdb.c:972 +#: clusterdb.c:283 createuser.c:369 dropdb.c:165 dropuser.c:172 +#: reindexdb.c:417 vacuumdb.c:972 #, c-format msgid " -p, --port=PORT database server port\n" msgstr " -p, --port=PORTA porta del server 
database\n" @@ -227,14 +223,14 @@ msgstr " -p, --port=PORTA porta del server database\n" msgid " -U, --username=USERNAME user name to connect as\n" msgstr " -U, --username=UTENTE nome utente da utilizzare per la connessione\n" -#: clusterdb.c:285 createuser.c:371 dropdb.c:167 dropuser.c:174 reindexdb.c:419 -#: vacuumdb.c:974 +#: clusterdb.c:285 createuser.c:371 dropdb.c:167 dropuser.c:174 +#: reindexdb.c:419 vacuumdb.c:974 #, c-format msgid " -w, --no-password never prompt for password\n" msgstr " -w, --no-password non richiedere mai una password\n" -#: clusterdb.c:286 createuser.c:372 dropdb.c:168 dropuser.c:175 reindexdb.c:420 -#: vacuumdb.c:975 +#: clusterdb.c:286 createuser.c:372 dropdb.c:168 dropuser.c:175 +#: reindexdb.c:420 vacuumdb.c:975 #, c-format msgid " -W, --password force password prompt\n" msgstr " -W, --password forza la richiesta di una password\n" diff --git a/src/bin/scripts/po/ko.po b/src/bin/scripts/po/ko.po index a688749d04..af87868828 100644 --- a/src/bin/scripts/po/ko.po +++ b/src/bin/scripts/po/ko.po @@ -3,10 +3,10 @@ # msgid "" msgstr "" -"Project-Id-Version: pgscripts (PostgreSQL 9.6)\n" +"Project-Id-Version: pgscripts (PostgreSQL) 10\n" "Report-Msgid-Bugs-To: pgsql-bugs@postgresql.org\n" -"POT-Creation-Date: 2016-09-26 14:02+0900\n" -"PO-Revision-Date: 2016-09-26 19:04+0900\n" +"POT-Creation-Date: 2017-08-16 10:59+0900\n" +"PO-Revision-Date: 2017-08-16 17:45+0900\n" "Last-Translator: Ioseph Kim \n" "Language-Team: Korean \n" "Language: ko\n" @@ -26,59 +26,57 @@ msgstr "메모리 부족\n" msgid "cannot duplicate null pointer (internal error)\n" msgstr "null 포인터를 복제할 수 없음(내부 오류)\n" -#: ../../common/username.c:45 +#: ../../common/username.c:43 #, c-format msgid "could not look up effective user ID %ld: %s" msgstr "UID %ld 해당하는 사용자를 찾을 수 없음: %s" -#: ../../common/username.c:47 +#: ../../common/username.c:45 msgid "user does not exist" msgstr "사용자 없음" -#: ../../common/username.c:62 +#: ../../common/username.c:60 #, c-format msgid "user name lookup failure: error code %lu" msgstr "사용자 이름 찾기 실패: 오류번호 %lu" -#: ../../fe_utils/print.c:354 +#: ../../fe_utils/print.c:353 #, c-format msgid "(%lu row)" msgid_plural "(%lu rows)" msgstr[0] "(%lu개 행)" -#: ../../fe_utils/print.c:2906 +#: ../../fe_utils/print.c:2913 #, c-format msgid "Interrupted\n" msgstr "인트럽트발생\n" -#: ../../fe_utils/print.c:2970 +#: ../../fe_utils/print.c:2977 #, c-format msgid "Cannot add header to table content: column count of %d exceeded.\n" msgstr "테이블 내용에 헤더를 추가할 수 없음: 열 수가 %d개를 초과했습니다.\n" -#: ../../fe_utils/print.c:3010 +#: ../../fe_utils/print.c:3017 #, c-format msgid "Cannot add cell to table content: total cell count of %d exceeded.\n" msgstr "테이블 내용에 셀을 추가할 수 없음: 총 셀 수가 %d개를 초과했습니다.\n" -#: ../../fe_utils/print.c:3259 +#: ../../fe_utils/print.c:3266 #, c-format msgid "invalid output format (internal error): %d" msgstr "잘못된 출력 형식 (내부 오류): %d" #: clusterdb.c:111 clusterdb.c:130 createdb.c:119 createdb.c:138 -#: createlang.c:89 createlang.c:119 createlang.c:174 createuser.c:169 -#: createuser.c:184 dropdb.c:94 dropdb.c:103 dropdb.c:111 droplang.c:88 -#: droplang.c:118 droplang.c:174 dropuser.c:89 dropuser.c:104 dropuser.c:115 -#: pg_isready.c:93 pg_isready.c:107 reindexdb.c:131 reindexdb.c:150 -#: vacuumdb.c:207 vacuumdb.c:226 +#: createuser.c:166 createuser.c:181 dropdb.c:94 dropdb.c:103 dropdb.c:111 +#: dropuser.c:90 dropuser.c:105 dropuser.c:120 pg_isready.c:93 +#: pg_isready.c:107 reindexdb.c:131 reindexdb.c:150 vacuumdb.c:213 +#: vacuumdb.c:232 #, c-format msgid "Try \"%s --help\" for more information.\n" 
msgstr "보다 자세한 사용법은 \"%s --help\"\n" -#: clusterdb.c:128 createdb.c:136 createlang.c:117 createuser.c:182 -#: dropdb.c:109 droplang.c:116 dropuser.c:102 pg_isready.c:105 reindexdb.c:148 -#: vacuumdb.c:224 +#: clusterdb.c:128 createdb.c:136 createuser.c:179 dropdb.c:109 dropuser.c:103 +#: pg_isready.c:105 reindexdb.c:148 vacuumdb.c:230 #, c-format msgid "%s: too many command-line arguments (first is \"%s\")\n" msgstr "%s: 너무 많은 명령행 인수들 (시작 \"%s\")\n" @@ -118,21 +116,19 @@ msgstr "" "다시 클러스터 작업을 합니다.\n" "\n" -#: clusterdb.c:270 createdb.c:252 createlang.c:236 createuser.c:349 -#: dropdb.c:155 droplang.c:237 dropuser.c:156 pg_isready.c:222 reindexdb.c:401 -#: vacuumdb.c:942 +#: clusterdb.c:270 createdb.c:252 createuser.c:343 dropdb.c:155 dropuser.c:161 +#: pg_isready.c:222 reindexdb.c:401 vacuumdb.c:952 #, c-format msgid "Usage:\n" msgstr "사용법:\n" -#: clusterdb.c:271 reindexdb.c:402 vacuumdb.c:943 +#: clusterdb.c:271 reindexdb.c:402 vacuumdb.c:953 #, c-format msgid " %s [OPTION]... [DBNAME]\n" msgstr " %s [옵션]... [DB이름]\n" -#: clusterdb.c:272 createdb.c:254 createlang.c:238 createuser.c:351 -#: dropdb.c:157 droplang.c:239 dropuser.c:158 pg_isready.c:225 reindexdb.c:403 -#: vacuumdb.c:944 +#: clusterdb.c:272 createdb.c:254 createuser.c:345 dropdb.c:157 dropuser.c:163 +#: pg_isready.c:225 reindexdb.c:403 vacuumdb.c:954 #, c-format msgid "" "\n" @@ -151,8 +147,8 @@ msgstr " -a, --all 모든 데이터베이스를 대상으로\n" msgid " -d, --dbname=DBNAME database to cluster\n" msgstr " -d, --dbname=DBNAME 클러스터 작업할 DB\n" -#: clusterdb.c:275 createlang.c:240 createuser.c:355 dropdb.c:158 -#: droplang.c:241 dropuser.c:159 reindexdb.c:406 +#: clusterdb.c:275 createuser.c:349 dropdb.c:158 dropuser.c:164 +#: reindexdb.c:406 #, c-format msgid "" " -e, --echo show the commands being sent to the server\n" @@ -173,21 +169,20 @@ msgstr " -t, --table=TABLE 지정한 테이블들만 클러스터\n" msgid " -v, --verbose write a lot of output\n" msgstr " -v, --verbose 많은 출력 작성\n" -#: clusterdb.c:279 createlang.c:242 createuser.c:369 dropdb.c:160 -#: droplang.c:243 dropuser.c:162 reindexdb.c:413 +#: clusterdb.c:279 createuser.c:361 dropdb.c:160 dropuser.c:167 +#: reindexdb.c:413 #, c-format msgid " -V, --version output version information, then exit\n" msgstr " -V, --version 버전 정보를 보여주고 마침\n" -#: clusterdb.c:280 createlang.c:243 createuser.c:374 dropdb.c:162 -#: droplang.c:244 dropuser.c:164 reindexdb.c:414 +#: clusterdb.c:280 createuser.c:366 dropdb.c:162 dropuser.c:169 +#: reindexdb.c:414 #, c-format msgid " -?, --help show this help, then exit\n" msgstr " -?, --help 이 도움말을 보여주고 마침\n" -#: clusterdb.c:281 createdb.c:265 createlang.c:244 createuser.c:375 -#: dropdb.c:163 droplang.c:245 dropuser.c:165 pg_isready.c:231 reindexdb.c:415 -#: vacuumdb.c:960 +#: clusterdb.c:281 createdb.c:265 createuser.c:367 dropdb.c:163 dropuser.c:170 +#: pg_isready.c:231 reindexdb.c:415 vacuumdb.c:970 #, c-format msgid "" "\n" @@ -196,38 +191,37 @@ msgstr "" "\n" "연결 옵션들:\n" -#: clusterdb.c:282 createlang.c:245 createuser.c:376 dropdb.c:164 -#: droplang.c:246 dropuser.c:166 reindexdb.c:416 vacuumdb.c:961 +#: clusterdb.c:282 createuser.c:368 dropdb.c:164 dropuser.c:171 +#: reindexdb.c:416 vacuumdb.c:971 #, c-format msgid " -h, --host=HOSTNAME database server host or socket directory\n" msgstr "" " -h, --host=HOSTNAME 데이터베이스 서버 호스트 또는 소켓 디렉터리\n" -#: clusterdb.c:283 createlang.c:246 createuser.c:377 dropdb.c:165 -#: droplang.c:247 dropuser.c:167 reindexdb.c:417 vacuumdb.c:962 +#: clusterdb.c:283 createuser.c:369 dropdb.c:165 dropuser.c:172 +#: reindexdb.c:417 vacuumdb.c:972 #, 
c-format msgid " -p, --port=PORT database server port\n" msgstr " -p, --port=PORT 데이터베이스 서버 포트\n" -#: clusterdb.c:284 createlang.c:247 dropdb.c:166 droplang.c:248 -#: reindexdb.c:418 vacuumdb.c:963 +#: clusterdb.c:284 dropdb.c:166 reindexdb.c:418 vacuumdb.c:973 #, c-format msgid " -U, --username=USERNAME user name to connect as\n" msgstr " -U, --username=USERNAME 접속할 사용자이름\n" -#: clusterdb.c:285 createlang.c:248 createuser.c:379 dropdb.c:167 -#: droplang.c:249 dropuser.c:169 reindexdb.c:419 vacuumdb.c:964 +#: clusterdb.c:285 createuser.c:371 dropdb.c:167 dropuser.c:174 +#: reindexdb.c:419 vacuumdb.c:974 #, c-format msgid " -w, --no-password never prompt for password\n" msgstr " -w, --no-password 암호 프롬프트 표시 안 함\n" -#: clusterdb.c:286 createlang.c:249 createuser.c:380 dropdb.c:168 -#: droplang.c:250 dropuser.c:170 reindexdb.c:420 vacuumdb.c:965 +#: clusterdb.c:286 createuser.c:372 dropdb.c:168 dropuser.c:175 +#: reindexdb.c:420 vacuumdb.c:975 #, c-format msgid " -W, --password force password prompt\n" msgstr " -W, --password 암호 프롬프트 표시함\n" -#: clusterdb.c:287 dropdb.c:169 reindexdb.c:421 vacuumdb.c:966 +#: clusterdb.c:287 dropdb.c:169 reindexdb.c:421 vacuumdb.c:976 #, c-format msgid " --maintenance-db=DBNAME alternate maintenance database\n" msgstr " --maintenance-db=DBNAME 대체용 관리 대상 데이터베이스\n" @@ -241,9 +235,8 @@ msgstr "" "\n" "보다 자세한 내용은 CLUSTER SQL 명령어 설명서를 참조하십시오.\n" -#: clusterdb.c:289 createdb.c:273 createlang.c:250 createuser.c:381 -#: dropdb.c:170 droplang.c:251 dropuser.c:171 pg_isready.c:236 reindexdb.c:423 -#: vacuumdb.c:968 +#: clusterdb.c:289 createdb.c:273 createuser.c:373 dropdb.c:170 dropuser.c:176 +#: pg_isready.c:236 reindexdb.c:423 vacuumdb.c:978 #, c-format msgid "" "\n" @@ -252,7 +245,7 @@ msgstr "" "\n" "오류보고: .\n" -#: common.c:82 common.c:128 +#: common.c:80 common.c:126 msgid "Password: " msgstr "암호:" @@ -261,49 +254,49 @@ msgstr "암호:" msgid "%s: could not connect to database %s: out of memory\n" msgstr "%s: %s 데이터베이스에 연결 할 수 없음: 메모리 부족\n" -#: common.c:141 +#: common.c:140 #, c-format msgid "%s: could not connect to database %s: %s" msgstr "%s: %s 데이터베이스에 연결 할 수 없음: %s" -#: common.c:190 common.c:218 +#: common.c:189 common.c:217 #, c-format msgid "%s: query failed: %s" msgstr "%s: 쿼리 실패: %s" -#: common.c:192 common.c:220 +#: common.c:191 common.c:219 #, c-format msgid "%s: query was: %s\n" msgstr "%s: 사용된 쿼리: %s\n" #. translator: abbreviation for "yes" -#: common.c:261 +#: common.c:260 msgid "y" msgstr "y" #. translator: abbreviation for "no" -#: common.c:263 +#: common.c:262 msgid "n" msgstr "n" #. translator: This is a question followed by the translated options for #. "yes" and "no". -#: common.c:273 +#: common.c:272 #, c-format msgid "%s (%s/%s) " msgstr "%s (%s/%s) " -#: common.c:294 +#: common.c:286 #, c-format msgid "Please answer \"%s\" or \"%s\".\n" msgstr "\"%s\" 또는 \"%s\" 만 허용합니다.\n" -#: common.c:373 common.c:410 +#: common.c:365 common.c:402 #, c-format msgid "Cancel request sent\n" msgstr "취소 요청을 전송함\n" -#: common.c:376 common.c:414 +#: common.c:368 common.c:406 #, c-format msgid "Could not send cancel request: %s" msgstr "취소 요청을 전송할 수 없음: %s" @@ -441,67 +434,7 @@ msgstr "" "초기값으로, DB이름을 지정하지 않으면, 현재 사용자의 이름과 같은 데이터베이스" "가 만들어집니다.\n" -#: createlang.c:149 droplang.c:148 -msgid "Name" -msgstr "이름" - -#: createlang.c:150 droplang.c:149 -msgid "no" -msgstr "아니오" - -#: createlang.c:150 droplang.c:149 -msgid "yes" -msgstr "예" - -#: createlang.c:151 droplang.c:150 -msgid "Trusted?" -msgstr "신뢰된?" 
- -#: createlang.c:160 droplang.c:159 -msgid "Procedural Languages" -msgstr "프로시쥬얼 언어들" - -#: createlang.c:173 droplang.c:172 -#, c-format -msgid "%s: missing required argument language name\n" -msgstr "%s: 필수 항목인, 언어 이름을 지정할 인수가 빠졌습니다\n" - -#: createlang.c:196 -#, c-format -msgid "%s: language \"%s\" is already installed in database \"%s\"\n" -msgstr "%s: \"%s\" 언어는 이미 \"%s\" 데이터베이스에 설치되어 있습니다.\n" - -#: createlang.c:219 -#, c-format -msgid "%s: language installation failed: %s" -msgstr "%s: 언어 설치 실패: %s" - -#: createlang.c:235 -#, c-format -msgid "" -"%s installs a procedural language into a PostgreSQL database.\n" -"\n" -msgstr "" -"%s 프로그램은 PostgreSQL 데이터베이스에 프로시쥬얼 언어를 설치합니다.\n" -"\n" - -#: createlang.c:237 droplang.c:238 -#, c-format -msgid " %s [OPTION]... LANGNAME [DBNAME]\n" -msgstr " %s [옵션]... 언어이름 [DB이름]\n" - -#: createlang.c:239 -#, c-format -msgid " -d, --dbname=DBNAME database to install language in\n" -msgstr " -d, --dbname=DBNAME 언어를 설치할 DB이름\n" - -#: createlang.c:241 droplang.c:242 -#, c-format -msgid "" -" -l, --list show a list of currently installed languages\n" -msgstr " -l, --list 현재 설치 되어있는 언어들을 보여줌\n" - -#: createuser.c:191 +#: createuser.c:189 msgid "Enter name of role to add: " msgstr "추가할 새 롤(role)이름: " @@ -509,11 +442,11 @@ msgstr "추가할 새 롤(role)이름: " msgid "Enter password for new role: " msgstr "새 롤의 암호: " -#: createuser.c:207 +#: createuser.c:208 msgid "Enter it again: " msgstr "암호 확인: " -#: createuser.c:210 +#: createuser.c:211 #, c-format msgid "Passwords didn't match.\n" msgstr "암호가 서로 틀림.\n" @@ -530,17 +463,17 @@ msgstr "이 새 롤에게 데이터베이스를 만들 수 있는 권할을 줄 msgid "Shall the new role be allowed to create more new roles?" msgstr "이 새 롤에게 또 다른 롤을 만들 수 있는 권한을 줄까요?" -#: createuser.c:276 +#: createuser.c:272 #, c-format -msgid "Password encryption failed.\n" -msgstr "암호 암호화 실패.\n" +msgid "%s: password encryption failed: %s" +msgstr "%s: 암호 암호화 실패: %s" -#: createuser.c:333 +#: createuser.c:327 #, c-format msgid "%s: creation of new role failed: %s" msgstr "%s: 새 롤 만들기 실패: %s" -#: createuser.c:348 +#: createuser.c:342 #, c-format msgid "" "%s creates a new PostgreSQL role.\n" @@ -549,39 +482,34 @@ msgstr "" "%s 프로그램은 PostgreSQL 롤을 만듭니다.\n" "\n" -#: createuser.c:350 dropuser.c:157 +#: createuser.c:344 dropuser.c:162 #, c-format msgid " %s [OPTION]... [ROLENAME]\n" msgstr " %s [옵션]... 
[롤이름]\n" -#: createuser.c:352 +#: createuser.c:346 #, c-format msgid "" " -c, --connection-limit=N connection limit for role (default: no limit)\n" msgstr " -c, --connection-limit=N 연결 제한 수 (초기값: 무제한)\n" -#: createuser.c:353 +#: createuser.c:347 #, c-format msgid " -d, --createdb role can create new databases\n" msgstr " -d, --createdb 새 데이터베이스를 만들 수 있음\n" -#: createuser.c:354 +#: createuser.c:348 #, c-format msgid " -D, --no-createdb role cannot create databases (default)\n" msgstr "" " -D, --no-createdb 데이터베이스를 만들 수 있는 권한 없음 (초기값)\n" -#: createuser.c:356 -#, c-format -msgid " -E, --encrypted encrypt stored password\n" -msgstr " -E, --encrypted 암호화된 암호 사용\n" - -#: createuser.c:357 +#: createuser.c:350 #, c-format msgid " -g, --role=ROLE new role will be a member of this role\n" msgstr " -g, --role=ROLE 만들어지는 롤이 이 롤의 구성원이 됨\n" -#: createuser.c:358 +#: createuser.c:351 #, c-format msgid "" " -i, --inherit role inherits privileges of roles it is a\n" @@ -590,52 +518,47 @@ msgstr "" " -i, --inherit 롤의 권한을 상속할 수 있음\n" " (초기값)\n" -#: createuser.c:360 +#: createuser.c:353 #, c-format msgid " -I, --no-inherit role does not inherit privileges\n" msgstr " -I, --no-inherit 이 롤의 권한을 상속할 수 없음\n" -#: createuser.c:361 +#: createuser.c:354 #, c-format msgid " -l, --login role can login (default)\n" msgstr " -l, --login 로그인 허용 (초기값)\n" -#: createuser.c:362 +#: createuser.c:355 #, c-format msgid " -L, --no-login role cannot login\n" msgstr " -L, --no-login 로그인 할 수 없음\n" -#: createuser.c:363 -#, c-format -msgid " -N, --unencrypted do not encrypt stored password\n" -msgstr " -N, --unencrypted 암호화 되지 않은 암호 사용\n" - -#: createuser.c:364 +#: createuser.c:356 #, c-format msgid " -P, --pwprompt assign a password to new role\n" msgstr " -P, --pwprompt 새 롤의 암호 지정\n" -#: createuser.c:365 +#: createuser.c:357 #, c-format msgid " -r, --createrole role can create new roles\n" msgstr " -r, --createrole 새 롤을 만들 수 있음\n" -#: createuser.c:366 +#: createuser.c:358 #, c-format msgid " -R, --no-createrole role cannot create roles (default)\n" msgstr " -R, --no-createrole 롤 만들 수 있는 권한 없음 (초기값)\n" -#: createuser.c:367 +#: createuser.c:359 #, c-format msgid " -s, --superuser role will be superuser\n" msgstr " -s, --superuser superuser 권한으로 지정\n" -#: createuser.c:368 +#: createuser.c:360 #, c-format msgid " -S, --no-superuser role will not be superuser (default)\n" msgstr " -S, --no-superuser 슈퍼유저 권한 없음 (초기값)\n" -#: createuser.c:370 +#: createuser.c:362 #, c-format msgid "" " --interactive prompt for missing role name and attributes " @@ -645,17 +568,17 @@ msgstr "" " --interactive 롤 이름과 속성을 초기값을 쓰지 않고\n" " 각각 직접 입력 선택 함\n" -#: createuser.c:372 +#: createuser.c:364 #, c-format msgid " --replication role can initiate replication\n" msgstr " --replication 복제 기능 이용할 수 있는 롤\n" -#: createuser.c:373 +#: createuser.c:365 #, c-format msgid " --no-replication role cannot initiate replication\n" msgstr " --no-replication 복제 기능을 이용할 수 없음\n" -#: createuser.c:378 +#: createuser.c:370 #, c-format msgid "" " -U, --username=USERNAME user name to connect as (not the one to create)\n" @@ -673,7 +596,7 @@ msgstr "%s: 필수 항목인 데이터베이스 이름이 빠졌습니다\n" msgid "Database \"%s\" will be permanently removed.\n" msgstr "\"%s\" 데이터베이스가 완전히 삭제 될 것입니다.\n" -#: dropdb.c:118 dropuser.c:123 +#: dropdb.c:118 dropuser.c:128 msgid "Are you sure?" msgstr "정말 계속 할까요? 
(y/n) " @@ -708,51 +631,26 @@ msgid "" msgstr "" " --if-exists 해당 데이터베이스가 없어도 오류를 보고하지 않음\n" -#: droplang.c:202 -#, c-format -msgid "%s: language \"%s\" is not installed in database \"%s\"\n" -msgstr "%s: \"%s\" 언어는 \"%s\" 데이터베이스에 설치 되어있지 않습니다\n" - -#: droplang.c:221 -#, c-format -msgid "%s: language removal failed: %s" -msgstr "%s: 언어 삭제 실패: %s" - -#: droplang.c:236 -#, c-format -msgid "" -"%s removes a procedural language from a database.\n" -"\n" -msgstr "" -"%s 프로그램은 데이터베이스에서 프로시쥬얼 언어를 삭제합니다.\n" -"\n" - -#: droplang.c:240 -#, c-format -msgid "" -" -d, --dbname=DBNAME database from which to remove the language\n" -msgstr " -d, --dbname=DBNAME 언어를 삭제할 데이터베이스\n" - -#: dropuser.c:111 +#: dropuser.c:113 msgid "Enter name of role to drop: " msgstr "삭제할 롤 이름을 입력하십시오: " -#: dropuser.c:114 +#: dropuser.c:119 #, c-format msgid "%s: missing required argument role name\n" msgstr "%s: 롤 이름은 필수 입력 인자입니다\n" -#: dropuser.c:122 +#: dropuser.c:127 #, c-format msgid "Role \"%s\" will be permanently removed.\n" msgstr "\"%s\" 롤은 영구히 삭제될 것입니다.\n" -#: dropuser.c:140 +#: dropuser.c:145 #, c-format msgid "%s: removal of role \"%s\" failed: %s" msgstr "%s: \"%s\" 롤 삭제 실패: %s" -#: dropuser.c:155 +#: dropuser.c:160 #, c-format msgid "" "%s removes a PostgreSQL role.\n" @@ -761,7 +659,7 @@ msgstr "" "%s 프로그램은 PostgreSQL 롤을 삭제합니다.\n" "\n" -#: dropuser.c:160 +#: dropuser.c:165 #, c-format msgid "" " -i, --interactive prompt before deleting anything, and prompt for\n" @@ -770,12 +668,12 @@ msgstr "" " -i, --interactive 롤 이름을 입력하지 않았다면,\n" " 해당 이름을 물어봄\n" -#: dropuser.c:163 +#: dropuser.c:168 #, c-format msgid " --if-exists don't report error if user doesn't exist\n" msgstr " --if-exists 해당 롤이 없어도 오류를 보고하지 않음\n" -#: dropuser.c:168 +#: dropuser.c:173 #, c-format msgid "" " -U, --username=USERNAME user name to connect as (not the one to drop)\n" @@ -863,10 +761,10 @@ msgstr " -p, --port=PORT 데이터베이스 서버 포트\n" #: pg_isready.c:234 #, c-format -msgid " -t, --timeout=SECS seconds to wait when attempting connection, " -"0 disables (default: %s)\n" -msgstr "" -" -t, --timeout=초 연결 제한 시간, 0 무제한 (초기값: %s)\n" +msgid "" +" -t, --timeout=SECS seconds to wait when attempting connection, 0 " +"disables (default: %s)\n" +msgstr " -t, --timeout=초 연결 제한 시간, 0 무제한 (초기값: %s)\n" #: pg_isready.c:235 #, c-format @@ -1007,71 +905,71 @@ msgstr "" "\n" "보다 자세한 내용은 REINDEX SQL 명령어 설명서를 참조하십시오.\n" -#: vacuumdb.c:189 +#: vacuumdb.c:195 #, c-format msgid "%s: number of parallel jobs must be at least 1\n" msgstr "%s: 병렬 작업 숫자는 최소 1이어야 함\n" -#: vacuumdb.c:195 +#: vacuumdb.c:201 #, c-format msgid "%s: too many parallel jobs requested (maximum: %d)\n" msgstr "%s: 너무 많은 병렬 작업 요청 (최대: %d)\n" -#: vacuumdb.c:234 vacuumdb.c:240 +#: vacuumdb.c:240 vacuumdb.c:246 #, c-format msgid "%s: cannot use the \"%s\" option when performing only analyze\n" msgstr "%s: 통계 수집 전용 작업에서는 \"%s\" 옵션을 사용할 수 없음\n" -#: vacuumdb.c:257 +#: vacuumdb.c:263 #, c-format msgid "%s: cannot vacuum all databases and a specific one at the same time\n" msgstr "" "%s: -a 옵션이 있을 경우는 한 데이터베이스를 대상으로 작업을 진행할 수 없습니" "다.\n" -#: vacuumdb.c:263 +#: vacuumdb.c:269 #, c-format msgid "%s: cannot vacuum specific table(s) in all databases\n" msgstr "%s: 모든 데이터베이스를 대상으로 특정 테이블들을 청소할 수는 없음\n" -#: vacuumdb.c:349 +#: vacuumdb.c:355 msgid "Generating minimal optimizer statistics (1 target)" msgstr "최소 최적화 통계 수집 수행 중 (1% 대상)" -#: vacuumdb.c:350 +#: vacuumdb.c:356 msgid "Generating medium optimizer statistics (10 targets)" msgstr "일반 최적화 통계 수집 수행 중 (10% 대상)" -#: vacuumdb.c:351 +#: vacuumdb.c:357 msgid 
"Generating default (full) optimizer statistics" msgstr "최대 최적화 통계 수집 수행중 (모든 자료 대상)" -#: vacuumdb.c:363 +#: vacuumdb.c:369 #, c-format msgid "%s: processing database \"%s\": %s\n" msgstr "%s: \"%s\" 데이터베이스 작업 중: %s\n" -#: vacuumdb.c:366 +#: vacuumdb.c:372 #, c-format msgid "%s: vacuuming database \"%s\"\n" msgstr "%s: \"%s\" 데이터베이스를 청소 중\n" -#: vacuumdb.c:698 +#: vacuumdb.c:708 #, c-format msgid "%s: vacuuming of table \"%s\" in database \"%s\" failed: %s" msgstr "%s: \"%s\" 테이블 (해당 DB: \"%s\") 청소하기 실패: %s" -#: vacuumdb.c:701 vacuumdb.c:818 +#: vacuumdb.c:711 vacuumdb.c:828 #, c-format msgid "%s: vacuuming of database \"%s\" failed: %s" msgstr "%s: \"%s\" 데이터베이스 청소하기 실패: %s" -#: vacuumdb.c:932 +#: vacuumdb.c:942 #, c-format msgid "%s: invalid socket: %s" msgstr "%s: 잘못된 소켓: %s" -#: vacuumdb.c:941 +#: vacuumdb.c:951 #, c-format msgid "" "%s cleans and analyzes a PostgreSQL database.\n" @@ -1081,34 +979,34 @@ msgstr "" "퀴리 최적화기의 참고 자료를 갱신합니다.\n" "\n" -#: vacuumdb.c:945 +#: vacuumdb.c:955 #, c-format msgid " -a, --all vacuum all databases\n" msgstr " -a, --all 모든 데이터베이스 청소\n" -#: vacuumdb.c:946 +#: vacuumdb.c:956 #, c-format msgid " -d, --dbname=DBNAME database to vacuum\n" msgstr " -d, --dbname=DBNAME DBNAME 데이터베이스 청소\n" -#: vacuumdb.c:947 +#: vacuumdb.c:957 #, c-format msgid "" " -e, --echo show the commands being sent to the " "server\n" msgstr " -e, --echo 서버로 보내는 명령들을 보여줌\n" -#: vacuumdb.c:948 +#: vacuumdb.c:958 #, c-format msgid " -f, --full do full vacuuming\n" msgstr " -f, --full 대청소\n" -#: vacuumdb.c:949 +#: vacuumdb.c:959 #, c-format msgid " -F, --freeze freeze row transaction information\n" msgstr " -F, --freeze 행 트랜잭션 정보 동결\n" -#: vacuumdb.c:950 +#: vacuumdb.c:960 #, c-format msgid "" " -j, --jobs=NUM use this many concurrent connections to " @@ -1116,33 +1014,33 @@ msgid "" msgstr "" " -j, --jobs=NUM 청소 작업을 여러개의 연결로 동시에 작업함\n" -#: vacuumdb.c:951 +#: vacuumdb.c:961 #, c-format msgid " -q, --quiet don't write any messages\n" msgstr " -q, --quiet 어떠한 메시지도 보여주지 않음\n" -#: vacuumdb.c:952 +#: vacuumdb.c:962 #, c-format msgid " -t, --table='TABLE[(COLUMNS)]' vacuum specific table(s) only\n" msgstr " -t, --table='TABLE[(COLUMNS)]' 지정한 특정 테이블들만 청소\n" -#: vacuumdb.c:953 +#: vacuumdb.c:963 #, c-format msgid " -v, --verbose write a lot of output\n" msgstr " -v, --verbose 작업내역의 자세한 출력\n" -#: vacuumdb.c:954 +#: vacuumdb.c:964 #, c-format msgid "" " -V, --version output version information, then exit\n" msgstr " -V, --version 버전 정보를 보여주고 마침\n" -#: vacuumdb.c:955 +#: vacuumdb.c:965 #, c-format msgid " -z, --analyze update optimizer statistics\n" msgstr " -z, --analyze 쿼리최적화 통계 정보를 갱신함\n" -#: vacuumdb.c:956 +#: vacuumdb.c:966 #, c-format msgid "" " -Z, --analyze-only only update optimizer statistics; no " @@ -1151,22 +1049,22 @@ msgstr "" " -Z, --analyze-only 청소 작업 없이 쿼리최적화 통계 정보만 갱신" "함\n" -#: vacuumdb.c:957 +#: vacuumdb.c:967 #, c-format msgid "" " --analyze-in-stages only update optimizer statistics, in " "multiple\n" " stages for faster results; no vacuum\n" msgstr "" -" --analyze-in-stages 보다 빠른 결과를 위해 다중 스테이지에서" -" 최적화 통계치만 갱신함;청소 안함\n" +" --analyze-in-stages 보다 빠른 결과를 위해 다중 스테이지에" +"서 최적화 통계치만 갱신함;청소 안함\n" -#: vacuumdb.c:959 +#: vacuumdb.c:969 #, c-format msgid " -?, --help show this help, then exit\n" msgstr " -?, --help 이 도움말을 표시하고 종료\n" -#: vacuumdb.c:967 +#: vacuumdb.c:977 #, c-format msgid "" "\n" @@ -1174,3 +1072,67 @@ msgid "" msgstr "" "\n" "보다 자세한 내용은 VACUUM SQL 명령어 설명서를 참조하십시오.\n" + +#~ msgid "Name" +#~ msgstr "이름" + +#~ msgid "no" +#~ msgstr "아니오" + +#~ msgid "yes" +#~ msgstr 
"예" + +#~ msgid "Trusted?" +#~ msgstr "신뢰된?" + +#~ msgid "Procedural Languages" +#~ msgstr "프로시쥬얼 언어들" + +#~ msgid "%s: missing required argument language name\n" +#~ msgstr "%s: 필수 항목인, 언어 이름을 지정할 인수가 빠졌습니다\n" + +#~ msgid "%s: language \"%s\" is already installed in database \"%s\"\n" +#~ msgstr "%s: \"%s\" 언어는 이미 \"%s\" 데이터베이스에 설치되어 있습니다.\n" + +#~ msgid "%s: language installation failed: %s" +#~ msgstr "%s: 언어 설치 실패: %s" + +#~ msgid "" +#~ "%s installs a procedural language into a PostgreSQL database.\n" +#~ "\n" +#~ msgstr "" +#~ "%s 프로그램은 PostgreSQL 데이터베이스에 프로시쥬얼 언어를 설치합니다.\n" +#~ "\n" + +#~ msgid " %s [OPTION]... LANGNAME [DBNAME]\n" +#~ msgstr " %s [옵션]... 언어이름 [DB이름]\n" + +#~ msgid " -d, --dbname=DBNAME database to install language in\n" +#~ msgstr " -d, --dbname=DBNAME 언어를 설치할 DB이름\n" + +#~ msgid "" +#~ " -l, --list show a list of currently installed languages\n" +#~ msgstr " -l, --list 현재 설치 되어있는 언어들을 보여줌\n" + +#~ msgid " -E, --encrypted encrypt stored password\n" +#~ msgstr " -E, --encrypted 암호화된 암호 사용\n" + +#~ msgid " -N, --unencrypted do not encrypt stored password\n" +#~ msgstr " -N, --unencrypted 암호화 되지 않은 암호 사용\n" + +#~ msgid "%s: language \"%s\" is not installed in database \"%s\"\n" +#~ msgstr "%s: \"%s\" 언어는 \"%s\" 데이터베이스에 설치 되어있지 않습니다\n" + +#~ msgid "%s: language removal failed: %s" +#~ msgstr "%s: 언어 삭제 실패: %s" + +#~ msgid "" +#~ "%s removes a procedural language from a database.\n" +#~ "\n" +#~ msgstr "" +#~ "%s 프로그램은 데이터베이스에서 프로시쥬얼 언어를 삭제합니다.\n" +#~ "\n" + +#~ msgid "" +#~ " -d, --dbname=DBNAME database from which to remove the language\n" +#~ msgstr " -d, --dbname=DBNAME 언어를 삭제할 데이터베이스\n" diff --git a/src/bin/scripts/po/ru.po b/src/bin/scripts/po/ru.po index 476f72ab47..c58ff818ff 100644 --- a/src/bin/scripts/po/ru.po +++ b/src/bin/scripts/po/ru.po @@ -4,13 +4,12 @@ # Serguei A. Mokhov, , 2003-2004. # Oleg Bartunov , 2004. # Alexander Lakhin , 2012-2017. 
-# msgid "" msgstr "" "Project-Id-Version: pgscripts (PostgreSQL current)\n" "Report-Msgid-Bugs-To: pgsql-bugs@postgresql.org\n" -"POT-Creation-Date: 2017-03-27 12:46+0000\n" -"PO-Revision-Date: 2016-11-24 14:26+0300\n" +"POT-Creation-Date: 2017-08-17 23:15+0000\n" +"PO-Revision-Date: 2017-05-27 15:01+0300\n" "Last-Translator: Alexander Lakhin \n" "Language-Team: Russian \n" "Language: ru\n" @@ -78,14 +77,14 @@ msgid "invalid output format (internal error): %d" msgstr "неверный формат вывода (внутренняя ошибка): %d" #: clusterdb.c:111 clusterdb.c:130 createdb.c:119 createdb.c:138 -#: createuser.c:171 createuser.c:186 dropdb.c:94 dropdb.c:103 dropdb.c:111 +#: createuser.c:166 createuser.c:181 dropdb.c:94 dropdb.c:103 dropdb.c:111 #: dropuser.c:90 dropuser.c:105 dropuser.c:120 pg_isready.c:93 pg_isready.c:107 #: reindexdb.c:131 reindexdb.c:150 vacuumdb.c:213 vacuumdb.c:232 #, c-format msgid "Try \"%s --help\" for more information.\n" msgstr "Для дополнительной информации попробуйте \"%s --help\".\n" -#: clusterdb.c:128 createdb.c:136 createuser.c:184 dropdb.c:109 dropuser.c:103 +#: clusterdb.c:128 createdb.c:136 createuser.c:179 dropdb.c:109 dropuser.c:103 #: pg_isready.c:105 reindexdb.c:148 vacuumdb.c:230 #, c-format msgid "%s: too many command-line arguments (first is \"%s\")\n" @@ -125,7 +124,7 @@ msgstr "" "%s упорядочивает данные всех кластеризованных таблиц в базе данных.\n" "\n" -#: clusterdb.c:270 createdb.c:252 createuser.c:354 dropdb.c:155 dropuser.c:161 +#: clusterdb.c:270 createdb.c:252 createuser.c:343 dropdb.c:155 dropuser.c:161 #: pg_isready.c:222 reindexdb.c:401 vacuumdb.c:952 #, c-format msgid "Usage:\n" @@ -136,7 +135,7 @@ msgstr "Использование:\n" msgid " %s [OPTION]... [DBNAME]\n" msgstr " %s [ПАРАМЕТР]... [ИМЯ_БД]\n" -#: clusterdb.c:272 createdb.c:254 createuser.c:356 dropdb.c:157 dropuser.c:163 +#: clusterdb.c:272 createdb.c:254 createuser.c:345 dropdb.c:157 dropuser.c:163 #: pg_isready.c:225 reindexdb.c:403 vacuumdb.c:954 #, c-format msgid "" @@ -156,7 +155,7 @@ msgstr " -a, --all кластеризовать все базы msgid " -d, --dbname=DBNAME database to cluster\n" msgstr " -d, --dbname=ИМЯ_БД имя базы данных для кластеризации\n" -#: clusterdb.c:275 createuser.c:360 dropdb.c:158 dropuser.c:164 reindexdb.c:406 +#: clusterdb.c:275 createuser.c:349 dropdb.c:158 dropuser.c:164 reindexdb.c:406 #, c-format msgid "" " -e, --echo show the commands being sent to the server\n" @@ -178,17 +177,17 @@ msgstr "" msgid " -v, --verbose write a lot of output\n" msgstr " -v, --verbose выводить исчерпывающие сообщения\n" -#: clusterdb.c:279 createuser.c:374 dropdb.c:160 dropuser.c:167 reindexdb.c:413 +#: clusterdb.c:279 createuser.c:361 dropdb.c:160 dropuser.c:167 reindexdb.c:413 #, c-format msgid " -V, --version output version information, then exit\n" msgstr " -V, --version показать версию и выйти\n" -#: clusterdb.c:280 createuser.c:379 dropdb.c:162 dropuser.c:169 reindexdb.c:414 +#: clusterdb.c:280 createuser.c:366 dropdb.c:162 dropuser.c:169 reindexdb.c:414 #, c-format msgid " -?, --help show this help, then exit\n" msgstr " -?, --help показать эту справку и выйти\n" -#: clusterdb.c:281 createdb.c:265 createuser.c:380 dropdb.c:163 dropuser.c:170 +#: clusterdb.c:281 createdb.c:265 createuser.c:367 dropdb.c:163 dropuser.c:170 #: pg_isready.c:231 reindexdb.c:415 vacuumdb.c:970 #, c-format msgid "" @@ -198,14 +197,14 @@ msgstr "" "\n" "Параметры подключения:\n" -#: clusterdb.c:282 createuser.c:381 dropdb.c:164 dropuser.c:171 reindexdb.c:416 +#: clusterdb.c:282 createuser.c:368 dropdb.c:164 
dropuser.c:171 reindexdb.c:416 #: vacuumdb.c:971 #, c-format msgid " -h, --host=HOSTNAME database server host or socket directory\n" msgstr "" " -h, --host=ИМЯ имя сервера баз данных или каталог сокетов\n" -#: clusterdb.c:283 createuser.c:382 dropdb.c:165 dropuser.c:172 reindexdb.c:417 +#: clusterdb.c:283 createuser.c:369 dropdb.c:165 dropuser.c:172 reindexdb.c:417 #: vacuumdb.c:972 #, c-format msgid " -p, --port=PORT database server port\n" @@ -217,13 +216,13 @@ msgid " -U, --username=USERNAME user name to connect as\n" msgstr "" " -U, --username=ИМЯ имя пользователя для подключения к серверу\n" -#: clusterdb.c:285 createuser.c:384 dropdb.c:167 dropuser.c:174 reindexdb.c:419 +#: clusterdb.c:285 createuser.c:371 dropdb.c:167 dropuser.c:174 reindexdb.c:419 #: vacuumdb.c:974 #, c-format msgid " -w, --no-password never prompt for password\n" msgstr " -w, --no-password не запрашивать пароль\n" -#: clusterdb.c:286 createuser.c:385 dropdb.c:168 dropuser.c:175 reindexdb.c:420 +#: clusterdb.c:286 createuser.c:372 dropdb.c:168 dropuser.c:175 reindexdb.c:420 #: vacuumdb.c:975 #, c-format msgid " -W, --password force password prompt\n" @@ -243,7 +242,7 @@ msgstr "" "\n" "Подробнее о кластеризации вы можете узнать в описании SQL-команды CLUSTER.\n" -#: clusterdb.c:289 createdb.c:273 createuser.c:386 dropdb.c:170 dropuser.c:176 +#: clusterdb.c:289 createdb.c:273 createuser.c:373 dropdb.c:170 dropuser.c:176 #: pg_isready.c:236 reindexdb.c:423 vacuumdb.c:978 #, c-format msgid "" @@ -447,46 +446,46 @@ msgstr "" "\n" "По умолчанию именем базы данных считается имя текущего пользователя.\n" -#: createuser.c:194 +#: createuser.c:189 msgid "Enter name of role to add: " msgstr "Введите имя новой роли: " -#: createuser.c:211 +#: createuser.c:206 msgid "Enter password for new role: " msgstr "Введите пароль для новой роли: " -#: createuser.c:213 +#: createuser.c:208 msgid "Enter it again: " msgstr "Повторите его: " -#: createuser.c:216 +#: createuser.c:211 #, c-format msgid "Passwords didn't match.\n" msgstr "Пароли не совпадают.\n" -#: createuser.c:224 +#: createuser.c:219 msgid "Shall the new role be a superuser?" msgstr "Должна ли новая роль иметь полномочия суперпользователя?" -#: createuser.c:239 +#: createuser.c:234 msgid "Shall the new role be allowed to create databases?" msgstr "Новая роль должна иметь право создавать базы данных?" -#: createuser.c:247 +#: createuser.c:242 msgid "Shall the new role be allowed to create more new roles?" msgstr "Новая роль должна иметь право создавать другие роли?" -#: createuser.c:281 +#: createuser.c:272 #, c-format -msgid "Password encryption failed.\n" -msgstr "Ошибка при шифровании пароля.\n" +msgid "%s: password encryption failed: %s" +msgstr "%s: ошибка при шифровании пароля: %s" -#: createuser.c:338 +#: createuser.c:327 #, c-format msgid "%s: creation of new role failed: %s" msgstr "%s: создать роль не удалось: %s" -#: createuser.c:353 +#: createuser.c:342 #, c-format msgid "" "%s creates a new PostgreSQL role.\n" @@ -495,12 +494,12 @@ msgstr "" "%s создаёт роль пользователя PostgreSQL.\n" "\n" -#: createuser.c:355 dropuser.c:162 +#: createuser.c:344 dropuser.c:162 #, c-format msgid " %s [OPTION]... [ROLENAME]\n" msgstr " %s [ПАРАМЕТР]... 
[ИМЯ_РОЛИ]\n" -#: createuser.c:357 +#: createuser.c:346 #, c-format msgid "" " -c, --connection-limit=N connection limit for role (default: no limit)\n" @@ -508,29 +507,24 @@ msgstr "" " -c, --connection-limit=N предел подключений для роли\n" " (по умолчанию предела нет)\n" -#: createuser.c:358 +#: createuser.c:347 #, c-format msgid " -d, --createdb role can create new databases\n" msgstr " -d, --createdb роль с правом создания баз данных\n" -#: createuser.c:359 +#: createuser.c:348 #, c-format msgid " -D, --no-createdb role cannot create databases (default)\n" msgstr "" " -D, --no-createdb роль без права создания баз данных (по " "умолчанию)\n" -#: createuser.c:361 -#, c-format -msgid " -E, --encrypted encrypt stored password\n" -msgstr " -E, --encrypted зашифровать сохранённый пароль\n" - -#: createuser.c:362 +#: createuser.c:350 #, c-format msgid " -g, --role=ROLE new role will be a member of this role\n" msgstr " -g, --role=РОЛЬ новая роль будет включена в эту роль\n" -#: createuser.c:363 +#: createuser.c:351 #, c-format msgid "" " -i, --inherit role inherits privileges of roles it is a\n" @@ -540,57 +534,52 @@ msgstr "" "она\n" " включена (по умолчанию)\n" -#: createuser.c:365 +#: createuser.c:353 #, c-format msgid " -I, --no-inherit role does not inherit privileges\n" msgstr " -I, --no-inherit роль не наследует права\n" -#: createuser.c:366 +#: createuser.c:354 #, c-format msgid " -l, --login role can login (default)\n" msgstr "" " -l, --login роль с правом подключения к серверу (по " "умолчанию)\n" -#: createuser.c:367 +#: createuser.c:355 #, c-format msgid " -L, --no-login role cannot login\n" msgstr " -L, --no-login роль без права подключения\n" -#: createuser.c:368 -#, c-format -msgid " -N, --unencrypted do not encrypt stored password\n" -msgstr " -N, --unencrypted не шифровать сохранённый пароль\n" - -#: createuser.c:369 +#: createuser.c:356 #, c-format msgid " -P, --pwprompt assign a password to new role\n" msgstr " -P, --pwprompt назначить пароль новой роли\n" -#: createuser.c:370 +#: createuser.c:357 #, c-format msgid " -r, --createrole role can create new roles\n" msgstr " -r, --createrole роль с правом создания других ролей\n" -#: createuser.c:371 +#: createuser.c:358 #, c-format msgid " -R, --no-createrole role cannot create roles (default)\n" msgstr "" " -R, --no-createrole роль без права создания ролей (по умолчанию)\n" -#: createuser.c:372 +#: createuser.c:359 #, c-format msgid " -s, --superuser role will be superuser\n" msgstr " -s, --superuser роль с полномочиями суперпользователя\n" -#: createuser.c:373 +#: createuser.c:360 #, c-format msgid " -S, --no-superuser role will not be superuser (default)\n" msgstr "" " -S, --no-superuser роль без полномочий суперпользователя (по " "умолчанию)\n" -#: createuser.c:375 +#: createuser.c:362 #, c-format msgid "" " --interactive prompt for missing role name and attributes " @@ -600,17 +589,17 @@ msgstr "" " --interactive запрашивать отсутствующие атрибуты и имя роли,\n" " а не использовать значения по умолчанию\n" -#: createuser.c:377 +#: createuser.c:364 #, c-format msgid " --replication role can initiate replication\n" msgstr " --replication роль может инициировать репликацию\n" -#: createuser.c:378 +#: createuser.c:365 #, c-format msgid " --no-replication role cannot initiate replication\n" msgstr " --no-replication роль не может инициировать репликацию\n" -#: createuser.c:383 +#: createuser.c:370 #, c-format msgid "" " -U, --username=USERNAME user name to connect as (not the one to create)\n" @@ -1114,6 +1103,12 @@ msgstr "" 
"\n" "Подробнее об очистке вы можете узнать в описании SQL-команды VACUUM.\n" +#~ msgid " -E, --encrypted encrypt stored password\n" +#~ msgstr " -E, --encrypted зашифровать сохранённый пароль\n" + +#~ msgid " -N, --unencrypted do not encrypt stored password\n" +#~ msgstr " -N, --unencrypted не шифровать сохранённый пароль\n" + #~ msgid "Name" #~ msgstr "Имя" diff --git a/src/bin/scripts/po/sv.po b/src/bin/scripts/po/sv.po index fe7ed56f89..e5a037f6de 100644 --- a/src/bin/scripts/po/sv.po +++ b/src/bin/scripts/po/sv.po @@ -1,5 +1,5 @@ # Swedish message translation file for postgresql -# Dennis Björklund , 2003, 2004, 2005, 2006, 2017. +# Dennis Björklund , 2003, 2004, 2005, 2006, 2017, 2018. # Peter Eisentraut , 2013. # Mats Erik Andersson , 2014. # @@ -7,8 +7,8 @@ msgid "" msgstr "" "Project-Id-Version: PostgreSQL 10\n" "Report-Msgid-Bugs-To: pgsql-bugs@postgresql.org\n" -"POT-Creation-Date: 2017-07-10 05:46+0000\n" -"PO-Revision-Date: 2017-07-20 21:44+0200\n" +"POT-Creation-Date: 2018-03-08 18:45+0000\n" +"PO-Revision-Date: 2018-03-10 08:30+0100\n" "Last-Translator: Dennis Björklund \n" "Language-Team: Swedish \n" "Language: sv\n" @@ -71,15 +71,14 @@ msgstr "ogiltigt utdataformat (internt fel): %d" #: clusterdb.c:111 clusterdb.c:130 createdb.c:119 createdb.c:138 #: createuser.c:166 createuser.c:181 dropdb.c:94 dropdb.c:103 dropdb.c:111 -#: dropuser.c:90 dropuser.c:105 dropuser.c:120 pg_isready.c:93 -#: pg_isready.c:107 reindexdb.c:131 reindexdb.c:150 vacuumdb.c:213 -#: vacuumdb.c:232 +#: dropuser.c:90 dropuser.c:105 dropuser.c:120 pg_isready.c:93 pg_isready.c:107 +#: reindexdb.c:131 reindexdb.c:150 vacuumdb.c:215 vacuumdb.c:234 #, c-format msgid "Try \"%s --help\" for more information.\n" msgstr "Försök med \"%s --help\" för mer information.\n" #: clusterdb.c:128 createdb.c:136 createuser.c:179 dropdb.c:109 dropuser.c:103 -#: pg_isready.c:105 reindexdb.c:148 vacuumdb.c:230 +#: pg_isready.c:105 reindexdb.c:148 vacuumdb.c:232 #, c-format msgid "%s: too many command-line arguments (first is \"%s\")\n" msgstr "%s: för många kommandoradsargument (första är \"%s\")\n" @@ -94,22 +93,22 @@ msgstr "%s: kan inte klustra alla databaser och en angiven på samma gång\n" msgid "%s: cannot cluster specific table(s) in all databases\n" msgstr "%s: kan inte klustra angivna tabeller i alla databaser\n" -#: clusterdb.c:212 +#: clusterdb.c:216 #, c-format msgid "%s: clustering of table \"%s\" in database \"%s\" failed: %s" msgstr "%s: klustring av tabell \"%s\" i databas \"%s\" misslyckades: %s" -#: clusterdb.c:215 +#: clusterdb.c:219 #, c-format msgid "%s: clustering of database \"%s\" failed: %s" msgstr "%s: klustring av databas \"%s\" misslyckades: %s" -#: clusterdb.c:248 +#: clusterdb.c:252 #, c-format msgid "%s: clustering database \"%s\"\n" msgstr "%s: klustring av databas \"%s\"\n" -#: clusterdb.c:269 +#: clusterdb.c:273 #, c-format msgid "" "%s clusters all previously clustered tables in a database.\n" @@ -118,111 +117,112 @@ msgstr "" "%s klustrar alla tidigare klustrade tabeller i en databas.\n" "\n" -#: clusterdb.c:270 createdb.c:252 createuser.c:343 dropdb.c:155 dropuser.c:161 -#: pg_isready.c:222 reindexdb.c:401 vacuumdb.c:952 +#: clusterdb.c:274 createdb.c:252 createuser.c:343 dropdb.c:156 dropuser.c:161 +#: pg_isready.c:222 reindexdb.c:402 vacuumdb.c:963 #, c-format msgid "Usage:\n" msgstr "Användning:\n" -#: clusterdb.c:271 reindexdb.c:402 vacuumdb.c:953 +#: clusterdb.c:275 reindexdb.c:403 vacuumdb.c:964 #, c-format msgid " %s [OPTION]... [DBNAME]\n" msgstr " %s [FLAGGA]... 
[DBNAMN]\n" -#: clusterdb.c:272 createdb.c:254 createuser.c:345 dropdb.c:157 dropuser.c:163 -#: pg_isready.c:225 reindexdb.c:403 vacuumdb.c:954 +#: clusterdb.c:276 createdb.c:254 createuser.c:345 dropdb.c:158 dropuser.c:163 +#: pg_isready.c:225 reindexdb.c:404 vacuumdb.c:965 #, c-format msgid "" "\n" "Options:\n" -msgstr "\nFlaggor:\n" +msgstr "" +"\n" +"Flaggor:\n" -#: clusterdb.c:273 +#: clusterdb.c:277 #, c-format msgid " -a, --all cluster all databases\n" msgstr " -a, --all klustra alla databaser\n" -#: clusterdb.c:274 +#: clusterdb.c:278 #, c-format msgid " -d, --dbname=DBNAME database to cluster\n" msgstr " -d, --dbname=DBNAME databas att klustra\n" -#: clusterdb.c:275 createuser.c:349 dropdb.c:158 dropuser.c:164 -#: reindexdb.c:406 +#: clusterdb.c:279 createuser.c:349 dropdb.c:159 dropuser.c:164 reindexdb.c:407 #, c-format msgid " -e, --echo show the commands being sent to the server\n" msgstr " -e, --echo visa kommandon som skickas till servern\n" -#: clusterdb.c:276 reindexdb.c:408 +#: clusterdb.c:280 reindexdb.c:409 #, c-format msgid " -q, --quiet don't write any messages\n" msgstr " -q, --quiet skriv inte ut några meddelanden\n" -#: clusterdb.c:277 +#: clusterdb.c:281 #, c-format msgid " -t, --table=TABLE cluster specific table(s) only\n" msgstr " -t, --table=TABELL klustra enbart ingivna tabeller\n" -#: clusterdb.c:278 reindexdb.c:412 +#: clusterdb.c:282 reindexdb.c:413 #, c-format msgid " -v, --verbose write a lot of output\n" msgstr " -v, --verbose skriv massor med utdata\n" -#: clusterdb.c:279 createuser.c:361 dropdb.c:160 dropuser.c:167 -#: reindexdb.c:413 +#: clusterdb.c:283 createuser.c:361 dropdb.c:161 dropuser.c:167 reindexdb.c:414 #, c-format msgid " -V, --version output version information, then exit\n" msgstr " -V, --version visa versionsinformation, avsluta sedan\n" -#: clusterdb.c:280 createuser.c:366 dropdb.c:162 dropuser.c:169 -#: reindexdb.c:414 +#: clusterdb.c:284 createuser.c:366 dropdb.c:163 dropuser.c:169 reindexdb.c:415 #, c-format msgid " -?, --help show this help, then exit\n" msgstr " -?, --help visa denna hjälp, avsluta sedan\n" -#: clusterdb.c:281 createdb.c:265 createuser.c:367 dropdb.c:163 dropuser.c:170 -#: pg_isready.c:231 reindexdb.c:415 vacuumdb.c:970 +#: clusterdb.c:285 createdb.c:265 createuser.c:367 dropdb.c:164 dropuser.c:170 +#: pg_isready.c:231 reindexdb.c:416 vacuumdb.c:981 #, c-format msgid "" "\n" "Connection options:\n" -msgstr "\nFlaggor för anslutning:\n" +msgstr "" +"\n" +"Flaggor för anslutning:\n" -#: clusterdb.c:282 createuser.c:368 dropdb.c:164 dropuser.c:171 -#: reindexdb.c:416 vacuumdb.c:971 +#: clusterdb.c:286 createuser.c:368 dropdb.c:165 dropuser.c:171 reindexdb.c:417 +#: vacuumdb.c:982 #, c-format msgid " -h, --host=HOSTNAME database server host or socket directory\n" msgstr " -h, --host=VÄRDNAMN databasens värdnamn eller socketkatalog\n" -#: clusterdb.c:283 createuser.c:369 dropdb.c:165 dropuser.c:172 -#: reindexdb.c:417 vacuumdb.c:972 +#: clusterdb.c:287 createuser.c:369 dropdb.c:166 dropuser.c:172 reindexdb.c:418 +#: vacuumdb.c:983 #, c-format msgid " -p, --port=PORT database server port\n" msgstr " -p, --port=PORT databasserverns port\n" -#: clusterdb.c:284 dropdb.c:166 reindexdb.c:418 vacuumdb.c:973 +#: clusterdb.c:288 dropdb.c:167 reindexdb.c:419 vacuumdb.c:984 #, c-format msgid " -U, --username=USERNAME user name to connect as\n" msgstr " -U, --username=ANVÄNDARE användarnamn att ansluta som\n" -#: clusterdb.c:285 createuser.c:371 dropdb.c:167 dropuser.c:174 -#: reindexdb.c:419 vacuumdb.c:974 +#: clusterdb.c:289 
createuser.c:371 dropdb.c:168 dropuser.c:174 reindexdb.c:420 +#: vacuumdb.c:985 #, c-format msgid " -w, --no-password never prompt for password\n" msgstr " -w, --no-password fråga ej efter lösenord\n" -#: clusterdb.c:286 createuser.c:372 dropdb.c:168 dropuser.c:175 -#: reindexdb.c:420 vacuumdb.c:975 +#: clusterdb.c:290 createuser.c:372 dropdb.c:169 dropuser.c:175 reindexdb.c:421 +#: vacuumdb.c:986 #, c-format msgid " -W, --password force password prompt\n" msgstr " -W, --password framtvinga fråga om lösenord\n" -#: clusterdb.c:287 dropdb.c:169 reindexdb.c:421 vacuumdb.c:976 +#: clusterdb.c:291 dropdb.c:170 reindexdb.c:422 vacuumdb.c:987 #, c-format msgid " --maintenance-db=DBNAME alternate maintenance database\n" msgstr " --maintenance-db=DBNAMN annat val av underhållsdatabas\n" -#: clusterdb.c:288 +#: clusterdb.c:292 #, c-format msgid "" "\n" @@ -231,8 +231,8 @@ msgstr "" "\n" "Läs beskrivningen av SQL-kommandot CLUSTER för detaljer.\n" -#: clusterdb.c:289 createdb.c:273 createuser.c:373 dropdb.c:170 dropuser.c:176 -#: pg_isready.c:236 reindexdb.c:423 vacuumdb.c:978 +#: clusterdb.c:293 createdb.c:273 createuser.c:373 dropdb.c:171 dropuser.c:176 +#: pg_isready.c:236 reindexdb.c:424 vacuumdb.c:989 #, c-format msgid "" "\n" @@ -241,58 +241,65 @@ msgstr "" "\n" "Rapportera fel till .\n" -#: common.c:80 common.c:126 +#: common.c:83 common.c:129 msgid "Password: " msgstr "Lösenord: " -#: common.c:113 +#: common.c:116 #, c-format msgid "%s: could not connect to database %s: out of memory\n" msgstr "%s: kunde inte ansluta till databas %s: slut på minne\n" -#: common.c:140 +#: common.c:143 #, c-format msgid "%s: could not connect to database %s: %s" msgstr "%s: kunde inte ansluta till databas %s: %s" -#: common.c:189 common.c:217 +#: common.c:196 common.c:224 #, c-format msgid "%s: query failed: %s" msgstr "%s: fråga misslyckades: %s" -#: common.c:191 common.c:219 +#: common.c:198 common.c:226 #, c-format msgid "%s: query was: %s\n" msgstr "%s: frågan var: %s\n" +#: common.c:351 +#, c-format +msgid "%s: query returned %d row instead of one: %s\n" +msgid_plural "%s: query returned %d rows instead of one: %s\n" +msgstr[0] "%s: fråga gav %d rad istället för en: %s\n" +msgstr[1] "%s: fråga gav %d rader istället för en: %s\n" + #. translator: abbreviation for "yes" -#: common.c:260 +#: common.c:377 msgid "y" msgstr "j" #. translator: abbreviation for "no" -#: common.c:262 +#: common.c:379 msgid "n" msgstr "n" #. translator: This is a question followed by the translated options for #. "yes" and "no". 
-#: common.c:272 +#: common.c:389 #, c-format msgid "%s (%s/%s) " msgstr "%s (%s/%s) " -#: common.c:286 +#: common.c:403 #, c-format msgid "Please answer \"%s\" or \"%s\".\n" msgstr "Var vänlig att svara \"%s\" eller \"%s\".\n" -#: common.c:365 common.c:402 +#: common.c:482 common.c:519 #, c-format msgid "Cancel request sent\n" msgstr "Avbrottsbegäran skickad.\n" -#: common.c:368 common.c:406 +#: common.c:485 common.c:523 #, c-format msgid "Could not send cancel request: %s" msgstr "Kunde inte skicka avbrottsbegäran: %s" @@ -327,7 +334,9 @@ msgstr "%s: misslyckades att skapa kommentar (databasen skapades): %s" msgid "" "%s creates a PostgreSQL database.\n" "\n" -msgstr "%s skapar en PostgreSQL-databas.\n\n" +msgstr "" +"%s skapar en PostgreSQL-databas.\n" +"\n" #: createdb.c:253 #, c-format @@ -419,7 +428,9 @@ msgstr " --maintenance-db=DBNAMN annat val av underhållsdatabas\n" msgid "" "\n" "By default, a database with the same name as the current user is created.\n" -msgstr "\nSom standard skapas en databas med samma namn som den nuvarande användares namn.\n" +msgstr "" +"\n" +"Som standard skapas en databas med samma namn som den nuvarande användares namn.\n" #: createuser.c:189 msgid "Enter name of role to add: " @@ -465,7 +476,9 @@ msgstr "%s: misslyckades med att skapa ny roll: %s" msgid "" "%s creates a new PostgreSQL role.\n" "\n" -msgstr "%s skapar en ny PostgreSQL-roll.\n\n" +msgstr "" +"%s skapar en ny PostgreSQL-roll.\n" +"\n" #: createuser.c:344 dropuser.c:162 #, c-format @@ -579,12 +592,12 @@ msgstr "Databasen \"%s\" kommer att tas bort permanent.\n" msgid "Are you sure?" msgstr "Är du säker?" -#: dropdb.c:139 +#: dropdb.c:140 #, c-format msgid "%s: database removal failed: %s" msgstr "%s: borttagning av databas misslyckades: %s" -#: dropdb.c:154 +#: dropdb.c:155 #, c-format msgid "" "%s removes a PostgreSQL database.\n" @@ -593,17 +606,17 @@ msgstr "" "%s tar bort en PostgreSQL-databas.\n" "\n" -#: dropdb.c:156 +#: dropdb.c:157 #, c-format msgid " %s [OPTION]... DBNAME\n" msgstr " %s [FLAGGA]... 
DBNAMN\n" -#: dropdb.c:159 +#: dropdb.c:160 #, c-format msgid " -i, --interactive prompt before deleting anything\n" msgstr " -i, --interactive fråga innan något tas bort\n" -#: dropdb.c:161 +#: dropdb.c:162 #, c-format msgid " --if-exists don't report error if database doesn't exist\n" msgstr " --if-exists felrapportera ej om databasen saknas\n" @@ -693,7 +706,9 @@ msgstr "okänt\n" msgid "" "%s issues a connection check to a PostgreSQL database.\n" "\n" -msgstr "%s utför en anslutningskontroll mot en PostgreSQL-databas.\n\n" +msgstr "" +"%s utför en anslutningskontroll mot en PostgreSQL-databas.\n" +"\n" #: pg_isready.c:223 #, c-format @@ -780,74 +795,76 @@ msgstr "%s: kan inte omindexera specifik tabell och systemkatalogerna samtidigt\ msgid "%s: cannot reindex specific index(es) and system catalogs at the same time\n" msgstr "%s: kan inte omindexera angivna index och systemkatalogerna samtidigt.\n" -#: reindexdb.c:307 +#: reindexdb.c:308 #, c-format msgid "%s: reindexing of table \"%s\" in database \"%s\" failed: %s" msgstr "%s: omindexering av tabell \"%s\" i databasen \"%s\" misslyckades: %s" -#: reindexdb.c:310 +#: reindexdb.c:311 #, c-format msgid "%s: reindexing of index \"%s\" in database \"%s\" failed: %s" msgstr "%s: omindexering av index \"%s\" i databasen \"%s\" misslyckades: %s" -#: reindexdb.c:313 +#: reindexdb.c:314 #, c-format msgid "%s: reindexing of schema \"%s\" in database \"%s\" failed: %s" msgstr "%s: omindexering av schemat \"%s\" i databasen \"%s\" misslyckades: %s" -#: reindexdb.c:316 +#: reindexdb.c:317 #, c-format msgid "%s: reindexing of database \"%s\" failed: %s" msgstr "%s: omindexering av databasen \"%s\" misslyckades: %s" -#: reindexdb.c:349 +#: reindexdb.c:350 #, c-format msgid "%s: reindexing database \"%s\"\n" msgstr "%s: omindexering av databasen \"%s\"\n" -#: reindexdb.c:388 +#: reindexdb.c:389 #, c-format msgid "%s: reindexing of system catalogs failed: %s" msgstr "%s: omindexering av systemkatalogerna misslyckades: %s" -#: reindexdb.c:400 +#: reindexdb.c:401 #, c-format msgid "" "%s reindexes a PostgreSQL database.\n" "\n" -msgstr "%s indexerar om en PostgreSQL-databas.\n\n" +msgstr "" +"%s indexerar om en PostgreSQL-databas.\n" +"\n" -#: reindexdb.c:404 +#: reindexdb.c:405 #, c-format msgid " -a, --all reindex all databases\n" msgstr " -a, --all indexera om alla databaser\n" -#: reindexdb.c:405 +#: reindexdb.c:406 #, c-format msgid " -d, --dbname=DBNAME database to reindex\n" msgstr " -d, --dbname=DBNAME databas att indexera om\n" -#: reindexdb.c:407 +#: reindexdb.c:408 #, c-format msgid " -i, --index=INDEX recreate specific index(es) only\n" msgstr " -i, --index=INDEX återskapa enbart angivna index\n" -#: reindexdb.c:409 +#: reindexdb.c:410 #, c-format msgid " -s, --system reindex system catalogs\n" msgstr " -s, --system indexera om systemkatalogerna\n" -#: reindexdb.c:410 +#: reindexdb.c:411 #, c-format msgid " -S, --schema=SCHEMA reindex specific schema(s) only\n" msgstr " -S, --schema=SCHEMA indexera enbart om angivna scheman\n" -#: reindexdb.c:411 +#: reindexdb.c:412 #, c-format msgid " -t, --table=TABLE reindex specific table(s) only\n" msgstr " -t, --table=TABELL indexera endast om angivna tabeller\n" -#: reindexdb.c:422 +#: reindexdb.c:423 #, c-format msgid "" "\n" @@ -856,69 +873,69 @@ msgstr "" "\n" "Läs beskrivningen av SQL-kommandot REINDEX för detaljer.\n" -#: vacuumdb.c:195 +#: vacuumdb.c:197 #, c-format msgid "%s: number of parallel jobs must be at least 1\n" msgstr "%s: antalet parallella jobb måste vara minst 1\n" -#: 
vacuumdb.c:201 +#: vacuumdb.c:203 #, c-format msgid "%s: too many parallel jobs requested (maximum: %d)\n" msgstr "%s: för många parallella job (maximum: %d)\n" -#: vacuumdb.c:240 vacuumdb.c:246 +#: vacuumdb.c:242 vacuumdb.c:248 #, c-format msgid "%s: cannot use the \"%s\" option when performing only analyze\n" msgstr "%s: flaggan \"%s\" kan inte användas vid enbart analys\n" -#: vacuumdb.c:263 +#: vacuumdb.c:265 #, c-format msgid "%s: cannot vacuum all databases and a specific one at the same time\n" msgstr "%s: kan inte städa alla databaser och endast en angiven på samma gång\n" -#: vacuumdb.c:269 +#: vacuumdb.c:271 #, c-format msgid "%s: cannot vacuum specific table(s) in all databases\n" msgstr "%s: kan inte städa en specifik tabell i alla databaser.\n" -#: vacuumdb.c:355 +#: vacuumdb.c:357 msgid "Generating minimal optimizer statistics (1 target)" msgstr "Skapar minimal optimeringsstatistik (1 mål)" -#: vacuumdb.c:356 +#: vacuumdb.c:358 msgid "Generating medium optimizer statistics (10 targets)" msgstr "Skapar medium optimeringsstatistik (10 mål)" -#: vacuumdb.c:357 +#: vacuumdb.c:359 msgid "Generating default (full) optimizer statistics" msgstr "Skapar förvald (full) optimeringsstatistik" -#: vacuumdb.c:369 +#: vacuumdb.c:371 #, c-format msgid "%s: processing database \"%s\": %s\n" msgstr "%s: processar databasen \"%s\": %s\n" -#: vacuumdb.c:372 +#: vacuumdb.c:374 #, c-format msgid "%s: vacuuming database \"%s\"\n" msgstr "%s: städar databasen \"%s\".\n" -#: vacuumdb.c:708 +#: vacuumdb.c:719 #, c-format msgid "%s: vacuuming of table \"%s\" in database \"%s\" failed: %s" msgstr "%s: städning av tabell \"%s\" i databasen \"%s\" misslyckades: %s" -#: vacuumdb.c:711 vacuumdb.c:828 +#: vacuumdb.c:722 vacuumdb.c:839 #, c-format msgid "%s: vacuuming of database \"%s\" failed: %s" msgstr "%s: städning av databasen \"%s\" misslyckades: %s" -#: vacuumdb.c:942 +#: vacuumdb.c:953 #, c-format msgid "%s: invalid socket: %s" msgstr "%s: ogiltigt uttag: %s" -#: vacuumdb.c:951 +#: vacuumdb.c:962 #, c-format msgid "" "%s cleans and analyzes a PostgreSQL database.\n" @@ -927,67 +944,67 @@ msgstr "" "%s städar och analyserar en PostgreSQL-databas.\n" "\n" -#: vacuumdb.c:955 +#: vacuumdb.c:966 #, c-format msgid " -a, --all vacuum all databases\n" msgstr " -a, --all städa i alla databaser\n" -#: vacuumdb.c:956 +#: vacuumdb.c:967 #, c-format msgid " -d, --dbname=DBNAME database to vacuum\n" msgstr " -d, --dbname=DBNAMN databas att städa i\n" -#: vacuumdb.c:957 +#: vacuumdb.c:968 #, c-format msgid " -e, --echo show the commands being sent to the server\n" msgstr " -e, --echo visa kommandon som skickas till servern\n" -#: vacuumdb.c:958 +#: vacuumdb.c:969 #, c-format msgid " -f, --full do full vacuuming\n" msgstr " -f, --full utför full städning\n" -#: vacuumdb.c:959 +#: vacuumdb.c:970 #, c-format msgid " -F, --freeze freeze row transaction information\n" msgstr " -F, --freeze frys information om radtransaktioner\n" -#: vacuumdb.c:960 +#: vacuumdb.c:971 #, c-format msgid " -j, --jobs=NUM use this many concurrent connections to vacuum\n" msgstr " -j, --jobs=NUM använd så här många samtida anslutningar för städning\n" -#: vacuumdb.c:961 +#: vacuumdb.c:972 #, c-format msgid " -q, --quiet don't write any messages\n" msgstr " -q, --quiet skriv inte ut några meddelanden\n" -#: vacuumdb.c:962 +#: vacuumdb.c:973 #, c-format msgid " -t, --table='TABLE[(COLUMNS)]' vacuum specific table(s) only\n" msgstr " -t, --table='TABELL[(KOLUMNER)]' städa enbart i dessa tabeller\n" -#: vacuumdb.c:963 +#: vacuumdb.c:974 #, 
c-format msgid " -v, --verbose write a lot of output\n" msgstr " -v, --verbose skriv massor med utdata\n" -#: vacuumdb.c:964 +#: vacuumdb.c:975 #, c-format msgid " -V, --version output version information, then exit\n" msgstr " -V, --version visa versionsinformation, avsluta sedan\n" -#: vacuumdb.c:965 +#: vacuumdb.c:976 #, c-format msgid " -z, --analyze update optimizer statistics\n" msgstr " -z, --analyze uppdatera optimeringsstatistik\n" -#: vacuumdb.c:966 +#: vacuumdb.c:977 #, c-format msgid " -Z, --analyze-only only update optimizer statistics; no vacuum\n" msgstr " -Z, --analyze-only uppdatera bara optimeringsstatistik; ingen städning\n" -#: vacuumdb.c:967 +#: vacuumdb.c:978 #, c-format msgid "" " --analyze-in-stages only update optimizer statistics, in multiple\n" @@ -996,12 +1013,12 @@ msgstr "" " --analyze-in-stages uppdatera bara optimeringsstatistik, men i\n" " flera steg för snabbare resultat; ingen städning\n" -#: vacuumdb.c:969 +#: vacuumdb.c:980 #, c-format msgid " -?, --help show this help, then exit\n" msgstr " -?, --help visa denna hjälp, avsluta sedan\n" -#: vacuumdb.c:977 +#: vacuumdb.c:988 #, c-format msgid "" "\n" @@ -1010,39 +1027,29 @@ msgstr "" "\n" "Läs beskrivningen av SQL-kommandot VACUUM för detaljer.\n" -#~ msgid "%s: cannot use the \"freeze\" option when performing only analyze\n" -#~ msgstr "%s: Växeln \"freeze\" kan inte utföras med enbart analys.\n" - -#~ msgid " -d, --dbname=DBNAME database from which to remove the language\n" -#~ msgstr " -d, --dbname=DBNAMN databas från vilken språket skall tas bort\n" - -#~ msgid "" -#~ "%s removes a procedural language from a database.\n" -#~ "\n" -#~ msgstr "" -#~ "%s tar bort ett procedurspråk från en databas.\n" -#~ "\n" +#~ msgid "Name" +#~ msgstr "Namn" -#~ msgid "%s: language removal failed: %s" -#~ msgstr "%s: Borttagning av språk misslyckades: %s" +#~ msgid "no" +#~ msgstr "nej" -#~ msgid "%s: language \"%s\" is not installed in database \"%s\"\n" -#~ msgstr "%s: Språk \"%s\" är inte installerat i databasen \"%s\".\n" +#~ msgid "yes" +#~ msgstr "ja" -#~ msgid " -N, --unencrypted do not encrypt stored password\n" -#~ msgstr " -N, --unencrypted lösenordet sparas okrypterat\n" +#~ msgid "Trusted?" +#~ msgstr "Tillförlitligt?" -#~ msgid " -E, --encrypted encrypt stored password\n" -#~ msgstr " -E, --encrypted lösenordet skall sparas krypterat\n" +#~ msgid "Procedural Languages" +#~ msgstr "Procedurspråk" -#~ msgid " -l, --list show a list of currently installed languages\n" -#~ msgstr " -l, --list lista alla nu installerade språk\n" +#~ msgid "%s: missing required argument language name\n" +#~ msgstr "%s: Saknar nödvändigt språknamnsargument.\n" -#~ msgid " -d, --dbname=DBNAME database to install language in\n" -#~ msgstr " -d, --dbname=DBNAMN databas där språket installeras\n" +#~ msgid "%s: language \"%s\" is already installed in database \"%s\"\n" +#~ msgstr "%s: Språket \"%s\" är redan installerat i databasen \"%s\".\n" -#~ msgid " %s [OPTION]... LANGNAME [DBNAME]\n" -#~ msgstr " %s [FLAGGA]... SPRÅK [DBNAMN]\n" +#~ msgid "%s: language installation failed: %s" +#~ msgstr "%s: Språkinstallation misslyckades: %s" #~ msgid "" #~ "%s installs a procedural language into a PostgreSQL database.\n" @@ -1051,26 +1058,36 @@ msgstr "" #~ "%s installerar ett procedurspråk i en PostgreSQL-databas.\n" #~ "\n" -#~ msgid "%s: language installation failed: %s" -#~ msgstr "%s: Språkinstallation misslyckades: %s" +#~ msgid " %s [OPTION]... LANGNAME [DBNAME]\n" +#~ msgstr " %s [FLAGGA]... 
SPRÅK [DBNAMN]\n" -#~ msgid "%s: language \"%s\" is already installed in database \"%s\"\n" -#~ msgstr "%s: Språket \"%s\" är redan installerat i databasen \"%s\".\n" +#~ msgid " -d, --dbname=DBNAME database to install language in\n" +#~ msgstr " -d, --dbname=DBNAMN databas där språket installeras\n" -#~ msgid "%s: missing required argument language name\n" -#~ msgstr "%s: Saknar nödvändigt språknamnsargument.\n" +#~ msgid " -l, --list show a list of currently installed languages\n" +#~ msgstr " -l, --list lista alla nu installerade språk\n" -#~ msgid "Procedural Languages" -#~ msgstr "Procedurspråk" +#~ msgid " -E, --encrypted encrypt stored password\n" +#~ msgstr " -E, --encrypted lösenordet skall sparas krypterat\n" -#~ msgid "Trusted?" -#~ msgstr "Tillförlitligt?" +#~ msgid " -N, --unencrypted do not encrypt stored password\n" +#~ msgstr " -N, --unencrypted lösenordet sparas okrypterat\n" -#~ msgid "yes" -#~ msgstr "ja" +#~ msgid "%s: language \"%s\" is not installed in database \"%s\"\n" +#~ msgstr "%s: Språk \"%s\" är inte installerat i databasen \"%s\".\n" -#~ msgid "no" -#~ msgstr "nej" +#~ msgid "%s: language removal failed: %s" +#~ msgstr "%s: Borttagning av språk misslyckades: %s" -#~ msgid "Name" -#~ msgstr "Namn" +#~ msgid "" +#~ "%s removes a procedural language from a database.\n" +#~ "\n" +#~ msgstr "" +#~ "%s tar bort ett procedurspråk från en databas.\n" +#~ "\n" + +#~ msgid " -d, --dbname=DBNAME database from which to remove the language\n" +#~ msgstr " -d, --dbname=DBNAMN databas från vilken språket skall tas bort\n" + +#~ msgid "%s: cannot use the \"freeze\" option when performing only analyze\n" +#~ msgstr "%s: Växeln \"freeze\" kan inte utföras med enbart analys.\n" diff --git a/src/bin/scripts/po/tr.po b/src/bin/scripts/po/tr.po new file mode 100644 index 0000000000..9d4fd3107a --- /dev/null +++ b/src/bin/scripts/po/tr.po @@ -0,0 +1,1137 @@ +# translation of pgscripts-tr.po to Turkish +# Devrim GUNDUZ , 2004, 2005, 2006, 2007. +# Nicolai Tufar , 2005, 2006, 2007. +# İbrahim Edib Kökdemir <>, 2018. +# Abdullah G. GÜLNER <>, 2018. +msgid "" +msgstr "" +"Project-Id-Version: pgscripts-tr\n" +"Report-Msgid-Bugs-To: pgsql-bugs@postgresql.org\n" +"POT-Creation-Date: 2018-02-20 16:45+0000\n" +"PO-Revision-Date: 2018-02-22 10:51+0300\n" +"Last-Translator: Abdullah G. 
GüLNER\n" +"Language-Team: Turkish \n" +"Language: tr\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=UTF-8\n" +"Content-Transfer-Encoding: 8bit\n" +"X-Generator: Poedit 1.8.7.1\n" +"Plural-Forms: nplurals=2; plural=(n != 1);\n" +"X-Poedit-Basepath: /home/ntufar/pg/pgsql/src/bin/scripts\n" +"X-Poedit-SearchPath-0: /home/ntufar/pg/pgsql/src/bin/scripts\n" + +#: ../../common/fe_memutils.c:35 ../../common/fe_memutils.c:75 +#: ../../common/fe_memutils.c:98 +#, c-format +msgid "out of memory\n" +msgstr "bellek yetersiz\n" + +#: ../../common/fe_memutils.c:92 +#, c-format +msgid "cannot duplicate null pointer (internal error)\n" +msgstr "null pointer duplicate edilemiyor (iç hata)\n" + +#: ../../common/username.c:43 +#, c-format +msgid "could not look up effective user ID %ld: %s" +msgstr "geçerli kullanıcı ID si bulunamadı %ld: %s" + +#: ../../common/username.c:45 +msgid "user does not exist" +msgstr "kullanıcı mevcut değil" + +#: ../../common/username.c:60 +#, c-format +msgid "user name lookup failure: error code %lu" +msgstr "kullanıcı adı arama başarısız: hata kodu %lu" + +#: ../../fe_utils/print.c:353 +#, c-format +msgid "(%lu row)" +msgid_plural "(%lu rows)" +msgstr[0] "(%lu satır)" +msgstr[1] "(%lu satır)" + +#: ../../fe_utils/print.c:2913 +#, c-format +msgid "Interrupted\n" +msgstr "kesildi\n" + +#: ../../fe_utils/print.c:2977 +#, c-format +msgid "Cannot add header to table content: column count of %d exceeded.\n" +msgstr "B aşlık tablo içeriğine eklenemedi: %d kolon sayısı aşıldı.\n" + +#: ../../fe_utils/print.c:3017 +#, c-format +msgid "Cannot add cell to table content: total cell count of %d exceeded.\n" +msgstr "Hücre tablo içeriğine eklenemedi: %d olan toplan hücre sayısı açıldı.\n" + +#: ../../fe_utils/print.c:3266 +#, c-format +msgid "invalid output format (internal error): %d" +msgstr "geçersiz çıktı biçimi (iç hata): %d" + +#: clusterdb.c:111 clusterdb.c:130 createdb.c:119 createdb.c:138 +#: createuser.c:166 createuser.c:181 dropdb.c:94 dropdb.c:103 dropdb.c:111 +#: dropuser.c:90 dropuser.c:105 dropuser.c:120 pg_isready.c:93 pg_isready.c:107 +#: reindexdb.c:131 reindexdb.c:150 vacuumdb.c:213 vacuumdb.c:232 +#, c-format +msgid "Try \"%s --help\" for more information.\n" +msgstr "Daha fazla bilgi için \"%s --help\" komutunu deneyiniz.\n" + +#: clusterdb.c:128 createdb.c:136 createuser.c:179 dropdb.c:109 dropuser.c:103 +#: pg_isready.c:105 reindexdb.c:148 vacuumdb.c:230 +#, c-format +msgid "%s: too many command-line arguments (first is \"%s\")\n" +msgstr "%s: Çok sayıda komut satırı argümanı (ilki \"%s\")\n" + +#: clusterdb.c:140 +#, c-format +msgid "%s: cannot cluster all databases and a specific one at the same time\n" +msgstr "%s: Aynı anda tüm veritabanları ve de belirli bir tanesi cluster edilemez\n" + +#: clusterdb.c:147 +#, c-format +msgid "%s: cannot cluster specific table(s) in all databases\n" +msgstr "%s: tüm veritabanlarındaki belirli tablo(lar) cluster edilemez.\n" + +#: clusterdb.c:212 +#, c-format +msgid "%s: clustering of table \"%s\" in database \"%s\" failed: %s" +msgstr "%s: \"%s\"tablosunun (\"%s\" veritabanındaki) cluster işlemi başarısız oldu: %s" + +#: clusterdb.c:215 +#, c-format +msgid "%s: clustering of database \"%s\" failed: %s" +msgstr "%s: \"%s\" veritabanının cluster işlemi başarısız oldu: %s" + +#: clusterdb.c:248 +#, c-format +msgid "%s: clustering database \"%s\"\n" +msgstr "%s: \"%s\" veritabanı cluster ediliyor\n" + +#: clusterdb.c:269 +#, c-format +msgid "" +"%s clusters all previously clustered tables in a database.\n" +"\n" 
+msgstr "" +"%s Konutu bir veritabanında daha önceden cluster edilmiş tüm tabloları cluster eder.\n" +"\n" + +#: clusterdb.c:270 createdb.c:252 createuser.c:343 dropdb.c:155 dropuser.c:161 +#: pg_isready.c:222 reindexdb.c:401 vacuumdb.c:952 +#, c-format +msgid "Usage:\n" +msgstr "Kullanımı:\n" + +#: clusterdb.c:271 reindexdb.c:402 vacuumdb.c:953 +#, c-format +msgid " %s [OPTION]... [DBNAME]\n" +msgstr " %s [SEÇENEK]... [VERİTABANI_ADI]\n" + +#: clusterdb.c:272 createdb.c:254 createuser.c:345 dropdb.c:157 dropuser.c:163 +#: pg_isready.c:225 reindexdb.c:403 vacuumdb.c:954 +#, c-format +msgid "" +"\n" +"Options:\n" +msgstr "" +"\n" +"Seçenekler:\n" + +#: clusterdb.c:273 +#, c-format +msgid " -a, --all cluster all databases\n" +msgstr " -a, --all tüm veritabanlarını cluster eder\n" + +#: clusterdb.c:274 +#, c-format +msgid " -d, --dbname=DBNAME database to cluster\n" +msgstr " -d, --dbname=VERİTABANI_ADI cluster edilecek veritabanı adı\n" + +#: clusterdb.c:275 createuser.c:349 dropdb.c:158 dropuser.c:164 reindexdb.c:406 +#, c-format +msgid " -e, --echo show the commands being sent to the server\n" +msgstr " -e, --echo sunucuya gönderilen komutları göster\n" + +#: clusterdb.c:276 reindexdb.c:408 +#, c-format +msgid " -q, --quiet don't write any messages\n" +msgstr " -q, --quiet hiçbir ileti yazma\n" + +#: clusterdb.c:277 +#, c-format +msgid " -t, --table=TABLE cluster specific table(s) only\n" +msgstr " -t, --table=TABLO_ADI sadece belirli (bir) tabloyu/tabloları cluster eder\n" + +#: clusterdb.c:278 reindexdb.c:412 +#, c-format +msgid " -v, --verbose write a lot of output\n" +msgstr " -v, --verbose çok miktarda çıktı yaz\n" + +#: clusterdb.c:279 createuser.c:361 dropdb.c:160 dropuser.c:167 reindexdb.c:413 +#, c-format +msgid " -V, --version output version information, then exit\n" +msgstr " -V, --version sürüm bilgisini gösterir ve sonra çıkar\n" + +#: clusterdb.c:280 createuser.c:366 dropdb.c:162 dropuser.c:169 reindexdb.c:414 +#, c-format +msgid " -?, --help show this help, then exit\n" +msgstr " -?, --help bu yardımı gösterir ve sonra çıkar\n" + +#: clusterdb.c:281 createdb.c:265 createuser.c:367 dropdb.c:163 dropuser.c:170 +#: pg_isready.c:231 reindexdb.c:415 vacuumdb.c:970 +#, c-format +msgid "" +"\n" +"Connection options:\n" +msgstr "" +"\n" +"Bağlantı seçenekleri:\n" + +#: clusterdb.c:282 createuser.c:368 dropdb.c:164 dropuser.c:171 reindexdb.c:416 +#: vacuumdb.c:971 +#, c-format +msgid " -h, --host=HOSTNAME database server host or socket directory\n" +msgstr " -h, --host=HOSTNAME veritabanı sunucusu adresi ya da soket dizini\n" + +#: clusterdb.c:283 createuser.c:369 dropdb.c:165 dropuser.c:172 reindexdb.c:417 +#: vacuumdb.c:972 +#, c-format +msgid " -p, --port=PORT database server port\n" +msgstr " -p, --port=PORT veritabanı sunucusunun portu\n" + +#: clusterdb.c:284 dropdb.c:166 reindexdb.c:418 vacuumdb.c:973 +#, c-format +msgid " -U, --username=USERNAME user name to connect as\n" +msgstr " -U, --username=KULLANICI_ADI bağlanılacak kullanıcı adı\n" + +#: clusterdb.c:285 createuser.c:371 dropdb.c:167 dropuser.c:174 reindexdb.c:419 +#: vacuumdb.c:974 +#, c-format +msgid " -w, --no-password never prompt for password\n" +msgstr " -w, --no-password parola sorma\n" + +#: clusterdb.c:286 createuser.c:372 dropdb.c:168 dropuser.c:175 reindexdb.c:420 +#: vacuumdb.c:975 +#, c-format +msgid " -W, --password force password prompt\n" +msgstr " -W, --password parola sorulmasını sağla\n" + +#: clusterdb.c:287 dropdb.c:169 reindexdb.c:421 vacuumdb.c:976 +#, c-format +msgid " --maintenance-db=DBNAME 
alternate maintenance database\n" +msgstr " --maintenance-db=VTADI alternatif bakım veritabanı\n" + +#: clusterdb.c:288 +#, c-format +msgid "" +"\n" +"Read the description of the SQL command CLUSTER for details.\n" +msgstr "" +"\n" +"Ayrıntılar için bir SQL komutu olan CLUSTER'in açıklamasını okuyabilirsiniz.\n" + +#: clusterdb.c:289 createdb.c:273 createuser.c:373 dropdb.c:170 dropuser.c:176 +#: pg_isready.c:236 reindexdb.c:423 vacuumdb.c:978 +#, c-format +msgid "" +"\n" +"Report bugs to .\n" +msgstr "" +"\n" +"Hataları adresine bildirebilirsiniz.\n" + +#: common.c:80 common.c:126 +msgid "Password: " +msgstr "Parola: " + +#: common.c:113 +#, c-format +msgid "%s: could not connect to database %s: out of memory\n" +msgstr "%s: %s veritabanına bağlanılamadı: bellek yetersiz\n" + +#: common.c:140 +#, c-format +msgid "%s: could not connect to database %s: %s" +msgstr "%s: %s veritabanına bağlanılamadı: %s" + +#: common.c:189 common.c:217 +#, c-format +msgid "%s: query failed: %s" +msgstr "%s: sorgu başarısız oldu: %s" + +#: common.c:191 common.c:219 +#, c-format +msgid "%s: query was: %s\n" +msgstr "%s: sorgu şu idi: %s\n" + +#. translator: abbreviation for "yes" +#: common.c:260 +msgid "y" +msgstr "e" + +#. translator: abbreviation for "no" +#: common.c:262 +msgid "n" +msgstr "h" + +#. translator: This is a question followed by the translated options for +#. "yes" and "no". +#: common.c:272 +#, c-format +msgid "%s (%s/%s) " +msgstr "%s (%s/%s) " + +#: common.c:286 +#, c-format +msgid "Please answer \"%s\" or \"%s\".\n" +msgstr "Lütfen yanıtlayınız: \"%s\" veya \"%s\".\n" + +#: common.c:365 common.c:402 +#, c-format +msgid "Cancel request sent\n" +msgstr "İptal isteği gönderildi\n" + +#: common.c:368 common.c:406 +#, c-format +msgid "Could not send cancel request: %s" +msgstr "İptal isteği gönderilemedi: %s" + +#: createdb.c:146 +#, c-format +msgid "%s: only one of --locale and --lc-ctype can be specified\n" +msgstr "%s: --locale ve --lc-ctype seçeneklerinden sadece birisi belirtilebilir\n" + +#: createdb.c:152 +#, c-format +msgid "%s: only one of --locale and --lc-collate can be specified\n" +msgstr "%s: --locale ve --lc-collate parametrelerinden sadece birisi belirtilebilir\n" + +#: createdb.c:164 +#, c-format +msgid "%s: \"%s\" is not a valid encoding name\n" +msgstr "%s: \"%s\" geçerli bir dil kodlaması değil\n" + +#: createdb.c:213 +#, c-format +msgid "%s: database creation failed: %s" +msgstr "%s: veritabanı yaratma başarısız oldu: %s" + +#: createdb.c:233 +#, c-format +msgid "%s: comment creation failed (database was created): %s" +msgstr "%s: yorum yaratma işlemi başarısız oldu (veritabanı yaratıldı): %s" + +#: createdb.c:251 +#, c-format +msgid "" +"%s creates a PostgreSQL database.\n" +"\n" +msgstr "" +"%s bir PostgreSQL veritabanı yaratır.\n" +"\n" + +#: createdb.c:253 +#, c-format +msgid " %s [OPTION]... [DBNAME] [DESCRIPTION]\n" +msgstr " %s [SEÇENEK]... 
[VERİTABANI_ADI] [TANIM]\n" + +#: createdb.c:255 +#, c-format +msgid " -D, --tablespace=TABLESPACE default tablespace for the database\n" +msgstr " -D, --tablespace=TABLESPACE veritabanı için öntanımlı tablo uzayı\n" + +#: createdb.c:256 +#, c-format +msgid " -e, --echo show the commands being sent to the server\n" +msgstr " -e, --echo sunucuya gönderilen komutları göster\n" + +#: createdb.c:257 +#, c-format +msgid " -E, --encoding=ENCODING encoding for the database\n" +msgstr " -E, --encoding=ENCODING veritabanı için dil kodlaması\n" + +#: createdb.c:258 +#, c-format +msgid " -l, --locale=LOCALE locale settings for the database\n" +msgstr " -l, --locale=LOCALE veritabanı için yerel ayarları\n" + +#: createdb.c:259 +#, c-format +msgid " --lc-collate=LOCALE LC_COLLATE setting for the database\n" +msgstr " --lc-collate=LOCALE Veritabanı için LC_COLLATE ayarı\n" + +#: createdb.c:260 +#, c-format +msgid " --lc-ctype=LOCALE LC_CTYPE setting for the database\n" +msgstr " --lc-ctype=LOCALE Veritabanı için LC_CTYPE ayarı\n" + +#: createdb.c:261 +#, c-format +msgid " -O, --owner=OWNER database user to own the new database\n" +msgstr " -O, --owner=OWNER Yeni veritabanının sahibi olacak veritabanı kullanıcısı\n" + +#: createdb.c:262 +#, c-format +msgid " -T, --template=TEMPLATE template database to copy\n" +msgstr " -T, --template=TEMPLATE kopyalanacak şablon veritabanı\n" + +#: createdb.c:263 +#, c-format +msgid " -V, --version output version information, then exit\n" +msgstr " -V, --version sürüm bilgisini göster, sonra çık\n" + +#: createdb.c:264 +#, c-format +msgid " -?, --help show this help, then exit\n" +msgstr " -?, --help bu yardımı gösterir ve sonra çıkar\n" + +#: createdb.c:266 +#, c-format +msgid " -h, --host=HOSTNAME database server host or socket directory\n" +msgstr " -h, --host=HOSTNAME veritabanı sunucusu adresi ya da soket dizini\n" + +#: createdb.c:267 +#, c-format +msgid " -p, --port=PORT database server port\n" +msgstr " -p, --port=PORT veritabanı sunucu portu\n" + +#: createdb.c:268 +#, c-format +msgid " -U, --username=USERNAME user name to connect as\n" +msgstr " -U, --username=KULLANICI_ADI bağlanılacak kullanıcı adı\n" + +#: createdb.c:269 +#, c-format +msgid " -w, --no-password never prompt for password\n" +msgstr " -w, --no-password asla parola sorma\n" + +#: createdb.c:270 +#, c-format +msgid " -W, --password force password prompt\n" +msgstr " -W, --password parola sormasını sağla\n" + +#: createdb.c:271 +#, c-format +msgid " --maintenance-db=DBNAME alternate maintenance database\n" +msgstr " --maintenance-db=VTADI alternatif bakım veritabanı\n" + +#: createdb.c:272 +#, c-format +msgid "" +"\n" +"By default, a database with the same name as the current user is created.\n" +msgstr "" +"\n" +"Öntanımlı olarak , mevcut kullanıcı ile aynı adda veritabanı yaratılır.\n" + +#: createuser.c:189 +msgid "Enter name of role to add: " +msgstr "Eklenecek rol adını girin: " + +#: createuser.c:206 +msgid "Enter password for new role: " +msgstr "Yeni rol için şifre girin: " + +#: createuser.c:208 +msgid "Enter it again: " +msgstr "Yeniden girin: " + +#: createuser.c:211 +#, c-format +msgid "Passwords didn't match.\n" +msgstr "Şifreler uyuşmadı.\n" + +#: createuser.c:219 +msgid "Shall the new role be a superuser?" +msgstr "Yeni rol superuser olsun mu?" + +#: createuser.c:234 +msgid "Shall the new role be allowed to create databases?" +msgstr "Yeni rol, veritabanı oluşturabilsin mi?" + +#: createuser.c:242 +msgid "Shall the new role be allowed to create more new roles?" 
+msgstr "Yeni rol, yeni rolleri oluşturma hakkına sahip olsun mu?" + +#: createuser.c:272 +#, c-format +msgid "%s: password encryption failed: %s" +msgstr "%s: parola şifreleme hatası: %s" + +#: createuser.c:327 +#, c-format +msgid "%s: creation of new role failed: %s" +msgstr "%s: yeni rol oluşturma işlemi başarısız oldu: %s" + +#: createuser.c:342 +#, c-format +msgid "" +"%s creates a new PostgreSQL role.\n" +"\n" +msgstr "" +"%s yeni bir PostgreSQL rol oluşturur.\n" +"\n" + +#: createuser.c:344 dropuser.c:162 +#, c-format +msgid " %s [OPTION]... [ROLENAME]\n" +msgstr " %s [SEÇENEKLER]... [ROL_ADI]\n" + +#: createuser.c:346 +#, c-format +msgid " -c, --connection-limit=N connection limit for role (default: no limit)\n" +msgstr " -c, --connection-limit=N rol için azami bağlantı sayısı (varsayılan: sınırsız)\n" + +#: createuser.c:347 +#, c-format +msgid " -d, --createdb role can create new databases\n" +msgstr " -d, --createdb rol yeni veritabanı oluşturabiliyor\n" + +#: createuser.c:348 +#, c-format +msgid " -D, --no-createdb role cannot create databases (default)\n" +msgstr " -D, --no-createdb rol veritabanı oluşturamaz (varsayılan)\n" + +#: createuser.c:350 +#, c-format +msgid " -g, --role=ROLE new role will be a member of this role\n" +msgstr " -g, --role=ROL yeni rol bu rolün üyesi olacaktır\n" + +#: createuser.c:351 +#, c-format +msgid "" +" -i, --inherit role inherits privileges of roles it is a\n" +" member of (default)\n" +msgstr "" +" -i, --inherit rol, üye olduğu rollerin yetkilerini \n" +" miras alır (varsayılan)\n" +"\n" + +#: createuser.c:353 +#, c-format +msgid " -I, --no-inherit role does not inherit privileges\n" +msgstr " -I, --no-inherit rol, hiçbir yetkiyi miras almaz\n" + +#: createuser.c:354 +#, c-format +msgid " -l, --login role can login (default)\n" +msgstr " -l, --login rol giriş yapabiliyor\n" + +#: createuser.c:355 +#, c-format +msgid " -L, --no-login role cannot login\n" +msgstr " -L, --no-login role giriş yapamaz\n" + +#: createuser.c:356 +#, c-format +msgid " -P, --pwprompt assign a password to new role\n" +msgstr " -P, --pwprompt yeni role bir şifre atar\n" + +#: createuser.c:357 +#, c-format +msgid " -r, --createrole role can create new roles\n" +msgstr " -r, --createrole rol yeni rol oluşturabiliyor\n" + +#: createuser.c:358 +#, c-format +msgid " -R, --no-createrole role cannot create roles (default)\n" +msgstr " -R, --no-createrole rol başka bir rol oluşturamaz (varsayılan)\n" + +#: createuser.c:359 +#, c-format +msgid " -s, --superuser role will be superuser\n" +msgstr " -s, --superuser rol, superuser olacaktır\n" + +#: createuser.c:360 +#, c-format +msgid " -S, --no-superuser role will not be superuser (default)\n" +msgstr " -S, --no-superuser rol, superuser olmayacaktır (varsayılan)\n" + +#: createuser.c:362 +#, c-format +msgid "" +" --interactive prompt for missing role name and attributes rather\n" +" than using defaults\n" +msgstr "" +" --interactive varsayılanları kullanmaktansa eksik rol ve niteliklerin\n" +" girilmesini sağla\n" + +#: createuser.c:364 +#, c-format +msgid " --replication role can initiate replication\n" +msgstr " --replication rol replikasyon başlatabilir\n" + +#: createuser.c:365 +#, c-format +msgid " --no-replication role cannot initiate replication\n" +msgstr " --no-replication rol replikasyon başlatamaz\n" + +#: createuser.c:370 +#, c-format +msgid " -U, --username=USERNAME user name to connect as (not the one to create)\n" +msgstr " -U, --username=KULLANICI_ADI bağlanılacak kullanıcı adı (yaratılacak değil)\n" + +#: 
dropdb.c:102 +#, c-format +msgid "%s: missing required argument database name\n" +msgstr "%s: Gerekli argüman eksik: Veritabanı adı\n" + +#: dropdb.c:117 +#, c-format +msgid "Database \"%s\" will be permanently removed.\n" +msgstr "\"%s\" veritabanı kalıcı olarak silinecektir.\n" + +#: dropdb.c:118 dropuser.c:128 +msgid "Are you sure?" +msgstr "Emin misiniz?" + +#: dropdb.c:139 +#, c-format +msgid "%s: database removal failed: %s" +msgstr "%s: veritabanı silme işlemi başarısız oldu: %s" + +#: dropdb.c:154 +#, c-format +msgid "" +"%s removes a PostgreSQL database.\n" +"\n" +msgstr "" +"%s PostgreSQL veritabanını siler.\n" +"\n" + +#: dropdb.c:156 +#, c-format +msgid " %s [OPTION]... DBNAME\n" +msgstr " %s [SEÇENEK]... VERİTABANI_ADI\n" + +#: dropdb.c:159 +#, c-format +msgid " -i, --interactive prompt before deleting anything\n" +msgstr " -i, --interactive herhangi birşeyi silmeden önce uyarı verir\n" + +#: dropdb.c:161 +#, c-format +msgid " --if-exists don't report error if database doesn't exist\n" +msgstr " --if-exists don't report error if database doesn't exist\n" + +#: dropuser.c:113 +msgid "Enter name of role to drop: " +msgstr "Silinecek rolün adını giriniz: " + +#: dropuser.c:119 +#, c-format +msgid "%s: missing required argument role name\n" +msgstr "%s: Gerekli bir argüman olan rol adı eksik\n" + +#: dropuser.c:127 +#, c-format +msgid "Role \"%s\" will be permanently removed.\n" +msgstr "\"%s\" rolü kalıcı olarak silinecektir.\n" + +#: dropuser.c:145 +#, c-format +msgid "%s: removal of role \"%s\" failed: %s" +msgstr "%s: \"%s\" rolün silinmesi başarısız oldu: %s" + +#: dropuser.c:160 +#, c-format +msgid "" +"%s removes a PostgreSQL role.\n" +"\n" +msgstr "" +"%s bir PostgreSQL rolünü siler.\n" +"\n" + +#: dropuser.c:165 +#, c-format +msgid "" +" -i, --interactive prompt before deleting anything, and prompt for\n" +" role name if not specified\n" +msgstr "" +" -i, --interactive herhangi birşeyi silmeden önce uyarı ver, ve\n" +" belirtilmemişse rol adının girilmesini iste\n" + +#: dropuser.c:168 +#, c-format +msgid " --if-exists don't report error if user doesn't exist\n" +msgstr " --if-exists kullanıcı mevcut değilse bildirimde bulunma\n" + +#: dropuser.c:173 +#, c-format +msgid " -U, --username=USERNAME user name to connect as (not the one to drop)\n" +msgstr " -U, --username=KULLANICI _ADI bağlanırken kullanılacak kullanıcı adı (silinecek olan değil)\n" + +#: pg_isready.c:142 +#, c-format +msgid "%s: %s" +msgstr "%s: %s" + +#: pg_isready.c:150 +#, c-format +msgid "%s: could not fetch default options\n" +msgstr "%s: varsayılan seçenekler getirilemedi\n" + +#: pg_isready.c:199 +#, c-format +msgid "accepting connections\n" +msgstr "bağlantılar kabul ediliyor\n" + +#: pg_isready.c:202 +#, c-format +msgid "rejecting connections\n" +msgstr "bağlantılar reddediliyor\n" + +#: pg_isready.c:205 +#, c-format +msgid "no response\n" +msgstr "cevap yok\n" + +#: pg_isready.c:208 +#, c-format +msgid "no attempt\n" +msgstr "deneme yok\n" + +#: pg_isready.c:211 +#, c-format +msgid "unknown\n" +msgstr "bilinmeyen\n" + +#: pg_isready.c:221 +#, c-format +msgid "" +"%s issues a connection check to a PostgreSQL database.\n" +"\n" +msgstr "" +"%s bir PostgreSQL veritabanına bağlantı kontrolü sağlar.\n" +"\n" + +#: pg_isready.c:223 +#, c-format +msgid " %s [OPTION]...\n" +msgstr " %s [SEÇENEK]...\n" + +#: pg_isready.c:226 +#, c-format +msgid " -d, --dbname=DBNAME database name\n" +msgstr " -d, --dbname=VERİTABANI_ADI veritabanı adı\n" + +#: pg_isready.c:227 +#, c-format +msgid " -q, --quiet run 
quietly\n" +msgstr " -q, --quiet sessizce çalış\n" + +#: pg_isready.c:228 +#, c-format +msgid " -V, --version output version information, then exit\n" +msgstr " -V, --version sürüm bilgisini gösterir ve sonra çıkar\n" + +#: pg_isready.c:229 +#, c-format +msgid " -?, --help show this help, then exit\n" +msgstr " -?, --help bu yardımı gösterir ve sonra çıkar\n" + +#: pg_isready.c:232 +#, c-format +msgid " -h, --host=HOSTNAME database server host or socket directory\n" +msgstr " -h, --host=HOSTNAME veritabanı sunucusu adresi ya da soket dizini\n" + +#: pg_isready.c:233 +#, c-format +msgid " -p, --port=PORT database server port\n" +msgstr " -p, --port=PORT veritabanı sunucusunun portu\n" + +#: pg_isready.c:234 +#, c-format +msgid " -t, --timeout=SECS seconds to wait when attempting connection, 0 disables (default: %s)\n" +msgstr " -t, --timeout=SANİYE bağlantı denenirken beklenecek saniye, 0 devre dışı bırakır (vrsayılan: %s)\n" + +#: pg_isready.c:235 +#, c-format +msgid " -U, --username=USERNAME user name to connect as\n" +msgstr " -U, --username=KULLANICI_ADI bağlanılacak kullanıcı adı\n" + +#: reindexdb.c:160 +#, c-format +msgid "%s: cannot reindex all databases and a specific one at the same time\n" +msgstr "%s: aynı anda hem tüm veritabanları hem belirli bir veritabanı reindex edilemez\n" + +#: reindexdb.c:165 +#, c-format +msgid "%s: cannot reindex all databases and system catalogs at the same time\n" +msgstr "%s: aynı anda hem tüm veritabanları hem de sistem kataloğu reindex edilemez\n" + +#: reindexdb.c:170 +#, c-format +msgid "%s: cannot reindex specific schema(s) in all databases\n" +msgstr "%s: tüm veritabanlarındaki belirli şema(lar) tekrar indekslenemez\n" + +#: reindexdb.c:175 +#, c-format +msgid "%s: cannot reindex specific table(s) in all databases\n" +msgstr "%s: tüm veritabanlarındaki belirli tablo(lar) tekrar indekslenemez\n" + +#: reindexdb.c:180 +#, c-format +msgid "%s: cannot reindex specific index(es) in all databases\n" +msgstr "%s: tüm veritabanlarındaki belirli ndeks(ler) tekrar indekslenemez\n" + +#: reindexdb.c:191 +#, c-format +msgid "%s: cannot reindex specific schema(s) and system catalogs at the same time\n" +msgstr "%s: aynı anda hem belirli şema(lar) hem de sistem kataloğu tekrar indekslenemez\n" + +#: reindexdb.c:196 +#, c-format +msgid "%s: cannot reindex specific table(s) and system catalogs at the same time\n" +msgstr "%s: aynı anda hem belirli tablo(lar) hem de sistem katalogları tekrar indekslenemez\n" + +#: reindexdb.c:201 +#, c-format +msgid "%s: cannot reindex specific index(es) and system catalogs at the same time\n" +msgstr "%s: aynı anda hem belirli indeks(ler) hem de sistem katalogları tekrar indekslenemez\n" + +#: reindexdb.c:307 +#, c-format +msgid "%s: reindexing of table \"%s\" in database \"%s\" failed: %s" +msgstr "%1$s: \"%3$s\" veritabanındaki \"%2$s\" tablosunun tekrar indeksleme işlemi başarısız: %4$s" + +#: reindexdb.c:310 +#, c-format +msgid "%s: reindexing of index \"%s\" in database \"%s\" failed: %s" +msgstr "%1$s: \"%3$s\" veritabanındaki \"%2$s\" indeksinin yeniden oluşturulması başarısız: %4$s" + +#: reindexdb.c:313 +#, c-format +msgid "%s: reindexing of schema \"%s\" in database \"%s\" failed: %s" +msgstr "%1$s: \"%3$s\" veritabanındaki \"%2$s\" şemasının tekrar indeksleme işlemi başarısız: %4$s" + +#: reindexdb.c:316 +#, c-format +msgid "%s: reindexing of database \"%s\" failed: %s" +msgstr "%s: \"%s\" veritabanının yeniden indekslenmesi başarısız oldu: %s" + +#: reindexdb.c:349 +#, c-format +msgid "%s: reindexing database 
\"%s\"\n" +msgstr "%s: \"%s\" veritabanı yeniden indeksleniyor\n" + +#: reindexdb.c:388 +#, c-format +msgid "%s: reindexing of system catalogs failed: %s" +msgstr "%s: sistem kataloğların yeniden indekslemesi başarısız: %s" + +#: reindexdb.c:400 +#, c-format +msgid "" +"%s reindexes a PostgreSQL database.\n" +"\n" +msgstr "" +"%s PostgreSQL veritabanını yeniden indeksler.\n" +"\n" + +#: reindexdb.c:404 +#, c-format +msgid " -a, --all reindex all databases\n" +msgstr " -a, --all tüm veritabanlarını yeniden indeksle\n" + +#: reindexdb.c:405 +#, c-format +msgid " -d, --dbname=DBNAME database to reindex\n" +msgstr " -d, --dbname=VERİTABANI_ADI yeniden indexlenecek veritabanı adı\n" + +#: reindexdb.c:407 +#, c-format +msgid " -i, --index=INDEX recreate specific index(es) only\n" +msgstr " -i, --index=INDEX sadece belirli indeks(ler)i yeniden oluştur\n" + +#: reindexdb.c:409 +#, c-format +msgid " -s, --system reindex system catalogs\n" +msgstr " -s, --system sistem kataloğunu yeniden indeksle\n" + +#: reindexdb.c:410 +#, c-format +msgid " -S, --schema=SCHEMA reindex specific schema(s) only\n" +msgstr " -S, --schema=ŞEMA sadece belirtilen şema veya şemaları tekrar indeksle\n" + +#: reindexdb.c:411 +#, c-format +msgid " -t, --table=TABLE reindex specific table(s) only\n" +msgstr " -t, --table=TABLO_ADI sadece belirli bir tablonun veya tabloların indekslerini yeniden oluştur\n" + +#: reindexdb.c:422 +#, c-format +msgid "" +"\n" +"Read the description of the SQL command REINDEX for details.\n" +msgstr "" +"\n" +"Ayrıntılar için bir REINDEX SQL komutunun açıklamasını okuyabilirsiniz.\n" + +#: vacuumdb.c:195 +#, c-format +msgid "%s: number of parallel jobs must be at least 1\n" +msgstr "%s: paralel iş sayısı en azından 1 olmalı\n" + +#: vacuumdb.c:201 +#, c-format +msgid "%s: too many parallel jobs requested (maximum: %d)\n" +msgstr "%s: çok fazla paralel iş talep edildi (azami: %d)\n" + +#: vacuumdb.c:240 vacuumdb.c:246 +#, c-format +msgid "%s: cannot use the \"%s\" option when performing only analyze\n" +msgstr "%s: sadece analyze işlemi yapılırken \"%s\" seçeneği kullanılamaz\n" + +#: vacuumdb.c:263 +#, c-format +msgid "%s: cannot vacuum all databases and a specific one at the same time\n" +msgstr "%s:Aynı anda tüm veritabanları ve de belirli bir tanesi vakumlanamaz\n" + +#: vacuumdb.c:269 +#, c-format +msgid "%s: cannot vacuum specific table(s) in all databases\n" +msgstr "%s: tüm veritabanlarındaki belirli bir tablo/tablolar vakumlanamaz.\n" + +#: vacuumdb.c:355 +msgid "Generating minimal optimizer statistics (1 target)" +msgstr "Minimal optimizer istatistikleri oluşturuluyor (1 hedef)" + +#: vacuumdb.c:356 +msgid "Generating medium optimizer statistics (10 targets)" +msgstr "Orta ölçekte optimizer istatistikleri oluşturuluyor (10 hedef)" + +#: vacuumdb.c:357 +msgid "Generating default (full) optimizer statistics" +msgstr "Varsayılan (tam) optimizer istatistikleri oluşturuluyor" + +#: vacuumdb.c:369 +#, c-format +msgid "%s: processing database \"%s\": %s\n" +msgstr "%s: \"%s\" veritabanı üzerinde işlem yapılıyor: %s\n" + +#: vacuumdb.c:372 +#, c-format +msgid "%s: vacuuming database \"%s\"\n" +msgstr "%s: \"%s\" veritabanı vakumlanıyor\n" + +#: vacuumdb.c:708 +#, c-format +msgid "%s: vacuuming of table \"%s\" in database \"%s\" failed: %s" +msgstr "%s: \"%s\" tablosunun (\"%s\" veritabanındaki) vakumlama işlemi başarısız oldu: %s" + +#: vacuumdb.c:711 vacuumdb.c:828 +#, c-format +msgid "%s: vacuuming of database \"%s\" failed: %s" +msgstr "%s: \"%s\" veritabanının vakumlanması başarısız oldu: %s" 
+ +#: vacuumdb.c:942 +#, c-format +msgid "%s: invalid socket: %s" +msgstr "%s: geçersiz soket: %s" + +#: vacuumdb.c:951 +#, c-format +msgid "" +"%s cleans and analyzes a PostgreSQL database.\n" +"\n" +msgstr "" +"%s bir PostgreSQL veritabanını temizler ve analiz eder.\n" +"\n" + +#: vacuumdb.c:955 +#, c-format +msgid " -a, --all vacuum all databases\n" +msgstr " -a, --all tüm veritabanlarını vakumlar\n" + +#: vacuumdb.c:956 +#, c-format +msgid " -d, --dbname=DBNAME database to vacuum\n" +msgstr " -d, --dbname=VERİTABANI_ADI vakumlanacak veritabanı\n" + +#: vacuumdb.c:957 +#, c-format +msgid " -e, --echo show the commands being sent to the server\n" +msgstr " -e, --echo sunucuya gönderilen komutları yaz\n" + +#: vacuumdb.c:958 +#, c-format +msgid " -f, --full do full vacuuming\n" +msgstr " -f, --full tam (FULL) vakumlama yap\n" + +#: vacuumdb.c:959 +#, c-format +msgid " -F, --freeze freeze row transaction information\n" +msgstr " -F, --freeze Dondurulan satır transaction bilgisi\n" + +#: vacuumdb.c:960 +#, c-format +msgid " -j, --jobs=NUM use this many concurrent connections to vacuum\n" +msgstr " -j, --jobs=SAYI vakum için bu sayı kadar eşzamanlı bağlantı kullan \n" + +#: vacuumdb.c:961 +#, c-format +msgid " -q, --quiet don't write any messages\n" +msgstr " -q, --quiet hiçbir mesaj yazma\n" + +#: vacuumdb.c:962 +#, c-format +msgid " -t, --table='TABLE[(COLUMNS)]' vacuum specific table(s) only\n" +msgstr " -t, --table='TABLO[(KOLONLAR)]' sadece belirli bir tabloyu / tabloları vakumlar\n" + +#: vacuumdb.c:963 +#, c-format +msgid " -v, --verbose write a lot of output\n" +msgstr " -v, --verbose bolca çıktı yaz\n" + +#: vacuumdb.c:964 +#, c-format +msgid " -V, --version output version information, then exit\n" +msgstr " -V, --version sürüm bilgisini göster, sonra çık\n" + +#: vacuumdb.c:965 +#, c-format +msgid " -z, --analyze update optimizer statistics\n" +msgstr " -z, --analyze optimizer istatistiklerini güncelle\n" + +#: vacuumdb.c:966 +#, c-format +msgid " -Z, --analyze-only only update optimizer statistics; no vacuum\n" +msgstr " -z, --analyze-only sadece optimizer bilgilerini güncelle; vakum işlemi yok\n" + +#: vacuumdb.c:967 +#, c-format +msgid "" +" --analyze-in-stages only update optimizer statistics, in multiple\n" +" stages for faster results; no vacuum\n" +msgstr "" +" --analyze-in-stages sadece optimizer istatistiklerini güncelle, daha hızlı\n" +" sonuç için birden fazla aşamada; vakum işlemi yok\n" + +#: vacuumdb.c:969 +#, c-format +msgid " -?, --help show this help, then exit\n" +msgstr " -?, --help bu yardımı göster, sonrasında çık\n" + +#: vacuumdb.c:977 +#, c-format +msgid "" +"\n" +"Read the description of the SQL command VACUUM for details.\n" +msgstr "" +"\n" +"Ayrıntılar için, bir SQL komutu olan VACUUM'un tanımlarını okuyun.\n" + +#~ msgid "Could not send cancel request: %s\n" +#~ msgstr "İptal isteği gönderilemedi: %s\n" + +#~ msgid " -q, --quiet don't write any messages\n" +#~ msgstr " -q, --quiet Hiç bir mesaj yazma\n" + +#~ msgid "pg_strdup: cannot duplicate null pointer (internal error)\n" +#~ msgstr "pg_strdup: null pointer duplicate edilemiyor (iç hata)\n" + +#~ msgid "%s: out of memory\n" +#~ msgstr "%s: yetersiz bellek\n" + +#~ msgid "%s: could not get current user name: %s\n" +#~ msgstr "%s: geçerli kullanıcı adı alınamadı: %s\n" + +#~ msgid "%s: could not obtain information about current user: %s\n" +#~ msgstr "%s: geçerli kullanıcı hakkında bilgi alınamadı: %s\n" + +#~ msgid " --version output version information, then exit\n" +#~ msgstr " --version sürüm 
bilgisini göster ve çık\n" + +#~ msgid " --help show this help, then exit\n" +#~ msgstr " --help bu yardımı göster ve çık\n" + +#~ msgid "%s: cannot use the \"freeze\" option when performing only analyze\n" +#~ msgstr "%s: sadece analyze işlemi yapıldığında \"freeze\" seçeneğini kullanamaz\n" + +#~ msgid " -d, --dbname=DBNAME database from which to remove the language\n" +#~ msgstr " -d, --dbname=VERİTABANI_ADI dilin sileneceği veritabanının adı\n" + +#~ msgid "" +#~ "%s removes a procedural language from a database.\n" +#~ "\n" +#~ msgstr "" +#~ "%s veritabanından yordamsal bir dili siler.\n" +#~ "\n" + +#~ msgid "%s: language removal failed: %s" +#~ msgstr "%s: dil silme işlemi başarısız oldu: %s" + +#~ msgid "%s: still %s functions declared in language \"%s\"; language not removed\n" +#~ msgstr "%s: %s fonksiyon, \"%s\" dilinde tanımlanmış durumda; dil kaldırılamadı\n" + +#~ msgid "%s: language \"%s\" is not installed in database \"%s\"\n" +#~ msgstr "%s: \"%s\" dili \"%s\" veritabanında kurulu değil \n" + +#~ msgid "" +#~ "\n" +#~ "If one of -d, -D, -r, -R, -s, -S, and ROLENAME is not specified, you will\n" +#~ "be prompted interactively.\n" +#~ msgstr "" +#~ "\n" +#~ "Eğer -d, -D, -r, -R, -s, -S ve ROLENAME'den birisi belirtilmezse, bunlar size\n" +#~ "etkileşimli olarak sorulacaktır.\n" + +#~ msgid " -N, --unencrypted do not encrypt stored password\n" +#~ msgstr " -N, --unencrypted saklanmış şifreyi kriptolamaz\n" + +#~ msgid " -E, --encrypted encrypt stored password\n" +#~ msgstr " -E, --encrypted saklanan şifreleri encrypt eder\n" + +#~ msgid " --version output version information, then exit\n" +#~ msgstr " --version sürüm bilgisini göster ve çık\n" + +#~ msgid " --help show this help, then exit\n" +#~ msgstr " --help bu yardımı göster ve çık\n" + +#~ msgid " -l, --list show a list of currently installed languages\n" +#~ msgstr " -l, --list Şu anda kurulu olan dilleri göster\n" + +#~ msgid " -d, --dbname=DBNAME database to install language in\n" +#~ msgstr " -d, --dbname=VERİTABANI_ADI dilin kurulacağı veritabanının adı\n" + +#~ msgid " %s [OPTION]... LANGNAME [DBNAME]\n" +#~ msgstr " %s [SEÇENEK]... DİL_ADI [VERİTABANI_ADI]\n" + +#~ msgid "" +#~ "%s installs a procedural language into a PostgreSQL database.\n" +#~ "\n" +#~ msgstr "" +#~ "%s Bir PostgreSQL veritabanına yordamsal bir dil kurar.\n" +#~ "\n" + +#~ msgid "%s: language installation failed: %s" +#~ msgstr "%s: Dil kurulumu başarısız oldu: %s" + +#~ msgid "%s: language \"%s\" is already installed in database \"%s\"\n" +#~ msgstr "%s: \"%s\" dili daha önceden veritabanına yüklenmiştir \"%s\"\n" + +#~ msgid "Procedural Languages" +#~ msgstr "Yordamsal Diller" + +#~ msgid "Trusted?" +#~ msgstr "Güvenilir mi?" 
+ +#~ msgid "no" +#~ msgstr "hayır" + +#~ msgid "yes" +#~ msgstr "evet" + +#~ msgid "Name" +#~ msgstr "Adı" + +#~ msgid " --version output version information, then exit\n" +#~ msgstr " --version sürüm bilgisini göster ve çık\n" + +#~ msgid " --help show this help, then exit\n" +#~ msgstr " --help bu yardımı göster ve çık\n" diff --git a/src/bin/scripts/reindexdb.c b/src/bin/scripts/reindexdb.c index ffd611e7bb..be1c06ebbd 100644 --- a/src/bin/scripts/reindexdb.c +++ b/src/bin/scripts/reindexdb.c @@ -2,7 +2,7 @@ * * reindexdb * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * * src/bin/scripts/reindexdb.c * @@ -282,23 +282,24 @@ reindex_one_database(const char *name, const char *dbname, const char *type, PGconn *conn; conn = connectDatabase(dbname, host, port, username, prompt_password, - progname, false, false); + progname, echo, false, false); initPQExpBuffer(&sql); - appendPQExpBufferStr(&sql, "REINDEX"); + appendPQExpBufferStr(&sql, "REINDEX "); if (verbose) - appendPQExpBufferStr(&sql, " (VERBOSE)"); + appendPQExpBufferStr(&sql, "(VERBOSE) "); - if (strcmp(type, "TABLE") == 0) - appendPQExpBuffer(&sql, " TABLE %s", name); - else if (strcmp(type, "INDEX") == 0) - appendPQExpBuffer(&sql, " INDEX %s", name); + appendPQExpBufferStr(&sql, type); + appendPQExpBufferChar(&sql, ' '); + if (strcmp(type, "TABLE") == 0 || + strcmp(type, "INDEX") == 0) + appendQualifiedRelation(&sql, name, conn, progname, echo); else if (strcmp(type, "SCHEMA") == 0) - appendPQExpBuffer(&sql, " SCHEMA %s", name); + appendPQExpBufferStr(&sql, name); else if (strcmp(type, "DATABASE") == 0) - appendPQExpBuffer(&sql, " DATABASE %s", fmtId(PQdb(conn))); + appendPQExpBufferStr(&sql, fmtId(PQdb(conn))); appendPQExpBufferChar(&sql, ';'); if (!executeMaintenanceCommand(conn, sql.data, echo)) @@ -335,7 +336,7 @@ reindex_all_databases(const char *maintenance_db, int i; conn = connectMaintenanceDatabase(maintenance_db, host, port, username, - prompt_password, progname); + prompt_password, progname, echo); result = executeQuery(conn, "SELECT datname FROM pg_database WHERE datallowconn ORDER BY 1;", progname, echo); PQfinish(conn); @@ -372,7 +373,7 @@ reindex_system_catalogs(const char *dbname, const char *host, const char *port, PQExpBufferData sql; conn = connectDatabase(dbname, host, port, username, prompt_password, - progname, false, false); + progname, echo, false, false); initPQExpBuffer(&sql); diff --git a/src/bin/scripts/t/010_clusterdb.pl b/src/bin/scripts/t/010_clusterdb.pl index e2cff0fcab..ba093fa3a7 100644 --- a/src/bin/scripts/t/010_clusterdb.pl +++ b/src/bin/scripts/t/010_clusterdb.pl @@ -22,11 +22,11 @@ 'fails with nonexistent table'); $node->safe_psql('postgres', -'CREATE TABLE test1 (a int); CREATE INDEX test1x ON test1 (a); CLUSTER test1 USING test1x' + 'CREATE TABLE test1 (a int); CREATE INDEX test1x ON test1 (a); CLUSTER test1 USING test1x' ); $node->issues_sql_like( [ 'clusterdb', '-t', 'test1' ], - qr/statement: CLUSTER test1;/, + qr/statement: CLUSTER public\.test1;/, 'cluster specific table'); $node->command_ok([qw(clusterdb --echo --verbose dbname=template1)], diff --git a/src/bin/scripts/t/040_createuser.pl b/src/bin/scripts/t/040_createuser.pl index f4fc7ea3a4..916d925947 100644 --- a/src/bin/scripts/t/040_createuser.pl +++ b/src/bin/scripts/t/040_createuser.pl @@ -15,19 +15,19 @@ $node->issues_sql_like( [ 'createuser', 'regress_user1' ], -qr/statement: CREATE ROLE regress_user1 NOSUPERUSER NOCREATEDB 
NOCREATEROLE INHERIT LOGIN;/, + qr/statement: CREATE ROLE regress_user1 NOSUPERUSER NOCREATEDB NOCREATEROLE INHERIT LOGIN;/, 'SQL CREATE USER run'); $node->issues_sql_like( [ 'createuser', '-L', 'regress_role1' ], -qr/statement: CREATE ROLE regress_role1 NOSUPERUSER NOCREATEDB NOCREATEROLE INHERIT NOLOGIN;/, + qr/statement: CREATE ROLE regress_role1 NOSUPERUSER NOCREATEDB NOCREATEROLE INHERIT NOLOGIN;/, 'create a non-login role'); $node->issues_sql_like( [ 'createuser', '-r', 'regress_user2' ], -qr/statement: CREATE ROLE regress_user2 NOSUPERUSER NOCREATEDB CREATEROLE INHERIT LOGIN;/, + qr/statement: CREATE ROLE regress_user2 NOSUPERUSER NOCREATEDB CREATEROLE INHERIT LOGIN;/, 'create a CREATEROLE user'); $node->issues_sql_like( [ 'createuser', '-s', 'regress_user3' ], -qr/statement: CREATE ROLE regress_user3 SUPERUSER CREATEDB CREATEROLE INHERIT LOGIN;/, + qr/statement: CREATE ROLE regress_user3 SUPERUSER CREATEDB CREATEROLE INHERIT LOGIN;/, 'create a superuser'); $node->command_fails([ 'createuser', 'regress_user1' ], diff --git a/src/bin/scripts/t/080_pg_isready.pl b/src/bin/scripts/t/080_pg_isready.pl index d9830b5b3a..6da89e1b04 100644 --- a/src/bin/scripts/t/080_pg_isready.pl +++ b/src/bin/scripts/t/080_pg_isready.pl @@ -15,4 +15,6 @@ $node->init; $node->start; -$node->command_ok(['pg_isready'], 'succeeds with server running'); +# use a long timeout for the benefit of very slow buildfarm machines +$node->command_ok([qw(pg_isready --timeout=60)], + 'succeeds with server running'); diff --git a/src/bin/scripts/t/090_reindexdb.pl b/src/bin/scripts/t/090_reindexdb.pl index 3aa3a95350..e57a5e2bad 100644 --- a/src/bin/scripts/t/090_reindexdb.pl +++ b/src/bin/scripts/t/090_reindexdb.pl @@ -24,11 +24,11 @@ 'CREATE TABLE test1 (a int); CREATE INDEX test1x ON test1 (a);'); $node->issues_sql_like( [ 'reindexdb', '-t', 'test1', 'postgres' ], - qr/statement: REINDEX TABLE test1;/, + qr/statement: REINDEX TABLE public\.test1;/, 'reindex specific table'); $node->issues_sql_like( [ 'reindexdb', '-i', 'test1x', 'postgres' ], - qr/statement: REINDEX INDEX test1x;/, + qr/statement: REINDEX INDEX public\.test1x;/, 'reindex specific index'); $node->issues_sql_like( [ 'reindexdb', '-S', 'pg_catalog', 'postgres' ], @@ -40,7 +40,7 @@ 'reindex system tables'); $node->issues_sql_like( [ 'reindexdb', '-v', '-t', 'test1', 'postgres' ], - qr/statement: REINDEX \(VERBOSE\) TABLE test1;/, + qr/statement: REINDEX \(VERBOSE\) TABLE public\.test1;/, 'reindex with verbose output'); $node->command_ok([qw(reindexdb --echo --table=pg_am dbname=template1)], diff --git a/src/bin/scripts/t/100_vacuumdb.pl b/src/bin/scripts/t/100_vacuumdb.pl index dd98df8c08..4c477a27aa 100644 --- a/src/bin/scripts/t/100_vacuumdb.pl +++ b/src/bin/scripts/t/100_vacuumdb.pl @@ -3,7 +3,7 @@ use PostgresNode; use TestLib; -use Test::More tests => 19; +use Test::More tests => 23; program_help_ok('vacuumdb'); program_version_ok('vacuumdb'); @@ -26,12 +26,37 @@ qr/statement: VACUUM \(FREEZE\);/, 'vacuumdb -F'); $node->issues_sql_like( - [ 'vacuumdb', '-z', 'postgres' ], - qr/statement: VACUUM \(ANALYZE\);/, - 'vacuumdb -z'); + [ 'vacuumdb', '-zj2', 'postgres' ], + qr/statement: VACUUM \(ANALYZE\) pg_catalog\./, + 'vacuumdb -zj2'); $node->issues_sql_like( [ 'vacuumdb', '-Z', 'postgres' ], qr/statement: ANALYZE;/, 'vacuumdb -Z'); $node->command_ok([qw(vacuumdb -Z --table=pg_am dbname=template1)], 'vacuumdb with connection string'); + +$node->command_fails( + [qw(vacuumdb -Zt pg_am;ABORT postgres)], + 'trailing command in "-t", without COLUMNS'); + +# 
Unwanted; better if it failed. +$node->command_ok( + [qw(vacuumdb -Zt pg_am(amname);ABORT postgres)], + 'trailing command in "-t", with COLUMNS'); + +$node->safe_psql( + 'postgres', q| + CREATE TABLE "need""q(uot" (")x" text); + + CREATE FUNCTION f0(int) RETURNS int LANGUAGE SQL AS 'SELECT $1 * $1'; + CREATE FUNCTION f1(int) RETURNS int LANGUAGE SQL AS 'SELECT f0($1)'; + CREATE TABLE funcidx (x int); + INSERT INTO funcidx VALUES (0),(1),(2),(3); + CREATE INDEX i0 ON funcidx ((f1(x))); +|); +$node->command_ok([qw|vacuumdb -Z --table="need""q(uot"(")x") postgres|], + 'column list'); +$node->command_fails( + [qw|vacuumdb -Zt funcidx postgres|], + 'unqualifed name via functional index'); diff --git a/src/bin/scripts/t/102_vacuumdb_stages.pl b/src/bin/scripts/t/102_vacuumdb_stages.pl index 1300aa7905..17a7fc720d 100644 --- a/src/bin/scripts/t/102_vacuumdb_stages.pl +++ b/src/bin/scripts/t/102_vacuumdb_stages.pl @@ -10,26 +10,26 @@ $node->issues_sql_like( [ 'vacuumdb', '--analyze-in-stages', 'postgres' ], -qr/.*statement:\ SET\ default_statistics_target=1;\ SET\ vacuum_cost_delay=0; - .*statement:\ ANALYZE.* + qr/statement:\ SET\ default_statistics_target=1;\ SET\ vacuum_cost_delay=0; + .*statement:\ ANALYZE .*statement:\ SET\ default_statistics_target=10;\ RESET\ vacuum_cost_delay; - .*statement:\ ANALYZE.* + .*statement:\ ANALYZE .*statement:\ RESET\ default_statistics_target; .*statement:\ ANALYZE/sx, 'analyze three times'); $node->issues_sql_like( [ 'vacuumdb', '--analyze-in-stages', '--all' ], -qr/.*statement:\ SET\ default_statistics_target=1;\ SET\ vacuum_cost_delay=0; - .*statement:\ ANALYZE.* + qr/statement:\ SET\ default_statistics_target=1;\ SET\ vacuum_cost_delay=0; + .*statement:\ ANALYZE .*statement:\ SET\ default_statistics_target=1;\ SET\ vacuum_cost_delay=0; - .*statement:\ ANALYZE.* + .*statement:\ ANALYZE .*statement:\ SET\ default_statistics_target=10;\ RESET\ vacuum_cost_delay; - .*statement:\ ANALYZE.* + .*statement:\ ANALYZE .*statement:\ SET\ default_statistics_target=10;\ RESET\ vacuum_cost_delay; - .*statement:\ ANALYZE.* + .*statement:\ ANALYZE .*statement:\ RESET\ default_statistics_target; - .*statement:\ ANALYZE.* + .*statement:\ ANALYZE .*statement:\ RESET\ default_statistics_target; .*statement:\ ANALYZE/sx, 'analyze more than one database in stages'); diff --git a/src/bin/scripts/vacuumdb.c b/src/bin/scripts/vacuumdb.c index 5d2869ea6b..bcea9e556d 100644 --- a/src/bin/scripts/vacuumdb.c +++ b/src/bin/scripts/vacuumdb.c @@ -2,7 +2,7 @@ * * vacuumdb * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/bin/scripts/vacuumdb.c @@ -16,7 +16,7 @@ #include #endif -#include "catalog/pg_class.h" +#include "catalog/pg_class_d.h" #include "common.h" #include "fe_utils/simple_list.h" @@ -28,9 +28,8 @@ /* Parallel vacuuming stuff */ typedef struct ParallelSlot { - PGconn *connection; - pgsocket sock; - bool isFree; + PGconn *connection; /* One connection */ + bool isFree; /* Is it known to be idle? 
*/ } ParallelSlot; /* vacuum options controlled by user flags */ @@ -61,7 +60,9 @@ static void vacuum_all_databases(vacuumingOptions *vacopts, const char *progname, bool echo, bool quiet); static void prepare_vacuum_command(PQExpBuffer sql, PGconn *conn, - vacuumingOptions *vacopts, const char *table); + vacuumingOptions *vacopts, const char *table, + bool table_pre_qualified, + const char *progname, bool echo); static void run_vacuum_command(PGconn *conn, const char *sql, bool echo, const char *table, const char *progname, bool async); @@ -69,13 +70,16 @@ static void run_vacuum_command(PGconn *conn, const char *sql, bool echo, static ParallelSlot *GetIdleSlot(ParallelSlot slots[], int numslots, const char *progname); +static bool ProcessQueryResult(PGconn *conn, PGresult *result, + const char *progname); + static bool GetQueryResult(PGconn *conn, const char *progname); static void DisconnectDatabase(ParallelSlot *slot); static int select_loop(int maxFd, fd_set *workerset, bool *aborting); -static void init_slot(ParallelSlot *slot, PGconn *conn, const char *progname); +static void init_slot(ParallelSlot *slot, PGconn *conn); static void help(const char *progname); @@ -341,7 +345,7 @@ vacuum_one_database(const char *dbname, vacuumingOptions *vacopts, PQExpBufferData sql; PGconn *conn; SimpleStringListCell *cell; - ParallelSlot *slots = NULL; + ParallelSlot *slots; SimpleStringList dbtables = {NULL, NULL}; int i; bool failed = false; @@ -361,13 +365,13 @@ vacuum_one_database(const char *dbname, vacuumingOptions *vacopts, (stage >= 0 && stage < ANALYZE_NUM_STAGES)); conn = connectDatabase(dbname, host, port, username, prompt_password, - progname, false, true); + progname, echo, false, true); if (!quiet) { if (stage != ANALYZE_NO_STAGE) printf(_("%s: processing database \"%s\": %s\n"), - progname, PQdb(conn), stage_messages[stage]); + progname, PQdb(conn), _(stage_messages[stage])); else printf(_("%s: vacuuming database \"%s\"\n"), progname, PQdb(conn)); @@ -385,7 +389,6 @@ vacuum_one_database(const char *dbname, vacuumingOptions *vacopts, PQExpBufferData buf; PGresult *res; int ntups; - int i; initPQExpBuffer(&buf); @@ -403,8 +406,7 @@ vacuum_one_database(const char *dbname, vacuumingOptions *vacopts, for (i = 0; i < ntups; i++) { appendPQExpBufferStr(&buf, - fmtQualifiedId(PQserverVersion(conn), - PQgetvalue(res, i, 1), + fmtQualifiedId(PQgetvalue(res, i, 1), PQgetvalue(res, i, 0))); simple_string_list_append(&dbtables, buf.data); @@ -430,15 +432,17 @@ vacuum_one_database(const char *dbname, vacuumingOptions *vacopts, * for the first slot. If not in parallel mode, the first slot in the * array contains the connection. */ + if (concurrentCons <= 0) + concurrentCons = 1; slots = (ParallelSlot *) pg_malloc(sizeof(ParallelSlot) * concurrentCons); - init_slot(slots, conn, progname); + init_slot(slots, conn); if (parallel) { for (i = 1; i < concurrentCons; i++) { conn = connectDatabase(dbname, host, port, username, prompt_password, - progname, false, true); - init_slot(slots + i, conn, progname); + progname, echo, false, true); + init_slot(slots + i, conn); } } @@ -460,10 +464,8 @@ vacuum_one_database(const char *dbname, vacuumingOptions *vacopts, cell = tables ? tables->head : NULL; do { - ParallelSlot *free_slot; const char *tabname = cell ? 
cell->val : NULL; - - prepare_vacuum_command(&sql, conn, vacopts, tabname); + ParallelSlot *free_slot; if (CancelRequested) { @@ -495,10 +497,17 @@ vacuum_one_database(const char *dbname, vacuumingOptions *vacopts, else free_slot = slots; + /* + * Prepare the vacuum command. Note that in some cases this requires + * query execution, so be sure to use the free connection. + */ + prepare_vacuum_command(&sql, free_slot->connection, vacopts, tabname, + tables == &dbtables, progname, echo); + /* * Execute the vacuum. If not in parallel mode, this terminates the * program in case of an error. (The parallel case handles query - * errors in GetQueryResult through GetIdleSlot.) + * errors in ProcessQueryResult through GetIdleSlot.) */ run_vacuum_command(free_slot->connection, sql.data, echo, tabname, progname, parallel); @@ -511,13 +520,11 @@ vacuum_one_database(const char *dbname, vacuumingOptions *vacopts, { int j; + /* wait for all connections to finish */ for (j = 0; j < concurrentCons; j++) { - /* wait for all connection to return the results */ if (!GetQueryResult((slots + j)->connection, progname)) goto finish; - - (slots + j)->isFree = true; } } @@ -554,8 +561,8 @@ vacuum_all_databases(vacuumingOptions *vacopts, int stage; int i; - conn = connectMaintenanceDatabase(maintenance_db, host, port, - username, prompt_password, progname); + conn = connectMaintenanceDatabase(maintenance_db, host, port, username, + prompt_password, progname, echo); result = executeQuery(conn, "SELECT datname FROM pg_database WHERE datallowconn ORDER BY 1;", progname, echo); @@ -618,8 +625,10 @@ vacuum_all_databases(vacuumingOptions *vacopts, * quoted. The command is semicolon-terminated. */ static void -prepare_vacuum_command(PQExpBuffer sql, PGconn *conn, vacuumingOptions *vacopts, - const char *table) +prepare_vacuum_command(PQExpBuffer sql, PGconn *conn, + vacuumingOptions *vacopts, const char *table, + bool table_pre_qualified, + const char *progname, bool echo) { resetPQExpBuffer(sql); @@ -675,12 +684,19 @@ prepare_vacuum_command(PQExpBuffer sql, PGconn *conn, vacuumingOptions *vacopts, } if (table) - appendPQExpBuffer(sql, " %s", table); + { + appendPQExpBufferChar(sql, ' '); + if (table_pre_qualified) + appendPQExpBufferStr(sql, table); + else + appendQualifiedRelation(sql, table, conn, progname, echo); + } appendPQExpBufferChar(sql, ';'); } /* - * Execute a vacuum/analyze command to the server. + * Send a vacuum/analyze command to the server. In async mode, return after + * sending the command; else, wait for it to finish. * * Any errors during command execution are reported to stderr. If async is * false, this function exits the program after reporting the error. @@ -728,10 +744,6 @@ run_vacuum_command(PGconn *conn, const char *sql, bool echo, * this happens, we read the whole set and mark as free all sockets that become * available. * - * Process the slot list, if any free slot is available then return the slotid - * else perform the select on all the socket's and wait until at least one slot - * becomes available. - * * If an error occurs, NULL is returned. */ static ParallelSlot * @@ -740,31 +752,43 @@ GetIdleSlot(ParallelSlot slots[], int numslots, { int i; int firstFree = -1; - fd_set slotset; - pgsocket maxFd; - for (i = 0; i < numslots; i++) - if ((slots + i)->isFree) - return slots + i; - - FD_ZERO(&slotset); - - maxFd = slots->sock; + /* Any connection already known free? 
*/ for (i = 0; i < numslots; i++) { - FD_SET((slots + i)->sock, &slotset); - if ((slots + i)->sock > maxFd) - maxFd = (slots + i)->sock; + if (slots[i].isFree) + return slots + i; } /* * No free slot found, so wait until one of the connections has finished * its task and return the available slot. */ - for (firstFree = -1; firstFree < 0;) + while (firstFree < 0) { + fd_set slotset; + int maxFd = 0; bool aborting; + /* We must reconstruct the fd_set for each call to select_loop */ + FD_ZERO(&slotset); + + for (i = 0; i < numslots; i++) + { + int sock = PQsocket(slots[i].connection); + + /* + * We don't really expect any connections to lose their sockets + * after startup, but just in case, cope by ignoring them. + */ + if (sock < 0) + continue; + + FD_SET(sock, &slotset); + if (sock > maxFd) + maxFd = sock; + } + SetCancelConn(slots->connection); i = select_loop(maxFd, &slotset, &aborting); ResetCancelConn(); @@ -782,64 +806,93 @@ GetIdleSlot(ParallelSlot slots[], int numslots, for (i = 0; i < numslots; i++) { - if (!FD_ISSET((slots + i)->sock, &slotset)) - continue; + int sock = PQsocket(slots[i].connection); - PQconsumeInput((slots + i)->connection); - if (PQisBusy((slots + i)->connection)) - continue; - - (slots + i)->isFree = true; + if (sock >= 0 && FD_ISSET(sock, &slotset)) + { + /* select() says input is available, so consume it */ + PQconsumeInput(slots[i].connection); + } - if (!GetQueryResult((slots + i)->connection, progname)) - return NULL; + /* Collect result(s) as long as any are available */ + while (!PQisBusy(slots[i].connection)) + { + PGresult *result = PQgetResult(slots[i].connection); - if (firstFree < 0) - firstFree = i; + if (result != NULL) + { + /* Check and discard the command result */ + if (!ProcessQueryResult(slots[i].connection, result, + progname)) + return NULL; + } + else + { + /* This connection has become idle */ + slots[i].isFree = true; + if (firstFree < 0) + firstFree = i; + break; + } + } } } return slots + firstFree; } +/* + * ProcessQueryResult + * + * Process (and delete) a query result. Returns true if there's no error, + * false otherwise -- but errors about trying to vacuum a missing relation + * are reported and subsequently ignored. + */ +static bool +ProcessQueryResult(PGconn *conn, PGresult *result, const char *progname) +{ + /* + * If it's an error, report it. Errors about a missing table are harmless + * so we continue processing; but die for other errors. + */ + if (PQresultStatus(result) != PGRES_COMMAND_OK) + { + char *sqlState = PQresultErrorField(result, PG_DIAG_SQLSTATE); + + fprintf(stderr, _("%s: vacuuming of database \"%s\" failed: %s"), + progname, PQdb(conn), PQerrorMessage(conn)); + + if (sqlState && strcmp(sqlState, ERRCODE_UNDEFINED_TABLE) != 0) + { + PQclear(result); + return false; + } + } + + PQclear(result); + return true; +} + /* * GetQueryResult * - * Process the query result. Returns true if there's no error, false - * otherwise -- but errors about trying to vacuum a missing relation are - * reported and subsequently ignored. + * Pump the conn till it's dry of results; return false if any are errors. + * Note that this will block if the conn is busy. */ static bool GetQueryResult(PGconn *conn, const char *progname) { + bool ok = true; PGresult *result; SetCancelConn(conn); while ((result = PQgetResult(conn)) != NULL) { - /* - * If errors are found, report them. Errors about a missing table are - * harmless so we continue processing; but die for other errors. 
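The reworked GetIdleSlot/ProcessQueryResult logic above follows libpq's standard asynchronous pattern: send the command, wait on PQsocket() with select(), feed the connection with PQconsumeInput(), and drain results with PQgetResult() until it returns NULL. For reference, a minimal standalone sketch of that loop is shown below; the connection string, query text, and error handling are illustrative assumptions rather than vacuumdb code.

/* Minimal sketch of libpq's asynchronous query pattern, as used by the
 * reworked vacuumdb slot code.  Compile with -lpq. */
#include <stdio.h>
#include <sys/select.h>
#include <libpq-fe.h>

int
main(void)
{
	PGconn	   *conn = PQconnectdb("dbname=postgres");
	PGresult   *res;

	if (PQstatus(conn) != CONNECTION_OK)
	{
		fprintf(stderr, "connection failed: %s", PQerrorMessage(conn));
		return 1;
	}

	/* Send the command without blocking for its result. */
	if (!PQsendQuery(conn, "VACUUM (ANALYZE);"))
	{
		fprintf(stderr, "send failed: %s", PQerrorMessage(conn));
		return 1;
	}

	/* Wait for input and consume it until the connection is not busy. */
	while (PQisBusy(conn))
	{
		int			sock = PQsocket(conn);
		fd_set		input_mask;

		if (sock < 0)
			break;				/* connection lost its socket */

		FD_ZERO(&input_mask);
		FD_SET(sock, &input_mask);
		if (select(sock + 1, &input_mask, NULL, NULL, NULL) < 0)
			break;				/* select() failed or was interrupted */

		if (!PQconsumeInput(conn))
			break;				/* read error on the connection */
	}

	/* Drain every result; a NULL return means the connection is idle. */
	while ((res = PQgetResult(conn)) != NULL)
	{
		if (PQresultStatus(res) != PGRES_COMMAND_OK)
			fprintf(stderr, "command failed: %s", PQerrorMessage(conn));
		PQclear(res);
	}

	PQfinish(conn);
	return 0;
}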
- */ - if (PQresultStatus(result) != PGRES_COMMAND_OK) - { - char *sqlState = PQresultErrorField(result, PG_DIAG_SQLSTATE); - - fprintf(stderr, _("%s: vacuuming of database \"%s\" failed: %s"), - progname, PQdb(conn), PQerrorMessage(conn)); - - if (sqlState && strcmp(sqlState, ERRCODE_UNDEFINED_TABLE) != 0) - { - PQclear(result); - return false; - } - } - - PQclear(result); + if (!ProcessQueryResult(conn, result, progname)) + ok = false; } ResetCancelConn(); - - return true; + return ok; } /* @@ -931,18 +984,11 @@ select_loop(int maxFd, fd_set *workerset, bool *aborting) } static void -init_slot(ParallelSlot *slot, PGconn *conn, const char *progname) +init_slot(ParallelSlot *slot, PGconn *conn) { slot->connection = conn; + /* Initially assume connection is idle */ slot->isFree = true; - slot->sock = PQsocket(conn); - - if (slot->sock < 0) - { - fprintf(stderr, _("%s: invalid socket: %s"), progname, - PQerrorMessage(conn)); - exit(1); - } } static void diff --git a/src/common/Makefile b/src/common/Makefile index 80e78d72fe..ec8139f014 100644 --- a/src/common/Makefile +++ b/src/common/Makefile @@ -3,17 +3,21 @@ # Makefile # Makefile for src/common # -# This makefile generates two outputs: +# These files are used by the Postgres backend, and also by frontend +# programs. These files provide common functionality that isn't directly +# concerned with portability and thus doesn't belong in src/port. +# +# This makefile generates three outputs: # # libpgcommon.a - contains object files with FRONTEND defined, # for use by client applications # -# libpgcommon_srv.a - contains object files without FRONTEND defined, -# for use only by the backend binaries +# libpgcommon_shlib.a - contains object files with FRONTEND defined, +# built suitably for use in shared libraries; for use +# by frontend libraries # -# You can also symlink/copy individual source files from this directory, -# to compile with different options. (libpq does that, because it needs -# to use -fPIC on some platforms.) +# libpgcommon_srv.a - contains object files without FRONTEND defined, +# for use only by the backend # # IDENTIFICATION # src/common/Makefile @@ -24,12 +28,9 @@ subdir = src/common top_builddir = ../.. 
include $(top_builddir)/src/Makefile.global -override CPPFLAGS := -DFRONTEND $(CPPFLAGS) -LIBS += $(PTHREAD_LIBS) - # don't include subdirectory-path-dependent -I and -L switches STD_CPPFLAGS := $(filter-out -I$(top_srcdir)/src/include -I$(top_builddir)/src/include,$(CPPFLAGS)) -STD_LDFLAGS := $(filter-out -L$(top_builddir)/src/port,$(LDFLAGS)) +STD_LDFLAGS := $(filter-out -L$(top_builddir)/src/common -L$(top_builddir)/src/port,$(LDFLAGS)) override CPPFLAGS += -DVAL_CONFIGURE="\"$(configure_args)\"" override CPPFLAGS += -DVAL_CC="\"$(CC)\"" override CPPFLAGS += -DVAL_CPPFLAGS="\"$(STD_CPPFLAGS)\"" @@ -40,8 +41,12 @@ override CPPFLAGS += -DVAL_LDFLAGS_EX="\"$(LDFLAGS_EX)\"" override CPPFLAGS += -DVAL_LDFLAGS_SL="\"$(LDFLAGS_SL)\"" override CPPFLAGS += -DVAL_LIBS="\"$(LIBS)\"" -OBJS_COMMON = base64.o config_info.o controldata_utils.o exec.o ip.o \ - keywords.o md5.o pg_lzcompress.o pgfnames.o psprintf.o relpath.o \ +override CPPFLAGS := -DFRONTEND $(CPPFLAGS) +LIBS += $(PTHREAD_LIBS) + +OBJS_COMMON = base64.o config_info.o controldata_utils.o exec.o file_perm.o \ + ip.o keywords.o link-canary.o md5.o pg_lzcompress.o \ + pgfnames.o psprintf.o relpath.o \ rmtree.o saslprep.o scram-common.o string.o unicode_norm.o \ username.o wait_error.o @@ -51,26 +56,48 @@ else OBJS_COMMON += sha2.o endif +# A few files are currently only built for frontend, not server OBJS_FRONTEND = $(OBJS_COMMON) fe_memutils.o file_utils.o restricted_token.o +# foo.o, foo_shlib.o, and foo_srv.o are all built from foo.c +OBJS_SHLIB = $(OBJS_FRONTEND:%.o=%_shlib.o) OBJS_SRV = $(OBJS_COMMON:%.o=%_srv.o) -all: libpgcommon.a libpgcommon_srv.a +all: libpgcommon.a libpgcommon_shlib.a libpgcommon_srv.a # libpgcommon is needed by some contrib install: all installdirs $(INSTALL_STLIB) libpgcommon.a '$(DESTDIR)$(libdir)/libpgcommon.a' + $(INSTALL_STLIB) libpgcommon_shlib.a '$(DESTDIR)$(libdir)/libpgcommon_shlib.a' installdirs: $(MKDIR_P) '$(DESTDIR)$(libdir)' uninstall: rm -f '$(DESTDIR)$(libdir)/libpgcommon.a' + rm -f '$(DESTDIR)$(libdir)/libpgcommon_shlib.a' libpgcommon.a: $(OBJS_FRONTEND) rm -f $@ $(AR) $(AROPT) $@ $^ +# +# Shared library versions of object files +# + +libpgcommon_shlib.a: $(OBJS_SHLIB) + rm -f $@ + $(AR) $(AROPT) $@ $^ + +# Because this uses its own compilation rule, it doesn't use the +# dependency tracking logic from Makefile.global. To make sure that +# dependency tracking works anyway for the *_shlib.o files, depend on +# their *.o siblings as well, which do have proper dependencies. It's +# a hack that might fail someday if there is a *_shlib.o without a +# corresponding *.o, but there seems little reason for that. +%_shlib.o: %.c %.o + $(CC) $(CFLAGS) $(CFLAGS_SL) $(CPPFLAGS) -c $< -o $@ + # # Server versions of object files # @@ -86,23 +113,18 @@ libpgcommon_srv.a: $(OBJS_SRV) # a hack that might fail someday if there is a *_srv.o without a # corresponding *.o, but it works for now. %_srv.o: %.c %.o - $(CC) $(CFLAGS) $(subst -DFRONTEND ,, $(CPPFLAGS)) -c $< -o $@ - -$(OBJS_SRV): | submake-errcodes - -.PHONY: submake-errcodes - -submake-errcodes: - $(MAKE) -C ../backend submake-errcodes + $(CC) $(CFLAGS) $(subst -DFRONTEND,, $(CPPFLAGS)) -c $< -o $@ # Dependencies of keywords.o need to be managed explicitly to make sure # that you don't get broken parsing code, even in a non-enable-depend build. -# Note that gram.h isn't required for the frontend version of keywords.o. +# Note that gram.h isn't required for the frontend versions of keywords.o. 
$(top_builddir)/src/include/parser/gram.h: $(top_srcdir)/src/backend/parser/gram.y $(MAKE) -C $(top_builddir)/src/backend $(top_builddir)/src/include/parser/gram.h keywords.o: $(top_srcdir)/src/include/parser/kwlist.h +keywords_shlib.o: $(top_srcdir)/src/include/parser/kwlist.h keywords_srv.o: $(top_builddir)/src/include/parser/gram.h $(top_srcdir)/src/include/parser/kwlist.h clean distclean maintainer-clean: - rm -f libpgcommon.a libpgcommon_srv.a $(OBJS_FRONTEND) $(OBJS_SRV) + rm -f libpgcommon.a libpgcommon_shlib.a libpgcommon_srv.a + rm -f $(OBJS_FRONTEND) $(OBJS_SHLIB) $(OBJS_SRV) diff --git a/src/common/base64.c b/src/common/base64.c index e8e28ecca4..c6fde2a8dd 100644 --- a/src/common/base64.c +++ b/src/common/base64.c @@ -3,7 +3,7 @@ * base64.c * Encoding and decoding routines for base64 without whitespace. * - * Copyright (c) 2001-2017, PostgreSQL Global Development Group + * Copyright (c) 2001-2018, PostgreSQL Global Development Group * * * IDENTIFICATION diff --git a/src/common/config_info.c b/src/common/config_info.c index e0841a5af2..55e688e656 100644 --- a/src/common/config_info.c +++ b/src/common/config_info.c @@ -4,7 +4,7 @@ * Common code for pg_config output * * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * diff --git a/src/common/controldata_utils.c b/src/common/controldata_utils.c index f1a097a974..e24af48f52 100644 --- a/src/common/controldata_utils.c +++ b/src/common/controldata_utils.c @@ -4,7 +4,7 @@ * Common code for control data file output. * * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * @@ -44,6 +44,7 @@ get_controlfile(const char *DataDir, const char *progname, bool *crc_ok_p) int fd; char ControlFilePath[MAXPGPATH]; pg_crc32c crc; + int r; AssertArg(crc_ok_p); @@ -64,18 +65,35 @@ get_controlfile(const char *DataDir, const char *progname, bool *crc_ok_p) } #endif - if (read(fd, ControlFile, sizeof(ControlFileData)) != sizeof(ControlFileData)) + r = read(fd, ControlFile, sizeof(ControlFileData)); + if (r != sizeof(ControlFileData)) + { + if (r < 0) #ifndef FRONTEND - ereport(ERROR, - (errcode_for_file_access(), - errmsg("could not read file \"%s\": %m", ControlFilePath))); + ereport(ERROR, + (errcode_for_file_access(), + errmsg("could not read file \"%s\": %m", ControlFilePath))); #else - { - fprintf(stderr, _("%s: could not read file \"%s\": %s\n"), - progname, ControlFilePath, strerror(errno)); - exit(EXIT_FAILURE); - } + { + fprintf(stderr, _("%s: could not read file \"%s\": %s\n"), + progname, ControlFilePath, strerror(errno)); + exit(EXIT_FAILURE); + } #endif + else +#ifndef FRONTEND + ereport(ERROR, + (errcode(ERRCODE_DATA_CORRUPTED), + errmsg("could not read file \"%s\": read %d of %zu", + ControlFilePath, r, sizeof(ControlFileData)))); +#else + { + fprintf(stderr, _("%s: could not read file \"%s\": read %d of %zu\n"), + progname, ControlFilePath, r, sizeof(ControlFileData)); + exit(EXIT_FAILURE); + } +#endif + } close(fd); diff --git a/src/common/exec.c b/src/common/exec.c index 67bf4d1d79..410dc2df45 100644 --- a/src/common/exec.c +++ b/src/common/exec.c @@ -4,7 +4,7 @@ * Functions for finding and validating executable files * * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * 
Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * @@ -25,14 +25,24 @@ #include #include +/* + * Hacky solution to allow expressing both frontend and backend error reports + * in one macro call. First argument of log_error is an errcode() call of + * some sort (ignored if FRONTEND); the rest are errmsg_internal() arguments, + * i.e. message string and any parameters for it. + * + * Caller must provide the gettext wrapper around the message string, if + * appropriate, so that it gets translated in the FRONTEND case; this + * motivates using errmsg_internal() not errmsg(). We handle appending a + * newline, if needed, inside the macro, so that there's only one translatable + * string per call not two. + */ #ifndef FRONTEND -/* We use only 3- and 4-parameter elog calls in this file, for simplicity */ -/* NOTE: caller must provide gettext call around str! */ -#define log_error(str, param) elog(LOG, str, param) -#define log_error4(str, param, arg1) elog(LOG, str, param, arg1) +#define log_error(errcodefn, ...) \ + ereport(LOG, (errcodefn, errmsg_internal(__VA_ARGS__))) #else -#define log_error(str, param) (fprintf(stderr, str, param), fputc('\n', stderr)) -#define log_error4(str, param, arg1) (fprintf(stderr, str, param, arg1), fputc('\n', stderr)) +#define log_error(errcodefn, ...) \ + (fprintf(stderr, __VA_ARGS__), fputc('\n', stderr)) #endif #ifdef _MSC_VER @@ -124,8 +134,8 @@ find_my_exec(const char *argv0, char *retpath) if (!getcwd(cwd, MAXPGPATH)) { - log_error(_("could not identify current directory: %s"), - strerror(errno)); + log_error(errcode_for_file_access(), + _("could not identify current directory: %m")); return -1; } @@ -143,7 +153,8 @@ find_my_exec(const char *argv0, char *retpath) if (validate_exec(retpath) == 0) return resolve_symlinks(retpath); - log_error(_("invalid binary \"%s\""), retpath); + log_error(errcode(ERRCODE_WRONG_OBJECT_TYPE), + _("invalid binary \"%s\""), retpath); return -1; } @@ -192,14 +203,16 @@ find_my_exec(const char *argv0, char *retpath) case -1: /* wasn't even a candidate, keep looking */ break; case -2: /* found but disqualified */ - log_error(_("could not read binary \"%s\""), + log_error(errcode(ERRCODE_WRONG_OBJECT_TYPE), + _("could not read binary \"%s\""), retpath); break; } } while (*endp); } - log_error(_("could not find a \"%s\" to execute"), argv0); + log_error(errcode(ERRCODE_UNDEFINED_FILE), + _("could not find a \"%s\" to execute"), argv0); return -1; } @@ -238,8 +251,8 @@ resolve_symlinks(char *path) */ if (!getcwd(orig_wd, MAXPGPATH)) { - log_error(_("could not identify current directory: %s"), - strerror(errno)); + log_error(errcode_for_file_access(), + _("could not identify current directory: %m")); return -1; } @@ -254,7 +267,8 @@ resolve_symlinks(char *path) *lsep = '\0'; if (chdir(path) == -1) { - log_error4(_("could not change directory to \"%s\": %s"), path, strerror(errno)); + log_error(errcode_for_file_access(), + _("could not change directory to \"%s\": %m"), path); return -1; } fname = lsep + 1; @@ -266,10 +280,12 @@ resolve_symlinks(char *path) !S_ISLNK(buf.st_mode)) break; + errno = 0; rllen = readlink(fname, link_buf, sizeof(link_buf)); if (rllen < 0 || rllen >= sizeof(link_buf)) { - log_error(_("could not read symbolic link \"%s\""), fname); + log_error(errcode_for_file_access(), + _("could not read symbolic link \"%s\": %m"), fname); return -1; } link_buf[rllen] = '\0'; @@ -281,8 +297,8 @@ resolve_symlinks(char *path) if 
(!getcwd(path, MAXPGPATH)) { - log_error(_("could not identify current directory: %s"), - strerror(errno)); + log_error(errcode_for_file_access(), + _("could not identify current directory: %m")); return -1; } join_path_components(path, path, link_buf); @@ -290,7 +306,8 @@ resolve_symlinks(char *path) if (chdir(orig_wd) == -1) { - log_error4(_("could not change directory to \"%s\": %s"), orig_wd, strerror(errno)); + log_error(errcode_for_file_access(), + _("could not change directory to \"%s\": %m"), orig_wd); return -1; } #endif /* HAVE_READLINK */ @@ -308,7 +325,7 @@ find_other_exec(const char *argv0, const char *target, const char *versionstr, char *retpath) { char cmd[MAXPGPATH]; - char line[100]; + char line[MAXPGPATH]; if (find_my_exec(argv0, retpath) < 0) return -1; @@ -520,17 +537,15 @@ pclose_check(FILE *stream) if (exitstatus == -1) { /* pclose() itself failed, and hopefully set errno */ - log_error(_("pclose failed: %s"), strerror(errno)); + log_error(errcode(ERRCODE_SYSTEM_ERROR), + _("pclose failed: %m")); } else { reason = wait_result_to_str(exitstatus); - log_error("%s", reason); -#ifdef FRONTEND - free(reason); -#else + log_error(errcode(ERRCODE_SYSTEM_ERROR), + "%s", reason); pfree(reason); -#endif } return exitstatus; } @@ -651,19 +666,24 @@ AddUserToTokenDacl(HANDLE hToken) ptdd = (TOKEN_DEFAULT_DACL *) LocalAlloc(LPTR, dwSize); if (ptdd == NULL) { - log_error("could not allocate %lu bytes of memory", dwSize); + log_error(errcode(ERRCODE_OUT_OF_MEMORY), + _("out of memory")); goto cleanup; } if (!GetTokenInformation(hToken, tic, (LPVOID) ptdd, dwSize, &dwSize)) { - log_error("could not get token information: error code %lu", GetLastError()); + log_error(errcode(ERRCODE_SYSTEM_ERROR), + "could not get token information: error code %lu", + GetLastError()); goto cleanup; } } else { - log_error("could not get token information buffer size: error code %lu", GetLastError()); + log_error(errcode(ERRCODE_SYSTEM_ERROR), + "could not get token information buffer size: error code %lu", + GetLastError()); goto cleanup; } } @@ -673,7 +693,9 @@ AddUserToTokenDacl(HANDLE hToken) (DWORD) sizeof(ACL_SIZE_INFORMATION), AclSizeInformation)) { - log_error("could not get ACL information: error code %lu", GetLastError()); + log_error(errcode(ERRCODE_SYSTEM_ERROR), + "could not get ACL information: error code %lu", + GetLastError()); goto cleanup; } @@ -689,13 +711,15 @@ AddUserToTokenDacl(HANDLE hToken) pacl = (PACL) LocalAlloc(LPTR, dwNewAclSize); if (pacl == NULL) { - log_error("could not allocate %lu bytes of memory", dwNewAclSize); + log_error(errcode(ERRCODE_OUT_OF_MEMORY), + _("out of memory")); goto cleanup; } if (!InitializeAcl(pacl, dwNewAclSize, ACL_REVISION)) { - log_error("could not initialize ACL: error code %lu", GetLastError()); + log_error(errcode(ERRCODE_SYSTEM_ERROR), + "could not initialize ACL: error code %lu", GetLastError()); goto cleanup; } @@ -704,13 +728,15 @@ AddUserToTokenDacl(HANDLE hToken) { if (!GetAce(ptdd->DefaultDacl, i, (LPVOID *) &pace)) { - log_error("could not get ACE: error code %lu", GetLastError()); + log_error(errcode(ERRCODE_SYSTEM_ERROR), + "could not get ACE: error code %lu", GetLastError()); goto cleanup; } if (!AddAce(pacl, ACL_REVISION, MAXDWORD, pace, ((PACE_HEADER) pace)->AceSize)) { - log_error("could not add ACE: error code %lu", GetLastError()); + log_error(errcode(ERRCODE_SYSTEM_ERROR), + "could not add ACE: error code %lu", GetLastError()); goto cleanup; } } @@ -718,7 +744,9 @@ AddUserToTokenDacl(HANDLE hToken) /* Add the new ACE for the 
current user */ if (!AddAccessAllowedAceEx(pacl, ACL_REVISION, OBJECT_INHERIT_ACE, GENERIC_ALL, pTokenUser->User.Sid)) { - log_error("could not add access allowed ACE: error code %lu", GetLastError()); + log_error(errcode(ERRCODE_SYSTEM_ERROR), + "could not add access allowed ACE: error code %lu", + GetLastError()); goto cleanup; } @@ -727,7 +755,9 @@ AddUserToTokenDacl(HANDLE hToken) if (!SetTokenInformation(hToken, tic, (LPVOID) &tddNew, dwNewAclSize)) { - log_error("could not set token information: error code %lu", GetLastError()); + log_error(errcode(ERRCODE_SYSTEM_ERROR), + "could not set token information: error code %lu", + GetLastError()); goto cleanup; } @@ -773,13 +803,16 @@ GetTokenUser(HANDLE hToken, PTOKEN_USER *ppTokenUser) if (*ppTokenUser == NULL) { - log_error("could not allocate %lu bytes of memory", dwLength); + log_error(errcode(ERRCODE_OUT_OF_MEMORY), + _("out of memory")); return FALSE; } } else { - log_error("could not get token information buffer size: error code %lu", GetLastError()); + log_error(errcode(ERRCODE_SYSTEM_ERROR), + "could not get token information buffer size: error code %lu", + GetLastError()); return FALSE; } } @@ -793,7 +826,9 @@ GetTokenUser(HANDLE hToken, PTOKEN_USER *ppTokenUser) LocalFree(*ppTokenUser); *ppTokenUser = NULL; - log_error("could not get token information: error code %lu", GetLastError()); + log_error(errcode(ERRCODE_SYSTEM_ERROR), + "could not get token information: error code %lu", + GetLastError()); return FALSE; } diff --git a/src/common/fe_memutils.c b/src/common/fe_memutils.c index fb38067d97..2538661e19 100644 --- a/src/common/fe_memutils.c +++ b/src/common/fe_memutils.c @@ -3,7 +3,7 @@ * fe_memutils.c * memory management support for frontend code * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * diff --git a/src/common/file_perm.c b/src/common/file_perm.c new file mode 100644 index 0000000000..3ce4509089 --- /dev/null +++ b/src/common/file_perm.c @@ -0,0 +1,91 @@ +/*------------------------------------------------------------------------- + * + * File and directory permission routines + * + * + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group + * Portions Copyright (c) 1994, Regents of the University of California + * + * src/common/file_perm.c + * + *------------------------------------------------------------------------- + */ +#include "c.h" + +#include "common/file_perm.h" + +/* Modes for creating directories and files in the data directory */ +int pg_dir_create_mode = PG_DIR_MODE_OWNER; +int pg_file_create_mode = PG_FILE_MODE_OWNER; + +/* + * Mode mask to pass to umask(). This is more of a preventative measure since + * all file/directory creates should be performed using the create modes above. + */ +int pg_mode_mask = PG_MODE_MASK_OWNER; + +/* + * Set create modes and mask to use when writing to PGDATA based on the data + * directory mode passed. If group read/execute are present in the mode, then + * create modes and mask will be relaxed to allow group read/execute on all + * newly created files and directories. 
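The comment above summarizes the policy implemented by the new src/common/file_perm.c: when the data directory itself permits group read/execute, the create modes and umask are relaxed to match. The sketch below shows how a frontend tool might apply that, assuming the common/file_perm.h header and the globals introduced by this patch; the helper name, paths, and error handling are illustrative only.

/* Sketch of a frontend tool adopting the data directory's permission mode
 * before creating a file in it.  common/file_perm.h and its globals come
 * from this patch; open_file_in_datadir is a hypothetical helper. */
#include <fcntl.h>
#include <sys/stat.h>

#include "common/file_perm.h"

static int
open_file_in_datadir(const char *datadir, const char *path)
{
	struct stat st;

	/* Inspect the data directory and derive create modes from its mode. */
	if (stat(datadir, &st) != 0)
		return -1;
	SetDataDirectoryCreatePerm(st.st_mode);

	/* Make sure our umask cannot tighten the chosen mode again. */
	umask(pg_mode_mask);

	/* New files now get group access only if the data directory had it. */
	return open(path, O_WRONLY | O_CREAT | O_EXCL, pg_file_create_mode);
}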
+ */ +void +SetDataDirectoryCreatePerm(int dataDirMode) +{ + /* If the data directory mode has group access */ + if ((PG_DIR_MODE_GROUP & dataDirMode) == PG_DIR_MODE_GROUP) + { + pg_dir_create_mode = PG_DIR_MODE_GROUP; + pg_file_create_mode = PG_FILE_MODE_GROUP; + pg_mode_mask = PG_MODE_MASK_GROUP; + } + /* Else use default permissions */ + else + { + pg_dir_create_mode = PG_DIR_MODE_OWNER; + pg_file_create_mode = PG_FILE_MODE_OWNER; + pg_mode_mask = PG_MODE_MASK_OWNER; + } +} + +#ifdef FRONTEND + +/* + * Get the create modes and mask to use when writing to PGDATA by examining the + * mode of the PGDATA directory and calling SetDataDirectoryCreatePerm(). + * + * Errors are not handled here and should be reported by the application when + * false is returned. + * + * Suppress when on Windows, because there may not be proper support for Unix-y + * file permissions. + */ +bool +GetDataDirectoryCreatePerm(const char *dataDir) +{ +#if !defined(WIN32) && !defined(__CYGWIN__) + struct stat statBuf; + + /* + * If an error occurs getting the mode then return false. The caller is + * responsible for generating an error, if appropriate, indicating that we + * were unable to access the data directory. + */ + if (stat(dataDir, &statBuf) == -1) + return false; + + /* Set permissions */ + SetDataDirectoryCreatePerm(statBuf.st_mode); + return true; +#else /* !defined(WIN32) && !defined(__CYGWIN__) */ + /* + * On Windows, we don't have anything to do here since they don't have + * Unix-y permissions. + */ + return true; +#endif +} + + +#endif /* FRONTEND */ diff --git a/src/common/file_utils.c b/src/common/file_utils.c index 4304058acb..d952bc8c88 100644 --- a/src/common/file_utils.c +++ b/src/common/file_utils.c @@ -5,7 +5,7 @@ * Assorted utility functions to work on files. * * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/common/file_utils.c @@ -222,7 +222,7 @@ pre_sync_fname(const char *fname, bool isdir, const char *progname) { int fd; - fd = open(fname, O_RDONLY | PG_BINARY); + fd = open(fname, O_RDONLY | PG_BINARY, 0); if (fd < 0) { @@ -283,7 +283,7 @@ fsync_fname(const char *fname, bool isdir, const char *progname) * unsupported operations, e.g. opening a directory under Windows), and * logging others. */ - fd = open(fname, flags); + fd = open(fname, flags, 0); if (fd < 0) { if (errno == EACCES || (isdir && errno == EISDIR)) diff --git a/src/common/ip.c b/src/common/ip.c index bb536d3e86..002260ed5a 100644 --- a/src/common/ip.c +++ b/src/common/ip.c @@ -3,7 +3,7 @@ * ip.c * IPv6-aware network access. * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * @@ -233,7 +233,7 @@ getnameinfo_unix(const struct sockaddr_un *sa, int salen, char *service, int servicelen, int flags) { - int ret = -1; + int ret; /* Invalid arguments. 
*/ if (sa == NULL || sa->sun_family != AF_UNIX || @@ -243,14 +243,14 @@ getnameinfo_unix(const struct sockaddr_un *sa, int salen, if (node) { ret = snprintf(node, nodelen, "%s", "[local]"); - if (ret == -1 || ret > nodelen) + if (ret < 0 || ret >= nodelen) return EAI_MEMORY; } if (service) { ret = snprintf(service, servicelen, "%s", sa->sun_path); - if (ret == -1 || ret > servicelen) + if (ret < 0 || ret >= servicelen) return EAI_MEMORY; } diff --git a/src/common/keywords.c b/src/common/keywords.c index a5c6c41cb8..0c0c794c68 100644 --- a/src/common/keywords.c +++ b/src/common/keywords.c @@ -4,7 +4,7 @@ * lexical token lookup for key words in PostgreSQL * * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * diff --git a/src/common/link-canary.c b/src/common/link-canary.c new file mode 100644 index 0000000000..5b4e562220 --- /dev/null +++ b/src/common/link-canary.c @@ -0,0 +1,36 @@ +/*------------------------------------------------------------------------- + * link-canary.c + * Detect whether src/common functions came from frontend or backend. + * + * Copyright (c) 2018, PostgreSQL Global Development Group + * + * IDENTIFICATION + * src/common/link-canary.c + * + *------------------------------------------------------------------------- + */ +#include "c.h" + +#include "common/link-canary.h" + +/* + * This function just reports whether this file was compiled for frontend + * or backend environment. We need this because in some systems, mainly + * ELF-based platforms, it is possible for a shlib (such as libpq) loaded + * into the backend to call a backend function named XYZ in preference to + * the shlib's own function XYZ. That's bad if the two functions don't + * act identically. This exact situation comes up for many functions in + * src/common and src/port, where the same function names exist in both + * libpq and the backend but they don't act quite identically. To verify + * that appropriate measures have been taken to prevent incorrect symbol + * resolution, libpq should test that this function returns true. + */ +bool +pg_link_canary_is_frontend(void) +{ +#ifdef FRONTEND + return true; +#else + return false; +#endif +} diff --git a/src/common/md5.c b/src/common/md5.c index ba65b02af6..c3936618b6 100644 --- a/src/common/md5.c +++ b/src/common/md5.c @@ -10,7 +10,7 @@ * * Sverre H. Huseby * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * IDENTIFICATION @@ -317,7 +317,7 @@ pg_md5_binary(const void *buff, size_t len, void *outbuf) * Output format is "md5" followed by a 32-hex-digit MD5 checksum. * Hence, the output buffer "buf" must be at least 36 bytes long. * - * Returns TRUE if okay, FALSE on error (out of memory). + * Returns true if okay, false on error (out of memory). */ bool pg_md5_encrypt(const char *passwd, const char *salt, size_t salt_len, diff --git a/src/common/pg_lzcompress.c b/src/common/pg_lzcompress.c index 67f570c362..2d25da3a23 100644 --- a/src/common/pg_lzcompress.c +++ b/src/common/pg_lzcompress.c @@ -159,14 +159,14 @@ * scanned for the history add's, otherwise a literal character * is omitted and only his history entry added. 
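The new link-canary.c above gives a shared library such as libpq a way to verify that its calls into src/common resolved to its own frontend copies rather than to same-named backend symbols. A hypothetical startup check built on it could look like the following; the function name and message are illustrative and not the actual libpq code.

/* Hypothetical sanity check using the new link canary: if this frontend
 * library somehow resolved pg_link_canary_is_frontend() to the backend's
 * copy, the function returns false and we refuse to continue. */
#include <stdio.h>
#include <stdbool.h>

#include "common/link-canary.h"		/* header added by this patch */

static bool
check_frontend_linkage(void)
{
	if (!pg_link_canary_is_frontend())
	{
		fprintf(stderr,
				"library is incorrectly linked against backend code\n");
		return false;
	}
	return true;
}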
* - * Acknowledgements: + * Acknowledgments: * * Many thanks to Adisak Pochanayon, who's article about SLZ * inspired me to write the PostgreSQL compression this way. * * Jan Wieck * - * Copyright (c) 1999-2017, PostgreSQL Global Development Group + * Copyright (c) 1999-2018, PostgreSQL Global Development Group * * src/common/pg_lzcompress.c * ---------- diff --git a/src/common/pgfnames.c b/src/common/pgfnames.c index e161d7dc04..ec50a36db7 100644 --- a/src/common/pgfnames.c +++ b/src/common/pgfnames.c @@ -3,7 +3,7 @@ * pgfnames.c * directory handling functions * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * IDENTIFICATION diff --git a/src/common/psprintf.c b/src/common/psprintf.c index 8f5903d519..2cf100f095 100644 --- a/src/common/psprintf.c +++ b/src/common/psprintf.c @@ -4,7 +4,7 @@ * sprintf into an allocated-on-demand buffer * * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * @@ -45,6 +45,7 @@ char * psprintf(const char *fmt,...) { + int save_errno = errno; size_t len = 128; /* initial assumption about buffer size */ for (;;) @@ -60,6 +61,7 @@ psprintf(const char *fmt,...) result = (char *) palloc(len); /* Try to format the data. */ + errno = save_errno; va_start(args, fmt); newlen = pvsnprintf(result, len, fmt, args); va_end(args); @@ -77,7 +79,7 @@ psprintf(const char *fmt,...) * pvsnprintf * * Attempt to format text data under the control of fmt (an sprintf-style - * format string) and insert it into buf (which has length len, len > 0). + * format string) and insert it into buf (which has length len). * * If successful, return the number of bytes emitted, not counting the * trailing zero byte. This will always be strictly less than len. @@ -89,14 +91,14 @@ psprintf(const char *fmt,...) * Other error cases do not return, but exit via elog(ERROR) or exit(). * Hence, this shouldn't be used inside libpq. * - * This function exists mainly to centralize our workarounds for - * non-C99-compliant vsnprintf implementations. Generally, any call that - * pays any attention to the return value should go through here rather - * than calling snprintf or vsnprintf directly. + * Caution: callers must be sure to preserve their entry-time errno + * when looping, in case the fmt contains "%m". * * Note that the semantics of the return value are not exactly C99's. * First, we don't promise that the estimated buffer size is exactly right; * callers must be prepared to loop multiple times to get the right size. + * (Given a C99-compliant vsnprintf, that won't happen, but it is rumored + * that some implementations don't always return the same value ...) * Second, we return the recommended buffer size, not one less than that; * this lets overflow concerns be handled here rather than in the callers. */ @@ -105,28 +107,10 @@ pvsnprintf(char *buf, size_t len, const char *fmt, va_list args) { int nprinted; - Assert(len > 0); - - errno = 0; - - /* - * Assert check here is to catch buggy vsnprintf that overruns the - * specified buffer length. Solaris 7 in 64-bit mode is an example of a - * platform with such a bug. 
- */ -#ifdef USE_ASSERT_CHECKING - buf[len - 1] = '\0'; -#endif - nprinted = vsnprintf(buf, len, fmt, args); - Assert(buf[len - 1] == '\0'); - - /* - * If vsnprintf reports an error other than ENOMEM, fail. The possible - * causes of this are not user-facing errors, so elog should be enough. - */ - if (nprinted < 0 && errno != 0 && errno != ENOMEM) + /* We assume failure means the fmt is bogus, hence hard failure is OK */ + if (unlikely(nprinted < 0)) { #ifndef FRONTEND elog(ERROR, "vsnprintf failed: %m"); @@ -136,42 +120,21 @@ pvsnprintf(char *buf, size_t len, const char *fmt, va_list args) #endif } - /* - * Note: some versions of vsnprintf return the number of chars actually - * stored, not the total space needed as C99 specifies. And at least one - * returns -1 on failure. Be conservative about believing whether the - * print worked. - */ - if (nprinted >= 0 && (size_t) nprinted < len - 1) + if ((size_t) nprinted < len) { /* Success. Note nprinted does not include trailing null. */ return (size_t) nprinted; } - if (nprinted >= 0 && (size_t) nprinted > len) - { - /* - * This appears to be a C99-compliant vsnprintf, so believe its - * estimate of the required space. (If it's wrong, the logic will - * still work, but we may loop multiple times.) Note that the space - * needed should be only nprinted+1 bytes, but we'd better allocate - * one more than that so that the test above will succeed next time. - * - * In the corner case where the required space just barely overflows, - * fall through so that we'll error out below (possibly after - * looping). - */ - if ((size_t) nprinted <= MaxAllocSize - 2) - return nprinted + 2; - } - /* - * Buffer overrun, and we don't know how much space is needed. Estimate - * twice the previous buffer size, but not more than MaxAllocSize; if we - * are already at MaxAllocSize, choke. Note we use this palloc-oriented - * overflow limit even when in frontend. + * We assume a C99-compliant vsnprintf, so believe its estimate of the + * required space, and add one for the trailing null. (If it's wrong, the + * logic will still work, but we may loop multiple times.) + * + * Choke if the required space would exceed MaxAllocSize. Note we use + * this palloc-oriented overflow limit even when in frontend. */ - if (len >= MaxAllocSize) + if (unlikely((size_t) nprinted > MaxAllocSize - 1)) { #ifndef FRONTEND ereport(ERROR, @@ -183,8 +146,5 @@ pvsnprintf(char *buf, size_t len, const char *fmt, va_list args) #endif } - if (len >= MaxAllocSize / 2) - return MaxAllocSize; - - return len * 2; + return nprinted + 1; } diff --git a/src/common/relpath.c b/src/common/relpath.c index c2f36625c1..e8170ed712 100644 --- a/src/common/relpath.c +++ b/src/common/relpath.c @@ -4,7 +4,7 @@ * * This module also contains some logic associated with fork names. 
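The simplified pvsnprintf above relies on C99 vsnprintf semantics: on truncation the return value is the number of bytes that would have been written, so the caller can size the buffer exactly and retry once. The standalone sketch below shows that idiom outside the PostgreSQL allocator and error machinery; the helper name and malloc-based memory handling are illustrative assumptions.

/* Sketch of the C99 vsnprintf sizing idiom: format into a trial buffer, and
 * if the return value reports truncation, allocate exactly the reported
 * size plus one and format again. */
#include <stdarg.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static char *
xsprintf(const char *fmt, ...)
{
	char		stackbuf[128];
	char	   *result;
	va_list		args;
	int			needed;

	/* First attempt, into a small fixed buffer. */
	va_start(args, fmt);
	needed = vsnprintf(stackbuf, sizeof(stackbuf), fmt, args);
	va_end(args);

	if (needed < 0)
		return NULL;			/* format string is broken */

	if ((size_t) needed < sizeof(stackbuf))
	{
		/* It fit; hand back a heap copy including the trailing NUL. */
		result = malloc((size_t) needed + 1);
		if (result != NULL)
			memcpy(result, stackbuf, (size_t) needed + 1);
		return result;
	}

	/* C99 told us the exact size required, excluding the trailing NUL. */
	result = malloc((size_t) needed + 1);
	if (result == NULL)
		return NULL;

	va_start(args, fmt);
	vsnprintf(result, (size_t) needed + 1, fmt, args);
	va_end(args);

	return result;
}

int
main(void)
{
	char	   *s = xsprintf("checkpoint %d of %d complete", 7, 10);

	if (s != NULL)
	{
		puts(s);
		free(s);
	}
	return 0;
}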
* - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * IDENTIFICATION @@ -18,8 +18,7 @@ #include "postgres_fe.h" #endif -#include "catalog/catalog.h" -#include "catalog/pg_tablespace.h" +#include "catalog/pg_tablespace_d.h" #include "common/relpath.h" #include "storage/backendid.h" diff --git a/src/common/restricted_token.c b/src/common/restricted_token.c index 57591aaae2..8c5583da7a 100644 --- a/src/common/restricted_token.c +++ b/src/common/restricted_token.c @@ -4,7 +4,7 @@ * helper routine to ensure restricted token on Windows * * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * diff --git a/src/common/rmtree.c b/src/common/rmtree.c index 09824b5463..fcf63eb953 100644 --- a/src/common/rmtree.c +++ b/src/common/rmtree.c @@ -2,7 +2,7 @@ * * rmtree.c * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * IDENTIFICATION diff --git a/src/common/saslprep.c b/src/common/saslprep.c index 0a3585850b..4cf574fed8 100644 --- a/src/common/saslprep.c +++ b/src/common/saslprep.c @@ -12,7 +12,7 @@ * http://www.ietf.org/rfc/rfc4013.txt * * - * Portions Copyright (c) 2017, PostgreSQL Global Development Group + * Portions Copyright (c) 2017-2018, PostgreSQL Global Development Group * * IDENTIFICATION * src/common/saslprep.c @@ -1081,6 +1081,9 @@ pg_saslprep(const char *input, char **output) unsigned char *p; pg_wchar *wp; + /* Ensure we return *output as NULL on failure */ + *output = NULL; + /* Check that the password isn't stupendously long */ if (strlen(input) > MAX_PASSWORD_LENGTH) { @@ -1112,10 +1115,7 @@ pg_saslprep(const char *input, char **output) */ input_size = pg_utf8_string_len(input); if (input_size < 0) - { - *output = NULL; return SASLPREP_INVALID_UTF8; - } input_chars = ALLOC((input_size + 1) * sizeof(pg_wchar)); if (!input_chars) @@ -1246,6 +1246,11 @@ pg_saslprep(const char *input, char **output) result = ALLOC(result_size + 1); if (!result) goto oom; + + /* + * There are no error exits below here, so the error exit paths don't need + * to worry about possibly freeing "result". + */ p = (unsigned char *) result; for (wp = output_chars; *wp; wp++) { diff --git a/src/common/scram-common.c b/src/common/scram-common.c index e43d035d4d..dc4160714f 100644 --- a/src/common/scram-common.c +++ b/src/common/scram-common.c @@ -6,7 +6,7 @@ * backend, for implement the Salted Challenge Response Authentication * Mechanism (SCRAM), per IETF's RFC 5802. 
* - * Portions Copyright (c) 2017, PostgreSQL Global Development Group + * Portions Copyright (c) 2017-2018, PostgreSQL Global Development Group * * IDENTIFICATION * src/common/scram-common.c @@ -19,12 +19,9 @@ #include "postgres_fe.h" #endif -/* for htonl */ -#include -#include - #include "common/base64.h" #include "common/scram-common.h" +#include "port/pg_bswap.h" #define HMAC_IPAD 0x36 #define HMAC_OPAD 0x5C @@ -109,7 +106,7 @@ scram_SaltedPassword(const char *password, uint8 *result) { int password_len = strlen(password); - uint32 one = htonl(1); + uint32 one = pg_hton32(1); int i, j; uint8 Ui[SCRAM_KEY_LEN]; diff --git a/src/common/sha2.c b/src/common/sha2.c index d7992f1d20..5aa678f8e3 100644 --- a/src/common/sha2.c +++ b/src/common/sha2.c @@ -6,7 +6,7 @@ * This is the set of in-core functions used when there are no other * alternative options like OpenSSL. * - * Portions Copyright (c) 2016-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 2016-2018, PostgreSQL Global Development Group * * IDENTIFICATION * src/common/sha2.c diff --git a/src/common/sha2_openssl.c b/src/common/sha2_openssl.c index b8e2e1139f..362e1318db 100644 --- a/src/common/sha2_openssl.c +++ b/src/common/sha2_openssl.c @@ -6,7 +6,7 @@ * * This should only be used if code is compiled with OpenSSL support. * - * Portions Copyright (c) 2016-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 2016-2018, PostgreSQL Global Development Group * * IDENTIFICATION * src/common/sha2_openssl.c diff --git a/src/common/string.c b/src/common/string.c index 159d9ea7b6..499e81811a 100644 --- a/src/common/string.c +++ b/src/common/string.c @@ -4,7 +4,7 @@ * string handling helpers * * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * @@ -41,3 +41,52 @@ pg_str_endswith(const char *str, const char *end) str += slen - elen; return strcmp(str, end) == 0; } + + +/* + * strtoint --- just like strtol, but returns int not long + */ +int +strtoint(const char *pg_restrict str, char **pg_restrict endptr, int base) +{ + long val; + + val = strtol(str, endptr, base); + if (val != (int) val) + errno = ERANGE; + return (int) val; +} + + +/* + * pg_clean_ascii -- Replace any non-ASCII chars with a '?' char + * + * Modifies the string passed in which must be '\0'-terminated. + * + * This function exists specifically to deal with filtering out + * non-ASCII characters in a few places where the client can provide an almost + * arbitrary string (and it isn't checked to ensure it's a valid username or + * database name or similar) and we don't want to have control characters or other + * things ending up in the log file where server admins might end up with a + * messed up terminal when looking at them. + * + * In general, this function should NOT be used- instead, consider how to handle + * the string without needing to filter out the non-ASCII characters. + * + * Ultimately, we'd like to improve the situation to not require stripping out + * all non-ASCII but perform more intelligent filtering which would allow UTF or + * similar, but it's unclear exactly what we should allow, so stick to ASCII only + * for now. 
+ */ +void +pg_clean_ascii(char *str) +{ + /* Only allow clean ASCII chars in the string */ + char *p; + + for (p = str; *p != '\0'; p++) + { + if (*p < 32 || *p > 126) + *p = '?'; + } +} diff --git a/src/common/unicode/generate-norm_test_table.pl b/src/common/unicode/generate-norm_test_table.pl index 310d32fd29..e3510b5c81 100644 --- a/src/common/unicode/generate-norm_test_table.pl +++ b/src/common/unicode/generate-norm_test_table.pl @@ -5,7 +5,7 @@ # # NormalizationTest.txt is part of the Unicode Character Database. # -# Copyright (c) 2000-2017, PostgreSQL Global Development Group +# Copyright (c) 2000-2018, PostgreSQL Global Development Group use strict; use warnings; @@ -30,7 +30,7 @@ * norm_test_table.h * Test strings for Unicode normalization. * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/common/unicode/norm_test_table.h diff --git a/src/common/unicode/generate-unicode_norm_table.pl b/src/common/unicode/generate-unicode_norm_table.pl index 1d77bb6380..f9cb406f1b 100644 --- a/src/common/unicode/generate-unicode_norm_table.pl +++ b/src/common/unicode/generate-unicode_norm_table.pl @@ -5,7 +5,7 @@ # Input: UnicodeData.txt and CompositionExclusions.txt # Output: unicode_norm_table.h # -# Copyright (c) 2000-2017, PostgreSQL Global Development Group +# Copyright (c) 2000-2018, PostgreSQL Global Development Group use strict; use warnings; @@ -74,7 +74,7 @@ * unicode_norm_table.h * Composition table used for Unicode normalization * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/include/common/unicode_norm_table.h diff --git a/src/common/unicode/norm_test.c b/src/common/unicode/norm_test.c index f1bd99fce4..dac455282e 100644 --- a/src/common/unicode/norm_test.c +++ b/src/common/unicode/norm_test.c @@ -2,10 +2,10 @@ * norm_test.c * Program to test Unicode normalization functions. * - * Portions Copyright (c) 2017, PostgreSQL Global Development Group + * Portions Copyright (c) 2017-2018, PostgreSQL Global Development Group * * IDENTIFICATION - * src/common/unicode_norm.c + * src/common/unicode/norm_test.c * *------------------------------------------------------------------------- */ diff --git a/src/common/unicode_norm.c b/src/common/unicode_norm.c index 5361f5f111..1eacdb298f 100644 --- a/src/common/unicode_norm.c +++ b/src/common/unicode_norm.c @@ -5,7 +5,7 @@ * This implements Unicode normalization, per the documentation at * http://www.unicode.org/reports/tr15/. 
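Stepping back to the strtoint helper added to src/common/string.c above: on LP64 platforms strtol can return values that do not fit in an int without setting errno, which is exactly the gap the helper closes by reporting ERANGE itself. The sketch below restates that behaviour in a standalone program; the function name and test input are illustrative.

/* Sketch of why the strtoint helper exists: narrow strtol's result to int
 * and report overflow via ERANGE, even on platforms where long is wider. */
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

static int
strtoint_sketch(const char *str, char **endptr, int base)
{
	long		val = strtol(str, endptr, base);

	if (val != (int) val)
		errno = ERANGE;			/* flag overflow the same way strtol does */
	return (int) val;
}

int
main(void)
{
	char	   *end;
	int			v;

	errno = 0;
	v = strtoint_sketch("6000000000", &end, 10);	/* larger than INT_MAX */
	if (errno == ERANGE)
		printf("out of range for int (got %d)\n", v);
	return 0;
}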
* - * Portions Copyright (c) 2017, PostgreSQL Global Development Group + * Portions Copyright (c) 2017-2018, PostgreSQL Global Development Group * * IDENTIFICATION * src/common/unicode_norm.c diff --git a/src/common/username.c b/src/common/username.c index 7187bbde42..af382f95a5 100644 --- a/src/common/username.c +++ b/src/common/username.c @@ -3,7 +3,7 @@ * username.c * get user name * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * IDENTIFICATION diff --git a/src/common/wait_error.c b/src/common/wait_error.c index f824a5f2af..941b606999 100644 --- a/src/common/wait_error.c +++ b/src/common/wait_error.c @@ -4,7 +4,7 @@ * Convert a wait/waitpid(2) result code to a human-readable string * * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * diff --git a/src/fe_utils/Makefile b/src/fe_utils/Makefile index ebce38ceb4..5362cffd57 100644 --- a/src/fe_utils/Makefile +++ b/src/fe_utils/Makefile @@ -5,7 +5,7 @@ # This makefile generates a static library, libpgfeutils.a, # for use by client applications # -# Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group +# Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group # Portions Copyright (c) 1994, Regents of the University of California # # IDENTIFICATION @@ -19,7 +19,7 @@ include $(top_builddir)/src/Makefile.global override CPPFLAGS := -DFRONTEND -I$(libpq_srcdir) $(CPPFLAGS) -OBJS = mbprint.o print.o psqlscan.o simple_list.o string_utils.o +OBJS = mbprint.o print.o psqlscan.o simple_list.o string_utils.o conditional.o all: libpgfeutils.a diff --git a/src/bin/psql/conditional.c b/src/fe_utils/conditional.c similarity index 82% rename from src/bin/psql/conditional.c rename to src/fe_utils/conditional.c index 63977ce5dd..db2a0a53b3 100644 --- a/src/bin/psql/conditional.c +++ b/src/fe_utils/conditional.c @@ -1,13 +1,15 @@ -/* - * psql - the PostgreSQL interactive terminal +/*------------------------------------------------------------------------- + * A stack of automaton states to handle nested conditionals. + * + * Copyright (c) 2000-2018, PostgreSQL Global Development Group * - * Copyright (c) 2000-2017, PostgreSQL Global Development Group + * src/fe_utils/conditional.c * - * src/bin/psql/conditional.c + *------------------------------------------------------------------------- */ #include "postgres_fe.h" -#include "conditional.h" +#include "fe_utils/conditional.h" /* * create stack @@ -63,6 +65,28 @@ conditional_stack_pop(ConditionalStack cstack) return true; } +/* + * Returns current stack depth, for debugging purposes. + */ +int +conditional_stack_depth(ConditionalStack cstack) +{ + if (cstack == NULL) + return -1; + else + { + IfStackElem *p = cstack->head; + int depth = 0; + + while (p != NULL) + { + depth++; + p = p->next; + } + return depth; + } +} + /* * Fetch the current state of the top of the stack. 
*/ diff --git a/src/fe_utils/mbprint.c b/src/fe_utils/mbprint.c index 9aabe59f38..07c348ec49 100644 --- a/src/fe_utils/mbprint.c +++ b/src/fe_utils/mbprint.c @@ -3,7 +3,7 @@ * Multibyte character printing support for frontend code * * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/fe_utils/mbprint.c diff --git a/src/fe_utils/print.c b/src/fe_utils/print.c index f756f767e5..cb9a9a0613 100644 --- a/src/fe_utils/print.c +++ b/src/fe_utils/print.c @@ -8,7 +8,7 @@ * pager open/close functions, all that stuff came with it. * * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/fe_utils/print.c @@ -32,7 +32,7 @@ #include "fe_utils/print.h" -#include "catalog/pg_type.h" +#include "catalog/pg_type_d.h" #include "fe_utils/mbprint.h" @@ -2870,7 +2870,9 @@ PageOutput(int lines, const printTableOpt *topt) const char *pagerprog; FILE *pagerpipe; - pagerprog = getenv("PAGER"); + pagerprog = getenv("PSQL_PAGER"); + if (!pagerprog) + pagerprog = getenv("PAGER"); if (!pagerprog) pagerprog = DEFAULT_PAGER; else diff --git a/src/fe_utils/psqlscan.l b/src/fe_utils/psqlscan.l index 27689d72da..fdf49875a7 100644 --- a/src/fe_utils/psqlscan.l +++ b/src/fe_utils/psqlscan.l @@ -23,7 +23,7 @@ * * See psqlscan_int.h for additional commentary. * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * IDENTIFICATION @@ -71,6 +71,8 @@ typedef int YYSTYPE; extern int psql_yyget_column(yyscan_t yyscanner); extern void psql_yyset_column(int column_no, yyscan_t yyscanner); +/* LCOV_EXCL_START */ + %} %option reentrant @@ -296,6 +298,15 @@ identifier {ident_start}{ident_cont}* typecast "::" dot_dot \.\. colon_equals ":=" + +/* + * These operator-like tokens (unlike the above ones) also match the {operator} + * rule, which means that they might be overridden by a longer match if they + * are followed by a comment start or a + or - character. Accordingly, if you + * add to this list, you must also add corresponding code to the {operator} + * block to return the correct token in such cases. (This is not needed in + * psqlscan.l since the token value is ignored there.) + */ equals_greater "=>" less_equals "<=" greater_equals ">=" @@ -745,9 +756,13 @@ other . PQUOTE_SQL_IDENT); } +:\{\?{variable_char}+\} { + psqlscan_test_variable(cur_state, yytext, yyleng); + } + /* * These rules just avoid the need for scanner backup if one of the - * two rules above fails to match completely. + * three rules above fails to match completely. */ :'{variable_char}* { @@ -762,6 +777,17 @@ other . ECHO; } +:\{\?{variable_char}* { + /* Throw back everything but the colon */ + yyless(1); + ECHO; + } +:\{ { + /* Throw back everything but the colon */ + yyless(1); + ECHO; + } + /* * Back to backend-compatible rules. */ @@ -800,20 +826,33 @@ other . * to forbid operator names like '?-' that could not be * sequences of SQL operators. 
*/ - while (nchars > 1 && - (yytext[nchars - 1] == '+' || - yytext[nchars - 1] == '-')) + if (nchars > 1 && + (yytext[nchars - 1] == '+' || + yytext[nchars - 1] == '-')) { int ic; for (ic = nchars - 2; ic >= 0; ic--) { - if (strchr("~!@#^&|`?%", yytext[ic])) + char c = yytext[ic]; + if (c == '~' || c == '!' || c == '@' || + c == '#' || c == '^' || c == '&' || + c == '|' || c == '`' || c == '?' || + c == '%') break; } - if (ic >= 0) - break; /* found a char that makes it OK */ - nchars--; /* else remove the +/-, and check again */ + if (ic < 0) + { + /* + * didn't find a qualifying character, so remove + * all trailing [+-] + */ + do { + nchars--; + } while (nchars > 1 && + (yytext[nchars - 1] == '+' || + yytext[nchars - 1] == '-')); + } } if (nchars < yyleng) @@ -884,6 +923,8 @@ other . %% +/* LCOV_EXCL_STOP */ + /* * Create a lexer working state struct. * @@ -1442,3 +1483,28 @@ psqlscan_escape_variable(PsqlScanState state, const char *txt, int len, psqlscan_emit(state, txt, len); } } + +void +psqlscan_test_variable(PsqlScanState state, const char *txt, int len) +{ + char *varname; + char *value; + + varname = psqlscan_extract_substring(state, txt + 3, len - 4); + if (state->callbacks->get_variable) + value = state->callbacks->get_variable(varname, PQUOTE_PLAIN, + state->cb_passthrough); + else + value = NULL; + free(varname); + + if (value != NULL) + { + psqlscan_emit(state, "TRUE", 4); + free(value); + } + else + { + psqlscan_emit(state, "FALSE", 5); + } +} diff --git a/src/fe_utils/simple_list.c b/src/fe_utils/simple_list.c index 21a2e57297..ef94b34cd1 100644 --- a/src/fe_utils/simple_list.c +++ b/src/fe_utils/simple_list.c @@ -7,7 +7,7 @@ * it's all we need in, eg, pg_dump. * * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/fe_utils/simple_list.c diff --git a/src/fe_utils/string_utils.c b/src/fe_utils/string_utils.c index c7e42ddec9..af0d9d5173 100644 --- a/src/fe_utils/string_utils.c +++ b/src/fe_utils/string_utils.c @@ -6,7 +6,7 @@ * and interpreting backend output. * * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/fe_utils/string_utils.c @@ -138,8 +138,7 @@ fmtId(const char *rawid) } /* - * fmtQualifiedId - convert a qualified name to the proper format for - * the source database. + * fmtQualifiedId - construct a schema-qualified name, with quoting as needed. * * Like fmtId, use the result before calling again. * @@ -147,13 +146,13 @@ fmtId(const char *rawid) * use that buffer until we're finished with calling fmtId(). */ const char * -fmtQualifiedId(int remoteVersion, const char *schema, const char *id) +fmtQualifiedId(const char *schema, const char *id) { PQExpBuffer id_return; PQExpBuffer lcl_pqexp = createPQExpBuffer(); - /* Suppress schema name if fetching from pre-7.3 DB */ - if (remoteVersion >= 70300 && schema && *schema) + /* Some callers might fail to provide a schema name */ + if (schema && *schema) { appendPQExpBuffer(lcl_pqexp, "%s.", fmtId(schema)); } @@ -956,8 +955,9 @@ processSQLNamePattern(PGconn *conn, PQExpBuffer buf, const char *pattern, } /* - * Now decide what we need to emit. Note there will be a leading "^(" in - * the patterns in any case. + * Now decide what we need to emit. 
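A small sketch of the simplified fmtQualifiedId() call from the string_utils.c hunk above, assuming frontend code that includes fe_utils/string_utils.h; print_qualified_name is only an illustrative wrapper. With the remoteVersion parameter gone, the schema is qualified whenever one is supplied, and, as the comment notes, the result must be consumed before the next fmtId()/fmtQualifiedId() call.

#include "postgres_fe.h"
#include "fe_utils/string_utils.h"

/*
 * Sketch: print a possibly schema-qualified relation name.  The returned
 * string lives in a shared buffer, so use it before calling again.
 */
static void
print_qualified_name(const char *schema, const char *relname)
{
	printf("%s\n", fmtQualifiedId(schema, relname));
}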
We may run under a hostile + * search_path, so qualify EVERY name. Note there will be a leading "^(" + * in the patterns in any case. */ if (namebuf.len > 2) { @@ -970,15 +970,18 @@ processSQLNamePattern(PGconn *conn, PQExpBuffer buf, const char *pattern, WHEREAND(); if (altnamevar) { - appendPQExpBuffer(buf, "(%s ~ ", namevar); + appendPQExpBuffer(buf, + "(%s OPERATOR(pg_catalog.~) ", namevar); appendStringLiteralConn(buf, namebuf.data, conn); - appendPQExpBuffer(buf, "\n OR %s ~ ", altnamevar); + appendPQExpBuffer(buf, + "\n OR %s OPERATOR(pg_catalog.~) ", + altnamevar); appendStringLiteralConn(buf, namebuf.data, conn); appendPQExpBufferStr(buf, ")\n"); } else { - appendPQExpBuffer(buf, "%s ~ ", namevar); + appendPQExpBuffer(buf, "%s OPERATOR(pg_catalog.~) ", namevar); appendStringLiteralConn(buf, namebuf.data, conn); appendPQExpBufferChar(buf, '\n'); } @@ -994,7 +997,7 @@ processSQLNamePattern(PGconn *conn, PQExpBuffer buf, const char *pattern, if (strcmp(schemabuf.data, "^(.*)$") != 0 && schemavar) { WHEREAND(); - appendPQExpBuffer(buf, "%s ~ ", schemavar); + appendPQExpBuffer(buf, "%s OPERATOR(pg_catalog.~) ", schemavar); appendStringLiteralConn(buf, schemabuf.data, conn); appendPQExpBufferChar(buf, '\n'); } diff --git a/src/include/.gitignore b/src/include/.gitignore index 49d108dbed..51819fb4dd 100644 --- a/src/include/.gitignore +++ b/src/include/.gitignore @@ -3,4 +3,3 @@ /pg_config.h /pg_config_ext.h /pg_config_os.h -/dynloader.h diff --git a/src/include/Makefile b/src/include/Makefile index a689d352b6..6bdfd7db91 100644 --- a/src/include/Makefile +++ b/src/include/Makefile @@ -19,8 +19,9 @@ all: pg_config.h pg_config_ext.h pg_config_os.h # Subdirectories containing installable headers SUBDIRS = access bootstrap catalog commands common datatype \ executor fe_utils foreign \ - lib libpq mb nodes optimizer parser postmaster regex replication \ - rewrite statistics storage tcop snowball snowball/libstemmer tsearch \ + lib libpq mb nodes optimizer parser partitioning postmaster \ + regex replication rewrite \ + statistics storage tcop snowball snowball/libstemmer tsearch \ tsearch/dicts utils port port/atomics port/win32 port/win32_msvc \ port/win32_msvc/sys port/win32/arpa port/win32/netinet \ port/win32/sys portability @@ -47,18 +48,21 @@ install: all installdirs $(INSTALL_DATA) utils/fmgroids.h '$(DESTDIR)$(includedir_server)/utils' $(INSTALL_DATA) utils/fmgrprotos.h '$(DESTDIR)$(includedir_server)/utils' # We don't use INSTALL_DATA for performance reasons --- there are a lot of files - cp $(srcdir)/*.h '$(DESTDIR)$(includedir_server)'/ || exit; \ - chmod $(INSTALL_DATA_MODE) '$(DESTDIR)$(includedir_server)'/*.h || exit; \ +# (in fact, we have to take some pains to avoid overlength shell commands here) + cp $(srcdir)/*.h '$(DESTDIR)$(includedir_server)'/ for dir in $(SUBDIRS); do \ cp $(srcdir)/$$dir/*.h '$(DESTDIR)$(includedir_server)'/$$dir/ || exit; \ - chmod $(INSTALL_DATA_MODE) '$(DESTDIR)$(includedir_server)'/$$dir/*.h || exit; \ done ifeq ($(vpath_build),yes) - for file in dynloader.h catalog/schemapg.h parser/gram.h storage/lwlocknames.h utils/probes.h; do \ + for file in catalog/schemapg.h catalog/pg_*_d.h parser/gram.h storage/lwlocknames.h utils/probes.h; do \ cp $$file '$(DESTDIR)$(includedir_server)'/$$file || exit; \ - chmod $(INSTALL_DATA_MODE) '$(DESTDIR)$(includedir_server)'/$$file || exit; \ done endif + cd '$(DESTDIR)$(includedir_server)' && chmod $(INSTALL_DATA_MODE) *.h + for dir in $(SUBDIRS); do \ + cd '$(DESTDIR)$(includedir_server)'/$$dir || exit; \ + 
chmod $(INSTALL_DATA_MODE) *.h || exit; \ + done installdirs: $(MKDIR_P) '$(DESTDIR)$(includedir)/libpq' '$(DESTDIR)$(includedir_internal)/libpq' @@ -73,7 +77,9 @@ uninstall: clean: - rm -f utils/fmgroids.h utils/fmgrprotos.h utils/errcodes.h parser/gram.h utils/probes.h catalog/schemapg.h + rm -f utils/fmgroids.h utils/fmgrprotos.h utils/errcodes.h utils/header-stamp + rm -f parser/gram.h storage/lwlocknames.h utils/probes.h + rm -f catalog/schemapg.h catalog/pg_*_d.h catalog/header-stamp distclean maintainer-clean: clean - rm -f pg_config.h pg_config_ext.h pg_config_os.h dynloader.h stamp-h stamp-ext-h + rm -f pg_config.h pg_config_ext.h pg_config_os.h stamp-h stamp-ext-h diff --git a/src/include/access/amapi.h b/src/include/access/amapi.h index 0db4fc73ac..14526a6bb2 100644 --- a/src/include/access/amapi.h +++ b/src/include/access/amapi.h @@ -3,7 +3,7 @@ * amapi.h * API for Postgres index access methods. * - * Copyright (c) 2015-2017, PostgreSQL Global Development Group + * Copyright (c) 2015-2018, PostgreSQL Global Development Group * * src/include/access/amapi.h * @@ -50,7 +50,8 @@ typedef enum IndexAMProperty AMPROP_CAN_ORDER, /* AM properties */ AMPROP_CAN_UNIQUE, AMPROP_CAN_MULTI_COL, - AMPROP_CAN_EXCLUDE + AMPROP_CAN_EXCLUDE, + AMPROP_CAN_INCLUDE } IndexAMProperty; @@ -191,9 +192,17 @@ typedef struct IndexAmRoutine bool ampredlocks; /* does AM support parallel scan? */ bool amcanparallel; + /* does AM support columns included with clause INCLUDE? */ + bool amcaninclude; /* type of data stored in index, or InvalidOid if variable */ Oid amkeytype; + /* + * If you add new properties to either the above or the below lists, then + * they should also (usually) be exposed via the property API (see + * IndexAMProperty at the top of the file, and utils/adt/amutils.c). + */ + /* interface functions */ ambuild_function ambuild; ambuildempty_function ambuildempty; diff --git a/src/include/access/amvalidate.h b/src/include/access/amvalidate.h index 04b7429a78..e50afbd556 100644 --- a/src/include/access/amvalidate.h +++ b/src/include/access/amvalidate.h @@ -3,7 +3,7 @@ * amvalidate.h * Support routines for index access methods' amvalidate functions. * - * Copyright (c) 2016-2017, PostgreSQL Global Development Group + * Copyright (c) 2016-2018, PostgreSQL Global Development Group * * src/include/access/amvalidate.h * diff --git a/src/include/access/attnum.h b/src/include/access/attnum.h index d23888b098..c45a1acaaa 100644 --- a/src/include/access/attnum.h +++ b/src/include/access/attnum.h @@ -4,7 +4,7 @@ * POSTGRES attribute number definitions. 
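To connect the new amcaninclude flag and AMPROP_CAN_INCLUDE property added to amapi.h above, here is a hypothetical fragment of an index AM handler; make_sketch_amroutine and the particular flag values are illustrative only, and a real handler fills in every IndexAmRoutine field and support callback.

#include "postgres.h"
#include "access/amapi.h"
#include "nodes/nodes.h"

/*
 * Hypothetical sketch: an AM advertises support for INCLUDE columns by
 * setting amcaninclude, which the property API then reports as
 * AMPROP_CAN_INCLUDE.
 */
static IndexAmRoutine *
make_sketch_amroutine(void)
{
	IndexAmRoutine *amroutine = makeNode(IndexAmRoutine);

	amroutine->amcanunique = true;	/* e.g. a btree-like AM */
	amroutine->amcaninclude = true; /* non-key INCLUDE columns supported */
	/* ... all other fields and support callbacks omitted in this sketch ... */
	return amroutine;
}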
* * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/include/access/attnum.h diff --git a/src/include/access/brin.h b/src/include/access/brin.h index 61a38804ca..10999a38b5 100644 --- a/src/include/access/brin.h +++ b/src/include/access/brin.h @@ -1,7 +1,7 @@ /* * AM-callable functions for BRIN indexes * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * IDENTIFICATION diff --git a/src/include/access/brin_internal.h b/src/include/access/brin_internal.h index 3ed67438b2..d3134f9dcd 100644 --- a/src/include/access/brin_internal.h +++ b/src/include/access/brin_internal.h @@ -2,7 +2,7 @@ * brin_internal.h * internal declarations for BRIN indexes * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * IDENTIFICATION diff --git a/src/include/access/brin_page.h b/src/include/access/brin_page.h index bf03a6e9f8..82d5972c85 100644 --- a/src/include/access/brin_page.h +++ b/src/include/access/brin_page.h @@ -2,7 +2,7 @@ * brin_page.h * Prototypes and definitions for BRIN page layouts * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * IDENTIFICATION diff --git a/src/include/access/brin_pageops.h b/src/include/access/brin_pageops.h index e0f5641635..5189d5ddc2 100644 --- a/src/include/access/brin_pageops.h +++ b/src/include/access/brin_pageops.h @@ -2,7 +2,7 @@ * brin_pageops.h * Prototypes for operating on BRIN pages. * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * IDENTIFICATION @@ -33,6 +33,6 @@ extern bool brin_start_evacuating_page(Relation idxRel, Buffer buf); extern void brin_evacuate_page(Relation idxRel, BlockNumber pagesPerRange, BrinRevmap *revmap, Buffer buf); -extern bool brin_page_cleanup(Relation idxrel, Buffer buf); +extern void brin_page_cleanup(Relation idxrel, Buffer buf); #endif /* BRIN_PAGEOPS_H */ diff --git a/src/include/access/brin_revmap.h b/src/include/access/brin_revmap.h index ddd87e040b..4dd844888f 100644 --- a/src/include/access/brin_revmap.h +++ b/src/include/access/brin_revmap.h @@ -2,7 +2,7 @@ * brin_revmap.h * Prototypes for BRIN reverse range maps * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * IDENTIFICATION diff --git a/src/include/access/brin_tuple.h b/src/include/access/brin_tuple.h index 6545c0a6ff..2adaf9125e 100644 --- a/src/include/access/brin_tuple.h +++ b/src/include/access/brin_tuple.h @@ -2,7 +2,7 @@ * brin_tuple.h * Declarations for dealing with BRIN-specific tuples. 
* - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * IDENTIFICATION diff --git a/src/include/access/brin_xlog.h b/src/include/access/brin_xlog.h index 10e90d3c78..40e9772c89 100644 --- a/src/include/access/brin_xlog.h +++ b/src/include/access/brin_xlog.h @@ -4,7 +4,7 @@ * POSTGRES BRIN access XLOG definitions. * * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/include/access/brin_xlog.h diff --git a/src/include/access/bufmask.h b/src/include/access/bufmask.h index 95c6c3ae02..c00be32ff6 100644 --- a/src/include/access/bufmask.h +++ b/src/include/access/bufmask.h @@ -7,7 +7,7 @@ * individual rmgr, but we make things easier by providing some * common routines to handle cases which occur in multiple rmgrs. * - * Portions Copyright (c) 2016-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 2016-2018, PostgreSQL Global Development Group * * src/include/access/bufmask.h * @@ -23,7 +23,7 @@ /* Marker used to mask pages consistently */ #define MASK_MARKER 0 -extern void mask_page_lsn(Page page); +extern void mask_page_lsn_and_checksum(Page page); extern void mask_page_hint_bits(Page page); extern void mask_unused_space(Page page); extern void mask_lp_flags(Page page); diff --git a/src/include/access/clog.h b/src/include/access/clog.h index 7bae0902b5..7681ed90ae 100644 --- a/src/include/access/clog.h +++ b/src/include/access/clog.h @@ -3,7 +3,7 @@ * * PostgreSQL transaction-commit-log manager * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/include/access/clog.h diff --git a/src/include/access/commit_ts.h b/src/include/access/commit_ts.h index 31936faf08..2f40d59695 100644 --- a/src/include/access/commit_ts.h +++ b/src/include/access/commit_ts.h @@ -3,7 +3,7 @@ * * PostgreSQL commit timestamp manager * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/include/access/commit_ts.h diff --git a/src/include/access/genam.h b/src/include/access/genam.h index b56a44f902..534fac7bf2 100644 --- a/src/include/access/genam.h +++ b/src/include/access/genam.h @@ -4,7 +4,7 @@ * POSTGRES generalized index access method definitions. 
* * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/include/access/genam.h @@ -174,6 +174,9 @@ extern RegProcedure index_getprocid(Relation irel, AttrNumber attnum, uint16 procnum); extern FmgrInfo *index_getprocinfo(Relation irel, AttrNumber attnum, uint16 procnum); +extern void index_store_float8_orderby_distances(IndexScanDesc scan, + Oid *orderByTypes, double *distances, + bool recheckOrderBy); /* * index access method support routines (in genam.c) diff --git a/src/include/access/generic_xlog.h b/src/include/access/generic_xlog.h index 02696141ea..b23e1f684b 100644 --- a/src/include/access/generic_xlog.h +++ b/src/include/access/generic_xlog.h @@ -4,7 +4,7 @@ * Generic xlog API definition. * * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/include/access/generic_xlog.h diff --git a/src/include/access/gin.h b/src/include/access/gin.h index ec83058095..3d8a130b69 100644 --- a/src/include/access/gin.h +++ b/src/include/access/gin.h @@ -2,7 +2,7 @@ * gin.h * Public header file for Generalized Inverted Index access method. * - * Copyright (c) 2006-2017, PostgreSQL Global Development Group + * Copyright (c) 2006-2018, PostgreSQL Global Development Group * * src/include/access/gin.h *-------------------------------------------------------------------------- @@ -51,8 +51,8 @@ typedef struct GinStatsData /* * A ternary value used by tri-consistent functions. * - * For convenience, this is compatible with booleans. A boolean can be - * safely cast to a GinTernaryValue. + * This must be of the same size as a bool because some code will cast a + * pointer to a bool to a pointer to a GinTernaryValue. */ typedef char GinTernaryValue; diff --git a/src/include/access/gin_private.h b/src/include/access/gin_private.h index adfdb0c6d9..81bf8734ce 100644 --- a/src/include/access/gin_private.h +++ b/src/include/access/gin_private.h @@ -2,7 +2,7 @@ * gin_private.h * header file for postgres inverted index access method implementation. 
* - * Copyright (c) 2006-2017, PostgreSQL Global Development Group + * Copyright (c) 2006-2018, PostgreSQL Global Development Group * * src/include/access/gin_private.h *-------------------------------------------------------------------------- @@ -217,7 +217,7 @@ extern ItemPointer GinDataLeafPageGetItems(Page page, int *nitems, ItemPointerDa extern int GinDataLeafPageGetItemsToTbm(Page page, TIDBitmap *tbm); extern BlockNumber createPostingTree(Relation index, ItemPointerData *items, uint32 nitems, - GinStatsData *buildStats); + GinStatsData *buildStats, Buffer entrybuffer); extern void GinDataPageAddPostingItem(Page page, PostingItem *data, OffsetNumber offset); extern void GinPageDeletePostingItem(Page page, OffsetNumber offset); extern void ginInsertItemPointers(Relation index, BlockNumber rootBlkno, @@ -225,7 +225,6 @@ extern void ginInsertItemPointers(Relation index, BlockNumber rootBlkno, GinStatsData *buildStats); extern GinBtreeStack *ginScanBeginPostingTree(GinBtree btree, Relation index, BlockNumber rootBlkno, Snapshot snapshot); extern void ginDataFillRoot(GinBtree btree, Page root, BlockNumber lblkno, Page lpage, BlockNumber rblkno, Page rpage); -extern void ginPrepareDataScan(GinBtree btree, Relation index, BlockNumber rootBlkno); /* * This is declared in ginvacuum.c, but is passed between ginVacuumItemPointers @@ -300,7 +299,7 @@ typedef struct GinScanKeyData /* * Match status data. curItem is the TID most recently tested (could be a - * lossy-page pointer). curItemMatches is TRUE if it passes the + * lossy-page pointer). curItemMatches is true if it passes the * consistentFn test; if so, recheckCurItem is the recheck flag. * isFinished means that all the input entry streams are finished, so this * key cannot succeed for any later TIDs. @@ -392,7 +391,7 @@ extern bool ginvalidate(Oid opclassoid); /* ginbulk.c */ typedef struct GinEntryAccumulator { - RBNode rbnode; + RBTNode rbtnode; Datum key; GinNullCategory category; OffsetNumber attnum; @@ -439,7 +438,7 @@ extern void ginHeapTupleFastCollect(GinState *ginstate, OffsetNumber attnum, Datum value, bool isNull, ItemPointer ht_ctid); extern void ginInsertCleanup(GinState *ginstate, bool full_clean, - bool fill_fsm, IndexBulkDeleteResult *stats); + bool fill_fsm, bool forceCleanup, IndexBulkDeleteResult *stats); /* ginpostinglist.c */ diff --git a/src/include/access/ginblock.h b/src/include/access/ginblock.h index 114370c7d7..553566529a 100644 --- a/src/include/access/ginblock.h +++ b/src/include/access/ginblock.h @@ -2,7 +2,7 @@ * ginblock.h * details of structures stored in GIN index blocks * - * Copyright (c) 2006-2017, PostgreSQL Global Development Group + * Copyright (c) 2006-2018, PostgreSQL Global Development Group * * src/include/access/ginblock.h *-------------------------------------------------------------------------- @@ -188,8 +188,11 @@ typedef struct /* * Category codes to distinguish placeholder nulls from ordinary NULL keys. - * Note that the datatype size and the first two code values are chosen to be - * compatible with the usual usage of bool isNull flags. + * + * The first two code values were chosen to be compatible with the usual usage + * of bool isNull flags. However, casting between bool and GinNullCategory is + * risky because of the possibility of different bit patterns and type sizes, + * so it is no longer done. * * GIN_CAT_EMPTY_QUERY is never stored in the index; and notice that it is * chosen to sort before not after regular key values. 
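The two GIN comments above both hinge on how these types relate to bool, so a brief standalone illustration may help; the typedefs and GIN_CAT_* values here are assumptions for the sketch, not taken from this patch. The ternary type only needs to match bool's size, while null categories should be converted by value rather than through pointer casts.

#include <assert.h>
#include <stdbool.h>
#include <stdio.h>

typedef char GinTernaryValue;		/* assumed tri-state representation */
typedef signed char GinNullCategory;	/* assumed category representation */

#define GIN_CAT_NORM_KEY	0		/* ordinary key value */
#define GIN_CAT_NULL_KEY	1		/* placeholder for a NULL key */

/* Some callers cast bool * to GinTernaryValue *, so the sizes must match. */
static_assert(sizeof(GinTernaryValue) == sizeof(bool),
			  "GinTernaryValue must be the same size as bool");

/*
 * By contrast, bool and GinNullCategory are not assumed to share a size or
 * bit pattern, so convert by value instead of casting pointers.
 */
static GinNullCategory
category_from_isnull(bool isNull)
{
	return isNull ? GIN_CAT_NULL_KEY : GIN_CAT_NORM_KEY;
}

int
main(void)
{
	printf("%d %d\n", category_from_isnull(false), category_from_isnull(true));
	return 0;
}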
diff --git a/src/include/access/ginxlog.h b/src/include/access/ginxlog.h index 42e0ae90c3..64a3c9e18b 100644 --- a/src/include/access/ginxlog.h +++ b/src/include/access/ginxlog.h @@ -2,7 +2,7 @@ * ginxlog.h * header file for postgres inverted index xlog implementation. * - * Copyright (c) 2006-2017, PostgreSQL Global Development Group + * Copyright (c) 2006-2018, PostgreSQL Global Development Group * * src/include/access/ginxlog.h *-------------------------------------------------------------------------- diff --git a/src/include/access/gist.h b/src/include/access/gist.h index 83642189db..827566dc6e 100644 --- a/src/include/access/gist.h +++ b/src/include/access/gist.h @@ -6,7 +6,7 @@ * changes should be made with care. * * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/include/access/gist.h diff --git a/src/include/access/gist_private.h b/src/include/access/gist_private.h index bfef2df420..36ed7244ba 100644 --- a/src/include/access/gist_private.h +++ b/src/include/access/gist_private.h @@ -4,7 +4,7 @@ * private declarations for GiST -- declarations related to the * internal implementation of GiST, not the public API * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/include/access/gist_private.h @@ -503,7 +503,7 @@ extern void gistSplitByKey(Relation r, Page page, IndexTuple *itup, /* gistbuild.c */ extern IndexBuildResult *gistbuild(Relation heap, Relation index, struct IndexInfo *indexInfo); -extern void gistValidateBufferingOption(char *value); +extern void gistValidateBufferingOption(const char *value); /* gistbuildbuffers.c */ extern GISTBuildBuffers *gistInitBuildBuffers(int pagesPerBuffer, int levelStep, diff --git a/src/include/access/gistscan.h b/src/include/access/gistscan.h index 2aea6ad309..e04409afc0 100644 --- a/src/include/access/gistscan.h +++ b/src/include/access/gistscan.h @@ -4,7 +4,7 @@ * routines defined in access/gist/gistscan.c * * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/include/access/gistscan.h diff --git a/src/include/access/gistxlog.h b/src/include/access/gistxlog.h index 3b126eca2a..1a2b9496d0 100644 --- a/src/include/access/gistxlog.h +++ b/src/include/access/gistxlog.h @@ -3,7 +3,7 @@ * gistxlog.h * gist xlog routines * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/include/access/gistxlog.h diff --git a/src/include/access/hash.h b/src/include/access/hash.h index 72fce3038c..02ef67c974 100644 --- a/src/include/access/hash.h +++ b/src/include/access/hash.h @@ -4,7 +4,7 @@ * header file for postgres hash access method implementation * * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/include/access/hash.h @@ -38,6 +38,17 @@ typedef uint32 Bucket; #define 
BUCKET_TO_BLKNO(metap,B) \ ((BlockNumber) ((B) + ((B) ? (metap)->hashm_spares[_hash_spareindex((B)+1)-1] : 0)) + 1) +/* + * Rotate the high 32 bits and the low 32 bits separately. The standard + * hash function sometimes rotates the low 32 bits by one bit when + * combining elements. We want extended hash functions to be compatible with + * that algorithm when the seed is 0, so we can't just do a normal rotation. + * This works, though. + */ +#define ROTATE_HIGH_AND_LOW_32BITS(v) \ + ((((v) << 1) & UINT64CONST(0xfffffffefffffffe)) | \ + (((v) >> 31) & UINT64CONST(0x100000001))) + /* * Special space for hash index pages. * @@ -103,6 +114,51 @@ typedef struct HashScanPosItem /* what we remember about each match */ OffsetNumber indexOffset; /* index item's location within page */ } HashScanPosItem; +typedef struct HashScanPosData +{ + Buffer buf; /* if valid, the buffer is pinned */ + BlockNumber currPage; /* current hash index page */ + BlockNumber nextPage; /* next overflow page */ + BlockNumber prevPage; /* prev overflow or bucket page */ + + /* + * The items array is always ordered in index order (ie, increasing + * indexoffset). When scanning backwards it is convenient to fill the + * array back-to-front, so we start at the last slot and fill downwards. + * Hence we need both a first-valid-entry and a last-valid-entry counter. + * itemIndex is a cursor showing which entry was last returned to caller. + */ + int firstItem; /* first valid index in items[] */ + int lastItem; /* last valid index in items[] */ + int itemIndex; /* current index in items[] */ + + HashScanPosItem items[MaxIndexTuplesPerPage]; /* MUST BE LAST */ +} HashScanPosData; + +#define HashScanPosIsPinned(scanpos) \ +( \ + AssertMacro(BlockNumberIsValid((scanpos).currPage) || \ + !BufferIsValid((scanpos).buf)), \ + BufferIsValid((scanpos).buf) \ +) + +#define HashScanPosIsValid(scanpos) \ +( \ + AssertMacro(BlockNumberIsValid((scanpos).currPage) || \ + !BufferIsValid((scanpos).buf)), \ + BlockNumberIsValid((scanpos).currPage) \ +) + +#define HashScanPosInvalidate(scanpos) \ + do { \ + (scanpos).buf = InvalidBuffer; \ + (scanpos).currPage = InvalidBlockNumber; \ + (scanpos).nextPage = InvalidBlockNumber; \ + (scanpos).prevPage = InvalidBlockNumber; \ + (scanpos).firstItem = 0; \ + (scanpos).lastItem = 0; \ + (scanpos).itemIndex = 0; \ + } while (0); /* * HashScanOpaqueData is private state for a hash index scan. @@ -112,14 +168,6 @@ typedef struct HashScanOpaqueData /* Hash value of the scan key, ie, the hash key we seek */ uint32 hashso_sk_hash; - /* - * We also want to remember which buffer we're currently examining in the - * scan. We keep the buffer pinned (but not locked) across hashgettuple - * calls, in order to avoid doing a ReadBuffer() for every tuple in the - * index. 
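A standalone check of the ROTATE_HIGH_AND_LOW_32BITS() arithmetic defined above; UINT64_C and the sample value are just for the sketch. Rotating the two 32-bit halves independently leaves the low half identical to a plain 32-bit rotate-left-by-one, which is what keeps seed-0 extended hashing compatible with the standard hash code.

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Same arithmetic as the macro above, restated with <stdint.h> constants. */
#define ROTATE_HIGH_AND_LOW_32BITS(v) \
	((((v) << 1) & UINT64_C(0xfffffffefffffffe)) | \
	 (((v) >> 31) & UINT64_C(0x100000001)))

/* Plain 32-bit rotate-left-by-one, as the standard hash code uses. */
static uint32_t
rotate32_left1(uint32_t x)
{
	return (x << 1) | (x >> 31);
}

int
main(void)
{
	uint64_t	v = UINT64_C(0x8000000180000001);
	uint64_t	r = ROTATE_HIGH_AND_LOW_32BITS(v);

	/* The low halves agree, so seed-0 extended hashing stays compatible. */
	printf("rotated     = %016" PRIx64 "\n", r);
	printf("low 32 bits = %08" PRIx32 " vs %08" PRIx32 "\n",
		   (uint32_t) r, rotate32_left1((uint32_t) v));
	return 0;
}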
- */ - Buffer hashso_curbuf; - /* remember the buffer associated with primary bucket */ Buffer hashso_bucket_buf; @@ -130,12 +178,6 @@ typedef struct HashScanOpaqueData */ Buffer hashso_split_bucket_buf; - /* Current position of the scan, as an index TID */ - ItemPointerData hashso_curpos; - - /* Current position of the scan, as a heap TID */ - ItemPointerData hashso_heappos; - /* Whether scan starts on bucket being populated due to split */ bool hashso_buc_populated; @@ -145,8 +187,14 @@ typedef struct HashScanOpaqueData */ bool hashso_buc_split; /* info about killed items if any (killedItems is NULL if never used) */ - HashScanPosItem *killedItems; /* tids and offset numbers of killed items */ + int *killedItems; /* currPos.items indexes of killed items */ int numKilled; /* number of currently stored items */ + + /* + * Identify all the matching items on a page and save them in + * HashScanPosData + */ + HashScanPosData currPos; /* current position data */ } HashScanOpaqueData; typedef HashScanOpaqueData *HashScanOpaque; @@ -182,16 +230,19 @@ typedef HashScanOpaqueData *HashScanOpaque; * * There is no particular upper limit on the size of mapp[], other than * needing to fit into the metapage. (With 8K block size, 1024 bitmaps - * limit us to 256 GB of overflow space...) + * limit us to 256 GB of overflow space...). For smaller block size we + * can not use 1024 bitmaps as it will lead to the meta page data crossing + * the block size boundary. So we use BLCKSZ to determine the maximum number + * of bitmaps. */ -#define HASH_MAX_BITMAPS 1024 +#define HASH_MAX_BITMAPS Min(BLCKSZ / 8, 1024) #define HASH_SPLITPOINT_PHASE_BITS 2 #define HASH_SPLITPOINT_PHASES_PER_GRP (1 << HASH_SPLITPOINT_PHASE_BITS) #define HASH_SPLITPOINT_PHASE_MASK (HASH_SPLITPOINT_PHASES_PER_GRP - 1) #define HASH_SPLITPOINT_GROUPS_WITH_ONE_PHASE 10 -/* defines max number of splitpoit phases a hash index can have */ +/* defines max number of splitpoint phases a hash index can have */ #define HASH_MAX_SPLITPOINT_GROUP 32 #define HASH_MAX_SPLITPOINTS \ (((HASH_MAX_SPLITPOINT_GROUP - HASH_SPLITPOINT_GROUPS_WITH_ONE_PHASE) * \ @@ -215,7 +266,7 @@ typedef struct HashMetaPageData * allocated */ uint32 hashm_firstfree; /* lowest-number free ovflpage (bit#) */ uint32 hashm_nmaps; /* number of bitmap pages */ - RegProcedure hashm_procid; /* hash procedure id from pg_proc */ + RegProcedure hashm_procid; /* hash function id from pg_proc */ uint32 hashm_spares[HASH_MAX_SPLITPOINTS]; /* spare pages before each * splitpoint */ BlockNumber hashm_mapp[HASH_MAX_BITMAPS]; /* blknos of ovfl bitmaps */ @@ -232,7 +283,7 @@ typedef HashMetaPageData *HashMetaPage; sizeof(ItemIdData) - \ MAXALIGN(sizeof(HashPageOpaqueData))) -#define INDEX_MOVED_BY_SPLIT_MASK 0x2000 +#define INDEX_MOVED_BY_SPLIT_MASK INDEX_AM_RESERVED_BIT #define HASH_MIN_FILLFACTOR 10 #define HASH_DEFAULT_FILLFACTOR 75 @@ -289,12 +340,20 @@ typedef HashMetaPageData *HashMetaPage; #define HTMaxStrategyNumber 1 /* - * When a new operator class is declared, we require that the user supply - * us with an amproc procudure for hashing a key of the new type. - * Since we only have one such proc in amproc, it's number 1. + * When a new operator class is declared, we require that the user supply + * us with an amproc function for hashing a key of the new type, returning + * a 32-bit hash value. We call this the "standard" hash function. We + * also allow an optional "extended" hash function which accepts a salt and + * returns a 64-bit hash value. 
This is highly recommended but, for reasons + * of backward compatibility, optional. + * + * When the salt is 0, the low 32 bits of the value returned by the extended + * hash function should match the value that would have been returned by the + * standard hash function. */ -#define HASHPROC 1 -#define HASHNProcs 1 +#define HASHSTANDARD_PROC 1 +#define HASHEXTENDED_PROC 2 +#define HASHNProcs 2 /* public routines */ @@ -322,7 +381,10 @@ extern bytea *hashoptions(Datum reloptions, bool validate); extern bool hashvalidate(Oid opclassoid); extern Datum hash_any(register const unsigned char *k, register int keylen); +extern Datum hash_any_extended(register const unsigned char *k, + register int keylen, uint64 seed); extern Datum hash_uint32(uint32 k); +extern Datum hash_uint32_extended(uint32 k, uint64 seed); /* private routines */ @@ -379,7 +441,6 @@ extern void _hash_finish_split(Relation rel, Buffer metabuf, Buffer obuf, /* hashsearch.c */ extern bool _hash_next(IndexScanDesc scan, ScanDirection dir); extern bool _hash_first(IndexScanDesc scan, ScanDirection dir); -extern bool _hash_step(IndexScanDesc scan, Buffer *bufP, ScanDirection dir); /* hashsort.c */ typedef struct HSpool HSpool; /* opaque struct in hashsort.c */ diff --git a/src/include/access/hash_xlog.h b/src/include/access/hash_xlog.h index c778fdc8df..527138440b 100644 --- a/src/include/access/hash_xlog.h +++ b/src/include/access/hash_xlog.h @@ -4,7 +4,7 @@ * header file for Postgres hash AM implementation * * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/include/access/hash_xlog.h @@ -149,7 +149,7 @@ typedef struct xl_hash_split_complete typedef struct xl_hash_move_page_contents { uint16 ntups; - bool is_prim_bucket_same_wrt; /* TRUE if the page to which + bool is_prim_bucket_same_wrt; /* true if the page to which * tuples are moved is same as * primary bucket page */ } xl_hash_move_page_contents; @@ -174,10 +174,10 @@ typedef struct xl_hash_squeeze_page BlockNumber prevblkno; BlockNumber nextblkno; uint16 ntups; - bool is_prim_bucket_same_wrt; /* TRUE if the page to which + bool is_prim_bucket_same_wrt; /* true if the page to which * tuples are moved is same as * primary bucket page */ - bool is_prev_bucket_same_wrt; /* TRUE if the page to which + bool is_prev_bucket_same_wrt; /* true if the page to which * tuples are moved is the page * previous to the freed overflow * page */ @@ -196,9 +196,9 @@ typedef struct xl_hash_squeeze_page */ typedef struct xl_hash_delete { - bool clear_dead_marking; /* TRUE if this operation clears + bool clear_dead_marking; /* true if this operation clears * LH_PAGE_HAS_DEAD_TUPLES flag */ - bool is_primary_bucket_page; /* TRUE if the operation is for + bool is_primary_bucket_page; /* true if the operation is for * primary bucket page */ } xl_hash_delete; diff --git a/src/include/access/heapam.h b/src/include/access/heapam.h index b2132e723e..40e153f71a 100644 --- a/src/include/access/heapam.h +++ b/src/include/access/heapam.h @@ -4,7 +4,7 @@ * POSTGRES heap access method definitions. 
* * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/include/access/heapam.h @@ -29,6 +29,7 @@ #define HEAP_INSERT_SKIP_FSM 0x0002 #define HEAP_INSERT_FROZEN 0x0004 #define HEAP_INSERT_SPECULATIVE 0x0008 +#define HEAP_INSERT_NO_LOGICAL 0x0010 typedef struct BulkInsertStateData *BulkInsertState; @@ -130,6 +131,7 @@ extern HeapTuple heap_getnext(HeapScanDesc scan, ScanDirection direction); extern Size heap_parallelscan_estimate(Snapshot snapshot); extern void heap_parallelscan_initialize(ParallelHeapScanDesc target, Relation relation, Snapshot snapshot); +extern void heap_parallelscan_reinitialize(ParallelHeapScanDesc parallel_scan); extern HeapScanDesc heap_beginscan_parallel(Relation, ParallelHeapScanDesc); extern bool heap_fetch(Relation relation, Snapshot snapshot, @@ -155,7 +157,7 @@ extern void heap_multi_insert(Relation relation, HeapTuple *tuples, int ntuples, CommandId cid, int options, BulkInsertState bistate); extern HTSU_Result heap_delete(Relation relation, ItemPointer tid, CommandId cid, Snapshot crosscheck, bool wait, - HeapUpdateFailureData *hufd); + HeapUpdateFailureData *hufd, bool changingPart); extern void heap_finish_speculative(Relation relation, HeapTuple tuple); extern void heap_abort_speculative(Relation relation, HeapTuple tuple); extern HTSU_Result heap_update(Relation relation, ItemPointer otid, @@ -167,8 +169,9 @@ extern HTSU_Result heap_lock_tuple(Relation relation, HeapTuple tuple, bool follow_update, Buffer *buffer, HeapUpdateFailureData *hufd); extern void heap_inplace_update(Relation relation, HeapTuple tuple); -extern bool heap_freeze_tuple(HeapTupleHeader tuple, TransactionId cutoff_xid, - TransactionId cutoff_multi); +extern bool heap_freeze_tuple(HeapTupleHeader tuple, + TransactionId relfrozenxid, TransactionId relminmxid, + TransactionId cutoff_xid, TransactionId cutoff_multi); extern bool heap_tuple_needs_freeze(HeapTupleHeader tuple, TransactionId cutoff_xid, MultiXactId cutoff_multi, Buffer buf); extern bool heap_tuple_needs_eventual_freeze(HeapTupleHeader tuple); diff --git a/src/include/access/heapam_xlog.h b/src/include/access/heapam_xlog.h index 81a6a395c4..914897f83d 100644 --- a/src/include/access/heapam_xlog.h +++ b/src/include/access/heapam_xlog.h @@ -4,7 +4,7 @@ * POSTGRES heap access XLOG definitions. * * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/include/access/heapam_xlog.h @@ -32,7 +32,7 @@ #define XLOG_HEAP_INSERT 0x00 #define XLOG_HEAP_DELETE 0x10 #define XLOG_HEAP_UPDATE 0x20 -/* 0x030 is free, was XLOG_HEAP_MOVE */ +#define XLOG_HEAP_TRUNCATE 0x30 #define XLOG_HEAP_HOT_UPDATE 0x40 #define XLOG_HEAP_CONFIRM 0x50 #define XLOG_HEAP_LOCK 0x60 @@ -93,6 +93,7 @@ #define XLH_DELETE_CONTAINS_OLD_TUPLE (1<<1) #define XLH_DELETE_CONTAINS_OLD_KEY (1<<2) #define XLH_DELETE_IS_SUPER (1<<3) +#define XLH_DELETE_IS_PARTITION_MOVE (1<<4) /* convenience macro for checking whether any form of old tuple was logged */ #define XLH_DELETE_CONTAINS_OLD \ @@ -109,6 +110,27 @@ typedef struct xl_heap_delete #define SizeOfHeapDelete (offsetof(xl_heap_delete, flags) + sizeof(uint8)) +/* + * xl_heap_truncate flag values, 8 bits are available. 
+ */ +#define XLH_TRUNCATE_CASCADE (1<<0) +#define XLH_TRUNCATE_RESTART_SEQS (1<<1) + +/* + * For truncate we list all truncated relids in an array, followed by all + * sequence relids that need to be restarted, if any. + * All rels are always within the same database, so we just list dbid once. + */ +typedef struct xl_heap_truncate +{ + Oid dbId; + uint32 nrelids; + uint8 flags; + Oid relids[FLEXIBLE_ARRAY_MEMBER]; +} xl_heap_truncate; + +#define SizeOfHeapTruncate (offsetof(xl_heap_truncate, relids)) + /* * We don't store the whole fixed part (HeapTupleHeaderData) of an inserted * or updated tuple in WAL; we can save a few bytes by reconstructing the @@ -339,13 +361,7 @@ typedef struct xl_heap_new_cid TransactionId top_xid; CommandId cmin; CommandId cmax; - - /* - * don't really need the combocid since we have the actual values right in - * this struct, but the padding makes it free and its useful for - * debugging. - */ - CommandId combocid; + CommandId combocid; /* just for debugging */ /* * Store the relfilenode/ctid pair to facilitate lookups. @@ -390,6 +406,8 @@ extern XLogRecPtr log_heap_freeze(Relation reln, Buffer buffer, TransactionId cutoff_xid, xl_heap_freeze_tuple *tuples, int ntuples); extern bool heap_prepare_freeze_tuple(HeapTupleHeader tuple, + TransactionId relfrozenxid, + TransactionId relminmxid, TransactionId cutoff_xid, TransactionId cutoff_multi, xl_heap_freeze_tuple *frz, diff --git a/src/include/access/hio.h b/src/include/access/hio.h index 4a8beb63a6..9993d5be70 100644 --- a/src/include/access/hio.h +++ b/src/include/access/hio.h @@ -4,7 +4,7 @@ * POSTGRES heap access method input/output definitions. * * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/include/access/hio.h diff --git a/src/include/access/htup.h b/src/include/access/htup.h index 61b3e68639..5a4e5b05f5 100644 --- a/src/include/access/htup.h +++ b/src/include/access/htup.h @@ -4,7 +4,7 @@ * POSTGRES heap tuple definitions. * * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/include/access/htup.h @@ -64,6 +64,7 @@ typedef struct HeapTupleData uint32 t_len; /* length of *t_data */ ItemPointerData t_self; /* SelfItemPointer */ Oid t_tableOid; /* table the tuple came from */ +#define FIELDNO_HEAPTUPLEDATA_DATA 3 HeapTupleHeader t_data; /* -> tuple header and data */ } HeapTupleData; diff --git a/src/include/access/htup_details.h b/src/include/access/htup_details.h index 3e1676c7e6..97d240fdbb 100644 --- a/src/include/access/htup_details.h +++ b/src/include/access/htup_details.h @@ -4,7 +4,7 @@ * POSTGRES heap tuple header definitions. * * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/include/access/htup_details.h @@ -83,11 +83,15 @@ * * A word about t_ctid: whenever a new tuple is stored on disk, its t_ctid * is initialized with its own TID (location). If the tuple is ever updated, - * its t_ctid is changed to point to the replacement version of the tuple. 
- * Thus, a tuple is the latest version of its row iff XMAX is invalid or + * its t_ctid is changed to point to the replacement version of the tuple. Or + * if the tuple is moved from one partition to another, due to an update of + * the partition key, t_ctid is set to a special value to indicate that + * (see ItemPointerSetMovedPartitions). Thus, a tuple is the latest version + * of its row iff XMAX is invalid or * t_ctid points to itself (in which case, if XMAX is valid, the tuple is * either locked or deleted). One can follow the chain of t_ctid links - * to find the newest version of the row. Beware however that VACUUM might + * to find the newest version of the row, unless it was moved to a different + * partition. Beware however that VACUUM might * erase the pointed-to (newer) tuple before erasing the pointing (older) * tuple. Hence, when following a t_ctid link, it is necessary to check * to see if the referenced slot is empty or contains an unrelated tuple. @@ -134,6 +138,11 @@ typedef struct DatumTupleFields Oid datum_typeid; /* composite type OID, or RECORDOID */ /* + * datum_typeid cannot be a domain over composite, only plain composite, + * even if the datum is meant as a value of a domain-over-composite type. + * This is in line with the general principle that CoerceToDomain does not + * change the physical representation of the base type value. + * * Note: field ordering is chosen with thought that Oid might someday * widen to 64 bits. */ @@ -152,14 +161,18 @@ struct HeapTupleHeaderData /* Fields below here must match MinimalTupleData! */ +#define FIELDNO_HEAPTUPLEHEADERDATA_INFOMASK2 2 uint16 t_infomask2; /* number of attributes + various flags */ +#define FIELDNO_HEAPTUPLEHEADERDATA_INFOMASK 3 uint16 t_infomask; /* various flag bits, see below */ +#define FIELDNO_HEAPTUPLEHEADERDATA_HOFF 4 uint8 t_hoff; /* sizeof header incl. bitmap, padding */ /* ^ - 23 bytes - ^ */ +#define FIELDNO_HEAPTUPLEHEADERDATA_BITS 5 bits8 t_bits[FLEXIBLE_ARRAY_MEMBER]; /* bitmap of NULLs */ /* MORE DATA FOLLOWS AT END OF STRUCT */ @@ -276,14 +289,6 @@ struct HeapTupleHeaderData */ #define HEAP_TUPLE_HAS_MATCH HEAP_ONLY_TUPLE /* tuple has a join match */ -/* - * Special value used in t_ctid.ip_posid, to indicate that it holds a - * speculative insertion token rather than a real TID. This must be higher - * than MaxOffsetNumber, so that it can be distinguished from a valid - * offset number in a regular item pointer. - */ -#define SpecTokenOffsetNumber 0xfffe - /* * HeapTupleHeader accessor macros * @@ -436,6 +441,13 @@ do { \ ItemPointerSet(&(tup)->t_ctid, token, SpecTokenOffsetNumber) \ ) +#define HeapTupleHeaderIndicatesMovedPartitions(tup) \ + (ItemPointerGetOffsetNumber(&(tup)->t_ctid) == MovedPartitionsOffsetNumber && \ + ItemPointerGetBlockNumberNoCheck(&(tup)->t_ctid) == MovedPartitionsBlockNumber) + +#define HeapTupleHeaderSetMovedPartitions(tup) \ + ItemPointerSet(&(tup)->t_ctid, MovedPartitionsBlockNumber, MovedPartitionsOffsetNumber) + #define HeapTupleHeaderGetDatumLength(tup) \ VARSIZE(tup) @@ -722,11 +734,11 @@ struct MinimalTupleData (*(isnull) = false), \ HeapTupleNoNulls(tup) ? \ ( \ - (tupleDesc)->attrs[(attnum)-1]->attcacheoff >= 0 ? \ + TupleDescAttr((tupleDesc), (attnum)-1)->attcacheoff >= 0 ? 
\ ( \ - fetchatt((tupleDesc)->attrs[(attnum)-1], \ + fetchatt(TupleDescAttr((tupleDesc), (attnum)-1), \ (char *) (tup)->t_data + (tup)->t_data->t_hoff + \ - (tupleDesc)->attrs[(attnum)-1]->attcacheoff) \ + TupleDescAttr((tupleDesc), (attnum)-1)->attcacheoff)\ ) \ : \ nocachegetattr((tup), (attnum), (tupleDesc)) \ @@ -790,7 +802,7 @@ extern void heap_fill_tuple(TupleDesc tupleDesc, Datum *values, bool *isnull, char *data, Size data_size, uint16 *infomask, bits8 *bit); -extern bool heap_attisnull(HeapTuple tup, int attnum); +extern bool heap_attisnull(HeapTuple tup, int attnum, TupleDesc tupleDesc); extern Datum nocachegetattr(HeapTuple tup, int attnum, TupleDesc att); extern Datum heap_getsysattr(HeapTuple tup, int attnum, TupleDesc tupleDesc, @@ -820,5 +832,10 @@ extern void heap_free_minimal_tuple(MinimalTuple mtup); extern MinimalTuple heap_copy_minimal_tuple(MinimalTuple mtup); extern HeapTuple heap_tuple_from_minimal_tuple(MinimalTuple mtup); extern MinimalTuple minimal_tuple_from_heap_tuple(HeapTuple htup); +extern size_t varsize_any(void *p); +extern HeapTuple heap_expand_tuple(HeapTuple sourceTuple, TupleDesc tupleDesc); +extern MinimalTuple minimal_expand_tuple(HeapTuple sourceTuple, TupleDesc tupleDesc); +struct TupleTableSlot; +extern void slot_deform_tuple(struct TupleTableSlot *slot, int natts); #endif /* HTUP_DETAILS_H */ diff --git a/src/include/access/itup.h b/src/include/access/itup.h index a94e7948b4..bd3a702380 100644 --- a/src/include/access/itup.h +++ b/src/include/access/itup.h @@ -4,7 +4,7 @@ * POSTGRES index tuple definitions. * * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/include/access/itup.h @@ -41,7 +41,7 @@ typedef struct IndexTupleData * * 15th (high) bit: has nulls * 14th bit: has var-width attributes - * 13th bit: unused + * 13th bit: AM-defined meaning * 12-0 bit: size of tuple * --------------- */ @@ -63,12 +63,12 @@ typedef IndexAttributeBitMapData * IndexAttributeBitMap; * t_info manipulation macros */ #define INDEX_SIZE_MASK 0x1FFF -/* bit 0x2000 is reserved for index-AM specific usage */ +#define INDEX_AM_RESERVED_BIT 0x2000 /* reserved for index-AM specific + * usage */ #define INDEX_VAR_MASK 0x4000 #define INDEX_NULL_MASK 0x8000 -#define IndexTupleSize(itup) ((Size) (((IndexTuple) (itup))->t_info & INDEX_SIZE_MASK)) -#define IndexTupleDSize(itup) ((Size) ((itup).t_info & INDEX_SIZE_MASK)) +#define IndexTupleSize(itup) ((Size) ((itup)->t_info & INDEX_SIZE_MASK)) #define IndexTupleHasNulls(itup) ((((IndexTuple) (itup))->t_info & INDEX_NULL_MASK)) #define IndexTupleHasVarwidths(itup) ((((IndexTuple) (itup))->t_info & INDEX_VAR_MASK)) @@ -103,11 +103,11 @@ typedef IndexAttributeBitMapData * IndexAttributeBitMap; *(isnull) = false, \ !IndexTupleHasNulls(tup) ? \ ( \ - (tupleDesc)->attrs[(attnum)-1]->attcacheoff >= 0 ? \ + TupleDescAttr((tupleDesc), (attnum)-1)->attcacheoff >= 0 ? 
\ ( \ - fetchatt((tupleDesc)->attrs[(attnum)-1], \ + fetchatt(TupleDescAttr((tupleDesc), (attnum)-1), \ (char *) (tup) + IndexInfoFindDataOffset((tup)->t_info) \ - + (tupleDesc)->attrs[(attnum)-1]->attcacheoff) \ + + TupleDescAttr((tupleDesc), (attnum)-1)->attcacheoff) \ ) \ : \ nocache_index_getattr((tup), (attnum), (tupleDesc)) \ @@ -132,8 +132,16 @@ typedef IndexAttributeBitMapData * IndexAttributeBitMap; * bitmap, so we can safely assume it's at least 1 byte bigger than a bare * IndexTupleData struct. We arrive at the divisor because each tuple * must be maxaligned, and it must have an associated item pointer. + * + * To be index-type-independent, this does not account for any special space + * on the page, and is thus conservative. + * + * Note: in btree non-leaf pages, the first tuple has no key (it's implicitly + * minus infinity), thus breaking the "at least 1 byte bigger" assumption. + * On such a page, N tuples could take one MAXALIGN quantum less space than + * estimated here, seemingly allowing one more tuple than estimated here. + * But such a page always has at least MAXALIGN special space, so we're safe. */ -#define MinIndexTupleSize MAXALIGN(sizeof(IndexTupleData) + 1) #define MaxIndexTuplesPerPage \ ((int) ((BLCKSZ - SizeOfPageHeaderData) / \ (MAXALIGN(sizeof(IndexTupleData) + 1) + sizeof(ItemIdData)))) @@ -147,5 +155,7 @@ extern Datum nocache_index_getattr(IndexTuple tup, int attnum, extern void index_deform_tuple(IndexTuple tup, TupleDesc tupleDescriptor, Datum *values, bool *isnull); extern IndexTuple CopyIndexTuple(IndexTuple source); +extern IndexTuple index_truncate_tuple(TupleDesc sourceDescriptor, + IndexTuple source, int leavenatts); #endif /* ITUP_H */ diff --git a/src/include/access/multixact.h b/src/include/access/multixact.h index d5e18c6733..18fe380c5f 100644 --- a/src/include/access/multixact.h +++ b/src/include/access/multixact.h @@ -3,7 +3,7 @@ * * PostgreSQL multi-transaction-log manager * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/include/access/multixact.h diff --git a/src/include/access/nbtree.h b/src/include/access/nbtree.h index e6abbec280..ea495f1724 100644 --- a/src/include/access/nbtree.h +++ b/src/include/access/nbtree.h @@ -4,7 +4,7 @@ * header file for postgres btree access method implementation. 
* * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/include/access/nbtree.h @@ -21,6 +21,7 @@ #include "catalog/pg_index.h" #include "lib/stringinfo.h" #include "storage/bufmgr.h" +#include "storage/shm_toc.h" /* There's room for a 16-bit vacuum cycle ID in BTPageOpaqueData */ typedef uint16 BTCycleId; @@ -101,6 +102,11 @@ typedef struct BTMetaPageData uint32 btm_level; /* tree level of the root page */ BlockNumber btm_fastroot; /* current "fast" root location */ uint32 btm_fastlevel; /* tree level of the "fast" root page */ + /* following fields are available since page version 3 */ + TransactionId btm_oldest_btpo_xact; /* oldest btpo_xact among all deleted + * pages */ + float8 btm_last_cleanup_num_heap_tuples; /* number of heap tuples + * during last cleanup */ } BTMetaPageData; #define BTPageGetMeta(p) \ @@ -108,7 +114,8 @@ typedef struct BTMetaPageData #define BTREE_METAPAGE 0 /* first page is meta */ #define BTREE_MAGIC 0x053162 /* magic number of btree pages */ -#define BTREE_VERSION 2 /* current version number */ +#define BTREE_VERSION 3 /* current version number */ +#define BTREE_MIN_VERSION 2 /* minimal supported version number */ /* * Maximum size of a btree index entry, including its tuple header. @@ -132,31 +139,6 @@ typedef struct BTMetaPageData #define BTREE_DEFAULT_FILLFACTOR 90 #define BTREE_NONLEAF_FILLFACTOR 70 -/* - * Test whether two btree entries are "the same". - * - * Old comments: - * In addition, we must guarantee that all tuples in the index are unique, - * in order to satisfy some assumptions in Lehman and Yao. The way that we - * do this is by generating a new OID for every insertion that we do in the - * tree. This adds eight bytes to the size of btree index tuples. Note - * that we do not use the OID as part of a composite key; the OID only - * serves as a unique identifier for a given index tuple (logical position - * within a page). - * - * New comments: - * actually, we must guarantee that all tuples in A LEVEL - * are unique, not in ALL INDEX. So, we can use the t_tid - * as unique identifier for a given index tuple (logical position - * within a level). - vadim 04/09/97 - */ -#define BTTidSame(i1, i2) \ - ((ItemPointerGetBlockNumber(&(i1)) == ItemPointerGetBlockNumber(&(i2))) && \ - (ItemPointerGetOffsetNumber(&(i1)) == ItemPointerGetOffsetNumber(&(i2)))) -#define BTEntrySame(i1, i2) \ - BTTidSame((i1)->t_tid, (i2)->t_tid) - - /* * In general, the btree code tries to localize its knowledge about * page layout to a couple of routines. 
However, we need a special @@ -173,14 +155,14 @@ typedef struct BTMetaPageData */ #define P_LEFTMOST(opaque) ((opaque)->btpo_prev == P_NONE) #define P_RIGHTMOST(opaque) ((opaque)->btpo_next == P_NONE) -#define P_ISLEAF(opaque) ((opaque)->btpo_flags & BTP_LEAF) -#define P_ISROOT(opaque) ((opaque)->btpo_flags & BTP_ROOT) -#define P_ISDELETED(opaque) ((opaque)->btpo_flags & BTP_DELETED) -#define P_ISMETA(opaque) ((opaque)->btpo_flags & BTP_META) -#define P_ISHALFDEAD(opaque) ((opaque)->btpo_flags & BTP_HALF_DEAD) -#define P_IGNORE(opaque) ((opaque)->btpo_flags & (BTP_DELETED|BTP_HALF_DEAD)) -#define P_HAS_GARBAGE(opaque) ((opaque)->btpo_flags & BTP_HAS_GARBAGE) -#define P_INCOMPLETE_SPLIT(opaque) ((opaque)->btpo_flags & BTP_INCOMPLETE_SPLIT) +#define P_ISLEAF(opaque) (((opaque)->btpo_flags & BTP_LEAF) != 0) +#define P_ISROOT(opaque) (((opaque)->btpo_flags & BTP_ROOT) != 0) +#define P_ISDELETED(opaque) (((opaque)->btpo_flags & BTP_DELETED) != 0) +#define P_ISMETA(opaque) (((opaque)->btpo_flags & BTP_META) != 0) +#define P_ISHALFDEAD(opaque) (((opaque)->btpo_flags & BTP_HALF_DEAD) != 0) +#define P_IGNORE(opaque) (((opaque)->btpo_flags & (BTP_DELETED|BTP_HALF_DEAD)) != 0) +#define P_HAS_GARBAGE(opaque) (((opaque)->btpo_flags & BTP_HAS_GARBAGE) != 0) +#define P_INCOMPLETE_SPLIT(opaque) (((opaque)->btpo_flags & BTP_INCOMPLETE_SPLIT) != 0) /* * Lehman and Yao's algorithm requires a ``high key'' on every non-rightmost @@ -204,6 +186,80 @@ typedef struct BTMetaPageData #define P_FIRSTKEY ((OffsetNumber) 2) #define P_FIRSTDATAKEY(opaque) (P_RIGHTMOST(opaque) ? P_HIKEY : P_FIRSTKEY) +/* + * INCLUDE B-Tree indexes have non-key attributes. These are extra + * attributes that may be returned by index-only scans, but do not influence + * the order of items in the index (formally, non-key attributes are not + * considered to be part of the key space). Non-key attributes are only + * present in leaf index tuples whose item pointers actually point to heap + * tuples. All other types of index tuples (collectively, "pivot" tuples) + * only have key attributes, since pivot tuples only ever need to represent + * how the key space is separated. In general, any B-Tree index that has + * more than one level (i.e. any index that does not just consist of a + * metapage and a single leaf root page) must have some number of pivot + * tuples, since pivot tuples are used for traversing the tree. + * + * We store the number of attributes present inside pivot tuples by abusing + * their item pointer offset field, since pivot tuples never need to store a + * real offset (downlinks only need to store a block number). The offset + * field only stores the number of attributes when the INDEX_ALT_TID_MASK + * bit is set (we never assume that pivot tuples must explicitly store the + * number of attributes, and currently do not bother storing the number of + * attributes unless indnkeyatts actually differs from indnatts). + * INDEX_ALT_TID_MASK is only used for pivot tuples at present, though it's + * possible that it will be used within non-pivot tuples in the future. Do + * not assume that a tuple with INDEX_ALT_TID_MASK set must be a pivot + * tuple. + * + * The 12 least significant offset bits are used to represent the number of + * attributes in INDEX_ALT_TID_MASK tuples, leaving 4 bits that are reserved + * for future use (BT_RESERVED_OFFSET_MASK bits). BT_N_KEYS_OFFSET_MASK should + * be large enough to store any number <= INDEX_MAX_KEYS. 
+ */ +#define INDEX_ALT_TID_MASK INDEX_AM_RESERVED_BIT +#define BT_RESERVED_OFFSET_MASK 0xF000 +#define BT_N_KEYS_OFFSET_MASK 0x0FFF + +/* Get/set downlink block number */ +#define BTreeInnerTupleGetDownLink(itup) \ + ItemPointerGetBlockNumberNoCheck(&((itup)->t_tid)) +#define BTreeInnerTupleSetDownLink(itup, blkno) \ + ItemPointerSetBlockNumber(&((itup)->t_tid), (blkno)) + +/* + * Get/set leaf page highkey's link. During the second phase of deletion, the + * target leaf page's high key may point to an ancestor page (at all other + * times, the leaf level high key's link is not used). See the nbtree README + * for full details. + */ +#define BTreeTupleGetTopParent(itup) \ + ItemPointerGetBlockNumberNoCheck(&((itup)->t_tid)) +#define BTreeTupleSetTopParent(itup, blkno) \ + do { \ + ItemPointerSetBlockNumber(&((itup)->t_tid), (blkno)); \ + BTreeTupleSetNAtts((itup), 0); \ + } while(0) + +/* + * Get/set number of attributes within B-tree index tuple. Asserts should be + * removed when BT_RESERVED_OFFSET_MASK bits will be used. + */ +#define BTreeTupleGetNAtts(itup, rel) \ + ( \ + (itup)->t_info & INDEX_ALT_TID_MASK ? \ + ( \ + AssertMacro((ItemPointerGetOffsetNumberNoCheck(&(itup)->t_tid) & BT_RESERVED_OFFSET_MASK) == 0), \ + ItemPointerGetOffsetNumberNoCheck(&(itup)->t_tid) & BT_N_KEYS_OFFSET_MASK \ + ) \ + : \ + IndexRelationGetNumberOfAttributes(rel) \ + ) +#define BTreeTupleSetNAtts(itup, n) \ + do { \ + (itup)->t_info |= INDEX_ALT_TID_MASK; \ + Assert(((n) & BT_RESERVED_OFFSET_MASK) == 0); \ + ItemPointerSetOffsetNumber(&(itup)->t_tid, (n) & BT_N_KEYS_OFFSET_MASK); \ + } while(0) /* * Operator strategy numbers for B-tree have been moved to access/stratnum.h, @@ -218,17 +274,22 @@ typedef struct BTMetaPageData * When a new operator class is declared, we require that the user * supply us with an amproc procedure (BTORDER_PROC) for determining * whether, for two keys a and b, a < b, a = b, or a > b. This routine - * must return < 0, 0, > 0, respectively, in these three cases. (It must - * not return INT_MIN, since we may negate the result before using it.) + * must return < 0, 0, > 0, respectively, in these three cases. * * To facilitate accelerated sorting, an operator class may choose to * offer a second procedure (BTSORTSUPPORT_PROC). For full details, see * src/include/utils/sortsupport.h. + * + * To support window frames defined by "RANGE offset PRECEDING/FOLLOWING", + * an operator class may choose to offer a third amproc procedure + * (BTINRANGE_PROC), independently of whether it offers sortsupport. + * For full details, see doc/src/sgml/btree.sgml. 
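A standalone restatement of the pivot-tuple trick described above: the number of key attributes is stashed in the low 12 bits of the item pointer's offset field, with INDEX_ALT_TID_MASK in t_info marking that the offset is being reused this way. The mask values mirror the definitions above; mini_itup and the helper names are invented for the sketch.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define INDEX_ALT_TID_MASK		0x2000	/* INDEX_AM_RESERVED_BIT */
#define BT_RESERVED_OFFSET_MASK 0xF000
#define BT_N_KEYS_OFFSET_MASK	0x0FFF

struct mini_itup
{
	uint16_t	t_info;
	uint16_t	ip_posid;		/* stand-in for t_tid's offset field */
};

static void
set_natts(struct mini_itup *itup, uint16_t n)
{
	assert((n & BT_RESERVED_OFFSET_MASK) == 0);
	itup->t_info |= INDEX_ALT_TID_MASK;
	itup->ip_posid = n & BT_N_KEYS_OFFSET_MASK;
}

static uint16_t
get_natts(const struct mini_itup *itup, uint16_t indnatts)
{
	if (itup->t_info & INDEX_ALT_TID_MASK)
		return itup->ip_posid & BT_N_KEYS_OFFSET_MASK;
	return indnatts;			/* ordinary tuple: all attributes present */
}

int
main(void)
{
	struct mini_itup itup = {0, 0};

	set_natts(&itup, 2);
	printf("natts = %u\n", get_natts(&itup, 5));	/* prints 2 */
	return 0;
}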
*/ #define BTORDER_PROC 1 #define BTSORTSUPPORT_PROC 2 -#define BTNProcs 2 +#define BTINRANGE_PROC 3 +#define BTNProcs 3 /* * We need to be able to tell the difference between read and write @@ -252,7 +313,7 @@ typedef struct BTStackData { BlockNumber bts_blkno; OffsetNumber bts_offset; - IndexTupleData bts_btentry; + BlockNumber bts_btentry; struct BTStackData *bts_parent; } BTStackData; @@ -430,8 +491,6 @@ typedef BTScanOpaqueData *BTScanOpaque; /* * external entry points for btree, in nbtree.c */ -extern IndexBuildResult *btbuild(Relation heap, Relation index, - struct IndexInfo *indexInfo); extern void btbuildempty(Relation index); extern bool btinsert(Relation rel, Datum *values, bool *isnull, ItemPointer ht_ctid, Relation heapRel, @@ -476,6 +535,9 @@ extern void _bt_finish_split(Relation rel, Buffer bbuf, BTStack stack); * prototypes for functions in nbtpage.c */ extern void _bt_initmetapage(Page page, BlockNumber rootbknum, uint32 level); +extern void _bt_update_meta_cleanup_info(Relation rel, + TransactionId oldestBtpoXact, float8 numHeapTuples); +extern void _bt_upgrademetapage(Page page); extern Buffer _bt_getroot(Relation rel, int access); extern Buffer _bt_gettrueroot(Relation rel); extern int _bt_getrootheight(Relation rel); @@ -538,6 +600,8 @@ extern bytea *btoptions(Datum reloptions, bool validate); extern bool btproperty(Oid index_oid, int attno, IndexAMProperty prop, const char *propname, bool *res, bool *isnull); +extern IndexTuple _bt_nonkey_truncate(Relation rel, IndexTuple itup); +extern bool _bt_check_natts(Relation rel, Page page, OffsetNumber offnum); /* * prototypes for functions in nbtvalidate.c @@ -547,13 +611,8 @@ extern bool btvalidate(Oid opclassoid); /* * prototypes for functions in nbtsort.c */ -typedef struct BTSpool BTSpool; /* opaque type known only within nbtsort.c */ - -extern BTSpool *_bt_spoolinit(Relation heap, Relation index, - bool isunique, bool isdead); -extern void _bt_spooldestroy(BTSpool *btspool); -extern void _bt_spool(BTSpool *btspool, ItemPointer self, - Datum *values, bool *isnull); -extern void _bt_leafbuild(BTSpool *btspool, BTSpool *spool2); +extern IndexBuildResult *btbuild(Relation heap, Relation index, + struct IndexInfo *indexInfo); +extern void _bt_parallel_build_main(dsm_segment *seg, shm_toc *toc); #endif /* NBTREE_H */ diff --git a/src/include/access/nbtxlog.h b/src/include/access/nbtxlog.h index e3cddb2e64..819373031c 100644 --- a/src/include/access/nbtxlog.h +++ b/src/include/access/nbtxlog.h @@ -3,7 +3,7 @@ * nbtxlog.h * header file for postgres btree xlog routines * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/include/access/nbtxlog.h @@ -28,7 +28,8 @@ #define XLOG_BTREE_INSERT_META 0x20 /* same, plus update metapage */ #define XLOG_BTREE_SPLIT_L 0x30 /* add index tuple with split */ #define XLOG_BTREE_SPLIT_R 0x40 /* as above, new item on right */ -/* 0x50 and 0x60 are unused */ +#define XLOG_BTREE_SPLIT_L_HIGHKEY 0x50 /* as above, include truncated highkey */ +#define XLOG_BTREE_SPLIT_R_HIGHKEY 0x60 /* as above, include truncated highkey */ #define XLOG_BTREE_DELETE 0x70 /* delete leaf index tuples for a page */ #define XLOG_BTREE_UNLINK_PAGE 0x80 /* delete a half-dead page */ #define XLOG_BTREE_UNLINK_PAGE_META 0x90 /* same, and update metapage */ @@ -38,6 +39,8 @@ * vacuum */ #define XLOG_BTREE_REUSE_PAGE 0xD0 /* old page is about to be 
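/*
 * Hedged sketch of what a BTINRANGE_PROC support function answers for the
 * int4 case, assuming the usual in_range convention: shift base by offset
 * (subtracting when "sub" is true), then test val against the shifted base
 * on the side selected by "less".  This only illustrates the idea; the
 * authoritative contract is in doc/src/sgml/btree.sgml, and all names here
 * are hypothetical.
 */
#include <stdbool.h>
#include <stdio.h>

static bool
sketch_int4_in_range(int val, int base, int offset, bool sub, bool less)
{
    /* A real implementation must also guard against integer overflow. */
    int bound = sub ? base - offset : base + offset;

    return less ? (val <= bound) : (val >= bound);
}

int
main(void)
{
    /* Is 7 within "5 FOLLOWING" of 3, i.e. 7 <= 3 + 5? */
    printf("%d\n", (int) sketch_int4_in_range(7, 3, 5, false, true));
    return 0;
}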
reused from * FSM */ +#define XLOG_BTREE_META_CLEANUP 0xE0 /* update cleanup-related data in the + * metapage */ /* * All that we need to regenerate the meta-data page @@ -48,6 +51,8 @@ typedef struct xl_btree_metadata uint32 level; BlockNumber fastroot; uint32 fastlevel; + TransactionId oldest_btpo_xact; + float8 last_cleanup_num_heap_tuples; } xl_btree_metadata; /* @@ -78,10 +83,11 @@ typedef struct xl_btree_insert * Note: the four XLOG_BTREE_SPLIT xl_info codes all use this data record. * The _L and _R variants indicate whether the inserted tuple went into the * left or right split page (and thus, whether newitemoff and the new item - * are stored or not). The _ROOT variants indicate that we are splitting - * the root page, and thus that a newroot record rather than an insert or - * split record should follow. Note that a split record never carries a - * metapage update --- we'll do that in the parent-level update. + * are stored or not). The _HIGHKEY variants indicate that we've logged + * explicitly left page high key value, otherwise redo should use right page + * leftmost key as a left page high key. _HIGHKEY is specified for internal + * pages where right page leftmost key is suppressed, and for leaf pages + * of covering indexes where high key have non-key attributes truncated. * * Backup Blk 0: original page / new left page * diff --git a/src/include/access/parallel.h b/src/include/access/parallel.h index e3e0cecf1e..025691fd82 100644 --- a/src/include/access/parallel.h +++ b/src/include/access/parallel.h @@ -3,7 +3,7 @@ * parallel.h * Infrastructure for launching parallel workers * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/include/access/parallel.h @@ -43,18 +43,29 @@ typedef struct ParallelContext void *private_memory; shm_toc *toc; ParallelWorkerInfo *worker; + int nknown_attached_workers; + bool *known_attached_workers; } ParallelContext; +typedef struct ParallelWorkerContext +{ + dsm_segment *seg; + shm_toc *toc; +} ParallelWorkerContext; + extern volatile bool ParallelMessagePending; -extern int ParallelWorkerNumber; -extern bool InitializingParallelWorker; +extern PGDLLIMPORT int ParallelWorkerNumber; +extern PGDLLIMPORT bool InitializingParallelWorker; #define IsParallelWorker() (ParallelWorkerNumber >= 0) -extern ParallelContext *CreateParallelContext(const char *library_name, const char *function_name, int nworkers); +extern ParallelContext *CreateParallelContext(const char *library_name, + const char *function_name, int nworkers, + bool serializable_okay); extern void InitializeParallelDSM(ParallelContext *pcxt); extern void ReinitializeParallelDSM(ParallelContext *pcxt); extern void LaunchParallelWorkers(ParallelContext *pcxt); +extern void WaitForParallelWorkersToAttach(ParallelContext *pcxt); extern void WaitForParallelWorkersToFinish(ParallelContext *pcxt); extern void DestroyParallelContext(ParallelContext *pcxt); extern bool ParallelContextActive(void); diff --git a/src/include/access/printsimple.h b/src/include/access/printsimple.h index edf28cece9..4184f16560 100644 --- a/src/include/access/printsimple.h +++ b/src/include/access/printsimple.h @@ -3,7 +3,7 @@ * printsimple.h * print simple tuples without catalog access * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions 
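/*
 * Hedged usage sketch of the worker-launch API declared in parallel.h as
 * changed above.  Only the function names and signatures are taken from the
 * header; the library/entry-point names are hypothetical, the caller is
 * assumed to already be in parallel mode, and this compiles only inside a
 * backend or extension, not standalone.
 */
#include "postgres.h"
#include "access/parallel.h"

static void
sketch_launch_workers(int nworkers)
{
    ParallelContext *pcxt;

    /* serializable_okay = false: refuse parallelism under SERIALIZABLE */
    pcxt = CreateParallelContext("my_extension", "my_worker_main",
                                 nworkers, false);
    InitializeParallelDSM(pcxt);        /* creates the shared DSM segment */
    LaunchParallelWorkers(pcxt);

    /* New in this revision: block until workers have attached (or failed) */
    WaitForParallelWorkersToAttach(pcxt);

    /* ... hand out work via pcxt->toc, then wait for completion ... */
    WaitForParallelWorkersToFinish(pcxt);
    DestroyParallelContext(pcxt);
}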
Copyright (c) 1994, Regents of the University of California * * src/include/access/printsimple.h diff --git a/src/include/access/printtup.h b/src/include/access/printtup.h index 641715e416..94f8d705b5 100644 --- a/src/include/access/printtup.h +++ b/src/include/access/printtup.h @@ -4,7 +4,7 @@ * * * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/include/access/printtup.h @@ -20,8 +20,8 @@ extern DestReceiver *printtup_create_DR(CommandDest dest); extern void SetRemoteDestReceiverParams(DestReceiver *self, Portal portal); -extern void SendRowDescriptionMessage(TupleDesc typeinfo, List *targetlist, - int16 *formats); +extern void SendRowDescriptionMessage(StringInfo buf, + TupleDesc typeinfo, List *targetlist, int16 *formats); extern void debugStartup(DestReceiver *self, int operation, TupleDesc typeinfo); diff --git a/src/include/access/reloptions.h b/src/include/access/reloptions.h index 5cdaa3bff1..4022c14a83 100644 --- a/src/include/access/reloptions.h +++ b/src/include/access/reloptions.h @@ -9,7 +9,7 @@ * into a lot of low-level code. * * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/include/access/reloptions.h @@ -51,6 +51,7 @@ typedef enum relopt_kind RELOPT_KIND_PARTITIONED = (1 << 11), /* if you add a new kind, make sure you update "last_default" too */ RELOPT_KIND_LAST_DEFAULT = RELOPT_KIND_PARTITIONED, + RELOPT_KIND_INDEX = RELOPT_KIND_BTREE | RELOPT_KIND_HASH | RELOPT_KIND_GIN | RELOPT_KIND_SPGIST, /* some compilers treat enums as signed ints, so we can't use 1 << 31 */ RELOPT_KIND_MAX = (1 << 30) } relopt_kind; @@ -108,7 +109,7 @@ typedef struct relopt_real } relopt_real; /* validation routines for strings */ -typedef void (*validate_string_relopt) (char *value); +typedef void (*validate_string_relopt) (const char *value); typedef struct relopt_string { @@ -166,7 +167,7 @@ typedef struct * code block. 
*/ #define HAVE_RELOPTION(optname, option) \ - (pg_strncasecmp(option.gen->name, optname, option.gen->namelen + 1) == 0) + (strncmp(option.gen->name, optname, option.gen->namelen + 1) == 0) #define HANDLE_INT_RELOPTION(optname, var, option, wasset) \ do { \ @@ -246,17 +247,17 @@ typedef struct extern relopt_kind add_reloption_kind(void); -extern void add_bool_reloption(bits32 kinds, char *name, char *desc, +extern void add_bool_reloption(bits32 kinds, const char *name, const char *desc, bool default_val); -extern void add_int_reloption(bits32 kinds, char *name, char *desc, +extern void add_int_reloption(bits32 kinds, const char *name, const char *desc, int default_val, int min_val, int max_val); -extern void add_real_reloption(bits32 kinds, char *name, char *desc, +extern void add_real_reloption(bits32 kinds, const char *name, const char *desc, double default_val, double min_val, double max_val); -extern void add_string_reloption(bits32 kinds, char *name, char *desc, - char *default_val, validate_string_relopt validator); +extern void add_string_reloption(bits32 kinds, const char *name, const char *desc, + const char *default_val, validate_string_relopt validator); extern Datum transformRelOptions(Datum oldOptions, List *defList, - char *namspace, char *validnsps[], + const char *namspace, char *validnsps[], bool ignoreOids, bool isReset); extern List *untransformRelOptions(Datum options); extern bytea *extractRelOptions(HeapTuple tuple, TupleDesc tupdesc, @@ -276,6 +277,7 @@ extern bytea *heap_reloptions(char relkind, Datum reloptions, bool validate); extern bytea *view_reloptions(Datum reloptions, bool validate); extern bytea *index_reloptions(amoptions_function amoptions, Datum reloptions, bool validate); +extern bytea *index_generic_reloptions(Datum reloptions, bool validate); extern bytea *attribute_reloptions(Datum reloptions, bool validate); extern bytea *tablespace_reloptions(Datum reloptions, bool validate); extern LOCKMODE AlterTableGetRelOptionsLockLevel(List *defList); diff --git a/src/include/access/relscan.h b/src/include/access/relscan.h index 147f862a2b..e5289b8aa7 100644 --- a/src/include/access/relscan.h +++ b/src/include/access/relscan.h @@ -4,7 +4,7 @@ * POSTGRES relation scan descriptor definitions. * * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/include/access/relscan.h @@ -39,8 +39,9 @@ typedef struct ParallelHeapScanDescData BlockNumber phs_startblock; /* starting block number */ pg_atomic_uint64 phs_nallocated; /* number of blocks allocated to * workers so far. */ + bool phs_snapshot_any; /* SnapshotAny, not phs_snapshot_data? 
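/*
 * Hedged sketch of registering an extension reloption against the
 * const-qualified prototypes above.  Only the add_reloption_kind /
 * add_int_reloption signatures are taken from this header; the option name,
 * bounds, and the _PG_init hook are illustrative.
 */
#include "postgres.h"
#include "access/reloptions.h"

static relopt_kind my_relopt_kind;

void        _PG_init(void);

void
_PG_init(void)
{
    my_relopt_kind = add_reloption_kind();
    add_int_reloption(my_relopt_kind,
                      "my_batch_size",      /* hypothetical option name */
                      "Illustrative per-relation batch size.",
                      64 /* default */ , 1 /* min */ , 8192 /* max */ );
}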
*/ char phs_snapshot_data[FLEXIBLE_ARRAY_MEMBER]; -} ParallelHeapScanDescData; +} ParallelHeapScanDescData; typedef struct HeapScanDescData { diff --git a/src/include/access/rewriteheap.h b/src/include/access/rewriteheap.h index 91ff36707a..cfdf33b4bd 100644 --- a/src/include/access/rewriteheap.h +++ b/src/include/access/rewriteheap.h @@ -3,7 +3,7 @@ * rewriteheap.h * Declarations for heap rewrite support functions * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994-5, Regents of the University of California * * src/include/access/rewriteheap.h @@ -45,9 +45,9 @@ typedef struct LogicalRewriteMappingData * components: * 1) database oid or InvalidOid for shared relations * 2) the oid of the relation - * 3) xid we are mapping for - * 4) upper 32bit of the LSN at which a rewrite started - * 5) lower 32bit of the LSN at which a rewrite started + * 3) upper 32bit of the LSN at which a rewrite started + * 4) lower 32bit of the LSN at which a rewrite started + * 5) xid we are mapping for * 6) xid of the xact performing the mapping * --- */ diff --git a/src/include/access/rmgrlist.h b/src/include/access/rmgrlist.h index 2f43c199d3..0bbe9879ca 100644 --- a/src/include/access/rmgrlist.h +++ b/src/include/access/rmgrlist.h @@ -6,7 +6,7 @@ * by the PG_RMGR macro, which is not defined in this file; it can be * defined by the caller for special purposes. * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/include/access/rmgrlist.h diff --git a/src/include/access/sdir.h b/src/include/access/sdir.h index 65eab48551..490bac11d3 100644 --- a/src/include/access/sdir.h +++ b/src/include/access/sdir.h @@ -4,7 +4,7 @@ * POSTGRES scan direction definitions. * * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/include/access/sdir.h diff --git a/src/include/access/session.h b/src/include/access/session.h new file mode 100644 index 0000000000..37971c1c66 --- /dev/null +++ b/src/include/access/session.h @@ -0,0 +1,44 @@ +/*------------------------------------------------------------------------- + * + * session.h + * Encapsulation of user session. + * + * Copyright (c) 2017-2018, PostgreSQL Global Development Group + * + * src/include/access/session.h + * + *------------------------------------------------------------------------- + */ +#ifndef SESSION_H +#define SESSION_H + +#include "lib/dshash.h" + +/* Avoid including typcache.h */ +struct SharedRecordTypmodRegistry; + +/* + * A struct encapsulating some elements of a user's session. For now this + * manages state that applies to parallel query, but it principle it could + * include other things that are currently global variables. + */ +typedef struct Session +{ + dsm_segment *segment; /* The session-scoped DSM segment. */ + dsa_area *area; /* The session-scoped DSA area. */ + + /* State managed by typcache.c. 
*/ + struct SharedRecordTypmodRegistry *shared_typmod_registry; + dshash_table *shared_record_table; + dshash_table *shared_typmod_table; +} Session; + +extern void InitializeSession(void); +extern dsm_handle GetSessionDsmHandle(void); +extern void AttachSession(dsm_handle handle); +extern void DetachSession(void); + +/* The current session, or NULL for none. */ +extern Session *CurrentSession; + +#endif /* SESSION_H */ diff --git a/src/include/access/skey.h b/src/include/access/skey.h index 2f4814f140..ab3bb2c8eb 100644 --- a/src/include/access/skey.h +++ b/src/include/access/skey.h @@ -4,7 +4,7 @@ * POSTGRES scan key definitions. * * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/include/access/skey.h diff --git a/src/include/access/slru.h b/src/include/access/slru.h index d829a6fab4..0e89e48c97 100644 --- a/src/include/access/slru.h +++ b/src/include/access/slru.h @@ -3,7 +3,7 @@ * slru.h * Simple LRU buffering for transaction status logfiles * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/include/access/slru.h @@ -37,7 +37,7 @@ /* * Page status codes. Note that these do not include the "dirty" bit. - * page_dirty can be TRUE only in the VALID or WRITE_IN_PROGRESS states; + * page_dirty can be true only in the VALID or WRITE_IN_PROGRESS states; * in the latter case it implies that the page has been re-dirtied since * the write started. */ diff --git a/src/include/access/spgist.h b/src/include/access/spgist.h index d1bc396e6d..9c19e9e638 100644 --- a/src/include/access/spgist.h +++ b/src/include/access/spgist.h @@ -4,7 +4,7 @@ * Public header file for SP-GiST access method. 
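/*
 * Hedged sketch of the session DSM hand-off implied by session.h above: the
 * leader exports a handle, a worker attaches to it and detaches on exit.
 * How the handle travels to the worker (e.g. via the parallel toc) is an
 * assumption, not something this header specifies; names prefixed "sketch_"
 * are hypothetical and this compiles only inside the backend.
 */
#include "postgres.h"
#include "access/session.h"

static dsm_handle
sketch_leader_export_session(void)
{
    /* export the leader's session segment handle for workers to attach to */
    return GetSessionDsmHandle();
}

static void
sketch_worker_use_session(dsm_handle handle)
{
    AttachSession(handle);      /* sets up CurrentSession for this worker */
    /* ... run work that relies on the shared record/typmod registry ... */
    DetachSession();
}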
* * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/include/access/spgist.h @@ -30,7 +30,9 @@ #define SPGIST_PICKSPLIT_PROC 3 #define SPGIST_INNER_CONSISTENT_PROC 4 #define SPGIST_LEAF_CONSISTENT_PROC 5 -#define SPGISTNProc 5 +#define SPGIST_COMPRESS_PROC 6 +#define SPGISTNRequiredProc 5 +#define SPGISTNProc 6 /* * Argument structs for spg_config method @@ -44,6 +46,7 @@ typedef struct spgConfigOut { Oid prefixType; /* Data type of inner-tuple prefixes */ Oid labelType; /* Data type of inner-tuple node labels */ + Oid leafType; /* Data type of leaf-tuple values */ bool canReturnData; /* Opclass can reconstruct original data */ bool longValuesOK; /* Opclass can cope with values > 1 page */ } spgConfigOut; @@ -133,7 +136,10 @@ typedef struct spgPickSplitOut typedef struct spgInnerConsistentIn { ScanKey scankeys; /* array of operators and comparison values */ - int nkeys; /* length of array */ + ScanKey orderbys; /* array of ordering operators and comparison + * values */ + int nkeys; /* length of scankeys array */ + int norderbys; /* length of orderbys array */ Datum reconstructedValue; /* value reconstructed at parent */ void *traversalValue; /* opclass-specific traverse value */ @@ -156,6 +162,7 @@ typedef struct spgInnerConsistentOut int *levelAdds; /* increment level by this much for each */ Datum *reconstructedValues; /* associated reconstructed values */ void **traversalValues; /* opclass-specific traverse values */ + double **distances; /* associated distances */ } spgInnerConsistentOut; /* @@ -164,7 +171,10 @@ typedef struct spgInnerConsistentOut typedef struct spgLeafConsistentIn { ScanKey scankeys; /* array of operators and comparison values */ - int nkeys; /* length of array */ + ScanKey orderbys; /* array of ordering operators and comparison + * values */ + int nkeys; /* length of scankeys array */ + int norderbys; /* length of orderbys array */ Datum reconstructedValue; /* value reconstructed at parent */ void *traversalValue; /* opclass-specific traverse value */ @@ -178,6 +188,8 @@ typedef struct spgLeafConsistentOut { Datum leafValue; /* reconstructed original data, if any */ bool recheck; /* set true if operator must be rechecked */ + bool recheckDistances; /* set true if distances must be rechecked */ + double *distances; /* associated distances */ } spgLeafConsistentOut; diff --git a/src/include/access/spgist_private.h b/src/include/access/spgist_private.h index 1c4b321b6c..d23862ea71 100644 --- a/src/include/access/spgist_private.h +++ b/src/include/access/spgist_private.h @@ -4,7 +4,7 @@ * Private declarations for SP-GiST access method. 
* * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/include/access/spgist_private.h @@ -18,6 +18,7 @@ #include "access/spgist.h" #include "nodes/tidbitmap.h" #include "storage/buf.h" +#include "utils/geo_decls.h" #include "utils/relcache.h" @@ -119,7 +120,8 @@ typedef struct SpGistState { spgConfigOut config; /* filled in by opclass config method */ - SpGistTypeDesc attType; /* type of input data and leaf values */ + SpGistTypeDesc attType; /* type of values to be indexed/restored */ + SpGistTypeDesc attLeafType; /* type of leaf-tuple values */ SpGistTypeDesc attPrefixType; /* type of inner-tuple prefix values */ SpGistTypeDesc attLabelType; /* type of node label values */ @@ -129,13 +131,35 @@ typedef struct SpGistState bool isBuild; /* true if doing index build */ } SpGistState; +typedef struct SpGistSearchItem +{ + pairingheap_node phNode; /* pairing heap node */ + Datum value; /* value reconstructed from parent or + * leafValue if heaptuple */ + void *traversalValue; /* opclass-specific traverse value */ + int level; /* level of items on this page */ + ItemPointerData heapPtr; /* heap info, if heap tuple */ + bool isNull; /* SearchItem is NULL item */ + bool isLeaf; /* SearchItem is heap item */ + bool recheck; /* qual recheck is needed */ + bool recheckDistances; /* distance recheck is needed */ + + /* array with numberOfOrderBys entries */ + double distances[FLEXIBLE_ARRAY_MEMBER]; +} SpGistSearchItem; + +#define SizeOfSpGistSearchItem(n_distances) \ + (offsetof(SpGistSearchItem, distances) + sizeof(double) * (n_distances)) + /* * Private state of an index scan */ typedef struct SpGistScanOpaqueData { SpGistState state; /* see above */ + pairingheap *scanQueue; /* queue of to be visited items */ MemoryContext tempCxt; /* short-lived memory context */ + MemoryContext traversalCxt; /* single scan lifetime memory context */ /* Control flags showing whether to search nulls and/or non-nulls */ bool searchNulls; /* scan matches (all) null entries */ @@ -144,9 +168,18 @@ typedef struct SpGistScanOpaqueData /* Index quals to be passed to opclass (null-related quals removed) */ int numberOfKeys; /* number of index qualifier conditions */ ScanKey keyData; /* array of index qualifier descriptors */ + int numberOfOrderBys; /* number of ordering operators */ + ScanKey orderByData; /* array of ordering op descriptors */ + Oid *orderByTypes; /* array of ordering op return types */ + Oid indexCollation; /* collation of index column */ - /* Stack of yet-to-be-visited pages */ - List *scanStack; /* List of ScanStackEntrys */ + /* Opclass defined functions: */ + FmgrInfo innerConsistentFn; + FmgrInfo leafConsistentFn; + + /* Pre-allocated workspace arrays: */ + double *zeroDistances; + double *infDistances; /* These fields are only used in amgetbitmap scans: */ TIDBitmap *tbm; /* bitmap being filled */ @@ -159,7 +192,10 @@ typedef struct SpGistScanOpaqueData int iPtr; /* index for scanning through same */ ItemPointerData heapPtrs[MaxIndexTuplesPerPage]; /* TIDs from cur page */ bool recheck[MaxIndexTuplesPerPage]; /* their recheck flags */ + bool recheckDistances[MaxIndexTuplesPerPage]; /* distance recheck + * flags */ HeapTuple reconTups[MaxIndexTuplesPerPage]; /* reconstructed tuples */ + double *distances[MaxIndexTuplesPerPage]; /* distances (for recheck) */ /* * Note: using MaxIndexTuplesPerPage above is a bit hokey since 
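/*
 * Standalone sketch of the SizeOfSpGistSearchItem() allocation pattern
 * above: a scan-queue item carries a flexible array with one distance per
 * ORDER BY key, so its allocated size is offsetof(..., distances) plus
 * n_distances doubles.  Struct and macro names here are illustrative only.
 */
#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>

typedef struct SketchSearchItem
{
    int         level;
    double      distances[];    /* one entry per ORDER BY key */
} SketchSearchItem;

#define SizeOfSketchSearchItem(n_distances) \
    (offsetof(SketchSearchItem, distances) + sizeof(double) * (n_distances))

int
main(void)
{
    int         n_orderbys = 2;
    SketchSearchItem *item = malloc(SizeOfSketchSearchItem(n_orderbys));

    if (item == NULL)
        return 1;
    item->level = 0;
    item->distances[0] = 1.5;   /* lower-bound distance for ORDER BY key 1 */
    item->distances[1] = 0.25;  /* lower-bound distance for ORDER BY key 2 */
    printf("allocated %zu bytes\n", SizeOfSketchSearchItem(n_orderbys));
    free(item);
    return 0;
}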
@@ -178,7 +214,8 @@ typedef struct SpGistCache { spgConfigOut config; /* filled in by opclass config method */ - SpGistTypeDesc attType; /* type of input data and leaf values */ + SpGistTypeDesc attType; /* type of values to be indexed/restored */ + SpGistTypeDesc attLeafType; /* type of leaf-tuple values */ SpGistTypeDesc attPrefixType; /* type of inner-tuple prefix values */ SpGistTypeDesc attLabelType; /* type of node label values */ @@ -300,7 +337,7 @@ typedef SpGistLeafTupleData *SpGistLeafTuple; #define SGLTHDRSZ MAXALIGN(sizeof(SpGistLeafTupleData)) #define SGLTDATAPTR(x) (((char *) (x)) + SGLTHDRSZ) -#define SGLTDATUM(x, s) ((s)->attType.attbyval ? \ +#define SGLTDATUM(x, s) ((s)->attLeafType.attbyval ? \ *(Datum *) SGLTDATAPTR(x) : \ PointerGetDatum(SGLTDATAPTR(x))) @@ -407,6 +444,9 @@ extern OffsetNumber SpGistPageAddNewItem(SpGistState *state, Page page, Item item, Size size, OffsetNumber *startOffset, bool errorOK); +extern bool spgproperty(Oid index_oid, int attno, + IndexAMProperty prop, const char *propname, + bool *res, bool *isnull); /* spgdoinsert.c */ extern void spgUpdateNodeLink(SpGistInnerTuple tup, int nodeN, @@ -418,4 +458,9 @@ extern void spgPageIndexMultiDelete(SpGistState *state, Page page, extern bool spgdoinsert(Relation index, SpGistState *state, ItemPointer heapPtr, Datum datum, bool isnull); +/* spgproc.c */ +extern double *spg_key_orderbys_distances(Datum key, bool isLeaf, + ScanKey orderbys, int norderbys); +extern BOX *box_copy(BOX *orig); + #endif /* SPGIST_PRIVATE_H */ diff --git a/src/include/access/spgxlog.h b/src/include/access/spgxlog.h index cf4331be4a..b72ccb5cc4 100644 --- a/src/include/access/spgxlog.h +++ b/src/include/access/spgxlog.h @@ -3,7 +3,7 @@ * spgxlog.h * xlog declarations for SP-GiST access method. * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/include/access/spgxlog.h diff --git a/src/include/access/stratnum.h b/src/include/access/stratnum.h index 91d57605b2..0db11a1117 100644 --- a/src/include/access/stratnum.h +++ b/src/include/access/stratnum.h @@ -4,7 +4,7 @@ * POSTGRES strategy number definitions. 
* * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/include/access/stratnum.h @@ -68,8 +68,9 @@ typedef uint16 StrategyNumber; #define RTSubEqualStrategyNumber 25 /* for inet <<= */ #define RTSuperStrategyNumber 26 /* for inet << */ #define RTSuperEqualStrategyNumber 27 /* for inet >>= */ +#define RTPrefixStrategyNumber 28 /* for text ^@ */ -#define RTMaxStrategyNumber 27 +#define RTMaxStrategyNumber 28 #endif /* STRATNUM_H */ diff --git a/src/include/access/subtrans.h b/src/include/access/subtrans.h index 41716d7b71..ce700a60de 100644 --- a/src/include/access/subtrans.h +++ b/src/include/access/subtrans.h @@ -3,7 +3,7 @@ * * PostgreSQL subtransaction-log manager * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/include/access/subtrans.h diff --git a/src/include/access/sysattr.h b/src/include/access/sysattr.h index b88c5e1141..c6f244011a 100644 --- a/src/include/access/sysattr.h +++ b/src/include/access/sysattr.h @@ -4,7 +4,7 @@ * POSTGRES system attribute definitions. * * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/include/access/sysattr.h diff --git a/src/include/access/timeline.h b/src/include/access/timeline.h index 4bdb0c1f4f..a9bf18cab6 100644 --- a/src/include/access/timeline.h +++ b/src/include/access/timeline.h @@ -3,7 +3,7 @@ * * Functions for reading and writing timeline history files. * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/include/access/timeline.h diff --git a/src/include/access/transam.h b/src/include/access/transam.h index 86076dede1..83ec3f1979 100644 --- a/src/include/access/transam.h +++ b/src/include/access/transam.h @@ -4,7 +4,7 @@ * postgres transaction access method support code * * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/include/access/transam.h diff --git a/src/include/access/tsmapi.h b/src/include/access/tsmapi.h index 3d94cc6466..3ecd4737e5 100644 --- a/src/include/access/tsmapi.h +++ b/src/include/access/tsmapi.h @@ -3,7 +3,7 @@ * tsmapi.h * API for tablesample methods * - * Copyright (c) 2015-2017, PostgreSQL Global Development Group + * Copyright (c) 2015-2018, PostgreSQL Global Development Group * * src/include/access/tsmapi.h * diff --git a/src/include/access/tupconvert.h b/src/include/access/tupconvert.h index 173904adae..34ee8e3918 100644 --- a/src/include/access/tupconvert.h +++ b/src/include/access/tupconvert.h @@ -4,7 +4,7 @@ * Tuple conversion support. 
* * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/include/access/tupconvert.h @@ -16,6 +16,7 @@ #include "access/htup.h" #include "access/tupdesc.h" +#include "executor/tuptable.h" typedef struct TupleConversionMap @@ -41,8 +42,13 @@ extern TupleConversionMap *convert_tuples_by_name(TupleDesc indesc, extern AttrNumber *convert_tuples_by_name_map(TupleDesc indesc, TupleDesc outdesc, const char *msg); +extern AttrNumber *convert_tuples_by_name_map_if_req(TupleDesc indesc, + TupleDesc outdesc, + const char *msg); -extern HeapTuple do_convert_tuple(HeapTuple tuple, TupleConversionMap *map); +extern HeapTuple execute_attr_map_tuple(HeapTuple tuple, TupleConversionMap *map); +extern TupleTableSlot *execute_attr_map_slot(AttrNumber *attrMap, + TupleTableSlot *in_slot, TupleTableSlot *out_slot); extern void free_conversion_map(TupleConversionMap *map); diff --git a/src/include/access/tupdesc.h b/src/include/access/tupdesc.h index e7065d70ba..708160f645 100644 --- a/src/include/access/tupdesc.h +++ b/src/include/access/tupdesc.h @@ -4,7 +4,7 @@ * POSTGRES tuple descriptor definitions. * * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/include/access/tupdesc.h @@ -25,6 +25,8 @@ typedef struct attrDefault char *adbin; /* nodeToString representation of expr */ } AttrDefault; +typedef struct attrMissing *MissingPtr; + typedef struct constrCheck { char *ccname; @@ -38,6 +40,7 @@ typedef struct tupleConstr { AttrDefault *defval; /* array */ ConstrCheck *check; /* array */ + MissingPtr missing; /* missing attributes values, NULL if none */ uint16 num_defval; uint16 num_check; bool has_not_null; @@ -60,6 +63,12 @@ typedef struct tupleConstr * row type, or a value >= 0 to allow the rowtype to be looked up in the * typcache.c type cache. * + * Note that tdtypeid is never the OID of a domain over composite, even if + * we are dealing with values that are known (at some higher level) to be of + * a domain-over-composite type. This is because tdtypeid/tdtypmod need to + * match up with the type labeling of composite Datums, and those are never + * explicitly marked as being of a domain type, either. + * * Tuple descriptors that live in caches (relcache or typcache, at present) * are reference-counted: they can be deleted when their reference count goes * to zero. Tuple descriptors created by the executor need no reference @@ -71,15 +80,17 @@ typedef struct tupleConstr typedef struct tupleDesc { int natts; /* number of attributes in the tuple */ - Form_pg_attribute *attrs; - /* attrs[N] is a pointer to the description of Attribute Number N+1 */ - TupleConstr *constr; /* constraints, or NULL if none */ Oid tdtypeid; /* composite type ID for tuple type */ int32 tdtypmod; /* typmod for tuple type */ bool tdhasoid; /* tuple has oid attribute in its header */ int tdrefcount; /* reference count, or -1 if not counting */ + TupleConstr *constr; /* constraints, or NULL if none */ + /* attrs[N] is the description of Attribute Number N+1 */ + FormData_pg_attribute attrs[FLEXIBLE_ARRAY_MEMBER]; } *TupleDesc; +/* Accessor for the i'th attribute of tupdesc. 
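/*
 * Standalone sketch of applying an attribute-number map of the kind
 * returned by convert_tuples_by_name_map() above, assuming the usual
 * convention: map[i] is the 1-based source column for output column i+1,
 * and 0 means "no source column" (emit NULL).  Plain long/bool arrays stand
 * in for deformed Datum/isnull arrays; all names are illustrative.
 */
#include <stdbool.h>
#include <stdio.h>

static void
sketch_apply_attr_map(const int *map, int out_natts,
                      const long *in_values, const bool *in_isnull,
                      long *out_values, bool *out_isnull)
{
    int         i;

    for (i = 0; i < out_natts; i++)
    {
        if (map[i] == 0)
        {
            out_values[i] = 0;
            out_isnull[i] = true;
        }
        else
        {
            out_values[i] = in_values[map[i] - 1];
            out_isnull[i] = in_isnull[map[i] - 1];
        }
    }
}

int
main(void)
{
    int         map[3] = {2, 0, 1}; /* out col 2 has no source column */
    long        in_values[2] = {10, 20};
    bool        in_isnull[2] = {false, false};
    long        out_values[3];
    bool        out_isnull[3];

    sketch_apply_attr_map(map, 3, in_values, in_isnull,
                          out_values, out_isnull);
    printf("out: %ld, %s, %ld\n", out_values[0],
           out_isnull[1] ? "NULL" : "?", out_values[2]);
    return 0;
}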
*/ +#define TupleDescAttr(tupdesc, i) (&(tupdesc)->attrs[(i)]) extern TupleDesc CreateTemplateTupleDesc(int natts, bool hasoid); @@ -90,6 +101,12 @@ extern TupleDesc CreateTupleDescCopy(TupleDesc tupdesc); extern TupleDesc CreateTupleDescCopyConstr(TupleDesc tupdesc); +#define TupleDescSize(src) \ + (offsetof(struct tupleDesc, attrs) + \ + (src)->natts * sizeof(FormData_pg_attribute)) + +extern void TupleDescCopy(TupleDesc dst, TupleDesc src); + extern void TupleDescCopyEntry(TupleDesc dst, AttrNumber dstAttno, TupleDesc src, AttrNumber srcAttno); @@ -112,6 +129,8 @@ extern void DecrTupleDescRefCount(TupleDesc tupdesc); extern bool equalTupleDescs(TupleDesc tupdesc1, TupleDesc tupdesc2); +extern uint32 hashTupleDesc(TupleDesc tupdesc); + extern void TupleDescInitEntry(TupleDesc desc, AttrNumber attributeNumber, const char *attributeName, diff --git a/src/include/access/tupdesc_details.h b/src/include/access/tupdesc_details.h new file mode 100644 index 0000000000..7c7a387e23 --- /dev/null +++ b/src/include/access/tupdesc_details.h @@ -0,0 +1,28 @@ +/*------------------------------------------------------------------------- + * + * tupdesc_details.h + * POSTGRES tuple descriptor definitions we can't include everywhere + * + * + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group + * Portions Copyright (c) 1994, Regents of the University of California + * + * src/include/access/tupdesc_details.h + * + *------------------------------------------------------------------------- + */ + +#ifndef TUPDESC_DETAILS_H +#define TUPDESC_DETAILS_H + +/* + * Structure used to represent value to be used when the attribute is not + * present at all in a tuple, i.e. when the column was created after the tuple + */ +typedef struct attrMissing +{ + bool am_present; /* true if non-NULL missing value exists */ + Datum am_value; /* value when attribute is missing */ +} AttrMissing; + +#endif /* TUPDESC_DETAILS_H */ diff --git a/src/include/access/tupmacs.h b/src/include/access/tupmacs.h index 6746203828..1c3741da65 100644 --- a/src/include/access/tupmacs.h +++ b/src/include/access/tupmacs.h @@ -4,7 +4,7 @@ * Tuple macros used by both index tuples and heap tuples. * * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/include/access/tupmacs.h diff --git a/src/include/access/tuptoaster.h b/src/include/access/tuptoaster.h index fd9f83ac44..f99291e30d 100644 --- a/src/include/access/tuptoaster.h +++ b/src/include/access/tuptoaster.h @@ -4,7 +4,7 @@ * POSTGRES definitions for external and compressed storage * of variable size attributes. * - * Copyright (c) 2000-2017, PostgreSQL Global Development Group + * Copyright (c) 2000-2018, PostgreSQL Global Development Group * * src/include/access/tuptoaster.h * diff --git a/src/include/access/twophase.h b/src/include/access/twophase.h index 54dec4eeaf..0e932daa48 100644 --- a/src/include/access/twophase.h +++ b/src/include/access/twophase.h @@ -4,7 +4,7 @@ * Two-phase-commit related declarations. 
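/*
 * Standalone sketch of the "missing attribute" lookup implied by
 * TupleConstr.missing and struct attrMissing above: when a tuple was
 * written before a column existed, the reader substitutes the stored
 * missing value (if present) instead of NULL.  Types are simplified (long
 * stands in for Datum) and all names are illustrative only.
 */
#include <stdbool.h>
#include <stdio.h>

typedef struct SketchMissing
{
    bool        am_present;     /* true if a non-NULL missing value exists */
    long        am_value;       /* value to use when the column is absent */
} SketchMissing;

/* Fetch 0-based attribute attno from a tuple storing phys_natts columns. */
static long
sketch_getattr(const long *tuple_values, int phys_natts,
               const SketchMissing *missing, int attno, bool *isnull)
{
    if (attno < phys_natts)
    {
        *isnull = false;
        return tuple_values[attno];
    }
    /* Column added after this tuple was written: fall back to the default. */
    if (missing != NULL && missing[attno].am_present)
    {
        *isnull = false;
        return missing[attno].am_value;
    }
    *isnull = true;
    return 0;
}

int
main(void)
{
    long        old_tuple[2] = {1, 2};  /* written when only 2 columns existed */
    SketchMissing missing[3] = {{false, 0}, {false, 0}, {true, 42}};
    bool        isnull;
    long        v = sketch_getattr(old_tuple, 2, missing, 2, &isnull);

    printf("col 3 = %ld (isnull=%d)\n", v, (int) isnull);
    return 0;
}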
* * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/include/access/twophase.h @@ -15,6 +15,7 @@ #define TWOPHASE_H #include "access/xlogdefs.h" +#include "access/xact.h" #include "datatype/timestamp.h" #include "storage/lock.h" @@ -25,7 +26,7 @@ typedef struct GlobalTransactionData *GlobalTransaction; /* GUC variable */ -extern int max_prepared_xacts; +extern PGDLLIMPORT int max_prepared_xacts; extern Size TwoPhaseShmemSize(void); extern void TwoPhaseShmemInit(void); @@ -46,6 +47,8 @@ extern bool StandbyTransactionIdIsPrepared(TransactionId xid); extern TransactionId PrescanPreparedTransactions(TransactionId **xids_p, int *nxids_p); +extern void ParsePrepareRecord(uint8 info, char *xlrec, + xl_xact_parsed_prepare *parsed); extern void StandbyRecoverPreparedTransactions(void); extern void RecoverPreparedTransactions(void); @@ -54,7 +57,7 @@ extern void CheckPointTwoPhase(XLogRecPtr redo_horizon); extern void FinishPreparedTransaction(const char *gid, bool isCommit); extern void PrepareRedoAdd(char *buf, XLogRecPtr start_lsn, - XLogRecPtr end_lsn); + XLogRecPtr end_lsn, RepOriginId origin_id); extern void PrepareRedoRemove(TransactionId xid, bool giveWarning); extern void restoreTwoPhaseData(void); #endif /* TWOPHASE_H */ diff --git a/src/include/access/twophase_rmgr.h b/src/include/access/twophase_rmgr.h index 44cd6d202f..ba9cd932a7 100644 --- a/src/include/access/twophase_rmgr.h +++ b/src/include/access/twophase_rmgr.h @@ -4,7 +4,7 @@ * Two-phase-commit resource managers definition * * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/include/access/twophase_rmgr.h diff --git a/src/include/access/valid.h b/src/include/access/valid.h index 53a7d0685a..1e2d23f645 100644 --- a/src/include/access/valid.h +++ b/src/include/access/valid.h @@ -4,7 +4,7 @@ * POSTGRES tuple qualification validity definitions. 
* * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/include/access/valid.h diff --git a/src/include/access/visibilitymap.h b/src/include/access/visibilitymap.h index da0e76d6be..b168612b4b 100644 --- a/src/include/access/visibilitymap.h +++ b/src/include/access/visibilitymap.h @@ -4,7 +4,7 @@ * visibility map interface * * - * Portions Copyright (c) 2007-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 2007-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/include/access/visibilitymap.h diff --git a/src/include/access/xact.h b/src/include/access/xact.h index ad5aad96df..689c57c592 100644 --- a/src/include/access/xact.h +++ b/src/include/access/xact.h @@ -4,7 +4,7 @@ * postgres transaction system definitions * * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/include/access/xact.h @@ -21,6 +21,13 @@ #include "storage/sinval.h" #include "utils/datetime.h" +/* + * Maximum size of Global Transaction ID (including '\0'). + * + * Note that the max value of GIDSIZE must fit in the uint16 gidlen, + * specified in TwoPhaseFileHeader. + */ +#define GIDSIZE 200 /* * Xact isolation levels @@ -156,6 +163,7 @@ typedef void (*SubXactCallback) (SubXactEvent event, SubTransactionId mySubid, #define XACT_XINFO_HAS_TWOPHASE (1U << 4) #define XACT_XINFO_HAS_ORIGIN (1U << 5) #define XACT_XINFO_HAS_AE_LOCKS (1U << 6) +#define XACT_XINFO_HAS_GID (1U << 7) /* * Also stored in xinfo, these indicating a variety of additional actions that @@ -261,6 +269,7 @@ typedef struct xl_xact_commit /* xl_xact_relfilenodes follows if XINFO_HAS_RELFILENODES */ /* xl_xact_invals follows if XINFO_HAS_INVALS */ /* xl_xact_twophase follows if XINFO_HAS_TWOPHASE */ + /* twophase_gid follows if XINFO_HAS_GID. As a null-terminated string. */ /* xl_xact_origin follows if XINFO_HAS_ORIGIN, stored unaligned! */ } xl_xact_commit; #define MinSizeOfXactCommit (offsetof(xl_xact_commit, xact_time) + sizeof(TimestampTz)) @@ -270,11 +279,13 @@ typedef struct xl_xact_abort TimestampTz xact_time; /* time of abort */ /* xl_xact_xinfo follows if XLOG_XACT_HAS_INFO */ - /* No db_info required */ + /* xl_xact_dbinfo follows if XINFO_HAS_DBINFO */ /* xl_xact_subxacts follows if HAS_SUBXACT */ /* xl_xact_relfilenodes follows if HAS_RELFILENODES */ /* No invalidation messages needed. */ /* xl_xact_twophase follows if XINFO_HAS_TWOPHASE */ + /* twophase_gid follows if XINFO_HAS_GID. As a null-terminated string. */ + /* xl_xact_origin follows if XINFO_HAS_ORIGIN, stored unaligned! 
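/*
 * Standalone sketch of the length rule stated for GIDSIZE above: a global
 * transaction identifier, including its terminating '\0', must fit in
 * GIDSIZE bytes (and hence in the uint16 gidlen of the 2PC file header).
 * The error handling is illustrative, not the backend's.
 */
#include <stdio.h>
#include <string.h>

#define SKETCH_GIDSIZE 200      /* mirrors GIDSIZE */

static int
sketch_check_gid(const char *gid)
{
    if (strlen(gid) >= SKETCH_GIDSIZE)
    {
        fprintf(stderr, "transaction identifier \"%s\" is too long\n", gid);
        return -1;
    }
    return 0;
}

int
main(void)
{
    return sketch_check_gid("my_prepared_xact") == 0 ? 0 : 1;
}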
*/ } xl_xact_abort; #define MinSizeOfXactAbort sizeof(xl_xact_abort) @@ -286,7 +297,6 @@ typedef struct xl_xact_abort typedef struct xl_xact_parsed_commit { TimestampTz xact_time; - uint32 xinfo; Oid dbId; /* MyDatabaseId */ @@ -302,16 +312,24 @@ typedef struct xl_xact_parsed_commit SharedInvalidationMessage *msgs; TransactionId twophase_xid; /* only for 2PC */ + char twophase_gid[GIDSIZE]; /* only for 2PC */ + int nabortrels; /* only for 2PC */ + RelFileNode *abortnodes; /* only for 2PC */ XLogRecPtr origin_lsn; TimestampTz origin_timestamp; } xl_xact_parsed_commit; +typedef xl_xact_parsed_commit xl_xact_parsed_prepare; + typedef struct xl_xact_parsed_abort { TimestampTz xact_time; uint32 xinfo; + Oid dbId; /* MyDatabaseId */ + Oid tsId; /* MyDatabaseTableSpace */ + int nsubxacts; TransactionId *subxacts; @@ -319,6 +337,10 @@ typedef struct xl_xact_parsed_abort RelFileNode *xnodes; TransactionId twophase_xid; /* only for 2PC */ + char twophase_gid[GIDSIZE]; /* only for 2PC */ + + XLogRecPtr origin_lsn; + TimestampTz origin_timestamp; } xl_xact_parsed_abort; @@ -337,6 +359,7 @@ extern SubTransactionId GetCurrentSubTransactionId(void); extern void MarkCurrentTransactionIdLoggedIfAny(void); extern bool SubTransactionIsActive(SubTransactionId subxid); extern CommandId GetCurrentCommandId(bool used); +extern void SetParallelStartTimestamps(TimestampTz xact_ts, TimestampTz stmt_ts); extern TimestampTz GetCurrentTransactionStartTimestamp(void); extern TimestampTz GetCurrentStatementStartTimestamp(void); extern TimestampTz GetCurrentTransactionStopTimestamp(void); @@ -350,12 +373,14 @@ extern void CommitTransactionCommand(void); extern void AbortCurrentTransaction(void); extern void BeginTransactionBlock(void); extern bool EndTransactionBlock(void); -extern bool PrepareTransactionBlock(char *gid); +extern bool PrepareTransactionBlock(const char *gid); extern void UserAbortTransactionBlock(void); -extern void ReleaseSavepoint(List *options); -extern void DefineSavepoint(char *name); -extern void RollbackToSavepoint(List *options); -extern void BeginInternalSubTransaction(char *name); +extern void BeginImplicitTransactionBlock(void); +extern void EndImplicitTransactionBlock(void); +extern void ReleaseSavepoint(const char *name); +extern void DefineSavepoint(const char *name); +extern void RollbackToSavepoint(const char *name); +extern void BeginInternalSubTransaction(const char *name); extern void ReleaseCurrentSubTransaction(void); extern void RollbackAndReleaseCurrentSubTransaction(void); extern bool IsSubTransaction(void); @@ -367,10 +392,10 @@ extern bool IsTransactionBlock(void); extern bool IsTransactionOrTransactionBlock(void); extern char TransactionBlockStatusCode(void); extern void AbortOutOfAnyTransaction(void); -extern void PreventTransactionChain(bool isTopLevel, const char *stmtType); -extern void RequireTransactionChain(bool isTopLevel, const char *stmtType); -extern void WarnNoTransactionChain(bool isTopLevel, const char *stmtType); -extern bool IsInTransactionChain(bool isTopLevel); +extern void PreventInTransactionBlock(bool isTopLevel, const char *stmtType); +extern void RequireTransactionBlock(bool isTopLevel, const char *stmtType); +extern void WarnNoTransactionBlock(bool isTopLevel, const char *stmtType); +extern bool IsInTransactionBlock(bool isTopLevel); extern void RegisterXactCallback(XactCallback callback, void *arg); extern void UnregisterXactCallback(XactCallback callback, void *arg); extern void RegisterSubXactCallback(SubXactCallback callback, void *arg); @@ -384,12 
+409,14 @@ extern XLogRecPtr XactLogCommitRecord(TimestampTz commit_time, int nmsgs, SharedInvalidationMessage *msgs, bool relcacheInval, bool forceSync, int xactflags, - TransactionId twophase_xid); + TransactionId twophase_xid, + const char *twophase_gid); extern XLogRecPtr XactLogAbortRecord(TimestampTz abort_time, int nsubxacts, TransactionId *subxacts, int nrels, RelFileNode *rels, - int xactflags, TransactionId twophase_xid); + int xactflags, TransactionId twophase_xid, + const char *twophase_gid); extern void xact_redo(XLogReaderState *record); /* xactdesc.c */ diff --git a/src/include/access/xlog.h b/src/include/access/xlog.h index 66bfb77295..e01d12eb7c 100644 --- a/src/include/access/xlog.h +++ b/src/include/access/xlog.h @@ -3,7 +3,7 @@ * * PostgreSQL write-ahead log manager * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/include/access/xlog.h @@ -43,7 +43,7 @@ extern bool InRecovery; /* * Like InRecovery, standbyState is only valid in the startup process. * In all other processes it will have the value STANDBY_DISABLED (so - * InHotStandby will read as FALSE). + * InHotStandby will read as false). * * In DISABLED state, we're performing crash recovery or hot standby was * disabled in postgresql.conf. @@ -94,6 +94,7 @@ extern PGDLLIMPORT XLogRecPtr XactLastCommitEnd; extern bool reachedConsistency; /* these variables are GUC parameters related to XLOG */ +extern int wal_segment_size; extern int min_wal_size_mb; extern int max_wal_size_mb; extern int wal_keep_segments; @@ -261,6 +262,7 @@ extern XLogRecPtr GetFakeLSNForUnloggedRel(void); extern Size XLOGShmemSize(void); extern void XLOGShmemInit(void); extern void BootStrapXLOG(void); +extern void LocalProcessControlFile(bool reset); extern void StartupXLOG(void); extern void ShutdownXLOG(int code, Datum arg); extern void InitXLOGAccess(void); @@ -308,7 +310,7 @@ typedef enum SessionBackupState } SessionBackupState; extern XLogRecPtr do_pg_start_backup(const char *backupidstr, bool fast, - TimeLineID *starttli_p, StringInfo labelfile, DIR *tblspcdir, + TimeLineID *starttli_p, StringInfo labelfile, List **tablespaces, StringInfo tblspcmapfile, bool infotbssize, bool needtblspcmapfile); extern XLogRecPtr do_pg_stop_backup(char *labelfile, bool waitforarchive, @@ -317,10 +319,16 @@ extern void do_pg_abort_backup(void); extern SessionBackupState get_backup_status(void); /* File path names (all relative to $PGDATA) */ +#define RECOVERY_COMMAND_FILE "recovery.conf" +#define RECOVERY_COMMAND_DONE "recovery.done" #define BACKUP_LABEL_FILE "backup_label" #define BACKUP_LABEL_OLD "backup_label.old" #define TABLESPACE_MAP "tablespace_map" #define TABLESPACE_MAP_OLD "tablespace_map.old" +/* files to signal promotion to primary */ +#define PROMOTE_SIGNAL_FILE "promote" +#define FALLBACK_PROMOTE_SIGNAL_FILE "fallback_promote" + #endif /* XLOG_H */ diff --git a/src/include/access/xlog_internal.h b/src/include/access/xlog_internal.h index 7453dcbd0e..30610b3ea9 100644 --- a/src/include/access/xlog_internal.h +++ b/src/include/access/xlog_internal.h @@ -11,7 +11,7 @@ * Note: This file must be includable in both frontend and backend contexts, * to allow stand-alone tools like pg_receivewal to deal with WAL files. 
* - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/include/access/xlog_internal.h @@ -31,7 +31,7 @@ /* * Each page of XLOG file has a header like this: */ -#define XLOG_PAGE_MAGIC 0xD097 /* can be used as WAL version indicator */ +#define XLOG_PAGE_MAGIC 0xD098 /* can be used as WAL version indicator */ typedef struct XLogPageHeaderData { @@ -85,15 +85,27 @@ typedef XLogLongPageHeaderData *XLogLongPageHeader; #define XLogPageHeaderSize(hdr) \ (((hdr)->xlp_info & XLP_LONG_HEADER) ? SizeOfXLogLongPHD : SizeOfXLogShortPHD) -/* - * The XLOG is split into WAL segments (physical files) of the size indicated - * by XLOG_SEG_SIZE. - */ -#define XLogSegSize ((uint32) XLOG_SEG_SIZE) -#define XLogSegmentsPerXLogId (UINT64CONST(0x100000000) / XLOG_SEG_SIZE) +/* wal_segment_size can range from 1MB to 1GB */ +#define WalSegMinSize 1024 * 1024 +#define WalSegMaxSize 1024 * 1024 * 1024 +/* default number of min and max wal segments */ +#define DEFAULT_MIN_WAL_SEGS 5 +#define DEFAULT_MAX_WAL_SEGS 64 + +/* check that the given size is a valid wal_segment_size */ +#define IsPowerOf2(x) (x > 0 && ((x) & ((x)-1)) == 0) +#define IsValidWalSegSize(size) \ + (IsPowerOf2(size) && \ + ((size) >= WalSegMinSize && (size) <= WalSegMaxSize)) + +#define XLogSegmentsPerXLogId(wal_segsz_bytes) \ + (UINT64CONST(0x100000000) / (wal_segsz_bytes)) + +#define XLogSegNoOffsetToRecPtr(segno, offset, wal_segsz_bytes, dest) \ + (dest) = (segno) * (wal_segsz_bytes) + (offset) -#define XLogSegNoOffsetToRecPtr(segno, offset, dest) \ - (dest) = (segno) * XLOG_SEG_SIZE + (offset) +#define XLogSegmentOffset(xlogptr, wal_segsz_bytes) \ + ((xlogptr) & ((wal_segsz_bytes) - 1)) /* * Compute a segment number from an XLogRecPtr. @@ -103,11 +115,11 @@ typedef XLogLongPageHeaderData *XLogLongPageHeader; * for deciding which segment to write given a pointer to a record end, * for example. */ -#define XLByteToSeg(xlrp, logSegNo) \ - logSegNo = (xlrp) / XLogSegSize +#define XLByteToSeg(xlrp, logSegNo, wal_segsz_bytes) \ + logSegNo = (xlrp) / (wal_segsz_bytes) -#define XLByteToPrevSeg(xlrp, logSegNo) \ - logSegNo = ((xlrp) - 1) / XLogSegSize +#define XLByteToPrevSeg(xlrp, logSegNo, wal_segsz_bytes) \ + logSegNo = ((xlrp) - 1) / (wal_segsz_bytes) /* * Is an XLogRecPtr within a particular XLOG segment? @@ -115,11 +127,11 @@ typedef XLogLongPageHeaderData *XLogLongPageHeader; * For XLByteInSeg, do the computation at face value. For XLByteInPrevSeg, * a boundary byte is taken to be in the previous segment. 
*/ -#define XLByteInSeg(xlrp, logSegNo) \ - (((xlrp) / XLogSegSize) == (logSegNo)) +#define XLByteInSeg(xlrp, logSegNo, wal_segsz_bytes) \ + (((xlrp) / (wal_segsz_bytes)) == (logSegNo)) -#define XLByteInPrevSeg(xlrp, logSegNo) \ - ((((xlrp) - 1) / XLogSegSize) == (logSegNo)) +#define XLByteInPrevSeg(xlrp, logSegNo, wal_segsz_bytes) \ + ((((xlrp) - 1) / (wal_segsz_bytes)) == (logSegNo)) /* Check if an XLogRecPtr value is in a plausible range */ #define XRecOffIsValid(xlrp) \ @@ -140,10 +152,10 @@ typedef XLogLongPageHeaderData *XLogLongPageHeader; /* Length of XLog file name */ #define XLOG_FNAME_LEN 24 -#define XLogFileName(fname, tli, logSegNo) \ +#define XLogFileName(fname, tli, logSegNo, wal_segsz_bytes) \ snprintf(fname, MAXFNAMELEN, "%08X%08X%08X", tli, \ - (uint32) ((logSegNo) / XLogSegmentsPerXLogId), \ - (uint32) ((logSegNo) % XLogSegmentsPerXLogId)) + (uint32) ((logSegNo) / XLogSegmentsPerXLogId(wal_segsz_bytes)), \ + (uint32) ((logSegNo) % XLogSegmentsPerXLogId(wal_segsz_bytes))) #define XLogFileNameById(fname, tli, log, seg) \ snprintf(fname, MAXFNAMELEN, "%08X%08X%08X", tli, log, seg) @@ -162,18 +174,18 @@ typedef XLogLongPageHeaderData *XLogLongPageHeader; strspn(fname, "0123456789ABCDEF") == XLOG_FNAME_LEN && \ strcmp((fname) + XLOG_FNAME_LEN, ".partial") == 0) -#define XLogFromFileName(fname, tli, logSegNo) \ +#define XLogFromFileName(fname, tli, logSegNo, wal_segsz_bytes) \ do { \ uint32 log; \ uint32 seg; \ sscanf(fname, "%08X%08X%08X", tli, &log, &seg); \ - *logSegNo = (uint64) log * XLogSegmentsPerXLogId + seg; \ + *logSegNo = (uint64) log * XLogSegmentsPerXLogId(wal_segsz_bytes) + seg; \ } while (0) -#define XLogFilePath(path, tli, logSegNo) \ - snprintf(path, MAXPGPATH, XLOGDIR "/%08X%08X%08X", tli, \ - (uint32) ((logSegNo) / XLogSegmentsPerXLogId), \ - (uint32) ((logSegNo) % XLogSegmentsPerXLogId)) +#define XLogFilePath(path, tli, logSegNo, wal_segsz_bytes) \ + snprintf(path, MAXPGPATH, XLOGDIR "/%08X%08X%08X", tli, \ + (uint32) ((logSegNo) / XLogSegmentsPerXLogId(wal_segsz_bytes)), \ + (uint32) ((logSegNo) % XLogSegmentsPerXLogId(wal_segsz_bytes))) #define TLHistoryFileName(fname, tli) \ snprintf(fname, MAXFNAMELEN, "%08X.history", tli) @@ -189,20 +201,22 @@ typedef XLogLongPageHeaderData *XLogLongPageHeader; #define StatusFilePath(path, xlog, suffix) \ snprintf(path, MAXPGPATH, XLOGDIR "/archive_status/%s%s", xlog, suffix) -#define BackupHistoryFileName(fname, tli, logSegNo, offset) \ +#define BackupHistoryFileName(fname, tli, logSegNo, startpoint, wal_segsz_bytes) \ snprintf(fname, MAXFNAMELEN, "%08X%08X%08X.%08X.backup", tli, \ - (uint32) ((logSegNo) / XLogSegmentsPerXLogId), \ - (uint32) ((logSegNo) % XLogSegmentsPerXLogId), offset) + (uint32) ((logSegNo) / XLogSegmentsPerXLogId(wal_segsz_bytes)), \ + (uint32) ((logSegNo) % XLogSegmentsPerXLogId(wal_segsz_bytes)), \ + (uint32) (XLogSegmentOffset(startpoint, wal_segsz_bytes))) #define IsBackupHistoryFileName(fname) \ (strlen(fname) > XLOG_FNAME_LEN && \ strspn(fname, "0123456789ABCDEF") == XLOG_FNAME_LEN && \ strcmp((fname) + strlen(fname) - strlen(".backup"), ".backup") == 0) -#define BackupHistoryFilePath(path, tli, logSegNo, offset) \ +#define BackupHistoryFilePath(path, tli, logSegNo, startpoint, wal_segsz_bytes) \ snprintf(path, MAXPGPATH, XLOGDIR "/%08X%08X%08X.%08X.backup", tli, \ - (uint32) ((logSegNo) / XLogSegmentsPerXLogId), \ - (uint32) ((logSegNo) % XLogSegmentsPerXLogId), offset) + (uint32) ((logSegNo) / XLogSegmentsPerXLogId(wal_segsz_bytes)), \ + (uint32) ((logSegNo) % 
XLogSegmentsPerXLogId(wal_segsz_bytes)), \ + (uint32) (XLogSegmentOffset((startpoint), wal_segsz_bytes))) /* * Information logged when we detect a change in one of the parameters @@ -307,9 +321,9 @@ extern char *recoveryRestoreCommand; extern bool RestoreArchivedFile(char *path, const char *xlogfname, const char *recovername, off_t expectedSize, bool cleanupEnabled); -extern void ExecuteRecoveryCommand(char *command, char *commandName, +extern void ExecuteRecoveryCommand(const char *command, const char *commandName, bool failOnerror); -extern void KeepFileRestoredFromArchive(char *path, char *xlogfname); +extern void KeepFileRestoredFromArchive(const char *path, const char *xlogfname); extern void XLogArchiveNotify(const char *xlog); extern void XLogArchiveNotifySeg(XLogSegNo segno); extern void XLogArchiveForceDone(const char *xlog); diff --git a/src/include/access/xlogdefs.h b/src/include/access/xlogdefs.h index 3a80d6be6f..0a48d1cfb4 100644 --- a/src/include/access/xlogdefs.h +++ b/src/include/access/xlogdefs.h @@ -4,7 +4,7 @@ * Postgres write-ahead log manager record pointer and * timeline number definitions * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/include/access/xlogdefs.h diff --git a/src/include/access/xloginsert.h b/src/include/access/xloginsert.h index 174c88677f..fa62f915af 100644 --- a/src/include/access/xloginsert.h +++ b/src/include/access/xloginsert.h @@ -3,7 +3,7 @@ * * Functions for generating WAL records * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/include/access/xloginsert.h diff --git a/src/include/access/xlogreader.h b/src/include/access/xlogreader.h index 7671598334..40116f8ecb 100644 --- a/src/include/access/xlogreader.h +++ b/src/include/access/xlogreader.h @@ -3,7 +3,7 @@ * xlogreader.h * Definitions for the generic XLog reading facility * - * Portions Copyright (c) 2013-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 2013-2018, PostgreSQL Global Development Group * * IDENTIFICATION * src/include/access/xlogreader.h @@ -73,6 +73,11 @@ struct XLogReaderState * ---------------------------------------- */ + /* + * Segment size of the to-be-parsed data (mandatory). + */ + int wal_segment_size; + /* * Data input callback (mandatory). * @@ -189,7 +194,8 @@ struct XLogReaderState }; /* Get a new XLogReader */ -extern XLogReaderState *XLogReaderAllocate(XLogPageReadCB pagereadfunc, +extern XLogReaderState *XLogReaderAllocate(int wal_segment_size, + XLogPageReadCB pagereadfunc, void *private_data); /* Free an XLogReader */ @@ -199,6 +205,10 @@ extern void XLogReaderFree(XLogReaderState *state); extern struct XLogRecord *XLogReadRecord(XLogReaderState *state, XLogRecPtr recptr, char **errormsg); +/* Validate a page */ +extern bool XLogReaderValidatePageHeader(XLogReaderState *state, + XLogRecPtr recptr, char *phdr); + /* Invalidate read state */ extern void XLogReaderInvalReadState(XLogReaderState *state); diff --git a/src/include/access/xlogrecord.h b/src/include/access/xlogrecord.h index b53960e112..863781937e 100644 --- a/src/include/access/xlogrecord.h +++ b/src/include/access/xlogrecord.h @@ -3,7 +3,7 @@ * * Definitions for the WAL record format. 
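/*
 * Standalone sketch of the variable-segment-size arithmetic introduced
 * above: the WAL segment size is now a run-time value, so the segment
 * number, the in-segment offset, and the on-disk file name are all derived
 * from wal_segsz_bytes instead of a compile-time XLOG_SEG_SIZE.  This
 * mirrors IsValidWalSegSize / XLByteToSeg / XLogSegmentOffset /
 * XLogFileName; names prefixed "SKETCH_"/"sketch_" are illustrative.
 */
#include <stdint.h>
#include <stdio.h>

#define SKETCH_IS_POWER_OF_2(x) ((x) > 0 && ((x) & ((x) - 1)) == 0)
#define SKETCH_MIN_SEG          (1024 * 1024)           /* 1 MB */
#define SKETCH_MAX_SEG          (1024 * 1024 * 1024)    /* 1 GB */

static int
sketch_valid_segsize(uint64_t size)
{
    return SKETCH_IS_POWER_OF_2(size) &&
        size >= SKETCH_MIN_SEG && size <= SKETCH_MAX_SEG;
}

int
main(void)
{
    uint64_t    wal_segsz_bytes = 16 * 1024 * 1024;     /* 16 MB default */
    uint64_t    lsn = UINT64_C(0x000000012A3B4C5D);     /* some XLogRecPtr */
    uint32_t    tli = 1;
    uint64_t    segs_per_id = UINT64_C(0x100000000) / wal_segsz_bytes;
    uint64_t    segno = lsn / wal_segsz_bytes;          /* XLByteToSeg */
    uint64_t    offset = lsn & (wal_segsz_bytes - 1);   /* XLogSegmentOffset */
    char        fname[64];

    if (!sketch_valid_segsize(wal_segsz_bytes))
        return 1;

    /* XLogFileName: TLI, then segno split into "xlog id" and remainder */
    snprintf(fname, sizeof(fname), "%08X%08X%08X", tli,
             (uint32_t) (segno / segs_per_id),
             (uint32_t) (segno % segs_per_id));
    printf("segno=%llu offset=%llu file=%s\n",
           (unsigned long long) segno, (unsigned long long) offset, fname);
    return 0;
}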
* - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/include/access/xlogrecord.h diff --git a/src/include/access/xlogutils.h b/src/include/access/xlogutils.h index 114ffbcc53..c406699936 100644 --- a/src/include/access/xlogutils.h +++ b/src/include/access/xlogutils.h @@ -3,7 +3,7 @@ * * Utilities for replaying WAL records. * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/include/access/xlogutils.h diff --git a/src/include/bootstrap/bootstrap.h b/src/include/bootstrap/bootstrap.h index 35eb9a4ff5..7856669ff9 100644 --- a/src/include/bootstrap/bootstrap.h +++ b/src/include/bootstrap/bootstrap.h @@ -4,7 +4,7 @@ * include file for the bootstrapping code * * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/include/bootstrap/bootstrap.h @@ -44,8 +44,6 @@ extern void InsertOneTuple(Oid objectid); extern void InsertOneValue(char *value, int i); extern void InsertOneNull(int i); -extern char *MapArrayTypeName(const char *s); - extern void index_register(Oid heap, Oid ind, IndexInfo *indexInfo); extern void build_indices(void); diff --git a/src/include/c.h b/src/include/c.h index 60e3fa333f..08c554aaa5 100644 --- a/src/include/c.h +++ b/src/include/c.h @@ -9,7 +9,7 @@ * polluting the namespace with lots of stuff... * * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/include/c.h @@ -26,19 +26,20 @@ * section description * ------- ------------------------------------------------ * 0) pg_config.h and standard system headers - * 1) hacks to cope with non-ANSI C compilers - * 2) bool, true, false, TRUE, FALSE, NULL + * 1) compiler characteristics + * 2) bool, true, false * 3) standard system types * 4) IsValid macros for system types - * 5) offsetof, lengthof, endof, alignment + * 5) offsetof, lengthof, alignment * 6) assertions * 7) widely useful macros * 8) random stuff * 9) system-specific hacks * - * NOTE: since this file is included by both frontend and backend modules, it's - * almost certainly wrong to put an "extern" declaration here. typedefs and - * macros are the kind of thing that might go here. + * NOTE: since this file is included by both frontend and backend modules, + * it's usually wrong to put an "extern" declaration here, unless it's + * ifdef'd so that it's seen in only one case or the other. + * typedefs and macros are the kind of thing that might go here. * *---------------------------------------------------------------- */ @@ -52,36 +53,9 @@ #include "pg_config.h" #include "pg_config_manual.h" /* must be after pg_config.h */ - -/* - * We always rely on the WIN32 macro being set by our build system, - * but _WIN32 is the compiler pre-defined macro. So make sure we define - * WIN32 whenever _WIN32 is set, to facilitate standalone building. 
- */ -#if defined(_WIN32) && !defined(WIN32) -#define WIN32 -#endif - -#if !defined(WIN32) && !defined(__CYGWIN__) /* win32 includes further down */ #include "pg_config_os.h" /* must be before any system header files */ -#endif - -#if _MSC_VER >= 1400 || defined(HAVE_CRTDEFS_H) -#define errcode __msvc_errcode -#include -#undef errcode -#endif - -#if _MSC_VER >= 1800 && defined(USE_FP_STRICT) -#pragma fenv_access (off) -#endif - -/* - * We have to include stdlib.h here because it defines many of these macros - * on some platforms, and we only want our definitions used if stdlib.h doesn't - * have its own. The same goes for stddef and stdarg if present. - */ +/* System header files that should be available everywhere in Postgres */ #include #include #include @@ -94,61 +68,145 @@ #include #endif #include - #include #if defined(WIN32) || defined(__CYGWIN__) #include /* ensure O_BINARY is available */ #endif - -#if defined(WIN32) || defined(__CYGWIN__) -/* We have to redefine some system functions after they are included above. */ -#include "pg_config_os.h" +#include +#ifdef ENABLE_NLS +#include #endif -/* - * Force disable inlining if PG_FORCE_DISABLE_INLINE is defined. This is used - * to work around compiler bugs and might also be useful for investigatory - * purposes by defining the symbol in the platform's header.. + +/* ---------------------------------------------------------------- + * Section 1: compiler characteristics * - * This is done early (in slightly the wrong section) as functionality later - * in this file might want to rely on inline functions. + * type prefixes (const, signed, volatile, inline) are handled in pg_config.h. + * ---------------------------------------------------------------- + */ + +/* + * Disable "inline" if PG_FORCE_DISABLE_INLINE is defined. + * This is used to work around compiler bugs and might also be useful for + * investigatory purposes. */ #ifdef PG_FORCE_DISABLE_INLINE #undef inline #define inline #endif -/* Must be before gettext() games below */ -#include +/* + * Attribute macros + * + * GCC: https://gcc.gnu.org/onlinedocs/gcc/Function-Attributes.html + * GCC: https://gcc.gnu.org/onlinedocs/gcc/Type-Attributes.html + * Sunpro: https://docs.oracle.com/cd/E18659_01/html/821-1384/gjzke.html + * XLC: http://www-01.ibm.com/support/knowledgecenter/SSGH2K_11.1.0/com.ibm.xlc111.aix.doc/language_ref/function_attributes.html + * XLC: http://www-01.ibm.com/support/knowledgecenter/SSGH2K_11.1.0/com.ibm.xlc111.aix.doc/language_ref/type_attrib.html + */ -#define _(x) gettext(x) +/* only GCC supports the unused attribute */ +#ifdef __GNUC__ +#define pg_attribute_unused() __attribute__((unused)) +#else +#define pg_attribute_unused() +#endif -#ifdef ENABLE_NLS -#include +/* + * Append PG_USED_FOR_ASSERTS_ONLY to definitions of variables that are only + * used in assert-enabled builds, to avoid compiler warnings about unused + * variables in assert-disabled builds. + */ +#ifdef USE_ASSERT_CHECKING +#define PG_USED_FOR_ASSERTS_ONLY #else -#define gettext(x) (x) -#define dgettext(d,x) (x) -#define ngettext(s,p,n) ((n) == 1 ? (s) : (p)) -#define dngettext(d,s,p,n) ((n) == 1 ? 
(s) : (p)) +#define PG_USED_FOR_ASSERTS_ONLY pg_attribute_unused() +#endif + +/* GCC and XLC support format attributes */ +#if defined(__GNUC__) || defined(__IBMC__) +#define pg_attribute_format_arg(a) __attribute__((format_arg(a))) +#define pg_attribute_printf(f,a) __attribute__((format(PG_PRINTF_ATTRIBUTE, f, a))) +#else +#define pg_attribute_format_arg(a) +#define pg_attribute_printf(f,a) #endif +/* GCC, Sunpro and XLC support aligned, packed and noreturn */ +#if defined(__GNUC__) || defined(__SUNPRO_C) || defined(__IBMC__) +#define pg_attribute_aligned(a) __attribute__((aligned(a))) +#define pg_attribute_noreturn() __attribute__((noreturn)) +#define pg_attribute_packed() __attribute__((packed)) +#define HAVE_PG_ATTRIBUTE_NORETURN 1 +#else /* - * Use this to mark string constants as needing translation at some later - * time, rather than immediately. This is useful for cases where you need - * access to the original string and translated string, and for cases where - * immediate translation is not possible, like when initializing global - * variables. - * http://www.gnu.org/software/autoconf/manual/gettext/Special-cases.html + * NB: aligned and packed are not given default definitions because they + * affect code functionality; they *must* be implemented by the compiler + * if they are to be used. */ -#define gettext_noop(x) (x) +#define pg_attribute_noreturn() +#endif +/* + * Use "pg_attribute_always_inline" in place of "inline" for functions that + * we wish to force inlining of, even when the compiler's heuristics would + * choose not to. But, if possible, don't force inlining in unoptimized + * debug builds. + */ +#if (defined(__GNUC__) && __GNUC__ > 3 && defined(__OPTIMIZE__)) || defined(__SUNPRO_C) || defined(__IBMC__) +/* GCC > 3, Sunpro and XLC support always_inline via __attribute__ */ +#define pg_attribute_always_inline __attribute__((always_inline)) inline +#elif defined(_MSC_VER) +/* MSVC has a special keyword for this */ +#define pg_attribute_always_inline __forceinline +#else +/* Otherwise, the best we can do is to say "inline" */ +#define pg_attribute_always_inline inline +#endif -/* ---------------------------------------------------------------- - * Section 1: hacks to cope with non-ANSI C compilers +/* + * Forcing a function not to be inlined can be useful if it's the slow path of + * a performance-critical function, or should be visible in profiles to allow + * for proper cost attribution. Note that unlike the pg_attribute_XXX macros + * above, this should be placed before the function's return type and name. + */ +/* GCC, Sunpro and XLC support noinline via __attribute__ */ +#if (defined(__GNUC__) && __GNUC__ > 2) || defined(__SUNPRO_C) || defined(__IBMC__) +#define pg_noinline __attribute__((noinline)) +/* msvc via declspec */ +#elif defined(_MSC_VER) +#define pg_noinline __declspec(noinline) +#else +#define pg_noinline +#endif + +/* + * Mark a point as unreachable in a portable fashion. This should preferably + * be something that the compiler understands, to aid code generation. + * In assert-enabled builds, we prefer abort() for debugging reasons. + */ +#if defined(HAVE__BUILTIN_UNREACHABLE) && !defined(USE_ASSERT_CHECKING) +#define pg_unreachable() __builtin_unreachable() +#elif defined(_MSC_VER) && !defined(USE_ASSERT_CHECKING) +#define pg_unreachable() __assume(0) +#else +#define pg_unreachable() abort() +#endif + +/* + * Hints to the compiler about the likelihood of a branch. 
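/*
 * Illustrative sketch, not taken from the patch: how the attribute macros
 * gathered into Section 1 above are typically applied.  The function names
 * are invented; only the macros come from c.h (assumed to be included).
 */
/* printf-style checking: the format string is argument 1, values start at 2 */
extern void log_message(const char *fmt, ...) pg_attribute_printf(1, 2);

/* keep a cold path out of line; note the placement before the return type */
pg_noinline int
rarely_taken_path(int x)
{
	return x * 37;
}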
Both likely() and + * unlikely() return the boolean value of the contained expression. * - * type prefixes (const, signed, volatile, inline) are handled in pg_config.h. - * ---------------------------------------------------------------- + * These should only be used sparingly, in very hot code paths. It's very easy + * to mis-estimate likelihoods. */ +#if __GNUC__ >= 3 +#define likely(x) __builtin_expect((x) != 0, 1) +#define unlikely(x) __builtin_expect((x) != 0, 0) +#else +#define likely(x) ((x) != 0) +#define unlikely(x) ((x) != 0) +#endif /* * CppAsString @@ -166,6 +224,39 @@ #define CppAsString2(x) CppAsString(x) #define CppConcat(x, y) x##y +/* + * VA_ARGS_NARGS + * Returns the number of macro arguments it is passed. + * + * An empty argument still counts as an argument, so effectively, this is + * "one more than the number of commas in the argument list". + * + * This works for up to 63 arguments. Internally, VA_ARGS_NARGS_() is passed + * 64+N arguments, and the C99 standard only requires macros to allow up to + * 127 arguments, so we can't portably go higher. The implementation is + * pretty trivial: VA_ARGS_NARGS_() returns its 64th argument, and we set up + * the call so that that is the appropriate one of the list of constants. + * This idea is due to Laurent Deniau. + */ +#define VA_ARGS_NARGS(...) \ + VA_ARGS_NARGS_(__VA_ARGS__, \ + 63,62,61,60, \ + 59,58,57,56,55,54,53,52,51,50, \ + 49,48,47,46,45,44,43,42,41,40, \ + 39,38,37,36,35,34,33,32,31,30, \ + 29,28,27,26,25,24,23,22,21,20, \ + 19,18,17,16,15,14,13,12,11,10, \ + 9, 8, 7, 6, 5, 4, 3, 2, 1, 0) +#define VA_ARGS_NARGS_( \ + _01,_02,_03,_04,_05,_06,_07,_08,_09,_10, \ + _11,_12,_13,_14,_15,_16,_17,_18,_19,_20, \ + _21,_22,_23,_24,_25,_26,_27,_28,_29,_30, \ + _31,_32,_33,_34,_35,_36,_37,_38,_39,_40, \ + _41,_42,_43,_44,_45,_46,_47,_48,_49,_50, \ + _51,_52,_53,_54,_55,_56,_57,_58,_59,_60, \ + _61,_62,_63, N, ...) \ + (N) + /* * dummyret is used to set return values in macros that use ?: to make * assignments. gcc wants these to be void, other compilers like char @@ -187,8 +278,9 @@ #endif #endif + /* ---------------------------------------------------------------- - * Section 2: bool, true, false, TRUE, FALSE, NULL + * Section 2: bool, true, false * ---------------------------------------------------------------- */ @@ -196,12 +288,22 @@ * bool * Boolean value, either true or false. * - * XXX for C++ compilers, we assume the compiler has a compatible - * built-in definition of bool. + * Use stdbool.h if available and its bool has size 1. That's useful for + * better compiler and debugger output and for compatibility with third-party + * libraries. But PostgreSQL currently cannot deal with bool of other sizes; + * there are static assertions around the code to prevent that. + * + * For C++ compilers, we assume the compiler has a compatible built-in + * definition of bool. */ #ifndef __cplusplus +#if defined(HAVE_STDBOOL_H) && SIZEOF_BOOL == 1 +#include +#define USE_STDBOOL 1 +#else + #ifndef bool typedef char bool; #endif @@ -213,25 +315,9 @@ typedef char bool; #ifndef false #define false ((bool) 0) #endif -#endif /* not C++ */ - -typedef bool *BoolPtr; - -#ifndef TRUE -#define TRUE 1 -#endif - -#ifndef FALSE -#define FALSE 0 -#endif -/* - * NULL - * Null pointer. 
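/*
 * Illustrative sketch, not taken from the patch: a cut-down, 8-argument copy
 * of the VA_ARGS_NARGS() trick, showing the sliding-constant mechanism in
 * isolation.  The real macro works the same way for up to 63 arguments.
 */
#include <stdio.h>

#define NARGS(...)	NARGS_(__VA_ARGS__, 8, 7, 6, 5, 4, 3, 2, 1, 0)
#define NARGS_(_1, _2, _3, _4, _5, _6, _7, _8, N, ...)	(N)

int
main(void)
{
	printf("%d %d\n", NARGS(a), NARGS(a, b, c));	/* prints "1 3" */
	return 0;
}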
- */ -#ifndef NULL -#define NULL ((void *) 0) #endif +#endif /* not C++ */ /* ---------------------------------------------------------------- @@ -292,6 +378,8 @@ typedef long int int64; #ifndef HAVE_UINT64 typedef unsigned long int uint64; #endif +#define INT64CONST(x) (x##L) +#define UINT64CONST(x) (x##UL) #elif defined(HAVE_LONG_LONG_INT_64) /* We have working support for "long long int", use that */ @@ -301,32 +389,42 @@ typedef long long int int64; #ifndef HAVE_UINT64 typedef unsigned long long int uint64; #endif +#define INT64CONST(x) (x##LL) +#define UINT64CONST(x) (x##ULL) #else /* neither HAVE_LONG_INT_64 nor HAVE_LONG_LONG_INT_64 */ #error must have a working 64-bit integer datatype #endif -/* Decide if we need to decorate 64-bit constants */ -#ifdef HAVE_LL_CONSTANTS -#define INT64CONST(x) ((int64) x##LL) -#define UINT64CONST(x) ((uint64) x##ULL) -#else -#define INT64CONST(x) ((int64) x) -#define UINT64CONST(x) ((uint64) x) -#endif - /* snprintf format strings to use for 64-bit integers */ #define INT64_FORMAT "%" INT64_MODIFIER "d" #define UINT64_FORMAT "%" INT64_MODIFIER "u" /* * 128-bit signed and unsigned integers - * There currently is only a limited support for the type. E.g. 128bit - * literals and snprintf are not supported; but math is. - */ -#if defined(HAVE_INT128) -typedef PG_INT128_TYPE int128; -typedef unsigned PG_INT128_TYPE uint128; + * There currently is only limited support for such types. + * E.g. 128bit literals and snprintf are not supported; but math is. + * Also, because we exclude such types when choosing MAXIMUM_ALIGNOF, + * it must be possible to coerce the compiler to allocate them on no + * more than MAXALIGN boundaries. + */ +#if defined(PG_INT128_TYPE) +#if defined(pg_attribute_aligned) || ALIGNOF_PG_INT128_TYPE <= MAXIMUM_ALIGNOF +#define HAVE_INT128 1 + +typedef PG_INT128_TYPE int128 +#if defined(pg_attribute_aligned) +pg_attribute_aligned(MAXIMUM_ALIGNOF) +#endif +; + +typedef unsigned PG_INT128_TYPE uint128 +#if defined(pg_attribute_aligned) +pg_attribute_aligned(MAXIMUM_ALIGNOF) +#endif +; + +#endif #endif /* @@ -341,11 +439,20 @@ typedef unsigned PG_INT128_TYPE uint128; #define PG_UINT16_MAX (0xFFFF) #define PG_INT32_MIN (-0x7FFFFFFF-1) #define PG_INT32_MAX (0x7FFFFFFF) -#define PG_UINT32_MAX (0xFFFFFFFF) +#define PG_UINT32_MAX (0xFFFFFFFFU) #define PG_INT64_MIN (-INT64CONST(0x7FFFFFFFFFFFFFFF) - 1) #define PG_INT64_MAX INT64CONST(0x7FFFFFFFFFFFFFFF) #define PG_UINT64_MAX UINT64CONST(0xFFFFFFFFFFFFFFFF) +/* Max value of size_t might also be missing if we don't have stdint.h */ +#ifndef SIZE_MAX +#if SIZEOF_SIZE_T == 8 +#define SIZE_MAX PG_UINT64_MAX +#else +#define SIZE_MAX PG_UINT32_MAX +#endif +#endif + /* * We now always use int64 timestamps, but keep this symbol defined for the * benefit of external code that might test it. @@ -501,16 +608,6 @@ typedef NameData *Name; #define NameStr(name) ((name).data) -/* - * Support macros for escaping strings. escape_backslash should be TRUE - * if generating a non-standard-conforming string. Prefixing a string - * with ESCAPE_STRING_SYNTAX guarantees it is non-standard-conforming. - * Beware of multiple evaluation of the "ch" argument! 
- */ -#define SQL_STR_DOUBLE(ch, escape_backslash) \ - ((ch) == '\'' || ((ch) == '\\' && (escape_backslash))) - -#define ESCAPE_STRING_SYNTAX 'E' /* ---------------------------------------------------------------- * Section 4: IsValid macros for system types @@ -544,7 +641,7 @@ typedef NameData *Name; /* ---------------------------------------------------------------- - * Section 5: offsetof, lengthof, endof, alignment + * Section 5: offsetof, lengthof, alignment * ---------------------------------------------------------------- */ /* @@ -564,12 +661,6 @@ typedef NameData *Name; */ #define lengthof(array) (sizeof (array) / sizeof ((array)[0])) -/* - * endof - * Address of the element one past the last in an array. - */ -#define endof(array) (&(array)[lengthof(array)]) - /* ---------------- * Alignment macros: align a length or address appropriately for a given type. * The fooALIGN() macros round up to a multiple of the required alignment, @@ -578,6 +669,9 @@ typedef NameData *Name; * * NOTE: TYPEALIGN[_DOWN] will not work if ALIGNVAL is not a power of 2. * That case seems extremely unlikely to be needed in practice, however. + * + * NOTE: MAXIMUM_ALIGNOF, and hence MAXALIGN(), intentionally exclude any + * larger-than-8-byte types the compiler might have. * ---------------- */ @@ -615,47 +709,6 @@ typedef NameData *Name; /* we don't currently need wider versions of the other ALIGN macros */ #define MAXALIGN64(LEN) TYPEALIGN64(MAXIMUM_ALIGNOF, (LEN)) -/* ---------------- - * Attribute macros - * - * GCC: https://gcc.gnu.org/onlinedocs/gcc/Function-Attributes.html - * GCC: https://gcc.gnu.org/onlinedocs/gcc/Type-Attributes.html - * Sunpro: https://docs.oracle.com/cd/E18659_01/html/821-1384/gjzke.html - * XLC: http://www-01.ibm.com/support/knowledgecenter/SSGH2K_11.1.0/com.ibm.xlc111.aix.doc/language_ref/function_attributes.html - * XLC: http://www-01.ibm.com/support/knowledgecenter/SSGH2K_11.1.0/com.ibm.xlc111.aix.doc/language_ref/type_attrib.html - * ---------------- - */ - -/* only GCC supports the unused attribute */ -#ifdef __GNUC__ -#define pg_attribute_unused() __attribute__((unused)) -#else -#define pg_attribute_unused() -#endif - -/* GCC and XLC support format attributes */ -#if defined(__GNUC__) || defined(__IBMC__) -#define pg_attribute_format_arg(a) __attribute__((format_arg(a))) -#define pg_attribute_printf(f,a) __attribute__((format(PG_PRINTF_ATTRIBUTE, f, a))) -#else -#define pg_attribute_format_arg(a) -#define pg_attribute_printf(f,a) -#endif - -/* GCC, Sunpro and XLC support aligned, packed and noreturn */ -#if defined(__GNUC__) || defined(__SUNPRO_C) || defined(__IBMC__) -#define pg_attribute_aligned(a) __attribute__((aligned(a))) -#define pg_attribute_noreturn() __attribute__((noreturn)) -#define pg_attribute_packed() __attribute__((packed)) -#define HAVE_PG_ATTRIBUTE_NORETURN 1 -#else -/* - * NB: aligned and packed are not given default definitions because they - * affect code functionality; they *must* be implemented by the compiler - * if they are to be used. 
- */ -#define pg_attribute_noreturn() -#endif /* ---------------------------------------------------------------- * Section 6: assertions @@ -692,6 +745,7 @@ typedef NameData *Name; #define AssertArg(condition) assert(condition) #define AssertState(condition) assert(condition) #define AssertPointerAlignment(ptr, bndr) ((void)true) + #else /* USE_ASSERT_CHECKING && !FRONTEND */ /* @@ -738,6 +792,18 @@ typedef NameData *Name; #endif /* USE_ASSERT_CHECKING && !FRONTEND */ +/* + * ExceptionalCondition is compiled into the backend whether or not + * USE_ASSERT_CHECKING is defined, so as to support use of extensions + * that are built with that #define with a backend that isn't. Hence, + * we should declare it as long as !FRONTEND. + */ +#ifndef FRONTEND +extern void ExceptionalCondition(const char *conditionName, + const char *errorType, + const char *fileName, int lineNumber) pg_attribute_noreturn(); +#endif + /* * Macros to support compile-time assertion checks. * @@ -752,17 +818,31 @@ typedef NameData *Name; * about a negative width for a struct bit-field. This will not include a * helpful error message, but it beats not getting an error at all. */ +#ifndef __cplusplus #ifdef HAVE__STATIC_ASSERT #define StaticAssertStmt(condition, errmessage) \ do { _Static_assert(condition, errmessage); } while(0) #define StaticAssertExpr(condition, errmessage) \ - ({ StaticAssertStmt(condition, errmessage); true; }) + ((void) ({ StaticAssertStmt(condition, errmessage); true; })) #else /* !HAVE__STATIC_ASSERT */ #define StaticAssertStmt(condition, errmessage) \ ((void) sizeof(struct { int static_assert_failure : (condition) ? 1 : -1; })) #define StaticAssertExpr(condition, errmessage) \ StaticAssertStmt(condition, errmessage) #endif /* HAVE__STATIC_ASSERT */ +#else /* C++ */ +#if defined(__cpp_static_assert) && __cpp_static_assert >= 200410 +#define StaticAssertStmt(condition, errmessage) \ + static_assert(condition, errmessage) +#define StaticAssertExpr(condition, errmessage) \ + ({ static_assert(condition, errmessage); }) +#else +#define StaticAssertStmt(condition, errmessage) \ + do { struct static_assert_struct { int static_assert_failure : (condition) ? 1 : -1; }; } while(0) +#define StaticAssertExpr(condition, errmessage) \ + ((void) ({ StaticAssertStmt(condition, errmessage); })) +#endif +#endif /* C++ */ /* @@ -781,14 +861,14 @@ typedef NameData *Name; StaticAssertStmt(__builtin_types_compatible_p(__typeof__(varname), typename), \ CppAsString(varname) " does not have type " CppAsString(typename)) #define AssertVariableIsOfTypeMacro(varname, typename) \ - ((void) StaticAssertExpr(__builtin_types_compatible_p(__typeof__(varname), typename), \ + (StaticAssertExpr(__builtin_types_compatible_p(__typeof__(varname), typename), \ CppAsString(varname) " does not have type " CppAsString(typename))) #else /* !HAVE__BUILTIN_TYPES_COMPATIBLE_P */ #define AssertVariableIsOfType(varname, typename) \ StaticAssertStmt(sizeof(varname) == sizeof(typename), \ CppAsString(varname) " does not have type " CppAsString(typename)) #define AssertVariableIsOfTypeMacro(varname, typename) \ - ((void) StaticAssertExpr(sizeof(varname) == sizeof(typename), \ + (StaticAssertExpr(sizeof(varname) == sizeof(typename), \ CppAsString(varname) " does not have type " CppAsString(typename))) #endif /* HAVE__BUILTIN_TYPES_COMPATIBLE_P */ @@ -937,65 +1017,90 @@ typedef NameData *Name; } while (0) -/* - * Mark a point as unreachable in a portable fashion. 
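/*
 * Illustrative sketch, not taken from the patch: minimal use of the
 * compile-time assertion macros.  The struct and function are invented;
 * the snippet assumes c.h (for uint16 and StaticAssertStmt) is included.
 */
typedef struct IllustrativeHeader
{
	uint16		flags;
	uint16		len;
} IllustrativeHeader;

void
check_header_layout(void)
{
	/* refuses to compile if the layout assumption is broken */
	StaticAssertStmt(sizeof(IllustrativeHeader) == 4,
					 "IllustrativeHeader must pack into 4 bytes");
}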
This should preferably - * be something that the compiler understands, to aid code generation. - * In assert-enabled builds, we prefer abort() for debugging reasons. +/* ---------------------------------------------------------------- + * Section 8: random stuff + * ---------------------------------------------------------------- */ -#if defined(HAVE__BUILTIN_UNREACHABLE) && !defined(USE_ASSERT_CHECKING) -#define pg_unreachable() __builtin_unreachable() -#elif defined(_MSC_VER) && !defined(USE_ASSERT_CHECKING) -#define pg_unreachable() __assume(0) -#else -#define pg_unreachable() abort() -#endif - /* - * Hints to the compiler about the likelihood of a branch. Both likely() and - * unlikely() return the boolean value of the contained expression. - * - * These should only be used sparingly, in very hot code paths. It's very easy - * to mis-estimate likelihoods. + * Invert the sign of a qsort-style comparison result, ie, exchange negative + * and positive integer values, being careful not to get the wrong answer + * for INT_MIN. The argument should be an integral variable. */ -#if __GNUC__ >= 3 -#define likely(x) __builtin_expect((x) != 0, 1) -#define unlikely(x) __builtin_expect((x) != 0, 0) -#else -#define likely(x) ((x) != 0) -#define unlikely(x) ((x) != 0) -#endif +#define INVERT_COMPARE_RESULT(var) \ + ((var) = ((var) < 0) ? 1 : -(var)) +/* + * Use this, not "char buf[BLCKSZ]", to declare a field or local variable + * holding a page buffer, if that page might be accessed as a page and not + * just a string of bytes. Otherwise the variable might be under-aligned, + * causing problems on alignment-picky hardware. (In some places, we use + * this to declare buffers even though we only pass them to read() and + * write(), because copying to/from aligned buffers is usually faster than + * using unaligned buffers.) We include both "double" and "int64" in the + * union to ensure that the compiler knows the value must be MAXALIGN'ed + * (cf. configure's computation of MAXIMUM_ALIGNOF). + */ +typedef union PGAlignedBlock +{ + char data[BLCKSZ]; + double force_align_d; + int64 force_align_i64; +} PGAlignedBlock; -/* ---------------------------------------------------------------- - * Section 8: random stuff - * ---------------------------------------------------------------- - */ +/* Same, but for an XLOG_BLCKSZ-sized buffer */ +typedef union PGAlignedXLogBlock +{ + char data[XLOG_BLCKSZ]; + double force_align_d; + int64 force_align_i64; +} PGAlignedXLogBlock; /* msb for char */ #define HIGHBIT (0x80) #define IS_HIGHBIT_SET(ch) ((unsigned char)(ch) & HIGHBIT) +/* + * Support macros for escaping strings. escape_backslash should be true + * if generating a non-standard-conforming string. Prefixing a string + * with ESCAPE_STRING_SYNTAX guarantees it is non-standard-conforming. + * Beware of multiple evaluation of the "ch" argument! + */ +#define SQL_STR_DOUBLE(ch, escape_backslash) \ + ((ch) == '\'' || ((ch) == '\\' && (escape_backslash))) + +#define ESCAPE_STRING_SYNTAX 'E' + + #define STATUS_OK (0) #define STATUS_ERROR (-1) #define STATUS_EOF (-2) #define STATUS_FOUND (1) #define STATUS_WAITING (2) - /* - * Append PG_USED_FOR_ASSERTS_ONLY to definitions of variables that are only - * used in assert-enabled builds, to avoid compiler warnings about unused - * variables in assert-disabled builds. 
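/*
 * Illustrative sketch, not taken from the patch: a standalone copy of the
 * PGAlignedBlock idea introduced above.  The union gives the byte buffer the
 * alignment of double/int64 while it is still used as plain bytes; the names
 * and the 8192-byte size are stand-ins for the real BLCKSZ-based type.
 */
#include <stdint.h>
#include <stdio.h>

#define MY_BLCKSZ 8192

typedef union MyAlignedBlock
{
	char		data[MY_BLCKSZ];
	double		force_align_d;
	int64_t		force_align_i64;
} MyAlignedBlock;

int
main(void)
{
	MyAlignedBlock buf;

	buf.data[0] = 0;			/* usable as an ordinary char buffer... */
	printf("%zu\n", _Alignof(MyAlignedBlock));	/* ...but suitably aligned */
	return 0;
}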
+ * gettext support */ -#ifdef USE_ASSERT_CHECKING -#define PG_USED_FOR_ASSERTS_ONLY -#else -#define PG_USED_FOR_ASSERTS_ONLY pg_attribute_unused() + +#ifndef ENABLE_NLS +/* stuff we'd otherwise get from */ +#define gettext(x) (x) +#define dgettext(d,x) (x) +#define ngettext(s,p,n) ((n) == 1 ? (s) : (p)) +#define dngettext(d,s,p,n) ((n) == 1 ? (s) : (p)) #endif +#define _(x) gettext(x) -/* gettext domain name mangling */ +/* + * Use this to mark string constants as needing translation at some later + * time, rather than immediately. This is useful for cases where you need + * access to the original string and translated string, and for cases where + * immediate translation is not possible, like when initializing global + * variables. + * http://www.gnu.org/software/autoconf/manual/gettext/Special-cases.html + */ +#define gettext_noop(x) (x) /* * To better support parallel installations of major PostgreSQL @@ -1016,6 +1121,30 @@ typedef NameData *Name; #define PG_TEXTDOMAIN(domain) (domain "-" PG_MAJORVERSION) #endif +/* + * Macro that allows to cast constness away from an expression, but doesn't + * allow changing the underlying type. Enforcement of the latter + * currently only works for gcc like compilers. + * + * Please note IT IS NOT SAFE to cast constness away if the result will ever + * be modified (it would be undefined behaviour). Doing so anyway can cause + * compiler misoptimizations or runtime crashes (modifying readonly memory). + * It is only safe to use when the the result will not be modified, but API + * design or language restrictions prevent you from declaring that + * (e.g. because a function returns both const and non-const variables). + * + * Note that this only works in function scope, not for global variables (it'd + * be nice, but not trivial, to improve that). + */ +#if defined(HAVE__BUILTIN_TYPES_COMPATIBLE_P) +#define unconstify(underlying_type, expr) \ + (StaticAssertExpr(__builtin_types_compatible_p(__typeof(expr), const underlying_type), \ + "wrong cast"), \ + (underlying_type) (expr)) +#else +#define unconstify(underlying_type, expr) \ + ((underlying_type) (expr)) +#endif /* ---------------------------------------------------------------- * Section 9: system-specific hacks @@ -1050,14 +1179,41 @@ typedef NameData *Name; * standard C library. */ -#if !HAVE_DECL_SNPRINTF -extern int snprintf(char *str, size_t count, const char *fmt,...) 
pg_attribute_printf(3, 4); +#if defined(HAVE_FDATASYNC) && !HAVE_DECL_FDATASYNC +extern int fdatasync(int fildes); +#endif + +#ifdef HAVE_LONG_LONG_INT +/* Older platforms may provide strto[u]ll functionality under other names */ +#if !defined(HAVE_STRTOLL) && defined(HAVE___STRTOLL) +#define strtoll __strtoll +#define HAVE_STRTOLL 1 +#endif + +#if !defined(HAVE_STRTOLL) && defined(HAVE_STRTOQ) +#define strtoll strtoq +#define HAVE_STRTOLL 1 +#endif + +#if !defined(HAVE_STRTOULL) && defined(HAVE___STRTOULL) +#define strtoull __strtoull +#define HAVE_STRTOULL 1 +#endif + +#if !defined(HAVE_STRTOULL) && defined(HAVE_STRTOUQ) +#define strtoull strtouq +#define HAVE_STRTOULL 1 #endif -#if !HAVE_DECL_VSNPRINTF -extern int vsnprintf(char *str, size_t count, const char *fmt, va_list args); +#if defined(HAVE_STRTOLL) && !HAVE_DECL_STRTOLL +extern long long strtoll(const char *str, char **endptr, int base); #endif +#if defined(HAVE_STRTOULL) && !HAVE_DECL_STRTOULL +extern unsigned long long strtoull(const char *str, char **endptr, int base); +#endif +#endif /* HAVE_LONG_LONG_INT */ + #if !defined(HAVE_MEMMOVE) && !defined(memmove) #define memmove(d, s, c) bcopy(s, d, c) #endif @@ -1094,30 +1250,6 @@ extern int vsnprintf(char *str, size_t count, const char *fmt, va_list args); #define siglongjmp longjmp #endif -#if defined(HAVE_FDATASYNC) && !HAVE_DECL_FDATASYNC -extern int fdatasync(int fildes); -#endif - -/* If strtoq() exists, rename it to the more standard strtoll() */ -#if defined(HAVE_LONG_LONG_INT_64) && !defined(HAVE_STRTOLL) && defined(HAVE_STRTOQ) -#define strtoll strtoq -#define HAVE_STRTOLL 1 -#endif - -/* If strtouq() exists, rename it to the more standard strtoull() */ -#if defined(HAVE_LONG_LONG_INT_64) && !defined(HAVE_STRTOULL) && defined(HAVE_STRTOUQ) -#define strtoull strtouq -#define HAVE_STRTOULL 1 -#endif - -/* - * We assume if we have these two functions, we have their friends too, and - * can use the wide-character functions. - */ -#if defined(HAVE_WCSTOMBS) && defined(HAVE_TOWLOWER) -#define USE_WIDE_UPPER_LOWER -#endif - /* EXEC_BACKEND defines */ #ifdef EXEC_BACKEND #define NON_EXEC_STATIC diff --git a/src/include/catalog/.gitignore b/src/include/catalog/.gitignore index 650202eb75..6c8da5401d 100644 --- a/src/include/catalog/.gitignore +++ b/src/include/catalog/.gitignore @@ -1 +1,3 @@ /schemapg.h +/pg_*_d.h +/header-stamp diff --git a/src/include/catalog/Makefile b/src/include/catalog/Makefile new file mode 100644 index 0000000000..1da3ea7f44 --- /dev/null +++ b/src/include/catalog/Makefile @@ -0,0 +1,31 @@ +#------------------------------------------------------------------------- +# +# Makefile for src/include/catalog +# +# Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group +# Portions Copyright (c) 1994, Regents of the University of California +# +# src/include/catalog/Makefile +# +#------------------------------------------------------------------------- + +subdir = src/include/catalog +top_builddir = ../../.. +include $(top_builddir)/src/Makefile.global + +# location of Catalog.pm +catalogdir = $(top_srcdir)/src/backend/catalog + +# 'make reformat-dat-files' is a convenience target for rewriting the +# catalog data files in our standard format. This includes collapsing +# out any entries that are redundant with a BKI_DEFAULT annotation. +reformat-dat-files: + $(PERL) -I $(catalogdir) reformat_dat_file.pl pg_*.dat + +# 'make expand-dat-files' is a convenience target for expanding out all +# default values in the catalog data files. 
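/*
 * Illustrative sketch, not taken from the patch: usage of the unconstify()
 * macro added to c.h above.  It only works in function scope; on gcc-like
 * compilers the static check rejects casts that change anything other than
 * constness.  The function name is invented, and callers still must not
 * write through the result.
 */
char *
strip_const_for_legacy_api(const char *s)
{
	/* ok: only constness is cast away, the underlying type is unchanged */
	return unconstify(char *, s);
	/* unconstify(int *, s) would fail the compile-time check on gcc */
}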
This should be run before +# altering or removing any BKI_DEFAULT annotation. +expand-dat-files: + $(PERL) -I $(catalogdir) reformat_dat_file.pl pg_*.dat --full-tuples + +.PHONY: reformat-dat-files expand-dat-files diff --git a/src/include/catalog/binary_upgrade.h b/src/include/catalog/binary_upgrade.h index 5ff365fe53..abc6e1ae1d 100644 --- a/src/include/catalog/binary_upgrade.h +++ b/src/include/catalog/binary_upgrade.h @@ -4,7 +4,7 @@ * variables used for binary upgrades * * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/include/catalog/binary_upgrade.h diff --git a/src/include/catalog/catalog.h b/src/include/catalog/catalog.h index 8ce9a9966a..197e77f7f4 100644 --- a/src/include/catalog/catalog.h +++ b/src/include/catalog/catalog.h @@ -4,7 +4,7 @@ * prototypes for functions in backend/catalog/catalog.c * * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/include/catalog/catalog.h @@ -14,18 +14,9 @@ #ifndef CATALOG_H #define CATALOG_H -/* - * 'pgrminclude ignore' needed here because CppAsString2() does not throw - * an error if the symbol is not defined. - */ -#include "catalog/catversion.h" /* pgrminclude ignore */ #include "catalog/pg_class.h" #include "utils/relcache.h" -#define OIDCHARS 10 /* max chars printed by %u */ -#define TABLESPACE_VERSION_DIRECTORY "PG_" PG_MAJORVERSION "_" \ - CppAsString2(CATALOG_VERSION_NO) - extern bool IsSystemRelation(Relation relation); extern bool IsToastRelation(Relation relation); diff --git a/src/include/catalog/catversion.h b/src/include/catalog/catversion.h index 0dafd6bf2a..7d78cbe026 100644 --- a/src/include/catalog/catversion.h +++ b/src/include/catalog/catversion.h @@ -34,7 +34,7 @@ * database contents or layout, such as altering tuple headers. * * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/include/catalog/catversion.h @@ -53,6 +53,6 @@ */ /* yyyymmddN */ -#define CATALOG_VERSION_NO 201707211 +#define CATALOG_VERSION_NO 201811061 #endif diff --git a/src/include/catalog/dependency.h b/src/include/catalog/dependency.h index b9f98423cc..46c271a46c 100644 --- a/src/include/catalog/dependency.h +++ b/src/include/catalog/dependency.h @@ -4,7 +4,7 @@ * Routines to support inter-object dependencies. * * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/include/catalog/dependency.h @@ -49,6 +49,20 @@ * Example: a trigger that's created to enforce a foreign-key constraint * is made internally dependent on the constraint's pg_constraint entry. * + * DEPENDENCY_INTERNAL_AUTO ('I'): the dependent object was created as + * part of creation of the referenced object, and is really just a part + * of its internal implementation. A DROP of the dependent object will + * be disallowed outright (we'll tell the user to issue a DROP against the + * referenced object, instead). 
While a regular internal dependency will + * prevent the dependent object from being dropped while any such + * dependencies remain, DEPENDENCY_INTERNAL_AUTO will allow such a drop as + * long as the object can be found by following any of such dependencies. + * Example: an index on a partition is made internal-auto-dependent on + * both the partition itself as well as on the index on the parent + * partitioned table; so the partition index is dropped together with + * either the partition it indexes, or with the parent index it is attached + * to. + * DEPENDENCY_EXTENSION ('e'): the dependent object is a member of the * extension that is the referenced object. The dependent object can be * dropped only via DROP EXTENSION on the referenced object. Functionally @@ -75,6 +89,7 @@ typedef enum DependencyType DEPENDENCY_NORMAL = 'n', DEPENDENCY_AUTO = 'a', DEPENDENCY_INTERNAL = 'i', + DEPENDENCY_INTERNAL_AUTO = 'I', DEPENDENCY_EXTENSION = 'e', DEPENDENCY_AUTO_EXTENSION = 'x', DEPENDENCY_PIN = 'p' diff --git a/src/include/catalog/duplicate_oids b/src/include/catalog/duplicate_oids index 7342d618ed..072fc00c70 100755 --- a/src/include/catalog/duplicate_oids +++ b/src/include/catalog/duplicate_oids @@ -1,27 +1,40 @@ #!/usr/bin/perl +#---------------------------------------------------------------------- +# +# duplicate_oids +# Identifies any manually-assigned OIDs that are used multiple times +# in the Postgres catalog data. +# +# While duplicate OIDs would only cause a failure if they appear in +# the same catalog, our project policy is that manually assigned OIDs +# should be globally unique, to avoid confusion. +# +# Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group +# Portions Copyright (c) 1994, Regents of the University of California +# +# src/include/catalog/duplicate_oids +# +#---------------------------------------------------------------------- use strict; use warnings; -BEGIN -{ - @ARGV = (glob("pg_*.h"), qw(indexing.h toasting.h)); -} +# Must run in src/include/catalog +use FindBin; +chdir $FindBin::RealBin or die "could not cd to $FindBin::RealBin: $!\n"; + +use lib "$FindBin::RealBin/../../backend/catalog/"; +use Catalog; + +my @input_files = (glob("pg_*.h"), qw(indexing.h toasting.h)); + +my $oids = Catalog::FindAllOidsFromHeaders(@input_files); my %oidcounts; -while (<>) +foreach my $oid (@{$oids}) { - next if /^CATALOG\(.*BKI_BOOTSTRAP/; - next - unless /^DATA\(insert *OID *= *(\d+)/ - || /^CATALOG\([^,]*, *(\d+).*BKI_ROWTYPE_OID\((\d+)\)/ - || /^CATALOG\([^,]*, *(\d+)/ - || /^DECLARE_INDEX\([^,]*, *(\d+)/ - || /^DECLARE_UNIQUE_INDEX\([^,]*, *(\d+)/ - || /^DECLARE_TOAST\([^,]*, *(\d+), *(\d+)/; - $oidcounts{$1}++; - $oidcounts{$2}++ if $2; + $oidcounts{$oid}++; } my $found = 0; diff --git a/src/include/catalog/genbki.h b/src/include/catalog/genbki.h index a2cb313d4a..8a4277b7c8 100644 --- a/src/include/catalog/genbki.h +++ b/src/include/catalog/genbki.h @@ -3,13 +3,13 @@ * genbki.h * Required include file for all POSTGRES catalog header files * - * genbki.h defines CATALOG(), DATA(), BKI_BOOTSTRAP and related macros + * genbki.h defines CATALOG(), BKI_BOOTSTRAP and related macros * so that the catalog header files can be read by the C compiler. * (These same words are recognized by genbki.pl to build the BKI * bootstrap file from these header files.) 
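/*
 * Illustrative sketch, not taken from the patch: how the new
 * DEPENDENCY_INTERNAL_AUTO type is meant to be recorded for the
 * partition-index case described above.  The function and its OID
 * parameters are invented; recordDependencyOn() and ObjectAddressSet() are
 * the existing helpers from dependency.h / objectaddress.h, and
 * RelationRelationId comes from pg_class.h.
 */
void
record_partition_index_deps(Oid partition_index_oid, Oid partition_oid,
							Oid parent_index_oid)
{
	ObjectAddress myself;
	ObjectAddress referenced;

	ObjectAddressSet(myself, RelationRelationId, partition_index_oid);

	/* tied to the partition it indexes ... */
	ObjectAddressSet(referenced, RelationRelationId, partition_oid);
	recordDependencyOn(&myself, &referenced, DEPENDENCY_INTERNAL_AUTO);

	/* ... and to the index on the parent partitioned table */
	ObjectAddressSet(referenced, RelationRelationId, parent_index_oid);
	recordDependencyOn(&myself, &referenced, DEPENDENCY_INTERNAL_AUTO);
}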
* * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/include/catalog/genbki.h @@ -20,20 +20,28 @@ #define GENBKI_H /* Introduces a catalog's structure definition */ -#define CATALOG(name,oid) typedef struct CppConcat(FormData_,name) +#define CATALOG(name,oid,oidmacro) typedef struct CppConcat(FormData_,name) /* Options that may appear after CATALOG (on the same line) */ #define BKI_BOOTSTRAP #define BKI_SHARED_RELATION #define BKI_WITHOUT_OIDS -#define BKI_ROWTYPE_OID(oid) +#define BKI_ROWTYPE_OID(oid,oidmacro) #define BKI_SCHEMA_MACRO + +/* Options that may appear after an attribute (on the same line) */ #define BKI_FORCE_NULL #define BKI_FORCE_NOT_NULL +/* Specifies a default value for a catalog field */ +#define BKI_DEFAULT(value) +/* Specifies a default value for auto-generated array types */ +#define BKI_ARRAY_DEFAULT(value) +/* Indicates how to perform name lookups for an OID or OID-array field */ +#define BKI_LOOKUP(catalog) + +/* The following are never defined; they are here only for documentation. */ /* - * This is never defined; it's here only for documentation. - * * Variable-length catalog fields (except possibly the first not nullable one) * should not be visible in C structures, so they are made invisible by #ifdefs * of an undefined symbol. See also MARKNOTNULL in bootstrap.c for how this is @@ -41,10 +49,14 @@ */ #undef CATALOG_VARLEN -/* Declarations that provide the initial content of a catalog */ -/* In C, these need to expand into some harmless, repeatable declaration */ -#define DATA(x) extern int no_such_variable -#define DESCR(x) extern int no_such_variable -#define SHDESCR(x) extern int no_such_variable +/* + * There is code in some catalog headers that needs to be visible to clients, + * but we don't want clients to include the full header because of safety + * issues with other code in the header. To handle that, surround code that + * should be visible to clients with "#ifdef EXPOSE_TO_CLIENT_CODE". That + * instructs genbki.pl to copy the section when generating the corresponding + * "_d" header, which can be included by both client and backend code. 
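/*
 * Illustrative sketch, not taken from the patch: how a hypothetical catalog
 * header could use the new genbki.h annotations.  "pg_foo", its OID and all
 * field names are invented; only the macro syntax follows the definitions
 * above.
 */
CATALOG(pg_foo,9999,FooRelationId)
{
	NameData	fooname;						/* always supplied in pg_foo.dat */
	bool		fooisspecial BKI_DEFAULT(f);	/* omitted entries get 'f' */
	Oid			foohandler BKI_LOOKUP(pg_proc); /* written by name, resolved
												 * to an OID by genbki.pl */
} FormData_pg_foo;

typedef FormData_pg_foo *Form_pg_foo;

#ifdef EXPOSE_TO_CLIENT_CODE
#define FOO_KIND_PLAIN	'p'			/* copied into pg_foo_d.h for clients */
#endif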
+ */ +#undef EXPOSE_TO_CLIENT_CODE #endif /* GENBKI_H */ diff --git a/src/include/catalog/heap.h b/src/include/catalog/heap.h index cb1bc887f8..39f04b06ee 100644 --- a/src/include/catalog/heap.h +++ b/src/include/catalog/heap.h @@ -4,7 +4,7 @@ * prototypes for functions in backend/catalog/heap.c * * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/include/catalog/heap.h @@ -23,6 +23,7 @@ typedef struct RawColumnDefault { AttrNumber attnum; /* attribute to attach default to */ Node *raw_default; /* default value (untransformed parse tree) */ + bool missingMode; /* true if part of add column processing */ } RawColumnDefault; typedef struct CookedConstraint @@ -71,6 +72,7 @@ extern Oid heap_create_with_catalog(const char *relname, bool use_user_acl, bool allow_system_table_mods, bool is_internal, + Oid relrewrite, ObjectAddress *typaddress); extern void heap_create_init_fork(Relation rel); @@ -100,16 +102,21 @@ extern List *AddRelationNewConstraints(Relation rel, List *newConstraints, bool allow_merge, bool is_local, - bool is_internal); + bool is_internal, + const char *queryString); + +extern void RelationClearMissing(Relation rel); +extern void SetAttrMissing(Oid relid, char *attname, char *value); extern Oid StoreAttrDefault(Relation rel, AttrNumber attnum, - Node *expr, bool is_internal); + Node *expr, bool is_internal, + bool add_column_mode); extern Node *cookDefault(ParseState *pstate, Node *raw_default, Oid atttypid, int32 atttypmod, - char *attname); + const char *attname); extern void DeleteRelationTuple(Oid relid); extern void DeleteAttributeTuples(Oid relid); @@ -120,10 +127,10 @@ extern void RemoveAttrDefault(Oid relid, AttrNumber attnum, extern void RemoveAttrDefaultById(Oid attrdefId); extern void RemoveStatistics(Oid relid, AttrNumber attnum); -extern Form_pg_attribute SystemAttributeDefinition(AttrNumber attno, +extern const FormData_pg_attribute *SystemAttributeDefinition(AttrNumber attno, bool relhasoids); -extern Form_pg_attribute SystemAttributeByName(const char *attname, +extern const FormData_pg_attribute *SystemAttributeByName(const char *attname, bool relhasoids); extern void CheckAttributeNamesTypes(TupleDesc tupdesc, char relkind, diff --git a/src/include/catalog/index.h b/src/include/catalog/index.h index 1d4ec09f8f..35a29f3498 100644 --- a/src/include/catalog/index.h +++ b/src/include/catalog/index.h @@ -4,7 +4,7 @@ * prototypes for catalog/index.c. 
* * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/include/catalog/index.h @@ -40,11 +40,22 @@ typedef enum extern void index_check_primary_key(Relation heapRel, IndexInfo *indexInfo, - bool is_alter_table); + bool is_alter_table, + IndexStmt *stmt); + +#define INDEX_CREATE_IS_PRIMARY (1 << 0) +#define INDEX_CREATE_ADD_CONSTRAINT (1 << 1) +#define INDEX_CREATE_SKIP_BUILD (1 << 2) +#define INDEX_CREATE_CONCURRENT (1 << 3) +#define INDEX_CREATE_IF_NOT_EXISTS (1 << 4) +#define INDEX_CREATE_PARTITIONED (1 << 5) +#define INDEX_CREATE_INVALID (1 << 6) extern Oid index_create(Relation heapRelation, const char *indexRelationName, Oid indexRelationId, + Oid parentIndexRelid, + Oid parentConstraintId, Oid relFileNode, IndexInfo *indexInfo, List *indexColNames, @@ -54,26 +65,25 @@ extern Oid index_create(Relation heapRelation, Oid *classObjectId, int16 *coloptions, Datum reloptions, - bool isprimary, - bool isconstraint, - bool deferrable, - bool initdeferred, + bits16 flags, + bits16 constr_flags, bool allow_system_table_mods, - bool skip_build, - bool concurrent, bool is_internal, - bool if_not_exists); + Oid *constraintId); + +#define INDEX_CONSTR_CREATE_MARK_AS_PRIMARY (1 << 0) +#define INDEX_CONSTR_CREATE_DEFERRABLE (1 << 1) +#define INDEX_CONSTR_CREATE_INIT_DEFERRED (1 << 2) +#define INDEX_CONSTR_CREATE_UPDATE_INDEX (1 << 3) +#define INDEX_CONSTR_CREATE_REMOVE_OLD_DEPS (1 << 4) extern ObjectAddress index_constraint_create(Relation heapRelation, Oid indexRelationId, + Oid parentConstraintId, IndexInfo *indexInfo, const char *constraintName, char constraintType, - bool deferrable, - bool initdeferred, - bool mark_as_primary, - bool update_pgindex, - bool remove_old_dependencies, + bits16 constr_flags, bool allow_system_table_mods, bool is_internal); @@ -81,6 +91,11 @@ extern void index_drop(Oid indexId, bool concurrent); extern IndexInfo *BuildIndexInfo(Relation index); +extern bool CompareIndexInfo(IndexInfo *info1, IndexInfo *info2, + Oid *collations1, Oid *collations2, + Oid *opfamilies1, Oid *opfamilies2, + AttrNumber *attmap, int maplen); + extern void BuildSpeculativeIndexInfo(Relation index, IndexInfo *ii); extern void FormIndexDatum(IndexInfo *indexInfo, @@ -93,14 +108,16 @@ extern void index_build(Relation heapRelation, Relation indexRelation, IndexInfo *indexInfo, bool isprimary, - bool isreindex); + bool isreindex, + bool parallel); extern double IndexBuildHeapScan(Relation heapRelation, Relation indexRelation, IndexInfo *indexInfo, bool allow_sync, IndexBuildCallback callback, - void *callback_state); + void *callback_state, + HeapScanDesc scan); extern double IndexBuildHeapRangeScan(Relation heapRelation, Relation indexRelation, IndexInfo *indexInfo, @@ -109,7 +126,8 @@ extern double IndexBuildHeapRangeScan(Relation heapRelation, BlockNumber start_blockno, BlockNumber end_blockno, IndexBuildCallback callback, - void *callback_state); + void *callback_state, + HeapScanDesc scan); extern void validate_index(Oid heapId, Oid indexId, Snapshot snapshot); @@ -131,4 +149,10 @@ extern bool ReindexIsProcessingHeap(Oid heapOid); extern bool ReindexIsProcessingIndex(Oid indexOid); extern Oid IndexGetRelation(Oid indexId, bool missing_ok); +extern Size EstimateReindexStateSpace(void); +extern void SerializeReindexState(Size maxsize, char *start_address); +extern void RestoreReindexState(void *reindexstate); + +extern 
void IndexSetParentIndex(Relation idx, Oid parentOid); + #endif /* INDEX_H */ diff --git a/src/include/catalog/indexing.h b/src/include/catalog/indexing.h index ef8493674c..254fbef1f7 100644 --- a/src/include/catalog/indexing.h +++ b/src/include/catalog/indexing.h @@ -5,7 +5,7 @@ * on system catalogs * * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/include/catalog/indexing.h @@ -43,11 +43,10 @@ extern void CatalogTupleDelete(Relation heapRel, ItemPointer tid); /* * These macros are just to keep the C compiler from spitting up on the - * upcoming commands for genbki.pl. + * upcoming commands for Catalog.pm. */ #define DECLARE_INDEX(name,oid,decl) extern int no_such_variable #define DECLARE_UNIQUE_INDEX(name,oid,decl) extern int no_such_variable -#define BUILD_INDICES /* @@ -122,12 +121,14 @@ DECLARE_UNIQUE_INDEX(pg_collation_oid_index, 3085, on pg_collation using btree(o DECLARE_INDEX(pg_constraint_conname_nsp_index, 2664, on pg_constraint using btree(conname name_ops, connamespace oid_ops)); #define ConstraintNameNspIndexId 2664 -DECLARE_INDEX(pg_constraint_conrelid_index, 2665, on pg_constraint using btree(conrelid oid_ops)); -#define ConstraintRelidIndexId 2665 +DECLARE_UNIQUE_INDEX(pg_constraint_conrelid_contypid_conname_index, 2665, on pg_constraint using btree(conrelid oid_ops, contypid oid_ops, conname name_ops)); +#define ConstraintRelidTypidNameIndexId 2665 DECLARE_INDEX(pg_constraint_contypid_index, 2666, on pg_constraint using btree(contypid oid_ops)); #define ConstraintTypidIndexId 2666 DECLARE_UNIQUE_INDEX(pg_constraint_oid_index, 2667, on pg_constraint using btree(oid oid_ops)); #define ConstraintOidIndexId 2667 +DECLARE_INDEX(pg_constraint_conparentid_index, 2579, on pg_constraint using btree(conparentid oid_ops)); +#define ConstraintParentIndexId 2579 DECLARE_UNIQUE_INDEX(pg_conversion_default_index, 2668, on pg_conversion using btree(connamespace oid_ops, conforencoding int4_ops, contoencoding int4_ops, oid oid_ops)); #define ConversionDefaultIndexId 2668 @@ -359,7 +360,4 @@ DECLARE_UNIQUE_INDEX(pg_subscription_subname_index, 6115, on pg_subscription usi DECLARE_UNIQUE_INDEX(pg_subscription_rel_srrelid_srsubid_index, 6117, on pg_subscription_rel using btree(srrelid oid_ops, srsubid oid_ops)); #define SubscriptionRelSrrelidSrsubidIndexId 6117 -/* last step of initialization script: build the indexes declared above */ -BUILD_INDICES - #endif /* INDEXING_H */ diff --git a/src/include/catalog/namespace.h b/src/include/catalog/namespace.h index f2ee935623..0e202372d5 100644 --- a/src/include/catalog/namespace.h +++ b/src/include/catalog/namespace.h @@ -4,7 +4,7 @@ * prototypes for functions in backend/catalog/namespace.c * * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/include/catalog/namespace.h @@ -47,14 +47,25 @@ typedef struct OverrideSearchPath bool addTemp; /* implicitly prepend temp schema? */ } OverrideSearchPath; +/* + * Option flag bits for RangeVarGetRelidExtended(). 
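/*
 * Illustrative sketch, not taken from the patch: with index.h's boolean
 * parameters folded into bit masks (see the INDEX_CREATE_* and
 * INDEX_CONSTR_CREATE_* flags above), a caller now builds bits16 values
 * instead of passing isprimary/isconstraint/deferrable and friends
 * individually.  The function and its arguments are invented.
 */
void
build_index_create_flags(bool concurrent, bool if_not_exists)
{
	bits16		flags = INDEX_CREATE_IS_PRIMARY | INDEX_CREATE_ADD_CONSTRAINT;
	bits16		constr_flags = INDEX_CONSTR_CREATE_MARK_AS_PRIMARY;

	if (concurrent)
		flags |= INDEX_CREATE_CONCURRENT;
	if (if_not_exists)
		flags |= INDEX_CREATE_IF_NOT_EXISTS;

	/* flags and constr_flags would then be handed to index_create() */
	(void) flags;
	(void) constr_flags;
}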
+ */ +typedef enum RVROption +{ + RVR_MISSING_OK = 1 << 0, /* don't error if relation doesn't exist */ + RVR_NOWAIT = 1 << 1, /* error if relation cannot be locked */ + RVR_SKIP_LOCKED = 1 << 2 /* skip if relation cannot be locked */ +} RVROption; + typedef void (*RangeVarGetRelidCallback) (const RangeVar *relation, Oid relId, Oid oldRelId, void *callback_arg); #define RangeVarGetRelid(relation, lockmode, missing_ok) \ - RangeVarGetRelidExtended(relation, lockmode, missing_ok, false, NULL, NULL) + RangeVarGetRelidExtended(relation, lockmode, \ + (missing_ok) ? RVR_MISSING_OK : 0, NULL, NULL) extern Oid RangeVarGetRelidExtended(const RangeVar *relation, - LOCKMODE lockmode, bool missing_ok, bool nowait, + LOCKMODE lockmode, uint32 flags, RangeVarGetRelidCallback callback, void *callback_arg); extern Oid RangeVarGetCreationNamespace(const RangeVar *newRelation); @@ -126,6 +137,7 @@ extern bool isTempToastNamespace(Oid namespaceId); extern bool isTempOrTempToastNamespace(Oid namespaceId); extern bool isAnyTempNamespace(Oid namespaceId); extern bool isOtherTempNamespace(Oid namespaceId); +extern bool isTempNamespaceInUse(Oid namespaceId); extern int GetTempNamespaceBackendId(Oid namespaceId); extern Oid GetTempToastNamespace(void); extern void GetTempNamespaceState(Oid *tempNamespaceId, diff --git a/src/include/catalog/objectaccess.h b/src/include/catalog/objectaccess.h index 251eb6fd88..e46f5605fc 100644 --- a/src/include/catalog/objectaccess.h +++ b/src/include/catalog/objectaccess.h @@ -3,7 +3,7 @@ * * Object access hooks. * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California */ @@ -133,7 +133,7 @@ extern void RunObjectDropHook(Oid classId, Oid objectId, int subId, int dropflags); extern void RunObjectPostAlterHook(Oid classId, Oid objectId, int subId, Oid auxiliaryId, bool is_internal); -extern bool RunNamespaceSearchHook(Oid objectId, bool ereport_on_volation); +extern bool RunNamespaceSearchHook(Oid objectId, bool ereport_on_violation); extern void RunFunctionExecuteHook(Oid objectId); /* diff --git a/src/include/catalog/objectaddress.h b/src/include/catalog/objectaddress.h index 5fc54d0e57..6a9b1eec73 100644 --- a/src/include/catalog/objectaddress.h +++ b/src/include/catalog/objectaddress.h @@ -3,7 +3,7 @@ * objectaddress.h * functions for working with object addresses * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/include/catalog/objectaddress.h @@ -62,7 +62,7 @@ extern AttrNumber get_object_attnum_name(Oid class_id); extern AttrNumber get_object_attnum_namespace(Oid class_id); extern AttrNumber get_object_attnum_owner(Oid class_id); extern AttrNumber get_object_attnum_acl(Oid class_id); -extern AclObjectKind get_object_aclkind(Oid class_id); +extern ObjectType get_object_type(Oid class_id, Oid object_id); extern bool get_object_namensp_unique(Oid class_id); extern HeapTuple get_catalog_object_by_oid(Relation catalog, @@ -78,4 +78,6 @@ extern char *getObjectIdentityParts(const ObjectAddress *address, List **objname, List **objargs); extern ArrayType *strlist_to_textarray(List *list); +extern ObjectType get_relkind_objtype(char relkind); + #endif /* OBJECTADDRESS_H */ diff --git a/src/include/catalog/opfam_internal.h 
b/src/include/catalog/opfam_internal.h index c4a010029a..e9ac904c72 100644 --- a/src/include/catalog/opfam_internal.h +++ b/src/include/catalog/opfam_internal.h @@ -2,7 +2,7 @@ * * opfam_internal.h * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/include/catalog/opfam_internal.h diff --git a/src/include/catalog/partition.h b/src/include/catalog/partition.h index bef7a0f5fb..a53de2372e 100644 --- a/src/include/catalog/partition.h +++ b/src/include/catalog/partition.h @@ -4,7 +4,7 @@ * Header file for structures and utility functions related to * partitioning * - * Copyright (c) 2007-2017, PostgreSQL Global Development Group + * Copyright (c) 2007-2018, PostgreSQL Global Development Group * * src/include/catalog/partition.h * @@ -13,19 +13,11 @@ #ifndef PARTITION_H #define PARTITION_H -#include "fmgr.h" -#include "executor/tuptable.h" -#include "nodes/execnodes.h" -#include "parser/parse_node.h" -#include "utils/rel.h" +#include "partitioning/partdefs.h" +#include "utils/relcache.h" -/* - * PartitionBoundInfo encapsulates a set of partition bounds. It is usually - * associated with partitioned tables as part of its partition descriptor. - * - * The internal structure is opaque outside partition.c. - */ -typedef struct PartitionBoundInfoData *PartitionBoundInfo; +/* Seed for the extended hash function */ +#define HASH_PARTITION_SEED UINT64CONST(0x7A5B22367996DCFD) /* * Information about partitions of a partitioned table. @@ -37,67 +29,17 @@ typedef struct PartitionDescData PartitionBoundInfo boundinfo; /* collection of partition bounds */ } PartitionDescData; -typedef struct PartitionDescData *PartitionDesc; - -/*----------------------- - * PartitionDispatch - information about one partitioned table in a partition - * hierarchy required to route a tuple to one of its partitions - * - * reldesc Relation descriptor of the table - * key Partition key information of the table - * keystate Execution state required for expressions in the partition key - * partdesc Partition descriptor of the table - * tupslot A standalone TupleTableSlot initialized with this table's tuple - * descriptor - * tupmap TupleConversionMap to convert from the parent's rowtype to - * this table's rowtype (when extracting the partition key of a - * tuple just before routing it through this table) - * indexes Array with partdesc->nparts members (for details on what - * individual members represent, see how they are set in - * RelationGetPartitionDispatchInfo()) - *----------------------- - */ -typedef struct PartitionDispatchData -{ - Relation reldesc; - PartitionKey key; - List *keystate; /* list of ExprState */ - PartitionDesc partdesc; - TupleTableSlot *tupslot; - TupleConversionMap *tupmap; - int *indexes; -} PartitionDispatchData; - -typedef struct PartitionDispatchData *PartitionDispatch; - -extern void RelationBuildPartitionDesc(Relation relation); -extern bool partition_bounds_equal(int partnatts, int16 *parttyplen, - bool *parttypbyval, PartitionBoundInfo b1, - PartitionBoundInfo b2); - -extern void check_new_partition_bound(char *relname, Relation parent, - PartitionBoundSpec *spec); extern Oid get_partition_parent(Oid relid); -extern List *get_qual_from_partbound(Relation rel, Relation parent, - PartitionBoundSpec *spec); -extern List *map_partition_varattnos(List *expr, int target_varno, - Relation partrel, Relation parent, +extern List 
*get_partition_ancestors(Oid relid); +extern List *map_partition_varattnos(List *expr, int fromrel_varno, + Relation to_rel, Relation from_rel, bool *found_whole_row); -extern List *RelationGetPartitionQual(Relation rel); -extern Expr *get_partition_qual_relid(Oid relid); +extern bool has_partition_attrs(Relation rel, Bitmapset *attnums, + bool *used_in_expr); + +extern Oid get_default_oid_from_partdesc(PartitionDesc partdesc); +extern Oid get_default_partition_oid(Oid parentId); +extern void update_default_partition_oid(Oid parentId, Oid defaultPartId); +extern List *get_proposed_default_constraint(List *new_part_constaints); -/* For tuple routing */ -extern PartitionDispatch *RelationGetPartitionDispatchInfo(Relation rel, - int lockmode, int *num_parted, - List **leaf_part_oids); -extern void FormPartitionKeyDatum(PartitionDispatch pd, - TupleTableSlot *slot, - EState *estate, - Datum *values, - bool *isnull); -extern int get_partition_for_tuple(PartitionDispatch *pd, - TupleTableSlot *slot, - EState *estate, - PartitionDispatchData **failed_at, - TupleTableSlot **failed_slot); #endif /* PARTITION_H */ diff --git a/src/include/catalog/pg_aggregate.dat b/src/include/catalog/pg_aggregate.dat new file mode 100644 index 0000000000..b4ce0aabf9 --- /dev/null +++ b/src/include/catalog/pg_aggregate.dat @@ -0,0 +1,592 @@ +#---------------------------------------------------------------------- +# +# pg_aggregate.dat +# Initial contents of the pg_aggregate system catalog. +# +# Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group +# Portions Copyright (c) 1994, Regents of the University of California +# +# src/include/catalog/pg_aggregate.dat +# +#---------------------------------------------------------------------- + +[ + +# avg +{ aggfnoid => 'avg(int8)', aggtransfn => 'int8_avg_accum', + aggfinalfn => 'numeric_poly_avg', aggcombinefn => 'int8_avg_combine', + aggserialfn => 'int8_avg_serialize', aggdeserialfn => 'int8_avg_deserialize', + aggmtransfn => 'int8_avg_accum', aggminvtransfn => 'int8_avg_accum_inv', + aggmfinalfn => 'numeric_poly_avg', aggtranstype => 'internal', + aggtransspace => '48', aggmtranstype => 'internal', aggmtransspace => '48' }, +{ aggfnoid => 'avg(int4)', aggtransfn => 'int4_avg_accum', + aggfinalfn => 'int8_avg', aggcombinefn => 'int4_avg_combine', + aggmtransfn => 'int4_avg_accum', aggminvtransfn => 'int4_avg_accum_inv', + aggmfinalfn => 'int8_avg', aggtranstype => '_int8', aggmtranstype => '_int8', + agginitval => '{0,0}', aggminitval => '{0,0}' }, +{ aggfnoid => 'avg(int2)', aggtransfn => 'int2_avg_accum', + aggfinalfn => 'int8_avg', aggcombinefn => 'int4_avg_combine', + aggmtransfn => 'int2_avg_accum', aggminvtransfn => 'int2_avg_accum_inv', + aggmfinalfn => 'int8_avg', aggtranstype => '_int8', aggmtranstype => '_int8', + agginitval => '{0,0}', aggminitval => '{0,0}' }, +{ aggfnoid => 'avg(numeric)', aggtransfn => 'numeric_avg_accum', + aggfinalfn => 'numeric_avg', aggcombinefn => 'numeric_avg_combine', + aggserialfn => 'numeric_avg_serialize', + aggdeserialfn => 'numeric_avg_deserialize', + aggmtransfn => 'numeric_avg_accum', aggminvtransfn => 'numeric_accum_inv', + aggmfinalfn => 'numeric_avg', aggtranstype => 'internal', + aggtransspace => '128', aggmtranstype => 'internal', + aggmtransspace => '128' }, +{ aggfnoid => 'avg(float4)', aggtransfn => 'float4_accum', + aggfinalfn => 'float8_avg', aggcombinefn => 'float8_combine', + aggtranstype => '_float8', agginitval => '{0,0,0}' }, +{ aggfnoid => 'avg(float8)', aggtransfn => 'float8_accum', + 
aggfinalfn => 'float8_avg', aggcombinefn => 'float8_combine', + aggtranstype => '_float8', agginitval => '{0,0,0}' }, +{ aggfnoid => 'avg(interval)', aggtransfn => 'interval_accum', + aggfinalfn => 'interval_avg', aggcombinefn => 'interval_combine', + aggmtransfn => 'interval_accum', aggminvtransfn => 'interval_accum_inv', + aggmfinalfn => 'interval_avg', aggtranstype => '_interval', + aggmtranstype => '_interval', agginitval => '{0 second,0 second}', + aggminitval => '{0 second,0 second}' }, + +# sum +{ aggfnoid => 'sum(int8)', aggtransfn => 'int8_avg_accum', + aggfinalfn => 'numeric_poly_sum', aggcombinefn => 'int8_avg_combine', + aggserialfn => 'int8_avg_serialize', aggdeserialfn => 'int8_avg_deserialize', + aggmtransfn => 'int8_avg_accum', aggminvtransfn => 'int8_avg_accum_inv', + aggmfinalfn => 'numeric_poly_sum', aggtranstype => 'internal', + aggtransspace => '48', aggmtranstype => 'internal', aggmtransspace => '48' }, +{ aggfnoid => 'sum(int4)', aggtransfn => 'int4_sum', aggcombinefn => 'int8pl', + aggmtransfn => 'int4_avg_accum', aggminvtransfn => 'int4_avg_accum_inv', + aggmfinalfn => 'int2int4_sum', aggtranstype => 'int8', + aggmtranstype => '_int8', aggminitval => '{0,0}' }, +{ aggfnoid => 'sum(int2)', aggtransfn => 'int2_sum', aggcombinefn => 'int8pl', + aggmtransfn => 'int2_avg_accum', aggminvtransfn => 'int2_avg_accum_inv', + aggmfinalfn => 'int2int4_sum', aggtranstype => 'int8', + aggmtranstype => '_int8', aggminitval => '{0,0}' }, +{ aggfnoid => 'sum(float4)', aggtransfn => 'float4pl', + aggcombinefn => 'float4pl', aggtranstype => 'float4' }, +{ aggfnoid => 'sum(float8)', aggtransfn => 'float8pl', + aggcombinefn => 'float8pl', aggtranstype => 'float8' }, +{ aggfnoid => 'sum(money)', aggtransfn => 'cash_pl', aggcombinefn => 'cash_pl', + aggmtransfn => 'cash_pl', aggminvtransfn => 'cash_mi', + aggtranstype => 'money', aggmtranstype => 'money' }, +{ aggfnoid => 'sum(interval)', aggtransfn => 'interval_pl', + aggcombinefn => 'interval_pl', aggmtransfn => 'interval_pl', + aggminvtransfn => 'interval_mi', aggtranstype => 'interval', + aggmtranstype => 'interval' }, +{ aggfnoid => 'sum(numeric)', aggtransfn => 'numeric_avg_accum', + aggfinalfn => 'numeric_sum', aggcombinefn => 'numeric_avg_combine', + aggserialfn => 'numeric_avg_serialize', + aggdeserialfn => 'numeric_avg_deserialize', + aggmtransfn => 'numeric_avg_accum', aggminvtransfn => 'numeric_accum_inv', + aggmfinalfn => 'numeric_sum', aggtranstype => 'internal', + aggtransspace => '128', aggmtranstype => 'internal', + aggmtransspace => '128' }, + +# max +{ aggfnoid => 'max(int8)', aggtransfn => 'int8larger', + aggcombinefn => 'int8larger', aggsortop => '>(int8,int8)', + aggtranstype => 'int8' }, +{ aggfnoid => 'max(int4)', aggtransfn => 'int4larger', + aggcombinefn => 'int4larger', aggsortop => '>(int4,int4)', + aggtranstype => 'int4' }, +{ aggfnoid => 'max(int2)', aggtransfn => 'int2larger', + aggcombinefn => 'int2larger', aggsortop => '>(int2,int2)', + aggtranstype => 'int2' }, +{ aggfnoid => 'max(oid)', aggtransfn => 'oidlarger', + aggcombinefn => 'oidlarger', aggsortop => '>(oid,oid)', + aggtranstype => 'oid' }, +{ aggfnoid => 'max(float4)', aggtransfn => 'float4larger', + aggcombinefn => 'float4larger', aggsortop => '>(float4,float4)', + aggtranstype => 'float4' }, +{ aggfnoid => 'max(float8)', aggtransfn => 'float8larger', + aggcombinefn => 'float8larger', aggsortop => '>(float8,float8)', + aggtranstype => 'float8' }, +{ aggfnoid => 'max(date)', aggtransfn => 'date_larger', + aggcombinefn => 'date_larger', aggsortop 
=> '>(date,date)', + aggtranstype => 'date' }, +{ aggfnoid => 'max(time)', aggtransfn => 'time_larger', + aggcombinefn => 'time_larger', aggsortop => '>(time,time)', + aggtranstype => 'time' }, +{ aggfnoid => 'max(timetz)', aggtransfn => 'timetz_larger', + aggcombinefn => 'timetz_larger', aggsortop => '>(timetz,timetz)', + aggtranstype => 'timetz' }, +{ aggfnoid => 'max(money)', aggtransfn => 'cashlarger', + aggcombinefn => 'cashlarger', aggsortop => '>(money,money)', + aggtranstype => 'money' }, +{ aggfnoid => 'max(timestamp)', aggtransfn => 'timestamp_larger', + aggcombinefn => 'timestamp_larger', aggsortop => '>(timestamp,timestamp)', + aggtranstype => 'timestamp' }, +{ aggfnoid => 'max(timestamptz)', aggtransfn => 'timestamptz_larger', + aggcombinefn => 'timestamptz_larger', + aggsortop => '>(timestamptz,timestamptz)', aggtranstype => 'timestamptz' }, +{ aggfnoid => 'max(interval)', aggtransfn => 'interval_larger', + aggcombinefn => 'interval_larger', aggsortop => '>(interval,interval)', + aggtranstype => 'interval' }, +{ aggfnoid => 'max(text)', aggtransfn => 'text_larger', + aggcombinefn => 'text_larger', aggsortop => '>(text,text)', + aggtranstype => 'text' }, +{ aggfnoid => 'max(numeric)', aggtransfn => 'numeric_larger', + aggcombinefn => 'numeric_larger', aggsortop => '>(numeric,numeric)', + aggtranstype => 'numeric' }, +{ aggfnoid => 'max(anyarray)', aggtransfn => 'array_larger', + aggcombinefn => 'array_larger', aggsortop => '>(anyarray,anyarray)', + aggtranstype => 'anyarray' }, +{ aggfnoid => 'max(bpchar)', aggtransfn => 'bpchar_larger', + aggcombinefn => 'bpchar_larger', aggsortop => '>(bpchar,bpchar)', + aggtranstype => 'bpchar' }, +{ aggfnoid => 'max(tid)', aggtransfn => 'tidlarger', + aggcombinefn => 'tidlarger', aggsortop => '>(tid,tid)', + aggtranstype => 'tid' }, +{ aggfnoid => 'max(anyenum)', aggtransfn => 'enum_larger', + aggcombinefn => 'enum_larger', aggsortop => '>(anyenum,anyenum)', + aggtranstype => 'anyenum' }, +{ aggfnoid => 'max(inet)', aggtransfn => 'network_larger', + aggcombinefn => 'network_larger', aggsortop => '>(inet,inet)', + aggtranstype => 'inet' }, + +# min +{ aggfnoid => 'min(int8)', aggtransfn => 'int8smaller', + aggcombinefn => 'int8smaller', aggsortop => '<(int8,int8)', + aggtranstype => 'int8' }, +{ aggfnoid => 'min(int4)', aggtransfn => 'int4smaller', + aggcombinefn => 'int4smaller', aggsortop => '<(int4,int4)', + aggtranstype => 'int4' }, +{ aggfnoid => 'min(int2)', aggtransfn => 'int2smaller', + aggcombinefn => 'int2smaller', aggsortop => '<(int2,int2)', + aggtranstype => 'int2' }, +{ aggfnoid => 'min(oid)', aggtransfn => 'oidsmaller', + aggcombinefn => 'oidsmaller', aggsortop => '<(oid,oid)', + aggtranstype => 'oid' }, +{ aggfnoid => 'min(float4)', aggtransfn => 'float4smaller', + aggcombinefn => 'float4smaller', aggsortop => '<(float4,float4)', + aggtranstype => 'float4' }, +{ aggfnoid => 'min(float8)', aggtransfn => 'float8smaller', + aggcombinefn => 'float8smaller', aggsortop => '<(float8,float8)', + aggtranstype => 'float8' }, +{ aggfnoid => 'min(date)', aggtransfn => 'date_smaller', + aggcombinefn => 'date_smaller', aggsortop => '<(date,date)', + aggtranstype => 'date' }, +{ aggfnoid => 'min(time)', aggtransfn => 'time_smaller', + aggcombinefn => 'time_smaller', aggsortop => '<(time,time)', + aggtranstype => 'time' }, +{ aggfnoid => 'min(timetz)', aggtransfn => 'timetz_smaller', + aggcombinefn => 'timetz_smaller', aggsortop => '<(timetz,timetz)', + aggtranstype => 'timetz' }, +{ aggfnoid => 'min(money)', aggtransfn => 'cashsmaller', + 
aggcombinefn => 'cashsmaller', aggsortop => '<(money,money)', + aggtranstype => 'money' }, +{ aggfnoid => 'min(timestamp)', aggtransfn => 'timestamp_smaller', + aggcombinefn => 'timestamp_smaller', aggsortop => '<(timestamp,timestamp)', + aggtranstype => 'timestamp' }, +{ aggfnoid => 'min(timestamptz)', aggtransfn => 'timestamptz_smaller', + aggcombinefn => 'timestamptz_smaller', + aggsortop => '<(timestamptz,timestamptz)', aggtranstype => 'timestamptz' }, +{ aggfnoid => 'min(interval)', aggtransfn => 'interval_smaller', + aggcombinefn => 'interval_smaller', aggsortop => '<(interval,interval)', + aggtranstype => 'interval' }, +{ aggfnoid => 'min(text)', aggtransfn => 'text_smaller', + aggcombinefn => 'text_smaller', aggsortop => '<(text,text)', + aggtranstype => 'text' }, +{ aggfnoid => 'min(numeric)', aggtransfn => 'numeric_smaller', + aggcombinefn => 'numeric_smaller', aggsortop => '<(numeric,numeric)', + aggtranstype => 'numeric' }, +{ aggfnoid => 'min(anyarray)', aggtransfn => 'array_smaller', + aggcombinefn => 'array_smaller', aggsortop => '<(anyarray,anyarray)', + aggtranstype => 'anyarray' }, +{ aggfnoid => 'min(bpchar)', aggtransfn => 'bpchar_smaller', + aggcombinefn => 'bpchar_smaller', aggsortop => '<(bpchar,bpchar)', + aggtranstype => 'bpchar' }, +{ aggfnoid => 'min(tid)', aggtransfn => 'tidsmaller', + aggcombinefn => 'tidsmaller', aggsortop => '<(tid,tid)', + aggtranstype => 'tid' }, +{ aggfnoid => 'min(anyenum)', aggtransfn => 'enum_smaller', + aggcombinefn => 'enum_smaller', aggsortop => '<(anyenum,anyenum)', + aggtranstype => 'anyenum' }, +{ aggfnoid => 'min(inet)', aggtransfn => 'network_smaller', + aggcombinefn => 'network_smaller', aggsortop => '<(inet,inet)', + aggtranstype => 'inet' }, + +# count +{ aggfnoid => 'count(any)', aggtransfn => 'int8inc_any', + aggcombinefn => 'int8pl', aggmtransfn => 'int8inc_any', + aggminvtransfn => 'int8dec_any', aggtranstype => 'int8', + aggmtranstype => 'int8', agginitval => '0', aggminitval => '0' }, +{ aggfnoid => 'count()', aggtransfn => 'int8inc', aggcombinefn => 'int8pl', + aggmtransfn => 'int8inc', aggminvtransfn => 'int8dec', aggtranstype => 'int8', + aggmtranstype => 'int8', agginitval => '0', aggminitval => '0' }, + +# var_pop +{ aggfnoid => 'var_pop(int8)', aggtransfn => 'int8_accum', + aggfinalfn => 'numeric_var_pop', aggcombinefn => 'numeric_combine', + aggserialfn => 'numeric_serialize', aggdeserialfn => 'numeric_deserialize', + aggmtransfn => 'int8_accum', aggminvtransfn => 'int8_accum_inv', + aggmfinalfn => 'numeric_var_pop', aggtranstype => 'internal', + aggtransspace => '128', aggmtranstype => 'internal', + aggmtransspace => '128' }, +{ aggfnoid => 'var_pop(int4)', aggtransfn => 'int4_accum', + aggfinalfn => 'numeric_poly_var_pop', aggcombinefn => 'numeric_poly_combine', + aggserialfn => 'numeric_poly_serialize', + aggdeserialfn => 'numeric_poly_deserialize', aggmtransfn => 'int4_accum', + aggminvtransfn => 'int4_accum_inv', aggmfinalfn => 'numeric_poly_var_pop', + aggtranstype => 'internal', aggtransspace => '48', + aggmtranstype => 'internal', aggmtransspace => '48' }, +{ aggfnoid => 'var_pop(int2)', aggtransfn => 'int2_accum', + aggfinalfn => 'numeric_poly_var_pop', aggcombinefn => 'numeric_poly_combine', + aggserialfn => 'numeric_poly_serialize', + aggdeserialfn => 'numeric_poly_deserialize', aggmtransfn => 'int2_accum', + aggminvtransfn => 'int2_accum_inv', aggmfinalfn => 'numeric_poly_var_pop', + aggtranstype => 'internal', aggtransspace => '48', + aggmtranstype => 'internal', aggmtransspace => '48' }, +{ aggfnoid 
=> 'var_pop(float4)', aggtransfn => 'float4_accum', + aggfinalfn => 'float8_var_pop', aggcombinefn => 'float8_combine', + aggtranstype => '_float8', agginitval => '{0,0,0}' }, +{ aggfnoid => 'var_pop(float8)', aggtransfn => 'float8_accum', + aggfinalfn => 'float8_var_pop', aggcombinefn => 'float8_combine', + aggtranstype => '_float8', agginitval => '{0,0,0}' }, +{ aggfnoid => 'var_pop(numeric)', aggtransfn => 'numeric_accum', + aggfinalfn => 'numeric_var_pop', aggcombinefn => 'numeric_combine', + aggserialfn => 'numeric_serialize', aggdeserialfn => 'numeric_deserialize', + aggmtransfn => 'numeric_accum', aggminvtransfn => 'numeric_accum_inv', + aggmfinalfn => 'numeric_var_pop', aggtranstype => 'internal', + aggtransspace => '128', aggmtranstype => 'internal', + aggmtransspace => '128' }, + +# var_samp +{ aggfnoid => 'var_samp(int8)', aggtransfn => 'int8_accum', + aggfinalfn => 'numeric_var_samp', aggcombinefn => 'numeric_combine', + aggserialfn => 'numeric_serialize', aggdeserialfn => 'numeric_deserialize', + aggmtransfn => 'int8_accum', aggminvtransfn => 'int8_accum_inv', + aggmfinalfn => 'numeric_var_samp', aggtranstype => 'internal', + aggtransspace => '128', aggmtranstype => 'internal', + aggmtransspace => '128' }, +{ aggfnoid => 'var_samp(int4)', aggtransfn => 'int4_accum', + aggfinalfn => 'numeric_poly_var_samp', aggcombinefn => 'numeric_poly_combine', + aggserialfn => 'numeric_poly_serialize', + aggdeserialfn => 'numeric_poly_deserialize', aggmtransfn => 'int4_accum', + aggminvtransfn => 'int4_accum_inv', aggmfinalfn => 'numeric_poly_var_samp', + aggtranstype => 'internal', aggtransspace => '48', + aggmtranstype => 'internal', aggmtransspace => '48' }, +{ aggfnoid => 'var_samp(int2)', aggtransfn => 'int2_accum', + aggfinalfn => 'numeric_poly_var_samp', aggcombinefn => 'numeric_poly_combine', + aggserialfn => 'numeric_poly_serialize', + aggdeserialfn => 'numeric_poly_deserialize', aggmtransfn => 'int2_accum', + aggminvtransfn => 'int2_accum_inv', aggmfinalfn => 'numeric_poly_var_samp', + aggtranstype => 'internal', aggtransspace => '48', + aggmtranstype => 'internal', aggmtransspace => '48' }, +{ aggfnoid => 'var_samp(float4)', aggtransfn => 'float4_accum', + aggfinalfn => 'float8_var_samp', aggcombinefn => 'float8_combine', + aggtranstype => '_float8', agginitval => '{0,0,0}' }, +{ aggfnoid => 'var_samp(float8)', aggtransfn => 'float8_accum', + aggfinalfn => 'float8_var_samp', aggcombinefn => 'float8_combine', + aggtranstype => '_float8', agginitval => '{0,0,0}' }, +{ aggfnoid => 'var_samp(numeric)', aggtransfn => 'numeric_accum', + aggfinalfn => 'numeric_var_samp', aggcombinefn => 'numeric_combine', + aggserialfn => 'numeric_serialize', aggdeserialfn => 'numeric_deserialize', + aggmtransfn => 'numeric_accum', aggminvtransfn => 'numeric_accum_inv', + aggmfinalfn => 'numeric_var_samp', aggtranstype => 'internal', + aggtransspace => '128', aggmtranstype => 'internal', + aggmtransspace => '128' }, + +# variance: historical Postgres syntax for var_samp +{ aggfnoid => 'variance(int8)', aggtransfn => 'int8_accum', + aggfinalfn => 'numeric_var_samp', aggcombinefn => 'numeric_combine', + aggserialfn => 'numeric_serialize', aggdeserialfn => 'numeric_deserialize', + aggmtransfn => 'int8_accum', aggminvtransfn => 'int8_accum_inv', + aggmfinalfn => 'numeric_var_samp', aggtranstype => 'internal', + aggtransspace => '128', aggmtranstype => 'internal', + aggmtransspace => '128' }, +{ aggfnoid => 'variance(int4)', aggtransfn => 'int4_accum', + aggfinalfn => 'numeric_poly_var_samp', aggcombinefn => 
'numeric_poly_combine', + aggserialfn => 'numeric_poly_serialize', + aggdeserialfn => 'numeric_poly_deserialize', aggmtransfn => 'int4_accum', + aggminvtransfn => 'int4_accum_inv', aggmfinalfn => 'numeric_poly_var_samp', + aggtranstype => 'internal', aggtransspace => '48', + aggmtranstype => 'internal', aggmtransspace => '48' }, +{ aggfnoid => 'variance(int2)', aggtransfn => 'int2_accum', + aggfinalfn => 'numeric_poly_var_samp', aggcombinefn => 'numeric_poly_combine', + aggserialfn => 'numeric_poly_serialize', + aggdeserialfn => 'numeric_poly_deserialize', aggmtransfn => 'int2_accum', + aggminvtransfn => 'int2_accum_inv', aggmfinalfn => 'numeric_poly_var_samp', + aggtranstype => 'internal', aggtransspace => '48', + aggmtranstype => 'internal', aggmtransspace => '48' }, +{ aggfnoid => 'variance(float4)', aggtransfn => 'float4_accum', + aggfinalfn => 'float8_var_samp', aggcombinefn => 'float8_combine', + aggtranstype => '_float8', agginitval => '{0,0,0}' }, +{ aggfnoid => 'variance(float8)', aggtransfn => 'float8_accum', + aggfinalfn => 'float8_var_samp', aggcombinefn => 'float8_combine', + aggtranstype => '_float8', agginitval => '{0,0,0}' }, +{ aggfnoid => 'variance(numeric)', aggtransfn => 'numeric_accum', + aggfinalfn => 'numeric_var_samp', aggcombinefn => 'numeric_combine', + aggserialfn => 'numeric_serialize', aggdeserialfn => 'numeric_deserialize', + aggmtransfn => 'numeric_accum', aggminvtransfn => 'numeric_accum_inv', + aggmfinalfn => 'numeric_var_samp', aggtranstype => 'internal', + aggtransspace => '128', aggmtranstype => 'internal', + aggmtransspace => '128' }, + +# stddev_pop +{ aggfnoid => 'stddev_pop(int8)', aggtransfn => 'int8_accum', + aggfinalfn => 'numeric_stddev_pop', aggcombinefn => 'numeric_combine', + aggserialfn => 'numeric_serialize', aggdeserialfn => 'numeric_deserialize', + aggmtransfn => 'int8_accum', aggminvtransfn => 'int8_accum_inv', + aggmfinalfn => 'numeric_stddev_pop', aggtranstype => 'internal', + aggtransspace => '128', aggmtranstype => 'internal', + aggmtransspace => '128' }, +{ aggfnoid => 'stddev_pop(int4)', aggtransfn => 'int4_accum', + aggfinalfn => 'numeric_poly_stddev_pop', + aggcombinefn => 'numeric_poly_combine', + aggserialfn => 'numeric_poly_serialize', + aggdeserialfn => 'numeric_poly_deserialize', aggmtransfn => 'int4_accum', + aggminvtransfn => 'int4_accum_inv', aggmfinalfn => 'numeric_poly_stddev_pop', + aggtranstype => 'internal', aggtransspace => '48', + aggmtranstype => 'internal', aggmtransspace => '48' }, +{ aggfnoid => 'stddev_pop(int2)', aggtransfn => 'int2_accum', + aggfinalfn => 'numeric_poly_stddev_pop', + aggcombinefn => 'numeric_poly_combine', + aggserialfn => 'numeric_poly_serialize', + aggdeserialfn => 'numeric_poly_deserialize', aggmtransfn => 'int2_accum', + aggminvtransfn => 'int2_accum_inv', aggmfinalfn => 'numeric_poly_stddev_pop', + aggtranstype => 'internal', aggtransspace => '48', + aggmtranstype => 'internal', aggmtransspace => '48' }, +{ aggfnoid => 'stddev_pop(float4)', aggtransfn => 'float4_accum', + aggfinalfn => 'float8_stddev_pop', aggcombinefn => 'float8_combine', + aggtranstype => '_float8', agginitval => '{0,0,0}' }, +{ aggfnoid => 'stddev_pop(float8)', aggtransfn => 'float8_accum', + aggfinalfn => 'float8_stddev_pop', aggcombinefn => 'float8_combine', + aggtranstype => '_float8', agginitval => '{0,0,0}' }, +{ aggfnoid => 'stddev_pop(numeric)', aggtransfn => 'numeric_accum', + aggfinalfn => 'numeric_stddev_pop', aggcombinefn => 'numeric_combine', + aggserialfn => 'numeric_serialize', aggdeserialfn => 
'numeric_deserialize', + aggmtransfn => 'numeric_accum', aggminvtransfn => 'numeric_accum_inv', + aggmfinalfn => 'numeric_stddev_pop', aggtranstype => 'internal', + aggtransspace => '128', aggmtranstype => 'internal', + aggmtransspace => '128' }, + +# stddev_samp +{ aggfnoid => 'stddev_samp(int8)', aggtransfn => 'int8_accum', + aggfinalfn => 'numeric_stddev_samp', aggcombinefn => 'numeric_combine', + aggserialfn => 'numeric_serialize', aggdeserialfn => 'numeric_deserialize', + aggmtransfn => 'int8_accum', aggminvtransfn => 'int8_accum_inv', + aggmfinalfn => 'numeric_stddev_samp', aggtranstype => 'internal', + aggtransspace => '128', aggmtranstype => 'internal', + aggmtransspace => '128' }, +{ aggfnoid => 'stddev_samp(int4)', aggtransfn => 'int4_accum', + aggfinalfn => 'numeric_poly_stddev_samp', + aggcombinefn => 'numeric_poly_combine', + aggserialfn => 'numeric_poly_serialize', + aggdeserialfn => 'numeric_poly_deserialize', aggmtransfn => 'int4_accum', + aggminvtransfn => 'int4_accum_inv', aggmfinalfn => 'numeric_poly_stddev_samp', + aggtranstype => 'internal', aggtransspace => '48', + aggmtranstype => 'internal', aggmtransspace => '48' }, +{ aggfnoid => 'stddev_samp(int2)', aggtransfn => 'int2_accum', + aggfinalfn => 'numeric_poly_stddev_samp', + aggcombinefn => 'numeric_poly_combine', + aggserialfn => 'numeric_poly_serialize', + aggdeserialfn => 'numeric_poly_deserialize', aggmtransfn => 'int2_accum', + aggminvtransfn => 'int2_accum_inv', aggmfinalfn => 'numeric_poly_stddev_samp', + aggtranstype => 'internal', aggtransspace => '48', + aggmtranstype => 'internal', aggmtransspace => '48' }, +{ aggfnoid => 'stddev_samp(float4)', aggtransfn => 'float4_accum', + aggfinalfn => 'float8_stddev_samp', aggcombinefn => 'float8_combine', + aggtranstype => '_float8', agginitval => '{0,0,0}' }, +{ aggfnoid => 'stddev_samp(float8)', aggtransfn => 'float8_accum', + aggfinalfn => 'float8_stddev_samp', aggcombinefn => 'float8_combine', + aggtranstype => '_float8', agginitval => '{0,0,0}' }, +{ aggfnoid => 'stddev_samp(numeric)', aggtransfn => 'numeric_accum', + aggfinalfn => 'numeric_stddev_samp', aggcombinefn => 'numeric_combine', + aggserialfn => 'numeric_serialize', aggdeserialfn => 'numeric_deserialize', + aggmtransfn => 'numeric_accum', aggminvtransfn => 'numeric_accum_inv', + aggmfinalfn => 'numeric_stddev_samp', aggtranstype => 'internal', + aggtransspace => '128', aggmtranstype => 'internal', + aggmtransspace => '128' }, + +# stddev: historical Postgres syntax for stddev_samp +{ aggfnoid => 'stddev(int8)', aggtransfn => 'int8_accum', + aggfinalfn => 'numeric_stddev_samp', aggcombinefn => 'numeric_combine', + aggserialfn => 'numeric_serialize', aggdeserialfn => 'numeric_deserialize', + aggmtransfn => 'int8_accum', aggminvtransfn => 'int8_accum_inv', + aggmfinalfn => 'numeric_stddev_samp', aggtranstype => 'internal', + aggtransspace => '128', aggmtranstype => 'internal', + aggmtransspace => '128' }, +{ aggfnoid => 'stddev(int4)', aggtransfn => 'int4_accum', + aggfinalfn => 'numeric_poly_stddev_samp', + aggcombinefn => 'numeric_poly_combine', + aggserialfn => 'numeric_poly_serialize', + aggdeserialfn => 'numeric_poly_deserialize', aggmtransfn => 'int4_accum', + aggminvtransfn => 'int4_accum_inv', aggmfinalfn => 'numeric_poly_stddev_samp', + aggtranstype => 'internal', aggtransspace => '48', + aggmtranstype => 'internal', aggmtransspace => '48' }, +{ aggfnoid => 'stddev(int2)', aggtransfn => 'int2_accum', + aggfinalfn => 'numeric_poly_stddev_samp', + aggcombinefn => 'numeric_poly_combine', + 
aggserialfn => 'numeric_poly_serialize', + aggdeserialfn => 'numeric_poly_deserialize', aggmtransfn => 'int2_accum', + aggminvtransfn => 'int2_accum_inv', aggmfinalfn => 'numeric_poly_stddev_samp', + aggtranstype => 'internal', aggtransspace => '48', + aggmtranstype => 'internal', aggmtransspace => '48' }, +{ aggfnoid => 'stddev(float4)', aggtransfn => 'float4_accum', + aggfinalfn => 'float8_stddev_samp', aggcombinefn => 'float8_combine', + aggtranstype => '_float8', agginitval => '{0,0,0}' }, +{ aggfnoid => 'stddev(float8)', aggtransfn => 'float8_accum', + aggfinalfn => 'float8_stddev_samp', aggcombinefn => 'float8_combine', + aggtranstype => '_float8', agginitval => '{0,0,0}' }, +{ aggfnoid => 'stddev(numeric)', aggtransfn => 'numeric_accum', + aggfinalfn => 'numeric_stddev_samp', aggcombinefn => 'numeric_combine', + aggserialfn => 'numeric_serialize', aggdeserialfn => 'numeric_deserialize', + aggmtransfn => 'numeric_accum', aggminvtransfn => 'numeric_accum_inv', + aggmfinalfn => 'numeric_stddev_samp', aggtranstype => 'internal', + aggtransspace => '128', aggmtranstype => 'internal', + aggmtransspace => '128' }, + +# SQL2003 binary regression aggregates +{ aggfnoid => 'regr_count', aggtransfn => 'int8inc_float8_float8', + aggcombinefn => 'int8pl', aggtranstype => 'int8', agginitval => '0' }, +{ aggfnoid => 'regr_sxx', aggtransfn => 'float8_regr_accum', + aggfinalfn => 'float8_regr_sxx', aggcombinefn => 'float8_regr_combine', + aggtranstype => '_float8', agginitval => '{0,0,0,0,0,0}' }, +{ aggfnoid => 'regr_syy', aggtransfn => 'float8_regr_accum', + aggfinalfn => 'float8_regr_syy', aggcombinefn => 'float8_regr_combine', + aggtranstype => '_float8', agginitval => '{0,0,0,0,0,0}' }, +{ aggfnoid => 'regr_sxy', aggtransfn => 'float8_regr_accum', + aggfinalfn => 'float8_regr_sxy', aggcombinefn => 'float8_regr_combine', + aggtranstype => '_float8', agginitval => '{0,0,0,0,0,0}' }, +{ aggfnoid => 'regr_avgx', aggtransfn => 'float8_regr_accum', + aggfinalfn => 'float8_regr_avgx', aggcombinefn => 'float8_regr_combine', + aggtranstype => '_float8', agginitval => '{0,0,0,0,0,0}' }, +{ aggfnoid => 'regr_avgy', aggtransfn => 'float8_regr_accum', + aggfinalfn => 'float8_regr_avgy', aggcombinefn => 'float8_regr_combine', + aggtranstype => '_float8', agginitval => '{0,0,0,0,0,0}' }, +{ aggfnoid => 'regr_r2', aggtransfn => 'float8_regr_accum', + aggfinalfn => 'float8_regr_r2', aggcombinefn => 'float8_regr_combine', + aggtranstype => '_float8', agginitval => '{0,0,0,0,0,0}' }, +{ aggfnoid => 'regr_slope', aggtransfn => 'float8_regr_accum', + aggfinalfn => 'float8_regr_slope', aggcombinefn => 'float8_regr_combine', + aggtranstype => '_float8', agginitval => '{0,0,0,0,0,0}' }, +{ aggfnoid => 'regr_intercept', aggtransfn => 'float8_regr_accum', + aggfinalfn => 'float8_regr_intercept', aggcombinefn => 'float8_regr_combine', + aggtranstype => '_float8', agginitval => '{0,0,0,0,0,0}' }, +{ aggfnoid => 'covar_pop', aggtransfn => 'float8_regr_accum', + aggfinalfn => 'float8_covar_pop', aggcombinefn => 'float8_regr_combine', + aggtranstype => '_float8', agginitval => '{0,0,0,0,0,0}' }, +{ aggfnoid => 'covar_samp', aggtransfn => 'float8_regr_accum', + aggfinalfn => 'float8_covar_samp', aggcombinefn => 'float8_regr_combine', + aggtranstype => '_float8', agginitval => '{0,0,0,0,0,0}' }, +{ aggfnoid => 'corr', aggtransfn => 'float8_regr_accum', + aggfinalfn => 'float8_corr', aggcombinefn => 'float8_regr_combine', + aggtranstype => '_float8', agginitval => '{0,0,0,0,0,0}' }, + +# boolean-and and boolean-or +{ aggfnoid 
=> 'bool_and', aggtransfn => 'booland_statefunc', + aggcombinefn => 'booland_statefunc', aggmtransfn => 'bool_accum', + aggminvtransfn => 'bool_accum_inv', aggmfinalfn => 'bool_alltrue', + aggsortop => '<(bool,bool)', aggtranstype => 'bool', + aggmtranstype => 'internal', aggmtransspace => '16' }, +{ aggfnoid => 'bool_or', aggtransfn => 'boolor_statefunc', + aggcombinefn => 'boolor_statefunc', aggmtransfn => 'bool_accum', + aggminvtransfn => 'bool_accum_inv', aggmfinalfn => 'bool_anytrue', + aggsortop => '>(bool,bool)', aggtranstype => 'bool', + aggmtranstype => 'internal', aggmtransspace => '16' }, +{ aggfnoid => 'every', aggtransfn => 'booland_statefunc', + aggcombinefn => 'booland_statefunc', aggmtransfn => 'bool_accum', + aggminvtransfn => 'bool_accum_inv', aggmfinalfn => 'bool_alltrue', + aggsortop => '<(bool,bool)', aggtranstype => 'bool', + aggmtranstype => 'internal', aggmtransspace => '16' }, + +# bitwise integer +{ aggfnoid => 'bit_and(int2)', aggtransfn => 'int2and', + aggcombinefn => 'int2and', aggtranstype => 'int2' }, +{ aggfnoid => 'bit_or(int2)', aggtransfn => 'int2or', aggcombinefn => 'int2or', + aggtranstype => 'int2' }, +{ aggfnoid => 'bit_and(int4)', aggtransfn => 'int4and', + aggcombinefn => 'int4and', aggtranstype => 'int4' }, +{ aggfnoid => 'bit_or(int4)', aggtransfn => 'int4or', aggcombinefn => 'int4or', + aggtranstype => 'int4' }, +{ aggfnoid => 'bit_and(int8)', aggtransfn => 'int8and', + aggcombinefn => 'int8and', aggtranstype => 'int8' }, +{ aggfnoid => 'bit_or(int8)', aggtransfn => 'int8or', aggcombinefn => 'int8or', + aggtranstype => 'int8' }, +{ aggfnoid => 'bit_and(bit)', aggtransfn => 'bitand', aggcombinefn => 'bitand', + aggtranstype => 'bit' }, +{ aggfnoid => 'bit_or(bit)', aggtransfn => 'bitor', aggcombinefn => 'bitor', + aggtranstype => 'bit' }, + +# xml +{ aggfnoid => 'xmlagg', aggtransfn => 'xmlconcat2', aggtranstype => 'xml' }, + +# array +{ aggfnoid => 'array_agg(anynonarray)', aggtransfn => 'array_agg_transfn', + aggfinalfn => 'array_agg_finalfn', aggfinalextra => 't', + aggtranstype => 'internal' }, +{ aggfnoid => 'array_agg(anyarray)', aggtransfn => 'array_agg_array_transfn', + aggfinalfn => 'array_agg_array_finalfn', aggfinalextra => 't', + aggtranstype => 'internal' }, + +# text +{ aggfnoid => 'string_agg(text,text)', aggtransfn => 'string_agg_transfn', + aggfinalfn => 'string_agg_finalfn', aggtranstype => 'internal' }, + +# bytea +{ aggfnoid => 'string_agg(bytea,bytea)', + aggtransfn => 'bytea_string_agg_transfn', + aggfinalfn => 'bytea_string_agg_finalfn', aggtranstype => 'internal' }, + +# json +{ aggfnoid => 'json_agg', aggtransfn => 'json_agg_transfn', + aggfinalfn => 'json_agg_finalfn', aggtranstype => 'internal' }, +{ aggfnoid => 'json_object_agg', aggtransfn => 'json_object_agg_transfn', + aggfinalfn => 'json_object_agg_finalfn', aggtranstype => 'internal' }, + +# jsonb +{ aggfnoid => 'jsonb_agg', aggtransfn => 'jsonb_agg_transfn', + aggfinalfn => 'jsonb_agg_finalfn', aggtranstype => 'internal' }, +{ aggfnoid => 'jsonb_object_agg', aggtransfn => 'jsonb_object_agg_transfn', + aggfinalfn => 'jsonb_object_agg_finalfn', aggtranstype => 'internal' }, + +# ordered-set and hypothetical-set aggregates +{ aggfnoid => 'percentile_disc(float8,anyelement)', aggkind => 'o', + aggnumdirectargs => '1', aggtransfn => 'ordered_set_transition', + aggfinalfn => 'percentile_disc_final', aggfinalextra => 't', + aggfinalmodify => 's', aggmfinalmodify => 's', aggtranstype => 'internal' }, +{ aggfnoid => 'percentile_cont(float8,float8)', aggkind => 'o', + 
aggnumdirectargs => '1', aggtransfn => 'ordered_set_transition', + aggfinalfn => 'percentile_cont_float8_final', aggfinalmodify => 's', + aggmfinalmodify => 's', aggtranstype => 'internal' }, +{ aggfnoid => 'percentile_cont(float8,interval)', aggkind => 'o', + aggnumdirectargs => '1', aggtransfn => 'ordered_set_transition', + aggfinalfn => 'percentile_cont_interval_final', aggfinalmodify => 's', + aggmfinalmodify => 's', aggtranstype => 'internal' }, +{ aggfnoid => 'percentile_disc(_float8,anyelement)', aggkind => 'o', + aggnumdirectargs => '1', aggtransfn => 'ordered_set_transition', + aggfinalfn => 'percentile_disc_multi_final', aggfinalextra => 't', + aggfinalmodify => 's', aggmfinalmodify => 's', aggtranstype => 'internal' }, +{ aggfnoid => 'percentile_cont(_float8,float8)', aggkind => 'o', + aggnumdirectargs => '1', aggtransfn => 'ordered_set_transition', + aggfinalfn => 'percentile_cont_float8_multi_final', aggfinalmodify => 's', + aggmfinalmodify => 's', aggtranstype => 'internal' }, +{ aggfnoid => 'percentile_cont(_float8,interval)', aggkind => 'o', + aggnumdirectargs => '1', aggtransfn => 'ordered_set_transition', + aggfinalfn => 'percentile_cont_interval_multi_final', aggfinalmodify => 's', + aggmfinalmodify => 's', aggtranstype => 'internal' }, +{ aggfnoid => 'mode', aggkind => 'o', aggtransfn => 'ordered_set_transition', + aggfinalfn => 'mode_final', aggfinalextra => 't', aggfinalmodify => 's', + aggmfinalmodify => 's', aggtranstype => 'internal' }, +{ aggfnoid => 'rank(any)', aggkind => 'h', aggnumdirectargs => '1', + aggtransfn => 'ordered_set_transition_multi', aggfinalfn => 'rank_final', + aggfinalextra => 't', aggfinalmodify => 'w', aggmfinalmodify => 'w', + aggtranstype => 'internal' }, +{ aggfnoid => 'percent_rank(any)', aggkind => 'h', aggnumdirectargs => '1', + aggtransfn => 'ordered_set_transition_multi', + aggfinalfn => 'percent_rank_final', aggfinalextra => 't', + aggfinalmodify => 'w', aggmfinalmodify => 'w', aggtranstype => 'internal' }, +{ aggfnoid => 'cume_dist(any)', aggkind => 'h', aggnumdirectargs => '1', + aggtransfn => 'ordered_set_transition_multi', aggfinalfn => 'cume_dist_final', + aggfinalextra => 't', aggfinalmodify => 'w', aggmfinalmodify => 'w', + aggtranstype => 'internal' }, +{ aggfnoid => 'dense_rank(any)', aggkind => 'h', aggnumdirectargs => '1', + aggtransfn => 'ordered_set_transition_multi', + aggfinalfn => 'dense_rank_final', aggfinalextra => 't', aggfinalmodify => 'w', + aggmfinalmodify => 'w', aggtranstype => 'internal' }, + +] diff --git a/src/include/catalog/pg_aggregate.h b/src/include/catalog/pg_aggregate.h index 4d5b9bb9a6..bcae93f5e1 100644 --- a/src/include/catalog/pg_aggregate.h +++ b/src/include/catalog/pg_aggregate.h @@ -1,18 +1,17 @@ /*------------------------------------------------------------------------- * * pg_aggregate.h - * definition of the system "aggregate" relation (pg_aggregate) - * along with the relation's initial contents. + * definition of the "aggregate" system catalog (pg_aggregate) * * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/include/catalog/pg_aggregate.h * * NOTES - * the genbki.pl script reads this file and generates .bki - * information from the DATA() statements. + * The Catalog.pm module reads this file and derives schema + * information. 
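As context for the NOTES just above: the rows that used to live in this header as DATA() lines now sit in pg_aggregate.dat as a plain Perl array-of-hashes, and Catalog.pm/genbki.pl combine that data with the schema (and the BKI_DEFAULT annotations) declared here. The following is only a minimal illustrative sketch, not part of the patch: it assumes it is run from the source tree root, and it loads the .dat file with do(), treating it as ordinary Perl data; the real build uses Catalog.pm's own parser. Keys a row omits (aggkind, aggnumdirectargs, the finalmodify columns, and so on) are simply absent here and get their BKI_DEFAULT values later, during genbki.pl processing.

    #!/usr/bin/perl
    # Ad-hoc sketch: load pg_aggregate.dat as plain Perl data and list each
    # row's explicitly written keys.  Path and do()-based loading are
    # assumptions for a quick standalone script.
    use strict;
    use warnings;

    my $datfile = './src/include/catalog/pg_aggregate.dat';
    my $rows = do $datfile
        or die "could not load $datfile: " . ($@ || $!) . "\n";

    printf "%d aggregate rows\n", scalar @$rows;
    for my $row (@$rows)
    {
        # Only keys present in the .dat file appear here; omitted columns
        # are filled in from BKI_DEFAULT at build time.
        printf "%-35s keys: %s\n", $row->{aggfnoid}, join(',', sort keys %$row);
    }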
* *------------------------------------------------------------------------- */ @@ -20,62 +19,85 @@ #define PG_AGGREGATE_H #include "catalog/genbki.h" +#include "catalog/pg_aggregate_d.h" + #include "catalog/objectaddress.h" #include "nodes/pg_list.h" /* ---------------------------------------------------------------- * pg_aggregate definition. - * * cpp turns this into typedef struct FormData_pg_aggregate - * - * aggfnoid pg_proc OID of the aggregate itself - * aggkind aggregate kind, see AGGKIND_ categories below - * aggnumdirectargs number of arguments that are "direct" arguments - * aggtransfn transition function - * aggfinalfn final function (0 if none) - * aggcombinefn combine function (0 if none) - * aggserialfn function to convert transtype to bytea (0 if none) - * aggdeserialfn function to convert bytea to transtype (0 if none) - * aggmtransfn forward function for moving-aggregate mode (0 if none) - * aggminvtransfn inverse function for moving-aggregate mode (0 if none) - * aggmfinalfn final function for moving-aggregate mode (0 if none) - * aggfinalextra true to pass extra dummy arguments to aggfinalfn - * aggmfinalextra true to pass extra dummy arguments to aggmfinalfn - * aggsortop associated sort operator (0 if none) - * aggtranstype type of aggregate's transition (state) data - * aggtransspace estimated size of state data (0 for default estimate) - * aggmtranstype type of moving-aggregate state data (0 if none) - * aggmtransspace estimated size of moving-agg state (0 for default est) - * agginitval initial value for transition state (can be NULL) - * aggminitval initial value for moving-agg state (can be NULL) * ---------------------------------------------------------------- */ -#define AggregateRelationId 2600 - -CATALOG(pg_aggregate,2600) BKI_WITHOUT_OIDS +CATALOG(pg_aggregate,2600,AggregateRelationId) BKI_WITHOUT_OIDS { - regproc aggfnoid; - char aggkind; - int16 aggnumdirectargs; - regproc aggtransfn; - regproc aggfinalfn; - regproc aggcombinefn; - regproc aggserialfn; - regproc aggdeserialfn; - regproc aggmtransfn; - regproc aggminvtransfn; - regproc aggmfinalfn; - bool aggfinalextra; - bool aggmfinalextra; - Oid aggsortop; - Oid aggtranstype; - int32 aggtransspace; - Oid aggmtranstype; - int32 aggmtransspace; + /* pg_proc OID of the aggregate itself */ + regproc aggfnoid BKI_LOOKUP(pg_proc); + + /* aggregate kind, see AGGKIND_ categories below */ + char aggkind BKI_DEFAULT(n); + + /* number of arguments that are "direct" arguments */ + int16 aggnumdirectargs BKI_DEFAULT(0); + + /* transition function */ + regproc aggtransfn BKI_LOOKUP(pg_proc); + + /* final function (0 if none) */ + regproc aggfinalfn BKI_DEFAULT(-) BKI_LOOKUP(pg_proc); + + /* combine function (0 if none) */ + regproc aggcombinefn BKI_DEFAULT(-) BKI_LOOKUP(pg_proc); + + /* function to convert transtype to bytea (0 if none) */ + regproc aggserialfn BKI_DEFAULT(-) BKI_LOOKUP(pg_proc); + + /* function to convert bytea to transtype (0 if none) */ + regproc aggdeserialfn BKI_DEFAULT(-) BKI_LOOKUP(pg_proc); + + /* forward function for moving-aggregate mode (0 if none) */ + regproc aggmtransfn BKI_DEFAULT(-) BKI_LOOKUP(pg_proc); + + /* inverse function for moving-aggregate mode (0 if none) */ + regproc aggminvtransfn BKI_DEFAULT(-) BKI_LOOKUP(pg_proc); + + /* final function for moving-aggregate mode (0 if none) */ + regproc aggmfinalfn BKI_DEFAULT(-) BKI_LOOKUP(pg_proc); + + /* true to pass extra dummy arguments to aggfinalfn */ + bool aggfinalextra BKI_DEFAULT(f); + + /* true to pass extra dummy arguments to 
aggmfinalfn */ + bool aggmfinalextra BKI_DEFAULT(f); + + /* tells whether aggfinalfn modifies transition state */ + char aggfinalmodify BKI_DEFAULT(r); + + /* tells whether aggmfinalfn modifies transition state */ + char aggmfinalmodify BKI_DEFAULT(r); + + /* associated sort operator (0 if none) */ + Oid aggsortop BKI_DEFAULT(0) BKI_LOOKUP(pg_operator); + + /* type of aggregate's transition (state) data */ + Oid aggtranstype BKI_LOOKUP(pg_type); + + /* estimated size of state data (0 for default estimate) */ + int32 aggtransspace BKI_DEFAULT(0); + + /* type of moving-aggregate state data (0 if none) */ + Oid aggmtranstype BKI_DEFAULT(0) BKI_LOOKUP(pg_type); + + /* estimated size of moving-agg state (0 for default est) */ + int32 aggmtransspace BKI_DEFAULT(0); #ifdef CATALOG_VARLEN /* variable-length fields start here */ - text agginitval; - text aggminitval; + + /* initial value for transition state (can be NULL) */ + text agginitval BKI_DEFAULT(_null_); + + /* initial value for moving-agg state (can be NULL) */ + text aggminitval BKI_DEFAULT(_null_); #endif } FormData_pg_aggregate; @@ -86,32 +108,7 @@ CATALOG(pg_aggregate,2600) BKI_WITHOUT_OIDS */ typedef FormData_pg_aggregate *Form_pg_aggregate; -/* ---------------- - * compiler constants for pg_aggregate - * ---------------- - */ - -#define Natts_pg_aggregate 20 -#define Anum_pg_aggregate_aggfnoid 1 -#define Anum_pg_aggregate_aggkind 2 -#define Anum_pg_aggregate_aggnumdirectargs 3 -#define Anum_pg_aggregate_aggtransfn 4 -#define Anum_pg_aggregate_aggfinalfn 5 -#define Anum_pg_aggregate_aggcombinefn 6 -#define Anum_pg_aggregate_aggserialfn 7 -#define Anum_pg_aggregate_aggdeserialfn 8 -#define Anum_pg_aggregate_aggmtransfn 9 -#define Anum_pg_aggregate_aggminvtransfn 10 -#define Anum_pg_aggregate_aggmfinalfn 11 -#define Anum_pg_aggregate_aggfinalextra 12 -#define Anum_pg_aggregate_aggmfinalextra 13 -#define Anum_pg_aggregate_aggsortop 14 -#define Anum_pg_aggregate_aggtranstype 15 -#define Anum_pg_aggregate_aggtransspace 16 -#define Anum_pg_aggregate_aggmtranstype 17 -#define Anum_pg_aggregate_aggmtransspace 18 -#define Anum_pg_aggregate_agginitval 19 -#define Anum_pg_aggregate_aggminitval 20 +#ifdef EXPOSE_TO_CLIENT_CODE /* * Symbolic values for aggkind column. We distinguish normal aggregates @@ -128,196 +125,21 @@ typedef FormData_pg_aggregate *Form_pg_aggregate; /* Use this macro to test for "ordered-set agg including hypothetical case" */ #define AGGKIND_IS_ORDERED_SET(kind) ((kind) != AGGKIND_NORMAL) - -/* ---------------- - * initial contents of pg_aggregate - * --------------- +/* + * Symbolic values for aggfinalmodify and aggmfinalmodify columns. + * Preferably, finalfns do not modify the transition state value at all, + * but in some cases that would cost too much performance. We distinguish + * "pure read only" and "trashes it arbitrarily" cases, as well as the + * intermediate case where multiple finalfn calls are allowed but the + * transfn cannot be applied anymore after the first finalfn call. 
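The comment above distinguishes three finalfn behaviours. In the pg_aggregate.dat contents earlier in this patch, nearly every row relies on the header's BKI_DEFAULT of 'r' (AGGMODIFY_READ_ONLY); the ordered-set aggregates set aggfinalmodify/aggmfinalmodify to 's' (shareable) and the hypothetical-set aggregates to 'w' (read-write). A small sketch, again assuming the same path and do()-based loading as in the earlier example rather than the real Catalog.pm machinery, that tallies those settings straight from the data file:

    #!/usr/bin/perl
    # Tally aggfinalmodify values in pg_aggregate.dat (assumed path); rows
    # that omit the key are counted under the header's default of 'r'.
    use strict;
    use warnings;

    my $rows = do './src/include/catalog/pg_aggregate.dat'
        or die "could not load pg_aggregate.dat: " . ($@ || $!) . "\n";

    my %tally;
    $tally{ $_->{aggfinalmodify} // 'r (defaulted)' }++ for @$rows;

    printf "%-15s %3d\n", $_, $tally{$_} for sort keys %tally;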
*/ +#define AGGMODIFY_READ_ONLY 'r' +#define AGGMODIFY_SHAREABLE 's' +#define AGGMODIFY_READ_WRITE 'w' -/* avg */ -DATA(insert ( 2100 n 0 int8_avg_accum numeric_poly_avg int8_avg_combine int8_avg_serialize int8_avg_deserialize int8_avg_accum int8_avg_accum_inv numeric_poly_avg f f 0 2281 48 2281 48 _null_ _null_ )); -DATA(insert ( 2101 n 0 int4_avg_accum int8_avg int4_avg_combine - - int4_avg_accum int4_avg_accum_inv int8_avg f f 0 1016 0 1016 0 "{0,0}" "{0,0}" )); -DATA(insert ( 2102 n 0 int2_avg_accum int8_avg int4_avg_combine - - int2_avg_accum int2_avg_accum_inv int8_avg f f 0 1016 0 1016 0 "{0,0}" "{0,0}" )); -DATA(insert ( 2103 n 0 numeric_avg_accum numeric_avg numeric_avg_combine numeric_avg_serialize numeric_avg_deserialize numeric_avg_accum numeric_accum_inv numeric_avg f f 0 2281 128 2281 128 _null_ _null_ )); -DATA(insert ( 2104 n 0 float4_accum float8_avg float8_combine - - - - - f f 0 1022 0 0 0 "{0,0,0}" _null_ )); -DATA(insert ( 2105 n 0 float8_accum float8_avg float8_combine - - - - - f f 0 1022 0 0 0 "{0,0,0}" _null_ )); -DATA(insert ( 2106 n 0 interval_accum interval_avg interval_combine - - interval_accum interval_accum_inv interval_avg f f 0 1187 0 1187 0 "{0 second,0 second}" "{0 second,0 second}" )); - -/* sum */ -DATA(insert ( 2107 n 0 int8_avg_accum numeric_poly_sum int8_avg_combine int8_avg_serialize int8_avg_deserialize int8_avg_accum int8_avg_accum_inv numeric_poly_sum f f 0 2281 48 2281 48 _null_ _null_ )); -DATA(insert ( 2108 n 0 int4_sum - int8pl - - int4_avg_accum int4_avg_accum_inv int2int4_sum f f 0 20 0 1016 0 _null_ "{0,0}" )); -DATA(insert ( 2109 n 0 int2_sum - int8pl - - int2_avg_accum int2_avg_accum_inv int2int4_sum f f 0 20 0 1016 0 _null_ "{0,0}" )); -DATA(insert ( 2110 n 0 float4pl - float4pl - - - - - f f 0 700 0 0 0 _null_ _null_ )); -DATA(insert ( 2111 n 0 float8pl - float8pl - - - - - f f 0 701 0 0 0 _null_ _null_ )); -DATA(insert ( 2112 n 0 cash_pl - cash_pl - - cash_pl cash_mi - f f 0 790 0 790 0 _null_ _null_ )); -DATA(insert ( 2113 n 0 interval_pl - interval_pl - - interval_pl interval_mi - f f 0 1186 0 1186 0 _null_ _null_ )); -DATA(insert ( 2114 n 0 numeric_avg_accum numeric_sum numeric_avg_combine numeric_avg_serialize numeric_avg_deserialize numeric_avg_accum numeric_accum_inv numeric_sum f f 0 2281 128 2281 128 _null_ _null_ )); - -/* max */ -DATA(insert ( 2115 n 0 int8larger - int8larger - - - - - f f 413 20 0 0 0 _null_ _null_ )); -DATA(insert ( 2116 n 0 int4larger - int4larger - - - - - f f 521 23 0 0 0 _null_ _null_ )); -DATA(insert ( 2117 n 0 int2larger - int2larger - - - - - f f 520 21 0 0 0 _null_ _null_ )); -DATA(insert ( 2118 n 0 oidlarger - oidlarger - - - - - f f 610 26 0 0 0 _null_ _null_ )); -DATA(insert ( 2119 n 0 float4larger - float4larger - - - - - f f 623 700 0 0 0 _null_ _null_ )); -DATA(insert ( 2120 n 0 float8larger - float8larger - - - - - f f 674 701 0 0 0 _null_ _null_ )); -DATA(insert ( 2121 n 0 int4larger - int4larger - - - - - f f 563 702 0 0 0 _null_ _null_ )); -DATA(insert ( 2122 n 0 date_larger - date_larger - - - - - f f 1097 1082 0 0 0 _null_ _null_ )); -DATA(insert ( 2123 n 0 time_larger - time_larger - - - - - f f 1112 1083 0 0 0 _null_ _null_ )); -DATA(insert ( 2124 n 0 timetz_larger - timetz_larger - - - - - f f 1554 1266 0 0 0 _null_ _null_ )); -DATA(insert ( 2125 n 0 cashlarger - cashlarger - - - - - f f 903 790 0 0 0 _null_ _null_ )); -DATA(insert ( 2126 n 0 timestamp_larger - timestamp_larger - - - - - f f 2064 1114 0 0 0 _null_ _null_ )); -DATA(insert ( 2127 n 0 timestamptz_larger - 
timestamptz_larger - - - - - f f 1324 1184 0 0 0 _null_ _null_ )); -DATA(insert ( 2128 n 0 interval_larger - interval_larger - - - - - f f 1334 1186 0 0 0 _null_ _null_ )); -DATA(insert ( 2129 n 0 text_larger - text_larger - - - - - f f 666 25 0 0 0 _null_ _null_ )); -DATA(insert ( 2130 n 0 numeric_larger - numeric_larger - - - - - f f 1756 1700 0 0 0 _null_ _null_ )); -DATA(insert ( 2050 n 0 array_larger - array_larger - - - - - f f 1073 2277 0 0 0 _null_ _null_ )); -DATA(insert ( 2244 n 0 bpchar_larger - bpchar_larger - - - - - f f 1060 1042 0 0 0 _null_ _null_ )); -DATA(insert ( 2797 n 0 tidlarger - tidlarger - - - - - f f 2800 27 0 0 0 _null_ _null_ )); -DATA(insert ( 3526 n 0 enum_larger - enum_larger - - - - - f f 3519 3500 0 0 0 _null_ _null_ )); -DATA(insert ( 3564 n 0 network_larger - network_larger - - - - - f f 1205 869 0 0 0 _null_ _null_ )); - -/* min */ -DATA(insert ( 2131 n 0 int8smaller - int8smaller - - - - - f f 412 20 0 0 0 _null_ _null_ )); -DATA(insert ( 2132 n 0 int4smaller - int4smaller - - - - - f f 97 23 0 0 0 _null_ _null_ )); -DATA(insert ( 2133 n 0 int2smaller - int2smaller - - - - - f f 95 21 0 0 0 _null_ _null_ )); -DATA(insert ( 2134 n 0 oidsmaller - oidsmaller - - - - - f f 609 26 0 0 0 _null_ _null_ )); -DATA(insert ( 2135 n 0 float4smaller - float4smaller - - - - - f f 622 700 0 0 0 _null_ _null_ )); -DATA(insert ( 2136 n 0 float8smaller - float8smaller - - - - - f f 672 701 0 0 0 _null_ _null_ )); -DATA(insert ( 2137 n 0 int4smaller - int4smaller - - - - - f f 562 702 0 0 0 _null_ _null_ )); -DATA(insert ( 2138 n 0 date_smaller - date_smaller - - - - - f f 1095 1082 0 0 0 _null_ _null_ )); -DATA(insert ( 2139 n 0 time_smaller - time_smaller - - - - - f f 1110 1083 0 0 0 _null_ _null_ )); -DATA(insert ( 2140 n 0 timetz_smaller - timetz_smaller - - - - - f f 1552 1266 0 0 0 _null_ _null_ )); -DATA(insert ( 2141 n 0 cashsmaller - cashsmaller - - - - - f f 902 790 0 0 0 _null_ _null_ )); -DATA(insert ( 2142 n 0 timestamp_smaller - timestamp_smaller - - - - - f f 2062 1114 0 0 0 _null_ _null_ )); -DATA(insert ( 2143 n 0 timestamptz_smaller - timestamptz_smaller - - - - - f f 1322 1184 0 0 0 _null_ _null_ )); -DATA(insert ( 2144 n 0 interval_smaller - interval_smaller - - - - - f f 1332 1186 0 0 0 _null_ _null_ )); -DATA(insert ( 2145 n 0 text_smaller - text_smaller - - - - - f f 664 25 0 0 0 _null_ _null_ )); -DATA(insert ( 2146 n 0 numeric_smaller - numeric_smaller - - - - - f f 1754 1700 0 0 0 _null_ _null_ )); -DATA(insert ( 2051 n 0 array_smaller - array_smaller - - - - - f f 1072 2277 0 0 0 _null_ _null_ )); -DATA(insert ( 2245 n 0 bpchar_smaller - bpchar_smaller - - - - - f f 1058 1042 0 0 0 _null_ _null_ )); -DATA(insert ( 2798 n 0 tidsmaller - tidsmaller - - - - - f f 2799 27 0 0 0 _null_ _null_ )); -DATA(insert ( 3527 n 0 enum_smaller - enum_smaller - - - - - f f 3518 3500 0 0 0 _null_ _null_ )); -DATA(insert ( 3565 n 0 network_smaller - network_smaller - - - - - f f 1203 869 0 0 0 _null_ _null_ )); - -/* count */ -DATA(insert ( 2147 n 0 int8inc_any - int8pl - - int8inc_any int8dec_any - f f 0 20 0 20 0 "0" "0" )); -DATA(insert ( 2803 n 0 int8inc - int8pl - - int8inc int8dec - f f 0 20 0 20 0 "0" "0" )); - -/* var_pop */ -DATA(insert ( 2718 n 0 int8_accum numeric_var_pop numeric_combine numeric_serialize numeric_deserialize int8_accum int8_accum_inv numeric_var_pop f f 0 2281 128 2281 128 _null_ _null_ )); -DATA(insert ( 2719 n 0 int4_accum numeric_poly_var_pop numeric_poly_combine numeric_poly_serialize numeric_poly_deserialize int4_accum 
int4_accum_inv numeric_poly_var_pop f f 0 2281 48 2281 48 _null_ _null_ )); -DATA(insert ( 2720 n 0 int2_accum numeric_poly_var_pop numeric_poly_combine numeric_poly_serialize numeric_poly_deserialize int2_accum int2_accum_inv numeric_poly_var_pop f f 0 2281 48 2281 48 _null_ _null_ )); -DATA(insert ( 2721 n 0 float4_accum float8_var_pop float8_combine - - - - - f f 0 1022 0 0 0 "{0,0,0}" _null_ )); -DATA(insert ( 2722 n 0 float8_accum float8_var_pop float8_combine - - - - - f f 0 1022 0 0 0 "{0,0,0}" _null_ )); -DATA(insert ( 2723 n 0 numeric_accum numeric_var_pop numeric_combine numeric_serialize numeric_deserialize numeric_accum numeric_accum_inv numeric_var_pop f f 0 2281 128 2281 128 _null_ _null_ )); - -/* var_samp */ -DATA(insert ( 2641 n 0 int8_accum numeric_var_samp numeric_combine numeric_serialize numeric_deserialize int8_accum int8_accum_inv numeric_var_samp f f 0 2281 128 2281 128 _null_ _null_ )); -DATA(insert ( 2642 n 0 int4_accum numeric_poly_var_samp numeric_poly_combine numeric_poly_serialize numeric_poly_deserialize int4_accum int4_accum_inv numeric_poly_var_samp f f 0 2281 48 2281 48 _null_ _null_ )); -DATA(insert ( 2643 n 0 int2_accum numeric_poly_var_samp numeric_poly_combine numeric_poly_serialize numeric_poly_deserialize int2_accum int2_accum_inv numeric_poly_var_samp f f 0 2281 48 2281 48 _null_ _null_ )); -DATA(insert ( 2644 n 0 float4_accum float8_var_samp float8_combine - - - - - f f 0 1022 0 0 0 "{0,0,0}" _null_ )); -DATA(insert ( 2645 n 0 float8_accum float8_var_samp float8_combine - - - - - f f 0 1022 0 0 0 "{0,0,0}" _null_ )); -DATA(insert ( 2646 n 0 numeric_accum numeric_var_samp numeric_combine numeric_serialize numeric_deserialize numeric_accum numeric_accum_inv numeric_var_samp f f 0 2281 128 2281 128 _null_ _null_ )); - -/* variance: historical Postgres syntax for var_samp */ -DATA(insert ( 2148 n 0 int8_accum numeric_var_samp numeric_combine numeric_serialize numeric_deserialize int8_accum int8_accum_inv numeric_var_samp f f 0 2281 128 2281 128 _null_ _null_ )); -DATA(insert ( 2149 n 0 int4_accum numeric_poly_var_samp numeric_poly_combine numeric_poly_serialize numeric_poly_deserialize int4_accum int4_accum_inv numeric_poly_var_samp f f 0 2281 48 2281 48 _null_ _null_ )); -DATA(insert ( 2150 n 0 int2_accum numeric_poly_var_samp numeric_poly_combine numeric_poly_serialize numeric_poly_deserialize int2_accum int2_accum_inv numeric_poly_var_samp f f 0 2281 48 2281 48 _null_ _null_ )); -DATA(insert ( 2151 n 0 float4_accum float8_var_samp float8_combine - - - - - f f 0 1022 0 0 0 "{0,0,0}" _null_ )); -DATA(insert ( 2152 n 0 float8_accum float8_var_samp float8_combine - - - - - f f 0 1022 0 0 0 "{0,0,0}" _null_ )); -DATA(insert ( 2153 n 0 numeric_accum numeric_var_samp numeric_combine numeric_serialize numeric_deserialize numeric_accum numeric_accum_inv numeric_var_samp f f 0 2281 128 2281 128 _null_ _null_ )); - -/* stddev_pop */ -DATA(insert ( 2724 n 0 int8_accum numeric_stddev_pop numeric_combine numeric_serialize numeric_deserialize int8_accum int8_accum_inv numeric_stddev_pop f f 0 2281 128 2281 128 _null_ _null_ )); -DATA(insert ( 2725 n 0 int4_accum numeric_poly_stddev_pop numeric_poly_combine numeric_poly_serialize numeric_poly_deserialize int4_accum int4_accum_inv numeric_poly_stddev_pop f f 0 2281 48 2281 48 _null_ _null_ )); -DATA(insert ( 2726 n 0 int2_accum numeric_poly_stddev_pop numeric_poly_combine numeric_poly_serialize numeric_poly_deserialize int2_accum int2_accum_inv numeric_poly_stddev_pop f f 0 2281 48 2281 48 _null_ _null_ )); 
-DATA(insert ( 2727 n 0 float4_accum float8_stddev_pop float8_combine - - - - - f f 0 1022 0 0 0 "{0,0,0}" _null_ ));
-DATA(insert ( 2728 n 0 float8_accum float8_stddev_pop float8_combine - - - - - f f 0 1022 0 0 0 "{0,0,0}" _null_ ));
-DATA(insert ( 2729 n 0 numeric_accum numeric_stddev_pop numeric_combine numeric_serialize numeric_deserialize numeric_accum numeric_accum_inv numeric_stddev_pop f f 0 2281 128 2281 128 _null_ _null_ ));
-
-/* stddev_samp */
-DATA(insert ( 2712 n 0 int8_accum numeric_stddev_samp numeric_combine numeric_serialize numeric_deserialize int8_accum int8_accum_inv numeric_stddev_samp f f 0 2281 128 2281 128 _null_ _null_ ));
-DATA(insert ( 2713 n 0 int4_accum numeric_poly_stddev_samp numeric_poly_combine numeric_poly_serialize numeric_poly_deserialize int4_accum int4_accum_inv numeric_poly_stddev_samp f f 0 2281 48 2281 48 _null_ _null_ ));
-DATA(insert ( 2714 n 0 int2_accum numeric_poly_stddev_samp numeric_poly_combine numeric_poly_serialize numeric_poly_deserialize int2_accum int2_accum_inv numeric_poly_stddev_samp f f 0 2281 48 2281 48 _null_ _null_ ));
-DATA(insert ( 2715 n 0 float4_accum float8_stddev_samp float8_combine - - - - - f f 0 1022 0 0 0 "{0,0,0}" _null_ ));
-DATA(insert ( 2716 n 0 float8_accum float8_stddev_samp float8_combine - - - - - f f 0 1022 0 0 0 "{0,0,0}" _null_ ));
-DATA(insert ( 2717 n 0 numeric_accum numeric_stddev_samp numeric_combine numeric_serialize numeric_deserialize numeric_accum numeric_accum_inv numeric_stddev_samp f f 0 2281 128 2281 128 _null_ _null_ ));
-
-/* stddev: historical Postgres syntax for stddev_samp */
-DATA(insert ( 2154 n 0 int8_accum numeric_stddev_samp numeric_combine numeric_serialize numeric_deserialize int8_accum int8_accum_inv numeric_stddev_samp f f 0 2281 128 2281 128 _null_ _null_ ));
-DATA(insert ( 2155 n 0 int4_accum numeric_poly_stddev_samp numeric_poly_combine numeric_poly_serialize numeric_poly_deserialize int4_accum int4_accum_inv numeric_poly_stddev_samp f f 0 2281 48 2281 48 _null_ _null_ ));
-DATA(insert ( 2156 n 0 int2_accum numeric_poly_stddev_samp numeric_poly_combine numeric_poly_serialize numeric_poly_deserialize int2_accum int2_accum_inv numeric_poly_stddev_samp f f 0 2281 48 2281 48 _null_ _null_ ));
-DATA(insert ( 2157 n 0 float4_accum float8_stddev_samp float8_combine - - - - - f f 0 1022 0 0 0 "{0,0,0}" _null_ ));
-DATA(insert ( 2158 n 0 float8_accum float8_stddev_samp float8_combine - - - - - f f 0 1022 0 0 0 "{0,0,0}" _null_ ));
-DATA(insert ( 2159 n 0 numeric_accum numeric_stddev_samp numeric_combine numeric_serialize numeric_deserialize numeric_accum numeric_accum_inv numeric_stddev_samp f f 0 2281 128 2281 128 _null_ _null_ ));
-
-/* SQL2003 binary regression aggregates */
-DATA(insert ( 2818 n 0 int8inc_float8_float8 - int8pl - - - - - f f 0 20 0 0 0 "0" _null_ ));
-DATA(insert ( 2819 n 0 float8_regr_accum float8_regr_sxx float8_regr_combine - - - - - f f 0 1022 0 0 0 "{0,0,0,0,0,0}" _null_ ));
-DATA(insert ( 2820 n 0 float8_regr_accum float8_regr_syy float8_regr_combine - - - - - f f 0 1022 0 0 0 "{0,0,0,0,0,0}" _null_ ));
-DATA(insert ( 2821 n 0 float8_regr_accum float8_regr_sxy float8_regr_combine - - - - - f f 0 1022 0 0 0 "{0,0,0,0,0,0}" _null_ ));
-DATA(insert ( 2822 n 0 float8_regr_accum float8_regr_avgx float8_regr_combine - - - - - f f 0 1022 0 0 0 "{0,0,0,0,0,0}" _null_ ));
-DATA(insert ( 2823 n 0 float8_regr_accum float8_regr_avgy float8_regr_combine - - - - - f f 0 1022 0 0 0 "{0,0,0,0,0,0}" _null_ ));
-DATA(insert ( 2824 n 0 float8_regr_accum float8_regr_r2 float8_regr_combine - - - - - f f 0 1022 0 0 0 "{0,0,0,0,0,0}" _null_ ));
-DATA(insert ( 2825 n 0 float8_regr_accum float8_regr_slope float8_regr_combine - - - - - f f 0 1022 0 0 0 "{0,0,0,0,0,0}" _null_ ));
-DATA(insert ( 2826 n 0 float8_regr_accum float8_regr_intercept float8_regr_combine - - - - - f f 0 1022 0 0 0 "{0,0,0,0,0,0}" _null_ ));
-DATA(insert ( 2827 n 0 float8_regr_accum float8_covar_pop float8_regr_combine - - - - - f f 0 1022 0 0 0 "{0,0,0,0,0,0}" _null_ ));
-DATA(insert ( 2828 n 0 float8_regr_accum float8_covar_samp float8_regr_combine - - - - - f f 0 1022 0 0 0 "{0,0,0,0,0,0}" _null_ ));
-DATA(insert ( 2829 n 0 float8_regr_accum float8_corr float8_regr_combine - - - - - f f 0 1022 0 0 0 "{0,0,0,0,0,0}" _null_ ));
-
-/* boolean-and and boolean-or */
-DATA(insert ( 2517 n 0 booland_statefunc - booland_statefunc - - bool_accum bool_accum_inv bool_alltrue f f 58 16 0 2281 16 _null_ _null_ ));
-DATA(insert ( 2518 n 0 boolor_statefunc - boolor_statefunc - - bool_accum bool_accum_inv bool_anytrue f f 59 16 0 2281 16 _null_ _null_ ));
-DATA(insert ( 2519 n 0 booland_statefunc - booland_statefunc - - bool_accum bool_accum_inv bool_alltrue f f 58 16 0 2281 16 _null_ _null_ ));
-
-/* bitwise integer */
-DATA(insert ( 2236 n 0 int2and - int2and - - - - - f f 0 21 0 0 0 _null_ _null_ ));
-DATA(insert ( 2237 n 0 int2or - int2or - - - - - f f 0 21 0 0 0 _null_ _null_ ));
-DATA(insert ( 2238 n 0 int4and - int4and - - - - - f f 0 23 0 0 0 _null_ _null_ ));
-DATA(insert ( 2239 n 0 int4or - int4or - - - - - f f 0 23 0 0 0 _null_ _null_ ));
-DATA(insert ( 2240 n 0 int8and - int8and - - - - - f f 0 20 0 0 0 _null_ _null_ ));
-DATA(insert ( 2241 n 0 int8or - int8or - - - - - f f 0 20 0 0 0 _null_ _null_ ));
-DATA(insert ( 2242 n 0 bitand - bitand - - - - - f f 0 1560 0 0 0 _null_ _null_ ));
-DATA(insert ( 2243 n 0 bitor - bitor - - - - - f f 0 1560 0 0 0 _null_ _null_ ));
-
-/* xml */
-DATA(insert ( 2901 n 0 xmlconcat2 - - - - - - - f f 0 142 0 0 0 _null_ _null_ ));
-
-/* array */
-DATA(insert ( 2335 n 0 array_agg_transfn array_agg_finalfn - - - - - - t f 0 2281 0 0 0 _null_ _null_ ));
-DATA(insert ( 4053 n 0 array_agg_array_transfn array_agg_array_finalfn - - - - - - t f 0 2281 0 0 0 _null_ _null_ ));
-
-/* text */
-DATA(insert ( 3538 n 0 string_agg_transfn string_agg_finalfn - - - - - - f f 0 2281 0 0 0 _null_ _null_ ));
-
-/* bytea */
-DATA(insert ( 3545 n 0 bytea_string_agg_transfn bytea_string_agg_finalfn - - - - - - f f 0 2281 0 0 0 _null_ _null_ ));
-
-/* json */
-DATA(insert ( 3175 n 0 json_agg_transfn json_agg_finalfn - - - - - - f f 0 2281 0 0 0 _null_ _null_ ));
-DATA(insert ( 3197 n 0 json_object_agg_transfn json_object_agg_finalfn - - - - - - f f 0 2281 0 0 0 _null_ _null_ ));
-
-/* jsonb */
-DATA(insert ( 3267 n 0 jsonb_agg_transfn jsonb_agg_finalfn - - - - - - f f 0 2281 0 0 0 _null_ _null_ ));
-DATA(insert ( 3270 n 0 jsonb_object_agg_transfn jsonb_object_agg_finalfn - - - - - - f f 0 2281 0 0 0 _null_ _null_ ));
-
-/* ordered-set and hypothetical-set aggregates */
-DATA(insert ( 3972 o 1 ordered_set_transition percentile_disc_final - - - - - - t f 0 2281 0 0 0 _null_ _null_ ));
-DATA(insert ( 3974 o 1 ordered_set_transition percentile_cont_float8_final - - - - - - f f 0 2281 0 0 0 _null_ _null_ ));
-DATA(insert ( 3976 o 1 ordered_set_transition percentile_cont_interval_final - - - - - - f f 0 2281 0 0 0 _null_ _null_ ));
-DATA(insert ( 3978 o 1 ordered_set_transition percentile_disc_multi_final - - - - - - t f 0 2281 0 0 0 _null_ _null_ ));
-DATA(insert ( 3980 o 1 ordered_set_transition percentile_cont_float8_multi_final - - - - - - f f 0 2281 0 0 0 _null_ _null_ ));
-DATA(insert ( 3982 o 1 ordered_set_transition percentile_cont_interval_multi_final - - - - - - f f 0 2281 0 0 0 _null_ _null_ ));
-DATA(insert ( 3984 o 0 ordered_set_transition mode_final - - - - - - t f 0 2281 0 0 0 _null_ _null_ ));
-DATA(insert ( 3986 h 1 ordered_set_transition_multi rank_final - - - - - - t f 0 2281 0 0 0 _null_ _null_ ));
-DATA(insert ( 3988 h 1 ordered_set_transition_multi percent_rank_final - - - - - - t f 0 2281 0 0 0 _null_ _null_ ));
-DATA(insert ( 3990 h 1 ordered_set_transition_multi cume_dist_final - - - - - - t f 0 2281 0 0 0 _null_ _null_ ));
-DATA(insert ( 3992 h 1 ordered_set_transition_multi dense_rank_final - - - - - - t f 0 2281 0 0 0 _null_ _null_ ));
+#endif /* EXPOSE_TO_CLIENT_CODE */
-/*
- * prototypes for functions in pg_aggregate.c
- */
 extern ObjectAddress AggregateCreate(const char *aggName,
 Oid aggNamespace,
 char aggKind,
@@ -339,6 +161,8 @@ extern ObjectAddress AggregateCreate(const char *aggName,
 List *aggmfinalfnName,
 bool finalfnExtraArgs,
 bool mfinalfnExtraArgs,
+ char finalfnModify,
+ char mfinalfnModify,
 List *aggsortopName,
 Oid aggTransType,
 int32 aggTransSpace,
diff --git a/src/include/catalog/pg_am.dat b/src/include/catalog/pg_am.dat
new file mode 100644
index 0000000000..bef53a319a
--- /dev/null
+++ b/src/include/catalog/pg_am.dat
@@ -0,0 +1,34 @@
+#----------------------------------------------------------------------
+#
+# pg_am.dat
+# Initial contents of the pg_am system catalog.
+#
+# Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group
+# Portions Copyright (c) 1994, Regents of the University of California
+#
+# src/include/catalog/pg_am.dat
+#
+#----------------------------------------------------------------------
+
+[
+
+{ oid => '403', oid_symbol => 'BTREE_AM_OID',
+ descr => 'b-tree index access method',
+ amname => 'btree', amhandler => 'bthandler', amtype => 'i' },
+{ oid => '405', oid_symbol => 'HASH_AM_OID',
+ descr => 'hash index access method',
+ amname => 'hash', amhandler => 'hashhandler', amtype => 'i' },
+{ oid => '783', oid_symbol => 'GIST_AM_OID',
+ descr => 'GiST index access method',
+ amname => 'gist', amhandler => 'gisthandler', amtype => 'i' },
+{ oid => '2742', oid_symbol => 'GIN_AM_OID',
+ descr => 'GIN index access method',
+ amname => 'gin', amhandler => 'ginhandler', amtype => 'i' },
+{ oid => '4000', oid_symbol => 'SPGIST_AM_OID',
+ descr => 'SP-GiST index access method',
+ amname => 'spgist', amhandler => 'spghandler', amtype => 'i' },
+{ oid => '3580', oid_symbol => 'BRIN_AM_OID',
+ descr => 'block range index (BRIN) access method',
+ amname => 'brin', amhandler => 'brinhandler', amtype => 'i' },
+
+]
diff --git a/src/include/catalog/pg_am.h b/src/include/catalog/pg_am.h
index e021f5b894..26cb234987 100644
--- a/src/include/catalog/pg_am.h
+++ b/src/include/catalog/pg_am.h
@@ -1,21 +1,17 @@
 /*-------------------------------------------------------------------------
 *
 * pg_am.h
- * definition of the system "access method" relation (pg_am)
- * along with the relation's initial contents.
+ * definition of the "access method" system catalog (pg_am)
 *
 *
- * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group
+ * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group
 * Portions Copyright (c) 1994, Regents of the University of California
 *
 * src/include/catalog/pg_am.h
 *
 * NOTES
- * the genbki.pl script reads this file and generates .bki
- * information from the DATA() statements.
- *
- * XXX do NOT break up DATA() statements into multiple lines!
- * the scripts are not as smart as you might think...
+ * The Catalog.pm module reads this file and derives schema
+ * information.
 *
 *-------------------------------------------------------------------------
 */
@@ -23,19 +19,23 @@ #define PG_AM_H
 #include "catalog/genbki.h"
+#include "catalog/pg_am_d.h"
 /* ----------------
 * pg_am definition. cpp turns this into
 * typedef struct FormData_pg_am
 * ----------------
 */
-#define AccessMethodRelationId 2601
-
-CATALOG(pg_am,2601)
+CATALOG(pg_am,2601,AccessMethodRelationId)
 {
- NameData amname; /* access method name */
- regproc amhandler; /* handler function */
- char amtype; /* see AMTYPE_xxx constants below */
+ /* access method name */
+ NameData amname;
+
+ /* handler function */
+ regproc amhandler BKI_LOOKUP(pg_proc);
+
+ /* see AMTYPE_xxx constants below */
+ char amtype;
 } FormData_pg_am;
 /* ----------------
@@ -45,43 +45,13 @@ CATALOG(pg_am,2601)
 */
 typedef FormData_pg_am *Form_pg_am;
-/* ----------------
- * compiler constants for pg_am
- * ----------------
- */
-#define Natts_pg_am 3
-#define Anum_pg_am_amname 1
-#define Anum_pg_am_amhandler 2
-#define Anum_pg_am_amtype 3
+#ifdef EXPOSE_TO_CLIENT_CODE
-/* ----------------
- * compiler constant for amtype
- * ----------------
+/*
+ * Allowed values for amtype
 */
 #define AMTYPE_INDEX 'i' /* index access method */
-/* ----------------
- * initial contents of pg_am
- * ----------------
- */
-
-DATA(insert OID = 403 ( btree bthandler i ));
-DESCR("b-tree index access method");
-#define BTREE_AM_OID 403
-DATA(insert OID = 405 ( hash hashhandler i ));
-DESCR("hash index access method");
-#define HASH_AM_OID 405
-DATA(insert OID = 783 ( gist gisthandler i ));
-DESCR("GiST index access method");
-#define GIST_AM_OID 783
-DATA(insert OID = 2742 ( gin ginhandler i ));
-DESCR("GIN index access method");
-#define GIN_AM_OID 2742
-DATA(insert OID = 4000 ( spgist spghandler i ));
-DESCR("SP-GiST index access method");
-#define SPGIST_AM_OID 4000
-DATA(insert OID = 3580 ( brin brinhandler i ));
-DESCR("block range index (BRIN) access method");
-#define BRIN_AM_OID 3580
+#endif /* EXPOSE_TO_CLIENT_CODE */
 #endif /* PG_AM_H */
diff --git a/src/include/catalog/pg_amop.dat b/src/include/catalog/pg_amop.dat
new file mode 100644
index 0000000000..075a54c4ac
--- /dev/null
+++ b/src/include/catalog/pg_amop.dat
@@ -0,0 +1,2386 @@
+#----------------------------------------------------------------------
+#
+# pg_amop.dat
+# Initial contents of the pg_amop system catalog.
+# +# Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group +# Portions Copyright (c) 1994, Regents of the University of California +# +# src/include/catalog/pg_amop.dat +# +#---------------------------------------------------------------------- + +[ + +# btree integer_ops + +# default operators int2 +{ amopfamily => 'btree/integer_ops', amoplefttype => 'int2', + amoprighttype => 'int2', amopstrategy => '1', amopopr => '<(int2,int2)', + amopmethod => 'btree' }, +{ amopfamily => 'btree/integer_ops', amoplefttype => 'int2', + amoprighttype => 'int2', amopstrategy => '2', amopopr => '<=(int2,int2)', + amopmethod => 'btree' }, +{ amopfamily => 'btree/integer_ops', amoplefttype => 'int2', + amoprighttype => 'int2', amopstrategy => '3', amopopr => '=(int2,int2)', + amopmethod => 'btree' }, +{ amopfamily => 'btree/integer_ops', amoplefttype => 'int2', + amoprighttype => 'int2', amopstrategy => '4', amopopr => '>=(int2,int2)', + amopmethod => 'btree' }, +{ amopfamily => 'btree/integer_ops', amoplefttype => 'int2', + amoprighttype => 'int2', amopstrategy => '5', amopopr => '>(int2,int2)', + amopmethod => 'btree' }, + +# crosstype operators int24 +{ amopfamily => 'btree/integer_ops', amoplefttype => 'int2', + amoprighttype => 'int4', amopstrategy => '1', amopopr => '<(int2,int4)', + amopmethod => 'btree' }, +{ amopfamily => 'btree/integer_ops', amoplefttype => 'int2', + amoprighttype => 'int4', amopstrategy => '2', amopopr => '<=(int2,int4)', + amopmethod => 'btree' }, +{ amopfamily => 'btree/integer_ops', amoplefttype => 'int2', + amoprighttype => 'int4', amopstrategy => '3', amopopr => '=(int2,int4)', + amopmethod => 'btree' }, +{ amopfamily => 'btree/integer_ops', amoplefttype => 'int2', + amoprighttype => 'int4', amopstrategy => '4', amopopr => '>=(int2,int4)', + amopmethod => 'btree' }, +{ amopfamily => 'btree/integer_ops', amoplefttype => 'int2', + amoprighttype => 'int4', amopstrategy => '5', amopopr => '>(int2,int4)', + amopmethod => 'btree' }, + +# crosstype operators int28 +{ amopfamily => 'btree/integer_ops', amoplefttype => 'int2', + amoprighttype => 'int8', amopstrategy => '1', amopopr => '<(int2,int8)', + amopmethod => 'btree' }, +{ amopfamily => 'btree/integer_ops', amoplefttype => 'int2', + amoprighttype => 'int8', amopstrategy => '2', amopopr => '<=(int2,int8)', + amopmethod => 'btree' }, +{ amopfamily => 'btree/integer_ops', amoplefttype => 'int2', + amoprighttype => 'int8', amopstrategy => '3', amopopr => '=(int2,int8)', + amopmethod => 'btree' }, +{ amopfamily => 'btree/integer_ops', amoplefttype => 'int2', + amoprighttype => 'int8', amopstrategy => '4', amopopr => '>=(int2,int8)', + amopmethod => 'btree' }, +{ amopfamily => 'btree/integer_ops', amoplefttype => 'int2', + amoprighttype => 'int8', amopstrategy => '5', amopopr => '>(int2,int8)', + amopmethod => 'btree' }, + +# default operators int4 +{ amopfamily => 'btree/integer_ops', amoplefttype => 'int4', + amoprighttype => 'int4', amopstrategy => '1', amopopr => '<(int4,int4)', + amopmethod => 'btree' }, +{ amopfamily => 'btree/integer_ops', amoplefttype => 'int4', + amoprighttype => 'int4', amopstrategy => '2', amopopr => '<=(int4,int4)', + amopmethod => 'btree' }, +{ amopfamily => 'btree/integer_ops', amoplefttype => 'int4', + amoprighttype => 'int4', amopstrategy => '3', amopopr => '=(int4,int4)', + amopmethod => 'btree' }, +{ amopfamily => 'btree/integer_ops', amoplefttype => 'int4', + amoprighttype => 'int4', amopstrategy => '4', amopopr => '>=(int4,int4)', + amopmethod => 'btree' }, +{ amopfamily => 
'btree/integer_ops', amoplefttype => 'int4', + amoprighttype => 'int4', amopstrategy => '5', amopopr => '>(int4,int4)', + amopmethod => 'btree' }, + +# crosstype operators int42 +{ amopfamily => 'btree/integer_ops', amoplefttype => 'int4', + amoprighttype => 'int2', amopstrategy => '1', amopopr => '<(int4,int2)', + amopmethod => 'btree' }, +{ amopfamily => 'btree/integer_ops', amoplefttype => 'int4', + amoprighttype => 'int2', amopstrategy => '2', amopopr => '<=(int4,int2)', + amopmethod => 'btree' }, +{ amopfamily => 'btree/integer_ops', amoplefttype => 'int4', + amoprighttype => 'int2', amopstrategy => '3', amopopr => '=(int4,int2)', + amopmethod => 'btree' }, +{ amopfamily => 'btree/integer_ops', amoplefttype => 'int4', + amoprighttype => 'int2', amopstrategy => '4', amopopr => '>=(int4,int2)', + amopmethod => 'btree' }, +{ amopfamily => 'btree/integer_ops', amoplefttype => 'int4', + amoprighttype => 'int2', amopstrategy => '5', amopopr => '>(int4,int2)', + amopmethod => 'btree' }, + +# crosstype operators int48 +{ amopfamily => 'btree/integer_ops', amoplefttype => 'int4', + amoprighttype => 'int8', amopstrategy => '1', amopopr => '<(int4,int8)', + amopmethod => 'btree' }, +{ amopfamily => 'btree/integer_ops', amoplefttype => 'int4', + amoprighttype => 'int8', amopstrategy => '2', amopopr => '<=(int4,int8)', + amopmethod => 'btree' }, +{ amopfamily => 'btree/integer_ops', amoplefttype => 'int4', + amoprighttype => 'int8', amopstrategy => '3', amopopr => '=(int4,int8)', + amopmethod => 'btree' }, +{ amopfamily => 'btree/integer_ops', amoplefttype => 'int4', + amoprighttype => 'int8', amopstrategy => '4', amopopr => '>=(int4,int8)', + amopmethod => 'btree' }, +{ amopfamily => 'btree/integer_ops', amoplefttype => 'int4', + amoprighttype => 'int8', amopstrategy => '5', amopopr => '>(int4,int8)', + amopmethod => 'btree' }, + +# default operators int8 +{ amopfamily => 'btree/integer_ops', amoplefttype => 'int8', + amoprighttype => 'int8', amopstrategy => '1', amopopr => '<(int8,int8)', + amopmethod => 'btree' }, +{ amopfamily => 'btree/integer_ops', amoplefttype => 'int8', + amoprighttype => 'int8', amopstrategy => '2', amopopr => '<=(int8,int8)', + amopmethod => 'btree' }, +{ amopfamily => 'btree/integer_ops', amoplefttype => 'int8', + amoprighttype => 'int8', amopstrategy => '3', amopopr => '=(int8,int8)', + amopmethod => 'btree' }, +{ amopfamily => 'btree/integer_ops', amoplefttype => 'int8', + amoprighttype => 'int8', amopstrategy => '4', amopopr => '>=(int8,int8)', + amopmethod => 'btree' }, +{ amopfamily => 'btree/integer_ops', amoplefttype => 'int8', + amoprighttype => 'int8', amopstrategy => '5', amopopr => '>(int8,int8)', + amopmethod => 'btree' }, + +# crosstype operators int82 +{ amopfamily => 'btree/integer_ops', amoplefttype => 'int8', + amoprighttype => 'int2', amopstrategy => '1', amopopr => '<(int8,int2)', + amopmethod => 'btree' }, +{ amopfamily => 'btree/integer_ops', amoplefttype => 'int8', + amoprighttype => 'int2', amopstrategy => '2', amopopr => '<=(int8,int2)', + amopmethod => 'btree' }, +{ amopfamily => 'btree/integer_ops', amoplefttype => 'int8', + amoprighttype => 'int2', amopstrategy => '3', amopopr => '=(int8,int2)', + amopmethod => 'btree' }, +{ amopfamily => 'btree/integer_ops', amoplefttype => 'int8', + amoprighttype => 'int2', amopstrategy => '4', amopopr => '>=(int8,int2)', + amopmethod => 'btree' }, +{ amopfamily => 'btree/integer_ops', amoplefttype => 'int8', + amoprighttype => 'int2', amopstrategy => '5', amopopr => '>(int8,int2)', + amopmethod => 'btree' 
}, + +# crosstype operators int84 +{ amopfamily => 'btree/integer_ops', amoplefttype => 'int8', + amoprighttype => 'int4', amopstrategy => '1', amopopr => '<(int8,int4)', + amopmethod => 'btree' }, +{ amopfamily => 'btree/integer_ops', amoplefttype => 'int8', + amoprighttype => 'int4', amopstrategy => '2', amopopr => '<=(int8,int4)', + amopmethod => 'btree' }, +{ amopfamily => 'btree/integer_ops', amoplefttype => 'int8', + amoprighttype => 'int4', amopstrategy => '3', amopopr => '=(int8,int4)', + amopmethod => 'btree' }, +{ amopfamily => 'btree/integer_ops', amoplefttype => 'int8', + amoprighttype => 'int4', amopstrategy => '4', amopopr => '>=(int8,int4)', + amopmethod => 'btree' }, +{ amopfamily => 'btree/integer_ops', amoplefttype => 'int8', + amoprighttype => 'int4', amopstrategy => '5', amopopr => '>(int8,int4)', + amopmethod => 'btree' }, + +# btree oid_ops + +{ amopfamily => 'btree/oid_ops', amoplefttype => 'oid', amoprighttype => 'oid', + amopstrategy => '1', amopopr => '<(oid,oid)', amopmethod => 'btree' }, +{ amopfamily => 'btree/oid_ops', amoplefttype => 'oid', amoprighttype => 'oid', + amopstrategy => '2', amopopr => '<=(oid,oid)', amopmethod => 'btree' }, +{ amopfamily => 'btree/oid_ops', amoplefttype => 'oid', amoprighttype => 'oid', + amopstrategy => '3', amopopr => '=(oid,oid)', amopmethod => 'btree' }, +{ amopfamily => 'btree/oid_ops', amoplefttype => 'oid', amoprighttype => 'oid', + amopstrategy => '4', amopopr => '>=(oid,oid)', amopmethod => 'btree' }, +{ amopfamily => 'btree/oid_ops', amoplefttype => 'oid', amoprighttype => 'oid', + amopstrategy => '5', amopopr => '>(oid,oid)', amopmethod => 'btree' }, + +# btree tid_ops + +{ amopfamily => 'btree/tid_ops', amoplefttype => 'tid', amoprighttype => 'tid', + amopstrategy => '1', amopopr => '<(tid,tid)', amopmethod => 'btree' }, +{ amopfamily => 'btree/tid_ops', amoplefttype => 'tid', amoprighttype => 'tid', + amopstrategy => '2', amopopr => '<=(tid,tid)', amopmethod => 'btree' }, +{ amopfamily => 'btree/tid_ops', amoplefttype => 'tid', amoprighttype => 'tid', + amopstrategy => '3', amopopr => '=(tid,tid)', amopmethod => 'btree' }, +{ amopfamily => 'btree/tid_ops', amoplefttype => 'tid', amoprighttype => 'tid', + amopstrategy => '4', amopopr => '>=(tid,tid)', amopmethod => 'btree' }, +{ amopfamily => 'btree/tid_ops', amoplefttype => 'tid', amoprighttype => 'tid', + amopstrategy => '5', amopopr => '>(tid,tid)', amopmethod => 'btree' }, + +# btree oidvector_ops + +{ amopfamily => 'btree/oidvector_ops', amoplefttype => 'oidvector', + amoprighttype => 'oidvector', amopstrategy => '1', + amopopr => '<(oidvector,oidvector)', amopmethod => 'btree' }, +{ amopfamily => 'btree/oidvector_ops', amoplefttype => 'oidvector', + amoprighttype => 'oidvector', amopstrategy => '2', + amopopr => '<=(oidvector,oidvector)', amopmethod => 'btree' }, +{ amopfamily => 'btree/oidvector_ops', amoplefttype => 'oidvector', + amoprighttype => 'oidvector', amopstrategy => '3', + amopopr => '=(oidvector,oidvector)', amopmethod => 'btree' }, +{ amopfamily => 'btree/oidvector_ops', amoplefttype => 'oidvector', + amoprighttype => 'oidvector', amopstrategy => '4', + amopopr => '>=(oidvector,oidvector)', amopmethod => 'btree' }, +{ amopfamily => 'btree/oidvector_ops', amoplefttype => 'oidvector', + amoprighttype => 'oidvector', amopstrategy => '5', + amopopr => '>(oidvector,oidvector)', amopmethod => 'btree' }, + +# btree float_ops + +# default operators float4 +{ amopfamily => 'btree/float_ops', amoplefttype => 'float4', + amoprighttype => 'float4', 
amopstrategy => '1', amopopr => '<(float4,float4)', + amopmethod => 'btree' }, +{ amopfamily => 'btree/float_ops', amoplefttype => 'float4', + amoprighttype => 'float4', amopstrategy => '2', + amopopr => '<=(float4,float4)', amopmethod => 'btree' }, +{ amopfamily => 'btree/float_ops', amoplefttype => 'float4', + amoprighttype => 'float4', amopstrategy => '3', amopopr => '=(float4,float4)', + amopmethod => 'btree' }, +{ amopfamily => 'btree/float_ops', amoplefttype => 'float4', + amoprighttype => 'float4', amopstrategy => '4', + amopopr => '>=(float4,float4)', amopmethod => 'btree' }, +{ amopfamily => 'btree/float_ops', amoplefttype => 'float4', + amoprighttype => 'float4', amopstrategy => '5', amopopr => '>(float4,float4)', + amopmethod => 'btree' }, + +# crosstype operators float48 +{ amopfamily => 'btree/float_ops', amoplefttype => 'float4', + amoprighttype => 'float8', amopstrategy => '1', amopopr => '<(float4,float8)', + amopmethod => 'btree' }, +{ amopfamily => 'btree/float_ops', amoplefttype => 'float4', + amoprighttype => 'float8', amopstrategy => '2', + amopopr => '<=(float4,float8)', amopmethod => 'btree' }, +{ amopfamily => 'btree/float_ops', amoplefttype => 'float4', + amoprighttype => 'float8', amopstrategy => '3', amopopr => '=(float4,float8)', + amopmethod => 'btree' }, +{ amopfamily => 'btree/float_ops', amoplefttype => 'float4', + amoprighttype => 'float8', amopstrategy => '4', + amopopr => '>=(float4,float8)', amopmethod => 'btree' }, +{ amopfamily => 'btree/float_ops', amoplefttype => 'float4', + amoprighttype => 'float8', amopstrategy => '5', amopopr => '>(float4,float8)', + amopmethod => 'btree' }, + +# default operators float8 +{ amopfamily => 'btree/float_ops', amoplefttype => 'float8', + amoprighttype => 'float8', amopstrategy => '1', amopopr => '<(float8,float8)', + amopmethod => 'btree' }, +{ amopfamily => 'btree/float_ops', amoplefttype => 'float8', + amoprighttype => 'float8', amopstrategy => '2', + amopopr => '<=(float8,float8)', amopmethod => 'btree' }, +{ amopfamily => 'btree/float_ops', amoplefttype => 'float8', + amoprighttype => 'float8', amopstrategy => '3', amopopr => '=(float8,float8)', + amopmethod => 'btree' }, +{ amopfamily => 'btree/float_ops', amoplefttype => 'float8', + amoprighttype => 'float8', amopstrategy => '4', + amopopr => '>=(float8,float8)', amopmethod => 'btree' }, +{ amopfamily => 'btree/float_ops', amoplefttype => 'float8', + amoprighttype => 'float8', amopstrategy => '5', amopopr => '>(float8,float8)', + amopmethod => 'btree' }, + +# crosstype operators float84 +{ amopfamily => 'btree/float_ops', amoplefttype => 'float8', + amoprighttype => 'float4', amopstrategy => '1', amopopr => '<(float8,float4)', + amopmethod => 'btree' }, +{ amopfamily => 'btree/float_ops', amoplefttype => 'float8', + amoprighttype => 'float4', amopstrategy => '2', + amopopr => '<=(float8,float4)', amopmethod => 'btree' }, +{ amopfamily => 'btree/float_ops', amoplefttype => 'float8', + amoprighttype => 'float4', amopstrategy => '3', amopopr => '=(float8,float4)', + amopmethod => 'btree' }, +{ amopfamily => 'btree/float_ops', amoplefttype => 'float8', + amoprighttype => 'float4', amopstrategy => '4', + amopopr => '>=(float8,float4)', amopmethod => 'btree' }, +{ amopfamily => 'btree/float_ops', amoplefttype => 'float8', + amoprighttype => 'float4', amopstrategy => '5', amopopr => '>(float8,float4)', + amopmethod => 'btree' }, + +# btree char_ops + +{ amopfamily => 'btree/char_ops', amoplefttype => 'char', + amoprighttype => 'char', amopstrategy => '1', amopopr => 
'<(char,char)', + amopmethod => 'btree' }, +{ amopfamily => 'btree/char_ops', amoplefttype => 'char', + amoprighttype => 'char', amopstrategy => '2', amopopr => '<=(char,char)', + amopmethod => 'btree' }, +{ amopfamily => 'btree/char_ops', amoplefttype => 'char', + amoprighttype => 'char', amopstrategy => '3', amopopr => '=(char,char)', + amopmethod => 'btree' }, +{ amopfamily => 'btree/char_ops', amoplefttype => 'char', + amoprighttype => 'char', amopstrategy => '4', amopopr => '>=(char,char)', + amopmethod => 'btree' }, +{ amopfamily => 'btree/char_ops', amoplefttype => 'char', + amoprighttype => 'char', amopstrategy => '5', amopopr => '>(char,char)', + amopmethod => 'btree' }, + +# btree name_ops + +{ amopfamily => 'btree/name_ops', amoplefttype => 'name', + amoprighttype => 'name', amopstrategy => '1', amopopr => '<(name,name)', + amopmethod => 'btree' }, +{ amopfamily => 'btree/name_ops', amoplefttype => 'name', + amoprighttype => 'name', amopstrategy => '2', amopopr => '<=(name,name)', + amopmethod => 'btree' }, +{ amopfamily => 'btree/name_ops', amoplefttype => 'name', + amoprighttype => 'name', amopstrategy => '3', amopopr => '=(name,name)', + amopmethod => 'btree' }, +{ amopfamily => 'btree/name_ops', amoplefttype => 'name', + amoprighttype => 'name', amopstrategy => '4', amopopr => '>=(name,name)', + amopmethod => 'btree' }, +{ amopfamily => 'btree/name_ops', amoplefttype => 'name', + amoprighttype => 'name', amopstrategy => '5', amopopr => '>(name,name)', + amopmethod => 'btree' }, + +# btree text_ops + +{ amopfamily => 'btree/text_ops', amoplefttype => 'text', + amoprighttype => 'text', amopstrategy => '1', amopopr => '<(text,text)', + amopmethod => 'btree' }, +{ amopfamily => 'btree/text_ops', amoplefttype => 'text', + amoprighttype => 'text', amopstrategy => '2', amopopr => '<=(text,text)', + amopmethod => 'btree' }, +{ amopfamily => 'btree/text_ops', amoplefttype => 'text', + amoprighttype => 'text', amopstrategy => '3', amopopr => '=(text,text)', + amopmethod => 'btree' }, +{ amopfamily => 'btree/text_ops', amoplefttype => 'text', + amoprighttype => 'text', amopstrategy => '4', amopopr => '>=(text,text)', + amopmethod => 'btree' }, +{ amopfamily => 'btree/text_ops', amoplefttype => 'text', + amoprighttype => 'text', amopstrategy => '5', amopopr => '>(text,text)', + amopmethod => 'btree' }, + +# btree bpchar_ops + +{ amopfamily => 'btree/bpchar_ops', amoplefttype => 'bpchar', + amoprighttype => 'bpchar', amopstrategy => '1', amopopr => '<(bpchar,bpchar)', + amopmethod => 'btree' }, +{ amopfamily => 'btree/bpchar_ops', amoplefttype => 'bpchar', + amoprighttype => 'bpchar', amopstrategy => '2', + amopopr => '<=(bpchar,bpchar)', amopmethod => 'btree' }, +{ amopfamily => 'btree/bpchar_ops', amoplefttype => 'bpchar', + amoprighttype => 'bpchar', amopstrategy => '3', amopopr => '=(bpchar,bpchar)', + amopmethod => 'btree' }, +{ amopfamily => 'btree/bpchar_ops', amoplefttype => 'bpchar', + amoprighttype => 'bpchar', amopstrategy => '4', + amopopr => '>=(bpchar,bpchar)', amopmethod => 'btree' }, +{ amopfamily => 'btree/bpchar_ops', amoplefttype => 'bpchar', + amoprighttype => 'bpchar', amopstrategy => '5', amopopr => '>(bpchar,bpchar)', + amopmethod => 'btree' }, + +# btree bytea_ops + +{ amopfamily => 'btree/bytea_ops', amoplefttype => 'bytea', + amoprighttype => 'bytea', amopstrategy => '1', amopopr => '<(bytea,bytea)', + amopmethod => 'btree' }, +{ amopfamily => 'btree/bytea_ops', amoplefttype => 'bytea', + amoprighttype => 'bytea', amopstrategy => '2', amopopr => 
'<=(bytea,bytea)', + amopmethod => 'btree' }, +{ amopfamily => 'btree/bytea_ops', amoplefttype => 'bytea', + amoprighttype => 'bytea', amopstrategy => '3', amopopr => '=(bytea,bytea)', + amopmethod => 'btree' }, +{ amopfamily => 'btree/bytea_ops', amoplefttype => 'bytea', + amoprighttype => 'bytea', amopstrategy => '4', amopopr => '>=(bytea,bytea)', + amopmethod => 'btree' }, +{ amopfamily => 'btree/bytea_ops', amoplefttype => 'bytea', + amoprighttype => 'bytea', amopstrategy => '5', amopopr => '>(bytea,bytea)', + amopmethod => 'btree' }, + +# btree datetime_ops + +# default operators date +{ amopfamily => 'btree/datetime_ops', amoplefttype => 'date', + amoprighttype => 'date', amopstrategy => '1', amopopr => '<(date,date)', + amopmethod => 'btree' }, +{ amopfamily => 'btree/datetime_ops', amoplefttype => 'date', + amoprighttype => 'date', amopstrategy => '2', amopopr => '<=(date,date)', + amopmethod => 'btree' }, +{ amopfamily => 'btree/datetime_ops', amoplefttype => 'date', + amoprighttype => 'date', amopstrategy => '3', amopopr => '=(date,date)', + amopmethod => 'btree' }, +{ amopfamily => 'btree/datetime_ops', amoplefttype => 'date', + amoprighttype => 'date', amopstrategy => '4', amopopr => '>=(date,date)', + amopmethod => 'btree' }, +{ amopfamily => 'btree/datetime_ops', amoplefttype => 'date', + amoprighttype => 'date', amopstrategy => '5', amopopr => '>(date,date)', + amopmethod => 'btree' }, + +# crosstype operators vs timestamp +{ amopfamily => 'btree/datetime_ops', amoplefttype => 'date', + amoprighttype => 'timestamp', amopstrategy => '1', + amopopr => '<(date,timestamp)', amopmethod => 'btree' }, +{ amopfamily => 'btree/datetime_ops', amoplefttype => 'date', + amoprighttype => 'timestamp', amopstrategy => '2', + amopopr => '<=(date,timestamp)', amopmethod => 'btree' }, +{ amopfamily => 'btree/datetime_ops', amoplefttype => 'date', + amoprighttype => 'timestamp', amopstrategy => '3', + amopopr => '=(date,timestamp)', amopmethod => 'btree' }, +{ amopfamily => 'btree/datetime_ops', amoplefttype => 'date', + amoprighttype => 'timestamp', amopstrategy => '4', + amopopr => '>=(date,timestamp)', amopmethod => 'btree' }, +{ amopfamily => 'btree/datetime_ops', amoplefttype => 'date', + amoprighttype => 'timestamp', amopstrategy => '5', + amopopr => '>(date,timestamp)', amopmethod => 'btree' }, + +# crosstype operators vs timestamptz +{ amopfamily => 'btree/datetime_ops', amoplefttype => 'date', + amoprighttype => 'timestamptz', amopstrategy => '1', + amopopr => '<(date,timestamptz)', amopmethod => 'btree' }, +{ amopfamily => 'btree/datetime_ops', amoplefttype => 'date', + amoprighttype => 'timestamptz', amopstrategy => '2', + amopopr => '<=(date,timestamptz)', amopmethod => 'btree' }, +{ amopfamily => 'btree/datetime_ops', amoplefttype => 'date', + amoprighttype => 'timestamptz', amopstrategy => '3', + amopopr => '=(date,timestamptz)', amopmethod => 'btree' }, +{ amopfamily => 'btree/datetime_ops', amoplefttype => 'date', + amoprighttype => 'timestamptz', amopstrategy => '4', + amopopr => '>=(date,timestamptz)', amopmethod => 'btree' }, +{ amopfamily => 'btree/datetime_ops', amoplefttype => 'date', + amoprighttype => 'timestamptz', amopstrategy => '5', + amopopr => '>(date,timestamptz)', amopmethod => 'btree' }, + +# default operators timestamp +{ amopfamily => 'btree/datetime_ops', amoplefttype => 'timestamp', + amoprighttype => 'timestamp', amopstrategy => '1', + amopopr => '<(timestamp,timestamp)', amopmethod => 'btree' }, +{ amopfamily => 'btree/datetime_ops', amoplefttype => 
'timestamp', + amoprighttype => 'timestamp', amopstrategy => '2', + amopopr => '<=(timestamp,timestamp)', amopmethod => 'btree' }, +{ amopfamily => 'btree/datetime_ops', amoplefttype => 'timestamp', + amoprighttype => 'timestamp', amopstrategy => '3', + amopopr => '=(timestamp,timestamp)', amopmethod => 'btree' }, +{ amopfamily => 'btree/datetime_ops', amoplefttype => 'timestamp', + amoprighttype => 'timestamp', amopstrategy => '4', + amopopr => '>=(timestamp,timestamp)', amopmethod => 'btree' }, +{ amopfamily => 'btree/datetime_ops', amoplefttype => 'timestamp', + amoprighttype => 'timestamp', amopstrategy => '5', + amopopr => '>(timestamp,timestamp)', amopmethod => 'btree' }, + +# crosstype operators vs date +{ amopfamily => 'btree/datetime_ops', amoplefttype => 'timestamp', + amoprighttype => 'date', amopstrategy => '1', amopopr => '<(timestamp,date)', + amopmethod => 'btree' }, +{ amopfamily => 'btree/datetime_ops', amoplefttype => 'timestamp', + amoprighttype => 'date', amopstrategy => '2', amopopr => '<=(timestamp,date)', + amopmethod => 'btree' }, +{ amopfamily => 'btree/datetime_ops', amoplefttype => 'timestamp', + amoprighttype => 'date', amopstrategy => '3', amopopr => '=(timestamp,date)', + amopmethod => 'btree' }, +{ amopfamily => 'btree/datetime_ops', amoplefttype => 'timestamp', + amoprighttype => 'date', amopstrategy => '4', amopopr => '>=(timestamp,date)', + amopmethod => 'btree' }, +{ amopfamily => 'btree/datetime_ops', amoplefttype => 'timestamp', + amoprighttype => 'date', amopstrategy => '5', amopopr => '>(timestamp,date)', + amopmethod => 'btree' }, + +# crosstype operators vs timestamptz +{ amopfamily => 'btree/datetime_ops', amoplefttype => 'timestamp', + amoprighttype => 'timestamptz', amopstrategy => '1', + amopopr => '<(timestamp,timestamptz)', amopmethod => 'btree' }, +{ amopfamily => 'btree/datetime_ops', amoplefttype => 'timestamp', + amoprighttype => 'timestamptz', amopstrategy => '2', + amopopr => '<=(timestamp,timestamptz)', amopmethod => 'btree' }, +{ amopfamily => 'btree/datetime_ops', amoplefttype => 'timestamp', + amoprighttype => 'timestamptz', amopstrategy => '3', + amopopr => '=(timestamp,timestamptz)', amopmethod => 'btree' }, +{ amopfamily => 'btree/datetime_ops', amoplefttype => 'timestamp', + amoprighttype => 'timestamptz', amopstrategy => '4', + amopopr => '>=(timestamp,timestamptz)', amopmethod => 'btree' }, +{ amopfamily => 'btree/datetime_ops', amoplefttype => 'timestamp', + amoprighttype => 'timestamptz', amopstrategy => '5', + amopopr => '>(timestamp,timestamptz)', amopmethod => 'btree' }, + +# default operators timestamptz +{ amopfamily => 'btree/datetime_ops', amoplefttype => 'timestamptz', + amoprighttype => 'timestamptz', amopstrategy => '1', + amopopr => '<(timestamptz,timestamptz)', amopmethod => 'btree' }, +{ amopfamily => 'btree/datetime_ops', amoplefttype => 'timestamptz', + amoprighttype => 'timestamptz', amopstrategy => '2', + amopopr => '<=(timestamptz,timestamptz)', amopmethod => 'btree' }, +{ amopfamily => 'btree/datetime_ops', amoplefttype => 'timestamptz', + amoprighttype => 'timestamptz', amopstrategy => '3', + amopopr => '=(timestamptz,timestamptz)', amopmethod => 'btree' }, +{ amopfamily => 'btree/datetime_ops', amoplefttype => 'timestamptz', + amoprighttype => 'timestamptz', amopstrategy => '4', + amopopr => '>=(timestamptz,timestamptz)', amopmethod => 'btree' }, +{ amopfamily => 'btree/datetime_ops', amoplefttype => 'timestamptz', + amoprighttype => 'timestamptz', amopstrategy => '5', + amopopr => 
'>(timestamptz,timestamptz)', amopmethod => 'btree' }, + +# crosstype operators vs date +{ amopfamily => 'btree/datetime_ops', amoplefttype => 'timestamptz', + amoprighttype => 'date', amopstrategy => '1', + amopopr => '<(timestamptz,date)', amopmethod => 'btree' }, +{ amopfamily => 'btree/datetime_ops', amoplefttype => 'timestamptz', + amoprighttype => 'date', amopstrategy => '2', + amopopr => '<=(timestamptz,date)', amopmethod => 'btree' }, +{ amopfamily => 'btree/datetime_ops', amoplefttype => 'timestamptz', + amoprighttype => 'date', amopstrategy => '3', + amopopr => '=(timestamptz,date)', amopmethod => 'btree' }, +{ amopfamily => 'btree/datetime_ops', amoplefttype => 'timestamptz', + amoprighttype => 'date', amopstrategy => '4', + amopopr => '>=(timestamptz,date)', amopmethod => 'btree' }, +{ amopfamily => 'btree/datetime_ops', amoplefttype => 'timestamptz', + amoprighttype => 'date', amopstrategy => '5', + amopopr => '>(timestamptz,date)', amopmethod => 'btree' }, + +# crosstype operators vs timestamp +{ amopfamily => 'btree/datetime_ops', amoplefttype => 'timestamptz', + amoprighttype => 'timestamp', amopstrategy => '1', + amopopr => '<(timestamptz,timestamp)', amopmethod => 'btree' }, +{ amopfamily => 'btree/datetime_ops', amoplefttype => 'timestamptz', + amoprighttype => 'timestamp', amopstrategy => '2', + amopopr => '<=(timestamptz,timestamp)', amopmethod => 'btree' }, +{ amopfamily => 'btree/datetime_ops', amoplefttype => 'timestamptz', + amoprighttype => 'timestamp', amopstrategy => '3', + amopopr => '=(timestamptz,timestamp)', amopmethod => 'btree' }, +{ amopfamily => 'btree/datetime_ops', amoplefttype => 'timestamptz', + amoprighttype => 'timestamp', amopstrategy => '4', + amopopr => '>=(timestamptz,timestamp)', amopmethod => 'btree' }, +{ amopfamily => 'btree/datetime_ops', amoplefttype => 'timestamptz', + amoprighttype => 'timestamp', amopstrategy => '5', + amopopr => '>(timestamptz,timestamp)', amopmethod => 'btree' }, + +# btree time_ops + +{ amopfamily => 'btree/time_ops', amoplefttype => 'time', + amoprighttype => 'time', amopstrategy => '1', amopopr => '<(time,time)', + amopmethod => 'btree' }, +{ amopfamily => 'btree/time_ops', amoplefttype => 'time', + amoprighttype => 'time', amopstrategy => '2', amopopr => '<=(time,time)', + amopmethod => 'btree' }, +{ amopfamily => 'btree/time_ops', amoplefttype => 'time', + amoprighttype => 'time', amopstrategy => '3', amopopr => '=(time,time)', + amopmethod => 'btree' }, +{ amopfamily => 'btree/time_ops', amoplefttype => 'time', + amoprighttype => 'time', amopstrategy => '4', amopopr => '>=(time,time)', + amopmethod => 'btree' }, +{ amopfamily => 'btree/time_ops', amoplefttype => 'time', + amoprighttype => 'time', amopstrategy => '5', amopopr => '>(time,time)', + amopmethod => 'btree' }, + +# btree timetz_ops + +{ amopfamily => 'btree/timetz_ops', amoplefttype => 'timetz', + amoprighttype => 'timetz', amopstrategy => '1', amopopr => '<(timetz,timetz)', + amopmethod => 'btree' }, +{ amopfamily => 'btree/timetz_ops', amoplefttype => 'timetz', + amoprighttype => 'timetz', amopstrategy => '2', + amopopr => '<=(timetz,timetz)', amopmethod => 'btree' }, +{ amopfamily => 'btree/timetz_ops', amoplefttype => 'timetz', + amoprighttype => 'timetz', amopstrategy => '3', amopopr => '=(timetz,timetz)', + amopmethod => 'btree' }, +{ amopfamily => 'btree/timetz_ops', amoplefttype => 'timetz', + amoprighttype => 'timetz', amopstrategy => '4', + amopopr => '>=(timetz,timetz)', amopmethod => 'btree' }, +{ amopfamily => 'btree/timetz_ops', 
amoplefttype => 'timetz', + amoprighttype => 'timetz', amopstrategy => '5', amopopr => '>(timetz,timetz)', + amopmethod => 'btree' }, + +# btree interval_ops + +{ amopfamily => 'btree/interval_ops', amoplefttype => 'interval', + amoprighttype => 'interval', amopstrategy => '1', + amopopr => '<(interval,interval)', amopmethod => 'btree' }, +{ amopfamily => 'btree/interval_ops', amoplefttype => 'interval', + amoprighttype => 'interval', amopstrategy => '2', + amopopr => '<=(interval,interval)', amopmethod => 'btree' }, +{ amopfamily => 'btree/interval_ops', amoplefttype => 'interval', + amoprighttype => 'interval', amopstrategy => '3', + amopopr => '=(interval,interval)', amopmethod => 'btree' }, +{ amopfamily => 'btree/interval_ops', amoplefttype => 'interval', + amoprighttype => 'interval', amopstrategy => '4', + amopopr => '>=(interval,interval)', amopmethod => 'btree' }, +{ amopfamily => 'btree/interval_ops', amoplefttype => 'interval', + amoprighttype => 'interval', amopstrategy => '5', + amopopr => '>(interval,interval)', amopmethod => 'btree' }, + +# btree macaddr + +{ amopfamily => 'btree/macaddr_ops', amoplefttype => 'macaddr', + amoprighttype => 'macaddr', amopstrategy => '1', + amopopr => '<(macaddr,macaddr)', amopmethod => 'btree' }, +{ amopfamily => 'btree/macaddr_ops', amoplefttype => 'macaddr', + amoprighttype => 'macaddr', amopstrategy => '2', + amopopr => '<=(macaddr,macaddr)', amopmethod => 'btree' }, +{ amopfamily => 'btree/macaddr_ops', amoplefttype => 'macaddr', + amoprighttype => 'macaddr', amopstrategy => '3', + amopopr => '=(macaddr,macaddr)', amopmethod => 'btree' }, +{ amopfamily => 'btree/macaddr_ops', amoplefttype => 'macaddr', + amoprighttype => 'macaddr', amopstrategy => '4', + amopopr => '>=(macaddr,macaddr)', amopmethod => 'btree' }, +{ amopfamily => 'btree/macaddr_ops', amoplefttype => 'macaddr', + amoprighttype => 'macaddr', amopstrategy => '5', + amopopr => '>(macaddr,macaddr)', amopmethod => 'btree' }, + +# btree macaddr8 + +{ amopfamily => 'btree/macaddr8_ops', amoplefttype => 'macaddr8', + amoprighttype => 'macaddr8', amopstrategy => '1', + amopopr => '<(macaddr8,macaddr8)', amopmethod => 'btree' }, +{ amopfamily => 'btree/macaddr8_ops', amoplefttype => 'macaddr8', + amoprighttype => 'macaddr8', amopstrategy => '2', + amopopr => '<=(macaddr8,macaddr8)', amopmethod => 'btree' }, +{ amopfamily => 'btree/macaddr8_ops', amoplefttype => 'macaddr8', + amoprighttype => 'macaddr8', amopstrategy => '3', + amopopr => '=(macaddr8,macaddr8)', amopmethod => 'btree' }, +{ amopfamily => 'btree/macaddr8_ops', amoplefttype => 'macaddr8', + amoprighttype => 'macaddr8', amopstrategy => '4', + amopopr => '>=(macaddr8,macaddr8)', amopmethod => 'btree' }, +{ amopfamily => 'btree/macaddr8_ops', amoplefttype => 'macaddr8', + amoprighttype => 'macaddr8', amopstrategy => '5', + amopopr => '>(macaddr8,macaddr8)', amopmethod => 'btree' }, + +# btree network + +{ amopfamily => 'btree/network_ops', amoplefttype => 'inet', + amoprighttype => 'inet', amopstrategy => '1', amopopr => '<(inet,inet)', + amopmethod => 'btree' }, +{ amopfamily => 'btree/network_ops', amoplefttype => 'inet', + amoprighttype => 'inet', amopstrategy => '2', amopopr => '<=(inet,inet)', + amopmethod => 'btree' }, +{ amopfamily => 'btree/network_ops', amoplefttype => 'inet', + amoprighttype => 'inet', amopstrategy => '3', amopopr => '=(inet,inet)', + amopmethod => 'btree' }, +{ amopfamily => 'btree/network_ops', amoplefttype => 'inet', + amoprighttype => 'inet', amopstrategy => '4', amopopr => '>=(inet,inet)', + 
amopmethod => 'btree' }, +{ amopfamily => 'btree/network_ops', amoplefttype => 'inet', + amoprighttype => 'inet', amopstrategy => '5', amopopr => '>(inet,inet)', + amopmethod => 'btree' }, + +# btree numeric + +{ amopfamily => 'btree/numeric_ops', amoplefttype => 'numeric', + amoprighttype => 'numeric', amopstrategy => '1', + amopopr => '<(numeric,numeric)', amopmethod => 'btree' }, +{ amopfamily => 'btree/numeric_ops', amoplefttype => 'numeric', + amoprighttype => 'numeric', amopstrategy => '2', + amopopr => '<=(numeric,numeric)', amopmethod => 'btree' }, +{ amopfamily => 'btree/numeric_ops', amoplefttype => 'numeric', + amoprighttype => 'numeric', amopstrategy => '3', + amopopr => '=(numeric,numeric)', amopmethod => 'btree' }, +{ amopfamily => 'btree/numeric_ops', amoplefttype => 'numeric', + amoprighttype => 'numeric', amopstrategy => '4', + amopopr => '>=(numeric,numeric)', amopmethod => 'btree' }, +{ amopfamily => 'btree/numeric_ops', amoplefttype => 'numeric', + amoprighttype => 'numeric', amopstrategy => '5', + amopopr => '>(numeric,numeric)', amopmethod => 'btree' }, + +# btree bool + +{ amopfamily => 'btree/bool_ops', amoplefttype => 'bool', + amoprighttype => 'bool', amopstrategy => '1', amopopr => '<(bool,bool)', + amopmethod => 'btree' }, +{ amopfamily => 'btree/bool_ops', amoplefttype => 'bool', + amoprighttype => 'bool', amopstrategy => '2', amopopr => '<=(bool,bool)', + amopmethod => 'btree' }, +{ amopfamily => 'btree/bool_ops', amoplefttype => 'bool', + amoprighttype => 'bool', amopstrategy => '3', amopopr => '=(bool,bool)', + amopmethod => 'btree' }, +{ amopfamily => 'btree/bool_ops', amoplefttype => 'bool', + amoprighttype => 'bool', amopstrategy => '4', amopopr => '>=(bool,bool)', + amopmethod => 'btree' }, +{ amopfamily => 'btree/bool_ops', amoplefttype => 'bool', + amoprighttype => 'bool', amopstrategy => '5', amopopr => '>(bool,bool)', + amopmethod => 'btree' }, + +# btree bit + +{ amopfamily => 'btree/bit_ops', amoplefttype => 'bit', amoprighttype => 'bit', + amopstrategy => '1', amopopr => '<(bit,bit)', amopmethod => 'btree' }, +{ amopfamily => 'btree/bit_ops', amoplefttype => 'bit', amoprighttype => 'bit', + amopstrategy => '2', amopopr => '<=(bit,bit)', amopmethod => 'btree' }, +{ amopfamily => 'btree/bit_ops', amoplefttype => 'bit', amoprighttype => 'bit', + amopstrategy => '3', amopopr => '=(bit,bit)', amopmethod => 'btree' }, +{ amopfamily => 'btree/bit_ops', amoplefttype => 'bit', amoprighttype => 'bit', + amopstrategy => '4', amopopr => '>=(bit,bit)', amopmethod => 'btree' }, +{ amopfamily => 'btree/bit_ops', amoplefttype => 'bit', amoprighttype => 'bit', + amopstrategy => '5', amopopr => '>(bit,bit)', amopmethod => 'btree' }, + +# btree varbit + +{ amopfamily => 'btree/varbit_ops', amoplefttype => 'varbit', + amoprighttype => 'varbit', amopstrategy => '1', amopopr => '<(varbit,varbit)', + amopmethod => 'btree' }, +{ amopfamily => 'btree/varbit_ops', amoplefttype => 'varbit', + amoprighttype => 'varbit', amopstrategy => '2', + amopopr => '<=(varbit,varbit)', amopmethod => 'btree' }, +{ amopfamily => 'btree/varbit_ops', amoplefttype => 'varbit', + amoprighttype => 'varbit', amopstrategy => '3', amopopr => '=(varbit,varbit)', + amopmethod => 'btree' }, +{ amopfamily => 'btree/varbit_ops', amoplefttype => 'varbit', + amoprighttype => 'varbit', amopstrategy => '4', + amopopr => '>=(varbit,varbit)', amopmethod => 'btree' }, +{ amopfamily => 'btree/varbit_ops', amoplefttype => 'varbit', + amoprighttype => 'varbit', amopstrategy => '5', amopopr => 
'>(varbit,varbit)', + amopmethod => 'btree' }, + +# btree text pattern + +{ amopfamily => 'btree/text_pattern_ops', amoplefttype => 'text', + amoprighttype => 'text', amopstrategy => '1', amopopr => '~<~(text,text)', + amopmethod => 'btree' }, +{ amopfamily => 'btree/text_pattern_ops', amoplefttype => 'text', + amoprighttype => 'text', amopstrategy => '2', amopopr => '~<=~(text,text)', + amopmethod => 'btree' }, +{ amopfamily => 'btree/text_pattern_ops', amoplefttype => 'text', + amoprighttype => 'text', amopstrategy => '3', amopopr => '=(text,text)', + amopmethod => 'btree' }, +{ amopfamily => 'btree/text_pattern_ops', amoplefttype => 'text', + amoprighttype => 'text', amopstrategy => '4', amopopr => '~>=~(text,text)', + amopmethod => 'btree' }, +{ amopfamily => 'btree/text_pattern_ops', amoplefttype => 'text', + amoprighttype => 'text', amopstrategy => '5', amopopr => '~>~(text,text)', + amopmethod => 'btree' }, + +# btree bpchar pattern + +{ amopfamily => 'btree/bpchar_pattern_ops', amoplefttype => 'bpchar', + amoprighttype => 'bpchar', amopstrategy => '1', + amopopr => '~<~(bpchar,bpchar)', amopmethod => 'btree' }, +{ amopfamily => 'btree/bpchar_pattern_ops', amoplefttype => 'bpchar', + amoprighttype => 'bpchar', amopstrategy => '2', + amopopr => '~<=~(bpchar,bpchar)', amopmethod => 'btree' }, +{ amopfamily => 'btree/bpchar_pattern_ops', amoplefttype => 'bpchar', + amoprighttype => 'bpchar', amopstrategy => '3', amopopr => '=(bpchar,bpchar)', + amopmethod => 'btree' }, +{ amopfamily => 'btree/bpchar_pattern_ops', amoplefttype => 'bpchar', + amoprighttype => 'bpchar', amopstrategy => '4', + amopopr => '~>=~(bpchar,bpchar)', amopmethod => 'btree' }, +{ amopfamily => 'btree/bpchar_pattern_ops', amoplefttype => 'bpchar', + amoprighttype => 'bpchar', amopstrategy => '5', + amopopr => '~>~(bpchar,bpchar)', amopmethod => 'btree' }, + +# btree money_ops + +{ amopfamily => 'btree/money_ops', amoplefttype => 'money', + amoprighttype => 'money', amopstrategy => '1', amopopr => '<(money,money)', + amopmethod => 'btree' }, +{ amopfamily => 'btree/money_ops', amoplefttype => 'money', + amoprighttype => 'money', amopstrategy => '2', amopopr => '<=(money,money)', + amopmethod => 'btree' }, +{ amopfamily => 'btree/money_ops', amoplefttype => 'money', + amoprighttype => 'money', amopstrategy => '3', amopopr => '=(money,money)', + amopmethod => 'btree' }, +{ amopfamily => 'btree/money_ops', amoplefttype => 'money', + amoprighttype => 'money', amopstrategy => '4', amopopr => '>=(money,money)', + amopmethod => 'btree' }, +{ amopfamily => 'btree/money_ops', amoplefttype => 'money', + amoprighttype => 'money', amopstrategy => '5', amopopr => '>(money,money)', + amopmethod => 'btree' }, + +# btree array_ops + +{ amopfamily => 'btree/array_ops', amoplefttype => 'anyarray', + amoprighttype => 'anyarray', amopstrategy => '1', + amopopr => '<(anyarray,anyarray)', amopmethod => 'btree' }, +{ amopfamily => 'btree/array_ops', amoplefttype => 'anyarray', + amoprighttype => 'anyarray', amopstrategy => '2', + amopopr => '<=(anyarray,anyarray)', amopmethod => 'btree' }, +{ amopfamily => 'btree/array_ops', amoplefttype => 'anyarray', + amoprighttype => 'anyarray', amopstrategy => '3', + amopopr => '=(anyarray,anyarray)', amopmethod => 'btree' }, +{ amopfamily => 'btree/array_ops', amoplefttype => 'anyarray', + amoprighttype => 'anyarray', amopstrategy => '4', + amopopr => '>=(anyarray,anyarray)', amopmethod => 'btree' }, +{ amopfamily => 'btree/array_ops', amoplefttype => 'anyarray', + amoprighttype => 'anyarray', 
amopstrategy => '5', + amopopr => '>(anyarray,anyarray)', amopmethod => 'btree' }, + +# btree record_ops + +{ amopfamily => 'btree/record_ops', amoplefttype => 'record', + amoprighttype => 'record', amopstrategy => '1', amopopr => '<(record,record)', + amopmethod => 'btree' }, +{ amopfamily => 'btree/record_ops', amoplefttype => 'record', + amoprighttype => 'record', amopstrategy => '2', + amopopr => '<=(record,record)', amopmethod => 'btree' }, +{ amopfamily => 'btree/record_ops', amoplefttype => 'record', + amoprighttype => 'record', amopstrategy => '3', amopopr => '=(record,record)', + amopmethod => 'btree' }, +{ amopfamily => 'btree/record_ops', amoplefttype => 'record', + amoprighttype => 'record', amopstrategy => '4', + amopopr => '>=(record,record)', amopmethod => 'btree' }, +{ amopfamily => 'btree/record_ops', amoplefttype => 'record', + amoprighttype => 'record', amopstrategy => '5', amopopr => '>(record,record)', + amopmethod => 'btree' }, + +# btree record_image_ops + +{ amopfamily => 'btree/record_image_ops', amoplefttype => 'record', + amoprighttype => 'record', amopstrategy => '1', + amopopr => '*<(record,record)', amopmethod => 'btree' }, +{ amopfamily => 'btree/record_image_ops', amoplefttype => 'record', + amoprighttype => 'record', amopstrategy => '2', + amopopr => '*<=(record,record)', amopmethod => 'btree' }, +{ amopfamily => 'btree/record_image_ops', amoplefttype => 'record', + amoprighttype => 'record', amopstrategy => '3', + amopopr => '*=(record,record)', amopmethod => 'btree' }, +{ amopfamily => 'btree/record_image_ops', amoplefttype => 'record', + amoprighttype => 'record', amopstrategy => '4', + amopopr => '*>=(record,record)', amopmethod => 'btree' }, +{ amopfamily => 'btree/record_image_ops', amoplefttype => 'record', + amoprighttype => 'record', amopstrategy => '5', + amopopr => '*>(record,record)', amopmethod => 'btree' }, + +# btree uuid_ops + +{ amopfamily => 'btree/uuid_ops', amoplefttype => 'uuid', + amoprighttype => 'uuid', amopstrategy => '1', amopopr => '<(uuid,uuid)', + amopmethod => 'btree' }, +{ amopfamily => 'btree/uuid_ops', amoplefttype => 'uuid', + amoprighttype => 'uuid', amopstrategy => '2', amopopr => '<=(uuid,uuid)', + amopmethod => 'btree' }, +{ amopfamily => 'btree/uuid_ops', amoplefttype => 'uuid', + amoprighttype => 'uuid', amopstrategy => '3', amopopr => '=(uuid,uuid)', + amopmethod => 'btree' }, +{ amopfamily => 'btree/uuid_ops', amoplefttype => 'uuid', + amoprighttype => 'uuid', amopstrategy => '4', amopopr => '>=(uuid,uuid)', + amopmethod => 'btree' }, +{ amopfamily => 'btree/uuid_ops', amoplefttype => 'uuid', + amoprighttype => 'uuid', amopstrategy => '5', amopopr => '>(uuid,uuid)', + amopmethod => 'btree' }, + +# btree pg_lsn_ops + +{ amopfamily => 'btree/pg_lsn_ops', amoplefttype => 'pg_lsn', + amoprighttype => 'pg_lsn', amopstrategy => '1', amopopr => '<(pg_lsn,pg_lsn)', + amopmethod => 'btree' }, +{ amopfamily => 'btree/pg_lsn_ops', amoplefttype => 'pg_lsn', + amoprighttype => 'pg_lsn', amopstrategy => '2', + amopopr => '<=(pg_lsn,pg_lsn)', amopmethod => 'btree' }, +{ amopfamily => 'btree/pg_lsn_ops', amoplefttype => 'pg_lsn', + amoprighttype => 'pg_lsn', amopstrategy => '3', amopopr => '=(pg_lsn,pg_lsn)', + amopmethod => 'btree' }, +{ amopfamily => 'btree/pg_lsn_ops', amoplefttype => 'pg_lsn', + amoprighttype => 'pg_lsn', amopstrategy => '4', + amopopr => '>=(pg_lsn,pg_lsn)', amopmethod => 'btree' }, +{ amopfamily => 'btree/pg_lsn_ops', amoplefttype => 'pg_lsn', + amoprighttype => 'pg_lsn', amopstrategy => '5', amopopr => 
'>(pg_lsn,pg_lsn)', + amopmethod => 'btree' }, + +# hash index_ops + +# bpchar_ops +{ amopfamily => 'hash/bpchar_ops', amoplefttype => 'bpchar', + amoprighttype => 'bpchar', amopstrategy => '1', amopopr => '=(bpchar,bpchar)', + amopmethod => 'hash' }, + +# char_ops +{ amopfamily => 'hash/char_ops', amoplefttype => 'char', + amoprighttype => 'char', amopstrategy => '1', amopopr => '=(char,char)', + amopmethod => 'hash' }, + +# date_ops +{ amopfamily => 'hash/date_ops', amoplefttype => 'date', + amoprighttype => 'date', amopstrategy => '1', amopopr => '=(date,date)', + amopmethod => 'hash' }, + +# float_ops +{ amopfamily => 'hash/float_ops', amoplefttype => 'float4', + amoprighttype => 'float4', amopstrategy => '1', amopopr => '=(float4,float4)', + amopmethod => 'hash' }, +{ amopfamily => 'hash/float_ops', amoplefttype => 'float8', + amoprighttype => 'float8', amopstrategy => '1', amopopr => '=(float8,float8)', + amopmethod => 'hash' }, +{ amopfamily => 'hash/float_ops', amoplefttype => 'float4', + amoprighttype => 'float8', amopstrategy => '1', amopopr => '=(float4,float8)', + amopmethod => 'hash' }, +{ amopfamily => 'hash/float_ops', amoplefttype => 'float8', + amoprighttype => 'float4', amopstrategy => '1', amopopr => '=(float8,float4)', + amopmethod => 'hash' }, + +# network_ops +{ amopfamily => 'hash/network_ops', amoplefttype => 'inet', + amoprighttype => 'inet', amopstrategy => '1', amopopr => '=(inet,inet)', + amopmethod => 'hash' }, + +# integer_ops +{ amopfamily => 'hash/integer_ops', amoplefttype => 'int2', + amoprighttype => 'int2', amopstrategy => '1', amopopr => '=(int2,int2)', + amopmethod => 'hash' }, +{ amopfamily => 'hash/integer_ops', amoplefttype => 'int4', + amoprighttype => 'int4', amopstrategy => '1', amopopr => '=(int4,int4)', + amopmethod => 'hash' }, +{ amopfamily => 'hash/integer_ops', amoplefttype => 'int8', + amoprighttype => 'int8', amopstrategy => '1', amopopr => '=(int8,int8)', + amopmethod => 'hash' }, +{ amopfamily => 'hash/integer_ops', amoplefttype => 'int2', + amoprighttype => 'int4', amopstrategy => '1', amopopr => '=(int2,int4)', + amopmethod => 'hash' }, +{ amopfamily => 'hash/integer_ops', amoplefttype => 'int2', + amoprighttype => 'int8', amopstrategy => '1', amopopr => '=(int2,int8)', + amopmethod => 'hash' }, +{ amopfamily => 'hash/integer_ops', amoplefttype => 'int4', + amoprighttype => 'int2', amopstrategy => '1', amopopr => '=(int4,int2)', + amopmethod => 'hash' }, +{ amopfamily => 'hash/integer_ops', amoplefttype => 'int4', + amoprighttype => 'int8', amopstrategy => '1', amopopr => '=(int4,int8)', + amopmethod => 'hash' }, +{ amopfamily => 'hash/integer_ops', amoplefttype => 'int8', + amoprighttype => 'int2', amopstrategy => '1', amopopr => '=(int8,int2)', + amopmethod => 'hash' }, +{ amopfamily => 'hash/integer_ops', amoplefttype => 'int8', + amoprighttype => 'int4', amopstrategy => '1', amopopr => '=(int8,int4)', + amopmethod => 'hash' }, + +# interval_ops +{ amopfamily => 'hash/interval_ops', amoplefttype => 'interval', + amoprighttype => 'interval', amopstrategy => '1', + amopopr => '=(interval,interval)', amopmethod => 'hash' }, + +# macaddr_ops +{ amopfamily => 'hash/macaddr_ops', amoplefttype => 'macaddr', + amoprighttype => 'macaddr', amopstrategy => '1', + amopopr => '=(macaddr,macaddr)', amopmethod => 'hash' }, + +# macaddr8_ops +{ amopfamily => 'hash/macaddr8_ops', amoplefttype => 'macaddr8', + amoprighttype => 'macaddr8', amopstrategy => '1', + amopopr => '=(macaddr8,macaddr8)', amopmethod => 'hash' }, + +# name_ops +{ amopfamily => 
'hash/name_ops', amoplefttype => 'name', + amoprighttype => 'name', amopstrategy => '1', amopopr => '=(name,name)', + amopmethod => 'hash' }, + +# oid_ops +{ amopfamily => 'hash/oid_ops', amoplefttype => 'oid', amoprighttype => 'oid', + amopstrategy => '1', amopopr => '=(oid,oid)', amopmethod => 'hash' }, + +# oidvector_ops +{ amopfamily => 'hash/oidvector_ops', amoplefttype => 'oidvector', + amoprighttype => 'oidvector', amopstrategy => '1', + amopopr => '=(oidvector,oidvector)', amopmethod => 'hash' }, + +# text_ops +{ amopfamily => 'hash/text_ops', amoplefttype => 'text', + amoprighttype => 'text', amopstrategy => '1', amopopr => '=(text,text)', + amopmethod => 'hash' }, + +# time_ops +{ amopfamily => 'hash/time_ops', amoplefttype => 'time', + amoprighttype => 'time', amopstrategy => '1', amopopr => '=(time,time)', + amopmethod => 'hash' }, + +# timestamptz_ops +{ amopfamily => 'hash/timestamptz_ops', amoplefttype => 'timestamptz', + amoprighttype => 'timestamptz', amopstrategy => '1', + amopopr => '=(timestamptz,timestamptz)', amopmethod => 'hash' }, + +# timetz_ops +{ amopfamily => 'hash/timetz_ops', amoplefttype => 'timetz', + amoprighttype => 'timetz', amopstrategy => '1', amopopr => '=(timetz,timetz)', + amopmethod => 'hash' }, + +# timestamp_ops +{ amopfamily => 'hash/timestamp_ops', amoplefttype => 'timestamp', + amoprighttype => 'timestamp', amopstrategy => '1', + amopopr => '=(timestamp,timestamp)', amopmethod => 'hash' }, + +# bool_ops +{ amopfamily => 'hash/bool_ops', amoplefttype => 'bool', + amoprighttype => 'bool', amopstrategy => '1', amopopr => '=(bool,bool)', + amopmethod => 'hash' }, + +# bytea_ops +{ amopfamily => 'hash/bytea_ops', amoplefttype => 'bytea', + amoprighttype => 'bytea', amopstrategy => '1', amopopr => '=(bytea,bytea)', + amopmethod => 'hash' }, + +# xid_ops +{ amopfamily => 'hash/xid_ops', amoplefttype => 'xid', amoprighttype => 'xid', + amopstrategy => '1', amopopr => '=(xid,xid)', amopmethod => 'hash' }, + +# cid_ops +{ amopfamily => 'hash/cid_ops', amoplefttype => 'cid', amoprighttype => 'cid', + amopstrategy => '1', amopopr => '=(cid,cid)', amopmethod => 'hash' }, + +# text_pattern_ops +{ amopfamily => 'hash/text_pattern_ops', amoplefttype => 'text', + amoprighttype => 'text', amopstrategy => '1', amopopr => '=(text,text)', + amopmethod => 'hash' }, + +# bpchar_pattern_ops +{ amopfamily => 'hash/bpchar_pattern_ops', amoplefttype => 'bpchar', + amoprighttype => 'bpchar', amopstrategy => '1', amopopr => '=(bpchar,bpchar)', + amopmethod => 'hash' }, + +# aclitem_ops +{ amopfamily => 'hash/aclitem_ops', amoplefttype => 'aclitem', + amoprighttype => 'aclitem', amopstrategy => '1', + amopopr => '=(aclitem,aclitem)', amopmethod => 'hash' }, + +# uuid_ops +{ amopfamily => 'hash/uuid_ops', amoplefttype => 'uuid', + amoprighttype => 'uuid', amopstrategy => '1', amopopr => '=(uuid,uuid)', + amopmethod => 'hash' }, + +# pg_lsn_ops +{ amopfamily => 'hash/pg_lsn_ops', amoplefttype => 'pg_lsn', + amoprighttype => 'pg_lsn', amopstrategy => '1', amopopr => '=(pg_lsn,pg_lsn)', + amopmethod => 'hash' }, + +# numeric_ops +{ amopfamily => 'hash/numeric_ops', amoplefttype => 'numeric', + amoprighttype => 'numeric', amopstrategy => '1', + amopopr => '=(numeric,numeric)', amopmethod => 'hash' }, + +# array_ops +{ amopfamily => 'hash/array_ops', amoplefttype => 'anyarray', + amoprighttype => 'anyarray', amopstrategy => '1', + amopopr => '=(anyarray,anyarray)', amopmethod => 'hash' }, + +# gist box_ops +{ amopfamily => 'gist/box_ops', amoplefttype => 'box', amoprighttype => 
'box', + amopstrategy => '1', amopopr => '<<(box,box)', amopmethod => 'gist' }, +{ amopfamily => 'gist/box_ops', amoplefttype => 'box', amoprighttype => 'box', + amopstrategy => '2', amopopr => '&<(box,box)', amopmethod => 'gist' }, +{ amopfamily => 'gist/box_ops', amoplefttype => 'box', amoprighttype => 'box', + amopstrategy => '3', amopopr => '&&(box,box)', amopmethod => 'gist' }, +{ amopfamily => 'gist/box_ops', amoplefttype => 'box', amoprighttype => 'box', + amopstrategy => '4', amopopr => '&>(box,box)', amopmethod => 'gist' }, +{ amopfamily => 'gist/box_ops', amoplefttype => 'box', amoprighttype => 'box', + amopstrategy => '5', amopopr => '>>(box,box)', amopmethod => 'gist' }, +{ amopfamily => 'gist/box_ops', amoplefttype => 'box', amoprighttype => 'box', + amopstrategy => '6', amopopr => '~=(box,box)', amopmethod => 'gist' }, +{ amopfamily => 'gist/box_ops', amoplefttype => 'box', amoprighttype => 'box', + amopstrategy => '7', amopopr => '@>(box,box)', amopmethod => 'gist' }, +{ amopfamily => 'gist/box_ops', amoplefttype => 'box', amoprighttype => 'box', + amopstrategy => '8', amopopr => '<@(box,box)', amopmethod => 'gist' }, +{ amopfamily => 'gist/box_ops', amoplefttype => 'box', amoprighttype => 'box', + amopstrategy => '9', amopopr => '&<|(box,box)', amopmethod => 'gist' }, +{ amopfamily => 'gist/box_ops', amoplefttype => 'box', amoprighttype => 'box', + amopstrategy => '10', amopopr => '<<|(box,box)', amopmethod => 'gist' }, +{ amopfamily => 'gist/box_ops', amoplefttype => 'box', amoprighttype => 'box', + amopstrategy => '11', amopopr => '|>>(box,box)', amopmethod => 'gist' }, +{ amopfamily => 'gist/box_ops', amoplefttype => 'box', amoprighttype => 'box', + amopstrategy => '12', amopopr => '|&>(box,box)', amopmethod => 'gist' }, +{ amopfamily => 'gist/box_ops', amoplefttype => 'box', amoprighttype => 'box', + amopstrategy => '13', amopopr => '~(box,box)', amopmethod => 'gist' }, +{ amopfamily => 'gist/box_ops', amoplefttype => 'box', amoprighttype => 'box', + amopstrategy => '14', amopopr => '@(box,box)', amopmethod => 'gist' }, + +# gist point_ops +{ amopfamily => 'gist/point_ops', amoplefttype => 'point', + amoprighttype => 'point', amopstrategy => '11', amopopr => '>^(point,point)', + amopmethod => 'gist' }, +{ amopfamily => 'gist/point_ops', amoplefttype => 'point', + amoprighttype => 'point', amopstrategy => '1', amopopr => '<<(point,point)', + amopmethod => 'gist' }, +{ amopfamily => 'gist/point_ops', amoplefttype => 'point', + amoprighttype => 'point', amopstrategy => '5', amopopr => '>>(point,point)', + amopmethod => 'gist' }, +{ amopfamily => 'gist/point_ops', amoplefttype => 'point', + amoprighttype => 'point', amopstrategy => '10', amopopr => '<^(point,point)', + amopmethod => 'gist' }, +{ amopfamily => 'gist/point_ops', amoplefttype => 'point', + amoprighttype => 'point', amopstrategy => '6', amopopr => '~=(point,point)', + amopmethod => 'gist' }, +{ amopfamily => 'gist/point_ops', amoplefttype => 'point', + amoprighttype => 'point', amopstrategy => '15', amoppurpose => 'o', + amopopr => '<->(point,point)', amopmethod => 'gist', + amopsortfamily => 'btree/float_ops' }, +{ amopfamily => 'gist/point_ops', amoplefttype => 'point', + amoprighttype => 'box', amopstrategy => '28', amopopr => '<@(point,box)', + amopmethod => 'gist' }, +{ amopfamily => 'gist/point_ops', amoplefttype => 'point', + amoprighttype => 'polygon', amopstrategy => '48', + amopopr => '<@(point,polygon)', amopmethod => 'gist' }, +{ amopfamily => 'gist/point_ops', amoplefttype => 'point', + 
amoprighttype => 'circle', amopstrategy => '68', + amopopr => '<@(point,circle)', amopmethod => 'gist' }, + +# gist poly_ops (supports polygons) +{ amopfamily => 'gist/poly_ops', amoplefttype => 'polygon', + amoprighttype => 'polygon', amopstrategy => '1', + amopopr => '<<(polygon,polygon)', amopmethod => 'gist' }, +{ amopfamily => 'gist/poly_ops', amoplefttype => 'polygon', + amoprighttype => 'polygon', amopstrategy => '2', + amopopr => '&<(polygon,polygon)', amopmethod => 'gist' }, +{ amopfamily => 'gist/poly_ops', amoplefttype => 'polygon', + amoprighttype => 'polygon', amopstrategy => '3', + amopopr => '&&(polygon,polygon)', amopmethod => 'gist' }, +{ amopfamily => 'gist/poly_ops', amoplefttype => 'polygon', + amoprighttype => 'polygon', amopstrategy => '4', + amopopr => '&>(polygon,polygon)', amopmethod => 'gist' }, +{ amopfamily => 'gist/poly_ops', amoplefttype => 'polygon', + amoprighttype => 'polygon', amopstrategy => '5', + amopopr => '>>(polygon,polygon)', amopmethod => 'gist' }, +{ amopfamily => 'gist/poly_ops', amoplefttype => 'polygon', + amoprighttype => 'polygon', amopstrategy => '6', + amopopr => '~=(polygon,polygon)', amopmethod => 'gist' }, +{ amopfamily => 'gist/poly_ops', amoplefttype => 'polygon', + amoprighttype => 'polygon', amopstrategy => '7', + amopopr => '@>(polygon,polygon)', amopmethod => 'gist' }, +{ amopfamily => 'gist/poly_ops', amoplefttype => 'polygon', + amoprighttype => 'polygon', amopstrategy => '8', + amopopr => '<@(polygon,polygon)', amopmethod => 'gist' }, +{ amopfamily => 'gist/poly_ops', amoplefttype => 'polygon', + amoprighttype => 'polygon', amopstrategy => '9', + amopopr => '&<|(polygon,polygon)', amopmethod => 'gist' }, +{ amopfamily => 'gist/poly_ops', amoplefttype => 'polygon', + amoprighttype => 'polygon', amopstrategy => '10', + amopopr => '<<|(polygon,polygon)', amopmethod => 'gist' }, +{ amopfamily => 'gist/poly_ops', amoplefttype => 'polygon', + amoprighttype => 'polygon', amopstrategy => '11', + amopopr => '|>>(polygon,polygon)', amopmethod => 'gist' }, +{ amopfamily => 'gist/poly_ops', amoplefttype => 'polygon', + amoprighttype => 'polygon', amopstrategy => '12', + amopopr => '|&>(polygon,polygon)', amopmethod => 'gist' }, +{ amopfamily => 'gist/poly_ops', amoplefttype => 'polygon', + amoprighttype => 'polygon', amopstrategy => '13', + amopopr => '~(polygon,polygon)', amopmethod => 'gist' }, +{ amopfamily => 'gist/poly_ops', amoplefttype => 'polygon', + amoprighttype => 'polygon', amopstrategy => '14', + amopopr => '@(polygon,polygon)', amopmethod => 'gist' }, +{ amopfamily => 'gist/poly_ops', amoplefttype => 'polygon', + amoprighttype => 'point', amopstrategy => '15', amoppurpose => 'o', + amopopr => '<->(polygon,point)', amopmethod => 'gist', + amopsortfamily => 'btree/float_ops' }, + +# gist circle_ops +{ amopfamily => 'gist/circle_ops', amoplefttype => 'circle', + amoprighttype => 'circle', amopstrategy => '1', + amopopr => '<<(circle,circle)', amopmethod => 'gist' }, +{ amopfamily => 'gist/circle_ops', amoplefttype => 'circle', + amoprighttype => 'circle', amopstrategy => '2', + amopopr => '&<(circle,circle)', amopmethod => 'gist' }, +{ amopfamily => 'gist/circle_ops', amoplefttype => 'circle', + amoprighttype => 'circle', amopstrategy => '3', + amopopr => '&&(circle,circle)', amopmethod => 'gist' }, +{ amopfamily => 'gist/circle_ops', amoplefttype => 'circle', + amoprighttype => 'circle', amopstrategy => '4', + amopopr => '&>(circle,circle)', amopmethod => 'gist' }, +{ amopfamily => 'gist/circle_ops', amoplefttype => 'circle', + 
amoprighttype => 'circle', amopstrategy => '5', + amopopr => '>>(circle,circle)', amopmethod => 'gist' }, +{ amopfamily => 'gist/circle_ops', amoplefttype => 'circle', + amoprighttype => 'circle', amopstrategy => '6', + amopopr => '~=(circle,circle)', amopmethod => 'gist' }, +{ amopfamily => 'gist/circle_ops', amoplefttype => 'circle', + amoprighttype => 'circle', amopstrategy => '7', + amopopr => '@>(circle,circle)', amopmethod => 'gist' }, +{ amopfamily => 'gist/circle_ops', amoplefttype => 'circle', + amoprighttype => 'circle', amopstrategy => '8', + amopopr => '<@(circle,circle)', amopmethod => 'gist' }, +{ amopfamily => 'gist/circle_ops', amoplefttype => 'circle', + amoprighttype => 'circle', amopstrategy => '9', + amopopr => '&<|(circle,circle)', amopmethod => 'gist' }, +{ amopfamily => 'gist/circle_ops', amoplefttype => 'circle', + amoprighttype => 'circle', amopstrategy => '10', + amopopr => '<<|(circle,circle)', amopmethod => 'gist' }, +{ amopfamily => 'gist/circle_ops', amoplefttype => 'circle', + amoprighttype => 'circle', amopstrategy => '11', + amopopr => '|>>(circle,circle)', amopmethod => 'gist' }, +{ amopfamily => 'gist/circle_ops', amoplefttype => 'circle', + amoprighttype => 'circle', amopstrategy => '12', + amopopr => '|&>(circle,circle)', amopmethod => 'gist' }, +{ amopfamily => 'gist/circle_ops', amoplefttype => 'circle', + amoprighttype => 'circle', amopstrategy => '13', + amopopr => '~(circle,circle)', amopmethod => 'gist' }, +{ amopfamily => 'gist/circle_ops', amoplefttype => 'circle', + amoprighttype => 'circle', amopstrategy => '14', + amopopr => '@(circle,circle)', amopmethod => 'gist' }, +{ amopfamily => 'gist/circle_ops', amoplefttype => 'circle', + amoprighttype => 'point', amopstrategy => '15', amoppurpose => 'o', + amopopr => '<->(circle,point)', amopmethod => 'gist', + amopsortfamily => 'btree/float_ops' }, + +# gin array_ops +{ amopfamily => 'gin/array_ops', amoplefttype => 'anyarray', + amoprighttype => 'anyarray', amopstrategy => '1', + amopopr => '&&(anyarray,anyarray)', amopmethod => 'gin' }, +{ amopfamily => 'gin/array_ops', amoplefttype => 'anyarray', + amoprighttype => 'anyarray', amopstrategy => '2', + amopopr => '@>(anyarray,anyarray)', amopmethod => 'gin' }, +{ amopfamily => 'gin/array_ops', amoplefttype => 'anyarray', + amoprighttype => 'anyarray', amopstrategy => '3', + amopopr => '<@(anyarray,anyarray)', amopmethod => 'gin' }, +{ amopfamily => 'gin/array_ops', amoplefttype => 'anyarray', + amoprighttype => 'anyarray', amopstrategy => '4', + amopopr => '=(anyarray,anyarray)', amopmethod => 'gin' }, + +# btree enum_ops +{ amopfamily => 'btree/enum_ops', amoplefttype => 'anyenum', + amoprighttype => 'anyenum', amopstrategy => '1', + amopopr => '<(anyenum,anyenum)', amopmethod => 'btree' }, +{ amopfamily => 'btree/enum_ops', amoplefttype => 'anyenum', + amoprighttype => 'anyenum', amopstrategy => '2', + amopopr => '<=(anyenum,anyenum)', amopmethod => 'btree' }, +{ amopfamily => 'btree/enum_ops', amoplefttype => 'anyenum', + amoprighttype => 'anyenum', amopstrategy => '3', + amopopr => '=(anyenum,anyenum)', amopmethod => 'btree' }, +{ amopfamily => 'btree/enum_ops', amoplefttype => 'anyenum', + amoprighttype => 'anyenum', amopstrategy => '4', + amopopr => '>=(anyenum,anyenum)', amopmethod => 'btree' }, +{ amopfamily => 'btree/enum_ops', amoplefttype => 'anyenum', + amoprighttype => 'anyenum', amopstrategy => '5', + amopopr => '>(anyenum,anyenum)', amopmethod => 'btree' }, + +# hash enum_ops +{ amopfamily => 'hash/enum_ops', amoplefttype => 
'anyenum', + amoprighttype => 'anyenum', amopstrategy => '1', + amopopr => '=(anyenum,anyenum)', amopmethod => 'hash' }, + +# btree tsvector_ops +{ amopfamily => 'btree/tsvector_ops', amoplefttype => 'tsvector', + amoprighttype => 'tsvector', amopstrategy => '1', + amopopr => '<(tsvector,tsvector)', amopmethod => 'btree' }, +{ amopfamily => 'btree/tsvector_ops', amoplefttype => 'tsvector', + amoprighttype => 'tsvector', amopstrategy => '2', + amopopr => '<=(tsvector,tsvector)', amopmethod => 'btree' }, +{ amopfamily => 'btree/tsvector_ops', amoplefttype => 'tsvector', + amoprighttype => 'tsvector', amopstrategy => '3', + amopopr => '=(tsvector,tsvector)', amopmethod => 'btree' }, +{ amopfamily => 'btree/tsvector_ops', amoplefttype => 'tsvector', + amoprighttype => 'tsvector', amopstrategy => '4', + amopopr => '>=(tsvector,tsvector)', amopmethod => 'btree' }, +{ amopfamily => 'btree/tsvector_ops', amoplefttype => 'tsvector', + amoprighttype => 'tsvector', amopstrategy => '5', + amopopr => '>(tsvector,tsvector)', amopmethod => 'btree' }, + +# GiST tsvector_ops +{ amopfamily => 'gist/tsvector_ops', amoplefttype => 'tsvector', + amoprighttype => 'tsquery', amopstrategy => '1', + amopopr => '@@(tsvector,tsquery)', amopmethod => 'gist' }, + +# GIN tsvector_ops +{ amopfamily => 'gin/tsvector_ops', amoplefttype => 'tsvector', + amoprighttype => 'tsquery', amopstrategy => '1', + amopopr => '@@(tsvector,tsquery)', amopmethod => 'gin' }, +{ amopfamily => 'gin/tsvector_ops', amoplefttype => 'tsvector', + amoprighttype => 'tsquery', amopstrategy => '2', + amopopr => '@@@(tsvector,tsquery)', amopmethod => 'gin' }, + +# btree tsquery_ops +{ amopfamily => 'btree/tsquery_ops', amoplefttype => 'tsquery', + amoprighttype => 'tsquery', amopstrategy => '1', + amopopr => '<(tsquery,tsquery)', amopmethod => 'btree' }, +{ amopfamily => 'btree/tsquery_ops', amoplefttype => 'tsquery', + amoprighttype => 'tsquery', amopstrategy => '2', + amopopr => '<=(tsquery,tsquery)', amopmethod => 'btree' }, +{ amopfamily => 'btree/tsquery_ops', amoplefttype => 'tsquery', + amoprighttype => 'tsquery', amopstrategy => '3', + amopopr => '=(tsquery,tsquery)', amopmethod => 'btree' }, +{ amopfamily => 'btree/tsquery_ops', amoplefttype => 'tsquery', + amoprighttype => 'tsquery', amopstrategy => '4', + amopopr => '>=(tsquery,tsquery)', amopmethod => 'btree' }, +{ amopfamily => 'btree/tsquery_ops', amoplefttype => 'tsquery', + amoprighttype => 'tsquery', amopstrategy => '5', + amopopr => '>(tsquery,tsquery)', amopmethod => 'btree' }, + +# GiST tsquery_ops +{ amopfamily => 'gist/tsquery_ops', amoplefttype => 'tsquery', + amoprighttype => 'tsquery', amopstrategy => '7', + amopopr => '@>(tsquery,tsquery)', amopmethod => 'gist' }, +{ amopfamily => 'gist/tsquery_ops', amoplefttype => 'tsquery', + amoprighttype => 'tsquery', amopstrategy => '8', + amopopr => '<@(tsquery,tsquery)', amopmethod => 'gist' }, + +# btree range_ops +{ amopfamily => 'btree/range_ops', amoplefttype => 'anyrange', + amoprighttype => 'anyrange', amopstrategy => '1', + amopopr => '<(anyrange,anyrange)', amopmethod => 'btree' }, +{ amopfamily => 'btree/range_ops', amoplefttype => 'anyrange', + amoprighttype => 'anyrange', amopstrategy => '2', + amopopr => '<=(anyrange,anyrange)', amopmethod => 'btree' }, +{ amopfamily => 'btree/range_ops', amoplefttype => 'anyrange', + amoprighttype => 'anyrange', amopstrategy => '3', + amopopr => '=(anyrange,anyrange)', amopmethod => 'btree' }, +{ amopfamily => 'btree/range_ops', amoplefttype => 'anyrange', + amoprighttype => 
'anyrange', amopstrategy => '4', + amopopr => '>=(anyrange,anyrange)', amopmethod => 'btree' }, +{ amopfamily => 'btree/range_ops', amoplefttype => 'anyrange', + amoprighttype => 'anyrange', amopstrategy => '5', + amopopr => '>(anyrange,anyrange)', amopmethod => 'btree' }, + +# hash range_ops +{ amopfamily => 'hash/range_ops', amoplefttype => 'anyrange', + amoprighttype => 'anyrange', amopstrategy => '1', + amopopr => '=(anyrange,anyrange)', amopmethod => 'hash' }, + +# GiST range_ops +{ amopfamily => 'gist/range_ops', amoplefttype => 'anyrange', + amoprighttype => 'anyrange', amopstrategy => '1', + amopopr => '<<(anyrange,anyrange)', amopmethod => 'gist' }, +{ amopfamily => 'gist/range_ops', amoplefttype => 'anyrange', + amoprighttype => 'anyrange', amopstrategy => '2', + amopopr => '&<(anyrange,anyrange)', amopmethod => 'gist' }, +{ amopfamily => 'gist/range_ops', amoplefttype => 'anyrange', + amoprighttype => 'anyrange', amopstrategy => '3', + amopopr => '&&(anyrange,anyrange)', amopmethod => 'gist' }, +{ amopfamily => 'gist/range_ops', amoplefttype => 'anyrange', + amoprighttype => 'anyrange', amopstrategy => '4', + amopopr => '&>(anyrange,anyrange)', amopmethod => 'gist' }, +{ amopfamily => 'gist/range_ops', amoplefttype => 'anyrange', + amoprighttype => 'anyrange', amopstrategy => '5', + amopopr => '>>(anyrange,anyrange)', amopmethod => 'gist' }, +{ amopfamily => 'gist/range_ops', amoplefttype => 'anyrange', + amoprighttype => 'anyrange', amopstrategy => '6', + amopopr => '-|-(anyrange,anyrange)', amopmethod => 'gist' }, +{ amopfamily => 'gist/range_ops', amoplefttype => 'anyrange', + amoprighttype => 'anyrange', amopstrategy => '7', + amopopr => '@>(anyrange,anyrange)', amopmethod => 'gist' }, +{ amopfamily => 'gist/range_ops', amoplefttype => 'anyrange', + amoprighttype => 'anyrange', amopstrategy => '8', + amopopr => '<@(anyrange,anyrange)', amopmethod => 'gist' }, +{ amopfamily => 'gist/range_ops', amoplefttype => 'anyrange', + amoprighttype => 'anyelement', amopstrategy => '16', + amopopr => '@>(anyrange,anyelement)', amopmethod => 'gist' }, +{ amopfamily => 'gist/range_ops', amoplefttype => 'anyrange', + amoprighttype => 'anyrange', amopstrategy => '18', + amopopr => '=(anyrange,anyrange)', amopmethod => 'gist' }, + +# SP-GiST quad_point_ops +{ amopfamily => 'spgist/quad_point_ops', amoplefttype => 'point', + amoprighttype => 'point', amopstrategy => '11', amopopr => '>^(point,point)', + amopmethod => 'spgist' }, +{ amopfamily => 'spgist/quad_point_ops', amoplefttype => 'point', + amoprighttype => 'point', amopstrategy => '1', amopopr => '<<(point,point)', + amopmethod => 'spgist' }, +{ amopfamily => 'spgist/quad_point_ops', amoplefttype => 'point', + amoprighttype => 'point', amopstrategy => '5', amopopr => '>>(point,point)', + amopmethod => 'spgist' }, +{ amopfamily => 'spgist/quad_point_ops', amoplefttype => 'point', + amoprighttype => 'point', amopstrategy => '10', amopopr => '<^(point,point)', + amopmethod => 'spgist' }, +{ amopfamily => 'spgist/quad_point_ops', amoplefttype => 'point', + amoprighttype => 'point', amopstrategy => '6', amopopr => '~=(point,point)', + amopmethod => 'spgist' }, +{ amopfamily => 'spgist/quad_point_ops', amoplefttype => 'point', + amoprighttype => 'box', amopstrategy => '8', amopopr => '<@(point,box)', + amopmethod => 'spgist' }, +{ amopfamily => 'spgist/quad_point_ops', amoplefttype => 'point', + amoprighttype => 'point', amopstrategy => '15', amoppurpose => 'o', + amopopr => '<->(point,point)', amopmethod => 'spgist', + amopsortfamily => 
'btree/float_ops' }, + +# SP-GiST kd_point_ops +{ amopfamily => 'spgist/kd_point_ops', amoplefttype => 'point', + amoprighttype => 'point', amopstrategy => '11', amopopr => '>^(point,point)', + amopmethod => 'spgist' }, +{ amopfamily => 'spgist/kd_point_ops', amoplefttype => 'point', + amoprighttype => 'point', amopstrategy => '1', amopopr => '<<(point,point)', + amopmethod => 'spgist' }, +{ amopfamily => 'spgist/kd_point_ops', amoplefttype => 'point', + amoprighttype => 'point', amopstrategy => '5', amopopr => '>>(point,point)', + amopmethod => 'spgist' }, +{ amopfamily => 'spgist/kd_point_ops', amoplefttype => 'point', + amoprighttype => 'point', amopstrategy => '10', amopopr => '<^(point,point)', + amopmethod => 'spgist' }, +{ amopfamily => 'spgist/kd_point_ops', amoplefttype => 'point', + amoprighttype => 'point', amopstrategy => '6', amopopr => '~=(point,point)', + amopmethod => 'spgist' }, +{ amopfamily => 'spgist/kd_point_ops', amoplefttype => 'point', + amoprighttype => 'box', amopstrategy => '8', amopopr => '<@(point,box)', + amopmethod => 'spgist' }, +{ amopfamily => 'spgist/kd_point_ops', amoplefttype => 'point', + amoprighttype => 'point', amopstrategy => '15', amoppurpose => 'o', + amopopr => '<->(point,point)', amopmethod => 'spgist', + amopsortfamily => 'btree/float_ops' }, + +# SP-GiST text_ops +{ amopfamily => 'spgist/text_ops', amoplefttype => 'text', + amoprighttype => 'text', amopstrategy => '1', amopopr => '~<~(text,text)', + amopmethod => 'spgist' }, +{ amopfamily => 'spgist/text_ops', amoplefttype => 'text', + amoprighttype => 'text', amopstrategy => '2', amopopr => '~<=~(text,text)', + amopmethod => 'spgist' }, +{ amopfamily => 'spgist/text_ops', amoplefttype => 'text', + amoprighttype => 'text', amopstrategy => '3', amopopr => '=(text,text)', + amopmethod => 'spgist' }, +{ amopfamily => 'spgist/text_ops', amoplefttype => 'text', + amoprighttype => 'text', amopstrategy => '4', amopopr => '~>=~(text,text)', + amopmethod => 'spgist' }, +{ amopfamily => 'spgist/text_ops', amoplefttype => 'text', + amoprighttype => 'text', amopstrategy => '5', amopopr => '~>~(text,text)', + amopmethod => 'spgist' }, +{ amopfamily => 'spgist/text_ops', amoplefttype => 'text', + amoprighttype => 'text', amopstrategy => '11', amopopr => '<(text,text)', + amopmethod => 'spgist' }, +{ amopfamily => 'spgist/text_ops', amoplefttype => 'text', + amoprighttype => 'text', amopstrategy => '12', amopopr => '<=(text,text)', + amopmethod => 'spgist' }, +{ amopfamily => 'spgist/text_ops', amoplefttype => 'text', + amoprighttype => 'text', amopstrategy => '14', amopopr => '>=(text,text)', + amopmethod => 'spgist' }, +{ amopfamily => 'spgist/text_ops', amoplefttype => 'text', + amoprighttype => 'text', amopstrategy => '15', amopopr => '>(text,text)', + amopmethod => 'spgist' }, +{ amopfamily => 'spgist/text_ops', amoplefttype => 'text', + amoprighttype => 'text', amopstrategy => '28', amopopr => '^@(text,text)', + amopmethod => 'spgist' }, + +# btree jsonb_ops +{ amopfamily => 'btree/jsonb_ops', amoplefttype => 'jsonb', + amoprighttype => 'jsonb', amopstrategy => '1', amopopr => '<(jsonb,jsonb)', + amopmethod => 'btree' }, +{ amopfamily => 'btree/jsonb_ops', amoplefttype => 'jsonb', + amoprighttype => 'jsonb', amopstrategy => '2', amopopr => '<=(jsonb,jsonb)', + amopmethod => 'btree' }, +{ amopfamily => 'btree/jsonb_ops', amoplefttype => 'jsonb', + amoprighttype => 'jsonb', amopstrategy => '3', amopopr => '=(jsonb,jsonb)', + amopmethod => 'btree' }, +{ amopfamily => 'btree/jsonb_ops', amoplefttype => 
'jsonb', + amoprighttype => 'jsonb', amopstrategy => '4', amopopr => '>=(jsonb,jsonb)', + amopmethod => 'btree' }, +{ amopfamily => 'btree/jsonb_ops', amoplefttype => 'jsonb', + amoprighttype => 'jsonb', amopstrategy => '5', amopopr => '>(jsonb,jsonb)', + amopmethod => 'btree' }, + +# hash jsonb_ops +{ amopfamily => 'hash/jsonb_ops', amoplefttype => 'jsonb', + amoprighttype => 'jsonb', amopstrategy => '1', amopopr => '=(jsonb,jsonb)', + amopmethod => 'hash' }, + +# GIN jsonb_ops +{ amopfamily => 'gin/jsonb_ops', amoplefttype => 'jsonb', + amoprighttype => 'jsonb', amopstrategy => '7', amopopr => '@>(jsonb,jsonb)', + amopmethod => 'gin' }, +{ amopfamily => 'gin/jsonb_ops', amoplefttype => 'jsonb', + amoprighttype => 'text', amopstrategy => '9', amopopr => '?(jsonb,text)', + amopmethod => 'gin' }, +{ amopfamily => 'gin/jsonb_ops', amoplefttype => 'jsonb', + amoprighttype => '_text', amopstrategy => '10', amopopr => '?|(jsonb,_text)', + amopmethod => 'gin' }, +{ amopfamily => 'gin/jsonb_ops', amoplefttype => 'jsonb', + amoprighttype => '_text', amopstrategy => '11', amopopr => '?&(jsonb,_text)', + amopmethod => 'gin' }, + +# GIN jsonb_path_ops +{ amopfamily => 'gin/jsonb_path_ops', amoplefttype => 'jsonb', + amoprighttype => 'jsonb', amopstrategy => '7', amopopr => '@>(jsonb,jsonb)', + amopmethod => 'gin' }, + +# SP-GiST range_ops +{ amopfamily => 'spgist/range_ops', amoplefttype => 'anyrange', + amoprighttype => 'anyrange', amopstrategy => '1', + amopopr => '<<(anyrange,anyrange)', amopmethod => 'spgist' }, +{ amopfamily => 'spgist/range_ops', amoplefttype => 'anyrange', + amoprighttype => 'anyrange', amopstrategy => '2', + amopopr => '&<(anyrange,anyrange)', amopmethod => 'spgist' }, +{ amopfamily => 'spgist/range_ops', amoplefttype => 'anyrange', + amoprighttype => 'anyrange', amopstrategy => '3', + amopopr => '&&(anyrange,anyrange)', amopmethod => 'spgist' }, +{ amopfamily => 'spgist/range_ops', amoplefttype => 'anyrange', + amoprighttype => 'anyrange', amopstrategy => '4', + amopopr => '&>(anyrange,anyrange)', amopmethod => 'spgist' }, +{ amopfamily => 'spgist/range_ops', amoplefttype => 'anyrange', + amoprighttype => 'anyrange', amopstrategy => '5', + amopopr => '>>(anyrange,anyrange)', amopmethod => 'spgist' }, +{ amopfamily => 'spgist/range_ops', amoplefttype => 'anyrange', + amoprighttype => 'anyrange', amopstrategy => '6', + amopopr => '-|-(anyrange,anyrange)', amopmethod => 'spgist' }, +{ amopfamily => 'spgist/range_ops', amoplefttype => 'anyrange', + amoprighttype => 'anyrange', amopstrategy => '7', + amopopr => '@>(anyrange,anyrange)', amopmethod => 'spgist' }, +{ amopfamily => 'spgist/range_ops', amoplefttype => 'anyrange', + amoprighttype => 'anyrange', amopstrategy => '8', + amopopr => '<@(anyrange,anyrange)', amopmethod => 'spgist' }, +{ amopfamily => 'spgist/range_ops', amoplefttype => 'anyrange', + amoprighttype => 'anyelement', amopstrategy => '16', + amopopr => '@>(anyrange,anyelement)', amopmethod => 'spgist' }, +{ amopfamily => 'spgist/range_ops', amoplefttype => 'anyrange', + amoprighttype => 'anyrange', amopstrategy => '18', + amopopr => '=(anyrange,anyrange)', amopmethod => 'spgist' }, + +# SP-GiST box_ops +{ amopfamily => 'spgist/box_ops', amoplefttype => 'box', amoprighttype => 'box', + amopstrategy => '1', amopopr => '<<(box,box)', amopmethod => 'spgist' }, +{ amopfamily => 'spgist/box_ops', amoplefttype => 'box', amoprighttype => 'box', + amopstrategy => '2', amopopr => '&<(box,box)', amopmethod => 'spgist' }, +{ amopfamily => 'spgist/box_ops', amoplefttype => 
'box', amoprighttype => 'box', + amopstrategy => '3', amopopr => '&&(box,box)', amopmethod => 'spgist' }, +{ amopfamily => 'spgist/box_ops', amoplefttype => 'box', amoprighttype => 'box', + amopstrategy => '4', amopopr => '&>(box,box)', amopmethod => 'spgist' }, +{ amopfamily => 'spgist/box_ops', amoplefttype => 'box', amoprighttype => 'box', + amopstrategy => '5', amopopr => '>>(box,box)', amopmethod => 'spgist' }, +{ amopfamily => 'spgist/box_ops', amoplefttype => 'box', amoprighttype => 'box', + amopstrategy => '6', amopopr => '~=(box,box)', amopmethod => 'spgist' }, +{ amopfamily => 'spgist/box_ops', amoplefttype => 'box', amoprighttype => 'box', + amopstrategy => '7', amopopr => '@>(box,box)', amopmethod => 'spgist' }, +{ amopfamily => 'spgist/box_ops', amoplefttype => 'box', amoprighttype => 'box', + amopstrategy => '8', amopopr => '<@(box,box)', amopmethod => 'spgist' }, +{ amopfamily => 'spgist/box_ops', amoplefttype => 'box', amoprighttype => 'box', + amopstrategy => '9', amopopr => '&<|(box,box)', amopmethod => 'spgist' }, +{ amopfamily => 'spgist/box_ops', amoplefttype => 'box', amoprighttype => 'box', + amopstrategy => '10', amopopr => '<<|(box,box)', amopmethod => 'spgist' }, +{ amopfamily => 'spgist/box_ops', amoplefttype => 'box', amoprighttype => 'box', + amopstrategy => '11', amopopr => '|>>(box,box)', amopmethod => 'spgist' }, +{ amopfamily => 'spgist/box_ops', amoplefttype => 'box', amoprighttype => 'box', + amopstrategy => '12', amopopr => '|&>(box,box)', amopmethod => 'spgist' }, + +# SP-GiST poly_ops (supports polygons) +{ amopfamily => 'spgist/poly_ops', amoplefttype => 'polygon', + amoprighttype => 'polygon', amopstrategy => '1', + amopopr => '<<(polygon,polygon)', amopmethod => 'spgist' }, +{ amopfamily => 'spgist/poly_ops', amoplefttype => 'polygon', + amoprighttype => 'polygon', amopstrategy => '2', + amopopr => '&<(polygon,polygon)', amopmethod => 'spgist' }, +{ amopfamily => 'spgist/poly_ops', amoplefttype => 'polygon', + amoprighttype => 'polygon', amopstrategy => '3', + amopopr => '&&(polygon,polygon)', amopmethod => 'spgist' }, +{ amopfamily => 'spgist/poly_ops', amoplefttype => 'polygon', + amoprighttype => 'polygon', amopstrategy => '4', + amopopr => '&>(polygon,polygon)', amopmethod => 'spgist' }, +{ amopfamily => 'spgist/poly_ops', amoplefttype => 'polygon', + amoprighttype => 'polygon', amopstrategy => '5', + amopopr => '>>(polygon,polygon)', amopmethod => 'spgist' }, +{ amopfamily => 'spgist/poly_ops', amoplefttype => 'polygon', + amoprighttype => 'polygon', amopstrategy => '6', + amopopr => '~=(polygon,polygon)', amopmethod => 'spgist' }, +{ amopfamily => 'spgist/poly_ops', amoplefttype => 'polygon', + amoprighttype => 'polygon', amopstrategy => '7', + amopopr => '@>(polygon,polygon)', amopmethod => 'spgist' }, +{ amopfamily => 'spgist/poly_ops', amoplefttype => 'polygon', + amoprighttype => 'polygon', amopstrategy => '8', + amopopr => '<@(polygon,polygon)', amopmethod => 'spgist' }, +{ amopfamily => 'spgist/poly_ops', amoplefttype => 'polygon', + amoprighttype => 'polygon', amopstrategy => '9', + amopopr => '&<|(polygon,polygon)', amopmethod => 'spgist' }, +{ amopfamily => 'spgist/poly_ops', amoplefttype => 'polygon', + amoprighttype => 'polygon', amopstrategy => '10', + amopopr => '<<|(polygon,polygon)', amopmethod => 'spgist' }, +{ amopfamily => 'spgist/poly_ops', amoplefttype => 'polygon', + amoprighttype => 'polygon', amopstrategy => '11', + amopopr => '|>>(polygon,polygon)', amopmethod => 'spgist' }, +{ amopfamily => 'spgist/poly_ops', 
amoplefttype => 'polygon', + amoprighttype => 'polygon', amopstrategy => '12', + amopopr => '|&>(polygon,polygon)', amopmethod => 'spgist' }, +{ amopfamily => 'spgist/poly_ops', amoplefttype => 'polygon', + amoprighttype => 'point', amopstrategy => '15', amoppurpose => 'o', + amopopr => '<->(polygon,point)', amopmethod => 'spgist', + amopsortfamily => 'btree/float_ops' }, + +# GiST inet_ops +{ amopfamily => 'gist/network_ops', amoplefttype => 'inet', + amoprighttype => 'inet', amopstrategy => '3', amopopr => '&&(inet,inet)', + amopmethod => 'gist' }, +{ amopfamily => 'gist/network_ops', amoplefttype => 'inet', + amoprighttype => 'inet', amopstrategy => '18', amopopr => '=(inet,inet)', + amopmethod => 'gist' }, +{ amopfamily => 'gist/network_ops', amoplefttype => 'inet', + amoprighttype => 'inet', amopstrategy => '19', amopopr => '<>(inet,inet)', + amopmethod => 'gist' }, +{ amopfamily => 'gist/network_ops', amoplefttype => 'inet', + amoprighttype => 'inet', amopstrategy => '20', amopopr => '<(inet,inet)', + amopmethod => 'gist' }, +{ amopfamily => 'gist/network_ops', amoplefttype => 'inet', + amoprighttype => 'inet', amopstrategy => '21', amopopr => '<=(inet,inet)', + amopmethod => 'gist' }, +{ amopfamily => 'gist/network_ops', amoplefttype => 'inet', + amoprighttype => 'inet', amopstrategy => '22', amopopr => '>(inet,inet)', + amopmethod => 'gist' }, +{ amopfamily => 'gist/network_ops', amoplefttype => 'inet', + amoprighttype => 'inet', amopstrategy => '23', amopopr => '>=(inet,inet)', + amopmethod => 'gist' }, +{ amopfamily => 'gist/network_ops', amoplefttype => 'inet', + amoprighttype => 'inet', amopstrategy => '24', amopopr => '<<(inet,inet)', + amopmethod => 'gist' }, +{ amopfamily => 'gist/network_ops', amoplefttype => 'inet', + amoprighttype => 'inet', amopstrategy => '25', amopopr => '<<=(inet,inet)', + amopmethod => 'gist' }, +{ amopfamily => 'gist/network_ops', amoplefttype => 'inet', + amoprighttype => 'inet', amopstrategy => '26', amopopr => '>>(inet,inet)', + amopmethod => 'gist' }, +{ amopfamily => 'gist/network_ops', amoplefttype => 'inet', + amoprighttype => 'inet', amopstrategy => '27', amopopr => '>>=(inet,inet)', + amopmethod => 'gist' }, + +# SP-GiST inet_ops +{ amopfamily => 'spgist/network_ops', amoplefttype => 'inet', + amoprighttype => 'inet', amopstrategy => '3', amopopr => '&&(inet,inet)', + amopmethod => 'spgist' }, +{ amopfamily => 'spgist/network_ops', amoplefttype => 'inet', + amoprighttype => 'inet', amopstrategy => '18', amopopr => '=(inet,inet)', + amopmethod => 'spgist' }, +{ amopfamily => 'spgist/network_ops', amoplefttype => 'inet', + amoprighttype => 'inet', amopstrategy => '19', amopopr => '<>(inet,inet)', + amopmethod => 'spgist' }, +{ amopfamily => 'spgist/network_ops', amoplefttype => 'inet', + amoprighttype => 'inet', amopstrategy => '20', amopopr => '<(inet,inet)', + amopmethod => 'spgist' }, +{ amopfamily => 'spgist/network_ops', amoplefttype => 'inet', + amoprighttype => 'inet', amopstrategy => '21', amopopr => '<=(inet,inet)', + amopmethod => 'spgist' }, +{ amopfamily => 'spgist/network_ops', amoplefttype => 'inet', + amoprighttype => 'inet', amopstrategy => '22', amopopr => '>(inet,inet)', + amopmethod => 'spgist' }, +{ amopfamily => 'spgist/network_ops', amoplefttype => 'inet', + amoprighttype => 'inet', amopstrategy => '23', amopopr => '>=(inet,inet)', + amopmethod => 'spgist' }, +{ amopfamily => 'spgist/network_ops', amoplefttype => 'inet', + amoprighttype => 'inet', amopstrategy => '24', amopopr => '<<(inet,inet)', + amopmethod => 'spgist' 
}, +{ amopfamily => 'spgist/network_ops', amoplefttype => 'inet', + amoprighttype => 'inet', amopstrategy => '25', amopopr => '<<=(inet,inet)', + amopmethod => 'spgist' }, +{ amopfamily => 'spgist/network_ops', amoplefttype => 'inet', + amoprighttype => 'inet', amopstrategy => '26', amopopr => '>>(inet,inet)', + amopmethod => 'spgist' }, +{ amopfamily => 'spgist/network_ops', amoplefttype => 'inet', + amoprighttype => 'inet', amopstrategy => '27', amopopr => '>>=(inet,inet)', + amopmethod => 'spgist' }, + +# BRIN opclasses + +# minmax bytea +{ amopfamily => 'brin/bytea_minmax_ops', amoplefttype => 'bytea', + amoprighttype => 'bytea', amopstrategy => '1', amopopr => '<(bytea,bytea)', + amopmethod => 'brin' }, +{ amopfamily => 'brin/bytea_minmax_ops', amoplefttype => 'bytea', + amoprighttype => 'bytea', amopstrategy => '2', amopopr => '<=(bytea,bytea)', + amopmethod => 'brin' }, +{ amopfamily => 'brin/bytea_minmax_ops', amoplefttype => 'bytea', + amoprighttype => 'bytea', amopstrategy => '3', amopopr => '=(bytea,bytea)', + amopmethod => 'brin' }, +{ amopfamily => 'brin/bytea_minmax_ops', amoplefttype => 'bytea', + amoprighttype => 'bytea', amopstrategy => '4', amopopr => '>=(bytea,bytea)', + amopmethod => 'brin' }, +{ amopfamily => 'brin/bytea_minmax_ops', amoplefttype => 'bytea', + amoprighttype => 'bytea', amopstrategy => '5', amopopr => '>(bytea,bytea)', + amopmethod => 'brin' }, + +# minmax "char" +{ amopfamily => 'brin/char_minmax_ops', amoplefttype => 'char', + amoprighttype => 'char', amopstrategy => '1', amopopr => '<(char,char)', + amopmethod => 'brin' }, +{ amopfamily => 'brin/char_minmax_ops', amoplefttype => 'char', + amoprighttype => 'char', amopstrategy => '2', amopopr => '<=(char,char)', + amopmethod => 'brin' }, +{ amopfamily => 'brin/char_minmax_ops', amoplefttype => 'char', + amoprighttype => 'char', amopstrategy => '3', amopopr => '=(char,char)', + amopmethod => 'brin' }, +{ amopfamily => 'brin/char_minmax_ops', amoplefttype => 'char', + amoprighttype => 'char', amopstrategy => '4', amopopr => '>=(char,char)', + amopmethod => 'brin' }, +{ amopfamily => 'brin/char_minmax_ops', amoplefttype => 'char', + amoprighttype => 'char', amopstrategy => '5', amopopr => '>(char,char)', + amopmethod => 'brin' }, + +# minmax name +{ amopfamily => 'brin/name_minmax_ops', amoplefttype => 'name', + amoprighttype => 'name', amopstrategy => '1', amopopr => '<(name,name)', + amopmethod => 'brin' }, +{ amopfamily => 'brin/name_minmax_ops', amoplefttype => 'name', + amoprighttype => 'name', amopstrategy => '2', amopopr => '<=(name,name)', + amopmethod => 'brin' }, +{ amopfamily => 'brin/name_minmax_ops', amoplefttype => 'name', + amoprighttype => 'name', amopstrategy => '3', amopopr => '=(name,name)', + amopmethod => 'brin' }, +{ amopfamily => 'brin/name_minmax_ops', amoplefttype => 'name', + amoprighttype => 'name', amopstrategy => '4', amopopr => '>=(name,name)', + amopmethod => 'brin' }, +{ amopfamily => 'brin/name_minmax_ops', amoplefttype => 'name', + amoprighttype => 'name', amopstrategy => '5', amopopr => '>(name,name)', + amopmethod => 'brin' }, + +# minmax integer + +{ amopfamily => 'brin/integer_minmax_ops', amoplefttype => 'int8', + amoprighttype => 'int8', amopstrategy => '1', amopopr => '<(int8,int8)', + amopmethod => 'brin' }, +{ amopfamily => 'brin/integer_minmax_ops', amoplefttype => 'int8', + amoprighttype => 'int8', amopstrategy => '2', amopopr => '<=(int8,int8)', + amopmethod => 'brin' }, +{ amopfamily => 'brin/integer_minmax_ops', amoplefttype => 'int8', + amoprighttype => 
'int8', amopstrategy => '3', amopopr => '=(int8,int8)', + amopmethod => 'brin' }, +{ amopfamily => 'brin/integer_minmax_ops', amoplefttype => 'int8', + amoprighttype => 'int8', amopstrategy => '4', amopopr => '>=(int8,int8)', + amopmethod => 'brin' }, +{ amopfamily => 'brin/integer_minmax_ops', amoplefttype => 'int8', + amoprighttype => 'int8', amopstrategy => '5', amopopr => '>(int8,int8)', + amopmethod => 'brin' }, + +{ amopfamily => 'brin/integer_minmax_ops', amoplefttype => 'int8', + amoprighttype => 'int2', amopstrategy => '1', amopopr => '<(int8,int2)', + amopmethod => 'brin' }, +{ amopfamily => 'brin/integer_minmax_ops', amoplefttype => 'int8', + amoprighttype => 'int2', amopstrategy => '2', amopopr => '<=(int8,int2)', + amopmethod => 'brin' }, +{ amopfamily => 'brin/integer_minmax_ops', amoplefttype => 'int8', + amoprighttype => 'int2', amopstrategy => '3', amopopr => '=(int8,int2)', + amopmethod => 'brin' }, +{ amopfamily => 'brin/integer_minmax_ops', amoplefttype => 'int8', + amoprighttype => 'int2', amopstrategy => '4', amopopr => '>=(int8,int2)', + amopmethod => 'brin' }, +{ amopfamily => 'brin/integer_minmax_ops', amoplefttype => 'int8', + amoprighttype => 'int2', amopstrategy => '5', amopopr => '>(int8,int2)', + amopmethod => 'brin' }, + +{ amopfamily => 'brin/integer_minmax_ops', amoplefttype => 'int8', + amoprighttype => 'int4', amopstrategy => '1', amopopr => '<(int8,int4)', + amopmethod => 'brin' }, +{ amopfamily => 'brin/integer_minmax_ops', amoplefttype => 'int8', + amoprighttype => 'int4', amopstrategy => '2', amopopr => '<=(int8,int4)', + amopmethod => 'brin' }, +{ amopfamily => 'brin/integer_minmax_ops', amoplefttype => 'int8', + amoprighttype => 'int4', amopstrategy => '3', amopopr => '=(int8,int4)', + amopmethod => 'brin' }, +{ amopfamily => 'brin/integer_minmax_ops', amoplefttype => 'int8', + amoprighttype => 'int4', amopstrategy => '4', amopopr => '>=(int8,int4)', + amopmethod => 'brin' }, +{ amopfamily => 'brin/integer_minmax_ops', amoplefttype => 'int8', + amoprighttype => 'int4', amopstrategy => '5', amopopr => '>(int8,int4)', + amopmethod => 'brin' }, + +{ amopfamily => 'brin/integer_minmax_ops', amoplefttype => 'int2', + amoprighttype => 'int2', amopstrategy => '1', amopopr => '<(int2,int2)', + amopmethod => 'brin' }, +{ amopfamily => 'brin/integer_minmax_ops', amoplefttype => 'int2', + amoprighttype => 'int2', amopstrategy => '2', amopopr => '<=(int2,int2)', + amopmethod => 'brin' }, +{ amopfamily => 'brin/integer_minmax_ops', amoplefttype => 'int2', + amoprighttype => 'int2', amopstrategy => '3', amopopr => '=(int2,int2)', + amopmethod => 'brin' }, +{ amopfamily => 'brin/integer_minmax_ops', amoplefttype => 'int2', + amoprighttype => 'int2', amopstrategy => '4', amopopr => '>=(int2,int2)', + amopmethod => 'brin' }, +{ amopfamily => 'brin/integer_minmax_ops', amoplefttype => 'int2', + amoprighttype => 'int2', amopstrategy => '5', amopopr => '>(int2,int2)', + amopmethod => 'brin' }, + +{ amopfamily => 'brin/integer_minmax_ops', amoplefttype => 'int2', + amoprighttype => 'int8', amopstrategy => '1', amopopr => '<(int2,int8)', + amopmethod => 'brin' }, +{ amopfamily => 'brin/integer_minmax_ops', amoplefttype => 'int2', + amoprighttype => 'int8', amopstrategy => '2', amopopr => '<=(int2,int8)', + amopmethod => 'brin' }, +{ amopfamily => 'brin/integer_minmax_ops', amoplefttype => 'int2', + amoprighttype => 'int8', amopstrategy => '3', amopopr => '=(int2,int8)', + amopmethod => 'brin' }, +{ amopfamily => 'brin/integer_minmax_ops', amoplefttype => 'int2', + 
amoprighttype => 'int8', amopstrategy => '4', amopopr => '>=(int2,int8)', + amopmethod => 'brin' }, +{ amopfamily => 'brin/integer_minmax_ops', amoplefttype => 'int2', + amoprighttype => 'int8', amopstrategy => '5', amopopr => '>(int2,int8)', + amopmethod => 'brin' }, + +{ amopfamily => 'brin/integer_minmax_ops', amoplefttype => 'int2', + amoprighttype => 'int4', amopstrategy => '1', amopopr => '<(int2,int4)', + amopmethod => 'brin' }, +{ amopfamily => 'brin/integer_minmax_ops', amoplefttype => 'int2', + amoprighttype => 'int4', amopstrategy => '2', amopopr => '<=(int2,int4)', + amopmethod => 'brin' }, +{ amopfamily => 'brin/integer_minmax_ops', amoplefttype => 'int2', + amoprighttype => 'int4', amopstrategy => '3', amopopr => '=(int2,int4)', + amopmethod => 'brin' }, +{ amopfamily => 'brin/integer_minmax_ops', amoplefttype => 'int2', + amoprighttype => 'int4', amopstrategy => '4', amopopr => '>=(int2,int4)', + amopmethod => 'brin' }, +{ amopfamily => 'brin/integer_minmax_ops', amoplefttype => 'int2', + amoprighttype => 'int4', amopstrategy => '5', amopopr => '>(int2,int4)', + amopmethod => 'brin' }, + +{ amopfamily => 'brin/integer_minmax_ops', amoplefttype => 'int4', + amoprighttype => 'int4', amopstrategy => '1', amopopr => '<(int4,int4)', + amopmethod => 'brin' }, +{ amopfamily => 'brin/integer_minmax_ops', amoplefttype => 'int4', + amoprighttype => 'int4', amopstrategy => '2', amopopr => '<=(int4,int4)', + amopmethod => 'brin' }, +{ amopfamily => 'brin/integer_minmax_ops', amoplefttype => 'int4', + amoprighttype => 'int4', amopstrategy => '3', amopopr => '=(int4,int4)', + amopmethod => 'brin' }, +{ amopfamily => 'brin/integer_minmax_ops', amoplefttype => 'int4', + amoprighttype => 'int4', amopstrategy => '4', amopopr => '>=(int4,int4)', + amopmethod => 'brin' }, +{ amopfamily => 'brin/integer_minmax_ops', amoplefttype => 'int4', + amoprighttype => 'int4', amopstrategy => '5', amopopr => '>(int4,int4)', + amopmethod => 'brin' }, + +{ amopfamily => 'brin/integer_minmax_ops', amoplefttype => 'int4', + amoprighttype => 'int2', amopstrategy => '1', amopopr => '<(int4,int2)', + amopmethod => 'brin' }, +{ amopfamily => 'brin/integer_minmax_ops', amoplefttype => 'int4', + amoprighttype => 'int2', amopstrategy => '2', amopopr => '<=(int4,int2)', + amopmethod => 'brin' }, +{ amopfamily => 'brin/integer_minmax_ops', amoplefttype => 'int4', + amoprighttype => 'int2', amopstrategy => '3', amopopr => '=(int4,int2)', + amopmethod => 'brin' }, +{ amopfamily => 'brin/integer_minmax_ops', amoplefttype => 'int4', + amoprighttype => 'int2', amopstrategy => '4', amopopr => '>=(int4,int2)', + amopmethod => 'brin' }, +{ amopfamily => 'brin/integer_minmax_ops', amoplefttype => 'int4', + amoprighttype => 'int2', amopstrategy => '5', amopopr => '>(int4,int2)', + amopmethod => 'brin' }, + +{ amopfamily => 'brin/integer_minmax_ops', amoplefttype => 'int4', + amoprighttype => 'int8', amopstrategy => '1', amopopr => '<(int4,int8)', + amopmethod => 'brin' }, +{ amopfamily => 'brin/integer_minmax_ops', amoplefttype => 'int4', + amoprighttype => 'int8', amopstrategy => '2', amopopr => '<=(int4,int8)', + amopmethod => 'brin' }, +{ amopfamily => 'brin/integer_minmax_ops', amoplefttype => 'int4', + amoprighttype => 'int8', amopstrategy => '3', amopopr => '=(int4,int8)', + amopmethod => 'brin' }, +{ amopfamily => 'brin/integer_minmax_ops', amoplefttype => 'int4', + amoprighttype => 'int8', amopstrategy => '4', amopopr => '>=(int4,int8)', + amopmethod => 'brin' }, +{ amopfamily => 'brin/integer_minmax_ops', amoplefttype 
=> 'int4', + amoprighttype => 'int8', amopstrategy => '5', amopopr => '>(int4,int8)', + amopmethod => 'brin' }, + +# minmax text +{ amopfamily => 'brin/text_minmax_ops', amoplefttype => 'text', + amoprighttype => 'text', amopstrategy => '1', amopopr => '<(text,text)', + amopmethod => 'brin' }, +{ amopfamily => 'brin/text_minmax_ops', amoplefttype => 'text', + amoprighttype => 'text', amopstrategy => '2', amopopr => '<=(text,text)', + amopmethod => 'brin' }, +{ amopfamily => 'brin/text_minmax_ops', amoplefttype => 'text', + amoprighttype => 'text', amopstrategy => '3', amopopr => '=(text,text)', + amopmethod => 'brin' }, +{ amopfamily => 'brin/text_minmax_ops', amoplefttype => 'text', + amoprighttype => 'text', amopstrategy => '4', amopopr => '>=(text,text)', + amopmethod => 'brin' }, +{ amopfamily => 'brin/text_minmax_ops', amoplefttype => 'text', + amoprighttype => 'text', amopstrategy => '5', amopopr => '>(text,text)', + amopmethod => 'brin' }, + +# minmax oid +{ amopfamily => 'brin/oid_minmax_ops', amoplefttype => 'oid', + amoprighttype => 'oid', amopstrategy => '1', amopopr => '<(oid,oid)', + amopmethod => 'brin' }, +{ amopfamily => 'brin/oid_minmax_ops', amoplefttype => 'oid', + amoprighttype => 'oid', amopstrategy => '2', amopopr => '<=(oid,oid)', + amopmethod => 'brin' }, +{ amopfamily => 'brin/oid_minmax_ops', amoplefttype => 'oid', + amoprighttype => 'oid', amopstrategy => '3', amopopr => '=(oid,oid)', + amopmethod => 'brin' }, +{ amopfamily => 'brin/oid_minmax_ops', amoplefttype => 'oid', + amoprighttype => 'oid', amopstrategy => '4', amopopr => '>=(oid,oid)', + amopmethod => 'brin' }, +{ amopfamily => 'brin/oid_minmax_ops', amoplefttype => 'oid', + amoprighttype => 'oid', amopstrategy => '5', amopopr => '>(oid,oid)', + amopmethod => 'brin' }, + +# minmax tid +{ amopfamily => 'brin/tid_minmax_ops', amoplefttype => 'tid', + amoprighttype => 'tid', amopstrategy => '1', amopopr => '<(tid,tid)', + amopmethod => 'brin' }, +{ amopfamily => 'brin/tid_minmax_ops', amoplefttype => 'tid', + amoprighttype => 'tid', amopstrategy => '2', amopopr => '<=(tid,tid)', + amopmethod => 'brin' }, +{ amopfamily => 'brin/tid_minmax_ops', amoplefttype => 'tid', + amoprighttype => 'tid', amopstrategy => '3', amopopr => '=(tid,tid)', + amopmethod => 'brin' }, +{ amopfamily => 'brin/tid_minmax_ops', amoplefttype => 'tid', + amoprighttype => 'tid', amopstrategy => '4', amopopr => '>=(tid,tid)', + amopmethod => 'brin' }, +{ amopfamily => 'brin/tid_minmax_ops', amoplefttype => 'tid', + amoprighttype => 'tid', amopstrategy => '5', amopopr => '>(tid,tid)', + amopmethod => 'brin' }, + +# minmax float (float4, float8) + +{ amopfamily => 'brin/float_minmax_ops', amoplefttype => 'float4', + amoprighttype => 'float4', amopstrategy => '1', amopopr => '<(float4,float4)', + amopmethod => 'brin' }, +{ amopfamily => 'brin/float_minmax_ops', amoplefttype => 'float4', + amoprighttype => 'float4', amopstrategy => '2', + amopopr => '<=(float4,float4)', amopmethod => 'brin' }, +{ amopfamily => 'brin/float_minmax_ops', amoplefttype => 'float4', + amoprighttype => 'float4', amopstrategy => '3', amopopr => '=(float4,float4)', + amopmethod => 'brin' }, +{ amopfamily => 'brin/float_minmax_ops', amoplefttype => 'float4', + amoprighttype => 'float4', amopstrategy => '4', + amopopr => '>=(float4,float4)', amopmethod => 'brin' }, +{ amopfamily => 'brin/float_minmax_ops', amoplefttype => 'float4', + amoprighttype => 'float4', amopstrategy => '5', amopopr => '>(float4,float4)', + amopmethod => 'brin' }, + +{ amopfamily => 
'brin/float_minmax_ops', amoplefttype => 'float4', + amoprighttype => 'float8', amopstrategy => '1', amopopr => '<(float4,float8)', + amopmethod => 'brin' }, +{ amopfamily => 'brin/float_minmax_ops', amoplefttype => 'float4', + amoprighttype => 'float8', amopstrategy => '2', + amopopr => '<=(float4,float8)', amopmethod => 'brin' }, +{ amopfamily => 'brin/float_minmax_ops', amoplefttype => 'float4', + amoprighttype => 'float8', amopstrategy => '3', amopopr => '=(float4,float8)', + amopmethod => 'brin' }, +{ amopfamily => 'brin/float_minmax_ops', amoplefttype => 'float4', + amoprighttype => 'float8', amopstrategy => '4', + amopopr => '>=(float4,float8)', amopmethod => 'brin' }, +{ amopfamily => 'brin/float_minmax_ops', amoplefttype => 'float4', + amoprighttype => 'float8', amopstrategy => '5', amopopr => '>(float4,float8)', + amopmethod => 'brin' }, + +{ amopfamily => 'brin/float_minmax_ops', amoplefttype => 'float8', + amoprighttype => 'float4', amopstrategy => '1', amopopr => '<(float8,float4)', + amopmethod => 'brin' }, +{ amopfamily => 'brin/float_minmax_ops', amoplefttype => 'float8', + amoprighttype => 'float4', amopstrategy => '2', + amopopr => '<=(float8,float4)', amopmethod => 'brin' }, +{ amopfamily => 'brin/float_minmax_ops', amoplefttype => 'float8', + amoprighttype => 'float4', amopstrategy => '3', amopopr => '=(float8,float4)', + amopmethod => 'brin' }, +{ amopfamily => 'brin/float_minmax_ops', amoplefttype => 'float8', + amoprighttype => 'float4', amopstrategy => '4', + amopopr => '>=(float8,float4)', amopmethod => 'brin' }, +{ amopfamily => 'brin/float_minmax_ops', amoplefttype => 'float8', + amoprighttype => 'float4', amopstrategy => '5', amopopr => '>(float8,float4)', + amopmethod => 'brin' }, + +{ amopfamily => 'brin/float_minmax_ops', amoplefttype => 'float8', + amoprighttype => 'float8', amopstrategy => '1', amopopr => '<(float8,float8)', + amopmethod => 'brin' }, +{ amopfamily => 'brin/float_minmax_ops', amoplefttype => 'float8', + amoprighttype => 'float8', amopstrategy => '2', + amopopr => '<=(float8,float8)', amopmethod => 'brin' }, +{ amopfamily => 'brin/float_minmax_ops', amoplefttype => 'float8', + amoprighttype => 'float8', amopstrategy => '3', amopopr => '=(float8,float8)', + amopmethod => 'brin' }, +{ amopfamily => 'brin/float_minmax_ops', amoplefttype => 'float8', + amoprighttype => 'float8', amopstrategy => '4', + amopopr => '>=(float8,float8)', amopmethod => 'brin' }, +{ amopfamily => 'brin/float_minmax_ops', amoplefttype => 'float8', + amoprighttype => 'float8', amopstrategy => '5', amopopr => '>(float8,float8)', + amopmethod => 'brin' }, + +# minmax macaddr +{ amopfamily => 'brin/macaddr_minmax_ops', amoplefttype => 'macaddr', + amoprighttype => 'macaddr', amopstrategy => '1', + amopopr => '<(macaddr,macaddr)', amopmethod => 'brin' }, +{ amopfamily => 'brin/macaddr_minmax_ops', amoplefttype => 'macaddr', + amoprighttype => 'macaddr', amopstrategy => '2', + amopopr => '<=(macaddr,macaddr)', amopmethod => 'brin' }, +{ amopfamily => 'brin/macaddr_minmax_ops', amoplefttype => 'macaddr', + amoprighttype => 'macaddr', amopstrategy => '3', + amopopr => '=(macaddr,macaddr)', amopmethod => 'brin' }, +{ amopfamily => 'brin/macaddr_minmax_ops', amoplefttype => 'macaddr', + amoprighttype => 'macaddr', amopstrategy => '4', + amopopr => '>=(macaddr,macaddr)', amopmethod => 'brin' }, +{ amopfamily => 'brin/macaddr_minmax_ops', amoplefttype => 'macaddr', + amoprighttype => 'macaddr', amopstrategy => '5', + amopopr => '>(macaddr,macaddr)', amopmethod => 'brin' }, + +# 
minmax macaddr8 +{ amopfamily => 'brin/macaddr8_minmax_ops', amoplefttype => 'macaddr8', + amoprighttype => 'macaddr8', amopstrategy => '1', + amopopr => '<(macaddr8,macaddr8)', amopmethod => 'brin' }, +{ amopfamily => 'brin/macaddr8_minmax_ops', amoplefttype => 'macaddr8', + amoprighttype => 'macaddr8', amopstrategy => '2', + amopopr => '<=(macaddr8,macaddr8)', amopmethod => 'brin' }, +{ amopfamily => 'brin/macaddr8_minmax_ops', amoplefttype => 'macaddr8', + amoprighttype => 'macaddr8', amopstrategy => '3', + amopopr => '=(macaddr8,macaddr8)', amopmethod => 'brin' }, +{ amopfamily => 'brin/macaddr8_minmax_ops', amoplefttype => 'macaddr8', + amoprighttype => 'macaddr8', amopstrategy => '4', + amopopr => '>=(macaddr8,macaddr8)', amopmethod => 'brin' }, +{ amopfamily => 'brin/macaddr8_minmax_ops', amoplefttype => 'macaddr8', + amoprighttype => 'macaddr8', amopstrategy => '5', + amopopr => '>(macaddr8,macaddr8)', amopmethod => 'brin' }, + +# minmax inet +{ amopfamily => 'brin/network_minmax_ops', amoplefttype => 'inet', + amoprighttype => 'inet', amopstrategy => '1', amopopr => '<(inet,inet)', + amopmethod => 'brin' }, +{ amopfamily => 'brin/network_minmax_ops', amoplefttype => 'inet', + amoprighttype => 'inet', amopstrategy => '2', amopopr => '<=(inet,inet)', + amopmethod => 'brin' }, +{ amopfamily => 'brin/network_minmax_ops', amoplefttype => 'inet', + amoprighttype => 'inet', amopstrategy => '3', amopopr => '=(inet,inet)', + amopmethod => 'brin' }, +{ amopfamily => 'brin/network_minmax_ops', amoplefttype => 'inet', + amoprighttype => 'inet', amopstrategy => '4', amopopr => '>=(inet,inet)', + amopmethod => 'brin' }, +{ amopfamily => 'brin/network_minmax_ops', amoplefttype => 'inet', + amoprighttype => 'inet', amopstrategy => '5', amopopr => '>(inet,inet)', + amopmethod => 'brin' }, + +# inclusion inet +{ amopfamily => 'brin/network_inclusion_ops', amoplefttype => 'inet', + amoprighttype => 'inet', amopstrategy => '3', amopopr => '&&(inet,inet)', + amopmethod => 'brin' }, +{ amopfamily => 'brin/network_inclusion_ops', amoplefttype => 'inet', + amoprighttype => 'inet', amopstrategy => '7', amopopr => '>>=(inet,inet)', + amopmethod => 'brin' }, +{ amopfamily => 'brin/network_inclusion_ops', amoplefttype => 'inet', + amoprighttype => 'inet', amopstrategy => '8', amopopr => '<<=(inet,inet)', + amopmethod => 'brin' }, +{ amopfamily => 'brin/network_inclusion_ops', amoplefttype => 'inet', + amoprighttype => 'inet', amopstrategy => '18', amopopr => '=(inet,inet)', + amopmethod => 'brin' }, +{ amopfamily => 'brin/network_inclusion_ops', amoplefttype => 'inet', + amoprighttype => 'inet', amopstrategy => '24', amopopr => '>>(inet,inet)', + amopmethod => 'brin' }, +{ amopfamily => 'brin/network_inclusion_ops', amoplefttype => 'inet', + amoprighttype => 'inet', amopstrategy => '26', amopopr => '<<(inet,inet)', + amopmethod => 'brin' }, + +# minmax character +{ amopfamily => 'brin/bpchar_minmax_ops', amoplefttype => 'bpchar', + amoprighttype => 'bpchar', amopstrategy => '1', amopopr => '<(bpchar,bpchar)', + amopmethod => 'brin' }, +{ amopfamily => 'brin/bpchar_minmax_ops', amoplefttype => 'bpchar', + amoprighttype => 'bpchar', amopstrategy => '2', + amopopr => '<=(bpchar,bpchar)', amopmethod => 'brin' }, +{ amopfamily => 'brin/bpchar_minmax_ops', amoplefttype => 'bpchar', + amoprighttype => 'bpchar', amopstrategy => '3', amopopr => '=(bpchar,bpchar)', + amopmethod => 'brin' }, +{ amopfamily => 'brin/bpchar_minmax_ops', amoplefttype => 'bpchar', + amoprighttype => 'bpchar', amopstrategy => '4', + amopopr 
=> '>=(bpchar,bpchar)', amopmethod => 'brin' }, +{ amopfamily => 'brin/bpchar_minmax_ops', amoplefttype => 'bpchar', + amoprighttype => 'bpchar', amopstrategy => '5', amopopr => '>(bpchar,bpchar)', + amopmethod => 'brin' }, + +# minmax time without time zone +{ amopfamily => 'brin/time_minmax_ops', amoplefttype => 'time', + amoprighttype => 'time', amopstrategy => '1', amopopr => '<(time,time)', + amopmethod => 'brin' }, +{ amopfamily => 'brin/time_minmax_ops', amoplefttype => 'time', + amoprighttype => 'time', amopstrategy => '2', amopopr => '<=(time,time)', + amopmethod => 'brin' }, +{ amopfamily => 'brin/time_minmax_ops', amoplefttype => 'time', + amoprighttype => 'time', amopstrategy => '3', amopopr => '=(time,time)', + amopmethod => 'brin' }, +{ amopfamily => 'brin/time_minmax_ops', amoplefttype => 'time', + amoprighttype => 'time', amopstrategy => '4', amopopr => '>=(time,time)', + amopmethod => 'brin' }, +{ amopfamily => 'brin/time_minmax_ops', amoplefttype => 'time', + amoprighttype => 'time', amopstrategy => '5', amopopr => '>(time,time)', + amopmethod => 'brin' }, + +# minmax datetime (date, timestamp, timestamptz) + +{ amopfamily => 'brin/datetime_minmax_ops', amoplefttype => 'timestamp', + amoprighttype => 'timestamp', amopstrategy => '1', + amopopr => '<(timestamp,timestamp)', amopmethod => 'brin' }, +{ amopfamily => 'brin/datetime_minmax_ops', amoplefttype => 'timestamp', + amoprighttype => 'timestamp', amopstrategy => '2', + amopopr => '<=(timestamp,timestamp)', amopmethod => 'brin' }, +{ amopfamily => 'brin/datetime_minmax_ops', amoplefttype => 'timestamp', + amoprighttype => 'timestamp', amopstrategy => '3', + amopopr => '=(timestamp,timestamp)', amopmethod => 'brin' }, +{ amopfamily => 'brin/datetime_minmax_ops', amoplefttype => 'timestamp', + amoprighttype => 'timestamp', amopstrategy => '4', + amopopr => '>=(timestamp,timestamp)', amopmethod => 'brin' }, +{ amopfamily => 'brin/datetime_minmax_ops', amoplefttype => 'timestamp', + amoprighttype => 'timestamp', amopstrategy => '5', + amopopr => '>(timestamp,timestamp)', amopmethod => 'brin' }, + +{ amopfamily => 'brin/datetime_minmax_ops', amoplefttype => 'timestamp', + amoprighttype => 'date', amopstrategy => '1', amopopr => '<(timestamp,date)', + amopmethod => 'brin' }, +{ amopfamily => 'brin/datetime_minmax_ops', amoplefttype => 'timestamp', + amoprighttype => 'date', amopstrategy => '2', amopopr => '<=(timestamp,date)', + amopmethod => 'brin' }, +{ amopfamily => 'brin/datetime_minmax_ops', amoplefttype => 'timestamp', + amoprighttype => 'date', amopstrategy => '3', amopopr => '=(timestamp,date)', + amopmethod => 'brin' }, +{ amopfamily => 'brin/datetime_minmax_ops', amoplefttype => 'timestamp', + amoprighttype => 'date', amopstrategy => '4', amopopr => '>=(timestamp,date)', + amopmethod => 'brin' }, +{ amopfamily => 'brin/datetime_minmax_ops', amoplefttype => 'timestamp', + amoprighttype => 'date', amopstrategy => '5', amopopr => '>(timestamp,date)', + amopmethod => 'brin' }, + +{ amopfamily => 'brin/datetime_minmax_ops', amoplefttype => 'timestamp', + amoprighttype => 'timestamptz', amopstrategy => '1', + amopopr => '<(timestamp,timestamptz)', amopmethod => 'brin' }, +{ amopfamily => 'brin/datetime_minmax_ops', amoplefttype => 'timestamp', + amoprighttype => 'timestamptz', amopstrategy => '2', + amopopr => '<=(timestamp,timestamptz)', amopmethod => 'brin' }, +{ amopfamily => 'brin/datetime_minmax_ops', amoplefttype => 'timestamp', + amoprighttype => 'timestamptz', amopstrategy => '3', + amopopr => 
'=(timestamp,timestamptz)', amopmethod => 'brin' }, +{ amopfamily => 'brin/datetime_minmax_ops', amoplefttype => 'timestamp', + amoprighttype => 'timestamptz', amopstrategy => '4', + amopopr => '>=(timestamp,timestamptz)', amopmethod => 'brin' }, +{ amopfamily => 'brin/datetime_minmax_ops', amoplefttype => 'timestamp', + amoprighttype => 'timestamptz', amopstrategy => '5', + amopopr => '>(timestamp,timestamptz)', amopmethod => 'brin' }, + +{ amopfamily => 'brin/datetime_minmax_ops', amoplefttype => 'date', + amoprighttype => 'date', amopstrategy => '1', amopopr => '<(date,date)', + amopmethod => 'brin' }, +{ amopfamily => 'brin/datetime_minmax_ops', amoplefttype => 'date', + amoprighttype => 'date', amopstrategy => '2', amopopr => '<=(date,date)', + amopmethod => 'brin' }, +{ amopfamily => 'brin/datetime_minmax_ops', amoplefttype => 'date', + amoprighttype => 'date', amopstrategy => '3', amopopr => '=(date,date)', + amopmethod => 'brin' }, +{ amopfamily => 'brin/datetime_minmax_ops', amoplefttype => 'date', + amoprighttype => 'date', amopstrategy => '4', amopopr => '>=(date,date)', + amopmethod => 'brin' }, +{ amopfamily => 'brin/datetime_minmax_ops', amoplefttype => 'date', + amoprighttype => 'date', amopstrategy => '5', amopopr => '>(date,date)', + amopmethod => 'brin' }, + +{ amopfamily => 'brin/datetime_minmax_ops', amoplefttype => 'date', + amoprighttype => 'timestamp', amopstrategy => '1', + amopopr => '<(date,timestamp)', amopmethod => 'brin' }, +{ amopfamily => 'brin/datetime_minmax_ops', amoplefttype => 'date', + amoprighttype => 'timestamp', amopstrategy => '2', + amopopr => '<=(date,timestamp)', amopmethod => 'brin' }, +{ amopfamily => 'brin/datetime_minmax_ops', amoplefttype => 'date', + amoprighttype => 'timestamp', amopstrategy => '3', + amopopr => '=(date,timestamp)', amopmethod => 'brin' }, +{ amopfamily => 'brin/datetime_minmax_ops', amoplefttype => 'date', + amoprighttype => 'timestamp', amopstrategy => '4', + amopopr => '>=(date,timestamp)', amopmethod => 'brin' }, +{ amopfamily => 'brin/datetime_minmax_ops', amoplefttype => 'date', + amoprighttype => 'timestamp', amopstrategy => '5', + amopopr => '>(date,timestamp)', amopmethod => 'brin' }, + +{ amopfamily => 'brin/datetime_minmax_ops', amoplefttype => 'date', + amoprighttype => 'timestamptz', amopstrategy => '1', + amopopr => '<(date,timestamptz)', amopmethod => 'brin' }, +{ amopfamily => 'brin/datetime_minmax_ops', amoplefttype => 'date', + amoprighttype => 'timestamptz', amopstrategy => '2', + amopopr => '<=(date,timestamptz)', amopmethod => 'brin' }, +{ amopfamily => 'brin/datetime_minmax_ops', amoplefttype => 'date', + amoprighttype => 'timestamptz', amopstrategy => '3', + amopopr => '=(date,timestamptz)', amopmethod => 'brin' }, +{ amopfamily => 'brin/datetime_minmax_ops', amoplefttype => 'date', + amoprighttype => 'timestamptz', amopstrategy => '4', + amopopr => '>=(date,timestamptz)', amopmethod => 'brin' }, +{ amopfamily => 'brin/datetime_minmax_ops', amoplefttype => 'date', + amoprighttype => 'timestamptz', amopstrategy => '5', + amopopr => '>(date,timestamptz)', amopmethod => 'brin' }, + +{ amopfamily => 'brin/datetime_minmax_ops', amoplefttype => 'timestamptz', + amoprighttype => 'date', amopstrategy => '1', + amopopr => '<(timestamptz,date)', amopmethod => 'brin' }, +{ amopfamily => 'brin/datetime_minmax_ops', amoplefttype => 'timestamptz', + amoprighttype => 'date', amopstrategy => '2', + amopopr => '<=(timestamptz,date)', amopmethod => 'brin' }, +{ amopfamily => 'brin/datetime_minmax_ops', amoplefttype => 
'timestamptz', + amoprighttype => 'date', amopstrategy => '3', + amopopr => '=(timestamptz,date)', amopmethod => 'brin' }, +{ amopfamily => 'brin/datetime_minmax_ops', amoplefttype => 'timestamptz', + amoprighttype => 'date', amopstrategy => '4', + amopopr => '>=(timestamptz,date)', amopmethod => 'brin' }, +{ amopfamily => 'brin/datetime_minmax_ops', amoplefttype => 'timestamptz', + amoprighttype => 'date', amopstrategy => '5', + amopopr => '>(timestamptz,date)', amopmethod => 'brin' }, + +{ amopfamily => 'brin/datetime_minmax_ops', amoplefttype => 'timestamptz', + amoprighttype => 'timestamp', amopstrategy => '1', + amopopr => '<(timestamptz,timestamp)', amopmethod => 'brin' }, +{ amopfamily => 'brin/datetime_minmax_ops', amoplefttype => 'timestamptz', + amoprighttype => 'timestamp', amopstrategy => '2', + amopopr => '<=(timestamptz,timestamp)', amopmethod => 'brin' }, +{ amopfamily => 'brin/datetime_minmax_ops', amoplefttype => 'timestamptz', + amoprighttype => 'timestamp', amopstrategy => '3', + amopopr => '=(timestamptz,timestamp)', amopmethod => 'brin' }, +{ amopfamily => 'brin/datetime_minmax_ops', amoplefttype => 'timestamptz', + amoprighttype => 'timestamp', amopstrategy => '4', + amopopr => '>=(timestamptz,timestamp)', amopmethod => 'brin' }, +{ amopfamily => 'brin/datetime_minmax_ops', amoplefttype => 'timestamptz', + amoprighttype => 'timestamp', amopstrategy => '5', + amopopr => '>(timestamptz,timestamp)', amopmethod => 'brin' }, + +{ amopfamily => 'brin/datetime_minmax_ops', amoplefttype => 'timestamptz', + amoprighttype => 'timestamptz', amopstrategy => '1', + amopopr => '<(timestamptz,timestamptz)', amopmethod => 'brin' }, +{ amopfamily => 'brin/datetime_minmax_ops', amoplefttype => 'timestamptz', + amoprighttype => 'timestamptz', amopstrategy => '2', + amopopr => '<=(timestamptz,timestamptz)', amopmethod => 'brin' }, +{ amopfamily => 'brin/datetime_minmax_ops', amoplefttype => 'timestamptz', + amoprighttype => 'timestamptz', amopstrategy => '3', + amopopr => '=(timestamptz,timestamptz)', amopmethod => 'brin' }, +{ amopfamily => 'brin/datetime_minmax_ops', amoplefttype => 'timestamptz', + amoprighttype => 'timestamptz', amopstrategy => '4', + amopopr => '>=(timestamptz,timestamptz)', amopmethod => 'brin' }, +{ amopfamily => 'brin/datetime_minmax_ops', amoplefttype => 'timestamptz', + amoprighttype => 'timestamptz', amopstrategy => '5', + amopopr => '>(timestamptz,timestamptz)', amopmethod => 'brin' }, + +# minmax interval +{ amopfamily => 'brin/interval_minmax_ops', amoplefttype => 'interval', + amoprighttype => 'interval', amopstrategy => '1', + amopopr => '<(interval,interval)', amopmethod => 'brin' }, +{ amopfamily => 'brin/interval_minmax_ops', amoplefttype => 'interval', + amoprighttype => 'interval', amopstrategy => '2', + amopopr => '<=(interval,interval)', amopmethod => 'brin' }, +{ amopfamily => 'brin/interval_minmax_ops', amoplefttype => 'interval', + amoprighttype => 'interval', amopstrategy => '3', + amopopr => '=(interval,interval)', amopmethod => 'brin' }, +{ amopfamily => 'brin/interval_minmax_ops', amoplefttype => 'interval', + amoprighttype => 'interval', amopstrategy => '4', + amopopr => '>=(interval,interval)', amopmethod => 'brin' }, +{ amopfamily => 'brin/interval_minmax_ops', amoplefttype => 'interval', + amoprighttype => 'interval', amopstrategy => '5', + amopopr => '>(interval,interval)', amopmethod => 'brin' }, + +# minmax time with time zone +{ amopfamily => 'brin/timetz_minmax_ops', amoplefttype => 'timetz', + amoprighttype => 'timetz', 
amopstrategy => '1', amopopr => '<(timetz,timetz)', + amopmethod => 'brin' }, +{ amopfamily => 'brin/timetz_minmax_ops', amoplefttype => 'timetz', + amoprighttype => 'timetz', amopstrategy => '2', + amopopr => '<=(timetz,timetz)', amopmethod => 'brin' }, +{ amopfamily => 'brin/timetz_minmax_ops', amoplefttype => 'timetz', + amoprighttype => 'timetz', amopstrategy => '3', amopopr => '=(timetz,timetz)', + amopmethod => 'brin' }, +{ amopfamily => 'brin/timetz_minmax_ops', amoplefttype => 'timetz', + amoprighttype => 'timetz', amopstrategy => '4', + amopopr => '>=(timetz,timetz)', amopmethod => 'brin' }, +{ amopfamily => 'brin/timetz_minmax_ops', amoplefttype => 'timetz', + amoprighttype => 'timetz', amopstrategy => '5', amopopr => '>(timetz,timetz)', + amopmethod => 'brin' }, + +# minmax bit +{ amopfamily => 'brin/bit_minmax_ops', amoplefttype => 'bit', + amoprighttype => 'bit', amopstrategy => '1', amopopr => '<(bit,bit)', + amopmethod => 'brin' }, +{ amopfamily => 'brin/bit_minmax_ops', amoplefttype => 'bit', + amoprighttype => 'bit', amopstrategy => '2', amopopr => '<=(bit,bit)', + amopmethod => 'brin' }, +{ amopfamily => 'brin/bit_minmax_ops', amoplefttype => 'bit', + amoprighttype => 'bit', amopstrategy => '3', amopopr => '=(bit,bit)', + amopmethod => 'brin' }, +{ amopfamily => 'brin/bit_minmax_ops', amoplefttype => 'bit', + amoprighttype => 'bit', amopstrategy => '4', amopopr => '>=(bit,bit)', + amopmethod => 'brin' }, +{ amopfamily => 'brin/bit_minmax_ops', amoplefttype => 'bit', + amoprighttype => 'bit', amopstrategy => '5', amopopr => '>(bit,bit)', + amopmethod => 'brin' }, + +# minmax bit varying +{ amopfamily => 'brin/varbit_minmax_ops', amoplefttype => 'varbit', + amoprighttype => 'varbit', amopstrategy => '1', amopopr => '<(varbit,varbit)', + amopmethod => 'brin' }, +{ amopfamily => 'brin/varbit_minmax_ops', amoplefttype => 'varbit', + amoprighttype => 'varbit', amopstrategy => '2', + amopopr => '<=(varbit,varbit)', amopmethod => 'brin' }, +{ amopfamily => 'brin/varbit_minmax_ops', amoplefttype => 'varbit', + amoprighttype => 'varbit', amopstrategy => '3', amopopr => '=(varbit,varbit)', + amopmethod => 'brin' }, +{ amopfamily => 'brin/varbit_minmax_ops', amoplefttype => 'varbit', + amoprighttype => 'varbit', amopstrategy => '4', + amopopr => '>=(varbit,varbit)', amopmethod => 'brin' }, +{ amopfamily => 'brin/varbit_minmax_ops', amoplefttype => 'varbit', + amoprighttype => 'varbit', amopstrategy => '5', amopopr => '>(varbit,varbit)', + amopmethod => 'brin' }, + +# minmax numeric +{ amopfamily => 'brin/numeric_minmax_ops', amoplefttype => 'numeric', + amoprighttype => 'numeric', amopstrategy => '1', + amopopr => '<(numeric,numeric)', amopmethod => 'brin' }, +{ amopfamily => 'brin/numeric_minmax_ops', amoplefttype => 'numeric', + amoprighttype => 'numeric', amopstrategy => '2', + amopopr => '<=(numeric,numeric)', amopmethod => 'brin' }, +{ amopfamily => 'brin/numeric_minmax_ops', amoplefttype => 'numeric', + amoprighttype => 'numeric', amopstrategy => '3', + amopopr => '=(numeric,numeric)', amopmethod => 'brin' }, +{ amopfamily => 'brin/numeric_minmax_ops', amoplefttype => 'numeric', + amoprighttype => 'numeric', amopstrategy => '4', + amopopr => '>=(numeric,numeric)', amopmethod => 'brin' }, +{ amopfamily => 'brin/numeric_minmax_ops', amoplefttype => 'numeric', + amoprighttype => 'numeric', amopstrategy => '5', + amopopr => '>(numeric,numeric)', amopmethod => 'brin' }, + +# minmax uuid +{ amopfamily => 'brin/uuid_minmax_ops', amoplefttype => 'uuid', + amoprighttype => 'uuid', 
amopstrategy => '1', amopopr => '<(uuid,uuid)', + amopmethod => 'brin' }, +{ amopfamily => 'brin/uuid_minmax_ops', amoplefttype => 'uuid', + amoprighttype => 'uuid', amopstrategy => '2', amopopr => '<=(uuid,uuid)', + amopmethod => 'brin' }, +{ amopfamily => 'brin/uuid_minmax_ops', amoplefttype => 'uuid', + amoprighttype => 'uuid', amopstrategy => '3', amopopr => '=(uuid,uuid)', + amopmethod => 'brin' }, +{ amopfamily => 'brin/uuid_minmax_ops', amoplefttype => 'uuid', + amoprighttype => 'uuid', amopstrategy => '4', amopopr => '>=(uuid,uuid)', + amopmethod => 'brin' }, +{ amopfamily => 'brin/uuid_minmax_ops', amoplefttype => 'uuid', + amoprighttype => 'uuid', amopstrategy => '5', amopopr => '>(uuid,uuid)', + amopmethod => 'brin' }, + +# inclusion range types +{ amopfamily => 'brin/range_inclusion_ops', amoplefttype => 'anyrange', + amoprighttype => 'anyrange', amopstrategy => '1', + amopopr => '<<(anyrange,anyrange)', amopmethod => 'brin' }, +{ amopfamily => 'brin/range_inclusion_ops', amoplefttype => 'anyrange', + amoprighttype => 'anyrange', amopstrategy => '2', + amopopr => '&<(anyrange,anyrange)', amopmethod => 'brin' }, +{ amopfamily => 'brin/range_inclusion_ops', amoplefttype => 'anyrange', + amoprighttype => 'anyrange', amopstrategy => '3', + amopopr => '&&(anyrange,anyrange)', amopmethod => 'brin' }, +{ amopfamily => 'brin/range_inclusion_ops', amoplefttype => 'anyrange', + amoprighttype => 'anyrange', amopstrategy => '4', + amopopr => '&>(anyrange,anyrange)', amopmethod => 'brin' }, +{ amopfamily => 'brin/range_inclusion_ops', amoplefttype => 'anyrange', + amoprighttype => 'anyrange', amopstrategy => '5', + amopopr => '>>(anyrange,anyrange)', amopmethod => 'brin' }, +{ amopfamily => 'brin/range_inclusion_ops', amoplefttype => 'anyrange', + amoprighttype => 'anyrange', amopstrategy => '7', + amopopr => '@>(anyrange,anyrange)', amopmethod => 'brin' }, +{ amopfamily => 'brin/range_inclusion_ops', amoplefttype => 'anyrange', + amoprighttype => 'anyrange', amopstrategy => '8', + amopopr => '<@(anyrange,anyrange)', amopmethod => 'brin' }, +{ amopfamily => 'brin/range_inclusion_ops', amoplefttype => 'anyrange', + amoprighttype => 'anyelement', amopstrategy => '16', + amopopr => '@>(anyrange,anyelement)', amopmethod => 'brin' }, +{ amopfamily => 'brin/range_inclusion_ops', amoplefttype => 'anyrange', + amoprighttype => 'anyrange', amopstrategy => '17', + amopopr => '-|-(anyrange,anyrange)', amopmethod => 'brin' }, +{ amopfamily => 'brin/range_inclusion_ops', amoplefttype => 'anyrange', + amoprighttype => 'anyrange', amopstrategy => '18', + amopopr => '=(anyrange,anyrange)', amopmethod => 'brin' }, +{ amopfamily => 'brin/range_inclusion_ops', amoplefttype => 'anyrange', + amoprighttype => 'anyrange', amopstrategy => '20', + amopopr => '<(anyrange,anyrange)', amopmethod => 'brin' }, +{ amopfamily => 'brin/range_inclusion_ops', amoplefttype => 'anyrange', + amoprighttype => 'anyrange', amopstrategy => '21', + amopopr => '<=(anyrange,anyrange)', amopmethod => 'brin' }, +{ amopfamily => 'brin/range_inclusion_ops', amoplefttype => 'anyrange', + amoprighttype => 'anyrange', amopstrategy => '22', + amopopr => '>(anyrange,anyrange)', amopmethod => 'brin' }, +{ amopfamily => 'brin/range_inclusion_ops', amoplefttype => 'anyrange', + amoprighttype => 'anyrange', amopstrategy => '23', + amopopr => '>=(anyrange,anyrange)', amopmethod => 'brin' }, + +# minmax pg_lsn +{ amopfamily => 'brin/pg_lsn_minmax_ops', amoplefttype => 'pg_lsn', + amoprighttype => 'pg_lsn', amopstrategy => '1', amopopr => 
'<(pg_lsn,pg_lsn)', + amopmethod => 'brin' }, +{ amopfamily => 'brin/pg_lsn_minmax_ops', amoplefttype => 'pg_lsn', + amoprighttype => 'pg_lsn', amopstrategy => '2', + amopopr => '<=(pg_lsn,pg_lsn)', amopmethod => 'brin' }, +{ amopfamily => 'brin/pg_lsn_minmax_ops', amoplefttype => 'pg_lsn', + amoprighttype => 'pg_lsn', amopstrategy => '3', amopopr => '=(pg_lsn,pg_lsn)', + amopmethod => 'brin' }, +{ amopfamily => 'brin/pg_lsn_minmax_ops', amoplefttype => 'pg_lsn', + amoprighttype => 'pg_lsn', amopstrategy => '4', + amopopr => '>=(pg_lsn,pg_lsn)', amopmethod => 'brin' }, +{ amopfamily => 'brin/pg_lsn_minmax_ops', amoplefttype => 'pg_lsn', + amoprighttype => 'pg_lsn', amopstrategy => '5', amopopr => '>(pg_lsn,pg_lsn)', + amopmethod => 'brin' }, + +# inclusion box +{ amopfamily => 'brin/box_inclusion_ops', amoplefttype => 'box', + amoprighttype => 'box', amopstrategy => '1', amopopr => '<<(box,box)', + amopmethod => 'brin' }, +{ amopfamily => 'brin/box_inclusion_ops', amoplefttype => 'box', + amoprighttype => 'box', amopstrategy => '2', amopopr => '&<(box,box)', + amopmethod => 'brin' }, +{ amopfamily => 'brin/box_inclusion_ops', amoplefttype => 'box', + amoprighttype => 'box', amopstrategy => '3', amopopr => '&&(box,box)', + amopmethod => 'brin' }, +{ amopfamily => 'brin/box_inclusion_ops', amoplefttype => 'box', + amoprighttype => 'box', amopstrategy => '4', amopopr => '&>(box,box)', + amopmethod => 'brin' }, +{ amopfamily => 'brin/box_inclusion_ops', amoplefttype => 'box', + amoprighttype => 'box', amopstrategy => '5', amopopr => '>>(box,box)', + amopmethod => 'brin' }, +{ amopfamily => 'brin/box_inclusion_ops', amoplefttype => 'box', + amoprighttype => 'box', amopstrategy => '6', amopopr => '~=(box,box)', + amopmethod => 'brin' }, +{ amopfamily => 'brin/box_inclusion_ops', amoplefttype => 'box', + amoprighttype => 'box', amopstrategy => '7', amopopr => '@>(box,box)', + amopmethod => 'brin' }, +{ amopfamily => 'brin/box_inclusion_ops', amoplefttype => 'box', + amoprighttype => 'box', amopstrategy => '8', amopopr => '<@(box,box)', + amopmethod => 'brin' }, +{ amopfamily => 'brin/box_inclusion_ops', amoplefttype => 'box', + amoprighttype => 'box', amopstrategy => '9', amopopr => '&<|(box,box)', + amopmethod => 'brin' }, +{ amopfamily => 'brin/box_inclusion_ops', amoplefttype => 'box', + amoprighttype => 'box', amopstrategy => '10', amopopr => '<<|(box,box)', + amopmethod => 'brin' }, +{ amopfamily => 'brin/box_inclusion_ops', amoplefttype => 'box', + amoprighttype => 'box', amopstrategy => '11', amopopr => '|>>(box,box)', + amopmethod => 'brin' }, +{ amopfamily => 'brin/box_inclusion_ops', amoplefttype => 'box', + amoprighttype => 'box', amopstrategy => '12', amopopr => '|&>(box,box)', + amopmethod => 'brin' }, + +# we could, but choose not to, supply entries for strategies 13 and 14 + +{ amopfamily => 'brin/box_inclusion_ops', amoplefttype => 'box', + amoprighttype => 'point', amopstrategy => '7', amopopr => '@>(box,point)', + amopmethod => 'brin' }, + +] diff --git a/src/include/catalog/pg_amop.h b/src/include/catalog/pg_amop.h index f850be490a..8e6f740887 100644 --- a/src/include/catalog/pg_amop.h +++ b/src/include/catalog/pg_amop.h @@ -1,8 +1,7 @@ /*------------------------------------------------------------------------- * * pg_amop.h - * definition of the system "amop" relation (pg_amop) - * along with the relation's initial contents. 
+ * definition of the "access method operator" system catalog (pg_amop) * * The amop table identifies the operators associated with each index operator * family and operator class (classes are subsets of families). An associated @@ -30,14 +29,14 @@ * intentional denormalization of the catalogs to buy lookup speed. * * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/include/catalog/pg_amop.h * * NOTES - * the genbki.pl script reads this file and generates .bki - * information from the DATA() statements. + * The Catalog.pm module reads this file and derives schema + * information. * *------------------------------------------------------------------------- */ @@ -45,1121 +44,53 @@ #define PG_AMOP_H #include "catalog/genbki.h" +#include "catalog/pg_amop_d.h" /* ---------------- * pg_amop definition. cpp turns this into * typedef struct FormData_pg_amop * ---------------- */ -#define AccessMethodOperatorRelationId 2602 - -CATALOG(pg_amop,2602) +CATALOG(pg_amop,2602,AccessMethodOperatorRelationId) { - Oid amopfamily; /* the index opfamily this entry is for */ - Oid amoplefttype; /* operator's left input data type */ - Oid amoprighttype; /* operator's right input data type */ - int16 amopstrategy; /* operator strategy number */ - char amoppurpose; /* is operator for 's'earch or 'o'rdering? */ - Oid amopopr; /* the operator's pg_operator OID */ - Oid amopmethod; /* the index access method this entry is for */ - Oid amopsortfamily; /* ordering opfamily OID, or 0 if search op */ -} FormData_pg_amop; - -/* allowed values of amoppurpose: */ -#define AMOP_SEARCH 's' /* operator is for search */ -#define AMOP_ORDER 'o' /* operator is for ordering */ - -/* ---------------- - * Form_pg_amop corresponds to a pointer to a tuple with - * the format of pg_amop relation. 
- * ---------------- - */ -typedef FormData_pg_amop *Form_pg_amop; - -/* ---------------- - * compiler constants for pg_amop - * ---------------- - */ -#define Natts_pg_amop 8 -#define Anum_pg_amop_amopfamily 1 -#define Anum_pg_amop_amoplefttype 2 -#define Anum_pg_amop_amoprighttype 3 -#define Anum_pg_amop_amopstrategy 4 -#define Anum_pg_amop_amoppurpose 5 -#define Anum_pg_amop_amopopr 6 -#define Anum_pg_amop_amopmethod 7 -#define Anum_pg_amop_amopsortfamily 8 - -/* ---------------- - * initial contents of pg_amop - * ---------------- - */ - -/* - * btree integer_ops - */ - -/* default operators int2 */ -DATA(insert ( 1976 21 21 1 s 95 403 0 )); -DATA(insert ( 1976 21 21 2 s 522 403 0 )); -DATA(insert ( 1976 21 21 3 s 94 403 0 )); -DATA(insert ( 1976 21 21 4 s 524 403 0 )); -DATA(insert ( 1976 21 21 5 s 520 403 0 )); -/* crosstype operators int24 */ -DATA(insert ( 1976 21 23 1 s 534 403 0 )); -DATA(insert ( 1976 21 23 2 s 540 403 0 )); -DATA(insert ( 1976 21 23 3 s 532 403 0 )); -DATA(insert ( 1976 21 23 4 s 542 403 0 )); -DATA(insert ( 1976 21 23 5 s 536 403 0 )); -/* crosstype operators int28 */ -DATA(insert ( 1976 21 20 1 s 1864 403 0 )); -DATA(insert ( 1976 21 20 2 s 1866 403 0 )); -DATA(insert ( 1976 21 20 3 s 1862 403 0 )); -DATA(insert ( 1976 21 20 4 s 1867 403 0 )); -DATA(insert ( 1976 21 20 5 s 1865 403 0 )); -/* default operators int4 */ -DATA(insert ( 1976 23 23 1 s 97 403 0 )); -DATA(insert ( 1976 23 23 2 s 523 403 0 )); -DATA(insert ( 1976 23 23 3 s 96 403 0 )); -DATA(insert ( 1976 23 23 4 s 525 403 0 )); -DATA(insert ( 1976 23 23 5 s 521 403 0 )); -/* crosstype operators int42 */ -DATA(insert ( 1976 23 21 1 s 535 403 0 )); -DATA(insert ( 1976 23 21 2 s 541 403 0 )); -DATA(insert ( 1976 23 21 3 s 533 403 0 )); -DATA(insert ( 1976 23 21 4 s 543 403 0 )); -DATA(insert ( 1976 23 21 5 s 537 403 0 )); -/* crosstype operators int48 */ -DATA(insert ( 1976 23 20 1 s 37 403 0 )); -DATA(insert ( 1976 23 20 2 s 80 403 0 )); -DATA(insert ( 1976 23 20 3 s 15 403 0 )); -DATA(insert ( 1976 23 20 4 s 82 403 0 )); -DATA(insert ( 1976 23 20 5 s 76 403 0 )); -/* default operators int8 */ -DATA(insert ( 1976 20 20 1 s 412 403 0 )); -DATA(insert ( 1976 20 20 2 s 414 403 0 )); -DATA(insert ( 1976 20 20 3 s 410 403 0 )); -DATA(insert ( 1976 20 20 4 s 415 403 0 )); -DATA(insert ( 1976 20 20 5 s 413 403 0 )); -/* crosstype operators int82 */ -DATA(insert ( 1976 20 21 1 s 1870 403 0 )); -DATA(insert ( 1976 20 21 2 s 1872 403 0 )); -DATA(insert ( 1976 20 21 3 s 1868 403 0 )); -DATA(insert ( 1976 20 21 4 s 1873 403 0 )); -DATA(insert ( 1976 20 21 5 s 1871 403 0 )); -/* crosstype operators int84 */ -DATA(insert ( 1976 20 23 1 s 418 403 0 )); -DATA(insert ( 1976 20 23 2 s 420 403 0 )); -DATA(insert ( 1976 20 23 3 s 416 403 0 )); -DATA(insert ( 1976 20 23 4 s 430 403 0 )); -DATA(insert ( 1976 20 23 5 s 419 403 0 )); - -/* - * btree oid_ops - */ - -DATA(insert ( 1989 26 26 1 s 609 403 0 )); -DATA(insert ( 1989 26 26 2 s 611 403 0 )); -DATA(insert ( 1989 26 26 3 s 607 403 0 )); -DATA(insert ( 1989 26 26 4 s 612 403 0 )); -DATA(insert ( 1989 26 26 5 s 610 403 0 )); - -/* - * btree tid_ops - */ - -DATA(insert ( 2789 27 27 1 s 2799 403 0 )); -DATA(insert ( 2789 27 27 2 s 2801 403 0 )); -DATA(insert ( 2789 27 27 3 s 387 403 0 )); -DATA(insert ( 2789 27 27 4 s 2802 403 0 )); -DATA(insert ( 2789 27 27 5 s 2800 403 0 )); - -/* - * btree oidvector_ops - */ - -DATA(insert ( 1991 30 30 1 s 645 403 0 )); -DATA(insert ( 1991 30 30 2 s 647 403 0 )); -DATA(insert ( 1991 30 30 3 s 649 403 0 )); -DATA(insert ( 1991 30 30 4 s 
648 403 0 )); -DATA(insert ( 1991 30 30 5 s 646 403 0 )); - -/* - * btree float_ops - */ - -/* default operators float4 */ -DATA(insert ( 1970 700 700 1 s 622 403 0 )); -DATA(insert ( 1970 700 700 2 s 624 403 0 )); -DATA(insert ( 1970 700 700 3 s 620 403 0 )); -DATA(insert ( 1970 700 700 4 s 625 403 0 )); -DATA(insert ( 1970 700 700 5 s 623 403 0 )); -/* crosstype operators float48 */ -DATA(insert ( 1970 700 701 1 s 1122 403 0 )); -DATA(insert ( 1970 700 701 2 s 1124 403 0 )); -DATA(insert ( 1970 700 701 3 s 1120 403 0 )); -DATA(insert ( 1970 700 701 4 s 1125 403 0 )); -DATA(insert ( 1970 700 701 5 s 1123 403 0 )); -/* default operators float8 */ -DATA(insert ( 1970 701 701 1 s 672 403 0 )); -DATA(insert ( 1970 701 701 2 s 673 403 0 )); -DATA(insert ( 1970 701 701 3 s 670 403 0 )); -DATA(insert ( 1970 701 701 4 s 675 403 0 )); -DATA(insert ( 1970 701 701 5 s 674 403 0 )); -/* crosstype operators float84 */ -DATA(insert ( 1970 701 700 1 s 1132 403 0 )); -DATA(insert ( 1970 701 700 2 s 1134 403 0 )); -DATA(insert ( 1970 701 700 3 s 1130 403 0 )); -DATA(insert ( 1970 701 700 4 s 1135 403 0 )); -DATA(insert ( 1970 701 700 5 s 1133 403 0 )); - -/* - * btree char_ops - */ - -DATA(insert ( 429 18 18 1 s 631 403 0 )); -DATA(insert ( 429 18 18 2 s 632 403 0 )); -DATA(insert ( 429 18 18 3 s 92 403 0 )); -DATA(insert ( 429 18 18 4 s 634 403 0 )); -DATA(insert ( 429 18 18 5 s 633 403 0 )); - -/* - * btree name_ops - */ - -DATA(insert ( 1986 19 19 1 s 660 403 0 )); -DATA(insert ( 1986 19 19 2 s 661 403 0 )); -DATA(insert ( 1986 19 19 3 s 93 403 0 )); -DATA(insert ( 1986 19 19 4 s 663 403 0 )); -DATA(insert ( 1986 19 19 5 s 662 403 0 )); - -/* - * btree text_ops - */ - -DATA(insert ( 1994 25 25 1 s 664 403 0 )); -DATA(insert ( 1994 25 25 2 s 665 403 0 )); -DATA(insert ( 1994 25 25 3 s 98 403 0 )); -DATA(insert ( 1994 25 25 4 s 667 403 0 )); -DATA(insert ( 1994 25 25 5 s 666 403 0 )); - -/* - * btree bpchar_ops - */ - -DATA(insert ( 426 1042 1042 1 s 1058 403 0 )); -DATA(insert ( 426 1042 1042 2 s 1059 403 0 )); -DATA(insert ( 426 1042 1042 3 s 1054 403 0 )); -DATA(insert ( 426 1042 1042 4 s 1061 403 0 )); -DATA(insert ( 426 1042 1042 5 s 1060 403 0 )); - -/* - * btree bytea_ops - */ - -DATA(insert ( 428 17 17 1 s 1957 403 0 )); -DATA(insert ( 428 17 17 2 s 1958 403 0 )); -DATA(insert ( 428 17 17 3 s 1955 403 0 )); -DATA(insert ( 428 17 17 4 s 1960 403 0 )); -DATA(insert ( 428 17 17 5 s 1959 403 0 )); - -/* - * btree abstime_ops - */ - -DATA(insert ( 421 702 702 1 s 562 403 0 )); -DATA(insert ( 421 702 702 2 s 564 403 0 )); -DATA(insert ( 421 702 702 3 s 560 403 0 )); -DATA(insert ( 421 702 702 4 s 565 403 0 )); -DATA(insert ( 421 702 702 5 s 563 403 0 )); - -/* - * btree datetime_ops - */ - -/* default operators date */ -DATA(insert ( 434 1082 1082 1 s 1095 403 0 )); -DATA(insert ( 434 1082 1082 2 s 1096 403 0 )); -DATA(insert ( 434 1082 1082 3 s 1093 403 0 )); -DATA(insert ( 434 1082 1082 4 s 1098 403 0 )); -DATA(insert ( 434 1082 1082 5 s 1097 403 0 )); -/* crosstype operators vs timestamp */ -DATA(insert ( 434 1082 1114 1 s 2345 403 0 )); -DATA(insert ( 434 1082 1114 2 s 2346 403 0 )); -DATA(insert ( 434 1082 1114 3 s 2347 403 0 )); -DATA(insert ( 434 1082 1114 4 s 2348 403 0 )); -DATA(insert ( 434 1082 1114 5 s 2349 403 0 )); -/* crosstype operators vs timestamptz */ -DATA(insert ( 434 1082 1184 1 s 2358 403 0 )); -DATA(insert ( 434 1082 1184 2 s 2359 403 0 )); -DATA(insert ( 434 1082 1184 3 s 2360 403 0 )); -DATA(insert ( 434 1082 1184 4 s 2361 403 0 )); -DATA(insert ( 434 1082 1184 5 s 2362 403 0 
)); -/* default operators timestamp */ -DATA(insert ( 434 1114 1114 1 s 2062 403 0 )); -DATA(insert ( 434 1114 1114 2 s 2063 403 0 )); -DATA(insert ( 434 1114 1114 3 s 2060 403 0 )); -DATA(insert ( 434 1114 1114 4 s 2065 403 0 )); -DATA(insert ( 434 1114 1114 5 s 2064 403 0 )); -/* crosstype operators vs date */ -DATA(insert ( 434 1114 1082 1 s 2371 403 0 )); -DATA(insert ( 434 1114 1082 2 s 2372 403 0 )); -DATA(insert ( 434 1114 1082 3 s 2373 403 0 )); -DATA(insert ( 434 1114 1082 4 s 2374 403 0 )); -DATA(insert ( 434 1114 1082 5 s 2375 403 0 )); -/* crosstype operators vs timestamptz */ -DATA(insert ( 434 1114 1184 1 s 2534 403 0 )); -DATA(insert ( 434 1114 1184 2 s 2535 403 0 )); -DATA(insert ( 434 1114 1184 3 s 2536 403 0 )); -DATA(insert ( 434 1114 1184 4 s 2537 403 0 )); -DATA(insert ( 434 1114 1184 5 s 2538 403 0 )); -/* default operators timestamptz */ -DATA(insert ( 434 1184 1184 1 s 1322 403 0 )); -DATA(insert ( 434 1184 1184 2 s 1323 403 0 )); -DATA(insert ( 434 1184 1184 3 s 1320 403 0 )); -DATA(insert ( 434 1184 1184 4 s 1325 403 0 )); -DATA(insert ( 434 1184 1184 5 s 1324 403 0 )); -/* crosstype operators vs date */ -DATA(insert ( 434 1184 1082 1 s 2384 403 0 )); -DATA(insert ( 434 1184 1082 2 s 2385 403 0 )); -DATA(insert ( 434 1184 1082 3 s 2386 403 0 )); -DATA(insert ( 434 1184 1082 4 s 2387 403 0 )); -DATA(insert ( 434 1184 1082 5 s 2388 403 0 )); -/* crosstype operators vs timestamp */ -DATA(insert ( 434 1184 1114 1 s 2540 403 0 )); -DATA(insert ( 434 1184 1114 2 s 2541 403 0 )); -DATA(insert ( 434 1184 1114 3 s 2542 403 0 )); -DATA(insert ( 434 1184 1114 4 s 2543 403 0 )); -DATA(insert ( 434 1184 1114 5 s 2544 403 0 )); - -/* - * btree time_ops - */ - -DATA(insert ( 1996 1083 1083 1 s 1110 403 0 )); -DATA(insert ( 1996 1083 1083 2 s 1111 403 0 )); -DATA(insert ( 1996 1083 1083 3 s 1108 403 0 )); -DATA(insert ( 1996 1083 1083 4 s 1113 403 0 )); -DATA(insert ( 1996 1083 1083 5 s 1112 403 0 )); - -/* - * btree timetz_ops - */ - -DATA(insert ( 2000 1266 1266 1 s 1552 403 0 )); -DATA(insert ( 2000 1266 1266 2 s 1553 403 0 )); -DATA(insert ( 2000 1266 1266 3 s 1550 403 0 )); -DATA(insert ( 2000 1266 1266 4 s 1555 403 0 )); -DATA(insert ( 2000 1266 1266 5 s 1554 403 0 )); - -/* - * btree interval_ops - */ - -DATA(insert ( 1982 1186 1186 1 s 1332 403 0 )); -DATA(insert ( 1982 1186 1186 2 s 1333 403 0 )); -DATA(insert ( 1982 1186 1186 3 s 1330 403 0 )); -DATA(insert ( 1982 1186 1186 4 s 1335 403 0 )); -DATA(insert ( 1982 1186 1186 5 s 1334 403 0 )); - -/* - * btree macaddr - */ - -DATA(insert ( 1984 829 829 1 s 1222 403 0 )); -DATA(insert ( 1984 829 829 2 s 1223 403 0 )); -DATA(insert ( 1984 829 829 3 s 1220 403 0 )); -DATA(insert ( 1984 829 829 4 s 1225 403 0 )); -DATA(insert ( 1984 829 829 5 s 1224 403 0 )); - -/* - * btree macaddr8 - */ - -DATA(insert ( 3371 774 774 1 s 3364 403 0 )); -DATA(insert ( 3371 774 774 2 s 3365 403 0 )); -DATA(insert ( 3371 774 774 3 s 3362 403 0 )); -DATA(insert ( 3371 774 774 4 s 3367 403 0 )); -DATA(insert ( 3371 774 774 5 s 3366 403 0 )); - -/* - * btree network - */ - -DATA(insert ( 1974 869 869 1 s 1203 403 0 )); -DATA(insert ( 1974 869 869 2 s 1204 403 0 )); -DATA(insert ( 1974 869 869 3 s 1201 403 0 )); -DATA(insert ( 1974 869 869 4 s 1206 403 0 )); -DATA(insert ( 1974 869 869 5 s 1205 403 0 )); - -/* - * btree numeric - */ - -DATA(insert ( 1988 1700 1700 1 s 1754 403 0 )); -DATA(insert ( 1988 1700 1700 2 s 1755 403 0 )); -DATA(insert ( 1988 1700 1700 3 s 1752 403 0 )); -DATA(insert ( 1988 1700 1700 4 s 1757 403 0 )); -DATA(insert ( 1988 
1700 1700 5 s 1756 403 0 )); - -/* - * btree bool - */ - -DATA(insert ( 424 16 16 1 s 58 403 0 )); -DATA(insert ( 424 16 16 2 s 1694 403 0 )); -DATA(insert ( 424 16 16 3 s 91 403 0 )); -DATA(insert ( 424 16 16 4 s 1695 403 0 )); -DATA(insert ( 424 16 16 5 s 59 403 0 )); - -/* - * btree bit - */ - -DATA(insert ( 423 1560 1560 1 s 1786 403 0 )); -DATA(insert ( 423 1560 1560 2 s 1788 403 0 )); -DATA(insert ( 423 1560 1560 3 s 1784 403 0 )); -DATA(insert ( 423 1560 1560 4 s 1789 403 0 )); -DATA(insert ( 423 1560 1560 5 s 1787 403 0 )); - -/* - * btree varbit - */ - -DATA(insert ( 2002 1562 1562 1 s 1806 403 0 )); -DATA(insert ( 2002 1562 1562 2 s 1808 403 0 )); -DATA(insert ( 2002 1562 1562 3 s 1804 403 0 )); -DATA(insert ( 2002 1562 1562 4 s 1809 403 0 )); -DATA(insert ( 2002 1562 1562 5 s 1807 403 0 )); - -/* - * btree text pattern - */ + /* the index opfamily this entry is for */ + Oid amopfamily BKI_LOOKUP(pg_opfamily); -DATA(insert ( 2095 25 25 1 s 2314 403 0 )); -DATA(insert ( 2095 25 25 2 s 2315 403 0 )); -DATA(insert ( 2095 25 25 3 s 98 403 0 )); -DATA(insert ( 2095 25 25 4 s 2317 403 0 )); -DATA(insert ( 2095 25 25 5 s 2318 403 0 )); + /* operator's left input data type */ + Oid amoplefttype BKI_LOOKUP(pg_type); -/* - * btree bpchar pattern - */ - -DATA(insert ( 2097 1042 1042 1 s 2326 403 0 )); -DATA(insert ( 2097 1042 1042 2 s 2327 403 0 )); -DATA(insert ( 2097 1042 1042 3 s 1054 403 0 )); -DATA(insert ( 2097 1042 1042 4 s 2329 403 0 )); -DATA(insert ( 2097 1042 1042 5 s 2330 403 0 )); - -/* - * btree money_ops - */ - -DATA(insert ( 2099 790 790 1 s 902 403 0 )); -DATA(insert ( 2099 790 790 2 s 904 403 0 )); -DATA(insert ( 2099 790 790 3 s 900 403 0 )); -DATA(insert ( 2099 790 790 4 s 905 403 0 )); -DATA(insert ( 2099 790 790 5 s 903 403 0 )); - -/* - * btree reltime_ops - */ - -DATA(insert ( 2233 703 703 1 s 568 403 0 )); -DATA(insert ( 2233 703 703 2 s 570 403 0 )); -DATA(insert ( 2233 703 703 3 s 566 403 0 )); -DATA(insert ( 2233 703 703 4 s 571 403 0 )); -DATA(insert ( 2233 703 703 5 s 569 403 0 )); - -/* - * btree tinterval_ops - */ + /* operator's right input data type */ + Oid amoprighttype BKI_LOOKUP(pg_type); -DATA(insert ( 2234 704 704 1 s 813 403 0 )); -DATA(insert ( 2234 704 704 2 s 815 403 0 )); -DATA(insert ( 2234 704 704 3 s 811 403 0 )); -DATA(insert ( 2234 704 704 4 s 816 403 0 )); -DATA(insert ( 2234 704 704 5 s 814 403 0 )); + /* operator strategy number */ + int16 amopstrategy; -/* - * btree array_ops - */ + /* is operator for 's'earch or 'o'rdering? 
*/ + char amoppurpose BKI_DEFAULT(s); -DATA(insert ( 397 2277 2277 1 s 1072 403 0 )); -DATA(insert ( 397 2277 2277 2 s 1074 403 0 )); -DATA(insert ( 397 2277 2277 3 s 1070 403 0 )); -DATA(insert ( 397 2277 2277 4 s 1075 403 0 )); -DATA(insert ( 397 2277 2277 5 s 1073 403 0 )); + /* the operator's pg_operator OID */ + Oid amopopr BKI_LOOKUP(pg_operator); -/* - * btree record_ops - */ + /* the index access method this entry is for */ + Oid amopmethod BKI_LOOKUP(pg_am); -DATA(insert ( 2994 2249 2249 1 s 2990 403 0 )); -DATA(insert ( 2994 2249 2249 2 s 2992 403 0 )); -DATA(insert ( 2994 2249 2249 3 s 2988 403 0 )); -DATA(insert ( 2994 2249 2249 4 s 2993 403 0 )); -DATA(insert ( 2994 2249 2249 5 s 2991 403 0 )); - -/* - * btree record_image_ops - */ - -DATA(insert ( 3194 2249 2249 1 s 3190 403 0 )); -DATA(insert ( 3194 2249 2249 2 s 3192 403 0 )); -DATA(insert ( 3194 2249 2249 3 s 3188 403 0 )); -DATA(insert ( 3194 2249 2249 4 s 3193 403 0 )); -DATA(insert ( 3194 2249 2249 5 s 3191 403 0 )); - -/* - * btree uuid_ops - */ - -DATA(insert ( 2968 2950 2950 1 s 2974 403 0 )); -DATA(insert ( 2968 2950 2950 2 s 2976 403 0 )); -DATA(insert ( 2968 2950 2950 3 s 2972 403 0 )); -DATA(insert ( 2968 2950 2950 4 s 2977 403 0 )); -DATA(insert ( 2968 2950 2950 5 s 2975 403 0 )); - -/* - * btree pg_lsn_ops - */ - -DATA(insert ( 3253 3220 3220 1 s 3224 403 0 )); -DATA(insert ( 3253 3220 3220 2 s 3226 403 0 )); -DATA(insert ( 3253 3220 3220 3 s 3222 403 0 )); -DATA(insert ( 3253 3220 3220 4 s 3227 403 0 )); -DATA(insert ( 3253 3220 3220 5 s 3225 403 0 )); - -/* - * hash index _ops - */ - -/* bpchar_ops */ -DATA(insert ( 427 1042 1042 1 s 1054 405 0 )); -/* char_ops */ -DATA(insert ( 431 18 18 1 s 92 405 0 )); -/* date_ops */ -DATA(insert ( 435 1082 1082 1 s 1093 405 0 )); -/* float_ops */ -DATA(insert ( 1971 700 700 1 s 620 405 0 )); -DATA(insert ( 1971 701 701 1 s 670 405 0 )); -DATA(insert ( 1971 700 701 1 s 1120 405 0 )); -DATA(insert ( 1971 701 700 1 s 1130 405 0 )); -/* network_ops */ -DATA(insert ( 1975 869 869 1 s 1201 405 0 )); -/* integer_ops */ -DATA(insert ( 1977 21 21 1 s 94 405 0 )); -DATA(insert ( 1977 23 23 1 s 96 405 0 )); -DATA(insert ( 1977 20 20 1 s 410 405 0 )); -DATA(insert ( 1977 21 23 1 s 532 405 0 )); -DATA(insert ( 1977 21 20 1 s 1862 405 0 )); -DATA(insert ( 1977 23 21 1 s 533 405 0 )); -DATA(insert ( 1977 23 20 1 s 15 405 0 )); -DATA(insert ( 1977 20 21 1 s 1868 405 0 )); -DATA(insert ( 1977 20 23 1 s 416 405 0 )); -/* interval_ops */ -DATA(insert ( 1983 1186 1186 1 s 1330 405 0 )); -/* macaddr_ops */ -DATA(insert ( 1985 829 829 1 s 1220 405 0 )); -/* macaddr8_ops */ -DATA(insert ( 3372 774 774 1 s 3362 405 0 )); -/* name_ops */ -DATA(insert ( 1987 19 19 1 s 93 405 0 )); -/* oid_ops */ -DATA(insert ( 1990 26 26 1 s 607 405 0 )); -/* oidvector_ops */ -DATA(insert ( 1992 30 30 1 s 649 405 0 )); -/* text_ops */ -DATA(insert ( 1995 25 25 1 s 98 405 0 )); -/* time_ops */ -DATA(insert ( 1997 1083 1083 1 s 1108 405 0 )); -/* timestamptz_ops */ -DATA(insert ( 1999 1184 1184 1 s 1320 405 0 )); -/* timetz_ops */ -DATA(insert ( 2001 1266 1266 1 s 1550 405 0 )); -/* timestamp_ops */ -DATA(insert ( 2040 1114 1114 1 s 2060 405 0 )); -/* bool_ops */ -DATA(insert ( 2222 16 16 1 s 91 405 0 )); -/* bytea_ops */ -DATA(insert ( 2223 17 17 1 s 1955 405 0 )); -/* xid_ops */ -DATA(insert ( 2225 28 28 1 s 352 405 0 )); -/* cid_ops */ -DATA(insert ( 2226 29 29 1 s 385 405 0 )); -/* abstime_ops */ -DATA(insert ( 2227 702 702 1 s 560 405 0 )); -/* reltime_ops */ -DATA(insert ( 2228 703 703 1 s 566 405 0 )); -/* 
text_pattern_ops */ -DATA(insert ( 2229 25 25 1 s 98 405 0 )); -/* bpchar_pattern_ops */ -DATA(insert ( 2231 1042 1042 1 s 1054 405 0 )); -/* aclitem_ops */ -DATA(insert ( 2235 1033 1033 1 s 974 405 0 )); -/* uuid_ops */ -DATA(insert ( 2969 2950 2950 1 s 2972 405 0 )); -/* pg_lsn_ops */ -DATA(insert ( 3254 3220 3220 1 s 3222 405 0 )); -/* numeric_ops */ -DATA(insert ( 1998 1700 1700 1 s 1752 405 0 )); -/* array_ops */ -DATA(insert ( 627 2277 2277 1 s 1070 405 0 )); - - -/* - * gist box_ops - */ - -DATA(insert ( 2593 603 603 1 s 493 783 0 )); -DATA(insert ( 2593 603 603 2 s 494 783 0 )); -DATA(insert ( 2593 603 603 3 s 500 783 0 )); -DATA(insert ( 2593 603 603 4 s 495 783 0 )); -DATA(insert ( 2593 603 603 5 s 496 783 0 )); -DATA(insert ( 2593 603 603 6 s 499 783 0 )); -DATA(insert ( 2593 603 603 7 s 498 783 0 )); -DATA(insert ( 2593 603 603 8 s 497 783 0 )); -DATA(insert ( 2593 603 603 9 s 2571 783 0 )); -DATA(insert ( 2593 603 603 10 s 2570 783 0 )); -DATA(insert ( 2593 603 603 11 s 2573 783 0 )); -DATA(insert ( 2593 603 603 12 s 2572 783 0 )); -DATA(insert ( 2593 603 603 13 s 2863 783 0 )); -DATA(insert ( 2593 603 603 14 s 2862 783 0 )); - -/* - * gist point_ops - */ -DATA(insert ( 1029 600 600 11 s 506 783 0 )); -DATA(insert ( 1029 600 600 1 s 507 783 0 )); -DATA(insert ( 1029 600 600 5 s 508 783 0 )); -DATA(insert ( 1029 600 600 10 s 509 783 0 )); -DATA(insert ( 1029 600 600 6 s 510 783 0 )); -DATA(insert ( 1029 600 600 15 o 517 783 1970 )); -DATA(insert ( 1029 600 603 28 s 511 783 0 )); -DATA(insert ( 1029 600 604 48 s 756 783 0 )); -DATA(insert ( 1029 600 718 68 s 758 783 0 )); - - -/* - * gist poly_ops (supports polygons) - */ - -DATA(insert ( 2594 604 604 1 s 485 783 0 )); -DATA(insert ( 2594 604 604 2 s 486 783 0 )); -DATA(insert ( 2594 604 604 3 s 492 783 0 )); -DATA(insert ( 2594 604 604 4 s 487 783 0 )); -DATA(insert ( 2594 604 604 5 s 488 783 0 )); -DATA(insert ( 2594 604 604 6 s 491 783 0 )); -DATA(insert ( 2594 604 604 7 s 490 783 0 )); -DATA(insert ( 2594 604 604 8 s 489 783 0 )); -DATA(insert ( 2594 604 604 9 s 2575 783 0 )); -DATA(insert ( 2594 604 604 10 s 2574 783 0 )); -DATA(insert ( 2594 604 604 11 s 2577 783 0 )); -DATA(insert ( 2594 604 604 12 s 2576 783 0 )); -DATA(insert ( 2594 604 604 13 s 2861 783 0 )); -DATA(insert ( 2594 604 604 14 s 2860 783 0 )); -DATA(insert ( 2594 604 600 15 o 3289 783 1970 )); - -/* - * gist circle_ops - */ - -DATA(insert ( 2595 718 718 1 s 1506 783 0 )); -DATA(insert ( 2595 718 718 2 s 1507 783 0 )); -DATA(insert ( 2595 718 718 3 s 1513 783 0 )); -DATA(insert ( 2595 718 718 4 s 1508 783 0 )); -DATA(insert ( 2595 718 718 5 s 1509 783 0 )); -DATA(insert ( 2595 718 718 6 s 1512 783 0 )); -DATA(insert ( 2595 718 718 7 s 1511 783 0 )); -DATA(insert ( 2595 718 718 8 s 1510 783 0 )); -DATA(insert ( 2595 718 718 9 s 2589 783 0 )); -DATA(insert ( 2595 718 718 10 s 1515 783 0 )); -DATA(insert ( 2595 718 718 11 s 1514 783 0 )); -DATA(insert ( 2595 718 718 12 s 2590 783 0 )); -DATA(insert ( 2595 718 718 13 s 2865 783 0 )); -DATA(insert ( 2595 718 718 14 s 2864 783 0 )); -DATA(insert ( 2595 718 600 15 o 3291 783 1970 )); - -/* - * gin array_ops - */ -DATA(insert ( 2745 2277 2277 1 s 2750 2742 0 )); -DATA(insert ( 2745 2277 2277 2 s 2751 2742 0 )); -DATA(insert ( 2745 2277 2277 3 s 2752 2742 0 )); -DATA(insert ( 2745 2277 2277 4 s 1070 2742 0 )); - -/* - * btree enum_ops - */ -DATA(insert ( 3522 3500 3500 1 s 3518 403 0 )); -DATA(insert ( 3522 3500 3500 2 s 3520 403 0 )); -DATA(insert ( 3522 3500 3500 3 s 3516 403 0 )); -DATA(insert ( 3522 3500 3500 4 
s 3521 403 0 )); -DATA(insert ( 3522 3500 3500 5 s 3519 403 0 )); - -/* - * hash enum_ops - */ -DATA(insert ( 3523 3500 3500 1 s 3516 405 0 )); - -/* - * btree tsvector_ops - */ -DATA(insert ( 3626 3614 3614 1 s 3627 403 0 )); -DATA(insert ( 3626 3614 3614 2 s 3628 403 0 )); -DATA(insert ( 3626 3614 3614 3 s 3629 403 0 )); -DATA(insert ( 3626 3614 3614 4 s 3631 403 0 )); -DATA(insert ( 3626 3614 3614 5 s 3632 403 0 )); - -/* - * GiST tsvector_ops - */ -DATA(insert ( 3655 3614 3615 1 s 3636 783 0 )); - -/* - * GIN tsvector_ops - */ -DATA(insert ( 3659 3614 3615 1 s 3636 2742 0 )); -DATA(insert ( 3659 3614 3615 2 s 3660 2742 0 )); - -/* - * btree tsquery_ops - */ -DATA(insert ( 3683 3615 3615 1 s 3674 403 0 )); -DATA(insert ( 3683 3615 3615 2 s 3675 403 0 )); -DATA(insert ( 3683 3615 3615 3 s 3676 403 0 )); -DATA(insert ( 3683 3615 3615 4 s 3678 403 0 )); -DATA(insert ( 3683 3615 3615 5 s 3679 403 0 )); - -/* - * GiST tsquery_ops - */ -DATA(insert ( 3702 3615 3615 7 s 3693 783 0 )); -DATA(insert ( 3702 3615 3615 8 s 3694 783 0 )); - -/* - * btree range_ops - */ -DATA(insert ( 3901 3831 3831 1 s 3884 403 0 )); -DATA(insert ( 3901 3831 3831 2 s 3885 403 0 )); -DATA(insert ( 3901 3831 3831 3 s 3882 403 0 )); -DATA(insert ( 3901 3831 3831 4 s 3886 403 0 )); -DATA(insert ( 3901 3831 3831 5 s 3887 403 0 )); - -/* - * hash range_ops - */ -DATA(insert ( 3903 3831 3831 1 s 3882 405 0 )); - -/* - * GiST range_ops - */ -DATA(insert ( 3919 3831 3831 1 s 3893 783 0 )); -DATA(insert ( 3919 3831 3831 2 s 3895 783 0 )); -DATA(insert ( 3919 3831 3831 3 s 3888 783 0 )); -DATA(insert ( 3919 3831 3831 4 s 3896 783 0 )); -DATA(insert ( 3919 3831 3831 5 s 3894 783 0 )); -DATA(insert ( 3919 3831 3831 6 s 3897 783 0 )); -DATA(insert ( 3919 3831 3831 7 s 3890 783 0 )); -DATA(insert ( 3919 3831 3831 8 s 3892 783 0 )); -DATA(insert ( 3919 3831 2283 16 s 3889 783 0 )); -DATA(insert ( 3919 3831 3831 18 s 3882 783 0 )); - -/* - * SP-GiST quad_point_ops - */ -DATA(insert ( 4015 600 600 11 s 506 4000 0 )); -DATA(insert ( 4015 600 600 1 s 507 4000 0 )); -DATA(insert ( 4015 600 600 5 s 508 4000 0 )); -DATA(insert ( 4015 600 600 10 s 509 4000 0 )); -DATA(insert ( 4015 600 600 6 s 510 4000 0 )); -DATA(insert ( 4015 600 603 8 s 511 4000 0 )); - -/* - * SP-GiST kd_point_ops - */ -DATA(insert ( 4016 600 600 11 s 506 4000 0 )); -DATA(insert ( 4016 600 600 1 s 507 4000 0 )); -DATA(insert ( 4016 600 600 5 s 508 4000 0 )); -DATA(insert ( 4016 600 600 10 s 509 4000 0 )); -DATA(insert ( 4016 600 600 6 s 510 4000 0 )); -DATA(insert ( 4016 600 603 8 s 511 4000 0 )); - -/* - * SP-GiST text_ops - */ -DATA(insert ( 4017 25 25 1 s 2314 4000 0 )); -DATA(insert ( 4017 25 25 2 s 2315 4000 0 )); -DATA(insert ( 4017 25 25 3 s 98 4000 0 )); -DATA(insert ( 4017 25 25 4 s 2317 4000 0 )); -DATA(insert ( 4017 25 25 5 s 2318 4000 0 )); -DATA(insert ( 4017 25 25 11 s 664 4000 0 )); -DATA(insert ( 4017 25 25 12 s 665 4000 0 )); -DATA(insert ( 4017 25 25 14 s 667 4000 0 )); -DATA(insert ( 4017 25 25 15 s 666 4000 0 )); - -/* - * btree jsonb_ops - */ -DATA(insert ( 4033 3802 3802 1 s 3242 403 0 )); -DATA(insert ( 4033 3802 3802 2 s 3244 403 0 )); -DATA(insert ( 4033 3802 3802 3 s 3240 403 0 )); -DATA(insert ( 4033 3802 3802 4 s 3245 403 0 )); -DATA(insert ( 4033 3802 3802 5 s 3243 403 0 )); - -/* - * hash jsonb_ops - */ -DATA(insert ( 4034 3802 3802 1 s 3240 405 0 )); - -/* - * GIN jsonb_ops - */ -DATA(insert ( 4036 3802 3802 7 s 3246 2742 0 )); -DATA(insert ( 4036 3802 25 9 s 3247 2742 0 )); -DATA(insert ( 4036 3802 1009 10 s 3248 2742 0 )); -DATA(insert 
( 4036 3802 1009 11 s 3249 2742 0 )); - -/* - * GIN jsonb_path_ops - */ -DATA(insert ( 4037 3802 3802 7 s 3246 2742 0 )); - -/* - * SP-GiST range_ops - */ -DATA(insert ( 3474 3831 3831 1 s 3893 4000 0 )); -DATA(insert ( 3474 3831 3831 2 s 3895 4000 0 )); -DATA(insert ( 3474 3831 3831 3 s 3888 4000 0 )); -DATA(insert ( 3474 3831 3831 4 s 3896 4000 0 )); -DATA(insert ( 3474 3831 3831 5 s 3894 4000 0 )); -DATA(insert ( 3474 3831 3831 6 s 3897 4000 0 )); -DATA(insert ( 3474 3831 3831 7 s 3890 4000 0 )); -DATA(insert ( 3474 3831 3831 8 s 3892 4000 0 )); -DATA(insert ( 3474 3831 2283 16 s 3889 4000 0 )); -DATA(insert ( 3474 3831 3831 18 s 3882 4000 0 )); - -/* - * SP-GiST box_ops - */ -DATA(insert ( 5000 603 603 1 s 493 4000 0 )); -DATA(insert ( 5000 603 603 2 s 494 4000 0 )); -DATA(insert ( 5000 603 603 3 s 500 4000 0 )); -DATA(insert ( 5000 603 603 4 s 495 4000 0 )); -DATA(insert ( 5000 603 603 5 s 496 4000 0 )); -DATA(insert ( 5000 603 603 6 s 499 4000 0 )); -DATA(insert ( 5000 603 603 7 s 498 4000 0 )); -DATA(insert ( 5000 603 603 8 s 497 4000 0 )); -DATA(insert ( 5000 603 603 9 s 2571 4000 0 )); -DATA(insert ( 5000 603 603 10 s 2570 4000 0 )); -DATA(insert ( 5000 603 603 11 s 2573 4000 0 )); -DATA(insert ( 5000 603 603 12 s 2572 4000 0 )); - -/* - * GiST inet_ops - */ -DATA(insert ( 3550 869 869 3 s 3552 783 0 )); -DATA(insert ( 3550 869 869 18 s 1201 783 0 )); -DATA(insert ( 3550 869 869 19 s 1202 783 0 )); -DATA(insert ( 3550 869 869 20 s 1203 783 0 )); -DATA(insert ( 3550 869 869 21 s 1204 783 0 )); -DATA(insert ( 3550 869 869 22 s 1205 783 0 )); -DATA(insert ( 3550 869 869 23 s 1206 783 0 )); -DATA(insert ( 3550 869 869 24 s 931 783 0 )); -DATA(insert ( 3550 869 869 25 s 932 783 0 )); -DATA(insert ( 3550 869 869 26 s 933 783 0 )); -DATA(insert ( 3550 869 869 27 s 934 783 0 )); + /* ordering opfamily OID, or 0 if search op */ + Oid amopsortfamily BKI_DEFAULT(0) BKI_LOOKUP(pg_opfamily); +} FormData_pg_amop; -/* - * SP-GiST inet_ops +/* ---------------- + * Form_pg_amop corresponds to a pointer to a tuple with + * the format of pg_amop relation. 
+ * ---------------- */ -DATA(insert ( 3794 869 869 3 s 3552 4000 0 )); -DATA(insert ( 3794 869 869 18 s 1201 4000 0 )); -DATA(insert ( 3794 869 869 19 s 1202 4000 0 )); -DATA(insert ( 3794 869 869 20 s 1203 4000 0 )); -DATA(insert ( 3794 869 869 21 s 1204 4000 0 )); -DATA(insert ( 3794 869 869 22 s 1205 4000 0 )); -DATA(insert ( 3794 869 869 23 s 1206 4000 0 )); -DATA(insert ( 3794 869 869 24 s 931 4000 0 )); -DATA(insert ( 3794 869 869 25 s 932 4000 0 )); -DATA(insert ( 3794 869 869 26 s 933 4000 0 )); -DATA(insert ( 3794 869 869 27 s 934 4000 0 )); - -/* BRIN opclasses */ -/* minmax bytea */ -DATA(insert ( 4064 17 17 1 s 1957 3580 0 )); -DATA(insert ( 4064 17 17 2 s 1958 3580 0 )); -DATA(insert ( 4064 17 17 3 s 1955 3580 0 )); -DATA(insert ( 4064 17 17 4 s 1960 3580 0 )); -DATA(insert ( 4064 17 17 5 s 1959 3580 0 )); -/* minmax "char" */ -DATA(insert ( 4062 18 18 1 s 631 3580 0 )); -DATA(insert ( 4062 18 18 2 s 632 3580 0 )); -DATA(insert ( 4062 18 18 3 s 92 3580 0 )); -DATA(insert ( 4062 18 18 4 s 634 3580 0 )); -DATA(insert ( 4062 18 18 5 s 633 3580 0 )); -/* minmax name */ -DATA(insert ( 4065 19 19 1 s 660 3580 0 )); -DATA(insert ( 4065 19 19 2 s 661 3580 0 )); -DATA(insert ( 4065 19 19 3 s 93 3580 0 )); -DATA(insert ( 4065 19 19 4 s 663 3580 0 )); -DATA(insert ( 4065 19 19 5 s 662 3580 0 )); -/* minmax integer */ -DATA(insert ( 4054 20 20 1 s 412 3580 0 )); -DATA(insert ( 4054 20 20 2 s 414 3580 0 )); -DATA(insert ( 4054 20 20 3 s 410 3580 0 )); -DATA(insert ( 4054 20 20 4 s 415 3580 0 )); -DATA(insert ( 4054 20 20 5 s 413 3580 0 )); -DATA(insert ( 4054 20 21 1 s 1870 3580 0 )); -DATA(insert ( 4054 20 21 2 s 1872 3580 0 )); -DATA(insert ( 4054 20 21 3 s 1868 3580 0 )); -DATA(insert ( 4054 20 21 4 s 1873 3580 0 )); -DATA(insert ( 4054 20 21 5 s 1871 3580 0 )); -DATA(insert ( 4054 20 23 1 s 418 3580 0 )); -DATA(insert ( 4054 20 23 2 s 420 3580 0 )); -DATA(insert ( 4054 20 23 3 s 416 3580 0 )); -DATA(insert ( 4054 20 23 4 s 430 3580 0 )); -DATA(insert ( 4054 20 23 5 s 419 3580 0 )); -DATA(insert ( 4054 21 21 1 s 95 3580 0 )); -DATA(insert ( 4054 21 21 2 s 522 3580 0 )); -DATA(insert ( 4054 21 21 3 s 94 3580 0 )); -DATA(insert ( 4054 21 21 4 s 524 3580 0 )); -DATA(insert ( 4054 21 21 5 s 520 3580 0 )); -DATA(insert ( 4054 21 20 1 s 1864 3580 0 )); -DATA(insert ( 4054 21 20 2 s 1866 3580 0 )); -DATA(insert ( 4054 21 20 3 s 1862 3580 0 )); -DATA(insert ( 4054 21 20 4 s 1867 3580 0 )); -DATA(insert ( 4054 21 20 5 s 1865 3580 0 )); -DATA(insert ( 4054 21 23 1 s 534 3580 0 )); -DATA(insert ( 4054 21 23 2 s 540 3580 0 )); -DATA(insert ( 4054 21 23 3 s 532 3580 0 )); -DATA(insert ( 4054 21 23 4 s 542 3580 0 )); -DATA(insert ( 4054 21 23 5 s 536 3580 0 )); -DATA(insert ( 4054 23 23 1 s 97 3580 0 )); -DATA(insert ( 4054 23 23 2 s 523 3580 0 )); -DATA(insert ( 4054 23 23 3 s 96 3580 0 )); -DATA(insert ( 4054 23 23 4 s 525 3580 0 )); -DATA(insert ( 4054 23 23 5 s 521 3580 0 )); -DATA(insert ( 4054 23 21 1 s 535 3580 0 )); -DATA(insert ( 4054 23 21 2 s 541 3580 0 )); -DATA(insert ( 4054 23 21 3 s 533 3580 0 )); -DATA(insert ( 4054 23 21 4 s 543 3580 0 )); -DATA(insert ( 4054 23 21 5 s 537 3580 0 )); -DATA(insert ( 4054 23 20 1 s 37 3580 0 )); -DATA(insert ( 4054 23 20 2 s 80 3580 0 )); -DATA(insert ( 4054 23 20 3 s 15 3580 0 )); -DATA(insert ( 4054 23 20 4 s 82 3580 0 )); -DATA(insert ( 4054 23 20 5 s 76 3580 0 )); +typedef FormData_pg_amop *Form_pg_amop; -/* minmax text */ -DATA(insert ( 4056 25 25 1 s 664 3580 0 )); -DATA(insert ( 4056 25 25 2 s 665 3580 0 )); -DATA(insert ( 4056 25 25 3 s 98 
3580 0 )); -DATA(insert ( 4056 25 25 4 s 667 3580 0 )); -DATA(insert ( 4056 25 25 5 s 666 3580 0 )); -/* minmax oid */ -DATA(insert ( 4068 26 26 1 s 609 3580 0 )); -DATA(insert ( 4068 26 26 2 s 611 3580 0 )); -DATA(insert ( 4068 26 26 3 s 607 3580 0 )); -DATA(insert ( 4068 26 26 4 s 612 3580 0 )); -DATA(insert ( 4068 26 26 5 s 610 3580 0 )); -/* minmax tid */ -DATA(insert ( 4069 27 27 1 s 2799 3580 0 )); -DATA(insert ( 4069 27 27 2 s 2801 3580 0 )); -DATA(insert ( 4069 27 27 3 s 387 3580 0 )); -DATA(insert ( 4069 27 27 4 s 2802 3580 0 )); -DATA(insert ( 4069 27 27 5 s 2800 3580 0 )); -/* minmax float (float4, float8) */ -DATA(insert ( 4070 700 700 1 s 622 3580 0 )); -DATA(insert ( 4070 700 700 2 s 624 3580 0 )); -DATA(insert ( 4070 700 700 3 s 620 3580 0 )); -DATA(insert ( 4070 700 700 4 s 625 3580 0 )); -DATA(insert ( 4070 700 700 5 s 623 3580 0 )); -DATA(insert ( 4070 700 701 1 s 1122 3580 0 )); -DATA(insert ( 4070 700 701 2 s 1124 3580 0 )); -DATA(insert ( 4070 700 701 3 s 1120 3580 0 )); -DATA(insert ( 4070 700 701 4 s 1125 3580 0 )); -DATA(insert ( 4070 700 701 5 s 1123 3580 0 )); -DATA(insert ( 4070 701 700 1 s 1132 3580 0 )); -DATA(insert ( 4070 701 700 2 s 1134 3580 0 )); -DATA(insert ( 4070 701 700 3 s 1130 3580 0 )); -DATA(insert ( 4070 701 700 4 s 1135 3580 0 )); -DATA(insert ( 4070 701 700 5 s 1133 3580 0 )); -DATA(insert ( 4070 701 701 1 s 672 3580 0 )); -DATA(insert ( 4070 701 701 2 s 673 3580 0 )); -DATA(insert ( 4070 701 701 3 s 670 3580 0 )); -DATA(insert ( 4070 701 701 4 s 675 3580 0 )); -DATA(insert ( 4070 701 701 5 s 674 3580 0 )); +#ifdef EXPOSE_TO_CLIENT_CODE -/* minmax abstime */ -DATA(insert ( 4072 702 702 1 s 562 3580 0 )); -DATA(insert ( 4072 702 702 2 s 564 3580 0 )); -DATA(insert ( 4072 702 702 3 s 560 3580 0 )); -DATA(insert ( 4072 702 702 4 s 565 3580 0 )); -DATA(insert ( 4072 702 702 5 s 563 3580 0 )); -/* minmax reltime */ -DATA(insert ( 4073 703 703 1 s 568 3580 0 )); -DATA(insert ( 4073 703 703 2 s 570 3580 0 )); -DATA(insert ( 4073 703 703 3 s 566 3580 0 )); -DATA(insert ( 4073 703 703 4 s 571 3580 0 )); -DATA(insert ( 4073 703 703 5 s 569 3580 0 )); -/* minmax macaddr */ -DATA(insert ( 4074 829 829 1 s 1222 3580 0 )); -DATA(insert ( 4074 829 829 2 s 1223 3580 0 )); -DATA(insert ( 4074 829 829 3 s 1220 3580 0 )); -DATA(insert ( 4074 829 829 4 s 1225 3580 0 )); -DATA(insert ( 4074 829 829 5 s 1224 3580 0 )); -/* minmax macaddr8 */ -DATA(insert ( 4109 774 774 1 s 3364 3580 0 )); -DATA(insert ( 4109 774 774 2 s 3365 3580 0 )); -DATA(insert ( 4109 774 774 3 s 3362 3580 0 )); -DATA(insert ( 4109 774 774 4 s 3367 3580 0 )); -DATA(insert ( 4109 774 774 5 s 3366 3580 0 )); -/* minmax inet */ -DATA(insert ( 4075 869 869 1 s 1203 3580 0 )); -DATA(insert ( 4075 869 869 2 s 1204 3580 0 )); -DATA(insert ( 4075 869 869 3 s 1201 3580 0 )); -DATA(insert ( 4075 869 869 4 s 1206 3580 0 )); -DATA(insert ( 4075 869 869 5 s 1205 3580 0 )); -/* inclusion inet */ -DATA(insert ( 4102 869 869 3 s 3552 3580 0 )); -DATA(insert ( 4102 869 869 7 s 934 3580 0 )); -DATA(insert ( 4102 869 869 8 s 932 3580 0 )); -DATA(insert ( 4102 869 869 18 s 1201 3580 0 )); -DATA(insert ( 4102 869 869 24 s 933 3580 0 )); -DATA(insert ( 4102 869 869 26 s 931 3580 0 )); -/* minmax character */ -DATA(insert ( 4076 1042 1042 1 s 1058 3580 0 )); -DATA(insert ( 4076 1042 1042 2 s 1059 3580 0 )); -DATA(insert ( 4076 1042 1042 3 s 1054 3580 0 )); -DATA(insert ( 4076 1042 1042 4 s 1061 3580 0 )); -DATA(insert ( 4076 1042 1042 5 s 1060 3580 0 )); -/* minmax time without time zone */ -DATA(insert ( 4077 1083 
1083 1 s 1110 3580 0 )); -DATA(insert ( 4077 1083 1083 2 s 1111 3580 0 )); -DATA(insert ( 4077 1083 1083 3 s 1108 3580 0 )); -DATA(insert ( 4077 1083 1083 4 s 1113 3580 0 )); -DATA(insert ( 4077 1083 1083 5 s 1112 3580 0 )); -/* minmax datetime (date, timestamp, timestamptz) */ -DATA(insert ( 4059 1114 1114 1 s 2062 3580 0 )); -DATA(insert ( 4059 1114 1114 2 s 2063 3580 0 )); -DATA(insert ( 4059 1114 1114 3 s 2060 3580 0 )); -DATA(insert ( 4059 1114 1114 4 s 2065 3580 0 )); -DATA(insert ( 4059 1114 1114 5 s 2064 3580 0 )); -DATA(insert ( 4059 1114 1082 1 s 2371 3580 0 )); -DATA(insert ( 4059 1114 1082 2 s 2372 3580 0 )); -DATA(insert ( 4059 1114 1082 3 s 2373 3580 0 )); -DATA(insert ( 4059 1114 1082 4 s 2374 3580 0 )); -DATA(insert ( 4059 1114 1082 5 s 2375 3580 0 )); -DATA(insert ( 4059 1114 1184 1 s 2534 3580 0 )); -DATA(insert ( 4059 1114 1184 2 s 2535 3580 0 )); -DATA(insert ( 4059 1114 1184 3 s 2536 3580 0 )); -DATA(insert ( 4059 1114 1184 4 s 2537 3580 0 )); -DATA(insert ( 4059 1114 1184 5 s 2538 3580 0 )); -DATA(insert ( 4059 1082 1082 1 s 1095 3580 0 )); -DATA(insert ( 4059 1082 1082 2 s 1096 3580 0 )); -DATA(insert ( 4059 1082 1082 3 s 1093 3580 0 )); -DATA(insert ( 4059 1082 1082 4 s 1098 3580 0 )); -DATA(insert ( 4059 1082 1082 5 s 1097 3580 0 )); -DATA(insert ( 4059 1082 1114 1 s 2345 3580 0 )); -DATA(insert ( 4059 1082 1114 2 s 2346 3580 0 )); -DATA(insert ( 4059 1082 1114 3 s 2347 3580 0 )); -DATA(insert ( 4059 1082 1114 4 s 2348 3580 0 )); -DATA(insert ( 4059 1082 1114 5 s 2349 3580 0 )); -DATA(insert ( 4059 1082 1184 1 s 2358 3580 0 )); -DATA(insert ( 4059 1082 1184 2 s 2359 3580 0 )); -DATA(insert ( 4059 1082 1184 3 s 2360 3580 0 )); -DATA(insert ( 4059 1082 1184 4 s 2361 3580 0 )); -DATA(insert ( 4059 1082 1184 5 s 2362 3580 0 )); -DATA(insert ( 4059 1184 1082 1 s 2384 3580 0 )); -DATA(insert ( 4059 1184 1082 2 s 2385 3580 0 )); -DATA(insert ( 4059 1184 1082 3 s 2386 3580 0 )); -DATA(insert ( 4059 1184 1082 4 s 2387 3580 0 )); -DATA(insert ( 4059 1184 1082 5 s 2388 3580 0 )); -DATA(insert ( 4059 1184 1114 1 s 2540 3580 0 )); -DATA(insert ( 4059 1184 1114 2 s 2541 3580 0 )); -DATA(insert ( 4059 1184 1114 3 s 2542 3580 0 )); -DATA(insert ( 4059 1184 1114 4 s 2543 3580 0 )); -DATA(insert ( 4059 1184 1114 5 s 2544 3580 0 )); -DATA(insert ( 4059 1184 1184 1 s 1322 3580 0 )); -DATA(insert ( 4059 1184 1184 2 s 1323 3580 0 )); -DATA(insert ( 4059 1184 1184 3 s 1320 3580 0 )); -DATA(insert ( 4059 1184 1184 4 s 1325 3580 0 )); -DATA(insert ( 4059 1184 1184 5 s 1324 3580 0 )); +/* allowed values of amoppurpose: */ +#define AMOP_SEARCH 's' /* operator is for search */ +#define AMOP_ORDER 'o' /* operator is for ordering */ -/* minmax interval */ -DATA(insert ( 4078 1186 1186 1 s 1332 3580 0 )); -DATA(insert ( 4078 1186 1186 2 s 1333 3580 0 )); -DATA(insert ( 4078 1186 1186 3 s 1330 3580 0 )); -DATA(insert ( 4078 1186 1186 4 s 1335 3580 0 )); -DATA(insert ( 4078 1186 1186 5 s 1334 3580 0 )); -/* minmax time with time zone */ -DATA(insert ( 4058 1266 1266 1 s 1552 3580 0 )); -DATA(insert ( 4058 1266 1266 2 s 1553 3580 0 )); -DATA(insert ( 4058 1266 1266 3 s 1550 3580 0 )); -DATA(insert ( 4058 1266 1266 4 s 1555 3580 0 )); -DATA(insert ( 4058 1266 1266 5 s 1554 3580 0 )); -/* minmax bit */ -DATA(insert ( 4079 1560 1560 1 s 1786 3580 0 )); -DATA(insert ( 4079 1560 1560 2 s 1788 3580 0 )); -DATA(insert ( 4079 1560 1560 3 s 1784 3580 0 )); -DATA(insert ( 4079 1560 1560 4 s 1789 3580 0 )); -DATA(insert ( 4079 1560 1560 5 s 1787 3580 0 )); -/* minmax bit varying */ -DATA(insert ( 4080 1562 
1562 1 s 1806 3580 0 )); -DATA(insert ( 4080 1562 1562 2 s 1808 3580 0 )); -DATA(insert ( 4080 1562 1562 3 s 1804 3580 0 )); -DATA(insert ( 4080 1562 1562 4 s 1809 3580 0 )); -DATA(insert ( 4080 1562 1562 5 s 1807 3580 0 )); -/* minmax numeric */ -DATA(insert ( 4055 1700 1700 1 s 1754 3580 0 )); -DATA(insert ( 4055 1700 1700 2 s 1755 3580 0 )); -DATA(insert ( 4055 1700 1700 3 s 1752 3580 0 )); -DATA(insert ( 4055 1700 1700 4 s 1757 3580 0 )); -DATA(insert ( 4055 1700 1700 5 s 1756 3580 0 )); -/* minmax uuid */ -DATA(insert ( 4081 2950 2950 1 s 2974 3580 0 )); -DATA(insert ( 4081 2950 2950 2 s 2976 3580 0 )); -DATA(insert ( 4081 2950 2950 3 s 2972 3580 0 )); -DATA(insert ( 4081 2950 2950 4 s 2977 3580 0 )); -DATA(insert ( 4081 2950 2950 5 s 2975 3580 0 )); -/* inclusion range types */ -DATA(insert ( 4103 3831 3831 1 s 3893 3580 0 )); -DATA(insert ( 4103 3831 3831 2 s 3895 3580 0 )); -DATA(insert ( 4103 3831 3831 3 s 3888 3580 0 )); -DATA(insert ( 4103 3831 3831 4 s 3896 3580 0 )); -DATA(insert ( 4103 3831 3831 5 s 3894 3580 0 )); -DATA(insert ( 4103 3831 3831 7 s 3890 3580 0 )); -DATA(insert ( 4103 3831 3831 8 s 3892 3580 0 )); -DATA(insert ( 4103 3831 2283 16 s 3889 3580 0 )); -DATA(insert ( 4103 3831 3831 17 s 3897 3580 0 )); -DATA(insert ( 4103 3831 3831 18 s 3882 3580 0 )); -DATA(insert ( 4103 3831 3831 20 s 3884 3580 0 )); -DATA(insert ( 4103 3831 3831 21 s 3885 3580 0 )); -DATA(insert ( 4103 3831 3831 22 s 3887 3580 0 )); -DATA(insert ( 4103 3831 3831 23 s 3886 3580 0 )); -/* minmax pg_lsn */ -DATA(insert ( 4082 3220 3220 1 s 3224 3580 0 )); -DATA(insert ( 4082 3220 3220 2 s 3226 3580 0 )); -DATA(insert ( 4082 3220 3220 3 s 3222 3580 0 )); -DATA(insert ( 4082 3220 3220 4 s 3227 3580 0 )); -DATA(insert ( 4082 3220 3220 5 s 3225 3580 0 )); -/* inclusion box */ -DATA(insert ( 4104 603 603 1 s 493 3580 0 )); -DATA(insert ( 4104 603 603 2 s 494 3580 0 )); -DATA(insert ( 4104 603 603 3 s 500 3580 0 )); -DATA(insert ( 4104 603 603 4 s 495 3580 0 )); -DATA(insert ( 4104 603 603 5 s 496 3580 0 )); -DATA(insert ( 4104 603 603 6 s 499 3580 0 )); -DATA(insert ( 4104 603 603 7 s 498 3580 0 )); -DATA(insert ( 4104 603 603 8 s 497 3580 0 )); -DATA(insert ( 4104 603 603 9 s 2571 3580 0 )); -DATA(insert ( 4104 603 603 10 s 2570 3580 0 )); -DATA(insert ( 4104 603 603 11 s 2573 3580 0 )); -DATA(insert ( 4104 603 603 12 s 2572 3580 0 )); -/* we could, but choose not to, supply entries for strategies 13 and 14 */ -DATA(insert ( 4104 603 600 7 s 433 3580 0 )); +#endif /* EXPOSE_TO_CLIENT_CODE */ #endif /* PG_AMOP_H */ diff --git a/src/include/catalog/pg_amproc.dat b/src/include/catalog/pg_amproc.dat new file mode 100644 index 0000000000..0ef2c0885f --- /dev/null +++ b/src/include/catalog/pg_amproc.dat @@ -0,0 +1,1232 @@ +#---------------------------------------------------------------------- +# +# pg_amproc.dat +# Initial contents of the pg_amproc system catalog. 
+# +# Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group +# Portions Copyright (c) 1994, Regents of the University of California +# +# src/include/catalog/pg_amproc.dat +# +#---------------------------------------------------------------------- + +[ + +# btree +{ amprocfamily => 'btree/array_ops', amproclefttype => 'anyarray', + amprocrighttype => 'anyarray', amprocnum => '1', amproc => 'btarraycmp' }, +{ amprocfamily => 'btree/bit_ops', amproclefttype => 'bit', + amprocrighttype => 'bit', amprocnum => '1', amproc => 'bitcmp' }, +{ amprocfamily => 'btree/bool_ops', amproclefttype => 'bool', + amprocrighttype => 'bool', amprocnum => '1', amproc => 'btboolcmp' }, +{ amprocfamily => 'btree/bpchar_ops', amproclefttype => 'bpchar', + amprocrighttype => 'bpchar', amprocnum => '1', amproc => 'bpcharcmp' }, +{ amprocfamily => 'btree/bpchar_ops', amproclefttype => 'bpchar', + amprocrighttype => 'bpchar', amprocnum => '2', + amproc => 'bpchar_sortsupport' }, +{ amprocfamily => 'btree/bytea_ops', amproclefttype => 'bytea', + amprocrighttype => 'bytea', amprocnum => '1', amproc => 'byteacmp' }, +{ amprocfamily => 'btree/bytea_ops', amproclefttype => 'bytea', + amprocrighttype => 'bytea', amprocnum => '2', amproc => 'bytea_sortsupport' }, +{ amprocfamily => 'btree/char_ops', amproclefttype => 'char', + amprocrighttype => 'char', amprocnum => '1', amproc => 'btcharcmp' }, +{ amprocfamily => 'btree/datetime_ops', amproclefttype => 'date', + amprocrighttype => 'date', amprocnum => '1', amproc => 'date_cmp' }, +{ amprocfamily => 'btree/datetime_ops', amproclefttype => 'date', + amprocrighttype => 'date', amprocnum => '2', amproc => 'date_sortsupport' }, +{ amprocfamily => 'btree/datetime_ops', amproclefttype => 'date', + amprocrighttype => 'timestamp', amprocnum => '1', + amproc => 'date_cmp_timestamp' }, +{ amprocfamily => 'btree/datetime_ops', amproclefttype => 'date', + amprocrighttype => 'timestamptz', amprocnum => '1', + amproc => 'date_cmp_timestamptz' }, +{ amprocfamily => 'btree/datetime_ops', amproclefttype => 'timestamp', + amprocrighttype => 'timestamp', amprocnum => '1', amproc => 'timestamp_cmp' }, +{ amprocfamily => 'btree/datetime_ops', amproclefttype => 'timestamp', + amprocrighttype => 'timestamp', amprocnum => '2', + amproc => 'timestamp_sortsupport' }, +{ amprocfamily => 'btree/datetime_ops', amproclefttype => 'timestamp', + amprocrighttype => 'date', amprocnum => '1', amproc => 'timestamp_cmp_date' }, +{ amprocfamily => 'btree/datetime_ops', amproclefttype => 'timestamp', + amprocrighttype => 'timestamptz', amprocnum => '1', + amproc => 'timestamp_cmp_timestamptz' }, +{ amprocfamily => 'btree/datetime_ops', amproclefttype => 'timestamptz', + amprocrighttype => 'timestamptz', amprocnum => '1', + amproc => 'timestamptz_cmp' }, +{ amprocfamily => 'btree/datetime_ops', amproclefttype => 'timestamptz', + amprocrighttype => 'timestamptz', amprocnum => '2', + amproc => 'timestamp_sortsupport' }, +{ amprocfamily => 'btree/datetime_ops', amproclefttype => 'timestamptz', + amprocrighttype => 'date', amprocnum => '1', + amproc => 'timestamptz_cmp_date' }, +{ amprocfamily => 'btree/datetime_ops', amproclefttype => 'timestamptz', + amprocrighttype => 'timestamp', amprocnum => '1', + amproc => 'timestamptz_cmp_timestamp' }, +{ amprocfamily => 'btree/datetime_ops', amproclefttype => 'date', + amprocrighttype => 'interval', amprocnum => '3', + amproc => 'in_range(date,date,interval,bool,bool)' }, +{ amprocfamily => 'btree/datetime_ops', amproclefttype => 'timestamp', + 
amprocrighttype => 'interval', amprocnum => '3', + amproc => 'in_range(timestamp,timestamp,interval,bool,bool)' }, +{ amprocfamily => 'btree/datetime_ops', amproclefttype => 'timestamptz', + amprocrighttype => 'interval', amprocnum => '3', + amproc => 'in_range(timestamptz,timestamptz,interval,bool,bool)' }, +{ amprocfamily => 'btree/float_ops', amproclefttype => 'float4', + amprocrighttype => 'float4', amprocnum => '1', amproc => 'btfloat4cmp' }, +{ amprocfamily => 'btree/float_ops', amproclefttype => 'float4', + amprocrighttype => 'float4', amprocnum => '2', + amproc => 'btfloat4sortsupport' }, +{ amprocfamily => 'btree/float_ops', amproclefttype => 'float4', + amprocrighttype => 'float8', amprocnum => '1', amproc => 'btfloat48cmp' }, +{ amprocfamily => 'btree/float_ops', amproclefttype => 'float8', + amprocrighttype => 'float8', amprocnum => '1', amproc => 'btfloat8cmp' }, +{ amprocfamily => 'btree/float_ops', amproclefttype => 'float8', + amprocrighttype => 'float8', amprocnum => '2', + amproc => 'btfloat8sortsupport' }, +{ amprocfamily => 'btree/float_ops', amproclefttype => 'float8', + amprocrighttype => 'float4', amprocnum => '1', amproc => 'btfloat84cmp' }, +{ amprocfamily => 'btree/float_ops', amproclefttype => 'float8', + amprocrighttype => 'float8', amprocnum => '3', + amproc => 'in_range(float8,float8,float8,bool,bool)' }, +{ amprocfamily => 'btree/float_ops', amproclefttype => 'float4', + amprocrighttype => 'float8', amprocnum => '3', + amproc => 'in_range(float4,float4,float8,bool,bool)' }, +{ amprocfamily => 'btree/network_ops', amproclefttype => 'inet', + amprocrighttype => 'inet', amprocnum => '1', amproc => 'network_cmp' }, +{ amprocfamily => 'btree/integer_ops', amproclefttype => 'int2', + amprocrighttype => 'int2', amprocnum => '1', amproc => 'btint2cmp' }, +{ amprocfamily => 'btree/integer_ops', amproclefttype => 'int2', + amprocrighttype => 'int2', amprocnum => '2', amproc => 'btint2sortsupport' }, +{ amprocfamily => 'btree/integer_ops', amproclefttype => 'int2', + amprocrighttype => 'int4', amprocnum => '1', amproc => 'btint24cmp' }, +{ amprocfamily => 'btree/integer_ops', amproclefttype => 'int2', + amprocrighttype => 'int8', amprocnum => '1', amproc => 'btint28cmp' }, +{ amprocfamily => 'btree/integer_ops', amproclefttype => 'int2', + amprocrighttype => 'int8', amprocnum => '3', + amproc => 'in_range(int2,int2,int8,bool,bool)' }, +{ amprocfamily => 'btree/integer_ops', amproclefttype => 'int2', + amprocrighttype => 'int4', amprocnum => '3', + amproc => 'in_range(int2,int2,int4,bool,bool)' }, +{ amprocfamily => 'btree/integer_ops', amproclefttype => 'int2', + amprocrighttype => 'int2', amprocnum => '3', + amproc => 'in_range(int2,int2,int2,bool,bool)' }, +{ amprocfamily => 'btree/integer_ops', amproclefttype => 'int4', + amprocrighttype => 'int4', amprocnum => '1', amproc => 'btint4cmp' }, +{ amprocfamily => 'btree/integer_ops', amproclefttype => 'int4', + amprocrighttype => 'int4', amprocnum => '2', amproc => 'btint4sortsupport' }, +{ amprocfamily => 'btree/integer_ops', amproclefttype => 'int4', + amprocrighttype => 'int8', amprocnum => '1', amproc => 'btint48cmp' }, +{ amprocfamily => 'btree/integer_ops', amproclefttype => 'int4', + amprocrighttype => 'int2', amprocnum => '1', amproc => 'btint42cmp' }, +{ amprocfamily => 'btree/integer_ops', amproclefttype => 'int4', + amprocrighttype => 'int8', amprocnum => '3', + amproc => 'in_range(int4,int4,int8,bool,bool)' }, +{ amprocfamily => 'btree/integer_ops', amproclefttype => 'int4', + amprocrighttype => 'int4', 
amprocnum => '3', + amproc => 'in_range(int4,int4,int4,bool,bool)' }, +{ amprocfamily => 'btree/integer_ops', amproclefttype => 'int4', + amprocrighttype => 'int2', amprocnum => '3', + amproc => 'in_range(int4,int4,int2,bool,bool)' }, +{ amprocfamily => 'btree/integer_ops', amproclefttype => 'int8', + amprocrighttype => 'int8', amprocnum => '1', amproc => 'btint8cmp' }, +{ amprocfamily => 'btree/integer_ops', amproclefttype => 'int8', + amprocrighttype => 'int8', amprocnum => '2', amproc => 'btint8sortsupport' }, +{ amprocfamily => 'btree/integer_ops', amproclefttype => 'int8', + amprocrighttype => 'int4', amprocnum => '1', amproc => 'btint84cmp' }, +{ amprocfamily => 'btree/integer_ops', amproclefttype => 'int8', + amprocrighttype => 'int2', amprocnum => '1', amproc => 'btint82cmp' }, +{ amprocfamily => 'btree/integer_ops', amproclefttype => 'int8', + amprocrighttype => 'int8', amprocnum => '3', + amproc => 'in_range(int8,int8,int8,bool,bool)' }, +{ amprocfamily => 'btree/interval_ops', amproclefttype => 'interval', + amprocrighttype => 'interval', amprocnum => '1', amproc => 'interval_cmp' }, +{ amprocfamily => 'btree/interval_ops', amproclefttype => 'interval', + amprocrighttype => 'interval', amprocnum => '3', + amproc => 'in_range(interval,interval,interval,bool,bool)' }, +{ amprocfamily => 'btree/macaddr_ops', amproclefttype => 'macaddr', + amprocrighttype => 'macaddr', amprocnum => '1', amproc => 'macaddr_cmp' }, +{ amprocfamily => 'btree/macaddr_ops', amproclefttype => 'macaddr', + amprocrighttype => 'macaddr', amprocnum => '2', + amproc => 'macaddr_sortsupport' }, +{ amprocfamily => 'btree/name_ops', amproclefttype => 'name', + amprocrighttype => 'name', amprocnum => '1', amproc => 'btnamecmp' }, +{ amprocfamily => 'btree/name_ops', amproclefttype => 'name', + amprocrighttype => 'name', amprocnum => '2', amproc => 'btnamesortsupport' }, +{ amprocfamily => 'btree/numeric_ops', amproclefttype => 'numeric', + amprocrighttype => 'numeric', amprocnum => '1', amproc => 'numeric_cmp' }, +{ amprocfamily => 'btree/numeric_ops', amproclefttype => 'numeric', + amprocrighttype => 'numeric', amprocnum => '2', + amproc => 'numeric_sortsupport' }, +{ amprocfamily => 'btree/numeric_ops', amproclefttype => 'numeric', + amprocrighttype => 'numeric', amprocnum => '3', + amproc => 'in_range(numeric,numeric,numeric,bool,bool)' }, +{ amprocfamily => 'btree/oid_ops', amproclefttype => 'oid', + amprocrighttype => 'oid', amprocnum => '1', amproc => 'btoidcmp' }, +{ amprocfamily => 'btree/oid_ops', amproclefttype => 'oid', + amprocrighttype => 'oid', amprocnum => '2', amproc => 'btoidsortsupport' }, +{ amprocfamily => 'btree/oidvector_ops', amproclefttype => 'oidvector', + amprocrighttype => 'oidvector', amprocnum => '1', + amproc => 'btoidvectorcmp' }, +{ amprocfamily => 'btree/text_ops', amproclefttype => 'text', + amprocrighttype => 'text', amprocnum => '1', amproc => 'bttextcmp' }, +{ amprocfamily => 'btree/text_ops', amproclefttype => 'text', + amprocrighttype => 'text', amprocnum => '2', amproc => 'bttextsortsupport' }, +{ amprocfamily => 'btree/time_ops', amproclefttype => 'time', + amprocrighttype => 'time', amprocnum => '1', amproc => 'time_cmp' }, +{ amprocfamily => 'btree/time_ops', amproclefttype => 'time', + amprocrighttype => 'interval', amprocnum => '3', + amproc => 'in_range(time,time,interval,bool,bool)' }, +{ amprocfamily => 'btree/timetz_ops', amproclefttype => 'timetz', + amprocrighttype => 'timetz', amprocnum => '1', amproc => 'timetz_cmp' }, +{ amprocfamily => 'btree/timetz_ops', 
amproclefttype => 'timetz', + amprocrighttype => 'interval', amprocnum => '3', + amproc => 'in_range(timetz,timetz,interval,bool,bool)' }, +{ amprocfamily => 'btree/varbit_ops', amproclefttype => 'varbit', + amprocrighttype => 'varbit', amprocnum => '1', amproc => 'varbitcmp' }, +{ amprocfamily => 'btree/text_pattern_ops', amproclefttype => 'text', + amprocrighttype => 'text', amprocnum => '1', amproc => 'bttext_pattern_cmp' }, +{ amprocfamily => 'btree/text_pattern_ops', amproclefttype => 'text', + amprocrighttype => 'text', amprocnum => '2', + amproc => 'bttext_pattern_sortsupport' }, +{ amprocfamily => 'btree/bpchar_pattern_ops', amproclefttype => 'bpchar', + amprocrighttype => 'bpchar', amprocnum => '1', + amproc => 'btbpchar_pattern_cmp' }, +{ amprocfamily => 'btree/bpchar_pattern_ops', amproclefttype => 'bpchar', + amprocrighttype => 'bpchar', amprocnum => '2', + amproc => 'btbpchar_pattern_sortsupport' }, +{ amprocfamily => 'btree/money_ops', amproclefttype => 'money', + amprocrighttype => 'money', amprocnum => '1', amproc => 'cash_cmp' }, +{ amprocfamily => 'btree/tid_ops', amproclefttype => 'tid', + amprocrighttype => 'tid', amprocnum => '1', amproc => 'bttidcmp' }, +{ amprocfamily => 'btree/uuid_ops', amproclefttype => 'uuid', + amprocrighttype => 'uuid', amprocnum => '1', amproc => 'uuid_cmp' }, +{ amprocfamily => 'btree/uuid_ops', amproclefttype => 'uuid', + amprocrighttype => 'uuid', amprocnum => '2', amproc => 'uuid_sortsupport' }, +{ amprocfamily => 'btree/record_ops', amproclefttype => 'record', + amprocrighttype => 'record', amprocnum => '1', amproc => 'btrecordcmp' }, +{ amprocfamily => 'btree/record_image_ops', amproclefttype => 'record', + amprocrighttype => 'record', amprocnum => '1', amproc => 'btrecordimagecmp' }, +{ amprocfamily => 'btree/pg_lsn_ops', amproclefttype => 'pg_lsn', + amprocrighttype => 'pg_lsn', amprocnum => '1', amproc => 'pg_lsn_cmp' }, +{ amprocfamily => 'btree/macaddr8_ops', amproclefttype => 'macaddr8', + amprocrighttype => 'macaddr8', amprocnum => '1', amproc => 'macaddr8_cmp' }, +{ amprocfamily => 'btree/enum_ops', amproclefttype => 'anyenum', + amprocrighttype => 'anyenum', amprocnum => '1', amproc => 'enum_cmp' }, +{ amprocfamily => 'btree/tsvector_ops', amproclefttype => 'tsvector', + amprocrighttype => 'tsvector', amprocnum => '1', amproc => 'tsvector_cmp' }, +{ amprocfamily => 'btree/tsquery_ops', amproclefttype => 'tsquery', + amprocrighttype => 'tsquery', amprocnum => '1', amproc => 'tsquery_cmp' }, +{ amprocfamily => 'btree/range_ops', amproclefttype => 'anyrange', + amprocrighttype => 'anyrange', amprocnum => '1', amproc => 'range_cmp' }, +{ amprocfamily => 'btree/jsonb_ops', amproclefttype => 'jsonb', + amprocrighttype => 'jsonb', amprocnum => '1', amproc => 'jsonb_cmp' }, + +# hash +{ amprocfamily => 'hash/bpchar_ops', amproclefttype => 'bpchar', + amprocrighttype => 'bpchar', amprocnum => '1', amproc => 'hashbpchar' }, +{ amprocfamily => 'hash/bpchar_ops', amproclefttype => 'bpchar', + amprocrighttype => 'bpchar', amprocnum => '2', + amproc => 'hashbpcharextended' }, +{ amprocfamily => 'hash/char_ops', amproclefttype => 'char', + amprocrighttype => 'char', amprocnum => '1', amproc => 'hashchar' }, +{ amprocfamily => 'hash/char_ops', amproclefttype => 'char', + amprocrighttype => 'char', amprocnum => '2', amproc => 'hashcharextended' }, +{ amprocfamily => 'hash/date_ops', amproclefttype => 'date', + amprocrighttype => 'date', amprocnum => '1', amproc => 'hashint4' }, +{ amprocfamily => 'hash/date_ops', amproclefttype => 'date', + 
amprocrighttype => 'date', amprocnum => '2', amproc => 'hashint4extended' }, +{ amprocfamily => 'hash/array_ops', amproclefttype => 'anyarray', + amprocrighttype => 'anyarray', amprocnum => '1', amproc => 'hash_array' }, +{ amprocfamily => 'hash/array_ops', amproclefttype => 'anyarray', + amprocrighttype => 'anyarray', amprocnum => '2', + amproc => 'hash_array_extended' }, +{ amprocfamily => 'hash/float_ops', amproclefttype => 'float4', + amprocrighttype => 'float4', amprocnum => '1', amproc => 'hashfloat4' }, +{ amprocfamily => 'hash/float_ops', amproclefttype => 'float4', + amprocrighttype => 'float4', amprocnum => '2', + amproc => 'hashfloat4extended' }, +{ amprocfamily => 'hash/float_ops', amproclefttype => 'float8', + amprocrighttype => 'float8', amprocnum => '1', amproc => 'hashfloat8' }, +{ amprocfamily => 'hash/float_ops', amproclefttype => 'float8', + amprocrighttype => 'float8', amprocnum => '2', + amproc => 'hashfloat8extended' }, +{ amprocfamily => 'hash/network_ops', amproclefttype => 'inet', + amprocrighttype => 'inet', amprocnum => '1', amproc => 'hashinet' }, +{ amprocfamily => 'hash/network_ops', amproclefttype => 'inet', + amprocrighttype => 'inet', amprocnum => '2', amproc => 'hashinetextended' }, +{ amprocfamily => 'hash/integer_ops', amproclefttype => 'int2', + amprocrighttype => 'int2', amprocnum => '1', amproc => 'hashint2' }, +{ amprocfamily => 'hash/integer_ops', amproclefttype => 'int2', + amprocrighttype => 'int2', amprocnum => '2', amproc => 'hashint2extended' }, +{ amprocfamily => 'hash/integer_ops', amproclefttype => 'int4', + amprocrighttype => 'int4', amprocnum => '1', amproc => 'hashint4' }, +{ amprocfamily => 'hash/integer_ops', amproclefttype => 'int4', + amprocrighttype => 'int4', amprocnum => '2', amproc => 'hashint4extended' }, +{ amprocfamily => 'hash/integer_ops', amproclefttype => 'int8', + amprocrighttype => 'int8', amprocnum => '1', amproc => 'hashint8' }, +{ amprocfamily => 'hash/integer_ops', amproclefttype => 'int8', + amprocrighttype => 'int8', amprocnum => '2', amproc => 'hashint8extended' }, +{ amprocfamily => 'hash/interval_ops', amproclefttype => 'interval', + amprocrighttype => 'interval', amprocnum => '1', amproc => 'interval_hash' }, +{ amprocfamily => 'hash/interval_ops', amproclefttype => 'interval', + amprocrighttype => 'interval', amprocnum => '2', + amproc => 'interval_hash_extended' }, +{ amprocfamily => 'hash/macaddr_ops', amproclefttype => 'macaddr', + amprocrighttype => 'macaddr', amprocnum => '1', amproc => 'hashmacaddr' }, +{ amprocfamily => 'hash/macaddr_ops', amproclefttype => 'macaddr', + amprocrighttype => 'macaddr', amprocnum => '2', + amproc => 'hashmacaddrextended' }, +{ amprocfamily => 'hash/name_ops', amproclefttype => 'name', + amprocrighttype => 'name', amprocnum => '1', amproc => 'hashname' }, +{ amprocfamily => 'hash/name_ops', amproclefttype => 'name', + amprocrighttype => 'name', amprocnum => '2', amproc => 'hashnameextended' }, +{ amprocfamily => 'hash/oid_ops', amproclefttype => 'oid', + amprocrighttype => 'oid', amprocnum => '1', amproc => 'hashoid' }, +{ amprocfamily => 'hash/oid_ops', amproclefttype => 'oid', + amprocrighttype => 'oid', amprocnum => '2', amproc => 'hashoidextended' }, +{ amprocfamily => 'hash/oidvector_ops', amproclefttype => 'oidvector', + amprocrighttype => 'oidvector', amprocnum => '1', amproc => 'hashoidvector' }, +{ amprocfamily => 'hash/oidvector_ops', amproclefttype => 'oidvector', + amprocrighttype => 'oidvector', amprocnum => '2', + amproc => 'hashoidvectorextended' }, +{ 
amprocfamily => 'hash/text_ops', amproclefttype => 'text', + amprocrighttype => 'text', amprocnum => '1', amproc => 'hashtext' }, +{ amprocfamily => 'hash/text_ops', amproclefttype => 'text', + amprocrighttype => 'text', amprocnum => '2', amproc => 'hashtextextended' }, +{ amprocfamily => 'hash/time_ops', amproclefttype => 'time', + amprocrighttype => 'time', amprocnum => '1', amproc => 'time_hash' }, +{ amprocfamily => 'hash/time_ops', amproclefttype => 'time', + amprocrighttype => 'time', amprocnum => '2', amproc => 'time_hash_extended' }, +{ amprocfamily => 'hash/numeric_ops', amproclefttype => 'numeric', + amprocrighttype => 'numeric', amprocnum => '1', amproc => 'hash_numeric' }, +{ amprocfamily => 'hash/numeric_ops', amproclefttype => 'numeric', + amprocrighttype => 'numeric', amprocnum => '2', + amproc => 'hash_numeric_extended' }, +{ amprocfamily => 'hash/timestamptz_ops', amproclefttype => 'timestamptz', + amprocrighttype => 'timestamptz', amprocnum => '1', + amproc => 'timestamp_hash' }, +{ amprocfamily => 'hash/timestamptz_ops', amproclefttype => 'timestamptz', + amprocrighttype => 'timestamptz', amprocnum => '2', + amproc => 'timestamp_hash_extended' }, +{ amprocfamily => 'hash/timetz_ops', amproclefttype => 'timetz', + amprocrighttype => 'timetz', amprocnum => '1', amproc => 'timetz_hash' }, +{ amprocfamily => 'hash/timetz_ops', amproclefttype => 'timetz', + amprocrighttype => 'timetz', amprocnum => '2', + amproc => 'timetz_hash_extended' }, +{ amprocfamily => 'hash/timestamp_ops', amproclefttype => 'timestamp', + amprocrighttype => 'timestamp', amprocnum => '1', + amproc => 'timestamp_hash' }, +{ amprocfamily => 'hash/timestamp_ops', amproclefttype => 'timestamp', + amprocrighttype => 'timestamp', amprocnum => '2', + amproc => 'timestamp_hash_extended' }, +{ amprocfamily => 'hash/bool_ops', amproclefttype => 'bool', + amprocrighttype => 'bool', amprocnum => '1', amproc => 'hashchar' }, +{ amprocfamily => 'hash/bool_ops', amproclefttype => 'bool', + amprocrighttype => 'bool', amprocnum => '2', amproc => 'hashcharextended' }, +{ amprocfamily => 'hash/bytea_ops', amproclefttype => 'bytea', + amprocrighttype => 'bytea', amprocnum => '1', amproc => 'hashvarlena' }, +{ amprocfamily => 'hash/bytea_ops', amproclefttype => 'bytea', + amprocrighttype => 'bytea', amprocnum => '2', + amproc => 'hashvarlenaextended' }, +{ amprocfamily => 'hash/xid_ops', amproclefttype => 'xid', + amprocrighttype => 'xid', amprocnum => '1', amproc => 'hashint4' }, +{ amprocfamily => 'hash/xid_ops', amproclefttype => 'xid', + amprocrighttype => 'xid', amprocnum => '2', amproc => 'hashint4extended' }, +{ amprocfamily => 'hash/cid_ops', amproclefttype => 'cid', + amprocrighttype => 'cid', amprocnum => '1', amproc => 'hashint4' }, +{ amprocfamily => 'hash/cid_ops', amproclefttype => 'cid', + amprocrighttype => 'cid', amprocnum => '2', amproc => 'hashint4extended' }, +{ amprocfamily => 'hash/text_pattern_ops', amproclefttype => 'text', + amprocrighttype => 'text', amprocnum => '1', amproc => 'hashtext' }, +{ amprocfamily => 'hash/text_pattern_ops', amproclefttype => 'text', + amprocrighttype => 'text', amprocnum => '2', amproc => 'hashtextextended' }, +{ amprocfamily => 'hash/bpchar_pattern_ops', amproclefttype => 'bpchar', + amprocrighttype => 'bpchar', amprocnum => '1', amproc => 'hashbpchar' }, +{ amprocfamily => 'hash/bpchar_pattern_ops', amproclefttype => 'bpchar', + amprocrighttype => 'bpchar', amprocnum => '2', + amproc => 'hashbpcharextended' }, +{ amprocfamily => 'hash/aclitem_ops', amproclefttype => 
'aclitem', + amprocrighttype => 'aclitem', amprocnum => '1', amproc => 'hash_aclitem' }, +{ amprocfamily => 'hash/aclitem_ops', amproclefttype => 'aclitem', + amprocrighttype => 'aclitem', amprocnum => '2', + amproc => 'hash_aclitem_extended' }, +{ amprocfamily => 'hash/uuid_ops', amproclefttype => 'uuid', + amprocrighttype => 'uuid', amprocnum => '1', amproc => 'uuid_hash' }, +{ amprocfamily => 'hash/uuid_ops', amproclefttype => 'uuid', + amprocrighttype => 'uuid', amprocnum => '2', amproc => 'uuid_hash_extended' }, +{ amprocfamily => 'hash/pg_lsn_ops', amproclefttype => 'pg_lsn', + amprocrighttype => 'pg_lsn', amprocnum => '1', amproc => 'pg_lsn_hash' }, +{ amprocfamily => 'hash/pg_lsn_ops', amproclefttype => 'pg_lsn', + amprocrighttype => 'pg_lsn', amprocnum => '2', + amproc => 'pg_lsn_hash_extended' }, +{ amprocfamily => 'hash/macaddr8_ops', amproclefttype => 'macaddr8', + amprocrighttype => 'macaddr8', amprocnum => '1', amproc => 'hashmacaddr8' }, +{ amprocfamily => 'hash/macaddr8_ops', amproclefttype => 'macaddr8', + amprocrighttype => 'macaddr8', amprocnum => '2', + amproc => 'hashmacaddr8extended' }, +{ amprocfamily => 'hash/enum_ops', amproclefttype => 'anyenum', + amprocrighttype => 'anyenum', amprocnum => '1', amproc => 'hashenum' }, +{ amprocfamily => 'hash/enum_ops', amproclefttype => 'anyenum', + amprocrighttype => 'anyenum', amprocnum => '2', + amproc => 'hashenumextended' }, +{ amprocfamily => 'hash/range_ops', amproclefttype => 'anyrange', + amprocrighttype => 'anyrange', amprocnum => '1', amproc => 'hash_range' }, +{ amprocfamily => 'hash/range_ops', amproclefttype => 'anyrange', + amprocrighttype => 'anyrange', amprocnum => '2', + amproc => 'hash_range_extended' }, +{ amprocfamily => 'hash/jsonb_ops', amproclefttype => 'jsonb', + amprocrighttype => 'jsonb', amprocnum => '1', amproc => 'jsonb_hash' }, +{ amprocfamily => 'hash/jsonb_ops', amproclefttype => 'jsonb', + amprocrighttype => 'jsonb', amprocnum => '2', + amproc => 'jsonb_hash_extended' }, + +# gist +{ amprocfamily => 'gist/point_ops', amproclefttype => 'point', + amprocrighttype => 'point', amprocnum => '1', + amproc => 'gist_point_consistent' }, +{ amprocfamily => 'gist/point_ops', amproclefttype => 'point', + amprocrighttype => 'point', amprocnum => '2', amproc => 'gist_box_union' }, +{ amprocfamily => 'gist/point_ops', amproclefttype => 'point', + amprocrighttype => 'point', amprocnum => '3', + amproc => 'gist_point_compress' }, +{ amprocfamily => 'gist/point_ops', amproclefttype => 'point', + amprocrighttype => 'point', amprocnum => '5', amproc => 'gist_box_penalty' }, +{ amprocfamily => 'gist/point_ops', amproclefttype => 'point', + amprocrighttype => 'point', amprocnum => '6', + amproc => 'gist_box_picksplit' }, +{ amprocfamily => 'gist/point_ops', amproclefttype => 'point', + amprocrighttype => 'point', amprocnum => '7', amproc => 'gist_box_same' }, +{ amprocfamily => 'gist/point_ops', amproclefttype => 'point', + amprocrighttype => 'point', amprocnum => '8', + amproc => 'gist_point_distance' }, +{ amprocfamily => 'gist/point_ops', amproclefttype => 'point', + amprocrighttype => 'point', amprocnum => '9', amproc => 'gist_point_fetch' }, +{ amprocfamily => 'gist/box_ops', amproclefttype => 'box', + amprocrighttype => 'box', amprocnum => '1', amproc => 'gist_box_consistent' }, +{ amprocfamily => 'gist/box_ops', amproclefttype => 'box', + amprocrighttype => 'box', amprocnum => '2', amproc => 'gist_box_union' }, +{ amprocfamily => 'gist/box_ops', amproclefttype => 'box', + amprocrighttype => 'box', amprocnum 
=> '5', amproc => 'gist_box_penalty' }, +{ amprocfamily => 'gist/box_ops', amproclefttype => 'box', + amprocrighttype => 'box', amprocnum => '6', amproc => 'gist_box_picksplit' }, +{ amprocfamily => 'gist/box_ops', amproclefttype => 'box', + amprocrighttype => 'box', amprocnum => '7', amproc => 'gist_box_same' }, +{ amprocfamily => 'gist/poly_ops', amproclefttype => 'polygon', + amprocrighttype => 'polygon', amprocnum => '1', + amproc => 'gist_poly_consistent' }, +{ amprocfamily => 'gist/poly_ops', amproclefttype => 'polygon', + amprocrighttype => 'polygon', amprocnum => '2', amproc => 'gist_box_union' }, +{ amprocfamily => 'gist/poly_ops', amproclefttype => 'polygon', + amprocrighttype => 'polygon', amprocnum => '3', + amproc => 'gist_poly_compress' }, +{ amprocfamily => 'gist/poly_ops', amproclefttype => 'polygon', + amprocrighttype => 'polygon', amprocnum => '5', + amproc => 'gist_box_penalty' }, +{ amprocfamily => 'gist/poly_ops', amproclefttype => 'polygon', + amprocrighttype => 'polygon', amprocnum => '6', + amproc => 'gist_box_picksplit' }, +{ amprocfamily => 'gist/poly_ops', amproclefttype => 'polygon', + amprocrighttype => 'polygon', amprocnum => '7', amproc => 'gist_box_same' }, +{ amprocfamily => 'gist/poly_ops', amproclefttype => 'polygon', + amprocrighttype => 'polygon', amprocnum => '8', + amproc => 'gist_poly_distance' }, +{ amprocfamily => 'gist/circle_ops', amproclefttype => 'circle', + amprocrighttype => 'circle', amprocnum => '1', + amproc => 'gist_circle_consistent' }, +{ amprocfamily => 'gist/circle_ops', amproclefttype => 'circle', + amprocrighttype => 'circle', amprocnum => '2', amproc => 'gist_box_union' }, +{ amprocfamily => 'gist/circle_ops', amproclefttype => 'circle', + amprocrighttype => 'circle', amprocnum => '3', + amproc => 'gist_circle_compress' }, +{ amprocfamily => 'gist/circle_ops', amproclefttype => 'circle', + amprocrighttype => 'circle', amprocnum => '5', amproc => 'gist_box_penalty' }, +{ amprocfamily => 'gist/circle_ops', amproclefttype => 'circle', + amprocrighttype => 'circle', amprocnum => '6', + amproc => 'gist_box_picksplit' }, +{ amprocfamily => 'gist/circle_ops', amproclefttype => 'circle', + amprocrighttype => 'circle', amprocnum => '7', amproc => 'gist_box_same' }, +{ amprocfamily => 'gist/circle_ops', amproclefttype => 'circle', + amprocrighttype => 'circle', amprocnum => '8', + amproc => 'gist_circle_distance' }, +{ amprocfamily => 'gist/tsvector_ops', amproclefttype => 'tsvector', + amprocrighttype => 'tsvector', amprocnum => '1', + amproc => 'gtsvector_consistent(internal,tsvector,int2,oid,internal)' }, +{ amprocfamily => 'gist/tsvector_ops', amproclefttype => 'tsvector', + amprocrighttype => 'tsvector', amprocnum => '2', + amproc => 'gtsvector_union' }, +{ amprocfamily => 'gist/tsvector_ops', amproclefttype => 'tsvector', + amprocrighttype => 'tsvector', amprocnum => '3', + amproc => 'gtsvector_compress' }, +{ amprocfamily => 'gist/tsvector_ops', amproclefttype => 'tsvector', + amprocrighttype => 'tsvector', amprocnum => '4', + amproc => 'gtsvector_decompress' }, +{ amprocfamily => 'gist/tsvector_ops', amproclefttype => 'tsvector', + amprocrighttype => 'tsvector', amprocnum => '5', + amproc => 'gtsvector_penalty' }, +{ amprocfamily => 'gist/tsvector_ops', amproclefttype => 'tsvector', + amprocrighttype => 'tsvector', amprocnum => '6', + amproc => 'gtsvector_picksplit' }, +{ amprocfamily => 'gist/tsvector_ops', amproclefttype => 'tsvector', + amprocrighttype => 'tsvector', amprocnum => '7', amproc => 'gtsvector_same' }, +{ amprocfamily 
=> 'gist/tsquery_ops', amproclefttype => 'tsquery', + amprocrighttype => 'tsquery', amprocnum => '1', + amproc => 'gtsquery_consistent(internal,tsquery,int2,oid,internal)' }, +{ amprocfamily => 'gist/tsquery_ops', amproclefttype => 'tsquery', + amprocrighttype => 'tsquery', amprocnum => '2', amproc => 'gtsquery_union' }, +{ amprocfamily => 'gist/tsquery_ops', amproclefttype => 'tsquery', + amprocrighttype => 'tsquery', amprocnum => '3', + amproc => 'gtsquery_compress' }, +{ amprocfamily => 'gist/tsquery_ops', amproclefttype => 'tsquery', + amprocrighttype => 'tsquery', amprocnum => '5', + amproc => 'gtsquery_penalty' }, +{ amprocfamily => 'gist/tsquery_ops', amproclefttype => 'tsquery', + amprocrighttype => 'tsquery', amprocnum => '6', + amproc => 'gtsquery_picksplit' }, +{ amprocfamily => 'gist/tsquery_ops', amproclefttype => 'tsquery', + amprocrighttype => 'tsquery', amprocnum => '7', amproc => 'gtsquery_same' }, +{ amprocfamily => 'gist/range_ops', amproclefttype => 'anyrange', + amprocrighttype => 'anyrange', amprocnum => '1', + amproc => 'range_gist_consistent' }, +{ amprocfamily => 'gist/range_ops', amproclefttype => 'anyrange', + amprocrighttype => 'anyrange', amprocnum => '2', + amproc => 'range_gist_union' }, +{ amprocfamily => 'gist/range_ops', amproclefttype => 'anyrange', + amprocrighttype => 'anyrange', amprocnum => '5', + amproc => 'range_gist_penalty' }, +{ amprocfamily => 'gist/range_ops', amproclefttype => 'anyrange', + amprocrighttype => 'anyrange', amprocnum => '6', + amproc => 'range_gist_picksplit' }, +{ amprocfamily => 'gist/range_ops', amproclefttype => 'anyrange', + amprocrighttype => 'anyrange', amprocnum => '7', + amproc => 'range_gist_same' }, +{ amprocfamily => 'gist/network_ops', amproclefttype => 'inet', + amprocrighttype => 'inet', amprocnum => '1', + amproc => 'inet_gist_consistent' }, +{ amprocfamily => 'gist/network_ops', amproclefttype => 'inet', + amprocrighttype => 'inet', amprocnum => '2', amproc => 'inet_gist_union' }, +{ amprocfamily => 'gist/network_ops', amproclefttype => 'inet', + amprocrighttype => 'inet', amprocnum => '3', amproc => 'inet_gist_compress' }, +{ amprocfamily => 'gist/network_ops', amproclefttype => 'inet', + amprocrighttype => 'inet', amprocnum => '5', amproc => 'inet_gist_penalty' }, +{ amprocfamily => 'gist/network_ops', amproclefttype => 'inet', + amprocrighttype => 'inet', amprocnum => '6', + amproc => 'inet_gist_picksplit' }, +{ amprocfamily => 'gist/network_ops', amproclefttype => 'inet', + amprocrighttype => 'inet', amprocnum => '7', amproc => 'inet_gist_same' }, +{ amprocfamily => 'gist/network_ops', amproclefttype => 'inet', + amprocrighttype => 'inet', amprocnum => '9', amproc => 'inet_gist_fetch' }, + +# gin +{ amprocfamily => 'gin/array_ops', amproclefttype => 'anyarray', + amprocrighttype => 'anyarray', amprocnum => '2', + amproc => 'ginarrayextract(anyarray,internal,internal)' }, +{ amprocfamily => 'gin/array_ops', amproclefttype => 'anyarray', + amprocrighttype => 'anyarray', amprocnum => '3', + amproc => 'ginqueryarrayextract' }, +{ amprocfamily => 'gin/array_ops', amproclefttype => 'anyarray', + amprocrighttype => 'anyarray', amprocnum => '4', + amproc => 'ginarrayconsistent' }, +{ amprocfamily => 'gin/array_ops', amproclefttype => 'anyarray', + amprocrighttype => 'anyarray', amprocnum => '6', + amproc => 'ginarraytriconsistent' }, +{ amprocfamily => 'gin/tsvector_ops', amproclefttype => 'tsvector', + amprocrighttype => 'tsvector', amprocnum => '1', + amproc => 'gin_cmp_tslexeme' }, +{ amprocfamily => 
'gin/tsvector_ops', amproclefttype => 'tsvector', + amprocrighttype => 'tsvector', amprocnum => '2', + amproc => 'gin_extract_tsvector(tsvector,internal,internal)' }, +{ amprocfamily => 'gin/tsvector_ops', amproclefttype => 'tsvector', + amprocrighttype => 'tsvector', amprocnum => '3', + amproc => 'gin_extract_tsquery(tsvector,internal,int2,internal,internal,internal,internal)' }, +{ amprocfamily => 'gin/tsvector_ops', amproclefttype => 'tsvector', + amprocrighttype => 'tsvector', amprocnum => '4', + amproc => 'gin_tsquery_consistent(internal,int2,tsvector,int4,internal,internal,internal,internal)' }, +{ amprocfamily => 'gin/tsvector_ops', amproclefttype => 'tsvector', + amprocrighttype => 'tsvector', amprocnum => '5', amproc => 'gin_cmp_prefix' }, +{ amprocfamily => 'gin/tsvector_ops', amproclefttype => 'tsvector', + amprocrighttype => 'tsvector', amprocnum => '6', + amproc => 'gin_tsquery_triconsistent' }, +{ amprocfamily => 'gin/jsonb_ops', amproclefttype => 'jsonb', + amprocrighttype => 'jsonb', amprocnum => '1', amproc => 'gin_compare_jsonb' }, +{ amprocfamily => 'gin/jsonb_ops', amproclefttype => 'jsonb', + amprocrighttype => 'jsonb', amprocnum => '2', amproc => 'gin_extract_jsonb' }, +{ amprocfamily => 'gin/jsonb_ops', amproclefttype => 'jsonb', + amprocrighttype => 'jsonb', amprocnum => '3', + amproc => 'gin_extract_jsonb_query' }, +{ amprocfamily => 'gin/jsonb_ops', amproclefttype => 'jsonb', + amprocrighttype => 'jsonb', amprocnum => '4', + amproc => 'gin_consistent_jsonb' }, +{ amprocfamily => 'gin/jsonb_ops', amproclefttype => 'jsonb', + amprocrighttype => 'jsonb', amprocnum => '6', + amproc => 'gin_triconsistent_jsonb' }, +{ amprocfamily => 'gin/jsonb_path_ops', amproclefttype => 'jsonb', + amprocrighttype => 'jsonb', amprocnum => '1', amproc => 'btint4cmp' }, +{ amprocfamily => 'gin/jsonb_path_ops', amproclefttype => 'jsonb', + amprocrighttype => 'jsonb', amprocnum => '2', + amproc => 'gin_extract_jsonb_path' }, +{ amprocfamily => 'gin/jsonb_path_ops', amproclefttype => 'jsonb', + amprocrighttype => 'jsonb', amprocnum => '3', + amproc => 'gin_extract_jsonb_query_path' }, +{ amprocfamily => 'gin/jsonb_path_ops', amproclefttype => 'jsonb', + amprocrighttype => 'jsonb', amprocnum => '4', + amproc => 'gin_consistent_jsonb_path' }, +{ amprocfamily => 'gin/jsonb_path_ops', amproclefttype => 'jsonb', + amprocrighttype => 'jsonb', amprocnum => '6', + amproc => 'gin_triconsistent_jsonb_path' }, + +# sp-gist +{ amprocfamily => 'spgist/range_ops', amproclefttype => 'anyrange', + amprocrighttype => 'anyrange', amprocnum => '1', + amproc => 'spg_range_quad_config' }, +{ amprocfamily => 'spgist/range_ops', amproclefttype => 'anyrange', + amprocrighttype => 'anyrange', amprocnum => '2', + amproc => 'spg_range_quad_choose' }, +{ amprocfamily => 'spgist/range_ops', amproclefttype => 'anyrange', + amprocrighttype => 'anyrange', amprocnum => '3', + amproc => 'spg_range_quad_picksplit' }, +{ amprocfamily => 'spgist/range_ops', amproclefttype => 'anyrange', + amprocrighttype => 'anyrange', amprocnum => '4', + amproc => 'spg_range_quad_inner_consistent' }, +{ amprocfamily => 'spgist/range_ops', amproclefttype => 'anyrange', + amprocrighttype => 'anyrange', amprocnum => '5', + amproc => 'spg_range_quad_leaf_consistent' }, +{ amprocfamily => 'spgist/network_ops', amproclefttype => 'inet', + amprocrighttype => 'inet', amprocnum => '1', amproc => 'inet_spg_config' }, +{ amprocfamily => 'spgist/network_ops', amproclefttype => 'inet', + amprocrighttype => 'inet', amprocnum => '2', amproc => 
'inet_spg_choose' }, +{ amprocfamily => 'spgist/network_ops', amproclefttype => 'inet', + amprocrighttype => 'inet', amprocnum => '3', amproc => 'inet_spg_picksplit' }, +{ amprocfamily => 'spgist/network_ops', amproclefttype => 'inet', + amprocrighttype => 'inet', amprocnum => '4', + amproc => 'inet_spg_inner_consistent' }, +{ amprocfamily => 'spgist/network_ops', amproclefttype => 'inet', + amprocrighttype => 'inet', amprocnum => '5', + amproc => 'inet_spg_leaf_consistent' }, +{ amprocfamily => 'spgist/quad_point_ops', amproclefttype => 'point', + amprocrighttype => 'point', amprocnum => '1', amproc => 'spg_quad_config' }, +{ amprocfamily => 'spgist/quad_point_ops', amproclefttype => 'point', + amprocrighttype => 'point', amprocnum => '2', amproc => 'spg_quad_choose' }, +{ amprocfamily => 'spgist/quad_point_ops', amproclefttype => 'point', + amprocrighttype => 'point', amprocnum => '3', + amproc => 'spg_quad_picksplit' }, +{ amprocfamily => 'spgist/quad_point_ops', amproclefttype => 'point', + amprocrighttype => 'point', amprocnum => '4', + amproc => 'spg_quad_inner_consistent' }, +{ amprocfamily => 'spgist/quad_point_ops', amproclefttype => 'point', + amprocrighttype => 'point', amprocnum => '5', + amproc => 'spg_quad_leaf_consistent' }, +{ amprocfamily => 'spgist/kd_point_ops', amproclefttype => 'point', + amprocrighttype => 'point', amprocnum => '1', amproc => 'spg_kd_config' }, +{ amprocfamily => 'spgist/kd_point_ops', amproclefttype => 'point', + amprocrighttype => 'point', amprocnum => '2', amproc => 'spg_kd_choose' }, +{ amprocfamily => 'spgist/kd_point_ops', amproclefttype => 'point', + amprocrighttype => 'point', amprocnum => '3', amproc => 'spg_kd_picksplit' }, +{ amprocfamily => 'spgist/kd_point_ops', amproclefttype => 'point', + amprocrighttype => 'point', amprocnum => '4', + amproc => 'spg_kd_inner_consistent' }, +{ amprocfamily => 'spgist/kd_point_ops', amproclefttype => 'point', + amprocrighttype => 'point', amprocnum => '5', + amproc => 'spg_quad_leaf_consistent' }, +{ amprocfamily => 'spgist/text_ops', amproclefttype => 'text', + amprocrighttype => 'text', amprocnum => '1', amproc => 'spg_text_config' }, +{ amprocfamily => 'spgist/text_ops', amproclefttype => 'text', + amprocrighttype => 'text', amprocnum => '2', amproc => 'spg_text_choose' }, +{ amprocfamily => 'spgist/text_ops', amproclefttype => 'text', + amprocrighttype => 'text', amprocnum => '3', amproc => 'spg_text_picksplit' }, +{ amprocfamily => 'spgist/text_ops', amproclefttype => 'text', + amprocrighttype => 'text', amprocnum => '4', + amproc => 'spg_text_inner_consistent' }, +{ amprocfamily => 'spgist/text_ops', amproclefttype => 'text', + amprocrighttype => 'text', amprocnum => '5', + amproc => 'spg_text_leaf_consistent' }, +{ amprocfamily => 'spgist/box_ops', amproclefttype => 'box', + amprocrighttype => 'box', amprocnum => '1', amproc => 'spg_box_quad_config' }, +{ amprocfamily => 'spgist/box_ops', amproclefttype => 'box', + amprocrighttype => 'box', amprocnum => '2', amproc => 'spg_box_quad_choose' }, +{ amprocfamily => 'spgist/box_ops', amproclefttype => 'box', + amprocrighttype => 'box', amprocnum => '3', + amproc => 'spg_box_quad_picksplit' }, +{ amprocfamily => 'spgist/box_ops', amproclefttype => 'box', + amprocrighttype => 'box', amprocnum => '4', + amproc => 'spg_box_quad_inner_consistent' }, +{ amprocfamily => 'spgist/box_ops', amproclefttype => 'box', + amprocrighttype => 'box', amprocnum => '5', + amproc => 'spg_box_quad_leaf_consistent' }, +{ amprocfamily => 'spgist/poly_ops', amproclefttype => 
'polygon', + amprocrighttype => 'polygon', amprocnum => '1', + amproc => 'spg_bbox_quad_config' }, +{ amprocfamily => 'spgist/poly_ops', amproclefttype => 'polygon', + amprocrighttype => 'polygon', amprocnum => '2', + amproc => 'spg_box_quad_choose' }, +{ amprocfamily => 'spgist/poly_ops', amproclefttype => 'polygon', + amprocrighttype => 'polygon', amprocnum => '3', + amproc => 'spg_box_quad_picksplit' }, +{ amprocfamily => 'spgist/poly_ops', amproclefttype => 'polygon', + amprocrighttype => 'polygon', amprocnum => '4', + amproc => 'spg_box_quad_inner_consistent' }, +{ amprocfamily => 'spgist/poly_ops', amproclefttype => 'polygon', + amprocrighttype => 'polygon', amprocnum => '5', + amproc => 'spg_box_quad_leaf_consistent' }, +{ amprocfamily => 'spgist/poly_ops', amproclefttype => 'polygon', + amprocrighttype => 'polygon', amprocnum => '6', + amproc => 'spg_poly_quad_compress' }, + +# BRIN opclasses + +# minmax bytea +{ amprocfamily => 'brin/bytea_minmax_ops', amproclefttype => 'bytea', + amprocrighttype => 'bytea', amprocnum => '1', + amproc => 'brin_minmax_opcinfo' }, +{ amprocfamily => 'brin/bytea_minmax_ops', amproclefttype => 'bytea', + amprocrighttype => 'bytea', amprocnum => '2', + amproc => 'brin_minmax_add_value' }, +{ amprocfamily => 'brin/bytea_minmax_ops', amproclefttype => 'bytea', + amprocrighttype => 'bytea', amprocnum => '3', + amproc => 'brin_minmax_consistent' }, +{ amprocfamily => 'brin/bytea_minmax_ops', amproclefttype => 'bytea', + amprocrighttype => 'bytea', amprocnum => '4', amproc => 'brin_minmax_union' }, + +# minmax "char" +{ amprocfamily => 'brin/char_minmax_ops', amproclefttype => 'char', + amprocrighttype => 'char', amprocnum => '1', + amproc => 'brin_minmax_opcinfo' }, +{ amprocfamily => 'brin/char_minmax_ops', amproclefttype => 'char', + amprocrighttype => 'char', amprocnum => '2', + amproc => 'brin_minmax_add_value' }, +{ amprocfamily => 'brin/char_minmax_ops', amproclefttype => 'char', + amprocrighttype => 'char', amprocnum => '3', + amproc => 'brin_minmax_consistent' }, +{ amprocfamily => 'brin/char_minmax_ops', amproclefttype => 'char', + amprocrighttype => 'char', amprocnum => '4', amproc => 'brin_minmax_union' }, + +# minmax name +{ amprocfamily => 'brin/name_minmax_ops', amproclefttype => 'name', + amprocrighttype => 'name', amprocnum => '1', + amproc => 'brin_minmax_opcinfo' }, +{ amprocfamily => 'brin/name_minmax_ops', amproclefttype => 'name', + amprocrighttype => 'name', amprocnum => '2', + amproc => 'brin_minmax_add_value' }, +{ amprocfamily => 'brin/name_minmax_ops', amproclefttype => 'name', + amprocrighttype => 'name', amprocnum => '3', + amproc => 'brin_minmax_consistent' }, +{ amprocfamily => 'brin/name_minmax_ops', amproclefttype => 'name', + amprocrighttype => 'name', amprocnum => '4', amproc => 'brin_minmax_union' }, + +# minmax integer: int2, int4, int8 +{ amprocfamily => 'brin/integer_minmax_ops', amproclefttype => 'int8', + amprocrighttype => 'int8', amprocnum => '1', + amproc => 'brin_minmax_opcinfo' }, +{ amprocfamily => 'brin/integer_minmax_ops', amproclefttype => 'int8', + amprocrighttype => 'int8', amprocnum => '2', + amproc => 'brin_minmax_add_value' }, +{ amprocfamily => 'brin/integer_minmax_ops', amproclefttype => 'int8', + amprocrighttype => 'int8', amprocnum => '3', + amproc => 'brin_minmax_consistent' }, +{ amprocfamily => 'brin/integer_minmax_ops', amproclefttype => 'int8', + amprocrighttype => 'int8', amprocnum => '4', amproc => 'brin_minmax_union' }, +{ amprocfamily => 'brin/integer_minmax_ops', amproclefttype => 'int8', 
+ amprocrighttype => 'int2', amprocnum => '1', + amproc => 'brin_minmax_opcinfo' }, +{ amprocfamily => 'brin/integer_minmax_ops', amproclefttype => 'int8', + amprocrighttype => 'int2', amprocnum => '2', + amproc => 'brin_minmax_add_value' }, +{ amprocfamily => 'brin/integer_minmax_ops', amproclefttype => 'int8', + amprocrighttype => 'int2', amprocnum => '3', + amproc => 'brin_minmax_consistent' }, +{ amprocfamily => 'brin/integer_minmax_ops', amproclefttype => 'int8', + amprocrighttype => 'int2', amprocnum => '4', amproc => 'brin_minmax_union' }, +{ amprocfamily => 'brin/integer_minmax_ops', amproclefttype => 'int8', + amprocrighttype => 'int4', amprocnum => '1', + amproc => 'brin_minmax_opcinfo' }, +{ amprocfamily => 'brin/integer_minmax_ops', amproclefttype => 'int8', + amprocrighttype => 'int4', amprocnum => '2', + amproc => 'brin_minmax_add_value' }, +{ amprocfamily => 'brin/integer_minmax_ops', amproclefttype => 'int8', + amprocrighttype => 'int4', amprocnum => '3', + amproc => 'brin_minmax_consistent' }, +{ amprocfamily => 'brin/integer_minmax_ops', amproclefttype => 'int8', + amprocrighttype => 'int4', amprocnum => '4', amproc => 'brin_minmax_union' }, +{ amprocfamily => 'brin/integer_minmax_ops', amproclefttype => 'int2', + amprocrighttype => 'int2', amprocnum => '1', + amproc => 'brin_minmax_opcinfo' }, +{ amprocfamily => 'brin/integer_minmax_ops', amproclefttype => 'int2', + amprocrighttype => 'int2', amprocnum => '2', + amproc => 'brin_minmax_add_value' }, +{ amprocfamily => 'brin/integer_minmax_ops', amproclefttype => 'int2', + amprocrighttype => 'int2', amprocnum => '3', + amproc => 'brin_minmax_consistent' }, +{ amprocfamily => 'brin/integer_minmax_ops', amproclefttype => 'int2', + amprocrighttype => 'int2', amprocnum => '4', amproc => 'brin_minmax_union' }, +{ amprocfamily => 'brin/integer_minmax_ops', amproclefttype => 'int2', + amprocrighttype => 'int8', amprocnum => '1', + amproc => 'brin_minmax_opcinfo' }, +{ amprocfamily => 'brin/integer_minmax_ops', amproclefttype => 'int2', + amprocrighttype => 'int8', amprocnum => '2', + amproc => 'brin_minmax_add_value' }, +{ amprocfamily => 'brin/integer_minmax_ops', amproclefttype => 'int2', + amprocrighttype => 'int8', amprocnum => '3', + amproc => 'brin_minmax_consistent' }, +{ amprocfamily => 'brin/integer_minmax_ops', amproclefttype => 'int2', + amprocrighttype => 'int8', amprocnum => '4', amproc => 'brin_minmax_union' }, +{ amprocfamily => 'brin/integer_minmax_ops', amproclefttype => 'int2', + amprocrighttype => 'int4', amprocnum => '1', + amproc => 'brin_minmax_opcinfo' }, +{ amprocfamily => 'brin/integer_minmax_ops', amproclefttype => 'int2', + amprocrighttype => 'int4', amprocnum => '2', + amproc => 'brin_minmax_add_value' }, +{ amprocfamily => 'brin/integer_minmax_ops', amproclefttype => 'int2', + amprocrighttype => 'int4', amprocnum => '3', + amproc => 'brin_minmax_consistent' }, +{ amprocfamily => 'brin/integer_minmax_ops', amproclefttype => 'int2', + amprocrighttype => 'int4', amprocnum => '4', amproc => 'brin_minmax_union' }, +{ amprocfamily => 'brin/integer_minmax_ops', amproclefttype => 'int4', + amprocrighttype => 'int4', amprocnum => '1', + amproc => 'brin_minmax_opcinfo' }, +{ amprocfamily => 'brin/integer_minmax_ops', amproclefttype => 'int4', + amprocrighttype => 'int4', amprocnum => '2', + amproc => 'brin_minmax_add_value' }, +{ amprocfamily => 'brin/integer_minmax_ops', amproclefttype => 'int4', + amprocrighttype => 'int4', amprocnum => '3', + amproc => 'brin_minmax_consistent' }, +{ amprocfamily => 
'brin/integer_minmax_ops', amproclefttype => 'int4', + amprocrighttype => 'int4', amprocnum => '4', amproc => 'brin_minmax_union' }, +{ amprocfamily => 'brin/integer_minmax_ops', amproclefttype => 'int4', + amprocrighttype => 'int8', amprocnum => '1', + amproc => 'brin_minmax_opcinfo' }, +{ amprocfamily => 'brin/integer_minmax_ops', amproclefttype => 'int4', + amprocrighttype => 'int8', amprocnum => '2', + amproc => 'brin_minmax_add_value' }, +{ amprocfamily => 'brin/integer_minmax_ops', amproclefttype => 'int4', + amprocrighttype => 'int8', amprocnum => '3', + amproc => 'brin_minmax_consistent' }, +{ amprocfamily => 'brin/integer_minmax_ops', amproclefttype => 'int4', + amprocrighttype => 'int8', amprocnum => '4', amproc => 'brin_minmax_union' }, +{ amprocfamily => 'brin/integer_minmax_ops', amproclefttype => 'int4', + amprocrighttype => 'int2', amprocnum => '1', + amproc => 'brin_minmax_opcinfo' }, +{ amprocfamily => 'brin/integer_minmax_ops', amproclefttype => 'int4', + amprocrighttype => 'int2', amprocnum => '2', + amproc => 'brin_minmax_add_value' }, +{ amprocfamily => 'brin/integer_minmax_ops', amproclefttype => 'int4', + amprocrighttype => 'int2', amprocnum => '3', + amproc => 'brin_minmax_consistent' }, +{ amprocfamily => 'brin/integer_minmax_ops', amproclefttype => 'int4', + amprocrighttype => 'int2', amprocnum => '4', amproc => 'brin_minmax_union' }, + +# minmax text +{ amprocfamily => 'brin/text_minmax_ops', amproclefttype => 'text', + amprocrighttype => 'text', amprocnum => '1', + amproc => 'brin_minmax_opcinfo' }, +{ amprocfamily => 'brin/text_minmax_ops', amproclefttype => 'text', + amprocrighttype => 'text', amprocnum => '2', + amproc => 'brin_minmax_add_value' }, +{ amprocfamily => 'brin/text_minmax_ops', amproclefttype => 'text', + amprocrighttype => 'text', amprocnum => '3', + amproc => 'brin_minmax_consistent' }, +{ amprocfamily => 'brin/text_minmax_ops', amproclefttype => 'text', + amprocrighttype => 'text', amprocnum => '4', amproc => 'brin_minmax_union' }, + +# minmax oid +{ amprocfamily => 'brin/oid_minmax_ops', amproclefttype => 'oid', + amprocrighttype => 'oid', amprocnum => '1', amproc => 'brin_minmax_opcinfo' }, +{ amprocfamily => 'brin/oid_minmax_ops', amproclefttype => 'oid', + amprocrighttype => 'oid', amprocnum => '2', + amproc => 'brin_minmax_add_value' }, +{ amprocfamily => 'brin/oid_minmax_ops', amproclefttype => 'oid', + amprocrighttype => 'oid', amprocnum => '3', + amproc => 'brin_minmax_consistent' }, +{ amprocfamily => 'brin/oid_minmax_ops', amproclefttype => 'oid', + amprocrighttype => 'oid', amprocnum => '4', amproc => 'brin_minmax_union' }, + +# minmax tid +{ amprocfamily => 'brin/tid_minmax_ops', amproclefttype => 'tid', + amprocrighttype => 'tid', amprocnum => '1', amproc => 'brin_minmax_opcinfo' }, +{ amprocfamily => 'brin/tid_minmax_ops', amproclefttype => 'tid', + amprocrighttype => 'tid', amprocnum => '2', + amproc => 'brin_minmax_add_value' }, +{ amprocfamily => 'brin/tid_minmax_ops', amproclefttype => 'tid', + amprocrighttype => 'tid', amprocnum => '3', + amproc => 'brin_minmax_consistent' }, +{ amprocfamily => 'brin/tid_minmax_ops', amproclefttype => 'tid', + amprocrighttype => 'tid', amprocnum => '4', amproc => 'brin_minmax_union' }, + +# minmax float +{ amprocfamily => 'brin/float_minmax_ops', amproclefttype => 'float4', + amprocrighttype => 'float4', amprocnum => '1', + amproc => 'brin_minmax_opcinfo' }, +{ amprocfamily => 'brin/float_minmax_ops', amproclefttype => 'float4', + amprocrighttype => 'float4', amprocnum => '2', + amproc => 
'brin_minmax_add_value' }, +{ amprocfamily => 'brin/float_minmax_ops', amproclefttype => 'float4', + amprocrighttype => 'float4', amprocnum => '3', + amproc => 'brin_minmax_consistent' }, +{ amprocfamily => 'brin/float_minmax_ops', amproclefttype => 'float4', + amprocrighttype => 'float4', amprocnum => '4', + amproc => 'brin_minmax_union' }, +{ amprocfamily => 'brin/float_minmax_ops', amproclefttype => 'float4', + amprocrighttype => 'float8', amprocnum => '1', + amproc => 'brin_minmax_opcinfo' }, +{ amprocfamily => 'brin/float_minmax_ops', amproclefttype => 'float4', + amprocrighttype => 'float8', amprocnum => '2', + amproc => 'brin_minmax_add_value' }, +{ amprocfamily => 'brin/float_minmax_ops', amproclefttype => 'float4', + amprocrighttype => 'float8', amprocnum => '3', + amproc => 'brin_minmax_consistent' }, +{ amprocfamily => 'brin/float_minmax_ops', amproclefttype => 'float4', + amprocrighttype => 'float8', amprocnum => '4', + amproc => 'brin_minmax_union' }, +{ amprocfamily => 'brin/float_minmax_ops', amproclefttype => 'float8', + amprocrighttype => 'float8', amprocnum => '1', + amproc => 'brin_minmax_opcinfo' }, +{ amprocfamily => 'brin/float_minmax_ops', amproclefttype => 'float8', + amprocrighttype => 'float8', amprocnum => '2', + amproc => 'brin_minmax_add_value' }, +{ amprocfamily => 'brin/float_minmax_ops', amproclefttype => 'float8', + amprocrighttype => 'float8', amprocnum => '3', + amproc => 'brin_minmax_consistent' }, +{ amprocfamily => 'brin/float_minmax_ops', amproclefttype => 'float8', + amprocrighttype => 'float8', amprocnum => '4', + amproc => 'brin_minmax_union' }, +{ amprocfamily => 'brin/float_minmax_ops', amproclefttype => 'float8', + amprocrighttype => 'float4', amprocnum => '1', + amproc => 'brin_minmax_opcinfo' }, +{ amprocfamily => 'brin/float_minmax_ops', amproclefttype => 'float8', + amprocrighttype => 'float4', amprocnum => '2', + amproc => 'brin_minmax_add_value' }, +{ amprocfamily => 'brin/float_minmax_ops', amproclefttype => 'float8', + amprocrighttype => 'float4', amprocnum => '3', + amproc => 'brin_minmax_consistent' }, +{ amprocfamily => 'brin/float_minmax_ops', amproclefttype => 'float8', + amprocrighttype => 'float4', amprocnum => '4', + amproc => 'brin_minmax_union' }, + +# minmax macaddr +{ amprocfamily => 'brin/macaddr_minmax_ops', amproclefttype => 'macaddr', + amprocrighttype => 'macaddr', amprocnum => '1', + amproc => 'brin_minmax_opcinfo' }, +{ amprocfamily => 'brin/macaddr_minmax_ops', amproclefttype => 'macaddr', + amprocrighttype => 'macaddr', amprocnum => '2', + amproc => 'brin_minmax_add_value' }, +{ amprocfamily => 'brin/macaddr_minmax_ops', amproclefttype => 'macaddr', + amprocrighttype => 'macaddr', amprocnum => '3', + amproc => 'brin_minmax_consistent' }, +{ amprocfamily => 'brin/macaddr_minmax_ops', amproclefttype => 'macaddr', + amprocrighttype => 'macaddr', amprocnum => '4', + amproc => 'brin_minmax_union' }, + +# minmax macaddr8 +{ amprocfamily => 'brin/macaddr8_minmax_ops', amproclefttype => 'macaddr8', + amprocrighttype => 'macaddr8', amprocnum => '1', + amproc => 'brin_minmax_opcinfo' }, +{ amprocfamily => 'brin/macaddr8_minmax_ops', amproclefttype => 'macaddr8', + amprocrighttype => 'macaddr8', amprocnum => '2', + amproc => 'brin_minmax_add_value' }, +{ amprocfamily => 'brin/macaddr8_minmax_ops', amproclefttype => 'macaddr8', + amprocrighttype => 'macaddr8', amprocnum => '3', + amproc => 'brin_minmax_consistent' }, +{ amprocfamily => 'brin/macaddr8_minmax_ops', amproclefttype => 'macaddr8', + amprocrighttype => 'macaddr8', 
amprocnum => '4', + amproc => 'brin_minmax_union' }, + +# minmax inet +{ amprocfamily => 'brin/network_minmax_ops', amproclefttype => 'inet', + amprocrighttype => 'inet', amprocnum => '1', + amproc => 'brin_minmax_opcinfo' }, +{ amprocfamily => 'brin/network_minmax_ops', amproclefttype => 'inet', + amprocrighttype => 'inet', amprocnum => '2', + amproc => 'brin_minmax_add_value' }, +{ amprocfamily => 'brin/network_minmax_ops', amproclefttype => 'inet', + amprocrighttype => 'inet', amprocnum => '3', + amproc => 'brin_minmax_consistent' }, +{ amprocfamily => 'brin/network_minmax_ops', amproclefttype => 'inet', + amprocrighttype => 'inet', amprocnum => '4', amproc => 'brin_minmax_union' }, + +# inclusion inet +{ amprocfamily => 'brin/network_inclusion_ops', amproclefttype => 'inet', + amprocrighttype => 'inet', amprocnum => '1', + amproc => 'brin_inclusion_opcinfo' }, +{ amprocfamily => 'brin/network_inclusion_ops', amproclefttype => 'inet', + amprocrighttype => 'inet', amprocnum => '2', + amproc => 'brin_inclusion_add_value' }, +{ amprocfamily => 'brin/network_inclusion_ops', amproclefttype => 'inet', + amprocrighttype => 'inet', amprocnum => '3', + amproc => 'brin_inclusion_consistent' }, +{ amprocfamily => 'brin/network_inclusion_ops', amproclefttype => 'inet', + amprocrighttype => 'inet', amprocnum => '4', + amproc => 'brin_inclusion_union' }, +{ amprocfamily => 'brin/network_inclusion_ops', amproclefttype => 'inet', + amprocrighttype => 'inet', amprocnum => '11', amproc => 'inet_merge' }, +{ amprocfamily => 'brin/network_inclusion_ops', amproclefttype => 'inet', + amprocrighttype => 'inet', amprocnum => '12', amproc => 'inet_same_family' }, +{ amprocfamily => 'brin/network_inclusion_ops', amproclefttype => 'inet', + amprocrighttype => 'inet', amprocnum => '13', amproc => 'network_supeq' }, + +# minmax character +{ amprocfamily => 'brin/bpchar_minmax_ops', amproclefttype => 'bpchar', + amprocrighttype => 'bpchar', amprocnum => '1', + amproc => 'brin_minmax_opcinfo' }, +{ amprocfamily => 'brin/bpchar_minmax_ops', amproclefttype => 'bpchar', + amprocrighttype => 'bpchar', amprocnum => '2', + amproc => 'brin_minmax_add_value' }, +{ amprocfamily => 'brin/bpchar_minmax_ops', amproclefttype => 'bpchar', + amprocrighttype => 'bpchar', amprocnum => '3', + amproc => 'brin_minmax_consistent' }, +{ amprocfamily => 'brin/bpchar_minmax_ops', amproclefttype => 'bpchar', + amprocrighttype => 'bpchar', amprocnum => '4', + amproc => 'brin_minmax_union' }, + +# minmax time without time zone +{ amprocfamily => 'brin/time_minmax_ops', amproclefttype => 'time', + amprocrighttype => 'time', amprocnum => '1', + amproc => 'brin_minmax_opcinfo' }, +{ amprocfamily => 'brin/time_minmax_ops', amproclefttype => 'time', + amprocrighttype => 'time', amprocnum => '2', + amproc => 'brin_minmax_add_value' }, +{ amprocfamily => 'brin/time_minmax_ops', amproclefttype => 'time', + amprocrighttype => 'time', amprocnum => '3', + amproc => 'brin_minmax_consistent' }, +{ amprocfamily => 'brin/time_minmax_ops', amproclefttype => 'time', + amprocrighttype => 'time', amprocnum => '4', amproc => 'brin_minmax_union' }, + +# minmax datetime (date, timestamp, timestamptz) +{ amprocfamily => 'brin/datetime_minmax_ops', amproclefttype => 'timestamp', + amprocrighttype => 'timestamp', amprocnum => '1', + amproc => 'brin_minmax_opcinfo' }, +{ amprocfamily => 'brin/datetime_minmax_ops', amproclefttype => 'timestamp', + amprocrighttype => 'timestamp', amprocnum => '2', + amproc => 'brin_minmax_add_value' }, +{ amprocfamily => 
'brin/datetime_minmax_ops', amproclefttype => 'timestamp', + amprocrighttype => 'timestamp', amprocnum => '3', + amproc => 'brin_minmax_consistent' }, +{ amprocfamily => 'brin/datetime_minmax_ops', amproclefttype => 'timestamp', + amprocrighttype => 'timestamp', amprocnum => '4', + amproc => 'brin_minmax_union' }, +{ amprocfamily => 'brin/datetime_minmax_ops', amproclefttype => 'timestamp', + amprocrighttype => 'timestamptz', amprocnum => '1', + amproc => 'brin_minmax_opcinfo' }, +{ amprocfamily => 'brin/datetime_minmax_ops', amproclefttype => 'timestamp', + amprocrighttype => 'timestamptz', amprocnum => '2', + amproc => 'brin_minmax_add_value' }, +{ amprocfamily => 'brin/datetime_minmax_ops', amproclefttype => 'timestamp', + amprocrighttype => 'timestamptz', amprocnum => '3', + amproc => 'brin_minmax_consistent' }, +{ amprocfamily => 'brin/datetime_minmax_ops', amproclefttype => 'timestamp', + amprocrighttype => 'timestamptz', amprocnum => '4', + amproc => 'brin_minmax_union' }, +{ amprocfamily => 'brin/datetime_minmax_ops', amproclefttype => 'timestamp', + amprocrighttype => 'date', amprocnum => '1', + amproc => 'brin_minmax_opcinfo' }, +{ amprocfamily => 'brin/datetime_minmax_ops', amproclefttype => 'timestamp', + amprocrighttype => 'date', amprocnum => '2', + amproc => 'brin_minmax_add_value' }, +{ amprocfamily => 'brin/datetime_minmax_ops', amproclefttype => 'timestamp', + amprocrighttype => 'date', amprocnum => '3', + amproc => 'brin_minmax_consistent' }, +{ amprocfamily => 'brin/datetime_minmax_ops', amproclefttype => 'timestamp', + amprocrighttype => 'date', amprocnum => '4', amproc => 'brin_minmax_union' }, +{ amprocfamily => 'brin/datetime_minmax_ops', amproclefttype => 'timestamptz', + amprocrighttype => 'timestamptz', amprocnum => '1', + amproc => 'brin_minmax_opcinfo' }, +{ amprocfamily => 'brin/datetime_minmax_ops', amproclefttype => 'timestamptz', + amprocrighttype => 'timestamptz', amprocnum => '2', + amproc => 'brin_minmax_add_value' }, +{ amprocfamily => 'brin/datetime_minmax_ops', amproclefttype => 'timestamptz', + amprocrighttype => 'timestamptz', amprocnum => '3', + amproc => 'brin_minmax_consistent' }, +{ amprocfamily => 'brin/datetime_minmax_ops', amproclefttype => 'timestamptz', + amprocrighttype => 'timestamptz', amprocnum => '4', + amproc => 'brin_minmax_union' }, +{ amprocfamily => 'brin/datetime_minmax_ops', amproclefttype => 'timestamptz', + amprocrighttype => 'timestamp', amprocnum => '1', + amproc => 'brin_minmax_opcinfo' }, +{ amprocfamily => 'brin/datetime_minmax_ops', amproclefttype => 'timestamptz', + amprocrighttype => 'timestamp', amprocnum => '2', + amproc => 'brin_minmax_add_value' }, +{ amprocfamily => 'brin/datetime_minmax_ops', amproclefttype => 'timestamptz', + amprocrighttype => 'timestamp', amprocnum => '3', + amproc => 'brin_minmax_consistent' }, +{ amprocfamily => 'brin/datetime_minmax_ops', amproclefttype => 'timestamptz', + amprocrighttype => 'timestamp', amprocnum => '4', + amproc => 'brin_minmax_union' }, +{ amprocfamily => 'brin/datetime_minmax_ops', amproclefttype => 'timestamptz', + amprocrighttype => 'date', amprocnum => '1', + amproc => 'brin_minmax_opcinfo' }, +{ amprocfamily => 'brin/datetime_minmax_ops', amproclefttype => 'timestamptz', + amprocrighttype => 'date', amprocnum => '2', + amproc => 'brin_minmax_add_value' }, +{ amprocfamily => 'brin/datetime_minmax_ops', amproclefttype => 'timestamptz', + amprocrighttype => 'date', amprocnum => '3', + amproc => 'brin_minmax_consistent' }, +{ amprocfamily => 'brin/datetime_minmax_ops', 
amproclefttype => 'timestamptz', + amprocrighttype => 'date', amprocnum => '4', amproc => 'brin_minmax_union' }, +{ amprocfamily => 'brin/datetime_minmax_ops', amproclefttype => 'date', + amprocrighttype => 'date', amprocnum => '1', + amproc => 'brin_minmax_opcinfo' }, +{ amprocfamily => 'brin/datetime_minmax_ops', amproclefttype => 'date', + amprocrighttype => 'date', amprocnum => '2', + amproc => 'brin_minmax_add_value' }, +{ amprocfamily => 'brin/datetime_minmax_ops', amproclefttype => 'date', + amprocrighttype => 'date', amprocnum => '3', + amproc => 'brin_minmax_consistent' }, +{ amprocfamily => 'brin/datetime_minmax_ops', amproclefttype => 'date', + amprocrighttype => 'date', amprocnum => '4', amproc => 'brin_minmax_union' }, +{ amprocfamily => 'brin/datetime_minmax_ops', amproclefttype => 'date', + amprocrighttype => 'timestamp', amprocnum => '1', + amproc => 'brin_minmax_opcinfo' }, +{ amprocfamily => 'brin/datetime_minmax_ops', amproclefttype => 'date', + amprocrighttype => 'timestamp', amprocnum => '2', + amproc => 'brin_minmax_add_value' }, +{ amprocfamily => 'brin/datetime_minmax_ops', amproclefttype => 'date', + amprocrighttype => 'timestamp', amprocnum => '3', + amproc => 'brin_minmax_consistent' }, +{ amprocfamily => 'brin/datetime_minmax_ops', amproclefttype => 'date', + amprocrighttype => 'timestamp', amprocnum => '4', + amproc => 'brin_minmax_union' }, +{ amprocfamily => 'brin/datetime_minmax_ops', amproclefttype => 'date', + amprocrighttype => 'timestamptz', amprocnum => '1', + amproc => 'brin_minmax_opcinfo' }, +{ amprocfamily => 'brin/datetime_minmax_ops', amproclefttype => 'date', + amprocrighttype => 'timestamptz', amprocnum => '2', + amproc => 'brin_minmax_add_value' }, +{ amprocfamily => 'brin/datetime_minmax_ops', amproclefttype => 'date', + amprocrighttype => 'timestamptz', amprocnum => '3', + amproc => 'brin_minmax_consistent' }, +{ amprocfamily => 'brin/datetime_minmax_ops', amproclefttype => 'date', + amprocrighttype => 'timestamptz', amprocnum => '4', + amproc => 'brin_minmax_union' }, + +# minmax interval +{ amprocfamily => 'brin/interval_minmax_ops', amproclefttype => 'interval', + amprocrighttype => 'interval', amprocnum => '1', + amproc => 'brin_minmax_opcinfo' }, +{ amprocfamily => 'brin/interval_minmax_ops', amproclefttype => 'interval', + amprocrighttype => 'interval', amprocnum => '2', + amproc => 'brin_minmax_add_value' }, +{ amprocfamily => 'brin/interval_minmax_ops', amproclefttype => 'interval', + amprocrighttype => 'interval', amprocnum => '3', + amproc => 'brin_minmax_consistent' }, +{ amprocfamily => 'brin/interval_minmax_ops', amproclefttype => 'interval', + amprocrighttype => 'interval', amprocnum => '4', + amproc => 'brin_minmax_union' }, + +# minmax time with time zone +{ amprocfamily => 'brin/timetz_minmax_ops', amproclefttype => 'timetz', + amprocrighttype => 'timetz', amprocnum => '1', + amproc => 'brin_minmax_opcinfo' }, +{ amprocfamily => 'brin/timetz_minmax_ops', amproclefttype => 'timetz', + amprocrighttype => 'timetz', amprocnum => '2', + amproc => 'brin_minmax_add_value' }, +{ amprocfamily => 'brin/timetz_minmax_ops', amproclefttype => 'timetz', + amprocrighttype => 'timetz', amprocnum => '3', + amproc => 'brin_minmax_consistent' }, +{ amprocfamily => 'brin/timetz_minmax_ops', amproclefttype => 'timetz', + amprocrighttype => 'timetz', amprocnum => '4', + amproc => 'brin_minmax_union' }, + +# minmax bit +{ amprocfamily => 'brin/bit_minmax_ops', amproclefttype => 'bit', + amprocrighttype => 'bit', amprocnum => '1', amproc => 
'brin_minmax_opcinfo' }, +{ amprocfamily => 'brin/bit_minmax_ops', amproclefttype => 'bit', + amprocrighttype => 'bit', amprocnum => '2', + amproc => 'brin_minmax_add_value' }, +{ amprocfamily => 'brin/bit_minmax_ops', amproclefttype => 'bit', + amprocrighttype => 'bit', amprocnum => '3', + amproc => 'brin_minmax_consistent' }, +{ amprocfamily => 'brin/bit_minmax_ops', amproclefttype => 'bit', + amprocrighttype => 'bit', amprocnum => '4', amproc => 'brin_minmax_union' }, + +# minmax bit varying +{ amprocfamily => 'brin/varbit_minmax_ops', amproclefttype => 'varbit', + amprocrighttype => 'varbit', amprocnum => '1', + amproc => 'brin_minmax_opcinfo' }, +{ amprocfamily => 'brin/varbit_minmax_ops', amproclefttype => 'varbit', + amprocrighttype => 'varbit', amprocnum => '2', + amproc => 'brin_minmax_add_value' }, +{ amprocfamily => 'brin/varbit_minmax_ops', amproclefttype => 'varbit', + amprocrighttype => 'varbit', amprocnum => '3', + amproc => 'brin_minmax_consistent' }, +{ amprocfamily => 'brin/varbit_minmax_ops', amproclefttype => 'varbit', + amprocrighttype => 'varbit', amprocnum => '4', + amproc => 'brin_minmax_union' }, + +# minmax numeric +{ amprocfamily => 'brin/numeric_minmax_ops', amproclefttype => 'numeric', + amprocrighttype => 'numeric', amprocnum => '1', + amproc => 'brin_minmax_opcinfo' }, +{ amprocfamily => 'brin/numeric_minmax_ops', amproclefttype => 'numeric', + amprocrighttype => 'numeric', amprocnum => '2', + amproc => 'brin_minmax_add_value' }, +{ amprocfamily => 'brin/numeric_minmax_ops', amproclefttype => 'numeric', + amprocrighttype => 'numeric', amprocnum => '3', + amproc => 'brin_minmax_consistent' }, +{ amprocfamily => 'brin/numeric_minmax_ops', amproclefttype => 'numeric', + amprocrighttype => 'numeric', amprocnum => '4', + amproc => 'brin_minmax_union' }, + +# minmax uuid +{ amprocfamily => 'brin/uuid_minmax_ops', amproclefttype => 'uuid', + amprocrighttype => 'uuid', amprocnum => '1', + amproc => 'brin_minmax_opcinfo' }, +{ amprocfamily => 'brin/uuid_minmax_ops', amproclefttype => 'uuid', + amprocrighttype => 'uuid', amprocnum => '2', + amproc => 'brin_minmax_add_value' }, +{ amprocfamily => 'brin/uuid_minmax_ops', amproclefttype => 'uuid', + amprocrighttype => 'uuid', amprocnum => '3', + amproc => 'brin_minmax_consistent' }, +{ amprocfamily => 'brin/uuid_minmax_ops', amproclefttype => 'uuid', + amprocrighttype => 'uuid', amprocnum => '4', amproc => 'brin_minmax_union' }, + +# inclusion range types +{ amprocfamily => 'brin/range_inclusion_ops', amproclefttype => 'anyrange', + amprocrighttype => 'anyrange', amprocnum => '1', + amproc => 'brin_inclusion_opcinfo' }, +{ amprocfamily => 'brin/range_inclusion_ops', amproclefttype => 'anyrange', + amprocrighttype => 'anyrange', amprocnum => '2', + amproc => 'brin_inclusion_add_value' }, +{ amprocfamily => 'brin/range_inclusion_ops', amproclefttype => 'anyrange', + amprocrighttype => 'anyrange', amprocnum => '3', + amproc => 'brin_inclusion_consistent' }, +{ amprocfamily => 'brin/range_inclusion_ops', amproclefttype => 'anyrange', + amprocrighttype => 'anyrange', amprocnum => '4', + amproc => 'brin_inclusion_union' }, +{ amprocfamily => 'brin/range_inclusion_ops', amproclefttype => 'anyrange', + amprocrighttype => 'anyrange', amprocnum => '11', amproc => 'range_merge' }, +{ amprocfamily => 'brin/range_inclusion_ops', amproclefttype => 'anyrange', + amprocrighttype => 'anyrange', amprocnum => '13', + amproc => 'range_contains' }, +{ amprocfamily => 'brin/range_inclusion_ops', amproclefttype => 'anyrange', + amprocrighttype 
=> 'anyrange', amprocnum => '14', amproc => 'isempty' }, + +# minmax pg_lsn +{ amprocfamily => 'brin/pg_lsn_minmax_ops', amproclefttype => 'pg_lsn', + amprocrighttype => 'pg_lsn', amprocnum => '1', + amproc => 'brin_minmax_opcinfo' }, +{ amprocfamily => 'brin/pg_lsn_minmax_ops', amproclefttype => 'pg_lsn', + amprocrighttype => 'pg_lsn', amprocnum => '2', + amproc => 'brin_minmax_add_value' }, +{ amprocfamily => 'brin/pg_lsn_minmax_ops', amproclefttype => 'pg_lsn', + amprocrighttype => 'pg_lsn', amprocnum => '3', + amproc => 'brin_minmax_consistent' }, +{ amprocfamily => 'brin/pg_lsn_minmax_ops', amproclefttype => 'pg_lsn', + amprocrighttype => 'pg_lsn', amprocnum => '4', + amproc => 'brin_minmax_union' }, + +# inclusion box +{ amprocfamily => 'brin/box_inclusion_ops', amproclefttype => 'box', + amprocrighttype => 'box', amprocnum => '1', + amproc => 'brin_inclusion_opcinfo' }, +{ amprocfamily => 'brin/box_inclusion_ops', amproclefttype => 'box', + amprocrighttype => 'box', amprocnum => '2', + amproc => 'brin_inclusion_add_value' }, +{ amprocfamily => 'brin/box_inclusion_ops', amproclefttype => 'box', + amprocrighttype => 'box', amprocnum => '3', + amproc => 'brin_inclusion_consistent' }, +{ amprocfamily => 'brin/box_inclusion_ops', amproclefttype => 'box', + amprocrighttype => 'box', amprocnum => '4', + amproc => 'brin_inclusion_union' }, +{ amprocfamily => 'brin/box_inclusion_ops', amproclefttype => 'box', + amprocrighttype => 'box', amprocnum => '11', amproc => 'bound_box' }, +{ amprocfamily => 'brin/box_inclusion_ops', amproclefttype => 'box', + amprocrighttype => 'box', amprocnum => '13', amproc => 'box_contain' }, + +] diff --git a/src/include/catalog/pg_amproc.h b/src/include/catalog/pg_amproc.h index 7d245b1271..c34c3faa8f 100644 --- a/src/include/catalog/pg_amproc.h +++ b/src/include/catalog/pg_amproc.h @@ -1,8 +1,7 @@ /*------------------------------------------------------------------------- * * pg_amproc.h - * definition of the system "amproc" relation (pg_amproc) - * along with the relation's initial contents. + * definition of the "access method procedure" system catalog (pg_amproc) * * The amproc table identifies support procedures associated with index * operator families and classes. These procedures can't be listed in pg_amop @@ -19,14 +18,14 @@ * some don't pay attention to non-default functions at all. * * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/include/catalog/pg_amproc.h * * NOTES - * the genbki.pl script reads this file and generates .bki - * information from the DATA() statements. + * The Catalog.pm module reads this file and derives schema + * information. * *------------------------------------------------------------------------- */ @@ -34,21 +33,29 @@ #define PG_AMPROC_H #include "catalog/genbki.h" +#include "catalog/pg_amproc_d.h" /* ---------------- * pg_amproc definition. 
cpp turns this into * typedef struct FormData_pg_amproc * ---------------- */ -#define AccessMethodProcedureRelationId 2603 - -CATALOG(pg_amproc,2603) +CATALOG(pg_amproc,2603,AccessMethodProcedureRelationId) { - Oid amprocfamily; /* the index opfamily this entry is for */ - Oid amproclefttype; /* procedure's left input data type */ - Oid amprocrighttype; /* procedure's right input data type */ - int16 amprocnum; /* support procedure index */ - regproc amproc; /* OID of the proc */ + /* the index opfamily this entry is for */ + Oid amprocfamily BKI_LOOKUP(pg_opfamily); + + /* procedure's left input data type */ + Oid amproclefttype BKI_LOOKUP(pg_type); + + /* procedure's right input data type */ + Oid amprocrighttype BKI_LOOKUP(pg_type); + + /* support procedure index */ + int16 amprocnum; + + /* OID of the proc */ + regproc amproc BKI_LOOKUP(pg_proc); } FormData_pg_amproc; /* ---------------- @@ -58,482 +65,4 @@ CATALOG(pg_amproc,2603) */ typedef FormData_pg_amproc *Form_pg_amproc; -/* ---------------- - * compiler constants for pg_amproc - * ---------------- - */ -#define Natts_pg_amproc 5 -#define Anum_pg_amproc_amprocfamily 1 -#define Anum_pg_amproc_amproclefttype 2 -#define Anum_pg_amproc_amprocrighttype 3 -#define Anum_pg_amproc_amprocnum 4 -#define Anum_pg_amproc_amproc 5 - -/* ---------------- - * initial contents of pg_amproc - * ---------------- - */ - -/* btree */ -DATA(insert ( 397 2277 2277 1 382 )); -DATA(insert ( 421 702 702 1 357 )); -DATA(insert ( 423 1560 1560 1 1596 )); -DATA(insert ( 424 16 16 1 1693 )); -DATA(insert ( 426 1042 1042 1 1078 )); -DATA(insert ( 426 1042 1042 2 3328 )); -DATA(insert ( 428 17 17 1 1954 )); -DATA(insert ( 428 17 17 2 3331 )); -DATA(insert ( 429 18 18 1 358 )); -DATA(insert ( 434 1082 1082 1 1092 )); -DATA(insert ( 434 1082 1082 2 3136 )); -DATA(insert ( 434 1082 1114 1 2344 )); -DATA(insert ( 434 1082 1184 1 2357 )); -DATA(insert ( 434 1114 1114 1 2045 )); -DATA(insert ( 434 1114 1114 2 3137 )); -DATA(insert ( 434 1114 1082 1 2370 )); -DATA(insert ( 434 1114 1184 1 2526 )); -DATA(insert ( 434 1184 1184 1 1314 )); -DATA(insert ( 434 1184 1184 2 3137 )); -DATA(insert ( 434 1184 1082 1 2383 )); -DATA(insert ( 434 1184 1114 1 2533 )); -DATA(insert ( 1970 700 700 1 354 )); -DATA(insert ( 1970 700 700 2 3132 )); -DATA(insert ( 1970 700 701 1 2194 )); -DATA(insert ( 1970 701 701 1 355 )); -DATA(insert ( 1970 701 701 2 3133 )); -DATA(insert ( 1970 701 700 1 2195 )); -DATA(insert ( 1974 869 869 1 926 )); -DATA(insert ( 1976 21 21 1 350 )); -DATA(insert ( 1976 21 21 2 3129 )); -DATA(insert ( 1976 21 23 1 2190 )); -DATA(insert ( 1976 21 20 1 2192 )); -DATA(insert ( 1976 23 23 1 351 )); -DATA(insert ( 1976 23 23 2 3130 )); -DATA(insert ( 1976 23 20 1 2188 )); -DATA(insert ( 1976 23 21 1 2191 )); -DATA(insert ( 1976 20 20 1 842 )); -DATA(insert ( 1976 20 20 2 3131 )); -DATA(insert ( 1976 20 23 1 2189 )); -DATA(insert ( 1976 20 21 1 2193 )); -DATA(insert ( 1982 1186 1186 1 1315 )); -DATA(insert ( 1984 829 829 1 836 )); -DATA(insert ( 1984 829 829 2 3359 )); -DATA(insert ( 1986 19 19 1 359 )); -DATA(insert ( 1986 19 19 2 3135 )); -DATA(insert ( 1988 1700 1700 1 1769 )); -DATA(insert ( 1988 1700 1700 2 3283 )); -DATA(insert ( 1989 26 26 1 356 )); -DATA(insert ( 1989 26 26 2 3134 )); -DATA(insert ( 1991 30 30 1 404 )); -DATA(insert ( 1994 25 25 1 360 )); -DATA(insert ( 1994 25 25 2 3255 )); -DATA(insert ( 1996 1083 1083 1 1107 )); -DATA(insert ( 2000 1266 1266 1 1358 )); -DATA(insert ( 2002 1562 1562 1 1672 )); -DATA(insert ( 2095 25 25 1 2166 )); -DATA(insert 
( 2095 25 25 2 3332 )); -DATA(insert ( 2097 1042 1042 1 2180 )); -DATA(insert ( 2097 1042 1042 2 3333 )); -DATA(insert ( 2099 790 790 1 377 )); -DATA(insert ( 2233 703 703 1 380 )); -DATA(insert ( 2234 704 704 1 381 )); -DATA(insert ( 2789 27 27 1 2794 )); -DATA(insert ( 2968 2950 2950 1 2960 )); -DATA(insert ( 2968 2950 2950 2 3300 )); -DATA(insert ( 2994 2249 2249 1 2987 )); -DATA(insert ( 3194 2249 2249 1 3187 )); -DATA(insert ( 3253 3220 3220 1 3251 )); -DATA(insert ( 3371 774 774 1 4119 )); -DATA(insert ( 3522 3500 3500 1 3514 )); -DATA(insert ( 3626 3614 3614 1 3622 )); -DATA(insert ( 3683 3615 3615 1 3668 )); -DATA(insert ( 3901 3831 3831 1 3870 )); -DATA(insert ( 4033 3802 3802 1 4044 )); - - -/* hash */ -DATA(insert ( 427 1042 1042 1 1080 )); -DATA(insert ( 431 18 18 1 454 )); -DATA(insert ( 435 1082 1082 1 450 )); -DATA(insert ( 627 2277 2277 1 626 )); -DATA(insert ( 1971 700 700 1 451 )); -DATA(insert ( 1971 701 701 1 452 )); -DATA(insert ( 1975 869 869 1 422 )); -DATA(insert ( 1977 21 21 1 449 )); -DATA(insert ( 1977 23 23 1 450 )); -DATA(insert ( 1977 20 20 1 949 )); -DATA(insert ( 1983 1186 1186 1 1697 )); -DATA(insert ( 1985 829 829 1 399 )); -DATA(insert ( 1987 19 19 1 455 )); -DATA(insert ( 1990 26 26 1 453 )); -DATA(insert ( 1992 30 30 1 457 )); -DATA(insert ( 1995 25 25 1 400 )); -DATA(insert ( 1997 1083 1083 1 1688 )); -DATA(insert ( 1998 1700 1700 1 432 )); -DATA(insert ( 1999 1184 1184 1 2039 )); -DATA(insert ( 2001 1266 1266 1 1696 )); -DATA(insert ( 2040 1114 1114 1 2039 )); -DATA(insert ( 2222 16 16 1 454 )); -DATA(insert ( 2223 17 17 1 456 )); -DATA(insert ( 2225 28 28 1 450 )); -DATA(insert ( 2226 29 29 1 450 )); -DATA(insert ( 2227 702 702 1 450 )); -DATA(insert ( 2228 703 703 1 450 )); -DATA(insert ( 2229 25 25 1 400 )); -DATA(insert ( 2231 1042 1042 1 1080 )); -DATA(insert ( 2235 1033 1033 1 329 )); -DATA(insert ( 2969 2950 2950 1 2963 )); -DATA(insert ( 3254 3220 3220 1 3252 )); -DATA(insert ( 3372 774 774 1 328 )); -DATA(insert ( 3523 3500 3500 1 3515 )); -DATA(insert ( 3903 3831 3831 1 3902 )); -DATA(insert ( 4034 3802 3802 1 4045 )); - - -/* gist */ -DATA(insert ( 1029 600 600 1 2179 )); -DATA(insert ( 1029 600 600 2 2583 )); -DATA(insert ( 1029 600 600 3 1030 )); -DATA(insert ( 1029 600 600 4 2580 )); -DATA(insert ( 1029 600 600 5 2581 )); -DATA(insert ( 1029 600 600 6 2582 )); -DATA(insert ( 1029 600 600 7 2584 )); -DATA(insert ( 1029 600 600 8 3064 )); -DATA(insert ( 1029 600 600 9 3282 )); -DATA(insert ( 2593 603 603 1 2578 )); -DATA(insert ( 2593 603 603 2 2583 )); -DATA(insert ( 2593 603 603 3 2579 )); -DATA(insert ( 2593 603 603 4 2580 )); -DATA(insert ( 2593 603 603 5 2581 )); -DATA(insert ( 2593 603 603 6 2582 )); -DATA(insert ( 2593 603 603 7 2584 )); -DATA(insert ( 2593 603 603 9 3281 )); -DATA(insert ( 2594 604 604 1 2585 )); -DATA(insert ( 2594 604 604 2 2583 )); -DATA(insert ( 2594 604 604 3 2586 )); -DATA(insert ( 2594 604 604 4 2580 )); -DATA(insert ( 2594 604 604 5 2581 )); -DATA(insert ( 2594 604 604 6 2582 )); -DATA(insert ( 2594 604 604 7 2584 )); -DATA(insert ( 2594 604 604 8 3288 )); -DATA(insert ( 2595 718 718 1 2591 )); -DATA(insert ( 2595 718 718 2 2583 )); -DATA(insert ( 2595 718 718 3 2592 )); -DATA(insert ( 2595 718 718 4 2580 )); -DATA(insert ( 2595 718 718 5 2581 )); -DATA(insert ( 2595 718 718 6 2582 )); -DATA(insert ( 2595 718 718 7 2584 )); -DATA(insert ( 2595 718 718 8 3280 )); -DATA(insert ( 3655 3614 3614 1 3654 )); -DATA(insert ( 3655 3614 3614 2 3651 )); -DATA(insert ( 3655 3614 3614 3 3648 )); -DATA(insert ( 3655 3614 
3614 4 3649 )); -DATA(insert ( 3655 3614 3614 5 3653 )); -DATA(insert ( 3655 3614 3614 6 3650 )); -DATA(insert ( 3655 3614 3614 7 3652 )); -DATA(insert ( 3702 3615 3615 1 3701 )); -DATA(insert ( 3702 3615 3615 2 3698 )); -DATA(insert ( 3702 3615 3615 3 3695 )); -DATA(insert ( 3702 3615 3615 4 3696 )); -DATA(insert ( 3702 3615 3615 5 3700 )); -DATA(insert ( 3702 3615 3615 6 3697 )); -DATA(insert ( 3702 3615 3615 7 3699 )); -DATA(insert ( 3919 3831 3831 1 3875 )); -DATA(insert ( 3919 3831 3831 2 3876 )); -DATA(insert ( 3919 3831 3831 3 3877 )); -DATA(insert ( 3919 3831 3831 4 3878 )); -DATA(insert ( 3919 3831 3831 5 3879 )); -DATA(insert ( 3919 3831 3831 6 3880 )); -DATA(insert ( 3919 3831 3831 7 3881 )); -DATA(insert ( 3919 3831 3831 9 3996 )); -DATA(insert ( 3550 869 869 1 3553 )); -DATA(insert ( 3550 869 869 2 3554 )); -DATA(insert ( 3550 869 869 3 3555 )); -DATA(insert ( 3550 869 869 4 3556 )); -DATA(insert ( 3550 869 869 5 3557 )); -DATA(insert ( 3550 869 869 6 3558 )); -DATA(insert ( 3550 869 869 7 3559 )); -DATA(insert ( 3550 869 869 9 3573 )); - - -/* gin */ -DATA(insert ( 2745 2277 2277 2 2743 )); -DATA(insert ( 2745 2277 2277 3 2774 )); -DATA(insert ( 2745 2277 2277 4 2744 )); -DATA(insert ( 2745 2277 2277 6 3920 )); -DATA(insert ( 3659 3614 3614 1 3724 )); -DATA(insert ( 3659 3614 3614 2 3656 )); -DATA(insert ( 3659 3614 3614 3 3657 )); -DATA(insert ( 3659 3614 3614 4 3658 )); -DATA(insert ( 3659 3614 3614 5 2700 )); -DATA(insert ( 3659 3614 3614 6 3921 )); -DATA(insert ( 4036 3802 3802 1 3480 )); -DATA(insert ( 4036 3802 3802 2 3482 )); -DATA(insert ( 4036 3802 3802 3 3483 )); -DATA(insert ( 4036 3802 3802 4 3484 )); -DATA(insert ( 4036 3802 3802 6 3488 )); -DATA(insert ( 4037 3802 3802 1 351 )); -DATA(insert ( 4037 3802 3802 2 3485 )); -DATA(insert ( 4037 3802 3802 3 3486 )); -DATA(insert ( 4037 3802 3802 4 3487 )); -DATA(insert ( 4037 3802 3802 6 3489 )); - -/* sp-gist */ -DATA(insert ( 3474 3831 3831 1 3469 )); -DATA(insert ( 3474 3831 3831 2 3470 )); -DATA(insert ( 3474 3831 3831 3 3471 )); -DATA(insert ( 3474 3831 3831 4 3472 )); -DATA(insert ( 3474 3831 3831 5 3473 )); -DATA(insert ( 3794 869 869 1 3795 )); -DATA(insert ( 3794 869 869 2 3796 )); -DATA(insert ( 3794 869 869 3 3797 )); -DATA(insert ( 3794 869 869 4 3798 )); -DATA(insert ( 3794 869 869 5 3799 )); -DATA(insert ( 4015 600 600 1 4018 )); -DATA(insert ( 4015 600 600 2 4019 )); -DATA(insert ( 4015 600 600 3 4020 )); -DATA(insert ( 4015 600 600 4 4021 )); -DATA(insert ( 4015 600 600 5 4022 )); -DATA(insert ( 4016 600 600 1 4023 )); -DATA(insert ( 4016 600 600 2 4024 )); -DATA(insert ( 4016 600 600 3 4025 )); -DATA(insert ( 4016 600 600 4 4026 )); -DATA(insert ( 4016 600 600 5 4022 )); -DATA(insert ( 4017 25 25 1 4027 )); -DATA(insert ( 4017 25 25 2 4028 )); -DATA(insert ( 4017 25 25 3 4029 )); -DATA(insert ( 4017 25 25 4 4030 )); -DATA(insert ( 4017 25 25 5 4031 )); -DATA(insert ( 5000 603 603 1 5012 )); -DATA(insert ( 5000 603 603 2 5013 )); -DATA(insert ( 5000 603 603 3 5014 )); -DATA(insert ( 5000 603 603 4 5015 )); -DATA(insert ( 5000 603 603 5 5016 )); - -/* BRIN opclasses */ -/* minmax bytea */ -DATA(insert ( 4064 17 17 1 3383 )); -DATA(insert ( 4064 17 17 2 3384 )); -DATA(insert ( 4064 17 17 3 3385 )); -DATA(insert ( 4064 17 17 4 3386 )); -/* minmax "char" */ -DATA(insert ( 4062 18 18 1 3383 )); -DATA(insert ( 4062 18 18 2 3384 )); -DATA(insert ( 4062 18 18 3 3385 )); -DATA(insert ( 4062 18 18 4 3386 )); -/* minmax name */ -DATA(insert ( 4065 19 19 1 3383 )); -DATA(insert ( 4065 19 19 2 3384 )); -DATA(insert 
( 4065 19 19 3 3385 )); -DATA(insert ( 4065 19 19 4 3386 )); -/* minmax integer: int2, int4, int8 */ -DATA(insert ( 4054 20 20 1 3383 )); -DATA(insert ( 4054 20 20 2 3384 )); -DATA(insert ( 4054 20 20 3 3385 )); -DATA(insert ( 4054 20 20 4 3386 )); -DATA(insert ( 4054 20 21 1 3383 )); -DATA(insert ( 4054 20 21 2 3384 )); -DATA(insert ( 4054 20 21 3 3385 )); -DATA(insert ( 4054 20 21 4 3386 )); -DATA(insert ( 4054 20 23 1 3383 )); -DATA(insert ( 4054 20 23 2 3384 )); -DATA(insert ( 4054 20 23 3 3385 )); -DATA(insert ( 4054 20 23 4 3386 )); - -DATA(insert ( 4054 21 21 1 3383 )); -DATA(insert ( 4054 21 21 2 3384 )); -DATA(insert ( 4054 21 21 3 3385 )); -DATA(insert ( 4054 21 21 4 3386 )); -DATA(insert ( 4054 21 20 1 3383 )); -DATA(insert ( 4054 21 20 2 3384 )); -DATA(insert ( 4054 21 20 3 3385 )); -DATA(insert ( 4054 21 20 4 3386 )); -DATA(insert ( 4054 21 23 1 3383 )); -DATA(insert ( 4054 21 23 2 3384 )); -DATA(insert ( 4054 21 23 3 3385 )); -DATA(insert ( 4054 21 23 4 3386 )); - -DATA(insert ( 4054 23 23 1 3383 )); -DATA(insert ( 4054 23 23 2 3384 )); -DATA(insert ( 4054 23 23 3 3385 )); -DATA(insert ( 4054 23 23 4 3386 )); -DATA(insert ( 4054 23 20 1 3383 )); -DATA(insert ( 4054 23 20 2 3384 )); -DATA(insert ( 4054 23 20 3 3385 )); -DATA(insert ( 4054 23 20 4 3386 )); -DATA(insert ( 4054 23 21 1 3383 )); -DATA(insert ( 4054 23 21 2 3384 )); -DATA(insert ( 4054 23 21 3 3385 )); -DATA(insert ( 4054 23 21 4 3386 )); - -/* minmax text */ -DATA(insert ( 4056 25 25 1 3383 )); -DATA(insert ( 4056 25 25 2 3384 )); -DATA(insert ( 4056 25 25 3 3385 )); -DATA(insert ( 4056 25 25 4 3386 )); -/* minmax oid */ -DATA(insert ( 4068 26 26 1 3383 )); -DATA(insert ( 4068 26 26 2 3384 )); -DATA(insert ( 4068 26 26 3 3385 )); -DATA(insert ( 4068 26 26 4 3386 )); -/* minmax tid */ -DATA(insert ( 4069 27 27 1 3383 )); -DATA(insert ( 4069 27 27 2 3384 )); -DATA(insert ( 4069 27 27 3 3385 )); -DATA(insert ( 4069 27 27 4 3386 )); -/* minmax float */ -DATA(insert ( 4070 700 700 1 3383 )); -DATA(insert ( 4070 700 700 2 3384 )); -DATA(insert ( 4070 700 700 3 3385 )); -DATA(insert ( 4070 700 700 4 3386 )); - -DATA(insert ( 4070 700 701 1 3383 )); -DATA(insert ( 4070 700 701 2 3384 )); -DATA(insert ( 4070 700 701 3 3385 )); -DATA(insert ( 4070 700 701 4 3386 )); - -DATA(insert ( 4070 701 701 1 3383 )); -DATA(insert ( 4070 701 701 2 3384 )); -DATA(insert ( 4070 701 701 3 3385 )); -DATA(insert ( 4070 701 701 4 3386 )); - -DATA(insert ( 4070 701 700 1 3383 )); -DATA(insert ( 4070 701 700 2 3384 )); -DATA(insert ( 4070 701 700 3 3385 )); -DATA(insert ( 4070 701 700 4 3386 )); - -/* minmax abstime */ -DATA(insert ( 4072 702 702 1 3383 )); -DATA(insert ( 4072 702 702 2 3384 )); -DATA(insert ( 4072 702 702 3 3385 )); -DATA(insert ( 4072 702 702 4 3386 )); -/* minmax reltime */ -DATA(insert ( 4073 703 703 1 3383 )); -DATA(insert ( 4073 703 703 2 3384 )); -DATA(insert ( 4073 703 703 3 3385 )); -DATA(insert ( 4073 703 703 4 3386 )); -/* minmax macaddr */ -DATA(insert ( 4074 829 829 1 3383 )); -DATA(insert ( 4074 829 829 2 3384 )); -DATA(insert ( 4074 829 829 3 3385 )); -DATA(insert ( 4074 829 829 4 3386 )); -/* minmax macaddr8 */ -DATA(insert ( 4109 774 774 1 3383 )); -DATA(insert ( 4109 774 774 2 3384 )); -DATA(insert ( 4109 774 774 3 3385 )); -DATA(insert ( 4109 774 774 4 3386 )); -/* minmax inet */ -DATA(insert ( 4075 869 869 1 3383 )); -DATA(insert ( 4075 869 869 2 3384 )); -DATA(insert ( 4075 869 869 3 3385 )); -DATA(insert ( 4075 869 869 4 3386 )); -/* inclusion inet */ -DATA(insert ( 4102 869 869 1 4105 )); -DATA(insert ( 
4102 869 869 2 4106 )); -DATA(insert ( 4102 869 869 3 4107 )); -DATA(insert ( 4102 869 869 4 4108 )); -DATA(insert ( 4102 869 869 11 4063 )); -DATA(insert ( 4102 869 869 12 4071 )); -DATA(insert ( 4102 869 869 13 930 )); -/* minmax character */ -DATA(insert ( 4076 1042 1042 1 3383 )); -DATA(insert ( 4076 1042 1042 2 3384 )); -DATA(insert ( 4076 1042 1042 3 3385 )); -DATA(insert ( 4076 1042 1042 4 3386 )); -/* minmax time without time zone */ -DATA(insert ( 4077 1083 1083 1 3383 )); -DATA(insert ( 4077 1083 1083 2 3384 )); -DATA(insert ( 4077 1083 1083 3 3385 )); -DATA(insert ( 4077 1083 1083 4 3386 )); -/* minmax datetime (date, timestamp, timestamptz) */ -DATA(insert ( 4059 1114 1114 1 3383 )); -DATA(insert ( 4059 1114 1114 2 3384 )); -DATA(insert ( 4059 1114 1114 3 3385 )); -DATA(insert ( 4059 1114 1114 4 3386 )); -DATA(insert ( 4059 1114 1184 1 3383 )); -DATA(insert ( 4059 1114 1184 2 3384 )); -DATA(insert ( 4059 1114 1184 3 3385 )); -DATA(insert ( 4059 1114 1184 4 3386 )); -DATA(insert ( 4059 1114 1082 1 3383 )); -DATA(insert ( 4059 1114 1082 2 3384 )); -DATA(insert ( 4059 1114 1082 3 3385 )); -DATA(insert ( 4059 1114 1082 4 3386 )); - -DATA(insert ( 4059 1184 1184 1 3383 )); -DATA(insert ( 4059 1184 1184 2 3384 )); -DATA(insert ( 4059 1184 1184 3 3385 )); -DATA(insert ( 4059 1184 1184 4 3386 )); -DATA(insert ( 4059 1184 1114 1 3383 )); -DATA(insert ( 4059 1184 1114 2 3384 )); -DATA(insert ( 4059 1184 1114 3 3385 )); -DATA(insert ( 4059 1184 1114 4 3386 )); -DATA(insert ( 4059 1184 1082 1 3383 )); -DATA(insert ( 4059 1184 1082 2 3384 )); -DATA(insert ( 4059 1184 1082 3 3385 )); -DATA(insert ( 4059 1184 1082 4 3386 )); - -DATA(insert ( 4059 1082 1082 1 3383 )); -DATA(insert ( 4059 1082 1082 2 3384 )); -DATA(insert ( 4059 1082 1082 3 3385 )); -DATA(insert ( 4059 1082 1082 4 3386 )); -DATA(insert ( 4059 1082 1114 1 3383 )); -DATA(insert ( 4059 1082 1114 2 3384 )); -DATA(insert ( 4059 1082 1114 3 3385 )); -DATA(insert ( 4059 1082 1114 4 3386 )); -DATA(insert ( 4059 1082 1184 1 3383 )); -DATA(insert ( 4059 1082 1184 2 3384 )); -DATA(insert ( 4059 1082 1184 3 3385 )); -DATA(insert ( 4059 1082 1184 4 3386 )); - -/* minmax interval */ -DATA(insert ( 4078 1186 1186 1 3383 )); -DATA(insert ( 4078 1186 1186 2 3384 )); -DATA(insert ( 4078 1186 1186 3 3385 )); -DATA(insert ( 4078 1186 1186 4 3386 )); -/* minmax time with time zone */ -DATA(insert ( 4058 1266 1266 1 3383 )); -DATA(insert ( 4058 1266 1266 2 3384 )); -DATA(insert ( 4058 1266 1266 3 3385 )); -DATA(insert ( 4058 1266 1266 4 3386 )); -/* minmax bit */ -DATA(insert ( 4079 1560 1560 1 3383 )); -DATA(insert ( 4079 1560 1560 2 3384 )); -DATA(insert ( 4079 1560 1560 3 3385 )); -DATA(insert ( 4079 1560 1560 4 3386 )); -/* minmax bit varying */ -DATA(insert ( 4080 1562 1562 1 3383 )); -DATA(insert ( 4080 1562 1562 2 3384 )); -DATA(insert ( 4080 1562 1562 3 3385 )); -DATA(insert ( 4080 1562 1562 4 3386 )); -/* minmax numeric */ -DATA(insert ( 4055 1700 1700 1 3383 )); -DATA(insert ( 4055 1700 1700 2 3384 )); -DATA(insert ( 4055 1700 1700 3 3385 )); -DATA(insert ( 4055 1700 1700 4 3386 )); -/* minmax uuid */ -DATA(insert ( 4081 2950 2950 1 3383 )); -DATA(insert ( 4081 2950 2950 2 3384 )); -DATA(insert ( 4081 2950 2950 3 3385 )); -DATA(insert ( 4081 2950 2950 4 3386 )); -/* inclusion range types */ -DATA(insert ( 4103 3831 3831 1 4105 )); -DATA(insert ( 4103 3831 3831 2 4106 )); -DATA(insert ( 4103 3831 3831 3 4107 )); -DATA(insert ( 4103 3831 3831 4 4108 )); -DATA(insert ( 4103 3831 3831 11 4057 )); -DATA(insert ( 4103 3831 3831 13 3859 )); 
-DATA(insert ( 4103 3831 3831 14 3850 )); -/* minmax pg_lsn */ -DATA(insert ( 4082 3220 3220 1 3383 )); -DATA(insert ( 4082 3220 3220 2 3384 )); -DATA(insert ( 4082 3220 3220 3 3385 )); -DATA(insert ( 4082 3220 3220 4 3386 )); -/* inclusion box */ -DATA(insert ( 4104 603 603 1 4105 )); -DATA(insert ( 4104 603 603 2 4106 )); -DATA(insert ( 4104 603 603 3 4107 )); -DATA(insert ( 4104 603 603 4 4108 )); -DATA(insert ( 4104 603 603 11 4067 )); -DATA(insert ( 4104 603 603 13 187 )); - #endif /* PG_AMPROC_H */ diff --git a/src/include/catalog/pg_attrdef.h b/src/include/catalog/pg_attrdef.h index b877f42a2d..a9a2351efd 100644 --- a/src/include/catalog/pg_attrdef.h +++ b/src/include/catalog/pg_attrdef.h @@ -1,18 +1,17 @@ /*------------------------------------------------------------------------- * * pg_attrdef.h - * definition of the system "attribute defaults" relation (pg_attrdef) - * along with the relation's initial contents. + * definition of the "attribute defaults" system catalog (pg_attrdef) * * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/include/catalog/pg_attrdef.h * * NOTES - * the genbki.pl script reads this file and generates .bki - * information from the DATA() statements. + * The Catalog.pm module reads this file and derives schema + * information. * *------------------------------------------------------------------------- */ @@ -20,22 +19,20 @@ #define PG_ATTRDEF_H #include "catalog/genbki.h" +#include "catalog/pg_attrdef_d.h" /* ---------------- * pg_attrdef definition. cpp turns this into * typedef struct FormData_pg_attrdef * ---------------- */ -#define AttrDefaultRelationId 2604 - -CATALOG(pg_attrdef,2604) +CATALOG(pg_attrdef,2604,AttrDefaultRelationId) { Oid adrelid; /* OID of table containing attribute */ int16 adnum; /* attnum of attribute */ #ifdef CATALOG_VARLEN /* variable-length fields start here */ - pg_node_tree adbin; /* nodeToString representation of default */ - text adsrc; /* human-readable representation of default */ + pg_node_tree adbin BKI_FORCE_NOT_NULL; /* nodeToString representation of default */ #endif } FormData_pg_attrdef; @@ -46,14 +43,4 @@ CATALOG(pg_attrdef,2604) */ typedef FormData_pg_attrdef *Form_pg_attrdef; -/* ---------------- - * compiler constants for pg_attrdef - * ---------------- - */ -#define Natts_pg_attrdef 4 -#define Anum_pg_attrdef_adrelid 1 -#define Anum_pg_attrdef_adnum 2 -#define Anum_pg_attrdef_adbin 3 -#define Anum_pg_attrdef_adsrc 4 - #endif /* PG_ATTRDEF_H */ diff --git a/src/include/catalog/pg_attribute.h b/src/include/catalog/pg_attribute.h index bcf28e8f04..dc36753ede 100644 --- a/src/include/catalog/pg_attribute.h +++ b/src/include/catalog/pg_attribute.h @@ -1,18 +1,21 @@ /*------------------------------------------------------------------------- * * pg_attribute.h - * definition of the system "attribute" relation (pg_attribute) - * along with the relation's initial contents. + * definition of the "attribute" system catalog (pg_attribute) + * + * The initial contents of pg_attribute are generated at compile time by + * genbki.pl, so there is no pg_attribute.dat file. Only "bootstrapped" + * relations need be included. 
* * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/include/catalog/pg_attribute.h * * NOTES - * the genbki.pl script reads this file and generates .bki - * information from the DATA() statements. + * The Catalog.pm module reads this file and derives schema + * information. * *------------------------------------------------------------------------- */ @@ -20,6 +23,7 @@ #define PG_ATTRIBUTE_H #include "catalog/genbki.h" +#include "catalog/pg_attribute_d.h" /* ---------------- * pg_attribute definition. cpp turns this into @@ -30,10 +34,7 @@ * You may need to change catalog/genbki.pl as well. * ---------------- */ -#define AttributeRelationId 1249 -#define AttributeRelation_Rowtype_Id 75 - -CATALOG(pg_attribute,1249) BKI_BOOTSTRAP BKI_WITHOUT_OIDS BKI_ROWTYPE_OID(75) BKI_SCHEMA_MACRO +CATALOG(pg_attribute,1249,AttributeRelationId) BKI_BOOTSTRAP BKI_WITHOUT_OIDS BKI_ROWTYPE_OID(75,AttributeRelation_Rowtype_Id) BKI_SCHEMA_MACRO { Oid attrelid; /* OID of relation containing this attribute */ NameData attname; /* name of attribute */ @@ -54,7 +55,7 @@ CATALOG(pg_attribute,1249) BKI_BOOTSTRAP BKI_WITHOUT_OIDS BKI_ROWTYPE_OID(75) BK * that no value has been explicitly set for this column, so ANALYZE * should use the default setting. */ - int32 attstattarget; + int32 attstattarget BKI_DEFAULT(-1); /* * attlen is a copy of the typlen field from pg_type for this attribute. @@ -90,7 +91,7 @@ CATALOG(pg_attribute,1249) BKI_BOOTSTRAP BKI_WITHOUT_OIDS BKI_ROWTYPE_OID(75) BK * descriptor, we may then update attcacheoff in the copies. This speeds * up the attribute walking process. */ - int32 attcacheoff; + int32 attcacheoff BKI_DEFAULT(-1); /* * atttypmod records type-specific data supplied at table creation time @@ -98,7 +99,7 @@ CATALOG(pg_attribute,1249) BKI_BOOTSTRAP BKI_WITHOUT_OIDS BKI_ROWTYPE_OID(75) BK * type-specific input and output functions as the third argument. The * value will generally be -1 for types that do not need typmod. */ - int32 atttypmod; + int32 atttypmod BKI_DEFAULT(-1); /* * attbyval is a copy of the typbyval field from pg_type for this @@ -131,13 +132,16 @@ CATALOG(pg_attribute,1249) BKI_BOOTSTRAP BKI_WITHOUT_OIDS BKI_ROWTYPE_OID(75) BK bool attnotnull; /* Has DEFAULT value or not */ - bool atthasdef; + bool atthasdef BKI_DEFAULT(f); + + /* Has a missing value or not */ + bool atthasmissing BKI_DEFAULT(f); /* One of the ATTRIBUTE_IDENTITY_* constants below, or '\0' */ - char attidentity; + char attidentity BKI_DEFAULT('\0'); /* Is dropped (ie, logically invisible) or not */ - bool attisdropped; + bool attisdropped BKI_DEFAULT(f); /* * This flag specifies whether this column has ever had a local @@ -148,10 +152,10 @@ CATALOG(pg_attribute,1249) BKI_BOOTSTRAP BKI_WITHOUT_OIDS BKI_ROWTYPE_OID(75) BK * not dropped by a parent's DROP COLUMN even if this causes the column's * attinhcount to become zero. */ - bool attislocal; + bool attislocal BKI_DEFAULT(t); /* Number of times inherited from direct parent relation(s) */ - int32 attinhcount; + int32 attinhcount BKI_DEFAULT(0); /* attribute's collation */ Oid attcollation; @@ -160,13 +164,19 @@ CATALOG(pg_attribute,1249) BKI_BOOTSTRAP BKI_WITHOUT_OIDS BKI_ROWTYPE_OID(75) BK /* NOTE: The following fields are not present in tuple descriptors. 
*/ /* Column-level access permissions */ - aclitem attacl[1]; + aclitem attacl[1] BKI_DEFAULT(_null_); /* Column-level options */ - text attoptions[1]; + text attoptions[1] BKI_DEFAULT(_null_); /* Column-level FDW options */ - text attfdwoptions[1]; + text attfdwoptions[1] BKI_DEFAULT(_null_); + + /* + * Missing value for added columns. This is a one element array which lets + * us store a value of the attribute type here. + */ + anyarray attmissingval BKI_DEFAULT(_null_); #endif } FormData_pg_attribute; @@ -186,46 +196,11 @@ CATALOG(pg_attribute,1249) BKI_BOOTSTRAP BKI_WITHOUT_OIDS BKI_ROWTYPE_OID(75) BK */ typedef FormData_pg_attribute *Form_pg_attribute; -/* ---------------- - * compiler constants for pg_attribute - * ---------------- - */ - -#define Natts_pg_attribute 22 -#define Anum_pg_attribute_attrelid 1 -#define Anum_pg_attribute_attname 2 -#define Anum_pg_attribute_atttypid 3 -#define Anum_pg_attribute_attstattarget 4 -#define Anum_pg_attribute_attlen 5 -#define Anum_pg_attribute_attnum 6 -#define Anum_pg_attribute_attndims 7 -#define Anum_pg_attribute_attcacheoff 8 -#define Anum_pg_attribute_atttypmod 9 -#define Anum_pg_attribute_attbyval 10 -#define Anum_pg_attribute_attstorage 11 -#define Anum_pg_attribute_attalign 12 -#define Anum_pg_attribute_attnotnull 13 -#define Anum_pg_attribute_atthasdef 14 -#define Anum_pg_attribute_attidentity 15 -#define Anum_pg_attribute_attisdropped 16 -#define Anum_pg_attribute_attislocal 17 -#define Anum_pg_attribute_attinhcount 18 -#define Anum_pg_attribute_attcollation 19 -#define Anum_pg_attribute_attacl 20 -#define Anum_pg_attribute_attoptions 21 -#define Anum_pg_attribute_attfdwoptions 22 - - -/* ---------------- - * initial contents of pg_attribute - * - * The initial contents of pg_attribute are generated at compile time by - * genbki.pl. Only "bootstrapped" relations need be included. - * ---------------- - */ - +#ifdef EXPOSE_TO_CLIENT_CODE #define ATTRIBUTE_IDENTITY_ALWAYS 'a' #define ATTRIBUTE_IDENTITY_BY_DEFAULT 'd' +#endif /* EXPOSE_TO_CLIENT_CODE */ + #endif /* PG_ATTRIBUTE_H */ diff --git a/src/include/catalog/pg_auth_members.h b/src/include/catalog/pg_auth_members.h index 6a954fff97..277ea89c6f 100644 --- a/src/include/catalog/pg_auth_members.h +++ b/src/include/catalog/pg_auth_members.h @@ -1,18 +1,18 @@ /*------------------------------------------------------------------------- * * pg_auth_members.h - * definition of the system "authorization identifier members" relation - * (pg_auth_members) along with the relation's initial contents. + * definition of the "authorization identifier members" system catalog + * (pg_auth_members). * * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/include/catalog/pg_auth_members.h * * NOTES - * the genbki.pl script reads this file and generates .bki - * information from the DATA() statements. + * The Catalog.pm module reads this file and derives schema + * information. * *------------------------------------------------------------------------- */ @@ -20,16 +20,14 @@ #define PG_AUTH_MEMBERS_H #include "catalog/genbki.h" +#include "catalog/pg_auth_members_d.h" /* ---------------- * pg_auth_members definition. 
cpp turns this into * typedef struct FormData_pg_auth_members * ---------------- */ -#define AuthMemRelationId 1261 -#define AuthMemRelation_Rowtype_Id 2843 - -CATALOG(pg_auth_members,1261) BKI_SHARED_RELATION BKI_WITHOUT_OIDS BKI_ROWTYPE_OID(2843) BKI_SCHEMA_MACRO +CATALOG(pg_auth_members,1261,AuthMemRelationId) BKI_SHARED_RELATION BKI_WITHOUT_OIDS BKI_ROWTYPE_OID(2843,AuthMemRelation_Rowtype_Id) BKI_SCHEMA_MACRO { Oid roleid; /* ID of a role */ Oid member; /* ID of a member of that role */ @@ -44,14 +42,4 @@ CATALOG(pg_auth_members,1261) BKI_SHARED_RELATION BKI_WITHOUT_OIDS BKI_ROWTYPE_O */ typedef FormData_pg_auth_members *Form_pg_auth_members; -/* ---------------- - * compiler constants for pg_auth_members - * ---------------- - */ -#define Natts_pg_auth_members 4 -#define Anum_pg_auth_members_roleid 1 -#define Anum_pg_auth_members_member 2 -#define Anum_pg_auth_members_grantor 3 -#define Anum_pg_auth_members_admin_option 4 - #endif /* PG_AUTH_MEMBERS_H */ diff --git a/src/include/catalog/pg_authid.dat b/src/include/catalog/pg_authid.dat new file mode 100644 index 0000000000..55be317369 --- /dev/null +++ b/src/include/catalog/pg_authid.dat @@ -0,0 +1,64 @@ +#---------------------------------------------------------------------- +# +# pg_authid.dat +# Initial contents of the pg_authid system catalog. +# +# Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group +# Portions Copyright (c) 1994, Regents of the University of California +# +# src/include/catalog/pg_authid.dat +# +#---------------------------------------------------------------------- + +[ + +# The C code typically refers to these roles using the #define symbols, +# so make sure every entry has an oid_symbol value. + +{ oid => '10', oid_symbol => 'BOOTSTRAP_SUPERUSERID', + rolname => 'POSTGRES', rolsuper => 't', rolinherit => 't', + rolcreaterole => 't', rolcreatedb => 't', rolcanlogin => 't', + rolreplication => 't', rolbypassrls => 't', rolconnlimit => '-1', + rolpassword => '_null_', rolvaliduntil => '_null_' }, +{ oid => '3373', oid_symbol => 'DEFAULT_ROLE_MONITOR', + rolname => 'pg_monitor', rolsuper => 'f', rolinherit => 't', + rolcreaterole => 'f', rolcreatedb => 'f', rolcanlogin => 'f', + rolreplication => 'f', rolbypassrls => 'f', rolconnlimit => '-1', + rolpassword => '_null_', rolvaliduntil => '_null_' }, +{ oid => '3374', oid_symbol => 'DEFAULT_ROLE_READ_ALL_SETTINGS', + rolname => 'pg_read_all_settings', rolsuper => 'f', rolinherit => 't', + rolcreaterole => 'f', rolcreatedb => 'f', rolcanlogin => 'f', + rolreplication => 'f', rolbypassrls => 'f', rolconnlimit => '-1', + rolpassword => '_null_', rolvaliduntil => '_null_' }, +{ oid => '3375', oid_symbol => 'DEFAULT_ROLE_READ_ALL_STATS', + rolname => 'pg_read_all_stats', rolsuper => 'f', rolinherit => 't', + rolcreaterole => 'f', rolcreatedb => 'f', rolcanlogin => 'f', + rolreplication => 'f', rolbypassrls => 'f', rolconnlimit => '-1', + rolpassword => '_null_', rolvaliduntil => '_null_' }, +{ oid => '3377', oid_symbol => 'DEFAULT_ROLE_STAT_SCAN_TABLES', + rolname => 'pg_stat_scan_tables', rolsuper => 'f', rolinherit => 't', + rolcreaterole => 'f', rolcreatedb => 'f', rolcanlogin => 'f', + rolreplication => 'f', rolbypassrls => 'f', rolconnlimit => '-1', + rolpassword => '_null_', rolvaliduntil => '_null_' }, +{ oid => '4569', oid_symbol => 'DEFAULT_ROLE_READ_SERVER_FILES', + rolname => 'pg_read_server_files', rolsuper => 'f', rolinherit => 't', + rolcreaterole => 'f', rolcreatedb => 'f', rolcanlogin => 'f', + rolreplication => 'f', rolbypassrls => 
'f', rolconnlimit => '-1', + rolpassword => '_null_', rolvaliduntil => '_null_' }, +{ oid => '4570', oid_symbol => 'DEFAULT_ROLE_WRITE_SERVER_FILES', + rolname => 'pg_write_server_files', rolsuper => 'f', rolinherit => 't', + rolcreaterole => 'f', rolcreatedb => 'f', rolcanlogin => 'f', + rolreplication => 'f', rolbypassrls => 'f', rolconnlimit => '-1', + rolpassword => '_null_', rolvaliduntil => '_null_' }, +{ oid => '4571', oid_symbol => 'DEFAULT_ROLE_EXECUTE_SERVER_PROGRAM', + rolname => 'pg_execute_server_program', rolsuper => 'f', rolinherit => 't', + rolcreaterole => 'f', rolcreatedb => 'f', rolcanlogin => 'f', + rolreplication => 'f', rolbypassrls => 'f', rolconnlimit => '-1', + rolpassword => '_null_', rolvaliduntil => '_null_' }, +{ oid => '4200', oid_symbol => 'DEFAULT_ROLE_SIGNAL_BACKENDID', + rolname => 'pg_signal_backend', rolsuper => 'f', rolinherit => 't', + rolcreaterole => 'f', rolcreatedb => 'f', rolcanlogin => 'f', + rolreplication => 'f', rolbypassrls => 'f', rolconnlimit => '-1', + rolpassword => '_null_', rolvaliduntil => '_null_' }, + +] diff --git a/src/include/catalog/pg_authid.h b/src/include/catalog/pg_authid.h index 9b6b52c9f9..ba482f1cb9 100644 --- a/src/include/catalog/pg_authid.h +++ b/src/include/catalog/pg_authid.h @@ -1,20 +1,19 @@ /*------------------------------------------------------------------------- * * pg_authid.h - * definition of the system "authorization identifier" relation (pg_authid) - * along with the relation's initial contents. + * definition of the "authorization identifier" system catalog (pg_authid) * * pg_shadow and pg_group are now publicly accessible views on pg_authid. * * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/include/catalog/pg_authid.h * * NOTES - * the genbki.pl script reads this file and generates .bki - * information from the DATA() statements. + * The Catalog.pm module reads this file and derives schema + * information. * *------------------------------------------------------------------------- */ @@ -22,27 +21,14 @@ #define PG_AUTHID_H #include "catalog/genbki.h" - -/* - * The CATALOG definition has to refer to the type of rolvaliduntil as - * "timestamptz" (lower case) so that bootstrap mode recognizes it. But - * the C header files define this type as TimestampTz. Since the field is - * potentially-null and therefore can't be accessed directly from C code, - * there is no particular need for the C struct definition to show the - * field type as TimestampTz --- instead we just make it int. - */ -#define timestamptz int - +#include "catalog/pg_authid_d.h" /* ---------------- * pg_authid definition. cpp turns this into * typedef struct FormData_pg_authid * ---------------- */ -#define AuthIdRelationId 1260 -#define AuthIdRelation_Rowtype_Id 2842 - -CATALOG(pg_authid,1260) BKI_SHARED_RELATION BKI_ROWTYPE_OID(2842) BKI_SCHEMA_MACRO +CATALOG(pg_authid,1260,AuthIdRelationId) BKI_SHARED_RELATION BKI_ROWTYPE_OID(2842,AuthIdRelation_Rowtype_Id) BKI_SCHEMA_MACRO { NameData rolname; /* name of role */ bool rolsuper; /* read this field via superuser() only! */ @@ -61,9 +47,6 @@ CATALOG(pg_authid,1260) BKI_SHARED_RELATION BKI_ROWTYPE_OID(2842) BKI_SCHEMA_MAC #endif } FormData_pg_authid; -#undef timestamptz - - /* ---------------- * Form_pg_authid corresponds to a pointer to a tuple with * the format of pg_authid relation. 
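
The hand-assigned role OIDs and their #define symbols (BOOTSTRAP_SUPERUSERID, the DEFAULT_ROLE_* constants) no longer live in this header: each pg_authid.dat entry above carries an oid_symbol, and the matching constants end up in the generated pg_authid_d.h that the header now includes. A minimal sketch of that mapping, assuming only that the .dat file's last expression evaluates to a Perl array of hashes as shown above (an illustration, not the real genbki.pl/Catalog.pm logic):

#!/usr/bin/perl
# Sketch: derive "#define <oid_symbol> <oid>" lines from a catalog .dat file.
# Assumes the file's last expression is an array reference of hash references,
# as in pg_authid.dat above; genbki.pl and Catalog.pm do considerably more.
use strict;
use warnings;

my $datfile = shift @ARGV or die "usage: $0 path/to/pg_authid.dat\n";

my $entries = do {
	local $/;                      # slurp mode
	open my $fh, '<', $datfile or die "cannot open $datfile: $!\n";
	my $src = <$fh>;
	eval $src or die "could not evaluate $datfile: $@";
};

for my $row (@$entries)
{
	next unless defined $row->{oid_symbol};
	printf "#define %s %s\n", $row->{oid_symbol}, $row->{oid};
}

Run against the pg_authid.dat contents above, this would print, for example, "#define BOOTSTRAP_SUPERUSERID 10" -- the same constant the C code keeps referring to; only its definition point moves out of this header.
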
@@ -71,44 +54,4 @@ CATALOG(pg_authid,1260) BKI_SHARED_RELATION BKI_ROWTYPE_OID(2842) BKI_SCHEMA_MAC */ typedef FormData_pg_authid *Form_pg_authid; -/* ---------------- - * compiler constants for pg_authid - * ---------------- - */ -#define Natts_pg_authid 11 -#define Anum_pg_authid_rolname 1 -#define Anum_pg_authid_rolsuper 2 -#define Anum_pg_authid_rolinherit 3 -#define Anum_pg_authid_rolcreaterole 4 -#define Anum_pg_authid_rolcreatedb 5 -#define Anum_pg_authid_rolcanlogin 6 -#define Anum_pg_authid_rolreplication 7 -#define Anum_pg_authid_rolbypassrls 8 -#define Anum_pg_authid_rolconnlimit 9 -#define Anum_pg_authid_rolpassword 10 -#define Anum_pg_authid_rolvaliduntil 11 - -/* ---------------- - * initial contents of pg_authid - * - * The uppercase quantities will be replaced at initdb time with - * user choices. - * - * The C code typically refers to these roles using the #define symbols, - * so be sure to keep those in sync with the DATA lines. - * ---------------- - */ -DATA(insert OID = 10 ( "POSTGRES" t t t t t t t -1 _null_ _null_)); -#define BOOTSTRAP_SUPERUSERID 10 -DATA(insert OID = 3373 ( "pg_monitor" f t f f f f f -1 _null_ _null_)); -#define DEFAULT_ROLE_MONITOR 3373 -DATA(insert OID = 3374 ( "pg_read_all_settings" f t f f f f f -1 _null_ _null_)); -#define DEFAULT_ROLE_READ_ALL_SETTINGS 3374 -DATA(insert OID = 3375 ( "pg_read_all_stats" f t f f f f f -1 _null_ _null_)); -#define DEFAULT_ROLE_READ_ALL_STATS 3375 -DATA(insert OID = 3377 ( "pg_stat_scan_tables" f t f f f f f -1 _null_ _null_)); -#define DEFAULT_ROLE_STAT_SCAN_TABLES 3377 -DATA(insert OID = 4200 ( "pg_signal_backend" f t f f f f f -1 _null_ _null_)); -#define DEFAULT_ROLE_SIGNAL_BACKENDID 4200 - #endif /* PG_AUTHID_H */ diff --git a/src/include/catalog/pg_cast.dat b/src/include/catalog/pg_cast.dat new file mode 100644 index 0000000000..8cd65b3ab5 --- /dev/null +++ b/src/include/catalog/pg_cast.dat @@ -0,0 +1,509 @@ +#---------------------------------------------------------------------- +# +# pg_cast.dat +# Initial contents of the pg_cast system catalog. +# +# Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group +# Portions Copyright (c) 1994, Regents of the University of California +# +# src/include/catalog/pg_cast.dat +# +#---------------------------------------------------------------------- + +[ + +# Note: this table has OIDs, but we don't bother to assign them manually, +# since nothing needs to know the specific OID of any built-in cast. + +# Numeric category: implicit casts are allowed in the direction +# int2->int4->int8->numeric->float4->float8, while casts in the +# reverse direction are assignment-only. 
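
The comment above states a concrete rule for the numeric-category rows that follow: a cast whose target sits later in the chain int2 -> int4 -> int8 -> numeric -> float4 -> float8 has castcontext 'i' (implicit), while one whose target sits earlier has 'a' (assignment). A small consistency check over a pg_cast.dat-style file, loaded the same way as in the pg_authid.dat sketch; the %rank table is taken straight from the comment's chain and is only an illustration:

#!/usr/bin/perl
# Sketch: verify the implicit/assignment direction rule for numeric-category
# casts described in the comment above. Not part of genbki.pl.
use strict;
use warnings;

my $datfile = shift @ARGV or die "usage: $0 path/to/pg_cast.dat\n";

my $casts = do {
	local $/;
	open my $fh, '<', $datfile or die "cannot open $datfile: $!\n";
	my $src = <$fh>;
	eval $src or die "could not evaluate $datfile: $@";
};

# Position of each type in the implicit-cast chain from the comment.
my %rank = (int2 => 0, int4 => 1, int8 => 2, numeric => 3, float4 => 4, float8 => 5);

for my $cast (@$casts)
{
	my ($src, $tgt) = ($cast->{castsource}, $cast->{casttarget});
	next unless exists $rank{$src} && exists $rank{$tgt};

	my $expected = $rank{$src} < $rank{$tgt} ? 'i' : 'a';
	print "unexpected castcontext for $src -> $tgt: $cast->{castcontext}\n"
		if $cast->{castcontext} ne $expected;
}

Entries whose source or target lies outside the chain (money, bool, the OID-alias types further down) are skipped, since the comment's rule only covers the numeric category.
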
+{ castsource => 'int8', casttarget => 'int2', castfunc => 'int2(int8)', + castcontext => 'a', castmethod => 'f' }, +{ castsource => 'int8', casttarget => 'int4', castfunc => 'int4(int8)', + castcontext => 'a', castmethod => 'f' }, +{ castsource => 'int8', casttarget => 'float4', castfunc => 'float4(int8)', + castcontext => 'i', castmethod => 'f' }, +{ castsource => 'int8', casttarget => 'float8', castfunc => 'float8(int8)', + castcontext => 'i', castmethod => 'f' }, +{ castsource => 'int8', casttarget => 'numeric', castfunc => 'numeric(int8)', + castcontext => 'i', castmethod => 'f' }, +{ castsource => 'int2', casttarget => 'int8', castfunc => 'int8(int2)', + castcontext => 'i', castmethod => 'f' }, +{ castsource => 'int2', casttarget => 'int4', castfunc => 'int4(int2)', + castcontext => 'i', castmethod => 'f' }, +{ castsource => 'int2', casttarget => 'float4', castfunc => 'float4(int2)', + castcontext => 'i', castmethod => 'f' }, +{ castsource => 'int2', casttarget => 'float8', castfunc => 'float8(int2)', + castcontext => 'i', castmethod => 'f' }, +{ castsource => 'int2', casttarget => 'numeric', castfunc => 'numeric(int2)', + castcontext => 'i', castmethod => 'f' }, +{ castsource => 'int4', casttarget => 'int8', castfunc => 'int8(int4)', + castcontext => 'i', castmethod => 'f' }, +{ castsource => 'int4', casttarget => 'int2', castfunc => 'int2(int4)', + castcontext => 'a', castmethod => 'f' }, +{ castsource => 'int4', casttarget => 'float4', castfunc => 'float4(int4)', + castcontext => 'i', castmethod => 'f' }, +{ castsource => 'int4', casttarget => 'float8', castfunc => 'float8(int4)', + castcontext => 'i', castmethod => 'f' }, +{ castsource => 'int4', casttarget => 'numeric', castfunc => 'numeric(int4)', + castcontext => 'i', castmethod => 'f' }, +{ castsource => 'float4', casttarget => 'int8', castfunc => 'int8(float4)', + castcontext => 'a', castmethod => 'f' }, +{ castsource => 'float4', casttarget => 'int2', castfunc => 'int2(float4)', + castcontext => 'a', castmethod => 'f' }, +{ castsource => 'float4', casttarget => 'int4', castfunc => 'int4(float4)', + castcontext => 'a', castmethod => 'f' }, +{ castsource => 'float4', casttarget => 'float8', castfunc => 'float8(float4)', + castcontext => 'i', castmethod => 'f' }, +{ castsource => 'float4', casttarget => 'numeric', + castfunc => 'numeric(float4)', castcontext => 'a', castmethod => 'f' }, +{ castsource => 'float8', casttarget => 'int8', castfunc => 'int8(float8)', + castcontext => 'a', castmethod => 'f' }, +{ castsource => 'float8', casttarget => 'int2', castfunc => 'int2(float8)', + castcontext => 'a', castmethod => 'f' }, +{ castsource => 'float8', casttarget => 'int4', castfunc => 'int4(float8)', + castcontext => 'a', castmethod => 'f' }, +{ castsource => 'float8', casttarget => 'float4', castfunc => 'float4(float8)', + castcontext => 'a', castmethod => 'f' }, +{ castsource => 'float8', casttarget => 'numeric', + castfunc => 'numeric(float8)', castcontext => 'a', castmethod => 'f' }, +{ castsource => 'numeric', casttarget => 'int8', castfunc => 'int8(numeric)', + castcontext => 'a', castmethod => 'f' }, +{ castsource => 'numeric', casttarget => 'int2', castfunc => 'int2(numeric)', + castcontext => 'a', castmethod => 'f' }, +{ castsource => 'numeric', casttarget => 'int4', castfunc => 'int4(numeric)', + castcontext => 'a', castmethod => 'f' }, +{ castsource => 'numeric', casttarget => 'float4', + castfunc => 'float4(numeric)', castcontext => 'i', castmethod => 'f' }, +{ castsource => 'numeric', casttarget => 'float8', + 
castfunc => 'float8(numeric)', castcontext => 'i', castmethod => 'f' }, +{ castsource => 'money', casttarget => 'numeric', castfunc => 'numeric(money)', + castcontext => 'a', castmethod => 'f' }, +{ castsource => 'numeric', casttarget => 'money', castfunc => 'money(numeric)', + castcontext => 'a', castmethod => 'f' }, +{ castsource => 'int4', casttarget => 'money', castfunc => 'money(int4)', + castcontext => 'a', castmethod => 'f' }, +{ castsource => 'int8', casttarget => 'money', castfunc => 'money(int8)', + castcontext => 'a', castmethod => 'f' }, + +# Allow explicit coercions between int4 and bool +{ castsource => 'int4', casttarget => 'bool', castfunc => 'bool(int4)', + castcontext => 'e', castmethod => 'f' }, +{ castsource => 'bool', casttarget => 'int4', castfunc => 'int4(bool)', + castcontext => 'e', castmethod => 'f' }, + +# OID category: allow implicit conversion from any integral type (including +# int8, to support OID literals > 2G) to OID, as well as assignment coercion +# from OID to int4 or int8. Similarly for each OID-alias type. Also allow +# implicit coercions between OID and each OID-alias type, as well as +# regproc<->regprocedure and regoper<->regoperator. (Other coercions +# between alias types must pass through OID.) Lastly, there are implicit +# casts from text and varchar to regclass, which exist mainly to support +# legacy forms of nextval() and related functions. +{ castsource => 'int8', casttarget => 'oid', castfunc => 'oid', + castcontext => 'i', castmethod => 'f' }, +{ castsource => 'int2', casttarget => 'oid', castfunc => 'int4(int2)', + castcontext => 'i', castmethod => 'f' }, +{ castsource => 'int4', casttarget => 'oid', castfunc => '0', + castcontext => 'i', castmethod => 'b' }, +{ castsource => 'oid', casttarget => 'int8', castfunc => 'int8(oid)', + castcontext => 'a', castmethod => 'f' }, +{ castsource => 'oid', casttarget => 'int4', castfunc => '0', + castcontext => 'a', castmethod => 'b' }, +{ castsource => 'oid', casttarget => 'regproc', castfunc => '0', + castcontext => 'i', castmethod => 'b' }, +{ castsource => 'regproc', casttarget => 'oid', castfunc => '0', + castcontext => 'i', castmethod => 'b' }, +{ castsource => 'int8', casttarget => 'regproc', castfunc => 'oid', + castcontext => 'i', castmethod => 'f' }, +{ castsource => 'int2', casttarget => 'regproc', castfunc => 'int4(int2)', + castcontext => 'i', castmethod => 'f' }, +{ castsource => 'int4', casttarget => 'regproc', castfunc => '0', + castcontext => 'i', castmethod => 'b' }, +{ castsource => 'regproc', casttarget => 'int8', castfunc => 'int8(oid)', + castcontext => 'a', castmethod => 'f' }, +{ castsource => 'regproc', casttarget => 'int4', castfunc => '0', + castcontext => 'a', castmethod => 'b' }, +{ castsource => 'regproc', casttarget => 'regprocedure', castfunc => '0', + castcontext => 'i', castmethod => 'b' }, +{ castsource => 'regprocedure', casttarget => 'regproc', castfunc => '0', + castcontext => 'i', castmethod => 'b' }, +{ castsource => 'oid', casttarget => 'regprocedure', castfunc => '0', + castcontext => 'i', castmethod => 'b' }, +{ castsource => 'regprocedure', casttarget => 'oid', castfunc => '0', + castcontext => 'i', castmethod => 'b' }, +{ castsource => 'int8', casttarget => 'regprocedure', castfunc => 'oid', + castcontext => 'i', castmethod => 'f' }, +{ castsource => 'int2', casttarget => 'regprocedure', castfunc => 'int4(int2)', + castcontext => 'i', castmethod => 'f' }, +{ castsource => 'int4', casttarget => 'regprocedure', castfunc => '0', + castcontext => 'i', 
castmethod => 'b' }, +{ castsource => 'regprocedure', casttarget => 'int8', castfunc => 'int8(oid)', + castcontext => 'a', castmethod => 'f' }, +{ castsource => 'regprocedure', casttarget => 'int4', castfunc => '0', + castcontext => 'a', castmethod => 'b' }, +{ castsource => 'oid', casttarget => 'regoper', castfunc => '0', + castcontext => 'i', castmethod => 'b' }, +{ castsource => 'regoper', casttarget => 'oid', castfunc => '0', + castcontext => 'i', castmethod => 'b' }, +{ castsource => 'int8', casttarget => 'regoper', castfunc => 'oid', + castcontext => 'i', castmethod => 'f' }, +{ castsource => 'int2', casttarget => 'regoper', castfunc => 'int4(int2)', + castcontext => 'i', castmethod => 'f' }, +{ castsource => 'int4', casttarget => 'regoper', castfunc => '0', + castcontext => 'i', castmethod => 'b' }, +{ castsource => 'regoper', casttarget => 'int8', castfunc => 'int8(oid)', + castcontext => 'a', castmethod => 'f' }, +{ castsource => 'regoper', casttarget => 'int4', castfunc => '0', + castcontext => 'a', castmethod => 'b' }, +{ castsource => 'regoper', casttarget => 'regoperator', castfunc => '0', + castcontext => 'i', castmethod => 'b' }, +{ castsource => 'regoperator', casttarget => 'regoper', castfunc => '0', + castcontext => 'i', castmethod => 'b' }, +{ castsource => 'oid', casttarget => 'regoperator', castfunc => '0', + castcontext => 'i', castmethod => 'b' }, +{ castsource => 'regoperator', casttarget => 'oid', castfunc => '0', + castcontext => 'i', castmethod => 'b' }, +{ castsource => 'int8', casttarget => 'regoperator', castfunc => 'oid', + castcontext => 'i', castmethod => 'f' }, +{ castsource => 'int2', casttarget => 'regoperator', castfunc => 'int4(int2)', + castcontext => 'i', castmethod => 'f' }, +{ castsource => 'int4', casttarget => 'regoperator', castfunc => '0', + castcontext => 'i', castmethod => 'b' }, +{ castsource => 'regoperator', casttarget => 'int8', castfunc => 'int8(oid)', + castcontext => 'a', castmethod => 'f' }, +{ castsource => 'regoperator', casttarget => 'int4', castfunc => '0', + castcontext => 'a', castmethod => 'b' }, +{ castsource => 'oid', casttarget => 'regclass', castfunc => '0', + castcontext => 'i', castmethod => 'b' }, +{ castsource => 'regclass', casttarget => 'oid', castfunc => '0', + castcontext => 'i', castmethod => 'b' }, +{ castsource => 'int8', casttarget => 'regclass', castfunc => 'oid', + castcontext => 'i', castmethod => 'f' }, +{ castsource => 'int2', casttarget => 'regclass', castfunc => 'int4(int2)', + castcontext => 'i', castmethod => 'f' }, +{ castsource => 'int4', casttarget => 'regclass', castfunc => '0', + castcontext => 'i', castmethod => 'b' }, +{ castsource => 'regclass', casttarget => 'int8', castfunc => 'int8(oid)', + castcontext => 'a', castmethod => 'f' }, +{ castsource => 'regclass', casttarget => 'int4', castfunc => '0', + castcontext => 'a', castmethod => 'b' }, +{ castsource => 'oid', casttarget => 'regtype', castfunc => '0', + castcontext => 'i', castmethod => 'b' }, +{ castsource => 'regtype', casttarget => 'oid', castfunc => '0', + castcontext => 'i', castmethod => 'b' }, +{ castsource => 'int8', casttarget => 'regtype', castfunc => 'oid', + castcontext => 'i', castmethod => 'f' }, +{ castsource => 'int2', casttarget => 'regtype', castfunc => 'int4(int2)', + castcontext => 'i', castmethod => 'f' }, +{ castsource => 'int4', casttarget => 'regtype', castfunc => '0', + castcontext => 'i', castmethod => 'b' }, +{ castsource => 'regtype', casttarget => 'int8', castfunc => 'int8(oid)', + castcontext => 'a', 
castmethod => 'f' }, +{ castsource => 'regtype', casttarget => 'int4', castfunc => '0', + castcontext => 'a', castmethod => 'b' }, +{ castsource => 'oid', casttarget => 'regconfig', castfunc => '0', + castcontext => 'i', castmethod => 'b' }, +{ castsource => 'regconfig', casttarget => 'oid', castfunc => '0', + castcontext => 'i', castmethod => 'b' }, +{ castsource => 'int8', casttarget => 'regconfig', castfunc => 'oid', + castcontext => 'i', castmethod => 'f' }, +{ castsource => 'int2', casttarget => 'regconfig', castfunc => 'int4(int2)', + castcontext => 'i', castmethod => 'f' }, +{ castsource => 'int4', casttarget => 'regconfig', castfunc => '0', + castcontext => 'i', castmethod => 'b' }, +{ castsource => 'regconfig', casttarget => 'int8', castfunc => 'int8(oid)', + castcontext => 'a', castmethod => 'f' }, +{ castsource => 'regconfig', casttarget => 'int4', castfunc => '0', + castcontext => 'a', castmethod => 'b' }, +{ castsource => 'oid', casttarget => 'regdictionary', castfunc => '0', + castcontext => 'i', castmethod => 'b' }, +{ castsource => 'regdictionary', casttarget => 'oid', castfunc => '0', + castcontext => 'i', castmethod => 'b' }, +{ castsource => 'int8', casttarget => 'regdictionary', castfunc => 'oid', + castcontext => 'i', castmethod => 'f' }, +{ castsource => 'int2', casttarget => 'regdictionary', castfunc => 'int4(int2)', + castcontext => 'i', castmethod => 'f' }, +{ castsource => 'int4', casttarget => 'regdictionary', castfunc => '0', + castcontext => 'i', castmethod => 'b' }, +{ castsource => 'regdictionary', casttarget => 'int8', castfunc => 'int8(oid)', + castcontext => 'a', castmethod => 'f' }, +{ castsource => 'regdictionary', casttarget => 'int4', castfunc => '0', + castcontext => 'a', castmethod => 'b' }, +{ castsource => 'text', casttarget => 'regclass', castfunc => 'regclass', + castcontext => 'i', castmethod => 'f' }, +{ castsource => 'varchar', casttarget => 'regclass', castfunc => 'regclass', + castcontext => 'i', castmethod => 'f' }, +{ castsource => 'oid', casttarget => 'regrole', castfunc => '0', + castcontext => 'i', castmethod => 'b' }, +{ castsource => 'regrole', casttarget => 'oid', castfunc => '0', + castcontext => 'i', castmethod => 'b' }, +{ castsource => 'int8', casttarget => 'regrole', castfunc => 'oid', + castcontext => 'i', castmethod => 'f' }, +{ castsource => 'int2', casttarget => 'regrole', castfunc => 'int4(int2)', + castcontext => 'i', castmethod => 'f' }, +{ castsource => 'int4', casttarget => 'regrole', castfunc => '0', + castcontext => 'i', castmethod => 'b' }, +{ castsource => 'regrole', casttarget => 'int8', castfunc => 'int8(oid)', + castcontext => 'a', castmethod => 'f' }, +{ castsource => 'regrole', casttarget => 'int4', castfunc => '0', + castcontext => 'a', castmethod => 'b' }, +{ castsource => 'oid', casttarget => 'regnamespace', castfunc => '0', + castcontext => 'i', castmethod => 'b' }, +{ castsource => 'regnamespace', casttarget => 'oid', castfunc => '0', + castcontext => 'i', castmethod => 'b' }, +{ castsource => 'int8', casttarget => 'regnamespace', castfunc => 'oid', + castcontext => 'i', castmethod => 'f' }, +{ castsource => 'int2', casttarget => 'regnamespace', castfunc => 'int4(int2)', + castcontext => 'i', castmethod => 'f' }, +{ castsource => 'int4', casttarget => 'regnamespace', castfunc => '0', + castcontext => 'i', castmethod => 'b' }, +{ castsource => 'regnamespace', casttarget => 'int8', castfunc => 'int8(oid)', + castcontext => 'a', castmethod => 'f' }, +{ castsource => 'regnamespace', casttarget => 'int4', 
castfunc => '0', + castcontext => 'a', castmethod => 'b' }, + +# String category +{ castsource => 'text', casttarget => 'bpchar', castfunc => '0', + castcontext => 'i', castmethod => 'b' }, +{ castsource => 'text', casttarget => 'varchar', castfunc => '0', + castcontext => 'i', castmethod => 'b' }, +{ castsource => 'bpchar', casttarget => 'text', castfunc => 'text(bpchar)', + castcontext => 'i', castmethod => 'f' }, +{ castsource => 'bpchar', casttarget => 'varchar', castfunc => 'text(bpchar)', + castcontext => 'i', castmethod => 'f' }, +{ castsource => 'varchar', casttarget => 'text', castfunc => '0', + castcontext => 'i', castmethod => 'b' }, +{ castsource => 'varchar', casttarget => 'bpchar', castfunc => '0', + castcontext => 'i', castmethod => 'b' }, +{ castsource => 'char', casttarget => 'text', castfunc => 'text(char)', + castcontext => 'i', castmethod => 'f' }, +{ castsource => 'char', casttarget => 'bpchar', castfunc => 'bpchar(char)', + castcontext => 'a', castmethod => 'f' }, +{ castsource => 'char', casttarget => 'varchar', castfunc => 'text(char)', + castcontext => 'a', castmethod => 'f' }, +{ castsource => 'name', casttarget => 'text', castfunc => 'text(name)', + castcontext => 'i', castmethod => 'f' }, +{ castsource => 'name', casttarget => 'bpchar', castfunc => 'bpchar(name)', + castcontext => 'a', castmethod => 'f' }, +{ castsource => 'name', casttarget => 'varchar', castfunc => 'varchar(name)', + castcontext => 'a', castmethod => 'f' }, +{ castsource => 'text', casttarget => 'char', castfunc => 'char(text)', + castcontext => 'a', castmethod => 'f' }, +{ castsource => 'bpchar', casttarget => 'char', castfunc => 'char(text)', + castcontext => 'a', castmethod => 'f' }, +{ castsource => 'varchar', casttarget => 'char', castfunc => 'char(text)', + castcontext => 'a', castmethod => 'f' }, +{ castsource => 'text', casttarget => 'name', castfunc => 'name(text)', + castcontext => 'i', castmethod => 'f' }, +{ castsource => 'bpchar', casttarget => 'name', castfunc => 'name(bpchar)', + castcontext => 'i', castmethod => 'f' }, +{ castsource => 'varchar', casttarget => 'name', castfunc => 'name(varchar)', + castcontext => 'i', castmethod => 'f' }, + +# Allow explicit coercions between int4 and "char" +{ castsource => 'char', casttarget => 'int4', castfunc => 'int4(char)', + castcontext => 'e', castmethod => 'f' }, +{ castsource => 'int4', casttarget => 'char', castfunc => 'char(int4)', + castcontext => 'e', castmethod => 'f' }, + +# pg_node_tree can be coerced to, but not from, text +{ castsource => 'pg_node_tree', casttarget => 'text', castfunc => '0', + castcontext => 'i', castmethod => 'b' }, + +# pg_ndistinct can be coerced to, but not from, bytea and text +{ castsource => 'pg_ndistinct', casttarget => 'bytea', castfunc => '0', + castcontext => 'i', castmethod => 'b' }, +{ castsource => 'pg_ndistinct', casttarget => 'text', castfunc => '0', + castcontext => 'i', castmethod => 'i' }, + +# pg_dependencies can be coerced to, but not from, bytea and text +{ castsource => 'pg_dependencies', casttarget => 'bytea', castfunc => '0', + castcontext => 'i', castmethod => 'b' }, +{ castsource => 'pg_dependencies', casttarget => 'text', castfunc => '0', + castcontext => 'i', castmethod => 'i' }, + +# Datetime category +{ castsource => 'date', casttarget => 'timestamp', + castfunc => 'timestamp(date)', castcontext => 'i', castmethod => 'f' }, +{ castsource => 'date', casttarget => 'timestamptz', + castfunc => 'timestamptz(date)', castcontext => 'i', castmethod => 'f' }, +{ castsource => 'time', 
casttarget => 'interval', castfunc => 'interval(time)', + castcontext => 'i', castmethod => 'f' }, +{ castsource => 'time', casttarget => 'timetz', castfunc => 'timetz(time)', + castcontext => 'i', castmethod => 'f' }, +{ castsource => 'timestamp', casttarget => 'date', + castfunc => 'date(timestamp)', castcontext => 'a', castmethod => 'f' }, +{ castsource => 'timestamp', casttarget => 'time', + castfunc => 'time(timestamp)', castcontext => 'a', castmethod => 'f' }, +{ castsource => 'timestamp', casttarget => 'timestamptz', + castfunc => 'timestamptz(timestamp)', castcontext => 'i', castmethod => 'f' }, +{ castsource => 'timestamptz', casttarget => 'date', + castfunc => 'date(timestamptz)', castcontext => 'a', castmethod => 'f' }, +{ castsource => 'timestamptz', casttarget => 'time', + castfunc => 'time(timestamptz)', castcontext => 'a', castmethod => 'f' }, +{ castsource => 'timestamptz', casttarget => 'timestamp', + castfunc => 'timestamp(timestamptz)', castcontext => 'a', castmethod => 'f' }, +{ castsource => 'timestamptz', casttarget => 'timetz', + castfunc => 'timetz(timestamptz)', castcontext => 'a', castmethod => 'f' }, +{ castsource => 'interval', casttarget => 'time', castfunc => 'time(interval)', + castcontext => 'a', castmethod => 'f' }, +{ castsource => 'timetz', casttarget => 'time', castfunc => 'time(timetz)', + castcontext => 'a', castmethod => 'f' }, + +# Geometric category +{ castsource => 'point', casttarget => 'box', castfunc => 'box(point)', + castcontext => 'a', castmethod => 'f' }, +{ castsource => 'lseg', casttarget => 'point', castfunc => 'point(lseg)', + castcontext => 'e', castmethod => 'f' }, +{ castsource => 'path', casttarget => 'point', castfunc => 'point(path)', + castcontext => 'e', castmethod => 'f' }, +{ castsource => 'path', casttarget => 'polygon', castfunc => 'polygon(path)', + castcontext => 'a', castmethod => 'f' }, +{ castsource => 'box', casttarget => 'point', castfunc => 'point(box)', + castcontext => 'e', castmethod => 'f' }, +{ castsource => 'box', casttarget => 'lseg', castfunc => 'lseg(box)', + castcontext => 'e', castmethod => 'f' }, +{ castsource => 'box', casttarget => 'polygon', castfunc => 'polygon(box)', + castcontext => 'a', castmethod => 'f' }, +{ castsource => 'box', casttarget => 'circle', castfunc => 'circle(box)', + castcontext => 'e', castmethod => 'f' }, +{ castsource => 'polygon', casttarget => 'point', castfunc => 'point(polygon)', + castcontext => 'e', castmethod => 'f' }, +{ castsource => 'polygon', casttarget => 'path', castfunc => 'path', + castcontext => 'a', castmethod => 'f' }, +{ castsource => 'polygon', casttarget => 'box', castfunc => 'box(polygon)', + castcontext => 'e', castmethod => 'f' }, +{ castsource => 'polygon', casttarget => 'circle', + castfunc => 'circle(polygon)', castcontext => 'e', castmethod => 'f' }, +{ castsource => 'circle', casttarget => 'point', castfunc => 'point(circle)', + castcontext => 'e', castmethod => 'f' }, +{ castsource => 'circle', casttarget => 'box', castfunc => 'box(circle)', + castcontext => 'e', castmethod => 'f' }, +{ castsource => 'circle', casttarget => 'polygon', + castfunc => 'polygon(circle)', castcontext => 'e', castmethod => 'f' }, + +# MAC address category +{ castsource => 'macaddr', casttarget => 'macaddr8', castfunc => 'macaddr8', + castcontext => 'i', castmethod => 'f' }, +{ castsource => 'macaddr8', casttarget => 'macaddr', castfunc => 'macaddr', + castcontext => 'i', castmethod => 'f' }, + +# INET category +{ castsource => 'cidr', casttarget => 'inet', castfunc => '0', 
+ castcontext => 'i', castmethod => 'b' }, +{ castsource => 'inet', casttarget => 'cidr', castfunc => 'cidr', + castcontext => 'a', castmethod => 'f' }, + +# BitString category +{ castsource => 'bit', casttarget => 'varbit', castfunc => '0', + castcontext => 'i', castmethod => 'b' }, +{ castsource => 'varbit', casttarget => 'bit', castfunc => '0', + castcontext => 'i', castmethod => 'b' }, + +# Cross-category casts between bit and int4, int8 +{ castsource => 'int8', casttarget => 'bit', castfunc => 'bit(int8,int4)', + castcontext => 'e', castmethod => 'f' }, +{ castsource => 'int4', casttarget => 'bit', castfunc => 'bit(int4,int4)', + castcontext => 'e', castmethod => 'f' }, +{ castsource => 'bit', casttarget => 'int8', castfunc => 'int8(bit)', + castcontext => 'e', castmethod => 'f' }, +{ castsource => 'bit', casttarget => 'int4', castfunc => 'int4(bit)', + castcontext => 'e', castmethod => 'f' }, + +# Cross-category casts to and from TEXT +# We need entries here only for a few specialized cases where the behavior +# of the cast function differs from the datatype's I/O functions. Otherwise, +# parse_coerce.c will generate CoerceViaIO operations without any prompting. +# Note that the castcontext values specified here should be no stronger than +# parse_coerce.c's automatic casts ('a' to text, 'e' from text) else odd +# behavior will ensue when the automatic cast is applied instead of the +# pg_cast entry! +{ castsource => 'cidr', casttarget => 'text', castfunc => 'text(inet)', + castcontext => 'a', castmethod => 'f' }, +{ castsource => 'inet', casttarget => 'text', castfunc => 'text(inet)', + castcontext => 'a', castmethod => 'f' }, +{ castsource => 'bool', casttarget => 'text', castfunc => 'text(bool)', + castcontext => 'a', castmethod => 'f' }, +{ castsource => 'xml', casttarget => 'text', castfunc => '0', + castcontext => 'a', castmethod => 'b' }, +{ castsource => 'text', casttarget => 'xml', castfunc => 'xml', + castcontext => 'e', castmethod => 'f' }, + +# Cross-category casts to and from VARCHAR +# We support all the same casts as for TEXT. +{ castsource => 'cidr', casttarget => 'varchar', castfunc => 'text(inet)', + castcontext => 'a', castmethod => 'f' }, +{ castsource => 'inet', casttarget => 'varchar', castfunc => 'text(inet)', + castcontext => 'a', castmethod => 'f' }, +{ castsource => 'bool', casttarget => 'varchar', castfunc => 'text(bool)', + castcontext => 'a', castmethod => 'f' }, +{ castsource => 'xml', casttarget => 'varchar', castfunc => '0', + castcontext => 'a', castmethod => 'b' }, +{ castsource => 'varchar', casttarget => 'xml', castfunc => 'xml', + castcontext => 'e', castmethod => 'f' }, + +# Cross-category casts to and from BPCHAR +# We support all the same casts as for TEXT. 
+{ castsource => 'cidr', casttarget => 'bpchar', castfunc => 'text(inet)', + castcontext => 'a', castmethod => 'f' }, +{ castsource => 'inet', casttarget => 'bpchar', castfunc => 'text(inet)', + castcontext => 'a', castmethod => 'f' }, +{ castsource => 'bool', casttarget => 'bpchar', castfunc => 'text(bool)', + castcontext => 'a', castmethod => 'f' }, +{ castsource => 'xml', casttarget => 'bpchar', castfunc => '0', + castcontext => 'a', castmethod => 'b' }, +{ castsource => 'bpchar', casttarget => 'xml', castfunc => 'xml', + castcontext => 'e', castmethod => 'f' }, + +# Length-coercion functions +{ castsource => 'bpchar', casttarget => 'bpchar', + castfunc => 'bpchar(bpchar,int4,bool)', castcontext => 'i', + castmethod => 'f' }, +{ castsource => 'varchar', casttarget => 'varchar', + castfunc => 'varchar(varchar,int4,bool)', castcontext => 'i', + castmethod => 'f' }, +{ castsource => 'time', casttarget => 'time', castfunc => 'time(time,int4)', + castcontext => 'i', castmethod => 'f' }, +{ castsource => 'timestamp', casttarget => 'timestamp', + castfunc => 'timestamp(timestamp,int4)', castcontext => 'i', + castmethod => 'f' }, +{ castsource => 'timestamptz', casttarget => 'timestamptz', + castfunc => 'timestamptz(timestamptz,int4)', castcontext => 'i', + castmethod => 'f' }, +{ castsource => 'interval', casttarget => 'interval', + castfunc => 'interval(interval,int4)', castcontext => 'i', + castmethod => 'f' }, +{ castsource => 'timetz', casttarget => 'timetz', + castfunc => 'timetz(timetz,int4)', castcontext => 'i', castmethod => 'f' }, +{ castsource => 'bit', casttarget => 'bit', castfunc => 'bit(bit,int4,bool)', + castcontext => 'i', castmethod => 'f' }, +{ castsource => 'varbit', casttarget => 'varbit', castfunc => 'varbit', + castcontext => 'i', castmethod => 'f' }, +{ castsource => 'numeric', casttarget => 'numeric', + castfunc => 'numeric(numeric,int4)', castcontext => 'i', castmethod => 'f' }, + +# json to/from jsonb +{ castsource => 'json', casttarget => 'jsonb', castfunc => '0', + castcontext => 'a', castmethod => 'i' }, +{ castsource => 'jsonb', casttarget => 'json', castfunc => '0', + castcontext => 'a', castmethod => 'i' }, + +# jsonb to numeric and bool types +{ castsource => 'jsonb', casttarget => 'bool', castfunc => 'bool(jsonb)', + castcontext => 'e', castmethod => 'f' }, +{ castsource => 'jsonb', casttarget => 'numeric', castfunc => 'numeric(jsonb)', + castcontext => 'e', castmethod => 'f' }, +{ castsource => 'jsonb', casttarget => 'int2', castfunc => 'int2(jsonb)', + castcontext => 'e', castmethod => 'f' }, +{ castsource => 'jsonb', casttarget => 'int4', castfunc => 'int4(jsonb)', + castcontext => 'e', castmethod => 'f' }, +{ castsource => 'jsonb', casttarget => 'int8', castfunc => 'int8(jsonb)', + castcontext => 'e', castmethod => 'f' }, +{ castsource => 'jsonb', casttarget => 'float4', castfunc => 'float4(jsonb)', + castcontext => 'e', castmethod => 'f' }, +{ castsource => 'jsonb', casttarget => 'float8', castfunc => 'float8(jsonb)', + castcontext => 'e', castmethod => 'f' }, + +] diff --git a/src/include/catalog/pg_cast.h b/src/include/catalog/pg_cast.h index 17827531ad..7f4a25b2da 100644 --- a/src/include/catalog/pg_cast.h +++ b/src/include/catalog/pg_cast.h @@ -1,20 +1,19 @@ /*------------------------------------------------------------------------- * * pg_cast.h - * definition of the system "type casts" relation (pg_cast) - * along with the relation's initial contents. 
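
A note on the format of the pg_cast.dat contents above, for readers new to the .dat files: each entry is an ordinary Perl hash reference, and the whole file is bracketed as a single Perl array, so plain Perl can load it directly. The sketch below is only a stand-alone illustration of that idea, invented for this note (it is not Catalog.pm or genbki.pl code); it loads two entries copied verbatim from the data above and checks the castcontext/castmethod codes against the values used throughout the file.

#!/usr/bin/perl
# Illustrative only: a tiny stand-alone consistency check over .dat-style
# entries.  Invented for this note; not Catalog.pm or genbki.pl code.
use strict;
use warnings;

# Two entries copied verbatim from pg_cast.dat above; a .dat file is just
# Perl data (an array reference of hash references), so eval can load it.
my $dat = <<'EOD';
[
{ castsource => 'json',  casttarget => 'jsonb', castfunc => '0',
  castcontext => 'a', castmethod => 'i' },
{ castsource => 'jsonb', casttarget => 'bool',  castfunc => 'bool(jsonb)',
  castcontext => 'e', castmethod => 'f' },
]
EOD

my $entries = eval $dat or die "bad .dat syntax: $@";

# Codes used throughout pg_cast.dat above.
my %valid_context = (i => 1, a => 1, e => 1);   # implicit / assignment / explicit
my %valid_method  = (f => 1, b => 1, i => 1);   # function / binary-coercible / I-O

foreach my $row (@$entries)
{
    die "bad castcontext '$row->{castcontext}'"
        unless $valid_context{ $row->{castcontext} };
    die "bad castmethod '$row->{castmethod}'"
        unless $valid_method{ $row->{castmethod} };

    # In the data above, binary-coercible ('b') and I/O ('i') casts carry
    # castfunc => '0', while function casts ('f') name a pg_proc entry.
    if ($row->{castmethod} eq 'f')
    {
        die "function cast lacks a function" if $row->{castfunc} eq '0';
    }
    else
    {
        die "non-function cast names a function" if $row->{castfunc} ne '0';
    }
    printf "%s -> %s: ok\n", $row->{castsource}, $row->{casttarget};
}
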
+ * definition of the "type casts" system catalog (pg_cast) * * As of Postgres 8.0, pg_cast describes not only type coercion functions * but also length coercion functions. * - * - * Copyright (c) 2002-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group + * Portions Copyright (c) 1994, Regents of the University of California * * src/include/catalog/pg_cast.h * * NOTES - * the genbki.pl script reads this file and generates .bki - * information from the DATA() statements. + * The Catalog.pm module reads this file and derives schema + * information. * *------------------------------------------------------------------------- */ @@ -22,25 +21,40 @@ #define PG_CAST_H #include "catalog/genbki.h" +#include "catalog/pg_cast_d.h" /* ---------------- * pg_cast definition. cpp turns this into * typedef struct FormData_pg_cast * ---------------- */ -#define CastRelationId 2605 - -CATALOG(pg_cast,2605) +CATALOG(pg_cast,2605,CastRelationId) { - Oid castsource; /* source datatype for cast */ - Oid casttarget; /* destination datatype for cast */ - Oid castfunc; /* cast function; 0 = binary coercible */ - char castcontext; /* contexts in which cast can be used */ - char castmethod; /* cast method */ + /* source datatype for cast */ + Oid castsource BKI_LOOKUP(pg_type); + + /* destination datatype for cast */ + Oid casttarget BKI_LOOKUP(pg_type); + + /* cast function; 0 = binary coercible */ + Oid castfunc BKI_LOOKUP(pg_proc); + + /* contexts in which cast can be used */ + char castcontext; + + /* cast method */ + char castmethod; } FormData_pg_cast; +/* ---------------- + * Form_pg_cast corresponds to a pointer to a tuple with + * the format of pg_cast relation. + * ---------------- + */ typedef FormData_pg_cast *Form_pg_cast; +#ifdef EXPOSE_TO_CLIENT_CODE + /* * The allowable values for pg_cast.castcontext are specified by this enum. * Since castcontext is stored as a "char", we use ASCII codes for human @@ -69,327 +83,6 @@ typedef enum CoercionMethod COERCION_METHOD_INOUT = 'i' /* use input/output functions */ } CoercionMethod; - -/* ---------------- - * compiler constants for pg_cast - * ---------------- - */ -#define Natts_pg_cast 5 -#define Anum_pg_cast_castsource 1 -#define Anum_pg_cast_casttarget 2 -#define Anum_pg_cast_castfunc 3 -#define Anum_pg_cast_castcontext 4 -#define Anum_pg_cast_castmethod 5 - -/* ---------------- - * initial contents of pg_cast - * - * Note: this table has OIDs, but we don't bother to assign them manually, - * since nothing needs to know the specific OID of any built-in cast. - * ---------------- - */ - -/* - * Numeric category: implicit casts are allowed in the direction - * int2->int4->int8->numeric->float4->float8, while casts in the - * reverse direction are assignment-only. 
- */ -DATA(insert ( 20 21 714 a f )); -DATA(insert ( 20 23 480 a f )); -DATA(insert ( 20 700 652 i f )); -DATA(insert ( 20 701 482 i f )); -DATA(insert ( 20 1700 1781 i f )); -DATA(insert ( 21 20 754 i f )); -DATA(insert ( 21 23 313 i f )); -DATA(insert ( 21 700 236 i f )); -DATA(insert ( 21 701 235 i f )); -DATA(insert ( 21 1700 1782 i f )); -DATA(insert ( 23 20 481 i f )); -DATA(insert ( 23 21 314 a f )); -DATA(insert ( 23 700 318 i f )); -DATA(insert ( 23 701 316 i f )); -DATA(insert ( 23 1700 1740 i f )); -DATA(insert ( 700 20 653 a f )); -DATA(insert ( 700 21 238 a f )); -DATA(insert ( 700 23 319 a f )); -DATA(insert ( 700 701 311 i f )); -DATA(insert ( 700 1700 1742 a f )); -DATA(insert ( 701 20 483 a f )); -DATA(insert ( 701 21 237 a f )); -DATA(insert ( 701 23 317 a f )); -DATA(insert ( 701 700 312 a f )); -DATA(insert ( 701 1700 1743 a f )); -DATA(insert ( 1700 20 1779 a f )); -DATA(insert ( 1700 21 1783 a f )); -DATA(insert ( 1700 23 1744 a f )); -DATA(insert ( 1700 700 1745 i f )); -DATA(insert ( 1700 701 1746 i f )); -DATA(insert ( 790 1700 3823 a f )); -DATA(insert ( 1700 790 3824 a f )); -DATA(insert ( 23 790 3811 a f )); -DATA(insert ( 20 790 3812 a f )); - -/* Allow explicit coercions between int4 and bool */ -DATA(insert ( 23 16 2557 e f )); -DATA(insert ( 16 23 2558 e f )); - -/* - * OID category: allow implicit conversion from any integral type (including - * int8, to support OID literals > 2G) to OID, as well as assignment coercion - * from OID to int4 or int8. Similarly for each OID-alias type. Also allow - * implicit coercions between OID and each OID-alias type, as well as - * regproc<->regprocedure and regoper<->regoperator. (Other coercions - * between alias types must pass through OID.) Lastly, there are implicit - * casts from text and varchar to regclass, which exist mainly to support - * legacy forms of nextval() and related functions. 
- */ -DATA(insert ( 20 26 1287 i f )); -DATA(insert ( 21 26 313 i f )); -DATA(insert ( 23 26 0 i b )); -DATA(insert ( 26 20 1288 a f )); -DATA(insert ( 26 23 0 a b )); -DATA(insert ( 26 24 0 i b )); -DATA(insert ( 24 26 0 i b )); -DATA(insert ( 20 24 1287 i f )); -DATA(insert ( 21 24 313 i f )); -DATA(insert ( 23 24 0 i b )); -DATA(insert ( 24 20 1288 a f )); -DATA(insert ( 24 23 0 a b )); -DATA(insert ( 24 2202 0 i b )); -DATA(insert ( 2202 24 0 i b )); -DATA(insert ( 26 2202 0 i b )); -DATA(insert ( 2202 26 0 i b )); -DATA(insert ( 20 2202 1287 i f )); -DATA(insert ( 21 2202 313 i f )); -DATA(insert ( 23 2202 0 i b )); -DATA(insert ( 2202 20 1288 a f )); -DATA(insert ( 2202 23 0 a b )); -DATA(insert ( 26 2203 0 i b )); -DATA(insert ( 2203 26 0 i b )); -DATA(insert ( 20 2203 1287 i f )); -DATA(insert ( 21 2203 313 i f )); -DATA(insert ( 23 2203 0 i b )); -DATA(insert ( 2203 20 1288 a f )); -DATA(insert ( 2203 23 0 a b )); -DATA(insert ( 2203 2204 0 i b )); -DATA(insert ( 2204 2203 0 i b )); -DATA(insert ( 26 2204 0 i b )); -DATA(insert ( 2204 26 0 i b )); -DATA(insert ( 20 2204 1287 i f )); -DATA(insert ( 21 2204 313 i f )); -DATA(insert ( 23 2204 0 i b )); -DATA(insert ( 2204 20 1288 a f )); -DATA(insert ( 2204 23 0 a b )); -DATA(insert ( 26 2205 0 i b )); -DATA(insert ( 2205 26 0 i b )); -DATA(insert ( 20 2205 1287 i f )); -DATA(insert ( 21 2205 313 i f )); -DATA(insert ( 23 2205 0 i b )); -DATA(insert ( 2205 20 1288 a f )); -DATA(insert ( 2205 23 0 a b )); -DATA(insert ( 26 2206 0 i b )); -DATA(insert ( 2206 26 0 i b )); -DATA(insert ( 20 2206 1287 i f )); -DATA(insert ( 21 2206 313 i f )); -DATA(insert ( 23 2206 0 i b )); -DATA(insert ( 2206 20 1288 a f )); -DATA(insert ( 2206 23 0 a b )); -DATA(insert ( 26 3734 0 i b )); -DATA(insert ( 3734 26 0 i b )); -DATA(insert ( 20 3734 1287 i f )); -DATA(insert ( 21 3734 313 i f )); -DATA(insert ( 23 3734 0 i b )); -DATA(insert ( 3734 20 1288 a f )); -DATA(insert ( 3734 23 0 a b )); -DATA(insert ( 26 3769 0 i b )); -DATA(insert ( 3769 26 0 i b )); -DATA(insert ( 20 3769 1287 i f )); -DATA(insert ( 21 3769 313 i f )); -DATA(insert ( 23 3769 0 i b )); -DATA(insert ( 3769 20 1288 a f )); -DATA(insert ( 3769 23 0 a b )); -DATA(insert ( 25 2205 1079 i f )); -DATA(insert ( 1043 2205 1079 i f )); -DATA(insert ( 26 4096 0 i b )); -DATA(insert ( 4096 26 0 i b )); -DATA(insert ( 20 4096 1287 i f )); -DATA(insert ( 21 4096 313 i f )); -DATA(insert ( 23 4096 0 i b )); -DATA(insert ( 4096 20 1288 a f )); -DATA(insert ( 4096 23 0 a b )); -DATA(insert ( 26 4089 0 i b )); -DATA(insert ( 4089 26 0 i b )); -DATA(insert ( 20 4089 1287 i f )); -DATA(insert ( 21 4089 313 i f )); -DATA(insert ( 23 4089 0 i b )); -DATA(insert ( 4089 20 1288 a f )); -DATA(insert ( 4089 23 0 a b )); - -/* - * String category - */ -DATA(insert ( 25 1042 0 i b )); -DATA(insert ( 25 1043 0 i b )); -DATA(insert ( 1042 25 401 i f )); -DATA(insert ( 1042 1043 401 i f )); -DATA(insert ( 1043 25 0 i b )); -DATA(insert ( 1043 1042 0 i b )); -DATA(insert ( 18 25 946 i f )); -DATA(insert ( 18 1042 860 a f )); -DATA(insert ( 18 1043 946 a f )); -DATA(insert ( 19 25 406 i f )); -DATA(insert ( 19 1042 408 a f )); -DATA(insert ( 19 1043 1401 a f )); -DATA(insert ( 25 18 944 a f )); -DATA(insert ( 1042 18 944 a f )); -DATA(insert ( 1043 18 944 a f )); -DATA(insert ( 25 19 407 i f )); -DATA(insert ( 1042 19 409 i f )); -DATA(insert ( 1043 19 1400 i f )); - -/* Allow explicit coercions between int4 and "char" */ -DATA(insert ( 18 23 77 e f )); -DATA(insert ( 23 18 78 e f )); - -/* pg_node_tree can be 
coerced to, but not from, text */ -DATA(insert ( 194 25 0 i b )); - -/* pg_ndistinct can be coerced to, but not from, bytea and text */ -DATA(insert ( 3361 17 0 i b )); -DATA(insert ( 3361 25 0 i i )); - -/* pg_dependencies can be coerced to, but not from, bytea and text */ -DATA(insert ( 3402 17 0 i b )); -DATA(insert ( 3402 25 0 i i )); - -/* - * Datetime category - */ -DATA(insert ( 702 1082 1179 a f )); -DATA(insert ( 702 1083 1364 a f )); -DATA(insert ( 702 1114 2023 i f )); -DATA(insert ( 702 1184 1173 i f )); -DATA(insert ( 703 1186 1177 i f )); -DATA(insert ( 1082 1114 2024 i f )); -DATA(insert ( 1082 1184 1174 i f )); -DATA(insert ( 1083 1186 1370 i f )); -DATA(insert ( 1083 1266 2047 i f )); -DATA(insert ( 1114 702 2030 a f )); -DATA(insert ( 1114 1082 2029 a f )); -DATA(insert ( 1114 1083 1316 a f )); -DATA(insert ( 1114 1184 2028 i f )); -DATA(insert ( 1184 702 1180 a f )); -DATA(insert ( 1184 1082 1178 a f )); -DATA(insert ( 1184 1083 2019 a f )); -DATA(insert ( 1184 1114 2027 a f )); -DATA(insert ( 1184 1266 1388 a f )); -DATA(insert ( 1186 703 1194 a f )); -DATA(insert ( 1186 1083 1419 a f )); -DATA(insert ( 1266 1083 2046 a f )); -/* Cross-category casts between int4 and abstime, reltime */ -DATA(insert ( 23 702 0 e b )); -DATA(insert ( 702 23 0 e b )); -DATA(insert ( 23 703 0 e b )); -DATA(insert ( 703 23 0 e b )); - -/* - * Geometric category - */ -DATA(insert ( 600 603 4091 a f )); -DATA(insert ( 601 600 1532 e f )); -DATA(insert ( 602 600 1533 e f )); -DATA(insert ( 602 604 1449 a f )); -DATA(insert ( 603 600 1534 e f )); -DATA(insert ( 603 601 1541 e f )); -DATA(insert ( 603 604 1448 a f )); -DATA(insert ( 603 718 1479 e f )); -DATA(insert ( 604 600 1540 e f )); -DATA(insert ( 604 602 1447 a f )); -DATA(insert ( 604 603 1446 e f )); -DATA(insert ( 604 718 1474 e f )); -DATA(insert ( 718 600 1416 e f )); -DATA(insert ( 718 603 1480 e f )); -DATA(insert ( 718 604 1544 e f )); - -/* - * MAC address category - */ -DATA(insert ( 829 774 4123 i f )); -DATA(insert ( 774 829 4124 i f )); - -/* - * INET category - */ -DATA(insert ( 650 869 0 i b )); -DATA(insert ( 869 650 1715 a f )); - -/* - * BitString category - */ -DATA(insert ( 1560 1562 0 i b )); -DATA(insert ( 1562 1560 0 i b )); -/* Cross-category casts between bit and int4, int8 */ -DATA(insert ( 20 1560 2075 e f )); -DATA(insert ( 23 1560 1683 e f )); -DATA(insert ( 1560 20 2076 e f )); -DATA(insert ( 1560 23 1684 e f )); - -/* - * Cross-category casts to and from TEXT - * - * We need entries here only for a few specialized cases where the behavior - * of the cast function differs from the datatype's I/O functions. Otherwise, - * parse_coerce.c will generate CoerceViaIO operations without any prompting. - * - * Note that the castcontext values specified here should be no stronger than - * parse_coerce.c's automatic casts ('a' to text, 'e' from text) else odd - * behavior will ensue when the automatic cast is applied instead of the - * pg_cast entry! - */ -DATA(insert ( 650 25 730 a f )); -DATA(insert ( 869 25 730 a f )); -DATA(insert ( 16 25 2971 a f )); -DATA(insert ( 142 25 0 a b )); -DATA(insert ( 25 142 2896 e f )); - -/* - * Cross-category casts to and from VARCHAR - * - * We support all the same casts as for TEXT. - */ -DATA(insert ( 650 1043 730 a f )); -DATA(insert ( 869 1043 730 a f )); -DATA(insert ( 16 1043 2971 a f )); -DATA(insert ( 142 1043 0 a b )); -DATA(insert ( 1043 142 2896 e f )); - -/* - * Cross-category casts to and from BPCHAR - * - * We support all the same casts as for TEXT. 
- */ -DATA(insert ( 650 1042 730 a f )); -DATA(insert ( 869 1042 730 a f )); -DATA(insert ( 16 1042 2971 a f )); -DATA(insert ( 142 1042 0 a b )); -DATA(insert ( 1042 142 2896 e f )); - -/* - * Length-coercion functions - */ -DATA(insert ( 1042 1042 668 i f )); -DATA(insert ( 1043 1043 669 i f )); -DATA(insert ( 1083 1083 1968 i f )); -DATA(insert ( 1114 1114 1961 i f )); -DATA(insert ( 1184 1184 1967 i f )); -DATA(insert ( 1186 1186 1200 i f )); -DATA(insert ( 1266 1266 1969 i f )); -DATA(insert ( 1560 1560 1685 i f )); -DATA(insert ( 1562 1562 1687 i f )); -DATA(insert ( 1700 1700 1703 i f )); - -/* json to/from jsonb */ -DATA(insert ( 114 3802 0 a i )); -DATA(insert ( 3802 114 0 a i )); +#endif /* EXPOSE_TO_CLIENT_CODE */ #endif /* PG_CAST_H */ diff --git a/src/include/catalog/pg_class.dat b/src/include/catalog/pg_class.dat new file mode 100644 index 0000000000..9fffdef379 --- /dev/null +++ b/src/include/catalog/pg_class.dat @@ -0,0 +1,68 @@ +#---------------------------------------------------------------------- +# +# pg_class.dat +# Initial contents of the pg_class system catalog. +# +# Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group +# Portions Copyright (c) 1994, Regents of the University of California +# +# src/include/catalog/pg_class.dat +# +#---------------------------------------------------------------------- + +[ + +# Note: only bootstrap catalogs, ie those marked BKI_BOOTSTRAP, need to +# have entries here. Be sure that the OIDs listed here match those given in +# their CATALOG and BKI_ROWTYPE_OID macros, and that the relnatts values are +# correct. + +# Note: "3" in the relfrozenxid column stands for FirstNormalTransactionId; +# similarly, "1" in relminmxid stands for FirstMultiXactId + +{ oid => '1247', + relname => 'pg_type', relnamespace => 'PGNSP', reltype => '71', + reloftype => '0', relowner => 'PGUID', relam => '0', relfilenode => '0', + reltablespace => '0', relpages => '0', reltuples => '0', relallvisible => '0', + reltoastrelid => '0', relhasindex => 'f', relisshared => 'f', + relpersistence => 'p', relkind => 'r', relnatts => '30', relchecks => '0', + relhasoids => 't', relhasrules => 'f', relhastriggers => 'f', + relhassubclass => 'f', relrowsecurity => 'f', relforcerowsecurity => 'f', + relispopulated => 't', relreplident => 'n', relispartition => 'f', + relrewrite => '0', relfrozenxid => '3', relminmxid => '1', relacl => '_null_', + reloptions => '_null_', relpartbound => '_null_' }, +{ oid => '1249', + relname => 'pg_attribute', relnamespace => 'PGNSP', reltype => '75', + reloftype => '0', relowner => 'PGUID', relam => '0', relfilenode => '0', + reltablespace => '0', relpages => '0', reltuples => '0', relallvisible => '0', + reltoastrelid => '0', relhasindex => 'f', relisshared => 'f', + relpersistence => 'p', relkind => 'r', relnatts => '24', relchecks => '0', + relhasoids => 'f', relhasrules => 'f', relhastriggers => 'f', + relhassubclass => 'f', relrowsecurity => 'f', relforcerowsecurity => 'f', + relispopulated => 't', relreplident => 'n', relispartition => 'f', + relrewrite => '0', relfrozenxid => '3', relminmxid => '1', relacl => '_null_', + reloptions => '_null_', relpartbound => '_null_' }, +{ oid => '1255', + relname => 'pg_proc', relnamespace => 'PGNSP', reltype => '81', + reloftype => '0', relowner => 'PGUID', relam => '0', relfilenode => '0', + reltablespace => '0', relpages => '0', reltuples => '0', relallvisible => '0', + reltoastrelid => '0', relhasindex => 'f', relisshared => 'f', + relpersistence => 'p', relkind => 'r', 
relnatts => '28', relchecks => '0', + relhasoids => 't', relhasrules => 'f', relhastriggers => 'f', + relhassubclass => 'f', relrowsecurity => 'f', relforcerowsecurity => 'f', + relispopulated => 't', relreplident => 'n', relispartition => 'f', + relrewrite => '0', relfrozenxid => '3', relminmxid => '1', relacl => '_null_', + reloptions => '_null_', relpartbound => '_null_' }, +{ oid => '1259', + relname => 'pg_class', relnamespace => 'PGNSP', reltype => '83', + reloftype => '0', relowner => 'PGUID', relam => '0', relfilenode => '0', + reltablespace => '0', relpages => '0', reltuples => '0', relallvisible => '0', + reltoastrelid => '0', relhasindex => 'f', relisshared => 'f', + relpersistence => 'p', relkind => 'r', relnatts => '33', relchecks => '0', + relhasoids => 't', relhasrules => 'f', relhastriggers => 'f', + relhassubclass => 'f', relrowsecurity => 'f', relforcerowsecurity => 'f', + relispopulated => 't', relreplident => 'n', relispartition => 'f', + relrewrite => '0', relfrozenxid => '3', relminmxid => '1', relacl => '_null_', + reloptions => '_null_', relpartbound => '_null_' }, + +] diff --git a/src/include/catalog/pg_class.h b/src/include/catalog/pg_class.h index b256657bda..788d7a31dc 100644 --- a/src/include/catalog/pg_class.h +++ b/src/include/catalog/pg_class.h @@ -1,18 +1,17 @@ /*------------------------------------------------------------------------- * * pg_class.h - * definition of the system "relation" relation (pg_class) - * along with the relation's initial contents. + * definition of the "relation" system catalog (pg_class) * * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/include/catalog/pg_class.h * * NOTES - * the genbki.pl script reads this file and generates .bki - * information from the DATA() statements. + * The Catalog.pm module reads this file and derives schema + * information. * *------------------------------------------------------------------------- */ @@ -20,16 +19,14 @@ #define PG_CLASS_H #include "catalog/genbki.h" +#include "catalog/pg_class_d.h" /* ---------------- * pg_class definition. cpp turns this into * typedef struct FormData_pg_class * ---------------- */ -#define RelationRelationId 1259 -#define RelationRelation_Rowtype_Id 83 - -CATALOG(pg_class,1259) BKI_BOOTSTRAP BKI_ROWTYPE_OID(83) BKI_SCHEMA_MACRO +CATALOG(pg_class,1259,RelationRelationId) BKI_BOOTSTRAP BKI_ROWTYPE_OID(83,RelationRelation_Rowtype_Id) BKI_SCHEMA_MACRO { NameData relname; /* class name */ Oid relnamespace; /* OID of namespace containing this class */ @@ -61,16 +58,17 @@ CATALOG(pg_class,1259) BKI_BOOTSTRAP BKI_ROWTYPE_OID(83) BKI_SCHEMA_MACRO */ int16 relchecks; /* # of CHECK constraints for class */ bool relhasoids; /* T if we generate OIDs for rows of rel */ - bool relhaspkey; /* has (or has had) PRIMARY KEY index */ bool relhasrules; /* has (or has had) any rules */ bool relhastriggers; /* has (or has had) any TRIGGERs */ - bool relhassubclass; /* has (or has had) derived classes */ + bool relhassubclass; /* has (or has had) child tables or indexes */ bool relrowsecurity; /* row security is enabled or not */ bool relforcerowsecurity; /* row security forced for owners or * not */ bool relispopulated; /* matview currently holds query results */ char relreplident; /* see REPLICA_IDENTITY_xxx constants */ bool relispartition; /* is relation a partition? 
*/ + Oid relrewrite; /* heap for rewrite during DDL, link to + * original rel */ TransactionId relfrozenxid; /* all Xids < this are frozen in this rel */ TransactionId relminmxid; /* all multixacts in this rel are >= this. * this is really a MultiXactId */ @@ -94,68 +92,7 @@ CATALOG(pg_class,1259) BKI_BOOTSTRAP BKI_ROWTYPE_OID(83) BKI_SCHEMA_MACRO */ typedef FormData_pg_class *Form_pg_class; -/* ---------------- - * compiler constants for pg_class - * ---------------- - */ - -#define Natts_pg_class 33 -#define Anum_pg_class_relname 1 -#define Anum_pg_class_relnamespace 2 -#define Anum_pg_class_reltype 3 -#define Anum_pg_class_reloftype 4 -#define Anum_pg_class_relowner 5 -#define Anum_pg_class_relam 6 -#define Anum_pg_class_relfilenode 7 -#define Anum_pg_class_reltablespace 8 -#define Anum_pg_class_relpages 9 -#define Anum_pg_class_reltuples 10 -#define Anum_pg_class_relallvisible 11 -#define Anum_pg_class_reltoastrelid 12 -#define Anum_pg_class_relhasindex 13 -#define Anum_pg_class_relisshared 14 -#define Anum_pg_class_relpersistence 15 -#define Anum_pg_class_relkind 16 -#define Anum_pg_class_relnatts 17 -#define Anum_pg_class_relchecks 18 -#define Anum_pg_class_relhasoids 19 -#define Anum_pg_class_relhaspkey 20 -#define Anum_pg_class_relhasrules 21 -#define Anum_pg_class_relhastriggers 22 -#define Anum_pg_class_relhassubclass 23 -#define Anum_pg_class_relrowsecurity 24 -#define Anum_pg_class_relforcerowsecurity 25 -#define Anum_pg_class_relispopulated 26 -#define Anum_pg_class_relreplident 27 -#define Anum_pg_class_relispartition 28 -#define Anum_pg_class_relfrozenxid 29 -#define Anum_pg_class_relminmxid 30 -#define Anum_pg_class_relacl 31 -#define Anum_pg_class_reloptions 32 -#define Anum_pg_class_relpartbound 33 - -/* ---------------- - * initial contents of pg_class - * - * NOTE: only "bootstrapped" relations need to be declared here. Be sure that - * the OIDs listed here match those given in their CATALOG macros, and that - * the relnatts values are correct. 
- * ---------------- - */ - -/* - * Note: "3" in the relfrozenxid column stands for FirstNormalTransactionId; - * similarly, "1" in relminmxid stands for FirstMultiXactId - */ -DATA(insert OID = 1247 ( pg_type PGNSP 71 0 PGUID 0 0 0 0 0 0 0 f f p r 30 0 t f f f f f f t n f 3 1 _null_ _null_ _null_)); -DESCR(""); -DATA(insert OID = 1249 ( pg_attribute PGNSP 75 0 PGUID 0 0 0 0 0 0 0 f f p r 22 0 f f f f f f f t n f 3 1 _null_ _null_ _null_)); -DESCR(""); -DATA(insert OID = 1255 ( pg_proc PGNSP 81 0 PGUID 0 0 0 0 0 0 0 f f p r 29 0 t f f f f f f t n f 3 1 _null_ _null_ _null_)); -DESCR(""); -DATA(insert OID = 1259 ( pg_class PGNSP 83 0 PGUID 0 0 0 0 0 0 0 f f p r 33 0 t f f f f f f t n f 3 1 _null_ _null_ _null_)); -DESCR(""); - +#ifdef EXPOSE_TO_CLIENT_CODE #define RELKIND_RELATION 'r' /* ordinary table */ #define RELKIND_INDEX 'i' /* secondary index */ @@ -166,6 +103,7 @@ DESCR(""); #define RELKIND_COMPOSITE_TYPE 'c' /* composite type */ #define RELKIND_FOREIGN_TABLE 'f' /* foreign table */ #define RELKIND_PARTITIONED_TABLE 'p' /* partitioned table */ +#define RELKIND_PARTITIONED_INDEX 'I' /* partitioned index */ #define RELPERSISTENCE_PERMANENT 'p' /* regular table */ #define RELPERSISTENCE_UNLOGGED 'u' /* unlogged permanent table */ @@ -184,4 +122,6 @@ DESCR(""); */ #define REPLICA_IDENTITY_INDEX 'i' +#endif /* EXPOSE_TO_CLIENT_CODE */ + #endif /* PG_CLASS_H */ diff --git a/src/include/catalog/pg_collation.dat b/src/include/catalog/pg_collation.dat new file mode 100644 index 0000000000..ad16116cf4 --- /dev/null +++ b/src/include/catalog/pg_collation.dat @@ -0,0 +1,31 @@ +#---------------------------------------------------------------------- +# +# pg_collation.dat +# Initial contents of the pg_collation system catalog. +# +# Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group +# Portions Copyright (c) 1994, Regents of the University of California +# +# src/include/catalog/pg_collation.dat +# +#---------------------------------------------------------------------- + +[ + +{ oid => '100', oid_symbol => 'DEFAULT_COLLATION_OID', + descr => 'database\'s default collation', + collname => 'default', collnamespace => 'PGNSP', collowner => 'PGUID', + collprovider => 'd', collencoding => '-1', collcollate => '', collctype => '', + collversion => '_null_' }, +{ oid => '950', oid_symbol => 'C_COLLATION_OID', + descr => 'standard C collation', + collname => 'C', collnamespace => 'PGNSP', collowner => 'PGUID', + collprovider => 'c', collencoding => '-1', collcollate => 'C', + collctype => 'C', collversion => '_null_' }, +{ oid => '951', oid_symbol => 'POSIX_COLLATION_OID', + descr => 'standard POSIX collation', + collname => 'POSIX', collnamespace => 'PGNSP', collowner => 'PGUID', + collprovider => 'c', collencoding => '-1', collcollate => 'POSIX', + collctype => 'POSIX', collversion => '_null_' }, + +] diff --git a/src/include/catalog/pg_collation.h b/src/include/catalog/pg_collation.h index 0cac7cae72..7e0f4461c6 100644 --- a/src/include/catalog/pg_collation.h +++ b/src/include/catalog/pg_collation.h @@ -1,19 +1,17 @@ /*------------------------------------------------------------------------- * * pg_collation.h - * definition of the system "collation" relation (pg_collation) - * along with the relation's initial contents. 
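
The oid_symbol fields in the pg_collation.dat entries above take over the job of the hand-maintained constants (#define DEFAULT_COLLATION_OID 100 and friends) that this patch deletes from pg_collation.h just below; such symbols now come out of the generated pg_collation_d.h header that the file includes. The following is only a rough sketch of that generation step, with an invented helper name; it is not the actual genbki.pl implementation.

#!/usr/bin/perl
# Rough illustration of turning oid_symbol entries into the C macros that
# used to be maintained by hand.  Invented for this note; not genbki.pl.
use strict;
use warnings;

# Trimmed copies of the three pg_collation.dat entries above.
my $collations = [
    { oid => '100', oid_symbol => 'DEFAULT_COLLATION_OID', collname => 'default' },
    { oid => '950', oid_symbol => 'C_COLLATION_OID',       collname => 'C' },
    { oid => '951', oid_symbol => 'POSIX_COLLATION_OID',   collname => 'POSIX' },
];

sub emit_oid_symbols
{
    my ($rows) = @_;
    foreach my $row (@$rows)
    {
        next unless defined $row->{oid_symbol};
        printf "#define %s %s\n", $row->{oid_symbol}, $row->{oid};
    }
}

# Prints, for example:  #define DEFAULT_COLLATION_OID 100
# i.e. the constant this patch deletes from pg_collation.h below.
emit_oid_symbols($collations);
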
+ * definition of the "collation" system catalog (pg_collation) * * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * - * IDENTIFICATION - * src/include/catalog/pg_collation.h + * src/include/catalog/pg_collation.h * * NOTES - * the genbki.pl script reads this file and generates .bki - * information from the DATA() statements. + * The Catalog.pm module reads this file and derives schema + * information. * *------------------------------------------------------------------------- */ @@ -21,15 +19,14 @@ #define PG_COLLATION_H #include "catalog/genbki.h" +#include "catalog/pg_collation_d.h" /* ---------------- * pg_collation definition. cpp turns this into * typedef struct FormData_pg_collation * ---------------- */ -#define CollationRelationId 3456 - -CATALOG(pg_collation,3456) +CATALOG(pg_collation,3456,CollationRelationId) { NameData collname; /* collation name */ Oid collnamespace; /* OID of namespace containing collation */ @@ -51,38 +48,23 @@ CATALOG(pg_collation,3456) */ typedef FormData_pg_collation *Form_pg_collation; -/* ---------------- - * compiler constants for pg_collation - * ---------------- - */ -#define Natts_pg_collation 8 -#define Anum_pg_collation_collname 1 -#define Anum_pg_collation_collnamespace 2 -#define Anum_pg_collation_collowner 3 -#define Anum_pg_collation_collprovider 4 -#define Anum_pg_collation_collencoding 5 -#define Anum_pg_collation_collcollate 6 -#define Anum_pg_collation_collctype 7 -#define Anum_pg_collation_collversion 8 - -/* ---------------- - * initial contents of pg_collation - * ---------------- - */ - -DATA(insert OID = 100 ( default PGNSP PGUID d -1 "" "" _null_ )); -DESCR("database's default collation"); -#define DEFAULT_COLLATION_OID 100 -DATA(insert OID = 950 ( C PGNSP PGUID c -1 "C" "C" _null_ )); -DESCR("standard C collation"); -#define C_COLLATION_OID 950 -DATA(insert OID = 951 ( POSIX PGNSP PGUID c -1 "POSIX" "POSIX" _null_ )); -DESCR("standard POSIX collation"); -#define POSIX_COLLATION_OID 951 - +#ifdef EXPOSE_TO_CLIENT_CODE #define COLLPROVIDER_DEFAULT 'd' #define COLLPROVIDER_ICU 'i' #define COLLPROVIDER_LIBC 'c' +#endif /* EXPOSE_TO_CLIENT_CODE */ + + +extern Oid CollationCreate(const char *collname, Oid collnamespace, + Oid collowner, + char collprovider, + int32 collencoding, + const char *collcollate, const char *collctype, + const char *collversion, + bool if_not_exists, + bool quiet); +extern void RemoveCollationById(Oid collationOid); + #endif /* PG_COLLATION_H */ diff --git a/src/include/catalog/pg_collation_fn.h b/src/include/catalog/pg_collation_fn.h deleted file mode 100644 index 0ef31389d5..0000000000 --- a/src/include/catalog/pg_collation_fn.h +++ /dev/null @@ -1,27 +0,0 @@ -/*------------------------------------------------------------------------- - * - * pg_collation_fn.h - * prototypes for functions in catalog/pg_collation.c - * - * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group - * Portions Copyright (c) 1994, Regents of the University of California - * - * src/include/catalog/pg_collation_fn.h - * - *------------------------------------------------------------------------- - */ -#ifndef PG_COLLATION_FN_H -#define PG_COLLATION_FN_H - -extern Oid CollationCreate(const char *collname, Oid collnamespace, - Oid collowner, - char collprovider, - int32 collencoding, - const char *collcollate, const char *collctype, - const char 
*collversion, - bool if_not_exists, - bool quiet); -extern void RemoveCollationById(Oid collationOid); - -#endif /* PG_COLLATION_FN_H */ diff --git a/src/include/catalog/pg_constraint.h b/src/include/catalog/pg_constraint.h index ec035d8434..630cabe0b8 100644 --- a/src/include/catalog/pg_constraint.h +++ b/src/include/catalog/pg_constraint.h @@ -1,18 +1,17 @@ /*------------------------------------------------------------------------- * * pg_constraint.h - * definition of the system "constraint" relation (pg_constraint) - * along with the relation's initial contents. + * definition of the "constraint" system catalog (pg_constraint) * * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/include/catalog/pg_constraint.h * * NOTES - * the genbki.pl script reads this file and generates .bki - * information from the DATA() statements. + * The Catalog.pm module reads this file and derives schema + * information. * *------------------------------------------------------------------------- */ @@ -20,15 +19,17 @@ #define PG_CONSTRAINT_H #include "catalog/genbki.h" +#include "catalog/pg_constraint_d.h" + +#include "catalog/dependency.h" +#include "nodes/pg_list.h" /* ---------------- * pg_constraint definition. cpp turns this into * typedef struct FormData_pg_constraint * ---------------- */ -#define ConstraintRelationId 2606 - -CATALOG(pg_constraint,2606) +CATALOG(pg_constraint,2606,ConstraintRelationId) { /* * conname + connamespace is deliberately not unique; we allow, for @@ -38,6 +39,10 @@ CATALOG(pg_constraint,2606) * global lock to generate a globally unique name for a nameless * constraint. We associate a namespace with constraint names only for * SQL-spec compatibility. + * + * However, we do require conname to be unique among the constraints of a + * single relation or domain. This is enforced by a unique index on + * conrelid + contypid + conname. */ NameData conname; /* name of this constraint */ Oid connamespace; /* OID of namespace containing constraint */ @@ -72,6 +77,12 @@ CATALOG(pg_constraint,2606) */ Oid conindid; /* index supporting this constraint */ + /* + * If this constraint is on a partition inherited from a partitioned + * table, this is the OID of the corresponding constraint in the parent. + */ + Oid conparentid; + /* * These fields, plus confkey, are only meaningful for a foreign-key * constraint. Otherwise confrelid is 0 and the char fields are spaces. 
@@ -131,11 +142,6 @@ CATALOG(pg_constraint,2606) * If a check constraint, nodeToString representation of expression */ pg_node_tree conbin; - - /* - * If a check constraint, source-text representation of expression - */ - text consrc; #endif } FormData_pg_constraint; @@ -146,43 +152,7 @@ CATALOG(pg_constraint,2606) */ typedef FormData_pg_constraint *Form_pg_constraint; -/* ---------------- - * compiler constants for pg_constraint - * ---------------- - */ -#define Natts_pg_constraint 24 -#define Anum_pg_constraint_conname 1 -#define Anum_pg_constraint_connamespace 2 -#define Anum_pg_constraint_contype 3 -#define Anum_pg_constraint_condeferrable 4 -#define Anum_pg_constraint_condeferred 5 -#define Anum_pg_constraint_convalidated 6 -#define Anum_pg_constraint_conrelid 7 -#define Anum_pg_constraint_contypid 8 -#define Anum_pg_constraint_conindid 9 -#define Anum_pg_constraint_confrelid 10 -#define Anum_pg_constraint_confupdtype 11 -#define Anum_pg_constraint_confdeltype 12 -#define Anum_pg_constraint_confmatchtype 13 -#define Anum_pg_constraint_conislocal 14 -#define Anum_pg_constraint_coninhcount 15 -#define Anum_pg_constraint_connoinherit 16 -#define Anum_pg_constraint_conkey 17 -#define Anum_pg_constraint_confkey 18 -#define Anum_pg_constraint_conpfeqop 19 -#define Anum_pg_constraint_conppeqop 20 -#define Anum_pg_constraint_conffeqop 21 -#define Anum_pg_constraint_conexclop 22 -#define Anum_pg_constraint_conbin 23 -#define Anum_pg_constraint_consrc 24 - -/* ---------------- - * initial contents of pg_constraint - * ---------------- - */ - -/* nothing, at present */ - +#ifdef EXPOSE_TO_CLIENT_CODE /* Valid values for contype */ #define CONSTRAINT_CHECK 'c' @@ -198,4 +168,91 @@ typedef FormData_pg_constraint *Form_pg_constraint; * the FKCONSTR_MATCH_xxx constants defined in parsenodes.h. */ +#endif /* EXPOSE_TO_CLIENT_CODE */ + +/* + * Identify constraint type for lookup purposes + */ +typedef enum ConstraintCategory +{ + CONSTRAINT_RELATION, + CONSTRAINT_DOMAIN, + CONSTRAINT_ASSERTION /* for future expansion */ +} ConstraintCategory; + +/* + * Used when cloning a foreign key constraint to a partition, so that the + * caller can optionally set up a verification pass for it. 
+ */ +typedef struct ClonedConstraint +{ + Oid relid; + Oid refrelid; + Oid conindid; + Oid conid; + Constraint *constraint; +} ClonedConstraint; + + +extern Oid CreateConstraintEntry(const char *constraintName, + Oid constraintNamespace, + char constraintType, + bool isDeferrable, + bool isDeferred, + bool isValidated, + Oid parentConstrId, + Oid relId, + const int16 *constraintKey, + int constraintNKeys, + int constraintNTotalKeys, + Oid domainId, + Oid indexRelId, + Oid foreignRelId, + const int16 *foreignKey, + const Oid *pfEqOp, + const Oid *ppEqOp, + const Oid *ffEqOp, + int foreignNKeys, + char foreignUpdateType, + char foreignDeleteType, + char foreignMatchType, + const Oid *exclOp, + Node *conExpr, + const char *conBin, + bool conIsLocal, + int conInhCount, + bool conNoInherit, + bool is_internal); + +extern void CloneForeignKeyConstraints(Oid parentId, Oid relationId, + List **cloned); + +extern void RemoveConstraintById(Oid conId); +extern void RenameConstraintById(Oid conId, const char *newname); + +extern bool ConstraintNameIsUsed(ConstraintCategory conCat, Oid objId, + const char *conname); +extern bool ConstraintNameExists(const char *conname, Oid namespaceid); +extern char *ChooseConstraintName(const char *name1, const char *name2, + const char *label, Oid namespaceid, + List *others); + +extern void AlterConstraintNamespaces(Oid ownerId, Oid oldNspId, + Oid newNspId, bool isType, ObjectAddresses *objsMoved); +extern void ConstraintSetParentConstraint(Oid childConstrId, + Oid parentConstrId); +extern Oid get_relation_constraint_oid(Oid relid, const char *conname, bool missing_ok); +extern Bitmapset *get_relation_constraint_attnos(Oid relid, const char *conname, + bool missing_ok, Oid *constraintOid); +extern Oid get_domain_constraint_oid(Oid typid, const char *conname, bool missing_ok); +extern Oid get_relation_idx_constraint_oid(Oid relationId, Oid indexId); + +extern Bitmapset *get_primary_key_attnos(Oid relid, bool deferrableOk, + Oid *constraintOid); + +extern bool check_functional_grouping(Oid relid, + Index varno, Index varlevelsup, + List *grouping_columns, + List **constraintDeps); + #endif /* PG_CONSTRAINT_H */ diff --git a/src/include/catalog/pg_constraint_fn.h b/src/include/catalog/pg_constraint_fn.h deleted file mode 100644 index a4c46897ed..0000000000 --- a/src/include/catalog/pg_constraint_fn.h +++ /dev/null @@ -1,82 +0,0 @@ -/*------------------------------------------------------------------------- - * - * pg_constraint_fn.h - * prototypes for functions in catalog/pg_constraint.c - * - * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group - * Portions Copyright (c) 1994, Regents of the University of California - * - * src/include/catalog/pg_constraint_fn.h - * - *------------------------------------------------------------------------- - */ -#ifndef PG_CONSTRAINT_FN_H -#define PG_CONSTRAINT_FN_H - -#include "catalog/dependency.h" -#include "nodes/pg_list.h" - -/* - * Identify constraint type for lookup purposes - */ -typedef enum ConstraintCategory -{ - CONSTRAINT_RELATION, - CONSTRAINT_DOMAIN, - CONSTRAINT_ASSERTION /* for future expansion */ -} ConstraintCategory; - -extern Oid CreateConstraintEntry(const char *constraintName, - Oid constraintNamespace, - char constraintType, - bool isDeferrable, - bool isDeferred, - bool isValidated, - Oid relId, - const int16 *constraintKey, - int constraintNKeys, - Oid domainId, - Oid indexRelId, - Oid foreignRelId, - const int16 *foreignKey, - const Oid *pfEqOp, - const Oid *ppEqOp, - const Oid 
*ffEqOp, - int foreignNKeys, - char foreignUpdateType, - char foreignDeleteType, - char foreignMatchType, - const Oid *exclOp, - Node *conExpr, - const char *conBin, - const char *conSrc, - bool conIsLocal, - int conInhCount, - bool conNoInherit, - bool is_internal); - -extern void RemoveConstraintById(Oid conId); -extern void RenameConstraintById(Oid conId, const char *newname); -extern void SetValidatedConstraintById(Oid conId); - -extern bool ConstraintNameIsUsed(ConstraintCategory conCat, Oid objId, - Oid objNamespace, const char *conname); -extern char *ChooseConstraintName(const char *name1, const char *name2, - const char *label, Oid namespaceid, - List *others); - -extern void AlterConstraintNamespaces(Oid ownerId, Oid oldNspId, - Oid newNspId, bool isType, ObjectAddresses *objsMoved); -extern Oid get_relation_constraint_oid(Oid relid, const char *conname, bool missing_ok); -extern Oid get_domain_constraint_oid(Oid typid, const char *conname, bool missing_ok); - -extern Bitmapset *get_primary_key_attnos(Oid relid, bool deferrableOk, - Oid *constraintOid); - -extern bool check_functional_grouping(Oid relid, - Index varno, Index varlevelsup, - List *grouping_columns, - List **constraintDeps); - -#endif /* PG_CONSTRAINT_FN_H */ diff --git a/src/include/catalog/pg_control.h b/src/include/catalog/pg_control.h index 1ec03caf5f..773d9e6eba 100644 --- a/src/include/catalog/pg_control.h +++ b/src/include/catalog/pg_control.h @@ -5,7 +5,7 @@ * However, we define it here so that the format is documented. * * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/include/catalog/pg_control.h @@ -21,7 +21,7 @@ /* Version identifier for this pg_control format */ -#define PG_CONTROL_VERSION 1002 +#define PG_CONTROL_VERSION 1100 /* Nonce key length, see below */ #define MOCK_AUTH_NONCE_LEN 32 @@ -127,7 +127,6 @@ typedef struct ControlFileData DBState state; /* see enum above */ pg_time_t time; /* time stamp of last pg_control update */ XLogRecPtr checkPoint; /* last check point record ptr */ - XLogRecPtr prevCheckPoint; /* previous check point record ptr */ CheckPoint checkPointCopy; /* copy of last check point record */ diff --git a/src/include/catalog/pg_conversion.h b/src/include/catalog/pg_conversion.h index 0682d7eb22..37515f64c2 100644 --- a/src/include/catalog/pg_conversion.h +++ b/src/include/catalog/pg_conversion.h @@ -1,18 +1,17 @@ /*------------------------------------------------------------------------- * * pg_conversion.h - * definition of the system "conversion" relation (pg_conversion) - * along with the relation's initial contents. + * definition of the "conversion" system catalog (pg_conversion) * * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/include/catalog/pg_conversion.h * * NOTES - * the genbki.pl script reads this file and generates .bki - * information from the DATA() statements. + * The Catalog.pm module reads this file and derives schema + * information. 
* *------------------------------------------------------------------------- */ @@ -20,6 +19,9 @@ #define PG_CONVERSION_H #include "catalog/genbki.h" +#include "catalog/pg_conversion_d.h" + +#include "catalog/objectaddress.h" /* ---------------------------------------------------------------- * pg_conversion definition. @@ -32,12 +34,10 @@ * conforencoding FOR encoding id * contoencoding TO encoding id * conproc OID of the conversion proc - * condefault TRUE if this is a default conversion + * condefault true if this is a default conversion * ---------------------------------------------------------------- */ -#define ConversionRelationId 2607 - -CATALOG(pg_conversion,2607) +CATALOG(pg_conversion,2607,ConversionRelationId) { NameData conname; Oid connamespace; @@ -55,23 +55,13 @@ CATALOG(pg_conversion,2607) */ typedef FormData_pg_conversion *Form_pg_conversion; -/* ---------------- - * compiler constants for pg_conversion - * ---------------- - */ - -#define Natts_pg_conversion 7 -#define Anum_pg_conversion_conname 1 -#define Anum_pg_conversion_connamespace 2 -#define Anum_pg_conversion_conowner 3 -#define Anum_pg_conversion_conforencoding 4 -#define Anum_pg_conversion_contoencoding 5 -#define Anum_pg_conversion_conproc 6 -#define Anum_pg_conversion_condefault 7 -/* ---------------- - * initial contents of pg_conversion - * --------------- - */ +extern ObjectAddress ConversionCreate(const char *conname, Oid connamespace, + Oid conowner, + int32 conforencoding, int32 contoencoding, + Oid conproc, bool def); +extern void RemoveConversionById(Oid conversionOid); +extern Oid FindDefaultConversion(Oid connamespace, int32 for_encoding, + int32 to_encoding); #endif /* PG_CONVERSION_H */ diff --git a/src/include/catalog/pg_conversion_fn.h b/src/include/catalog/pg_conversion_fn.h deleted file mode 100644 index 7074bcf13a..0000000000 --- a/src/include/catalog/pg_conversion_fn.h +++ /dev/null @@ -1,27 +0,0 @@ -/*------------------------------------------------------------------------- - * - * pg_conversion_fn.h - * prototypes for functions in catalog/pg_conversion.c - * - * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group - * Portions Copyright (c) 1994, Regents of the University of California - * - * src/include/catalog/pg_conversion_fn.h - * - *------------------------------------------------------------------------- - */ -#ifndef PG_CONVERSION_FN_H -#define PG_CONVERSION_FN_H - - -#include "catalog/objectaddress.h" - -extern ObjectAddress ConversionCreate(const char *conname, Oid connamespace, - Oid conowner, - int32 conforencoding, int32 contoencoding, - Oid conproc, bool def); -extern void RemoveConversionById(Oid conversionOid); -extern Oid FindDefaultConversion(Oid connamespace, int32 for_encoding, int32 to_encoding); - -#endif /* PG_CONVERSION_FN_H */ diff --git a/src/include/catalog/pg_database.dat b/src/include/catalog/pg_database.dat new file mode 100644 index 0000000000..37ee2f9458 --- /dev/null +++ b/src/include/catalog/pg_database.dat @@ -0,0 +1,23 @@ +#---------------------------------------------------------------------- +# +# pg_database.dat +# Initial contents of the pg_database system catalog. 
+# +# Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group +# Portions Copyright (c) 1994, Regents of the University of California +# +# src/include/catalog/pg_database.dat +# +#---------------------------------------------------------------------- + +[ + +{ oid => '1', oid_symbol => 'TemplateDbOid', + descr => 'default template for new databases', + datname => 'template1', datdba => 'PGUID', encoding => 'ENCODING', + datcollate => 'LC_COLLATE', datctype => 'LC_CTYPE', datistemplate => 't', + datallowconn => 't', datconnlimit => '-1', datlastsysoid => '0', + datfrozenxid => '0', datminmxid => '1', dattablespace => '1663', + datacl => '_null_' }, + +] diff --git a/src/include/catalog/pg_database.h b/src/include/catalog/pg_database.h index e7cbca49cf..ac027b7919 100644 --- a/src/include/catalog/pg_database.h +++ b/src/include/catalog/pg_database.h @@ -1,18 +1,17 @@ /*------------------------------------------------------------------------- * * pg_database.h - * definition of the system "database" relation (pg_database) - * along with the relation's initial contents. + * definition of the "database" system catalog (pg_database) * * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/include/catalog/pg_database.h * * NOTES - * the genbki.pl script reads this file and generates .bki - * information from the DATA() statements. + * The Catalog.pm module reads this file and derives schema + * information. * *------------------------------------------------------------------------- */ @@ -20,16 +19,14 @@ #define PG_DATABASE_H #include "catalog/genbki.h" +#include "catalog/pg_database_d.h" /* ---------------- * pg_database definition. 
cpp turns this into * typedef struct FormData_pg_database * ---------------- */ -#define DatabaseRelationId 1262 -#define DatabaseRelation_Rowtype_Id 1248 - -CATALOG(pg_database,1262) BKI_SHARED_RELATION BKI_ROWTYPE_OID(1248) BKI_SCHEMA_MACRO +CATALOG(pg_database,1262,DatabaseRelationId) BKI_SHARED_RELATION BKI_ROWTYPE_OID(1248,DatabaseRelation_Rowtype_Id) BKI_SCHEMA_MACRO { NameData datname; /* database name */ Oid datdba; /* owner of database */ @@ -56,27 +53,4 @@ CATALOG(pg_database,1262) BKI_SHARED_RELATION BKI_ROWTYPE_OID(1248) BKI_SCHEMA_M */ typedef FormData_pg_database *Form_pg_database; -/* ---------------- - * compiler constants for pg_database - * ---------------- - */ -#define Natts_pg_database 13 -#define Anum_pg_database_datname 1 -#define Anum_pg_database_datdba 2 -#define Anum_pg_database_encoding 3 -#define Anum_pg_database_datcollate 4 -#define Anum_pg_database_datctype 5 -#define Anum_pg_database_datistemplate 6 -#define Anum_pg_database_datallowconn 7 -#define Anum_pg_database_datconnlimit 8 -#define Anum_pg_database_datlastsysoid 9 -#define Anum_pg_database_datfrozenxid 10 -#define Anum_pg_database_datminmxid 11 -#define Anum_pg_database_dattablespace 12 -#define Anum_pg_database_datacl 13 - -DATA(insert OID = 1 ( template1 PGUID ENCODING "LC_COLLATE" "LC_CTYPE" t t -1 0 0 1 1663 _null_)); -SHDESCR("default template for new databases"); -#define TemplateDbOid 1 - #endif /* PG_DATABASE_H */ diff --git a/src/include/catalog/pg_db_role_setting.h b/src/include/catalog/pg_db_role_setting.h index 4a8e3370c9..eff801c3ce 100644 --- a/src/include/catalog/pg_db_role_setting.h +++ b/src/include/catalog/pg_db_role_setting.h @@ -1,26 +1,27 @@ /*------------------------------------------------------------------------- * * pg_db_role_setting.h - * definition of configuration settings + * definition of the system catalog for per-database/per-user + * configuration settings (pg_db_role_setting) * * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/include/catalog/pg_db_role_setting.h * * NOTES - * the genbki.pl script reads this file and generates .bki - * information from the DATA() statements. - * - * XXX do NOT break up DATA() statements into multiple lines! - * the scripts are not as smart as you might think... + * The Catalog.pm module reads this file and derives schema + * information. 
* *------------------------------------------------------------------------- */ #ifndef PG_DB_ROLE_SETTING_H #define PG_DB_ROLE_SETTING_H +#include "catalog/genbki.h" +#include "catalog/pg_db_role_setting_d.h" + #include "utils/guc.h" #include "utils/relcache.h" #include "utils/snapshot.h" @@ -30,9 +31,7 @@ * typedef struct FormData_pg_db_role_setting * ---------------- */ -#define DbRoleSettingRelationId 2964 - -CATALOG(pg_db_role_setting,2964) BKI_SHARED_RELATION BKI_WITHOUT_OIDS +CATALOG(pg_db_role_setting,2964,DbRoleSettingRelationId) BKI_SHARED_RELATION BKI_WITHOUT_OIDS { Oid setdatabase; /* database */ Oid setrole; /* role */ @@ -44,20 +43,6 @@ CATALOG(pg_db_role_setting,2964) BKI_SHARED_RELATION BKI_WITHOUT_OIDS typedef FormData_pg_db_role_setting * Form_pg_db_role_setting; -/* ---------------- - * compiler constants for pg_db_role_setting - * ---------------- - */ -#define Natts_pg_db_role_setting 3 -#define Anum_pg_db_role_setting_setdatabase 1 -#define Anum_pg_db_role_setting_setrole 2 -#define Anum_pg_db_role_setting_setconfig 3 - -/* ---------------- - * initial contents of pg_db_role_setting are NOTHING - * ---------------- - */ - /* * prototypes for functions in pg_db_role_setting.h */ diff --git a/src/include/catalog/pg_default_acl.h b/src/include/catalog/pg_default_acl.h index 09587abee6..aee49fdb6d 100644 --- a/src/include/catalog/pg_default_acl.h +++ b/src/include/catalog/pg_default_acl.h @@ -1,17 +1,18 @@ /*------------------------------------------------------------------------- * * pg_default_acl.h - * definition of default ACLs for new objects. + * definition of the system catalog for default ACLs of new objects + * (pg_default_acl) * * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/include/catalog/pg_default_acl.h * * NOTES - * the genbki.pl script reads this file and generates .bki - * information from the DATA() statements. + * The Catalog.pm module reads this file and derives schema + * information. * *------------------------------------------------------------------------- */ @@ -19,22 +20,21 @@ #define PG_DEFAULT_ACL_H #include "catalog/genbki.h" +#include "catalog/pg_default_acl_d.h" /* ---------------- * pg_default_acl definition. 
cpp turns this into * typedef struct FormData_pg_default_acl * ---------------- */ -#define DefaultAclRelationId 826 - -CATALOG(pg_default_acl,826) +CATALOG(pg_default_acl,826,DefaultAclRelationId) { Oid defaclrole; /* OID of role owning this ACL */ Oid defaclnamespace; /* OID of namespace, or 0 for all */ char defaclobjtype; /* see DEFACLOBJ_xxx constants below */ #ifdef CATALOG_VARLEN /* variable-length fields start here */ - aclitem defaclacl[1]; /* permissions to add at CREATE time */ + aclitem defaclacl[1] BKI_FORCE_NOT_NULL; /* permissions to add at CREATE time */ #endif } FormData_pg_default_acl; @@ -45,21 +45,7 @@ CATALOG(pg_default_acl,826) */ typedef FormData_pg_default_acl *Form_pg_default_acl; -/* ---------------- - * compiler constants for pg_default_acl - * ---------------- - */ - -#define Natts_pg_default_acl 4 -#define Anum_pg_default_acl_defaclrole 1 -#define Anum_pg_default_acl_defaclnamespace 2 -#define Anum_pg_default_acl_defaclobjtype 3 -#define Anum_pg_default_acl_defaclacl 4 - -/* ---------------- - * pg_default_acl has no initial contents - * ---------------- - */ +#ifdef EXPOSE_TO_CLIENT_CODE /* * Types of objects for which the user is allowed to specify default @@ -72,4 +58,6 @@ typedef FormData_pg_default_acl *Form_pg_default_acl; #define DEFACLOBJ_TYPE 'T' /* type */ #define DEFACLOBJ_NAMESPACE 'n' /* namespace */ +#endif /* EXPOSE_TO_CLIENT_CODE */ + #endif /* PG_DEFAULT_ACL_H */ diff --git a/src/include/catalog/pg_depend.h b/src/include/catalog/pg_depend.h index 8bda78d9ef..482b8bd251 100644 --- a/src/include/catalog/pg_depend.h +++ b/src/include/catalog/pg_depend.h @@ -1,18 +1,29 @@ /*------------------------------------------------------------------------- * * pg_depend.h - * definition of the system "dependency" relation (pg_depend) - * along with the relation's initial contents. + * definition of the "dependency" system catalog (pg_depend) * + * pg_depend has no preloaded contents, so there is no pg_depend.dat + * file; system-defined dependencies are loaded into it during a late stage + * of the initdb process. * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * NOTE: we do not represent all possible dependency pairs in pg_depend; + * for example, there's not much value in creating an explicit dependency + * from an attribute to its relation. Usually we make a dependency for + * cases where the relationship is conditional rather than essential + * (for example, not all triggers are dependent on constraints, but all + * attributes are dependent on relations) or where the dependency is not + * convenient to find from the contents of other catalogs. + * + * + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/include/catalog/pg_depend.h * * NOTES - * the genbki.pl script reads this file and generates .bki - * information from the DATA() statements. + * The Catalog.pm module reads this file and derives schema + * information. * *------------------------------------------------------------------------- */ @@ -20,15 +31,14 @@ #define PG_DEPEND_H #include "catalog/genbki.h" +#include "catalog/pg_depend_d.h" /* ---------------- * pg_depend definition. cpp turns this into * typedef struct FormData_pg_depend * ---------------- */ -#define DependRelationId 2608 - -CATALOG(pg_depend,2608) BKI_WITHOUT_OIDS +CATALOG(pg_depend,2608,DependRelationId) BKI_WITHOUT_OIDS { /* * Identification of the dependent (referencing) object. 
@@ -60,31 +70,4 @@ CATALOG(pg_depend,2608) BKI_WITHOUT_OIDS */ typedef FormData_pg_depend *Form_pg_depend; -/* ---------------- - * compiler constants for pg_depend - * ---------------- - */ -#define Natts_pg_depend 7 -#define Anum_pg_depend_classid 1 -#define Anum_pg_depend_objid 2 -#define Anum_pg_depend_objsubid 3 -#define Anum_pg_depend_refclassid 4 -#define Anum_pg_depend_refobjid 5 -#define Anum_pg_depend_refobjsubid 6 -#define Anum_pg_depend_deptype 7 - - -/* - * pg_depend has no preloaded contents; system-defined dependencies are - * loaded into it during a late stage of the initdb process. - * - * NOTE: we do not represent all possible dependency pairs in pg_depend; - * for example, there's not much value in creating an explicit dependency - * from an attribute to its relation. Usually we make a dependency for - * cases where the relationship is conditional rather than essential - * (for example, not all triggers are dependent on constraints, but all - * attributes are dependent on relations) or where the dependency is not - * convenient to find from the contents of other catalogs. - */ - #endif /* PG_DEPEND_H */ diff --git a/src/include/catalog/pg_description.h b/src/include/catalog/pg_description.h index e0499ca2d6..74302bd451 100644 --- a/src/include/catalog/pg_description.h +++ b/src/include/catalog/pg_description.h @@ -1,7 +1,11 @@ /*------------------------------------------------------------------------- * * pg_description.h - * definition of the system "description" relation (pg_description) + * definition of the "description" system catalog (pg_description) + * + * Because the contents of this table are taken from the *.dat files + * of other catalogs, there is no pg_description.dat file. The initial + * contents are assembled by genbki.pl and loaded during initdb. * * NOTE: an object is identified by the OID of the row that primarily * defines the object, plus the OID of the table that that row appears in. @@ -19,17 +23,14 @@ * for example). * * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/include/catalog/pg_description.h * * NOTES - * the genbki.pl script reads this file and generates .bki - * information from the DATA() statements. - * - * XXX do NOT break up DATA() statements into multiple lines! - * the scripts are not as smart as you might think... + * The Catalog.pm module reads this file and derives schema + * information. * *------------------------------------------------------------------------- */ @@ -37,15 +38,14 @@ #define PG_DESCRIPTION_H #include "catalog/genbki.h" +#include "catalog/pg_description_d.h" /* ---------------- * pg_description definition. 
cpp turns this into * typedef struct FormData_pg_description * ---------------- */ -#define DescriptionRelationId 2609 - -CATALOG(pg_description,2609) BKI_WITHOUT_OIDS +CATALOG(pg_description,2609,DescriptionRelationId) BKI_WITHOUT_OIDS { Oid objoid; /* OID of object itself */ Oid classoid; /* OID of table containing object */ @@ -63,25 +63,4 @@ CATALOG(pg_description,2609) BKI_WITHOUT_OIDS */ typedef FormData_pg_description * Form_pg_description; -/* ---------------- - * compiler constants for pg_description - * ---------------- - */ -#define Natts_pg_description 4 -#define Anum_pg_description_objoid 1 -#define Anum_pg_description_classoid 2 -#define Anum_pg_description_objsubid 3 -#define Anum_pg_description_description 4 - -/* ---------------- - * initial contents of pg_description - * ---------------- - */ - -/* - * Because the contents of this table are taken from the other *.h files, - * there is no initialization here. The initial contents are extracted - * by genbki.pl and loaded during initdb. - */ - #endif /* PG_DESCRIPTION_H */ diff --git a/src/include/catalog/pg_enum.h b/src/include/catalog/pg_enum.h index 5938ba5cac..474877749b 100644 --- a/src/include/catalog/pg_enum.h +++ b/src/include/catalog/pg_enum.h @@ -1,20 +1,17 @@ /*------------------------------------------------------------------------- * * pg_enum.h - * definition of the system "enum" relation (pg_enum) - * along with the relation's initial contents. + * definition of the "enum" system catalog (pg_enum) * * - * Copyright (c) 2006-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group + * Portions Copyright (c) 1994, Regents of the University of California * * src/include/catalog/pg_enum.h * * NOTES - * the genbki.pl script reads this file and generates .bki - * information from the DATA() statements. - * - * XXX do NOT break up DATA() statements into multiple lines! - * the scripts are not as smart as you might think... + * The Catalog.pm module reads this file and derives schema + * information. 
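A quick illustration of the (objoid, classoid, objsubid) keying that the pg_description.h header above describes; this is only a sketch, not part of the patch. function_comment() is a made-up helper, and GetComment() is the existing backend routine (declared, as far as I recall, in commands/comment.h) that performs the pg_description lookup.

    #include "postgres.h"
    #include "catalog/pg_proc.h"
    #include "commands/comment.h"

    /*
     * Fetch the COMMENT on a function: pass the object's own OID plus the OID
     * of the catalog that defines it (pg_proc), with objsubid = 0 meaning the
     * object as a whole rather than one of its columns.
     */
    static char *
    function_comment(Oid funcid)
    {
        return GetComment(funcid, ProcedureRelationId, 0);
    }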
* *------------------------------------------------------------------------- */ @@ -22,6 +19,8 @@ #define PG_ENUM_H #include "catalog/genbki.h" +#include "catalog/pg_enum_d.h" + #include "nodes/pg_list.h" /* ---------------- @@ -29,9 +28,7 @@ * typedef struct FormData_pg_enum * ---------------- */ -#define EnumRelationId 3501 - -CATALOG(pg_enum,3501) +CATALOG(pg_enum,3501,EnumRelationId) { Oid enumtypid; /* OID of owning enum type */ float4 enumsortorder; /* sort position of this enum value */ @@ -45,20 +42,6 @@ CATALOG(pg_enum,3501) */ typedef FormData_pg_enum *Form_pg_enum; -/* ---------------- - * compiler constants for pg_enum - * ---------------- - */ -#define Natts_pg_enum 3 -#define Anum_pg_enum_enumtypid 1 -#define Anum_pg_enum_enumsortorder 2 -#define Anum_pg_enum_enumlabel 3 - -/* ---------------- - * pg_enum has no initial contents - * ---------------- - */ - /* * prototypes for functions in pg_enum.c */ @@ -69,5 +52,10 @@ extern void AddEnumLabel(Oid enumTypeOid, const char *newVal, bool skipIfExists); extern void RenameEnumLabel(Oid enumTypeOid, const char *oldVal, const char *newVal); +extern bool EnumBlacklisted(Oid enum_id); +extern Size EstimateEnumBlacklistSpace(void); +extern void SerializeEnumBlacklist(void *space, Size size); +extern void RestoreEnumBlacklist(void *space); +extern void AtEOXact_Enum(void); #endif /* PG_ENUM_H */ diff --git a/src/include/catalog/pg_event_trigger.h b/src/include/catalog/pg_event_trigger.h index f9f568b27b..9af00fd68f 100644 --- a/src/include/catalog/pg_event_trigger.h +++ b/src/include/catalog/pg_event_trigger.h @@ -1,18 +1,17 @@ /*------------------------------------------------------------------------- * * pg_event_trigger.h - * definition of the system "event trigger" relation (pg_event_trigger) - * along with the relation's initial contents. + * definition of the "event trigger" system catalog (pg_event_trigger) * * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/include/catalog/pg_event_trigger.h * * NOTES - * the genbki.pl script reads this file and generates .bki - * information from the DATA() statements. + * The Catalog.pm module reads this file and derives schema + * information. * *------------------------------------------------------------------------- */ @@ -20,15 +19,14 @@ #define PG_EVENT_TRIGGER_H #include "catalog/genbki.h" +#include "catalog/pg_event_trigger_d.h" /* ---------------- * pg_event_trigger definition. 
cpp turns this into * typedef struct FormData_pg_event_trigger * ---------------- */ -#define EventTriggerRelationId 3466 - -CATALOG(pg_event_trigger,3466) +CATALOG(pg_event_trigger,3466,EventTriggerRelationId) { NameData evtname; /* trigger's name */ NameData evtevent; /* trigger's event */ @@ -49,16 +47,4 @@ CATALOG(pg_event_trigger,3466) */ typedef FormData_pg_event_trigger *Form_pg_event_trigger; -/* ---------------- - * compiler constants for pg_event_trigger - * ---------------- - */ -#define Natts_pg_event_trigger 6 -#define Anum_pg_event_trigger_evtname 1 -#define Anum_pg_event_trigger_evtevent 2 -#define Anum_pg_event_trigger_evtowner 3 -#define Anum_pg_event_trigger_evtfoid 4 -#define Anum_pg_event_trigger_evtenabled 5 -#define Anum_pg_event_trigger_evttags 6 - #endif /* PG_EVENT_TRIGGER_H */ diff --git a/src/include/catalog/pg_extension.h b/src/include/catalog/pg_extension.h index 2ce575d17e..c698ddd699 100644 --- a/src/include/catalog/pg_extension.h +++ b/src/include/catalog/pg_extension.h @@ -1,18 +1,17 @@ /*------------------------------------------------------------------------- * * pg_extension.h - * definition of the system "extension" relation (pg_extension) - * along with the relation's initial contents. + * definition of the "extension" system catalog (pg_extension) * * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/include/catalog/pg_extension.h * * NOTES - * the genbki.pl script reads this file and generates .bki - * information from the DATA() statements. + * The Catalog.pm module reads this file and derives schema + * information. * *------------------------------------------------------------------------- */ @@ -20,15 +19,14 @@ #define PG_EXTENSION_H #include "catalog/genbki.h" +#include "catalog/pg_extension_d.h" /* ---------------- * pg_extension definition. cpp turns this into * typedef struct FormData_pg_extension * ---------------- */ -#define ExtensionRelationId 3079 - -CATALOG(pg_extension,3079) +CATALOG(pg_extension,3079,ExtensionRelationId) { NameData extname; /* extension name */ Oid extowner; /* extension owner */ @@ -50,23 +48,4 @@ CATALOG(pg_extension,3079) */ typedef FormData_pg_extension *Form_pg_extension; -/* ---------------- - * compiler constants for pg_extension - * ---------------- - */ - -#define Natts_pg_extension 7 -#define Anum_pg_extension_extname 1 -#define Anum_pg_extension_extowner 2 -#define Anum_pg_extension_extnamespace 3 -#define Anum_pg_extension_extrelocatable 4 -#define Anum_pg_extension_extversion 5 -#define Anum_pg_extension_extconfig 6 -#define Anum_pg_extension_extcondition 7 - -/* ---------------- - * pg_extension has no initial contents - * ---------------- - */ - #endif /* PG_EXTENSION_H */ diff --git a/src/include/catalog/pg_foreign_data_wrapper.h b/src/include/catalog/pg_foreign_data_wrapper.h index af602c74ee..3e6191e3e2 100644 --- a/src/include/catalog/pg_foreign_data_wrapper.h +++ b/src/include/catalog/pg_foreign_data_wrapper.h @@ -1,18 +1,17 @@ /*------------------------------------------------------------------------- * * pg_foreign_data_wrapper.h - * definition of the system "foreign-data wrapper" relation (pg_foreign_data_wrapper) - * along with the relation's initial contents. 
+ * definition of the "foreign-data wrapper" system catalog (pg_foreign_data_wrapper) * * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/include/catalog/pg_foreign_data_wrapper.h * * NOTES - * the genbki.pl script reads this file and generates .bki - * information from the DATA() statements. + * The Catalog.pm module reads this file and derives schema + * information. * *------------------------------------------------------------------------- */ @@ -20,15 +19,14 @@ #define PG_FOREIGN_DATA_WRAPPER_H #include "catalog/genbki.h" +#include "catalog/pg_foreign_data_wrapper_d.h" /* ---------------- * pg_foreign_data_wrapper definition. cpp turns this into * typedef struct FormData_pg_foreign_data_wrapper * ---------------- */ -#define ForeignDataWrapperRelationId 2328 - -CATALOG(pg_foreign_data_wrapper,2328) +CATALOG(pg_foreign_data_wrapper,2328,ForeignDataWrapperRelationId) { NameData fdwname; /* foreign-data wrapper name */ Oid fdwowner; /* FDW owner */ @@ -48,17 +46,4 @@ CATALOG(pg_foreign_data_wrapper,2328) */ typedef FormData_pg_foreign_data_wrapper *Form_pg_foreign_data_wrapper; -/* ---------------- - * compiler constants for pg_fdw - * ---------------- - */ - -#define Natts_pg_foreign_data_wrapper 6 -#define Anum_pg_foreign_data_wrapper_fdwname 1 -#define Anum_pg_foreign_data_wrapper_fdwowner 2 -#define Anum_pg_foreign_data_wrapper_fdwhandler 3 -#define Anum_pg_foreign_data_wrapper_fdwvalidator 4 -#define Anum_pg_foreign_data_wrapper_fdwacl 5 -#define Anum_pg_foreign_data_wrapper_fdwoptions 6 - #endif /* PG_FOREIGN_DATA_WRAPPER_H */ diff --git a/src/include/catalog/pg_foreign_server.h b/src/include/catalog/pg_foreign_server.h index 689dbbb4e7..3f7cc0c046 100644 --- a/src/include/catalog/pg_foreign_server.h +++ b/src/include/catalog/pg_foreign_server.h @@ -1,16 +1,16 @@ /*------------------------------------------------------------------------- * * pg_foreign_server.h - * definition of the system "foreign server" relation (pg_foreign_server) + * definition of the "foreign server" system catalog (pg_foreign_server) * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/include/catalog/pg_foreign_server.h * * NOTES - * the genbki.pl script reads this file and generates .bki - * information from the DATA() statements. + * The Catalog.pm module reads this file and derives schema + * information. * *------------------------------------------------------------------------- */ @@ -18,15 +18,14 @@ #define PG_FOREIGN_SERVER_H #include "catalog/genbki.h" +#include "catalog/pg_foreign_server_d.h" /* ---------------- * pg_foreign_server definition. 
cpp turns this into * typedef struct FormData_pg_foreign_server * ---------------- */ -#define ForeignServerRelationId 1417 - -CATALOG(pg_foreign_server,1417) +CATALOG(pg_foreign_server,1417,ForeignServerRelationId) { NameData srvname; /* foreign server name */ Oid srvowner; /* server owner */ @@ -47,18 +46,4 @@ CATALOG(pg_foreign_server,1417) */ typedef FormData_pg_foreign_server *Form_pg_foreign_server; -/* ---------------- - * compiler constants for pg_foreign_server - * ---------------- - */ - -#define Natts_pg_foreign_server 7 -#define Anum_pg_foreign_server_srvname 1 -#define Anum_pg_foreign_server_srvowner 2 -#define Anum_pg_foreign_server_srvfdw 3 -#define Anum_pg_foreign_server_srvtype 4 -#define Anum_pg_foreign_server_srvversion 5 -#define Anum_pg_foreign_server_srvacl 6 -#define Anum_pg_foreign_server_srvoptions 7 - #endif /* PG_FOREIGN_SERVER_H */ diff --git a/src/include/catalog/pg_foreign_table.h b/src/include/catalog/pg_foreign_table.h index c5dbcceef1..6e8b419520 100644 --- a/src/include/catalog/pg_foreign_table.h +++ b/src/include/catalog/pg_foreign_table.h @@ -1,16 +1,16 @@ /*------------------------------------------------------------------------- * * pg_foreign_table.h - * definition of the system "foreign table" relation (pg_foreign_table) + * definition of the "foreign table" system catalog (pg_foreign_table) * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/include/catalog/pg_foreign_table.h * * NOTES - * the genbki.pl script reads this file and generates .bki - * information from the DATA() statements. + * The Catalog.pm module reads this file and derives schema + * information. * *------------------------------------------------------------------------- */ @@ -18,15 +18,14 @@ #define PG_FOREIGN_TABLE_H #include "catalog/genbki.h" +#include "catalog/pg_foreign_table_d.h" /* ---------------- * pg_foreign_table definition. cpp turns this into * typedef struct FormData_pg_foreign_table * ---------------- */ -#define ForeignTableRelationId 3118 - -CATALOG(pg_foreign_table,3118) BKI_WITHOUT_OIDS +CATALOG(pg_foreign_table,3118,ForeignTableRelationId) BKI_WITHOUT_OIDS { Oid ftrelid; /* OID of foreign table */ Oid ftserver; /* OID of foreign server */ @@ -43,14 +42,4 @@ CATALOG(pg_foreign_table,3118) BKI_WITHOUT_OIDS */ typedef FormData_pg_foreign_table *Form_pg_foreign_table; -/* ---------------- - * compiler constants for pg_foreign_table - * ---------------- - */ - -#define Natts_pg_foreign_table 3 -#define Anum_pg_foreign_table_ftrelid 1 -#define Anum_pg_foreign_table_ftserver 2 -#define Anum_pg_foreign_table_ftoptions 3 - #endif /* PG_FOREIGN_TABLE_H */ diff --git a/src/include/catalog/pg_index.h b/src/include/catalog/pg_index.h index 8505c3be5f..5f72a5571f 100644 --- a/src/include/catalog/pg_index.h +++ b/src/include/catalog/pg_index.h @@ -1,18 +1,17 @@ /*------------------------------------------------------------------------- * * pg_index.h - * definition of the system "index" relation (pg_index) - * along with the relation's initial contents. 
+ * definition of the "index" system catalog (pg_index) * * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/include/catalog/pg_index.h * * NOTES - * the genbki.pl script reads this file and generates .bki - * information from the DATA() statements. + * The Catalog.pm module reads this file and derives schema + * information. * *------------------------------------------------------------------------- */ @@ -20,19 +19,19 @@ #define PG_INDEX_H #include "catalog/genbki.h" +#include "catalog/pg_index_d.h" /* ---------------- * pg_index definition. cpp turns this into * typedef struct FormData_pg_index. * ---------------- */ -#define IndexRelationId 2610 - -CATALOG(pg_index,2610) BKI_WITHOUT_OIDS BKI_SCHEMA_MACRO +CATALOG(pg_index,2610,IndexRelationId) BKI_WITHOUT_OIDS BKI_SCHEMA_MACRO { Oid indexrelid; /* OID of the index */ Oid indrelid; /* OID of the relation it indexes */ - int16 indnatts; /* number of columns in index */ + int16 indnatts; /* total number of columns in index */ + int16 indnkeyatts; /* number of key columns in index */ bool indisunique; /* is this a unique index? */ bool indisprimary; /* is this index for primary key? */ bool indisexclusion; /* is this index for exclusion constraint? */ @@ -66,30 +65,7 @@ CATALOG(pg_index,2610) BKI_WITHOUT_OIDS BKI_SCHEMA_MACRO */ typedef FormData_pg_index *Form_pg_index; -/* ---------------- - * compiler constants for pg_index - * ---------------- - */ -#define Natts_pg_index 19 -#define Anum_pg_index_indexrelid 1 -#define Anum_pg_index_indrelid 2 -#define Anum_pg_index_indnatts 3 -#define Anum_pg_index_indisunique 4 -#define Anum_pg_index_indisprimary 5 -#define Anum_pg_index_indisexclusion 6 -#define Anum_pg_index_indimmediate 7 -#define Anum_pg_index_indisclustered 8 -#define Anum_pg_index_indisvalid 9 -#define Anum_pg_index_indcheckxmin 10 -#define Anum_pg_index_indisready 11 -#define Anum_pg_index_indislive 12 -#define Anum_pg_index_indisreplident 13 -#define Anum_pg_index_indkey 14 -#define Anum_pg_index_indcollation 15 -#define Anum_pg_index_indclass 16 -#define Anum_pg_index_indoption 17 -#define Anum_pg_index_indexprs 18 -#define Anum_pg_index_indpred 19 +#ifdef EXPOSE_TO_CLIENT_CODE /* * Index AMs that support ordered scans must support these two indoption @@ -99,6 +75,8 @@ typedef FormData_pg_index *Form_pg_index; #define INDOPTION_DESC 0x0001 /* values are in reverse order */ #define INDOPTION_NULLS_FIRST 0x0002 /* NULLs are first instead of last */ +#endif /* EXPOSE_TO_CLIENT_CODE */ + /* * Use of these macros is recommended over direct examination of the state * flag columns where possible; this allows source code compatibility with diff --git a/src/include/catalog/pg_inherits.h b/src/include/catalog/pg_inherits.h index 26bfab5db6..2a98e02c6a 100644 --- a/src/include/catalog/pg_inherits.h +++ b/src/include/catalog/pg_inherits.h @@ -1,18 +1,17 @@ /*------------------------------------------------------------------------- * * pg_inherits.h - * definition of the system "inherits" relation (pg_inherits) - * along with the relation's initial contents. 
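To make the new indnkeyatts/indnatts distinction in the pg_index.h hunk above concrete, here is a minimal sketch (not part of the patch; walk_index_columns is a made-up name): code that iterates over an index's columns can now treat trailing non-key columns separately from the key columns.

    #include "postgres.h"
    #include "catalog/pg_index.h"

    static void
    walk_index_columns(Form_pg_index indexForm)
    {
        int     i;

        for (i = 0; i < indexForm->indnkeyatts; i++)
        {
            /* columns 0 .. indnkeyatts-1 make up the index key proper */
        }
        for (; i < indexForm->indnatts; i++)
        {
            /* any remaining columns are stored in the index but are not key columns */
        }
    }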
+ * definition of the "inherits" system catalog (pg_inherits) * * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/include/catalog/pg_inherits.h * * NOTES - * the genbki.pl script reads this file and generates .bki - * information from the DATA() statements. + * The Catalog.pm module reads this file and derives schema + * information. * *------------------------------------------------------------------------- */ @@ -20,15 +19,17 @@ #define PG_INHERITS_H #include "catalog/genbki.h" +#include "catalog/pg_inherits_d.h" + +#include "nodes/pg_list.h" +#include "storage/lock.h" /* ---------------- * pg_inherits definition. cpp turns this into * typedef struct FormData_pg_inherits * ---------------- */ -#define InheritsRelationId 2611 - -CATALOG(pg_inherits,2611) BKI_WITHOUT_OIDS +CATALOG(pg_inherits,2611,InheritsRelationId) BKI_WITHOUT_OIDS { Oid inhrelid; Oid inhparent; @@ -42,18 +43,15 @@ CATALOG(pg_inherits,2611) BKI_WITHOUT_OIDS */ typedef FormData_pg_inherits *Form_pg_inherits; -/* ---------------- - * compiler constants for pg_inherits - * ---------------- - */ -#define Natts_pg_inherits 3 -#define Anum_pg_inherits_inhrelid 1 -#define Anum_pg_inherits_inhparent 2 -#define Anum_pg_inherits_inhseqno 3 -/* ---------------- - * pg_inherits has no initial contents - * ---------------- - */ +extern List *find_inheritance_children(Oid parentrelId, LOCKMODE lockmode); +extern List *find_all_inheritors(Oid parentrelId, LOCKMODE lockmode, + List **parents); +extern bool has_subclass(Oid relationId); +extern bool has_superclass(Oid relationId); +extern bool typeInheritsFrom(Oid subclassTypeId, Oid superclassTypeId); +extern void StoreSingleInheritance(Oid relationId, Oid parentOid, + int32 seqNumber); +extern bool DeleteInheritsTuple(Oid inhrelid, Oid inhparent); #endif /* PG_INHERITS_H */ diff --git a/src/include/catalog/pg_inherits_fn.h b/src/include/catalog/pg_inherits_fn.h deleted file mode 100644 index 7743388899..0000000000 --- a/src/include/catalog/pg_inherits_fn.h +++ /dev/null @@ -1,27 +0,0 @@ -/*------------------------------------------------------------------------- - * - * pg_inherits_fn.h - * prototypes for functions in catalog/pg_inherits.c - * - * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group - * Portions Copyright (c) 1994, Regents of the University of California - * - * src/include/catalog/pg_inherits_fn.h - * - *------------------------------------------------------------------------- - */ -#ifndef PG_INHERITS_FN_H -#define PG_INHERITS_FN_H - -#include "nodes/pg_list.h" -#include "storage/lock.h" - -extern List *find_inheritance_children(Oid parentrelId, LOCKMODE lockmode); -extern List *find_all_inheritors(Oid parentrelId, LOCKMODE lockmode, - List **parents); -extern bool has_subclass(Oid relationId); -extern bool has_superclass(Oid relationId); -extern bool typeInheritsFrom(Oid subclassTypeId, Oid superclassTypeId); - -#endif /* PG_INHERITS_FN_H */ diff --git a/src/include/catalog/pg_init_privs.h b/src/include/catalog/pg_init_privs.h index 5fca36334b..39de09cb9d 100644 --- a/src/include/catalog/pg_init_privs.h +++ b/src/include/catalog/pg_init_privs.h @@ -1,7 +1,7 @@ /*------------------------------------------------------------------------- * * pg_init_privs.h - * definition of the system "initial privileges" relation (pg_init_privs) + * definition of the "initial 
privileges" system catalog (pg_init_privs) * * NOTE: an object is identified by the OID of the row that primarily * defines the object, plus the OID of the table that that row appears in. @@ -15,17 +15,20 @@ * for a table itself, so that it is distinct from any column privilege. * Currently, objsubid is unused and zero for all other kinds of objects. * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Because the contents of this table depend on what is done with the other + * objects in the system (and, in particular, may change due to changes in + * system_views.sql), there is no pg_init_privs.dat file. The initial contents + * are loaded near the end of initdb. + * + * + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/include/catalog/pg_init_privs.h * * NOTES - * the genbki.pl script reads this file and generates .bki - * information from the DATA() statements. - * - * XXX do NOT break up DATA() statements into multiple lines! - * the scripts are not as smart as you might think... + * The Catalog.pm module reads this file and derives schema + * information. * *------------------------------------------------------------------------- */ @@ -33,15 +36,14 @@ #define PG_INIT_PRIVS_H #include "catalog/genbki.h" +#include "catalog/pg_init_privs_d.h" /* ---------------- * pg_init_privs definition. cpp turns this into * typedef struct FormData_pg_init_privs * ---------------- */ -#define InitPrivsRelationId 3394 - -CATALOG(pg_init_privs,3394) BKI_WITHOUT_OIDS +CATALOG(pg_init_privs,3394,InitPrivsRelationId) BKI_WITHOUT_OIDS { Oid objoid; /* OID of object itself */ Oid classoid; /* OID of table containing object */ @@ -60,17 +62,6 @@ CATALOG(pg_init_privs,3394) BKI_WITHOUT_OIDS */ typedef FormData_pg_init_privs * Form_pg_init_privs; -/* ---------------- - * compiler constants for pg_init_privs - * ---------------- - */ -#define Natts_pg_init_privs 5 -#define Anum_pg_init_privs_objoid 1 -#define Anum_pg_init_privs_classoid 2 -#define Anum_pg_init_privs_objsubid 3 -#define Anum_pg_init_privs_privtype 4 -#define Anum_pg_init_privs_privs 5 - /* * It is important to know if the initial privileges are from initdb or from an * extension. This enum is used to provide that differentiation and the two @@ -84,17 +75,4 @@ typedef enum InitPrivsType INITPRIVS_EXTENSION = 'e' } InitPrivsType; -/* ---------------- - * initial contents of pg_init_privs - * ---------------- - */ - -/* - * Because the contents of this table depend on what is done with the other - * objects in the system (and, in particular, may change due to changes is - * system_views.sql), there is no initialization here. - * - * The initial contents are loaded near the end of initdb. - */ - #endif /* PG_INIT_PRIVS_H */ diff --git a/src/include/catalog/pg_language.dat b/src/include/catalog/pg_language.dat new file mode 100644 index 0000000000..a835b7ee31 --- /dev/null +++ b/src/include/catalog/pg_language.dat @@ -0,0 +1,25 @@ +#---------------------------------------------------------------------- +# +# pg_language.dat +# Initial contents of the pg_language system catalog. 
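Looking back at the pg_inherits.h hunk above, which absorbs the prototypes from the now-deleted pg_inherits_fn.h: callers keep the same function signatures, and only the #include changes. A minimal sketch (direct_children is a made-up helper; AccessShareLock and List come from the lock and list headers that pg_inherits.h now pulls in):

    #include "postgres.h"
    #include "catalog/pg_inherits.h"    /* previously catalog/pg_inherits_fn.h */

    /* Return the direct children of a table, taking AccessShareLock on each. */
    static List *
    direct_children(Oid parentrelid)
    {
        return find_inheritance_children(parentrelid, AccessShareLock);
    }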
+# +# Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group +# Portions Copyright (c) 1994, Regents of the University of California +# +# src/include/catalog/pg_language.dat +# +#---------------------------------------------------------------------- + +[ + +{ oid => '12', oid_symbol => 'INTERNALlanguageId', + descr => 'built-in functions', + lanname => 'internal', lanvalidator => 'fmgr_internal_validator' }, +{ oid => '13', oid_symbol => 'ClanguageId', + descr => 'dynamically-loaded C functions', + lanname => 'c', lanvalidator => 'fmgr_c_validator' }, +{ oid => '14', oid_symbol => 'SQLlanguageId', + descr => 'SQL-language functions', + lanname => 'sql', lanpltrusted => 't', lanvalidator => 'fmgr_sql_validator' }, + +] diff --git a/src/include/catalog/pg_language.h b/src/include/catalog/pg_language.h index ad244e839b..359701b452 100644 --- a/src/include/catalog/pg_language.h +++ b/src/include/catalog/pg_language.h @@ -1,18 +1,17 @@ /*------------------------------------------------------------------------- * * pg_language.h - * definition of the system "language" relation (pg_language) - * along with the relation's initial contents. + * definition of the "language" system catalog (pg_language) * * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/include/catalog/pg_language.h * * NOTES - * the genbki.pl script reads this file and generates .bki - * information from the DATA() statements. + * The Catalog.pm module reads this file and derives schema + * information. * *------------------------------------------------------------------------- */ @@ -20,26 +19,39 @@ #define PG_LANGUAGE_H #include "catalog/genbki.h" +#include "catalog/pg_language_d.h" /* ---------------- * pg_language definition. 
cpp turns this into * typedef struct FormData_pg_language * ---------------- */ -#define LanguageRelationId 2612 - -CATALOG(pg_language,2612) +CATALOG(pg_language,2612,LanguageRelationId) { - NameData lanname; /* Language name */ - Oid lanowner; /* Language's owner */ - bool lanispl; /* Is a procedural language */ - bool lanpltrusted; /* PL is trusted */ - Oid lanplcallfoid; /* Call handler for PL */ - Oid laninline; /* Optional anonymous-block handler function */ - Oid lanvalidator; /* Optional validation function */ + /* Language name */ + NameData lanname; + + /* Language's owner */ + Oid lanowner BKI_DEFAULT(PGUID); + + /* Is a procedural language */ + bool lanispl BKI_DEFAULT(f); + + /* PL is trusted */ + bool lanpltrusted BKI_DEFAULT(f); + + /* Call handler, if it's a PL */ + Oid lanplcallfoid BKI_DEFAULT(0) BKI_LOOKUP(pg_proc); + + /* Optional anonymous-block handler function */ + Oid laninline BKI_DEFAULT(0) BKI_LOOKUP(pg_proc); + + /* Optional validation function */ + Oid lanvalidator BKI_DEFAULT(0) BKI_LOOKUP(pg_proc); #ifdef CATALOG_VARLEN /* variable-length fields start here */ - aclitem lanacl[1]; /* Access privileges */ + /* Access privileges */ + aclitem lanacl[1] BKI_DEFAULT(_null_); #endif } FormData_pg_language; @@ -50,33 +62,4 @@ CATALOG(pg_language,2612) */ typedef FormData_pg_language *Form_pg_language; -/* ---------------- - * compiler constants for pg_language - * ---------------- - */ -#define Natts_pg_language 8 -#define Anum_pg_language_lanname 1 -#define Anum_pg_language_lanowner 2 -#define Anum_pg_language_lanispl 3 -#define Anum_pg_language_lanpltrusted 4 -#define Anum_pg_language_lanplcallfoid 5 -#define Anum_pg_language_laninline 6 -#define Anum_pg_language_lanvalidator 7 -#define Anum_pg_language_lanacl 8 - -/* ---------------- - * initial contents of pg_language - * ---------------- - */ - -DATA(insert OID = 12 ( "internal" PGUID f f 0 0 2246 _null_ )); -DESCR("built-in functions"); -#define INTERNALlanguageId 12 -DATA(insert OID = 13 ( "c" PGUID f f 0 0 2247 _null_ )); -DESCR("dynamically-loaded C functions"); -#define ClanguageId 13 -DATA(insert OID = 14 ( "sql" PGUID f t 0 0 2248 _null_ )); -DESCR("SQL-language functions"); -#define SQLlanguageId 14 - #endif /* PG_LANGUAGE_H */ diff --git a/src/include/catalog/pg_largeobject.h b/src/include/catalog/pg_largeobject.h index f2df67c35f..a8aa42e049 100644 --- a/src/include/catalog/pg_largeobject.h +++ b/src/include/catalog/pg_largeobject.h @@ -1,18 +1,17 @@ /*------------------------------------------------------------------------- * * pg_largeobject.h - * definition of the system "largeobject" relation (pg_largeobject) - * along with the relation's initial contents. + * definition of the "large object" system catalog (pg_largeobject) * * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/include/catalog/pg_largeobject.h * * NOTES - * the genbki.pl script reads this file and generates .bki - * information from the DATA() statements. + * The Catalog.pm module reads this file and derives schema + * information. * *------------------------------------------------------------------------- */ @@ -20,15 +19,14 @@ #define PG_LARGEOBJECT_H #include "catalog/genbki.h" +#include "catalog/pg_largeobject_d.h" /* ---------------- * pg_largeobject definition. 
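For the pg_language.h hunk above: the hand-maintained Natts_/Anum_ constants and the OID macros (INTERNALlanguageId and friends) are not gone; they are now emitted into the generated pg_language_d.h that the header includes, so existing callers should compile unchanged. A rough sketch using ordinary backend facilities (language_is_trusted is a made-up helper; SearchSysCache1, LANGOID and GETSTRUCT are the existing syscache/tuple APIs):

    #include "postgres.h"
    #include "access/htup_details.h"
    #include "catalog/pg_language.h"    /* brings in the generated pg_language_d.h */
    #include "utils/syscache.h"

    /* Look up pg_language by OID and report whether the PL is trusted. */
    static bool
    language_is_trusted(Oid langId)
    {
        HeapTuple   tup = SearchSysCache1(LANGOID, ObjectIdGetDatum(langId));
        bool        result = false;

        if (HeapTupleIsValid(tup))
        {
            result = ((Form_pg_language) GETSTRUCT(tup))->lanpltrusted;
            ReleaseSysCache(tup);
        }
        return result;
    }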
cpp turns this into * typedef struct FormData_pg_largeobject * ---------------- */ -#define LargeObjectRelationId 2613 - -CATALOG(pg_largeobject,2613) BKI_WITHOUT_OIDS +CATALOG(pg_largeobject,2613,LargeObjectRelationId) BKI_WITHOUT_OIDS { Oid loid; /* Identifier of large object */ int32 pageno; /* Page number (starting from 0) */ @@ -45,15 +43,6 @@ CATALOG(pg_largeobject,2613) BKI_WITHOUT_OIDS */ typedef FormData_pg_largeobject *Form_pg_largeobject; -/* ---------------- - * compiler constants for pg_largeobject - * ---------------- - */ -#define Natts_pg_largeobject 3 -#define Anum_pg_largeobject_loid 1 -#define Anum_pg_largeobject_pageno 2 -#define Anum_pg_largeobject_data 3 - extern Oid LargeObjectCreate(Oid loid); extern void LargeObjectDrop(Oid loid); extern bool LargeObjectExists(Oid loid); diff --git a/src/include/catalog/pg_largeobject_metadata.h b/src/include/catalog/pg_largeobject_metadata.h index 7ae6d8c02b..c8560dc2af 100644 --- a/src/include/catalog/pg_largeobject_metadata.h +++ b/src/include/catalog/pg_largeobject_metadata.h @@ -1,18 +1,18 @@ /*------------------------------------------------------------------------- * * pg_largeobject_metadata.h - * definition of the system "largeobject_metadata" relation (pg_largeobject_metadata) - * along with the relation's initial contents. + * definition of the "large object metadata" system catalog + * (pg_largeobject_metadata) * * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/include/catalog/pg_largeobject_metadata.h * * NOTES - * the genbki.pl script reads this file and generates .bki - * information from the DATA() statements. + * The Catalog.pm module reads this file and derives schema + * information. * *------------------------------------------------------------------------- */ @@ -20,15 +20,14 @@ #define PG_LARGEOBJECT_METADATA_H #include "catalog/genbki.h" +#include "catalog/pg_largeobject_metadata_d.h" /* ---------------- * pg_largeobject_metadata definition. cpp turns this into * typedef struct FormData_pg_largeobject_metadata * ---------------- */ -#define LargeObjectMetadataRelationId 2995 - -CATALOG(pg_largeobject_metadata,2995) +CATALOG(pg_largeobject_metadata,2995,LargeObjectMetadataRelationId) { Oid lomowner; /* OID of the largeobject owner */ @@ -44,12 +43,4 @@ CATALOG(pg_largeobject_metadata,2995) */ typedef FormData_pg_largeobject_metadata *Form_pg_largeobject_metadata; -/* ---------------- - * compiler constants for pg_largeobject_metadata - * ---------------- - */ -#define Natts_pg_largeobject_metadata 2 -#define Anum_pg_largeobject_metadata_lomowner 1 -#define Anum_pg_largeobject_metadata_lomacl 2 - #endif /* PG_LARGEOBJECT_METADATA_H */ diff --git a/src/include/catalog/pg_namespace.dat b/src/include/catalog/pg_namespace.dat new file mode 100644 index 0000000000..8a5c5c40a7 --- /dev/null +++ b/src/include/catalog/pg_namespace.dat @@ -0,0 +1,25 @@ +#---------------------------------------------------------------------- +# +# pg_namespace.dat +# Initial contents of the pg_namespace system catalog. 
+# +# Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group +# Portions Copyright (c) 1994, Regents of the University of California +# +# src/include/catalog/pg_namespace.dat +# +#---------------------------------------------------------------------- + +[ + +{ oid => '11', oid_symbol => 'PG_CATALOG_NAMESPACE', + descr => 'system catalog schema', + nspname => 'pg_catalog', nspowner => 'PGUID', nspacl => '_null_' }, +{ oid => '99', oid_symbol => 'PG_TOAST_NAMESPACE', + descr => 'reserved schema for TOAST tables', + nspname => 'pg_toast', nspowner => 'PGUID', nspacl => '_null_' }, +{ oid => '2200', oid_symbol => 'PG_PUBLIC_NAMESPACE', + descr => 'standard public schema', + nspname => 'public', nspowner => 'PGUID', nspacl => '_null_' }, + +] diff --git a/src/include/catalog/pg_namespace.h b/src/include/catalog/pg_namespace.h index a61a8635f6..0923e066b4 100644 --- a/src/include/catalog/pg_namespace.h +++ b/src/include/catalog/pg_namespace.h @@ -1,18 +1,17 @@ /*------------------------------------------------------------------------- * * pg_namespace.h - * definition of the system "namespace" relation (pg_namespace) - * along with the relation's initial contents. + * definition of the "namespace" system catalog (pg_namespace) * * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/include/catalog/pg_namespace.h * * NOTES - * the genbki.pl script reads this file and generates .bki - * information from the DATA() statements. + * The Catalog.pm module reads this file and derives schema + * information. * *------------------------------------------------------------------------- */ @@ -20,6 +19,7 @@ #define PG_NAMESPACE_H #include "catalog/genbki.h" +#include "catalog/pg_namespace_d.h" /* ---------------------------------------------------------------- * pg_namespace definition. @@ -31,9 +31,7 @@ * nspacl access privilege list * ---------------------------------------------------------------- */ -#define NamespaceRelationId 2615 - -CATALOG(pg_namespace,2615) +CATALOG(pg_namespace,2615,NamespaceRelationId) { NameData nspname; Oid nspowner; @@ -50,33 +48,6 @@ CATALOG(pg_namespace,2615) */ typedef FormData_pg_namespace *Form_pg_namespace; -/* ---------------- - * compiler constants for pg_namespace - * ---------------- - */ - -#define Natts_pg_namespace 3 -#define Anum_pg_namespace_nspname 1 -#define Anum_pg_namespace_nspowner 2 -#define Anum_pg_namespace_nspacl 3 - - -/* ---------------- - * initial contents of pg_namespace - * --------------- - */ - -DATA(insert OID = 11 ( "pg_catalog" PGUID _null_ )); -DESCR("system catalog schema"); -#define PG_CATALOG_NAMESPACE 11 -DATA(insert OID = 99 ( "pg_toast" PGUID _null_ )); -DESCR("reserved schema for TOAST tables"); -#define PG_TOAST_NAMESPACE 99 -DATA(insert OID = 2200 ( "public" PGUID _null_ )); -DESCR("standard public schema"); -#define PG_PUBLIC_NAMESPACE 2200 - - /* * prototypes for functions in pg_namespace.c */ diff --git a/src/include/catalog/pg_opclass.dat b/src/include/catalog/pg_opclass.dat new file mode 100644 index 0000000000..13928ba4a0 --- /dev/null +++ b/src/include/catalog/pg_opclass.dat @@ -0,0 +1,341 @@ +#---------------------------------------------------------------------- +# +# pg_opclass.dat +# Initial contents of the pg_opclass system catalog. 
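For the pg_namespace.dat and pg_namespace.h hunks above: the oid_symbol fields take over from the removed hand-written #defines, so PG_CATALOG_NAMESPACE, PG_TOAST_NAMESPACE and PG_PUBLIC_NAMESPACE still exist (now via the generated pg_namespace_d.h) and are used exactly as before. A trivial sketch (is_builtin_namespace is a made-up helper):

    #include "postgres.h"
    #include "catalog/pg_namespace.h"

    /* True for the three namespaces hard-wired into pg_namespace.dat. */
    static bool
    is_builtin_namespace(Oid nspid)
    {
        return nspid == PG_CATALOG_NAMESPACE ||
               nspid == PG_TOAST_NAMESPACE ||
               nspid == PG_PUBLIC_NAMESPACE;
    }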
+# +# Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group +# Portions Copyright (c) 1994, Regents of the University of California +# +# src/include/catalog/pg_opclass.dat +# +#---------------------------------------------------------------------- + +[ + +# Note: we hard-wire an OID only for a few entries that have to be explicitly +# referenced in the C code or in built-in catalog entries. The rest get OIDs +# assigned on-the-fly during initdb. + +{ opcmethod => 'btree', opcname => 'array_ops', opcfamily => 'btree/array_ops', + opcintype => 'anyarray' }, +{ opcmethod => 'hash', opcname => 'array_ops', opcfamily => 'hash/array_ops', + opcintype => 'anyarray' }, +{ opcmethod => 'btree', opcname => 'bit_ops', opcfamily => 'btree/bit_ops', + opcintype => 'bit' }, +{ opcmethod => 'btree', opcname => 'bool_ops', opcfamily => 'btree/bool_ops', + opcintype => 'bool' }, +{ opcmethod => 'btree', opcname => 'bpchar_ops', + opcfamily => 'btree/bpchar_ops', opcintype => 'bpchar' }, +{ opcmethod => 'hash', opcname => 'bpchar_ops', opcfamily => 'hash/bpchar_ops', + opcintype => 'bpchar' }, +{ opcmethod => 'btree', opcname => 'bytea_ops', opcfamily => 'btree/bytea_ops', + opcintype => 'bytea' }, +{ opcmethod => 'btree', opcname => 'char_ops', opcfamily => 'btree/char_ops', + opcintype => 'char' }, +{ opcmethod => 'hash', opcname => 'char_ops', opcfamily => 'hash/char_ops', + opcintype => 'char' }, +{ opcmethod => 'btree', opcname => 'cidr_ops', opcfamily => 'btree/network_ops', + opcintype => 'inet', opcdefault => 'f' }, +{ opcmethod => 'hash', opcname => 'cidr_ops', opcfamily => 'hash/network_ops', + opcintype => 'inet', opcdefault => 'f' }, +{ oid => '3122', oid_symbol => 'DATE_BTREE_OPS_OID', + opcmethod => 'btree', opcname => 'date_ops', + opcfamily => 'btree/datetime_ops', opcintype => 'date' }, +{ opcmethod => 'hash', opcname => 'date_ops', opcfamily => 'hash/date_ops', + opcintype => 'date' }, +{ opcmethod => 'btree', opcname => 'float4_ops', opcfamily => 'btree/float_ops', + opcintype => 'float4' }, +{ opcmethod => 'hash', opcname => 'float4_ops', opcfamily => 'hash/float_ops', + opcintype => 'float4' }, +{ oid => '3123', oid_symbol => 'FLOAT8_BTREE_OPS_OID', + opcmethod => 'btree', opcname => 'float8_ops', opcfamily => 'btree/float_ops', + opcintype => 'float8' }, +{ opcmethod => 'hash', opcname => 'float8_ops', opcfamily => 'hash/float_ops', + opcintype => 'float8' }, +{ opcmethod => 'btree', opcname => 'inet_ops', opcfamily => 'btree/network_ops', + opcintype => 'inet' }, +{ opcmethod => 'hash', opcname => 'inet_ops', opcfamily => 'hash/network_ops', + opcintype => 'inet' }, +{ opcmethod => 'gist', opcname => 'inet_ops', opcfamily => 'gist/network_ops', + opcintype => 'inet', opcdefault => 'f' }, +{ opcmethod => 'spgist', opcname => 'inet_ops', + opcfamily => 'spgist/network_ops', opcintype => 'inet' }, +{ oid => '1979', oid_symbol => 'INT2_BTREE_OPS_OID', + opcmethod => 'btree', opcname => 'int2_ops', opcfamily => 'btree/integer_ops', + opcintype => 'int2' }, +{ opcmethod => 'hash', opcname => 'int2_ops', opcfamily => 'hash/integer_ops', + opcintype => 'int2' }, +{ oid => '1978', oid_symbol => 'INT4_BTREE_OPS_OID', + opcmethod => 'btree', opcname => 'int4_ops', opcfamily => 'btree/integer_ops', + opcintype => 'int4' }, +{ opcmethod => 'hash', opcname => 'int4_ops', opcfamily => 'hash/integer_ops', + opcintype => 'int4' }, +{ oid => '3124', oid_symbol => 'INT8_BTREE_OPS_OID', + opcmethod => 'btree', opcname => 'int8_ops', opcfamily => 'btree/integer_ops', + opcintype => 'int8' 
}, +{ opcmethod => 'hash', opcname => 'int8_ops', opcfamily => 'hash/integer_ops', + opcintype => 'int8' }, +{ opcmethod => 'btree', opcname => 'interval_ops', + opcfamily => 'btree/interval_ops', opcintype => 'interval' }, +{ opcmethod => 'hash', opcname => 'interval_ops', + opcfamily => 'hash/interval_ops', opcintype => 'interval' }, +{ opcmethod => 'btree', opcname => 'macaddr_ops', + opcfamily => 'btree/macaddr_ops', opcintype => 'macaddr' }, +{ opcmethod => 'hash', opcname => 'macaddr_ops', + opcfamily => 'hash/macaddr_ops', opcintype => 'macaddr' }, +{ opcmethod => 'btree', opcname => 'macaddr8_ops', + opcfamily => 'btree/macaddr8_ops', opcintype => 'macaddr8' }, +{ opcmethod => 'hash', opcname => 'macaddr8_ops', + opcfamily => 'hash/macaddr8_ops', opcintype => 'macaddr8' }, + +# Here's an ugly little hack to save space in the system catalog indexes. +# btree doesn't ordinarily allow a storage type different from input type; +# but cstring and name are the same thing except for trailing padding, +# and we can safely omit that within an index entry. So we declare the +# btree opclass for name as using cstring storage type. +{ opcmethod => 'btree', opcname => 'name_ops', opcfamily => 'btree/name_ops', + opcintype => 'name', opckeytype => 'cstring' }, + +{ opcmethod => 'hash', opcname => 'name_ops', opcfamily => 'hash/name_ops', + opcintype => 'name' }, +{ oid => '3125', oid_symbol => 'NUMERIC_BTREE_OPS_OID', + opcmethod => 'btree', opcname => 'numeric_ops', + opcfamily => 'btree/numeric_ops', opcintype => 'numeric' }, +{ opcmethod => 'hash', opcname => 'numeric_ops', + opcfamily => 'hash/numeric_ops', opcintype => 'numeric' }, +{ oid => '1981', oid_symbol => 'OID_BTREE_OPS_OID', + opcmethod => 'btree', opcname => 'oid_ops', opcfamily => 'btree/oid_ops', + opcintype => 'oid' }, +{ opcmethod => 'hash', opcname => 'oid_ops', opcfamily => 'hash/oid_ops', + opcintype => 'oid' }, +{ opcmethod => 'btree', opcname => 'oidvector_ops', + opcfamily => 'btree/oidvector_ops', opcintype => 'oidvector' }, +{ opcmethod => 'hash', opcname => 'oidvector_ops', + opcfamily => 'hash/oidvector_ops', opcintype => 'oidvector' }, +{ opcmethod => 'btree', opcname => 'record_ops', + opcfamily => 'btree/record_ops', opcintype => 'record' }, +{ opcmethod => 'btree', opcname => 'record_image_ops', + opcfamily => 'btree/record_image_ops', opcintype => 'record', + opcdefault => 'f' }, +{ oid => '3126', oid_symbol => 'TEXT_BTREE_OPS_OID', + opcmethod => 'btree', opcname => 'text_ops', opcfamily => 'btree/text_ops', + opcintype => 'text' }, +{ opcmethod => 'hash', opcname => 'text_ops', opcfamily => 'hash/text_ops', + opcintype => 'text' }, +{ opcmethod => 'btree', opcname => 'time_ops', opcfamily => 'btree/time_ops', + opcintype => 'time' }, +{ opcmethod => 'hash', opcname => 'time_ops', opcfamily => 'hash/time_ops', + opcintype => 'time' }, +{ oid => '3127', oid_symbol => 'TIMESTAMPTZ_BTREE_OPS_OID', + opcmethod => 'btree', opcname => 'timestamptz_ops', + opcfamily => 'btree/datetime_ops', opcintype => 'timestamptz' }, +{ opcmethod => 'hash', opcname => 'timestamptz_ops', + opcfamily => 'hash/timestamptz_ops', opcintype => 'timestamptz' }, +{ opcmethod => 'btree', opcname => 'timetz_ops', + opcfamily => 'btree/timetz_ops', opcintype => 'timetz' }, +{ opcmethod => 'hash', opcname => 'timetz_ops', opcfamily => 'hash/timetz_ops', + opcintype => 'timetz' }, +{ opcmethod => 'btree', opcname => 'varbit_ops', + opcfamily => 'btree/varbit_ops', opcintype => 'varbit' }, +{ opcmethod => 'btree', opcname => 'varchar_ops', 
opcfamily => 'btree/text_ops', + opcintype => 'text', opcdefault => 'f' }, +{ opcmethod => 'hash', opcname => 'varchar_ops', opcfamily => 'hash/text_ops', + opcintype => 'text', opcdefault => 'f' }, +{ oid => '3128', oid_symbol => 'TIMESTAMP_BTREE_OPS_OID', + opcmethod => 'btree', opcname => 'timestamp_ops', + opcfamily => 'btree/datetime_ops', opcintype => 'timestamp' }, +{ opcmethod => 'hash', opcname => 'timestamp_ops', + opcfamily => 'hash/timestamp_ops', opcintype => 'timestamp' }, +{ opcmethod => 'btree', opcname => 'text_pattern_ops', + opcfamily => 'btree/text_pattern_ops', opcintype => 'text', + opcdefault => 'f' }, +{ opcmethod => 'btree', opcname => 'varchar_pattern_ops', + opcfamily => 'btree/text_pattern_ops', opcintype => 'text', + opcdefault => 'f' }, +{ opcmethod => 'btree', opcname => 'bpchar_pattern_ops', + opcfamily => 'btree/bpchar_pattern_ops', opcintype => 'bpchar', + opcdefault => 'f' }, +{ opcmethod => 'btree', opcname => 'money_ops', opcfamily => 'btree/money_ops', + opcintype => 'money' }, +{ opcmethod => 'hash', opcname => 'bool_ops', opcfamily => 'hash/bool_ops', + opcintype => 'bool' }, +{ opcmethod => 'hash', opcname => 'bytea_ops', opcfamily => 'hash/bytea_ops', + opcintype => 'bytea' }, +{ opcmethod => 'btree', opcname => 'tid_ops', opcfamily => 'btree/tid_ops', + opcintype => 'tid' }, +{ opcmethod => 'hash', opcname => 'xid_ops', opcfamily => 'hash/xid_ops', + opcintype => 'xid' }, +{ opcmethod => 'hash', opcname => 'cid_ops', opcfamily => 'hash/cid_ops', + opcintype => 'cid' }, +{ opcmethod => 'hash', opcname => 'text_pattern_ops', + opcfamily => 'hash/text_pattern_ops', opcintype => 'text', + opcdefault => 'f' }, +{ opcmethod => 'hash', opcname => 'varchar_pattern_ops', + opcfamily => 'hash/text_pattern_ops', opcintype => 'text', + opcdefault => 'f' }, +{ opcmethod => 'hash', opcname => 'bpchar_pattern_ops', + opcfamily => 'hash/bpchar_pattern_ops', opcintype => 'bpchar', + opcdefault => 'f' }, +{ opcmethod => 'hash', opcname => 'aclitem_ops', + opcfamily => 'hash/aclitem_ops', opcintype => 'aclitem' }, +{ opcmethod => 'gist', opcname => 'box_ops', opcfamily => 'gist/box_ops', + opcintype => 'box' }, +{ opcmethod => 'gist', opcname => 'point_ops', opcfamily => 'gist/point_ops', + opcintype => 'point', opckeytype => 'box' }, +{ opcmethod => 'gist', opcname => 'poly_ops', opcfamily => 'gist/poly_ops', + opcintype => 'polygon', opckeytype => 'box' }, +{ opcmethod => 'gist', opcname => 'circle_ops', opcfamily => 'gist/circle_ops', + opcintype => 'circle', opckeytype => 'box' }, +{ opcmethod => 'gin', opcname => 'array_ops', opcfamily => 'gin/array_ops', + opcintype => 'anyarray', opckeytype => 'anyelement' }, +{ opcmethod => 'btree', opcname => 'uuid_ops', opcfamily => 'btree/uuid_ops', + opcintype => 'uuid' }, +{ opcmethod => 'hash', opcname => 'uuid_ops', opcfamily => 'hash/uuid_ops', + opcintype => 'uuid' }, +{ opcmethod => 'btree', opcname => 'pg_lsn_ops', + opcfamily => 'btree/pg_lsn_ops', opcintype => 'pg_lsn' }, +{ opcmethod => 'hash', opcname => 'pg_lsn_ops', opcfamily => 'hash/pg_lsn_ops', + opcintype => 'pg_lsn' }, +{ opcmethod => 'btree', opcname => 'enum_ops', opcfamily => 'btree/enum_ops', + opcintype => 'anyenum' }, +{ opcmethod => 'hash', opcname => 'enum_ops', opcfamily => 'hash/enum_ops', + opcintype => 'anyenum' }, +{ opcmethod => 'btree', opcname => 'tsvector_ops', + opcfamily => 'btree/tsvector_ops', opcintype => 'tsvector' }, +{ opcmethod => 'gist', opcname => 'tsvector_ops', + opcfamily => 'gist/tsvector_ops', opcintype => 'tsvector', + 
opckeytype => 'gtsvector' }, +{ opcmethod => 'gin', opcname => 'tsvector_ops', + opcfamily => 'gin/tsvector_ops', opcintype => 'tsvector', + opckeytype => 'text' }, +{ opcmethod => 'btree', opcname => 'tsquery_ops', + opcfamily => 'btree/tsquery_ops', opcintype => 'tsquery' }, +{ opcmethod => 'gist', opcname => 'tsquery_ops', + opcfamily => 'gist/tsquery_ops', opcintype => 'tsquery', + opckeytype => 'int8' }, +{ opcmethod => 'btree', opcname => 'range_ops', opcfamily => 'btree/range_ops', + opcintype => 'anyrange' }, +{ opcmethod => 'hash', opcname => 'range_ops', opcfamily => 'hash/range_ops', + opcintype => 'anyrange' }, +{ opcmethod => 'gist', opcname => 'range_ops', opcfamily => 'gist/range_ops', + opcintype => 'anyrange' }, +{ opcmethod => 'spgist', opcname => 'range_ops', + opcfamily => 'spgist/range_ops', opcintype => 'anyrange' }, +{ opcmethod => 'spgist', opcname => 'box_ops', opcfamily => 'spgist/box_ops', + opcintype => 'box' }, +{ opcmethod => 'spgist', opcname => 'quad_point_ops', + opcfamily => 'spgist/quad_point_ops', opcintype => 'point' }, +{ opcmethod => 'spgist', opcname => 'kd_point_ops', + opcfamily => 'spgist/kd_point_ops', opcintype => 'point', opcdefault => 'f' }, +{ opcmethod => 'spgist', opcname => 'text_ops', opcfamily => 'spgist/text_ops', + opcintype => 'text' }, +{ opcmethod => 'spgist', opcname => 'poly_ops', opcfamily => 'spgist/poly_ops', + opcintype => 'polygon', opckeytype => 'box' }, +{ opcmethod => 'btree', opcname => 'jsonb_ops', opcfamily => 'btree/jsonb_ops', + opcintype => 'jsonb' }, +{ opcmethod => 'hash', opcname => 'jsonb_ops', opcfamily => 'hash/jsonb_ops', + opcintype => 'jsonb' }, +{ opcmethod => 'gin', opcname => 'jsonb_ops', opcfamily => 'gin/jsonb_ops', + opcintype => 'jsonb', opckeytype => 'text' }, +{ opcmethod => 'gin', opcname => 'jsonb_path_ops', + opcfamily => 'gin/jsonb_path_ops', opcintype => 'jsonb', opcdefault => 'f', + opckeytype => 'int4' }, + +# BRIN operator classes + +# no brin opclass for bool + +{ opcmethod => 'brin', opcname => 'bytea_minmax_ops', + opcfamily => 'brin/bytea_minmax_ops', opcintype => 'bytea', + opckeytype => 'bytea' }, +{ opcmethod => 'brin', opcname => 'char_minmax_ops', + opcfamily => 'brin/char_minmax_ops', opcintype => 'char', + opckeytype => 'char' }, +{ opcmethod => 'brin', opcname => 'name_minmax_ops', + opcfamily => 'brin/name_minmax_ops', opcintype => 'name', + opckeytype => 'name' }, +{ opcmethod => 'brin', opcname => 'int8_minmax_ops', + opcfamily => 'brin/integer_minmax_ops', opcintype => 'int8', + opckeytype => 'int8' }, +{ opcmethod => 'brin', opcname => 'int2_minmax_ops', + opcfamily => 'brin/integer_minmax_ops', opcintype => 'int2', + opckeytype => 'int2' }, +{ opcmethod => 'brin', opcname => 'int4_minmax_ops', + opcfamily => 'brin/integer_minmax_ops', opcintype => 'int4', + opckeytype => 'int4' }, +{ opcmethod => 'brin', opcname => 'text_minmax_ops', + opcfamily => 'brin/text_minmax_ops', opcintype => 'text', + opckeytype => 'text' }, +{ opcmethod => 'brin', opcname => 'oid_minmax_ops', + opcfamily => 'brin/oid_minmax_ops', opcintype => 'oid', opckeytype => 'oid' }, +{ opcmethod => 'brin', opcname => 'tid_minmax_ops', + opcfamily => 'brin/tid_minmax_ops', opcintype => 'tid', opckeytype => 'tid' }, +{ opcmethod => 'brin', opcname => 'float4_minmax_ops', + opcfamily => 'brin/float_minmax_ops', opcintype => 'float4', + opckeytype => 'float4' }, +{ opcmethod => 'brin', opcname => 'float8_minmax_ops', + opcfamily => 'brin/float_minmax_ops', opcintype => 'float8', + opckeytype => 'float8' }, +{ 
opcmethod => 'brin', opcname => 'macaddr_minmax_ops', + opcfamily => 'brin/macaddr_minmax_ops', opcintype => 'macaddr', + opckeytype => 'macaddr' }, +{ opcmethod => 'brin', opcname => 'macaddr8_minmax_ops', + opcfamily => 'brin/macaddr8_minmax_ops', opcintype => 'macaddr8', + opckeytype => 'macaddr8' }, +{ opcmethod => 'brin', opcname => 'inet_minmax_ops', + opcfamily => 'brin/network_minmax_ops', opcintype => 'inet', + opcdefault => 'f', opckeytype => 'inet' }, +{ opcmethod => 'brin', opcname => 'inet_inclusion_ops', + opcfamily => 'brin/network_inclusion_ops', opcintype => 'inet', + opckeytype => 'inet' }, +{ opcmethod => 'brin', opcname => 'bpchar_minmax_ops', + opcfamily => 'brin/bpchar_minmax_ops', opcintype => 'bpchar', + opckeytype => 'bpchar' }, +{ opcmethod => 'brin', opcname => 'time_minmax_ops', + opcfamily => 'brin/time_minmax_ops', opcintype => 'time', + opckeytype => 'time' }, +{ opcmethod => 'brin', opcname => 'date_minmax_ops', + opcfamily => 'brin/datetime_minmax_ops', opcintype => 'date', + opckeytype => 'date' }, +{ opcmethod => 'brin', opcname => 'timestamp_minmax_ops', + opcfamily => 'brin/datetime_minmax_ops', opcintype => 'timestamp', + opckeytype => 'timestamp' }, +{ opcmethod => 'brin', opcname => 'timestamptz_minmax_ops', + opcfamily => 'brin/datetime_minmax_ops', opcintype => 'timestamptz', + opckeytype => 'timestamptz' }, +{ opcmethod => 'brin', opcname => 'interval_minmax_ops', + opcfamily => 'brin/interval_minmax_ops', opcintype => 'interval', + opckeytype => 'interval' }, +{ opcmethod => 'brin', opcname => 'timetz_minmax_ops', + opcfamily => 'brin/timetz_minmax_ops', opcintype => 'timetz', + opckeytype => 'timetz' }, +{ opcmethod => 'brin', opcname => 'bit_minmax_ops', + opcfamily => 'brin/bit_minmax_ops', opcintype => 'bit', opckeytype => 'bit' }, +{ opcmethod => 'brin', opcname => 'varbit_minmax_ops', + opcfamily => 'brin/varbit_minmax_ops', opcintype => 'varbit', + opckeytype => 'varbit' }, +{ opcmethod => 'brin', opcname => 'numeric_minmax_ops', + opcfamily => 'brin/numeric_minmax_ops', opcintype => 'numeric', + opckeytype => 'numeric' }, + +# no brin opclass for record, anyarray + +{ opcmethod => 'brin', opcname => 'uuid_minmax_ops', + opcfamily => 'brin/uuid_minmax_ops', opcintype => 'uuid', + opckeytype => 'uuid' }, +{ opcmethod => 'brin', opcname => 'range_inclusion_ops', + opcfamily => 'brin/range_inclusion_ops', opcintype => 'anyrange', + opckeytype => 'anyrange' }, +{ opcmethod => 'brin', opcname => 'pg_lsn_minmax_ops', + opcfamily => 'brin/pg_lsn_minmax_ops', opcintype => 'pg_lsn', + opckeytype => 'pg_lsn' }, + +# no brin opclass for enum, tsvector, tsquery, jsonb + +{ opcmethod => 'brin', opcname => 'box_inclusion_ops', + opcfamily => 'brin/box_inclusion_ops', opcintype => 'box', + opckeytype => 'box' }, + +# no brin opclass for the geometric types except box + +] diff --git a/src/include/catalog/pg_opclass.h b/src/include/catalog/pg_opclass.h index 28dbc747d5..b980327fc0 100644 --- a/src/include/catalog/pg_opclass.h +++ b/src/include/catalog/pg_opclass.h @@ -1,8 +1,7 @@ /*------------------------------------------------------------------------- * * pg_opclass.h - * definition of the system "opclass" relation (pg_opclass) - * along with the relation's initial contents. + * definition of the "operator class" system catalog (pg_opclass) * * The primary key for this table is --- * that is, there is a row for each valid combination of opclass name and @@ -25,14 +24,14 @@ * AMs support this. 
 *
 *
- * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group
+ * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group
+ * Portions Copyright (c) 1994, Regents of the University of California
 *
 * src/include/catalog/pg_opclass.h
 *
 * NOTES
- * the genbki.pl script reads this file and generates .bki
- * information from the DATA() statements.
+ * The Catalog.pm module reads this file and derives schema
+ * information.
 *
 *-------------------------------------------------------------------------
 */
@@ -40,24 +39,38 @@ #define PG_OPCLASS_H
 #include "catalog/genbki.h"
+#include "catalog/pg_opclass_d.h"
 /* ----------------
 * pg_opclass definition. cpp turns this into
 * typedef struct FormData_pg_opclass
 * ----------------
 */
-#define OperatorClassRelationId 2616
-
-CATALOG(pg_opclass,2616)
+CATALOG(pg_opclass,2616,OperatorClassRelationId)
 {
- Oid opcmethod; /* index access method opclass is for */
- NameData opcname; /* name of this opclass */
- Oid opcnamespace; /* namespace of this opclass */
- Oid opcowner; /* opclass owner */
- Oid opcfamily; /* containing operator family */
- Oid opcintype; /* type of data indexed by opclass */
- bool opcdefault; /* T if opclass is default for opcintype */
- Oid opckeytype; /* type of data in index, or InvalidOid */
+ /* index access method opclass is for */
+ Oid opcmethod BKI_LOOKUP(pg_am);
+
+ /* name of this opclass */
+ NameData opcname;
+
+ /* namespace of this opclass */
+ Oid opcnamespace BKI_DEFAULT(PGNSP);
+
+ /* opclass owner */
+ Oid opcowner BKI_DEFAULT(PGUID);
+
+ /* containing operator family */
+ Oid opcfamily BKI_LOOKUP(pg_opfamily);
+
+ /* type of data indexed by opclass */
+ Oid opcintype BKI_LOOKUP(pg_type);
+
+ /* T if opclass is default for opcintype */
+ bool opcdefault BKI_DEFAULT(t);
+
+ /* type of data in index, or InvalidOid */
+ Oid opckeytype BKI_DEFAULT(0) BKI_LOOKUP(pg_type);
 } FormData_pg_opclass;
 /* ----------------
@@ -67,184 +80,4 @@ CATALOG(pg_opclass,2616)
 */
 typedef FormData_pg_opclass *Form_pg_opclass;
-/* ----------------
- * compiler constants for pg_opclass
- * ----------------
- */
-#define Natts_pg_opclass 8
-#define Anum_pg_opclass_opcmethod 1
-#define Anum_pg_opclass_opcname 2
-#define Anum_pg_opclass_opcnamespace 3
-#define Anum_pg_opclass_opcowner 4
-#define Anum_pg_opclass_opcfamily 5
-#define Anum_pg_opclass_opcintype 6
-#define Anum_pg_opclass_opcdefault 7
-#define Anum_pg_opclass_opckeytype 8
-
-/* ----------------
- * initial contents of pg_opclass
- *
- * Note: we hard-wire an OID only for a few entries that have to be explicitly
- * referenced in the C code or in built-in catalog entries. The rest get OIDs
- * assigned on-the-fly during initdb.
- * ---------------- - */ - -DATA(insert ( 403 abstime_ops PGNSP PGUID 421 702 t 0 )); -DATA(insert ( 403 array_ops PGNSP PGUID 397 2277 t 0 )); -DATA(insert ( 405 array_ops PGNSP PGUID 627 2277 t 0 )); -DATA(insert ( 403 bit_ops PGNSP PGUID 423 1560 t 0 )); -DATA(insert ( 403 bool_ops PGNSP PGUID 424 16 t 0 )); -DATA(insert ( 403 bpchar_ops PGNSP PGUID 426 1042 t 0 )); -DATA(insert ( 405 bpchar_ops PGNSP PGUID 427 1042 t 0 )); -DATA(insert ( 403 bytea_ops PGNSP PGUID 428 17 t 0 )); -DATA(insert ( 403 char_ops PGNSP PGUID 429 18 t 0 )); -DATA(insert ( 405 char_ops PGNSP PGUID 431 18 t 0 )); -DATA(insert ( 403 cidr_ops PGNSP PGUID 1974 869 f 0 )); -DATA(insert ( 405 cidr_ops PGNSP PGUID 1975 869 f 0 )); -DATA(insert OID = 3122 ( 403 date_ops PGNSP PGUID 434 1082 t 0 )); -#define DATE_BTREE_OPS_OID 3122 -DATA(insert ( 405 date_ops PGNSP PGUID 435 1082 t 0 )); -DATA(insert ( 403 float4_ops PGNSP PGUID 1970 700 t 0 )); -DATA(insert ( 405 float4_ops PGNSP PGUID 1971 700 t 0 )); -DATA(insert OID = 3123 ( 403 float8_ops PGNSP PGUID 1970 701 t 0 )); -#define FLOAT8_BTREE_OPS_OID 3123 -DATA(insert ( 405 float8_ops PGNSP PGUID 1971 701 t 0 )); -DATA(insert ( 403 inet_ops PGNSP PGUID 1974 869 t 0 )); -DATA(insert ( 405 inet_ops PGNSP PGUID 1975 869 t 0 )); -DATA(insert ( 783 inet_ops PGNSP PGUID 3550 869 f 0 )); -DATA(insert ( 4000 inet_ops PGNSP PGUID 3794 869 t 0 )); -DATA(insert OID = 1979 ( 403 int2_ops PGNSP PGUID 1976 21 t 0 )); -#define INT2_BTREE_OPS_OID 1979 -DATA(insert ( 405 int2_ops PGNSP PGUID 1977 21 t 0 )); -DATA(insert OID = 1978 ( 403 int4_ops PGNSP PGUID 1976 23 t 0 )); -#define INT4_BTREE_OPS_OID 1978 -DATA(insert ( 405 int4_ops PGNSP PGUID 1977 23 t 0 )); -DATA(insert OID = 3124 ( 403 int8_ops PGNSP PGUID 1976 20 t 0 )); -#define INT8_BTREE_OPS_OID 3124 -DATA(insert ( 405 int8_ops PGNSP PGUID 1977 20 t 0 )); -DATA(insert ( 403 interval_ops PGNSP PGUID 1982 1186 t 0 )); -DATA(insert ( 405 interval_ops PGNSP PGUID 1983 1186 t 0 )); -DATA(insert ( 403 macaddr_ops PGNSP PGUID 1984 829 t 0 )); -DATA(insert ( 405 macaddr_ops PGNSP PGUID 1985 829 t 0 )); -DATA(insert ( 403 macaddr8_ops PGNSP PGUID 3371 774 t 0 )); -DATA(insert ( 405 macaddr8_ops PGNSP PGUID 3372 774 t 0 )); -/* - * Here's an ugly little hack to save space in the system catalog indexes. - * btree doesn't ordinarily allow a storage type different from input type; - * but cstring and name are the same thing except for trailing padding, - * and we can safely omit that within an index entry. So we declare the - * btree opclass for name as using cstring storage type. 
- */ -DATA(insert ( 403 name_ops PGNSP PGUID 1986 19 t 2275 )); -DATA(insert ( 405 name_ops PGNSP PGUID 1987 19 t 0 )); -DATA(insert OID = 3125 ( 403 numeric_ops PGNSP PGUID 1988 1700 t 0 )); -#define NUMERIC_BTREE_OPS_OID 3125 -DATA(insert ( 405 numeric_ops PGNSP PGUID 1998 1700 t 0 )); -DATA(insert OID = 1981 ( 403 oid_ops PGNSP PGUID 1989 26 t 0 )); -#define OID_BTREE_OPS_OID 1981 -DATA(insert ( 405 oid_ops PGNSP PGUID 1990 26 t 0 )); -DATA(insert ( 403 oidvector_ops PGNSP PGUID 1991 30 t 0 )); -DATA(insert ( 405 oidvector_ops PGNSP PGUID 1992 30 t 0 )); -DATA(insert ( 403 record_ops PGNSP PGUID 2994 2249 t 0 )); -DATA(insert ( 403 record_image_ops PGNSP PGUID 3194 2249 f 0 )); -DATA(insert OID = 3126 ( 403 text_ops PGNSP PGUID 1994 25 t 0 )); -#define TEXT_BTREE_OPS_OID 3126 -DATA(insert ( 405 text_ops PGNSP PGUID 1995 25 t 0 )); -DATA(insert ( 403 time_ops PGNSP PGUID 1996 1083 t 0 )); -DATA(insert ( 405 time_ops PGNSP PGUID 1997 1083 t 0 )); -DATA(insert OID = 3127 ( 403 timestamptz_ops PGNSP PGUID 434 1184 t 0 )); -#define TIMESTAMPTZ_BTREE_OPS_OID 3127 -DATA(insert ( 405 timestamptz_ops PGNSP PGUID 1999 1184 t 0 )); -DATA(insert ( 403 timetz_ops PGNSP PGUID 2000 1266 t 0 )); -DATA(insert ( 405 timetz_ops PGNSP PGUID 2001 1266 t 0 )); -DATA(insert ( 403 varbit_ops PGNSP PGUID 2002 1562 t 0 )); -DATA(insert ( 403 varchar_ops PGNSP PGUID 1994 25 f 0 )); -DATA(insert ( 405 varchar_ops PGNSP PGUID 1995 25 f 0 )); -DATA(insert OID = 3128 ( 403 timestamp_ops PGNSP PGUID 434 1114 t 0 )); -#define TIMESTAMP_BTREE_OPS_OID 3128 -DATA(insert ( 405 timestamp_ops PGNSP PGUID 2040 1114 t 0 )); -DATA(insert ( 403 text_pattern_ops PGNSP PGUID 2095 25 f 0 )); -DATA(insert ( 403 varchar_pattern_ops PGNSP PGUID 2095 25 f 0 )); -DATA(insert ( 403 bpchar_pattern_ops PGNSP PGUID 2097 1042 f 0 )); -DATA(insert ( 403 money_ops PGNSP PGUID 2099 790 t 0 )); -DATA(insert ( 405 bool_ops PGNSP PGUID 2222 16 t 0 )); -DATA(insert ( 405 bytea_ops PGNSP PGUID 2223 17 t 0 )); -DATA(insert ( 403 tid_ops PGNSP PGUID 2789 27 t 0 )); -DATA(insert ( 405 xid_ops PGNSP PGUID 2225 28 t 0 )); -DATA(insert ( 405 cid_ops PGNSP PGUID 2226 29 t 0 )); -DATA(insert ( 405 abstime_ops PGNSP PGUID 2227 702 t 0 )); -DATA(insert ( 405 reltime_ops PGNSP PGUID 2228 703 t 0 )); -DATA(insert ( 405 text_pattern_ops PGNSP PGUID 2229 25 f 0 )); -DATA(insert ( 405 varchar_pattern_ops PGNSP PGUID 2229 25 f 0 )); -DATA(insert ( 405 bpchar_pattern_ops PGNSP PGUID 2231 1042 f 0 )); -DATA(insert ( 403 reltime_ops PGNSP PGUID 2233 703 t 0 )); -DATA(insert ( 403 tinterval_ops PGNSP PGUID 2234 704 t 0 )); -DATA(insert ( 405 aclitem_ops PGNSP PGUID 2235 1033 t 0 )); -DATA(insert ( 783 box_ops PGNSP PGUID 2593 603 t 0 )); -DATA(insert ( 783 point_ops PGNSP PGUID 1029 600 t 603 )); -DATA(insert ( 783 poly_ops PGNSP PGUID 2594 604 t 603 )); -DATA(insert ( 783 circle_ops PGNSP PGUID 2595 718 t 603 )); -DATA(insert ( 2742 array_ops PGNSP PGUID 2745 2277 t 2283 )); -DATA(insert ( 403 uuid_ops PGNSP PGUID 2968 2950 t 0 )); -DATA(insert ( 405 uuid_ops PGNSP PGUID 2969 2950 t 0 )); -DATA(insert ( 403 pg_lsn_ops PGNSP PGUID 3253 3220 t 0 )); -DATA(insert ( 405 pg_lsn_ops PGNSP PGUID 3254 3220 t 0 )); -DATA(insert ( 403 enum_ops PGNSP PGUID 3522 3500 t 0 )); -DATA(insert ( 405 enum_ops PGNSP PGUID 3523 3500 t 0 )); -DATA(insert ( 403 tsvector_ops PGNSP PGUID 3626 3614 t 0 )); -DATA(insert ( 783 tsvector_ops PGNSP PGUID 3655 3614 t 3642 )); -DATA(insert ( 2742 tsvector_ops PGNSP PGUID 3659 3614 t 25 )); -DATA(insert ( 403 tsquery_ops PGNSP PGUID 3683 3615 t 0 )); 
-DATA(insert ( 783 tsquery_ops PGNSP PGUID 3702 3615 t 20 )); -DATA(insert ( 403 range_ops PGNSP PGUID 3901 3831 t 0 )); -DATA(insert ( 405 range_ops PGNSP PGUID 3903 3831 t 0 )); -DATA(insert ( 783 range_ops PGNSP PGUID 3919 3831 t 0 )); -DATA(insert ( 4000 range_ops PGNSP PGUID 3474 3831 t 0 )); -DATA(insert ( 4000 box_ops PGNSP PGUID 5000 603 t 0 )); -DATA(insert ( 4000 quad_point_ops PGNSP PGUID 4015 600 t 0 )); -DATA(insert ( 4000 kd_point_ops PGNSP PGUID 4016 600 f 0 )); -DATA(insert ( 4000 text_ops PGNSP PGUID 4017 25 t 0 )); -DATA(insert ( 403 jsonb_ops PGNSP PGUID 4033 3802 t 0 )); -DATA(insert ( 405 jsonb_ops PGNSP PGUID 4034 3802 t 0 )); -DATA(insert ( 2742 jsonb_ops PGNSP PGUID 4036 3802 t 25 )); -DATA(insert ( 2742 jsonb_path_ops PGNSP PGUID 4037 3802 f 23 )); - -/* BRIN operator classes */ -/* no brin opclass for bool */ -DATA(insert ( 3580 bytea_minmax_ops PGNSP PGUID 4064 17 t 17 )); -DATA(insert ( 3580 char_minmax_ops PGNSP PGUID 4062 18 t 18 )); -DATA(insert ( 3580 name_minmax_ops PGNSP PGUID 4065 19 t 19 )); -DATA(insert ( 3580 int8_minmax_ops PGNSP PGUID 4054 20 t 20 )); -DATA(insert ( 3580 int2_minmax_ops PGNSP PGUID 4054 21 t 21 )); -DATA(insert ( 3580 int4_minmax_ops PGNSP PGUID 4054 23 t 23 )); -DATA(insert ( 3580 text_minmax_ops PGNSP PGUID 4056 25 t 25 )); -DATA(insert ( 3580 oid_minmax_ops PGNSP PGUID 4068 26 t 26 )); -DATA(insert ( 3580 tid_minmax_ops PGNSP PGUID 4069 27 t 27 )); -DATA(insert ( 3580 float4_minmax_ops PGNSP PGUID 4070 700 t 700 )); -DATA(insert ( 3580 float8_minmax_ops PGNSP PGUID 4070 701 t 701 )); -DATA(insert ( 3580 abstime_minmax_ops PGNSP PGUID 4072 702 t 702 )); -DATA(insert ( 3580 reltime_minmax_ops PGNSP PGUID 4073 703 t 703 )); -DATA(insert ( 3580 macaddr_minmax_ops PGNSP PGUID 4074 829 t 829 )); -DATA(insert ( 3580 macaddr8_minmax_ops PGNSP PGUID 4109 774 t 774 )); -DATA(insert ( 3580 inet_minmax_ops PGNSP PGUID 4075 869 f 869 )); -DATA(insert ( 3580 inet_inclusion_ops PGNSP PGUID 4102 869 t 869 )); -DATA(insert ( 3580 bpchar_minmax_ops PGNSP PGUID 4076 1042 t 1042 )); -DATA(insert ( 3580 time_minmax_ops PGNSP PGUID 4077 1083 t 1083 )); -DATA(insert ( 3580 date_minmax_ops PGNSP PGUID 4059 1082 t 1082 )); -DATA(insert ( 3580 timestamp_minmax_ops PGNSP PGUID 4059 1114 t 1114 )); -DATA(insert ( 3580 timestamptz_minmax_ops PGNSP PGUID 4059 1184 t 1184 )); -DATA(insert ( 3580 interval_minmax_ops PGNSP PGUID 4078 1186 t 1186 )); -DATA(insert ( 3580 timetz_minmax_ops PGNSP PGUID 4058 1266 t 1266 )); -DATA(insert ( 3580 bit_minmax_ops PGNSP PGUID 4079 1560 t 1560 )); -DATA(insert ( 3580 varbit_minmax_ops PGNSP PGUID 4080 1562 t 1562 )); -DATA(insert ( 3580 numeric_minmax_ops PGNSP PGUID 4055 1700 t 1700 )); -/* no brin opclass for record, anyarray */ -DATA(insert ( 3580 uuid_minmax_ops PGNSP PGUID 4081 2950 t 2950 )); -DATA(insert ( 3580 range_inclusion_ops PGNSP PGUID 4103 3831 t 3831 )); -DATA(insert ( 3580 pg_lsn_minmax_ops PGNSP PGUID 4082 3220 t 3220 )); -/* no brin opclass for enum, tsvector, tsquery, jsonb */ -DATA(insert ( 3580 box_inclusion_ops PGNSP PGUID 4104 603 t 603 )); -/* no brin opclass for the geometric types except box */ - #endif /* PG_OPCLASS_H */ diff --git a/src/include/catalog/pg_operator.dat b/src/include/catalog/pg_operator.dat new file mode 100644 index 0000000000..ce23c2f0aa --- /dev/null +++ b/src/include/catalog/pg_operator.dat @@ -0,0 +1,3199 @@ +#---------------------------------------------------------------------- +# +# pg_operator.dat +# Initial contents of the pg_operator system catalog. 
+# +# Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group +# Portions Copyright (c) 1994, Regents of the University of California +# +# src/include/catalog/pg_operator.dat +# +#---------------------------------------------------------------------- + +[ + +# Note: every entry in pg_operator.dat is expected to have a 'descr' comment. +# If the operator is a deprecated equivalent of some other entry, be sure +# to comment it as such so that initdb doesn't think it's a preferred name +# for the underlying function. + +{ oid => '15', descr => 'equal', + oprname => '=', oprcanmerge => 't', oprcanhash => 't', oprleft => 'int4', + oprright => 'int8', oprresult => 'bool', oprcom => '=(int8,int4)', + oprnegate => '<>(int4,int8)', oprcode => 'int48eq', oprrest => 'eqsel', + oprjoin => 'eqjoinsel' }, +{ oid => '36', descr => 'not equal', + oprname => '<>', oprleft => 'int4', oprright => 'int8', oprresult => 'bool', + oprcom => '<>(int8,int4)', oprnegate => '=(int4,int8)', oprcode => 'int48ne', + oprrest => 'neqsel', oprjoin => 'neqjoinsel' }, +{ oid => '37', descr => 'less than', + oprname => '<', oprleft => 'int4', oprright => 'int8', oprresult => 'bool', + oprcom => '>(int8,int4)', oprnegate => '>=(int4,int8)', oprcode => 'int48lt', + oprrest => 'scalarltsel', oprjoin => 'scalarltjoinsel' }, +{ oid => '76', descr => 'greater than', + oprname => '>', oprleft => 'int4', oprright => 'int8', oprresult => 'bool', + oprcom => '<(int8,int4)', oprnegate => '<=(int4,int8)', oprcode => 'int48gt', + oprrest => 'scalargtsel', oprjoin => 'scalargtjoinsel' }, +{ oid => '80', descr => 'less than or equal', + oprname => '<=', oprleft => 'int4', oprright => 'int8', oprresult => 'bool', + oprcom => '>=(int8,int4)', oprnegate => '>(int4,int8)', oprcode => 'int48le', + oprrest => 'scalarlesel', oprjoin => 'scalarlejoinsel' }, +{ oid => '82', descr => 'greater than or equal', + oprname => '>=', oprleft => 'int4', oprright => 'int8', oprresult => 'bool', + oprcom => '<=(int8,int4)', oprnegate => '<(int4,int8)', oprcode => 'int48ge', + oprrest => 'scalargesel', oprjoin => 'scalargejoinsel' }, + +{ oid => '58', descr => 'less than', + oprname => '<', oprleft => 'bool', oprright => 'bool', oprresult => 'bool', + oprcom => '>(bool,bool)', oprnegate => '>=(bool,bool)', oprcode => 'boollt', + oprrest => 'scalarltsel', oprjoin => 'scalarltjoinsel' }, +{ oid => '59', descr => 'greater than', + oprname => '>', oprleft => 'bool', oprright => 'bool', oprresult => 'bool', + oprcom => '<(bool,bool)', oprnegate => '<=(bool,bool)', oprcode => 'boolgt', + oprrest => 'scalargtsel', oprjoin => 'scalargtjoinsel' }, +{ oid => '85', oid_symbol => 'BooleanNotEqualOperator', descr => 'not equal', + oprname => '<>', oprleft => 'bool', oprright => 'bool', oprresult => 'bool', + oprcom => '<>(bool,bool)', oprnegate => '=(bool,bool)', oprcode => 'boolne', + oprrest => 'neqsel', oprjoin => 'neqjoinsel' }, +{ oid => '91', oid_symbol => 'BooleanEqualOperator', descr => 'equal', + oprname => '=', oprcanmerge => 't', oprcanhash => 't', oprleft => 'bool', + oprright => 'bool', oprresult => 'bool', oprcom => '=(bool,bool)', + oprnegate => '<>(bool,bool)', oprcode => 'booleq', oprrest => 'eqsel', + oprjoin => 'eqjoinsel' }, +{ oid => '1694', descr => 'less than or equal', + oprname => '<=', oprleft => 'bool', oprright => 'bool', oprresult => 'bool', + oprcom => '>=(bool,bool)', oprnegate => '>(bool,bool)', oprcode => 'boolle', + oprrest => 'scalarlesel', oprjoin => 'scalarlejoinsel' }, +{ oid => '1695', descr => 'greater than or equal', 
+ oprname => '>=', oprleft => 'bool', oprright => 'bool', oprresult => 'bool', + oprcom => '<=(bool,bool)', oprnegate => '<(bool,bool)', oprcode => 'boolge', + oprrest => 'scalargesel', oprjoin => 'scalargejoinsel' }, + +{ oid => '92', descr => 'equal', + oprname => '=', oprcanmerge => 't', oprcanhash => 't', oprleft => 'char', + oprright => 'char', oprresult => 'bool', oprcom => '=(char,char)', + oprnegate => '<>(char,char)', oprcode => 'chareq', oprrest => 'eqsel', + oprjoin => 'eqjoinsel' }, +{ oid => '93', descr => 'equal', + oprname => '=', oprcanmerge => 't', oprcanhash => 't', oprleft => 'name', + oprright => 'name', oprresult => 'bool', oprcom => '=(name,name)', + oprnegate => '<>(name,name)', oprcode => 'nameeq', oprrest => 'eqsel', + oprjoin => 'eqjoinsel' }, +{ oid => '94', descr => 'equal', + oprname => '=', oprcanmerge => 't', oprcanhash => 't', oprleft => 'int2', + oprright => 'int2', oprresult => 'bool', oprcom => '=(int2,int2)', + oprnegate => '<>(int2,int2)', oprcode => 'int2eq', oprrest => 'eqsel', + oprjoin => 'eqjoinsel' }, +{ oid => '95', descr => 'less than', + oprname => '<', oprleft => 'int2', oprright => 'int2', oprresult => 'bool', + oprcom => '>(int2,int2)', oprnegate => '>=(int2,int2)', oprcode => 'int2lt', + oprrest => 'scalarltsel', oprjoin => 'scalarltjoinsel' }, +{ oid => '96', oid_symbol => 'Int4EqualOperator', descr => 'equal', + oprname => '=', oprcanmerge => 't', oprcanhash => 't', oprleft => 'int4', + oprright => 'int4', oprresult => 'bool', oprcom => '=(int4,int4)', + oprnegate => '<>(int4,int4)', oprcode => 'int4eq', oprrest => 'eqsel', + oprjoin => 'eqjoinsel' }, +{ oid => '97', oid_symbol => 'Int4LessOperator', descr => 'less than', + oprname => '<', oprleft => 'int4', oprright => 'int4', oprresult => 'bool', + oprcom => '>(int4,int4)', oprnegate => '>=(int4,int4)', oprcode => 'int4lt', + oprrest => 'scalarltsel', oprjoin => 'scalarltjoinsel' }, +{ oid => '98', oid_symbol => 'TextEqualOperator', descr => 'equal', + oprname => '=', oprcanmerge => 't', oprcanhash => 't', oprleft => 'text', + oprright => 'text', oprresult => 'bool', oprcom => '=(text,text)', + oprnegate => '<>(text,text)', oprcode => 'texteq', oprrest => 'eqsel', + oprjoin => 'eqjoinsel' }, +{ oid => '3877', descr => 'starts with', + oprname => '^@', oprleft => 'text', oprright => 'text', oprresult => 'bool', + oprcode => 'starts_with', oprrest => 'prefixsel', + oprjoin => 'prefixjoinsel' }, + +{ oid => '349', descr => 'append element onto end of array', + oprname => '||', oprleft => 'anyarray', oprright => 'anyelement', + oprresult => 'anyarray', oprcode => 'array_append' }, +{ oid => '374', descr => 'prepend element onto front of array', + oprname => '||', oprleft => 'anyelement', oprright => 'anyarray', + oprresult => 'anyarray', oprcode => 'array_prepend' }, +{ oid => '375', descr => 'concatenate', + oprname => '||', oprleft => 'anyarray', oprright => 'anyarray', + oprresult => 'anyarray', oprcode => 'array_cat' }, + +{ oid => '352', descr => 'equal', + oprname => '=', oprcanhash => 't', oprleft => 'xid', oprright => 'xid', + oprresult => 'bool', oprcom => '=(xid,xid)', oprnegate => '<>(xid,xid)', + oprcode => 'xideq', oprrest => 'eqsel', oprjoin => 'eqjoinsel' }, +{ oid => '353', descr => 'equal', + oprname => '=', oprleft => 'xid', oprright => 'int4', oprresult => 'bool', + oprnegate => '<>(xid,int4)', oprcode => 'xideqint4', oprrest => 'eqsel', + oprjoin => 'eqjoinsel' }, +{ oid => '3315', descr => 'not equal', + oprname => '<>', oprleft => 'xid', oprright => 'xid', oprresult => 
'bool', + oprcom => '<>(xid,xid)', oprnegate => '=(xid,xid)', oprcode => 'xidneq', + oprrest => 'neqsel', oprjoin => 'neqjoinsel' }, +{ oid => '3316', descr => 'not equal', + oprname => '<>', oprleft => 'xid', oprright => 'int4', oprresult => 'bool', + oprnegate => '=(xid,int4)', oprcode => 'xidneqint4', oprrest => 'neqsel', + oprjoin => 'neqjoinsel' }, +{ oid => '388', descr => 'factorial', + oprname => '!', oprkind => 'r', oprleft => 'int8', oprright => '0', + oprresult => 'numeric', oprcode => 'numeric_fac' }, +{ oid => '389', descr => 'deprecated, use ! instead', + oprname => '!!', oprkind => 'l', oprleft => '0', oprright => 'int8', + oprresult => 'numeric', oprcode => 'numeric_fac' }, +{ oid => '385', descr => 'equal', + oprname => '=', oprcanhash => 't', oprleft => 'cid', oprright => 'cid', + oprresult => 'bool', oprcom => '=(cid,cid)', oprcode => 'cideq', + oprrest => 'eqsel', oprjoin => 'eqjoinsel' }, + +{ oid => '387', oid_symbol => 'TIDEqualOperator', descr => 'equal', + oprname => '=', oprcanmerge => 't', oprleft => 'tid', oprright => 'tid', + oprresult => 'bool', oprcom => '=(tid,tid)', oprnegate => '<>(tid,tid)', + oprcode => 'tideq', oprrest => 'eqsel', oprjoin => 'eqjoinsel' }, +{ oid => '402', descr => 'not equal', + oprname => '<>', oprleft => 'tid', oprright => 'tid', oprresult => 'bool', + oprcom => '<>(tid,tid)', oprnegate => '=(tid,tid)', oprcode => 'tidne', + oprrest => 'neqsel', oprjoin => 'neqjoinsel' }, +{ oid => '2799', oid_symbol => 'TIDLessOperator', descr => 'less than', + oprname => '<', oprleft => 'tid', oprright => 'tid', oprresult => 'bool', + oprcom => '>(tid,tid)', oprnegate => '>=(tid,tid)', oprcode => 'tidlt', + oprrest => 'scalarltsel', oprjoin => 'scalarltjoinsel' }, +{ oid => '2800', descr => 'greater than', + oprname => '>', oprleft => 'tid', oprright => 'tid', oprresult => 'bool', + oprcom => '<(tid,tid)', oprnegate => '<=(tid,tid)', oprcode => 'tidgt', + oprrest => 'scalargtsel', oprjoin => 'scalargtjoinsel' }, +{ oid => '2801', descr => 'less than or equal', + oprname => '<=', oprleft => 'tid', oprright => 'tid', oprresult => 'bool', + oprcom => '>=(tid,tid)', oprnegate => '>(tid,tid)', oprcode => 'tidle', + oprrest => 'scalarlesel', oprjoin => 'scalarlejoinsel' }, +{ oid => '2802', descr => 'greater than or equal', + oprname => '>=', oprleft => 'tid', oprright => 'tid', oprresult => 'bool', + oprcom => '<=(tid,tid)', oprnegate => '<(tid,tid)', oprcode => 'tidge', + oprrest => 'scalargesel', oprjoin => 'scalargejoinsel' }, + +{ oid => '410', descr => 'equal', + oprname => '=', oprcanmerge => 't', oprcanhash => 't', oprleft => 'int8', + oprright => 'int8', oprresult => 'bool', oprcom => '=(int8,int8)', + oprnegate => '<>(int8,int8)', oprcode => 'int8eq', oprrest => 'eqsel', + oprjoin => 'eqjoinsel' }, +{ oid => '411', descr => 'not equal', + oprname => '<>', oprleft => 'int8', oprright => 'int8', oprresult => 'bool', + oprcom => '<>(int8,int8)', oprnegate => '=(int8,int8)', oprcode => 'int8ne', + oprrest => 'neqsel', oprjoin => 'neqjoinsel' }, +{ oid => '412', oid_symbol => 'Int8LessOperator', descr => 'less than', + oprname => '<', oprleft => 'int8', oprright => 'int8', oprresult => 'bool', + oprcom => '>(int8,int8)', oprnegate => '>=(int8,int8)', oprcode => 'int8lt', + oprrest => 'scalarltsel', oprjoin => 'scalarltjoinsel' }, +{ oid => '413', descr => 'greater than', + oprname => '>', oprleft => 'int8', oprright => 'int8', oprresult => 'bool', + oprcom => '<(int8,int8)', oprnegate => '<=(int8,int8)', oprcode => 'int8gt', + oprrest => 
'scalargtsel', oprjoin => 'scalargtjoinsel' }, +{ oid => '414', descr => 'less than or equal', + oprname => '<=', oprleft => 'int8', oprright => 'int8', oprresult => 'bool', + oprcom => '>=(int8,int8)', oprnegate => '>(int8,int8)', oprcode => 'int8le', + oprrest => 'scalarlesel', oprjoin => 'scalarlejoinsel' }, +{ oid => '415', descr => 'greater than or equal', + oprname => '>=', oprleft => 'int8', oprright => 'int8', oprresult => 'bool', + oprcom => '<=(int8,int8)', oprnegate => '<(int8,int8)', oprcode => 'int8ge', + oprrest => 'scalargesel', oprjoin => 'scalargejoinsel' }, + +{ oid => '416', descr => 'equal', + oprname => '=', oprcanmerge => 't', oprcanhash => 't', oprleft => 'int8', + oprright => 'int4', oprresult => 'bool', oprcom => '=(int4,int8)', + oprnegate => '<>(int8,int4)', oprcode => 'int84eq', oprrest => 'eqsel', + oprjoin => 'eqjoinsel' }, +{ oid => '417', descr => 'not equal', + oprname => '<>', oprleft => 'int8', oprright => 'int4', oprresult => 'bool', + oprcom => '<>(int4,int8)', oprnegate => '=(int8,int4)', oprcode => 'int84ne', + oprrest => 'neqsel', oprjoin => 'neqjoinsel' }, +{ oid => '418', descr => 'less than', + oprname => '<', oprleft => 'int8', oprright => 'int4', oprresult => 'bool', + oprcom => '>(int4,int8)', oprnegate => '>=(int8,int4)', oprcode => 'int84lt', + oprrest => 'scalarltsel', oprjoin => 'scalarltjoinsel' }, +{ oid => '419', descr => 'greater than', + oprname => '>', oprleft => 'int8', oprright => 'int4', oprresult => 'bool', + oprcom => '<(int4,int8)', oprnegate => '<=(int8,int4)', oprcode => 'int84gt', + oprrest => 'scalargtsel', oprjoin => 'scalargtjoinsel' }, +{ oid => '420', descr => 'less than or equal', + oprname => '<=', oprleft => 'int8', oprright => 'int4', oprresult => 'bool', + oprcom => '>=(int4,int8)', oprnegate => '>(int8,int4)', oprcode => 'int84le', + oprrest => 'scalarlesel', oprjoin => 'scalarlejoinsel' }, +{ oid => '430', descr => 'greater than or equal', + oprname => '>=', oprleft => 'int8', oprright => 'int4', oprresult => 'bool', + oprcom => '<=(int4,int8)', oprnegate => '<(int8,int4)', oprcode => 'int84ge', + oprrest => 'scalargesel', oprjoin => 'scalargejoinsel' }, +{ oid => '439', descr => 'modulus', + oprname => '%', oprleft => 'int8', oprright => 'int8', oprresult => 'int8', + oprcode => 'int8mod' }, +{ oid => '473', descr => 'absolute value', + oprname => '@', oprkind => 'l', oprleft => '0', oprright => 'int8', + oprresult => 'int8', oprcode => 'int8abs' }, + +{ oid => '484', descr => 'negate', + oprname => '-', oprkind => 'l', oprleft => '0', oprright => 'int8', + oprresult => 'int8', oprcode => 'int8um' }, +{ oid => '485', descr => 'is left of', + oprname => '<<', oprleft => 'polygon', oprright => 'polygon', + oprresult => 'bool', oprcode => 'poly_left', oprrest => 'positionsel', + oprjoin => 'positionjoinsel' }, +{ oid => '486', descr => 'overlaps or is left of', + oprname => '&<', oprleft => 'polygon', oprright => 'polygon', + oprresult => 'bool', oprcode => 'poly_overleft', oprrest => 'positionsel', + oprjoin => 'positionjoinsel' }, +{ oid => '487', descr => 'overlaps or is right of', + oprname => '&>', oprleft => 'polygon', oprright => 'polygon', + oprresult => 'bool', oprcode => 'poly_overright', oprrest => 'positionsel', + oprjoin => 'positionjoinsel' }, +{ oid => '488', descr => 'is right of', + oprname => '>>', oprleft => 'polygon', oprright => 'polygon', + oprresult => 'bool', oprcode => 'poly_right', oprrest => 'positionsel', + oprjoin => 'positionjoinsel' }, +{ oid => '489', descr => 'is contained by', + 
oprname => '<@', oprleft => 'polygon', oprright => 'polygon', + oprresult => 'bool', oprcom => '@>(polygon,polygon)', + oprcode => 'poly_contained', oprrest => 'contsel', oprjoin => 'contjoinsel' }, +{ oid => '490', descr => 'contains', + oprname => '@>', oprleft => 'polygon', oprright => 'polygon', + oprresult => 'bool', oprcom => '<@(polygon,polygon)', + oprcode => 'poly_contain', oprrest => 'contsel', oprjoin => 'contjoinsel' }, +{ oid => '491', descr => 'same as', + oprname => '~=', oprleft => 'polygon', oprright => 'polygon', + oprresult => 'bool', oprcom => '~=(polygon,polygon)', oprcode => 'poly_same', + oprrest => 'eqsel', oprjoin => 'eqjoinsel' }, +{ oid => '492', descr => 'overlaps', + oprname => '&&', oprleft => 'polygon', oprright => 'polygon', + oprresult => 'bool', oprcom => '&&(polygon,polygon)', + oprcode => 'poly_overlap', oprrest => 'areasel', oprjoin => 'areajoinsel' }, +{ oid => '493', descr => 'is left of', + oprname => '<<', oprleft => 'box', oprright => 'box', oprresult => 'bool', + oprcode => 'box_left', oprrest => 'positionsel', + oprjoin => 'positionjoinsel' }, +{ oid => '494', descr => 'overlaps or is left of', + oprname => '&<', oprleft => 'box', oprright => 'box', oprresult => 'bool', + oprcode => 'box_overleft', oprrest => 'positionsel', + oprjoin => 'positionjoinsel' }, +{ oid => '495', descr => 'overlaps or is right of', + oprname => '&>', oprleft => 'box', oprright => 'box', oprresult => 'bool', + oprcode => 'box_overright', oprrest => 'positionsel', + oprjoin => 'positionjoinsel' }, +{ oid => '496', descr => 'is right of', + oprname => '>>', oprleft => 'box', oprright => 'box', oprresult => 'bool', + oprcode => 'box_right', oprrest => 'positionsel', + oprjoin => 'positionjoinsel' }, +{ oid => '497', descr => 'is contained by', + oprname => '<@', oprleft => 'box', oprright => 'box', oprresult => 'bool', + oprcom => '@>(box,box)', oprcode => 'box_contained', oprrest => 'contsel', + oprjoin => 'contjoinsel' }, +{ oid => '498', descr => 'contains', + oprname => '@>', oprleft => 'box', oprright => 'box', oprresult => 'bool', + oprcom => '<@(box,box)', oprcode => 'box_contain', oprrest => 'contsel', + oprjoin => 'contjoinsel' }, +{ oid => '499', descr => 'same as', + oprname => '~=', oprleft => 'box', oprright => 'box', oprresult => 'bool', + oprcom => '~=(box,box)', oprcode => 'box_same', oprrest => 'eqsel', + oprjoin => 'eqjoinsel' }, +{ oid => '500', descr => 'overlaps', + oprname => '&&', oprleft => 'box', oprright => 'box', oprresult => 'bool', + oprcom => '&&(box,box)', oprcode => 'box_overlap', oprrest => 'areasel', + oprjoin => 'areajoinsel' }, +{ oid => '501', descr => 'greater than or equal by area', + oprname => '>=', oprleft => 'box', oprright => 'box', oprresult => 'bool', + oprcom => '<=(box,box)', oprnegate => '<(box,box)', oprcode => 'box_ge', + oprrest => 'areasel', oprjoin => 'areajoinsel' }, +{ oid => '502', descr => 'greater than by area', + oprname => '>', oprleft => 'box', oprright => 'box', oprresult => 'bool', + oprcom => '<(box,box)', oprnegate => '<=(box,box)', oprcode => 'box_gt', + oprrest => 'areasel', oprjoin => 'areajoinsel' }, +{ oid => '503', descr => 'equal by area', + oprname => '=', oprleft => 'box', oprright => 'box', oprresult => 'bool', + oprcom => '=(box,box)', oprcode => 'box_eq', oprrest => 'eqsel', + oprjoin => 'eqjoinsel' }, +{ oid => '504', descr => 'less than by area', + oprname => '<', oprleft => 'box', oprright => 'box', oprresult => 'bool', + oprcom => '>(box,box)', oprnegate => '>=(box,box)', oprcode => 'box_lt', 
+ oprrest => 'areasel', oprjoin => 'areajoinsel' }, +{ oid => '505', descr => 'less than or equal by area', + oprname => '<=', oprleft => 'box', oprright => 'box', oprresult => 'bool', + oprcom => '>=(box,box)', oprnegate => '>(box,box)', oprcode => 'box_le', + oprrest => 'areasel', oprjoin => 'areajoinsel' }, +{ oid => '506', descr => 'is above', + oprname => '>^', oprleft => 'point', oprright => 'point', oprresult => 'bool', + oprcode => 'point_above', oprrest => 'positionsel', + oprjoin => 'positionjoinsel' }, +{ oid => '507', descr => 'is left of', + oprname => '<<', oprleft => 'point', oprright => 'point', oprresult => 'bool', + oprcode => 'point_left', oprrest => 'positionsel', + oprjoin => 'positionjoinsel' }, +{ oid => '508', descr => 'is right of', + oprname => '>>', oprleft => 'point', oprright => 'point', oprresult => 'bool', + oprcode => 'point_right', oprrest => 'positionsel', + oprjoin => 'positionjoinsel' }, +{ oid => '509', descr => 'is below', + oprname => '<^', oprleft => 'point', oprright => 'point', oprresult => 'bool', + oprcode => 'point_below', oprrest => 'positionsel', + oprjoin => 'positionjoinsel' }, +{ oid => '510', descr => 'same as', + oprname => '~=', oprleft => 'point', oprright => 'point', oprresult => 'bool', + oprcom => '~=(point,point)', oprnegate => '<>(point,point)', + oprcode => 'point_eq', oprrest => 'eqsel', oprjoin => 'eqjoinsel' }, +{ oid => '511', descr => 'point inside box', + oprname => '<@', oprleft => 'point', oprright => 'box', oprresult => 'bool', + oprcom => '@>(box,point)', oprcode => 'on_pb', oprrest => 'contsel', + oprjoin => 'contjoinsel' }, +{ oid => '433', descr => 'contains', + oprname => '@>', oprleft => 'box', oprright => 'point', oprresult => 'bool', + oprcom => '<@(point,box)', oprcode => 'box_contain_pt', oprrest => 'contsel', + oprjoin => 'contjoinsel' }, +{ oid => '512', descr => 'point within closed path, or point on open path', + oprname => '<@', oprleft => 'point', oprright => 'path', oprresult => 'bool', + oprcom => '@>(path,point)', oprcode => 'on_ppath' }, +{ oid => '513', descr => 'center of', + oprname => '@@', oprkind => 'l', oprleft => '0', oprright => 'box', + oprresult => 'point', oprcode => 'box_center' }, +{ oid => '514', descr => 'multiply', + oprname => '*', oprleft => 'int4', oprright => 'int4', oprresult => 'int4', + oprcom => '*(int4,int4)', oprcode => 'int4mul' }, +{ oid => '517', descr => 'distance between', + oprname => '<->', oprleft => 'point', oprright => 'point', + oprresult => 'float8', oprcom => '<->(point,point)', + oprcode => 'point_distance' }, +{ oid => '518', descr => 'not equal', + oprname => '<>', oprleft => 'int4', oprright => 'int4', oprresult => 'bool', + oprcom => '<>(int4,int4)', oprnegate => '=(int4,int4)', oprcode => 'int4ne', + oprrest => 'neqsel', oprjoin => 'neqjoinsel' }, +{ oid => '519', descr => 'not equal', + oprname => '<>', oprleft => 'int2', oprright => 'int2', oprresult => 'bool', + oprcom => '<>(int2,int2)', oprnegate => '=(int2,int2)', oprcode => 'int2ne', + oprrest => 'neqsel', oprjoin => 'neqjoinsel' }, +{ oid => '520', descr => 'greater than', + oprname => '>', oprleft => 'int2', oprright => 'int2', oprresult => 'bool', + oprcom => '<(int2,int2)', oprnegate => '<=(int2,int2)', oprcode => 'int2gt', + oprrest => 'scalargtsel', oprjoin => 'scalargtjoinsel' }, +{ oid => '521', descr => 'greater than', + oprname => '>', oprleft => 'int4', oprright => 'int4', oprresult => 'bool', + oprcom => '<(int4,int4)', oprnegate => '<=(int4,int4)', oprcode => 'int4gt', + oprrest => 
'scalargtsel', oprjoin => 'scalargtjoinsel' }, +{ oid => '522', descr => 'less than or equal', + oprname => '<=', oprleft => 'int2', oprright => 'int2', oprresult => 'bool', + oprcom => '>=(int2,int2)', oprnegate => '>(int2,int2)', oprcode => 'int2le', + oprrest => 'scalarlesel', oprjoin => 'scalarlejoinsel' }, +{ oid => '523', descr => 'less than or equal', + oprname => '<=', oprleft => 'int4', oprright => 'int4', oprresult => 'bool', + oprcom => '>=(int4,int4)', oprnegate => '>(int4,int4)', oprcode => 'int4le', + oprrest => 'scalarlesel', oprjoin => 'scalarlejoinsel' }, +{ oid => '524', descr => 'greater than or equal', + oprname => '>=', oprleft => 'int2', oprright => 'int2', oprresult => 'bool', + oprcom => '<=(int2,int2)', oprnegate => '<(int2,int2)', oprcode => 'int2ge', + oprrest => 'scalargesel', oprjoin => 'scalargejoinsel' }, +{ oid => '525', descr => 'greater than or equal', + oprname => '>=', oprleft => 'int4', oprright => 'int4', oprresult => 'bool', + oprcom => '<=(int4,int4)', oprnegate => '<(int4,int4)', oprcode => 'int4ge', + oprrest => 'scalargesel', oprjoin => 'scalargejoinsel' }, +{ oid => '526', descr => 'multiply', + oprname => '*', oprleft => 'int2', oprright => 'int2', oprresult => 'int2', + oprcom => '*(int2,int2)', oprcode => 'int2mul' }, +{ oid => '527', descr => 'divide', + oprname => '/', oprleft => 'int2', oprright => 'int2', oprresult => 'int2', + oprcode => 'int2div' }, +{ oid => '528', descr => 'divide', + oprname => '/', oprleft => 'int4', oprright => 'int4', oprresult => 'int4', + oprcode => 'int4div' }, +{ oid => '529', descr => 'modulus', + oprname => '%', oprleft => 'int2', oprright => 'int2', oprresult => 'int2', + oprcode => 'int2mod' }, +{ oid => '530', descr => 'modulus', + oprname => '%', oprleft => 'int4', oprright => 'int4', oprresult => 'int4', + oprcode => 'int4mod' }, +{ oid => '531', descr => 'not equal', + oprname => '<>', oprleft => 'text', oprright => 'text', oprresult => 'bool', + oprcom => '<>(text,text)', oprnegate => '=(text,text)', oprcode => 'textne', + oprrest => 'neqsel', oprjoin => 'neqjoinsel' }, +{ oid => '532', descr => 'equal', + oprname => '=', oprcanmerge => 't', oprcanhash => 't', oprleft => 'int2', + oprright => 'int4', oprresult => 'bool', oprcom => '=(int4,int2)', + oprnegate => '<>(int2,int4)', oprcode => 'int24eq', oprrest => 'eqsel', + oprjoin => 'eqjoinsel' }, +{ oid => '533', descr => 'equal', + oprname => '=', oprcanmerge => 't', oprcanhash => 't', oprleft => 'int4', + oprright => 'int2', oprresult => 'bool', oprcom => '=(int2,int4)', + oprnegate => '<>(int4,int2)', oprcode => 'int42eq', oprrest => 'eqsel', + oprjoin => 'eqjoinsel' }, +{ oid => '534', descr => 'less than', + oprname => '<', oprleft => 'int2', oprright => 'int4', oprresult => 'bool', + oprcom => '>(int4,int2)', oprnegate => '>=(int2,int4)', oprcode => 'int24lt', + oprrest => 'scalarltsel', oprjoin => 'scalarltjoinsel' }, +{ oid => '535', descr => 'less than', + oprname => '<', oprleft => 'int4', oprright => 'int2', oprresult => 'bool', + oprcom => '>(int2,int4)', oprnegate => '>=(int4,int2)', oprcode => 'int42lt', + oprrest => 'scalarltsel', oprjoin => 'scalarltjoinsel' }, +{ oid => '536', descr => 'greater than', + oprname => '>', oprleft => 'int2', oprright => 'int4', oprresult => 'bool', + oprcom => '<(int4,int2)', oprnegate => '<=(int2,int4)', oprcode => 'int24gt', + oprrest => 'scalargtsel', oprjoin => 'scalargtjoinsel' }, +{ oid => '537', descr => 'greater than', + oprname => '>', oprleft => 'int4', oprright => 'int2', oprresult => 'bool', + 
oprcom => '<(int2,int4)', oprnegate => '<=(int4,int2)', oprcode => 'int42gt', + oprrest => 'scalargtsel', oprjoin => 'scalargtjoinsel' }, +{ oid => '538', descr => 'not equal', + oprname => '<>', oprleft => 'int2', oprright => 'int4', oprresult => 'bool', + oprcom => '<>(int4,int2)', oprnegate => '=(int2,int4)', oprcode => 'int24ne', + oprrest => 'neqsel', oprjoin => 'neqjoinsel' }, +{ oid => '539', descr => 'not equal', + oprname => '<>', oprleft => 'int4', oprright => 'int2', oprresult => 'bool', + oprcom => '<>(int2,int4)', oprnegate => '=(int4,int2)', oprcode => 'int42ne', + oprrest => 'neqsel', oprjoin => 'neqjoinsel' }, +{ oid => '540', descr => 'less than or equal', + oprname => '<=', oprleft => 'int2', oprright => 'int4', oprresult => 'bool', + oprcom => '>=(int4,int2)', oprnegate => '>(int2,int4)', oprcode => 'int24le', + oprrest => 'scalarlesel', oprjoin => 'scalarlejoinsel' }, +{ oid => '541', descr => 'less than or equal', + oprname => '<=', oprleft => 'int4', oprright => 'int2', oprresult => 'bool', + oprcom => '>=(int2,int4)', oprnegate => '>(int4,int2)', oprcode => 'int42le', + oprrest => 'scalarlesel', oprjoin => 'scalarlejoinsel' }, +{ oid => '542', descr => 'greater than or equal', + oprname => '>=', oprleft => 'int2', oprright => 'int4', oprresult => 'bool', + oprcom => '<=(int4,int2)', oprnegate => '<(int2,int4)', oprcode => 'int24ge', + oprrest => 'scalargesel', oprjoin => 'scalargejoinsel' }, +{ oid => '543', descr => 'greater than or equal', + oprname => '>=', oprleft => 'int4', oprright => 'int2', oprresult => 'bool', + oprcom => '<=(int2,int4)', oprnegate => '<(int4,int2)', oprcode => 'int42ge', + oprrest => 'scalargesel', oprjoin => 'scalargejoinsel' }, +{ oid => '544', descr => 'multiply', + oprname => '*', oprleft => 'int2', oprright => 'int4', oprresult => 'int4', + oprcom => '*(int4,int2)', oprcode => 'int24mul' }, +{ oid => '545', descr => 'multiply', + oprname => '*', oprleft => 'int4', oprright => 'int2', oprresult => 'int4', + oprcom => '*(int2,int4)', oprcode => 'int42mul' }, +{ oid => '546', descr => 'divide', + oprname => '/', oprleft => 'int2', oprright => 'int4', oprresult => 'int4', + oprcode => 'int24div' }, +{ oid => '547', descr => 'divide', + oprname => '/', oprleft => 'int4', oprright => 'int2', oprresult => 'int4', + oprcode => 'int42div' }, +{ oid => '550', descr => 'add', + oprname => '+', oprleft => 'int2', oprright => 'int2', oprresult => 'int2', + oprcom => '+(int2,int2)', oprcode => 'int2pl' }, +{ oid => '551', descr => 'add', + oprname => '+', oprleft => 'int4', oprright => 'int4', oprresult => 'int4', + oprcom => '+(int4,int4)', oprcode => 'int4pl' }, +{ oid => '552', descr => 'add', + oprname => '+', oprleft => 'int2', oprright => 'int4', oprresult => 'int4', + oprcom => '+(int4,int2)', oprcode => 'int24pl' }, +{ oid => '553', descr => 'add', + oprname => '+', oprleft => 'int4', oprright => 'int2', oprresult => 'int4', + oprcom => '+(int2,int4)', oprcode => 'int42pl' }, +{ oid => '554', descr => 'subtract', + oprname => '-', oprleft => 'int2', oprright => 'int2', oprresult => 'int2', + oprcode => 'int2mi' }, +{ oid => '555', descr => 'subtract', + oprname => '-', oprleft => 'int4', oprright => 'int4', oprresult => 'int4', + oprcode => 'int4mi' }, +{ oid => '556', descr => 'subtract', + oprname => '-', oprleft => 'int2', oprright => 'int4', oprresult => 'int4', + oprcode => 'int24mi' }, +{ oid => '557', descr => 'subtract', + oprname => '-', oprleft => 'int4', oprright => 'int2', oprresult => 'int4', + oprcode => 'int42mi' }, +{ oid => 
'558', descr => 'negate', + oprname => '-', oprkind => 'l', oprleft => '0', oprright => 'int4', + oprresult => 'int4', oprcode => 'int4um' }, +{ oid => '559', descr => 'negate', + oprname => '-', oprkind => 'l', oprleft => '0', oprright => 'int2', + oprresult => 'int2', oprcode => 'int2um' }, +{ oid => '584', descr => 'negate', + oprname => '-', oprkind => 'l', oprleft => '0', oprright => 'float4', + oprresult => 'float4', oprcode => 'float4um' }, +{ oid => '585', descr => 'negate', + oprname => '-', oprkind => 'l', oprleft => '0', oprright => 'float8', + oprresult => 'float8', oprcode => 'float8um' }, +{ oid => '586', descr => 'add', + oprname => '+', oprleft => 'float4', oprright => 'float4', + oprresult => 'float4', oprcom => '+(float4,float4)', oprcode => 'float4pl' }, +{ oid => '587', descr => 'subtract', + oprname => '-', oprleft => 'float4', oprright => 'float4', + oprresult => 'float4', oprcode => 'float4mi' }, +{ oid => '588', descr => 'divide', + oprname => '/', oprleft => 'float4', oprright => 'float4', + oprresult => 'float4', oprcode => 'float4div' }, +{ oid => '589', descr => 'multiply', + oprname => '*', oprleft => 'float4', oprright => 'float4', + oprresult => 'float4', oprcom => '*(float4,float4)', oprcode => 'float4mul' }, +{ oid => '590', descr => 'absolute value', + oprname => '@', oprkind => 'l', oprleft => '0', oprright => 'float4', + oprresult => 'float4', oprcode => 'float4abs' }, +{ oid => '591', descr => 'add', + oprname => '+', oprleft => 'float8', oprright => 'float8', + oprresult => 'float8', oprcom => '+(float8,float8)', oprcode => 'float8pl' }, +{ oid => '592', descr => 'subtract', + oprname => '-', oprleft => 'float8', oprright => 'float8', + oprresult => 'float8', oprcode => 'float8mi' }, +{ oid => '593', descr => 'divide', + oprname => '/', oprleft => 'float8', oprright => 'float8', + oprresult => 'float8', oprcode => 'float8div' }, +{ oid => '594', descr => 'multiply', + oprname => '*', oprleft => 'float8', oprright => 'float8', + oprresult => 'float8', oprcom => '*(float8,float8)', oprcode => 'float8mul' }, +{ oid => '595', descr => 'absolute value', + oprname => '@', oprkind => 'l', oprleft => '0', oprright => 'float8', + oprresult => 'float8', oprcode => 'float8abs' }, +{ oid => '596', descr => 'square root', + oprname => '|/', oprkind => 'l', oprleft => '0', oprright => 'float8', + oprresult => 'float8', oprcode => 'dsqrt' }, +{ oid => '597', descr => 'cube root', + oprname => '||/', oprkind => 'l', oprleft => '0', oprright => 'float8', + oprresult => 'float8', oprcode => 'dcbrt' }, + +{ oid => '607', descr => 'equal', + oprname => '=', oprcanmerge => 't', oprcanhash => 't', oprleft => 'oid', + oprright => 'oid', oprresult => 'bool', oprcom => '=(oid,oid)', + oprnegate => '<>(oid,oid)', oprcode => 'oideq', oprrest => 'eqsel', + oprjoin => 'eqjoinsel' }, +{ oid => '608', descr => 'not equal', + oprname => '<>', oprleft => 'oid', oprright => 'oid', oprresult => 'bool', + oprcom => '<>(oid,oid)', oprnegate => '=(oid,oid)', oprcode => 'oidne', + oprrest => 'neqsel', oprjoin => 'neqjoinsel' }, +{ oid => '609', descr => 'less than', + oprname => '<', oprleft => 'oid', oprright => 'oid', oprresult => 'bool', + oprcom => '>(oid,oid)', oprnegate => '>=(oid,oid)', oprcode => 'oidlt', + oprrest => 'scalarltsel', oprjoin => 'scalarltjoinsel' }, +{ oid => '610', descr => 'greater than', + oprname => '>', oprleft => 'oid', oprright => 'oid', oprresult => 'bool', + oprcom => '<(oid,oid)', oprnegate => '<=(oid,oid)', oprcode => 'oidgt', + oprrest => 'scalargtsel', 
oprjoin => 'scalargtjoinsel' }, +{ oid => '611', descr => 'less than or equal', + oprname => '<=', oprleft => 'oid', oprright => 'oid', oprresult => 'bool', + oprcom => '>=(oid,oid)', oprnegate => '>(oid,oid)', oprcode => 'oidle', + oprrest => 'scalarlesel', oprjoin => 'scalarlejoinsel' }, +{ oid => '612', descr => 'greater than or equal', + oprname => '>=', oprleft => 'oid', oprright => 'oid', oprresult => 'bool', + oprcom => '<=(oid,oid)', oprnegate => '<(oid,oid)', oprcode => 'oidge', + oprrest => 'scalargesel', oprjoin => 'scalargejoinsel' }, + +{ oid => '644', descr => 'not equal', + oprname => '<>', oprleft => 'oidvector', oprright => 'oidvector', + oprresult => 'bool', oprcom => '<>(oidvector,oidvector)', + oprnegate => '=(oidvector,oidvector)', oprcode => 'oidvectorne', + oprrest => 'neqsel', oprjoin => 'neqjoinsel' }, +{ oid => '645', descr => 'less than', + oprname => '<', oprleft => 'oidvector', oprright => 'oidvector', + oprresult => 'bool', oprcom => '>(oidvector,oidvector)', + oprnegate => '>=(oidvector,oidvector)', oprcode => 'oidvectorlt', + oprrest => 'scalarltsel', oprjoin => 'scalarltjoinsel' }, +{ oid => '646', descr => 'greater than', + oprname => '>', oprleft => 'oidvector', oprright => 'oidvector', + oprresult => 'bool', oprcom => '<(oidvector,oidvector)', + oprnegate => '<=(oidvector,oidvector)', oprcode => 'oidvectorgt', + oprrest => 'scalargtsel', oprjoin => 'scalargtjoinsel' }, +{ oid => '647', descr => 'less than or equal', + oprname => '<=', oprleft => 'oidvector', oprright => 'oidvector', + oprresult => 'bool', oprcom => '>=(oidvector,oidvector)', + oprnegate => '>(oidvector,oidvector)', oprcode => 'oidvectorle', + oprrest => 'scalarlesel', oprjoin => 'scalarlejoinsel' }, +{ oid => '648', descr => 'greater than or equal', + oprname => '>=', oprleft => 'oidvector', oprright => 'oidvector', + oprresult => 'bool', oprcom => '<=(oidvector,oidvector)', + oprnegate => '<(oidvector,oidvector)', oprcode => 'oidvectorge', + oprrest => 'scalargesel', oprjoin => 'scalargejoinsel' }, +{ oid => '649', descr => 'equal', + oprname => '=', oprcanmerge => 't', oprcanhash => 't', oprleft => 'oidvector', + oprright => 'oidvector', oprresult => 'bool', + oprcom => '=(oidvector,oidvector)', oprnegate => '<>(oidvector,oidvector)', + oprcode => 'oidvectoreq', oprrest => 'eqsel', oprjoin => 'eqjoinsel' }, + +{ oid => '613', descr => 'distance between', + oprname => '<->', oprleft => 'point', oprright => 'line', + oprresult => 'float8', oprcode => 'dist_pl' }, +{ oid => '614', descr => 'distance between', + oprname => '<->', oprleft => 'point', oprright => 'lseg', + oprresult => 'float8', oprcode => 'dist_ps' }, +{ oid => '615', descr => 'distance between', + oprname => '<->', oprleft => 'point', oprright => 'box', + oprresult => 'float8', oprcode => 'dist_pb' }, +{ oid => '616', descr => 'distance between', + oprname => '<->', oprleft => 'lseg', oprright => 'line', + oprresult => 'float8', oprcode => 'dist_sl' }, +{ oid => '617', descr => 'distance between', + oprname => '<->', oprleft => 'lseg', oprright => 'box', oprresult => 'float8', + oprcode => 'dist_sb' }, +{ oid => '618', descr => 'distance between', + oprname => '<->', oprleft => 'point', oprright => 'path', + oprresult => 'float8', oprcode => 'dist_ppath' }, + +{ oid => '620', descr => 'equal', + oprname => '=', oprcanmerge => 't', oprcanhash => 't', oprleft => 'float4', + oprright => 'float4', oprresult => 'bool', oprcom => '=(float4,float4)', + oprnegate => '<>(float4,float4)', oprcode => 'float4eq', oprrest => 'eqsel', + 
oprjoin => 'eqjoinsel' }, +{ oid => '621', descr => 'not equal', + oprname => '<>', oprleft => 'float4', oprright => 'float4', + oprresult => 'bool', oprcom => '<>(float4,float4)', + oprnegate => '=(float4,float4)', oprcode => 'float4ne', oprrest => 'neqsel', + oprjoin => 'neqjoinsel' }, +{ oid => '622', descr => 'less than', + oprname => '<', oprleft => 'float4', oprright => 'float4', + oprresult => 'bool', oprcom => '>(float4,float4)', + oprnegate => '>=(float4,float4)', oprcode => 'float4lt', + oprrest => 'scalarltsel', oprjoin => 'scalarltjoinsel' }, +{ oid => '623', descr => 'greater than', + oprname => '>', oprleft => 'float4', oprright => 'float4', + oprresult => 'bool', oprcom => '<(float4,float4)', + oprnegate => '<=(float4,float4)', oprcode => 'float4gt', + oprrest => 'scalargtsel', oprjoin => 'scalargtjoinsel' }, +{ oid => '624', descr => 'less than or equal', + oprname => '<=', oprleft => 'float4', oprright => 'float4', + oprresult => 'bool', oprcom => '>=(float4,float4)', + oprnegate => '>(float4,float4)', oprcode => 'float4le', + oprrest => 'scalarlesel', oprjoin => 'scalarlejoinsel' }, +{ oid => '625', descr => 'greater than or equal', + oprname => '>=', oprleft => 'float4', oprright => 'float4', + oprresult => 'bool', oprcom => '<=(float4,float4)', + oprnegate => '<(float4,float4)', oprcode => 'float4ge', + oprrest => 'scalargesel', oprjoin => 'scalargejoinsel' }, +{ oid => '630', descr => 'not equal', + oprname => '<>', oprleft => 'char', oprright => 'char', oprresult => 'bool', + oprcom => '<>(char,char)', oprnegate => '=(char,char)', oprcode => 'charne', + oprrest => 'neqsel', oprjoin => 'neqjoinsel' }, + +{ oid => '631', descr => 'less than', + oprname => '<', oprleft => 'char', oprright => 'char', oprresult => 'bool', + oprcom => '>(char,char)', oprnegate => '>=(char,char)', oprcode => 'charlt', + oprrest => 'scalarltsel', oprjoin => 'scalarltjoinsel' }, +{ oid => '632', descr => 'less than or equal', + oprname => '<=', oprleft => 'char', oprright => 'char', oprresult => 'bool', + oprcom => '>=(char,char)', oprnegate => '>(char,char)', oprcode => 'charle', + oprrest => 'scalarlesel', oprjoin => 'scalarlejoinsel' }, +{ oid => '633', descr => 'greater than', + oprname => '>', oprleft => 'char', oprright => 'char', oprresult => 'bool', + oprcom => '<(char,char)', oprnegate => '<=(char,char)', oprcode => 'chargt', + oprrest => 'scalargtsel', oprjoin => 'scalargtjoinsel' }, +{ oid => '634', descr => 'greater than or equal', + oprname => '>=', oprleft => 'char', oprright => 'char', oprresult => 'bool', + oprcom => '<=(char,char)', oprnegate => '<(char,char)', oprcode => 'charge', + oprrest => 'scalargesel', oprjoin => 'scalargejoinsel' }, + +{ oid => '639', oid_symbol => 'OID_NAME_REGEXEQ_OP', + descr => 'matches regular expression, case-sensitive', + oprname => '~', oprleft => 'name', oprright => 'text', oprresult => 'bool', + oprnegate => '!~(name,text)', oprcode => 'nameregexeq', + oprrest => 'regexeqsel', oprjoin => 'regexeqjoinsel' }, +{ oid => '640', descr => 'does not match regular expression, case-sensitive', + oprname => '!~', oprleft => 'name', oprright => 'text', oprresult => 'bool', + oprnegate => '~(name,text)', oprcode => 'nameregexne', + oprrest => 'regexnesel', oprjoin => 'regexnejoinsel' }, +{ oid => '641', oid_symbol => 'OID_TEXT_REGEXEQ_OP', + descr => 'matches regular expression, case-sensitive', + oprname => '~', oprleft => 'text', oprright => 'text', oprresult => 'bool', + oprnegate => '!~(text,text)', oprcode => 'textregexeq', + oprrest => 
'regexeqsel', oprjoin => 'regexeqjoinsel' }, +{ oid => '642', descr => 'does not match regular expression, case-sensitive', + oprname => '!~', oprleft => 'text', oprright => 'text', oprresult => 'bool', + oprnegate => '~(text,text)', oprcode => 'textregexne', + oprrest => 'regexnesel', oprjoin => 'regexnejoinsel' }, +{ oid => '643', descr => 'not equal', + oprname => '<>', oprleft => 'name', oprright => 'name', oprresult => 'bool', + oprcom => '<>(name,name)', oprnegate => '=(name,name)', oprcode => 'namene', + oprrest => 'neqsel', oprjoin => 'neqjoinsel' }, +{ oid => '654', descr => 'concatenate', + oprname => '||', oprleft => 'text', oprright => 'text', oprresult => 'text', + oprcode => 'textcat' }, + +{ oid => '660', descr => 'less than', + oprname => '<', oprleft => 'name', oprright => 'name', oprresult => 'bool', + oprcom => '>(name,name)', oprnegate => '>=(name,name)', oprcode => 'namelt', + oprrest => 'scalarltsel', oprjoin => 'scalarltjoinsel' }, +{ oid => '661', descr => 'less than or equal', + oprname => '<=', oprleft => 'name', oprright => 'name', oprresult => 'bool', + oprcom => '>=(name,name)', oprnegate => '>(name,name)', oprcode => 'namele', + oprrest => 'scalarlesel', oprjoin => 'scalarlejoinsel' }, +{ oid => '662', descr => 'greater than', + oprname => '>', oprleft => 'name', oprright => 'name', oprresult => 'bool', + oprcom => '<(name,name)', oprnegate => '<=(name,name)', oprcode => 'namegt', + oprrest => 'scalargtsel', oprjoin => 'scalargtjoinsel' }, +{ oid => '663', descr => 'greater than or equal', + oprname => '>=', oprleft => 'name', oprright => 'name', oprresult => 'bool', + oprcom => '<=(name,name)', oprnegate => '<(name,name)', oprcode => 'namege', + oprrest => 'scalargesel', oprjoin => 'scalargejoinsel' }, +{ oid => '664', descr => 'less than', + oprname => '<', oprleft => 'text', oprright => 'text', oprresult => 'bool', + oprcom => '>(text,text)', oprnegate => '>=(text,text)', oprcode => 'text_lt', + oprrest => 'scalarltsel', oprjoin => 'scalarltjoinsel' }, +{ oid => '665', descr => 'less than or equal', + oprname => '<=', oprleft => 'text', oprright => 'text', oprresult => 'bool', + oprcom => '>=(text,text)', oprnegate => '>(text,text)', oprcode => 'text_le', + oprrest => 'scalarlesel', oprjoin => 'scalarlejoinsel' }, +{ oid => '666', descr => 'greater than', + oprname => '>', oprleft => 'text', oprright => 'text', oprresult => 'bool', + oprcom => '<(text,text)', oprnegate => '<=(text,text)', oprcode => 'text_gt', + oprrest => 'scalargtsel', oprjoin => 'scalargtjoinsel' }, +{ oid => '667', descr => 'greater than or equal', + oprname => '>=', oprleft => 'text', oprright => 'text', oprresult => 'bool', + oprcom => '<=(text,text)', oprnegate => '<(text,text)', oprcode => 'text_ge', + oprrest => 'scalargesel', oprjoin => 'scalargejoinsel' }, + +{ oid => '670', descr => 'equal', + oprname => '=', oprcanmerge => 't', oprcanhash => 't', oprleft => 'float8', + oprright => 'float8', oprresult => 'bool', oprcom => '=(float8,float8)', + oprnegate => '<>(float8,float8)', oprcode => 'float8eq', oprrest => 'eqsel', + oprjoin => 'eqjoinsel' }, +{ oid => '671', descr => 'not equal', + oprname => '<>', oprleft => 'float8', oprright => 'float8', + oprresult => 'bool', oprcom => '<>(float8,float8)', + oprnegate => '=(float8,float8)', oprcode => 'float8ne', oprrest => 'neqsel', + oprjoin => 'neqjoinsel' }, +{ oid => '672', oid_symbol => 'Float8LessOperator', descr => 'less than', + oprname => '<', oprleft => 'float8', oprright => 'float8', + oprresult => 'bool', oprcom => 
'>(float8,float8)', + oprnegate => '>=(float8,float8)', oprcode => 'float8lt', + oprrest => 'scalarltsel', oprjoin => 'scalarltjoinsel' }, +{ oid => '673', descr => 'less than or equal', + oprname => '<=', oprleft => 'float8', oprright => 'float8', + oprresult => 'bool', oprcom => '>=(float8,float8)', + oprnegate => '>(float8,float8)', oprcode => 'float8le', + oprrest => 'scalarlesel', oprjoin => 'scalarlejoinsel' }, +{ oid => '674', descr => 'greater than', + oprname => '>', oprleft => 'float8', oprright => 'float8', + oprresult => 'bool', oprcom => '<(float8,float8)', + oprnegate => '<=(float8,float8)', oprcode => 'float8gt', + oprrest => 'scalargtsel', oprjoin => 'scalargtjoinsel' }, +{ oid => '675', descr => 'greater than or equal', + oprname => '>=', oprleft => 'float8', oprright => 'float8', + oprresult => 'bool', oprcom => '<=(float8,float8)', + oprnegate => '<(float8,float8)', oprcode => 'float8ge', + oprrest => 'scalargesel', oprjoin => 'scalargejoinsel' }, + +{ oid => '682', descr => 'absolute value', + oprname => '@', oprkind => 'l', oprleft => '0', oprright => 'int2', + oprresult => 'int2', oprcode => 'int2abs' }, +{ oid => '684', descr => 'add', + oprname => '+', oprleft => 'int8', oprright => 'int8', oprresult => 'int8', + oprcom => '+(int8,int8)', oprcode => 'int8pl' }, +{ oid => '685', descr => 'subtract', + oprname => '-', oprleft => 'int8', oprright => 'int8', oprresult => 'int8', + oprcode => 'int8mi' }, +{ oid => '686', descr => 'multiply', + oprname => '*', oprleft => 'int8', oprright => 'int8', oprresult => 'int8', + oprcom => '*(int8,int8)', oprcode => 'int8mul' }, +{ oid => '687', descr => 'divide', + oprname => '/', oprleft => 'int8', oprright => 'int8', oprresult => 'int8', + oprcode => 'int8div' }, + +{ oid => '688', descr => 'add', + oprname => '+', oprleft => 'int8', oprright => 'int4', oprresult => 'int8', + oprcom => '+(int4,int8)', oprcode => 'int84pl' }, +{ oid => '689', descr => 'subtract', + oprname => '-', oprleft => 'int8', oprright => 'int4', oprresult => 'int8', + oprcode => 'int84mi' }, +{ oid => '690', descr => 'multiply', + oprname => '*', oprleft => 'int8', oprright => 'int4', oprresult => 'int8', + oprcom => '*(int4,int8)', oprcode => 'int84mul' }, +{ oid => '691', descr => 'divide', + oprname => '/', oprleft => 'int8', oprright => 'int4', oprresult => 'int8', + oprcode => 'int84div' }, +{ oid => '692', descr => 'add', + oprname => '+', oprleft => 'int4', oprright => 'int8', oprresult => 'int8', + oprcom => '+(int8,int4)', oprcode => 'int48pl' }, +{ oid => '693', descr => 'subtract', + oprname => '-', oprleft => 'int4', oprright => 'int8', oprresult => 'int8', + oprcode => 'int48mi' }, +{ oid => '694', descr => 'multiply', + oprname => '*', oprleft => 'int4', oprright => 'int8', oprresult => 'int8', + oprcom => '*(int8,int4)', oprcode => 'int48mul' }, +{ oid => '695', descr => 'divide', + oprname => '/', oprleft => 'int4', oprright => 'int8', oprresult => 'int8', + oprcode => 'int48div' }, + +{ oid => '818', descr => 'add', + oprname => '+', oprleft => 'int8', oprright => 'int2', oprresult => 'int8', + oprcom => '+(int2,int8)', oprcode => 'int82pl' }, +{ oid => '819', descr => 'subtract', + oprname => '-', oprleft => 'int8', oprright => 'int2', oprresult => 'int8', + oprcode => 'int82mi' }, +{ oid => '820', descr => 'multiply', + oprname => '*', oprleft => 'int8', oprright => 'int2', oprresult => 'int8', + oprcom => '*(int2,int8)', oprcode => 'int82mul' }, +{ oid => '821', descr => 'divide', + oprname => '/', oprleft => 'int8', oprright => 
'int2', oprresult => 'int8', + oprcode => 'int82div' }, +{ oid => '822', descr => 'add', + oprname => '+', oprleft => 'int2', oprright => 'int8', oprresult => 'int8', + oprcom => '+(int8,int2)', oprcode => 'int28pl' }, +{ oid => '823', descr => 'subtract', + oprname => '-', oprleft => 'int2', oprright => 'int8', oprresult => 'int8', + oprcode => 'int28mi' }, +{ oid => '824', descr => 'multiply', + oprname => '*', oprleft => 'int2', oprright => 'int8', oprresult => 'int8', + oprcom => '*(int8,int2)', oprcode => 'int28mul' }, +{ oid => '825', descr => 'divide', + oprname => '/', oprleft => 'int2', oprright => 'int8', oprresult => 'int8', + oprcode => 'int28div' }, + +{ oid => '706', descr => 'distance between', + oprname => '<->', oprleft => 'box', oprright => 'box', oprresult => 'float8', + oprcom => '<->(box,box)', oprcode => 'box_distance' }, +{ oid => '707', descr => 'distance between', + oprname => '<->', oprleft => 'path', oprright => 'path', + oprresult => 'float8', oprcom => '<->(path,path)', + oprcode => 'path_distance' }, +{ oid => '708', descr => 'distance between', + oprname => '<->', oprleft => 'line', oprright => 'line', + oprresult => 'float8', oprcom => '<->(line,line)', + oprcode => 'line_distance' }, +{ oid => '709', descr => 'distance between', + oprname => '<->', oprleft => 'lseg', oprright => 'lseg', + oprresult => 'float8', oprcom => '<->(lseg,lseg)', + oprcode => 'lseg_distance' }, +{ oid => '712', descr => 'distance between', + oprname => '<->', oprleft => 'polygon', oprright => 'polygon', + oprresult => 'float8', oprcom => '<->(polygon,polygon)', + oprcode => 'poly_distance' }, + +{ oid => '713', descr => 'not equal', + oprname => '<>', oprleft => 'point', oprright => 'point', oprresult => 'bool', + oprcom => '<>(point,point)', oprnegate => '~=(point,point)', + oprcode => 'point_ne', oprrest => 'neqsel', oprjoin => 'neqjoinsel' }, + +# add translation/rotation/scaling operators for geometric types. 
- thomas 97/05/10 +{ oid => '731', descr => 'add points (translate)', + oprname => '+', oprleft => 'point', oprright => 'point', oprresult => 'point', + oprcom => '+(point,point)', oprcode => 'point_add' }, +{ oid => '732', descr => 'subtract points (translate)', + oprname => '-', oprleft => 'point', oprright => 'point', oprresult => 'point', + oprcode => 'point_sub' }, +{ oid => '733', descr => 'multiply points (scale/rotate)', + oprname => '*', oprleft => 'point', oprright => 'point', oprresult => 'point', + oprcom => '*(point,point)', oprcode => 'point_mul' }, +{ oid => '734', descr => 'divide points (scale/rotate)', + oprname => '/', oprleft => 'point', oprright => 'point', oprresult => 'point', + oprcode => 'point_div' }, +{ oid => '735', descr => 'concatenate', + oprname => '+', oprleft => 'path', oprright => 'path', oprresult => 'path', + oprcom => '+(path,path)', oprcode => 'path_add' }, +{ oid => '736', descr => 'add (translate path)', + oprname => '+', oprleft => 'path', oprright => 'point', oprresult => 'path', + oprcode => 'path_add_pt' }, +{ oid => '737', descr => 'subtract (translate path)', + oprname => '-', oprleft => 'path', oprright => 'point', oprresult => 'path', + oprcode => 'path_sub_pt' }, +{ oid => '738', descr => 'multiply (rotate/scale path)', + oprname => '*', oprleft => 'path', oprright => 'point', oprresult => 'path', + oprcode => 'path_mul_pt' }, +{ oid => '739', descr => 'divide (rotate/scale path)', + oprname => '/', oprleft => 'path', oprright => 'point', oprresult => 'path', + oprcode => 'path_div_pt' }, +{ oid => '755', descr => 'contains', + oprname => '@>', oprleft => 'path', oprright => 'point', oprresult => 'bool', + oprcom => '<@(point,path)', oprcode => 'path_contain_pt' }, +{ oid => '756', descr => 'is contained by', + oprname => '<@', oprleft => 'point', oprright => 'polygon', + oprresult => 'bool', oprcom => '@>(polygon,point)', + oprcode => 'pt_contained_poly', oprrest => 'contsel', + oprjoin => 'contjoinsel' }, +{ oid => '757', descr => 'contains', + oprname => '@>', oprleft => 'polygon', oprright => 'point', + oprresult => 'bool', oprcom => '<@(point,polygon)', + oprcode => 'poly_contain_pt', oprrest => 'contsel', + oprjoin => 'contjoinsel' }, +{ oid => '758', descr => 'is contained by', + oprname => '<@', oprleft => 'point', oprright => 'circle', + oprresult => 'bool', oprcom => '@>(circle,point)', + oprcode => 'pt_contained_circle', oprrest => 'contsel', + oprjoin => 'contjoinsel' }, +{ oid => '759', descr => 'contains', + oprname => '@>', oprleft => 'circle', oprright => 'point', + oprresult => 'bool', oprcom => '<@(point,circle)', + oprcode => 'circle_contain_pt', oprrest => 'contsel', + oprjoin => 'contjoinsel' }, + +{ oid => '773', descr => 'absolute value', + oprname => '@', oprkind => 'l', oprleft => '0', oprright => 'int4', + oprresult => 'int4', oprcode => 'int4abs' }, + +# additional operators for geometric types - thomas 1997-07-09 +{ oid => '792', descr => 'equal', + oprname => '=', oprleft => 'path', oprright => 'path', oprresult => 'bool', + oprcom => '=(path,path)', oprcode => 'path_n_eq', oprrest => 'eqsel', + oprjoin => 'eqjoinsel' }, +{ oid => '793', descr => 'less than', + oprname => '<', oprleft => 'path', oprright => 'path', oprresult => 'bool', + oprcom => '>(path,path)', oprcode => 'path_n_lt' }, +{ oid => '794', descr => 'greater than', + oprname => '>', oprleft => 'path', oprright => 'path', oprresult => 'bool', + oprcom => '<(path,path)', oprcode => 'path_n_gt' }, +{ oid => '795', descr => 'less than or equal', + 
oprname => '<=', oprleft => 'path', oprright => 'path', oprresult => 'bool', + oprcom => '>=(path,path)', oprcode => 'path_n_le' }, +{ oid => '796', descr => 'greater than or equal', + oprname => '>=', oprleft => 'path', oprright => 'path', oprresult => 'bool', + oprcom => '<=(path,path)', oprcode => 'path_n_ge' }, +{ oid => '797', descr => 'number of points', + oprname => '#', oprkind => 'l', oprleft => '0', oprright => 'path', + oprresult => 'int4', oprcode => 'path_npoints' }, +{ oid => '798', descr => 'intersect', + oprname => '?#', oprleft => 'path', oprright => 'path', oprresult => 'bool', + oprcode => 'path_inter' }, +{ oid => '799', descr => 'sum of path segment lengths', + oprname => '@-@', oprkind => 'l', oprleft => '0', oprright => 'path', + oprresult => 'float8', oprcode => 'path_length' }, +{ oid => '800', descr => 'is above (allows touching)', + oprname => '>^', oprleft => 'box', oprright => 'box', oprresult => 'bool', + oprcode => 'box_above_eq', oprrest => 'positionsel', + oprjoin => 'positionjoinsel' }, +{ oid => '801', descr => 'is below (allows touching)', + oprname => '<^', oprleft => 'box', oprright => 'box', oprresult => 'bool', + oprcode => 'box_below_eq', oprrest => 'positionsel', + oprjoin => 'positionjoinsel' }, +{ oid => '802', descr => 'deprecated, use && instead', + oprname => '?#', oprleft => 'box', oprright => 'box', oprresult => 'bool', + oprcode => 'box_overlap', oprrest => 'areasel', oprjoin => 'areajoinsel' }, +{ oid => '803', descr => 'box intersection', + oprname => '#', oprleft => 'box', oprright => 'box', oprresult => 'box', + oprcode => 'box_intersect' }, +{ oid => '804', descr => 'add point to box (translate)', + oprname => '+', oprleft => 'box', oprright => 'point', oprresult => 'box', + oprcode => 'box_add' }, +{ oid => '805', descr => 'subtract point from box (translate)', + oprname => '-', oprleft => 'box', oprright => 'point', oprresult => 'box', + oprcode => 'box_sub' }, +{ oid => '806', descr => 'multiply box by point (scale)', + oprname => '*', oprleft => 'box', oprright => 'point', oprresult => 'box', + oprcode => 'box_mul' }, +{ oid => '807', descr => 'divide box by point (scale)', + oprname => '/', oprleft => 'box', oprright => 'point', oprresult => 'box', + oprcode => 'box_div' }, +{ oid => '808', descr => 'horizontally aligned', + oprname => '?-', oprleft => 'point', oprright => 'point', oprresult => 'bool', + oprcom => '?-(point,point)', oprcode => 'point_horiz' }, +{ oid => '809', descr => 'vertically aligned', + oprname => '?|', oprleft => 'point', oprright => 'point', oprresult => 'bool', + oprcom => '?|(point,point)', oprcode => 'point_vert' }, + +{ oid => '843', descr => 'multiply', + oprname => '*', oprleft => 'money', oprright => 'float4', + oprresult => 'money', oprcom => '*(float4,money)', + oprcode => 'cash_mul_flt4' }, +{ oid => '844', descr => 'divide', + oprname => '/', oprleft => 'money', oprright => 'float4', + oprresult => 'money', oprcode => 'cash_div_flt4' }, +{ oid => '845', descr => 'multiply', + oprname => '*', oprleft => 'float4', oprright => 'money', + oprresult => 'money', oprcom => '*(money,float4)', + oprcode => 'flt4_mul_cash' }, + +{ oid => '900', descr => 'equal', + oprname => '=', oprcanmerge => 't', oprleft => 'money', oprright => 'money', + oprresult => 'bool', oprcom => '=(money,money)', + oprnegate => '<>(money,money)', oprcode => 'cash_eq', oprrest => 'eqsel', + oprjoin => 'eqjoinsel' }, +{ oid => '901', descr => 'not equal', + oprname => '<>', oprleft => 'money', oprright => 'money', oprresult => 
'bool', + oprcom => '<>(money,money)', oprnegate => '=(money,money)', + oprcode => 'cash_ne', oprrest => 'neqsel', oprjoin => 'neqjoinsel' }, +{ oid => '902', descr => 'less than', + oprname => '<', oprleft => 'money', oprright => 'money', oprresult => 'bool', + oprcom => '>(money,money)', oprnegate => '>=(money,money)', + oprcode => 'cash_lt', oprrest => 'scalarltsel', + oprjoin => 'scalarltjoinsel' }, +{ oid => '903', descr => 'greater than', + oprname => '>', oprleft => 'money', oprright => 'money', oprresult => 'bool', + oprcom => '<(money,money)', oprnegate => '<=(money,money)', + oprcode => 'cash_gt', oprrest => 'scalargtsel', + oprjoin => 'scalargtjoinsel' }, +{ oid => '904', descr => 'less than or equal', + oprname => '<=', oprleft => 'money', oprright => 'money', oprresult => 'bool', + oprcom => '>=(money,money)', oprnegate => '>(money,money)', + oprcode => 'cash_le', oprrest => 'scalarlesel', + oprjoin => 'scalarlejoinsel' }, +{ oid => '905', descr => 'greater than or equal', + oprname => '>=', oprleft => 'money', oprright => 'money', oprresult => 'bool', + oprcom => '<=(money,money)', oprnegate => '<(money,money)', + oprcode => 'cash_ge', oprrest => 'scalargesel', + oprjoin => 'scalargejoinsel' }, +{ oid => '906', descr => 'add', + oprname => '+', oprleft => 'money', oprright => 'money', oprresult => 'money', + oprcom => '+(money,money)', oprcode => 'cash_pl' }, +{ oid => '907', descr => 'subtract', + oprname => '-', oprleft => 'money', oprright => 'money', oprresult => 'money', + oprcode => 'cash_mi' }, +{ oid => '908', descr => 'multiply', + oprname => '*', oprleft => 'money', oprright => 'float8', + oprresult => 'money', oprcom => '*(float8,money)', + oprcode => 'cash_mul_flt8' }, +{ oid => '909', descr => 'divide', + oprname => '/', oprleft => 'money', oprright => 'float8', + oprresult => 'money', oprcode => 'cash_div_flt8' }, +{ oid => '3346', descr => 'multiply', + oprname => '*', oprleft => 'money', oprright => 'int8', oprresult => 'money', + oprcom => '*(int8,money)', oprcode => 'cash_mul_int8' }, +{ oid => '3347', descr => 'divide', + oprname => '/', oprleft => 'money', oprright => 'int8', oprresult => 'money', + oprcode => 'cash_div_int8' }, +{ oid => '912', descr => 'multiply', + oprname => '*', oprleft => 'money', oprright => 'int4', oprresult => 'money', + oprcom => '*(int4,money)', oprcode => 'cash_mul_int4' }, +{ oid => '913', descr => 'divide', + oprname => '/', oprleft => 'money', oprright => 'int4', oprresult => 'money', + oprcode => 'cash_div_int4' }, +{ oid => '914', descr => 'multiply', + oprname => '*', oprleft => 'money', oprright => 'int2', oprresult => 'money', + oprcom => '*(int2,money)', oprcode => 'cash_mul_int2' }, +{ oid => '915', descr => 'divide', + oprname => '/', oprleft => 'money', oprright => 'int2', oprresult => 'money', + oprcode => 'cash_div_int2' }, +{ oid => '916', descr => 'multiply', + oprname => '*', oprleft => 'float8', oprright => 'money', + oprresult => 'money', oprcom => '*(money,float8)', + oprcode => 'flt8_mul_cash' }, +{ oid => '3349', descr => 'multiply', + oprname => '*', oprleft => 'int8', oprright => 'money', oprresult => 'money', + oprcom => '*(money,int8)', oprcode => 'int8_mul_cash' }, +{ oid => '917', descr => 'multiply', + oprname => '*', oprleft => 'int4', oprright => 'money', oprresult => 'money', + oprcom => '*(money,int4)', oprcode => 'int4_mul_cash' }, +{ oid => '918', descr => 'multiply', + oprname => '*', oprleft => 'int2', oprright => 'money', oprresult => 'money', + oprcom => '*(money,int2)', oprcode => 
'int2_mul_cash' }, +{ oid => '3825', descr => 'divide', + oprname => '/', oprleft => 'money', oprright => 'money', + oprresult => 'float8', oprcode => 'cash_div_cash' }, + +{ oid => '965', descr => 'exponentiation', + oprname => '^', oprleft => 'float8', oprright => 'float8', + oprresult => 'float8', oprcode => 'dpow' }, +{ oid => '966', descr => 'add/update ACL item', + oprname => '+', oprleft => '_aclitem', oprright => 'aclitem', + oprresult => '_aclitem', oprcode => 'aclinsert' }, +{ oid => '967', descr => 'remove ACL item', + oprname => '-', oprleft => '_aclitem', oprright => 'aclitem', + oprresult => '_aclitem', oprcode => 'aclremove' }, +{ oid => '968', descr => 'contains', + oprname => '@>', oprleft => '_aclitem', oprright => 'aclitem', + oprresult => 'bool', oprcode => 'aclcontains' }, +{ oid => '974', descr => 'equal', + oprname => '=', oprcanhash => 't', oprleft => 'aclitem', + oprright => 'aclitem', oprresult => 'bool', oprcom => '=(aclitem,aclitem)', + oprcode => 'aclitemeq', oprrest => 'eqsel', oprjoin => 'eqjoinsel' }, + +# additional geometric operators - thomas 1997-07-09 +{ oid => '969', descr => 'center of', + oprname => '@@', oprkind => 'l', oprleft => '0', oprright => 'lseg', + oprresult => 'point', oprcode => 'lseg_center' }, +{ oid => '970', descr => 'center of', + oprname => '@@', oprkind => 'l', oprleft => '0', oprright => 'path', + oprresult => 'point', oprcode => 'path_center' }, +{ oid => '971', descr => 'center of', + oprname => '@@', oprkind => 'l', oprleft => '0', oprright => 'polygon', + oprresult => 'point', oprcode => 'poly_center' }, + +{ oid => '1054', descr => 'equal', + oprname => '=', oprcanmerge => 't', oprcanhash => 't', oprleft => 'bpchar', + oprright => 'bpchar', oprresult => 'bool', oprcom => '=(bpchar,bpchar)', + oprnegate => '<>(bpchar,bpchar)', oprcode => 'bpchareq', oprrest => 'eqsel', + oprjoin => 'eqjoinsel' }, + +{ oid => '1055', oid_symbol => 'OID_BPCHAR_REGEXEQ_OP', + descr => 'matches regular expression, case-sensitive', + oprname => '~', oprleft => 'bpchar', oprright => 'text', oprresult => 'bool', + oprnegate => '!~(bpchar,text)', oprcode => 'bpcharregexeq', + oprrest => 'regexeqsel', oprjoin => 'regexeqjoinsel' }, +{ oid => '1056', descr => 'does not match regular expression, case-sensitive', + oprname => '!~', oprleft => 'bpchar', oprright => 'text', oprresult => 'bool', + oprnegate => '~(bpchar,text)', oprcode => 'bpcharregexne', + oprrest => 'regexnesel', oprjoin => 'regexnejoinsel' }, +{ oid => '1057', descr => 'not equal', + oprname => '<>', oprleft => 'bpchar', oprright => 'bpchar', + oprresult => 'bool', oprcom => '<>(bpchar,bpchar)', + oprnegate => '=(bpchar,bpchar)', oprcode => 'bpcharne', oprrest => 'neqsel', + oprjoin => 'neqjoinsel' }, +{ oid => '1058', descr => 'less than', + oprname => '<', oprleft => 'bpchar', oprright => 'bpchar', + oprresult => 'bool', oprcom => '>(bpchar,bpchar)', + oprnegate => '>=(bpchar,bpchar)', oprcode => 'bpcharlt', + oprrest => 'scalarltsel', oprjoin => 'scalarltjoinsel' }, +{ oid => '1059', descr => 'less than or equal', + oprname => '<=', oprleft => 'bpchar', oprright => 'bpchar', + oprresult => 'bool', oprcom => '>=(bpchar,bpchar)', + oprnegate => '>(bpchar,bpchar)', oprcode => 'bpcharle', + oprrest => 'scalarlesel', oprjoin => 'scalarlejoinsel' }, +{ oid => '1060', descr => 'greater than', + oprname => '>', oprleft => 'bpchar', oprright => 'bpchar', + oprresult => 'bool', oprcom => '<(bpchar,bpchar)', + oprnegate => '<=(bpchar,bpchar)', oprcode => 'bpchargt', + oprrest => 'scalargtsel', 
oprjoin => 'scalargtjoinsel' }, +{ oid => '1061', descr => 'greater than or equal', + oprname => '>=', oprleft => 'bpchar', oprright => 'bpchar', + oprresult => 'bool', oprcom => '<=(bpchar,bpchar)', + oprnegate => '<(bpchar,bpchar)', oprcode => 'bpcharge', + oprrest => 'scalargesel', oprjoin => 'scalargejoinsel' }, + +# generic array comparison operators +{ oid => '1070', oid_symbol => 'ARRAY_EQ_OP', descr => 'equal', + oprname => '=', oprcanmerge => 't', oprcanhash => 't', oprleft => 'anyarray', + oprright => 'anyarray', oprresult => 'bool', oprcom => '=(anyarray,anyarray)', + oprnegate => '<>(anyarray,anyarray)', oprcode => 'array_eq', + oprrest => 'eqsel', oprjoin => 'eqjoinsel' }, +{ oid => '1071', descr => 'not equal', + oprname => '<>', oprleft => 'anyarray', oprright => 'anyarray', + oprresult => 'bool', oprcom => '<>(anyarray,anyarray)', + oprnegate => '=(anyarray,anyarray)', oprcode => 'array_ne', + oprrest => 'neqsel', oprjoin => 'neqjoinsel' }, +{ oid => '1072', oid_symbol => 'ARRAY_LT_OP', descr => 'less than', + oprname => '<', oprleft => 'anyarray', oprright => 'anyarray', + oprresult => 'bool', oprcom => '>(anyarray,anyarray)', + oprnegate => '>=(anyarray,anyarray)', oprcode => 'array_lt', + oprrest => 'scalarltsel', oprjoin => 'scalarltjoinsel' }, +{ oid => '1073', oid_symbol => 'ARRAY_GT_OP', descr => 'greater than', + oprname => '>', oprleft => 'anyarray', oprright => 'anyarray', + oprresult => 'bool', oprcom => '<(anyarray,anyarray)', + oprnegate => '<=(anyarray,anyarray)', oprcode => 'array_gt', + oprrest => 'scalargtsel', oprjoin => 'scalargtjoinsel' }, +{ oid => '1074', descr => 'less than or equal', + oprname => '<=', oprleft => 'anyarray', oprright => 'anyarray', + oprresult => 'bool', oprcom => '>=(anyarray,anyarray)', + oprnegate => '>(anyarray,anyarray)', oprcode => 'array_le', + oprrest => 'scalarlesel', oprjoin => 'scalarlejoinsel' }, +{ oid => '1075', descr => 'greater than or equal', + oprname => '>=', oprleft => 'anyarray', oprright => 'anyarray', + oprresult => 'bool', oprcom => '<=(anyarray,anyarray)', + oprnegate => '<(anyarray,anyarray)', oprcode => 'array_ge', + oprrest => 'scalargesel', oprjoin => 'scalargejoinsel' }, + +# date operators +{ oid => '1076', descr => 'add', + oprname => '+', oprleft => 'date', oprright => 'interval', + oprresult => 'timestamp', oprcom => '+(interval,date)', + oprcode => 'date_pl_interval' }, +{ oid => '1077', descr => 'subtract', + oprname => '-', oprleft => 'date', oprright => 'interval', + oprresult => 'timestamp', oprcode => 'date_mi_interval' }, +{ oid => '1093', descr => 'equal', + oprname => '=', oprcanmerge => 't', oprcanhash => 't', oprleft => 'date', + oprright => 'date', oprresult => 'bool', oprcom => '=(date,date)', + oprnegate => '<>(date,date)', oprcode => 'date_eq', oprrest => 'eqsel', + oprjoin => 'eqjoinsel' }, +{ oid => '1094', descr => 'not equal', + oprname => '<>', oprleft => 'date', oprright => 'date', oprresult => 'bool', + oprcom => '<>(date,date)', oprnegate => '=(date,date)', oprcode => 'date_ne', + oprrest => 'neqsel', oprjoin => 'neqjoinsel' }, +{ oid => '1095', descr => 'less than', + oprname => '<', oprleft => 'date', oprright => 'date', oprresult => 'bool', + oprcom => '>(date,date)', oprnegate => '>=(date,date)', oprcode => 'date_lt', + oprrest => 'scalarltsel', oprjoin => 'scalarltjoinsel' }, +{ oid => '1096', descr => 'less than or equal', + oprname => '<=', oprleft => 'date', oprright => 'date', oprresult => 'bool', + oprcom => '>=(date,date)', oprnegate => '>(date,date)', oprcode => 
'date_le', + oprrest => 'scalarlesel', oprjoin => 'scalarlejoinsel' }, +{ oid => '1097', descr => 'greater than', + oprname => '>', oprleft => 'date', oprright => 'date', oprresult => 'bool', + oprcom => '<(date,date)', oprnegate => '<=(date,date)', oprcode => 'date_gt', + oprrest => 'scalargtsel', oprjoin => 'scalargtjoinsel' }, +{ oid => '1098', descr => 'greater than or equal', + oprname => '>=', oprleft => 'date', oprright => 'date', oprresult => 'bool', + oprcom => '<=(date,date)', oprnegate => '<(date,date)', oprcode => 'date_ge', + oprrest => 'scalargesel', oprjoin => 'scalargejoinsel' }, +{ oid => '1099', descr => 'subtract', + oprname => '-', oprleft => 'date', oprright => 'date', oprresult => 'int4', + oprcode => 'date_mi' }, +{ oid => '1100', descr => 'add', + oprname => '+', oprleft => 'date', oprright => 'int4', oprresult => 'date', + oprcom => '+(int4,date)', oprcode => 'date_pli' }, +{ oid => '1101', descr => 'subtract', + oprname => '-', oprleft => 'date', oprright => 'int4', oprresult => 'date', + oprcode => 'date_mii' }, + +# time operators +{ oid => '1108', descr => 'equal', + oprname => '=', oprcanmerge => 't', oprcanhash => 't', oprleft => 'time', + oprright => 'time', oprresult => 'bool', oprcom => '=(time,time)', + oprnegate => '<>(time,time)', oprcode => 'time_eq', oprrest => 'eqsel', + oprjoin => 'eqjoinsel' }, +{ oid => '1109', descr => 'not equal', + oprname => '<>', oprleft => 'time', oprright => 'time', oprresult => 'bool', + oprcom => '<>(time,time)', oprnegate => '=(time,time)', oprcode => 'time_ne', + oprrest => 'neqsel', oprjoin => 'neqjoinsel' }, +{ oid => '1110', descr => 'less than', + oprname => '<', oprleft => 'time', oprright => 'time', oprresult => 'bool', + oprcom => '>(time,time)', oprnegate => '>=(time,time)', oprcode => 'time_lt', + oprrest => 'scalarltsel', oprjoin => 'scalarltjoinsel' }, +{ oid => '1111', descr => 'less than or equal', + oprname => '<=', oprleft => 'time', oprright => 'time', oprresult => 'bool', + oprcom => '>=(time,time)', oprnegate => '>(time,time)', oprcode => 'time_le', + oprrest => 'scalarlesel', oprjoin => 'scalarlejoinsel' }, +{ oid => '1112', descr => 'greater than', + oprname => '>', oprleft => 'time', oprright => 'time', oprresult => 'bool', + oprcom => '<(time,time)', oprnegate => '<=(time,time)', oprcode => 'time_gt', + oprrest => 'scalargtsel', oprjoin => 'scalargtjoinsel' }, +{ oid => '1113', descr => 'greater than or equal', + oprname => '>=', oprleft => 'time', oprright => 'time', oprresult => 'bool', + oprcom => '<=(time,time)', oprnegate => '<(time,time)', oprcode => 'time_ge', + oprrest => 'scalargesel', oprjoin => 'scalargejoinsel' }, + +# timetz operators +{ oid => '1550', descr => 'equal', + oprname => '=', oprcanmerge => 't', oprcanhash => 't', oprleft => 'timetz', + oprright => 'timetz', oprresult => 'bool', oprcom => '=(timetz,timetz)', + oprnegate => '<>(timetz,timetz)', oprcode => 'timetz_eq', oprrest => 'eqsel', + oprjoin => 'eqjoinsel' }, +{ oid => '1551', descr => 'not equal', + oprname => '<>', oprleft => 'timetz', oprright => 'timetz', + oprresult => 'bool', oprcom => '<>(timetz,timetz)', + oprnegate => '=(timetz,timetz)', oprcode => 'timetz_ne', oprrest => 'neqsel', + oprjoin => 'neqjoinsel' }, +{ oid => '1552', descr => 'less than', + oprname => '<', oprleft => 'timetz', oprright => 'timetz', + oprresult => 'bool', oprcom => '>(timetz,timetz)', + oprnegate => '>=(timetz,timetz)', oprcode => 'timetz_lt', + oprrest => 'scalarltsel', oprjoin => 'scalarltjoinsel' }, +{ oid => '1553', descr => 
'less than or equal', + oprname => '<=', oprleft => 'timetz', oprright => 'timetz', + oprresult => 'bool', oprcom => '>=(timetz,timetz)', + oprnegate => '>(timetz,timetz)', oprcode => 'timetz_le', + oprrest => 'scalarlesel', oprjoin => 'scalarlejoinsel' }, +{ oid => '1554', descr => 'greater than', + oprname => '>', oprleft => 'timetz', oprright => 'timetz', + oprresult => 'bool', oprcom => '<(timetz,timetz)', + oprnegate => '<=(timetz,timetz)', oprcode => 'timetz_gt', + oprrest => 'scalargtsel', oprjoin => 'scalargtjoinsel' }, +{ oid => '1555', descr => 'greater than or equal', + oprname => '>=', oprleft => 'timetz', oprright => 'timetz', + oprresult => 'bool', oprcom => '<=(timetz,timetz)', + oprnegate => '<(timetz,timetz)', oprcode => 'timetz_ge', + oprrest => 'scalargesel', oprjoin => 'scalargejoinsel' }, + +# float48 operators +{ oid => '1116', descr => 'add', + oprname => '+', oprleft => 'float4', oprright => 'float8', + oprresult => 'float8', oprcom => '+(float8,float4)', oprcode => 'float48pl' }, +{ oid => '1117', descr => 'subtract', + oprname => '-', oprleft => 'float4', oprright => 'float8', + oprresult => 'float8', oprcode => 'float48mi' }, +{ oid => '1118', descr => 'divide', + oprname => '/', oprleft => 'float4', oprright => 'float8', + oprresult => 'float8', oprcode => 'float48div' }, +{ oid => '1119', descr => 'multiply', + oprname => '*', oprleft => 'float4', oprright => 'float8', + oprresult => 'float8', oprcom => '*(float8,float4)', + oprcode => 'float48mul' }, +{ oid => '1120', descr => 'equal', + oprname => '=', oprcanmerge => 't', oprcanhash => 't', oprleft => 'float4', + oprright => 'float8', oprresult => 'bool', oprcom => '=(float8,float4)', + oprnegate => '<>(float4,float8)', oprcode => 'float48eq', oprrest => 'eqsel', + oprjoin => 'eqjoinsel' }, +{ oid => '1121', descr => 'not equal', + oprname => '<>', oprleft => 'float4', oprright => 'float8', + oprresult => 'bool', oprcom => '<>(float8,float4)', + oprnegate => '=(float4,float8)', oprcode => 'float48ne', oprrest => 'neqsel', + oprjoin => 'neqjoinsel' }, +{ oid => '1122', descr => 'less than', + oprname => '<', oprleft => 'float4', oprright => 'float8', + oprresult => 'bool', oprcom => '>(float8,float4)', + oprnegate => '>=(float4,float8)', oprcode => 'float48lt', + oprrest => 'scalarltsel', oprjoin => 'scalarltjoinsel' }, +{ oid => '1123', descr => 'greater than', + oprname => '>', oprleft => 'float4', oprright => 'float8', + oprresult => 'bool', oprcom => '<(float8,float4)', + oprnegate => '<=(float4,float8)', oprcode => 'float48gt', + oprrest => 'scalargtsel', oprjoin => 'scalargtjoinsel' }, +{ oid => '1124', descr => 'less than or equal', + oprname => '<=', oprleft => 'float4', oprright => 'float8', + oprresult => 'bool', oprcom => '>=(float8,float4)', + oprnegate => '>(float4,float8)', oprcode => 'float48le', + oprrest => 'scalarlesel', oprjoin => 'scalarlejoinsel' }, +{ oid => '1125', descr => 'greater than or equal', + oprname => '>=', oprleft => 'float4', oprright => 'float8', + oprresult => 'bool', oprcom => '<=(float8,float4)', + oprnegate => '<(float4,float8)', oprcode => 'float48ge', + oprrest => 'scalargesel', oprjoin => 'scalargejoinsel' }, + +# float84 operators +{ oid => '1126', descr => 'add', + oprname => '+', oprleft => 'float8', oprright => 'float4', + oprresult => 'float8', oprcom => '+(float4,float8)', oprcode => 'float84pl' }, +{ oid => '1127', descr => 'subtract', + oprname => '-', oprleft => 'float8', oprright => 'float4', + oprresult => 'float8', oprcode => 'float84mi' }, +{ oid => 
'1128', descr => 'divide', + oprname => '/', oprleft => 'float8', oprright => 'float4', + oprresult => 'float8', oprcode => 'float84div' }, +{ oid => '1129', descr => 'multiply', + oprname => '*', oprleft => 'float8', oprright => 'float4', + oprresult => 'float8', oprcom => '*(float4,float8)', + oprcode => 'float84mul' }, +{ oid => '1130', descr => 'equal', + oprname => '=', oprcanmerge => 't', oprcanhash => 't', oprleft => 'float8', + oprright => 'float4', oprresult => 'bool', oprcom => '=(float4,float8)', + oprnegate => '<>(float8,float4)', oprcode => 'float84eq', oprrest => 'eqsel', + oprjoin => 'eqjoinsel' }, +{ oid => '1131', descr => 'not equal', + oprname => '<>', oprleft => 'float8', oprright => 'float4', + oprresult => 'bool', oprcom => '<>(float4,float8)', + oprnegate => '=(float8,float4)', oprcode => 'float84ne', oprrest => 'neqsel', + oprjoin => 'neqjoinsel' }, +{ oid => '1132', descr => 'less than', + oprname => '<', oprleft => 'float8', oprright => 'float4', + oprresult => 'bool', oprcom => '>(float4,float8)', + oprnegate => '>=(float8,float4)', oprcode => 'float84lt', + oprrest => 'scalarltsel', oprjoin => 'scalarltjoinsel' }, +{ oid => '1133', descr => 'greater than', + oprname => '>', oprleft => 'float8', oprright => 'float4', + oprresult => 'bool', oprcom => '<(float4,float8)', + oprnegate => '<=(float8,float4)', oprcode => 'float84gt', + oprrest => 'scalargtsel', oprjoin => 'scalargtjoinsel' }, +{ oid => '1134', descr => 'less than or equal', + oprname => '<=', oprleft => 'float8', oprright => 'float4', + oprresult => 'bool', oprcom => '>=(float4,float8)', + oprnegate => '>(float8,float4)', oprcode => 'float84le', + oprrest => 'scalarlesel', oprjoin => 'scalarlejoinsel' }, +{ oid => '1135', descr => 'greater than or equal', + oprname => '>=', oprleft => 'float8', oprright => 'float4', + oprresult => 'bool', oprcom => '<=(float4,float8)', + oprnegate => '<(float8,float4)', oprcode => 'float84ge', + oprrest => 'scalargesel', oprjoin => 'scalargejoinsel' }, + +# LIKE hacks by Keith Parks. 
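
The float4/float8 cross-type comparison entries above illustrate the conventions used throughout this data file: oprcom and oprnegate name other operators symbolically as 'oprname(lefttype,righttype)' and are resolved to numeric OIDs by the catalog build scripts, oprcanmerge/oprcanhash mark operators usable for merge and hash joins, and oprrest/oprjoin give the planner's restriction and join selectivity estimator functions. A minimal sketch of how these links look once the catalog is built, assuming any server initialized from this tree (the query is only an illustration, using standard pg_operator columns):

    -- List the float4-vs-float8 comparison operators defined above, showing
    -- how their commutator/negator references, join flags, and selectivity
    -- estimators come out after the symbolic lookups are resolved.
    SELECT oprname,
           oprleft::regtype        AS lefttype,
           oprright::regtype       AS righttype,
           oprcom::regoperator     AS commutator,
           oprnegate::regoperator  AS negator,
           oprcanmerge, oprcanhash,
           oprrest, oprjoin
    FROM pg_operator
    WHERE oprleft  = 'float4'::regtype
      AND oprright = 'float8'::regtype
    ORDER BY oprname;

For '=' on (float4, float8) this should report =(float8,float4) as the commutator, <>(float4,float8) as the negator, both join flags set, and eqsel/eqjoinsel as the estimators, matching the OID 1120 entry above. Entries declared with oprkind => 'l' and oprleft => '0' (for example the '@' absolute-value operators) are prefix operators and simply omit whichever links do not apply.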
+{ oid => '1207', oid_symbol => 'OID_NAME_LIKE_OP', + descr => 'matches LIKE expression', + oprname => '~~', oprleft => 'name', oprright => 'text', oprresult => 'bool', + oprnegate => '!~~(name,text)', oprcode => 'namelike', oprrest => 'likesel', + oprjoin => 'likejoinsel' }, +{ oid => '1208', descr => 'does not match LIKE expression', + oprname => '!~~', oprleft => 'name', oprright => 'text', oprresult => 'bool', + oprnegate => '~~(name,text)', oprcode => 'namenlike', oprrest => 'nlikesel', + oprjoin => 'nlikejoinsel' }, +{ oid => '1209', oid_symbol => 'OID_TEXT_LIKE_OP', + descr => 'matches LIKE expression', + oprname => '~~', oprleft => 'text', oprright => 'text', oprresult => 'bool', + oprnegate => '!~~(text,text)', oprcode => 'textlike', oprrest => 'likesel', + oprjoin => 'likejoinsel' }, +{ oid => '1210', descr => 'does not match LIKE expression', + oprname => '!~~', oprleft => 'text', oprright => 'text', oprresult => 'bool', + oprnegate => '~~(text,text)', oprcode => 'textnlike', oprrest => 'nlikesel', + oprjoin => 'nlikejoinsel' }, +{ oid => '1211', oid_symbol => 'OID_BPCHAR_LIKE_OP', + descr => 'matches LIKE expression', + oprname => '~~', oprleft => 'bpchar', oprright => 'text', oprresult => 'bool', + oprnegate => '!~~(bpchar,text)', oprcode => 'bpcharlike', + oprrest => 'likesel', oprjoin => 'likejoinsel' }, +{ oid => '1212', descr => 'does not match LIKE expression', + oprname => '!~~', oprleft => 'bpchar', oprright => 'text', + oprresult => 'bool', oprnegate => '~~(bpchar,text)', oprcode => 'bpcharnlike', + oprrest => 'nlikesel', oprjoin => 'nlikejoinsel' }, + +# case-insensitive regex hacks +{ oid => '1226', oid_symbol => 'OID_NAME_ICREGEXEQ_OP', + descr => 'matches regular expression, case-insensitive', + oprname => '~*', oprleft => 'name', oprright => 'text', oprresult => 'bool', + oprnegate => '!~*(name,text)', oprcode => 'nameicregexeq', + oprrest => 'icregexeqsel', oprjoin => 'icregexeqjoinsel' }, +{ oid => '1227', + descr => 'does not match regular expression, case-insensitive', + oprname => '!~*', oprleft => 'name', oprright => 'text', oprresult => 'bool', + oprnegate => '~*(name,text)', oprcode => 'nameicregexne', + oprrest => 'icregexnesel', oprjoin => 'icregexnejoinsel' }, +{ oid => '1228', oid_symbol => 'OID_TEXT_ICREGEXEQ_OP', + descr => 'matches regular expression, case-insensitive', + oprname => '~*', oprleft => 'text', oprright => 'text', oprresult => 'bool', + oprnegate => '!~*(text,text)', oprcode => 'texticregexeq', + oprrest => 'icregexeqsel', oprjoin => 'icregexeqjoinsel' }, +{ oid => '1229', + descr => 'does not match regular expression, case-insensitive', + oprname => '!~*', oprleft => 'text', oprright => 'text', oprresult => 'bool', + oprnegate => '~*(text,text)', oprcode => 'texticregexne', + oprrest => 'icregexnesel', oprjoin => 'icregexnejoinsel' }, +{ oid => '1234', oid_symbol => 'OID_BPCHAR_ICREGEXEQ_OP', + descr => 'matches regular expression, case-insensitive', + oprname => '~*', oprleft => 'bpchar', oprright => 'text', oprresult => 'bool', + oprnegate => '!~*(bpchar,text)', oprcode => 'bpcharicregexeq', + oprrest => 'icregexeqsel', oprjoin => 'icregexeqjoinsel' }, +{ oid => '1235', + descr => 'does not match regular expression, case-insensitive', + oprname => '!~*', oprleft => 'bpchar', oprright => 'text', + oprresult => 'bool', oprnegate => '~*(bpchar,text)', + oprcode => 'bpcharicregexne', oprrest => 'icregexnesel', + oprjoin => 'icregexnejoinsel' }, + +# timestamptz operators +{ oid => '1320', descr => 'equal', + oprname => '=', oprcanmerge 
=> 't', oprcanhash => 't', + oprleft => 'timestamptz', oprright => 'timestamptz', oprresult => 'bool', + oprcom => '=(timestamptz,timestamptz)', + oprnegate => '<>(timestamptz,timestamptz)', oprcode => 'timestamptz_eq', + oprrest => 'eqsel', oprjoin => 'eqjoinsel' }, +{ oid => '1321', descr => 'not equal', + oprname => '<>', oprleft => 'timestamptz', oprright => 'timestamptz', + oprresult => 'bool', oprcom => '<>(timestamptz,timestamptz)', + oprnegate => '=(timestamptz,timestamptz)', oprcode => 'timestamptz_ne', + oprrest => 'neqsel', oprjoin => 'neqjoinsel' }, +{ oid => '1322', descr => 'less than', + oprname => '<', oprleft => 'timestamptz', oprright => 'timestamptz', + oprresult => 'bool', oprcom => '>(timestamptz,timestamptz)', + oprnegate => '>=(timestamptz,timestamptz)', oprcode => 'timestamptz_lt', + oprrest => 'scalarltsel', oprjoin => 'scalarltjoinsel' }, +{ oid => '1323', descr => 'less than or equal', + oprname => '<=', oprleft => 'timestamptz', oprright => 'timestamptz', + oprresult => 'bool', oprcom => '>=(timestamptz,timestamptz)', + oprnegate => '>(timestamptz,timestamptz)', oprcode => 'timestamptz_le', + oprrest => 'scalarlesel', oprjoin => 'scalarlejoinsel' }, +{ oid => '1324', descr => 'greater than', + oprname => '>', oprleft => 'timestamptz', oprright => 'timestamptz', + oprresult => 'bool', oprcom => '<(timestamptz,timestamptz)', + oprnegate => '<=(timestamptz,timestamptz)', oprcode => 'timestamptz_gt', + oprrest => 'scalargtsel', oprjoin => 'scalargtjoinsel' }, +{ oid => '1325', descr => 'greater than or equal', + oprname => '>=', oprleft => 'timestamptz', oprright => 'timestamptz', + oprresult => 'bool', oprcom => '<=(timestamptz,timestamptz)', + oprnegate => '<(timestamptz,timestamptz)', oprcode => 'timestamptz_ge', + oprrest => 'scalargesel', oprjoin => 'scalargejoinsel' }, +{ oid => '1327', descr => 'add', + oprname => '+', oprleft => 'timestamptz', oprright => 'interval', + oprresult => 'timestamptz', oprcom => '+(interval,timestamptz)', + oprcode => 'timestamptz_pl_interval' }, +{ oid => '1328', descr => 'subtract', + oprname => '-', oprleft => 'timestamptz', oprright => 'timestamptz', + oprresult => 'interval', oprcode => 'timestamptz_mi' }, +{ oid => '1329', descr => 'subtract', + oprname => '-', oprleft => 'timestamptz', oprright => 'interval', + oprresult => 'timestamptz', oprcode => 'timestamptz_mi_interval' }, + +# interval operators +{ oid => '1330', descr => 'equal', + oprname => '=', oprcanmerge => 't', oprcanhash => 't', oprleft => 'interval', + oprright => 'interval', oprresult => 'bool', oprcom => '=(interval,interval)', + oprnegate => '<>(interval,interval)', oprcode => 'interval_eq', + oprrest => 'eqsel', oprjoin => 'eqjoinsel' }, +{ oid => '1331', descr => 'not equal', + oprname => '<>', oprleft => 'interval', oprright => 'interval', + oprresult => 'bool', oprcom => '<>(interval,interval)', + oprnegate => '=(interval,interval)', oprcode => 'interval_ne', + oprrest => 'neqsel', oprjoin => 'neqjoinsel' }, +{ oid => '1332', descr => 'less than', + oprname => '<', oprleft => 'interval', oprright => 'interval', + oprresult => 'bool', oprcom => '>(interval,interval)', + oprnegate => '>=(interval,interval)', oprcode => 'interval_lt', + oprrest => 'scalarltsel', oprjoin => 'scalarltjoinsel' }, +{ oid => '1333', descr => 'less than or equal', + oprname => '<=', oprleft => 'interval', oprright => 'interval', + oprresult => 'bool', oprcom => '>=(interval,interval)', + oprnegate => '>(interval,interval)', oprcode => 'interval_le', + oprrest => 'scalarlesel', 
oprjoin => 'scalarlejoinsel' }, +{ oid => '1334', descr => 'greater than', + oprname => '>', oprleft => 'interval', oprright => 'interval', + oprresult => 'bool', oprcom => '<(interval,interval)', + oprnegate => '<=(interval,interval)', oprcode => 'interval_gt', + oprrest => 'scalargtsel', oprjoin => 'scalargtjoinsel' }, +{ oid => '1335', descr => 'greater than or equal', + oprname => '>=', oprleft => 'interval', oprright => 'interval', + oprresult => 'bool', oprcom => '<=(interval,interval)', + oprnegate => '<(interval,interval)', oprcode => 'interval_ge', + oprrest => 'scalargesel', oprjoin => 'scalargejoinsel' }, + +{ oid => '1336', descr => 'negate', + oprname => '-', oprkind => 'l', oprleft => '0', oprright => 'interval', + oprresult => 'interval', oprcode => 'interval_um' }, +{ oid => '1337', descr => 'add', + oprname => '+', oprleft => 'interval', oprright => 'interval', + oprresult => 'interval', oprcom => '+(interval,interval)', + oprcode => 'interval_pl' }, +{ oid => '1338', descr => 'subtract', + oprname => '-', oprleft => 'interval', oprright => 'interval', + oprresult => 'interval', oprcode => 'interval_mi' }, + +{ oid => '1360', descr => 'convert date and time to timestamp', + oprname => '+', oprleft => 'date', oprright => 'time', + oprresult => 'timestamp', oprcom => '+(time,date)', + oprcode => 'datetime_pl' }, +{ oid => '1361', + descr => 'convert date and time with time zone to timestamp with time zone', + oprname => '+', oprleft => 'date', oprright => 'timetz', + oprresult => 'timestamptz', oprcom => '+(timetz,date)', + oprcode => 'datetimetz_pl' }, +{ oid => '1363', descr => 'convert time and date to timestamp', + oprname => '+', oprleft => 'time', oprright => 'date', + oprresult => 'timestamp', oprcom => '+(date,time)', + oprcode => 'timedate_pl' }, +{ oid => '1366', + descr => 'convert time with time zone and date to timestamp with time zone', + oprname => '+', oprleft => 'timetz', oprright => 'date', + oprresult => 'timestamptz', oprcom => '+(date,timetz)', + oprcode => 'timetzdate_pl' }, + +{ oid => '1399', descr => 'subtract', + oprname => '-', oprleft => 'time', oprright => 'time', + oprresult => 'interval', oprcode => 'time_mi_time' }, + +# additional geometric operators - thomas 97/04/18 +{ oid => '1420', descr => 'center of', + oprname => '@@', oprkind => 'l', oprleft => '0', oprright => 'circle', + oprresult => 'point', oprcode => 'circle_center' }, +{ oid => '1500', descr => 'equal by area', + oprname => '=', oprleft => 'circle', oprright => 'circle', + oprresult => 'bool', oprcom => '=(circle,circle)', + oprnegate => '<>(circle,circle)', oprcode => 'circle_eq', oprrest => 'eqsel', + oprjoin => 'eqjoinsel' }, +{ oid => '1501', descr => 'not equal by area', + oprname => '<>', oprleft => 'circle', oprright => 'circle', + oprresult => 'bool', oprcom => '<>(circle,circle)', + oprnegate => '=(circle,circle)', oprcode => 'circle_ne', oprrest => 'neqsel', + oprjoin => 'neqjoinsel' }, +{ oid => '1502', descr => 'less than by area', + oprname => '<', oprleft => 'circle', oprright => 'circle', + oprresult => 'bool', oprcom => '>(circle,circle)', + oprnegate => '>=(circle,circle)', oprcode => 'circle_lt', + oprrest => 'areasel', oprjoin => 'areajoinsel' }, +{ oid => '1503', descr => 'greater than by area', + oprname => '>', oprleft => 'circle', oprright => 'circle', + oprresult => 'bool', oprcom => '<(circle,circle)', + oprnegate => '<=(circle,circle)', oprcode => 'circle_gt', + oprrest => 'areasel', oprjoin => 'areajoinsel' }, +{ oid => '1504', descr => 'less than or 
equal by area', + oprname => '<=', oprleft => 'circle', oprright => 'circle', + oprresult => 'bool', oprcom => '>=(circle,circle)', + oprnegate => '>(circle,circle)', oprcode => 'circle_le', oprrest => 'areasel', + oprjoin => 'areajoinsel' }, +{ oid => '1505', descr => 'greater than or equal by area', + oprname => '>=', oprleft => 'circle', oprright => 'circle', + oprresult => 'bool', oprcom => '<=(circle,circle)', + oprnegate => '<(circle,circle)', oprcode => 'circle_ge', oprrest => 'areasel', + oprjoin => 'areajoinsel' }, + +{ oid => '1506', descr => 'is left of', + oprname => '<<', oprleft => 'circle', oprright => 'circle', + oprresult => 'bool', oprcode => 'circle_left', oprrest => 'positionsel', + oprjoin => 'positionjoinsel' }, +{ oid => '1507', descr => 'overlaps or is left of', + oprname => '&<', oprleft => 'circle', oprright => 'circle', + oprresult => 'bool', oprcode => 'circle_overleft', oprrest => 'positionsel', + oprjoin => 'positionjoinsel' }, +{ oid => '1508', descr => 'overlaps or is right of', + oprname => '&>', oprleft => 'circle', oprright => 'circle', + oprresult => 'bool', oprcode => 'circle_overright', oprrest => 'positionsel', + oprjoin => 'positionjoinsel' }, +{ oid => '1509', descr => 'is right of', + oprname => '>>', oprleft => 'circle', oprright => 'circle', + oprresult => 'bool', oprcode => 'circle_right', oprrest => 'positionsel', + oprjoin => 'positionjoinsel' }, +{ oid => '1510', descr => 'is contained by', + oprname => '<@', oprleft => 'circle', oprright => 'circle', + oprresult => 'bool', oprcom => '@>(circle,circle)', + oprcode => 'circle_contained', oprrest => 'contsel', + oprjoin => 'contjoinsel' }, +{ oid => '1511', descr => 'contains', + oprname => '@>', oprleft => 'circle', oprright => 'circle', + oprresult => 'bool', oprcom => '<@(circle,circle)', + oprcode => 'circle_contain', oprrest => 'contsel', oprjoin => 'contjoinsel' }, +{ oid => '1512', descr => 'same as', + oprname => '~=', oprleft => 'circle', oprright => 'circle', + oprresult => 'bool', oprcom => '~=(circle,circle)', oprcode => 'circle_same', + oprrest => 'eqsel', oprjoin => 'eqjoinsel' }, +{ oid => '1513', descr => 'overlaps', + oprname => '&&', oprleft => 'circle', oprright => 'circle', + oprresult => 'bool', oprcom => '&&(circle,circle)', + oprcode => 'circle_overlap', oprrest => 'areasel', oprjoin => 'areajoinsel' }, +{ oid => '1514', descr => 'is above', + oprname => '|>>', oprleft => 'circle', oprright => 'circle', + oprresult => 'bool', oprcode => 'circle_above', oprrest => 'positionsel', + oprjoin => 'positionjoinsel' }, +{ oid => '1515', descr => 'is below', + oprname => '<<|', oprleft => 'circle', oprright => 'circle', + oprresult => 'bool', oprcode => 'circle_below', oprrest => 'positionsel', + oprjoin => 'positionjoinsel' }, + +{ oid => '1516', descr => 'add', + oprname => '+', oprleft => 'circle', oprright => 'point', + oprresult => 'circle', oprcode => 'circle_add_pt' }, +{ oid => '1517', descr => 'subtract', + oprname => '-', oprleft => 'circle', oprright => 'point', + oprresult => 'circle', oprcode => 'circle_sub_pt' }, +{ oid => '1518', descr => 'multiply', + oprname => '*', oprleft => 'circle', oprright => 'point', + oprresult => 'circle', oprcode => 'circle_mul_pt' }, +{ oid => '1519', descr => 'divide', + oprname => '/', oprleft => 'circle', oprright => 'point', + oprresult => 'circle', oprcode => 'circle_div_pt' }, + +{ oid => '1520', descr => 'distance between', + oprname => '<->', oprleft => 'circle', oprright => 'circle', + oprresult => 'float8', oprcom => 
'<->(circle,circle)', + oprcode => 'circle_distance' }, +{ oid => '1521', descr => 'number of points', + oprname => '#', oprkind => 'l', oprleft => '0', oprright => 'polygon', + oprresult => 'int4', oprcode => 'poly_npoints' }, +{ oid => '1522', descr => 'distance between', + oprname => '<->', oprleft => 'point', oprright => 'circle', + oprresult => 'float8', oprcom => '<->(circle,point)', oprcode => 'dist_pc' }, +{ oid => '3291', descr => 'distance between', + oprname => '<->', oprleft => 'circle', oprright => 'point', + oprresult => 'float8', oprcom => '<->(point,circle)', + oprcode => 'dist_cpoint' }, +{ oid => '3276', descr => 'distance between', + oprname => '<->', oprleft => 'point', oprright => 'polygon', + oprresult => 'float8', oprcom => '<->(polygon,point)', + oprcode => 'dist_ppoly' }, +{ oid => '3289', descr => 'distance between', + oprname => '<->', oprleft => 'polygon', oprright => 'point', + oprresult => 'float8', oprcom => '<->(point,polygon)', + oprcode => 'dist_polyp' }, +{ oid => '1523', descr => 'distance between', + oprname => '<->', oprleft => 'circle', oprright => 'polygon', + oprresult => 'float8', oprcode => 'dist_cpoly' }, + +# additional geometric operators - thomas 1997-07-09 +{ oid => '1524', descr => 'distance between', + oprname => '<->', oprleft => 'line', oprright => 'box', oprresult => 'float8', + oprcode => 'dist_lb' }, + +{ oid => '1525', descr => 'intersect', + oprname => '?#', oprleft => 'lseg', oprright => 'lseg', oprresult => 'bool', + oprcom => '?#(lseg,lseg)', oprcode => 'lseg_intersect' }, +{ oid => '1526', descr => 'parallel', + oprname => '?||', oprleft => 'lseg', oprright => 'lseg', oprresult => 'bool', + oprcom => '?||(lseg,lseg)', oprcode => 'lseg_parallel' }, +{ oid => '1527', descr => 'perpendicular', + oprname => '?-|', oprleft => 'lseg', oprright => 'lseg', oprresult => 'bool', + oprcom => '?-|(lseg,lseg)', oprcode => 'lseg_perp' }, +{ oid => '1528', descr => 'horizontal', + oprname => '?-', oprkind => 'l', oprleft => '0', oprright => 'lseg', + oprresult => 'bool', oprcode => 'lseg_horizontal' }, +{ oid => '1529', descr => 'vertical', + oprname => '?|', oprkind => 'l', oprleft => '0', oprright => 'lseg', + oprresult => 'bool', oprcode => 'lseg_vertical' }, +{ oid => '1535', descr => 'equal', + oprname => '=', oprleft => 'lseg', oprright => 'lseg', oprresult => 'bool', + oprcom => '=(lseg,lseg)', oprnegate => '<>(lseg,lseg)', oprcode => 'lseg_eq', + oprrest => 'eqsel', oprjoin => 'eqjoinsel' }, +{ oid => '1536', descr => 'intersection point', + oprname => '#', oprleft => 'lseg', oprright => 'lseg', oprresult => 'point', + oprcom => '#(lseg,lseg)', oprcode => 'lseg_interpt' }, +{ oid => '1537', descr => 'intersect', + oprname => '?#', oprleft => 'lseg', oprright => 'line', oprresult => 'bool', + oprcode => 'inter_sl' }, +{ oid => '1538', descr => 'intersect', + oprname => '?#', oprleft => 'lseg', oprright => 'box', oprresult => 'bool', + oprcode => 'inter_sb' }, +{ oid => '1539', descr => 'intersect', + oprname => '?#', oprleft => 'line', oprright => 'box', oprresult => 'bool', + oprcode => 'inter_lb' }, + +{ oid => '1546', descr => 'point on line', + oprname => '<@', oprleft => 'point', oprright => 'line', oprresult => 'bool', + oprcode => 'on_pl' }, +{ oid => '1547', descr => 'is contained by', + oprname => '<@', oprleft => 'point', oprright => 'lseg', oprresult => 'bool', + oprcode => 'on_ps' }, +{ oid => '1548', descr => 'lseg on line', + oprname => '<@', oprleft => 'lseg', oprright => 'line', oprresult => 'bool', + oprcode => 'on_sl' 
}, +{ oid => '1549', descr => 'is contained by', + oprname => '<@', oprleft => 'lseg', oprright => 'box', oprresult => 'bool', + oprcode => 'on_sb' }, + +{ oid => '1557', descr => 'closest point to A on B', + oprname => '##', oprleft => 'point', oprright => 'line', oprresult => 'point', + oprcode => 'close_pl' }, +{ oid => '1558', descr => 'closest point to A on B', + oprname => '##', oprleft => 'point', oprright => 'lseg', oprresult => 'point', + oprcode => 'close_ps' }, +{ oid => '1559', descr => 'closest point to A on B', + oprname => '##', oprleft => 'point', oprright => 'box', oprresult => 'point', + oprcode => 'close_pb' }, + +{ oid => '1566', descr => 'closest point to A on B', + oprname => '##', oprleft => 'lseg', oprright => 'line', oprresult => 'point', + oprcode => 'close_sl' }, +{ oid => '1567', descr => 'closest point to A on B', + oprname => '##', oprleft => 'lseg', oprright => 'box', oprresult => 'point', + oprcode => 'close_sb' }, +{ oid => '1568', descr => 'closest point to A on B', + oprname => '##', oprleft => 'line', oprright => 'box', oprresult => 'point', + oprcode => 'close_lb' }, +{ oid => '1577', descr => 'closest point to A on B', + oprname => '##', oprleft => 'line', oprright => 'lseg', oprresult => 'point', + oprcode => 'close_ls' }, +{ oid => '1578', descr => 'closest point to A on B', + oprname => '##', oprleft => 'lseg', oprright => 'lseg', oprresult => 'point', + oprcode => 'close_lseg' }, +{ oid => '1583', descr => 'multiply', + oprname => '*', oprleft => 'interval', oprright => 'float8', + oprresult => 'interval', oprcom => '*(float8,interval)', + oprcode => 'interval_mul' }, +{ oid => '1584', descr => 'multiply', + oprname => '*', oprleft => 'float8', oprright => 'interval', + oprresult => 'interval', oprcom => '*(interval,float8)', + oprcode => 'mul_d_interval' }, +{ oid => '1585', descr => 'divide', + oprname => '/', oprleft => 'interval', oprright => 'float8', + oprresult => 'interval', oprcode => 'interval_div' }, + +{ oid => '1586', descr => 'not equal', + oprname => '<>', oprleft => 'lseg', oprright => 'lseg', oprresult => 'bool', + oprcom => '<>(lseg,lseg)', oprnegate => '=(lseg,lseg)', oprcode => 'lseg_ne', + oprrest => 'neqsel', oprjoin => 'neqjoinsel' }, +{ oid => '1587', descr => 'less than by length', + oprname => '<', oprleft => 'lseg', oprright => 'lseg', oprresult => 'bool', + oprcom => '>(lseg,lseg)', oprnegate => '>=(lseg,lseg)', + oprcode => 'lseg_lt' }, +{ oid => '1588', descr => 'less than or equal by length', + oprname => '<=', oprleft => 'lseg', oprright => 'lseg', oprresult => 'bool', + oprcom => '>=(lseg,lseg)', oprnegate => '>(lseg,lseg)', + oprcode => 'lseg_le' }, +{ oid => '1589', descr => 'greater than by length', + oprname => '>', oprleft => 'lseg', oprright => 'lseg', oprresult => 'bool', + oprcom => '<(lseg,lseg)', oprnegate => '<=(lseg,lseg)', + oprcode => 'lseg_gt' }, +{ oid => '1590', descr => 'greater than or equal by length', + oprname => '>=', oprleft => 'lseg', oprright => 'lseg', oprresult => 'bool', + oprcom => '<=(lseg,lseg)', oprnegate => '<(lseg,lseg)', + oprcode => 'lseg_ge' }, + +{ oid => '1591', descr => 'distance between endpoints', + oprname => '@-@', oprkind => 'l', oprleft => '0', oprright => 'lseg', + oprresult => 'float8', oprcode => 'lseg_length' }, + +{ oid => '1611', descr => 'intersect', + oprname => '?#', oprleft => 'line', oprright => 'line', oprresult => 'bool', + oprcom => '?#(line,line)', oprcode => 'line_intersect' }, +{ oid => '1612', descr => 'parallel', + oprname => '?||', oprleft => 'line', 
oprright => 'line', oprresult => 'bool', + oprcom => '?||(line,line)', oprcode => 'line_parallel' }, +{ oid => '1613', descr => 'perpendicular', + oprname => '?-|', oprleft => 'line', oprright => 'line', oprresult => 'bool', + oprcom => '?-|(line,line)', oprcode => 'line_perp' }, +{ oid => '1614', descr => 'horizontal', + oprname => '?-', oprkind => 'l', oprleft => '0', oprright => 'line', + oprresult => 'bool', oprcode => 'line_horizontal' }, +{ oid => '1615', descr => 'vertical', + oprname => '?|', oprkind => 'l', oprleft => '0', oprright => 'line', + oprresult => 'bool', oprcode => 'line_vertical' }, +{ oid => '1616', descr => 'equal', + oprname => '=', oprleft => 'line', oprright => 'line', oprresult => 'bool', + oprcom => '=(line,line)', oprcode => 'line_eq', oprrest => 'eqsel', + oprjoin => 'eqjoinsel' }, +{ oid => '1617', descr => 'intersection point', + oprname => '#', oprleft => 'line', oprright => 'line', oprresult => 'point', + oprcom => '#(line,line)', oprcode => 'line_interpt' }, + +# MACADDR type +{ oid => '1220', descr => 'equal', + oprname => '=', oprcanmerge => 't', oprcanhash => 't', oprleft => 'macaddr', + oprright => 'macaddr', oprresult => 'bool', oprcom => '=(macaddr,macaddr)', + oprnegate => '<>(macaddr,macaddr)', oprcode => 'macaddr_eq', + oprrest => 'eqsel', oprjoin => 'eqjoinsel' }, +{ oid => '1221', descr => 'not equal', + oprname => '<>', oprleft => 'macaddr', oprright => 'macaddr', + oprresult => 'bool', oprcom => '<>(macaddr,macaddr)', + oprnegate => '=(macaddr,macaddr)', oprcode => 'macaddr_ne', + oprrest => 'neqsel', oprjoin => 'neqjoinsel' }, +{ oid => '1222', descr => 'less than', + oprname => '<', oprleft => 'macaddr', oprright => 'macaddr', + oprresult => 'bool', oprcom => '>(macaddr,macaddr)', + oprnegate => '>=(macaddr,macaddr)', oprcode => 'macaddr_lt', + oprrest => 'scalarltsel', oprjoin => 'scalarltjoinsel' }, +{ oid => '1223', descr => 'less than or equal', + oprname => '<=', oprleft => 'macaddr', oprright => 'macaddr', + oprresult => 'bool', oprcom => '>=(macaddr,macaddr)', + oprnegate => '>(macaddr,macaddr)', oprcode => 'macaddr_le', + oprrest => 'scalarlesel', oprjoin => 'scalarlejoinsel' }, +{ oid => '1224', descr => 'greater than', + oprname => '>', oprleft => 'macaddr', oprright => 'macaddr', + oprresult => 'bool', oprcom => '<(macaddr,macaddr)', + oprnegate => '<=(macaddr,macaddr)', oprcode => 'macaddr_gt', + oprrest => 'scalargtsel', oprjoin => 'scalargtjoinsel' }, +{ oid => '1225', descr => 'greater than or equal', + oprname => '>=', oprleft => 'macaddr', oprright => 'macaddr', + oprresult => 'bool', oprcom => '<=(macaddr,macaddr)', + oprnegate => '<(macaddr,macaddr)', oprcode => 'macaddr_ge', + oprrest => 'scalargesel', oprjoin => 'scalargejoinsel' }, + +{ oid => '3147', descr => 'bitwise not', + oprname => '~', oprkind => 'l', oprleft => '0', oprright => 'macaddr', + oprresult => 'macaddr', oprcode => 'macaddr_not' }, +{ oid => '3148', descr => 'bitwise and', + oprname => '&', oprleft => 'macaddr', oprright => 'macaddr', + oprresult => 'macaddr', oprcode => 'macaddr_and' }, +{ oid => '3149', descr => 'bitwise or', + oprname => '|', oprleft => 'macaddr', oprright => 'macaddr', + oprresult => 'macaddr', oprcode => 'macaddr_or' }, + +# MACADDR8 type +{ oid => '3362', descr => 'equal', + oprname => '=', oprcanmerge => 't', oprcanhash => 't', oprleft => 'macaddr8', + oprright => 'macaddr8', oprresult => 'bool', oprcom => '=(macaddr8,macaddr8)', + oprnegate => '<>(macaddr8,macaddr8)', oprcode => 'macaddr8_eq', + oprrest => 'eqsel', oprjoin => 
'eqjoinsel' }, +{ oid => '3363', descr => 'not equal', + oprname => '<>', oprleft => 'macaddr8', oprright => 'macaddr8', + oprresult => 'bool', oprcom => '<>(macaddr8,macaddr8)', + oprnegate => '=(macaddr8,macaddr8)', oprcode => 'macaddr8_ne', + oprrest => 'neqsel', oprjoin => 'neqjoinsel' }, +{ oid => '3364', descr => 'less than', + oprname => '<', oprleft => 'macaddr8', oprright => 'macaddr8', + oprresult => 'bool', oprcom => '>(macaddr8,macaddr8)', + oprnegate => '>=(macaddr8,macaddr8)', oprcode => 'macaddr8_lt', + oprrest => 'scalarltsel', oprjoin => 'scalarltjoinsel' }, +{ oid => '3365', descr => 'less than or equal', + oprname => '<=', oprleft => 'macaddr8', oprright => 'macaddr8', + oprresult => 'bool', oprcom => '>=(macaddr8,macaddr8)', + oprnegate => '>(macaddr8,macaddr8)', oprcode => 'macaddr8_le', + oprrest => 'scalarlesel', oprjoin => 'scalarlejoinsel' }, +{ oid => '3366', descr => 'greater than', + oprname => '>', oprleft => 'macaddr8', oprright => 'macaddr8', + oprresult => 'bool', oprcom => '<(macaddr8,macaddr8)', + oprnegate => '<=(macaddr8,macaddr8)', oprcode => 'macaddr8_gt', + oprrest => 'scalargtsel', oprjoin => 'scalargtjoinsel' }, +{ oid => '3367', descr => 'greater than or equal', + oprname => '>=', oprleft => 'macaddr8', oprright => 'macaddr8', + oprresult => 'bool', oprcom => '<=(macaddr8,macaddr8)', + oprnegate => '<(macaddr8,macaddr8)', oprcode => 'macaddr8_ge', + oprrest => 'scalargesel', oprjoin => 'scalargejoinsel' }, + +{ oid => '3368', descr => 'bitwise not', + oprname => '~', oprkind => 'l', oprleft => '0', oprright => 'macaddr8', + oprresult => 'macaddr8', oprcode => 'macaddr8_not' }, +{ oid => '3369', descr => 'bitwise and', + oprname => '&', oprleft => 'macaddr8', oprright => 'macaddr8', + oprresult => 'macaddr8', oprcode => 'macaddr8_and' }, +{ oid => '3370', descr => 'bitwise or', + oprname => '|', oprleft => 'macaddr8', oprright => 'macaddr8', + oprresult => 'macaddr8', oprcode => 'macaddr8_or' }, + +# INET type (these also support CIDR via implicit cast) +{ oid => '1201', descr => 'equal', + oprname => '=', oprcanmerge => 't', oprcanhash => 't', oprleft => 'inet', + oprright => 'inet', oprresult => 'bool', oprcom => '=(inet,inet)', + oprnegate => '<>(inet,inet)', oprcode => 'network_eq', oprrest => 'eqsel', + oprjoin => 'eqjoinsel' }, +{ oid => '1202', descr => 'not equal', + oprname => '<>', oprleft => 'inet', oprright => 'inet', oprresult => 'bool', + oprcom => '<>(inet,inet)', oprnegate => '=(inet,inet)', + oprcode => 'network_ne', oprrest => 'neqsel', oprjoin => 'neqjoinsel' }, +{ oid => '1203', descr => 'less than', + oprname => '<', oprleft => 'inet', oprright => 'inet', oprresult => 'bool', + oprcom => '>(inet,inet)', oprnegate => '>=(inet,inet)', + oprcode => 'network_lt', oprrest => 'scalarltsel', + oprjoin => 'scalarltjoinsel' }, +{ oid => '1204', descr => 'less than or equal', + oprname => '<=', oprleft => 'inet', oprright => 'inet', oprresult => 'bool', + oprcom => '>=(inet,inet)', oprnegate => '>(inet,inet)', + oprcode => 'network_le', oprrest => 'scalarlesel', + oprjoin => 'scalarlejoinsel' }, +{ oid => '1205', descr => 'greater than', + oprname => '>', oprleft => 'inet', oprright => 'inet', oprresult => 'bool', + oprcom => '<(inet,inet)', oprnegate => '<=(inet,inet)', + oprcode => 'network_gt', oprrest => 'scalargtsel', + oprjoin => 'scalargtjoinsel' }, +{ oid => '1206', descr => 'greater than or equal', + oprname => '>=', oprleft => 'inet', oprright => 'inet', oprresult => 'bool', + oprcom => '<=(inet,inet)', oprnegate => 
'<(inet,inet)', + oprcode => 'network_ge', oprrest => 'scalargesel', + oprjoin => 'scalargejoinsel' }, +{ oid => '931', oid_symbol => 'OID_INET_SUB_OP', descr => 'is subnet', + oprname => '<<', oprleft => 'inet', oprright => 'inet', oprresult => 'bool', + oprcom => '>>(inet,inet)', oprcode => 'network_sub', oprrest => 'networksel', + oprjoin => 'networkjoinsel' }, +{ oid => '932', oid_symbol => 'OID_INET_SUBEQ_OP', + descr => 'is subnet or equal', + oprname => '<<=', oprleft => 'inet', oprright => 'inet', oprresult => 'bool', + oprcom => '>>=(inet,inet)', oprcode => 'network_subeq', + oprrest => 'networksel', oprjoin => 'networkjoinsel' }, +{ oid => '933', oid_symbol => 'OID_INET_SUP_OP', descr => 'is supernet', + oprname => '>>', oprleft => 'inet', oprright => 'inet', oprresult => 'bool', + oprcom => '<<(inet,inet)', oprcode => 'network_sup', oprrest => 'networksel', + oprjoin => 'networkjoinsel' }, +{ oid => '934', oid_symbol => 'OID_INET_SUPEQ_OP', + descr => 'is supernet or equal', + oprname => '>>=', oprleft => 'inet', oprright => 'inet', oprresult => 'bool', + oprcom => '<<=(inet,inet)', oprcode => 'network_supeq', + oprrest => 'networksel', oprjoin => 'networkjoinsel' }, +{ oid => '3552', oid_symbol => 'OID_INET_OVERLAP_OP', + descr => 'overlaps (is subnet or supernet)', + oprname => '&&', oprleft => 'inet', oprright => 'inet', oprresult => 'bool', + oprcom => '&&(inet,inet)', oprcode => 'network_overlap', + oprrest => 'networksel', oprjoin => 'networkjoinsel' }, + +{ oid => '2634', descr => 'bitwise not', + oprname => '~', oprkind => 'l', oprleft => '0', oprright => 'inet', + oprresult => 'inet', oprcode => 'inetnot' }, +{ oid => '2635', descr => 'bitwise and', + oprname => '&', oprleft => 'inet', oprright => 'inet', oprresult => 'inet', + oprcode => 'inetand' }, +{ oid => '2636', descr => 'bitwise or', + oprname => '|', oprleft => 'inet', oprright => 'inet', oprresult => 'inet', + oprcode => 'inetor' }, +{ oid => '2637', descr => 'add', + oprname => '+', oprleft => 'inet', oprright => 'int8', oprresult => 'inet', + oprcom => '+(int8,inet)', oprcode => 'inetpl' }, +{ oid => '2638', descr => 'add', + oprname => '+', oprleft => 'int8', oprright => 'inet', oprresult => 'inet', + oprcom => '+(inet,int8)', oprcode => 'int8pl_inet' }, +{ oid => '2639', descr => 'subtract', + oprname => '-', oprleft => 'inet', oprright => 'int8', oprresult => 'inet', + oprcode => 'inetmi_int8' }, +{ oid => '2640', descr => 'subtract', + oprname => '-', oprleft => 'inet', oprright => 'inet', oprresult => 'int8', + oprcode => 'inetmi' }, + +# case-insensitive LIKE hacks +{ oid => '1625', oid_symbol => 'OID_NAME_ICLIKE_OP', + descr => 'matches LIKE expression, case-insensitive', + oprname => '~~*', oprleft => 'name', oprright => 'text', oprresult => 'bool', + oprnegate => '!~~*(name,text)', oprcode => 'nameiclike', + oprrest => 'iclikesel', oprjoin => 'iclikejoinsel' }, +{ oid => '1626', descr => 'does not match LIKE expression, case-insensitive', + oprname => '!~~*', oprleft => 'name', oprright => 'text', oprresult => 'bool', + oprnegate => '~~*(name,text)', oprcode => 'nameicnlike', + oprrest => 'icnlikesel', oprjoin => 'icnlikejoinsel' }, +{ oid => '1627', oid_symbol => 'OID_TEXT_ICLIKE_OP', + descr => 'matches LIKE expression, case-insensitive', + oprname => '~~*', oprleft => 'text', oprright => 'text', oprresult => 'bool', + oprnegate => '!~~*(text,text)', oprcode => 'texticlike', + oprrest => 'iclikesel', oprjoin => 'iclikejoinsel' }, +{ oid => '1628', descr => 'does not match LIKE expression, 
case-insensitive', + oprname => '!~~*', oprleft => 'text', oprright => 'text', oprresult => 'bool', + oprnegate => '~~*(text,text)', oprcode => 'texticnlike', + oprrest => 'icnlikesel', oprjoin => 'icnlikejoinsel' }, +{ oid => '1629', oid_symbol => 'OID_BPCHAR_ICLIKE_OP', + descr => 'matches LIKE expression, case-insensitive', + oprname => '~~*', oprleft => 'bpchar', oprright => 'text', + oprresult => 'bool', oprnegate => '!~~*(bpchar,text)', + oprcode => 'bpchariclike', oprrest => 'iclikesel', + oprjoin => 'iclikejoinsel' }, +{ oid => '1630', descr => 'does not match LIKE expression, case-insensitive', + oprname => '!~~*', oprleft => 'bpchar', oprright => 'text', + oprresult => 'bool', oprnegate => '~~*(bpchar,text)', + oprcode => 'bpcharicnlike', oprrest => 'icnlikesel', + oprjoin => 'icnlikejoinsel' }, + +# NUMERIC type - OID's 1700-1799 +{ oid => '1751', descr => 'negate', + oprname => '-', oprkind => 'l', oprleft => '0', oprright => 'numeric', + oprresult => 'numeric', oprcode => 'numeric_uminus' }, +{ oid => '1752', descr => 'equal', + oprname => '=', oprcanmerge => 't', oprcanhash => 't', oprleft => 'numeric', + oprright => 'numeric', oprresult => 'bool', oprcom => '=(numeric,numeric)', + oprnegate => '<>(numeric,numeric)', oprcode => 'numeric_eq', + oprrest => 'eqsel', oprjoin => 'eqjoinsel' }, +{ oid => '1753', descr => 'not equal', + oprname => '<>', oprleft => 'numeric', oprright => 'numeric', + oprresult => 'bool', oprcom => '<>(numeric,numeric)', + oprnegate => '=(numeric,numeric)', oprcode => 'numeric_ne', + oprrest => 'neqsel', oprjoin => 'neqjoinsel' }, +{ oid => '1754', descr => 'less than', + oprname => '<', oprleft => 'numeric', oprright => 'numeric', + oprresult => 'bool', oprcom => '>(numeric,numeric)', + oprnegate => '>=(numeric,numeric)', oprcode => 'numeric_lt', + oprrest => 'scalarltsel', oprjoin => 'scalarltjoinsel' }, +{ oid => '1755', descr => 'less than or equal', + oprname => '<=', oprleft => 'numeric', oprright => 'numeric', + oprresult => 'bool', oprcom => '>=(numeric,numeric)', + oprnegate => '>(numeric,numeric)', oprcode => 'numeric_le', + oprrest => 'scalarlesel', oprjoin => 'scalarlejoinsel' }, +{ oid => '1756', descr => 'greater than', + oprname => '>', oprleft => 'numeric', oprright => 'numeric', + oprresult => 'bool', oprcom => '<(numeric,numeric)', + oprnegate => '<=(numeric,numeric)', oprcode => 'numeric_gt', + oprrest => 'scalargtsel', oprjoin => 'scalargtjoinsel' }, +{ oid => '1757', descr => 'greater than or equal', + oprname => '>=', oprleft => 'numeric', oprright => 'numeric', + oprresult => 'bool', oprcom => '<=(numeric,numeric)', + oprnegate => '<(numeric,numeric)', oprcode => 'numeric_ge', + oprrest => 'scalargesel', oprjoin => 'scalargejoinsel' }, +{ oid => '1758', descr => 'add', + oprname => '+', oprleft => 'numeric', oprright => 'numeric', + oprresult => 'numeric', oprcom => '+(numeric,numeric)', + oprcode => 'numeric_add' }, +{ oid => '1759', descr => 'subtract', + oprname => '-', oprleft => 'numeric', oprright => 'numeric', + oprresult => 'numeric', oprcode => 'numeric_sub' }, +{ oid => '1760', descr => 'multiply', + oprname => '*', oprleft => 'numeric', oprright => 'numeric', + oprresult => 'numeric', oprcom => '*(numeric,numeric)', + oprcode => 'numeric_mul' }, +{ oid => '1761', descr => 'divide', + oprname => '/', oprleft => 'numeric', oprright => 'numeric', + oprresult => 'numeric', oprcode => 'numeric_div' }, +{ oid => '1762', descr => 'modulus', + oprname => '%', oprleft => 'numeric', oprright => 'numeric', + oprresult => 
'numeric', oprcode => 'numeric_mod' }, +{ oid => '1038', descr => 'exponentiation', + oprname => '^', oprleft => 'numeric', oprright => 'numeric', + oprresult => 'numeric', oprcode => 'numeric_power' }, +{ oid => '1763', descr => 'absolute value', + oprname => '@', oprkind => 'l', oprleft => '0', oprright => 'numeric', + oprresult => 'numeric', oprcode => 'numeric_abs' }, + +{ oid => '1784', descr => 'equal', + oprname => '=', oprcanmerge => 't', oprleft => 'bit', oprright => 'bit', + oprresult => 'bool', oprcom => '=(bit,bit)', oprnegate => '<>(bit,bit)', + oprcode => 'biteq', oprrest => 'eqsel', oprjoin => 'eqjoinsel' }, +{ oid => '1785', descr => 'not equal', + oprname => '<>', oprleft => 'bit', oprright => 'bit', oprresult => 'bool', + oprcom => '<>(bit,bit)', oprnegate => '=(bit,bit)', oprcode => 'bitne', + oprrest => 'neqsel', oprjoin => 'neqjoinsel' }, +{ oid => '1786', descr => 'less than', + oprname => '<', oprleft => 'bit', oprright => 'bit', oprresult => 'bool', + oprcom => '>(bit,bit)', oprnegate => '>=(bit,bit)', oprcode => 'bitlt', + oprrest => 'scalarltsel', oprjoin => 'scalarltjoinsel' }, +{ oid => '1787', descr => 'greater than', + oprname => '>', oprleft => 'bit', oprright => 'bit', oprresult => 'bool', + oprcom => '<(bit,bit)', oprnegate => '<=(bit,bit)', oprcode => 'bitgt', + oprrest => 'scalargtsel', oprjoin => 'scalargtjoinsel' }, +{ oid => '1788', descr => 'less than or equal', + oprname => '<=', oprleft => 'bit', oprright => 'bit', oprresult => 'bool', + oprcom => '>=(bit,bit)', oprnegate => '>(bit,bit)', oprcode => 'bitle', + oprrest => 'scalarlesel', oprjoin => 'scalarlejoinsel' }, +{ oid => '1789', descr => 'greater than or equal', + oprname => '>=', oprleft => 'bit', oprright => 'bit', oprresult => 'bool', + oprcom => '<=(bit,bit)', oprnegate => '<(bit,bit)', oprcode => 'bitge', + oprrest => 'scalargesel', oprjoin => 'scalargejoinsel' }, +{ oid => '1791', descr => 'bitwise and', + oprname => '&', oprleft => 'bit', oprright => 'bit', oprresult => 'bit', + oprcom => '&(bit,bit)', oprcode => 'bitand' }, +{ oid => '1792', descr => 'bitwise or', + oprname => '|', oprleft => 'bit', oprright => 'bit', oprresult => 'bit', + oprcom => '|(bit,bit)', oprcode => 'bitor' }, +{ oid => '1793', descr => 'bitwise exclusive or', + oprname => '#', oprleft => 'bit', oprright => 'bit', oprresult => 'bit', + oprcom => '#(bit,bit)', oprcode => 'bitxor' }, +{ oid => '1794', descr => 'bitwise not', + oprname => '~', oprkind => 'l', oprleft => '0', oprright => 'bit', + oprresult => 'bit', oprcode => 'bitnot' }, +{ oid => '1795', descr => 'bitwise shift left', + oprname => '<<', oprleft => 'bit', oprright => 'int4', oprresult => 'bit', + oprcode => 'bitshiftleft' }, +{ oid => '1796', descr => 'bitwise shift right', + oprname => '>>', oprleft => 'bit', oprright => 'int4', oprresult => 'bit', + oprcode => 'bitshiftright' }, +{ oid => '1797', descr => 'concatenate', + oprname => '||', oprleft => 'varbit', oprright => 'varbit', + oprresult => 'varbit', oprcode => 'bitcat' }, + +{ oid => '1800', descr => 'add', + oprname => '+', oprleft => 'time', oprright => 'interval', + oprresult => 'time', oprcom => '+(interval,time)', + oprcode => 'time_pl_interval' }, +{ oid => '1801', descr => 'subtract', + oprname => '-', oprleft => 'time', oprright => 'interval', + oprresult => 'time', oprcode => 'time_mi_interval' }, +{ oid => '1802', descr => 'add', + oprname => '+', oprleft => 'timetz', oprright => 'interval', + oprresult => 'timetz', oprcom => '+(interval,timetz)', + oprcode => 
'timetz_pl_interval' }, +{ oid => '1803', descr => 'subtract', + oprname => '-', oprleft => 'timetz', oprright => 'interval', + oprresult => 'timetz', oprcode => 'timetz_mi_interval' }, + +{ oid => '1804', descr => 'equal', + oprname => '=', oprcanmerge => 't', oprleft => 'varbit', oprright => 'varbit', + oprresult => 'bool', oprcom => '=(varbit,varbit)', + oprnegate => '<>(varbit,varbit)', oprcode => 'varbiteq', oprrest => 'eqsel', + oprjoin => 'eqjoinsel' }, +{ oid => '1805', descr => 'not equal', + oprname => '<>', oprleft => 'varbit', oprright => 'varbit', + oprresult => 'bool', oprcom => '<>(varbit,varbit)', + oprnegate => '=(varbit,varbit)', oprcode => 'varbitne', oprrest => 'neqsel', + oprjoin => 'neqjoinsel' }, +{ oid => '1806', descr => 'less than', + oprname => '<', oprleft => 'varbit', oprright => 'varbit', + oprresult => 'bool', oprcom => '>(varbit,varbit)', + oprnegate => '>=(varbit,varbit)', oprcode => 'varbitlt', + oprrest => 'scalarltsel', oprjoin => 'scalarltjoinsel' }, +{ oid => '1807', descr => 'greater than', + oprname => '>', oprleft => 'varbit', oprright => 'varbit', + oprresult => 'bool', oprcom => '<(varbit,varbit)', + oprnegate => '<=(varbit,varbit)', oprcode => 'varbitgt', + oprrest => 'scalargtsel', oprjoin => 'scalargtjoinsel' }, +{ oid => '1808', descr => 'less than or equal', + oprname => '<=', oprleft => 'varbit', oprright => 'varbit', + oprresult => 'bool', oprcom => '>=(varbit,varbit)', + oprnegate => '>(varbit,varbit)', oprcode => 'varbitle', + oprrest => 'scalarlesel', oprjoin => 'scalarlejoinsel' }, +{ oid => '1809', descr => 'greater than or equal', + oprname => '>=', oprleft => 'varbit', oprright => 'varbit', + oprresult => 'bool', oprcom => '<=(varbit,varbit)', + oprnegate => '<(varbit,varbit)', oprcode => 'varbitge', + oprrest => 'scalargesel', oprjoin => 'scalargejoinsel' }, + +{ oid => '1849', descr => 'add', + oprname => '+', oprleft => 'interval', oprright => 'time', + oprresult => 'time', oprcom => '+(time,interval)', + oprcode => 'interval_pl_time' }, + +{ oid => '1862', descr => 'equal', + oprname => '=', oprcanmerge => 't', oprcanhash => 't', oprleft => 'int2', + oprright => 'int8', oprresult => 'bool', oprcom => '=(int8,int2)', + oprnegate => '<>(int2,int8)', oprcode => 'int28eq', oprrest => 'eqsel', + oprjoin => 'eqjoinsel' }, +{ oid => '1863', descr => 'not equal', + oprname => '<>', oprleft => 'int2', oprright => 'int8', oprresult => 'bool', + oprcom => '<>(int8,int2)', oprnegate => '=(int2,int8)', oprcode => 'int28ne', + oprrest => 'neqsel', oprjoin => 'neqjoinsel' }, +{ oid => '1864', descr => 'less than', + oprname => '<', oprleft => 'int2', oprright => 'int8', oprresult => 'bool', + oprcom => '>(int8,int2)', oprnegate => '>=(int2,int8)', oprcode => 'int28lt', + oprrest => 'scalarltsel', oprjoin => 'scalarltjoinsel' }, +{ oid => '1865', descr => 'greater than', + oprname => '>', oprleft => 'int2', oprright => 'int8', oprresult => 'bool', + oprcom => '<(int8,int2)', oprnegate => '<=(int2,int8)', oprcode => 'int28gt', + oprrest => 'scalargtsel', oprjoin => 'scalargtjoinsel' }, +{ oid => '1866', descr => 'less than or equal', + oprname => '<=', oprleft => 'int2', oprright => 'int8', oprresult => 'bool', + oprcom => '>=(int8,int2)', oprnegate => '>(int2,int8)', oprcode => 'int28le', + oprrest => 'scalarlesel', oprjoin => 'scalarlejoinsel' }, +{ oid => '1867', descr => 'greater than or equal', + oprname => '>=', oprleft => 'int2', oprright => 'int8', oprresult => 'bool', + oprcom => '<=(int8,int2)', oprnegate => '<(int2,int8)', oprcode => 
'int28ge', + oprrest => 'scalargesel', oprjoin => 'scalargejoinsel' }, + +{ oid => '1868', descr => 'equal', + oprname => '=', oprcanmerge => 't', oprcanhash => 't', oprleft => 'int8', + oprright => 'int2', oprresult => 'bool', oprcom => '=(int2,int8)', + oprnegate => '<>(int8,int2)', oprcode => 'int82eq', oprrest => 'eqsel', + oprjoin => 'eqjoinsel' }, +{ oid => '1869', descr => 'not equal', + oprname => '<>', oprleft => 'int8', oprright => 'int2', oprresult => 'bool', + oprcom => '<>(int2,int8)', oprnegate => '=(int8,int2)', oprcode => 'int82ne', + oprrest => 'neqsel', oprjoin => 'neqjoinsel' }, +{ oid => '1870', descr => 'less than', + oprname => '<', oprleft => 'int8', oprright => 'int2', oprresult => 'bool', + oprcom => '>(int2,int8)', oprnegate => '>=(int8,int2)', oprcode => 'int82lt', + oprrest => 'scalarltsel', oprjoin => 'scalarltjoinsel' }, +{ oid => '1871', descr => 'greater than', + oprname => '>', oprleft => 'int8', oprright => 'int2', oprresult => 'bool', + oprcom => '<(int2,int8)', oprnegate => '<=(int8,int2)', oprcode => 'int82gt', + oprrest => 'scalargtsel', oprjoin => 'scalargtjoinsel' }, +{ oid => '1872', descr => 'less than or equal', + oprname => '<=', oprleft => 'int8', oprright => 'int2', oprresult => 'bool', + oprcom => '>=(int2,int8)', oprnegate => '>(int8,int2)', oprcode => 'int82le', + oprrest => 'scalarlesel', oprjoin => 'scalarlejoinsel' }, +{ oid => '1873', descr => 'greater than or equal', + oprname => '>=', oprleft => 'int8', oprright => 'int2', oprresult => 'bool', + oprcom => '<=(int2,int8)', oprnegate => '<(int8,int2)', oprcode => 'int82ge', + oprrest => 'scalargesel', oprjoin => 'scalargejoinsel' }, + +{ oid => '1874', descr => 'bitwise and', + oprname => '&', oprleft => 'int2', oprright => 'int2', oprresult => 'int2', + oprcom => '&(int2,int2)', oprcode => 'int2and' }, +{ oid => '1875', descr => 'bitwise or', + oprname => '|', oprleft => 'int2', oprright => 'int2', oprresult => 'int2', + oprcom => '|(int2,int2)', oprcode => 'int2or' }, +{ oid => '1876', descr => 'bitwise exclusive or', + oprname => '#', oprleft => 'int2', oprright => 'int2', oprresult => 'int2', + oprcom => '#(int2,int2)', oprcode => 'int2xor' }, +{ oid => '1877', descr => 'bitwise not', + oprname => '~', oprkind => 'l', oprleft => '0', oprright => 'int2', + oprresult => 'int2', oprcode => 'int2not' }, +{ oid => '1878', descr => 'bitwise shift left', + oprname => '<<', oprleft => 'int2', oprright => 'int4', oprresult => 'int2', + oprcode => 'int2shl' }, +{ oid => '1879', descr => 'bitwise shift right', + oprname => '>>', oprleft => 'int2', oprright => 'int4', oprresult => 'int2', + oprcode => 'int2shr' }, + +{ oid => '1880', descr => 'bitwise and', + oprname => '&', oprleft => 'int4', oprright => 'int4', oprresult => 'int4', + oprcom => '&(int4,int4)', oprcode => 'int4and' }, +{ oid => '1881', descr => 'bitwise or', + oprname => '|', oprleft => 'int4', oprright => 'int4', oprresult => 'int4', + oprcom => '|(int4,int4)', oprcode => 'int4or' }, +{ oid => '1882', descr => 'bitwise exclusive or', + oprname => '#', oprleft => 'int4', oprright => 'int4', oprresult => 'int4', + oprcom => '#(int4,int4)', oprcode => 'int4xor' }, +{ oid => '1883', descr => 'bitwise not', + oprname => '~', oprkind => 'l', oprleft => '0', oprright => 'int4', + oprresult => 'int4', oprcode => 'int4not' }, +{ oid => '1884', descr => 'bitwise shift left', + oprname => '<<', oprleft => 'int4', oprright => 'int4', oprresult => 'int4', + oprcode => 'int4shl' }, +{ oid => '1885', descr => 'bitwise shift right', + oprname 
=> '>>', oprleft => 'int4', oprright => 'int4', oprresult => 'int4', + oprcode => 'int4shr' }, + +{ oid => '1886', descr => 'bitwise and', + oprname => '&', oprleft => 'int8', oprright => 'int8', oprresult => 'int8', + oprcom => '&(int8,int8)', oprcode => 'int8and' }, +{ oid => '1887', descr => 'bitwise or', + oprname => '|', oprleft => 'int8', oprright => 'int8', oprresult => 'int8', + oprcom => '|(int8,int8)', oprcode => 'int8or' }, +{ oid => '1888', descr => 'bitwise exclusive or', + oprname => '#', oprleft => 'int8', oprright => 'int8', oprresult => 'int8', + oprcom => '#(int8,int8)', oprcode => 'int8xor' }, +{ oid => '1889', descr => 'bitwise not', + oprname => '~', oprkind => 'l', oprleft => '0', oprright => 'int8', + oprresult => 'int8', oprcode => 'int8not' }, +{ oid => '1890', descr => 'bitwise shift left', + oprname => '<<', oprleft => 'int8', oprright => 'int4', oprresult => 'int8', + oprcode => 'int8shl' }, +{ oid => '1891', descr => 'bitwise shift right', + oprname => '>>', oprleft => 'int8', oprright => 'int4', oprresult => 'int8', + oprcode => 'int8shr' }, + +{ oid => '1916', descr => 'unary plus', + oprname => '+', oprkind => 'l', oprleft => '0', oprright => 'int8', + oprresult => 'int8', oprcode => 'int8up' }, +{ oid => '1917', descr => 'unary plus', + oprname => '+', oprkind => 'l', oprleft => '0', oprright => 'int2', + oprresult => 'int2', oprcode => 'int2up' }, +{ oid => '1918', descr => 'unary plus', + oprname => '+', oprkind => 'l', oprleft => '0', oprright => 'int4', + oprresult => 'int4', oprcode => 'int4up' }, +{ oid => '1919', descr => 'unary plus', + oprname => '+', oprkind => 'l', oprleft => '0', oprright => 'float4', + oprresult => 'float4', oprcode => 'float4up' }, +{ oid => '1920', descr => 'unary plus', + oprname => '+', oprkind => 'l', oprleft => '0', oprright => 'float8', + oprresult => 'float8', oprcode => 'float8up' }, +{ oid => '1921', descr => 'unary plus', + oprname => '+', oprkind => 'l', oprleft => '0', oprright => 'numeric', + oprresult => 'numeric', oprcode => 'numeric_uplus' }, + +# bytea operators +{ oid => '1955', descr => 'equal', + oprname => '=', oprcanmerge => 't', oprcanhash => 't', oprleft => 'bytea', + oprright => 'bytea', oprresult => 'bool', oprcom => '=(bytea,bytea)', + oprnegate => '<>(bytea,bytea)', oprcode => 'byteaeq', oprrest => 'eqsel', + oprjoin => 'eqjoinsel' }, +{ oid => '1956', descr => 'not equal', + oprname => '<>', oprleft => 'bytea', oprright => 'bytea', oprresult => 'bool', + oprcom => '<>(bytea,bytea)', oprnegate => '=(bytea,bytea)', + oprcode => 'byteane', oprrest => 'neqsel', oprjoin => 'neqjoinsel' }, +{ oid => '1957', descr => 'less than', + oprname => '<', oprleft => 'bytea', oprright => 'bytea', oprresult => 'bool', + oprcom => '>(bytea,bytea)', oprnegate => '>=(bytea,bytea)', + oprcode => 'bytealt', oprrest => 'scalarltsel', + oprjoin => 'scalarltjoinsel' }, +{ oid => '1958', descr => 'less than or equal', + oprname => '<=', oprleft => 'bytea', oprright => 'bytea', oprresult => 'bool', + oprcom => '>=(bytea,bytea)', oprnegate => '>(bytea,bytea)', + oprcode => 'byteale', oprrest => 'scalarlesel', + oprjoin => 'scalarlejoinsel' }, +{ oid => '1959', descr => 'greater than', + oprname => '>', oprleft => 'bytea', oprright => 'bytea', oprresult => 'bool', + oprcom => '<(bytea,bytea)', oprnegate => '<=(bytea,bytea)', + oprcode => 'byteagt', oprrest => 'scalargtsel', + oprjoin => 'scalargtjoinsel' }, +{ oid => '1960', descr => 'greater than or equal', + oprname => '>=', oprleft => 'bytea', oprright => 'bytea', oprresult 
=> 'bool', + oprcom => '<=(bytea,bytea)', oprnegate => '<(bytea,bytea)', + oprcode => 'byteage', oprrest => 'scalargesel', + oprjoin => 'scalargejoinsel' }, + +{ oid => '2016', oid_symbol => 'OID_BYTEA_LIKE_OP', + descr => 'matches LIKE expression', + oprname => '~~', oprleft => 'bytea', oprright => 'bytea', oprresult => 'bool', + oprnegate => '!~~(bytea,bytea)', oprcode => 'bytealike', oprrest => 'likesel', + oprjoin => 'likejoinsel' }, +{ oid => '2017', descr => 'does not match LIKE expression', + oprname => '!~~', oprleft => 'bytea', oprright => 'bytea', + oprresult => 'bool', oprnegate => '~~(bytea,bytea)', oprcode => 'byteanlike', + oprrest => 'nlikesel', oprjoin => 'nlikejoinsel' }, +{ oid => '2018', descr => 'concatenate', + oprname => '||', oprleft => 'bytea', oprright => 'bytea', + oprresult => 'bytea', oprcode => 'byteacat' }, + +# timestamp operators +{ oid => '2060', descr => 'equal', + oprname => '=', oprcanmerge => 't', oprcanhash => 't', oprleft => 'timestamp', + oprright => 'timestamp', oprresult => 'bool', + oprcom => '=(timestamp,timestamp)', oprnegate => '<>(timestamp,timestamp)', + oprcode => 'timestamp_eq', oprrest => 'eqsel', oprjoin => 'eqjoinsel' }, +{ oid => '2061', descr => 'not equal', + oprname => '<>', oprleft => 'timestamp', oprright => 'timestamp', + oprresult => 'bool', oprcom => '<>(timestamp,timestamp)', + oprnegate => '=(timestamp,timestamp)', oprcode => 'timestamp_ne', + oprrest => 'neqsel', oprjoin => 'neqjoinsel' }, +{ oid => '2062', descr => 'less than', + oprname => '<', oprleft => 'timestamp', oprright => 'timestamp', + oprresult => 'bool', oprcom => '>(timestamp,timestamp)', + oprnegate => '>=(timestamp,timestamp)', oprcode => 'timestamp_lt', + oprrest => 'scalarltsel', oprjoin => 'scalarltjoinsel' }, +{ oid => '2063', descr => 'less than or equal', + oprname => '<=', oprleft => 'timestamp', oprright => 'timestamp', + oprresult => 'bool', oprcom => '>=(timestamp,timestamp)', + oprnegate => '>(timestamp,timestamp)', oprcode => 'timestamp_le', + oprrest => 'scalarlesel', oprjoin => 'scalarlejoinsel' }, +{ oid => '2064', descr => 'greater than', + oprname => '>', oprleft => 'timestamp', oprright => 'timestamp', + oprresult => 'bool', oprcom => '<(timestamp,timestamp)', + oprnegate => '<=(timestamp,timestamp)', oprcode => 'timestamp_gt', + oprrest => 'scalargtsel', oprjoin => 'scalargtjoinsel' }, +{ oid => '2065', descr => 'greater than or equal', + oprname => '>=', oprleft => 'timestamp', oprright => 'timestamp', + oprresult => 'bool', oprcom => '<=(timestamp,timestamp)', + oprnegate => '<(timestamp,timestamp)', oprcode => 'timestamp_ge', + oprrest => 'scalargesel', oprjoin => 'scalargejoinsel' }, +{ oid => '2066', descr => 'add', + oprname => '+', oprleft => 'timestamp', oprright => 'interval', + oprresult => 'timestamp', oprcom => '+(interval,timestamp)', + oprcode => 'timestamp_pl_interval' }, +{ oid => '2067', descr => 'subtract', + oprname => '-', oprleft => 'timestamp', oprright => 'timestamp', + oprresult => 'interval', oprcode => 'timestamp_mi' }, +{ oid => '2068', descr => 'subtract', + oprname => '-', oprleft => 'timestamp', oprright => 'interval', + oprresult => 'timestamp', oprcode => 'timestamp_mi_interval' }, + +# character-by-character (not collation order) comparison operators for character types +{ oid => '2314', descr => 'less than', + oprname => '~<~', oprleft => 'text', oprright => 'text', oprresult => 'bool', + oprcom => '~>~(text,text)', oprnegate => '~>=~(text,text)', + oprcode => 'text_pattern_lt', oprrest => 'scalarltsel', + 
oprjoin => 'scalarltjoinsel' }, +{ oid => '2315', descr => 'less than or equal', + oprname => '~<=~', oprleft => 'text', oprright => 'text', oprresult => 'bool', + oprcom => '~>=~(text,text)', oprnegate => '~>~(text,text)', + oprcode => 'text_pattern_le', oprrest => 'scalarlesel', + oprjoin => 'scalarlejoinsel' }, +{ oid => '2317', descr => 'greater than or equal', + oprname => '~>=~', oprleft => 'text', oprright => 'text', oprresult => 'bool', + oprcom => '~<=~(text,text)', oprnegate => '~<~(text,text)', + oprcode => 'text_pattern_ge', oprrest => 'scalargesel', + oprjoin => 'scalargejoinsel' }, +{ oid => '2318', descr => 'greater than', + oprname => '~>~', oprleft => 'text', oprright => 'text', oprresult => 'bool', + oprcom => '~<~(text,text)', oprnegate => '~<=~(text,text)', + oprcode => 'text_pattern_gt', oprrest => 'scalargtsel', + oprjoin => 'scalargtjoinsel' }, + +{ oid => '2326', descr => 'less than', + oprname => '~<~', oprleft => 'bpchar', oprright => 'bpchar', + oprresult => 'bool', oprcom => '~>~(bpchar,bpchar)', + oprnegate => '~>=~(bpchar,bpchar)', oprcode => 'bpchar_pattern_lt', + oprrest => 'scalarltsel', oprjoin => 'scalarltjoinsel' }, +{ oid => '2327', descr => 'less than or equal', + oprname => '~<=~', oprleft => 'bpchar', oprright => 'bpchar', + oprresult => 'bool', oprcom => '~>=~(bpchar,bpchar)', + oprnegate => '~>~(bpchar,bpchar)', oprcode => 'bpchar_pattern_le', + oprrest => 'scalarlesel', oprjoin => 'scalarlejoinsel' }, +{ oid => '2329', descr => 'greater than or equal', + oprname => '~>=~', oprleft => 'bpchar', oprright => 'bpchar', + oprresult => 'bool', oprcom => '~<=~(bpchar,bpchar)', + oprnegate => '~<~(bpchar,bpchar)', oprcode => 'bpchar_pattern_ge', + oprrest => 'scalargesel', oprjoin => 'scalargejoinsel' }, +{ oid => '2330', descr => 'greater than', + oprname => '~>~', oprleft => 'bpchar', oprright => 'bpchar', + oprresult => 'bool', oprcom => '~<~(bpchar,bpchar)', + oprnegate => '~<=~(bpchar,bpchar)', oprcode => 'bpchar_pattern_gt', + oprrest => 'scalargtsel', oprjoin => 'scalargtjoinsel' }, + +# crosstype operations for date vs. 
timestamp and timestamptz +{ oid => '2345', descr => 'less than', + oprname => '<', oprleft => 'date', oprright => 'timestamp', + oprresult => 'bool', oprcom => '>(timestamp,date)', + oprnegate => '>=(date,timestamp)', oprcode => 'date_lt_timestamp', + oprrest => 'scalarltsel', oprjoin => 'scalarltjoinsel' }, +{ oid => '2346', descr => 'less than or equal', + oprname => '<=', oprleft => 'date', oprright => 'timestamp', + oprresult => 'bool', oprcom => '>=(timestamp,date)', + oprnegate => '>(date,timestamp)', oprcode => 'date_le_timestamp', + oprrest => 'scalarlesel', oprjoin => 'scalarlejoinsel' }, +{ oid => '2347', descr => 'equal', + oprname => '=', oprcanmerge => 't', oprleft => 'date', + oprright => 'timestamp', oprresult => 'bool', oprcom => '=(timestamp,date)', + oprnegate => '<>(date,timestamp)', oprcode => 'date_eq_timestamp', + oprrest => 'eqsel', oprjoin => 'eqjoinsel' }, +{ oid => '2348', descr => 'greater than or equal', + oprname => '>=', oprleft => 'date', oprright => 'timestamp', + oprresult => 'bool', oprcom => '<=(timestamp,date)', + oprnegate => '<(date,timestamp)', oprcode => 'date_ge_timestamp', + oprrest => 'scalargesel', oprjoin => 'scalargejoinsel' }, +{ oid => '2349', descr => 'greater than', + oprname => '>', oprleft => 'date', oprright => 'timestamp', + oprresult => 'bool', oprcom => '<(timestamp,date)', + oprnegate => '<=(date,timestamp)', oprcode => 'date_gt_timestamp', + oprrest => 'scalargtsel', oprjoin => 'scalargtjoinsel' }, +{ oid => '2350', descr => 'not equal', + oprname => '<>', oprleft => 'date', oprright => 'timestamp', + oprresult => 'bool', oprcom => '<>(timestamp,date)', + oprnegate => '=(date,timestamp)', oprcode => 'date_ne_timestamp', + oprrest => 'neqsel', oprjoin => 'neqjoinsel' }, + +{ oid => '2358', descr => 'less than', + oprname => '<', oprleft => 'date', oprright => 'timestamptz', + oprresult => 'bool', oprcom => '>(timestamptz,date)', + oprnegate => '>=(date,timestamptz)', oprcode => 'date_lt_timestamptz', + oprrest => 'scalarltsel', oprjoin => 'scalarltjoinsel' }, +{ oid => '2359', descr => 'less than or equal', + oprname => '<=', oprleft => 'date', oprright => 'timestamptz', + oprresult => 'bool', oprcom => '>=(timestamptz,date)', + oprnegate => '>(date,timestamptz)', oprcode => 'date_le_timestamptz', + oprrest => 'scalarlesel', oprjoin => 'scalarlejoinsel' }, +{ oid => '2360', descr => 'equal', + oprname => '=', oprcanmerge => 't', oprleft => 'date', + oprright => 'timestamptz', oprresult => 'bool', + oprcom => '=(timestamptz,date)', oprnegate => '<>(date,timestamptz)', + oprcode => 'date_eq_timestamptz', oprrest => 'eqsel', + oprjoin => 'eqjoinsel' }, +{ oid => '2361', descr => 'greater than or equal', + oprname => '>=', oprleft => 'date', oprright => 'timestamptz', + oprresult => 'bool', oprcom => '<=(timestamptz,date)', + oprnegate => '<(date,timestamptz)', oprcode => 'date_ge_timestamptz', + oprrest => 'scalargesel', oprjoin => 'scalargejoinsel' }, +{ oid => '2362', descr => 'greater than', + oprname => '>', oprleft => 'date', oprright => 'timestamptz', + oprresult => 'bool', oprcom => '<(timestamptz,date)', + oprnegate => '<=(date,timestamptz)', oprcode => 'date_gt_timestamptz', + oprrest => 'scalargtsel', oprjoin => 'scalargtjoinsel' }, +{ oid => '2363', descr => 'not equal', + oprname => '<>', oprleft => 'date', oprright => 'timestamptz', + oprresult => 'bool', oprcom => '<>(timestamptz,date)', + oprnegate => '=(date,timestamptz)', oprcode => 'date_ne_timestamptz', + oprrest => 'neqsel', oprjoin => 'neqjoinsel' }, + +{ oid => 
'2371', descr => 'less than', + oprname => '<', oprleft => 'timestamp', oprright => 'date', + oprresult => 'bool', oprcom => '>(date,timestamp)', + oprnegate => '>=(timestamp,date)', oprcode => 'timestamp_lt_date', + oprrest => 'scalarltsel', oprjoin => 'scalarltjoinsel' }, +{ oid => '2372', descr => 'less than or equal', + oprname => '<=', oprleft => 'timestamp', oprright => 'date', + oprresult => 'bool', oprcom => '>=(date,timestamp)', + oprnegate => '>(timestamp,date)', oprcode => 'timestamp_le_date', + oprrest => 'scalarlesel', oprjoin => 'scalarlejoinsel' }, +{ oid => '2373', descr => 'equal', + oprname => '=', oprcanmerge => 't', oprleft => 'timestamp', + oprright => 'date', oprresult => 'bool', oprcom => '=(date,timestamp)', + oprnegate => '<>(timestamp,date)', oprcode => 'timestamp_eq_date', + oprrest => 'eqsel', oprjoin => 'eqjoinsel' }, +{ oid => '2374', descr => 'greater than or equal', + oprname => '>=', oprleft => 'timestamp', oprright => 'date', + oprresult => 'bool', oprcom => '<=(date,timestamp)', + oprnegate => '<(timestamp,date)', oprcode => 'timestamp_ge_date', + oprrest => 'scalargesel', oprjoin => 'scalargejoinsel' }, +{ oid => '2375', descr => 'greater than', + oprname => '>', oprleft => 'timestamp', oprright => 'date', + oprresult => 'bool', oprcom => '<(date,timestamp)', + oprnegate => '<=(timestamp,date)', oprcode => 'timestamp_gt_date', + oprrest => 'scalargtsel', oprjoin => 'scalargtjoinsel' }, +{ oid => '2376', descr => 'not equal', + oprname => '<>', oprleft => 'timestamp', oprright => 'date', + oprresult => 'bool', oprcom => '<>(date,timestamp)', + oprnegate => '=(timestamp,date)', oprcode => 'timestamp_ne_date', + oprrest => 'neqsel', oprjoin => 'neqjoinsel' }, + +{ oid => '2384', descr => 'less than', + oprname => '<', oprleft => 'timestamptz', oprright => 'date', + oprresult => 'bool', oprcom => '>(date,timestamptz)', + oprnegate => '>=(timestamptz,date)', oprcode => 'timestamptz_lt_date', + oprrest => 'scalarltsel', oprjoin => 'scalarltjoinsel' }, +{ oid => '2385', descr => 'less than or equal', + oprname => '<=', oprleft => 'timestamptz', oprright => 'date', + oprresult => 'bool', oprcom => '>=(date,timestamptz)', + oprnegate => '>(timestamptz,date)', oprcode => 'timestamptz_le_date', + oprrest => 'scalarlesel', oprjoin => 'scalarlejoinsel' }, +{ oid => '2386', descr => 'equal', + oprname => '=', oprcanmerge => 't', oprleft => 'timestamptz', + oprright => 'date', oprresult => 'bool', oprcom => '=(date,timestamptz)', + oprnegate => '<>(timestamptz,date)', oprcode => 'timestamptz_eq_date', + oprrest => 'eqsel', oprjoin => 'eqjoinsel' }, +{ oid => '2387', descr => 'greater than or equal', + oprname => '>=', oprleft => 'timestamptz', oprright => 'date', + oprresult => 'bool', oprcom => '<=(date,timestamptz)', + oprnegate => '<(timestamptz,date)', oprcode => 'timestamptz_ge_date', + oprrest => 'scalargesel', oprjoin => 'scalargejoinsel' }, +{ oid => '2388', descr => 'greater than', + oprname => '>', oprleft => 'timestamptz', oprright => 'date', + oprresult => 'bool', oprcom => '<(date,timestamptz)', + oprnegate => '<=(timestamptz,date)', oprcode => 'timestamptz_gt_date', + oprrest => 'scalargtsel', oprjoin => 'scalargtjoinsel' }, +{ oid => '2389', descr => 'not equal', + oprname => '<>', oprleft => 'timestamptz', oprright => 'date', + oprresult => 'bool', oprcom => '<>(date,timestamptz)', + oprnegate => '=(timestamptz,date)', oprcode => 'timestamptz_ne_date', + oprrest => 'neqsel', oprjoin => 'neqjoinsel' }, + +# crosstype operations for timestamp vs. 
timestamptz +{ oid => '2534', descr => 'less than', + oprname => '<', oprleft => 'timestamp', oprright => 'timestamptz', + oprresult => 'bool', oprcom => '>(timestamptz,timestamp)', + oprnegate => '>=(timestamp,timestamptz)', + oprcode => 'timestamp_lt_timestamptz', oprrest => 'scalarltsel', + oprjoin => 'scalarltjoinsel' }, +{ oid => '2535', descr => 'less than or equal', + oprname => '<=', oprleft => 'timestamp', oprright => 'timestamptz', + oprresult => 'bool', oprcom => '>=(timestamptz,timestamp)', + oprnegate => '>(timestamp,timestamptz)', + oprcode => 'timestamp_le_timestamptz', oprrest => 'scalarlesel', + oprjoin => 'scalarlejoinsel' }, +{ oid => '2536', descr => 'equal', + oprname => '=', oprcanmerge => 't', oprleft => 'timestamp', + oprright => 'timestamptz', oprresult => 'bool', + oprcom => '=(timestamptz,timestamp)', + oprnegate => '<>(timestamp,timestamptz)', + oprcode => 'timestamp_eq_timestamptz', oprrest => 'eqsel', + oprjoin => 'eqjoinsel' }, +{ oid => '2537', descr => 'greater than or equal', + oprname => '>=', oprleft => 'timestamp', oprright => 'timestamptz', + oprresult => 'bool', oprcom => '<=(timestamptz,timestamp)', + oprnegate => '<(timestamp,timestamptz)', + oprcode => 'timestamp_ge_timestamptz', oprrest => 'scalargesel', + oprjoin => 'scalargejoinsel' }, +{ oid => '2538', descr => 'greater than', + oprname => '>', oprleft => 'timestamp', oprright => 'timestamptz', + oprresult => 'bool', oprcom => '<(timestamptz,timestamp)', + oprnegate => '<=(timestamp,timestamptz)', + oprcode => 'timestamp_gt_timestamptz', oprrest => 'scalargtsel', + oprjoin => 'scalargtjoinsel' }, +{ oid => '2539', descr => 'not equal', + oprname => '<>', oprleft => 'timestamp', oprright => 'timestamptz', + oprresult => 'bool', oprcom => '<>(timestamptz,timestamp)', + oprnegate => '=(timestamp,timestamptz)', + oprcode => 'timestamp_ne_timestamptz', oprrest => 'neqsel', + oprjoin => 'neqjoinsel' }, + +{ oid => '2540', descr => 'less than', + oprname => '<', oprleft => 'timestamptz', oprright => 'timestamp', + oprresult => 'bool', oprcom => '>(timestamp,timestamptz)', + oprnegate => '>=(timestamptz,timestamp)', + oprcode => 'timestamptz_lt_timestamp', oprrest => 'scalarltsel', + oprjoin => 'scalarltjoinsel' }, +{ oid => '2541', descr => 'less than or equal', + oprname => '<=', oprleft => 'timestamptz', oprright => 'timestamp', + oprresult => 'bool', oprcom => '>=(timestamp,timestamptz)', + oprnegate => '>(timestamptz,timestamp)', + oprcode => 'timestamptz_le_timestamp', oprrest => 'scalarlesel', + oprjoin => 'scalarlejoinsel' }, +{ oid => '2542', descr => 'equal', + oprname => '=', oprcanmerge => 't', oprleft => 'timestamptz', + oprright => 'timestamp', oprresult => 'bool', + oprcom => '=(timestamp,timestamptz)', + oprnegate => '<>(timestamptz,timestamp)', + oprcode => 'timestamptz_eq_timestamp', oprrest => 'eqsel', + oprjoin => 'eqjoinsel' }, +{ oid => '2543', descr => 'greater than or equal', + oprname => '>=', oprleft => 'timestamptz', oprright => 'timestamp', + oprresult => 'bool', oprcom => '<=(timestamp,timestamptz)', + oprnegate => '<(timestamptz,timestamp)', + oprcode => 'timestamptz_ge_timestamp', oprrest => 'scalargesel', + oprjoin => 'scalargejoinsel' }, +{ oid => '2544', descr => 'greater than', + oprname => '>', oprleft => 'timestamptz', oprright => 'timestamp', + oprresult => 'bool', oprcom => '<(timestamp,timestamptz)', + oprnegate => '<=(timestamptz,timestamp)', + oprcode => 'timestamptz_gt_timestamp', oprrest => 'scalargtsel', + oprjoin => 'scalargtjoinsel' }, +{ oid => '2545', 
descr => 'not equal', + oprname => '<>', oprleft => 'timestamptz', oprright => 'timestamp', + oprresult => 'bool', oprcom => '<>(timestamp,timestamptz)', + oprnegate => '=(timestamptz,timestamp)', + oprcode => 'timestamptz_ne_timestamp', oprrest => 'neqsel', + oprjoin => 'neqjoinsel' }, + +# formerly-missing interval + datetime operators +{ oid => '2551', descr => 'add', + oprname => '+', oprleft => 'interval', oprright => 'date', + oprresult => 'timestamp', oprcom => '+(date,interval)', + oprcode => 'interval_pl_date' }, +{ oid => '2552', descr => 'add', + oprname => '+', oprleft => 'interval', oprright => 'timetz', + oprresult => 'timetz', oprcom => '+(timetz,interval)', + oprcode => 'interval_pl_timetz' }, +{ oid => '2553', descr => 'add', + oprname => '+', oprleft => 'interval', oprright => 'timestamp', + oprresult => 'timestamp', oprcom => '+(timestamp,interval)', + oprcode => 'interval_pl_timestamp' }, +{ oid => '2554', descr => 'add', + oprname => '+', oprleft => 'interval', oprright => 'timestamptz', + oprresult => 'timestamptz', oprcom => '+(timestamptz,interval)', + oprcode => 'interval_pl_timestamptz' }, +{ oid => '2555', descr => 'add', + oprname => '+', oprleft => 'int4', oprright => 'date', oprresult => 'date', + oprcom => '+(date,int4)', oprcode => 'integer_pl_date' }, + +# new operators for Y-direction rtree opfamilies +{ oid => '2570', descr => 'is below', + oprname => '<<|', oprleft => 'box', oprright => 'box', oprresult => 'bool', + oprcode => 'box_below', oprrest => 'positionsel', + oprjoin => 'positionjoinsel' }, +{ oid => '2571', descr => 'overlaps or is below', + oprname => '&<|', oprleft => 'box', oprright => 'box', oprresult => 'bool', + oprcode => 'box_overbelow', oprrest => 'positionsel', + oprjoin => 'positionjoinsel' }, +{ oid => '2572', descr => 'overlaps or is above', + oprname => '|&>', oprleft => 'box', oprright => 'box', oprresult => 'bool', + oprcode => 'box_overabove', oprrest => 'positionsel', + oprjoin => 'positionjoinsel' }, +{ oid => '2573', descr => 'is above', + oprname => '|>>', oprleft => 'box', oprright => 'box', oprresult => 'bool', + oprcode => 'box_above', oprrest => 'positionsel', + oprjoin => 'positionjoinsel' }, +{ oid => '2574', descr => 'is below', + oprname => '<<|', oprleft => 'polygon', oprright => 'polygon', + oprresult => 'bool', oprcode => 'poly_below', oprrest => 'positionsel', + oprjoin => 'positionjoinsel' }, +{ oid => '2575', descr => 'overlaps or is below', + oprname => '&<|', oprleft => 'polygon', oprright => 'polygon', + oprresult => 'bool', oprcode => 'poly_overbelow', oprrest => 'positionsel', + oprjoin => 'positionjoinsel' }, +{ oid => '2576', descr => 'overlaps or is above', + oprname => '|&>', oprleft => 'polygon', oprright => 'polygon', + oprresult => 'bool', oprcode => 'poly_overabove', oprrest => 'positionsel', + oprjoin => 'positionjoinsel' }, +{ oid => '2577', descr => 'is above', + oprname => '|>>', oprleft => 'polygon', oprright => 'polygon', + oprresult => 'bool', oprcode => 'poly_above', oprrest => 'positionsel', + oprjoin => 'positionjoinsel' }, +{ oid => '2589', descr => 'overlaps or is below', + oprname => '&<|', oprleft => 'circle', oprright => 'circle', + oprresult => 'bool', oprcode => 'circle_overbelow', oprrest => 'positionsel', + oprjoin => 'positionjoinsel' }, +{ oid => '2590', descr => 'overlaps or is above', + oprname => '|&>', oprleft => 'circle', oprright => 'circle', + oprresult => 'bool', oprcode => 'circle_overabove', oprrest => 'positionsel', + oprjoin => 'positionjoinsel' }, + +# 
overlap/contains/contained for arrays +{ oid => '2750', oid_symbol => 'OID_ARRAY_OVERLAP_OP', descr => 'overlaps', + oprname => '&&', oprleft => 'anyarray', oprright => 'anyarray', + oprresult => 'bool', oprcom => '&&(anyarray,anyarray)', + oprcode => 'arrayoverlap', oprrest => 'arraycontsel', + oprjoin => 'arraycontjoinsel' }, +{ oid => '2751', oid_symbol => 'OID_ARRAY_CONTAINS_OP', descr => 'contains', + oprname => '@>', oprleft => 'anyarray', oprright => 'anyarray', + oprresult => 'bool', oprcom => '<@(anyarray,anyarray)', + oprcode => 'arraycontains', oprrest => 'arraycontsel', + oprjoin => 'arraycontjoinsel' }, +{ oid => '2752', oid_symbol => 'OID_ARRAY_CONTAINED_OP', + descr => 'is contained by', + oprname => '<@', oprleft => 'anyarray', oprright => 'anyarray', + oprresult => 'bool', oprcom => '@>(anyarray,anyarray)', + oprcode => 'arraycontained', oprrest => 'arraycontsel', + oprjoin => 'arraycontjoinsel' }, + +# capturing operators to preserve pre-8.3 behavior of text concatenation +{ oid => '2779', descr => 'concatenate', + oprname => '||', oprleft => 'text', oprright => 'anynonarray', + oprresult => 'text', oprcode => 'textanycat' }, +{ oid => '2780', descr => 'concatenate', + oprname => '||', oprleft => 'anynonarray', oprright => 'text', + oprresult => 'text', oprcode => 'anytextcat' }, + +# obsolete names for contains/contained-by operators; remove these someday +{ oid => '2860', descr => 'deprecated, use <@ instead', + oprname => '@', oprleft => 'polygon', oprright => 'polygon', + oprresult => 'bool', oprcom => '~(polygon,polygon)', + oprcode => 'poly_contained', oprrest => 'contsel', oprjoin => 'contjoinsel' }, +{ oid => '2861', descr => 'deprecated, use @> instead', + oprname => '~', oprleft => 'polygon', oprright => 'polygon', + oprresult => 'bool', oprcom => '@(polygon,polygon)', + oprcode => 'poly_contain', oprrest => 'contsel', oprjoin => 'contjoinsel' }, +{ oid => '2862', descr => 'deprecated, use <@ instead', + oprname => '@', oprleft => 'box', oprright => 'box', oprresult => 'bool', + oprcom => '~(box,box)', oprcode => 'box_contained', oprrest => 'contsel', + oprjoin => 'contjoinsel' }, +{ oid => '2863', descr => 'deprecated, use @> instead', + oprname => '~', oprleft => 'box', oprright => 'box', oprresult => 'bool', + oprcom => '@(box,box)', oprcode => 'box_contain', oprrest => 'contsel', + oprjoin => 'contjoinsel' }, +{ oid => '2864', descr => 'deprecated, use <@ instead', + oprname => '@', oprleft => 'circle', oprright => 'circle', + oprresult => 'bool', oprcom => '~(circle,circle)', + oprcode => 'circle_contained', oprrest => 'contsel', + oprjoin => 'contjoinsel' }, +{ oid => '2865', descr => 'deprecated, use @> instead', + oprname => '~', oprleft => 'circle', oprright => 'circle', + oprresult => 'bool', oprcom => '@(circle,circle)', + oprcode => 'circle_contain', oprrest => 'contsel', oprjoin => 'contjoinsel' }, +{ oid => '2866', descr => 'deprecated, use <@ instead', + oprname => '@', oprleft => 'point', oprright => 'box', oprresult => 'bool', + oprcode => 'on_pb' }, +{ oid => '2867', descr => 'deprecated, use <@ instead', + oprname => '@', oprleft => 'point', oprright => 'path', oprresult => 'bool', + oprcom => '~(path,point)', oprcode => 'on_ppath' }, +{ oid => '2868', descr => 'deprecated, use @> instead', + oprname => '~', oprleft => 'path', oprright => 'point', oprresult => 'bool', + oprcom => '@(point,path)', oprcode => 'path_contain_pt' }, +{ oid => '2869', descr => 'deprecated, use <@ instead', + oprname => '@', oprleft => 'point', oprright => 'polygon', 
+ oprresult => 'bool', oprcom => '~(polygon,point)', + oprcode => 'pt_contained_poly' }, +{ oid => '2870', descr => 'deprecated, use @> instead', + oprname => '~', oprleft => 'polygon', oprright => 'point', + oprresult => 'bool', oprcom => '@(point,polygon)', + oprcode => 'poly_contain_pt' }, +{ oid => '2871', descr => 'deprecated, use <@ instead', + oprname => '@', oprleft => 'point', oprright => 'circle', oprresult => 'bool', + oprcom => '~(circle,point)', oprcode => 'pt_contained_circle' }, +{ oid => '2872', descr => 'deprecated, use @> instead', + oprname => '~', oprleft => 'circle', oprright => 'point', oprresult => 'bool', + oprcom => '@(point,circle)', oprcode => 'circle_contain_pt' }, +{ oid => '2873', descr => 'deprecated, use <@ instead', + oprname => '@', oprleft => 'point', oprright => 'line', oprresult => 'bool', + oprcode => 'on_pl' }, +{ oid => '2874', descr => 'deprecated, use <@ instead', + oprname => '@', oprleft => 'point', oprright => 'lseg', oprresult => 'bool', + oprcode => 'on_ps' }, +{ oid => '2875', descr => 'deprecated, use <@ instead', + oprname => '@', oprleft => 'lseg', oprright => 'line', oprresult => 'bool', + oprcode => 'on_sl' }, +{ oid => '2876', descr => 'deprecated, use <@ instead', + oprname => '@', oprleft => 'lseg', oprright => 'box', oprresult => 'bool', + oprcode => 'on_sb' }, +{ oid => '2877', descr => 'deprecated, use @> instead', + oprname => '~', oprleft => '_aclitem', oprright => 'aclitem', + oprresult => 'bool', oprcode => 'aclcontains' }, + +# uuid operators +{ oid => '2972', descr => 'equal', + oprname => '=', oprcanmerge => 't', oprcanhash => 't', oprleft => 'uuid', + oprright => 'uuid', oprresult => 'bool', oprcom => '=(uuid,uuid)', + oprnegate => '<>(uuid,uuid)', oprcode => 'uuid_eq', oprrest => 'eqsel', + oprjoin => 'eqjoinsel' }, +{ oid => '2973', descr => 'not equal', + oprname => '<>', oprleft => 'uuid', oprright => 'uuid', oprresult => 'bool', + oprcom => '<>(uuid,uuid)', oprnegate => '=(uuid,uuid)', oprcode => 'uuid_ne', + oprrest => 'neqsel', oprjoin => 'neqjoinsel' }, +{ oid => '2974', descr => 'less than', + oprname => '<', oprleft => 'uuid', oprright => 'uuid', oprresult => 'bool', + oprcom => '>(uuid,uuid)', oprnegate => '>=(uuid,uuid)', oprcode => 'uuid_lt', + oprrest => 'scalarltsel', oprjoin => 'scalarltjoinsel' }, +{ oid => '2975', descr => 'greater than', + oprname => '>', oprleft => 'uuid', oprright => 'uuid', oprresult => 'bool', + oprcom => '<(uuid,uuid)', oprnegate => '<=(uuid,uuid)', oprcode => 'uuid_gt', + oprrest => 'scalargtsel', oprjoin => 'scalargtjoinsel' }, +{ oid => '2976', descr => 'less than or equal', + oprname => '<=', oprleft => 'uuid', oprright => 'uuid', oprresult => 'bool', + oprcom => '>=(uuid,uuid)', oprnegate => '>(uuid,uuid)', oprcode => 'uuid_le', + oprrest => 'scalarlesel', oprjoin => 'scalarlejoinsel' }, +{ oid => '2977', descr => 'greater than or equal', + oprname => '>=', oprleft => 'uuid', oprright => 'uuid', oprresult => 'bool', + oprcom => '<=(uuid,uuid)', oprnegate => '<(uuid,uuid)', oprcode => 'uuid_ge', + oprrest => 'scalargesel', oprjoin => 'scalargejoinsel' }, + +# pg_lsn operators +{ oid => '3222', descr => 'equal', + oprname => '=', oprcanmerge => 't', oprcanhash => 't', oprleft => 'pg_lsn', + oprright => 'pg_lsn', oprresult => 'bool', oprcom => '=(pg_lsn,pg_lsn)', + oprnegate => '<>(pg_lsn,pg_lsn)', oprcode => 'pg_lsn_eq', oprrest => 'eqsel', + oprjoin => 'eqjoinsel' }, +{ oid => '3223', descr => 'not equal', + oprname => '<>', oprleft => 'pg_lsn', oprright => 'pg_lsn', + oprresult 
=> 'bool', oprcom => '<>(pg_lsn,pg_lsn)', + oprnegate => '=(pg_lsn,pg_lsn)', oprcode => 'pg_lsn_ne', oprrest => 'neqsel', + oprjoin => 'neqjoinsel' }, +{ oid => '3224', descr => 'less than', + oprname => '<', oprleft => 'pg_lsn', oprright => 'pg_lsn', + oprresult => 'bool', oprcom => '>(pg_lsn,pg_lsn)', + oprnegate => '>=(pg_lsn,pg_lsn)', oprcode => 'pg_lsn_lt', + oprrest => 'scalarltsel', oprjoin => 'scalarltjoinsel' }, +{ oid => '3225', descr => 'greater than', + oprname => '>', oprleft => 'pg_lsn', oprright => 'pg_lsn', + oprresult => 'bool', oprcom => '<(pg_lsn,pg_lsn)', + oprnegate => '<=(pg_lsn,pg_lsn)', oprcode => 'pg_lsn_gt', + oprrest => 'scalargtsel', oprjoin => 'scalargtjoinsel' }, +{ oid => '3226', descr => 'less than or equal', + oprname => '<=', oprleft => 'pg_lsn', oprright => 'pg_lsn', + oprresult => 'bool', oprcom => '>=(pg_lsn,pg_lsn)', + oprnegate => '>(pg_lsn,pg_lsn)', oprcode => 'pg_lsn_le', + oprrest => 'scalarlesel', oprjoin => 'scalarlejoinsel' }, +{ oid => '3227', descr => 'greater than or equal', + oprname => '>=', oprleft => 'pg_lsn', oprright => 'pg_lsn', + oprresult => 'bool', oprcom => '<=(pg_lsn,pg_lsn)', + oprnegate => '<(pg_lsn,pg_lsn)', oprcode => 'pg_lsn_ge', + oprrest => 'scalargesel', oprjoin => 'scalargejoinsel' }, +{ oid => '3228', descr => 'minus', + oprname => '-', oprleft => 'pg_lsn', oprright => 'pg_lsn', + oprresult => 'numeric', oprcode => 'pg_lsn_mi' }, + +# enum operators +{ oid => '3516', descr => 'equal', + oprname => '=', oprcanmerge => 't', oprcanhash => 't', oprleft => 'anyenum', + oprright => 'anyenum', oprresult => 'bool', oprcom => '=(anyenum,anyenum)', + oprnegate => '<>(anyenum,anyenum)', oprcode => 'enum_eq', oprrest => 'eqsel', + oprjoin => 'eqjoinsel' }, +{ oid => '3517', descr => 'not equal', + oprname => '<>', oprleft => 'anyenum', oprright => 'anyenum', + oprresult => 'bool', oprcom => '<>(anyenum,anyenum)', + oprnegate => '=(anyenum,anyenum)', oprcode => 'enum_ne', oprrest => 'neqsel', + oprjoin => 'neqjoinsel' }, +{ oid => '3518', descr => 'less than', + oprname => '<', oprleft => 'anyenum', oprright => 'anyenum', + oprresult => 'bool', oprcom => '>(anyenum,anyenum)', + oprnegate => '>=(anyenum,anyenum)', oprcode => 'enum_lt', + oprrest => 'scalarltsel', oprjoin => 'scalarltjoinsel' }, +{ oid => '3519', descr => 'greater than', + oprname => '>', oprleft => 'anyenum', oprright => 'anyenum', + oprresult => 'bool', oprcom => '<(anyenum,anyenum)', + oprnegate => '<=(anyenum,anyenum)', oprcode => 'enum_gt', + oprrest => 'scalargtsel', oprjoin => 'scalargtjoinsel' }, +{ oid => '3520', descr => 'less than or equal', + oprname => '<=', oprleft => 'anyenum', oprright => 'anyenum', + oprresult => 'bool', oprcom => '>=(anyenum,anyenum)', + oprnegate => '>(anyenum,anyenum)', oprcode => 'enum_le', + oprrest => 'scalarlesel', oprjoin => 'scalarlejoinsel' }, +{ oid => '3521', descr => 'greater than or equal', + oprname => '>=', oprleft => 'anyenum', oprright => 'anyenum', + oprresult => 'bool', oprcom => '<=(anyenum,anyenum)', + oprnegate => '<(anyenum,anyenum)', oprcode => 'enum_ge', + oprrest => 'scalargesel', oprjoin => 'scalargejoinsel' }, + +# tsearch operations +{ oid => '3627', descr => 'less than', + oprname => '<', oprleft => 'tsvector', oprright => 'tsvector', + oprresult => 'bool', oprcom => '>(tsvector,tsvector)', + oprnegate => '>=(tsvector,tsvector)', oprcode => 'tsvector_lt', + oprrest => 'scalarltsel', oprjoin => 'scalarltjoinsel' }, +{ oid => '3628', descr => 'less than or equal', + oprname => '<=', oprleft => 'tsvector', 
oprright => 'tsvector', + oprresult => 'bool', oprcom => '>=(tsvector,tsvector)', + oprnegate => '>(tsvector,tsvector)', oprcode => 'tsvector_le', + oprrest => 'scalarlesel', oprjoin => 'scalarlejoinsel' }, +{ oid => '3629', descr => 'equal', + oprname => '=', oprcanmerge => 't', oprleft => 'tsvector', + oprright => 'tsvector', oprresult => 'bool', oprcom => '=(tsvector,tsvector)', + oprnegate => '<>(tsvector,tsvector)', oprcode => 'tsvector_eq', + oprrest => 'eqsel', oprjoin => 'eqjoinsel' }, +{ oid => '3630', descr => 'not equal', + oprname => '<>', oprleft => 'tsvector', oprright => 'tsvector', + oprresult => 'bool', oprcom => '<>(tsvector,tsvector)', + oprnegate => '=(tsvector,tsvector)', oprcode => 'tsvector_ne', + oprrest => 'neqsel', oprjoin => 'neqjoinsel' }, +{ oid => '3631', descr => 'greater than or equal', + oprname => '>=', oprleft => 'tsvector', oprright => 'tsvector', + oprresult => 'bool', oprcom => '<=(tsvector,tsvector)', + oprnegate => '<(tsvector,tsvector)', oprcode => 'tsvector_ge', + oprrest => 'scalargesel', oprjoin => 'scalargejoinsel' }, +{ oid => '3632', descr => 'greater than', + oprname => '>', oprleft => 'tsvector', oprright => 'tsvector', + oprresult => 'bool', oprcom => '<(tsvector,tsvector)', + oprnegate => '<=(tsvector,tsvector)', oprcode => 'tsvector_gt', + oprrest => 'scalargtsel', oprjoin => 'scalargtjoinsel' }, +{ oid => '3633', descr => 'concatenate', + oprname => '||', oprleft => 'tsvector', oprright => 'tsvector', + oprresult => 'tsvector', oprcode => 'tsvector_concat' }, +{ oid => '3636', descr => 'text search match', + oprname => '@@', oprleft => 'tsvector', oprright => 'tsquery', + oprresult => 'bool', oprcom => '@@(tsquery,tsvector)', + oprcode => 'ts_match_vq', oprrest => 'tsmatchsel', + oprjoin => 'tsmatchjoinsel' }, +{ oid => '3637', descr => 'text search match', + oprname => '@@', oprleft => 'tsquery', oprright => 'tsvector', + oprresult => 'bool', oprcom => '@@(tsvector,tsquery)', + oprcode => 'ts_match_qv', oprrest => 'tsmatchsel', + oprjoin => 'tsmatchjoinsel' }, +{ oid => '3660', descr => 'deprecated, use @@ instead', + oprname => '@@@', oprleft => 'tsvector', oprright => 'tsquery', + oprresult => 'bool', oprcom => '@@@(tsquery,tsvector)', + oprcode => 'ts_match_vq', oprrest => 'tsmatchsel', + oprjoin => 'tsmatchjoinsel' }, +{ oid => '3661', descr => 'deprecated, use @@ instead', + oprname => '@@@', oprleft => 'tsquery', oprright => 'tsvector', + oprresult => 'bool', oprcom => '@@@(tsvector,tsquery)', + oprcode => 'ts_match_qv', oprrest => 'tsmatchsel', + oprjoin => 'tsmatchjoinsel' }, +{ oid => '3674', descr => 'less than', + oprname => '<', oprleft => 'tsquery', oprright => 'tsquery', + oprresult => 'bool', oprcom => '>(tsquery,tsquery)', + oprnegate => '>=(tsquery,tsquery)', oprcode => 'tsquery_lt', + oprrest => 'scalarltsel', oprjoin => 'scalarltjoinsel' }, +{ oid => '3675', descr => 'less than or equal', + oprname => '<=', oprleft => 'tsquery', oprright => 'tsquery', + oprresult => 'bool', oprcom => '>=(tsquery,tsquery)', + oprnegate => '>(tsquery,tsquery)', oprcode => 'tsquery_le', + oprrest => 'scalarlesel', oprjoin => 'scalarlejoinsel' }, +{ oid => '3676', descr => 'equal', + oprname => '=', oprcanmerge => 't', oprleft => 'tsquery', + oprright => 'tsquery', oprresult => 'bool', oprcom => '=(tsquery,tsquery)', + oprnegate => '<>(tsquery,tsquery)', oprcode => 'tsquery_eq', + oprrest => 'eqsel', oprjoin => 'eqjoinsel' }, +{ oid => '3677', descr => 'not equal', + oprname => '<>', oprleft => 'tsquery', oprright => 'tsquery', + 
oprresult => 'bool', oprcom => '<>(tsquery,tsquery)', + oprnegate => '=(tsquery,tsquery)', oprcode => 'tsquery_ne', + oprrest => 'neqsel', oprjoin => 'neqjoinsel' }, +{ oid => '3678', descr => 'greater than or equal', + oprname => '>=', oprleft => 'tsquery', oprright => 'tsquery', + oprresult => 'bool', oprcom => '<=(tsquery,tsquery)', + oprnegate => '<(tsquery,tsquery)', oprcode => 'tsquery_ge', + oprrest => 'scalargesel', oprjoin => 'scalargejoinsel' }, +{ oid => '3679', descr => 'greater than', + oprname => '>', oprleft => 'tsquery', oprright => 'tsquery', + oprresult => 'bool', oprcom => '<(tsquery,tsquery)', + oprnegate => '<=(tsquery,tsquery)', oprcode => 'tsquery_gt', + oprrest => 'scalargtsel', oprjoin => 'scalargtjoinsel' }, +{ oid => '3680', descr => 'AND-concatenate', + oprname => '&&', oprleft => 'tsquery', oprright => 'tsquery', + oprresult => 'tsquery', oprcode => 'tsquery_and' }, +{ oid => '3681', descr => 'OR-concatenate', + oprname => '||', oprleft => 'tsquery', oprright => 'tsquery', + oprresult => 'tsquery', oprcode => 'tsquery_or' }, +{ oid => '5005', descr => 'phrase-concatenate', + oprname => '<->', oprleft => 'tsquery', oprright => 'tsquery', + oprresult => 'tsquery', oprcode => 'tsquery_phrase(tsquery,tsquery)' }, +{ oid => '3682', descr => 'NOT tsquery', + oprname => '!!', oprkind => 'l', oprleft => '0', oprright => 'tsquery', + oprresult => 'tsquery', oprcode => 'tsquery_not' }, +{ oid => '3693', descr => 'contains', + oprname => '@>', oprleft => 'tsquery', oprright => 'tsquery', + oprresult => 'bool', oprcom => '<@(tsquery,tsquery)', + oprcode => 'tsq_mcontains', oprrest => 'contsel', oprjoin => 'contjoinsel' }, +{ oid => '3694', descr => 'is contained by', + oprname => '<@', oprleft => 'tsquery', oprright => 'tsquery', + oprresult => 'bool', oprcom => '@>(tsquery,tsquery)', + oprcode => 'tsq_mcontained', oprrest => 'contsel', oprjoin => 'contjoinsel' }, +{ oid => '3762', descr => 'text search match', + oprname => '@@', oprleft => 'text', oprright => 'text', oprresult => 'bool', + oprcode => 'ts_match_tt', oprrest => 'contsel', oprjoin => 'contjoinsel' }, +{ oid => '3763', descr => 'text search match', + oprname => '@@', oprleft => 'text', oprright => 'tsquery', + oprresult => 'bool', oprcode => 'ts_match_tq', oprrest => 'contsel', + oprjoin => 'contjoinsel' }, + +# generic record comparison operators +{ oid => '2988', oid_symbol => 'RECORD_EQ_OP', descr => 'equal', + oprname => '=', oprcanmerge => 't', oprleft => 'record', oprright => 'record', + oprresult => 'bool', oprcom => '=(record,record)', + oprnegate => '<>(record,record)', oprcode => 'record_eq', oprrest => 'eqsel', + oprjoin => 'eqjoinsel' }, +{ oid => '2989', descr => 'not equal', + oprname => '<>', oprleft => 'record', oprright => 'record', + oprresult => 'bool', oprcom => '<>(record,record)', + oprnegate => '=(record,record)', oprcode => 'record_ne', oprrest => 'neqsel', + oprjoin => 'neqjoinsel' }, +{ oid => '2990', oid_symbol => 'RECORD_LT_OP', descr => 'less than', + oprname => '<', oprleft => 'record', oprright => 'record', + oprresult => 'bool', oprcom => '>(record,record)', + oprnegate => '>=(record,record)', oprcode => 'record_lt', + oprrest => 'scalarltsel', oprjoin => 'scalarltjoinsel' }, +{ oid => '2991', oid_symbol => 'RECORD_GT_OP', descr => 'greater than', + oprname => '>', oprleft => 'record', oprright => 'record', + oprresult => 'bool', oprcom => '<(record,record)', + oprnegate => '<=(record,record)', oprcode => 'record_gt', + oprrest => 'scalargtsel', oprjoin => 'scalargtjoinsel' }, 
+{ oid => '2992', descr => 'less than or equal', + oprname => '<=', oprleft => 'record', oprright => 'record', + oprresult => 'bool', oprcom => '>=(record,record)', + oprnegate => '>(record,record)', oprcode => 'record_le', + oprrest => 'scalarlesel', oprjoin => 'scalarlejoinsel' }, +{ oid => '2993', descr => 'greater than or equal', + oprname => '>=', oprleft => 'record', oprright => 'record', + oprresult => 'bool', oprcom => '<=(record,record)', + oprnegate => '<(record,record)', oprcode => 'record_ge', + oprrest => 'scalargesel', oprjoin => 'scalargejoinsel' }, + +# byte-oriented tests for identical rows and fast sorting +{ oid => '3188', descr => 'identical', + oprname => '*=', oprcanmerge => 't', oprleft => 'record', + oprright => 'record', oprresult => 'bool', oprcom => '*=(record,record)', + oprnegate => '*<>(record,record)', oprcode => 'record_image_eq', + oprrest => 'eqsel', oprjoin => 'eqjoinsel' }, +{ oid => '3189', descr => 'not identical', + oprname => '*<>', oprleft => 'record', oprright => 'record', + oprresult => 'bool', oprcom => '*<>(record,record)', + oprnegate => '*=(record,record)', oprcode => 'record_image_ne', + oprrest => 'neqsel', oprjoin => 'neqjoinsel' }, +{ oid => '3190', descr => 'less than', + oprname => '*<', oprleft => 'record', oprright => 'record', + oprresult => 'bool', oprcom => '*>(record,record)', + oprnegate => '*>=(record,record)', oprcode => 'record_image_lt', + oprrest => 'scalarltsel', oprjoin => 'scalarltjoinsel' }, +{ oid => '3191', descr => 'greater than', + oprname => '*>', oprleft => 'record', oprright => 'record', + oprresult => 'bool', oprcom => '*<(record,record)', + oprnegate => '*<=(record,record)', oprcode => 'record_image_gt', + oprrest => 'scalargtsel', oprjoin => 'scalargtjoinsel' }, +{ oid => '3192', descr => 'less than or equal', + oprname => '*<=', oprleft => 'record', oprright => 'record', + oprresult => 'bool', oprcom => '*>=(record,record)', + oprnegate => '*>(record,record)', oprcode => 'record_image_le', + oprrest => 'scalarlesel', oprjoin => 'scalarlejoinsel' }, +{ oid => '3193', descr => 'greater than or equal', + oprname => '*>=', oprleft => 'record', oprright => 'record', + oprresult => 'bool', oprcom => '*<=(record,record)', + oprnegate => '*<(record,record)', oprcode => 'record_image_ge', + oprrest => 'scalargesel', oprjoin => 'scalargejoinsel' }, + +# generic range type operators +{ oid => '3882', descr => 'equal', + oprname => '=', oprcanmerge => 't', oprcanhash => 't', oprleft => 'anyrange', + oprright => 'anyrange', oprresult => 'bool', oprcom => '=(anyrange,anyrange)', + oprnegate => '<>(anyrange,anyrange)', oprcode => 'range_eq', + oprrest => 'eqsel', oprjoin => 'eqjoinsel' }, +{ oid => '3883', descr => 'not equal', + oprname => '<>', oprleft => 'anyrange', oprright => 'anyrange', + oprresult => 'bool', oprcom => '<>(anyrange,anyrange)', + oprnegate => '=(anyrange,anyrange)', oprcode => 'range_ne', + oprrest => 'neqsel', oprjoin => 'neqjoinsel' }, +{ oid => '3884', oid_symbol => 'OID_RANGE_LESS_OP', descr => 'less than', + oprname => '<', oprleft => 'anyrange', oprright => 'anyrange', + oprresult => 'bool', oprcom => '>(anyrange,anyrange)', + oprnegate => '>=(anyrange,anyrange)', oprcode => 'range_lt', + oprrest => 'rangesel', oprjoin => 'scalarltjoinsel' }, +{ oid => '3885', oid_symbol => 'OID_RANGE_LESS_EQUAL_OP', + descr => 'less than or equal', + oprname => '<=', oprleft => 'anyrange', oprright => 'anyrange', + oprresult => 'bool', oprcom => '>=(anyrange,anyrange)', + oprnegate => '>(anyrange,anyrange)', 
oprcode => 'range_le', + oprrest => 'rangesel', oprjoin => 'scalarlejoinsel' }, +{ oid => '3886', oid_symbol => 'OID_RANGE_GREATER_EQUAL_OP', + descr => 'greater than or equal', + oprname => '>=', oprleft => 'anyrange', oprright => 'anyrange', + oprresult => 'bool', oprcom => '<=(anyrange,anyrange)', + oprnegate => '<(anyrange,anyrange)', oprcode => 'range_ge', + oprrest => 'rangesel', oprjoin => 'scalargejoinsel' }, +{ oid => '3887', oid_symbol => 'OID_RANGE_GREATER_OP', + descr => 'greater than', + oprname => '>', oprleft => 'anyrange', oprright => 'anyrange', + oprresult => 'bool', oprcom => '<(anyrange,anyrange)', + oprnegate => '<=(anyrange,anyrange)', oprcode => 'range_gt', + oprrest => 'rangesel', oprjoin => 'scalargtjoinsel' }, +{ oid => '3888', oid_symbol => 'OID_RANGE_OVERLAP_OP', descr => 'overlaps', + oprname => '&&', oprleft => 'anyrange', oprright => 'anyrange', + oprresult => 'bool', oprcom => '&&(anyrange,anyrange)', + oprcode => 'range_overlaps', oprrest => 'rangesel', + oprjoin => 'areajoinsel' }, +{ oid => '3889', oid_symbol => 'OID_RANGE_CONTAINS_ELEM_OP', + descr => 'contains', + oprname => '@>', oprleft => 'anyrange', oprright => 'anyelement', + oprresult => 'bool', oprcom => '<@(anyelement,anyrange)', + oprcode => 'range_contains_elem', oprrest => 'rangesel', + oprjoin => 'contjoinsel' }, +{ oid => '3890', oid_symbol => 'OID_RANGE_CONTAINS_OP', descr => 'contains', + oprname => '@>', oprleft => 'anyrange', oprright => 'anyrange', + oprresult => 'bool', oprcom => '<@(anyrange,anyrange)', + oprcode => 'range_contains', oprrest => 'rangesel', + oprjoin => 'contjoinsel' }, +{ oid => '3891', oid_symbol => 'OID_RANGE_ELEM_CONTAINED_OP', + descr => 'is contained by', + oprname => '<@', oprleft => 'anyelement', oprright => 'anyrange', + oprresult => 'bool', oprcom => '@>(anyrange,anyelement)', + oprcode => 'elem_contained_by_range', oprrest => 'rangesel', + oprjoin => 'contjoinsel' }, +{ oid => '3892', oid_symbol => 'OID_RANGE_CONTAINED_OP', + descr => 'is contained by', + oprname => '<@', oprleft => 'anyrange', oprright => 'anyrange', + oprresult => 'bool', oprcom => '@>(anyrange,anyrange)', + oprcode => 'range_contained_by', oprrest => 'rangesel', + oprjoin => 'contjoinsel' }, +{ oid => '3893', oid_symbol => 'OID_RANGE_LEFT_OP', descr => 'is left of', + oprname => '<<', oprleft => 'anyrange', oprright => 'anyrange', + oprresult => 'bool', oprcom => '>>(anyrange,anyrange)', + oprcode => 'range_before', oprrest => 'rangesel', + oprjoin => 'scalarltjoinsel' }, +{ oid => '3894', oid_symbol => 'OID_RANGE_RIGHT_OP', descr => 'is right of', + oprname => '>>', oprleft => 'anyrange', oprright => 'anyrange', + oprresult => 'bool', oprcom => '<<(anyrange,anyrange)', + oprcode => 'range_after', oprrest => 'rangesel', + oprjoin => 'scalargtjoinsel' }, +{ oid => '3895', oid_symbol => 'OID_RANGE_OVERLAPS_LEFT_OP', + descr => 'overlaps or is left of', + oprname => '&<', oprleft => 'anyrange', oprright => 'anyrange', + oprresult => 'bool', oprcode => 'range_overleft', oprrest => 'rangesel', + oprjoin => 'scalarltjoinsel' }, +{ oid => '3896', oid_symbol => 'OID_RANGE_OVERLAPS_RIGHT_OP', + descr => 'overlaps or is right of', + oprname => '&>', oprleft => 'anyrange', oprright => 'anyrange', + oprresult => 'bool', oprcode => 'range_overright', oprrest => 'rangesel', + oprjoin => 'scalargtjoinsel' }, +{ oid => '3897', descr => 'is adjacent to', + oprname => '-|-', oprleft => 'anyrange', oprright => 'anyrange', + oprresult => 'bool', oprcom => '-|-(anyrange,anyrange)', + oprcode => 
'range_adjacent', oprrest => 'contsel', oprjoin => 'contjoinsel' }, +{ oid => '3898', descr => 'range union', + oprname => '+', oprleft => 'anyrange', oprright => 'anyrange', + oprresult => 'anyrange', oprcom => '+(anyrange,anyrange)', + oprcode => 'range_union' }, +{ oid => '3899', descr => 'range difference', + oprname => '-', oprleft => 'anyrange', oprright => 'anyrange', + oprresult => 'anyrange', oprcode => 'range_minus' }, +{ oid => '3900', descr => 'range intersection', + oprname => '*', oprleft => 'anyrange', oprright => 'anyrange', + oprresult => 'anyrange', oprcom => '*(anyrange,anyrange)', + oprcode => 'range_intersect' }, +{ oid => '3962', descr => 'get json object field', + oprname => '->', oprleft => 'json', oprright => 'text', oprresult => 'json', + oprcode => 'json_object_field' }, +{ oid => '3963', descr => 'get json object field as text', + oprname => '->>', oprleft => 'json', oprright => 'text', oprresult => 'text', + oprcode => 'json_object_field_text' }, +{ oid => '3964', descr => 'get json array element', + oprname => '->', oprleft => 'json', oprright => 'int4', oprresult => 'json', + oprcode => 'json_array_element' }, +{ oid => '3965', descr => 'get json array element as text', + oprname => '->>', oprleft => 'json', oprright => 'int4', oprresult => 'text', + oprcode => 'json_array_element_text' }, +{ oid => '3966', descr => 'get value from json with path elements', + oprname => '#>', oprleft => 'json', oprright => '_text', oprresult => 'json', + oprcode => 'json_extract_path' }, +{ oid => '3967', descr => 'get value from json as text with path elements', + oprname => '#>>', oprleft => 'json', oprright => '_text', oprresult => 'text', + oprcode => 'json_extract_path_text' }, +{ oid => '3211', descr => 'get jsonb object field', + oprname => '->', oprleft => 'jsonb', oprright => 'text', oprresult => 'jsonb', + oprcode => 'jsonb_object_field' }, +{ oid => '3477', descr => 'get jsonb object field as text', + oprname => '->>', oprleft => 'jsonb', oprright => 'text', oprresult => 'text', + oprcode => 'jsonb_object_field_text' }, +{ oid => '3212', descr => 'get jsonb array element', + oprname => '->', oprleft => 'jsonb', oprright => 'int4', oprresult => 'jsonb', + oprcode => 'jsonb_array_element' }, +{ oid => '3481', descr => 'get jsonb array element as text', + oprname => '->>', oprleft => 'jsonb', oprright => 'int4', oprresult => 'text', + oprcode => 'jsonb_array_element_text' }, +{ oid => '3213', descr => 'get value from jsonb with path elements', + oprname => '#>', oprleft => 'jsonb', oprright => '_text', + oprresult => 'jsonb', oprcode => 'jsonb_extract_path' }, +{ oid => '3206', descr => 'get value from jsonb as text with path elements', + oprname => '#>>', oprleft => 'jsonb', oprright => '_text', + oprresult => 'text', oprcode => 'jsonb_extract_path_text' }, +{ oid => '3240', descr => 'equal', + oprname => '=', oprcanmerge => 't', oprcanhash => 't', oprleft => 'jsonb', + oprright => 'jsonb', oprresult => 'bool', oprcom => '=(jsonb,jsonb)', + oprnegate => '<>(jsonb,jsonb)', oprcode => 'jsonb_eq', oprrest => 'eqsel', + oprjoin => 'eqjoinsel' }, +{ oid => '3241', descr => 'not equal', + oprname => '<>', oprleft => 'jsonb', oprright => 'jsonb', oprresult => 'bool', + oprcom => '<>(jsonb,jsonb)', oprnegate => '=(jsonb,jsonb)', + oprcode => 'jsonb_ne', oprrest => 'neqsel', oprjoin => 'neqjoinsel' }, +{ oid => '3242', descr => 'less than', + oprname => '<', oprleft => 'jsonb', oprright => 'jsonb', oprresult => 'bool', + oprcom => '>(jsonb,jsonb)', oprnegate => 
'>=(jsonb,jsonb)', + oprcode => 'jsonb_lt', oprrest => 'scalarltsel', + oprjoin => 'scalarltjoinsel' }, +{ oid => '3243', descr => 'greater than', + oprname => '>', oprleft => 'jsonb', oprright => 'jsonb', oprresult => 'bool', + oprcom => '<(jsonb,jsonb)', oprnegate => '<=(jsonb,jsonb)', + oprcode => 'jsonb_gt', oprrest => 'scalargtsel', + oprjoin => 'scalargtjoinsel' }, +{ oid => '3244', descr => 'less than or equal', + oprname => '<=', oprleft => 'jsonb', oprright => 'jsonb', oprresult => 'bool', + oprcom => '>=(jsonb,jsonb)', oprnegate => '>(jsonb,jsonb)', + oprcode => 'jsonb_le', oprrest => 'scalarlesel', + oprjoin => 'scalarlejoinsel' }, +{ oid => '3245', descr => 'greater than or equal', + oprname => '>=', oprleft => 'jsonb', oprright => 'jsonb', oprresult => 'bool', + oprcom => '<=(jsonb,jsonb)', oprnegate => '<(jsonb,jsonb)', + oprcode => 'jsonb_ge', oprrest => 'scalargesel', + oprjoin => 'scalargejoinsel' }, +{ oid => '3246', descr => 'contains', + oprname => '@>', oprleft => 'jsonb', oprright => 'jsonb', oprresult => 'bool', + oprcom => '<@(jsonb,jsonb)', oprcode => 'jsonb_contains', + oprrest => 'contsel', oprjoin => 'contjoinsel' }, +{ oid => '3247', descr => 'key exists', + oprname => '?', oprleft => 'jsonb', oprright => 'text', oprresult => 'bool', + oprcode => 'jsonb_exists', oprrest => 'contsel', oprjoin => 'contjoinsel' }, +{ oid => '3248', descr => 'any key exists', + oprname => '?|', oprleft => 'jsonb', oprright => '_text', oprresult => 'bool', + oprcode => 'jsonb_exists_any', oprrest => 'contsel', + oprjoin => 'contjoinsel' }, +{ oid => '3249', descr => 'all keys exist', + oprname => '?&', oprleft => 'jsonb', oprright => '_text', oprresult => 'bool', + oprcode => 'jsonb_exists_all', oprrest => 'contsel', + oprjoin => 'contjoinsel' }, +{ oid => '3250', descr => 'is contained by', + oprname => '<@', oprleft => 'jsonb', oprright => 'jsonb', oprresult => 'bool', + oprcom => '@>(jsonb,jsonb)', oprcode => 'jsonb_contained', + oprrest => 'contsel', oprjoin => 'contjoinsel' }, +{ oid => '3284', descr => 'concatenate', + oprname => '||', oprleft => 'jsonb', oprright => 'jsonb', + oprresult => 'jsonb', oprcode => 'jsonb_concat' }, +{ oid => '3285', descr => 'delete object field', + oprname => '-', oprleft => 'jsonb', oprright => 'text', oprresult => 'jsonb', + oprcode => 'jsonb_delete(jsonb,text)' }, +{ oid => '3398', descr => 'delete object fields', + oprname => '-', oprleft => 'jsonb', oprright => '_text', oprresult => 'jsonb', + oprcode => 'jsonb_delete(jsonb,_text)' }, +{ oid => '3286', descr => 'delete array element', + oprname => '-', oprleft => 'jsonb', oprright => 'int4', oprresult => 'jsonb', + oprcode => 'jsonb_delete(jsonb,int4)' }, +{ oid => '3287', descr => 'delete path', + oprname => '#-', oprleft => 'jsonb', oprright => '_text', + oprresult => 'jsonb', oprcode => 'jsonb_delete_path' }, + +] diff --git a/src/include/catalog/pg_operator.h b/src/include/catalog/pg_operator.h index ffabc2003b..3212b21418 100644 --- a/src/include/catalog/pg_operator.h +++ b/src/include/catalog/pg_operator.h @@ -1,21 +1,17 @@ /*------------------------------------------------------------------------- * * pg_operator.h - * definition of the system "operator" relation (pg_operator) - * along with the relation's initial contents. 
+ * definition of the "operator" system catalog (pg_operator) * * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/include/catalog/pg_operator.h * * NOTES - * the genbki.pl script reads this file and generates .bki - * information from the DATA() statements. - * - * XXX do NOT break up DATA() statements into multiple lines! - * the scripts are not as smart as you might think... + * The Catalog.pm module reads this file and derives schema + * information. * *------------------------------------------------------------------------- */ @@ -23,1835 +19,83 @@ #define PG_OPERATOR_H #include "catalog/genbki.h" +#include "catalog/pg_operator_d.h" + +#include "catalog/objectaddress.h" +#include "nodes/pg_list.h" /* ---------------- * pg_operator definition. cpp turns this into * typedef struct FormData_pg_operator * ---------------- */ -#define OperatorRelationId 2617 - -CATALOG(pg_operator,2617) +CATALOG(pg_operator,2617,OperatorRelationId) { - NameData oprname; /* name of operator */ - Oid oprnamespace; /* OID of namespace containing this oper */ - Oid oprowner; /* operator owner */ - char oprkind; /* 'l', 'r', or 'b' */ - bool oprcanmerge; /* can be used in merge join? */ - bool oprcanhash; /* can be used in hash join? */ - Oid oprleft; /* left arg type, or 0 if 'l' oprkind */ - Oid oprright; /* right arg type, or 0 if 'r' oprkind */ - Oid oprresult; /* result datatype */ - Oid oprcom; /* OID of commutator oper, or 0 if none */ - Oid oprnegate; /* OID of negator oper, or 0 if none */ - regproc oprcode; /* OID of underlying function */ - regproc oprrest; /* OID of restriction estimator, or 0 */ - regproc oprjoin; /* OID of join estimator, or 0 */ -} FormData_pg_operator; - -/* ---------------- - * Form_pg_operator corresponds to a pointer to a tuple with - * the format of pg_operator relation. - * ---------------- - */ -typedef FormData_pg_operator *Form_pg_operator; - -/* ---------------- - * compiler constants for pg_operator - * ---------------- - */ - -#define Natts_pg_operator 14 -#define Anum_pg_operator_oprname 1 -#define Anum_pg_operator_oprnamespace 2 -#define Anum_pg_operator_oprowner 3 -#define Anum_pg_operator_oprkind 4 -#define Anum_pg_operator_oprcanmerge 5 -#define Anum_pg_operator_oprcanhash 6 -#define Anum_pg_operator_oprleft 7 -#define Anum_pg_operator_oprright 8 -#define Anum_pg_operator_oprresult 9 -#define Anum_pg_operator_oprcom 10 -#define Anum_pg_operator_oprnegate 11 -#define Anum_pg_operator_oprcode 12 -#define Anum_pg_operator_oprrest 13 -#define Anum_pg_operator_oprjoin 14 - -/* ---------------- - * initial contents of pg_operator - * ---------------- - */ - -/* - * Note: every entry in pg_operator.h is expected to have a DESCR() comment. - * If the operator is a deprecated equivalent of some other entry, be sure - * to comment it as such so that initdb doesn't think it's a preferred name - * for the underlying function. 
- */ - -DATA(insert OID = 15 ( "=" PGNSP PGUID b t t 23 20 16 416 36 int48eq eqsel eqjoinsel )); -DESCR("equal"); -DATA(insert OID = 36 ( "<>" PGNSP PGUID b f f 23 20 16 417 15 int48ne neqsel neqjoinsel )); -DESCR("not equal"); -DATA(insert OID = 37 ( "<" PGNSP PGUID b f f 23 20 16 419 82 int48lt scalarltsel scalarltjoinsel )); -DESCR("less than"); -DATA(insert OID = 76 ( ">" PGNSP PGUID b f f 23 20 16 418 80 int48gt scalargtsel scalargtjoinsel )); -DESCR("greater than"); -DATA(insert OID = 80 ( "<=" PGNSP PGUID b f f 23 20 16 430 76 int48le scalarltsel scalarltjoinsel )); -DESCR("less than or equal"); -DATA(insert OID = 82 ( ">=" PGNSP PGUID b f f 23 20 16 420 37 int48ge scalargtsel scalargtjoinsel )); -DESCR("greater than or equal"); - -DATA(insert OID = 58 ( "<" PGNSP PGUID b f f 16 16 16 59 1695 boollt scalarltsel scalarltjoinsel )); -DESCR("less than"); -DATA(insert OID = 59 ( ">" PGNSP PGUID b f f 16 16 16 58 1694 boolgt scalargtsel scalargtjoinsel )); -DESCR("greater than"); -DATA(insert OID = 85 ( "<>" PGNSP PGUID b f f 16 16 16 85 91 boolne neqsel neqjoinsel )); -DESCR("not equal"); -#define BooleanNotEqualOperator 85 -DATA(insert OID = 91 ( "=" PGNSP PGUID b t t 16 16 16 91 85 booleq eqsel eqjoinsel )); -DESCR("equal"); -#define BooleanEqualOperator 91 -DATA(insert OID = 1694 ( "<=" PGNSP PGUID b f f 16 16 16 1695 59 boolle scalarltsel scalarltjoinsel )); -DESCR("less than or equal"); -DATA(insert OID = 1695 ( ">=" PGNSP PGUID b f f 16 16 16 1694 58 boolge scalargtsel scalargtjoinsel )); -DESCR("greater than or equal"); - -DATA(insert OID = 92 ( "=" PGNSP PGUID b t t 18 18 16 92 630 chareq eqsel eqjoinsel )); -DESCR("equal"); -DATA(insert OID = 93 ( "=" PGNSP PGUID b t t 19 19 16 93 643 nameeq eqsel eqjoinsel )); -DESCR("equal"); -DATA(insert OID = 94 ( "=" PGNSP PGUID b t t 21 21 16 94 519 int2eq eqsel eqjoinsel )); -DESCR("equal"); -DATA(insert OID = 95 ( "<" PGNSP PGUID b f f 21 21 16 520 524 int2lt scalarltsel scalarltjoinsel )); -DESCR("less than"); -DATA(insert OID = 96 ( "=" PGNSP PGUID b t t 23 23 16 96 518 int4eq eqsel eqjoinsel )); -DESCR("equal"); -#define Int4EqualOperator 96 -DATA(insert OID = 97 ( "<" PGNSP PGUID b f f 23 23 16 521 525 int4lt scalarltsel scalarltjoinsel )); -DESCR("less than"); -#define Int4LessOperator 97 -DATA(insert OID = 98 ( "=" PGNSP PGUID b t t 25 25 16 98 531 texteq eqsel eqjoinsel )); -DESCR("equal"); -#define TextEqualOperator 98 - -DATA(insert OID = 349 ( "||" PGNSP PGUID b f f 2277 2283 2277 0 0 array_append - - )); -DESCR("append element onto end of array"); -DATA(insert OID = 374 ( "||" PGNSP PGUID b f f 2283 2277 2277 0 0 array_prepend - - )); -DESCR("prepend element onto front of array"); -DATA(insert OID = 375 ( "||" PGNSP PGUID b f f 2277 2277 2277 0 0 array_cat - - )); -DESCR("concatenate"); - -DATA(insert OID = 352 ( "=" PGNSP PGUID b f t 28 28 16 352 3315 xideq eqsel eqjoinsel )); -DESCR("equal"); -DATA(insert OID = 353 ( "=" PGNSP PGUID b f f 28 23 16 0 3316 xideqint4 eqsel eqjoinsel )); -DESCR("equal"); -DATA(insert OID = 3315 ( "<>" PGNSP PGUID b f f 28 28 16 3315 352 xidneq neqsel neqjoinsel )); -DESCR("not equal"); -DATA(insert OID = 3316 ( "<>" PGNSP PGUID b f f 28 23 16 0 353 xidneqint4 neqsel neqjoinsel )); -DESCR("not equal"); -DATA(insert OID = 388 ( "!" PGNSP PGUID r f f 20 0 1700 0 0 numeric_fac - - )); -DESCR("factorial"); -DATA(insert OID = 389 ( "!!" PGNSP PGUID l f f 0 20 1700 0 0 numeric_fac - - )); -DESCR("deprecated, use ! 
instead"); -DATA(insert OID = 385 ( "=" PGNSP PGUID b f t 29 29 16 385 0 cideq eqsel eqjoinsel )); -DESCR("equal"); - -DATA(insert OID = 387 ( "=" PGNSP PGUID b t f 27 27 16 387 402 tideq eqsel eqjoinsel )); -DESCR("equal"); -#define TIDEqualOperator 387 -DATA(insert OID = 402 ( "<>" PGNSP PGUID b f f 27 27 16 402 387 tidne neqsel neqjoinsel )); -DESCR("not equal"); -DATA(insert OID = 2799 ( "<" PGNSP PGUID b f f 27 27 16 2800 2802 tidlt scalarltsel scalarltjoinsel )); -DESCR("less than"); -#define TIDLessOperator 2799 -DATA(insert OID = 2800 ( ">" PGNSP PGUID b f f 27 27 16 2799 2801 tidgt scalargtsel scalargtjoinsel )); -DESCR("greater than"); -DATA(insert OID = 2801 ( "<=" PGNSP PGUID b f f 27 27 16 2802 2800 tidle scalarltsel scalarltjoinsel )); -DESCR("less than or equal"); -DATA(insert OID = 2802 ( ">=" PGNSP PGUID b f f 27 27 16 2801 2799 tidge scalargtsel scalargtjoinsel )); -DESCR("greater than or equal"); - -DATA(insert OID = 410 ( "=" PGNSP PGUID b t t 20 20 16 410 411 int8eq eqsel eqjoinsel )); -DESCR("equal"); -DATA(insert OID = 411 ( "<>" PGNSP PGUID b f f 20 20 16 411 410 int8ne neqsel neqjoinsel )); -DESCR("not equal"); -DATA(insert OID = 412 ( "<" PGNSP PGUID b f f 20 20 16 413 415 int8lt scalarltsel scalarltjoinsel )); -DESCR("less than"); -#define Int8LessOperator 412 -DATA(insert OID = 413 ( ">" PGNSP PGUID b f f 20 20 16 412 414 int8gt scalargtsel scalargtjoinsel )); -DESCR("greater than"); -DATA(insert OID = 414 ( "<=" PGNSP PGUID b f f 20 20 16 415 413 int8le scalarltsel scalarltjoinsel )); -DESCR("less than or equal"); -DATA(insert OID = 415 ( ">=" PGNSP PGUID b f f 20 20 16 414 412 int8ge scalargtsel scalargtjoinsel )); -DESCR("greater than or equal"); - -DATA(insert OID = 416 ( "=" PGNSP PGUID b t t 20 23 16 15 417 int84eq eqsel eqjoinsel )); -DESCR("equal"); -DATA(insert OID = 417 ( "<>" PGNSP PGUID b f f 20 23 16 36 416 int84ne neqsel neqjoinsel )); -DESCR("not equal"); -DATA(insert OID = 418 ( "<" PGNSP PGUID b f f 20 23 16 76 430 int84lt scalarltsel scalarltjoinsel )); -DESCR("less than"); -DATA(insert OID = 419 ( ">" PGNSP PGUID b f f 20 23 16 37 420 int84gt scalargtsel scalargtjoinsel )); -DESCR("greater than"); -DATA(insert OID = 420 ( "<=" PGNSP PGUID b f f 20 23 16 82 419 int84le scalarltsel scalarltjoinsel )); -DESCR("less than or equal"); -DATA(insert OID = 430 ( ">=" PGNSP PGUID b f f 20 23 16 80 418 int84ge scalargtsel scalargtjoinsel )); -DESCR("greater than or equal"); -DATA(insert OID = 439 ( "%" PGNSP PGUID b f f 20 20 20 0 0 int8mod - - )); -DESCR("modulus"); -DATA(insert OID = 473 ( "@" PGNSP PGUID l f f 0 20 20 0 0 int8abs - - )); -DESCR("absolute value"); - -DATA(insert OID = 484 ( "-" PGNSP PGUID l f f 0 20 20 0 0 int8um - - )); -DESCR("negate"); -DATA(insert OID = 485 ( "<<" PGNSP PGUID b f f 604 604 16 0 0 poly_left positionsel positionjoinsel )); -DESCR("is left of"); -DATA(insert OID = 486 ( "&<" PGNSP PGUID b f f 604 604 16 0 0 poly_overleft positionsel positionjoinsel )); -DESCR("overlaps or is left of"); -DATA(insert OID = 487 ( "&>" PGNSP PGUID b f f 604 604 16 0 0 poly_overright positionsel positionjoinsel )); -DESCR("overlaps or is right of"); -DATA(insert OID = 488 ( ">>" PGNSP PGUID b f f 604 604 16 0 0 poly_right positionsel positionjoinsel )); -DESCR("is right of"); -DATA(insert OID = 489 ( "<@" PGNSP PGUID b f f 604 604 16 490 0 poly_contained contsel contjoinsel )); -DESCR("is contained by"); -DATA(insert OID = 490 ( "@>" PGNSP PGUID b f f 604 604 16 489 0 poly_contain contsel contjoinsel )); -DESCR("contains"); -DATA(insert 
OID = 491 ( "~=" PGNSP PGUID b f f 604 604 16 491 0 poly_same eqsel eqjoinsel )); -DESCR("same as"); -DATA(insert OID = 492 ( "&&" PGNSP PGUID b f f 604 604 16 492 0 poly_overlap areasel areajoinsel )); -DESCR("overlaps"); -DATA(insert OID = 493 ( "<<" PGNSP PGUID b f f 603 603 16 0 0 box_left positionsel positionjoinsel )); -DESCR("is left of"); -DATA(insert OID = 494 ( "&<" PGNSP PGUID b f f 603 603 16 0 0 box_overleft positionsel positionjoinsel )); -DESCR("overlaps or is left of"); -DATA(insert OID = 495 ( "&>" PGNSP PGUID b f f 603 603 16 0 0 box_overright positionsel positionjoinsel )); -DESCR("overlaps or is right of"); -DATA(insert OID = 496 ( ">>" PGNSP PGUID b f f 603 603 16 0 0 box_right positionsel positionjoinsel )); -DESCR("is right of"); -DATA(insert OID = 497 ( "<@" PGNSP PGUID b f f 603 603 16 498 0 box_contained contsel contjoinsel )); -DESCR("is contained by"); -DATA(insert OID = 498 ( "@>" PGNSP PGUID b f f 603 603 16 497 0 box_contain contsel contjoinsel )); -DESCR("contains"); -DATA(insert OID = 499 ( "~=" PGNSP PGUID b f f 603 603 16 499 0 box_same eqsel eqjoinsel )); -DESCR("same as"); -DATA(insert OID = 500 ( "&&" PGNSP PGUID b f f 603 603 16 500 0 box_overlap areasel areajoinsel )); -DESCR("overlaps"); -DATA(insert OID = 501 ( ">=" PGNSP PGUID b f f 603 603 16 505 504 box_ge areasel areajoinsel )); -DESCR("greater than or equal by area"); -DATA(insert OID = 502 ( ">" PGNSP PGUID b f f 603 603 16 504 505 box_gt areasel areajoinsel )); -DESCR("greater than by area"); -DATA(insert OID = 503 ( "=" PGNSP PGUID b f f 603 603 16 503 0 box_eq eqsel eqjoinsel )); -DESCR("equal by area"); -DATA(insert OID = 504 ( "<" PGNSP PGUID b f f 603 603 16 502 501 box_lt areasel areajoinsel )); -DESCR("less than by area"); -DATA(insert OID = 505 ( "<=" PGNSP PGUID b f f 603 603 16 501 502 box_le areasel areajoinsel )); -DESCR("less than or equal by area"); -DATA(insert OID = 506 ( ">^" PGNSP PGUID b f f 600 600 16 0 0 point_above positionsel positionjoinsel )); -DESCR("is above"); -DATA(insert OID = 507 ( "<<" PGNSP PGUID b f f 600 600 16 0 0 point_left positionsel positionjoinsel )); -DESCR("is left of"); -DATA(insert OID = 508 ( ">>" PGNSP PGUID b f f 600 600 16 0 0 point_right positionsel positionjoinsel )); -DESCR("is right of"); -DATA(insert OID = 509 ( "<^" PGNSP PGUID b f f 600 600 16 0 0 point_below positionsel positionjoinsel )); -DESCR("is below"); -DATA(insert OID = 510 ( "~=" PGNSP PGUID b f f 600 600 16 510 713 point_eq eqsel eqjoinsel )); -DESCR("same as"); -DATA(insert OID = 511 ( "<@" PGNSP PGUID b f f 600 603 16 433 0 on_pb contsel contjoinsel )); -DESCR("point inside box"); -DATA(insert OID = 433 ( "@>" PGNSP PGUID b f f 603 600 16 511 0 box_contain_pt contsel contjoinsel )); -DESCR("contains"); -DATA(insert OID = 512 ( "<@" PGNSP PGUID b f f 600 602 16 755 0 on_ppath - - )); -DESCR("point within closed path, or point on open path"); -DATA(insert OID = 513 ( "@@" PGNSP PGUID l f f 0 603 600 0 0 box_center - - )); -DESCR("center of"); -DATA(insert OID = 514 ( "*" PGNSP PGUID b f f 23 23 23 514 0 int4mul - - )); -DESCR("multiply"); -DATA(insert OID = 517 ( "<->" PGNSP PGUID b f f 600 600 701 517 0 point_distance - - )); -DESCR("distance between"); -DATA(insert OID = 518 ( "<>" PGNSP PGUID b f f 23 23 16 518 96 int4ne neqsel neqjoinsel )); -DESCR("not equal"); -DATA(insert OID = 519 ( "<>" PGNSP PGUID b f f 21 21 16 519 94 int2ne neqsel neqjoinsel )); -DESCR("not equal"); -DATA(insert OID = 520 ( ">" PGNSP PGUID b f f 21 21 16 95 522 int2gt scalargtsel scalargtjoinsel 
)); -DESCR("greater than"); -DATA(insert OID = 521 ( ">" PGNSP PGUID b f f 23 23 16 97 523 int4gt scalargtsel scalargtjoinsel )); -DESCR("greater than"); -DATA(insert OID = 522 ( "<=" PGNSP PGUID b f f 21 21 16 524 520 int2le scalarltsel scalarltjoinsel )); -DESCR("less than or equal"); -DATA(insert OID = 523 ( "<=" PGNSP PGUID b f f 23 23 16 525 521 int4le scalarltsel scalarltjoinsel )); -DESCR("less than or equal"); -DATA(insert OID = 524 ( ">=" PGNSP PGUID b f f 21 21 16 522 95 int2ge scalargtsel scalargtjoinsel )); -DESCR("greater than or equal"); -DATA(insert OID = 525 ( ">=" PGNSP PGUID b f f 23 23 16 523 97 int4ge scalargtsel scalargtjoinsel )); -DESCR("greater than or equal"); -DATA(insert OID = 526 ( "*" PGNSP PGUID b f f 21 21 21 526 0 int2mul - - )); -DESCR("multiply"); -DATA(insert OID = 527 ( "/" PGNSP PGUID b f f 21 21 21 0 0 int2div - - )); -DESCR("divide"); -DATA(insert OID = 528 ( "/" PGNSP PGUID b f f 23 23 23 0 0 int4div - - )); -DESCR("divide"); -DATA(insert OID = 529 ( "%" PGNSP PGUID b f f 21 21 21 0 0 int2mod - - )); -DESCR("modulus"); -DATA(insert OID = 530 ( "%" PGNSP PGUID b f f 23 23 23 0 0 int4mod - - )); -DESCR("modulus"); -DATA(insert OID = 531 ( "<>" PGNSP PGUID b f f 25 25 16 531 98 textne neqsel neqjoinsel )); -DESCR("not equal"); -DATA(insert OID = 532 ( "=" PGNSP PGUID b t t 21 23 16 533 538 int24eq eqsel eqjoinsel )); -DESCR("equal"); -DATA(insert OID = 533 ( "=" PGNSP PGUID b t t 23 21 16 532 539 int42eq eqsel eqjoinsel )); -DESCR("equal"); -DATA(insert OID = 534 ( "<" PGNSP PGUID b f f 21 23 16 537 542 int24lt scalarltsel scalarltjoinsel )); -DESCR("less than"); -DATA(insert OID = 535 ( "<" PGNSP PGUID b f f 23 21 16 536 543 int42lt scalarltsel scalarltjoinsel )); -DESCR("less than"); -DATA(insert OID = 536 ( ">" PGNSP PGUID b f f 21 23 16 535 540 int24gt scalargtsel scalargtjoinsel )); -DESCR("greater than"); -DATA(insert OID = 537 ( ">" PGNSP PGUID b f f 23 21 16 534 541 int42gt scalargtsel scalargtjoinsel )); -DESCR("greater than"); -DATA(insert OID = 538 ( "<>" PGNSP PGUID b f f 21 23 16 539 532 int24ne neqsel neqjoinsel )); -DESCR("not equal"); -DATA(insert OID = 539 ( "<>" PGNSP PGUID b f f 23 21 16 538 533 int42ne neqsel neqjoinsel )); -DESCR("not equal"); -DATA(insert OID = 540 ( "<=" PGNSP PGUID b f f 21 23 16 543 536 int24le scalarltsel scalarltjoinsel )); -DESCR("less than or equal"); -DATA(insert OID = 541 ( "<=" PGNSP PGUID b f f 23 21 16 542 537 int42le scalarltsel scalarltjoinsel )); -DESCR("less than or equal"); -DATA(insert OID = 542 ( ">=" PGNSP PGUID b f f 21 23 16 541 534 int24ge scalargtsel scalargtjoinsel )); -DESCR("greater than or equal"); -DATA(insert OID = 543 ( ">=" PGNSP PGUID b f f 23 21 16 540 535 int42ge scalargtsel scalargtjoinsel )); -DESCR("greater than or equal"); -DATA(insert OID = 544 ( "*" PGNSP PGUID b f f 21 23 23 545 0 int24mul - - )); -DESCR("multiply"); -DATA(insert OID = 545 ( "*" PGNSP PGUID b f f 23 21 23 544 0 int42mul - - )); -DESCR("multiply"); -DATA(insert OID = 546 ( "/" PGNSP PGUID b f f 21 23 23 0 0 int24div - - )); -DESCR("divide"); -DATA(insert OID = 547 ( "/" PGNSP PGUID b f f 23 21 23 0 0 int42div - - )); -DESCR("divide"); -DATA(insert OID = 550 ( "+" PGNSP PGUID b f f 21 21 21 550 0 int2pl - - )); -DESCR("add"); -DATA(insert OID = 551 ( "+" PGNSP PGUID b f f 23 23 23 551 0 int4pl - - )); -DESCR("add"); -DATA(insert OID = 552 ( "+" PGNSP PGUID b f f 21 23 23 553 0 int24pl - - )); -DESCR("add"); -DATA(insert OID = 553 ( "+" PGNSP PGUID b f f 23 21 23 552 0 int42pl - - )); -DESCR("add"); 
-DATA(insert OID = 554 ( "-" PGNSP PGUID b f f 21 21 21 0 0 int2mi - - )); -DESCR("subtract"); -DATA(insert OID = 555 ( "-" PGNSP PGUID b f f 23 23 23 0 0 int4mi - - )); -DESCR("subtract"); -DATA(insert OID = 556 ( "-" PGNSP PGUID b f f 21 23 23 0 0 int24mi - - )); -DESCR("subtract"); -DATA(insert OID = 557 ( "-" PGNSP PGUID b f f 23 21 23 0 0 int42mi - - )); -DESCR("subtract"); -DATA(insert OID = 558 ( "-" PGNSP PGUID l f f 0 23 23 0 0 int4um - - )); -DESCR("negate"); -DATA(insert OID = 559 ( "-" PGNSP PGUID l f f 0 21 21 0 0 int2um - - )); -DESCR("negate"); -DATA(insert OID = 560 ( "=" PGNSP PGUID b t t 702 702 16 560 561 abstimeeq eqsel eqjoinsel )); -DESCR("equal"); -DATA(insert OID = 561 ( "<>" PGNSP PGUID b f f 702 702 16 561 560 abstimene neqsel neqjoinsel )); -DESCR("not equal"); -DATA(insert OID = 562 ( "<" PGNSP PGUID b f f 702 702 16 563 565 abstimelt scalarltsel scalarltjoinsel )); -DESCR("less than"); -DATA(insert OID = 563 ( ">" PGNSP PGUID b f f 702 702 16 562 564 abstimegt scalargtsel scalargtjoinsel )); -DESCR("greater than"); -DATA(insert OID = 564 ( "<=" PGNSP PGUID b f f 702 702 16 565 563 abstimele scalarltsel scalarltjoinsel )); -DESCR("less than or equal"); -DATA(insert OID = 565 ( ">=" PGNSP PGUID b f f 702 702 16 564 562 abstimege scalargtsel scalargtjoinsel )); -DESCR("greater than or equal"); -DATA(insert OID = 566 ( "=" PGNSP PGUID b t t 703 703 16 566 567 reltimeeq eqsel eqjoinsel )); -DESCR("equal"); -DATA(insert OID = 567 ( "<>" PGNSP PGUID b f f 703 703 16 567 566 reltimene neqsel neqjoinsel )); -DESCR("not equal"); -DATA(insert OID = 568 ( "<" PGNSP PGUID b f f 703 703 16 569 571 reltimelt scalarltsel scalarltjoinsel )); -DESCR("less than"); -DATA(insert OID = 569 ( ">" PGNSP PGUID b f f 703 703 16 568 570 reltimegt scalargtsel scalargtjoinsel )); -DESCR("greater than"); -DATA(insert OID = 570 ( "<=" PGNSP PGUID b f f 703 703 16 571 569 reltimele scalarltsel scalarltjoinsel )); -DESCR("less than or equal"); -DATA(insert OID = 571 ( ">=" PGNSP PGUID b f f 703 703 16 570 568 reltimege scalargtsel scalargtjoinsel )); -DESCR("greater than or equal"); -DATA(insert OID = 572 ( "~=" PGNSP PGUID b f f 704 704 16 572 0 tintervalsame eqsel eqjoinsel )); -DESCR("same as"); -DATA(insert OID = 573 ( "<<" PGNSP PGUID b f f 704 704 16 0 0 tintervalct - - )); -DESCR("contains"); -DATA(insert OID = 574 ( "&&" PGNSP PGUID b f f 704 704 16 574 0 tintervalov - - )); -DESCR("overlaps"); -DATA(insert OID = 575 ( "#=" PGNSP PGUID b f f 704 703 16 0 576 tintervalleneq - - )); -DESCR("equal by length"); -DATA(insert OID = 576 ( "#<>" PGNSP PGUID b f f 704 703 16 0 575 tintervallenne - - )); -DESCR("not equal by length"); -DATA(insert OID = 577 ( "#<" PGNSP PGUID b f f 704 703 16 0 580 tintervallenlt - - )); -DESCR("less than by length"); -DATA(insert OID = 578 ( "#>" PGNSP PGUID b f f 704 703 16 0 579 tintervallengt - - )); -DESCR("greater than by length"); -DATA(insert OID = 579 ( "#<=" PGNSP PGUID b f f 704 703 16 0 578 tintervallenle - - )); -DESCR("less than or equal by length"); -DATA(insert OID = 580 ( "#>=" PGNSP PGUID b f f 704 703 16 0 577 tintervallenge - - )); -DESCR("greater than or equal by length"); -DATA(insert OID = 581 ( "+" PGNSP PGUID b f f 702 703 702 0 0 timepl - - )); -DESCR("add"); -DATA(insert OID = 582 ( "-" PGNSP PGUID b f f 702 703 702 0 0 timemi - - )); -DESCR("subtract"); -DATA(insert OID = 583 ( "" PGNSP PGUID b f f 702 704 16 0 0 intinterval - - )); -DESCR("is contained by"); -DATA(insert OID = 584 ( "-" PGNSP PGUID l f f 0 700 700 0 0 float4um - - 
)); -DESCR("negate"); -DATA(insert OID = 585 ( "-" PGNSP PGUID l f f 0 701 701 0 0 float8um - - )); -DESCR("negate"); -DATA(insert OID = 586 ( "+" PGNSP PGUID b f f 700 700 700 586 0 float4pl - - )); -DESCR("add"); -DATA(insert OID = 587 ( "-" PGNSP PGUID b f f 700 700 700 0 0 float4mi - - )); -DESCR("subtract"); -DATA(insert OID = 588 ( "/" PGNSP PGUID b f f 700 700 700 0 0 float4div - - )); -DESCR("divide"); -DATA(insert OID = 589 ( "*" PGNSP PGUID b f f 700 700 700 589 0 float4mul - - )); -DESCR("multiply"); -DATA(insert OID = 590 ( "@" PGNSP PGUID l f f 0 700 700 0 0 float4abs - - )); -DESCR("absolute value"); -DATA(insert OID = 591 ( "+" PGNSP PGUID b f f 701 701 701 591 0 float8pl - - )); -DESCR("add"); -DATA(insert OID = 592 ( "-" PGNSP PGUID b f f 701 701 701 0 0 float8mi - - )); -DESCR("subtract"); -DATA(insert OID = 593 ( "/" PGNSP PGUID b f f 701 701 701 0 0 float8div - - )); -DESCR("divide"); -DATA(insert OID = 594 ( "*" PGNSP PGUID b f f 701 701 701 594 0 float8mul - - )); -DESCR("multiply"); -DATA(insert OID = 595 ( "@" PGNSP PGUID l f f 0 701 701 0 0 float8abs - - )); -DESCR("absolute value"); -DATA(insert OID = 596 ( "|/" PGNSP PGUID l f f 0 701 701 0 0 dsqrt - - )); -DESCR("square root"); -DATA(insert OID = 597 ( "||/" PGNSP PGUID l f f 0 701 701 0 0 dcbrt - - )); -DESCR("cube root"); -DATA(insert OID = 1284 ( "|" PGNSP PGUID l f f 0 704 702 0 0 tintervalstart - - )); -DESCR("start of interval"); -DATA(insert OID = 606 ( "<#>" PGNSP PGUID b f f 702 702 704 0 0 mktinterval - - )); -DESCR("convert to tinterval"); - -DATA(insert OID = 607 ( "=" PGNSP PGUID b t t 26 26 16 607 608 oideq eqsel eqjoinsel )); -DESCR("equal"); -DATA(insert OID = 608 ( "<>" PGNSP PGUID b f f 26 26 16 608 607 oidne neqsel neqjoinsel )); -DESCR("not equal"); -DATA(insert OID = 609 ( "<" PGNSP PGUID b f f 26 26 16 610 612 oidlt scalarltsel scalarltjoinsel )); -DESCR("less than"); -DATA(insert OID = 610 ( ">" PGNSP PGUID b f f 26 26 16 609 611 oidgt scalargtsel scalargtjoinsel )); -DESCR("greater than"); -DATA(insert OID = 611 ( "<=" PGNSP PGUID b f f 26 26 16 612 610 oidle scalarltsel scalarltjoinsel )); -DESCR("less than or equal"); -DATA(insert OID = 612 ( ">=" PGNSP PGUID b f f 26 26 16 611 609 oidge scalargtsel scalargtjoinsel )); -DESCR("greater than or equal"); - -DATA(insert OID = 644 ( "<>" PGNSP PGUID b f f 30 30 16 644 649 oidvectorne neqsel neqjoinsel )); -DESCR("not equal"); -DATA(insert OID = 645 ( "<" PGNSP PGUID b f f 30 30 16 646 648 oidvectorlt scalarltsel scalarltjoinsel )); -DESCR("less than"); -DATA(insert OID = 646 ( ">" PGNSP PGUID b f f 30 30 16 645 647 oidvectorgt scalargtsel scalargtjoinsel )); -DESCR("greater than"); -DATA(insert OID = 647 ( "<=" PGNSP PGUID b f f 30 30 16 648 646 oidvectorle scalarltsel scalarltjoinsel )); -DESCR("less than or equal"); -DATA(insert OID = 648 ( ">=" PGNSP PGUID b f f 30 30 16 647 645 oidvectorge scalargtsel scalargtjoinsel )); -DESCR("greater than or equal"); -DATA(insert OID = 649 ( "=" PGNSP PGUID b t t 30 30 16 649 644 oidvectoreq eqsel eqjoinsel )); -DESCR("equal"); - -DATA(insert OID = 613 ( "<->" PGNSP PGUID b f f 600 628 701 0 0 dist_pl - - )); -DESCR("distance between"); -DATA(insert OID = 614 ( "<->" PGNSP PGUID b f f 600 601 701 0 0 dist_ps - - )); -DESCR("distance between"); -DATA(insert OID = 615 ( "<->" PGNSP PGUID b f f 600 603 701 0 0 dist_pb - - )); -DESCR("distance between"); -DATA(insert OID = 616 ( "<->" PGNSP PGUID b f f 601 628 701 0 0 dist_sl - - )); -DESCR("distance between"); -DATA(insert OID = 617 ( "<->" PGNSP PGUID b 
f f 601 603 701 0 0 dist_sb - - )); -DESCR("distance between"); -DATA(insert OID = 618 ( "<->" PGNSP PGUID b f f 600 602 701 0 0 dist_ppath - - )); -DESCR("distance between"); - -DATA(insert OID = 620 ( "=" PGNSP PGUID b t t 700 700 16 620 621 float4eq eqsel eqjoinsel )); -DESCR("equal"); -DATA(insert OID = 621 ( "<>" PGNSP PGUID b f f 700 700 16 621 620 float4ne neqsel neqjoinsel )); -DESCR("not equal"); -DATA(insert OID = 622 ( "<" PGNSP PGUID b f f 700 700 16 623 625 float4lt scalarltsel scalarltjoinsel )); -DESCR("less than"); -DATA(insert OID = 623 ( ">" PGNSP PGUID b f f 700 700 16 622 624 float4gt scalargtsel scalargtjoinsel )); -DESCR("greater than"); -DATA(insert OID = 624 ( "<=" PGNSP PGUID b f f 700 700 16 625 623 float4le scalarltsel scalarltjoinsel )); -DESCR("less than or equal"); -DATA(insert OID = 625 ( ">=" PGNSP PGUID b f f 700 700 16 624 622 float4ge scalargtsel scalargtjoinsel )); -DESCR("greater than or equal"); -DATA(insert OID = 630 ( "<>" PGNSP PGUID b f f 18 18 16 630 92 charne neqsel neqjoinsel )); -DESCR("not equal"); - -DATA(insert OID = 631 ( "<" PGNSP PGUID b f f 18 18 16 633 634 charlt scalarltsel scalarltjoinsel )); -DESCR("less than"); -DATA(insert OID = 632 ( "<=" PGNSP PGUID b f f 18 18 16 634 633 charle scalarltsel scalarltjoinsel )); -DESCR("less than or equal"); -DATA(insert OID = 633 ( ">" PGNSP PGUID b f f 18 18 16 631 632 chargt scalargtsel scalargtjoinsel )); -DESCR("greater than"); -DATA(insert OID = 634 ( ">=" PGNSP PGUID b f f 18 18 16 632 631 charge scalargtsel scalargtjoinsel )); -DESCR("greater than or equal"); - -DATA(insert OID = 639 ( "~" PGNSP PGUID b f f 19 25 16 0 640 nameregexeq regexeqsel regexeqjoinsel )); -DESCR("matches regular expression, case-sensitive"); -#define OID_NAME_REGEXEQ_OP 639 -DATA(insert OID = 640 ( "!~" PGNSP PGUID b f f 19 25 16 0 639 nameregexne regexnesel regexnejoinsel )); -DESCR("does not match regular expression, case-sensitive"); -DATA(insert OID = 641 ( "~" PGNSP PGUID b f f 25 25 16 0 642 textregexeq regexeqsel regexeqjoinsel )); -DESCR("matches regular expression, case-sensitive"); -#define OID_TEXT_REGEXEQ_OP 641 -DATA(insert OID = 642 ( "!~" PGNSP PGUID b f f 25 25 16 0 641 textregexne regexnesel regexnejoinsel )); -DESCR("does not match regular expression, case-sensitive"); -DATA(insert OID = 643 ( "<>" PGNSP PGUID b f f 19 19 16 643 93 namene neqsel neqjoinsel )); -DESCR("not equal"); -DATA(insert OID = 654 ( "||" PGNSP PGUID b f f 25 25 25 0 0 textcat - - )); -DESCR("concatenate"); - -DATA(insert OID = 660 ( "<" PGNSP PGUID b f f 19 19 16 662 663 namelt scalarltsel scalarltjoinsel )); -DESCR("less than"); -DATA(insert OID = 661 ( "<=" PGNSP PGUID b f f 19 19 16 663 662 namele scalarltsel scalarltjoinsel )); -DESCR("less than or equal"); -DATA(insert OID = 662 ( ">" PGNSP PGUID b f f 19 19 16 660 661 namegt scalargtsel scalargtjoinsel )); -DESCR("greater than"); -DATA(insert OID = 663 ( ">=" PGNSP PGUID b f f 19 19 16 661 660 namege scalargtsel scalargtjoinsel )); -DESCR("greater than or equal"); -DATA(insert OID = 664 ( "<" PGNSP PGUID b f f 25 25 16 666 667 text_lt scalarltsel scalarltjoinsel )); -DESCR("less than"); -DATA(insert OID = 665 ( "<=" PGNSP PGUID b f f 25 25 16 667 666 text_le scalarltsel scalarltjoinsel )); -DESCR("less than or equal"); -DATA(insert OID = 666 ( ">" PGNSP PGUID b f f 25 25 16 664 665 text_gt scalargtsel scalargtjoinsel )); -DESCR("greater than"); -DATA(insert OID = 667 ( ">=" PGNSP PGUID b f f 25 25 16 665 664 text_ge scalargtsel scalargtjoinsel )); -DESCR("greater than 
or equal"); - -DATA(insert OID = 670 ( "=" PGNSP PGUID b t t 701 701 16 670 671 float8eq eqsel eqjoinsel )); -DESCR("equal"); -DATA(insert OID = 671 ( "<>" PGNSP PGUID b f f 701 701 16 671 670 float8ne neqsel neqjoinsel )); -DESCR("not equal"); -DATA(insert OID = 672 ( "<" PGNSP PGUID b f f 701 701 16 674 675 float8lt scalarltsel scalarltjoinsel )); -DESCR("less than"); -#define Float8LessOperator 672 -DATA(insert OID = 673 ( "<=" PGNSP PGUID b f f 701 701 16 675 674 float8le scalarltsel scalarltjoinsel )); -DESCR("less than or equal"); -DATA(insert OID = 674 ( ">" PGNSP PGUID b f f 701 701 16 672 673 float8gt scalargtsel scalargtjoinsel )); -DESCR("greater than"); -DATA(insert OID = 675 ( ">=" PGNSP PGUID b f f 701 701 16 673 672 float8ge scalargtsel scalargtjoinsel )); -DESCR("greater than or equal"); - -DATA(insert OID = 682 ( "@" PGNSP PGUID l f f 0 21 21 0 0 int2abs - - )); -DESCR("absolute value"); -DATA(insert OID = 684 ( "+" PGNSP PGUID b f f 20 20 20 684 0 int8pl - - )); -DESCR("add"); -DATA(insert OID = 685 ( "-" PGNSP PGUID b f f 20 20 20 0 0 int8mi - - )); -DESCR("subtract"); -DATA(insert OID = 686 ( "*" PGNSP PGUID b f f 20 20 20 686 0 int8mul - - )); -DESCR("multiply"); -DATA(insert OID = 687 ( "/" PGNSP PGUID b f f 20 20 20 0 0 int8div - - )); -DESCR("divide"); - -DATA(insert OID = 688 ( "+" PGNSP PGUID b f f 20 23 20 692 0 int84pl - - )); -DESCR("add"); -DATA(insert OID = 689 ( "-" PGNSP PGUID b f f 20 23 20 0 0 int84mi - - )); -DESCR("subtract"); -DATA(insert OID = 690 ( "*" PGNSP PGUID b f f 20 23 20 694 0 int84mul - - )); -DESCR("multiply"); -DATA(insert OID = 691 ( "/" PGNSP PGUID b f f 20 23 20 0 0 int84div - - )); -DESCR("divide"); -DATA(insert OID = 692 ( "+" PGNSP PGUID b f f 23 20 20 688 0 int48pl - - )); -DESCR("add"); -DATA(insert OID = 693 ( "-" PGNSP PGUID b f f 23 20 20 0 0 int48mi - - )); -DESCR("subtract"); -DATA(insert OID = 694 ( "*" PGNSP PGUID b f f 23 20 20 690 0 int48mul - - )); -DESCR("multiply"); -DATA(insert OID = 695 ( "/" PGNSP PGUID b f f 23 20 20 0 0 int48div - - )); -DESCR("divide"); - -DATA(insert OID = 818 ( "+" PGNSP PGUID b f f 20 21 20 822 0 int82pl - - )); -DESCR("add"); -DATA(insert OID = 819 ( "-" PGNSP PGUID b f f 20 21 20 0 0 int82mi - - )); -DESCR("subtract"); -DATA(insert OID = 820 ( "*" PGNSP PGUID b f f 20 21 20 824 0 int82mul - - )); -DESCR("multiply"); -DATA(insert OID = 821 ( "/" PGNSP PGUID b f f 20 21 20 0 0 int82div - - )); -DESCR("divide"); -DATA(insert OID = 822 ( "+" PGNSP PGUID b f f 21 20 20 818 0 int28pl - - )); -DESCR("add"); -DATA(insert OID = 823 ( "-" PGNSP PGUID b f f 21 20 20 0 0 int28mi - - )); -DESCR("subtract"); -DATA(insert OID = 824 ( "*" PGNSP PGUID b f f 21 20 20 820 0 int28mul - - )); -DESCR("multiply"); -DATA(insert OID = 825 ( "/" PGNSP PGUID b f f 21 20 20 0 0 int28div - - )); -DESCR("divide"); - -DATA(insert OID = 706 ( "<->" PGNSP PGUID b f f 603 603 701 706 0 box_distance - - )); -DESCR("distance between"); -DATA(insert OID = 707 ( "<->" PGNSP PGUID b f f 602 602 701 707 0 path_distance - - )); -DESCR("distance between"); -DATA(insert OID = 708 ( "<->" PGNSP PGUID b f f 628 628 701 708 0 line_distance - - )); -DESCR("distance between"); -DATA(insert OID = 709 ( "<->" PGNSP PGUID b f f 601 601 701 709 0 lseg_distance - - )); -DESCR("distance between"); -DATA(insert OID = 712 ( "<->" PGNSP PGUID b f f 604 604 701 712 0 poly_distance - - )); -DESCR("distance between"); - -DATA(insert OID = 713 ( "<>" PGNSP PGUID b f f 600 600 16 713 510 point_ne neqsel neqjoinsel )); -DESCR("not equal"); - -/* add 
translation/rotation/scaling operators for geometric types. - thomas 97/05/10 */ -DATA(insert OID = 731 ( "+" PGNSP PGUID b f f 600 600 600 731 0 point_add - - )); -DESCR("add points (translate)"); -DATA(insert OID = 732 ( "-" PGNSP PGUID b f f 600 600 600 0 0 point_sub - - )); -DESCR("subtract points (translate)"); -DATA(insert OID = 733 ( "*" PGNSP PGUID b f f 600 600 600 733 0 point_mul - - )); -DESCR("multiply points (scale/rotate)"); -DATA(insert OID = 734 ( "/" PGNSP PGUID b f f 600 600 600 0 0 point_div - - )); -DESCR("divide points (scale/rotate)"); -DATA(insert OID = 735 ( "+" PGNSP PGUID b f f 602 602 602 735 0 path_add - - )); -DESCR("concatenate"); -DATA(insert OID = 736 ( "+" PGNSP PGUID b f f 602 600 602 0 0 path_add_pt - - )); -DESCR("add (translate path)"); -DATA(insert OID = 737 ( "-" PGNSP PGUID b f f 602 600 602 0 0 path_sub_pt - - )); -DESCR("subtract (translate path)"); -DATA(insert OID = 738 ( "*" PGNSP PGUID b f f 602 600 602 0 0 path_mul_pt - - )); -DESCR("multiply (rotate/scale path)"); -DATA(insert OID = 739 ( "/" PGNSP PGUID b f f 602 600 602 0 0 path_div_pt - - )); -DESCR("divide (rotate/scale path)"); -DATA(insert OID = 755 ( "@>" PGNSP PGUID b f f 602 600 16 512 0 path_contain_pt - - )); -DESCR("contains"); -DATA(insert OID = 756 ( "<@" PGNSP PGUID b f f 600 604 16 757 0 pt_contained_poly contsel contjoinsel )); -DESCR("is contained by"); -DATA(insert OID = 757 ( "@>" PGNSP PGUID b f f 604 600 16 756 0 poly_contain_pt contsel contjoinsel )); -DESCR("contains"); -DATA(insert OID = 758 ( "<@" PGNSP PGUID b f f 600 718 16 759 0 pt_contained_circle contsel contjoinsel )); -DESCR("is contained by"); -DATA(insert OID = 759 ( "@>" PGNSP PGUID b f f 718 600 16 758 0 circle_contain_pt contsel contjoinsel )); -DESCR("contains"); - -DATA(insert OID = 773 ( "@" PGNSP PGUID l f f 0 23 23 0 0 int4abs - - )); -DESCR("absolute value"); - -/* additional operators for geometric types - thomas 1997-07-09 */ -DATA(insert OID = 792 ( "=" PGNSP PGUID b f f 602 602 16 792 0 path_n_eq eqsel eqjoinsel )); -DESCR("equal"); -DATA(insert OID = 793 ( "<" PGNSP PGUID b f f 602 602 16 794 0 path_n_lt - - )); -DESCR("less than"); -DATA(insert OID = 794 ( ">" PGNSP PGUID b f f 602 602 16 793 0 path_n_gt - - )); -DESCR("greater than"); -DATA(insert OID = 795 ( "<=" PGNSP PGUID b f f 602 602 16 796 0 path_n_le - - )); -DESCR("less than or equal"); -DATA(insert OID = 796 ( ">=" PGNSP PGUID b f f 602 602 16 795 0 path_n_ge - - )); -DESCR("greater than or equal"); -DATA(insert OID = 797 ( "#" PGNSP PGUID l f f 0 602 23 0 0 path_npoints - - )); -DESCR("number of points"); -DATA(insert OID = 798 ( "?#" PGNSP PGUID b f f 602 602 16 0 0 path_inter - - )); -DESCR("intersect"); -DATA(insert OID = 799 ( "@-@" PGNSP PGUID l f f 0 602 701 0 0 path_length - - )); -DESCR("sum of path segment lengths"); -DATA(insert OID = 800 ( ">^" PGNSP PGUID b f f 603 603 16 0 0 box_above_eq positionsel positionjoinsel )); -DESCR("is above (allows touching)"); -DATA(insert OID = 801 ( "<^" PGNSP PGUID b f f 603 603 16 0 0 box_below_eq positionsel positionjoinsel )); -DESCR("is below (allows touching)"); -DATA(insert OID = 802 ( "?#" PGNSP PGUID b f f 603 603 16 0 0 box_overlap areasel areajoinsel )); -DESCR("deprecated, use && instead"); -DATA(insert OID = 803 ( "#" PGNSP PGUID b f f 603 603 603 0 0 box_intersect - - )); -DESCR("box intersection"); -DATA(insert OID = 804 ( "+" PGNSP PGUID b f f 603 600 603 0 0 box_add - - )); -DESCR("add point to box (translate)"); -DATA(insert OID = 805 ( "-" PGNSP PGUID b f f 603 600 603 
0 0 box_sub - - )); -DESCR("subtract point from box (translate)"); -DATA(insert OID = 806 ( "*" PGNSP PGUID b f f 603 600 603 0 0 box_mul - - )); -DESCR("multiply box by point (scale)"); -DATA(insert OID = 807 ( "/" PGNSP PGUID b f f 603 600 603 0 0 box_div - - )); -DESCR("divide box by point (scale)"); -DATA(insert OID = 808 ( "?-" PGNSP PGUID b f f 600 600 16 808 0 point_horiz - - )); -DESCR("horizontally aligned"); -DATA(insert OID = 809 ( "?|" PGNSP PGUID b f f 600 600 16 809 0 point_vert - - )); -DESCR("vertically aligned"); - -DATA(insert OID = 811 ( "=" PGNSP PGUID b t f 704 704 16 811 812 tintervaleq eqsel eqjoinsel )); -DESCR("equal"); -DATA(insert OID = 812 ( "<>" PGNSP PGUID b f f 704 704 16 812 811 tintervalne neqsel neqjoinsel )); -DESCR("not equal"); -DATA(insert OID = 813 ( "<" PGNSP PGUID b f f 704 704 16 814 816 tintervallt scalarltsel scalarltjoinsel )); -DESCR("less than"); -DATA(insert OID = 814 ( ">" PGNSP PGUID b f f 704 704 16 813 815 tintervalgt scalargtsel scalargtjoinsel )); -DESCR("greater than"); -DATA(insert OID = 815 ( "<=" PGNSP PGUID b f f 704 704 16 816 814 tintervalle scalarltsel scalarltjoinsel )); -DESCR("less than or equal"); -DATA(insert OID = 816 ( ">=" PGNSP PGUID b f f 704 704 16 815 813 tintervalge scalargtsel scalargtjoinsel )); -DESCR("greater than or equal"); - -DATA(insert OID = 843 ( "*" PGNSP PGUID b f f 790 700 790 845 0 cash_mul_flt4 - - )); -DESCR("multiply"); -DATA(insert OID = 844 ( "/" PGNSP PGUID b f f 790 700 790 0 0 cash_div_flt4 - - )); -DESCR("divide"); -DATA(insert OID = 845 ( "*" PGNSP PGUID b f f 700 790 790 843 0 flt4_mul_cash - - )); -DESCR("multiply"); - -DATA(insert OID = 900 ( "=" PGNSP PGUID b t f 790 790 16 900 901 cash_eq eqsel eqjoinsel )); -DESCR("equal"); -DATA(insert OID = 901 ( "<>" PGNSP PGUID b f f 790 790 16 901 900 cash_ne neqsel neqjoinsel )); -DESCR("not equal"); -DATA(insert OID = 902 ( "<" PGNSP PGUID b f f 790 790 16 903 905 cash_lt scalarltsel scalarltjoinsel )); -DESCR("less than"); -DATA(insert OID = 903 ( ">" PGNSP PGUID b f f 790 790 16 902 904 cash_gt scalargtsel scalargtjoinsel )); -DESCR("greater than"); -DATA(insert OID = 904 ( "<=" PGNSP PGUID b f f 790 790 16 905 903 cash_le scalarltsel scalarltjoinsel )); -DESCR("less than or equal"); -DATA(insert OID = 905 ( ">=" PGNSP PGUID b f f 790 790 16 904 902 cash_ge scalargtsel scalargtjoinsel )); -DESCR("greater than or equal"); -DATA(insert OID = 906 ( "+" PGNSP PGUID b f f 790 790 790 906 0 cash_pl - - )); -DESCR("add"); -DATA(insert OID = 907 ( "-" PGNSP PGUID b f f 790 790 790 0 0 cash_mi - - )); -DESCR("subtract"); -DATA(insert OID = 908 ( "*" PGNSP PGUID b f f 790 701 790 916 0 cash_mul_flt8 - - )); -DESCR("multiply"); -DATA(insert OID = 909 ( "/" PGNSP PGUID b f f 790 701 790 0 0 cash_div_flt8 - - )); -DESCR("divide"); -DATA(insert OID = 3346 ( "*" PGNSP PGUID b f f 790 20 790 3349 0 cash_mul_int8 - - )); -DESCR("multiply"); -DATA(insert OID = 3347 ( "/" PGNSP PGUID b f f 790 20 790 0 0 cash_div_int8 - - )); -DESCR("divide"); -DATA(insert OID = 912 ( "*" PGNSP PGUID b f f 790 23 790 917 0 cash_mul_int4 - - )); -DESCR("multiply"); -DATA(insert OID = 913 ( "/" PGNSP PGUID b f f 790 23 790 0 0 cash_div_int4 - - )); -DESCR("divide"); -DATA(insert OID = 914 ( "*" PGNSP PGUID b f f 790 21 790 918 0 cash_mul_int2 - - )); -DESCR("multiply"); -DATA(insert OID = 915 ( "/" PGNSP PGUID b f f 790 21 790 0 0 cash_div_int2 - - )); -DESCR("divide"); -DATA(insert OID = 916 ( "*" PGNSP PGUID b f f 701 790 790 908 0 flt8_mul_cash - - )); -DESCR("multiply"); 
-DATA(insert OID = 3349 ( "*" PGNSP PGUID b f f 20 790 790 3346 0 int8_mul_cash - - )); -DESCR("multiply"); -DATA(insert OID = 917 ( "*" PGNSP PGUID b f f 23 790 790 912 0 int4_mul_cash - - )); -DESCR("multiply"); -DATA(insert OID = 918 ( "*" PGNSP PGUID b f f 21 790 790 914 0 int2_mul_cash - - )); -DESCR("multiply"); -DATA(insert OID = 3825 ( "/" PGNSP PGUID b f f 790 790 701 0 0 cash_div_cash - - )); -DESCR("divide"); - -DATA(insert OID = 965 ( "^" PGNSP PGUID b f f 701 701 701 0 0 dpow - - )); -DESCR("exponentiation"); -DATA(insert OID = 966 ( "+" PGNSP PGUID b f f 1034 1033 1034 0 0 aclinsert - - )); -DESCR("add/update ACL item"); -DATA(insert OID = 967 ( "-" PGNSP PGUID b f f 1034 1033 1034 0 0 aclremove - - )); -DESCR("remove ACL item"); -DATA(insert OID = 968 ( "@>" PGNSP PGUID b f f 1034 1033 16 0 0 aclcontains - - )); -DESCR("contains"); -DATA(insert OID = 974 ( "=" PGNSP PGUID b f t 1033 1033 16 974 0 aclitemeq eqsel eqjoinsel )); -DESCR("equal"); - -/* additional geometric operators - thomas 1997-07-09 */ -DATA(insert OID = 969 ( "@@" PGNSP PGUID l f f 0 601 600 0 0 lseg_center - - )); -DESCR("center of"); -DATA(insert OID = 970 ( "@@" PGNSP PGUID l f f 0 602 600 0 0 path_center - - )); -DESCR("center of"); -DATA(insert OID = 971 ( "@@" PGNSP PGUID l f f 0 604 600 0 0 poly_center - - )); -DESCR("center of"); - -DATA(insert OID = 1054 ( "=" PGNSP PGUID b t t 1042 1042 16 1054 1057 bpchareq eqsel eqjoinsel )); -DESCR("equal"); - -DATA(insert OID = 1055 ( "~" PGNSP PGUID b f f 1042 25 16 0 1056 bpcharregexeq regexeqsel regexeqjoinsel )); -DESCR("matches regular expression, case-sensitive"); -#define OID_BPCHAR_REGEXEQ_OP 1055 -DATA(insert OID = 1056 ( "!~" PGNSP PGUID b f f 1042 25 16 0 1055 bpcharregexne regexnesel regexnejoinsel )); -DESCR("does not match regular expression, case-sensitive"); -DATA(insert OID = 1057 ( "<>" PGNSP PGUID b f f 1042 1042 16 1057 1054 bpcharne neqsel neqjoinsel )); -DESCR("not equal"); -DATA(insert OID = 1058 ( "<" PGNSP PGUID b f f 1042 1042 16 1060 1061 bpcharlt scalarltsel scalarltjoinsel )); -DESCR("less than"); -DATA(insert OID = 1059 ( "<=" PGNSP PGUID b f f 1042 1042 16 1061 1060 bpcharle scalarltsel scalarltjoinsel )); -DESCR("less than or equal"); -DATA(insert OID = 1060 ( ">" PGNSP PGUID b f f 1042 1042 16 1058 1059 bpchargt scalargtsel scalargtjoinsel )); -DESCR("greater than"); -DATA(insert OID = 1061 ( ">=" PGNSP PGUID b f f 1042 1042 16 1059 1058 bpcharge scalargtsel scalargtjoinsel )); -DESCR("greater than or equal"); - -/* generic array comparison operators */ -DATA(insert OID = 1070 ( "=" PGNSP PGUID b t t 2277 2277 16 1070 1071 array_eq eqsel eqjoinsel )); -DESCR("equal"); -#define ARRAY_EQ_OP 1070 -DATA(insert OID = 1071 ( "<>" PGNSP PGUID b f f 2277 2277 16 1071 1070 array_ne neqsel neqjoinsel )); -DESCR("not equal"); -DATA(insert OID = 1072 ( "<" PGNSP PGUID b f f 2277 2277 16 1073 1075 array_lt scalarltsel scalarltjoinsel )); -DESCR("less than"); -#define ARRAY_LT_OP 1072 -DATA(insert OID = 1073 ( ">" PGNSP PGUID b f f 2277 2277 16 1072 1074 array_gt scalargtsel scalargtjoinsel )); -DESCR("greater than"); -#define ARRAY_GT_OP 1073 -DATA(insert OID = 1074 ( "<=" PGNSP PGUID b f f 2277 2277 16 1075 1073 array_le scalarltsel scalarltjoinsel )); -DESCR("less than or equal"); -DATA(insert OID = 1075 ( ">=" PGNSP PGUID b f f 2277 2277 16 1074 1072 array_ge scalargtsel scalargtjoinsel )); -DESCR("greater than or equal"); - -/* date operators */ -DATA(insert OID = 1076 ( "+" PGNSP PGUID b f f 1082 1186 1114 2551 0 date_pl_interval - - 
)); -DESCR("add"); -DATA(insert OID = 1077 ( "-" PGNSP PGUID b f f 1082 1186 1114 0 0 date_mi_interval - - )); -DESCR("subtract"); -DATA(insert OID = 1093 ( "=" PGNSP PGUID b t t 1082 1082 16 1093 1094 date_eq eqsel eqjoinsel )); -DESCR("equal"); -DATA(insert OID = 1094 ( "<>" PGNSP PGUID b f f 1082 1082 16 1094 1093 date_ne neqsel neqjoinsel )); -DESCR("not equal"); -DATA(insert OID = 1095 ( "<" PGNSP PGUID b f f 1082 1082 16 1097 1098 date_lt scalarltsel scalarltjoinsel )); -DESCR("less than"); -DATA(insert OID = 1096 ( "<=" PGNSP PGUID b f f 1082 1082 16 1098 1097 date_le scalarltsel scalarltjoinsel )); -DESCR("less than or equal"); -DATA(insert OID = 1097 ( ">" PGNSP PGUID b f f 1082 1082 16 1095 1096 date_gt scalargtsel scalargtjoinsel )); -DESCR("greater than"); -DATA(insert OID = 1098 ( ">=" PGNSP PGUID b f f 1082 1082 16 1096 1095 date_ge scalargtsel scalargtjoinsel )); -DESCR("greater than or equal"); -DATA(insert OID = 1099 ( "-" PGNSP PGUID b f f 1082 1082 23 0 0 date_mi - - )); -DESCR("subtract"); -DATA(insert OID = 1100 ( "+" PGNSP PGUID b f f 1082 23 1082 2555 0 date_pli - - )); -DESCR("add"); -DATA(insert OID = 1101 ( "-" PGNSP PGUID b f f 1082 23 1082 0 0 date_mii - - )); -DESCR("subtract"); - -/* time operators */ -DATA(insert OID = 1108 ( "=" PGNSP PGUID b t t 1083 1083 16 1108 1109 time_eq eqsel eqjoinsel )); -DESCR("equal"); -DATA(insert OID = 1109 ( "<>" PGNSP PGUID b f f 1083 1083 16 1109 1108 time_ne neqsel neqjoinsel )); -DESCR("not equal"); -DATA(insert OID = 1110 ( "<" PGNSP PGUID b f f 1083 1083 16 1112 1113 time_lt scalarltsel scalarltjoinsel )); -DESCR("less than"); -DATA(insert OID = 1111 ( "<=" PGNSP PGUID b f f 1083 1083 16 1113 1112 time_le scalarltsel scalarltjoinsel )); -DESCR("less than or equal"); -DATA(insert OID = 1112 ( ">" PGNSP PGUID b f f 1083 1083 16 1110 1111 time_gt scalargtsel scalargtjoinsel )); -DESCR("greater than"); -DATA(insert OID = 1113 ( ">=" PGNSP PGUID b f f 1083 1083 16 1111 1110 time_ge scalargtsel scalargtjoinsel )); -DESCR("greater than or equal"); + /* name of operator */ + NameData oprname; -/* timetz operators */ -DATA(insert OID = 1550 ( "=" PGNSP PGUID b t t 1266 1266 16 1550 1551 timetz_eq eqsel eqjoinsel )); -DESCR("equal"); -DATA(insert OID = 1551 ( "<>" PGNSP PGUID b f f 1266 1266 16 1551 1550 timetz_ne neqsel neqjoinsel )); -DESCR("not equal"); -DATA(insert OID = 1552 ( "<" PGNSP PGUID b f f 1266 1266 16 1554 1555 timetz_lt scalarltsel scalarltjoinsel )); -DESCR("less than"); -DATA(insert OID = 1553 ( "<=" PGNSP PGUID b f f 1266 1266 16 1555 1554 timetz_le scalarltsel scalarltjoinsel )); -DESCR("less than or equal"); -DATA(insert OID = 1554 ( ">" PGNSP PGUID b f f 1266 1266 16 1552 1553 timetz_gt scalargtsel scalargtjoinsel )); -DESCR("greater than"); -DATA(insert OID = 1555 ( ">=" PGNSP PGUID b f f 1266 1266 16 1553 1552 timetz_ge scalargtsel scalargtjoinsel )); -DESCR("greater than or equal"); + /* OID of namespace containing this oper */ + Oid oprnamespace BKI_DEFAULT(PGNSP); -/* float48 operators */ -DATA(insert OID = 1116 ( "+" PGNSP PGUID b f f 700 701 701 1126 0 float48pl - - )); -DESCR("add"); -DATA(insert OID = 1117 ( "-" PGNSP PGUID b f f 700 701 701 0 0 float48mi - - )); -DESCR("subtract"); -DATA(insert OID = 1118 ( "/" PGNSP PGUID b f f 700 701 701 0 0 float48div - - )); -DESCR("divide"); -DATA(insert OID = 1119 ( "*" PGNSP PGUID b f f 700 701 701 1129 0 float48mul - - )); -DESCR("multiply"); -DATA(insert OID = 1120 ( "=" PGNSP PGUID b t t 700 701 16 1130 1121 float48eq eqsel eqjoinsel )); -DESCR("equal"); 
-DATA(insert OID = 1121 ( "<>" PGNSP PGUID b f f 700 701 16 1131 1120 float48ne neqsel neqjoinsel )); -DESCR("not equal"); -DATA(insert OID = 1122 ( "<" PGNSP PGUID b f f 700 701 16 1133 1125 float48lt scalarltsel scalarltjoinsel )); -DESCR("less than"); -DATA(insert OID = 1123 ( ">" PGNSP PGUID b f f 700 701 16 1132 1124 float48gt scalargtsel scalargtjoinsel )); -DESCR("greater than"); -DATA(insert OID = 1124 ( "<=" PGNSP PGUID b f f 700 701 16 1135 1123 float48le scalarltsel scalarltjoinsel )); -DESCR("less than or equal"); -DATA(insert OID = 1125 ( ">=" PGNSP PGUID b f f 700 701 16 1134 1122 float48ge scalargtsel scalargtjoinsel )); -DESCR("greater than or equal"); + /* operator owner */ + Oid oprowner BKI_DEFAULT(PGUID); -/* float84 operators */ -DATA(insert OID = 1126 ( "+" PGNSP PGUID b f f 701 700 701 1116 0 float84pl - - )); -DESCR("add"); -DATA(insert OID = 1127 ( "-" PGNSP PGUID b f f 701 700 701 0 0 float84mi - - )); -DESCR("subtract"); -DATA(insert OID = 1128 ( "/" PGNSP PGUID b f f 701 700 701 0 0 float84div - - )); -DESCR("divide"); -DATA(insert OID = 1129 ( "*" PGNSP PGUID b f f 701 700 701 1119 0 float84mul - - )); -DESCR("multiply"); -DATA(insert OID = 1130 ( "=" PGNSP PGUID b t t 701 700 16 1120 1131 float84eq eqsel eqjoinsel )); -DESCR("equal"); -DATA(insert OID = 1131 ( "<>" PGNSP PGUID b f f 701 700 16 1121 1130 float84ne neqsel neqjoinsel )); -DESCR("not equal"); -DATA(insert OID = 1132 ( "<" PGNSP PGUID b f f 701 700 16 1123 1135 float84lt scalarltsel scalarltjoinsel )); -DESCR("less than"); -DATA(insert OID = 1133 ( ">" PGNSP PGUID b f f 701 700 16 1122 1134 float84gt scalargtsel scalargtjoinsel )); -DESCR("greater than"); -DATA(insert OID = 1134 ( "<=" PGNSP PGUID b f f 701 700 16 1125 1133 float84le scalarltsel scalarltjoinsel )); -DESCR("less than or equal"); -DATA(insert OID = 1135 ( ">=" PGNSP PGUID b f f 701 700 16 1124 1132 float84ge scalargtsel scalargtjoinsel )); -DESCR("greater than or equal"); + /* 'l', 'r', or 'b' */ + char oprkind BKI_DEFAULT(b); + /* can be used in merge join? */ + bool oprcanmerge BKI_DEFAULT(f); -/* LIKE hacks by Keith Parks. */ -DATA(insert OID = 1207 ( "~~" PGNSP PGUID b f f 19 25 16 0 1208 namelike likesel likejoinsel )); -DESCR("matches LIKE expression"); -#define OID_NAME_LIKE_OP 1207 -DATA(insert OID = 1208 ( "!~~" PGNSP PGUID b f f 19 25 16 0 1207 namenlike nlikesel nlikejoinsel )); -DESCR("does not match LIKE expression"); -DATA(insert OID = 1209 ( "~~" PGNSP PGUID b f f 25 25 16 0 1210 textlike likesel likejoinsel )); -DESCR("matches LIKE expression"); -#define OID_TEXT_LIKE_OP 1209 -DATA(insert OID = 1210 ( "!~~" PGNSP PGUID b f f 25 25 16 0 1209 textnlike nlikesel nlikejoinsel )); -DESCR("does not match LIKE expression"); -DATA(insert OID = 1211 ( "~~" PGNSP PGUID b f f 1042 25 16 0 1212 bpcharlike likesel likejoinsel )); -DESCR("matches LIKE expression"); -#define OID_BPCHAR_LIKE_OP 1211 -DATA(insert OID = 1212 ( "!~~" PGNSP PGUID b f f 1042 25 16 0 1211 bpcharnlike nlikesel nlikejoinsel )); -DESCR("does not match LIKE expression"); + /* can be used in hash join? 
*/ + bool oprcanhash BKI_DEFAULT(f); -/* case-insensitive regex hacks */ -DATA(insert OID = 1226 ( "~*" PGNSP PGUID b f f 19 25 16 0 1227 nameicregexeq icregexeqsel icregexeqjoinsel )); -DESCR("matches regular expression, case-insensitive"); -#define OID_NAME_ICREGEXEQ_OP 1226 -DATA(insert OID = 1227 ( "!~*" PGNSP PGUID b f f 19 25 16 0 1226 nameicregexne icregexnesel icregexnejoinsel )); -DESCR("does not match regular expression, case-insensitive"); -DATA(insert OID = 1228 ( "~*" PGNSP PGUID b f f 25 25 16 0 1229 texticregexeq icregexeqsel icregexeqjoinsel )); -DESCR("matches regular expression, case-insensitive"); -#define OID_TEXT_ICREGEXEQ_OP 1228 -DATA(insert OID = 1229 ( "!~*" PGNSP PGUID b f f 25 25 16 0 1228 texticregexne icregexnesel icregexnejoinsel )); -DESCR("does not match regular expression, case-insensitive"); -DATA(insert OID = 1234 ( "~*" PGNSP PGUID b f f 1042 25 16 0 1235 bpcharicregexeq icregexeqsel icregexeqjoinsel )); -DESCR("matches regular expression, case-insensitive"); -#define OID_BPCHAR_ICREGEXEQ_OP 1234 -DATA(insert OID = 1235 ( "!~*" PGNSP PGUID b f f 1042 25 16 0 1234 bpcharicregexne icregexnesel icregexnejoinsel )); -DESCR("does not match regular expression, case-insensitive"); + /* left arg type, or 0 if 'l' oprkind */ + Oid oprleft BKI_LOOKUP(pg_type); -/* timestamptz operators */ -DATA(insert OID = 1320 ( "=" PGNSP PGUID b t t 1184 1184 16 1320 1321 timestamptz_eq eqsel eqjoinsel )); -DESCR("equal"); -DATA(insert OID = 1321 ( "<>" PGNSP PGUID b f f 1184 1184 16 1321 1320 timestamptz_ne neqsel neqjoinsel )); -DESCR("not equal"); -DATA(insert OID = 1322 ( "<" PGNSP PGUID b f f 1184 1184 16 1324 1325 timestamptz_lt scalarltsel scalarltjoinsel )); -DESCR("less than"); -DATA(insert OID = 1323 ( "<=" PGNSP PGUID b f f 1184 1184 16 1325 1324 timestamptz_le scalarltsel scalarltjoinsel )); -DESCR("less than or equal"); -DATA(insert OID = 1324 ( ">" PGNSP PGUID b f f 1184 1184 16 1322 1323 timestamptz_gt scalargtsel scalargtjoinsel )); -DESCR("greater than"); -DATA(insert OID = 1325 ( ">=" PGNSP PGUID b f f 1184 1184 16 1323 1322 timestamptz_ge scalargtsel scalargtjoinsel )); -DESCR("greater than or equal"); -DATA(insert OID = 1327 ( "+" PGNSP PGUID b f f 1184 1186 1184 2554 0 timestamptz_pl_interval - - )); -DESCR("add"); -DATA(insert OID = 1328 ( "-" PGNSP PGUID b f f 1184 1184 1186 0 0 timestamptz_mi - - )); -DESCR("subtract"); -DATA(insert OID = 1329 ( "-" PGNSP PGUID b f f 1184 1186 1184 0 0 timestamptz_mi_interval - - )); -DESCR("subtract"); + /* right arg type, or 0 if 'r' oprkind */ + Oid oprright BKI_LOOKUP(pg_type); -/* interval operators */ -DATA(insert OID = 1330 ( "=" PGNSP PGUID b t t 1186 1186 16 1330 1331 interval_eq eqsel eqjoinsel )); -DESCR("equal"); -DATA(insert OID = 1331 ( "<>" PGNSP PGUID b f f 1186 1186 16 1331 1330 interval_ne neqsel neqjoinsel )); -DESCR("not equal"); -DATA(insert OID = 1332 ( "<" PGNSP PGUID b f f 1186 1186 16 1334 1335 interval_lt scalarltsel scalarltjoinsel )); -DESCR("less than"); -DATA(insert OID = 1333 ( "<=" PGNSP PGUID b f f 1186 1186 16 1335 1334 interval_le scalarltsel scalarltjoinsel )); -DESCR("less than or equal"); -DATA(insert OID = 1334 ( ">" PGNSP PGUID b f f 1186 1186 16 1332 1333 interval_gt scalargtsel scalargtjoinsel )); -DESCR("greater than"); -DATA(insert OID = 1335 ( ">=" PGNSP PGUID b f f 1186 1186 16 1333 1332 interval_ge scalargtsel scalargtjoinsel )); -DESCR("greater than or equal"); + /* result datatype */ + Oid oprresult BKI_LOOKUP(pg_type); -DATA(insert OID = 1336 ( "-" PGNSP PGUID l f f 0 
1186 1186 0 0 interval_um - - )); -DESCR("negate"); -DATA(insert OID = 1337 ( "+" PGNSP PGUID b f f 1186 1186 1186 1337 0 interval_pl - - )); -DESCR("add"); -DATA(insert OID = 1338 ( "-" PGNSP PGUID b f f 1186 1186 1186 0 0 interval_mi - - )); -DESCR("subtract"); + /* OID of commutator oper, or 0 if none */ + Oid oprcom BKI_DEFAULT(0) BKI_LOOKUP(pg_operator); -DATA(insert OID = 1360 ( "+" PGNSP PGUID b f f 1082 1083 1114 1363 0 datetime_pl - - )); -DESCR("convert date and time to timestamp"); -DATA(insert OID = 1361 ( "+" PGNSP PGUID b f f 1082 1266 1184 1366 0 datetimetz_pl - - )); -DESCR("convert date and time with time zone to timestamp with time zone"); -DATA(insert OID = 1363 ( "+" PGNSP PGUID b f f 1083 1082 1114 1360 0 timedate_pl - - )); -DESCR("convert time and date to timestamp"); -DATA(insert OID = 1366 ( "+" PGNSP PGUID b f f 1266 1082 1184 1361 0 timetzdate_pl - - )); -DESCR("convert time with time zone and date to timestamp with time zone"); + /* OID of negator oper, or 0 if none */ + Oid oprnegate BKI_DEFAULT(0) BKI_LOOKUP(pg_operator); -DATA(insert OID = 1399 ( "-" PGNSP PGUID b f f 1083 1083 1186 0 0 time_mi_time - - )); -DESCR("subtract"); + /* OID of underlying function */ + regproc oprcode BKI_LOOKUP(pg_proc); -/* additional geometric operators - thomas 97/04/18 */ -DATA(insert OID = 1420 ( "@@" PGNSP PGUID l f f 0 718 600 0 0 circle_center - - )); -DESCR("center of"); -DATA(insert OID = 1500 ( "=" PGNSP PGUID b f f 718 718 16 1500 1501 circle_eq eqsel eqjoinsel )); -DESCR("equal by area"); -DATA(insert OID = 1501 ( "<>" PGNSP PGUID b f f 718 718 16 1501 1500 circle_ne neqsel neqjoinsel )); -DESCR("not equal by area"); -DATA(insert OID = 1502 ( "<" PGNSP PGUID b f f 718 718 16 1503 1505 circle_lt areasel areajoinsel )); -DESCR("less than by area"); -DATA(insert OID = 1503 ( ">" PGNSP PGUID b f f 718 718 16 1502 1504 circle_gt areasel areajoinsel )); -DESCR("greater than by area"); -DATA(insert OID = 1504 ( "<=" PGNSP PGUID b f f 718 718 16 1505 1503 circle_le areasel areajoinsel )); -DESCR("less than or equal by area"); -DATA(insert OID = 1505 ( ">=" PGNSP PGUID b f f 718 718 16 1504 1502 circle_ge areasel areajoinsel )); -DESCR("greater than or equal by area"); + /* OID of restriction estimator, or 0 */ + regproc oprrest BKI_DEFAULT(-) BKI_LOOKUP(pg_proc); -DATA(insert OID = 1506 ( "<<" PGNSP PGUID b f f 718 718 16 0 0 circle_left positionsel positionjoinsel )); -DESCR("is left of"); -DATA(insert OID = 1507 ( "&<" PGNSP PGUID b f f 718 718 16 0 0 circle_overleft positionsel positionjoinsel )); -DESCR("overlaps or is left of"); -DATA(insert OID = 1508 ( "&>" PGNSP PGUID b f f 718 718 16 0 0 circle_overright positionsel positionjoinsel )); -DESCR("overlaps or is right of"); -DATA(insert OID = 1509 ( ">>" PGNSP PGUID b f f 718 718 16 0 0 circle_right positionsel positionjoinsel )); -DESCR("is right of"); -DATA(insert OID = 1510 ( "<@" PGNSP PGUID b f f 718 718 16 1511 0 circle_contained contsel contjoinsel )); -DESCR("is contained by"); -DATA(insert OID = 1511 ( "@>" PGNSP PGUID b f f 718 718 16 1510 0 circle_contain contsel contjoinsel )); -DESCR("contains"); -DATA(insert OID = 1512 ( "~=" PGNSP PGUID b f f 718 718 16 1512 0 circle_same eqsel eqjoinsel )); -DESCR("same as"); -DATA(insert OID = 1513 ( "&&" PGNSP PGUID b f f 718 718 16 1513 0 circle_overlap areasel areajoinsel )); -DESCR("overlaps"); -DATA(insert OID = 1514 ( "|>>" PGNSP PGUID b f f 718 718 16 0 0 circle_above positionsel positionjoinsel )); -DESCR("is above"); -DATA(insert OID = 1515 ( "<<|" PGNSP PGUID b 
f f 718 718 16 0 0 circle_below positionsel positionjoinsel )); -DESCR("is below"); - -DATA(insert OID = 1516 ( "+" PGNSP PGUID b f f 718 600 718 0 0 circle_add_pt - - )); -DESCR("add"); -DATA(insert OID = 1517 ( "-" PGNSP PGUID b f f 718 600 718 0 0 circle_sub_pt - - )); -DESCR("subtract"); -DATA(insert OID = 1518 ( "*" PGNSP PGUID b f f 718 600 718 0 0 circle_mul_pt - - )); -DESCR("multiply"); -DATA(insert OID = 1519 ( "/" PGNSP PGUID b f f 718 600 718 0 0 circle_div_pt - - )); -DESCR("divide"); - -DATA(insert OID = 1520 ( "<->" PGNSP PGUID b f f 718 718 701 1520 0 circle_distance - - )); -DESCR("distance between"); -DATA(insert OID = 1521 ( "#" PGNSP PGUID l f f 0 604 23 0 0 poly_npoints - - )); -DESCR("number of points"); -DATA(insert OID = 1522 ( "<->" PGNSP PGUID b f f 600 718 701 3291 0 dist_pc - - )); -DESCR("distance between"); -DATA(insert OID = 3291 ( "<->" PGNSP PGUID b f f 718 600 701 1522 0 dist_cpoint - - )); -DESCR("distance between"); -DATA(insert OID = 3276 ( "<->" PGNSP PGUID b f f 600 604 701 3289 0 dist_ppoly - - )); -DESCR("distance between"); -DATA(insert OID = 3289 ( "<->" PGNSP PGUID b f f 604 600 701 3276 0 dist_polyp - - )); -DESCR("distance between"); -DATA(insert OID = 1523 ( "<->" PGNSP PGUID b f f 718 604 701 0 0 dist_cpoly - - )); -DESCR("distance between"); - -/* additional geometric operators - thomas 1997-07-09 */ -DATA(insert OID = 1524 ( "<->" PGNSP PGUID b f f 628 603 701 0 0 dist_lb - - )); -DESCR("distance between"); - -DATA(insert OID = 1525 ( "?#" PGNSP PGUID b f f 601 601 16 1525 0 lseg_intersect - - )); -DESCR("intersect"); -DATA(insert OID = 1526 ( "?||" PGNSP PGUID b f f 601 601 16 1526 0 lseg_parallel - - )); -DESCR("parallel"); -DATA(insert OID = 1527 ( "?-|" PGNSP PGUID b f f 601 601 16 1527 0 lseg_perp - - )); -DESCR("perpendicular"); -DATA(insert OID = 1528 ( "?-" PGNSP PGUID l f f 0 601 16 0 0 lseg_horizontal - - )); -DESCR("horizontal"); -DATA(insert OID = 1529 ( "?|" PGNSP PGUID l f f 0 601 16 0 0 lseg_vertical - - )); -DESCR("vertical"); -DATA(insert OID = 1535 ( "=" PGNSP PGUID b f f 601 601 16 1535 1586 lseg_eq eqsel eqjoinsel )); -DESCR("equal"); -DATA(insert OID = 1536 ( "#" PGNSP PGUID b f f 601 601 600 1536 0 lseg_interpt - - )); -DESCR("intersection point"); -DATA(insert OID = 1537 ( "?#" PGNSP PGUID b f f 601 628 16 0 0 inter_sl - - )); -DESCR("intersect"); -DATA(insert OID = 1538 ( "?#" PGNSP PGUID b f f 601 603 16 0 0 inter_sb - - )); -DESCR("intersect"); -DATA(insert OID = 1539 ( "?#" PGNSP PGUID b f f 628 603 16 0 0 inter_lb - - )); -DESCR("intersect"); - -DATA(insert OID = 1546 ( "<@" PGNSP PGUID b f f 600 628 16 0 0 on_pl - - )); -DESCR("point on line"); -DATA(insert OID = 1547 ( "<@" PGNSP PGUID b f f 600 601 16 0 0 on_ps - - )); -DESCR("is contained by"); -DATA(insert OID = 1548 ( "<@" PGNSP PGUID b f f 601 628 16 0 0 on_sl - - )); -DESCR("lseg on line"); -DATA(insert OID = 1549 ( "<@" PGNSP PGUID b f f 601 603 16 0 0 on_sb - - )); -DESCR("is contained by"); - -DATA(insert OID = 1557 ( "##" PGNSP PGUID b f f 600 628 600 0 0 close_pl - - )); -DESCR("closest point to A on B"); -DATA(insert OID = 1558 ( "##" PGNSP PGUID b f f 600 601 600 0 0 close_ps - - )); -DESCR("closest point to A on B"); -DATA(insert OID = 1559 ( "##" PGNSP PGUID b f f 600 603 600 0 0 close_pb - - )); -DESCR("closest point to A on B"); - -DATA(insert OID = 1566 ( "##" PGNSP PGUID b f f 601 628 600 0 0 close_sl - - )); -DESCR("closest point to A on B"); -DATA(insert OID = 1567 ( "##" PGNSP PGUID b f f 601 603 600 0 0 close_sb - - )); -DESCR("closest 
point to A on B"); -DATA(insert OID = 1568 ( "##" PGNSP PGUID b f f 628 603 600 0 0 close_lb - - )); -DESCR("closest point to A on B"); -DATA(insert OID = 1577 ( "##" PGNSP PGUID b f f 628 601 600 0 0 close_ls - - )); -DESCR("closest point to A on B"); -DATA(insert OID = 1578 ( "##" PGNSP PGUID b f f 601 601 600 0 0 close_lseg - - )); -DESCR("closest point to A on B"); -DATA(insert OID = 1583 ( "*" PGNSP PGUID b f f 1186 701 1186 1584 0 interval_mul - - )); -DESCR("multiply"); -DATA(insert OID = 1584 ( "*" PGNSP PGUID b f f 701 1186 1186 1583 0 mul_d_interval - - )); -DESCR("multiply"); -DATA(insert OID = 1585 ( "/" PGNSP PGUID b f f 1186 701 1186 0 0 interval_div - - )); -DESCR("divide"); - -DATA(insert OID = 1586 ( "<>" PGNSP PGUID b f f 601 601 16 1586 1535 lseg_ne neqsel neqjoinsel )); -DESCR("not equal"); -DATA(insert OID = 1587 ( "<" PGNSP PGUID b f f 601 601 16 1589 1590 lseg_lt - - )); -DESCR("less than by length"); -DATA(insert OID = 1588 ( "<=" PGNSP PGUID b f f 601 601 16 1590 1589 lseg_le - - )); -DESCR("less than or equal by length"); -DATA(insert OID = 1589 ( ">" PGNSP PGUID b f f 601 601 16 1587 1588 lseg_gt - - )); -DESCR("greater than by length"); -DATA(insert OID = 1590 ( ">=" PGNSP PGUID b f f 601 601 16 1588 1587 lseg_ge - - )); -DESCR("greater than or equal by length"); - -DATA(insert OID = 1591 ( "@-@" PGNSP PGUID l f f 0 601 701 0 0 lseg_length - - )); -DESCR("distance between endpoints"); - -DATA(insert OID = 1611 ( "?#" PGNSP PGUID b f f 628 628 16 1611 0 line_intersect - - )); -DESCR("intersect"); -DATA(insert OID = 1612 ( "?||" PGNSP PGUID b f f 628 628 16 1612 0 line_parallel - - )); -DESCR("parallel"); -DATA(insert OID = 1613 ( "?-|" PGNSP PGUID b f f 628 628 16 1613 0 line_perp - - )); -DESCR("perpendicular"); -DATA(insert OID = 1614 ( "?-" PGNSP PGUID l f f 0 628 16 0 0 line_horizontal - - )); -DESCR("horizontal"); -DATA(insert OID = 1615 ( "?|" PGNSP PGUID l f f 0 628 16 0 0 line_vertical - - )); -DESCR("vertical"); -DATA(insert OID = 1616 ( "=" PGNSP PGUID b f f 628 628 16 1616 0 line_eq eqsel eqjoinsel )); -DESCR("equal"); -DATA(insert OID = 1617 ( "#" PGNSP PGUID b f f 628 628 600 1617 0 line_interpt - - )); -DESCR("intersection point"); - -/* MACADDR type */ -DATA(insert OID = 1220 ( "=" PGNSP PGUID b t t 829 829 16 1220 1221 macaddr_eq eqsel eqjoinsel )); -DESCR("equal"); -DATA(insert OID = 1221 ( "<>" PGNSP PGUID b f f 829 829 16 1221 1220 macaddr_ne neqsel neqjoinsel )); -DESCR("not equal"); -DATA(insert OID = 1222 ( "<" PGNSP PGUID b f f 829 829 16 1224 1225 macaddr_lt scalarltsel scalarltjoinsel )); -DESCR("less than"); -DATA(insert OID = 1223 ( "<=" PGNSP PGUID b f f 829 829 16 1225 1224 macaddr_le scalarltsel scalarltjoinsel )); -DESCR("less than or equal"); -DATA(insert OID = 1224 ( ">" PGNSP PGUID b f f 829 829 16 1222 1223 macaddr_gt scalargtsel scalargtjoinsel )); -DESCR("greater than"); -DATA(insert OID = 1225 ( ">=" PGNSP PGUID b f f 829 829 16 1223 1222 macaddr_ge scalargtsel scalargtjoinsel )); -DESCR("greater than or equal"); - -DATA(insert OID = 3147 ( "~" PGNSP PGUID l f f 0 829 829 0 0 macaddr_not - - )); -DESCR("bitwise not"); -DATA(insert OID = 3148 ( "&" PGNSP PGUID b f f 829 829 829 0 0 macaddr_and - - )); -DESCR("bitwise and"); -DATA(insert OID = 3149 ( "|" PGNSP PGUID b f f 829 829 829 0 0 macaddr_or - - )); -DESCR("bitwise or"); - -/* MACADDR8 type */ -DATA(insert OID = 3362 ( "=" PGNSP PGUID b t t 774 774 16 3362 3363 macaddr8_eq eqsel eqjoinsel )); -DESCR("equal"); -DATA(insert OID = 3363 ( "<>" PGNSP PGUID b f f 774 774 16 
3363 3362 macaddr8_ne neqsel neqjoinsel )); -DESCR("not equal"); -DATA(insert OID = 3364 ( "<" PGNSP PGUID b f f 774 774 16 3366 3367 macaddr8_lt scalarltsel scalarltjoinsel )); -DESCR("less than"); -DATA(insert OID = 3365 ( "<=" PGNSP PGUID b f f 774 774 16 3367 3366 macaddr8_le scalarltsel scalarltjoinsel )); -DESCR("less than or equal"); -DATA(insert OID = 3366 ( ">" PGNSP PGUID b f f 774 774 16 3364 3365 macaddr8_gt scalargtsel scalargtjoinsel )); -DESCR("greater than"); -DATA(insert OID = 3367 ( ">=" PGNSP PGUID b f f 774 774 16 3365 3364 macaddr8_ge scalargtsel scalargtjoinsel )); -DESCR("greater than or equal"); - -DATA(insert OID = 3368 ( "~" PGNSP PGUID l f f 0 774 774 0 0 macaddr8_not - - )); -DESCR("bitwise not"); -DATA(insert OID = 3369 ( "&" PGNSP PGUID b f f 774 774 774 0 0 macaddr8_and - - )); -DESCR("bitwise and"); -DATA(insert OID = 3370 ( "|" PGNSP PGUID b f f 774 774 774 0 0 macaddr8_or - - )); -DESCR("bitwise or"); - -/* INET type (these also support CIDR via implicit cast) */ -DATA(insert OID = 1201 ( "=" PGNSP PGUID b t t 869 869 16 1201 1202 network_eq eqsel eqjoinsel )); -DESCR("equal"); -DATA(insert OID = 1202 ( "<>" PGNSP PGUID b f f 869 869 16 1202 1201 network_ne neqsel neqjoinsel )); -DESCR("not equal"); -DATA(insert OID = 1203 ( "<" PGNSP PGUID b f f 869 869 16 1205 1206 network_lt scalarltsel scalarltjoinsel )); -DESCR("less than"); -DATA(insert OID = 1204 ( "<=" PGNSP PGUID b f f 869 869 16 1206 1205 network_le scalarltsel scalarltjoinsel )); -DESCR("less than or equal"); -DATA(insert OID = 1205 ( ">" PGNSP PGUID b f f 869 869 16 1203 1204 network_gt scalargtsel scalargtjoinsel )); -DESCR("greater than"); -DATA(insert OID = 1206 ( ">=" PGNSP PGUID b f f 869 869 16 1204 1203 network_ge scalargtsel scalargtjoinsel )); -DESCR("greater than or equal"); -DATA(insert OID = 931 ( "<<" PGNSP PGUID b f f 869 869 16 933 0 network_sub networksel networkjoinsel )); -DESCR("is subnet"); -#define OID_INET_SUB_OP 931 -DATA(insert OID = 932 ( "<<=" PGNSP PGUID b f f 869 869 16 934 0 network_subeq networksel networkjoinsel )); -DESCR("is subnet or equal"); -#define OID_INET_SUBEQ_OP 932 -DATA(insert OID = 933 ( ">>" PGNSP PGUID b f f 869 869 16 931 0 network_sup networksel networkjoinsel )); -DESCR("is supernet"); -#define OID_INET_SUP_OP 933 -DATA(insert OID = 934 ( ">>=" PGNSP PGUID b f f 869 869 16 932 0 network_supeq networksel networkjoinsel )); -DESCR("is supernet or equal"); -#define OID_INET_SUPEQ_OP 934 -DATA(insert OID = 3552 ( "&&" PGNSP PGUID b f f 869 869 16 3552 0 network_overlap networksel networkjoinsel )); -DESCR("overlaps (is subnet or supernet)"); -#define OID_INET_OVERLAP_OP 3552 - -DATA(insert OID = 2634 ( "~" PGNSP PGUID l f f 0 869 869 0 0 inetnot - - )); -DESCR("bitwise not"); -DATA(insert OID = 2635 ( "&" PGNSP PGUID b f f 869 869 869 0 0 inetand - - )); -DESCR("bitwise and"); -DATA(insert OID = 2636 ( "|" PGNSP PGUID b f f 869 869 869 0 0 inetor - - )); -DESCR("bitwise or"); -DATA(insert OID = 2637 ( "+" PGNSP PGUID b f f 869 20 869 2638 0 inetpl - - )); -DESCR("add"); -DATA(insert OID = 2638 ( "+" PGNSP PGUID b f f 20 869 869 2637 0 int8pl_inet - - )); -DESCR("add"); -DATA(insert OID = 2639 ( "-" PGNSP PGUID b f f 869 20 869 0 0 inetmi_int8 - - )); -DESCR("subtract"); -DATA(insert OID = 2640 ( "-" PGNSP PGUID b f f 869 869 20 0 0 inetmi - - )); -DESCR("subtract"); - -/* case-insensitive LIKE hacks */ -DATA(insert OID = 1625 ( "~~*" PGNSP PGUID b f f 19 25 16 0 1626 nameiclike iclikesel iclikejoinsel )); -DESCR("matches LIKE expression, 
case-insensitive"); -#define OID_NAME_ICLIKE_OP 1625 -DATA(insert OID = 1626 ( "!~~*" PGNSP PGUID b f f 19 25 16 0 1625 nameicnlike icnlikesel icnlikejoinsel )); -DESCR("does not match LIKE expression, case-insensitive"); -DATA(insert OID = 1627 ( "~~*" PGNSP PGUID b f f 25 25 16 0 1628 texticlike iclikesel iclikejoinsel )); -DESCR("matches LIKE expression, case-insensitive"); -#define OID_TEXT_ICLIKE_OP 1627 -DATA(insert OID = 1628 ( "!~~*" PGNSP PGUID b f f 25 25 16 0 1627 texticnlike icnlikesel icnlikejoinsel )); -DESCR("does not match LIKE expression, case-insensitive"); -DATA(insert OID = 1629 ( "~~*" PGNSP PGUID b f f 1042 25 16 0 1630 bpchariclike iclikesel iclikejoinsel )); -DESCR("matches LIKE expression, case-insensitive"); -#define OID_BPCHAR_ICLIKE_OP 1629 -DATA(insert OID = 1630 ( "!~~*" PGNSP PGUID b f f 1042 25 16 0 1629 bpcharicnlike icnlikesel icnlikejoinsel )); -DESCR("does not match LIKE expression, case-insensitive"); - -/* NUMERIC type - OID's 1700-1799 */ -DATA(insert OID = 1751 ( "-" PGNSP PGUID l f f 0 1700 1700 0 0 numeric_uminus - - )); -DESCR("negate"); -DATA(insert OID = 1752 ( "=" PGNSP PGUID b t t 1700 1700 16 1752 1753 numeric_eq eqsel eqjoinsel )); -DESCR("equal"); -DATA(insert OID = 1753 ( "<>" PGNSP PGUID b f f 1700 1700 16 1753 1752 numeric_ne neqsel neqjoinsel )); -DESCR("not equal"); -DATA(insert OID = 1754 ( "<" PGNSP PGUID b f f 1700 1700 16 1756 1757 numeric_lt scalarltsel scalarltjoinsel )); -DESCR("less than"); -DATA(insert OID = 1755 ( "<=" PGNSP PGUID b f f 1700 1700 16 1757 1756 numeric_le scalarltsel scalarltjoinsel )); -DESCR("less than or equal"); -DATA(insert OID = 1756 ( ">" PGNSP PGUID b f f 1700 1700 16 1754 1755 numeric_gt scalargtsel scalargtjoinsel )); -DESCR("greater than"); -DATA(insert OID = 1757 ( ">=" PGNSP PGUID b f f 1700 1700 16 1755 1754 numeric_ge scalargtsel scalargtjoinsel )); -DESCR("greater than or equal"); -DATA(insert OID = 1758 ( "+" PGNSP PGUID b f f 1700 1700 1700 1758 0 numeric_add - - )); -DESCR("add"); -DATA(insert OID = 1759 ( "-" PGNSP PGUID b f f 1700 1700 1700 0 0 numeric_sub - - )); -DESCR("subtract"); -DATA(insert OID = 1760 ( "*" PGNSP PGUID b f f 1700 1700 1700 1760 0 numeric_mul - - )); -DESCR("multiply"); -DATA(insert OID = 1761 ( "/" PGNSP PGUID b f f 1700 1700 1700 0 0 numeric_div - - )); -DESCR("divide"); -DATA(insert OID = 1762 ( "%" PGNSP PGUID b f f 1700 1700 1700 0 0 numeric_mod - - )); -DESCR("modulus"); -DATA(insert OID = 1038 ( "^" PGNSP PGUID b f f 1700 1700 1700 0 0 numeric_power - - )); -DESCR("exponentiation"); -DATA(insert OID = 1763 ( "@" PGNSP PGUID l f f 0 1700 1700 0 0 numeric_abs - - )); -DESCR("absolute value"); - -DATA(insert OID = 1784 ( "=" PGNSP PGUID b t f 1560 1560 16 1784 1785 biteq eqsel eqjoinsel )); -DESCR("equal"); -DATA(insert OID = 1785 ( "<>" PGNSP PGUID b f f 1560 1560 16 1785 1784 bitne neqsel neqjoinsel )); -DESCR("not equal"); -DATA(insert OID = 1786 ( "<" PGNSP PGUID b f f 1560 1560 16 1787 1789 bitlt scalarltsel scalarltjoinsel )); -DESCR("less than"); -DATA(insert OID = 1787 ( ">" PGNSP PGUID b f f 1560 1560 16 1786 1788 bitgt scalargtsel scalargtjoinsel )); -DESCR("greater than"); -DATA(insert OID = 1788 ( "<=" PGNSP PGUID b f f 1560 1560 16 1789 1787 bitle scalarltsel scalarltjoinsel )); -DESCR("less than or equal"); -DATA(insert OID = 1789 ( ">=" PGNSP PGUID b f f 1560 1560 16 1788 1786 bitge scalargtsel scalargtjoinsel )); -DESCR("greater than or equal"); -DATA(insert OID = 1791 ( "&" PGNSP PGUID b f f 1560 1560 1560 1791 0 bitand - - )); -DESCR("bitwise 
and"); -DATA(insert OID = 1792 ( "|" PGNSP PGUID b f f 1560 1560 1560 1792 0 bitor - - )); -DESCR("bitwise or"); -DATA(insert OID = 1793 ( "#" PGNSP PGUID b f f 1560 1560 1560 1793 0 bitxor - - )); -DESCR("bitwise exclusive or"); -DATA(insert OID = 1794 ( "~" PGNSP PGUID l f f 0 1560 1560 0 0 bitnot - - )); -DESCR("bitwise not"); -DATA(insert OID = 1795 ( "<<" PGNSP PGUID b f f 1560 23 1560 0 0 bitshiftleft - - )); -DESCR("bitwise shift left"); -DATA(insert OID = 1796 ( ">>" PGNSP PGUID b f f 1560 23 1560 0 0 bitshiftright - - )); -DESCR("bitwise shift right"); -DATA(insert OID = 1797 ( "||" PGNSP PGUID b f f 1562 1562 1562 0 0 bitcat - - )); -DESCR("concatenate"); - -DATA(insert OID = 1800 ( "+" PGNSP PGUID b f f 1083 1186 1083 1849 0 time_pl_interval - - )); -DESCR("add"); -DATA(insert OID = 1801 ( "-" PGNSP PGUID b f f 1083 1186 1083 0 0 time_mi_interval - - )); -DESCR("subtract"); -DATA(insert OID = 1802 ( "+" PGNSP PGUID b f f 1266 1186 1266 2552 0 timetz_pl_interval - - )); -DESCR("add"); -DATA(insert OID = 1803 ( "-" PGNSP PGUID b f f 1266 1186 1266 0 0 timetz_mi_interval - - )); -DESCR("subtract"); - -DATA(insert OID = 1804 ( "=" PGNSP PGUID b t f 1562 1562 16 1804 1805 varbiteq eqsel eqjoinsel )); -DESCR("equal"); -DATA(insert OID = 1805 ( "<>" PGNSP PGUID b f f 1562 1562 16 1805 1804 varbitne neqsel neqjoinsel )); -DESCR("not equal"); -DATA(insert OID = 1806 ( "<" PGNSP PGUID b f f 1562 1562 16 1807 1809 varbitlt scalarltsel scalarltjoinsel )); -DESCR("less than"); -DATA(insert OID = 1807 ( ">" PGNSP PGUID b f f 1562 1562 16 1806 1808 varbitgt scalargtsel scalargtjoinsel )); -DESCR("greater than"); -DATA(insert OID = 1808 ( "<=" PGNSP PGUID b f f 1562 1562 16 1809 1807 varbitle scalarltsel scalarltjoinsel )); -DESCR("less than or equal"); -DATA(insert OID = 1809 ( ">=" PGNSP PGUID b f f 1562 1562 16 1808 1806 varbitge scalargtsel scalargtjoinsel )); -DESCR("greater than or equal"); - -DATA(insert OID = 1849 ( "+" PGNSP PGUID b f f 1186 1083 1083 1800 0 interval_pl_time - - )); -DESCR("add"); - -DATA(insert OID = 1862 ( "=" PGNSP PGUID b t t 21 20 16 1868 1863 int28eq eqsel eqjoinsel )); -DESCR("equal"); -DATA(insert OID = 1863 ( "<>" PGNSP PGUID b f f 21 20 16 1869 1862 int28ne neqsel neqjoinsel )); -DESCR("not equal"); -DATA(insert OID = 1864 ( "<" PGNSP PGUID b f f 21 20 16 1871 1867 int28lt scalarltsel scalarltjoinsel )); -DESCR("less than"); -DATA(insert OID = 1865 ( ">" PGNSP PGUID b f f 21 20 16 1870 1866 int28gt scalargtsel scalargtjoinsel )); -DESCR("greater than"); -DATA(insert OID = 1866 ( "<=" PGNSP PGUID b f f 21 20 16 1873 1865 int28le scalarltsel scalarltjoinsel )); -DESCR("less than or equal"); -DATA(insert OID = 1867 ( ">=" PGNSP PGUID b f f 21 20 16 1872 1864 int28ge scalargtsel scalargtjoinsel )); -DESCR("greater than or equal"); - -DATA(insert OID = 1868 ( "=" PGNSP PGUID b t t 20 21 16 1862 1869 int82eq eqsel eqjoinsel )); -DESCR("equal"); -DATA(insert OID = 1869 ( "<>" PGNSP PGUID b f f 20 21 16 1863 1868 int82ne neqsel neqjoinsel )); -DESCR("not equal"); -DATA(insert OID = 1870 ( "<" PGNSP PGUID b f f 20 21 16 1865 1873 int82lt scalarltsel scalarltjoinsel )); -DESCR("less than"); -DATA(insert OID = 1871 ( ">" PGNSP PGUID b f f 20 21 16 1864 1872 int82gt scalargtsel scalargtjoinsel )); -DESCR("greater than"); -DATA(insert OID = 1872 ( "<=" PGNSP PGUID b f f 20 21 16 1867 1871 int82le scalarltsel scalarltjoinsel )); -DESCR("less than or equal"); -DATA(insert OID = 1873 ( ">=" PGNSP PGUID b f f 20 21 16 1866 1870 int82ge scalargtsel scalargtjoinsel )); 
-DESCR("greater than or equal"); - -DATA(insert OID = 1874 ( "&" PGNSP PGUID b f f 21 21 21 1874 0 int2and - - )); -DESCR("bitwise and"); -DATA(insert OID = 1875 ( "|" PGNSP PGUID b f f 21 21 21 1875 0 int2or - - )); -DESCR("bitwise or"); -DATA(insert OID = 1876 ( "#" PGNSP PGUID b f f 21 21 21 1876 0 int2xor - - )); -DESCR("bitwise exclusive or"); -DATA(insert OID = 1877 ( "~" PGNSP PGUID l f f 0 21 21 0 0 int2not - - )); -DESCR("bitwise not"); -DATA(insert OID = 1878 ( "<<" PGNSP PGUID b f f 21 23 21 0 0 int2shl - - )); -DESCR("bitwise shift left"); -DATA(insert OID = 1879 ( ">>" PGNSP PGUID b f f 21 23 21 0 0 int2shr - - )); -DESCR("bitwise shift right"); - -DATA(insert OID = 1880 ( "&" PGNSP PGUID b f f 23 23 23 1880 0 int4and - - )); -DESCR("bitwise and"); -DATA(insert OID = 1881 ( "|" PGNSP PGUID b f f 23 23 23 1881 0 int4or - - )); -DESCR("bitwise or"); -DATA(insert OID = 1882 ( "#" PGNSP PGUID b f f 23 23 23 1882 0 int4xor - - )); -DESCR("bitwise exclusive or"); -DATA(insert OID = 1883 ( "~" PGNSP PGUID l f f 0 23 23 0 0 int4not - - )); -DESCR("bitwise not"); -DATA(insert OID = 1884 ( "<<" PGNSP PGUID b f f 23 23 23 0 0 int4shl - - )); -DESCR("bitwise shift left"); -DATA(insert OID = 1885 ( ">>" PGNSP PGUID b f f 23 23 23 0 0 int4shr - - )); -DESCR("bitwise shift right"); - -DATA(insert OID = 1886 ( "&" PGNSP PGUID b f f 20 20 20 1886 0 int8and - - )); -DESCR("bitwise and"); -DATA(insert OID = 1887 ( "|" PGNSP PGUID b f f 20 20 20 1887 0 int8or - - )); -DESCR("bitwise or"); -DATA(insert OID = 1888 ( "#" PGNSP PGUID b f f 20 20 20 1888 0 int8xor - - )); -DESCR("bitwise exclusive or"); -DATA(insert OID = 1889 ( "~" PGNSP PGUID l f f 0 20 20 0 0 int8not - - )); -DESCR("bitwise not"); -DATA(insert OID = 1890 ( "<<" PGNSP PGUID b f f 20 23 20 0 0 int8shl - - )); -DESCR("bitwise shift left"); -DATA(insert OID = 1891 ( ">>" PGNSP PGUID b f f 20 23 20 0 0 int8shr - - )); -DESCR("bitwise shift right"); - -DATA(insert OID = 1916 ( "+" PGNSP PGUID l f f 0 20 20 0 0 int8up - - )); -DESCR("unary plus"); -DATA(insert OID = 1917 ( "+" PGNSP PGUID l f f 0 21 21 0 0 int2up - - )); -DESCR("unary plus"); -DATA(insert OID = 1918 ( "+" PGNSP PGUID l f f 0 23 23 0 0 int4up - - )); -DESCR("unary plus"); -DATA(insert OID = 1919 ( "+" PGNSP PGUID l f f 0 700 700 0 0 float4up - - )); -DESCR("unary plus"); -DATA(insert OID = 1920 ( "+" PGNSP PGUID l f f 0 701 701 0 0 float8up - - )); -DESCR("unary plus"); -DATA(insert OID = 1921 ( "+" PGNSP PGUID l f f 0 1700 1700 0 0 numeric_uplus - - )); -DESCR("unary plus"); - -/* bytea operators */ -DATA(insert OID = 1955 ( "=" PGNSP PGUID b t t 17 17 16 1955 1956 byteaeq eqsel eqjoinsel )); -DESCR("equal"); -DATA(insert OID = 1956 ( "<>" PGNSP PGUID b f f 17 17 16 1956 1955 byteane neqsel neqjoinsel )); -DESCR("not equal"); -DATA(insert OID = 1957 ( "<" PGNSP PGUID b f f 17 17 16 1959 1960 bytealt scalarltsel scalarltjoinsel )); -DESCR("less than"); -DATA(insert OID = 1958 ( "<=" PGNSP PGUID b f f 17 17 16 1960 1959 byteale scalarltsel scalarltjoinsel )); -DESCR("less than or equal"); -DATA(insert OID = 1959 ( ">" PGNSP PGUID b f f 17 17 16 1957 1958 byteagt scalargtsel scalargtjoinsel )); -DESCR("greater than"); -DATA(insert OID = 1960 ( ">=" PGNSP PGUID b f f 17 17 16 1958 1957 byteage scalargtsel scalargtjoinsel )); -DESCR("greater than or equal"); - -DATA(insert OID = 2016 ( "~~" PGNSP PGUID b f f 17 17 16 0 2017 bytealike likesel likejoinsel )); -DESCR("matches LIKE expression"); -#define OID_BYTEA_LIKE_OP 2016 -DATA(insert OID = 2017 ( "!~~" PGNSP PGUID b f f 17 
17 16 0 2016 byteanlike nlikesel nlikejoinsel )); -DESCR("does not match LIKE expression"); -DATA(insert OID = 2018 ( "||" PGNSP PGUID b f f 17 17 17 0 0 byteacat - - )); -DESCR("concatenate"); - -/* timestamp operators */ -DATA(insert OID = 2060 ( "=" PGNSP PGUID b t t 1114 1114 16 2060 2061 timestamp_eq eqsel eqjoinsel )); -DESCR("equal"); -DATA(insert OID = 2061 ( "<>" PGNSP PGUID b f f 1114 1114 16 2061 2060 timestamp_ne neqsel neqjoinsel )); -DESCR("not equal"); -DATA(insert OID = 2062 ( "<" PGNSP PGUID b f f 1114 1114 16 2064 2065 timestamp_lt scalarltsel scalarltjoinsel )); -DESCR("less than"); -DATA(insert OID = 2063 ( "<=" PGNSP PGUID b f f 1114 1114 16 2065 2064 timestamp_le scalarltsel scalarltjoinsel )); -DESCR("less than or equal"); -DATA(insert OID = 2064 ( ">" PGNSP PGUID b f f 1114 1114 16 2062 2063 timestamp_gt scalargtsel scalargtjoinsel )); -DESCR("greater than"); -DATA(insert OID = 2065 ( ">=" PGNSP PGUID b f f 1114 1114 16 2063 2062 timestamp_ge scalargtsel scalargtjoinsel )); -DESCR("greater than or equal"); -DATA(insert OID = 2066 ( "+" PGNSP PGUID b f f 1114 1186 1114 2553 0 timestamp_pl_interval - - )); -DESCR("add"); -DATA(insert OID = 2067 ( "-" PGNSP PGUID b f f 1114 1114 1186 0 0 timestamp_mi - - )); -DESCR("subtract"); -DATA(insert OID = 2068 ( "-" PGNSP PGUID b f f 1114 1186 1114 0 0 timestamp_mi_interval - - )); -DESCR("subtract"); - -/* character-by-character (not collation order) comparison operators for character types */ - -DATA(insert OID = 2314 ( "~<~" PGNSP PGUID b f f 25 25 16 2318 2317 text_pattern_lt scalarltsel scalarltjoinsel )); -DESCR("less than"); -DATA(insert OID = 2315 ( "~<=~" PGNSP PGUID b f f 25 25 16 2317 2318 text_pattern_le scalarltsel scalarltjoinsel )); -DESCR("less than or equal"); -DATA(insert OID = 2317 ( "~>=~" PGNSP PGUID b f f 25 25 16 2315 2314 text_pattern_ge scalargtsel scalargtjoinsel )); -DESCR("greater than or equal"); -DATA(insert OID = 2318 ( "~>~" PGNSP PGUID b f f 25 25 16 2314 2315 text_pattern_gt scalargtsel scalargtjoinsel )); -DESCR("greater than"); - -DATA(insert OID = 2326 ( "~<~" PGNSP PGUID b f f 1042 1042 16 2330 2329 bpchar_pattern_lt scalarltsel scalarltjoinsel )); -DESCR("less than"); -DATA(insert OID = 2327 ( "~<=~" PGNSP PGUID b f f 1042 1042 16 2329 2330 bpchar_pattern_le scalarltsel scalarltjoinsel )); -DESCR("less than or equal"); -DATA(insert OID = 2329 ( "~>=~" PGNSP PGUID b f f 1042 1042 16 2327 2326 bpchar_pattern_ge scalargtsel scalargtjoinsel )); -DESCR("greater than or equal"); -DATA(insert OID = 2330 ( "~>~" PGNSP PGUID b f f 1042 1042 16 2326 2327 bpchar_pattern_gt scalargtsel scalargtjoinsel )); -DESCR("greater than"); - -/* crosstype operations for date vs. 
timestamp and timestamptz */ - -DATA(insert OID = 2345 ( "<" PGNSP PGUID b f f 1082 1114 16 2375 2348 date_lt_timestamp scalarltsel scalarltjoinsel )); -DESCR("less than"); -DATA(insert OID = 2346 ( "<=" PGNSP PGUID b f f 1082 1114 16 2374 2349 date_le_timestamp scalarltsel scalarltjoinsel )); -DESCR("less than or equal"); -DATA(insert OID = 2347 ( "=" PGNSP PGUID b t f 1082 1114 16 2373 2350 date_eq_timestamp eqsel eqjoinsel )); -DESCR("equal"); -DATA(insert OID = 2348 ( ">=" PGNSP PGUID b f f 1082 1114 16 2372 2345 date_ge_timestamp scalargtsel scalargtjoinsel )); -DESCR("greater than or equal"); -DATA(insert OID = 2349 ( ">" PGNSP PGUID b f f 1082 1114 16 2371 2346 date_gt_timestamp scalargtsel scalargtjoinsel )); -DESCR("greater than"); -DATA(insert OID = 2350 ( "<>" PGNSP PGUID b f f 1082 1114 16 2376 2347 date_ne_timestamp neqsel neqjoinsel )); -DESCR("not equal"); - -DATA(insert OID = 2358 ( "<" PGNSP PGUID b f f 1082 1184 16 2388 2361 date_lt_timestamptz scalarltsel scalarltjoinsel )); -DESCR("less than"); -DATA(insert OID = 2359 ( "<=" PGNSP PGUID b f f 1082 1184 16 2387 2362 date_le_timestamptz scalarltsel scalarltjoinsel )); -DESCR("less than or equal"); -DATA(insert OID = 2360 ( "=" PGNSP PGUID b t f 1082 1184 16 2386 2363 date_eq_timestamptz eqsel eqjoinsel )); -DESCR("equal"); -DATA(insert OID = 2361 ( ">=" PGNSP PGUID b f f 1082 1184 16 2385 2358 date_ge_timestamptz scalargtsel scalargtjoinsel )); -DESCR("greater than or equal"); -DATA(insert OID = 2362 ( ">" PGNSP PGUID b f f 1082 1184 16 2384 2359 date_gt_timestamptz scalargtsel scalargtjoinsel )); -DESCR("greater than"); -DATA(insert OID = 2363 ( "<>" PGNSP PGUID b f f 1082 1184 16 2389 2360 date_ne_timestamptz neqsel neqjoinsel )); -DESCR("not equal"); - -DATA(insert OID = 2371 ( "<" PGNSP PGUID b f f 1114 1082 16 2349 2374 timestamp_lt_date scalarltsel scalarltjoinsel )); -DESCR("less than"); -DATA(insert OID = 2372 ( "<=" PGNSP PGUID b f f 1114 1082 16 2348 2375 timestamp_le_date scalarltsel scalarltjoinsel )); -DESCR("less than or equal"); -DATA(insert OID = 2373 ( "=" PGNSP PGUID b t f 1114 1082 16 2347 2376 timestamp_eq_date eqsel eqjoinsel )); -DESCR("equal"); -DATA(insert OID = 2374 ( ">=" PGNSP PGUID b f f 1114 1082 16 2346 2371 timestamp_ge_date scalargtsel scalargtjoinsel )); -DESCR("greater than or equal"); -DATA(insert OID = 2375 ( ">" PGNSP PGUID b f f 1114 1082 16 2345 2372 timestamp_gt_date scalargtsel scalargtjoinsel )); -DESCR("greater than"); -DATA(insert OID = 2376 ( "<>" PGNSP PGUID b f f 1114 1082 16 2350 2373 timestamp_ne_date neqsel neqjoinsel )); -DESCR("not equal"); - -DATA(insert OID = 2384 ( "<" PGNSP PGUID b f f 1184 1082 16 2362 2387 timestamptz_lt_date scalarltsel scalarltjoinsel )); -DESCR("less than"); -DATA(insert OID = 2385 ( "<=" PGNSP PGUID b f f 1184 1082 16 2361 2388 timestamptz_le_date scalarltsel scalarltjoinsel )); -DESCR("less than or equal"); -DATA(insert OID = 2386 ( "=" PGNSP PGUID b t f 1184 1082 16 2360 2389 timestamptz_eq_date eqsel eqjoinsel )); -DESCR("equal"); -DATA(insert OID = 2387 ( ">=" PGNSP PGUID b f f 1184 1082 16 2359 2384 timestamptz_ge_date scalargtsel scalargtjoinsel )); -DESCR("greater than or equal"); -DATA(insert OID = 2388 ( ">" PGNSP PGUID b f f 1184 1082 16 2358 2385 timestamptz_gt_date scalargtsel scalargtjoinsel )); -DESCR("greater than"); -DATA(insert OID = 2389 ( "<>" PGNSP PGUID b f f 1184 1082 16 2363 2386 timestamptz_ne_date neqsel neqjoinsel )); -DESCR("not equal"); - -/* crosstype operations for timestamp vs. 
timestamptz */ - -DATA(insert OID = 2534 ( "<" PGNSP PGUID b f f 1114 1184 16 2544 2537 timestamp_lt_timestamptz scalarltsel scalarltjoinsel )); -DESCR("less than"); -DATA(insert OID = 2535 ( "<=" PGNSP PGUID b f f 1114 1184 16 2543 2538 timestamp_le_timestamptz scalarltsel scalarltjoinsel )); -DESCR("less than or equal"); -DATA(insert OID = 2536 ( "=" PGNSP PGUID b t f 1114 1184 16 2542 2539 timestamp_eq_timestamptz eqsel eqjoinsel )); -DESCR("equal"); -DATA(insert OID = 2537 ( ">=" PGNSP PGUID b f f 1114 1184 16 2541 2534 timestamp_ge_timestamptz scalargtsel scalargtjoinsel )); -DESCR("greater than or equal"); -DATA(insert OID = 2538 ( ">" PGNSP PGUID b f f 1114 1184 16 2540 2535 timestamp_gt_timestamptz scalargtsel scalargtjoinsel )); -DESCR("greater than"); -DATA(insert OID = 2539 ( "<>" PGNSP PGUID b f f 1114 1184 16 2545 2536 timestamp_ne_timestamptz neqsel neqjoinsel )); -DESCR("not equal"); - -DATA(insert OID = 2540 ( "<" PGNSP PGUID b f f 1184 1114 16 2538 2543 timestamptz_lt_timestamp scalarltsel scalarltjoinsel )); -DESCR("less than"); -DATA(insert OID = 2541 ( "<=" PGNSP PGUID b f f 1184 1114 16 2537 2544 timestamptz_le_timestamp scalarltsel scalarltjoinsel )); -DESCR("less than or equal"); -DATA(insert OID = 2542 ( "=" PGNSP PGUID b t f 1184 1114 16 2536 2545 timestamptz_eq_timestamp eqsel eqjoinsel )); -DESCR("equal"); -DATA(insert OID = 2543 ( ">=" PGNSP PGUID b f f 1184 1114 16 2535 2540 timestamptz_ge_timestamp scalargtsel scalargtjoinsel )); -DESCR("greater than or equal"); -DATA(insert OID = 2544 ( ">" PGNSP PGUID b f f 1184 1114 16 2534 2541 timestamptz_gt_timestamp scalargtsel scalargtjoinsel )); -DESCR("greater than"); -DATA(insert OID = 2545 ( "<>" PGNSP PGUID b f f 1184 1114 16 2539 2542 timestamptz_ne_timestamp neqsel neqjoinsel )); -DESCR("not equal"); - -/* formerly-missing interval + datetime operators */ -DATA(insert OID = 2551 ( "+" PGNSP PGUID b f f 1186 1082 1114 1076 0 interval_pl_date - - )); -DESCR("add"); -DATA(insert OID = 2552 ( "+" PGNSP PGUID b f f 1186 1266 1266 1802 0 interval_pl_timetz - - )); -DESCR("add"); -DATA(insert OID = 2553 ( "+" PGNSP PGUID b f f 1186 1114 1114 2066 0 interval_pl_timestamp - - )); -DESCR("add"); -DATA(insert OID = 2554 ( "+" PGNSP PGUID b f f 1186 1184 1184 1327 0 interval_pl_timestamptz - - )); -DESCR("add"); -DATA(insert OID = 2555 ( "+" PGNSP PGUID b f f 23 1082 1082 1100 0 integer_pl_date - - )); -DESCR("add"); - -/* new operators for Y-direction rtree opfamilies */ -DATA(insert OID = 2570 ( "<<|" PGNSP PGUID b f f 603 603 16 0 0 box_below positionsel positionjoinsel )); -DESCR("is below"); -DATA(insert OID = 2571 ( "&<|" PGNSP PGUID b f f 603 603 16 0 0 box_overbelow positionsel positionjoinsel )); -DESCR("overlaps or is below"); -DATA(insert OID = 2572 ( "|&>" PGNSP PGUID b f f 603 603 16 0 0 box_overabove positionsel positionjoinsel )); -DESCR("overlaps or is above"); -DATA(insert OID = 2573 ( "|>>" PGNSP PGUID b f f 603 603 16 0 0 box_above positionsel positionjoinsel )); -DESCR("is above"); -DATA(insert OID = 2574 ( "<<|" PGNSP PGUID b f f 604 604 16 0 0 poly_below positionsel positionjoinsel )); -DESCR("is below"); -DATA(insert OID = 2575 ( "&<|" PGNSP PGUID b f f 604 604 16 0 0 poly_overbelow positionsel positionjoinsel )); -DESCR("overlaps or is below"); -DATA(insert OID = 2576 ( "|&>" PGNSP PGUID b f f 604 604 16 0 0 poly_overabove positionsel positionjoinsel )); -DESCR("overlaps or is above"); -DATA(insert OID = 2577 ( "|>>" PGNSP PGUID b f f 604 604 16 0 0 poly_above positionsel positionjoinsel )); 
-DESCR("is above"); -DATA(insert OID = 2589 ( "&<|" PGNSP PGUID b f f 718 718 16 0 0 circle_overbelow positionsel positionjoinsel )); -DESCR("overlaps or is below"); -DATA(insert OID = 2590 ( "|&>" PGNSP PGUID b f f 718 718 16 0 0 circle_overabove positionsel positionjoinsel )); -DESCR("overlaps or is above"); - -/* overlap/contains/contained for arrays */ -DATA(insert OID = 2750 ( "&&" PGNSP PGUID b f f 2277 2277 16 2750 0 arrayoverlap arraycontsel arraycontjoinsel )); -DESCR("overlaps"); -#define OID_ARRAY_OVERLAP_OP 2750 -DATA(insert OID = 2751 ( "@>" PGNSP PGUID b f f 2277 2277 16 2752 0 arraycontains arraycontsel arraycontjoinsel )); -DESCR("contains"); -#define OID_ARRAY_CONTAINS_OP 2751 -DATA(insert OID = 2752 ( "<@" PGNSP PGUID b f f 2277 2277 16 2751 0 arraycontained arraycontsel arraycontjoinsel )); -DESCR("is contained by"); -#define OID_ARRAY_CONTAINED_OP 2752 - -/* capturing operators to preserve pre-8.3 behavior of text concatenation */ -DATA(insert OID = 2779 ( "||" PGNSP PGUID b f f 25 2776 25 0 0 textanycat - - )); -DESCR("concatenate"); -DATA(insert OID = 2780 ( "||" PGNSP PGUID b f f 2776 25 25 0 0 anytextcat - - )); -DESCR("concatenate"); - -/* obsolete names for contains/contained-by operators; remove these someday */ -DATA(insert OID = 2860 ( "@" PGNSP PGUID b f f 604 604 16 2861 0 poly_contained contsel contjoinsel )); -DESCR("deprecated, use <@ instead"); -DATA(insert OID = 2861 ( "~" PGNSP PGUID b f f 604 604 16 2860 0 poly_contain contsel contjoinsel )); -DESCR("deprecated, use @> instead"); -DATA(insert OID = 2862 ( "@" PGNSP PGUID b f f 603 603 16 2863 0 box_contained contsel contjoinsel )); -DESCR("deprecated, use <@ instead"); -DATA(insert OID = 2863 ( "~" PGNSP PGUID b f f 603 603 16 2862 0 box_contain contsel contjoinsel )); -DESCR("deprecated, use @> instead"); -DATA(insert OID = 2864 ( "@" PGNSP PGUID b f f 718 718 16 2865 0 circle_contained contsel contjoinsel )); -DESCR("deprecated, use <@ instead"); -DATA(insert OID = 2865 ( "~" PGNSP PGUID b f f 718 718 16 2864 0 circle_contain contsel contjoinsel )); -DESCR("deprecated, use @> instead"); -DATA(insert OID = 2866 ( "@" PGNSP PGUID b f f 600 603 16 0 0 on_pb - - )); -DESCR("deprecated, use <@ instead"); -DATA(insert OID = 2867 ( "@" PGNSP PGUID b f f 600 602 16 2868 0 on_ppath - - )); -DESCR("deprecated, use <@ instead"); -DATA(insert OID = 2868 ( "~" PGNSP PGUID b f f 602 600 16 2867 0 path_contain_pt - - )); -DESCR("deprecated, use @> instead"); -DATA(insert OID = 2869 ( "@" PGNSP PGUID b f f 600 604 16 2870 0 pt_contained_poly - - )); -DESCR("deprecated, use <@ instead"); -DATA(insert OID = 2870 ( "~" PGNSP PGUID b f f 604 600 16 2869 0 poly_contain_pt - - )); -DESCR("deprecated, use @> instead"); -DATA(insert OID = 2871 ( "@" PGNSP PGUID b f f 600 718 16 2872 0 pt_contained_circle - - )); -DESCR("deprecated, use <@ instead"); -DATA(insert OID = 2872 ( "~" PGNSP PGUID b f f 718 600 16 2871 0 circle_contain_pt - - )); -DESCR("deprecated, use @> instead"); -DATA(insert OID = 2873 ( "@" PGNSP PGUID b f f 600 628 16 0 0 on_pl - - )); -DESCR("deprecated, use <@ instead"); -DATA(insert OID = 2874 ( "@" PGNSP PGUID b f f 600 601 16 0 0 on_ps - - )); -DESCR("deprecated, use <@ instead"); -DATA(insert OID = 2875 ( "@" PGNSP PGUID b f f 601 628 16 0 0 on_sl - - )); -DESCR("deprecated, use <@ instead"); -DATA(insert OID = 2876 ( "@" PGNSP PGUID b f f 601 603 16 0 0 on_sb - - )); -DESCR("deprecated, use <@ instead"); -DATA(insert OID = 2877 ( "~" PGNSP PGUID b f f 1034 1033 16 0 0 aclcontains - - )); 
-DESCR("deprecated, use @> instead"); - -/* uuid operators */ -DATA(insert OID = 2972 ( "=" PGNSP PGUID b t t 2950 2950 16 2972 2973 uuid_eq eqsel eqjoinsel )); -DESCR("equal"); -DATA(insert OID = 2973 ( "<>" PGNSP PGUID b f f 2950 2950 16 2973 2972 uuid_ne neqsel neqjoinsel )); -DESCR("not equal"); -DATA(insert OID = 2974 ( "<" PGNSP PGUID b f f 2950 2950 16 2975 2977 uuid_lt scalarltsel scalarltjoinsel )); -DESCR("less than"); -DATA(insert OID = 2975 ( ">" PGNSP PGUID b f f 2950 2950 16 2974 2976 uuid_gt scalargtsel scalargtjoinsel )); -DESCR("greater than"); -DATA(insert OID = 2976 ( "<=" PGNSP PGUID b f f 2950 2950 16 2977 2975 uuid_le scalarltsel scalarltjoinsel )); -DESCR("less than or equal"); -DATA(insert OID = 2977 ( ">=" PGNSP PGUID b f f 2950 2950 16 2976 2974 uuid_ge scalargtsel scalargtjoinsel )); -DESCR("greater than or equal"); - -/* pg_lsn operators */ -DATA(insert OID = 3222 ( "=" PGNSP PGUID b t t 3220 3220 16 3222 3223 pg_lsn_eq eqsel eqjoinsel )); -DESCR("equal"); -DATA(insert OID = 3223 ( "<>" PGNSP PGUID b f f 3220 3220 16 3223 3222 pg_lsn_ne neqsel neqjoinsel )); -DESCR("not equal"); -DATA(insert OID = 3224 ( "<" PGNSP PGUID b f f 3220 3220 16 3225 3227 pg_lsn_lt scalarltsel scalarltjoinsel )); -DESCR("less than"); -DATA(insert OID = 3225 ( ">" PGNSP PGUID b f f 3220 3220 16 3224 3226 pg_lsn_gt scalargtsel scalargtjoinsel )); -DESCR("greater than"); -DATA(insert OID = 3226 ( "<=" PGNSP PGUID b f f 3220 3220 16 3227 3225 pg_lsn_le scalarltsel scalarltjoinsel )); -DESCR("less than or equal"); -DATA(insert OID = 3227 ( ">=" PGNSP PGUID b f f 3220 3220 16 3226 3224 pg_lsn_ge scalargtsel scalargtjoinsel )); -DESCR("greater than or equal"); -DATA(insert OID = 3228 ( "-" PGNSP PGUID b f f 3220 3220 1700 0 0 pg_lsn_mi - - )); -DESCR("minus"); - -/* enum operators */ -DATA(insert OID = 3516 ( "=" PGNSP PGUID b t t 3500 3500 16 3516 3517 enum_eq eqsel eqjoinsel )); -DESCR("equal"); -DATA(insert OID = 3517 ( "<>" PGNSP PGUID b f f 3500 3500 16 3517 3516 enum_ne neqsel neqjoinsel )); -DESCR("not equal"); -DATA(insert OID = 3518 ( "<" PGNSP PGUID b f f 3500 3500 16 3519 3521 enum_lt scalarltsel scalarltjoinsel )); -DESCR("less than"); -DATA(insert OID = 3519 ( ">" PGNSP PGUID b f f 3500 3500 16 3518 3520 enum_gt scalargtsel scalargtjoinsel )); -DESCR("greater than"); -DATA(insert OID = 3520 ( "<=" PGNSP PGUID b f f 3500 3500 16 3521 3519 enum_le scalarltsel scalarltjoinsel )); -DESCR("less than or equal"); -DATA(insert OID = 3521 ( ">=" PGNSP PGUID b f f 3500 3500 16 3520 3518 enum_ge scalargtsel scalargtjoinsel )); -DESCR("greater than or equal"); + /* OID of join estimator, or 0 */ + regproc oprjoin BKI_DEFAULT(-) BKI_LOOKUP(pg_proc); +} FormData_pg_operator; -/* - * tsearch operations +/* ---------------- + * Form_pg_operator corresponds to a pointer to a tuple with + * the format of pg_operator relation. 
+ * ---------------- */ -DATA(insert OID = 3627 ( "<" PGNSP PGUID b f f 3614 3614 16 3632 3631 tsvector_lt scalarltsel scalarltjoinsel )); -DESCR("less than"); -DATA(insert OID = 3628 ( "<=" PGNSP PGUID b f f 3614 3614 16 3631 3632 tsvector_le scalarltsel scalarltjoinsel )); -DESCR("less than or equal"); -DATA(insert OID = 3629 ( "=" PGNSP PGUID b t f 3614 3614 16 3629 3630 tsvector_eq eqsel eqjoinsel )); -DESCR("equal"); -DATA(insert OID = 3630 ( "<>" PGNSP PGUID b f f 3614 3614 16 3630 3629 tsvector_ne neqsel neqjoinsel )); -DESCR("not equal"); -DATA(insert OID = 3631 ( ">=" PGNSP PGUID b f f 3614 3614 16 3628 3627 tsvector_ge scalargtsel scalargtjoinsel )); -DESCR("greater than or equal"); -DATA(insert OID = 3632 ( ">" PGNSP PGUID b f f 3614 3614 16 3627 3628 tsvector_gt scalargtsel scalargtjoinsel )); -DESCR("greater than"); -DATA(insert OID = 3633 ( "||" PGNSP PGUID b f f 3614 3614 3614 0 0 tsvector_concat - - )); -DESCR("concatenate"); -DATA(insert OID = 3636 ( "@@" PGNSP PGUID b f f 3614 3615 16 3637 0 ts_match_vq tsmatchsel tsmatchjoinsel )); -DESCR("text search match"); -DATA(insert OID = 3637 ( "@@" PGNSP PGUID b f f 3615 3614 16 3636 0 ts_match_qv tsmatchsel tsmatchjoinsel )); -DESCR("text search match"); -DATA(insert OID = 3660 ( "@@@" PGNSP PGUID b f f 3614 3615 16 3661 0 ts_match_vq tsmatchsel tsmatchjoinsel )); -DESCR("deprecated, use @@ instead"); -DATA(insert OID = 3661 ( "@@@" PGNSP PGUID b f f 3615 3614 16 3660 0 ts_match_qv tsmatchsel tsmatchjoinsel )); -DESCR("deprecated, use @@ instead"); -DATA(insert OID = 3674 ( "<" PGNSP PGUID b f f 3615 3615 16 3679 3678 tsquery_lt scalarltsel scalarltjoinsel )); -DESCR("less than"); -DATA(insert OID = 3675 ( "<=" PGNSP PGUID b f f 3615 3615 16 3678 3679 tsquery_le scalarltsel scalarltjoinsel )); -DESCR("less than or equal"); -DATA(insert OID = 3676 ( "=" PGNSP PGUID b t f 3615 3615 16 3676 3677 tsquery_eq eqsel eqjoinsel )); -DESCR("equal"); -DATA(insert OID = 3677 ( "<>" PGNSP PGUID b f f 3615 3615 16 3677 3676 tsquery_ne neqsel neqjoinsel )); -DESCR("not equal"); -DATA(insert OID = 3678 ( ">=" PGNSP PGUID b f f 3615 3615 16 3675 3674 tsquery_ge scalargtsel scalargtjoinsel )); -DESCR("greater than or equal"); -DATA(insert OID = 3679 ( ">" PGNSP PGUID b f f 3615 3615 16 3674 3675 tsquery_gt scalargtsel scalargtjoinsel )); -DESCR("greater than"); -DATA(insert OID = 3680 ( "&&" PGNSP PGUID b f f 3615 3615 3615 0 0 tsquery_and - - )); -DESCR("AND-concatenate"); -DATA(insert OID = 3681 ( "||" PGNSP PGUID b f f 3615 3615 3615 0 0 tsquery_or - - )); -DESCR("OR-concatenate"); -/* <-> operation calls tsquery_phrase, but function is polymorphic. So, point to OID of the tsquery_phrase */ -DATA(insert OID = 5005 ( "<->" PGNSP PGUID b f f 3615 3615 3615 0 0 5003 - - )); -DESCR("phrase-concatenate"); -DATA(insert OID = 3682 ( "!!" 
PGNSP PGUID l f f 0 3615 3615 0 0 tsquery_not - - )); -DESCR("NOT tsquery"); -DATA(insert OID = 3693 ( "@>" PGNSP PGUID b f f 3615 3615 16 3694 0 tsq_mcontains contsel contjoinsel )); -DESCR("contains"); -DATA(insert OID = 3694 ( "<@" PGNSP PGUID b f f 3615 3615 16 3693 0 tsq_mcontained contsel contjoinsel )); -DESCR("is contained by"); -DATA(insert OID = 3762 ( "@@" PGNSP PGUID b f f 25 25 16 0 0 ts_match_tt contsel contjoinsel )); -DESCR("text search match"); -DATA(insert OID = 3763 ( "@@" PGNSP PGUID b f f 25 3615 16 0 0 ts_match_tq contsel contjoinsel )); -DESCR("text search match"); +typedef FormData_pg_operator *Form_pg_operator; + -/* generic record comparison operators */ -DATA(insert OID = 2988 ( "=" PGNSP PGUID b t f 2249 2249 16 2988 2989 record_eq eqsel eqjoinsel )); -DESCR("equal"); -#define RECORD_EQ_OP 2988 -DATA(insert OID = 2989 ( "<>" PGNSP PGUID b f f 2249 2249 16 2989 2988 record_ne neqsel neqjoinsel )); -DESCR("not equal"); -DATA(insert OID = 2990 ( "<" PGNSP PGUID b f f 2249 2249 16 2991 2993 record_lt scalarltsel scalarltjoinsel )); -DESCR("less than"); -#define RECORD_LT_OP 2990 -DATA(insert OID = 2991 ( ">" PGNSP PGUID b f f 2249 2249 16 2990 2992 record_gt scalargtsel scalargtjoinsel )); -DESCR("greater than"); -#define RECORD_GT_OP 2991 -DATA(insert OID = 2992 ( "<=" PGNSP PGUID b f f 2249 2249 16 2993 2991 record_le scalarltsel scalarltjoinsel )); -DESCR("less than or equal"); -DATA(insert OID = 2993 ( ">=" PGNSP PGUID b f f 2249 2249 16 2992 2990 record_ge scalargtsel scalargtjoinsel )); -DESCR("greater than or equal"); +extern ObjectAddress OperatorCreate(const char *operatorName, + Oid operatorNamespace, + Oid leftTypeId, + Oid rightTypeId, + Oid procedureId, + List *commutatorName, + List *negatorName, + Oid restrictionId, + Oid joinId, + bool canMerge, + bool canHash); -/* byte-oriented tests for identical rows and fast sorting */ -DATA(insert OID = 3188 ( "*=" PGNSP PGUID b t f 2249 2249 16 3188 3189 record_image_eq eqsel eqjoinsel )); -DESCR("identical"); -DATA(insert OID = 3189 ( "*<>" PGNSP PGUID b f f 2249 2249 16 3189 3188 record_image_ne neqsel neqjoinsel )); -DESCR("not identical"); -DATA(insert OID = 3190 ( "*<" PGNSP PGUID b f f 2249 2249 16 3191 3193 record_image_lt scalarltsel scalarltjoinsel )); -DESCR("less than"); -DATA(insert OID = 3191 ( "*>" PGNSP PGUID b f f 2249 2249 16 3190 3192 record_image_gt scalargtsel scalargtjoinsel )); -DESCR("greater than"); -DATA(insert OID = 3192 ( "*<=" PGNSP PGUID b f f 2249 2249 16 3193 3191 record_image_le scalarltsel scalarltjoinsel )); -DESCR("less than or equal"); -DATA(insert OID = 3193 ( "*>=" PGNSP PGUID b f f 2249 2249 16 3192 3190 record_image_ge scalargtsel scalargtjoinsel )); -DESCR("greater than or equal"); +extern ObjectAddress makeOperatorDependencies(HeapTuple tuple, bool isUpdate); -/* generic range type operators */ -DATA(insert OID = 3882 ( "=" PGNSP PGUID b t t 3831 3831 16 3882 3883 range_eq eqsel eqjoinsel )); -DESCR("equal"); -DATA(insert OID = 3883 ( "<>" PGNSP PGUID b f f 3831 3831 16 3883 3882 range_ne neqsel neqjoinsel )); -DESCR("not equal"); -DATA(insert OID = 3884 ( "<" PGNSP PGUID b f f 3831 3831 16 3887 3886 range_lt rangesel scalarltjoinsel )); -DESCR("less than"); -#define OID_RANGE_LESS_OP 3884 -DATA(insert OID = 3885 ( "<=" PGNSP PGUID b f f 3831 3831 16 3886 3887 range_le rangesel scalarltjoinsel )); -DESCR("less than or equal"); -#define OID_RANGE_LESS_EQUAL_OP 3885 -DATA(insert OID = 3886 ( ">=" PGNSP PGUID b f f 3831 3831 16 3885 3884 range_ge rangesel scalargtjoinsel 
)); -DESCR("greater than or equal"); -#define OID_RANGE_GREATER_EQUAL_OP 3886 -DATA(insert OID = 3887 ( ">" PGNSP PGUID b f f 3831 3831 16 3884 3885 range_gt rangesel scalargtjoinsel )); -DESCR("greater than"); -#define OID_RANGE_GREATER_OP 3887 -DATA(insert OID = 3888 ( "&&" PGNSP PGUID b f f 3831 3831 16 3888 0 range_overlaps rangesel areajoinsel )); -DESCR("overlaps"); -#define OID_RANGE_OVERLAP_OP 3888 -DATA(insert OID = 3889 ( "@>" PGNSP PGUID b f f 3831 2283 16 3891 0 range_contains_elem rangesel contjoinsel )); -DESCR("contains"); -#define OID_RANGE_CONTAINS_ELEM_OP 3889 -DATA(insert OID = 3890 ( "@>" PGNSP PGUID b f f 3831 3831 16 3892 0 range_contains rangesel contjoinsel )); -DESCR("contains"); -#define OID_RANGE_CONTAINS_OP 3890 -DATA(insert OID = 3891 ( "<@" PGNSP PGUID b f f 2283 3831 16 3889 0 elem_contained_by_range rangesel contjoinsel )); -DESCR("is contained by"); -#define OID_RANGE_ELEM_CONTAINED_OP 3891 -DATA(insert OID = 3892 ( "<@" PGNSP PGUID b f f 3831 3831 16 3890 0 range_contained_by rangesel contjoinsel )); -DESCR("is contained by"); -#define OID_RANGE_CONTAINED_OP 3892 -DATA(insert OID = 3893 ( "<<" PGNSP PGUID b f f 3831 3831 16 3894 0 range_before rangesel scalarltjoinsel )); -DESCR("is left of"); -#define OID_RANGE_LEFT_OP 3893 -DATA(insert OID = 3894 ( ">>" PGNSP PGUID b f f 3831 3831 16 3893 0 range_after rangesel scalargtjoinsel )); -DESCR("is right of"); -#define OID_RANGE_RIGHT_OP 3894 -DATA(insert OID = 3895 ( "&<" PGNSP PGUID b f f 3831 3831 16 0 0 range_overleft rangesel scalarltjoinsel )); -DESCR("overlaps or is left of"); -#define OID_RANGE_OVERLAPS_LEFT_OP 3895 -DATA(insert OID = 3896 ( "&>" PGNSP PGUID b f f 3831 3831 16 0 0 range_overright rangesel scalargtjoinsel )); -DESCR("overlaps or is right of"); -#define OID_RANGE_OVERLAPS_RIGHT_OP 3896 -DATA(insert OID = 3897 ( "-|-" PGNSP PGUID b f f 3831 3831 16 3897 0 range_adjacent contsel contjoinsel )); -DESCR("is adjacent to"); -DATA(insert OID = 3898 ( "+" PGNSP PGUID b f f 3831 3831 3831 3898 0 range_union - - )); -DESCR("range union"); -DATA(insert OID = 3899 ( "-" PGNSP PGUID b f f 3831 3831 3831 0 0 range_minus - - )); -DESCR("range difference"); -DATA(insert OID = 3900 ( "*" PGNSP PGUID b f f 3831 3831 3831 3900 0 range_intersect - - )); -DESCR("range intersection"); -DATA(insert OID = 3962 ( "->" PGNSP PGUID b f f 114 25 114 0 0 json_object_field - - )); -DESCR("get json object field"); -DATA(insert OID = 3963 ( "->>" PGNSP PGUID b f f 114 25 25 0 0 json_object_field_text - - )); -DESCR("get json object field as text"); -DATA(insert OID = 3964 ( "->" PGNSP PGUID b f f 114 23 114 0 0 json_array_element - - )); -DESCR("get json array element"); -DATA(insert OID = 3965 ( "->>" PGNSP PGUID b f f 114 23 25 0 0 json_array_element_text - - )); -DESCR("get json array element as text"); -DATA(insert OID = 3966 ( "#>" PGNSP PGUID b f f 114 1009 114 0 0 json_extract_path - - )); -DESCR("get value from json with path elements"); -DATA(insert OID = 3967 ( "#>>" PGNSP PGUID b f f 114 1009 25 0 0 json_extract_path_text - - )); -DESCR("get value from json as text with path elements"); -DATA(insert OID = 3211 ( "->" PGNSP PGUID b f f 3802 25 3802 0 0 jsonb_object_field - - )); -DESCR("get jsonb object field"); -DATA(insert OID = 3477 ( "->>" PGNSP PGUID b f f 3802 25 25 0 0 jsonb_object_field_text - - )); -DESCR("get jsonb object field as text"); -DATA(insert OID = 3212 ( "->" PGNSP PGUID b f f 3802 23 3802 0 0 jsonb_array_element - - )); -DESCR("get jsonb array element"); -DATA(insert OID = 3481 ( "->>" 
PGNSP PGUID b f f 3802 23 25 0 0 jsonb_array_element_text - - )); -DESCR("get jsonb array element as text"); -DATA(insert OID = 3213 ( "#>" PGNSP PGUID b f f 3802 1009 3802 0 0 jsonb_extract_path - - )); -DESCR("get value from jsonb with path elements"); -DATA(insert OID = 3206 ( "#>>" PGNSP PGUID b f f 3802 1009 25 0 0 jsonb_extract_path_text - - )); -DESCR("get value from jsonb as text with path elements"); -DATA(insert OID = 3240 ( "=" PGNSP PGUID b t t 3802 3802 16 3240 3241 jsonb_eq eqsel eqjoinsel )); -DESCR("equal"); -DATA(insert OID = 3241 ( "<>" PGNSP PGUID b f f 3802 3802 16 3241 3240 jsonb_ne neqsel neqjoinsel )); -DESCR("not equal"); -DATA(insert OID = 3242 ( "<" PGNSP PGUID b f f 3802 3802 16 3243 3245 jsonb_lt scalarltsel scalarltjoinsel )); -DESCR("less than"); -DATA(insert OID = 3243 ( ">" PGNSP PGUID b f f 3802 3802 16 3242 3244 jsonb_gt scalargtsel scalargtjoinsel )); -DESCR("greater than"); -DATA(insert OID = 3244 ( "<=" PGNSP PGUID b f f 3802 3802 16 3245 3243 jsonb_le scalarltsel scalarltjoinsel )); -DESCR("less than or equal"); -DATA(insert OID = 3245 ( ">=" PGNSP PGUID b f f 3802 3802 16 3244 3242 jsonb_ge scalargtsel scalargtjoinsel )); -DESCR("greater than or equal"); -DATA(insert OID = 3246 ( "@>" PGNSP PGUID b f f 3802 3802 16 3250 0 jsonb_contains contsel contjoinsel )); -DESCR("contains"); -DATA(insert OID = 3247 ( "?" PGNSP PGUID b f f 3802 25 16 0 0 jsonb_exists contsel contjoinsel )); -DESCR("key exists"); -DATA(insert OID = 3248 ( "?|" PGNSP PGUID b f f 3802 1009 16 0 0 jsonb_exists_any contsel contjoinsel )); -DESCR("any key exists"); -DATA(insert OID = 3249 ( "?&" PGNSP PGUID b f f 3802 1009 16 0 0 jsonb_exists_all contsel contjoinsel )); -DESCR("all keys exist"); -DATA(insert OID = 3250 ( "<@" PGNSP PGUID b f f 3802 3802 16 3246 0 jsonb_contained contsel contjoinsel )); -DESCR("is contained by"); -DATA(insert OID = 3284 ( "||" PGNSP PGUID b f f 3802 3802 3802 0 0 jsonb_concat - - )); -DESCR("concatenate"); -DATA(insert OID = 3285 ( "-" PGNSP PGUID b f f 3802 25 3802 0 0 3302 - - )); -DESCR("delete object field"); -DATA(insert OID = 3398 ( "-" PGNSP PGUID b f f 3802 1009 3802 0 0 3343 - -)); -DESCR("delete object fields"); -DATA(insert OID = 3286 ( "-" PGNSP PGUID b f f 3802 23 3802 0 0 3303 - - )); -DESCR("delete array element"); -DATA(insert OID = 3287 ( "#-" PGNSP PGUID b f f 3802 1009 3802 0 0 jsonb_delete_path - - )); -DESCR("delete path"); +extern void OperatorUpd(Oid baseId, Oid commId, Oid negId, bool isDelete); #endif /* PG_OPERATOR_H */ diff --git a/src/include/catalog/pg_operator_fn.h b/src/include/catalog/pg_operator_fn.h deleted file mode 100644 index 37f5c712fe..0000000000 --- a/src/include/catalog/pg_operator_fn.h +++ /dev/null @@ -1,36 +0,0 @@ -/*------------------------------------------------------------------------- - * - * pg_operator_fn.h -* prototypes for functions in catalog/pg_operator.c - * - * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group - * Portions Copyright (c) 1994, Regents of the University of California - * - * src/include/catalog/pg_operator_fn.h - * - *------------------------------------------------------------------------- - */ -#ifndef PG_OPERATOR_FN_H -#define PG_OPERATOR_FN_H - -#include "catalog/objectaddress.h" -#include "nodes/pg_list.h" - -extern ObjectAddress OperatorCreate(const char *operatorName, - Oid operatorNamespace, - Oid leftTypeId, - Oid rightTypeId, - Oid procedureId, - List *commutatorName, - List *negatorName, - Oid restrictionId, - Oid joinId, - bool canMerge, - bool 
canHash); - -extern ObjectAddress makeOperatorDependencies(HeapTuple tuple, bool isUpdate); - -extern void OperatorUpd(Oid baseId, Oid commId, Oid negId, bool isDelete); - -#endif /* PG_OPERATOR_FN_H */ diff --git a/src/include/catalog/pg_opfamily.dat b/src/include/catalog/pg_opfamily.dat new file mode 100644 index 0000000000..21473acf34 --- /dev/null +++ b/src/include/catalog/pg_opfamily.dat @@ -0,0 +1,232 @@ +#---------------------------------------------------------------------- +# +# pg_opfamily.dat +# Initial contents of the pg_opfamily system catalog. +# +# Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group +# Portions Copyright (c) 1994, Regents of the University of California +# +# src/include/catalog/pg_opfamily.dat +# +#---------------------------------------------------------------------- + +[ + +{ oid => '397', + opfmethod => 'btree', opfname => 'array_ops' }, +{ oid => '627', + opfmethod => 'hash', opfname => 'array_ops' }, +{ oid => '423', + opfmethod => 'btree', opfname => 'bit_ops' }, +{ oid => '424', oid_symbol => 'BOOL_BTREE_FAM_OID', + opfmethod => 'btree', opfname => 'bool_ops' }, +{ oid => '426', oid_symbol => 'BPCHAR_BTREE_FAM_OID', + opfmethod => 'btree', opfname => 'bpchar_ops' }, +{ oid => '427', + opfmethod => 'hash', opfname => 'bpchar_ops' }, +{ oid => '428', oid_symbol => 'BYTEA_BTREE_FAM_OID', + opfmethod => 'btree', opfname => 'bytea_ops' }, +{ oid => '429', + opfmethod => 'btree', opfname => 'char_ops' }, +{ oid => '431', + opfmethod => 'hash', opfname => 'char_ops' }, +{ oid => '434', + opfmethod => 'btree', opfname => 'datetime_ops' }, +{ oid => '435', + opfmethod => 'hash', opfname => 'date_ops' }, +{ oid => '1970', + opfmethod => 'btree', opfname => 'float_ops' }, +{ oid => '1971', + opfmethod => 'hash', opfname => 'float_ops' }, +{ oid => '1974', oid_symbol => 'NETWORK_BTREE_FAM_OID', + opfmethod => 'btree', opfname => 'network_ops' }, +{ oid => '1975', + opfmethod => 'hash', opfname => 'network_ops' }, +{ oid => '3550', + opfmethod => 'gist', opfname => 'network_ops' }, +{ oid => '3794', + opfmethod => 'spgist', opfname => 'network_ops' }, +{ oid => '1976', oid_symbol => 'INTEGER_BTREE_FAM_OID', + opfmethod => 'btree', opfname => 'integer_ops' }, +{ oid => '1977', + opfmethod => 'hash', opfname => 'integer_ops' }, +{ oid => '1982', + opfmethod => 'btree', opfname => 'interval_ops' }, +{ oid => '1983', + opfmethod => 'hash', opfname => 'interval_ops' }, +{ oid => '1984', + opfmethod => 'btree', opfname => 'macaddr_ops' }, +{ oid => '1985', + opfmethod => 'hash', opfname => 'macaddr_ops' }, +{ oid => '3371', + opfmethod => 'btree', opfname => 'macaddr8_ops' }, +{ oid => '3372', + opfmethod => 'hash', opfname => 'macaddr8_ops' }, +{ oid => '1986', oid_symbol => 'NAME_BTREE_FAM_OID', + opfmethod => 'btree', opfname => 'name_ops' }, +{ oid => '1987', + opfmethod => 'hash', opfname => 'name_ops' }, +{ oid => '1988', + opfmethod => 'btree', opfname => 'numeric_ops' }, +{ oid => '1998', + opfmethod => 'hash', opfname => 'numeric_ops' }, +{ oid => '1989', oid_symbol => 'OID_BTREE_FAM_OID', + opfmethod => 'btree', opfname => 'oid_ops' }, +{ oid => '1990', + opfmethod => 'hash', opfname => 'oid_ops' }, +{ oid => '1991', + opfmethod => 'btree', opfname => 'oidvector_ops' }, +{ oid => '1992', + opfmethod => 'hash', opfname => 'oidvector_ops' }, +{ oid => '2994', + opfmethod => 'btree', opfname => 'record_ops' }, +{ oid => '3194', + opfmethod => 'btree', opfname => 'record_image_ops' }, +{ oid => '1994', oid_symbol => 'TEXT_BTREE_FAM_OID', + 
opfmethod => 'btree', opfname => 'text_ops' }, +{ oid => '1995', + opfmethod => 'hash', opfname => 'text_ops' }, +{ oid => '1996', + opfmethod => 'btree', opfname => 'time_ops' }, +{ oid => '1997', + opfmethod => 'hash', opfname => 'time_ops' }, +{ oid => '1999', + opfmethod => 'hash', opfname => 'timestamptz_ops' }, +{ oid => '2000', + opfmethod => 'btree', opfname => 'timetz_ops' }, +{ oid => '2001', + opfmethod => 'hash', opfname => 'timetz_ops' }, +{ oid => '2002', + opfmethod => 'btree', opfname => 'varbit_ops' }, +{ oid => '2040', + opfmethod => 'hash', opfname => 'timestamp_ops' }, +{ oid => '2095', oid_symbol => 'TEXT_PATTERN_BTREE_FAM_OID', + opfmethod => 'btree', opfname => 'text_pattern_ops' }, +{ oid => '2097', oid_symbol => 'BPCHAR_PATTERN_BTREE_FAM_OID', + opfmethod => 'btree', opfname => 'bpchar_pattern_ops' }, +{ oid => '2099', + opfmethod => 'btree', opfname => 'money_ops' }, +{ oid => '2222', oid_symbol => 'BOOL_HASH_FAM_OID', + opfmethod => 'hash', opfname => 'bool_ops' }, +{ oid => '2223', + opfmethod => 'hash', opfname => 'bytea_ops' }, +{ oid => '2789', + opfmethod => 'btree', opfname => 'tid_ops' }, +{ oid => '2225', + opfmethod => 'hash', opfname => 'xid_ops' }, +{ oid => '2226', + opfmethod => 'hash', opfname => 'cid_ops' }, +{ oid => '2229', + opfmethod => 'hash', opfname => 'text_pattern_ops' }, +{ oid => '2231', + opfmethod => 'hash', opfname => 'bpchar_pattern_ops' }, +{ oid => '2235', + opfmethod => 'hash', opfname => 'aclitem_ops' }, +{ oid => '2593', + opfmethod => 'gist', opfname => 'box_ops' }, +{ oid => '2594', + opfmethod => 'gist', opfname => 'poly_ops' }, +{ oid => '2595', + opfmethod => 'gist', opfname => 'circle_ops' }, +{ oid => '1029', + opfmethod => 'gist', opfname => 'point_ops' }, +{ oid => '2745', + opfmethod => 'gin', opfname => 'array_ops' }, +{ oid => '2968', + opfmethod => 'btree', opfname => 'uuid_ops' }, +{ oid => '2969', + opfmethod => 'hash', opfname => 'uuid_ops' }, +{ oid => '3253', + opfmethod => 'btree', opfname => 'pg_lsn_ops' }, +{ oid => '3254', + opfmethod => 'hash', opfname => 'pg_lsn_ops' }, +{ oid => '3522', + opfmethod => 'btree', opfname => 'enum_ops' }, +{ oid => '3523', + opfmethod => 'hash', opfname => 'enum_ops' }, +{ oid => '3626', + opfmethod => 'btree', opfname => 'tsvector_ops' }, +{ oid => '3655', + opfmethod => 'gist', opfname => 'tsvector_ops' }, +{ oid => '3659', + opfmethod => 'gin', opfname => 'tsvector_ops' }, +{ oid => '3683', + opfmethod => 'btree', opfname => 'tsquery_ops' }, +{ oid => '3702', + opfmethod => 'gist', opfname => 'tsquery_ops' }, +{ oid => '3901', + opfmethod => 'btree', opfname => 'range_ops' }, +{ oid => '3903', + opfmethod => 'hash', opfname => 'range_ops' }, +{ oid => '3919', + opfmethod => 'gist', opfname => 'range_ops' }, +{ oid => '3474', + opfmethod => 'spgist', opfname => 'range_ops' }, +{ oid => '4015', + opfmethod => 'spgist', opfname => 'quad_point_ops' }, +{ oid => '4016', + opfmethod => 'spgist', opfname => 'kd_point_ops' }, +{ oid => '4017', oid_symbol => 'TEXT_SPGIST_FAM_OID', + opfmethod => 'spgist', opfname => 'text_ops' }, +{ oid => '4033', + opfmethod => 'btree', opfname => 'jsonb_ops' }, +{ oid => '4034', + opfmethod => 'hash', opfname => 'jsonb_ops' }, +{ oid => '4036', + opfmethod => 'gin', opfname => 'jsonb_ops' }, +{ oid => '4037', + opfmethod => 'gin', opfname => 'jsonb_path_ops' }, +{ oid => '4054', + opfmethod => 'brin', opfname => 'integer_minmax_ops' }, +{ oid => '4055', + opfmethod => 'brin', opfname => 'numeric_minmax_ops' }, +{ oid => '4056', + opfmethod => 
'brin', opfname => 'text_minmax_ops' }, +{ oid => '4058', + opfmethod => 'brin', opfname => 'timetz_minmax_ops' }, +{ oid => '4059', + opfmethod => 'brin', opfname => 'datetime_minmax_ops' }, +{ oid => '4062', + opfmethod => 'brin', opfname => 'char_minmax_ops' }, +{ oid => '4064', + opfmethod => 'brin', opfname => 'bytea_minmax_ops' }, +{ oid => '4065', + opfmethod => 'brin', opfname => 'name_minmax_ops' }, +{ oid => '4068', + opfmethod => 'brin', opfname => 'oid_minmax_ops' }, +{ oid => '4069', + opfmethod => 'brin', opfname => 'tid_minmax_ops' }, +{ oid => '4070', + opfmethod => 'brin', opfname => 'float_minmax_ops' }, +{ oid => '4074', + opfmethod => 'brin', opfname => 'macaddr_minmax_ops' }, +{ oid => '4109', + opfmethod => 'brin', opfname => 'macaddr8_minmax_ops' }, +{ oid => '4075', + opfmethod => 'brin', opfname => 'network_minmax_ops' }, +{ oid => '4102', + opfmethod => 'brin', opfname => 'network_inclusion_ops' }, +{ oid => '4076', + opfmethod => 'brin', opfname => 'bpchar_minmax_ops' }, +{ oid => '4077', + opfmethod => 'brin', opfname => 'time_minmax_ops' }, +{ oid => '4078', + opfmethod => 'brin', opfname => 'interval_minmax_ops' }, +{ oid => '4079', + opfmethod => 'brin', opfname => 'bit_minmax_ops' }, +{ oid => '4080', + opfmethod => 'brin', opfname => 'varbit_minmax_ops' }, +{ oid => '4081', + opfmethod => 'brin', opfname => 'uuid_minmax_ops' }, +{ oid => '4103', + opfmethod => 'brin', opfname => 'range_inclusion_ops' }, +{ oid => '4082', + opfmethod => 'brin', opfname => 'pg_lsn_minmax_ops' }, +{ oid => '4104', + opfmethod => 'brin', opfname => 'box_inclusion_ops' }, +{ oid => '5000', + opfmethod => 'spgist', opfname => 'box_ops' }, +{ oid => '5008', + opfmethod => 'spgist', opfname => 'poly_ops' }, + +] diff --git a/src/include/catalog/pg_opfamily.h b/src/include/catalog/pg_opfamily.h index 0d0ba7c66a..99dedbc42d 100644 --- a/src/include/catalog/pg_opfamily.h +++ b/src/include/catalog/pg_opfamily.h @@ -1,18 +1,17 @@ /*------------------------------------------------------------------------- * * pg_opfamily.h - * definition of the system "opfamily" relation (pg_opfamily) - * along with the relation's initial contents. + * definition of the "operator family" system catalog (pg_opfamily) * * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/include/catalog/pg_opfamily.h * * NOTES - * the genbki.pl script reads this file and generates .bki - * information from the DATA() statements. + * The Catalog.pm module reads this file and derives schema + * information. * *------------------------------------------------------------------------- */ @@ -20,20 +19,26 @@ #define PG_OPFAMILY_H #include "catalog/genbki.h" +#include "catalog/pg_opfamily_d.h" /* ---------------- * pg_opfamily definition. 
cpp turns this into * typedef struct FormData_pg_opfamily * ---------------- */ -#define OperatorFamilyRelationId 2753 - -CATALOG(pg_opfamily,2753) +CATALOG(pg_opfamily,2753,OperatorFamilyRelationId) { - Oid opfmethod; /* index access method opfamily is for */ - NameData opfname; /* name of this opfamily */ - Oid opfnamespace; /* namespace of this opfamily */ - Oid opfowner; /* opfamily owner */ + /* index access method opfamily is for */ + Oid opfmethod BKI_LOOKUP(pg_am); + + /* name of this opfamily */ + NameData opfname; + + /* namespace of this opfamily */ + Oid opfnamespace BKI_DEFAULT(PGNSP); + + /* opfamily owner */ + Oid opfowner BKI_DEFAULT(PGUID); } FormData_pg_opfamily; /* ---------------- @@ -43,148 +48,11 @@ CATALOG(pg_opfamily,2753) */ typedef FormData_pg_opfamily *Form_pg_opfamily; -/* ---------------- - * compiler constants for pg_opfamily - * ---------------- - */ -#define Natts_pg_opfamily 4 -#define Anum_pg_opfamily_opfmethod 1 -#define Anum_pg_opfamily_opfname 2 -#define Anum_pg_opfamily_opfnamespace 3 -#define Anum_pg_opfamily_opfowner 4 - -/* ---------------- - * initial contents of pg_opfamily - * ---------------- - */ +#ifdef EXPOSE_TO_CLIENT_CODE -DATA(insert OID = 421 ( 403 abstime_ops PGNSP PGUID )); -DATA(insert OID = 397 ( 403 array_ops PGNSP PGUID )); -DATA(insert OID = 627 ( 405 array_ops PGNSP PGUID )); -DATA(insert OID = 423 ( 403 bit_ops PGNSP PGUID )); -DATA(insert OID = 424 ( 403 bool_ops PGNSP PGUID )); -#define BOOL_BTREE_FAM_OID 424 -DATA(insert OID = 426 ( 403 bpchar_ops PGNSP PGUID )); -#define BPCHAR_BTREE_FAM_OID 426 -DATA(insert OID = 427 ( 405 bpchar_ops PGNSP PGUID )); -DATA(insert OID = 428 ( 403 bytea_ops PGNSP PGUID )); -#define BYTEA_BTREE_FAM_OID 428 -DATA(insert OID = 429 ( 403 char_ops PGNSP PGUID )); -DATA(insert OID = 431 ( 405 char_ops PGNSP PGUID )); -DATA(insert OID = 434 ( 403 datetime_ops PGNSP PGUID )); -DATA(insert OID = 435 ( 405 date_ops PGNSP PGUID )); -DATA(insert OID = 1970 ( 403 float_ops PGNSP PGUID )); -DATA(insert OID = 1971 ( 405 float_ops PGNSP PGUID )); -DATA(insert OID = 1974 ( 403 network_ops PGNSP PGUID )); -#define NETWORK_BTREE_FAM_OID 1974 -DATA(insert OID = 1975 ( 405 network_ops PGNSP PGUID )); -DATA(insert OID = 3550 ( 783 network_ops PGNSP PGUID )); -DATA(insert OID = 3794 ( 4000 network_ops PGNSP PGUID )); -DATA(insert OID = 1976 ( 403 integer_ops PGNSP PGUID )); -#define INTEGER_BTREE_FAM_OID 1976 -DATA(insert OID = 1977 ( 405 integer_ops PGNSP PGUID )); -DATA(insert OID = 1982 ( 403 interval_ops PGNSP PGUID )); -DATA(insert OID = 1983 ( 405 interval_ops PGNSP PGUID )); -DATA(insert OID = 1984 ( 403 macaddr_ops PGNSP PGUID )); -DATA(insert OID = 1985 ( 405 macaddr_ops PGNSP PGUID )); -DATA(insert OID = 3371 ( 403 macaddr8_ops PGNSP PGUID )); -DATA(insert OID = 3372 ( 405 macaddr8_ops PGNSP PGUID )); -DATA(insert OID = 1986 ( 403 name_ops PGNSP PGUID )); -#define NAME_BTREE_FAM_OID 1986 -DATA(insert OID = 1987 ( 405 name_ops PGNSP PGUID )); -DATA(insert OID = 1988 ( 403 numeric_ops PGNSP PGUID )); -DATA(insert OID = 1998 ( 405 numeric_ops PGNSP PGUID )); -DATA(insert OID = 1989 ( 403 oid_ops PGNSP PGUID )); -#define OID_BTREE_FAM_OID 1989 -DATA(insert OID = 1990 ( 405 oid_ops PGNSP PGUID )); -DATA(insert OID = 1991 ( 403 oidvector_ops PGNSP PGUID )); -DATA(insert OID = 1992 ( 405 oidvector_ops PGNSP PGUID )); -DATA(insert OID = 2994 ( 403 record_ops PGNSP PGUID )); -DATA(insert OID = 3194 ( 403 record_image_ops PGNSP PGUID )); -DATA(insert OID = 1994 ( 403 text_ops PGNSP PGUID )); -#define 
TEXT_BTREE_FAM_OID 1994 -DATA(insert OID = 1995 ( 405 text_ops PGNSP PGUID )); -DATA(insert OID = 1996 ( 403 time_ops PGNSP PGUID )); -DATA(insert OID = 1997 ( 405 time_ops PGNSP PGUID )); -DATA(insert OID = 1999 ( 405 timestamptz_ops PGNSP PGUID )); -DATA(insert OID = 2000 ( 403 timetz_ops PGNSP PGUID )); -DATA(insert OID = 2001 ( 405 timetz_ops PGNSP PGUID )); -DATA(insert OID = 2002 ( 403 varbit_ops PGNSP PGUID )); -DATA(insert OID = 2040 ( 405 timestamp_ops PGNSP PGUID )); -DATA(insert OID = 2095 ( 403 text_pattern_ops PGNSP PGUID )); -#define TEXT_PATTERN_BTREE_FAM_OID 2095 -DATA(insert OID = 2097 ( 403 bpchar_pattern_ops PGNSP PGUID )); -#define BPCHAR_PATTERN_BTREE_FAM_OID 2097 -DATA(insert OID = 2099 ( 403 money_ops PGNSP PGUID )); -DATA(insert OID = 2222 ( 405 bool_ops PGNSP PGUID )); -#define BOOL_HASH_FAM_OID 2222 -DATA(insert OID = 2223 ( 405 bytea_ops PGNSP PGUID )); -DATA(insert OID = 2789 ( 403 tid_ops PGNSP PGUID )); -DATA(insert OID = 2225 ( 405 xid_ops PGNSP PGUID )); -DATA(insert OID = 2226 ( 405 cid_ops PGNSP PGUID )); -DATA(insert OID = 2227 ( 405 abstime_ops PGNSP PGUID )); -DATA(insert OID = 2228 ( 405 reltime_ops PGNSP PGUID )); -DATA(insert OID = 2229 ( 405 text_pattern_ops PGNSP PGUID )); -DATA(insert OID = 2231 ( 405 bpchar_pattern_ops PGNSP PGUID )); -DATA(insert OID = 2233 ( 403 reltime_ops PGNSP PGUID )); -DATA(insert OID = 2234 ( 403 tinterval_ops PGNSP PGUID )); -DATA(insert OID = 2235 ( 405 aclitem_ops PGNSP PGUID )); -DATA(insert OID = 2593 ( 783 box_ops PGNSP PGUID )); -DATA(insert OID = 2594 ( 783 poly_ops PGNSP PGUID )); -DATA(insert OID = 2595 ( 783 circle_ops PGNSP PGUID )); -DATA(insert OID = 1029 ( 783 point_ops PGNSP PGUID )); -DATA(insert OID = 2745 ( 2742 array_ops PGNSP PGUID )); -DATA(insert OID = 2968 ( 403 uuid_ops PGNSP PGUID )); -DATA(insert OID = 2969 ( 405 uuid_ops PGNSP PGUID )); -DATA(insert OID = 3253 ( 403 pg_lsn_ops PGNSP PGUID )); -DATA(insert OID = 3254 ( 405 pg_lsn_ops PGNSP PGUID )); -DATA(insert OID = 3522 ( 403 enum_ops PGNSP PGUID )); -DATA(insert OID = 3523 ( 405 enum_ops PGNSP PGUID )); -DATA(insert OID = 3626 ( 403 tsvector_ops PGNSP PGUID )); -DATA(insert OID = 3655 ( 783 tsvector_ops PGNSP PGUID )); -DATA(insert OID = 3659 ( 2742 tsvector_ops PGNSP PGUID )); -DATA(insert OID = 3683 ( 403 tsquery_ops PGNSP PGUID )); -DATA(insert OID = 3702 ( 783 tsquery_ops PGNSP PGUID )); -DATA(insert OID = 3901 ( 403 range_ops PGNSP PGUID )); -DATA(insert OID = 3903 ( 405 range_ops PGNSP PGUID )); -DATA(insert OID = 3919 ( 783 range_ops PGNSP PGUID )); -DATA(insert OID = 3474 ( 4000 range_ops PGNSP PGUID )); -DATA(insert OID = 4015 ( 4000 quad_point_ops PGNSP PGUID )); -DATA(insert OID = 4016 ( 4000 kd_point_ops PGNSP PGUID )); -DATA(insert OID = 4017 ( 4000 text_ops PGNSP PGUID )); -#define TEXT_SPGIST_FAM_OID 4017 -DATA(insert OID = 4033 ( 403 jsonb_ops PGNSP PGUID )); -DATA(insert OID = 4034 ( 405 jsonb_ops PGNSP PGUID )); -DATA(insert OID = 4035 ( 783 jsonb_ops PGNSP PGUID )); -DATA(insert OID = 4036 ( 2742 jsonb_ops PGNSP PGUID )); -DATA(insert OID = 4037 ( 2742 jsonb_path_ops PGNSP PGUID )); +#define IsBooleanOpfamily(opfamily) \ + ((opfamily) == BOOL_BTREE_FAM_OID || (opfamily) == BOOL_HASH_FAM_OID) -DATA(insert OID = 4054 ( 3580 integer_minmax_ops PGNSP PGUID )); -DATA(insert OID = 4055 ( 3580 numeric_minmax_ops PGNSP PGUID )); -DATA(insert OID = 4056 ( 3580 text_minmax_ops PGNSP PGUID )); -DATA(insert OID = 4058 ( 3580 timetz_minmax_ops PGNSP PGUID )); -DATA(insert OID = 4059 ( 3580 datetime_minmax_ops PGNSP PGUID )); 
-DATA(insert OID = 4062 ( 3580 char_minmax_ops PGNSP PGUID )); -DATA(insert OID = 4064 ( 3580 bytea_minmax_ops PGNSP PGUID )); -DATA(insert OID = 4065 ( 3580 name_minmax_ops PGNSP PGUID )); -DATA(insert OID = 4068 ( 3580 oid_minmax_ops PGNSP PGUID )); -DATA(insert OID = 4069 ( 3580 tid_minmax_ops PGNSP PGUID )); -DATA(insert OID = 4070 ( 3580 float_minmax_ops PGNSP PGUID )); -DATA(insert OID = 4072 ( 3580 abstime_minmax_ops PGNSP PGUID )); -DATA(insert OID = 4073 ( 3580 reltime_minmax_ops PGNSP PGUID )); -DATA(insert OID = 4074 ( 3580 macaddr_minmax_ops PGNSP PGUID )); -DATA(insert OID = 4109 ( 3580 macaddr8_minmax_ops PGNSP PGUID )); -DATA(insert OID = 4075 ( 3580 network_minmax_ops PGNSP PGUID )); -DATA(insert OID = 4102 ( 3580 network_inclusion_ops PGNSP PGUID )); -DATA(insert OID = 4076 ( 3580 bpchar_minmax_ops PGNSP PGUID )); -DATA(insert OID = 4077 ( 3580 time_minmax_ops PGNSP PGUID )); -DATA(insert OID = 4078 ( 3580 interval_minmax_ops PGNSP PGUID )); -DATA(insert OID = 4079 ( 3580 bit_minmax_ops PGNSP PGUID )); -DATA(insert OID = 4080 ( 3580 varbit_minmax_ops PGNSP PGUID )); -DATA(insert OID = 4081 ( 3580 uuid_minmax_ops PGNSP PGUID )); -DATA(insert OID = 4103 ( 3580 range_inclusion_ops PGNSP PGUID )); -DATA(insert OID = 4082 ( 3580 pg_lsn_minmax_ops PGNSP PGUID )); -DATA(insert OID = 4104 ( 3580 box_inclusion_ops PGNSP PGUID )); -DATA(insert OID = 5000 ( 4000 box_ops PGNSP PGUID )); +#endif /* EXPOSE_TO_CLIENT_CODE */ #endif /* PG_OPFAMILY_H */ diff --git a/src/include/catalog/pg_partitioned_table.h b/src/include/catalog/pg_partitioned_table.h index 38d64d6511..78bc5c81fb 100644 --- a/src/include/catalog/pg_partitioned_table.h +++ b/src/include/catalog/pg_partitioned_table.h @@ -1,17 +1,18 @@ /*------------------------------------------------------------------------- * * pg_partitioned_table.h - * definition of the system "partitioned table" relation - * along with the relation's initial contents. + * definition of the "partitioned table" system catalog + * (pg_partitioned_table) * * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group + * Portions Copyright (c) 1994, Regents of the University of California * * src/include/catalog/pg_partitioned_table.h * * NOTES - * the genbki.sh script reads this file and generates .bki - * information from the DATA() statements. + * The Catalog.pm module reads this file and derives schema + * information. * *------------------------------------------------------------------------- */ @@ -19,19 +20,20 @@ #define PG_PARTITIONED_TABLE_H #include "catalog/genbki.h" +#include "catalog/pg_partitioned_table_d.h" /* ---------------- * pg_partitioned_table definition. 
cpp turns this into * typedef struct FormData_pg_partitioned_table * ---------------- */ -#define PartitionedRelationId 3350 - -CATALOG(pg_partitioned_table,3350) BKI_WITHOUT_OIDS +CATALOG(pg_partitioned_table,3350,PartitionedRelationId) BKI_WITHOUT_OIDS { Oid partrelid; /* partitioned table oid */ char partstrat; /* partitioning strategy */ int16 partnatts; /* number of partition key columns */ + Oid partdefid; /* default partition oid; InvalidOid if there + * isn't one */ /* * variable-length fields start here, but we allow direct access to @@ -58,17 +60,4 @@ CATALOG(pg_partitioned_table,3350) BKI_WITHOUT_OIDS */ typedef FormData_pg_partitioned_table *Form_pg_partitioned_table; -/* ---------------- - * compiler constants for pg_partitioned_table - * ---------------- - */ -#define Natts_pg_partitioned_table 7 -#define Anum_pg_partitioned_table_partrelid 1 -#define Anum_pg_partitioned_table_partstrat 2 -#define Anum_pg_partitioned_table_partnatts 3 -#define Anum_pg_partitioned_table_partattrs 4 -#define Anum_pg_partitioned_table_partclass 5 -#define Anum_pg_partitioned_table_partcollation 6 -#define Anum_pg_partitioned_table_partexprs 7 - #endif /* PG_PARTITIONED_TABLE_H */ diff --git a/src/include/catalog/pg_pltemplate.dat b/src/include/catalog/pg_pltemplate.dat new file mode 100644 index 0000000000..8f1b8dab0f --- /dev/null +++ b/src/include/catalog/pg_pltemplate.dat @@ -0,0 +1,51 @@ +#---------------------------------------------------------------------- +# +# pg_pltemplate.dat +# Initial contents of the pg_pltemplate system catalog. +# +# Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group +# Portions Copyright (c) 1994, Regents of the University of California +# +# src/include/catalog/pg_pltemplate.dat +# +#---------------------------------------------------------------------- + +[ + +{ tmplname => 'plpgsql', tmpltrusted => 't', tmpldbacreate => 't', + tmplhandler => 'plpgsql_call_handler', tmplinline => 'plpgsql_inline_handler', + tmplvalidator => 'plpgsql_validator', tmpllibrary => '$libdir/plpgsql', + tmplacl => '_null_' }, +{ tmplname => 'pltcl', tmpltrusted => 't', tmpldbacreate => 't', + tmplhandler => 'pltcl_call_handler', tmplinline => '_null_', + tmplvalidator => '_null_', tmpllibrary => '$libdir/pltcl', + tmplacl => '_null_' }, +{ tmplname => 'pltclu', tmpltrusted => 'f', tmpldbacreate => 'f', + tmplhandler => 'pltclu_call_handler', tmplinline => '_null_', + tmplvalidator => '_null_', tmpllibrary => '$libdir/pltcl', + tmplacl => '_null_' }, +{ tmplname => 'plperl', tmpltrusted => 't', tmpldbacreate => 't', + tmplhandler => 'plperl_call_handler', tmplinline => 'plperl_inline_handler', + tmplvalidator => 'plperl_validator', tmpllibrary => '$libdir/plperl', + tmplacl => '_null_' }, +{ tmplname => 'plperlu', tmpltrusted => 'f', tmpldbacreate => 'f', + tmplhandler => 'plperlu_call_handler', tmplinline => 'plperlu_inline_handler', + tmplvalidator => 'plperlu_validator', tmpllibrary => '$libdir/plperl', + tmplacl => '_null_' }, +{ tmplname => 'plpythonu', tmpltrusted => 'f', tmpldbacreate => 'f', + tmplhandler => 'plpython_call_handler', + tmplinline => 'plpython_inline_handler', + tmplvalidator => 'plpython_validator', tmpllibrary => '$libdir/plpython2', + tmplacl => '_null_' }, +{ tmplname => 'plpython2u', tmpltrusted => 'f', tmpldbacreate => 'f', + tmplhandler => 'plpython2_call_handler', + tmplinline => 'plpython2_inline_handler', + tmplvalidator => 'plpython2_validator', tmpllibrary => '$libdir/plpython2', + tmplacl => '_null_' }, +{ tmplname => 'plpython3u', 
tmpltrusted => 'f', tmpldbacreate => 'f', + tmplhandler => 'plpython3_call_handler', + tmplinline => 'plpython3_inline_handler', + tmplvalidator => 'plpython3_validator', tmpllibrary => '$libdir/plpython3', + tmplacl => '_null_' }, + +] diff --git a/src/include/catalog/pg_pltemplate.h b/src/include/catalog/pg_pltemplate.h index fbe71bd0c3..ae06b75212 100644 --- a/src/include/catalog/pg_pltemplate.h +++ b/src/include/catalog/pg_pltemplate.h @@ -1,18 +1,17 @@ /*------------------------------------------------------------------------- * * pg_pltemplate.h - * definition of the system "PL template" relation (pg_pltemplate) - * along with the relation's initial contents. + * definition of the "PL template" system catalog (pg_pltemplate) * * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/include/catalog/pg_pltemplate.h * * NOTES - * the genbki.pl script reads this file and generates .bki - * information from the DATA() statements. + * The Catalog.pm module reads this file and derives schema + * information. * *------------------------------------------------------------------------- */ @@ -20,15 +19,14 @@ #define PG_PLTEMPLATE_H #include "catalog/genbki.h" +#include "catalog/pg_pltemplate_d.h" /* ---------------- * pg_pltemplate definition. cpp turns this into * typedef struct FormData_pg_pltemplate * ---------------- */ -#define PLTemplateRelationId 1136 - -CATALOG(pg_pltemplate,1136) BKI_SHARED_RELATION BKI_WITHOUT_OIDS +CATALOG(pg_pltemplate,1136,PLTemplateRelationId) BKI_SHARED_RELATION BKI_WITHOUT_OIDS { NameData tmplname; /* name of PL */ bool tmpltrusted; /* PL is trusted? 
*/ @@ -51,33 +49,4 @@ CATALOG(pg_pltemplate,1136) BKI_SHARED_RELATION BKI_WITHOUT_OIDS */ typedef FormData_pg_pltemplate *Form_pg_pltemplate; -/* ---------------- - * compiler constants for pg_pltemplate - * ---------------- - */ -#define Natts_pg_pltemplate 8 -#define Anum_pg_pltemplate_tmplname 1 -#define Anum_pg_pltemplate_tmpltrusted 2 -#define Anum_pg_pltemplate_tmpldbacreate 3 -#define Anum_pg_pltemplate_tmplhandler 4 -#define Anum_pg_pltemplate_tmplinline 5 -#define Anum_pg_pltemplate_tmplvalidator 6 -#define Anum_pg_pltemplate_tmpllibrary 7 -#define Anum_pg_pltemplate_tmplacl 8 - - -/* ---------------- - * initial contents of pg_pltemplate - * ---------------- - */ - -DATA(insert ( "plpgsql" t t "plpgsql_call_handler" "plpgsql_inline_handler" "plpgsql_validator" "$libdir/plpgsql" _null_ )); -DATA(insert ( "pltcl" t t "pltcl_call_handler" _null_ _null_ "$libdir/pltcl" _null_ )); -DATA(insert ( "pltclu" f f "pltclu_call_handler" _null_ _null_ "$libdir/pltcl" _null_ )); -DATA(insert ( "plperl" t t "plperl_call_handler" "plperl_inline_handler" "plperl_validator" "$libdir/plperl" _null_ )); -DATA(insert ( "plperlu" f f "plperlu_call_handler" "plperlu_inline_handler" "plperlu_validator" "$libdir/plperl" _null_ )); -DATA(insert ( "plpythonu" f f "plpython_call_handler" "plpython_inline_handler" "plpython_validator" "$libdir/plpython2" _null_ )); -DATA(insert ( "plpython2u" f f "plpython2_call_handler" "plpython2_inline_handler" "plpython2_validator" "$libdir/plpython2" _null_ )); -DATA(insert ( "plpython3u" f f "plpython3_call_handler" "plpython3_inline_handler" "plpython3_validator" "$libdir/plpython3" _null_ )); - #endif /* PG_PLTEMPLATE_H */ diff --git a/src/include/catalog/pg_policy.h b/src/include/catalog/pg_policy.h index 86000737fa..0dd9c50e53 100644 --- a/src/include/catalog/pg_policy.h +++ b/src/include/catalog/pg_policy.h @@ -1,24 +1,32 @@ -/* +/*------------------------------------------------------------------------- + * * pg_policy.h - * definition of the system "policy" relation (pg_policy) + * definition of the "policy" system catalog (pg_policy) + * * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * + * src/include/catalog/pg_policy.h + * + * NOTES + * The Catalog.pm module reads this file and derives schema + * information. + * + *------------------------------------------------------------------------- */ #ifndef PG_POLICY_H #define PG_POLICY_H #include "catalog/genbki.h" +#include "catalog/pg_policy_d.h" /* ---------------- * pg_policy definition. cpp turns this into * typedef struct FormData_pg_policy * ---------------- */ -#define PolicyRelationId 3256 - -CATALOG(pg_policy,3256) +CATALOG(pg_policy,3256,PolicyRelationId) { NameData polname; /* Policy name. */ Oid polrelid; /* Oid of the relation with policy. */ @@ -26,7 +34,7 @@ CATALOG(pg_policy,3256) bool polpermissive; /* restrictive or permissive policy */ #ifdef CATALOG_VARLEN - Oid polroles[1]; /* Roles associated with policy, not-NULL */ + Oid polroles[1] BKI_FORCE_NOT_NULL; /* Roles associated with policy */ pg_node_tree polqual; /* Policy quals. */ pg_node_tree polwithcheck; /* WITH CHECK quals. 
*/ #endif @@ -39,17 +47,4 @@ CATALOG(pg_policy,3256) */ typedef FormData_pg_policy *Form_pg_policy; -/* ---------------- - * compiler constants for pg_policy - * ---------------- - */ -#define Natts_pg_policy 7 -#define Anum_pg_policy_polname 1 -#define Anum_pg_policy_polrelid 2 -#define Anum_pg_policy_polcmd 3 -#define Anum_pg_policy_polpermissive 4 -#define Anum_pg_policy_polroles 5 -#define Anum_pg_policy_polqual 6 -#define Anum_pg_policy_polwithcheck 7 - #endif /* PG_POLICY_H */ diff --git a/src/include/catalog/pg_proc.dat b/src/include/catalog/pg_proc.dat new file mode 100644 index 0000000000..4026018ba9 --- /dev/null +++ b/src/include/catalog/pg_proc.dat @@ -0,0 +1,10041 @@ +#---------------------------------------------------------------------- +# +# pg_proc.dat +# Initial contents of the pg_proc system catalog. +# +# Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group +# Portions Copyright (c) 1994, Regents of the University of California +# +# src/include/catalog/pg_proc.dat +# +#---------------------------------------------------------------------- + +[ + +# Note: every entry in pg_proc.dat is expected to have a 'descr' comment, +# except for functions that implement pg_operator.dat operators and don't +# have a good reason to be called directly rather than via the operator. +# (If you do expect such a function to be used directly, you should +# duplicate the operator's comment.) initdb will supply suitable default +# comments for functions referenced by pg_operator. + +# Try to follow the style of existing functions' comments. +# Some recommended conventions: + +# "I/O" for typinput, typoutput, typreceive, typsend functions +# "I/O typmod" for typmodin, typmodout functions +# "aggregate transition function" for aggtransfn functions, unless +# they are reasonably useful in their own right +# "aggregate final function" for aggfinalfn functions (likewise) +# "convert srctypename to desttypename" for cast functions +# "less-equal-greater" for B-tree comparison functions + +# Note: pronargs is computed when this file is read, so it does not need +# to be specified in entries here. See AddDefaultValues() in Catalog.pm. + +# Once upon a time these entries were ordered by OID. Lately it's often +# been the custom to insert new entries adjacent to related older entries. +# Try to do one or the other though, don't just insert entries at random. 
+ +# OIDS 1 - 99 + +{ oid => '1242', descr => 'I/O', + proname => 'boolin', prorettype => 'bool', proargtypes => 'cstring', + prosrc => 'boolin' }, +{ oid => '1243', descr => 'I/O', + proname => 'boolout', prorettype => 'cstring', proargtypes => 'bool', + prosrc => 'boolout' }, +{ oid => '1244', descr => 'I/O', + proname => 'byteain', prorettype => 'bytea', proargtypes => 'cstring', + prosrc => 'byteain' }, +{ oid => '31', descr => 'I/O', + proname => 'byteaout', prorettype => 'cstring', proargtypes => 'bytea', + prosrc => 'byteaout' }, +{ oid => '1245', descr => 'I/O', + proname => 'charin', prorettype => 'char', proargtypes => 'cstring', + prosrc => 'charin' }, +{ oid => '33', descr => 'I/O', + proname => 'charout', prorettype => 'cstring', proargtypes => 'char', + prosrc => 'charout' }, +{ oid => '34', descr => 'I/O', + proname => 'namein', prorettype => 'name', proargtypes => 'cstring', + prosrc => 'namein' }, +{ oid => '35', descr => 'I/O', + proname => 'nameout', prorettype => 'cstring', proargtypes => 'name', + prosrc => 'nameout' }, +{ oid => '38', descr => 'I/O', + proname => 'int2in', prorettype => 'int2', proargtypes => 'cstring', + prosrc => 'int2in' }, +{ oid => '39', descr => 'I/O', + proname => 'int2out', prorettype => 'cstring', proargtypes => 'int2', + prosrc => 'int2out' }, +{ oid => '40', descr => 'I/O', + proname => 'int2vectorin', prorettype => 'int2vector', + proargtypes => 'cstring', prosrc => 'int2vectorin' }, +{ oid => '41', descr => 'I/O', + proname => 'int2vectorout', prorettype => 'cstring', + proargtypes => 'int2vector', prosrc => 'int2vectorout' }, +{ oid => '42', descr => 'I/O', + proname => 'int4in', prorettype => 'int4', proargtypes => 'cstring', + prosrc => 'int4in' }, +{ oid => '43', descr => 'I/O', + proname => 'int4out', prorettype => 'cstring', proargtypes => 'int4', + prosrc => 'int4out' }, +{ oid => '44', descr => 'I/O', + proname => 'regprocin', provolatile => 's', prorettype => 'regproc', + proargtypes => 'cstring', prosrc => 'regprocin' }, +{ oid => '45', descr => 'I/O', + proname => 'regprocout', provolatile => 's', prorettype => 'cstring', + proargtypes => 'regproc', prosrc => 'regprocout' }, +{ oid => '3494', descr => 'convert proname to regproc', + proname => 'to_regproc', provolatile => 's', prorettype => 'regproc', + proargtypes => 'text', prosrc => 'to_regproc' }, +{ oid => '3479', descr => 'convert proname to regprocedure', + proname => 'to_regprocedure', provolatile => 's', + prorettype => 'regprocedure', proargtypes => 'text', + prosrc => 'to_regprocedure' }, +{ oid => '46', descr => 'I/O', + proname => 'textin', prorettype => 'text', proargtypes => 'cstring', + prosrc => 'textin' }, +{ oid => '47', descr => 'I/O', + proname => 'textout', prorettype => 'cstring', proargtypes => 'text', + prosrc => 'textout' }, +{ oid => '48', descr => 'I/O', + proname => 'tidin', prorettype => 'tid', proargtypes => 'cstring', + prosrc => 'tidin' }, +{ oid => '49', descr => 'I/O', + proname => 'tidout', prorettype => 'cstring', proargtypes => 'tid', + prosrc => 'tidout' }, +{ oid => '50', descr => 'I/O', + proname => 'xidin', prorettype => 'xid', proargtypes => 'cstring', + prosrc => 'xidin' }, +{ oid => '51', descr => 'I/O', + proname => 'xidout', prorettype => 'cstring', proargtypes => 'xid', + prosrc => 'xidout' }, +{ oid => '52', descr => 'I/O', + proname => 'cidin', prorettype => 'cid', proargtypes => 'cstring', + prosrc => 'cidin' }, +{ oid => '53', descr => 'I/O', + proname => 'cidout', prorettype => 'cstring', proargtypes => 'cid', + prosrc => 
'cidout' }, +{ oid => '54', descr => 'I/O', + proname => 'oidvectorin', prorettype => 'oidvector', proargtypes => 'cstring', + prosrc => 'oidvectorin' }, +{ oid => '55', descr => 'I/O', + proname => 'oidvectorout', prorettype => 'cstring', + proargtypes => 'oidvector', prosrc => 'oidvectorout' }, +{ oid => '56', + proname => 'boollt', proleakproof => 't', prorettype => 'bool', + proargtypes => 'bool bool', prosrc => 'boollt' }, +{ oid => '57', + proname => 'boolgt', proleakproof => 't', prorettype => 'bool', + proargtypes => 'bool bool', prosrc => 'boolgt' }, +{ oid => '60', + proname => 'booleq', proleakproof => 't', prorettype => 'bool', + proargtypes => 'bool bool', prosrc => 'booleq' }, +{ oid => '61', + proname => 'chareq', proleakproof => 't', prorettype => 'bool', + proargtypes => 'char char', prosrc => 'chareq' }, +{ oid => '62', + proname => 'nameeq', proleakproof => 't', prorettype => 'bool', + proargtypes => 'name name', prosrc => 'nameeq' }, +{ oid => '63', + proname => 'int2eq', proleakproof => 't', prorettype => 'bool', + proargtypes => 'int2 int2', prosrc => 'int2eq' }, +{ oid => '64', + proname => 'int2lt', proleakproof => 't', prorettype => 'bool', + proargtypes => 'int2 int2', prosrc => 'int2lt' }, +{ oid => '65', + proname => 'int4eq', proleakproof => 't', prorettype => 'bool', + proargtypes => 'int4 int4', prosrc => 'int4eq' }, +{ oid => '66', + proname => 'int4lt', proleakproof => 't', prorettype => 'bool', + proargtypes => 'int4 int4', prosrc => 'int4lt' }, +{ oid => '67', + proname => 'texteq', proleakproof => 't', prorettype => 'bool', + proargtypes => 'text text', prosrc => 'texteq' }, +{ oid => '3696', + proname => 'starts_with', proleakproof => 't', prorettype => 'bool', + proargtypes => 'text text', prosrc => 'text_starts_with' }, +{ oid => '68', + proname => 'xideq', proleakproof => 't', prorettype => 'bool', + proargtypes => 'xid xid', prosrc => 'xideq' }, +{ oid => '3308', + proname => 'xidneq', proleakproof => 't', prorettype => 'bool', + proargtypes => 'xid xid', prosrc => 'xidneq' }, +{ oid => '69', + proname => 'cideq', proleakproof => 't', prorettype => 'bool', + proargtypes => 'cid cid', prosrc => 'cideq' }, +{ oid => '70', + proname => 'charne', proleakproof => 't', prorettype => 'bool', + proargtypes => 'char char', prosrc => 'charne' }, +{ oid => '1246', + proname => 'charlt', proleakproof => 't', prorettype => 'bool', + proargtypes => 'char char', prosrc => 'charlt' }, +{ oid => '72', + proname => 'charle', proleakproof => 't', prorettype => 'bool', + proargtypes => 'char char', prosrc => 'charle' }, +{ oid => '73', + proname => 'chargt', proleakproof => 't', prorettype => 'bool', + proargtypes => 'char char', prosrc => 'chargt' }, +{ oid => '74', + proname => 'charge', proleakproof => 't', prorettype => 'bool', + proargtypes => 'char char', prosrc => 'charge' }, +{ oid => '77', descr => 'convert char to int4', + proname => 'int4', prorettype => 'int4', proargtypes => 'char', + prosrc => 'chartoi4' }, +{ oid => '78', descr => 'convert int4 to char', + proname => 'char', prorettype => 'char', proargtypes => 'int4', + prosrc => 'i4tochar' }, + +{ oid => '79', + proname => 'nameregexeq', prorettype => 'bool', proargtypes => 'name text', + prosrc => 'nameregexeq' }, +{ oid => '1252', + proname => 'nameregexne', prorettype => 'bool', proargtypes => 'name text', + prosrc => 'nameregexne' }, +{ oid => '1254', + proname => 'textregexeq', prorettype => 'bool', proargtypes => 'text text', + prosrc => 'textregexeq' }, +{ oid => '1256', + proname => 
'textregexne', prorettype => 'bool', proargtypes => 'text text', + prosrc => 'textregexne' }, +{ oid => '1257', descr => 'length', + proname => 'textlen', prorettype => 'int4', proargtypes => 'text', + prosrc => 'textlen' }, +{ oid => '1258', + proname => 'textcat', prorettype => 'text', proargtypes => 'text text', + prosrc => 'textcat' }, + +{ oid => '84', + proname => 'boolne', proleakproof => 't', prorettype => 'bool', + proargtypes => 'bool bool', prosrc => 'boolne' }, +{ oid => '89', descr => 'PostgreSQL version string', + proname => 'version', provolatile => 's', prorettype => 'text', + proargtypes => '', prosrc => 'pgsql_version' }, + +{ oid => '86', descr => 'I/O', + proname => 'pg_ddl_command_in', prorettype => 'pg_ddl_command', + proargtypes => 'cstring', prosrc => 'pg_ddl_command_in' }, +{ oid => '87', descr => 'I/O', + proname => 'pg_ddl_command_out', prorettype => 'cstring', + proargtypes => 'pg_ddl_command', prosrc => 'pg_ddl_command_out' }, +{ oid => '88', descr => 'I/O', + proname => 'pg_ddl_command_recv', prorettype => 'pg_ddl_command', + proargtypes => 'internal', prosrc => 'pg_ddl_command_recv' }, +{ oid => '90', descr => 'I/O', + proname => 'pg_ddl_command_send', prorettype => 'bytea', + proargtypes => 'pg_ddl_command', prosrc => 'pg_ddl_command_send' }, + +# OIDS 100 - 199 + +{ oid => '101', descr => 'restriction selectivity of = and related operators', + proname => 'eqsel', provolatile => 's', prorettype => 'float8', + proargtypes => 'internal oid internal int4', prosrc => 'eqsel' }, +{ oid => '102', + descr => 'restriction selectivity of <> and related operators', + proname => 'neqsel', provolatile => 's', prorettype => 'float8', + proargtypes => 'internal oid internal int4', prosrc => 'neqsel' }, +{ oid => '103', + descr => 'restriction selectivity of < and related operators on scalar datatypes', + proname => 'scalarltsel', provolatile => 's', prorettype => 'float8', + proargtypes => 'internal oid internal int4', prosrc => 'scalarltsel' }, +{ oid => '104', + descr => 'restriction selectivity of > and related operators on scalar datatypes', + proname => 'scalargtsel', provolatile => 's', prorettype => 'float8', + proargtypes => 'internal oid internal int4', prosrc => 'scalargtsel' }, +{ oid => '105', descr => 'join selectivity of = and related operators', + proname => 'eqjoinsel', provolatile => 's', prorettype => 'float8', + proargtypes => 'internal oid internal int2 internal', prosrc => 'eqjoinsel' }, +{ oid => '106', descr => 'join selectivity of <> and related operators', + proname => 'neqjoinsel', provolatile => 's', prorettype => 'float8', + proargtypes => 'internal oid internal int2 internal', + prosrc => 'neqjoinsel' }, +{ oid => '107', + descr => 'join selectivity of < and related operators on scalar datatypes', + proname => 'scalarltjoinsel', provolatile => 's', prorettype => 'float8', + proargtypes => 'internal oid internal int2 internal', + prosrc => 'scalarltjoinsel' }, +{ oid => '108', + descr => 'join selectivity of > and related operators on scalar datatypes', + proname => 'scalargtjoinsel', provolatile => 's', prorettype => 'float8', + proargtypes => 'internal oid internal int2 internal', + prosrc => 'scalargtjoinsel' }, + +{ oid => '336', + descr => 'restriction selectivity of <= and related operators on scalar datatypes', + proname => 'scalarlesel', provolatile => 's', prorettype => 'float8', + proargtypes => 'internal oid internal int4', prosrc => 'scalarlesel' }, +{ oid => '337', + descr => 'restriction selectivity of >= and related operators on 
scalar datatypes', + proname => 'scalargesel', provolatile => 's', prorettype => 'float8', + proargtypes => 'internal oid internal int4', prosrc => 'scalargesel' }, +{ oid => '386', + descr => 'join selectivity of <= and related operators on scalar datatypes', + proname => 'scalarlejoinsel', provolatile => 's', prorettype => 'float8', + proargtypes => 'internal oid internal int2 internal', + prosrc => 'scalarlejoinsel' }, +{ oid => '398', + descr => 'join selectivity of >= and related operators on scalar datatypes', + proname => 'scalargejoinsel', provolatile => 's', prorettype => 'float8', + proargtypes => 'internal oid internal int2 internal', + prosrc => 'scalargejoinsel' }, + +{ oid => '109', descr => 'I/O', + proname => 'unknownin', prorettype => 'unknown', proargtypes => 'cstring', + prosrc => 'unknownin' }, +{ oid => '110', descr => 'I/O', + proname => 'unknownout', prorettype => 'cstring', proargtypes => 'unknown', + prosrc => 'unknownout' }, +{ oid => '111', + proname => 'numeric_fac', prorettype => 'numeric', proargtypes => 'int8', + prosrc => 'numeric_fac' }, + +{ oid => '115', + proname => 'box_above_eq', prorettype => 'bool', proargtypes => 'box box', + prosrc => 'box_above_eq' }, +{ oid => '116', + proname => 'box_below_eq', prorettype => 'bool', proargtypes => 'box box', + prosrc => 'box_below_eq' }, + +{ oid => '117', descr => 'I/O', + proname => 'point_in', prorettype => 'point', proargtypes => 'cstring', + prosrc => 'point_in' }, +{ oid => '118', descr => 'I/O', + proname => 'point_out', prorettype => 'cstring', proargtypes => 'point', + prosrc => 'point_out' }, +{ oid => '119', descr => 'I/O', + proname => 'lseg_in', prorettype => 'lseg', proargtypes => 'cstring', + prosrc => 'lseg_in' }, +{ oid => '120', descr => 'I/O', + proname => 'lseg_out', prorettype => 'cstring', proargtypes => 'lseg', + prosrc => 'lseg_out' }, +{ oid => '121', descr => 'I/O', + proname => 'path_in', prorettype => 'path', proargtypes => 'cstring', + prosrc => 'path_in' }, +{ oid => '122', descr => 'I/O', + proname => 'path_out', prorettype => 'cstring', proargtypes => 'path', + prosrc => 'path_out' }, +{ oid => '123', descr => 'I/O', + proname => 'box_in', prorettype => 'box', proargtypes => 'cstring', + prosrc => 'box_in' }, +{ oid => '124', descr => 'I/O', + proname => 'box_out', prorettype => 'cstring', proargtypes => 'box', + prosrc => 'box_out' }, +{ oid => '125', + proname => 'box_overlap', prorettype => 'bool', proargtypes => 'box box', + prosrc => 'box_overlap' }, +{ oid => '126', + proname => 'box_ge', prorettype => 'bool', proargtypes => 'box box', + prosrc => 'box_ge' }, +{ oid => '127', + proname => 'box_gt', prorettype => 'bool', proargtypes => 'box box', + prosrc => 'box_gt' }, +{ oid => '128', + proname => 'box_eq', prorettype => 'bool', proargtypes => 'box box', + prosrc => 'box_eq' }, +{ oid => '129', + proname => 'box_lt', prorettype => 'bool', proargtypes => 'box box', + prosrc => 'box_lt' }, +{ oid => '130', + proname => 'box_le', prorettype => 'bool', proargtypes => 'box box', + prosrc => 'box_le' }, +{ oid => '131', + proname => 'point_above', prorettype => 'bool', proargtypes => 'point point', + prosrc => 'point_above' }, +{ oid => '132', + proname => 'point_left', prorettype => 'bool', proargtypes => 'point point', + prosrc => 'point_left' }, +{ oid => '133', + proname => 'point_right', prorettype => 'bool', proargtypes => 'point point', + prosrc => 'point_right' }, +{ oid => '134', + proname => 'point_below', prorettype => 'bool', proargtypes => 'point point', + prosrc => 
'point_below' }, +{ oid => '135', + proname => 'point_eq', prorettype => 'bool', proargtypes => 'point point', + prosrc => 'point_eq' }, +{ oid => '136', + proname => 'on_pb', prorettype => 'bool', proargtypes => 'point box', + prosrc => 'on_pb' }, +{ oid => '137', + proname => 'on_ppath', prorettype => 'bool', proargtypes => 'point path', + prosrc => 'on_ppath' }, +{ oid => '138', + proname => 'box_center', prorettype => 'point', proargtypes => 'box', + prosrc => 'box_center' }, +{ oid => '139', + descr => 'restriction selectivity for area-comparison operators', + proname => 'areasel', provolatile => 's', prorettype => 'float8', + proargtypes => 'internal oid internal int4', prosrc => 'areasel' }, +{ oid => '140', descr => 'join selectivity for area-comparison operators', + proname => 'areajoinsel', provolatile => 's', prorettype => 'float8', + proargtypes => 'internal oid internal int2 internal', + prosrc => 'areajoinsel' }, +{ oid => '141', + proname => 'int4mul', prorettype => 'int4', proargtypes => 'int4 int4', + prosrc => 'int4mul' }, +{ oid => '144', + proname => 'int4ne', proleakproof => 't', prorettype => 'bool', + proargtypes => 'int4 int4', prosrc => 'int4ne' }, +{ oid => '145', + proname => 'int2ne', proleakproof => 't', prorettype => 'bool', + proargtypes => 'int2 int2', prosrc => 'int2ne' }, +{ oid => '146', + proname => 'int2gt', proleakproof => 't', prorettype => 'bool', + proargtypes => 'int2 int2', prosrc => 'int2gt' }, +{ oid => '147', + proname => 'int4gt', proleakproof => 't', prorettype => 'bool', + proargtypes => 'int4 int4', prosrc => 'int4gt' }, +{ oid => '148', + proname => 'int2le', proleakproof => 't', prorettype => 'bool', + proargtypes => 'int2 int2', prosrc => 'int2le' }, +{ oid => '149', + proname => 'int4le', proleakproof => 't', prorettype => 'bool', + proargtypes => 'int4 int4', prosrc => 'int4le' }, +{ oid => '150', + proname => 'int4ge', proleakproof => 't', prorettype => 'bool', + proargtypes => 'int4 int4', prosrc => 'int4ge' }, +{ oid => '151', + proname => 'int2ge', proleakproof => 't', prorettype => 'bool', + proargtypes => 'int2 int2', prosrc => 'int2ge' }, +{ oid => '152', + proname => 'int2mul', prorettype => 'int2', proargtypes => 'int2 int2', + prosrc => 'int2mul' }, +{ oid => '153', + proname => 'int2div', prorettype => 'int2', proargtypes => 'int2 int2', + prosrc => 'int2div' }, +{ oid => '154', + proname => 'int4div', prorettype => 'int4', proargtypes => 'int4 int4', + prosrc => 'int4div' }, +{ oid => '155', + proname => 'int2mod', prorettype => 'int2', proargtypes => 'int2 int2', + prosrc => 'int2mod' }, +{ oid => '156', + proname => 'int4mod', prorettype => 'int4', proargtypes => 'int4 int4', + prosrc => 'int4mod' }, +{ oid => '157', + proname => 'textne', proleakproof => 't', prorettype => 'bool', + proargtypes => 'text text', prosrc => 'textne' }, +{ oid => '158', + proname => 'int24eq', proleakproof => 't', prorettype => 'bool', + proargtypes => 'int2 int4', prosrc => 'int24eq' }, +{ oid => '159', + proname => 'int42eq', proleakproof => 't', prorettype => 'bool', + proargtypes => 'int4 int2', prosrc => 'int42eq' }, +{ oid => '160', + proname => 'int24lt', proleakproof => 't', prorettype => 'bool', + proargtypes => 'int2 int4', prosrc => 'int24lt' }, +{ oid => '161', + proname => 'int42lt', proleakproof => 't', prorettype => 'bool', + proargtypes => 'int4 int2', prosrc => 'int42lt' }, +{ oid => '162', + proname => 'int24gt', proleakproof => 't', prorettype => 'bool', + proargtypes => 'int2 int4', prosrc => 'int24gt' }, +{ oid => '163', 
+ proname => 'int42gt', proleakproof => 't', prorettype => 'bool', + proargtypes => 'int4 int2', prosrc => 'int42gt' }, +{ oid => '164', + proname => 'int24ne', proleakproof => 't', prorettype => 'bool', + proargtypes => 'int2 int4', prosrc => 'int24ne' }, +{ oid => '165', + proname => 'int42ne', proleakproof => 't', prorettype => 'bool', + proargtypes => 'int4 int2', prosrc => 'int42ne' }, +{ oid => '166', + proname => 'int24le', proleakproof => 't', prorettype => 'bool', + proargtypes => 'int2 int4', prosrc => 'int24le' }, +{ oid => '167', + proname => 'int42le', proleakproof => 't', prorettype => 'bool', + proargtypes => 'int4 int2', prosrc => 'int42le' }, +{ oid => '168', + proname => 'int24ge', proleakproof => 't', prorettype => 'bool', + proargtypes => 'int2 int4', prosrc => 'int24ge' }, +{ oid => '169', + proname => 'int42ge', proleakproof => 't', prorettype => 'bool', + proargtypes => 'int4 int2', prosrc => 'int42ge' }, +{ oid => '170', + proname => 'int24mul', prorettype => 'int4', proargtypes => 'int2 int4', + prosrc => 'int24mul' }, +{ oid => '171', + proname => 'int42mul', prorettype => 'int4', proargtypes => 'int4 int2', + prosrc => 'int42mul' }, +{ oid => '172', + proname => 'int24div', prorettype => 'int4', proargtypes => 'int2 int4', + prosrc => 'int24div' }, +{ oid => '173', + proname => 'int42div', prorettype => 'int4', proargtypes => 'int4 int2', + prosrc => 'int42div' }, +{ oid => '176', + proname => 'int2pl', prorettype => 'int2', proargtypes => 'int2 int2', + prosrc => 'int2pl' }, +{ oid => '177', + proname => 'int4pl', prorettype => 'int4', proargtypes => 'int4 int4', + prosrc => 'int4pl' }, +{ oid => '178', + proname => 'int24pl', prorettype => 'int4', proargtypes => 'int2 int4', + prosrc => 'int24pl' }, +{ oid => '179', + proname => 'int42pl', prorettype => 'int4', proargtypes => 'int4 int2', + prosrc => 'int42pl' }, +{ oid => '180', + proname => 'int2mi', prorettype => 'int2', proargtypes => 'int2 int2', + prosrc => 'int2mi' }, +{ oid => '181', + proname => 'int4mi', prorettype => 'int4', proargtypes => 'int4 int4', + prosrc => 'int4mi' }, +{ oid => '182', + proname => 'int24mi', prorettype => 'int4', proargtypes => 'int2 int4', + prosrc => 'int24mi' }, +{ oid => '183', + proname => 'int42mi', prorettype => 'int4', proargtypes => 'int4 int2', + prosrc => 'int42mi' }, +{ oid => '184', + proname => 'oideq', proleakproof => 't', prorettype => 'bool', + proargtypes => 'oid oid', prosrc => 'oideq' }, +{ oid => '185', + proname => 'oidne', proleakproof => 't', prorettype => 'bool', + proargtypes => 'oid oid', prosrc => 'oidne' }, +{ oid => '186', + proname => 'box_same', prorettype => 'bool', proargtypes => 'box box', + prosrc => 'box_same' }, +{ oid => '187', + proname => 'box_contain', prorettype => 'bool', proargtypes => 'box box', + prosrc => 'box_contain' }, +{ oid => '188', + proname => 'box_left', prorettype => 'bool', proargtypes => 'box box', + prosrc => 'box_left' }, +{ oid => '189', + proname => 'box_overleft', prorettype => 'bool', proargtypes => 'box box', + prosrc => 'box_overleft' }, +{ oid => '190', + proname => 'box_overright', prorettype => 'bool', proargtypes => 'box box', + prosrc => 'box_overright' }, +{ oid => '191', + proname => 'box_right', prorettype => 'bool', proargtypes => 'box box', + prosrc => 'box_right' }, +{ oid => '192', + proname => 'box_contained', prorettype => 'bool', proargtypes => 'box box', + prosrc => 'box_contained' }, +{ oid => '193', + proname => 'box_contain_pt', prorettype => 'bool', proargtypes => 'box point', + prosrc => 
'box_contain_pt' }, + +{ oid => '195', descr => 'I/O', + proname => 'pg_node_tree_in', prorettype => 'pg_node_tree', + proargtypes => 'cstring', prosrc => 'pg_node_tree_in' }, +{ oid => '196', descr => 'I/O', + proname => 'pg_node_tree_out', prorettype => 'cstring', + proargtypes => 'pg_node_tree', prosrc => 'pg_node_tree_out' }, +{ oid => '197', descr => 'I/O', + proname => 'pg_node_tree_recv', provolatile => 's', + prorettype => 'pg_node_tree', proargtypes => 'internal', + prosrc => 'pg_node_tree_recv' }, +{ oid => '198', descr => 'I/O', + proname => 'pg_node_tree_send', provolatile => 's', prorettype => 'bytea', + proargtypes => 'pg_node_tree', prosrc => 'pg_node_tree_send' }, + +# OIDS 200 - 299 + +{ oid => '200', descr => 'I/O', + proname => 'float4in', prorettype => 'float4', proargtypes => 'cstring', + prosrc => 'float4in' }, +{ oid => '201', descr => 'I/O', + proname => 'float4out', prorettype => 'cstring', proargtypes => 'float4', + prosrc => 'float4out' }, +{ oid => '202', + proname => 'float4mul', prorettype => 'float4', + proargtypes => 'float4 float4', prosrc => 'float4mul' }, +{ oid => '203', + proname => 'float4div', prorettype => 'float4', + proargtypes => 'float4 float4', prosrc => 'float4div' }, +{ oid => '204', + proname => 'float4pl', prorettype => 'float4', proargtypes => 'float4 float4', + prosrc => 'float4pl' }, +{ oid => '205', + proname => 'float4mi', prorettype => 'float4', proargtypes => 'float4 float4', + prosrc => 'float4mi' }, +{ oid => '206', + proname => 'float4um', prorettype => 'float4', proargtypes => 'float4', + prosrc => 'float4um' }, +{ oid => '207', + proname => 'float4abs', prorettype => 'float4', proargtypes => 'float4', + prosrc => 'float4abs' }, +{ oid => '208', descr => 'aggregate transition function', + proname => 'float4_accum', prorettype => '_float8', + proargtypes => '_float8 float4', prosrc => 'float4_accum' }, +{ oid => '209', descr => 'larger of two', + proname => 'float4larger', prorettype => 'float4', + proargtypes => 'float4 float4', prosrc => 'float4larger' }, +{ oid => '211', descr => 'smaller of two', + proname => 'float4smaller', prorettype => 'float4', + proargtypes => 'float4 float4', prosrc => 'float4smaller' }, + +{ oid => '212', + proname => 'int4um', prorettype => 'int4', proargtypes => 'int4', + prosrc => 'int4um' }, +{ oid => '213', + proname => 'int2um', prorettype => 'int2', proargtypes => 'int2', + prosrc => 'int2um' }, + +{ oid => '214', descr => 'I/O', + proname => 'float8in', prorettype => 'float8', proargtypes => 'cstring', + prosrc => 'float8in' }, +{ oid => '215', descr => 'I/O', + proname => 'float8out', prorettype => 'cstring', proargtypes => 'float8', + prosrc => 'float8out' }, +{ oid => '216', + proname => 'float8mul', prorettype => 'float8', + proargtypes => 'float8 float8', prosrc => 'float8mul' }, +{ oid => '217', + proname => 'float8div', prorettype => 'float8', + proargtypes => 'float8 float8', prosrc => 'float8div' }, +{ oid => '218', + proname => 'float8pl', prorettype => 'float8', proargtypes => 'float8 float8', + prosrc => 'float8pl' }, +{ oid => '219', + proname => 'float8mi', prorettype => 'float8', proargtypes => 'float8 float8', + prosrc => 'float8mi' }, +{ oid => '220', + proname => 'float8um', prorettype => 'float8', proargtypes => 'float8', + prosrc => 'float8um' }, +{ oid => '221', + proname => 'float8abs', prorettype => 'float8', proargtypes => 'float8', + prosrc => 'float8abs' }, +{ oid => '222', descr => 'aggregate transition function', + proname => 'float8_accum', prorettype => '_float8', + 
proargtypes => '_float8 float8', prosrc => 'float8_accum' }, +{ oid => '276', descr => 'aggregate combine function', + proname => 'float8_combine', prorettype => '_float8', + proargtypes => '_float8 _float8', prosrc => 'float8_combine' }, +{ oid => '223', descr => 'larger of two', + proname => 'float8larger', prorettype => 'float8', + proargtypes => 'float8 float8', prosrc => 'float8larger' }, +{ oid => '224', descr => 'smaller of two', + proname => 'float8smaller', prorettype => 'float8', + proargtypes => 'float8 float8', prosrc => 'float8smaller' }, + +{ oid => '225', + proname => 'lseg_center', prorettype => 'point', proargtypes => 'lseg', + prosrc => 'lseg_center' }, +{ oid => '226', + proname => 'path_center', prorettype => 'point', proargtypes => 'path', + prosrc => 'path_center' }, +{ oid => '227', + proname => 'poly_center', prorettype => 'point', proargtypes => 'polygon', + prosrc => 'poly_center' }, + +{ oid => '228', descr => 'round to nearest integer', + proname => 'dround', prorettype => 'float8', proargtypes => 'float8', + prosrc => 'dround' }, +{ oid => '229', descr => 'truncate to integer', + proname => 'dtrunc', prorettype => 'float8', proargtypes => 'float8', + prosrc => 'dtrunc' }, +{ oid => '2308', descr => 'nearest integer >= value', + proname => 'ceil', prorettype => 'float8', proargtypes => 'float8', + prosrc => 'dceil' }, +{ oid => '2320', descr => 'nearest integer >= value', + proname => 'ceiling', prorettype => 'float8', proargtypes => 'float8', + prosrc => 'dceil' }, +{ oid => '2309', descr => 'nearest integer <= value', + proname => 'floor', prorettype => 'float8', proargtypes => 'float8', + prosrc => 'dfloor' }, +{ oid => '2310', descr => 'sign of value', + proname => 'sign', prorettype => 'float8', proargtypes => 'float8', + prosrc => 'dsign' }, +{ oid => '230', + proname => 'dsqrt', prorettype => 'float8', proargtypes => 'float8', + prosrc => 'dsqrt' }, +{ oid => '231', + proname => 'dcbrt', prorettype => 'float8', proargtypes => 'float8', + prosrc => 'dcbrt' }, +{ oid => '232', + proname => 'dpow', prorettype => 'float8', proargtypes => 'float8 float8', + prosrc => 'dpow' }, +{ oid => '233', descr => 'natural exponential (e^x)', + proname => 'dexp', prorettype => 'float8', proargtypes => 'float8', + prosrc => 'dexp' }, +{ oid => '234', descr => 'natural logarithm', + proname => 'dlog1', prorettype => 'float8', proargtypes => 'float8', + prosrc => 'dlog1' }, +{ oid => '235', descr => 'convert int2 to float8', + proname => 'float8', prorettype => 'float8', proargtypes => 'int2', + prosrc => 'i2tod' }, +{ oid => '236', descr => 'convert int2 to float4', + proname => 'float4', prorettype => 'float4', proargtypes => 'int2', + prosrc => 'i2tof' }, +{ oid => '237', descr => 'convert float8 to int2', + proname => 'int2', prorettype => 'int2', proargtypes => 'float8', + prosrc => 'dtoi2' }, +{ oid => '238', descr => 'convert float4 to int2', + proname => 'int2', prorettype => 'int2', proargtypes => 'float4', + prosrc => 'ftoi2' }, +{ oid => '239', + proname => 'line_distance', prorettype => 'float8', + proargtypes => 'line line', prosrc => 'line_distance' }, + +{ oid => '274', + descr => 'current date and time - increments during transactions', + proname => 'timeofday', provolatile => 'v', prorettype => 'text', + proargtypes => '', prosrc => 'timeofday' }, + +{ oid => '277', + proname => 'inter_sl', prorettype => 'bool', proargtypes => 'lseg line', + prosrc => 'inter_sl' }, +{ oid => '278', + proname => 'inter_lb', prorettype => 'bool', proargtypes => 'line box', + 
prosrc => 'inter_lb' }, + +{ oid => '279', + proname => 'float48mul', prorettype => 'float8', + proargtypes => 'float4 float8', prosrc => 'float48mul' }, +{ oid => '280', + proname => 'float48div', prorettype => 'float8', + proargtypes => 'float4 float8', prosrc => 'float48div' }, +{ oid => '281', + proname => 'float48pl', prorettype => 'float8', + proargtypes => 'float4 float8', prosrc => 'float48pl' }, +{ oid => '282', + proname => 'float48mi', prorettype => 'float8', + proargtypes => 'float4 float8', prosrc => 'float48mi' }, +{ oid => '283', + proname => 'float84mul', prorettype => 'float8', + proargtypes => 'float8 float4', prosrc => 'float84mul' }, +{ oid => '284', + proname => 'float84div', prorettype => 'float8', + proargtypes => 'float8 float4', prosrc => 'float84div' }, +{ oid => '285', + proname => 'float84pl', prorettype => 'float8', + proargtypes => 'float8 float4', prosrc => 'float84pl' }, +{ oid => '286', + proname => 'float84mi', prorettype => 'float8', + proargtypes => 'float8 float4', prosrc => 'float84mi' }, + +{ oid => '287', + proname => 'float4eq', proleakproof => 't', prorettype => 'bool', + proargtypes => 'float4 float4', prosrc => 'float4eq' }, +{ oid => '288', + proname => 'float4ne', proleakproof => 't', prorettype => 'bool', + proargtypes => 'float4 float4', prosrc => 'float4ne' }, +{ oid => '289', + proname => 'float4lt', proleakproof => 't', prorettype => 'bool', + proargtypes => 'float4 float4', prosrc => 'float4lt' }, +{ oid => '290', + proname => 'float4le', proleakproof => 't', prorettype => 'bool', + proargtypes => 'float4 float4', prosrc => 'float4le' }, +{ oid => '291', + proname => 'float4gt', proleakproof => 't', prorettype => 'bool', + proargtypes => 'float4 float4', prosrc => 'float4gt' }, +{ oid => '292', + proname => 'float4ge', proleakproof => 't', prorettype => 'bool', + proargtypes => 'float4 float4', prosrc => 'float4ge' }, + +{ oid => '293', + proname => 'float8eq', proleakproof => 't', prorettype => 'bool', + proargtypes => 'float8 float8', prosrc => 'float8eq' }, +{ oid => '294', + proname => 'float8ne', proleakproof => 't', prorettype => 'bool', + proargtypes => 'float8 float8', prosrc => 'float8ne' }, +{ oid => '295', + proname => 'float8lt', proleakproof => 't', prorettype => 'bool', + proargtypes => 'float8 float8', prosrc => 'float8lt' }, +{ oid => '296', + proname => 'float8le', proleakproof => 't', prorettype => 'bool', + proargtypes => 'float8 float8', prosrc => 'float8le' }, +{ oid => '297', + proname => 'float8gt', proleakproof => 't', prorettype => 'bool', + proargtypes => 'float8 float8', prosrc => 'float8gt' }, +{ oid => '298', + proname => 'float8ge', proleakproof => 't', prorettype => 'bool', + proargtypes => 'float8 float8', prosrc => 'float8ge' }, + +{ oid => '299', + proname => 'float48eq', proleakproof => 't', prorettype => 'bool', + proargtypes => 'float4 float8', prosrc => 'float48eq' }, + +# OIDS 300 - 399 + +{ oid => '300', + proname => 'float48ne', proleakproof => 't', prorettype => 'bool', + proargtypes => 'float4 float8', prosrc => 'float48ne' }, +{ oid => '301', + proname => 'float48lt', proleakproof => 't', prorettype => 'bool', + proargtypes => 'float4 float8', prosrc => 'float48lt' }, +{ oid => '302', + proname => 'float48le', proleakproof => 't', prorettype => 'bool', + proargtypes => 'float4 float8', prosrc => 'float48le' }, +{ oid => '303', + proname => 'float48gt', proleakproof => 't', prorettype => 'bool', + proargtypes => 'float4 float8', prosrc => 'float48gt' }, +{ oid => '304', + proname => 'float48ge', 
proleakproof => 't', prorettype => 'bool', + proargtypes => 'float4 float8', prosrc => 'float48ge' }, +{ oid => '305', + proname => 'float84eq', proleakproof => 't', prorettype => 'bool', + proargtypes => 'float8 float4', prosrc => 'float84eq' }, +{ oid => '306', + proname => 'float84ne', proleakproof => 't', prorettype => 'bool', + proargtypes => 'float8 float4', prosrc => 'float84ne' }, +{ oid => '307', + proname => 'float84lt', proleakproof => 't', prorettype => 'bool', + proargtypes => 'float8 float4', prosrc => 'float84lt' }, +{ oid => '308', + proname => 'float84le', proleakproof => 't', prorettype => 'bool', + proargtypes => 'float8 float4', prosrc => 'float84le' }, +{ oid => '309', + proname => 'float84gt', proleakproof => 't', prorettype => 'bool', + proargtypes => 'float8 float4', prosrc => 'float84gt' }, +{ oid => '310', + proname => 'float84ge', proleakproof => 't', prorettype => 'bool', + proargtypes => 'float8 float4', prosrc => 'float84ge' }, +{ oid => '320', descr => 'bucket number of operand in equal-width histogram', + proname => 'width_bucket', prorettype => 'int4', + proargtypes => 'float8 float8 float8 int4', prosrc => 'width_bucket_float8' }, + +{ oid => '311', descr => 'convert float4 to float8', + proname => 'float8', prorettype => 'float8', proargtypes => 'float4', + prosrc => 'ftod' }, +{ oid => '312', descr => 'convert float8 to float4', + proname => 'float4', prorettype => 'float4', proargtypes => 'float8', + prosrc => 'dtof' }, +{ oid => '313', descr => 'convert int2 to int4', + proname => 'int4', prorettype => 'int4', proargtypes => 'int2', + prosrc => 'i2toi4' }, +{ oid => '314', descr => 'convert int4 to int2', + proname => 'int2', prorettype => 'int2', proargtypes => 'int4', + prosrc => 'i4toi2' }, +{ oid => '316', descr => 'convert int4 to float8', + proname => 'float8', prorettype => 'float8', proargtypes => 'int4', + prosrc => 'i4tod' }, +{ oid => '317', descr => 'convert float8 to int4', + proname => 'int4', prorettype => 'int4', proargtypes => 'float8', + prosrc => 'dtoi4' }, +{ oid => '318', descr => 'convert int4 to float4', + proname => 'float4', prorettype => 'float4', proargtypes => 'int4', + prosrc => 'i4tof' }, +{ oid => '319', descr => 'convert float4 to int4', + proname => 'int4', prorettype => 'int4', proargtypes => 'float4', + prosrc => 'ftoi4' }, + +# Index access method handlers +{ oid => '330', descr => 'btree index access method handler', + proname => 'bthandler', provolatile => 'v', prorettype => 'index_am_handler', + proargtypes => 'internal', prosrc => 'bthandler' }, +{ oid => '331', descr => 'hash index access method handler', + proname => 'hashhandler', provolatile => 'v', + prorettype => 'index_am_handler', proargtypes => 'internal', + prosrc => 'hashhandler' }, +{ oid => '332', descr => 'gist index access method handler', + proname => 'gisthandler', provolatile => 'v', + prorettype => 'index_am_handler', proargtypes => 'internal', + prosrc => 'gisthandler' }, +{ oid => '333', descr => 'gin index access method handler', + proname => 'ginhandler', provolatile => 'v', prorettype => 'index_am_handler', + proargtypes => 'internal', prosrc => 'ginhandler' }, +{ oid => '334', descr => 'spgist index access method handler', + proname => 'spghandler', provolatile => 'v', prorettype => 'index_am_handler', + proargtypes => 'internal', prosrc => 'spghandler' }, +{ oid => '335', descr => 'brin index access method handler', + proname => 'brinhandler', provolatile => 'v', + prorettype => 'index_am_handler', proargtypes => 'internal', + prosrc => 
'brinhandler' }, +{ oid => '3952', descr => 'brin: standalone scan new table pages', + proname => 'brin_summarize_new_values', provolatile => 'v', + proparallel => 'u', prorettype => 'int4', proargtypes => 'regclass', + prosrc => 'brin_summarize_new_values' }, +{ oid => '3999', descr => 'brin: standalone scan new table pages', + proname => 'brin_summarize_range', provolatile => 'v', proparallel => 'u', + prorettype => 'int4', proargtypes => 'regclass int8', + prosrc => 'brin_summarize_range' }, +{ oid => '4014', descr => 'brin: desummarize page range', + proname => 'brin_desummarize_range', provolatile => 'v', proparallel => 'u', + prorettype => 'void', proargtypes => 'regclass int8', + prosrc => 'brin_desummarize_range' }, + +{ oid => '338', descr => 'validate an operator class', + proname => 'amvalidate', provolatile => 'v', prorettype => 'bool', + proargtypes => 'oid', prosrc => 'amvalidate' }, + +{ oid => '636', descr => 'test property of an index access method', + proname => 'pg_indexam_has_property', provolatile => 's', + prorettype => 'bool', proargtypes => 'oid text', + prosrc => 'pg_indexam_has_property' }, +{ oid => '637', descr => 'test property of an index', + proname => 'pg_index_has_property', provolatile => 's', prorettype => 'bool', + proargtypes => 'regclass text', prosrc => 'pg_index_has_property' }, +{ oid => '638', descr => 'test property of an index column', + proname => 'pg_index_column_has_property', provolatile => 's', + prorettype => 'bool', proargtypes => 'regclass int4 text', + prosrc => 'pg_index_column_has_property' }, + +{ oid => '339', + proname => 'poly_same', prorettype => 'bool', + proargtypes => 'polygon polygon', prosrc => 'poly_same' }, +{ oid => '340', + proname => 'poly_contain', prorettype => 'bool', + proargtypes => 'polygon polygon', prosrc => 'poly_contain' }, +{ oid => '341', + proname => 'poly_left', prorettype => 'bool', + proargtypes => 'polygon polygon', prosrc => 'poly_left' }, +{ oid => '342', + proname => 'poly_overleft', prorettype => 'bool', + proargtypes => 'polygon polygon', prosrc => 'poly_overleft' }, +{ oid => '343', + proname => 'poly_overright', prorettype => 'bool', + proargtypes => 'polygon polygon', prosrc => 'poly_overright' }, +{ oid => '344', + proname => 'poly_right', prorettype => 'bool', + proargtypes => 'polygon polygon', prosrc => 'poly_right' }, +{ oid => '345', + proname => 'poly_contained', prorettype => 'bool', + proargtypes => 'polygon polygon', prosrc => 'poly_contained' }, +{ oid => '346', + proname => 'poly_overlap', prorettype => 'bool', + proargtypes => 'polygon polygon', prosrc => 'poly_overlap' }, +{ oid => '347', descr => 'I/O', + proname => 'poly_in', prorettype => 'polygon', proargtypes => 'cstring', + prosrc => 'poly_in' }, +{ oid => '348', descr => 'I/O', + proname => 'poly_out', prorettype => 'cstring', proargtypes => 'polygon', + prosrc => 'poly_out' }, + +{ oid => '350', descr => 'less-equal-greater', + proname => 'btint2cmp', proleakproof => 't', prorettype => 'int4', + proargtypes => 'int2 int2', prosrc => 'btint2cmp' }, +{ oid => '3129', descr => 'sort support', + proname => 'btint2sortsupport', prorettype => 'void', + proargtypes => 'internal', prosrc => 'btint2sortsupport' }, +{ oid => '351', descr => 'less-equal-greater', + proname => 'btint4cmp', proleakproof => 't', prorettype => 'int4', + proargtypes => 'int4 int4', prosrc => 'btint4cmp' }, +{ oid => '3130', descr => 'sort support', + proname => 'btint4sortsupport', prorettype => 'void', + proargtypes => 'internal', prosrc => 
'btint4sortsupport' }, +{ oid => '842', descr => 'less-equal-greater', + proname => 'btint8cmp', proleakproof => 't', prorettype => 'int4', + proargtypes => 'int8 int8', prosrc => 'btint8cmp' }, +{ oid => '3131', descr => 'sort support', + proname => 'btint8sortsupport', prorettype => 'void', + proargtypes => 'internal', prosrc => 'btint8sortsupport' }, +{ oid => '354', descr => 'less-equal-greater', + proname => 'btfloat4cmp', proleakproof => 't', prorettype => 'int4', + proargtypes => 'float4 float4', prosrc => 'btfloat4cmp' }, +{ oid => '3132', descr => 'sort support', + proname => 'btfloat4sortsupport', prorettype => 'void', + proargtypes => 'internal', prosrc => 'btfloat4sortsupport' }, +{ oid => '355', descr => 'less-equal-greater', + proname => 'btfloat8cmp', proleakproof => 't', prorettype => 'int4', + proargtypes => 'float8 float8', prosrc => 'btfloat8cmp' }, +{ oid => '3133', descr => 'sort support', + proname => 'btfloat8sortsupport', prorettype => 'void', + proargtypes => 'internal', prosrc => 'btfloat8sortsupport' }, +{ oid => '356', descr => 'less-equal-greater', + proname => 'btoidcmp', proleakproof => 't', prorettype => 'int4', + proargtypes => 'oid oid', prosrc => 'btoidcmp' }, +{ oid => '3134', descr => 'sort support', + proname => 'btoidsortsupport', prorettype => 'void', + proargtypes => 'internal', prosrc => 'btoidsortsupport' }, +{ oid => '404', descr => 'less-equal-greater', + proname => 'btoidvectorcmp', prorettype => 'int4', + proargtypes => 'oidvector oidvector', prosrc => 'btoidvectorcmp' }, +{ oid => '358', descr => 'less-equal-greater', + proname => 'btcharcmp', proleakproof => 't', prorettype => 'int4', + proargtypes => 'char char', prosrc => 'btcharcmp' }, +{ oid => '359', descr => 'less-equal-greater', + proname => 'btnamecmp', proleakproof => 't', prorettype => 'int4', + proargtypes => 'name name', prosrc => 'btnamecmp' }, +{ oid => '3135', descr => 'sort support', + proname => 'btnamesortsupport', prorettype => 'void', + proargtypes => 'internal', prosrc => 'btnamesortsupport' }, +{ oid => '360', descr => 'less-equal-greater', + proname => 'bttextcmp', prorettype => 'int4', proargtypes => 'text text', + prosrc => 'bttextcmp' }, +{ oid => '3255', descr => 'sort support', + proname => 'bttextsortsupport', prorettype => 'void', + proargtypes => 'internal', prosrc => 'bttextsortsupport' }, +{ oid => '377', descr => 'less-equal-greater', + proname => 'cash_cmp', proleakproof => 't', prorettype => 'int4', + proargtypes => 'money money', prosrc => 'cash_cmp' }, +{ oid => '382', descr => 'less-equal-greater', + proname => 'btarraycmp', prorettype => 'int4', + proargtypes => 'anyarray anyarray', prosrc => 'btarraycmp' }, +{ oid => '4126', descr => 'window RANGE support', + proname => 'in_range', prorettype => 'bool', + proargtypes => 'int8 int8 int8 bool bool', prosrc => 'in_range_int8_int8' }, +{ oid => '4127', descr => 'window RANGE support', + proname => 'in_range', prorettype => 'bool', + proargtypes => 'int4 int4 int8 bool bool', prosrc => 'in_range_int4_int8' }, +{ oid => '4128', descr => 'window RANGE support', + proname => 'in_range', prorettype => 'bool', + proargtypes => 'int4 int4 int4 bool bool', prosrc => 'in_range_int4_int4' }, +{ oid => '4129', descr => 'window RANGE support', + proname => 'in_range', prorettype => 'bool', + proargtypes => 'int4 int4 int2 bool bool', prosrc => 'in_range_int4_int2' }, +{ oid => '4130', descr => 'window RANGE support', + proname => 'in_range', prorettype => 'bool', + proargtypes => 'int2 int2 int8 bool bool', prosrc => 
'in_range_int2_int8' }, +{ oid => '4131', descr => 'window RANGE support', + proname => 'in_range', prorettype => 'bool', + proargtypes => 'int2 int2 int4 bool bool', prosrc => 'in_range_int2_int4' }, +{ oid => '4132', descr => 'window RANGE support', + proname => 'in_range', prorettype => 'bool', + proargtypes => 'int2 int2 int2 bool bool', prosrc => 'in_range_int2_int2' }, +{ oid => '4139', descr => 'window RANGE support', + proname => 'in_range', prorettype => 'bool', + proargtypes => 'float8 float8 float8 bool bool', + prosrc => 'in_range_float8_float8' }, +{ oid => '4140', descr => 'window RANGE support', + proname => 'in_range', prorettype => 'bool', + proargtypes => 'float4 float4 float8 bool bool', + prosrc => 'in_range_float4_float8' }, +{ oid => '4141', descr => 'window RANGE support', + proname => 'in_range', prorettype => 'bool', + proargtypes => 'numeric numeric numeric bool bool', + prosrc => 'in_range_numeric_numeric' }, + +{ oid => '361', + proname => 'lseg_distance', prorettype => 'float8', + proargtypes => 'lseg lseg', prosrc => 'lseg_distance' }, +{ oid => '362', + proname => 'lseg_interpt', prorettype => 'point', proargtypes => 'lseg lseg', + prosrc => 'lseg_interpt' }, +{ oid => '363', + proname => 'dist_ps', prorettype => 'float8', proargtypes => 'point lseg', + prosrc => 'dist_ps' }, +{ oid => '364', + proname => 'dist_pb', prorettype => 'float8', proargtypes => 'point box', + prosrc => 'dist_pb' }, +{ oid => '365', + proname => 'dist_sb', prorettype => 'float8', proargtypes => 'lseg box', + prosrc => 'dist_sb' }, +{ oid => '366', + proname => 'close_ps', prorettype => 'point', proargtypes => 'point lseg', + prosrc => 'close_ps' }, +{ oid => '367', + proname => 'close_pb', prorettype => 'point', proargtypes => 'point box', + prosrc => 'close_pb' }, +{ oid => '368', + proname => 'close_sb', prorettype => 'point', proargtypes => 'lseg box', + prosrc => 'close_sb' }, +{ oid => '369', + proname => 'on_ps', prorettype => 'bool', proargtypes => 'point lseg', + prosrc => 'on_ps' }, +{ oid => '370', + proname => 'path_distance', prorettype => 'float8', + proargtypes => 'path path', prosrc => 'path_distance' }, +{ oid => '371', + proname => 'dist_ppath', prorettype => 'float8', proargtypes => 'point path', + prosrc => 'dist_ppath' }, +{ oid => '372', + proname => 'on_sb', prorettype => 'bool', proargtypes => 'lseg box', + prosrc => 'on_sb' }, +{ oid => '373', + proname => 'inter_sb', prorettype => 'bool', proargtypes => 'lseg box', + prosrc => 'inter_sb' }, + +# OIDS 400 - 499 + +{ oid => '401', descr => 'convert char(n) to text', + proname => 'text', prorettype => 'text', proargtypes => 'bpchar', + prosrc => 'rtrim1' }, +{ oid => '406', descr => 'convert name to text', + proname => 'text', prorettype => 'text', proargtypes => 'name', + prosrc => 'name_text' }, +{ oid => '407', descr => 'convert text to name', + proname => 'name', prorettype => 'name', proargtypes => 'text', + prosrc => 'text_name' }, +{ oid => '408', descr => 'convert name to char(n)', + proname => 'bpchar', prorettype => 'bpchar', proargtypes => 'name', + prosrc => 'name_bpchar' }, +{ oid => '409', descr => 'convert char(n) to name', + proname => 'name', prorettype => 'name', proargtypes => 'bpchar', + prosrc => 'bpchar_name' }, + +{ oid => '449', descr => 'hash', + proname => 'hashint2', prorettype => 'int4', proargtypes => 'int2', + prosrc => 'hashint2' }, +{ oid => '441', descr => 'hash', + proname => 'hashint2extended', prorettype => 'int8', + proargtypes => 'int2 int8', prosrc => 'hashint2extended' }, 
+{ oid => '450', descr => 'hash', + proname => 'hashint4', prorettype => 'int4', proargtypes => 'int4', + prosrc => 'hashint4' }, +{ oid => '425', descr => 'hash', + proname => 'hashint4extended', prorettype => 'int8', + proargtypes => 'int4 int8', prosrc => 'hashint4extended' }, +{ oid => '949', descr => 'hash', + proname => 'hashint8', prorettype => 'int4', proargtypes => 'int8', + prosrc => 'hashint8' }, +{ oid => '442', descr => 'hash', + proname => 'hashint8extended', prorettype => 'int8', + proargtypes => 'int8 int8', prosrc => 'hashint8extended' }, +{ oid => '451', descr => 'hash', + proname => 'hashfloat4', prorettype => 'int4', proargtypes => 'float4', + prosrc => 'hashfloat4' }, +{ oid => '443', descr => 'hash', + proname => 'hashfloat4extended', prorettype => 'int8', + proargtypes => 'float4 int8', prosrc => 'hashfloat4extended' }, +{ oid => '452', descr => 'hash', + proname => 'hashfloat8', prorettype => 'int4', proargtypes => 'float8', + prosrc => 'hashfloat8' }, +{ oid => '444', descr => 'hash', + proname => 'hashfloat8extended', prorettype => 'int8', + proargtypes => 'float8 int8', prosrc => 'hashfloat8extended' }, +{ oid => '453', descr => 'hash', + proname => 'hashoid', prorettype => 'int4', proargtypes => 'oid', + prosrc => 'hashoid' }, +{ oid => '445', descr => 'hash', + proname => 'hashoidextended', prorettype => 'int8', proargtypes => 'oid int8', + prosrc => 'hashoidextended' }, +{ oid => '454', descr => 'hash', + proname => 'hashchar', prorettype => 'int4', proargtypes => 'char', + prosrc => 'hashchar' }, +{ oid => '446', descr => 'hash', + proname => 'hashcharextended', prorettype => 'int8', + proargtypes => 'char int8', prosrc => 'hashcharextended' }, +{ oid => '455', descr => 'hash', + proname => 'hashname', prorettype => 'int4', proargtypes => 'name', + prosrc => 'hashname' }, +{ oid => '447', descr => 'hash', + proname => 'hashnameextended', prorettype => 'int8', + proargtypes => 'name int8', prosrc => 'hashnameextended' }, +{ oid => '400', descr => 'hash', + proname => 'hashtext', prorettype => 'int4', proargtypes => 'text', + prosrc => 'hashtext' }, +{ oid => '448', descr => 'hash', + proname => 'hashtextextended', prorettype => 'int8', + proargtypes => 'text int8', prosrc => 'hashtextextended' }, +{ oid => '456', descr => 'hash', + proname => 'hashvarlena', prorettype => 'int4', proargtypes => 'internal', + prosrc => 'hashvarlena' }, +{ oid => '772', descr => 'hash', + proname => 'hashvarlenaextended', prorettype => 'int8', + proargtypes => 'internal int8', prosrc => 'hashvarlenaextended' }, +{ oid => '457', descr => 'hash', + proname => 'hashoidvector', prorettype => 'int4', proargtypes => 'oidvector', + prosrc => 'hashoidvector' }, +{ oid => '776', descr => 'hash', + proname => 'hashoidvectorextended', prorettype => 'int8', + proargtypes => 'oidvector int8', prosrc => 'hashoidvectorextended' }, +{ oid => '329', descr => 'hash', + proname => 'hash_aclitem', prorettype => 'int4', proargtypes => 'aclitem', + prosrc => 'hash_aclitem' }, +{ oid => '777', descr => 'hash', + proname => 'hash_aclitem_extended', prorettype => 'int8', + proargtypes => 'aclitem int8', prosrc => 'hash_aclitem_extended' }, +{ oid => '399', descr => 'hash', + proname => 'hashmacaddr', prorettype => 'int4', proargtypes => 'macaddr', + prosrc => 'hashmacaddr' }, +{ oid => '778', descr => 'hash', + proname => 'hashmacaddrextended', prorettype => 'int8', + proargtypes => 'macaddr int8', prosrc => 'hashmacaddrextended' }, +{ oid => '422', descr => 'hash', + proname => 'hashinet', prorettype => 
'int4', proargtypes => 'inet', + prosrc => 'hashinet' }, +{ oid => '779', descr => 'hash', + proname => 'hashinetextended', prorettype => 'int8', + proargtypes => 'inet int8', prosrc => 'hashinetextended' }, +{ oid => '432', descr => 'hash', + proname => 'hash_numeric', prorettype => 'int4', proargtypes => 'numeric', + prosrc => 'hash_numeric' }, +{ oid => '780', descr => 'hash', + proname => 'hash_numeric_extended', prorettype => 'int8', + proargtypes => 'numeric int8', prosrc => 'hash_numeric_extended' }, +{ oid => '328', descr => 'hash', + proname => 'hashmacaddr8', prorettype => 'int4', proargtypes => 'macaddr8', + prosrc => 'hashmacaddr8' }, +{ oid => '781', descr => 'hash', + proname => 'hashmacaddr8extended', prorettype => 'int8', + proargtypes => 'macaddr8 int8', prosrc => 'hashmacaddr8extended' }, + +{ oid => '438', descr => 'count the number of NULL arguments', + proname => 'num_nulls', provariadic => 'any', proisstrict => 'f', + prorettype => 'int4', proargtypes => 'any', proallargtypes => '{any}', + proargmodes => '{v}', prosrc => 'pg_num_nulls' }, +{ oid => '440', descr => 'count the number of non-NULL arguments', + proname => 'num_nonnulls', provariadic => 'any', proisstrict => 'f', + prorettype => 'int4', proargtypes => 'any', proallargtypes => '{any}', + proargmodes => '{v}', prosrc => 'pg_num_nonnulls' }, + +{ oid => '458', descr => 'larger of two', + proname => 'text_larger', prorettype => 'text', proargtypes => 'text text', + prosrc => 'text_larger' }, +{ oid => '459', descr => 'smaller of two', + proname => 'text_smaller', prorettype => 'text', proargtypes => 'text text', + prosrc => 'text_smaller' }, + +{ oid => '460', descr => 'I/O', + proname => 'int8in', prorettype => 'int8', proargtypes => 'cstring', + prosrc => 'int8in' }, +{ oid => '461', descr => 'I/O', + proname => 'int8out', prorettype => 'cstring', proargtypes => 'int8', + prosrc => 'int8out' }, +{ oid => '462', + proname => 'int8um', prorettype => 'int8', proargtypes => 'int8', + prosrc => 'int8um' }, +{ oid => '463', + proname => 'int8pl', prorettype => 'int8', proargtypes => 'int8 int8', + prosrc => 'int8pl' }, +{ oid => '464', + proname => 'int8mi', prorettype => 'int8', proargtypes => 'int8 int8', + prosrc => 'int8mi' }, +{ oid => '465', + proname => 'int8mul', prorettype => 'int8', proargtypes => 'int8 int8', + prosrc => 'int8mul' }, +{ oid => '466', + proname => 'int8div', prorettype => 'int8', proargtypes => 'int8 int8', + prosrc => 'int8div' }, +{ oid => '467', + proname => 'int8eq', proleakproof => 't', prorettype => 'bool', + proargtypes => 'int8 int8', prosrc => 'int8eq' }, +{ oid => '468', + proname => 'int8ne', proleakproof => 't', prorettype => 'bool', + proargtypes => 'int8 int8', prosrc => 'int8ne' }, +{ oid => '469', + proname => 'int8lt', proleakproof => 't', prorettype => 'bool', + proargtypes => 'int8 int8', prosrc => 'int8lt' }, +{ oid => '470', + proname => 'int8gt', proleakproof => 't', prorettype => 'bool', + proargtypes => 'int8 int8', prosrc => 'int8gt' }, +{ oid => '471', + proname => 'int8le', proleakproof => 't', prorettype => 'bool', + proargtypes => 'int8 int8', prosrc => 'int8le' }, +{ oid => '472', + proname => 'int8ge', proleakproof => 't', prorettype => 'bool', + proargtypes => 'int8 int8', prosrc => 'int8ge' }, + +{ oid => '474', + proname => 'int84eq', proleakproof => 't', prorettype => 'bool', + proargtypes => 'int8 int4', prosrc => 'int84eq' }, +{ oid => '475', + proname => 'int84ne', proleakproof => 't', prorettype => 'bool', + proargtypes => 'int8 int4', prosrc => 
'int84ne' }, +{ oid => '476', + proname => 'int84lt', proleakproof => 't', prorettype => 'bool', + proargtypes => 'int8 int4', prosrc => 'int84lt' }, +{ oid => '477', + proname => 'int84gt', proleakproof => 't', prorettype => 'bool', + proargtypes => 'int8 int4', prosrc => 'int84gt' }, +{ oid => '478', + proname => 'int84le', proleakproof => 't', prorettype => 'bool', + proargtypes => 'int8 int4', prosrc => 'int84le' }, +{ oid => '479', + proname => 'int84ge', proleakproof => 't', prorettype => 'bool', + proargtypes => 'int8 int4', prosrc => 'int84ge' }, + +{ oid => '480', descr => 'convert int8 to int4', + proname => 'int4', prorettype => 'int4', proargtypes => 'int8', + prosrc => 'int84' }, +{ oid => '481', descr => 'convert int4 to int8', + proname => 'int8', prorettype => 'int8', proargtypes => 'int4', + prosrc => 'int48' }, +{ oid => '482', descr => 'convert int8 to float8', + proname => 'float8', prorettype => 'float8', proargtypes => 'int8', + prosrc => 'i8tod' }, +{ oid => '483', descr => 'convert float8 to int8', + proname => 'int8', prorettype => 'int8', proargtypes => 'float8', + prosrc => 'dtoi8' }, + +# OIDS 500 - 599 + +# OIDS 600 - 699 + +{ oid => '626', descr => 'hash', + proname => 'hash_array', prorettype => 'int4', proargtypes => 'anyarray', + prosrc => 'hash_array' }, +{ oid => '782', descr => 'hash', + proname => 'hash_array_extended', prorettype => 'int8', + proargtypes => 'anyarray int8', prosrc => 'hash_array_extended' }, + +{ oid => '652', descr => 'convert int8 to float4', + proname => 'float4', prorettype => 'float4', proargtypes => 'int8', + prosrc => 'i8tof' }, +{ oid => '653', descr => 'convert float4 to int8', + proname => 'int8', prorettype => 'int8', proargtypes => 'float4', + prosrc => 'ftoi8' }, + +{ oid => '714', descr => 'convert int8 to int2', + proname => 'int2', prorettype => 'int2', proargtypes => 'int8', + prosrc => 'int82' }, +{ oid => '754', descr => 'convert int2 to int8', + proname => 'int8', prorettype => 'int8', proargtypes => 'int2', + prosrc => 'int28' }, + +{ oid => '655', + proname => 'namelt', proleakproof => 't', prorettype => 'bool', + proargtypes => 'name name', prosrc => 'namelt' }, +{ oid => '656', + proname => 'namele', proleakproof => 't', prorettype => 'bool', + proargtypes => 'name name', prosrc => 'namele' }, +{ oid => '657', + proname => 'namegt', proleakproof => 't', prorettype => 'bool', + proargtypes => 'name name', prosrc => 'namegt' }, +{ oid => '658', + proname => 'namege', proleakproof => 't', prorettype => 'bool', + proargtypes => 'name name', prosrc => 'namege' }, +{ oid => '659', + proname => 'namene', proleakproof => 't', prorettype => 'bool', + proargtypes => 'name name', prosrc => 'namene' }, + +{ oid => '668', descr => 'adjust char() to typmod length', + proname => 'bpchar', prorettype => 'bpchar', + proargtypes => 'bpchar int4 bool', prosrc => 'bpchar' }, +{ oid => '3097', descr => 'transform a varchar length coercion', + proname => 'varchar_transform', prorettype => 'internal', + proargtypes => 'internal', prosrc => 'varchar_transform' }, +{ oid => '669', descr => 'adjust varchar() to typmod length', + proname => 'varchar', protransform => 'varchar_transform', + prorettype => 'varchar', proargtypes => 'varchar int4 bool', + prosrc => 'varchar' }, + +{ oid => '619', + proname => 'oidvectorne', prorettype => 'bool', + proargtypes => 'oidvector oidvector', prosrc => 'oidvectorne' }, +{ oid => '677', + proname => 'oidvectorlt', prorettype => 'bool', + proargtypes => 'oidvector oidvector', prosrc => 'oidvectorlt' }, 
+{ oid => '678', + proname => 'oidvectorle', prorettype => 'bool', + proargtypes => 'oidvector oidvector', prosrc => 'oidvectorle' }, +{ oid => '679', + proname => 'oidvectoreq', prorettype => 'bool', + proargtypes => 'oidvector oidvector', prosrc => 'oidvectoreq' }, +{ oid => '680', + proname => 'oidvectorge', prorettype => 'bool', + proargtypes => 'oidvector oidvector', prosrc => 'oidvectorge' }, +{ oid => '681', + proname => 'oidvectorgt', prorettype => 'bool', + proargtypes => 'oidvector oidvector', prosrc => 'oidvectorgt' }, + +# OIDS 700 - 799 +{ oid => '710', descr => 'deprecated, use current_user instead', + proname => 'getpgusername', provolatile => 's', prorettype => 'name', + proargtypes => '', prosrc => 'current_user' }, +{ oid => '716', + proname => 'oidlt', proleakproof => 't', prorettype => 'bool', + proargtypes => 'oid oid', prosrc => 'oidlt' }, +{ oid => '717', + proname => 'oidle', proleakproof => 't', prorettype => 'bool', + proargtypes => 'oid oid', prosrc => 'oidle' }, + +{ oid => '720', descr => 'octet length', + proname => 'octet_length', prorettype => 'int4', proargtypes => 'bytea', + prosrc => 'byteaoctetlen' }, +{ oid => '721', descr => 'get byte', + proname => 'get_byte', prorettype => 'int4', proargtypes => 'bytea int4', + prosrc => 'byteaGetByte' }, +{ oid => '722', descr => 'set byte', + proname => 'set_byte', prorettype => 'bytea', + proargtypes => 'bytea int4 int4', prosrc => 'byteaSetByte' }, +{ oid => '723', descr => 'get bit', + proname => 'get_bit', prorettype => 'int4', proargtypes => 'bytea int4', + prosrc => 'byteaGetBit' }, +{ oid => '724', descr => 'set bit', + proname => 'set_bit', prorettype => 'bytea', proargtypes => 'bytea int4 int4', + prosrc => 'byteaSetBit' }, +{ oid => '749', descr => 'substitute portion of string', + proname => 'overlay', prorettype => 'bytea', + proargtypes => 'bytea bytea int4 int4', prosrc => 'byteaoverlay' }, +{ oid => '752', descr => 'substitute portion of string', + proname => 'overlay', prorettype => 'bytea', + proargtypes => 'bytea bytea int4', prosrc => 'byteaoverlay_no_len' }, + +{ oid => '725', + proname => 'dist_pl', prorettype => 'float8', proargtypes => 'point line', + prosrc => 'dist_pl' }, +{ oid => '726', + proname => 'dist_lb', prorettype => 'float8', proargtypes => 'line box', + prosrc => 'dist_lb' }, +{ oid => '727', + proname => 'dist_sl', prorettype => 'float8', proargtypes => 'lseg line', + prosrc => 'dist_sl' }, +{ oid => '728', + proname => 'dist_cpoly', prorettype => 'float8', + proargtypes => 'circle polygon', prosrc => 'dist_cpoly' }, +{ oid => '729', + proname => 'poly_distance', prorettype => 'float8', + proargtypes => 'polygon polygon', prosrc => 'poly_distance' }, +{ oid => '3275', + proname => 'dist_ppoly', prorettype => 'float8', + proargtypes => 'point polygon', prosrc => 'dist_ppoly' }, +{ oid => '3292', + proname => 'dist_polyp', prorettype => 'float8', + proargtypes => 'polygon point', prosrc => 'dist_polyp' }, +{ oid => '3290', + proname => 'dist_cpoint', prorettype => 'float8', + proargtypes => 'circle point', prosrc => 'dist_cpoint' }, + +{ oid => '740', + proname => 'text_lt', prorettype => 'bool', proargtypes => 'text text', + prosrc => 'text_lt' }, +{ oid => '741', + proname => 'text_le', prorettype => 'bool', proargtypes => 'text text', + prosrc => 'text_le' }, +{ oid => '742', + proname => 'text_gt', prorettype => 'bool', proargtypes => 'text text', + prosrc => 'text_gt' }, +{ oid => '743', + proname => 'text_ge', prorettype => 'bool', proargtypes => 'text text', + prosrc => 
'text_ge' }, + +{ oid => '745', descr => 'current user name', + proname => 'current_user', provolatile => 's', prorettype => 'name', + proargtypes => '', prosrc => 'current_user' }, +{ oid => '746', descr => 'session user name', + proname => 'session_user', provolatile => 's', prorettype => 'name', + proargtypes => '', prosrc => 'session_user' }, + +{ oid => '744', + proname => 'array_eq', prorettype => 'bool', + proargtypes => 'anyarray anyarray', prosrc => 'array_eq' }, +{ oid => '390', + proname => 'array_ne', prorettype => 'bool', + proargtypes => 'anyarray anyarray', prosrc => 'array_ne' }, +{ oid => '391', + proname => 'array_lt', prorettype => 'bool', + proargtypes => 'anyarray anyarray', prosrc => 'array_lt' }, +{ oid => '392', + proname => 'array_gt', prorettype => 'bool', + proargtypes => 'anyarray anyarray', prosrc => 'array_gt' }, +{ oid => '393', + proname => 'array_le', prorettype => 'bool', + proargtypes => 'anyarray anyarray', prosrc => 'array_le' }, +{ oid => '396', + proname => 'array_ge', prorettype => 'bool', + proargtypes => 'anyarray anyarray', prosrc => 'array_ge' }, +{ oid => '747', descr => 'array dimensions', + proname => 'array_dims', prorettype => 'text', proargtypes => 'anyarray', + prosrc => 'array_dims' }, +{ oid => '748', descr => 'number of array dimensions', + proname => 'array_ndims', prorettype => 'int4', proargtypes => 'anyarray', + prosrc => 'array_ndims' }, +{ oid => '750', descr => 'I/O', + proname => 'array_in', provolatile => 's', prorettype => 'anyarray', + proargtypes => 'cstring oid int4', prosrc => 'array_in' }, +{ oid => '751', descr => 'I/O', + proname => 'array_out', provolatile => 's', prorettype => 'cstring', + proargtypes => 'anyarray', prosrc => 'array_out' }, +{ oid => '2091', descr => 'array lower dimension', + proname => 'array_lower', prorettype => 'int4', + proargtypes => 'anyarray int4', prosrc => 'array_lower' }, +{ oid => '2092', descr => 'array upper dimension', + proname => 'array_upper', prorettype => 'int4', + proargtypes => 'anyarray int4', prosrc => 'array_upper' }, +{ oid => '2176', descr => 'array length', + proname => 'array_length', prorettype => 'int4', + proargtypes => 'anyarray int4', prosrc => 'array_length' }, +{ oid => '3179', descr => 'array cardinality', + proname => 'cardinality', prorettype => 'int4', proargtypes => 'anyarray', + prosrc => 'array_cardinality' }, +{ oid => '378', descr => 'append element onto end of array', + proname => 'array_append', proisstrict => 'f', prorettype => 'anyarray', + proargtypes => 'anyarray anyelement', prosrc => 'array_append' }, +{ oid => '379', descr => 'prepend element onto front of array', + proname => 'array_prepend', proisstrict => 'f', prorettype => 'anyarray', + proargtypes => 'anyelement anyarray', prosrc => 'array_prepend' }, +{ oid => '383', + proname => 'array_cat', proisstrict => 'f', prorettype => 'anyarray', + proargtypes => 'anyarray anyarray', prosrc => 'array_cat' }, +{ oid => '394', descr => 'split delimited text into text[]', + proname => 'string_to_array', proisstrict => 'f', prorettype => '_text', + proargtypes => 'text text', prosrc => 'text_to_array' }, +{ oid => '395', + descr => 'concatenate array elements, using delimiter, into text', + proname => 'array_to_string', provolatile => 's', prorettype => 'text', + proargtypes => 'anyarray text', prosrc => 'array_to_text' }, +{ oid => '376', descr => 'split delimited text into text[], with null string', + proname => 'string_to_array', proisstrict => 'f', prorettype => '_text', + proargtypes => 'text text 
text', prosrc => 'text_to_array_null' }, +{ oid => '384', + descr => 'concatenate array elements, using delimiter and null string, into text', + proname => 'array_to_string', proisstrict => 'f', provolatile => 's', + prorettype => 'text', proargtypes => 'anyarray text text', + prosrc => 'array_to_text_null' }, +{ oid => '515', descr => 'larger of two', + proname => 'array_larger', prorettype => 'anyarray', + proargtypes => 'anyarray anyarray', prosrc => 'array_larger' }, +{ oid => '516', descr => 'smaller of two', + proname => 'array_smaller', prorettype => 'anyarray', + proargtypes => 'anyarray anyarray', prosrc => 'array_smaller' }, +{ oid => '3277', descr => 'returns an offset of value in array', + proname => 'array_position', proisstrict => 'f', prorettype => 'int4', + proargtypes => 'anyarray anyelement', prosrc => 'array_position' }, +{ oid => '3278', + descr => 'returns an offset of value in array with start index', + proname => 'array_position', proisstrict => 'f', prorettype => 'int4', + proargtypes => 'anyarray anyelement int4', prosrc => 'array_position_start' }, +{ oid => '3279', + descr => 'returns an array of offsets of some value in array', + proname => 'array_positions', proisstrict => 'f', prorettype => '_int4', + proargtypes => 'anyarray anyelement', prosrc => 'array_positions' }, +{ oid => '1191', descr => 'array subscripts generator', + proname => 'generate_subscripts', prorows => '1000', proretset => 't', + prorettype => 'int4', proargtypes => 'anyarray int4 bool', + prosrc => 'generate_subscripts' }, +{ oid => '1192', descr => 'array subscripts generator', + proname => 'generate_subscripts', prorows => '1000', proretset => 't', + prorettype => 'int4', proargtypes => 'anyarray int4', + prosrc => 'generate_subscripts_nodir' }, +{ oid => '1193', descr => 'array constructor with value', + proname => 'array_fill', proisstrict => 'f', prorettype => 'anyarray', + proargtypes => 'anyelement _int4', prosrc => 'array_fill' }, +{ oid => '1286', descr => 'array constructor with value', + proname => 'array_fill', proisstrict => 'f', prorettype => 'anyarray', + proargtypes => 'anyelement _int4 _int4', + prosrc => 'array_fill_with_lower_bounds' }, +{ oid => '2331', descr => 'expand array to set of rows', + proname => 'unnest', prorows => '100', proretset => 't', + prorettype => 'anyelement', proargtypes => 'anyarray', + prosrc => 'array_unnest' }, +{ oid => '3167', + descr => 'remove any occurrences of an element from an array', + proname => 'array_remove', proisstrict => 'f', prorettype => 'anyarray', + proargtypes => 'anyarray anyelement', prosrc => 'array_remove' }, +{ oid => '3168', descr => 'replace any occurrences of an element in an array', + proname => 'array_replace', proisstrict => 'f', prorettype => 'anyarray', + proargtypes => 'anyarray anyelement anyelement', prosrc => 'array_replace' }, +{ oid => '2333', descr => 'aggregate transition function', + proname => 'array_agg_transfn', proisstrict => 'f', prorettype => 'internal', + proargtypes => 'internal anynonarray', prosrc => 'array_agg_transfn' }, +{ oid => '2334', descr => 'aggregate final function', + proname => 'array_agg_finalfn', proisstrict => 'f', prorettype => 'anyarray', + proargtypes => 'internal anynonarray', prosrc => 'array_agg_finalfn' }, +{ oid => '2335', descr => 'concatenate aggregate input into an array', + proname => 'array_agg', prokind => 'a', proisstrict => 'f', + prorettype => 'anyarray', proargtypes => 'anynonarray', + prosrc => 'aggregate_dummy' }, +{ oid => '4051', descr => 'aggregate 
transition function', + proname => 'array_agg_array_transfn', proisstrict => 'f', + prorettype => 'internal', proargtypes => 'internal anyarray', + prosrc => 'array_agg_array_transfn' }, +{ oid => '4052', descr => 'aggregate final function', + proname => 'array_agg_array_finalfn', proisstrict => 'f', + prorettype => 'anyarray', proargtypes => 'internal anyarray', + prosrc => 'array_agg_array_finalfn' }, +{ oid => '4053', descr => 'concatenate aggregate input into an array', + proname => 'array_agg', prokind => 'a', proisstrict => 'f', + prorettype => 'anyarray', proargtypes => 'anyarray', + prosrc => 'aggregate_dummy' }, +{ oid => '3218', + descr => 'bucket number of operand given a sorted array of bucket lower bounds', + proname => 'width_bucket', prorettype => 'int4', + proargtypes => 'anyelement anyarray', prosrc => 'width_bucket_array' }, +{ oid => '3816', descr => 'array typanalyze', + proname => 'array_typanalyze', provolatile => 's', prorettype => 'bool', + proargtypes => 'internal', prosrc => 'array_typanalyze' }, +{ oid => '3817', + descr => 'restriction selectivity for array-containment operators', + proname => 'arraycontsel', provolatile => 's', prorettype => 'float8', + proargtypes => 'internal oid internal int4', prosrc => 'arraycontsel' }, +{ oid => '3818', descr => 'join selectivity for array-containment operators', + proname => 'arraycontjoinsel', provolatile => 's', prorettype => 'float8', + proargtypes => 'internal oid internal int2 internal', + prosrc => 'arraycontjoinsel' }, + +{ oid => '760', descr => 'I/O', + proname => 'smgrin', provolatile => 's', prorettype => 'smgr', + proargtypes => 'cstring', prosrc => 'smgrin' }, +{ oid => '761', descr => 'I/O', + proname => 'smgrout', provolatile => 's', prorettype => 'cstring', + proargtypes => 'smgr', prosrc => 'smgrout' }, +{ oid => '762', descr => 'storage manager', + proname => 'smgreq', prorettype => 'bool', proargtypes => 'smgr smgr', + prosrc => 'smgreq' }, +{ oid => '763', descr => 'storage manager', + proname => 'smgrne', prorettype => 'bool', proargtypes => 'smgr smgr', + prosrc => 'smgrne' }, + +{ oid => '764', descr => 'large object import', + proname => 'lo_import', provolatile => 'v', proparallel => 'u', + prorettype => 'oid', proargtypes => 'text', prosrc => 'be_lo_import' }, +{ oid => '767', descr => 'large object import', + proname => 'lo_import', provolatile => 'v', proparallel => 'u', + prorettype => 'oid', proargtypes => 'text oid', + prosrc => 'be_lo_import_with_oid' }, +{ oid => '765', descr => 'large object export', + proname => 'lo_export', provolatile => 'v', proparallel => 'u', + prorettype => 'int4', proargtypes => 'oid text', prosrc => 'be_lo_export' }, + +{ oid => '766', descr => 'increment', + proname => 'int4inc', prorettype => 'int4', proargtypes => 'int4', + prosrc => 'int4inc' }, +{ oid => '768', descr => 'larger of two', + proname => 'int4larger', prorettype => 'int4', proargtypes => 'int4 int4', + prosrc => 'int4larger' }, +{ oid => '769', descr => 'smaller of two', + proname => 'int4smaller', prorettype => 'int4', proargtypes => 'int4 int4', + prosrc => 'int4smaller' }, +{ oid => '770', descr => 'larger of two', + proname => 'int2larger', prorettype => 'int2', proargtypes => 'int2 int2', + prosrc => 'int2larger' }, +{ oid => '771', descr => 'smaller of two', + proname => 'int2smaller', prorettype => 'int2', proargtypes => 'int2 int2', + prosrc => 'int2smaller' }, + +# OIDS 800 - 899 + +{ oid => '846', + proname => 'cash_mul_flt4', prorettype => 'money', + proargtypes => 'money float4', 
prosrc => 'cash_mul_flt4' }, +{ oid => '847', + proname => 'cash_div_flt4', prorettype => 'money', + proargtypes => 'money float4', prosrc => 'cash_div_flt4' }, +{ oid => '848', + proname => 'flt4_mul_cash', prorettype => 'money', + proargtypes => 'float4 money', prosrc => 'flt4_mul_cash' }, + +{ oid => '849', descr => 'position of substring', + proname => 'position', prorettype => 'int4', proargtypes => 'text text', + prosrc => 'textpos' }, +{ oid => '850', + proname => 'textlike', prorettype => 'bool', proargtypes => 'text text', + prosrc => 'textlike' }, +{ oid => '851', + proname => 'textnlike', prorettype => 'bool', proargtypes => 'text text', + prosrc => 'textnlike' }, + +{ oid => '852', + proname => 'int48eq', proleakproof => 't', prorettype => 'bool', + proargtypes => 'int4 int8', prosrc => 'int48eq' }, +{ oid => '853', + proname => 'int48ne', proleakproof => 't', prorettype => 'bool', + proargtypes => 'int4 int8', prosrc => 'int48ne' }, +{ oid => '854', + proname => 'int48lt', proleakproof => 't', prorettype => 'bool', + proargtypes => 'int4 int8', prosrc => 'int48lt' }, +{ oid => '855', + proname => 'int48gt', proleakproof => 't', prorettype => 'bool', + proargtypes => 'int4 int8', prosrc => 'int48gt' }, +{ oid => '856', + proname => 'int48le', proleakproof => 't', prorettype => 'bool', + proargtypes => 'int4 int8', prosrc => 'int48le' }, +{ oid => '857', + proname => 'int48ge', proleakproof => 't', prorettype => 'bool', + proargtypes => 'int4 int8', prosrc => 'int48ge' }, + +{ oid => '858', + proname => 'namelike', prorettype => 'bool', proargtypes => 'name text', + prosrc => 'namelike' }, +{ oid => '859', + proname => 'namenlike', prorettype => 'bool', proargtypes => 'name text', + prosrc => 'namenlike' }, + +{ oid => '860', descr => 'convert char to char(n)', + proname => 'bpchar', prorettype => 'bpchar', proargtypes => 'char', + prosrc => 'char_bpchar' }, + +{ oid => '861', descr => 'name of the current database', + proname => 'current_database', provolatile => 's', prorettype => 'name', + proargtypes => '', prosrc => 'current_database' }, +{ oid => '817', descr => 'get the currently executing query', + proname => 'current_query', proisstrict => 'f', provolatile => 'v', + proparallel => 'r', prorettype => 'text', proargtypes => '', + prosrc => 'current_query' }, + +{ oid => '3399', + proname => 'int8_mul_cash', prorettype => 'money', + proargtypes => 'int8 money', prosrc => 'int8_mul_cash' }, +{ oid => '862', + proname => 'int4_mul_cash', prorettype => 'money', + proargtypes => 'int4 money', prosrc => 'int4_mul_cash' }, +{ oid => '863', + proname => 'int2_mul_cash', prorettype => 'money', + proargtypes => 'int2 money', prosrc => 'int2_mul_cash' }, +{ oid => '3344', + proname => 'cash_mul_int8', prorettype => 'money', + proargtypes => 'money int8', prosrc => 'cash_mul_int8' }, +{ oid => '3345', + proname => 'cash_div_int8', prorettype => 'money', + proargtypes => 'money int8', prosrc => 'cash_div_int8' }, +{ oid => '864', + proname => 'cash_mul_int4', prorettype => 'money', + proargtypes => 'money int4', prosrc => 'cash_mul_int4' }, +{ oid => '865', + proname => 'cash_div_int4', prorettype => 'money', + proargtypes => 'money int4', prosrc => 'cash_div_int4' }, +{ oid => '866', + proname => 'cash_mul_int2', prorettype => 'money', + proargtypes => 'money int2', prosrc => 'cash_mul_int2' }, +{ oid => '867', + proname => 'cash_div_int2', prorettype => 'money', + proargtypes => 'money int2', prosrc => 'cash_div_int2' }, + +{ oid => '886', descr => 'I/O', + proname => 'cash_in', 
provolatile => 's', prorettype => 'money', + proargtypes => 'cstring', prosrc => 'cash_in' }, +{ oid => '887', descr => 'I/O', + proname => 'cash_out', provolatile => 's', prorettype => 'cstring', + proargtypes => 'money', prosrc => 'cash_out' }, +{ oid => '888', + proname => 'cash_eq', proleakproof => 't', prorettype => 'bool', + proargtypes => 'money money', prosrc => 'cash_eq' }, +{ oid => '889', + proname => 'cash_ne', proleakproof => 't', prorettype => 'bool', + proargtypes => 'money money', prosrc => 'cash_ne' }, +{ oid => '890', + proname => 'cash_lt', proleakproof => 't', prorettype => 'bool', + proargtypes => 'money money', prosrc => 'cash_lt' }, +{ oid => '891', + proname => 'cash_le', proleakproof => 't', prorettype => 'bool', + proargtypes => 'money money', prosrc => 'cash_le' }, +{ oid => '892', + proname => 'cash_gt', proleakproof => 't', prorettype => 'bool', + proargtypes => 'money money', prosrc => 'cash_gt' }, +{ oid => '893', + proname => 'cash_ge', proleakproof => 't', prorettype => 'bool', + proargtypes => 'money money', prosrc => 'cash_ge' }, +{ oid => '894', + proname => 'cash_pl', prorettype => 'money', proargtypes => 'money money', + prosrc => 'cash_pl' }, +{ oid => '895', + proname => 'cash_mi', prorettype => 'money', proargtypes => 'money money', + prosrc => 'cash_mi' }, +{ oid => '896', + proname => 'cash_mul_flt8', prorettype => 'money', + proargtypes => 'money float8', prosrc => 'cash_mul_flt8' }, +{ oid => '897', + proname => 'cash_div_flt8', prorettype => 'money', + proargtypes => 'money float8', prosrc => 'cash_div_flt8' }, +{ oid => '898', descr => 'larger of two', + proname => 'cashlarger', prorettype => 'money', proargtypes => 'money money', + prosrc => 'cashlarger' }, +{ oid => '899', descr => 'smaller of two', + proname => 'cashsmaller', prorettype => 'money', proargtypes => 'money money', + prosrc => 'cashsmaller' }, +{ oid => '919', + proname => 'flt8_mul_cash', prorettype => 'money', + proargtypes => 'float8 money', prosrc => 'flt8_mul_cash' }, +{ oid => '935', descr => 'output money amount as words', + proname => 'cash_words', prorettype => 'text', proargtypes => 'money', + prosrc => 'cash_words' }, +{ oid => '3822', + proname => 'cash_div_cash', prorettype => 'float8', + proargtypes => 'money money', prosrc => 'cash_div_cash' }, +{ oid => '3823', descr => 'convert money to numeric', + proname => 'numeric', provolatile => 's', prorettype => 'numeric', + proargtypes => 'money', prosrc => 'cash_numeric' }, +{ oid => '3824', descr => 'convert numeric to money', + proname => 'money', provolatile => 's', prorettype => 'money', + proargtypes => 'numeric', prosrc => 'numeric_cash' }, +{ oid => '3811', descr => 'convert int4 to money', + proname => 'money', provolatile => 's', prorettype => 'money', + proargtypes => 'int4', prosrc => 'int4_cash' }, +{ oid => '3812', descr => 'convert int8 to money', + proname => 'money', provolatile => 's', prorettype => 'money', + proargtypes => 'int8', prosrc => 'int8_cash' }, + +# OIDS 900 - 999 + +{ oid => '940', descr => 'modulus', + proname => 'mod', prorettype => 'int2', proargtypes => 'int2 int2', + prosrc => 'int2mod' }, +{ oid => '941', descr => 'modulus', + proname => 'mod', prorettype => 'int4', proargtypes => 'int4 int4', + prosrc => 'int4mod' }, + +{ oid => '945', + proname => 'int8mod', prorettype => 'int8', proargtypes => 'int8 int8', + prosrc => 'int8mod' }, +{ oid => '947', descr => 'modulus', + proname => 'mod', prorettype => 'int8', proargtypes => 'int8 int8', + prosrc => 'int8mod' }, + +{ oid => '944', 
descr => 'convert text to char', + proname => 'char', prorettype => 'char', proargtypes => 'text', + prosrc => 'text_char' }, +{ oid => '946', descr => 'convert char to text', + proname => 'text', prorettype => 'text', proargtypes => 'char', + prosrc => 'char_text' }, + +{ oid => '952', descr => 'large object open', + proname => 'lo_open', provolatile => 'v', proparallel => 'u', + prorettype => 'int4', proargtypes => 'oid int4', prosrc => 'be_lo_open' }, +{ oid => '953', descr => 'large object close', + proname => 'lo_close', provolatile => 'v', proparallel => 'u', + prorettype => 'int4', proargtypes => 'int4', prosrc => 'be_lo_close' }, +{ oid => '954', descr => 'large object read', + proname => 'loread', provolatile => 'v', proparallel => 'u', + prorettype => 'bytea', proargtypes => 'int4 int4', prosrc => 'be_loread' }, +{ oid => '955', descr => 'large object write', + proname => 'lowrite', provolatile => 'v', proparallel => 'u', + prorettype => 'int4', proargtypes => 'int4 bytea', prosrc => 'be_lowrite' }, +{ oid => '956', descr => 'large object seek', + proname => 'lo_lseek', provolatile => 'v', proparallel => 'u', + prorettype => 'int4', proargtypes => 'int4 int4 int4', + prosrc => 'be_lo_lseek' }, +{ oid => '3170', descr => 'large object seek (64 bit)', + proname => 'lo_lseek64', provolatile => 'v', proparallel => 'u', + prorettype => 'int8', proargtypes => 'int4 int8 int4', + prosrc => 'be_lo_lseek64' }, +{ oid => '957', descr => 'large object create', + proname => 'lo_creat', provolatile => 'v', proparallel => 'u', + prorettype => 'oid', proargtypes => 'int4', prosrc => 'be_lo_creat' }, +{ oid => '715', descr => 'large object create', + proname => 'lo_create', provolatile => 'v', proparallel => 'u', + prorettype => 'oid', proargtypes => 'oid', prosrc => 'be_lo_create' }, +{ oid => '958', descr => 'large object position', + proname => 'lo_tell', provolatile => 'v', proparallel => 'u', + prorettype => 'int4', proargtypes => 'int4', prosrc => 'be_lo_tell' }, +{ oid => '3171', descr => 'large object position (64 bit)', + proname => 'lo_tell64', provolatile => 'v', proparallel => 'u', + prorettype => 'int8', proargtypes => 'int4', prosrc => 'be_lo_tell64' }, +{ oid => '1004', descr => 'truncate large object', + proname => 'lo_truncate', provolatile => 'v', proparallel => 'u', + prorettype => 'int4', proargtypes => 'int4 int4', + prosrc => 'be_lo_truncate' }, +{ oid => '3172', descr => 'truncate large object (64 bit)', + proname => 'lo_truncate64', provolatile => 'v', proparallel => 'u', + prorettype => 'int4', proargtypes => 'int4 int8', + prosrc => 'be_lo_truncate64' }, + +{ oid => '3457', descr => 'create new large object with given content', + proname => 'lo_from_bytea', provolatile => 'v', proparallel => 'u', + prorettype => 'oid', proargtypes => 'oid bytea', + prosrc => 'be_lo_from_bytea' }, +{ oid => '3458', descr => 'read entire large object', + proname => 'lo_get', provolatile => 'v', proparallel => 'u', + prorettype => 'bytea', proargtypes => 'oid', prosrc => 'be_lo_get' }, +{ oid => '3459', descr => 'read large object from offset for length', + proname => 'lo_get', provolatile => 'v', proparallel => 'u', + prorettype => 'bytea', proargtypes => 'oid int8 int4', + prosrc => 'be_lo_get_fragment' }, +{ oid => '3460', descr => 'write data at offset', + proname => 'lo_put', provolatile => 'v', proparallel => 'u', + prorettype => 'void', proargtypes => 'oid int8 bytea', + prosrc => 'be_lo_put' }, + +{ oid => '959', + proname => 'on_pl', prorettype => 'bool', proargtypes => 'point 
line', + prosrc => 'on_pl' }, +{ oid => '960', + proname => 'on_sl', prorettype => 'bool', proargtypes => 'lseg line', + prosrc => 'on_sl' }, +{ oid => '961', + proname => 'close_pl', prorettype => 'point', proargtypes => 'point line', + prosrc => 'close_pl' }, +{ oid => '962', + proname => 'close_sl', prorettype => 'point', proargtypes => 'lseg line', + prosrc => 'close_sl' }, +{ oid => '963', + proname => 'close_lb', prorettype => 'point', proargtypes => 'line box', + prosrc => 'close_lb' }, + +{ oid => '964', descr => 'large object unlink (delete)', + proname => 'lo_unlink', provolatile => 'v', proparallel => 'u', + prorettype => 'int4', proargtypes => 'oid', prosrc => 'be_lo_unlink' }, + +{ oid => '973', + proname => 'path_inter', prorettype => 'bool', proargtypes => 'path path', + prosrc => 'path_inter' }, +{ oid => '975', descr => 'box area', + proname => 'area', prorettype => 'float8', proargtypes => 'box', + prosrc => 'box_area' }, +{ oid => '976', descr => 'box width', + proname => 'width', prorettype => 'float8', proargtypes => 'box', + prosrc => 'box_width' }, +{ oid => '977', descr => 'box height', + proname => 'height', prorettype => 'float8', proargtypes => 'box', + prosrc => 'box_height' }, +{ oid => '978', + proname => 'box_distance', prorettype => 'float8', proargtypes => 'box box', + prosrc => 'box_distance' }, +{ oid => '979', descr => 'area of a closed path', + proname => 'area', prorettype => 'float8', proargtypes => 'path', + prosrc => 'path_area' }, +{ oid => '980', + proname => 'box_intersect', prorettype => 'box', proargtypes => 'box box', + prosrc => 'box_intersect' }, +{ oid => '4067', descr => 'bounding box of two boxes', + proname => 'bound_box', prorettype => 'box', proargtypes => 'box box', + prosrc => 'boxes_bound_box' }, +{ oid => '981', descr => 'box diagonal', + proname => 'diagonal', prorettype => 'lseg', proargtypes => 'box', + prosrc => 'box_diagonal' }, +{ oid => '982', + proname => 'path_n_lt', prorettype => 'bool', proargtypes => 'path path', + prosrc => 'path_n_lt' }, +{ oid => '983', + proname => 'path_n_gt', prorettype => 'bool', proargtypes => 'path path', + prosrc => 'path_n_gt' }, +{ oid => '984', + proname => 'path_n_eq', prorettype => 'bool', proargtypes => 'path path', + prosrc => 'path_n_eq' }, +{ oid => '985', + proname => 'path_n_le', prorettype => 'bool', proargtypes => 'path path', + prosrc => 'path_n_le' }, +{ oid => '986', + proname => 'path_n_ge', prorettype => 'bool', proargtypes => 'path path', + prosrc => 'path_n_ge' }, +{ oid => '987', + proname => 'path_length', prorettype => 'float8', proargtypes => 'path', + prosrc => 'path_length' }, +{ oid => '988', + proname => 'point_ne', prorettype => 'bool', proargtypes => 'point point', + prosrc => 'point_ne' }, +{ oid => '989', + proname => 'point_vert', prorettype => 'bool', proargtypes => 'point point', + prosrc => 'point_vert' }, +{ oid => '990', + proname => 'point_horiz', prorettype => 'bool', proargtypes => 'point point', + prosrc => 'point_horiz' }, +{ oid => '991', + proname => 'point_distance', prorettype => 'float8', + proargtypes => 'point point', prosrc => 'point_distance' }, +{ oid => '992', descr => 'slope between points', + proname => 'slope', prorettype => 'float8', proargtypes => 'point point', + prosrc => 'point_slope' }, +{ oid => '993', descr => 'convert points to line segment', + proname => 'lseg', prorettype => 'lseg', proargtypes => 'point point', + prosrc => 'lseg_construct' }, +{ oid => '994', + proname => 'lseg_intersect', prorettype => 'bool', proargtypes => 
'lseg lseg', + prosrc => 'lseg_intersect' }, +{ oid => '995', + proname => 'lseg_parallel', prorettype => 'bool', proargtypes => 'lseg lseg', + prosrc => 'lseg_parallel' }, +{ oid => '996', + proname => 'lseg_perp', prorettype => 'bool', proargtypes => 'lseg lseg', + prosrc => 'lseg_perp' }, +{ oid => '997', + proname => 'lseg_vertical', prorettype => 'bool', proargtypes => 'lseg', + prosrc => 'lseg_vertical' }, +{ oid => '998', + proname => 'lseg_horizontal', prorettype => 'bool', proargtypes => 'lseg', + prosrc => 'lseg_horizontal' }, +{ oid => '999', + proname => 'lseg_eq', proleakproof => 't', prorettype => 'bool', + proargtypes => 'lseg lseg', prosrc => 'lseg_eq' }, + +# OIDS 1000 - 1999 + +{ oid => '3994', descr => 'transform a time zone adjustment', + proname => 'timestamp_izone_transform', prorettype => 'internal', + proargtypes => 'internal', prosrc => 'timestamp_izone_transform' }, +{ oid => '1026', descr => 'adjust timestamp to new time zone', + proname => 'timezone', protransform => 'timestamp_izone_transform', + prorettype => 'timestamp', proargtypes => 'interval timestamptz', + prosrc => 'timestamptz_izone' }, + +{ oid => '1031', descr => 'I/O', + proname => 'aclitemin', provolatile => 's', prorettype => 'aclitem', + proargtypes => 'cstring', prosrc => 'aclitemin' }, +{ oid => '1032', descr => 'I/O', + proname => 'aclitemout', provolatile => 's', prorettype => 'cstring', + proargtypes => 'aclitem', prosrc => 'aclitemout' }, +{ oid => '1035', descr => 'add/update ACL item', + proname => 'aclinsert', prorettype => '_aclitem', + proargtypes => '_aclitem aclitem', prosrc => 'aclinsert' }, +{ oid => '1036', descr => 'remove ACL item', + proname => 'aclremove', prorettype => '_aclitem', + proargtypes => '_aclitem aclitem', prosrc => 'aclremove' }, +{ oid => '1037', descr => 'contains', + proname => 'aclcontains', prorettype => 'bool', + proargtypes => '_aclitem aclitem', prosrc => 'aclcontains' }, +{ oid => '1062', + proname => 'aclitemeq', prorettype => 'bool', + proargtypes => 'aclitem aclitem', prosrc => 'aclitem_eq' }, +{ oid => '1365', descr => 'make ACL item', + proname => 'makeaclitem', prorettype => 'aclitem', + proargtypes => 'oid oid text bool', prosrc => 'makeaclitem' }, +{ oid => '3943', + descr => 'show hardwired default privileges, primarily for use by the information schema', + proname => 'acldefault', prorettype => '_aclitem', proargtypes => 'char oid', + prosrc => 'acldefault_sql' }, +{ oid => '1689', + descr => 'convert ACL item array to table, primarily for use by information schema', + proname => 'aclexplode', prorows => '10', proretset => 't', + provolatile => 's', prorettype => 'record', proargtypes => '_aclitem', + proallargtypes => '{_aclitem,oid,oid,text,bool}', + proargmodes => '{i,o,o,o,o}', + proargnames => '{acl,grantor,grantee,privilege_type,is_grantable}', + prosrc => 'aclexplode' }, +{ oid => '1044', descr => 'I/O', + proname => 'bpcharin', prorettype => 'bpchar', + proargtypes => 'cstring oid int4', prosrc => 'bpcharin' }, +{ oid => '1045', descr => 'I/O', + proname => 'bpcharout', prorettype => 'cstring', proargtypes => 'bpchar', + prosrc => 'bpcharout' }, +{ oid => '2913', descr => 'I/O typmod', + proname => 'bpchartypmodin', prorettype => 'int4', proargtypes => '_cstring', + prosrc => 'bpchartypmodin' }, +{ oid => '2914', descr => 'I/O typmod', + proname => 'bpchartypmodout', prorettype => 'cstring', proargtypes => 'int4', + prosrc => 'bpchartypmodout' }, +{ oid => '1046', descr => 'I/O', + proname => 'varcharin', prorettype => 'varchar', + 
proargtypes => 'cstring oid int4', prosrc => 'varcharin' }, +{ oid => '1047', descr => 'I/O', + proname => 'varcharout', prorettype => 'cstring', proargtypes => 'varchar', + prosrc => 'varcharout' }, +{ oid => '2915', descr => 'I/O typmod', + proname => 'varchartypmodin', prorettype => 'int4', proargtypes => '_cstring', + prosrc => 'varchartypmodin' }, +{ oid => '2916', descr => 'I/O typmod', + proname => 'varchartypmodout', prorettype => 'cstring', proargtypes => 'int4', + prosrc => 'varchartypmodout' }, +{ oid => '1048', + proname => 'bpchareq', proleakproof => 't', prorettype => 'bool', + proargtypes => 'bpchar bpchar', prosrc => 'bpchareq' }, +{ oid => '1049', + proname => 'bpcharlt', prorettype => 'bool', proargtypes => 'bpchar bpchar', + prosrc => 'bpcharlt' }, +{ oid => '1050', + proname => 'bpcharle', prorettype => 'bool', proargtypes => 'bpchar bpchar', + prosrc => 'bpcharle' }, +{ oid => '1051', + proname => 'bpchargt', prorettype => 'bool', proargtypes => 'bpchar bpchar', + prosrc => 'bpchargt' }, +{ oid => '1052', + proname => 'bpcharge', prorettype => 'bool', proargtypes => 'bpchar bpchar', + prosrc => 'bpcharge' }, +{ oid => '1053', + proname => 'bpcharne', proleakproof => 't', prorettype => 'bool', + proargtypes => 'bpchar bpchar', prosrc => 'bpcharne' }, +{ oid => '1063', descr => 'larger of two', + proname => 'bpchar_larger', prorettype => 'bpchar', + proargtypes => 'bpchar bpchar', prosrc => 'bpchar_larger' }, +{ oid => '1064', descr => 'smaller of two', + proname => 'bpchar_smaller', prorettype => 'bpchar', + proargtypes => 'bpchar bpchar', prosrc => 'bpchar_smaller' }, +{ oid => '1078', descr => 'less-equal-greater', + proname => 'bpcharcmp', prorettype => 'int4', proargtypes => 'bpchar bpchar', + prosrc => 'bpcharcmp' }, +{ oid => '3328', descr => 'sort support', + proname => 'bpchar_sortsupport', prorettype => 'void', + proargtypes => 'internal', prosrc => 'bpchar_sortsupport' }, +{ oid => '1080', descr => 'hash', + proname => 'hashbpchar', prorettype => 'int4', proargtypes => 'bpchar', + prosrc => 'hashbpchar' }, +{ oid => '972', descr => 'hash', + proname => 'hashbpcharextended', prorettype => 'int8', + proargtypes => 'bpchar int8', prosrc => 'hashbpcharextended' }, +{ oid => '1081', descr => 'format a type oid and atttypmod to canonical SQL', + proname => 'format_type', proisstrict => 'f', provolatile => 's', + prorettype => 'text', proargtypes => 'oid int4', prosrc => 'format_type' }, +{ oid => '1084', descr => 'I/O', + proname => 'date_in', provolatile => 's', prorettype => 'date', + proargtypes => 'cstring', prosrc => 'date_in' }, +{ oid => '1085', descr => 'I/O', + proname => 'date_out', provolatile => 's', prorettype => 'cstring', + proargtypes => 'date', prosrc => 'date_out' }, +{ oid => '1086', + proname => 'date_eq', proleakproof => 't', prorettype => 'bool', + proargtypes => 'date date', prosrc => 'date_eq' }, +{ oid => '1087', + proname => 'date_lt', proleakproof => 't', prorettype => 'bool', + proargtypes => 'date date', prosrc => 'date_lt' }, +{ oid => '1088', + proname => 'date_le', proleakproof => 't', prorettype => 'bool', + proargtypes => 'date date', prosrc => 'date_le' }, +{ oid => '1089', + proname => 'date_gt', proleakproof => 't', prorettype => 'bool', + proargtypes => 'date date', prosrc => 'date_gt' }, +{ oid => '1090', + proname => 'date_ge', proleakproof => 't', prorettype => 'bool', + proargtypes => 'date date', prosrc => 'date_ge' }, +{ oid => '1091', + proname => 'date_ne', proleakproof => 't', prorettype => 'bool', + proargtypes => 
'date date', prosrc => 'date_ne' }, +{ oid => '1092', descr => 'less-equal-greater', + proname => 'date_cmp', proleakproof => 't', prorettype => 'int4', + proargtypes => 'date date', prosrc => 'date_cmp' }, +{ oid => '3136', descr => 'sort support', + proname => 'date_sortsupport', prorettype => 'void', + proargtypes => 'internal', prosrc => 'date_sortsupport' }, +{ oid => '4133', descr => 'window RANGE support', + proname => 'in_range', prorettype => 'bool', + proargtypes => 'date date interval bool bool', + prosrc => 'in_range_date_interval' }, + +# OIDS 1100 - 1199 + +{ oid => '1102', + proname => 'time_lt', proleakproof => 't', prorettype => 'bool', + proargtypes => 'time time', prosrc => 'time_lt' }, +{ oid => '1103', + proname => 'time_le', proleakproof => 't', prorettype => 'bool', + proargtypes => 'time time', prosrc => 'time_le' }, +{ oid => '1104', + proname => 'time_gt', proleakproof => 't', prorettype => 'bool', + proargtypes => 'time time', prosrc => 'time_gt' }, +{ oid => '1105', + proname => 'time_ge', proleakproof => 't', prorettype => 'bool', + proargtypes => 'time time', prosrc => 'time_ge' }, +{ oid => '1106', + proname => 'time_ne', proleakproof => 't', prorettype => 'bool', + proargtypes => 'time time', prosrc => 'time_ne' }, +{ oid => '1107', descr => 'less-equal-greater', + proname => 'time_cmp', proleakproof => 't', prorettype => 'int4', + proargtypes => 'time time', prosrc => 'time_cmp' }, +{ oid => '1138', descr => 'larger of two', + proname => 'date_larger', prorettype => 'date', proargtypes => 'date date', + prosrc => 'date_larger' }, +{ oid => '1139', descr => 'smaller of two', + proname => 'date_smaller', prorettype => 'date', proargtypes => 'date date', + prosrc => 'date_smaller' }, +{ oid => '1140', + proname => 'date_mi', prorettype => 'int4', proargtypes => 'date date', + prosrc => 'date_mi' }, +{ oid => '1141', + proname => 'date_pli', prorettype => 'date', proargtypes => 'date int4', + prosrc => 'date_pli' }, +{ oid => '1142', + proname => 'date_mii', prorettype => 'date', proargtypes => 'date int4', + prosrc => 'date_mii' }, +{ oid => '1143', descr => 'I/O', + proname => 'time_in', provolatile => 's', prorettype => 'time', + proargtypes => 'cstring oid int4', prosrc => 'time_in' }, +{ oid => '1144', descr => 'I/O', + proname => 'time_out', prorettype => 'cstring', proargtypes => 'time', + prosrc => 'time_out' }, +{ oid => '2909', descr => 'I/O typmod', + proname => 'timetypmodin', prorettype => 'int4', proargtypes => '_cstring', + prosrc => 'timetypmodin' }, +{ oid => '2910', descr => 'I/O typmod', + proname => 'timetypmodout', prorettype => 'cstring', proargtypes => 'int4', + prosrc => 'timetypmodout' }, +{ oid => '1145', + proname => 'time_eq', proleakproof => 't', prorettype => 'bool', + proargtypes => 'time time', prosrc => 'time_eq' }, + +{ oid => '1146', + proname => 'circle_add_pt', prorettype => 'circle', + proargtypes => 'circle point', prosrc => 'circle_add_pt' }, +{ oid => '1147', + proname => 'circle_sub_pt', prorettype => 'circle', + proargtypes => 'circle point', prosrc => 'circle_sub_pt' }, +{ oid => '1148', + proname => 'circle_mul_pt', prorettype => 'circle', + proargtypes => 'circle point', prosrc => 'circle_mul_pt' }, +{ oid => '1149', + proname => 'circle_div_pt', prorettype => 'circle', + proargtypes => 'circle point', prosrc => 'circle_div_pt' }, + +{ oid => '1150', descr => 'I/O', + proname => 'timestamptz_in', provolatile => 's', prorettype => 'timestamptz', + proargtypes => 'cstring oid int4', prosrc => 'timestamptz_in' }, +{ 
oid => '1151', descr => 'I/O', + proname => 'timestamptz_out', provolatile => 's', prorettype => 'cstring', + proargtypes => 'timestamptz', prosrc => 'timestamptz_out' }, +{ oid => '2907', descr => 'I/O typmod', + proname => 'timestamptztypmodin', prorettype => 'int4', + proargtypes => '_cstring', prosrc => 'timestamptztypmodin' }, +{ oid => '2908', descr => 'I/O typmod', + proname => 'timestamptztypmodout', prorettype => 'cstring', + proargtypes => 'int4', prosrc => 'timestamptztypmodout' }, +{ oid => '1152', + proname => 'timestamptz_eq', proleakproof => 't', prorettype => 'bool', + proargtypes => 'timestamptz timestamptz', prosrc => 'timestamp_eq' }, +{ oid => '1153', + proname => 'timestamptz_ne', proleakproof => 't', prorettype => 'bool', + proargtypes => 'timestamptz timestamptz', prosrc => 'timestamp_ne' }, +{ oid => '1154', + proname => 'timestamptz_lt', proleakproof => 't', prorettype => 'bool', + proargtypes => 'timestamptz timestamptz', prosrc => 'timestamp_lt' }, +{ oid => '1155', + proname => 'timestamptz_le', proleakproof => 't', prorettype => 'bool', + proargtypes => 'timestamptz timestamptz', prosrc => 'timestamp_le' }, +{ oid => '1156', + proname => 'timestamptz_ge', proleakproof => 't', prorettype => 'bool', + proargtypes => 'timestamptz timestamptz', prosrc => 'timestamp_ge' }, +{ oid => '1157', + proname => 'timestamptz_gt', proleakproof => 't', prorettype => 'bool', + proargtypes => 'timestamptz timestamptz', prosrc => 'timestamp_gt' }, +{ oid => '1158', descr => 'convert UNIX epoch to timestamptz', + proname => 'to_timestamp', prorettype => 'timestamptz', + proargtypes => 'float8', prosrc => 'float8_timestamptz' }, +{ oid => '3995', descr => 'transform a time zone adjustment', + proname => 'timestamp_zone_transform', prorettype => 'internal', + proargtypes => 'internal', prosrc => 'timestamp_zone_transform' }, +{ oid => '1159', descr => 'adjust timestamp to new time zone', + proname => 'timezone', protransform => 'timestamp_zone_transform', + prorettype => 'timestamp', proargtypes => 'text timestamptz', + prosrc => 'timestamptz_zone' }, + +{ oid => '1160', descr => 'I/O', + proname => 'interval_in', provolatile => 's', prorettype => 'interval', + proargtypes => 'cstring oid int4', prosrc => 'interval_in' }, +{ oid => '1161', descr => 'I/O', + proname => 'interval_out', prorettype => 'cstring', proargtypes => 'interval', + prosrc => 'interval_out' }, +{ oid => '2903', descr => 'I/O typmod', + proname => 'intervaltypmodin', prorettype => 'int4', + proargtypes => '_cstring', prosrc => 'intervaltypmodin' }, +{ oid => '2904', descr => 'I/O typmod', + proname => 'intervaltypmodout', prorettype => 'cstring', + proargtypes => 'int4', prosrc => 'intervaltypmodout' }, +{ oid => '1162', + proname => 'interval_eq', proleakproof => 't', prorettype => 'bool', + proargtypes => 'interval interval', prosrc => 'interval_eq' }, +{ oid => '1163', + proname => 'interval_ne', proleakproof => 't', prorettype => 'bool', + proargtypes => 'interval interval', prosrc => 'interval_ne' }, +{ oid => '1164', + proname => 'interval_lt', proleakproof => 't', prorettype => 'bool', + proargtypes => 'interval interval', prosrc => 'interval_lt' }, +{ oid => '1165', + proname => 'interval_le', proleakproof => 't', prorettype => 'bool', + proargtypes => 'interval interval', prosrc => 'interval_le' }, +{ oid => '1166', + proname => 'interval_ge', proleakproof => 't', prorettype => 'bool', + proargtypes => 'interval interval', prosrc => 'interval_ge' }, +{ oid => '1167', + proname => 'interval_gt', 
proleakproof => 't', prorettype => 'bool', + proargtypes => 'interval interval', prosrc => 'interval_gt' }, +{ oid => '1168', + proname => 'interval_um', prorettype => 'interval', proargtypes => 'interval', + prosrc => 'interval_um' }, +{ oid => '1169', + proname => 'interval_pl', prorettype => 'interval', + proargtypes => 'interval interval', prosrc => 'interval_pl' }, +{ oid => '1170', + proname => 'interval_mi', prorettype => 'interval', + proargtypes => 'interval interval', prosrc => 'interval_mi' }, +{ oid => '1171', descr => 'extract field from timestamp with time zone', + proname => 'date_part', provolatile => 's', prorettype => 'float8', + proargtypes => 'text timestamptz', prosrc => 'timestamptz_part' }, +{ oid => '1172', descr => 'extract field from interval', + proname => 'date_part', prorettype => 'float8', + proargtypes => 'text interval', prosrc => 'interval_part' }, +{ oid => '1174', descr => 'convert date to timestamp with time zone', + proname => 'timestamptz', provolatile => 's', prorettype => 'timestamptz', + proargtypes => 'date', prosrc => 'date_timestamptz' }, +{ oid => '2711', + descr => 'promote groups of 24 hours to numbers of days and promote groups of 30 days to numbers of months', + proname => 'justify_interval', prorettype => 'interval', + proargtypes => 'interval', prosrc => 'interval_justify_interval' }, +{ oid => '1175', descr => 'promote groups of 24 hours to numbers of days', + proname => 'justify_hours', prorettype => 'interval', + proargtypes => 'interval', prosrc => 'interval_justify_hours' }, +{ oid => '1295', descr => 'promote groups of 30 days to numbers of months', + proname => 'justify_days', prorettype => 'interval', + proargtypes => 'interval', prosrc => 'interval_justify_days' }, +{ oid => '1176', descr => 'convert date and time to timestamp with time zone', + proname => 'timestamptz', prolang => '14', provolatile => 's', + prorettype => 'timestamptz', proargtypes => 'date time', + prosrc => 'select cast(($1 + $2) as timestamp with time zone)' }, +{ oid => '1178', descr => 'convert timestamp with time zone to date', + proname => 'date', provolatile => 's', prorettype => 'date', + proargtypes => 'timestamptz', prosrc => 'timestamptz_date' }, +{ oid => '1181', + descr => 'age of a transaction ID, in transactions before current transaction', + proname => 'age', provolatile => 's', proparallel => 'r', + prorettype => 'int4', proargtypes => 'xid', prosrc => 'xid_age' }, +{ oid => '3939', + descr => 'age of a multi-transaction ID, in multi-transactions before current multi-transaction', + proname => 'mxid_age', provolatile => 's', prorettype => 'int4', + proargtypes => 'xid', prosrc => 'mxid_age' }, + +{ oid => '1188', + proname => 'timestamptz_mi', prorettype => 'interval', + proargtypes => 'timestamptz timestamptz', prosrc => 'timestamp_mi' }, +{ oid => '1189', + proname => 'timestamptz_pl_interval', provolatile => 's', + prorettype => 'timestamptz', proargtypes => 'timestamptz interval', + prosrc => 'timestamptz_pl_interval' }, +{ oid => '1190', + proname => 'timestamptz_mi_interval', provolatile => 's', + prorettype => 'timestamptz', proargtypes => 'timestamptz interval', + prosrc => 'timestamptz_mi_interval' }, +{ oid => '1195', descr => 'smaller of two', + proname => 'timestamptz_smaller', prorettype => 'timestamptz', + proargtypes => 'timestamptz timestamptz', prosrc => 'timestamp_smaller' }, +{ oid => '1196', descr => 'larger of two', + proname => 'timestamptz_larger', prorettype => 'timestamptz', + proargtypes => 'timestamptz timestamptz', 
prosrc => 'timestamp_larger' }, +{ oid => '1197', descr => 'smaller of two', + proname => 'interval_smaller', prorettype => 'interval', + proargtypes => 'interval interval', prosrc => 'interval_smaller' }, +{ oid => '1198', descr => 'larger of two', + proname => 'interval_larger', prorettype => 'interval', + proargtypes => 'interval interval', prosrc => 'interval_larger' }, +{ oid => '1199', descr => 'date difference preserving months and years', + proname => 'age', prorettype => 'interval', + proargtypes => 'timestamptz timestamptz', prosrc => 'timestamptz_age' }, + +# OIDS 1200 - 1299 + +{ oid => '3918', descr => 'transform an interval length coercion', + proname => 'interval_transform', prorettype => 'internal', + proargtypes => 'internal', prosrc => 'interval_transform' }, +{ oid => '1200', descr => 'adjust interval precision', + proname => 'interval', protransform => 'interval_transform', + prorettype => 'interval', proargtypes => 'interval int4', + prosrc => 'interval_scale' }, + +{ oid => '1215', descr => 'get description for object id and catalog name', + proname => 'obj_description', prolang => '14', procost => '100', + provolatile => 's', prorettype => 'text', proargtypes => 'oid name', + prosrc => 'select description from pg_catalog.pg_description where objoid = $1 and classoid = (select oid from pg_catalog.pg_class where relname = $2 and relnamespace = PGNSP) and objsubid = 0' }, +{ oid => '1216', descr => 'get description for table column', + proname => 'col_description', prolang => '14', procost => '100', + provolatile => 's', prorettype => 'text', proargtypes => 'oid int4', + prosrc => 'select description from pg_catalog.pg_description where objoid = $1 and classoid = \'pg_catalog.pg_class\'::pg_catalog.regclass and objsubid = $2' }, +{ oid => '1993', + descr => 'get description for object id and shared catalog name', + proname => 'shobj_description', prolang => '14', procost => '100', + provolatile => 's', prorettype => 'text', proargtypes => 'oid name', + prosrc => 'select description from pg_catalog.pg_shdescription where objoid = $1 and classoid = (select oid from pg_catalog.pg_class where relname = $2 and relnamespace = PGNSP)' }, + +{ oid => '1217', + descr => 'truncate timestamp with time zone to specified units', + proname => 'date_trunc', provolatile => 's', prorettype => 'timestamptz', + proargtypes => 'text timestamptz', prosrc => 'timestamptz_trunc' }, +{ oid => '1218', descr => 'truncate interval to specified units', + proname => 'date_trunc', prorettype => 'interval', + proargtypes => 'text interval', prosrc => 'interval_trunc' }, + +{ oid => '1219', descr => 'increment', + proname => 'int8inc', prorettype => 'int8', proargtypes => 'int8', + prosrc => 'int8inc' }, +{ oid => '3546', descr => 'decrement', + proname => 'int8dec', prorettype => 'int8', proargtypes => 'int8', + prosrc => 'int8dec' }, +{ oid => '2804', descr => 'increment, ignores second argument', + proname => 'int8inc_any', prorettype => 'int8', proargtypes => 'int8 any', + prosrc => 'int8inc_any' }, +{ oid => '3547', descr => 'decrement, ignores second argument', + proname => 'int8dec_any', prorettype => 'int8', proargtypes => 'int8 any', + prosrc => 'int8dec_any' }, +{ oid => '1230', + proname => 'int8abs', prorettype => 'int8', proargtypes => 'int8', + prosrc => 'int8abs' }, + +{ oid => '1236', descr => 'larger of two', + proname => 'int8larger', prorettype => 'int8', proargtypes => 'int8 int8', + prosrc => 'int8larger' }, +{ oid => '1237', descr => 'smaller of two', + proname => 'int8smaller', 
prorettype => 'int8', proargtypes => 'int8 int8', + prosrc => 'int8smaller' }, + +{ oid => '1238', + proname => 'texticregexeq', prorettype => 'bool', proargtypes => 'text text', + prosrc => 'texticregexeq' }, +{ oid => '1239', + proname => 'texticregexne', prorettype => 'bool', proargtypes => 'text text', + prosrc => 'texticregexne' }, +{ oid => '1240', + proname => 'nameicregexeq', prorettype => 'bool', proargtypes => 'name text', + prosrc => 'nameicregexeq' }, +{ oid => '1241', + proname => 'nameicregexne', prorettype => 'bool', proargtypes => 'name text', + prosrc => 'nameicregexne' }, + +{ oid => '1251', + proname => 'int4abs', prorettype => 'int4', proargtypes => 'int4', + prosrc => 'int4abs' }, +{ oid => '1253', + proname => 'int2abs', prorettype => 'int2', proargtypes => 'int2', + prosrc => 'int2abs' }, + +{ oid => '1271', descr => 'intervals overlap?', + proname => 'overlaps', proisstrict => 'f', prorettype => 'bool', + proargtypes => 'timetz timetz timetz timetz', prosrc => 'overlaps_timetz' }, +{ oid => '1272', + proname => 'datetime_pl', prorettype => 'timestamp', + proargtypes => 'date time', prosrc => 'datetime_timestamp' }, +{ oid => '1273', descr => 'extract field from time with time zone', + proname => 'date_part', prorettype => 'float8', proargtypes => 'text timetz', + prosrc => 'timetz_part' }, +{ oid => '1274', + proname => 'int84pl', prorettype => 'int8', proargtypes => 'int8 int4', + prosrc => 'int84pl' }, +{ oid => '1275', + proname => 'int84mi', prorettype => 'int8', proargtypes => 'int8 int4', + prosrc => 'int84mi' }, +{ oid => '1276', + proname => 'int84mul', prorettype => 'int8', proargtypes => 'int8 int4', + prosrc => 'int84mul' }, +{ oid => '1277', + proname => 'int84div', prorettype => 'int8', proargtypes => 'int8 int4', + prosrc => 'int84div' }, +{ oid => '1278', + proname => 'int48pl', prorettype => 'int8', proargtypes => 'int4 int8', + prosrc => 'int48pl' }, +{ oid => '1279', + proname => 'int48mi', prorettype => 'int8', proargtypes => 'int4 int8', + prosrc => 'int48mi' }, +{ oid => '1280', + proname => 'int48mul', prorettype => 'int8', proargtypes => 'int4 int8', + prosrc => 'int48mul' }, +{ oid => '1281', + proname => 'int48div', prorettype => 'int8', proargtypes => 'int4 int8', + prosrc => 'int48div' }, + +{ oid => '837', + proname => 'int82pl', prorettype => 'int8', proargtypes => 'int8 int2', + prosrc => 'int82pl' }, +{ oid => '838', + proname => 'int82mi', prorettype => 'int8', proargtypes => 'int8 int2', + prosrc => 'int82mi' }, +{ oid => '839', + proname => 'int82mul', prorettype => 'int8', proargtypes => 'int8 int2', + prosrc => 'int82mul' }, +{ oid => '840', + proname => 'int82div', prorettype => 'int8', proargtypes => 'int8 int2', + prosrc => 'int82div' }, +{ oid => '841', + proname => 'int28pl', prorettype => 'int8', proargtypes => 'int2 int8', + prosrc => 'int28pl' }, +{ oid => '942', + proname => 'int28mi', prorettype => 'int8', proargtypes => 'int2 int8', + prosrc => 'int28mi' }, +{ oid => '943', + proname => 'int28mul', prorettype => 'int8', proargtypes => 'int2 int8', + prosrc => 'int28mul' }, +{ oid => '948', + proname => 'int28div', prorettype => 'int8', proargtypes => 'int2 int8', + prosrc => 'int28div' }, + +{ oid => '1287', descr => 'convert int8 to oid', + proname => 'oid', prorettype => 'oid', proargtypes => 'int8', + prosrc => 'i8tooid' }, +{ oid => '1288', descr => 'convert oid to int8', + proname => 'int8', prorettype => 'int8', proargtypes => 'oid', + prosrc => 'oidtoi8' }, + +{ oid => '1291', + descr => 'trigger to suppress 
updates when new and old records match', + proname => 'suppress_redundant_updates_trigger', provolatile => 'v', + prorettype => 'trigger', proargtypes => '', + prosrc => 'suppress_redundant_updates_trigger' }, + +{ oid => '1292', + proname => 'tideq', proleakproof => 't', prorettype => 'bool', + proargtypes => 'tid tid', prosrc => 'tideq' }, +{ oid => '1293', descr => 'latest tid of a tuple', + proname => 'currtid', provolatile => 'v', proparallel => 'u', + prorettype => 'tid', proargtypes => 'oid tid', prosrc => 'currtid_byreloid' }, +{ oid => '1294', descr => 'latest tid of a tuple', + proname => 'currtid2', provolatile => 'v', proparallel => 'u', + prorettype => 'tid', proargtypes => 'text tid', + prosrc => 'currtid_byrelname' }, +{ oid => '1265', + proname => 'tidne', proleakproof => 't', prorettype => 'bool', + proargtypes => 'tid tid', prosrc => 'tidne' }, +{ oid => '2790', + proname => 'tidgt', proleakproof => 't', prorettype => 'bool', + proargtypes => 'tid tid', prosrc => 'tidgt' }, +{ oid => '2791', + proname => 'tidlt', proleakproof => 't', prorettype => 'bool', + proargtypes => 'tid tid', prosrc => 'tidlt' }, +{ oid => '2792', + proname => 'tidge', proleakproof => 't', prorettype => 'bool', + proargtypes => 'tid tid', prosrc => 'tidge' }, +{ oid => '2793', + proname => 'tidle', proleakproof => 't', prorettype => 'bool', + proargtypes => 'tid tid', prosrc => 'tidle' }, +{ oid => '2794', descr => 'less-equal-greater', + proname => 'bttidcmp', proleakproof => 't', prorettype => 'int4', + proargtypes => 'tid tid', prosrc => 'bttidcmp' }, +{ oid => '2795', descr => 'larger of two', + proname => 'tidlarger', prorettype => 'tid', proargtypes => 'tid tid', + prosrc => 'tidlarger' }, +{ oid => '2796', descr => 'smaller of two', + proname => 'tidsmaller', prorettype => 'tid', proargtypes => 'tid tid', + prosrc => 'tidsmaller' }, + +{ oid => '1296', + proname => 'timedate_pl', prolang => '14', prorettype => 'timestamp', + proargtypes => 'time date', prosrc => 'select ($2 + $1)' }, +{ oid => '1297', + proname => 'datetimetz_pl', prorettype => 'timestamptz', + proargtypes => 'date timetz', prosrc => 'datetimetz_timestamptz' }, +{ oid => '1298', + proname => 'timetzdate_pl', prolang => '14', prorettype => 'timestamptz', + proargtypes => 'timetz date', prosrc => 'select ($2 + $1)' }, +{ oid => '1299', descr => 'current transaction time', + proname => 'now', provolatile => 's', prorettype => 'timestamptz', + proargtypes => '', prosrc => 'now' }, +{ oid => '2647', descr => 'current transaction time', + proname => 'transaction_timestamp', provolatile => 's', + prorettype => 'timestamptz', proargtypes => '', prosrc => 'now' }, +{ oid => '2648', descr => 'current statement time', + proname => 'statement_timestamp', provolatile => 's', + prorettype => 'timestamptz', proargtypes => '', + prosrc => 'statement_timestamp' }, +{ oid => '2649', descr => 'current clock time', + proname => 'clock_timestamp', provolatile => 'v', prorettype => 'timestamptz', + proargtypes => '', prosrc => 'clock_timestamp' }, + +# OIDS 1300 - 1399 + +{ oid => '1300', + descr => 'restriction selectivity for position-comparison operators', + proname => 'positionsel', provolatile => 's', prorettype => 'float8', + proargtypes => 'internal oid internal int4', prosrc => 'positionsel' }, +{ oid => '1301', + descr => 'join selectivity for position-comparison operators', + proname => 'positionjoinsel', provolatile => 's', prorettype => 'float8', + proargtypes => 'internal oid internal int2 internal', + prosrc => 'positionjoinsel' }, 
+{ oid => '1302', + descr => 'restriction selectivity for containment comparison operators', + proname => 'contsel', provolatile => 's', prorettype => 'float8', + proargtypes => 'internal oid internal int4', prosrc => 'contsel' }, +{ oid => '1303', + descr => 'join selectivity for containment comparison operators', + proname => 'contjoinsel', provolatile => 's', prorettype => 'float8', + proargtypes => 'internal oid internal int2 internal', + prosrc => 'contjoinsel' }, + +{ oid => '1304', descr => 'intervals overlap?', + proname => 'overlaps', proisstrict => 'f', prorettype => 'bool', + proargtypes => 'timestamptz timestamptz timestamptz timestamptz', + prosrc => 'overlaps_timestamp' }, +{ oid => '1305', descr => 'intervals overlap?', + proname => 'overlaps', prolang => '14', proisstrict => 'f', + provolatile => 's', prorettype => 'bool', + proargtypes => 'timestamptz interval timestamptz interval', + prosrc => 'select ($1, ($1 + $2)) overlaps ($3, ($3 + $4))' }, +{ oid => '1306', descr => 'intervals overlap?', + proname => 'overlaps', prolang => '14', proisstrict => 'f', + provolatile => 's', prorettype => 'bool', + proargtypes => 'timestamptz timestamptz timestamptz interval', + prosrc => 'select ($1, $2) overlaps ($3, ($3 + $4))' }, +{ oid => '1307', descr => 'intervals overlap?', + proname => 'overlaps', prolang => '14', proisstrict => 'f', + provolatile => 's', prorettype => 'bool', + proargtypes => 'timestamptz interval timestamptz timestamptz', + prosrc => 'select ($1, ($1 + $2)) overlaps ($3, $4)' }, + +{ oid => '1308', descr => 'intervals overlap?', + proname => 'overlaps', proisstrict => 'f', prorettype => 'bool', + proargtypes => 'time time time time', prosrc => 'overlaps_time' }, +{ oid => '1309', descr => 'intervals overlap?', + proname => 'overlaps', prolang => '14', proisstrict => 'f', + prorettype => 'bool', proargtypes => 'time interval time interval', + prosrc => 'select ($1, ($1 + $2)) overlaps ($3, ($3 + $4))' }, +{ oid => '1310', descr => 'intervals overlap?', + proname => 'overlaps', prolang => '14', proisstrict => 'f', + prorettype => 'bool', proargtypes => 'time time time interval', + prosrc => 'select ($1, $2) overlaps ($3, ($3 + $4))' }, +{ oid => '1311', descr => 'intervals overlap?', + proname => 'overlaps', prolang => '14', proisstrict => 'f', + prorettype => 'bool', proargtypes => 'time interval time time', + prosrc => 'select ($1, ($1 + $2)) overlaps ($3, $4)' }, + +{ oid => '1312', descr => 'I/O', + proname => 'timestamp_in', provolatile => 's', prorettype => 'timestamp', + proargtypes => 'cstring oid int4', prosrc => 'timestamp_in' }, +{ oid => '1313', descr => 'I/O', + proname => 'timestamp_out', provolatile => 's', prorettype => 'cstring', + proargtypes => 'timestamp', prosrc => 'timestamp_out' }, +{ oid => '2905', descr => 'I/O typmod', + proname => 'timestamptypmodin', prorettype => 'int4', + proargtypes => '_cstring', prosrc => 'timestamptypmodin' }, +{ oid => '2906', descr => 'I/O typmod', + proname => 'timestamptypmodout', prorettype => 'cstring', + proargtypes => 'int4', prosrc => 'timestamptypmodout' }, +{ oid => '1314', descr => 'less-equal-greater', + proname => 'timestamptz_cmp', proleakproof => 't', prorettype => 'int4', + proargtypes => 'timestamptz timestamptz', prosrc => 'timestamp_cmp' }, +{ oid => '1315', descr => 'less-equal-greater', + proname => 'interval_cmp', proleakproof => 't', prorettype => 'int4', + proargtypes => 'interval interval', prosrc => 'interval_cmp' }, +{ oid => '1316', descr => 'convert timestamp to time', + proname => 
'time', prorettype => 'time', proargtypes => 'timestamp', + prosrc => 'timestamp_time' }, + +{ oid => '1317', descr => 'length', + proname => 'length', prorettype => 'int4', proargtypes => 'text', + prosrc => 'textlen' }, +{ oid => '1318', descr => 'character length', + proname => 'length', prorettype => 'int4', proargtypes => 'bpchar', + prosrc => 'bpcharlen' }, + +{ oid => '1319', + proname => 'xideqint4', proleakproof => 't', prorettype => 'bool', + proargtypes => 'xid int4', prosrc => 'xideq' }, +{ oid => '3309', + proname => 'xidneqint4', proleakproof => 't', prorettype => 'bool', + proargtypes => 'xid int4', prosrc => 'xidneq' }, + +{ oid => '1326', + proname => 'interval_div', prorettype => 'interval', + proargtypes => 'interval float8', prosrc => 'interval_div' }, + +{ oid => '1339', descr => 'base 10 logarithm', + proname => 'dlog10', prorettype => 'float8', proargtypes => 'float8', + prosrc => 'dlog10' }, +{ oid => '1340', descr => 'base 10 logarithm', + proname => 'log', prorettype => 'float8', proargtypes => 'float8', + prosrc => 'dlog10' }, +{ oid => '1341', descr => 'natural logarithm', + proname => 'ln', prorettype => 'float8', proargtypes => 'float8', + prosrc => 'dlog1' }, +{ oid => '1342', descr => 'round to nearest integer', + proname => 'round', prorettype => 'float8', proargtypes => 'float8', + prosrc => 'dround' }, +{ oid => '1343', descr => 'truncate to integer', + proname => 'trunc', prorettype => 'float8', proargtypes => 'float8', + prosrc => 'dtrunc' }, +{ oid => '1344', descr => 'square root', + proname => 'sqrt', prorettype => 'float8', proargtypes => 'float8', + prosrc => 'dsqrt' }, +{ oid => '1345', descr => 'cube root', + proname => 'cbrt', prorettype => 'float8', proargtypes => 'float8', + prosrc => 'dcbrt' }, +{ oid => '1346', descr => 'exponentiation', + proname => 'pow', prorettype => 'float8', proargtypes => 'float8 float8', + prosrc => 'dpow' }, +{ oid => '1368', descr => 'exponentiation', + proname => 'power', prorettype => 'float8', proargtypes => 'float8 float8', + prosrc => 'dpow' }, +{ oid => '1347', descr => 'natural exponential (e^x)', + proname => 'exp', prorettype => 'float8', proargtypes => 'float8', + prosrc => 'dexp' }, + +# This form of obj_description is now deprecated, since it will fail if +# OIDs are not unique across system catalogs. Use the other form instead. 
+{ oid => '1348', descr => 'deprecated, use two-argument form instead', + proname => 'obj_description', prolang => '14', procost => '100', + provolatile => 's', prorettype => 'text', proargtypes => 'oid', + prosrc => 'select description from pg_catalog.pg_description where objoid = $1 and objsubid = 0' }, + +{ oid => '1349', descr => 'print type names of oidvector field', + proname => 'oidvectortypes', provolatile => 's', prorettype => 'text', + proargtypes => 'oidvector', prosrc => 'oidvectortypes' }, + +{ oid => '1350', descr => 'I/O', + proname => 'timetz_in', provolatile => 's', prorettype => 'timetz', + proargtypes => 'cstring oid int4', prosrc => 'timetz_in' }, +{ oid => '1351', descr => 'I/O', + proname => 'timetz_out', prorettype => 'cstring', proargtypes => 'timetz', + prosrc => 'timetz_out' }, +{ oid => '2911', descr => 'I/O typmod', + proname => 'timetztypmodin', prorettype => 'int4', proargtypes => '_cstring', + prosrc => 'timetztypmodin' }, +{ oid => '2912', descr => 'I/O typmod', + proname => 'timetztypmodout', prorettype => 'cstring', proargtypes => 'int4', + prosrc => 'timetztypmodout' }, +{ oid => '1352', + proname => 'timetz_eq', proleakproof => 't', prorettype => 'bool', + proargtypes => 'timetz timetz', prosrc => 'timetz_eq' }, +{ oid => '1353', + proname => 'timetz_ne', proleakproof => 't', prorettype => 'bool', + proargtypes => 'timetz timetz', prosrc => 'timetz_ne' }, +{ oid => '1354', + proname => 'timetz_lt', proleakproof => 't', prorettype => 'bool', + proargtypes => 'timetz timetz', prosrc => 'timetz_lt' }, +{ oid => '1355', + proname => 'timetz_le', proleakproof => 't', prorettype => 'bool', + proargtypes => 'timetz timetz', prosrc => 'timetz_le' }, +{ oid => '1356', + proname => 'timetz_ge', proleakproof => 't', prorettype => 'bool', + proargtypes => 'timetz timetz', prosrc => 'timetz_ge' }, +{ oid => '1357', + proname => 'timetz_gt', proleakproof => 't', prorettype => 'bool', + proargtypes => 'timetz timetz', prosrc => 'timetz_gt' }, +{ oid => '1358', descr => 'less-equal-greater', + proname => 'timetz_cmp', proleakproof => 't', prorettype => 'int4', + proargtypes => 'timetz timetz', prosrc => 'timetz_cmp' }, +{ oid => '1359', + descr => 'convert date and time with time zone to timestamp with time zone', + proname => 'timestamptz', prorettype => 'timestamptz', + proargtypes => 'date timetz', prosrc => 'datetimetz_timestamptz' }, + +{ oid => '1367', descr => 'character length', + proname => 'character_length', prorettype => 'int4', proargtypes => 'bpchar', + prosrc => 'bpcharlen' }, +{ oid => '1369', descr => 'character length', + proname => 'character_length', prorettype => 'int4', proargtypes => 'text', + prosrc => 'textlen' }, + +{ oid => '1370', descr => 'convert time to interval', + proname => 'interval', prorettype => 'interval', proargtypes => 'time', + prosrc => 'time_interval' }, +{ oid => '1372', descr => 'character length', + proname => 'char_length', prorettype => 'int4', proargtypes => 'bpchar', + prosrc => 'bpcharlen' }, +{ oid => '1374', descr => 'octet length', + proname => 'octet_length', prorettype => 'int4', proargtypes => 'text', + prosrc => 'textoctetlen' }, +{ oid => '1375', descr => 'octet length', + proname => 'octet_length', prorettype => 'int4', proargtypes => 'bpchar', + prosrc => 'bpcharoctetlen' }, + +{ oid => '1377', descr => 'larger of two', + proname => 'time_larger', prorettype => 'time', proargtypes => 'time time', + prosrc => 'time_larger' }, +{ oid => '1378', descr => 'smaller of two', + proname => 'time_smaller', prorettype 
=> 'time', proargtypes => 'time time', + prosrc => 'time_smaller' }, +{ oid => '1379', descr => 'larger of two', + proname => 'timetz_larger', prorettype => 'timetz', + proargtypes => 'timetz timetz', prosrc => 'timetz_larger' }, +{ oid => '1380', descr => 'smaller of two', + proname => 'timetz_smaller', prorettype => 'timetz', + proargtypes => 'timetz timetz', prosrc => 'timetz_smaller' }, + +{ oid => '1381', descr => 'character length', + proname => 'char_length', prorettype => 'int4', proargtypes => 'text', + prosrc => 'textlen' }, + +{ oid => '1384', descr => 'extract field from date', + proname => 'date_part', prolang => '14', prorettype => 'float8', + proargtypes => 'text date', + prosrc => 'select pg_catalog.date_part($1, cast($2 as timestamp without time zone))' }, +{ oid => '1385', descr => 'extract field from time', + proname => 'date_part', prorettype => 'float8', proargtypes => 'text time', + prosrc => 'time_part' }, +{ oid => '1386', + descr => 'date difference from today preserving months and years', + proname => 'age', prolang => '14', provolatile => 's', + prorettype => 'interval', proargtypes => 'timestamptz', + prosrc => 'select pg_catalog.age(cast(current_date as timestamp with time zone), $1)' }, + +{ oid => '1388', + descr => 'convert timestamp with time zone to time with time zone', + proname => 'timetz', provolatile => 's', prorettype => 'timetz', + proargtypes => 'timestamptz', prosrc => 'timestamptz_timetz' }, + +{ oid => '1373', descr => 'finite date?', + proname => 'isfinite', prorettype => 'bool', proargtypes => 'date', + prosrc => 'date_finite' }, +{ oid => '1389', descr => 'finite timestamp?', + proname => 'isfinite', prorettype => 'bool', proargtypes => 'timestamptz', + prosrc => 'timestamp_finite' }, +{ oid => '1390', descr => 'finite interval?', + proname => 'isfinite', prorettype => 'bool', proargtypes => 'interval', + prosrc => 'interval_finite' }, + +{ oid => '1376', descr => 'factorial', + proname => 'factorial', prorettype => 'numeric', proargtypes => 'int8', + prosrc => 'numeric_fac' }, +{ oid => '1394', descr => 'absolute value', + proname => 'abs', prorettype => 'float4', proargtypes => 'float4', + prosrc => 'float4abs' }, +{ oid => '1395', descr => 'absolute value', + proname => 'abs', prorettype => 'float8', proargtypes => 'float8', + prosrc => 'float8abs' }, +{ oid => '1396', descr => 'absolute value', + proname => 'abs', prorettype => 'int8', proargtypes => 'int8', + prosrc => 'int8abs' }, +{ oid => '1397', descr => 'absolute value', + proname => 'abs', prorettype => 'int4', proargtypes => 'int4', + prosrc => 'int4abs' }, +{ oid => '1398', descr => 'absolute value', + proname => 'abs', prorettype => 'int2', proargtypes => 'int2', + prosrc => 'int2abs' }, + +# OIDS 1400 - 1499 + +{ oid => '1400', descr => 'convert varchar to name', + proname => 'name', prorettype => 'name', proargtypes => 'varchar', + prosrc => 'text_name' }, +{ oid => '1401', descr => 'convert name to varchar', + proname => 'varchar', prorettype => 'varchar', proargtypes => 'name', + prosrc => 'name_text' }, + +{ oid => '1402', descr => 'current schema name', + proname => 'current_schema', provolatile => 's', prorettype => 'name', + proargtypes => '', prosrc => 'current_schema' }, +{ oid => '1403', descr => 'current schema search list', + proname => 'current_schemas', provolatile => 's', prorettype => '_name', + proargtypes => 'bool', prosrc => 'current_schemas' }, + +{ oid => '1404', descr => 'substitute portion of string', + proname => 'overlay', prorettype => 'text', + 
proargtypes => 'text text int4 int4', prosrc => 'textoverlay' }, +{ oid => '1405', descr => 'substitute portion of string', + proname => 'overlay', prorettype => 'text', proargtypes => 'text text int4', + prosrc => 'textoverlay_no_len' }, + +{ oid => '1406', descr => 'vertically aligned', + proname => 'isvertical', prorettype => 'bool', proargtypes => 'point point', + prosrc => 'point_vert' }, +{ oid => '1407', descr => 'horizontally aligned', + proname => 'ishorizontal', prorettype => 'bool', proargtypes => 'point point', + prosrc => 'point_horiz' }, +{ oid => '1408', descr => 'parallel', + proname => 'isparallel', prorettype => 'bool', proargtypes => 'lseg lseg', + prosrc => 'lseg_parallel' }, +{ oid => '1409', descr => 'perpendicular', + proname => 'isperp', prorettype => 'bool', proargtypes => 'lseg lseg', + prosrc => 'lseg_perp' }, +{ oid => '1410', descr => 'vertical', + proname => 'isvertical', prorettype => 'bool', proargtypes => 'lseg', + prosrc => 'lseg_vertical' }, +{ oid => '1411', descr => 'horizontal', + proname => 'ishorizontal', prorettype => 'bool', proargtypes => 'lseg', + prosrc => 'lseg_horizontal' }, +{ oid => '1412', descr => 'parallel', + proname => 'isparallel', prorettype => 'bool', proargtypes => 'line line', + prosrc => 'line_parallel' }, +{ oid => '1413', descr => 'perpendicular', + proname => 'isperp', prorettype => 'bool', proargtypes => 'line line', + prosrc => 'line_perp' }, +{ oid => '1414', descr => 'vertical', + proname => 'isvertical', prorettype => 'bool', proargtypes => 'line', + prosrc => 'line_vertical' }, +{ oid => '1415', descr => 'horizontal', + proname => 'ishorizontal', prorettype => 'bool', proargtypes => 'line', + prosrc => 'line_horizontal' }, +{ oid => '1416', descr => 'center of', + proname => 'point', prorettype => 'point', proargtypes => 'circle', + prosrc => 'circle_center' }, + +{ oid => '1419', descr => 'convert interval to time', + proname => 'time', prorettype => 'time', proargtypes => 'interval', + prosrc => 'interval_time' }, + +{ oid => '1421', descr => 'convert points to box', + proname => 'box', prorettype => 'box', proargtypes => 'point point', + prosrc => 'points_box' }, +{ oid => '1422', + proname => 'box_add', prorettype => 'box', proargtypes => 'box point', + prosrc => 'box_add' }, +{ oid => '1423', + proname => 'box_sub', prorettype => 'box', proargtypes => 'box point', + prosrc => 'box_sub' }, +{ oid => '1424', + proname => 'box_mul', prorettype => 'box', proargtypes => 'box point', + prosrc => 'box_mul' }, +{ oid => '1425', + proname => 'box_div', prorettype => 'box', proargtypes => 'box point', + prosrc => 'box_div' }, +{ oid => '1426', + proname => 'path_contain_pt', prolang => '14', prorettype => 'bool', + proargtypes => 'path point', prosrc => 'select pg_catalog.on_ppath($2, $1)' }, +{ oid => '1428', + proname => 'poly_contain_pt', prorettype => 'bool', + proargtypes => 'polygon point', prosrc => 'poly_contain_pt' }, +{ oid => '1429', + proname => 'pt_contained_poly', prorettype => 'bool', + proargtypes => 'point polygon', prosrc => 'pt_contained_poly' }, + +{ oid => '1430', descr => 'path closed?', + proname => 'isclosed', prorettype => 'bool', proargtypes => 'path', + prosrc => 'path_isclosed' }, +{ oid => '1431', descr => 'path open?', + proname => 'isopen', prorettype => 'bool', proargtypes => 'path', + prosrc => 'path_isopen' }, +{ oid => '1432', + proname => 'path_npoints', prorettype => 'int4', proargtypes => 'path', + prosrc => 'path_npoints' }, + +# pclose and popen might better be named close and open, but 
that crashes initdb. +# - thomas 97/04/20 +{ oid => '1433', descr => 'close path', + proname => 'pclose', prorettype => 'path', proargtypes => 'path', + prosrc => 'path_close' }, +{ oid => '1434', descr => 'open path', + proname => 'popen', prorettype => 'path', proargtypes => 'path', + prosrc => 'path_open' }, + +{ oid => '1435', + proname => 'path_add', prorettype => 'path', proargtypes => 'path path', + prosrc => 'path_add' }, +{ oid => '1436', + proname => 'path_add_pt', prorettype => 'path', proargtypes => 'path point', + prosrc => 'path_add_pt' }, +{ oid => '1437', + proname => 'path_sub_pt', prorettype => 'path', proargtypes => 'path point', + prosrc => 'path_sub_pt' }, +{ oid => '1438', + proname => 'path_mul_pt', prorettype => 'path', proargtypes => 'path point', + prosrc => 'path_mul_pt' }, +{ oid => '1439', + proname => 'path_div_pt', prorettype => 'path', proargtypes => 'path point', + prosrc => 'path_div_pt' }, + +{ oid => '1440', descr => 'convert x, y to point', + proname => 'point', prorettype => 'point', proargtypes => 'float8 float8', + prosrc => 'construct_point' }, +{ oid => '1441', + proname => 'point_add', prorettype => 'point', proargtypes => 'point point', + prosrc => 'point_add' }, +{ oid => '1442', + proname => 'point_sub', prorettype => 'point', proargtypes => 'point point', + prosrc => 'point_sub' }, +{ oid => '1443', + proname => 'point_mul', prorettype => 'point', proargtypes => 'point point', + prosrc => 'point_mul' }, +{ oid => '1444', + proname => 'point_div', prorettype => 'point', proargtypes => 'point point', + prosrc => 'point_div' }, + +{ oid => '1445', + proname => 'poly_npoints', prorettype => 'int4', proargtypes => 'polygon', + prosrc => 'poly_npoints' }, +{ oid => '1446', descr => 'convert polygon to bounding box', + proname => 'box', prorettype => 'box', proargtypes => 'polygon', + prosrc => 'poly_box' }, +{ oid => '1447', descr => 'convert polygon to path', + proname => 'path', prorettype => 'path', proargtypes => 'polygon', + prosrc => 'poly_path' }, +{ oid => '1448', descr => 'convert box to polygon', + proname => 'polygon', prorettype => 'polygon', proargtypes => 'box', + prosrc => 'box_poly' }, +{ oid => '1449', descr => 'convert path to polygon', + proname => 'polygon', prorettype => 'polygon', proargtypes => 'path', + prosrc => 'path_poly' }, + +{ oid => '1450', descr => 'I/O', + proname => 'circle_in', prorettype => 'circle', proargtypes => 'cstring', + prosrc => 'circle_in' }, +{ oid => '1451', descr => 'I/O', + proname => 'circle_out', prorettype => 'cstring', proargtypes => 'circle', + prosrc => 'circle_out' }, +{ oid => '1452', + proname => 'circle_same', prorettype => 'bool', + proargtypes => 'circle circle', prosrc => 'circle_same' }, +{ oid => '1453', + proname => 'circle_contain', prorettype => 'bool', + proargtypes => 'circle circle', prosrc => 'circle_contain' }, +{ oid => '1454', + proname => 'circle_left', prorettype => 'bool', + proargtypes => 'circle circle', prosrc => 'circle_left' }, +{ oid => '1455', + proname => 'circle_overleft', prorettype => 'bool', + proargtypes => 'circle circle', prosrc => 'circle_overleft' }, +{ oid => '1456', + proname => 'circle_overright', prorettype => 'bool', + proargtypes => 'circle circle', prosrc => 'circle_overright' }, +{ oid => '1457', + proname => 'circle_right', prorettype => 'bool', + proargtypes => 'circle circle', prosrc => 'circle_right' }, +{ oid => '1458', + proname => 'circle_contained', prorettype => 'bool', + proargtypes => 'circle circle', prosrc => 'circle_contained' }, +{ 
oid => '1459', + proname => 'circle_overlap', prorettype => 'bool', + proargtypes => 'circle circle', prosrc => 'circle_overlap' }, +{ oid => '1460', + proname => 'circle_below', prorettype => 'bool', + proargtypes => 'circle circle', prosrc => 'circle_below' }, +{ oid => '1461', + proname => 'circle_above', prorettype => 'bool', + proargtypes => 'circle circle', prosrc => 'circle_above' }, +{ oid => '1462', + proname => 'circle_eq', proleakproof => 't', prorettype => 'bool', + proargtypes => 'circle circle', prosrc => 'circle_eq' }, +{ oid => '1463', + proname => 'circle_ne', proleakproof => 't', prorettype => 'bool', + proargtypes => 'circle circle', prosrc => 'circle_ne' }, +{ oid => '1464', + proname => 'circle_lt', proleakproof => 't', prorettype => 'bool', + proargtypes => 'circle circle', prosrc => 'circle_lt' }, +{ oid => '1465', + proname => 'circle_gt', proleakproof => 't', prorettype => 'bool', + proargtypes => 'circle circle', prosrc => 'circle_gt' }, +{ oid => '1466', + proname => 'circle_le', proleakproof => 't', prorettype => 'bool', + proargtypes => 'circle circle', prosrc => 'circle_le' }, +{ oid => '1467', + proname => 'circle_ge', proleakproof => 't', prorettype => 'bool', + proargtypes => 'circle circle', prosrc => 'circle_ge' }, +{ oid => '1468', descr => 'area of circle', + proname => 'area', prorettype => 'float8', proargtypes => 'circle', + prosrc => 'circle_area' }, +{ oid => '1469', descr => 'diameter of circle', + proname => 'diameter', prorettype => 'float8', proargtypes => 'circle', + prosrc => 'circle_diameter' }, +{ oid => '1470', descr => 'radius of circle', + proname => 'radius', prorettype => 'float8', proargtypes => 'circle', + prosrc => 'circle_radius' }, +{ oid => '1471', + proname => 'circle_distance', prorettype => 'float8', + proargtypes => 'circle circle', prosrc => 'circle_distance' }, +{ oid => '1472', + proname => 'circle_center', prorettype => 'point', proargtypes => 'circle', + prosrc => 'circle_center' }, +{ oid => '1473', descr => 'convert point and radius to circle', + proname => 'circle', prorettype => 'circle', proargtypes => 'point float8', + prosrc => 'cr_circle' }, +{ oid => '1474', descr => 'convert polygon to circle', + proname => 'circle', prorettype => 'circle', proargtypes => 'polygon', + prosrc => 'poly_circle' }, +{ oid => '1475', descr => 'convert vertex count and circle to polygon', + proname => 'polygon', prorettype => 'polygon', proargtypes => 'int4 circle', + prosrc => 'circle_poly' }, +{ oid => '1476', + proname => 'dist_pc', prorettype => 'float8', proargtypes => 'point circle', + prosrc => 'dist_pc' }, +{ oid => '1477', + proname => 'circle_contain_pt', prorettype => 'bool', + proargtypes => 'circle point', prosrc => 'circle_contain_pt' }, +{ oid => '1478', + proname => 'pt_contained_circle', prorettype => 'bool', + proargtypes => 'point circle', prosrc => 'pt_contained_circle' }, +{ oid => '4091', descr => 'convert point to empty box', + proname => 'box', prorettype => 'box', proargtypes => 'point', + prosrc => 'point_box' }, +{ oid => '1479', descr => 'convert box to circle', + proname => 'circle', prorettype => 'circle', proargtypes => 'box', + prosrc => 'box_circle' }, +{ oid => '1480', descr => 'convert circle to box', + proname => 'box', prorettype => 'box', proargtypes => 'circle', + prosrc => 'circle_box' }, + +{ oid => '1482', + proname => 'lseg_ne', proleakproof => 't', prorettype => 'bool', + proargtypes => 'lseg lseg', prosrc => 'lseg_ne' }, +{ oid => '1483', + proname => 'lseg_lt', proleakproof => 't', 
prorettype => 'bool', + proargtypes => 'lseg lseg', prosrc => 'lseg_lt' }, +{ oid => '1484', + proname => 'lseg_le', proleakproof => 't', prorettype => 'bool', + proargtypes => 'lseg lseg', prosrc => 'lseg_le' }, +{ oid => '1485', + proname => 'lseg_gt', proleakproof => 't', prorettype => 'bool', + proargtypes => 'lseg lseg', prosrc => 'lseg_gt' }, +{ oid => '1486', + proname => 'lseg_ge', proleakproof => 't', prorettype => 'bool', + proargtypes => 'lseg lseg', prosrc => 'lseg_ge' }, +{ oid => '1487', + proname => 'lseg_length', prorettype => 'float8', proargtypes => 'lseg', + prosrc => 'lseg_length' }, +{ oid => '1488', + proname => 'close_ls', prorettype => 'point', proargtypes => 'line lseg', + prosrc => 'close_ls' }, +{ oid => '1489', + proname => 'close_lseg', prorettype => 'point', proargtypes => 'lseg lseg', + prosrc => 'close_lseg' }, + +{ oid => '1490', descr => 'I/O', + proname => 'line_in', prorettype => 'line', proargtypes => 'cstring', + prosrc => 'line_in' }, +{ oid => '1491', descr => 'I/O', + proname => 'line_out', prorettype => 'cstring', proargtypes => 'line', + prosrc => 'line_out' }, +{ oid => '1492', + proname => 'line_eq', prorettype => 'bool', proargtypes => 'line line', + prosrc => 'line_eq' }, +{ oid => '1493', descr => 'construct line from points', + proname => 'line', prorettype => 'line', proargtypes => 'point point', + prosrc => 'line_construct_pp' }, +{ oid => '1494', + proname => 'line_interpt', prorettype => 'point', proargtypes => 'line line', + prosrc => 'line_interpt' }, +{ oid => '1495', + proname => 'line_intersect', prorettype => 'bool', proargtypes => 'line line', + prosrc => 'line_intersect' }, +{ oid => '1496', + proname => 'line_parallel', prorettype => 'bool', proargtypes => 'line line', + prosrc => 'line_parallel' }, +{ oid => '1497', + proname => 'line_perp', prorettype => 'bool', proargtypes => 'line line', + prosrc => 'line_perp' }, +{ oid => '1498', + proname => 'line_vertical', prorettype => 'bool', proargtypes => 'line', + prosrc => 'line_vertical' }, +{ oid => '1499', + proname => 'line_horizontal', prorettype => 'bool', proargtypes => 'line', + prosrc => 'line_horizontal' }, + +# OIDS 1500 - 1599 + +{ oid => '1530', descr => 'distance between endpoints', + proname => 'length', prorettype => 'float8', proargtypes => 'lseg', + prosrc => 'lseg_length' }, +{ oid => '1531', descr => 'sum of path segments', + proname => 'length', prorettype => 'float8', proargtypes => 'path', + prosrc => 'path_length' }, + +{ oid => '1532', descr => 'center of', + proname => 'point', prorettype => 'point', proargtypes => 'lseg', + prosrc => 'lseg_center' }, +{ oid => '1533', descr => 'center of', + proname => 'point', prorettype => 'point', proargtypes => 'path', + prosrc => 'path_center' }, +{ oid => '1534', descr => 'center of', + proname => 'point', prorettype => 'point', proargtypes => 'box', + prosrc => 'box_center' }, +{ oid => '1540', descr => 'center of', + proname => 'point', prorettype => 'point', proargtypes => 'polygon', + prosrc => 'poly_center' }, +{ oid => '1541', descr => 'diagonal of', + proname => 'lseg', prorettype => 'lseg', proargtypes => 'box', + prosrc => 'box_diagonal' }, +{ oid => '1542', descr => 'center of', + proname => 'center', prorettype => 'point', proargtypes => 'box', + prosrc => 'box_center' }, +{ oid => '1543', descr => 'center of', + proname => 'center', prorettype => 'point', proargtypes => 'circle', + prosrc => 'circle_center' }, +{ oid => '1544', descr => 'convert circle to 12-vertex polygon', + proname => 'polygon', 
prolang => '14', prorettype => 'polygon', + proargtypes => 'circle', prosrc => 'select pg_catalog.polygon(12, $1)' }, +{ oid => '1545', descr => 'number of points', + proname => 'npoints', prorettype => 'int4', proargtypes => 'path', + prosrc => 'path_npoints' }, +{ oid => '1556', descr => 'number of points', + proname => 'npoints', prorettype => 'int4', proargtypes => 'polygon', + prosrc => 'poly_npoints' }, + +{ oid => '1564', descr => 'I/O', + proname => 'bit_in', prorettype => 'bit', proargtypes => 'cstring oid int4', + prosrc => 'bit_in' }, +{ oid => '1565', descr => 'I/O', + proname => 'bit_out', prorettype => 'cstring', proargtypes => 'bit', + prosrc => 'bit_out' }, +{ oid => '2919', descr => 'I/O typmod', + proname => 'bittypmodin', prorettype => 'int4', proargtypes => '_cstring', + prosrc => 'bittypmodin' }, +{ oid => '2920', descr => 'I/O typmod', + proname => 'bittypmodout', prorettype => 'cstring', proargtypes => 'int4', + prosrc => 'bittypmodout' }, + +{ oid => '1569', descr => 'matches LIKE expression', + proname => 'like', prorettype => 'bool', proargtypes => 'text text', + prosrc => 'textlike' }, +{ oid => '1570', descr => 'does not match LIKE expression', + proname => 'notlike', prorettype => 'bool', proargtypes => 'text text', + prosrc => 'textnlike' }, +{ oid => '1571', descr => 'matches LIKE expression', + proname => 'like', prorettype => 'bool', proargtypes => 'name text', + prosrc => 'namelike' }, +{ oid => '1572', descr => 'does not match LIKE expression', + proname => 'notlike', prorettype => 'bool', proargtypes => 'name text', + prosrc => 'namenlike' }, + +# SEQUENCE functions +{ oid => '1574', descr => 'sequence next value', + proname => 'nextval', provolatile => 'v', proparallel => 'u', + prorettype => 'int8', proargtypes => 'regclass', prosrc => 'nextval_oid' }, +{ oid => '1575', descr => 'sequence current value', + proname => 'currval', provolatile => 'v', proparallel => 'u', + prorettype => 'int8', proargtypes => 'regclass', prosrc => 'currval_oid' }, +{ oid => '1576', descr => 'set sequence value', + proname => 'setval', provolatile => 'v', proparallel => 'u', + prorettype => 'int8', proargtypes => 'regclass int8', + prosrc => 'setval_oid' }, +{ oid => '1765', descr => 'set sequence value and is_called status', + proname => 'setval', provolatile => 'v', proparallel => 'u', + prorettype => 'int8', proargtypes => 'regclass int8 bool', + prosrc => 'setval3_oid' }, +{ oid => '3078', + descr => 'sequence parameters, for use by information schema', + proname => 'pg_sequence_parameters', provolatile => 's', + prorettype => 'record', proargtypes => 'oid', + proallargtypes => '{oid,int8,int8,int8,int8,bool,int8,oid}', + proargmodes => '{i,o,o,o,o,o,o,o}', + proargnames => '{sequence_oid,start_value,minimum_value,maximum_value,increment,cycle_option,cache_size,data_type}', + prosrc => 'pg_sequence_parameters' }, +{ oid => '4032', descr => 'sequence last value', + proname => 'pg_sequence_last_value', provolatile => 'v', proparallel => 'u', + prorettype => 'int8', proargtypes => 'regclass', + prosrc => 'pg_sequence_last_value' }, + +{ oid => '1579', descr => 'I/O', + proname => 'varbit_in', prorettype => 'varbit', + proargtypes => 'cstring oid int4', prosrc => 'varbit_in' }, +{ oid => '1580', descr => 'I/O', + proname => 'varbit_out', prorettype => 'cstring', proargtypes => 'varbit', + prosrc => 'varbit_out' }, +{ oid => '2902', descr => 'I/O typmod', + proname => 'varbittypmodin', prorettype => 'int4', proargtypes => '_cstring', + prosrc => 'varbittypmodin' }, +{ oid => 
'2921', descr => 'I/O typmod', + proname => 'varbittypmodout', prorettype => 'cstring', proargtypes => 'int4', + prosrc => 'varbittypmodout' }, + +{ oid => '1581', + proname => 'biteq', proleakproof => 't', prorettype => 'bool', + proargtypes => 'bit bit', prosrc => 'biteq' }, +{ oid => '1582', + proname => 'bitne', proleakproof => 't', prorettype => 'bool', + proargtypes => 'bit bit', prosrc => 'bitne' }, +{ oid => '1592', + proname => 'bitge', proleakproof => 't', prorettype => 'bool', + proargtypes => 'bit bit', prosrc => 'bitge' }, +{ oid => '1593', + proname => 'bitgt', proleakproof => 't', prorettype => 'bool', + proargtypes => 'bit bit', prosrc => 'bitgt' }, +{ oid => '1594', + proname => 'bitle', proleakproof => 't', prorettype => 'bool', + proargtypes => 'bit bit', prosrc => 'bitle' }, +{ oid => '1595', + proname => 'bitlt', proleakproof => 't', prorettype => 'bool', + proargtypes => 'bit bit', prosrc => 'bitlt' }, +{ oid => '1596', descr => 'less-equal-greater', + proname => 'bitcmp', proleakproof => 't', prorettype => 'int4', + proargtypes => 'bit bit', prosrc => 'bitcmp' }, + +{ oid => '1598', descr => 'random value', + proname => 'random', provolatile => 'v', proparallel => 'r', + prorettype => 'float8', proargtypes => '', prosrc => 'drandom' }, +{ oid => '1599', descr => 'set random seed', + proname => 'setseed', provolatile => 'v', proparallel => 'r', + prorettype => 'void', proargtypes => 'float8', prosrc => 'setseed' }, + +# OIDS 1600 - 1699 + +{ oid => '1600', descr => 'arcsine', + proname => 'asin', prorettype => 'float8', proargtypes => 'float8', + prosrc => 'dasin' }, +{ oid => '1601', descr => 'arccosine', + proname => 'acos', prorettype => 'float8', proargtypes => 'float8', + prosrc => 'dacos' }, +{ oid => '1602', descr => 'arctangent', + proname => 'atan', prorettype => 'float8', proargtypes => 'float8', + prosrc => 'datan' }, +{ oid => '1603', descr => 'arctangent, two arguments', + proname => 'atan2', prorettype => 'float8', proargtypes => 'float8 float8', + prosrc => 'datan2' }, +{ oid => '1604', descr => 'sine', + proname => 'sin', prorettype => 'float8', proargtypes => 'float8', + prosrc => 'dsin' }, +{ oid => '1605', descr => 'cosine', + proname => 'cos', prorettype => 'float8', proargtypes => 'float8', + prosrc => 'dcos' }, +{ oid => '1606', descr => 'tangent', + proname => 'tan', prorettype => 'float8', proargtypes => 'float8', + prosrc => 'dtan' }, +{ oid => '1607', descr => 'cotangent', + proname => 'cot', prorettype => 'float8', proargtypes => 'float8', + prosrc => 'dcot' }, + +{ oid => '2731', descr => 'arcsine, degrees', + proname => 'asind', prorettype => 'float8', proargtypes => 'float8', + prosrc => 'dasind' }, +{ oid => '2732', descr => 'arccosine, degrees', + proname => 'acosd', prorettype => 'float8', proargtypes => 'float8', + prosrc => 'dacosd' }, +{ oid => '2733', descr => 'arctangent, degrees', + proname => 'atand', prorettype => 'float8', proargtypes => 'float8', + prosrc => 'datand' }, +{ oid => '2734', descr => 'arctangent, two arguments, degrees', + proname => 'atan2d', prorettype => 'float8', proargtypes => 'float8 float8', + prosrc => 'datan2d' }, +{ oid => '2735', descr => 'sine, degrees', + proname => 'sind', prorettype => 'float8', proargtypes => 'float8', + prosrc => 'dsind' }, +{ oid => '2736', descr => 'cosine, degrees', + proname => 'cosd', prorettype => 'float8', proargtypes => 'float8', + prosrc => 'dcosd' }, +{ oid => '2737', descr => 'tangent, degrees', + proname => 'tand', prorettype => 'float8', proargtypes => 'float8', + 
prosrc => 'dtand' }, +{ oid => '2738', descr => 'cotangent, degrees', + proname => 'cotd', prorettype => 'float8', proargtypes => 'float8', + prosrc => 'dcotd' }, + +{ oid => '1608', descr => 'radians to degrees', + proname => 'degrees', prorettype => 'float8', proargtypes => 'float8', + prosrc => 'degrees' }, +{ oid => '1609', descr => 'degrees to radians', + proname => 'radians', prorettype => 'float8', proargtypes => 'float8', + prosrc => 'radians' }, +{ oid => '1610', descr => 'PI', + proname => 'pi', prorettype => 'float8', proargtypes => '', prosrc => 'dpi' }, + +{ oid => '1618', + proname => 'interval_mul', prorettype => 'interval', + proargtypes => 'interval float8', prosrc => 'interval_mul' }, + +{ oid => '1620', descr => 'convert first char to int4', + proname => 'ascii', prorettype => 'int4', proargtypes => 'text', + prosrc => 'ascii' }, +{ oid => '1621', descr => 'convert int4 to char', + proname => 'chr', prorettype => 'text', proargtypes => 'int4', + prosrc => 'chr' }, +{ oid => '1622', descr => 'replicate string n times', + proname => 'repeat', prorettype => 'text', proargtypes => 'text int4', + prosrc => 'repeat' }, + +{ oid => '1623', descr => 'convert SQL99 regexp pattern to POSIX style', + proname => 'similar_escape', proisstrict => 'f', prorettype => 'text', + proargtypes => 'text text', prosrc => 'similar_escape' }, + +{ oid => '1624', + proname => 'mul_d_interval', prorettype => 'interval', + proargtypes => 'float8 interval', prosrc => 'mul_d_interval' }, + +{ oid => '1631', + proname => 'bpcharlike', prorettype => 'bool', proargtypes => 'bpchar text', + prosrc => 'textlike' }, +{ oid => '1632', + proname => 'bpcharnlike', prorettype => 'bool', proargtypes => 'bpchar text', + prosrc => 'textnlike' }, + +{ oid => '1633', + proname => 'texticlike', prorettype => 'bool', proargtypes => 'text text', + prosrc => 'texticlike' }, +{ oid => '1634', + proname => 'texticnlike', prorettype => 'bool', proargtypes => 'text text', + prosrc => 'texticnlike' }, +{ oid => '1635', + proname => 'nameiclike', prorettype => 'bool', proargtypes => 'name text', + prosrc => 'nameiclike' }, +{ oid => '1636', + proname => 'nameicnlike', prorettype => 'bool', proargtypes => 'name text', + prosrc => 'nameicnlike' }, +{ oid => '1637', descr => 'convert LIKE pattern to use backslash escapes', + proname => 'like_escape', prorettype => 'text', proargtypes => 'text text', + prosrc => 'like_escape' }, + +{ oid => '1656', + proname => 'bpcharicregexeq', prorettype => 'bool', + proargtypes => 'bpchar text', prosrc => 'texticregexeq' }, +{ oid => '1657', + proname => 'bpcharicregexne', prorettype => 'bool', + proargtypes => 'bpchar text', prosrc => 'texticregexne' }, +{ oid => '1658', + proname => 'bpcharregexeq', prorettype => 'bool', + proargtypes => 'bpchar text', prosrc => 'textregexeq' }, +{ oid => '1659', + proname => 'bpcharregexne', prorettype => 'bool', + proargtypes => 'bpchar text', prosrc => 'textregexne' }, +{ oid => '1660', + proname => 'bpchariclike', prorettype => 'bool', proargtypes => 'bpchar text', + prosrc => 'texticlike' }, +{ oid => '1661', + proname => 'bpcharicnlike', prorettype => 'bool', + proargtypes => 'bpchar text', prosrc => 'texticnlike' }, + +# Oracle Compatibility Related Functions - By Edmund Mergl +{ oid => '868', descr => 'position of substring', + proname => 'strpos', prorettype => 'int4', proargtypes => 'text text', + prosrc => 'textpos' }, +{ oid => '870', descr => 'lowercase', + proname => 'lower', prorettype => 'text', proargtypes => 'text', + prosrc => 'lower' }, 
+{ oid => '871', descr => 'uppercase', + proname => 'upper', prorettype => 'text', proargtypes => 'text', + prosrc => 'upper' }, +{ oid => '872', descr => 'capitalize each word', + proname => 'initcap', prorettype => 'text', proargtypes => 'text', + prosrc => 'initcap' }, +{ oid => '873', descr => 'left-pad string to length', + proname => 'lpad', prorettype => 'text', proargtypes => 'text int4 text', + prosrc => 'lpad' }, +{ oid => '874', descr => 'right-pad string to length', + proname => 'rpad', prorettype => 'text', proargtypes => 'text int4 text', + prosrc => 'rpad' }, +{ oid => '875', descr => 'trim selected characters from left end of string', + proname => 'ltrim', prorettype => 'text', proargtypes => 'text text', + prosrc => 'ltrim' }, +{ oid => '876', descr => 'trim selected characters from right end of string', + proname => 'rtrim', prorettype => 'text', proargtypes => 'text text', + prosrc => 'rtrim' }, +{ oid => '877', descr => 'extract portion of string', + proname => 'substr', prorettype => 'text', proargtypes => 'text int4 int4', + prosrc => 'text_substr' }, +{ oid => '878', descr => 'map a set of characters appearing in string', + proname => 'translate', prorettype => 'text', proargtypes => 'text text text', + prosrc => 'translate' }, +{ oid => '879', descr => 'left-pad string to length', + proname => 'lpad', prolang => '14', prorettype => 'text', + proargtypes => 'text int4', + prosrc => 'select pg_catalog.lpad($1, $2, \' \')' }, +{ oid => '880', descr => 'right-pad string to length', + proname => 'rpad', prolang => '14', prorettype => 'text', + proargtypes => 'text int4', + prosrc => 'select pg_catalog.rpad($1, $2, \' \')' }, +{ oid => '881', descr => 'trim spaces from left end of string', + proname => 'ltrim', prorettype => 'text', proargtypes => 'text', + prosrc => 'ltrim1' }, +{ oid => '882', descr => 'trim spaces from right end of string', + proname => 'rtrim', prorettype => 'text', proargtypes => 'text', + prosrc => 'rtrim1' }, +{ oid => '883', descr => 'extract portion of string', + proname => 'substr', prorettype => 'text', proargtypes => 'text int4', + prosrc => 'text_substr_no_len' }, +{ oid => '884', descr => 'trim selected characters from both ends of string', + proname => 'btrim', prorettype => 'text', proargtypes => 'text text', + prosrc => 'btrim' }, +{ oid => '885', descr => 'trim spaces from both ends of string', + proname => 'btrim', prorettype => 'text', proargtypes => 'text', + prosrc => 'btrim1' }, + +{ oid => '936', descr => 'extract portion of string', + proname => 'substring', prorettype => 'text', proargtypes => 'text int4 int4', + prosrc => 'text_substr' }, +{ oid => '937', descr => 'extract portion of string', + proname => 'substring', prorettype => 'text', proargtypes => 'text int4', + prosrc => 'text_substr_no_len' }, +{ oid => '2087', + descr => 'replace all occurrences in string of old_substr with new_substr', + proname => 'replace', prorettype => 'text', proargtypes => 'text text text', + prosrc => 'replace_text' }, +{ oid => '2284', descr => 'replace text using regexp', + proname => 'regexp_replace', prorettype => 'text', + proargtypes => 'text text text', prosrc => 'textregexreplace_noopt' }, +{ oid => '2285', descr => 'replace text using regexp', + proname => 'regexp_replace', prorettype => 'text', + proargtypes => 'text text text text', prosrc => 'textregexreplace' }, +{ oid => '3396', descr => 'find first match for regexp', + proname => 'regexp_match', prorettype => '_text', proargtypes => 'text text', + prosrc => 'regexp_match_no_flags' 
}, +{ oid => '3397', descr => 'find first match for regexp', + proname => 'regexp_match', prorettype => '_text', + proargtypes => 'text text text', prosrc => 'regexp_match' }, +{ oid => '2763', descr => 'find match(es) for regexp', + proname => 'regexp_matches', prorows => '1', proretset => 't', + prorettype => '_text', proargtypes => 'text text', + prosrc => 'regexp_matches_no_flags' }, +{ oid => '2764', descr => 'find match(es) for regexp', + proname => 'regexp_matches', prorows => '10', proretset => 't', + prorettype => '_text', proargtypes => 'text text text', + prosrc => 'regexp_matches' }, +{ oid => '2088', descr => 'split string by field_sep and return field_num', + proname => 'split_part', prorettype => 'text', + proargtypes => 'text text int4', prosrc => 'split_text' }, +{ oid => '2765', descr => 'split string by pattern', + proname => 'regexp_split_to_table', prorows => '1000', proretset => 't', + prorettype => 'text', proargtypes => 'text text', + prosrc => 'regexp_split_to_table_no_flags' }, +{ oid => '2766', descr => 'split string by pattern', + proname => 'regexp_split_to_table', prorows => '1000', proretset => 't', + prorettype => 'text', proargtypes => 'text text text', + prosrc => 'regexp_split_to_table' }, +{ oid => '2767', descr => 'split string by pattern', + proname => 'regexp_split_to_array', prorettype => '_text', + proargtypes => 'text text', prosrc => 'regexp_split_to_array_no_flags' }, +{ oid => '2768', descr => 'split string by pattern', + proname => 'regexp_split_to_array', prorettype => '_text', + proargtypes => 'text text text', prosrc => 'regexp_split_to_array' }, +{ oid => '2089', descr => 'convert int4 number to hex', + proname => 'to_hex', prorettype => 'text', proargtypes => 'int4', + prosrc => 'to_hex32' }, +{ oid => '2090', descr => 'convert int8 number to hex', + proname => 'to_hex', prorettype => 'text', proargtypes => 'int8', + prosrc => 'to_hex64' }, + +# for character set encoding support + +# return database encoding name +{ oid => '1039', descr => 'encoding name of current database', + proname => 'getdatabaseencoding', provolatile => 's', prorettype => 'name', + proargtypes => '', prosrc => 'getdatabaseencoding' }, + +# return client encoding name i.e. 
session encoding +{ oid => '810', descr => 'encoding name of current database', + proname => 'pg_client_encoding', provolatile => 's', prorettype => 'name', + proargtypes => '', prosrc => 'pg_client_encoding' }, + +{ oid => '1713', descr => 'length of string in specified encoding', + proname => 'length', provolatile => 's', prorettype => 'int4', + proargtypes => 'bytea name', prosrc => 'length_in_encoding' }, + +{ oid => '1714', + descr => 'convert string with specified source encoding name', + proname => 'convert_from', provolatile => 's', prorettype => 'text', + proargtypes => 'bytea name', prosrc => 'pg_convert_from' }, + +{ oid => '1717', + descr => 'convert string with specified destination encoding name', + proname => 'convert_to', provolatile => 's', prorettype => 'bytea', + proargtypes => 'text name', prosrc => 'pg_convert_to' }, + +{ oid => '1813', descr => 'convert string with specified encoding names', + proname => 'convert', provolatile => 's', prorettype => 'bytea', + proargtypes => 'bytea name name', prosrc => 'pg_convert' }, + +{ oid => '1264', descr => 'convert encoding name to encoding id', + proname => 'pg_char_to_encoding', provolatile => 's', prorettype => 'int4', + proargtypes => 'name', prosrc => 'PG_char_to_encoding' }, + +{ oid => '1597', descr => 'convert encoding id to encoding name', + proname => 'pg_encoding_to_char', provolatile => 's', prorettype => 'name', + proargtypes => 'int4', prosrc => 'PG_encoding_to_char' }, + +{ oid => '2319', + descr => 'maximum octet length of a character in given encoding', + proname => 'pg_encoding_max_length', prorettype => 'int4', + proargtypes => 'int4', prosrc => 'pg_encoding_max_length_sql' }, + +{ oid => '1638', + proname => 'oidgt', proleakproof => 't', prorettype => 'bool', + proargtypes => 'oid oid', prosrc => 'oidgt' }, +{ oid => '1639', + proname => 'oidge', proleakproof => 't', prorettype => 'bool', + proargtypes => 'oid oid', prosrc => 'oidge' }, + +# System-view support functions +{ oid => '1573', descr => 'source text of a rule', + proname => 'pg_get_ruledef', provolatile => 's', prorettype => 'text', + proargtypes => 'oid', prosrc => 'pg_get_ruledef' }, +{ oid => '1640', descr => 'select statement of a view', + proname => 'pg_get_viewdef', provolatile => 's', proparallel => 'r', + prorettype => 'text', proargtypes => 'text', + prosrc => 'pg_get_viewdef_name' }, +{ oid => '1641', descr => 'select statement of a view', + proname => 'pg_get_viewdef', provolatile => 's', proparallel => 'r', + prorettype => 'text', proargtypes => 'oid', prosrc => 'pg_get_viewdef' }, +{ oid => '1642', descr => 'role name by OID (with fallback)', + proname => 'pg_get_userbyid', provolatile => 's', prorettype => 'name', + proargtypes => 'oid', prosrc => 'pg_get_userbyid' }, +{ oid => '1643', descr => 'index description', + proname => 'pg_get_indexdef', provolatile => 's', prorettype => 'text', + proargtypes => 'oid', prosrc => 'pg_get_indexdef' }, +{ oid => '3415', descr => 'extended statistics object description', + proname => 'pg_get_statisticsobjdef', provolatile => 's', + prorettype => 'text', proargtypes => 'oid', + prosrc => 'pg_get_statisticsobjdef' }, +{ oid => '3352', descr => 'partition key description', + proname => 'pg_get_partkeydef', provolatile => 's', prorettype => 'text', + proargtypes => 'oid', prosrc => 'pg_get_partkeydef' }, +{ oid => '3408', descr => 'partition constraint description', + proname => 'pg_get_partition_constraintdef', provolatile => 's', + prorettype => 'text', proargtypes => 'oid', + prosrc => 
'pg_get_partition_constraintdef' }, +{ oid => '1662', descr => 'trigger description', + proname => 'pg_get_triggerdef', provolatile => 's', prorettype => 'text', + proargtypes => 'oid', prosrc => 'pg_get_triggerdef' }, +{ oid => '1387', descr => 'constraint description', + proname => 'pg_get_constraintdef', provolatile => 's', prorettype => 'text', + proargtypes => 'oid', prosrc => 'pg_get_constraintdef' }, +{ oid => '1716', descr => 'deparse an encoded expression', + proname => 'pg_get_expr', provolatile => 's', prorettype => 'text', + proargtypes => 'pg_node_tree oid', prosrc => 'pg_get_expr' }, +{ oid => '1665', descr => 'name of sequence for a serial column', + proname => 'pg_get_serial_sequence', provolatile => 's', prorettype => 'text', + proargtypes => 'text text', prosrc => 'pg_get_serial_sequence' }, +{ oid => '2098', descr => 'definition of a function', + proname => 'pg_get_functiondef', provolatile => 's', prorettype => 'text', + proargtypes => 'oid', prosrc => 'pg_get_functiondef' }, +{ oid => '2162', descr => 'argument list of a function', + proname => 'pg_get_function_arguments', provolatile => 's', + prorettype => 'text', proargtypes => 'oid', + prosrc => 'pg_get_function_arguments' }, +{ oid => '2232', descr => 'identity argument list of a function', + proname => 'pg_get_function_identity_arguments', provolatile => 's', + prorettype => 'text', proargtypes => 'oid', + prosrc => 'pg_get_function_identity_arguments' }, +{ oid => '2165', descr => 'result type of a function', + proname => 'pg_get_function_result', provolatile => 's', prorettype => 'text', + proargtypes => 'oid', prosrc => 'pg_get_function_result' }, +{ oid => '3808', descr => 'function argument default', + proname => 'pg_get_function_arg_default', provolatile => 's', + prorettype => 'text', proargtypes => 'oid int4', + prosrc => 'pg_get_function_arg_default' }, + +{ oid => '1686', descr => 'list of SQL keywords', + proname => 'pg_get_keywords', procost => '10', prorows => '400', + proretset => 't', provolatile => 's', prorettype => 'record', + proargtypes => '', proallargtypes => '{text,char,text}', + proargmodes => '{o,o,o}', proargnames => '{word,catcode,catdesc}', + prosrc => 'pg_get_keywords' }, + +{ oid => '2289', descr => 'convert generic options array to name/value table', + proname => 'pg_options_to_table', prorows => '3', proretset => 't', + provolatile => 's', prorettype => 'record', proargtypes => '_text', + proallargtypes => '{_text,text,text}', proargmodes => '{i,o,o}', + proargnames => '{options_array,option_name,option_value}', + prosrc => 'pg_options_to_table' }, + +{ oid => '1619', descr => 'type of the argument', + proname => 'pg_typeof', proisstrict => 'f', provolatile => 's', + prorettype => 'regtype', proargtypes => 'any', prosrc => 'pg_typeof' }, +{ oid => '3162', + descr => 'collation of the argument; implementation of the COLLATION FOR expression', + proname => 'pg_collation_for', proisstrict => 'f', provolatile => 's', + prorettype => 'text', proargtypes => 'any', prosrc => 'pg_collation_for' }, + +{ oid => '3842', descr => 'is a relation insertable/updatable/deletable', + proname => 'pg_relation_is_updatable', procost => '10', provolatile => 's', + prorettype => 'int4', proargtypes => 'regclass bool', + prosrc => 'pg_relation_is_updatable' }, +{ oid => '3843', descr => 'is a column updatable', + proname => 'pg_column_is_updatable', procost => '10', provolatile => 's', + prorettype => 'bool', proargtypes => 'regclass int2 bool', + prosrc => 'pg_column_is_updatable' }, + +{ oid => '6120', 
descr => 'oid of replica identity index if any', + proname => 'pg_get_replica_identity_index', procost => '10', + provolatile => 's', prorettype => 'regclass', proargtypes => 'regclass', + prosrc => 'pg_get_replica_identity_index' }, + +# Deferrable unique constraint trigger +{ oid => '1250', descr => 'deferred UNIQUE constraint check', + proname => 'unique_key_recheck', provolatile => 'v', prorettype => 'trigger', + proargtypes => '', prosrc => 'unique_key_recheck' }, + +# Generic referential integrity constraint triggers +{ oid => '1644', descr => 'referential integrity FOREIGN KEY ... REFERENCES', + proname => 'RI_FKey_check_ins', provolatile => 'v', prorettype => 'trigger', + proargtypes => '', prosrc => 'RI_FKey_check_ins' }, +{ oid => '1645', descr => 'referential integrity FOREIGN KEY ... REFERENCES', + proname => 'RI_FKey_check_upd', provolatile => 'v', prorettype => 'trigger', + proargtypes => '', prosrc => 'RI_FKey_check_upd' }, +{ oid => '1646', descr => 'referential integrity ON DELETE CASCADE', + proname => 'RI_FKey_cascade_del', provolatile => 'v', prorettype => 'trigger', + proargtypes => '', prosrc => 'RI_FKey_cascade_del' }, +{ oid => '1647', descr => 'referential integrity ON UPDATE CASCADE', + proname => 'RI_FKey_cascade_upd', provolatile => 'v', prorettype => 'trigger', + proargtypes => '', prosrc => 'RI_FKey_cascade_upd' }, +{ oid => '1648', descr => 'referential integrity ON DELETE RESTRICT', + proname => 'RI_FKey_restrict_del', provolatile => 'v', + prorettype => 'trigger', proargtypes => '', + prosrc => 'RI_FKey_restrict_del' }, +{ oid => '1649', descr => 'referential integrity ON UPDATE RESTRICT', + proname => 'RI_FKey_restrict_upd', provolatile => 'v', + prorettype => 'trigger', proargtypes => '', + prosrc => 'RI_FKey_restrict_upd' }, +{ oid => '1650', descr => 'referential integrity ON DELETE SET NULL', + proname => 'RI_FKey_setnull_del', provolatile => 'v', prorettype => 'trigger', + proargtypes => '', prosrc => 'RI_FKey_setnull_del' }, +{ oid => '1651', descr => 'referential integrity ON UPDATE SET NULL', + proname => 'RI_FKey_setnull_upd', provolatile => 'v', prorettype => 'trigger', + proargtypes => '', prosrc => 'RI_FKey_setnull_upd' }, +{ oid => '1652', descr => 'referential integrity ON DELETE SET DEFAULT', + proname => 'RI_FKey_setdefault_del', provolatile => 'v', + prorettype => 'trigger', proargtypes => '', + prosrc => 'RI_FKey_setdefault_del' }, +{ oid => '1653', descr => 'referential integrity ON UPDATE SET DEFAULT', + proname => 'RI_FKey_setdefault_upd', provolatile => 'v', + prorettype => 'trigger', proargtypes => '', + prosrc => 'RI_FKey_setdefault_upd' }, +{ oid => '1654', descr => 'referential integrity ON DELETE NO ACTION', + proname => 'RI_FKey_noaction_del', provolatile => 'v', + prorettype => 'trigger', proargtypes => '', + prosrc => 'RI_FKey_noaction_del' }, +{ oid => '1655', descr => 'referential integrity ON UPDATE NO ACTION', + proname => 'RI_FKey_noaction_upd', provolatile => 'v', + prorettype => 'trigger', proargtypes => '', + prosrc => 'RI_FKey_noaction_upd' }, + +{ oid => '1666', + proname => 'varbiteq', proleakproof => 't', prorettype => 'bool', + proargtypes => 'varbit varbit', prosrc => 'biteq' }, +{ oid => '1667', + proname => 'varbitne', proleakproof => 't', prorettype => 'bool', + proargtypes => 'varbit varbit', prosrc => 'bitne' }, +{ oid => '1668', + proname => 'varbitge', proleakproof => 't', prorettype => 'bool', + proargtypes => 'varbit varbit', prosrc => 'bitge' }, +{ oid => '1669', + proname => 'varbitgt', proleakproof => 
't', prorettype => 'bool', + proargtypes => 'varbit varbit', prosrc => 'bitgt' }, +{ oid => '1670', + proname => 'varbitle', proleakproof => 't', prorettype => 'bool', + proargtypes => 'varbit varbit', prosrc => 'bitle' }, +{ oid => '1671', + proname => 'varbitlt', proleakproof => 't', prorettype => 'bool', + proargtypes => 'varbit varbit', prosrc => 'bitlt' }, +{ oid => '1672', descr => 'less-equal-greater', + proname => 'varbitcmp', proleakproof => 't', prorettype => 'int4', + proargtypes => 'varbit varbit', prosrc => 'bitcmp' }, + +# avoid the C names bitand and bitor, since they are C++ keywords +{ oid => '1673', + proname => 'bitand', prorettype => 'bit', proargtypes => 'bit bit', + prosrc => 'bit_and' }, +{ oid => '1674', + proname => 'bitor', prorettype => 'bit', proargtypes => 'bit bit', + prosrc => 'bit_or' }, +{ oid => '1675', + proname => 'bitxor', prorettype => 'bit', proargtypes => 'bit bit', + prosrc => 'bitxor' }, +{ oid => '1676', + proname => 'bitnot', prorettype => 'bit', proargtypes => 'bit', + prosrc => 'bitnot' }, +{ oid => '1677', + proname => 'bitshiftleft', prorettype => 'bit', proargtypes => 'bit int4', + prosrc => 'bitshiftleft' }, +{ oid => '1678', + proname => 'bitshiftright', prorettype => 'bit', proargtypes => 'bit int4', + prosrc => 'bitshiftright' }, +{ oid => '1679', + proname => 'bitcat', prorettype => 'varbit', proargtypes => 'varbit varbit', + prosrc => 'bitcat' }, +{ oid => '1680', descr => 'extract portion of bitstring', + proname => 'substring', prorettype => 'bit', proargtypes => 'bit int4 int4', + prosrc => 'bitsubstr' }, +{ oid => '1681', descr => 'bitstring length', + proname => 'length', prorettype => 'int4', proargtypes => 'bit', + prosrc => 'bitlength' }, +{ oid => '1682', descr => 'octet length', + proname => 'octet_length', prorettype => 'int4', proargtypes => 'bit', + prosrc => 'bitoctetlength' }, +{ oid => '1683', descr => 'convert int4 to bitstring', + proname => 'bit', prorettype => 'bit', proargtypes => 'int4 int4', + prosrc => 'bitfromint4' }, +{ oid => '1684', descr => 'convert bitstring to int4', + proname => 'int4', prorettype => 'int4', proargtypes => 'bit', + prosrc => 'bittoint4' }, + +{ oid => '1685', descr => 'adjust bit() to typmod length', + proname => 'bit', prorettype => 'bit', proargtypes => 'bit int4 bool', + prosrc => 'bit' }, +{ oid => '3158', descr => 'transform a varbit length coercion', + proname => 'varbit_transform', prorettype => 'internal', + proargtypes => 'internal', prosrc => 'varbit_transform' }, +{ oid => '1687', descr => 'adjust varbit() to typmod length', + proname => 'varbit', protransform => 'varbit_transform', + prorettype => 'varbit', proargtypes => 'varbit int4 bool', + prosrc => 'varbit' }, + +{ oid => '1698', descr => 'position of sub-bitstring', + proname => 'position', prorettype => 'int4', proargtypes => 'bit bit', + prosrc => 'bitposition' }, +{ oid => '1699', descr => 'extract portion of bitstring', + proname => 'substring', prorettype => 'bit', proargtypes => 'bit int4', + prosrc => 'bitsubstr_no_len' }, + +{ oid => '3030', descr => 'substitute portion of bitstring', + proname => 'overlay', prorettype => 'bit', proargtypes => 'bit bit int4 int4', + prosrc => 'bitoverlay' }, +{ oid => '3031', descr => 'substitute portion of bitstring', + proname => 'overlay', prorettype => 'bit', proargtypes => 'bit bit int4', + prosrc => 'bitoverlay_no_len' }, +{ oid => '3032', descr => 'get bit', + proname => 'get_bit', prorettype => 'int4', proargtypes => 'bit int4', + prosrc => 'bitgetbit' }, +{ oid => 
'3033', descr => 'set bit', + proname => 'set_bit', prorettype => 'bit', proargtypes => 'bit int4 int4', + prosrc => 'bitsetbit' }, + +# for macaddr type support +{ oid => '436', descr => 'I/O', + proname => 'macaddr_in', prorettype => 'macaddr', proargtypes => 'cstring', + prosrc => 'macaddr_in' }, +{ oid => '437', descr => 'I/O', + proname => 'macaddr_out', prorettype => 'cstring', proargtypes => 'macaddr', + prosrc => 'macaddr_out' }, + +{ oid => '753', descr => 'MACADDR manufacturer fields', + proname => 'trunc', prorettype => 'macaddr', proargtypes => 'macaddr', + prosrc => 'macaddr_trunc' }, + +{ oid => '830', + proname => 'macaddr_eq', proleakproof => 't', prorettype => 'bool', + proargtypes => 'macaddr macaddr', prosrc => 'macaddr_eq' }, +{ oid => '831', + proname => 'macaddr_lt', proleakproof => 't', prorettype => 'bool', + proargtypes => 'macaddr macaddr', prosrc => 'macaddr_lt' }, +{ oid => '832', + proname => 'macaddr_le', proleakproof => 't', prorettype => 'bool', + proargtypes => 'macaddr macaddr', prosrc => 'macaddr_le' }, +{ oid => '833', + proname => 'macaddr_gt', proleakproof => 't', prorettype => 'bool', + proargtypes => 'macaddr macaddr', prosrc => 'macaddr_gt' }, +{ oid => '834', + proname => 'macaddr_ge', proleakproof => 't', prorettype => 'bool', + proargtypes => 'macaddr macaddr', prosrc => 'macaddr_ge' }, +{ oid => '835', + proname => 'macaddr_ne', proleakproof => 't', prorettype => 'bool', + proargtypes => 'macaddr macaddr', prosrc => 'macaddr_ne' }, +{ oid => '836', descr => 'less-equal-greater', + proname => 'macaddr_cmp', proleakproof => 't', prorettype => 'int4', + proargtypes => 'macaddr macaddr', prosrc => 'macaddr_cmp' }, +{ oid => '3144', + proname => 'macaddr_not', prorettype => 'macaddr', proargtypes => 'macaddr', + prosrc => 'macaddr_not' }, +{ oid => '3145', + proname => 'macaddr_and', prorettype => 'macaddr', + proargtypes => 'macaddr macaddr', prosrc => 'macaddr_and' }, +{ oid => '3146', + proname => 'macaddr_or', prorettype => 'macaddr', + proargtypes => 'macaddr macaddr', prosrc => 'macaddr_or' }, +{ oid => '3359', descr => 'sort support', + proname => 'macaddr_sortsupport', prorettype => 'void', + proargtypes => 'internal', prosrc => 'macaddr_sortsupport' }, + +# for macaddr8 type support +{ oid => '4110', descr => 'I/O', + proname => 'macaddr8_in', prorettype => 'macaddr8', proargtypes => 'cstring', + prosrc => 'macaddr8_in' }, +{ oid => '4111', descr => 'I/O', + proname => 'macaddr8_out', prorettype => 'cstring', proargtypes => 'macaddr8', + prosrc => 'macaddr8_out' }, + +{ oid => '4112', descr => 'MACADDR8 manufacturer fields', + proname => 'trunc', prorettype => 'macaddr8', proargtypes => 'macaddr8', + prosrc => 'macaddr8_trunc' }, + +{ oid => '4113', + proname => 'macaddr8_eq', proleakproof => 't', prorettype => 'bool', + proargtypes => 'macaddr8 macaddr8', prosrc => 'macaddr8_eq' }, +{ oid => '4114', + proname => 'macaddr8_lt', proleakproof => 't', prorettype => 'bool', + proargtypes => 'macaddr8 macaddr8', prosrc => 'macaddr8_lt' }, +{ oid => '4115', + proname => 'macaddr8_le', proleakproof => 't', prorettype => 'bool', + proargtypes => 'macaddr8 macaddr8', prosrc => 'macaddr8_le' }, +{ oid => '4116', + proname => 'macaddr8_gt', proleakproof => 't', prorettype => 'bool', + proargtypes => 'macaddr8 macaddr8', prosrc => 'macaddr8_gt' }, +{ oid => '4117', + proname => 'macaddr8_ge', proleakproof => 't', prorettype => 'bool', + proargtypes => 'macaddr8 macaddr8', prosrc => 'macaddr8_ge' }, +{ oid => '4118', + proname => 'macaddr8_ne', 
proleakproof => 't', prorettype => 'bool', + proargtypes => 'macaddr8 macaddr8', prosrc => 'macaddr8_ne' }, +{ oid => '4119', descr => 'less-equal-greater', + proname => 'macaddr8_cmp', proleakproof => 't', prorettype => 'int4', + proargtypes => 'macaddr8 macaddr8', prosrc => 'macaddr8_cmp' }, +{ oid => '4120', + proname => 'macaddr8_not', prorettype => 'macaddr8', + proargtypes => 'macaddr8', prosrc => 'macaddr8_not' }, +{ oid => '4121', + proname => 'macaddr8_and', prorettype => 'macaddr8', + proargtypes => 'macaddr8 macaddr8', prosrc => 'macaddr8_and' }, +{ oid => '4122', + proname => 'macaddr8_or', prorettype => 'macaddr8', + proargtypes => 'macaddr8 macaddr8', prosrc => 'macaddr8_or' }, +{ oid => '4123', descr => 'convert macaddr to macaddr8', + proname => 'macaddr8', prorettype => 'macaddr8', proargtypes => 'macaddr', + prosrc => 'macaddrtomacaddr8' }, +{ oid => '4124', descr => 'convert macaddr8 to macaddr', + proname => 'macaddr', prorettype => 'macaddr', proargtypes => 'macaddr8', + prosrc => 'macaddr8tomacaddr' }, +{ oid => '4125', descr => 'set 7th bit in macaddr8', + proname => 'macaddr8_set7bit', prorettype => 'macaddr8', + proargtypes => 'macaddr8', prosrc => 'macaddr8_set7bit' }, + +# for inet type support +{ oid => '910', descr => 'I/O', + proname => 'inet_in', prorettype => 'inet', proargtypes => 'cstring', + prosrc => 'inet_in' }, +{ oid => '911', descr => 'I/O', + proname => 'inet_out', prorettype => 'cstring', proargtypes => 'inet', + prosrc => 'inet_out' }, + +# for cidr type support +{ oid => '1267', descr => 'I/O', + proname => 'cidr_in', prorettype => 'cidr', proargtypes => 'cstring', + prosrc => 'cidr_in' }, +{ oid => '1427', descr => 'I/O', + proname => 'cidr_out', prorettype => 'cstring', proargtypes => 'cidr', + prosrc => 'cidr_out' }, + +# these are used for both inet and cidr +{ oid => '920', + proname => 'network_eq', proleakproof => 't', prorettype => 'bool', + proargtypes => 'inet inet', prosrc => 'network_eq' }, +{ oid => '921', + proname => 'network_lt', proleakproof => 't', prorettype => 'bool', + proargtypes => 'inet inet', prosrc => 'network_lt' }, +{ oid => '922', + proname => 'network_le', proleakproof => 't', prorettype => 'bool', + proargtypes => 'inet inet', prosrc => 'network_le' }, +{ oid => '923', + proname => 'network_gt', proleakproof => 't', prorettype => 'bool', + proargtypes => 'inet inet', prosrc => 'network_gt' }, +{ oid => '924', + proname => 'network_ge', proleakproof => 't', prorettype => 'bool', + proargtypes => 'inet inet', prosrc => 'network_ge' }, +{ oid => '925', + proname => 'network_ne', proleakproof => 't', prorettype => 'bool', + proargtypes => 'inet inet', prosrc => 'network_ne' }, +{ oid => '3562', descr => 'larger of two', + proname => 'network_larger', prorettype => 'inet', proargtypes => 'inet inet', + prosrc => 'network_larger' }, +{ oid => '3563', descr => 'smaller of two', + proname => 'network_smaller', prorettype => 'inet', + proargtypes => 'inet inet', prosrc => 'network_smaller' }, +{ oid => '926', descr => 'less-equal-greater', + proname => 'network_cmp', proleakproof => 't', prorettype => 'int4', + proargtypes => 'inet inet', prosrc => 'network_cmp' }, +{ oid => '927', + proname => 'network_sub', prorettype => 'bool', proargtypes => 'inet inet', + prosrc => 'network_sub' }, +{ oid => '928', + proname => 'network_subeq', prorettype => 'bool', proargtypes => 'inet inet', + prosrc => 'network_subeq' }, +{ oid => '929', + proname => 'network_sup', prorettype => 'bool', proargtypes => 'inet inet', + prosrc => 
'network_sup' }, +{ oid => '930', + proname => 'network_supeq', prorettype => 'bool', proargtypes => 'inet inet', + prosrc => 'network_supeq' }, +{ oid => '3551', + proname => 'network_overlap', prorettype => 'bool', + proargtypes => 'inet inet', prosrc => 'network_overlap' }, + +# inet/cidr functions +{ oid => '598', descr => 'abbreviated display of inet value', + proname => 'abbrev', prorettype => 'text', proargtypes => 'inet', + prosrc => 'inet_abbrev' }, +{ oid => '599', descr => 'abbreviated display of cidr value', + proname => 'abbrev', prorettype => 'text', proargtypes => 'cidr', + prosrc => 'cidr_abbrev' }, +{ oid => '605', descr => 'change netmask of inet', + proname => 'set_masklen', prorettype => 'inet', proargtypes => 'inet int4', + prosrc => 'inet_set_masklen' }, +{ oid => '635', descr => 'change netmask of cidr', + proname => 'set_masklen', prorettype => 'cidr', proargtypes => 'cidr int4', + prosrc => 'cidr_set_masklen' }, +{ oid => '711', descr => 'address family (4 for IPv4, 6 for IPv6)', + proname => 'family', prorettype => 'int4', proargtypes => 'inet', + prosrc => 'network_family' }, +{ oid => '683', descr => 'network part of address', + proname => 'network', prorettype => 'cidr', proargtypes => 'inet', + prosrc => 'network_network' }, +{ oid => '696', descr => 'netmask of address', + proname => 'netmask', prorettype => 'inet', proargtypes => 'inet', + prosrc => 'network_netmask' }, +{ oid => '697', descr => 'netmask length', + proname => 'masklen', prorettype => 'int4', proargtypes => 'inet', + prosrc => 'network_masklen' }, +{ oid => '698', descr => 'broadcast address of network', + proname => 'broadcast', prorettype => 'inet', proargtypes => 'inet', + prosrc => 'network_broadcast' }, +{ oid => '699', descr => 'show address octets only', + proname => 'host', prorettype => 'text', proargtypes => 'inet', + prosrc => 'network_host' }, +{ oid => '730', descr => 'show all parts of inet/cidr value', + proname => 'text', prorettype => 'text', proargtypes => 'inet', + prosrc => 'network_show' }, +{ oid => '1362', descr => 'hostmask of address', + proname => 'hostmask', prorettype => 'inet', proargtypes => 'inet', + prosrc => 'network_hostmask' }, +{ oid => '1715', descr => 'convert inet to cidr', + proname => 'cidr', prorettype => 'cidr', proargtypes => 'inet', + prosrc => 'inet_to_cidr' }, + +{ oid => '2196', descr => 'inet address of the client', + proname => 'inet_client_addr', proisstrict => 'f', provolatile => 's', + proparallel => 'r', prorettype => 'inet', proargtypes => '', + prosrc => 'inet_client_addr' }, +{ oid => '2197', descr => 'client\'s port number for this connection', + proname => 'inet_client_port', proisstrict => 'f', provolatile => 's', + proparallel => 'r', prorettype => 'int4', proargtypes => '', + prosrc => 'inet_client_port' }, +{ oid => '2198', descr => 'inet address of the server', + proname => 'inet_server_addr', proisstrict => 'f', provolatile => 's', + prorettype => 'inet', proargtypes => '', prosrc => 'inet_server_addr' }, +{ oid => '2199', descr => 'server\'s port number for this connection', + proname => 'inet_server_port', proisstrict => 'f', provolatile => 's', + prorettype => 'int4', proargtypes => '', prosrc => 'inet_server_port' }, + +{ oid => '2627', + proname => 'inetnot', prorettype => 'inet', proargtypes => 'inet', + prosrc => 'inetnot' }, +{ oid => '2628', + proname => 'inetand', prorettype => 'inet', proargtypes => 'inet inet', + prosrc => 'inetand' }, +{ oid => '2629', + proname => 'inetor', prorettype => 'inet', proargtypes => 
'inet inet', + prosrc => 'inetor' }, +{ oid => '2630', + proname => 'inetpl', prorettype => 'inet', proargtypes => 'inet int8', + prosrc => 'inetpl' }, +{ oid => '2631', + proname => 'int8pl_inet', prolang => '14', prorettype => 'inet', + proargtypes => 'int8 inet', prosrc => 'select $2 + $1' }, +{ oid => '2632', + proname => 'inetmi_int8', prorettype => 'inet', proargtypes => 'inet int8', + prosrc => 'inetmi_int8' }, +{ oid => '2633', + proname => 'inetmi', prorettype => 'int8', proargtypes => 'inet inet', + prosrc => 'inetmi' }, +{ oid => '4071', descr => 'are the addresses from the same family?', + proname => 'inet_same_family', prorettype => 'bool', + proargtypes => 'inet inet', prosrc => 'inet_same_family' }, +{ oid => '4063', + descr => 'the smallest network which includes both of the given networks', + proname => 'inet_merge', prorettype => 'cidr', proargtypes => 'inet inet', + prosrc => 'inet_merge' }, + +# GiST support for inet and cidr +{ oid => '3553', descr => 'GiST support', + proname => 'inet_gist_consistent', prorettype => 'bool', + proargtypes => 'internal inet int2 oid internal', + prosrc => 'inet_gist_consistent' }, +{ oid => '3554', descr => 'GiST support', + proname => 'inet_gist_union', prorettype => 'inet', + proargtypes => 'internal internal', prosrc => 'inet_gist_union' }, +{ oid => '3555', descr => 'GiST support', + proname => 'inet_gist_compress', prorettype => 'internal', + proargtypes => 'internal', prosrc => 'inet_gist_compress' }, +{ oid => '3573', descr => 'GiST support', + proname => 'inet_gist_fetch', prorettype => 'internal', + proargtypes => 'internal', prosrc => 'inet_gist_fetch' }, +{ oid => '3557', descr => 'GiST support', + proname => 'inet_gist_penalty', prorettype => 'internal', + proargtypes => 'internal internal internal', prosrc => 'inet_gist_penalty' }, +{ oid => '3558', descr => 'GiST support', + proname => 'inet_gist_picksplit', prorettype => 'internal', + proargtypes => 'internal internal', prosrc => 'inet_gist_picksplit' }, +{ oid => '3559', descr => 'GiST support', + proname => 'inet_gist_same', prorettype => 'internal', + proargtypes => 'inet inet internal', prosrc => 'inet_gist_same' }, + +# SP-GiST support for inet and cidr +{ oid => '3795', descr => 'SP-GiST support', + proname => 'inet_spg_config', prorettype => 'void', + proargtypes => 'internal internal', prosrc => 'inet_spg_config' }, +{ oid => '3796', descr => 'SP-GiST support', + proname => 'inet_spg_choose', prorettype => 'void', + proargtypes => 'internal internal', prosrc => 'inet_spg_choose' }, +{ oid => '3797', descr => 'SP-GiST support', + proname => 'inet_spg_picksplit', prorettype => 'void', + proargtypes => 'internal internal', prosrc => 'inet_spg_picksplit' }, +{ oid => '3798', descr => 'SP-GiST support', + proname => 'inet_spg_inner_consistent', prorettype => 'void', + proargtypes => 'internal internal', prosrc => 'inet_spg_inner_consistent' }, +{ oid => '3799', descr => 'SP-GiST support', + proname => 'inet_spg_leaf_consistent', prorettype => 'bool', + proargtypes => 'internal internal', prosrc => 'inet_spg_leaf_consistent' }, + +# Selectivity estimation for inet and cidr +{ oid => '3560', descr => 'restriction selectivity for network operators', + proname => 'networksel', provolatile => 's', prorettype => 'float8', + proargtypes => 'internal oid internal int4', prosrc => 'networksel' }, +{ oid => '3561', descr => 'join selectivity for network operators', + proname => 'networkjoinsel', provolatile => 's', prorettype => 'float8', + proargtypes => 'internal oid internal 
int2 internal', + prosrc => 'networkjoinsel' }, + +{ oid => '1690', + proname => 'time_mi_time', prorettype => 'interval', + proargtypes => 'time time', prosrc => 'time_mi_time' }, + +{ oid => '1691', + proname => 'boolle', proleakproof => 't', prorettype => 'bool', + proargtypes => 'bool bool', prosrc => 'boolle' }, +{ oid => '1692', + proname => 'boolge', proleakproof => 't', prorettype => 'bool', + proargtypes => 'bool bool', prosrc => 'boolge' }, +{ oid => '1693', descr => 'less-equal-greater', + proname => 'btboolcmp', proleakproof => 't', prorettype => 'int4', + proargtypes => 'bool bool', prosrc => 'btboolcmp' }, + +{ oid => '1688', descr => 'hash', + proname => 'time_hash', prorettype => 'int4', proargtypes => 'time', + prosrc => 'time_hash' }, +{ oid => '3409', descr => 'hash', + proname => 'time_hash_extended', prorettype => 'int8', + proargtypes => 'time int8', prosrc => 'time_hash_extended' }, +{ oid => '1696', descr => 'hash', + proname => 'timetz_hash', prorettype => 'int4', proargtypes => 'timetz', + prosrc => 'timetz_hash' }, +{ oid => '3410', descr => 'hash', + proname => 'timetz_hash_extended', prorettype => 'int8', + proargtypes => 'timetz int8', prosrc => 'timetz_hash_extended' }, +{ oid => '1697', descr => 'hash', + proname => 'interval_hash', prorettype => 'int4', proargtypes => 'interval', + prosrc => 'interval_hash' }, +{ oid => '3418', descr => 'hash', + proname => 'interval_hash_extended', prorettype => 'int8', + proargtypes => 'interval int8', prosrc => 'interval_hash_extended' }, + +# OID's 1700 - 1799 NUMERIC data type + +{ oid => '1701', descr => 'I/O', + proname => 'numeric_in', prorettype => 'numeric', + proargtypes => 'cstring oid int4', prosrc => 'numeric_in' }, +{ oid => '1702', descr => 'I/O', + proname => 'numeric_out', prorettype => 'cstring', proargtypes => 'numeric', + prosrc => 'numeric_out' }, +{ oid => '2917', descr => 'I/O typmod', + proname => 'numerictypmodin', prorettype => 'int4', proargtypes => '_cstring', + prosrc => 'numerictypmodin' }, +{ oid => '2918', descr => 'I/O typmod', + proname => 'numerictypmodout', prorettype => 'cstring', proargtypes => 'int4', + prosrc => 'numerictypmodout' }, +{ oid => '3157', descr => 'transform a numeric length coercion', + proname => 'numeric_transform', prorettype => 'internal', + proargtypes => 'internal', prosrc => 'numeric_transform' }, +{ oid => '1703', descr => 'adjust numeric to typmod precision/scale', + proname => 'numeric', protransform => 'numeric_transform', + prorettype => 'numeric', proargtypes => 'numeric int4', prosrc => 'numeric' }, +{ oid => '1704', + proname => 'numeric_abs', prorettype => 'numeric', proargtypes => 'numeric', + prosrc => 'numeric_abs' }, +{ oid => '1705', descr => 'absolute value', + proname => 'abs', prorettype => 'numeric', proargtypes => 'numeric', + prosrc => 'numeric_abs' }, +{ oid => '1706', descr => 'sign of value', + proname => 'sign', prorettype => 'numeric', proargtypes => 'numeric', + prosrc => 'numeric_sign' }, +{ oid => '1707', descr => 'value rounded to \'scale\'', + proname => 'round', prorettype => 'numeric', proargtypes => 'numeric int4', + prosrc => 'numeric_round' }, +{ oid => '1708', descr => 'value rounded to \'scale\' of zero', + proname => 'round', prolang => '14', prorettype => 'numeric', + proargtypes => 'numeric', prosrc => 'select pg_catalog.round($1,0)' }, +{ oid => '1709', descr => 'value truncated to \'scale\'', + proname => 'trunc', prorettype => 'numeric', proargtypes => 'numeric int4', + prosrc => 'numeric_trunc' }, +{ oid => '1710', 
descr => 'value truncated to \'scale\' of zero', + proname => 'trunc', prolang => '14', prorettype => 'numeric', + proargtypes => 'numeric', prosrc => 'select pg_catalog.trunc($1,0)' }, +{ oid => '1711', descr => 'nearest integer >= value', + proname => 'ceil', prorettype => 'numeric', proargtypes => 'numeric', + prosrc => 'numeric_ceil' }, +{ oid => '2167', descr => 'nearest integer >= value', + proname => 'ceiling', prorettype => 'numeric', proargtypes => 'numeric', + prosrc => 'numeric_ceil' }, +{ oid => '1712', descr => 'nearest integer <= value', + proname => 'floor', prorettype => 'numeric', proargtypes => 'numeric', + prosrc => 'numeric_floor' }, +{ oid => '1718', + proname => 'numeric_eq', prorettype => 'bool', + proargtypes => 'numeric numeric', prosrc => 'numeric_eq' }, +{ oid => '1719', + proname => 'numeric_ne', prorettype => 'bool', + proargtypes => 'numeric numeric', prosrc => 'numeric_ne' }, +{ oid => '1720', + proname => 'numeric_gt', prorettype => 'bool', + proargtypes => 'numeric numeric', prosrc => 'numeric_gt' }, +{ oid => '1721', + proname => 'numeric_ge', prorettype => 'bool', + proargtypes => 'numeric numeric', prosrc => 'numeric_ge' }, +{ oid => '1722', + proname => 'numeric_lt', prorettype => 'bool', + proargtypes => 'numeric numeric', prosrc => 'numeric_lt' }, +{ oid => '1723', + proname => 'numeric_le', prorettype => 'bool', + proargtypes => 'numeric numeric', prosrc => 'numeric_le' }, +{ oid => '1724', + proname => 'numeric_add', prorettype => 'numeric', + proargtypes => 'numeric numeric', prosrc => 'numeric_add' }, +{ oid => '1725', + proname => 'numeric_sub', prorettype => 'numeric', + proargtypes => 'numeric numeric', prosrc => 'numeric_sub' }, +{ oid => '1726', + proname => 'numeric_mul', prorettype => 'numeric', + proargtypes => 'numeric numeric', prosrc => 'numeric_mul' }, +{ oid => '1727', + proname => 'numeric_div', prorettype => 'numeric', + proargtypes => 'numeric numeric', prosrc => 'numeric_div' }, +{ oid => '1728', descr => 'modulus', + proname => 'mod', prorettype => 'numeric', proargtypes => 'numeric numeric', + prosrc => 'numeric_mod' }, +{ oid => '1729', + proname => 'numeric_mod', prorettype => 'numeric', + proargtypes => 'numeric numeric', prosrc => 'numeric_mod' }, +{ oid => '1730', descr => 'square root', + proname => 'sqrt', prorettype => 'numeric', proargtypes => 'numeric', + prosrc => 'numeric_sqrt' }, +{ oid => '1731', descr => 'square root', + proname => 'numeric_sqrt', prorettype => 'numeric', proargtypes => 'numeric', + prosrc => 'numeric_sqrt' }, +{ oid => '1732', descr => 'natural exponential (e^x)', + proname => 'exp', prorettype => 'numeric', proargtypes => 'numeric', + prosrc => 'numeric_exp' }, +{ oid => '1733', descr => 'natural exponential (e^x)', + proname => 'numeric_exp', prorettype => 'numeric', proargtypes => 'numeric', + prosrc => 'numeric_exp' }, +{ oid => '1734', descr => 'natural logarithm', + proname => 'ln', prorettype => 'numeric', proargtypes => 'numeric', + prosrc => 'numeric_ln' }, +{ oid => '1735', descr => 'natural logarithm', + proname => 'numeric_ln', prorettype => 'numeric', proargtypes => 'numeric', + prosrc => 'numeric_ln' }, +{ oid => '1736', descr => 'logarithm base m of n', + proname => 'log', prorettype => 'numeric', proargtypes => 'numeric numeric', + prosrc => 'numeric_log' }, +{ oid => '1737', descr => 'logarithm base m of n', + proname => 'numeric_log', prorettype => 'numeric', + proargtypes => 'numeric numeric', prosrc => 'numeric_log' }, +{ oid => '1738', descr => 'exponentiation', + proname => 
'pow', prorettype => 'numeric', proargtypes => 'numeric numeric', + prosrc => 'numeric_power' }, +{ oid => '2169', descr => 'exponentiation', + proname => 'power', prorettype => 'numeric', proargtypes => 'numeric numeric', + prosrc => 'numeric_power' }, +{ oid => '1739', + proname => 'numeric_power', prorettype => 'numeric', + proargtypes => 'numeric numeric', prosrc => 'numeric_power' }, +{ oid => '3281', descr => 'number of decimal digits in the fractional part', + proname => 'scale', prorettype => 'int4', proargtypes => 'numeric', + prosrc => 'numeric_scale' }, +{ oid => '1740', descr => 'convert int4 to numeric', + proname => 'numeric', prorettype => 'numeric', proargtypes => 'int4', + prosrc => 'int4_numeric' }, +{ oid => '1741', descr => 'base 10 logarithm', + proname => 'log', prolang => '14', prorettype => 'numeric', + proargtypes => 'numeric', prosrc => 'select pg_catalog.log(10, $1)' }, +{ oid => '1742', descr => 'convert float4 to numeric', + proname => 'numeric', prorettype => 'numeric', proargtypes => 'float4', + prosrc => 'float4_numeric' }, +{ oid => '1743', descr => 'convert float8 to numeric', + proname => 'numeric', prorettype => 'numeric', proargtypes => 'float8', + prosrc => 'float8_numeric' }, +{ oid => '1744', descr => 'convert numeric to int4', + proname => 'int4', prorettype => 'int4', proargtypes => 'numeric', + prosrc => 'numeric_int4' }, +{ oid => '1745', descr => 'convert numeric to float4', + proname => 'float4', prorettype => 'float4', proargtypes => 'numeric', + prosrc => 'numeric_float4' }, +{ oid => '1746', descr => 'convert numeric to float8', + proname => 'float8', prorettype => 'float8', proargtypes => 'numeric', + prosrc => 'numeric_float8' }, +{ oid => '1973', descr => 'trunc(x/y)', + proname => 'div', prorettype => 'numeric', proargtypes => 'numeric numeric', + prosrc => 'numeric_div_trunc' }, +{ oid => '1980', descr => 'trunc(x/y)', + proname => 'numeric_div_trunc', prorettype => 'numeric', + proargtypes => 'numeric numeric', prosrc => 'numeric_div_trunc' }, +{ oid => '2170', descr => 'bucket number of operand in equal-width histogram', + proname => 'width_bucket', prorettype => 'int4', + proargtypes => 'numeric numeric numeric int4', + prosrc => 'width_bucket_numeric' }, + +{ oid => '1747', + proname => 'time_pl_interval', prorettype => 'time', + proargtypes => 'time interval', prosrc => 'time_pl_interval' }, +{ oid => '1748', + proname => 'time_mi_interval', prorettype => 'time', + proargtypes => 'time interval', prosrc => 'time_mi_interval' }, +{ oid => '1749', + proname => 'timetz_pl_interval', prorettype => 'timetz', + proargtypes => 'timetz interval', prosrc => 'timetz_pl_interval' }, +{ oid => '1750', + proname => 'timetz_mi_interval', prorettype => 'timetz', + proargtypes => 'timetz interval', prosrc => 'timetz_mi_interval' }, + +{ oid => '1764', descr => 'increment by one', + proname => 'numeric_inc', prorettype => 'numeric', proargtypes => 'numeric', + prosrc => 'numeric_inc' }, +{ oid => '1766', descr => 'smaller of two', + proname => 'numeric_smaller', prorettype => 'numeric', + proargtypes => 'numeric numeric', prosrc => 'numeric_smaller' }, +{ oid => '1767', descr => 'larger of two', + proname => 'numeric_larger', prorettype => 'numeric', + proargtypes => 'numeric numeric', prosrc => 'numeric_larger' }, +{ oid => '1769', descr => 'less-equal-greater', + proname => 'numeric_cmp', prorettype => 'int4', + proargtypes => 'numeric numeric', prosrc => 'numeric_cmp' }, +{ oid => '3283', descr => 'sort support', + proname => 
'numeric_sortsupport', prorettype => 'void', + proargtypes => 'internal', prosrc => 'numeric_sortsupport' }, +{ oid => '1771', + proname => 'numeric_uminus', prorettype => 'numeric', + proargtypes => 'numeric', prosrc => 'numeric_uminus' }, +{ oid => '1779', descr => 'convert numeric to int8', + proname => 'int8', prorettype => 'int8', proargtypes => 'numeric', + prosrc => 'numeric_int8' }, +{ oid => '1781', descr => 'convert int8 to numeric', + proname => 'numeric', prorettype => 'numeric', proargtypes => 'int8', + prosrc => 'int8_numeric' }, +{ oid => '1782', descr => 'convert int2 to numeric', + proname => 'numeric', prorettype => 'numeric', proargtypes => 'int2', + prosrc => 'int2_numeric' }, +{ oid => '1783', descr => 'convert numeric to int2', + proname => 'int2', prorettype => 'int2', proargtypes => 'numeric', + prosrc => 'numeric_int2' }, + +{ oid => '3556', descr => 'convert jsonb to boolean', + proname => 'bool', prorettype => 'bool', proargtypes => 'jsonb', + prosrc => 'jsonb_bool' }, +{ oid => '3449', descr => 'convert jsonb to numeric', + proname => 'numeric', prorettype => 'numeric', proargtypes => 'jsonb', + prosrc => 'jsonb_numeric' }, +{ oid => '3450', descr => 'convert jsonb to int2', + proname => 'int2', prorettype => 'int2', proargtypes => 'jsonb', + prosrc => 'jsonb_int2' }, +{ oid => '3451', descr => 'convert jsonb to int4', + proname => 'int4', prorettype => 'int4', proargtypes => 'jsonb', + prosrc => 'jsonb_int4' }, +{ oid => '3452', descr => 'convert jsonb to int8', + proname => 'int8', prorettype => 'int8', proargtypes => 'jsonb', + prosrc => 'jsonb_int8' }, +{ oid => '3453', descr => 'convert jsonb to float4', + proname => 'float4', prorettype => 'float4', proargtypes => 'jsonb', + prosrc => 'jsonb_float4' }, +{ oid => '2580', descr => 'convert jsonb to float8', + proname => 'float8', prorettype => 'float8', proargtypes => 'jsonb', + prosrc => 'jsonb_float8' }, + +# formatting +{ oid => '1770', descr => 'format timestamp with time zone to text', + proname => 'to_char', provolatile => 's', prorettype => 'text', + proargtypes => 'timestamptz text', prosrc => 'timestamptz_to_char' }, +{ oid => '1772', descr => 'format numeric to text', + proname => 'to_char', provolatile => 's', prorettype => 'text', + proargtypes => 'numeric text', prosrc => 'numeric_to_char' }, +{ oid => '1773', descr => 'format int4 to text', + proname => 'to_char', provolatile => 's', prorettype => 'text', + proargtypes => 'int4 text', prosrc => 'int4_to_char' }, +{ oid => '1774', descr => 'format int8 to text', + proname => 'to_char', provolatile => 's', prorettype => 'text', + proargtypes => 'int8 text', prosrc => 'int8_to_char' }, +{ oid => '1775', descr => 'format float4 to text', + proname => 'to_char', provolatile => 's', prorettype => 'text', + proargtypes => 'float4 text', prosrc => 'float4_to_char' }, +{ oid => '1776', descr => 'format float8 to text', + proname => 'to_char', provolatile => 's', prorettype => 'text', + proargtypes => 'float8 text', prosrc => 'float8_to_char' }, +{ oid => '1777', descr => 'convert text to numeric', + proname => 'to_number', provolatile => 's', prorettype => 'numeric', + proargtypes => 'text text', prosrc => 'numeric_to_number' }, +{ oid => '1778', descr => 'convert text to timestamp with time zone', + proname => 'to_timestamp', provolatile => 's', prorettype => 'timestamptz', + proargtypes => 'text text', prosrc => 'to_timestamp' }, +{ oid => '1780', descr => 'convert text to date', + proname => 'to_date', provolatile => 's', prorettype => 'date', + 
proargtypes => 'text text', prosrc => 'to_date' }, +{ oid => '1768', descr => 'format interval to text', + proname => 'to_char', provolatile => 's', prorettype => 'text', + proargtypes => 'interval text', prosrc => 'interval_to_char' }, + +{ oid => '1282', descr => 'quote an identifier for usage in a querystring', + proname => 'quote_ident', prorettype => 'text', proargtypes => 'text', + prosrc => 'quote_ident' }, +{ oid => '1283', descr => 'quote a literal for usage in a querystring', + proname => 'quote_literal', prorettype => 'text', proargtypes => 'text', + prosrc => 'quote_literal' }, +{ oid => '1285', descr => 'quote a data value for usage in a querystring', + proname => 'quote_literal', prolang => '14', provolatile => 's', + prorettype => 'text', proargtypes => 'anyelement', + prosrc => 'select pg_catalog.quote_literal($1::pg_catalog.text)' }, +{ oid => '1289', + descr => 'quote a possibly-null literal for usage in a querystring', + proname => 'quote_nullable', proisstrict => 'f', prorettype => 'text', + proargtypes => 'text', prosrc => 'quote_nullable' }, +{ oid => '1290', + descr => 'quote a possibly-null data value for usage in a querystring', + proname => 'quote_nullable', prolang => '14', proisstrict => 'f', + provolatile => 's', prorettype => 'text', proargtypes => 'anyelement', + prosrc => 'select pg_catalog.quote_nullable($1::pg_catalog.text)' }, + +{ oid => '1798', descr => 'I/O', + proname => 'oidin', prorettype => 'oid', proargtypes => 'cstring', + prosrc => 'oidin' }, +{ oid => '1799', descr => 'I/O', + proname => 'oidout', prorettype => 'cstring', proargtypes => 'oid', + prosrc => 'oidout' }, + +{ oid => '3058', descr => 'concatenate values', + proname => 'concat', provariadic => 'any', proisstrict => 'f', + provolatile => 's', prorettype => 'text', proargtypes => 'any', + proallargtypes => '{any}', proargmodes => '{v}', prosrc => 'text_concat' }, +{ oid => '3059', descr => 'concatenate values with separators', + proname => 'concat_ws', provariadic => 'any', proisstrict => 'f', + provolatile => 's', prorettype => 'text', proargtypes => 'text any', + proallargtypes => '{text,any}', proargmodes => '{i,v}', + prosrc => 'text_concat_ws' }, +{ oid => '3060', descr => 'extract the first n characters', + proname => 'left', prorettype => 'text', proargtypes => 'text int4', + prosrc => 'text_left' }, +{ oid => '3061', descr => 'extract the last n characters', + proname => 'right', prorettype => 'text', proargtypes => 'text int4', + prosrc => 'text_right' }, +{ oid => '3062', descr => 'reverse text', + proname => 'reverse', prorettype => 'text', proargtypes => 'text', + prosrc => 'text_reverse' }, +{ oid => '3539', descr => 'format text message', + proname => 'format', provariadic => 'any', proisstrict => 'f', + provolatile => 's', prorettype => 'text', proargtypes => 'text any', + proallargtypes => '{text,any}', proargmodes => '{i,v}', + prosrc => 'text_format' }, +{ oid => '3540', descr => 'format text message', + proname => 'format', proisstrict => 'f', provolatile => 's', + prorettype => 'text', proargtypes => 'text', prosrc => 'text_format_nv' }, + +{ oid => '1810', descr => 'length in bits', + proname => 'bit_length', prolang => '14', prorettype => 'int4', + proargtypes => 'bytea', prosrc => 'select pg_catalog.octet_length($1) * 8' }, +{ oid => '1811', descr => 'length in bits', + proname => 'bit_length', prolang => '14', prorettype => 'int4', + proargtypes => 'text', prosrc => 'select pg_catalog.octet_length($1) * 8' }, +{ oid => '1812', descr => 'length in bits', + 
proname => 'bit_length', prolang => '14', prorettype => 'int4', + proargtypes => 'bit', prosrc => 'select pg_catalog.length($1)' }, + +# Selectivity estimators for LIKE and related operators +{ oid => '1814', descr => 'restriction selectivity of ILIKE', + proname => 'iclikesel', provolatile => 's', prorettype => 'float8', + proargtypes => 'internal oid internal int4', prosrc => 'iclikesel' }, +{ oid => '1815', descr => 'restriction selectivity of NOT ILIKE', + proname => 'icnlikesel', provolatile => 's', prorettype => 'float8', + proargtypes => 'internal oid internal int4', prosrc => 'icnlikesel' }, +{ oid => '1816', descr => 'join selectivity of ILIKE', + proname => 'iclikejoinsel', provolatile => 's', prorettype => 'float8', + proargtypes => 'internal oid internal int2 internal', + prosrc => 'iclikejoinsel' }, +{ oid => '1817', descr => 'join selectivity of NOT ILIKE', + proname => 'icnlikejoinsel', provolatile => 's', prorettype => 'float8', + proargtypes => 'internal oid internal int2 internal', + prosrc => 'icnlikejoinsel' }, +{ oid => '1818', descr => 'restriction selectivity of regex match', + proname => 'regexeqsel', provolatile => 's', prorettype => 'float8', + proargtypes => 'internal oid internal int4', prosrc => 'regexeqsel' }, +{ oid => '1819', descr => 'restriction selectivity of LIKE', + proname => 'likesel', provolatile => 's', prorettype => 'float8', + proargtypes => 'internal oid internal int4', prosrc => 'likesel' }, +{ oid => '1820', + descr => 'restriction selectivity of case-insensitive regex match', + proname => 'icregexeqsel', provolatile => 's', prorettype => 'float8', + proargtypes => 'internal oid internal int4', prosrc => 'icregexeqsel' }, +{ oid => '1821', descr => 'restriction selectivity of regex non-match', + proname => 'regexnesel', provolatile => 's', prorettype => 'float8', + proargtypes => 'internal oid internal int4', prosrc => 'regexnesel' }, +{ oid => '1822', descr => 'restriction selectivity of NOT LIKE', + proname => 'nlikesel', provolatile => 's', prorettype => 'float8', + proargtypes => 'internal oid internal int4', prosrc => 'nlikesel' }, +{ oid => '1823', + descr => 'restriction selectivity of case-insensitive regex non-match', + proname => 'icregexnesel', provolatile => 's', prorettype => 'float8', + proargtypes => 'internal oid internal int4', prosrc => 'icregexnesel' }, +{ oid => '1824', descr => 'join selectivity of regex match', + proname => 'regexeqjoinsel', provolatile => 's', prorettype => 'float8', + proargtypes => 'internal oid internal int2 internal', + prosrc => 'regexeqjoinsel' }, +{ oid => '1825', descr => 'join selectivity of LIKE', + proname => 'likejoinsel', provolatile => 's', prorettype => 'float8', + proargtypes => 'internal oid internal int2 internal', + prosrc => 'likejoinsel' }, +{ oid => '1826', descr => 'join selectivity of case-insensitive regex match', + proname => 'icregexeqjoinsel', provolatile => 's', prorettype => 'float8', + proargtypes => 'internal oid internal int2 internal', + prosrc => 'icregexeqjoinsel' }, +{ oid => '1827', descr => 'join selectivity of regex non-match', + proname => 'regexnejoinsel', provolatile => 's', prorettype => 'float8', + proargtypes => 'internal oid internal int2 internal', + prosrc => 'regexnejoinsel' }, +{ oid => '1828', descr => 'join selectivity of NOT LIKE', + proname => 'nlikejoinsel', provolatile => 's', prorettype => 'float8', + proargtypes => 'internal oid internal int2 internal', + prosrc => 'nlikejoinsel' }, +{ oid => '1829', + descr => 'join selectivity of 
case-insensitive regex non-match', + proname => 'icregexnejoinsel', provolatile => 's', prorettype => 'float8', + proargtypes => 'internal oid internal int2 internal', + prosrc => 'icregexnejoinsel' }, +{ oid => '3437', descr => 'restriction selectivity of exact prefix', + proname => 'prefixsel', provolatile => 's', prorettype => 'float8', + proargtypes => 'internal oid internal int4', prosrc => 'prefixsel' }, +{ oid => '3438', descr => 'join selectivity of exact prefix', + proname => 'prefixjoinsel', provolatile => 's', prorettype => 'float8', + proargtypes => 'internal oid internal int2 internal', + prosrc => 'prefixjoinsel' }, + +# Aggregate-related functions +{ oid => '1830', descr => 'aggregate final function', + proname => 'float8_avg', prorettype => 'float8', proargtypes => '_float8', + prosrc => 'float8_avg' }, +{ oid => '2512', descr => 'aggregate final function', + proname => 'float8_var_pop', prorettype => 'float8', proargtypes => '_float8', + prosrc => 'float8_var_pop' }, +{ oid => '1831', descr => 'aggregate final function', + proname => 'float8_var_samp', prorettype => 'float8', + proargtypes => '_float8', prosrc => 'float8_var_samp' }, +{ oid => '2513', descr => 'aggregate final function', + proname => 'float8_stddev_pop', prorettype => 'float8', + proargtypes => '_float8', prosrc => 'float8_stddev_pop' }, +{ oid => '1832', descr => 'aggregate final function', + proname => 'float8_stddev_samp', prorettype => 'float8', + proargtypes => '_float8', prosrc => 'float8_stddev_samp' }, +{ oid => '1833', descr => 'aggregate transition function', + proname => 'numeric_accum', proisstrict => 'f', prorettype => 'internal', + proargtypes => 'internal numeric', prosrc => 'numeric_accum' }, +{ oid => '3341', descr => 'aggregate combine function', + proname => 'numeric_combine', proisstrict => 'f', prorettype => 'internal', + proargtypes => 'internal internal', prosrc => 'numeric_combine' }, +{ oid => '2858', descr => 'aggregate transition function', + proname => 'numeric_avg_accum', proisstrict => 'f', prorettype => 'internal', + proargtypes => 'internal numeric', prosrc => 'numeric_avg_accum' }, +{ oid => '3337', descr => 'aggregate combine function', + proname => 'numeric_avg_combine', proisstrict => 'f', + prorettype => 'internal', proargtypes => 'internal internal', + prosrc => 'numeric_avg_combine' }, +{ oid => '2740', descr => 'aggregate serial function', + proname => 'numeric_avg_serialize', prorettype => 'bytea', + proargtypes => 'internal', prosrc => 'numeric_avg_serialize' }, +{ oid => '2741', descr => 'aggregate deserial function', + proname => 'numeric_avg_deserialize', prorettype => 'internal', + proargtypes => 'bytea internal', prosrc => 'numeric_avg_deserialize' }, +{ oid => '3335', descr => 'aggregate serial function', + proname => 'numeric_serialize', prorettype => 'bytea', + proargtypes => 'internal', prosrc => 'numeric_serialize' }, +{ oid => '3336', descr => 'aggregate deserial function', + proname => 'numeric_deserialize', prorettype => 'internal', + proargtypes => 'bytea internal', prosrc => 'numeric_deserialize' }, +{ oid => '3548', descr => 'aggregate transition function', + proname => 'numeric_accum_inv', proisstrict => 'f', prorettype => 'internal', + proargtypes => 'internal numeric', prosrc => 'numeric_accum_inv' }, +{ oid => '1834', descr => 'aggregate transition function', + proname => 'int2_accum', proisstrict => 'f', prorettype => 'internal', + proargtypes => 'internal int2', prosrc => 'int2_accum' }, +{ oid => '1835', descr => 'aggregate transition 
function', + proname => 'int4_accum', proisstrict => 'f', prorettype => 'internal', + proargtypes => 'internal int4', prosrc => 'int4_accum' }, +{ oid => '1836', descr => 'aggregate transition function', + proname => 'int8_accum', proisstrict => 'f', prorettype => 'internal', + proargtypes => 'internal int8', prosrc => 'int8_accum' }, +{ oid => '3338', descr => 'aggregate combine function', + proname => 'numeric_poly_combine', proisstrict => 'f', + prorettype => 'internal', proargtypes => 'internal internal', + prosrc => 'numeric_poly_combine' }, +{ oid => '3339', descr => 'aggregate serial function', + proname => 'numeric_poly_serialize', prorettype => 'bytea', + proargtypes => 'internal', prosrc => 'numeric_poly_serialize' }, +{ oid => '3340', descr => 'aggregate deserial function', + proname => 'numeric_poly_deserialize', prorettype => 'internal', + proargtypes => 'bytea internal', prosrc => 'numeric_poly_deserialize' }, +{ oid => '2746', descr => 'aggregate transition function', + proname => 'int8_avg_accum', proisstrict => 'f', prorettype => 'internal', + proargtypes => 'internal int8', prosrc => 'int8_avg_accum' }, +{ oid => '3567', descr => 'aggregate transition function', + proname => 'int2_accum_inv', proisstrict => 'f', prorettype => 'internal', + proargtypes => 'internal int2', prosrc => 'int2_accum_inv' }, +{ oid => '3568', descr => 'aggregate transition function', + proname => 'int4_accum_inv', proisstrict => 'f', prorettype => 'internal', + proargtypes => 'internal int4', prosrc => 'int4_accum_inv' }, +{ oid => '3569', descr => 'aggregate transition function', + proname => 'int8_accum_inv', proisstrict => 'f', prorettype => 'internal', + proargtypes => 'internal int8', prosrc => 'int8_accum_inv' }, +{ oid => '3387', descr => 'aggregate transition function', + proname => 'int8_avg_accum_inv', proisstrict => 'f', prorettype => 'internal', + proargtypes => 'internal int8', prosrc => 'int8_avg_accum_inv' }, +{ oid => '2785', descr => 'aggregate combine function', + proname => 'int8_avg_combine', proisstrict => 'f', prorettype => 'internal', + proargtypes => 'internal internal', prosrc => 'int8_avg_combine' }, +{ oid => '2786', descr => 'aggregate serial function', + proname => 'int8_avg_serialize', prorettype => 'bytea', + proargtypes => 'internal', prosrc => 'int8_avg_serialize' }, +{ oid => '2787', descr => 'aggregate deserial function', + proname => 'int8_avg_deserialize', prorettype => 'internal', + proargtypes => 'bytea internal', prosrc => 'int8_avg_deserialize' }, +{ oid => '3324', descr => 'aggregate combine function', + proname => 'int4_avg_combine', prorettype => '_int8', + proargtypes => '_int8 _int8', prosrc => 'int4_avg_combine' }, +{ oid => '3178', descr => 'aggregate final function', + proname => 'numeric_sum', proisstrict => 'f', prorettype => 'numeric', + proargtypes => 'internal', prosrc => 'numeric_sum' }, +{ oid => '1837', descr => 'aggregate final function', + proname => 'numeric_avg', proisstrict => 'f', prorettype => 'numeric', + proargtypes => 'internal', prosrc => 'numeric_avg' }, +{ oid => '2514', descr => 'aggregate final function', + proname => 'numeric_var_pop', proisstrict => 'f', prorettype => 'numeric', + proargtypes => 'internal', prosrc => 'numeric_var_pop' }, +{ oid => '1838', descr => 'aggregate final function', + proname => 'numeric_var_samp', proisstrict => 'f', prorettype => 'numeric', + proargtypes => 'internal', prosrc => 'numeric_var_samp' }, +{ oid => '2596', descr => 'aggregate final function', + proname => 'numeric_stddev_pop', 
proisstrict => 'f', prorettype => 'numeric', + proargtypes => 'internal', prosrc => 'numeric_stddev_pop' }, +{ oid => '1839', descr => 'aggregate final function', + proname => 'numeric_stddev_samp', proisstrict => 'f', prorettype => 'numeric', + proargtypes => 'internal', prosrc => 'numeric_stddev_samp' }, +{ oid => '1840', descr => 'aggregate transition function', + proname => 'int2_sum', proisstrict => 'f', prorettype => 'int8', + proargtypes => 'int8 int2', prosrc => 'int2_sum' }, +{ oid => '1841', descr => 'aggregate transition function', + proname => 'int4_sum', proisstrict => 'f', prorettype => 'int8', + proargtypes => 'int8 int4', prosrc => 'int4_sum' }, +{ oid => '1842', descr => 'aggregate transition function', + proname => 'int8_sum', proisstrict => 'f', prorettype => 'numeric', + proargtypes => 'numeric int8', prosrc => 'int8_sum' }, +{ oid => '3388', descr => 'aggregate final function', + proname => 'numeric_poly_sum', proisstrict => 'f', prorettype => 'numeric', + proargtypes => 'internal', prosrc => 'numeric_poly_sum' }, +{ oid => '3389', descr => 'aggregate final function', + proname => 'numeric_poly_avg', proisstrict => 'f', prorettype => 'numeric', + proargtypes => 'internal', prosrc => 'numeric_poly_avg' }, +{ oid => '3390', descr => 'aggregate final function', + proname => 'numeric_poly_var_pop', proisstrict => 'f', + prorettype => 'numeric', proargtypes => 'internal', + prosrc => 'numeric_poly_var_pop' }, +{ oid => '3391', descr => 'aggregate final function', + proname => 'numeric_poly_var_samp', proisstrict => 'f', + prorettype => 'numeric', proargtypes => 'internal', + prosrc => 'numeric_poly_var_samp' }, +{ oid => '3392', descr => 'aggregate final function', + proname => 'numeric_poly_stddev_pop', proisstrict => 'f', + prorettype => 'numeric', proargtypes => 'internal', + prosrc => 'numeric_poly_stddev_pop' }, +{ oid => '3393', descr => 'aggregate final function', + proname => 'numeric_poly_stddev_samp', proisstrict => 'f', + prorettype => 'numeric', proargtypes => 'internal', + prosrc => 'numeric_poly_stddev_samp' }, + +{ oid => '1843', descr => 'aggregate transition function', + proname => 'interval_accum', prorettype => '_interval', + proargtypes => '_interval interval', prosrc => 'interval_accum' }, +{ oid => '3325', descr => 'aggregate combine function', + proname => 'interval_combine', prorettype => '_interval', + proargtypes => '_interval _interval', prosrc => 'interval_combine' }, +{ oid => '3549', descr => 'aggregate transition function', + proname => 'interval_accum_inv', prorettype => '_interval', + proargtypes => '_interval interval', prosrc => 'interval_accum_inv' }, +{ oid => '1844', descr => 'aggregate final function', + proname => 'interval_avg', prorettype => 'interval', + proargtypes => '_interval', prosrc => 'interval_avg' }, +{ oid => '1962', descr => 'aggregate transition function', + proname => 'int2_avg_accum', prorettype => '_int8', + proargtypes => '_int8 int2', prosrc => 'int2_avg_accum' }, +{ oid => '1963', descr => 'aggregate transition function', + proname => 'int4_avg_accum', prorettype => '_int8', + proargtypes => '_int8 int4', prosrc => 'int4_avg_accum' }, +{ oid => '3570', descr => 'aggregate transition function', + proname => 'int2_avg_accum_inv', prorettype => '_int8', + proargtypes => '_int8 int2', prosrc => 'int2_avg_accum_inv' }, +{ oid => '3571', descr => 'aggregate transition function', + proname => 'int4_avg_accum_inv', prorettype => '_int8', + proargtypes => '_int8 int4', prosrc => 'int4_avg_accum_inv' }, +{ oid => '1964', 
descr => 'aggregate final function', + proname => 'int8_avg', prorettype => 'numeric', proargtypes => '_int8', + prosrc => 'int8_avg' }, +{ oid => '3572', descr => 'aggregate final function', + proname => 'int2int4_sum', prorettype => 'int8', proargtypes => '_int8', + prosrc => 'int2int4_sum' }, +{ oid => '2805', descr => 'aggregate transition function', + proname => 'int8inc_float8_float8', prorettype => 'int8', + proargtypes => 'int8 float8 float8', prosrc => 'int8inc_float8_float8' }, +{ oid => '2806', descr => 'aggregate transition function', + proname => 'float8_regr_accum', prorettype => '_float8', + proargtypes => '_float8 float8 float8', prosrc => 'float8_regr_accum' }, +{ oid => '3342', descr => 'aggregate combine function', + proname => 'float8_regr_combine', prorettype => '_float8', + proargtypes => '_float8 _float8', prosrc => 'float8_regr_combine' }, +{ oid => '2807', descr => 'aggregate final function', + proname => 'float8_regr_sxx', prorettype => 'float8', + proargtypes => '_float8', prosrc => 'float8_regr_sxx' }, +{ oid => '2808', descr => 'aggregate final function', + proname => 'float8_regr_syy', prorettype => 'float8', + proargtypes => '_float8', prosrc => 'float8_regr_syy' }, +{ oid => '2809', descr => 'aggregate final function', + proname => 'float8_regr_sxy', prorettype => 'float8', + proargtypes => '_float8', prosrc => 'float8_regr_sxy' }, +{ oid => '2810', descr => 'aggregate final function', + proname => 'float8_regr_avgx', prorettype => 'float8', + proargtypes => '_float8', prosrc => 'float8_regr_avgx' }, +{ oid => '2811', descr => 'aggregate final function', + proname => 'float8_regr_avgy', prorettype => 'float8', + proargtypes => '_float8', prosrc => 'float8_regr_avgy' }, +{ oid => '2812', descr => 'aggregate final function', + proname => 'float8_regr_r2', prorettype => 'float8', proargtypes => '_float8', + prosrc => 'float8_regr_r2' }, +{ oid => '2813', descr => 'aggregate final function', + proname => 'float8_regr_slope', prorettype => 'float8', + proargtypes => '_float8', prosrc => 'float8_regr_slope' }, +{ oid => '2814', descr => 'aggregate final function', + proname => 'float8_regr_intercept', prorettype => 'float8', + proargtypes => '_float8', prosrc => 'float8_regr_intercept' }, +{ oid => '2815', descr => 'aggregate final function', + proname => 'float8_covar_pop', prorettype => 'float8', + proargtypes => '_float8', prosrc => 'float8_covar_pop' }, +{ oid => '2816', descr => 'aggregate final function', + proname => 'float8_covar_samp', prorettype => 'float8', + proargtypes => '_float8', prosrc => 'float8_covar_samp' }, +{ oid => '2817', descr => 'aggregate final function', + proname => 'float8_corr', prorettype => 'float8', proargtypes => '_float8', + prosrc => 'float8_corr' }, + +{ oid => '3535', descr => 'aggregate transition function', + proname => 'string_agg_transfn', proisstrict => 'f', prorettype => 'internal', + proargtypes => 'internal text text', prosrc => 'string_agg_transfn' }, +{ oid => '3536', descr => 'aggregate final function', + proname => 'string_agg_finalfn', proisstrict => 'f', prorettype => 'text', + proargtypes => 'internal', prosrc => 'string_agg_finalfn' }, +{ oid => '3538', descr => 'concatenate aggregate input into a string', + proname => 'string_agg', prokind => 'a', proisstrict => 'f', + prorettype => 'text', proargtypes => 'text text', + prosrc => 'aggregate_dummy' }, +{ oid => '3543', descr => 'aggregate transition function', + proname => 'bytea_string_agg_transfn', proisstrict => 'f', + prorettype => 'internal', proargtypes 
=> 'internal bytea bytea', + prosrc => 'bytea_string_agg_transfn' }, +{ oid => '3544', descr => 'aggregate final function', + proname => 'bytea_string_agg_finalfn', proisstrict => 'f', + prorettype => 'bytea', proargtypes => 'internal', + prosrc => 'bytea_string_agg_finalfn' }, +{ oid => '3545', descr => 'concatenate aggregate input into a bytea', + proname => 'string_agg', prokind => 'a', proisstrict => 'f', + prorettype => 'bytea', proargtypes => 'bytea bytea', + prosrc => 'aggregate_dummy' }, + +# To ASCII conversion +{ oid => '1845', descr => 'encode text from DB encoding to ASCII text', + proname => 'to_ascii', prorettype => 'text', proargtypes => 'text', + prosrc => 'to_ascii_default' }, +{ oid => '1846', descr => 'encode text from encoding to ASCII text', + proname => 'to_ascii', prorettype => 'text', proargtypes => 'text int4', + prosrc => 'to_ascii_enc' }, +{ oid => '1847', descr => 'encode text from encoding to ASCII text', + proname => 'to_ascii', prorettype => 'text', proargtypes => 'text name', + prosrc => 'to_ascii_encname' }, + +{ oid => '1848', + proname => 'interval_pl_time', prolang => '14', prorettype => 'time', + proargtypes => 'interval time', prosrc => 'select $2 + $1' }, + +{ oid => '1850', + proname => 'int28eq', proleakproof => 't', prorettype => 'bool', + proargtypes => 'int2 int8', prosrc => 'int28eq' }, +{ oid => '1851', + proname => 'int28ne', proleakproof => 't', prorettype => 'bool', + proargtypes => 'int2 int8', prosrc => 'int28ne' }, +{ oid => '1852', + proname => 'int28lt', proleakproof => 't', prorettype => 'bool', + proargtypes => 'int2 int8', prosrc => 'int28lt' }, +{ oid => '1853', + proname => 'int28gt', proleakproof => 't', prorettype => 'bool', + proargtypes => 'int2 int8', prosrc => 'int28gt' }, +{ oid => '1854', + proname => 'int28le', proleakproof => 't', prorettype => 'bool', + proargtypes => 'int2 int8', prosrc => 'int28le' }, +{ oid => '1855', + proname => 'int28ge', proleakproof => 't', prorettype => 'bool', + proargtypes => 'int2 int8', prosrc => 'int28ge' }, + +{ oid => '1856', + proname => 'int82eq', proleakproof => 't', prorettype => 'bool', + proargtypes => 'int8 int2', prosrc => 'int82eq' }, +{ oid => '1857', + proname => 'int82ne', proleakproof => 't', prorettype => 'bool', + proargtypes => 'int8 int2', prosrc => 'int82ne' }, +{ oid => '1858', + proname => 'int82lt', proleakproof => 't', prorettype => 'bool', + proargtypes => 'int8 int2', prosrc => 'int82lt' }, +{ oid => '1859', + proname => 'int82gt', proleakproof => 't', prorettype => 'bool', + proargtypes => 'int8 int2', prosrc => 'int82gt' }, +{ oid => '1860', + proname => 'int82le', proleakproof => 't', prorettype => 'bool', + proargtypes => 'int8 int2', prosrc => 'int82le' }, +{ oid => '1861', + proname => 'int82ge', proleakproof => 't', prorettype => 'bool', + proargtypes => 'int8 int2', prosrc => 'int82ge' }, + +{ oid => '1892', + proname => 'int2and', prorettype => 'int2', proargtypes => 'int2 int2', + prosrc => 'int2and' }, +{ oid => '1893', + proname => 'int2or', prorettype => 'int2', proargtypes => 'int2 int2', + prosrc => 'int2or' }, +{ oid => '1894', + proname => 'int2xor', prorettype => 'int2', proargtypes => 'int2 int2', + prosrc => 'int2xor' }, +{ oid => '1895', + proname => 'int2not', prorettype => 'int2', proargtypes => 'int2', + prosrc => 'int2not' }, +{ oid => '1896', + proname => 'int2shl', prorettype => 'int2', proargtypes => 'int2 int4', + prosrc => 'int2shl' }, +{ oid => '1897', + proname => 'int2shr', prorettype => 'int2', proargtypes => 'int2 int4', + prosrc 
=> 'int2shr' }, + +{ oid => '1898', + proname => 'int4and', prorettype => 'int4', proargtypes => 'int4 int4', + prosrc => 'int4and' }, +{ oid => '1899', + proname => 'int4or', prorettype => 'int4', proargtypes => 'int4 int4', + prosrc => 'int4or' }, +{ oid => '1900', + proname => 'int4xor', prorettype => 'int4', proargtypes => 'int4 int4', + prosrc => 'int4xor' }, +{ oid => '1901', + proname => 'int4not', prorettype => 'int4', proargtypes => 'int4', + prosrc => 'int4not' }, +{ oid => '1902', + proname => 'int4shl', prorettype => 'int4', proargtypes => 'int4 int4', + prosrc => 'int4shl' }, +{ oid => '1903', + proname => 'int4shr', prorettype => 'int4', proargtypes => 'int4 int4', + prosrc => 'int4shr' }, + +{ oid => '1904', + proname => 'int8and', prorettype => 'int8', proargtypes => 'int8 int8', + prosrc => 'int8and' }, +{ oid => '1905', + proname => 'int8or', prorettype => 'int8', proargtypes => 'int8 int8', + prosrc => 'int8or' }, +{ oid => '1906', + proname => 'int8xor', prorettype => 'int8', proargtypes => 'int8 int8', + prosrc => 'int8xor' }, +{ oid => '1907', + proname => 'int8not', prorettype => 'int8', proargtypes => 'int8', + prosrc => 'int8not' }, +{ oid => '1908', + proname => 'int8shl', prorettype => 'int8', proargtypes => 'int8 int4', + prosrc => 'int8shl' }, +{ oid => '1909', + proname => 'int8shr', prorettype => 'int8', proargtypes => 'int8 int4', + prosrc => 'int8shr' }, + +{ oid => '1910', + proname => 'int8up', prorettype => 'int8', proargtypes => 'int8', + prosrc => 'int8up' }, +{ oid => '1911', + proname => 'int2up', prorettype => 'int2', proargtypes => 'int2', + prosrc => 'int2up' }, +{ oid => '1912', + proname => 'int4up', prorettype => 'int4', proargtypes => 'int4', + prosrc => 'int4up' }, +{ oid => '1913', + proname => 'float4up', prorettype => 'float4', proargtypes => 'float4', + prosrc => 'float4up' }, +{ oid => '1914', + proname => 'float8up', prorettype => 'float8', proargtypes => 'float8', + prosrc => 'float8up' }, +{ oid => '1915', + proname => 'numeric_uplus', prorettype => 'numeric', proargtypes => 'numeric', + prosrc => 'numeric_uplus' }, + +{ oid => '1922', descr => 'user privilege on relation by username, rel name', + proname => 'has_table_privilege', provolatile => 's', prorettype => 'bool', + proargtypes => 'name text text', prosrc => 'has_table_privilege_name_name' }, +{ oid => '1923', descr => 'user privilege on relation by username, rel oid', + proname => 'has_table_privilege', provolatile => 's', prorettype => 'bool', + proargtypes => 'name oid text', prosrc => 'has_table_privilege_name_id' }, +{ oid => '1924', descr => 'user privilege on relation by user oid, rel name', + proname => 'has_table_privilege', provolatile => 's', prorettype => 'bool', + proargtypes => 'oid text text', prosrc => 'has_table_privilege_id_name' }, +{ oid => '1925', descr => 'user privilege on relation by user oid, rel oid', + proname => 'has_table_privilege', provolatile => 's', prorettype => 'bool', + proargtypes => 'oid oid text', prosrc => 'has_table_privilege_id_id' }, +{ oid => '1926', descr => 'current user privilege on relation by rel name', + proname => 'has_table_privilege', provolatile => 's', prorettype => 'bool', + proargtypes => 'text text', prosrc => 'has_table_privilege_name' }, +{ oid => '1927', descr => 'current user privilege on relation by rel oid', + proname => 'has_table_privilege', provolatile => 's', prorettype => 'bool', + proargtypes => 'oid text', prosrc => 'has_table_privilege_id' }, + +{ oid => '2181', descr => 'user privilege on sequence by 
username, seq name', + proname => 'has_sequence_privilege', provolatile => 's', prorettype => 'bool', + proargtypes => 'name text text', + prosrc => 'has_sequence_privilege_name_name' }, +{ oid => '2182', descr => 'user privilege on sequence by username, seq oid', + proname => 'has_sequence_privilege', provolatile => 's', prorettype => 'bool', + proargtypes => 'name oid text', prosrc => 'has_sequence_privilege_name_id' }, +{ oid => '2183', descr => 'user privilege on sequence by user oid, seq name', + proname => 'has_sequence_privilege', provolatile => 's', prorettype => 'bool', + proargtypes => 'oid text text', prosrc => 'has_sequence_privilege_id_name' }, +{ oid => '2184', descr => 'user privilege on sequence by user oid, seq oid', + proname => 'has_sequence_privilege', provolatile => 's', prorettype => 'bool', + proargtypes => 'oid oid text', prosrc => 'has_sequence_privilege_id_id' }, +{ oid => '2185', descr => 'current user privilege on sequence by seq name', + proname => 'has_sequence_privilege', provolatile => 's', prorettype => 'bool', + proargtypes => 'text text', prosrc => 'has_sequence_privilege_name' }, +{ oid => '2186', descr => 'current user privilege on sequence by seq oid', + proname => 'has_sequence_privilege', provolatile => 's', prorettype => 'bool', + proargtypes => 'oid text', prosrc => 'has_sequence_privilege_id' }, + +{ oid => '3012', + descr => 'user privilege on column by username, rel name, col name', + proname => 'has_column_privilege', provolatile => 's', prorettype => 'bool', + proargtypes => 'name text text text', + prosrc => 'has_column_privilege_name_name_name' }, +{ oid => '3013', + descr => 'user privilege on column by username, rel name, col attnum', + proname => 'has_column_privilege', provolatile => 's', prorettype => 'bool', + proargtypes => 'name text int2 text', + prosrc => 'has_column_privilege_name_name_attnum' }, +{ oid => '3014', + descr => 'user privilege on column by username, rel oid, col name', + proname => 'has_column_privilege', provolatile => 's', prorettype => 'bool', + proargtypes => 'name oid text text', + prosrc => 'has_column_privilege_name_id_name' }, +{ oid => '3015', + descr => 'user privilege on column by username, rel oid, col attnum', + proname => 'has_column_privilege', provolatile => 's', prorettype => 'bool', + proargtypes => 'name oid int2 text', + prosrc => 'has_column_privilege_name_id_attnum' }, +{ oid => '3016', + descr => 'user privilege on column by user oid, rel name, col name', + proname => 'has_column_privilege', provolatile => 's', prorettype => 'bool', + proargtypes => 'oid text text text', + prosrc => 'has_column_privilege_id_name_name' }, +{ oid => '3017', + descr => 'user privilege on column by user oid, rel name, col attnum', + proname => 'has_column_privilege', provolatile => 's', prorettype => 'bool', + proargtypes => 'oid text int2 text', + prosrc => 'has_column_privilege_id_name_attnum' }, +{ oid => '3018', + descr => 'user privilege on column by user oid, rel oid, col name', + proname => 'has_column_privilege', provolatile => 's', prorettype => 'bool', + proargtypes => 'oid oid text text', + prosrc => 'has_column_privilege_id_id_name' }, +{ oid => '3019', + descr => 'user privilege on column by user oid, rel oid, col attnum', + proname => 'has_column_privilege', provolatile => 's', prorettype => 'bool', + proargtypes => 'oid oid int2 text', + prosrc => 'has_column_privilege_id_id_attnum' }, +{ oid => '3020', + descr => 'current user privilege on column by rel name, col name', + proname => 
'has_column_privilege', provolatile => 's', prorettype => 'bool', + proargtypes => 'text text text', prosrc => 'has_column_privilege_name_name' }, +{ oid => '3021', + descr => 'current user privilege on column by rel name, col attnum', + proname => 'has_column_privilege', provolatile => 's', prorettype => 'bool', + proargtypes => 'text int2 text', + prosrc => 'has_column_privilege_name_attnum' }, +{ oid => '3022', + descr => 'current user privilege on column by rel oid, col name', + proname => 'has_column_privilege', provolatile => 's', prorettype => 'bool', + proargtypes => 'oid text text', prosrc => 'has_column_privilege_id_name' }, +{ oid => '3023', + descr => 'current user privilege on column by rel oid, col attnum', + proname => 'has_column_privilege', provolatile => 's', prorettype => 'bool', + proargtypes => 'oid int2 text', prosrc => 'has_column_privilege_id_attnum' }, + +{ oid => '3024', + descr => 'user privilege on any column by username, rel name', + proname => 'has_any_column_privilege', procost => '10', provolatile => 's', + prorettype => 'bool', proargtypes => 'name text text', + prosrc => 'has_any_column_privilege_name_name' }, +{ oid => '3025', descr => 'user privilege on any column by username, rel oid', + proname => 'has_any_column_privilege', procost => '10', provolatile => 's', + prorettype => 'bool', proargtypes => 'name oid text', + prosrc => 'has_any_column_privilege_name_id' }, +{ oid => '3026', + descr => 'user privilege on any column by user oid, rel name', + proname => 'has_any_column_privilege', procost => '10', provolatile => 's', + prorettype => 'bool', proargtypes => 'oid text text', + prosrc => 'has_any_column_privilege_id_name' }, +{ oid => '3027', descr => 'user privilege on any column by user oid, rel oid', + proname => 'has_any_column_privilege', procost => '10', provolatile => 's', + prorettype => 'bool', proargtypes => 'oid oid text', + prosrc => 'has_any_column_privilege_id_id' }, +{ oid => '3028', descr => 'current user privilege on any column by rel name', + proname => 'has_any_column_privilege', procost => '10', provolatile => 's', + prorettype => 'bool', proargtypes => 'text text', + prosrc => 'has_any_column_privilege_name' }, +{ oid => '3029', descr => 'current user privilege on any column by rel oid', + proname => 'has_any_column_privilege', procost => '10', provolatile => 's', + prorettype => 'bool', proargtypes => 'oid text', + prosrc => 'has_any_column_privilege_id' }, + +{ oid => '3355', descr => 'I/O', + proname => 'pg_ndistinct_in', prorettype => 'pg_ndistinct', + proargtypes => 'cstring', prosrc => 'pg_ndistinct_in' }, +{ oid => '3356', descr => 'I/O', + proname => 'pg_ndistinct_out', prorettype => 'cstring', + proargtypes => 'pg_ndistinct', prosrc => 'pg_ndistinct_out' }, +{ oid => '3357', descr => 'I/O', + proname => 'pg_ndistinct_recv', provolatile => 's', + prorettype => 'pg_ndistinct', proargtypes => 'internal', + prosrc => 'pg_ndistinct_recv' }, +{ oid => '3358', descr => 'I/O', + proname => 'pg_ndistinct_send', provolatile => 's', prorettype => 'bytea', + proargtypes => 'pg_ndistinct', prosrc => 'pg_ndistinct_send' }, + +{ oid => '3404', descr => 'I/O', + proname => 'pg_dependencies_in', prorettype => 'pg_dependencies', + proargtypes => 'cstring', prosrc => 'pg_dependencies_in' }, +{ oid => '3405', descr => 'I/O', + proname => 'pg_dependencies_out', prorettype => 'cstring', + proargtypes => 'pg_dependencies', prosrc => 'pg_dependencies_out' }, +{ oid => '3406', descr => 'I/O', + proname => 'pg_dependencies_recv', provolatile => 
's', + prorettype => 'pg_dependencies', proargtypes => 'internal', + prosrc => 'pg_dependencies_recv' }, +{ oid => '3407', descr => 'I/O', + proname => 'pg_dependencies_send', provolatile => 's', prorettype => 'bytea', + proargtypes => 'pg_dependencies', prosrc => 'pg_dependencies_send' }, + +{ oid => '1928', descr => 'statistics: number of scans done for table/index', + proname => 'pg_stat_get_numscans', provolatile => 's', proparallel => 'r', + prorettype => 'int8', proargtypes => 'oid', + prosrc => 'pg_stat_get_numscans' }, +{ oid => '1929', descr => 'statistics: number of tuples read by seqscan', + proname => 'pg_stat_get_tuples_returned', provolatile => 's', + proparallel => 'r', prorettype => 'int8', proargtypes => 'oid', + prosrc => 'pg_stat_get_tuples_returned' }, +{ oid => '1930', descr => 'statistics: number of tuples fetched by idxscan', + proname => 'pg_stat_get_tuples_fetched', provolatile => 's', + proparallel => 'r', prorettype => 'int8', proargtypes => 'oid', + prosrc => 'pg_stat_get_tuples_fetched' }, +{ oid => '1931', descr => 'statistics: number of tuples inserted', + proname => 'pg_stat_get_tuples_inserted', provolatile => 's', + proparallel => 'r', prorettype => 'int8', proargtypes => 'oid', + prosrc => 'pg_stat_get_tuples_inserted' }, +{ oid => '1932', descr => 'statistics: number of tuples updated', + proname => 'pg_stat_get_tuples_updated', provolatile => 's', + proparallel => 'r', prorettype => 'int8', proargtypes => 'oid', + prosrc => 'pg_stat_get_tuples_updated' }, +{ oid => '1933', descr => 'statistics: number of tuples deleted', + proname => 'pg_stat_get_tuples_deleted', provolatile => 's', + proparallel => 'r', prorettype => 'int8', proargtypes => 'oid', + prosrc => 'pg_stat_get_tuples_deleted' }, +{ oid => '1972', descr => 'statistics: number of tuples hot updated', + proname => 'pg_stat_get_tuples_hot_updated', provolatile => 's', + proparallel => 'r', prorettype => 'int8', proargtypes => 'oid', + prosrc => 'pg_stat_get_tuples_hot_updated' }, +{ oid => '2878', descr => 'statistics: number of live tuples', + proname => 'pg_stat_get_live_tuples', provolatile => 's', proparallel => 'r', + prorettype => 'int8', proargtypes => 'oid', + prosrc => 'pg_stat_get_live_tuples' }, +{ oid => '2879', descr => 'statistics: number of dead tuples', + proname => 'pg_stat_get_dead_tuples', provolatile => 's', proparallel => 'r', + prorettype => 'int8', proargtypes => 'oid', + prosrc => 'pg_stat_get_dead_tuples' }, +{ oid => '3177', + descr => 'statistics: number of tuples changed since last analyze', + proname => 'pg_stat_get_mod_since_analyze', provolatile => 's', + proparallel => 'r', prorettype => 'int8', proargtypes => 'oid', + prosrc => 'pg_stat_get_mod_since_analyze' }, +{ oid => '1934', descr => 'statistics: number of blocks fetched', + proname => 'pg_stat_get_blocks_fetched', provolatile => 's', + proparallel => 'r', prorettype => 'int8', proargtypes => 'oid', + prosrc => 'pg_stat_get_blocks_fetched' }, +{ oid => '1935', descr => 'statistics: number of blocks found in cache', + proname => 'pg_stat_get_blocks_hit', provolatile => 's', proparallel => 'r', + prorettype => 'int8', proargtypes => 'oid', + prosrc => 'pg_stat_get_blocks_hit' }, +{ oid => '2781', descr => 'statistics: last manual vacuum time for a table', + proname => 'pg_stat_get_last_vacuum_time', provolatile => 's', + proparallel => 'r', prorettype => 'timestamptz', proargtypes => 'oid', + prosrc => 'pg_stat_get_last_vacuum_time' }, +{ oid => '2782', descr => 'statistics: last auto vacuum time for a table', 
+ proname => 'pg_stat_get_last_autovacuum_time', provolatile => 's', + proparallel => 'r', prorettype => 'timestamptz', proargtypes => 'oid', + prosrc => 'pg_stat_get_last_autovacuum_time' }, +{ oid => '2783', descr => 'statistics: last manual analyze time for a table', + proname => 'pg_stat_get_last_analyze_time', provolatile => 's', + proparallel => 'r', prorettype => 'timestamptz', proargtypes => 'oid', + prosrc => 'pg_stat_get_last_analyze_time' }, +{ oid => '2784', descr => 'statistics: last auto analyze time for a table', + proname => 'pg_stat_get_last_autoanalyze_time', provolatile => 's', + proparallel => 'r', prorettype => 'timestamptz', proargtypes => 'oid', + prosrc => 'pg_stat_get_last_autoanalyze_time' }, +{ oid => '3054', descr => 'statistics: number of manual vacuums for a table', + proname => 'pg_stat_get_vacuum_count', provolatile => 's', proparallel => 'r', + prorettype => 'int8', proargtypes => 'oid', + prosrc => 'pg_stat_get_vacuum_count' }, +{ oid => '3055', descr => 'statistics: number of auto vacuums for a table', + proname => 'pg_stat_get_autovacuum_count', provolatile => 's', + proparallel => 'r', prorettype => 'int8', proargtypes => 'oid', + prosrc => 'pg_stat_get_autovacuum_count' }, +{ oid => '3056', descr => 'statistics: number of manual analyzes for a table', + proname => 'pg_stat_get_analyze_count', provolatile => 's', + proparallel => 'r', prorettype => 'int8', proargtypes => 'oid', + prosrc => 'pg_stat_get_analyze_count' }, +{ oid => '3057', descr => 'statistics: number of auto analyzes for a table', + proname => 'pg_stat_get_autoanalyze_count', provolatile => 's', + proparallel => 'r', prorettype => 'int8', proargtypes => 'oid', + prosrc => 'pg_stat_get_autoanalyze_count' }, +{ oid => '1936', descr => 'statistics: currently active backend IDs', + proname => 'pg_stat_get_backend_idset', prorows => '100', proretset => 't', + provolatile => 's', proparallel => 'r', prorettype => 'int4', + proargtypes => '', prosrc => 'pg_stat_get_backend_idset' }, +{ oid => '2022', + descr => 'statistics: information about currently active backends', + proname => 'pg_stat_get_activity', prorows => '100', proisstrict => 'f', + proretset => 't', provolatile => 's', proparallel => 'r', + prorettype => 'record', proargtypes => 'int4', + proallargtypes => '{int4,oid,int4,oid,text,text,text,text,text,timestamptz,timestamptz,timestamptz,timestamptz,inet,text,int4,xid,xid,text,bool,text,text,int4,bool,text}', + proargmodes => '{i,o,o,o,o,o,o,o,o,o,o,o,o,o,o,o,o,o,o,o,o,o,o,o,o}', + proargnames => '{pid,datid,pid,usesysid,application_name,state,query,wait_event_type,wait_event,xact_start,query_start,backend_start,state_change,client_addr,client_hostname,client_port,backend_xid,backend_xmin,backend_type,ssl,sslversion,sslcipher,sslbits,sslcompression,sslclientdn}', + prosrc => 'pg_stat_get_activity' }, +{ oid => '3318', + descr => 'statistics: information about progress of backends running maintenance command', + proname => 'pg_stat_get_progress_info', prorows => '100', proretset => 't', + provolatile => 's', proparallel => 'r', prorettype => 'record', + proargtypes => 'text', + proallargtypes => '{text,int4,oid,oid,int8,int8,int8,int8,int8,int8,int8,int8,int8,int8}', + proargmodes => '{i,o,o,o,o,o,o,o,o,o,o,o,o,o}', + proargnames => '{cmdtype,pid,datid,relid,param1,param2,param3,param4,param5,param6,param7,param8,param9,param10}', + prosrc => 'pg_stat_get_progress_info' }, +{ oid => '3099', + descr => 'statistics: information about currently active replication', + proname => 
'pg_stat_get_wal_senders', prorows => '10', proisstrict => 'f', + proretset => 't', provolatile => 's', proparallel => 'r', + prorettype => 'record', proargtypes => '', + proallargtypes => '{int4,text,pg_lsn,pg_lsn,pg_lsn,pg_lsn,interval,interval,interval,int4,text}', + proargmodes => '{o,o,o,o,o,o,o,o,o,o,o}', + proargnames => '{pid,state,sent_lsn,write_lsn,flush_lsn,replay_lsn,write_lag,flush_lag,replay_lag,sync_priority,sync_state}', + prosrc => 'pg_stat_get_wal_senders' }, +{ oid => '3317', descr => 'statistics: information about WAL receiver', + proname => 'pg_stat_get_wal_receiver', proisstrict => 'f', provolatile => 's', + proparallel => 'r', prorettype => 'record', proargtypes => '', + proallargtypes => '{int4,text,pg_lsn,int4,pg_lsn,int4,timestamptz,timestamptz,pg_lsn,timestamptz,text,text,int4,text}', + proargmodes => '{o,o,o,o,o,o,o,o,o,o,o,o,o,o}', + proargnames => '{pid,status,receive_start_lsn,receive_start_tli,received_lsn,received_tli,last_msg_send_time,last_msg_receipt_time,latest_end_lsn,latest_end_time,slot_name,sender_host,sender_port,conninfo}', + prosrc => 'pg_stat_get_wal_receiver' }, +{ oid => '6118', descr => 'statistics: information about subscription', + proname => 'pg_stat_get_subscription', proisstrict => 'f', provolatile => 's', + proparallel => 'r', prorettype => 'record', proargtypes => 'oid', + proallargtypes => '{oid,oid,oid,int4,pg_lsn,timestamptz,timestamptz,pg_lsn,timestamptz}', + proargmodes => '{i,o,o,o,o,o,o,o,o}', + proargnames => '{subid,subid,relid,pid,received_lsn,last_msg_send_time,last_msg_receipt_time,latest_end_lsn,latest_end_time}', + prosrc => 'pg_stat_get_subscription' }, +{ oid => '2026', descr => 'statistics: current backend PID', + proname => 'pg_backend_pid', provolatile => 's', proparallel => 'r', + prorettype => 'int4', proargtypes => '', prosrc => 'pg_backend_pid' }, +{ oid => '1937', descr => 'statistics: PID of backend', + proname => 'pg_stat_get_backend_pid', provolatile => 's', proparallel => 'r', + prorettype => 'int4', proargtypes => 'int4', + prosrc => 'pg_stat_get_backend_pid' }, +{ oid => '1938', descr => 'statistics: database ID of backend', + proname => 'pg_stat_get_backend_dbid', provolatile => 's', proparallel => 'r', + prorettype => 'oid', proargtypes => 'int4', + prosrc => 'pg_stat_get_backend_dbid' }, +{ oid => '1939', descr => 'statistics: user ID of backend', + proname => 'pg_stat_get_backend_userid', provolatile => 's', + proparallel => 'r', prorettype => 'oid', proargtypes => 'int4', + prosrc => 'pg_stat_get_backend_userid' }, +{ oid => '1940', descr => 'statistics: current query of backend', + proname => 'pg_stat_get_backend_activity', provolatile => 's', + proparallel => 'r', prorettype => 'text', proargtypes => 'int4', + prosrc => 'pg_stat_get_backend_activity' }, +{ oid => '2788', + descr => 'statistics: wait event type on which backend is currently waiting', + proname => 'pg_stat_get_backend_wait_event_type', provolatile => 's', + proparallel => 'r', prorettype => 'text', proargtypes => 'int4', + prosrc => 'pg_stat_get_backend_wait_event_type' }, +{ oid => '2853', + descr => 'statistics: wait event on which backend is currently waiting', + proname => 'pg_stat_get_backend_wait_event', provolatile => 's', + proparallel => 'r', prorettype => 'text', proargtypes => 'int4', + prosrc => 'pg_stat_get_backend_wait_event' }, +{ oid => '2094', + descr => 'statistics: start time for current query of backend', + proname => 'pg_stat_get_backend_activity_start', provolatile => 's', + proparallel => 'r', prorettype => 
'timestamptz', proargtypes => 'int4', + prosrc => 'pg_stat_get_backend_activity_start' }, +{ oid => '2857', + descr => 'statistics: start time for backend\'s current transaction', + proname => 'pg_stat_get_backend_xact_start', provolatile => 's', + proparallel => 'r', prorettype => 'timestamptz', proargtypes => 'int4', + prosrc => 'pg_stat_get_backend_xact_start' }, +{ oid => '1391', + descr => 'statistics: start time for current backend session', + proname => 'pg_stat_get_backend_start', provolatile => 's', + proparallel => 'r', prorettype => 'timestamptz', proargtypes => 'int4', + prosrc => 'pg_stat_get_backend_start' }, +{ oid => '1392', + descr => 'statistics: address of client connected to backend', + proname => 'pg_stat_get_backend_client_addr', provolatile => 's', + proparallel => 'r', prorettype => 'inet', proargtypes => 'int4', + prosrc => 'pg_stat_get_backend_client_addr' }, +{ oid => '1393', + descr => 'statistics: port number of client connected to backend', + proname => 'pg_stat_get_backend_client_port', provolatile => 's', + proparallel => 'r', prorettype => 'int4', proargtypes => 'int4', + prosrc => 'pg_stat_get_backend_client_port' }, +{ oid => '1941', descr => 'statistics: number of backends in database', + proname => 'pg_stat_get_db_numbackends', provolatile => 's', + proparallel => 'r', prorettype => 'int4', proargtypes => 'oid', + prosrc => 'pg_stat_get_db_numbackends' }, +{ oid => '1942', descr => 'statistics: transactions committed', + proname => 'pg_stat_get_db_xact_commit', provolatile => 's', + proparallel => 'r', prorettype => 'int8', proargtypes => 'oid', + prosrc => 'pg_stat_get_db_xact_commit' }, +{ oid => '1943', descr => 'statistics: transactions rolled back', + proname => 'pg_stat_get_db_xact_rollback', provolatile => 's', + proparallel => 'r', prorettype => 'int8', proargtypes => 'oid', + prosrc => 'pg_stat_get_db_xact_rollback' }, +{ oid => '1944', descr => 'statistics: blocks fetched for database', + proname => 'pg_stat_get_db_blocks_fetched', provolatile => 's', + proparallel => 'r', prorettype => 'int8', proargtypes => 'oid', + prosrc => 'pg_stat_get_db_blocks_fetched' }, +{ oid => '1945', descr => 'statistics: blocks found in cache for database', + proname => 'pg_stat_get_db_blocks_hit', provolatile => 's', + proparallel => 'r', prorettype => 'int8', proargtypes => 'oid', + prosrc => 'pg_stat_get_db_blocks_hit' }, +{ oid => '2758', descr => 'statistics: tuples returned for database', + proname => 'pg_stat_get_db_tuples_returned', provolatile => 's', + proparallel => 'r', prorettype => 'int8', proargtypes => 'oid', + prosrc => 'pg_stat_get_db_tuples_returned' }, +{ oid => '2759', descr => 'statistics: tuples fetched for database', + proname => 'pg_stat_get_db_tuples_fetched', provolatile => 's', + proparallel => 'r', prorettype => 'int8', proargtypes => 'oid', + prosrc => 'pg_stat_get_db_tuples_fetched' }, +{ oid => '2760', descr => 'statistics: tuples inserted in database', + proname => 'pg_stat_get_db_tuples_inserted', provolatile => 's', + proparallel => 'r', prorettype => 'int8', proargtypes => 'oid', + prosrc => 'pg_stat_get_db_tuples_inserted' }, +{ oid => '2761', descr => 'statistics: tuples updated in database', + proname => 'pg_stat_get_db_tuples_updated', provolatile => 's', + proparallel => 'r', prorettype => 'int8', proargtypes => 'oid', + prosrc => 'pg_stat_get_db_tuples_updated' }, +{ oid => '2762', descr => 'statistics: tuples deleted in database', + proname => 'pg_stat_get_db_tuples_deleted', provolatile => 's', + proparallel => 'r', 
prorettype => 'int8', proargtypes => 'oid', + prosrc => 'pg_stat_get_db_tuples_deleted' }, +{ oid => '3065', + descr => 'statistics: recovery conflicts in database caused by drop tablespace', + proname => 'pg_stat_get_db_conflict_tablespace', provolatile => 's', + proparallel => 'r', prorettype => 'int8', proargtypes => 'oid', + prosrc => 'pg_stat_get_db_conflict_tablespace' }, +{ oid => '3066', + descr => 'statistics: recovery conflicts in database caused by relation lock', + proname => 'pg_stat_get_db_conflict_lock', provolatile => 's', + proparallel => 'r', prorettype => 'int8', proargtypes => 'oid', + prosrc => 'pg_stat_get_db_conflict_lock' }, +{ oid => '3067', + descr => 'statistics: recovery conflicts in database caused by snapshot expiry', + proname => 'pg_stat_get_db_conflict_snapshot', provolatile => 's', + proparallel => 'r', prorettype => 'int8', proargtypes => 'oid', + prosrc => 'pg_stat_get_db_conflict_snapshot' }, +{ oid => '3068', + descr => 'statistics: recovery conflicts in database caused by shared buffer pin', + proname => 'pg_stat_get_db_conflict_bufferpin', provolatile => 's', + proparallel => 'r', prorettype => 'int8', proargtypes => 'oid', + prosrc => 'pg_stat_get_db_conflict_bufferpin' }, +{ oid => '3069', + descr => 'statistics: recovery conflicts in database caused by buffer deadlock', + proname => 'pg_stat_get_db_conflict_startup_deadlock', provolatile => 's', + proparallel => 'r', prorettype => 'int8', proargtypes => 'oid', + prosrc => 'pg_stat_get_db_conflict_startup_deadlock' }, +{ oid => '3070', descr => 'statistics: recovery conflicts in database', + proname => 'pg_stat_get_db_conflict_all', provolatile => 's', + proparallel => 'r', prorettype => 'int8', proargtypes => 'oid', + prosrc => 'pg_stat_get_db_conflict_all' }, +{ oid => '3152', descr => 'statistics: deadlocks detected in database', + proname => 'pg_stat_get_db_deadlocks', provolatile => 's', proparallel => 'r', + prorettype => 'int8', proargtypes => 'oid', + prosrc => 'pg_stat_get_db_deadlocks' }, +{ oid => '3074', descr => 'statistics: last reset for a database', + proname => 'pg_stat_get_db_stat_reset_time', provolatile => 's', + proparallel => 'r', prorettype => 'timestamptz', proargtypes => 'oid', + prosrc => 'pg_stat_get_db_stat_reset_time' }, +{ oid => '3150', descr => 'statistics: number of temporary files written', + proname => 'pg_stat_get_db_temp_files', provolatile => 's', + proparallel => 'r', prorettype => 'int8', proargtypes => 'oid', + prosrc => 'pg_stat_get_db_temp_files' }, +{ oid => '3151', + descr => 'statistics: number of bytes in temporary files written', + proname => 'pg_stat_get_db_temp_bytes', provolatile => 's', + proparallel => 'r', prorettype => 'int8', proargtypes => 'oid', + prosrc => 'pg_stat_get_db_temp_bytes' }, +{ oid => '2844', descr => 'statistics: block read time, in milliseconds', + proname => 'pg_stat_get_db_blk_read_time', provolatile => 's', + proparallel => 'r', prorettype => 'float8', proargtypes => 'oid', + prosrc => 'pg_stat_get_db_blk_read_time' }, +{ oid => '2845', descr => 'statistics: block write time, in milliseconds', + proname => 'pg_stat_get_db_blk_write_time', provolatile => 's', + proparallel => 'r', prorettype => 'float8', proargtypes => 'oid', + prosrc => 'pg_stat_get_db_blk_write_time' }, +{ oid => '3195', descr => 'statistics: information about WAL archiver', + proname => 'pg_stat_get_archiver', proisstrict => 'f', provolatile => 's', + proparallel => 'r', prorettype => 'record', proargtypes => '', + proallargtypes => 
'{int8,text,timestamptz,int8,text,timestamptz,timestamptz}', + proargmodes => '{o,o,o,o,o,o,o}', + proargnames => '{archived_count,last_archived_wal,last_archived_time,failed_count,last_failed_wal,last_failed_time,stats_reset}', + prosrc => 'pg_stat_get_archiver' }, +{ oid => '2769', + descr => 'statistics: number of timed checkpoints started by the bgwriter', + proname => 'pg_stat_get_bgwriter_timed_checkpoints', provolatile => 's', + proparallel => 'r', prorettype => 'int8', proargtypes => '', + prosrc => 'pg_stat_get_bgwriter_timed_checkpoints' }, +{ oid => '2770', + descr => 'statistics: number of backend requested checkpoints started by the bgwriter', + proname => 'pg_stat_get_bgwriter_requested_checkpoints', provolatile => 's', + proparallel => 'r', prorettype => 'int8', proargtypes => '', + prosrc => 'pg_stat_get_bgwriter_requested_checkpoints' }, +{ oid => '2771', + descr => 'statistics: number of buffers written by the bgwriter during checkpoints', + proname => 'pg_stat_get_bgwriter_buf_written_checkpoints', provolatile => 's', + proparallel => 'r', prorettype => 'int8', proargtypes => '', + prosrc => 'pg_stat_get_bgwriter_buf_written_checkpoints' }, +{ oid => '2772', + descr => 'statistics: number of buffers written by the bgwriter for cleaning dirty buffers', + proname => 'pg_stat_get_bgwriter_buf_written_clean', provolatile => 's', + proparallel => 'r', prorettype => 'int8', proargtypes => '', + prosrc => 'pg_stat_get_bgwriter_buf_written_clean' }, +{ oid => '2773', + descr => 'statistics: number of times the bgwriter stopped processing when it had written too many buffers while cleaning', + proname => 'pg_stat_get_bgwriter_maxwritten_clean', provolatile => 's', + proparallel => 'r', prorettype => 'int8', proargtypes => '', + prosrc => 'pg_stat_get_bgwriter_maxwritten_clean' }, +{ oid => '3075', descr => 'statistics: last reset for the bgwriter', + proname => 'pg_stat_get_bgwriter_stat_reset_time', provolatile => 's', + proparallel => 'r', prorettype => 'timestamptz', proargtypes => '', + prosrc => 'pg_stat_get_bgwriter_stat_reset_time' }, +{ oid => '3160', + descr => 'statistics: checkpoint time spent writing buffers to disk, in milliseconds', + proname => 'pg_stat_get_checkpoint_write_time', provolatile => 's', + proparallel => 'r', prorettype => 'float8', proargtypes => '', + prosrc => 'pg_stat_get_checkpoint_write_time' }, +{ oid => '3161', + descr => 'statistics: checkpoint time spent synchronizing buffers to disk, in milliseconds', + proname => 'pg_stat_get_checkpoint_sync_time', provolatile => 's', + proparallel => 'r', prorettype => 'float8', proargtypes => '', + prosrc => 'pg_stat_get_checkpoint_sync_time' }, +{ oid => '2775', descr => 'statistics: number of buffers written by backends', + proname => 'pg_stat_get_buf_written_backend', provolatile => 's', + proparallel => 'r', prorettype => 'int8', proargtypes => '', + prosrc => 'pg_stat_get_buf_written_backend' }, +{ oid => '3063', + descr => 'statistics: number of backend buffer writes that did their own fsync', + proname => 'pg_stat_get_buf_fsync_backend', provolatile => 's', + proparallel => 'r', prorettype => 'int8', proargtypes => '', + prosrc => 'pg_stat_get_buf_fsync_backend' }, +{ oid => '2859', descr => 'statistics: number of buffer allocations', + proname => 'pg_stat_get_buf_alloc', provolatile => 's', proparallel => 'r', + prorettype => 'int8', proargtypes => '', prosrc => 'pg_stat_get_buf_alloc' }, + +{ oid => '2978', descr => 'statistics: number of function calls', + proname => 
'pg_stat_get_function_calls', provolatile => 's', + proparallel => 'r', prorettype => 'int8', proargtypes => 'oid', + prosrc => 'pg_stat_get_function_calls' }, +{ oid => '2979', + descr => 'statistics: total execution time of function, in milliseconds', + proname => 'pg_stat_get_function_total_time', provolatile => 's', + proparallel => 'r', prorettype => 'float8', proargtypes => 'oid', + prosrc => 'pg_stat_get_function_total_time' }, +{ oid => '2980', + descr => 'statistics: self execution time of function, in milliseconds', + proname => 'pg_stat_get_function_self_time', provolatile => 's', + proparallel => 'r', prorettype => 'float8', proargtypes => 'oid', + prosrc => 'pg_stat_get_function_self_time' }, + +{ oid => '3037', + descr => 'statistics: number of scans done for table/index in current transaction', + proname => 'pg_stat_get_xact_numscans', provolatile => 'v', + proparallel => 'r', prorettype => 'int8', proargtypes => 'oid', + prosrc => 'pg_stat_get_xact_numscans' }, +{ oid => '3038', + descr => 'statistics: number of tuples read by seqscan in current transaction', + proname => 'pg_stat_get_xact_tuples_returned', provolatile => 'v', + proparallel => 'r', prorettype => 'int8', proargtypes => 'oid', + prosrc => 'pg_stat_get_xact_tuples_returned' }, +{ oid => '3039', + descr => 'statistics: number of tuples fetched by idxscan in current transaction', + proname => 'pg_stat_get_xact_tuples_fetched', provolatile => 'v', + proparallel => 'r', prorettype => 'int8', proargtypes => 'oid', + prosrc => 'pg_stat_get_xact_tuples_fetched' }, +{ oid => '3040', + descr => 'statistics: number of tuples inserted in current transaction', + proname => 'pg_stat_get_xact_tuples_inserted', provolatile => 'v', + proparallel => 'r', prorettype => 'int8', proargtypes => 'oid', + prosrc => 'pg_stat_get_xact_tuples_inserted' }, +{ oid => '3041', + descr => 'statistics: number of tuples updated in current transaction', + proname => 'pg_stat_get_xact_tuples_updated', provolatile => 'v', + proparallel => 'r', prorettype => 'int8', proargtypes => 'oid', + prosrc => 'pg_stat_get_xact_tuples_updated' }, +{ oid => '3042', + descr => 'statistics: number of tuples deleted in current transaction', + proname => 'pg_stat_get_xact_tuples_deleted', provolatile => 'v', + proparallel => 'r', prorettype => 'int8', proargtypes => 'oid', + prosrc => 'pg_stat_get_xact_tuples_deleted' }, +{ oid => '3043', + descr => 'statistics: number of tuples hot updated in current transaction', + proname => 'pg_stat_get_xact_tuples_hot_updated', provolatile => 'v', + proparallel => 'r', prorettype => 'int8', proargtypes => 'oid', + prosrc => 'pg_stat_get_xact_tuples_hot_updated' }, +{ oid => '3044', + descr => 'statistics: number of blocks fetched in current transaction', + proname => 'pg_stat_get_xact_blocks_fetched', provolatile => 'v', + proparallel => 'r', prorettype => 'int8', proargtypes => 'oid', + prosrc => 'pg_stat_get_xact_blocks_fetched' }, +{ oid => '3045', + descr => 'statistics: number of blocks found in cache in current transaction', + proname => 'pg_stat_get_xact_blocks_hit', provolatile => 'v', + proparallel => 'r', prorettype => 'int8', proargtypes => 'oid', + prosrc => 'pg_stat_get_xact_blocks_hit' }, +{ oid => '3046', + descr => 'statistics: number of function calls in current transaction', + proname => 'pg_stat_get_xact_function_calls', provolatile => 'v', + proparallel => 'r', prorettype => 'int8', proargtypes => 'oid', + prosrc => 'pg_stat_get_xact_function_calls' }, +{ oid => '3047', + descr => 'statistics: total 
execution time of function in current transaction, in milliseconds', + proname => 'pg_stat_get_xact_function_total_time', provolatile => 'v', + proparallel => 'r', prorettype => 'float8', proargtypes => 'oid', + prosrc => 'pg_stat_get_xact_function_total_time' }, +{ oid => '3048', + descr => 'statistics: self execution time of function in current transaction, in milliseconds', + proname => 'pg_stat_get_xact_function_self_time', provolatile => 'v', + proparallel => 'r', prorettype => 'float8', proargtypes => 'oid', + prosrc => 'pg_stat_get_xact_function_self_time' }, + +{ oid => '3788', + descr => 'statistics: timestamp of the current statistics snapshot', + proname => 'pg_stat_get_snapshot_timestamp', provolatile => 's', + proparallel => 'r', prorettype => 'timestamptz', proargtypes => '', + prosrc => 'pg_stat_get_snapshot_timestamp' }, +{ oid => '2230', + descr => 'statistics: discard current transaction\'s statistics snapshot', + proname => 'pg_stat_clear_snapshot', proisstrict => 'f', provolatile => 'v', + proparallel => 'r', prorettype => 'void', proargtypes => '', + prosrc => 'pg_stat_clear_snapshot' }, +{ oid => '2274', + descr => 'statistics: reset collected statistics for current database', + proname => 'pg_stat_reset', proisstrict => 'f', provolatile => 'v', + prorettype => 'void', proargtypes => '', prosrc => 'pg_stat_reset' }, +{ oid => '3775', + descr => 'statistics: reset collected statistics shared across the cluster', + proname => 'pg_stat_reset_shared', provolatile => 'v', prorettype => 'void', + proargtypes => 'text', prosrc => 'pg_stat_reset_shared' }, +{ oid => '3776', + descr => 'statistics: reset collected statistics for a single table or index in the current database', + proname => 'pg_stat_reset_single_table_counters', provolatile => 'v', + prorettype => 'void', proargtypes => 'oid', + prosrc => 'pg_stat_reset_single_table_counters' }, +{ oid => '3777', + descr => 'statistics: reset collected statistics for a single function in the current database', + proname => 'pg_stat_reset_single_function_counters', provolatile => 'v', + prorettype => 'void', proargtypes => 'oid', + prosrc => 'pg_stat_reset_single_function_counters' }, + +{ oid => '3163', descr => 'current trigger depth', + proname => 'pg_trigger_depth', provolatile => 's', proparallel => 'r', + prorettype => 'int4', proargtypes => '', prosrc => 'pg_trigger_depth' }, + +{ oid => '3778', descr => 'tablespace location', + proname => 'pg_tablespace_location', provolatile => 's', prorettype => 'text', + proargtypes => 'oid', prosrc => 'pg_tablespace_location' }, + +{ oid => '1946', + descr => 'convert bytea value into some ascii-only text string', + proname => 'encode', prorettype => 'text', proargtypes => 'bytea text', + prosrc => 'binary_encode' }, +{ oid => '1947', + descr => 'convert ascii-encoded text string into bytea value', + proname => 'decode', prorettype => 'bytea', proargtypes => 'text text', + prosrc => 'binary_decode' }, + +{ oid => '1948', + proname => 'byteaeq', proleakproof => 't', prorettype => 'bool', + proargtypes => 'bytea bytea', prosrc => 'byteaeq' }, +{ oid => '1949', + proname => 'bytealt', proleakproof => 't', prorettype => 'bool', + proargtypes => 'bytea bytea', prosrc => 'bytealt' }, +{ oid => '1950', + proname => 'byteale', proleakproof => 't', prorettype => 'bool', + proargtypes => 'bytea bytea', prosrc => 'byteale' }, +{ oid => '1951', + proname => 'byteagt', proleakproof => 't', prorettype => 'bool', + proargtypes => 'bytea bytea', prosrc => 'byteagt' }, +{ oid => '1952', + proname => 
'byteage', proleakproof => 't', prorettype => 'bool', + proargtypes => 'bytea bytea', prosrc => 'byteage' }, +{ oid => '1953', + proname => 'byteane', proleakproof => 't', prorettype => 'bool', + proargtypes => 'bytea bytea', prosrc => 'byteane' }, +{ oid => '1954', descr => 'less-equal-greater', + proname => 'byteacmp', proleakproof => 't', prorettype => 'int4', + proargtypes => 'bytea bytea', prosrc => 'byteacmp' }, +{ oid => '3331', descr => 'sort support', + proname => 'bytea_sortsupport', prorettype => 'void', + proargtypes => 'internal', prosrc => 'bytea_sortsupport' }, + +{ oid => '3917', descr => 'transform a timestamp length coercion', + proname => 'timestamp_transform', prorettype => 'internal', + proargtypes => 'internal', prosrc => 'timestamp_transform' }, +{ oid => '3944', descr => 'transform a time length coercion', + proname => 'time_transform', prorettype => 'internal', + proargtypes => 'internal', prosrc => 'time_transform' }, + +{ oid => '1961', descr => 'adjust timestamp precision', + proname => 'timestamp', protransform => 'timestamp_transform', + prorettype => 'timestamp', proargtypes => 'timestamp int4', + prosrc => 'timestamp_scale' }, + +{ oid => '1965', descr => 'larger of two', + proname => 'oidlarger', prorettype => 'oid', proargtypes => 'oid oid', + prosrc => 'oidlarger' }, +{ oid => '1966', descr => 'smaller of two', + proname => 'oidsmaller', prorettype => 'oid', proargtypes => 'oid oid', + prosrc => 'oidsmaller' }, + +{ oid => '1967', descr => 'adjust timestamptz precision', + proname => 'timestamptz', protransform => 'timestamp_transform', + prorettype => 'timestamptz', proargtypes => 'timestamptz int4', + prosrc => 'timestamptz_scale' }, +{ oid => '1968', descr => 'adjust time precision', + proname => 'time', protransform => 'time_transform', prorettype => 'time', + proargtypes => 'time int4', prosrc => 'time_scale' }, +{ oid => '1969', descr => 'adjust time with time zone precision', + proname => 'timetz', protransform => 'time_transform', prorettype => 'timetz', + proargtypes => 'timetz int4', prosrc => 'timetz_scale' }, + +{ oid => '2003', + proname => 'textanycat', prolang => '14', provolatile => 's', + prorettype => 'text', proargtypes => 'text anynonarray', + prosrc => 'select $1 || $2::pg_catalog.text' }, +{ oid => '2004', + proname => 'anytextcat', prolang => '14', provolatile => 's', + prorettype => 'text', proargtypes => 'anynonarray text', + prosrc => 'select $1::pg_catalog.text || $2' }, + +{ oid => '2005', + proname => 'bytealike', prorettype => 'bool', proargtypes => 'bytea bytea', + prosrc => 'bytealike' }, +{ oid => '2006', + proname => 'byteanlike', prorettype => 'bool', proargtypes => 'bytea bytea', + prosrc => 'byteanlike' }, +{ oid => '2007', descr => 'matches LIKE expression', + proname => 'like', prorettype => 'bool', proargtypes => 'bytea bytea', + prosrc => 'bytealike' }, +{ oid => '2008', descr => 'does not match LIKE expression', + proname => 'notlike', prorettype => 'bool', proargtypes => 'bytea bytea', + prosrc => 'byteanlike' }, +{ oid => '2009', descr => 'convert LIKE pattern to use backslash escapes', + proname => 'like_escape', prorettype => 'bytea', proargtypes => 'bytea bytea', + prosrc => 'like_escape_bytea' }, +{ oid => '2010', descr => 'octet length', + proname => 'length', prorettype => 'int4', proargtypes => 'bytea', + prosrc => 'byteaoctetlen' }, +{ oid => '2011', + proname => 'byteacat', prorettype => 'bytea', proargtypes => 'bytea bytea', + prosrc => 'byteacat' }, +{ oid => '2012', descr => 'extract portion of 
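
The *_transform/*_scale entries implement the precision (typmod) coercions for the date/time types, and textanycat/anytextcat back the mixed-argument || operator; a small illustration:

    SELECT now()::timestamp(0);       -- rounded to whole seconds via timestamp_scale
    SELECT '12:34:56.789'::time(1);   -- 12:34:56.8, via time_scale
    SELECT 'id = ' || 42;             -- 'id = 42', resolved through textanycat
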
string', + proname => 'substring', prorettype => 'bytea', + proargtypes => 'bytea int4 int4', prosrc => 'bytea_substr' }, +{ oid => '2013', descr => 'extract portion of string', + proname => 'substring', prorettype => 'bytea', proargtypes => 'bytea int4', + prosrc => 'bytea_substr_no_len' }, +{ oid => '2085', descr => 'extract portion of string', + proname => 'substr', prorettype => 'bytea', proargtypes => 'bytea int4 int4', + prosrc => 'bytea_substr' }, +{ oid => '2086', descr => 'extract portion of string', + proname => 'substr', prorettype => 'bytea', proargtypes => 'bytea int4', + prosrc => 'bytea_substr_no_len' }, +{ oid => '2014', descr => 'position of substring', + proname => 'position', prorettype => 'int4', proargtypes => 'bytea bytea', + prosrc => 'byteapos' }, +{ oid => '2015', descr => 'trim both ends of string', + proname => 'btrim', prorettype => 'bytea', proargtypes => 'bytea bytea', + prosrc => 'byteatrim' }, + +{ oid => '2019', descr => 'convert timestamp with time zone to time', + proname => 'time', provolatile => 's', prorettype => 'time', + proargtypes => 'timestamptz', prosrc => 'timestamptz_time' }, +{ oid => '2020', descr => 'truncate timestamp to specified units', + proname => 'date_trunc', prorettype => 'timestamp', + proargtypes => 'text timestamp', prosrc => 'timestamp_trunc' }, +{ oid => '2021', descr => 'extract field from timestamp', + proname => 'date_part', prorettype => 'float8', + proargtypes => 'text timestamp', prosrc => 'timestamp_part' }, +{ oid => '2024', descr => 'convert date to timestamp', + proname => 'timestamp', prorettype => 'timestamp', proargtypes => 'date', + prosrc => 'date_timestamp' }, +{ oid => '2025', descr => 'convert date and time to timestamp', + proname => 'timestamp', prorettype => 'timestamp', proargtypes => 'date time', + prosrc => 'datetime_timestamp' }, +{ oid => '2027', descr => 'convert timestamp with time zone to timestamp', + proname => 'timestamp', provolatile => 's', prorettype => 'timestamp', + proargtypes => 'timestamptz', prosrc => 'timestamptz_timestamp' }, +{ oid => '2028', descr => 'convert timestamp to timestamp with time zone', + proname => 'timestamptz', provolatile => 's', prorettype => 'timestamptz', + proargtypes => 'timestamp', prosrc => 'timestamp_timestamptz' }, +{ oid => '2029', descr => 'convert timestamp to date', + proname => 'date', prorettype => 'date', proargtypes => 'timestamp', + prosrc => 'timestamp_date' }, +{ oid => '2031', + proname => 'timestamp_mi', prorettype => 'interval', + proargtypes => 'timestamp timestamp', prosrc => 'timestamp_mi' }, +{ oid => '2032', + proname => 'timestamp_pl_interval', prorettype => 'timestamp', + proargtypes => 'timestamp interval', prosrc => 'timestamp_pl_interval' }, +{ oid => '2033', + proname => 'timestamp_mi_interval', prorettype => 'timestamp', + proargtypes => 'timestamp interval', prosrc => 'timestamp_mi_interval' }, +{ oid => '2035', descr => 'smaller of two', + proname => 'timestamp_smaller', prorettype => 'timestamp', + proargtypes => 'timestamp timestamp', prosrc => 'timestamp_smaller' }, +{ oid => '2036', descr => 'larger of two', + proname => 'timestamp_larger', prorettype => 'timestamp', + proargtypes => 'timestamp timestamp', prosrc => 'timestamp_larger' }, +{ oid => '2037', descr => 'adjust time with time zone to new zone', + proname => 'timezone', provolatile => 'v', prorettype => 'timetz', + proargtypes => 'text timetz', prosrc => 'timetz_zone' }, +{ oid => '2038', descr => 'adjust time with time zone to new zone', + proname => 'timezone', 
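
The bytea substring/position/btrim entries and date_trunc/date_part are exercised from SQL as in this sketch:

    SELECT substring('\x1234567890'::bytea from 2 for 3);        -- \x345678
    SELECT position('\x5678'::bytea in '\x1234567890'::bytea);   -- 3
    SELECT date_trunc('hour', timestamp '2018-11-11 22:59:12');  -- 2018-11-11 22:00:00
    SELECT date_part('dow', timestamp '2018-11-11 22:59:12');    -- 0 (Sunday)
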
prorettype => 'timetz', + proargtypes => 'interval timetz', prosrc => 'timetz_izone' }, +{ oid => '2039', descr => 'hash', + proname => 'timestamp_hash', prorettype => 'int4', proargtypes => 'timestamp', + prosrc => 'timestamp_hash' }, +{ oid => '3411', descr => 'hash', + proname => 'timestamp_hash_extended', prorettype => 'int8', + proargtypes => 'timestamp int8', prosrc => 'timestamp_hash_extended' }, +{ oid => '2041', descr => 'intervals overlap?', + proname => 'overlaps', proisstrict => 'f', prorettype => 'bool', + proargtypes => 'timestamp timestamp timestamp timestamp', + prosrc => 'overlaps_timestamp' }, +{ oid => '2042', descr => 'intervals overlap?', + proname => 'overlaps', prolang => '14', proisstrict => 'f', + prorettype => 'bool', proargtypes => 'timestamp interval timestamp interval', + prosrc => 'select ($1, ($1 + $2)) overlaps ($3, ($3 + $4))' }, +{ oid => '2043', descr => 'intervals overlap?', + proname => 'overlaps', prolang => '14', proisstrict => 'f', + prorettype => 'bool', proargtypes => 'timestamp timestamp timestamp interval', + prosrc => 'select ($1, $2) overlaps ($3, ($3 + $4))' }, +{ oid => '2044', descr => 'intervals overlap?', + proname => 'overlaps', prolang => '14', proisstrict => 'f', + prorettype => 'bool', proargtypes => 'timestamp interval timestamp timestamp', + prosrc => 'select ($1, ($1 + $2)) overlaps ($3, $4)' }, +{ oid => '2045', descr => 'less-equal-greater', + proname => 'timestamp_cmp', proleakproof => 't', prorettype => 'int4', + proargtypes => 'timestamp timestamp', prosrc => 'timestamp_cmp' }, +{ oid => '3137', descr => 'sort support', + proname => 'timestamp_sortsupport', prorettype => 'void', + proargtypes => 'internal', prosrc => 'timestamp_sortsupport' }, + +{ oid => '4134', descr => 'window RANGE support', + proname => 'in_range', prorettype => 'bool', + proargtypes => 'timestamp timestamp interval bool bool', + prosrc => 'in_range_timestamp_interval' }, +{ oid => '4135', descr => 'window RANGE support', + proname => 'in_range', provolatile => 's', prorettype => 'bool', + proargtypes => 'timestamptz timestamptz interval bool bool', + prosrc => 'in_range_timestamptz_interval' }, +{ oid => '4136', descr => 'window RANGE support', + proname => 'in_range', prorettype => 'bool', + proargtypes => 'interval interval interval bool bool', + prosrc => 'in_range_interval_interval' }, +{ oid => '4137', descr => 'window RANGE support', + proname => 'in_range', prorettype => 'bool', + proargtypes => 'time time interval bool bool', + prosrc => 'in_range_time_interval' }, +{ oid => '4138', descr => 'window RANGE support', + proname => 'in_range', prorettype => 'bool', + proargtypes => 'timetz timetz interval bool bool', + prosrc => 'in_range_timetz_interval' }, + +{ oid => '2046', descr => 'convert time with time zone to time', + proname => 'time', prorettype => 'time', proargtypes => 'timetz', + prosrc => 'timetz_time' }, +{ oid => '2047', descr => 'convert time to time with time zone', + proname => 'timetz', provolatile => 's', prorettype => 'timetz', + proargtypes => 'time', prosrc => 'time_timetz' }, +{ oid => '2048', descr => 'finite timestamp?', + proname => 'isfinite', prorettype => 'bool', proargtypes => 'timestamp', + prosrc => 'timestamp_finite' }, +{ oid => '2049', descr => 'format timestamp to text', + proname => 'to_char', provolatile => 's', prorettype => 'text', + proargtypes => 'timestamp text', prosrc => 'timestamp_to_char' }, +{ oid => '2052', + proname => 'timestamp_eq', proleakproof => 't', prorettype => 'bool', + proargtypes => 
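
The overlaps variants back the SQL OVERLAPS construct, and the in_range entries are the support functions behind RANGE offset window frames over datetime ORDER BY columns; for example:

    SELECT (timestamp '2018-01-01', timestamp '2018-06-01')
           OVERLAPS (timestamp '2018-05-01', interval '3 months');   -- true

    SELECT ts,
           count(*) OVER (ORDER BY ts
                          RANGE BETWEEN interval '1 hour' PRECEDING
                                    AND CURRENT ROW) AS recent
    FROM (VALUES (timestamp '2018-11-11 10:00'),
                 (timestamp '2018-11-11 10:30'),
                 (timestamp '2018-11-11 12:00')) AS t(ts);
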
'timestamp timestamp', prosrc => 'timestamp_eq' }, +{ oid => '2053', + proname => 'timestamp_ne', proleakproof => 't', prorettype => 'bool', + proargtypes => 'timestamp timestamp', prosrc => 'timestamp_ne' }, +{ oid => '2054', + proname => 'timestamp_lt', proleakproof => 't', prorettype => 'bool', + proargtypes => 'timestamp timestamp', prosrc => 'timestamp_lt' }, +{ oid => '2055', + proname => 'timestamp_le', proleakproof => 't', prorettype => 'bool', + proargtypes => 'timestamp timestamp', prosrc => 'timestamp_le' }, +{ oid => '2056', + proname => 'timestamp_ge', proleakproof => 't', prorettype => 'bool', + proargtypes => 'timestamp timestamp', prosrc => 'timestamp_ge' }, +{ oid => '2057', + proname => 'timestamp_gt', proleakproof => 't', prorettype => 'bool', + proargtypes => 'timestamp timestamp', prosrc => 'timestamp_gt' }, +{ oid => '2058', descr => 'date difference preserving months and years', + proname => 'age', prorettype => 'interval', + proargtypes => 'timestamp timestamp', prosrc => 'timestamp_age' }, +{ oid => '2059', + descr => 'date difference from today preserving months and years', + proname => 'age', prolang => '14', provolatile => 's', + prorettype => 'interval', proargtypes => 'timestamp', + prosrc => 'select pg_catalog.age(cast(current_date as timestamp without time zone), $1)' }, + +{ oid => '2069', descr => 'adjust timestamp to new time zone', + proname => 'timezone', protransform => 'timestamp_zone_transform', + prorettype => 'timestamptz', proargtypes => 'text timestamp', + prosrc => 'timestamp_zone' }, +{ oid => '2070', descr => 'adjust timestamp to new time zone', + proname => 'timezone', protransform => 'timestamp_izone_transform', + prorettype => 'timestamptz', proargtypes => 'interval timestamp', + prosrc => 'timestamp_izone' }, +{ oid => '2071', + proname => 'date_pl_interval', prorettype => 'timestamp', + proargtypes => 'date interval', prosrc => 'date_pl_interval' }, +{ oid => '2072', + proname => 'date_mi_interval', prorettype => 'timestamp', + proargtypes => 'date interval', prosrc => 'date_mi_interval' }, + +{ oid => '2073', descr => 'extract text matching regular expression', + proname => 'substring', prorettype => 'text', proargtypes => 'text text', + prosrc => 'textregexsubstr' }, +{ oid => '2074', descr => 'extract text matching SQL99 regular expression', + proname => 'substring', prolang => '14', prorettype => 'text', + proargtypes => 'text text text', + prosrc => 'select pg_catalog.substring($1, pg_catalog.similar_escape($2, $3))' }, + +{ oid => '2075', descr => 'convert int8 to bitstring', + proname => 'bit', prorettype => 'bit', proargtypes => 'int8 int4', + prosrc => 'bitfromint8' }, +{ oid => '2076', descr => 'convert bitstring to int8', + proname => 'int8', prorettype => 'int8', proargtypes => 'bit', + prosrc => 'bittoint8' }, + +{ oid => '2077', descr => 'SHOW X as a function', + proname => 'current_setting', provolatile => 's', prorettype => 'text', + proargtypes => 'text', prosrc => 'show_config_by_name' }, +{ oid => '3294', + descr => 'SHOW X as a function, optionally no error for missing variable', + proname => 'current_setting', provolatile => 's', prorettype => 'text', + proargtypes => 'text bool', prosrc => 'show_config_by_name_missing_ok' }, +{ oid => '2078', descr => 'SET X as a function', + proname => 'set_config', proisstrict => 'f', provolatile => 'v', + proparallel => 'u', prorettype => 'text', proargtypes => 'text text bool', + prosrc => 'set_config_by_name' }, +{ oid => '2084', descr => 'SHOW ALL as a function', + proname => 
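
current_setting and set_config are the function forms of SHOW and SET, and timezone(text, timestamp) backs AT TIME ZONE; a quick illustration:

    SELECT current_setting('work_mem');
    SELECT current_setting('no_such.setting', true);         -- NULL instead of an error
    SELECT set_config('work_mem', '64MB', true);             -- true = local to the current transaction
    SELECT timestamp '2018-11-11 22:59' AT TIME ZONE 'UTC';  -- backed by timezone(text, timestamp)
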
'pg_show_all_settings', prorows => '1000', proretset => 't', + provolatile => 's', prorettype => 'record', proargtypes => '', + proallargtypes => '{text,text,text,text,text,text,text,text,text,text,text,_text,text,text,text,int4,bool}', + proargmodes => '{o,o,o,o,o,o,o,o,o,o,o,o,o,o,o,o,o}', + proargnames => '{name,setting,unit,category,short_desc,extra_desc,context,vartype,source,min_val,max_val,enumvals,boot_val,reset_val,sourcefile,sourceline,pending_restart}', + prosrc => 'show_all_settings' }, +{ oid => '3329', descr => 'show config file settings', + proname => 'pg_show_all_file_settings', prorows => '1000', proretset => 't', + provolatile => 'v', prorettype => 'record', proargtypes => '', + proallargtypes => '{text,int4,int4,text,text,bool,text}', + proargmodes => '{o,o,o,o,o,o,o}', + proargnames => '{sourcefile,sourceline,seqno,name,setting,applied,error}', + prosrc => 'show_all_file_settings' }, +{ oid => '3401', descr => 'show pg_hba.conf rules', + proname => 'pg_hba_file_rules', prorows => '1000', proretset => 't', + provolatile => 'v', prorettype => 'record', proargtypes => '', + proallargtypes => '{int4,text,_text,_text,text,text,text,_text,text}', + proargmodes => '{o,o,o,o,o,o,o,o,o}', + proargnames => '{line_number,type,database,user_name,address,netmask,auth_method,options,error}', + prosrc => 'pg_hba_file_rules' }, +{ oid => '1371', descr => 'view system lock information', + proname => 'pg_lock_status', prorows => '1000', proretset => 't', + provolatile => 'v', prorettype => 'record', proargtypes => '', + proallargtypes => '{text,oid,oid,int4,int2,text,xid,oid,oid,int2,text,int4,text,bool,bool}', + proargmodes => '{o,o,o,o,o,o,o,o,o,o,o,o,o,o,o}', + proargnames => '{locktype,database,relation,page,tuple,virtualxid,transactionid,classid,objid,objsubid,virtualtransaction,pid,mode,granted,fastpath}', + prosrc => 'pg_lock_status' }, +{ oid => '2561', + descr => 'get array of PIDs of sessions blocking specified backend PID from acquiring a heavyweight lock', + proname => 'pg_blocking_pids', provolatile => 'v', prorettype => '_int4', + proargtypes => 'int4', prosrc => 'pg_blocking_pids' }, +{ oid => '3376', + descr => 'get array of PIDs of sessions blocking specified backend PID from acquiring a safe snapshot', + proname => 'pg_safe_snapshot_blocking_pids', provolatile => 'v', + prorettype => '_int4', proargtypes => 'int4', + prosrc => 'pg_safe_snapshot_blocking_pids' }, +{ oid => '3378', descr => 'isolationtester support function', + proname => 'pg_isolation_test_session_is_blocked', provolatile => 'v', + prorettype => 'bool', proargtypes => 'int4 _int4', + prosrc => 'pg_isolation_test_session_is_blocked' }, +{ oid => '1065', descr => 'view two-phase transactions', + proname => 'pg_prepared_xact', prorows => '1000', proretset => 't', + provolatile => 'v', prorettype => 'record', proargtypes => '', + proallargtypes => '{xid,text,timestamptz,oid,oid}', + proargmodes => '{o,o,o,o,o}', + proargnames => '{transaction,gid,prepared,ownerid,dbid}', + prosrc => 'pg_prepared_xact' }, +{ oid => '3819', descr => 'view members of a multixactid', + proname => 'pg_get_multixact_members', prorows => '1000', proretset => 't', + provolatile => 'v', prorettype => 'record', proargtypes => 'xid', + proallargtypes => '{xid,xid,text}', proargmodes => '{i,o,o}', + proargnames => '{multixid,xid,mode}', prosrc => 'pg_get_multixact_members' }, + +{ oid => '3581', descr => 'get commit timestamp of a transaction', + proname => 'pg_xact_commit_timestamp', provolatile => 'v', + prorettype => 'timestamptz', 
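
pg_lock_status feeds the pg_locks view, while pg_blocking_pids and pg_hba_file_rules can be queried directly; for instance:

    SELECT pid, pg_blocking_pids(pid) AS blocked_by
    FROM pg_stat_activity
    WHERE cardinality(pg_blocking_pids(pid)) > 0;

    SELECT line_number, type, database, user_name, auth_method
    FROM pg_hba_file_rules;
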
proargtypes => 'xid', + prosrc => 'pg_xact_commit_timestamp' }, + +{ oid => '3583', + descr => 'get transaction Id and commit timestamp of latest transaction commit', + proname => 'pg_last_committed_xact', provolatile => 'v', + prorettype => 'record', proargtypes => '', + proallargtypes => '{xid,timestamptz}', proargmodes => '{o,o}', + proargnames => '{xid,timestamp}', prosrc => 'pg_last_committed_xact' }, + +{ oid => '3537', descr => 'get identification of SQL object', + proname => 'pg_describe_object', provolatile => 's', prorettype => 'text', + proargtypes => 'oid oid int4', prosrc => 'pg_describe_object' }, + +{ oid => '3839', + descr => 'get machine-parseable identification of SQL object', + proname => 'pg_identify_object', provolatile => 's', prorettype => 'record', + proargtypes => 'oid oid int4', + proallargtypes => '{oid,oid,int4,text,text,text,text}', + proargmodes => '{i,i,i,o,o,o,o}', + proargnames => '{classid,objid,objsubid,type,schema,name,identity}', + prosrc => 'pg_identify_object' }, + +{ oid => '3382', + descr => 'get identification of SQL object for pg_get_object_address()', + proname => 'pg_identify_object_as_address', provolatile => 's', + prorettype => 'record', proargtypes => 'oid oid int4', + proallargtypes => '{oid,oid,int4,text,_text,_text}', + proargmodes => '{i,i,i,o,o,o}', + proargnames => '{classid,objid,objsubid,type,object_names,object_args}', + prosrc => 'pg_identify_object_as_address' }, + +{ oid => '3954', + descr => 'get OID-based object address from name/args arrays', + proname => 'pg_get_object_address', provolatile => 's', + prorettype => 'record', proargtypes => 'text _text _text', + proallargtypes => '{text,_text,_text,oid,oid,int4}', + proargmodes => '{i,i,i,o,o,o}', + proargnames => '{type,object_names,object_args,classid,objid,objsubid}', + prosrc => 'pg_get_object_address' }, + +{ oid => '2079', descr => 'is table visible in search path?', + proname => 'pg_table_is_visible', procost => '10', provolatile => 's', + prorettype => 'bool', proargtypes => 'oid', prosrc => 'pg_table_is_visible' }, +{ oid => '2080', descr => 'is type visible in search path?', + proname => 'pg_type_is_visible', procost => '10', provolatile => 's', + prorettype => 'bool', proargtypes => 'oid', prosrc => 'pg_type_is_visible' }, +{ oid => '2081', descr => 'is function visible in search path?', + proname => 'pg_function_is_visible', procost => '10', provolatile => 's', + prorettype => 'bool', proargtypes => 'oid', + prosrc => 'pg_function_is_visible' }, +{ oid => '2082', descr => 'is operator visible in search path?', + proname => 'pg_operator_is_visible', procost => '10', provolatile => 's', + prorettype => 'bool', proargtypes => 'oid', + prosrc => 'pg_operator_is_visible' }, +{ oid => '2083', descr => 'is opclass visible in search path?', + proname => 'pg_opclass_is_visible', procost => '10', provolatile => 's', + prorettype => 'bool', proargtypes => 'oid', + prosrc => 'pg_opclass_is_visible' }, +{ oid => '3829', descr => 'is opfamily visible in search path?', + proname => 'pg_opfamily_is_visible', procost => '10', provolatile => 's', + prorettype => 'bool', proargtypes => 'oid', + prosrc => 'pg_opfamily_is_visible' }, +{ oid => '2093', descr => 'is conversion visible in search path?', + proname => 'pg_conversion_is_visible', procost => '10', provolatile => 's', + prorettype => 'bool', proargtypes => 'oid', + prosrc => 'pg_conversion_is_visible' }, +{ oid => '3403', descr => 'is statistics object visible in search path?', + proname => 'pg_statistics_obj_is_visible', 
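
The object-identity and *_is_visible entries are typically used for catalog introspection; an illustrative pair of calls (lower(text) is just a convenient built-in to point at):

    SELECT pg_describe_object('pg_proc'::regclass, 'lower(text)'::regprocedure, 0);
    -- function lower(text)
    SELECT * FROM pg_identify_object('pg_proc'::regclass, 'lower(text)'::regprocedure, 0);
    SELECT pg_table_is_visible('pg_class'::regclass);   -- true; pg_catalog is always in the path
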
procost => '10', + provolatile => 's', prorettype => 'bool', proargtypes => 'oid', + prosrc => 'pg_statistics_obj_is_visible' }, +{ oid => '3756', descr => 'is text search parser visible in search path?', + proname => 'pg_ts_parser_is_visible', procost => '10', provolatile => 's', + prorettype => 'bool', proargtypes => 'oid', + prosrc => 'pg_ts_parser_is_visible' }, +{ oid => '3757', descr => 'is text search dictionary visible in search path?', + proname => 'pg_ts_dict_is_visible', procost => '10', provolatile => 's', + prorettype => 'bool', proargtypes => 'oid', + prosrc => 'pg_ts_dict_is_visible' }, +{ oid => '3768', descr => 'is text search template visible in search path?', + proname => 'pg_ts_template_is_visible', procost => '10', provolatile => 's', + prorettype => 'bool', proargtypes => 'oid', + prosrc => 'pg_ts_template_is_visible' }, +{ oid => '3758', + descr => 'is text search configuration visible in search path?', + proname => 'pg_ts_config_is_visible', procost => '10', provolatile => 's', + prorettype => 'bool', proargtypes => 'oid', + prosrc => 'pg_ts_config_is_visible' }, +{ oid => '3815', descr => 'is collation visible in search path?', + proname => 'pg_collation_is_visible', procost => '10', provolatile => 's', + prorettype => 'bool', proargtypes => 'oid', + prosrc => 'pg_collation_is_visible' }, + +{ oid => '2854', descr => 'get OID of current session\'s temp schema, if any', + proname => 'pg_my_temp_schema', provolatile => 's', proparallel => 'r', + prorettype => 'oid', proargtypes => '', prosrc => 'pg_my_temp_schema' }, +{ oid => '2855', descr => 'is schema another session\'s temp schema?', + proname => 'pg_is_other_temp_schema', provolatile => 's', + prorettype => 'bool', proargtypes => 'oid', + prosrc => 'pg_is_other_temp_schema' }, + +{ oid => '2171', descr => 'cancel a server process\' current query', + proname => 'pg_cancel_backend', provolatile => 'v', prorettype => 'bool', + proargtypes => 'int4', prosrc => 'pg_cancel_backend' }, +{ oid => '2096', descr => 'terminate a server process', + proname => 'pg_terminate_backend', provolatile => 'v', prorettype => 'bool', + proargtypes => 'int4', prosrc => 'pg_terminate_backend' }, +{ oid => '2172', descr => 'prepare for taking an online backup', + proname => 'pg_start_backup', provolatile => 'v', proparallel => 'r', + prorettype => 'pg_lsn', proargtypes => 'text bool bool', + prosrc => 'pg_start_backup' }, +{ oid => '2173', descr => 'finish taking an online backup', + proname => 'pg_stop_backup', provolatile => 'v', proparallel => 'r', + prorettype => 'pg_lsn', proargtypes => '', prosrc => 'pg_stop_backup' }, +{ oid => '2739', descr => 'finish taking an online backup', + proname => 'pg_stop_backup', prorows => '1', proretset => 't', + provolatile => 'v', proparallel => 'r', prorettype => 'record', + proargtypes => 'bool bool', proallargtypes => '{bool,bool,pg_lsn,text,text}', + proargmodes => '{i,i,o,o,o}', + proargnames => '{exclusive,wait_for_archive,lsn,labelfile,spcmapfile}', + prosrc => 'pg_stop_backup_v2' }, +{ oid => '3813', descr => 'true if server is in online backup', + proname => 'pg_is_in_backup', provolatile => 'v', prorettype => 'bool', + proargtypes => '', prosrc => 'pg_is_in_backup' }, +{ oid => '3814', descr => 'start time of an online backup', + proname => 'pg_backup_start_time', provolatile => 's', + prorettype => 'timestamptz', proargtypes => '', + prosrc => 'pg_backup_start_time' }, +{ oid => '3436', descr => 'promote standby server', + proname => 'pg_promote', provolatile => 'v', + prorettype => 
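
The backend-signalling and backup-control entries above are called like any other function; a sketch, with 12345 standing in for a real backend PID:

    SELECT pg_cancel_backend(12345);                   -- cancel that backend's current query
    SELECT pg_terminate_backend(12345);                -- terminate the backend entirely
    SELECT pg_start_backup('nightly', false, false);   -- begin a non-exclusive base backup
    SELECT * FROM pg_stop_backup(false, true);         -- returns lsn, labelfile, spcmapfile
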
'bool', proargtypes => 'bool int4', proargnames => '{wait,wait_seconds}', + prosrc => 'pg_promote' }, +{ oid => '2848', descr => 'switch to new wal file', + proname => 'pg_switch_wal', provolatile => 'v', prorettype => 'pg_lsn', + proargtypes => '', prosrc => 'pg_switch_wal' }, +{ oid => '3098', descr => 'create a named restore point', + proname => 'pg_create_restore_point', provolatile => 'v', + prorettype => 'pg_lsn', proargtypes => 'text', + prosrc => 'pg_create_restore_point' }, +{ oid => '2849', descr => 'current wal write location', + proname => 'pg_current_wal_lsn', provolatile => 'v', prorettype => 'pg_lsn', + proargtypes => '', prosrc => 'pg_current_wal_lsn' }, +{ oid => '2852', descr => 'current wal insert location', + proname => 'pg_current_wal_insert_lsn', provolatile => 'v', + prorettype => 'pg_lsn', proargtypes => '', + prosrc => 'pg_current_wal_insert_lsn' }, +{ oid => '3330', descr => 'current wal flush location', + proname => 'pg_current_wal_flush_lsn', provolatile => 'v', + prorettype => 'pg_lsn', proargtypes => '', + prosrc => 'pg_current_wal_flush_lsn' }, +{ oid => '2850', + descr => 'wal filename and byte offset, given a wal location', + proname => 'pg_walfile_name_offset', prorettype => 'record', + proargtypes => 'pg_lsn', proallargtypes => '{pg_lsn,text,int4}', + proargmodes => '{i,o,o}', proargnames => '{lsn,file_name,file_offset}', + prosrc => 'pg_walfile_name_offset' }, +{ oid => '2851', descr => 'wal filename, given a wal location', + proname => 'pg_walfile_name', prorettype => 'text', proargtypes => 'pg_lsn', + prosrc => 'pg_walfile_name' }, + +{ oid => '3165', descr => 'difference in bytes, given two wal locations', + proname => 'pg_wal_lsn_diff', prorettype => 'numeric', + proargtypes => 'pg_lsn pg_lsn', prosrc => 'pg_wal_lsn_diff' }, + +{ oid => '3809', descr => 'export a snapshot', + proname => 'pg_export_snapshot', provolatile => 'v', proparallel => 'u', + prorettype => 'text', proargtypes => '', prosrc => 'pg_export_snapshot' }, + +{ oid => '3810', descr => 'true if server is in recovery', + proname => 'pg_is_in_recovery', provolatile => 'v', prorettype => 'bool', + proargtypes => '', prosrc => 'pg_is_in_recovery' }, + +{ oid => '3820', descr => 'current wal flush location', + proname => 'pg_last_wal_receive_lsn', provolatile => 'v', + prorettype => 'pg_lsn', proargtypes => '', + prosrc => 'pg_last_wal_receive_lsn' }, +{ oid => '3821', descr => 'last wal replay location', + proname => 'pg_last_wal_replay_lsn', provolatile => 'v', + prorettype => 'pg_lsn', proargtypes => '', + prosrc => 'pg_last_wal_replay_lsn' }, +{ oid => '3830', descr => 'timestamp of last replay xact', + proname => 'pg_last_xact_replay_timestamp', provolatile => 'v', + prorettype => 'timestamptz', proargtypes => '', + prosrc => 'pg_last_xact_replay_timestamp' }, + +{ oid => '3071', descr => 'pause wal replay', + proname => 'pg_wal_replay_pause', provolatile => 'v', prorettype => 'void', + proargtypes => '', prosrc => 'pg_wal_replay_pause' }, +{ oid => '3072', descr => 'resume wal replay, if it was paused', + proname => 'pg_wal_replay_resume', provolatile => 'v', prorettype => 'void', + proargtypes => '', prosrc => 'pg_wal_replay_resume' }, +{ oid => '3073', descr => 'true if wal replay is paused', + proname => 'pg_is_wal_replay_paused', provolatile => 'v', + prorettype => 'bool', proargtypes => '', + prosrc => 'pg_is_wal_replay_paused' }, + +{ oid => '2621', descr => 'reload configuration files', + proname => 'pg_reload_conf', provolatile => 'v', prorettype => 'bool', + proargtypes => 
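
The WAL-location functions combine naturally with pg_wal_lsn_diff for monitoring; an illustrative pair of queries (the second only makes sense on a standby):

    SELECT pg_current_wal_lsn(),
           pg_walfile_name(pg_current_wal_lsn());

    SELECT pg_is_in_recovery(),
           pg_wal_lsn_diff(pg_last_wal_receive_lsn(),
                           pg_last_wal_replay_lsn()) AS replay_lag_bytes;
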
'', prosrc => 'pg_reload_conf' }, +{ oid => '2622', descr => 'rotate log file', + proname => 'pg_rotate_logfile', provolatile => 'v', prorettype => 'bool', + proargtypes => '', prosrc => 'pg_rotate_logfile_v2' }, +{ oid => '4099', descr => 'rotate log file - old version for adminpack 1.0', + proname => 'pg_rotate_logfile_old', provolatile => 'v', prorettype => 'bool', + proargtypes => '', prosrc => 'pg_rotate_logfile' }, +{ oid => '3800', descr => 'current logging collector file location', + proname => 'pg_current_logfile', proisstrict => 'f', provolatile => 'v', + prorettype => 'text', proargtypes => '', prosrc => 'pg_current_logfile' }, +{ oid => '3801', descr => 'current logging collector file location', + proname => 'pg_current_logfile', proisstrict => 'f', provolatile => 'v', + prorettype => 'text', proargtypes => 'text', + prosrc => 'pg_current_logfile_1arg' }, + +{ oid => '2623', descr => 'get information about file', + proname => 'pg_stat_file', provolatile => 'v', prorettype => 'record', + proargtypes => 'text', + proallargtypes => '{text,int8,timestamptz,timestamptz,timestamptz,timestamptz,bool}', + proargmodes => '{i,o,o,o,o,o,o}', + proargnames => '{filename,size,access,modification,change,creation,isdir}', + prosrc => 'pg_stat_file_1arg' }, +{ oid => '3307', descr => 'get information about file', + proname => 'pg_stat_file', provolatile => 'v', prorettype => 'record', + proargtypes => 'text bool', + proallargtypes => '{text,bool,int8,timestamptz,timestamptz,timestamptz,timestamptz,bool}', + proargmodes => '{i,i,o,o,o,o,o,o}', + proargnames => '{filename,missing_ok,size,access,modification,change,creation,isdir}', + prosrc => 'pg_stat_file' }, +{ oid => '2624', descr => 'read text from a file', + proname => 'pg_read_file', provolatile => 'v', prorettype => 'text', + proargtypes => 'text int8 int8', prosrc => 'pg_read_file_off_len' }, +{ oid => '3293', descr => 'read text from a file', + proname => 'pg_read_file', provolatile => 'v', prorettype => 'text', + proargtypes => 'text int8 int8 bool', prosrc => 'pg_read_file_v2' }, +{ oid => '4100', + descr => 'read text from a file - old version for adminpack 1.0', + proname => 'pg_read_file_old', provolatile => 'v', prorettype => 'text', + proargtypes => 'text int8 int8', prosrc => 'pg_read_file' }, +{ oid => '3826', descr => 'read text from a file', + proname => 'pg_read_file', provolatile => 'v', prorettype => 'text', + proargtypes => 'text', prosrc => 'pg_read_file_all' }, +{ oid => '3827', descr => 'read bytea from a file', + proname => 'pg_read_binary_file', provolatile => 'v', prorettype => 'bytea', + proargtypes => 'text int8 int8', prosrc => 'pg_read_binary_file_off_len' }, +{ oid => '3295', descr => 'read bytea from a file', + proname => 'pg_read_binary_file', provolatile => 'v', prorettype => 'bytea', + proargtypes => 'text int8 int8 bool', prosrc => 'pg_read_binary_file' }, +{ oid => '3828', descr => 'read bytea from a file', + proname => 'pg_read_binary_file', provolatile => 'v', prorettype => 'bytea', + proargtypes => 'text', prosrc => 'pg_read_binary_file_all' }, +{ oid => '2625', descr => 'list all files in a directory', + proname => 'pg_ls_dir', prorows => '1000', proretset => 't', + provolatile => 'v', prorettype => 'text', proargtypes => 'text', + prosrc => 'pg_ls_dir_1arg' }, +{ oid => '3297', descr => 'list all files in a directory', + proname => 'pg_ls_dir', prorows => '1000', proretset => 't', + provolatile => 'v', prorettype => 'text', proargtypes => 'text bool bool', + prosrc => 'pg_ls_dir' }, +{ oid => 
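
The generic file-access entries (pg_stat_file, pg_read_file, pg_ls_dir, pg_current_logfile) operate relative to the data directory; for example:

    SELECT pg_current_logfile();                      -- NULL unless the logging collector is on
    SELECT * FROM pg_ls_dir('pg_wal') AS dir(entry) LIMIT 3;
    SELECT size, modification FROM pg_stat_file('postgresql.conf');
    SELECT pg_read_file('postmaster.pid', 0, 100);    -- first 100 bytes, as text
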
'2626', descr => 'sleep for the specified time in seconds', + proname => 'pg_sleep', provolatile => 'v', prorettype => 'void', + proargtypes => 'float8', prosrc => 'pg_sleep' }, +{ oid => '3935', descr => 'sleep for the specified interval', + proname => 'pg_sleep_for', prolang => '14', provolatile => 'v', + prorettype => 'void', proargtypes => 'interval', + prosrc => 'select pg_catalog.pg_sleep(extract(epoch from pg_catalog.clock_timestamp() operator(pg_catalog.+) $1) operator(pg_catalog.-) extract(epoch from pg_catalog.clock_timestamp()))' }, +{ oid => '3936', descr => 'sleep until the specified time', + proname => 'pg_sleep_until', prolang => '14', provolatile => 'v', + prorettype => 'void', proargtypes => 'timestamptz', + prosrc => 'select pg_catalog.pg_sleep(extract(epoch from $1) operator(pg_catalog.-) extract(epoch from pg_catalog.clock_timestamp()))' }, +{ oid => '315', descr => 'Is JIT compilation available in this session?', + proname => 'pg_jit_available', provolatile => 'v', prorettype => 'bool', + proargtypes => '', prosrc => 'pg_jit_available' }, + +{ oid => '2971', descr => 'convert boolean to text', + proname => 'text', prorettype => 'text', proargtypes => 'bool', + prosrc => 'booltext' }, + +# Aggregates (moved here from pg_aggregate for 7.3) + +{ oid => '2100', + descr => 'the average (arithmetic mean) as numeric of all bigint values', + proname => 'avg', prokind => 'a', proisstrict => 'f', prorettype => 'numeric', + proargtypes => 'int8', prosrc => 'aggregate_dummy' }, +{ oid => '2101', + descr => 'the average (arithmetic mean) as numeric of all integer values', + proname => 'avg', prokind => 'a', proisstrict => 'f', prorettype => 'numeric', + proargtypes => 'int4', prosrc => 'aggregate_dummy' }, +{ oid => '2102', + descr => 'the average (arithmetic mean) as numeric of all smallint values', + proname => 'avg', prokind => 'a', proisstrict => 'f', prorettype => 'numeric', + proargtypes => 'int2', prosrc => 'aggregate_dummy' }, +{ oid => '2103', + descr => 'the average (arithmetic mean) as numeric of all numeric values', + proname => 'avg', prokind => 'a', proisstrict => 'f', prorettype => 'numeric', + proargtypes => 'numeric', prosrc => 'aggregate_dummy' }, +{ oid => '2104', + descr => 'the average (arithmetic mean) as float8 of all float4 values', + proname => 'avg', prokind => 'a', proisstrict => 'f', prorettype => 'float8', + proargtypes => 'float4', prosrc => 'aggregate_dummy' }, +{ oid => '2105', + descr => 'the average (arithmetic mean) as float8 of all float8 values', + proname => 'avg', prokind => 'a', proisstrict => 'f', prorettype => 'float8', + proargtypes => 'float8', prosrc => 'aggregate_dummy' }, +{ oid => '2106', + descr => 'the average (arithmetic mean) as interval of all interval values', + proname => 'avg', prokind => 'a', proisstrict => 'f', + prorettype => 'interval', proargtypes => 'interval', + prosrc => 'aggregate_dummy' }, + +{ oid => '2107', descr => 'sum as numeric across all bigint input values', + proname => 'sum', prokind => 'a', proisstrict => 'f', prorettype => 'numeric', + proargtypes => 'int8', prosrc => 'aggregate_dummy' }, +{ oid => '2108', descr => 'sum as bigint across all integer input values', + proname => 'sum', prokind => 'a', proisstrict => 'f', prorettype => 'int8', + proargtypes => 'int4', prosrc => 'aggregate_dummy' }, +{ oid => '2109', descr => 'sum as bigint across all smallint input values', + proname => 'sum', prokind => 'a', proisstrict => 'f', prorettype => 'int8', + proargtypes => 'int2', prosrc => 'aggregate_dummy' }, +{ 
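
pg_sleep_for and pg_sleep_until are thin SQL wrappers over pg_sleep, as their prosrc bodies above show; usage looks like:

    SELECT pg_sleep(0.5);                                -- seconds, as float8
    SELECT pg_sleep_for('2 seconds');                    -- interval form
    SELECT pg_sleep_until(now() + interval '1 minute');
    SELECT pg_jit_available();                           -- is JIT usable in this session?
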
oid => '2110', descr => 'sum as float4 across all float4 input values', + proname => 'sum', prokind => 'a', proisstrict => 'f', prorettype => 'float4', + proargtypes => 'float4', prosrc => 'aggregate_dummy' }, +{ oid => '2111', descr => 'sum as float8 across all float8 input values', + proname => 'sum', prokind => 'a', proisstrict => 'f', prorettype => 'float8', + proargtypes => 'float8', prosrc => 'aggregate_dummy' }, +{ oid => '2112', descr => 'sum as money across all money input values', + proname => 'sum', prokind => 'a', proisstrict => 'f', prorettype => 'money', + proargtypes => 'money', prosrc => 'aggregate_dummy' }, +{ oid => '2113', descr => 'sum as interval across all interval input values', + proname => 'sum', prokind => 'a', proisstrict => 'f', + prorettype => 'interval', proargtypes => 'interval', + prosrc => 'aggregate_dummy' }, +{ oid => '2114', descr => 'sum as numeric across all numeric input values', + proname => 'sum', prokind => 'a', proisstrict => 'f', prorettype => 'numeric', + proargtypes => 'numeric', prosrc => 'aggregate_dummy' }, + +{ oid => '2115', descr => 'maximum value of all bigint input values', + proname => 'max', prokind => 'a', proisstrict => 'f', prorettype => 'int8', + proargtypes => 'int8', prosrc => 'aggregate_dummy' }, +{ oid => '2116', descr => 'maximum value of all integer input values', + proname => 'max', prokind => 'a', proisstrict => 'f', prorettype => 'int4', + proargtypes => 'int4', prosrc => 'aggregate_dummy' }, +{ oid => '2117', descr => 'maximum value of all smallint input values', + proname => 'max', prokind => 'a', proisstrict => 'f', prorettype => 'int2', + proargtypes => 'int2', prosrc => 'aggregate_dummy' }, +{ oid => '2118', descr => 'maximum value of all oid input values', + proname => 'max', prokind => 'a', proisstrict => 'f', prorettype => 'oid', + proargtypes => 'oid', prosrc => 'aggregate_dummy' }, +{ oid => '2119', descr => 'maximum value of all float4 input values', + proname => 'max', prokind => 'a', proisstrict => 'f', prorettype => 'float4', + proargtypes => 'float4', prosrc => 'aggregate_dummy' }, +{ oid => '2120', descr => 'maximum value of all float8 input values', + proname => 'max', prokind => 'a', proisstrict => 'f', prorettype => 'float8', + proargtypes => 'float8', prosrc => 'aggregate_dummy' }, +{ oid => '2122', descr => 'maximum value of all date input values', + proname => 'max', prokind => 'a', proisstrict => 'f', prorettype => 'date', + proargtypes => 'date', prosrc => 'aggregate_dummy' }, +{ oid => '2123', descr => 'maximum value of all time input values', + proname => 'max', prokind => 'a', proisstrict => 'f', prorettype => 'time', + proargtypes => 'time', prosrc => 'aggregate_dummy' }, +{ oid => '2124', + descr => 'maximum value of all time with time zone input values', + proname => 'max', prokind => 'a', proisstrict => 'f', prorettype => 'timetz', + proargtypes => 'timetz', prosrc => 'aggregate_dummy' }, +{ oid => '2125', descr => 'maximum value of all money input values', + proname => 'max', prokind => 'a', proisstrict => 'f', prorettype => 'money', + proargtypes => 'money', prosrc => 'aggregate_dummy' }, +{ oid => '2126', descr => 'maximum value of all timestamp input values', + proname => 'max', prokind => 'a', proisstrict => 'f', + prorettype => 'timestamp', proargtypes => 'timestamp', + prosrc => 'aggregate_dummy' }, +{ oid => '2127', + descr => 'maximum value of all timestamp with time zone input values', + proname => 'max', prokind => 'a', proisstrict => 'f', + prorettype => 'timestamptz', 
proargtypes => 'timestamptz', + prosrc => 'aggregate_dummy' }, +{ oid => '2128', descr => 'maximum value of all interval input values', + proname => 'max', prokind => 'a', proisstrict => 'f', + prorettype => 'interval', proargtypes => 'interval', + prosrc => 'aggregate_dummy' }, +{ oid => '2129', descr => 'maximum value of all text input values', + proname => 'max', prokind => 'a', proisstrict => 'f', prorettype => 'text', + proargtypes => 'text', prosrc => 'aggregate_dummy' }, +{ oid => '2130', descr => 'maximum value of all numeric input values', + proname => 'max', prokind => 'a', proisstrict => 'f', prorettype => 'numeric', + proargtypes => 'numeric', prosrc => 'aggregate_dummy' }, +{ oid => '2050', descr => 'maximum value of all anyarray input values', + proname => 'max', prokind => 'a', proisstrict => 'f', + prorettype => 'anyarray', proargtypes => 'anyarray', + prosrc => 'aggregate_dummy' }, +{ oid => '2244', descr => 'maximum value of all bpchar input values', + proname => 'max', prokind => 'a', proisstrict => 'f', prorettype => 'bpchar', + proargtypes => 'bpchar', prosrc => 'aggregate_dummy' }, +{ oid => '2797', descr => 'maximum value of all tid input values', + proname => 'max', prokind => 'a', proisstrict => 'f', prorettype => 'tid', + proargtypes => 'tid', prosrc => 'aggregate_dummy' }, +{ oid => '3564', descr => 'maximum value of all inet input values', + proname => 'max', prokind => 'a', proisstrict => 'f', prorettype => 'inet', + proargtypes => 'inet', prosrc => 'aggregate_dummy' }, + +{ oid => '2131', descr => 'minimum value of all bigint input values', + proname => 'min', prokind => 'a', proisstrict => 'f', prorettype => 'int8', + proargtypes => 'int8', prosrc => 'aggregate_dummy' }, +{ oid => '2132', descr => 'minimum value of all integer input values', + proname => 'min', prokind => 'a', proisstrict => 'f', prorettype => 'int4', + proargtypes => 'int4', prosrc => 'aggregate_dummy' }, +{ oid => '2133', descr => 'minimum value of all smallint input values', + proname => 'min', prokind => 'a', proisstrict => 'f', prorettype => 'int2', + proargtypes => 'int2', prosrc => 'aggregate_dummy' }, +{ oid => '2134', descr => 'minimum value of all oid input values', + proname => 'min', prokind => 'a', proisstrict => 'f', prorettype => 'oid', + proargtypes => 'oid', prosrc => 'aggregate_dummy' }, +{ oid => '2135', descr => 'minimum value of all float4 input values', + proname => 'min', prokind => 'a', proisstrict => 'f', prorettype => 'float4', + proargtypes => 'float4', prosrc => 'aggregate_dummy' }, +{ oid => '2136', descr => 'minimum value of all float8 input values', + proname => 'min', prokind => 'a', proisstrict => 'f', prorettype => 'float8', + proargtypes => 'float8', prosrc => 'aggregate_dummy' }, +{ oid => '2138', descr => 'minimum value of all date input values', + proname => 'min', prokind => 'a', proisstrict => 'f', prorettype => 'date', + proargtypes => 'date', prosrc => 'aggregate_dummy' }, +{ oid => '2139', descr => 'minimum value of all time input values', + proname => 'min', prokind => 'a', proisstrict => 'f', prorettype => 'time', + proargtypes => 'time', prosrc => 'aggregate_dummy' }, +{ oid => '2140', + descr => 'minimum value of all time with time zone input values', + proname => 'min', prokind => 'a', proisstrict => 'f', prorettype => 'timetz', + proargtypes => 'timetz', prosrc => 'aggregate_dummy' }, +{ oid => '2141', descr => 'minimum value of all money input values', + proname => 'min', prokind => 'a', proisstrict => 'f', prorettype => 'money', + proargtypes 
=> 'money', prosrc => 'aggregate_dummy' }, +{ oid => '2142', descr => 'minimum value of all timestamp input values', + proname => 'min', prokind => 'a', proisstrict => 'f', + prorettype => 'timestamp', proargtypes => 'timestamp', + prosrc => 'aggregate_dummy' }, +{ oid => '2143', + descr => 'minimum value of all timestamp with time zone input values', + proname => 'min', prokind => 'a', proisstrict => 'f', + prorettype => 'timestamptz', proargtypes => 'timestamptz', + prosrc => 'aggregate_dummy' }, +{ oid => '2144', descr => 'minimum value of all interval input values', + proname => 'min', prokind => 'a', proisstrict => 'f', + prorettype => 'interval', proargtypes => 'interval', + prosrc => 'aggregate_dummy' }, +{ oid => '2145', descr => 'minimum value of all text values', + proname => 'min', prokind => 'a', proisstrict => 'f', prorettype => 'text', + proargtypes => 'text', prosrc => 'aggregate_dummy' }, +{ oid => '2146', descr => 'minimum value of all numeric input values', + proname => 'min', prokind => 'a', proisstrict => 'f', prorettype => 'numeric', + proargtypes => 'numeric', prosrc => 'aggregate_dummy' }, +{ oid => '2051', descr => 'minimum value of all anyarray input values', + proname => 'min', prokind => 'a', proisstrict => 'f', + prorettype => 'anyarray', proargtypes => 'anyarray', + prosrc => 'aggregate_dummy' }, +{ oid => '2245', descr => 'minimum value of all bpchar input values', + proname => 'min', prokind => 'a', proisstrict => 'f', prorettype => 'bpchar', + proargtypes => 'bpchar', prosrc => 'aggregate_dummy' }, +{ oid => '2798', descr => 'minimum value of all tid input values', + proname => 'min', prokind => 'a', proisstrict => 'f', prorettype => 'tid', + proargtypes => 'tid', prosrc => 'aggregate_dummy' }, +{ oid => '3565', descr => 'minimum value of all inet input values', + proname => 'min', prokind => 'a', proisstrict => 'f', prorettype => 'inet', + proargtypes => 'inet', prosrc => 'aggregate_dummy' }, + +# count has two forms: count(any) and count(*) +{ oid => '2147', + descr => 'number of input rows for which the input expression is not null', + proname => 'count', prokind => 'a', proisstrict => 'f', prorettype => 'int8', + proargtypes => 'any', prosrc => 'aggregate_dummy' }, +{ oid => '2803', descr => 'number of input rows', + proname => 'count', prokind => 'a', proisstrict => 'f', prorettype => 'int8', + proargtypes => '', prosrc => 'aggregate_dummy' }, + +{ oid => '2718', + descr => 'population variance of bigint input values (square of the population standard deviation)', + proname => 'var_pop', prokind => 'a', proisstrict => 'f', + prorettype => 'numeric', proargtypes => 'int8', prosrc => 'aggregate_dummy' }, +{ oid => '2719', + descr => 'population variance of integer input values (square of the population standard deviation)', + proname => 'var_pop', prokind => 'a', proisstrict => 'f', + prorettype => 'numeric', proargtypes => 'int4', prosrc => 'aggregate_dummy' }, +{ oid => '2720', + descr => 'population variance of smallint input values (square of the population standard deviation)', + proname => 'var_pop', prokind => 'a', proisstrict => 'f', + prorettype => 'numeric', proargtypes => 'int2', prosrc => 'aggregate_dummy' }, +{ oid => '2721', + descr => 'population variance of float4 input values (square of the population standard deviation)', + proname => 'var_pop', prokind => 'a', proisstrict => 'f', + prorettype => 'float8', proargtypes => 'float4', + prosrc => 'aggregate_dummy' }, +{ oid => '2722', + descr => 'population variance of float8 input values 
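
As the comment above notes, count() has two forms, and the var_pop/var_samp pairs differ only in the population versus sample divisor; a compact illustration:

    SELECT count(*)  AS all_rows,        -- counts every input row
           count(x)  AS non_null_rows,   -- skips NULLs
           var_pop(x),
           var_samp(x)
    FROM (VALUES (1), (2), (NULL)) AS t(x);
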
(square of the population standard deviation)', + proname => 'var_pop', prokind => 'a', proisstrict => 'f', + prorettype => 'float8', proargtypes => 'float8', + prosrc => 'aggregate_dummy' }, +{ oid => '2723', + descr => 'population variance of numeric input values (square of the population standard deviation)', + proname => 'var_pop', prokind => 'a', proisstrict => 'f', + prorettype => 'numeric', proargtypes => 'numeric', + prosrc => 'aggregate_dummy' }, + +{ oid => '2641', + descr => 'sample variance of bigint input values (square of the sample standard deviation)', + proname => 'var_samp', prokind => 'a', proisstrict => 'f', + prorettype => 'numeric', proargtypes => 'int8', prosrc => 'aggregate_dummy' }, +{ oid => '2642', + descr => 'sample variance of integer input values (square of the sample standard deviation)', + proname => 'var_samp', prokind => 'a', proisstrict => 'f', + prorettype => 'numeric', proargtypes => 'int4', prosrc => 'aggregate_dummy' }, +{ oid => '2643', + descr => 'sample variance of smallint input values (square of the sample standard deviation)', + proname => 'var_samp', prokind => 'a', proisstrict => 'f', + prorettype => 'numeric', proargtypes => 'int2', prosrc => 'aggregate_dummy' }, +{ oid => '2644', + descr => 'sample variance of float4 input values (square of the sample standard deviation)', + proname => 'var_samp', prokind => 'a', proisstrict => 'f', + prorettype => 'float8', proargtypes => 'float4', + prosrc => 'aggregate_dummy' }, + +{ oid => '2645', + descr => 'sample variance of float8 input values (square of the sample standard deviation)', + proname => 'var_samp', prokind => 'a', proisstrict => 'f', + prorettype => 'float8', proargtypes => 'float8', + prosrc => 'aggregate_dummy' }, +{ oid => '2646', + descr => 'sample variance of numeric input values (square of the sample standard deviation)', + proname => 'var_samp', prokind => 'a', proisstrict => 'f', + prorettype => 'numeric', proargtypes => 'numeric', + prosrc => 'aggregate_dummy' }, + +{ oid => '2148', descr => 'historical alias for var_samp', + proname => 'variance', prokind => 'a', proisstrict => 'f', + prorettype => 'numeric', proargtypes => 'int8', prosrc => 'aggregate_dummy' }, +{ oid => '2149', descr => 'historical alias for var_samp', + proname => 'variance', prokind => 'a', proisstrict => 'f', + prorettype => 'numeric', proargtypes => 'int4', prosrc => 'aggregate_dummy' }, +{ oid => '2150', descr => 'historical alias for var_samp', + proname => 'variance', prokind => 'a', proisstrict => 'f', + prorettype => 'numeric', proargtypes => 'int2', prosrc => 'aggregate_dummy' }, +{ oid => '2151', descr => 'historical alias for var_samp', + proname => 'variance', prokind => 'a', proisstrict => 'f', + prorettype => 'float8', proargtypes => 'float4', + prosrc => 'aggregate_dummy' }, +{ oid => '2152', descr => 'historical alias for var_samp', + proname => 'variance', prokind => 'a', proisstrict => 'f', + prorettype => 'float8', proargtypes => 'float8', + prosrc => 'aggregate_dummy' }, +{ oid => '2153', descr => 'historical alias for var_samp', + proname => 'variance', prokind => 'a', proisstrict => 'f', + prorettype => 'numeric', proargtypes => 'numeric', + prosrc => 'aggregate_dummy' }, + +{ oid => '2724', + descr => 'population standard deviation of bigint input values', + proname => 'stddev_pop', prokind => 'a', proisstrict => 'f', + prorettype => 'numeric', proargtypes => 'int8', prosrc => 'aggregate_dummy' }, +{ oid => '2725', + descr => 'population standard deviation of integer input values', + 
proname => 'stddev_pop', prokind => 'a', proisstrict => 'f', + prorettype => 'numeric', proargtypes => 'int4', prosrc => 'aggregate_dummy' }, +{ oid => '2726', + descr => 'population standard deviation of smallint input values', + proname => 'stddev_pop', prokind => 'a', proisstrict => 'f', + prorettype => 'numeric', proargtypes => 'int2', prosrc => 'aggregate_dummy' }, +{ oid => '2727', + descr => 'population standard deviation of float4 input values', + proname => 'stddev_pop', prokind => 'a', proisstrict => 'f', + prorettype => 'float8', proargtypes => 'float4', + prosrc => 'aggregate_dummy' }, +{ oid => '2728', + descr => 'population standard deviation of float8 input values', + proname => 'stddev_pop', prokind => 'a', proisstrict => 'f', + prorettype => 'float8', proargtypes => 'float8', + prosrc => 'aggregate_dummy' }, +{ oid => '2729', + descr => 'population standard deviation of numeric input values', + proname => 'stddev_pop', prokind => 'a', proisstrict => 'f', + prorettype => 'numeric', proargtypes => 'numeric', + prosrc => 'aggregate_dummy' }, + +{ oid => '2712', descr => 'sample standard deviation of bigint input values', + proname => 'stddev_samp', prokind => 'a', proisstrict => 'f', + prorettype => 'numeric', proargtypes => 'int8', prosrc => 'aggregate_dummy' }, +{ oid => '2713', descr => 'sample standard deviation of integer input values', + proname => 'stddev_samp', prokind => 'a', proisstrict => 'f', + prorettype => 'numeric', proargtypes => 'int4', prosrc => 'aggregate_dummy' }, +{ oid => '2714', + descr => 'sample standard deviation of smallint input values', + proname => 'stddev_samp', prokind => 'a', proisstrict => 'f', + prorettype => 'numeric', proargtypes => 'int2', prosrc => 'aggregate_dummy' }, +{ oid => '2715', descr => 'sample standard deviation of float4 input values', + proname => 'stddev_samp', prokind => 'a', proisstrict => 'f', + prorettype => 'float8', proargtypes => 'float4', + prosrc => 'aggregate_dummy' }, +{ oid => '2716', descr => 'sample standard deviation of float8 input values', + proname => 'stddev_samp', prokind => 'a', proisstrict => 'f', + prorettype => 'float8', proargtypes => 'float8', + prosrc => 'aggregate_dummy' }, +{ oid => '2717', descr => 'sample standard deviation of numeric input values', + proname => 'stddev_samp', prokind => 'a', proisstrict => 'f', + prorettype => 'numeric', proargtypes => 'numeric', + prosrc => 'aggregate_dummy' }, + +{ oid => '2154', descr => 'historical alias for stddev_samp', + proname => 'stddev', prokind => 'a', proisstrict => 'f', + prorettype => 'numeric', proargtypes => 'int8', prosrc => 'aggregate_dummy' }, +{ oid => '2155', descr => 'historical alias for stddev_samp', + proname => 'stddev', prokind => 'a', proisstrict => 'f', + prorettype => 'numeric', proargtypes => 'int4', prosrc => 'aggregate_dummy' }, +{ oid => '2156', descr => 'historical alias for stddev_samp', + proname => 'stddev', prokind => 'a', proisstrict => 'f', + prorettype => 'numeric', proargtypes => 'int2', prosrc => 'aggregate_dummy' }, +{ oid => '2157', descr => 'historical alias for stddev_samp', + proname => 'stddev', prokind => 'a', proisstrict => 'f', + prorettype => 'float8', proargtypes => 'float4', + prosrc => 'aggregate_dummy' }, +{ oid => '2158', descr => 'historical alias for stddev_samp', + proname => 'stddev', prokind => 'a', proisstrict => 'f', + prorettype => 'float8', proargtypes => 'float8', + prosrc => 'aggregate_dummy' }, +{ oid => '2159', descr => 'historical alias for stddev_samp', + proname => 'stddev', prokind => 
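
variance() and stddev() are kept only as historical aliases, as their descriptions say; the equivalence is easy to check:

    SELECT variance(x) = var_samp(x)    AS variance_is_var_samp,
           stddev(x)   = stddev_samp(x) AS stddev_is_stddev_samp
    FROM (VALUES (1.0), (2.0), (4.0)) AS t(x);
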
'a', proisstrict => 'f', + prorettype => 'numeric', proargtypes => 'numeric', + prosrc => 'aggregate_dummy' }, + +{ oid => '2818', + descr => 'number of input rows in which both expressions are not null', + proname => 'regr_count', prokind => 'a', proisstrict => 'f', + prorettype => 'int8', proargtypes => 'float8 float8', + prosrc => 'aggregate_dummy' }, +{ oid => '2819', + descr => 'sum of squares of the independent variable (sum(X^2) - sum(X)^2/N)', + proname => 'regr_sxx', prokind => 'a', proisstrict => 'f', + prorettype => 'float8', proargtypes => 'float8 float8', + prosrc => 'aggregate_dummy' }, +{ oid => '2820', + descr => 'sum of squares of the dependent variable (sum(Y^2) - sum(Y)^2/N)', + proname => 'regr_syy', prokind => 'a', proisstrict => 'f', + prorettype => 'float8', proargtypes => 'float8 float8', + prosrc => 'aggregate_dummy' }, +{ oid => '2821', + descr => 'sum of products of independent times dependent variable (sum(X*Y) - sum(X) * sum(Y)/N)', + proname => 'regr_sxy', prokind => 'a', proisstrict => 'f', + prorettype => 'float8', proargtypes => 'float8 float8', + prosrc => 'aggregate_dummy' }, +{ oid => '2822', descr => 'average of the independent variable (sum(X)/N)', + proname => 'regr_avgx', prokind => 'a', proisstrict => 'f', + prorettype => 'float8', proargtypes => 'float8 float8', + prosrc => 'aggregate_dummy' }, +{ oid => '2823', descr => 'average of the dependent variable (sum(Y)/N)', + proname => 'regr_avgy', prokind => 'a', proisstrict => 'f', + prorettype => 'float8', proargtypes => 'float8 float8', + prosrc => 'aggregate_dummy' }, +{ oid => '2824', descr => 'square of the correlation coefficient', + proname => 'regr_r2', prokind => 'a', proisstrict => 'f', + prorettype => 'float8', proargtypes => 'float8 float8', + prosrc => 'aggregate_dummy' }, +{ oid => '2825', + descr => 'slope of the least-squares-fit linear equation determined by the (X, Y) pairs', + proname => 'regr_slope', prokind => 'a', proisstrict => 'f', + prorettype => 'float8', proargtypes => 'float8 float8', + prosrc => 'aggregate_dummy' }, +{ oid => '2826', + descr => 'y-intercept of the least-squares-fit linear equation determined by the (X, Y) pairs', + proname => 'regr_intercept', prokind => 'a', proisstrict => 'f', + prorettype => 'float8', proargtypes => 'float8 float8', + prosrc => 'aggregate_dummy' }, + +{ oid => '2827', descr => 'population covariance', + proname => 'covar_pop', prokind => 'a', proisstrict => 'f', + prorettype => 'float8', proargtypes => 'float8 float8', + prosrc => 'aggregate_dummy' }, +{ oid => '2828', descr => 'sample covariance', + proname => 'covar_samp', prokind => 'a', proisstrict => 'f', + prorettype => 'float8', proargtypes => 'float8 float8', + prosrc => 'aggregate_dummy' }, +{ oid => '2829', descr => 'correlation coefficient', + proname => 'corr', prokind => 'a', proisstrict => 'f', prorettype => 'float8', + proargtypes => 'float8 float8', prosrc => 'aggregate_dummy' }, + +{ oid => '2160', + proname => 'text_pattern_lt', prorettype => 'bool', + proargtypes => 'text text', prosrc => 'text_pattern_lt' }, +{ oid => '2161', + proname => 'text_pattern_le', prorettype => 'bool', + proargtypes => 'text text', prosrc => 'text_pattern_le' }, +{ oid => '2163', + proname => 'text_pattern_ge', prorettype => 'bool', + proargtypes => 'text text', prosrc => 'text_pattern_ge' }, +{ oid => '2164', + proname => 'text_pattern_gt', prorettype => 'bool', + proargtypes => 'text text', prosrc => 'text_pattern_gt' }, +{ oid => '2166', descr => 'less-equal-greater', + proname => 
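
The regr_* family and corr take the dependent variable first, per the descriptions above; for example, fitting y against x over three points:

    SELECT regr_slope(y, x)     AS slope,
           regr_intercept(y, x) AS intercept,
           corr(y, x)           AS r,
           regr_r2(y, x)        AS r_squared
    FROM (VALUES (1.0, 2.0), (2.0, 4.1), (3.0, 5.9)) AS t(x, y);
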
'bttext_pattern_cmp', prorettype => 'int4', + proargtypes => 'text text', prosrc => 'bttext_pattern_cmp' }, +{ oid => '3332', descr => 'sort support', + proname => 'bttext_pattern_sortsupport', prorettype => 'void', + proargtypes => 'internal', prosrc => 'bttext_pattern_sortsupport' }, + +{ oid => '2174', + proname => 'bpchar_pattern_lt', prorettype => 'bool', + proargtypes => 'bpchar bpchar', prosrc => 'bpchar_pattern_lt' }, +{ oid => '2175', + proname => 'bpchar_pattern_le', prorettype => 'bool', + proargtypes => 'bpchar bpchar', prosrc => 'bpchar_pattern_le' }, +{ oid => '2177', + proname => 'bpchar_pattern_ge', prorettype => 'bool', + proargtypes => 'bpchar bpchar', prosrc => 'bpchar_pattern_ge' }, +{ oid => '2178', + proname => 'bpchar_pattern_gt', prorettype => 'bool', + proargtypes => 'bpchar bpchar', prosrc => 'bpchar_pattern_gt' }, +{ oid => '2180', descr => 'less-equal-greater', + proname => 'btbpchar_pattern_cmp', prorettype => 'int4', + proargtypes => 'bpchar bpchar', prosrc => 'btbpchar_pattern_cmp' }, +{ oid => '3333', descr => 'sort support', + proname => 'btbpchar_pattern_sortsupport', prorettype => 'void', + proargtypes => 'internal', prosrc => 'btbpchar_pattern_sortsupport' }, + +{ oid => '2188', descr => 'less-equal-greater', + proname => 'btint48cmp', proleakproof => 't', prorettype => 'int4', + proargtypes => 'int4 int8', prosrc => 'btint48cmp' }, +{ oid => '2189', descr => 'less-equal-greater', + proname => 'btint84cmp', proleakproof => 't', prorettype => 'int4', + proargtypes => 'int8 int4', prosrc => 'btint84cmp' }, +{ oid => '2190', descr => 'less-equal-greater', + proname => 'btint24cmp', proleakproof => 't', prorettype => 'int4', + proargtypes => 'int2 int4', prosrc => 'btint24cmp' }, +{ oid => '2191', descr => 'less-equal-greater', + proname => 'btint42cmp', proleakproof => 't', prorettype => 'int4', + proargtypes => 'int4 int2', prosrc => 'btint42cmp' }, +{ oid => '2192', descr => 'less-equal-greater', + proname => 'btint28cmp', proleakproof => 't', prorettype => 'int4', + proargtypes => 'int2 int8', prosrc => 'btint28cmp' }, +{ oid => '2193', descr => 'less-equal-greater', + proname => 'btint82cmp', proleakproof => 't', prorettype => 'int4', + proargtypes => 'int8 int2', prosrc => 'btint82cmp' }, +{ oid => '2194', descr => 'less-equal-greater', + proname => 'btfloat48cmp', proleakproof => 't', prorettype => 'int4', + proargtypes => 'float4 float8', prosrc => 'btfloat48cmp' }, +{ oid => '2195', descr => 'less-equal-greater', + proname => 'btfloat84cmp', proleakproof => 't', prorettype => 'int4', + proargtypes => 'float8 float4', prosrc => 'btfloat84cmp' }, + +{ oid => '2212', descr => 'I/O', + proname => 'regprocedurein', provolatile => 's', prorettype => 'regprocedure', + proargtypes => 'cstring', prosrc => 'regprocedurein' }, +{ oid => '2213', descr => 'I/O', + proname => 'regprocedureout', provolatile => 's', prorettype => 'cstring', + proargtypes => 'regprocedure', prosrc => 'regprocedureout' }, +{ oid => '2214', descr => 'I/O', + proname => 'regoperin', provolatile => 's', prorettype => 'regoper', + proargtypes => 'cstring', prosrc => 'regoperin' }, +{ oid => '2215', descr => 'I/O', + proname => 'regoperout', provolatile => 's', prorettype => 'cstring', + proargtypes => 'regoper', prosrc => 'regoperout' }, +{ oid => '3492', descr => 'convert operator name to regoper', + proname => 'to_regoper', provolatile => 's', prorettype => 'regoper', + proargtypes => 'text', prosrc => 'to_regoper' }, +{ oid => '3476', descr => 'convert operator name to regoperator', + 
proname => 'to_regoperator', provolatile => 's', prorettype => 'regoperator', + proargtypes => 'text', prosrc => 'to_regoperator' }, +{ oid => '2216', descr => 'I/O', + proname => 'regoperatorin', provolatile => 's', prorettype => 'regoperator', + proargtypes => 'cstring', prosrc => 'regoperatorin' }, +{ oid => '2217', descr => 'I/O', + proname => 'regoperatorout', provolatile => 's', prorettype => 'cstring', + proargtypes => 'regoperator', prosrc => 'regoperatorout' }, +{ oid => '2218', descr => 'I/O', + proname => 'regclassin', provolatile => 's', prorettype => 'regclass', + proargtypes => 'cstring', prosrc => 'regclassin' }, +{ oid => '2219', descr => 'I/O', + proname => 'regclassout', provolatile => 's', prorettype => 'cstring', + proargtypes => 'regclass', prosrc => 'regclassout' }, +{ oid => '3495', descr => 'convert classname to regclass', + proname => 'to_regclass', provolatile => 's', prorettype => 'regclass', + proargtypes => 'text', prosrc => 'to_regclass' }, +{ oid => '2220', descr => 'I/O', + proname => 'regtypein', provolatile => 's', prorettype => 'regtype', + proargtypes => 'cstring', prosrc => 'regtypein' }, +{ oid => '2221', descr => 'I/O', + proname => 'regtypeout', provolatile => 's', prorettype => 'cstring', + proargtypes => 'regtype', prosrc => 'regtypeout' }, +{ oid => '3493', descr => 'convert type name to regtype', + proname => 'to_regtype', provolatile => 's', prorettype => 'regtype', + proargtypes => 'text', prosrc => 'to_regtype' }, +{ oid => '1079', descr => 'convert text to regclass', + proname => 'regclass', provolatile => 's', prorettype => 'regclass', + proargtypes => 'text', prosrc => 'text_regclass' }, + +{ oid => '4098', descr => 'I/O', + proname => 'regrolein', provolatile => 's', prorettype => 'regrole', + proargtypes => 'cstring', prosrc => 'regrolein' }, +{ oid => '4092', descr => 'I/O', + proname => 'regroleout', provolatile => 's', prorettype => 'cstring', + proargtypes => 'regrole', prosrc => 'regroleout' }, +{ oid => '4093', descr => 'convert role name to regrole', + proname => 'to_regrole', provolatile => 's', prorettype => 'regrole', + proargtypes => 'text', prosrc => 'to_regrole' }, + +{ oid => '4084', descr => 'I/O', + proname => 'regnamespacein', provolatile => 's', prorettype => 'regnamespace', + proargtypes => 'cstring', prosrc => 'regnamespacein' }, +{ oid => '4085', descr => 'I/O', + proname => 'regnamespaceout', provolatile => 's', prorettype => 'cstring', + proargtypes => 'regnamespace', prosrc => 'regnamespaceout' }, +{ oid => '4086', descr => 'convert namespace name to regnamespace', + proname => 'to_regnamespace', provolatile => 's', + prorettype => 'regnamespace', proargtypes => 'text', + prosrc => 'to_regnamespace' }, + +{ oid => '1268', + descr => 'parse qualified identifier to array of identifiers', + proname => 'parse_ident', prorettype => '_text', proargtypes => 'text bool', + proargnames => '{str,strict}', prosrc => 'parse_ident' }, + +{ oid => '2246', descr => '(internal)', + proname => 'fmgr_internal_validator', provolatile => 's', + prorettype => 'void', proargtypes => 'oid', + prosrc => 'fmgr_internal_validator' }, +{ oid => '2247', descr => '(internal)', + proname => 'fmgr_c_validator', provolatile => 's', prorettype => 'void', + proargtypes => 'oid', prosrc => 'fmgr_c_validator' }, +{ oid => '2248', descr => '(internal)', + proname => 'fmgr_sql_validator', provolatile => 's', prorettype => 'void', + proargtypes => 'oid', prosrc => 'fmgr_sql_validator' }, + +{ oid => '2250', + descr => 'user privilege on database by 
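The to_reg* casts above resolve a name to the corresponding OID alias type, returning NULL instead of raising an error when the object does not exist, and parse_ident splits a qualified name into its components. A short sketch (the schema and table names in the last call are made up):

    SELECT to_regclass('pg_catalog.pg_class');   -- regclass, or NULL if missing
    SELECT to_regtype('no_such_type');           -- NULL rather than an error
    SELECT parse_ident('myschema."My Table"');   -- {myschema,"My Table"}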
username, database name', + proname => 'has_database_privilege', provolatile => 's', prorettype => 'bool', + proargtypes => 'name text text', + prosrc => 'has_database_privilege_name_name' }, +{ oid => '2251', + descr => 'user privilege on database by username, database oid', + proname => 'has_database_privilege', provolatile => 's', prorettype => 'bool', + proargtypes => 'name oid text', prosrc => 'has_database_privilege_name_id' }, +{ oid => '2252', + descr => 'user privilege on database by user oid, database name', + proname => 'has_database_privilege', provolatile => 's', prorettype => 'bool', + proargtypes => 'oid text text', prosrc => 'has_database_privilege_id_name' }, +{ oid => '2253', + descr => 'user privilege on database by user oid, database oid', + proname => 'has_database_privilege', provolatile => 's', prorettype => 'bool', + proargtypes => 'oid oid text', prosrc => 'has_database_privilege_id_id' }, +{ oid => '2254', + descr => 'current user privilege on database by database name', + proname => 'has_database_privilege', provolatile => 's', prorettype => 'bool', + proargtypes => 'text text', prosrc => 'has_database_privilege_name' }, +{ oid => '2255', + descr => 'current user privilege on database by database oid', + proname => 'has_database_privilege', provolatile => 's', prorettype => 'bool', + proargtypes => 'oid text', prosrc => 'has_database_privilege_id' }, + +{ oid => '2256', + descr => 'user privilege on function by username, function name', + proname => 'has_function_privilege', provolatile => 's', prorettype => 'bool', + proargtypes => 'name text text', + prosrc => 'has_function_privilege_name_name' }, +{ oid => '2257', + descr => 'user privilege on function by username, function oid', + proname => 'has_function_privilege', provolatile => 's', prorettype => 'bool', + proargtypes => 'name oid text', prosrc => 'has_function_privilege_name_id' }, +{ oid => '2258', + descr => 'user privilege on function by user oid, function name', + proname => 'has_function_privilege', provolatile => 's', prorettype => 'bool', + proargtypes => 'oid text text', prosrc => 'has_function_privilege_id_name' }, +{ oid => '2259', + descr => 'user privilege on function by user oid, function oid', + proname => 'has_function_privilege', provolatile => 's', prorettype => 'bool', + proargtypes => 'oid oid text', prosrc => 'has_function_privilege_id_id' }, +{ oid => '2260', + descr => 'current user privilege on function by function name', + proname => 'has_function_privilege', provolatile => 's', prorettype => 'bool', + proargtypes => 'text text', prosrc => 'has_function_privilege_name' }, +{ oid => '2261', + descr => 'current user privilege on function by function oid', + proname => 'has_function_privilege', provolatile => 's', prorettype => 'bool', + proargtypes => 'oid text', prosrc => 'has_function_privilege_id' }, + +{ oid => '2262', + descr => 'user privilege on language by username, language name', + proname => 'has_language_privilege', provolatile => 's', prorettype => 'bool', + proargtypes => 'name text text', + prosrc => 'has_language_privilege_name_name' }, +{ oid => '2263', + descr => 'user privilege on language by username, language oid', + proname => 'has_language_privilege', provolatile => 's', prorettype => 'bool', + proargtypes => 'name oid text', prosrc => 'has_language_privilege_name_id' }, +{ oid => '2264', + descr => 'user privilege on language by user oid, language name', + proname => 'has_language_privilege', provolatile => 's', prorettype => 'bool', + proargtypes => 'oid 
text text', prosrc => 'has_language_privilege_id_name' }, +{ oid => '2265', + descr => 'user privilege on language by user oid, language oid', + proname => 'has_language_privilege', provolatile => 's', prorettype => 'bool', + proargtypes => 'oid oid text', prosrc => 'has_language_privilege_id_id' }, +{ oid => '2266', + descr => 'current user privilege on language by language name', + proname => 'has_language_privilege', provolatile => 's', prorettype => 'bool', + proargtypes => 'text text', prosrc => 'has_language_privilege_name' }, +{ oid => '2267', + descr => 'current user privilege on language by language oid', + proname => 'has_language_privilege', provolatile => 's', prorettype => 'bool', + proargtypes => 'oid text', prosrc => 'has_language_privilege_id' }, + +{ oid => '2268', descr => 'user privilege on schema by username, schema name', + proname => 'has_schema_privilege', provolatile => 's', prorettype => 'bool', + proargtypes => 'name text text', prosrc => 'has_schema_privilege_name_name' }, +{ oid => '2269', descr => 'user privilege on schema by username, schema oid', + proname => 'has_schema_privilege', provolatile => 's', prorettype => 'bool', + proargtypes => 'name oid text', prosrc => 'has_schema_privilege_name_id' }, +{ oid => '2270', descr => 'user privilege on schema by user oid, schema name', + proname => 'has_schema_privilege', provolatile => 's', prorettype => 'bool', + proargtypes => 'oid text text', prosrc => 'has_schema_privilege_id_name' }, +{ oid => '2271', descr => 'user privilege on schema by user oid, schema oid', + proname => 'has_schema_privilege', provolatile => 's', prorettype => 'bool', + proargtypes => 'oid oid text', prosrc => 'has_schema_privilege_id_id' }, +{ oid => '2272', descr => 'current user privilege on schema by schema name', + proname => 'has_schema_privilege', provolatile => 's', prorettype => 'bool', + proargtypes => 'text text', prosrc => 'has_schema_privilege_name' }, +{ oid => '2273', descr => 'current user privilege on schema by schema oid', + proname => 'has_schema_privilege', provolatile => 's', prorettype => 'bool', + proargtypes => 'oid text', prosrc => 'has_schema_privilege_id' }, + +{ oid => '2390', + descr => 'user privilege on tablespace by username, tablespace name', + proname => 'has_tablespace_privilege', provolatile => 's', + prorettype => 'bool', proargtypes => 'name text text', + prosrc => 'has_tablespace_privilege_name_name' }, +{ oid => '2391', + descr => 'user privilege on tablespace by username, tablespace oid', + proname => 'has_tablespace_privilege', provolatile => 's', + prorettype => 'bool', proargtypes => 'name oid text', + prosrc => 'has_tablespace_privilege_name_id' }, +{ oid => '2392', + descr => 'user privilege on tablespace by user oid, tablespace name', + proname => 'has_tablespace_privilege', provolatile => 's', + prorettype => 'bool', proargtypes => 'oid text text', + prosrc => 'has_tablespace_privilege_id_name' }, +{ oid => '2393', + descr => 'user privilege on tablespace by user oid, tablespace oid', + proname => 'has_tablespace_privilege', provolatile => 's', + prorettype => 'bool', proargtypes => 'oid oid text', + prosrc => 'has_tablespace_privilege_id_id' }, +{ oid => '2394', + descr => 'current user privilege on tablespace by tablespace name', + proname => 'has_tablespace_privilege', provolatile => 's', + prorettype => 'bool', proargtypes => 'text text', + prosrc => 'has_tablespace_privilege_name' }, +{ oid => '2395', + descr => 'current user privilege on tablespace by tablespace oid', + proname => 
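The has_*_privilege entries form one family: each object type gets by-name/by-OID variants for an explicit user plus two current-user forms. Roughly, at the SQL level:

    SELECT has_database_privilege(current_database(), 'CONNECT');
    SELECT has_function_privilege('pg_catalog.now()', 'EXECUTE');
    SELECT has_schema_privilege('public', 'USAGE');
    SELECT has_tablespace_privilege('pg_default', 'CREATE');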
'has_tablespace_privilege', provolatile => 's', + prorettype => 'bool', proargtypes => 'oid text', + prosrc => 'has_tablespace_privilege_id' }, + +{ oid => '3000', + descr => 'user privilege on foreign data wrapper by username, foreign data wrapper name', + proname => 'has_foreign_data_wrapper_privilege', provolatile => 's', + prorettype => 'bool', proargtypes => 'name text text', + prosrc => 'has_foreign_data_wrapper_privilege_name_name' }, +{ oid => '3001', + descr => 'user privilege on foreign data wrapper by username, foreign data wrapper oid', + proname => 'has_foreign_data_wrapper_privilege', provolatile => 's', + prorettype => 'bool', proargtypes => 'name oid text', + prosrc => 'has_foreign_data_wrapper_privilege_name_id' }, +{ oid => '3002', + descr => 'user privilege on foreign data wrapper by user oid, foreign data wrapper name', + proname => 'has_foreign_data_wrapper_privilege', provolatile => 's', + prorettype => 'bool', proargtypes => 'oid text text', + prosrc => 'has_foreign_data_wrapper_privilege_id_name' }, +{ oid => '3003', + descr => 'user privilege on foreign data wrapper by user oid, foreign data wrapper oid', + proname => 'has_foreign_data_wrapper_privilege', provolatile => 's', + prorettype => 'bool', proargtypes => 'oid oid text', + prosrc => 'has_foreign_data_wrapper_privilege_id_id' }, +{ oid => '3004', + descr => 'current user privilege on foreign data wrapper by foreign data wrapper name', + proname => 'has_foreign_data_wrapper_privilege', provolatile => 's', + prorettype => 'bool', proargtypes => 'text text', + prosrc => 'has_foreign_data_wrapper_privilege_name' }, +{ oid => '3005', + descr => 'current user privilege on foreign data wrapper by foreign data wrapper oid', + proname => 'has_foreign_data_wrapper_privilege', provolatile => 's', + prorettype => 'bool', proargtypes => 'oid text', + prosrc => 'has_foreign_data_wrapper_privilege_id' }, + +{ oid => '3006', descr => 'user privilege on server by username, server name', + proname => 'has_server_privilege', provolatile => 's', prorettype => 'bool', + proargtypes => 'name text text', prosrc => 'has_server_privilege_name_name' }, +{ oid => '3007', descr => 'user privilege on server by username, server oid', + proname => 'has_server_privilege', provolatile => 's', prorettype => 'bool', + proargtypes => 'name oid text', prosrc => 'has_server_privilege_name_id' }, +{ oid => '3008', descr => 'user privilege on server by user oid, server name', + proname => 'has_server_privilege', provolatile => 's', prorettype => 'bool', + proargtypes => 'oid text text', prosrc => 'has_server_privilege_id_name' }, +{ oid => '3009', descr => 'user privilege on server by user oid, server oid', + proname => 'has_server_privilege', provolatile => 's', prorettype => 'bool', + proargtypes => 'oid oid text', prosrc => 'has_server_privilege_id_id' }, +{ oid => '3010', descr => 'current user privilege on server by server name', + proname => 'has_server_privilege', provolatile => 's', prorettype => 'bool', + proargtypes => 'text text', prosrc => 'has_server_privilege_name' }, +{ oid => '3011', descr => 'current user privilege on server by server oid', + proname => 'has_server_privilege', provolatile => 's', prorettype => 'bool', + proargtypes => 'oid text', prosrc => 'has_server_privilege_id' }, + +{ oid => '3138', descr => 'user privilege on type by username, type name', + proname => 'has_type_privilege', provolatile => 's', prorettype => 'bool', + proargtypes => 'name text text', prosrc => 'has_type_privilege_name_name' }, +{ oid => 
'3139', descr => 'user privilege on type by username, type oid', + proname => 'has_type_privilege', provolatile => 's', prorettype => 'bool', + proargtypes => 'name oid text', prosrc => 'has_type_privilege_name_id' }, +{ oid => '3140', descr => 'user privilege on type by user oid, type name', + proname => 'has_type_privilege', provolatile => 's', prorettype => 'bool', + proargtypes => 'oid text text', prosrc => 'has_type_privilege_id_name' }, +{ oid => '3141', descr => 'user privilege on type by user oid, type oid', + proname => 'has_type_privilege', provolatile => 's', prorettype => 'bool', + proargtypes => 'oid oid text', prosrc => 'has_type_privilege_id_id' }, +{ oid => '3142', descr => 'current user privilege on type by type name', + proname => 'has_type_privilege', provolatile => 's', prorettype => 'bool', + proargtypes => 'text text', prosrc => 'has_type_privilege_name' }, +{ oid => '3143', descr => 'current user privilege on type by type oid', + proname => 'has_type_privilege', provolatile => 's', prorettype => 'bool', + proargtypes => 'oid text', prosrc => 'has_type_privilege_id' }, + +{ oid => '2705', descr => 'user privilege on role by username, role name', + proname => 'pg_has_role', provolatile => 's', prorettype => 'bool', + proargtypes => 'name name text', prosrc => 'pg_has_role_name_name' }, +{ oid => '2706', descr => 'user privilege on role by username, role oid', + proname => 'pg_has_role', provolatile => 's', prorettype => 'bool', + proargtypes => 'name oid text', prosrc => 'pg_has_role_name_id' }, +{ oid => '2707', descr => 'user privilege on role by user oid, role name', + proname => 'pg_has_role', provolatile => 's', prorettype => 'bool', + proargtypes => 'oid name text', prosrc => 'pg_has_role_id_name' }, +{ oid => '2708', descr => 'user privilege on role by user oid, role oid', + proname => 'pg_has_role', provolatile => 's', prorettype => 'bool', + proargtypes => 'oid oid text', prosrc => 'pg_has_role_id_id' }, +{ oid => '2709', descr => 'current user privilege on role by role name', + proname => 'pg_has_role', provolatile => 's', prorettype => 'bool', + proargtypes => 'name text', prosrc => 'pg_has_role_name' }, +{ oid => '2710', descr => 'current user privilege on role by role oid', + proname => 'pg_has_role', provolatile => 's', prorettype => 'bool', + proargtypes => 'oid text', prosrc => 'pg_has_role_id' }, + +{ oid => '1269', + descr => 'bytes required to store the value, perhaps with compression', + proname => 'pg_column_size', provolatile => 's', prorettype => 'int4', + proargtypes => 'any', prosrc => 'pg_column_size' }, +{ oid => '2322', + descr => 'total disk space usage for the specified tablespace', + proname => 'pg_tablespace_size', provolatile => 'v', prorettype => 'int8', + proargtypes => 'oid', prosrc => 'pg_tablespace_size_oid' }, +{ oid => '2323', + descr => 'total disk space usage for the specified tablespace', + proname => 'pg_tablespace_size', provolatile => 'v', prorettype => 'int8', + proargtypes => 'name', prosrc => 'pg_tablespace_size_name' }, +{ oid => '2324', descr => 'total disk space usage for the specified database', + proname => 'pg_database_size', provolatile => 'v', prorettype => 'int8', + proargtypes => 'oid', prosrc => 'pg_database_size_oid' }, +{ oid => '2168', descr => 'total disk space usage for the specified database', + proname => 'pg_database_size', provolatile => 'v', prorettype => 'int8', + proargtypes => 'name', prosrc => 'pg_database_size_name' }, +{ oid => '2325', + descr => 'disk space usage for the main fork of the 
specified table or index', + proname => 'pg_relation_size', prolang => '14', provolatile => 'v', + prorettype => 'int8', proargtypes => 'regclass', + prosrc => 'select pg_catalog.pg_relation_size($1, \'main\')' }, +{ oid => '2332', + descr => 'disk space usage for the specified fork of a table or index', + proname => 'pg_relation_size', provolatile => 'v', prorettype => 'int8', + proargtypes => 'regclass text', prosrc => 'pg_relation_size' }, +{ oid => '2286', + descr => 'total disk space usage for the specified table and associated indexes', + proname => 'pg_total_relation_size', provolatile => 'v', prorettype => 'int8', + proargtypes => 'regclass', prosrc => 'pg_total_relation_size' }, +{ oid => '2288', + descr => 'convert a long int to a human readable text using size units', + proname => 'pg_size_pretty', prorettype => 'text', proargtypes => 'int8', + prosrc => 'pg_size_pretty' }, +{ oid => '3166', + descr => 'convert a numeric to a human readable text using size units', + proname => 'pg_size_pretty', prorettype => 'text', proargtypes => 'numeric', + prosrc => 'pg_size_pretty_numeric' }, +{ oid => '3334', + descr => 'convert a size in human-readable format with size units into bytes', + proname => 'pg_size_bytes', prorettype => 'int8', proargtypes => 'text', + prosrc => 'pg_size_bytes' }, +{ oid => '2997', + descr => 'disk space usage for the specified table, including TOAST, free space and visibility map', + proname => 'pg_table_size', provolatile => 'v', prorettype => 'int8', + proargtypes => 'regclass', prosrc => 'pg_table_size' }, +{ oid => '2998', + descr => 'disk space usage for all indexes attached to the specified table', + proname => 'pg_indexes_size', provolatile => 'v', prorettype => 'int8', + proargtypes => 'regclass', prosrc => 'pg_indexes_size' }, +{ oid => '2999', descr => 'filenode identifier of relation', + proname => 'pg_relation_filenode', provolatile => 's', prorettype => 'oid', + proargtypes => 'regclass', prosrc => 'pg_relation_filenode' }, +{ oid => '3454', descr => 'relation OID for filenode and tablespace', + proname => 'pg_filenode_relation', provolatile => 's', + prorettype => 'regclass', proargtypes => 'oid oid', + prosrc => 'pg_filenode_relation' }, +{ oid => '3034', descr => 'file path of relation', + proname => 'pg_relation_filepath', provolatile => 's', prorettype => 'text', + proargtypes => 'regclass', prosrc => 'pg_relation_filepath' }, + +{ oid => '2316', descr => '(internal)', + proname => 'postgresql_fdw_validator', prorettype => 'bool', + proargtypes => '_text oid', prosrc => 'postgresql_fdw_validator' }, + +{ oid => '2290', descr => 'I/O', + proname => 'record_in', provolatile => 's', prorettype => 'record', + proargtypes => 'cstring oid int4', prosrc => 'record_in' }, +{ oid => '2291', descr => 'I/O', + proname => 'record_out', provolatile => 's', prorettype => 'cstring', + proargtypes => 'record', prosrc => 'record_out' }, +{ oid => '2292', descr => 'I/O', + proname => 'cstring_in', prorettype => 'cstring', proargtypes => 'cstring', + prosrc => 'cstring_in' }, +{ oid => '2293', descr => 'I/O', + proname => 'cstring_out', prorettype => 'cstring', proargtypes => 'cstring', + prosrc => 'cstring_out' }, +{ oid => '2294', descr => 'I/O', + proname => 'any_in', prorettype => 'any', proargtypes => 'cstring', + prosrc => 'any_in' }, +{ oid => '2295', descr => 'I/O', + proname => 'any_out', prorettype => 'cstring', proargtypes => 'any', + prosrc => 'any_out' }, +{ oid => '2296', descr => 'I/O', + proname => 'anyarray_in', prorettype => 'anyarray', 
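Note that pg_relation_size(regclass) (OID 2325) is a SQL-language wrapper (prolang => '14') around the two-argument, fork-aware form. A sketch of how the size functions above are commonly combined:

    SELECT pg_size_pretty(pg_database_size(current_database()));
    SELECT pg_size_pretty(pg_total_relation_size('pg_class'));
    SELECT pg_size_bytes('1 GB');            -- 1073741824
    SELECT pg_column_size(row(1, 'abc'));    -- bytes needed to store this value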
proargtypes => 'cstring', + prosrc => 'anyarray_in' }, +{ oid => '2297', descr => 'I/O', + proname => 'anyarray_out', provolatile => 's', prorettype => 'cstring', + proargtypes => 'anyarray', prosrc => 'anyarray_out' }, +{ oid => '2298', descr => 'I/O', + proname => 'void_in', prorettype => 'void', proargtypes => 'cstring', + prosrc => 'void_in' }, +{ oid => '2299', descr => 'I/O', + proname => 'void_out', prorettype => 'cstring', proargtypes => 'void', + prosrc => 'void_out' }, +{ oid => '2300', descr => 'I/O', + proname => 'trigger_in', proisstrict => 'f', prorettype => 'trigger', + proargtypes => 'cstring', prosrc => 'trigger_in' }, +{ oid => '2301', descr => 'I/O', + proname => 'trigger_out', prorettype => 'cstring', proargtypes => 'trigger', + prosrc => 'trigger_out' }, +{ oid => '3594', descr => 'I/O', + proname => 'event_trigger_in', proisstrict => 'f', + prorettype => 'event_trigger', proargtypes => 'cstring', + prosrc => 'event_trigger_in' }, +{ oid => '3595', descr => 'I/O', + proname => 'event_trigger_out', prorettype => 'cstring', + proargtypes => 'event_trigger', prosrc => 'event_trigger_out' }, +{ oid => '2302', descr => 'I/O', + proname => 'language_handler_in', proisstrict => 'f', + prorettype => 'language_handler', proargtypes => 'cstring', + prosrc => 'language_handler_in' }, +{ oid => '2303', descr => 'I/O', + proname => 'language_handler_out', prorettype => 'cstring', + proargtypes => 'language_handler', prosrc => 'language_handler_out' }, +{ oid => '2304', descr => 'I/O', + proname => 'internal_in', proisstrict => 'f', prorettype => 'internal', + proargtypes => 'cstring', prosrc => 'internal_in' }, +{ oid => '2305', descr => 'I/O', + proname => 'internal_out', prorettype => 'cstring', proargtypes => 'internal', + prosrc => 'internal_out' }, +{ oid => '2306', descr => 'I/O', + proname => 'opaque_in', proisstrict => 'f', prorettype => 'opaque', + proargtypes => 'cstring', prosrc => 'opaque_in' }, +{ oid => '2307', descr => 'I/O', + proname => 'opaque_out', prorettype => 'cstring', proargtypes => 'opaque', + prosrc => 'opaque_out' }, +{ oid => '2312', descr => 'I/O', + proname => 'anyelement_in', prorettype => 'anyelement', + proargtypes => 'cstring', prosrc => 'anyelement_in' }, +{ oid => '2313', descr => 'I/O', + proname => 'anyelement_out', prorettype => 'cstring', + proargtypes => 'anyelement', prosrc => 'anyelement_out' }, +{ oid => '2398', descr => 'I/O', + proname => 'shell_in', proisstrict => 'f', prorettype => 'opaque', + proargtypes => 'cstring', prosrc => 'shell_in' }, +{ oid => '2399', descr => 'I/O', + proname => 'shell_out', prorettype => 'cstring', proargtypes => 'opaque', + prosrc => 'shell_out' }, +{ oid => '2597', descr => 'I/O', + proname => 'domain_in', proisstrict => 'f', provolatile => 's', + prorettype => 'any', proargtypes => 'cstring oid int4', + prosrc => 'domain_in' }, +{ oid => '2598', descr => 'I/O', + proname => 'domain_recv', proisstrict => 'f', provolatile => 's', + prorettype => 'any', proargtypes => 'internal oid int4', + prosrc => 'domain_recv' }, +{ oid => '2777', descr => 'I/O', + proname => 'anynonarray_in', prorettype => 'anynonarray', + proargtypes => 'cstring', prosrc => 'anynonarray_in' }, +{ oid => '2778', descr => 'I/O', + proname => 'anynonarray_out', prorettype => 'cstring', + proargtypes => 'anynonarray', prosrc => 'anynonarray_out' }, +{ oid => '3116', descr => 'I/O', + proname => 'fdw_handler_in', proisstrict => 'f', prorettype => 'fdw_handler', + proargtypes => 'cstring', prosrc => 'fdw_handler_in' }, +{ oid => '3117', 
descr => 'I/O', + proname => 'fdw_handler_out', prorettype => 'cstring', + proargtypes => 'fdw_handler', prosrc => 'fdw_handler_out' }, +{ oid => '326', descr => 'I/O', + proname => 'index_am_handler_in', proisstrict => 'f', + prorettype => 'index_am_handler', proargtypes => 'cstring', + prosrc => 'index_am_handler_in' }, +{ oid => '327', descr => 'I/O', + proname => 'index_am_handler_out', prorettype => 'cstring', + proargtypes => 'index_am_handler', prosrc => 'index_am_handler_out' }, +{ oid => '3311', descr => 'I/O', + proname => 'tsm_handler_in', proisstrict => 'f', prorettype => 'tsm_handler', + proargtypes => 'cstring', prosrc => 'tsm_handler_in' }, +{ oid => '3312', descr => 'I/O', + proname => 'tsm_handler_out', prorettype => 'cstring', + proargtypes => 'tsm_handler', prosrc => 'tsm_handler_out' }, + +# tablesample method handlers +{ oid => '3313', descr => 'BERNOULLI tablesample method handler', + proname => 'bernoulli', provolatile => 'v', prorettype => 'tsm_handler', + proargtypes => 'internal', prosrc => 'tsm_bernoulli_handler' }, +{ oid => '3314', descr => 'SYSTEM tablesample method handler', + proname => 'system', provolatile => 'v', prorettype => 'tsm_handler', + proargtypes => 'internal', prosrc => 'tsm_system_handler' }, + +# cryptographic +{ oid => '2311', descr => 'MD5 hash', + proname => 'md5', proleakproof => 't', prorettype => 'text', + proargtypes => 'text', prosrc => 'md5_text' }, +{ oid => '2321', descr => 'MD5 hash', + proname => 'md5', proleakproof => 't', prorettype => 'text', + proargtypes => 'bytea', prosrc => 'md5_bytea' }, +{ oid => '3419', descr => 'SHA-224 hash', + proname => 'sha224', proleakproof => 't', prorettype => 'bytea', + proargtypes => 'bytea', prosrc => 'sha224_bytea' }, +{ oid => '3420', descr => 'SHA-256 hash', + proname => 'sha256', proleakproof => 't', prorettype => 'bytea', + proargtypes => 'bytea', prosrc => 'sha256_bytea' }, +{ oid => '3421', descr => 'SHA-384 hash', + proname => 'sha384', proleakproof => 't', prorettype => 'bytea', + proargtypes => 'bytea', prosrc => 'sha384_bytea' }, +{ oid => '3422', descr => 'SHA-512 hash', + proname => 'sha512', proleakproof => 't', prorettype => 'bytea', + proargtypes => 'bytea', prosrc => 'sha512_bytea' }, + +# crosstype operations for date vs. 
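The tsm handlers above are what the TABLESAMPLE clause dispatches to, and the md5/sha* entries are plain SQL-callable digests (the sha* variants take and return bytea). For instance:

    SELECT count(*) FROM pg_class TABLESAMPLE SYSTEM (10);   -- roughly a 10% block sample
    SELECT md5('abc');                                       -- text result
    SELECT sha256('abc'::bytea);                             -- bytea result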
timestamp and timestamptz +{ oid => '2338', + proname => 'date_lt_timestamp', prorettype => 'bool', + proargtypes => 'date timestamp', prosrc => 'date_lt_timestamp' }, +{ oid => '2339', + proname => 'date_le_timestamp', prorettype => 'bool', + proargtypes => 'date timestamp', prosrc => 'date_le_timestamp' }, +{ oid => '2340', + proname => 'date_eq_timestamp', prorettype => 'bool', + proargtypes => 'date timestamp', prosrc => 'date_eq_timestamp' }, +{ oid => '2341', + proname => 'date_gt_timestamp', prorettype => 'bool', + proargtypes => 'date timestamp', prosrc => 'date_gt_timestamp' }, +{ oid => '2342', + proname => 'date_ge_timestamp', prorettype => 'bool', + proargtypes => 'date timestamp', prosrc => 'date_ge_timestamp' }, +{ oid => '2343', + proname => 'date_ne_timestamp', prorettype => 'bool', + proargtypes => 'date timestamp', prosrc => 'date_ne_timestamp' }, +{ oid => '2344', descr => 'less-equal-greater', + proname => 'date_cmp_timestamp', prorettype => 'int4', + proargtypes => 'date timestamp', prosrc => 'date_cmp_timestamp' }, + +{ oid => '2351', + proname => 'date_lt_timestamptz', provolatile => 's', prorettype => 'bool', + proargtypes => 'date timestamptz', prosrc => 'date_lt_timestamptz' }, +{ oid => '2352', + proname => 'date_le_timestamptz', provolatile => 's', prorettype => 'bool', + proargtypes => 'date timestamptz', prosrc => 'date_le_timestamptz' }, +{ oid => '2353', + proname => 'date_eq_timestamptz', provolatile => 's', prorettype => 'bool', + proargtypes => 'date timestamptz', prosrc => 'date_eq_timestamptz' }, +{ oid => '2354', + proname => 'date_gt_timestamptz', provolatile => 's', prorettype => 'bool', + proargtypes => 'date timestamptz', prosrc => 'date_gt_timestamptz' }, +{ oid => '2355', + proname => 'date_ge_timestamptz', provolatile => 's', prorettype => 'bool', + proargtypes => 'date timestamptz', prosrc => 'date_ge_timestamptz' }, +{ oid => '2356', + proname => 'date_ne_timestamptz', provolatile => 's', prorettype => 'bool', + proargtypes => 'date timestamptz', prosrc => 'date_ne_timestamptz' }, +{ oid => '2357', descr => 'less-equal-greater', + proname => 'date_cmp_timestamptz', provolatile => 's', prorettype => 'int4', + proargtypes => 'date timestamptz', prosrc => 'date_cmp_timestamptz' }, + +{ oid => '2364', + proname => 'timestamp_lt_date', prorettype => 'bool', + proargtypes => 'timestamp date', prosrc => 'timestamp_lt_date' }, +{ oid => '2365', + proname => 'timestamp_le_date', prorettype => 'bool', + proargtypes => 'timestamp date', prosrc => 'timestamp_le_date' }, +{ oid => '2366', + proname => 'timestamp_eq_date', prorettype => 'bool', + proargtypes => 'timestamp date', prosrc => 'timestamp_eq_date' }, +{ oid => '2367', + proname => 'timestamp_gt_date', prorettype => 'bool', + proargtypes => 'timestamp date', prosrc => 'timestamp_gt_date' }, +{ oid => '2368', + proname => 'timestamp_ge_date', prorettype => 'bool', + proargtypes => 'timestamp date', prosrc => 'timestamp_ge_date' }, +{ oid => '2369', + proname => 'timestamp_ne_date', prorettype => 'bool', + proargtypes => 'timestamp date', prosrc => 'timestamp_ne_date' }, +{ oid => '2370', descr => 'less-equal-greater', + proname => 'timestamp_cmp_date', prorettype => 'int4', + proargtypes => 'timestamp date', prosrc => 'timestamp_cmp_date' }, + +{ oid => '2377', + proname => 'timestamptz_lt_date', provolatile => 's', prorettype => 'bool', + proargtypes => 'timestamptz date', prosrc => 'timestamptz_lt_date' }, +{ oid => '2378', + proname => 'timestamptz_le_date', provolatile => 's', prorettype => 
'bool', + proargtypes => 'timestamptz date', prosrc => 'timestamptz_le_date' }, +{ oid => '2379', + proname => 'timestamptz_eq_date', provolatile => 's', prorettype => 'bool', + proargtypes => 'timestamptz date', prosrc => 'timestamptz_eq_date' }, +{ oid => '2380', + proname => 'timestamptz_gt_date', provolatile => 's', prorettype => 'bool', + proargtypes => 'timestamptz date', prosrc => 'timestamptz_gt_date' }, +{ oid => '2381', + proname => 'timestamptz_ge_date', provolatile => 's', prorettype => 'bool', + proargtypes => 'timestamptz date', prosrc => 'timestamptz_ge_date' }, +{ oid => '2382', + proname => 'timestamptz_ne_date', provolatile => 's', prorettype => 'bool', + proargtypes => 'timestamptz date', prosrc => 'timestamptz_ne_date' }, +{ oid => '2383', descr => 'less-equal-greater', + proname => 'timestamptz_cmp_date', provolatile => 's', prorettype => 'int4', + proargtypes => 'timestamptz date', prosrc => 'timestamptz_cmp_date' }, + +# crosstype operations for timestamp vs. timestamptz +{ oid => '2520', + proname => 'timestamp_lt_timestamptz', provolatile => 's', + prorettype => 'bool', proargtypes => 'timestamp timestamptz', + prosrc => 'timestamp_lt_timestamptz' }, +{ oid => '2521', + proname => 'timestamp_le_timestamptz', provolatile => 's', + prorettype => 'bool', proargtypes => 'timestamp timestamptz', + prosrc => 'timestamp_le_timestamptz' }, +{ oid => '2522', + proname => 'timestamp_eq_timestamptz', provolatile => 's', + prorettype => 'bool', proargtypes => 'timestamp timestamptz', + prosrc => 'timestamp_eq_timestamptz' }, +{ oid => '2523', + proname => 'timestamp_gt_timestamptz', provolatile => 's', + prorettype => 'bool', proargtypes => 'timestamp timestamptz', + prosrc => 'timestamp_gt_timestamptz' }, +{ oid => '2524', + proname => 'timestamp_ge_timestamptz', provolatile => 's', + prorettype => 'bool', proargtypes => 'timestamp timestamptz', + prosrc => 'timestamp_ge_timestamptz' }, +{ oid => '2525', + proname => 'timestamp_ne_timestamptz', provolatile => 's', + prorettype => 'bool', proargtypes => 'timestamp timestamptz', + prosrc => 'timestamp_ne_timestamptz' }, +{ oid => '2526', descr => 'less-equal-greater', + proname => 'timestamp_cmp_timestamptz', provolatile => 's', + prorettype => 'int4', proargtypes => 'timestamp timestamptz', + prosrc => 'timestamp_cmp_timestamptz' }, + +{ oid => '2527', + proname => 'timestamptz_lt_timestamp', provolatile => 's', + prorettype => 'bool', proargtypes => 'timestamptz timestamp', + prosrc => 'timestamptz_lt_timestamp' }, +{ oid => '2528', + proname => 'timestamptz_le_timestamp', provolatile => 's', + prorettype => 'bool', proargtypes => 'timestamptz timestamp', + prosrc => 'timestamptz_le_timestamp' }, +{ oid => '2529', + proname => 'timestamptz_eq_timestamp', provolatile => 's', + prorettype => 'bool', proargtypes => 'timestamptz timestamp', + prosrc => 'timestamptz_eq_timestamp' }, +{ oid => '2530', + proname => 'timestamptz_gt_timestamp', provolatile => 's', + prorettype => 'bool', proargtypes => 'timestamptz timestamp', + prosrc => 'timestamptz_gt_timestamp' }, +{ oid => '2531', + proname => 'timestamptz_ge_timestamp', provolatile => 's', + prorettype => 'bool', proargtypes => 'timestamptz timestamp', + prosrc => 'timestamptz_ge_timestamp' }, +{ oid => '2532', + proname => 'timestamptz_ne_timestamp', provolatile => 's', + prorettype => 'bool', proargtypes => 'timestamptz timestamp', + prosrc => 'timestamptz_ne_timestamp' }, +{ oid => '2533', descr => 'less-equal-greater', + proname => 'timestamptz_cmp_timestamp', provolatile => 
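These cross-type comparison entries are what let date, timestamp, and timestamptz values be compared directly, without writing casts:

    SELECT current_date < now();                           -- date vs. timestamptz
    SELECT date '2018-11-11' = timestamp '2018-11-11';     -- true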
's', + prorettype => 'int4', proargtypes => 'timestamptz timestamp', + prosrc => 'timestamptz_cmp_timestamp' }, + +# send/receive functions +{ oid => '2400', descr => 'I/O', + proname => 'array_recv', provolatile => 's', prorettype => 'anyarray', + proargtypes => 'internal oid int4', prosrc => 'array_recv' }, +{ oid => '2401', descr => 'I/O', + proname => 'array_send', provolatile => 's', prorettype => 'bytea', + proargtypes => 'anyarray', prosrc => 'array_send' }, +{ oid => '2402', descr => 'I/O', + proname => 'record_recv', provolatile => 's', prorettype => 'record', + proargtypes => 'internal oid int4', prosrc => 'record_recv' }, +{ oid => '2403', descr => 'I/O', + proname => 'record_send', provolatile => 's', prorettype => 'bytea', + proargtypes => 'record', prosrc => 'record_send' }, +{ oid => '2404', descr => 'I/O', + proname => 'int2recv', prorettype => 'int2', proargtypes => 'internal', + prosrc => 'int2recv' }, +{ oid => '2405', descr => 'I/O', + proname => 'int2send', prorettype => 'bytea', proargtypes => 'int2', + prosrc => 'int2send' }, +{ oid => '2406', descr => 'I/O', + proname => 'int4recv', prorettype => 'int4', proargtypes => 'internal', + prosrc => 'int4recv' }, +{ oid => '2407', descr => 'I/O', + proname => 'int4send', prorettype => 'bytea', proargtypes => 'int4', + prosrc => 'int4send' }, +{ oid => '2408', descr => 'I/O', + proname => 'int8recv', prorettype => 'int8', proargtypes => 'internal', + prosrc => 'int8recv' }, +{ oid => '2409', descr => 'I/O', + proname => 'int8send', prorettype => 'bytea', proargtypes => 'int8', + prosrc => 'int8send' }, +{ oid => '2410', descr => 'I/O', + proname => 'int2vectorrecv', prorettype => 'int2vector', + proargtypes => 'internal', prosrc => 'int2vectorrecv' }, +{ oid => '2411', descr => 'I/O', + proname => 'int2vectorsend', prorettype => 'bytea', + proargtypes => 'int2vector', prosrc => 'int2vectorsend' }, +{ oid => '2412', descr => 'I/O', + proname => 'bytearecv', prorettype => 'bytea', proargtypes => 'internal', + prosrc => 'bytearecv' }, +{ oid => '2413', descr => 'I/O', + proname => 'byteasend', prorettype => 'bytea', proargtypes => 'bytea', + prosrc => 'byteasend' }, +{ oid => '2414', descr => 'I/O', + proname => 'textrecv', provolatile => 's', prorettype => 'text', + proargtypes => 'internal', prosrc => 'textrecv' }, +{ oid => '2415', descr => 'I/O', + proname => 'textsend', provolatile => 's', prorettype => 'bytea', + proargtypes => 'text', prosrc => 'textsend' }, +{ oid => '2416', descr => 'I/O', + proname => 'unknownrecv', prorettype => 'unknown', proargtypes => 'internal', + prosrc => 'unknownrecv' }, +{ oid => '2417', descr => 'I/O', + proname => 'unknownsend', prorettype => 'bytea', proargtypes => 'unknown', + prosrc => 'unknownsend' }, +{ oid => '2418', descr => 'I/O', + proname => 'oidrecv', prorettype => 'oid', proargtypes => 'internal', + prosrc => 'oidrecv' }, +{ oid => '2419', descr => 'I/O', + proname => 'oidsend', prorettype => 'bytea', proargtypes => 'oid', + prosrc => 'oidsend' }, +{ oid => '2420', descr => 'I/O', + proname => 'oidvectorrecv', prorettype => 'oidvector', + proargtypes => 'internal', prosrc => 'oidvectorrecv' }, +{ oid => '2421', descr => 'I/O', + proname => 'oidvectorsend', prorettype => 'bytea', proargtypes => 'oidvector', + prosrc => 'oidvectorsend' }, +{ oid => '2422', descr => 'I/O', + proname => 'namerecv', provolatile => 's', prorettype => 'name', + proargtypes => 'internal', prosrc => 'namerecv' }, +{ oid => '2423', descr => 'I/O', + proname => 'namesend', provolatile => 's', prorettype 
=> 'bytea', + proargtypes => 'name', prosrc => 'namesend' }, +{ oid => '2424', descr => 'I/O', + proname => 'float4recv', prorettype => 'float4', proargtypes => 'internal', + prosrc => 'float4recv' }, +{ oid => '2425', descr => 'I/O', + proname => 'float4send', prorettype => 'bytea', proargtypes => 'float4', + prosrc => 'float4send' }, +{ oid => '2426', descr => 'I/O', + proname => 'float8recv', prorettype => 'float8', proargtypes => 'internal', + prosrc => 'float8recv' }, +{ oid => '2427', descr => 'I/O', + proname => 'float8send', prorettype => 'bytea', proargtypes => 'float8', + prosrc => 'float8send' }, +{ oid => '2428', descr => 'I/O', + proname => 'point_recv', prorettype => 'point', proargtypes => 'internal', + prosrc => 'point_recv' }, +{ oid => '2429', descr => 'I/O', + proname => 'point_send', prorettype => 'bytea', proargtypes => 'point', + prosrc => 'point_send' }, +{ oid => '2430', descr => 'I/O', + proname => 'bpcharrecv', provolatile => 's', prorettype => 'bpchar', + proargtypes => 'internal oid int4', prosrc => 'bpcharrecv' }, +{ oid => '2431', descr => 'I/O', + proname => 'bpcharsend', provolatile => 's', prorettype => 'bytea', + proargtypes => 'bpchar', prosrc => 'bpcharsend' }, +{ oid => '2432', descr => 'I/O', + proname => 'varcharrecv', provolatile => 's', prorettype => 'varchar', + proargtypes => 'internal oid int4', prosrc => 'varcharrecv' }, +{ oid => '2433', descr => 'I/O', + proname => 'varcharsend', provolatile => 's', prorettype => 'bytea', + proargtypes => 'varchar', prosrc => 'varcharsend' }, +{ oid => '2434', descr => 'I/O', + proname => 'charrecv', prorettype => 'char', proargtypes => 'internal', + prosrc => 'charrecv' }, +{ oid => '2435', descr => 'I/O', + proname => 'charsend', prorettype => 'bytea', proargtypes => 'char', + prosrc => 'charsend' }, +{ oid => '2436', descr => 'I/O', + proname => 'boolrecv', prorettype => 'bool', proargtypes => 'internal', + prosrc => 'boolrecv' }, +{ oid => '2437', descr => 'I/O', + proname => 'boolsend', prorettype => 'bytea', proargtypes => 'bool', + prosrc => 'boolsend' }, +{ oid => '2438', descr => 'I/O', + proname => 'tidrecv', prorettype => 'tid', proargtypes => 'internal', + prosrc => 'tidrecv' }, +{ oid => '2439', descr => 'I/O', + proname => 'tidsend', prorettype => 'bytea', proargtypes => 'tid', + prosrc => 'tidsend' }, +{ oid => '2440', descr => 'I/O', + proname => 'xidrecv', prorettype => 'xid', proargtypes => 'internal', + prosrc => 'xidrecv' }, +{ oid => '2441', descr => 'I/O', + proname => 'xidsend', prorettype => 'bytea', proargtypes => 'xid', + prosrc => 'xidsend' }, +{ oid => '2442', descr => 'I/O', + proname => 'cidrecv', prorettype => 'cid', proargtypes => 'internal', + prosrc => 'cidrecv' }, +{ oid => '2443', descr => 'I/O', + proname => 'cidsend', prorettype => 'bytea', proargtypes => 'cid', + prosrc => 'cidsend' }, +{ oid => '2444', descr => 'I/O', + proname => 'regprocrecv', prorettype => 'regproc', proargtypes => 'internal', + prosrc => 'regprocrecv' }, +{ oid => '2445', descr => 'I/O', + proname => 'regprocsend', prorettype => 'bytea', proargtypes => 'regproc', + prosrc => 'regprocsend' }, +{ oid => '2446', descr => 'I/O', + proname => 'regprocedurerecv', prorettype => 'regprocedure', + proargtypes => 'internal', prosrc => 'regprocedurerecv' }, +{ oid => '2447', descr => 'I/O', + proname => 'regproceduresend', prorettype => 'bytea', + proargtypes => 'regprocedure', prosrc => 'regproceduresend' }, +{ oid => '2448', descr => 'I/O', + proname => 'regoperrecv', prorettype => 'regoper', proargtypes => 
'internal', + prosrc => 'regoperrecv' }, +{ oid => '2449', descr => 'I/O', + proname => 'regopersend', prorettype => 'bytea', proargtypes => 'regoper', + prosrc => 'regopersend' }, +{ oid => '2450', descr => 'I/O', + proname => 'regoperatorrecv', prorettype => 'regoperator', + proargtypes => 'internal', prosrc => 'regoperatorrecv' }, +{ oid => '2451', descr => 'I/O', + proname => 'regoperatorsend', prorettype => 'bytea', + proargtypes => 'regoperator', prosrc => 'regoperatorsend' }, +{ oid => '2452', descr => 'I/O', + proname => 'regclassrecv', prorettype => 'regclass', + proargtypes => 'internal', prosrc => 'regclassrecv' }, +{ oid => '2453', descr => 'I/O', + proname => 'regclasssend', prorettype => 'bytea', proargtypes => 'regclass', + prosrc => 'regclasssend' }, +{ oid => '2454', descr => 'I/O', + proname => 'regtyperecv', prorettype => 'regtype', proargtypes => 'internal', + prosrc => 'regtyperecv' }, +{ oid => '2455', descr => 'I/O', + proname => 'regtypesend', prorettype => 'bytea', proargtypes => 'regtype', + prosrc => 'regtypesend' }, + +{ oid => '4094', descr => 'I/O', + proname => 'regrolerecv', prorettype => 'regrole', proargtypes => 'internal', + prosrc => 'regrolerecv' }, +{ oid => '4095', descr => 'I/O', + proname => 'regrolesend', prorettype => 'bytea', proargtypes => 'regrole', + prosrc => 'regrolesend' }, +{ oid => '4087', descr => 'I/O', + proname => 'regnamespacerecv', prorettype => 'regnamespace', + proargtypes => 'internal', prosrc => 'regnamespacerecv' }, +{ oid => '4088', descr => 'I/O', + proname => 'regnamespacesend', prorettype => 'bytea', + proargtypes => 'regnamespace', prosrc => 'regnamespacesend' }, +{ oid => '2456', descr => 'I/O', + proname => 'bit_recv', prorettype => 'bit', + proargtypes => 'internal oid int4', prosrc => 'bit_recv' }, +{ oid => '2457', descr => 'I/O', + proname => 'bit_send', prorettype => 'bytea', proargtypes => 'bit', + prosrc => 'bit_send' }, +{ oid => '2458', descr => 'I/O', + proname => 'varbit_recv', prorettype => 'varbit', + proargtypes => 'internal oid int4', prosrc => 'varbit_recv' }, +{ oid => '2459', descr => 'I/O', + proname => 'varbit_send', prorettype => 'bytea', proargtypes => 'varbit', + prosrc => 'varbit_send' }, +{ oid => '2460', descr => 'I/O', + proname => 'numeric_recv', prorettype => 'numeric', + proargtypes => 'internal oid int4', prosrc => 'numeric_recv' }, +{ oid => '2461', descr => 'I/O', + proname => 'numeric_send', prorettype => 'bytea', proargtypes => 'numeric', + prosrc => 'numeric_send' }, +{ oid => '2468', descr => 'I/O', + proname => 'date_recv', prorettype => 'date', proargtypes => 'internal', + prosrc => 'date_recv' }, +{ oid => '2469', descr => 'I/O', + proname => 'date_send', prorettype => 'bytea', proargtypes => 'date', + prosrc => 'date_send' }, +{ oid => '2470', descr => 'I/O', + proname => 'time_recv', prorettype => 'time', + proargtypes => 'internal oid int4', prosrc => 'time_recv' }, +{ oid => '2471', descr => 'I/O', + proname => 'time_send', prorettype => 'bytea', proargtypes => 'time', + prosrc => 'time_send' }, +{ oid => '2472', descr => 'I/O', + proname => 'timetz_recv', prorettype => 'timetz', + proargtypes => 'internal oid int4', prosrc => 'timetz_recv' }, +{ oid => '2473', descr => 'I/O', + proname => 'timetz_send', prorettype => 'bytea', proargtypes => 'timetz', + prosrc => 'timetz_send' }, +{ oid => '2474', descr => 'I/O', + proname => 'timestamp_recv', prorettype => 'timestamp', + proargtypes => 'internal oid int4', prosrc => 'timestamp_recv' }, +{ oid => '2475', descr => 'I/O', + 
proname => 'timestamp_send', prorettype => 'bytea', + proargtypes => 'timestamp', prosrc => 'timestamp_send' }, +{ oid => '2476', descr => 'I/O', + proname => 'timestamptz_recv', prorettype => 'timestamptz', + proargtypes => 'internal oid int4', prosrc => 'timestamptz_recv' }, +{ oid => '2477', descr => 'I/O', + proname => 'timestamptz_send', prorettype => 'bytea', + proargtypes => 'timestamptz', prosrc => 'timestamptz_send' }, +{ oid => '2478', descr => 'I/O', + proname => 'interval_recv', prorettype => 'interval', + proargtypes => 'internal oid int4', prosrc => 'interval_recv' }, +{ oid => '2479', descr => 'I/O', + proname => 'interval_send', prorettype => 'bytea', proargtypes => 'interval', + prosrc => 'interval_send' }, +{ oid => '2480', descr => 'I/O', + proname => 'lseg_recv', prorettype => 'lseg', proargtypes => 'internal', + prosrc => 'lseg_recv' }, +{ oid => '2481', descr => 'I/O', + proname => 'lseg_send', prorettype => 'bytea', proargtypes => 'lseg', + prosrc => 'lseg_send' }, +{ oid => '2482', descr => 'I/O', + proname => 'path_recv', prorettype => 'path', proargtypes => 'internal', + prosrc => 'path_recv' }, +{ oid => '2483', descr => 'I/O', + proname => 'path_send', prorettype => 'bytea', proargtypes => 'path', + prosrc => 'path_send' }, +{ oid => '2484', descr => 'I/O', + proname => 'box_recv', prorettype => 'box', proargtypes => 'internal', + prosrc => 'box_recv' }, +{ oid => '2485', descr => 'I/O', + proname => 'box_send', prorettype => 'bytea', proargtypes => 'box', + prosrc => 'box_send' }, +{ oid => '2486', descr => 'I/O', + proname => 'poly_recv', prorettype => 'polygon', proargtypes => 'internal', + prosrc => 'poly_recv' }, +{ oid => '2487', descr => 'I/O', + proname => 'poly_send', prorettype => 'bytea', proargtypes => 'polygon', + prosrc => 'poly_send' }, +{ oid => '2488', descr => 'I/O', + proname => 'line_recv', prorettype => 'line', proargtypes => 'internal', + prosrc => 'line_recv' }, +{ oid => '2489', descr => 'I/O', + proname => 'line_send', prorettype => 'bytea', proargtypes => 'line', + prosrc => 'line_send' }, +{ oid => '2490', descr => 'I/O', + proname => 'circle_recv', prorettype => 'circle', proargtypes => 'internal', + prosrc => 'circle_recv' }, +{ oid => '2491', descr => 'I/O', + proname => 'circle_send', prorettype => 'bytea', proargtypes => 'circle', + prosrc => 'circle_send' }, +{ oid => '2492', descr => 'I/O', + proname => 'cash_recv', prorettype => 'money', proargtypes => 'internal', + prosrc => 'cash_recv' }, +{ oid => '2493', descr => 'I/O', + proname => 'cash_send', prorettype => 'bytea', proargtypes => 'money', + prosrc => 'cash_send' }, +{ oid => '2494', descr => 'I/O', + proname => 'macaddr_recv', prorettype => 'macaddr', proargtypes => 'internal', + prosrc => 'macaddr_recv' }, +{ oid => '2495', descr => 'I/O', + proname => 'macaddr_send', prorettype => 'bytea', proargtypes => 'macaddr', + prosrc => 'macaddr_send' }, +{ oid => '2496', descr => 'I/O', + proname => 'inet_recv', prorettype => 'inet', proargtypes => 'internal', + prosrc => 'inet_recv' }, +{ oid => '2497', descr => 'I/O', + proname => 'inet_send', prorettype => 'bytea', proargtypes => 'inet', + prosrc => 'inet_send' }, +{ oid => '2498', descr => 'I/O', + proname => 'cidr_recv', prorettype => 'cidr', proargtypes => 'internal', + prosrc => 'cidr_recv' }, +{ oid => '2499', descr => 'I/O', + proname => 'cidr_send', prorettype => 'bytea', proargtypes => 'cidr', + prosrc => 'cidr_send' }, +{ oid => '2500', descr => 'I/O', + proname => 'cstring_recv', provolatile => 's', prorettype => 
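The *_recv/*_send pairs in this block implement each type's binary wire format; they are exercised by binary COPY and by clients requesting binary-mode results rather than being called directly. For example:

    -- emits the table in binary COPY format via the per-column *_send functions
    COPY pg_class TO STDOUT WITH (FORMAT binary);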
'cstring', + proargtypes => 'internal', prosrc => 'cstring_recv' }, +{ oid => '2501', descr => 'I/O', + proname => 'cstring_send', provolatile => 's', prorettype => 'bytea', + proargtypes => 'cstring', prosrc => 'cstring_send' }, +{ oid => '2502', descr => 'I/O', + proname => 'anyarray_recv', provolatile => 's', prorettype => 'anyarray', + proargtypes => 'internal', prosrc => 'anyarray_recv' }, +{ oid => '2503', descr => 'I/O', + proname => 'anyarray_send', provolatile => 's', prorettype => 'bytea', + proargtypes => 'anyarray', prosrc => 'anyarray_send' }, +{ oid => '3120', descr => 'I/O', + proname => 'void_recv', prorettype => 'void', proargtypes => 'internal', + prosrc => 'void_recv' }, +{ oid => '3121', descr => 'I/O', + proname => 'void_send', prorettype => 'bytea', proargtypes => 'void', + prosrc => 'void_send' }, +{ oid => '3446', descr => 'I/O', + proname => 'macaddr8_recv', prorettype => 'macaddr8', + proargtypes => 'internal', prosrc => 'macaddr8_recv' }, +{ oid => '3447', descr => 'I/O', + proname => 'macaddr8_send', prorettype => 'bytea', proargtypes => 'macaddr8', + prosrc => 'macaddr8_send' }, + +# System-view support functions with pretty-print option +{ oid => '2504', descr => 'source text of a rule with pretty-print option', + proname => 'pg_get_ruledef', provolatile => 's', prorettype => 'text', + proargtypes => 'oid bool', prosrc => 'pg_get_ruledef_ext' }, +{ oid => '2505', + descr => 'select statement of a view with pretty-print option', + proname => 'pg_get_viewdef', provolatile => 's', proparallel => 'r', + prorettype => 'text', proargtypes => 'text bool', + prosrc => 'pg_get_viewdef_name_ext' }, +{ oid => '2506', + descr => 'select statement of a view with pretty-print option', + proname => 'pg_get_viewdef', provolatile => 's', proparallel => 'r', + prorettype => 'text', proargtypes => 'oid bool', + prosrc => 'pg_get_viewdef_ext' }, +{ oid => '3159', + descr => 'select statement of a view with pretty-printing and specified line wrapping', + proname => 'pg_get_viewdef', provolatile => 's', proparallel => 'r', + prorettype => 'text', proargtypes => 'oid int4', + prosrc => 'pg_get_viewdef_wrap' }, +{ oid => '2507', + descr => 'index description (full create statement or single expression) with pretty-print option', + proname => 'pg_get_indexdef', provolatile => 's', prorettype => 'text', + proargtypes => 'oid int4 bool', prosrc => 'pg_get_indexdef_ext' }, +{ oid => '2508', descr => 'constraint description with pretty-print option', + proname => 'pg_get_constraintdef', provolatile => 's', prorettype => 'text', + proargtypes => 'oid bool', prosrc => 'pg_get_constraintdef_ext' }, +{ oid => '2509', + descr => 'deparse an encoded expression with pretty-print option', + proname => 'pg_get_expr', provolatile => 's', prorettype => 'text', + proargtypes => 'pg_node_tree oid bool', prosrc => 'pg_get_expr_ext' }, +{ oid => '2510', descr => 'get the prepared statements for this session', + proname => 'pg_prepared_statement', prorows => '1000', proretset => 't', + provolatile => 's', proparallel => 'r', prorettype => 'record', + proargtypes => '', proallargtypes => '{text,text,timestamptz,_regtype,bool}', + proargmodes => '{o,o,o,o,o}', + proargnames => '{name,statement,prepare_time,parameter_types,from_sql}', + prosrc => 'pg_prepared_statement' }, +{ oid => '2511', descr => 'get the open cursors for this session', + proname => 'pg_cursor', prorows => '1000', proretset => 't', + provolatile => 's', proparallel => 'r', prorettype => 'record', + proargtypes => '', proallargtypes => 
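The pg_get_*def entries are the deparsing routines used by, for example, the pg_views and pg_indexes system views; the boolean argument requests pretty-printing. A quick sketch against built-in objects:

    SELECT pg_get_viewdef('pg_stat_activity'::regclass, true);
    SELECT pg_get_indexdef('pg_class_oid_index'::regclass, 0, true);
    SELECT pg_get_expr(adbin, adrelid, true) FROM pg_attrdef LIMIT 1;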
'{text,text,bool,bool,bool,timestamptz}', + proargmodes => '{o,o,o,o,o,o}', + proargnames => '{name,statement,is_holdable,is_binary,is_scrollable,creation_time}', + prosrc => 'pg_cursor' }, +{ oid => '2599', descr => 'get the available time zone abbreviations', + proname => 'pg_timezone_abbrevs', prorows => '1000', proretset => 't', + provolatile => 's', prorettype => 'record', proargtypes => '', + proallargtypes => '{text,interval,bool}', proargmodes => '{o,o,o}', + proargnames => '{abbrev,utc_offset,is_dst}', + prosrc => 'pg_timezone_abbrevs' }, +{ oid => '2856', descr => 'get the available time zone names', + proname => 'pg_timezone_names', prorows => '1000', proretset => 't', + provolatile => 's', prorettype => 'record', proargtypes => '', + proallargtypes => '{text,text,interval,bool}', proargmodes => '{o,o,o,o}', + proargnames => '{name,abbrev,utc_offset,is_dst}', + prosrc => 'pg_timezone_names' }, +{ oid => '2730', descr => 'trigger description with pretty-print option', + proname => 'pg_get_triggerdef', provolatile => 's', prorettype => 'text', + proargtypes => 'oid bool', prosrc => 'pg_get_triggerdef_ext' }, + +# asynchronous notifications +{ oid => '3035', + descr => 'get the channels that the current backend listens to', + proname => 'pg_listening_channels', prorows => '10', proretset => 't', + provolatile => 's', proparallel => 'r', prorettype => 'text', + proargtypes => '', prosrc => 'pg_listening_channels' }, +{ oid => '3036', descr => 'send a notification event', + proname => 'pg_notify', proisstrict => 'f', provolatile => 'v', + proparallel => 'r', prorettype => 'void', proargtypes => 'text text', + prosrc => 'pg_notify' }, +{ oid => '3296', + descr => 'get the fraction of the asynchronous notification queue currently in use', + proname => 'pg_notification_queue_usage', provolatile => 'v', + prorettype => 'float8', proargtypes => '', + prosrc => 'pg_notification_queue_usage' }, + +# non-persistent series generator +{ oid => '1066', descr => 'non-persistent series generator', + proname => 'generate_series', prorows => '1000', proretset => 't', + prorettype => 'int4', proargtypes => 'int4 int4 int4', + prosrc => 'generate_series_step_int4' }, +{ oid => '1067', descr => 'non-persistent series generator', + proname => 'generate_series', prorows => '1000', proretset => 't', + prorettype => 'int4', proargtypes => 'int4 int4', + prosrc => 'generate_series_int4' }, +{ oid => '1068', descr => 'non-persistent series generator', + proname => 'generate_series', prorows => '1000', proretset => 't', + prorettype => 'int8', proargtypes => 'int8 int8 int8', + prosrc => 'generate_series_step_int8' }, +{ oid => '1069', descr => 'non-persistent series generator', + proname => 'generate_series', prorows => '1000', proretset => 't', + prorettype => 'int8', proargtypes => 'int8 int8', + prosrc => 'generate_series_int8' }, +{ oid => '3259', descr => 'non-persistent series generator', + proname => 'generate_series', prorows => '1000', proretset => 't', + prorettype => 'numeric', proargtypes => 'numeric numeric numeric', + prosrc => 'generate_series_step_numeric' }, +{ oid => '3260', descr => 'non-persistent series generator', + proname => 'generate_series', prorows => '1000', proretset => 't', + prorettype => 'numeric', proargtypes => 'numeric numeric', + prosrc => 'generate_series_numeric' }, +{ oid => '938', descr => 'non-persistent series generator', + proname => 'generate_series', prorows => '1000', proretset => 't', + prorettype => 'timestamp', proargtypes => 'timestamp timestamp interval', + 
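generate_series comes in integer, numeric, and timestamp flavours (the timestamptz variant follows below), and pg_notify is the function form of NOTIFY. Roughly (the channel name is arbitrary):

    SELECT g FROM generate_series(1, 10, 2) AS g;               -- 1,3,5,7,9
    SELECT d::date
      FROM generate_series(timestamp '2018-01-01',
                           timestamp '2018-01-07',
                           interval '1 day') AS d;
    LISTEN my_channel;
    SELECT pg_notify('my_channel', 'payload');                  -- NOTIFY with a payload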
prosrc => 'generate_series_timestamp' }, +{ oid => '939', descr => 'non-persistent series generator', + proname => 'generate_series', prorows => '1000', proretset => 't', + provolatile => 's', prorettype => 'timestamptz', + proargtypes => 'timestamptz timestamptz interval', + prosrc => 'generate_series_timestamptz' }, + +# boolean aggregates +{ oid => '2515', descr => 'aggregate transition function', + proname => 'booland_statefunc', prorettype => 'bool', + proargtypes => 'bool bool', prosrc => 'booland_statefunc' }, +{ oid => '2516', descr => 'aggregate transition function', + proname => 'boolor_statefunc', prorettype => 'bool', + proargtypes => 'bool bool', prosrc => 'boolor_statefunc' }, +{ oid => '3496', descr => 'aggregate transition function', + proname => 'bool_accum', proisstrict => 'f', prorettype => 'internal', + proargtypes => 'internal bool', prosrc => 'bool_accum' }, +{ oid => '3497', descr => 'aggregate transition function', + proname => 'bool_accum_inv', proisstrict => 'f', prorettype => 'internal', + proargtypes => 'internal bool', prosrc => 'bool_accum_inv' }, +{ oid => '3498', descr => 'aggregate final function', + proname => 'bool_alltrue', prorettype => 'bool', proargtypes => 'internal', + prosrc => 'bool_alltrue' }, +{ oid => '3499', descr => 'aggregate final function', + proname => 'bool_anytrue', prorettype => 'bool', proargtypes => 'internal', + prosrc => 'bool_anytrue' }, +{ oid => '2517', descr => 'boolean-and aggregate', + proname => 'bool_and', prokind => 'a', proisstrict => 'f', + prorettype => 'bool', proargtypes => 'bool', prosrc => 'aggregate_dummy' }, + +# ANY, SOME? These names conflict with subquery operators. See doc. +{ oid => '2518', descr => 'boolean-or aggregate', + proname => 'bool_or', prokind => 'a', proisstrict => 'f', + prorettype => 'bool', proargtypes => 'bool', prosrc => 'aggregate_dummy' }, +{ oid => '2519', descr => 'boolean-and aggregate', + proname => 'every', prokind => 'a', proisstrict => 'f', prorettype => 'bool', + proargtypes => 'bool', prosrc => 'aggregate_dummy' }, + +# bitwise integer aggregates +{ oid => '2236', descr => 'bitwise-and smallint aggregate', + proname => 'bit_and', prokind => 'a', proisstrict => 'f', + prorettype => 'int2', proargtypes => 'int2', prosrc => 'aggregate_dummy' }, +{ oid => '2237', descr => 'bitwise-or smallint aggregate', + proname => 'bit_or', prokind => 'a', proisstrict => 'f', prorettype => 'int2', + proargtypes => 'int2', prosrc => 'aggregate_dummy' }, +{ oid => '2238', descr => 'bitwise-and integer aggregate', + proname => 'bit_and', prokind => 'a', proisstrict => 'f', + prorettype => 'int4', proargtypes => 'int4', prosrc => 'aggregate_dummy' }, +{ oid => '2239', descr => 'bitwise-or integer aggregate', + proname => 'bit_or', prokind => 'a', proisstrict => 'f', prorettype => 'int4', + proargtypes => 'int4', prosrc => 'aggregate_dummy' }, +{ oid => '2240', descr => 'bitwise-and bigint aggregate', + proname => 'bit_and', prokind => 'a', proisstrict => 'f', + prorettype => 'int8', proargtypes => 'int8', prosrc => 'aggregate_dummy' }, +{ oid => '2241', descr => 'bitwise-or bigint aggregate', + proname => 'bit_or', prokind => 'a', proisstrict => 'f', prorettype => 'int8', + proargtypes => 'int8', prosrc => 'aggregate_dummy' }, +{ oid => '2242', descr => 'bitwise-and bit aggregate', + proname => 'bit_and', prokind => 'a', proisstrict => 'f', prorettype => 'bit', + proargtypes => 'bit', prosrc => 'aggregate_dummy' }, +{ oid => '2243', descr => 'bitwise-or bit aggregate', + proname => 'bit_or', prokind => 
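bool_and/bool_or aggregate over booleans (every is the SQL-standard spelling of bool_and), while bit_and/bit_or fold a column bitwise. For example:

    SELECT bool_and(f), bool_or(f), every(f)
      FROM (VALUES (true), (false)) AS v(f);          -- f, t, f
    SELECT bit_and(x), bit_or(x)
      FROM (VALUES (1), (3), (5)) AS v(x);            -- 1, 7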
'a', proisstrict => 'f', prorettype => 'bit', + proargtypes => 'bit', prosrc => 'aggregate_dummy' }, + +# formerly-missing interval + datetime operators +{ oid => '2546', + proname => 'interval_pl_date', prolang => '14', prorettype => 'timestamp', + proargtypes => 'interval date', prosrc => 'select $2 + $1' }, +{ oid => '2547', + proname => 'interval_pl_timetz', prolang => '14', prorettype => 'timetz', + proargtypes => 'interval timetz', prosrc => 'select $2 + $1' }, +{ oid => '2548', + proname => 'interval_pl_timestamp', prolang => '14', + prorettype => 'timestamp', proargtypes => 'interval timestamp', + prosrc => 'select $2 + $1' }, +{ oid => '2549', + proname => 'interval_pl_timestamptz', prolang => '14', provolatile => 's', + prorettype => 'timestamptz', proargtypes => 'interval timestamptz', + prosrc => 'select $2 + $1' }, +{ oid => '2550', + proname => 'integer_pl_date', prolang => '14', prorettype => 'date', + proargtypes => 'int4 date', prosrc => 'select $2 + $1' }, + +{ oid => '2556', descr => 'get OIDs of databases in a tablespace', + proname => 'pg_tablespace_databases', prorows => '1000', proretset => 't', + provolatile => 's', prorettype => 'oid', proargtypes => 'oid', + prosrc => 'pg_tablespace_databases' }, + +{ oid => '2557', descr => 'convert int4 to boolean', + proname => 'bool', prorettype => 'bool', proargtypes => 'int4', + prosrc => 'int4_bool' }, +{ oid => '2558', descr => 'convert boolean to int4', + proname => 'int4', prorettype => 'int4', proargtypes => 'bool', + prosrc => 'bool_int4' }, +{ oid => '2559', descr => 'current value from last used sequence', + proname => 'lastval', provolatile => 'v', proparallel => 'u', + prorettype => 'int8', proargtypes => '', prosrc => 'lastval' }, + +# start time function +{ oid => '2560', descr => 'postmaster start time', + proname => 'pg_postmaster_start_time', provolatile => 's', + prorettype => 'timestamptz', proargtypes => '', + prosrc => 'pg_postmaster_start_time' }, + +# config reload time function +{ oid => '2034', descr => 'configuration load time', + proname => 'pg_conf_load_time', provolatile => 's', proparallel => 'r', + prorettype => 'timestamptz', proargtypes => '', + prosrc => 'pg_conf_load_time' }, + +# new functions for Y-direction rtree opclasses +{ oid => '2562', + proname => 'box_below', prorettype => 'bool', proargtypes => 'box box', + prosrc => 'box_below' }, +{ oid => '2563', + proname => 'box_overbelow', prorettype => 'bool', proargtypes => 'box box', + prosrc => 'box_overbelow' }, +{ oid => '2564', + proname => 'box_overabove', prorettype => 'bool', proargtypes => 'box box', + prosrc => 'box_overabove' }, +{ oid => '2565', + proname => 'box_above', prorettype => 'bool', proargtypes => 'box box', + prosrc => 'box_above' }, +{ oid => '2566', + proname => 'poly_below', prorettype => 'bool', + proargtypes => 'polygon polygon', prosrc => 'poly_below' }, +{ oid => '2567', + proname => 'poly_overbelow', prorettype => 'bool', + proargtypes => 'polygon polygon', prosrc => 'poly_overbelow' }, +{ oid => '2568', + proname => 'poly_overabove', prorettype => 'bool', + proargtypes => 'polygon polygon', prosrc => 'poly_overabove' }, +{ oid => '2569', + proname => 'poly_above', prorettype => 'bool', + proargtypes => 'polygon polygon', prosrc => 'poly_above' }, +{ oid => '2587', + proname => 'circle_overbelow', prorettype => 'bool', + proargtypes => 'circle circle', prosrc => 'circle_overbelow' }, +{ oid => '2588', + proname => 'circle_overabove', prorettype => 'bool', + proargtypes => 'circle circle', prosrc => 
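The "formerly-missing interval + datetime operators" registered above are SQL-language functions (prolang => '14') whose bodies simply commute the addition ('select $2 + $1'). A minimal sketch calling two of them directly; the literal dates and intervals are arbitrary:

    SELECT interval_pl_date(interval '2 hours', date '2018-11-11');  -- timestamp '2018-11-11 02:00:00'
    SELECT integer_pl_date(3, date '2018-11-11');                    -- date '2018-11-14'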
'circle_overabove' }, + +# support functions for GiST r-tree emulation +{ oid => '2578', descr => 'GiST support', + proname => 'gist_box_consistent', prorettype => 'bool', + proargtypes => 'internal box int2 oid internal', + prosrc => 'gist_box_consistent' }, +{ oid => '2581', descr => 'GiST support', + proname => 'gist_box_penalty', prorettype => 'internal', + proargtypes => 'internal internal internal', prosrc => 'gist_box_penalty' }, +{ oid => '2582', descr => 'GiST support', + proname => 'gist_box_picksplit', prorettype => 'internal', + proargtypes => 'internal internal', prosrc => 'gist_box_picksplit' }, +{ oid => '2583', descr => 'GiST support', + proname => 'gist_box_union', prorettype => 'box', + proargtypes => 'internal internal', prosrc => 'gist_box_union' }, +{ oid => '2584', descr => 'GiST support', + proname => 'gist_box_same', prorettype => 'internal', + proargtypes => 'box box internal', prosrc => 'gist_box_same' }, +{ oid => '2585', descr => 'GiST support', + proname => 'gist_poly_consistent', prorettype => 'bool', + proargtypes => 'internal polygon int2 oid internal', + prosrc => 'gist_poly_consistent' }, +{ oid => '2586', descr => 'GiST support', + proname => 'gist_poly_compress', prorettype => 'internal', + proargtypes => 'internal', prosrc => 'gist_poly_compress' }, +{ oid => '2591', descr => 'GiST support', + proname => 'gist_circle_consistent', prorettype => 'bool', + proargtypes => 'internal circle int2 oid internal', + prosrc => 'gist_circle_consistent' }, +{ oid => '2592', descr => 'GiST support', + proname => 'gist_circle_compress', prorettype => 'internal', + proargtypes => 'internal', prosrc => 'gist_circle_compress' }, +{ oid => '1030', descr => 'GiST support', + proname => 'gist_point_compress', prorettype => 'internal', + proargtypes => 'internal', prosrc => 'gist_point_compress' }, +{ oid => '3282', descr => 'GiST support', + proname => 'gist_point_fetch', prorettype => 'internal', + proargtypes => 'internal', prosrc => 'gist_point_fetch' }, +{ oid => '2179', descr => 'GiST support', + proname => 'gist_point_consistent', prorettype => 'bool', + proargtypes => 'internal point int2 oid internal', + prosrc => 'gist_point_consistent' }, +{ oid => '3064', descr => 'GiST support', + proname => 'gist_point_distance', prorettype => 'float8', + proargtypes => 'internal point int2 oid internal', + prosrc => 'gist_point_distance' }, +{ oid => '3280', descr => 'GiST support', + proname => 'gist_circle_distance', prorettype => 'float8', + proargtypes => 'internal circle int2 oid internal', + prosrc => 'gist_circle_distance' }, +{ oid => '3288', descr => 'GiST support', + proname => 'gist_poly_distance', prorettype => 'float8', + proargtypes => 'internal polygon int2 oid internal', + prosrc => 'gist_poly_distance' }, + +# GIN array support +{ oid => '2743', descr => 'GIN array support', + proname => 'ginarrayextract', prorettype => 'internal', + proargtypes => 'anyarray internal internal', prosrc => 'ginarrayextract' }, +{ oid => '2774', descr => 'GIN array support', + proname => 'ginqueryarrayextract', prorettype => 'internal', + proargtypes => 'anyarray internal int2 internal internal internal internal', + prosrc => 'ginqueryarrayextract' }, +{ oid => '2744', descr => 'GIN array support', + proname => 'ginarrayconsistent', prorettype => 'bool', + proargtypes => 'internal int2 anyarray int4 internal internal internal internal', + prosrc => 'ginarrayconsistent' }, +{ oid => '3920', descr => 'GIN array support', + proname => 'ginarraytriconsistent', prorettype => 'char', + 
proargtypes => 'internal int2 anyarray int4 internal internal internal', + prosrc => 'ginarraytriconsistent' }, +{ oid => '3076', descr => 'GIN array support (obsolete)', + proname => 'ginarrayextract', prorettype => 'internal', + proargtypes => 'anyarray internal', prosrc => 'ginarrayextract_2args' }, + +# overlap/contains/contained +{ oid => '2747', + proname => 'arrayoverlap', prorettype => 'bool', + proargtypes => 'anyarray anyarray', prosrc => 'arrayoverlap' }, +{ oid => '2748', + proname => 'arraycontains', prorettype => 'bool', + proargtypes => 'anyarray anyarray', prosrc => 'arraycontains' }, +{ oid => '2749', + proname => 'arraycontained', prorettype => 'bool', + proargtypes => 'anyarray anyarray', prosrc => 'arraycontained' }, + +# BRIN minmax +{ oid => '3383', descr => 'BRIN minmax support', + proname => 'brin_minmax_opcinfo', prorettype => 'internal', + proargtypes => 'internal', prosrc => 'brin_minmax_opcinfo' }, +{ oid => '3384', descr => 'BRIN minmax support', + proname => 'brin_minmax_add_value', prorettype => 'bool', + proargtypes => 'internal internal internal internal', + prosrc => 'brin_minmax_add_value' }, +{ oid => '3385', descr => 'BRIN minmax support', + proname => 'brin_minmax_consistent', prorettype => 'bool', + proargtypes => 'internal internal internal', + prosrc => 'brin_minmax_consistent' }, +{ oid => '3386', descr => 'BRIN minmax support', + proname => 'brin_minmax_union', prorettype => 'bool', + proargtypes => 'internal internal internal', prosrc => 'brin_minmax_union' }, + +# BRIN inclusion +{ oid => '4105', descr => 'BRIN inclusion support', + proname => 'brin_inclusion_opcinfo', prorettype => 'internal', + proargtypes => 'internal', prosrc => 'brin_inclusion_opcinfo' }, +{ oid => '4106', descr => 'BRIN inclusion support', + proname => 'brin_inclusion_add_value', prorettype => 'bool', + proargtypes => 'internal internal internal internal', + prosrc => 'brin_inclusion_add_value' }, +{ oid => '4107', descr => 'BRIN inclusion support', + proname => 'brin_inclusion_consistent', prorettype => 'bool', + proargtypes => 'internal internal internal', + prosrc => 'brin_inclusion_consistent' }, +{ oid => '4108', descr => 'BRIN inclusion support', + proname => 'brin_inclusion_union', prorettype => 'bool', + proargtypes => 'internal internal internal', + prosrc => 'brin_inclusion_union' }, + +# userlock replacements +{ oid => '2880', descr => 'obtain exclusive advisory lock', + proname => 'pg_advisory_lock', provolatile => 'v', proparallel => 'u', + prorettype => 'void', proargtypes => 'int8', + prosrc => 'pg_advisory_lock_int8' }, +{ oid => '3089', descr => 'obtain exclusive advisory lock', + proname => 'pg_advisory_xact_lock', provolatile => 'v', proparallel => 'u', + prorettype => 'void', proargtypes => 'int8', + prosrc => 'pg_advisory_xact_lock_int8' }, +{ oid => '2881', descr => 'obtain shared advisory lock', + proname => 'pg_advisory_lock_shared', provolatile => 'v', proparallel => 'u', + prorettype => 'void', proargtypes => 'int8', + prosrc => 'pg_advisory_lock_shared_int8' }, +{ oid => '3090', descr => 'obtain shared advisory lock', + proname => 'pg_advisory_xact_lock_shared', provolatile => 'v', + proparallel => 'u', prorettype => 'void', proargtypes => 'int8', + prosrc => 'pg_advisory_xact_lock_shared_int8' }, +{ oid => '2882', descr => 'obtain exclusive advisory lock if available', + proname => 'pg_try_advisory_lock', provolatile => 'v', proparallel => 'u', + prorettype => 'bool', proargtypes => 'int8', + prosrc => 'pg_try_advisory_lock_int8' }, +{ oid => 
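The overlap/contains/contained functions registered above are the implementations behind the array operators &&, @> and <@, but they can also be called directly. A minimal sketch with arbitrary arrays:

    SELECT arrayoverlap(ARRAY[1,2], ARRAY[2,9]);      -- true  (same test as &&)
    SELECT arraycontains(ARRAY[1,2,3], ARRAY[2,3]);   -- true  (same test as @>)
    SELECT arraycontained(ARRAY[2], ARRAY[1,2,3]);    -- true  (same test as <@)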
'3091', descr => 'obtain exclusive advisory lock if available', + proname => 'pg_try_advisory_xact_lock', provolatile => 'v', + proparallel => 'u', prorettype => 'bool', proargtypes => 'int8', + prosrc => 'pg_try_advisory_xact_lock_int8' }, +{ oid => '2883', descr => 'obtain shared advisory lock if available', + proname => 'pg_try_advisory_lock_shared', provolatile => 'v', + proparallel => 'u', prorettype => 'bool', proargtypes => 'int8', + prosrc => 'pg_try_advisory_lock_shared_int8' }, +{ oid => '3092', descr => 'obtain shared advisory lock if available', + proname => 'pg_try_advisory_xact_lock_shared', provolatile => 'v', + proparallel => 'u', prorettype => 'bool', proargtypes => 'int8', + prosrc => 'pg_try_advisory_xact_lock_shared_int8' }, +{ oid => '2884', descr => 'release exclusive advisory lock', + proname => 'pg_advisory_unlock', provolatile => 'v', proparallel => 'u', + prorettype => 'bool', proargtypes => 'int8', + prosrc => 'pg_advisory_unlock_int8' }, +{ oid => '2885', descr => 'release shared advisory lock', + proname => 'pg_advisory_unlock_shared', provolatile => 'v', + proparallel => 'u', prorettype => 'bool', proargtypes => 'int8', + prosrc => 'pg_advisory_unlock_shared_int8' }, +{ oid => '2886', descr => 'obtain exclusive advisory lock', + proname => 'pg_advisory_lock', provolatile => 'v', proparallel => 'u', + prorettype => 'void', proargtypes => 'int4 int4', + prosrc => 'pg_advisory_lock_int4' }, +{ oid => '3093', descr => 'obtain exclusive advisory lock', + proname => 'pg_advisory_xact_lock', provolatile => 'v', proparallel => 'u', + prorettype => 'void', proargtypes => 'int4 int4', + prosrc => 'pg_advisory_xact_lock_int4' }, +{ oid => '2887', descr => 'obtain shared advisory lock', + proname => 'pg_advisory_lock_shared', provolatile => 'v', proparallel => 'u', + prorettype => 'void', proargtypes => 'int4 int4', + prosrc => 'pg_advisory_lock_shared_int4' }, +{ oid => '3094', descr => 'obtain shared advisory lock', + proname => 'pg_advisory_xact_lock_shared', provolatile => 'v', + proparallel => 'u', prorettype => 'void', proargtypes => 'int4 int4', + prosrc => 'pg_advisory_xact_lock_shared_int4' }, +{ oid => '2888', descr => 'obtain exclusive advisory lock if available', + proname => 'pg_try_advisory_lock', provolatile => 'v', proparallel => 'u', + prorettype => 'bool', proargtypes => 'int4 int4', + prosrc => 'pg_try_advisory_lock_int4' }, +{ oid => '3095', descr => 'obtain exclusive advisory lock if available', + proname => 'pg_try_advisory_xact_lock', provolatile => 'v', + proparallel => 'u', prorettype => 'bool', proargtypes => 'int4 int4', + prosrc => 'pg_try_advisory_xact_lock_int4' }, +{ oid => '2889', descr => 'obtain shared advisory lock if available', + proname => 'pg_try_advisory_lock_shared', provolatile => 'v', + proparallel => 'u', prorettype => 'bool', proargtypes => 'int4 int4', + prosrc => 'pg_try_advisory_lock_shared_int4' }, +{ oid => '3096', descr => 'obtain shared advisory lock if available', + proname => 'pg_try_advisory_xact_lock_shared', provolatile => 'v', + proparallel => 'u', prorettype => 'bool', proargtypes => 'int4 int4', + prosrc => 'pg_try_advisory_xact_lock_shared_int4' }, +{ oid => '2890', descr => 'release exclusive advisory lock', + proname => 'pg_advisory_unlock', provolatile => 'v', proparallel => 'u', + prorettype => 'bool', proargtypes => 'int4 int4', + prosrc => 'pg_advisory_unlock_int4' }, +{ oid => '2891', descr => 'release shared advisory lock', + proname => 'pg_advisory_unlock_shared', provolatile => 'v', + proparallel => 
'u', prorettype => 'bool', proargtypes => 'int4 int4', + prosrc => 'pg_advisory_unlock_shared_int4' }, +{ oid => '2892', descr => 'release all advisory locks', + proname => 'pg_advisory_unlock_all', provolatile => 'v', proparallel => 'u', + prorettype => 'void', proargtypes => '', prosrc => 'pg_advisory_unlock_all' }, + +# XML support +{ oid => '2893', descr => 'I/O', + proname => 'xml_in', provolatile => 's', prorettype => 'xml', + proargtypes => 'cstring', prosrc => 'xml_in' }, +{ oid => '2894', descr => 'I/O', + proname => 'xml_out', prorettype => 'cstring', proargtypes => 'xml', + prosrc => 'xml_out' }, +{ oid => '2895', descr => 'generate XML comment', + proname => 'xmlcomment', prorettype => 'xml', proargtypes => 'text', + prosrc => 'xmlcomment' }, +{ oid => '2896', + descr => 'perform a non-validating parse of a character string to produce an XML value', + proname => 'xml', provolatile => 's', prorettype => 'xml', + proargtypes => 'text', prosrc => 'texttoxml' }, +{ oid => '2897', descr => 'validate an XML value', + proname => 'xmlvalidate', prorettype => 'bool', proargtypes => 'xml text', + prosrc => 'xmlvalidate' }, +{ oid => '2898', descr => 'I/O', + proname => 'xml_recv', provolatile => 's', prorettype => 'xml', + proargtypes => 'internal', prosrc => 'xml_recv' }, +{ oid => '2899', descr => 'I/O', + proname => 'xml_send', provolatile => 's', prorettype => 'bytea', + proargtypes => 'xml', prosrc => 'xml_send' }, +{ oid => '2900', descr => 'aggregate transition function', + proname => 'xmlconcat2', proisstrict => 'f', prorettype => 'xml', + proargtypes => 'xml xml', prosrc => 'xmlconcat2' }, +{ oid => '2901', descr => 'concatenate XML values', + proname => 'xmlagg', prokind => 'a', proisstrict => 'f', prorettype => 'xml', + proargtypes => 'xml', prosrc => 'aggregate_dummy' }, +{ oid => '2922', descr => 'serialize an XML value to a character string', + proname => 'text', prorettype => 'text', proargtypes => 'xml', + prosrc => 'xmltotext' }, + +{ oid => '2923', descr => 'map table contents to XML', + proname => 'table_to_xml', procost => '100', provolatile => 's', + proparallel => 'r', prorettype => 'xml', + proargtypes => 'regclass bool bool text', + proargnames => '{tbl,nulls,tableforest,targetns}', prosrc => 'table_to_xml' }, +{ oid => '2924', descr => 'map query result to XML', + proname => 'query_to_xml', procost => '100', provolatile => 'v', + proparallel => 'u', prorettype => 'xml', proargtypes => 'text bool bool text', + proargnames => '{query,nulls,tableforest,targetns}', + prosrc => 'query_to_xml' }, +{ oid => '2925', descr => 'map rows from cursor to XML', + proname => 'cursor_to_xml', procost => '100', provolatile => 'v', + proparallel => 'u', prorettype => 'xml', + proargtypes => 'refcursor int4 bool bool text', + proargnames => '{cursor,count,nulls,tableforest,targetns}', + prosrc => 'cursor_to_xml' }, +{ oid => '2926', descr => 'map table structure to XML Schema', + proname => 'table_to_xmlschema', procost => '100', provolatile => 's', + proparallel => 'r', prorettype => 'xml', + proargtypes => 'regclass bool bool text', + proargnames => '{tbl,nulls,tableforest,targetns}', + prosrc => 'table_to_xmlschema' }, +{ oid => '2927', descr => 'map query result structure to XML Schema', + proname => 'query_to_xmlschema', procost => '100', provolatile => 'v', + proparallel => 'u', prorettype => 'xml', proargtypes => 'text bool bool text', + proargnames => '{query,nulls,tableforest,targetns}', + prosrc => 'query_to_xmlschema' }, +{ oid => '2928', descr => 'map cursor structure 
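A short usage sketch for the advisory-lock functions registered above, covering the single-int8-key and two-int4-key variants; the key values 42, 10 and 20 are arbitrary:

    SELECT pg_advisory_lock(42);           -- blocks until the bigint-keyed lock is held
    SELECT pg_try_advisory_lock(10, 20);   -- two-key variant; returns false instead of waiting
    SELECT pg_advisory_unlock(42);         -- true if a matching lock was released
    SELECT pg_advisory_unlock_all();       -- release all session-level advisory locks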
to XML Schema', + proname => 'cursor_to_xmlschema', procost => '100', provolatile => 'v', + proparallel => 'u', prorettype => 'xml', + proargtypes => 'refcursor bool bool text', + proargnames => '{cursor,nulls,tableforest,targetns}', + prosrc => 'cursor_to_xmlschema' }, +{ oid => '2929', + descr => 'map table contents and structure to XML and XML Schema', + proname => 'table_to_xml_and_xmlschema', procost => '100', provolatile => 's', + proparallel => 'r', prorettype => 'xml', + proargtypes => 'regclass bool bool text', + proargnames => '{tbl,nulls,tableforest,targetns}', + prosrc => 'table_to_xml_and_xmlschema' }, +{ oid => '2930', + descr => 'map query result and structure to XML and XML Schema', + proname => 'query_to_xml_and_xmlschema', procost => '100', provolatile => 'v', + proparallel => 'u', prorettype => 'xml', proargtypes => 'text bool bool text', + proargnames => '{query,nulls,tableforest,targetns}', + prosrc => 'query_to_xml_and_xmlschema' }, + +{ oid => '2933', descr => 'map schema contents to XML', + proname => 'schema_to_xml', procost => '100', provolatile => 's', + proparallel => 'r', prorettype => 'xml', proargtypes => 'name bool bool text', + proargnames => '{schema,nulls,tableforest,targetns}', + prosrc => 'schema_to_xml' }, +{ oid => '2934', descr => 'map schema structure to XML Schema', + proname => 'schema_to_xmlschema', procost => '100', provolatile => 's', + proparallel => 'r', prorettype => 'xml', proargtypes => 'name bool bool text', + proargnames => '{schema,nulls,tableforest,targetns}', + prosrc => 'schema_to_xmlschema' }, +{ oid => '2935', + descr => 'map schema contents and structure to XML and XML Schema', + proname => 'schema_to_xml_and_xmlschema', procost => '100', + provolatile => 's', proparallel => 'r', prorettype => 'xml', + proargtypes => 'name bool bool text', + proargnames => '{schema,nulls,tableforest,targetns}', + prosrc => 'schema_to_xml_and_xmlschema' }, + +{ oid => '2936', descr => 'map database contents to XML', + proname => 'database_to_xml', procost => '100', provolatile => 's', + proparallel => 'r', prorettype => 'xml', proargtypes => 'bool bool text', + proargnames => '{nulls,tableforest,targetns}', prosrc => 'database_to_xml' }, +{ oid => '2937', descr => 'map database structure to XML Schema', + proname => 'database_to_xmlschema', procost => '100', provolatile => 's', + proparallel => 'r', prorettype => 'xml', proargtypes => 'bool bool text', + proargnames => '{nulls,tableforest,targetns}', + prosrc => 'database_to_xmlschema' }, +{ oid => '2938', + descr => 'map database contents and structure to XML and XML Schema', + proname => 'database_to_xml_and_xmlschema', procost => '100', + provolatile => 's', proparallel => 'r', prorettype => 'xml', + proargtypes => 'bool bool text', + proargnames => '{nulls,tableforest,targetns}', + prosrc => 'database_to_xml_and_xmlschema' }, + +{ oid => '2931', + descr => 'evaluate XPath expression, with namespaces support', + proname => 'xpath', prorettype => '_xml', proargtypes => 'text xml _text', + prosrc => 'xpath' }, +{ oid => '2932', descr => 'evaluate XPath expression', + proname => 'xpath', prolang => '14', prorettype => '_xml', + proargtypes => 'text xml', + prosrc => 'select pg_catalog.xpath($1, $2, \'{}\'::pg_catalog.text[])' }, + +{ oid => '2614', descr => 'test XML value against XPath expression', + proname => 'xmlexists', prorettype => 'bool', proargtypes => 'text xml', + prosrc => 'xmlexists' }, + +{ oid => '3049', + descr => 'test XML value against XPath expression, with namespace support', + 
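A hedged sketch of the XML helpers registered above (xmlcomment, xpath, xmlexists); the document fragments are arbitrary:

    SELECT xmlcomment('hello');                              -- <!--hello-->
    SELECT xpath('/a/b/text()', '<a><b>42</b></a>'::xml);    -- {42}
    SELECT xmlexists('//b[text() = ''42'']' PASSING BY REF '<a><b>42</b></a>');  -- true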
proname => 'xpath_exists', prorettype => 'bool', + proargtypes => 'text xml _text', prosrc => 'xpath_exists' }, +{ oid => '3050', descr => 'test XML value against XPath expression', + proname => 'xpath_exists', prolang => '14', prorettype => 'bool', + proargtypes => 'text xml', + prosrc => 'select pg_catalog.xpath_exists($1, $2, \'{}\'::pg_catalog.text[])' }, +{ oid => '3051', descr => 'determine if a string is well formed XML', + proname => 'xml_is_well_formed', provolatile => 's', prorettype => 'bool', + proargtypes => 'text', prosrc => 'xml_is_well_formed' }, +{ oid => '3052', descr => 'determine if a string is well formed XML document', + proname => 'xml_is_well_formed_document', prorettype => 'bool', + proargtypes => 'text', prosrc => 'xml_is_well_formed_document' }, +{ oid => '3053', descr => 'determine if a string is well formed XML content', + proname => 'xml_is_well_formed_content', prorettype => 'bool', + proargtypes => 'text', prosrc => 'xml_is_well_formed_content' }, + +# json +{ oid => '321', descr => 'I/O', + proname => 'json_in', prorettype => 'json', proargtypes => 'cstring', + prosrc => 'json_in' }, +{ oid => '322', descr => 'I/O', + proname => 'json_out', prorettype => 'cstring', proargtypes => 'json', + prosrc => 'json_out' }, +{ oid => '323', descr => 'I/O', + proname => 'json_recv', prorettype => 'json', proargtypes => 'internal', + prosrc => 'json_recv' }, +{ oid => '324', descr => 'I/O', + proname => 'json_send', prorettype => 'bytea', proargtypes => 'json', + prosrc => 'json_send' }, +{ oid => '3153', descr => 'map array to json', + proname => 'array_to_json', provolatile => 's', prorettype => 'json', + proargtypes => 'anyarray', prosrc => 'array_to_json' }, +{ oid => '3154', descr => 'map array to json with optional pretty printing', + proname => 'array_to_json', provolatile => 's', prorettype => 'json', + proargtypes => 'anyarray bool', prosrc => 'array_to_json_pretty' }, +{ oid => '3155', descr => 'map row to json', + proname => 'row_to_json', provolatile => 's', prorettype => 'json', + proargtypes => 'record', prosrc => 'row_to_json' }, +{ oid => '3156', descr => 'map row to json with optional pretty printing', + proname => 'row_to_json', provolatile => 's', prorettype => 'json', + proargtypes => 'record bool', prosrc => 'row_to_json_pretty' }, +{ oid => '3173', descr => 'json aggregate transition function', + proname => 'json_agg_transfn', proisstrict => 'f', provolatile => 's', + prorettype => 'internal', proargtypes => 'internal anyelement', + prosrc => 'json_agg_transfn' }, +{ oid => '3174', descr => 'json aggregate final function', + proname => 'json_agg_finalfn', proisstrict => 'f', prorettype => 'json', + proargtypes => 'internal', prosrc => 'json_agg_finalfn' }, +{ oid => '3175', descr => 'aggregate input into json', + proname => 'json_agg', prokind => 'a', proisstrict => 'f', provolatile => 's', + prorettype => 'json', proargtypes => 'anyelement', + prosrc => 'aggregate_dummy' }, +{ oid => '3180', descr => 'json object aggregate transition function', + proname => 'json_object_agg_transfn', proisstrict => 'f', provolatile => 's', + prorettype => 'internal', proargtypes => 'internal any any', + prosrc => 'json_object_agg_transfn' }, +{ oid => '3196', descr => 'json object aggregate final function', + proname => 'json_object_agg_finalfn', proisstrict => 'f', + prorettype => 'json', proargtypes => 'internal', + prosrc => 'json_object_agg_finalfn' }, +{ oid => '3197', descr => 'aggregate input into a json object', + proname => 'json_object_agg', prokind => 
'a', proisstrict => 'f', + provolatile => 's', prorettype => 'json', proargtypes => 'any any', + prosrc => 'aggregate_dummy' }, +{ oid => '3198', descr => 'build a json array from any inputs', + proname => 'json_build_array', provariadic => 'any', proisstrict => 'f', + provolatile => 's', prorettype => 'json', proargtypes => 'any', + proallargtypes => '{any}', proargmodes => '{v}', + prosrc => 'json_build_array' }, +{ oid => '3199', descr => 'build an empty json array', + proname => 'json_build_array', proisstrict => 'f', provolatile => 's', + prorettype => 'json', proargtypes => '', + prosrc => 'json_build_array_noargs' }, +{ oid => '3200', + descr => 'build a json object from pairwise key/value inputs', + proname => 'json_build_object', provariadic => 'any', proisstrict => 'f', + provolatile => 's', prorettype => 'json', proargtypes => 'any', + proallargtypes => '{any}', proargmodes => '{v}', + prosrc => 'json_build_object' }, +{ oid => '3201', descr => 'build an empty json object', + proname => 'json_build_object', proisstrict => 'f', provolatile => 's', + prorettype => 'json', proargtypes => '', + prosrc => 'json_build_object_noargs' }, +{ oid => '3202', descr => 'map text array of key value pairs to json object', + proname => 'json_object', prorettype => 'json', proargtypes => '_text', + prosrc => 'json_object' }, +{ oid => '3203', descr => 'map text arrays of keys and values to json object', + proname => 'json_object', prorettype => 'json', proargtypes => '_text _text', + prosrc => 'json_object_two_arg' }, +{ oid => '3176', descr => 'map input to json', + proname => 'to_json', provolatile => 's', prorettype => 'json', + proargtypes => 'anyelement', prosrc => 'to_json' }, +{ oid => '3261', descr => 'remove object fields with null values from json', + proname => 'json_strip_nulls', prorettype => 'json', proargtypes => 'json', + prosrc => 'json_strip_nulls' }, + +{ oid => '3947', + proname => 'json_object_field', prorettype => 'json', + proargtypes => 'json text', proargnames => '{from_json, field_name}', + prosrc => 'json_object_field' }, +{ oid => '3948', + proname => 'json_object_field_text', prorettype => 'text', + proargtypes => 'json text', proargnames => '{from_json, field_name}', + prosrc => 'json_object_field_text' }, +{ oid => '3949', + proname => 'json_array_element', prorettype => 'json', + proargtypes => 'json int4', proargnames => '{from_json, element_index}', + prosrc => 'json_array_element' }, +{ oid => '3950', + proname => 'json_array_element_text', prorettype => 'text', + proargtypes => 'json int4', proargnames => '{from_json, element_index}', + prosrc => 'json_array_element_text' }, +{ oid => '3951', descr => 'get value from json with path elements', + proname => 'json_extract_path', provariadic => 'text', prorettype => 'json', + proargtypes => 'json _text', proallargtypes => '{json,_text}', + proargmodes => '{i,v}', proargnames => '{from_json,path_elems}', + prosrc => 'json_extract_path' }, +{ oid => '3953', descr => 'get value from json as text with path elements', + proname => 'json_extract_path_text', provariadic => 'text', + prorettype => 'text', proargtypes => 'json _text', + proallargtypes => '{json,_text}', proargmodes => '{i,v}', + proargnames => '{from_json,path_elems}', prosrc => 'json_extract_path_text' }, +{ oid => '3955', descr => 'key value pairs of a json object', + proname => 'json_array_elements', prorows => '100', proretset => 't', + prorettype => 'json', proargtypes => 'json', proallargtypes => '{json,json}', + proargmodes => '{i,o}', proargnames 
=> '{from_json,value}', + prosrc => 'json_array_elements' }, +{ oid => '3969', descr => 'elements of json array', + proname => 'json_array_elements_text', prorows => '100', proretset => 't', + prorettype => 'text', proargtypes => 'json', proallargtypes => '{json,text}', + proargmodes => '{i,o}', proargnames => '{from_json,value}', + prosrc => 'json_array_elements_text' }, +{ oid => '3956', descr => 'length of json array', + proname => 'json_array_length', prorettype => 'int4', proargtypes => 'json', + prosrc => 'json_array_length' }, +{ oid => '3957', descr => 'get json object keys', + proname => 'json_object_keys', prorows => '100', proretset => 't', + prorettype => 'text', proargtypes => 'json', prosrc => 'json_object_keys' }, +{ oid => '3958', descr => 'key value pairs of a json object', + proname => 'json_each', prorows => '100', proretset => 't', + prorettype => 'record', proargtypes => 'json', + proallargtypes => '{json,text,json}', proargmodes => '{i,o,o}', + proargnames => '{from_json,key,value}', prosrc => 'json_each' }, +{ oid => '3959', descr => 'key value pairs of a json object', + proname => 'json_each_text', prorows => '100', proretset => 't', + prorettype => 'record', proargtypes => 'json', + proallargtypes => '{json,text,text}', proargmodes => '{i,o,o}', + proargnames => '{from_json,key,value}', prosrc => 'json_each_text' }, +{ oid => '3960', descr => 'get record fields from a json object', + proname => 'json_populate_record', proisstrict => 'f', provolatile => 's', + prorettype => 'anyelement', proargtypes => 'anyelement json bool', + prosrc => 'json_populate_record' }, +{ oid => '3961', + descr => 'get set of records with fields from a json array of objects', + proname => 'json_populate_recordset', prorows => '100', proisstrict => 'f', + proretset => 't', provolatile => 's', prorettype => 'anyelement', + proargtypes => 'anyelement json bool', prosrc => 'json_populate_recordset' }, +{ oid => '3204', descr => 'get record fields from a json object', + proname => 'json_to_record', provolatile => 's', prorettype => 'record', + proargtypes => 'json', prosrc => 'json_to_record' }, +{ oid => '3205', + descr => 'get set of records with fields from a json array of objects', + proname => 'json_to_recordset', prorows => '100', proisstrict => 'f', + proretset => 't', provolatile => 's', prorettype => 'record', + proargtypes => 'json', prosrc => 'json_to_recordset' }, +{ oid => '3968', descr => 'get the type of a json value', + proname => 'json_typeof', prorettype => 'text', proargtypes => 'json', + prosrc => 'json_typeof' }, + +# uuid +{ oid => '2952', descr => 'I/O', + proname => 'uuid_in', prorettype => 'uuid', proargtypes => 'cstring', + prosrc => 'uuid_in' }, +{ oid => '2953', descr => 'I/O', + proname => 'uuid_out', prorettype => 'cstring', proargtypes => 'uuid', + prosrc => 'uuid_out' }, +{ oid => '2954', + proname => 'uuid_lt', proleakproof => 't', prorettype => 'bool', + proargtypes => 'uuid uuid', prosrc => 'uuid_lt' }, +{ oid => '2955', + proname => 'uuid_le', proleakproof => 't', prorettype => 'bool', + proargtypes => 'uuid uuid', prosrc => 'uuid_le' }, +{ oid => '2956', + proname => 'uuid_eq', proleakproof => 't', prorettype => 'bool', + proargtypes => 'uuid uuid', prosrc => 'uuid_eq' }, +{ oid => '2957', + proname => 'uuid_ge', proleakproof => 't', prorettype => 'bool', + proargtypes => 'uuid uuid', prosrc => 'uuid_ge' }, +{ oid => '2958', + proname => 'uuid_gt', proleakproof => 't', prorettype => 'bool', + proargtypes => 'uuid uuid', prosrc => 'uuid_gt' }, +{ oid => 
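A minimal sketch of the JSON constructor and accessor functions registered above (json_build_object, json_extract_path, json_each, json_typeof); keys and values are arbitrary:

    SELECT json_build_object('name', 'alice', 'active', true);     -- {"name" : "alice", "active" : true}
    SELECT json_extract_path('{"a": {"b": "c"}}'::json, 'a', 'b');  -- "c"
    SELECT * FROM json_each('{"x": 1, "y": 2}'::json);              -- rows (x,1) and (y,2)
    SELECT json_typeof('[1, 2, 3]'::json);                          -- array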
'2959', + proname => 'uuid_ne', proleakproof => 't', prorettype => 'bool', + proargtypes => 'uuid uuid', prosrc => 'uuid_ne' }, +{ oid => '2960', descr => 'less-equal-greater', + proname => 'uuid_cmp', proleakproof => 't', prorettype => 'int4', + proargtypes => 'uuid uuid', prosrc => 'uuid_cmp' }, +{ oid => '3300', descr => 'sort support', + proname => 'uuid_sortsupport', prorettype => 'void', + proargtypes => 'internal', prosrc => 'uuid_sortsupport' }, +{ oid => '2961', descr => 'I/O', + proname => 'uuid_recv', prorettype => 'uuid', proargtypes => 'internal', + prosrc => 'uuid_recv' }, +{ oid => '2962', descr => 'I/O', + proname => 'uuid_send', prorettype => 'bytea', proargtypes => 'uuid', + prosrc => 'uuid_send' }, +{ oid => '2963', descr => 'hash', + proname => 'uuid_hash', prorettype => 'int4', proargtypes => 'uuid', + prosrc => 'uuid_hash' }, +{ oid => '3412', descr => 'hash', + proname => 'uuid_hash_extended', prorettype => 'int8', + proargtypes => 'uuid int8', prosrc => 'uuid_hash_extended' }, + +# pg_lsn +{ oid => '3229', descr => 'I/O', + proname => 'pg_lsn_in', prorettype => 'pg_lsn', proargtypes => 'cstring', + prosrc => 'pg_lsn_in' }, +{ oid => '3230', descr => 'I/O', + proname => 'pg_lsn_out', prorettype => 'cstring', proargtypes => 'pg_lsn', + prosrc => 'pg_lsn_out' }, +{ oid => '3231', + proname => 'pg_lsn_lt', prorettype => 'bool', proargtypes => 'pg_lsn pg_lsn', + prosrc => 'pg_lsn_lt' }, +{ oid => '3232', + proname => 'pg_lsn_le', prorettype => 'bool', proargtypes => 'pg_lsn pg_lsn', + prosrc => 'pg_lsn_le' }, +{ oid => '3233', + proname => 'pg_lsn_eq', prorettype => 'bool', proargtypes => 'pg_lsn pg_lsn', + prosrc => 'pg_lsn_eq' }, +{ oid => '3234', + proname => 'pg_lsn_ge', prorettype => 'bool', proargtypes => 'pg_lsn pg_lsn', + prosrc => 'pg_lsn_ge' }, +{ oid => '3235', + proname => 'pg_lsn_gt', prorettype => 'bool', proargtypes => 'pg_lsn pg_lsn', + prosrc => 'pg_lsn_gt' }, +{ oid => '3236', + proname => 'pg_lsn_ne', prorettype => 'bool', proargtypes => 'pg_lsn pg_lsn', + prosrc => 'pg_lsn_ne' }, +{ oid => '3237', + proname => 'pg_lsn_mi', prorettype => 'numeric', + proargtypes => 'pg_lsn pg_lsn', prosrc => 'pg_lsn_mi' }, +{ oid => '3238', descr => 'I/O', + proname => 'pg_lsn_recv', prorettype => 'pg_lsn', proargtypes => 'internal', + prosrc => 'pg_lsn_recv' }, +{ oid => '3239', descr => 'I/O', + proname => 'pg_lsn_send', prorettype => 'bytea', proargtypes => 'pg_lsn', + prosrc => 'pg_lsn_send' }, +{ oid => '3251', descr => 'less-equal-greater', + proname => 'pg_lsn_cmp', prorettype => 'int4', proargtypes => 'pg_lsn pg_lsn', + prosrc => 'pg_lsn_cmp' }, +{ oid => '3252', descr => 'hash', + proname => 'pg_lsn_hash', prorettype => 'int4', proargtypes => 'pg_lsn', + prosrc => 'pg_lsn_hash' }, +{ oid => '3413', descr => 'hash', + proname => 'pg_lsn_hash_extended', prorettype => 'int8', + proargtypes => 'pg_lsn int8', prosrc => 'pg_lsn_hash_extended' }, + +# enum related procs +{ oid => '3504', descr => 'I/O', + proname => 'anyenum_in', prorettype => 'anyenum', proargtypes => 'cstring', + prosrc => 'anyenum_in' }, +{ oid => '3505', descr => 'I/O', + proname => 'anyenum_out', provolatile => 's', prorettype => 'cstring', + proargtypes => 'anyenum', prosrc => 'anyenum_out' }, +{ oid => '3506', descr => 'I/O', + proname => 'enum_in', provolatile => 's', prorettype => 'anyenum', + proargtypes => 'cstring oid', prosrc => 'enum_in' }, +{ oid => '3507', descr => 'I/O', + proname => 'enum_out', provolatile => 's', prorettype => 'cstring', + proargtypes => 'anyenum', prosrc => 
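The uuid and pg_lsn entries above back the usual comparison operators, and pg_lsn_mi is the function behind pg_lsn subtraction. A hedged sketch with arbitrary values:

    SELECT '16/B374D848'::pg_lsn - '16/B374D800'::pg_lsn;    -- 72 (numeric, via pg_lsn_mi)
    SELECT uuid_cmp('11111111-1111-1111-1111-111111111111',
                    '22222222-2222-2222-2222-222222222222'); -- negative: first argument sorts lower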
'enum_out' }, +{ oid => '3508', + proname => 'enum_eq', prorettype => 'bool', proargtypes => 'anyenum anyenum', + prosrc => 'enum_eq' }, +{ oid => '3509', + proname => 'enum_ne', prorettype => 'bool', proargtypes => 'anyenum anyenum', + prosrc => 'enum_ne' }, +{ oid => '3510', + proname => 'enum_lt', prorettype => 'bool', proargtypes => 'anyenum anyenum', + prosrc => 'enum_lt' }, +{ oid => '3511', + proname => 'enum_gt', prorettype => 'bool', proargtypes => 'anyenum anyenum', + prosrc => 'enum_gt' }, +{ oid => '3512', + proname => 'enum_le', prorettype => 'bool', proargtypes => 'anyenum anyenum', + prosrc => 'enum_le' }, +{ oid => '3513', + proname => 'enum_ge', prorettype => 'bool', proargtypes => 'anyenum anyenum', + prosrc => 'enum_ge' }, +{ oid => '3514', descr => 'less-equal-greater', + proname => 'enum_cmp', prorettype => 'int4', proargtypes => 'anyenum anyenum', + prosrc => 'enum_cmp' }, +{ oid => '3515', descr => 'hash', + proname => 'hashenum', prorettype => 'int4', proargtypes => 'anyenum', + prosrc => 'hashenum' }, +{ oid => '3414', descr => 'hash', + proname => 'hashenumextended', prorettype => 'int8', + proargtypes => 'anyenum int8', prosrc => 'hashenumextended' }, +{ oid => '3524', descr => 'smaller of two', + proname => 'enum_smaller', prorettype => 'anyenum', + proargtypes => 'anyenum anyenum', prosrc => 'enum_smaller' }, +{ oid => '3525', descr => 'larger of two', + proname => 'enum_larger', prorettype => 'anyenum', + proargtypes => 'anyenum anyenum', prosrc => 'enum_larger' }, +{ oid => '3526', descr => 'maximum value of all enum input values', + proname => 'max', prokind => 'a', proisstrict => 'f', prorettype => 'anyenum', + proargtypes => 'anyenum', prosrc => 'aggregate_dummy' }, +{ oid => '3527', descr => 'minimum value of all enum input values', + proname => 'min', prokind => 'a', proisstrict => 'f', prorettype => 'anyenum', + proargtypes => 'anyenum', prosrc => 'aggregate_dummy' }, +{ oid => '3528', descr => 'first value of the input enum type', + proname => 'enum_first', proisstrict => 'f', provolatile => 's', + prorettype => 'anyenum', proargtypes => 'anyenum', prosrc => 'enum_first' }, +{ oid => '3529', descr => 'last value of the input enum type', + proname => 'enum_last', proisstrict => 'f', provolatile => 's', + prorettype => 'anyenum', proargtypes => 'anyenum', prosrc => 'enum_last' }, +{ oid => '3530', + descr => 'range between the two given enum values, as an ordered array', + proname => 'enum_range', proisstrict => 'f', provolatile => 's', + prorettype => 'anyarray', proargtypes => 'anyenum anyenum', + prosrc => 'enum_range_bounds' }, +{ oid => '3531', descr => 'range of the given enum type, as an ordered array', + proname => 'enum_range', proisstrict => 'f', provolatile => 's', + prorettype => 'anyarray', proargtypes => 'anyenum', + prosrc => 'enum_range_all' }, +{ oid => '3532', descr => 'I/O', + proname => 'enum_recv', provolatile => 's', prorettype => 'anyenum', + proargtypes => 'internal oid', prosrc => 'enum_recv' }, +{ oid => '3533', descr => 'I/O', + proname => 'enum_send', provolatile => 's', prorettype => 'bytea', + proargtypes => 'anyenum', prosrc => 'enum_send' }, + +# text search stuff +{ oid => '3610', descr => 'I/O', + proname => 'tsvectorin', prorettype => 'tsvector', proargtypes => 'cstring', + prosrc => 'tsvectorin' }, +{ oid => '3639', descr => 'I/O', + proname => 'tsvectorrecv', prorettype => 'tsvector', + proargtypes => 'internal', prosrc => 'tsvectorrecv' }, +{ oid => '3611', descr => 'I/O', + proname => 'tsvectorout', prorettype => 
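The enum helpers registered above (enum_first, enum_last, enum_range) act on the enum type of their argument, so a typed NULL is sufficient. A minimal sketch; the mood type is a hypothetical example, not something defined by this patch:

    CREATE TYPE mood AS ENUM ('sad', 'ok', 'happy');   -- illustrative type only
    SELECT enum_first(NULL::mood);                     -- sad
    SELECT enum_last(NULL::mood);                      -- happy
    SELECT enum_range(NULL::mood);                     -- {sad,ok,happy}
    SELECT enum_range('ok'::mood, 'happy'::mood);      -- {ok,happy}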
'cstring', proargtypes => 'tsvector', + prosrc => 'tsvectorout' }, +{ oid => '3638', descr => 'I/O', + proname => 'tsvectorsend', prorettype => 'bytea', proargtypes => 'tsvector', + prosrc => 'tsvectorsend' }, +{ oid => '3612', descr => 'I/O', + proname => 'tsqueryin', prorettype => 'tsquery', proargtypes => 'cstring', + prosrc => 'tsqueryin' }, +{ oid => '3641', descr => 'I/O', + proname => 'tsqueryrecv', prorettype => 'tsquery', proargtypes => 'internal', + prosrc => 'tsqueryrecv' }, +{ oid => '3613', descr => 'I/O', + proname => 'tsqueryout', prorettype => 'cstring', proargtypes => 'tsquery', + prosrc => 'tsqueryout' }, +{ oid => '3640', descr => 'I/O', + proname => 'tsquerysend', prorettype => 'bytea', proargtypes => 'tsquery', + prosrc => 'tsquerysend' }, +{ oid => '3646', descr => 'I/O', + proname => 'gtsvectorin', prorettype => 'gtsvector', proargtypes => 'cstring', + prosrc => 'gtsvectorin' }, +{ oid => '3647', descr => 'I/O', + proname => 'gtsvectorout', prorettype => 'cstring', + proargtypes => 'gtsvector', prosrc => 'gtsvectorout' }, + +{ oid => '3616', + proname => 'tsvector_lt', prorettype => 'bool', + proargtypes => 'tsvector tsvector', prosrc => 'tsvector_lt' }, +{ oid => '3617', + proname => 'tsvector_le', prorettype => 'bool', + proargtypes => 'tsvector tsvector', prosrc => 'tsvector_le' }, +{ oid => '3618', + proname => 'tsvector_eq', prorettype => 'bool', + proargtypes => 'tsvector tsvector', prosrc => 'tsvector_eq' }, +{ oid => '3619', + proname => 'tsvector_ne', prorettype => 'bool', + proargtypes => 'tsvector tsvector', prosrc => 'tsvector_ne' }, +{ oid => '3620', + proname => 'tsvector_ge', prorettype => 'bool', + proargtypes => 'tsvector tsvector', prosrc => 'tsvector_ge' }, +{ oid => '3621', + proname => 'tsvector_gt', prorettype => 'bool', + proargtypes => 'tsvector tsvector', prosrc => 'tsvector_gt' }, +{ oid => '3622', descr => 'less-equal-greater', + proname => 'tsvector_cmp', prorettype => 'int4', + proargtypes => 'tsvector tsvector', prosrc => 'tsvector_cmp' }, + +{ oid => '3711', descr => 'number of lexemes', + proname => 'length', prorettype => 'int4', proargtypes => 'tsvector', + prosrc => 'tsvector_length' }, +{ oid => '3623', descr => 'strip position information', + proname => 'strip', prorettype => 'tsvector', proargtypes => 'tsvector', + prosrc => 'tsvector_strip' }, +{ oid => '3624', descr => 'set given weight for whole tsvector', + proname => 'setweight', prorettype => 'tsvector', + proargtypes => 'tsvector char', prosrc => 'tsvector_setweight' }, +{ oid => '3320', descr => 'set given weight for given lexemes', + proname => 'setweight', prorettype => 'tsvector', + proargtypes => 'tsvector char _text', + prosrc => 'tsvector_setweight_by_filter' }, +{ oid => '3625', + proname => 'tsvector_concat', prorettype => 'tsvector', + proargtypes => 'tsvector tsvector', prosrc => 'tsvector_concat' }, +{ oid => '3321', descr => 'delete lexeme', + proname => 'ts_delete', prorettype => 'tsvector', + proargtypes => 'tsvector text', prosrc => 'tsvector_delete_str' }, +{ oid => '3323', descr => 'delete given lexemes', + proname => 'ts_delete', prorettype => 'tsvector', + proargtypes => 'tsvector _text', prosrc => 'tsvector_delete_arr' }, +{ oid => '3322', descr => 'expand tsvector to set of rows', + proname => 'unnest', prorows => '10', proretset => 't', + prorettype => 'record', proargtypes => 'tsvector', + proallargtypes => '{tsvector,text,_int2,_text}', proargmodes => '{i,o,o,o}', + proargnames => '{tsvector,lexeme,positions,weights}', + prosrc => 'tsvector_unnest' 
}, +{ oid => '3326', descr => 'convert tsvector to array of lexemes', + proname => 'tsvector_to_array', prorettype => '_text', + proargtypes => 'tsvector', prosrc => 'tsvector_to_array' }, +{ oid => '3327', descr => 'build tsvector from array of lexemes', + proname => 'array_to_tsvector', prorettype => 'tsvector', + proargtypes => '_text', prosrc => 'array_to_tsvector' }, +{ oid => '3319', + descr => 'delete lexemes that do not have one of the given weights', + proname => 'ts_filter', prorettype => 'tsvector', + proargtypes => 'tsvector _char', prosrc => 'tsvector_filter' }, + +{ oid => '3634', + proname => 'ts_match_vq', prorettype => 'bool', + proargtypes => 'tsvector tsquery', prosrc => 'ts_match_vq' }, +{ oid => '3635', + proname => 'ts_match_qv', prorettype => 'bool', + proargtypes => 'tsquery tsvector', prosrc => 'ts_match_qv' }, +{ oid => '3760', + proname => 'ts_match_tt', procost => '100', provolatile => 's', + prorettype => 'bool', proargtypes => 'text text', prosrc => 'ts_match_tt' }, +{ oid => '3761', + proname => 'ts_match_tq', procost => '100', provolatile => 's', + prorettype => 'bool', proargtypes => 'text tsquery', + prosrc => 'ts_match_tq' }, + +{ oid => '3648', descr => 'GiST tsvector support', + proname => 'gtsvector_compress', prorettype => 'internal', + proargtypes => 'internal', prosrc => 'gtsvector_compress' }, +{ oid => '3649', descr => 'GiST tsvector support', + proname => 'gtsvector_decompress', prorettype => 'internal', + proargtypes => 'internal', prosrc => 'gtsvector_decompress' }, +{ oid => '3650', descr => 'GiST tsvector support', + proname => 'gtsvector_picksplit', prorettype => 'internal', + proargtypes => 'internal internal', prosrc => 'gtsvector_picksplit' }, +{ oid => '3651', descr => 'GiST tsvector support', + proname => 'gtsvector_union', prorettype => 'gtsvector', + proargtypes => 'internal internal', prosrc => 'gtsvector_union' }, +{ oid => '3652', descr => 'GiST tsvector support', + proname => 'gtsvector_same', prorettype => 'internal', + proargtypes => 'gtsvector gtsvector internal', prosrc => 'gtsvector_same' }, +{ oid => '3653', descr => 'GiST tsvector support', + proname => 'gtsvector_penalty', prorettype => 'internal', + proargtypes => 'internal internal internal', prosrc => 'gtsvector_penalty' }, +{ oid => '3654', descr => 'GiST tsvector support', + proname => 'gtsvector_consistent', prorettype => 'bool', + proargtypes => 'internal tsvector int2 oid internal', + prosrc => 'gtsvector_consistent' }, +{ oid => '3790', descr => 'GiST tsvector support (obsolete)', + proname => 'gtsvector_consistent', prorettype => 'bool', + proargtypes => 'internal gtsvector int4 oid internal', + prosrc => 'gtsvector_consistent_oldsig' }, + +{ oid => '3656', descr => 'GIN tsvector support', + proname => 'gin_extract_tsvector', prorettype => 'internal', + proargtypes => 'tsvector internal internal', + prosrc => 'gin_extract_tsvector' }, +{ oid => '3657', descr => 'GIN tsvector support', + proname => 'gin_extract_tsquery', prorettype => 'internal', + proargtypes => 'tsvector internal int2 internal internal internal internal', + prosrc => 'gin_extract_tsquery' }, +{ oid => '3658', descr => 'GIN tsvector support', + proname => 'gin_tsquery_consistent', prorettype => 'bool', + proargtypes => 'internal int2 tsvector int4 internal internal internal internal', + prosrc => 'gin_tsquery_consistent' }, +{ oid => '3921', descr => 'GIN tsvector support', + proname => 'gin_tsquery_triconsistent', prorettype => 'char', + proargtypes => 'internal int2 tsvector int4 internal 
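A short sketch of the tsvector manipulation functions registered above (setweight, strip, length, tsvector_to_array, ts_delete); the lexemes and positions are arbitrary:

    SELECT setweight('cat:3 rat:5,6'::tsvector, 'A');   -- 'cat':3A 'rat':5A,6A
    SELECT strip('cat:3 rat:5,6'::tsvector);            -- 'cat' 'rat'
    SELECT length('cat:3 rat:5,6'::tsvector);           -- 2
    SELECT tsvector_to_array('cat:3 rat:5'::tsvector);  -- {cat,rat}
    SELECT ts_delete('cat:3 rat:5'::tsvector, 'rat');   -- 'cat':3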
internal internal', + prosrc => 'gin_tsquery_triconsistent' }, +{ oid => '3724', descr => 'GIN tsvector support', + proname => 'gin_cmp_tslexeme', prorettype => 'int4', + proargtypes => 'text text', prosrc => 'gin_cmp_tslexeme' }, +{ oid => '2700', descr => 'GIN tsvector support', + proname => 'gin_cmp_prefix', prorettype => 'int4', + proargtypes => 'text text int2 internal', prosrc => 'gin_cmp_prefix' }, +{ oid => '3077', descr => 'GIN tsvector support (obsolete)', + proname => 'gin_extract_tsvector', prorettype => 'internal', + proargtypes => 'tsvector internal', prosrc => 'gin_extract_tsvector_2args' }, +{ oid => '3087', descr => 'GIN tsvector support (obsolete)', + proname => 'gin_extract_tsquery', prorettype => 'internal', + proargtypes => 'tsquery internal int2 internal internal', + prosrc => 'gin_extract_tsquery_5args' }, +{ oid => '3088', descr => 'GIN tsvector support (obsolete)', + proname => 'gin_tsquery_consistent', prorettype => 'bool', + proargtypes => 'internal int2 tsquery int4 internal internal', + prosrc => 'gin_tsquery_consistent_6args' }, +{ oid => '3791', descr => 'GIN tsvector support (obsolete)', + proname => 'gin_extract_tsquery', prorettype => 'internal', + proargtypes => 'tsquery internal int2 internal internal internal internal', + prosrc => 'gin_extract_tsquery_oldsig' }, +{ oid => '3792', descr => 'GIN tsvector support (obsolete)', + proname => 'gin_tsquery_consistent', prorettype => 'bool', + proargtypes => 'internal int2 tsquery int4 internal internal internal internal', + prosrc => 'gin_tsquery_consistent_oldsig' }, + +{ oid => '3789', descr => 'clean up GIN pending list', + proname => 'gin_clean_pending_list', provolatile => 'v', proparallel => 'u', + prorettype => 'int8', proargtypes => 'regclass', + prosrc => 'gin_clean_pending_list' }, + +{ oid => '3662', + proname => 'tsquery_lt', prorettype => 'bool', + proargtypes => 'tsquery tsquery', prosrc => 'tsquery_lt' }, +{ oid => '3663', + proname => 'tsquery_le', prorettype => 'bool', + proargtypes => 'tsquery tsquery', prosrc => 'tsquery_le' }, +{ oid => '3664', + proname => 'tsquery_eq', prorettype => 'bool', + proargtypes => 'tsquery tsquery', prosrc => 'tsquery_eq' }, +{ oid => '3665', + proname => 'tsquery_ne', prorettype => 'bool', + proargtypes => 'tsquery tsquery', prosrc => 'tsquery_ne' }, +{ oid => '3666', + proname => 'tsquery_ge', prorettype => 'bool', + proargtypes => 'tsquery tsquery', prosrc => 'tsquery_ge' }, +{ oid => '3667', + proname => 'tsquery_gt', prorettype => 'bool', + proargtypes => 'tsquery tsquery', prosrc => 'tsquery_gt' }, +{ oid => '3668', descr => 'less-equal-greater', + proname => 'tsquery_cmp', prorettype => 'int4', + proargtypes => 'tsquery tsquery', prosrc => 'tsquery_cmp' }, + +{ oid => '3669', + proname => 'tsquery_and', prorettype => 'tsquery', + proargtypes => 'tsquery tsquery', prosrc => 'tsquery_and' }, +{ oid => '3670', + proname => 'tsquery_or', prorettype => 'tsquery', + proargtypes => 'tsquery tsquery', prosrc => 'tsquery_or' }, +{ oid => '5003', + proname => 'tsquery_phrase', prorettype => 'tsquery', + proargtypes => 'tsquery tsquery', prosrc => 'tsquery_phrase' }, +{ oid => '5004', descr => 'phrase-concatenate with distance', + proname => 'tsquery_phrase', prorettype => 'tsquery', + proargtypes => 'tsquery tsquery int4', prosrc => 'tsquery_phrase_distance' }, +{ oid => '3671', + proname => 'tsquery_not', prorettype => 'tsquery', proargtypes => 'tsquery', + prosrc => 'tsquery_not' }, + +{ oid => '3691', + proname => 'tsq_mcontains', prorettype => 'bool', + 
proargtypes => 'tsquery tsquery', prosrc => 'tsq_mcontains' }, +{ oid => '3692', + proname => 'tsq_mcontained', prorettype => 'bool', + proargtypes => 'tsquery tsquery', prosrc => 'tsq_mcontained' }, + +{ oid => '3672', descr => 'number of nodes', + proname => 'numnode', prorettype => 'int4', proargtypes => 'tsquery', + prosrc => 'tsquery_numnode' }, +{ oid => '3673', descr => 'show real useful query for GiST index', + proname => 'querytree', prorettype => 'text', proargtypes => 'tsquery', + prosrc => 'tsquerytree' }, + +{ oid => '3684', descr => 'rewrite tsquery', + proname => 'ts_rewrite', prorettype => 'tsquery', + proargtypes => 'tsquery tsquery tsquery', prosrc => 'tsquery_rewrite' }, +{ oid => '3685', descr => 'rewrite tsquery', + proname => 'ts_rewrite', procost => '100', provolatile => 'v', + proparallel => 'u', prorettype => 'tsquery', proargtypes => 'tsquery text', + prosrc => 'tsquery_rewrite_query' }, + +{ oid => '3695', descr => 'GiST tsquery support', + proname => 'gtsquery_compress', prorettype => 'internal', + proargtypes => 'internal', prosrc => 'gtsquery_compress' }, +{ oid => '3697', descr => 'GiST tsquery support', + proname => 'gtsquery_picksplit', prorettype => 'internal', + proargtypes => 'internal internal', prosrc => 'gtsquery_picksplit' }, +{ oid => '3698', descr => 'GiST tsquery support', + proname => 'gtsquery_union', prorettype => 'int8', + proargtypes => 'internal internal', prosrc => 'gtsquery_union' }, +{ oid => '3699', descr => 'GiST tsquery support', + proname => 'gtsquery_same', prorettype => 'internal', + proargtypes => 'int8 int8 internal', prosrc => 'gtsquery_same' }, +{ oid => '3700', descr => 'GiST tsquery support', + proname => 'gtsquery_penalty', prorettype => 'internal', + proargtypes => 'internal internal internal', prosrc => 'gtsquery_penalty' }, +{ oid => '3701', descr => 'GiST tsquery support', + proname => 'gtsquery_consistent', prorettype => 'bool', + proargtypes => 'internal tsquery int2 oid internal', + prosrc => 'gtsquery_consistent' }, +{ oid => '3793', descr => 'GiST tsquery support (obsolete)', + proname => 'gtsquery_consistent', prorettype => 'bool', + proargtypes => 'internal internal int4 oid internal', + prosrc => 'gtsquery_consistent_oldsig' }, + +{ oid => '3686', descr => 'restriction selectivity of tsvector @@ tsquery', + proname => 'tsmatchsel', provolatile => 's', prorettype => 'float8', + proargtypes => 'internal oid internal int4', prosrc => 'tsmatchsel' }, +{ oid => '3687', descr => 'join selectivity of tsvector @@ tsquery', + proname => 'tsmatchjoinsel', provolatile => 's', prorettype => 'float8', + proargtypes => 'internal oid internal int2 internal', + prosrc => 'tsmatchjoinsel' }, +{ oid => '3688', descr => 'tsvector typanalyze', + proname => 'ts_typanalyze', provolatile => 's', prorettype => 'bool', + proargtypes => 'internal', prosrc => 'ts_typanalyze' }, + +{ oid => '3689', descr => 'statistics of tsvector column', + proname => 'ts_stat', procost => '10', prorows => '10000', proretset => 't', + provolatile => 'v', proparallel => 'u', prorettype => 'record', + proargtypes => 'text', proallargtypes => '{text,text,int4,int4}', + proargmodes => '{i,o,o,o}', proargnames => '{query,word,ndoc,nentry}', + prosrc => 'ts_stat1' }, +{ oid => '3690', descr => 'statistics of tsvector column', + proname => 'ts_stat', procost => '10', prorows => '10000', proretset => 't', + provolatile => 'v', proparallel => 'u', prorettype => 'record', + proargtypes => 'text text', proallargtypes => '{text,text,text,int4,int4}', + proargmodes => 
'{i,i,o,o,o}', + proargnames => '{query,weights,word,ndoc,nentry}', prosrc => 'ts_stat2' }, + +{ oid => '3703', descr => 'relevance', + proname => 'ts_rank', prorettype => 'float4', + proargtypes => '_float4 tsvector tsquery int4', prosrc => 'ts_rank_wttf' }, +{ oid => '3704', descr => 'relevance', + proname => 'ts_rank', prorettype => 'float4', + proargtypes => '_float4 tsvector tsquery', prosrc => 'ts_rank_wtt' }, +{ oid => '3705', descr => 'relevance', + proname => 'ts_rank', prorettype => 'float4', + proargtypes => 'tsvector tsquery int4', prosrc => 'ts_rank_ttf' }, +{ oid => '3706', descr => 'relevance', + proname => 'ts_rank', prorettype => 'float4', + proargtypes => 'tsvector tsquery', prosrc => 'ts_rank_tt' }, +{ oid => '3707', descr => 'relevance', + proname => 'ts_rank_cd', prorettype => 'float4', + proargtypes => '_float4 tsvector tsquery int4', prosrc => 'ts_rankcd_wttf' }, +{ oid => '3708', descr => 'relevance', + proname => 'ts_rank_cd', prorettype => 'float4', + proargtypes => '_float4 tsvector tsquery', prosrc => 'ts_rankcd_wtt' }, +{ oid => '3709', descr => 'relevance', + proname => 'ts_rank_cd', prorettype => 'float4', + proargtypes => 'tsvector tsquery int4', prosrc => 'ts_rankcd_ttf' }, +{ oid => '3710', descr => 'relevance', + proname => 'ts_rank_cd', prorettype => 'float4', + proargtypes => 'tsvector tsquery', prosrc => 'ts_rankcd_tt' }, + +{ oid => '3713', descr => 'get parser\'s token types', + proname => 'ts_token_type', prorows => '16', proretset => 't', + prorettype => 'record', proargtypes => 'oid', + proallargtypes => '{oid,int4,text,text}', proargmodes => '{i,o,o,o}', + proargnames => '{parser_oid,tokid,alias,description}', + prosrc => 'ts_token_type_byid' }, +{ oid => '3714', descr => 'get parser\'s token types', + proname => 'ts_token_type', prorows => '16', proretset => 't', + provolatile => 's', prorettype => 'record', proargtypes => 'text', + proallargtypes => '{text,int4,text,text}', proargmodes => '{i,o,o,o}', + proargnames => '{parser_name,tokid,alias,description}', + prosrc => 'ts_token_type_byname' }, +{ oid => '3715', descr => 'parse text to tokens', + proname => 'ts_parse', prorows => '1000', proretset => 't', + prorettype => 'record', proargtypes => 'oid text', + proallargtypes => '{oid,text,int4,text}', proargmodes => '{i,i,o,o}', + proargnames => '{parser_oid,txt,tokid,token}', prosrc => 'ts_parse_byid' }, +{ oid => '3716', descr => 'parse text to tokens', + proname => 'ts_parse', prorows => '1000', proretset => 't', + provolatile => 's', prorettype => 'record', proargtypes => 'text text', + proallargtypes => '{text,text,int4,text}', proargmodes => '{i,i,o,o}', + proargnames => '{parser_name,txt,tokid,token}', prosrc => 'ts_parse_byname' }, + +{ oid => '3717', descr => '(internal)', + proname => 'prsd_start', prorettype => 'internal', + proargtypes => 'internal int4', prosrc => 'prsd_start' }, +{ oid => '3718', descr => '(internal)', + proname => 'prsd_nexttoken', prorettype => 'internal', + proargtypes => 'internal internal internal', prosrc => 'prsd_nexttoken' }, +{ oid => '3719', descr => '(internal)', + proname => 'prsd_end', prorettype => 'void', proargtypes => 'internal', + prosrc => 'prsd_end' }, +{ oid => '3720', descr => '(internal)', + proname => 'prsd_headline', prorettype => 'internal', + proargtypes => 'internal internal tsquery', prosrc => 'prsd_headline' }, +{ oid => '3721', descr => '(internal)', + proname => 'prsd_lextype', prorettype => 'internal', + proargtypes => 'internal', prosrc => 'prsd_lextype' }, + +{ oid => '3723', 
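A hedged sketch of the parser and ranking entries registered above (ts_token_type, ts_parse, ts_rank), using the stock 'default' parser and 'english' configuration with arbitrary sample text:

    SELECT * FROM ts_token_type('default') LIMIT 3;        -- (tokid, alias, description) rows
    SELECT * FROM ts_parse('default', 'hello world 42');   -- (tokid, token) rows
    SELECT ts_rank(to_tsvector('english', 'the quick brown fox'),
                   to_tsquery('english', 'fox'));          -- a relevance score around 0.06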
descr => 'normalize one word by dictionary', + proname => 'ts_lexize', prorettype => '_text', + proargtypes => 'regdictionary text', prosrc => 'ts_lexize' }, + +{ oid => '3725', descr => '(internal)', + proname => 'dsimple_init', prorettype => 'internal', + proargtypes => 'internal', prosrc => 'dsimple_init' }, +{ oid => '3726', descr => '(internal)', + proname => 'dsimple_lexize', prorettype => 'internal', + proargtypes => 'internal internal internal internal', + prosrc => 'dsimple_lexize' }, + +{ oid => '3728', descr => '(internal)', + proname => 'dsynonym_init', prorettype => 'internal', + proargtypes => 'internal', prosrc => 'dsynonym_init' }, +{ oid => '3729', descr => '(internal)', + proname => 'dsynonym_lexize', prorettype => 'internal', + proargtypes => 'internal internal internal internal', + prosrc => 'dsynonym_lexize' }, + +{ oid => '3731', descr => '(internal)', + proname => 'dispell_init', prorettype => 'internal', + proargtypes => 'internal', prosrc => 'dispell_init' }, +{ oid => '3732', descr => '(internal)', + proname => 'dispell_lexize', prorettype => 'internal', + proargtypes => 'internal internal internal internal', + prosrc => 'dispell_lexize' }, + +{ oid => '3740', descr => '(internal)', + proname => 'thesaurus_init', prorettype => 'internal', + proargtypes => 'internal', prosrc => 'thesaurus_init' }, +{ oid => '3741', descr => '(internal)', + proname => 'thesaurus_lexize', prorettype => 'internal', + proargtypes => 'internal internal internal internal', + prosrc => 'thesaurus_lexize' }, + +{ oid => '3743', descr => 'generate headline', + proname => 'ts_headline', procost => '100', prorettype => 'text', + proargtypes => 'regconfig text tsquery text', + prosrc => 'ts_headline_byid_opt' }, +{ oid => '3744', descr => 'generate headline', + proname => 'ts_headline', procost => '100', prorettype => 'text', + proargtypes => 'regconfig text tsquery', prosrc => 'ts_headline_byid' }, +{ oid => '3754', descr => 'generate headline', + proname => 'ts_headline', procost => '100', provolatile => 's', + prorettype => 'text', proargtypes => 'text tsquery text', + prosrc => 'ts_headline_opt' }, +{ oid => '3755', descr => 'generate headline', + proname => 'ts_headline', procost => '100', provolatile => 's', + prorettype => 'text', proargtypes => 'text tsquery', + prosrc => 'ts_headline' }, + +{ oid => '4201', descr => 'generate headline from jsonb', + proname => 'ts_headline', procost => '100', prorettype => 'jsonb', + proargtypes => 'regconfig jsonb tsquery text', + prosrc => 'ts_headline_jsonb_byid_opt' }, +{ oid => '4202', descr => 'generate headline from jsonb', + proname => 'ts_headline', procost => '100', prorettype => 'jsonb', + proargtypes => 'regconfig jsonb tsquery', + prosrc => 'ts_headline_jsonb_byid' }, +{ oid => '4203', descr => 'generate headline from jsonb', + proname => 'ts_headline', procost => '100', provolatile => 's', + prorettype => 'jsonb', proargtypes => 'jsonb tsquery text', + prosrc => 'ts_headline_jsonb_opt' }, +{ oid => '4204', descr => 'generate headline from jsonb', + proname => 'ts_headline', procost => '100', provolatile => 's', + prorettype => 'jsonb', proargtypes => 'jsonb tsquery', + prosrc => 'ts_headline_jsonb' }, + +{ oid => '4205', descr => 'generate headline from json', + proname => 'ts_headline', procost => '100', prorettype => 'json', + proargtypes => 'regconfig json tsquery text', + prosrc => 'ts_headline_json_byid_opt' }, +{ oid => '4206', descr => 'generate headline from json', + proname => 'ts_headline', procost => '100', prorettype => 
'json', + proargtypes => 'regconfig json tsquery', prosrc => 'ts_headline_json_byid' }, +{ oid => '4207', descr => 'generate headline from json', + proname => 'ts_headline', procost => '100', provolatile => 's', + prorettype => 'json', proargtypes => 'json tsquery text', + prosrc => 'ts_headline_json_opt' }, +{ oid => '4208', descr => 'generate headline from json', + proname => 'ts_headline', procost => '100', provolatile => 's', + prorettype => 'json', proargtypes => 'json tsquery', + prosrc => 'ts_headline_json' }, + +{ oid => '3745', descr => 'transform to tsvector', + proname => 'to_tsvector', procost => '100', prorettype => 'tsvector', + proargtypes => 'regconfig text', prosrc => 'to_tsvector_byid' }, +{ oid => '3746', descr => 'make tsquery', + proname => 'to_tsquery', procost => '100', prorettype => 'tsquery', + proargtypes => 'regconfig text', prosrc => 'to_tsquery_byid' }, +{ oid => '3747', descr => 'transform to tsquery', + proname => 'plainto_tsquery', procost => '100', prorettype => 'tsquery', + proargtypes => 'regconfig text', prosrc => 'plainto_tsquery_byid' }, +{ oid => '5006', descr => 'transform to tsquery', + proname => 'phraseto_tsquery', procost => '100', prorettype => 'tsquery', + proargtypes => 'regconfig text', prosrc => 'phraseto_tsquery_byid' }, +{ oid => '5007', descr => 'transform to tsquery', + proname => 'websearch_to_tsquery', procost => '100', prorettype => 'tsquery', + proargtypes => 'regconfig text', prosrc => 'websearch_to_tsquery_byid' }, +{ oid => '3749', descr => 'transform to tsvector', + proname => 'to_tsvector', procost => '100', provolatile => 's', + prorettype => 'tsvector', proargtypes => 'text', prosrc => 'to_tsvector' }, +{ oid => '3750', descr => 'make tsquery', + proname => 'to_tsquery', procost => '100', provolatile => 's', + prorettype => 'tsquery', proargtypes => 'text', prosrc => 'to_tsquery' }, +{ oid => '3751', descr => 'transform to tsquery', + proname => 'plainto_tsquery', procost => '100', provolatile => 's', + prorettype => 'tsquery', proargtypes => 'text', prosrc => 'plainto_tsquery' }, +{ oid => '5001', descr => 'transform to tsquery', + proname => 'phraseto_tsquery', procost => '100', provolatile => 's', + prorettype => 'tsquery', proargtypes => 'text', + prosrc => 'phraseto_tsquery' }, +{ oid => '5009', descr => 'transform to tsquery', + proname => 'websearch_to_tsquery', procost => '100', provolatile => 's', + prorettype => 'tsquery', proargtypes => 'text', + prosrc => 'websearch_to_tsquery' }, +{ oid => '4209', descr => 'transform string values from jsonb to tsvector', + proname => 'to_tsvector', procost => '100', provolatile => 's', + prorettype => 'tsvector', proargtypes => 'jsonb', + prosrc => 'jsonb_string_to_tsvector' }, +{ oid => '4213', descr => 'transform specified values from jsonb to tsvector', + proname => 'jsonb_to_tsvector', procost => '100', provolatile => 's', + prorettype => 'tsvector', proargtypes => 'jsonb jsonb', + prosrc => 'jsonb_to_tsvector' }, +{ oid => '4210', descr => 'transform string values from json to tsvector', + proname => 'to_tsvector', procost => '100', provolatile => 's', + prorettype => 'tsvector', proargtypes => 'json', + prosrc => 'json_string_to_tsvector' }, +{ oid => '4215', descr => 'transform specified values from json to tsvector', + proname => 'json_to_tsvector', procost => '100', provolatile => 's', + prorettype => 'tsvector', proargtypes => 'json jsonb', + prosrc => 'json_to_tsvector' }, +{ oid => '4211', descr => 'transform string values from jsonb to tsvector', + proname => 
'to_tsvector', procost => '100', prorettype => 'tsvector', + proargtypes => 'regconfig jsonb', prosrc => 'jsonb_string_to_tsvector_byid' }, +{ oid => '4214', descr => 'transform specified values from jsonb to tsvector', + proname => 'jsonb_to_tsvector', procost => '100', prorettype => 'tsvector', + proargtypes => 'regconfig jsonb jsonb', prosrc => 'jsonb_to_tsvector_byid' }, +{ oid => '4212', descr => 'transform string values from json to tsvector', + proname => 'to_tsvector', procost => '100', prorettype => 'tsvector', + proargtypes => 'regconfig json', prosrc => 'json_string_to_tsvector_byid' }, +{ oid => '4216', descr => 'transform specified values from json to tsvector', + proname => 'json_to_tsvector', procost => '100', prorettype => 'tsvector', + proargtypes => 'regconfig json jsonb', prosrc => 'json_to_tsvector_byid' }, + +{ oid => '3752', descr => 'trigger for automatic update of tsvector column', + proname => 'tsvector_update_trigger', proisstrict => 'f', provolatile => 'v', + prorettype => 'trigger', proargtypes => '', + prosrc => 'tsvector_update_trigger_byid' }, +{ oid => '3753', descr => 'trigger for automatic update of tsvector column', + proname => 'tsvector_update_trigger_column', proisstrict => 'f', + provolatile => 'v', prorettype => 'trigger', proargtypes => '', + prosrc => 'tsvector_update_trigger_bycolumn' }, + +{ oid => '3759', descr => 'get current tsearch configuration', + proname => 'get_current_ts_config', provolatile => 's', + prorettype => 'regconfig', proargtypes => '', + prosrc => 'get_current_ts_config' }, + +{ oid => '3736', descr => 'I/O', + proname => 'regconfigin', provolatile => 's', prorettype => 'regconfig', + proargtypes => 'cstring', prosrc => 'regconfigin' }, +{ oid => '3737', descr => 'I/O', + proname => 'regconfigout', provolatile => 's', prorettype => 'cstring', + proargtypes => 'regconfig', prosrc => 'regconfigout' }, +{ oid => '3738', descr => 'I/O', + proname => 'regconfigrecv', prorettype => 'regconfig', + proargtypes => 'internal', prosrc => 'regconfigrecv' }, +{ oid => '3739', descr => 'I/O', + proname => 'regconfigsend', prorettype => 'bytea', proargtypes => 'regconfig', + prosrc => 'regconfigsend' }, + +{ oid => '3771', descr => 'I/O', + proname => 'regdictionaryin', provolatile => 's', + prorettype => 'regdictionary', proargtypes => 'cstring', + prosrc => 'regdictionaryin' }, +{ oid => '3772', descr => 'I/O', + proname => 'regdictionaryout', provolatile => 's', prorettype => 'cstring', + proargtypes => 'regdictionary', prosrc => 'regdictionaryout' }, +{ oid => '3773', descr => 'I/O', + proname => 'regdictionaryrecv', prorettype => 'regdictionary', + proargtypes => 'internal', prosrc => 'regdictionaryrecv' }, +{ oid => '3774', descr => 'I/O', + proname => 'regdictionarysend', prorettype => 'bytea', + proargtypes => 'regdictionary', prosrc => 'regdictionarysend' }, + +# jsonb +{ oid => '3806', descr => 'I/O', + proname => 'jsonb_in', prorettype => 'jsonb', proargtypes => 'cstring', + prosrc => 'jsonb_in' }, +{ oid => '3805', descr => 'I/O', + proname => 'jsonb_recv', prorettype => 'jsonb', proargtypes => 'internal', + prosrc => 'jsonb_recv' }, +{ oid => '3804', descr => 'I/O', + proname => 'jsonb_out', prorettype => 'cstring', proargtypes => 'jsonb', + prosrc => 'jsonb_out' }, +{ oid => '3803', descr => 'I/O', + proname => 'jsonb_send', prorettype => 'bytea', proargtypes => 'jsonb', + prosrc => 'jsonb_send' }, + +{ oid => '3263', descr => 'map text array of key value pairs to jsonb object', + proname => 'jsonb_object', prorettype => 
'jsonb', proargtypes => '_text', + prosrc => 'jsonb_object' }, +{ oid => '3264', descr => 'map text array of key value pairs to jsonb object', + proname => 'jsonb_object', prorettype => 'jsonb', + proargtypes => '_text _text', prosrc => 'jsonb_object_two_arg' }, +{ oid => '3787', descr => 'map input to jsonb', + proname => 'to_jsonb', provolatile => 's', prorettype => 'jsonb', + proargtypes => 'anyelement', prosrc => 'to_jsonb' }, +{ oid => '3265', descr => 'jsonb aggregate transition function', + proname => 'jsonb_agg_transfn', proisstrict => 'f', provolatile => 's', + prorettype => 'internal', proargtypes => 'internal anyelement', + prosrc => 'jsonb_agg_transfn' }, +{ oid => '3266', descr => 'jsonb aggregate final function', + proname => 'jsonb_agg_finalfn', proisstrict => 'f', provolatile => 's', + prorettype => 'jsonb', proargtypes => 'internal', + prosrc => 'jsonb_agg_finalfn' }, +{ oid => '3267', descr => 'aggregate input into jsonb', + proname => 'jsonb_agg', prokind => 'a', proisstrict => 'f', + provolatile => 's', prorettype => 'jsonb', proargtypes => 'anyelement', + prosrc => 'aggregate_dummy' }, +{ oid => '3268', descr => 'jsonb object aggregate transition function', + proname => 'jsonb_object_agg_transfn', proisstrict => 'f', provolatile => 's', + prorettype => 'internal', proargtypes => 'internal any any', + prosrc => 'jsonb_object_agg_transfn' }, +{ oid => '3269', descr => 'jsonb object aggregate final function', + proname => 'jsonb_object_agg_finalfn', proisstrict => 'f', provolatile => 's', + prorettype => 'jsonb', proargtypes => 'internal', + prosrc => 'jsonb_object_agg_finalfn' }, +{ oid => '3270', descr => 'aggregate inputs into jsonb object', + proname => 'jsonb_object_agg', prokind => 'a', proisstrict => 'f', + prorettype => 'jsonb', proargtypes => 'any any', + prosrc => 'aggregate_dummy' }, +{ oid => '3271', descr => 'build a jsonb array from any inputs', + proname => 'jsonb_build_array', provariadic => 'any', proisstrict => 'f', + provolatile => 's', prorettype => 'jsonb', proargtypes => 'any', + proallargtypes => '{any}', proargmodes => '{v}', + prosrc => 'jsonb_build_array' }, +{ oid => '3272', descr => 'build an empty jsonb array', + proname => 'jsonb_build_array', proisstrict => 'f', provolatile => 's', + prorettype => 'jsonb', proargtypes => '', + prosrc => 'jsonb_build_array_noargs' }, +{ oid => '3273', + descr => 'build a jsonb object from pairwise key/value inputs', + proname => 'jsonb_build_object', provariadic => 'any', proisstrict => 'f', + provolatile => 's', prorettype => 'jsonb', proargtypes => 'any', + proallargtypes => '{any}', proargmodes => '{v}', + prosrc => 'jsonb_build_object' }, +{ oid => '3274', descr => 'build an empty jsonb object', + proname => 'jsonb_build_object', proisstrict => 'f', provolatile => 's', + prorettype => 'jsonb', proargtypes => '', + prosrc => 'jsonb_build_object_noargs' }, +{ oid => '3262', descr => 'remove object fields with null values from jsonb', + proname => 'jsonb_strip_nulls', prorettype => 'jsonb', proargtypes => 'jsonb', + prosrc => 'jsonb_strip_nulls' }, + +{ oid => '3478', + proname => 'jsonb_object_field', prorettype => 'jsonb', + proargtypes => 'jsonb text', proargnames => '{from_json, field_name}', + prosrc => 'jsonb_object_field' }, +{ oid => '3214', + proname => 'jsonb_object_field_text', prorettype => 'text', + proargtypes => 'jsonb text', proargnames => '{from_json, field_name}', + prosrc => 'jsonb_object_field_text' }, +{ oid => '3215', + proname => 'jsonb_array_element', prorettype => 'jsonb', + 
proargtypes => 'jsonb int4', proargnames => '{from_json, element_index}', + prosrc => 'jsonb_array_element' }, +{ oid => '3216', + proname => 'jsonb_array_element_text', prorettype => 'text', + proargtypes => 'jsonb int4', proargnames => '{from_json, element_index}', + prosrc => 'jsonb_array_element_text' }, +{ oid => '3217', descr => 'get value from jsonb with path elements', + proname => 'jsonb_extract_path', provariadic => 'text', prorettype => 'jsonb', + proargtypes => 'jsonb _text', proallargtypes => '{jsonb,_text}', + proargmodes => '{i,v}', proargnames => '{from_json,path_elems}', + prosrc => 'jsonb_extract_path' }, +{ oid => '3940', descr => 'get value from jsonb as text with path elements', + proname => 'jsonb_extract_path_text', provariadic => 'text', + prorettype => 'text', proargtypes => 'jsonb _text', + proallargtypes => '{jsonb,_text}', proargmodes => '{i,v}', + proargnames => '{from_json,path_elems}', + prosrc => 'jsonb_extract_path_text' }, +{ oid => '3219', descr => 'elements of a jsonb array', + proname => 'jsonb_array_elements', prorows => '100', proretset => 't', + prorettype => 'jsonb', proargtypes => 'jsonb', + proallargtypes => '{jsonb,jsonb}', proargmodes => '{i,o}', + proargnames => '{from_json,value}', prosrc => 'jsonb_array_elements' }, +{ oid => '3465', descr => 'elements of jsonb array', + proname => 'jsonb_array_elements_text', prorows => '100', proretset => 't', + prorettype => 'text', proargtypes => 'jsonb', + proallargtypes => '{jsonb,text}', proargmodes => '{i,o}', + proargnames => '{from_json,value}', prosrc => 'jsonb_array_elements_text' }, +{ oid => '3207', descr => 'length of jsonb array', + proname => 'jsonb_array_length', prorettype => 'int4', proargtypes => 'jsonb', + prosrc => 'jsonb_array_length' }, +{ oid => '3931', descr => 'get jsonb object keys', + proname => 'jsonb_object_keys', prorows => '100', proretset => 't', + prorettype => 'text', proargtypes => 'jsonb', prosrc => 'jsonb_object_keys' }, +{ oid => '3208', descr => 'key value pairs of a jsonb object', + proname => 'jsonb_each', prorows => '100', proretset => 't', + prorettype => 'record', proargtypes => 'jsonb', + proallargtypes => '{jsonb,text,jsonb}', proargmodes => '{i,o,o}', + proargnames => '{from_json,key,value}', prosrc => 'jsonb_each' }, +{ oid => '3932', descr => 'key value pairs of a jsonb object', + proname => 'jsonb_each_text', prorows => '100', proretset => 't', + prorettype => 'record', proargtypes => 'jsonb', + proallargtypes => '{jsonb,text,text}', proargmodes => '{i,o,o}', + proargnames => '{from_json,key,value}', prosrc => 'jsonb_each_text' }, +{ oid => '3209', descr => 'get record fields from a jsonb object', + proname => 'jsonb_populate_record', proisstrict => 'f', provolatile => 's', + prorettype => 'anyelement', proargtypes => 'anyelement jsonb', + prosrc => 'jsonb_populate_record' }, +{ oid => '3475', + descr => 'get set of records with fields from a jsonb array of objects', + proname => 'jsonb_populate_recordset', prorows => '100', proisstrict => 'f', + proretset => 't', provolatile => 's', prorettype => 'anyelement', + proargtypes => 'anyelement jsonb', prosrc => 'jsonb_populate_recordset' }, +{ oid => '3490', descr => 'get record fields from a jsonb object', + proname => 'jsonb_to_record', provolatile => 's', prorettype => 'record', + proargtypes => 'jsonb', prosrc => 'jsonb_to_record' }, +{ oid => '3491', + descr => 'get set of records with fields from a jsonb array of objects', + proname => 'jsonb_to_recordset', prorows => '100', proisstrict => 'f', + 
proretset => 't', provolatile => 's', prorettype => 'record', + proargtypes => 'jsonb', prosrc => 'jsonb_to_recordset' }, +{ oid => '3210', descr => 'get the type of a jsonb value', + proname => 'jsonb_typeof', prorettype => 'text', proargtypes => 'jsonb', + prosrc => 'jsonb_typeof' }, +{ oid => '4038', + proname => 'jsonb_ne', prorettype => 'bool', proargtypes => 'jsonb jsonb', + prosrc => 'jsonb_ne' }, +{ oid => '4039', + proname => 'jsonb_lt', prorettype => 'bool', proargtypes => 'jsonb jsonb', + prosrc => 'jsonb_lt' }, +{ oid => '4040', + proname => 'jsonb_gt', prorettype => 'bool', proargtypes => 'jsonb jsonb', + prosrc => 'jsonb_gt' }, +{ oid => '4041', + proname => 'jsonb_le', prorettype => 'bool', proargtypes => 'jsonb jsonb', + prosrc => 'jsonb_le' }, +{ oid => '4042', + proname => 'jsonb_ge', prorettype => 'bool', proargtypes => 'jsonb jsonb', + prosrc => 'jsonb_ge' }, +{ oid => '4043', + proname => 'jsonb_eq', prorettype => 'bool', proargtypes => 'jsonb jsonb', + prosrc => 'jsonb_eq' }, +{ oid => '4044', descr => 'less-equal-greater', + proname => 'jsonb_cmp', prorettype => 'int4', proargtypes => 'jsonb jsonb', + prosrc => 'jsonb_cmp' }, +{ oid => '4045', descr => 'hash', + proname => 'jsonb_hash', prorettype => 'int4', proargtypes => 'jsonb', + prosrc => 'jsonb_hash' }, +{ oid => '3416', descr => 'hash', + proname => 'jsonb_hash_extended', prorettype => 'int8', + proargtypes => 'jsonb int8', prosrc => 'jsonb_hash_extended' }, +{ oid => '4046', + proname => 'jsonb_contains', prorettype => 'bool', + proargtypes => 'jsonb jsonb', prosrc => 'jsonb_contains' }, +{ oid => '4047', + proname => 'jsonb_exists', prorettype => 'bool', proargtypes => 'jsonb text', + prosrc => 'jsonb_exists' }, +{ oid => '4048', + proname => 'jsonb_exists_any', prorettype => 'bool', + proargtypes => 'jsonb _text', prosrc => 'jsonb_exists_any' }, +{ oid => '4049', + proname => 'jsonb_exists_all', prorettype => 'bool', + proargtypes => 'jsonb _text', prosrc => 'jsonb_exists_all' }, +{ oid => '4050', + proname => 'jsonb_contained', prorettype => 'bool', + proargtypes => 'jsonb jsonb', prosrc => 'jsonb_contained' }, +{ oid => '3480', descr => 'GIN support', + proname => 'gin_compare_jsonb', prorettype => 'int4', + proargtypes => 'text text', prosrc => 'gin_compare_jsonb' }, +{ oid => '3482', descr => 'GIN support', + proname => 'gin_extract_jsonb', prorettype => 'internal', + proargtypes => 'jsonb internal internal', prosrc => 'gin_extract_jsonb' }, +{ oid => '3483', descr => 'GIN support', + proname => 'gin_extract_jsonb_query', prorettype => 'internal', + proargtypes => 'jsonb internal int2 internal internal internal internal', + prosrc => 'gin_extract_jsonb_query' }, +{ oid => '3484', descr => 'GIN support', + proname => 'gin_consistent_jsonb', prorettype => 'bool', + proargtypes => 'internal int2 jsonb int4 internal internal internal internal', + prosrc => 'gin_consistent_jsonb' }, +{ oid => '3488', descr => 'GIN support', + proname => 'gin_triconsistent_jsonb', prorettype => 'char', + proargtypes => 'internal int2 jsonb int4 internal internal internal', + prosrc => 'gin_triconsistent_jsonb' }, +{ oid => '3485', descr => 'GIN support', + proname => 'gin_extract_jsonb_path', prorettype => 'internal', + proargtypes => 'jsonb internal internal', + prosrc => 'gin_extract_jsonb_path' }, +{ oid => '3486', descr => 'GIN support', + proname => 'gin_extract_jsonb_query_path', prorettype => 'internal', + proargtypes => 'jsonb internal int2 internal internal internal internal', + prosrc => 
'gin_extract_jsonb_query_path' }, +{ oid => '3487', descr => 'GIN support', + proname => 'gin_consistent_jsonb_path', prorettype => 'bool', + proargtypes => 'internal int2 jsonb int4 internal internal internal internal', + prosrc => 'gin_consistent_jsonb_path' }, +{ oid => '3489', descr => 'GIN support', + proname => 'gin_triconsistent_jsonb_path', prorettype => 'char', + proargtypes => 'internal int2 jsonb int4 internal internal internal', + prosrc => 'gin_triconsistent_jsonb_path' }, +{ oid => '3301', + proname => 'jsonb_concat', prorettype => 'jsonb', + proargtypes => 'jsonb jsonb', prosrc => 'jsonb_concat' }, +{ oid => '3302', + proname => 'jsonb_delete', prorettype => 'jsonb', proargtypes => 'jsonb text', + prosrc => 'jsonb_delete' }, +{ oid => '3303', + proname => 'jsonb_delete', prorettype => 'jsonb', proargtypes => 'jsonb int4', + prosrc => 'jsonb_delete_idx' }, +{ oid => '3343', + proname => 'jsonb_delete', provariadic => 'text', prorettype => 'jsonb', + proargtypes => 'jsonb _text', proallargtypes => '{jsonb,_text}', + proargmodes => '{i,v}', proargnames => '{from_json,path_elems}', + prosrc => 'jsonb_delete_array' }, +{ oid => '3304', + proname => 'jsonb_delete_path', prorettype => 'jsonb', + proargtypes => 'jsonb _text', prosrc => 'jsonb_delete_path' }, +{ oid => '3305', descr => 'Set part of a jsonb', + proname => 'jsonb_set', prorettype => 'jsonb', + proargtypes => 'jsonb _text jsonb bool', prosrc => 'jsonb_set' }, +{ oid => '3306', descr => 'Indented text from jsonb', + proname => 'jsonb_pretty', prorettype => 'text', proargtypes => 'jsonb', + prosrc => 'jsonb_pretty' }, +{ oid => '3579', descr => 'Insert value into a jsonb', + proname => 'jsonb_insert', prorettype => 'jsonb', + proargtypes => 'jsonb _text jsonb bool', prosrc => 'jsonb_insert' }, + +# txid +{ oid => '2939', descr => 'I/O', + proname => 'txid_snapshot_in', prorettype => 'txid_snapshot', + proargtypes => 'cstring', prosrc => 'txid_snapshot_in' }, +{ oid => '2940', descr => 'I/O', + proname => 'txid_snapshot_out', prorettype => 'cstring', + proargtypes => 'txid_snapshot', prosrc => 'txid_snapshot_out' }, +{ oid => '2941', descr => 'I/O', + proname => 'txid_snapshot_recv', prorettype => 'txid_snapshot', + proargtypes => 'internal', prosrc => 'txid_snapshot_recv' }, +{ oid => '2942', descr => 'I/O', + proname => 'txid_snapshot_send', prorettype => 'bytea', + proargtypes => 'txid_snapshot', prosrc => 'txid_snapshot_send' }, +{ oid => '2943', descr => 'get current transaction ID', + proname => 'txid_current', provolatile => 's', proparallel => 'u', + prorettype => 'int8', proargtypes => '', prosrc => 'txid_current' }, +{ oid => '3348', descr => 'get current transaction ID', + proname => 'txid_current_if_assigned', provolatile => 's', proparallel => 'u', + prorettype => 'int8', proargtypes => '', + prosrc => 'txid_current_if_assigned' }, +{ oid => '2944', descr => 'get current snapshot', + proname => 'txid_current_snapshot', provolatile => 's', + prorettype => 'txid_snapshot', proargtypes => '', + prosrc => 'txid_current_snapshot' }, +{ oid => '2945', descr => 'get xmin of snapshot', + proname => 'txid_snapshot_xmin', prorettype => 'int8', + proargtypes => 'txid_snapshot', prosrc => 'txid_snapshot_xmin' }, +{ oid => '2946', descr => 'get xmax of snapshot', + proname => 'txid_snapshot_xmax', prorettype => 'int8', + proargtypes => 'txid_snapshot', prosrc => 'txid_snapshot_xmax' }, +{ oid => '2947', descr => 'get set of in-progress txids in snapshot', + proname => 'txid_snapshot_xip', prorows => '50', proretset => 't', 
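The jsonb construction and mutation entries above (jsonb_build_object, jsonb_set, jsonb_insert, jsonb_pretty, jsonb_extract_path_text) correspond directly to SQL-level calls. A small, purely illustrative sketch using made-up literals:

  -- build, update and pretty-print a jsonb value (last argument is create_missing)
  SELECT jsonb_pretty(
           jsonb_set(jsonb_build_object('user', 'alice', 'tags', '["a","b"]'::jsonb),
                     '{tags,0}', '"z"'::jsonb, true));

  -- insert a new element before array position 1 (jsonb_insert, OID 3579)
  SELECT jsonb_insert('["a","c"]'::jsonb, '{1}', '"b"'::jsonb);

  -- walk a path and return the result as text (jsonb_extract_path_text, OID 3940)
  SELECT jsonb_extract_path_text('{"a": {"b": 42}}'::jsonb, 'a', 'b');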
+ prorettype => 'int8', proargtypes => 'txid_snapshot', + prosrc => 'txid_snapshot_xip' }, +{ oid => '2948', descr => 'is txid visible in snapshot?', + proname => 'txid_visible_in_snapshot', prorettype => 'bool', + proargtypes => 'int8 txid_snapshot', prosrc => 'txid_visible_in_snapshot' }, +{ oid => '3360', descr => 'commit status of transaction', + proname => 'txid_status', provolatile => 'v', prorettype => 'text', + proargtypes => 'int8', prosrc => 'txid_status' }, + +# record comparison using normal comparison rules +{ oid => '2981', + proname => 'record_eq', prorettype => 'bool', proargtypes => 'record record', + prosrc => 'record_eq' }, +{ oid => '2982', + proname => 'record_ne', prorettype => 'bool', proargtypes => 'record record', + prosrc => 'record_ne' }, +{ oid => '2983', + proname => 'record_lt', prorettype => 'bool', proargtypes => 'record record', + prosrc => 'record_lt' }, +{ oid => '2984', + proname => 'record_gt', prorettype => 'bool', proargtypes => 'record record', + prosrc => 'record_gt' }, +{ oid => '2985', + proname => 'record_le', prorettype => 'bool', proargtypes => 'record record', + prosrc => 'record_le' }, +{ oid => '2986', + proname => 'record_ge', prorettype => 'bool', proargtypes => 'record record', + prosrc => 'record_ge' }, +{ oid => '2987', descr => 'less-equal-greater', + proname => 'btrecordcmp', prorettype => 'int4', + proargtypes => 'record record', prosrc => 'btrecordcmp' }, + +# record comparison using raw byte images +{ oid => '3181', + proname => 'record_image_eq', prorettype => 'bool', + proargtypes => 'record record', prosrc => 'record_image_eq' }, +{ oid => '3182', + proname => 'record_image_ne', prorettype => 'bool', + proargtypes => 'record record', prosrc => 'record_image_ne' }, +{ oid => '3183', + proname => 'record_image_lt', prorettype => 'bool', + proargtypes => 'record record', prosrc => 'record_image_lt' }, +{ oid => '3184', + proname => 'record_image_gt', prorettype => 'bool', + proargtypes => 'record record', prosrc => 'record_image_gt' }, +{ oid => '3185', + proname => 'record_image_le', prorettype => 'bool', + proargtypes => 'record record', prosrc => 'record_image_le' }, +{ oid => '3186', + proname => 'record_image_ge', prorettype => 'bool', + proargtypes => 'record record', prosrc => 'record_image_ge' }, +{ oid => '3187', descr => 'less-equal-greater based on byte images', + proname => 'btrecordimagecmp', prorettype => 'int4', + proargtypes => 'record record', prosrc => 'btrecordimagecmp' }, + +# Extensions +{ oid => '3082', descr => 'list available extensions', + proname => 'pg_available_extensions', procost => '10', prorows => '100', + proretset => 't', provolatile => 's', prorettype => 'record', + proargtypes => '', proallargtypes => '{name,text,text}', + proargmodes => '{o,o,o}', proargnames => '{name,default_version,comment}', + prosrc => 'pg_available_extensions' }, +{ oid => '3083', descr => 'list available extension versions', + proname => 'pg_available_extension_versions', procost => '10', + prorows => '100', proretset => 't', provolatile => 's', + prorettype => 'record', proargtypes => '', + proallargtypes => '{name,text,bool,bool,name,_name,text}', + proargmodes => '{o,o,o,o,o,o,o}', + proargnames => '{name,version,superuser,relocatable,schema,requires,comment}', + prosrc => 'pg_available_extension_versions' }, +{ oid => '3084', descr => 'list an extension\'s version update paths', + proname => 'pg_extension_update_paths', procost => '10', prorows => '100', + proretset => 't', provolatile => 's', prorettype => 'record', 
+ proargtypes => 'name', proallargtypes => '{name,text,text,text}', + proargmodes => '{i,o,o,o}', proargnames => '{name,source,target,path}', + prosrc => 'pg_extension_update_paths' }, +{ oid => '3086', + descr => 'flag an extension\'s table contents to be emitted by pg_dump', + proname => 'pg_extension_config_dump', provolatile => 'v', proparallel => 'u', + prorettype => 'void', proargtypes => 'regclass text', + prosrc => 'pg_extension_config_dump' }, + +# SQL-spec window functions +{ oid => '3100', descr => 'row number within partition', + proname => 'row_number', prokind => 'w', proisstrict => 'f', + prorettype => 'int8', proargtypes => '', prosrc => 'window_row_number' }, +{ oid => '3101', descr => 'integer rank with gaps', + proname => 'rank', prokind => 'w', proisstrict => 'f', prorettype => 'int8', + proargtypes => '', prosrc => 'window_rank' }, +{ oid => '3102', descr => 'integer rank without gaps', + proname => 'dense_rank', prokind => 'w', proisstrict => 'f', + prorettype => 'int8', proargtypes => '', prosrc => 'window_dense_rank' }, +{ oid => '3103', descr => 'fractional rank within partition', + proname => 'percent_rank', prokind => 'w', proisstrict => 'f', + prorettype => 'float8', proargtypes => '', prosrc => 'window_percent_rank' }, +{ oid => '3104', descr => 'fractional row number within partition', + proname => 'cume_dist', prokind => 'w', proisstrict => 'f', + prorettype => 'float8', proargtypes => '', prosrc => 'window_cume_dist' }, +{ oid => '3105', descr => 'split rows into N groups', + proname => 'ntile', prokind => 'w', prorettype => 'int4', + proargtypes => 'int4', prosrc => 'window_ntile' }, +{ oid => '3106', descr => 'fetch the preceding row value', + proname => 'lag', prokind => 'w', prorettype => 'anyelement', + proargtypes => 'anyelement', prosrc => 'window_lag' }, +{ oid => '3107', descr => 'fetch the Nth preceding row value', + proname => 'lag', prokind => 'w', prorettype => 'anyelement', + proargtypes => 'anyelement int4', prosrc => 'window_lag_with_offset' }, +{ oid => '3108', descr => 'fetch the Nth preceding row value with default', + proname => 'lag', prokind => 'w', prorettype => 'anyelement', + proargtypes => 'anyelement int4 anyelement', + prosrc => 'window_lag_with_offset_and_default' }, +{ oid => '3109', descr => 'fetch the following row value', + proname => 'lead', prokind => 'w', prorettype => 'anyelement', + proargtypes => 'anyelement', prosrc => 'window_lead' }, +{ oid => '3110', descr => 'fetch the Nth following row value', + proname => 'lead', prokind => 'w', prorettype => 'anyelement', + proargtypes => 'anyelement int4', prosrc => 'window_lead_with_offset' }, +{ oid => '3111', descr => 'fetch the Nth following row value with default', + proname => 'lead', prokind => 'w', prorettype => 'anyelement', + proargtypes => 'anyelement int4 anyelement', + prosrc => 'window_lead_with_offset_and_default' }, +{ oid => '3112', descr => 'fetch the first row value', + proname => 'first_value', prokind => 'w', prorettype => 'anyelement', + proargtypes => 'anyelement', prosrc => 'window_first_value' }, +{ oid => '3113', descr => 'fetch the last row value', + proname => 'last_value', prokind => 'w', prorettype => 'anyelement', + proargtypes => 'anyelement', prosrc => 'window_last_value' }, +{ oid => '3114', descr => 'fetch the Nth row value', + proname => 'nth_value', prokind => 'w', prorettype => 'anyelement', + proargtypes => 'anyelement int4', prosrc => 'window_nth_value' }, + +# functions for range types +{ oid => '3832', descr => 'I/O', + proname => 
'anyrange_in', provolatile => 's', prorettype => 'anyrange', + proargtypes => 'cstring oid int4', prosrc => 'anyrange_in' }, +{ oid => '3833', descr => 'I/O', + proname => 'anyrange_out', provolatile => 's', prorettype => 'cstring', + proargtypes => 'anyrange', prosrc => 'anyrange_out' }, +{ oid => '3834', descr => 'I/O', + proname => 'range_in', provolatile => 's', prorettype => 'anyrange', + proargtypes => 'cstring oid int4', prosrc => 'range_in' }, +{ oid => '3835', descr => 'I/O', + proname => 'range_out', provolatile => 's', prorettype => 'cstring', + proargtypes => 'anyrange', prosrc => 'range_out' }, +{ oid => '3836', descr => 'I/O', + proname => 'range_recv', provolatile => 's', prorettype => 'anyrange', + proargtypes => 'internal oid int4', prosrc => 'range_recv' }, +{ oid => '3837', descr => 'I/O', + proname => 'range_send', provolatile => 's', prorettype => 'bytea', + proargtypes => 'anyrange', prosrc => 'range_send' }, +{ oid => '3848', descr => 'lower bound of range', + proname => 'lower', prorettype => 'anyelement', proargtypes => 'anyrange', + prosrc => 'range_lower' }, +{ oid => '3849', descr => 'upper bound of range', + proname => 'upper', prorettype => 'anyelement', proargtypes => 'anyrange', + prosrc => 'range_upper' }, +{ oid => '3850', descr => 'is the range empty?', + proname => 'isempty', prorettype => 'bool', proargtypes => 'anyrange', + prosrc => 'range_empty' }, +{ oid => '3851', descr => 'is the range\'s lower bound inclusive?', + proname => 'lower_inc', prorettype => 'bool', proargtypes => 'anyrange', + prosrc => 'range_lower_inc' }, +{ oid => '3852', descr => 'is the range\'s upper bound inclusive?', + proname => 'upper_inc', prorettype => 'bool', proargtypes => 'anyrange', + prosrc => 'range_upper_inc' }, +{ oid => '3853', descr => 'is the range\'s lower bound infinite?', + proname => 'lower_inf', prorettype => 'bool', proargtypes => 'anyrange', + prosrc => 'range_lower_inf' }, +{ oid => '3854', descr => 'is the range\'s upper bound infinite?', + proname => 'upper_inf', prorettype => 'bool', proargtypes => 'anyrange', + prosrc => 'range_upper_inf' }, +{ oid => '3855', + proname => 'range_eq', prorettype => 'bool', + proargtypes => 'anyrange anyrange', prosrc => 'range_eq' }, +{ oid => '3856', + proname => 'range_ne', prorettype => 'bool', + proargtypes => 'anyrange anyrange', prosrc => 'range_ne' }, +{ oid => '3857', + proname => 'range_overlaps', prorettype => 'bool', + proargtypes => 'anyrange anyrange', prosrc => 'range_overlaps' }, +{ oid => '3858', + proname => 'range_contains_elem', prorettype => 'bool', + proargtypes => 'anyrange anyelement', prosrc => 'range_contains_elem' }, +{ oid => '3859', + proname => 'range_contains', prorettype => 'bool', + proargtypes => 'anyrange anyrange', prosrc => 'range_contains' }, +{ oid => '3860', + proname => 'elem_contained_by_range', prorettype => 'bool', + proargtypes => 'anyelement anyrange', prosrc => 'elem_contained_by_range' }, +{ oid => '3861', + proname => 'range_contained_by', prorettype => 'bool', + proargtypes => 'anyrange anyrange', prosrc => 'range_contained_by' }, +{ oid => '3862', + proname => 'range_adjacent', prorettype => 'bool', + proargtypes => 'anyrange anyrange', prosrc => 'range_adjacent' }, +{ oid => '3863', + proname => 'range_before', prorettype => 'bool', + proargtypes => 'anyrange anyrange', prosrc => 'range_before' }, +{ oid => '3864', + proname => 'range_after', prorettype => 'bool', + proargtypes => 'anyrange anyrange', prosrc => 'range_after' }, +{ oid => '3865', + proname => 
'range_overleft', prorettype => 'bool', + proargtypes => 'anyrange anyrange', prosrc => 'range_overleft' }, +{ oid => '3866', + proname => 'range_overright', prorettype => 'bool', + proargtypes => 'anyrange anyrange', prosrc => 'range_overright' }, +{ oid => '3867', + proname => 'range_union', prorettype => 'anyrange', + proargtypes => 'anyrange anyrange', prosrc => 'range_union' }, +{ oid => '4057', + descr => 'the smallest range which includes both of the given ranges', + proname => 'range_merge', prorettype => 'anyrange', + proargtypes => 'anyrange anyrange', prosrc => 'range_merge' }, +{ oid => '3868', + proname => 'range_intersect', prorettype => 'anyrange', + proargtypes => 'anyrange anyrange', prosrc => 'range_intersect' }, +{ oid => '3869', + proname => 'range_minus', prorettype => 'anyrange', + proargtypes => 'anyrange anyrange', prosrc => 'range_minus' }, +{ oid => '3870', descr => 'less-equal-greater', + proname => 'range_cmp', prorettype => 'int4', + proargtypes => 'anyrange anyrange', prosrc => 'range_cmp' }, +{ oid => '3871', + proname => 'range_lt', prorettype => 'bool', + proargtypes => 'anyrange anyrange', prosrc => 'range_lt' }, +{ oid => '3872', + proname => 'range_le', prorettype => 'bool', + proargtypes => 'anyrange anyrange', prosrc => 'range_le' }, +{ oid => '3873', + proname => 'range_ge', prorettype => 'bool', + proargtypes => 'anyrange anyrange', prosrc => 'range_ge' }, +{ oid => '3874', + proname => 'range_gt', prorettype => 'bool', + proargtypes => 'anyrange anyrange', prosrc => 'range_gt' }, +{ oid => '3875', descr => 'GiST support', + proname => 'range_gist_consistent', prorettype => 'bool', + proargtypes => 'internal anyrange int2 oid internal', + prosrc => 'range_gist_consistent' }, +{ oid => '3876', descr => 'GiST support', + proname => 'range_gist_union', prorettype => 'anyrange', + proargtypes => 'internal internal', prosrc => 'range_gist_union' }, +{ oid => '3879', descr => 'GiST support', + proname => 'range_gist_penalty', prorettype => 'internal', + proargtypes => 'internal internal internal', prosrc => 'range_gist_penalty' }, +{ oid => '3880', descr => 'GiST support', + proname => 'range_gist_picksplit', prorettype => 'internal', + proargtypes => 'internal internal', prosrc => 'range_gist_picksplit' }, +{ oid => '3881', descr => 'GiST support', + proname => 'range_gist_same', prorettype => 'internal', + proargtypes => 'anyrange anyrange internal', prosrc => 'range_gist_same' }, +{ oid => '3902', descr => 'hash a range', + proname => 'hash_range', prorettype => 'int4', proargtypes => 'anyrange', + prosrc => 'hash_range' }, +{ oid => '3417', descr => 'hash a range', + proname => 'hash_range_extended', prorettype => 'int8', + proargtypes => 'anyrange int8', prosrc => 'hash_range_extended' }, +{ oid => '3916', descr => 'range typanalyze', + proname => 'range_typanalyze', provolatile => 's', prorettype => 'bool', + proargtypes => 'internal', prosrc => 'range_typanalyze' }, +{ oid => '3169', descr => 'restriction selectivity for range operators', + proname => 'rangesel', provolatile => 's', prorettype => 'float8', + proargtypes => 'internal oid internal int4', prosrc => 'rangesel' }, + +{ oid => '3914', descr => 'convert an int4 range to canonical form', + proname => 'int4range_canonical', prorettype => 'int4range', + proargtypes => 'int4range', prosrc => 'int4range_canonical' }, +{ oid => '3928', descr => 'convert an int8 range to canonical form', + proname => 'int8range_canonical', prorettype => 'int8range', + proargtypes => 'int8range', prosrc => 
'int8range_canonical' },
+{ oid => '3915', descr => 'convert a date range to canonical form',
+  proname => 'daterange_canonical', prorettype => 'daterange',
+  proargtypes => 'daterange', prosrc => 'daterange_canonical' },
+{ oid => '3922', descr => 'float8 difference of two int4 values',
+  proname => 'int4range_subdiff', prorettype => 'float8',
+  proargtypes => 'int4 int4', prosrc => 'int4range_subdiff' },
+{ oid => '3923', descr => 'float8 difference of two int8 values',
+  proname => 'int8range_subdiff', prorettype => 'float8',
+  proargtypes => 'int8 int8', prosrc => 'int8range_subdiff' },
+{ oid => '3924', descr => 'float8 difference of two numeric values',
+  proname => 'numrange_subdiff', prorettype => 'float8',
+  proargtypes => 'numeric numeric', prosrc => 'numrange_subdiff' },
+{ oid => '3925', descr => 'float8 difference of two date values',
+  proname => 'daterange_subdiff', prorettype => 'float8',
+  proargtypes => 'date date', prosrc => 'daterange_subdiff' },
+{ oid => '3929', descr => 'float8 difference of two timestamp values',
+  proname => 'tsrange_subdiff', prorettype => 'float8',
+  proargtypes => 'timestamp timestamp', prosrc => 'tsrange_subdiff' },
+{ oid => '3930',
+  descr => 'float8 difference of two timestamp with time zone values',
+  proname => 'tstzrange_subdiff', prorettype => 'float8',
+  proargtypes => 'timestamptz timestamptz', prosrc => 'tstzrange_subdiff' },
+
+{ oid => '3840', descr => 'int4range constructor',
+  proname => 'int4range', proisstrict => 'f', prorettype => 'int4range',
+  proargtypes => 'int4 int4', prosrc => 'range_constructor2' },
+{ oid => '3841', descr => 'int4range constructor',
+  proname => 'int4range', proisstrict => 'f', prorettype => 'int4range',
+  proargtypes => 'int4 int4 text', prosrc => 'range_constructor3' },
+{ oid => '3844', descr => 'numrange constructor',
+  proname => 'numrange', proisstrict => 'f', prorettype => 'numrange',
+  proargtypes => 'numeric numeric', prosrc => 'range_constructor2' },
+{ oid => '3845', descr => 'numrange constructor',
+  proname => 'numrange', proisstrict => 'f', prorettype => 'numrange',
+  proargtypes => 'numeric numeric text', prosrc => 'range_constructor3' },
+{ oid => '3933', descr => 'tsrange constructor',
+  proname => 'tsrange', proisstrict => 'f', prorettype => 'tsrange',
+  proargtypes => 'timestamp timestamp', prosrc => 'range_constructor2' },
+{ oid => '3934', descr => 'tsrange constructor',
+  proname => 'tsrange', proisstrict => 'f', prorettype => 'tsrange',
+  proargtypes => 'timestamp timestamp text', prosrc => 'range_constructor3' },
+{ oid => '3937', descr => 'tstzrange constructor',
+  proname => 'tstzrange', proisstrict => 'f', prorettype => 'tstzrange',
+  proargtypes => 'timestamptz timestamptz', prosrc => 'range_constructor2' },
+{ oid => '3938', descr => 'tstzrange constructor',
+  proname => 'tstzrange', proisstrict => 'f', prorettype => 'tstzrange',
+  proargtypes => 'timestamptz timestamptz text',
+  prosrc => 'range_constructor3' },
+{ oid => '3941', descr => 'daterange constructor',
+  proname => 'daterange', proisstrict => 'f', prorettype => 'daterange',
+  proargtypes => 'date date', prosrc => 'range_constructor2' },
+{ oid => '3942', descr => 'daterange constructor',
+  proname => 'daterange', proisstrict => 'f', prorettype => 'daterange',
+  proargtypes => 'date date text', prosrc => 'range_constructor3' },
+{ oid => '3945', descr => 'int8range constructor',
+  proname => 'int8range', proisstrict => 'f', prorettype => 'int8range',
+  proargtypes => 'int8 int8', prosrc => 'range_constructor2' },
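All of the constructor entries above share range_constructor2 and range_constructor3 as their C implementations; the optional third argument is the bounds specification. A brief, purely illustrative sketch of the SQL-level behaviour, together with a few of the range support functions cataloged in this section (values are made up):

  -- two-argument form builds a half-open range; three-argument form adds bound flags
  SELECT int4range(1, 10)        AS half_open,   -- [1,10)
         int4range(1, 10, '[]')  AS closed,      -- canonicalized to [1,11)
         daterange(date '2018-01-01', date '2018-12-31', '(]') AS year_range;

  -- lower/upper accessors, bound tests and range_merge from the entries above
  SELECT lower(numrange(1.5, 2.5)),
         upper_inc(numrange(1.5, 2.5, '[]')),
         range_merge(int4range(1, 3), int4range(8, 10));   -- [1,10)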
+{ oid => '3946', descr => 'int8range constructor', + proname => 'int8range', proisstrict => 'f', prorettype => 'int8range', + proargtypes => 'int8 int8 text', prosrc => 'range_constructor3' }, + +# date, time, timestamp constructors +{ oid => '3846', descr => 'construct date', + proname => 'make_date', prorettype => 'date', proargtypes => 'int4 int4 int4', + proargnames => '{year,month,day}', prosrc => 'make_date' }, +{ oid => '3847', descr => 'construct time', + proname => 'make_time', prorettype => 'time', + proargtypes => 'int4 int4 float8', proargnames => '{hour,min,sec}', + prosrc => 'make_time' }, +{ oid => '3461', descr => 'construct timestamp', + proname => 'make_timestamp', prorettype => 'timestamp', + proargtypes => 'int4 int4 int4 int4 int4 float8', + proargnames => '{year,month,mday,hour,min,sec}', prosrc => 'make_timestamp' }, +{ oid => '3462', descr => 'construct timestamp with time zone', + proname => 'make_timestamptz', provolatile => 's', + prorettype => 'timestamptz', proargtypes => 'int4 int4 int4 int4 int4 float8', + proargnames => '{year,month,mday,hour,min,sec}', + prosrc => 'make_timestamptz' }, +{ oid => '3463', descr => 'construct timestamp with time zone', + proname => 'make_timestamptz', provolatile => 's', + prorettype => 'timestamptz', + proargtypes => 'int4 int4 int4 int4 int4 float8 text', + proargnames => '{year,month,mday,hour,min,sec,timezone}', + prosrc => 'make_timestamptz_at_timezone' }, +{ oid => '3464', descr => 'construct interval', + proname => 'make_interval', prorettype => 'interval', + proargtypes => 'int4 int4 int4 int4 int4 int4 float8', + proargnames => '{years,months,weeks,days,hours,mins,secs}', + prosrc => 'make_interval' }, + +# spgist opclasses +{ oid => '4018', descr => 'SP-GiST support for quad tree over point', + proname => 'spg_quad_config', prorettype => 'void', + proargtypes => 'internal internal', prosrc => 'spg_quad_config' }, +{ oid => '4019', descr => 'SP-GiST support for quad tree over point', + proname => 'spg_quad_choose', prorettype => 'void', + proargtypes => 'internal internal', prosrc => 'spg_quad_choose' }, +{ oid => '4020', descr => 'SP-GiST support for quad tree over point', + proname => 'spg_quad_picksplit', prorettype => 'void', + proargtypes => 'internal internal', prosrc => 'spg_quad_picksplit' }, +{ oid => '4021', descr => 'SP-GiST support for quad tree over point', + proname => 'spg_quad_inner_consistent', prorettype => 'void', + proargtypes => 'internal internal', prosrc => 'spg_quad_inner_consistent' }, +{ oid => '4022', + descr => 'SP-GiST support for quad tree and k-d tree over point', + proname => 'spg_quad_leaf_consistent', prorettype => 'bool', + proargtypes => 'internal internal', prosrc => 'spg_quad_leaf_consistent' }, + +{ oid => '4023', descr => 'SP-GiST support for k-d tree over point', + proname => 'spg_kd_config', prorettype => 'void', + proargtypes => 'internal internal', prosrc => 'spg_kd_config' }, +{ oid => '4024', descr => 'SP-GiST support for k-d tree over point', + proname => 'spg_kd_choose', prorettype => 'void', + proargtypes => 'internal internal', prosrc => 'spg_kd_choose' }, +{ oid => '4025', descr => 'SP-GiST support for k-d tree over point', + proname => 'spg_kd_picksplit', prorettype => 'void', + proargtypes => 'internal internal', prosrc => 'spg_kd_picksplit' }, +{ oid => '4026', descr => 'SP-GiST support for k-d tree over point', + proname => 'spg_kd_inner_consistent', prorettype => 'void', + proargtypes => 'internal internal', prosrc => 'spg_kd_inner_consistent' }, + +{ oid => 
'4027', descr => 'SP-GiST support for radix tree over text', + proname => 'spg_text_config', prorettype => 'void', + proargtypes => 'internal internal', prosrc => 'spg_text_config' }, +{ oid => '4028', descr => 'SP-GiST support for radix tree over text', + proname => 'spg_text_choose', prorettype => 'void', + proargtypes => 'internal internal', prosrc => 'spg_text_choose' }, +{ oid => '4029', descr => 'SP-GiST support for radix tree over text', + proname => 'spg_text_picksplit', prorettype => 'void', + proargtypes => 'internal internal', prosrc => 'spg_text_picksplit' }, +{ oid => '4030', descr => 'SP-GiST support for radix tree over text', + proname => 'spg_text_inner_consistent', prorettype => 'void', + proargtypes => 'internal internal', prosrc => 'spg_text_inner_consistent' }, +{ oid => '4031', descr => 'SP-GiST support for radix tree over text', + proname => 'spg_text_leaf_consistent', prorettype => 'bool', + proargtypes => 'internal internal', prosrc => 'spg_text_leaf_consistent' }, + +{ oid => '3469', descr => 'SP-GiST support for quad tree over range', + proname => 'spg_range_quad_config', prorettype => 'void', + proargtypes => 'internal internal', prosrc => 'spg_range_quad_config' }, +{ oid => '3470', descr => 'SP-GiST support for quad tree over range', + proname => 'spg_range_quad_choose', prorettype => 'void', + proargtypes => 'internal internal', prosrc => 'spg_range_quad_choose' }, +{ oid => '3471', descr => 'SP-GiST support for quad tree over range', + proname => 'spg_range_quad_picksplit', prorettype => 'void', + proargtypes => 'internal internal', prosrc => 'spg_range_quad_picksplit' }, +{ oid => '3472', descr => 'SP-GiST support for quad tree over range', + proname => 'spg_range_quad_inner_consistent', prorettype => 'void', + proargtypes => 'internal internal', + prosrc => 'spg_range_quad_inner_consistent' }, +{ oid => '3473', descr => 'SP-GiST support for quad tree over range', + proname => 'spg_range_quad_leaf_consistent', prorettype => 'bool', + proargtypes => 'internal internal', + prosrc => 'spg_range_quad_leaf_consistent' }, + +{ oid => '5012', descr => 'SP-GiST support for quad tree over box', + proname => 'spg_box_quad_config', prorettype => 'void', + proargtypes => 'internal internal', prosrc => 'spg_box_quad_config' }, +{ oid => '5013', descr => 'SP-GiST support for quad tree over box', + proname => 'spg_box_quad_choose', prorettype => 'void', + proargtypes => 'internal internal', prosrc => 'spg_box_quad_choose' }, +{ oid => '5014', descr => 'SP-GiST support for quad tree over box', + proname => 'spg_box_quad_picksplit', prorettype => 'void', + proargtypes => 'internal internal', prosrc => 'spg_box_quad_picksplit' }, +{ oid => '5015', descr => 'SP-GiST support for quad tree over box', + proname => 'spg_box_quad_inner_consistent', prorettype => 'void', + proargtypes => 'internal internal', + prosrc => 'spg_box_quad_inner_consistent' }, +{ oid => '5016', descr => 'SP-GiST support for quad tree over box', + proname => 'spg_box_quad_leaf_consistent', prorettype => 'bool', + proargtypes => 'internal internal', + prosrc => 'spg_box_quad_leaf_consistent' }, + +{ oid => '5010', + descr => 'SP-GiST support for quad tree over 2-D types represented by their bounding boxes', + proname => 'spg_bbox_quad_config', prorettype => 'void', + proargtypes => 'internal internal', prosrc => 'spg_bbox_quad_config' }, +{ oid => '5011', descr => 'SP-GiST support for quad tree over polygons', + proname => 'spg_poly_quad_compress', prorettype => 'box', + proargtypes => 'polygon', prosrc => 
'spg_poly_quad_compress' }, + +# replication slots +{ oid => '3779', descr => 'create a physical replication slot', + proname => 'pg_create_physical_replication_slot', provolatile => 'v', + proparallel => 'u', prorettype => 'record', proargtypes => 'name bool bool', + proallargtypes => '{name,bool,bool,name,pg_lsn}', + proargmodes => '{i,i,i,o,o}', + proargnames => '{slot_name,immediately_reserve,temporary,slot_name,lsn}', + prosrc => 'pg_create_physical_replication_slot' }, +{ oid => '3780', descr => 'drop a replication slot', + proname => 'pg_drop_replication_slot', provolatile => 'v', proparallel => 'u', + prorettype => 'void', proargtypes => 'name', + prosrc => 'pg_drop_replication_slot' }, +{ oid => '3781', + descr => 'information about replication slots currently in use', + proname => 'pg_get_replication_slots', prorows => '10', proisstrict => 'f', + proretset => 't', provolatile => 's', prorettype => 'record', + proargtypes => '', + proallargtypes => '{name,name,text,oid,bool,bool,int4,xid,xid,pg_lsn,pg_lsn}', + proargmodes => '{o,o,o,o,o,o,o,o,o,o,o}', + proargnames => '{slot_name,plugin,slot_type,datoid,temporary,active,active_pid,xmin,catalog_xmin,restart_lsn,confirmed_flush_lsn}', + prosrc => 'pg_get_replication_slots' }, +{ oid => '3786', descr => 'set up a logical replication slot', + proname => 'pg_create_logical_replication_slot', provolatile => 'v', + proparallel => 'u', prorettype => 'record', proargtypes => 'name name bool', + proallargtypes => '{name,name,bool,name,pg_lsn}', + proargmodes => '{i,i,i,o,o}', + proargnames => '{slot_name,plugin,temporary,slot_name,lsn}', + prosrc => 'pg_create_logical_replication_slot' }, +{ oid => '3782', descr => 'get changes from replication slot', + proname => 'pg_logical_slot_get_changes', procost => '1000', + prorows => '1000', provariadic => 'text', proisstrict => 'f', + proretset => 't', provolatile => 'v', proparallel => 'u', + prorettype => 'record', proargtypes => 'name pg_lsn int4 _text', + proallargtypes => '{name,pg_lsn,int4,_text,pg_lsn,xid,text}', + proargmodes => '{i,i,i,v,o,o,o}', + proargnames => '{slot_name,upto_lsn,upto_nchanges,options,lsn,xid,data}', + prosrc => 'pg_logical_slot_get_changes' }, +{ oid => '3783', descr => 'get binary changes from replication slot', + proname => 'pg_logical_slot_get_binary_changes', procost => '1000', + prorows => '1000', provariadic => 'text', proisstrict => 'f', + proretset => 't', provolatile => 'v', proparallel => 'u', + prorettype => 'record', proargtypes => 'name pg_lsn int4 _text', + proallargtypes => '{name,pg_lsn,int4,_text,pg_lsn,xid,bytea}', + proargmodes => '{i,i,i,v,o,o,o}', + proargnames => '{slot_name,upto_lsn,upto_nchanges,options,lsn,xid,data}', + prosrc => 'pg_logical_slot_get_binary_changes' }, +{ oid => '3784', descr => 'peek at changes from replication slot', + proname => 'pg_logical_slot_peek_changes', procost => '1000', + prorows => '1000', provariadic => 'text', proisstrict => 'f', + proretset => 't', provolatile => 'v', proparallel => 'u', + prorettype => 'record', proargtypes => 'name pg_lsn int4 _text', + proallargtypes => '{name,pg_lsn,int4,_text,pg_lsn,xid,text}', + proargmodes => '{i,i,i,v,o,o,o}', + proargnames => '{slot_name,upto_lsn,upto_nchanges,options,lsn,xid,data}', + prosrc => 'pg_logical_slot_peek_changes' }, +{ oid => '3785', descr => 'peek at binary changes from replication slot', + proname => 'pg_logical_slot_peek_binary_changes', procost => '1000', + prorows => '1000', provariadic => 'text', proisstrict => 'f', + proretset => 't', provolatile 
=> 'v', proparallel => 'u', + prorettype => 'record', proargtypes => 'name pg_lsn int4 _text', + proallargtypes => '{name,pg_lsn,int4,_text,pg_lsn,xid,bytea}', + proargmodes => '{i,i,i,v,o,o,o}', + proargnames => '{slot_name,upto_lsn,upto_nchanges,options,lsn,xid,data}', + prosrc => 'pg_logical_slot_peek_binary_changes' }, +{ oid => '3878', descr => 'advance logical replication slot', + proname => 'pg_replication_slot_advance', provolatile => 'v', + proparallel => 'u', prorettype => 'record', proargtypes => 'name pg_lsn', + proallargtypes => '{name,pg_lsn,name,pg_lsn}', proargmodes => '{i,i,o,o}', + proargnames => '{slot_name,upto_lsn,slot_name,end_lsn}', + prosrc => 'pg_replication_slot_advance' }, +{ oid => '3577', descr => 'emit a textual logical decoding message', + proname => 'pg_logical_emit_message', provolatile => 'v', proparallel => 'u', + prorettype => 'pg_lsn', proargtypes => 'bool text text', + prosrc => 'pg_logical_emit_message_text' }, +{ oid => '3578', descr => 'emit a binary logical decoding message', + proname => 'pg_logical_emit_message', provolatile => 'v', proparallel => 'u', + prorettype => 'pg_lsn', proargtypes => 'bool text bytea', + prosrc => 'pg_logical_emit_message_bytea' }, + +# event triggers +{ oid => '3566', descr => 'list objects dropped by the current command', + proname => 'pg_event_trigger_dropped_objects', procost => '10', + prorows => '100', proretset => 't', provolatile => 's', proparallel => 'r', + prorettype => 'record', proargtypes => '', + proallargtypes => '{oid,oid,int4,bool,bool,bool,text,text,text,text,_text,_text}', + proargmodes => '{o,o,o,o,o,o,o,o,o,o,o,o}', + proargnames => '{classid, objid, objsubid, original, normal, is_temporary, object_type, schema_name, object_name, object_identity, address_names, address_args}', + prosrc => 'pg_event_trigger_dropped_objects' }, +{ oid => '4566', descr => 'return Oid of the table getting rewritten', + proname => 'pg_event_trigger_table_rewrite_oid', provolatile => 's', + proparallel => 'r', prorettype => 'oid', proargtypes => '', + proallargtypes => '{oid}', proargmodes => '{o}', proargnames => '{oid}', + prosrc => 'pg_event_trigger_table_rewrite_oid' }, +{ oid => '4567', descr => 'return reason code for table getting rewritten', + proname => 'pg_event_trigger_table_rewrite_reason', provolatile => 's', + proparallel => 'r', prorettype => 'int4', proargtypes => '', + prosrc => 'pg_event_trigger_table_rewrite_reason' }, +{ oid => '4568', + descr => 'list DDL actions being executed by the current command', + proname => 'pg_event_trigger_ddl_commands', procost => '10', prorows => '100', + proretset => 't', provolatile => 's', proparallel => 'r', + prorettype => 'record', proargtypes => '', + proallargtypes => '{oid,oid,int4,text,text,text,text,bool,pg_ddl_command}', + proargmodes => '{o,o,o,o,o,o,o,o,o}', + proargnames => '{classid, objid, objsubid, command_tag, object_type, schema_name, object_identity, in_extension, command}', + prosrc => 'pg_event_trigger_ddl_commands' }, + +# generic transition functions for ordered-set aggregates +{ oid => '3970', descr => 'aggregate transition function', + proname => 'ordered_set_transition', proisstrict => 'f', + prorettype => 'internal', proargtypes => 'internal any', + prosrc => 'ordered_set_transition' }, +{ oid => '3971', descr => 'aggregate transition function', + proname => 'ordered_set_transition_multi', provariadic => 'any', + proisstrict => 'f', prorettype => 'internal', proargtypes => 'internal any', + proallargtypes => '{internal,any}', proargmodes => 
'{i,v}', + prosrc => 'ordered_set_transition_multi' }, + +# inverse distribution aggregates (and their support functions) +{ oid => '3972', descr => 'discrete percentile', + proname => 'percentile_disc', prokind => 'a', proisstrict => 'f', + prorettype => 'anyelement', proargtypes => 'float8 anyelement', + prosrc => 'aggregate_dummy' }, +{ oid => '3973', descr => 'aggregate final function', + proname => 'percentile_disc_final', proisstrict => 'f', + prorettype => 'anyelement', proargtypes => 'internal float8 anyelement', + prosrc => 'percentile_disc_final' }, +{ oid => '3974', descr => 'continuous distribution percentile', + proname => 'percentile_cont', prokind => 'a', proisstrict => 'f', + prorettype => 'float8', proargtypes => 'float8 float8', + prosrc => 'aggregate_dummy' }, +{ oid => '3975', descr => 'aggregate final function', + proname => 'percentile_cont_float8_final', proisstrict => 'f', + prorettype => 'float8', proargtypes => 'internal float8', + prosrc => 'percentile_cont_float8_final' }, +{ oid => '3976', descr => 'continuous distribution percentile', + proname => 'percentile_cont', prokind => 'a', proisstrict => 'f', + prorettype => 'interval', proargtypes => 'float8 interval', + prosrc => 'aggregate_dummy' }, +{ oid => '3977', descr => 'aggregate final function', + proname => 'percentile_cont_interval_final', proisstrict => 'f', + prorettype => 'interval', proargtypes => 'internal float8', + prosrc => 'percentile_cont_interval_final' }, +{ oid => '3978', descr => 'multiple discrete percentiles', + proname => 'percentile_disc', prokind => 'a', proisstrict => 'f', + prorettype => 'anyarray', proargtypes => '_float8 anyelement', + prosrc => 'aggregate_dummy' }, +{ oid => '3979', descr => 'aggregate final function', + proname => 'percentile_disc_multi_final', proisstrict => 'f', + prorettype => 'anyarray', proargtypes => 'internal _float8 anyelement', + prosrc => 'percentile_disc_multi_final' }, +{ oid => '3980', descr => 'multiple continuous percentiles', + proname => 'percentile_cont', prokind => 'a', proisstrict => 'f', + prorettype => '_float8', proargtypes => '_float8 float8', + prosrc => 'aggregate_dummy' }, +{ oid => '3981', descr => 'aggregate final function', + proname => 'percentile_cont_float8_multi_final', proisstrict => 'f', + prorettype => '_float8', proargtypes => 'internal _float8', + prosrc => 'percentile_cont_float8_multi_final' }, +{ oid => '3982', descr => 'multiple continuous percentiles', + proname => 'percentile_cont', prokind => 'a', proisstrict => 'f', + prorettype => '_interval', proargtypes => '_float8 interval', + prosrc => 'aggregate_dummy' }, +{ oid => '3983', descr => 'aggregate final function', + proname => 'percentile_cont_interval_multi_final', proisstrict => 'f', + prorettype => '_interval', proargtypes => 'internal _float8', + prosrc => 'percentile_cont_interval_multi_final' }, +{ oid => '3984', descr => 'most common value', + proname => 'mode', prokind => 'a', proisstrict => 'f', + prorettype => 'anyelement', proargtypes => 'anyelement', + prosrc => 'aggregate_dummy' }, +{ oid => '3985', descr => 'aggregate final function', + proname => 'mode_final', proisstrict => 'f', prorettype => 'anyelement', + proargtypes => 'internal anyelement', prosrc => 'mode_final' }, + +# hypothetical-set aggregates (and their support functions) +{ oid => '3986', descr => 'rank of hypothetical row', + proname => 'rank', provariadic => 'any', prokind => 'a', proisstrict => 'f', + prorettype => 'int8', proargtypes => 'any', proallargtypes => '{any}', + proargmodes 
=> '{v}', prosrc => 'aggregate_dummy' }, +{ oid => '3987', descr => 'aggregate final function', + proname => 'rank_final', provariadic => 'any', proisstrict => 'f', + prorettype => 'int8', proargtypes => 'internal any', + proallargtypes => '{internal,any}', proargmodes => '{i,v}', + prosrc => 'hypothetical_rank_final' }, +{ oid => '3988', descr => 'fractional rank of hypothetical row', + proname => 'percent_rank', provariadic => 'any', prokind => 'a', + proisstrict => 'f', prorettype => 'float8', proargtypes => 'any', + proallargtypes => '{any}', proargmodes => '{v}', + prosrc => 'aggregate_dummy' }, +{ oid => '3989', descr => 'aggregate final function', + proname => 'percent_rank_final', provariadic => 'any', proisstrict => 'f', + prorettype => 'float8', proargtypes => 'internal any', + proallargtypes => '{internal,any}', proargmodes => '{i,v}', + prosrc => 'hypothetical_percent_rank_final' }, +{ oid => '3990', descr => 'cumulative distribution of hypothetical row', + proname => 'cume_dist', provariadic => 'any', prokind => 'a', + proisstrict => 'f', prorettype => 'float8', proargtypes => 'any', + proallargtypes => '{any}', proargmodes => '{v}', + prosrc => 'aggregate_dummy' }, +{ oid => '3991', descr => 'aggregate final function', + proname => 'cume_dist_final', provariadic => 'any', proisstrict => 'f', + prorettype => 'float8', proargtypes => 'internal any', + proallargtypes => '{internal,any}', proargmodes => '{i,v}', + prosrc => 'hypothetical_cume_dist_final' }, +{ oid => '3992', descr => 'rank of hypothetical row without gaps', + proname => 'dense_rank', provariadic => 'any', prokind => 'a', + proisstrict => 'f', prorettype => 'int8', proargtypes => 'any', + proallargtypes => '{any}', proargmodes => '{v}', + prosrc => 'aggregate_dummy' }, +{ oid => '3993', descr => 'aggregate final function', + proname => 'dense_rank_final', provariadic => 'any', proisstrict => 'f', + prorettype => 'int8', proargtypes => 'internal any', + proallargtypes => '{internal,any}', proargmodes => '{i,v}', + prosrc => 'hypothetical_dense_rank_final' }, + +# pg_upgrade support +{ oid => '3582', descr => 'for use by pg_upgrade', + proname => 'binary_upgrade_set_next_pg_type_oid', provolatile => 'v', + proparallel => 'r', prorettype => 'void', proargtypes => 'oid', + prosrc => 'binary_upgrade_set_next_pg_type_oid' }, +{ oid => '3584', descr => 'for use by pg_upgrade', + proname => 'binary_upgrade_set_next_array_pg_type_oid', provolatile => 'v', + proparallel => 'r', prorettype => 'void', proargtypes => 'oid', + prosrc => 'binary_upgrade_set_next_array_pg_type_oid' }, +{ oid => '3585', descr => 'for use by pg_upgrade', + proname => 'binary_upgrade_set_next_toast_pg_type_oid', provolatile => 'v', + proparallel => 'r', prorettype => 'void', proargtypes => 'oid', + prosrc => 'binary_upgrade_set_next_toast_pg_type_oid' }, +{ oid => '3586', descr => 'for use by pg_upgrade', + proname => 'binary_upgrade_set_next_heap_pg_class_oid', provolatile => 'v', + proparallel => 'r', prorettype => 'void', proargtypes => 'oid', + prosrc => 'binary_upgrade_set_next_heap_pg_class_oid' }, +{ oid => '3587', descr => 'for use by pg_upgrade', + proname => 'binary_upgrade_set_next_index_pg_class_oid', provolatile => 'v', + proparallel => 'r', prorettype => 'void', proargtypes => 'oid', + prosrc => 'binary_upgrade_set_next_index_pg_class_oid' }, +{ oid => '3588', descr => 'for use by pg_upgrade', + proname => 'binary_upgrade_set_next_toast_pg_class_oid', provolatile => 'v', + proparallel => 'r', prorettype => 'void', proargtypes => 'oid', 
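The inverse-distribution and hypothetical-set aggregates above (percentile_cont, percentile_disc, mode, and the hypothetical rank family) are declared with prosrc => 'aggregate_dummy' and do their real work in the *_final support functions; at the SQL level they are invoked with WITHIN GROUP. A purely illustrative sketch over made-up values:

  -- continuous and discrete percentiles plus the most common value
  SELECT percentile_cont(0.5) WITHIN GROUP (ORDER BY x) AS median,
         percentile_disc(0.9) WITHIN GROUP (ORDER BY x) AS p90,
         mode()               WITHIN GROUP (ORDER BY x) AS most_common
  FROM (VALUES (1), (2), (2), (10)) AS t(x);

  -- hypothetical-set form: where would the value 3 rank in this ordering?
  SELECT rank(3) WITHIN GROUP (ORDER BY x) FROM (VALUES (1), (2), (5)) AS t(x);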
+ prosrc => 'binary_upgrade_set_next_toast_pg_class_oid' }, +{ oid => '3589', descr => 'for use by pg_upgrade', + proname => 'binary_upgrade_set_next_pg_enum_oid', provolatile => 'v', + proparallel => 'r', prorettype => 'void', proargtypes => 'oid', + prosrc => 'binary_upgrade_set_next_pg_enum_oid' }, +{ oid => '3590', descr => 'for use by pg_upgrade', + proname => 'binary_upgrade_set_next_pg_authid_oid', provolatile => 'v', + proparallel => 'r', prorettype => 'void', proargtypes => 'oid', + prosrc => 'binary_upgrade_set_next_pg_authid_oid' }, +{ oid => '3591', descr => 'for use by pg_upgrade', + proname => 'binary_upgrade_create_empty_extension', proisstrict => 'f', + provolatile => 'v', proparallel => 'u', prorettype => 'void', + proargtypes => 'text text bool text _oid _text _text', + prosrc => 'binary_upgrade_create_empty_extension' }, +{ oid => '4083', descr => 'for use by pg_upgrade', + proname => 'binary_upgrade_set_record_init_privs', provolatile => 'v', + proparallel => 'r', prorettype => 'void', proargtypes => 'bool', + prosrc => 'binary_upgrade_set_record_init_privs' }, +{ oid => '4101', descr => 'for use by pg_upgrade', + proname => 'binary_upgrade_set_missing_value', provolatile => 'v', + proparallel => 'u', prorettype => 'void', proargtypes => 'oid text text', + prosrc => 'binary_upgrade_set_missing_value' }, + +# replication/origin.h +{ oid => '6003', descr => 'create a replication origin', + proname => 'pg_replication_origin_create', provolatile => 'v', + proparallel => 'u', prorettype => 'oid', proargtypes => 'text', + prosrc => 'pg_replication_origin_create' }, + +{ oid => '6004', descr => 'drop replication origin identified by its name', + proname => 'pg_replication_origin_drop', provolatile => 'v', + proparallel => 'u', prorettype => 'void', proargtypes => 'text', + prosrc => 'pg_replication_origin_drop' }, + +{ oid => '6005', + descr => 'translate the replication origin\'s name to its id', + proname => 'pg_replication_origin_oid', provolatile => 's', + prorettype => 'oid', proargtypes => 'text', + prosrc => 'pg_replication_origin_oid' }, + +{ oid => '6006', + descr => 'configure session to maintain replication progress tracking for the passed in origin', + proname => 'pg_replication_origin_session_setup', provolatile => 'v', + proparallel => 'u', prorettype => 'void', proargtypes => 'text', + prosrc => 'pg_replication_origin_session_setup' }, + +{ oid => '6007', descr => 'teardown configured replication progress tracking', + proname => 'pg_replication_origin_session_reset', provolatile => 'v', + proparallel => 'u', prorettype => 'void', proargtypes => '', + prosrc => 'pg_replication_origin_session_reset' }, + +{ oid => '6008', + descr => 'is a replication origin configured in this session', + proname => 'pg_replication_origin_session_is_setup', provolatile => 'v', + proparallel => 'r', prorettype => 'bool', proargtypes => '', + prosrc => 'pg_replication_origin_session_is_setup' }, + +{ oid => '6009', + descr => 'get the replication progress of the current session', + proname => 'pg_replication_origin_session_progress', provolatile => 'v', + proparallel => 'u', prorettype => 'pg_lsn', proargtypes => 'bool', + prosrc => 'pg_replication_origin_session_progress' }, + +{ oid => '6010', descr => 'setup the transaction\'s origin lsn and timestamp', + proname => 'pg_replication_origin_xact_setup', provolatile => 'v', + proparallel => 'r', prorettype => 'void', proargtypes => 'pg_lsn timestamptz', + prosrc => 'pg_replication_origin_xact_setup' }, + +{ oid => '6011', descr => 
'reset the transaction\'s origin lsn and timestamp', + proname => 'pg_replication_origin_xact_reset', provolatile => 'v', + proparallel => 'r', prorettype => 'void', proargtypes => '', + prosrc => 'pg_replication_origin_xact_reset' }, + +{ oid => '6012', + descr => 'advance replication identifier to specific location', + proname => 'pg_replication_origin_advance', provolatile => 'v', + proparallel => 'u', prorettype => 'void', proargtypes => 'text pg_lsn', + prosrc => 'pg_replication_origin_advance' }, + +{ oid => '6013', + descr => 'get an individual replication origin\'s replication progress', + proname => 'pg_replication_origin_progress', provolatile => 'v', + proparallel => 'u', prorettype => 'pg_lsn', proargtypes => 'text bool', + prosrc => 'pg_replication_origin_progress' }, + +{ oid => '6014', descr => 'get progress for all replication origins', + proname => 'pg_show_replication_origin_status', prorows => '100', + proisstrict => 'f', proretset => 't', provolatile => 'v', proparallel => 'r', + prorettype => 'record', proargtypes => '', + proallargtypes => '{oid,text,pg_lsn,pg_lsn}', proargmodes => '{o,o,o,o}', + proargnames => '{local_id, external_id, remote_lsn, local_lsn}', + prosrc => 'pg_show_replication_origin_status' }, + +# publications +{ oid => '6119', descr => 'get OIDs of tables in a publication', + proname => 'pg_get_publication_tables', prorows => '1000', proretset => 't', + provolatile => 's', prorettype => 'oid', proargtypes => 'text', + proallargtypes => '{text,oid}', proargmodes => '{i,o}', + proargnames => '{pubname,relid}', prosrc => 'pg_get_publication_tables' }, +{ oid => '6121', + descr => 'returns whether a relation can be part of a publication', + proname => 'pg_relation_is_publishable', provolatile => 's', + prorettype => 'bool', proargtypes => 'regclass', + prosrc => 'pg_relation_is_publishable' }, + +# rls +{ oid => '3298', + descr => 'row security for current context active on table by table oid', + proname => 'row_security_active', provolatile => 's', prorettype => 'bool', + proargtypes => 'oid', prosrc => 'row_security_active' }, +{ oid => '3299', + descr => 'row security for current context active on table by table name', + proname => 'row_security_active', provolatile => 's', prorettype => 'bool', + proargtypes => 'text', prosrc => 'row_security_active_name' }, + +# pg_config +{ oid => '3400', descr => 'pg_config binary as a function', + proname => 'pg_config', prorows => '23', proretset => 't', proparallel => 'r', + prorettype => 'record', proargtypes => '', proallargtypes => '{text,text}', + proargmodes => '{o,o}', proargnames => '{name,setting}', + prosrc => 'pg_config' }, + +# pg_controldata related functions +{ oid => '3441', + descr => 'pg_controldata general state information as a function', + proname => 'pg_control_system', provolatile => 'v', prorettype => 'record', + proargtypes => '', proallargtypes => '{int4,int4,int8,timestamptz}', + proargmodes => '{o,o,o,o}', + proargnames => '{pg_control_version,catalog_version_no,system_identifier,pg_control_last_modified}', + prosrc => 'pg_control_system' }, + +{ oid => '3442', + descr => 'pg_controldata checkpoint state information as a function', + proname => 'pg_control_checkpoint', provolatile => 'v', + prorettype => 'record', proargtypes => '', + proallargtypes => '{pg_lsn,pg_lsn,text,int4,int4,bool,text,oid,xid,xid,xid,oid,xid,xid,oid,xid,xid,timestamptz}', + proargmodes => '{o,o,o,o,o,o,o,o,o,o,o,o,o,o,o,o,o,o}', + proargnames => 
'{checkpoint_lsn,redo_lsn,redo_wal_file,timeline_id,prev_timeline_id,full_page_writes,next_xid,next_oid,next_multixact_id,next_multi_offset,oldest_xid,oldest_xid_dbid,oldest_active_xid,oldest_multi_xid,oldest_multi_dbid,oldest_commit_ts_xid,newest_commit_ts_xid,checkpoint_time}', + prosrc => 'pg_control_checkpoint' }, + +{ oid => '3443', + descr => 'pg_controldata recovery state information as a function', + proname => 'pg_control_recovery', provolatile => 'v', prorettype => 'record', + proargtypes => '', proallargtypes => '{pg_lsn,int4,pg_lsn,pg_lsn,bool}', + proargmodes => '{o,o,o,o,o}', + proargnames => '{min_recovery_end_lsn,min_recovery_end_timeline,backup_start_lsn,backup_end_lsn,end_of_backup_record_required}', + prosrc => 'pg_control_recovery' }, + +{ oid => '3444', + descr => 'pg_controldata init state information as a function', + proname => 'pg_control_init', provolatile => 'v', prorettype => 'record', + proargtypes => '', + proallargtypes => '{int4,int4,int4,int4,int4,int4,int4,int4,int4,bool,bool,int4}', + proargmodes => '{o,o,o,o,o,o,o,o,o,o,o,o}', + proargnames => '{max_data_alignment,database_block_size,blocks_per_segment,wal_block_size,bytes_per_wal_segment,max_identifier_length,max_index_columns,max_toast_chunk_size,large_object_chunk_size,float4_pass_by_value,float8_pass_by_value,data_page_checksum_version}', + prosrc => 'pg_control_init' }, + +# collation management functions +{ oid => '3445', descr => 'import collations from operating system', + proname => 'pg_import_system_collations', procost => '100', + provolatile => 'v', proparallel => 'u', prorettype => 'int4', + proargtypes => 'regnamespace', prosrc => 'pg_import_system_collations' }, + +{ oid => '3448', + descr => 'get actual version of collation from operating system', + proname => 'pg_collation_actual_version', procost => '100', + provolatile => 'v', prorettype => 'text', proargtypes => 'oid', + prosrc => 'pg_collation_actual_version' }, + +# system management/monitoring related functions +{ oid => '3353', descr => 'list files in the log directory', + proname => 'pg_ls_logdir', procost => '10', prorows => '20', proretset => 't', + provolatile => 'v', prorettype => 'record', proargtypes => '', + proallargtypes => '{text,int8,timestamptz}', proargmodes => '{o,o,o}', + proargnames => '{name,size,modification}', prosrc => 'pg_ls_logdir' }, +{ oid => '3354', descr => 'list of files in the WAL directory', + proname => 'pg_ls_waldir', procost => '10', prorows => '20', proretset => 't', + provolatile => 'v', prorettype => 'record', proargtypes => '', + proallargtypes => '{text,int8,timestamptz}', proargmodes => '{o,o,o}', + proargnames => '{name,size,modification}', prosrc => 'pg_ls_waldir' }, +{ oid => '5031', descr => 'list of files in the archive_status directory', + proname => 'pg_ls_archive_statusdir', procost => '10', prorows => '20', proretset => 't', + provolatile => 'v', prorettype => 'record', proargtypes => '', + proallargtypes => '{text,int8,timestamptz}', proargmodes => '{o,o,o}', + proargnames => '{name,size,modification}', prosrc => 'pg_ls_archive_statusdir' }, +{ oid => '5029', descr => 'list files in the pgsql_tmp directory', + proname => 'pg_ls_tmpdir', procost => '10', prorows => '20', proretset => 't', + provolatile => 'v', prorettype => 'record', proargtypes => '', + proallargtypes => '{text,int8,timestamptz}', proargmodes => '{o,o,o}', + proargnames => '{name,size,modification}', prosrc => 'pg_ls_tmpdir_noargs' }, +{ oid => '5030', descr => 'list files in the pgsql_tmp directory', + proname => 
'pg_ls_tmpdir', procost => '10', prorows => '20', proretset => 't', + provolatile => 'v', prorettype => 'record', proargtypes => 'oid', + proallargtypes => '{oid,text,int8,timestamptz}', proargmodes => '{i,o,o,o}', + proargnames => '{tablespace,name,size,modification}', + prosrc => 'pg_ls_tmpdir_1arg' }, + +# hash partitioning constraint function +{ oid => '5028', descr => 'hash partition CHECK constraint', + proname => 'satisfies_hash_partition', provariadic => 'any', + proisstrict => 'f', prorettype => 'bool', proargtypes => 'oid int4 int4 any', + proargmodes => '{i,i,i,v}', prosrc => 'satisfies_hash_partition' }, + +# information about a partition tree +{ oid => '3423', descr => 'view partition tree tables', + proname => 'pg_partition_tree', prorows => '1000', proretset => 't', + provolatile => 'v', prorettype => 'record', proargtypes => 'regclass', + proallargtypes => '{regclass,regclass,regclass,bool,int4}', + proargmodes => '{i,o,o,o,o}', + proargnames => '{rootrelid,relid,parentrelid,isleaf,level}', + prosrc => 'pg_partition_tree' } + +] diff --git a/src/include/catalog/pg_proc.h b/src/include/catalog/pg_proc.h index 8b33b4e0ea..a34b2596fa 100644 --- a/src/include/catalog/pg_proc.h +++ b/src/include/catalog/pg_proc.h @@ -1,22 +1,16 @@ /*------------------------------------------------------------------------- * * pg_proc.h - * definition of the system "procedure" relation (pg_proc) - * along with the relation's initial contents. + * definition of the "procedure" system catalog (pg_proc) * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/include/catalog/pg_proc.h * * NOTES - * The script catalog/genbki.pl reads this file and generates .bki - * information from the DATA() statements. utils/Gen_fmgrtab.pl - * generates fmgroids.h and fmgrtab.c the same way. - * - * XXX do NOT break up DATA() statements into multiple lines! - * the scripts are not as smart as you might think... - * XXX (eg. #if 0 #endif won't do what you think) + * The Catalog.pm module reads this file and derives schema + * information. * *------------------------------------------------------------------------- */ @@ -24,5456 +18,128 @@ #define PG_PROC_H #include "catalog/genbki.h" +#include "catalog/pg_proc_d.h" + +#include "catalog/objectaddress.h" +#include "nodes/pg_list.h" /* ---------------- * pg_proc definition. cpp turns this into * typedef struct FormData_pg_proc * ---------------- */ -#define ProcedureRelationId 1255 -#define ProcedureRelation_Rowtype_Id 81 - -CATALOG(pg_proc,1255) BKI_BOOTSTRAP BKI_ROWTYPE_OID(81) BKI_SCHEMA_MACRO +CATALOG(pg_proc,1255,ProcedureRelationId) BKI_BOOTSTRAP BKI_ROWTYPE_OID(81,ProcedureRelation_Rowtype_Id) BKI_SCHEMA_MACRO { - NameData proname; /* procedure name */ - Oid pronamespace; /* OID of namespace containing this proc */ - Oid proowner; /* procedure owner */ - Oid prolang; /* OID of pg_language entry */ - float4 procost; /* estimated execution cost */ - float4 prorows; /* estimated # of rows out (if proretset) */ - Oid provariadic; /* element type of variadic array, or 0 */ - regproc protransform; /* transforms calls to it during planning */ - bool proisagg; /* is it an aggregate? */ - bool proiswindow; /* is it a window function? */ - bool prosecdef; /* security definer */ - bool proleakproof; /* is it a leak-proof function? */ - bool proisstrict; /* strict with respect to NULLs? 
*/ - bool proretset; /* returns a set? */ - char provolatile; /* see PROVOLATILE_ categories below */ - char proparallel; /* see PROPARALLEL_ categories below */ - int16 pronargs; /* number of arguments */ - int16 pronargdefaults; /* number of arguments with defaults */ - Oid prorettype; /* OID of result type */ - - /* - * variable-length fields start here, but we allow direct access to - * proargtypes - */ - oidvector proargtypes; /* parameter types (excludes OUT params) */ - -#ifdef CATALOG_VARLEN - Oid proallargtypes[1]; /* all param types (NULL if IN only) */ - char proargmodes[1]; /* parameter modes (NULL if IN only) */ - text proargnames[1]; /* parameter names (NULL if no names) */ - pg_node_tree proargdefaults; /* list of expression trees for argument - * defaults (NULL if none) */ - Oid protrftypes[1]; /* types for which to apply transforms */ - text prosrc BKI_FORCE_NOT_NULL; /* procedure source text */ - text probin; /* secondary procedure info (can be NULL) */ - text proconfig[1]; /* procedure-local GUC settings */ - aclitem proacl[1]; /* access permissions */ -#endif -} FormData_pg_proc; - -/* ---------------- - * Form_pg_proc corresponds to a pointer to a tuple with - * the format of pg_proc relation. - * ---------------- - */ -typedef FormData_pg_proc *Form_pg_proc; - -/* ---------------- - * compiler constants for pg_proc - * ---------------- - */ -#define Natts_pg_proc 29 -#define Anum_pg_proc_proname 1 -#define Anum_pg_proc_pronamespace 2 -#define Anum_pg_proc_proowner 3 -#define Anum_pg_proc_prolang 4 -#define Anum_pg_proc_procost 5 -#define Anum_pg_proc_prorows 6 -#define Anum_pg_proc_provariadic 7 -#define Anum_pg_proc_protransform 8 -#define Anum_pg_proc_proisagg 9 -#define Anum_pg_proc_proiswindow 10 -#define Anum_pg_proc_prosecdef 11 -#define Anum_pg_proc_proleakproof 12 -#define Anum_pg_proc_proisstrict 13 -#define Anum_pg_proc_proretset 14 -#define Anum_pg_proc_provolatile 15 -#define Anum_pg_proc_proparallel 16 -#define Anum_pg_proc_pronargs 17 -#define Anum_pg_proc_pronargdefaults 18 -#define Anum_pg_proc_prorettype 19 -#define Anum_pg_proc_proargtypes 20 -#define Anum_pg_proc_proallargtypes 21 -#define Anum_pg_proc_proargmodes 22 -#define Anum_pg_proc_proargnames 23 -#define Anum_pg_proc_proargdefaults 24 -#define Anum_pg_proc_protrftypes 25 -#define Anum_pg_proc_prosrc 26 -#define Anum_pg_proc_probin 27 -#define Anum_pg_proc_proconfig 28 -#define Anum_pg_proc_proacl 29 - -/* ---------------- - * initial contents of pg_proc - * ---------------- - */ - -/* - * Note: every entry in pg_proc.h is expected to have a DESCR() comment, - * except for functions that implement pg_operator.h operators and don't - * have a good reason to be called directly rather than via the operator. - * (If you do expect such a function to be used directly, you should - * duplicate the operator's comment.) initdb will supply suitable default - * comments for functions referenced by pg_operator. - * - * Try to follow the style of existing functions' comments. 
- * Some recommended conventions: - * "I/O" for typinput, typoutput, typreceive, typsend functions - * "I/O typmod" for typmodin, typmodout functions - * "aggregate transition function" for aggtransfn functions, unless - * they are reasonably useful in their own right - * "aggregate final function" for aggfinalfn functions (likewise) - * "convert srctypename to desttypename" for cast functions - * "less-equal-greater" for B-tree comparison functions - */ - -/* keep the following ordered by OID so that later changes can be made easier */ - -/* OIDS 1 - 99 */ - -DATA(insert OID = 1242 ( boolin PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 16 "2275" _null_ _null_ _null_ _null_ _null_ boolin _null_ _null_ _null_ )); -DESCR("I/O"); -DATA(insert OID = 1243 ( boolout PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 2275 "16" _null_ _null_ _null_ _null_ _null_ boolout _null_ _null_ _null_ )); -DESCR("I/O"); -DATA(insert OID = 1244 ( byteain PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 17 "2275" _null_ _null_ _null_ _null_ _null_ byteain _null_ _null_ _null_ )); -DESCR("I/O"); -DATA(insert OID = 31 ( byteaout PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 2275 "17" _null_ _null_ _null_ _null_ _null_ byteaout _null_ _null_ _null_ )); -DESCR("I/O"); -DATA(insert OID = 1245 ( charin PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 18 "2275" _null_ _null_ _null_ _null_ _null_ charin _null_ _null_ _null_ )); -DESCR("I/O"); -DATA(insert OID = 33 ( charout PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 2275 "18" _null_ _null_ _null_ _null_ _null_ charout _null_ _null_ _null_ )); -DESCR("I/O"); -DATA(insert OID = 34 ( namein PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 19 "2275" _null_ _null_ _null_ _null_ _null_ namein _null_ _null_ _null_ )); -DESCR("I/O"); -DATA(insert OID = 35 ( nameout PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 2275 "19" _null_ _null_ _null_ _null_ _null_ nameout _null_ _null_ _null_ )); -DESCR("I/O"); -DATA(insert OID = 38 ( int2in PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 21 "2275" _null_ _null_ _null_ _null_ _null_ int2in _null_ _null_ _null_ )); -DESCR("I/O"); -DATA(insert OID = 39 ( int2out PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 2275 "21" _null_ _null_ _null_ _null_ _null_ int2out _null_ _null_ _null_ )); -DESCR("I/O"); -DATA(insert OID = 40 ( int2vectorin PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 22 "2275" _null_ _null_ _null_ _null_ _null_ int2vectorin _null_ _null_ _null_ )); -DESCR("I/O"); -DATA(insert OID = 41 ( int2vectorout PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 2275 "22" _null_ _null_ _null_ _null_ _null_ int2vectorout _null_ _null_ _null_ )); -DESCR("I/O"); -DATA(insert OID = 42 ( int4in PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 23 "2275" _null_ _null_ _null_ _null_ _null_ int4in _null_ _null_ _null_ )); -DESCR("I/O"); -DATA(insert OID = 43 ( int4out PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 2275 "23" _null_ _null_ _null_ _null_ _null_ int4out _null_ _null_ _null_ )); -DESCR("I/O"); -DATA(insert OID = 44 ( regprocin PGNSP PGUID 12 1 0 0 0 f f f f t f s s 1 0 24 "2275" _null_ _null_ _null_ _null_ _null_ regprocin _null_ _null_ _null_ )); -DESCR("I/O"); -DATA(insert OID = 45 ( regprocout PGNSP PGUID 12 1 0 0 0 f f f f t f s s 1 0 2275 "24" _null_ _null_ _null_ _null_ _null_ regprocout _null_ _null_ _null_ )); -DESCR("I/O"); -DATA(insert OID = 3494 ( to_regproc PGNSP PGUID 12 1 0 0 0 f f f f t f s s 1 0 24 "25" _null_ _null_ _null_ _null_ _null_ to_regproc _null_ _null_ _null_ )); -DESCR("convert proname to regproc"); -DATA(insert OID = 3479 ( to_regprocedure PGNSP PGUID 12 1 0 0 0 f f f 
f t f s s 1 0 2202 "25" _null_ _null_ _null_ _null_ _null_ to_regprocedure _null_ _null_ _null_ )); -DESCR("convert proname to regprocedure"); -DATA(insert OID = 46 ( textin PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 25 "2275" _null_ _null_ _null_ _null_ _null_ textin _null_ _null_ _null_ )); -DESCR("I/O"); -DATA(insert OID = 47 ( textout PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 2275 "25" _null_ _null_ _null_ _null_ _null_ textout _null_ _null_ _null_ )); -DESCR("I/O"); -DATA(insert OID = 48 ( tidin PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 27 "2275" _null_ _null_ _null_ _null_ _null_ tidin _null_ _null_ _null_ )); -DESCR("I/O"); -DATA(insert OID = 49 ( tidout PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 2275 "27" _null_ _null_ _null_ _null_ _null_ tidout _null_ _null_ _null_ )); -DESCR("I/O"); -DATA(insert OID = 50 ( xidin PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 28 "2275" _null_ _null_ _null_ _null_ _null_ xidin _null_ _null_ _null_ )); -DESCR("I/O"); -DATA(insert OID = 51 ( xidout PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 2275 "28" _null_ _null_ _null_ _null_ _null_ xidout _null_ _null_ _null_ )); -DESCR("I/O"); -DATA(insert OID = 52 ( cidin PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 29 "2275" _null_ _null_ _null_ _null_ _null_ cidin _null_ _null_ _null_ )); -DESCR("I/O"); -DATA(insert OID = 53 ( cidout PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 2275 "29" _null_ _null_ _null_ _null_ _null_ cidout _null_ _null_ _null_ )); -DESCR("I/O"); -DATA(insert OID = 54 ( oidvectorin PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 30 "2275" _null_ _null_ _null_ _null_ _null_ oidvectorin _null_ _null_ _null_ )); -DESCR("I/O"); -DATA(insert OID = 55 ( oidvectorout PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 2275 "30" _null_ _null_ _null_ _null_ _null_ oidvectorout _null_ _null_ _null_ )); -DESCR("I/O"); -DATA(insert OID = 56 ( boollt PGNSP PGUID 12 1 0 0 0 f f f t t f i s 2 0 16 "16 16" _null_ _null_ _null_ _null_ _null_ boollt _null_ _null_ _null_ )); -DATA(insert OID = 57 ( boolgt PGNSP PGUID 12 1 0 0 0 f f f t t f i s 2 0 16 "16 16" _null_ _null_ _null_ _null_ _null_ boolgt _null_ _null_ _null_ )); -DATA(insert OID = 60 ( booleq PGNSP PGUID 12 1 0 0 0 f f f t t f i s 2 0 16 "16 16" _null_ _null_ _null_ _null_ _null_ booleq _null_ _null_ _null_ )); -DATA(insert OID = 61 ( chareq PGNSP PGUID 12 1 0 0 0 f f f t t f i s 2 0 16 "18 18" _null_ _null_ _null_ _null_ _null_ chareq _null_ _null_ _null_ )); -DATA(insert OID = 62 ( nameeq PGNSP PGUID 12 1 0 0 0 f f f t t f i s 2 0 16 "19 19" _null_ _null_ _null_ _null_ _null_ nameeq _null_ _null_ _null_ )); -DATA(insert OID = 63 ( int2eq PGNSP PGUID 12 1 0 0 0 f f f t t f i s 2 0 16 "21 21" _null_ _null_ _null_ _null_ _null_ int2eq _null_ _null_ _null_ )); -DATA(insert OID = 64 ( int2lt PGNSP PGUID 12 1 0 0 0 f f f t t f i s 2 0 16 "21 21" _null_ _null_ _null_ _null_ _null_ int2lt _null_ _null_ _null_ )); -DATA(insert OID = 65 ( int4eq PGNSP PGUID 12 1 0 0 0 f f f t t f i s 2 0 16 "23 23" _null_ _null_ _null_ _null_ _null_ int4eq _null_ _null_ _null_ )); -DATA(insert OID = 66 ( int4lt PGNSP PGUID 12 1 0 0 0 f f f t t f i s 2 0 16 "23 23" _null_ _null_ _null_ _null_ _null_ int4lt _null_ _null_ _null_ )); -DATA(insert OID = 67 ( texteq PGNSP PGUID 12 1 0 0 0 f f f t t f i s 2 0 16 "25 25" _null_ _null_ _null_ _null_ _null_ texteq _null_ _null_ _null_ )); -DATA(insert OID = 68 ( xideq PGNSP PGUID 12 1 0 0 0 f f f t t f i s 2 0 16 "28 28" _null_ _null_ _null_ _null_ _null_ xideq _null_ _null_ _null_ )); -DATA(insert OID = 3308 ( xidneq PGNSP PGUID 12 1 0 0 0 f f f t 
t f i s 2 0 16 "28 28" _null_ _null_ _null_ _null_ _null_ xidneq _null_ _null_ _null_ )); -DATA(insert OID = 69 ( cideq PGNSP PGUID 12 1 0 0 0 f f f t t f i s 2 0 16 "29 29" _null_ _null_ _null_ _null_ _null_ cideq _null_ _null_ _null_ )); -DATA(insert OID = 70 ( charne PGNSP PGUID 12 1 0 0 0 f f f t t f i s 2 0 16 "18 18" _null_ _null_ _null_ _null_ _null_ charne _null_ _null_ _null_ )); -DATA(insert OID = 1246 ( charlt PGNSP PGUID 12 1 0 0 0 f f f t t f i s 2 0 16 "18 18" _null_ _null_ _null_ _null_ _null_ charlt _null_ _null_ _null_ )); -DATA(insert OID = 72 ( charle PGNSP PGUID 12 1 0 0 0 f f f t t f i s 2 0 16 "18 18" _null_ _null_ _null_ _null_ _null_ charle _null_ _null_ _null_ )); -DATA(insert OID = 73 ( chargt PGNSP PGUID 12 1 0 0 0 f f f t t f i s 2 0 16 "18 18" _null_ _null_ _null_ _null_ _null_ chargt _null_ _null_ _null_ )); -DATA(insert OID = 74 ( charge PGNSP PGUID 12 1 0 0 0 f f f t t f i s 2 0 16 "18 18" _null_ _null_ _null_ _null_ _null_ charge _null_ _null_ _null_ )); -DATA(insert OID = 77 ( int4 PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 23 "18" _null_ _null_ _null_ _null_ _null_ chartoi4 _null_ _null_ _null_ )); -DESCR("convert char to int4"); -DATA(insert OID = 78 ( char PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 18 "23" _null_ _null_ _null_ _null_ _null_ i4tochar _null_ _null_ _null_ )); -DESCR("convert int4 to char"); - -DATA(insert OID = 79 ( nameregexeq PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 16 "19 25" _null_ _null_ _null_ _null_ _null_ nameregexeq _null_ _null_ _null_ )); -DATA(insert OID = 1252 ( nameregexne PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 16 "19 25" _null_ _null_ _null_ _null_ _null_ nameregexne _null_ _null_ _null_ )); -DATA(insert OID = 1254 ( textregexeq PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 16 "25 25" _null_ _null_ _null_ _null_ _null_ textregexeq _null_ _null_ _null_ )); -DATA(insert OID = 1256 ( textregexne PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 16 "25 25" _null_ _null_ _null_ _null_ _null_ textregexne _null_ _null_ _null_ )); -DATA(insert OID = 1257 ( textlen PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 23 "25" _null_ _null_ _null_ _null_ _null_ textlen _null_ _null_ _null_ )); -DESCR("length"); -DATA(insert OID = 1258 ( textcat PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 25 "25 25" _null_ _null_ _null_ _null_ _null_ textcat _null_ _null_ _null_ )); - -DATA(insert OID = 84 ( boolne PGNSP PGUID 12 1 0 0 0 f f f t t f i s 2 0 16 "16 16" _null_ _null_ _null_ _null_ _null_ boolne _null_ _null_ _null_ )); -DATA(insert OID = 89 ( version PGNSP PGUID 12 1 0 0 0 f f f f t f s s 0 0 25 "" _null_ _null_ _null_ _null_ _null_ pgsql_version _null_ _null_ _null_ )); -DESCR("PostgreSQL version string"); - -DATA(insert OID = 86 ( pg_ddl_command_in PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 32 "2275" _null_ _null_ _null_ _null_ _null_ pg_ddl_command_in _null_ _null_ _null_ )); -DESCR("I/O"); -DATA(insert OID = 87 ( pg_ddl_command_out PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 2275 "32" _null_ _null_ _null_ _null_ _null_ pg_ddl_command_out _null_ _null_ _null_ )); -DESCR("I/O"); -DATA(insert OID = 88 ( pg_ddl_command_recv PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 32 "2281" _null_ _null_ _null_ _null_ _null_ pg_ddl_command_recv _null_ _null_ _null_ )); -DESCR("I/O"); -DATA(insert OID = 90 ( pg_ddl_command_send PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 17 "32" _null_ _null_ _null_ _null_ _null_ pg_ddl_command_send _null_ _null_ _null_ )); -DESCR("I/O"); - -/* OIDS 100 - 199 */ - -DATA(insert OID = 101 ( eqsel PGNSP PGUID 12 1 0 0 0 f f f f t f s s 4 0 
701 "2281 26 2281 23" _null_ _null_ _null_ _null_ _null_ eqsel _null_ _null_ _null_ )); -DESCR("restriction selectivity of = and related operators"); -DATA(insert OID = 102 ( neqsel PGNSP PGUID 12 1 0 0 0 f f f f t f s s 4 0 701 "2281 26 2281 23" _null_ _null_ _null_ _null_ _null_ neqsel _null_ _null_ _null_ )); -DESCR("restriction selectivity of <> and related operators"); -DATA(insert OID = 103 ( scalarltsel PGNSP PGUID 12 1 0 0 0 f f f f t f s s 4 0 701 "2281 26 2281 23" _null_ _null_ _null_ _null_ _null_ scalarltsel _null_ _null_ _null_ )); -DESCR("restriction selectivity of < and related operators on scalar datatypes"); -DATA(insert OID = 104 ( scalargtsel PGNSP PGUID 12 1 0 0 0 f f f f t f s s 4 0 701 "2281 26 2281 23" _null_ _null_ _null_ _null_ _null_ scalargtsel _null_ _null_ _null_ )); -DESCR("restriction selectivity of > and related operators on scalar datatypes"); -DATA(insert OID = 105 ( eqjoinsel PGNSP PGUID 12 1 0 0 0 f f f f t f s s 5 0 701 "2281 26 2281 21 2281" _null_ _null_ _null_ _null_ _null_ eqjoinsel _null_ _null_ _null_ )); -DESCR("join selectivity of = and related operators"); -DATA(insert OID = 106 ( neqjoinsel PGNSP PGUID 12 1 0 0 0 f f f f t f s s 5 0 701 "2281 26 2281 21 2281" _null_ _null_ _null_ _null_ _null_ neqjoinsel _null_ _null_ _null_ )); -DESCR("join selectivity of <> and related operators"); -DATA(insert OID = 107 ( scalarltjoinsel PGNSP PGUID 12 1 0 0 0 f f f f t f s s 5 0 701 "2281 26 2281 21 2281" _null_ _null_ _null_ _null_ _null_ scalarltjoinsel _null_ _null_ _null_ )); -DESCR("join selectivity of < and related operators on scalar datatypes"); -DATA(insert OID = 108 ( scalargtjoinsel PGNSP PGUID 12 1 0 0 0 f f f f t f s s 5 0 701 "2281 26 2281 21 2281" _null_ _null_ _null_ _null_ _null_ scalargtjoinsel _null_ _null_ _null_ )); -DESCR("join selectivity of > and related operators on scalar datatypes"); - -DATA(insert OID = 109 ( unknownin PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 705 "2275" _null_ _null_ _null_ _null_ _null_ unknownin _null_ _null_ _null_ )); -DESCR("I/O"); -DATA(insert OID = 110 ( unknownout PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 2275 "705" _null_ _null_ _null_ _null_ _null_ unknownout _null_ _null_ _null_ )); -DESCR("I/O"); -DATA(insert OID = 111 ( numeric_fac PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 1700 "20" _null_ _null_ _null_ _null_ _null_ numeric_fac _null_ _null_ _null_ )); - -DATA(insert OID = 115 ( box_above_eq PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 16 "603 603" _null_ _null_ _null_ _null_ _null_ box_above_eq _null_ _null_ _null_ )); -DATA(insert OID = 116 ( box_below_eq PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 16 "603 603" _null_ _null_ _null_ _null_ _null_ box_below_eq _null_ _null_ _null_ )); - -DATA(insert OID = 117 ( point_in PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 600 "2275" _null_ _null_ _null_ _null_ _null_ point_in _null_ _null_ _null_ )); -DESCR("I/O"); -DATA(insert OID = 118 ( point_out PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 2275 "600" _null_ _null_ _null_ _null_ _null_ point_out _null_ _null_ _null_ )); -DESCR("I/O"); -DATA(insert OID = 119 ( lseg_in PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 601 "2275" _null_ _null_ _null_ _null_ _null_ lseg_in _null_ _null_ _null_ )); -DESCR("I/O"); -DATA(insert OID = 120 ( lseg_out PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 2275 "601" _null_ _null_ _null_ _null_ _null_ lseg_out _null_ _null_ _null_ )); -DESCR("I/O"); -DATA(insert OID = 121 ( path_in PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 602 "2275" _null_ _null_ _null_ _null_ _null_ path_in _null_ 
_null_ _null_ )); -DESCR("I/O"); -DATA(insert OID = 122 ( path_out PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 2275 "602" _null_ _null_ _null_ _null_ _null_ path_out _null_ _null_ _null_ )); -DESCR("I/O"); -DATA(insert OID = 123 ( box_in PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 603 "2275" _null_ _null_ _null_ _null_ _null_ box_in _null_ _null_ _null_ )); -DESCR("I/O"); -DATA(insert OID = 124 ( box_out PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 2275 "603" _null_ _null_ _null_ _null_ _null_ box_out _null_ _null_ _null_ )); -DESCR("I/O"); -DATA(insert OID = 125 ( box_overlap PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 16 "603 603" _null_ _null_ _null_ _null_ _null_ box_overlap _null_ _null_ _null_ )); -DATA(insert OID = 126 ( box_ge PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 16 "603 603" _null_ _null_ _null_ _null_ _null_ box_ge _null_ _null_ _null_ )); -DATA(insert OID = 127 ( box_gt PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 16 "603 603" _null_ _null_ _null_ _null_ _null_ box_gt _null_ _null_ _null_ )); -DATA(insert OID = 128 ( box_eq PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 16 "603 603" _null_ _null_ _null_ _null_ _null_ box_eq _null_ _null_ _null_ )); -DATA(insert OID = 129 ( box_lt PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 16 "603 603" _null_ _null_ _null_ _null_ _null_ box_lt _null_ _null_ _null_ )); -DATA(insert OID = 130 ( box_le PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 16 "603 603" _null_ _null_ _null_ _null_ _null_ box_le _null_ _null_ _null_ )); -DATA(insert OID = 131 ( point_above PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 16 "600 600" _null_ _null_ _null_ _null_ _null_ point_above _null_ _null_ _null_ )); -DATA(insert OID = 132 ( point_left PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 16 "600 600" _null_ _null_ _null_ _null_ _null_ point_left _null_ _null_ _null_ )); -DATA(insert OID = 133 ( point_right PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 16 "600 600" _null_ _null_ _null_ _null_ _null_ point_right _null_ _null_ _null_ )); -DATA(insert OID = 134 ( point_below PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 16 "600 600" _null_ _null_ _null_ _null_ _null_ point_below _null_ _null_ _null_ )); -DATA(insert OID = 135 ( point_eq PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 16 "600 600" _null_ _null_ _null_ _null_ _null_ point_eq _null_ _null_ _null_ )); -DATA(insert OID = 136 ( on_pb PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 16 "600 603" _null_ _null_ _null_ _null_ _null_ on_pb _null_ _null_ _null_ )); -DATA(insert OID = 137 ( on_ppath PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 16 "600 602" _null_ _null_ _null_ _null_ _null_ on_ppath _null_ _null_ _null_ )); -DATA(insert OID = 138 ( box_center PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 600 "603" _null_ _null_ _null_ _null_ _null_ box_center _null_ _null_ _null_ )); -DATA(insert OID = 139 ( areasel PGNSP PGUID 12 1 0 0 0 f f f f t f s s 4 0 701 "2281 26 2281 23" _null_ _null_ _null_ _null_ _null_ areasel _null_ _null_ _null_ )); -DESCR("restriction selectivity for area-comparison operators"); -DATA(insert OID = 140 ( areajoinsel PGNSP PGUID 12 1 0 0 0 f f f f t f s s 5 0 701 "2281 26 2281 21 2281" _null_ _null_ _null_ _null_ _null_ areajoinsel _null_ _null_ _null_ )); -DESCR("join selectivity for area-comparison operators"); -DATA(insert OID = 141 ( int4mul PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 23 "23 23" _null_ _null_ _null_ _null_ _null_ int4mul _null_ _null_ _null_ )); -DATA(insert OID = 144 ( int4ne PGNSP PGUID 12 1 0 0 0 f f f t t f i s 2 0 16 "23 23" _null_ _null_ _null_ _null_ _null_ int4ne _null_ _null_ _null_ )); 
-DATA(insert OID = 145 ( int2ne PGNSP PGUID 12 1 0 0 0 f f f t t f i s 2 0 16 "21 21" _null_ _null_ _null_ _null_ _null_ int2ne _null_ _null_ _null_ )); -DATA(insert OID = 146 ( int2gt PGNSP PGUID 12 1 0 0 0 f f f t t f i s 2 0 16 "21 21" _null_ _null_ _null_ _null_ _null_ int2gt _null_ _null_ _null_ )); -DATA(insert OID = 147 ( int4gt PGNSP PGUID 12 1 0 0 0 f f f t t f i s 2 0 16 "23 23" _null_ _null_ _null_ _null_ _null_ int4gt _null_ _null_ _null_ )); -DATA(insert OID = 148 ( int2le PGNSP PGUID 12 1 0 0 0 f f f t t f i s 2 0 16 "21 21" _null_ _null_ _null_ _null_ _null_ int2le _null_ _null_ _null_ )); -DATA(insert OID = 149 ( int4le PGNSP PGUID 12 1 0 0 0 f f f t t f i s 2 0 16 "23 23" _null_ _null_ _null_ _null_ _null_ int4le _null_ _null_ _null_ )); -DATA(insert OID = 150 ( int4ge PGNSP PGUID 12 1 0 0 0 f f f t t f i s 2 0 16 "23 23" _null_ _null_ _null_ _null_ _null_ int4ge _null_ _null_ _null_ )); -DATA(insert OID = 151 ( int2ge PGNSP PGUID 12 1 0 0 0 f f f t t f i s 2 0 16 "21 21" _null_ _null_ _null_ _null_ _null_ int2ge _null_ _null_ _null_ )); -DATA(insert OID = 152 ( int2mul PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 21 "21 21" _null_ _null_ _null_ _null_ _null_ int2mul _null_ _null_ _null_ )); -DATA(insert OID = 153 ( int2div PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 21 "21 21" _null_ _null_ _null_ _null_ _null_ int2div _null_ _null_ _null_ )); -DATA(insert OID = 154 ( int4div PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 23 "23 23" _null_ _null_ _null_ _null_ _null_ int4div _null_ _null_ _null_ )); -DATA(insert OID = 155 ( int2mod PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 21 "21 21" _null_ _null_ _null_ _null_ _null_ int2mod _null_ _null_ _null_ )); -DATA(insert OID = 156 ( int4mod PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 23 "23 23" _null_ _null_ _null_ _null_ _null_ int4mod _null_ _null_ _null_ )); -DATA(insert OID = 157 ( textne PGNSP PGUID 12 1 0 0 0 f f f t t f i s 2 0 16 "25 25" _null_ _null_ _null_ _null_ _null_ textne _null_ _null_ _null_ )); -DATA(insert OID = 158 ( int24eq PGNSP PGUID 12 1 0 0 0 f f f t t f i s 2 0 16 "21 23" _null_ _null_ _null_ _null_ _null_ int24eq _null_ _null_ _null_ )); -DATA(insert OID = 159 ( int42eq PGNSP PGUID 12 1 0 0 0 f f f t t f i s 2 0 16 "23 21" _null_ _null_ _null_ _null_ _null_ int42eq _null_ _null_ _null_ )); -DATA(insert OID = 160 ( int24lt PGNSP PGUID 12 1 0 0 0 f f f t t f i s 2 0 16 "21 23" _null_ _null_ _null_ _null_ _null_ int24lt _null_ _null_ _null_ )); -DATA(insert OID = 161 ( int42lt PGNSP PGUID 12 1 0 0 0 f f f t t f i s 2 0 16 "23 21" _null_ _null_ _null_ _null_ _null_ int42lt _null_ _null_ _null_ )); -DATA(insert OID = 162 ( int24gt PGNSP PGUID 12 1 0 0 0 f f f t t f i s 2 0 16 "21 23" _null_ _null_ _null_ _null_ _null_ int24gt _null_ _null_ _null_ )); -DATA(insert OID = 163 ( int42gt PGNSP PGUID 12 1 0 0 0 f f f t t f i s 2 0 16 "23 21" _null_ _null_ _null_ _null_ _null_ int42gt _null_ _null_ _null_ )); -DATA(insert OID = 164 ( int24ne PGNSP PGUID 12 1 0 0 0 f f f t t f i s 2 0 16 "21 23" _null_ _null_ _null_ _null_ _null_ int24ne _null_ _null_ _null_ )); -DATA(insert OID = 165 ( int42ne PGNSP PGUID 12 1 0 0 0 f f f t t f i s 2 0 16 "23 21" _null_ _null_ _null_ _null_ _null_ int42ne _null_ _null_ _null_ )); -DATA(insert OID = 166 ( int24le PGNSP PGUID 12 1 0 0 0 f f f t t f i s 2 0 16 "21 23" _null_ _null_ _null_ _null_ _null_ int24le _null_ _null_ _null_ )); -DATA(insert OID = 167 ( int42le PGNSP PGUID 12 1 0 0 0 f f f t t f i s 2 0 16 "23 21" _null_ _null_ _null_ _null_ _null_ int42le _null_ _null_ _null_ )); 
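Most of the DATA() lines removed in this stretch describe functions that exist to implement pg_operator entries — the integer comparison and arithmetic routines — which is why, per the convention comment deleted above, they carry no DESCR(). They remain ordinary callable functions; as a small illustrative check (not part of the patch), the function and the operator it backs give the same answer:

  -- int4lt backs "<" on integers, int24le backs "<=" on (int2, int4)
  SELECT int4lt(1, 2)        AS via_function, 1 < 2        AS via_operator;
  SELECT int24le(3::int2, 4) AS via_function, 3::int2 <= 4 AS via_operator;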
-DATA(insert OID = 168 ( int24ge PGNSP PGUID 12 1 0 0 0 f f f t t f i s 2 0 16 "21 23" _null_ _null_ _null_ _null_ _null_ int24ge _null_ _null_ _null_ )); -DATA(insert OID = 169 ( int42ge PGNSP PGUID 12 1 0 0 0 f f f t t f i s 2 0 16 "23 21" _null_ _null_ _null_ _null_ _null_ int42ge _null_ _null_ _null_ )); -DATA(insert OID = 170 ( int24mul PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 23 "21 23" _null_ _null_ _null_ _null_ _null_ int24mul _null_ _null_ _null_ )); -DATA(insert OID = 171 ( int42mul PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 23 "23 21" _null_ _null_ _null_ _null_ _null_ int42mul _null_ _null_ _null_ )); -DATA(insert OID = 172 ( int24div PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 23 "21 23" _null_ _null_ _null_ _null_ _null_ int24div _null_ _null_ _null_ )); -DATA(insert OID = 173 ( int42div PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 23 "23 21" _null_ _null_ _null_ _null_ _null_ int42div _null_ _null_ _null_ )); -DATA(insert OID = 176 ( int2pl PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 21 "21 21" _null_ _null_ _null_ _null_ _null_ int2pl _null_ _null_ _null_ )); -DATA(insert OID = 177 ( int4pl PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 23 "23 23" _null_ _null_ _null_ _null_ _null_ int4pl _null_ _null_ _null_ )); -DATA(insert OID = 178 ( int24pl PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 23 "21 23" _null_ _null_ _null_ _null_ _null_ int24pl _null_ _null_ _null_ )); -DATA(insert OID = 179 ( int42pl PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 23 "23 21" _null_ _null_ _null_ _null_ _null_ int42pl _null_ _null_ _null_ )); -DATA(insert OID = 180 ( int2mi PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 21 "21 21" _null_ _null_ _null_ _null_ _null_ int2mi _null_ _null_ _null_ )); -DATA(insert OID = 181 ( int4mi PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 23 "23 23" _null_ _null_ _null_ _null_ _null_ int4mi _null_ _null_ _null_ )); -DATA(insert OID = 182 ( int24mi PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 23 "21 23" _null_ _null_ _null_ _null_ _null_ int24mi _null_ _null_ _null_ )); -DATA(insert OID = 183 ( int42mi PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 23 "23 21" _null_ _null_ _null_ _null_ _null_ int42mi _null_ _null_ _null_ )); -DATA(insert OID = 184 ( oideq PGNSP PGUID 12 1 0 0 0 f f f t t f i s 2 0 16 "26 26" _null_ _null_ _null_ _null_ _null_ oideq _null_ _null_ _null_ )); -DATA(insert OID = 185 ( oidne PGNSP PGUID 12 1 0 0 0 f f f t t f i s 2 0 16 "26 26" _null_ _null_ _null_ _null_ _null_ oidne _null_ _null_ _null_ )); -DATA(insert OID = 186 ( box_same PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 16 "603 603" _null_ _null_ _null_ _null_ _null_ box_same _null_ _null_ _null_ )); -DATA(insert OID = 187 ( box_contain PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 16 "603 603" _null_ _null_ _null_ _null_ _null_ box_contain _null_ _null_ _null_ )); -DATA(insert OID = 188 ( box_left PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 16 "603 603" _null_ _null_ _null_ _null_ _null_ box_left _null_ _null_ _null_ )); -DATA(insert OID = 189 ( box_overleft PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 16 "603 603" _null_ _null_ _null_ _null_ _null_ box_overleft _null_ _null_ _null_ )); -DATA(insert OID = 190 ( box_overright PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 16 "603 603" _null_ _null_ _null_ _null_ _null_ box_overright _null_ _null_ _null_ )); -DATA(insert OID = 191 ( box_right PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 16 "603 603" _null_ _null_ _null_ _null_ _null_ box_right _null_ _null_ _null_ )); -DATA(insert OID = 192 ( box_contained PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 16 "603 603" _null_ 
_null_ _null_ _null_ _null_ box_contained _null_ _null_ _null_ )); -DATA(insert OID = 193 ( box_contain_pt PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 16 "603 600" _null_ _null_ _null_ _null_ _null_ box_contain_pt _null_ _null_ _null_ )); - -DATA(insert OID = 195 ( pg_node_tree_in PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 194 "2275" _null_ _null_ _null_ _null_ _null_ pg_node_tree_in _null_ _null_ _null_ )); -DESCR("I/O"); -DATA(insert OID = 196 ( pg_node_tree_out PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 2275 "194" _null_ _null_ _null_ _null_ _null_ pg_node_tree_out _null_ _null_ _null_ )); -DESCR("I/O"); -DATA(insert OID = 197 ( pg_node_tree_recv PGNSP PGUID 12 1 0 0 0 f f f f t f s s 1 0 194 "2281" _null_ _null_ _null_ _null_ _null_ pg_node_tree_recv _null_ _null_ _null_ )); -DESCR("I/O"); -DATA(insert OID = 198 ( pg_node_tree_send PGNSP PGUID 12 1 0 0 0 f f f f t f s s 1 0 17 "194" _null_ _null_ _null_ _null_ _null_ pg_node_tree_send _null_ _null_ _null_ )); -DESCR("I/O"); - -/* OIDS 200 - 299 */ - -DATA(insert OID = 200 ( float4in PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 700 "2275" _null_ _null_ _null_ _null_ _null_ float4in _null_ _null_ _null_ )); -DESCR("I/O"); -DATA(insert OID = 201 ( float4out PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 2275 "700" _null_ _null_ _null_ _null_ _null_ float4out _null_ _null_ _null_ )); -DESCR("I/O"); -DATA(insert OID = 202 ( float4mul PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 700 "700 700" _null_ _null_ _null_ _null_ _null_ float4mul _null_ _null_ _null_ )); -DATA(insert OID = 203 ( float4div PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 700 "700 700" _null_ _null_ _null_ _null_ _null_ float4div _null_ _null_ _null_ )); -DATA(insert OID = 204 ( float4pl PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 700 "700 700" _null_ _null_ _null_ _null_ _null_ float4pl _null_ _null_ _null_ )); -DATA(insert OID = 205 ( float4mi PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 700 "700 700" _null_ _null_ _null_ _null_ _null_ float4mi _null_ _null_ _null_ )); -DATA(insert OID = 206 ( float4um PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 700 "700" _null_ _null_ _null_ _null_ _null_ float4um _null_ _null_ _null_ )); -DATA(insert OID = 207 ( float4abs PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 700 "700" _null_ _null_ _null_ _null_ _null_ float4abs _null_ _null_ _null_ )); -DATA(insert OID = 208 ( float4_accum PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 1022 "1022 700" _null_ _null_ _null_ _null_ _null_ float4_accum _null_ _null_ _null_ )); -DESCR("aggregate transition function"); -DATA(insert OID = 209 ( float4larger PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 700 "700 700" _null_ _null_ _null_ _null_ _null_ float4larger _null_ _null_ _null_ )); -DESCR("larger of two"); -DATA(insert OID = 211 ( float4smaller PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 700 "700 700" _null_ _null_ _null_ _null_ _null_ float4smaller _null_ _null_ _null_ )); -DESCR("smaller of two"); - -DATA(insert OID = 212 ( int4um PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 23 "23" _null_ _null_ _null_ _null_ _null_ int4um _null_ _null_ _null_ )); -DATA(insert OID = 213 ( int2um PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 21 "21" _null_ _null_ _null_ _null_ _null_ int2um _null_ _null_ _null_ )); - -DATA(insert OID = 214 ( float8in PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 701 "2275" _null_ _null_ _null_ _null_ _null_ float8in _null_ _null_ _null_ )); -DESCR("I/O"); -DATA(insert OID = 215 ( float8out PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 2275 "701" _null_ _null_ _null_ _null_ _null_ float8out _null_ _null_ _null_ 
)); -DESCR("I/O"); -DATA(insert OID = 216 ( float8mul PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 701 "701 701" _null_ _null_ _null_ _null_ _null_ float8mul _null_ _null_ _null_ )); -DATA(insert OID = 217 ( float8div PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 701 "701 701" _null_ _null_ _null_ _null_ _null_ float8div _null_ _null_ _null_ )); -DATA(insert OID = 218 ( float8pl PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 701 "701 701" _null_ _null_ _null_ _null_ _null_ float8pl _null_ _null_ _null_ )); -DATA(insert OID = 219 ( float8mi PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 701 "701 701" _null_ _null_ _null_ _null_ _null_ float8mi _null_ _null_ _null_ )); -DATA(insert OID = 220 ( float8um PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 701 "701" _null_ _null_ _null_ _null_ _null_ float8um _null_ _null_ _null_ )); -DATA(insert OID = 221 ( float8abs PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 701 "701" _null_ _null_ _null_ _null_ _null_ float8abs _null_ _null_ _null_ )); -DATA(insert OID = 222 ( float8_accum PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 1022 "1022 701" _null_ _null_ _null_ _null_ _null_ float8_accum _null_ _null_ _null_ )); -DESCR("aggregate transition function"); -DATA(insert OID = 276 ( float8_combine PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 1022 "1022 1022" _null_ _null_ _null_ _null_ _null_ float8_combine _null_ _null_ _null_ )); -DESCR("aggregate combine function"); -DATA(insert OID = 223 ( float8larger PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 701 "701 701" _null_ _null_ _null_ _null_ _null_ float8larger _null_ _null_ _null_ )); -DESCR("larger of two"); -DATA(insert OID = 224 ( float8smaller PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 701 "701 701" _null_ _null_ _null_ _null_ _null_ float8smaller _null_ _null_ _null_ )); -DESCR("smaller of two"); - -DATA(insert OID = 225 ( lseg_center PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 600 "601" _null_ _null_ _null_ _null_ _null_ lseg_center _null_ _null_ _null_ )); -DATA(insert OID = 226 ( path_center PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 600 "602" _null_ _null_ _null_ _null_ _null_ path_center _null_ _null_ _null_ )); -DATA(insert OID = 227 ( poly_center PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 600 "604" _null_ _null_ _null_ _null_ _null_ poly_center _null_ _null_ _null_ )); - -DATA(insert OID = 228 ( dround PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 701 "701" _null_ _null_ _null_ _null_ _null_ dround _null_ _null_ _null_ )); -DESCR("round to nearest integer"); -DATA(insert OID = 229 ( dtrunc PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 701 "701" _null_ _null_ _null_ _null_ _null_ dtrunc _null_ _null_ _null_ )); -DESCR("truncate to integer"); -DATA(insert OID = 2308 ( ceil PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 701 "701" _null_ _null_ _null_ _null_ _null_ dceil _null_ _null_ _null_ )); -DESCR("nearest integer >= value"); -DATA(insert OID = 2320 ( ceiling PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 701 "701" _null_ _null_ _null_ _null_ _null_ dceil _null_ _null_ _null_ )); -DESCR("nearest integer >= value"); -DATA(insert OID = 2309 ( floor PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 701 "701" _null_ _null_ _null_ _null_ _null_ dfloor _null_ _null_ _null_ )); -DESCR("nearest integer <= value"); -DATA(insert OID = 2310 ( sign PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 701 "701" _null_ _null_ _null_ _null_ _null_ dsign _null_ _null_ _null_ )); -DESCR("sign of value"); -DATA(insert OID = 230 ( dsqrt PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 701 "701" _null_ _null_ _null_ _null_ _null_ dsqrt _null_ _null_ _null_ )); -DATA(insert 
OID = 231 ( dcbrt PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 701 "701" _null_ _null_ _null_ _null_ _null_ dcbrt _null_ _null_ _null_ )); -DATA(insert OID = 232 ( dpow PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 701 "701 701" _null_ _null_ _null_ _null_ _null_ dpow _null_ _null_ _null_ )); -DATA(insert OID = 233 ( dexp PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 701 "701" _null_ _null_ _null_ _null_ _null_ dexp _null_ _null_ _null_ )); -DESCR("natural exponential (e^x)"); -DATA(insert OID = 234 ( dlog1 PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 701 "701" _null_ _null_ _null_ _null_ _null_ dlog1 _null_ _null_ _null_ )); -DESCR("natural logarithm"); -DATA(insert OID = 235 ( float8 PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 701 "21" _null_ _null_ _null_ _null_ _null_ i2tod _null_ _null_ _null_ )); -DESCR("convert int2 to float8"); -DATA(insert OID = 236 ( float4 PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 700 "21" _null_ _null_ _null_ _null_ _null_ i2tof _null_ _null_ _null_ )); -DESCR("convert int2 to float4"); -DATA(insert OID = 237 ( int2 PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 21 "701" _null_ _null_ _null_ _null_ _null_ dtoi2 _null_ _null_ _null_ )); -DESCR("convert float8 to int2"); -DATA(insert OID = 238 ( int2 PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 21 "700" _null_ _null_ _null_ _null_ _null_ ftoi2 _null_ _null_ _null_ )); -DESCR("convert float4 to int2"); -DATA(insert OID = 239 ( line_distance PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 701 "628 628" _null_ _null_ _null_ _null_ _null_ line_distance _null_ _null_ _null_ )); - -DATA(insert OID = 240 ( abstimein PGNSP PGUID 12 1 0 0 0 f f f f t f s s 1 0 702 "2275" _null_ _null_ _null_ _null_ _null_ abstimein _null_ _null_ _null_ )); -DESCR("I/O"); -DATA(insert OID = 241 ( abstimeout PGNSP PGUID 12 1 0 0 0 f f f f t f s s 1 0 2275 "702" _null_ _null_ _null_ _null_ _null_ abstimeout _null_ _null_ _null_ )); -DESCR("I/O"); -DATA(insert OID = 242 ( reltimein PGNSP PGUID 12 1 0 0 0 f f f f t f s s 1 0 703 "2275" _null_ _null_ _null_ _null_ _null_ reltimein _null_ _null_ _null_ )); -DESCR("I/O"); -DATA(insert OID = 243 ( reltimeout PGNSP PGUID 12 1 0 0 0 f f f f t f s s 1 0 2275 "703" _null_ _null_ _null_ _null_ _null_ reltimeout _null_ _null_ _null_ )); -DESCR("I/O"); -DATA(insert OID = 244 ( timepl PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 702 "702 703" _null_ _null_ _null_ _null_ _null_ timepl _null_ _null_ _null_ )); -DATA(insert OID = 245 ( timemi PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 702 "702 703" _null_ _null_ _null_ _null_ _null_ timemi _null_ _null_ _null_ )); -DATA(insert OID = 246 ( tintervalin PGNSP PGUID 12 1 0 0 0 f f f f t f s s 1 0 704 "2275" _null_ _null_ _null_ _null_ _null_ tintervalin _null_ _null_ _null_ )); -DESCR("I/O"); -DATA(insert OID = 247 ( tintervalout PGNSP PGUID 12 1 0 0 0 f f f f t f s s 1 0 2275 "704" _null_ _null_ _null_ _null_ _null_ tintervalout _null_ _null_ _null_ )); -DESCR("I/O"); -DATA(insert OID = 248 ( intinterval PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 16 "702 704" _null_ _null_ _null_ _null_ _null_ intinterval _null_ _null_ _null_ )); -DATA(insert OID = 249 ( tintervalrel PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 703 "704" _null_ _null_ _null_ _null_ _null_ tintervalrel _null_ _null_ _null_ )); -DESCR("tinterval to reltime"); -DATA(insert OID = 250 ( timenow PGNSP PGUID 12 1 0 0 0 f f f f t f s s 0 0 702 "" _null_ _null_ _null_ _null_ _null_ timenow _null_ _null_ _null_ )); -DESCR("current date and time (abstime)"); -DATA(insert OID = 251 ( abstimeeq PGNSP PGUID 12 1 0 0 0 f f f t t f i 
s 2 0 16 "702 702" _null_ _null_ _null_ _null_ _null_ abstimeeq _null_ _null_ _null_ )); -DATA(insert OID = 252 ( abstimene PGNSP PGUID 12 1 0 0 0 f f f t t f i s 2 0 16 "702 702" _null_ _null_ _null_ _null_ _null_ abstimene _null_ _null_ _null_ )); -DATA(insert OID = 253 ( abstimelt PGNSP PGUID 12 1 0 0 0 f f f t t f i s 2 0 16 "702 702" _null_ _null_ _null_ _null_ _null_ abstimelt _null_ _null_ _null_ )); -DATA(insert OID = 254 ( abstimegt PGNSP PGUID 12 1 0 0 0 f f f t t f i s 2 0 16 "702 702" _null_ _null_ _null_ _null_ _null_ abstimegt _null_ _null_ _null_ )); -DATA(insert OID = 255 ( abstimele PGNSP PGUID 12 1 0 0 0 f f f t t f i s 2 0 16 "702 702" _null_ _null_ _null_ _null_ _null_ abstimele _null_ _null_ _null_ )); -DATA(insert OID = 256 ( abstimege PGNSP PGUID 12 1 0 0 0 f f f t t f i s 2 0 16 "702 702" _null_ _null_ _null_ _null_ _null_ abstimege _null_ _null_ _null_ )); -DATA(insert OID = 257 ( reltimeeq PGNSP PGUID 12 1 0 0 0 f f f t t f i s 2 0 16 "703 703" _null_ _null_ _null_ _null_ _null_ reltimeeq _null_ _null_ _null_ )); -DATA(insert OID = 258 ( reltimene PGNSP PGUID 12 1 0 0 0 f f f t t f i s 2 0 16 "703 703" _null_ _null_ _null_ _null_ _null_ reltimene _null_ _null_ _null_ )); -DATA(insert OID = 259 ( reltimelt PGNSP PGUID 12 1 0 0 0 f f f t t f i s 2 0 16 "703 703" _null_ _null_ _null_ _null_ _null_ reltimelt _null_ _null_ _null_ )); -DATA(insert OID = 260 ( reltimegt PGNSP PGUID 12 1 0 0 0 f f f t t f i s 2 0 16 "703 703" _null_ _null_ _null_ _null_ _null_ reltimegt _null_ _null_ _null_ )); -DATA(insert OID = 261 ( reltimele PGNSP PGUID 12 1 0 0 0 f f f t t f i s 2 0 16 "703 703" _null_ _null_ _null_ _null_ _null_ reltimele _null_ _null_ _null_ )); -DATA(insert OID = 262 ( reltimege PGNSP PGUID 12 1 0 0 0 f f f t t f i s 2 0 16 "703 703" _null_ _null_ _null_ _null_ _null_ reltimege _null_ _null_ _null_ )); -DATA(insert OID = 263 ( tintervalsame PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 16 "704 704" _null_ _null_ _null_ _null_ _null_ tintervalsame _null_ _null_ _null_ )); -DATA(insert OID = 264 ( tintervalct PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 16 "704 704" _null_ _null_ _null_ _null_ _null_ tintervalct _null_ _null_ _null_ )); -DATA(insert OID = 265 ( tintervalov PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 16 "704 704" _null_ _null_ _null_ _null_ _null_ tintervalov _null_ _null_ _null_ )); -DATA(insert OID = 266 ( tintervalleneq PGNSP PGUID 12 1 0 0 0 f f f t t f i s 2 0 16 "704 703" _null_ _null_ _null_ _null_ _null_ tintervalleneq _null_ _null_ _null_ )); -DATA(insert OID = 267 ( tintervallenne PGNSP PGUID 12 1 0 0 0 f f f t t f i s 2 0 16 "704 703" _null_ _null_ _null_ _null_ _null_ tintervallenne _null_ _null_ _null_ )); -DATA(insert OID = 268 ( tintervallenlt PGNSP PGUID 12 1 0 0 0 f f f t t f i s 2 0 16 "704 703" _null_ _null_ _null_ _null_ _null_ tintervallenlt _null_ _null_ _null_ )); -DATA(insert OID = 269 ( tintervallengt PGNSP PGUID 12 1 0 0 0 f f f t t f i s 2 0 16 "704 703" _null_ _null_ _null_ _null_ _null_ tintervallengt _null_ _null_ _null_ )); -DATA(insert OID = 270 ( tintervallenle PGNSP PGUID 12 1 0 0 0 f f f t t f i s 2 0 16 "704 703" _null_ _null_ _null_ _null_ _null_ tintervallenle _null_ _null_ _null_ )); -DATA(insert OID = 271 ( tintervallenge PGNSP PGUID 12 1 0 0 0 f f f t t f i s 2 0 16 "704 703" _null_ _null_ _null_ _null_ _null_ tintervallenge _null_ _null_ _null_ )); -DATA(insert OID = 272 ( tintervalstart PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 702 "704" _null_ _null_ _null_ _null_ _null_ tintervalstart _null_ _null_ _null_ )); 
-DATA(insert OID = 273 ( tintervalend PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 702 "704" _null_ _null_ _null_ _null_ _null_ tintervalend _null_ _null_ _null_ )); -DESCR("end of interval"); -DATA(insert OID = 274 ( timeofday PGNSP PGUID 12 1 0 0 0 f f f f t f v s 0 0 25 "" _null_ _null_ _null_ _null_ _null_ timeofday _null_ _null_ _null_ )); -DESCR("current date and time - increments during transactions"); -DATA(insert OID = 275 ( isfinite PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 16 "702" _null_ _null_ _null_ _null_ _null_ abstime_finite _null_ _null_ _null_ )); -DESCR("finite abstime?"); - -DATA(insert OID = 277 ( inter_sl PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 16 "601 628" _null_ _null_ _null_ _null_ _null_ inter_sl _null_ _null_ _null_ )); -DATA(insert OID = 278 ( inter_lb PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 16 "628 603" _null_ _null_ _null_ _null_ _null_ inter_lb _null_ _null_ _null_ )); - -DATA(insert OID = 279 ( float48mul PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 701 "700 701" _null_ _null_ _null_ _null_ _null_ float48mul _null_ _null_ _null_ )); -DATA(insert OID = 280 ( float48div PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 701 "700 701" _null_ _null_ _null_ _null_ _null_ float48div _null_ _null_ _null_ )); -DATA(insert OID = 281 ( float48pl PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 701 "700 701" _null_ _null_ _null_ _null_ _null_ float48pl _null_ _null_ _null_ )); -DATA(insert OID = 282 ( float48mi PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 701 "700 701" _null_ _null_ _null_ _null_ _null_ float48mi _null_ _null_ _null_ )); -DATA(insert OID = 283 ( float84mul PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 701 "701 700" _null_ _null_ _null_ _null_ _null_ float84mul _null_ _null_ _null_ )); -DATA(insert OID = 284 ( float84div PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 701 "701 700" _null_ _null_ _null_ _null_ _null_ float84div _null_ _null_ _null_ )); -DATA(insert OID = 285 ( float84pl PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 701 "701 700" _null_ _null_ _null_ _null_ _null_ float84pl _null_ _null_ _null_ )); -DATA(insert OID = 286 ( float84mi PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 701 "701 700" _null_ _null_ _null_ _null_ _null_ float84mi _null_ _null_ _null_ )); - -DATA(insert OID = 287 ( float4eq PGNSP PGUID 12 1 0 0 0 f f f t t f i s 2 0 16 "700 700" _null_ _null_ _null_ _null_ _null_ float4eq _null_ _null_ _null_ )); -DATA(insert OID = 288 ( float4ne PGNSP PGUID 12 1 0 0 0 f f f t t f i s 2 0 16 "700 700" _null_ _null_ _null_ _null_ _null_ float4ne _null_ _null_ _null_ )); -DATA(insert OID = 289 ( float4lt PGNSP PGUID 12 1 0 0 0 f f f t t f i s 2 0 16 "700 700" _null_ _null_ _null_ _null_ _null_ float4lt _null_ _null_ _null_ )); -DATA(insert OID = 290 ( float4le PGNSP PGUID 12 1 0 0 0 f f f t t f i s 2 0 16 "700 700" _null_ _null_ _null_ _null_ _null_ float4le _null_ _null_ _null_ )); -DATA(insert OID = 291 ( float4gt PGNSP PGUID 12 1 0 0 0 f f f t t f i s 2 0 16 "700 700" _null_ _null_ _null_ _null_ _null_ float4gt _null_ _null_ _null_ )); -DATA(insert OID = 292 ( float4ge PGNSP PGUID 12 1 0 0 0 f f f t t f i s 2 0 16 "700 700" _null_ _null_ _null_ _null_ _null_ float4ge _null_ _null_ _null_ )); - -DATA(insert OID = 293 ( float8eq PGNSP PGUID 12 1 0 0 0 f f f t t f i s 2 0 16 "701 701" _null_ _null_ _null_ _null_ _null_ float8eq _null_ _null_ _null_ )); -DATA(insert OID = 294 ( float8ne PGNSP PGUID 12 1 0 0 0 f f f t t f i s 2 0 16 "701 701" _null_ _null_ _null_ _null_ _null_ float8ne _null_ _null_ _null_ )); -DATA(insert OID = 295 ( float8lt PGNSP PGUID 12 1 0 
0 0 f f f t t f i s 2 0 16 "701 701" _null_ _null_ _null_ _null_ _null_ float8lt _null_ _null_ _null_ )); -DATA(insert OID = 296 ( float8le PGNSP PGUID 12 1 0 0 0 f f f t t f i s 2 0 16 "701 701" _null_ _null_ _null_ _null_ _null_ float8le _null_ _null_ _null_ )); -DATA(insert OID = 297 ( float8gt PGNSP PGUID 12 1 0 0 0 f f f t t f i s 2 0 16 "701 701" _null_ _null_ _null_ _null_ _null_ float8gt _null_ _null_ _null_ )); -DATA(insert OID = 298 ( float8ge PGNSP PGUID 12 1 0 0 0 f f f t t f i s 2 0 16 "701 701" _null_ _null_ _null_ _null_ _null_ float8ge _null_ _null_ _null_ )); - -DATA(insert OID = 299 ( float48eq PGNSP PGUID 12 1 0 0 0 f f f t t f i s 2 0 16 "700 701" _null_ _null_ _null_ _null_ _null_ float48eq _null_ _null_ _null_ )); - -/* OIDS 300 - 399 */ - -DATA(insert OID = 300 ( float48ne PGNSP PGUID 12 1 0 0 0 f f f t t f i s 2 0 16 "700 701" _null_ _null_ _null_ _null_ _null_ float48ne _null_ _null_ _null_ )); -DATA(insert OID = 301 ( float48lt PGNSP PGUID 12 1 0 0 0 f f f t t f i s 2 0 16 "700 701" _null_ _null_ _null_ _null_ _null_ float48lt _null_ _null_ _null_ )); -DATA(insert OID = 302 ( float48le PGNSP PGUID 12 1 0 0 0 f f f t t f i s 2 0 16 "700 701" _null_ _null_ _null_ _null_ _null_ float48le _null_ _null_ _null_ )); -DATA(insert OID = 303 ( float48gt PGNSP PGUID 12 1 0 0 0 f f f t t f i s 2 0 16 "700 701" _null_ _null_ _null_ _null_ _null_ float48gt _null_ _null_ _null_ )); -DATA(insert OID = 304 ( float48ge PGNSP PGUID 12 1 0 0 0 f f f t t f i s 2 0 16 "700 701" _null_ _null_ _null_ _null_ _null_ float48ge _null_ _null_ _null_ )); -DATA(insert OID = 305 ( float84eq PGNSP PGUID 12 1 0 0 0 f f f t t f i s 2 0 16 "701 700" _null_ _null_ _null_ _null_ _null_ float84eq _null_ _null_ _null_ )); -DATA(insert OID = 306 ( float84ne PGNSP PGUID 12 1 0 0 0 f f f t t f i s 2 0 16 "701 700" _null_ _null_ _null_ _null_ _null_ float84ne _null_ _null_ _null_ )); -DATA(insert OID = 307 ( float84lt PGNSP PGUID 12 1 0 0 0 f f f t t f i s 2 0 16 "701 700" _null_ _null_ _null_ _null_ _null_ float84lt _null_ _null_ _null_ )); -DATA(insert OID = 308 ( float84le PGNSP PGUID 12 1 0 0 0 f f f t t f i s 2 0 16 "701 700" _null_ _null_ _null_ _null_ _null_ float84le _null_ _null_ _null_ )); -DATA(insert OID = 309 ( float84gt PGNSP PGUID 12 1 0 0 0 f f f t t f i s 2 0 16 "701 700" _null_ _null_ _null_ _null_ _null_ float84gt _null_ _null_ _null_ )); -DATA(insert OID = 310 ( float84ge PGNSP PGUID 12 1 0 0 0 f f f t t f i s 2 0 16 "701 700" _null_ _null_ _null_ _null_ _null_ float84ge _null_ _null_ _null_ )); -DATA(insert OID = 320 ( width_bucket PGNSP PGUID 12 1 0 0 0 f f f f t f i s 4 0 23 "701 701 701 23" _null_ _null_ _null_ _null_ _null_ width_bucket_float8 _null_ _null_ _null_ )); -DESCR("bucket number of operand in equal-width histogram"); - -DATA(insert OID = 311 ( float8 PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 701 "700" _null_ _null_ _null_ _null_ _null_ ftod _null_ _null_ _null_ )); -DESCR("convert float4 to float8"); -DATA(insert OID = 312 ( float4 PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 700 "701" _null_ _null_ _null_ _null_ _null_ dtof _null_ _null_ _null_ )); -DESCR("convert float8 to float4"); -DATA(insert OID = 313 ( int4 PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 23 "21" _null_ _null_ _null_ _null_ _null_ i2toi4 _null_ _null_ _null_ )); -DESCR("convert int2 to int4"); -DATA(insert OID = 314 ( int2 PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 21 "23" _null_ _null_ _null_ _null_ _null_ i4toi2 _null_ _null_ _null_ )); -DESCR("convert int4 to int2"); -DATA(insert OID = 316 ( float8 
PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 701 "23" _null_ _null_ _null_ _null_ _null_ i4tod _null_ _null_ _null_ )); -DESCR("convert int4 to float8"); -DATA(insert OID = 317 ( int4 PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 23 "701" _null_ _null_ _null_ _null_ _null_ dtoi4 _null_ _null_ _null_ )); -DESCR("convert float8 to int4"); -DATA(insert OID = 318 ( float4 PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 700 "23" _null_ _null_ _null_ _null_ _null_ i4tof _null_ _null_ _null_ )); -DESCR("convert int4 to float4"); -DATA(insert OID = 319 ( int4 PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 23 "700" _null_ _null_ _null_ _null_ _null_ ftoi4 _null_ _null_ _null_ )); -DESCR("convert float4 to int4"); - -/* Index access method handlers */ -DATA(insert OID = 330 ( bthandler PGNSP PGUID 12 1 0 0 0 f f f f t f v s 1 0 325 "2281" _null_ _null_ _null_ _null_ _null_ bthandler _null_ _null_ _null_ )); -DESCR("btree index access method handler"); -DATA(insert OID = 331 ( hashhandler PGNSP PGUID 12 1 0 0 0 f f f f t f v s 1 0 325 "2281" _null_ _null_ _null_ _null_ _null_ hashhandler _null_ _null_ _null_ )); -DESCR("hash index access method handler"); -DATA(insert OID = 332 ( gisthandler PGNSP PGUID 12 1 0 0 0 f f f f t f v s 1 0 325 "2281" _null_ _null_ _null_ _null_ _null_ gisthandler _null_ _null_ _null_ )); -DESCR("gist index access method handler"); -DATA(insert OID = 333 ( ginhandler PGNSP PGUID 12 1 0 0 0 f f f f t f v s 1 0 325 "2281" _null_ _null_ _null_ _null_ _null_ ginhandler _null_ _null_ _null_ )); -DESCR("gin index access method handler"); -DATA(insert OID = 334 ( spghandler PGNSP PGUID 12 1 0 0 0 f f f f t f v s 1 0 325 "2281" _null_ _null_ _null_ _null_ _null_ spghandler _null_ _null_ _null_ )); -DESCR("spgist index access method handler"); -DATA(insert OID = 335 ( brinhandler PGNSP PGUID 12 1 0 0 0 f f f f t f v s 1 0 325 "2281" _null_ _null_ _null_ _null_ _null_ brinhandler _null_ _null_ _null_ )); -DESCR("brin index access method handler"); -DATA(insert OID = 3952 ( brin_summarize_new_values PGNSP PGUID 12 1 0 0 0 f f f f t f v s 1 0 23 "2205" _null_ _null_ _null_ _null_ _null_ brin_summarize_new_values _null_ _null_ _null_ )); -DESCR("brin: standalone scan new table pages"); -DATA(insert OID = 3999 ( brin_summarize_range PGNSP PGUID 12 1 0 0 0 f f f f t f v s 2 0 23 "2205 20" _null_ _null_ _null_ _null_ _null_ brin_summarize_range _null_ _null_ _null_ )); -DESCR("brin: standalone scan new table pages"); -DATA(insert OID = 4014 ( brin_desummarize_range PGNSP PGUID 12 1 0 0 0 f f f f t f v s 2 0 2278 "2205 20" _null_ _null_ _null_ _null_ _null_ brin_desummarize_range _null_ _null_ _null_ )); -DESCR("brin: desummarize page range"); - -DATA(insert OID = 338 ( amvalidate PGNSP PGUID 12 1 0 0 0 f f f f t f v s 1 0 16 "26" _null_ _null_ _null_ _null_ _null_ amvalidate _null_ _null_ _null_ )); -DESCR("validate an operator class"); - -DATA(insert OID = 636 ( pg_indexam_has_property PGNSP PGUID 12 1 0 0 0 f f f f t f s s 2 0 16 "26 25" _null_ _null_ _null_ _null_ _null_ pg_indexam_has_property _null_ _null_ _null_ )); -DESCR("test property of an index access method"); -DATA(insert OID = 637 ( pg_index_has_property PGNSP PGUID 12 1 0 0 0 f f f f t f s s 2 0 16 "2205 25" _null_ _null_ _null_ _null_ _null_ pg_index_has_property _null_ _null_ _null_ )); -DESCR("test property of an index"); -DATA(insert OID = 638 ( pg_index_column_has_property PGNSP PGUID 12 1 0 0 0 f f f f t f s s 3 0 16 "2205 23 25" _null_ _null_ _null_ _null_ _null_ pg_index_column_has_property _null_ _null_ _null_ )); -DESCR("test 
property of an index column"); - -DATA(insert OID = 339 ( poly_same PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 16 "604 604" _null_ _null_ _null_ _null_ _null_ poly_same _null_ _null_ _null_ )); -DATA(insert OID = 340 ( poly_contain PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 16 "604 604" _null_ _null_ _null_ _null_ _null_ poly_contain _null_ _null_ _null_ )); -DATA(insert OID = 341 ( poly_left PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 16 "604 604" _null_ _null_ _null_ _null_ _null_ poly_left _null_ _null_ _null_ )); -DATA(insert OID = 342 ( poly_overleft PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 16 "604 604" _null_ _null_ _null_ _null_ _null_ poly_overleft _null_ _null_ _null_ )); -DATA(insert OID = 343 ( poly_overright PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 16 "604 604" _null_ _null_ _null_ _null_ _null_ poly_overright _null_ _null_ _null_ )); -DATA(insert OID = 344 ( poly_right PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 16 "604 604" _null_ _null_ _null_ _null_ _null_ poly_right _null_ _null_ _null_ )); -DATA(insert OID = 345 ( poly_contained PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 16 "604 604" _null_ _null_ _null_ _null_ _null_ poly_contained _null_ _null_ _null_ )); -DATA(insert OID = 346 ( poly_overlap PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 16 "604 604" _null_ _null_ _null_ _null_ _null_ poly_overlap _null_ _null_ _null_ )); -DATA(insert OID = 347 ( poly_in PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 604 "2275" _null_ _null_ _null_ _null_ _null_ poly_in _null_ _null_ _null_ )); -DESCR("I/O"); -DATA(insert OID = 348 ( poly_out PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 2275 "604" _null_ _null_ _null_ _null_ _null_ poly_out _null_ _null_ _null_ )); -DESCR("I/O"); - -DATA(insert OID = 350 ( btint2cmp PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 23 "21 21" _null_ _null_ _null_ _null_ _null_ btint2cmp _null_ _null_ _null_ )); -DESCR("less-equal-greater"); -DATA(insert OID = 3129 ( btint2sortsupport PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 2278 "2281" _null_ _null_ _null_ _null_ _null_ btint2sortsupport _null_ _null_ _null_ )); -DESCR("sort support"); -DATA(insert OID = 351 ( btint4cmp PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 23 "23 23" _null_ _null_ _null_ _null_ _null_ btint4cmp _null_ _null_ _null_ )); -DESCR("less-equal-greater"); -DATA(insert OID = 3130 ( btint4sortsupport PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 2278 "2281" _null_ _null_ _null_ _null_ _null_ btint4sortsupport _null_ _null_ _null_ )); -DESCR("sort support"); -DATA(insert OID = 842 ( btint8cmp PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 23 "20 20" _null_ _null_ _null_ _null_ _null_ btint8cmp _null_ _null_ _null_ )); -DESCR("less-equal-greater"); -DATA(insert OID = 3131 ( btint8sortsupport PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 2278 "2281" _null_ _null_ _null_ _null_ _null_ btint8sortsupport _null_ _null_ _null_ )); -DESCR("sort support"); -DATA(insert OID = 354 ( btfloat4cmp PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 23 "700 700" _null_ _null_ _null_ _null_ _null_ btfloat4cmp _null_ _null_ _null_ )); -DESCR("less-equal-greater"); -DATA(insert OID = 3132 ( btfloat4sortsupport PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 2278 "2281" _null_ _null_ _null_ _null_ _null_ btfloat4sortsupport _null_ _null_ _null_ )); -DESCR("sort support"); -DATA(insert OID = 355 ( btfloat8cmp PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 23 "701 701" _null_ _null_ _null_ _null_ _null_ btfloat8cmp _null_ _null_ _null_ )); -DESCR("less-equal-greater"); -DATA(insert OID = 3133 ( btfloat8sortsupport PGNSP PGUID 12 1 0 0 0 f f f f t f i 
s 1 0 2278 "2281" _null_ _null_ _null_ _null_ _null_ btfloat8sortsupport _null_ _null_ _null_ )); -DESCR("sort support"); -DATA(insert OID = 356 ( btoidcmp PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 23 "26 26" _null_ _null_ _null_ _null_ _null_ btoidcmp _null_ _null_ _null_ )); -DESCR("less-equal-greater"); -DATA(insert OID = 3134 ( btoidsortsupport PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 2278 "2281" _null_ _null_ _null_ _null_ _null_ btoidsortsupport _null_ _null_ _null_ )); -DESCR("sort support"); -DATA(insert OID = 404 ( btoidvectorcmp PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 23 "30 30" _null_ _null_ _null_ _null_ _null_ btoidvectorcmp _null_ _null_ _null_ )); -DESCR("less-equal-greater"); -DATA(insert OID = 357 ( btabstimecmp PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 23 "702 702" _null_ _null_ _null_ _null_ _null_ btabstimecmp _null_ _null_ _null_ )); -DESCR("less-equal-greater"); -DATA(insert OID = 358 ( btcharcmp PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 23 "18 18" _null_ _null_ _null_ _null_ _null_ btcharcmp _null_ _null_ _null_ )); -DESCR("less-equal-greater"); -DATA(insert OID = 359 ( btnamecmp PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 23 "19 19" _null_ _null_ _null_ _null_ _null_ btnamecmp _null_ _null_ _null_ )); -DESCR("less-equal-greater"); -DATA(insert OID = 3135 ( btnamesortsupport PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 2278 "2281" _null_ _null_ _null_ _null_ _null_ btnamesortsupport _null_ _null_ _null_ )); -DESCR("sort support"); -DATA(insert OID = 360 ( bttextcmp PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 23 "25 25" _null_ _null_ _null_ _null_ _null_ bttextcmp _null_ _null_ _null_ )); -DESCR("less-equal-greater"); -DATA(insert OID = 3255 ( bttextsortsupport PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 2278 "2281" _null_ _null_ _null_ _null_ _null_ bttextsortsupport _null_ _null_ _null_ )); -DESCR("sort support"); -DATA(insert OID = 377 ( cash_cmp PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 23 "790 790" _null_ _null_ _null_ _null_ _null_ cash_cmp _null_ _null_ _null_ )); -DESCR("less-equal-greater"); -DATA(insert OID = 380 ( btreltimecmp PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 23 "703 703" _null_ _null_ _null_ _null_ _null_ btreltimecmp _null_ _null_ _null_ )); -DESCR("less-equal-greater"); -DATA(insert OID = 381 ( bttintervalcmp PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 23 "704 704" _null_ _null_ _null_ _null_ _null_ bttintervalcmp _null_ _null_ _null_ )); -DESCR("less-equal-greater"); -DATA(insert OID = 382 ( btarraycmp PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 23 "2277 2277" _null_ _null_ _null_ _null_ _null_ btarraycmp _null_ _null_ _null_ )); -DESCR("less-equal-greater"); - -DATA(insert OID = 361 ( lseg_distance PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 701 "601 601" _null_ _null_ _null_ _null_ _null_ lseg_distance _null_ _null_ _null_ )); -DATA(insert OID = 362 ( lseg_interpt PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 600 "601 601" _null_ _null_ _null_ _null_ _null_ lseg_interpt _null_ _null_ _null_ )); -DATA(insert OID = 363 ( dist_ps PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 701 "600 601" _null_ _null_ _null_ _null_ _null_ dist_ps _null_ _null_ _null_ )); -DATA(insert OID = 364 ( dist_pb PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 701 "600 603" _null_ _null_ _null_ _null_ _null_ dist_pb _null_ _null_ _null_ )); -DATA(insert OID = 365 ( dist_sb PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 701 "601 603" _null_ _null_ _null_ _null_ _null_ dist_sb _null_ _null_ _null_ )); -DATA(insert OID = 366 ( close_ps PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 600 "600 
601" _null_ _null_ _null_ _null_ _null_ close_ps _null_ _null_ _null_ )); -DATA(insert OID = 367 ( close_pb PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 600 "600 603" _null_ _null_ _null_ _null_ _null_ close_pb _null_ _null_ _null_ )); -DATA(insert OID = 368 ( close_sb PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 600 "601 603" _null_ _null_ _null_ _null_ _null_ close_sb _null_ _null_ _null_ )); -DATA(insert OID = 369 ( on_ps PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 16 "600 601" _null_ _null_ _null_ _null_ _null_ on_ps _null_ _null_ _null_ )); -DATA(insert OID = 370 ( path_distance PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 701 "602 602" _null_ _null_ _null_ _null_ _null_ path_distance _null_ _null_ _null_ )); -DATA(insert OID = 371 ( dist_ppath PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 701 "600 602" _null_ _null_ _null_ _null_ _null_ dist_ppath _null_ _null_ _null_ )); -DATA(insert OID = 372 ( on_sb PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 16 "601 603" _null_ _null_ _null_ _null_ _null_ on_sb _null_ _null_ _null_ )); -DATA(insert OID = 373 ( inter_sb PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 16 "601 603" _null_ _null_ _null_ _null_ _null_ inter_sb _null_ _null_ _null_ )); - -/* OIDS 400 - 499 */ - -DATA(insert OID = 401 ( text PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 25 "1042" _null_ _null_ _null_ _null_ _null_ rtrim1 _null_ _null_ _null_ )); -DESCR("convert char(n) to text"); -DATA(insert OID = 406 ( text PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 25 "19" _null_ _null_ _null_ _null_ _null_ name_text _null_ _null_ _null_ )); -DESCR("convert name to text"); -DATA(insert OID = 407 ( name PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 19 "25" _null_ _null_ _null_ _null_ _null_ text_name _null_ _null_ _null_ )); -DESCR("convert text to name"); -DATA(insert OID = 408 ( bpchar PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 1042 "19" _null_ _null_ _null_ _null_ _null_ name_bpchar _null_ _null_ _null_ )); -DESCR("convert name to char(n)"); -DATA(insert OID = 409 ( name PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 19 "1042" _null_ _null_ _null_ _null_ _null_ bpchar_name _null_ _null_ _null_ )); -DESCR("convert char(n) to name"); - -DATA(insert OID = 449 ( hashint2 PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 23 "21" _null_ _null_ _null_ _null_ _null_ hashint2 _null_ _null_ _null_ )); -DESCR("hash"); -DATA(insert OID = 450 ( hashint4 PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 23 "23" _null_ _null_ _null_ _null_ _null_ hashint4 _null_ _null_ _null_ )); -DESCR("hash"); -DATA(insert OID = 949 ( hashint8 PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 23 "20" _null_ _null_ _null_ _null_ _null_ hashint8 _null_ _null_ _null_ )); -DESCR("hash"); -DATA(insert OID = 451 ( hashfloat4 PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 23 "700" _null_ _null_ _null_ _null_ _null_ hashfloat4 _null_ _null_ _null_ )); -DESCR("hash"); -DATA(insert OID = 452 ( hashfloat8 PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 23 "701" _null_ _null_ _null_ _null_ _null_ hashfloat8 _null_ _null_ _null_ )); -DESCR("hash"); -DATA(insert OID = 453 ( hashoid PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 23 "26" _null_ _null_ _null_ _null_ _null_ hashoid _null_ _null_ _null_ )); -DESCR("hash"); -DATA(insert OID = 454 ( hashchar PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 23 "18" _null_ _null_ _null_ _null_ _null_ hashchar _null_ _null_ _null_ )); -DESCR("hash"); -DATA(insert OID = 455 ( hashname PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 23 "19" _null_ _null_ _null_ _null_ _null_ hashname _null_ _null_ _null_ )); -DESCR("hash"); -DATA(insert OID = 400 ( 
hashtext PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 23 "25" _null_ _null_ _null_ _null_ _null_ hashtext _null_ _null_ _null_ )); -DESCR("hash"); -DATA(insert OID = 456 ( hashvarlena PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 23 "2281" _null_ _null_ _null_ _null_ _null_ hashvarlena _null_ _null_ _null_ )); -DESCR("hash"); -DATA(insert OID = 457 ( hashoidvector PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 23 "30" _null_ _null_ _null_ _null_ _null_ hashoidvector _null_ _null_ _null_ )); -DESCR("hash"); -DATA(insert OID = 329 ( hash_aclitem PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 23 "1033" _null_ _null_ _null_ _null_ _null_ hash_aclitem _null_ _null_ _null_ )); -DESCR("hash"); -DATA(insert OID = 399 ( hashmacaddr PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 23 "829" _null_ _null_ _null_ _null_ _null_ hashmacaddr _null_ _null_ _null_ )); -DESCR("hash"); -DATA(insert OID = 422 ( hashinet PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 23 "869" _null_ _null_ _null_ _null_ _null_ hashinet _null_ _null_ _null_ )); -DESCR("hash"); -DATA(insert OID = 432 ( hash_numeric PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 23 "1700" _null_ _null_ _null_ _null_ _null_ hash_numeric _null_ _null_ _null_ )); -DESCR("hash"); -DATA(insert OID = 328 ( hashmacaddr8 PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 23 "774" _null_ _null_ _null_ _null_ _null_ hashmacaddr8 _null_ _null_ _null_ )); -DESCR("hash"); - -DATA(insert OID = 438 ( num_nulls PGNSP PGUID 12 1 0 2276 0 f f f f f f i s 1 0 23 "2276" "{2276}" "{v}" _null_ _null_ _null_ pg_num_nulls _null_ _null_ _null_ )); -DESCR("count the number of NULL arguments"); -DATA(insert OID = 440 ( num_nonnulls PGNSP PGUID 12 1 0 2276 0 f f f f f f i s 1 0 23 "2276" "{2276}" "{v}" _null_ _null_ _null_ pg_num_nonnulls _null_ _null_ _null_ )); -DESCR("count the number of non-NULL arguments"); - -DATA(insert OID = 458 ( text_larger PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 25 "25 25" _null_ _null_ _null_ _null_ _null_ text_larger _null_ _null_ _null_ )); -DESCR("larger of two"); -DATA(insert OID = 459 ( text_smaller PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 25 "25 25" _null_ _null_ _null_ _null_ _null_ text_smaller _null_ _null_ _null_ )); -DESCR("smaller of two"); - -DATA(insert OID = 460 ( int8in PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 20 "2275" _null_ _null_ _null_ _null_ _null_ int8in _null_ _null_ _null_ )); -DESCR("I/O"); -DATA(insert OID = 461 ( int8out PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 2275 "20" _null_ _null_ _null_ _null_ _null_ int8out _null_ _null_ _null_ )); -DESCR("I/O"); -DATA(insert OID = 462 ( int8um PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 20 "20" _null_ _null_ _null_ _null_ _null_ int8um _null_ _null_ _null_ )); -DATA(insert OID = 463 ( int8pl PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 20 "20 20" _null_ _null_ _null_ _null_ _null_ int8pl _null_ _null_ _null_ )); -DATA(insert OID = 464 ( int8mi PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 20 "20 20" _null_ _null_ _null_ _null_ _null_ int8mi _null_ _null_ _null_ )); -DATA(insert OID = 465 ( int8mul PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 20 "20 20" _null_ _null_ _null_ _null_ _null_ int8mul _null_ _null_ _null_ )); -DATA(insert OID = 466 ( int8div PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 20 "20 20" _null_ _null_ _null_ _null_ _null_ int8div _null_ _null_ _null_ )); -DATA(insert OID = 467 ( int8eq PGNSP PGUID 12 1 0 0 0 f f f t t f i s 2 0 16 "20 20" _null_ _null_ _null_ _null_ _null_ int8eq _null_ _null_ _null_ )); -DATA(insert OID = 468 ( int8ne PGNSP PGUID 12 1 0 0 0 f f f t t f i s 2 0 16 "20 20" _null_ 
_null_ _null_ _null_ _null_ int8ne _null_ _null_ _null_ )); -DATA(insert OID = 469 ( int8lt PGNSP PGUID 12 1 0 0 0 f f f t t f i s 2 0 16 "20 20" _null_ _null_ _null_ _null_ _null_ int8lt _null_ _null_ _null_ )); -DATA(insert OID = 470 ( int8gt PGNSP PGUID 12 1 0 0 0 f f f t t f i s 2 0 16 "20 20" _null_ _null_ _null_ _null_ _null_ int8gt _null_ _null_ _null_ )); -DATA(insert OID = 471 ( int8le PGNSP PGUID 12 1 0 0 0 f f f t t f i s 2 0 16 "20 20" _null_ _null_ _null_ _null_ _null_ int8le _null_ _null_ _null_ )); -DATA(insert OID = 472 ( int8ge PGNSP PGUID 12 1 0 0 0 f f f t t f i s 2 0 16 "20 20" _null_ _null_ _null_ _null_ _null_ int8ge _null_ _null_ _null_ )); - -DATA(insert OID = 474 ( int84eq PGNSP PGUID 12 1 0 0 0 f f f t t f i s 2 0 16 "20 23" _null_ _null_ _null_ _null_ _null_ int84eq _null_ _null_ _null_ )); -DATA(insert OID = 475 ( int84ne PGNSP PGUID 12 1 0 0 0 f f f t t f i s 2 0 16 "20 23" _null_ _null_ _null_ _null_ _null_ int84ne _null_ _null_ _null_ )); -DATA(insert OID = 476 ( int84lt PGNSP PGUID 12 1 0 0 0 f f f t t f i s 2 0 16 "20 23" _null_ _null_ _null_ _null_ _null_ int84lt _null_ _null_ _null_ )); -DATA(insert OID = 477 ( int84gt PGNSP PGUID 12 1 0 0 0 f f f t t f i s 2 0 16 "20 23" _null_ _null_ _null_ _null_ _null_ int84gt _null_ _null_ _null_ )); -DATA(insert OID = 478 ( int84le PGNSP PGUID 12 1 0 0 0 f f f t t f i s 2 0 16 "20 23" _null_ _null_ _null_ _null_ _null_ int84le _null_ _null_ _null_ )); -DATA(insert OID = 479 ( int84ge PGNSP PGUID 12 1 0 0 0 f f f t t f i s 2 0 16 "20 23" _null_ _null_ _null_ _null_ _null_ int84ge _null_ _null_ _null_ )); - -DATA(insert OID = 480 ( int4 PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 23 "20" _null_ _null_ _null_ _null_ _null_ int84 _null_ _null_ _null_ )); -DESCR("convert int8 to int4"); -DATA(insert OID = 481 ( int8 PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 20 "23" _null_ _null_ _null_ _null_ _null_ int48 _null_ _null_ _null_ )); -DESCR("convert int4 to int8"); -DATA(insert OID = 482 ( float8 PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 701 "20" _null_ _null_ _null_ _null_ _null_ i8tod _null_ _null_ _null_ )); -DESCR("convert int8 to float8"); -DATA(insert OID = 483 ( int8 PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 20 "701" _null_ _null_ _null_ _null_ _null_ dtoi8 _null_ _null_ _null_ )); -DESCR("convert float8 to int8"); - -/* OIDS 500 - 599 */ - -/* OIDS 600 - 699 */ - -DATA(insert OID = 626 ( hash_array PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 23 "2277" _null_ _null_ _null_ _null_ _null_ hash_array _null_ _null_ _null_ )); -DESCR("hash"); - -DATA(insert OID = 652 ( float4 PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 700 "20" _null_ _null_ _null_ _null_ _null_ i8tof _null_ _null_ _null_ )); -DESCR("convert int8 to float4"); -DATA(insert OID = 653 ( int8 PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 20 "700" _null_ _null_ _null_ _null_ _null_ ftoi8 _null_ _null_ _null_ )); -DESCR("convert float4 to int8"); - -DATA(insert OID = 714 ( int2 PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 21 "20" _null_ _null_ _null_ _null_ _null_ int82 _null_ _null_ _null_ )); -DESCR("convert int8 to int2"); -DATA(insert OID = 754 ( int8 PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 20 "21" _null_ _null_ _null_ _null_ _null_ int28 _null_ _null_ _null_ )); -DESCR("convert int2 to int8"); - -DATA(insert OID = 655 ( namelt PGNSP PGUID 12 1 0 0 0 f f f t t f i s 2 0 16 "19 19" _null_ _null_ _null_ _null_ _null_ namelt _null_ _null_ _null_ )); -DATA(insert OID = 656 ( namele PGNSP PGUID 12 1 0 0 0 f f f t t f i s 2 0 16 "19 19" _null_ _null_ _null_ _null_ 
_null_ namele _null_ _null_ _null_ )); -DATA(insert OID = 657 ( namegt PGNSP PGUID 12 1 0 0 0 f f f t t f i s 2 0 16 "19 19" _null_ _null_ _null_ _null_ _null_ namegt _null_ _null_ _null_ )); -DATA(insert OID = 658 ( namege PGNSP PGUID 12 1 0 0 0 f f f t t f i s 2 0 16 "19 19" _null_ _null_ _null_ _null_ _null_ namege _null_ _null_ _null_ )); -DATA(insert OID = 659 ( namene PGNSP PGUID 12 1 0 0 0 f f f t t f i s 2 0 16 "19 19" _null_ _null_ _null_ _null_ _null_ namene _null_ _null_ _null_ )); - -DATA(insert OID = 668 ( bpchar PGNSP PGUID 12 1 0 0 0 f f f f t f i s 3 0 1042 "1042 23 16" _null_ _null_ _null_ _null_ _null_ bpchar _null_ _null_ _null_ )); -DESCR("adjust char() to typmod length"); -DATA(insert OID = 3097 ( varchar_transform PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 2281 "2281" _null_ _null_ _null_ _null_ _null_ varchar_transform _null_ _null_ _null_ )); -DESCR("transform a varchar length coercion"); -DATA(insert OID = 669 ( varchar PGNSP PGUID 12 1 0 0 varchar_transform f f f f t f i s 3 0 1043 "1043 23 16" _null_ _null_ _null_ _null_ _null_ varchar _null_ _null_ _null_ )); -DESCR("adjust varchar() to typmod length"); - -DATA(insert OID = 676 ( mktinterval PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 704 "702 702" _null_ _null_ _null_ _null_ _null_ mktinterval _null_ _null_ _null_ )); - -DATA(insert OID = 619 ( oidvectorne PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 16 "30 30" _null_ _null_ _null_ _null_ _null_ oidvectorne _null_ _null_ _null_ )); -DATA(insert OID = 677 ( oidvectorlt PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 16 "30 30" _null_ _null_ _null_ _null_ _null_ oidvectorlt _null_ _null_ _null_ )); -DATA(insert OID = 678 ( oidvectorle PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 16 "30 30" _null_ _null_ _null_ _null_ _null_ oidvectorle _null_ _null_ _null_ )); -DATA(insert OID = 679 ( oidvectoreq PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 16 "30 30" _null_ _null_ _null_ _null_ _null_ oidvectoreq _null_ _null_ _null_ )); -DATA(insert OID = 680 ( oidvectorge PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 16 "30 30" _null_ _null_ _null_ _null_ _null_ oidvectorge _null_ _null_ _null_ )); -DATA(insert OID = 681 ( oidvectorgt PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 16 "30 30" _null_ _null_ _null_ _null_ _null_ oidvectorgt _null_ _null_ _null_ )); - -/* OIDS 700 - 799 */ -DATA(insert OID = 710 ( getpgusername PGNSP PGUID 12 1 0 0 0 f f f f t f s s 0 0 19 "" _null_ _null_ _null_ _null_ _null_ current_user _null_ _null_ _null_ )); -DESCR("deprecated, use current_user instead"); -DATA(insert OID = 716 ( oidlt PGNSP PGUID 12 1 0 0 0 f f f t t f i s 2 0 16 "26 26" _null_ _null_ _null_ _null_ _null_ oidlt _null_ _null_ _null_ )); -DATA(insert OID = 717 ( oidle PGNSP PGUID 12 1 0 0 0 f f f t t f i s 2 0 16 "26 26" _null_ _null_ _null_ _null_ _null_ oidle _null_ _null_ _null_ )); - -DATA(insert OID = 720 ( octet_length PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 23 "17" _null_ _null_ _null_ _null_ _null_ byteaoctetlen _null_ _null_ _null_ )); -DESCR("octet length"); -DATA(insert OID = 721 ( get_byte PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 23 "17 23" _null_ _null_ _null_ _null_ _null_ byteaGetByte _null_ _null_ _null_ )); -DESCR("get byte"); -DATA(insert OID = 722 ( set_byte PGNSP PGUID 12 1 0 0 0 f f f f t f i s 3 0 17 "17 23 23" _null_ _null_ _null_ _null_ _null_ byteaSetByte _null_ _null_ _null_ )); -DESCR("set byte"); -DATA(insert OID = 723 ( get_bit PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 23 "17 23" _null_ _null_ _null_ _null_ _null_ byteaGetBit _null_ _null_ _null_ )); 
-DESCR("get bit"); -DATA(insert OID = 724 ( set_bit PGNSP PGUID 12 1 0 0 0 f f f f t f i s 3 0 17 "17 23 23" _null_ _null_ _null_ _null_ _null_ byteaSetBit _null_ _null_ _null_ )); -DESCR("set bit"); -DATA(insert OID = 749 ( overlay PGNSP PGUID 12 1 0 0 0 f f f f t f i s 4 0 17 "17 17 23 23" _null_ _null_ _null_ _null_ _null_ byteaoverlay _null_ _null_ _null_ )); -DESCR("substitute portion of string"); -DATA(insert OID = 752 ( overlay PGNSP PGUID 12 1 0 0 0 f f f f t f i s 3 0 17 "17 17 23" _null_ _null_ _null_ _null_ _null_ byteaoverlay_no_len _null_ _null_ _null_ )); -DESCR("substitute portion of string"); - -DATA(insert OID = 725 ( dist_pl PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 701 "600 628" _null_ _null_ _null_ _null_ _null_ dist_pl _null_ _null_ _null_ )); -DATA(insert OID = 726 ( dist_lb PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 701 "628 603" _null_ _null_ _null_ _null_ _null_ dist_lb _null_ _null_ _null_ )); -DATA(insert OID = 727 ( dist_sl PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 701 "601 628" _null_ _null_ _null_ _null_ _null_ dist_sl _null_ _null_ _null_ )); -DATA(insert OID = 728 ( dist_cpoly PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 701 "718 604" _null_ _null_ _null_ _null_ _null_ dist_cpoly _null_ _null_ _null_ )); -DATA(insert OID = 729 ( poly_distance PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 701 "604 604" _null_ _null_ _null_ _null_ _null_ poly_distance _null_ _null_ _null_ )); -DATA(insert OID = 3275 ( dist_ppoly PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 701 "600 604" _null_ _null_ _null_ _null_ _null_ dist_ppoly _null_ _null_ _null_ )); -DATA(insert OID = 3292 ( dist_polyp PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 701 "604 600" _null_ _null_ _null_ _null_ _null_ dist_polyp _null_ _null_ _null_ )); -DATA(insert OID = 3290 ( dist_cpoint PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 701 "718 600" _null_ _null_ _null_ _null_ _null_ dist_cpoint _null_ _null_ _null_ )); - -DATA(insert OID = 740 ( text_lt PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 16 "25 25" _null_ _null_ _null_ _null_ _null_ text_lt _null_ _null_ _null_ )); -DATA(insert OID = 741 ( text_le PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 16 "25 25" _null_ _null_ _null_ _null_ _null_ text_le _null_ _null_ _null_ )); -DATA(insert OID = 742 ( text_gt PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 16 "25 25" _null_ _null_ _null_ _null_ _null_ text_gt _null_ _null_ _null_ )); -DATA(insert OID = 743 ( text_ge PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 16 "25 25" _null_ _null_ _null_ _null_ _null_ text_ge _null_ _null_ _null_ )); - -DATA(insert OID = 745 ( current_user PGNSP PGUID 12 1 0 0 0 f f f f t f s s 0 0 19 "" _null_ _null_ _null_ _null_ _null_ current_user _null_ _null_ _null_ )); -DESCR("current user name"); -DATA(insert OID = 746 ( session_user PGNSP PGUID 12 1 0 0 0 f f f f t f s s 0 0 19 "" _null_ _null_ _null_ _null_ _null_ session_user _null_ _null_ _null_ )); -DESCR("session user name"); - -DATA(insert OID = 744 ( array_eq PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 16 "2277 2277" _null_ _null_ _null_ _null_ _null_ array_eq _null_ _null_ _null_ )); -DATA(insert OID = 390 ( array_ne PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 16 "2277 2277" _null_ _null_ _null_ _null_ _null_ array_ne _null_ _null_ _null_ )); -DATA(insert OID = 391 ( array_lt PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 16 "2277 2277" _null_ _null_ _null_ _null_ _null_ array_lt _null_ _null_ _null_ )); -DATA(insert OID = 392 ( array_gt PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 16 "2277 2277" _null_ _null_ _null_ _null_ _null_ array_gt _null_ 
_null_ _null_ )); -DATA(insert OID = 393 ( array_le PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 16 "2277 2277" _null_ _null_ _null_ _null_ _null_ array_le _null_ _null_ _null_ )); -DATA(insert OID = 396 ( array_ge PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 16 "2277 2277" _null_ _null_ _null_ _null_ _null_ array_ge _null_ _null_ _null_ )); -DATA(insert OID = 747 ( array_dims PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 25 "2277" _null_ _null_ _null_ _null_ _null_ array_dims _null_ _null_ _null_ )); -DESCR("array dimensions"); -DATA(insert OID = 748 ( array_ndims PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 23 "2277" _null_ _null_ _null_ _null_ _null_ array_ndims _null_ _null_ _null_ )); -DESCR("number of array dimensions"); -DATA(insert OID = 750 ( array_in PGNSP PGUID 12 1 0 0 0 f f f f t f s s 3 0 2277 "2275 26 23" _null_ _null_ _null_ _null_ _null_ array_in _null_ _null_ _null_ )); -DESCR("I/O"); -DATA(insert OID = 751 ( array_out PGNSP PGUID 12 1 0 0 0 f f f f t f s s 1 0 2275 "2277" _null_ _null_ _null_ _null_ _null_ array_out _null_ _null_ _null_ )); -DESCR("I/O"); -DATA(insert OID = 2091 ( array_lower PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 23 "2277 23" _null_ _null_ _null_ _null_ _null_ array_lower _null_ _null_ _null_ )); -DESCR("array lower dimension"); -DATA(insert OID = 2092 ( array_upper PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 23 "2277 23" _null_ _null_ _null_ _null_ _null_ array_upper _null_ _null_ _null_ )); -DESCR("array upper dimension"); -DATA(insert OID = 2176 ( array_length PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 23 "2277 23" _null_ _null_ _null_ _null_ _null_ array_length _null_ _null_ _null_ )); -DESCR("array length"); -DATA(insert OID = 3179 ( cardinality PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 23 "2277" _null_ _null_ _null_ _null_ _null_ array_cardinality _null_ _null_ _null_ )); -DESCR("array cardinality"); -DATA(insert OID = 378 ( array_append PGNSP PGUID 12 1 0 0 0 f f f f f f i s 2 0 2277 "2277 2283" _null_ _null_ _null_ _null_ _null_ array_append _null_ _null_ _null_ )); -DESCR("append element onto end of array"); -DATA(insert OID = 379 ( array_prepend PGNSP PGUID 12 1 0 0 0 f f f f f f i s 2 0 2277 "2283 2277" _null_ _null_ _null_ _null_ _null_ array_prepend _null_ _null_ _null_ )); -DESCR("prepend element onto front of array"); -DATA(insert OID = 383 ( array_cat PGNSP PGUID 12 1 0 0 0 f f f f f f i s 2 0 2277 "2277 2277" _null_ _null_ _null_ _null_ _null_ array_cat _null_ _null_ _null_ )); -DATA(insert OID = 394 ( string_to_array PGNSP PGUID 12 1 0 0 0 f f f f f f i s 2 0 1009 "25 25" _null_ _null_ _null_ _null_ _null_ text_to_array _null_ _null_ _null_ )); -DESCR("split delimited text into text[]"); -DATA(insert OID = 395 ( array_to_string PGNSP PGUID 12 1 0 0 0 f f f f t f s s 2 0 25 "2277 25" _null_ _null_ _null_ _null_ _null_ array_to_text _null_ _null_ _null_ )); -DESCR("concatenate array elements, using delimiter, into text"); -DATA(insert OID = 376 ( string_to_array PGNSP PGUID 12 1 0 0 0 f f f f f f i s 3 0 1009 "25 25 25" _null_ _null_ _null_ _null_ _null_ text_to_array_null _null_ _null_ _null_ )); -DESCR("split delimited text into text[], with null string"); -DATA(insert OID = 384 ( array_to_string PGNSP PGUID 12 1 0 0 0 f f f f f f s s 3 0 25 "2277 25 25" _null_ _null_ _null_ _null_ _null_ array_to_text_null _null_ _null_ _null_ )); -DESCR("concatenate array elements, using delimiter and null string, into text"); -DATA(insert OID = 515 ( array_larger PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 2277 "2277 2277" _null_ _null_ _null_ _null_ 
_null_ array_larger _null_ _null_ _null_ )); -DESCR("larger of two"); -DATA(insert OID = 516 ( array_smaller PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 2277 "2277 2277" _null_ _null_ _null_ _null_ _null_ array_smaller _null_ _null_ _null_ )); -DESCR("smaller of two"); -DATA(insert OID = 3277 ( array_position PGNSP PGUID 12 1 0 0 0 f f f f f f i s 2 0 23 "2277 2283" _null_ _null_ _null_ _null_ _null_ array_position _null_ _null_ _null_ )); -DESCR("returns an offset of value in array"); -DATA(insert OID = 3278 ( array_position PGNSP PGUID 12 1 0 0 0 f f f f f f i s 3 0 23 "2277 2283 23" _null_ _null_ _null_ _null_ _null_ array_position_start _null_ _null_ _null_ )); -DESCR("returns an offset of value in array with start index"); -DATA(insert OID = 3279 ( array_positions PGNSP PGUID 12 1 0 0 0 f f f f f f i s 2 0 1007 "2277 2283" _null_ _null_ _null_ _null_ _null_ array_positions _null_ _null_ _null_ )); -DESCR("returns an array of offsets of some value in array"); -DATA(insert OID = 1191 ( generate_subscripts PGNSP PGUID 12 1 1000 0 0 f f f f t t i s 3 0 23 "2277 23 16" _null_ _null_ _null_ _null_ _null_ generate_subscripts _null_ _null_ _null_ )); -DESCR("array subscripts generator"); -DATA(insert OID = 1192 ( generate_subscripts PGNSP PGUID 12 1 1000 0 0 f f f f t t i s 2 0 23 "2277 23" _null_ _null_ _null_ _null_ _null_ generate_subscripts_nodir _null_ _null_ _null_ )); -DESCR("array subscripts generator"); -DATA(insert OID = 1193 ( array_fill PGNSP PGUID 12 1 0 0 0 f f f f f f i s 2 0 2277 "2283 1007" _null_ _null_ _null_ _null_ _null_ array_fill _null_ _null_ _null_ )); -DESCR("array constructor with value"); -DATA(insert OID = 1286 ( array_fill PGNSP PGUID 12 1 0 0 0 f f f f f f i s 3 0 2277 "2283 1007 1007" _null_ _null_ _null_ _null_ _null_ array_fill_with_lower_bounds _null_ _null_ _null_ )); -DESCR("array constructor with value"); -DATA(insert OID = 2331 ( unnest PGNSP PGUID 12 1 100 0 0 f f f f t t i s 1 0 2283 "2277" _null_ _null_ _null_ _null_ _null_ array_unnest _null_ _null_ _null_ )); -DESCR("expand array to set of rows"); -DATA(insert OID = 3167 ( array_remove PGNSP PGUID 12 1 0 0 0 f f f f f f i s 2 0 2277 "2277 2283" _null_ _null_ _null_ _null_ _null_ array_remove _null_ _null_ _null_ )); -DESCR("remove any occurrences of an element from an array"); -DATA(insert OID = 3168 ( array_replace PGNSP PGUID 12 1 0 0 0 f f f f f f i s 3 0 2277 "2277 2283 2283" _null_ _null_ _null_ _null_ _null_ array_replace _null_ _null_ _null_ )); -DESCR("replace any occurrences of an element in an array"); -DATA(insert OID = 2333 ( array_agg_transfn PGNSP PGUID 12 1 0 0 0 f f f f f f i s 2 0 2281 "2281 2776" _null_ _null_ _null_ _null_ _null_ array_agg_transfn _null_ _null_ _null_ )); -DESCR("aggregate transition function"); -DATA(insert OID = 2334 ( array_agg_finalfn PGNSP PGUID 12 1 0 0 0 f f f f f f i s 2 0 2277 "2281 2776" _null_ _null_ _null_ _null_ _null_ array_agg_finalfn _null_ _null_ _null_ )); -DESCR("aggregate final function"); -DATA(insert OID = 2335 ( array_agg PGNSP PGUID 12 1 0 0 0 t f f f f f i s 1 0 2277 "2776" _null_ _null_ _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ )); -DESCR("concatenate aggregate input into an array"); -DATA(insert OID = 4051 ( array_agg_array_transfn PGNSP PGUID 12 1 0 0 0 f f f f f f i s 2 0 2281 "2281 2277" _null_ _null_ _null_ _null_ _null_ array_agg_array_transfn _null_ _null_ _null_ )); -DESCR("aggregate transition function"); -DATA(insert OID = 4052 ( array_agg_array_finalfn PGNSP PGUID 12 1 0 0 0 f f f f f f i s 2 0 2277 "2281 2277" 
_null_ _null_ _null_ _null_ _null_ array_agg_array_finalfn _null_ _null_ _null_ )); -DESCR("aggregate final function"); -DATA(insert OID = 4053 ( array_agg PGNSP PGUID 12 1 0 0 0 t f f f f f i s 1 0 2277 "2277" _null_ _null_ _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ )); -DESCR("concatenate aggregate input into an array"); -DATA(insert OID = 3218 ( width_bucket PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 23 "2283 2277" _null_ _null_ _null_ _null_ _null_ width_bucket_array _null_ _null_ _null_ )); -DESCR("bucket number of operand given a sorted array of bucket lower bounds"); -DATA(insert OID = 3816 ( array_typanalyze PGNSP PGUID 12 1 0 0 0 f f f f t f s s 1 0 16 "2281" _null_ _null_ _null_ _null_ _null_ array_typanalyze _null_ _null_ _null_ )); -DESCR("array typanalyze"); -DATA(insert OID = 3817 ( arraycontsel PGNSP PGUID 12 1 0 0 0 f f f f t f s s 4 0 701 "2281 26 2281 23" _null_ _null_ _null_ _null_ _null_ arraycontsel _null_ _null_ _null_ )); -DESCR("restriction selectivity for array-containment operators"); -DATA(insert OID = 3818 ( arraycontjoinsel PGNSP PGUID 12 1 0 0 0 f f f f t f s s 5 0 701 "2281 26 2281 21 2281" _null_ _null_ _null_ _null_ _null_ arraycontjoinsel _null_ _null_ _null_ )); -DESCR("join selectivity for array-containment operators"); - -DATA(insert OID = 760 ( smgrin PGNSP PGUID 12 1 0 0 0 f f f f t f s s 1 0 210 "2275" _null_ _null_ _null_ _null_ _null_ smgrin _null_ _null_ _null_ )); -DESCR("I/O"); -DATA(insert OID = 761 ( smgrout PGNSP PGUID 12 1 0 0 0 f f f f t f s s 1 0 2275 "210" _null_ _null_ _null_ _null_ _null_ smgrout _null_ _null_ _null_ )); -DESCR("I/O"); -DATA(insert OID = 762 ( smgreq PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 16 "210 210" _null_ _null_ _null_ _null_ _null_ smgreq _null_ _null_ _null_ )); -DESCR("storage manager"); -DATA(insert OID = 763 ( smgrne PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 16 "210 210" _null_ _null_ _null_ _null_ _null_ smgrne _null_ _null_ _null_ )); -DESCR("storage manager"); - -DATA(insert OID = 764 ( lo_import PGNSP PGUID 12 1 0 0 0 f f f f t f v u 1 0 26 "25" _null_ _null_ _null_ _null_ _null_ be_lo_import _null_ _null_ _null_ )); -DESCR("large object import"); -DATA(insert OID = 767 ( lo_import PGNSP PGUID 12 1 0 0 0 f f f f t f v u 2 0 26 "25 26" _null_ _null_ _null_ _null_ _null_ be_lo_import_with_oid _null_ _null_ _null_ )); -DESCR("large object import"); -DATA(insert OID = 765 ( lo_export PGNSP PGUID 12 1 0 0 0 f f f f t f v u 2 0 23 "26 25" _null_ _null_ _null_ _null_ _null_ be_lo_export _null_ _null_ _null_ )); -DESCR("large object export"); - -DATA(insert OID = 766 ( int4inc PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 23 "23" _null_ _null_ _null_ _null_ _null_ int4inc _null_ _null_ _null_ )); -DESCR("increment"); -DATA(insert OID = 768 ( int4larger PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 23 "23 23" _null_ _null_ _null_ _null_ _null_ int4larger _null_ _null_ _null_ )); -DESCR("larger of two"); -DATA(insert OID = 769 ( int4smaller PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 23 "23 23" _null_ _null_ _null_ _null_ _null_ int4smaller _null_ _null_ _null_ )); -DESCR("smaller of two"); -DATA(insert OID = 770 ( int2larger PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 21 "21 21" _null_ _null_ _null_ _null_ _null_ int2larger _null_ _null_ _null_ )); -DESCR("larger of two"); -DATA(insert OID = 771 ( int2smaller PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 21 "21 21" _null_ _null_ _null_ _null_ _null_ int2smaller _null_ _null_ _null_ )); -DESCR("smaller of two"); - -DATA(insert OID = 784 ( tintervaleq PGNSP 
PGUID 12 1 0 0 0 f f f t t f i s 2 0 16 "704 704" _null_ _null_ _null_ _null_ _null_ tintervaleq _null_ _null_ _null_ )); -DATA(insert OID = 785 ( tintervalne PGNSP PGUID 12 1 0 0 0 f f f t t f i s 2 0 16 "704 704" _null_ _null_ _null_ _null_ _null_ tintervalne _null_ _null_ _null_ )); -DATA(insert OID = 786 ( tintervallt PGNSP PGUID 12 1 0 0 0 f f f t t f i s 2 0 16 "704 704" _null_ _null_ _null_ _null_ _null_ tintervallt _null_ _null_ _null_ )); -DATA(insert OID = 787 ( tintervalgt PGNSP PGUID 12 1 0 0 0 f f f t t f i s 2 0 16 "704 704" _null_ _null_ _null_ _null_ _null_ tintervalgt _null_ _null_ _null_ )); -DATA(insert OID = 788 ( tintervalle PGNSP PGUID 12 1 0 0 0 f f f t t f i s 2 0 16 "704 704" _null_ _null_ _null_ _null_ _null_ tintervalle _null_ _null_ _null_ )); -DATA(insert OID = 789 ( tintervalge PGNSP PGUID 12 1 0 0 0 f f f t t f i s 2 0 16 "704 704" _null_ _null_ _null_ _null_ _null_ tintervalge _null_ _null_ _null_ )); - -/* OIDS 800 - 899 */ - -DATA(insert OID = 846 ( cash_mul_flt4 PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 790 "790 700" _null_ _null_ _null_ _null_ _null_ cash_mul_flt4 _null_ _null_ _null_ )); -DATA(insert OID = 847 ( cash_div_flt4 PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 790 "790 700" _null_ _null_ _null_ _null_ _null_ cash_div_flt4 _null_ _null_ _null_ )); -DATA(insert OID = 848 ( flt4_mul_cash PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 790 "700 790" _null_ _null_ _null_ _null_ _null_ flt4_mul_cash _null_ _null_ _null_ )); - -DATA(insert OID = 849 ( position PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 23 "25 25" _null_ _null_ _null_ _null_ _null_ textpos _null_ _null_ _null_ )); -DESCR("position of substring"); -DATA(insert OID = 850 ( textlike PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 16 "25 25" _null_ _null_ _null_ _null_ _null_ textlike _null_ _null_ _null_ )); -DATA(insert OID = 851 ( textnlike PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 16 "25 25" _null_ _null_ _null_ _null_ _null_ textnlike _null_ _null_ _null_ )); - -DATA(insert OID = 852 ( int48eq PGNSP PGUID 12 1 0 0 0 f f f t t f i s 2 0 16 "23 20" _null_ _null_ _null_ _null_ _null_ int48eq _null_ _null_ _null_ )); -DATA(insert OID = 853 ( int48ne PGNSP PGUID 12 1 0 0 0 f f f t t f i s 2 0 16 "23 20" _null_ _null_ _null_ _null_ _null_ int48ne _null_ _null_ _null_ )); -DATA(insert OID = 854 ( int48lt PGNSP PGUID 12 1 0 0 0 f f f t t f i s 2 0 16 "23 20" _null_ _null_ _null_ _null_ _null_ int48lt _null_ _null_ _null_ )); -DATA(insert OID = 855 ( int48gt PGNSP PGUID 12 1 0 0 0 f f f t t f i s 2 0 16 "23 20" _null_ _null_ _null_ _null_ _null_ int48gt _null_ _null_ _null_ )); -DATA(insert OID = 856 ( int48le PGNSP PGUID 12 1 0 0 0 f f f t t f i s 2 0 16 "23 20" _null_ _null_ _null_ _null_ _null_ int48le _null_ _null_ _null_ )); -DATA(insert OID = 857 ( int48ge PGNSP PGUID 12 1 0 0 0 f f f t t f i s 2 0 16 "23 20" _null_ _null_ _null_ _null_ _null_ int48ge _null_ _null_ _null_ )); - -DATA(insert OID = 858 ( namelike PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 16 "19 25" _null_ _null_ _null_ _null_ _null_ namelike _null_ _null_ _null_ )); -DATA(insert OID = 859 ( namenlike PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 16 "19 25" _null_ _null_ _null_ _null_ _null_ namenlike _null_ _null_ _null_ )); - -DATA(insert OID = 860 ( bpchar PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 1042 "18" _null_ _null_ _null_ _null_ _null_ char_bpchar _null_ _null_ _null_ )); -DESCR("convert char to char(n)"); - -DATA(insert OID = 861 ( current_database PGNSP PGUID 12 1 0 0 0 f f f f t f s s 0 0 19 "" _null_ _null_ _null_ _null_ 
_null_ current_database _null_ _null_ _null_ )); -DESCR("name of the current database"); -DATA(insert OID = 817 ( current_query PGNSP PGUID 12 1 0 0 0 f f f f f f v r 0 0 25 "" _null_ _null_ _null_ _null_ _null_ current_query _null_ _null_ _null_ )); -DESCR("get the currently executing query"); - -DATA(insert OID = 3399 ( int8_mul_cash PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 790 "20 790" _null_ _null_ _null_ _null_ _null_ int8_mul_cash _null_ _null_ _null_ )); -DATA(insert OID = 862 ( int4_mul_cash PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 790 "23 790" _null_ _null_ _null_ _null_ _null_ int4_mul_cash _null_ _null_ _null_ )); -DATA(insert OID = 863 ( int2_mul_cash PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 790 "21 790" _null_ _null_ _null_ _null_ _null_ int2_mul_cash _null_ _null_ _null_ )); -DATA(insert OID = 3344 ( cash_mul_int8 PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 790 "790 20" _null_ _null_ _null_ _null_ _null_ cash_mul_int8 _null_ _null_ _null_ )); -DATA(insert OID = 3345 ( cash_div_int8 PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 790 "790 20" _null_ _null_ _null_ _null_ _null_ cash_div_int8 _null_ _null_ _null_ )); -DATA(insert OID = 864 ( cash_mul_int4 PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 790 "790 23" _null_ _null_ _null_ _null_ _null_ cash_mul_int4 _null_ _null_ _null_ )); -DATA(insert OID = 865 ( cash_div_int4 PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 790 "790 23" _null_ _null_ _null_ _null_ _null_ cash_div_int4 _null_ _null_ _null_ )); -DATA(insert OID = 866 ( cash_mul_int2 PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 790 "790 21" _null_ _null_ _null_ _null_ _null_ cash_mul_int2 _null_ _null_ _null_ )); -DATA(insert OID = 867 ( cash_div_int2 PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 790 "790 21" _null_ _null_ _null_ _null_ _null_ cash_div_int2 _null_ _null_ _null_ )); - -DATA(insert OID = 886 ( cash_in PGNSP PGUID 12 1 0 0 0 f f f f t f s s 1 0 790 "2275" _null_ _null_ _null_ _null_ _null_ cash_in _null_ _null_ _null_ )); -DESCR("I/O"); -DATA(insert OID = 887 ( cash_out PGNSP PGUID 12 1 0 0 0 f f f f t f s s 1 0 2275 "790" _null_ _null_ _null_ _null_ _null_ cash_out _null_ _null_ _null_ )); -DESCR("I/O"); -DATA(insert OID = 888 ( cash_eq PGNSP PGUID 12 1 0 0 0 f f f t t f i s 2 0 16 "790 790" _null_ _null_ _null_ _null_ _null_ cash_eq _null_ _null_ _null_ )); -DATA(insert OID = 889 ( cash_ne PGNSP PGUID 12 1 0 0 0 f f f t t f i s 2 0 16 "790 790" _null_ _null_ _null_ _null_ _null_ cash_ne _null_ _null_ _null_ )); -DATA(insert OID = 890 ( cash_lt PGNSP PGUID 12 1 0 0 0 f f f t t f i s 2 0 16 "790 790" _null_ _null_ _null_ _null_ _null_ cash_lt _null_ _null_ _null_ )); -DATA(insert OID = 891 ( cash_le PGNSP PGUID 12 1 0 0 0 f f f t t f i s 2 0 16 "790 790" _null_ _null_ _null_ _null_ _null_ cash_le _null_ _null_ _null_ )); -DATA(insert OID = 892 ( cash_gt PGNSP PGUID 12 1 0 0 0 f f f t t f i s 2 0 16 "790 790" _null_ _null_ _null_ _null_ _null_ cash_gt _null_ _null_ _null_ )); -DATA(insert OID = 893 ( cash_ge PGNSP PGUID 12 1 0 0 0 f f f t t f i s 2 0 16 "790 790" _null_ _null_ _null_ _null_ _null_ cash_ge _null_ _null_ _null_ )); -DATA(insert OID = 894 ( cash_pl PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 790 "790 790" _null_ _null_ _null_ _null_ _null_ cash_pl _null_ _null_ _null_ )); -DATA(insert OID = 895 ( cash_mi PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 790 "790 790" _null_ _null_ _null_ _null_ _null_ cash_mi _null_ _null_ _null_ )); -DATA(insert OID = 896 ( cash_mul_flt8 PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 790 "790 701" _null_ _null_ _null_ _null_ _null_ 
cash_mul_flt8 _null_ _null_ _null_ )); -DATA(insert OID = 897 ( cash_div_flt8 PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 790 "790 701" _null_ _null_ _null_ _null_ _null_ cash_div_flt8 _null_ _null_ _null_ )); -DATA(insert OID = 898 ( cashlarger PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 790 "790 790" _null_ _null_ _null_ _null_ _null_ cashlarger _null_ _null_ _null_ )); -DESCR("larger of two"); -DATA(insert OID = 899 ( cashsmaller PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 790 "790 790" _null_ _null_ _null_ _null_ _null_ cashsmaller _null_ _null_ _null_ )); -DESCR("smaller of two"); -DATA(insert OID = 919 ( flt8_mul_cash PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 790 "701 790" _null_ _null_ _null_ _null_ _null_ flt8_mul_cash _null_ _null_ _null_ )); -DATA(insert OID = 935 ( cash_words PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 25 "790" _null_ _null_ _null_ _null_ _null_ cash_words _null_ _null_ _null_ )); -DESCR("output money amount as words"); -DATA(insert OID = 3822 ( cash_div_cash PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 701 "790 790" _null_ _null_ _null_ _null_ _null_ cash_div_cash _null_ _null_ _null_ )); -DATA(insert OID = 3823 ( numeric PGNSP PGUID 12 1 0 0 0 f f f f t f s s 1 0 1700 "790" _null_ _null_ _null_ _null_ _null_ cash_numeric _null_ _null_ _null_ )); -DESCR("convert money to numeric"); -DATA(insert OID = 3824 ( money PGNSP PGUID 12 1 0 0 0 f f f f t f s s 1 0 790 "1700" _null_ _null_ _null_ _null_ _null_ numeric_cash _null_ _null_ _null_ )); -DESCR("convert numeric to money"); -DATA(insert OID = 3811 ( money PGNSP PGUID 12 1 0 0 0 f f f f t f s s 1 0 790 "23" _null_ _null_ _null_ _null_ _null_ int4_cash _null_ _null_ _null_ )); -DESCR("convert int4 to money"); -DATA(insert OID = 3812 ( money PGNSP PGUID 12 1 0 0 0 f f f f t f s s 1 0 790 "20" _null_ _null_ _null_ _null_ _null_ int8_cash _null_ _null_ _null_ )); -DESCR("convert int8 to money"); - -/* OIDS 900 - 999 */ - -DATA(insert OID = 940 ( mod PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 21 "21 21" _null_ _null_ _null_ _null_ _null_ int2mod _null_ _null_ _null_ )); -DESCR("modulus"); -DATA(insert OID = 941 ( mod PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 23 "23 23" _null_ _null_ _null_ _null_ _null_ int4mod _null_ _null_ _null_ )); -DESCR("modulus"); - -DATA(insert OID = 945 ( int8mod PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 20 "20 20" _null_ _null_ _null_ _null_ _null_ int8mod _null_ _null_ _null_ )); -DATA(insert OID = 947 ( mod PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 20 "20 20" _null_ _null_ _null_ _null_ _null_ int8mod _null_ _null_ _null_ )); -DESCR("modulus"); - -DATA(insert OID = 944 ( char PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 18 "25" _null_ _null_ _null_ _null_ _null_ text_char _null_ _null_ _null_ )); -DESCR("convert text to char"); -DATA(insert OID = 946 ( text PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 25 "18" _null_ _null_ _null_ _null_ _null_ char_text _null_ _null_ _null_ )); -DESCR("convert char to text"); - -DATA(insert OID = 952 ( lo_open PGNSP PGUID 12 1 0 0 0 f f f f t f v u 2 0 23 "26 23" _null_ _null_ _null_ _null_ _null_ be_lo_open _null_ _null_ _null_ )); -DESCR("large object open"); -DATA(insert OID = 953 ( lo_close PGNSP PGUID 12 1 0 0 0 f f f f t f v u 1 0 23 "23" _null_ _null_ _null_ _null_ _null_ be_lo_close _null_ _null_ _null_ )); -DESCR("large object close"); -DATA(insert OID = 954 ( loread PGNSP PGUID 12 1 0 0 0 f f f f t f v u 2 0 17 "23 23" _null_ _null_ _null_ _null_ _null_ be_loread _null_ _null_ _null_ )); -DESCR("large object read"); -DATA(insert OID = 955 ( lowrite 
PGNSP PGUID 12 1 0 0 0 f f f f t f v u 2 0 23 "23 17" _null_ _null_ _null_ _null_ _null_ be_lowrite _null_ _null_ _null_ )); -DESCR("large object write"); -DATA(insert OID = 956 ( lo_lseek PGNSP PGUID 12 1 0 0 0 f f f f t f v u 3 0 23 "23 23 23" _null_ _null_ _null_ _null_ _null_ be_lo_lseek _null_ _null_ _null_ )); -DESCR("large object seek"); -DATA(insert OID = 3170 ( lo_lseek64 PGNSP PGUID 12 1 0 0 0 f f f f t f v u 3 0 20 "23 20 23" _null_ _null_ _null_ _null_ _null_ be_lo_lseek64 _null_ _null_ _null_ )); -DESCR("large object seek (64 bit)"); -DATA(insert OID = 957 ( lo_creat PGNSP PGUID 12 1 0 0 0 f f f f t f v u 1 0 26 "23" _null_ _null_ _null_ _null_ _null_ be_lo_creat _null_ _null_ _null_ )); -DESCR("large object create"); -DATA(insert OID = 715 ( lo_create PGNSP PGUID 12 1 0 0 0 f f f f t f v u 1 0 26 "26" _null_ _null_ _null_ _null_ _null_ be_lo_create _null_ _null_ _null_ )); -DESCR("large object create"); -DATA(insert OID = 958 ( lo_tell PGNSP PGUID 12 1 0 0 0 f f f f t f v u 1 0 23 "23" _null_ _null_ _null_ _null_ _null_ be_lo_tell _null_ _null_ _null_ )); -DESCR("large object position"); -DATA(insert OID = 3171 ( lo_tell64 PGNSP PGUID 12 1 0 0 0 f f f f t f v u 1 0 20 "23" _null_ _null_ _null_ _null_ _null_ be_lo_tell64 _null_ _null_ _null_ )); -DESCR("large object position (64 bit)"); -DATA(insert OID = 1004 ( lo_truncate PGNSP PGUID 12 1 0 0 0 f f f f t f v u 2 0 23 "23 23" _null_ _null_ _null_ _null_ _null_ be_lo_truncate _null_ _null_ _null_ )); -DESCR("truncate large object"); -DATA(insert OID = 3172 ( lo_truncate64 PGNSP PGUID 12 1 0 0 0 f f f f t f v u 2 0 23 "23 20" _null_ _null_ _null_ _null_ _null_ be_lo_truncate64 _null_ _null_ _null_ )); -DESCR("truncate large object (64 bit)"); - -DATA(insert OID = 3457 ( lo_from_bytea PGNSP PGUID 12 1 0 0 0 f f f f t f v u 2 0 26 "26 17" _null_ _null_ _null_ _null_ _null_ be_lo_from_bytea _null_ _null_ _null_ )); -DESCR("create new large object with given content"); -DATA(insert OID = 3458 ( lo_get PGNSP PGUID 12 1 0 0 0 f f f f t f v u 1 0 17 "26" _null_ _null_ _null_ _null_ _null_ be_lo_get _null_ _null_ _null_ )); -DESCR("read entire large object"); -DATA(insert OID = 3459 ( lo_get PGNSP PGUID 12 1 0 0 0 f f f f t f v u 3 0 17 "26 20 23" _null_ _null_ _null_ _null_ _null_ be_lo_get_fragment _null_ _null_ _null_ )); -DESCR("read large object from offset for length"); -DATA(insert OID = 3460 ( lo_put PGNSP PGUID 12 1 0 0 0 f f f f t f v u 3 0 2278 "26 20 17" _null_ _null_ _null_ _null_ _null_ be_lo_put _null_ _null_ _null_ )); -DESCR("write data at offset"); - -DATA(insert OID = 959 ( on_pl PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 16 "600 628" _null_ _null_ _null_ _null_ _null_ on_pl _null_ _null_ _null_ )); -DATA(insert OID = 960 ( on_sl PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 16 "601 628" _null_ _null_ _null_ _null_ _null_ on_sl _null_ _null_ _null_ )); -DATA(insert OID = 961 ( close_pl PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 600 "600 628" _null_ _null_ _null_ _null_ _null_ close_pl _null_ _null_ _null_ )); -DATA(insert OID = 962 ( close_sl PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 600 "601 628" _null_ _null_ _null_ _null_ _null_ close_sl _null_ _null_ _null_ )); -DATA(insert OID = 963 ( close_lb PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 600 "628 603" _null_ _null_ _null_ _null_ _null_ close_lb _null_ _null_ _null_ )); - -DATA(insert OID = 964 ( lo_unlink PGNSP PGUID 12 1 0 0 0 f f f f t f v u 1 0 23 "26" _null_ _null_ _null_ _null_ _null_ be_lo_unlink _null_ _null_ _null_ )); -DESCR("large object unlink (delete)"); 
+ /* procedure name */
+ NameData proname;
-DATA(insert OID = 973 ( path_inter PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 16 "602 602" _null_ _null_ _null_ _null_ _null_ path_inter _null_ _null_ _null_ ));
-DATA(insert OID = 975 ( area PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 701 "603" _null_ _null_ _null_ _null_ _null_ box_area _null_ _null_ _null_ ));
-DESCR("box area");
-DATA(insert OID = 976 ( width PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 701 "603" _null_ _null_ _null_ _null_ _null_ box_width _null_ _null_ _null_ ));
-DESCR("box width");
-DATA(insert OID = 977 ( height PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 701 "603" _null_ _null_ _null_ _null_ _null_ box_height _null_ _null_ _null_ ));
-DESCR("box height");
-DATA(insert OID = 978 ( box_distance PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 701 "603 603" _null_ _null_ _null_ _null_ _null_ box_distance _null_ _null_ _null_ ));
-DATA(insert OID = 979 ( area PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 701 "602" _null_ _null_ _null_ _null_ _null_ path_area _null_ _null_ _null_ ));
-DESCR("area of a closed path");
-DATA(insert OID = 980 ( box_intersect PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 603 "603 603" _null_ _null_ _null_ _null_ _null_ box_intersect _null_ _null_ _null_ ));
-DATA(insert OID = 4067 ( bound_box PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 603 "603 603" _null_ _null_ _null_ _null_ _null_ boxes_bound_box _null_ _null_ _null_ ));
-DESCR("bounding box of two boxes");
-DATA(insert OID = 981 ( diagonal PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 601 "603" _null_ _null_ _null_ _null_ _null_ box_diagonal _null_ _null_ _null_ ));
-DESCR("box diagonal");
-DATA(insert OID = 982 ( path_n_lt PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 16 "602 602" _null_ _null_ _null_ _null_ _null_ path_n_lt _null_ _null_ _null_ ));
-DATA(insert OID = 983 ( path_n_gt PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 16 "602 602" _null_ _null_ _null_ _null_ _null_ path_n_gt _null_ _null_ _null_ ));
-DATA(insert OID = 984 ( path_n_eq PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 16 "602 602" _null_ _null_ _null_ _null_ _null_ path_n_eq _null_ _null_ _null_ ));
-DATA(insert OID = 985 ( path_n_le PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 16 "602 602" _null_ _null_ _null_ _null_ _null_ path_n_le _null_ _null_ _null_ ));
-DATA(insert OID = 986 ( path_n_ge PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 16 "602 602" _null_ _null_ _null_ _null_ _null_ path_n_ge _null_ _null_ _null_ ));
-DATA(insert OID = 987 ( path_length PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 701 "602" _null_ _null_ _null_ _null_ _null_ path_length _null_ _null_ _null_ ));
-DATA(insert OID = 988 ( point_ne PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 16 "600 600" _null_ _null_ _null_ _null_ _null_ point_ne _null_ _null_ _null_ ));
-DATA(insert OID = 989 ( point_vert PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 16 "600 600" _null_ _null_ _null_ _null_ _null_ point_vert _null_ _null_ _null_ ));
-DATA(insert OID = 990 ( point_horiz PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 16 "600 600" _null_ _null_ _null_ _null_ _null_ point_horiz _null_ _null_ _null_ ));
-DATA(insert OID = 991 ( point_distance PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 701 "600 600" _null_ _null_ _null_ _null_ _null_ point_distance _null_ _null_ _null_ ));
-DATA(insert OID = 992 ( slope PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 701 "600 600" _null_ _null_ _null_ _null_ _null_ point_slope _null_ _null_ _null_ ));
-DESCR("slope between points");
-DATA(insert OID = 993 ( lseg PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 601 "600 600" _null_ _null_ _null_ _null_ _null_ lseg_construct _null_ _null_ _null_ ));
-DESCR("convert points to line segment");
-DATA(insert OID = 994 ( lseg_intersect PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 16 "601 601" _null_ _null_ _null_ _null_ _null_ lseg_intersect _null_ _null_ _null_ ));
-DATA(insert OID = 995 ( lseg_parallel PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 16 "601 601" _null_ _null_ _null_ _null_ _null_ lseg_parallel _null_ _null_ _null_ ));
-DATA(insert OID = 996 ( lseg_perp PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 16 "601 601" _null_ _null_ _null_ _null_ _null_ lseg_perp _null_ _null_ _null_ ));
-DATA(insert OID = 997 ( lseg_vertical PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 16 "601" _null_ _null_ _null_ _null_ _null_ lseg_vertical _null_ _null_ _null_ ));
-DATA(insert OID = 998 ( lseg_horizontal PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 16 "601" _null_ _null_ _null_ _null_ _null_ lseg_horizontal _null_ _null_ _null_ ));
-DATA(insert OID = 999 ( lseg_eq PGNSP PGUID 12 1 0 0 0 f f f t t f i s 2 0 16 "601 601" _null_ _null_ _null_ _null_ _null_ lseg_eq _null_ _null_ _null_ ));
+ /* OID of namespace containing this proc */
+ Oid pronamespace BKI_DEFAULT(PGNSP);
-/* OIDS 1000 - 1999 */
+ /* procedure owner */
+ Oid proowner BKI_DEFAULT(PGUID);
-DATA(insert OID = 3994 ( timestamp_izone_transform PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 2281 "2281" _null_ _null_ _null_ _null_ _null_ timestamp_izone_transform _null_ _null_ _null_ ));
-DESCR("transform a time zone adjustment");
-DATA(insert OID = 1026 ( timezone PGNSP PGUID 12 1 0 0 timestamp_izone_transform f f f f t f i s 2 0 1114 "1186 1184" _null_ _null_ _null_ _null_ _null_ timestamptz_izone _null_ _null_ _null_ ));
-DESCR("adjust timestamp to new time zone");
+ /* OID of pg_language entry */
+ Oid prolang BKI_DEFAULT(12);
-DATA(insert OID = 1031 ( aclitemin PGNSP PGUID 12 1 0 0 0 f f f f t f s s 1 0 1033 "2275" _null_ _null_ _null_ _null_ _null_ aclitemin _null_ _null_ _null_ ));
-DESCR("I/O");
-DATA(insert OID = 1032 ( aclitemout PGNSP PGUID 12 1 0 0 0 f f f f t f s s 1 0 2275 "1033" _null_ _null_ _null_ _null_ _null_ aclitemout _null_ _null_ _null_ ));
-DESCR("I/O");
-DATA(insert OID = 1035 ( aclinsert PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 1034 "1034 1033" _null_ _null_ _null_ _null_ _null_ aclinsert _null_ _null_ _null_ ));
-DESCR("add/update ACL item");
-DATA(insert OID = 1036 ( aclremove PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 1034 "1034 1033" _null_ _null_ _null_ _null_ _null_ aclremove _null_ _null_ _null_ ));
-DESCR("remove ACL item");
-DATA(insert OID = 1037 ( aclcontains PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 16 "1034 1033" _null_ _null_ _null_ _null_ _null_ aclcontains _null_ _null_ _null_ ));
-DESCR("contains");
-DATA(insert OID = 1062 ( aclitemeq PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 16 "1033 1033" _null_ _null_ _null_ _null_ _null_ aclitem_eq _null_ _null_ _null_ ));
-DATA(insert OID = 1365 ( makeaclitem PGNSP PGUID 12 1 0 0 0 f f f f t f i s 4 0 1033 "26 26 25 16" _null_ _null_ _null_ _null_ _null_ makeaclitem _null_ _null_ _null_ ));
-DESCR("make ACL item");
-DATA(insert OID = 3943 ( acldefault PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 1034 "18 26" _null_ _null_ _null_ _null_ _null_ acldefault_sql _null_ _null_ _null_ ));
-DESCR("TODO");
-DATA(insert OID = 1689 ( aclexplode PGNSP PGUID 12 1 10 0 0 f f f f t t s s 1 0 2249 "1034" "{1034,26,26,25,16}" "{i,o,o,o,o}" "{acl,grantor,grantee,privilege_type,is_grantable}" _null_ _null_ aclexplode _null_ _null_ _null_ ));
-DESCR("convert ACL item
array to table, for use by information schema"); -DATA(insert OID = 1044 ( bpcharin PGNSP PGUID 12 1 0 0 0 f f f f t f i s 3 0 1042 "2275 26 23" _null_ _null_ _null_ _null_ _null_ bpcharin _null_ _null_ _null_ )); -DESCR("I/O"); -DATA(insert OID = 1045 ( bpcharout PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 2275 "1042" _null_ _null_ _null_ _null_ _null_ bpcharout _null_ _null_ _null_ )); -DESCR("I/O"); -DATA(insert OID = 2913 ( bpchartypmodin PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 23 "1263" _null_ _null_ _null_ _null_ _null_ bpchartypmodin _null_ _null_ _null_ )); -DESCR("I/O typmod"); -DATA(insert OID = 2914 ( bpchartypmodout PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 2275 "23" _null_ _null_ _null_ _null_ _null_ bpchartypmodout _null_ _null_ _null_ )); -DESCR("I/O typmod"); -DATA(insert OID = 1046 ( varcharin PGNSP PGUID 12 1 0 0 0 f f f f t f i s 3 0 1043 "2275 26 23" _null_ _null_ _null_ _null_ _null_ varcharin _null_ _null_ _null_ )); -DESCR("I/O"); -DATA(insert OID = 1047 ( varcharout PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 2275 "1043" _null_ _null_ _null_ _null_ _null_ varcharout _null_ _null_ _null_ )); -DESCR("I/O"); -DATA(insert OID = 2915 ( varchartypmodin PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 23 "1263" _null_ _null_ _null_ _null_ _null_ varchartypmodin _null_ _null_ _null_ )); -DESCR("I/O typmod"); -DATA(insert OID = 2916 ( varchartypmodout PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 2275 "23" _null_ _null_ _null_ _null_ _null_ varchartypmodout _null_ _null_ _null_ )); -DESCR("I/O typmod"); -DATA(insert OID = 1048 ( bpchareq PGNSP PGUID 12 1 0 0 0 f f f t t f i s 2 0 16 "1042 1042" _null_ _null_ _null_ _null_ _null_ bpchareq _null_ _null_ _null_ )); -DATA(insert OID = 1049 ( bpcharlt PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 16 "1042 1042" _null_ _null_ _null_ _null_ _null_ bpcharlt _null_ _null_ _null_ )); -DATA(insert OID = 1050 ( bpcharle PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 16 "1042 1042" _null_ _null_ _null_ _null_ _null_ bpcharle _null_ _null_ _null_ )); -DATA(insert OID = 1051 ( bpchargt PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 16 "1042 1042" _null_ _null_ _null_ _null_ _null_ bpchargt _null_ _null_ _null_ )); -DATA(insert OID = 1052 ( bpcharge PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 16 "1042 1042" _null_ _null_ _null_ _null_ _null_ bpcharge _null_ _null_ _null_ )); -DATA(insert OID = 1053 ( bpcharne PGNSP PGUID 12 1 0 0 0 f f f t t f i s 2 0 16 "1042 1042" _null_ _null_ _null_ _null_ _null_ bpcharne _null_ _null_ _null_ )); -DATA(insert OID = 1063 ( bpchar_larger PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 1042 "1042 1042" _null_ _null_ _null_ _null_ _null_ bpchar_larger _null_ _null_ _null_ )); -DESCR("larger of two"); -DATA(insert OID = 1064 ( bpchar_smaller PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 1042 "1042 1042" _null_ _null_ _null_ _null_ _null_ bpchar_smaller _null_ _null_ _null_ )); -DESCR("smaller of two"); -DATA(insert OID = 1078 ( bpcharcmp PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 23 "1042 1042" _null_ _null_ _null_ _null_ _null_ bpcharcmp _null_ _null_ _null_ )); -DESCR("less-equal-greater"); -DATA(insert OID = 3328 ( bpchar_sortsupport PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 2278 "2281" _null_ _null_ _null_ _null_ _null_ bpchar_sortsupport _null_ _null_ _null_ )); -DESCR("sort support"); -DATA(insert OID = 1080 ( hashbpchar PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 23 "1042" _null_ _null_ _null_ _null_ _null_ hashbpchar _null_ _null_ _null_ )); -DESCR("hash"); -DATA(insert OID = 1081 ( format_type PGNSP PGUID 12 1 0 0 0 f f f f f 
f s s 2 0 25 "26 23" _null_ _null_ _null_ _null_ _null_ format_type _null_ _null_ _null_ )); -DESCR("format a type oid and atttypmod to canonical SQL"); -DATA(insert OID = 1084 ( date_in PGNSP PGUID 12 1 0 0 0 f f f f t f s s 1 0 1082 "2275" _null_ _null_ _null_ _null_ _null_ date_in _null_ _null_ _null_ )); -DESCR("I/O"); -DATA(insert OID = 1085 ( date_out PGNSP PGUID 12 1 0 0 0 f f f f t f s s 1 0 2275 "1082" _null_ _null_ _null_ _null_ _null_ date_out _null_ _null_ _null_ )); -DESCR("I/O"); -DATA(insert OID = 1086 ( date_eq PGNSP PGUID 12 1 0 0 0 f f f t t f i s 2 0 16 "1082 1082" _null_ _null_ _null_ _null_ _null_ date_eq _null_ _null_ _null_ )); -DATA(insert OID = 1087 ( date_lt PGNSP PGUID 12 1 0 0 0 f f f t t f i s 2 0 16 "1082 1082" _null_ _null_ _null_ _null_ _null_ date_lt _null_ _null_ _null_ )); -DATA(insert OID = 1088 ( date_le PGNSP PGUID 12 1 0 0 0 f f f t t f i s 2 0 16 "1082 1082" _null_ _null_ _null_ _null_ _null_ date_le _null_ _null_ _null_ )); -DATA(insert OID = 1089 ( date_gt PGNSP PGUID 12 1 0 0 0 f f f t t f i s 2 0 16 "1082 1082" _null_ _null_ _null_ _null_ _null_ date_gt _null_ _null_ _null_ )); -DATA(insert OID = 1090 ( date_ge PGNSP PGUID 12 1 0 0 0 f f f t t f i s 2 0 16 "1082 1082" _null_ _null_ _null_ _null_ _null_ date_ge _null_ _null_ _null_ )); -DATA(insert OID = 1091 ( date_ne PGNSP PGUID 12 1 0 0 0 f f f t t f i s 2 0 16 "1082 1082" _null_ _null_ _null_ _null_ _null_ date_ne _null_ _null_ _null_ )); -DATA(insert OID = 1092 ( date_cmp PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 23 "1082 1082" _null_ _null_ _null_ _null_ _null_ date_cmp _null_ _null_ _null_ )); -DESCR("less-equal-greater"); -DATA(insert OID = 3136 ( date_sortsupport PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 2278 "2281" _null_ _null_ _null_ _null_ _null_ date_sortsupport _null_ _null_ _null_ )); -DESCR("sort support"); + /* estimated execution cost */ + float4 procost BKI_DEFAULT(1); -/* OIDS 1100 - 1199 */ + /* estimated # of rows out (if proretset) */ + float4 prorows BKI_DEFAULT(0); -DATA(insert OID = 1102 ( time_lt PGNSP PGUID 12 1 0 0 0 f f f t t f i s 2 0 16 "1083 1083" _null_ _null_ _null_ _null_ _null_ time_lt _null_ _null_ _null_ )); -DATA(insert OID = 1103 ( time_le PGNSP PGUID 12 1 0 0 0 f f f t t f i s 2 0 16 "1083 1083" _null_ _null_ _null_ _null_ _null_ time_le _null_ _null_ _null_ )); -DATA(insert OID = 1104 ( time_gt PGNSP PGUID 12 1 0 0 0 f f f t t f i s 2 0 16 "1083 1083" _null_ _null_ _null_ _null_ _null_ time_gt _null_ _null_ _null_ )); -DATA(insert OID = 1105 ( time_ge PGNSP PGUID 12 1 0 0 0 f f f t t f i s 2 0 16 "1083 1083" _null_ _null_ _null_ _null_ _null_ time_ge _null_ _null_ _null_ )); -DATA(insert OID = 1106 ( time_ne PGNSP PGUID 12 1 0 0 0 f f f t t f i s 2 0 16 "1083 1083" _null_ _null_ _null_ _null_ _null_ time_ne _null_ _null_ _null_ )); -DATA(insert OID = 1107 ( time_cmp PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 23 "1083 1083" _null_ _null_ _null_ _null_ _null_ time_cmp _null_ _null_ _null_ )); -DESCR("less-equal-greater"); -DATA(insert OID = 1138 ( date_larger PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 1082 "1082 1082" _null_ _null_ _null_ _null_ _null_ date_larger _null_ _null_ _null_ )); -DESCR("larger of two"); -DATA(insert OID = 1139 ( date_smaller PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 1082 "1082 1082" _null_ _null_ _null_ _null_ _null_ date_smaller _null_ _null_ _null_ )); -DESCR("smaller of two"); -DATA(insert OID = 1140 ( date_mi PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 23 "1082 1082" _null_ _null_ _null_ _null_ _null_ date_mi _null_ _null_ 
_null_ )); -DATA(insert OID = 1141 ( date_pli PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 1082 "1082 23" _null_ _null_ _null_ _null_ _null_ date_pli _null_ _null_ _null_ )); -DATA(insert OID = 1142 ( date_mii PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 1082 "1082 23" _null_ _null_ _null_ _null_ _null_ date_mii _null_ _null_ _null_ )); -DATA(insert OID = 1143 ( time_in PGNSP PGUID 12 1 0 0 0 f f f f t f s s 3 0 1083 "2275 26 23" _null_ _null_ _null_ _null_ _null_ time_in _null_ _null_ _null_ )); -DESCR("I/O"); -DATA(insert OID = 1144 ( time_out PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 2275 "1083" _null_ _null_ _null_ _null_ _null_ time_out _null_ _null_ _null_ )); -DESCR("I/O"); -DATA(insert OID = 2909 ( timetypmodin PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 23 "1263" _null_ _null_ _null_ _null_ _null_ timetypmodin _null_ _null_ _null_ )); -DESCR("I/O typmod"); -DATA(insert OID = 2910 ( timetypmodout PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 2275 "23" _null_ _null_ _null_ _null_ _null_ timetypmodout _null_ _null_ _null_ )); -DESCR("I/O typmod"); -DATA(insert OID = 1145 ( time_eq PGNSP PGUID 12 1 0 0 0 f f f t t f i s 2 0 16 "1083 1083" _null_ _null_ _null_ _null_ _null_ time_eq _null_ _null_ _null_ )); + /* element type of variadic array, or 0 */ + Oid provariadic BKI_DEFAULT(0) BKI_LOOKUP(pg_type); -DATA(insert OID = 1146 ( circle_add_pt PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 718 "718 600" _null_ _null_ _null_ _null_ _null_ circle_add_pt _null_ _null_ _null_ )); -DATA(insert OID = 1147 ( circle_sub_pt PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 718 "718 600" _null_ _null_ _null_ _null_ _null_ circle_sub_pt _null_ _null_ _null_ )); -DATA(insert OID = 1148 ( circle_mul_pt PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 718 "718 600" _null_ _null_ _null_ _null_ _null_ circle_mul_pt _null_ _null_ _null_ )); -DATA(insert OID = 1149 ( circle_div_pt PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 718 "718 600" _null_ _null_ _null_ _null_ _null_ circle_div_pt _null_ _null_ _null_ )); + /* transforms calls to it during planning */ + regproc protransform BKI_DEFAULT(0) BKI_LOOKUP(pg_proc); -DATA(insert OID = 1150 ( timestamptz_in PGNSP PGUID 12 1 0 0 0 f f f f t f s s 3 0 1184 "2275 26 23" _null_ _null_ _null_ _null_ _null_ timestamptz_in _null_ _null_ _null_ )); -DESCR("I/O"); -DATA(insert OID = 1151 ( timestamptz_out PGNSP PGUID 12 1 0 0 0 f f f f t f s s 1 0 2275 "1184" _null_ _null_ _null_ _null_ _null_ timestamptz_out _null_ _null_ _null_ )); -DESCR("I/O"); -DATA(insert OID = 2907 ( timestamptztypmodin PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 23 "1263" _null_ _null_ _null_ _null_ _null_ timestamptztypmodin _null_ _null_ _null_ )); -DESCR("I/O typmod"); -DATA(insert OID = 2908 ( timestamptztypmodout PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 2275 "23" _null_ _null_ _null_ _null_ _null_ timestamptztypmodout _null_ _null_ _null_ )); -DESCR("I/O typmod"); -DATA(insert OID = 1152 ( timestamptz_eq PGNSP PGUID 12 1 0 0 0 f f f t t f i s 2 0 16 "1184 1184" _null_ _null_ _null_ _null_ _null_ timestamp_eq _null_ _null_ _null_ )); -DATA(insert OID = 1153 ( timestamptz_ne PGNSP PGUID 12 1 0 0 0 f f f t t f i s 2 0 16 "1184 1184" _null_ _null_ _null_ _null_ _null_ timestamp_ne _null_ _null_ _null_ )); -DATA(insert OID = 1154 ( timestamptz_lt PGNSP PGUID 12 1 0 0 0 f f f t t f i s 2 0 16 "1184 1184" _null_ _null_ _null_ _null_ _null_ timestamp_lt _null_ _null_ _null_ )); -DATA(insert OID = 1155 ( timestamptz_le PGNSP PGUID 12 1 0 0 0 f f f t t f i s 2 0 16 "1184 1184" _null_ _null_ _null_ _null_ _null_ 
timestamp_le _null_ _null_ _null_ )); -DATA(insert OID = 1156 ( timestamptz_ge PGNSP PGUID 12 1 0 0 0 f f f t t f i s 2 0 16 "1184 1184" _null_ _null_ _null_ _null_ _null_ timestamp_ge _null_ _null_ _null_ )); -DATA(insert OID = 1157 ( timestamptz_gt PGNSP PGUID 12 1 0 0 0 f f f t t f i s 2 0 16 "1184 1184" _null_ _null_ _null_ _null_ _null_ timestamp_gt _null_ _null_ _null_ )); -DATA(insert OID = 1158 ( to_timestamp PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 1184 "701" _null_ _null_ _null_ _null_ _null_ float8_timestamptz _null_ _null_ _null_ )); -DESCR("convert UNIX epoch to timestamptz"); -DATA(insert OID = 3995 ( timestamp_zone_transform PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 2281 "2281" _null_ _null_ _null_ _null_ _null_ timestamp_zone_transform _null_ _null_ _null_ )); -DESCR("transform a time zone adjustment"); -DATA(insert OID = 1159 ( timezone PGNSP PGUID 12 1 0 0 timestamp_zone_transform f f f f t f i s 2 0 1114 "25 1184" _null_ _null_ _null_ _null_ _null_ timestamptz_zone _null_ _null_ _null_ )); -DESCR("adjust timestamp to new time zone"); + /* see PROKIND_ categories below */ + char prokind BKI_DEFAULT(f); -DATA(insert OID = 1160 ( interval_in PGNSP PGUID 12 1 0 0 0 f f f f t f s s 3 0 1186 "2275 26 23" _null_ _null_ _null_ _null_ _null_ interval_in _null_ _null_ _null_ )); -DESCR("I/O"); -DATA(insert OID = 1161 ( interval_out PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 2275 "1186" _null_ _null_ _null_ _null_ _null_ interval_out _null_ _null_ _null_ )); -DESCR("I/O"); -DATA(insert OID = 2903 ( intervaltypmodin PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 23 "1263" _null_ _null_ _null_ _null_ _null_ intervaltypmodin _null_ _null_ _null_ )); -DESCR("I/O typmod"); -DATA(insert OID = 2904 ( intervaltypmodout PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 2275 "23" _null_ _null_ _null_ _null_ _null_ intervaltypmodout _null_ _null_ _null_ )); -DESCR("I/O typmod"); -DATA(insert OID = 1162 ( interval_eq PGNSP PGUID 12 1 0 0 0 f f f t t f i s 2 0 16 "1186 1186" _null_ _null_ _null_ _null_ _null_ interval_eq _null_ _null_ _null_ )); -DATA(insert OID = 1163 ( interval_ne PGNSP PGUID 12 1 0 0 0 f f f t t f i s 2 0 16 "1186 1186" _null_ _null_ _null_ _null_ _null_ interval_ne _null_ _null_ _null_ )); -DATA(insert OID = 1164 ( interval_lt PGNSP PGUID 12 1 0 0 0 f f f t t f i s 2 0 16 "1186 1186" _null_ _null_ _null_ _null_ _null_ interval_lt _null_ _null_ _null_ )); -DATA(insert OID = 1165 ( interval_le PGNSP PGUID 12 1 0 0 0 f f f t t f i s 2 0 16 "1186 1186" _null_ _null_ _null_ _null_ _null_ interval_le _null_ _null_ _null_ )); -DATA(insert OID = 1166 ( interval_ge PGNSP PGUID 12 1 0 0 0 f f f t t f i s 2 0 16 "1186 1186" _null_ _null_ _null_ _null_ _null_ interval_ge _null_ _null_ _null_ )); -DATA(insert OID = 1167 ( interval_gt PGNSP PGUID 12 1 0 0 0 f f f t t f i s 2 0 16 "1186 1186" _null_ _null_ _null_ _null_ _null_ interval_gt _null_ _null_ _null_ )); -DATA(insert OID = 1168 ( interval_um PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 1186 "1186" _null_ _null_ _null_ _null_ _null_ interval_um _null_ _null_ _null_ )); -DATA(insert OID = 1169 ( interval_pl PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 1186 "1186 1186" _null_ _null_ _null_ _null_ _null_ interval_pl _null_ _null_ _null_ )); -DATA(insert OID = 1170 ( interval_mi PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 1186 "1186 1186" _null_ _null_ _null_ _null_ _null_ interval_mi _null_ _null_ _null_ )); -DATA(insert OID = 1171 ( date_part PGNSP PGUID 12 1 0 0 0 f f f f t f s s 2 0 701 "25 1184" _null_ _null_ _null_ _null_ _null_ 
timestamptz_part _null_ _null_ _null_ )); -DESCR("extract field from timestamp with time zone"); -DATA(insert OID = 1172 ( date_part PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 701 "25 1186" _null_ _null_ _null_ _null_ _null_ interval_part _null_ _null_ _null_ )); -DESCR("extract field from interval"); -DATA(insert OID = 1173 ( timestamptz PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 1184 "702" _null_ _null_ _null_ _null_ _null_ abstime_timestamptz _null_ _null_ _null_ )); -DESCR("convert abstime to timestamp with time zone"); -DATA(insert OID = 1174 ( timestamptz PGNSP PGUID 12 1 0 0 0 f f f f t f s s 1 0 1184 "1082" _null_ _null_ _null_ _null_ _null_ date_timestamptz _null_ _null_ _null_ )); -DESCR("convert date to timestamp with time zone"); -DATA(insert OID = 2711 ( justify_interval PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 1186 "1186" _null_ _null_ _null_ _null_ _null_ interval_justify_interval _null_ _null_ _null_ )); -DESCR("promote groups of 24 hours to numbers of days and promote groups of 30 days to numbers of months"); -DATA(insert OID = 1175 ( justify_hours PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 1186 "1186" _null_ _null_ _null_ _null_ _null_ interval_justify_hours _null_ _null_ _null_ )); -DESCR("promote groups of 24 hours to numbers of days"); -DATA(insert OID = 1295 ( justify_days PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 1186 "1186" _null_ _null_ _null_ _null_ _null_ interval_justify_days _null_ _null_ _null_ )); -DESCR("promote groups of 30 days to numbers of months"); -DATA(insert OID = 1176 ( timestamptz PGNSP PGUID 14 1 0 0 0 f f f f t f s s 2 0 1184 "1082 1083" _null_ _null_ _null_ _null_ _null_ "select cast(($1 + $2) as timestamp with time zone)" _null_ _null_ _null_ )); -DESCR("convert date and time to timestamp with time zone"); -DATA(insert OID = 1177 ( interval PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 1186 "703" _null_ _null_ _null_ _null_ _null_ reltime_interval _null_ _null_ _null_ )); -DESCR("convert reltime to interval"); -DATA(insert OID = 1178 ( date PGNSP PGUID 12 1 0 0 0 f f f f t f s s 1 0 1082 "1184" _null_ _null_ _null_ _null_ _null_ timestamptz_date _null_ _null_ _null_ )); -DESCR("convert timestamp with time zone to date"); -DATA(insert OID = 1179 ( date PGNSP PGUID 12 1 0 0 0 f f f f t f s s 1 0 1082 "702" _null_ _null_ _null_ _null_ _null_ abstime_date _null_ _null_ _null_ )); -DESCR("convert abstime to date"); -DATA(insert OID = 1180 ( abstime PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 702 "1184" _null_ _null_ _null_ _null_ _null_ timestamptz_abstime _null_ _null_ _null_ )); -DESCR("convert timestamp with time zone to abstime"); -DATA(insert OID = 1181 ( age PGNSP PGUID 12 1 0 0 0 f f f f t f s r 1 0 23 "28" _null_ _null_ _null_ _null_ _null_ xid_age _null_ _null_ _null_ )); -DESCR("age of a transaction ID, in transactions before current transaction"); -DATA(insert OID = 3939 ( mxid_age PGNSP PGUID 12 1 0 0 0 f f f f t f s s 1 0 23 "28" _null_ _null_ _null_ _null_ _null_ mxid_age _null_ _null_ _null_ )); -DESCR("age of a multi-transaction ID, in multi-transactions before current multi-transaction"); + /* security definer */ + bool prosecdef BKI_DEFAULT(f); -DATA(insert OID = 1188 ( timestamptz_mi PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 1186 "1184 1184" _null_ _null_ _null_ _null_ _null_ timestamp_mi _null_ _null_ _null_ )); -DATA(insert OID = 1189 ( timestamptz_pl_interval PGNSP PGUID 12 1 0 0 0 f f f f t f s s 2 0 1184 "1184 1186" _null_ _null_ _null_ _null_ _null_ timestamptz_pl_interval _null_ _null_ _null_ )); -DATA(insert OID = 1190 ( 
timestamptz_mi_interval PGNSP PGUID 12 1 0 0 0 f f f f t f s s 2 0 1184 "1184 1186" _null_ _null_ _null_ _null_ _null_ timestamptz_mi_interval _null_ _null_ _null_ )); -DATA(insert OID = 1194 ( reltime PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 703 "1186" _null_ _null_ _null_ _null_ _null_ interval_reltime _null_ _null_ _null_ )); -DESCR("convert interval to reltime"); -DATA(insert OID = 1195 ( timestamptz_smaller PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 1184 "1184 1184" _null_ _null_ _null_ _null_ _null_ timestamp_smaller _null_ _null_ _null_ )); -DESCR("smaller of two"); -DATA(insert OID = 1196 ( timestamptz_larger PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 1184 "1184 1184" _null_ _null_ _null_ _null_ _null_ timestamp_larger _null_ _null_ _null_ )); -DESCR("larger of two"); -DATA(insert OID = 1197 ( interval_smaller PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 1186 "1186 1186" _null_ _null_ _null_ _null_ _null_ interval_smaller _null_ _null_ _null_ )); -DESCR("smaller of two"); -DATA(insert OID = 1198 ( interval_larger PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 1186 "1186 1186" _null_ _null_ _null_ _null_ _null_ interval_larger _null_ _null_ _null_ )); -DESCR("larger of two"); -DATA(insert OID = 1199 ( age PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 1186 "1184 1184" _null_ _null_ _null_ _null_ _null_ timestamptz_age _null_ _null_ _null_ )); -DESCR("date difference preserving months and years"); + /* is it a leak-proof function? */ + bool proleakproof BKI_DEFAULT(f); -/* OIDS 1200 - 1299 */ + /* strict with respect to NULLs? */ + bool proisstrict BKI_DEFAULT(t); -DATA(insert OID = 3918 ( interval_transform PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 2281 "2281" _null_ _null_ _null_ _null_ _null_ interval_transform _null_ _null_ _null_ )); -DESCR("transform an interval length coercion"); -DATA(insert OID = 1200 ( interval PGNSP PGUID 12 1 0 0 interval_transform f f f f t f i s 2 0 1186 "1186 23" _null_ _null_ _null_ _null_ _null_ interval_scale _null_ _null_ _null_ )); -DESCR("adjust interval precision"); + /* returns a set? 
*/ + bool proretset BKI_DEFAULT(f); -DATA(insert OID = 1215 ( obj_description PGNSP PGUID 14 100 0 0 0 f f f f t f s s 2 0 25 "26 19" _null_ _null_ _null_ _null_ _null_ "select description from pg_catalog.pg_description where objoid = $1 and classoid = (select oid from pg_catalog.pg_class where relname = $2 and relnamespace = PGNSP) and objsubid = 0" _null_ _null_ _null_ )); -DESCR("get description for object id and catalog name"); -DATA(insert OID = 1216 ( col_description PGNSP PGUID 14 100 0 0 0 f f f f t f s s 2 0 25 "26 23" _null_ _null_ _null_ _null_ _null_ "select description from pg_catalog.pg_description where objoid = $1 and classoid = ''pg_catalog.pg_class''::pg_catalog.regclass and objsubid = $2" _null_ _null_ _null_ )); -DESCR("get description for table column"); -DATA(insert OID = 1993 ( shobj_description PGNSP PGUID 14 100 0 0 0 f f f f t f s s 2 0 25 "26 19" _null_ _null_ _null_ _null_ _null_ "select description from pg_catalog.pg_shdescription where objoid = $1 and classoid = (select oid from pg_catalog.pg_class where relname = $2 and relnamespace = PGNSP)" _null_ _null_ _null_ )); -DESCR("get description for object id and shared catalog name"); + /* see PROVOLATILE_ categories below */ + char provolatile BKI_DEFAULT(i); -DATA(insert OID = 1217 ( date_trunc PGNSP PGUID 12 1 0 0 0 f f f f t f s s 2 0 1184 "25 1184" _null_ _null_ _null_ _null_ _null_ timestamptz_trunc _null_ _null_ _null_ )); -DESCR("truncate timestamp with time zone to specified units"); -DATA(insert OID = 1218 ( date_trunc PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 1186 "25 1186" _null_ _null_ _null_ _null_ _null_ interval_trunc _null_ _null_ _null_ )); -DESCR("truncate interval to specified units"); + /* see PROPARALLEL_ categories below */ + char proparallel BKI_DEFAULT(s); -DATA(insert OID = 1219 ( int8inc PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 20 "20" _null_ _null_ _null_ _null_ _null_ int8inc _null_ _null_ _null_ )); -DESCR("increment"); -DATA(insert OID = 3546 ( int8dec PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 20 "20" _null_ _null_ _null_ _null_ _null_ int8dec _null_ _null_ _null_ )); -DESCR("decrement"); -DATA(insert OID = 2804 ( int8inc_any PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 20 "20 2276" _null_ _null_ _null_ _null_ _null_ int8inc_any _null_ _null_ _null_ )); -DESCR("increment, ignores second argument"); -DATA(insert OID = 3547 ( int8dec_any PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 20 "20 2276" _null_ _null_ _null_ _null_ _null_ int8dec_any _null_ _null_ _null_ )); -DESCR("decrement, ignores second argument"); -DATA(insert OID = 1230 ( int8abs PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 20 "20" _null_ _null_ _null_ _null_ _null_ int8abs _null_ _null_ _null_ )); + /* number of arguments */ + /* Note: need not be given in pg_proc.dat; genbki.pl will compute it */ + int16 pronargs; -DATA(insert OID = 1236 ( int8larger PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 20 "20 20" _null_ _null_ _null_ _null_ _null_ int8larger _null_ _null_ _null_ )); -DESCR("larger of two"); -DATA(insert OID = 1237 ( int8smaller PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 20 "20 20" _null_ _null_ _null_ _null_ _null_ int8smaller _null_ _null_ _null_ )); -DESCR("smaller of two"); + /* number of arguments with defaults */ + int16 pronargdefaults BKI_DEFAULT(0); -DATA(insert OID = 1238 ( texticregexeq PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 16 "25 25" _null_ _null_ _null_ _null_ _null_ texticregexeq _null_ _null_ _null_ )); -DATA(insert OID = 1239 ( texticregexne PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 16 "25 25" 
_null_ _null_ _null_ _null_ _null_ texticregexne _null_ _null_ _null_ )); -DATA(insert OID = 1240 ( nameicregexeq PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 16 "19 25" _null_ _null_ _null_ _null_ _null_ nameicregexeq _null_ _null_ _null_ )); -DATA(insert OID = 1241 ( nameicregexne PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 16 "19 25" _null_ _null_ _null_ _null_ _null_ nameicregexne _null_ _null_ _null_ )); + /* OID of result type */ + Oid prorettype BKI_LOOKUP(pg_type); -DATA(insert OID = 1251 ( int4abs PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 23 "23" _null_ _null_ _null_ _null_ _null_ int4abs _null_ _null_ _null_ )); -DATA(insert OID = 1253 ( int2abs PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 21 "21" _null_ _null_ _null_ _null_ _null_ int2abs _null_ _null_ _null_ )); - -DATA(insert OID = 1271 ( overlaps PGNSP PGUID 12 1 0 0 0 f f f f f f i s 4 0 16 "1266 1266 1266 1266" _null_ _null_ _null_ _null_ _null_ overlaps_timetz _null_ _null_ _null_ )); -DESCR("intervals overlap?"); -DATA(insert OID = 1272 ( datetime_pl PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 1114 "1082 1083" _null_ _null_ _null_ _null_ _null_ datetime_timestamp _null_ _null_ _null_ )); -DATA(insert OID = 1273 ( date_part PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 701 "25 1266" _null_ _null_ _null_ _null_ _null_ timetz_part _null_ _null_ _null_ )); -DESCR("extract field from time with time zone"); -DATA(insert OID = 1274 ( int84pl PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 20 "20 23" _null_ _null_ _null_ _null_ _null_ int84pl _null_ _null_ _null_ )); -DATA(insert OID = 1275 ( int84mi PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 20 "20 23" _null_ _null_ _null_ _null_ _null_ int84mi _null_ _null_ _null_ )); -DATA(insert OID = 1276 ( int84mul PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 20 "20 23" _null_ _null_ _null_ _null_ _null_ int84mul _null_ _null_ _null_ )); -DATA(insert OID = 1277 ( int84div PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 20 "20 23" _null_ _null_ _null_ _null_ _null_ int84div _null_ _null_ _null_ )); -DATA(insert OID = 1278 ( int48pl PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 20 "23 20" _null_ _null_ _null_ _null_ _null_ int48pl _null_ _null_ _null_ )); -DATA(insert OID = 1279 ( int48mi PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 20 "23 20" _null_ _null_ _null_ _null_ _null_ int48mi _null_ _null_ _null_ )); -DATA(insert OID = 1280 ( int48mul PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 20 "23 20" _null_ _null_ _null_ _null_ _null_ int48mul _null_ _null_ _null_ )); -DATA(insert OID = 1281 ( int48div PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 20 "23 20" _null_ _null_ _null_ _null_ _null_ int48div _null_ _null_ _null_ )); - -DATA(insert OID = 837 ( int82pl PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 20 "20 21" _null_ _null_ _null_ _null_ _null_ int82pl _null_ _null_ _null_ )); -DATA(insert OID = 838 ( int82mi PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 20 "20 21" _null_ _null_ _null_ _null_ _null_ int82mi _null_ _null_ _null_ )); -DATA(insert OID = 839 ( int82mul PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 20 "20 21" _null_ _null_ _null_ _null_ _null_ int82mul _null_ _null_ _null_ )); -DATA(insert OID = 840 ( int82div PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 20 "20 21" _null_ _null_ _null_ _null_ _null_ int82div _null_ _null_ _null_ )); -DATA(insert OID = 841 ( int28pl PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 20 "21 20" _null_ _null_ _null_ _null_ _null_ int28pl _null_ _null_ _null_ )); -DATA(insert OID = 942 ( int28mi PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 20 "21 20" _null_ _null_ _null_ _null_ _null_ int28mi 
_null_ _null_ _null_ )); -DATA(insert OID = 943 ( int28mul PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 20 "21 20" _null_ _null_ _null_ _null_ _null_ int28mul _null_ _null_ _null_ )); -DATA(insert OID = 948 ( int28div PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 20 "21 20" _null_ _null_ _null_ _null_ _null_ int28div _null_ _null_ _null_ )); - -DATA(insert OID = 1287 ( oid PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 26 "20" _null_ _null_ _null_ _null_ _null_ i8tooid _null_ _null_ _null_ )); -DESCR("convert int8 to oid"); -DATA(insert OID = 1288 ( int8 PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 20 "26" _null_ _null_ _null_ _null_ _null_ oidtoi8 _null_ _null_ _null_ )); -DESCR("convert oid to int8"); - -DATA(insert OID = 1291 ( suppress_redundant_updates_trigger PGNSP PGUID 12 1 0 0 0 f f f f t f v s 0 0 2279 "" _null_ _null_ _null_ _null_ _null_ suppress_redundant_updates_trigger _null_ _null_ _null_ )); -DESCR("trigger to suppress updates when new and old records match"); + /* + * variable-length fields start here, but we allow direct access to + * proargtypes + */ -DATA(insert OID = 1292 ( tideq PGNSP PGUID 12 1 0 0 0 f f f t t f i s 2 0 16 "27 27" _null_ _null_ _null_ _null_ _null_ tideq _null_ _null_ _null_ )); -DATA(insert OID = 1293 ( currtid PGNSP PGUID 12 1 0 0 0 f f f f t f v u 2 0 27 "26 27" _null_ _null_ _null_ _null_ _null_ currtid_byreloid _null_ _null_ _null_ )); -DESCR("latest tid of a tuple"); -DATA(insert OID = 1294 ( currtid2 PGNSP PGUID 12 1 0 0 0 f f f f t f v u 2 0 27 "25 27" _null_ _null_ _null_ _null_ _null_ currtid_byrelname _null_ _null_ _null_ )); -DESCR("latest tid of a tuple"); -DATA(insert OID = 1265 ( tidne PGNSP PGUID 12 1 0 0 0 f f f t t f i s 2 0 16 "27 27" _null_ _null_ _null_ _null_ _null_ tidne _null_ _null_ _null_ )); -DATA(insert OID = 2790 ( tidgt PGNSP PGUID 12 1 0 0 0 f f f t t f i s 2 0 16 "27 27" _null_ _null_ _null_ _null_ _null_ tidgt _null_ _null_ _null_ )); -DATA(insert OID = 2791 ( tidlt PGNSP PGUID 12 1 0 0 0 f f f t t f i s 2 0 16 "27 27" _null_ _null_ _null_ _null_ _null_ tidlt _null_ _null_ _null_ )); -DATA(insert OID = 2792 ( tidge PGNSP PGUID 12 1 0 0 0 f f f t t f i s 2 0 16 "27 27" _null_ _null_ _null_ _null_ _null_ tidge _null_ _null_ _null_ )); -DATA(insert OID = 2793 ( tidle PGNSP PGUID 12 1 0 0 0 f f f t t f i s 2 0 16 "27 27" _null_ _null_ _null_ _null_ _null_ tidle _null_ _null_ _null_ )); -DATA(insert OID = 2794 ( bttidcmp PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 23 "27 27" _null_ _null_ _null_ _null_ _null_ bttidcmp _null_ _null_ _null_ )); -DESCR("less-equal-greater"); -DATA(insert OID = 2795 ( tidlarger PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 27 "27 27" _null_ _null_ _null_ _null_ _null_ tidlarger _null_ _null_ _null_ )); -DESCR("larger of two"); -DATA(insert OID = 2796 ( tidsmaller PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 27 "27 27" _null_ _null_ _null_ _null_ _null_ tidsmaller _null_ _null_ _null_ )); -DESCR("smaller of two"); + /* parameter types (excludes OUT params) */ + oidvector proargtypes BKI_LOOKUP(pg_type); -DATA(insert OID = 1296 ( timedate_pl PGNSP PGUID 14 1 0 0 0 f f f f t f i s 2 0 1114 "1083 1082" _null_ _null_ _null_ _null_ _null_ "select ($2 + $1)" _null_ _null_ _null_ )); -DATA(insert OID = 1297 ( datetimetz_pl PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 1184 "1082 1266" _null_ _null_ _null_ _null_ _null_ datetimetz_timestamptz _null_ _null_ _null_ )); -DATA(insert OID = 1298 ( timetzdate_pl PGNSP PGUID 14 1 0 0 0 f f f f t f i s 2 0 1184 "1266 1082" _null_ _null_ _null_ _null_ _null_ "select ($2 + $1)" 
_null_ _null_ _null_ )); -DATA(insert OID = 1299 ( now PGNSP PGUID 12 1 0 0 0 f f f f t f s r 0 0 1184 "" _null_ _null_ _null_ _null_ _null_ now _null_ _null_ _null_ )); -DESCR("current transaction time"); -DATA(insert OID = 2647 ( transaction_timestamp PGNSP PGUID 12 1 0 0 0 f f f f t f s s 0 0 1184 "" _null_ _null_ _null_ _null_ _null_ now _null_ _null_ _null_ )); -DESCR("current transaction time"); -DATA(insert OID = 2648 ( statement_timestamp PGNSP PGUID 12 1 0 0 0 f f f f t f s r 0 0 1184 "" _null_ _null_ _null_ _null_ _null_ statement_timestamp _null_ _null_ _null_ )); -DESCR("current statement time"); -DATA(insert OID = 2649 ( clock_timestamp PGNSP PGUID 12 1 0 0 0 f f f f t f v s 0 0 1184 "" _null_ _null_ _null_ _null_ _null_ clock_timestamp _null_ _null_ _null_ )); -DESCR("current clock time"); +#ifdef CATALOG_VARLEN -/* OIDS 1300 - 1399 */ + /* all param types (NULL if IN only) */ + Oid proallargtypes[1] BKI_DEFAULT(_null_) BKI_LOOKUP(pg_type); -DATA(insert OID = 1300 ( positionsel PGNSP PGUID 12 1 0 0 0 f f f f t f s s 4 0 701 "2281 26 2281 23" _null_ _null_ _null_ _null_ _null_ positionsel _null_ _null_ _null_ )); -DESCR("restriction selectivity for position-comparison operators"); -DATA(insert OID = 1301 ( positionjoinsel PGNSP PGUID 12 1 0 0 0 f f f f t f s s 5 0 701 "2281 26 2281 21 2281" _null_ _null_ _null_ _null_ _null_ positionjoinsel _null_ _null_ _null_ )); -DESCR("join selectivity for position-comparison operators"); -DATA(insert OID = 1302 ( contsel PGNSP PGUID 12 1 0 0 0 f f f f t f s s 4 0 701 "2281 26 2281 23" _null_ _null_ _null_ _null_ _null_ contsel _null_ _null_ _null_ )); -DESCR("restriction selectivity for containment comparison operators"); -DATA(insert OID = 1303 ( contjoinsel PGNSP PGUID 12 1 0 0 0 f f f f t f s s 5 0 701 "2281 26 2281 21 2281" _null_ _null_ _null_ _null_ _null_ contjoinsel _null_ _null_ _null_ )); -DESCR("join selectivity for containment comparison operators"); + /* parameter modes (NULL if IN only) */ + char proargmodes[1] BKI_DEFAULT(_null_); -DATA(insert OID = 1304 ( overlaps PGNSP PGUID 12 1 0 0 0 f f f f f f i s 4 0 16 "1184 1184 1184 1184" _null_ _null_ _null_ _null_ _null_ overlaps_timestamp _null_ _null_ _null_ )); -DESCR("intervals overlap?"); -DATA(insert OID = 1305 ( overlaps PGNSP PGUID 14 1 0 0 0 f f f f f f s s 4 0 16 "1184 1186 1184 1186" _null_ _null_ _null_ _null_ _null_ "select ($1, ($1 + $2)) overlaps ($3, ($3 + $4))" _null_ _null_ _null_ )); -DESCR("intervals overlap?"); -DATA(insert OID = 1306 ( overlaps PGNSP PGUID 14 1 0 0 0 f f f f f f s s 4 0 16 "1184 1184 1184 1186" _null_ _null_ _null_ _null_ _null_ "select ($1, $2) overlaps ($3, ($3 + $4))" _null_ _null_ _null_ )); -DESCR("intervals overlap?"); -DATA(insert OID = 1307 ( overlaps PGNSP PGUID 14 1 0 0 0 f f f f f f s s 4 0 16 "1184 1186 1184 1184" _null_ _null_ _null_ _null_ _null_ "select ($1, ($1 + $2)) overlaps ($3, $4)" _null_ _null_ _null_ )); -DESCR("intervals overlap?"); + /* parameter names (NULL if no names) */ + text proargnames[1] BKI_DEFAULT(_null_); -DATA(insert OID = 1308 ( overlaps PGNSP PGUID 12 1 0 0 0 f f f f f f i s 4 0 16 "1083 1083 1083 1083" _null_ _null_ _null_ _null_ _null_ overlaps_time _null_ _null_ _null_ )); -DESCR("intervals overlap?"); -DATA(insert OID = 1309 ( overlaps PGNSP PGUID 14 1 0 0 0 f f f f f f i s 4 0 16 "1083 1186 1083 1186" _null_ _null_ _null_ _null_ _null_ "select ($1, ($1 + $2)) overlaps ($3, ($3 + $4))" _null_ _null_ _null_ )); -DESCR("intervals overlap?"); -DATA(insert OID = 1310 ( overlaps PGNSP PGUID 14 1 0 0 0 f 
f f f f f i s 4 0 16 "1083 1083 1083 1186" _null_ _null_ _null_ _null_ _null_ "select ($1, $2) overlaps ($3, ($3 + $4))" _null_ _null_ _null_ )); -DESCR("intervals overlap?"); -DATA(insert OID = 1311 ( overlaps PGNSP PGUID 14 1 0 0 0 f f f f f f i s 4 0 16 "1083 1186 1083 1083" _null_ _null_ _null_ _null_ _null_ "select ($1, ($1 + $2)) overlaps ($3, $4)" _null_ _null_ _null_ )); -DESCR("intervals overlap?"); + /* list of expression trees for argument defaults (NULL if none) */ + pg_node_tree proargdefaults BKI_DEFAULT(_null_); -DATA(insert OID = 1312 ( timestamp_in PGNSP PGUID 12 1 0 0 0 f f f f t f s s 3 0 1114 "2275 26 23" _null_ _null_ _null_ _null_ _null_ timestamp_in _null_ _null_ _null_ )); -DESCR("I/O"); -DATA(insert OID = 1313 ( timestamp_out PGNSP PGUID 12 1 0 0 0 f f f f t f s s 1 0 2275 "1114" _null_ _null_ _null_ _null_ _null_ timestamp_out _null_ _null_ _null_ )); -DESCR("I/O"); -DATA(insert OID = 2905 ( timestamptypmodin PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 23 "1263" _null_ _null_ _null_ _null_ _null_ timestamptypmodin _null_ _null_ _null_ )); -DESCR("I/O typmod"); -DATA(insert OID = 2906 ( timestamptypmodout PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 2275 "23" _null_ _null_ _null_ _null_ _null_ timestamptypmodout _null_ _null_ _null_ )); -DESCR("I/O typmod"); -DATA(insert OID = 1314 ( timestamptz_cmp PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 23 "1184 1184" _null_ _null_ _null_ _null_ _null_ timestamp_cmp _null_ _null_ _null_ )); -DESCR("less-equal-greater"); -DATA(insert OID = 1315 ( interval_cmp PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 23 "1186 1186" _null_ _null_ _null_ _null_ _null_ interval_cmp _null_ _null_ _null_ )); -DESCR("less-equal-greater"); -DATA(insert OID = 1316 ( time PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 1083 "1114" _null_ _null_ _null_ _null_ _null_ timestamp_time _null_ _null_ _null_ )); -DESCR("convert timestamp to time"); + /* types for which to apply transforms */ + Oid protrftypes[1] BKI_DEFAULT(_null_); -DATA(insert OID = 1317 ( length PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 23 "25" _null_ _null_ _null_ _null_ _null_ textlen _null_ _null_ _null_ )); -DESCR("length"); -DATA(insert OID = 1318 ( length PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 23 "1042" _null_ _null_ _null_ _null_ _null_ bpcharlen _null_ _null_ _null_ )); -DESCR("character length"); + /* procedure source text */ + text prosrc BKI_FORCE_NOT_NULL; -DATA(insert OID = 1319 ( xideqint4 PGNSP PGUID 12 1 0 0 0 f f f t t f i s 2 0 16 "28 23" _null_ _null_ _null_ _null_ _null_ xideq _null_ _null_ _null_ )); -DATA(insert OID = 3309 ( xidneqint4 PGNSP PGUID 12 1 0 0 0 f f f t t f i s 2 0 16 "28 23" _null_ _null_ _null_ _null_ _null_ xidneq _null_ _null_ _null_ )); + /* secondary procedure info (can be NULL) */ + text probin BKI_DEFAULT(_null_); -DATA(insert OID = 1326 ( interval_div PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 1186 "1186 701" _null_ _null_ _null_ _null_ _null_ interval_div _null_ _null_ _null_ )); + /* procedure-local GUC settings */ + text proconfig[1] BKI_DEFAULT(_null_); -DATA(insert OID = 1339 ( dlog10 PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 701 "701" _null_ _null_ _null_ _null_ _null_ dlog10 _null_ _null_ _null_ )); -DESCR("base 10 logarithm"); -DATA(insert OID = 1340 ( log PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 701 "701" _null_ _null_ _null_ _null_ _null_ dlog10 _null_ _null_ _null_ )); -DESCR("base 10 logarithm"); -DATA(insert OID = 1341 ( ln PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 701 "701" _null_ _null_ _null_ _null_ _null_ dlog1 _null_ _null_ 
_null_ )); -DESCR("natural logarithm"); -DATA(insert OID = 1342 ( round PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 701 "701" _null_ _null_ _null_ _null_ _null_ dround _null_ _null_ _null_ )); -DESCR("round to nearest integer"); -DATA(insert OID = 1343 ( trunc PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 701 "701" _null_ _null_ _null_ _null_ _null_ dtrunc _null_ _null_ _null_ )); -DESCR("truncate to integer"); -DATA(insert OID = 1344 ( sqrt PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 701 "701" _null_ _null_ _null_ _null_ _null_ dsqrt _null_ _null_ _null_ )); -DESCR("square root"); -DATA(insert OID = 1345 ( cbrt PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 701 "701" _null_ _null_ _null_ _null_ _null_ dcbrt _null_ _null_ _null_ )); -DESCR("cube root"); -DATA(insert OID = 1346 ( pow PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 701 "701 701" _null_ _null_ _null_ _null_ _null_ dpow _null_ _null_ _null_ )); -DESCR("exponentiation"); -DATA(insert OID = 1368 ( power PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 701 "701 701" _null_ _null_ _null_ _null_ _null_ dpow _null_ _null_ _null_ )); -DESCR("exponentiation"); -DATA(insert OID = 1347 ( exp PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 701 "701" _null_ _null_ _null_ _null_ _null_ dexp _null_ _null_ _null_ )); -DESCR("natural exponential (e^x)"); + /* access permissions */ + aclitem proacl[1] BKI_DEFAULT(_null_); +#endif +} FormData_pg_proc; -/* - * This form of obj_description is now deprecated, since it will fail if - * OIDs are not unique across system catalogs. Use the other form instead. +/* ---------------- + * Form_pg_proc corresponds to a pointer to a tuple with + * the format of pg_proc relation. + * ---------------- */ -DATA(insert OID = 1348 ( obj_description PGNSP PGUID 14 100 0 0 0 f f f f t f s s 1 0 25 "26" _null_ _null_ _null_ _null_ _null_ "select description from pg_catalog.pg_description where objoid = $1 and objsubid = 0" _null_ _null_ _null_ )); -DESCR("deprecated, use two-argument form instead"); -DATA(insert OID = 1349 ( oidvectortypes PGNSP PGUID 12 1 0 0 0 f f f f t f s s 1 0 25 "30" _null_ _null_ _null_ _null_ _null_ oidvectortypes _null_ _null_ _null_ )); -DESCR("print type names of oidvector field"); - - -DATA(insert OID = 1350 ( timetz_in PGNSP PGUID 12 1 0 0 0 f f f f t f s s 3 0 1266 "2275 26 23" _null_ _null_ _null_ _null_ _null_ timetz_in _null_ _null_ _null_ )); -DESCR("I/O"); -DATA(insert OID = 1351 ( timetz_out PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 2275 "1266" _null_ _null_ _null_ _null_ _null_ timetz_out _null_ _null_ _null_ )); -DESCR("I/O"); -DATA(insert OID = 2911 ( timetztypmodin PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 23 "1263" _null_ _null_ _null_ _null_ _null_ timetztypmodin _null_ _null_ _null_ )); -DESCR("I/O typmod"); -DATA(insert OID = 2912 ( timetztypmodout PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 2275 "23" _null_ _null_ _null_ _null_ _null_ timetztypmodout _null_ _null_ _null_ )); -DESCR("I/O typmod"); -DATA(insert OID = 1352 ( timetz_eq PGNSP PGUID 12 1 0 0 0 f f f t t f i s 2 0 16 "1266 1266" _null_ _null_ _null_ _null_ _null_ timetz_eq _null_ _null_ _null_ )); -DATA(insert OID = 1353 ( timetz_ne PGNSP PGUID 12 1 0 0 0 f f f t t f i s 2 0 16 "1266 1266" _null_ _null_ _null_ _null_ _null_ timetz_ne _null_ _null_ _null_ )); -DATA(insert OID = 1354 ( timetz_lt PGNSP PGUID 12 1 0 0 0 f f f t t f i s 2 0 16 "1266 1266" _null_ _null_ _null_ _null_ _null_ timetz_lt _null_ _null_ _null_ )); -DATA(insert OID = 1355 ( timetz_le PGNSP PGUID 12 1 0 0 0 f f f t t f i s 2 0 16 "1266 1266" _null_ _null_ _null_ 
_null_ _null_ timetz_le _null_ _null_ _null_ )); -DATA(insert OID = 1356 ( timetz_ge PGNSP PGUID 12 1 0 0 0 f f f t t f i s 2 0 16 "1266 1266" _null_ _null_ _null_ _null_ _null_ timetz_ge _null_ _null_ _null_ )); -DATA(insert OID = 1357 ( timetz_gt PGNSP PGUID 12 1 0 0 0 f f f t t f i s 2 0 16 "1266 1266" _null_ _null_ _null_ _null_ _null_ timetz_gt _null_ _null_ _null_ )); -DATA(insert OID = 1358 ( timetz_cmp PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 23 "1266 1266" _null_ _null_ _null_ _null_ _null_ timetz_cmp _null_ _null_ _null_ )); -DESCR("less-equal-greater"); -DATA(insert OID = 1359 ( timestamptz PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 1184 "1082 1266" _null_ _null_ _null_ _null_ _null_ datetimetz_timestamptz _null_ _null_ _null_ )); -DESCR("convert date and time with time zone to timestamp with time zone"); - -DATA(insert OID = 1364 ( time PGNSP PGUID 14 1 0 0 0 f f f f t f s s 1 0 1083 "702" _null_ _null_ _null_ _null_ _null_ "select cast(cast($1 as timestamp without time zone) as pg_catalog.time)" _null_ _null_ _null_ )); -DESCR("convert abstime to time"); - -DATA(insert OID = 1367 ( character_length PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 23 "1042" _null_ _null_ _null_ _null_ _null_ bpcharlen _null_ _null_ _null_ )); -DESCR("character length"); -DATA(insert OID = 1369 ( character_length PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 23 "25" _null_ _null_ _null_ _null_ _null_ textlen _null_ _null_ _null_ )); -DESCR("character length"); - -DATA(insert OID = 1370 ( interval PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 1186 "1083" _null_ _null_ _null_ _null_ _null_ time_interval _null_ _null_ _null_ )); -DESCR("convert time to interval"); -DATA(insert OID = 1372 ( char_length PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 23 "1042" _null_ _null_ _null_ _null_ _null_ bpcharlen _null_ _null_ _null_ )); -DESCR("character length"); -DATA(insert OID = 1374 ( octet_length PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 23 "25" _null_ _null_ _null_ _null_ _null_ textoctetlen _null_ _null_ _null_ )); -DESCR("octet length"); -DATA(insert OID = 1375 ( octet_length PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 23 "1042" _null_ _null_ _null_ _null_ _null_ bpcharoctetlen _null_ _null_ _null_ )); -DESCR("octet length"); - -DATA(insert OID = 1377 ( time_larger PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 1083 "1083 1083" _null_ _null_ _null_ _null_ _null_ time_larger _null_ _null_ _null_ )); -DESCR("larger of two"); -DATA(insert OID = 1378 ( time_smaller PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 1083 "1083 1083" _null_ _null_ _null_ _null_ _null_ time_smaller _null_ _null_ _null_ )); -DESCR("smaller of two"); -DATA(insert OID = 1379 ( timetz_larger PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 1266 "1266 1266" _null_ _null_ _null_ _null_ _null_ timetz_larger _null_ _null_ _null_ )); -DESCR("larger of two"); -DATA(insert OID = 1380 ( timetz_smaller PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 1266 "1266 1266" _null_ _null_ _null_ _null_ _null_ timetz_smaller _null_ _null_ _null_ )); -DESCR("smaller of two"); - -DATA(insert OID = 1381 ( char_length PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 23 "25" _null_ _null_ _null_ _null_ _null_ textlen _null_ _null_ _null_ )); -DESCR("character length"); - -DATA(insert OID = 1382 ( date_part PGNSP PGUID 14 1 0 0 0 f f f f t f s s 2 0 701 "25 702" _null_ _null_ _null_ _null_ _null_ "select pg_catalog.date_part($1, cast($2 as timestamp with time zone))" _null_ _null_ _null_ )); -DESCR("extract field from abstime"); -DATA(insert OID = 1383 ( date_part PGNSP PGUID 14 1 0 0 0 f f f f 
t f s s 2 0 701 "25 703" _null_ _null_ _null_ _null_ _null_ "select pg_catalog.date_part($1, cast($2 as pg_catalog.interval))" _null_ _null_ _null_ )); -DESCR("extract field from reltime"); -DATA(insert OID = 1384 ( date_part PGNSP PGUID 14 1 0 0 0 f f f f t f i s 2 0 701 "25 1082" _null_ _null_ _null_ _null_ _null_ "select pg_catalog.date_part($1, cast($2 as timestamp without time zone))" _null_ _null_ _null_ )); -DESCR("extract field from date"); -DATA(insert OID = 1385 ( date_part PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 701 "25 1083" _null_ _null_ _null_ _null_ _null_ time_part _null_ _null_ _null_ )); -DESCR("extract field from time"); -DATA(insert OID = 1386 ( age PGNSP PGUID 14 1 0 0 0 f f f f t f s s 1 0 1186 "1184" _null_ _null_ _null_ _null_ _null_ "select pg_catalog.age(cast(current_date as timestamp with time zone), $1)" _null_ _null_ _null_ )); -DESCR("date difference from today preserving months and years"); - -DATA(insert OID = 1388 ( timetz PGNSP PGUID 12 1 0 0 0 f f f f t f s s 1 0 1266 "1184" _null_ _null_ _null_ _null_ _null_ timestamptz_timetz _null_ _null_ _null_ )); -DESCR("convert timestamp with time zone to time with time zone"); - -DATA(insert OID = 1373 ( isfinite PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 16 "1082" _null_ _null_ _null_ _null_ _null_ date_finite _null_ _null_ _null_ )); -DESCR("finite date?"); -DATA(insert OID = 1389 ( isfinite PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 16 "1184" _null_ _null_ _null_ _null_ _null_ timestamp_finite _null_ _null_ _null_ )); -DESCR("finite timestamp?"); -DATA(insert OID = 1390 ( isfinite PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 16 "1186" _null_ _null_ _null_ _null_ _null_ interval_finite _null_ _null_ _null_ )); -DESCR("finite interval?"); - - -DATA(insert OID = 1376 ( factorial PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 1700 "20" _null_ _null_ _null_ _null_ _null_ numeric_fac _null_ _null_ _null_ )); -DESCR("factorial"); -DATA(insert OID = 1394 ( abs PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 700 "700" _null_ _null_ _null_ _null_ _null_ float4abs _null_ _null_ _null_ )); -DESCR("absolute value"); -DATA(insert OID = 1395 ( abs PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 701 "701" _null_ _null_ _null_ _null_ _null_ float8abs _null_ _null_ _null_ )); -DESCR("absolute value"); -DATA(insert OID = 1396 ( abs PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 20 "20" _null_ _null_ _null_ _null_ _null_ int8abs _null_ _null_ _null_ )); -DESCR("absolute value"); -DATA(insert OID = 1397 ( abs PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 23 "23" _null_ _null_ _null_ _null_ _null_ int4abs _null_ _null_ _null_ )); -DESCR("absolute value"); -DATA(insert OID = 1398 ( abs PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 21 "21" _null_ _null_ _null_ _null_ _null_ int2abs _null_ _null_ _null_ )); -DESCR("absolute value"); - -/* OIDS 1400 - 1499 */ - -DATA(insert OID = 1400 ( name PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 19 "1043" _null_ _null_ _null_ _null_ _null_ text_name _null_ _null_ _null_ )); -DESCR("convert varchar to name"); -DATA(insert OID = 1401 ( varchar PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 1043 "19" _null_ _null_ _null_ _null_ _null_ name_text _null_ _null_ _null_ )); -DESCR("convert name to varchar"); - -DATA(insert OID = 1402 ( current_schema PGNSP PGUID 12 1 0 0 0 f f f f t f s s 0 0 19 "" _null_ _null_ _null_ _null_ _null_ current_schema _null_ _null_ _null_ )); -DESCR("current schema name"); -DATA(insert OID = 1403 ( current_schemas PGNSP PGUID 12 1 0 0 0 f f f f t f s s 1 0 1003 "16" _null_ _null_ _null_ _null_ _null_ 
current_schemas _null_ _null_ _null_ )); -DESCR("current schema search list"); - -DATA(insert OID = 1404 ( overlay PGNSP PGUID 12 1 0 0 0 f f f f t f i s 4 0 25 "25 25 23 23" _null_ _null_ _null_ _null_ _null_ textoverlay _null_ _null_ _null_ )); -DESCR("substitute portion of string"); -DATA(insert OID = 1405 ( overlay PGNSP PGUID 12 1 0 0 0 f f f f t f i s 3 0 25 "25 25 23" _null_ _null_ _null_ _null_ _null_ textoverlay_no_len _null_ _null_ _null_ )); -DESCR("substitute portion of string"); - -DATA(insert OID = 1406 ( isvertical PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 16 "600 600" _null_ _null_ _null_ _null_ _null_ point_vert _null_ _null_ _null_ )); -DESCR("vertically aligned"); -DATA(insert OID = 1407 ( ishorizontal PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 16 "600 600" _null_ _null_ _null_ _null_ _null_ point_horiz _null_ _null_ _null_ )); -DESCR("horizontally aligned"); -DATA(insert OID = 1408 ( isparallel PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 16 "601 601" _null_ _null_ _null_ _null_ _null_ lseg_parallel _null_ _null_ _null_ )); -DESCR("parallel"); -DATA(insert OID = 1409 ( isperp PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 16 "601 601" _null_ _null_ _null_ _null_ _null_ lseg_perp _null_ _null_ _null_ )); -DESCR("perpendicular"); -DATA(insert OID = 1410 ( isvertical PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 16 "601" _null_ _null_ _null_ _null_ _null_ lseg_vertical _null_ _null_ _null_ )); -DESCR("vertical"); -DATA(insert OID = 1411 ( ishorizontal PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 16 "601" _null_ _null_ _null_ _null_ _null_ lseg_horizontal _null_ _null_ _null_ )); -DESCR("horizontal"); -DATA(insert OID = 1412 ( isparallel PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 16 "628 628" _null_ _null_ _null_ _null_ _null_ line_parallel _null_ _null_ _null_ )); -DESCR("parallel"); -DATA(insert OID = 1413 ( isperp PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 16 "628 628" _null_ _null_ _null_ _null_ _null_ line_perp _null_ _null_ _null_ )); -DESCR("perpendicular"); -DATA(insert OID = 1414 ( isvertical PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 16 "628" _null_ _null_ _null_ _null_ _null_ line_vertical _null_ _null_ _null_ )); -DESCR("vertical"); -DATA(insert OID = 1415 ( ishorizontal PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 16 "628" _null_ _null_ _null_ _null_ _null_ line_horizontal _null_ _null_ _null_ )); -DESCR("horizontal"); -DATA(insert OID = 1416 ( point PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 600 "718" _null_ _null_ _null_ _null_ _null_ circle_center _null_ _null_ _null_ )); -DESCR("center of"); - -DATA(insert OID = 1419 ( time PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 1083 "1186" _null_ _null_ _null_ _null_ _null_ interval_time _null_ _null_ _null_ )); -DESCR("convert interval to time"); - -DATA(insert OID = 1421 ( box PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 603 "600 600" _null_ _null_ _null_ _null_ _null_ points_box _null_ _null_ _null_ )); -DESCR("convert points to box"); -DATA(insert OID = 1422 ( box_add PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 603 "603 600" _null_ _null_ _null_ _null_ _null_ box_add _null_ _null_ _null_ )); -DATA(insert OID = 1423 ( box_sub PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 603 "603 600" _null_ _null_ _null_ _null_ _null_ box_sub _null_ _null_ _null_ )); -DATA(insert OID = 1424 ( box_mul PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 603 "603 600" _null_ _null_ _null_ _null_ _null_ box_mul _null_ _null_ _null_ )); -DATA(insert OID = 1425 ( box_div PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 603 "603 600" _null_ _null_ _null_ _null_ _null_ 
box_div _null_ _null_ _null_ )); -DATA(insert OID = 1426 ( path_contain_pt PGNSP PGUID 14 1 0 0 0 f f f f t f i s 2 0 16 "602 600" _null_ _null_ _null_ _null_ _null_ "select pg_catalog.on_ppath($2, $1)" _null_ _null_ _null_ )); -DATA(insert OID = 1428 ( poly_contain_pt PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 16 "604 600" _null_ _null_ _null_ _null_ _null_ poly_contain_pt _null_ _null_ _null_ )); -DATA(insert OID = 1429 ( pt_contained_poly PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 16 "600 604" _null_ _null_ _null_ _null_ _null_ pt_contained_poly _null_ _null_ _null_ )); +typedef FormData_pg_proc *Form_pg_proc; -DATA(insert OID = 1430 ( isclosed PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 16 "602" _null_ _null_ _null_ _null_ _null_ path_isclosed _null_ _null_ _null_ )); -DESCR("path closed?"); -DATA(insert OID = 1431 ( isopen PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 16 "602" _null_ _null_ _null_ _null_ _null_ path_isopen _null_ _null_ _null_ )); -DESCR("path open?"); -DATA(insert OID = 1432 ( path_npoints PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 23 "602" _null_ _null_ _null_ _null_ _null_ path_npoints _null_ _null_ _null_ )); +#ifdef EXPOSE_TO_CLIENT_CODE -/* pclose and popen might better be named close and open, but that crashes initdb. - * - thomas 97/04/20 +/* + * Symbolic values for prokind column */ - -DATA(insert OID = 1433 ( pclose PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 602 "602" _null_ _null_ _null_ _null_ _null_ path_close _null_ _null_ _null_ )); -DESCR("close path"); -DATA(insert OID = 1434 ( popen PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 602 "602" _null_ _null_ _null_ _null_ _null_ path_open _null_ _null_ _null_ )); -DESCR("open path"); -DATA(insert OID = 1435 ( path_add PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 602 "602 602" _null_ _null_ _null_ _null_ _null_ path_add _null_ _null_ _null_ )); -DATA(insert OID = 1436 ( path_add_pt PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 602 "602 600" _null_ _null_ _null_ _null_ _null_ path_add_pt _null_ _null_ _null_ )); -DATA(insert OID = 1437 ( path_sub_pt PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 602 "602 600" _null_ _null_ _null_ _null_ _null_ path_sub_pt _null_ _null_ _null_ )); -DATA(insert OID = 1438 ( path_mul_pt PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 602 "602 600" _null_ _null_ _null_ _null_ _null_ path_mul_pt _null_ _null_ _null_ )); -DATA(insert OID = 1439 ( path_div_pt PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 602 "602 600" _null_ _null_ _null_ _null_ _null_ path_div_pt _null_ _null_ _null_ )); - -DATA(insert OID = 1440 ( point PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 600 "701 701" _null_ _null_ _null_ _null_ _null_ construct_point _null_ _null_ _null_ )); -DESCR("convert x, y to point"); -DATA(insert OID = 1441 ( point_add PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 600 "600 600" _null_ _null_ _null_ _null_ _null_ point_add _null_ _null_ _null_ )); -DATA(insert OID = 1442 ( point_sub PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 600 "600 600" _null_ _null_ _null_ _null_ _null_ point_sub _null_ _null_ _null_ )); -DATA(insert OID = 1443 ( point_mul PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 600 "600 600" _null_ _null_ _null_ _null_ _null_ point_mul _null_ _null_ _null_ )); -DATA(insert OID = 1444 ( point_div PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 600 "600 600" _null_ _null_ _null_ _null_ _null_ point_div _null_ _null_ _null_ )); - -DATA(insert OID = 1445 ( poly_npoints PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 23 "604" _null_ _null_ _null_ _null_ _null_ poly_npoints _null_ _null_ _null_ )); -DATA(insert OID = 
1446 ( box PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 603 "604" _null_ _null_ _null_ _null_ _null_ poly_box _null_ _null_ _null_ )); -DESCR("convert polygon to bounding box"); -DATA(insert OID = 1447 ( path PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 602 "604" _null_ _null_ _null_ _null_ _null_ poly_path _null_ _null_ _null_ )); -DESCR("convert polygon to path"); -DATA(insert OID = 1448 ( polygon PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 604 "603" _null_ _null_ _null_ _null_ _null_ box_poly _null_ _null_ _null_ )); -DESCR("convert box to polygon"); -DATA(insert OID = 1449 ( polygon PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 604 "602" _null_ _null_ _null_ _null_ _null_ path_poly _null_ _null_ _null_ )); -DESCR("convert path to polygon"); - -DATA(insert OID = 1450 ( circle_in PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 718 "2275" _null_ _null_ _null_ _null_ _null_ circle_in _null_ _null_ _null_ )); -DESCR("I/O"); -DATA(insert OID = 1451 ( circle_out PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 2275 "718" _null_ _null_ _null_ _null_ _null_ circle_out _null_ _null_ _null_ )); -DESCR("I/O"); -DATA(insert OID = 1452 ( circle_same PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 16 "718 718" _null_ _null_ _null_ _null_ _null_ circle_same _null_ _null_ _null_ )); -DATA(insert OID = 1453 ( circle_contain PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 16 "718 718" _null_ _null_ _null_ _null_ _null_ circle_contain _null_ _null_ _null_ )); -DATA(insert OID = 1454 ( circle_left PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 16 "718 718" _null_ _null_ _null_ _null_ _null_ circle_left _null_ _null_ _null_ )); -DATA(insert OID = 1455 ( circle_overleft PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 16 "718 718" _null_ _null_ _null_ _null_ _null_ circle_overleft _null_ _null_ _null_ )); -DATA(insert OID = 1456 ( circle_overright PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 16 "718 718" _null_ _null_ _null_ _null_ _null_ circle_overright _null_ _null_ _null_ )); -DATA(insert OID = 1457 ( circle_right PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 16 "718 718" _null_ _null_ _null_ _null_ _null_ circle_right _null_ _null_ _null_ )); -DATA(insert OID = 1458 ( circle_contained PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 16 "718 718" _null_ _null_ _null_ _null_ _null_ circle_contained _null_ _null_ _null_ )); -DATA(insert OID = 1459 ( circle_overlap PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 16 "718 718" _null_ _null_ _null_ _null_ _null_ circle_overlap _null_ _null_ _null_ )); -DATA(insert OID = 1460 ( circle_below PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 16 "718 718" _null_ _null_ _null_ _null_ _null_ circle_below _null_ _null_ _null_ )); -DATA(insert OID = 1461 ( circle_above PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 16 "718 718" _null_ _null_ _null_ _null_ _null_ circle_above _null_ _null_ _null_ )); -DATA(insert OID = 1462 ( circle_eq PGNSP PGUID 12 1 0 0 0 f f f t t f i s 2 0 16 "718 718" _null_ _null_ _null_ _null_ _null_ circle_eq _null_ _null_ _null_ )); -DATA(insert OID = 1463 ( circle_ne PGNSP PGUID 12 1 0 0 0 f f f t t f i s 2 0 16 "718 718" _null_ _null_ _null_ _null_ _null_ circle_ne _null_ _null_ _null_ )); -DATA(insert OID = 1464 ( circle_lt PGNSP PGUID 12 1 0 0 0 f f f t t f i s 2 0 16 "718 718" _null_ _null_ _null_ _null_ _null_ circle_lt _null_ _null_ _null_ )); -DATA(insert OID = 1465 ( circle_gt PGNSP PGUID 12 1 0 0 0 f f f t t f i s 2 0 16 "718 718" _null_ _null_ _null_ _null_ _null_ circle_gt _null_ _null_ _null_ )); -DATA(insert OID = 1466 ( circle_le PGNSP PGUID 12 1 0 0 0 f f f t t f i s 2 0 16 "718 718" _null_ 
_null_ _null_ _null_ _null_ circle_le _null_ _null_ _null_ )); -DATA(insert OID = 1467 ( circle_ge PGNSP PGUID 12 1 0 0 0 f f f t t f i s 2 0 16 "718 718" _null_ _null_ _null_ _null_ _null_ circle_ge _null_ _null_ _null_ )); -DATA(insert OID = 1468 ( area PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 701 "718" _null_ _null_ _null_ _null_ _null_ circle_area _null_ _null_ _null_ )); -DESCR("area of circle"); -DATA(insert OID = 1469 ( diameter PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 701 "718" _null_ _null_ _null_ _null_ _null_ circle_diameter _null_ _null_ _null_ )); -DESCR("diameter of circle"); -DATA(insert OID = 1470 ( radius PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 701 "718" _null_ _null_ _null_ _null_ _null_ circle_radius _null_ _null_ _null_ )); -DESCR("radius of circle"); -DATA(insert OID = 1471 ( circle_distance PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 701 "718 718" _null_ _null_ _null_ _null_ _null_ circle_distance _null_ _null_ _null_ )); -DATA(insert OID = 1472 ( circle_center PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 600 "718" _null_ _null_ _null_ _null_ _null_ circle_center _null_ _null_ _null_ )); -DATA(insert OID = 1473 ( circle PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 718 "600 701" _null_ _null_ _null_ _null_ _null_ cr_circle _null_ _null_ _null_ )); -DESCR("convert point and radius to circle"); -DATA(insert OID = 1474 ( circle PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 718 "604" _null_ _null_ _null_ _null_ _null_ poly_circle _null_ _null_ _null_ )); -DESCR("convert polygon to circle"); -DATA(insert OID = 1475 ( polygon PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 604 "23 718" _null_ _null_ _null_ _null_ _null_ circle_poly _null_ _null_ _null_ )); -DESCR("convert vertex count and circle to polygon"); -DATA(insert OID = 1476 ( dist_pc PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 701 "600 718" _null_ _null_ _null_ _null_ _null_ dist_pc _null_ _null_ _null_ )); -DATA(insert OID = 1477 ( circle_contain_pt PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 16 "718 600" _null_ _null_ _null_ _null_ _null_ circle_contain_pt _null_ _null_ _null_ )); -DATA(insert OID = 1478 ( pt_contained_circle PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 16 "600 718" _null_ _null_ _null_ _null_ _null_ pt_contained_circle _null_ _null_ _null_ )); -DATA(insert OID = 4091 ( box PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 603 "600" _null_ _null_ _null_ _null_ _null_ point_box _null_ _null_ _null_ )); -DESCR("convert point to empty box"); -DATA(insert OID = 1479 ( circle PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 718 "603" _null_ _null_ _null_ _null_ _null_ box_circle _null_ _null_ _null_ )); -DESCR("convert box to circle"); -DATA(insert OID = 1480 ( box PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 603 "718" _null_ _null_ _null_ _null_ _null_ circle_box _null_ _null_ _null_ )); -DESCR("convert circle to box"); -DATA(insert OID = 1481 ( tinterval PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 704 "702 702" _null_ _null_ _null_ _null_ _null_ mktinterval _null_ _null_ _null_ )); -DESCR("convert to tinterval"); - -DATA(insert OID = 1482 ( lseg_ne PGNSP PGUID 12 1 0 0 0 f f f t t f i s 2 0 16 "601 601" _null_ _null_ _null_ _null_ _null_ lseg_ne _null_ _null_ _null_ )); -DATA(insert OID = 1483 ( lseg_lt PGNSP PGUID 12 1 0 0 0 f f f t t f i s 2 0 16 "601 601" _null_ _null_ _null_ _null_ _null_ lseg_lt _null_ _null_ _null_ )); -DATA(insert OID = 1484 ( lseg_le PGNSP PGUID 12 1 0 0 0 f f f t t f i s 2 0 16 "601 601" _null_ _null_ _null_ _null_ _null_ lseg_le _null_ _null_ _null_ )); -DATA(insert OID = 1485 ( lseg_gt PGNSP PGUID 
12 1 0 0 0 f f f t t f i s 2 0 16 "601 601" _null_ _null_ _null_ _null_ _null_ lseg_gt _null_ _null_ _null_ )); -DATA(insert OID = 1486 ( lseg_ge PGNSP PGUID 12 1 0 0 0 f f f t t f i s 2 0 16 "601 601" _null_ _null_ _null_ _null_ _null_ lseg_ge _null_ _null_ _null_ )); -DATA(insert OID = 1487 ( lseg_length PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 701 "601" _null_ _null_ _null_ _null_ _null_ lseg_length _null_ _null_ _null_ )); -DATA(insert OID = 1488 ( close_ls PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 600 "628 601" _null_ _null_ _null_ _null_ _null_ close_ls _null_ _null_ _null_ )); -DATA(insert OID = 1489 ( close_lseg PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 600 "601 601" _null_ _null_ _null_ _null_ _null_ close_lseg _null_ _null_ _null_ )); - -DATA(insert OID = 1490 ( line_in PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 628 "2275" _null_ _null_ _null_ _null_ _null_ line_in _null_ _null_ _null_ )); -DESCR("I/O"); -DATA(insert OID = 1491 ( line_out PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 2275 "628" _null_ _null_ _null_ _null_ _null_ line_out _null_ _null_ _null_ )); -DESCR("I/O"); -DATA(insert OID = 1492 ( line_eq PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 16 "628 628" _null_ _null_ _null_ _null_ _null_ line_eq _null_ _null_ _null_ )); -DATA(insert OID = 1493 ( line PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 628 "600 600" _null_ _null_ _null_ _null_ _null_ line_construct_pp _null_ _null_ _null_ )); -DESCR("construct line from points"); -DATA(insert OID = 1494 ( line_interpt PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 600 "628 628" _null_ _null_ _null_ _null_ _null_ line_interpt _null_ _null_ _null_ )); -DATA(insert OID = 1495 ( line_intersect PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 16 "628 628" _null_ _null_ _null_ _null_ _null_ line_intersect _null_ _null_ _null_ )); -DATA(insert OID = 1496 ( line_parallel PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 16 "628 628" _null_ _null_ _null_ _null_ _null_ line_parallel _null_ _null_ _null_ )); -DATA(insert OID = 1497 ( line_perp PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 16 "628 628" _null_ _null_ _null_ _null_ _null_ line_perp _null_ _null_ _null_ )); -DATA(insert OID = 1498 ( line_vertical PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 16 "628" _null_ _null_ _null_ _null_ _null_ line_vertical _null_ _null_ _null_ )); -DATA(insert OID = 1499 ( line_horizontal PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 16 "628" _null_ _null_ _null_ _null_ _null_ line_horizontal _null_ _null_ _null_ )); - -/* OIDS 1500 - 1599 */ - -DATA(insert OID = 1530 ( length PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 701 "601" _null_ _null_ _null_ _null_ _null_ lseg_length _null_ _null_ _null_ )); -DESCR("distance between endpoints"); -DATA(insert OID = 1531 ( length PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 701 "602" _null_ _null_ _null_ _null_ _null_ path_length _null_ _null_ _null_ )); -DESCR("sum of path segments"); - - -DATA(insert OID = 1532 ( point PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 600 "601" _null_ _null_ _null_ _null_ _null_ lseg_center _null_ _null_ _null_ )); -DESCR("center of"); -DATA(insert OID = 1533 ( point PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 600 "602" _null_ _null_ _null_ _null_ _null_ path_center _null_ _null_ _null_ )); -DESCR("center of"); -DATA(insert OID = 1534 ( point PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 600 "603" _null_ _null_ _null_ _null_ _null_ box_center _null_ _null_ _null_ )); -DESCR("center of"); -DATA(insert OID = 1540 ( point PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 600 "604" _null_ _null_ _null_ _null_ _null_ 
poly_center _null_ _null_ _null_ )); -DESCR("center of"); -DATA(insert OID = 1541 ( lseg PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 601 "603" _null_ _null_ _null_ _null_ _null_ box_diagonal _null_ _null_ _null_ )); -DESCR("diagonal of"); -DATA(insert OID = 1542 ( center PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 600 "603" _null_ _null_ _null_ _null_ _null_ box_center _null_ _null_ _null_ )); -DESCR("center of"); -DATA(insert OID = 1543 ( center PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 600 "718" _null_ _null_ _null_ _null_ _null_ circle_center _null_ _null_ _null_ )); -DESCR("center of"); -DATA(insert OID = 1544 ( polygon PGNSP PGUID 14 1 0 0 0 f f f f t f i s 1 0 604 "718" _null_ _null_ _null_ _null_ _null_ "select pg_catalog.polygon(12, $1)" _null_ _null_ _null_ )); -DESCR("convert circle to 12-vertex polygon"); -DATA(insert OID = 1545 ( npoints PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 23 "602" _null_ _null_ _null_ _null_ _null_ path_npoints _null_ _null_ _null_ )); -DESCR("number of points"); -DATA(insert OID = 1556 ( npoints PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 23 "604" _null_ _null_ _null_ _null_ _null_ poly_npoints _null_ _null_ _null_ )); -DESCR("number of points"); - -DATA(insert OID = 1564 ( bit_in PGNSP PGUID 12 1 0 0 0 f f f f t f i s 3 0 1560 "2275 26 23" _null_ _null_ _null_ _null_ _null_ bit_in _null_ _null_ _null_ )); -DESCR("I/O"); -DATA(insert OID = 1565 ( bit_out PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 2275 "1560" _null_ _null_ _null_ _null_ _null_ bit_out _null_ _null_ _null_ )); -DESCR("I/O"); -DATA(insert OID = 2919 ( bittypmodin PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 23 "1263" _null_ _null_ _null_ _null_ _null_ bittypmodin _null_ _null_ _null_ )); -DESCR("I/O typmod"); -DATA(insert OID = 2920 ( bittypmodout PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 2275 "23" _null_ _null_ _null_ _null_ _null_ bittypmodout _null_ _null_ _null_ )); -DESCR("I/O typmod"); - -DATA(insert OID = 1569 ( like PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 16 "25 25" _null_ _null_ _null_ _null_ _null_ textlike _null_ _null_ _null_ )); -DESCR("matches LIKE expression"); -DATA(insert OID = 1570 ( notlike PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 16 "25 25" _null_ _null_ _null_ _null_ _null_ textnlike _null_ _null_ _null_ )); -DESCR("does not match LIKE expression"); -DATA(insert OID = 1571 ( like PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 16 "19 25" _null_ _null_ _null_ _null_ _null_ namelike _null_ _null_ _null_ )); -DESCR("matches LIKE expression"); -DATA(insert OID = 1572 ( notlike PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 16 "19 25" _null_ _null_ _null_ _null_ _null_ namenlike _null_ _null_ _null_ )); -DESCR("does not match LIKE expression"); - - -/* SEQUENCE functions */ -DATA(insert OID = 1574 ( nextval PGNSP PGUID 12 1 0 0 0 f f f f t f v u 1 0 20 "2205" _null_ _null_ _null_ _null_ _null_ nextval_oid _null_ _null_ _null_ )); -DESCR("sequence next value"); -DATA(insert OID = 1575 ( currval PGNSP PGUID 12 1 0 0 0 f f f f t f v u 1 0 20 "2205" _null_ _null_ _null_ _null_ _null_ currval_oid _null_ _null_ _null_ )); -DESCR("sequence current value"); -DATA(insert OID = 1576 ( setval PGNSP PGUID 12 1 0 0 0 f f f f t f v u 2 0 20 "2205 20" _null_ _null_ _null_ _null_ _null_ setval_oid _null_ _null_ _null_ )); -DESCR("set sequence value"); -DATA(insert OID = 1765 ( setval PGNSP PGUID 12 1 0 0 0 f f f f t f v u 3 0 20 "2205 20 16" _null_ _null_ _null_ _null_ _null_ setval3_oid _null_ _null_ _null_ )); -DESCR("set sequence value and is_called status"); -DATA(insert OID = 3078 ( 
pg_sequence_parameters PGNSP PGUID 12 1 0 0 0 f f f f t f s s 1 0 2249 "26" "{26,20,20,20,20,16,20,26}" "{i,o,o,o,o,o,o,o}" "{sequence_oid,start_value,minimum_value,maximum_value,increment,cycle_option,cache_size,data_type}" _null_ _null_ pg_sequence_parameters _null_ _null_ _null_)); -DESCR("sequence parameters, for use by information schema"); -DATA(insert OID = 4032 ( pg_sequence_last_value PGNSP PGUID 12 1 0 0 0 f f f f t f v u 1 0 20 "2205" _null_ _null_ _null_ _null_ _null_ pg_sequence_last_value _null_ _null_ _null_ )); -DESCR("sequence last value"); - -DATA(insert OID = 1579 ( varbit_in PGNSP PGUID 12 1 0 0 0 f f f f t f i s 3 0 1562 "2275 26 23" _null_ _null_ _null_ _null_ _null_ varbit_in _null_ _null_ _null_ )); -DESCR("I/O"); -DATA(insert OID = 1580 ( varbit_out PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 2275 "1562" _null_ _null_ _null_ _null_ _null_ varbit_out _null_ _null_ _null_ )); -DESCR("I/O"); -DATA(insert OID = 2902 ( varbittypmodin PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 23 "1263" _null_ _null_ _null_ _null_ _null_ varbittypmodin _null_ _null_ _null_ )); -DESCR("I/O typmod"); -DATA(insert OID = 2921 ( varbittypmodout PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 2275 "23" _null_ _null_ _null_ _null_ _null_ varbittypmodout _null_ _null_ _null_ )); -DESCR("I/O typmod"); - -DATA(insert OID = 1581 ( biteq PGNSP PGUID 12 1 0 0 0 f f f t t f i s 2 0 16 "1560 1560" _null_ _null_ _null_ _null_ _null_ biteq _null_ _null_ _null_ )); -DATA(insert OID = 1582 ( bitne PGNSP PGUID 12 1 0 0 0 f f f t t f i s 2 0 16 "1560 1560" _null_ _null_ _null_ _null_ _null_ bitne _null_ _null_ _null_ )); -DATA(insert OID = 1592 ( bitge PGNSP PGUID 12 1 0 0 0 f f f t t f i s 2 0 16 "1560 1560" _null_ _null_ _null_ _null_ _null_ bitge _null_ _null_ _null_ )); -DATA(insert OID = 1593 ( bitgt PGNSP PGUID 12 1 0 0 0 f f f t t f i s 2 0 16 "1560 1560" _null_ _null_ _null_ _null_ _null_ bitgt _null_ _null_ _null_ )); -DATA(insert OID = 1594 ( bitle PGNSP PGUID 12 1 0 0 0 f f f t t f i s 2 0 16 "1560 1560" _null_ _null_ _null_ _null_ _null_ bitle _null_ _null_ _null_ )); -DATA(insert OID = 1595 ( bitlt PGNSP PGUID 12 1 0 0 0 f f f t t f i s 2 0 16 "1560 1560" _null_ _null_ _null_ _null_ _null_ bitlt _null_ _null_ _null_ )); -DATA(insert OID = 1596 ( bitcmp PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 23 "1560 1560" _null_ _null_ _null_ _null_ _null_ bitcmp _null_ _null_ _null_ )); -DESCR("less-equal-greater"); - -DATA(insert OID = 1598 ( random PGNSP PGUID 12 1 0 0 0 f f f f t f v r 0 0 701 "" _null_ _null_ _null_ _null_ _null_ drandom _null_ _null_ _null_ )); -DESCR("random value"); -DATA(insert OID = 1599 ( setseed PGNSP PGUID 12 1 0 0 0 f f f f t f v r 1 0 2278 "701" _null_ _null_ _null_ _null_ _null_ setseed _null_ _null_ _null_ )); -DESCR("set random seed"); - -/* OIDS 1600 - 1699 */ - -DATA(insert OID = 1600 ( asin PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 701 "701" _null_ _null_ _null_ _null_ _null_ dasin _null_ _null_ _null_ )); -DESCR("arcsine"); -DATA(insert OID = 1601 ( acos PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 701 "701" _null_ _null_ _null_ _null_ _null_ dacos _null_ _null_ _null_ )); -DESCR("arccosine"); -DATA(insert OID = 1602 ( atan PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 701 "701" _null_ _null_ _null_ _null_ _null_ datan _null_ _null_ _null_ )); -DESCR("arctangent"); -DATA(insert OID = 1603 ( atan2 PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 701 "701 701" _null_ _null_ _null_ _null_ _null_ datan2 _null_ _null_ _null_ )); -DESCR("arctangent, two arguments"); -DATA(insert OID = 1604 ( 
sin PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 701 "701" _null_ _null_ _null_ _null_ _null_ dsin _null_ _null_ _null_ )); -DESCR("sine"); -DATA(insert OID = 1605 ( cos PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 701 "701" _null_ _null_ _null_ _null_ _null_ dcos _null_ _null_ _null_ )); -DESCR("cosine"); -DATA(insert OID = 1606 ( tan PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 701 "701" _null_ _null_ _null_ _null_ _null_ dtan _null_ _null_ _null_ )); -DESCR("tangent"); -DATA(insert OID = 1607 ( cot PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 701 "701" _null_ _null_ _null_ _null_ _null_ dcot _null_ _null_ _null_ )); -DESCR("cotangent"); - -DATA(insert OID = 2731 ( asind PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 701 "701" _null_ _null_ _null_ _null_ _null_ dasind _null_ _null_ _null_ )); -DESCR("arcsine, degrees"); -DATA(insert OID = 2732 ( acosd PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 701 "701" _null_ _null_ _null_ _null_ _null_ dacosd _null_ _null_ _null_ )); -DESCR("arccosine, degrees"); -DATA(insert OID = 2733 ( atand PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 701 "701" _null_ _null_ _null_ _null_ _null_ datand _null_ _null_ _null_ )); -DESCR("arctangent, degrees"); -DATA(insert OID = 2734 ( atan2d PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 701 "701 701" _null_ _null_ _null_ _null_ _null_ datan2d _null_ _null_ _null_ )); -DESCR("arctangent, two arguments, degrees"); -DATA(insert OID = 2735 ( sind PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 701 "701" _null_ _null_ _null_ _null_ _null_ dsind _null_ _null_ _null_ )); -DESCR("sine, degrees"); -DATA(insert OID = 2736 ( cosd PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 701 "701" _null_ _null_ _null_ _null_ _null_ dcosd _null_ _null_ _null_ )); -DESCR("cosine, degrees"); -DATA(insert OID = 2737 ( tand PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 701 "701" _null_ _null_ _null_ _null_ _null_ dtand _null_ _null_ _null_ )); -DESCR("tangent, degrees"); -DATA(insert OID = 2738 ( cotd PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 701 "701" _null_ _null_ _null_ _null_ _null_ dcotd _null_ _null_ _null_ )); -DESCR("cotangent, degrees"); - -DATA(insert OID = 1608 ( degrees PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 701 "701" _null_ _null_ _null_ _null_ _null_ degrees _null_ _null_ _null_ )); -DESCR("radians to degrees"); -DATA(insert OID = 1609 ( radians PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 701 "701" _null_ _null_ _null_ _null_ _null_ radians _null_ _null_ _null_ )); -DESCR("degrees to radians"); -DATA(insert OID = 1610 ( pi PGNSP PGUID 12 1 0 0 0 f f f f t f i s 0 0 701 "" _null_ _null_ _null_ _null_ _null_ dpi _null_ _null_ _null_ )); -DESCR("PI"); - -DATA(insert OID = 1618 ( interval_mul PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 1186 "1186 701" _null_ _null_ _null_ _null_ _null_ interval_mul _null_ _null_ _null_ )); - -DATA(insert OID = 1620 ( ascii PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 23 "25" _null_ _null_ _null_ _null_ _null_ ascii _null_ _null_ _null_ )); -DESCR("convert first char to int4"); -DATA(insert OID = 1621 ( chr PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 25 "23" _null_ _null_ _null_ _null_ _null_ chr _null_ _null_ _null_ )); -DESCR("convert int4 to char"); -DATA(insert OID = 1622 ( repeat PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 25 "25 23" _null_ _null_ _null_ _null_ _null_ repeat _null_ _null_ _null_ )); -DESCR("replicate string n times"); - -DATA(insert OID = 1623 ( similar_escape PGNSP PGUID 12 1 0 0 0 f f f f f f i s 2 0 25 "25 25" _null_ _null_ _null_ _null_ _null_ similar_escape _null_ _null_ _null_ )); -DESCR("convert SQL99 
regexp pattern to POSIX style"); - -DATA(insert OID = 1624 ( mul_d_interval PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 1186 "701 1186" _null_ _null_ _null_ _null_ _null_ mul_d_interval _null_ _null_ _null_ )); - -DATA(insert OID = 1631 ( bpcharlike PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 16 "1042 25" _null_ _null_ _null_ _null_ _null_ textlike _null_ _null_ _null_ )); -DATA(insert OID = 1632 ( bpcharnlike PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 16 "1042 25" _null_ _null_ _null_ _null_ _null_ textnlike _null_ _null_ _null_ )); - -DATA(insert OID = 1633 ( texticlike PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 16 "25 25" _null_ _null_ _null_ _null_ _null_ texticlike _null_ _null_ _null_ )); -DATA(insert OID = 1634 ( texticnlike PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 16 "25 25" _null_ _null_ _null_ _null_ _null_ texticnlike _null_ _null_ _null_ )); -DATA(insert OID = 1635 ( nameiclike PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 16 "19 25" _null_ _null_ _null_ _null_ _null_ nameiclike _null_ _null_ _null_ )); -DATA(insert OID = 1636 ( nameicnlike PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 16 "19 25" _null_ _null_ _null_ _null_ _null_ nameicnlike _null_ _null_ _null_ )); -DATA(insert OID = 1637 ( like_escape PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 25 "25 25" _null_ _null_ _null_ _null_ _null_ like_escape _null_ _null_ _null_ )); -DESCR("convert LIKE pattern to use backslash escapes"); - -DATA(insert OID = 1656 ( bpcharicregexeq PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 16 "1042 25" _null_ _null_ _null_ _null_ _null_ texticregexeq _null_ _null_ _null_ )); -DATA(insert OID = 1657 ( bpcharicregexne PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 16 "1042 25" _null_ _null_ _null_ _null_ _null_ texticregexne _null_ _null_ _null_ )); -DATA(insert OID = 1658 ( bpcharregexeq PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 16 "1042 25" _null_ _null_ _null_ _null_ _null_ textregexeq _null_ _null_ _null_ )); -DATA(insert OID = 1659 ( bpcharregexne PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 16 "1042 25" _null_ _null_ _null_ _null_ _null_ textregexne _null_ _null_ _null_ )); -DATA(insert OID = 1660 ( bpchariclike PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 16 "1042 25" _null_ _null_ _null_ _null_ _null_ texticlike _null_ _null_ _null_ )); -DATA(insert OID = 1661 ( bpcharicnlike PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 16 "1042 25" _null_ _null_ _null_ _null_ _null_ texticnlike _null_ _null_ _null_ )); - -/* Oracle Compatibility Related Functions - By Edmund Mergl */ -DATA(insert OID = 868 ( strpos PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 23 "25 25" _null_ _null_ _null_ _null_ _null_ textpos _null_ _null_ _null_ )); -DESCR("position of substring"); -DATA(insert OID = 870 ( lower PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 25 "25" _null_ _null_ _null_ _null_ _null_ lower _null_ _null_ _null_ )); -DESCR("lowercase"); -DATA(insert OID = 871 ( upper PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 25 "25" _null_ _null_ _null_ _null_ _null_ upper _null_ _null_ _null_ )); -DESCR("uppercase"); -DATA(insert OID = 872 ( initcap PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 25 "25" _null_ _null_ _null_ _null_ _null_ initcap _null_ _null_ _null_ )); -DESCR("capitalize each word"); -DATA(insert OID = 873 ( lpad PGNSP PGUID 12 1 0 0 0 f f f f t f i s 3 0 25 "25 23 25" _null_ _null_ _null_ _null_ _null_ lpad _null_ _null_ _null_ )); -DESCR("left-pad string to length"); -DATA(insert OID = 874 ( rpad PGNSP PGUID 12 1 0 0 0 f f f f t f i s 3 0 25 "25 23 25" _null_ _null_ _null_ _null_ _null_ rpad _null_ _null_ _null_ )); 
-DESCR("right-pad string to length"); -DATA(insert OID = 875 ( ltrim PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 25 "25 25" _null_ _null_ _null_ _null_ _null_ ltrim _null_ _null_ _null_ )); -DESCR("trim selected characters from left end of string"); -DATA(insert OID = 876 ( rtrim PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 25 "25 25" _null_ _null_ _null_ _null_ _null_ rtrim _null_ _null_ _null_ )); -DESCR("trim selected characters from right end of string"); -DATA(insert OID = 877 ( substr PGNSP PGUID 12 1 0 0 0 f f f f t f i s 3 0 25 "25 23 23" _null_ _null_ _null_ _null_ _null_ text_substr _null_ _null_ _null_ )); -DESCR("extract portion of string"); -DATA(insert OID = 878 ( translate PGNSP PGUID 12 1 0 0 0 f f f f t f i s 3 0 25 "25 25 25" _null_ _null_ _null_ _null_ _null_ translate _null_ _null_ _null_ )); -DESCR("map a set of characters appearing in string"); -DATA(insert OID = 879 ( lpad PGNSP PGUID 14 1 0 0 0 f f f f t f i s 2 0 25 "25 23" _null_ _null_ _null_ _null_ _null_ "select pg_catalog.lpad($1, $2, '' '')" _null_ _null_ _null_ )); -DESCR("left-pad string to length"); -DATA(insert OID = 880 ( rpad PGNSP PGUID 14 1 0 0 0 f f f f t f i s 2 0 25 "25 23" _null_ _null_ _null_ _null_ _null_ "select pg_catalog.rpad($1, $2, '' '')" _null_ _null_ _null_ )); -DESCR("right-pad string to length"); -DATA(insert OID = 881 ( ltrim PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 25 "25" _null_ _null_ _null_ _null_ _null_ ltrim1 _null_ _null_ _null_ )); -DESCR("trim spaces from left end of string"); -DATA(insert OID = 882 ( rtrim PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 25 "25" _null_ _null_ _null_ _null_ _null_ rtrim1 _null_ _null_ _null_ )); -DESCR("trim spaces from right end of string"); -DATA(insert OID = 883 ( substr PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 25 "25 23" _null_ _null_ _null_ _null_ _null_ text_substr_no_len _null_ _null_ _null_ )); -DESCR("extract portion of string"); -DATA(insert OID = 884 ( btrim PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 25 "25 25" _null_ _null_ _null_ _null_ _null_ btrim _null_ _null_ _null_ )); -DESCR("trim selected characters from both ends of string"); -DATA(insert OID = 885 ( btrim PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 25 "25" _null_ _null_ _null_ _null_ _null_ btrim1 _null_ _null_ _null_ )); -DESCR("trim spaces from both ends of string"); - -DATA(insert OID = 936 ( substring PGNSP PGUID 12 1 0 0 0 f f f f t f i s 3 0 25 "25 23 23" _null_ _null_ _null_ _null_ _null_ text_substr _null_ _null_ _null_ )); -DESCR("extract portion of string"); -DATA(insert OID = 937 ( substring PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 25 "25 23" _null_ _null_ _null_ _null_ _null_ text_substr_no_len _null_ _null_ _null_ )); -DESCR("extract portion of string"); -DATA(insert OID = 2087 ( replace PGNSP PGUID 12 1 0 0 0 f f f f t f i s 3 0 25 "25 25 25" _null_ _null_ _null_ _null_ _null_ replace_text _null_ _null_ _null_ )); -DESCR("replace all occurrences in string of old_substr with new_substr"); -DATA(insert OID = 2284 ( regexp_replace PGNSP PGUID 12 1 0 0 0 f f f f t f i s 3 0 25 "25 25 25" _null_ _null_ _null_ _null_ _null_ textregexreplace_noopt _null_ _null_ _null_ )); -DESCR("replace text using regexp"); -DATA(insert OID = 2285 ( regexp_replace PGNSP PGUID 12 1 0 0 0 f f f f t f i s 4 0 25 "25 25 25 25" _null_ _null_ _null_ _null_ _null_ textregexreplace _null_ _null_ _null_ )); -DESCR("replace text using regexp"); -DATA(insert OID = 3396 ( regexp_match PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 1009 "25 25" _null_ _null_ _null_ _null_ _null_ regexp_match_no_flags 
_null_ _null_ _null_ )); -DESCR("find first match for regexp"); -DATA(insert OID = 3397 ( regexp_match PGNSP PGUID 12 1 0 0 0 f f f f t f i s 3 0 1009 "25 25 25" _null_ _null_ _null_ _null_ _null_ regexp_match _null_ _null_ _null_ )); -DESCR("find first match for regexp"); -DATA(insert OID = 2763 ( regexp_matches PGNSP PGUID 12 1 1 0 0 f f f f t t i s 2 0 1009 "25 25" _null_ _null_ _null_ _null_ _null_ regexp_matches_no_flags _null_ _null_ _null_ )); -DESCR("find match(es) for regexp"); -DATA(insert OID = 2764 ( regexp_matches PGNSP PGUID 12 1 10 0 0 f f f f t t i s 3 0 1009 "25 25 25" _null_ _null_ _null_ _null_ _null_ regexp_matches _null_ _null_ _null_ )); -DESCR("find match(es) for regexp"); -DATA(insert OID = 2088 ( split_part PGNSP PGUID 12 1 0 0 0 f f f f t f i s 3 0 25 "25 25 23" _null_ _null_ _null_ _null_ _null_ split_text _null_ _null_ _null_ )); -DESCR("split string by field_sep and return field_num"); -DATA(insert OID = 2765 ( regexp_split_to_table PGNSP PGUID 12 1 1000 0 0 f f f f t t i s 2 0 25 "25 25" _null_ _null_ _null_ _null_ _null_ regexp_split_to_table_no_flags _null_ _null_ _null_ )); -DESCR("split string by pattern"); -DATA(insert OID = 2766 ( regexp_split_to_table PGNSP PGUID 12 1 1000 0 0 f f f f t t i s 3 0 25 "25 25 25" _null_ _null_ _null_ _null_ _null_ regexp_split_to_table _null_ _null_ _null_ )); -DESCR("split string by pattern"); -DATA(insert OID = 2767 ( regexp_split_to_array PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 1009 "25 25" _null_ _null_ _null_ _null_ _null_ regexp_split_to_array_no_flags _null_ _null_ _null_ )); -DESCR("split string by pattern"); -DATA(insert OID = 2768 ( regexp_split_to_array PGNSP PGUID 12 1 0 0 0 f f f f t f i s 3 0 1009 "25 25 25" _null_ _null_ _null_ _null_ _null_ regexp_split_to_array _null_ _null_ _null_ )); -DESCR("split string by pattern"); -DATA(insert OID = 2089 ( to_hex PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 25 "23" _null_ _null_ _null_ _null_ _null_ to_hex32 _null_ _null_ _null_ )); -DESCR("convert int4 number to hex"); -DATA(insert OID = 2090 ( to_hex PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 25 "20" _null_ _null_ _null_ _null_ _null_ to_hex64 _null_ _null_ _null_ )); -DESCR("convert int8 number to hex"); - -/* for character set encoding support */ - -/* return database encoding name */ -DATA(insert OID = 1039 ( getdatabaseencoding PGNSP PGUID 12 1 0 0 0 f f f f t f s s 0 0 19 "" _null_ _null_ _null_ _null_ _null_ getdatabaseencoding _null_ _null_ _null_ )); -DESCR("encoding name of current database"); - -/* return client encoding name i.e. 
session encoding */ -DATA(insert OID = 810 ( pg_client_encoding PGNSP PGUID 12 1 0 0 0 f f f f t f s s 0 0 19 "" _null_ _null_ _null_ _null_ _null_ pg_client_encoding _null_ _null_ _null_ )); -DESCR("encoding name of current database"); - -DATA(insert OID = 1713 ( length PGNSP PGUID 12 1 0 0 0 f f f f t f s s 2 0 23 "17 19" _null_ _null_ _null_ _null_ _null_ length_in_encoding _null_ _null_ _null_ )); -DESCR("length of string in specified encoding"); - -DATA(insert OID = 1714 ( convert_from PGNSP PGUID 12 1 0 0 0 f f f f t f s s 2 0 25 "17 19" _null_ _null_ _null_ _null_ _null_ pg_convert_from _null_ _null_ _null_ )); -DESCR("convert string with specified source encoding name"); - -DATA(insert OID = 1717 ( convert_to PGNSP PGUID 12 1 0 0 0 f f f f t f s s 2 0 17 "25 19" _null_ _null_ _null_ _null_ _null_ pg_convert_to _null_ _null_ _null_ )); -DESCR("convert string with specified destination encoding name"); - -DATA(insert OID = 1813 ( convert PGNSP PGUID 12 1 0 0 0 f f f f t f s s 3 0 17 "17 19 19" _null_ _null_ _null_ _null_ _null_ pg_convert _null_ _null_ _null_ )); -DESCR("convert string with specified encoding names"); - -DATA(insert OID = 1264 ( pg_char_to_encoding PGNSP PGUID 12 1 0 0 0 f f f f t f s s 1 0 23 "19" _null_ _null_ _null_ _null_ _null_ PG_char_to_encoding _null_ _null_ _null_ )); -DESCR("convert encoding name to encoding id"); - -DATA(insert OID = 1597 ( pg_encoding_to_char PGNSP PGUID 12 1 0 0 0 f f f f t f s s 1 0 19 "23" _null_ _null_ _null_ _null_ _null_ PG_encoding_to_char _null_ _null_ _null_ )); -DESCR("convert encoding id to encoding name"); - -DATA(insert OID = 2319 ( pg_encoding_max_length PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 23 "23" _null_ _null_ _null_ _null_ _null_ pg_encoding_max_length_sql _null_ _null_ _null_ )); -DESCR("maximum octet length of a character in given encoding"); - -DATA(insert OID = 1638 ( oidgt PGNSP PGUID 12 1 0 0 0 f f f t t f i s 2 0 16 "26 26" _null_ _null_ _null_ _null_ _null_ oidgt _null_ _null_ _null_ )); -DATA(insert OID = 1639 ( oidge PGNSP PGUID 12 1 0 0 0 f f f t t f i s 2 0 16 "26 26" _null_ _null_ _null_ _null_ _null_ oidge _null_ _null_ _null_ )); - -/* System-view support functions */ -DATA(insert OID = 1573 ( pg_get_ruledef PGNSP PGUID 12 1 0 0 0 f f f f t f s s 1 0 25 "26" _null_ _null_ _null_ _null_ _null_ pg_get_ruledef _null_ _null_ _null_ )); -DESCR("source text of a rule"); -DATA(insert OID = 1640 ( pg_get_viewdef PGNSP PGUID 12 1 0 0 0 f f f f t f s r 1 0 25 "25" _null_ _null_ _null_ _null_ _null_ pg_get_viewdef_name _null_ _null_ _null_ )); -DESCR("select statement of a view"); -DATA(insert OID = 1641 ( pg_get_viewdef PGNSP PGUID 12 1 0 0 0 f f f f t f s r 1 0 25 "26" _null_ _null_ _null_ _null_ _null_ pg_get_viewdef _null_ _null_ _null_ )); -DESCR("select statement of a view"); -DATA(insert OID = 1642 ( pg_get_userbyid PGNSP PGUID 12 1 0 0 0 f f f f t f s s 1 0 19 "26" _null_ _null_ _null_ _null_ _null_ pg_get_userbyid _null_ _null_ _null_ )); -DESCR("role name by OID (with fallback)"); -DATA(insert OID = 1643 ( pg_get_indexdef PGNSP PGUID 12 1 0 0 0 f f f f t f s s 1 0 25 "26" _null_ _null_ _null_ _null_ _null_ pg_get_indexdef _null_ _null_ _null_ )); -DESCR("index description"); -DATA(insert OID = 3415 ( pg_get_statisticsobjdef PGNSP PGUID 12 1 0 0 0 f f f f t f s s 1 0 25 "26" _null_ _null_ _null_ _null_ _null_ pg_get_statisticsobjdef _null_ _null_ _null_ )); -DESCR("extended statistics object description"); -DATA(insert OID = 3352 ( pg_get_partkeydef PGNSP PGUID 12 1 0 0 0 f f f f t f s s 1 0 25 "26" 
_null_ _null_ _null_ _null_ _null_ pg_get_partkeydef _null_ _null_ _null_ )); -DESCR("partition key description"); -DATA(insert OID = 3408 ( pg_get_partition_constraintdef PGNSP PGUID 12 1 0 0 0 f f f f t f s s 1 0 25 "26" _null_ _null_ _null_ _null_ _null_ pg_get_partition_constraintdef _null_ _null_ _null_ )); -DESCR("partition constraint description"); -DATA(insert OID = 1662 ( pg_get_triggerdef PGNSP PGUID 12 1 0 0 0 f f f f t f s s 1 0 25 "26" _null_ _null_ _null_ _null_ _null_ pg_get_triggerdef _null_ _null_ _null_ )); -DESCR("trigger description"); -DATA(insert OID = 1387 ( pg_get_constraintdef PGNSP PGUID 12 1 0 0 0 f f f f t f s s 1 0 25 "26" _null_ _null_ _null_ _null_ _null_ pg_get_constraintdef _null_ _null_ _null_ )); -DESCR("constraint description"); -DATA(insert OID = 1716 ( pg_get_expr PGNSP PGUID 12 1 0 0 0 f f f f t f s s 2 0 25 "194 26" _null_ _null_ _null_ _null_ _null_ pg_get_expr _null_ _null_ _null_ )); -DESCR("deparse an encoded expression"); -DATA(insert OID = 1665 ( pg_get_serial_sequence PGNSP PGUID 12 1 0 0 0 f f f f t f s s 2 0 25 "25 25" _null_ _null_ _null_ _null_ _null_ pg_get_serial_sequence _null_ _null_ _null_ )); -DESCR("name of sequence for a serial column"); -DATA(insert OID = 2098 ( pg_get_functiondef PGNSP PGUID 12 1 0 0 0 f f f f t f s s 1 0 25 "26" _null_ _null_ _null_ _null_ _null_ pg_get_functiondef _null_ _null_ _null_ )); -DESCR("definition of a function"); -DATA(insert OID = 2162 ( pg_get_function_arguments PGNSP PGUID 12 1 0 0 0 f f f f t f s s 1 0 25 "26" _null_ _null_ _null_ _null_ _null_ pg_get_function_arguments _null_ _null_ _null_ )); -DESCR("argument list of a function"); -DATA(insert OID = 2232 ( pg_get_function_identity_arguments PGNSP PGUID 12 1 0 0 0 f f f f t f s s 1 0 25 "26" _null_ _null_ _null_ _null_ _null_ pg_get_function_identity_arguments _null_ _null_ _null_ )); -DESCR("identity argument list of a function"); -DATA(insert OID = 2165 ( pg_get_function_result PGNSP PGUID 12 1 0 0 0 f f f f t f s s 1 0 25 "26" _null_ _null_ _null_ _null_ _null_ pg_get_function_result _null_ _null_ _null_ )); -DESCR("result type of a function"); -DATA(insert OID = 3808 ( pg_get_function_arg_default PGNSP PGUID 12 1 0 0 0 f f f f t f s s 2 0 25 "26 23" _null_ _null_ _null_ _null_ _null_ pg_get_function_arg_default _null_ _null_ _null_ )); -DESCR("function argument default"); - -DATA(insert OID = 1686 ( pg_get_keywords PGNSP PGUID 12 10 400 0 0 f f f f t t s s 0 0 2249 "" "{25,18,25}" "{o,o,o}" "{word,catcode,catdesc}" _null_ _null_ pg_get_keywords _null_ _null_ _null_ )); -DESCR("list of SQL keywords"); - -DATA(insert OID = 2289 ( pg_options_to_table PGNSP PGUID 12 1 3 0 0 f f f f t t s s 1 0 2249 "1009" "{1009,25,25}" "{i,o,o}" "{options_array,option_name,option_value}" _null_ _null_ pg_options_to_table _null_ _null_ _null_ )); -DESCR("convert generic options array to name/value table"); - -DATA(insert OID = 1619 ( pg_typeof PGNSP PGUID 12 1 0 0 0 f f f f f f s s 1 0 2206 "2276" _null_ _null_ _null_ _null_ _null_ pg_typeof _null_ _null_ _null_ )); -DESCR("type of the argument"); -DATA(insert OID = 3162 ( pg_collation_for PGNSP PGUID 12 1 0 0 0 f f f f f f s s 1 0 25 "2276" _null_ _null_ _null_ _null_ _null_ pg_collation_for _null_ _null_ _null_ )); -DESCR("collation of the argument; implementation of the COLLATION FOR expression"); - -DATA(insert OID = 3842 ( pg_relation_is_updatable PGNSP PGUID 12 10 0 0 0 f f f f t f s s 2 0 23 "2205 16" _null_ _null_ _null_ _null_ _null_ pg_relation_is_updatable _null_ _null_ _null_ )); -DESCR("is a 
relation insertable/updatable/deletable"); -DATA(insert OID = 3843 ( pg_column_is_updatable PGNSP PGUID 12 10 0 0 0 f f f f t f s s 3 0 16 "2205 21 16" _null_ _null_ _null_ _null_ _null_ pg_column_is_updatable _null_ _null_ _null_ )); -DESCR("is a column updatable"); - -DATA(insert OID = 6120 ( pg_get_replica_identity_index PGNSP PGUID 12 10 0 0 0 f f f f t f s s 1 0 2205 "2205" _null_ _null_ _null_ _null_ _null_ pg_get_replica_identity_index _null_ _null_ _null_ )); -DESCR("oid of replica identity index if any"); - -/* Deferrable unique constraint trigger */ -DATA(insert OID = 1250 ( unique_key_recheck PGNSP PGUID 12 1 0 0 0 f f f f t f v s 0 0 2279 "" _null_ _null_ _null_ _null_ _null_ unique_key_recheck _null_ _null_ _null_ )); -DESCR("deferred UNIQUE constraint check"); - -/* Generic referential integrity constraint triggers */ -DATA(insert OID = 1644 ( RI_FKey_check_ins PGNSP PGUID 12 1 0 0 0 f f f f t f v s 0 0 2279 "" _null_ _null_ _null_ _null_ _null_ RI_FKey_check_ins _null_ _null_ _null_ )); -DESCR("referential integrity FOREIGN KEY ... REFERENCES"); -DATA(insert OID = 1645 ( RI_FKey_check_upd PGNSP PGUID 12 1 0 0 0 f f f f t f v s 0 0 2279 "" _null_ _null_ _null_ _null_ _null_ RI_FKey_check_upd _null_ _null_ _null_ )); -DESCR("referential integrity FOREIGN KEY ... REFERENCES"); -DATA(insert OID = 1646 ( RI_FKey_cascade_del PGNSP PGUID 12 1 0 0 0 f f f f t f v s 0 0 2279 "" _null_ _null_ _null_ _null_ _null_ RI_FKey_cascade_del _null_ _null_ _null_ )); -DESCR("referential integrity ON DELETE CASCADE"); -DATA(insert OID = 1647 ( RI_FKey_cascade_upd PGNSP PGUID 12 1 0 0 0 f f f f t f v s 0 0 2279 "" _null_ _null_ _null_ _null_ _null_ RI_FKey_cascade_upd _null_ _null_ _null_ )); -DESCR("referential integrity ON UPDATE CASCADE"); -DATA(insert OID = 1648 ( RI_FKey_restrict_del PGNSP PGUID 12 1 0 0 0 f f f f t f v s 0 0 2279 "" _null_ _null_ _null_ _null_ _null_ RI_FKey_restrict_del _null_ _null_ _null_ )); -DESCR("referential integrity ON DELETE RESTRICT"); -DATA(insert OID = 1649 ( RI_FKey_restrict_upd PGNSP PGUID 12 1 0 0 0 f f f f t f v s 0 0 2279 "" _null_ _null_ _null_ _null_ _null_ RI_FKey_restrict_upd _null_ _null_ _null_ )); -DESCR("referential integrity ON UPDATE RESTRICT"); -DATA(insert OID = 1650 ( RI_FKey_setnull_del PGNSP PGUID 12 1 0 0 0 f f f f t f v s 0 0 2279 "" _null_ _null_ _null_ _null_ _null_ RI_FKey_setnull_del _null_ _null_ _null_ )); -DESCR("referential integrity ON DELETE SET NULL"); -DATA(insert OID = 1651 ( RI_FKey_setnull_upd PGNSP PGUID 12 1 0 0 0 f f f f t f v s 0 0 2279 "" _null_ _null_ _null_ _null_ _null_ RI_FKey_setnull_upd _null_ _null_ _null_ )); -DESCR("referential integrity ON UPDATE SET NULL"); -DATA(insert OID = 1652 ( RI_FKey_setdefault_del PGNSP PGUID 12 1 0 0 0 f f f f t f v s 0 0 2279 "" _null_ _null_ _null_ _null_ _null_ RI_FKey_setdefault_del _null_ _null_ _null_ )); -DESCR("referential integrity ON DELETE SET DEFAULT"); -DATA(insert OID = 1653 ( RI_FKey_setdefault_upd PGNSP PGUID 12 1 0 0 0 f f f f t f v s 0 0 2279 "" _null_ _null_ _null_ _null_ _null_ RI_FKey_setdefault_upd _null_ _null_ _null_ )); -DESCR("referential integrity ON UPDATE SET DEFAULT"); -DATA(insert OID = 1654 ( RI_FKey_noaction_del PGNSP PGUID 12 1 0 0 0 f f f f t f v s 0 0 2279 "" _null_ _null_ _null_ _null_ _null_ RI_FKey_noaction_del _null_ _null_ _null_ )); -DESCR("referential integrity ON DELETE NO ACTION"); -DATA(insert OID = 1655 ( RI_FKey_noaction_upd PGNSP PGUID 12 1 0 0 0 f f f f t f v s 0 0 2279 "" _null_ _null_ _null_ _null_ _null_ RI_FKey_noaction_upd _null_ 
_null_ _null_ )); -DESCR("referential integrity ON UPDATE NO ACTION"); - -DATA(insert OID = 1666 ( varbiteq PGNSP PGUID 12 1 0 0 0 f f f t t f i s 2 0 16 "1562 1562" _null_ _null_ _null_ _null_ _null_ biteq _null_ _null_ _null_ )); -DATA(insert OID = 1667 ( varbitne PGNSP PGUID 12 1 0 0 0 f f f t t f i s 2 0 16 "1562 1562" _null_ _null_ _null_ _null_ _null_ bitne _null_ _null_ _null_ )); -DATA(insert OID = 1668 ( varbitge PGNSP PGUID 12 1 0 0 0 f f f t t f i s 2 0 16 "1562 1562" _null_ _null_ _null_ _null_ _null_ bitge _null_ _null_ _null_ )); -DATA(insert OID = 1669 ( varbitgt PGNSP PGUID 12 1 0 0 0 f f f t t f i s 2 0 16 "1562 1562" _null_ _null_ _null_ _null_ _null_ bitgt _null_ _null_ _null_ )); -DATA(insert OID = 1670 ( varbitle PGNSP PGUID 12 1 0 0 0 f f f t t f i s 2 0 16 "1562 1562" _null_ _null_ _null_ _null_ _null_ bitle _null_ _null_ _null_ )); -DATA(insert OID = 1671 ( varbitlt PGNSP PGUID 12 1 0 0 0 f f f t t f i s 2 0 16 "1562 1562" _null_ _null_ _null_ _null_ _null_ bitlt _null_ _null_ _null_ )); -DATA(insert OID = 1672 ( varbitcmp PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 23 "1562 1562" _null_ _null_ _null_ _null_ _null_ bitcmp _null_ _null_ _null_ )); -DESCR("less-equal-greater"); - -/* avoid the C names bitand and bitor, since they are C++ keywords */ -DATA(insert OID = 1673 ( bitand PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 1560 "1560 1560" _null_ _null_ _null_ _null_ _null_ bit_and _null_ _null_ _null_ )); -DATA(insert OID = 1674 ( bitor PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 1560 "1560 1560" _null_ _null_ _null_ _null_ _null_ bit_or _null_ _null_ _null_ )); -DATA(insert OID = 1675 ( bitxor PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 1560 "1560 1560" _null_ _null_ _null_ _null_ _null_ bitxor _null_ _null_ _null_ )); -DATA(insert OID = 1676 ( bitnot PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 1560 "1560" _null_ _null_ _null_ _null_ _null_ bitnot _null_ _null_ _null_ )); -DATA(insert OID = 1677 ( bitshiftleft PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 1560 "1560 23" _null_ _null_ _null_ _null_ _null_ bitshiftleft _null_ _null_ _null_ )); -DATA(insert OID = 1678 ( bitshiftright PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 1560 "1560 23" _null_ _null_ _null_ _null_ _null_ bitshiftright _null_ _null_ _null_ )); -DATA(insert OID = 1679 ( bitcat PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 1562 "1562 1562" _null_ _null_ _null_ _null_ _null_ bitcat _null_ _null_ _null_ )); -DATA(insert OID = 1680 ( substring PGNSP PGUID 12 1 0 0 0 f f f f t f i s 3 0 1560 "1560 23 23" _null_ _null_ _null_ _null_ _null_ bitsubstr _null_ _null_ _null_ )); -DESCR("extract portion of bitstring"); -DATA(insert OID = 1681 ( length PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 23 "1560" _null_ _null_ _null_ _null_ _null_ bitlength _null_ _null_ _null_ )); -DESCR("bitstring length"); -DATA(insert OID = 1682 ( octet_length PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 23 "1560" _null_ _null_ _null_ _null_ _null_ bitoctetlength _null_ _null_ _null_ )); -DESCR("octet length"); -DATA(insert OID = 1683 ( bit PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 1560 "23 23" _null_ _null_ _null_ _null_ _null_ bitfromint4 _null_ _null_ _null_ )); -DESCR("convert int4 to bitstring"); -DATA(insert OID = 1684 ( int4 PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 23 "1560" _null_ _null_ _null_ _null_ _null_ bittoint4 _null_ _null_ _null_ )); -DESCR("convert bitstring to int4"); - -DATA(insert OID = 1685 ( bit PGNSP PGUID 12 1 0 0 0 f f f f t f i s 3 0 1560 "1560 23 16" _null_ _null_ _null_ _null_ _null_ bit _null_ _null_ _null_ )); 
-DESCR("adjust bit() to typmod length"); -DATA(insert OID = 3158 ( varbit_transform PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 2281 "2281" _null_ _null_ _null_ _null_ _null_ varbit_transform _null_ _null_ _null_ )); -DESCR("transform a varbit length coercion"); -DATA(insert OID = 1687 ( varbit PGNSP PGUID 12 1 0 0 varbit_transform f f f f t f i s 3 0 1562 "1562 23 16" _null_ _null_ _null_ _null_ _null_ varbit _null_ _null_ _null_ )); -DESCR("adjust varbit() to typmod length"); - -DATA(insert OID = 1698 ( position PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 23 "1560 1560" _null_ _null_ _null_ _null_ _null_ bitposition _null_ _null_ _null_ )); -DESCR("position of sub-bitstring"); -DATA(insert OID = 1699 ( substring PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 1560 "1560 23" _null_ _null_ _null_ _null_ _null_ bitsubstr_no_len _null_ _null_ _null_ )); -DESCR("extract portion of bitstring"); - -DATA(insert OID = 3030 ( overlay PGNSP PGUID 12 1 0 0 0 f f f f t f i s 4 0 1560 "1560 1560 23 23" _null_ _null_ _null_ _null_ _null_ bitoverlay _null_ _null_ _null_ )); -DESCR("substitute portion of bitstring"); -DATA(insert OID = 3031 ( overlay PGNSP PGUID 12 1 0 0 0 f f f f t f i s 3 0 1560 "1560 1560 23" _null_ _null_ _null_ _null_ _null_ bitoverlay_no_len _null_ _null_ _null_ )); -DESCR("substitute portion of bitstring"); -DATA(insert OID = 3032 ( get_bit PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 23 "1560 23" _null_ _null_ _null_ _null_ _null_ bitgetbit _null_ _null_ _null_ )); -DESCR("get bit"); -DATA(insert OID = 3033 ( set_bit PGNSP PGUID 12 1 0 0 0 f f f f t f i s 3 0 1560 "1560 23 23" _null_ _null_ _null_ _null_ _null_ bitsetbit _null_ _null_ _null_ )); -DESCR("set bit"); - -/* for macaddr type support */ -DATA(insert OID = 436 ( macaddr_in PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 829 "2275" _null_ _null_ _null_ _null_ _null_ macaddr_in _null_ _null_ _null_ )); -DESCR("I/O"); -DATA(insert OID = 437 ( macaddr_out PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 2275 "829" _null_ _null_ _null_ _null_ _null_ macaddr_out _null_ _null_ _null_ )); -DESCR("I/O"); - -DATA(insert OID = 753 ( trunc PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 829 "829" _null_ _null_ _null_ _null_ _null_ macaddr_trunc _null_ _null_ _null_ )); -DESCR("MACADDR manufacturer fields"); - -DATA(insert OID = 830 ( macaddr_eq PGNSP PGUID 12 1 0 0 0 f f f t t f i s 2 0 16 "829 829" _null_ _null_ _null_ _null_ _null_ macaddr_eq _null_ _null_ _null_ )); -DATA(insert OID = 831 ( macaddr_lt PGNSP PGUID 12 1 0 0 0 f f f t t f i s 2 0 16 "829 829" _null_ _null_ _null_ _null_ _null_ macaddr_lt _null_ _null_ _null_ )); -DATA(insert OID = 832 ( macaddr_le PGNSP PGUID 12 1 0 0 0 f f f t t f i s 2 0 16 "829 829" _null_ _null_ _null_ _null_ _null_ macaddr_le _null_ _null_ _null_ )); -DATA(insert OID = 833 ( macaddr_gt PGNSP PGUID 12 1 0 0 0 f f f t t f i s 2 0 16 "829 829" _null_ _null_ _null_ _null_ _null_ macaddr_gt _null_ _null_ _null_ )); -DATA(insert OID = 834 ( macaddr_ge PGNSP PGUID 12 1 0 0 0 f f f t t f i s 2 0 16 "829 829" _null_ _null_ _null_ _null_ _null_ macaddr_ge _null_ _null_ _null_ )); -DATA(insert OID = 835 ( macaddr_ne PGNSP PGUID 12 1 0 0 0 f f f t t f i s 2 0 16 "829 829" _null_ _null_ _null_ _null_ _null_ macaddr_ne _null_ _null_ _null_ )); -DATA(insert OID = 836 ( macaddr_cmp PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 23 "829 829" _null_ _null_ _null_ _null_ _null_ macaddr_cmp _null_ _null_ _null_ )); -DESCR("less-equal-greater"); -DATA(insert OID = 3144 ( macaddr_not PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 829 "829" _null_ 
_null_ _null_ _null_ _null_ macaddr_not _null_ _null_ _null_ )); -DATA(insert OID = 3145 ( macaddr_and PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 829 "829 829" _null_ _null_ _null_ _null_ _null_ macaddr_and _null_ _null_ _null_ )); -DATA(insert OID = 3146 ( macaddr_or PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 829 "829 829" _null_ _null_ _null_ _null_ _null_ macaddr_or _null_ _null_ _null_ )); -DATA(insert OID = 3359 ( macaddr_sortsupport PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 2278 "2281" _null_ _null_ _null_ _null_ _null_ macaddr_sortsupport _null_ _null_ _null_ )); -DESCR("sort support"); - -/* for macaddr8 type support */ -DATA(insert OID = 4110 ( macaddr8_in PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 774 "2275" _null_ _null_ _null_ _null_ _null_ macaddr8_in _null_ _null_ _null_ )); -DESCR("I/O"); -DATA(insert OID = 4111 ( macaddr8_out PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 2275 "774" _null_ _null_ _null_ _null_ _null_ macaddr8_out _null_ _null_ _null_ )); -DESCR("I/O"); - -DATA(insert OID = 4112 ( trunc PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 774 "774" _null_ _null_ _null_ _null_ _null_ macaddr8_trunc _null_ _null_ _null_ )); -DESCR("MACADDR8 manufacturer fields"); - -DATA(insert OID = 4113 ( macaddr8_eq PGNSP PGUID 12 1 0 0 0 f f f t t f i s 2 0 16 "774 774" _null_ _null_ _null_ _null_ _null_ macaddr8_eq _null_ _null_ _null_ )); -DATA(insert OID = 4114 ( macaddr8_lt PGNSP PGUID 12 1 0 0 0 f f f t t f i s 2 0 16 "774 774" _null_ _null_ _null_ _null_ _null_ macaddr8_lt _null_ _null_ _null_ )); -DATA(insert OID = 4115 ( macaddr8_le PGNSP PGUID 12 1 0 0 0 f f f t t f i s 2 0 16 "774 774" _null_ _null_ _null_ _null_ _null_ macaddr8_le _null_ _null_ _null_ )); -DATA(insert OID = 4116 ( macaddr8_gt PGNSP PGUID 12 1 0 0 0 f f f t t f i s 2 0 16 "774 774" _null_ _null_ _null_ _null_ _null_ macaddr8_gt _null_ _null_ _null_ )); -DATA(insert OID = 4117 ( macaddr8_ge PGNSP PGUID 12 1 0 0 0 f f f t t f i s 2 0 16 "774 774" _null_ _null_ _null_ _null_ _null_ macaddr8_ge _null_ _null_ _null_ )); -DATA(insert OID = 4118 ( macaddr8_ne PGNSP PGUID 12 1 0 0 0 f f f t t f i s 2 0 16 "774 774" _null_ _null_ _null_ _null_ _null_ macaddr8_ne _null_ _null_ _null_ )); -DATA(insert OID = 4119 ( macaddr8_cmp PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 23 "774 774" _null_ _null_ _null_ _null_ _null_ macaddr8_cmp _null_ _null_ _null_ )); -DESCR("less-equal-greater"); -DATA(insert OID = 4120 ( macaddr8_not PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 774 "774" _null_ _null_ _null_ _null_ _null_ macaddr8_not _null_ _null_ _null_ )); -DATA(insert OID = 4121 ( macaddr8_and PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 774 "774 774" _null_ _null_ _null_ _null_ _null_ macaddr8_and _null_ _null_ _null_ )); -DATA(insert OID = 4122 ( macaddr8_or PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 774 "774 774" _null_ _null_ _null_ _null_ _null_ macaddr8_or _null_ _null_ _null_ )); -DATA(insert OID = 4123 ( macaddr8 PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 774 "829" _null_ _null_ _null_ _null_ _null_ macaddrtomacaddr8 _null_ _null_ _null_ )); -DESCR("convert macaddr to macaddr8"); -DATA(insert OID = 4124 ( macaddr PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 829 "774" _null_ _null_ _null_ _null_ _null_ macaddr8tomacaddr _null_ _null_ _null_ )); -DESCR("convert macaddr8 to macaddr"); -DATA(insert OID = 4125 ( macaddr8_set7bit PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 774 "774" _null_ _null_ _null_ _null_ _null_ macaddr8_set7bit _null_ _null_ _null_ )); -DESCR("set 7th bit in macaddr8"); - -/* for inet type support */ 
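/*
 * [Annotation, not part of the upstream diff]  The DATA()/DESCR() removals
 * running through this hunk do not drop any functions: upstream PostgreSQL 11
 * moved the bootstrap catalog rows out of pg_proc.h into pg_proc.dat, which
 * genbki.pl reads when generating postgres.bki.  A removed pair such as
 *
 *     DATA(insert OID = 1430 ( isclosed ... path_isclosed ... ));
 *     DESCR("path closed?");
 *
 * reappears in pg_proc.dat roughly as (defaulted columns omitted; sketch,
 * not copied from this patch):
 *
 *     { oid => '1430', descr => 'path closed?',
 *       proname => 'isclosed', prorettype => 'bool', proargtypes => 'path',
 *       prosrc => 'path_isclosed' },
 *
 * What remains in pg_proc.h is the FormData_pg_proc declaration, the
 * Form_pg_proc typedef added earlier in this hunk, and an
 * EXPOSE_TO_CLIENT_CODE section.  The "Symbolic values for prokind column"
 * comment added above refers to defines that upstream spells as
 *
 *     #define PROKIND_FUNCTION  'f'
 *     #define PROKIND_AGGREGATE 'a'
 *     #define PROKIND_WINDOW    'w'
 *     #define PROKIND_PROCEDURE 'p'
 *
 * (shown here for context; the exact added lines fall outside this excerpt).
 */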
-DATA(insert OID = 910 ( inet_in PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 869 "2275" _null_ _null_ _null_ _null_ _null_ inet_in _null_ _null_ _null_ )); -DESCR("I/O"); -DATA(insert OID = 911 ( inet_out PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 2275 "869" _null_ _null_ _null_ _null_ _null_ inet_out _null_ _null_ _null_ )); -DESCR("I/O"); - -/* for cidr type support */ -DATA(insert OID = 1267 ( cidr_in PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 650 "2275" _null_ _null_ _null_ _null_ _null_ cidr_in _null_ _null_ _null_ )); -DESCR("I/O"); -DATA(insert OID = 1427 ( cidr_out PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 2275 "650" _null_ _null_ _null_ _null_ _null_ cidr_out _null_ _null_ _null_ )); -DESCR("I/O"); - -/* these are used for both inet and cidr */ -DATA(insert OID = 920 ( network_eq PGNSP PGUID 12 1 0 0 0 f f f t t f i s 2 0 16 "869 869" _null_ _null_ _null_ _null_ _null_ network_eq _null_ _null_ _null_ )); -DATA(insert OID = 921 ( network_lt PGNSP PGUID 12 1 0 0 0 f f f t t f i s 2 0 16 "869 869" _null_ _null_ _null_ _null_ _null_ network_lt _null_ _null_ _null_ )); -DATA(insert OID = 922 ( network_le PGNSP PGUID 12 1 0 0 0 f f f t t f i s 2 0 16 "869 869" _null_ _null_ _null_ _null_ _null_ network_le _null_ _null_ _null_ )); -DATA(insert OID = 923 ( network_gt PGNSP PGUID 12 1 0 0 0 f f f t t f i s 2 0 16 "869 869" _null_ _null_ _null_ _null_ _null_ network_gt _null_ _null_ _null_ )); -DATA(insert OID = 924 ( network_ge PGNSP PGUID 12 1 0 0 0 f f f t t f i s 2 0 16 "869 869" _null_ _null_ _null_ _null_ _null_ network_ge _null_ _null_ _null_ )); -DATA(insert OID = 925 ( network_ne PGNSP PGUID 12 1 0 0 0 f f f t t f i s 2 0 16 "869 869" _null_ _null_ _null_ _null_ _null_ network_ne _null_ _null_ _null_ )); -DATA(insert OID = 3562 ( network_larger PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 869 "869 869" _null_ _null_ _null_ _null_ _null_ network_larger _null_ _null_ _null_ )); -DESCR("larger of two"); -DATA(insert OID = 3563 ( network_smaller PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 869 "869 869" _null_ _null_ _null_ _null_ _null_ network_smaller _null_ _null_ _null_ )); -DESCR("smaller of two"); -DATA(insert OID = 926 ( network_cmp PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 23 "869 869" _null_ _null_ _null_ _null_ _null_ network_cmp _null_ _null_ _null_ )); -DESCR("less-equal-greater"); -DATA(insert OID = 927 ( network_sub PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 16 "869 869" _null_ _null_ _null_ _null_ _null_ network_sub _null_ _null_ _null_ )); -DATA(insert OID = 928 ( network_subeq PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 16 "869 869" _null_ _null_ _null_ _null_ _null_ network_subeq _null_ _null_ _null_ )); -DATA(insert OID = 929 ( network_sup PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 16 "869 869" _null_ _null_ _null_ _null_ _null_ network_sup _null_ _null_ _null_ )); -DATA(insert OID = 930 ( network_supeq PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 16 "869 869" _null_ _null_ _null_ _null_ _null_ network_supeq _null_ _null_ _null_ )); -DATA(insert OID = 3551 ( network_overlap PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 16 "869 869" _null_ _null_ _null_ _null_ _null_ network_overlap _null_ _null_ _null_ )); - -/* inet/cidr functions */ -DATA(insert OID = 598 ( abbrev PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 25 "869" _null_ _null_ _null_ _null_ _null_ inet_abbrev _null_ _null_ _null_ )); -DESCR("abbreviated display of inet value"); -DATA(insert OID = 599 ( abbrev PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 25 "650" _null_ _null_ _null_ _null_ _null_ cidr_abbrev _null_ _null_ 
_null_ )); -DESCR("abbreviated display of cidr value"); -DATA(insert OID = 605 ( set_masklen PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 869 "869 23" _null_ _null_ _null_ _null_ _null_ inet_set_masklen _null_ _null_ _null_ )); -DESCR("change netmask of inet"); -DATA(insert OID = 635 ( set_masklen PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 650 "650 23" _null_ _null_ _null_ _null_ _null_ cidr_set_masklen _null_ _null_ _null_ )); -DESCR("change netmask of cidr"); -DATA(insert OID = 711 ( family PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 23 "869" _null_ _null_ _null_ _null_ _null_ network_family _null_ _null_ _null_ )); -DESCR("address family (4 for IPv4, 6 for IPv6)"); -DATA(insert OID = 683 ( network PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 650 "869" _null_ _null_ _null_ _null_ _null_ network_network _null_ _null_ _null_ )); -DESCR("network part of address"); -DATA(insert OID = 696 ( netmask PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 869 "869" _null_ _null_ _null_ _null_ _null_ network_netmask _null_ _null_ _null_ )); -DESCR("netmask of address"); -DATA(insert OID = 697 ( masklen PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 23 "869" _null_ _null_ _null_ _null_ _null_ network_masklen _null_ _null_ _null_ )); -DESCR("netmask length"); -DATA(insert OID = 698 ( broadcast PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 869 "869" _null_ _null_ _null_ _null_ _null_ network_broadcast _null_ _null_ _null_ )); -DESCR("broadcast address of network"); -DATA(insert OID = 699 ( host PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 25 "869" _null_ _null_ _null_ _null_ _null_ network_host _null_ _null_ _null_ )); -DESCR("show address octets only"); -DATA(insert OID = 730 ( text PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 25 "869" _null_ _null_ _null_ _null_ _null_ network_show _null_ _null_ _null_ )); -DESCR("show all parts of inet/cidr value"); -DATA(insert OID = 1362 ( hostmask PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 869 "869" _null_ _null_ _null_ _null_ _null_ network_hostmask _null_ _null_ _null_ )); -DESCR("hostmask of address"); -DATA(insert OID = 1715 ( cidr PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 650 "869" _null_ _null_ _null_ _null_ _null_ inet_to_cidr _null_ _null_ _null_ )); -DESCR("convert inet to cidr"); - -DATA(insert OID = 2196 ( inet_client_addr PGNSP PGUID 12 1 0 0 0 f f f f f f s r 0 0 869 "" _null_ _null_ _null_ _null_ _null_ inet_client_addr _null_ _null_ _null_ )); -DESCR("inet address of the client"); -DATA(insert OID = 2197 ( inet_client_port PGNSP PGUID 12 1 0 0 0 f f f f f f s r 0 0 23 "" _null_ _null_ _null_ _null_ _null_ inet_client_port _null_ _null_ _null_ )); -DESCR("client's port number for this connection"); -DATA(insert OID = 2198 ( inet_server_addr PGNSP PGUID 12 1 0 0 0 f f f f f f s s 0 0 869 "" _null_ _null_ _null_ _null_ _null_ inet_server_addr _null_ _null_ _null_ )); -DESCR("inet address of the server"); -DATA(insert OID = 2199 ( inet_server_port PGNSP PGUID 12 1 0 0 0 f f f f f f s s 0 0 23 "" _null_ _null_ _null_ _null_ _null_ inet_server_port _null_ _null_ _null_ )); -DESCR("server's port number for this connection"); - -DATA(insert OID = 2627 ( inetnot PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 869 "869" _null_ _null_ _null_ _null_ _null_ inetnot _null_ _null_ _null_ )); -DATA(insert OID = 2628 ( inetand PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 869 "869 869" _null_ _null_ _null_ _null_ _null_ inetand _null_ _null_ _null_ )); -DATA(insert OID = 2629 ( inetor PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 869 "869 869" _null_ _null_ _null_ _null_ _null_ inetor _null_ _null_ 
_null_ )); -DATA(insert OID = 2630 ( inetpl PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 869 "869 20" _null_ _null_ _null_ _null_ _null_ inetpl _null_ _null_ _null_ )); -DATA(insert OID = 2631 ( int8pl_inet PGNSP PGUID 14 1 0 0 0 f f f f t f i s 2 0 869 "20 869" _null_ _null_ _null_ _null_ _null_ "select $2 + $1" _null_ _null_ _null_ )); -DATA(insert OID = 2632 ( inetmi_int8 PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 869 "869 20" _null_ _null_ _null_ _null_ _null_ inetmi_int8 _null_ _null_ _null_ )); -DATA(insert OID = 2633 ( inetmi PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 20 "869 869" _null_ _null_ _null_ _null_ _null_ inetmi _null_ _null_ _null_ )); -DATA(insert OID = 4071 ( inet_same_family PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 16 "869 869" _null_ _null_ _null_ _null_ _null_ inet_same_family _null_ _null_ _null_ )); -DESCR("are the addresses from the same family?"); -DATA(insert OID = 4063 ( inet_merge PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 650 "869 869" _null_ _null_ _null_ _null_ _null_ inet_merge _null_ _null_ _null_ )); -DESCR("the smallest network which includes both of the given networks"); - -/* GiST support for inet and cidr */ -DATA(insert OID = 3553 ( inet_gist_consistent PGNSP PGUID 12 1 0 0 0 f f f f t f i s 5 0 16 "2281 869 21 26 2281" _null_ _null_ _null_ _null_ _null_ inet_gist_consistent _null_ _null_ _null_ )); -DESCR("GiST support"); -DATA(insert OID = 3554 ( inet_gist_union PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 869 "2281 2281" _null_ _null_ _null_ _null_ _null_ inet_gist_union _null_ _null_ _null_ )); -DESCR("GiST support"); -DATA(insert OID = 3555 ( inet_gist_compress PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 2281 "2281" _null_ _null_ _null_ _null_ _null_ inet_gist_compress _null_ _null_ _null_ )); -DESCR("GiST support"); -DATA(insert OID = 3556 ( inet_gist_decompress PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 2281 "2281" _null_ _null_ _null_ _null_ _null_ inet_gist_decompress _null_ _null_ _null_ )); -DESCR("GiST support"); -DATA(insert OID = 3573 ( inet_gist_fetch PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 2281 "2281" _null_ _null_ _null_ _null_ _null_ inet_gist_fetch _null_ _null_ _null_ )); -DESCR("GiST support"); -DATA(insert OID = 3557 ( inet_gist_penalty PGNSP PGUID 12 1 0 0 0 f f f f t f i s 3 0 2281 "2281 2281 2281" _null_ _null_ _null_ _null_ _null_ inet_gist_penalty _null_ _null_ _null_ )); -DESCR("GiST support"); -DATA(insert OID = 3558 ( inet_gist_picksplit PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 2281 "2281 2281" _null_ _null_ _null_ _null_ _null_ inet_gist_picksplit _null_ _null_ _null_ )); -DESCR("GiST support"); -DATA(insert OID = 3559 ( inet_gist_same PGNSP PGUID 12 1 0 0 0 f f f f t f i s 3 0 2281 "869 869 2281" _null_ _null_ _null_ _null_ _null_ inet_gist_same _null_ _null_ _null_ )); -DESCR("GiST support"); - -/* SP-GiST support for inet and cidr */ -DATA(insert OID = 3795 ( inet_spg_config PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 2278 "2281 2281" _null_ _null_ _null_ _null_ _null_ inet_spg_config _null_ _null_ _null_ )); -DESCR("SP-GiST support"); -DATA(insert OID = 3796 ( inet_spg_choose PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 2278 "2281 2281" _null_ _null_ _null_ _null_ _null_ inet_spg_choose _null_ _null_ _null_ )); -DESCR("SP-GiST support"); -DATA(insert OID = 3797 ( inet_spg_picksplit PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 2278 "2281 2281" _null_ _null_ _null_ _null_ _null_ inet_spg_picksplit _null_ _null_ _null_ )); -DESCR("SP-GiST support"); -DATA(insert OID = 3798 ( inet_spg_inner_consistent PGNSP PGUID 12 1 0 0 
0 f f f f t f i s 2 0 2278 "2281 2281" _null_ _null_ _null_ _null_ _null_ inet_spg_inner_consistent _null_ _null_ _null_ )); -DESCR("SP-GiST support"); -DATA(insert OID = 3799 ( inet_spg_leaf_consistent PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 16 "2281 2281" _null_ _null_ _null_ _null_ _null_ inet_spg_leaf_consistent _null_ _null_ _null_ )); -DESCR("SP-GiST support"); - -/* Selectivity estimation for inet and cidr */ -DATA(insert OID = 3560 ( networksel PGNSP PGUID 12 1 0 0 0 f f f f t f s s 4 0 701 "2281 26 2281 23" _null_ _null_ _null_ _null_ _null_ networksel _null_ _null_ _null_ )); -DESCR("restriction selectivity for network operators"); -DATA(insert OID = 3561 ( networkjoinsel PGNSP PGUID 12 1 0 0 0 f f f f t f s s 5 0 701 "2281 26 2281 21 2281" _null_ _null_ _null_ _null_ _null_ networkjoinsel _null_ _null_ _null_ )); -DESCR("join selectivity for network operators"); - -DATA(insert OID = 1690 ( time_mi_time PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 1186 "1083 1083" _null_ _null_ _null_ _null_ _null_ time_mi_time _null_ _null_ _null_ )); - -DATA(insert OID = 1691 ( boolle PGNSP PGUID 12 1 0 0 0 f f f t t f i s 2 0 16 "16 16" _null_ _null_ _null_ _null_ _null_ boolle _null_ _null_ _null_ )); -DATA(insert OID = 1692 ( boolge PGNSP PGUID 12 1 0 0 0 f f f t t f i s 2 0 16 "16 16" _null_ _null_ _null_ _null_ _null_ boolge _null_ _null_ _null_ )); -DATA(insert OID = 1693 ( btboolcmp PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 23 "16 16" _null_ _null_ _null_ _null_ _null_ btboolcmp _null_ _null_ _null_ )); -DESCR("less-equal-greater"); - -DATA(insert OID = 1688 ( time_hash PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 23 "1083" _null_ _null_ _null_ _null_ _null_ time_hash _null_ _null_ _null_ )); -DESCR("hash"); -DATA(insert OID = 1696 ( timetz_hash PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 23 "1266" _null_ _null_ _null_ _null_ _null_ timetz_hash _null_ _null_ _null_ )); -DESCR("hash"); -DATA(insert OID = 1697 ( interval_hash PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 23 "1186" _null_ _null_ _null_ _null_ _null_ interval_hash _null_ _null_ _null_ )); -DESCR("hash"); - - -/* OID's 1700 - 1799 NUMERIC data type */ -DATA(insert OID = 1701 ( numeric_in PGNSP PGUID 12 1 0 0 0 f f f f t f i s 3 0 1700 "2275 26 23" _null_ _null_ _null_ _null_ _null_ numeric_in _null_ _null_ _null_ )); -DESCR("I/O"); -DATA(insert OID = 1702 ( numeric_out PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 2275 "1700" _null_ _null_ _null_ _null_ _null_ numeric_out _null_ _null_ _null_ )); -DESCR("I/O"); -DATA(insert OID = 2917 ( numerictypmodin PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 23 "1263" _null_ _null_ _null_ _null_ _null_ numerictypmodin _null_ _null_ _null_ )); -DESCR("I/O typmod"); -DATA(insert OID = 2918 ( numerictypmodout PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 2275 "23" _null_ _null_ _null_ _null_ _null_ numerictypmodout _null_ _null_ _null_ )); -DESCR("I/O typmod"); -DATA(insert OID = 3157 ( numeric_transform PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 2281 "2281" _null_ _null_ _null_ _null_ _null_ numeric_transform _null_ _null_ _null_ )); -DESCR("transform a numeric length coercion"); -DATA(insert OID = 1703 ( numeric PGNSP PGUID 12 1 0 0 numeric_transform f f f f t f i s 2 0 1700 "1700 23" _null_ _null_ _null_ _null_ _null_ numeric _null_ _null_ _null_ )); -DESCR("adjust numeric to typmod precision/scale"); -DATA(insert OID = 1704 ( numeric_abs PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 1700 "1700" _null_ _null_ _null_ _null_ _null_ numeric_abs _null_ _null_ _null_ )); -DATA(insert OID = 1705 ( abs PGNSP 
PGUID 12 1 0 0 0 f f f f t f i s 1 0 1700 "1700" _null_ _null_ _null_ _null_ _null_ numeric_abs _null_ _null_ _null_ )); -DESCR("absolute value"); -DATA(insert OID = 1706 ( sign PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 1700 "1700" _null_ _null_ _null_ _null_ _null_ numeric_sign _null_ _null_ _null_ )); -DESCR("sign of value"); -DATA(insert OID = 1707 ( round PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 1700 "1700 23" _null_ _null_ _null_ _null_ _null_ numeric_round _null_ _null_ _null_ )); -DESCR("value rounded to 'scale'"); -DATA(insert OID = 1708 ( round PGNSP PGUID 14 1 0 0 0 f f f f t f i s 1 0 1700 "1700" _null_ _null_ _null_ _null_ _null_ "select pg_catalog.round($1,0)" _null_ _null_ _null_ )); -DESCR("value rounded to 'scale' of zero"); -DATA(insert OID = 1709 ( trunc PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 1700 "1700 23" _null_ _null_ _null_ _null_ _null_ numeric_trunc _null_ _null_ _null_ )); -DESCR("value truncated to 'scale'"); -DATA(insert OID = 1710 ( trunc PGNSP PGUID 14 1 0 0 0 f f f f t f i s 1 0 1700 "1700" _null_ _null_ _null_ _null_ _null_ "select pg_catalog.trunc($1,0)" _null_ _null_ _null_ )); -DESCR("value truncated to 'scale' of zero"); -DATA(insert OID = 1711 ( ceil PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 1700 "1700" _null_ _null_ _null_ _null_ _null_ numeric_ceil _null_ _null_ _null_ )); -DESCR("nearest integer >= value"); -DATA(insert OID = 2167 ( ceiling PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 1700 "1700" _null_ _null_ _null_ _null_ _null_ numeric_ceil _null_ _null_ _null_ )); -DESCR("nearest integer >= value"); -DATA(insert OID = 1712 ( floor PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 1700 "1700" _null_ _null_ _null_ _null_ _null_ numeric_floor _null_ _null_ _null_ )); -DESCR("nearest integer <= value"); -DATA(insert OID = 1718 ( numeric_eq PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 16 "1700 1700" _null_ _null_ _null_ _null_ _null_ numeric_eq _null_ _null_ _null_ )); -DATA(insert OID = 1719 ( numeric_ne PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 16 "1700 1700" _null_ _null_ _null_ _null_ _null_ numeric_ne _null_ _null_ _null_ )); -DATA(insert OID = 1720 ( numeric_gt PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 16 "1700 1700" _null_ _null_ _null_ _null_ _null_ numeric_gt _null_ _null_ _null_ )); -DATA(insert OID = 1721 ( numeric_ge PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 16 "1700 1700" _null_ _null_ _null_ _null_ _null_ numeric_ge _null_ _null_ _null_ )); -DATA(insert OID = 1722 ( numeric_lt PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 16 "1700 1700" _null_ _null_ _null_ _null_ _null_ numeric_lt _null_ _null_ _null_ )); -DATA(insert OID = 1723 ( numeric_le PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 16 "1700 1700" _null_ _null_ _null_ _null_ _null_ numeric_le _null_ _null_ _null_ )); -DATA(insert OID = 1724 ( numeric_add PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 1700 "1700 1700" _null_ _null_ _null_ _null_ _null_ numeric_add _null_ _null_ _null_ )); -DATA(insert OID = 1725 ( numeric_sub PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 1700 "1700 1700" _null_ _null_ _null_ _null_ _null_ numeric_sub _null_ _null_ _null_ )); -DATA(insert OID = 1726 ( numeric_mul PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 1700 "1700 1700" _null_ _null_ _null_ _null_ _null_ numeric_mul _null_ _null_ _null_ )); -DATA(insert OID = 1727 ( numeric_div PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 1700 "1700 1700" _null_ _null_ _null_ _null_ _null_ numeric_div _null_ _null_ _null_ )); -DATA(insert OID = 1728 ( mod PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 1700 "1700 1700" _null_ _null_ 
_null_ _null_ _null_ numeric_mod _null_ _null_ _null_ )); -DESCR("modulus"); -DATA(insert OID = 1729 ( numeric_mod PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 1700 "1700 1700" _null_ _null_ _null_ _null_ _null_ numeric_mod _null_ _null_ _null_ )); -DATA(insert OID = 1730 ( sqrt PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 1700 "1700" _null_ _null_ _null_ _null_ _null_ numeric_sqrt _null_ _null_ _null_ )); -DESCR("square root"); -DATA(insert OID = 1731 ( numeric_sqrt PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 1700 "1700" _null_ _null_ _null_ _null_ _null_ numeric_sqrt _null_ _null_ _null_ )); -DESCR("square root"); -DATA(insert OID = 1732 ( exp PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 1700 "1700" _null_ _null_ _null_ _null_ _null_ numeric_exp _null_ _null_ _null_ )); -DESCR("natural exponential (e^x)"); -DATA(insert OID = 1733 ( numeric_exp PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 1700 "1700" _null_ _null_ _null_ _null_ _null_ numeric_exp _null_ _null_ _null_ )); -DESCR("natural exponential (e^x)"); -DATA(insert OID = 1734 ( ln PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 1700 "1700" _null_ _null_ _null_ _null_ _null_ numeric_ln _null_ _null_ _null_ )); -DESCR("natural logarithm"); -DATA(insert OID = 1735 ( numeric_ln PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 1700 "1700" _null_ _null_ _null_ _null_ _null_ numeric_ln _null_ _null_ _null_ )); -DESCR("natural logarithm"); -DATA(insert OID = 1736 ( log PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 1700 "1700 1700" _null_ _null_ _null_ _null_ _null_ numeric_log _null_ _null_ _null_ )); -DESCR("logarithm base m of n"); -DATA(insert OID = 1737 ( numeric_log PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 1700 "1700 1700" _null_ _null_ _null_ _null_ _null_ numeric_log _null_ _null_ _null_ )); -DESCR("logarithm base m of n"); -DATA(insert OID = 1738 ( pow PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 1700 "1700 1700" _null_ _null_ _null_ _null_ _null_ numeric_power _null_ _null_ _null_ )); -DESCR("exponentiation"); -DATA(insert OID = 2169 ( power PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 1700 "1700 1700" _null_ _null_ _null_ _null_ _null_ numeric_power _null_ _null_ _null_ )); -DESCR("exponentiation"); -DATA(insert OID = 1739 ( numeric_power PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 1700 "1700 1700" _null_ _null_ _null_ _null_ _null_ numeric_power _null_ _null_ _null_ )); -DATA(insert OID = 8888 ( scale PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 23 "1700" _null_ _null_ _null_ _null_ _null_ numeric_scale _null_ _null_ _null_ )); -DESCR("number of decimal digits in the fractional part"); -DATA(insert OID = 1740 ( numeric PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 1700 "23" _null_ _null_ _null_ _null_ _null_ int4_numeric _null_ _null_ _null_ )); -DESCR("convert int4 to numeric"); -DATA(insert OID = 1741 ( log PGNSP PGUID 14 1 0 0 0 f f f f t f i s 1 0 1700 "1700" _null_ _null_ _null_ _null_ _null_ "select pg_catalog.log(10, $1)" _null_ _null_ _null_ )); -DESCR("base 10 logarithm"); -DATA(insert OID = 1742 ( numeric PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 1700 "700" _null_ _null_ _null_ _null_ _null_ float4_numeric _null_ _null_ _null_ )); -DESCR("convert float4 to numeric"); -DATA(insert OID = 1743 ( numeric PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 1700 "701" _null_ _null_ _null_ _null_ _null_ float8_numeric _null_ _null_ _null_ )); -DESCR("convert float8 to numeric"); -DATA(insert OID = 1744 ( int4 PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 23 "1700" _null_ _null_ _null_ _null_ _null_ numeric_int4 _null_ _null_ _null_ )); -DESCR("convert numeric to 
int4"); -DATA(insert OID = 1745 ( float4 PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 700 "1700" _null_ _null_ _null_ _null_ _null_ numeric_float4 _null_ _null_ _null_ )); -DESCR("convert numeric to float4"); -DATA(insert OID = 1746 ( float8 PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 701 "1700" _null_ _null_ _null_ _null_ _null_ numeric_float8 _null_ _null_ _null_ )); -DESCR("convert numeric to float8"); -DATA(insert OID = 1973 ( div PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 1700 "1700 1700" _null_ _null_ _null_ _null_ _null_ numeric_div_trunc _null_ _null_ _null_ )); -DESCR("trunc(x/y)"); -DATA(insert OID = 1980 ( numeric_div_trunc PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 1700 "1700 1700" _null_ _null_ _null_ _null_ _null_ numeric_div_trunc _null_ _null_ _null_ )); -DESCR("trunc(x/y)"); -DATA(insert OID = 2170 ( width_bucket PGNSP PGUID 12 1 0 0 0 f f f f t f i s 4 0 23 "1700 1700 1700 23" _null_ _null_ _null_ _null_ _null_ width_bucket_numeric _null_ _null_ _null_ )); -DESCR("bucket number of operand in equal-width histogram"); - -DATA(insert OID = 1747 ( time_pl_interval PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 1083 "1083 1186" _null_ _null_ _null_ _null_ _null_ time_pl_interval _null_ _null_ _null_ )); -DATA(insert OID = 1748 ( time_mi_interval PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 1083 "1083 1186" _null_ _null_ _null_ _null_ _null_ time_mi_interval _null_ _null_ _null_ )); -DATA(insert OID = 1749 ( timetz_pl_interval PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 1266 "1266 1186" _null_ _null_ _null_ _null_ _null_ timetz_pl_interval _null_ _null_ _null_ )); -DATA(insert OID = 1750 ( timetz_mi_interval PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 1266 "1266 1186" _null_ _null_ _null_ _null_ _null_ timetz_mi_interval _null_ _null_ _null_ )); - -DATA(insert OID = 1764 ( numeric_inc PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 1700 "1700" _null_ _null_ _null_ _null_ _null_ numeric_inc _null_ _null_ _null_ )); -DESCR("increment by one"); -DATA(insert OID = 1766 ( numeric_smaller PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 1700 "1700 1700" _null_ _null_ _null_ _null_ _null_ numeric_smaller _null_ _null_ _null_ )); -DESCR("smaller of two"); -DATA(insert OID = 1767 ( numeric_larger PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 1700 "1700 1700" _null_ _null_ _null_ _null_ _null_ numeric_larger _null_ _null_ _null_ )); -DESCR("larger of two"); -DATA(insert OID = 1769 ( numeric_cmp PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 23 "1700 1700" _null_ _null_ _null_ _null_ _null_ numeric_cmp _null_ _null_ _null_ )); -DESCR("less-equal-greater"); -DATA(insert OID = 3283 ( numeric_sortsupport PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 2278 "2281" _null_ _null_ _null_ _null_ _null_ numeric_sortsupport _null_ _null_ _null_ )); -DESCR("sort support"); -DATA(insert OID = 1771 ( numeric_uminus PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 1700 "1700" _null_ _null_ _null_ _null_ _null_ numeric_uminus _null_ _null_ _null_ )); -DATA(insert OID = 1779 ( int8 PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 20 "1700" _null_ _null_ _null_ _null_ _null_ numeric_int8 _null_ _null_ _null_ )); -DESCR("convert numeric to int8"); -DATA(insert OID = 1781 ( numeric PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 1700 "20" _null_ _null_ _null_ _null_ _null_ int8_numeric _null_ _null_ _null_ )); -DESCR("convert int8 to numeric"); -DATA(insert OID = 1782 ( numeric PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 1700 "21" _null_ _null_ _null_ _null_ _null_ int2_numeric _null_ _null_ _null_ )); -DESCR("convert int2 to numeric"); -DATA(insert OID = 
1783 ( int2 PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 21 "1700" _null_ _null_ _null_ _null_ _null_ numeric_int2 _null_ _null_ _null_ )); -DESCR("convert numeric to int2"); - -/* formatting */ -DATA(insert OID = 1770 ( to_char PGNSP PGUID 12 1 0 0 0 f f f f t f s s 2 0 25 "1184 25" _null_ _null_ _null_ _null_ _null_ timestamptz_to_char _null_ _null_ _null_ )); -DESCR("format timestamp with time zone to text"); -DATA(insert OID = 1772 ( to_char PGNSP PGUID 12 1 0 0 0 f f f f t f s s 2 0 25 "1700 25" _null_ _null_ _null_ _null_ _null_ numeric_to_char _null_ _null_ _null_ )); -DESCR("format numeric to text"); -DATA(insert OID = 1773 ( to_char PGNSP PGUID 12 1 0 0 0 f f f f t f s s 2 0 25 "23 25" _null_ _null_ _null_ _null_ _null_ int4_to_char _null_ _null_ _null_ )); -DESCR("format int4 to text"); -DATA(insert OID = 1774 ( to_char PGNSP PGUID 12 1 0 0 0 f f f f t f s s 2 0 25 "20 25" _null_ _null_ _null_ _null_ _null_ int8_to_char _null_ _null_ _null_ )); -DESCR("format int8 to text"); -DATA(insert OID = 1775 ( to_char PGNSP PGUID 12 1 0 0 0 f f f f t f s s 2 0 25 "700 25" _null_ _null_ _null_ _null_ _null_ float4_to_char _null_ _null_ _null_ )); -DESCR("format float4 to text"); -DATA(insert OID = 1776 ( to_char PGNSP PGUID 12 1 0 0 0 f f f f t f s s 2 0 25 "701 25" _null_ _null_ _null_ _null_ _null_ float8_to_char _null_ _null_ _null_ )); -DESCR("format float8 to text"); -DATA(insert OID = 1777 ( to_number PGNSP PGUID 12 1 0 0 0 f f f f t f s s 2 0 1700 "25 25" _null_ _null_ _null_ _null_ _null_ numeric_to_number _null_ _null_ _null_ )); -DESCR("convert text to numeric"); -DATA(insert OID = 1778 ( to_timestamp PGNSP PGUID 12 1 0 0 0 f f f f t f s s 2 0 1184 "25 25" _null_ _null_ _null_ _null_ _null_ to_timestamp _null_ _null_ _null_ )); -DESCR("convert text to timestamp with time zone"); -DATA(insert OID = 1780 ( to_date PGNSP PGUID 12 1 0 0 0 f f f f t f s s 2 0 1082 "25 25" _null_ _null_ _null_ _null_ _null_ to_date _null_ _null_ _null_ )); -DESCR("convert text to date"); -DATA(insert OID = 1768 ( to_char PGNSP PGUID 12 1 0 0 0 f f f f t f s s 2 0 25 "1186 25" _null_ _null_ _null_ _null_ _null_ interval_to_char _null_ _null_ _null_ )); -DESCR("format interval to text"); - -DATA(insert OID = 1282 ( quote_ident PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 25 "25" _null_ _null_ _null_ _null_ _null_ quote_ident _null_ _null_ _null_ )); -DESCR("quote an identifier for usage in a querystring"); -DATA(insert OID = 1283 ( quote_literal PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 25 "25" _null_ _null_ _null_ _null_ _null_ quote_literal _null_ _null_ _null_ )); -DESCR("quote a literal for usage in a querystring"); -DATA(insert OID = 1285 ( quote_literal PGNSP PGUID 14 1 0 0 0 f f f f t f s s 1 0 25 "2283" _null_ _null_ _null_ _null_ _null_ "select pg_catalog.quote_literal($1::pg_catalog.text)" _null_ _null_ _null_ )); -DESCR("quote a data value for usage in a querystring"); -DATA(insert OID = 1289 ( quote_nullable PGNSP PGUID 12 1 0 0 0 f f f f f f i s 1 0 25 "25" _null_ _null_ _null_ _null_ _null_ quote_nullable _null_ _null_ _null_ )); -DESCR("quote a possibly-null literal for usage in a querystring"); -DATA(insert OID = 1290 ( quote_nullable PGNSP PGUID 14 1 0 0 0 f f f f f f s s 1 0 25 "2283" _null_ _null_ _null_ _null_ _null_ "select pg_catalog.quote_nullable($1::pg_catalog.text)" _null_ _null_ _null_ )); -DESCR("quote a possibly-null data value for usage in a querystring"); - -DATA(insert OID = 1798 ( oidin PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 26 "2275" _null_ _null_ _null_ _null_ _null_ 
oidin _null_ _null_ _null_ )); -DESCR("I/O"); -DATA(insert OID = 1799 ( oidout PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 2275 "26" _null_ _null_ _null_ _null_ _null_ oidout _null_ _null_ _null_ )); -DESCR("I/O"); - -DATA(insert OID = 3058 ( concat PGNSP PGUID 12 1 0 2276 0 f f f f f f s s 1 0 25 "2276" "{2276}" "{v}" _null_ _null_ _null_ text_concat _null_ _null_ _null_ )); -DESCR("concatenate values"); -DATA(insert OID = 3059 ( concat_ws PGNSP PGUID 12 1 0 2276 0 f f f f f f s s 2 0 25 "25 2276" "{25,2276}" "{i,v}" _null_ _null_ _null_ text_concat_ws _null_ _null_ _null_ )); -DESCR("concatenate values with separators"); -DATA(insert OID = 3060 ( left PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 25 "25 23" _null_ _null_ _null_ _null_ _null_ text_left _null_ _null_ _null_ )); -DESCR("extract the first n characters"); -DATA(insert OID = 3061 ( right PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 25 "25 23" _null_ _null_ _null_ _null_ _null_ text_right _null_ _null_ _null_ )); -DESCR("extract the last n characters"); -DATA(insert OID = 3062 ( reverse PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 25 "25" _null_ _null_ _null_ _null_ _null_ text_reverse _null_ _null_ _null_ )); -DESCR("reverse text"); -DATA(insert OID = 3539 ( format PGNSP PGUID 12 1 0 2276 0 f f f f f f s s 2 0 25 "25 2276" "{25,2276}" "{i,v}" _null_ _null_ _null_ text_format _null_ _null_ _null_ )); -DESCR("format text message"); -DATA(insert OID = 3540 ( format PGNSP PGUID 12 1 0 0 0 f f f f f f s s 1 0 25 "25" _null_ _null_ _null_ _null_ _null_ text_format_nv _null_ _null_ _null_ )); -DESCR("format text message"); - -DATA(insert OID = 1810 ( bit_length PGNSP PGUID 14 1 0 0 0 f f f f t f i s 1 0 23 "17" _null_ _null_ _null_ _null_ _null_ "select pg_catalog.octet_length($1) * 8" _null_ _null_ _null_ )); -DESCR("length in bits"); -DATA(insert OID = 1811 ( bit_length PGNSP PGUID 14 1 0 0 0 f f f f t f i s 1 0 23 "25" _null_ _null_ _null_ _null_ _null_ "select pg_catalog.octet_length($1) * 8" _null_ _null_ _null_ )); -DESCR("length in bits"); -DATA(insert OID = 1812 ( bit_length PGNSP PGUID 14 1 0 0 0 f f f f t f i s 1 0 23 "1560" _null_ _null_ _null_ _null_ _null_ "select pg_catalog.length($1)" _null_ _null_ _null_ )); -DESCR("length in bits"); - -/* Selectivity estimators for LIKE and related operators */ -DATA(insert OID = 1814 ( iclikesel PGNSP PGUID 12 1 0 0 0 f f f f t f s s 4 0 701 "2281 26 2281 23" _null_ _null_ _null_ _null_ _null_ iclikesel _null_ _null_ _null_ )); -DESCR("restriction selectivity of ILIKE"); -DATA(insert OID = 1815 ( icnlikesel PGNSP PGUID 12 1 0 0 0 f f f f t f s s 4 0 701 "2281 26 2281 23" _null_ _null_ _null_ _null_ _null_ icnlikesel _null_ _null_ _null_ )); -DESCR("restriction selectivity of NOT ILIKE"); -DATA(insert OID = 1816 ( iclikejoinsel PGNSP PGUID 12 1 0 0 0 f f f f t f s s 5 0 701 "2281 26 2281 21 2281" _null_ _null_ _null_ _null_ _null_ iclikejoinsel _null_ _null_ _null_ )); -DESCR("join selectivity of ILIKE"); -DATA(insert OID = 1817 ( icnlikejoinsel PGNSP PGUID 12 1 0 0 0 f f f f t f s s 5 0 701 "2281 26 2281 21 2281" _null_ _null_ _null_ _null_ _null_ icnlikejoinsel _null_ _null_ _null_ )); -DESCR("join selectivity of NOT ILIKE"); -DATA(insert OID = 1818 ( regexeqsel PGNSP PGUID 12 1 0 0 0 f f f f t f s s 4 0 701 "2281 26 2281 23" _null_ _null_ _null_ _null_ _null_ regexeqsel _null_ _null_ _null_ )); -DESCR("restriction selectivity of regex match"); -DATA(insert OID = 1819 ( likesel PGNSP PGUID 12 1 0 0 0 f f f f t f s s 4 0 701 "2281 26 2281 23" _null_ _null_ _null_ _null_ _null_ likesel 
_null_ _null_ _null_ )); -DESCR("restriction selectivity of LIKE"); -DATA(insert OID = 1820 ( icregexeqsel PGNSP PGUID 12 1 0 0 0 f f f f t f s s 4 0 701 "2281 26 2281 23" _null_ _null_ _null_ _null_ _null_ icregexeqsel _null_ _null_ _null_ )); -DESCR("restriction selectivity of case-insensitive regex match"); -DATA(insert OID = 1821 ( regexnesel PGNSP PGUID 12 1 0 0 0 f f f f t f s s 4 0 701 "2281 26 2281 23" _null_ _null_ _null_ _null_ _null_ regexnesel _null_ _null_ _null_ )); -DESCR("restriction selectivity of regex non-match"); -DATA(insert OID = 1822 ( nlikesel PGNSP PGUID 12 1 0 0 0 f f f f t f s s 4 0 701 "2281 26 2281 23" _null_ _null_ _null_ _null_ _null_ nlikesel _null_ _null_ _null_ )); -DESCR("restriction selectivity of NOT LIKE"); -DATA(insert OID = 1823 ( icregexnesel PGNSP PGUID 12 1 0 0 0 f f f f t f s s 4 0 701 "2281 26 2281 23" _null_ _null_ _null_ _null_ _null_ icregexnesel _null_ _null_ _null_ )); -DESCR("restriction selectivity of case-insensitive regex non-match"); -DATA(insert OID = 1824 ( regexeqjoinsel PGNSP PGUID 12 1 0 0 0 f f f f t f s s 5 0 701 "2281 26 2281 21 2281" _null_ _null_ _null_ _null_ _null_ regexeqjoinsel _null_ _null_ _null_ )); -DESCR("join selectivity of regex match"); -DATA(insert OID = 1825 ( likejoinsel PGNSP PGUID 12 1 0 0 0 f f f f t f s s 5 0 701 "2281 26 2281 21 2281" _null_ _null_ _null_ _null_ _null_ likejoinsel _null_ _null_ _null_ )); -DESCR("join selectivity of LIKE"); -DATA(insert OID = 1826 ( icregexeqjoinsel PGNSP PGUID 12 1 0 0 0 f f f f t f s s 5 0 701 "2281 26 2281 21 2281" _null_ _null_ _null_ _null_ _null_ icregexeqjoinsel _null_ _null_ _null_ )); -DESCR("join selectivity of case-insensitive regex match"); -DATA(insert OID = 1827 ( regexnejoinsel PGNSP PGUID 12 1 0 0 0 f f f f t f s s 5 0 701 "2281 26 2281 21 2281" _null_ _null_ _null_ _null_ _null_ regexnejoinsel _null_ _null_ _null_ )); -DESCR("join selectivity of regex non-match"); -DATA(insert OID = 1828 ( nlikejoinsel PGNSP PGUID 12 1 0 0 0 f f f f t f s s 5 0 701 "2281 26 2281 21 2281" _null_ _null_ _null_ _null_ _null_ nlikejoinsel _null_ _null_ _null_ )); -DESCR("join selectivity of NOT LIKE"); -DATA(insert OID = 1829 ( icregexnejoinsel PGNSP PGUID 12 1 0 0 0 f f f f t f s s 5 0 701 "2281 26 2281 21 2281" _null_ _null_ _null_ _null_ _null_ icregexnejoinsel _null_ _null_ _null_ )); -DESCR("join selectivity of case-insensitive regex non-match"); - -/* Aggregate-related functions */ -DATA(insert OID = 1830 ( float8_avg PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 701 "1022" _null_ _null_ _null_ _null_ _null_ float8_avg _null_ _null_ _null_ )); -DESCR("aggregate final function"); -DATA(insert OID = 2512 ( float8_var_pop PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 701 "1022" _null_ _null_ _null_ _null_ _null_ float8_var_pop _null_ _null_ _null_ )); -DESCR("aggregate final function"); -DATA(insert OID = 1831 ( float8_var_samp PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 701 "1022" _null_ _null_ _null_ _null_ _null_ float8_var_samp _null_ _null_ _null_ )); -DESCR("aggregate final function"); -DATA(insert OID = 2513 ( float8_stddev_pop PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 701 "1022" _null_ _null_ _null_ _null_ _null_ float8_stddev_pop _null_ _null_ _null_ )); -DESCR("aggregate final function"); -DATA(insert OID = 1832 ( float8_stddev_samp PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 701 "1022" _null_ _null_ _null_ _null_ _null_ float8_stddev_samp _null_ _null_ _null_ )); -DESCR("aggregate final function"); -DATA(insert OID = 1833 ( numeric_accum PGNSP PGUID 12 1 0 0 0 f f f f f 
f i s 2 0 2281 "2281 1700" _null_ _null_ _null_ _null_ _null_ numeric_accum _null_ _null_ _null_ )); -DESCR("aggregate transition function"); -DATA(insert OID = 3341 ( numeric_combine PGNSP PGUID 12 1 0 0 0 f f f f f f i s 2 0 2281 "2281 2281" _null_ _null_ _null_ _null_ _null_ numeric_combine _null_ _null_ _null_ )); -DESCR("aggregate combine function"); -DATA(insert OID = 2858 ( numeric_avg_accum PGNSP PGUID 12 1 0 0 0 f f f f f f i s 2 0 2281 "2281 1700" _null_ _null_ _null_ _null_ _null_ numeric_avg_accum _null_ _null_ _null_ )); -DESCR("aggregate transition function"); -DATA(insert OID = 3337 ( numeric_avg_combine PGNSP PGUID 12 1 0 0 0 f f f f f f i s 2 0 2281 "2281 2281" _null_ _null_ _null_ _null_ _null_ numeric_avg_combine _null_ _null_ _null_ )); -DESCR("aggregate combine function"); -DATA(insert OID = 2740 ( numeric_avg_serialize PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 17 "2281" _null_ _null_ _null_ _null_ _null_ numeric_avg_serialize _null_ _null_ _null_ )); -DESCR("aggregate serial function"); -DATA(insert OID = 2741 ( numeric_avg_deserialize PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 2281 "17 2281" _null_ _null_ _null_ _null_ _null_ numeric_avg_deserialize _null_ _null_ _null_ )); -DESCR("aggregate deserial function"); -DATA(insert OID = 3335 ( numeric_serialize PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 17 "2281" _null_ _null_ _null_ _null_ _null_ numeric_serialize _null_ _null_ _null_ )); -DESCR("aggregate serial function"); -DATA(insert OID = 3336 ( numeric_deserialize PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 2281 "17 2281" _null_ _null_ _null_ _null_ _null_ numeric_deserialize _null_ _null_ _null_ )); -DESCR("aggregate deserial function"); -DATA(insert OID = 3548 ( numeric_accum_inv PGNSP PGUID 12 1 0 0 0 f f f f f f i s 2 0 2281 "2281 1700" _null_ _null_ _null_ _null_ _null_ numeric_accum_inv _null_ _null_ _null_ )); -DESCR("aggregate transition function"); -DATA(insert OID = 1834 ( int2_accum PGNSP PGUID 12 1 0 0 0 f f f f f f i s 2 0 2281 "2281 21" _null_ _null_ _null_ _null_ _null_ int2_accum _null_ _null_ _null_ )); -DESCR("aggregate transition function"); -DATA(insert OID = 1835 ( int4_accum PGNSP PGUID 12 1 0 0 0 f f f f f f i s 2 0 2281 "2281 23" _null_ _null_ _null_ _null_ _null_ int4_accum _null_ _null_ _null_ )); -DESCR("aggregate transition function"); -DATA(insert OID = 1836 ( int8_accum PGNSP PGUID 12 1 0 0 0 f f f f f f i s 2 0 2281 "2281 20" _null_ _null_ _null_ _null_ _null_ int8_accum _null_ _null_ _null_ )); -DESCR("aggregate transition function"); -DATA(insert OID = 3338 ( numeric_poly_combine PGNSP PGUID 12 1 0 0 0 f f f f f f i s 2 0 2281 "2281 2281" _null_ _null_ _null_ _null_ _null_ numeric_poly_combine _null_ _null_ _null_ )); -DESCR("aggregate combine function"); -DATA(insert OID = 3339 ( numeric_poly_serialize PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 17 "2281" _null_ _null_ _null_ _null_ _null_ numeric_poly_serialize _null_ _null_ _null_ )); -DESCR("aggregate serial function"); -DATA(insert OID = 3340 ( numeric_poly_deserialize PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 2281 "17 2281" _null_ _null_ _null_ _null_ _null_ numeric_poly_deserialize _null_ _null_ _null_ )); -DESCR("aggregate deserial function"); -DATA(insert OID = 2746 ( int8_avg_accum PGNSP PGUID 12 1 0 0 0 f f f f f f i s 2 0 2281 "2281 20" _null_ _null_ _null_ _null_ _null_ int8_avg_accum _null_ _null_ _null_ )); -DESCR("aggregate transition function"); -DATA(insert OID = 3567 ( int2_accum_inv PGNSP PGUID 12 1 0 0 0 f f f f f f i s 2 0 2281 "2281 21" _null_ _null_ _null_ 
_null_ _null_ int2_accum_inv _null_ _null_ _null_ )); -DESCR("aggregate transition function"); -DATA(insert OID = 3568 ( int4_accum_inv PGNSP PGUID 12 1 0 0 0 f f f f f f i s 2 0 2281 "2281 23" _null_ _null_ _null_ _null_ _null_ int4_accum_inv _null_ _null_ _null_ )); -DESCR("aggregate transition function"); -DATA(insert OID = 3569 ( int8_accum_inv PGNSP PGUID 12 1 0 0 0 f f f f f f i s 2 0 2281 "2281 20" _null_ _null_ _null_ _null_ _null_ int8_accum_inv _null_ _null_ _null_ )); -DESCR("aggregate transition function"); -DATA(insert OID = 3387 ( int8_avg_accum_inv PGNSP PGUID 12 1 0 0 0 f f f f f f i s 2 0 2281 "2281 20" _null_ _null_ _null_ _null_ _null_ int8_avg_accum_inv _null_ _null_ _null_ )); -DESCR("aggregate transition function"); -DATA(insert OID = 2785 ( int8_avg_combine PGNSP PGUID 12 1 0 0 0 f f f f f f i s 2 0 2281 "2281 2281" _null_ _null_ _null_ _null_ _null_ int8_avg_combine _null_ _null_ _null_ )); -DESCR("aggregate combine function"); -DATA(insert OID = 2786 ( int8_avg_serialize PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 17 "2281" _null_ _null_ _null_ _null_ _null_ int8_avg_serialize _null_ _null_ _null_ )); -DESCR("aggregate serial function"); -DATA(insert OID = 2787 ( int8_avg_deserialize PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 2281 "17 2281" _null_ _null_ _null_ _null_ _null_ int8_avg_deserialize _null_ _null_ _null_ )); -DESCR("aggregate deserial function"); -DATA(insert OID = 3324 ( int4_avg_combine PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 1016 "1016 1016" _null_ _null_ _null_ _null_ _null_ int4_avg_combine _null_ _null_ _null_ )); -DESCR("aggregate combine function"); -DATA(insert OID = 3178 ( numeric_sum PGNSP PGUID 12 1 0 0 0 f f f f f f i s 1 0 1700 "2281" _null_ _null_ _null_ _null_ _null_ numeric_sum _null_ _null_ _null_ )); -DESCR("aggregate final function"); -DATA(insert OID = 1837 ( numeric_avg PGNSP PGUID 12 1 0 0 0 f f f f f f i s 1 0 1700 "2281" _null_ _null_ _null_ _null_ _null_ numeric_avg _null_ _null_ _null_ )); -DESCR("aggregate final function"); -DATA(insert OID = 2514 ( numeric_var_pop PGNSP PGUID 12 1 0 0 0 f f f f f f i s 1 0 1700 "2281" _null_ _null_ _null_ _null_ _null_ numeric_var_pop _null_ _null_ _null_ )); -DESCR("aggregate final function"); -DATA(insert OID = 1838 ( numeric_var_samp PGNSP PGUID 12 1 0 0 0 f f f f f f i s 1 0 1700 "2281" _null_ _null_ _null_ _null_ _null_ numeric_var_samp _null_ _null_ _null_ )); -DESCR("aggregate final function"); -DATA(insert OID = 2596 ( numeric_stddev_pop PGNSP PGUID 12 1 0 0 0 f f f f f f i s 1 0 1700 "2281" _null_ _null_ _null_ _null_ _null_ numeric_stddev_pop _null_ _null_ _null_ )); -DESCR("aggregate final function"); -DATA(insert OID = 1839 ( numeric_stddev_samp PGNSP PGUID 12 1 0 0 0 f f f f f f i s 1 0 1700 "2281" _null_ _null_ _null_ _null_ _null_ numeric_stddev_samp _null_ _null_ _null_ )); -DESCR("aggregate final function"); -DATA(insert OID = 1840 ( int2_sum PGNSP PGUID 12 1 0 0 0 f f f f f f i s 2 0 20 "20 21" _null_ _null_ _null_ _null_ _null_ int2_sum _null_ _null_ _null_ )); -DESCR("aggregate transition function"); -DATA(insert OID = 1841 ( int4_sum PGNSP PGUID 12 1 0 0 0 f f f f f f i s 2 0 20 "20 23" _null_ _null_ _null_ _null_ _null_ int4_sum _null_ _null_ _null_ )); -DESCR("aggregate transition function"); -DATA(insert OID = 1842 ( int8_sum PGNSP PGUID 12 1 0 0 0 f f f f f f i s 2 0 1700 "1700 20" _null_ _null_ _null_ _null_ _null_ int8_sum _null_ _null_ _null_ )); -DESCR("aggregate transition function"); -DATA(insert OID = 3388 ( numeric_poly_sum PGNSP PGUID 12 1 0 0 0 f f f f f 
f i s 1 0 1700 "2281" _null_ _null_ _null_ _null_ _null_ numeric_poly_sum _null_ _null_ _null_ )); -DESCR("aggregate final function"); -DATA(insert OID = 3389 ( numeric_poly_avg PGNSP PGUID 12 1 0 0 0 f f f f f f i s 1 0 1700 "2281" _null_ _null_ _null_ _null_ _null_ numeric_poly_avg _null_ _null_ _null_ )); -DESCR("aggregate final function"); -DATA(insert OID = 3390 ( numeric_poly_var_pop PGNSP PGUID 12 1 0 0 0 f f f f f f i s 1 0 1700 "2281" _null_ _null_ _null_ _null_ _null_ numeric_poly_var_pop _null_ _null_ _null_ )); -DESCR("aggregate final function"); -DATA(insert OID = 3391 ( numeric_poly_var_samp PGNSP PGUID 12 1 0 0 0 f f f f f f i s 1 0 1700 "2281" _null_ _null_ _null_ _null_ _null_ numeric_poly_var_samp _null_ _null_ _null_ )); -DESCR("aggregate final function"); -DATA(insert OID = 3392 ( numeric_poly_stddev_pop PGNSP PGUID 12 1 0 0 0 f f f f f f i s 1 0 1700 "2281" _null_ _null_ _null_ _null_ _null_ numeric_poly_stddev_pop _null_ _null_ _null_ )); -DESCR("aggregate final function"); -DATA(insert OID = 3393 ( numeric_poly_stddev_samp PGNSP PGUID 12 1 0 0 0 f f f f f f i s 1 0 1700 "2281" _null_ _null_ _null_ _null_ _null_ numeric_poly_stddev_samp _null_ _null_ _null_ )); -DESCR("aggregate final function"); - -DATA(insert OID = 1843 ( interval_accum PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 1187 "1187 1186" _null_ _null_ _null_ _null_ _null_ interval_accum _null_ _null_ _null_ )); -DESCR("aggregate transition function"); -DATA(insert OID = 3325 ( interval_combine PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 1187 "1187 1187" _null_ _null_ _null_ _null_ _null_ interval_combine _null_ _null_ _null_ )); -DESCR("aggregate combine function"); -DATA(insert OID = 3549 ( interval_accum_inv PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 1187 "1187 1186" _null_ _null_ _null_ _null_ _null_ interval_accum_inv _null_ _null_ _null_ )); -DESCR("aggregate transition function"); -DATA(insert OID = 1844 ( interval_avg PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 1186 "1187" _null_ _null_ _null_ _null_ _null_ interval_avg _null_ _null_ _null_ )); -DESCR("aggregate final function"); -DATA(insert OID = 1962 ( int2_avg_accum PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 1016 "1016 21" _null_ _null_ _null_ _null_ _null_ int2_avg_accum _null_ _null_ _null_ )); -DESCR("aggregate transition function"); -DATA(insert OID = 1963 ( int4_avg_accum PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 1016 "1016 23" _null_ _null_ _null_ _null_ _null_ int4_avg_accum _null_ _null_ _null_ )); -DESCR("aggregate transition function"); -DATA(insert OID = 3570 ( int2_avg_accum_inv PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 1016 "1016 21" _null_ _null_ _null_ _null_ _null_ int2_avg_accum_inv _null_ _null_ _null_ )); -DESCR("aggregate transition function"); -DATA(insert OID = 3571 ( int4_avg_accum_inv PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 1016 "1016 23" _null_ _null_ _null_ _null_ _null_ int4_avg_accum_inv _null_ _null_ _null_ )); -DESCR("aggregate transition function"); -DATA(insert OID = 1964 ( int8_avg PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 1700 "1016" _null_ _null_ _null_ _null_ _null_ int8_avg _null_ _null_ _null_ )); -DESCR("aggregate final function"); -DATA(insert OID = 3572 ( int2int4_sum PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 20 "1016" _null_ _null_ _null_ _null_ _null_ int2int4_sum _null_ _null_ _null_ )); -DESCR("aggregate final function"); -DATA(insert OID = 2805 ( int8inc_float8_float8 PGNSP PGUID 12 1 0 0 0 f f f f t f i s 3 0 20 "20 701 701" _null_ _null_ _null_ _null_ _null_ int8inc_float8_float8 _null_ _null_ 
_null_ )); -DESCR("aggregate transition function"); -DATA(insert OID = 2806 ( float8_regr_accum PGNSP PGUID 12 1 0 0 0 f f f f t f i s 3 0 1022 "1022 701 701" _null_ _null_ _null_ _null_ _null_ float8_regr_accum _null_ _null_ _null_ )); -DESCR("aggregate transition function"); -DATA(insert OID = 3342 ( float8_regr_combine PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 1022 "1022 1022" _null_ _null_ _null_ _null_ _null_ float8_regr_combine _null_ _null_ _null_ )); -DESCR("aggregate combine function"); -DATA(insert OID = 2807 ( float8_regr_sxx PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 701 "1022" _null_ _null_ _null_ _null_ _null_ float8_regr_sxx _null_ _null_ _null_ )); -DESCR("aggregate final function"); -DATA(insert OID = 2808 ( float8_regr_syy PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 701 "1022" _null_ _null_ _null_ _null_ _null_ float8_regr_syy _null_ _null_ _null_ )); -DESCR("aggregate final function"); -DATA(insert OID = 2809 ( float8_regr_sxy PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 701 "1022" _null_ _null_ _null_ _null_ _null_ float8_regr_sxy _null_ _null_ _null_ )); -DESCR("aggregate final function"); -DATA(insert OID = 2810 ( float8_regr_avgx PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 701 "1022" _null_ _null_ _null_ _null_ _null_ float8_regr_avgx _null_ _null_ _null_ )); -DESCR("aggregate final function"); -DATA(insert OID = 2811 ( float8_regr_avgy PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 701 "1022" _null_ _null_ _null_ _null_ _null_ float8_regr_avgy _null_ _null_ _null_ )); -DESCR("aggregate final function"); -DATA(insert OID = 2812 ( float8_regr_r2 PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 701 "1022" _null_ _null_ _null_ _null_ _null_ float8_regr_r2 _null_ _null_ _null_ )); -DESCR("aggregate final function"); -DATA(insert OID = 2813 ( float8_regr_slope PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 701 "1022" _null_ _null_ _null_ _null_ _null_ float8_regr_slope _null_ _null_ _null_ )); -DESCR("aggregate final function"); -DATA(insert OID = 2814 ( float8_regr_intercept PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 701 "1022" _null_ _null_ _null_ _null_ _null_ float8_regr_intercept _null_ _null_ _null_ )); -DESCR("aggregate final function"); -DATA(insert OID = 2815 ( float8_covar_pop PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 701 "1022" _null_ _null_ _null_ _null_ _null_ float8_covar_pop _null_ _null_ _null_ )); -DESCR("aggregate final function"); -DATA(insert OID = 2816 ( float8_covar_samp PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 701 "1022" _null_ _null_ _null_ _null_ _null_ float8_covar_samp _null_ _null_ _null_ )); -DESCR("aggregate final function"); -DATA(insert OID = 2817 ( float8_corr PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 701 "1022" _null_ _null_ _null_ _null_ _null_ float8_corr _null_ _null_ _null_ )); -DESCR("aggregate final function"); - -DATA(insert OID = 3535 ( string_agg_transfn PGNSP PGUID 12 1 0 0 0 f f f f f f i s 3 0 2281 "2281 25 25" _null_ _null_ _null_ _null_ _null_ string_agg_transfn _null_ _null_ _null_ )); -DESCR("aggregate transition function"); -DATA(insert OID = 3536 ( string_agg_finalfn PGNSP PGUID 12 1 0 0 0 f f f f f f i s 1 0 25 "2281" _null_ _null_ _null_ _null_ _null_ string_agg_finalfn _null_ _null_ _null_ )); -DESCR("aggregate final function"); -DATA(insert OID = 3538 ( string_agg PGNSP PGUID 12 1 0 0 0 t f f f f f i s 2 0 25 "25 25" _null_ _null_ _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ )); -DESCR("concatenate aggregate input into a string"); -DATA(insert OID = 3543 ( bytea_string_agg_transfn PGNSP PGUID 12 1 0 0 0 f f f f f f i 
s 3 0 2281 "2281 17 17" _null_ _null_ _null_ _null_ _null_ bytea_string_agg_transfn _null_ _null_ _null_ )); -DESCR("aggregate transition function"); -DATA(insert OID = 3544 ( bytea_string_agg_finalfn PGNSP PGUID 12 1 0 0 0 f f f f f f i s 1 0 17 "2281" _null_ _null_ _null_ _null_ _null_ bytea_string_agg_finalfn _null_ _null_ _null_ )); -DESCR("aggregate final function"); -DATA(insert OID = 3545 ( string_agg PGNSP PGUID 12 1 0 0 0 t f f f f f i s 2 0 17 "17 17" _null_ _null_ _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ )); -DESCR("concatenate aggregate input into a bytea"); - -/* To ASCII conversion */ -DATA(insert OID = 1845 ( to_ascii PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 25 "25" _null_ _null_ _null_ _null_ _null_ to_ascii_default _null_ _null_ _null_ )); -DESCR("encode text from DB encoding to ASCII text"); -DATA(insert OID = 1846 ( to_ascii PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 25 "25 23" _null_ _null_ _null_ _null_ _null_ to_ascii_enc _null_ _null_ _null_ )); -DESCR("encode text from encoding to ASCII text"); -DATA(insert OID = 1847 ( to_ascii PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 25 "25 19" _null_ _null_ _null_ _null_ _null_ to_ascii_encname _null_ _null_ _null_ )); -DESCR("encode text from encoding to ASCII text"); - -DATA(insert OID = 1848 ( interval_pl_time PGNSP PGUID 14 1 0 0 0 f f f f t f i s 2 0 1083 "1186 1083" _null_ _null_ _null_ _null_ _null_ "select $2 + $1" _null_ _null_ _null_ )); - -DATA(insert OID = 1850 ( int28eq PGNSP PGUID 12 1 0 0 0 f f f t t f i s 2 0 16 "21 20" _null_ _null_ _null_ _null_ _null_ int28eq _null_ _null_ _null_ )); -DATA(insert OID = 1851 ( int28ne PGNSP PGUID 12 1 0 0 0 f f f t t f i s 2 0 16 "21 20" _null_ _null_ _null_ _null_ _null_ int28ne _null_ _null_ _null_ )); -DATA(insert OID = 1852 ( int28lt PGNSP PGUID 12 1 0 0 0 f f f t t f i s 2 0 16 "21 20" _null_ _null_ _null_ _null_ _null_ int28lt _null_ _null_ _null_ )); -DATA(insert OID = 1853 ( int28gt PGNSP PGUID 12 1 0 0 0 f f f t t f i s 2 0 16 "21 20" _null_ _null_ _null_ _null_ _null_ int28gt _null_ _null_ _null_ )); -DATA(insert OID = 1854 ( int28le PGNSP PGUID 12 1 0 0 0 f f f t t f i s 2 0 16 "21 20" _null_ _null_ _null_ _null_ _null_ int28le _null_ _null_ _null_ )); -DATA(insert OID = 1855 ( int28ge PGNSP PGUID 12 1 0 0 0 f f f t t f i s 2 0 16 "21 20" _null_ _null_ _null_ _null_ _null_ int28ge _null_ _null_ _null_ )); - -DATA(insert OID = 1856 ( int82eq PGNSP PGUID 12 1 0 0 0 f f f t t f i s 2 0 16 "20 21" _null_ _null_ _null_ _null_ _null_ int82eq _null_ _null_ _null_ )); -DATA(insert OID = 1857 ( int82ne PGNSP PGUID 12 1 0 0 0 f f f t t f i s 2 0 16 "20 21" _null_ _null_ _null_ _null_ _null_ int82ne _null_ _null_ _null_ )); -DATA(insert OID = 1858 ( int82lt PGNSP PGUID 12 1 0 0 0 f f f t t f i s 2 0 16 "20 21" _null_ _null_ _null_ _null_ _null_ int82lt _null_ _null_ _null_ )); -DATA(insert OID = 1859 ( int82gt PGNSP PGUID 12 1 0 0 0 f f f t t f i s 2 0 16 "20 21" _null_ _null_ _null_ _null_ _null_ int82gt _null_ _null_ _null_ )); -DATA(insert OID = 1860 ( int82le PGNSP PGUID 12 1 0 0 0 f f f t t f i s 2 0 16 "20 21" _null_ _null_ _null_ _null_ _null_ int82le _null_ _null_ _null_ )); -DATA(insert OID = 1861 ( int82ge PGNSP PGUID 12 1 0 0 0 f f f t t f i s 2 0 16 "20 21" _null_ _null_ _null_ _null_ _null_ int82ge _null_ _null_ _null_ )); - -DATA(insert OID = 1892 ( int2and PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 21 "21 21" _null_ _null_ _null_ _null_ _null_ int2and _null_ _null_ _null_ )); -DATA(insert OID = 1893 ( int2or PGNSP PGUID 12 1 0 0 0 f f f f t f 
i s 2 0 21 "21 21" _null_ _null_ _null_ _null_ _null_ int2or _null_ _null_ _null_ )); -DATA(insert OID = 1894 ( int2xor PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 21 "21 21" _null_ _null_ _null_ _null_ _null_ int2xor _null_ _null_ _null_ )); -DATA(insert OID = 1895 ( int2not PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 21 "21" _null_ _null_ _null_ _null_ _null_ int2not _null_ _null_ _null_ )); -DATA(insert OID = 1896 ( int2shl PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 21 "21 23" _null_ _null_ _null_ _null_ _null_ int2shl _null_ _null_ _null_ )); -DATA(insert OID = 1897 ( int2shr PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 21 "21 23" _null_ _null_ _null_ _null_ _null_ int2shr _null_ _null_ _null_ )); - -DATA(insert OID = 1898 ( int4and PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 23 "23 23" _null_ _null_ _null_ _null_ _null_ int4and _null_ _null_ _null_ )); -DATA(insert OID = 1899 ( int4or PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 23 "23 23" _null_ _null_ _null_ _null_ _null_ int4or _null_ _null_ _null_ )); -DATA(insert OID = 1900 ( int4xor PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 23 "23 23" _null_ _null_ _null_ _null_ _null_ int4xor _null_ _null_ _null_ )); -DATA(insert OID = 1901 ( int4not PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 23 "23" _null_ _null_ _null_ _null_ _null_ int4not _null_ _null_ _null_ )); -DATA(insert OID = 1902 ( int4shl PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 23 "23 23" _null_ _null_ _null_ _null_ _null_ int4shl _null_ _null_ _null_ )); -DATA(insert OID = 1903 ( int4shr PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 23 "23 23" _null_ _null_ _null_ _null_ _null_ int4shr _null_ _null_ _null_ )); - -DATA(insert OID = 1904 ( int8and PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 20 "20 20" _null_ _null_ _null_ _null_ _null_ int8and _null_ _null_ _null_ )); -DATA(insert OID = 1905 ( int8or PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 20 "20 20" _null_ _null_ _null_ _null_ _null_ int8or _null_ _null_ _null_ )); -DATA(insert OID = 1906 ( int8xor PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 20 "20 20" _null_ _null_ _null_ _null_ _null_ int8xor _null_ _null_ _null_ )); -DATA(insert OID = 1907 ( int8not PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 20 "20" _null_ _null_ _null_ _null_ _null_ int8not _null_ _null_ _null_ )); -DATA(insert OID = 1908 ( int8shl PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 20 "20 23" _null_ _null_ _null_ _null_ _null_ int8shl _null_ _null_ _null_ )); -DATA(insert OID = 1909 ( int8shr PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 20 "20 23" _null_ _null_ _null_ _null_ _null_ int8shr _null_ _null_ _null_ )); - -DATA(insert OID = 1910 ( int8up PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 20 "20" _null_ _null_ _null_ _null_ _null_ int8up _null_ _null_ _null_ )); -DATA(insert OID = 1911 ( int2up PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 21 "21" _null_ _null_ _null_ _null_ _null_ int2up _null_ _null_ _null_ )); -DATA(insert OID = 1912 ( int4up PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 23 "23" _null_ _null_ _null_ _null_ _null_ int4up _null_ _null_ _null_ )); -DATA(insert OID = 1913 ( float4up PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 700 "700" _null_ _null_ _null_ _null_ _null_ float4up _null_ _null_ _null_ )); -DATA(insert OID = 1914 ( float8up PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 701 "701" _null_ _null_ _null_ _null_ _null_ float8up _null_ _null_ _null_ )); -DATA(insert OID = 1915 ( numeric_uplus PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 1700 "1700" _null_ _null_ _null_ _null_ _null_ numeric_uplus _null_ _null_ _null_ )); - -DATA(insert OID = 1922 ( 
has_table_privilege PGNSP PGUID 12 1 0 0 0 f f f f t f s s 3 0 16 "19 25 25" _null_ _null_ _null_ _null_ _null_ has_table_privilege_name_name _null_ _null_ _null_ )); -DESCR("user privilege on relation by username, rel name"); -DATA(insert OID = 1923 ( has_table_privilege PGNSP PGUID 12 1 0 0 0 f f f f t f s s 3 0 16 "19 26 25" _null_ _null_ _null_ _null_ _null_ has_table_privilege_name_id _null_ _null_ _null_ )); -DESCR("user privilege on relation by username, rel oid"); -DATA(insert OID = 1924 ( has_table_privilege PGNSP PGUID 12 1 0 0 0 f f f f t f s s 3 0 16 "26 25 25" _null_ _null_ _null_ _null_ _null_ has_table_privilege_id_name _null_ _null_ _null_ )); -DESCR("user privilege on relation by user oid, rel name"); -DATA(insert OID = 1925 ( has_table_privilege PGNSP PGUID 12 1 0 0 0 f f f f t f s s 3 0 16 "26 26 25" _null_ _null_ _null_ _null_ _null_ has_table_privilege_id_id _null_ _null_ _null_ )); -DESCR("user privilege on relation by user oid, rel oid"); -DATA(insert OID = 1926 ( has_table_privilege PGNSP PGUID 12 1 0 0 0 f f f f t f s s 2 0 16 "25 25" _null_ _null_ _null_ _null_ _null_ has_table_privilege_name _null_ _null_ _null_ )); -DESCR("current user privilege on relation by rel name"); -DATA(insert OID = 1927 ( has_table_privilege PGNSP PGUID 12 1 0 0 0 f f f f t f s s 2 0 16 "26 25" _null_ _null_ _null_ _null_ _null_ has_table_privilege_id _null_ _null_ _null_ )); -DESCR("current user privilege on relation by rel oid"); - -DATA(insert OID = 2181 ( has_sequence_privilege PGNSP PGUID 12 1 0 0 0 f f f f t f s s 3 0 16 "19 25 25" _null_ _null_ _null_ _null_ _null_ has_sequence_privilege_name_name _null_ _null_ _null_ )); -DESCR("user privilege on sequence by username, seq name"); -DATA(insert OID = 2182 ( has_sequence_privilege PGNSP PGUID 12 1 0 0 0 f f f f t f s s 3 0 16 "19 26 25" _null_ _null_ _null_ _null_ _null_ has_sequence_privilege_name_id _null_ _null_ _null_ )); -DESCR("user privilege on sequence by username, seq oid"); -DATA(insert OID = 2183 ( has_sequence_privilege PGNSP PGUID 12 1 0 0 0 f f f f t f s s 3 0 16 "26 25 25" _null_ _null_ _null_ _null_ _null_ has_sequence_privilege_id_name _null_ _null_ _null_ )); -DESCR("user privilege on sequence by user oid, seq name"); -DATA(insert OID = 2184 ( has_sequence_privilege PGNSP PGUID 12 1 0 0 0 f f f f t f s s 3 0 16 "26 26 25" _null_ _null_ _null_ _null_ _null_ has_sequence_privilege_id_id _null_ _null_ _null_ )); -DESCR("user privilege on sequence by user oid, seq oid"); -DATA(insert OID = 2185 ( has_sequence_privilege PGNSP PGUID 12 1 0 0 0 f f f f t f s s 2 0 16 "25 25" _null_ _null_ _null_ _null_ _null_ has_sequence_privilege_name _null_ _null_ _null_ )); -DESCR("current user privilege on sequence by seq name"); -DATA(insert OID = 2186 ( has_sequence_privilege PGNSP PGUID 12 1 0 0 0 f f f f t f s s 2 0 16 "26 25" _null_ _null_ _null_ _null_ _null_ has_sequence_privilege_id _null_ _null_ _null_ )); -DESCR("current user privilege on sequence by seq oid"); - -DATA(insert OID = 3012 ( has_column_privilege PGNSP PGUID 12 1 0 0 0 f f f f t f s s 4 0 16 "19 25 25 25" _null_ _null_ _null_ _null_ _null_ has_column_privilege_name_name_name _null_ _null_ _null_ )); -DESCR("user privilege on column by username, rel name, col name"); -DATA(insert OID = 3013 ( has_column_privilege PGNSP PGUID 12 1 0 0 0 f f f f t f s s 4 0 16 "19 25 21 25" _null_ _null_ _null_ _null_ _null_ has_column_privilege_name_name_attnum _null_ _null_ _null_ )); -DESCR("user privilege on column by username, rel name, col attnum"); -DATA(insert OID = 3014 
( has_column_privilege PGNSP PGUID 12 1 0 0 0 f f f f t f s s 4 0 16 "19 26 25 25" _null_ _null_ _null_ _null_ _null_ has_column_privilege_name_id_name _null_ _null_ _null_ )); -DESCR("user privilege on column by username, rel oid, col name"); -DATA(insert OID = 3015 ( has_column_privilege PGNSP PGUID 12 1 0 0 0 f f f f t f s s 4 0 16 "19 26 21 25" _null_ _null_ _null_ _null_ _null_ has_column_privilege_name_id_attnum _null_ _null_ _null_ )); -DESCR("user privilege on column by username, rel oid, col attnum"); -DATA(insert OID = 3016 ( has_column_privilege PGNSP PGUID 12 1 0 0 0 f f f f t f s s 4 0 16 "26 25 25 25" _null_ _null_ _null_ _null_ _null_ has_column_privilege_id_name_name _null_ _null_ _null_ )); -DESCR("user privilege on column by user oid, rel name, col name"); -DATA(insert OID = 3017 ( has_column_privilege PGNSP PGUID 12 1 0 0 0 f f f f t f s s 4 0 16 "26 25 21 25" _null_ _null_ _null_ _null_ _null_ has_column_privilege_id_name_attnum _null_ _null_ _null_ )); -DESCR("user privilege on column by user oid, rel name, col attnum"); -DATA(insert OID = 3018 ( has_column_privilege PGNSP PGUID 12 1 0 0 0 f f f f t f s s 4 0 16 "26 26 25 25" _null_ _null_ _null_ _null_ _null_ has_column_privilege_id_id_name _null_ _null_ _null_ )); -DESCR("user privilege on column by user oid, rel oid, col name"); -DATA(insert OID = 3019 ( has_column_privilege PGNSP PGUID 12 1 0 0 0 f f f f t f s s 4 0 16 "26 26 21 25" _null_ _null_ _null_ _null_ _null_ has_column_privilege_id_id_attnum _null_ _null_ _null_ )); -DESCR("user privilege on column by user oid, rel oid, col attnum"); -DATA(insert OID = 3020 ( has_column_privilege PGNSP PGUID 12 1 0 0 0 f f f f t f s s 3 0 16 "25 25 25" _null_ _null_ _null_ _null_ _null_ has_column_privilege_name_name _null_ _null_ _null_ )); -DESCR("current user privilege on column by rel name, col name"); -DATA(insert OID = 3021 ( has_column_privilege PGNSP PGUID 12 1 0 0 0 f f f f t f s s 3 0 16 "25 21 25" _null_ _null_ _null_ _null_ _null_ has_column_privilege_name_attnum _null_ _null_ _null_ )); -DESCR("current user privilege on column by rel name, col attnum"); -DATA(insert OID = 3022 ( has_column_privilege PGNSP PGUID 12 1 0 0 0 f f f f t f s s 3 0 16 "26 25 25" _null_ _null_ _null_ _null_ _null_ has_column_privilege_id_name _null_ _null_ _null_ )); -DESCR("current user privilege on column by rel oid, col name"); -DATA(insert OID = 3023 ( has_column_privilege PGNSP PGUID 12 1 0 0 0 f f f f t f s s 3 0 16 "26 21 25" _null_ _null_ _null_ _null_ _null_ has_column_privilege_id_attnum _null_ _null_ _null_ )); -DESCR("current user privilege on column by rel oid, col attnum"); - -DATA(insert OID = 3024 ( has_any_column_privilege PGNSP PGUID 12 10 0 0 0 f f f f t f s s 3 0 16 "19 25 25" _null_ _null_ _null_ _null_ _null_ has_any_column_privilege_name_name _null_ _null_ _null_ )); -DESCR("user privilege on any column by username, rel name"); -DATA(insert OID = 3025 ( has_any_column_privilege PGNSP PGUID 12 10 0 0 0 f f f f t f s s 3 0 16 "19 26 25" _null_ _null_ _null_ _null_ _null_ has_any_column_privilege_name_id _null_ _null_ _null_ )); -DESCR("user privilege on any column by username, rel oid"); -DATA(insert OID = 3026 ( has_any_column_privilege PGNSP PGUID 12 10 0 0 0 f f f f t f s s 3 0 16 "26 25 25" _null_ _null_ _null_ _null_ _null_ has_any_column_privilege_id_name _null_ _null_ _null_ )); -DESCR("user privilege on any column by user oid, rel name"); -DATA(insert OID = 3027 ( has_any_column_privilege PGNSP PGUID 12 10 0 0 0 f f f f t f s s 3 0 16 "26 26 25" _null_ 
_null_ _null_ _null_ _null_ has_any_column_privilege_id_id _null_ _null_ _null_ )); -DESCR("user privilege on any column by user oid, rel oid"); -DATA(insert OID = 3028 ( has_any_column_privilege PGNSP PGUID 12 10 0 0 0 f f f f t f s s 2 0 16 "25 25" _null_ _null_ _null_ _null_ _null_ has_any_column_privilege_name _null_ _null_ _null_ )); -DESCR("current user privilege on any column by rel name"); -DATA(insert OID = 3029 ( has_any_column_privilege PGNSP PGUID 12 10 0 0 0 f f f f t f s s 2 0 16 "26 25" _null_ _null_ _null_ _null_ _null_ has_any_column_privilege_id _null_ _null_ _null_ )); -DESCR("current user privilege on any column by rel oid"); - -DATA(insert OID = 3355 ( pg_ndistinct_in PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 3361 "2275" _null_ _null_ _null_ _null_ _null_ pg_ndistinct_in _null_ _null_ _null_ )); -DESCR("I/O"); -DATA(insert OID = 3356 ( pg_ndistinct_out PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 2275 "3361" _null_ _null_ _null_ _null_ _null_ pg_ndistinct_out _null_ _null_ _null_ )); -DESCR("I/O"); -DATA(insert OID = 3357 ( pg_ndistinct_recv PGNSP PGUID 12 1 0 0 0 f f f f t f s s 1 0 3361 "2281" _null_ _null_ _null_ _null_ _null_ pg_ndistinct_recv _null_ _null_ _null_ )); -DESCR("I/O"); -DATA(insert OID = 3358 ( pg_ndistinct_send PGNSP PGUID 12 1 0 0 0 f f f f t f s s 1 0 17 "3361" _null_ _null_ _null_ _null_ _null_ pg_ndistinct_send _null_ _null_ _null_ )); -DESCR("I/O"); - -DATA(insert OID = 3404 ( pg_dependencies_in PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 3402 "2275" _null_ _null_ _null_ _null_ _null_ pg_dependencies_in _null_ _null_ _null_ )); -DESCR("I/O"); -DATA(insert OID = 3405 ( pg_dependencies_out PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 2275 "3402" _null_ _null_ _null_ _null_ _null_ pg_dependencies_out _null_ _null_ _null_ )); -DESCR("I/O"); -DATA(insert OID = 3406 ( pg_dependencies_recv PGNSP PGUID 12 1 0 0 0 f f f f t f s s 1 0 3402 "2281" _null_ _null_ _null_ _null_ _null_ pg_dependencies_recv _null_ _null_ _null_ )); -DESCR("I/O"); -DATA(insert OID = 3407 ( pg_dependencies_send PGNSP PGUID 12 1 0 0 0 f f f f t f s s 1 0 17 "3402" _null_ _null_ _null_ _null_ _null_ pg_dependencies_send _null_ _null_ _null_ )); -DESCR("I/O"); - -DATA(insert OID = 1928 ( pg_stat_get_numscans PGNSP PGUID 12 1 0 0 0 f f f f t f s r 1 0 20 "26" _null_ _null_ _null_ _null_ _null_ pg_stat_get_numscans _null_ _null_ _null_ )); -DESCR("statistics: number of scans done for table/index"); -DATA(insert OID = 1929 ( pg_stat_get_tuples_returned PGNSP PGUID 12 1 0 0 0 f f f f t f s r 1 0 20 "26" _null_ _null_ _null_ _null_ _null_ pg_stat_get_tuples_returned _null_ _null_ _null_ )); -DESCR("statistics: number of tuples read by seqscan"); -DATA(insert OID = 1930 ( pg_stat_get_tuples_fetched PGNSP PGUID 12 1 0 0 0 f f f f t f s r 1 0 20 "26" _null_ _null_ _null_ _null_ _null_ pg_stat_get_tuples_fetched _null_ _null_ _null_ )); -DESCR("statistics: number of tuples fetched by idxscan"); -DATA(insert OID = 1931 ( pg_stat_get_tuples_inserted PGNSP PGUID 12 1 0 0 0 f f f f t f s r 1 0 20 "26" _null_ _null_ _null_ _null_ _null_ pg_stat_get_tuples_inserted _null_ _null_ _null_ )); -DESCR("statistics: number of tuples inserted"); -DATA(insert OID = 1932 ( pg_stat_get_tuples_updated PGNSP PGUID 12 1 0 0 0 f f f f t f s r 1 0 20 "26" _null_ _null_ _null_ _null_ _null_ pg_stat_get_tuples_updated _null_ _null_ _null_ )); -DESCR("statistics: number of tuples updated"); -DATA(insert OID = 1933 ( pg_stat_get_tuples_deleted PGNSP PGUID 12 1 0 0 0 f f f f t f s r 1 0 20 "26" _null_ _null_ _null_ _null_ 
_null_ pg_stat_get_tuples_deleted _null_ _null_ _null_ )); -DESCR("statistics: number of tuples deleted"); -DATA(insert OID = 1972 ( pg_stat_get_tuples_hot_updated PGNSP PGUID 12 1 0 0 0 f f f f t f s r 1 0 20 "26" _null_ _null_ _null_ _null_ _null_ pg_stat_get_tuples_hot_updated _null_ _null_ _null_ )); -DESCR("statistics: number of tuples hot updated"); -DATA(insert OID = 2878 ( pg_stat_get_live_tuples PGNSP PGUID 12 1 0 0 0 f f f f t f s r 1 0 20 "26" _null_ _null_ _null_ _null_ _null_ pg_stat_get_live_tuples _null_ _null_ _null_ )); -DESCR("statistics: number of live tuples"); -DATA(insert OID = 2879 ( pg_stat_get_dead_tuples PGNSP PGUID 12 1 0 0 0 f f f f t f s r 1 0 20 "26" _null_ _null_ _null_ _null_ _null_ pg_stat_get_dead_tuples _null_ _null_ _null_ )); -DESCR("statistics: number of dead tuples"); -DATA(insert OID = 3177 ( pg_stat_get_mod_since_analyze PGNSP PGUID 12 1 0 0 0 f f f f t f s r 1 0 20 "26" _null_ _null_ _null_ _null_ _null_ pg_stat_get_mod_since_analyze _null_ _null_ _null_ )); -DESCR("statistics: number of tuples changed since last analyze"); -DATA(insert OID = 1934 ( pg_stat_get_blocks_fetched PGNSP PGUID 12 1 0 0 0 f f f f t f s r 1 0 20 "26" _null_ _null_ _null_ _null_ _null_ pg_stat_get_blocks_fetched _null_ _null_ _null_ )); -DESCR("statistics: number of blocks fetched"); -DATA(insert OID = 1935 ( pg_stat_get_blocks_hit PGNSP PGUID 12 1 0 0 0 f f f f t f s r 1 0 20 "26" _null_ _null_ _null_ _null_ _null_ pg_stat_get_blocks_hit _null_ _null_ _null_ )); -DESCR("statistics: number of blocks found in cache"); -DATA(insert OID = 2781 ( pg_stat_get_last_vacuum_time PGNSP PGUID 12 1 0 0 0 f f f f t f s r 1 0 1184 "26" _null_ _null_ _null_ _null_ _null_ pg_stat_get_last_vacuum_time _null_ _null_ _null_ )); -DESCR("statistics: last manual vacuum time for a table"); -DATA(insert OID = 2782 ( pg_stat_get_last_autovacuum_time PGNSP PGUID 12 1 0 0 0 f f f f t f s r 1 0 1184 "26" _null_ _null_ _null_ _null_ _null_ pg_stat_get_last_autovacuum_time _null_ _null_ _null_ )); -DESCR("statistics: last auto vacuum time for a table"); -DATA(insert OID = 2783 ( pg_stat_get_last_analyze_time PGNSP PGUID 12 1 0 0 0 f f f f t f s r 1 0 1184 "26" _null_ _null_ _null_ _null_ _null_ pg_stat_get_last_analyze_time _null_ _null_ _null_ )); -DESCR("statistics: last manual analyze time for a table"); -DATA(insert OID = 2784 ( pg_stat_get_last_autoanalyze_time PGNSP PGUID 12 1 0 0 0 f f f f t f s r 1 0 1184 "26" _null_ _null_ _null_ _null_ _null_ pg_stat_get_last_autoanalyze_time _null_ _null_ _null_ )); -DESCR("statistics: last auto analyze time for a table"); -DATA(insert OID = 3054 ( pg_stat_get_vacuum_count PGNSP PGUID 12 1 0 0 0 f f f f t f s r 1 0 20 "26" _null_ _null_ _null_ _null_ _null_ pg_stat_get_vacuum_count _null_ _null_ _null_ )); -DESCR("statistics: number of manual vacuums for a table"); -DATA(insert OID = 3055 ( pg_stat_get_autovacuum_count PGNSP PGUID 12 1 0 0 0 f f f f t f s r 1 0 20 "26" _null_ _null_ _null_ _null_ _null_ pg_stat_get_autovacuum_count _null_ _null_ _null_ )); -DESCR("statistics: number of auto vacuums for a table"); -DATA(insert OID = 3056 ( pg_stat_get_analyze_count PGNSP PGUID 12 1 0 0 0 f f f f t f s r 1 0 20 "26" _null_ _null_ _null_ _null_ _null_ pg_stat_get_analyze_count _null_ _null_ _null_ )); -DESCR("statistics: number of manual analyzes for a table"); -DATA(insert OID = 3057 ( pg_stat_get_autoanalyze_count PGNSP PGUID 12 1 0 0 0 f f f f t f s r 1 0 20 "26" _null_ _null_ _null_ _null_ _null_ pg_stat_get_autoanalyze_count _null_ _null_ _null_ )); 
-DESCR("statistics: number of auto analyzes for a table"); -DATA(insert OID = 1936 ( pg_stat_get_backend_idset PGNSP PGUID 12 1 100 0 0 f f f f t t s r 0 0 23 "" _null_ _null_ _null_ _null_ _null_ pg_stat_get_backend_idset _null_ _null_ _null_ )); -DESCR("statistics: currently active backend IDs"); -DATA(insert OID = 2022 ( pg_stat_get_activity PGNSP PGUID 12 1 100 0 0 f f f f f t s r 1 0 2249 "23" "{23,26,23,26,25,25,25,25,25,1184,1184,1184,1184,869,25,23,28,28,25,16,25,25,23,16,25}" "{i,o,o,o,o,o,o,o,o,o,o,o,o,o,o,o,o,o,o,o,o,o,o,o,o}" "{pid,datid,pid,usesysid,application_name,state,query,wait_event_type,wait_event,xact_start,query_start,backend_start,state_change,client_addr,client_hostname,client_port,backend_xid,backend_xmin,backend_type,ssl,sslversion,sslcipher,sslbits,sslcompression,sslclientdn}" _null_ _null_ pg_stat_get_activity _null_ _null_ _null_ )); -DESCR("statistics: information about currently active backends"); -DATA(insert OID = 3318 ( pg_stat_get_progress_info PGNSP PGUID 12 1 100 0 0 f f f f t t s r 1 0 2249 "25" "{25,23,26,26,20,20,20,20,20,20,20,20,20,20}" "{i,o,o,o,o,o,o,o,o,o,o,o,o,o}" "{cmdtype,pid,datid,relid,param1,param2,param3,param4,param5,param6,param7,param8,param9,param10}" _null_ _null_ pg_stat_get_progress_info _null_ _null_ _null_ )); -DESCR("statistics: information about progress of backends running maintenance command"); -DATA(insert OID = 3099 ( pg_stat_get_wal_senders PGNSP PGUID 12 1 10 0 0 f f f f f t s r 0 0 2249 "" "{23,25,3220,3220,3220,3220,1186,1186,1186,23,25}" "{o,o,o,o,o,o,o,o,o,o,o}" "{pid,state,sent_lsn,write_lsn,flush_lsn,replay_lsn,write_lag,flush_lag,replay_lag,sync_priority,sync_state}" _null_ _null_ pg_stat_get_wal_senders _null_ _null_ _null_ )); -DESCR("statistics: information about currently active replication"); -DATA(insert OID = 3317 ( pg_stat_get_wal_receiver PGNSP PGUID 12 1 0 0 0 f f f f f f s r 0 0 2249 "" "{23,25,3220,23,3220,23,1184,1184,3220,1184,25,25}" "{o,o,o,o,o,o,o,o,o,o,o,o}" "{pid,status,receive_start_lsn,receive_start_tli,received_lsn,received_tli,last_msg_send_time,last_msg_receipt_time,latest_end_lsn,latest_end_time,slot_name,conninfo}" _null_ _null_ pg_stat_get_wal_receiver _null_ _null_ _null_ )); -DESCR("statistics: information about WAL receiver"); -DATA(insert OID = 6118 ( pg_stat_get_subscription PGNSP PGUID 12 1 0 0 0 f f f f f f s r 1 0 2249 "26" "{26,26,26,23,3220,1184,1184,3220,1184}" "{i,o,o,o,o,o,o,o,o}" "{subid,subid,relid,pid,received_lsn,last_msg_send_time,last_msg_receipt_time,latest_end_lsn,latest_end_time}" _null_ _null_ pg_stat_get_subscription _null_ _null_ _null_ )); -DESCR("statistics: information about subscription"); -DATA(insert OID = 2026 ( pg_backend_pid PGNSP PGUID 12 1 0 0 0 f f f f t f s r 0 0 23 "" _null_ _null_ _null_ _null_ _null_ pg_backend_pid _null_ _null_ _null_ )); -DESCR("statistics: current backend PID"); -DATA(insert OID = 1937 ( pg_stat_get_backend_pid PGNSP PGUID 12 1 0 0 0 f f f f t f s r 1 0 23 "23" _null_ _null_ _null_ _null_ _null_ pg_stat_get_backend_pid _null_ _null_ _null_ )); -DESCR("statistics: PID of backend"); -DATA(insert OID = 1938 ( pg_stat_get_backend_dbid PGNSP PGUID 12 1 0 0 0 f f f f t f s r 1 0 26 "23" _null_ _null_ _null_ _null_ _null_ pg_stat_get_backend_dbid _null_ _null_ _null_ )); -DESCR("statistics: database ID of backend"); -DATA(insert OID = 1939 ( pg_stat_get_backend_userid PGNSP PGUID 12 1 0 0 0 f f f f t f s r 1 0 26 "23" _null_ _null_ _null_ _null_ _null_ pg_stat_get_backend_userid _null_ _null_ _null_ )); -DESCR("statistics: user ID of 
backend"); -DATA(insert OID = 1940 ( pg_stat_get_backend_activity PGNSP PGUID 12 1 0 0 0 f f f f t f s r 1 0 25 "23" _null_ _null_ _null_ _null_ _null_ pg_stat_get_backend_activity _null_ _null_ _null_ )); -DESCR("statistics: current query of backend"); -DATA(insert OID = 2788 ( pg_stat_get_backend_wait_event_type PGNSP PGUID 12 1 0 0 0 f f f f t f s r 1 0 25 "23" _null_ _null_ _null_ _null_ _null_ pg_stat_get_backend_wait_event_type _null_ _null_ _null_ )); -DESCR("statistics: wait event type on which backend is currently waiting"); -DATA(insert OID = 2853 ( pg_stat_get_backend_wait_event PGNSP PGUID 12 1 0 0 0 f f f f t f s r 1 0 25 "23" _null_ _null_ _null_ _null_ _null_ pg_stat_get_backend_wait_event _null_ _null_ _null_ )); -DESCR("statistics: wait event on which backend is currently waiting"); -DATA(insert OID = 2094 ( pg_stat_get_backend_activity_start PGNSP PGUID 12 1 0 0 0 f f f f t f s r 1 0 1184 "23" _null_ _null_ _null_ _null_ _null_ pg_stat_get_backend_activity_start _null_ _null_ _null_ )); -DESCR("statistics: start time for current query of backend"); -DATA(insert OID = 2857 ( pg_stat_get_backend_xact_start PGNSP PGUID 12 1 0 0 0 f f f f t f s r 1 0 1184 "23" _null_ _null_ _null_ _null_ _null_ pg_stat_get_backend_xact_start _null_ _null_ _null_ )); -DESCR("statistics: start time for backend's current transaction"); -DATA(insert OID = 1391 ( pg_stat_get_backend_start PGNSP PGUID 12 1 0 0 0 f f f f t f s r 1 0 1184 "23" _null_ _null_ _null_ _null_ _null_ pg_stat_get_backend_start _null_ _null_ _null_ )); -DESCR("statistics: start time for current backend session"); -DATA(insert OID = 1392 ( pg_stat_get_backend_client_addr PGNSP PGUID 12 1 0 0 0 f f f f t f s r 1 0 869 "23" _null_ _null_ _null_ _null_ _null_ pg_stat_get_backend_client_addr _null_ _null_ _null_ )); -DESCR("statistics: address of client connected to backend"); -DATA(insert OID = 1393 ( pg_stat_get_backend_client_port PGNSP PGUID 12 1 0 0 0 f f f f t f s r 1 0 23 "23" _null_ _null_ _null_ _null_ _null_ pg_stat_get_backend_client_port _null_ _null_ _null_ )); -DESCR("statistics: port number of client connected to backend"); -DATA(insert OID = 1941 ( pg_stat_get_db_numbackends PGNSP PGUID 12 1 0 0 0 f f f f t f s r 1 0 23 "26" _null_ _null_ _null_ _null_ _null_ pg_stat_get_db_numbackends _null_ _null_ _null_ )); -DESCR("statistics: number of backends in database"); -DATA(insert OID = 1942 ( pg_stat_get_db_xact_commit PGNSP PGUID 12 1 0 0 0 f f f f t f s r 1 0 20 "26" _null_ _null_ _null_ _null_ _null_ pg_stat_get_db_xact_commit _null_ _null_ _null_ )); -DESCR("statistics: transactions committed"); -DATA(insert OID = 1943 ( pg_stat_get_db_xact_rollback PGNSP PGUID 12 1 0 0 0 f f f f t f s r 1 0 20 "26" _null_ _null_ _null_ _null_ _null_ pg_stat_get_db_xact_rollback _null_ _null_ _null_ )); -DESCR("statistics: transactions rolled back"); -DATA(insert OID = 1944 ( pg_stat_get_db_blocks_fetched PGNSP PGUID 12 1 0 0 0 f f f f t f s r 1 0 20 "26" _null_ _null_ _null_ _null_ _null_ pg_stat_get_db_blocks_fetched _null_ _null_ _null_ )); -DESCR("statistics: blocks fetched for database"); -DATA(insert OID = 1945 ( pg_stat_get_db_blocks_hit PGNSP PGUID 12 1 0 0 0 f f f f t f s r 1 0 20 "26" _null_ _null_ _null_ _null_ _null_ pg_stat_get_db_blocks_hit _null_ _null_ _null_ )); -DESCR("statistics: blocks found in cache for database"); -DATA(insert OID = 2758 ( pg_stat_get_db_tuples_returned PGNSP PGUID 12 1 0 0 0 f f f f t f s r 1 0 20 "26" _null_ _null_ _null_ _null_ _null_ pg_stat_get_db_tuples_returned _null_ _null_ _null_ )); 
-DESCR("statistics: tuples returned for database"); -DATA(insert OID = 2759 ( pg_stat_get_db_tuples_fetched PGNSP PGUID 12 1 0 0 0 f f f f t f s r 1 0 20 "26" _null_ _null_ _null_ _null_ _null_ pg_stat_get_db_tuples_fetched _null_ _null_ _null_ )); -DESCR("statistics: tuples fetched for database"); -DATA(insert OID = 2760 ( pg_stat_get_db_tuples_inserted PGNSP PGUID 12 1 0 0 0 f f f f t f s r 1 0 20 "26" _null_ _null_ _null_ _null_ _null_ pg_stat_get_db_tuples_inserted _null_ _null_ _null_ )); -DESCR("statistics: tuples inserted in database"); -DATA(insert OID = 2761 ( pg_stat_get_db_tuples_updated PGNSP PGUID 12 1 0 0 0 f f f f t f s r 1 0 20 "26" _null_ _null_ _null_ _null_ _null_ pg_stat_get_db_tuples_updated _null_ _null_ _null_ )); -DESCR("statistics: tuples updated in database"); -DATA(insert OID = 2762 ( pg_stat_get_db_tuples_deleted PGNSP PGUID 12 1 0 0 0 f f f f t f s r 1 0 20 "26" _null_ _null_ _null_ _null_ _null_ pg_stat_get_db_tuples_deleted _null_ _null_ _null_ )); -DESCR("statistics: tuples deleted in database"); -DATA(insert OID = 3065 ( pg_stat_get_db_conflict_tablespace PGNSP PGUID 12 1 0 0 0 f f f f t f s r 1 0 20 "26" _null_ _null_ _null_ _null_ _null_ pg_stat_get_db_conflict_tablespace _null_ _null_ _null_ )); -DESCR("statistics: recovery conflicts in database caused by drop tablespace"); -DATA(insert OID = 3066 ( pg_stat_get_db_conflict_lock PGNSP PGUID 12 1 0 0 0 f f f f t f s r 1 0 20 "26" _null_ _null_ _null_ _null_ _null_ pg_stat_get_db_conflict_lock _null_ _null_ _null_ )); -DESCR("statistics: recovery conflicts in database caused by relation lock"); -DATA(insert OID = 3067 ( pg_stat_get_db_conflict_snapshot PGNSP PGUID 12 1 0 0 0 f f f f t f s r 1 0 20 "26" _null_ _null_ _null_ _null_ _null_ pg_stat_get_db_conflict_snapshot _null_ _null_ _null_ )); -DESCR("statistics: recovery conflicts in database caused by snapshot expiry"); -DATA(insert OID = 3068 ( pg_stat_get_db_conflict_bufferpin PGNSP PGUID 12 1 0 0 0 f f f f t f s r 1 0 20 "26" _null_ _null_ _null_ _null_ _null_ pg_stat_get_db_conflict_bufferpin _null_ _null_ _null_ )); -DESCR("statistics: recovery conflicts in database caused by shared buffer pin"); -DATA(insert OID = 3069 ( pg_stat_get_db_conflict_startup_deadlock PGNSP PGUID 12 1 0 0 0 f f f f t f s r 1 0 20 "26" _null_ _null_ _null_ _null_ _null_ pg_stat_get_db_conflict_startup_deadlock _null_ _null_ _null_ )); -DESCR("statistics: recovery conflicts in database caused by buffer deadlock"); -DATA(insert OID = 3070 ( pg_stat_get_db_conflict_all PGNSP PGUID 12 1 0 0 0 f f f f t f s r 1 0 20 "26" _null_ _null_ _null_ _null_ _null_ pg_stat_get_db_conflict_all _null_ _null_ _null_ )); -DESCR("statistics: recovery conflicts in database"); -DATA(insert OID = 3152 ( pg_stat_get_db_deadlocks PGNSP PGUID 12 1 0 0 0 f f f f t f s r 1 0 20 "26" _null_ _null_ _null_ _null_ _null_ pg_stat_get_db_deadlocks _null_ _null_ _null_ )); -DESCR("statistics: deadlocks detected in database"); -DATA(insert OID = 3074 ( pg_stat_get_db_stat_reset_time PGNSP PGUID 12 1 0 0 0 f f f f t f s r 1 0 1184 "26" _null_ _null_ _null_ _null_ _null_ pg_stat_get_db_stat_reset_time _null_ _null_ _null_ )); -DESCR("statistics: last reset for a database"); -DATA(insert OID = 3150 ( pg_stat_get_db_temp_files PGNSP PGUID 12 1 0 0 0 f f f f t f s r 1 0 20 "26" _null_ _null_ _null_ _null_ _null_ pg_stat_get_db_temp_files _null_ _null_ _null_ )); -DESCR("statistics: number of temporary files written"); -DATA(insert OID = 3151 ( pg_stat_get_db_temp_bytes PGNSP PGUID 12 1 0 0 0 f f f f t f s r 1 0 20 
"26" _null_ _null_ _null_ _null_ _null_ pg_stat_get_db_temp_bytes _null_ _null_ _null_ )); -DESCR("statistics: number of bytes in temporary files written"); -DATA(insert OID = 2844 ( pg_stat_get_db_blk_read_time PGNSP PGUID 12 1 0 0 0 f f f f t f s r 1 0 701 "26" _null_ _null_ _null_ _null_ _null_ pg_stat_get_db_blk_read_time _null_ _null_ _null_ )); -DESCR("statistics: block read time, in milliseconds"); -DATA(insert OID = 2845 ( pg_stat_get_db_blk_write_time PGNSP PGUID 12 1 0 0 0 f f f f t f s r 1 0 701 "26" _null_ _null_ _null_ _null_ _null_ pg_stat_get_db_blk_write_time _null_ _null_ _null_ )); -DESCR("statistics: block write time, in milliseconds"); -DATA(insert OID = 3195 ( pg_stat_get_archiver PGNSP PGUID 12 1 0 0 0 f f f f f f s r 0 0 2249 "" "{20,25,1184,20,25,1184,1184}" "{o,o,o,o,o,o,o}" "{archived_count,last_archived_wal,last_archived_time,failed_count,last_failed_wal,last_failed_time,stats_reset}" _null_ _null_ pg_stat_get_archiver _null_ _null_ _null_ )); -DESCR("statistics: information about WAL archiver"); -DATA(insert OID = 2769 ( pg_stat_get_bgwriter_timed_checkpoints PGNSP PGUID 12 1 0 0 0 f f f f t f s r 0 0 20 "" _null_ _null_ _null_ _null_ _null_ pg_stat_get_bgwriter_timed_checkpoints _null_ _null_ _null_ )); -DESCR("statistics: number of timed checkpoints started by the bgwriter"); -DATA(insert OID = 2770 ( pg_stat_get_bgwriter_requested_checkpoints PGNSP PGUID 12 1 0 0 0 f f f f t f s r 0 0 20 "" _null_ _null_ _null_ _null_ _null_ pg_stat_get_bgwriter_requested_checkpoints _null_ _null_ _null_ )); -DESCR("statistics: number of backend requested checkpoints started by the bgwriter"); -DATA(insert OID = 2771 ( pg_stat_get_bgwriter_buf_written_checkpoints PGNSP PGUID 12 1 0 0 0 f f f f t f s r 0 0 20 "" _null_ _null_ _null_ _null_ _null_ pg_stat_get_bgwriter_buf_written_checkpoints _null_ _null_ _null_ )); -DESCR("statistics: number of buffers written by the bgwriter during checkpoints"); -DATA(insert OID = 2772 ( pg_stat_get_bgwriter_buf_written_clean PGNSP PGUID 12 1 0 0 0 f f f f t f s r 0 0 20 "" _null_ _null_ _null_ _null_ _null_ pg_stat_get_bgwriter_buf_written_clean _null_ _null_ _null_ )); -DESCR("statistics: number of buffers written by the bgwriter for cleaning dirty buffers"); -DATA(insert OID = 2773 ( pg_stat_get_bgwriter_maxwritten_clean PGNSP PGUID 12 1 0 0 0 f f f f t f s r 0 0 20 "" _null_ _null_ _null_ _null_ _null_ pg_stat_get_bgwriter_maxwritten_clean _null_ _null_ _null_ )); -DESCR("statistics: number of times the bgwriter stopped processing when it had written too many buffers while cleaning"); -DATA(insert OID = 3075 ( pg_stat_get_bgwriter_stat_reset_time PGNSP PGUID 12 1 0 0 0 f f f f t f s r 0 0 1184 "" _null_ _null_ _null_ _null_ _null_ pg_stat_get_bgwriter_stat_reset_time _null_ _null_ _null_ )); -DESCR("statistics: last reset for the bgwriter"); -DATA(insert OID = 3160 ( pg_stat_get_checkpoint_write_time PGNSP PGUID 12 1 0 0 0 f f f f t f s r 0 0 701 "" _null_ _null_ _null_ _null_ _null_ pg_stat_get_checkpoint_write_time _null_ _null_ _null_ )); -DESCR("statistics: checkpoint time spent writing buffers to disk, in milliseconds"); -DATA(insert OID = 3161 ( pg_stat_get_checkpoint_sync_time PGNSP PGUID 12 1 0 0 0 f f f f t f s r 0 0 701 "" _null_ _null_ _null_ _null_ _null_ pg_stat_get_checkpoint_sync_time _null_ _null_ _null_ )); -DESCR("statistics: checkpoint time spent synchronizing buffers to disk, in milliseconds"); -DATA(insert OID = 2775 ( pg_stat_get_buf_written_backend PGNSP PGUID 12 1 0 0 0 f f f f t f s r 0 0 20 "" _null_ _null_ 
_null_ _null_ _null_ pg_stat_get_buf_written_backend _null_ _null_ _null_ )); -DESCR("statistics: number of buffers written by backends"); -DATA(insert OID = 3063 ( pg_stat_get_buf_fsync_backend PGNSP PGUID 12 1 0 0 0 f f f f t f s r 0 0 20 "" _null_ _null_ _null_ _null_ _null_ pg_stat_get_buf_fsync_backend _null_ _null_ _null_ )); -DESCR("statistics: number of backend buffer writes that did their own fsync"); -DATA(insert OID = 2859 ( pg_stat_get_buf_alloc PGNSP PGUID 12 1 0 0 0 f f f f t f s r 0 0 20 "" _null_ _null_ _null_ _null_ _null_ pg_stat_get_buf_alloc _null_ _null_ _null_ )); -DESCR("statistics: number of buffer allocations"); - -DATA(insert OID = 2978 ( pg_stat_get_function_calls PGNSP PGUID 12 1 0 0 0 f f f f t f s r 1 0 20 "26" _null_ _null_ _null_ _null_ _null_ pg_stat_get_function_calls _null_ _null_ _null_ )); -DESCR("statistics: number of function calls"); -DATA(insert OID = 2979 ( pg_stat_get_function_total_time PGNSP PGUID 12 1 0 0 0 f f f f t f s r 1 0 701 "26" _null_ _null_ _null_ _null_ _null_ pg_stat_get_function_total_time _null_ _null_ _null_ )); -DESCR("statistics: total execution time of function, in milliseconds"); -DATA(insert OID = 2980 ( pg_stat_get_function_self_time PGNSP PGUID 12 1 0 0 0 f f f f t f s r 1 0 701 "26" _null_ _null_ _null_ _null_ _null_ pg_stat_get_function_self_time _null_ _null_ _null_ )); -DESCR("statistics: self execution time of function, in milliseconds"); - -DATA(insert OID = 3037 ( pg_stat_get_xact_numscans PGNSP PGUID 12 1 0 0 0 f f f f t f v r 1 0 20 "26" _null_ _null_ _null_ _null_ _null_ pg_stat_get_xact_numscans _null_ _null_ _null_ )); -DESCR("statistics: number of scans done for table/index in current transaction"); -DATA(insert OID = 3038 ( pg_stat_get_xact_tuples_returned PGNSP PGUID 12 1 0 0 0 f f f f t f v r 1 0 20 "26" _null_ _null_ _null_ _null_ _null_ pg_stat_get_xact_tuples_returned _null_ _null_ _null_ )); -DESCR("statistics: number of tuples read by seqscan in current transaction"); -DATA(insert OID = 3039 ( pg_stat_get_xact_tuples_fetched PGNSP PGUID 12 1 0 0 0 f f f f t f v r 1 0 20 "26" _null_ _null_ _null_ _null_ _null_ pg_stat_get_xact_tuples_fetched _null_ _null_ _null_ )); -DESCR("statistics: number of tuples fetched by idxscan in current transaction"); -DATA(insert OID = 3040 ( pg_stat_get_xact_tuples_inserted PGNSP PGUID 12 1 0 0 0 f f f f t f v r 1 0 20 "26" _null_ _null_ _null_ _null_ _null_ pg_stat_get_xact_tuples_inserted _null_ _null_ _null_ )); -DESCR("statistics: number of tuples inserted in current transaction"); -DATA(insert OID = 3041 ( pg_stat_get_xact_tuples_updated PGNSP PGUID 12 1 0 0 0 f f f f t f v r 1 0 20 "26" _null_ _null_ _null_ _null_ _null_ pg_stat_get_xact_tuples_updated _null_ _null_ _null_ )); -DESCR("statistics: number of tuples updated in current transaction"); -DATA(insert OID = 3042 ( pg_stat_get_xact_tuples_deleted PGNSP PGUID 12 1 0 0 0 f f f f t f v r 1 0 20 "26" _null_ _null_ _null_ _null_ _null_ pg_stat_get_xact_tuples_deleted _null_ _null_ _null_ )); -DESCR("statistics: number of tuples deleted in current transaction"); -DATA(insert OID = 3043 ( pg_stat_get_xact_tuples_hot_updated PGNSP PGUID 12 1 0 0 0 f f f f t f v r 1 0 20 "26" _null_ _null_ _null_ _null_ _null_ pg_stat_get_xact_tuples_hot_updated _null_ _null_ _null_ )); -DESCR("statistics: number of tuples hot updated in current transaction"); -DATA(insert OID = 3044 ( pg_stat_get_xact_blocks_fetched PGNSP PGUID 12 1 0 0 0 f f f f t f v r 1 0 20 "26" _null_ _null_ _null_ _null_ _null_ pg_stat_get_xact_blocks_fetched 
_null_ _null_ _null_ )); -DESCR("statistics: number of blocks fetched in current transaction"); -DATA(insert OID = 3045 ( pg_stat_get_xact_blocks_hit PGNSP PGUID 12 1 0 0 0 f f f f t f v r 1 0 20 "26" _null_ _null_ _null_ _null_ _null_ pg_stat_get_xact_blocks_hit _null_ _null_ _null_ )); -DESCR("statistics: number of blocks found in cache in current transaction"); -DATA(insert OID = 3046 ( pg_stat_get_xact_function_calls PGNSP PGUID 12 1 0 0 0 f f f f t f v r 1 0 20 "26" _null_ _null_ _null_ _null_ _null_ pg_stat_get_xact_function_calls _null_ _null_ _null_ )); -DESCR("statistics: number of function calls in current transaction"); -DATA(insert OID = 3047 ( pg_stat_get_xact_function_total_time PGNSP PGUID 12 1 0 0 0 f f f f t f v r 1 0 701 "26" _null_ _null_ _null_ _null_ _null_ pg_stat_get_xact_function_total_time _null_ _null_ _null_ )); -DESCR("statistics: total execution time of function in current transaction, in milliseconds"); -DATA(insert OID = 3048 ( pg_stat_get_xact_function_self_time PGNSP PGUID 12 1 0 0 0 f f f f t f v r 1 0 701 "26" _null_ _null_ _null_ _null_ _null_ pg_stat_get_xact_function_self_time _null_ _null_ _null_ )); -DESCR("statistics: self execution time of function in current transaction, in milliseconds"); - -DATA(insert OID = 3788 ( pg_stat_get_snapshot_timestamp PGNSP PGUID 12 1 0 0 0 f f f f t f s r 0 0 1184 "" _null_ _null_ _null_ _null_ _null_ pg_stat_get_snapshot_timestamp _null_ _null_ _null_ )); -DESCR("statistics: timestamp of the current statistics snapshot"); -DATA(insert OID = 2230 ( pg_stat_clear_snapshot PGNSP PGUID 12 1 0 0 0 f f f f f f v r 0 0 2278 "" _null_ _null_ _null_ _null_ _null_ pg_stat_clear_snapshot _null_ _null_ _null_ )); -DESCR("statistics: discard current transaction's statistics snapshot"); -DATA(insert OID = 2274 ( pg_stat_reset PGNSP PGUID 12 1 0 0 0 f f f f f f v s 0 0 2278 "" _null_ _null_ _null_ _null_ _null_ pg_stat_reset _null_ _null_ _null_ )); -DESCR("statistics: reset collected statistics for current database"); -DATA(insert OID = 3775 ( pg_stat_reset_shared PGNSP PGUID 12 1 0 0 0 f f f f t f v s 1 0 2278 "25" _null_ _null_ _null_ _null_ _null_ pg_stat_reset_shared _null_ _null_ _null_ )); -DESCR("statistics: reset collected statistics shared across the cluster"); -DATA(insert OID = 3776 ( pg_stat_reset_single_table_counters PGNSP PGUID 12 1 0 0 0 f f f f t f v s 1 0 2278 "26" _null_ _null_ _null_ _null_ _null_ pg_stat_reset_single_table_counters _null_ _null_ _null_ )); -DESCR("statistics: reset collected statistics for a single table or index in the current database"); -DATA(insert OID = 3777 ( pg_stat_reset_single_function_counters PGNSP PGUID 12 1 0 0 0 f f f f t f v s 1 0 2278 "26" _null_ _null_ _null_ _null_ _null_ pg_stat_reset_single_function_counters _null_ _null_ _null_ )); -DESCR("statistics: reset collected statistics for a single function in the current database"); - -DATA(insert OID = 3163 ( pg_trigger_depth PGNSP PGUID 12 1 0 0 0 f f f f t f s r 0 0 23 "" _null_ _null_ _null_ _null_ _null_ pg_trigger_depth _null_ _null_ _null_ )); -DESCR("current trigger depth"); - -DATA(insert OID = 3778 ( pg_tablespace_location PGNSP PGUID 12 1 0 0 0 f f f f t f s s 1 0 25 "26" _null_ _null_ _null_ _null_ _null_ pg_tablespace_location _null_ _null_ _null_ )); -DESCR("tablespace location"); - -DATA(insert OID = 1946 ( encode PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 25 "17 25" _null_ _null_ _null_ _null_ _null_ binary_encode _null_ _null_ _null_ )); -DESCR("convert bytea value into some ascii-only text string"); 
-DATA(insert OID = 1947 ( decode PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 17 "25 25" _null_ _null_ _null_ _null_ _null_ binary_decode _null_ _null_ _null_ )); -DESCR("convert ascii-encoded text string into bytea value"); - -DATA(insert OID = 1948 ( byteaeq PGNSP PGUID 12 1 0 0 0 f f f t t f i s 2 0 16 "17 17" _null_ _null_ _null_ _null_ _null_ byteaeq _null_ _null_ _null_ )); -DATA(insert OID = 1949 ( bytealt PGNSP PGUID 12 1 0 0 0 f f f t t f i s 2 0 16 "17 17" _null_ _null_ _null_ _null_ _null_ bytealt _null_ _null_ _null_ )); -DATA(insert OID = 1950 ( byteale PGNSP PGUID 12 1 0 0 0 f f f t t f i s 2 0 16 "17 17" _null_ _null_ _null_ _null_ _null_ byteale _null_ _null_ _null_ )); -DATA(insert OID = 1951 ( byteagt PGNSP PGUID 12 1 0 0 0 f f f t t f i s 2 0 16 "17 17" _null_ _null_ _null_ _null_ _null_ byteagt _null_ _null_ _null_ )); -DATA(insert OID = 1952 ( byteage PGNSP PGUID 12 1 0 0 0 f f f t t f i s 2 0 16 "17 17" _null_ _null_ _null_ _null_ _null_ byteage _null_ _null_ _null_ )); -DATA(insert OID = 1953 ( byteane PGNSP PGUID 12 1 0 0 0 f f f t t f i s 2 0 16 "17 17" _null_ _null_ _null_ _null_ _null_ byteane _null_ _null_ _null_ )); -DATA(insert OID = 1954 ( byteacmp PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 23 "17 17" _null_ _null_ _null_ _null_ _null_ byteacmp _null_ _null_ _null_ )); -DESCR("less-equal-greater"); -DATA(insert OID = 3331 ( bytea_sortsupport PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 2278 "2281" _null_ _null_ _null_ _null_ _null_ bytea_sortsupport _null_ _null_ _null_ )); -DESCR("sort support"); - -DATA(insert OID = 3917 ( timestamp_transform PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 2281 "2281" _null_ _null_ _null_ _null_ _null_ timestamp_transform _null_ _null_ _null_ )); -DESCR("transform a timestamp length coercion"); -DATA(insert OID = 3944 ( time_transform PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 2281 "2281" _null_ _null_ _null_ _null_ _null_ time_transform _null_ _null_ _null_ )); -DESCR("transform a time length coercion"); - -DATA(insert OID = 1961 ( timestamp PGNSP PGUID 12 1 0 0 timestamp_transform f f f f t f i s 2 0 1114 "1114 23" _null_ _null_ _null_ _null_ _null_ timestamp_scale _null_ _null_ _null_ )); -DESCR("adjust timestamp precision"); - -DATA(insert OID = 1965 ( oidlarger PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 26 "26 26" _null_ _null_ _null_ _null_ _null_ oidlarger _null_ _null_ _null_ )); -DESCR("larger of two"); -DATA(insert OID = 1966 ( oidsmaller PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 26 "26 26" _null_ _null_ _null_ _null_ _null_ oidsmaller _null_ _null_ _null_ )); -DESCR("smaller of two"); - -DATA(insert OID = 1967 ( timestamptz PGNSP PGUID 12 1 0 0 timestamp_transform f f f f t f i s 2 0 1184 "1184 23" _null_ _null_ _null_ _null_ _null_ timestamptz_scale _null_ _null_ _null_ )); -DESCR("adjust timestamptz precision"); -DATA(insert OID = 1968 ( time PGNSP PGUID 12 1 0 0 time_transform f f f f t f i s 2 0 1083 "1083 23" _null_ _null_ _null_ _null_ _null_ time_scale _null_ _null_ _null_ )); -DESCR("adjust time precision"); -DATA(insert OID = 1969 ( timetz PGNSP PGUID 12 1 0 0 time_transform f f f f t f i s 2 0 1266 "1266 23" _null_ _null_ _null_ _null_ _null_ timetz_scale _null_ _null_ _null_ )); -DESCR("adjust time with time zone precision"); - -DATA(insert OID = 2003 ( textanycat PGNSP PGUID 14 1 0 0 0 f f f f t f s s 2 0 25 "25 2776" _null_ _null_ _null_ _null_ _null_ "select $1 || $2::pg_catalog.text" _null_ _null_ _null_ )); -DATA(insert OID = 2004 ( anytextcat PGNSP PGUID 14 1 0 0 0 f f f f t f s s 2 0 25 "2776 25" _null_ 
_null_ _null_ _null_ _null_ "select $1::pg_catalog.text || $2" _null_ _null_ _null_ )); - -DATA(insert OID = 2005 ( bytealike PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 16 "17 17" _null_ _null_ _null_ _null_ _null_ bytealike _null_ _null_ _null_ )); -DATA(insert OID = 2006 ( byteanlike PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 16 "17 17" _null_ _null_ _null_ _null_ _null_ byteanlike _null_ _null_ _null_ )); -DATA(insert OID = 2007 ( like PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 16 "17 17" _null_ _null_ _null_ _null_ _null_ bytealike _null_ _null_ _null_ )); -DESCR("matches LIKE expression"); -DATA(insert OID = 2008 ( notlike PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 16 "17 17" _null_ _null_ _null_ _null_ _null_ byteanlike _null_ _null_ _null_ )); -DESCR("does not match LIKE expression"); -DATA(insert OID = 2009 ( like_escape PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 17 "17 17" _null_ _null_ _null_ _null_ _null_ like_escape_bytea _null_ _null_ _null_ )); -DESCR("convert LIKE pattern to use backslash escapes"); -DATA(insert OID = 2010 ( length PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 23 "17" _null_ _null_ _null_ _null_ _null_ byteaoctetlen _null_ _null_ _null_ )); -DESCR("octet length"); -DATA(insert OID = 2011 ( byteacat PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 17 "17 17" _null_ _null_ _null_ _null_ _null_ byteacat _null_ _null_ _null_ )); -DATA(insert OID = 2012 ( substring PGNSP PGUID 12 1 0 0 0 f f f f t f i s 3 0 17 "17 23 23" _null_ _null_ _null_ _null_ _null_ bytea_substr _null_ _null_ _null_ )); -DESCR("extract portion of string"); -DATA(insert OID = 2013 ( substring PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 17 "17 23" _null_ _null_ _null_ _null_ _null_ bytea_substr_no_len _null_ _null_ _null_ )); -DESCR("extract portion of string"); -DATA(insert OID = 2085 ( substr PGNSP PGUID 12 1 0 0 0 f f f f t f i s 3 0 17 "17 23 23" _null_ _null_ _null_ _null_ _null_ bytea_substr _null_ _null_ _null_ )); -DESCR("extract portion of string"); -DATA(insert OID = 2086 ( substr PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 17 "17 23" _null_ _null_ _null_ _null_ _null_ bytea_substr_no_len _null_ _null_ _null_ )); -DESCR("extract portion of string"); -DATA(insert OID = 2014 ( position PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 23 "17 17" _null_ _null_ _null_ _null_ _null_ byteapos _null_ _null_ _null_ )); -DESCR("position of substring"); -DATA(insert OID = 2015 ( btrim PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 17 "17 17" _null_ _null_ _null_ _null_ _null_ byteatrim _null_ _null_ _null_ )); -DESCR("trim both ends of string"); - -DATA(insert OID = 2019 ( time PGNSP PGUID 12 1 0 0 0 f f f f t f s s 1 0 1083 "1184" _null_ _null_ _null_ _null_ _null_ timestamptz_time _null_ _null_ _null_ )); -DESCR("convert timestamp with time zone to time"); -DATA(insert OID = 2020 ( date_trunc PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 1114 "25 1114" _null_ _null_ _null_ _null_ _null_ timestamp_trunc _null_ _null_ _null_ )); -DESCR("truncate timestamp to specified units"); -DATA(insert OID = 2021 ( date_part PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 701 "25 1114" _null_ _null_ _null_ _null_ _null_ timestamp_part _null_ _null_ _null_ )); -DESCR("extract field from timestamp"); -DATA(insert OID = 2023 ( timestamp PGNSP PGUID 12 1 0 0 0 f f f f t f s s 1 0 1114 "702" _null_ _null_ _null_ _null_ _null_ abstime_timestamp _null_ _null_ _null_ )); -DESCR("convert abstime to timestamp"); -DATA(insert OID = 2024 ( timestamp PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 1114 "1082" _null_ _null_ _null_ _null_ _null_ 
date_timestamp _null_ _null_ _null_ )); -DESCR("convert date to timestamp"); -DATA(insert OID = 2025 ( timestamp PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 1114 "1082 1083" _null_ _null_ _null_ _null_ _null_ datetime_timestamp _null_ _null_ _null_ )); -DESCR("convert date and time to timestamp"); -DATA(insert OID = 2027 ( timestamp PGNSP PGUID 12 1 0 0 0 f f f f t f s s 1 0 1114 "1184" _null_ _null_ _null_ _null_ _null_ timestamptz_timestamp _null_ _null_ _null_ )); -DESCR("convert timestamp with time zone to timestamp"); -DATA(insert OID = 2028 ( timestamptz PGNSP PGUID 12 1 0 0 0 f f f f t f s s 1 0 1184 "1114" _null_ _null_ _null_ _null_ _null_ timestamp_timestamptz _null_ _null_ _null_ )); -DESCR("convert timestamp to timestamp with time zone"); -DATA(insert OID = 2029 ( date PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 1082 "1114" _null_ _null_ _null_ _null_ _null_ timestamp_date _null_ _null_ _null_ )); -DESCR("convert timestamp to date"); -DATA(insert OID = 2030 ( abstime PGNSP PGUID 12 1 0 0 0 f f f f t f s s 1 0 702 "1114" _null_ _null_ _null_ _null_ _null_ timestamp_abstime _null_ _null_ _null_ )); -DESCR("convert timestamp to abstime"); -DATA(insert OID = 2031 ( timestamp_mi PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 1186 "1114 1114" _null_ _null_ _null_ _null_ _null_ timestamp_mi _null_ _null_ _null_ )); -DATA(insert OID = 2032 ( timestamp_pl_interval PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 1114 "1114 1186" _null_ _null_ _null_ _null_ _null_ timestamp_pl_interval _null_ _null_ _null_ )); -DATA(insert OID = 2033 ( timestamp_mi_interval PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 1114 "1114 1186" _null_ _null_ _null_ _null_ _null_ timestamp_mi_interval _null_ _null_ _null_ )); -DATA(insert OID = 2035 ( timestamp_smaller PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 1114 "1114 1114" _null_ _null_ _null_ _null_ _null_ timestamp_smaller _null_ _null_ _null_ )); -DESCR("smaller of two"); -DATA(insert OID = 2036 ( timestamp_larger PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 1114 "1114 1114" _null_ _null_ _null_ _null_ _null_ timestamp_larger _null_ _null_ _null_ )); -DESCR("larger of two"); -DATA(insert OID = 2037 ( timezone PGNSP PGUID 12 1 0 0 0 f f f f t f v s 2 0 1266 "25 1266" _null_ _null_ _null_ _null_ _null_ timetz_zone _null_ _null_ _null_ )); -DESCR("adjust time with time zone to new zone"); -DATA(insert OID = 2038 ( timezone PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 1266 "1186 1266" _null_ _null_ _null_ _null_ _null_ timetz_izone _null_ _null_ _null_ )); -DESCR("adjust time with time zone to new zone"); -DATA(insert OID = 2039 ( timestamp_hash PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 23 "1114" _null_ _null_ _null_ _null_ _null_ timestamp_hash _null_ _null_ _null_ )); -DESCR("hash"); -DATA(insert OID = 2041 ( overlaps PGNSP PGUID 12 1 0 0 0 f f f f f f i s 4 0 16 "1114 1114 1114 1114" _null_ _null_ _null_ _null_ _null_ overlaps_timestamp _null_ _null_ _null_ )); -DESCR("intervals overlap?"); -DATA(insert OID = 2042 ( overlaps PGNSP PGUID 14 1 0 0 0 f f f f f f i s 4 0 16 "1114 1186 1114 1186" _null_ _null_ _null_ _null_ _null_ "select ($1, ($1 + $2)) overlaps ($3, ($3 + $4))" _null_ _null_ _null_ )); -DESCR("intervals overlap?"); -DATA(insert OID = 2043 ( overlaps PGNSP PGUID 14 1 0 0 0 f f f f f f i s 4 0 16 "1114 1114 1114 1186" _null_ _null_ _null_ _null_ _null_ "select ($1, $2) overlaps ($3, ($3 + $4))" _null_ _null_ _null_ )); -DESCR("intervals overlap?"); -DATA(insert OID = 2044 ( overlaps PGNSP PGUID 14 1 0 0 0 f f f f f f i s 4 0 16 "1114 1186 1114 1114" _null_ _null_ 
_null_ _null_ _null_ "select ($1, ($1 + $2)) overlaps ($3, $4)" _null_ _null_ _null_ )); -DESCR("intervals overlap?"); -DATA(insert OID = 2045 ( timestamp_cmp PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 23 "1114 1114" _null_ _null_ _null_ _null_ _null_ timestamp_cmp _null_ _null_ _null_ )); -DESCR("less-equal-greater"); -DATA(insert OID = 3137 ( timestamp_sortsupport PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 2278 "2281" _null_ _null_ _null_ _null_ _null_ timestamp_sortsupport _null_ _null_ _null_ )); -DESCR("sort support"); -DATA(insert OID = 2046 ( time PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 1083 "1266" _null_ _null_ _null_ _null_ _null_ timetz_time _null_ _null_ _null_ )); -DESCR("convert time with time zone to time"); -DATA(insert OID = 2047 ( timetz PGNSP PGUID 12 1 0 0 0 f f f f t f s s 1 0 1266 "1083" _null_ _null_ _null_ _null_ _null_ time_timetz _null_ _null_ _null_ )); -DESCR("convert time to time with time zone"); -DATA(insert OID = 2048 ( isfinite PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 16 "1114" _null_ _null_ _null_ _null_ _null_ timestamp_finite _null_ _null_ _null_ )); -DESCR("finite timestamp?"); -DATA(insert OID = 2049 ( to_char PGNSP PGUID 12 1 0 0 0 f f f f t f s s 2 0 25 "1114 25" _null_ _null_ _null_ _null_ _null_ timestamp_to_char _null_ _null_ _null_ )); -DESCR("format timestamp to text"); -DATA(insert OID = 2052 ( timestamp_eq PGNSP PGUID 12 1 0 0 0 f f f t t f i s 2 0 16 "1114 1114" _null_ _null_ _null_ _null_ _null_ timestamp_eq _null_ _null_ _null_ )); -DATA(insert OID = 2053 ( timestamp_ne PGNSP PGUID 12 1 0 0 0 f f f t t f i s 2 0 16 "1114 1114" _null_ _null_ _null_ _null_ _null_ timestamp_ne _null_ _null_ _null_ )); -DATA(insert OID = 2054 ( timestamp_lt PGNSP PGUID 12 1 0 0 0 f f f t t f i s 2 0 16 "1114 1114" _null_ _null_ _null_ _null_ _null_ timestamp_lt _null_ _null_ _null_ )); -DATA(insert OID = 2055 ( timestamp_le PGNSP PGUID 12 1 0 0 0 f f f t t f i s 2 0 16 "1114 1114" _null_ _null_ _null_ _null_ _null_ timestamp_le _null_ _null_ _null_ )); -DATA(insert OID = 2056 ( timestamp_ge PGNSP PGUID 12 1 0 0 0 f f f t t f i s 2 0 16 "1114 1114" _null_ _null_ _null_ _null_ _null_ timestamp_ge _null_ _null_ _null_ )); -DATA(insert OID = 2057 ( timestamp_gt PGNSP PGUID 12 1 0 0 0 f f f t t f i s 2 0 16 "1114 1114" _null_ _null_ _null_ _null_ _null_ timestamp_gt _null_ _null_ _null_ )); -DATA(insert OID = 2058 ( age PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 1186 "1114 1114" _null_ _null_ _null_ _null_ _null_ timestamp_age _null_ _null_ _null_ )); -DESCR("date difference preserving months and years"); -DATA(insert OID = 2059 ( age PGNSP PGUID 14 1 0 0 0 f f f f t f s s 1 0 1186 "1114" _null_ _null_ _null_ _null_ _null_ "select pg_catalog.age(cast(current_date as timestamp without time zone), $1)" _null_ _null_ _null_ )); -DESCR("date difference from today preserving months and years"); - -DATA(insert OID = 2069 ( timezone PGNSP PGUID 12 1 0 0 timestamp_zone_transform f f f f t f i s 2 0 1184 "25 1114" _null_ _null_ _null_ _null_ _null_ timestamp_zone _null_ _null_ _null_ )); -DESCR("adjust timestamp to new time zone"); -DATA(insert OID = 2070 ( timezone PGNSP PGUID 12 1 0 0 timestamp_izone_transform f f f f t f i s 2 0 1184 "1186 1114" _null_ _null_ _null_ _null_ _null_ timestamp_izone _null_ _null_ _null_ )); -DESCR("adjust timestamp to new time zone"); -DATA(insert OID = 2071 ( date_pl_interval PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 1114 "1082 1186" _null_ _null_ _null_ _null_ _null_ date_pl_interval _null_ _null_ _null_ )); -DATA(insert OID = 2072 ( 
date_mi_interval PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 1114 "1082 1186" _null_ _null_ _null_ _null_ _null_ date_mi_interval _null_ _null_ _null_ )); - -DATA(insert OID = 2073 ( substring PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 25 "25 25" _null_ _null_ _null_ _null_ _null_ textregexsubstr _null_ _null_ _null_ )); -DESCR("extract text matching regular expression"); -DATA(insert OID = 2074 ( substring PGNSP PGUID 14 1 0 0 0 f f f f t f i s 3 0 25 "25 25 25" _null_ _null_ _null_ _null_ _null_ "select pg_catalog.substring($1, pg_catalog.similar_escape($2, $3))" _null_ _null_ _null_ )); -DESCR("extract text matching SQL99 regular expression"); - -DATA(insert OID = 2075 ( bit PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 1560 "20 23" _null_ _null_ _null_ _null_ _null_ bitfromint8 _null_ _null_ _null_ )); -DESCR("convert int8 to bitstring"); -DATA(insert OID = 2076 ( int8 PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 20 "1560" _null_ _null_ _null_ _null_ _null_ bittoint8 _null_ _null_ _null_ )); -DESCR("convert bitstring to int8"); - -DATA(insert OID = 2077 ( current_setting PGNSP PGUID 12 1 0 0 0 f f f f t f s s 1 0 25 "25" _null_ _null_ _null_ _null_ _null_ show_config_by_name _null_ _null_ _null_ )); -DESCR("SHOW X as a function"); -DATA(insert OID = 3294 ( current_setting PGNSP PGUID 12 1 0 0 0 f f f f t f s s 2 0 25 "25 16" _null_ _null_ _null_ _null_ _null_ show_config_by_name_missing_ok _null_ _null_ _null_ )); -DESCR("SHOW X as a function, optionally no error for missing variable"); -DATA(insert OID = 2078 ( set_config PGNSP PGUID 12 1 0 0 0 f f f f f f v u 3 0 25 "25 25 16" _null_ _null_ _null_ _null_ _null_ set_config_by_name _null_ _null_ _null_ )); -DESCR("SET X as a function"); -DATA(insert OID = 2084 ( pg_show_all_settings PGNSP PGUID 12 1 1000 0 0 f f f f t t s s 0 0 2249 "" "{25,25,25,25,25,25,25,25,25,25,25,1009,25,25,25,23,16}" "{o,o,o,o,o,o,o,o,o,o,o,o,o,o,o,o,o}" "{name,setting,unit,category,short_desc,extra_desc,context,vartype,source,min_val,max_val,enumvals,boot_val,reset_val,sourcefile,sourceline,pending_restart}" _null_ _null_ show_all_settings _null_ _null_ _null_ )); -DESCR("SHOW ALL as a function"); -DATA(insert OID = 3329 ( pg_show_all_file_settings PGNSP PGUID 12 1 1000 0 0 f f f f t t v s 0 0 2249 "" "{25,23,23,25,25,16,25}" "{o,o,o,o,o,o,o}" "{sourcefile,sourceline,seqno,name,setting,applied,error}" _null_ _null_ show_all_file_settings _null_ _null_ _null_ )); -DESCR("show config file settings"); -DATA(insert OID = 3401 ( pg_hba_file_rules PGNSP PGUID 12 1 1000 0 0 f f f f t t v s 0 0 2249 "" "{23,25,1009,1009,25,25,25,1009,25}" "{o,o,o,o,o,o,o,o,o}" "{line_number,type,database,user_name,address,netmask,auth_method,options,error}" _null_ _null_ pg_hba_file_rules _null_ _null_ _null_ )); -DESCR("show pg_hba.conf rules"); -DATA(insert OID = 1371 ( pg_lock_status PGNSP PGUID 12 1 1000 0 0 f f f f t t v s 0 0 2249 "" "{25,26,26,23,21,25,28,26,26,21,25,23,25,16,16}" "{o,o,o,o,o,o,o,o,o,o,o,o,o,o,o}" "{locktype,database,relation,page,tuple,virtualxid,transactionid,classid,objid,objsubid,virtualtransaction,pid,mode,granted,fastpath}" _null_ _null_ pg_lock_status _null_ _null_ _null_ )); -DESCR("view system lock information"); -DATA(insert OID = 2561 ( pg_blocking_pids PGNSP PGUID 12 1 0 0 0 f f f f t f v s 1 0 1007 "23" _null_ _null_ _null_ _null_ _null_ pg_blocking_pids _null_ _null_ _null_ )); -DESCR("get array of PIDs of sessions blocking specified backend PID from acquiring a heavyweight lock"); -DATA(insert OID = 3376 ( pg_safe_snapshot_blocking_pids PGNSP PGUID 12 
1 0 0 0 f f f f t f v s 1 0 1007 "23" _null_ _null_ _null_ _null_ _null_ pg_safe_snapshot_blocking_pids _null_ _null_ _null_ )); -DESCR("get array of PIDs of sessions blocking specified backend PID from acquiring a safe snapshot"); -DATA(insert OID = 3378 ( pg_isolation_test_session_is_blocked PGNSP PGUID 12 1 0 0 0 f f f f t f v s 2 0 16 "23 1007" _null_ _null_ _null_ _null_ _null_ pg_isolation_test_session_is_blocked _null_ _null_ _null_ )); -DESCR("isolationtester support function"); -DATA(insert OID = 1065 ( pg_prepared_xact PGNSP PGUID 12 1 1000 0 0 f f f f t t v s 0 0 2249 "" "{28,25,1184,26,26}" "{o,o,o,o,o}" "{transaction,gid,prepared,ownerid,dbid}" _null_ _null_ pg_prepared_xact _null_ _null_ _null_ )); -DESCR("view two-phase transactions"); -DATA(insert OID = 3819 ( pg_get_multixact_members PGNSP PGUID 12 1 1000 0 0 f f f f t t v s 1 0 2249 "28" "{28,28,25}" "{i,o,o}" "{multixid,xid,mode}" _null_ _null_ pg_get_multixact_members _null_ _null_ _null_ )); -DESCR("view members of a multixactid"); - -DATA(insert OID = 3581 ( pg_xact_commit_timestamp PGNSP PGUID 12 1 0 0 0 f f f f t f v s 1 0 1184 "28" _null_ _null_ _null_ _null_ _null_ pg_xact_commit_timestamp _null_ _null_ _null_ )); -DESCR("get commit timestamp of a transaction"); - -DATA(insert OID = 3583 ( pg_last_committed_xact PGNSP PGUID 12 1 0 0 0 f f f f t f v s 0 0 2249 "" "{28,1184}" "{o,o}" "{xid,timestamp}" _null_ _null_ pg_last_committed_xact _null_ _null_ _null_ )); -DESCR("get transaction Id and commit timestamp of latest transaction commit"); - -DATA(insert OID = 3537 ( pg_describe_object PGNSP PGUID 12 1 0 0 0 f f f f t f s s 3 0 25 "26 26 23" _null_ _null_ _null_ _null_ _null_ pg_describe_object _null_ _null_ _null_ )); -DESCR("get identification of SQL object"); - -DATA(insert OID = 3839 ( pg_identify_object PGNSP PGUID 12 1 0 0 0 f f f f t f s s 3 0 2249 "26 26 23" "{26,26,23,25,25,25,25}" "{i,i,i,o,o,o,o}" "{classid,objid,objsubid,type,schema,name,identity}" _null_ _null_ pg_identify_object _null_ _null_ _null_ )); -DESCR("get machine-parseable identification of SQL object"); - -DATA(insert OID = 3382 ( pg_identify_object_as_address PGNSP PGUID 12 1 0 0 0 f f f f t f s s 3 0 2249 "26 26 23" "{26,26,23,25,1009,1009}" "{i,i,i,o,o,o}" "{classid,objid,objsubid,type,object_names,object_args}" _null_ _null_ pg_identify_object_as_address _null_ _null_ _null_ )); -DESCR("get identification of SQL object for pg_get_object_address()"); - -DATA(insert OID = 3954 ( pg_get_object_address PGNSP PGUID 12 1 0 0 0 f f f f t f s s 3 0 2249 "25 1009 1009" "{25,1009,1009,26,26,23}" "{i,i,i,o,o,o}" "{type,name,args,classid,objid,objsubid}" _null_ _null_ pg_get_object_address _null_ _null_ _null_ )); -DESCR("get OID-based object address from name/args arrays"); - -DATA(insert OID = 2079 ( pg_table_is_visible PGNSP PGUID 12 10 0 0 0 f f f f t f s s 1 0 16 "26" _null_ _null_ _null_ _null_ _null_ pg_table_is_visible _null_ _null_ _null_ )); -DESCR("is table visible in search path?"); -DATA(insert OID = 2080 ( pg_type_is_visible PGNSP PGUID 12 10 0 0 0 f f f f t f s s 1 0 16 "26" _null_ _null_ _null_ _null_ _null_ pg_type_is_visible _null_ _null_ _null_ )); -DESCR("is type visible in search path?"); -DATA(insert OID = 2081 ( pg_function_is_visible PGNSP PGUID 12 10 0 0 0 f f f f t f s s 1 0 16 "26" _null_ _null_ _null_ _null_ _null_ pg_function_is_visible _null_ _null_ _null_ )); -DESCR("is function visible in search path?"); -DATA(insert OID = 2082 ( pg_operator_is_visible PGNSP PGUID 12 10 0 0 0 f f f f t f s s 1 0 16 "26" _null_ _null_ 
_null_ _null_ _null_ pg_operator_is_visible _null_ _null_ _null_ )); -DESCR("is operator visible in search path?"); -DATA(insert OID = 2083 ( pg_opclass_is_visible PGNSP PGUID 12 10 0 0 0 f f f f t f s s 1 0 16 "26" _null_ _null_ _null_ _null_ _null_ pg_opclass_is_visible _null_ _null_ _null_ )); -DESCR("is opclass visible in search path?"); -DATA(insert OID = 3829 ( pg_opfamily_is_visible PGNSP PGUID 12 10 0 0 0 f f f f t f s s 1 0 16 "26" _null_ _null_ _null_ _null_ _null_ pg_opfamily_is_visible _null_ _null_ _null_ )); -DESCR("is opfamily visible in search path?"); -DATA(insert OID = 2093 ( pg_conversion_is_visible PGNSP PGUID 12 10 0 0 0 f f f f t f s s 1 0 16 "26" _null_ _null_ _null_ _null_ _null_ pg_conversion_is_visible _null_ _null_ _null_ )); -DESCR("is conversion visible in search path?"); -DATA(insert OID = 3403 ( pg_statistics_obj_is_visible PGNSP PGUID 12 10 0 0 0 f f f f t f s s 1 0 16 "26" _null_ _null_ _null_ _null_ _null_ pg_statistics_obj_is_visible _null_ _null_ _null_ )); -DESCR("is statistics object visible in search path?"); -DATA(insert OID = 3756 ( pg_ts_parser_is_visible PGNSP PGUID 12 10 0 0 0 f f f f t f s s 1 0 16 "26" _null_ _null_ _null_ _null_ _null_ pg_ts_parser_is_visible _null_ _null_ _null_ )); -DESCR("is text search parser visible in search path?"); -DATA(insert OID = 3757 ( pg_ts_dict_is_visible PGNSP PGUID 12 10 0 0 0 f f f f t f s s 1 0 16 "26" _null_ _null_ _null_ _null_ _null_ pg_ts_dict_is_visible _null_ _null_ _null_ )); -DESCR("is text search dictionary visible in search path?"); -DATA(insert OID = 3768 ( pg_ts_template_is_visible PGNSP PGUID 12 10 0 0 0 f f f f t f s s 1 0 16 "26" _null_ _null_ _null_ _null_ _null_ pg_ts_template_is_visible _null_ _null_ _null_ )); -DESCR("is text search template visible in search path?"); -DATA(insert OID = 3758 ( pg_ts_config_is_visible PGNSP PGUID 12 10 0 0 0 f f f f t f s s 1 0 16 "26" _null_ _null_ _null_ _null_ _null_ pg_ts_config_is_visible _null_ _null_ _null_ )); -DESCR("is text search configuration visible in search path?"); -DATA(insert OID = 3815 ( pg_collation_is_visible PGNSP PGUID 12 10 0 0 0 f f f f t f s s 1 0 16 "26" _null_ _null_ _null_ _null_ _null_ pg_collation_is_visible _null_ _null_ _null_ )); -DESCR("is collation visible in search path?"); - -DATA(insert OID = 2854 ( pg_my_temp_schema PGNSP PGUID 12 1 0 0 0 f f f f t f s r 0 0 26 "" _null_ _null_ _null_ _null_ _null_ pg_my_temp_schema _null_ _null_ _null_ )); -DESCR("get OID of current session's temp schema, if any"); -DATA(insert OID = 2855 ( pg_is_other_temp_schema PGNSP PGUID 12 1 0 0 0 f f f f t f s s 1 0 16 "26" _null_ _null_ _null_ _null_ _null_ pg_is_other_temp_schema _null_ _null_ _null_ )); -DESCR("is schema another session's temp schema?"); - -DATA(insert OID = 2171 ( pg_cancel_backend PGNSP PGUID 12 1 0 0 0 f f f f t f v s 1 0 16 "23" _null_ _null_ _null_ _null_ _null_ pg_cancel_backend _null_ _null_ _null_ )); -DESCR("cancel a server process' current query"); -DATA(insert OID = 2096 ( pg_terminate_backend PGNSP PGUID 12 1 0 0 0 f f f f t f v s 1 0 16 "23" _null_ _null_ _null_ _null_ _null_ pg_terminate_backend _null_ _null_ _null_ )); -DESCR("terminate a server process"); -DATA(insert OID = 2172 ( pg_start_backup PGNSP PGUID 12 1 0 0 0 f f f f t f v r 3 0 3220 "25 16 16" _null_ _null_ _null_ _null_ _null_ pg_start_backup _null_ _null_ _null_ )); -DESCR("prepare for taking an online backup"); -DATA(insert OID = 2173 ( pg_stop_backup PGNSP PGUID 12 1 0 0 0 f f f f t f v r 0 0 3220 "" _null_ _null_ _null_ _null_ _null_ 
pg_stop_backup _null_ _null_ _null_ )); -DESCR("finish taking an online backup"); -DATA(insert OID = 2739 ( pg_stop_backup PGNSP PGUID 12 1 1 0 0 f f f f t t v r 2 0 2249 "16 16" "{16,16,3220,25,25}" "{i,i,o,o,o}" "{exclusive,wait_for_archive,lsn,labelfile,spcmapfile}" _null_ _null_ pg_stop_backup_v2 _null_ _null_ _null_ )); -DESCR("finish taking an online backup"); -DATA(insert OID = 3813 ( pg_is_in_backup PGNSP PGUID 12 1 0 0 0 f f f f t f v s 0 0 16 "" _null_ _null_ _null_ _null_ _null_ pg_is_in_backup _null_ _null_ _null_ )); -DESCR("true if server is in online backup"); -DATA(insert OID = 3814 ( pg_backup_start_time PGNSP PGUID 12 1 0 0 0 f f f f t f s s 0 0 1184 "" _null_ _null_ _null_ _null_ _null_ pg_backup_start_time _null_ _null_ _null_ )); -DESCR("start time of an online backup"); -DATA(insert OID = 2848 ( pg_switch_wal PGNSP PGUID 12 1 0 0 0 f f f f t f v s 0 0 3220 "" _null_ _null_ _null_ _null_ _null_ pg_switch_wal _null_ _null_ _null_ )); -DESCR("switch to new wal file"); -DATA(insert OID = 3098 ( pg_create_restore_point PGNSP PGUID 12 1 0 0 0 f f f f t f v s 1 0 3220 "25" _null_ _null_ _null_ _null_ _null_ pg_create_restore_point _null_ _null_ _null_ )); -DESCR("create a named restore point"); -DATA(insert OID = 2849 ( pg_current_wal_lsn PGNSP PGUID 12 1 0 0 0 f f f f t f v s 0 0 3220 "" _null_ _null_ _null_ _null_ _null_ pg_current_wal_lsn _null_ _null_ _null_ )); -DESCR("current wal write location"); -DATA(insert OID = 2852 ( pg_current_wal_insert_lsn PGNSP PGUID 12 1 0 0 0 f f f f t f v s 0 0 3220 "" _null_ _null_ _null_ _null_ _null_ pg_current_wal_insert_lsn _null_ _null_ _null_ )); -DESCR("current wal insert location"); -DATA(insert OID = 3330 ( pg_current_wal_flush_lsn PGNSP PGUID 12 1 0 0 0 f f f f t f v s 0 0 3220 "" _null_ _null_ _null_ _null_ _null_ pg_current_wal_flush_lsn _null_ _null_ _null_ )); -DESCR("current wal flush location"); -DATA(insert OID = 2850 ( pg_walfile_name_offset PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 2249 "3220" "{3220,25,23}" "{i,o,o}" "{lsn,file_name,file_offset}" _null_ _null_ pg_walfile_name_offset _null_ _null_ _null_ )); -DESCR("wal filename and byte offset, given a wal location"); -DATA(insert OID = 2851 ( pg_walfile_name PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 25 "3220" _null_ _null_ _null_ _null_ _null_ pg_walfile_name _null_ _null_ _null_ )); -DESCR("wal filename, given a wal location"); - -DATA(insert OID = 3165 ( pg_wal_lsn_diff PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 1700 "3220 3220" _null_ _null_ _null_ _null_ _null_ pg_wal_lsn_diff _null_ _null_ _null_ )); -DESCR("difference in bytes, given two wal locations"); - -DATA(insert OID = 3809 ( pg_export_snapshot PGNSP PGUID 12 1 0 0 0 f f f f t f v u 0 0 25 "" _null_ _null_ _null_ _null_ _null_ pg_export_snapshot _null_ _null_ _null_ )); -DESCR("export a snapshot"); - -DATA(insert OID = 3810 ( pg_is_in_recovery PGNSP PGUID 12 1 0 0 0 f f f f t f v s 0 0 16 "" _null_ _null_ _null_ _null_ _null_ pg_is_in_recovery _null_ _null_ _null_ )); -DESCR("true if server is in recovery"); - -DATA(insert OID = 3820 ( pg_last_wal_receive_lsn PGNSP PGUID 12 1 0 0 0 f f f f t f v s 0 0 3220 "" _null_ _null_ _null_ _null_ _null_ pg_last_wal_receive_lsn _null_ _null_ _null_ )); -DESCR("current wal flush location"); -DATA(insert OID = 3821 ( pg_last_wal_replay_lsn PGNSP PGUID 12 1 0 0 0 f f f f t f v s 0 0 3220 "" _null_ _null_ _null_ _null_ _null_ pg_last_wal_replay_lsn _null_ _null_ _null_ )); -DESCR("last wal replay location"); -DATA(insert OID = 3830 ( pg_last_xact_replay_timestamp PGNSP 
PGUID 12 1 0 0 0 f f f f t f v s 0 0 1184 "" _null_ _null_ _null_ _null_ _null_ pg_last_xact_replay_timestamp _null_ _null_ _null_ )); -DESCR("timestamp of last replay xact"); - -DATA(insert OID = 3071 ( pg_wal_replay_pause PGNSP PGUID 12 1 0 0 0 f f f f t f v s 0 0 2278 "" _null_ _null_ _null_ _null_ _null_ pg_wal_replay_pause _null_ _null_ _null_ )); -DESCR("pause wal replay"); -DATA(insert OID = 3072 ( pg_wal_replay_resume PGNSP PGUID 12 1 0 0 0 f f f f t f v s 0 0 2278 "" _null_ _null_ _null_ _null_ _null_ pg_wal_replay_resume _null_ _null_ _null_ )); -DESCR("resume wal replay, if it was paused"); -DATA(insert OID = 3073 ( pg_is_wal_replay_paused PGNSP PGUID 12 1 0 0 0 f f f f t f v s 0 0 16 "" _null_ _null_ _null_ _null_ _null_ pg_is_wal_replay_paused _null_ _null_ _null_ )); -DESCR("true if wal replay is paused"); - -DATA(insert OID = 2621 ( pg_reload_conf PGNSP PGUID 12 1 0 0 0 f f f f t f v s 0 0 16 "" _null_ _null_ _null_ _null_ _null_ pg_reload_conf _null_ _null_ _null_ )); -DESCR("reload configuration files"); -DATA(insert OID = 2622 ( pg_rotate_logfile PGNSP PGUID 12 1 0 0 0 f f f f t f v s 0 0 16 "" _null_ _null_ _null_ _null_ _null_ pg_rotate_logfile _null_ _null_ _null_ )); -DESCR("rotate log file"); -DATA(insert OID = 3800 ( pg_current_logfile PGNSP PGUID 12 1 0 0 0 f f f f f f v s 0 0 25 "" _null_ _null_ _null_ _null_ _null_ pg_current_logfile _null_ _null_ _null_ )); -DESCR("current logging collector file location"); -DATA(insert OID = 3801 ( pg_current_logfile PGNSP PGUID 12 1 0 0 0 f f f f f f v s 1 0 25 "25" _null_ _null_ _null_ _null_ _null_ pg_current_logfile_1arg _null_ _null_ _null_ )); -DESCR("current logging collector file location"); - -DATA(insert OID = 2623 ( pg_stat_file PGNSP PGUID 12 1 0 0 0 f f f f t f v s 1 0 2249 "25" "{25,20,1184,1184,1184,1184,16}" "{i,o,o,o,o,o,o}" "{filename,size,access,modification,change,creation,isdir}" _null_ _null_ pg_stat_file_1arg _null_ _null_ _null_ )); -DESCR("get information about file"); -DATA(insert OID = 3307 ( pg_stat_file PGNSP PGUID 12 1 0 0 0 f f f f t f v s 2 0 2249 "25 16" "{25,16,20,1184,1184,1184,1184,16}" "{i,i,o,o,o,o,o,o}" "{filename,missing_ok,size,access,modification,change,creation,isdir}" _null_ _null_ pg_stat_file _null_ _null_ _null_ )); -DESCR("get information about file"); -DATA(insert OID = 2624 ( pg_read_file PGNSP PGUID 12 1 0 0 0 f f f f t f v s 3 0 25 "25 20 20" _null_ _null_ _null_ _null_ _null_ pg_read_file_off_len _null_ _null_ _null_ )); -DESCR("read text from a file"); -DATA(insert OID = 3293 ( pg_read_file PGNSP PGUID 12 1 0 0 0 f f f f t f v s 4 0 25 "25 20 20 16" _null_ _null_ _null_ _null_ _null_ pg_read_file _null_ _null_ _null_ )); -DESCR("read text from a file"); -DATA(insert OID = 3826 ( pg_read_file PGNSP PGUID 12 1 0 0 0 f f f f t f v s 1 0 25 "25" _null_ _null_ _null_ _null_ _null_ pg_read_file_all _null_ _null_ _null_ )); -DESCR("read text from a file"); -DATA(insert OID = 3827 ( pg_read_binary_file PGNSP PGUID 12 1 0 0 0 f f f f t f v s 3 0 17 "25 20 20" _null_ _null_ _null_ _null_ _null_ pg_read_binary_file_off_len _null_ _null_ _null_ )); -DESCR("read bytea from a file"); -DATA(insert OID = 3295 ( pg_read_binary_file PGNSP PGUID 12 1 0 0 0 f f f f t f v s 4 0 17 "25 20 20 16" _null_ _null_ _null_ _null_ _null_ pg_read_binary_file _null_ _null_ _null_ )); -DESCR("read bytea from a file"); -DATA(insert OID = 3828 ( pg_read_binary_file PGNSP PGUID 12 1 0 0 0 f f f f t f v s 1 0 17 "25" _null_ _null_ _null_ _null_ _null_ pg_read_binary_file_all _null_ _null_ _null_ )); 
-DESCR("read bytea from a file"); -DATA(insert OID = 2625 ( pg_ls_dir PGNSP PGUID 12 1 1000 0 0 f f f f t t v s 1 0 25 "25" _null_ _null_ _null_ _null_ _null_ pg_ls_dir_1arg _null_ _null_ _null_ )); -DESCR("list all files in a directory"); -DATA(insert OID = 3297 ( pg_ls_dir PGNSP PGUID 12 1 1000 0 0 f f f f t t v s 3 0 25 "25 16 16" _null_ _null_ _null_ _null_ _null_ pg_ls_dir _null_ _null_ _null_ )); -DESCR("list all files in a directory"); -DATA(insert OID = 2626 ( pg_sleep PGNSP PGUID 12 1 0 0 0 f f f f t f v s 1 0 2278 "701" _null_ _null_ _null_ _null_ _null_ pg_sleep _null_ _null_ _null_ )); -DESCR("sleep for the specified time in seconds"); -DATA(insert OID = 3935 ( pg_sleep_for PGNSP PGUID 14 1 0 0 0 f f f f t f v s 1 0 2278 "1186" _null_ _null_ _null_ _null_ _null_ "select pg_catalog.pg_sleep(extract(epoch from pg_catalog.clock_timestamp() operator(pg_catalog.+) $1) operator(pg_catalog.-) extract(epoch from pg_catalog.clock_timestamp()))" _null_ _null_ _null_ )); -DESCR("sleep for the specified interval"); -DATA(insert OID = 3936 ( pg_sleep_until PGNSP PGUID 14 1 0 0 0 f f f f t f v s 1 0 2278 "1184" _null_ _null_ _null_ _null_ _null_ "select pg_catalog.pg_sleep(extract(epoch from $1) operator(pg_catalog.-) extract(epoch from pg_catalog.clock_timestamp()))" _null_ _null_ _null_ )); -DESCR("sleep until the specified time"); - -DATA(insert OID = 2971 ( text PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 25 "16" _null_ _null_ _null_ _null_ _null_ booltext _null_ _null_ _null_ )); -DESCR("convert boolean to text"); - -/* Aggregates (moved here from pg_aggregate for 7.3) */ - -DATA(insert OID = 2100 ( avg PGNSP PGUID 12 1 0 0 0 t f f f f f i s 1 0 1700 "20" _null_ _null_ _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ )); -DESCR("the average (arithmetic mean) as numeric of all bigint values"); -DATA(insert OID = 2101 ( avg PGNSP PGUID 12 1 0 0 0 t f f f f f i s 1 0 1700 "23" _null_ _null_ _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ )); -DESCR("the average (arithmetic mean) as numeric of all integer values"); -DATA(insert OID = 2102 ( avg PGNSP PGUID 12 1 0 0 0 t f f f f f i s 1 0 1700 "21" _null_ _null_ _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ )); -DESCR("the average (arithmetic mean) as numeric of all smallint values"); -DATA(insert OID = 2103 ( avg PGNSP PGUID 12 1 0 0 0 t f f f f f i s 1 0 1700 "1700" _null_ _null_ _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ )); -DESCR("the average (arithmetic mean) as numeric of all numeric values"); -DATA(insert OID = 2104 ( avg PGNSP PGUID 12 1 0 0 0 t f f f f f i s 1 0 701 "700" _null_ _null_ _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ )); -DESCR("the average (arithmetic mean) as float8 of all float4 values"); -DATA(insert OID = 2105 ( avg PGNSP PGUID 12 1 0 0 0 t f f f f f i s 1 0 701 "701" _null_ _null_ _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ )); -DESCR("the average (arithmetic mean) as float8 of all float8 values"); -DATA(insert OID = 2106 ( avg PGNSP PGUID 12 1 0 0 0 t f f f f f i s 1 0 1186 "1186" _null_ _null_ _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ )); -DESCR("the average (arithmetic mean) as interval of all interval values"); - -DATA(insert OID = 2107 ( sum PGNSP PGUID 12 1 0 0 0 t f f f f f i s 1 0 1700 "20" _null_ _null_ _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ )); -DESCR("sum as numeric across all bigint input values"); -DATA(insert OID = 2108 ( sum PGNSP PGUID 12 1 0 0 0 t f f f f f i s 1 0 20 "23" _null_ _null_ 
_null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ )); -DESCR("sum as bigint across all integer input values"); -DATA(insert OID = 2109 ( sum PGNSP PGUID 12 1 0 0 0 t f f f f f i s 1 0 20 "21" _null_ _null_ _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ )); -DESCR("sum as bigint across all smallint input values"); -DATA(insert OID = 2110 ( sum PGNSP PGUID 12 1 0 0 0 t f f f f f i s 1 0 700 "700" _null_ _null_ _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ )); -DESCR("sum as float4 across all float4 input values"); -DATA(insert OID = 2111 ( sum PGNSP PGUID 12 1 0 0 0 t f f f f f i s 1 0 701 "701" _null_ _null_ _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ )); -DESCR("sum as float8 across all float8 input values"); -DATA(insert OID = 2112 ( sum PGNSP PGUID 12 1 0 0 0 t f f f f f i s 1 0 790 "790" _null_ _null_ _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ )); -DESCR("sum as money across all money input values"); -DATA(insert OID = 2113 ( sum PGNSP PGUID 12 1 0 0 0 t f f f f f i s 1 0 1186 "1186" _null_ _null_ _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ )); -DESCR("sum as interval across all interval input values"); -DATA(insert OID = 2114 ( sum PGNSP PGUID 12 1 0 0 0 t f f f f f i s 1 0 1700 "1700" _null_ _null_ _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ )); -DESCR("sum as numeric across all numeric input values"); - -DATA(insert OID = 2115 ( max PGNSP PGUID 12 1 0 0 0 t f f f f f i s 1 0 20 "20" _null_ _null_ _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ )); -DESCR("maximum value of all bigint input values"); -DATA(insert OID = 2116 ( max PGNSP PGUID 12 1 0 0 0 t f f f f f i s 1 0 23 "23" _null_ _null_ _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ )); -DESCR("maximum value of all integer input values"); -DATA(insert OID = 2117 ( max PGNSP PGUID 12 1 0 0 0 t f f f f f i s 1 0 21 "21" _null_ _null_ _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ )); -DESCR("maximum value of all smallint input values"); -DATA(insert OID = 2118 ( max PGNSP PGUID 12 1 0 0 0 t f f f f f i s 1 0 26 "26" _null_ _null_ _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ )); -DESCR("maximum value of all oid input values"); -DATA(insert OID = 2119 ( max PGNSP PGUID 12 1 0 0 0 t f f f f f i s 1 0 700 "700" _null_ _null_ _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ )); -DESCR("maximum value of all float4 input values"); -DATA(insert OID = 2120 ( max PGNSP PGUID 12 1 0 0 0 t f f f f f i s 1 0 701 "701" _null_ _null_ _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ )); -DESCR("maximum value of all float8 input values"); -DATA(insert OID = 2121 ( max PGNSP PGUID 12 1 0 0 0 t f f f f f i s 1 0 702 "702" _null_ _null_ _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ )); -DESCR("maximum value of all abstime input values"); -DATA(insert OID = 2122 ( max PGNSP PGUID 12 1 0 0 0 t f f f f f i s 1 0 1082 "1082" _null_ _null_ _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ )); -DESCR("maximum value of all date input values"); -DATA(insert OID = 2123 ( max PGNSP PGUID 12 1 0 0 0 t f f f f f i s 1 0 1083 "1083" _null_ _null_ _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ )); -DESCR("maximum value of all time input values"); -DATA(insert OID = 2124 ( max PGNSP PGUID 12 1 0 0 0 t f f f f f i s 1 0 1266 "1266" _null_ _null_ _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ )); -DESCR("maximum value of all time with time zone input values"); -DATA(insert OID 
= 2125 ( max PGNSP PGUID 12 1 0 0 0 t f f f f f i s 1 0 790 "790" _null_ _null_ _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ )); -DESCR("maximum value of all money input values"); -DATA(insert OID = 2126 ( max PGNSP PGUID 12 1 0 0 0 t f f f f f i s 1 0 1114 "1114" _null_ _null_ _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ )); -DESCR("maximum value of all timestamp input values"); -DATA(insert OID = 2127 ( max PGNSP PGUID 12 1 0 0 0 t f f f f f i s 1 0 1184 "1184" _null_ _null_ _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ )); -DESCR("maximum value of all timestamp with time zone input values"); -DATA(insert OID = 2128 ( max PGNSP PGUID 12 1 0 0 0 t f f f f f i s 1 0 1186 "1186" _null_ _null_ _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ )); -DESCR("maximum value of all interval input values"); -DATA(insert OID = 2129 ( max PGNSP PGUID 12 1 0 0 0 t f f f f f i s 1 0 25 "25" _null_ _null_ _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ )); -DESCR("maximum value of all text input values"); -DATA(insert OID = 2130 ( max PGNSP PGUID 12 1 0 0 0 t f f f f f i s 1 0 1700 "1700" _null_ _null_ _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ )); -DESCR("maximum value of all numeric input values"); -DATA(insert OID = 2050 ( max PGNSP PGUID 12 1 0 0 0 t f f f f f i s 1 0 2277 "2277" _null_ _null_ _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ )); -DESCR("maximum value of all anyarray input values"); -DATA(insert OID = 2244 ( max PGNSP PGUID 12 1 0 0 0 t f f f f f i s 1 0 1042 "1042" _null_ _null_ _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ )); -DESCR("maximum value of all bpchar input values"); -DATA(insert OID = 2797 ( max PGNSP PGUID 12 1 0 0 0 t f f f f f i s 1 0 27 "27" _null_ _null_ _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ )); -DESCR("maximum value of all tid input values"); -DATA(insert OID = 3564 ( max PGNSP PGUID 12 1 0 0 0 t f f f f f i s 1 0 869 "869" _null_ _null_ _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ )); -DESCR("maximum value of all inet input values"); - -DATA(insert OID = 2131 ( min PGNSP PGUID 12 1 0 0 0 t f f f f f i s 1 0 20 "20" _null_ _null_ _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ )); -DESCR("minimum value of all bigint input values"); -DATA(insert OID = 2132 ( min PGNSP PGUID 12 1 0 0 0 t f f f f f i s 1 0 23 "23" _null_ _null_ _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ )); -DESCR("minimum value of all integer input values"); -DATA(insert OID = 2133 ( min PGNSP PGUID 12 1 0 0 0 t f f f f f i s 1 0 21 "21" _null_ _null_ _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ )); -DESCR("minimum value of all smallint input values"); -DATA(insert OID = 2134 ( min PGNSP PGUID 12 1 0 0 0 t f f f f f i s 1 0 26 "26" _null_ _null_ _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ )); -DESCR("minimum value of all oid input values"); -DATA(insert OID = 2135 ( min PGNSP PGUID 12 1 0 0 0 t f f f f f i s 1 0 700 "700" _null_ _null_ _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ )); -DESCR("minimum value of all float4 input values"); -DATA(insert OID = 2136 ( min PGNSP PGUID 12 1 0 0 0 t f f f f f i s 1 0 701 "701" _null_ _null_ _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ )); -DESCR("minimum value of all float8 input values"); -DATA(insert OID = 2137 ( min PGNSP PGUID 12 1 0 0 0 t f f f f f i s 1 0 702 "702" _null_ _null_ _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ )); -DESCR("minimum 
value of all abstime input values"); -DATA(insert OID = 2138 ( min PGNSP PGUID 12 1 0 0 0 t f f f f f i s 1 0 1082 "1082" _null_ _null_ _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ )); -DESCR("minimum value of all date input values"); -DATA(insert OID = 2139 ( min PGNSP PGUID 12 1 0 0 0 t f f f f f i s 1 0 1083 "1083" _null_ _null_ _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ )); -DESCR("minimum value of all time input values"); -DATA(insert OID = 2140 ( min PGNSP PGUID 12 1 0 0 0 t f f f f f i s 1 0 1266 "1266" _null_ _null_ _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ )); -DESCR("minimum value of all time with time zone input values"); -DATA(insert OID = 2141 ( min PGNSP PGUID 12 1 0 0 0 t f f f f f i s 1 0 790 "790" _null_ _null_ _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ )); -DESCR("minimum value of all money input values"); -DATA(insert OID = 2142 ( min PGNSP PGUID 12 1 0 0 0 t f f f f f i s 1 0 1114 "1114" _null_ _null_ _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ )); -DESCR("minimum value of all timestamp input values"); -DATA(insert OID = 2143 ( min PGNSP PGUID 12 1 0 0 0 t f f f f f i s 1 0 1184 "1184" _null_ _null_ _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ )); -DESCR("minimum value of all timestamp with time zone input values"); -DATA(insert OID = 2144 ( min PGNSP PGUID 12 1 0 0 0 t f f f f f i s 1 0 1186 "1186" _null_ _null_ _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ )); -DESCR("minimum value of all interval input values"); -DATA(insert OID = 2145 ( min PGNSP PGUID 12 1 0 0 0 t f f f f f i s 1 0 25 "25" _null_ _null_ _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ )); -DESCR("minimum value of all text values"); -DATA(insert OID = 2146 ( min PGNSP PGUID 12 1 0 0 0 t f f f f f i s 1 0 1700 "1700" _null_ _null_ _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ )); -DESCR("minimum value of all numeric input values"); -DATA(insert OID = 2051 ( min PGNSP PGUID 12 1 0 0 0 t f f f f f i s 1 0 2277 "2277" _null_ _null_ _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ )); -DESCR("minimum value of all anyarray input values"); -DATA(insert OID = 2245 ( min PGNSP PGUID 12 1 0 0 0 t f f f f f i s 1 0 1042 "1042" _null_ _null_ _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ )); -DESCR("minimum value of all bpchar input values"); -DATA(insert OID = 2798 ( min PGNSP PGUID 12 1 0 0 0 t f f f f f i s 1 0 27 "27" _null_ _null_ _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ )); -DESCR("minimum value of all tid input values"); -DATA(insert OID = 3565 ( min PGNSP PGUID 12 1 0 0 0 t f f f f f i s 1 0 869 "869" _null_ _null_ _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ )); -DESCR("minimum value of all inet input values"); - -/* count has two forms: count(any) and count(*) */ -DATA(insert OID = 2147 ( count PGNSP PGUID 12 1 0 0 0 t f f f f f i s 1 0 20 "2276" _null_ _null_ _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ )); -DESCR("number of input rows for which the input expression is not null"); -DATA(insert OID = 2803 ( count PGNSP PGUID 12 1 0 0 0 t f f f f f i s 0 0 20 "" _null_ _null_ _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ )); -DESCR("number of input rows"); - -DATA(insert OID = 2718 ( var_pop PGNSP PGUID 12 1 0 0 0 t f f f f f i s 1 0 1700 "20" _null_ _null_ _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ )); -DESCR("population variance of bigint input values (square of the population standard 
deviation)"); -DATA(insert OID = 2719 ( var_pop PGNSP PGUID 12 1 0 0 0 t f f f f f i s 1 0 1700 "23" _null_ _null_ _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ )); -DESCR("population variance of integer input values (square of the population standard deviation)"); -DATA(insert OID = 2720 ( var_pop PGNSP PGUID 12 1 0 0 0 t f f f f f i s 1 0 1700 "21" _null_ _null_ _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ )); -DESCR("population variance of smallint input values (square of the population standard deviation)"); -DATA(insert OID = 2721 ( var_pop PGNSP PGUID 12 1 0 0 0 t f f f f f i s 1 0 701 "700" _null_ _null_ _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ )); -DESCR("population variance of float4 input values (square of the population standard deviation)"); -DATA(insert OID = 2722 ( var_pop PGNSP PGUID 12 1 0 0 0 t f f f f f i s 1 0 701 "701" _null_ _null_ _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ )); -DESCR("population variance of float8 input values (square of the population standard deviation)"); -DATA(insert OID = 2723 ( var_pop PGNSP PGUID 12 1 0 0 0 t f f f f f i s 1 0 1700 "1700" _null_ _null_ _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ )); -DESCR("population variance of numeric input values (square of the population standard deviation)"); - -DATA(insert OID = 2641 ( var_samp PGNSP PGUID 12 1 0 0 0 t f f f f f i s 1 0 1700 "20" _null_ _null_ _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ )); -DESCR("sample variance of bigint input values (square of the sample standard deviation)"); -DATA(insert OID = 2642 ( var_samp PGNSP PGUID 12 1 0 0 0 t f f f f f i s 1 0 1700 "23" _null_ _null_ _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ )); -DESCR("sample variance of integer input values (square of the sample standard deviation)"); -DATA(insert OID = 2643 ( var_samp PGNSP PGUID 12 1 0 0 0 t f f f f f i s 1 0 1700 "21" _null_ _null_ _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ )); -DESCR("sample variance of smallint input values (square of the sample standard deviation)"); -DATA(insert OID = 2644 ( var_samp PGNSP PGUID 12 1 0 0 0 t f f f f f i s 1 0 701 "700" _null_ _null_ _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ )); -DESCR("sample variance of float4 input values (square of the sample standard deviation)"); - -DATA(insert OID = 2645 ( var_samp PGNSP PGUID 12 1 0 0 0 t f f f f f i s 1 0 701 "701" _null_ _null_ _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ )); -DESCR("sample variance of float8 input values (square of the sample standard deviation)"); -DATA(insert OID = 2646 ( var_samp PGNSP PGUID 12 1 0 0 0 t f f f f f i s 1 0 1700 "1700" _null_ _null_ _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ )); -DESCR("sample variance of numeric input values (square of the sample standard deviation)"); - -DATA(insert OID = 2148 ( variance PGNSP PGUID 12 1 0 0 0 t f f f f f i s 1 0 1700 "20" _null_ _null_ _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ )); -DESCR("historical alias for var_samp"); -DATA(insert OID = 2149 ( variance PGNSP PGUID 12 1 0 0 0 t f f f f f i s 1 0 1700 "23" _null_ _null_ _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ )); -DESCR("historical alias for var_samp"); -DATA(insert OID = 2150 ( variance PGNSP PGUID 12 1 0 0 0 t f f f f f i s 1 0 1700 "21" _null_ _null_ _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ )); -DESCR("historical alias for var_samp"); -DATA(insert OID = 2151 ( variance PGNSP PGUID 12 1 0 0 
0 t f f f f f i s 1 0 701 "700" _null_ _null_ _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ )); -DESCR("historical alias for var_samp"); -DATA(insert OID = 2152 ( variance PGNSP PGUID 12 1 0 0 0 t f f f f f i s 1 0 701 "701" _null_ _null_ _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ )); -DESCR("historical alias for var_samp"); -DATA(insert OID = 2153 ( variance PGNSP PGUID 12 1 0 0 0 t f f f f f i s 1 0 1700 "1700" _null_ _null_ _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ )); -DESCR("historical alias for var_samp"); - -DATA(insert OID = 2724 ( stddev_pop PGNSP PGUID 12 1 0 0 0 t f f f f f i s 1 0 1700 "20" _null_ _null_ _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ )); -DESCR("population standard deviation of bigint input values"); -DATA(insert OID = 2725 ( stddev_pop PGNSP PGUID 12 1 0 0 0 t f f f f f i s 1 0 1700 "23" _null_ _null_ _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ )); -DESCR("population standard deviation of integer input values"); -DATA(insert OID = 2726 ( stddev_pop PGNSP PGUID 12 1 0 0 0 t f f f f f i s 1 0 1700 "21" _null_ _null_ _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ )); -DESCR("population standard deviation of smallint input values"); -DATA(insert OID = 2727 ( stddev_pop PGNSP PGUID 12 1 0 0 0 t f f f f f i s 1 0 701 "700" _null_ _null_ _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ )); -DESCR("population standard deviation of float4 input values"); -DATA(insert OID = 2728 ( stddev_pop PGNSP PGUID 12 1 0 0 0 t f f f f f i s 1 0 701 "701" _null_ _null_ _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ )); -DESCR("population standard deviation of float8 input values"); -DATA(insert OID = 2729 ( stddev_pop PGNSP PGUID 12 1 0 0 0 t f f f f f i s 1 0 1700 "1700" _null_ _null_ _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ )); -DESCR("population standard deviation of numeric input values"); - -DATA(insert OID = 2712 ( stddev_samp PGNSP PGUID 12 1 0 0 0 t f f f f f i s 1 0 1700 "20" _null_ _null_ _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ )); -DESCR("sample standard deviation of bigint input values"); -DATA(insert OID = 2713 ( stddev_samp PGNSP PGUID 12 1 0 0 0 t f f f f f i s 1 0 1700 "23" _null_ _null_ _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ )); -DESCR("sample standard deviation of integer input values"); -DATA(insert OID = 2714 ( stddev_samp PGNSP PGUID 12 1 0 0 0 t f f f f f i s 1 0 1700 "21" _null_ _null_ _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ )); -DESCR("sample standard deviation of smallint input values"); -DATA(insert OID = 2715 ( stddev_samp PGNSP PGUID 12 1 0 0 0 t f f f f f i s 1 0 701 "700" _null_ _null_ _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ )); -DESCR("sample standard deviation of float4 input values"); -DATA(insert OID = 2716 ( stddev_samp PGNSP PGUID 12 1 0 0 0 t f f f f f i s 1 0 701 "701" _null_ _null_ _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ )); -DESCR("sample standard deviation of float8 input values"); -DATA(insert OID = 2717 ( stddev_samp PGNSP PGUID 12 1 0 0 0 t f f f f f i s 1 0 1700 "1700" _null_ _null_ _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ )); -DESCR("sample standard deviation of numeric input values"); - -DATA(insert OID = 2154 ( stddev PGNSP PGUID 12 1 0 0 0 t f f f f f i s 1 0 1700 "20" _null_ _null_ _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ )); -DESCR("historical alias for stddev_samp"); -DATA(insert OID = 2155 ( 
stddev PGNSP PGUID 12 1 0 0 0 t f f f f f i s 1 0 1700 "23" _null_ _null_ _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ )); -DESCR("historical alias for stddev_samp"); -DATA(insert OID = 2156 ( stddev PGNSP PGUID 12 1 0 0 0 t f f f f f i s 1 0 1700 "21" _null_ _null_ _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ )); -DESCR("historical alias for stddev_samp"); -DATA(insert OID = 2157 ( stddev PGNSP PGUID 12 1 0 0 0 t f f f f f i s 1 0 701 "700" _null_ _null_ _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ )); -DESCR("historical alias for stddev_samp"); -DATA(insert OID = 2158 ( stddev PGNSP PGUID 12 1 0 0 0 t f f f f f i s 1 0 701 "701" _null_ _null_ _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ )); -DESCR("historical alias for stddev_samp"); -DATA(insert OID = 2159 ( stddev PGNSP PGUID 12 1 0 0 0 t f f f f f i s 1 0 1700 "1700" _null_ _null_ _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ )); -DESCR("historical alias for stddev_samp"); - -DATA(insert OID = 2818 ( regr_count PGNSP PGUID 12 1 0 0 0 t f f f f f i s 2 0 20 "701 701" _null_ _null_ _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ )); -DESCR("number of input rows in which both expressions are not null"); -DATA(insert OID = 2819 ( regr_sxx PGNSP PGUID 12 1 0 0 0 t f f f f f i s 2 0 701 "701 701" _null_ _null_ _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ )); -DESCR("sum of squares of the independent variable (sum(X^2) - sum(X)^2/N)"); -DATA(insert OID = 2820 ( regr_syy PGNSP PGUID 12 1 0 0 0 t f f f f f i s 2 0 701 "701 701" _null_ _null_ _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ )); -DESCR("sum of squares of the dependent variable (sum(Y^2) - sum(Y)^2/N)"); -DATA(insert OID = 2821 ( regr_sxy PGNSP PGUID 12 1 0 0 0 t f f f f f i s 2 0 701 "701 701" _null_ _null_ _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ )); -DESCR("sum of products of independent times dependent variable (sum(X*Y) - sum(X) * sum(Y)/N)"); -DATA(insert OID = 2822 ( regr_avgx PGNSP PGUID 12 1 0 0 0 t f f f f f i s 2 0 701 "701 701" _null_ _null_ _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ )); -DESCR("average of the independent variable (sum(X)/N)"); -DATA(insert OID = 2823 ( regr_avgy PGNSP PGUID 12 1 0 0 0 t f f f f f i s 2 0 701 "701 701" _null_ _null_ _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ )); -DESCR("average of the dependent variable (sum(Y)/N)"); -DATA(insert OID = 2824 ( regr_r2 PGNSP PGUID 12 1 0 0 0 t f f f f f i s 2 0 701 "701 701" _null_ _null_ _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ )); -DESCR("square of the correlation coefficient"); -DATA(insert OID = 2825 ( regr_slope PGNSP PGUID 12 1 0 0 0 t f f f f f i s 2 0 701 "701 701" _null_ _null_ _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ )); -DESCR("slope of the least-squares-fit linear equation determined by the (X, Y) pairs"); -DATA(insert OID = 2826 ( regr_intercept PGNSP PGUID 12 1 0 0 0 t f f f f f i s 2 0 701 "701 701" _null_ _null_ _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ )); -DESCR("y-intercept of the least-squares-fit linear equation determined by the (X, Y) pairs"); - -DATA(insert OID = 2827 ( covar_pop PGNSP PGUID 12 1 0 0 0 t f f f f f i s 2 0 701 "701 701" _null_ _null_ _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ )); -DESCR("population covariance"); -DATA(insert OID = 2828 ( covar_samp PGNSP PGUID 12 1 0 0 0 t f f f f f i s 2 0 701 "701 701" _null_ _null_ _null_ _null_ _null_ aggregate_dummy 
_null_ _null_ _null_ )); -DESCR("sample covariance"); -DATA(insert OID = 2829 ( corr PGNSP PGUID 12 1 0 0 0 t f f f f f i s 2 0 701 "701 701" _null_ _null_ _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ )); -DESCR("correlation coefficient"); - -DATA(insert OID = 2160 ( text_pattern_lt PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 16 "25 25" _null_ _null_ _null_ _null_ _null_ text_pattern_lt _null_ _null_ _null_ )); -DATA(insert OID = 2161 ( text_pattern_le PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 16 "25 25" _null_ _null_ _null_ _null_ _null_ text_pattern_le _null_ _null_ _null_ )); -DATA(insert OID = 2163 ( text_pattern_ge PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 16 "25 25" _null_ _null_ _null_ _null_ _null_ text_pattern_ge _null_ _null_ _null_ )); -DATA(insert OID = 2164 ( text_pattern_gt PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 16 "25 25" _null_ _null_ _null_ _null_ _null_ text_pattern_gt _null_ _null_ _null_ )); -DATA(insert OID = 2166 ( bttext_pattern_cmp PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 23 "25 25" _null_ _null_ _null_ _null_ _null_ bttext_pattern_cmp _null_ _null_ _null_ )); -DESCR("less-equal-greater"); -DATA(insert OID = 3332 ( bttext_pattern_sortsupport PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 2278 "2281" _null_ _null_ _null_ _null_ _null_ bttext_pattern_sortsupport _null_ _null_ _null_ )); -DESCR("sort support"); - -DATA(insert OID = 2174 ( bpchar_pattern_lt PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 16 "1042 1042" _null_ _null_ _null_ _null_ _null_ bpchar_pattern_lt _null_ _null_ _null_ )); -DATA(insert OID = 2175 ( bpchar_pattern_le PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 16 "1042 1042" _null_ _null_ _null_ _null_ _null_ bpchar_pattern_le _null_ _null_ _null_ )); -DATA(insert OID = 2177 ( bpchar_pattern_ge PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 16 "1042 1042" _null_ _null_ _null_ _null_ _null_ bpchar_pattern_ge _null_ _null_ _null_ )); -DATA(insert OID = 2178 ( bpchar_pattern_gt PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 16 "1042 1042" _null_ _null_ _null_ _null_ _null_ bpchar_pattern_gt _null_ _null_ _null_ )); -DATA(insert OID = 2180 ( btbpchar_pattern_cmp PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 23 "1042 1042" _null_ _null_ _null_ _null_ _null_ btbpchar_pattern_cmp _null_ _null_ _null_ )); -DESCR("less-equal-greater"); -DATA(insert OID = 3333 ( btbpchar_pattern_sortsupport PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 2278 "2281" _null_ _null_ _null_ _null_ _null_ btbpchar_pattern_sortsupport _null_ _null_ _null_ )); -DESCR("sort support"); - -DATA(insert OID = 2188 ( btint48cmp PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 23 "23 20" _null_ _null_ _null_ _null_ _null_ btint48cmp _null_ _null_ _null_ )); -DESCR("less-equal-greater"); -DATA(insert OID = 2189 ( btint84cmp PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 23 "20 23" _null_ _null_ _null_ _null_ _null_ btint84cmp _null_ _null_ _null_ )); -DESCR("less-equal-greater"); -DATA(insert OID = 2190 ( btint24cmp PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 23 "21 23" _null_ _null_ _null_ _null_ _null_ btint24cmp _null_ _null_ _null_ )); -DESCR("less-equal-greater"); -DATA(insert OID = 2191 ( btint42cmp PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 23 "23 21" _null_ _null_ _null_ _null_ _null_ btint42cmp _null_ _null_ _null_ )); -DESCR("less-equal-greater"); -DATA(insert OID = 2192 ( btint28cmp PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 23 "21 20" _null_ _null_ _null_ _null_ _null_ btint28cmp _null_ _null_ _null_ )); -DESCR("less-equal-greater"); -DATA(insert OID = 2193 ( btint82cmp PGNSP PGUID 12 1 0 0 0 f 
f f f t f i s 2 0 23 "20 21" _null_ _null_ _null_ _null_ _null_ btint82cmp _null_ _null_ _null_ )); -DESCR("less-equal-greater"); -DATA(insert OID = 2194 ( btfloat48cmp PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 23 "700 701" _null_ _null_ _null_ _null_ _null_ btfloat48cmp _null_ _null_ _null_ )); -DESCR("less-equal-greater"); -DATA(insert OID = 2195 ( btfloat84cmp PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 23 "701 700" _null_ _null_ _null_ _null_ _null_ btfloat84cmp _null_ _null_ _null_ )); -DESCR("less-equal-greater"); - -DATA(insert OID = 2212 ( regprocedurein PGNSP PGUID 12 1 0 0 0 f f f f t f s s 1 0 2202 "2275" _null_ _null_ _null_ _null_ _null_ regprocedurein _null_ _null_ _null_ )); -DESCR("I/O"); -DATA(insert OID = 2213 ( regprocedureout PGNSP PGUID 12 1 0 0 0 f f f f t f s s 1 0 2275 "2202" _null_ _null_ _null_ _null_ _null_ regprocedureout _null_ _null_ _null_ )); -DESCR("I/O"); -DATA(insert OID = 2214 ( regoperin PGNSP PGUID 12 1 0 0 0 f f f f t f s s 1 0 2203 "2275" _null_ _null_ _null_ _null_ _null_ regoperin _null_ _null_ _null_ )); -DESCR("I/O"); -DATA(insert OID = 2215 ( regoperout PGNSP PGUID 12 1 0 0 0 f f f f t f s s 1 0 2275 "2203" _null_ _null_ _null_ _null_ _null_ regoperout _null_ _null_ _null_ )); -DESCR("I/O"); -DATA(insert OID = 3492 ( to_regoper PGNSP PGUID 12 1 0 0 0 f f f f t f s s 1 0 2203 "25" _null_ _null_ _null_ _null_ _null_ to_regoper _null_ _null_ _null_ )); -DESCR("convert operator name to regoper"); -DATA(insert OID = 3476 ( to_regoperator PGNSP PGUID 12 1 0 0 0 f f f f t f s s 1 0 2204 "25" _null_ _null_ _null_ _null_ _null_ to_regoperator _null_ _null_ _null_ )); -DESCR("convert operator name to regoperator"); -DATA(insert OID = 2216 ( regoperatorin PGNSP PGUID 12 1 0 0 0 f f f f t f s s 1 0 2204 "2275" _null_ _null_ _null_ _null_ _null_ regoperatorin _null_ _null_ _null_ )); -DESCR("I/O"); -DATA(insert OID = 2217 ( regoperatorout PGNSP PGUID 12 1 0 0 0 f f f f t f s s 1 0 2275 "2204" _null_ _null_ _null_ _null_ _null_ regoperatorout _null_ _null_ _null_ )); -DESCR("I/O"); -DATA(insert OID = 2218 ( regclassin PGNSP PGUID 12 1 0 0 0 f f f f t f s s 1 0 2205 "2275" _null_ _null_ _null_ _null_ _null_ regclassin _null_ _null_ _null_ )); -DESCR("I/O"); -DATA(insert OID = 2219 ( regclassout PGNSP PGUID 12 1 0 0 0 f f f f t f s s 1 0 2275 "2205" _null_ _null_ _null_ _null_ _null_ regclassout _null_ _null_ _null_ )); -DESCR("I/O"); -DATA(insert OID = 3495 ( to_regclass PGNSP PGUID 12 1 0 0 0 f f f f t f s s 1 0 2205 "25" _null_ _null_ _null_ _null_ _null_ to_regclass _null_ _null_ _null_ )); -DESCR("convert classname to regclass"); -DATA(insert OID = 2220 ( regtypein PGNSP PGUID 12 1 0 0 0 f f f f t f s s 1 0 2206 "2275" _null_ _null_ _null_ _null_ _null_ regtypein _null_ _null_ _null_ )); -DESCR("I/O"); -DATA(insert OID = 2221 ( regtypeout PGNSP PGUID 12 1 0 0 0 f f f f t f s s 1 0 2275 "2206" _null_ _null_ _null_ _null_ _null_ regtypeout _null_ _null_ _null_ )); -DESCR("I/O"); -DATA(insert OID = 3493 ( to_regtype PGNSP PGUID 12 1 0 0 0 f f f f t f s s 1 0 2206 "25" _null_ _null_ _null_ _null_ _null_ to_regtype _null_ _null_ _null_ )); -DESCR("convert type name to regtype"); -DATA(insert OID = 1079 ( regclass PGNSP PGUID 12 1 0 0 0 f f f f t f s s 1 0 2205 "25" _null_ _null_ _null_ _null_ _null_ text_regclass _null_ _null_ _null_ )); -DESCR("convert text to regclass"); - -DATA(insert OID = 4098 ( regrolein PGNSP PGUID 12 1 0 0 0 f f f f t f s s 1 0 4096 "2275" _null_ _null_ _null_ _null_ _null_ regrolein _null_ _null_ _null_ )); -DESCR("I/O"); -DATA(insert 
OID = 4092 ( regroleout PGNSP PGUID 12 1 0 0 0 f f f f t f s s 1 0 2275 "4096" _null_ _null_ _null_ _null_ _null_ regroleout _null_ _null_ _null_ )); -DESCR("I/O"); -DATA(insert OID = 4093 ( to_regrole PGNSP PGUID 12 1 0 0 0 f f f f t f s s 1 0 4096 "25" _null_ _null_ _null_ _null_ _null_ to_regrole _null_ _null_ _null_ )); -DESCR("convert role name to regrole"); - -DATA(insert OID = 4084 ( regnamespacein PGNSP PGUID 12 1 0 0 0 f f f f t f s s 1 0 4089 "2275" _null_ _null_ _null_ _null_ _null_ regnamespacein _null_ _null_ _null_ )); -DESCR("I/O"); -DATA(insert OID = 4085 ( regnamespaceout PGNSP PGUID 12 1 0 0 0 f f f f t f s s 1 0 2275 "4089" _null_ _null_ _null_ _null_ _null_ regnamespaceout _null_ _null_ _null_ )); -DESCR("I/O"); -DATA(insert OID = 4086 ( to_regnamespace PGNSP PGUID 12 1 0 0 0 f f f f t f s s 1 0 4089 "25" _null_ _null_ _null_ _null_ _null_ to_regnamespace _null_ _null_ _null_ )); -DESCR("convert namespace name to regnamespace"); - -DATA(insert OID = 1268 ( parse_ident PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 1009 "25 16" _null_ _null_ "{str,strict}" _null_ _null_ parse_ident _null_ _null_ _null_ )); -DESCR("parse qualified identifier to array of identifiers"); - -DATA(insert OID = 2246 ( fmgr_internal_validator PGNSP PGUID 12 1 0 0 0 f f f f t f s s 1 0 2278 "26" _null_ _null_ _null_ _null_ _null_ fmgr_internal_validator _null_ _null_ _null_ )); -DESCR("(internal)"); -DATA(insert OID = 2247 ( fmgr_c_validator PGNSP PGUID 12 1 0 0 0 f f f f t f s s 1 0 2278 "26" _null_ _null_ _null_ _null_ _null_ fmgr_c_validator _null_ _null_ _null_ )); -DESCR("(internal)"); -DATA(insert OID = 2248 ( fmgr_sql_validator PGNSP PGUID 12 1 0 0 0 f f f f t f s s 1 0 2278 "26" _null_ _null_ _null_ _null_ _null_ fmgr_sql_validator _null_ _null_ _null_ )); -DESCR("(internal)"); - -DATA(insert OID = 2250 ( has_database_privilege PGNSP PGUID 12 1 0 0 0 f f f f t f s s 3 0 16 "19 25 25" _null_ _null_ _null_ _null_ _null_ has_database_privilege_name_name _null_ _null_ _null_ )); -DESCR("user privilege on database by username, database name"); -DATA(insert OID = 2251 ( has_database_privilege PGNSP PGUID 12 1 0 0 0 f f f f t f s s 3 0 16 "19 26 25" _null_ _null_ _null_ _null_ _null_ has_database_privilege_name_id _null_ _null_ _null_ )); -DESCR("user privilege on database by username, database oid"); -DATA(insert OID = 2252 ( has_database_privilege PGNSP PGUID 12 1 0 0 0 f f f f t f s s 3 0 16 "26 25 25" _null_ _null_ _null_ _null_ _null_ has_database_privilege_id_name _null_ _null_ _null_ )); -DESCR("user privilege on database by user oid, database name"); -DATA(insert OID = 2253 ( has_database_privilege PGNSP PGUID 12 1 0 0 0 f f f f t f s s 3 0 16 "26 26 25" _null_ _null_ _null_ _null_ _null_ has_database_privilege_id_id _null_ _null_ _null_ )); -DESCR("user privilege on database by user oid, database oid"); -DATA(insert OID = 2254 ( has_database_privilege PGNSP PGUID 12 1 0 0 0 f f f f t f s s 2 0 16 "25 25" _null_ _null_ _null_ _null_ _null_ has_database_privilege_name _null_ _null_ _null_ )); -DESCR("current user privilege on database by database name"); -DATA(insert OID = 2255 ( has_database_privilege PGNSP PGUID 12 1 0 0 0 f f f f t f s s 2 0 16 "26 25" _null_ _null_ _null_ _null_ _null_ has_database_privilege_id _null_ _null_ _null_ )); -DESCR("current user privilege on database by database oid"); - -DATA(insert OID = 2256 ( has_function_privilege PGNSP PGUID 12 1 0 0 0 f f f f t f s s 3 0 16 "19 25 25" _null_ _null_ _null_ _null_ _null_ has_function_privilege_name_name _null_ _null_ 
_null_ )); -DESCR("user privilege on function by username, function name"); -DATA(insert OID = 2257 ( has_function_privilege PGNSP PGUID 12 1 0 0 0 f f f f t f s s 3 0 16 "19 26 25" _null_ _null_ _null_ _null_ _null_ has_function_privilege_name_id _null_ _null_ _null_ )); -DESCR("user privilege on function by username, function oid"); -DATA(insert OID = 2258 ( has_function_privilege PGNSP PGUID 12 1 0 0 0 f f f f t f s s 3 0 16 "26 25 25" _null_ _null_ _null_ _null_ _null_ has_function_privilege_id_name _null_ _null_ _null_ )); -DESCR("user privilege on function by user oid, function name"); -DATA(insert OID = 2259 ( has_function_privilege PGNSP PGUID 12 1 0 0 0 f f f f t f s s 3 0 16 "26 26 25" _null_ _null_ _null_ _null_ _null_ has_function_privilege_id_id _null_ _null_ _null_ )); -DESCR("user privilege on function by user oid, function oid"); -DATA(insert OID = 2260 ( has_function_privilege PGNSP PGUID 12 1 0 0 0 f f f f t f s s 2 0 16 "25 25" _null_ _null_ _null_ _null_ _null_ has_function_privilege_name _null_ _null_ _null_ )); -DESCR("current user privilege on function by function name"); -DATA(insert OID = 2261 ( has_function_privilege PGNSP PGUID 12 1 0 0 0 f f f f t f s s 2 0 16 "26 25" _null_ _null_ _null_ _null_ _null_ has_function_privilege_id _null_ _null_ _null_ )); -DESCR("current user privilege on function by function oid"); - -DATA(insert OID = 2262 ( has_language_privilege PGNSP PGUID 12 1 0 0 0 f f f f t f s s 3 0 16 "19 25 25" _null_ _null_ _null_ _null_ _null_ has_language_privilege_name_name _null_ _null_ _null_ )); -DESCR("user privilege on language by username, language name"); -DATA(insert OID = 2263 ( has_language_privilege PGNSP PGUID 12 1 0 0 0 f f f f t f s s 3 0 16 "19 26 25" _null_ _null_ _null_ _null_ _null_ has_language_privilege_name_id _null_ _null_ _null_ )); -DESCR("user privilege on language by username, language oid"); -DATA(insert OID = 2264 ( has_language_privilege PGNSP PGUID 12 1 0 0 0 f f f f t f s s 3 0 16 "26 25 25" _null_ _null_ _null_ _null_ _null_ has_language_privilege_id_name _null_ _null_ _null_ )); -DESCR("user privilege on language by user oid, language name"); -DATA(insert OID = 2265 ( has_language_privilege PGNSP PGUID 12 1 0 0 0 f f f f t f s s 3 0 16 "26 26 25" _null_ _null_ _null_ _null_ _null_ has_language_privilege_id_id _null_ _null_ _null_ )); -DESCR("user privilege on language by user oid, language oid"); -DATA(insert OID = 2266 ( has_language_privilege PGNSP PGUID 12 1 0 0 0 f f f f t f s s 2 0 16 "25 25" _null_ _null_ _null_ _null_ _null_ has_language_privilege_name _null_ _null_ _null_ )); -DESCR("current user privilege on language by language name"); -DATA(insert OID = 2267 ( has_language_privilege PGNSP PGUID 12 1 0 0 0 f f f f t f s s 2 0 16 "26 25" _null_ _null_ _null_ _null_ _null_ has_language_privilege_id _null_ _null_ _null_ )); -DESCR("current user privilege on language by language oid"); - -DATA(insert OID = 2268 ( has_schema_privilege PGNSP PGUID 12 1 0 0 0 f f f f t f s s 3 0 16 "19 25 25" _null_ _null_ _null_ _null_ _null_ has_schema_privilege_name_name _null_ _null_ _null_ )); -DESCR("user privilege on schema by username, schema name"); -DATA(insert OID = 2269 ( has_schema_privilege PGNSP PGUID 12 1 0 0 0 f f f f t f s s 3 0 16 "19 26 25" _null_ _null_ _null_ _null_ _null_ has_schema_privilege_name_id _null_ _null_ _null_ )); -DESCR("user privilege on schema by username, schema oid"); -DATA(insert OID = 2270 ( has_schema_privilege PGNSP PGUID 12 1 0 0 0 f f f f t f s s 3 0 16 "26 25 25" _null_ _null_ _null_ 
_null_ _null_ has_schema_privilege_id_name _null_ _null_ _null_ )); -DESCR("user privilege on schema by user oid, schema name"); -DATA(insert OID = 2271 ( has_schema_privilege PGNSP PGUID 12 1 0 0 0 f f f f t f s s 3 0 16 "26 26 25" _null_ _null_ _null_ _null_ _null_ has_schema_privilege_id_id _null_ _null_ _null_ )); -DESCR("user privilege on schema by user oid, schema oid"); -DATA(insert OID = 2272 ( has_schema_privilege PGNSP PGUID 12 1 0 0 0 f f f f t f s s 2 0 16 "25 25" _null_ _null_ _null_ _null_ _null_ has_schema_privilege_name _null_ _null_ _null_ )); -DESCR("current user privilege on schema by schema name"); -DATA(insert OID = 2273 ( has_schema_privilege PGNSP PGUID 12 1 0 0 0 f f f f t f s s 2 0 16 "26 25" _null_ _null_ _null_ _null_ _null_ has_schema_privilege_id _null_ _null_ _null_ )); -DESCR("current user privilege on schema by schema oid"); - -DATA(insert OID = 2390 ( has_tablespace_privilege PGNSP PGUID 12 1 0 0 0 f f f f t f s s 3 0 16 "19 25 25" _null_ _null_ _null_ _null_ _null_ has_tablespace_privilege_name_name _null_ _null_ _null_ )); -DESCR("user privilege on tablespace by username, tablespace name"); -DATA(insert OID = 2391 ( has_tablespace_privilege PGNSP PGUID 12 1 0 0 0 f f f f t f s s 3 0 16 "19 26 25" _null_ _null_ _null_ _null_ _null_ has_tablespace_privilege_name_id _null_ _null_ _null_ )); -DESCR("user privilege on tablespace by username, tablespace oid"); -DATA(insert OID = 2392 ( has_tablespace_privilege PGNSP PGUID 12 1 0 0 0 f f f f t f s s 3 0 16 "26 25 25" _null_ _null_ _null_ _null_ _null_ has_tablespace_privilege_id_name _null_ _null_ _null_ )); -DESCR("user privilege on tablespace by user oid, tablespace name"); -DATA(insert OID = 2393 ( has_tablespace_privilege PGNSP PGUID 12 1 0 0 0 f f f f t f s s 3 0 16 "26 26 25" _null_ _null_ _null_ _null_ _null_ has_tablespace_privilege_id_id _null_ _null_ _null_ )); -DESCR("user privilege on tablespace by user oid, tablespace oid"); -DATA(insert OID = 2394 ( has_tablespace_privilege PGNSP PGUID 12 1 0 0 0 f f f f t f s s 2 0 16 "25 25" _null_ _null_ _null_ _null_ _null_ has_tablespace_privilege_name _null_ _null_ _null_ )); -DESCR("current user privilege on tablespace by tablespace name"); -DATA(insert OID = 2395 ( has_tablespace_privilege PGNSP PGUID 12 1 0 0 0 f f f f t f s s 2 0 16 "26 25" _null_ _null_ _null_ _null_ _null_ has_tablespace_privilege_id _null_ _null_ _null_ )); -DESCR("current user privilege on tablespace by tablespace oid"); - -DATA(insert OID = 3000 ( has_foreign_data_wrapper_privilege PGNSP PGUID 12 1 0 0 0 f f f f t f s s 3 0 16 "19 25 25" _null_ _null_ _null_ _null_ _null_ has_foreign_data_wrapper_privilege_name_name _null_ _null_ _null_ )); -DESCR("user privilege on foreign data wrapper by username, foreign data wrapper name"); -DATA(insert OID = 3001 ( has_foreign_data_wrapper_privilege PGNSP PGUID 12 1 0 0 0 f f f f t f s s 3 0 16 "19 26 25" _null_ _null_ _null_ _null_ _null_ has_foreign_data_wrapper_privilege_name_id _null_ _null_ _null_ )); -DESCR("user privilege on foreign data wrapper by username, foreign data wrapper oid"); -DATA(insert OID = 3002 ( has_foreign_data_wrapper_privilege PGNSP PGUID 12 1 0 0 0 f f f f t f s s 3 0 16 "26 25 25" _null_ _null_ _null_ _null_ _null_ has_foreign_data_wrapper_privilege_id_name _null_ _null_ _null_ )); -DESCR("user privilege on foreign data wrapper by user oid, foreign data wrapper name"); -DATA(insert OID = 3003 ( has_foreign_data_wrapper_privilege PGNSP PGUID 12 1 0 0 0 f f f f t f s s 3 0 16 "26 26 25" _null_ _null_ _null_ _null_ 
_null_ has_foreign_data_wrapper_privilege_id_id _null_ _null_ _null_ )); -DESCR("user privilege on foreign data wrapper by user oid, foreign data wrapper oid"); -DATA(insert OID = 3004 ( has_foreign_data_wrapper_privilege PGNSP PGUID 12 1 0 0 0 f f f f t f s s 2 0 16 "25 25" _null_ _null_ _null_ _null_ _null_ has_foreign_data_wrapper_privilege_name _null_ _null_ _null_ )); -DESCR("current user privilege on foreign data wrapper by foreign data wrapper name"); -DATA(insert OID = 3005 ( has_foreign_data_wrapper_privilege PGNSP PGUID 12 1 0 0 0 f f f f t f s s 2 0 16 "26 25" _null_ _null_ _null_ _null_ _null_ has_foreign_data_wrapper_privilege_id _null_ _null_ _null_ )); -DESCR("current user privilege on foreign data wrapper by foreign data wrapper oid"); - -DATA(insert OID = 3006 ( has_server_privilege PGNSP PGUID 12 1 0 0 0 f f f f t f s s 3 0 16 "19 25 25" _null_ _null_ _null_ _null_ _null_ has_server_privilege_name_name _null_ _null_ _null_ )); -DESCR("user privilege on server by username, server name"); -DATA(insert OID = 3007 ( has_server_privilege PGNSP PGUID 12 1 0 0 0 f f f f t f s s 3 0 16 "19 26 25" _null_ _null_ _null_ _null_ _null_ has_server_privilege_name_id _null_ _null_ _null_ )); -DESCR("user privilege on server by username, server oid"); -DATA(insert OID = 3008 ( has_server_privilege PGNSP PGUID 12 1 0 0 0 f f f f t f s s 3 0 16 "26 25 25" _null_ _null_ _null_ _null_ _null_ has_server_privilege_id_name _null_ _null_ _null_ )); -DESCR("user privilege on server by user oid, server name"); -DATA(insert OID = 3009 ( has_server_privilege PGNSP PGUID 12 1 0 0 0 f f f f t f s s 3 0 16 "26 26 25" _null_ _null_ _null_ _null_ _null_ has_server_privilege_id_id _null_ _null_ _null_ )); -DESCR("user privilege on server by user oid, server oid"); -DATA(insert OID = 3010 ( has_server_privilege PGNSP PGUID 12 1 0 0 0 f f f f t f s s 2 0 16 "25 25" _null_ _null_ _null_ _null_ _null_ has_server_privilege_name _null_ _null_ _null_ )); -DESCR("current user privilege on server by server name"); -DATA(insert OID = 3011 ( has_server_privilege PGNSP PGUID 12 1 0 0 0 f f f f t f s s 2 0 16 "26 25" _null_ _null_ _null_ _null_ _null_ has_server_privilege_id _null_ _null_ _null_ )); -DESCR("current user privilege on server by server oid"); - -DATA(insert OID = 3138 ( has_type_privilege PGNSP PGUID 12 1 0 0 0 f f f f t f s s 3 0 16 "19 25 25" _null_ _null_ _null_ _null_ _null_ has_type_privilege_name_name _null_ _null_ _null_ )); -DESCR("user privilege on type by username, type name"); -DATA(insert OID = 3139 ( has_type_privilege PGNSP PGUID 12 1 0 0 0 f f f f t f s s 3 0 16 "19 26 25" _null_ _null_ _null_ _null_ _null_ has_type_privilege_name_id _null_ _null_ _null_ )); -DESCR("user privilege on type by username, type oid"); -DATA(insert OID = 3140 ( has_type_privilege PGNSP PGUID 12 1 0 0 0 f f f f t f s s 3 0 16 "26 25 25" _null_ _null_ _null_ _null_ _null_ has_type_privilege_id_name _null_ _null_ _null_ )); -DESCR("user privilege on type by user oid, type name"); -DATA(insert OID = 3141 ( has_type_privilege PGNSP PGUID 12 1 0 0 0 f f f f t f s s 3 0 16 "26 26 25" _null_ _null_ _null_ _null_ _null_ has_type_privilege_id_id _null_ _null_ _null_ )); -DESCR("user privilege on type by user oid, type oid"); -DATA(insert OID = 3142 ( has_type_privilege PGNSP PGUID 12 1 0 0 0 f f f f t f s s 2 0 16 "25 25" _null_ _null_ _null_ _null_ _null_ has_type_privilege_name _null_ _null_ _null_ )); -DESCR("current user privilege on type by type name"); -DATA(insert OID = 3143 ( has_type_privilege PGNSP PGUID 12 1 0 0 
0 f f f f t f s s 2 0 16 "26 25" _null_ _null_ _null_ _null_ _null_ has_type_privilege_id _null_ _null_ _null_ )); -DESCR("current user privilege on type by type oid"); - -DATA(insert OID = 2705 ( pg_has_role PGNSP PGUID 12 1 0 0 0 f f f f t f s s 3 0 16 "19 19 25" _null_ _null_ _null_ _null_ _null_ pg_has_role_name_name _null_ _null_ _null_ )); -DESCR("user privilege on role by username, role name"); -DATA(insert OID = 2706 ( pg_has_role PGNSP PGUID 12 1 0 0 0 f f f f t f s s 3 0 16 "19 26 25" _null_ _null_ _null_ _null_ _null_ pg_has_role_name_id _null_ _null_ _null_ )); -DESCR("user privilege on role by username, role oid"); -DATA(insert OID = 2707 ( pg_has_role PGNSP PGUID 12 1 0 0 0 f f f f t f s s 3 0 16 "26 19 25" _null_ _null_ _null_ _null_ _null_ pg_has_role_id_name _null_ _null_ _null_ )); -DESCR("user privilege on role by user oid, role name"); -DATA(insert OID = 2708 ( pg_has_role PGNSP PGUID 12 1 0 0 0 f f f f t f s s 3 0 16 "26 26 25" _null_ _null_ _null_ _null_ _null_ pg_has_role_id_id _null_ _null_ _null_ )); -DESCR("user privilege on role by user oid, role oid"); -DATA(insert OID = 2709 ( pg_has_role PGNSP PGUID 12 1 0 0 0 f f f f t f s s 2 0 16 "19 25" _null_ _null_ _null_ _null_ _null_ pg_has_role_name _null_ _null_ _null_ )); -DESCR("current user privilege on role by role name"); -DATA(insert OID = 2710 ( pg_has_role PGNSP PGUID 12 1 0 0 0 f f f f t f s s 2 0 16 "26 25" _null_ _null_ _null_ _null_ _null_ pg_has_role_id _null_ _null_ _null_ )); -DESCR("current user privilege on role by role oid"); - -DATA(insert OID = 1269 ( pg_column_size PGNSP PGUID 12 1 0 0 0 f f f f t f s s 1 0 23 "2276" _null_ _null_ _null_ _null_ _null_ pg_column_size _null_ _null_ _null_ )); -DESCR("bytes required to store the value, perhaps with compression"); -DATA(insert OID = 2322 ( pg_tablespace_size PGNSP PGUID 12 1 0 0 0 f f f f t f v s 1 0 20 "26" _null_ _null_ _null_ _null_ _null_ pg_tablespace_size_oid _null_ _null_ _null_ )); -DESCR("total disk space usage for the specified tablespace"); -DATA(insert OID = 2323 ( pg_tablespace_size PGNSP PGUID 12 1 0 0 0 f f f f t f v s 1 0 20 "19" _null_ _null_ _null_ _null_ _null_ pg_tablespace_size_name _null_ _null_ _null_ )); -DESCR("total disk space usage for the specified tablespace"); -DATA(insert OID = 2324 ( pg_database_size PGNSP PGUID 12 1 0 0 0 f f f f t f v s 1 0 20 "26" _null_ _null_ _null_ _null_ _null_ pg_database_size_oid _null_ _null_ _null_ )); -DESCR("total disk space usage for the specified database"); -DATA(insert OID = 2168 ( pg_database_size PGNSP PGUID 12 1 0 0 0 f f f f t f v s 1 0 20 "19" _null_ _null_ _null_ _null_ _null_ pg_database_size_name _null_ _null_ _null_ )); -DESCR("total disk space usage for the specified database"); -DATA(insert OID = 2325 ( pg_relation_size PGNSP PGUID 14 1 0 0 0 f f f f t f v s 1 0 20 "2205" _null_ _null_ _null_ _null_ _null_ "select pg_catalog.pg_relation_size($1, ''main'')" _null_ _null_ _null_ )); -DESCR("disk space usage for the main fork of the specified table or index"); -DATA(insert OID = 2332 ( pg_relation_size PGNSP PGUID 12 1 0 0 0 f f f f t f v s 2 0 20 "2205 25" _null_ _null_ _null_ _null_ _null_ pg_relation_size _null_ _null_ _null_ )); -DESCR("disk space usage for the specified fork of a table or index"); -DATA(insert OID = 2286 ( pg_total_relation_size PGNSP PGUID 12 1 0 0 0 f f f f t f v s 1 0 20 "2205" _null_ _null_ _null_ _null_ _null_ pg_total_relation_size _null_ _null_ _null_ )); -DESCR("total disk space usage for the specified table and associated indexes"); -DATA(insert 
OID = 2288 ( pg_size_pretty PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 25 "20" _null_ _null_ _null_ _null_ _null_ pg_size_pretty _null_ _null_ _null_ )); -DESCR("convert a long int to a human readable text using size units"); -DATA(insert OID = 3166 ( pg_size_pretty PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 25 "1700" _null_ _null_ _null_ _null_ _null_ pg_size_pretty_numeric _null_ _null_ _null_ )); -DESCR("convert a numeric to a human readable text using size units"); -DATA(insert OID = 3334 ( pg_size_bytes PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 20 "25" _null_ _null_ _null_ _null_ _null_ pg_size_bytes _null_ _null_ _null_ )); -DESCR("convert a size in human-readable format with size units into bytes"); -DATA(insert OID = 2997 ( pg_table_size PGNSP PGUID 12 1 0 0 0 f f f f t f v s 1 0 20 "2205" _null_ _null_ _null_ _null_ _null_ pg_table_size _null_ _null_ _null_ )); -DESCR("disk space usage for the specified table, including TOAST, free space and visibility map"); -DATA(insert OID = 2998 ( pg_indexes_size PGNSP PGUID 12 1 0 0 0 f f f f t f v s 1 0 20 "2205" _null_ _null_ _null_ _null_ _null_ pg_indexes_size _null_ _null_ _null_ )); -DESCR("disk space usage for all indexes attached to the specified table"); -DATA(insert OID = 2999 ( pg_relation_filenode PGNSP PGUID 12 1 0 0 0 f f f f t f s s 1 0 26 "2205" _null_ _null_ _null_ _null_ _null_ pg_relation_filenode _null_ _null_ _null_ )); -DESCR("filenode identifier of relation"); -DATA(insert OID = 3454 ( pg_filenode_relation PGNSP PGUID 12 1 0 0 0 f f f f t f s s 2 0 2205 "26 26" _null_ _null_ _null_ _null_ _null_ pg_filenode_relation _null_ _null_ _null_ )); -DESCR("relation OID for filenode and tablespace"); -DATA(insert OID = 3034 ( pg_relation_filepath PGNSP PGUID 12 1 0 0 0 f f f f t f s s 1 0 25 "2205" _null_ _null_ _null_ _null_ _null_ pg_relation_filepath _null_ _null_ _null_ )); -DESCR("file path of relation"); - -DATA(insert OID = 2316 ( postgresql_fdw_validator PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 16 "1009 26" _null_ _null_ _null_ _null_ _null_ postgresql_fdw_validator _null_ _null_ _null_)); -DESCR("(internal)"); - -DATA(insert OID = 2290 ( record_in PGNSP PGUID 12 1 0 0 0 f f f f t f s s 3 0 2249 "2275 26 23" _null_ _null_ _null_ _null_ _null_ record_in _null_ _null_ _null_ )); -DESCR("I/O"); -DATA(insert OID = 2291 ( record_out PGNSP PGUID 12 1 0 0 0 f f f f t f s s 1 0 2275 "2249" _null_ _null_ _null_ _null_ _null_ record_out _null_ _null_ _null_ )); -DESCR("I/O"); -DATA(insert OID = 2292 ( cstring_in PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 2275 "2275" _null_ _null_ _null_ _null_ _null_ cstring_in _null_ _null_ _null_ )); -DESCR("I/O"); -DATA(insert OID = 2293 ( cstring_out PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 2275 "2275" _null_ _null_ _null_ _null_ _null_ cstring_out _null_ _null_ _null_ )); -DESCR("I/O"); -DATA(insert OID = 2294 ( any_in PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 2276 "2275" _null_ _null_ _null_ _null_ _null_ any_in _null_ _null_ _null_ )); -DESCR("I/O"); -DATA(insert OID = 2295 ( any_out PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 2275 "2276" _null_ _null_ _null_ _null_ _null_ any_out _null_ _null_ _null_ )); -DESCR("I/O"); -DATA(insert OID = 2296 ( anyarray_in PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 2277 "2275" _null_ _null_ _null_ _null_ _null_ anyarray_in _null_ _null_ _null_ )); -DESCR("I/O"); -DATA(insert OID = 2297 ( anyarray_out PGNSP PGUID 12 1 0 0 0 f f f f t f s s 1 0 2275 "2277" _null_ _null_ _null_ _null_ _null_ anyarray_out _null_ _null_ _null_ )); -DESCR("I/O"); 
-DATA(insert OID = 2298 ( void_in PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 2278 "2275" _null_ _null_ _null_ _null_ _null_ void_in _null_ _null_ _null_ )); -DESCR("I/O"); -DATA(insert OID = 2299 ( void_out PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 2275 "2278" _null_ _null_ _null_ _null_ _null_ void_out _null_ _null_ _null_ )); -DESCR("I/O"); -DATA(insert OID = 2300 ( trigger_in PGNSP PGUID 12 1 0 0 0 f f f f f f i s 1 0 2279 "2275" _null_ _null_ _null_ _null_ _null_ trigger_in _null_ _null_ _null_ )); -DESCR("I/O"); -DATA(insert OID = 2301 ( trigger_out PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 2275 "2279" _null_ _null_ _null_ _null_ _null_ trigger_out _null_ _null_ _null_ )); -DESCR("I/O"); -DATA(insert OID = 3594 ( event_trigger_in PGNSP PGUID 12 1 0 0 0 f f f f f f i s 1 0 3838 "2275" _null_ _null_ _null_ _null_ _null_ event_trigger_in _null_ _null_ _null_ )); -DESCR("I/O"); -DATA(insert OID = 3595 ( event_trigger_out PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 2275 "3838" _null_ _null_ _null_ _null_ _null_ event_trigger_out _null_ _null_ _null_ )); -DESCR("I/O"); -DATA(insert OID = 2302 ( language_handler_in PGNSP PGUID 12 1 0 0 0 f f f f f f i s 1 0 2280 "2275" _null_ _null_ _null_ _null_ _null_ language_handler_in _null_ _null_ _null_ )); -DESCR("I/O"); -DATA(insert OID = 2303 ( language_handler_out PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 2275 "2280" _null_ _null_ _null_ _null_ _null_ language_handler_out _null_ _null_ _null_ )); -DESCR("I/O"); -DATA(insert OID = 2304 ( internal_in PGNSP PGUID 12 1 0 0 0 f f f f f f i s 1 0 2281 "2275" _null_ _null_ _null_ _null_ _null_ internal_in _null_ _null_ _null_ )); -DESCR("I/O"); -DATA(insert OID = 2305 ( internal_out PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 2275 "2281" _null_ _null_ _null_ _null_ _null_ internal_out _null_ _null_ _null_ )); -DESCR("I/O"); -DATA(insert OID = 2306 ( opaque_in PGNSP PGUID 12 1 0 0 0 f f f f f f i s 1 0 2282 "2275" _null_ _null_ _null_ _null_ _null_ opaque_in _null_ _null_ _null_ )); -DESCR("I/O"); -DATA(insert OID = 2307 ( opaque_out PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 2275 "2282" _null_ _null_ _null_ _null_ _null_ opaque_out _null_ _null_ _null_ )); -DESCR("I/O"); -DATA(insert OID = 2312 ( anyelement_in PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 2283 "2275" _null_ _null_ _null_ _null_ _null_ anyelement_in _null_ _null_ _null_ )); -DESCR("I/O"); -DATA(insert OID = 2313 ( anyelement_out PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 2275 "2283" _null_ _null_ _null_ _null_ _null_ anyelement_out _null_ _null_ _null_ )); -DESCR("I/O"); -DATA(insert OID = 2398 ( shell_in PGNSP PGUID 12 1 0 0 0 f f f f f f i s 1 0 2282 "2275" _null_ _null_ _null_ _null_ _null_ shell_in _null_ _null_ _null_ )); -DESCR("I/O"); -DATA(insert OID = 2399 ( shell_out PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 2275 "2282" _null_ _null_ _null_ _null_ _null_ shell_out _null_ _null_ _null_ )); -DESCR("I/O"); -DATA(insert OID = 2597 ( domain_in PGNSP PGUID 12 1 0 0 0 f f f f f f s s 3 0 2276 "2275 26 23" _null_ _null_ _null_ _null_ _null_ domain_in _null_ _null_ _null_ )); -DESCR("I/O"); -DATA(insert OID = 2598 ( domain_recv PGNSP PGUID 12 1 0 0 0 f f f f f f s s 3 0 2276 "2281 26 23" _null_ _null_ _null_ _null_ _null_ domain_recv _null_ _null_ _null_ )); -DESCR("I/O"); -DATA(insert OID = 2777 ( anynonarray_in PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 2776 "2275" _null_ _null_ _null_ _null_ _null_ anynonarray_in _null_ _null_ _null_ )); -DESCR("I/O"); -DATA(insert OID = 2778 ( anynonarray_out PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 
2275 "2776" _null_ _null_ _null_ _null_ _null_ anynonarray_out _null_ _null_ _null_ ));
-DESCR("I/O");
-DATA(insert OID = 3116 ( fdw_handler_in PGNSP PGUID 12 1 0 0 0 f f f f f f i s 1 0 3115 "2275" _null_ _null_ _null_ _null_ _null_ fdw_handler_in _null_ _null_ _null_ ));
-DESCR("I/O");
-DATA(insert OID = 3117 ( fdw_handler_out PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 2275 "3115" _null_ _null_ _null_ _null_ _null_ fdw_handler_out _null_ _null_ _null_ ));
-DESCR("I/O");
-DATA(insert OID = 326 ( index_am_handler_in PGNSP PGUID 12 1 0 0 0 f f f f f f i s 1 0 325 "2275" _null_ _null_ _null_ _null_ _null_ index_am_handler_in _null_ _null_ _null_ ));
-DESCR("I/O");
-DATA(insert OID = 327 ( index_am_handler_out PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 2275 "325" _null_ _null_ _null_ _null_ _null_ index_am_handler_out _null_ _null_ _null_ ));
-DESCR("I/O");
-DATA(insert OID = 3311 ( tsm_handler_in PGNSP PGUID 12 1 0 0 0 f f f f f f i s 1 0 3310 "2275" _null_ _null_ _null_ _null_ _null_ tsm_handler_in _null_ _null_ _null_ ));
-DESCR("I/O");
-DATA(insert OID = 3312 ( tsm_handler_out PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 2275 "3310" _null_ _null_ _null_ _null_ _null_ tsm_handler_out _null_ _null_ _null_ ));
-DESCR("I/O");
-
-/* tablesample method handlers */
-DATA(insert OID = 3313 ( bernoulli PGNSP PGUID 12 1 0 0 0 f f f f t f v s 1 0 3310 "2281" _null_ _null_ _null_ _null_ _null_ tsm_bernoulli_handler _null_ _null_ _null_ ));
-DESCR("BERNOULLI tablesample method handler");
-DATA(insert OID = 3314 ( system PGNSP PGUID 12 1 0 0 0 f f f f t f v s 1 0 3310 "2281" _null_ _null_ _null_ _null_ _null_ tsm_system_handler _null_ _null_ _null_ ));
-DESCR("SYSTEM tablesample method handler");
-
-/* cryptographic */
-DATA(insert OID = 2311 ( md5 PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 25 "25" _null_ _null_ _null_ _null_ _null_ md5_text _null_ _null_ _null_ ));
-DESCR("MD5 hash");
-DATA(insert OID = 2321 ( md5 PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 25 "17" _null_ _null_ _null_ _null_ _null_ md5_bytea _null_ _null_ _null_ ));
-DESCR("MD5 hash");
-
-/* crosstype operations for date vs.
timestamp and timestamptz */ -DATA(insert OID = 2338 ( date_lt_timestamp PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 16 "1082 1114" _null_ _null_ _null_ _null_ _null_ date_lt_timestamp _null_ _null_ _null_ )); -DATA(insert OID = 2339 ( date_le_timestamp PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 16 "1082 1114" _null_ _null_ _null_ _null_ _null_ date_le_timestamp _null_ _null_ _null_ )); -DATA(insert OID = 2340 ( date_eq_timestamp PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 16 "1082 1114" _null_ _null_ _null_ _null_ _null_ date_eq_timestamp _null_ _null_ _null_ )); -DATA(insert OID = 2341 ( date_gt_timestamp PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 16 "1082 1114" _null_ _null_ _null_ _null_ _null_ date_gt_timestamp _null_ _null_ _null_ )); -DATA(insert OID = 2342 ( date_ge_timestamp PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 16 "1082 1114" _null_ _null_ _null_ _null_ _null_ date_ge_timestamp _null_ _null_ _null_ )); -DATA(insert OID = 2343 ( date_ne_timestamp PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 16 "1082 1114" _null_ _null_ _null_ _null_ _null_ date_ne_timestamp _null_ _null_ _null_ )); -DATA(insert OID = 2344 ( date_cmp_timestamp PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 23 "1082 1114" _null_ _null_ _null_ _null_ _null_ date_cmp_timestamp _null_ _null_ _null_ )); -DESCR("less-equal-greater"); - -DATA(insert OID = 2351 ( date_lt_timestamptz PGNSP PGUID 12 1 0 0 0 f f f f t f s s 2 0 16 "1082 1184" _null_ _null_ _null_ _null_ _null_ date_lt_timestamptz _null_ _null_ _null_ )); -DATA(insert OID = 2352 ( date_le_timestamptz PGNSP PGUID 12 1 0 0 0 f f f f t f s s 2 0 16 "1082 1184" _null_ _null_ _null_ _null_ _null_ date_le_timestamptz _null_ _null_ _null_ )); -DATA(insert OID = 2353 ( date_eq_timestamptz PGNSP PGUID 12 1 0 0 0 f f f f t f s s 2 0 16 "1082 1184" _null_ _null_ _null_ _null_ _null_ date_eq_timestamptz _null_ _null_ _null_ )); -DATA(insert OID = 2354 ( date_gt_timestamptz PGNSP PGUID 12 1 0 0 0 f f f f t f s s 2 0 16 "1082 1184" _null_ _null_ _null_ _null_ _null_ date_gt_timestamptz _null_ _null_ _null_ )); -DATA(insert OID = 2355 ( date_ge_timestamptz PGNSP PGUID 12 1 0 0 0 f f f f t f s s 2 0 16 "1082 1184" _null_ _null_ _null_ _null_ _null_ date_ge_timestamptz _null_ _null_ _null_ )); -DATA(insert OID = 2356 ( date_ne_timestamptz PGNSP PGUID 12 1 0 0 0 f f f f t f s s 2 0 16 "1082 1184" _null_ _null_ _null_ _null_ _null_ date_ne_timestamptz _null_ _null_ _null_ )); -DATA(insert OID = 2357 ( date_cmp_timestamptz PGNSP PGUID 12 1 0 0 0 f f f f t f s s 2 0 23 "1082 1184" _null_ _null_ _null_ _null_ _null_ date_cmp_timestamptz _null_ _null_ _null_ )); -DESCR("less-equal-greater"); - -DATA(insert OID = 2364 ( timestamp_lt_date PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 16 "1114 1082" _null_ _null_ _null_ _null_ _null_ timestamp_lt_date _null_ _null_ _null_ )); -DATA(insert OID = 2365 ( timestamp_le_date PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 16 "1114 1082" _null_ _null_ _null_ _null_ _null_ timestamp_le_date _null_ _null_ _null_ )); -DATA(insert OID = 2366 ( timestamp_eq_date PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 16 "1114 1082" _null_ _null_ _null_ _null_ _null_ timestamp_eq_date _null_ _null_ _null_ )); -DATA(insert OID = 2367 ( timestamp_gt_date PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 16 "1114 1082" _null_ _null_ _null_ _null_ _null_ timestamp_gt_date _null_ _null_ _null_ )); -DATA(insert OID = 2368 ( timestamp_ge_date PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 16 "1114 1082" _null_ _null_ _null_ _null_ _null_ timestamp_ge_date _null_ _null_ _null_ )); 
-DATA(insert OID = 2369 ( timestamp_ne_date PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 16 "1114 1082" _null_ _null_ _null_ _null_ _null_ timestamp_ne_date _null_ _null_ _null_ )); -DATA(insert OID = 2370 ( timestamp_cmp_date PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 23 "1114 1082" _null_ _null_ _null_ _null_ _null_ timestamp_cmp_date _null_ _null_ _null_ )); -DESCR("less-equal-greater"); - -DATA(insert OID = 2377 ( timestamptz_lt_date PGNSP PGUID 12 1 0 0 0 f f f f t f s s 2 0 16 "1184 1082" _null_ _null_ _null_ _null_ _null_ timestamptz_lt_date _null_ _null_ _null_ )); -DATA(insert OID = 2378 ( timestamptz_le_date PGNSP PGUID 12 1 0 0 0 f f f f t f s s 2 0 16 "1184 1082" _null_ _null_ _null_ _null_ _null_ timestamptz_le_date _null_ _null_ _null_ )); -DATA(insert OID = 2379 ( timestamptz_eq_date PGNSP PGUID 12 1 0 0 0 f f f f t f s s 2 0 16 "1184 1082" _null_ _null_ _null_ _null_ _null_ timestamptz_eq_date _null_ _null_ _null_ )); -DATA(insert OID = 2380 ( timestamptz_gt_date PGNSP PGUID 12 1 0 0 0 f f f f t f s s 2 0 16 "1184 1082" _null_ _null_ _null_ _null_ _null_ timestamptz_gt_date _null_ _null_ _null_ )); -DATA(insert OID = 2381 ( timestamptz_ge_date PGNSP PGUID 12 1 0 0 0 f f f f t f s s 2 0 16 "1184 1082" _null_ _null_ _null_ _null_ _null_ timestamptz_ge_date _null_ _null_ _null_ )); -DATA(insert OID = 2382 ( timestamptz_ne_date PGNSP PGUID 12 1 0 0 0 f f f f t f s s 2 0 16 "1184 1082" _null_ _null_ _null_ _null_ _null_ timestamptz_ne_date _null_ _null_ _null_ )); -DATA(insert OID = 2383 ( timestamptz_cmp_date PGNSP PGUID 12 1 0 0 0 f f f f t f s s 2 0 23 "1184 1082" _null_ _null_ _null_ _null_ _null_ timestamptz_cmp_date _null_ _null_ _null_ )); -DESCR("less-equal-greater"); - -/* crosstype operations for timestamp vs. timestamptz */ -DATA(insert OID = 2520 ( timestamp_lt_timestamptz PGNSP PGUID 12 1 0 0 0 f f f f t f s s 2 0 16 "1114 1184" _null_ _null_ _null_ _null_ _null_ timestamp_lt_timestamptz _null_ _null_ _null_ )); -DATA(insert OID = 2521 ( timestamp_le_timestamptz PGNSP PGUID 12 1 0 0 0 f f f f t f s s 2 0 16 "1114 1184" _null_ _null_ _null_ _null_ _null_ timestamp_le_timestamptz _null_ _null_ _null_ )); -DATA(insert OID = 2522 ( timestamp_eq_timestamptz PGNSP PGUID 12 1 0 0 0 f f f f t f s s 2 0 16 "1114 1184" _null_ _null_ _null_ _null_ _null_ timestamp_eq_timestamptz _null_ _null_ _null_ )); -DATA(insert OID = 2523 ( timestamp_gt_timestamptz PGNSP PGUID 12 1 0 0 0 f f f f t f s s 2 0 16 "1114 1184" _null_ _null_ _null_ _null_ _null_ timestamp_gt_timestamptz _null_ _null_ _null_ )); -DATA(insert OID = 2524 ( timestamp_ge_timestamptz PGNSP PGUID 12 1 0 0 0 f f f f t f s s 2 0 16 "1114 1184" _null_ _null_ _null_ _null_ _null_ timestamp_ge_timestamptz _null_ _null_ _null_ )); -DATA(insert OID = 2525 ( timestamp_ne_timestamptz PGNSP PGUID 12 1 0 0 0 f f f f t f s s 2 0 16 "1114 1184" _null_ _null_ _null_ _null_ _null_ timestamp_ne_timestamptz _null_ _null_ _null_ )); -DATA(insert OID = 2526 ( timestamp_cmp_timestamptz PGNSP PGUID 12 1 0 0 0 f f f f t f s s 2 0 23 "1114 1184" _null_ _null_ _null_ _null_ _null_ timestamp_cmp_timestamptz _null_ _null_ _null_ )); -DESCR("less-equal-greater"); - -DATA(insert OID = 2527 ( timestamptz_lt_timestamp PGNSP PGUID 12 1 0 0 0 f f f f t f s s 2 0 16 "1184 1114" _null_ _null_ _null_ _null_ _null_ timestamptz_lt_timestamp _null_ _null_ _null_ )); -DATA(insert OID = 2528 ( timestamptz_le_timestamp PGNSP PGUID 12 1 0 0 0 f f f f t f s s 2 0 16 "1184 1114" _null_ _null_ _null_ _null_ _null_ timestamptz_le_timestamp _null_ _null_ _null_ )); 
-DATA(insert OID = 2529 ( timestamptz_eq_timestamp PGNSP PGUID 12 1 0 0 0 f f f f t f s s 2 0 16 "1184 1114" _null_ _null_ _null_ _null_ _null_ timestamptz_eq_timestamp _null_ _null_ _null_ )); -DATA(insert OID = 2530 ( timestamptz_gt_timestamp PGNSP PGUID 12 1 0 0 0 f f f f t f s s 2 0 16 "1184 1114" _null_ _null_ _null_ _null_ _null_ timestamptz_gt_timestamp _null_ _null_ _null_ )); -DATA(insert OID = 2531 ( timestamptz_ge_timestamp PGNSP PGUID 12 1 0 0 0 f f f f t f s s 2 0 16 "1184 1114" _null_ _null_ _null_ _null_ _null_ timestamptz_ge_timestamp _null_ _null_ _null_ )); -DATA(insert OID = 2532 ( timestamptz_ne_timestamp PGNSP PGUID 12 1 0 0 0 f f f f t f s s 2 0 16 "1184 1114" _null_ _null_ _null_ _null_ _null_ timestamptz_ne_timestamp _null_ _null_ _null_ )); -DATA(insert OID = 2533 ( timestamptz_cmp_timestamp PGNSP PGUID 12 1 0 0 0 f f f f t f s s 2 0 23 "1184 1114" _null_ _null_ _null_ _null_ _null_ timestamptz_cmp_timestamp _null_ _null_ _null_ )); -DESCR("less-equal-greater"); - - -/* send/receive functions */ -DATA(insert OID = 2400 ( array_recv PGNSP PGUID 12 1 0 0 0 f f f f t f s s 3 0 2277 "2281 26 23" _null_ _null_ _null_ _null_ _null_ array_recv _null_ _null_ _null_ )); -DESCR("I/O"); -DATA(insert OID = 2401 ( array_send PGNSP PGUID 12 1 0 0 0 f f f f t f s s 1 0 17 "2277" _null_ _null_ _null_ _null_ _null_ array_send _null_ _null_ _null_ )); -DESCR("I/O"); -DATA(insert OID = 2402 ( record_recv PGNSP PGUID 12 1 0 0 0 f f f f t f s s 3 0 2249 "2281 26 23" _null_ _null_ _null_ _null_ _null_ record_recv _null_ _null_ _null_ )); -DESCR("I/O"); -DATA(insert OID = 2403 ( record_send PGNSP PGUID 12 1 0 0 0 f f f f t f s s 1 0 17 "2249" _null_ _null_ _null_ _null_ _null_ record_send _null_ _null_ _null_ )); -DESCR("I/O"); -DATA(insert OID = 2404 ( int2recv PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 21 "2281" _null_ _null_ _null_ _null_ _null_ int2recv _null_ _null_ _null_ )); -DESCR("I/O"); -DATA(insert OID = 2405 ( int2send PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 17 "21" _null_ _null_ _null_ _null_ _null_ int2send _null_ _null_ _null_ )); -DESCR("I/O"); -DATA(insert OID = 2406 ( int4recv PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 23 "2281" _null_ _null_ _null_ _null_ _null_ int4recv _null_ _null_ _null_ )); -DESCR("I/O"); -DATA(insert OID = 2407 ( int4send PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 17 "23" _null_ _null_ _null_ _null_ _null_ int4send _null_ _null_ _null_ )); -DESCR("I/O"); -DATA(insert OID = 2408 ( int8recv PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 20 "2281" _null_ _null_ _null_ _null_ _null_ int8recv _null_ _null_ _null_ )); -DESCR("I/O"); -DATA(insert OID = 2409 ( int8send PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 17 "20" _null_ _null_ _null_ _null_ _null_ int8send _null_ _null_ _null_ )); -DESCR("I/O"); -DATA(insert OID = 2410 ( int2vectorrecv PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 22 "2281" _null_ _null_ _null_ _null_ _null_ int2vectorrecv _null_ _null_ _null_ )); -DESCR("I/O"); -DATA(insert OID = 2411 ( int2vectorsend PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 17 "22" _null_ _null_ _null_ _null_ _null_ int2vectorsend _null_ _null_ _null_ )); -DESCR("I/O"); -DATA(insert OID = 2412 ( bytearecv PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 17 "2281" _null_ _null_ _null_ _null_ _null_ bytearecv _null_ _null_ _null_ )); -DESCR("I/O"); -DATA(insert OID = 2413 ( byteasend PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 17 "17" _null_ _null_ _null_ _null_ _null_ byteasend _null_ _null_ _null_ )); -DESCR("I/O"); -DATA(insert OID = 2414 ( textrecv PGNSP PGUID 12 1 0 0 
0 f f f f t f s s 1 0 25 "2281" _null_ _null_ _null_ _null_ _null_ textrecv _null_ _null_ _null_ )); -DESCR("I/O"); -DATA(insert OID = 2415 ( textsend PGNSP PGUID 12 1 0 0 0 f f f f t f s s 1 0 17 "25" _null_ _null_ _null_ _null_ _null_ textsend _null_ _null_ _null_ )); -DESCR("I/O"); -DATA(insert OID = 2416 ( unknownrecv PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 705 "2281" _null_ _null_ _null_ _null_ _null_ unknownrecv _null_ _null_ _null_ )); -DESCR("I/O"); -DATA(insert OID = 2417 ( unknownsend PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 17 "705" _null_ _null_ _null_ _null_ _null_ unknownsend _null_ _null_ _null_ )); -DESCR("I/O"); -DATA(insert OID = 2418 ( oidrecv PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 26 "2281" _null_ _null_ _null_ _null_ _null_ oidrecv _null_ _null_ _null_ )); -DESCR("I/O"); -DATA(insert OID = 2419 ( oidsend PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 17 "26" _null_ _null_ _null_ _null_ _null_ oidsend _null_ _null_ _null_ )); -DESCR("I/O"); -DATA(insert OID = 2420 ( oidvectorrecv PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 30 "2281" _null_ _null_ _null_ _null_ _null_ oidvectorrecv _null_ _null_ _null_ )); -DESCR("I/O"); -DATA(insert OID = 2421 ( oidvectorsend PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 17 "30" _null_ _null_ _null_ _null_ _null_ oidvectorsend _null_ _null_ _null_ )); -DESCR("I/O"); -DATA(insert OID = 2422 ( namerecv PGNSP PGUID 12 1 0 0 0 f f f f t f s s 1 0 19 "2281" _null_ _null_ _null_ _null_ _null_ namerecv _null_ _null_ _null_ )); -DESCR("I/O"); -DATA(insert OID = 2423 ( namesend PGNSP PGUID 12 1 0 0 0 f f f f t f s s 1 0 17 "19" _null_ _null_ _null_ _null_ _null_ namesend _null_ _null_ _null_ )); -DESCR("I/O"); -DATA(insert OID = 2424 ( float4recv PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 700 "2281" _null_ _null_ _null_ _null_ _null_ float4recv _null_ _null_ _null_ )); -DESCR("I/O"); -DATA(insert OID = 2425 ( float4send PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 17 "700" _null_ _null_ _null_ _null_ _null_ float4send _null_ _null_ _null_ )); -DESCR("I/O"); -DATA(insert OID = 2426 ( float8recv PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 701 "2281" _null_ _null_ _null_ _null_ _null_ float8recv _null_ _null_ _null_ )); -DESCR("I/O"); -DATA(insert OID = 2427 ( float8send PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 17 "701" _null_ _null_ _null_ _null_ _null_ float8send _null_ _null_ _null_ )); -DESCR("I/O"); -DATA(insert OID = 2428 ( point_recv PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 600 "2281" _null_ _null_ _null_ _null_ _null_ point_recv _null_ _null_ _null_ )); -DESCR("I/O"); -DATA(insert OID = 2429 ( point_send PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 17 "600" _null_ _null_ _null_ _null_ _null_ point_send _null_ _null_ _null_ )); -DESCR("I/O"); -DATA(insert OID = 2430 ( bpcharrecv PGNSP PGUID 12 1 0 0 0 f f f f t f s s 3 0 1042 "2281 26 23" _null_ _null_ _null_ _null_ _null_ bpcharrecv _null_ _null_ _null_ )); -DESCR("I/O"); -DATA(insert OID = 2431 ( bpcharsend PGNSP PGUID 12 1 0 0 0 f f f f t f s s 1 0 17 "1042" _null_ _null_ _null_ _null_ _null_ bpcharsend _null_ _null_ _null_ )); -DESCR("I/O"); -DATA(insert OID = 2432 ( varcharrecv PGNSP PGUID 12 1 0 0 0 f f f f t f s s 3 0 1043 "2281 26 23" _null_ _null_ _null_ _null_ _null_ varcharrecv _null_ _null_ _null_ )); -DESCR("I/O"); -DATA(insert OID = 2433 ( varcharsend PGNSP PGUID 12 1 0 0 0 f f f f t f s s 1 0 17 "1043" _null_ _null_ _null_ _null_ _null_ varcharsend _null_ _null_ _null_ )); -DESCR("I/O"); -DATA(insert OID = 2434 ( charrecv PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 18 "2281" 
_null_ _null_ _null_ _null_ _null_ charrecv _null_ _null_ _null_ )); -DESCR("I/O"); -DATA(insert OID = 2435 ( charsend PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 17 "18" _null_ _null_ _null_ _null_ _null_ charsend _null_ _null_ _null_ )); -DESCR("I/O"); -DATA(insert OID = 2436 ( boolrecv PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 16 "2281" _null_ _null_ _null_ _null_ _null_ boolrecv _null_ _null_ _null_ )); -DESCR("I/O"); -DATA(insert OID = 2437 ( boolsend PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 17 "16" _null_ _null_ _null_ _null_ _null_ boolsend _null_ _null_ _null_ )); -DESCR("I/O"); -DATA(insert OID = 2438 ( tidrecv PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 27 "2281" _null_ _null_ _null_ _null_ _null_ tidrecv _null_ _null_ _null_ )); -DESCR("I/O"); -DATA(insert OID = 2439 ( tidsend PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 17 "27" _null_ _null_ _null_ _null_ _null_ tidsend _null_ _null_ _null_ )); -DESCR("I/O"); -DATA(insert OID = 2440 ( xidrecv PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 28 "2281" _null_ _null_ _null_ _null_ _null_ xidrecv _null_ _null_ _null_ )); -DESCR("I/O"); -DATA(insert OID = 2441 ( xidsend PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 17 "28" _null_ _null_ _null_ _null_ _null_ xidsend _null_ _null_ _null_ )); -DESCR("I/O"); -DATA(insert OID = 2442 ( cidrecv PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 29 "2281" _null_ _null_ _null_ _null_ _null_ cidrecv _null_ _null_ _null_ )); -DESCR("I/O"); -DATA(insert OID = 2443 ( cidsend PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 17 "29" _null_ _null_ _null_ _null_ _null_ cidsend _null_ _null_ _null_ )); -DESCR("I/O"); -DATA(insert OID = 2444 ( regprocrecv PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 24 "2281" _null_ _null_ _null_ _null_ _null_ regprocrecv _null_ _null_ _null_ )); -DESCR("I/O"); -DATA(insert OID = 2445 ( regprocsend PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 17 "24" _null_ _null_ _null_ _null_ _null_ regprocsend _null_ _null_ _null_ )); -DESCR("I/O"); -DATA(insert OID = 2446 ( regprocedurerecv PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 2202 "2281" _null_ _null_ _null_ _null_ _null_ regprocedurerecv _null_ _null_ _null_ )); -DESCR("I/O"); -DATA(insert OID = 2447 ( regproceduresend PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 17 "2202" _null_ _null_ _null_ _null_ _null_ regproceduresend _null_ _null_ _null_ )); -DESCR("I/O"); -DATA(insert OID = 2448 ( regoperrecv PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 2203 "2281" _null_ _null_ _null_ _null_ _null_ regoperrecv _null_ _null_ _null_ )); -DESCR("I/O"); -DATA(insert OID = 2449 ( regopersend PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 17 "2203" _null_ _null_ _null_ _null_ _null_ regopersend _null_ _null_ _null_ )); -DESCR("I/O"); -DATA(insert OID = 2450 ( regoperatorrecv PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 2204 "2281" _null_ _null_ _null_ _null_ _null_ regoperatorrecv _null_ _null_ _null_ )); -DESCR("I/O"); -DATA(insert OID = 2451 ( regoperatorsend PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 17 "2204" _null_ _null_ _null_ _null_ _null_ regoperatorsend _null_ _null_ _null_ )); -DESCR("I/O"); -DATA(insert OID = 2452 ( regclassrecv PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 2205 "2281" _null_ _null_ _null_ _null_ _null_ regclassrecv _null_ _null_ _null_ )); -DESCR("I/O"); -DATA(insert OID = 2453 ( regclasssend PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 17 "2205" _null_ _null_ _null_ _null_ _null_ regclasssend _null_ _null_ _null_ )); -DESCR("I/O"); -DATA(insert OID = 2454 ( regtyperecv PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 2206 "2281" _null_ _null_ _null_ 
_null_ _null_ regtyperecv _null_ _null_ _null_ )); -DESCR("I/O"); -DATA(insert OID = 2455 ( regtypesend PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 17 "2206" _null_ _null_ _null_ _null_ _null_ regtypesend _null_ _null_ _null_ )); -DESCR("I/O"); - -DATA(insert OID = 4094 ( regrolerecv PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 4096 "2281" _null_ _null_ _null_ _null_ _null_ regrolerecv _null_ _null_ _null_ )); -DESCR("I/O"); -DATA(insert OID = 4095 ( regrolesend PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 17 "4096" _null_ _null_ _null_ _null_ _null_ regrolesend _null_ _null_ _null_ )); -DESCR("I/O"); -DATA(insert OID = 4087 ( regnamespacerecv PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 4089 "2281" _null_ _null_ _null_ _null_ _null_ regnamespacerecv _null_ _null_ _null_ )); -DESCR("I/O"); -DATA(insert OID = 4088 ( regnamespacesend PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 17 "4089" _null_ _null_ _null_ _null_ _null_ regnamespacesend _null_ _null_ _null_ )); -DESCR("I/O"); -DATA(insert OID = 2456 ( bit_recv PGNSP PGUID 12 1 0 0 0 f f f f t f i s 3 0 1560 "2281 26 23" _null_ _null_ _null_ _null_ _null_ bit_recv _null_ _null_ _null_ )); -DESCR("I/O"); -DATA(insert OID = 2457 ( bit_send PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 17 "1560" _null_ _null_ _null_ _null_ _null_ bit_send _null_ _null_ _null_ )); -DESCR("I/O"); -DATA(insert OID = 2458 ( varbit_recv PGNSP PGUID 12 1 0 0 0 f f f f t f i s 3 0 1562 "2281 26 23" _null_ _null_ _null_ _null_ _null_ varbit_recv _null_ _null_ _null_ )); -DESCR("I/O"); -DATA(insert OID = 2459 ( varbit_send PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 17 "1562" _null_ _null_ _null_ _null_ _null_ varbit_send _null_ _null_ _null_ )); -DESCR("I/O"); -DATA(insert OID = 2460 ( numeric_recv PGNSP PGUID 12 1 0 0 0 f f f f t f i s 3 0 1700 "2281 26 23" _null_ _null_ _null_ _null_ _null_ numeric_recv _null_ _null_ _null_ )); -DESCR("I/O"); -DATA(insert OID = 2461 ( numeric_send PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 17 "1700" _null_ _null_ _null_ _null_ _null_ numeric_send _null_ _null_ _null_ )); -DESCR("I/O"); -DATA(insert OID = 2462 ( abstimerecv PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 702 "2281" _null_ _null_ _null_ _null_ _null_ abstimerecv _null_ _null_ _null_ )); -DESCR("I/O"); -DATA(insert OID = 2463 ( abstimesend PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 17 "702" _null_ _null_ _null_ _null_ _null_ abstimesend _null_ _null_ _null_ )); -DESCR("I/O"); -DATA(insert OID = 2464 ( reltimerecv PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 703 "2281" _null_ _null_ _null_ _null_ _null_ reltimerecv _null_ _null_ _null_ )); -DESCR("I/O"); -DATA(insert OID = 2465 ( reltimesend PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 17 "703" _null_ _null_ _null_ _null_ _null_ reltimesend _null_ _null_ _null_ )); -DESCR("I/O"); -DATA(insert OID = 2466 ( tintervalrecv PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 704 "2281" _null_ _null_ _null_ _null_ _null_ tintervalrecv _null_ _null_ _null_ )); -DESCR("I/O"); -DATA(insert OID = 2467 ( tintervalsend PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 17 "704" _null_ _null_ _null_ _null_ _null_ tintervalsend _null_ _null_ _null_ )); -DESCR("I/O"); -DATA(insert OID = 2468 ( date_recv PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 1082 "2281" _null_ _null_ _null_ _null_ _null_ date_recv _null_ _null_ _null_ )); -DESCR("I/O"); -DATA(insert OID = 2469 ( date_send PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 17 "1082" _null_ _null_ _null_ _null_ _null_ date_send _null_ _null_ _null_ )); -DESCR("I/O"); -DATA(insert OID = 2470 ( time_recv PGNSP PGUID 12 1 0 0 0 f f 
f f t f i s 3 0 1083 "2281 26 23" _null_ _null_ _null_ _null_ _null_ time_recv _null_ _null_ _null_ )); -DESCR("I/O"); -DATA(insert OID = 2471 ( time_send PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 17 "1083" _null_ _null_ _null_ _null_ _null_ time_send _null_ _null_ _null_ )); -DESCR("I/O"); -DATA(insert OID = 2472 ( timetz_recv PGNSP PGUID 12 1 0 0 0 f f f f t f i s 3 0 1266 "2281 26 23" _null_ _null_ _null_ _null_ _null_ timetz_recv _null_ _null_ _null_ )); -DESCR("I/O"); -DATA(insert OID = 2473 ( timetz_send PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 17 "1266" _null_ _null_ _null_ _null_ _null_ timetz_send _null_ _null_ _null_ )); -DESCR("I/O"); -DATA(insert OID = 2474 ( timestamp_recv PGNSP PGUID 12 1 0 0 0 f f f f t f i s 3 0 1114 "2281 26 23" _null_ _null_ _null_ _null_ _null_ timestamp_recv _null_ _null_ _null_ )); -DESCR("I/O"); -DATA(insert OID = 2475 ( timestamp_send PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 17 "1114" _null_ _null_ _null_ _null_ _null_ timestamp_send _null_ _null_ _null_ )); -DESCR("I/O"); -DATA(insert OID = 2476 ( timestamptz_recv PGNSP PGUID 12 1 0 0 0 f f f f t f i s 3 0 1184 "2281 26 23" _null_ _null_ _null_ _null_ _null_ timestamptz_recv _null_ _null_ _null_ )); -DESCR("I/O"); -DATA(insert OID = 2477 ( timestamptz_send PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 17 "1184" _null_ _null_ _null_ _null_ _null_ timestamptz_send _null_ _null_ _null_ )); -DESCR("I/O"); -DATA(insert OID = 2478 ( interval_recv PGNSP PGUID 12 1 0 0 0 f f f f t f i s 3 0 1186 "2281 26 23" _null_ _null_ _null_ _null_ _null_ interval_recv _null_ _null_ _null_ )); -DESCR("I/O"); -DATA(insert OID = 2479 ( interval_send PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 17 "1186" _null_ _null_ _null_ _null_ _null_ interval_send _null_ _null_ _null_ )); -DESCR("I/O"); -DATA(insert OID = 2480 ( lseg_recv PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 601 "2281" _null_ _null_ _null_ _null_ _null_ lseg_recv _null_ _null_ _null_ )); -DESCR("I/O"); -DATA(insert OID = 2481 ( lseg_send PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 17 "601" _null_ _null_ _null_ _null_ _null_ lseg_send _null_ _null_ _null_ )); -DESCR("I/O"); -DATA(insert OID = 2482 ( path_recv PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 602 "2281" _null_ _null_ _null_ _null_ _null_ path_recv _null_ _null_ _null_ )); -DESCR("I/O"); -DATA(insert OID = 2483 ( path_send PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 17 "602" _null_ _null_ _null_ _null_ _null_ path_send _null_ _null_ _null_ )); -DESCR("I/O"); -DATA(insert OID = 2484 ( box_recv PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 603 "2281" _null_ _null_ _null_ _null_ _null_ box_recv _null_ _null_ _null_ )); -DESCR("I/O"); -DATA(insert OID = 2485 ( box_send PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 17 "603" _null_ _null_ _null_ _null_ _null_ box_send _null_ _null_ _null_ )); -DESCR("I/O"); -DATA(insert OID = 2486 ( poly_recv PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 604 "2281" _null_ _null_ _null_ _null_ _null_ poly_recv _null_ _null_ _null_ )); -DESCR("I/O"); -DATA(insert OID = 2487 ( poly_send PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 17 "604" _null_ _null_ _null_ _null_ _null_ poly_send _null_ _null_ _null_ )); -DESCR("I/O"); -DATA(insert OID = 2488 ( line_recv PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 628 "2281" _null_ _null_ _null_ _null_ _null_ line_recv _null_ _null_ _null_ )); -DESCR("I/O"); -DATA(insert OID = 2489 ( line_send PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 17 "628" _null_ _null_ _null_ _null_ _null_ line_send _null_ _null_ _null_ )); -DESCR("I/O"); -DATA(insert OID = 2490 ( 
circle_recv PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 718 "2281" _null_ _null_ _null_ _null_ _null_ circle_recv _null_ _null_ _null_ )); -DESCR("I/O"); -DATA(insert OID = 2491 ( circle_send PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 17 "718" _null_ _null_ _null_ _null_ _null_ circle_send _null_ _null_ _null_ )); -DESCR("I/O"); -DATA(insert OID = 2492 ( cash_recv PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 790 "2281" _null_ _null_ _null_ _null_ _null_ cash_recv _null_ _null_ _null_ )); -DESCR("I/O"); -DATA(insert OID = 2493 ( cash_send PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 17 "790" _null_ _null_ _null_ _null_ _null_ cash_send _null_ _null_ _null_ )); -DESCR("I/O"); -DATA(insert OID = 2494 ( macaddr_recv PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 829 "2281" _null_ _null_ _null_ _null_ _null_ macaddr_recv _null_ _null_ _null_ )); -DESCR("I/O"); -DATA(insert OID = 2495 ( macaddr_send PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 17 "829" _null_ _null_ _null_ _null_ _null_ macaddr_send _null_ _null_ _null_ )); -DESCR("I/O"); -DATA(insert OID = 2496 ( inet_recv PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 869 "2281" _null_ _null_ _null_ _null_ _null_ inet_recv _null_ _null_ _null_ )); -DESCR("I/O"); -DATA(insert OID = 2497 ( inet_send PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 17 "869" _null_ _null_ _null_ _null_ _null_ inet_send _null_ _null_ _null_ )); -DESCR("I/O"); -DATA(insert OID = 2498 ( cidr_recv PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 650 "2281" _null_ _null_ _null_ _null_ _null_ cidr_recv _null_ _null_ _null_ )); -DESCR("I/O"); -DATA(insert OID = 2499 ( cidr_send PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 17 "650" _null_ _null_ _null_ _null_ _null_ cidr_send _null_ _null_ _null_ )); -DESCR("I/O"); -DATA(insert OID = 2500 ( cstring_recv PGNSP PGUID 12 1 0 0 0 f f f f t f s s 1 0 2275 "2281" _null_ _null_ _null_ _null_ _null_ cstring_recv _null_ _null_ _null_ )); -DESCR("I/O"); -DATA(insert OID = 2501 ( cstring_send PGNSP PGUID 12 1 0 0 0 f f f f t f s s 1 0 17 "2275" _null_ _null_ _null_ _null_ _null_ cstring_send _null_ _null_ _null_ )); -DESCR("I/O"); -DATA(insert OID = 2502 ( anyarray_recv PGNSP PGUID 12 1 0 0 0 f f f f t f s s 1 0 2277 "2281" _null_ _null_ _null_ _null_ _null_ anyarray_recv _null_ _null_ _null_ )); -DESCR("I/O"); -DATA(insert OID = 2503 ( anyarray_send PGNSP PGUID 12 1 0 0 0 f f f f t f s s 1 0 17 "2277" _null_ _null_ _null_ _null_ _null_ anyarray_send _null_ _null_ _null_ )); -DESCR("I/O"); -DATA(insert OID = 3120 ( void_recv PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 2278 "2281" _null_ _null_ _null_ _null_ _null_ void_recv _null_ _null_ _null_ )); -DESCR("I/O"); -DATA(insert OID = 3121 ( void_send PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 17 "2278" _null_ _null_ _null_ _null_ _null_ void_send _null_ _null_ _null_ )); -DESCR("I/O"); -DATA(insert OID = 3446 ( macaddr8_recv PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 774 "2281" _null_ _null_ _null_ _null_ _null_ macaddr8_recv _null_ _null_ _null_ )); -DESCR("I/O"); -DATA(insert OID = 3447 ( macaddr8_send PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 17 "774" _null_ _null_ _null_ _null_ _null_ macaddr8_send _null_ _null_ _null_ )); -DESCR("I/O"); - -/* System-view support functions with pretty-print option */ -DATA(insert OID = 2504 ( pg_get_ruledef PGNSP PGUID 12 1 0 0 0 f f f f t f s s 2 0 25 "26 16" _null_ _null_ _null_ _null_ _null_ pg_get_ruledef_ext _null_ _null_ _null_ )); -DESCR("source text of a rule with pretty-print option"); -DATA(insert OID = 2505 ( pg_get_viewdef PGNSP PGUID 12 1 0 0 0 f f f f t f s r 2 
0 25 "25 16" _null_ _null_ _null_ _null_ _null_ pg_get_viewdef_name_ext _null_ _null_ _null_ )); -DESCR("select statement of a view with pretty-print option"); -DATA(insert OID = 2506 ( pg_get_viewdef PGNSP PGUID 12 1 0 0 0 f f f f t f s r 2 0 25 "26 16" _null_ _null_ _null_ _null_ _null_ pg_get_viewdef_ext _null_ _null_ _null_ )); -DESCR("select statement of a view with pretty-print option"); -DATA(insert OID = 3159 ( pg_get_viewdef PGNSP PGUID 12 1 0 0 0 f f f f t f s r 2 0 25 "26 23" _null_ _null_ _null_ _null_ _null_ pg_get_viewdef_wrap _null_ _null_ _null_ )); -DESCR("select statement of a view with pretty-printing and specified line wrapping"); -DATA(insert OID = 2507 ( pg_get_indexdef PGNSP PGUID 12 1 0 0 0 f f f f t f s s 3 0 25 "26 23 16" _null_ _null_ _null_ _null_ _null_ pg_get_indexdef_ext _null_ _null_ _null_ )); -DESCR("index description (full create statement or single expression) with pretty-print option"); -DATA(insert OID = 2508 ( pg_get_constraintdef PGNSP PGUID 12 1 0 0 0 f f f f t f s s 2 0 25 "26 16" _null_ _null_ _null_ _null_ _null_ pg_get_constraintdef_ext _null_ _null_ _null_ )); -DESCR("constraint description with pretty-print option"); -DATA(insert OID = 2509 ( pg_get_expr PGNSP PGUID 12 1 0 0 0 f f f f t f s s 3 0 25 "194 26 16" _null_ _null_ _null_ _null_ _null_ pg_get_expr_ext _null_ _null_ _null_ )); -DESCR("deparse an encoded expression with pretty-print option"); -DATA(insert OID = 2510 ( pg_prepared_statement PGNSP PGUID 12 1 1000 0 0 f f f f t t s r 0 0 2249 "" "{25,25,1184,2211,16}" "{o,o,o,o,o}" "{name,statement,prepare_time,parameter_types,from_sql}" _null_ _null_ pg_prepared_statement _null_ _null_ _null_ )); -DESCR("get the prepared statements for this session"); -DATA(insert OID = 2511 ( pg_cursor PGNSP PGUID 12 1 1000 0 0 f f f f t t s r 0 0 2249 "" "{25,25,16,16,16,1184}" "{o,o,o,o,o,o}" "{name,statement,is_holdable,is_binary,is_scrollable,creation_time}" _null_ _null_ pg_cursor _null_ _null_ _null_ )); -DESCR("get the open cursors for this session"); -DATA(insert OID = 2599 ( pg_timezone_abbrevs PGNSP PGUID 12 1 1000 0 0 f f f f t t s s 0 0 2249 "" "{25,1186,16}" "{o,o,o}" "{abbrev,utc_offset,is_dst}" _null_ _null_ pg_timezone_abbrevs _null_ _null_ _null_ )); -DESCR("get the available time zone abbreviations"); -DATA(insert OID = 2856 ( pg_timezone_names PGNSP PGUID 12 1 1000 0 0 f f f f t t s s 0 0 2249 "" "{25,25,1186,16}" "{o,o,o,o}" "{name,abbrev,utc_offset,is_dst}" _null_ _null_ pg_timezone_names _null_ _null_ _null_ )); -DESCR("get the available time zone names"); -DATA(insert OID = 2730 ( pg_get_triggerdef PGNSP PGUID 12 1 0 0 0 f f f f t f s s 2 0 25 "26 16" _null_ _null_ _null_ _null_ _null_ pg_get_triggerdef_ext _null_ _null_ _null_ )); -DESCR("trigger description with pretty-print option"); - -/* asynchronous notifications */ -DATA(insert OID = 3035 ( pg_listening_channels PGNSP PGUID 12 1 10 0 0 f f f f t t s r 0 0 25 "" _null_ _null_ _null_ _null_ _null_ pg_listening_channels _null_ _null_ _null_ )); -DESCR("get the channels that the current backend listens to"); -DATA(insert OID = 3036 ( pg_notify PGNSP PGUID 12 1 0 0 0 f f f f f f v r 2 0 2278 "25 25" _null_ _null_ _null_ _null_ _null_ pg_notify _null_ _null_ _null_ )); -DESCR("send a notification event"); -DATA(insert OID = 3296 ( pg_notification_queue_usage PGNSP PGUID 12 1 0 0 0 f f f f t f v s 0 0 701 "" _null_ _null_ _null_ _null_ _null_ pg_notification_queue_usage _null_ _null_ _null_ )); -DESCR("get the fraction of the asynchronous notification queue currently in use"); - 
-/* non-persistent series generator */ -DATA(insert OID = 1066 ( generate_series PGNSP PGUID 12 1 1000 0 0 f f f f t t i s 3 0 23 "23 23 23" _null_ _null_ _null_ _null_ _null_ generate_series_step_int4 _null_ _null_ _null_ )); -DESCR("non-persistent series generator"); -DATA(insert OID = 1067 ( generate_series PGNSP PGUID 12 1 1000 0 0 f f f f t t i s 2 0 23 "23 23" _null_ _null_ _null_ _null_ _null_ generate_series_int4 _null_ _null_ _null_ )); -DESCR("non-persistent series generator"); -DATA(insert OID = 1068 ( generate_series PGNSP PGUID 12 1 1000 0 0 f f f f t t i s 3 0 20 "20 20 20" _null_ _null_ _null_ _null_ _null_ generate_series_step_int8 _null_ _null_ _null_ )); -DESCR("non-persistent series generator"); -DATA(insert OID = 1069 ( generate_series PGNSP PGUID 12 1 1000 0 0 f f f f t t i s 2 0 20 "20 20" _null_ _null_ _null_ _null_ _null_ generate_series_int8 _null_ _null_ _null_ )); -DESCR("non-persistent series generator"); -DATA(insert OID = 3259 ( generate_series PGNSP PGUID 12 1 1000 0 0 f f f f t t i s 3 0 1700 "1700 1700 1700" _null_ _null_ _null_ _null_ _null_ generate_series_step_numeric _null_ _null_ _null_ )); -DESCR("non-persistent series generator"); -DATA(insert OID = 3260 ( generate_series PGNSP PGUID 12 1 1000 0 0 f f f f t t i s 2 0 1700 "1700 1700" _null_ _null_ _null_ _null_ _null_ generate_series_numeric _null_ _null_ _null_ )); -DESCR("non-persistent series generator"); -DATA(insert OID = 938 ( generate_series PGNSP PGUID 12 1 1000 0 0 f f f f t t i s 3 0 1114 "1114 1114 1186" _null_ _null_ _null_ _null_ _null_ generate_series_timestamp _null_ _null_ _null_ )); -DESCR("non-persistent series generator"); -DATA(insert OID = 939 ( generate_series PGNSP PGUID 12 1 1000 0 0 f f f f t t s s 3 0 1184 "1184 1184 1186" _null_ _null_ _null_ _null_ _null_ generate_series_timestamptz _null_ _null_ _null_ )); -DESCR("non-persistent series generator"); - -/* boolean aggregates */ -DATA(insert OID = 2515 ( booland_statefunc PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 16 "16 16" _null_ _null_ _null_ _null_ _null_ booland_statefunc _null_ _null_ _null_ )); -DESCR("aggregate transition function"); -DATA(insert OID = 2516 ( boolor_statefunc PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 16 "16 16" _null_ _null_ _null_ _null_ _null_ boolor_statefunc _null_ _null_ _null_ )); -DESCR("aggregate transition function"); -DATA(insert OID = 3496 ( bool_accum PGNSP PGUID 12 1 0 0 0 f f f f f f i s 2 0 2281 "2281 16" _null_ _null_ _null_ _null_ _null_ bool_accum _null_ _null_ _null_ )); -DESCR("aggregate transition function"); -DATA(insert OID = 3497 ( bool_accum_inv PGNSP PGUID 12 1 0 0 0 f f f f f f i s 2 0 2281 "2281 16" _null_ _null_ _null_ _null_ _null_ bool_accum_inv _null_ _null_ _null_ )); -DESCR("aggregate transition function"); -DATA(insert OID = 3498 ( bool_alltrue PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 16 "2281" _null_ _null_ _null_ _null_ _null_ bool_alltrue _null_ _null_ _null_ )); -DESCR("aggregate final function"); -DATA(insert OID = 3499 ( bool_anytrue PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 16 "2281" _null_ _null_ _null_ _null_ _null_ bool_anytrue _null_ _null_ _null_ )); -DESCR("aggregate final function"); -DATA(insert OID = 2517 ( bool_and PGNSP PGUID 12 1 0 0 0 t f f f f f i s 1 0 16 "16" _null_ _null_ _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ )); -DESCR("boolean-and aggregate"); -/* ANY, SOME? These names conflict with subquery operators. See doc. 
*/ -DATA(insert OID = 2518 ( bool_or PGNSP PGUID 12 1 0 0 0 t f f f f f i s 1 0 16 "16" _null_ _null_ _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ )); -DESCR("boolean-or aggregate"); -DATA(insert OID = 2519 ( every PGNSP PGUID 12 1 0 0 0 t f f f f f i s 1 0 16 "16" _null_ _null_ _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ )); -DESCR("boolean-and aggregate"); - -/* bitwise integer aggregates */ -DATA(insert OID = 2236 ( bit_and PGNSP PGUID 12 1 0 0 0 t f f f f f i s 1 0 21 "21" _null_ _null_ _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ )); -DESCR("bitwise-and smallint aggregate"); -DATA(insert OID = 2237 ( bit_or PGNSP PGUID 12 1 0 0 0 t f f f f f i s 1 0 21 "21" _null_ _null_ _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ )); -DESCR("bitwise-or smallint aggregate"); -DATA(insert OID = 2238 ( bit_and PGNSP PGUID 12 1 0 0 0 t f f f f f i s 1 0 23 "23" _null_ _null_ _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ )); -DESCR("bitwise-and integer aggregate"); -DATA(insert OID = 2239 ( bit_or PGNSP PGUID 12 1 0 0 0 t f f f f f i s 1 0 23 "23" _null_ _null_ _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ )); -DESCR("bitwise-or integer aggregate"); -DATA(insert OID = 2240 ( bit_and PGNSP PGUID 12 1 0 0 0 t f f f f f i s 1 0 20 "20" _null_ _null_ _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ )); -DESCR("bitwise-and bigint aggregate"); -DATA(insert OID = 2241 ( bit_or PGNSP PGUID 12 1 0 0 0 t f f f f f i s 1 0 20 "20" _null_ _null_ _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ )); -DESCR("bitwise-or bigint aggregate"); -DATA(insert OID = 2242 ( bit_and PGNSP PGUID 12 1 0 0 0 t f f f f f i s 1 0 1560 "1560" _null_ _null_ _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ )); -DESCR("bitwise-and bit aggregate"); -DATA(insert OID = 2243 ( bit_or PGNSP PGUID 12 1 0 0 0 t f f f f f i s 1 0 1560 "1560" _null_ _null_ _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ )); -DESCR("bitwise-or bit aggregate"); - -/* formerly-missing interval + datetime operators */ -DATA(insert OID = 2546 ( interval_pl_date PGNSP PGUID 14 1 0 0 0 f f f f t f i s 2 0 1114 "1186 1082" _null_ _null_ _null_ _null_ _null_ "select $2 + $1" _null_ _null_ _null_ )); -DATA(insert OID = 2547 ( interval_pl_timetz PGNSP PGUID 14 1 0 0 0 f f f f t f i s 2 0 1266 "1186 1266" _null_ _null_ _null_ _null_ _null_ "select $2 + $1" _null_ _null_ _null_ )); -DATA(insert OID = 2548 ( interval_pl_timestamp PGNSP PGUID 14 1 0 0 0 f f f f t f i s 2 0 1114 "1186 1114" _null_ _null_ _null_ _null_ _null_ "select $2 + $1" _null_ _null_ _null_ )); -DATA(insert OID = 2549 ( interval_pl_timestamptz PGNSP PGUID 14 1 0 0 0 f f f f t f s s 2 0 1184 "1186 1184" _null_ _null_ _null_ _null_ _null_ "select $2 + $1" _null_ _null_ _null_ )); -DATA(insert OID = 2550 ( integer_pl_date PGNSP PGUID 14 1 0 0 0 f f f f t f i s 2 0 1082 "23 1082" _null_ _null_ _null_ _null_ _null_ "select $2 + $1" _null_ _null_ _null_ )); - -DATA(insert OID = 2556 ( pg_tablespace_databases PGNSP PGUID 12 1 1000 0 0 f f f f t t s s 1 0 26 "26" _null_ _null_ _null_ _null_ _null_ pg_tablespace_databases _null_ _null_ _null_ )); -DESCR("get OIDs of databases in a tablespace"); - -DATA(insert OID = 2557 ( bool PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 16 "23" _null_ _null_ _null_ _null_ _null_ int4_bool _null_ _null_ _null_ )); -DESCR("convert int4 to boolean"); -DATA(insert OID = 2558 ( int4 PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 23 "16" _null_ _null_ _null_ _null_ _null_ bool_int4 
_null_ _null_ _null_ )); -DESCR("convert boolean to int4"); -DATA(insert OID = 2559 ( lastval PGNSP PGUID 12 1 0 0 0 f f f f t f v u 0 0 20 "" _null_ _null_ _null_ _null_ _null_ lastval _null_ _null_ _null_ )); -DESCR("current value from last used sequence"); - -/* start time function */ -DATA(insert OID = 2560 ( pg_postmaster_start_time PGNSP PGUID 12 1 0 0 0 f f f f t f s s 0 0 1184 "" _null_ _null_ _null_ _null_ _null_ pg_postmaster_start_time _null_ _null_ _null_ )); -DESCR("postmaster start time"); -/* config reload time function */ -DATA(insert OID = 2034 ( pg_conf_load_time PGNSP PGUID 12 1 0 0 0 f f f f t f s r 0 0 1184 "" _null_ _null_ _null_ _null_ _null_ pg_conf_load_time _null_ _null_ _null_ )); -DESCR("configuration load time"); - -/* new functions for Y-direction rtree opclasses */ -DATA(insert OID = 2562 ( box_below PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 16 "603 603" _null_ _null_ _null_ _null_ _null_ box_below _null_ _null_ _null_ )); -DATA(insert OID = 2563 ( box_overbelow PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 16 "603 603" _null_ _null_ _null_ _null_ _null_ box_overbelow _null_ _null_ _null_ )); -DATA(insert OID = 2564 ( box_overabove PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 16 "603 603" _null_ _null_ _null_ _null_ _null_ box_overabove _null_ _null_ _null_ )); -DATA(insert OID = 2565 ( box_above PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 16 "603 603" _null_ _null_ _null_ _null_ _null_ box_above _null_ _null_ _null_ )); -DATA(insert OID = 2566 ( poly_below PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 16 "604 604" _null_ _null_ _null_ _null_ _null_ poly_below _null_ _null_ _null_ )); -DATA(insert OID = 2567 ( poly_overbelow PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 16 "604 604" _null_ _null_ _null_ _null_ _null_ poly_overbelow _null_ _null_ _null_ )); -DATA(insert OID = 2568 ( poly_overabove PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 16 "604 604" _null_ _null_ _null_ _null_ _null_ poly_overabove _null_ _null_ _null_ )); -DATA(insert OID = 2569 ( poly_above PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 16 "604 604" _null_ _null_ _null_ _null_ _null_ poly_above _null_ _null_ _null_ )); -DATA(insert OID = 2587 ( circle_overbelow PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 16 "718 718" _null_ _null_ _null_ _null_ _null_ circle_overbelow _null_ _null_ _null_ )); -DATA(insert OID = 2588 ( circle_overabove PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 16 "718 718" _null_ _null_ _null_ _null_ _null_ circle_overabove _null_ _null_ _null_ )); - -/* support functions for GiST r-tree emulation */ -DATA(insert OID = 2578 ( gist_box_consistent PGNSP PGUID 12 1 0 0 0 f f f f t f i s 5 0 16 "2281 603 21 26 2281" _null_ _null_ _null_ _null_ _null_ gist_box_consistent _null_ _null_ _null_ )); -DESCR("GiST support"); -DATA(insert OID = 2579 ( gist_box_compress PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 2281 "2281" _null_ _null_ _null_ _null_ _null_ gist_box_compress _null_ _null_ _null_ )); -DESCR("GiST support"); -DATA(insert OID = 2580 ( gist_box_decompress PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 2281 "2281" _null_ _null_ _null_ _null_ _null_ gist_box_decompress _null_ _null_ _null_ )); -DESCR("GiST support"); -DATA(insert OID = 3281 ( gist_box_fetch PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 2281 "2281" _null_ _null_ _null_ _null_ _null_ gist_box_fetch _null_ _null_ _null_ )); -DESCR("GiST support"); -DATA(insert OID = 2581 ( gist_box_penalty PGNSP PGUID 12 1 0 0 0 f f f f t f i s 3 0 2281 "2281 2281 2281" _null_ _null_ _null_ _null_ _null_ gist_box_penalty _null_ _null_ _null_ )); 
-DESCR("GiST support"); -DATA(insert OID = 2582 ( gist_box_picksplit PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 2281 "2281 2281" _null_ _null_ _null_ _null_ _null_ gist_box_picksplit _null_ _null_ _null_ )); -DESCR("GiST support"); -DATA(insert OID = 2583 ( gist_box_union PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 603 "2281 2281" _null_ _null_ _null_ _null_ _null_ gist_box_union _null_ _null_ _null_ )); -DESCR("GiST support"); -DATA(insert OID = 2584 ( gist_box_same PGNSP PGUID 12 1 0 0 0 f f f f t f i s 3 0 2281 "603 603 2281" _null_ _null_ _null_ _null_ _null_ gist_box_same _null_ _null_ _null_ )); -DESCR("GiST support"); -DATA(insert OID = 2585 ( gist_poly_consistent PGNSP PGUID 12 1 0 0 0 f f f f t f i s 5 0 16 "2281 604 21 26 2281" _null_ _null_ _null_ _null_ _null_ gist_poly_consistent _null_ _null_ _null_ )); -DESCR("GiST support"); -DATA(insert OID = 2586 ( gist_poly_compress PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 2281 "2281" _null_ _null_ _null_ _null_ _null_ gist_poly_compress _null_ _null_ _null_ )); -DESCR("GiST support"); -DATA(insert OID = 2591 ( gist_circle_consistent PGNSP PGUID 12 1 0 0 0 f f f f t f i s 5 0 16 "2281 718 21 26 2281" _null_ _null_ _null_ _null_ _null_ gist_circle_consistent _null_ _null_ _null_ )); -DESCR("GiST support"); -DATA(insert OID = 2592 ( gist_circle_compress PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 2281 "2281" _null_ _null_ _null_ _null_ _null_ gist_circle_compress _null_ _null_ _null_ )); -DESCR("GiST support"); -DATA(insert OID = 1030 ( gist_point_compress PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 2281 "2281" _null_ _null_ _null_ _null_ _null_ gist_point_compress _null_ _null_ _null_ )); -DESCR("GiST support"); -DATA(insert OID = 3282 ( gist_point_fetch PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 2281 "2281" _null_ _null_ _null_ _null_ _null_ gist_point_fetch _null_ _null_ _null_ )); -DESCR("GiST support"); -DATA(insert OID = 2179 ( gist_point_consistent PGNSP PGUID 12 1 0 0 0 f f f f t f i s 5 0 16 "2281 600 21 26 2281" _null_ _null_ _null_ _null_ _null_ gist_point_consistent _null_ _null_ _null_ )); -DESCR("GiST support"); -DATA(insert OID = 3064 ( gist_point_distance PGNSP PGUID 12 1 0 0 0 f f f f t f i s 5 0 701 "2281 600 21 26 2281" _null_ _null_ _null_ _null_ _null_ gist_point_distance _null_ _null_ _null_ )); -DESCR("GiST support"); -DATA(insert OID = 3280 ( gist_circle_distance PGNSP PGUID 12 1 0 0 0 f f f f t f i s 5 0 701 "2281 718 21 26 2281" _null_ _null_ _null_ _null_ _null_ gist_circle_distance _null_ _null_ _null_ )); -DESCR("GiST support"); -DATA(insert OID = 3288 ( gist_poly_distance PGNSP PGUID 12 1 0 0 0 f f f f t f i s 5 0 701 "2281 604 21 26 2281" _null_ _null_ _null_ _null_ _null_ gist_poly_distance _null_ _null_ _null_ )); -DESCR("GiST support"); - -/* GIN array support */ -DATA(insert OID = 2743 ( ginarrayextract PGNSP PGUID 12 1 0 0 0 f f f f t f i s 3 0 2281 "2277 2281 2281" _null_ _null_ _null_ _null_ _null_ ginarrayextract _null_ _null_ _null_ )); -DESCR("GIN array support"); -DATA(insert OID = 2774 ( ginqueryarrayextract PGNSP PGUID 12 1 0 0 0 f f f f t f i s 7 0 2281 "2277 2281 21 2281 2281 2281 2281" _null_ _null_ _null_ _null_ _null_ ginqueryarrayextract _null_ _null_ _null_ )); -DESCR("GIN array support"); -DATA(insert OID = 2744 ( ginarrayconsistent PGNSP PGUID 12 1 0 0 0 f f f f t f i s 8 0 16 "2281 21 2277 23 2281 2281 2281 2281" _null_ _null_ _null_ _null_ _null_ ginarrayconsistent _null_ _null_ _null_ )); -DESCR("GIN array support"); -DATA(insert OID = 3920 ( ginarraytriconsistent PGNSP PGUID 12 1 0 0 
0 f f f f t f i s 7 0 18 "2281 21 2277 23 2281 2281 2281" _null_ _null_ _null_ _null_ _null_ ginarraytriconsistent _null_ _null_ _null_ )); -DESCR("GIN array support"); -DATA(insert OID = 3076 ( ginarrayextract PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 2281 "2277 2281" _null_ _null_ _null_ _null_ _null_ ginarrayextract_2args _null_ _null_ _null_ )); -DESCR("GIN array support (obsolete)"); - -/* overlap/contains/contained */ -DATA(insert OID = 2747 ( arrayoverlap PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 16 "2277 2277" _null_ _null_ _null_ _null_ _null_ arrayoverlap _null_ _null_ _null_ )); -DATA(insert OID = 2748 ( arraycontains PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 16 "2277 2277" _null_ _null_ _null_ _null_ _null_ arraycontains _null_ _null_ _null_ )); -DATA(insert OID = 2749 ( arraycontained PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 16 "2277 2277" _null_ _null_ _null_ _null_ _null_ arraycontained _null_ _null_ _null_ )); - -/* BRIN minmax */ -DATA(insert OID = 3383 ( brin_minmax_opcinfo PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 2281 "2281" _null_ _null_ _null_ _null_ _null_ brin_minmax_opcinfo _null_ _null_ _null_ )); -DESCR("BRIN minmax support"); -DATA(insert OID = 3384 ( brin_minmax_add_value PGNSP PGUID 12 1 0 0 0 f f f f t f i s 4 0 16 "2281 2281 2281 2281" _null_ _null_ _null_ _null_ _null_ brin_minmax_add_value _null_ _null_ _null_ )); -DESCR("BRIN minmax support"); -DATA(insert OID = 3385 ( brin_minmax_consistent PGNSP PGUID 12 1 0 0 0 f f f f t f i s 3 0 16 "2281 2281 2281" _null_ _null_ _null_ _null_ _null_ brin_minmax_consistent _null_ _null_ _null_ )); -DESCR("BRIN minmax support"); -DATA(insert OID = 3386 ( brin_minmax_union PGNSP PGUID 12 1 0 0 0 f f f f t f i s 3 0 16 "2281 2281 2281" _null_ _null_ _null_ _null_ _null_ brin_minmax_union _null_ _null_ _null_ )); -DESCR("BRIN minmax support"); - -/* BRIN inclusion */ -DATA(insert OID = 4105 ( brin_inclusion_opcinfo PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 2281 "2281" _null_ _null_ _null_ _null_ _null_ brin_inclusion_opcinfo _null_ _null_ _null_ )); -DESCR("BRIN inclusion support"); -DATA(insert OID = 4106 ( brin_inclusion_add_value PGNSP PGUID 12 1 0 0 0 f f f f t f i s 4 0 16 "2281 2281 2281 2281" _null_ _null_ _null_ _null_ _null_ brin_inclusion_add_value _null_ _null_ _null_ )); -DESCR("BRIN inclusion support"); -DATA(insert OID = 4107 ( brin_inclusion_consistent PGNSP PGUID 12 1 0 0 0 f f f f t f i s 3 0 16 "2281 2281 2281" _null_ _null_ _null_ _null_ _null_ brin_inclusion_consistent _null_ _null_ _null_ )); -DESCR("BRIN inclusion support"); -DATA(insert OID = 4108 ( brin_inclusion_union PGNSP PGUID 12 1 0 0 0 f f f f t f i s 3 0 16 "2281 2281 2281" _null_ _null_ _null_ _null_ _null_ brin_inclusion_union _null_ _null_ _null_ )); -DESCR("BRIN inclusion support"); - -/* userlock replacements */ -DATA(insert OID = 2880 ( pg_advisory_lock PGNSP PGUID 12 1 0 0 0 f f f f t f v u 1 0 2278 "20" _null_ _null_ _null_ _null_ _null_ pg_advisory_lock_int8 _null_ _null_ _null_ )); -DESCR("obtain exclusive advisory lock"); -DATA(insert OID = 3089 ( pg_advisory_xact_lock PGNSP PGUID 12 1 0 0 0 f f f f t f v u 1 0 2278 "20" _null_ _null_ _null_ _null_ _null_ pg_advisory_xact_lock_int8 _null_ _null_ _null_ )); -DESCR("obtain exclusive advisory lock"); -DATA(insert OID = 2881 ( pg_advisory_lock_shared PGNSP PGUID 12 1 0 0 0 f f f f t f v u 1 0 2278 "20" _null_ _null_ _null_ _null_ _null_ pg_advisory_lock_shared_int8 _null_ _null_ _null_ )); -DESCR("obtain shared advisory lock"); -DATA(insert OID = 3090 ( 
pg_advisory_xact_lock_shared PGNSP PGUID 12 1 0 0 0 f f f f t f v u 1 0 2278 "20" _null_ _null_ _null_ _null_ _null_ pg_advisory_xact_lock_shared_int8 _null_ _null_ _null_ )); -DESCR("obtain shared advisory lock"); -DATA(insert OID = 2882 ( pg_try_advisory_lock PGNSP PGUID 12 1 0 0 0 f f f f t f v u 1 0 16 "20" _null_ _null_ _null_ _null_ _null_ pg_try_advisory_lock_int8 _null_ _null_ _null_ )); -DESCR("obtain exclusive advisory lock if available"); -DATA(insert OID = 3091 ( pg_try_advisory_xact_lock PGNSP PGUID 12 1 0 0 0 f f f f t f v u 1 0 16 "20" _null_ _null_ _null_ _null_ _null_ pg_try_advisory_xact_lock_int8 _null_ _null_ _null_ )); -DESCR("obtain exclusive advisory lock if available"); -DATA(insert OID = 2883 ( pg_try_advisory_lock_shared PGNSP PGUID 12 1 0 0 0 f f f f t f v u 1 0 16 "20" _null_ _null_ _null_ _null_ _null_ pg_try_advisory_lock_shared_int8 _null_ _null_ _null_ )); -DESCR("obtain shared advisory lock if available"); -DATA(insert OID = 3092 ( pg_try_advisory_xact_lock_shared PGNSP PGUID 12 1 0 0 0 f f f f t f v u 1 0 16 "20" _null_ _null_ _null_ _null_ _null_ pg_try_advisory_xact_lock_shared_int8 _null_ _null_ _null_ )); -DESCR("obtain shared advisory lock if available"); -DATA(insert OID = 2884 ( pg_advisory_unlock PGNSP PGUID 12 1 0 0 0 f f f f t f v u 1 0 16 "20" _null_ _null_ _null_ _null_ _null_ pg_advisory_unlock_int8 _null_ _null_ _null_ )); -DESCR("release exclusive advisory lock"); -DATA(insert OID = 2885 ( pg_advisory_unlock_shared PGNSP PGUID 12 1 0 0 0 f f f f t f v u 1 0 16 "20" _null_ _null_ _null_ _null_ _null_ pg_advisory_unlock_shared_int8 _null_ _null_ _null_ )); -DESCR("release shared advisory lock"); -DATA(insert OID = 2886 ( pg_advisory_lock PGNSP PGUID 12 1 0 0 0 f f f f t f v u 2 0 2278 "23 23" _null_ _null_ _null_ _null_ _null_ pg_advisory_lock_int4 _null_ _null_ _null_ )); -DESCR("obtain exclusive advisory lock"); -DATA(insert OID = 3093 ( pg_advisory_xact_lock PGNSP PGUID 12 1 0 0 0 f f f f t f v u 2 0 2278 "23 23" _null_ _null_ _null_ _null_ _null_ pg_advisory_xact_lock_int4 _null_ _null_ _null_ )); -DESCR("obtain exclusive advisory lock"); -DATA(insert OID = 2887 ( pg_advisory_lock_shared PGNSP PGUID 12 1 0 0 0 f f f f t f v u 2 0 2278 "23 23" _null_ _null_ _null_ _null_ _null_ pg_advisory_lock_shared_int4 _null_ _null_ _null_ )); -DESCR("obtain shared advisory lock"); -DATA(insert OID = 3094 ( pg_advisory_xact_lock_shared PGNSP PGUID 12 1 0 0 0 f f f f t f v u 2 0 2278 "23 23" _null_ _null_ _null_ _null_ _null_ pg_advisory_xact_lock_shared_int4 _null_ _null_ _null_ )); -DESCR("obtain shared advisory lock"); -DATA(insert OID = 2888 ( pg_try_advisory_lock PGNSP PGUID 12 1 0 0 0 f f f f t f v u 2 0 16 "23 23" _null_ _null_ _null_ _null_ _null_ pg_try_advisory_lock_int4 _null_ _null_ _null_ )); -DESCR("obtain exclusive advisory lock if available"); -DATA(insert OID = 3095 ( pg_try_advisory_xact_lock PGNSP PGUID 12 1 0 0 0 f f f f t f v u 2 0 16 "23 23" _null_ _null_ _null_ _null_ _null_ pg_try_advisory_xact_lock_int4 _null_ _null_ _null_ )); -DESCR("obtain exclusive advisory lock if available"); -DATA(insert OID = 2889 ( pg_try_advisory_lock_shared PGNSP PGUID 12 1 0 0 0 f f f f t f v u 2 0 16 "23 23" _null_ _null_ _null_ _null_ _null_ pg_try_advisory_lock_shared_int4 _null_ _null_ _null_ )); -DESCR("obtain shared advisory lock if available"); -DATA(insert OID = 3096 ( pg_try_advisory_xact_lock_shared PGNSP PGUID 12 1 0 0 0 f f f f t f v u 2 0 16 "23 23" _null_ _null_ _null_ _null_ _null_ pg_try_advisory_xact_lock_shared_int4 _null_ _null_ 
_null_ )); -DESCR("obtain shared advisory lock if available"); -DATA(insert OID = 2890 ( pg_advisory_unlock PGNSP PGUID 12 1 0 0 0 f f f f t f v u 2 0 16 "23 23" _null_ _null_ _null_ _null_ _null_ pg_advisory_unlock_int4 _null_ _null_ _null_ )); -DESCR("release exclusive advisory lock"); -DATA(insert OID = 2891 ( pg_advisory_unlock_shared PGNSP PGUID 12 1 0 0 0 f f f f t f v u 2 0 16 "23 23" _null_ _null_ _null_ _null_ _null_ pg_advisory_unlock_shared_int4 _null_ _null_ _null_ )); -DESCR("release shared advisory lock"); -DATA(insert OID = 2892 ( pg_advisory_unlock_all PGNSP PGUID 12 1 0 0 0 f f f f t f v u 0 0 2278 "" _null_ _null_ _null_ _null_ _null_ pg_advisory_unlock_all _null_ _null_ _null_ )); -DESCR("release all advisory locks"); - -/* XML support */ -DATA(insert OID = 2893 ( xml_in PGNSP PGUID 12 1 0 0 0 f f f f t f s s 1 0 142 "2275" _null_ _null_ _null_ _null_ _null_ xml_in _null_ _null_ _null_ )); -DESCR("I/O"); -DATA(insert OID = 2894 ( xml_out PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 2275 "142" _null_ _null_ _null_ _null_ _null_ xml_out _null_ _null_ _null_ )); -DESCR("I/O"); -DATA(insert OID = 2895 ( xmlcomment PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 142 "25" _null_ _null_ _null_ _null_ _null_ xmlcomment _null_ _null_ _null_ )); -DESCR("generate XML comment"); -DATA(insert OID = 2896 ( xml PGNSP PGUID 12 1 0 0 0 f f f f t f s s 1 0 142 "25" _null_ _null_ _null_ _null_ _null_ texttoxml _null_ _null_ _null_ )); -DESCR("perform a non-validating parse of a character string to produce an XML value"); -DATA(insert OID = 2897 ( xmlvalidate PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 16 "142 25" _null_ _null_ _null_ _null_ _null_ xmlvalidate _null_ _null_ _null_ )); -DESCR("validate an XML value"); -DATA(insert OID = 2898 ( xml_recv PGNSP PGUID 12 1 0 0 0 f f f f t f s s 1 0 142 "2281" _null_ _null_ _null_ _null_ _null_ xml_recv _null_ _null_ _null_ )); -DESCR("I/O"); -DATA(insert OID = 2899 ( xml_send PGNSP PGUID 12 1 0 0 0 f f f f t f s s 1 0 17 "142" _null_ _null_ _null_ _null_ _null_ xml_send _null_ _null_ _null_ )); -DESCR("I/O"); -DATA(insert OID = 2900 ( xmlconcat2 PGNSP PGUID 12 1 0 0 0 f f f f f f i s 2 0 142 "142 142" _null_ _null_ _null_ _null_ _null_ xmlconcat2 _null_ _null_ _null_ )); -DESCR("aggregate transition function"); -DATA(insert OID = 2901 ( xmlagg PGNSP PGUID 12 1 0 0 0 t f f f f f i s 1 0 142 "142" _null_ _null_ _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ )); -DESCR("concatenate XML values"); -DATA(insert OID = 2922 ( text PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 25 "142" _null_ _null_ _null_ _null_ _null_ xmltotext _null_ _null_ _null_ )); -DESCR("serialize an XML value to a character string"); - -DATA(insert OID = 2923 ( table_to_xml PGNSP PGUID 12 100 0 0 0 f f f f t f s r 4 0 142 "2205 16 16 25" _null_ _null_ "{tbl,nulls,tableforest,targetns}" _null_ _null_ table_to_xml _null_ _null_ _null_ )); -DESCR("map table contents to XML"); -DATA(insert OID = 2924 ( query_to_xml PGNSP PGUID 12 100 0 0 0 f f f f t f s u 4 0 142 "25 16 16 25" _null_ _null_ "{query,nulls,tableforest,targetns}" _null_ _null_ query_to_xml _null_ _null_ _null_ )); -DESCR("map query result to XML"); -DATA(insert OID = 2925 ( cursor_to_xml PGNSP PGUID 12 100 0 0 0 f f f f t f s r 5 0 142 "1790 23 16 16 25" _null_ _null_ "{cursor,count,nulls,tableforest,targetns}" _null_ _null_ cursor_to_xml _null_ _null_ _null_ )); -DESCR("map rows from cursor to XML"); -DATA(insert OID = 2926 ( table_to_xmlschema PGNSP PGUID 12 100 0 0 0 f f f f t f s r 4 0 142 "2205 16 16 25" _null_ 
_null_ "{tbl,nulls,tableforest,targetns}" _null_ _null_ table_to_xmlschema _null_ _null_ _null_ )); -DESCR("map table structure to XML Schema"); -DATA(insert OID = 2927 ( query_to_xmlschema PGNSP PGUID 12 100 0 0 0 f f f f t f s u 4 0 142 "25 16 16 25" _null_ _null_ "{query,nulls,tableforest,targetns}" _null_ _null_ query_to_xmlschema _null_ _null_ _null_ )); -DESCR("map query result structure to XML Schema"); -DATA(insert OID = 2928 ( cursor_to_xmlschema PGNSP PGUID 12 100 0 0 0 f f f f t f s r 4 0 142 "1790 16 16 25" _null_ _null_ "{cursor,nulls,tableforest,targetns}" _null_ _null_ cursor_to_xmlschema _null_ _null_ _null_ )); -DESCR("map cursor structure to XML Schema"); -DATA(insert OID = 2929 ( table_to_xml_and_xmlschema PGNSP PGUID 12 100 0 0 0 f f f f t f s r 4 0 142 "2205 16 16 25" _null_ _null_ "{tbl,nulls,tableforest,targetns}" _null_ _null_ table_to_xml_and_xmlschema _null_ _null_ _null_ )); -DESCR("map table contents and structure to XML and XML Schema"); -DATA(insert OID = 2930 ( query_to_xml_and_xmlschema PGNSP PGUID 12 100 0 0 0 f f f f t f s u 4 0 142 "25 16 16 25" _null_ _null_ "{query,nulls,tableforest,targetns}" _null_ _null_ query_to_xml_and_xmlschema _null_ _null_ _null_ )); -DESCR("map query result and structure to XML and XML Schema"); - -DATA(insert OID = 2933 ( schema_to_xml PGNSP PGUID 12 100 0 0 0 f f f f t f s r 4 0 142 "19 16 16 25" _null_ _null_ "{schema,nulls,tableforest,targetns}" _null_ _null_ schema_to_xml _null_ _null_ _null_ )); -DESCR("map schema contents to XML"); -DATA(insert OID = 2934 ( schema_to_xmlschema PGNSP PGUID 12 100 0 0 0 f f f f t f s r 4 0 142 "19 16 16 25" _null_ _null_ "{schema,nulls,tableforest,targetns}" _null_ _null_ schema_to_xmlschema _null_ _null_ _null_ )); -DESCR("map schema structure to XML Schema"); -DATA(insert OID = 2935 ( schema_to_xml_and_xmlschema PGNSP PGUID 12 100 0 0 0 f f f f t f s r 4 0 142 "19 16 16 25" _null_ _null_ "{schema,nulls,tableforest,targetns}" _null_ _null_ schema_to_xml_and_xmlschema _null_ _null_ _null_ )); -DESCR("map schema contents and structure to XML and XML Schema"); - -DATA(insert OID = 2936 ( database_to_xml PGNSP PGUID 12 100 0 0 0 f f f f t f s r 3 0 142 "16 16 25" _null_ _null_ "{nulls,tableforest,targetns}" _null_ _null_ database_to_xml _null_ _null_ _null_ )); -DESCR("map database contents to XML"); -DATA(insert OID = 2937 ( database_to_xmlschema PGNSP PGUID 12 100 0 0 0 f f f f t f s r 3 0 142 "16 16 25" _null_ _null_ "{nulls,tableforest,targetns}" _null_ _null_ database_to_xmlschema _null_ _null_ _null_ )); -DESCR("map database structure to XML Schema"); -DATA(insert OID = 2938 ( database_to_xml_and_xmlschema PGNSP PGUID 12 100 0 0 0 f f f f t f s r 3 0 142 "16 16 25" _null_ _null_ "{nulls,tableforest,targetns}" _null_ _null_ database_to_xml_and_xmlschema _null_ _null_ _null_ )); -DESCR("map database contents and structure to XML and XML Schema"); - -DATA(insert OID = 2931 ( xpath PGNSP PGUID 12 1 0 0 0 f f f f t f i s 3 0 143 "25 142 1009" _null_ _null_ _null_ _null_ _null_ xpath _null_ _null_ _null_ )); -DESCR("evaluate XPath expression, with namespaces support"); -DATA(insert OID = 2932 ( xpath PGNSP PGUID 14 1 0 0 0 f f f f t f i s 2 0 143 "25 142" _null_ _null_ _null_ _null_ _null_ "select pg_catalog.xpath($1, $2, ''{}''::pg_catalog.text[])" _null_ _null_ _null_ )); -DESCR("evaluate XPath expression"); - -DATA(insert OID = 2614 ( xmlexists PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 16 "25 142" _null_ _null_ _null_ _null_ _null_ xmlexists _null_ _null_ _null_ )); -DESCR("test XML value 
against XPath expression"); - -DATA(insert OID = 3049 ( xpath_exists PGNSP PGUID 12 1 0 0 0 f f f f t f i s 3 0 16 "25 142 1009" _null_ _null_ _null_ _null_ _null_ xpath_exists _null_ _null_ _null_ )); -DESCR("test XML value against XPath expression, with namespace support"); -DATA(insert OID = 3050 ( xpath_exists PGNSP PGUID 14 1 0 0 0 f f f f t f i s 2 0 16 "25 142" _null_ _null_ _null_ _null_ _null_ "select pg_catalog.xpath_exists($1, $2, ''{}''::pg_catalog.text[])" _null_ _null_ _null_ )); -DESCR("test XML value against XPath expression"); -DATA(insert OID = 3051 ( xml_is_well_formed PGNSP PGUID 12 1 0 0 0 f f f f t f s s 1 0 16 "25" _null_ _null_ _null_ _null_ _null_ xml_is_well_formed _null_ _null_ _null_ )); -DESCR("determine if a string is well formed XML"); -DATA(insert OID = 3052 ( xml_is_well_formed_document PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 16 "25" _null_ _null_ _null_ _null_ _null_ xml_is_well_formed_document _null_ _null_ _null_ )); -DESCR("determine if a string is well formed XML document"); -DATA(insert OID = 3053 ( xml_is_well_formed_content PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 16 "25" _null_ _null_ _null_ _null_ _null_ xml_is_well_formed_content _null_ _null_ _null_ )); -DESCR("determine if a string is well formed XML content"); - -/* json */ -DATA(insert OID = 321 ( json_in PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 114 "2275" _null_ _null_ _null_ _null_ _null_ json_in _null_ _null_ _null_ )); -DESCR("I/O"); -DATA(insert OID = 322 ( json_out PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 2275 "114" _null_ _null_ _null_ _null_ _null_ json_out _null_ _null_ _null_ )); -DESCR("I/O"); -DATA(insert OID = 323 ( json_recv PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 114 "2281" _null_ _null_ _null_ _null_ _null_ json_recv _null_ _null_ _null_ )); -DESCR("I/O"); -DATA(insert OID = 324 ( json_send PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 17 "114" _null_ _null_ _null_ _null_ _null_ json_send _null_ _null_ _null_ )); -DESCR("I/O"); -DATA(insert OID = 3153 ( array_to_json PGNSP PGUID 12 1 0 0 0 f f f f t f s s 1 0 114 "2277" _null_ _null_ _null_ _null_ _null_ array_to_json _null_ _null_ _null_ )); -DESCR("map array to json"); -DATA(insert OID = 3154 ( array_to_json PGNSP PGUID 12 1 0 0 0 f f f f t f s s 2 0 114 "2277 16" _null_ _null_ _null_ _null_ _null_ array_to_json_pretty _null_ _null_ _null_ )); -DESCR("map array to json with optional pretty printing"); -DATA(insert OID = 3155 ( row_to_json PGNSP PGUID 12 1 0 0 0 f f f f t f s s 1 0 114 "2249" _null_ _null_ _null_ _null_ _null_ row_to_json _null_ _null_ _null_ )); -DESCR("map row to json"); -DATA(insert OID = 3156 ( row_to_json PGNSP PGUID 12 1 0 0 0 f f f f t f s s 2 0 114 "2249 16" _null_ _null_ _null_ _null_ _null_ row_to_json_pretty _null_ _null_ _null_ )); -DESCR("map row to json with optional pretty printing"); -DATA(insert OID = 3173 ( json_agg_transfn PGNSP PGUID 12 1 0 0 0 f f f f f f s s 2 0 2281 "2281 2283" _null_ _null_ _null_ _null_ _null_ json_agg_transfn _null_ _null_ _null_ )); -DESCR("json aggregate transition function"); -DATA(insert OID = 3174 ( json_agg_finalfn PGNSP PGUID 12 1 0 0 0 f f f f f f i s 1 0 114 "2281" _null_ _null_ _null_ _null_ _null_ json_agg_finalfn _null_ _null_ _null_ )); -DESCR("json aggregate final function"); -DATA(insert OID = 3175 ( json_agg PGNSP PGUID 12 1 0 0 0 t f f f f f s s 1 0 114 "2283" _null_ _null_ _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ )); -DESCR("aggregate input into json"); -DATA(insert OID = 3180 ( json_object_agg_transfn PGNSP PGUID 12 1 0 0 
0 f f f f f f s s 3 0 2281 "2281 2276 2276" _null_ _null_ _null_ _null_ _null_ json_object_agg_transfn _null_ _null_ _null_ )); -DESCR("json object aggregate transition function"); -DATA(insert OID = 3196 ( json_object_agg_finalfn PGNSP PGUID 12 1 0 0 0 f f f f f f i s 1 0 114 "2281" _null_ _null_ _null_ _null_ _null_ json_object_agg_finalfn _null_ _null_ _null_ )); -DESCR("json object aggregate final function"); -DATA(insert OID = 3197 ( json_object_agg PGNSP PGUID 12 1 0 0 0 t f f f f f s s 2 0 114 "2276 2276" _null_ _null_ _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ )); -DESCR("aggregate input into a json object"); -DATA(insert OID = 3198 ( json_build_array PGNSP PGUID 12 1 0 2276 0 f f f f f f s s 1 0 114 "2276" "{2276}" "{v}" _null_ _null_ _null_ json_build_array _null_ _null_ _null_ )); -DESCR("build a json array from any inputs"); -DATA(insert OID = 3199 ( json_build_array PGNSP PGUID 12 1 0 0 0 f f f f f f s s 0 0 114 "" _null_ _null_ _null_ _null_ _null_ json_build_array_noargs _null_ _null_ _null_ )); -DESCR("build an empty json array"); -DATA(insert OID = 3200 ( json_build_object PGNSP PGUID 12 1 0 2276 0 f f f f f f s s 1 0 114 "2276" "{2276}" "{v}" _null_ _null_ _null_ json_build_object _null_ _null_ _null_ )); -DESCR("build a json object from pairwise key/value inputs"); -DATA(insert OID = 3201 ( json_build_object PGNSP PGUID 12 1 0 0 0 f f f f f f s s 0 0 114 "" _null_ _null_ _null_ _null_ _null_ json_build_object_noargs _null_ _null_ _null_ )); -DESCR("build an empty json object"); -DATA(insert OID = 3202 ( json_object PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 114 "1009" _null_ _null_ _null_ _null_ _null_ json_object _null_ _null_ _null_ )); -DESCR("map text array of key value pairs to json object"); -DATA(insert OID = 3203 ( json_object PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 114 "1009 1009" _null_ _null_ _null_ _null_ _null_ json_object_two_arg _null_ _null_ _null_ )); -DESCR("map text arrays of keys and values to json object"); -DATA(insert OID = 3176 ( to_json PGNSP PGUID 12 1 0 0 0 f f f f t f s s 1 0 114 "2283" _null_ _null_ _null_ _null_ _null_ to_json _null_ _null_ _null_ )); -DESCR("map input to json"); -DATA(insert OID = 3261 ( json_strip_nulls PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 114 "114" _null_ _null_ _null_ _null_ _null_ json_strip_nulls _null_ _null_ _null_ )); -DESCR("remove object fields with null values from json"); - -DATA(insert OID = 3947 ( json_object_field PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 114 "114 25" _null_ _null_ "{from_json, field_name}" _null_ _null_ json_object_field _null_ _null_ _null_ )); -DATA(insert OID = 3948 ( json_object_field_text PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 25 "114 25" _null_ _null_ "{from_json, field_name}" _null_ _null_ json_object_field_text _null_ _null_ _null_ )); -DATA(insert OID = 3949 ( json_array_element PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 114 "114 23" _null_ _null_ "{from_json, element_index}" _null_ _null_ json_array_element _null_ _null_ _null_ )); -DATA(insert OID = 3950 ( json_array_element_text PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 25 "114 23" _null_ _null_ "{from_json, element_index}" _null_ _null_ json_array_element_text _null_ _null_ _null_ )); -DATA(insert OID = 3951 ( json_extract_path PGNSP PGUID 12 1 0 25 0 f f f f t f i s 2 0 114 "114 1009" "{114,1009}" "{i,v}" "{from_json,path_elems}" _null_ _null_ json_extract_path _null_ _null_ _null_ )); -DESCR("get value from json with path elements"); -DATA(insert OID = 3953 ( json_extract_path_text PGNSP PGUID 12 
1 0 25 0 f f f f t f i s 2 0 25 "114 1009" "{114,1009}" "{i,v}" "{from_json,path_elems}" _null_ _null_ json_extract_path_text _null_ _null_ _null_ )); -DESCR("get value from json as text with path elements"); -DATA(insert OID = 3955 ( json_array_elements PGNSP PGUID 12 1 100 0 0 f f f f t t i s 1 0 114 "114" "{114,114}" "{i,o}" "{from_json,value}" _null_ _null_ json_array_elements _null_ _null_ _null_ )); -DESCR("key value pairs of a json object"); -DATA(insert OID = 3969 ( json_array_elements_text PGNSP PGUID 12 1 100 0 0 f f f f t t i s 1 0 25 "114" "{114,25}" "{i,o}" "{from_json,value}" _null_ _null_ json_array_elements_text _null_ _null_ _null_ )); -DESCR("elements of json array"); -DATA(insert OID = 3956 ( json_array_length PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 23 "114" _null_ _null_ _null_ _null_ _null_ json_array_length _null_ _null_ _null_ )); -DESCR("length of json array"); -DATA(insert OID = 3957 ( json_object_keys PGNSP PGUID 12 1 100 0 0 f f f f t t i s 1 0 25 "114" _null_ _null_ _null_ _null_ _null_ json_object_keys _null_ _null_ _null_ )); -DESCR("get json object keys"); -DATA(insert OID = 3958 ( json_each PGNSP PGUID 12 1 100 0 0 f f f f t t i s 1 0 2249 "114" "{114,25,114}" "{i,o,o}" "{from_json,key,value}" _null_ _null_ json_each _null_ _null_ _null_ )); -DESCR("key value pairs of a json object"); -DATA(insert OID = 3959 ( json_each_text PGNSP PGUID 12 1 100 0 0 f f f f t t i s 1 0 2249 "114" "{114,25,25}" "{i,o,o}" "{from_json,key,value}" _null_ _null_ json_each_text _null_ _null_ _null_ )); -DESCR("key value pairs of a json object"); -DATA(insert OID = 3960 ( json_populate_record PGNSP PGUID 12 1 0 0 0 f f f f f f s s 3 0 2283 "2283 114 16" _null_ _null_ _null_ _null_ _null_ json_populate_record _null_ _null_ _null_ )); -DESCR("get record fields from a json object"); -DATA(insert OID = 3961 ( json_populate_recordset PGNSP PGUID 12 1 100 0 0 f f f f f t s s 3 0 2283 "2283 114 16" _null_ _null_ _null_ _null_ _null_ json_populate_recordset _null_ _null_ _null_ )); -DESCR("get set of records with fields from a json array of objects"); -DATA(insert OID = 3204 ( json_to_record PGNSP PGUID 12 1 0 0 0 f f f f t f s s 1 0 2249 "114" _null_ _null_ _null_ _null_ _null_ json_to_record _null_ _null_ _null_ )); -DESCR("get record fields from a json object"); -DATA(insert OID = 3205 ( json_to_recordset PGNSP PGUID 12 1 100 0 0 f f f f f t s s 1 0 2249 "114" _null_ _null_ _null_ _null_ _null_ json_to_recordset _null_ _null_ _null_ )); -DESCR("get set of records with fields from a json array of objects"); -DATA(insert OID = 3968 ( json_typeof PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 25 "114" _null_ _null_ _null_ _null_ _null_ json_typeof _null_ _null_ _null_ )); -DESCR("get the type of a json value"); - -/* uuid */ -DATA(insert OID = 2952 ( uuid_in PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 2950 "2275" _null_ _null_ _null_ _null_ _null_ uuid_in _null_ _null_ _null_ )); -DESCR("I/O"); -DATA(insert OID = 2953 ( uuid_out PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 2275 "2950" _null_ _null_ _null_ _null_ _null_ uuid_out _null_ _null_ _null_ )); -DESCR("I/O"); -DATA(insert OID = 2954 ( uuid_lt PGNSP PGUID 12 1 0 0 0 f f f t t f i s 2 0 16 "2950 2950" _null_ _null_ _null_ _null_ _null_ uuid_lt _null_ _null_ _null_ )); -DATA(insert OID = 2955 ( uuid_le PGNSP PGUID 12 1 0 0 0 f f f t t f i s 2 0 16 "2950 2950" _null_ _null_ _null_ _null_ _null_ uuid_le _null_ _null_ _null_ )); -DATA(insert OID = 2956 ( uuid_eq PGNSP PGUID 12 1 0 0 0 f f f t t f i s 2 0 16 "2950 2950" _null_ _null_ _null_ 
_null_ _null_ uuid_eq _null_ _null_ _null_ )); -DATA(insert OID = 2957 ( uuid_ge PGNSP PGUID 12 1 0 0 0 f f f t t f i s 2 0 16 "2950 2950" _null_ _null_ _null_ _null_ _null_ uuid_ge _null_ _null_ _null_ )); -DATA(insert OID = 2958 ( uuid_gt PGNSP PGUID 12 1 0 0 0 f f f t t f i s 2 0 16 "2950 2950" _null_ _null_ _null_ _null_ _null_ uuid_gt _null_ _null_ _null_ )); -DATA(insert OID = 2959 ( uuid_ne PGNSP PGUID 12 1 0 0 0 f f f t t f i s 2 0 16 "2950 2950" _null_ _null_ _null_ _null_ _null_ uuid_ne _null_ _null_ _null_ )); -DATA(insert OID = 2960 ( uuid_cmp PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 23 "2950 2950" _null_ _null_ _null_ _null_ _null_ uuid_cmp _null_ _null_ _null_ )); -DESCR("less-equal-greater"); -DATA(insert OID = 3300 ( uuid_sortsupport PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 2278 "2281" _null_ _null_ _null_ _null_ _null_ uuid_sortsupport _null_ _null_ _null_ )); -DESCR("sort support"); -DATA(insert OID = 2961 ( uuid_recv PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 2950 "2281" _null_ _null_ _null_ _null_ _null_ uuid_recv _null_ _null_ _null_ )); -DESCR("I/O"); -DATA(insert OID = 2962 ( uuid_send PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 17 "2950" _null_ _null_ _null_ _null_ _null_ uuid_send _null_ _null_ _null_ )); -DESCR("I/O"); -DATA(insert OID = 2963 ( uuid_hash PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 23 "2950" _null_ _null_ _null_ _null_ _null_ uuid_hash _null_ _null_ _null_ )); -DESCR("hash"); - -/* pg_lsn */ -DATA(insert OID = 3229 ( pg_lsn_in PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 3220 "2275" _null_ _null_ _null_ _null_ _null_ pg_lsn_in _null_ _null_ _null_ )); -DESCR("I/O"); -DATA(insert OID = 3230 ( pg_lsn_out PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 2275 "3220" _null_ _null_ _null_ _null_ _null_ pg_lsn_out _null_ _null_ _null_ )); -DESCR("I/O"); -DATA(insert OID = 3231 ( pg_lsn_lt PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 16 "3220 3220" _null_ _null_ _null_ _null_ _null_ pg_lsn_lt _null_ _null_ _null_ )); -DATA(insert OID = 3232 ( pg_lsn_le PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 16 "3220 3220" _null_ _null_ _null_ _null_ _null_ pg_lsn_le _null_ _null_ _null_ )); -DATA(insert OID = 3233 ( pg_lsn_eq PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 16 "3220 3220" _null_ _null_ _null_ _null_ _null_ pg_lsn_eq _null_ _null_ _null_ )); -DATA(insert OID = 3234 ( pg_lsn_ge PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 16 "3220 3220" _null_ _null_ _null_ _null_ _null_ pg_lsn_ge _null_ _null_ _null_ )); -DATA(insert OID = 3235 ( pg_lsn_gt PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 16 "3220 3220" _null_ _null_ _null_ _null_ _null_ pg_lsn_gt _null_ _null_ _null_ )); -DATA(insert OID = 3236 ( pg_lsn_ne PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 16 "3220 3220" _null_ _null_ _null_ _null_ _null_ pg_lsn_ne _null_ _null_ _null_ )); -DATA(insert OID = 3237 ( pg_lsn_mi PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 1700 "3220 3220" _null_ _null_ _null_ _null_ _null_ pg_lsn_mi _null_ _null_ _null_ )); -DATA(insert OID = 3238 ( pg_lsn_recv PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 3220 "2281" _null_ _null_ _null_ _null_ _null_ pg_lsn_recv _null_ _null_ _null_ )); -DESCR("I/O"); -DATA(insert OID = 3239 ( pg_lsn_send PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 17 "3220" _null_ _null_ _null_ _null_ _null_ pg_lsn_send _null_ _null_ _null_ )); -DESCR("I/O"); -DATA(insert OID = 3251 ( pg_lsn_cmp PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 23 "3220 3220" _null_ _null_ _null_ _null_ _null_ pg_lsn_cmp _null_ _null_ _null_ )); -DESCR("less-equal-greater"); -DATA(insert OID = 3252 ( pg_lsn_hash 
PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 23 "3220" _null_ _null_ _null_ _null_ _null_ pg_lsn_hash _null_ _null_ _null_ )); -DESCR("hash"); - -/* enum related procs */ -DATA(insert OID = 3504 ( anyenum_in PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 3500 "2275" _null_ _null_ _null_ _null_ _null_ anyenum_in _null_ _null_ _null_ )); -DESCR("I/O"); -DATA(insert OID = 3505 ( anyenum_out PGNSP PGUID 12 1 0 0 0 f f f f t f s s 1 0 2275 "3500" _null_ _null_ _null_ _null_ _null_ anyenum_out _null_ _null_ _null_ )); -DESCR("I/O"); -DATA(insert OID = 3506 ( enum_in PGNSP PGUID 12 1 0 0 0 f f f f t f s s 2 0 3500 "2275 26" _null_ _null_ _null_ _null_ _null_ enum_in _null_ _null_ _null_ )); -DESCR("I/O"); -DATA(insert OID = 3507 ( enum_out PGNSP PGUID 12 1 0 0 0 f f f f t f s s 1 0 2275 "3500" _null_ _null_ _null_ _null_ _null_ enum_out _null_ _null_ _null_ )); -DESCR("I/O"); -DATA(insert OID = 3508 ( enum_eq PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 16 "3500 3500" _null_ _null_ _null_ _null_ _null_ enum_eq _null_ _null_ _null_ )); -DATA(insert OID = 3509 ( enum_ne PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 16 "3500 3500" _null_ _null_ _null_ _null_ _null_ enum_ne _null_ _null_ _null_ )); -DATA(insert OID = 3510 ( enum_lt PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 16 "3500 3500" _null_ _null_ _null_ _null_ _null_ enum_lt _null_ _null_ _null_ )); -DATA(insert OID = 3511 ( enum_gt PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 16 "3500 3500" _null_ _null_ _null_ _null_ _null_ enum_gt _null_ _null_ _null_ )); -DATA(insert OID = 3512 ( enum_le PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 16 "3500 3500" _null_ _null_ _null_ _null_ _null_ enum_le _null_ _null_ _null_ )); -DATA(insert OID = 3513 ( enum_ge PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 16 "3500 3500" _null_ _null_ _null_ _null_ _null_ enum_ge _null_ _null_ _null_ )); -DATA(insert OID = 3514 ( enum_cmp PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 23 "3500 3500" _null_ _null_ _null_ _null_ _null_ enum_cmp _null_ _null_ _null_ )); -DESCR("less-equal-greater"); -DATA(insert OID = 3515 ( hashenum PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 23 "3500" _null_ _null_ _null_ _null_ _null_ hashenum _null_ _null_ _null_ )); -DESCR("hash"); -DATA(insert OID = 3524 ( enum_smaller PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 3500 "3500 3500" _null_ _null_ _null_ _null_ _null_ enum_smaller _null_ _null_ _null_ )); -DESCR("smaller of two"); -DATA(insert OID = 3525 ( enum_larger PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 3500 "3500 3500" _null_ _null_ _null_ _null_ _null_ enum_larger _null_ _null_ _null_ )); -DESCR("larger of two"); -DATA(insert OID = 3526 ( max PGNSP PGUID 12 1 0 0 0 t f f f f f i s 1 0 3500 "3500" _null_ _null_ _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ )); -DESCR("maximum value of all enum input values"); -DATA(insert OID = 3527 ( min PGNSP PGUID 12 1 0 0 0 t f f f f f i s 1 0 3500 "3500" _null_ _null_ _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ )); -DESCR("minimum value of all enum input values"); -DATA(insert OID = 3528 ( enum_first PGNSP PGUID 12 1 0 0 0 f f f f f f s s 1 0 3500 "3500" _null_ _null_ _null_ _null_ _null_ enum_first _null_ _null_ _null_ )); -DESCR("first value of the input enum type"); -DATA(insert OID = 3529 ( enum_last PGNSP PGUID 12 1 0 0 0 f f f f f f s s 1 0 3500 "3500" _null_ _null_ _null_ _null_ _null_ enum_last _null_ _null_ _null_ )); -DESCR("last value of the input enum type"); -DATA(insert OID = 3530 ( enum_range PGNSP PGUID 12 1 0 0 0 f f f f f f s s 2 0 2277 "3500 3500" _null_ _null_ _null_ 
_null_ _null_ enum_range_bounds _null_ _null_ _null_ )); -DESCR("range between the two given enum values, as an ordered array"); -DATA(insert OID = 3531 ( enum_range PGNSP PGUID 12 1 0 0 0 f f f f f f s s 1 0 2277 "3500" _null_ _null_ _null_ _null_ _null_ enum_range_all _null_ _null_ _null_ )); -DESCR("range of the given enum type, as an ordered array"); -DATA(insert OID = 3532 ( enum_recv PGNSP PGUID 12 1 0 0 0 f f f f t f s s 2 0 3500 "2281 26" _null_ _null_ _null_ _null_ _null_ enum_recv _null_ _null_ _null_ )); -DESCR("I/O"); -DATA(insert OID = 3533 ( enum_send PGNSP PGUID 12 1 0 0 0 f f f f t f s s 1 0 17 "3500" _null_ _null_ _null_ _null_ _null_ enum_send _null_ _null_ _null_ )); -DESCR("I/O"); - -/* text search stuff */ -DATA(insert OID = 3610 ( tsvectorin PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 3614 "2275" _null_ _null_ _null_ _null_ _null_ tsvectorin _null_ _null_ _null_ )); -DESCR("I/O"); -DATA(insert OID = 3639 ( tsvectorrecv PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 3614 "2281" _null_ _null_ _null_ _null_ _null_ tsvectorrecv _null_ _null_ _null_ )); -DESCR("I/O"); -DATA(insert OID = 3611 ( tsvectorout PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 2275 "3614" _null_ _null_ _null_ _null_ _null_ tsvectorout _null_ _null_ _null_ )); -DESCR("I/O"); -DATA(insert OID = 3638 ( tsvectorsend PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 17 "3614" _null_ _null_ _null_ _null_ _null_ tsvectorsend _null_ _null_ _null_ )); -DESCR("I/O"); -DATA(insert OID = 3612 ( tsqueryin PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 3615 "2275" _null_ _null_ _null_ _null_ _null_ tsqueryin _null_ _null_ _null_ )); -DESCR("I/O"); -DATA(insert OID = 3641 ( tsqueryrecv PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 3615 "2281" _null_ _null_ _null_ _null_ _null_ tsqueryrecv _null_ _null_ _null_ )); -DESCR("I/O"); -DATA(insert OID = 3613 ( tsqueryout PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 2275 "3615" _null_ _null_ _null_ _null_ _null_ tsqueryout _null_ _null_ _null_ )); -DESCR("I/O"); -DATA(insert OID = 3640 ( tsquerysend PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 17 "3615" _null_ _null_ _null_ _null_ _null_ tsquerysend _null_ _null_ _null_ )); -DESCR("I/O"); -DATA(insert OID = 3646 ( gtsvectorin PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 3642 "2275" _null_ _null_ _null_ _null_ _null_ gtsvectorin _null_ _null_ _null_ )); -DESCR("I/O"); -DATA(insert OID = 3647 ( gtsvectorout PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 2275 "3642" _null_ _null_ _null_ _null_ _null_ gtsvectorout _null_ _null_ _null_ )); -DESCR("I/O"); - -DATA(insert OID = 3616 ( tsvector_lt PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 16 "3614 3614" _null_ _null_ _null_ _null_ _null_ tsvector_lt _null_ _null_ _null_ )); -DATA(insert OID = 3617 ( tsvector_le PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 16 "3614 3614" _null_ _null_ _null_ _null_ _null_ tsvector_le _null_ _null_ _null_ )); -DATA(insert OID = 3618 ( tsvector_eq PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 16 "3614 3614" _null_ _null_ _null_ _null_ _null_ tsvector_eq _null_ _null_ _null_ )); -DATA(insert OID = 3619 ( tsvector_ne PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 16 "3614 3614" _null_ _null_ _null_ _null_ _null_ tsvector_ne _null_ _null_ _null_ )); -DATA(insert OID = 3620 ( tsvector_ge PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 16 "3614 3614" _null_ _null_ _null_ _null_ _null_ tsvector_ge _null_ _null_ _null_ )); -DATA(insert OID = 3621 ( tsvector_gt PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 16 "3614 3614" _null_ _null_ _null_ _null_ _null_ tsvector_gt _null_ _null_ _null_ )); -DATA(insert 
OID = 3622 ( tsvector_cmp PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 23 "3614 3614" _null_ _null_ _null_ _null_ _null_ tsvector_cmp _null_ _null_ _null_ )); -DESCR("less-equal-greater"); - -DATA(insert OID = 3711 ( length PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 23 "3614" _null_ _null_ _null_ _null_ _null_ tsvector_length _null_ _null_ _null_ )); -DESCR("number of lexemes"); -DATA(insert OID = 3623 ( strip PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 3614 "3614" _null_ _null_ _null_ _null_ _null_ tsvector_strip _null_ _null_ _null_ )); -DESCR("strip position information"); -DATA(insert OID = 3624 ( setweight PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 3614 "3614 18" _null_ _null_ _null_ _null_ _null_ tsvector_setweight _null_ _null_ _null_ )); -DESCR("set given weight for whole tsvector"); -DATA(insert OID = 3320 ( setweight PGNSP PGUID 12 1 0 0 0 f f f f t f i s 3 0 3614 "3614 18 1009" _null_ _null_ _null_ _null_ _null_ tsvector_setweight_by_filter _null_ _null_ _null_ )); -DESCR("set given weight for given lexemes"); -DATA(insert OID = 3625 ( tsvector_concat PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 3614 "3614 3614" _null_ _null_ _null_ _null_ _null_ tsvector_concat _null_ _null_ _null_ )); -DATA(insert OID = 3321 ( ts_delete PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 3614 "3614 25" _null_ _null_ _null_ _null_ _null_ tsvector_delete_str _null_ _null_ _null_ )); -DESCR("delete lexeme"); -DATA(insert OID = 3323 ( ts_delete PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 3614 "3614 1009" _null_ _null_ _null_ _null_ _null_ tsvector_delete_arr _null_ _null_ _null_ )); -DESCR("delete given lexemes"); -DATA(insert OID = 3322 ( unnest PGNSP PGUID 12 1 10 0 0 f f f f t t i s 1 0 2249 "3614" "{3614,25,1005,1009}" "{i,o,o,o}" "{tsvector,lexeme,positions,weights}" _null_ _null_ tsvector_unnest _null_ _null_ _null_ )); -DESCR("expand tsvector to set of rows"); -DATA(insert OID = 3326 ( tsvector_to_array PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 1009 "3614" _null_ _null_ _null_ _null_ _null_ tsvector_to_array _null_ _null_ _null_ )); -DESCR("convert tsvector to array of lexemes"); -DATA(insert OID = 3327 ( array_to_tsvector PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 3614 "1009" _null_ _null_ _null_ _null_ _null_ array_to_tsvector _null_ _null_ _null_ )); -DESCR("build tsvector from array of lexemes"); -DATA(insert OID = 3319 ( ts_filter PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 3614 "3614 1002" _null_ _null_ _null_ _null_ _null_ tsvector_filter _null_ _null_ _null_ )); -DESCR("delete lexemes that do not have one of the given weights"); - -DATA(insert OID = 3634 ( ts_match_vq PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 16 "3614 3615" _null_ _null_ _null_ _null_ _null_ ts_match_vq _null_ _null_ _null_ )); -DATA(insert OID = 3635 ( ts_match_qv PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 16 "3615 3614" _null_ _null_ _null_ _null_ _null_ ts_match_qv _null_ _null_ _null_ )); -DATA(insert OID = 3760 ( ts_match_tt PGNSP PGUID 12 100 0 0 0 f f f f t f s s 2 0 16 "25 25" _null_ _null_ _null_ _null_ _null_ ts_match_tt _null_ _null_ _null_ )); -DATA(insert OID = 3761 ( ts_match_tq PGNSP PGUID 12 100 0 0 0 f f f f t f s s 2 0 16 "25 3615" _null_ _null_ _null_ _null_ _null_ ts_match_tq _null_ _null_ _null_ )); - -DATA(insert OID = 3648 ( gtsvector_compress PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 2281 "2281" _null_ _null_ _null_ _null_ _null_ gtsvector_compress _null_ _null_ _null_ )); -DESCR("GiST tsvector support"); -DATA(insert OID = 3649 ( gtsvector_decompress PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 2281 "2281" 
_null_ _null_ _null_ _null_ _null_ gtsvector_decompress _null_ _null_ _null_ )); -DESCR("GiST tsvector support"); -DATA(insert OID = 3650 ( gtsvector_picksplit PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 2281 "2281 2281" _null_ _null_ _null_ _null_ _null_ gtsvector_picksplit _null_ _null_ _null_ )); -DESCR("GiST tsvector support"); -DATA(insert OID = 3651 ( gtsvector_union PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 3642 "2281 2281" _null_ _null_ _null_ _null_ _null_ gtsvector_union _null_ _null_ _null_ )); -DESCR("GiST tsvector support"); -DATA(insert OID = 3652 ( gtsvector_same PGNSP PGUID 12 1 0 0 0 f f f f t f i s 3 0 2281 "3642 3642 2281" _null_ _null_ _null_ _null_ _null_ gtsvector_same _null_ _null_ _null_ )); -DESCR("GiST tsvector support"); -DATA(insert OID = 3653 ( gtsvector_penalty PGNSP PGUID 12 1 0 0 0 f f f f t f i s 3 0 2281 "2281 2281 2281" _null_ _null_ _null_ _null_ _null_ gtsvector_penalty _null_ _null_ _null_ )); -DESCR("GiST tsvector support"); -DATA(insert OID = 3654 ( gtsvector_consistent PGNSP PGUID 12 1 0 0 0 f f f f t f i s 5 0 16 "2281 3614 21 26 2281" _null_ _null_ _null_ _null_ _null_ gtsvector_consistent _null_ _null_ _null_ )); -DESCR("GiST tsvector support"); -DATA(insert OID = 3790 ( gtsvector_consistent PGNSP PGUID 12 1 0 0 0 f f f f t f i s 5 0 16 "2281 3642 23 26 2281" _null_ _null_ _null_ _null_ _null_ gtsvector_consistent_oldsig _null_ _null_ _null_ )); -DESCR("GiST tsvector support (obsolete)"); - -DATA(insert OID = 3656 ( gin_extract_tsvector PGNSP PGUID 12 1 0 0 0 f f f f t f i s 3 0 2281 "3614 2281 2281" _null_ _null_ _null_ _null_ _null_ gin_extract_tsvector _null_ _null_ _null_ )); -DESCR("GIN tsvector support"); -DATA(insert OID = 3657 ( gin_extract_tsquery PGNSP PGUID 12 1 0 0 0 f f f f t f i s 7 0 2281 "3614 2281 21 2281 2281 2281 2281" _null_ _null_ _null_ _null_ _null_ gin_extract_tsquery _null_ _null_ _null_ )); -DESCR("GIN tsvector support"); -DATA(insert OID = 3658 ( gin_tsquery_consistent PGNSP PGUID 12 1 0 0 0 f f f f t f i s 8 0 16 "2281 21 3614 23 2281 2281 2281 2281" _null_ _null_ _null_ _null_ _null_ gin_tsquery_consistent _null_ _null_ _null_ )); -DESCR("GIN tsvector support"); -DATA(insert OID = 3921 ( gin_tsquery_triconsistent PGNSP PGUID 12 1 0 0 0 f f f f t f i s 7 0 18 "2281 21 3614 23 2281 2281 2281" _null_ _null_ _null_ _null_ _null_ gin_tsquery_triconsistent _null_ _null_ _null_ )); -DESCR("GIN tsvector support"); -DATA(insert OID = 3724 ( gin_cmp_tslexeme PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 23 "25 25" _null_ _null_ _null_ _null_ _null_ gin_cmp_tslexeme _null_ _null_ _null_ )); -DESCR("GIN tsvector support"); -DATA(insert OID = 2700 ( gin_cmp_prefix PGNSP PGUID 12 1 0 0 0 f f f f t f i s 4 0 23 "25 25 21 2281" _null_ _null_ _null_ _null_ _null_ gin_cmp_prefix _null_ _null_ _null_ )); -DESCR("GIN tsvector support"); -DATA(insert OID = 3077 ( gin_extract_tsvector PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 2281 "3614 2281" _null_ _null_ _null_ _null_ _null_ gin_extract_tsvector_2args _null_ _null_ _null_ )); -DESCR("GIN tsvector support (obsolete)"); -DATA(insert OID = 3087 ( gin_extract_tsquery PGNSP PGUID 12 1 0 0 0 f f f f t f i s 5 0 2281 "3615 2281 21 2281 2281" _null_ _null_ _null_ _null_ _null_ gin_extract_tsquery_5args _null_ _null_ _null_ )); -DESCR("GIN tsvector support (obsolete)"); -DATA(insert OID = 3088 ( gin_tsquery_consistent PGNSP PGUID 12 1 0 0 0 f f f f t f i s 6 0 16 "2281 21 3615 23 2281 2281" _null_ _null_ _null_ _null_ _null_ gin_tsquery_consistent_6args _null_ _null_ _null_ )); -DESCR("GIN tsvector 
support (obsolete)"); -DATA(insert OID = 3791 ( gin_extract_tsquery PGNSP PGUID 12 1 0 0 0 f f f f t f i s 7 0 2281 "3615 2281 21 2281 2281 2281 2281" _null_ _null_ _null_ _null_ _null_ gin_extract_tsquery_oldsig _null_ _null_ _null_ )); -DESCR("GIN tsvector support (obsolete)"); -DATA(insert OID = 3792 ( gin_tsquery_consistent PGNSP PGUID 12 1 0 0 0 f f f f t f i s 8 0 16 "2281 21 3615 23 2281 2281 2281 2281" _null_ _null_ _null_ _null_ _null_ gin_tsquery_consistent_oldsig _null_ _null_ _null_ )); -DESCR("GIN tsvector support (obsolete)"); - -DATA(insert OID = 3789 ( gin_clean_pending_list PGNSP PGUID 12 1 0 0 0 f f f f t f v s 1 0 20 "2205" _null_ _null_ _null_ _null_ _null_ gin_clean_pending_list _null_ _null_ _null_ )); -DESCR("clean up GIN pending list"); - -DATA(insert OID = 3662 ( tsquery_lt PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 16 "3615 3615" _null_ _null_ _null_ _null_ _null_ tsquery_lt _null_ _null_ _null_ )); -DATA(insert OID = 3663 ( tsquery_le PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 16 "3615 3615" _null_ _null_ _null_ _null_ _null_ tsquery_le _null_ _null_ _null_ )); -DATA(insert OID = 3664 ( tsquery_eq PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 16 "3615 3615" _null_ _null_ _null_ _null_ _null_ tsquery_eq _null_ _null_ _null_ )); -DATA(insert OID = 3665 ( tsquery_ne PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 16 "3615 3615" _null_ _null_ _null_ _null_ _null_ tsquery_ne _null_ _null_ _null_ )); -DATA(insert OID = 3666 ( tsquery_ge PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 16 "3615 3615" _null_ _null_ _null_ _null_ _null_ tsquery_ge _null_ _null_ _null_ )); -DATA(insert OID = 3667 ( tsquery_gt PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 16 "3615 3615" _null_ _null_ _null_ _null_ _null_ tsquery_gt _null_ _null_ _null_ )); -DATA(insert OID = 3668 ( tsquery_cmp PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 23 "3615 3615" _null_ _null_ _null_ _null_ _null_ tsquery_cmp _null_ _null_ _null_ )); -DESCR("less-equal-greater"); - -DATA(insert OID = 3669 ( tsquery_and PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 3615 "3615 3615" _null_ _null_ _null_ _null_ _null_ tsquery_and _null_ _null_ _null_ )); -DATA(insert OID = 3670 ( tsquery_or PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 3615 "3615 3615" _null_ _null_ _null_ _null_ _null_ tsquery_or _null_ _null_ _null_ )); -DATA(insert OID = 5003 ( tsquery_phrase PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 3615 "3615 3615" _null_ _null_ _null_ _null_ _null_ tsquery_phrase _null_ _null_ _null_ )); -DATA(insert OID = 5004 ( tsquery_phrase PGNSP PGUID 12 1 0 0 0 f f f f t f i s 3 0 3615 "3615 3615 23" _null_ _null_ _null_ _null_ _null_ tsquery_phrase_distance _null_ _null_ _null_ )); -DESCR("phrase-concatenate with distance"); -DATA(insert OID = 3671 ( tsquery_not PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 3615 "3615" _null_ _null_ _null_ _null_ _null_ tsquery_not _null_ _null_ _null_ )); - -DATA(insert OID = 3691 ( tsq_mcontains PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 16 "3615 3615" _null_ _null_ _null_ _null_ _null_ tsq_mcontains _null_ _null_ _null_ )); -DATA(insert OID = 3692 ( tsq_mcontained PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 16 "3615 3615" _null_ _null_ _null_ _null_ _null_ tsq_mcontained _null_ _null_ _null_ )); - -DATA(insert OID = 3672 ( numnode PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 23 "3615" _null_ _null_ _null_ _null_ _null_ tsquery_numnode _null_ _null_ _null_ )); -DESCR("number of nodes"); -DATA(insert OID = 3673 ( querytree PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 25 "3615" _null_ _null_ _null_ _null_ _null_ 
tsquerytree _null_ _null_ _null_ )); -DESCR("show real useful query for GiST index"); - -DATA(insert OID = 3684 ( ts_rewrite PGNSP PGUID 12 1 0 0 0 f f f f t f i s 3 0 3615 "3615 3615 3615" _null_ _null_ _null_ _null_ _null_ tsquery_rewrite _null_ _null_ _null_ )); -DESCR("rewrite tsquery"); -DATA(insert OID = 3685 ( ts_rewrite PGNSP PGUID 12 100 0 0 0 f f f f t f v s 2 0 3615 "3615 25" _null_ _null_ _null_ _null_ _null_ tsquery_rewrite_query _null_ _null_ _null_ )); -DESCR("rewrite tsquery"); - -DATA(insert OID = 3695 ( gtsquery_compress PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 2281 "2281" _null_ _null_ _null_ _null_ _null_ gtsquery_compress _null_ _null_ _null_ )); -DESCR("GiST tsquery support"); -DATA(insert OID = 3696 ( gtsquery_decompress PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 2281 "2281" _null_ _null_ _null_ _null_ _null_ gtsquery_decompress _null_ _null_ _null_ )); -DESCR("GiST tsquery support"); -DATA(insert OID = 3697 ( gtsquery_picksplit PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 2281 "2281 2281" _null_ _null_ _null_ _null_ _null_ gtsquery_picksplit _null_ _null_ _null_ )); -DESCR("GiST tsquery support"); -DATA(insert OID = 3698 ( gtsquery_union PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 20 "2281 2281" _null_ _null_ _null_ _null_ _null_ gtsquery_union _null_ _null_ _null_ )); -DESCR("GiST tsquery support"); -DATA(insert OID = 3699 ( gtsquery_same PGNSP PGUID 12 1 0 0 0 f f f f t f i s 3 0 2281 "20 20 2281" _null_ _null_ _null_ _null_ _null_ gtsquery_same _null_ _null_ _null_ )); -DESCR("GiST tsquery support"); -DATA(insert OID = 3700 ( gtsquery_penalty PGNSP PGUID 12 1 0 0 0 f f f f t f i s 3 0 2281 "2281 2281 2281" _null_ _null_ _null_ _null_ _null_ gtsquery_penalty _null_ _null_ _null_ )); -DESCR("GiST tsquery support"); -DATA(insert OID = 3701 ( gtsquery_consistent PGNSP PGUID 12 1 0 0 0 f f f f t f i s 5 0 16 "2281 3615 21 26 2281" _null_ _null_ _null_ _null_ _null_ gtsquery_consistent _null_ _null_ _null_ )); -DESCR("GiST tsquery support"); -DATA(insert OID = 3793 ( gtsquery_consistent PGNSP PGUID 12 1 0 0 0 f f f f t f i s 5 0 16 "2281 2281 23 26 2281" _null_ _null_ _null_ _null_ _null_ gtsquery_consistent_oldsig _null_ _null_ _null_ )); -DESCR("GiST tsquery support (obsolete)"); - -DATA(insert OID = 3686 ( tsmatchsel PGNSP PGUID 12 1 0 0 0 f f f f t f s s 4 0 701 "2281 26 2281 23" _null_ _null_ _null_ _null_ _null_ tsmatchsel _null_ _null_ _null_ )); -DESCR("restriction selectivity of tsvector @@ tsquery"); -DATA(insert OID = 3687 ( tsmatchjoinsel PGNSP PGUID 12 1 0 0 0 f f f f t f s s 5 0 701 "2281 26 2281 21 2281" _null_ _null_ _null_ _null_ _null_ tsmatchjoinsel _null_ _null_ _null_ )); -DESCR("join selectivity of tsvector @@ tsquery"); -DATA(insert OID = 3688 ( ts_typanalyze PGNSP PGUID 12 1 0 0 0 f f f f t f s s 1 0 16 "2281" _null_ _null_ _null_ _null_ _null_ ts_typanalyze _null_ _null_ _null_ )); -DESCR("tsvector typanalyze"); - -DATA(insert OID = 3689 ( ts_stat PGNSP PGUID 12 10 10000 0 0 f f f f t t v s 1 0 2249 "25" "{25,25,23,23}" "{i,o,o,o}" "{query,word,ndoc,nentry}" _null_ _null_ ts_stat1 _null_ _null_ _null_ )); -DESCR("statistics of tsvector column"); -DATA(insert OID = 3690 ( ts_stat PGNSP PGUID 12 10 10000 0 0 f f f f t t v s 2 0 2249 "25 25" "{25,25,25,23,23}" "{i,i,o,o,o}" "{query,weights,word,ndoc,nentry}" _null_ _null_ ts_stat2 _null_ _null_ _null_ )); -DESCR("statistics of tsvector column"); - -DATA(insert OID = 3703 ( ts_rank PGNSP PGUID 12 1 0 0 0 f f f f t f i s 4 0 700 "1021 3614 3615 23" _null_ _null_ _null_ _null_ _null_ ts_rank_wttf 
_null_ _null_ _null_ )); -DESCR("relevance"); -DATA(insert OID = 3704 ( ts_rank PGNSP PGUID 12 1 0 0 0 f f f f t f i s 3 0 700 "1021 3614 3615" _null_ _null_ _null_ _null_ _null_ ts_rank_wtt _null_ _null_ _null_ )); -DESCR("relevance"); -DATA(insert OID = 3705 ( ts_rank PGNSP PGUID 12 1 0 0 0 f f f f t f i s 3 0 700 "3614 3615 23" _null_ _null_ _null_ _null_ _null_ ts_rank_ttf _null_ _null_ _null_ )); -DESCR("relevance"); -DATA(insert OID = 3706 ( ts_rank PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 700 "3614 3615" _null_ _null_ _null_ _null_ _null_ ts_rank_tt _null_ _null_ _null_ )); -DESCR("relevance"); -DATA(insert OID = 3707 ( ts_rank_cd PGNSP PGUID 12 1 0 0 0 f f f f t f i s 4 0 700 "1021 3614 3615 23" _null_ _null_ _null_ _null_ _null_ ts_rankcd_wttf _null_ _null_ _null_ )); -DESCR("relevance"); -DATA(insert OID = 3708 ( ts_rank_cd PGNSP PGUID 12 1 0 0 0 f f f f t f i s 3 0 700 "1021 3614 3615" _null_ _null_ _null_ _null_ _null_ ts_rankcd_wtt _null_ _null_ _null_ )); -DESCR("relevance"); -DATA(insert OID = 3709 ( ts_rank_cd PGNSP PGUID 12 1 0 0 0 f f f f t f i s 3 0 700 "3614 3615 23" _null_ _null_ _null_ _null_ _null_ ts_rankcd_ttf _null_ _null_ _null_ )); -DESCR("relevance"); -DATA(insert OID = 3710 ( ts_rank_cd PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 700 "3614 3615" _null_ _null_ _null_ _null_ _null_ ts_rankcd_tt _null_ _null_ _null_ )); -DESCR("relevance"); - -DATA(insert OID = 3713 ( ts_token_type PGNSP PGUID 12 1 16 0 0 f f f f t t i s 1 0 2249 "26" "{26,23,25,25}" "{i,o,o,o}" "{parser_oid,tokid,alias,description}" _null_ _null_ ts_token_type_byid _null_ _null_ _null_ )); -DESCR("get parser's token types"); -DATA(insert OID = 3714 ( ts_token_type PGNSP PGUID 12 1 16 0 0 f f f f t t s s 1 0 2249 "25" "{25,23,25,25}" "{i,o,o,o}" "{parser_name,tokid,alias,description}" _null_ _null_ ts_token_type_byname _null_ _null_ _null_ )); -DESCR("get parser's token types"); -DATA(insert OID = 3715 ( ts_parse PGNSP PGUID 12 1 1000 0 0 f f f f t t i s 2 0 2249 "26 25" "{26,25,23,25}" "{i,i,o,o}" "{parser_oid,txt,tokid,token}" _null_ _null_ ts_parse_byid _null_ _null_ _null_ )); -DESCR("parse text to tokens"); -DATA(insert OID = 3716 ( ts_parse PGNSP PGUID 12 1 1000 0 0 f f f f t t s s 2 0 2249 "25 25" "{25,25,23,25}" "{i,i,o,o}" "{parser_name,txt,tokid,token}" _null_ _null_ ts_parse_byname _null_ _null_ _null_ )); -DESCR("parse text to tokens"); - -DATA(insert OID = 3717 ( prsd_start PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 2281 "2281 23" _null_ _null_ _null_ _null_ _null_ prsd_start _null_ _null_ _null_ )); -DESCR("(internal)"); -DATA(insert OID = 3718 ( prsd_nexttoken PGNSP PGUID 12 1 0 0 0 f f f f t f i s 3 0 2281 "2281 2281 2281" _null_ _null_ _null_ _null_ _null_ prsd_nexttoken _null_ _null_ _null_ )); -DESCR("(internal)"); -DATA(insert OID = 3719 ( prsd_end PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 2278 "2281" _null_ _null_ _null_ _null_ _null_ prsd_end _null_ _null_ _null_ )); -DESCR("(internal)"); -DATA(insert OID = 3720 ( prsd_headline PGNSP PGUID 12 1 0 0 0 f f f f t f i s 3 0 2281 "2281 2281 3615" _null_ _null_ _null_ _null_ _null_ prsd_headline _null_ _null_ _null_ )); -DESCR("(internal)"); -DATA(insert OID = 3721 ( prsd_lextype PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 2281 "2281" _null_ _null_ _null_ _null_ _null_ prsd_lextype _null_ _null_ _null_ )); -DESCR("(internal)"); - -DATA(insert OID = 3723 ( ts_lexize PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 1009 "3769 25" _null_ _null_ _null_ _null_ _null_ ts_lexize _null_ _null_ _null_ )); -DESCR("normalize one word by 
dictionary"); - -DATA(insert OID = 3725 ( dsimple_init PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 2281 "2281" _null_ _null_ _null_ _null_ _null_ dsimple_init _null_ _null_ _null_ )); -DESCR("(internal)"); -DATA(insert OID = 3726 ( dsimple_lexize PGNSP PGUID 12 1 0 0 0 f f f f t f i s 4 0 2281 "2281 2281 2281 2281" _null_ _null_ _null_ _null_ _null_ dsimple_lexize _null_ _null_ _null_ )); -DESCR("(internal)"); - -DATA(insert OID = 3728 ( dsynonym_init PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 2281 "2281" _null_ _null_ _null_ _null_ _null_ dsynonym_init _null_ _null_ _null_ )); -DESCR("(internal)"); -DATA(insert OID = 3729 ( dsynonym_lexize PGNSP PGUID 12 1 0 0 0 f f f f t f i s 4 0 2281 "2281 2281 2281 2281" _null_ _null_ _null_ _null_ _null_ dsynonym_lexize _null_ _null_ _null_ )); -DESCR("(internal)"); - -DATA(insert OID = 3731 ( dispell_init PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 2281 "2281" _null_ _null_ _null_ _null_ _null_ dispell_init _null_ _null_ _null_ )); -DESCR("(internal)"); -DATA(insert OID = 3732 ( dispell_lexize PGNSP PGUID 12 1 0 0 0 f f f f t f i s 4 0 2281 "2281 2281 2281 2281" _null_ _null_ _null_ _null_ _null_ dispell_lexize _null_ _null_ _null_ )); -DESCR("(internal)"); - -DATA(insert OID = 3740 ( thesaurus_init PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 2281 "2281" _null_ _null_ _null_ _null_ _null_ thesaurus_init _null_ _null_ _null_ )); -DESCR("(internal)"); -DATA(insert OID = 3741 ( thesaurus_lexize PGNSP PGUID 12 1 0 0 0 f f f f t f i s 4 0 2281 "2281 2281 2281 2281" _null_ _null_ _null_ _null_ _null_ thesaurus_lexize _null_ _null_ _null_ )); -DESCR("(internal)"); - -DATA(insert OID = 3743 ( ts_headline PGNSP PGUID 12 100 0 0 0 f f f f t f i s 4 0 25 "3734 25 3615 25" _null_ _null_ _null_ _null_ _null_ ts_headline_byid_opt _null_ _null_ _null_ )); -DESCR("generate headline"); -DATA(insert OID = 3744 ( ts_headline PGNSP PGUID 12 100 0 0 0 f f f f t f i s 3 0 25 "3734 25 3615" _null_ _null_ _null_ _null_ _null_ ts_headline_byid _null_ _null_ _null_ )); -DESCR("generate headline"); -DATA(insert OID = 3754 ( ts_headline PGNSP PGUID 12 100 0 0 0 f f f f t f s s 3 0 25 "25 3615 25" _null_ _null_ _null_ _null_ _null_ ts_headline_opt _null_ _null_ _null_ )); -DESCR("generate headline"); -DATA(insert OID = 3755 ( ts_headline PGNSP PGUID 12 100 0 0 0 f f f f t f s s 2 0 25 "25 3615" _null_ _null_ _null_ _null_ _null_ ts_headline _null_ _null_ _null_ )); -DESCR("generate headline"); - -DATA(insert OID = 4201 ( ts_headline PGNSP PGUID 12 100 0 0 0 f f f f t f i s 4 0 3802 "3734 3802 3615 25" _null_ _null_ _null_ _null_ _null_ ts_headline_jsonb_byid_opt _null_ _null_ _null_ )); -DESCR("generate headline from jsonb"); -DATA(insert OID = 4202 ( ts_headline PGNSP PGUID 12 100 0 0 0 f f f f t f i s 3 0 3802 "3734 3802 3615" _null_ _null_ _null_ _null_ _null_ ts_headline_jsonb_byid _null_ _null_ _null_ )); -DESCR("generate headline from jsonb"); -DATA(insert OID = 4203 ( ts_headline PGNSP PGUID 12 100 0 0 0 f f f f t f s s 3 0 3802 "3802 3615 25" _null_ _null_ _null_ _null_ _null_ ts_headline_jsonb_opt _null_ _null_ _null_ )); -DESCR("generate headline from jsonb"); -DATA(insert OID = 4204 ( ts_headline PGNSP PGUID 12 100 0 0 0 f f f f t f s s 2 0 3802 "3802 3615" _null_ _null_ _null_ _null_ _null_ ts_headline_jsonb _null_ _null_ _null_ )); -DESCR("generate headline from jsonb"); - -DATA(insert OID = 4205 ( ts_headline PGNSP PGUID 12 100 0 0 0 f f f f t f i s 4 0 114 "3734 114 3615 25" _null_ _null_ _null_ _null_ _null_ ts_headline_json_byid_opt _null_ _null_ _null_ )); 
-DESCR("generate headline from json"); -DATA(insert OID = 4206 ( ts_headline PGNSP PGUID 12 100 0 0 0 f f f f t f i s 3 0 114 "3734 114 3615" _null_ _null_ _null_ _null_ _null_ ts_headline_json_byid _null_ _null_ _null_ )); -DESCR("generate headline from json"); -DATA(insert OID = 4207 ( ts_headline PGNSP PGUID 12 100 0 0 0 f f f f t f s s 3 0 114 "114 3615 25" _null_ _null_ _null_ _null_ _null_ ts_headline_json_opt _null_ _null_ _null_ )); -DESCR("generate headline from json"); -DATA(insert OID = 4208 ( ts_headline PGNSP PGUID 12 100 0 0 0 f f f f t f s s 2 0 114 "114 3615" _null_ _null_ _null_ _null_ _null_ ts_headline_json _null_ _null_ _null_ )); -DESCR("generate headline from json"); - -DATA(insert OID = 3745 ( to_tsvector PGNSP PGUID 12 100 0 0 0 f f f f t f i s 2 0 3614 "3734 25" _null_ _null_ _null_ _null_ _null_ to_tsvector_byid _null_ _null_ _null_ )); -DESCR("transform to tsvector"); -DATA(insert OID = 3746 ( to_tsquery PGNSP PGUID 12 100 0 0 0 f f f f t f i s 2 0 3615 "3734 25" _null_ _null_ _null_ _null_ _null_ to_tsquery_byid _null_ _null_ _null_ )); -DESCR("make tsquery"); -DATA(insert OID = 3747 ( plainto_tsquery PGNSP PGUID 12 100 0 0 0 f f f f t f i s 2 0 3615 "3734 25" _null_ _null_ _null_ _null_ _null_ plainto_tsquery_byid _null_ _null_ _null_ )); -DESCR("transform to tsquery"); -DATA(insert OID = 5006 ( phraseto_tsquery PGNSP PGUID 12 100 0 0 0 f f f f t f i s 2 0 3615 "3734 25" _null_ _null_ _null_ _null_ _null_ phraseto_tsquery_byid _null_ _null_ _null_ )); -DESCR("transform to tsquery"); -DATA(insert OID = 3749 ( to_tsvector PGNSP PGUID 12 100 0 0 0 f f f f t f s s 1 0 3614 "25" _null_ _null_ _null_ _null_ _null_ to_tsvector _null_ _null_ _null_ )); -DESCR("transform to tsvector"); -DATA(insert OID = 3750 ( to_tsquery PGNSP PGUID 12 100 0 0 0 f f f f t f s s 1 0 3615 "25" _null_ _null_ _null_ _null_ _null_ to_tsquery _null_ _null_ _null_ )); -DESCR("make tsquery"); -DATA(insert OID = 3751 ( plainto_tsquery PGNSP PGUID 12 100 0 0 0 f f f f t f s s 1 0 3615 "25" _null_ _null_ _null_ _null_ _null_ plainto_tsquery _null_ _null_ _null_ )); -DESCR("transform to tsquery"); -DATA(insert OID = 5001 ( phraseto_tsquery PGNSP PGUID 12 100 0 0 0 f f f f t f s s 1 0 3615 "25" _null_ _null_ _null_ _null_ _null_ phraseto_tsquery _null_ _null_ _null_ )); -DESCR("transform to tsquery"); -DATA(insert OID = 4209 ( to_tsvector PGNSP PGUID 12 100 0 0 0 f f f f t f s s 1 0 3614 "3802" _null_ _null_ _null_ _null_ _null_ jsonb_to_tsvector _null_ _null_ _null_ )); -DESCR("transform jsonb to tsvector"); -DATA(insert OID = 4210 ( to_tsvector PGNSP PGUID 12 100 0 0 0 f f f f t f s s 1 0 3614 "114" _null_ _null_ _null_ _null_ _null_ json_to_tsvector _null_ _null_ _null_ )); -DESCR("transform json to tsvector"); -DATA(insert OID = 4211 ( to_tsvector PGNSP PGUID 12 100 0 0 0 f f f f t f i s 2 0 3614 "3734 3802" _null_ _null_ _null_ _null_ _null_ jsonb_to_tsvector_byid _null_ _null_ _null_ )); -DESCR("transform jsonb to tsvector"); -DATA(insert OID = 4212 ( to_tsvector PGNSP PGUID 12 100 0 0 0 f f f f t f i s 2 0 3614 "3734 114" _null_ _null_ _null_ _null_ _null_ json_to_tsvector_byid _null_ _null_ _null_ )); -DESCR("transform json to tsvector"); - -DATA(insert OID = 3752 ( tsvector_update_trigger PGNSP PGUID 12 1 0 0 0 f f f f f f v s 0 0 2279 "" _null_ _null_ _null_ _null_ _null_ tsvector_update_trigger_byid _null_ _null_ _null_ )); -DESCR("trigger for automatic update of tsvector column"); -DATA(insert OID = 3753 ( tsvector_update_trigger_column PGNSP PGUID 12 1 0 0 0 f f f f f f v s 0 0 2279 "" 
_null_ _null_ _null_ _null_ _null_ tsvector_update_trigger_bycolumn _null_ _null_ _null_ )); -DESCR("trigger for automatic update of tsvector column"); - -DATA(insert OID = 3759 ( get_current_ts_config PGNSP PGUID 12 1 0 0 0 f f f f t f s s 0 0 3734 "" _null_ _null_ _null_ _null_ _null_ get_current_ts_config _null_ _null_ _null_ )); -DESCR("get current tsearch configuration"); - -DATA(insert OID = 3736 ( regconfigin PGNSP PGUID 12 1 0 0 0 f f f f t f s s 1 0 3734 "2275" _null_ _null_ _null_ _null_ _null_ regconfigin _null_ _null_ _null_ )); -DESCR("I/O"); -DATA(insert OID = 3737 ( regconfigout PGNSP PGUID 12 1 0 0 0 f f f f t f s s 1 0 2275 "3734" _null_ _null_ _null_ _null_ _null_ regconfigout _null_ _null_ _null_ )); -DESCR("I/O"); -DATA(insert OID = 3738 ( regconfigrecv PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 3734 "2281" _null_ _null_ _null_ _null_ _null_ regconfigrecv _null_ _null_ _null_ )); -DESCR("I/O"); -DATA(insert OID = 3739 ( regconfigsend PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 17 "3734" _null_ _null_ _null_ _null_ _null_ regconfigsend _null_ _null_ _null_ )); -DESCR("I/O"); - -DATA(insert OID = 3771 ( regdictionaryin PGNSP PGUID 12 1 0 0 0 f f f f t f s s 1 0 3769 "2275" _null_ _null_ _null_ _null_ _null_ regdictionaryin _null_ _null_ _null_ )); -DESCR("I/O"); -DATA(insert OID = 3772 ( regdictionaryout PGNSP PGUID 12 1 0 0 0 f f f f t f s s 1 0 2275 "3769" _null_ _null_ _null_ _null_ _null_ regdictionaryout _null_ _null_ _null_ )); -DESCR("I/O"); -DATA(insert OID = 3773 ( regdictionaryrecv PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 3769 "2281" _null_ _null_ _null_ _null_ _null_ regdictionaryrecv _null_ _null_ _null_ )); -DESCR("I/O"); -DATA(insert OID = 3774 ( regdictionarysend PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 17 "3769" _null_ _null_ _null_ _null_ _null_ regdictionarysend _null_ _null_ _null_ )); -DESCR("I/O"); - -/* jsonb */ -DATA(insert OID = 3806 ( jsonb_in PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 3802 "2275" _null_ _null_ _null_ _null_ _null_ jsonb_in _null_ _null_ _null_ )); -DESCR("I/O"); -DATA(insert OID = 3805 ( jsonb_recv PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 3802 "2281" _null_ _null_ _null_ _null_ _null_ jsonb_recv _null_ _null_ _null_ )); -DESCR("I/O"); -DATA(insert OID = 3804 ( jsonb_out PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 2275 "3802" _null_ _null_ _null_ _null_ _null_ jsonb_out _null_ _null_ _null_ )); -DESCR("I/O"); -DATA(insert OID = 3803 ( jsonb_send PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 17 "3802" _null_ _null_ _null_ _null_ _null_ jsonb_send _null_ _null_ _null_ )); -DESCR("I/O"); - -DATA(insert OID = 3263 ( jsonb_object PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 3802 "1009" _null_ _null_ _null_ _null_ _null_ jsonb_object _null_ _null_ _null_ )); -DESCR("map text array of key value pairs to jsonb object"); -DATA(insert OID = 3264 ( jsonb_object PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 3802 "1009 1009" _null_ _null_ _null_ _null_ _null_ jsonb_object_two_arg _null_ _null_ _null_ )); -DESCR("map text array of key value pairs to jsonb object"); -DATA(insert OID = 3787 ( to_jsonb PGNSP PGUID 12 1 0 0 0 f f f f t f s s 1 0 3802 "2283" _null_ _null_ _null_ _null_ _null_ to_jsonb _null_ _null_ _null_ )); -DESCR("map input to jsonb"); -DATA(insert OID = 3265 ( jsonb_agg_transfn PGNSP PGUID 12 1 0 0 0 f f f f f f s s 2 0 2281 "2281 2283" _null_ _null_ _null_ _null_ _null_ jsonb_agg_transfn _null_ _null_ _null_ )); -DESCR("jsonb aggregate transition function"); -DATA(insert OID = 3266 ( jsonb_agg_finalfn PGNSP PGUID 12 1 0 0 0 f f f f 
f f s s 1 0 3802 "2281" _null_ _null_ _null_ _null_ _null_ jsonb_agg_finalfn _null_ _null_ _null_ )); -DESCR("jsonb aggregate final function"); -DATA(insert OID = 3267 ( jsonb_agg PGNSP PGUID 12 1 0 0 0 t f f f f f s s 1 0 3802 "2283" _null_ _null_ _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ )); -DESCR("aggregate input into jsonb"); -DATA(insert OID = 3268 ( jsonb_object_agg_transfn PGNSP PGUID 12 1 0 0 0 f f f f f f s s 3 0 2281 "2281 2276 2276" _null_ _null_ _null_ _null_ _null_ jsonb_object_agg_transfn _null_ _null_ _null_ )); -DESCR("jsonb object aggregate transition function"); -DATA(insert OID = 3269 ( jsonb_object_agg_finalfn PGNSP PGUID 12 1 0 0 0 f f f f f f s s 1 0 3802 "2281" _null_ _null_ _null_ _null_ _null_ jsonb_object_agg_finalfn _null_ _null_ _null_ )); -DESCR("jsonb object aggregate final function"); -DATA(insert OID = 3270 ( jsonb_object_agg PGNSP PGUID 12 1 0 0 0 t f f f f f i s 2 0 3802 "2276 2276" _null_ _null_ _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ )); -DESCR("aggregate inputs into jsonb object"); -DATA(insert OID = 3271 ( jsonb_build_array PGNSP PGUID 12 1 0 2276 0 f f f f f f s s 1 0 3802 "2276" "{2276}" "{v}" _null_ _null_ _null_ jsonb_build_array _null_ _null_ _null_ )); -DESCR("build a jsonb array from any inputs"); -DATA(insert OID = 3272 ( jsonb_build_array PGNSP PGUID 12 1 0 0 0 f f f f f f s s 0 0 3802 "" _null_ _null_ _null_ _null_ _null_ jsonb_build_array_noargs _null_ _null_ _null_ )); -DESCR("build an empty jsonb array"); -DATA(insert OID = 3273 ( jsonb_build_object PGNSP PGUID 12 1 0 2276 0 f f f f f f s s 1 0 3802 "2276" "{2276}" "{v}" _null_ _null_ _null_ jsonb_build_object _null_ _null_ _null_ )); -DESCR("build a jsonb object from pairwise key/value inputs"); -DATA(insert OID = 3274 ( jsonb_build_object PGNSP PGUID 12 1 0 0 0 f f f f f f s s 0 0 3802 "" _null_ _null_ _null_ _null_ _null_ jsonb_build_object_noargs _null_ _null_ _null_ )); -DESCR("build an empty jsonb object"); -DATA(insert OID = 3262 ( jsonb_strip_nulls PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 3802 "3802" _null_ _null_ _null_ _null_ _null_ jsonb_strip_nulls _null_ _null_ _null_ )); -DESCR("remove object fields with null values from jsonb"); - -DATA(insert OID = 3478 ( jsonb_object_field PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 3802 "3802 25" _null_ _null_ "{from_json, field_name}" _null_ _null_ jsonb_object_field _null_ _null_ _null_ )); -DATA(insert OID = 3214 ( jsonb_object_field_text PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 25 "3802 25" _null_ _null_ "{from_json, field_name}" _null_ _null_ jsonb_object_field_text _null_ _null_ _null_ )); -DATA(insert OID = 3215 ( jsonb_array_element PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 3802 "3802 23" _null_ _null_ "{from_json, element_index}" _null_ _null_ jsonb_array_element _null_ _null_ _null_ )); -DATA(insert OID = 3216 ( jsonb_array_element_text PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 25 "3802 23" _null_ _null_ "{from_json, element_index}" _null_ _null_ jsonb_array_element_text _null_ _null_ _null_ )); -DATA(insert OID = 3217 ( jsonb_extract_path PGNSP PGUID 12 1 0 25 0 f f f f t f i s 2 0 3802 "3802 1009" "{3802,1009}" "{i,v}" "{from_json,path_elems}" _null_ _null_ jsonb_extract_path _null_ _null_ _null_ )); -DESCR("get value from jsonb with path elements"); -DATA(insert OID = 3940 ( jsonb_extract_path_text PGNSP PGUID 12 1 0 25 0 f f f f t f i s 2 0 25 "3802 1009" "{3802,1009}" "{i,v}" "{from_json,path_elems}" _null_ _null_ jsonb_extract_path_text _null_ _null_ _null_ )); -DESCR("get value from 
jsonb as text with path elements"); -DATA(insert OID = 3219 ( jsonb_array_elements PGNSP PGUID 12 1 100 0 0 f f f f t t i s 1 0 3802 "3802" "{3802,3802}" "{i,o}" "{from_json,value}" _null_ _null_ jsonb_array_elements _null_ _null_ _null_ )); -DESCR("elements of a jsonb array"); -DATA(insert OID = 3465 ( jsonb_array_elements_text PGNSP PGUID 12 1 100 0 0 f f f f t t i s 1 0 25 "3802" "{3802,25}" "{i,o}" "{from_json,value}" _null_ _null_ jsonb_array_elements_text _null_ _null_ _null_ )); -DESCR("elements of jsonb array"); -DATA(insert OID = 3207 ( jsonb_array_length PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 23 "3802" _null_ _null_ _null_ _null_ _null_ jsonb_array_length _null_ _null_ _null_ )); -DESCR("length of jsonb array"); -DATA(insert OID = 3931 ( jsonb_object_keys PGNSP PGUID 12 1 100 0 0 f f f f t t i s 1 0 25 "3802" _null_ _null_ _null_ _null_ _null_ jsonb_object_keys _null_ _null_ _null_ )); -DESCR("get jsonb object keys"); -DATA(insert OID = 3208 ( jsonb_each PGNSP PGUID 12 1 100 0 0 f f f f t t i s 1 0 2249 "3802" "{3802,25,3802}" "{i,o,o}" "{from_json,key,value}" _null_ _null_ jsonb_each _null_ _null_ _null_ )); -DESCR("key value pairs of a jsonb object"); -DATA(insert OID = 3932 ( jsonb_each_text PGNSP PGUID 12 1 100 0 0 f f f f t t i s 1 0 2249 "3802" "{3802,25,25}" "{i,o,o}" "{from_json,key,value}" _null_ _null_ jsonb_each_text _null_ _null_ _null_ )); -DESCR("key value pairs of a jsonb object"); -DATA(insert OID = 3209 ( jsonb_populate_record PGNSP PGUID 12 1 0 0 0 f f f f f f s s 2 0 2283 "2283 3802" _null_ _null_ _null_ _null_ _null_ jsonb_populate_record _null_ _null_ _null_ )); -DESCR("get record fields from a jsonb object"); -DATA(insert OID = 3475 ( jsonb_populate_recordset PGNSP PGUID 12 1 100 0 0 f f f f f t s s 2 0 2283 "2283 3802" _null_ _null_ _null_ _null_ _null_ jsonb_populate_recordset _null_ _null_ _null_ )); -DESCR("get set of records with fields from a jsonb array of objects"); -DATA(insert OID = 3490 ( jsonb_to_record PGNSP PGUID 12 1 0 0 0 f f f f t f s s 1 0 2249 "3802" _null_ _null_ _null_ _null_ _null_ jsonb_to_record _null_ _null_ _null_ )); -DESCR("get record fields from a jsonb object"); -DATA(insert OID = 3491 ( jsonb_to_recordset PGNSP PGUID 12 1 100 0 0 f f f f f t s s 1 0 2249 "3802" _null_ _null_ _null_ _null_ _null_ jsonb_to_recordset _null_ _null_ _null_ )); -DESCR("get set of records with fields from a jsonb array of objects"); -DATA(insert OID = 3210 ( jsonb_typeof PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 25 "3802" _null_ _null_ _null_ _null_ _null_ jsonb_typeof _null_ _null_ _null_ )); -DESCR("get the type of a jsonb value"); -DATA(insert OID = 4038 ( jsonb_ne PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 16 "3802 3802" _null_ _null_ _null_ _null_ _null_ jsonb_ne _null_ _null_ _null_ )); -DATA(insert OID = 4039 ( jsonb_lt PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 16 "3802 3802" _null_ _null_ _null_ _null_ _null_ jsonb_lt _null_ _null_ _null_ )); -DATA(insert OID = 4040 ( jsonb_gt PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 16 "3802 3802" _null_ _null_ _null_ _null_ _null_ jsonb_gt _null_ _null_ _null_ )); -DATA(insert OID = 4041 ( jsonb_le PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 16 "3802 3802" _null_ _null_ _null_ _null_ _null_ jsonb_le _null_ _null_ _null_ )); -DATA(insert OID = 4042 ( jsonb_ge PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 16 "3802 3802" _null_ _null_ _null_ _null_ _null_ jsonb_ge _null_ _null_ _null_ )); -DATA(insert OID = 4043 ( jsonb_eq PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 16 "3802 3802" _null_ _null_ _null_ _null_ 
_null_ jsonb_eq _null_ _null_ _null_ )); -DATA(insert OID = 4044 ( jsonb_cmp PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 23 "3802 3802" _null_ _null_ _null_ _null_ _null_ jsonb_cmp _null_ _null_ _null_ )); -DESCR("less-equal-greater"); -DATA(insert OID = 4045 ( jsonb_hash PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 23 "3802" _null_ _null_ _null_ _null_ _null_ jsonb_hash _null_ _null_ _null_ )); -DESCR("hash"); -DATA(insert OID = 4046 ( jsonb_contains PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 16 "3802 3802" _null_ _null_ _null_ _null_ _null_ jsonb_contains _null_ _null_ _null_ )); -DATA(insert OID = 4047 ( jsonb_exists PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 16 "3802 25" _null_ _null_ _null_ _null_ _null_ jsonb_exists _null_ _null_ _null_ )); -DATA(insert OID = 4048 ( jsonb_exists_any PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 16 "3802 1009" _null_ _null_ _null_ _null_ _null_ jsonb_exists_any _null_ _null_ _null_ )); -DATA(insert OID = 4049 ( jsonb_exists_all PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 16 "3802 1009" _null_ _null_ _null_ _null_ _null_ jsonb_exists_all _null_ _null_ _null_ )); -DATA(insert OID = 4050 ( jsonb_contained PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 16 "3802 3802" _null_ _null_ _null_ _null_ _null_ jsonb_contained _null_ _null_ _null_ )); -DATA(insert OID = 3480 ( gin_compare_jsonb PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 23 "25 25" _null_ _null_ _null_ _null_ _null_ gin_compare_jsonb _null_ _null_ _null_ )); -DESCR("GIN support"); -DATA(insert OID = 3482 ( gin_extract_jsonb PGNSP PGUID 12 1 0 0 0 f f f f t f i s 3 0 2281 "3802 2281 2281" _null_ _null_ _null_ _null_ _null_ gin_extract_jsonb _null_ _null_ _null_ )); -DESCR("GIN support"); -DATA(insert OID = 3483 ( gin_extract_jsonb_query PGNSP PGUID 12 1 0 0 0 f f f f t f i s 7 0 2281 "3802 2281 21 2281 2281 2281 2281" _null_ _null_ _null_ _null_ _null_ gin_extract_jsonb_query _null_ _null_ _null_ )); -DESCR("GIN support"); -DATA(insert OID = 3484 ( gin_consistent_jsonb PGNSP PGUID 12 1 0 0 0 f f f f t f i s 8 0 16 "2281 21 3802 23 2281 2281 2281 2281" _null_ _null_ _null_ _null_ _null_ gin_consistent_jsonb _null_ _null_ _null_ )); -DESCR("GIN support"); -DATA(insert OID = 3488 ( gin_triconsistent_jsonb PGNSP PGUID 12 1 0 0 0 f f f f t f i s 7 0 18 "2281 21 3802 23 2281 2281 2281" _null_ _null_ _null_ _null_ _null_ gin_triconsistent_jsonb _null_ _null_ _null_ )); -DESCR("GIN support"); -DATA(insert OID = 3485 ( gin_extract_jsonb_path PGNSP PGUID 12 1 0 0 0 f f f f t f i s 3 0 2281 "3802 2281 2281" _null_ _null_ _null_ _null_ _null_ gin_extract_jsonb_path _null_ _null_ _null_ )); -DESCR("GIN support"); -DATA(insert OID = 3486 ( gin_extract_jsonb_query_path PGNSP PGUID 12 1 0 0 0 f f f f t f i s 7 0 2281 "3802 2281 21 2281 2281 2281 2281" _null_ _null_ _null_ _null_ _null_ gin_extract_jsonb_query_path _null_ _null_ _null_ )); -DESCR("GIN support"); -DATA(insert OID = 3487 ( gin_consistent_jsonb_path PGNSP PGUID 12 1 0 0 0 f f f f t f i s 8 0 16 "2281 21 3802 23 2281 2281 2281 2281" _null_ _null_ _null_ _null_ _null_ gin_consistent_jsonb_path _null_ _null_ _null_ )); -DESCR("GIN support"); -DATA(insert OID = 3489 ( gin_triconsistent_jsonb_path PGNSP PGUID 12 1 0 0 0 f f f f t f i s 7 0 18 "2281 21 3802 23 2281 2281 2281" _null_ _null_ _null_ _null_ _null_ gin_triconsistent_jsonb_path _null_ _null_ _null_ )); -DESCR("GIN support"); -DATA(insert OID = 3301 ( jsonb_concat PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 3802 "3802 3802" _null_ _null_ _null_ _null_ _null_ jsonb_concat _null_ _null_ _null_ )); -DATA(insert 
OID = 3302 ( jsonb_delete PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 3802 "3802 25" _null_ _null_ _null_ _null_ _null_ jsonb_delete _null_ _null_ _null_ )); -DATA(insert OID = 3303 ( jsonb_delete PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 3802 "3802 23" _null_ _null_ _null_ _null_ _null_ jsonb_delete_idx _null_ _null_ _null_ )); -DATA(insert OID = 3343 ( jsonb_delete PGNSP PGUID 12 1 0 25 0 f f f f t f i s 2 0 3802 "3802 1009" "{3802,1009}" "{i,v}" "{from_json,path_elems}" _null_ _null_ jsonb_delete_array _null_ _null_ _null_ )); -DATA(insert OID = 3304 ( jsonb_delete_path PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 3802 "3802 1009" _null_ _null_ _null_ _null_ _null_ jsonb_delete_path _null_ _null_ _null_ )); -DATA(insert OID = 3305 ( jsonb_set PGNSP PGUID 12 1 0 0 0 f f f f t f i s 4 0 3802 "3802 1009 3802 16" _null_ _null_ _null_ _null_ _null_ jsonb_set _null_ _null_ _null_ )); -DESCR("Set part of a jsonb"); -DATA(insert OID = 3306 ( jsonb_pretty PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 25 "3802" _null_ _null_ _null_ _null_ _null_ jsonb_pretty _null_ _null_ _null_ )); -DESCR("Indented text from jsonb"); -DATA(insert OID = 3579 ( jsonb_insert PGNSP PGUID 12 1 0 0 0 f f f f t f i s 4 0 3802 "3802 1009 3802 16" _null_ _null_ _null_ _null_ _null_ jsonb_insert _null_ _null_ _null_ )); -DESCR("Insert value into a jsonb"); -/* txid */ -DATA(insert OID = 2939 ( txid_snapshot_in PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 2970 "2275" _null_ _null_ _null_ _null_ _null_ txid_snapshot_in _null_ _null_ _null_ )); -DESCR("I/O"); -DATA(insert OID = 2940 ( txid_snapshot_out PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 2275 "2970" _null_ _null_ _null_ _null_ _null_ txid_snapshot_out _null_ _null_ _null_ )); -DESCR("I/O"); -DATA(insert OID = 2941 ( txid_snapshot_recv PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 2970 "2281" _null_ _null_ _null_ _null_ _null_ txid_snapshot_recv _null_ _null_ _null_ )); -DESCR("I/O"); -DATA(insert OID = 2942 ( txid_snapshot_send PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 17 "2970" _null_ _null_ _null_ _null_ _null_ txid_snapshot_send _null_ _null_ _null_ )); -DESCR("I/O"); -DATA(insert OID = 2943 ( txid_current PGNSP PGUID 12 1 0 0 0 f f f f t f s u 0 0 20 "" _null_ _null_ _null_ _null_ _null_ txid_current _null_ _null_ _null_ )); -DESCR("get current transaction ID"); -DATA(insert OID = 3348 ( txid_current_if_assigned PGNSP PGUID 12 1 0 0 0 f f f f t f s u 0 0 20 "" _null_ _null_ _null_ _null_ _null_ txid_current_if_assigned _null_ _null_ _null_ )); -DESCR("get current transaction ID"); -DATA(insert OID = 2944 ( txid_current_snapshot PGNSP PGUID 12 1 0 0 0 f f f f t f s s 0 0 2970 "" _null_ _null_ _null_ _null_ _null_ txid_current_snapshot _null_ _null_ _null_ )); -DESCR("get current snapshot"); -DATA(insert OID = 2945 ( txid_snapshot_xmin PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 20 "2970" _null_ _null_ _null_ _null_ _null_ txid_snapshot_xmin _null_ _null_ _null_ )); -DESCR("get xmin of snapshot"); -DATA(insert OID = 2946 ( txid_snapshot_xmax PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 20 "2970" _null_ _null_ _null_ _null_ _null_ txid_snapshot_xmax _null_ _null_ _null_ )); -DESCR("get xmax of snapshot"); -DATA(insert OID = 2947 ( txid_snapshot_xip PGNSP PGUID 12 1 50 0 0 f f f f t t i s 1 0 20 "2970" _null_ _null_ _null_ _null_ _null_ txid_snapshot_xip _null_ _null_ _null_ )); -DESCR("get set of in-progress txids in snapshot"); -DATA(insert OID = 2948 ( txid_visible_in_snapshot PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 16 "20 2970" _null_ _null_ _null_ _null_ _null_ 
txid_visible_in_snapshot _null_ _null_ _null_ )); -DESCR("is txid visible in snapshot?"); -DATA(insert OID = 3360 ( txid_status PGNSP PGUID 12 1 0 0 0 f f f f t f v s 1 0 25 "20" _null_ _null_ _null_ _null_ _null_ txid_status _null_ _null_ _null_ )); -DESCR("commit status of transaction"); - -/* record comparison using normal comparison rules */ -DATA(insert OID = 2981 ( record_eq PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 16 "2249 2249" _null_ _null_ _null_ _null_ _null_ record_eq _null_ _null_ _null_ )); -DATA(insert OID = 2982 ( record_ne PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 16 "2249 2249" _null_ _null_ _null_ _null_ _null_ record_ne _null_ _null_ _null_ )); -DATA(insert OID = 2983 ( record_lt PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 16 "2249 2249" _null_ _null_ _null_ _null_ _null_ record_lt _null_ _null_ _null_ )); -DATA(insert OID = 2984 ( record_gt PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 16 "2249 2249" _null_ _null_ _null_ _null_ _null_ record_gt _null_ _null_ _null_ )); -DATA(insert OID = 2985 ( record_le PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 16 "2249 2249" _null_ _null_ _null_ _null_ _null_ record_le _null_ _null_ _null_ )); -DATA(insert OID = 2986 ( record_ge PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 16 "2249 2249" _null_ _null_ _null_ _null_ _null_ record_ge _null_ _null_ _null_ )); -DATA(insert OID = 2987 ( btrecordcmp PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 23 "2249 2249" _null_ _null_ _null_ _null_ _null_ btrecordcmp _null_ _null_ _null_ )); -DESCR("less-equal-greater"); - -/* record comparison using raw byte images */ -DATA(insert OID = 3181 ( record_image_eq PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 16 "2249 2249" _null_ _null_ _null_ _null_ _null_ record_image_eq _null_ _null_ _null_ )); -DATA(insert OID = 3182 ( record_image_ne PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 16 "2249 2249" _null_ _null_ _null_ _null_ _null_ record_image_ne _null_ _null_ _null_ )); -DATA(insert OID = 3183 ( record_image_lt PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 16 "2249 2249" _null_ _null_ _null_ _null_ _null_ record_image_lt _null_ _null_ _null_ )); -DATA(insert OID = 3184 ( record_image_gt PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 16 "2249 2249" _null_ _null_ _null_ _null_ _null_ record_image_gt _null_ _null_ _null_ )); -DATA(insert OID = 3185 ( record_image_le PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 16 "2249 2249" _null_ _null_ _null_ _null_ _null_ record_image_le _null_ _null_ _null_ )); -DATA(insert OID = 3186 ( record_image_ge PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 16 "2249 2249" _null_ _null_ _null_ _null_ _null_ record_image_ge _null_ _null_ _null_ )); -DATA(insert OID = 3187 ( btrecordimagecmp PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 23 "2249 2249" _null_ _null_ _null_ _null_ _null_ btrecordimagecmp _null_ _null_ _null_ )); -DESCR("less-equal-greater based on byte images"); - -/* Extensions */ -DATA(insert OID = 3082 ( pg_available_extensions PGNSP PGUID 12 10 100 0 0 f f f f t t s s 0 0 2249 "" "{19,25,25}" "{o,o,o}" "{name,default_version,comment}" _null_ _null_ pg_available_extensions _null_ _null_ _null_ )); -DESCR("list available extensions"); -DATA(insert OID = 3083 ( pg_available_extension_versions PGNSP PGUID 12 10 100 0 0 f f f f t t s s 0 0 2249 "" "{19,25,16,16,19,1003,25}" "{o,o,o,o,o,o,o}" "{name,version,superuser,relocatable,schema,requires,comment}" _null_ _null_ pg_available_extension_versions _null_ _null_ _null_ )); -DESCR("list available extension versions"); -DATA(insert OID = 3084 ( pg_extension_update_paths PGNSP PGUID 12 10 100 
0 0 f f f f t t s s 1 0 2249 "19" "{19,25,25,25}" "{i,o,o,o}" "{name,source,target,path}" _null_ _null_ pg_extension_update_paths _null_ _null_ _null_ )); -DESCR("list an extension's version update paths"); -DATA(insert OID = 3086 ( pg_extension_config_dump PGNSP PGUID 12 1 0 0 0 f f f f t f v u 2 0 2278 "2205 25" _null_ _null_ _null_ _null_ _null_ pg_extension_config_dump _null_ _null_ _null_ )); -DESCR("flag an extension's table contents to be emitted by pg_dump"); - -/* SQL-spec window functions */ -DATA(insert OID = 3100 ( row_number PGNSP PGUID 12 1 0 0 0 f t f f f f i s 0 0 20 "" _null_ _null_ _null_ _null_ _null_ window_row_number _null_ _null_ _null_ )); -DESCR("row number within partition"); -DATA(insert OID = 3101 ( rank PGNSP PGUID 12 1 0 0 0 f t f f f f i s 0 0 20 "" _null_ _null_ _null_ _null_ _null_ window_rank _null_ _null_ _null_ )); -DESCR("integer rank with gaps"); -DATA(insert OID = 3102 ( dense_rank PGNSP PGUID 12 1 0 0 0 f t f f f f i s 0 0 20 "" _null_ _null_ _null_ _null_ _null_ window_dense_rank _null_ _null_ _null_ )); -DESCR("integer rank without gaps"); -DATA(insert OID = 3103 ( percent_rank PGNSP PGUID 12 1 0 0 0 f t f f f f i s 0 0 701 "" _null_ _null_ _null_ _null_ _null_ window_percent_rank _null_ _null_ _null_ )); -DESCR("fractional rank within partition"); -DATA(insert OID = 3104 ( cume_dist PGNSP PGUID 12 1 0 0 0 f t f f f f i s 0 0 701 "" _null_ _null_ _null_ _null_ _null_ window_cume_dist _null_ _null_ _null_ )); -DESCR("fractional row number within partition"); -DATA(insert OID = 3105 ( ntile PGNSP PGUID 12 1 0 0 0 f t f f t f i s 1 0 23 "23" _null_ _null_ _null_ _null_ _null_ window_ntile _null_ _null_ _null_ )); -DESCR("split rows into N groups"); -DATA(insert OID = 3106 ( lag PGNSP PGUID 12 1 0 0 0 f t f f t f i s 1 0 2283 "2283" _null_ _null_ _null_ _null_ _null_ window_lag _null_ _null_ _null_ )); -DESCR("fetch the preceding row value"); -DATA(insert OID = 3107 ( lag PGNSP PGUID 12 1 0 0 0 f t f f t f i s 2 0 2283 "2283 23" _null_ _null_ _null_ _null_ _null_ window_lag_with_offset _null_ _null_ _null_ )); -DESCR("fetch the Nth preceding row value"); -DATA(insert OID = 3108 ( lag PGNSP PGUID 12 1 0 0 0 f t f f t f i s 3 0 2283 "2283 23 2283" _null_ _null_ _null_ _null_ _null_ window_lag_with_offset_and_default _null_ _null_ _null_ )); -DESCR("fetch the Nth preceding row value with default"); -DATA(insert OID = 3109 ( lead PGNSP PGUID 12 1 0 0 0 f t f f t f i s 1 0 2283 "2283" _null_ _null_ _null_ _null_ _null_ window_lead _null_ _null_ _null_ )); -DESCR("fetch the following row value"); -DATA(insert OID = 3110 ( lead PGNSP PGUID 12 1 0 0 0 f t f f t f i s 2 0 2283 "2283 23" _null_ _null_ _null_ _null_ _null_ window_lead_with_offset _null_ _null_ _null_ )); -DESCR("fetch the Nth following row value"); -DATA(insert OID = 3111 ( lead PGNSP PGUID 12 1 0 0 0 f t f f t f i s 3 0 2283 "2283 23 2283" _null_ _null_ _null_ _null_ _null_ window_lead_with_offset_and_default _null_ _null_ _null_ )); -DESCR("fetch the Nth following row value with default"); -DATA(insert OID = 3112 ( first_value PGNSP PGUID 12 1 0 0 0 f t f f t f i s 1 0 2283 "2283" _null_ _null_ _null_ _null_ _null_ window_first_value _null_ _null_ _null_ )); -DESCR("fetch the first row value"); -DATA(insert OID = 3113 ( last_value PGNSP PGUID 12 1 0 0 0 f t f f t f i s 1 0 2283 "2283" _null_ _null_ _null_ _null_ _null_ window_last_value _null_ _null_ _null_ )); -DESCR("fetch the last row value"); -DATA(insert OID = 3114 ( nth_value PGNSP PGUID 12 1 0 0 0 f t f f t f i s 2 0 2283 "2283 23" _null_ 
_null_ _null_ _null_ _null_ window_nth_value _null_ _null_ _null_ )); -DESCR("fetch the Nth row value"); - -/* functions for range types */ -DATA(insert OID = 3832 ( anyrange_in PGNSP PGUID 12 1 0 0 0 f f f f t f s s 3 0 3831 "2275 26 23" _null_ _null_ _null_ _null_ _null_ anyrange_in _null_ _null_ _null_ )); -DESCR("I/O"); -DATA(insert OID = 3833 ( anyrange_out PGNSP PGUID 12 1 0 0 0 f f f f t f s s 1 0 2275 "3831" _null_ _null_ _null_ _null_ _null_ anyrange_out _null_ _null_ _null_ )); -DESCR("I/O"); -DATA(insert OID = 3834 ( range_in PGNSP PGUID 12 1 0 0 0 f f f f t f s s 3 0 3831 "2275 26 23" _null_ _null_ _null_ _null_ _null_ range_in _null_ _null_ _null_ )); -DESCR("I/O"); -DATA(insert OID = 3835 ( range_out PGNSP PGUID 12 1 0 0 0 f f f f t f s s 1 0 2275 "3831" _null_ _null_ _null_ _null_ _null_ range_out _null_ _null_ _null_ )); -DESCR("I/O"); -DATA(insert OID = 3836 ( range_recv PGNSP PGUID 12 1 0 0 0 f f f f t f s s 3 0 3831 "2281 26 23" _null_ _null_ _null_ _null_ _null_ range_recv _null_ _null_ _null_ )); -DESCR("I/O"); -DATA(insert OID = 3837 ( range_send PGNSP PGUID 12 1 0 0 0 f f f f t f s s 1 0 17 "3831" _null_ _null_ _null_ _null_ _null_ range_send _null_ _null_ _null_ )); -DESCR("I/O"); -DATA(insert OID = 3848 ( lower PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 2283 "3831" _null_ _null_ _null_ _null_ _null_ range_lower _null_ _null_ _null_ )); -DESCR("lower bound of range"); -DATA(insert OID = 3849 ( upper PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 2283 "3831" _null_ _null_ _null_ _null_ _null_ range_upper _null_ _null_ _null_ )); -DESCR("upper bound of range"); -DATA(insert OID = 3850 ( isempty PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 16 "3831" _null_ _null_ _null_ _null_ _null_ range_empty _null_ _null_ _null_ )); -DESCR("is the range empty?"); -DATA(insert OID = 3851 ( lower_inc PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 16 "3831" _null_ _null_ _null_ _null_ _null_ range_lower_inc _null_ _null_ _null_ )); -DESCR("is the range's lower bound inclusive?"); -DATA(insert OID = 3852 ( upper_inc PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 16 "3831" _null_ _null_ _null_ _null_ _null_ range_upper_inc _null_ _null_ _null_ )); -DESCR("is the range's upper bound inclusive?"); -DATA(insert OID = 3853 ( lower_inf PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 16 "3831" _null_ _null_ _null_ _null_ _null_ range_lower_inf _null_ _null_ _null_ )); -DESCR("is the range's lower bound infinite?"); -DATA(insert OID = 3854 ( upper_inf PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 16 "3831" _null_ _null_ _null_ _null_ _null_ range_upper_inf _null_ _null_ _null_ )); -DESCR("is the range's upper bound infinite?"); -DATA(insert OID = 3855 ( range_eq PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 16 "3831 3831" _null_ _null_ _null_ _null_ _null_ range_eq _null_ _null_ _null_ )); -DATA(insert OID = 3856 ( range_ne PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 16 "3831 3831" _null_ _null_ _null_ _null_ _null_ range_ne _null_ _null_ _null_ )); -DATA(insert OID = 3857 ( range_overlaps PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 16 "3831 3831" _null_ _null_ _null_ _null_ _null_ range_overlaps _null_ _null_ _null_ )); -DATA(insert OID = 3858 ( range_contains_elem PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 16 "3831 2283" _null_ _null_ _null_ _null_ _null_ range_contains_elem _null_ _null_ _null_ )); -DATA(insert OID = 3859 ( range_contains PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 16 "3831 3831" _null_ _null_ _null_ _null_ _null_ range_contains _null_ _null_ _null_ )); -DATA(insert OID = 3860 ( 
elem_contained_by_range PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 16 "2283 3831" _null_ _null_ _null_ _null_ _null_ elem_contained_by_range _null_ _null_ _null_ )); -DATA(insert OID = 3861 ( range_contained_by PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 16 "3831 3831" _null_ _null_ _null_ _null_ _null_ range_contained_by _null_ _null_ _null_ )); -DATA(insert OID = 3862 ( range_adjacent PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 16 "3831 3831" _null_ _null_ _null_ _null_ _null_ range_adjacent _null_ _null_ _null_ )); -DATA(insert OID = 3863 ( range_before PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 16 "3831 3831" _null_ _null_ _null_ _null_ _null_ range_before _null_ _null_ _null_ )); -DATA(insert OID = 3864 ( range_after PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 16 "3831 3831" _null_ _null_ _null_ _null_ _null_ range_after _null_ _null_ _null_ )); -DATA(insert OID = 3865 ( range_overleft PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 16 "3831 3831" _null_ _null_ _null_ _null_ _null_ range_overleft _null_ _null_ _null_ )); -DATA(insert OID = 3866 ( range_overright PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 16 "3831 3831" _null_ _null_ _null_ _null_ _null_ range_overright _null_ _null_ _null_ )); -DATA(insert OID = 3867 ( range_union PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 3831 "3831 3831" _null_ _null_ _null_ _null_ _null_ range_union _null_ _null_ _null_ )); -DATA(insert OID = 4057 ( range_merge PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 3831 "3831 3831" _null_ _null_ _null_ _null_ _null_ range_merge _null_ _null_ _null_ )); -DESCR("the smallest range which includes both of the given ranges"); -DATA(insert OID = 3868 ( range_intersect PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 3831 "3831 3831" _null_ _null_ _null_ _null_ _null_ range_intersect _null_ _null_ _null_ )); -DATA(insert OID = 3869 ( range_minus PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 3831 "3831 3831" _null_ _null_ _null_ _null_ _null_ range_minus _null_ _null_ _null_ )); -DATA(insert OID = 3870 ( range_cmp PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 23 "3831 3831" _null_ _null_ _null_ _null_ _null_ range_cmp _null_ _null_ _null_ )); -DESCR("less-equal-greater"); -DATA(insert OID = 3871 ( range_lt PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 16 "3831 3831" _null_ _null_ _null_ _null_ _null_ range_lt _null_ _null_ _null_ )); -DATA(insert OID = 3872 ( range_le PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 16 "3831 3831" _null_ _null_ _null_ _null_ _null_ range_le _null_ _null_ _null_ )); -DATA(insert OID = 3873 ( range_ge PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 16 "3831 3831" _null_ _null_ _null_ _null_ _null_ range_ge _null_ _null_ _null_ )); -DATA(insert OID = 3874 ( range_gt PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 16 "3831 3831" _null_ _null_ _null_ _null_ _null_ range_gt _null_ _null_ _null_ )); -DATA(insert OID = 3875 ( range_gist_consistent PGNSP PGUID 12 1 0 0 0 f f f f t f i s 5 0 16 "2281 3831 21 26 2281" _null_ _null_ _null_ _null_ _null_ range_gist_consistent _null_ _null_ _null_ )); -DESCR("GiST support"); -DATA(insert OID = 3876 ( range_gist_union PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 3831 "2281 2281" _null_ _null_ _null_ _null_ _null_ range_gist_union _null_ _null_ _null_ )); -DESCR("GiST support"); -DATA(insert OID = 3877 ( range_gist_compress PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 2281 "2281" _null_ _null_ _null_ _null_ _null_ range_gist_compress _null_ _null_ _null_ )); -DESCR("GiST support"); -DATA(insert OID = 3878 ( range_gist_decompress PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 2281 "2281" _null_ 
_null_ _null_ _null_ _null_ range_gist_decompress _null_ _null_ _null_ )); -DESCR("GiST support"); -DATA(insert OID = 3996 ( range_gist_fetch PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 2281 "2281" _null_ _null_ _null_ _null_ _null_ range_gist_fetch _null_ _null_ _null_ )); -DESCR("GiST support"); -DATA(insert OID = 3879 ( range_gist_penalty PGNSP PGUID 12 1 0 0 0 f f f f t f i s 3 0 2281 "2281 2281 2281" _null_ _null_ _null_ _null_ _null_ range_gist_penalty _null_ _null_ _null_ )); -DESCR("GiST support"); -DATA(insert OID = 3880 ( range_gist_picksplit PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 2281 "2281 2281" _null_ _null_ _null_ _null_ _null_ range_gist_picksplit _null_ _null_ _null_ )); -DESCR("GiST support"); -DATA(insert OID = 3881 ( range_gist_same PGNSP PGUID 12 1 0 0 0 f f f f t f i s 3 0 2281 "3831 3831 2281" _null_ _null_ _null_ _null_ _null_ range_gist_same _null_ _null_ _null_ )); -DESCR("GiST support"); -DATA(insert OID = 3902 ( hash_range PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 23 "3831" _null_ _null_ _null_ _null_ _null_ hash_range _null_ _null_ _null_ )); -DESCR("hash a range"); -DATA(insert OID = 3916 ( range_typanalyze PGNSP PGUID 12 1 0 0 0 f f f f t f s s 1 0 16 "2281" _null_ _null_ _null_ _null_ _null_ range_typanalyze _null_ _null_ _null_ )); -DESCR("range typanalyze"); -DATA(insert OID = 3169 ( rangesel PGNSP PGUID 12 1 0 0 0 f f f f t f s s 4 0 701 "2281 26 2281 23" _null_ _null_ _null_ _null_ _null_ rangesel _null_ _null_ _null_ )); -DESCR("restriction selectivity for range operators"); - -DATA(insert OID = 3914 ( int4range_canonical PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 3904 "3904" _null_ _null_ _null_ _null_ _null_ int4range_canonical _null_ _null_ _null_ )); -DESCR("convert an int4 range to canonical form"); -DATA(insert OID = 3928 ( int8range_canonical PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 3926 "3926" _null_ _null_ _null_ _null_ _null_ int8range_canonical _null_ _null_ _null_ )); -DESCR("convert an int8 range to canonical form"); -DATA(insert OID = 3915 ( daterange_canonical PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 3912 "3912" _null_ _null_ _null_ _null_ _null_ daterange_canonical _null_ _null_ _null_ )); -DESCR("convert a date range to canonical form"); -DATA(insert OID = 3922 ( int4range_subdiff PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 701 "23 23" _null_ _null_ _null_ _null_ _null_ int4range_subdiff _null_ _null_ _null_ )); -DESCR("float8 difference of two int4 values"); -DATA(insert OID = 3923 ( int8range_subdiff PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 701 "20 20" _null_ _null_ _null_ _null_ _null_ int8range_subdiff _null_ _null_ _null_ )); -DESCR("float8 difference of two int8 values"); -DATA(insert OID = 3924 ( numrange_subdiff PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 701 "1700 1700" _null_ _null_ _null_ _null_ _null_ numrange_subdiff _null_ _null_ _null_ )); -DESCR("float8 difference of two numeric values"); -DATA(insert OID = 3925 ( daterange_subdiff PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 701 "1082 1082" _null_ _null_ _null_ _null_ _null_ daterange_subdiff _null_ _null_ _null_ )); -DESCR("float8 difference of two date values"); -DATA(insert OID = 3929 ( tsrange_subdiff PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 701 "1114 1114" _null_ _null_ _null_ _null_ _null_ tsrange_subdiff _null_ _null_ _null_ )); -DESCR("float8 difference of two timestamp values"); -DATA(insert OID = 3930 ( tstzrange_subdiff PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 701 "1184 1184" _null_ _null_ _null_ _null_ _null_ tstzrange_subdiff _null_ _null_ _null_ 
)); -DESCR("float8 difference of two timestamp with time zone values"); - -DATA(insert OID = 3840 ( int4range PGNSP PGUID 12 1 0 0 0 f f f f f f i s 2 0 3904 "23 23" _null_ _null_ _null_ _null_ _null_ range_constructor2 _null_ _null_ _null_ )); -DESCR("int4range constructor"); -DATA(insert OID = 3841 ( int4range PGNSP PGUID 12 1 0 0 0 f f f f f f i s 3 0 3904 "23 23 25" _null_ _null_ _null_ _null_ _null_ range_constructor3 _null_ _null_ _null_ )); -DESCR("int4range constructor"); -DATA(insert OID = 3844 ( numrange PGNSP PGUID 12 1 0 0 0 f f f f f f i s 2 0 3906 "1700 1700" _null_ _null_ _null_ _null_ _null_ range_constructor2 _null_ _null_ _null_ )); -DESCR("numrange constructor"); -DATA(insert OID = 3845 ( numrange PGNSP PGUID 12 1 0 0 0 f f f f f f i s 3 0 3906 "1700 1700 25" _null_ _null_ _null_ _null_ _null_ range_constructor3 _null_ _null_ _null_ )); -DESCR("numrange constructor"); -DATA(insert OID = 3933 ( tsrange PGNSP PGUID 12 1 0 0 0 f f f f f f i s 2 0 3908 "1114 1114" _null_ _null_ _null_ _null_ _null_ range_constructor2 _null_ _null_ _null_ )); -DESCR("tsrange constructor"); -DATA(insert OID = 3934 ( tsrange PGNSP PGUID 12 1 0 0 0 f f f f f f i s 3 0 3908 "1114 1114 25" _null_ _null_ _null_ _null_ _null_ range_constructor3 _null_ _null_ _null_ )); -DESCR("tsrange constructor"); -DATA(insert OID = 3937 ( tstzrange PGNSP PGUID 12 1 0 0 0 f f f f f f i s 2 0 3910 "1184 1184" _null_ _null_ _null_ _null_ _null_ range_constructor2 _null_ _null_ _null_ )); -DESCR("tstzrange constructor"); -DATA(insert OID = 3938 ( tstzrange PGNSP PGUID 12 1 0 0 0 f f f f f f i s 3 0 3910 "1184 1184 25" _null_ _null_ _null_ _null_ _null_ range_constructor3 _null_ _null_ _null_ )); -DESCR("tstzrange constructor"); -DATA(insert OID = 3941 ( daterange PGNSP PGUID 12 1 0 0 0 f f f f f f i s 2 0 3912 "1082 1082" _null_ _null_ _null_ _null_ _null_ range_constructor2 _null_ _null_ _null_ )); -DESCR("daterange constructor"); -DATA(insert OID = 3942 ( daterange PGNSP PGUID 12 1 0 0 0 f f f f f f i s 3 0 3912 "1082 1082 25" _null_ _null_ _null_ _null_ _null_ range_constructor3 _null_ _null_ _null_ )); -DESCR("daterange constructor"); -DATA(insert OID = 3945 ( int8range PGNSP PGUID 12 1 0 0 0 f f f f f f i s 2 0 3926 "20 20" _null_ _null_ _null_ _null_ _null_ range_constructor2 _null_ _null_ _null_ )); -DESCR("int8range constructor"); -DATA(insert OID = 3946 ( int8range PGNSP PGUID 12 1 0 0 0 f f f f f f i s 3 0 3926 "20 20 25" _null_ _null_ _null_ _null_ _null_ range_constructor3 _null_ _null_ _null_ )); -DESCR("int8range constructor"); - -/* date, time, timestamp constructors */ -DATA(insert OID = 3846 ( make_date PGNSP PGUID 12 1 0 0 0 f f f f t f i s 3 0 1082 "23 23 23" _null_ _null_ "{year,month,day}" _null_ _null_ make_date _null_ _null_ _null_ )); -DESCR("construct date"); -DATA(insert OID = 3847 ( make_time PGNSP PGUID 12 1 0 0 0 f f f f t f i s 3 0 1083 "23 23 701" _null_ _null_ "{hour,min,sec}" _null_ _null_ make_time _null_ _null_ _null_ )); -DESCR("construct time"); -DATA(insert OID = 3461 ( make_timestamp PGNSP PGUID 12 1 0 0 0 f f f f t f i s 6 0 1114 "23 23 23 23 23 701" _null_ _null_ "{year,month,mday,hour,min,sec}" _null_ _null_ make_timestamp _null_ _null_ _null_ )); -DESCR("construct timestamp"); -DATA(insert OID = 3462 ( make_timestamptz PGNSP PGUID 12 1 0 0 0 f f f f t f s s 6 0 1184 "23 23 23 23 23 701" _null_ _null_ "{year,month,mday,hour,min,sec}" _null_ _null_ make_timestamptz _null_ _null_ _null_ )); -DESCR("construct timestamp with time zone"); -DATA(insert OID = 3463 ( make_timestamptz 
PGNSP PGUID 12 1 0 0 0 f f f f t f s s 7 0 1184 "23 23 23 23 23 701 25" _null_ _null_ "{year,month,mday,hour,min,sec,timezone}" _null_ _null_ make_timestamptz_at_timezone _null_ _null_ _null_ )); -DESCR("construct timestamp with time zone"); -DATA(insert OID = 3464 ( make_interval PGNSP PGUID 12 1 0 0 0 f f f f t f i s 7 0 1186 "23 23 23 23 23 23 701" _null_ _null_ "{years,months,weeks,days,hours,mins,secs}" _null_ _null_ make_interval _null_ _null_ _null_ )); -DESCR("construct interval"); - -/* spgist opclasses */ -DATA(insert OID = 4018 ( spg_quad_config PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 2278 "2281 2281" _null_ _null_ _null_ _null_ _null_ spg_quad_config _null_ _null_ _null_ )); -DESCR("SP-GiST support for quad tree over point"); -DATA(insert OID = 4019 ( spg_quad_choose PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 2278 "2281 2281" _null_ _null_ _null_ _null_ _null_ spg_quad_choose _null_ _null_ _null_ )); -DESCR("SP-GiST support for quad tree over point"); -DATA(insert OID = 4020 ( spg_quad_picksplit PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 2278 "2281 2281" _null_ _null_ _null_ _null_ _null_ spg_quad_picksplit _null_ _null_ _null_ )); -DESCR("SP-GiST support for quad tree over point"); -DATA(insert OID = 4021 ( spg_quad_inner_consistent PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 2278 "2281 2281" _null_ _null_ _null_ _null_ _null_ spg_quad_inner_consistent _null_ _null_ _null_ )); -DESCR("SP-GiST support for quad tree over point"); -DATA(insert OID = 4022 ( spg_quad_leaf_consistent PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 16 "2281 2281" _null_ _null_ _null_ _null_ _null_ spg_quad_leaf_consistent _null_ _null_ _null_ )); -DESCR("SP-GiST support for quad tree and k-d tree over point"); - -DATA(insert OID = 4023 ( spg_kd_config PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 2278 "2281 2281" _null_ _null_ _null_ _null_ _null_ spg_kd_config _null_ _null_ _null_ )); -DESCR("SP-GiST support for k-d tree over point"); -DATA(insert OID = 4024 ( spg_kd_choose PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 2278 "2281 2281" _null_ _null_ _null_ _null_ _null_ spg_kd_choose _null_ _null_ _null_ )); -DESCR("SP-GiST support for k-d tree over point"); -DATA(insert OID = 4025 ( spg_kd_picksplit PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 2278 "2281 2281" _null_ _null_ _null_ _null_ _null_ spg_kd_picksplit _null_ _null_ _null_ )); -DESCR("SP-GiST support for k-d tree over point"); -DATA(insert OID = 4026 ( spg_kd_inner_consistent PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 2278 "2281 2281" _null_ _null_ _null_ _null_ _null_ spg_kd_inner_consistent _null_ _null_ _null_ )); -DESCR("SP-GiST support for k-d tree over point"); - -DATA(insert OID = 4027 ( spg_text_config PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 2278 "2281 2281" _null_ _null_ _null_ _null_ _null_ spg_text_config _null_ _null_ _null_ )); -DESCR("SP-GiST support for radix tree over text"); -DATA(insert OID = 4028 ( spg_text_choose PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 2278 "2281 2281" _null_ _null_ _null_ _null_ _null_ spg_text_choose _null_ _null_ _null_ )); -DESCR("SP-GiST support for radix tree over text"); -DATA(insert OID = 4029 ( spg_text_picksplit PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 2278 "2281 2281" _null_ _null_ _null_ _null_ _null_ spg_text_picksplit _null_ _null_ _null_ )); -DESCR("SP-GiST support for radix tree over text"); -DATA(insert OID = 4030 ( spg_text_inner_consistent PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 2278 "2281 2281" _null_ _null_ _null_ _null_ _null_ spg_text_inner_consistent _null_ _null_ _null_ )); 
-DESCR("SP-GiST support for radix tree over text"); -DATA(insert OID = 4031 ( spg_text_leaf_consistent PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 16 "2281 2281" _null_ _null_ _null_ _null_ _null_ spg_text_leaf_consistent _null_ _null_ _null_ )); -DESCR("SP-GiST support for radix tree over text"); - -DATA(insert OID = 3469 ( spg_range_quad_config PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 2278 "2281 2281" _null_ _null_ _null_ _null_ _null_ spg_range_quad_config _null_ _null_ _null_ )); -DESCR("SP-GiST support for quad tree over range"); -DATA(insert OID = 3470 ( spg_range_quad_choose PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 2278 "2281 2281" _null_ _null_ _null_ _null_ _null_ spg_range_quad_choose _null_ _null_ _null_ )); -DESCR("SP-GiST support for quad tree over range"); -DATA(insert OID = 3471 ( spg_range_quad_picksplit PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 2278 "2281 2281" _null_ _null_ _null_ _null_ _null_ spg_range_quad_picksplit _null_ _null_ _null_ )); -DESCR("SP-GiST support for quad tree over range"); -DATA(insert OID = 3472 ( spg_range_quad_inner_consistent PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 2278 "2281 2281" _null_ _null_ _null_ _null_ _null_ spg_range_quad_inner_consistent _null_ _null_ _null_ )); -DESCR("SP-GiST support for quad tree over range"); -DATA(insert OID = 3473 ( spg_range_quad_leaf_consistent PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 16 "2281 2281" _null_ _null_ _null_ _null_ _null_ spg_range_quad_leaf_consistent _null_ _null_ _null_ )); -DESCR("SP-GiST support for quad tree over range"); - -DATA(insert OID = 5012 ( spg_box_quad_config PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 2278 "2281 2281" _null_ _null_ _null_ _null_ _null_ spg_box_quad_config _null_ _null_ _null_ )); -DESCR("SP-GiST support for quad tree over box"); -DATA(insert OID = 5013 ( spg_box_quad_choose PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 2278 "2281 2281" _null_ _null_ _null_ _null_ _null_ spg_box_quad_choose _null_ _null_ _null_ )); -DESCR("SP-GiST support for quad tree over box"); -DATA(insert OID = 5014 ( spg_box_quad_picksplit PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 2278 "2281 2281" _null_ _null_ _null_ _null_ _null_ spg_box_quad_picksplit _null_ _null_ _null_ )); -DESCR("SP-GiST support for quad tree over box"); -DATA(insert OID = 5015 ( spg_box_quad_inner_consistent PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 2278 "2281 2281" _null_ _null_ _null_ _null_ _null_ spg_box_quad_inner_consistent _null_ _null_ _null_ )); -DESCR("SP-GiST support for quad tree over box"); -DATA(insert OID = 5016 ( spg_box_quad_leaf_consistent PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 16 "2281 2281" _null_ _null_ _null_ _null_ _null_ spg_box_quad_leaf_consistent _null_ _null_ _null_ )); -DESCR("SP-GiST support for quad tree over box"); - -/* replication slots */ -DATA(insert OID = 3779 ( pg_create_physical_replication_slot PGNSP PGUID 12 1 0 0 0 f f f f t f v u 3 0 2249 "19 16 16" "{19,16,16,19,3220}" "{i,i,i,o,o}" "{slot_name,immediately_reserve,temporary,slot_name,lsn}" _null_ _null_ pg_create_physical_replication_slot _null_ _null_ _null_ )); -DESCR("create a physical replication slot"); -DATA(insert OID = 3780 ( pg_drop_replication_slot PGNSP PGUID 12 1 0 0 0 f f f f t f v u 1 0 2278 "19" _null_ _null_ _null_ _null_ _null_ pg_drop_replication_slot _null_ _null_ _null_ )); -DESCR("drop a replication slot"); -DATA(insert OID = 3781 ( pg_get_replication_slots PGNSP PGUID 12 1 10 0 0 f f f f f t s s 0 0 2249 "" "{19,19,25,26,16,16,23,28,28,3220,3220}" "{o,o,o,o,o,o,o,o,o,o,o}" 
"{slot_name,plugin,slot_type,datoid,temporary,active,active_pid,xmin,catalog_xmin,restart_lsn,confirmed_flush_lsn}" _null_ _null_ pg_get_replication_slots _null_ _null_ _null_ )); -DESCR("information about replication slots currently in use"); -DATA(insert OID = 3786 ( pg_create_logical_replication_slot PGNSP PGUID 12 1 0 0 0 f f f f t f v u 3 0 2249 "19 19 16" "{19,19,16,25,3220}" "{i,i,i,o,o}" "{slot_name,plugin,temporary,slot_name,lsn}" _null_ _null_ pg_create_logical_replication_slot _null_ _null_ _null_ )); -DESCR("set up a logical replication slot"); -DATA(insert OID = 3782 ( pg_logical_slot_get_changes PGNSP PGUID 12 1000 1000 25 0 f f f f f t v u 4 0 2249 "19 3220 23 1009" "{19,3220,23,1009,3220,28,25}" "{i,i,i,v,o,o,o}" "{slot_name,upto_lsn,upto_nchanges,options,lsn,xid,data}" _null_ _null_ pg_logical_slot_get_changes _null_ _null_ _null_ )); -DESCR("get changes from replication slot"); -DATA(insert OID = 3783 ( pg_logical_slot_get_binary_changes PGNSP PGUID 12 1000 1000 25 0 f f f f f t v u 4 0 2249 "19 3220 23 1009" "{19,3220,23,1009,3220,28,17}" "{i,i,i,v,o,o,o}" "{slot_name,upto_lsn,upto_nchanges,options,lsn,xid,data}" _null_ _null_ pg_logical_slot_get_binary_changes _null_ _null_ _null_ )); -DESCR("get binary changes from replication slot"); -DATA(insert OID = 3784 ( pg_logical_slot_peek_changes PGNSP PGUID 12 1000 1000 25 0 f f f f f t v u 4 0 2249 "19 3220 23 1009" "{19,3220,23,1009,3220,28,25}" "{i,i,i,v,o,o,o}" "{slot_name,upto_lsn,upto_nchanges,options,lsn,xid,data}" _null_ _null_ pg_logical_slot_peek_changes _null_ _null_ _null_ )); -DESCR("peek at changes from replication slot"); -DATA(insert OID = 3785 ( pg_logical_slot_peek_binary_changes PGNSP PGUID 12 1000 1000 25 0 f f f f f t v u 4 0 2249 "19 3220 23 1009" "{19,3220,23,1009,3220,28,17}" "{i,i,i,v,o,o,o}" "{slot_name,upto_lsn,upto_nchanges,options,lsn,xid,data}" _null_ _null_ pg_logical_slot_peek_binary_changes _null_ _null_ _null_ )); -DESCR("peek at binary changes from replication slot"); -DATA(insert OID = 3577 ( pg_logical_emit_message PGNSP PGUID 12 1 0 0 0 f f f f t f v u 3 0 3220 "16 25 25" _null_ _null_ _null_ _null_ _null_ pg_logical_emit_message_text _null_ _null_ _null_ )); -DESCR("emit a textual logical decoding message"); -DATA(insert OID = 3578 ( pg_logical_emit_message PGNSP PGUID 12 1 0 0 0 f f f f t f v u 3 0 3220 "16 25 17" _null_ _null_ _null_ _null_ _null_ pg_logical_emit_message_bytea _null_ _null_ _null_ )); -DESCR("emit a binary logical decoding message"); - -/* event triggers */ -DATA(insert OID = 3566 ( pg_event_trigger_dropped_objects PGNSP PGUID 12 10 100 0 0 f f f f t t s r 0 0 2249 "" "{26,26,23,16,16,16,25,25,25,25,1009,1009}" "{o,o,o,o,o,o,o,o,o,o,o,o}" "{classid, objid, objsubid, original, normal, is_temporary, object_type, schema_name, object_name, object_identity, address_names, address_args}" _null_ _null_ pg_event_trigger_dropped_objects _null_ _null_ _null_ )); -DESCR("list objects dropped by the current command"); -DATA(insert OID = 4566 ( pg_event_trigger_table_rewrite_oid PGNSP PGUID 12 1 0 0 0 f f f f t f s r 0 0 26 "" "{26}" "{o}" "{oid}" _null_ _null_ pg_event_trigger_table_rewrite_oid _null_ _null_ _null_ )); -DESCR("return Oid of the table getting rewritten"); -DATA(insert OID = 4567 ( pg_event_trigger_table_rewrite_reason PGNSP PGUID 12 1 0 0 0 f f f f t f s r 0 0 23 "" _null_ _null_ _null_ _null_ _null_ pg_event_trigger_table_rewrite_reason _null_ _null_ _null_ )); -DESCR("return reason code for table getting rewritten"); -DATA(insert OID = 4568 ( 
pg_event_trigger_ddl_commands PGNSP PGUID 12 10 100 0 0 f f f f t t s r 0 0 2249 "" "{26,26,23,25,25,25,25,16,32}" "{o,o,o,o,o,o,o,o,o}" "{classid, objid, objsubid, command_tag, object_type, schema_name, object_identity, in_extension, command}" _null_ _null_ pg_event_trigger_ddl_commands _null_ _null_ _null_ )); -DESCR("list DDL actions being executed by the current command"); - -/* generic transition functions for ordered-set aggregates */ -DATA(insert OID = 3970 ( ordered_set_transition PGNSP PGUID 12 1 0 0 0 f f f f f f i s 2 0 2281 "2281 2276" _null_ _null_ _null_ _null_ _null_ ordered_set_transition _null_ _null_ _null_ )); -DESCR("aggregate transition function"); -DATA(insert OID = 3971 ( ordered_set_transition_multi PGNSP PGUID 12 1 0 2276 0 f f f f f f i s 2 0 2281 "2281 2276" "{2281,2276}" "{i,v}" _null_ _null_ _null_ ordered_set_transition_multi _null_ _null_ _null_ )); -DESCR("aggregate transition function"); - -/* inverse distribution aggregates (and their support functions) */ -DATA(insert OID = 3972 ( percentile_disc PGNSP PGUID 12 1 0 0 0 t f f f f f i s 2 0 2283 "701 2283" _null_ _null_ _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ )); -DESCR("discrete percentile"); -DATA(insert OID = 3973 ( percentile_disc_final PGNSP PGUID 12 1 0 0 0 f f f f f f i s 3 0 2283 "2281 701 2283" _null_ _null_ _null_ _null_ _null_ percentile_disc_final _null_ _null_ _null_ )); -DESCR("aggregate final function"); -DATA(insert OID = 3974 ( percentile_cont PGNSP PGUID 12 1 0 0 0 t f f f f f i s 2 0 701 "701 701" _null_ _null_ _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ )); -DESCR("continuous distribution percentile"); -DATA(insert OID = 3975 ( percentile_cont_float8_final PGNSP PGUID 12 1 0 0 0 f f f f f f i s 2 0 701 "2281 701" _null_ _null_ _null_ _null_ _null_ percentile_cont_float8_final _null_ _null_ _null_ )); -DESCR("aggregate final function"); -DATA(insert OID = 3976 ( percentile_cont PGNSP PGUID 12 1 0 0 0 t f f f f f i s 2 0 1186 "701 1186" _null_ _null_ _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ )); -DESCR("continuous distribution percentile"); -DATA(insert OID = 3977 ( percentile_cont_interval_final PGNSP PGUID 12 1 0 0 0 f f f f f f i s 2 0 1186 "2281 701" _null_ _null_ _null_ _null_ _null_ percentile_cont_interval_final _null_ _null_ _null_ )); -DESCR("aggregate final function"); -DATA(insert OID = 3978 ( percentile_disc PGNSP PGUID 12 1 0 0 0 t f f f f f i s 2 0 2277 "1022 2283" _null_ _null_ _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ )); -DESCR("multiple discrete percentiles"); -DATA(insert OID = 3979 ( percentile_disc_multi_final PGNSP PGUID 12 1 0 0 0 f f f f f f i s 3 0 2277 "2281 1022 2283" _null_ _null_ _null_ _null_ _null_ percentile_disc_multi_final _null_ _null_ _null_ )); -DESCR("aggregate final function"); -DATA(insert OID = 3980 ( percentile_cont PGNSP PGUID 12 1 0 0 0 t f f f f f i s 2 0 1022 "1022 701" _null_ _null_ _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ )); -DESCR("multiple continuous percentiles"); -DATA(insert OID = 3981 ( percentile_cont_float8_multi_final PGNSP PGUID 12 1 0 0 0 f f f f f f i s 2 0 1022 "2281 1022" _null_ _null_ _null_ _null_ _null_ percentile_cont_float8_multi_final _null_ _null_ _null_ )); -DESCR("aggregate final function"); -DATA(insert OID = 3982 ( percentile_cont PGNSP PGUID 12 1 0 0 0 t f f f f f i s 2 0 1187 "1022 1186" _null_ _null_ _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ )); -DESCR("multiple continuous percentiles"); -DATA(insert OID = 3983 ( 
percentile_cont_interval_multi_final PGNSP PGUID 12 1 0 0 0 f f f f f f i s 2 0 1187 "2281 1022" _null_ _null_ _null_ _null_ _null_ percentile_cont_interval_multi_final _null_ _null_ _null_ )); -DESCR("aggregate final function"); -DATA(insert OID = 3984 ( mode PGNSP PGUID 12 1 0 0 0 t f f f f f i s 1 0 2283 "2283" _null_ _null_ _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ )); -DESCR("most common value"); -DATA(insert OID = 3985 ( mode_final PGNSP PGUID 12 1 0 0 0 f f f f f f i s 2 0 2283 "2281 2283" _null_ _null_ _null_ _null_ _null_ mode_final _null_ _null_ _null_ )); -DESCR("aggregate final function"); - -/* hypothetical-set aggregates (and their support functions) */ -DATA(insert OID = 3986 ( rank PGNSP PGUID 12 1 0 2276 0 t f f f f f i s 1 0 20 "2276" "{2276}" "{v}" _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ )); -DESCR("rank of hypothetical row"); -DATA(insert OID = 3987 ( rank_final PGNSP PGUID 12 1 0 2276 0 f f f f f f i s 2 0 20 "2281 2276" "{2281,2276}" "{i,v}" _null_ _null_ _null_ hypothetical_rank_final _null_ _null_ _null_ )); -DESCR("aggregate final function"); -DATA(insert OID = 3988 ( percent_rank PGNSP PGUID 12 1 0 2276 0 t f f f f f i s 1 0 701 "2276" "{2276}" "{v}" _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ )); -DESCR("fractional rank of hypothetical row"); -DATA(insert OID = 3989 ( percent_rank_final PGNSP PGUID 12 1 0 2276 0 f f f f f f i s 2 0 701 "2281 2276" "{2281,2276}" "{i,v}" _null_ _null_ _null_ hypothetical_percent_rank_final _null_ _null_ _null_ )); -DESCR("aggregate final function"); -DATA(insert OID = 3990 ( cume_dist PGNSP PGUID 12 1 0 2276 0 t f f f f f i s 1 0 701 "2276" "{2276}" "{v}" _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ )); -DESCR("cumulative distribution of hypothetical row"); -DATA(insert OID = 3991 ( cume_dist_final PGNSP PGUID 12 1 0 2276 0 f f f f f f i s 2 0 701 "2281 2276" "{2281,2276}" "{i,v}" _null_ _null_ _null_ hypothetical_cume_dist_final _null_ _null_ _null_ )); -DESCR("aggregate final function"); -DATA(insert OID = 3992 ( dense_rank PGNSP PGUID 12 1 0 2276 0 t f f f f f i s 1 0 20 "2276" "{2276}" "{v}" _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ )); -DESCR("rank of hypothetical row without gaps"); -DATA(insert OID = 3993 ( dense_rank_final PGNSP PGUID 12 1 0 2276 0 f f f f f f i s 2 0 20 "2281 2276" "{2281,2276}" "{i,v}" _null_ _null_ _null_ hypothetical_dense_rank_final _null_ _null_ _null_ )); -DESCR("aggregate final function"); - -/* pg_upgrade support */ -DATA(insert OID = 3582 ( binary_upgrade_set_next_pg_type_oid PGNSP PGUID 12 1 0 0 0 f f f f t f v r 1 0 2278 "26" _null_ _null_ _null_ _null_ _null_ binary_upgrade_set_next_pg_type_oid _null_ _null_ _null_ )); -DESCR("for use by pg_upgrade"); -DATA(insert OID = 3584 ( binary_upgrade_set_next_array_pg_type_oid PGNSP PGUID 12 1 0 0 0 f f f f t f v r 1 0 2278 "26" _null_ _null_ _null_ _null_ _null_ binary_upgrade_set_next_array_pg_type_oid _null_ _null_ _null_ )); -DESCR("for use by pg_upgrade"); -DATA(insert OID = 3585 ( binary_upgrade_set_next_toast_pg_type_oid PGNSP PGUID 12 1 0 0 0 f f f f t f v r 1 0 2278 "26" _null_ _null_ _null_ _null_ _null_ binary_upgrade_set_next_toast_pg_type_oid _null_ _null_ _null_ )); -DESCR("for use by pg_upgrade"); -DATA(insert OID = 3586 ( binary_upgrade_set_next_heap_pg_class_oid PGNSP PGUID 12 1 0 0 0 f f f f t f v r 1 0 2278 "26" _null_ _null_ _null_ _null_ _null_ binary_upgrade_set_next_heap_pg_class_oid _null_ _null_ _null_ )); -DESCR("for use by pg_upgrade"); -DATA(insert 
OID = 3587 ( binary_upgrade_set_next_index_pg_class_oid PGNSP PGUID 12 1 0 0 0 f f f f t f v r 1 0 2278 "26" _null_ _null_ _null_ _null_ _null_ binary_upgrade_set_next_index_pg_class_oid _null_ _null_ _null_ )); -DESCR("for use by pg_upgrade"); -DATA(insert OID = 3588 ( binary_upgrade_set_next_toast_pg_class_oid PGNSP PGUID 12 1 0 0 0 f f f f t f v r 1 0 2278 "26" _null_ _null_ _null_ _null_ _null_ binary_upgrade_set_next_toast_pg_class_oid _null_ _null_ _null_ )); -DESCR("for use by pg_upgrade"); -DATA(insert OID = 3589 ( binary_upgrade_set_next_pg_enum_oid PGNSP PGUID 12 1 0 0 0 f f f f t f v r 1 0 2278 "26" _null_ _null_ _null_ _null_ _null_ binary_upgrade_set_next_pg_enum_oid _null_ _null_ _null_ )); -DESCR("for use by pg_upgrade"); -DATA(insert OID = 3590 ( binary_upgrade_set_next_pg_authid_oid PGNSP PGUID 12 1 0 0 0 f f f f t f v r 1 0 2278 "26" _null_ _null_ _null_ _null_ _null_ binary_upgrade_set_next_pg_authid_oid _null_ _null_ _null_ )); -DESCR("for use by pg_upgrade"); -DATA(insert OID = 3591 ( binary_upgrade_create_empty_extension PGNSP PGUID 12 1 0 0 0 f f f f f f v r 7 0 2278 "25 25 16 25 1028 1009 1009" _null_ _null_ _null_ _null_ _null_ binary_upgrade_create_empty_extension _null_ _null_ _null_ )); -DESCR("for use by pg_upgrade"); -DATA(insert OID = 4083 ( binary_upgrade_set_record_init_privs PGNSP PGUID 12 1 0 0 0 f f f f t f v r 1 0 2278 "16" _null_ _null_ _null_ _null_ _null_ binary_upgrade_set_record_init_privs _null_ _null_ _null_ )); -DESCR("for use by pg_upgrade"); - -/* replication/origin.h */ -DATA(insert OID = 6003 ( pg_replication_origin_create PGNSP PGUID 12 1 0 0 0 f f f f t f v u 1 0 26 "25" _null_ _null_ _null_ _null_ _null_ pg_replication_origin_create _null_ _null_ _null_ )); -DESCR("create a replication origin"); - -DATA(insert OID = 6004 ( pg_replication_origin_drop PGNSP PGUID 12 1 0 0 0 f f f f t f v u 1 0 2278 "25" _null_ _null_ _null_ _null_ _null_ pg_replication_origin_drop _null_ _null_ _null_ )); -DESCR("drop replication origin identified by its name"); - -DATA(insert OID = 6005 ( pg_replication_origin_oid PGNSP PGUID 12 1 0 0 0 f f f f t f s s 1 0 26 "25" _null_ _null_ _null_ _null_ _null_ pg_replication_origin_oid _null_ _null_ _null_ )); -DESCR("translate the replication origin's name to its id"); - -DATA(insert OID = 6006 ( pg_replication_origin_session_setup PGNSP PGUID 12 1 0 0 0 f f f f t f v u 1 0 2278 "25" _null_ _null_ _null_ _null_ _null_ pg_replication_origin_session_setup _null_ _null_ _null_ )); -DESCR("configure session to maintain replication progress tracking for the passed in origin"); - -DATA(insert OID = 6007 ( pg_replication_origin_session_reset PGNSP PGUID 12 1 0 0 0 f f f f t f v u 0 0 2278 "" _null_ _null_ _null_ _null_ _null_ pg_replication_origin_session_reset _null_ _null_ _null_ )); -DESCR("teardown configured replication progress tracking"); - -DATA(insert OID = 6008 ( pg_replication_origin_session_is_setup PGNSP PGUID 12 1 0 0 0 f f f f t f v r 0 0 16 "" _null_ _null_ _null_ _null_ _null_ pg_replication_origin_session_is_setup _null_ _null_ _null_ )); -DESCR("is a replication origin configured in this session"); - -DATA(insert OID = 6009 ( pg_replication_origin_session_progress PGNSP PGUID 12 1 0 0 0 f f f f t f v u 1 0 3220 "16" _null_ _null_ _null_ _null_ _null_ pg_replication_origin_session_progress _null_ _null_ _null_ )); -DESCR("get the replication progress of the current session"); - -DATA(insert OID = 6010 ( pg_replication_origin_xact_setup PGNSP PGUID 12 1 0 0 0 f f f f t f v r 2 0 2278 "3220 1184" _null_ 
_null_ _null_ _null_ _null_ pg_replication_origin_xact_setup _null_ _null_ _null_ )); -DESCR("setup the transaction's origin lsn and timestamp"); - -DATA(insert OID = 6011 ( pg_replication_origin_xact_reset PGNSP PGUID 12 1 0 0 0 f f f f t f v r 0 0 2278 "" _null_ _null_ _null_ _null_ _null_ pg_replication_origin_xact_reset _null_ _null_ _null_ )); -DESCR("reset the transaction's origin lsn and timestamp"); - -DATA(insert OID = 6012 ( pg_replication_origin_advance PGNSP PGUID 12 1 0 0 0 f f f f t f v u 2 0 2278 "25 3220" _null_ _null_ _null_ _null_ _null_ pg_replication_origin_advance _null_ _null_ _null_ )); -DESCR("advance replication identifier to specific location"); - -DATA(insert OID = 6013 ( pg_replication_origin_progress PGNSP PGUID 12 1 0 0 0 f f f f t f v u 2 0 3220 "25 16" _null_ _null_ _null_ _null_ _null_ pg_replication_origin_progress _null_ _null_ _null_ )); -DESCR("get an individual replication origin's replication progress"); - -DATA(insert OID = 6014 ( pg_show_replication_origin_status PGNSP PGUID 12 1 100 0 0 f f f f f t v r 0 0 2249 "" "{26,25,3220,3220}" "{o,o,o,o}" "{local_id, external_id, remote_lsn, local_lsn}" _null_ _null_ pg_show_replication_origin_status _null_ _null_ _null_ )); -DESCR("get progress for all replication origins"); - -/* publications */ -DATA(insert OID = 6119 ( pg_get_publication_tables PGNSP PGUID 12 1 1000 0 0 f f f f t t s s 1 0 26 "25" "{25,26}" "{i,o}" "{pubname,relid}" _null_ _null_ pg_get_publication_tables _null_ _null_ _null_ )); -DESCR("get OIDs of tables in a publication"); -DATA(insert OID = 6121 ( pg_relation_is_publishable PGNSP PGUID 12 1 0 0 0 f f f f t f s s 1 0 16 "2205" _null_ _null_ _null_ _null_ _null_ pg_relation_is_publishable _null_ _null_ _null_ )); -DESCR("returns whether a relation can be part of a publication"); - -/* rls */ -DATA(insert OID = 3298 ( row_security_active PGNSP PGUID 12 1 0 0 0 f f f f t f s s 1 0 16 "26" _null_ _null_ _null_ _null_ _null_ row_security_active _null_ _null_ _null_ )); -DESCR("row security for current context active on table by table oid"); -DATA(insert OID = 3299 ( row_security_active PGNSP PGUID 12 1 0 0 0 f f f f t f s s 1 0 16 "25" _null_ _null_ _null_ _null_ _null_ row_security_active_name _null_ _null_ _null_ )); -DESCR("row security for current context active on table by table name"); - -/* pg_config */ -DATA(insert OID = 3400 ( pg_config PGNSP PGUID 12 1 23 0 0 f f f f t t i r 0 0 2249 "" "{25,25}" "{o,o}" "{name,setting}" _null_ _null_ pg_config _null_ _null_ _null_ )); -DESCR("pg_config binary as a function"); - -/* pg_controldata related functions */ -DATA(insert OID = 3441 ( pg_control_system PGNSP PGUID 12 1 0 0 0 f f f f t f v s 0 0 2249 "" "{23,23,20,1184}" "{o,o,o,o}" "{pg_control_version,catalog_version_no,system_identifier,pg_control_last_modified}" _null_ _null_ pg_control_system _null_ _null_ _null_ )); -DESCR("pg_controldata general state information as a function"); - -DATA(insert OID = 3442 ( pg_control_checkpoint PGNSP PGUID 12 1 0 0 0 f f f f t f v s 0 0 2249 "" "{3220,3220,3220,25,23,23,16,25,26,28,28,28,26,28,28,26,28,28,1184}" "{o,o,o,o,o,o,o,o,o,o,o,o,o,o,o,o,o,o,o}" "{checkpoint_lsn,prior_lsn,redo_lsn,redo_wal_file,timeline_id,prev_timeline_id,full_page_writes,next_xid,next_oid,next_multixact_id,next_multi_offset,oldest_xid,oldest_xid_dbid,oldest_active_xid,oldest_multi_xid,oldest_multi_dbid,oldest_commit_ts_xid,newest_commit_ts_xid,checkpoint_time}" _null_ _null_ pg_control_checkpoint _null_ _null_ _null_ )); -DESCR("pg_controldata checkpoint state 
information as a function"); - -DATA(insert OID = 3443 ( pg_control_recovery PGNSP PGUID 12 1 0 0 0 f f f f t f v s 0 0 2249 "" "{3220,23,3220,3220,16}" "{o,o,o,o,o}" "{min_recovery_end_lsn,min_recovery_end_timeline,backup_start_lsn,backup_end_lsn,end_of_backup_record_required}" _null_ _null_ pg_control_recovery _null_ _null_ _null_ )); -DESCR("pg_controldata recovery state information as a function"); - -DATA(insert OID = 3444 ( pg_control_init PGNSP PGUID 12 1 0 0 0 f f f f t f v s 0 0 2249 "" "{23,23,23,23,23,23,23,23,23,16,16,23}" "{o,o,o,o,o,o,o,o,o,o,o,o}" "{max_data_alignment,database_block_size,blocks_per_segment,wal_block_size,bytes_per_wal_segment,max_identifier_length,max_index_columns,max_toast_chunk_size,large_object_chunk_size,float4_pass_by_value,float8_pass_by_value,data_page_checksum_version}" _null_ _null_ pg_control_init _null_ _null_ _null_ )); -DESCR("pg_controldata init state information as a function"); - -/* collation management functions */ -DATA(insert OID = 3445 ( pg_import_system_collations PGNSP PGUID 12 100 0 0 0 f f f f t f v r 1 0 23 "4089" _null_ _null_ _null_ _null_ _null_ pg_import_system_collations _null_ _null_ _null_ )); -DESCR("import collations from operating system"); - -DATA(insert OID = 3448 ( pg_collation_actual_version PGNSP PGUID 12 100 0 0 0 f f f f t f v s 1 0 25 "26" _null_ _null_ _null_ _null_ _null_ pg_collation_actual_version _null_ _null_ _null_ )); -DESCR("get actual version of collation from operating system"); - -/* system management/monitoring related functions */ -DATA(insert OID = 3353 ( pg_ls_logdir PGNSP PGUID 12 10 20 0 0 f f f f t t v s 0 0 2249 "" "{25,20,1184}" "{o,o,o}" "{name,size,modification}" _null_ _null_ pg_ls_logdir _null_ _null_ _null_ )); -DESCR("list files in the log directory"); -DATA(insert OID = 3354 ( pg_ls_waldir PGNSP PGUID 12 10 20 0 0 f f f f t t v s 0 0 2249 "" "{25,20,1184}" "{o,o,o}" "{name,size,modification}" _null_ _null_ pg_ls_waldir _null_ _null_ _null_ )); -DESCR("list of files in the WAL directory"); +#define PROKIND_FUNCTION 'f' +#define PROKIND_AGGREGATE 'a' +#define PROKIND_WINDOW 'w' +#define PROKIND_PROCEDURE 'p' /* * Symbolic values for provolatile column: these indicate whether the result @@ -5507,4 +173,37 @@ DESCR("list of files in the WAL directory"); #define PROARGMODE_VARIADIC 'v' #define PROARGMODE_TABLE 't' +#endif /* EXPOSE_TO_CLIENT_CODE */ + + +extern ObjectAddress ProcedureCreate(const char *procedureName, + Oid procNamespace, + bool replace, + bool returnsSet, + Oid returnType, + Oid proowner, + Oid languageObjectId, + Oid languageValidator, + const char *prosrc, + const char *probin, + char prokind, + bool security_definer, + bool isLeakProof, + bool isStrict, + char volatility, + char parallel, + oidvector *parameterTypes, + Datum allParameterTypes, + Datum parameterModes, + Datum parameterNames, + List *parameterDefaults, + Datum trftypes, + Datum proconfig, + float4 procost, + float4 prorows); + +extern bool function_parse_error_transpose(const char *prosrc); + +extern List *oid_array_to_list(Datum datum); + #endif /* PG_PROC_H */ diff --git a/src/include/catalog/pg_proc_fn.h b/src/include/catalog/pg_proc_fn.h deleted file mode 100644 index 2c85f5d267..0000000000 --- a/src/include/catalog/pg_proc_fn.h +++ /dev/null @@ -1,51 +0,0 @@ -/*------------------------------------------------------------------------- - * - * pg_proc_fn.h - * prototypes for functions in catalog/pg_proc.c - * - * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group - * Portions 
Copyright (c) 1994, Regents of the University of California - * - * src/include/catalog/pg_proc_fn.h - * - *------------------------------------------------------------------------- - */ -#ifndef PG_PROC_FN_H -#define PG_PROC_FN_H - -#include "catalog/objectaddress.h" -#include "nodes/pg_list.h" - -extern ObjectAddress ProcedureCreate(const char *procedureName, - Oid procNamespace, - bool replace, - bool returnsSet, - Oid returnType, - Oid proowner, - Oid languageObjectId, - Oid languageValidator, - const char *prosrc, - const char *probin, - bool isAgg, - bool isWindowFunc, - bool security_definer, - bool isLeakProof, - bool isStrict, - char volatility, - char parallel, - oidvector *parameterTypes, - Datum allParameterTypes, - Datum parameterModes, - Datum parameterNames, - List *parameterDefaults, - Datum trftypes, - Datum proconfig, - float4 procost, - float4 prorows); - -extern bool function_parse_error_transpose(const char *prosrc); - -extern List *oid_array_to_list(Datum datum); - -#endif /* PG_PROC_FN_H */ diff --git a/src/include/catalog/pg_publication.h b/src/include/catalog/pg_publication.h index aa148960cd..a5d5570f76 100644 --- a/src/include/catalog/pg_publication.h +++ b/src/include/catalog/pg_publication.h @@ -1,16 +1,16 @@ /*------------------------------------------------------------------------- * * pg_publication.h - * definition of the relation sets relation (pg_publication) + * definition of the "publication" system catalog (pg_publication) * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/include/catalog/pg_publication.h * * NOTES - * the genbki.pl script reads this file and generates .bki - * information from the DATA() statements. + * The Catalog.pm module reads this file and derives schema + * information. * *------------------------------------------------------------------------- */ @@ -18,17 +18,16 @@ #define PG_PUBLICATION_H #include "catalog/genbki.h" +#include "catalog/pg_publication_d.h" + #include "catalog/objectaddress.h" /* ---------------- * pg_publication definition. 
cpp turns this into * typedef struct FormData_pg_publication - * * ---------------- */ -#define PublicationRelationId 6104 - -CATALOG(pg_publication,6104) +CATALOG(pg_publication,6104,PublicationRelationId) { NameData pubname; /* name of the publication */ @@ -49,6 +48,9 @@ CATALOG(pg_publication,6104) /* true if deletes are published */ bool pubdelete; + /* true if truncates are published */ + bool pubtruncate; + } FormData_pg_publication; /* ---------------- @@ -58,24 +60,12 @@ CATALOG(pg_publication,6104) */ typedef FormData_pg_publication *Form_pg_publication; -/* ---------------- - * compiler constants for pg_publication - * ---------------- - */ - -#define Natts_pg_publication 6 -#define Anum_pg_publication_pubname 1 -#define Anum_pg_publication_pubowner 2 -#define Anum_pg_publication_puballtables 3 -#define Anum_pg_publication_pubinsert 4 -#define Anum_pg_publication_pubupdate 5 -#define Anum_pg_publication_pubdelete 6 - typedef struct PublicationActions { bool pubinsert; bool pubupdate; bool pubdelete; + bool pubtruncate; } PublicationActions; typedef struct Publication @@ -93,11 +83,12 @@ extern List *GetPublicationRelations(Oid pubid); extern List *GetAllTablesPublications(void); extern List *GetAllTablesPublicationRelations(void); +extern bool is_publishable_relation(Relation rel); extern ObjectAddress publication_add_relation(Oid pubid, Relation targetrel, bool if_not_exists); extern Oid get_publication_oid(const char *pubname, bool missing_ok); -extern char *get_publication_name(Oid pubid); +extern char *get_publication_name(Oid pubid, bool missing_ok); extern Datum pg_get_publication_tables(PG_FUNCTION_ARGS); diff --git a/src/include/catalog/pg_publication_rel.h b/src/include/catalog/pg_publication_rel.h index 3729e5abdc..d97b0fe9bd 100644 --- a/src/include/catalog/pg_publication_rel.h +++ b/src/include/catalog/pg_publication_rel.h @@ -1,16 +1,17 @@ /*------------------------------------------------------------------------- * * pg_publication_rel.h - * definition of the publication to relation map (pg_publication_rel) - * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * definition of the system catalog for mappings between relations and + * publications (pg_publication_rel) + * + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/include/catalog/pg_publication_rel.h * * NOTES - * the genbki.pl script reads this file and generates .bki - * information from the DATA() statements. + * The Catalog.pm module reads this file and derives schema + * information. * *------------------------------------------------------------------------- */ @@ -18,16 +19,14 @@ #define PG_PUBLICATION_REL_H #include "catalog/genbki.h" +#include "catalog/pg_publication_rel_d.h" /* ---------------- * pg_publication_rel definition. 
cpp turns this into * typedef struct FormData_pg_publication_rel - * * ---------------- */ -#define PublicationRelRelationId 6106 - -CATALOG(pg_publication_rel,6106) +CATALOG(pg_publication_rel,6106,PublicationRelRelationId) { Oid prpubid; /* Oid of the publication */ Oid prrelid; /* Oid of the relation */ @@ -40,13 +39,4 @@ CATALOG(pg_publication_rel,6106) */ typedef FormData_pg_publication_rel *Form_pg_publication_rel; -/* ---------------- - * compiler constants for pg_publication_rel - * ---------------- - */ - -#define Natts_pg_publication_rel 2 -#define Anum_pg_publication_rel_prpubid 1 -#define Anum_pg_publication_rel_prrelid 2 - #endif /* PG_PUBLICATION_REL_H */ diff --git a/src/include/catalog/pg_range.dat b/src/include/catalog/pg_range.dat new file mode 100644 index 0000000000..54d4d1648f --- /dev/null +++ b/src/include/catalog/pg_range.dat @@ -0,0 +1,31 @@ +#---------------------------------------------------------------------- +# +# pg_range.dat +# Initial contents of the pg_range system catalog. +# +# Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group +# Portions Copyright (c) 1994, Regents of the University of California +# +# src/include/catalog/pg_range.dat +# +#---------------------------------------------------------------------- + +[ + +{ rngtypid => 'int4range', rngsubtype => 'int4', rngsubopc => 'btree/int4_ops', + rngcanonical => 'int4range_canonical', rngsubdiff => 'int4range_subdiff' }, +{ rngtypid => 'numrange', rngsubtype => 'numeric', + rngsubopc => 'btree/numeric_ops', rngcanonical => '-', + rngsubdiff => 'numrange_subdiff' }, +{ rngtypid => 'tsrange', rngsubtype => 'timestamp', + rngsubopc => 'btree/timestamp_ops', rngcanonical => '-', + rngsubdiff => 'tsrange_subdiff' }, +{ rngtypid => 'tstzrange', rngsubtype => 'timestamptz', + rngsubopc => 'btree/timestamptz_ops', rngcanonical => '-', + rngsubdiff => 'tstzrange_subdiff' }, +{ rngtypid => 'daterange', rngsubtype => 'date', rngsubopc => 'btree/date_ops', + rngcanonical => 'daterange_canonical', rngsubdiff => 'daterange_subdiff' }, +{ rngtypid => 'int8range', rngsubtype => 'int8', rngsubopc => 'btree/int8_ops', + rngcanonical => 'int8range_canonical', rngsubdiff => 'int8range_subdiff' }, + +] diff --git a/src/include/catalog/pg_range.h b/src/include/catalog/pg_range.h index f12e82b2f2..ca2b28b493 100644 --- a/src/include/catalog/pg_range.h +++ b/src/include/catalog/pg_range.h @@ -1,21 +1,17 @@ /*------------------------------------------------------------------------- * * pg_range.h - * definition of the system "range" relation (pg_range) - * along with the relation's initial contents. + * definition of the "range type" system catalog (pg_range) * * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/include/catalog/pg_range.h * * NOTES - * the genbki.pl script reads this file and generates .bki - * information from the DATA() statements. - * - * XXX do NOT break up DATA() statements into multiple lines! - * the scripts are not as smart as you might think... + * The Catalog.pm module reads this file and derives schema + * information. * *------------------------------------------------------------------------- */ @@ -23,22 +19,32 @@ #define PG_RANGE_H #include "catalog/genbki.h" +#include "catalog/pg_range_d.h" /* ---------------- * pg_range definition. 
cpp turns this into * typedef struct FormData_pg_range * ---------------- */ -#define RangeRelationId 3541 - -CATALOG(pg_range,3541) BKI_WITHOUT_OIDS +CATALOG(pg_range,3541,RangeRelationId) BKI_WITHOUT_OIDS { - Oid rngtypid; /* OID of owning range type */ - Oid rngsubtype; /* OID of range's element type (subtype) */ - Oid rngcollation; /* collation for this range type, or 0 */ - Oid rngsubopc; /* subtype's btree opclass */ - regproc rngcanonical; /* canonicalize range, or 0 */ - regproc rngsubdiff; /* subtype difference as a float8, or 0 */ + /* OID of owning range type */ + Oid rngtypid BKI_LOOKUP(pg_type); + + /* OID of range's element type (subtype) */ + Oid rngsubtype BKI_LOOKUP(pg_type); + + /* collation for this range type, or 0 */ + Oid rngcollation BKI_DEFAULT(0); + + /* subtype's btree opclass */ + Oid rngsubopc BKI_LOOKUP(pg_opclass); + + /* canonicalize range, or 0 */ + regproc rngcanonical BKI_LOOKUP(pg_proc); + + /* subtype difference as a float8, or 0 */ + regproc rngsubdiff BKI_LOOKUP(pg_proc); } FormData_pg_range; /* ---------------- @@ -48,31 +54,6 @@ CATALOG(pg_range,3541) BKI_WITHOUT_OIDS */ typedef FormData_pg_range *Form_pg_range; -/* ---------------- - * compiler constants for pg_range - * ---------------- - */ -#define Natts_pg_range 6 -#define Anum_pg_range_rngtypid 1 -#define Anum_pg_range_rngsubtype 2 -#define Anum_pg_range_rngcollation 3 -#define Anum_pg_range_rngsubopc 4 -#define Anum_pg_range_rngcanonical 5 -#define Anum_pg_range_rngsubdiff 6 - - -/* ---------------- - * initial contents of pg_range - * ---------------- - */ -DATA(insert ( 3904 23 0 1978 int4range_canonical int4range_subdiff)); -DATA(insert ( 3906 1700 0 3125 - numrange_subdiff)); -DATA(insert ( 3908 1114 0 3128 - tsrange_subdiff)); -DATA(insert ( 3910 1184 0 3127 - tstzrange_subdiff)); -DATA(insert ( 3912 1082 0 3122 daterange_canonical daterange_subdiff)); -DATA(insert ( 3926 20 0 3124 int8range_canonical int8range_subdiff)); - - /* * prototypes for functions in pg_range.c */ diff --git a/src/include/catalog/pg_replication_origin.h b/src/include/catalog/pg_replication_origin.h index 24c8a8430c..82ff7a7c69 100644 --- a/src/include/catalog/pg_replication_origin.h +++ b/src/include/catalog/pg_replication_origin.h @@ -1,16 +1,17 @@ /*------------------------------------------------------------------------- * * pg_replication_origin.h - * Persistent replication origin registry + * definition of the "replication origin" system catalog + * (pg_replication_origin) * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/include/catalog/pg_replication_origin.h * * NOTES - * the genbki.pl script reads this file and generates .bki - * information from the DATA() statements. + * The Catalog.pm module reads this file and derives schema + * information. 
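The conversion pattern in the hunks above is uniform: each hand-maintained relation-OID macro and the Natts_/Anum_ constants move into a generated pg_<catalog>_d.h header that the catalog .h file now includes, so existing backend code compiles unchanged. A minimal sketch of that unchanged consumer side (not part of the patch; the sketch_* helper name is invented, assuming the usual backend headers and the RANGETYPE syscache):

#include "postgres.h"
#include "access/htup_details.h"
#include "catalog/pg_range.h"
#include "utils/syscache.h"

/* Look up the element type of a range type; RangeRelationId, Form_pg_range
 * and friends now arrive via the generated pg_range_d.h. */
static Oid
sketch_get_range_subtype(Oid rangetypid)
{
	HeapTuple	tup = SearchSysCache1(RANGETYPE, ObjectIdGetDatum(rangetypid));
	Oid			subtype = InvalidOid;

	if (HeapTupleIsValid(tup))
	{
		subtype = ((Form_pg_range) GETSTRUCT(tup))->rngsubtype;
		ReleaseSysCache(tup);
	}
	return subtype;
}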
* *------------------------------------------------------------------------- */ @@ -18,6 +19,8 @@ #define PG_REPLICATION_ORIGIN_H #include "catalog/genbki.h" +#include "catalog/pg_replication_origin_d.h" + #include "access/xlogdefs.h" /* ---------------- @@ -25,9 +28,7 @@ * typedef struct FormData_pg_replication_origin * ---------------- */ -#define ReplicationOriginRelationId 6000 - -CATALOG(pg_replication_origin,6000) BKI_SHARED_RELATION BKI_WITHOUT_OIDS +CATALOG(pg_replication_origin,6000,ReplicationOriginRelationId) BKI_SHARED_RELATION BKI_WITHOUT_OIDS { /* * Locally known id that get included into WAL. @@ -54,17 +55,4 @@ CATALOG(pg_replication_origin,6000) BKI_SHARED_RELATION BKI_WITHOUT_OIDS typedef FormData_pg_replication_origin *Form_pg_replication_origin; -/* ---------------- - * compiler constants for pg_replication_origin - * ---------------- - */ -#define Natts_pg_replication_origin 2 -#define Anum_pg_replication_origin_roident 1 -#define Anum_pg_replication_origin_roname 2 - -/* ---------------- - * pg_replication_origin has no initial contents - * ---------------- - */ - #endif /* PG_REPLICATION_ORIGIN_H */ diff --git a/src/include/catalog/pg_rewrite.h b/src/include/catalog/pg_rewrite.h index 48b9333a9d..0e50b87926 100644 --- a/src/include/catalog/pg_rewrite.h +++ b/src/include/catalog/pg_rewrite.h @@ -1,21 +1,20 @@ /*------------------------------------------------------------------------- * * pg_rewrite.h - * definition of the system "rewrite-rule" relation (pg_rewrite) - * along with the relation's initial contents. + * definition of the "rewrite rule" system catalog (pg_rewrite) * * As of Postgres 7.3, the primary key for this table is * --- ie, rule names are only unique among the rules of a given table. * * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/include/catalog/pg_rewrite.h * * NOTES - * the genbki.pl script reads this file and generates .bki - * information from the DATA() statements. + * The Catalog.pm module reads this file and derives schema + * information. * *------------------------------------------------------------------------- */ @@ -23,15 +22,14 @@ #define PG_REWRITE_H #include "catalog/genbki.h" +#include "catalog/pg_rewrite_d.h" /* ---------------- * pg_rewrite definition. 
cpp turns this into * typedef struct FormData_pg_rewrite * ---------------- */ -#define RewriteRelationId 2618 - -CATALOG(pg_rewrite,2618) +CATALOG(pg_rewrite,2618,RewriteRelationId) { NameData rulename; Oid ev_class; @@ -40,8 +38,8 @@ CATALOG(pg_rewrite,2618) bool is_instead; #ifdef CATALOG_VARLEN /* variable-length fields start here */ - pg_node_tree ev_qual; - pg_node_tree ev_action; + pg_node_tree ev_qual BKI_FORCE_NOT_NULL; + pg_node_tree ev_action BKI_FORCE_NOT_NULL; #endif } FormData_pg_rewrite; @@ -52,17 +50,4 @@ CATALOG(pg_rewrite,2618) */ typedef FormData_pg_rewrite *Form_pg_rewrite; -/* ---------------- - * compiler constants for pg_rewrite - * ---------------- - */ -#define Natts_pg_rewrite 7 -#define Anum_pg_rewrite_rulename 1 -#define Anum_pg_rewrite_ev_class 2 -#define Anum_pg_rewrite_ev_type 3 -#define Anum_pg_rewrite_ev_enabled 4 -#define Anum_pg_rewrite_is_instead 5 -#define Anum_pg_rewrite_ev_qual 6 -#define Anum_pg_rewrite_ev_action 7 - #endif /* PG_REWRITE_H */ diff --git a/src/include/catalog/pg_seclabel.h b/src/include/catalog/pg_seclabel.h index 3db9612fc3..6889369b5d 100644 --- a/src/include/catalog/pg_seclabel.h +++ b/src/include/catalog/pg_seclabel.h @@ -1,26 +1,31 @@ /* ------------------------------------------------------------------------- * * pg_seclabel.h - * definition of the system "security label" relation (pg_seclabel) + * definition of the "security label" system catalog (pg_seclabel) * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * + * src/include/catalog/pg_seclabel.h + * + * NOTES + * The Catalog.pm module reads this file and derives schema + * information. + * * ------------------------------------------------------------------------- */ #ifndef PG_SECLABEL_H #define PG_SECLABEL_H #include "catalog/genbki.h" +#include "catalog/pg_seclabel_d.h" /* ---------------- * pg_seclabel definition. cpp turns this into * typedef struct FormData_pg_seclabel * ---------------- */ -#define SecLabelRelationId 3596 - -CATALOG(pg_seclabel,3596) BKI_WITHOUT_OIDS +CATALOG(pg_seclabel,3596,SecLabelRelationId) BKI_WITHOUT_OIDS { Oid objoid; /* OID of the object itself */ Oid classoid; /* OID of table containing the object */ @@ -32,15 +37,4 @@ CATALOG(pg_seclabel,3596) BKI_WITHOUT_OIDS #endif } FormData_pg_seclabel; -/* ---------------- - * compiler constants for pg_seclabel - * ---------------- - */ -#define Natts_pg_seclabel 5 -#define Anum_pg_seclabel_objoid 1 -#define Anum_pg_seclabel_classoid 2 -#define Anum_pg_seclabel_objsubid 3 -#define Anum_pg_seclabel_provider 4 -#define Anum_pg_seclabel_label 5 - #endif /* PG_SECLABEL_H */ diff --git a/src/include/catalog/pg_sequence.h b/src/include/catalog/pg_sequence.h index 8ae6b7143d..beea215c81 100644 --- a/src/include/catalog/pg_sequence.h +++ b/src/include/catalog/pg_sequence.h @@ -1,11 +1,26 @@ +/* ------------------------------------------------------------------------- + * + * pg_sequence.h + * definition of the "sequence" system catalog (pg_sequence) + * + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group + * Portions Copyright (c) 1994, Regents of the University of California + * + * src/include/catalog/pg_sequence.h + * + * NOTES + * The Catalog.pm module reads this file and derives schema + * information. 
+ * + * ------------------------------------------------------------------------- + */ #ifndef PG_SEQUENCE_H #define PG_SEQUENCE_H #include "catalog/genbki.h" +#include "catalog/pg_sequence_d.h" -#define SequenceRelationId 2224 - -CATALOG(pg_sequence,2224) BKI_WITHOUT_OIDS +CATALOG(pg_sequence,2224,SequenceRelationId) BKI_WITHOUT_OIDS { Oid seqrelid; Oid seqtypid; @@ -17,16 +32,11 @@ CATALOG(pg_sequence,2224) BKI_WITHOUT_OIDS bool seqcycle; } FormData_pg_sequence; +/* ---------------- + * Form_pg_sequence corresponds to a pointer to a tuple with + * the format of pg_sequence relation. + * ---------------- + */ typedef FormData_pg_sequence *Form_pg_sequence; -#define Natts_pg_sequence 8 -#define Anum_pg_sequence_seqrelid 1 -#define Anum_pg_sequence_seqtypid 2 -#define Anum_pg_sequence_seqstart 3 -#define Anum_pg_sequence_seqincrement 4 -#define Anum_pg_sequence_seqmax 5 -#define Anum_pg_sequence_seqmin 6 -#define Anum_pg_sequence_seqcache 7 -#define Anum_pg_sequence_seqcycle 8 - #endif /* PG_SEQUENCE_H */ diff --git a/src/include/catalog/pg_shdepend.h b/src/include/catalog/pg_shdepend.h index 51b6588d3e..01e1eb760c 100644 --- a/src/include/catalog/pg_shdepend.h +++ b/src/include/catalog/pg_shdepend.h @@ -1,18 +1,25 @@ /*------------------------------------------------------------------------- * * pg_shdepend.h - * definition of the system "shared dependency" relation (pg_shdepend) - * along with the relation's initial contents. + * definition of the "shared dependency" system catalog (pg_shdepend) * + * pg_shdepend has no preloaded contents, so there is no pg_shdepend.dat + * file; system-defined dependencies are loaded into it during a late stage + * of the initdb process. * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * NOTE: we do not represent all possible dependency pairs in pg_shdepend; + * for example, there's not much value in creating an explicit dependency + * from a relation to its database. Currently, only dependencies on roles + * are explicitly stored in pg_shdepend. + * + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/include/catalog/pg_shdepend.h * * NOTES - * the genbki.pl script reads this file and generates .bki - * information from the DATA() statements. + * The Catalog.pm module reads this file and derives schema + * information. * *------------------------------------------------------------------------- */ @@ -20,15 +27,14 @@ #define PG_SHDEPEND_H #include "catalog/genbki.h" +#include "catalog/pg_shdepend_d.h" /* ---------------- * pg_shdepend definition. cpp turns this into * typedef struct FormData_pg_shdepend * ---------------- */ -#define SharedDependRelationId 1214 - -CATALOG(pg_shdepend,1214) BKI_SHARED_RELATION BKI_WITHOUT_OIDS +CATALOG(pg_shdepend,1214,SharedDependRelationId) BKI_SHARED_RELATION BKI_WITHOUT_OIDS { /* * Identification of the dependent (referencing) object. 
@@ -63,28 +69,4 @@ CATALOG(pg_shdepend,1214) BKI_SHARED_RELATION BKI_WITHOUT_OIDS */ typedef FormData_pg_shdepend *Form_pg_shdepend; -/* ---------------- - * compiler constants for pg_shdepend - * ---------------- - */ -#define Natts_pg_shdepend 7 -#define Anum_pg_shdepend_dbid 1 -#define Anum_pg_shdepend_classid 2 -#define Anum_pg_shdepend_objid 3 -#define Anum_pg_shdepend_objsubid 4 -#define Anum_pg_shdepend_refclassid 5 -#define Anum_pg_shdepend_refobjid 6 -#define Anum_pg_shdepend_deptype 7 - - -/* - * pg_shdepend has no preloaded contents; system-defined dependencies are - * loaded into it during a late stage of the initdb process. - * - * NOTE: we do not represent all possible dependency pairs in pg_shdepend; - * for example, there's not much value in creating an explicit dependency - * from a relation to its database. Currently, only dependencies on roles - * are explicitly stored in pg_shdepend. - */ - #endif /* PG_SHDEPEND_H */ diff --git a/src/include/catalog/pg_shdescription.h b/src/include/catalog/pg_shdescription.h index 154c48b584..53cade1548 100644 --- a/src/include/catalog/pg_shdescription.h +++ b/src/include/catalog/pg_shdescription.h @@ -1,9 +1,13 @@ /*------------------------------------------------------------------------- * * pg_shdescription.h - * definition of the system "shared description" relation + * definition of the "shared description" system catalog * (pg_shdescription) * + * Because the contents of this table are taken from the *.dat files + * of other catalogs, there is no pg_shdescription.dat file. The initial + * contents are assembled by genbki.pl and loaded during initdb. + * * NOTE: an object is identified by the OID of the row that primarily * defines the object, plus the OID of the table that that row appears in. * For example, a database is identified by the OID of its pg_database row @@ -12,17 +16,14 @@ * across tables. * * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/include/catalog/pg_shdescription.h * * NOTES - * the genbki.pl script reads this file and generates .bki - * information from the DATA() statements. - * - * XXX do NOT break up DATA() statements into multiple lines! - * the scripts are not as smart as you might think... + * The Catalog.pm module reads this file and derives schema + * information. * *------------------------------------------------------------------------- */ @@ -30,15 +31,14 @@ #define PG_SHDESCRIPTION_H #include "catalog/genbki.h" +#include "catalog/pg_shdescription_d.h" /* ---------------- * pg_shdescription definition. 
cpp turns this into * typedef struct FormData_pg_shdescription * ---------------- */ -#define SharedDescriptionRelationId 2396 - -CATALOG(pg_shdescription,2396) BKI_SHARED_RELATION BKI_WITHOUT_OIDS +CATALOG(pg_shdescription,2396,SharedDescriptionRelationId) BKI_SHARED_RELATION BKI_WITHOUT_OIDS { Oid objoid; /* OID of object itself */ Oid classoid; /* OID of table containing object */ @@ -55,24 +55,4 @@ CATALOG(pg_shdescription,2396) BKI_SHARED_RELATION BKI_WITHOUT_OIDS */ typedef FormData_pg_shdescription * Form_pg_shdescription; -/* ---------------- - * compiler constants for pg_shdescription - * ---------------- - */ -#define Natts_pg_shdescription 3 -#define Anum_pg_shdescription_objoid 1 -#define Anum_pg_shdescription_classoid 2 -#define Anum_pg_shdescription_description 3 - -/* ---------------- - * initial contents of pg_shdescription - * ---------------- - */ - -/* - * Because the contents of this table are taken from the other *.h files, - * there is no initialization here. The initial contents are extracted - * by genbki.pl and loaded during initdb. - */ - #endif /* PG_SHDESCRIPTION_H */ diff --git a/src/include/catalog/pg_shseclabel.h b/src/include/catalog/pg_shseclabel.h index f8a906bb12..2630f965fd 100644 --- a/src/include/catalog/pg_shseclabel.h +++ b/src/include/catalog/pg_shseclabel.h @@ -1,27 +1,31 @@ /* ------------------------------------------------------------------------- * * pg_shseclabel.h - * definition of the system "security label" relation (pg_shseclabel) + * definition of the "shared security label" system catalog (pg_shseclabel) * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * + * src/include/catalog/pg_shseclabel.h + * + * NOTES + * The Catalog.pm module reads this file and derives schema + * information. + * * ------------------------------------------------------------------------- */ #ifndef PG_SHSECLABEL_H #define PG_SHSECLABEL_H #include "catalog/genbki.h" +#include "catalog/pg_shseclabel_d.h" /* ---------------- * pg_shseclabel definition. 
cpp turns this into * typedef struct FormData_pg_shseclabel * ---------------- */ -#define SharedSecLabelRelationId 3592 -#define SharedSecLabelRelation_Rowtype_Id 4066 - -CATALOG(pg_shseclabel,3592) BKI_SHARED_RELATION BKI_ROWTYPE_OID(4066) BKI_WITHOUT_OIDS BKI_SCHEMA_MACRO +CATALOG(pg_shseclabel,3592,SharedSecLabelRelationId) BKI_SHARED_RELATION BKI_ROWTYPE_OID(4066,SharedSecLabelRelation_Rowtype_Id) BKI_WITHOUT_OIDS BKI_SCHEMA_MACRO { Oid objoid; /* OID of the shared object itself */ Oid classoid; /* OID of table containing the shared object */ @@ -34,14 +38,4 @@ CATALOG(pg_shseclabel,3592) BKI_SHARED_RELATION BKI_ROWTYPE_OID(4066) BKI_WITHOU typedef FormData_pg_shseclabel * Form_pg_shseclabel; -/* ---------------- - * compiler constants for pg_shseclabel - * ---------------- - */ -#define Natts_pg_shseclabel 4 -#define Anum_pg_shseclabel_objoid 1 -#define Anum_pg_shseclabel_classoid 2 -#define Anum_pg_shseclabel_provider 3 -#define Anum_pg_shseclabel_label 4 - #endif /* PG_SHSECLABEL_H */ diff --git a/src/include/catalog/pg_statistic.h b/src/include/catalog/pg_statistic.h index 3713a56bbd..141e260de9 100644 --- a/src/include/catalog/pg_statistic.h +++ b/src/include/catalog/pg_statistic.h @@ -1,18 +1,17 @@ /*------------------------------------------------------------------------- * * pg_statistic.h - * definition of the system "statistic" relation (pg_statistic) - * along with the relation's initial contents. + * definition of the "statistics" system catalog (pg_statistic) * * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/include/catalog/pg_statistic.h * * NOTES - * the genbki.pl script reads this file and generates .bki - * information from the DATA() statements. + * The Catalog.pm module reads this file and derives schema + * information. * *------------------------------------------------------------------------- */ @@ -20,15 +19,14 @@ #define PG_STATISTIC_H #include "catalog/genbki.h" +#include "catalog/pg_statistic_d.h" /* ---------------- * pg_statistic definition. 
cpp turns this into * typedef struct FormData_pg_statistic * ---------------- */ -#define StatisticRelationId 2619 - -CATALOG(pg_statistic,2619) BKI_WITHOUT_OIDS +CATALOG(pg_statistic,2619,StatisticRelationId) BKI_WITHOUT_OIDS { /* These fields form the unique key for the entry: */ Oid starelid; /* relation containing attribute */ @@ -128,45 +126,13 @@ CATALOG(pg_statistic,2619) BKI_WITHOUT_OIDS */ typedef FormData_pg_statistic *Form_pg_statistic; -/* ---------------- - * compiler constants for pg_statistic - * ---------------- - */ -#define Natts_pg_statistic 26 -#define Anum_pg_statistic_starelid 1 -#define Anum_pg_statistic_staattnum 2 -#define Anum_pg_statistic_stainherit 3 -#define Anum_pg_statistic_stanullfrac 4 -#define Anum_pg_statistic_stawidth 5 -#define Anum_pg_statistic_stadistinct 6 -#define Anum_pg_statistic_stakind1 7 -#define Anum_pg_statistic_stakind2 8 -#define Anum_pg_statistic_stakind3 9 -#define Anum_pg_statistic_stakind4 10 -#define Anum_pg_statistic_stakind5 11 -#define Anum_pg_statistic_staop1 12 -#define Anum_pg_statistic_staop2 13 -#define Anum_pg_statistic_staop3 14 -#define Anum_pg_statistic_staop4 15 -#define Anum_pg_statistic_staop5 16 -#define Anum_pg_statistic_stanumbers1 17 -#define Anum_pg_statistic_stanumbers2 18 -#define Anum_pg_statistic_stanumbers3 19 -#define Anum_pg_statistic_stanumbers4 20 -#define Anum_pg_statistic_stanumbers5 21 -#define Anum_pg_statistic_stavalues1 22 -#define Anum_pg_statistic_stavalues2 23 -#define Anum_pg_statistic_stavalues3 24 -#define Anum_pg_statistic_stavalues4 25 -#define Anum_pg_statistic_stavalues5 26 +#ifdef EXPOSE_TO_CLIENT_CODE /* - * Currently, five statistical slot "kinds" are defined by core PostgreSQL, - * as documented below. Additional "kinds" will probably appear in - * future to help cope with non-scalar datatypes. Also, custom data types - * can define their own "kind" codes by mutual agreement between a custom - * typanalyze routine and the selectivity estimation functions of the type's - * operators. + * Several statistical slot "kinds" are defined by core PostgreSQL, as + * documented below. Also, custom data types can define their own "kind" + * codes by mutual agreement between a custom typanalyze routine and the + * selectivity estimation functions of the type's operators. * * Code reading the pg_statistic relation should not assume that a particular * data "kind" will appear in any particular slot. Instead, search the @@ -291,4 +257,6 @@ typedef FormData_pg_statistic *Form_pg_statistic; */ #define STATISTIC_KIND_BOUNDS_HISTOGRAM 7 +#endif /* EXPOSE_TO_CLIENT_CODE */ + #endif /* PG_STATISTIC_H */ diff --git a/src/include/catalog/pg_statistic_ext.h b/src/include/catalog/pg_statistic_ext.h index 78138026db..443798ae52 100644 --- a/src/include/catalog/pg_statistic_ext.h +++ b/src/include/catalog/pg_statistic_ext.h @@ -1,18 +1,17 @@ /*------------------------------------------------------------------------- * * pg_statistic_ext.h - * definition of the system "extended statistic" relation (pg_statistic_ext) - * along with the relation's initial contents. + * definition of the "extended statistics" system catalog (pg_statistic_ext) * * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/include/catalog/pg_statistic_ext.h * * NOTES - * the genbki.pl script reads this file and generates .bki - * information from the DATA() statements. 
+ * The Catalog.pm module reads this file and derives schema + * information. * *------------------------------------------------------------------------- */ @@ -20,15 +19,14 @@ #define PG_STATISTIC_EXT_H #include "catalog/genbki.h" +#include "catalog/pg_statistic_ext_d.h" /* ---------------- * pg_statistic_ext definition. cpp turns this into * typedef struct FormData_pg_statistic_ext * ---------------- */ -#define StatisticExtRelationId 3381 - -CATALOG(pg_statistic_ext,3381) +CATALOG(pg_statistic_ext,3381,StatisticExtRelationId) { Oid stxrelid; /* relation containing attributes */ @@ -45,7 +43,7 @@ CATALOG(pg_statistic_ext,3381) int2vector stxkeys; /* array of column keys */ #ifdef CATALOG_VARLEN - char stxkind[1] BKI_FORCE_NOT_NULL; /* statistic types requested + char stxkind[1] BKI_FORCE_NOT_NULL; /* statistics kinds requested * to build */ pg_ndistinct stxndistinct; /* ndistinct coefficients (serialized) */ pg_dependencies stxdependencies; /* dependencies (serialized) */ @@ -60,21 +58,11 @@ CATALOG(pg_statistic_ext,3381) */ typedef FormData_pg_statistic_ext *Form_pg_statistic_ext; -/* ---------------- - * compiler constants for pg_statistic_ext - * ---------------- - */ -#define Natts_pg_statistic_ext 8 -#define Anum_pg_statistic_ext_stxrelid 1 -#define Anum_pg_statistic_ext_stxname 2 -#define Anum_pg_statistic_ext_stxnamespace 3 -#define Anum_pg_statistic_ext_stxowner 4 -#define Anum_pg_statistic_ext_stxkeys 5 -#define Anum_pg_statistic_ext_stxkind 6 -#define Anum_pg_statistic_ext_stxndistinct 7 -#define Anum_pg_statistic_ext_stxdependencies 8 +#ifdef EXPOSE_TO_CLIENT_CODE #define STATS_EXT_NDISTINCT 'd' #define STATS_EXT_DEPENDENCIES 'f' +#endif /* EXPOSE_TO_CLIENT_CODE */ + #endif /* PG_STATISTIC_EXT_H */ diff --git a/src/include/catalog/pg_subscription.h b/src/include/catalog/pg_subscription.h index 274ff6bc42..e4dc771cf5 100644 --- a/src/include/catalog/pg_subscription.h +++ b/src/include/catalog/pg_subscription.h @@ -1,17 +1,25 @@ /* ------------------------------------------------------------------------- * * pg_subscription.h - * Definition of the subscription catalog (pg_subscription). + * definition of the "subscription" system catalog (pg_subscription) * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * + * src/include/catalog/pg_subscription.h + * + * NOTES + * The Catalog.pm module reads this file and derives schema + * information. + * * ------------------------------------------------------------------------- */ #ifndef PG_SUBSCRIPTION_H #define PG_SUBSCRIPTION_H #include "catalog/genbki.h" +#include "catalog/pg_subscription_d.h" + #include "nodes/pg_list.h" /* ---------------- @@ -19,8 +27,6 @@ * typedef struct FormData_pg_subscription * ---------------- */ -#define SubscriptionRelationId 6100 -#define SubscriptionRelation_Rowtype_Id 6101 /* * Technically, the subscriptions live inside the database, so a shared catalog @@ -30,7 +36,7 @@ * * NOTE: When adding a column, also update system_views.sql. */ -CATALOG(pg_subscription,6100) BKI_SHARED_RELATION BKI_ROWTYPE_OID(6101) BKI_SCHEMA_MACRO +CATALOG(pg_subscription,6100,SubscriptionRelationId) BKI_SHARED_RELATION BKI_ROWTYPE_OID(6101,SubscriptionRelation_Rowtype_Id) BKI_SCHEMA_MACRO { Oid subdbid; /* Database the subscription is in. 
*/ NameData subname; /* Name of the subscription */ @@ -57,21 +63,6 @@ CATALOG(pg_subscription,6100) BKI_SHARED_RELATION BKI_ROWTYPE_OID(6101) BKI_SCHE typedef FormData_pg_subscription *Form_pg_subscription; -/* ---------------- - * compiler constants for pg_subscription - * ---------------- - */ -#define Natts_pg_subscription 8 -#define Anum_pg_subscription_subdbid 1 -#define Anum_pg_subscription_subname 2 -#define Anum_pg_subscription_subowner 3 -#define Anum_pg_subscription_subenabled 4 -#define Anum_pg_subscription_subconninfo 5 -#define Anum_pg_subscription_subslotname 6 -#define Anum_pg_subscription_subsynccommit 7 -#define Anum_pg_subscription_subpublications 8 - - typedef struct Subscription { Oid oid; /* Oid of the subscription */ @@ -89,7 +80,7 @@ typedef struct Subscription extern Subscription *GetSubscription(Oid subid, bool missing_ok); extern void FreeSubscription(Subscription *sub); extern Oid get_subscription_oid(const char *subname, bool missing_ok); -extern char *get_subscription_name(Oid subid); +extern char *get_subscription_name(Oid subid, bool missing_ok); extern int CountDBSubscriptions(Oid dbid); diff --git a/src/include/catalog/pg_subscription_rel.h b/src/include/catalog/pg_subscription_rel.h index 991ca9d552..556cb94841 100644 --- a/src/include/catalog/pg_subscription_rel.h +++ b/src/include/catalog/pg_subscription_rel.h @@ -1,19 +1,27 @@ /* ------------------------------------------------------------------------- * * pg_subscription_rel.h - * Local info about tables that come from the publisher of a - * subscription (pg_subscription_rel). + * definition of the system catalog containing the state for each + * replicated table in each subscription (pg_subscription_rel) * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * + * src/include/catalog/pg_subscription_rel.h + * + * NOTES + * The Catalog.pm module reads this file and derives schema + * information. 
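Both get_publication_name() and get_subscription_name() gain a missing_ok argument in this patch. A minimal caller-side sketch (not part of the patch; the helper name is invented): following the usual missing_ok contract, passing true yields NULL for a concurrently dropped object instead of raising an error.

#include "postgres.h"
#include "catalog/pg_subscription.h"

static char *
sketch_subscription_name_if_exists(Oid subid)
{
	/* missing_ok = true: NULL result rather than ERROR if subid is gone */
	return get_subscription_name(subid, true);
}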
+ * * ------------------------------------------------------------------------- */ #ifndef PG_SUBSCRIPTION_REL_H #define PG_SUBSCRIPTION_REL_H -#include "access/xlogdefs.h" #include "catalog/genbki.h" +#include "catalog/pg_subscription_rel_d.h" + +#include "access/xlogdefs.h" #include "nodes/pg_list.h" /* ---------------- @@ -21,31 +29,18 @@ * typedef struct FormData_pg_subscription_rel * ---------------- */ -#define SubscriptionRelRelationId 6102 - -/* Workaround for genbki not knowing about XLogRecPtr */ -#define pg_lsn XLogRecPtr - -CATALOG(pg_subscription_rel,6102) BKI_WITHOUT_OIDS +CATALOG(pg_subscription_rel,6102,SubscriptionRelRelationId) BKI_WITHOUT_OIDS { Oid srsubid; /* Oid of subscription */ Oid srrelid; /* Oid of relation */ char srsubstate; /* state of the relation in subscription */ - pg_lsn srsublsn; /* remote lsn of the state change used for + XLogRecPtr srsublsn; /* remote lsn of the state change used for * synchronization coordination */ } FormData_pg_subscription_rel; typedef FormData_pg_subscription_rel *Form_pg_subscription_rel; -/* ---------------- - * compiler constants for pg_subscription_rel - * ---------------- - */ -#define Natts_pg_subscription_rel 4 -#define Anum_pg_subscription_rel_srsubid 1 -#define Anum_pg_subscription_rel_srrelid 2 -#define Anum_pg_subscription_rel_srsubstate 3 -#define Anum_pg_subscription_rel_srsublsn 4 +#ifdef EXPOSE_TO_CLIENT_CODE /* ---------------- * substate constants @@ -63,6 +58,8 @@ typedef FormData_pg_subscription_rel *Form_pg_subscription_rel; #define SUBREL_STATE_SYNCWAIT 'w' /* waiting for sync */ #define SUBREL_STATE_CATCHUP 'c' /* catching up with apply */ +#endif /* EXPOSE_TO_CLIENT_CODE */ + typedef struct SubscriptionRelState { Oid relid; @@ -70,8 +67,10 @@ typedef struct SubscriptionRelState char state; } SubscriptionRelState; -extern Oid SetSubscriptionRelState(Oid subid, Oid relid, char state, - XLogRecPtr sublsn, bool update_only); +extern Oid AddSubscriptionRelState(Oid subid, Oid relid, char state, + XLogRecPtr sublsn); +extern Oid UpdateSubscriptionRelState(Oid subid, Oid relid, char state, + XLogRecPtr sublsn); extern char GetSubscriptionRelState(Oid subid, Oid relid, XLogRecPtr *sublsn, bool missing_ok); extern void RemoveSubscriptionRel(Oid subid, Oid relid); diff --git a/src/include/catalog/pg_tablespace.dat b/src/include/catalog/pg_tablespace.dat new file mode 100644 index 0000000000..dc942f4be0 --- /dev/null +++ b/src/include/catalog/pg_tablespace.dat @@ -0,0 +1,22 @@ +#---------------------------------------------------------------------- +# +# pg_tablespace.dat +# Initial contents of the pg_tablespace system catalog. 
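The #ifdef EXPOSE_TO_CLIENT_CODE markers introduced above tell genbki.pl to copy the enclosed definitions into the generated pg_subscription_rel_d.h, which, unlike the full catalog header, is safe to include from client-side code. A minimal sketch of such a consumer (not part of the patch; the helper name is invented):

#include <stdbool.h>

#include "catalog/pg_subscription_rel_d.h"

static bool
sketch_sync_still_running(char substate)
{
	/* substate letters are exposed through the generated _d.h header */
	return substate == SUBREL_STATE_SYNCWAIT ||
		   substate == SUBREL_STATE_CATCHUP;
}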
+# +# Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group +# Portions Copyright (c) 1994, Regents of the University of California +# +# src/include/catalog/pg_tablespace.dat +# +#---------------------------------------------------------------------- + +[ + +{ oid => '1663', oid_symbol => 'DEFAULTTABLESPACE_OID', + spcname => 'pg_default', spcowner => 'PGUID', spcacl => '_null_', + spcoptions => '_null_' }, +{ oid => '1664', oid_symbol => 'GLOBALTABLESPACE_OID', + spcname => 'pg_global', spcowner => 'PGUID', spcacl => '_null_', + spcoptions => '_null_' }, + +] diff --git a/src/include/catalog/pg_tablespace.h b/src/include/catalog/pg_tablespace.h index b759d5cea4..4010f1b5a7 100644 --- a/src/include/catalog/pg_tablespace.h +++ b/src/include/catalog/pg_tablespace.h @@ -1,18 +1,17 @@ /*------------------------------------------------------------------------- * * pg_tablespace.h - * definition of the system "tablespace" relation (pg_tablespace) - * along with the relation's initial contents. + * definition of the "tablespace" system catalog (pg_tablespace) * * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/include/catalog/pg_tablespace.h * * NOTES - * the genbki.pl script reads this file and generates .bki - * information from the DATA() statements. + * The Catalog.pm module reads this file and derives schema + * information. * *------------------------------------------------------------------------- */ @@ -20,15 +19,14 @@ #define PG_TABLESPACE_H #include "catalog/genbki.h" +#include "catalog/pg_tablespace_d.h" /* ---------------- * pg_tablespace definition. cpp turns this into * typedef struct FormData_pg_tablespace * ---------------- */ -#define TableSpaceRelationId 1213 - -CATALOG(pg_tablespace,1213) BKI_SHARED_RELATION +CATALOG(pg_tablespace,1213,TableSpaceRelationId) BKI_SHARED_RELATION { NameData spcname; /* tablespace name */ Oid spcowner; /* owner of tablespace */ @@ -46,21 +44,4 @@ CATALOG(pg_tablespace,1213) BKI_SHARED_RELATION */ typedef FormData_pg_tablespace *Form_pg_tablespace; -/* ---------------- - * compiler constants for pg_tablespace - * ---------------- - */ - -#define Natts_pg_tablespace 4 -#define Anum_pg_tablespace_spcname 1 -#define Anum_pg_tablespace_spcowner 2 -#define Anum_pg_tablespace_spcacl 3 -#define Anum_pg_tablespace_spcoptions 4 - -DATA(insert OID = 1663 ( pg_default PGUID _null_ _null_ )); -DATA(insert OID = 1664 ( pg_global PGUID _null_ _null_ )); - -#define DEFAULTTABLESPACE_OID 1663 -#define GLOBALTABLESPACE_OID 1664 - #endif /* PG_TABLESPACE_H */ diff --git a/src/include/catalog/pg_transform.h b/src/include/catalog/pg_transform.h index 8b1610bb83..f46ff0a90e 100644 --- a/src/include/catalog/pg_transform.h +++ b/src/include/catalog/pg_transform.h @@ -1,14 +1,17 @@ /*------------------------------------------------------------------------- * * pg_transform.h + * definition of the "transform" system catalog (pg_transform) * - * Copyright (c) 2012-2017, PostgreSQL Global Development Group + * + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group + * Portions Copyright (c) 1994, Regents of the University of California * * src/include/catalog/pg_transform.h * * NOTES - * the genbki.pl script reads this file and generates .bki - * information from the DATA() statements. + * The Catalog.pm module reads this file and derives schema + * information. 
* *------------------------------------------------------------------------- */ @@ -16,15 +19,14 @@ #define PG_TRANSFORM_H #include "catalog/genbki.h" +#include "catalog/pg_transform_d.h" /* ---------------- * pg_transform definition. cpp turns this into * typedef struct FormData_pg_transform * ---------------- */ -#define TransformRelationId 3576 - -CATALOG(pg_transform,3576) +CATALOG(pg_transform,3576,TransformRelationId) { Oid trftype; Oid trflang; @@ -32,16 +34,11 @@ CATALOG(pg_transform,3576) regproc trftosql; } FormData_pg_transform; -typedef FormData_pg_transform *Form_pg_transform; - /* ---------------- - * compiler constants for pg_transform + * Form_pg_transform corresponds to a pointer to a tuple with + * the format of pg_transform relation. * ---------------- */ -#define Natts_pg_transform 4 -#define Anum_pg_transform_trftype 1 -#define Anum_pg_transform_trflang 2 -#define Anum_pg_transform_trffromsql 3 -#define Anum_pg_transform_trftosql 4 +typedef FormData_pg_transform *Form_pg_transform; #endif /* PG_TRANSFORM_H */ diff --git a/src/include/catalog/pg_trigger.h b/src/include/catalog/pg_trigger.h index f413caf34f..951d7d86e7 100644 --- a/src/include/catalog/pg_trigger.h +++ b/src/include/catalog/pg_trigger.h @@ -1,18 +1,17 @@ /*------------------------------------------------------------------------- * * pg_trigger.h - * definition of the system "trigger" relation (pg_trigger) - * along with the relation's initial contents. + * definition of the "trigger" system catalog (pg_trigger) * * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/include/catalog/pg_trigger.h * * NOTES - * the genbki.pl script reads this file and generates .bki - * information from the DATA() statements. + * The Catalog.pm module reads this file and derives schema + * information. * *------------------------------------------------------------------------- */ @@ -20,6 +19,7 @@ #define PG_TRIGGER_H #include "catalog/genbki.h" +#include "catalog/pg_trigger_d.h" /* ---------------- * pg_trigger definition. cpp turns this into @@ -31,9 +31,7 @@ * to be associated with a deferrable constraint. 
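The oid_symbol fields in the pg_tablespace.dat entries above replace the hand-written DEFAULTTABLESPACE_OID/GLOBALTABLESPACE_OID defines removed from pg_tablespace.h: genbki.pl now emits them into pg_tablespace_d.h with the same names and values. A minimal usage sketch (not part of the patch; the helper name is invented):

#include "postgres.h"
#include "catalog/pg_tablespace_d.h"

static bool
sketch_is_builtin_tablespace(Oid spcid)
{
	/* 1663 and 1664, as declared via oid_symbol in pg_tablespace.dat */
	return spcid == DEFAULTTABLESPACE_OID || spcid == GLOBALTABLESPACE_OID;
}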
* ---------------- */ -#define TriggerRelationId 2620 - -CATALOG(pg_trigger,2620) +CATALOG(pg_trigger,2620,TriggerRelationId) { Oid tgrelid; /* relation trigger is attached to */ NameData tgname; /* trigger's name */ @@ -71,28 +69,7 @@ CATALOG(pg_trigger,2620) */ typedef FormData_pg_trigger *Form_pg_trigger; -/* ---------------- - * compiler constants for pg_trigger - * ---------------- - */ -#define Natts_pg_trigger 17 -#define Anum_pg_trigger_tgrelid 1 -#define Anum_pg_trigger_tgname 2 -#define Anum_pg_trigger_tgfoid 3 -#define Anum_pg_trigger_tgtype 4 -#define Anum_pg_trigger_tgenabled 5 -#define Anum_pg_trigger_tgisinternal 6 -#define Anum_pg_trigger_tgconstrrelid 7 -#define Anum_pg_trigger_tgconstrindid 8 -#define Anum_pg_trigger_tgconstraint 9 -#define Anum_pg_trigger_tgdeferrable 10 -#define Anum_pg_trigger_tginitdeferred 11 -#define Anum_pg_trigger_tgnargs 12 -#define Anum_pg_trigger_tgattr 13 -#define Anum_pg_trigger_tgargs 14 -#define Anum_pg_trigger_tgqual 15 -#define Anum_pg_trigger_tgoldtable 16 -#define Anum_pg_trigger_tgnewtable 17 +#ifdef EXPOSE_TO_CLIENT_CODE /* Bits within tgtype */ #define TRIGGER_TYPE_ROW (1 << 0) @@ -153,4 +130,6 @@ typedef FormData_pg_trigger *Form_pg_trigger; #define TRIGGER_USES_TRANSITION_TABLE(namepointer) \ ((namepointer) != (char *) NULL) +#endif /* EXPOSE_TO_CLIENT_CODE */ + #endif /* PG_TRIGGER_H */ diff --git a/src/include/catalog/pg_ts_config.dat b/src/include/catalog/pg_ts_config.dat new file mode 100644 index 0000000000..142aa35ab6 --- /dev/null +++ b/src/include/catalog/pg_ts_config.dat @@ -0,0 +1,19 @@ +#---------------------------------------------------------------------- +# +# pg_ts_config.dat +# Initial contents of the pg_ts_config system catalog. +# +# Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group +# Portions Copyright (c) 1994, Regents of the University of California +# +# src/include/catalog/pg_ts_config.dat +# +#---------------------------------------------------------------------- + +[ + +{ oid => '3748', descr => 'simple configuration', + cfgname => 'simple', cfgnamespace => 'PGNSP', cfgowner => 'PGUID', + cfgparser => '3722' }, + +] diff --git a/src/include/catalog/pg_ts_config.h b/src/include/catalog/pg_ts_config.h index 0ba79a596f..ce93c2b7ff 100644 --- a/src/include/catalog/pg_ts_config.h +++ b/src/include/catalog/pg_ts_config.h @@ -1,20 +1,18 @@ /*------------------------------------------------------------------------- * * pg_ts_config.h - * definition of configuration of tsearch + * definition of the "text search configuration" system catalog + * (pg_ts_config) * * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/include/catalog/pg_ts_config.h * * NOTES - * the genbki.pl script reads this file and generates .bki - * information from the DATA() statements. - * - * XXX do NOT break up DATA() statements into multiple lines! - * the scripts are not as smart as you might think... + * The Catalog.pm module reads this file and derives schema + * information. * *------------------------------------------------------------------------- */ @@ -22,15 +20,14 @@ #define PG_TS_CONFIG_H #include "catalog/genbki.h" +#include "catalog/pg_ts_config_d.h" /* ---------------- * pg_ts_config definition. 
cpp turns this into * typedef struct FormData_pg_ts_config * ---------------- */ -#define TSConfigRelationId 3602 - -CATALOG(pg_ts_config,3602) +CATALOG(pg_ts_config,3602,TSConfigRelationId) { NameData cfgname; /* name of configuration */ Oid cfgnamespace; /* name space */ @@ -40,21 +37,4 @@ CATALOG(pg_ts_config,3602) typedef FormData_pg_ts_config *Form_pg_ts_config; -/* ---------------- - * compiler constants for pg_ts_config - * ---------------- - */ -#define Natts_pg_ts_config 4 -#define Anum_pg_ts_config_cfgname 1 -#define Anum_pg_ts_config_cfgnamespace 2 -#define Anum_pg_ts_config_cfgowner 3 -#define Anum_pg_ts_config_cfgparser 4 - -/* ---------------- - * initial contents of pg_ts_config - * ---------------- - */ -DATA(insert OID = 3748 ( "simple" PGNSP PGUID 3722 )); -DESCR("simple configuration"); - #endif /* PG_TS_CONFIG_H */ diff --git a/src/include/catalog/pg_ts_config_map.dat b/src/include/catalog/pg_ts_config_map.dat new file mode 100644 index 0000000000..097a9f5e6d --- /dev/null +++ b/src/include/catalog/pg_ts_config_map.dat @@ -0,0 +1,35 @@ +#---------------------------------------------------------------------- +# +# pg_ts_config_map.dat +# Initial contents of the pg_ts_config_map system catalog. +# +# Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group +# Portions Copyright (c) 1994, Regents of the University of California +# +# src/include/catalog/pg_ts_config_map.dat +# +#---------------------------------------------------------------------- + +[ + +{ mapcfg => '3748', maptokentype => '1', mapseqno => '1', mapdict => '3765' }, +{ mapcfg => '3748', maptokentype => '2', mapseqno => '1', mapdict => '3765' }, +{ mapcfg => '3748', maptokentype => '3', mapseqno => '1', mapdict => '3765' }, +{ mapcfg => '3748', maptokentype => '4', mapseqno => '1', mapdict => '3765' }, +{ mapcfg => '3748', maptokentype => '5', mapseqno => '1', mapdict => '3765' }, +{ mapcfg => '3748', maptokentype => '6', mapseqno => '1', mapdict => '3765' }, +{ mapcfg => '3748', maptokentype => '7', mapseqno => '1', mapdict => '3765' }, +{ mapcfg => '3748', maptokentype => '8', mapseqno => '1', mapdict => '3765' }, +{ mapcfg => '3748', maptokentype => '9', mapseqno => '1', mapdict => '3765' }, +{ mapcfg => '3748', maptokentype => '10', mapseqno => '1', mapdict => '3765' }, +{ mapcfg => '3748', maptokentype => '11', mapseqno => '1', mapdict => '3765' }, +{ mapcfg => '3748', maptokentype => '15', mapseqno => '1', mapdict => '3765' }, +{ mapcfg => '3748', maptokentype => '16', mapseqno => '1', mapdict => '3765' }, +{ mapcfg => '3748', maptokentype => '17', mapseqno => '1', mapdict => '3765' }, +{ mapcfg => '3748', maptokentype => '18', mapseqno => '1', mapdict => '3765' }, +{ mapcfg => '3748', maptokentype => '19', mapseqno => '1', mapdict => '3765' }, +{ mapcfg => '3748', maptokentype => '20', mapseqno => '1', mapdict => '3765' }, +{ mapcfg => '3748', maptokentype => '21', mapseqno => '1', mapdict => '3765' }, +{ mapcfg => '3748', maptokentype => '22', mapseqno => '1', mapdict => '3765' }, + +] diff --git a/src/include/catalog/pg_ts_config_map.h b/src/include/catalog/pg_ts_config_map.h index 3df05195be..5856323373 100644 --- a/src/include/catalog/pg_ts_config_map.h +++ b/src/include/catalog/pg_ts_config_map.h @@ -1,20 +1,18 @@ /*------------------------------------------------------------------------- * * pg_ts_config_map.h - * definition of token mappings for configurations of tsearch + * definition of the system catalog for text search token mappings + * (pg_ts_config_map) * * - * Portions 
Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/include/catalog/pg_ts_config_map.h * * NOTES - * the genbki.pl script reads this file and generates .bki - * information from the DATA() statements. - * - * XXX do NOT break up DATA() statements into multiple lines! - * the scripts are not as smart as you might think... + * The Catalog.pm module reads this file and derives schema + * information. * *------------------------------------------------------------------------- */ @@ -22,15 +20,14 @@ #define PG_TS_CONFIG_MAP_H #include "catalog/genbki.h" +#include "catalog/pg_ts_config_map_d.h" /* ---------------- * pg_ts_config_map definition. cpp turns this into * typedef struct FormData_pg_ts_config_map * ---------------- */ -#define TSConfigMapRelationId 3603 - -CATALOG(pg_ts_config_map,3603) BKI_WITHOUT_OIDS +CATALOG(pg_ts_config_map,3603,TSConfigMapRelationId) BKI_WITHOUT_OIDS { Oid mapcfg; /* OID of configuration owning this entry */ int32 maptokentype; /* token type from parser */ @@ -40,39 +37,4 @@ CATALOG(pg_ts_config_map,3603) BKI_WITHOUT_OIDS typedef FormData_pg_ts_config_map *Form_pg_ts_config_map; -/* ---------------- - * compiler constants for pg_ts_config_map - * ---------------- - */ -#define Natts_pg_ts_config_map 4 -#define Anum_pg_ts_config_map_mapcfg 1 -#define Anum_pg_ts_config_map_maptokentype 2 -#define Anum_pg_ts_config_map_mapseqno 3 -#define Anum_pg_ts_config_map_mapdict 4 - -/* ---------------- - * initial contents of pg_ts_config_map - * ---------------- - */ - -DATA(insert ( 3748 1 1 3765 )); -DATA(insert ( 3748 2 1 3765 )); -DATA(insert ( 3748 3 1 3765 )); -DATA(insert ( 3748 4 1 3765 )); -DATA(insert ( 3748 5 1 3765 )); -DATA(insert ( 3748 6 1 3765 )); -DATA(insert ( 3748 7 1 3765 )); -DATA(insert ( 3748 8 1 3765 )); -DATA(insert ( 3748 9 1 3765 )); -DATA(insert ( 3748 10 1 3765 )); -DATA(insert ( 3748 11 1 3765 )); -DATA(insert ( 3748 15 1 3765 )); -DATA(insert ( 3748 16 1 3765 )); -DATA(insert ( 3748 17 1 3765 )); -DATA(insert ( 3748 18 1 3765 )); -DATA(insert ( 3748 19 1 3765 )); -DATA(insert ( 3748 20 1 3765 )); -DATA(insert ( 3748 21 1 3765 )); -DATA(insert ( 3748 22 1 3765 )); - #endif /* PG_TS_CONFIG_MAP_H */ diff --git a/src/include/catalog/pg_ts_dict.dat b/src/include/catalog/pg_ts_dict.dat new file mode 100644 index 0000000000..6db8284bb7 --- /dev/null +++ b/src/include/catalog/pg_ts_dict.dat @@ -0,0 +1,20 @@ +#---------------------------------------------------------------------- +# +# pg_ts_dict.dat +# Initial contents of the pg_ts_dict system catalog. 
+# +# Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group +# Portions Copyright (c) 1994, Regents of the University of California +# +# src/include/catalog/pg_ts_dict.dat +# +#---------------------------------------------------------------------- + +[ + +{ oid => '3765', + descr => 'simple dictionary: just lower case and check for stopword', + dictname => 'simple', dictnamespace => 'PGNSP', dictowner => 'PGUID', + dicttemplate => '3727', dictinitoption => '_null_' }, + +] diff --git a/src/include/catalog/pg_ts_dict.h b/src/include/catalog/pg_ts_dict.h index 634ea703e3..cd5f150fe8 100644 --- a/src/include/catalog/pg_ts_dict.h +++ b/src/include/catalog/pg_ts_dict.h @@ -1,20 +1,17 @@ /*------------------------------------------------------------------------- * * pg_ts_dict.h - * definition of dictionaries for tsearch + * definition of the "text search dictionary" system catalog (pg_ts_dict) * * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/include/catalog/pg_ts_dict.h * * NOTES - * the genbki.pl script reads this file and generates .bki - * information from the DATA() statements. - * - * XXX do NOT break up DATA() statements into multiple lines! - * the scripts are not as smart as you might think... + * The Catalog.pm module reads this file and derives schema + * information. * *------------------------------------------------------------------------- */ @@ -22,15 +19,14 @@ #define PG_TS_DICT_H #include "catalog/genbki.h" +#include "catalog/pg_ts_dict_d.h" /* ---------------- * pg_ts_dict definition. cpp turns this into * typedef struct FormData_pg_ts_dict * ---------------- */ -#define TSDictionaryRelationId 3600 - -CATALOG(pg_ts_dict,3600) +CATALOG(pg_ts_dict,3600,TSDictionaryRelationId) { NameData dictname; /* dictionary name */ Oid dictnamespace; /* name space */ @@ -44,23 +40,4 @@ CATALOG(pg_ts_dict,3600) typedef FormData_pg_ts_dict *Form_pg_ts_dict; -/* ---------------- - * compiler constants for pg_ts_dict - * ---------------- - */ -#define Natts_pg_ts_dict 5 -#define Anum_pg_ts_dict_dictname 1 -#define Anum_pg_ts_dict_dictnamespace 2 -#define Anum_pg_ts_dict_dictowner 3 -#define Anum_pg_ts_dict_dicttemplate 4 -#define Anum_pg_ts_dict_dictinitoption 5 - -/* ---------------- - * initial contents of pg_ts_dict - * ---------------- - */ - -DATA(insert OID = 3765 ( "simple" PGNSP PGUID 3727 _null_)); -DESCR("simple dictionary: just lower case and check for stopword"); - #endif /* PG_TS_DICT_H */ diff --git a/src/include/catalog/pg_ts_parser.dat b/src/include/catalog/pg_ts_parser.dat new file mode 100644 index 0000000000..68a639b50d --- /dev/null +++ b/src/include/catalog/pg_ts_parser.dat @@ -0,0 +1,20 @@ +#---------------------------------------------------------------------- +# +# pg_ts_parser.dat +# Initial contents of the pg_ts_parser system catalog. 
+# +# Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group +# Portions Copyright (c) 1994, Regents of the University of California +# +# src/include/catalog/pg_ts_parser.dat +# +#---------------------------------------------------------------------- + +[ + +{ oid => '3722', descr => 'default word parser', + prsname => 'default', prsstart => 'prsd_start', prstoken => 'prsd_nexttoken', + prsend => 'prsd_end', prsheadline => 'prsd_headline', + prslextype => 'prsd_lextype' }, + +] diff --git a/src/include/catalog/pg_ts_parser.h b/src/include/catalog/pg_ts_parser.h index 96e09bdcd8..34585eac00 100644 --- a/src/include/catalog/pg_ts_parser.h +++ b/src/include/catalog/pg_ts_parser.h @@ -1,20 +1,17 @@ /*------------------------------------------------------------------------- * * pg_ts_parser.h - * definition of parsers for tsearch + * definition of the "text search parser" system catalog (pg_ts_parser) * * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/include/catalog/pg_ts_parser.h * * NOTES - * the genbki.pl script reads this file and generates .bki - * information from the DATA() statements. - * - * XXX do NOT break up DATA() statements into multiple lines! - * the scripts are not as smart as you might think... + * The Catalog.pm module reads this file and derives schema + * information. * *------------------------------------------------------------------------- */ @@ -22,46 +19,37 @@ #define PG_TS_PARSER_H #include "catalog/genbki.h" +#include "catalog/pg_ts_parser_d.h" /* ---------------- * pg_ts_parser definition. cpp turns this into * typedef struct FormData_pg_ts_parser * ---------------- */ -#define TSParserRelationId 3601 - -CATALOG(pg_ts_parser,3601) +CATALOG(pg_ts_parser,3601,TSParserRelationId) { - NameData prsname; /* parser's name */ - Oid prsnamespace; /* name space */ - regproc prsstart; /* init parsing session */ - regproc prstoken; /* return next token */ - regproc prsend; /* finalize parsing session */ - regproc prsheadline; /* return data for headline creation */ - regproc prslextype; /* return descriptions of lexeme's types */ -} FormData_pg_ts_parser; + /* parser's name */ + NameData prsname; -typedef FormData_pg_ts_parser *Form_pg_ts_parser; + /* name space */ + Oid prsnamespace BKI_DEFAULT(PGNSP); -/* ---------------- - * compiler constants for pg_ts_parser - * ---------------- - */ -#define Natts_pg_ts_parser 7 -#define Anum_pg_ts_parser_prsname 1 -#define Anum_pg_ts_parser_prsnamespace 2 -#define Anum_pg_ts_parser_prsstart 3 -#define Anum_pg_ts_parser_prstoken 4 -#define Anum_pg_ts_parser_prsend 5 -#define Anum_pg_ts_parser_prsheadline 6 -#define Anum_pg_ts_parser_prslextype 7 + /* init parsing session */ + regproc prsstart BKI_LOOKUP(pg_proc); -/* ---------------- - * initial contents of pg_ts_parser - * ---------------- - */ + /* return next token */ + regproc prstoken BKI_LOOKUP(pg_proc); + + /* finalize parsing session */ + regproc prsend BKI_LOOKUP(pg_proc); + + /* return data for headline creation */ + regproc prsheadline BKI_LOOKUP(pg_proc); -DATA(insert OID = 3722 ( "default" PGNSP prsd_start prsd_nexttoken prsd_end prsd_headline prsd_lextype )); -DESCR("default word parser"); + /* return descriptions of lexeme's types */ + regproc prslextype BKI_LOOKUP(pg_proc); +} FormData_pg_ts_parser; + +typedef FormData_pg_ts_parser *Form_pg_ts_parser; #endif /* PG_TS_PARSER_H */ 
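The Anum_pg_ts_parser_* and Natts_pg_ts_parser constants deleted above keep exactly the same names in the generated pg_ts_parser_d.h, so catalog-insertion code that fills values[]/nulls[] arrays is untouched by this restructuring. A minimal sketch of that pattern (not part of the patch; the helper name and argument list are invented, assuming the usual backend headers):

#include "postgres.h"
#include "access/htup_details.h"
#include "catalog/pg_ts_parser.h"
#include "utils/rel.h"

static HeapTuple
sketch_form_ts_parser_tuple(Relation prsRel, Name prsname, Oid nspoid,
							Oid startFn, Oid tokenFn, Oid endFn,
							Oid headlineFn, Oid lextypeFn)
{
	Datum		values[Natts_pg_ts_parser];
	bool		nulls[Natts_pg_ts_parser];

	memset(nulls, false, sizeof(nulls));
	values[Anum_pg_ts_parser_prsname - 1] = NameGetDatum(prsname);
	values[Anum_pg_ts_parser_prsnamespace - 1] = ObjectIdGetDatum(nspoid);
	values[Anum_pg_ts_parser_prsstart - 1] = ObjectIdGetDatum(startFn);
	values[Anum_pg_ts_parser_prstoken - 1] = ObjectIdGetDatum(tokenFn);
	values[Anum_pg_ts_parser_prsend - 1] = ObjectIdGetDatum(endFn);
	values[Anum_pg_ts_parser_prsheadline - 1] = ObjectIdGetDatum(headlineFn);
	values[Anum_pg_ts_parser_prslextype - 1] = ObjectIdGetDatum(lextypeFn);

	return heap_form_tuple(RelationGetDescr(prsRel), values, nulls);
}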
diff --git a/src/include/catalog/pg_ts_template.dat b/src/include/catalog/pg_ts_template.dat new file mode 100644 index 0000000000..8d750ecac0 --- /dev/null +++ b/src/include/catalog/pg_ts_template.dat @@ -0,0 +1,30 @@ +#---------------------------------------------------------------------- +# +# pg_ts_template.dat +# Initial contents of the pg_ts_template system catalog. +# +# Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group +# Portions Copyright (c) 1994, Regents of the University of California +# +# src/include/catalog/pg_ts_template.dat +# +#---------------------------------------------------------------------- + +[ + +{ oid => '3727', + descr => 'simple dictionary: just lower case and check for stopword', + tmplname => 'simple', tmplinit => 'dsimple_init', + tmpllexize => 'dsimple_lexize' }, +{ oid => '3730', descr => 'synonym dictionary: replace word by its synonym', + tmplname => 'synonym', tmplinit => 'dsynonym_init', + tmpllexize => 'dsynonym_lexize' }, +{ oid => '3733', descr => 'ispell dictionary', + tmplname => 'ispell', tmplinit => 'dispell_init', + tmpllexize => 'dispell_lexize' }, +{ oid => '3742', + descr => 'thesaurus dictionary: phrase by phrase substitution', + tmplname => 'thesaurus', tmplinit => 'thesaurus_init', + tmpllexize => 'thesaurus_lexize' }, + +] diff --git a/src/include/catalog/pg_ts_template.h b/src/include/catalog/pg_ts_template.h index dc0148c68b..50be08eca5 100644 --- a/src/include/catalog/pg_ts_template.h +++ b/src/include/catalog/pg_ts_template.h @@ -1,20 +1,17 @@ /*------------------------------------------------------------------------- * * pg_ts_template.h - * definition of dictionary templates for tsearch + * definition of the "text search template" system catalog (pg_ts_template) * * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/include/catalog/pg_ts_template.h * * NOTES - * the genbki.pl script reads this file and generates .bki - * information from the DATA() statements. - * - * XXX do NOT break up DATA() statements into multiple lines! - * the scripts are not as smart as you might think... + * The Catalog.pm module reads this file and derives schema + * information. * *------------------------------------------------------------------------- */ @@ -22,46 +19,28 @@ #define PG_TS_TEMPLATE_H #include "catalog/genbki.h" +#include "catalog/pg_ts_template_d.h" /* ---------------- * pg_ts_template definition. 
cpp turns this into * typedef struct FormData_pg_ts_template * ---------------- */ -#define TSTemplateRelationId 3764 - -CATALOG(pg_ts_template,3764) +CATALOG(pg_ts_template,3764,TSTemplateRelationId) { - NameData tmplname; /* template name */ - Oid tmplnamespace; /* name space */ - regproc tmplinit; /* initialization method of dict (may be 0) */ - regproc tmpllexize; /* base method of dictionary */ -} FormData_pg_ts_template; + /* template name */ + NameData tmplname; -typedef FormData_pg_ts_template *Form_pg_ts_template; + /* name space */ + Oid tmplnamespace BKI_DEFAULT(PGNSP); -/* ---------------- - * compiler constants for pg_ts_template - * ---------------- - */ -#define Natts_pg_ts_template 4 -#define Anum_pg_ts_template_tmplname 1 -#define Anum_pg_ts_template_tmplnamespace 2 -#define Anum_pg_ts_template_tmplinit 3 -#define Anum_pg_ts_template_tmpllexize 4 + /* initialization method of dict (may be 0) */ + regproc tmplinit BKI_LOOKUP(pg_proc); -/* ---------------- - * initial contents of pg_ts_template - * ---------------- - */ + /* base method of dictionary */ + regproc tmpllexize BKI_LOOKUP(pg_proc); +} FormData_pg_ts_template; -DATA(insert OID = 3727 ( "simple" PGNSP dsimple_init dsimple_lexize )); -DESCR("simple dictionary: just lower case and check for stopword"); -DATA(insert OID = 3730 ( "synonym" PGNSP dsynonym_init dsynonym_lexize )); -DESCR("synonym dictionary: replace word by its synonym"); -DATA(insert OID = 3733 ( "ispell" PGNSP dispell_init dispell_lexize )); -DESCR("ispell dictionary"); -DATA(insert OID = 3742 ( "thesaurus" PGNSP thesaurus_init thesaurus_lexize )); -DESCR("thesaurus dictionary: phrase by phrase substitution"); +typedef FormData_pg_ts_template *Form_pg_ts_template; #endif /* PG_TS_TEMPLATE_H */ diff --git a/src/include/catalog/pg_type.dat b/src/include/catalog/pg_type.dat new file mode 100644 index 0000000000..d295eae1b9 --- /dev/null +++ b/src/include/catalog/pg_type.dat @@ -0,0 +1,589 @@ +#---------------------------------------------------------------------- +# +# pg_type.dat +# Initial contents of the pg_type system catalog. +# +# Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group +# Portions Copyright (c) 1994, Regents of the University of California +# +# src/include/catalog/pg_type.dat +# +#---------------------------------------------------------------------- + +[ + +# For types used in the system catalogs, make sure the values here match +# TypInfo[] in bootstrap.c. + +# OID symbol macro names for pg_type OIDs are generated by genbki.pl +# according to the following rule, so you don't need to specify them +# here: +# foo_bar -> FOO_BAROID +# _foo_bar -> FOO_BARARRAYOID +# +# The only oid_symbol entries in this file are for names that don't match +# this rule, and are grandfathered in. + +# To autogenerate an array type, add 'array_type_oid => 'nnnn' to the element +# type, which will instruct genbki.pl to generate a BKI entry for it. +# In a few cases, the array type's properties don't match the normal pattern +# so it can't be autogenerated; in such cases do not write array_type_oid. + +# Once upon a time these entries were ordered by OID. Lately it's often +# been the custom to insert new entries adjacent to related older entries. +# Try to do one or the other though, don't just insert entries at random. 
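Following the OID-symbol rule spelled out above, the 'bool' entry just below (oid 16, array_type_oid 1000) yields BOOLOID and BOOLARRAYOID in the generated pg_type_d.h without any explicit oid_symbol field. A minimal sketch of using those generated macros (not part of the patch; the helper name is invented):

#include "postgres.h"
#include "catalog/pg_type_d.h"

static bool
sketch_is_bool_or_bool_array(Oid typid)
{
	/* BOOLOID (16) and BOOLARRAYOID (1000) follow the naming rule above */
	return typid == BOOLOID || typid == BOOLARRAYOID;
}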
+ +# OIDS 1 - 99 + +{ oid => '16', array_type_oid => '1000', + descr => 'boolean, \'true\'/\'false\'', + typname => 'bool', typlen => '1', typbyval => 't', typcategory => 'B', + typispreferred => 't', typinput => 'boolin', typoutput => 'boolout', + typreceive => 'boolrecv', typsend => 'boolsend', typalign => 'c' }, +{ oid => '17', array_type_oid => '1001', + descr => 'variable-length string, binary values escaped', + typname => 'bytea', typlen => '-1', typbyval => 'f', typcategory => 'U', + typinput => 'byteain', typoutput => 'byteaout', typreceive => 'bytearecv', + typsend => 'byteasend', typalign => 'i', typstorage => 'x' }, +{ oid => '18', array_type_oid => '1002', descr => 'single character', + typname => 'char', typlen => '1', typbyval => 't', typcategory => 'S', + typinput => 'charin', typoutput => 'charout', typreceive => 'charrecv', + typsend => 'charsend', typalign => 'c' }, +{ oid => '19', array_type_oid => '1003', + descr => '63-byte type for storing system identifiers', + typname => 'name', typlen => 'NAMEDATALEN', typbyval => 'f', + typcategory => 'S', typelem => 'char', typinput => 'namein', + typoutput => 'nameout', typreceive => 'namerecv', typsend => 'namesend', + typalign => 'c' }, +{ oid => '20', array_type_oid => '1016', + descr => '~18 digit integer, 8-byte storage', + typname => 'int8', typlen => '8', typbyval => 'FLOAT8PASSBYVAL', + typcategory => 'N', typinput => 'int8in', typoutput => 'int8out', + typreceive => 'int8recv', typsend => 'int8send', typalign => 'd' }, +{ oid => '21', array_type_oid => '1005', + descr => '-32 thousand to 32 thousand, 2-byte storage', + typname => 'int2', typlen => '2', typbyval => 't', typcategory => 'N', + typinput => 'int2in', typoutput => 'int2out', typreceive => 'int2recv', + typsend => 'int2send', typalign => 's' }, +{ oid => '22', array_type_oid => '1006', + descr => 'array of int2, used in system tables', + typname => 'int2vector', typlen => '-1', typbyval => 'f', typcategory => 'A', + typelem => 'int2', typinput => 'int2vectorin', typoutput => 'int2vectorout', + typreceive => 'int2vectorrecv', typsend => 'int2vectorsend', + typalign => 'i' }, +{ oid => '23', array_type_oid => '1007', + descr => '-2 billion to 2 billion integer, 4-byte storage', + typname => 'int4', typlen => '4', typbyval => 't', typcategory => 'N', + typinput => 'int4in', typoutput => 'int4out', typreceive => 'int4recv', + typsend => 'int4send', typalign => 'i' }, +{ oid => '24', array_type_oid => '1008', descr => 'registered procedure', + typname => 'regproc', typlen => '4', typbyval => 't', typcategory => 'N', + typinput => 'regprocin', typoutput => 'regprocout', + typreceive => 'regprocrecv', typsend => 'regprocsend', typalign => 'i' }, +{ oid => '25', array_type_oid => '1009', + descr => 'variable-length string, no limit specified', + typname => 'text', typlen => '-1', typbyval => 'f', typcategory => 'S', + typispreferred => 't', typinput => 'textin', typoutput => 'textout', + typreceive => 'textrecv', typsend => 'textsend', typalign => 'i', + typstorage => 'x', typcollation => '100' }, +{ oid => '26', array_type_oid => '1028', + descr => 'object identifier(oid), maximum 4 billion', + typname => 'oid', typlen => '4', typbyval => 't', typcategory => 'N', + typispreferred => 't', typinput => 'oidin', typoutput => 'oidout', + typreceive => 'oidrecv', typsend => 'oidsend', typalign => 'i' }, +{ oid => '27', array_type_oid => '1010', + descr => '(block, offset), physical location of tuple', + typname => 'tid', typlen => '6', typbyval => 'f', typcategory => 'U', 
+ typinput => 'tidin', typoutput => 'tidout', typreceive => 'tidrecv', + typsend => 'tidsend', typalign => 's' }, +{ oid => '28', array_type_oid => '1011', descr => 'transaction id', + typname => 'xid', typlen => '4', typbyval => 't', typcategory => 'U', + typinput => 'xidin', typoutput => 'xidout', typreceive => 'xidrecv', + typsend => 'xidsend', typalign => 'i' }, +{ oid => '29', array_type_oid => '1012', + descr => 'command identifier type, sequence in transaction id', + typname => 'cid', typlen => '4', typbyval => 't', typcategory => 'U', + typinput => 'cidin', typoutput => 'cidout', typreceive => 'cidrecv', + typsend => 'cidsend', typalign => 'i' }, +{ oid => '30', array_type_oid => '1013', + descr => 'array of oids, used in system tables', + typname => 'oidvector', typlen => '-1', typbyval => 'f', typcategory => 'A', + typelem => 'oid', typinput => 'oidvectorin', typoutput => 'oidvectorout', + typreceive => 'oidvectorrecv', typsend => 'oidvectorsend', typalign => 'i' }, + +# hand-built rowtype entries for bootstrapped catalogs +# NB: OIDs assigned here must match the BKI_ROWTYPE_OID declarations +{ oid => '71', + typname => 'pg_type', typlen => '-1', typbyval => 'f', typtype => 'c', + typcategory => 'C', typrelid => '1247', typinput => 'record_in', + typoutput => 'record_out', typreceive => 'record_recv', + typsend => 'record_send', typalign => 'd', typstorage => 'x' }, +{ oid => '75', + typname => 'pg_attribute', typlen => '-1', typbyval => 'f', typtype => 'c', + typcategory => 'C', typrelid => '1249', typinput => 'record_in', + typoutput => 'record_out', typreceive => 'record_recv', + typsend => 'record_send', typalign => 'd', typstorage => 'x' }, +{ oid => '81', + typname => 'pg_proc', typlen => '-1', typbyval => 'f', typtype => 'c', + typcategory => 'C', typrelid => '1255', typinput => 'record_in', + typoutput => 'record_out', typreceive => 'record_recv', + typsend => 'record_send', typalign => 'd', typstorage => 'x' }, +{ oid => '83', + typname => 'pg_class', typlen => '-1', typbyval => 'f', typtype => 'c', + typcategory => 'C', typrelid => '1259', typinput => 'record_in', + typoutput => 'record_out', typreceive => 'record_recv', + typsend => 'record_send', typalign => 'd', typstorage => 'x' }, + +# OIDS 100 - 199 + +{ oid => '114', array_type_oid => '199', descr => 'JSON stored as text', + typname => 'json', typlen => '-1', typbyval => 'f', typcategory => 'U', + typinput => 'json_in', typoutput => 'json_out', typreceive => 'json_recv', + typsend => 'json_send', typalign => 'i', typstorage => 'x' }, +{ oid => '142', array_type_oid => '143', descr => 'XML content', + typname => 'xml', typlen => '-1', typbyval => 'f', typcategory => 'U', + typinput => 'xml_in', typoutput => 'xml_out', typreceive => 'xml_recv', + typsend => 'xml_send', typalign => 'i', typstorage => 'x' }, +{ oid => '194', oid_symbol => 'PGNODETREEOID', + descr => 'string representing an internal node tree', + typname => 'pg_node_tree', typlen => '-1', typbyval => 'f', + typcategory => 'S', typinput => 'pg_node_tree_in', + typoutput => 'pg_node_tree_out', typreceive => 'pg_node_tree_recv', + typsend => 'pg_node_tree_send', typalign => 'i', typstorage => 'x', + typcollation => '100' }, +{ oid => '3361', oid_symbol => 'PGNDISTINCTOID', + descr => 'multivariate ndistinct coefficients', + typname => 'pg_ndistinct', typlen => '-1', typbyval => 'f', + typcategory => 'S', typinput => 'pg_ndistinct_in', + typoutput => 'pg_ndistinct_out', typreceive => 'pg_ndistinct_recv', + typsend => 'pg_ndistinct_send', typalign => 'i', 
typstorage => 'x', + typcollation => '100' }, +{ oid => '3402', oid_symbol => 'PGDEPENDENCIESOID', + descr => 'multivariate dependencies', + typname => 'pg_dependencies', typlen => '-1', typbyval => 'f', + typcategory => 'S', typinput => 'pg_dependencies_in', + typoutput => 'pg_dependencies_out', typreceive => 'pg_dependencies_recv', + typsend => 'pg_dependencies_send', typalign => 'i', typstorage => 'x', + typcollation => '100' }, +{ oid => '32', oid_symbol => 'PGDDLCOMMANDOID', + descr => 'internal type for passing CollectedCommand', + typname => 'pg_ddl_command', typlen => 'SIZEOF_POINTER', typbyval => 't', + typtype => 'p', typcategory => 'P', typinput => 'pg_ddl_command_in', + typoutput => 'pg_ddl_command_out', typreceive => 'pg_ddl_command_recv', + typsend => 'pg_ddl_command_send', typalign => 'ALIGNOF_POINTER' }, + +# OIDS 200 - 299 + +{ oid => '210', descr => 'storage manager', + typname => 'smgr', typlen => '2', typbyval => 't', typcategory => 'U', + typinput => 'smgrin', typoutput => 'smgrout', typreceive => '-', + typsend => '-', typalign => 's' }, + +# OIDS 600 - 699 + +{ oid => '600', array_type_oid => '1017', + descr => 'geometric point \'(x, y)\'', + typname => 'point', typlen => '16', typbyval => 'f', typcategory => 'G', + typelem => 'float8', typinput => 'point_in', typoutput => 'point_out', + typreceive => 'point_recv', typsend => 'point_send', typalign => 'd' }, +{ oid => '601', array_type_oid => '1018', + descr => 'geometric line segment \'(pt1,pt2)\'', + typname => 'lseg', typlen => '32', typbyval => 'f', typcategory => 'G', + typelem => 'point', typinput => 'lseg_in', typoutput => 'lseg_out', + typreceive => 'lseg_recv', typsend => 'lseg_send', typalign => 'd' }, +{ oid => '602', array_type_oid => '1019', + descr => 'geometric path \'(pt1,...)\'', + typname => 'path', typlen => '-1', typbyval => 'f', typcategory => 'G', + typinput => 'path_in', typoutput => 'path_out', typreceive => 'path_recv', + typsend => 'path_send', typalign => 'd', typstorage => 'x' }, +{ oid => '603', array_type_oid => '1020', + descr => 'geometric box \'(lower left,upper right)\'', + typname => 'box', typlen => '32', typbyval => 'f', typcategory => 'G', + typdelim => ';', typelem => 'point', typinput => 'box_in', + typoutput => 'box_out', typreceive => 'box_recv', typsend => 'box_send', + typalign => 'd' }, +{ oid => '604', array_type_oid => '1027', + descr => 'geometric polygon \'(pt1,...)\'', + typname => 'polygon', typlen => '-1', typbyval => 'f', typcategory => 'G', + typinput => 'poly_in', typoutput => 'poly_out', typreceive => 'poly_recv', + typsend => 'poly_send', typalign => 'd', typstorage => 'x' }, +{ oid => '628', array_type_oid => '629', descr => 'geometric line', + typname => 'line', typlen => '24', typbyval => 'f', typcategory => 'G', + typelem => 'float8', typinput => 'line_in', typoutput => 'line_out', + typreceive => 'line_recv', typsend => 'line_send', typalign => 'd' }, + +# OIDS 700 - 799 + +{ oid => '700', array_type_oid => '1021', + descr => 'single-precision floating point number, 4-byte storage', + typname => 'float4', typlen => '4', typbyval => 'FLOAT4PASSBYVAL', + typcategory => 'N', typinput => 'float4in', typoutput => 'float4out', + typreceive => 'float4recv', typsend => 'float4send', typalign => 'i' }, +{ oid => '701', array_type_oid => '1022', + descr => 'double-precision floating point number, 8-byte storage', + typname => 'float8', typlen => '8', typbyval => 'FLOAT8PASSBYVAL', + typcategory => 'N', typispreferred => 't', typinput => 'float8in', + typoutput => 
'float8out', typreceive => 'float8recv', typsend => 'float8send', + typalign => 'd' }, +{ oid => '705', descr => 'pseudo-type representing an undetermined type', + typname => 'unknown', typlen => '-2', typbyval => 'f', typtype => 'p', + typcategory => 'X', typinput => 'unknownin', typoutput => 'unknownout', + typreceive => 'unknownrecv', typsend => 'unknownsend', typalign => 'c' }, +{ oid => '718', array_type_oid => '719', + descr => 'geometric circle \'(center,radius)\'', + typname => 'circle', typlen => '24', typbyval => 'f', typcategory => 'G', + typinput => 'circle_in', typoutput => 'circle_out', + typreceive => 'circle_recv', typsend => 'circle_send', typalign => 'd' }, +{ oid => '790', oid_symbol => 'CASHOID', array_type_oid => '791', + descr => 'monetary amounts, $d,ddd.cc', + typname => 'money', typlen => '8', typbyval => 'FLOAT8PASSBYVAL', + typcategory => 'N', typinput => 'cash_in', typoutput => 'cash_out', + typreceive => 'cash_recv', typsend => 'cash_send', typalign => 'd' }, + +# OIDS 800 - 899 + +{ oid => '829', array_type_oid => '1040', + descr => 'XX:XX:XX:XX:XX:XX, MAC address', + typname => 'macaddr', typlen => '6', typbyval => 'f', typcategory => 'U', + typinput => 'macaddr_in', typoutput => 'macaddr_out', + typreceive => 'macaddr_recv', typsend => 'macaddr_send', typalign => 'i' }, +{ oid => '869', array_type_oid => '1041', + descr => 'IP address/netmask, host address, netmask optional', + typname => 'inet', typlen => '-1', typbyval => 'f', typcategory => 'I', + typispreferred => 't', typinput => 'inet_in', typoutput => 'inet_out', + typreceive => 'inet_recv', typsend => 'inet_send', typalign => 'i', + typstorage => 'm' }, +{ oid => '650', array_type_oid => '651', + descr => 'network IP address/netmask, network address', + typname => 'cidr', typlen => '-1', typbyval => 'f', typcategory => 'I', + typinput => 'cidr_in', typoutput => 'cidr_out', typreceive => 'cidr_recv', + typsend => 'cidr_send', typalign => 'i', typstorage => 'm' }, +{ oid => '774', array_type_oid => '775', + descr => 'XX:XX:XX:XX:XX:XX:XX:XX, MAC address', + typname => 'macaddr8', typlen => '8', typbyval => 'f', typcategory => 'U', + typinput => 'macaddr8_in', typoutput => 'macaddr8_out', + typreceive => 'macaddr8_recv', typsend => 'macaddr8_send', typalign => 'i' }, + +# OIDS 1000 - 1099 + +{ oid => '1033', array_type_oid => '1034', descr => 'access control list', + typname => 'aclitem', typlen => '12', typbyval => 'f', typcategory => 'U', + typinput => 'aclitemin', typoutput => 'aclitemout', typreceive => '-', + typsend => '-', typalign => 'i' }, +{ oid => '1042', array_type_oid => '1014', + descr => 'char(length), blank-padded string, fixed storage length', + typname => 'bpchar', typlen => '-1', typbyval => 'f', typcategory => 'S', + typinput => 'bpcharin', typoutput => 'bpcharout', typreceive => 'bpcharrecv', + typsend => 'bpcharsend', typmodin => 'bpchartypmodin', + typmodout => 'bpchartypmodout', typalign => 'i', typstorage => 'x', + typcollation => '100' }, +{ oid => '1043', array_type_oid => '1015', + descr => 'varchar(length), non-blank-padded string, variable storage length', + typname => 'varchar', typlen => '-1', typbyval => 'f', typcategory => 'S', + typinput => 'varcharin', typoutput => 'varcharout', + typreceive => 'varcharrecv', typsend => 'varcharsend', + typmodin => 'varchartypmodin', typmodout => 'varchartypmodout', + typalign => 'i', typstorage => 'x', typcollation => '100' }, +{ oid => '1082', array_type_oid => '1182', descr => 'date', + typname => 'date', typlen => '4', typbyval => 
't', typcategory => 'D', + typinput => 'date_in', typoutput => 'date_out', typreceive => 'date_recv', + typsend => 'date_send', typalign => 'i' }, +{ oid => '1083', array_type_oid => '1183', descr => 'time of day', + typname => 'time', typlen => '8', typbyval => 'FLOAT8PASSBYVAL', + typcategory => 'D', typinput => 'time_in', typoutput => 'time_out', + typreceive => 'time_recv', typsend => 'time_send', typmodin => 'timetypmodin', + typmodout => 'timetypmodout', typalign => 'd' }, + +# OIDS 1100 - 1199 + +{ oid => '1114', array_type_oid => '1115', descr => 'date and time', + typname => 'timestamp', typlen => '8', typbyval => 'FLOAT8PASSBYVAL', + typcategory => 'D', typinput => 'timestamp_in', typoutput => 'timestamp_out', + typreceive => 'timestamp_recv', typsend => 'timestamp_send', + typmodin => 'timestamptypmodin', typmodout => 'timestamptypmodout', + typalign => 'd' }, +{ oid => '1184', array_type_oid => '1185', + descr => 'date and time with time zone', + typname => 'timestamptz', typlen => '8', typbyval => 'FLOAT8PASSBYVAL', + typcategory => 'D', typispreferred => 't', typinput => 'timestamptz_in', + typoutput => 'timestamptz_out', typreceive => 'timestamptz_recv', + typsend => 'timestamptz_send', typmodin => 'timestamptztypmodin', + typmodout => 'timestamptztypmodout', typalign => 'd' }, +{ oid => '1186', array_type_oid => '1187', + descr => '@ , time interval', + typname => 'interval', typlen => '16', typbyval => 'f', typcategory => 'T', + typispreferred => 't', typinput => 'interval_in', typoutput => 'interval_out', + typreceive => 'interval_recv', typsend => 'interval_send', + typmodin => 'intervaltypmodin', typmodout => 'intervaltypmodout', + typalign => 'd' }, + +# OIDS 1200 - 1299 + +{ oid => '1266', array_type_oid => '1270', + descr => 'time of day with time zone', + typname => 'timetz', typlen => '12', typbyval => 'f', typcategory => 'D', + typinput => 'timetz_in', typoutput => 'timetz_out', + typreceive => 'timetz_recv', typsend => 'timetz_send', + typmodin => 'timetztypmodin', typmodout => 'timetztypmodout', + typalign => 'd' }, + +# OIDS 1500 - 1599 + +{ oid => '1560', array_type_oid => '1561', descr => 'fixed-length bit string', + typname => 'bit', typlen => '-1', typbyval => 'f', typcategory => 'V', + typinput => 'bit_in', typoutput => 'bit_out', typreceive => 'bit_recv', + typsend => 'bit_send', typmodin => 'bittypmodin', typmodout => 'bittypmodout', + typalign => 'i', typstorage => 'x' }, +{ oid => '1562', array_type_oid => '1563', + descr => 'variable-length bit string', + typname => 'varbit', typlen => '-1', typbyval => 'f', typcategory => 'V', + typispreferred => 't', typinput => 'varbit_in', typoutput => 'varbit_out', + typreceive => 'varbit_recv', typsend => 'varbit_send', + typmodin => 'varbittypmodin', typmodout => 'varbittypmodout', typalign => 'i', + typstorage => 'x' }, + +# OIDS 1700 - 1799 + +{ oid => '1700', array_type_oid => '1231', + descr => 'numeric(precision, decimal), arbitrary precision number', + typname => 'numeric', typlen => '-1', typbyval => 'f', typcategory => 'N', + typinput => 'numeric_in', typoutput => 'numeric_out', + typreceive => 'numeric_recv', typsend => 'numeric_send', + typmodin => 'numerictypmodin', typmodout => 'numerictypmodout', + typalign => 'i', typstorage => 'm' }, + +{ oid => '1790', array_type_oid => '2201', + descr => 'reference to cursor (portal name)', + typname => 'refcursor', typlen => '-1', typbyval => 'f', typcategory => 'U', + typinput => 'textin', typoutput => 'textout', typreceive => 'textrecv', + typsend => 
'textsend', typalign => 'i', typstorage => 'x' }, + +# OIDS 2200 - 2299 + +{ oid => '2202', array_type_oid => '2207', + descr => 'registered procedure (with args)', + typname => 'regprocedure', typlen => '4', typbyval => 't', typcategory => 'N', + typinput => 'regprocedurein', typoutput => 'regprocedureout', + typreceive => 'regprocedurerecv', typsend => 'regproceduresend', + typalign => 'i' }, +{ oid => '2203', array_type_oid => '2208', descr => 'registered operator', + typname => 'regoper', typlen => '4', typbyval => 't', typcategory => 'N', + typinput => 'regoperin', typoutput => 'regoperout', + typreceive => 'regoperrecv', typsend => 'regopersend', typalign => 'i' }, +{ oid => '2204', array_type_oid => '2209', + descr => 'registered operator (with args)', + typname => 'regoperator', typlen => '4', typbyval => 't', typcategory => 'N', + typinput => 'regoperatorin', typoutput => 'regoperatorout', + typreceive => 'regoperatorrecv', typsend => 'regoperatorsend', + typalign => 'i' }, +{ oid => '2205', array_type_oid => '2210', descr => 'registered class', + typname => 'regclass', typlen => '4', typbyval => 't', typcategory => 'N', + typinput => 'regclassin', typoutput => 'regclassout', + typreceive => 'regclassrecv', typsend => 'regclasssend', typalign => 'i' }, +{ oid => '2206', array_type_oid => '2211', descr => 'registered type', + typname => 'regtype', typlen => '4', typbyval => 't', typcategory => 'N', + typinput => 'regtypein', typoutput => 'regtypeout', + typreceive => 'regtyperecv', typsend => 'regtypesend', typalign => 'i' }, +{ oid => '4096', array_type_oid => '4097', descr => 'registered role', + typname => 'regrole', typlen => '4', typbyval => 't', typcategory => 'N', + typinput => 'regrolein', typoutput => 'regroleout', + typreceive => 'regrolerecv', typsend => 'regrolesend', typalign => 'i' }, +{ oid => '4089', array_type_oid => '4090', descr => 'registered namespace', + typname => 'regnamespace', typlen => '4', typbyval => 't', typcategory => 'N', + typinput => 'regnamespacein', typoutput => 'regnamespaceout', + typreceive => 'regnamespacerecv', typsend => 'regnamespacesend', + typalign => 'i' }, + +# uuid +{ oid => '2950', array_type_oid => '2951', descr => 'UUID datatype', + typname => 'uuid', typlen => '16', typbyval => 'f', typcategory => 'U', + typinput => 'uuid_in', typoutput => 'uuid_out', typreceive => 'uuid_recv', + typsend => 'uuid_send', typalign => 'c' }, + +# pg_lsn +{ oid => '3220', oid_symbol => 'LSNOID', array_type_oid => '3221', + descr => 'PostgreSQL LSN datatype', + typname => 'pg_lsn', typlen => '8', typbyval => 'FLOAT8PASSBYVAL', + typcategory => 'U', typinput => 'pg_lsn_in', typoutput => 'pg_lsn_out', + typreceive => 'pg_lsn_recv', typsend => 'pg_lsn_send', typalign => 'd' }, + +# text search +{ oid => '3614', array_type_oid => '3643', + descr => 'text representation for text search', + typname => 'tsvector', typlen => '-1', typbyval => 'f', typcategory => 'U', + typinput => 'tsvectorin', typoutput => 'tsvectorout', + typreceive => 'tsvectorrecv', typsend => 'tsvectorsend', + typanalyze => 'ts_typanalyze', typalign => 'i', typstorage => 'x' }, +{ oid => '3642', array_type_oid => '3644', + descr => 'GiST index internal text representation for text search', + typname => 'gtsvector', typlen => '-1', typbyval => 'f', typcategory => 'U', + typinput => 'gtsvectorin', typoutput => 'gtsvectorout', typreceive => '-', + typsend => '-', typalign => 'i' }, +{ oid => '3615', array_type_oid => '3645', + descr => 'query representation for text search', + typname => 
'tsquery', typlen => '-1', typbyval => 'f', typcategory => 'U', + typinput => 'tsqueryin', typoutput => 'tsqueryout', + typreceive => 'tsqueryrecv', typsend => 'tsquerysend', typalign => 'i' }, +{ oid => '3734', array_type_oid => '3735', + descr => 'registered text search configuration', + typname => 'regconfig', typlen => '4', typbyval => 't', typcategory => 'N', + typinput => 'regconfigin', typoutput => 'regconfigout', + typreceive => 'regconfigrecv', typsend => 'regconfigsend', typalign => 'i' }, +{ oid => '3769', array_type_oid => '3770', + descr => 'registered text search dictionary', + typname => 'regdictionary', typlen => '4', typbyval => 't', + typcategory => 'N', typinput => 'regdictionaryin', + typoutput => 'regdictionaryout', typreceive => 'regdictionaryrecv', + typsend => 'regdictionarysend', typalign => 'i' }, + +# jsonb +{ oid => '3802', array_type_oid => '3807', descr => 'Binary JSON', + typname => 'jsonb', typlen => '-1', typbyval => 'f', typcategory => 'U', + typinput => 'jsonb_in', typoutput => 'jsonb_out', typreceive => 'jsonb_recv', + typsend => 'jsonb_send', typalign => 'i', typstorage => 'x' }, + +{ oid => '2970', array_type_oid => '2949', descr => 'txid snapshot', + typname => 'txid_snapshot', typlen => '-1', typbyval => 'f', + typcategory => 'U', typinput => 'txid_snapshot_in', + typoutput => 'txid_snapshot_out', typreceive => 'txid_snapshot_recv', + typsend => 'txid_snapshot_send', typalign => 'd', typstorage => 'x' }, + +# range types +{ oid => '3904', array_type_oid => '3905', descr => 'range of integers', + typname => 'int4range', typlen => '-1', typbyval => 'f', typtype => 'r', + typcategory => 'R', typinput => 'range_in', typoutput => 'range_out', + typreceive => 'range_recv', typsend => 'range_send', + typanalyze => 'range_typanalyze', typalign => 'i', typstorage => 'x' }, +{ oid => '3906', array_type_oid => '3907', descr => 'range of numerics', + typname => 'numrange', typlen => '-1', typbyval => 'f', typtype => 'r', + typcategory => 'R', typinput => 'range_in', typoutput => 'range_out', + typreceive => 'range_recv', typsend => 'range_send', + typanalyze => 'range_typanalyze', typalign => 'i', typstorage => 'x' }, +{ oid => '3908', array_type_oid => '3909', + descr => 'range of timestamps without time zone', + typname => 'tsrange', typlen => '-1', typbyval => 'f', typtype => 'r', + typcategory => 'R', typinput => 'range_in', typoutput => 'range_out', + typreceive => 'range_recv', typsend => 'range_send', + typanalyze => 'range_typanalyze', typalign => 'd', typstorage => 'x' }, +{ oid => '3910', array_type_oid => '3911', + descr => 'range of timestamps with time zone', + typname => 'tstzrange', typlen => '-1', typbyval => 'f', typtype => 'r', + typcategory => 'R', typinput => 'range_in', typoutput => 'range_out', + typreceive => 'range_recv', typsend => 'range_send', + typanalyze => 'range_typanalyze', typalign => 'd', typstorage => 'x' }, +{ oid => '3912', array_type_oid => '3913', descr => 'range of dates', + typname => 'daterange', typlen => '-1', typbyval => 'f', typtype => 'r', + typcategory => 'R', typinput => 'range_in', typoutput => 'range_out', + typreceive => 'range_recv', typsend => 'range_send', + typanalyze => 'range_typanalyze', typalign => 'i', typstorage => 'x' }, +{ oid => '3926', array_type_oid => '3927', descr => 'range of bigints', + typname => 'int8range', typlen => '-1', typbyval => 'f', typtype => 'r', + typcategory => 'R', typinput => 'range_in', typoutput => 'range_out', + typreceive => 'range_recv', typsend => 'range_send', + 
typanalyze => 'range_typanalyze', typalign => 'd', typstorage => 'x' }, + +# pseudo-types +# types with typtype='p' represent various special cases in the type system. +# These cannot be used to define table columns, but are valid as function +# argument and result types (if supported by the function's implementation +# language). +# Note: cstring is a borderline case; it is still considered a pseudo-type, +# but there is now support for it in records and arrays. Perhaps we should +# just treat it as a regular base type? + +{ oid => '2249', descr => 'pseudo-type representing any composite type', + typname => 'record', typlen => '-1', typbyval => 'f', typtype => 'p', + typcategory => 'P', typarray => '_record', typinput => 'record_in', + typoutput => 'record_out', typreceive => 'record_recv', + typsend => 'record_send', typalign => 'd', typstorage => 'x' }, +# Arrays of records have typcategory P, so they can't be autogenerated. +{ oid => '2287', + typname => '_record', typlen => '-1', typbyval => 'f', typtype => 'p', + typcategory => 'P', typelem => 'record', typinput => 'array_in', + typoutput => 'array_out', typreceive => 'array_recv', typsend => 'array_send', + typanalyze => 'array_typanalyze', typalign => 'd', typstorage => 'x' }, +{ oid => '2275', array_type_oid => '1263', descr => 'C-style string', + typname => 'cstring', typlen => '-2', typbyval => 'f', typtype => 'p', + typcategory => 'P', typinput => 'cstring_in', typoutput => 'cstring_out', + typreceive => 'cstring_recv', typsend => 'cstring_send', typalign => 'c' }, +{ oid => '2276', descr => 'pseudo-type representing any type', + typname => 'any', typlen => '4', typbyval => 't', typtype => 'p', + typcategory => 'P', typinput => 'any_in', typoutput => 'any_out', + typreceive => '-', typsend => '-', typalign => 'i' }, +{ oid => '2277', descr => 'pseudo-type representing a polymorphic array type', + typname => 'anyarray', typlen => '-1', typbyval => 'f', typtype => 'p', + typcategory => 'P', typinput => 'anyarray_in', typoutput => 'anyarray_out', + typreceive => 'anyarray_recv', typsend => 'anyarray_send', typalign => 'd', + typstorage => 'x' }, +{ oid => '2278', + descr => 'pseudo-type for the result of a function with no real result', + typname => 'void', typlen => '4', typbyval => 't', typtype => 'p', + typcategory => 'P', typinput => 'void_in', typoutput => 'void_out', + typreceive => 'void_recv', typsend => 'void_send', typalign => 'i' }, +{ oid => '2279', descr => 'pseudo-type for the result of a trigger function', + typname => 'trigger', typlen => '4', typbyval => 't', typtype => 'p', + typcategory => 'P', typinput => 'trigger_in', typoutput => 'trigger_out', + typreceive => '-', typsend => '-', typalign => 'i' }, +{ oid => '3838', oid_symbol => 'EVTTRIGGEROID', + descr => 'pseudo-type for the result of an event trigger function', + typname => 'event_trigger', typlen => '4', typbyval => 't', typtype => 'p', + typcategory => 'P', typinput => 'event_trigger_in', + typoutput => 'event_trigger_out', typreceive => '-', typsend => '-', + typalign => 'i' }, +{ oid => '2280', + descr => 'pseudo-type for the result of a language handler function', + typname => 'language_handler', typlen => '4', typbyval => 't', typtype => 'p', + typcategory => 'P', typinput => 'language_handler_in', + typoutput => 'language_handler_out', typreceive => '-', typsend => '-', + typalign => 'i' }, +{ oid => '2281', + descr => 'pseudo-type representing an internal data structure', + typname => 'internal', typlen => 'SIZEOF_POINTER', typbyval => 't', + 
typtype => 'p', typcategory => 'P', typinput => 'internal_in', + typoutput => 'internal_out', typreceive => '-', typsend => '-', + typalign => 'ALIGNOF_POINTER' }, +{ oid => '2282', descr => 'obsolete, deprecated pseudo-type', + typname => 'opaque', typlen => '4', typbyval => 't', typtype => 'p', + typcategory => 'P', typinput => 'opaque_in', typoutput => 'opaque_out', + typreceive => '-', typsend => '-', typalign => 'i' }, +{ oid => '2283', descr => 'pseudo-type representing a polymorphic base type', + typname => 'anyelement', typlen => '4', typbyval => 't', typtype => 'p', + typcategory => 'P', typinput => 'anyelement_in', + typoutput => 'anyelement_out', typreceive => '-', typsend => '-', + typalign => 'i' }, +{ oid => '2776', + descr => 'pseudo-type representing a polymorphic base type that is not an array', + typname => 'anynonarray', typlen => '4', typbyval => 't', typtype => 'p', + typcategory => 'P', typinput => 'anynonarray_in', + typoutput => 'anynonarray_out', typreceive => '-', typsend => '-', + typalign => 'i' }, +{ oid => '3500', + descr => 'pseudo-type representing a polymorphic base type that is an enum', + typname => 'anyenum', typlen => '4', typbyval => 't', typtype => 'p', + typcategory => 'P', typinput => 'anyenum_in', typoutput => 'anyenum_out', + typreceive => '-', typsend => '-', typalign => 'i' }, +{ oid => '3115', + descr => 'pseudo-type for the result of an FDW handler function', + typname => 'fdw_handler', typlen => '4', typbyval => 't', typtype => 'p', + typcategory => 'P', typinput => 'fdw_handler_in', + typoutput => 'fdw_handler_out', typreceive => '-', typsend => '-', + typalign => 'i' }, +{ oid => '325', + descr => 'pseudo-type for the result of an index AM handler function', + typname => 'index_am_handler', typlen => '4', typbyval => 't', typtype => 'p', + typcategory => 'P', typinput => 'index_am_handler_in', + typoutput => 'index_am_handler_out', typreceive => '-', typsend => '-', + typalign => 'i' }, +{ oid => '3310', + descr => 'pseudo-type for the result of a tablesample method function', + typname => 'tsm_handler', typlen => '4', typbyval => 't', typtype => 'p', + typcategory => 'P', typinput => 'tsm_handler_in', + typoutput => 'tsm_handler_out', typreceive => '-', typsend => '-', + typalign => 'i' }, +{ oid => '3831', + descr => 'pseudo-type representing a polymorphic base type that is a range', + typname => 'anyrange', typlen => '-1', typbyval => 'f', typtype => 'p', + typcategory => 'P', typinput => 'anyrange_in', typoutput => 'anyrange_out', + typreceive => '-', typsend => '-', typalign => 'd', typstorage => 'x' }, + +] diff --git a/src/include/catalog/pg_type.h b/src/include/catalog/pg_type.h index ffdb452b02..87c9b673cf 100644 --- a/src/include/catalog/pg_type.h +++ b/src/include/catalog/pg_type.h @@ -1,18 +1,17 @@ /*------------------------------------------------------------------------- * * pg_type.h - * definition of the system "type" relation (pg_type) - * along with the relation's initial contents. + * definition of the "type" system catalog (pg_type) * * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/include/catalog/pg_type.h * * NOTES - * the genbki.pl script reads this file and generates .bki - * information from the DATA() statements. + * The Catalog.pm module reads this file and derives schema + * information. 
* *------------------------------------------------------------------------- */ @@ -20,6 +19,10 @@ #define PG_TYPE_H #include "catalog/genbki.h" +#include "catalog/pg_type_d.h" + +#include "catalog/objectaddress.h" +#include "nodes/nodes.h" /* ---------------- * pg_type definition. cpp turns this into @@ -31,14 +34,16 @@ * See struct FormData_pg_attribute for details. * ---------------- */ -#define TypeRelationId 1247 -#define TypeRelation_Rowtype_Id 71 - -CATALOG(pg_type,1247) BKI_BOOTSTRAP BKI_ROWTYPE_OID(71) BKI_SCHEMA_MACRO +CATALOG(pg_type,1247,TypeRelationId) BKI_BOOTSTRAP BKI_ROWTYPE_OID(71,TypeRelation_Rowtype_Id) BKI_SCHEMA_MACRO { - NameData typname; /* type name */ - Oid typnamespace; /* OID of namespace containing this type */ - Oid typowner; /* type owner */ + /* type name */ + NameData typname; + + /* OID of namespace containing this type */ + Oid typnamespace BKI_DEFAULT(PGNSP); + + /* type owner */ + Oid typowner BKI_DEFAULT(PGUID); /* * For a fixed-size type, typlen is the number of bytes we use to @@ -47,17 +52,17 @@ CATALOG(pg_type,1247) BKI_BOOTSTRAP BKI_ROWTYPE_OID(71) BKI_SCHEMA_MACRO * "varlena" type (one that has a length word), -2 to indicate a * null-terminated C string. */ - int16 typlen; + int16 typlen BKI_ARRAY_DEFAULT(-1); /* * typbyval determines whether internal Postgres routines pass a value of - * this type by value or by reference. typbyval had better be FALSE if + * this type by value or by reference. typbyval had better be false if * the length is not 1, 2, or 4 (or 8 on 8-byte-Datum machines). * Variable-length types are always passed by reference. Note that - * typbyval can be false even if the length would allow pass-by-value; - * this is currently true for type float4, for example. + * typbyval can be false even if the length would allow pass-by-value; for + * example, type macaddr8 is pass-by-ref even when Datum is 8 bytes. */ - bool typbyval; + bool typbyval BKI_ARRAY_DEFAULT(f); /* * typtype is 'b' for a base type, 'c' for a composite type (e.g., a @@ -66,7 +71,7 @@ CATALOG(pg_type,1247) BKI_BOOTSTRAP BKI_ROWTYPE_OID(71) BKI_SCHEMA_MACRO * * If typtype is 'c', typrelid is the OID of the class' entry in pg_class. */ - char typtype; + char typtype BKI_DEFAULT(b) BKI_ARRAY_DEFAULT(b); /* * typcategory and typispreferred help the parser distinguish preferred @@ -74,19 +79,25 @@ CATALOG(pg_type,1247) BKI_BOOTSTRAP BKI_ROWTYPE_OID(71) BKI_SCHEMA_MACRO * character (but not \0). The categories used for built-in types are * identified by the TYPCATEGORY macros below. */ - char typcategory; /* arbitrary type classification */ - bool typispreferred; /* is type "preferred" within its category? */ + /* arbitrary type classification */ + char typcategory BKI_ARRAY_DEFAULT(A); + + /* is type "preferred" within its category? */ + bool typispreferred BKI_DEFAULT(f) BKI_ARRAY_DEFAULT(f); /* * If typisdefined is false, the entry is only a placeholder (forward - * reference). We know the type name, but not yet anything else about it. + * reference). We know the type's name and owner, but not yet anything + * else about it. */ - bool typisdefined; + bool typisdefined BKI_DEFAULT(t); - char typdelim; /* delimiter for arrays of this type */ + /* delimiter for arrays of this type */ + char typdelim BKI_DEFAULT(','); - Oid typrelid; /* 0 if not a composite type */ + /* associated pg_class OID if a composite type, else 0 */ + Oid typrelid BKI_DEFAULT(0) BKI_ARRAY_DEFAULT(0); /* * If typelem is not 0 then it identifies another row in pg_type. 
The @@ -99,32 +110,36 @@ CATALOG(pg_type,1247) BKI_BOOTSTRAP BKI_ROWTYPE_OID(71) BKI_SCHEMA_MACRO * * typelem != 0 and typlen == -1. */ - Oid typelem; + Oid typelem BKI_DEFAULT(0) BKI_LOOKUP(pg_type); /* * If there is a "true" array type having this type as element type, * typarray links to it. Zero if no associated "true" array type. */ - Oid typarray; + Oid typarray BKI_DEFAULT(0) BKI_ARRAY_DEFAULT(0) BKI_LOOKUP(pg_type); /* * I/O conversion procedures for the datatype. */ - regproc typinput; /* text format (required) */ - regproc typoutput; - regproc typreceive; /* binary format (optional) */ - regproc typsend; + + /* text format (required) */ + regproc typinput BKI_ARRAY_DEFAULT(array_in) BKI_LOOKUP(pg_proc); + regproc typoutput BKI_ARRAY_DEFAULT(array_out) BKI_LOOKUP(pg_proc); + + /* binary format (optional) */ + regproc typreceive BKI_ARRAY_DEFAULT(array_recv) BKI_LOOKUP(pg_proc); + regproc typsend BKI_ARRAY_DEFAULT(array_send) BKI_LOOKUP(pg_proc); /* * I/O functions for optional type modifiers. */ - regproc typmodin; - regproc typmodout; + regproc typmodin BKI_DEFAULT(-) BKI_LOOKUP(pg_proc); + regproc typmodout BKI_DEFAULT(-) BKI_LOOKUP(pg_proc); /* * Custom ANALYZE procedure for the datatype (0 selects the default). */ - regproc typanalyze; + regproc typanalyze BKI_DEFAULT(-) BKI_ARRAY_DEFAULT(array_typanalyze) BKI_LOOKUP(pg_proc); /* ---------------- * typalign is the alignment required when storing a value of this @@ -162,7 +177,7 @@ CATALOG(pg_type,1247) BKI_BOOTSTRAP BKI_ROWTYPE_OID(71) BKI_SCHEMA_MACRO * 'm' MAIN like 'x' but try to keep in main tuple * ---------------- */ - char typstorage; + char typstorage BKI_DEFAULT(p) BKI_ARRAY_DEFAULT(x); /* * This flag represents a "NOT NULL" constraint against this datatype. @@ -172,32 +187,32 @@ CATALOG(pg_type,1247) BKI_BOOTSTRAP BKI_ROWTYPE_OID(71) BKI_SCHEMA_MACRO * * Used primarily for domain types. */ - bool typnotnull; + bool typnotnull BKI_DEFAULT(f); /* * Domains use typbasetype to show the base (or domain) type that the * domain is based on. Zero if the type is not a domain. */ - Oid typbasetype; + Oid typbasetype BKI_DEFAULT(0); /* * Domains use typtypmod to record the typmod to be applied to their base * type (-1 if base type does not use a typmod). -1 if this type is not a * domain. */ - int32 typtypmod; + int32 typtypmod BKI_DEFAULT(-1); /* * typndims is the declared number of dimensions for an array domain type * (i.e., typbasetype is an array type). Otherwise zero. */ - int32 typndims; + int32 typndims BKI_DEFAULT(0); /* * Collation: 0 if type cannot use collations, DEFAULT_COLLATION_OID for * collatable base types, possibly other OID for domains */ - Oid typcollation; + Oid typcollation BKI_DEFAULT(0); #ifdef CATALOG_VARLEN /* variable-length fields start here */ @@ -206,7 +221,7 @@ CATALOG(pg_type,1247) BKI_BOOTSTRAP BKI_ROWTYPE_OID(71) BKI_SCHEMA_MACRO * a default expression for the type. Currently this is only used for * domains. */ - pg_node_tree typdefaultbin; + pg_node_tree typdefaultbin BKI_DEFAULT(_null_) BKI_ARRAY_DEFAULT(_null_); /* * typdefault is NULL if the type has no associated default value. If @@ -216,12 +231,12 @@ CATALOG(pg_type,1247) BKI_BOOTSTRAP BKI_ROWTYPE_OID(71) BKI_SCHEMA_MACRO * external representation of the type's default value, which may be fed * to the type's input converter to produce a constant. 
*/ - text typdefault; + text typdefault BKI_DEFAULT(_null_) BKI_ARRAY_DEFAULT(_null_); /* * Access permissions */ - aclitem typacl[1]; + aclitem typacl[1] BKI_DEFAULT(_null_); #endif } FormData_pg_type; @@ -232,490 +247,10 @@ CATALOG(pg_type,1247) BKI_BOOTSTRAP BKI_ROWTYPE_OID(71) BKI_SCHEMA_MACRO */ typedef FormData_pg_type *Form_pg_type; -/* ---------------- - * compiler constants for pg_type - * ---------------- - */ -#define Natts_pg_type 30 -#define Anum_pg_type_typname 1 -#define Anum_pg_type_typnamespace 2 -#define Anum_pg_type_typowner 3 -#define Anum_pg_type_typlen 4 -#define Anum_pg_type_typbyval 5 -#define Anum_pg_type_typtype 6 -#define Anum_pg_type_typcategory 7 -#define Anum_pg_type_typispreferred 8 -#define Anum_pg_type_typisdefined 9 -#define Anum_pg_type_typdelim 10 -#define Anum_pg_type_typrelid 11 -#define Anum_pg_type_typelem 12 -#define Anum_pg_type_typarray 13 -#define Anum_pg_type_typinput 14 -#define Anum_pg_type_typoutput 15 -#define Anum_pg_type_typreceive 16 -#define Anum_pg_type_typsend 17 -#define Anum_pg_type_typmodin 18 -#define Anum_pg_type_typmodout 19 -#define Anum_pg_type_typanalyze 20 -#define Anum_pg_type_typalign 21 -#define Anum_pg_type_typstorage 22 -#define Anum_pg_type_typnotnull 23 -#define Anum_pg_type_typbasetype 24 -#define Anum_pg_type_typtypmod 25 -#define Anum_pg_type_typndims 26 -#define Anum_pg_type_typcollation 27 -#define Anum_pg_type_typdefaultbin 28 -#define Anum_pg_type_typdefault 29 -#define Anum_pg_type_typacl 30 - - -/* ---------------- - * initial contents of pg_type - * ---------------- - */ +#ifdef EXPOSE_TO_CLIENT_CODE /* - * Keep the following ordered by OID so that later changes can be made more - * easily. - * - * For types used in the system catalogs, make sure the values here match - * TypInfo[] in bootstrap.c. 
- */ - -/* OIDS 1 - 99 */ -DATA(insert OID = 16 ( bool PGNSP PGUID 1 t b B t t \054 0 0 1000 boolin boolout boolrecv boolsend - - - c p f 0 -1 0 0 _null_ _null_ _null_ )); -DESCR("boolean, 'true'/'false'"); -#define BOOLOID 16 - -DATA(insert OID = 17 ( bytea PGNSP PGUID -1 f b U f t \054 0 0 1001 byteain byteaout bytearecv byteasend - - - i x f 0 -1 0 0 _null_ _null_ _null_ )); -DESCR("variable-length string, binary values escaped"); -#define BYTEAOID 17 - -DATA(insert OID = 18 ( char PGNSP PGUID 1 t b S f t \054 0 0 1002 charin charout charrecv charsend - - - c p f 0 -1 0 0 _null_ _null_ _null_ )); -DESCR("single character"); -#define CHAROID 18 - -DATA(insert OID = 19 ( name PGNSP PGUID NAMEDATALEN f b S f t \054 0 18 1003 namein nameout namerecv namesend - - - c p f 0 -1 0 0 _null_ _null_ _null_ )); -DESCR("63-byte type for storing system identifiers"); -#define NAMEOID 19 - -DATA(insert OID = 20 ( int8 PGNSP PGUID 8 FLOAT8PASSBYVAL b N f t \054 0 0 1016 int8in int8out int8recv int8send - - - d p f 0 -1 0 0 _null_ _null_ _null_ )); -DESCR("~18 digit integer, 8-byte storage"); -#define INT8OID 20 - -DATA(insert OID = 21 ( int2 PGNSP PGUID 2 t b N f t \054 0 0 1005 int2in int2out int2recv int2send - - - s p f 0 -1 0 0 _null_ _null_ _null_ )); -DESCR("-32 thousand to 32 thousand, 2-byte storage"); -#define INT2OID 21 - -DATA(insert OID = 22 ( int2vector PGNSP PGUID -1 f b A f t \054 0 21 1006 int2vectorin int2vectorout int2vectorrecv int2vectorsend - - - i p f 0 -1 0 0 _null_ _null_ _null_ )); -DESCR("array of int2, used in system tables"); -#define INT2VECTOROID 22 - -DATA(insert OID = 23 ( int4 PGNSP PGUID 4 t b N f t \054 0 0 1007 int4in int4out int4recv int4send - - - i p f 0 -1 0 0 _null_ _null_ _null_ )); -DESCR("-2 billion to 2 billion integer, 4-byte storage"); -#define INT4OID 23 - -DATA(insert OID = 24 ( regproc PGNSP PGUID 4 t b N f t \054 0 0 1008 regprocin regprocout regprocrecv regprocsend - - - i p f 0 -1 0 0 _null_ _null_ _null_ )); -DESCR("registered procedure"); -#define REGPROCOID 24 - -DATA(insert OID = 25 ( text PGNSP PGUID -1 f b S t t \054 0 0 1009 textin textout textrecv textsend - - - i x f 0 -1 0 100 _null_ _null_ _null_ )); -DESCR("variable-length string, no limit specified"); -#define TEXTOID 25 - -DATA(insert OID = 26 ( oid PGNSP PGUID 4 t b N t t \054 0 0 1028 oidin oidout oidrecv oidsend - - - i p f 0 -1 0 0 _null_ _null_ _null_ )); -DESCR("object identifier(oid), maximum 4 billion"); -#define OIDOID 26 - -DATA(insert OID = 27 ( tid PGNSP PGUID 6 f b U f t \054 0 0 1010 tidin tidout tidrecv tidsend - - - s p f 0 -1 0 0 _null_ _null_ _null_ )); -DESCR("(block, offset), physical location of tuple"); -#define TIDOID 27 - -DATA(insert OID = 28 ( xid PGNSP PGUID 4 t b U f t \054 0 0 1011 xidin xidout xidrecv xidsend - - - i p f 0 -1 0 0 _null_ _null_ _null_ )); -DESCR("transaction id"); -#define XIDOID 28 - -DATA(insert OID = 29 ( cid PGNSP PGUID 4 t b U f t \054 0 0 1012 cidin cidout cidrecv cidsend - - - i p f 0 -1 0 0 _null_ _null_ _null_ )); -DESCR("command identifier type, sequence in transaction id"); -#define CIDOID 29 - -DATA(insert OID = 30 ( oidvector PGNSP PGUID -1 f b A f t \054 0 26 1013 oidvectorin oidvectorout oidvectorrecv oidvectorsend - - - i p f 0 -1 0 0 _null_ _null_ _null_ )); -DESCR("array of oids, used in system tables"); -#define OIDVECTOROID 30 - -/* hand-built rowtype entries for bootstrapped catalogs */ -/* NB: OIDs assigned here must match the BKI_ROWTYPE_OID declarations */ - -DATA(insert OID = 71 ( pg_type PGNSP PGUID -1 f c C f t 
\054 1247 0 0 record_in record_out record_recv record_send - - - d x f 0 -1 0 0 _null_ _null_ _null_ )); -DATA(insert OID = 75 ( pg_attribute PGNSP PGUID -1 f c C f t \054 1249 0 0 record_in record_out record_recv record_send - - - d x f 0 -1 0 0 _null_ _null_ _null_ )); -DATA(insert OID = 81 ( pg_proc PGNSP PGUID -1 f c C f t \054 1255 0 0 record_in record_out record_recv record_send - - - d x f 0 -1 0 0 _null_ _null_ _null_ )); -DATA(insert OID = 83 ( pg_class PGNSP PGUID -1 f c C f t \054 1259 0 0 record_in record_out record_recv record_send - - - d x f 0 -1 0 0 _null_ _null_ _null_ )); - -/* OIDS 100 - 199 */ -DATA(insert OID = 114 ( json PGNSP PGUID -1 f b U f t \054 0 0 199 json_in json_out json_recv json_send - - - i x f 0 -1 0 0 _null_ _null_ _null_ )); -#define JSONOID 114 -DATA(insert OID = 142 ( xml PGNSP PGUID -1 f b U f t \054 0 0 143 xml_in xml_out xml_recv xml_send - - - i x f 0 -1 0 0 _null_ _null_ _null_ )); -DESCR("XML content"); -#define XMLOID 142 -DATA(insert OID = 143 ( _xml PGNSP PGUID -1 f b A f t \054 0 142 0 array_in array_out array_recv array_send - - array_typanalyze i x f 0 -1 0 0 _null_ _null_ _null_ )); -DATA(insert OID = 199 ( _json PGNSP PGUID -1 f b A f t \054 0 114 0 array_in array_out array_recv array_send - - array_typanalyze i x f 0 -1 0 0 _null_ _null_ _null_ )); - -DATA(insert OID = 194 ( pg_node_tree PGNSP PGUID -1 f b S f t \054 0 0 0 pg_node_tree_in pg_node_tree_out pg_node_tree_recv pg_node_tree_send - - - i x f 0 -1 0 100 _null_ _null_ _null_ )); -DESCR("string representing an internal node tree"); -#define PGNODETREEOID 194 - -DATA(insert OID = 3361 ( pg_ndistinct PGNSP PGUID -1 f b S f t \054 0 0 0 pg_ndistinct_in pg_ndistinct_out pg_ndistinct_recv pg_ndistinct_send - - - i x f 0 -1 0 100 _null_ _null_ _null_ )); -DESCR("multivariate ndistinct coefficients"); -#define PGNDISTINCTOID 3361 - -DATA(insert OID = 3402 ( pg_dependencies PGNSP PGUID -1 f b S f t \054 0 0 0 pg_dependencies_in pg_dependencies_out pg_dependencies_recv pg_dependencies_send - - - i x f 0 -1 0 100 _null_ _null_ _null_ )); -DESCR("multivariate dependencies"); -#define PGDEPENDENCIESOID 3402 - -DATA(insert OID = 32 ( pg_ddl_command PGNSP PGUID SIZEOF_POINTER t p P f t \054 0 0 0 pg_ddl_command_in pg_ddl_command_out pg_ddl_command_recv pg_ddl_command_send - - - ALIGNOF_POINTER p f 0 -1 0 0 _null_ _null_ _null_ )); -DESCR("internal type for passing CollectedCommand"); -#define PGDDLCOMMANDOID 32 - -/* OIDS 200 - 299 */ - -DATA(insert OID = 210 ( smgr PGNSP PGUID 2 t b U f t \054 0 0 0 smgrin smgrout - - - - - s p f 0 -1 0 0 _null_ _null_ _null_ )); -DESCR("storage manager"); - -/* OIDS 300 - 399 */ - -/* OIDS 400 - 499 */ - -/* OIDS 500 - 599 */ - -/* OIDS 600 - 699 */ -DATA(insert OID = 600 ( point PGNSP PGUID 16 f b G f t \054 0 701 1017 point_in point_out point_recv point_send - - - d p f 0 -1 0 0 _null_ _null_ _null_ )); -DESCR("geometric point '(x, y)'"); -#define POINTOID 600 -DATA(insert OID = 601 ( lseg PGNSP PGUID 32 f b G f t \054 0 600 1018 lseg_in lseg_out lseg_recv lseg_send - - - d p f 0 -1 0 0 _null_ _null_ _null_ )); -DESCR("geometric line segment '(pt1,pt2)'"); -#define LSEGOID 601 -DATA(insert OID = 602 ( path PGNSP PGUID -1 f b G f t \054 0 0 1019 path_in path_out path_recv path_send - - - d x f 0 -1 0 0 _null_ _null_ _null_ )); -DESCR("geometric path '(pt1,...)'"); -#define PATHOID 602 -DATA(insert OID = 603 ( box PGNSP PGUID 32 f b G f t \073 0 600 1020 box_in box_out box_recv box_send - - - d p f 0 -1 0 0 _null_ _null_ _null_ )); -DESCR("geometric box 
'(lower left,upper right)'"); -#define BOXOID 603 -DATA(insert OID = 604 ( polygon PGNSP PGUID -1 f b G f t \054 0 0 1027 poly_in poly_out poly_recv poly_send - - - d x f 0 -1 0 0 _null_ _null_ _null_ )); -DESCR("geometric polygon '(pt1,...)'"); -#define POLYGONOID 604 - -DATA(insert OID = 628 ( line PGNSP PGUID 24 f b G f t \054 0 701 629 line_in line_out line_recv line_send - - - d p f 0 -1 0 0 _null_ _null_ _null_ )); -DESCR("geometric line"); -#define LINEOID 628 -DATA(insert OID = 629 ( _line PGNSP PGUID -1 f b A f t \054 0 628 0 array_in array_out array_recv array_send - - array_typanalyze d x f 0 -1 0 0 _null_ _null_ _null_ )); - -/* OIDS 700 - 799 */ - -DATA(insert OID = 700 ( float4 PGNSP PGUID 4 FLOAT4PASSBYVAL b N f t \054 0 0 1021 float4in float4out float4recv float4send - - - i p f 0 -1 0 0 _null_ _null_ _null_ )); -DESCR("single-precision floating point number, 4-byte storage"); -#define FLOAT4OID 700 -DATA(insert OID = 701 ( float8 PGNSP PGUID 8 FLOAT8PASSBYVAL b N t t \054 0 0 1022 float8in float8out float8recv float8send - - - d p f 0 -1 0 0 _null_ _null_ _null_ )); -DESCR("double-precision floating point number, 8-byte storage"); -#define FLOAT8OID 701 -DATA(insert OID = 702 ( abstime PGNSP PGUID 4 t b D f t \054 0 0 1023 abstimein abstimeout abstimerecv abstimesend - - - i p f 0 -1 0 0 _null_ _null_ _null_ )); -DESCR("absolute, limited-range date and time (Unix system time)"); -#define ABSTIMEOID 702 -DATA(insert OID = 703 ( reltime PGNSP PGUID 4 t b T f t \054 0 0 1024 reltimein reltimeout reltimerecv reltimesend - - - i p f 0 -1 0 0 _null_ _null_ _null_ )); -DESCR("relative, limited-range time interval (Unix delta time)"); -#define RELTIMEOID 703 -DATA(insert OID = 704 ( tinterval PGNSP PGUID 12 f b T f t \054 0 0 1025 tintervalin tintervalout tintervalrecv tintervalsend - - - i p f 0 -1 0 0 _null_ _null_ _null_ )); -DESCR("(abstime,abstime), time interval"); -#define TINTERVALOID 704 -DATA(insert OID = 705 ( unknown PGNSP PGUID -2 f p X f t \054 0 0 0 unknownin unknownout unknownrecv unknownsend - - - c p f 0 -1 0 0 _null_ _null_ _null_ )); -DESCR(""); -#define UNKNOWNOID 705 - -DATA(insert OID = 718 ( circle PGNSP PGUID 24 f b G f t \054 0 0 719 circle_in circle_out circle_recv circle_send - - - d p f 0 -1 0 0 _null_ _null_ _null_ )); -DESCR("geometric circle '(center,radius)'"); -#define CIRCLEOID 718 -DATA(insert OID = 719 ( _circle PGNSP PGUID -1 f b A f t \054 0 718 0 array_in array_out array_recv array_send - - array_typanalyze d x f 0 -1 0 0 _null_ _null_ _null_ )); -DATA(insert OID = 790 ( money PGNSP PGUID 8 FLOAT8PASSBYVAL b N f t \054 0 0 791 cash_in cash_out cash_recv cash_send - - - d p f 0 -1 0 0 _null_ _null_ _null_ )); -DESCR("monetary amounts, $d,ddd.cc"); -#define CASHOID 790 -DATA(insert OID = 791 ( _money PGNSP PGUID -1 f b A f t \054 0 790 0 array_in array_out array_recv array_send - - array_typanalyze d x f 0 -1 0 0 _null_ _null_ _null_ )); - -/* OIDS 800 - 899 */ -DATA(insert OID = 829 ( macaddr PGNSP PGUID 6 f b U f t \054 0 0 1040 macaddr_in macaddr_out macaddr_recv macaddr_send - - - i p f 0 -1 0 0 _null_ _null_ _null_ )); -DESCR("XX:XX:XX:XX:XX:XX, MAC address"); -#define MACADDROID 829 -DATA(insert OID = 869 ( inet PGNSP PGUID -1 f b I t t \054 0 0 1041 inet_in inet_out inet_recv inet_send - - - i m f 0 -1 0 0 _null_ _null_ _null_ )); -DESCR("IP address/netmask, host address, netmask optional"); -#define INETOID 869 -DATA(insert OID = 650 ( cidr PGNSP PGUID -1 f b I f t \054 0 0 651 cidr_in cidr_out cidr_recv cidr_send - - - i m f 0 -1 0 0 
_null_ _null_ _null_ )); -DESCR("network IP address/netmask, network address"); -#define CIDROID 650 -DATA(insert OID = 774 ( macaddr8 PGNSP PGUID 8 f b U f t \054 0 0 775 macaddr8_in macaddr8_out macaddr8_recv macaddr8_send - - - i p f 0 -1 0 0 _null_ _null_ _null_ )); -DESCR("XX:XX:XX:XX:XX:XX:XX:XX, MAC address"); -#define MACADDR8OID 774 - -/* OIDS 900 - 999 */ - -/* OIDS 1000 - 1099 */ -DATA(insert OID = 1000 ( _bool PGNSP PGUID -1 f b A f t \054 0 16 0 array_in array_out array_recv array_send - - array_typanalyze i x f 0 -1 0 0 _null_ _null_ _null_ )); -DATA(insert OID = 1001 ( _bytea PGNSP PGUID -1 f b A f t \054 0 17 0 array_in array_out array_recv array_send - - array_typanalyze i x f 0 -1 0 0 _null_ _null_ _null_ )); -DATA(insert OID = 1002 ( _char PGNSP PGUID -1 f b A f t \054 0 18 0 array_in array_out array_recv array_send - - array_typanalyze i x f 0 -1 0 0 _null_ _null_ _null_ )); -DATA(insert OID = 1003 ( _name PGNSP PGUID -1 f b A f t \054 0 19 0 array_in array_out array_recv array_send - - array_typanalyze i x f 0 -1 0 0 _null_ _null_ _null_ )); -DATA(insert OID = 1005 ( _int2 PGNSP PGUID -1 f b A f t \054 0 21 0 array_in array_out array_recv array_send - - array_typanalyze i x f 0 -1 0 0 _null_ _null_ _null_ )); -#define INT2ARRAYOID 1005 -DATA(insert OID = 1006 ( _int2vector PGNSP PGUID -1 f b A f t \054 0 22 0 array_in array_out array_recv array_send - - array_typanalyze i x f 0 -1 0 0 _null_ _null_ _null_ )); -DATA(insert OID = 1007 ( _int4 PGNSP PGUID -1 f b A f t \054 0 23 0 array_in array_out array_recv array_send - - array_typanalyze i x f 0 -1 0 0 _null_ _null_ _null_ )); -#define INT4ARRAYOID 1007 -DATA(insert OID = 1008 ( _regproc PGNSP PGUID -1 f b A f t \054 0 24 0 array_in array_out array_recv array_send - - array_typanalyze i x f 0 -1 0 0 _null_ _null_ _null_ )); -DATA(insert OID = 1009 ( _text PGNSP PGUID -1 f b A f t \054 0 25 0 array_in array_out array_recv array_send - - array_typanalyze i x f 0 -1 0 100 _null_ _null_ _null_ )); -#define TEXTARRAYOID 1009 -DATA(insert OID = 1028 ( _oid PGNSP PGUID -1 f b A f t \054 0 26 0 array_in array_out array_recv array_send - - array_typanalyze i x f 0 -1 0 0 _null_ _null_ _null_ )); -#define OIDARRAYOID 1028 -DATA(insert OID = 1010 ( _tid PGNSP PGUID -1 f b A f t \054 0 27 0 array_in array_out array_recv array_send - - array_typanalyze i x f 0 -1 0 0 _null_ _null_ _null_ )); -DATA(insert OID = 1011 ( _xid PGNSP PGUID -1 f b A f t \054 0 28 0 array_in array_out array_recv array_send - - array_typanalyze i x f 0 -1 0 0 _null_ _null_ _null_ )); -DATA(insert OID = 1012 ( _cid PGNSP PGUID -1 f b A f t \054 0 29 0 array_in array_out array_recv array_send - - array_typanalyze i x f 0 -1 0 0 _null_ _null_ _null_ )); -DATA(insert OID = 1013 ( _oidvector PGNSP PGUID -1 f b A f t \054 0 30 0 array_in array_out array_recv array_send - - array_typanalyze i x f 0 -1 0 0 _null_ _null_ _null_ )); -DATA(insert OID = 1014 ( _bpchar PGNSP PGUID -1 f b A f t \054 0 1042 0 array_in array_out array_recv array_send bpchartypmodin bpchartypmodout array_typanalyze i x f 0 -1 0 100 _null_ _null_ _null_ )); -DATA(insert OID = 1015 ( _varchar PGNSP PGUID -1 f b A f t \054 0 1043 0 array_in array_out array_recv array_send varchartypmodin varchartypmodout array_typanalyze i x f 0 -1 0 100 _null_ _null_ _null_ )); -DATA(insert OID = 1016 ( _int8 PGNSP PGUID -1 f b A f t \054 0 20 0 array_in array_out array_recv array_send - - array_typanalyze d x f 0 -1 0 0 _null_ _null_ _null_ )); -DATA(insert OID = 1017 ( _point PGNSP PGUID -1 f b A f t \054 0 
600 0 array_in array_out array_recv array_send - - array_typanalyze d x f 0 -1 0 0 _null_ _null_ _null_ )); -DATA(insert OID = 1018 ( _lseg PGNSP PGUID -1 f b A f t \054 0 601 0 array_in array_out array_recv array_send - - array_typanalyze d x f 0 -1 0 0 _null_ _null_ _null_ )); -DATA(insert OID = 1019 ( _path PGNSP PGUID -1 f b A f t \054 0 602 0 array_in array_out array_recv array_send - - array_typanalyze d x f 0 -1 0 0 _null_ _null_ _null_ )); -DATA(insert OID = 1020 ( _box PGNSP PGUID -1 f b A f t \073 0 603 0 array_in array_out array_recv array_send - - array_typanalyze d x f 0 -1 0 0 _null_ _null_ _null_ )); -DATA(insert OID = 1021 ( _float4 PGNSP PGUID -1 f b A f t \054 0 700 0 array_in array_out array_recv array_send - - array_typanalyze i x f 0 -1 0 0 _null_ _null_ _null_ )); -#define FLOAT4ARRAYOID 1021 -DATA(insert OID = 1022 ( _float8 PGNSP PGUID -1 f b A f t \054 0 701 0 array_in array_out array_recv array_send - - array_typanalyze d x f 0 -1 0 0 _null_ _null_ _null_ )); -DATA(insert OID = 1023 ( _abstime PGNSP PGUID -1 f b A f t \054 0 702 0 array_in array_out array_recv array_send - - array_typanalyze i x f 0 -1 0 0 _null_ _null_ _null_ )); -DATA(insert OID = 1024 ( _reltime PGNSP PGUID -1 f b A f t \054 0 703 0 array_in array_out array_recv array_send - - array_typanalyze i x f 0 -1 0 0 _null_ _null_ _null_ )); -DATA(insert OID = 1025 ( _tinterval PGNSP PGUID -1 f b A f t \054 0 704 0 array_in array_out array_recv array_send - - array_typanalyze i x f 0 -1 0 0 _null_ _null_ _null_ )); -DATA(insert OID = 1027 ( _polygon PGNSP PGUID -1 f b A f t \054 0 604 0 array_in array_out array_recv array_send - - array_typanalyze d x f 0 -1 0 0 _null_ _null_ _null_ )); -DATA(insert OID = 1033 ( aclitem PGNSP PGUID 12 f b U f t \054 0 0 1034 aclitemin aclitemout - - - - - i p f 0 -1 0 0 _null_ _null_ _null_ )); -DESCR("access control list"); -#define ACLITEMOID 1033 -DATA(insert OID = 1034 ( _aclitem PGNSP PGUID -1 f b A f t \054 0 1033 0 array_in array_out array_recv array_send - - array_typanalyze i x f 0 -1 0 0 _null_ _null_ _null_ )); -DATA(insert OID = 1040 ( _macaddr PGNSP PGUID -1 f b A f t \054 0 829 0 array_in array_out array_recv array_send - - array_typanalyze i x f 0 -1 0 0 _null_ _null_ _null_ )); -DATA(insert OID = 775 ( _macaddr8 PGNSP PGUID -1 f b A f t \054 0 774 0 array_in array_out array_recv array_send - - array_typanalyze i x f 0 -1 0 0 _null_ _null_ _null_ )); -DATA(insert OID = 1041 ( _inet PGNSP PGUID -1 f b A f t \054 0 869 0 array_in array_out array_recv array_send - - array_typanalyze i x f 0 -1 0 0 _null_ _null_ _null_ )); -DATA(insert OID = 651 ( _cidr PGNSP PGUID -1 f b A f t \054 0 650 0 array_in array_out array_recv array_send - - array_typanalyze i x f 0 -1 0 0 _null_ _null_ _null_ )); -DATA(insert OID = 1263 ( _cstring PGNSP PGUID -1 f b A f t \054 0 2275 0 array_in array_out array_recv array_send - - array_typanalyze i x f 0 -1 0 0 _null_ _null_ _null_ )); -#define CSTRINGARRAYOID 1263 - -DATA(insert OID = 1042 ( bpchar PGNSP PGUID -1 f b S f t \054 0 0 1014 bpcharin bpcharout bpcharrecv bpcharsend bpchartypmodin bpchartypmodout - i x f 0 -1 0 100 _null_ _null_ _null_ )); -DESCR("char(length), blank-padded string, fixed storage length"); -#define BPCHAROID 1042 -DATA(insert OID = 1043 ( varchar PGNSP PGUID -1 f b S f t \054 0 0 1015 varcharin varcharout varcharrecv varcharsend varchartypmodin varchartypmodout - i x f 0 -1 0 100 _null_ _null_ _null_ )); -DESCR("varchar(length), non-blank-padded string, variable storage length"); -#define VARCHAROID 1043 
- -DATA(insert OID = 1082 ( date PGNSP PGUID 4 t b D f t \054 0 0 1182 date_in date_out date_recv date_send - - - i p f 0 -1 0 0 _null_ _null_ _null_ )); -DESCR("date"); -#define DATEOID 1082 -DATA(insert OID = 1083 ( time PGNSP PGUID 8 FLOAT8PASSBYVAL b D f t \054 0 0 1183 time_in time_out time_recv time_send timetypmodin timetypmodout - d p f 0 -1 0 0 _null_ _null_ _null_ )); -DESCR("time of day"); -#define TIMEOID 1083 - -/* OIDS 1100 - 1199 */ -DATA(insert OID = 1114 ( timestamp PGNSP PGUID 8 FLOAT8PASSBYVAL b D f t \054 0 0 1115 timestamp_in timestamp_out timestamp_recv timestamp_send timestamptypmodin timestamptypmodout - d p f 0 -1 0 0 _null_ _null_ _null_ )); -DESCR("date and time"); -#define TIMESTAMPOID 1114 -DATA(insert OID = 1115 ( _timestamp PGNSP PGUID -1 f b A f t \054 0 1114 0 array_in array_out array_recv array_send timestamptypmodin timestamptypmodout array_typanalyze d x f 0 -1 0 0 _null_ _null_ _null_ )); -DATA(insert OID = 1182 ( _date PGNSP PGUID -1 f b A f t \054 0 1082 0 array_in array_out array_recv array_send - - array_typanalyze i x f 0 -1 0 0 _null_ _null_ _null_ )); -DATA(insert OID = 1183 ( _time PGNSP PGUID -1 f b A f t \054 0 1083 0 array_in array_out array_recv array_send timetypmodin timetypmodout array_typanalyze d x f 0 -1 0 0 _null_ _null_ _null_ )); -DATA(insert OID = 1184 ( timestamptz PGNSP PGUID 8 FLOAT8PASSBYVAL b D t t \054 0 0 1185 timestamptz_in timestamptz_out timestamptz_recv timestamptz_send timestamptztypmodin timestamptztypmodout - d p f 0 -1 0 0 _null_ _null_ _null_ )); -DESCR("date and time with time zone"); -#define TIMESTAMPTZOID 1184 -DATA(insert OID = 1185 ( _timestamptz PGNSP PGUID -1 f b A f t \054 0 1184 0 array_in array_out array_recv array_send timestamptztypmodin timestamptztypmodout array_typanalyze d x f 0 -1 0 0 _null_ _null_ _null_ )); -DATA(insert OID = 1186 ( interval PGNSP PGUID 16 f b T t t \054 0 0 1187 interval_in interval_out interval_recv interval_send intervaltypmodin intervaltypmodout - d p f 0 -1 0 0 _null_ _null_ _null_ )); -DESCR("@ , time interval"); -#define INTERVALOID 1186 -DATA(insert OID = 1187 ( _interval PGNSP PGUID -1 f b A f t \054 0 1186 0 array_in array_out array_recv array_send intervaltypmodin intervaltypmodout array_typanalyze d x f 0 -1 0 0 _null_ _null_ _null_ )); - -/* OIDS 1200 - 1299 */ -DATA(insert OID = 1231 ( _numeric PGNSP PGUID -1 f b A f t \054 0 1700 0 array_in array_out array_recv array_send numerictypmodin numerictypmodout array_typanalyze i x f 0 -1 0 0 _null_ _null_ _null_ )); -DATA(insert OID = 1266 ( timetz PGNSP PGUID 12 f b D f t \054 0 0 1270 timetz_in timetz_out timetz_recv timetz_send timetztypmodin timetztypmodout - d p f 0 -1 0 0 _null_ _null_ _null_ )); -DESCR("time of day with time zone"); -#define TIMETZOID 1266 -DATA(insert OID = 1270 ( _timetz PGNSP PGUID -1 f b A f t \054 0 1266 0 array_in array_out array_recv array_send timetztypmodin timetztypmodout array_typanalyze d x f 0 -1 0 0 _null_ _null_ _null_ )); - -/* OIDS 1500 - 1599 */ -DATA(insert OID = 1560 ( bit PGNSP PGUID -1 f b V f t \054 0 0 1561 bit_in bit_out bit_recv bit_send bittypmodin bittypmodout - i x f 0 -1 0 0 _null_ _null_ _null_ )); -DESCR("fixed-length bit string"); -#define BITOID 1560 -DATA(insert OID = 1561 ( _bit PGNSP PGUID -1 f b A f t \054 0 1560 0 array_in array_out array_recv array_send bittypmodin bittypmodout array_typanalyze i x f 0 -1 0 0 _null_ _null_ _null_ )); -DATA(insert OID = 1562 ( varbit PGNSP PGUID -1 f b V t t \054 0 0 1563 varbit_in varbit_out varbit_recv varbit_send 
varbittypmodin varbittypmodout - i x f 0 -1 0 0 _null_ _null_ _null_ )); -DESCR("variable-length bit string"); -#define VARBITOID 1562 -DATA(insert OID = 1563 ( _varbit PGNSP PGUID -1 f b A f t \054 0 1562 0 array_in array_out array_recv array_send varbittypmodin varbittypmodout array_typanalyze i x f 0 -1 0 0 _null_ _null_ _null_ )); - -/* OIDS 1600 - 1699 */ - -/* OIDS 1700 - 1799 */ -DATA(insert OID = 1700 ( numeric PGNSP PGUID -1 f b N f t \054 0 0 1231 numeric_in numeric_out numeric_recv numeric_send numerictypmodin numerictypmodout - i m f 0 -1 0 0 _null_ _null_ _null_ )); -DESCR("numeric(precision, decimal), arbitrary precision number"); -#define NUMERICOID 1700 - -DATA(insert OID = 1790 ( refcursor PGNSP PGUID -1 f b U f t \054 0 0 2201 textin textout textrecv textsend - - - i x f 0 -1 0 0 _null_ _null_ _null_ )); -DESCR("reference to cursor (portal name)"); -#define REFCURSOROID 1790 - -/* OIDS 2200 - 2299 */ -DATA(insert OID = 2201 ( _refcursor PGNSP PGUID -1 f b A f t \054 0 1790 0 array_in array_out array_recv array_send - - array_typanalyze i x f 0 -1 0 0 _null_ _null_ _null_ )); - -DATA(insert OID = 2202 ( regprocedure PGNSP PGUID 4 t b N f t \054 0 0 2207 regprocedurein regprocedureout regprocedurerecv regproceduresend - - - i p f 0 -1 0 0 _null_ _null_ _null_ )); -DESCR("registered procedure (with args)"); -#define REGPROCEDUREOID 2202 - -DATA(insert OID = 2203 ( regoper PGNSP PGUID 4 t b N f t \054 0 0 2208 regoperin regoperout regoperrecv regopersend - - - i p f 0 -1 0 0 _null_ _null_ _null_ )); -DESCR("registered operator"); -#define REGOPEROID 2203 - -DATA(insert OID = 2204 ( regoperator PGNSP PGUID 4 t b N f t \054 0 0 2209 regoperatorin regoperatorout regoperatorrecv regoperatorsend - - - i p f 0 -1 0 0 _null_ _null_ _null_ )); -DESCR("registered operator (with args)"); -#define REGOPERATOROID 2204 - -DATA(insert OID = 2205 ( regclass PGNSP PGUID 4 t b N f t \054 0 0 2210 regclassin regclassout regclassrecv regclasssend - - - i p f 0 -1 0 0 _null_ _null_ _null_ )); -DESCR("registered class"); -#define REGCLASSOID 2205 - -DATA(insert OID = 2206 ( regtype PGNSP PGUID 4 t b N f t \054 0 0 2211 regtypein regtypeout regtyperecv regtypesend - - - i p f 0 -1 0 0 _null_ _null_ _null_ )); -DESCR("registered type"); -#define REGTYPEOID 2206 - -DATA(insert OID = 4096 ( regrole PGNSP PGUID 4 t b N f t \054 0 0 4097 regrolein regroleout regrolerecv regrolesend - - - i p f 0 -1 0 0 _null_ _null_ _null_ )); -DESCR("registered role"); -#define REGROLEOID 4096 - -DATA(insert OID = 4089 ( regnamespace PGNSP PGUID 4 t b N f t \054 0 0 4090 regnamespacein regnamespaceout regnamespacerecv regnamespacesend - - - i p f 0 -1 0 0 _null_ _null_ _null_ )); -DESCR("registered namespace"); -#define REGNAMESPACEOID 4089 - -DATA(insert OID = 2207 ( _regprocedure PGNSP PGUID -1 f b A f t \054 0 2202 0 array_in array_out array_recv array_send - - array_typanalyze i x f 0 -1 0 0 _null_ _null_ _null_ )); -DATA(insert OID = 2208 ( _regoper PGNSP PGUID -1 f b A f t \054 0 2203 0 array_in array_out array_recv array_send - - array_typanalyze i x f 0 -1 0 0 _null_ _null_ _null_ )); -DATA(insert OID = 2209 ( _regoperator PGNSP PGUID -1 f b A f t \054 0 2204 0 array_in array_out array_recv array_send - - array_typanalyze i x f 0 -1 0 0 _null_ _null_ _null_ )); -DATA(insert OID = 2210 ( _regclass PGNSP PGUID -1 f b A f t \054 0 2205 0 array_in array_out array_recv array_send - - array_typanalyze i x f 0 -1 0 0 _null_ _null_ _null_ )); -DATA(insert OID = 2211 ( _regtype PGNSP PGUID -1 f b A f t \054 0 2206 0 
array_in array_out array_recv array_send - - array_typanalyze i x f 0 -1 0 0 _null_ _null_ _null_ )); -#define REGTYPEARRAYOID 2211 -DATA(insert OID = 4097 ( _regrole PGNSP PGUID -1 f b A f t \054 0 4096 0 array_in array_out array_recv array_send - - array_typanalyze i x f 0 -1 0 0 _null_ _null_ _null_ )); -DATA(insert OID = 4090 ( _regnamespace PGNSP PGUID -1 f b A f t \054 0 4089 0 array_in array_out array_recv array_send - - array_typanalyze i x f 0 -1 0 0 _null_ _null_ _null_ )); - -/* uuid */ -DATA(insert OID = 2950 ( uuid PGNSP PGUID 16 f b U f t \054 0 0 2951 uuid_in uuid_out uuid_recv uuid_send - - - c p f 0 -1 0 0 _null_ _null_ _null_ )); -DESCR("UUID datatype"); -#define UUIDOID 2950 -DATA(insert OID = 2951 ( _uuid PGNSP PGUID -1 f b A f t \054 0 2950 0 array_in array_out array_recv array_send - - array_typanalyze i x f 0 -1 0 0 _null_ _null_ _null_ )); - -/* pg_lsn */ -DATA(insert OID = 3220 ( pg_lsn PGNSP PGUID 8 FLOAT8PASSBYVAL b U f t \054 0 0 3221 pg_lsn_in pg_lsn_out pg_lsn_recv pg_lsn_send - - - d p f 0 -1 0 0 _null_ _null_ _null_ )); -DESCR("PostgreSQL LSN datatype"); -#define LSNOID 3220 -DATA(insert OID = 3221 ( _pg_lsn PGNSP PGUID -1 f b A f t \054 0 3220 0 array_in array_out array_recv array_send - - array_typanalyze d x f 0 -1 0 0 _null_ _null_ _null_ )); - -/* text search */ -DATA(insert OID = 3614 ( tsvector PGNSP PGUID -1 f b U f t \054 0 0 3643 tsvectorin tsvectorout tsvectorrecv tsvectorsend - - ts_typanalyze i x f 0 -1 0 0 _null_ _null_ _null_ )); -DESCR("text representation for text search"); -#define TSVECTOROID 3614 -DATA(insert OID = 3642 ( gtsvector PGNSP PGUID -1 f b U f t \054 0 0 3644 gtsvectorin gtsvectorout - - - - - i p f 0 -1 0 0 _null_ _null_ _null_ )); -DESCR("GiST index internal text representation for text search"); -#define GTSVECTOROID 3642 -DATA(insert OID = 3615 ( tsquery PGNSP PGUID -1 f b U f t \054 0 0 3645 tsqueryin tsqueryout tsqueryrecv tsquerysend - - - i p f 0 -1 0 0 _null_ _null_ _null_ )); -DESCR("query representation for text search"); -#define TSQUERYOID 3615 -DATA(insert OID = 3734 ( regconfig PGNSP PGUID 4 t b N f t \054 0 0 3735 regconfigin regconfigout regconfigrecv regconfigsend - - - i p f 0 -1 0 0 _null_ _null_ _null_ )); -DESCR("registered text search configuration"); -#define REGCONFIGOID 3734 -DATA(insert OID = 3769 ( regdictionary PGNSP PGUID 4 t b N f t \054 0 0 3770 regdictionaryin regdictionaryout regdictionaryrecv regdictionarysend - - - i p f 0 -1 0 0 _null_ _null_ _null_ )); -DESCR("registered text search dictionary"); -#define REGDICTIONARYOID 3769 - -DATA(insert OID = 3643 ( _tsvector PGNSP PGUID -1 f b A f t \054 0 3614 0 array_in array_out array_recv array_send - - array_typanalyze i x f 0 -1 0 0 _null_ _null_ _null_ )); -DATA(insert OID = 3644 ( _gtsvector PGNSP PGUID -1 f b A f t \054 0 3642 0 array_in array_out array_recv array_send - - array_typanalyze i x f 0 -1 0 0 _null_ _null_ _null_ )); -DATA(insert OID = 3645 ( _tsquery PGNSP PGUID -1 f b A f t \054 0 3615 0 array_in array_out array_recv array_send - - array_typanalyze i x f 0 -1 0 0 _null_ _null_ _null_ )); -DATA(insert OID = 3735 ( _regconfig PGNSP PGUID -1 f b A f t \054 0 3734 0 array_in array_out array_recv array_send - - array_typanalyze i x f 0 -1 0 0 _null_ _null_ _null_ )); -DATA(insert OID = 3770 ( _regdictionary PGNSP PGUID -1 f b A f t \054 0 3769 0 array_in array_out array_recv array_send - - array_typanalyze i x f 0 -1 0 0 _null_ _null_ _null_ )); - -/* jsonb */ -DATA(insert OID = 3802 ( jsonb PGNSP PGUID -1 f b U f t \054 0 0 3807 
jsonb_in jsonb_out jsonb_recv jsonb_send - - - i x f 0 -1 0 0 _null_ _null_ _null_ )); -DESCR("Binary JSON"); -#define JSONBOID 3802 -DATA(insert OID = 3807 ( _jsonb PGNSP PGUID -1 f b A f t \054 0 3802 0 array_in array_out array_recv array_send - - array_typanalyze i x f 0 -1 0 0 _null_ _null_ _null_ )); - -DATA(insert OID = 2970 ( txid_snapshot PGNSP PGUID -1 f b U f t \054 0 0 2949 txid_snapshot_in txid_snapshot_out txid_snapshot_recv txid_snapshot_send - - - d x f 0 -1 0 0 _null_ _null_ _null_ )); -DESCR("txid snapshot"); -DATA(insert OID = 2949 ( _txid_snapshot PGNSP PGUID -1 f b A f t \054 0 2970 0 array_in array_out array_recv array_send - - array_typanalyze d x f 0 -1 0 0 _null_ _null_ _null_ )); - -/* range types */ -DATA(insert OID = 3904 ( int4range PGNSP PGUID -1 f r R f t \054 0 0 3905 range_in range_out range_recv range_send - - range_typanalyze i x f 0 -1 0 0 _null_ _null_ _null_ )); -DESCR("range of integers"); -#define INT4RANGEOID 3904 -DATA(insert OID = 3905 ( _int4range PGNSP PGUID -1 f b A f t \054 0 3904 0 array_in array_out array_recv array_send - - array_typanalyze i x f 0 -1 0 0 _null_ _null_ _null_ )); -DATA(insert OID = 3906 ( numrange PGNSP PGUID -1 f r R f t \054 0 0 3907 range_in range_out range_recv range_send - - range_typanalyze i x f 0 -1 0 0 _null_ _null_ _null_ )); -DESCR("range of numerics"); -DATA(insert OID = 3907 ( _numrange PGNSP PGUID -1 f b A f t \054 0 3906 0 array_in array_out array_recv array_send - - array_typanalyze i x f 0 -1 0 0 _null_ _null_ _null_ )); -DATA(insert OID = 3908 ( tsrange PGNSP PGUID -1 f r R f t \054 0 0 3909 range_in range_out range_recv range_send - - range_typanalyze d x f 0 -1 0 0 _null_ _null_ _null_ )); -DESCR("range of timestamps without time zone"); -DATA(insert OID = 3909 ( _tsrange PGNSP PGUID -1 f b A f t \054 0 3908 0 array_in array_out array_recv array_send - - array_typanalyze d x f 0 -1 0 0 _null_ _null_ _null_ )); -DATA(insert OID = 3910 ( tstzrange PGNSP PGUID -1 f r R f t \054 0 0 3911 range_in range_out range_recv range_send - - range_typanalyze d x f 0 -1 0 0 _null_ _null_ _null_ )); -DESCR("range of timestamps with time zone"); -DATA(insert OID = 3911 ( _tstzrange PGNSP PGUID -1 f b A f t \054 0 3910 0 array_in array_out array_recv array_send - - array_typanalyze d x f 0 -1 0 0 _null_ _null_ _null_ )); -DATA(insert OID = 3912 ( daterange PGNSP PGUID -1 f r R f t \054 0 0 3913 range_in range_out range_recv range_send - - range_typanalyze i x f 0 -1 0 0 _null_ _null_ _null_ )); -DESCR("range of dates"); -DATA(insert OID = 3913 ( _daterange PGNSP PGUID -1 f b A f t \054 0 3912 0 array_in array_out array_recv array_send - - array_typanalyze i x f 0 -1 0 0 _null_ _null_ _null_ )); -DATA(insert OID = 3926 ( int8range PGNSP PGUID -1 f r R f t \054 0 0 3927 range_in range_out range_recv range_send - - range_typanalyze d x f 0 -1 0 0 _null_ _null_ _null_ )); -DESCR("range of bigints"); -DATA(insert OID = 3927 ( _int8range PGNSP PGUID -1 f b A f t \054 0 3926 0 array_in array_out array_recv array_send - - array_typanalyze d x f 0 -1 0 0 _null_ _null_ _null_ )); - -/* - * pseudo-types - * - * types with typtype='p' represent various special cases in the type system. - * - * These cannot be used to define table columns, but are valid as function - * argument and result types (if supported by the function's implementation - * language). - * - * Note: cstring is a borderline case; it is still considered a pseudo-type, - * but there is now support for it in records and arrays. 
Perhaps we should - * just treat it as a regular base type? - */ -DATA(insert OID = 2249 ( record PGNSP PGUID -1 f p P f t \054 0 0 2287 record_in record_out record_recv record_send - - - d x f 0 -1 0 0 _null_ _null_ _null_ )); -#define RECORDOID 2249 -DATA(insert OID = 2287 ( _record PGNSP PGUID -1 f p P f t \054 0 2249 0 array_in array_out array_recv array_send - - array_typanalyze d x f 0 -1 0 0 _null_ _null_ _null_ )); -#define RECORDARRAYOID 2287 -DATA(insert OID = 2275 ( cstring PGNSP PGUID -2 f p P f t \054 0 0 1263 cstring_in cstring_out cstring_recv cstring_send - - - c p f 0 -1 0 0 _null_ _null_ _null_ )); -#define CSTRINGOID 2275 -DATA(insert OID = 2276 ( any PGNSP PGUID 4 t p P f t \054 0 0 0 any_in any_out - - - - - i p f 0 -1 0 0 _null_ _null_ _null_ )); -#define ANYOID 2276 -DATA(insert OID = 2277 ( anyarray PGNSP PGUID -1 f p P f t \054 0 0 0 anyarray_in anyarray_out anyarray_recv anyarray_send - - - d x f 0 -1 0 0 _null_ _null_ _null_ )); -#define ANYARRAYOID 2277 -DATA(insert OID = 2278 ( void PGNSP PGUID 4 t p P f t \054 0 0 0 void_in void_out void_recv void_send - - - i p f 0 -1 0 0 _null_ _null_ _null_ )); -#define VOIDOID 2278 -DATA(insert OID = 2279 ( trigger PGNSP PGUID 4 t p P f t \054 0 0 0 trigger_in trigger_out - - - - - i p f 0 -1 0 0 _null_ _null_ _null_ )); -#define TRIGGEROID 2279 -DATA(insert OID = 3838 ( event_trigger PGNSP PGUID 4 t p P f t \054 0 0 0 event_trigger_in event_trigger_out - - - - - i p f 0 -1 0 0 _null_ _null_ _null_ )); -#define EVTTRIGGEROID 3838 -DATA(insert OID = 2280 ( language_handler PGNSP PGUID 4 t p P f t \054 0 0 0 language_handler_in language_handler_out - - - - - i p f 0 -1 0 0 _null_ _null_ _null_ )); -#define LANGUAGE_HANDLEROID 2280 -DATA(insert OID = 2281 ( internal PGNSP PGUID SIZEOF_POINTER t p P f t \054 0 0 0 internal_in internal_out - - - - - ALIGNOF_POINTER p f 0 -1 0 0 _null_ _null_ _null_ )); -#define INTERNALOID 2281 -DATA(insert OID = 2282 ( opaque PGNSP PGUID 4 t p P f t \054 0 0 0 opaque_in opaque_out - - - - - i p f 0 -1 0 0 _null_ _null_ _null_ )); -#define OPAQUEOID 2282 -DATA(insert OID = 2283 ( anyelement PGNSP PGUID 4 t p P f t \054 0 0 0 anyelement_in anyelement_out - - - - - i p f 0 -1 0 0 _null_ _null_ _null_ )); -#define ANYELEMENTOID 2283 -DATA(insert OID = 2776 ( anynonarray PGNSP PGUID 4 t p P f t \054 0 0 0 anynonarray_in anynonarray_out - - - - - i p f 0 -1 0 0 _null_ _null_ _null_ )); -#define ANYNONARRAYOID 2776 -DATA(insert OID = 3500 ( anyenum PGNSP PGUID 4 t p P f t \054 0 0 0 anyenum_in anyenum_out - - - - - i p f 0 -1 0 0 _null_ _null_ _null_ )); -#define ANYENUMOID 3500 -DATA(insert OID = 3115 ( fdw_handler PGNSP PGUID 4 t p P f t \054 0 0 0 fdw_handler_in fdw_handler_out - - - - - i p f 0 -1 0 0 _null_ _null_ _null_ )); -#define FDW_HANDLEROID 3115 -DATA(insert OID = 325 ( index_am_handler PGNSP PGUID 4 t p P f t \054 0 0 0 index_am_handler_in index_am_handler_out - - - - - i p f 0 -1 0 0 _null_ _null_ _null_ )); -#define INDEX_AM_HANDLEROID 325 -DATA(insert OID = 3310 ( tsm_handler PGNSP PGUID 4 t p P f t \054 0 0 0 tsm_handler_in tsm_handler_out - - - - - i p f 0 -1 0 0 _null_ _null_ _null_ )); -#define TSM_HANDLEROID 3310 -DATA(insert OID = 3831 ( anyrange PGNSP PGUID -1 f p P f t \054 0 0 0 anyrange_in anyrange_out - - - - - d x f 0 -1 0 0 _null_ _null_ _null_ )); -#define ANYRANGEOID 3831 - - -/* - * macros + * macros for values of poor-mans-enumerated-type columns */ #define TYPTYPE_BASE 'b' /* base type (ordinary scalar type) */ #define TYPTYPE_COMPOSITE 'c' /* composite (e.g., 
table's rowtype) */ @@ -749,4 +284,60 @@ DATA(insert OID = 3831 ( anyrange PGNSP PGUID -1 f p P f t \054 0 0 0 anyrange (typid) == ANYENUMOID || \ (typid) == ANYRANGEOID) +#endif /* EXPOSE_TO_CLIENT_CODE */ + + +extern ObjectAddress TypeShellMake(const char *typeName, + Oid typeNamespace, + Oid ownerId); + +extern ObjectAddress TypeCreate(Oid newTypeOid, + const char *typeName, + Oid typeNamespace, + Oid relationOid, + char relationKind, + Oid ownerId, + int16 internalSize, + char typeType, + char typeCategory, + bool typePreferred, + char typDelim, + Oid inputProcedure, + Oid outputProcedure, + Oid receiveProcedure, + Oid sendProcedure, + Oid typmodinProcedure, + Oid typmodoutProcedure, + Oid analyzeProcedure, + Oid elementType, + bool isImplicitArray, + Oid arrayType, + Oid baseType, + const char *defaultTypeValue, + char *defaultTypeBin, + bool passedByValue, + char alignment, + char storage, + int32 typeMod, + int32 typNDims, + bool typeNotNull, + Oid typeCollation); + +extern void GenerateTypeDependencies(Oid typeObjectId, + Form_pg_type typeForm, + Node *defaultExpr, + void *typacl, + char relationKind, /* only for relation rowtypes */ + bool isImplicitArray, + bool isDependentType, + bool rebuild); + +extern void RenameTypeInternal(Oid typeOid, const char *newTypeName, + Oid typeNamespace); + +extern char *makeArrayTypeName(const char *typeName, Oid typeNamespace); + +extern bool moveArrayTypeName(Oid typeOid, const char *typeName, + Oid typeNamespace); + #endif /* PG_TYPE_H */ diff --git a/src/include/catalog/pg_type_fn.h b/src/include/catalog/pg_type_fn.h deleted file mode 100644 index b570d3588f..0000000000 --- a/src/include/catalog/pg_type_fn.h +++ /dev/null @@ -1,84 +0,0 @@ -/*------------------------------------------------------------------------- - * - * pg_type_fn.h - * prototypes for functions in catalog/pg_type.c - * - * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group - * Portions Copyright (c) 1994, Regents of the University of California - * - * src/include/catalog/pg_type_fn.h - * - *------------------------------------------------------------------------- - */ -#ifndef PG_TYPE_FN_H -#define PG_TYPE_FN_H - -#include "catalog/objectaddress.h" -#include "nodes/nodes.h" - - -extern ObjectAddress TypeShellMake(const char *typeName, - Oid typeNamespace, - Oid ownerId); - -extern ObjectAddress TypeCreate(Oid newTypeOid, - const char *typeName, - Oid typeNamespace, - Oid relationOid, - char relationKind, - Oid ownerId, - int16 internalSize, - char typeType, - char typeCategory, - bool typePreferred, - char typDelim, - Oid inputProcedure, - Oid outputProcedure, - Oid receiveProcedure, - Oid sendProcedure, - Oid typmodinProcedure, - Oid typmodoutProcedure, - Oid analyzeProcedure, - Oid elementType, - bool isImplicitArray, - Oid arrayType, - Oid baseType, - const char *defaultTypeValue, - char *defaultTypeBin, - bool passedByValue, - char alignment, - char storage, - int32 typeMod, - int32 typNDims, - bool typeNotNull, - Oid typeCollation); - -extern void GenerateTypeDependencies(Oid typeNamespace, - Oid typeObjectId, - Oid relationOid, - char relationKind, - Oid owner, - Oid inputProcedure, - Oid outputProcedure, - Oid receiveProcedure, - Oid sendProcedure, - Oid typmodinProcedure, - Oid typmodoutProcedure, - Oid analyzeProcedure, - Oid elementType, - bool isImplicitArray, - Oid baseType, - Oid typeCollation, - Node *defaultExpr, - bool rebuild); - -extern void RenameTypeInternal(Oid typeOid, const char *newTypeName, - Oid typeNamespace); - -extern 
char *makeArrayTypeName(const char *typeName, Oid typeNamespace); - -extern bool moveArrayTypeName(Oid typeOid, const char *typeName, - Oid typeNamespace); - -#endif /* PG_TYPE_FN_H */ diff --git a/src/include/catalog/pg_user_mapping.h b/src/include/catalog/pg_user_mapping.h index f08e6a72ed..e4e2ff36dc 100644 --- a/src/include/catalog/pg_user_mapping.h +++ b/src/include/catalog/pg_user_mapping.h @@ -1,16 +1,16 @@ /*------------------------------------------------------------------------- * * pg_user_mapping.h - * definition of the system "user mapping" relation (pg_user_mapping) + * definition of the "user mapping" system catalog (pg_user_mapping) * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/include/catalog/pg_user_mapping.h * * NOTES - * the genbki.pl script reads this file and generates .bki - * information from the DATA() statements. + * The Catalog.pm module reads this file and derives schema + * information. * *------------------------------------------------------------------------- */ @@ -18,15 +18,14 @@ #define PG_USER_MAPPING_H #include "catalog/genbki.h" +#include "catalog/pg_user_mapping_d.h" /* ---------------- * pg_user_mapping definition. cpp turns this into * typedef struct FormData_pg_user_mapping * ---------------- */ -#define UserMappingRelationId 1418 - -CATALOG(pg_user_mapping,1418) +CATALOG(pg_user_mapping,1418,UserMappingRelationId) { Oid umuser; /* Id of the user, InvalidOid if PUBLIC is * wanted */ @@ -44,14 +43,4 @@ CATALOG(pg_user_mapping,1418) */ typedef FormData_pg_user_mapping *Form_pg_user_mapping; -/* ---------------- - * compiler constants for pg_user_mapping - * ---------------- - */ - -#define Natts_pg_user_mapping 3 -#define Anum_pg_user_mapping_umuser 1 -#define Anum_pg_user_mapping_umserver 2 -#define Anum_pg_user_mapping_umoptions 3 - #endif /* PG_USER_MAPPING_H */ diff --git a/src/include/catalog/reformat_dat_file.pl b/src/include/catalog/reformat_dat_file.pl new file mode 100755 index 0000000000..ca20fb86da --- /dev/null +++ b/src/include/catalog/reformat_dat_file.pl @@ -0,0 +1,316 @@ +#!/usr/bin/perl +#---------------------------------------------------------------------- +# +# reformat_dat_file.pl +# Perl script that reads in catalog data file(s) and writes out +# functionally equivalent file(s) in a standard format. +# +# In each entry of a reformatted file, metadata fields (if present) +# come first, with normal attributes starting on the following line, +# in the same order as the columns of the corresponding catalog. +# Comments and blank lines are preserved. +# +# Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group +# Portions Copyright (c) 1994, Regents of the University of California +# +# src/include/catalog/reformat_dat_file.pl +# +#---------------------------------------------------------------------- + +use strict; +use warnings; + +# If you copy this script to somewhere other than src/include/catalog, +# you'll need to modify this "use lib" or provide a suitable -I switch. +use FindBin; +use lib "$FindBin::RealBin/../../backend/catalog/"; +use Catalog; + +# Names of the metadata fields of a catalog entry. (line_number is also +# a metadata field, but we never write it out, so it's not listed here.) 
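+# For illustration, a reformatted entry carries its metadata fields on the
+# first line and the ordinary catalog attributes on the following line(s);
+# the field names and values in this sketch are invented:
+#
+#   { oid => '8888', descr => 'hypothetical example entry',
+#     examplename => 'foo', examplelen => '4', examplebyval => 't' },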
+my @METADATA = + ('oid', 'oid_symbol', 'array_type_oid', 'descr', 'autogenerated'); + +my @input_files; +my $output_path = ''; +my $full_tuples = 0; + +# Process command line switches. +while (@ARGV) +{ + my $arg = shift @ARGV; + if ($arg !~ /^-/) + { + push @input_files, $arg; + } + elsif ($arg =~ /^-o/) + { + $output_path = length($arg) > 2 ? substr($arg, 2) : shift @ARGV; + } + elsif ($arg eq '--full-tuples') + { + $full_tuples = 1; + } + else + { + usage(); + } +} + +# Sanity check arguments. +die "No input files.\n" + if !@input_files; + +# Make sure output_path ends in a slash. +if ($output_path ne '' && substr($output_path, -1) ne '/') +{ + $output_path .= '/'; +} + +# Read all the input files into internal data structures. +# We pass data file names as arguments and then look for matching +# headers to parse the schema from. +my %catalogs; +my %catalog_data; +my @catnames; +foreach my $datfile (@input_files) +{ + $datfile =~ /(.+)\.dat$/ + or die "Input files need to be data (.dat) files.\n"; + + my $header = "$1.h"; + die "There is no header file corresponding to $datfile" + if !-e $header; + + my $catalog = Catalog::ParseHeader($header); + my $catname = $catalog->{catname}; + my $schema = $catalog->{columns}; + + push @catnames, $catname; + $catalogs{$catname} = $catalog; + + $catalog_data{$catname} = Catalog::ParseData($datfile, $schema, 1); +} + +######################################################################## +# At this point, we have read all the data. If you are modifying this +# script for bulk editing, this is a good place to build lookup tables, +# if you need to. In the following example, the "next if !ref $row" +# check below is a hack to filter out non-hash objects. This is because +# we build the lookup tables from data that we read using the +# "preserve_formatting" parameter. +# +##Index access method lookup. +#my %amnames; +#foreach my $row (@{ $catalog_data{pg_am} }) +#{ +# next if !ref $row; +# $amnames{$row->{oid}} = $row->{amname}; +#} +######################################################################## + +# Write the data. +foreach my $catname (@catnames) +{ + my $catalog = $catalogs{$catname}; + my @attnames; + my $schema = $catalog->{columns}; + + foreach my $column (@$schema) + { + my $attname = $column->{name}; + push @attnames, $attname; + } + + # Overwrite .dat files in place, since they are under version control. + my $datfile = "$output_path$catname.dat"; + open my $dat, '>', $datfile + or die "can't open $datfile: $!"; + + foreach my $data (@{ $catalog_data{$catname} }) + { + + # Hash ref representing a data entry. + if (ref $data eq 'HASH') + { + my %values = %$data; + + ############################################################ + # At this point we have the full tuple in memory as a hash + # and can do any operations we want. As written, it only + # removes default values, but this script can be adapted to + # do one-off bulk-editing. + ############################################################ + + if (!$full_tuples) + { + # If it's an autogenerated entry, drop it completely. + next if $values{autogenerated}; + # Else, just drop any default/computed fields. + strip_default_values(\%values, $schema, $catname); + } + + print $dat "{"; + + # Separate out metadata fields for readability. + my $metadata_str = format_hash(\%values, @METADATA); + if ($metadata_str) + { + print $dat $metadata_str; + + # User attributes start on next line.
+ print $dat ",\n "; + } + + my $data_str = format_hash(\%values, @attnames); + print $dat $data_str; + print $dat " },\n"; + } + + # Preserve blank lines. + elsif ($data =~ /^\s*$/) + { + print $dat "\n"; + } + + # Preserve comments or brackets that are on their own line. + elsif ($data =~ /^\s*(\[|\]|#.*?)\s*$/) + { + print $dat "$1\n"; + } + } + close $dat; +} + +# Remove column values for which there is a matching default, +# or if the value can be computed from other columns. +sub strip_default_values +{ + my ($row, $schema, $catname) = @_; + + # Delete values that match defaults. + foreach my $column (@$schema) + { + my $attname = $column->{name}; + die "strip_default_values: $catname.$attname undefined\n" + if !defined $row->{$attname}; + + if (defined $column->{default} + and ($row->{$attname} eq $column->{default})) + { + delete $row->{$attname}; + } + } + + # Delete computed values. See AddDefaultValues() in Catalog.pm. + # Note: This must be done after deleting values matching defaults. + if ($catname eq 'pg_proc') + { + delete $row->{pronargs} if defined $row->{proargtypes}; + } + + # If a pg_type entry has an auto-generated array type, then its + # typarray field is a computed value too (see GenerateArrayTypes). + if ($catname eq 'pg_type') + { + delete $row->{typarray} if defined $row->{array_type_oid}; + } + + return; +} + +# Format the individual elements of a Perl hash into a valid string +# representation. We do this ourselves, rather than use native Perl +# facilities, so we can keep control over the exact formatting of the +# data files. +sub format_hash +{ + my $data = shift; + my @orig_attnames = @_; + + # Copy attname to new array if it has a value, so we can determine + # the last populated element. We do this because we may have default + # values or empty metadata fields. + my @attnames; + foreach my $orig_attname (@orig_attnames) + { + push @attnames, $orig_attname + if defined $data->{$orig_attname}; + } + + # When calling this function, we ether have an open-bracket or a + # leading space already. + my $char_count = 1; + + my $threshold; + my $hash_str = ''; + my $element_count = 0; + + foreach my $attname (@attnames) + { + $element_count++; + + # To limit the line to 80 chars, we need to account for the + # trailing characters. + if ($element_count == $#attnames + 1) + { + # Last element, so allow space for ' },' + $threshold = 77; + } + else + { + # Just need space for trailing comma + $threshold = 79; + } + + if ($element_count > 1) + { + $hash_str .= ','; + $char_count++; + } + + my $value = $data->{$attname}; + + # Escape single quotes. + $value =~ s/'/\\'/g; + + # Include a leading space in the key-value pair, since this will + # always go after either a comma or an additional padding space on + # the next line. + my $element = " $attname => '$value'"; + my $element_length = length($element); + + # If adding the element to the current line would expand the line + # beyond 80 chars, put it on the next line. We don't do this for + # the first element, since that would create a blank line. + if ($element_count > 1 and $char_count + $element_length > $threshold) + { + + # Put on next line with an additional space preceding. There + # are now two spaces in front of the key-value pair, lining + # it up with the line above it. 
+ $hash_str .= "\n $element"; + $char_count = $element_length + 1; + } + else + { + $hash_str .= $element; + $char_count += $element_length; + } + } + return $hash_str; +} + +sub usage +{ + die < last + 1) { - if ($1 > last + 2) { - print last + 1, "-", $1 - 1; - } else { - print last + 1; +my $oids = Catalog::FindAllOidsFromHeaders(@input_files); + +# Also push FirstBootstrapObjectId to serve as a terminator for the last gap. +my $FirstBootstrapObjectId = + Catalog::FindDefinedSymbol('access/transam.h', '..', + 'FirstBootstrapObjectId'); +push @{$oids}, $FirstBootstrapObjectId; + +my $prev_oid = 0; +foreach my $oid (sort { $a <=> $b } @{$oids}) +{ + if ($oid > $prev_oid + 1) + { + if ($oid > $prev_oid + 2) + { + printf "%d - %d\n", $prev_oid + 1, $oid - 1; + } + else + { + printf "%d\n", $prev_oid + 1; } } - last = $1; + $prev_oid = $oid; } -END { - print last + 1, "-", ENVIRON["FIRSTOBJECTID"]-1; -}' diff --git a/src/include/commands/alter.h b/src/include/commands/alter.h index 4365357ab8..402a4b2d58 100644 --- a/src/include/commands/alter.h +++ b/src/include/commands/alter.h @@ -4,7 +4,7 @@ * prototypes for commands/alter.c * * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/include/commands/alter.h diff --git a/src/include/commands/async.h b/src/include/commands/async.h index 939711d8d9..d5868c42a0 100644 --- a/src/include/commands/async.h +++ b/src/include/commands/async.h @@ -3,7 +3,7 @@ * async.h * Asynchronous notification: NOTIFY, LISTEN, UNLISTEN * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/include/commands/async.h diff --git a/src/include/commands/cluster.h b/src/include/commands/cluster.h index 7bade9fad2..f37a60c1c1 100644 --- a/src/include/commands/cluster.h +++ b/src/include/commands/cluster.h @@ -3,7 +3,7 @@ * cluster.h * header file for postgres cluster command stuff * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994-5, Regents of the University of California * * src/include/commands/cluster.h @@ -19,8 +19,7 @@ extern void cluster(ClusterStmt *stmt, bool isTopLevel); -extern void cluster_rel(Oid tableOid, Oid indexOid, bool recheck, - bool verbose); +extern void cluster_rel(Oid tableOid, Oid indexOid, int options); extern void check_index_is_clusterable(Relation OldHeap, Oid indexOid, bool recheck, LOCKMODE lockmode); extern void mark_index_clustered(Relation rel, Oid indexOid, bool is_internal); diff --git a/src/include/commands/collationcmds.h b/src/include/commands/collationcmds.h index 30e847432e..9b0f00a997 100644 --- a/src/include/commands/collationcmds.h +++ b/src/include/commands/collationcmds.h @@ -4,7 +4,7 @@ * prototypes for collationcmds.c. 
* * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/include/commands/collationcmds.h diff --git a/src/include/commands/comment.h b/src/include/commands/comment.h index 85bd801513..411433f862 100644 --- a/src/include/commands/comment.h +++ b/src/include/commands/comment.h @@ -7,7 +7,7 @@ * * Prototypes for functions in commands/comment.c * - * Copyright (c) 1999-2017, PostgreSQL Global Development Group + * Copyright (c) 1999-2018, PostgreSQL Global Development Group * *------------------------------------------------------------------------- */ @@ -34,11 +34,11 @@ extern ObjectAddress CommentObject(CommentStmt *stmt); extern void DeleteComments(Oid oid, Oid classoid, int32 subid); -extern void CreateComments(Oid oid, Oid classoid, int32 subid, char *comment); +extern void CreateComments(Oid oid, Oid classoid, int32 subid, const char *comment); extern void DeleteSharedComments(Oid oid, Oid classoid); -extern void CreateSharedComments(Oid oid, Oid classoid, char *comment); +extern void CreateSharedComments(Oid oid, Oid classoid, const char *comment); extern char *GetComment(Oid oid, Oid classoid, int32 subid); diff --git a/src/include/commands/conversioncmds.h b/src/include/commands/conversioncmds.h index 7054505794..9fd40cb6f0 100644 --- a/src/include/commands/conversioncmds.h +++ b/src/include/commands/conversioncmds.h @@ -4,7 +4,7 @@ * prototypes for conversioncmds.c. * * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/include/commands/conversioncmds.h diff --git a/src/include/commands/copy.h b/src/include/commands/copy.h index 8b2971d287..f393e7e73d 100644 --- a/src/include/commands/copy.h +++ b/src/include/commands/copy.h @@ -4,7 +4,7 @@ * Definitions for using the POSTGRES copy command. * * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/include/commands/copy.h diff --git a/src/include/commands/createas.h b/src/include/commands/createas.h index aaf4fac97b..03ba21ded8 100644 --- a/src/include/commands/createas.h +++ b/src/include/commands/createas.h @@ -4,7 +4,7 @@ * prototypes for createas.c. * * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/include/commands/createas.h diff --git a/src/include/commands/dbcommands.h b/src/include/commands/dbcommands.h index f42c8cdbe3..677c7fc5fc 100644 --- a/src/include/commands/dbcommands.h +++ b/src/include/commands/dbcommands.h @@ -4,7 +4,7 @@ * Database management commands (create/drop database). 
* * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/include/commands/dbcommands.h diff --git a/src/include/commands/dbcommands_xlog.h b/src/include/commands/dbcommands_xlog.h index 63b1a6470c..83048d6c5b 100644 --- a/src/include/commands/dbcommands_xlog.h +++ b/src/include/commands/dbcommands_xlog.h @@ -4,7 +4,7 @@ * Database resource manager XLOG definitions (create/drop database). * * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/include/commands/dbcommands_xlog.h diff --git a/src/include/commands/defrem.h b/src/include/commands/defrem.h index f7bb4a54f7..1d05a4bcdc 100644 --- a/src/include/commands/defrem.h +++ b/src/include/commands/defrem.h @@ -4,7 +4,7 @@ * POSTGRES define and remove utility definitions. * * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/include/commands/defrem.h @@ -15,7 +15,9 @@ #define DEFREM_H #include "catalog/objectaddress.h" +#include "nodes/params.h" #include "nodes/parsenodes.h" +#include "tcop/dest.h" #include "utils/array.h" /* commands/dropcmds.c */ @@ -25,26 +27,29 @@ extern void RemoveObjects(DropStmt *stmt); extern ObjectAddress DefineIndex(Oid relationId, IndexStmt *stmt, Oid indexRelationId, + Oid parentIndexId, + Oid parentConstraintId, bool is_alter_table, bool check_rights, bool check_not_in_use, bool skip_build, bool quiet); -extern Oid ReindexIndex(RangeVar *indexRelation, int options); +extern void ReindexIndex(RangeVar *indexRelation, int options); extern Oid ReindexTable(RangeVar *relation, int options); extern void ReindexMultipleTables(const char *objectName, ReindexObjectType objectKind, int options); extern char *makeObjectName(const char *name1, const char *name2, const char *label); extern char *ChooseRelationName(const char *name1, const char *name2, - const char *label, Oid namespaceid); + const char *label, Oid namespaceid, + bool isconstraint); extern bool CheckIndexCompatible(Oid oldId, - char *accessMethodName, + const char *accessMethodName, List *attributeList, List *exclusionOpNames); extern Oid GetDefaultOpClass(Oid type_id, Oid am_id); extern Oid ResolveOpClass(List *opclass, Oid attrType, - char *accessMethodName, Oid accessMethodId); + const char *accessMethodName, Oid accessMethodId); /* commands/functioncmds.c */ extern ObjectAddress CreateFunction(ParseState *pstate, CreateFunctionStmt *stmt); @@ -58,13 +63,15 @@ extern ObjectAddress CreateTransform(CreateTransformStmt *stmt); extern void DropTransformById(Oid transformOid); extern void IsThereFunctionInNamespace(const char *proname, int pronargs, oidvector *proargtypes, Oid nspOid); -extern void ExecuteDoStmt(DoStmt *stmt); +extern void ExecuteDoStmt(DoStmt *stmt, bool atomic); +extern void ExecuteCallStmt(CallStmt *stmt, ParamListInfo params, bool atomic, DestReceiver *dest); +extern TupleDesc CallStmtResultDesc(CallStmt *stmt); extern Oid get_cast_oid(Oid sourcetypeid, Oid targettypeid, bool missing_ok); extern Oid get_transform_oid(Oid type_id, Oid lang_id, bool missing_ok); extern void interpret_function_parameter_list(ParseState 
*pstate, List *parameters, Oid languageOid, - bool is_aggregate, + ObjectType objtype, oidvector **parameterTypes, ArrayType **allParameterTypes, ArrayType **parameterModes, diff --git a/src/include/commands/discard.h b/src/include/commands/discard.h index 8ea0b30ddf..bbec7ef6ef 100644 --- a/src/include/commands/discard.h +++ b/src/include/commands/discard.h @@ -4,7 +4,7 @@ * prototypes for discard.c. * * - * Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Copyright (c) 1996-2018, PostgreSQL Global Development Group * * src/include/commands/discard.h * diff --git a/src/include/commands/event_trigger.h b/src/include/commands/event_trigger.h index 2ce528272c..0e1959462e 100644 --- a/src/include/commands/event_trigger.h +++ b/src/include/commands/event_trigger.h @@ -3,7 +3,7 @@ * event_trigger.h * Declarations for command trigger handling. * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/include/commands/event_trigger.h @@ -50,7 +50,6 @@ extern void AlterEventTriggerOwner_oid(Oid, Oid newOwnerId); extern bool EventTriggerSupportsObjectType(ObjectType obtype); extern bool EventTriggerSupportsObjectClass(ObjectClass objclass); -extern bool EventTriggerSupportsGrantObjectType(GrantObjectType objtype); extern void EventTriggerDDLCommandStart(Node *parsetree); extern void EventTriggerDDLCommandEnd(Node *parsetree); extern void EventTriggerSQLDrop(Node *parsetree); diff --git a/src/include/commands/explain.h b/src/include/commands/explain.h index 78822b766a..d3f70fda08 100644 --- a/src/include/commands/explain.h +++ b/src/include/commands/explain.h @@ -3,7 +3,7 @@ * explain.h * prototypes for explain.c * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994-5, Regents of the University of California * * src/include/commands/explain.h @@ -53,7 +53,8 @@ typedef void (*ExplainOneQuery_hook_type) (Query *query, IntoClause *into, ExplainState *es, const char *queryString, - ParamListInfo params); + ParamListInfo params, + QueryEnvironment *queryEnv); extern PGDLLIMPORT ExplainOneQuery_hook_type ExplainOneQuery_hook; /* Hook for plugins to get control in explain_get_index_name() */ @@ -80,6 +81,10 @@ extern void ExplainOnePlan(PlannedStmt *plannedstmt, IntoClause *into, extern void ExplainPrintPlan(ExplainState *es, QueryDesc *queryDesc); extern void ExplainPrintTriggers(ExplainState *es, QueryDesc *queryDesc); +extern void ExplainPrintJITSummary(ExplainState *es, QueryDesc *queryDesc); +extern void ExplainPrintJIT(ExplainState *es, int jit_flags, + struct JitInstrumentation *jit_instr, int worker_i); + extern void ExplainQueryText(ExplainState *es, QueryDesc *queryDesc); extern void ExplainBeginOutput(ExplainState *es); @@ -92,13 +97,16 @@ extern void ExplainPropertyListNested(const char *qlabel, List *data, ExplainState *es); extern void ExplainPropertyText(const char *qlabel, const char *value, ExplainState *es); -extern void ExplainPropertyInteger(const char *qlabel, int value, - ExplainState *es); -extern void ExplainPropertyLong(const char *qlabel, long value, - ExplainState *es); -extern void ExplainPropertyFloat(const char *qlabel, double value, int ndigits, - ExplainState *es); +extern void ExplainPropertyInteger(const char *qlabel, const char *unit, + int64 value, ExplainState 
*es); +extern void ExplainPropertyFloat(const char *qlabel, const char *unit, + double value, int ndigits, ExplainState *es); extern void ExplainPropertyBool(const char *qlabel, bool value, ExplainState *es); +extern void ExplainOpenGroup(const char *objtype, const char *labelname, + bool labeled, ExplainState *es); +extern void ExplainCloseGroup(const char *objtype, const char *labelname, + bool labeled, ExplainState *es); + #endif /* EXPLAIN_H */ diff --git a/src/include/commands/extension.h b/src/include/commands/extension.h index 73bba3c784..068a3754aa 100644 --- a/src/include/commands/extension.h +++ b/src/include/commands/extension.h @@ -4,7 +4,7 @@ * Extension management commands (create/drop extension). * * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/include/commands/extension.h @@ -28,7 +28,7 @@ * them from the extension first. */ extern PGDLLIMPORT bool creating_extension; -extern Oid CurrentExtensionObject; +extern PGDLLIMPORT Oid CurrentExtensionObject; extern ObjectAddress CreateExtension(ParseState *pstate, CreateExtensionStmt *stmt); diff --git a/src/include/commands/lockcmds.h b/src/include/commands/lockcmds.h index cdb5c62a52..6420e13e75 100644 --- a/src/include/commands/lockcmds.h +++ b/src/include/commands/lockcmds.h @@ -4,7 +4,7 @@ * prototypes for lockcmds.c. * * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/include/commands/lockcmds.h diff --git a/src/include/commands/matview.h b/src/include/commands/matview.h index 3feb137ef4..3b30ad76b2 100644 --- a/src/include/commands/matview.h +++ b/src/include/commands/matview.h @@ -4,7 +4,7 @@ * prototypes for matview.c. * * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/include/commands/matview.h diff --git a/src/include/commands/policy.h b/src/include/commands/policy.h index d6a920ccef..acf621a8ec 100644 --- a/src/include/commands/policy.h +++ b/src/include/commands/policy.h @@ -4,7 +4,7 @@ * prototypes for policy.c. * * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/include/commands/policy.h diff --git a/src/include/commands/portalcmds.h b/src/include/commands/portalcmds.h index 488ce60cd6..99dd04594f 100644 --- a/src/include/commands/portalcmds.h +++ b/src/include/commands/portalcmds.h @@ -4,7 +4,7 @@ * prototypes for portalcmds.c. 
* * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/include/commands/portalcmds.h diff --git a/src/include/commands/prepare.h b/src/include/commands/prepare.h index 5ec1200e0a..ffec029df4 100644 --- a/src/include/commands/prepare.h +++ b/src/include/commands/prepare.h @@ -4,7 +4,7 @@ * PREPARE, EXECUTE and DEALLOCATE commands, and prepared-stmt storage * * - * Copyright (c) 2002-2017, PostgreSQL Global Development Group + * Copyright (c) 2002-2018, PostgreSQL Global Development Group * * src/include/commands/prepare.h * diff --git a/src/include/commands/progress.h b/src/include/commands/progress.h index 9472ecca63..6a6b467fee 100644 --- a/src/include/commands/progress.h +++ b/src/include/commands/progress.h @@ -7,7 +7,7 @@ * constants, you probably also need to update the views based on them * in system_views.sql. * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/include/commands/progress.h diff --git a/src/include/commands/publicationcmds.h b/src/include/commands/publicationcmds.h index a2e0f4a21e..0c0d7795cf 100644 --- a/src/include/commands/publicationcmds.h +++ b/src/include/commands/publicationcmds.h @@ -4,7 +4,7 @@ * prototypes for publicationcmds.c. * * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/include/commands/publicationcmds.h diff --git a/src/include/commands/schemacmds.h b/src/include/commands/schemacmds.h index 381e5b8dae..bf00754245 100644 --- a/src/include/commands/schemacmds.h +++ b/src/include/commands/schemacmds.h @@ -4,7 +4,7 @@ * prototypes for schemacmds.c. * * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/include/commands/schemacmds.h diff --git a/src/include/commands/seclabel.h b/src/include/commands/seclabel.h index a97c3293b2..85f2cf67aa 100644 --- a/src/include/commands/seclabel.h +++ b/src/include/commands/seclabel.h @@ -3,7 +3,7 @@ * * Prototypes for functions in commands/seclabel.c * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California */ #ifndef SECLABEL_H diff --git a/src/include/commands/sequence.h b/src/include/commands/sequence.h index caab195130..3f58bae31a 100644 --- a/src/include/commands/sequence.h +++ b/src/include/commands/sequence.h @@ -3,7 +3,7 @@ * sequence.h * prototypes for sequence.c. 
* - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/include/commands/sequence.h diff --git a/src/include/commands/subscriptioncmds.h b/src/include/commands/subscriptioncmds.h index 3d92a682a1..6d70ad71b1 100644 --- a/src/include/commands/subscriptioncmds.h +++ b/src/include/commands/subscriptioncmds.h @@ -4,7 +4,7 @@ * prototypes for subscriptioncmds.c. * * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/include/commands/subscriptioncmds.h diff --git a/src/include/commands/tablecmds.h b/src/include/commands/tablecmds.h index abd31b68d4..138de84e83 100644 --- a/src/include/commands/tablecmds.h +++ b/src/include/commands/tablecmds.h @@ -4,7 +4,7 @@ * prototypes for tablecmds.c. * * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/include/commands/tablecmds.h @@ -53,6 +53,8 @@ extern void AlterRelationNamespaceInternal(Relation classRel, Oid relOid, extern void CheckTableNotInUse(Relation rel, const char *stmt); extern void ExecuteTruncate(TruncateStmt *stmt); +extern void ExecuteTruncateGuts(List *explicit_rels, List *relids, List *relids_logged, + DropBehavior behavior, bool restart_seqs); extern void SetRelationHasSubclass(Oid relationId, bool relhassubclass); @@ -73,6 +75,10 @@ extern void find_composite_type_dependencies(Oid typeOid, extern void check_of_type(HeapTuple typetuple); +extern void createForeignKeyTriggers(Relation rel, Oid refRelOid, + Constraint *fkconstraint, Oid constraintOid, + Oid indexOid, bool create_action); + extern void register_on_commit_action(Oid relid, OnCommitAction action); extern void remove_on_commit_action(Oid relid); @@ -87,4 +93,7 @@ extern void RangeVarCallbackOwnsTable(const RangeVar *relation, extern void RangeVarCallbackOwnsRelation(const RangeVar *relation, Oid relId, Oid oldRelId, void *noCatalogs); +extern bool PartConstraintImpliedByRelConstraint(Relation scanrel, + List *partConstraint); + #endif /* TABLECMDS_H */ diff --git a/src/include/commands/tablespace.h b/src/include/commands/tablespace.h index ba8de32c7b..d52b73d57a 100644 --- a/src/include/commands/tablespace.h +++ b/src/include/commands/tablespace.h @@ -4,7 +4,7 @@ * Tablespace management commands (create/drop tablespace). * * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/include/commands/tablespace.h diff --git a/src/include/commands/trigger.h b/src/include/commands/trigger.h index aeb363f13e..1031448c14 100644 --- a/src/include/commands/trigger.h +++ b/src/include/commands/trigger.h @@ -3,7 +3,7 @@ * trigger.h * Declarations for trigger handling. 
* - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/include/commands/trigger.h @@ -43,13 +43,21 @@ typedef struct TriggerData /* * The state for capturing old and new tuples into transition tables for a - * single ModifyTable node. + * single ModifyTable node (or other operation source, e.g. copy.c). + * + * This is per-caller to avoid conflicts in setting tcs_map or + * tcs_original_insert_tuple. Note, however, that the pointed-to + * private data may be shared across multiple callers. */ +struct AfterTriggersTableData; /* private in trigger.c */ + typedef struct TransitionCaptureState { /* * Is there at least one trigger specifying each transition relation on * the relation explicitly named in the DML statement or COPY command? + * Note: in current usage, these flags could be part of the private state, + * but it seems possibly useful to let callers see them. */ bool tcs_delete_old_table; bool tcs_update_old_table; @@ -60,7 +68,7 @@ typedef struct TransitionCaptureState * For UPDATE and DELETE, AfterTriggerSaveEvent may need to convert the * new and old tuples from a child table's format to the format of the * relation named in a query so that it is compatible with the transition - * tuplestores. + * tuplestores. The caller must store the conversion map here if so. */ TupleConversionMap *tcs_map; @@ -74,17 +82,9 @@ typedef struct TransitionCaptureState HeapTuple tcs_original_insert_tuple; /* - * The tuplestores backing the transition tables. We use separate - * tuplestores for INSERT and UPDATE, because INSERT ... ON CONFLICT ... - * DO UPDATE causes INSERT and UPDATE triggers to fire and needs a way to - * keep track of the new tuple images resulting from the two cases - * separately. We only need a single old image tuplestore, because there - * is no statement that can both update and delete at the same time. + * Private data including the tuplestore(s) into which to insert tuples. 
*/ - Tuplestorestate *tcs_old_tuplestore; /* for DELETE and UPDATE old - * images */ - Tuplestorestate *tcs_insert_tuplestore; /* for INSERT new images */ - Tuplestorestate *tcs_update_tuplestore; /* for UPDATE new images */ + struct AfterTriggersTableData *tcs_private; } TransitionCaptureState; /* @@ -159,7 +159,8 @@ extern PGDLLIMPORT int SessionReplicationRole; extern ObjectAddress CreateTrigger(CreateTrigStmt *stmt, const char *queryString, Oid relOid, Oid refRelOid, Oid constraintOid, Oid indexOid, - bool isInternal); + Oid funcoid, Oid parentTriggerOid, Node *whenClause, + bool isInternal, bool in_partition); extern void RemoveTriggerById(Oid trigOid); extern Oid get_trigger_oid(Oid relid, const char *name, bool missing_ok); @@ -167,15 +168,16 @@ extern Oid get_trigger_oid(Oid relid, const char *name, bool missing_ok); extern ObjectAddress renametrig(RenameStmt *stmt); extern void EnableDisableTrigger(Relation rel, const char *tgname, - char fires_when, bool skip_system); + char fires_when, bool skip_system, LOCKMODE lockmode); extern void RelationBuildTriggers(Relation relation); extern TriggerDesc *CopyTriggerDesc(TriggerDesc *trigdesc); extern const char *FindTriggerIncompatibleWithInheritance(TriggerDesc *trigdesc); -extern TransitionCaptureState *MakeTransitionCaptureState(TriggerDesc *trigdesc); -extern void DestroyTransitionCaptureState(TransitionCaptureState *tcs); + +extern TransitionCaptureState *MakeTransitionCaptureState(TriggerDesc *trigdesc, + Oid relid, CmdType cmdType); extern void FreeTriggerDesc(TriggerDesc *trigdesc); @@ -204,7 +206,8 @@ extern bool ExecBRDeleteTriggers(EState *estate, EPQState *epqstate, ResultRelInfo *relinfo, ItemPointer tupleid, - HeapTuple fdw_trigtuple); + HeapTuple fdw_trigtuple, + TupleTableSlot **epqslot); extern void ExecARDeleteTriggers(EState *estate, ResultRelInfo *relinfo, ItemPointer tupleid, diff --git a/src/include/commands/typecmds.h b/src/include/commands/typecmds.h index 34f6fe328f..ac4ce50ef6 100644 --- a/src/include/commands/typecmds.h +++ b/src/include/commands/typecmds.h @@ -4,7 +4,7 @@ * prototypes for typecmds.c. 
* * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/include/commands/typecmds.h @@ -34,7 +34,7 @@ extern ObjectAddress AlterDomainDefault(List *names, Node *defaultRaw); extern ObjectAddress AlterDomainNotNull(List *names, bool notNull); extern ObjectAddress AlterDomainAddConstraint(List *names, Node *constr, ObjectAddress *constrAddr); -extern ObjectAddress AlterDomainValidateConstraint(List *names, char *constrName); +extern ObjectAddress AlterDomainValidateConstraint(List *names, const char *constrName); extern ObjectAddress AlterDomainDropConstraint(List *names, const char *constrName, DropBehavior behavior, bool missing_ok); diff --git a/src/include/commands/vacuum.h b/src/include/commands/vacuum.h index a9035112e9..2f4303e40d 100644 --- a/src/include/commands/vacuum.h +++ b/src/include/commands/vacuum.h @@ -4,7 +4,7 @@ * header file for postgres vacuum cleaner and statistics analyzer * * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/include/commands/vacuum.h @@ -15,6 +15,7 @@ #define VACUUM_H #include "access/htup.h" +#include "catalog/pg_class.h" #include "catalog/pg_statistic.h" #include "catalog/pg_type.h" #include "nodes/parsenodes.h" @@ -29,8 +30,8 @@ * so they live until the end of the ANALYZE operation. * * The type-specific typanalyze function is passed a pointer to this struct - * and must return TRUE to continue analysis, FALSE to skip analysis of this - * column. In the TRUE case it must set the compute_stats and minrows fields, + * and must return true to continue analysis, false to skip analysis of this + * column. In the true case it must set the compute_stats and minrows fields, * and can optionally set extra_data to pass additional info to compute_stats. * minrows is its request for the minimum number of sample rows to be gathered * (but note this request might not be honored, eg if there are fewer rows @@ -45,7 +46,7 @@ * The fetchfunc may be called with rownum running from 0 to samplerows-1. * It returns a Datum and an isNull flag. * - * compute_stats should set stats_valid TRUE if it is able to compute + * compute_stats should set stats_valid true if it is able to compute * any useful statistics. If it does, the remainder of the struct holds * the information to be stored in a pg_statistic row for the column. Be * careful to allocate any pointed-to data in anl_context, which will NOT @@ -86,7 +87,7 @@ typedef struct VacAttrStats /* * These fields must be filled in by the typanalyze routine, unless it - * returns FALSE. + * returns false. 
*/ AnalyzeAttrComputeStatsFunc compute_stats; /* function pointer */ int minrows; /* Minimum # of rows wanted for stats */ @@ -157,13 +158,12 @@ extern int vacuum_multixact_freeze_table_age; /* in commands/vacuum.c */ extern void ExecVacuum(VacuumStmt *vacstmt, bool isTopLevel); -extern void vacuum(int options, RangeVar *relation, Oid relid, - VacuumParams *params, List *va_cols, +extern void vacuum(int options, List *relations, VacuumParams *params, BufferAccessStrategy bstrategy, bool isTopLevel); extern void vac_open_indexes(Relation relation, LOCKMODE lockmode, int *nindexes, Relation **Irel); extern void vac_close_indexes(int nindexes, Relation *Irel, LOCKMODE lockmode); -extern double vac_estimate_reltuples(Relation relation, bool is_analyze, +extern double vac_estimate_reltuples(Relation relation, BlockNumber total_pages, BlockNumber scanned_pages, double scanned_tuples); @@ -186,6 +186,10 @@ extern void vacuum_set_xid_limits(Relation rel, MultiXactId *mxactFullScanLimit); extern void vac_update_datfrozenxid(void); extern void vacuum_delay_point(void); +extern bool vacuum_is_relation_owner(Oid relid, Form_pg_class reltuple, + int options); +extern Relation vacuum_open_relation(Oid relid, RangeVar *relation, + VacuumParams *params, int options, LOCKMODE lmode); /* in commands/vacuumlazy.c */ extern void lazy_vacuum_rel(Relation onerel, int options, diff --git a/src/include/commands/variable.h b/src/include/commands/variable.h index 575339a6d8..7373a3f99f 100644 --- a/src/include/commands/variable.h +++ b/src/include/commands/variable.h @@ -2,7 +2,7 @@ * variable.h * Routines for handling specialized SET variables. * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/include/commands/variable.h @@ -22,9 +22,7 @@ extern bool check_log_timezone(char **newval, void **extra, GucSource source); extern void assign_log_timezone(const char *newval, void *extra); extern const char *show_log_timezone(void); extern bool check_transaction_read_only(bool *newval, void **extra, GucSource source); -extern bool check_XactIsoLevel(char **newval, void **extra, GucSource source); -extern void assign_XactIsoLevel(const char *newval, void *extra); -extern const char *show_XactIsoLevel(void); +extern bool check_XactIsoLevel(int *newval, void **extra, GucSource source); extern bool check_transaction_deferrable(bool *newval, void **extra, GucSource source); extern bool check_random_seed(double *newval, void **extra, GucSource source); extern void assign_random_seed(double newval, void *extra); diff --git a/src/include/commands/view.h b/src/include/commands/view.h index cf08ce2ac7..4703922ff6 100644 --- a/src/include/commands/view.h +++ b/src/include/commands/view.h @@ -4,7 +4,7 @@ * * * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/include/commands/view.h @@ -17,7 +17,7 @@ #include "catalog/objectaddress.h" #include "nodes/parsenodes.h" -extern void validateWithCheckOption(char *value); +extern void validateWithCheckOption(const char *value); extern ObjectAddress DefineView(ViewStmt *stmt, const char *queryString, int stmt_location, int stmt_len); diff --git a/src/include/common/base64.h b/src/include/common/base64.h index 7fe19bb432..32cec4b210 100644 --- 
a/src/include/common/base64.h +++ b/src/include/common/base64.h @@ -3,7 +3,7 @@ * Encoding and decoding routines for base64 without whitespace * support. * - * Portions Copyright (c) 2001-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 2001-2018, PostgreSQL Global Development Group * * src/include/common/base64.h */ diff --git a/src/include/common/config_info.h b/src/include/common/config_info.h index f775327861..72014a915a 100644 --- a/src/include/common/config_info.h +++ b/src/include/common/config_info.h @@ -2,7 +2,7 @@ * config_info.h * Common code for pg_config output * - * Copyright (c) 2016-2017, PostgreSQL Global Development Group + * Copyright (c) 2016-2018, PostgreSQL Global Development Group * * src/include/common/config_info.h */ diff --git a/src/include/common/controldata_utils.h b/src/include/common/controldata_utils.h index e97abe6a51..d8fd316396 100644 --- a/src/include/common/controldata_utils.h +++ b/src/include/common/controldata_utils.h @@ -2,7 +2,7 @@ * controldata_utils.h * Common code for pg_controldata output * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/include/common/controldata_utils.h diff --git a/src/include/common/fe_memutils.h b/src/include/common/fe_memutils.h index 6708670b96..458743dd40 100644 --- a/src/include/common/fe_memutils.h +++ b/src/include/common/fe_memutils.h @@ -2,7 +2,7 @@ * fe_memutils.h * memory management support for frontend code * - * Copyright (c) 2003-2017, PostgreSQL Global Development Group + * Copyright (c) 2003-2018, PostgreSQL Global Development Group * * src/include/common/fe_memutils.h */ diff --git a/src/include/common/file_perm.h b/src/include/common/file_perm.h new file mode 100644 index 0000000000..cfa0546385 --- /dev/null +++ b/src/include/common/file_perm.h @@ -0,0 +1,56 @@ +/*------------------------------------------------------------------------- + * + * File and directory permission definitions + * + * + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group + * Portions Copyright (c) 1994, Regents of the University of California + * + * src/include/common/file_perm.h + * + *------------------------------------------------------------------------- + */ +#ifndef FILE_PERM_H +#define FILE_PERM_H + +#include + +/* + * Mode mask for data directory permissions that only allows the owner to + * read/write directories and files. + * + * This is the default. + */ +#define PG_MODE_MASK_OWNER (S_IRWXG | S_IRWXO) + +/* + * Mode mask for data directory permissions that also allows group read/execute. 
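The new file_perm.h above centralizes the data-directory permission policy: a default owner-only mode/umask pair plus an alternative pair that grants the group read (and, for directories, execute) access. A short standalone sketch of how such a mode/mask pair is typically applied on a POSIX system; the directory name and the allow_group flag are hypothetical.

#include <stdio.h>
#include <sys/stat.h>
#include <sys/types.h>

/* Owner-only and group-readable variants, mirroring the idea above. */
#define DIR_MODE_OWNER   S_IRWXU                        /* 0700 */
#define DIR_MODE_GROUP  (S_IRWXU | S_IRGRP | S_IXGRP)   /* 0750 */
#define MODE_MASK_OWNER (S_IRWXG | S_IRWXO)             /* umask 077 */
#define MODE_MASK_GROUP (S_IWGRP | S_IRWXO)             /* umask 027 */

int
main(void)
{
    int allow_group = 1;    /* in practice derived from the data directory's mode */

    /* The umask ensures later file creation cannot exceed the chosen policy. */
    umask(allow_group ? MODE_MASK_GROUP : MODE_MASK_OWNER);

    if (mkdir("demo_datadir", allow_group ? DIR_MODE_GROUP : DIR_MODE_OWNER) != 0)
        perror("mkdir");
    return 0;
}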
+ */ +#define PG_MODE_MASK_GROUP (S_IWGRP | S_IRWXO) + +/* Default mode for creating directories */ +#define PG_DIR_MODE_OWNER S_IRWXU + +/* Mode for creating directories that allows group read/execute */ +#define PG_DIR_MODE_GROUP (S_IRWXU | S_IRGRP | S_IXGRP) + +/* Default mode for creating files */ +#define PG_FILE_MODE_OWNER (S_IRUSR | S_IWUSR) + +/* Mode for creating files that allows group read */ +#define PG_FILE_MODE_GROUP (S_IRUSR | S_IWUSR | S_IRGRP) + +/* Modes for creating directories and files in the data directory */ +extern int pg_dir_create_mode; +extern int pg_file_create_mode; + +/* Mode mask to pass to umask() */ +extern int pg_mode_mask; + +/* Set permissions and mask based on the provided mode */ +extern void SetDataDirectoryCreatePerm(int dataDirMode); + +/* Set permissions and mask based on the mode of the data directory */ +extern bool GetDataDirectoryCreatePerm(const char *dataDir); + +#endif /* FILE_PERM_H */ diff --git a/src/include/common/file_utils.h b/src/include/common/file_utils.h index 52af7f0baa..71f638562e 100644 --- a/src/include/common/file_utils.h +++ b/src/include/common/file_utils.h @@ -5,7 +5,7 @@ * Assorted utility functions to work on files. * * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/include/common/file_utils.h diff --git a/src/include/common/int.h b/src/include/common/int.h new file mode 100644 index 0000000000..ff410f0eae --- /dev/null +++ b/src/include/common/int.h @@ -0,0 +1,273 @@ +/*------------------------------------------------------------------------- + * + * int.h + * Routines to perform integer math, while checking for overflows. + * + * The routines in this file are intended to be well defined C, without + * relying on compiler flags like -fwrapv. + * + * To reduce the overhead of these routines try to use compiler intrinsics + * where available. That's not that important for the 16, 32 bit cases, but + * the 64 bit cases can be considerably faster with intrinsics. In case no + * intrinsics are available 128 bit math is used where available. + * + * Copyright (c) 2017-2018, PostgreSQL Global Development Group + * + * src/include/common/int.h + * + *------------------------------------------------------------------------- + */ +#ifndef COMMON_INT_H +#define COMMON_INT_H + +/* + * If a + b overflows, return true, otherwise store the result of a + b into + * *result. The content of *result is implementation defined in case of + * overflow. + */ +static inline bool +pg_add_s16_overflow(int16 a, int16 b, int16 *result) +{ +#if defined(HAVE__BUILTIN_OP_OVERFLOW) + return __builtin_add_overflow(a, b, result); +#else + int32 res = (int32) a + (int32) b; + + if (res > PG_INT16_MAX || res < PG_INT16_MIN) + { + *result = 0x5EED; /* to avoid spurious warnings */ + return true; + } + *result = (int16) res; + return false; +#endif +} + +/* + * If a - b overflows, return true, otherwise store the result of a - b into + * *result. The content of *result is implementation defined in case of + * overflow. 
+ */ +static inline bool +pg_sub_s16_overflow(int16 a, int16 b, int16 *result) +{ +#if defined(HAVE__BUILTIN_OP_OVERFLOW) + return __builtin_sub_overflow(a, b, result); +#else + int32 res = (int32) a - (int32) b; + + if (res > PG_INT16_MAX || res < PG_INT16_MIN) + { + *result = 0x5EED; /* to avoid spurious warnings */ + return true; + } + *result = (int16) res; + return false; +#endif +} + +/* + * If a * b overflows, return true, otherwise store the result of a * b into + * *result. The content of *result is implementation defined in case of + * overflow. + */ +static inline bool +pg_mul_s16_overflow(int16 a, int16 b, int16 *result) +{ +#if defined(HAVE__BUILTIN_OP_OVERFLOW) + return __builtin_mul_overflow(a, b, result); +#else + int32 res = (int32) a * (int32) b; + + if (res > PG_INT16_MAX || res < PG_INT16_MIN) + { + *result = 0x5EED; /* to avoid spurious warnings */ + return true; + } + *result = (int16) res; + return false; +#endif +} + +/* + * If a + b overflows, return true, otherwise store the result of a + b into + * *result. The content of *result is implementation defined in case of + * overflow. + */ +static inline bool +pg_add_s32_overflow(int32 a, int32 b, int32 *result) +{ +#if defined(HAVE__BUILTIN_OP_OVERFLOW) + return __builtin_add_overflow(a, b, result); +#else + int64 res = (int64) a + (int64) b; + + if (res > PG_INT32_MAX || res < PG_INT32_MIN) + { + *result = 0x5EED; /* to avoid spurious warnings */ + return true; + } + *result = (int32) res; + return false; +#endif +} + +/* + * If a - b overflows, return true, otherwise store the result of a - b into + * *result. The content of *result is implementation defined in case of + * overflow. + */ +static inline bool +pg_sub_s32_overflow(int32 a, int32 b, int32 *result) +{ +#if defined(HAVE__BUILTIN_OP_OVERFLOW) + return __builtin_sub_overflow(a, b, result); +#else + int64 res = (int64) a - (int64) b; + + if (res > PG_INT32_MAX || res < PG_INT32_MIN) + { + *result = 0x5EED; /* to avoid spurious warnings */ + return true; + } + *result = (int32) res; + return false; +#endif +} + +/* + * If a * b overflows, return true, otherwise store the result of a * b into + * *result. The content of *result is implementation defined in case of + * overflow. + */ +static inline bool +pg_mul_s32_overflow(int32 a, int32 b, int32 *result) +{ +#if defined(HAVE__BUILTIN_OP_OVERFLOW) + return __builtin_mul_overflow(a, b, result); +#else + int64 res = (int64) a * (int64) b; + + if (res > PG_INT32_MAX || res < PG_INT32_MIN) + { + *result = 0x5EED; /* to avoid spurious warnings */ + return true; + } + *result = (int32) res; + return false; +#endif +} + +/* + * If a + b overflows, return true, otherwise store the result of a + b into + * *result. The content of *result is implementation defined in case of + * overflow. + */ +static inline bool +pg_add_s64_overflow(int64 a, int64 b, int64 *result) +{ +#if defined(HAVE__BUILTIN_OP_OVERFLOW) + return __builtin_add_overflow(a, b, result); +#elif defined(HAVE_INT128) + int128 res = (int128) a + (int128) b; + + if (res > PG_INT64_MAX || res < PG_INT64_MIN) + { + *result = 0x5EED; /* to avoid spurious warnings */ + return true; + } + *result = (int64) res; + return false; +#else + if ((a > 0 && b > 0 && a > PG_INT64_MAX - b) || + (a < 0 && b < 0 && a < PG_INT64_MIN - b)) + { + *result = 0x5EED; /* to avoid spurious warnings */ + return true; + } + *result = a + b; + return false; +#endif +} + +/* + * If a - b overflows, return true, otherwise store the result of a - b into + * *result. 
The content of *result is implementation defined in case of + * overflow. + */ +static inline bool +pg_sub_s64_overflow(int64 a, int64 b, int64 *result) +{ +#if defined(HAVE__BUILTIN_OP_OVERFLOW) + return __builtin_sub_overflow(a, b, result); +#elif defined(HAVE_INT128) + int128 res = (int128) a - (int128) b; + + if (res > PG_INT64_MAX || res < PG_INT64_MIN) + { + *result = 0x5EED; /* to avoid spurious warnings */ + return true; + } + *result = (int64) res; + return false; +#else + if ((a < 0 && b > 0 && a < PG_INT64_MIN + b) || + (a > 0 && b < 0 && a > PG_INT64_MAX + b)) + { + *result = 0x5EED; /* to avoid spurious warnings */ + return true; + } + *result = a - b; + return false; +#endif +} + +/* + * If a * b overflows, return true, otherwise store the result of a * b into + * *result. The content of *result is implementation defined in case of + * overflow. + */ +static inline bool +pg_mul_s64_overflow(int64 a, int64 b, int64 *result) +{ +#if defined(HAVE__BUILTIN_OP_OVERFLOW) + return __builtin_mul_overflow(a, b, result); +#elif defined(HAVE_INT128) + int128 res = (int128) a * (int128) b; + + if (res > PG_INT64_MAX || res < PG_INT64_MIN) + { + *result = 0x5EED; /* to avoid spurious warnings */ + return true; + } + *result = (int64) res; + return false; +#else + /* + * Overflow can only happen if at least one value is outside the range + * sqrt(min)..sqrt(max) so check that first as the division can be quite a + * bit more expensive than the multiplication. + * + * Multiplying by 0 or 1 can't overflow of course and checking for 0 + * separately avoids any risk of dividing by 0. Be careful about dividing + * INT_MIN by -1 also, note reversing the a and b to ensure we're always + * dividing it by a positive value. + * + */ + if ((a > PG_INT32_MAX || a < PG_INT32_MIN || + b > PG_INT32_MAX || b < PG_INT32_MIN) && + a != 0 && a != 1 && b != 0 && b != 1 && + ((a > 0 && b > 0 && a > PG_INT64_MAX / b) || + (a > 0 && b < 0 && b < PG_INT64_MIN / a) || + (a < 0 && b > 0 && a < PG_INT64_MIN / b) || + (a < 0 && b < 0 && a < PG_INT64_MAX / b))) + { + *result = 0x5EED; /* to avoid spurious warnings */ + return true; + } + *result = a * b; + return false; +#endif +} + +#endif /* COMMON_INT_H */ diff --git a/src/include/common/int128.h b/src/include/common/int128.h index af2c93da46..2654f18f85 100644 --- a/src/include/common/int128.h +++ b/src/include/common/int128.h @@ -8,7 +8,7 @@ * * See src/tools/testint128.c for a simple test harness for this file. * - * Copyright (c) 2017, PostgreSQL Global Development Group + * Copyright (c) 2017-2018, PostgreSQL Global Development Group * * src/include/common/int128.h * diff --git a/src/include/common/ip.h b/src/include/common/ip.h index f530139876..33147891d1 100644 --- a/src/include/common/ip.h +++ b/src/include/common/ip.h @@ -5,7 +5,7 @@ * * These definitions are used by both frontend and backend code. 
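The new common/int.h above checks for overflow by preferring __builtin_*_overflow when available and otherwise computing in a wider type (or, for the 64-bit cases without int128, testing preconditions against the limits before operating). A standalone sketch of the same idiom for 32-bit addition; add_s32_overflow here is an illustrative stand-in, not the header's helper.

#include <limits.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/*
 * Return true on overflow, otherwise store a + b into *result.
 * Prefers the GCC/Clang intrinsic; otherwise widens to 64 bit.
 */
static inline bool
add_s32_overflow(int32_t a, int32_t b, int32_t *result)
{
#if defined(__GNUC__) || defined(__clang__)
    return __builtin_add_overflow(a, b, result);
#else
    int64_t res = (int64_t) a + (int64_t) b;

    if (res > INT32_MAX || res < INT32_MIN)
        return true;            /* *result deliberately left unspecified */
    *result = (int32_t) res;
    return false;
#endif
}

int
main(void)
{
    int32_t sum;

    if (add_s32_overflow(INT32_MAX, 1, &sum))
        puts("overflow detected");
    else
        printf("sum = %d\n", sum);
    return 0;
}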
* - * Copyright (c) 2003-2017, PostgreSQL Global Development Group + * Copyright (c) 2003-2018, PostgreSQL Global Development Group * * src/include/common/ip.h * diff --git a/src/include/common/keywords.h b/src/include/common/keywords.h index 60522715a8..0b31505b66 100644 --- a/src/include/common/keywords.h +++ b/src/include/common/keywords.h @@ -4,7 +4,7 @@ * lexical token lookup for key words in PostgreSQL * * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/include/common/keywords.h diff --git a/src/include/common/link-canary.h b/src/include/common/link-canary.h new file mode 100644 index 0000000000..917faae9ef --- /dev/null +++ b/src/include/common/link-canary.h @@ -0,0 +1,17 @@ +/*------------------------------------------------------------------------- + * + * link-canary.h + * Detect whether src/common functions came from frontend or backend. + * + * Copyright (c) 2018, PostgreSQL Global Development Group + * + * src/include/common/link-canary.h + * + *------------------------------------------------------------------------- + */ +#ifndef LINK_CANARY_H +#define LINK_CANARY_H + +extern bool pg_link_canary_is_frontend(void); + +#endif /* LINK_CANARY_H */ diff --git a/src/include/common/md5.h b/src/include/common/md5.h index ccaaeddbf4..905d3aa219 100644 --- a/src/include/common/md5.h +++ b/src/include/common/md5.h @@ -6,7 +6,7 @@ * These definitions are needed by both frontend and backend code to work * with MD5-encrypted passwords. * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/include/common/md5.h diff --git a/src/include/common/relpath.h b/src/include/common/relpath.h index ec5ef99451..82d817a53c 100644 --- a/src/include/common/relpath.h +++ b/src/include/common/relpath.h @@ -3,7 +3,7 @@ * relpath.h * Declarations for GetRelationPath() and friends * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/include/common/relpath.h @@ -13,6 +13,22 @@ #ifndef RELPATH_H #define RELPATH_H +/* + * 'pgrminclude ignore' needed here because CppAsString2() does not throw + * an error if the symbol is not defined. + */ +#include "catalog/catversion.h" /* pgrminclude ignore */ + + +/* + * Name of major-version-specific tablespace subdirectories + */ +#define TABLESPACE_VERSION_DIRECTORY "PG_" PG_MAJORVERSION "_" \ + CppAsString2(CATALOG_VERSION_NO) + +/* Characters to allow for an OID in a relation path */ +#define OIDCHARS 10 /* max chars printed by %u */ + /* * Stuff for fork names. 
* diff --git a/src/include/common/restricted_token.h b/src/include/common/restricted_token.h index 51be5a760c..a4a263fdee 100644 --- a/src/include/common/restricted_token.h +++ b/src/include/common/restricted_token.h @@ -2,7 +2,7 @@ * restricted_token.h * helper routine to ensure restricted token on Windows * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/include/common/restricted_token.h diff --git a/src/include/common/saslprep.h b/src/include/common/saslprep.h index c7b620cf19..dc1af15030 100644 --- a/src/include/common/saslprep.h +++ b/src/include/common/saslprep.h @@ -5,7 +5,7 @@ * * These definitions are used by both frontend and backend code. * - * Copyright (c) 2017, PostgreSQL Global Development Group + * Copyright (c) 2017-2018, PostgreSQL Global Development Group * * src/include/common/saslprep.h * diff --git a/src/include/common/scram-common.h b/src/include/common/scram-common.h index ebb733df4b..2131303169 100644 --- a/src/include/common/scram-common.h +++ b/src/include/common/scram-common.h @@ -3,7 +3,7 @@ * scram-common.h * Declarations for helper functions used for SCRAM authentication * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/include/common/scram-common.h @@ -15,6 +15,10 @@ #include "common/sha2.h" +/* Name of SCRAM mechanisms per IANA */ +#define SCRAM_SHA_256_NAME "SCRAM-SHA-256" +#define SCRAM_SHA_256_PLUS_NAME "SCRAM-SHA-256-PLUS" /* with channel binding */ + /* Length of SCRAM keys (client and server) */ #define SCRAM_KEY_LEN PG_SHA256_DIGEST_LENGTH @@ -28,10 +32,17 @@ */ #define SCRAM_RAW_NONCE_LEN 18 -/* length of salt when generating new verifiers */ -#define SCRAM_DEFAULT_SALT_LEN 12 +/* + * Length of salt when generating new verifiers, in bytes. (It will be stored + * and sent over the wire encoded in Base64.) 16 bytes is what the example in + * RFC 7677 uses. + */ +#define SCRAM_DEFAULT_SALT_LEN 16 -/* default number of iterations when generating verifier */ +/* + * Default number of iterations when generating verifier. Should be at least + * 4096 per RFC 7677. + */ #define SCRAM_DEFAULT_ITERATIONS 4096 /* diff --git a/src/include/common/sha2.h b/src/include/common/sha2.h index a31b3979d8..f3fd0d0d28 100644 --- a/src/include/common/sha2.h +++ b/src/include/common/sha2.h @@ -3,7 +3,7 @@ * sha2.h * Generic headers for SHA224, 256, 384 AND 512 functions of PostgreSQL. 
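The scram-common.h change above lengthens the default verifier salt from 12 to 16 bytes, the size used in the RFC 7677 example, and keeps the default iteration count at the RFC's 4096 minimum. Since the salt is stored and transmitted Base64-encoded, n bytes occupy 4*ceil(n/3) characters on the wire, so the new default encodes to 24 characters instead of 16. A quick standalone check of that arithmetic:

#include <stdio.h>

/* Padded Base64: n input bytes become 4 * ceil(n / 3) characters. */
static unsigned
base64_encoded_len(unsigned nbytes)
{
    return 4 * ((nbytes + 2) / 3);
}

int
main(void)
{
    printf("12-byte salt -> %u chars\n", base64_encoded_len(12));   /* 16 */
    printf("16-byte salt -> %u chars\n", base64_encoded_len(16));   /* 24 */
    return 0;
}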
* - * Portions Copyright (c) 2016-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 2016-2018, PostgreSQL Global Development Group * * IDENTIFICATION * src/include/common/sha2.h diff --git a/src/include/common/string.h b/src/include/common/string.h index 5f3ea71d61..7c3594557a 100644 --- a/src/include/common/string.h +++ b/src/include/common/string.h @@ -2,7 +2,7 @@ * string.h * string handling helpers * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/include/common/string.h @@ -11,5 +11,8 @@ #define COMMON_STRING_H extern bool pg_str_endswith(const char *str, const char *end); +extern int strtoint(const char *pg_restrict str, char **pg_restrict endptr, + int base); +extern void pg_clean_ascii(char *str); #endif /* COMMON_STRING_H */ diff --git a/src/include/common/unicode_norm.h b/src/include/common/unicode_norm.h index 8741209751..34ca2622ec 100644 --- a/src/include/common/unicode_norm.h +++ b/src/include/common/unicode_norm.h @@ -5,7 +5,7 @@ * * These definitions are used by both frontend and backend code. * - * Copyright (c) 2017, PostgreSQL Global Development Group + * Copyright (c) 2017-2018, PostgreSQL Global Development Group * * src/include/common/unicode_norm.h * diff --git a/src/include/common/unicode_norm_table.h b/src/include/common/unicode_norm_table.h index da08e487e3..3444bc8d80 100644 --- a/src/include/common/unicode_norm_table.h +++ b/src/include/common/unicode_norm_table.h @@ -3,7 +3,7 @@ * unicode_norm_table.h * Composition table used for Unicode normalization * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/include/common/unicode_norm_table.h diff --git a/src/include/common/username.h b/src/include/common/username.h index 735572d382..1bb3496f9e 100644 --- a/src/include/common/username.h +++ b/src/include/common/username.h @@ -2,7 +2,7 @@ * username.h * lookup effective username * - * Copyright (c) 2003-2017, PostgreSQL Global Development Group + * Copyright (c) 2003-2018, PostgreSQL Global Development Group * * src/include/common/username.h */ diff --git a/src/include/datatype/timestamp.h b/src/include/datatype/timestamp.h index 6f48d1c71b..f5b6026ef5 100644 --- a/src/include/datatype/timestamp.h +++ b/src/include/datatype/timestamp.h @@ -5,7 +5,7 @@ * * Note: this file must be includable in both frontend and backend contexts. 
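Among the common/string.h additions above is strtoint(), an int-returning analogue of strtol(). The sketch below is an assumption about the intended behavior (parse via strtol, report values outside int range through ERANGE), not a copy of the actual implementation; parse_int is an illustrative name.

#include <errno.h>
#include <limits.h>
#include <stdio.h>
#include <stdlib.h>

/* Illustrative strtol wrapper returning int; clamps and flags ERANGE. */
static int
parse_int(const char *str, char **endptr, int base)
{
    long        val = strtol(str, endptr, base);

    if (val > INT_MAX)
    {
        errno = ERANGE;
        return INT_MAX;
    }
    if (val < INT_MIN)
    {
        errno = ERANGE;
        return INT_MIN;
    }
    return (int) val;
}

int
main(void)
{
    char       *end;
    int         v = parse_int("12345xyz", &end, 10);

    printf("value=%d, rest=\"%s\", errno=%d\n", v, end, errno);
    return 0;
}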
* - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/include/datatype/timestamp.h diff --git a/src/include/executor/execExpr.h b/src/include/executor/execExpr.h index 8ee0496e01..ac53935d70 100644 --- a/src/include/executor/execExpr.h +++ b/src/include/executor/execExpr.h @@ -4,7 +4,7 @@ * Low level infrastructure related to expression evaluation * * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/include/executor/execExpr.h @@ -14,9 +14,11 @@ #ifndef EXEC_EXPR_H #define EXEC_EXPR_H +#include "executor/nodeAgg.h" #include "nodes/execnodes.h" -/* forward reference to avoid circularity */ +/* forward references to avoid circularity */ +struct ExprEvalStep; struct ArrayRefState; /* Bits in ExprState->flags (see also execnodes.h for public flag bits): */ @@ -25,6 +27,11 @@ struct ArrayRefState; /* jump-threading is in use */ #define EEO_FLAG_DIRECT_THREADED (1 << 2) +/* Typical API for out-of-line evaluation subroutines */ +typedef void (*ExecEvalSubroutine) (ExprState *state, + struct ExprEvalStep *op, + ExprContext *econtext); + /* * Discriminator for ExprEvalSteps. * @@ -45,12 +52,8 @@ typedef enum ExprEvalOp EEOP_SCAN_FETCHSOME, /* compute non-system Var value */ - /* "FIRST" variants are used only the first time through */ - EEOP_INNER_VAR_FIRST, EEOP_INNER_VAR, - EEOP_OUTER_VAR_FIRST, EEOP_OUTER_VAR, - EEOP_SCAN_VAR_FIRST, EEOP_SCAN_VAR, /* compute system Var value */ @@ -61,8 +64,11 @@ typedef enum ExprEvalOp /* compute wholerow Var */ EEOP_WHOLEROW, - /* compute non-system Var value, assign it into ExprState's resultslot */ - /* (these are not used if _FIRST checks would be needed) */ + /* + * Compute non-system Var value, assign it into ExprState's resultslot. + * These are not used if a CheckVarSlotCompatibility() check would be + * needed. + */ EEOP_ASSIGN_INNER_VAR, EEOP_ASSIGN_OUTER_VAR, EEOP_ASSIGN_SCAN_VAR, @@ -131,6 +137,7 @@ typedef enum ExprEvalOp /* evaluate PARAM_EXEC/EXTERN parameters */ EEOP_PARAM_EXEC, EEOP_PARAM_EXTERN, + EEOP_PARAM_CALLBACK, /* return CaseTestExpr value */ EEOP_CASE_TESTVAL, @@ -141,6 +148,7 @@ typedef enum ExprEvalOp /* evaluate assorted special-purpose expression types */ EEOP_IOCOERCE, EEOP_DISTINCT, + EEOP_NOT_DISTINCT, EEOP_NULLIF, EEOP_SQLVALUEFUNCTION, EEOP_CURRENTOFEXPR, @@ -212,6 +220,17 @@ typedef enum ExprEvalOp EEOP_SUBPLAN, EEOP_ALTERNATIVE_SUBPLAN, + /* aggregation related nodes */ + EEOP_AGG_STRICT_DESERIALIZE, + EEOP_AGG_DESERIALIZE, + EEOP_AGG_STRICT_INPUT_CHECK, + EEOP_AGG_INIT_TRANS, + EEOP_AGG_STRICT_TRANS_CHECK, + EEOP_AGG_PLAIN_TRANS_BYVAL, + EEOP_AGG_PLAIN_TRANS, + EEOP_AGG_ORDERED_TRANS_DATUM, + EEOP_AGG_ORDERED_TRANS_TUPLE, + /* non-existent operation, used e.g. 
to check array lengths */ EEOP_LAST } ExprEvalOp; @@ -243,6 +262,7 @@ typedef struct ExprEvalStep { /* attribute number up to which to fetch (inclusive) */ int last_var; + TupleDesc known_desc; } fetch; /* for EEOP_INNER/OUTER/SCAN_[SYS]VAR[_FIRST] */ @@ -331,6 +351,15 @@ typedef struct ExprEvalStep Oid paramtype; /* OID of parameter's datatype */ } param; + /* for EEOP_PARAM_CALLBACK */ + struct + { + ExecEvalSubroutine paramfunc; /* add-on evaluation subroutine */ + void *paramarg; /* private data for same */ + int paramid; /* numeric ID for parameter */ + Oid paramtype; /* OID of parameter's datatype */ + } cparam; + /* for EEOP_CASE_TESTVAL/DOMAIN_TESTVAL */ struct { @@ -385,10 +414,8 @@ typedef struct ExprEvalStep /* for EEOP_ARRAYCOERCE */ struct { - ArrayCoerceExpr *coerceexpr; + ExprState *elemexprstate; /* null if no per-element work */ Oid resultelemtype; /* element type of result array */ - FmgrInfo *elemfunc; /* lookup info for element coercion - * function */ struct ArrayMapState *amstate; /* workspace for array_map */ } arraycoerce; @@ -560,6 +587,55 @@ typedef struct ExprEvalStep /* out-of-line state, created by nodeSubplan.c */ AlternativeSubPlanState *asstate; } alternative_subplan; + + /* for EEOP_AGG_*DESERIALIZE */ + struct + { + AggState *aggstate; + FunctionCallInfo fcinfo_data; + int jumpnull; + } agg_deserialize; + + /* for EEOP_AGG_STRICT_INPUT_CHECK */ + struct + { + bool *nulls; + int nargs; + int jumpnull; + } agg_strict_input_check; + + /* for EEOP_AGG_INIT_TRANS */ + struct + { + AggState *aggstate; + AggStatePerTrans pertrans; + ExprContext *aggcontext; + int setno; + int transno; + int setoff; + int jumpnull; + } agg_init_trans; + + /* for EEOP_AGG_STRICT_TRANS_CHECK */ + struct + { + AggState *aggstate; + int setno; + int transno; + int setoff; + int jumpnull; + } agg_strict_trans_check; + + /* for EEOP_AGG_{PLAIN,ORDERED}_TRANS* */ + struct + { + AggState *aggstate; + AggStatePerTrans pertrans; + ExprContext *aggcontext; + int setno; + int transno; + int setoff; + } agg_trans; } d; } ExprEvalStep; @@ -600,15 +676,25 @@ typedef struct ArrayRefState } ArrayRefState; -extern void ExecReadyInterpretedExpr(ExprState *state); +/* functions in execExpr.c */ +extern void ExprEvalPushStep(ExprState *es, const ExprEvalStep *s); +/* functions in execExprInterp.c */ +extern void ExecReadyInterpretedExpr(ExprState *state); extern ExprEvalOp ExecEvalStepOp(ExprState *state, ExprEvalStep *op); +extern Datum ExecInterpExprStillValid(ExprState *state, ExprContext *econtext, bool *isNull); +extern void CheckExprStillValid(ExprState *state, ExprContext *econtext); + /* * Non fast-path execution functions. These are externs instead of statics in * execExprInterp.c, because that allows them to be used by other methods of * expression evaluation, reducing code duplication. 
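The execExpr.h changes above add ExecEvalSubroutine, a common signature for out-of-line evaluation subroutines (used by steps such as the new EEOP_PARAM_CALLBACK), alongside the opcode-dispatched step array. A minimal standalone sketch of that style of step interpreter; the EvalState/EvalStep types and the two subroutines are invented for illustration.

#include <stddef.h>
#include <stdio.h>

struct EvalStep;                        /* forward reference, as in the header */
typedef struct EvalState EvalState;

/* Uniform signature for out-of-line step implementations. */
typedef void (*EvalSubroutine) (EvalState *state, struct EvalStep *step);

typedef struct EvalStep
{
    EvalSubroutine fn;                  /* out-of-line implementation */
    int         arg;                    /* per-step private data */
} EvalStep;

struct EvalState
{
    int         accum;                  /* running result */
};

static void
step_add_const(EvalState *state, EvalStep *step)
{
    state->accum += step->arg;
}

static void
step_negate(EvalState *state, EvalStep *step)
{
    (void) step;
    state->accum = -state->accum;
}

int
main(void)
{
    EvalState   state = {0};
    EvalStep    program[] = {
        {step_add_const, 7},
        {step_add_const, 5},
        {step_negate, 0},
    };

    /* The interpreter just walks the step array and calls each subroutine. */
    for (size_t i = 0; i < sizeof(program) / sizeof(program[0]); i++)
        program[i].fn(&state, &program[i]);

    printf("result = %d\n", state.accum);   /* -12 */
    return 0;
}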
*/ +extern void ExecEvalFuncExprFusage(ExprState *state, ExprEvalStep *op, + ExprContext *econtext); +extern void ExecEvalFuncExprStrictFusage(ExprState *state, ExprEvalStep *op, + ExprContext *econtext); extern void ExecEvalParamExec(ExprState *state, ExprEvalStep *op, ExprContext *econtext); extern void ExecEvalParamExtern(ExprState *state, ExprEvalStep *op, @@ -621,7 +707,8 @@ extern void ExecEvalRowNull(ExprState *state, ExprEvalStep *op, extern void ExecEvalRowNotNull(ExprState *state, ExprEvalStep *op, ExprContext *econtext); extern void ExecEvalArrayExpr(ExprState *state, ExprEvalStep *op); -extern void ExecEvalArrayCoerce(ExprState *state, ExprEvalStep *op); +extern void ExecEvalArrayCoerce(ExprState *state, ExprEvalStep *op, + ExprContext *econtext); extern void ExecEvalRow(ExprState *state, ExprEvalStep *op); extern void ExecEvalMinMax(ExprState *state, ExprEvalStep *op); extern void ExecEvalFieldSelect(ExprState *state, ExprEvalStep *op, @@ -647,5 +734,16 @@ extern void ExecEvalAlternativeSubPlan(ExprState *state, ExprEvalStep *op, ExprContext *econtext); extern void ExecEvalWholeRowVar(ExprState *state, ExprEvalStep *op, ExprContext *econtext); +extern void ExecEvalSysVar(ExprState *state, ExprEvalStep *op, + ExprContext *econtext, TupleTableSlot *slot); + +extern void ExecAggInitGroup(AggState *aggstate, AggStatePerTrans pertrans, AggStatePerGroup pergroup); +extern Datum ExecAggTransReparent(AggState *aggstate, AggStatePerTrans pertrans, + Datum newValue, bool newValueIsNull, + Datum oldValue, bool oldValueIsNull); +extern void ExecEvalAggOrderedTransDatum(ExprState *state, ExprEvalStep *op, + ExprContext *econtext); +extern void ExecEvalAggOrderedTransTuple(ExprState *state, ExprEvalStep *op, + ExprContext *econtext); #endif /* EXEC_EXPR_H */ diff --git a/src/include/executor/execParallel.h b/src/include/executor/execParallel.h index bd0a87fa04..1b4c35e5f1 100644 --- a/src/include/executor/execParallel.h +++ b/src/include/executor/execParallel.h @@ -2,7 +2,7 @@ * execParallel.h * POSTGRES parallel execution interface * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * IDENTIFICATION @@ -23,20 +23,27 @@ typedef struct SharedExecutorInstrumentation SharedExecutorInstrumentation; typedef struct ParallelExecutorInfo { - PlanState *planstate; - ParallelContext *pcxt; - BufferUsage *buffer_usage; - SharedExecutorInstrumentation *instrumentation; - shm_mq_handle **tqueue; - dsa_area *area; - bool finished; + PlanState *planstate; /* plan subtree we're running in parallel */ + ParallelContext *pcxt; /* parallel context we're using */ + BufferUsage *buffer_usage; /* points to bufusage area in DSM */ + SharedExecutorInstrumentation *instrumentation; /* optional */ + struct SharedJitInstrumentation *jit_instrumentation; /* optional */ + dsa_area *area; /* points to DSA area in DSM */ + dsa_pointer param_exec; /* serialized PARAM_EXEC parameters */ + bool finished; /* set true by ExecParallelFinish */ + /* These two arrays have pcxt->nworkers_launched entries: */ + shm_mq_handle **tqueue; /* tuple queues for worker output */ + struct TupleQueueReader **reader; /* tuple reader/writer support */ } ParallelExecutorInfo; extern ParallelExecutorInfo *ExecInitParallelPlan(PlanState *planstate, - EState *estate, int nworkers); + EState *estate, Bitmapset *sendParam, int nworkers, + int64 tuples_needed); +extern void 
ExecParallelCreateReaders(ParallelExecutorInfo *pei); extern void ExecParallelFinish(ParallelExecutorInfo *pei); extern void ExecParallelCleanup(ParallelExecutorInfo *pei); -extern void ExecParallelReinitialize(ParallelExecutorInfo *pei); +extern void ExecParallelReinitialize(PlanState *planstate, + ParallelExecutorInfo *pei, Bitmapset *sendParam); extern void ParallelQueryMain(dsm_segment *seg, shm_toc *toc); diff --git a/src/include/executor/execPartition.h b/src/include/executor/execPartition.h new file mode 100644 index 0000000000..3e08104ea4 --- /dev/null +++ b/src/include/executor/execPartition.h @@ -0,0 +1,202 @@ +/*-------------------------------------------------------------------- + * execPartition.h + * POSTGRES partitioning executor interface + * + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group + * Portions Copyright (c) 1994, Regents of the University of California + * + * IDENTIFICATION + * src/include/executor/execPartition.h + *-------------------------------------------------------------------- + */ + +#ifndef EXECPARTITION_H +#define EXECPARTITION_H + +#include "nodes/execnodes.h" +#include "nodes/parsenodes.h" +#include "nodes/plannodes.h" +#include "partitioning/partprune.h" + +/* See execPartition.c for the definition. */ +typedef struct PartitionDispatchData *PartitionDispatch; + +/*----------------------- + * PartitionTupleRouting - Encapsulates all information required to execute + * tuple-routing between partitions. + * + * partition_dispatch_info Array of PartitionDispatch objects with one + * entry for every partitioned table in the + * partition tree. + * num_dispatch number of partitioned tables in the partition + * tree (= length of partition_dispatch_info[]) + * partition_oids Array of leaf partitions OIDs with one entry + * for every leaf partition in the partition tree, + * initialized in full by + * ExecSetupPartitionTupleRouting. + * partitions Array of ResultRelInfo* objects with one entry + * for every leaf partition in the partition tree, + * initialized lazily by ExecInitPartitionInfo. + * num_partitions Number of leaf partitions in the partition tree + * (= 'partitions_oid'/'partitions' array length) + * parent_child_tupconv_maps Array of TupleConversionMap objects with one + * entry for every leaf partition (required to + * convert tuple from the root table's rowtype to + * a leaf partition's rowtype after tuple routing + * is done) + * child_parent_tupconv_maps Array of TupleConversionMap objects with one + * entry for every leaf partition (required to + * convert an updated tuple from the leaf + * partition's rowtype to the root table's rowtype + * so that tuple routing can be done) + * child_parent_map_not_required Array of bool. True value means that a map is + * determined to be not required for the given + * partition. False means either we haven't yet + * checked if a map is required, or it was + * determined to be required. + * subplan_partition_offsets Integer array ordered by UPDATE subplans. Each + * element of this array has the index into the + * corresponding partition in partitions array. + * num_subplan_partition_offsets Length of 'subplan_partition_offsets' array + * partition_tuple_slots Array of TupleTableSlot objects; if non-NULL, + * contains one entry for every leaf partition, + * of which only those of the leaf partitions + * whose attribute numbers differ from the root + * parent have a non-NULL value. 
NULL if all of + * the partitions encountered by a given command + * happen to have same rowtype as the root parent + * root_tuple_slot TupleTableSlot to be used to transiently hold + * copy of a tuple that's being moved across + * partitions in the root partitioned table's + * rowtype + *----------------------- + */ +typedef struct PartitionTupleRouting +{ + PartitionDispatch *partition_dispatch_info; + int num_dispatch; + Oid *partition_oids; + ResultRelInfo **partitions; + int num_partitions; + TupleConversionMap **parent_child_tupconv_maps; + TupleConversionMap **child_parent_tupconv_maps; + bool *child_parent_map_not_required; + int *subplan_partition_offsets; + int num_subplan_partition_offsets; + TupleTableSlot **partition_tuple_slots; + TupleTableSlot *root_tuple_slot; +} PartitionTupleRouting; + +/* + * PartitionedRelPruningData - Per-partitioned-table data for run-time pruning + * of partitions. For a multilevel partitioned table, we have one of these + * for the topmost partition plus one for each non-leaf child partition. + * + * subplan_map[] and subpart_map[] have the same definitions as in + * PartitionedRelPruneInfo (see plannodes.h); though note that here, + * subpart_map contains indexes into PartitionPruningData.partrelprunedata[]. + * + * subplan_map Subplan index by partition index, or -1. + * subpart_map Subpart index by partition index, or -1. + * present_parts A Bitmapset of the partition indexes that we + * have subplans or subparts for. + * context Contains the context details required to call + * the partition pruning code. + * pruning_steps List of PartitionPruneSteps used to + * perform the actual pruning. + * do_initial_prune true if pruning should be performed during + * executor startup (for this partitioning level). + * do_exec_prune true if pruning should be performed during + * executor run (for this partitioning level). + */ +typedef struct PartitionedRelPruningData +{ + int *subplan_map; + int *subpart_map; + Bitmapset *present_parts; + PartitionPruneContext context; + List *pruning_steps; + bool do_initial_prune; + bool do_exec_prune; +} PartitionedRelPruningData; + +/* + * PartitionPruningData - Holds all the run-time pruning information for + * a single partitioning hierarchy containing one or more partitions. + * partrelprunedata[] is an array ordered such that parents appear before + * their children; in particular, the first entry is the topmost partition, + * which was actually named in the SQL query. + */ +typedef struct PartitionPruningData +{ + int num_partrelprunedata; /* number of array entries */ + PartitionedRelPruningData partrelprunedata[FLEXIBLE_ARRAY_MEMBER]; +} PartitionPruningData; + +/* + * PartitionPruneState - State object required for plan nodes to perform + * run-time partition pruning. + * + * This struct can be attached to plan types which support arbitrary Lists of + * subplans containing partitions, to allow subplans to be eliminated due to + * the clauses being unable to match to any tuple that the subplan could + * possibly produce. + * + * execparamids Contains paramids of PARAM_EXEC Params found within + * any of the partprunedata structs. Pruning must be + * done again each time the value of one of these + * parameters changes. + * other_subplans Contains indexes of subplans that don't belong to any + * "partprunedata", e.g UNION ALL children that are not + * partitioned tables, or a partitioned table that the + * planner deemed run-time pruning to be useless for. + * These must not be pruned. 
+ * prune_context A short-lived memory context in which to execute the + * partition pruning functions. + * do_initial_prune true if pruning should be performed during executor + * startup (at any hierarchy level). + * do_exec_prune true if pruning should be performed during + * executor run (at any hierarchy level). + * num_partprunedata Number of items in "partprunedata" array. + * partprunedata Array of PartitionPruningData pointers for the plan's + * partitioned relation(s), one for each partitioning + * hierarchy that requires run-time pruning. + */ +typedef struct PartitionPruneState +{ + Bitmapset *execparamids; + Bitmapset *other_subplans; + MemoryContext prune_context; + bool do_initial_prune; + bool do_exec_prune; + int num_partprunedata; + PartitionPruningData *partprunedata[FLEXIBLE_ARRAY_MEMBER]; +} PartitionPruneState; + +extern PartitionTupleRouting *ExecSetupPartitionTupleRouting(ModifyTableState *mtstate, + Relation rel); +extern int ExecFindPartition(ResultRelInfo *resultRelInfo, + PartitionDispatch *pd, + TupleTableSlot *slot, + EState *estate); +extern ResultRelInfo *ExecInitPartitionInfo(ModifyTableState *mtstate, + ResultRelInfo *resultRelInfo, + PartitionTupleRouting *proute, + EState *estate, int partidx); +extern void ExecInitRoutingInfo(ModifyTableState *mtstate, + EState *estate, + PartitionTupleRouting *proute, + ResultRelInfo *partRelInfo, + int partidx); +extern void ExecSetupChildParentMapForLeaf(PartitionTupleRouting *proute); +extern TupleConversionMap *TupConvMapForLeaf(PartitionTupleRouting *proute, + ResultRelInfo *rootRelInfo, int leaf_index); +extern void ExecCleanupTupleRouting(ModifyTableState *mtstate, + PartitionTupleRouting *proute); +extern PartitionPruneState *ExecCreatePartitionPruneState(PlanState *planstate, + PartitionPruneInfo *partitionpruneinfo); +extern Bitmapset *ExecFindMatchingSubPlans(PartitionPruneState *prunestate); +extern Bitmapset *ExecFindInitialMatchingSubPlans(PartitionPruneState *prunestate, + int nsubplans); + +#endif /* EXECPARTITION_H */ diff --git a/src/include/executor/execdebug.h b/src/include/executor/execdebug.h index cd04b60176..236b2cc4fd 100644 --- a/src/include/executor/execdebug.h +++ b/src/include/executor/execdebug.h @@ -7,7 +7,7 @@ * for debug printouts, because that's more flexible than printf(). * * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/include/executor/execdebug.h diff --git a/src/include/executor/execdesc.h b/src/include/executor/execdesc.h index 8c09961e28..10e9ded246 100644 --- a/src/include/executor/execdesc.h +++ b/src/include/executor/execdesc.h @@ -5,7 +5,7 @@ * and related modules. 
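The run-time pruning structures above reduce pruning results to index maps: subplan_map[] translates a partition index into the owning plan's subplan index, with -1 marking partitions that contribute no subplan, and the surviving subplans are then collected into a bitmapset. A standalone sketch of that final mapping step, using made-up arrays in place of the planner-provided data:

#include <stdio.h>

/*
 * Map surviving partition indexes to subplan indexes; -1 means the
 * partition contributes no subplan (e.g. it was pruned at plan time).
 */
int
main(void)
{
    int         subplan_map[] = {0, -1, 1, -1, 2}; /* indexed by partition */
    int         surviving[] = {0, 2, 3};    /* partitions left after pruning */
    int         nsurviving = 3;

    for (int i = 0; i < nsurviving; i++)
    {
        int         part = surviving[i];
        int         subplan = subplan_map[part];

        if (subplan >= 0)
            printf("run subplan %d for partition %d\n", subplan, part);
        else
            printf("partition %d has no subplan; skip\n", part);
    }
    return 0;
}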
* * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/include/executor/execdesc.h diff --git a/src/include/executor/executor.h b/src/include/executor/executor.h index 60326f9d03..8441265784 100644 --- a/src/include/executor/executor.h +++ b/src/include/executor/executor.h @@ -4,7 +4,7 @@ * support for the POSTGRES executor module * * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/include/executor/executor.h @@ -14,9 +14,9 @@ #ifndef EXECUTOR_H #define EXECUTOR_H -#include "catalog/partition.h" #include "executor/execdesc.h" #include "nodes/parsenodes.h" +#include "utils/memutils.h" /* @@ -112,26 +112,19 @@ extern bool execCurrentOf(CurrentOfExpr *cexpr, /* * prototypes from functions in execGrouping.c */ -extern bool execTuplesMatch(TupleTableSlot *slot1, - TupleTableSlot *slot2, - int numCols, - AttrNumber *matchColIdx, - FmgrInfo *eqfunctions, - MemoryContext evalContext); -extern bool execTuplesUnequal(TupleTableSlot *slot1, - TupleTableSlot *slot2, - int numCols, - AttrNumber *matchColIdx, - FmgrInfo *eqfunctions, - MemoryContext evalContext); -extern FmgrInfo *execTuplesMatchPrepare(int numCols, - Oid *eqOperators); +extern ExprState *execTuplesMatchPrepare(TupleDesc desc, + int numCols, + AttrNumber *keyColIdx, + Oid *eqOperators, + PlanState *parent); extern void execTuplesHashPrepare(int numCols, Oid *eqOperators, - FmgrInfo **eqFunctions, + Oid **eqFuncOids, FmgrInfo **hashFunctions); -extern TupleHashTable BuildTupleHashTable(int numCols, AttrNumber *keyColIdx, - FmgrInfo *eqfunctions, +extern TupleHashTable BuildTupleHashTable(PlanState *parent, + TupleDesc inputDesc, + int numCols, AttrNumber *keyColIdx, + Oid *eqfuncoids, FmgrInfo *hashfunctions, long nbuckets, Size additionalsize, MemoryContext tablecxt, @@ -141,7 +134,7 @@ extern TupleHashEntry LookupTupleHashEntry(TupleHashTable hashtable, bool *isnew); extern TupleHashEntry FindTupleHashEntry(TupleHashTable hashtable, TupleTableSlot *slot, - FmgrInfo *eqfunctions, + ExprState *eqcomp, FmgrInfo *hashfunctions); /* @@ -177,7 +170,7 @@ extern void ExecutorEnd(QueryDesc *queryDesc); extern void standard_ExecutorEnd(QueryDesc *queryDesc); extern void ExecutorRewind(QueryDesc *queryDesc); extern bool ExecCheckRTPerms(List *rangeTable, bool ereport_on_violation); -extern void CheckValidResultRel(Relation resultRel, CmdType operation); +extern void CheckValidResultRel(ResultRelInfo *resultRelInfo, CmdType operation); extern void InitResultRelInfo(ResultRelInfo *resultRelInfo, Relation resultRelationDesc, Index resultRelationIndex, @@ -188,6 +181,10 @@ extern void ExecCleanUpTriggerState(EState *estate); extern bool ExecContextForcesOids(PlanState *planstate, bool *hasoids); extern void ExecConstraints(ResultRelInfo *resultRelInfo, TupleTableSlot *slot, EState *estate); +extern bool ExecPartitionCheck(ResultRelInfo *resultRelInfo, + TupleTableSlot *slot, EState *estate, bool emitError); +extern void ExecPartitionCheckEmitError(ResultRelInfo *resultRelInfo, + TupleTableSlot *slot, EState *estate); extern void ExecWithCheckOptions(WCOKind kind, ResultRelInfo *resultRelInfo, TupleTableSlot *slot, EState *estate); extern LockTupleMode ExecUpdateLockMode(EState *estate, ResultRelInfo 
*relinfo); @@ -206,17 +203,6 @@ extern void EvalPlanQualSetPlan(EPQState *epqstate, extern void EvalPlanQualSetTuple(EPQState *epqstate, Index rti, HeapTuple tuple); extern HeapTuple EvalPlanQualGetTuple(EPQState *epqstate, Index rti); -extern void ExecSetupPartitionTupleRouting(Relation rel, - Index resultRTindex, - PartitionDispatch **pd, - ResultRelInfo **partitions, - TupleConversionMap ***tup_conv_maps, - TupleTableSlot **partition_tuple_slot, - int *num_parted, int *num_partitions); -extern int ExecFindPartition(ResultRelInfo *resultRelInfo, - PartitionDispatch *pd, - TupleTableSlot *slot, - EState *estate); #define EvalPlanQualSetSlot(epqstate, slot) ((epqstate)->origslot = (slot)) extern void EvalPlanQualFetchRowMarks(EPQState *epqstate); @@ -228,9 +214,11 @@ extern void EvalPlanQualEnd(EPQState *epqstate); * functions in execProcnode.c */ extern PlanState *ExecInitNode(Plan *node, EState *estate, int eflags); +extern void ExecSetExecProcNode(PlanState *node, ExecProcNodeMtd function); extern Node *MultiExecProcNode(PlanState *node); extern void ExecEndNode(PlanState *node); extern bool ExecShutdownNode(PlanState *node); +extern void ExecSetTupleBound(int64 tuples_needed, PlanState *child_node); /* ---------------------------------------------------------------- @@ -254,9 +242,17 @@ ExecProcNode(PlanState *node) * prototypes from functions in execExpr.c */ extern ExprState *ExecInitExpr(Expr *node, PlanState *parent); +extern ExprState *ExecInitExprWithParams(Expr *node, ParamListInfo ext_params); extern ExprState *ExecInitQual(List *qual, PlanState *parent); extern ExprState *ExecInitCheck(List *qual, PlanState *parent); extern List *ExecInitExprList(List *nodes, PlanState *parent); +extern ExprState *ExecBuildAggTrans(AggState *aggstate, struct AggStatePerPhaseData *phase, + bool doSort, bool doHash); +extern ExprState *ExecBuildGroupingEqual(TupleDesc ldesc, TupleDesc rdesc, + int numCols, + AttrNumber *keyColIdx, + Oid *eqfunctions, + PlanState *parent); extern ProjectionInfo *ExecBuildProjectionInfo(List *targetList, ExprContext *econtext, TupleTableSlot *slot, @@ -285,7 +281,7 @@ ExecEvalExpr(ExprState *state, ExprContext *econtext, bool *isNull) { - return (*state->evalfunc) (state, econtext, isNull); + return state->evalfunc(state, econtext, isNull); } #endif @@ -304,7 +300,7 @@ ExecEvalExprSwitchContext(ExprState *state, MemoryContext oldContext; oldContext = MemoryContextSwitchTo(econtext->ecxt_per_tuple_memory); - retDatum = (*state->evalfunc) (state, econtext, isNull); + retDatum = state->evalfunc(state, econtext, isNull); MemoryContextSwitchTo(oldContext); return retDatum; } @@ -344,7 +340,7 @@ ExecProject(ProjectionInfo *projInfo) * Successfully formed a result row. Mark the result slot as containing a * valid virtual tuple (inlined version of ExecStoreVirtualTuple()). */ - slot->tts_isempty = false; + slot->tts_flags &= ~TTS_FLAG_EMPTY; slot->tts_nvalid = slot->tts_tupleDescriptor->natts; return slot; @@ -382,6 +378,22 @@ ExecQual(ExprState *state, ExprContext *econtext) } #endif +/* + * ExecQualAndReset() - evaluate qual with ExecQual() and reset expression + * context. 
+ */ +#ifndef FRONTEND +static inline bool +ExecQualAndReset(ExprState *state, ExprContext *econtext) +{ + bool ret = ExecQual(state, econtext); + + /* inline ResetExprContext, to avoid ordering issue in this file */ + MemoryContextReset(econtext->ecxt_per_tuple_memory); + return ret; +} +#endif + extern bool ExecCheck(ExprState *state, ExprContext *context); /* @@ -398,6 +410,7 @@ extern SetExprState *ExecInitFunctionResultSet(Expr *expr, ExprContext *econtext, PlanState *parent); extern Datum ExecMakeFunctionResultSet(SetExprState *fcache, ExprContext *econtext, + MemoryContext argContext, bool *isNull, ExprDoneCond *isDone); @@ -416,9 +429,12 @@ extern void ExecScanReScan(ScanState *node); /* * prototypes from functions in execTuples.c */ -extern void ExecInitResultTupleSlot(EState *estate, PlanState *planstate); -extern void ExecInitScanTupleSlot(EState *estate, ScanState *scanstate); -extern TupleTableSlot *ExecInitExtraTupleSlot(EState *estate); +extern void ExecInitResultTypeTL(PlanState *planstate); +extern void ExecInitResultSlot(PlanState *planstate); +extern void ExecInitResultTupleSlotTL(PlanState *planstate); +extern void ExecInitScanTupleSlot(EState *estate, ScanState *scanstate, TupleDesc tupleDesc); +extern TupleTableSlot *ExecInitExtraTupleSlot(EState *estate, + TupleDesc tupleDesc); extern TupleTableSlot *ExecInitNullTupleSlot(EState *estate, TupleDesc tupType); extern TupleDesc ExecTypeFromTL(List *targetList, bool hasoid); @@ -487,19 +503,29 @@ extern ExprContext *MakePerTupleExprContext(EState *estate); } while (0) extern void ExecAssignExprContext(EState *estate, PlanState *planstate); -extern void ExecAssignResultType(PlanState *planstate, TupleDesc tupDesc); -extern void ExecAssignResultTypeFromTL(PlanState *planstate); extern TupleDesc ExecGetResultType(PlanState *planstate); extern void ExecAssignProjectionInfo(PlanState *planstate, TupleDesc inputDesc); +extern void ExecConditionalAssignProjectionInfo(PlanState *planstate, + TupleDesc inputDesc, Index varno); extern void ExecFreeExprContext(PlanState *planstate); extern void ExecAssignScanType(ScanState *scanstate, TupleDesc tupDesc); -extern void ExecAssignScanTypeFromOuterPlan(ScanState *scanstate); +extern void ExecCreateScanSlotFromOuterPlan(EState *estate, ScanState *scanstate); extern bool ExecRelationIsTargetRelation(EState *estate, Index scanrelid); extern Relation ExecOpenScanRelation(EState *estate, Index scanrelid, int eflags); -extern void ExecCloseScanRelation(Relation scanrel); + +extern void ExecInitRangeTable(EState *estate, List *rangeTable); + +static inline RangeTblEntry * +exec_rt_fetch(Index rti, EState *estate) +{ + Assert(rti > 0 && rti <= estate->es_range_table_size); + return estate->es_range_table_array[rti - 1]; +} + +extern Relation ExecGetRangeTableRelation(EState *estate, Index rti); extern int executor_errposition(EState *estate, int location); @@ -510,8 +536,6 @@ extern void UnregisterExprContextCallback(ExprContext *econtext, ExprContextCallbackFunction function, Datum arg); -extern void ExecLockNonLeafAppendTables(List *partitioned_rels, EState *estate); - extern Datum GetAttributeByName(HeapTupleHeader tuple, const char *attname, bool *isNull); extern Datum GetAttributeByNum(HeapTupleHeader tuple, AttrNumber attrno, diff --git a/src/include/executor/functions.h b/src/include/executor/functions.h index 718d8947a3..a309809ba8 100644 --- a/src/include/executor/functions.h +++ b/src/include/executor/functions.h @@ -4,7 +4,7 @@ * Declarations for execution of SQL-language functions. 
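Within the executor.h changes above, exec_rt_fetch() turns range-table lookup into a direct array access: range-table indexes are 1-based, so the inline helper asserts the bounds and subtracts one. A trivial standalone sketch of that accessor pattern with placeholder types (RangeEntry and ExecState are not PostgreSQL structures):

#include <assert.h>
#include <stdio.h>

typedef struct RangeEntry
{
    const char *relname;
} RangeEntry;

typedef struct ExecState
{
    RangeEntry **range_table_array;     /* built once at executor startup */
    int         range_table_size;
} ExecState;

/* 1-based lookup with a bounds assertion, mirroring the inline helper above. */
static inline RangeEntry *
rt_fetch_entry(int rti, ExecState *state)
{
    assert(rti > 0 && rti <= state->range_table_size);
    return state->range_table_array[rti - 1];
}

int
main(void)
{
    RangeEntry  a = {"orders"},
                b = {"customers"};
    RangeEntry *arr[] = {&a, &b};
    ExecState   state = {arr, 2};

    printf("rti 2 -> %s\n", rt_fetch_entry(2, &state)->relname);
    return 0;
}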
* * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/include/executor/functions.h @@ -29,6 +29,8 @@ extern SQLFunctionParseInfoPtr prepare_sql_fn_parse_info(HeapTuple procedureTupl extern void sql_fn_parser_setup(struct ParseState *pstate, SQLFunctionParseInfoPtr pinfo); +extern void check_sql_fn_statements(List *queryTreeList); + extern bool check_sql_fn_retval(Oid func_id, Oid rettype, List *queryTreeList, bool *modifyTargetList, diff --git a/src/include/executor/hashjoin.h b/src/include/executor/hashjoin.h index 82acadf85b..a9f9872a78 100644 --- a/src/include/executor/hashjoin.h +++ b/src/include/executor/hashjoin.h @@ -4,7 +4,7 @@ * internal structures for hash joins * * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/include/executor/hashjoin.h @@ -15,7 +15,10 @@ #define HASHJOIN_H #include "nodes/execnodes.h" +#include "port/atomics.h" +#include "storage/barrier.h" #include "storage/buffile.h" +#include "storage/lwlock.h" /* ---------------------------------------------------------------- * hash-join hash table structures @@ -63,7 +66,12 @@ typedef struct HashJoinTupleData { - struct HashJoinTupleData *next; /* link to next tuple in same bucket */ + /* link to next tuple in same bucket */ + union + { + struct HashJoinTupleData *unshared; + dsa_pointer shared; + } next; uint32 hashvalue; /* tuple's hash code */ /* Tuple data, in MinimalTuple format, follows on a MAXALIGN boundary */ } HashJoinTupleData; @@ -109,20 +117,170 @@ typedef struct HashSkewBucket typedef struct HashMemoryChunkData { int ntuples; /* number of tuples stored in this chunk */ - size_t maxlen; /* size of the buffer holding the tuples */ + size_t maxlen; /* size of the chunk's tuple buffer */ size_t used; /* number of buffer bytes already used */ - struct HashMemoryChunkData *next; /* pointer to the next chunk (linked - * list) */ + /* pointer to the next chunk (linked list) */ + union + { + struct HashMemoryChunkData *unshared; + dsa_pointer shared; + } next; - char data[FLEXIBLE_ARRAY_MEMBER]; /* buffer allocated at the end */ + /* + * The chunk's tuple buffer starts after the HashMemoryChunkData struct, + * at offset HASH_CHUNK_HEADER_SIZE (which must be maxaligned). Note that + * that offset is not included in "maxlen" or "used". + */ } HashMemoryChunkData; typedef struct HashMemoryChunkData *HashMemoryChunk; #define HASH_CHUNK_SIZE (32 * 1024L) +#define HASH_CHUNK_HEADER_SIZE MAXALIGN(sizeof(HashMemoryChunkData)) +#define HASH_CHUNK_DATA(hc) (((char *) (hc)) + HASH_CHUNK_HEADER_SIZE) +/* tuples exceeding HASH_CHUNK_THRESHOLD bytes are put in their own chunk */ #define HASH_CHUNK_THRESHOLD (HASH_CHUNK_SIZE / 4) +/* + * For each batch of a Parallel Hash Join, we have a ParallelHashJoinBatch + * object in shared memory to coordinate access to it. Since they are + * followed by variable-sized objects, they are arranged in contiguous memory + * but not accessed directly as an array. 
+ */ +typedef struct ParallelHashJoinBatch +{ + dsa_pointer buckets; /* array of hash table buckets */ + Barrier batch_barrier; /* synchronization for joining this batch */ + + dsa_pointer chunks; /* chunks of tuples loaded */ + size_t size; /* size of buckets + chunks in memory */ + size_t estimated_size; /* size of buckets + chunks while writing */ + size_t ntuples; /* number of tuples loaded */ + size_t old_ntuples; /* number of tuples before repartitioning */ + bool space_exhausted; + + /* + * Variable-sized SharedTuplestore objects follow this struct in memory. + * See the accessor macros below. + */ +} ParallelHashJoinBatch; + +/* Accessor for inner batch tuplestore following a ParallelHashJoinBatch. */ +#define ParallelHashJoinBatchInner(batch) \ + ((SharedTuplestore *) \ + ((char *) (batch) + MAXALIGN(sizeof(ParallelHashJoinBatch)))) + +/* Accessor for outer batch tuplestore following a ParallelHashJoinBatch. */ +#define ParallelHashJoinBatchOuter(batch, nparticipants) \ + ((SharedTuplestore *) \ + ((char *) ParallelHashJoinBatchInner(batch) + \ + MAXALIGN(sts_estimate(nparticipants)))) + +/* Total size of a ParallelHashJoinBatch and tuplestores. */ +#define EstimateParallelHashJoinBatch(hashtable) \ + (MAXALIGN(sizeof(ParallelHashJoinBatch)) + \ + MAXALIGN(sts_estimate((hashtable)->parallel_state->nparticipants)) * 2) + +/* Accessor for the nth ParallelHashJoinBatch given the base. */ +#define NthParallelHashJoinBatch(base, n) \ + ((ParallelHashJoinBatch *) \ + ((char *) (base) + \ + EstimateParallelHashJoinBatch(hashtable) * (n))) + +/* + * Each backend requires a small amount of per-batch state to interact with + * each ParallelHashJoinBatch. + */ +typedef struct ParallelHashJoinBatchAccessor +{ + ParallelHashJoinBatch *shared; /* pointer to shared state */ + + /* Per-backend partial counters to reduce contention. */ + size_t preallocated; /* pre-allocated space for this backend */ + size_t ntuples; /* number of tuples */ + size_t size; /* size of partition in memory */ + size_t estimated_size; /* size of partition on disk */ + size_t old_ntuples; /* how many tuples before repartitioning? */ + bool at_least_one_chunk; /* has this backend allocated a chunk? */ + + bool done; /* flag to remember that a batch is done */ + SharedTuplestoreAccessor *inner_tuples; + SharedTuplestoreAccessor *outer_tuples; +} ParallelHashJoinBatchAccessor; + +/* + * While hashing the inner relation, any participant might determine that it's + * time to increase the number of buckets to reduce the load factor or batches + * to reduce the memory size. This is indicated by setting the growth flag to + * these values. + */ +typedef enum ParallelHashGrowth +{ + /* The current dimensions are sufficient. */ + PHJ_GROWTH_OK, + /* The load factor is too high, so we need to add buckets. */ + PHJ_GROWTH_NEED_MORE_BUCKETS, + /* The memory budget would be exhausted, so we need to repartition. */ + PHJ_GROWTH_NEED_MORE_BATCHES, + /* Repartitioning didn't help last time, so don't try to do that again. */ + PHJ_GROWTH_DISABLED +} ParallelHashGrowth; + +/* + * The shared state used to coordinate a Parallel Hash Join. This is stored + * in the DSM segment. 
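A minimal sketch (not from the patch) of how the accessor macros above might be used to walk the shared batch array. It assumes a HashJoinTable "hashtable" carrying the parallel fields introduced later in this header, plus the usual dsa.h and sharedtuplestore.h facilities; note that NthParallelHashJoinBatch deliberately refers to a local variable named "hashtable":

ParallelHashJoinState *pstate = hashtable->parallel_state;
char       *base = dsa_get_address(hashtable->area, pstate->batches);
int         i;

for (i = 0; i < pstate->nbatch; i++)
{
    ParallelHashJoinBatch *batch = NthParallelHashJoinBatch(base, i);
    SharedTuplestore *inner_tuples = ParallelHashJoinBatchInner(batch);
    SharedTuplestore *outer_tuples =
        ParallelHashJoinBatchOuter(batch, pstate->nparticipants);

    /* ... attach to the tuplestores, inspect batch->ntuples, and so on ... */
}
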
+ */ +typedef struct ParallelHashJoinState +{ + dsa_pointer batches; /* array of ParallelHashJoinBatch */ + dsa_pointer old_batches; /* previous generation during repartition */ + int nbatch; /* number of batches now */ + int old_nbatch; /* previous number of batches */ + int nbuckets; /* number of buckets */ + ParallelHashGrowth growth; /* control batch/bucket growth */ + dsa_pointer chunk_work_queue; /* chunk work queue */ + int nparticipants; + size_t space_allowed; + size_t total_tuples; /* total number of inner tuples */ + LWLock lock; /* lock protecting the above */ + + Barrier build_barrier; /* synchronization for the build phases */ + Barrier grow_batches_barrier; + Barrier grow_buckets_barrier; + pg_atomic_uint32 distributor; /* counter for load balancing */ + + SharedFileSet fileset; /* space for shared temporary files */ +} ParallelHashJoinState; + +/* The phases for building batches, used by build_barrier. */ +#define PHJ_BUILD_ELECTING 0 +#define PHJ_BUILD_ALLOCATING 1 +#define PHJ_BUILD_HASHING_INNER 2 +#define PHJ_BUILD_HASHING_OUTER 3 +#define PHJ_BUILD_DONE 4 + +/* The phases for probing each batch, used by for batch_barrier. */ +#define PHJ_BATCH_ELECTING 0 +#define PHJ_BATCH_ALLOCATING 1 +#define PHJ_BATCH_LOADING 2 +#define PHJ_BATCH_PROBING 3 +#define PHJ_BATCH_DONE 4 + +/* The phases of batch growth while hashing, for grow_batches_barrier. */ +#define PHJ_GROW_BATCHES_ELECTING 0 +#define PHJ_GROW_BATCHES_ALLOCATING 1 +#define PHJ_GROW_BATCHES_REPARTITIONING 2 +#define PHJ_GROW_BATCHES_DECIDING 3 +#define PHJ_GROW_BATCHES_FINISHING 4 +#define PHJ_GROW_BATCHES_PHASE(n) ((n) % 5) /* circular phases */ + +/* The phases of bucket growth while hashing, for grow_buckets_barrier. */ +#define PHJ_GROW_BUCKETS_ELECTING 0 +#define PHJ_GROW_BUCKETS_ALLOCATING 1 +#define PHJ_GROW_BUCKETS_REINSERTING 2 +#define PHJ_GROW_BUCKETS_PHASE(n) ((n) % 3) /* circular phases */ + typedef struct HashJoinTableData { int nbuckets; /* # buckets in the in-memory hash table */ @@ -133,8 +291,13 @@ typedef struct HashJoinTableData int log2_nbuckets_optimal; /* log2(nbuckets_optimal) */ /* buckets[i] is head of list of tuples in i'th in-memory bucket */ - struct HashJoinTupleData **buckets; - /* buckets array is per-batch storage, as are all the tuples */ + union + { + /* unshared array is per-batch storage, as are all the tuples */ + struct HashJoinTupleData **unshared; + /* shared array is per-query DSA area, as are all the tuples */ + dsa_pointer_atomic *shared; + } buckets; bool keepNulls; /* true to store unmatchable NULL tuples */ @@ -153,6 +316,7 @@ typedef struct HashJoinTableData bool growEnabled; /* flag to shut off nbatch increases */ double totalTuples; /* # tuples obtained from inner plan */ + double partialTuples; /* # tuples obtained from inner plan by me */ double skewTuples; /* # tuples inserted into skew tuples */ /* @@ -185,6 +349,13 @@ typedef struct HashJoinTableData /* used for dense allocation of tuples (into linked chunks) */ HashMemoryChunk chunks; /* one list for the whole batch */ + + /* Shared and private state for Parallel Hash. 
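A minimal sketch (not from the patch) of how the circular grow_batches phases might be consumed: the phase number returned by BarrierAttach()/BarrierPhase() keeps counting upward across repeated growth rounds, and PHJ_GROW_BATCHES_PHASE() maps it back onto the five states above ("pstate" is an assumed ParallelHashJoinState pointer):

Barrier    *barrier = &pstate->grow_batches_barrier;

switch (PHJ_GROW_BATCHES_PHASE(BarrierAttach(barrier)))
{
    case PHJ_GROW_BATCHES_ELECTING:
        /* one participant is elected to allocate the new batch array */
        break;
    case PHJ_GROW_BATCHES_REPARTITIONING:
        /* all attached participants help move tuples to their new batches */
        break;
        /* ... remaining phases elided ... */
    default:
        break;
}
BarrierDetach(barrier);
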
*/ + HashMemoryChunk current_chunk; /* this backend's current chunk */ + dsa_area *area; /* DSA area to allocate memory from */ + ParallelHashJoinState *parallel_state; + ParallelHashJoinBatchAccessor *batches; + dsa_pointer current_chunk_shared; } HashJoinTableData; #endif /* HASHJOIN_H */ diff --git a/src/include/executor/instrument.h b/src/include/executor/instrument.h index 31573145a9..6d0efa7222 100644 --- a/src/include/executor/instrument.h +++ b/src/include/executor/instrument.h @@ -4,7 +4,7 @@ * definitions for run-time statistics collection * * - * Copyright (c) 2001-2017, PostgreSQL Global Development Group + * Copyright (c) 2001-2018, PostgreSQL Global Development Group * * src/include/executor/instrument.h * @@ -44,10 +44,10 @@ typedef enum InstrumentOption typedef struct Instrumentation { /* Parameters set at node creation: */ - bool need_timer; /* TRUE if we need timer data */ - bool need_bufusage; /* TRUE if we need buffer usage data */ + bool need_timer; /* true if we need timer data */ + bool need_bufusage; /* true if we need buffer usage data */ /* Info about current plan cycle: */ - bool running; /* TRUE if we've completed first tuple */ + bool running; /* true if we've completed first tuple */ instr_time starttime; /* Start time of current iteration of node */ instr_time counter; /* Accumulated runtime for this node */ double firsttuple; /* Time for first tuple of this cycle */ @@ -57,6 +57,7 @@ typedef struct Instrumentation double startup; /* Total startup time (in seconds) */ double total; /* Total total time (in seconds) */ double ntuples; /* Total tuples produced */ + double ntuples2; /* Secondary node-specific tuple counter */ double nloops; /* # of run cycles for this node */ double nfiltered1; /* # tuples removed by scanqual or joinqual */ double nfiltered2; /* # tuples removed by "other" quals */ diff --git a/src/include/executor/nodeAgg.h b/src/include/executor/nodeAgg.h index eff5af9c2a..8fb8c8fe80 100644 --- a/src/include/executor/nodeAgg.h +++ b/src/include/executor/nodeAgg.h @@ -4,7 +4,7 @@ * prototypes for nodeAgg.c * * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/include/executor/nodeAgg.h @@ -16,6 +16,295 @@ #include "nodes/execnodes.h" + +/* + * AggStatePerTransData - per aggregate state value information + * + * Working state for updating the aggregate's state value, by calling the + * transition function with an input row. This struct does not store the + * information needed to produce the final aggregate result from the transition + * state, that's stored in AggStatePerAggData instead. This separation allows + * multiple aggregate results to be produced from a single state value. + */ +typedef struct AggStatePerTransData +{ + /* + * These values are set up during ExecInitAgg() and do not change + * thereafter: + */ + + /* + * Link to an Aggref expr this state value is for. + * + * There can be multiple Aggref's sharing the same state value, so long as + * the inputs and transition functions are identical and the final + * functions are not read-write. This points to the first one of them. + */ + Aggref *aggref; + + /* + * Is this state value actually being shared by more than one Aggref? + */ + bool aggshared; + + /* + * Number of aggregated input columns. This includes ORDER BY expressions + * in both the plain-agg and ordered-set cases. 
Ordered-set direct args + * are not counted, though. + */ + int numInputs; + + /* + * Number of aggregated input columns to pass to the transfn. This + * includes the ORDER BY columns for ordered-set aggs, but not for plain + * aggs. (This doesn't count the transition state value!) + */ + int numTransInputs; + + /* Oid of the state transition or combine function */ + Oid transfn_oid; + + /* Oid of the serialization function or InvalidOid */ + Oid serialfn_oid; + + /* Oid of the deserialization function or InvalidOid */ + Oid deserialfn_oid; + + /* Oid of state value's datatype */ + Oid aggtranstype; + + /* + * fmgr lookup data for transition function or combine function. Note in + * particular that the fn_strict flag is kept here. + */ + FmgrInfo transfn; + + /* fmgr lookup data for serialization function */ + FmgrInfo serialfn; + + /* fmgr lookup data for deserialization function */ + FmgrInfo deserialfn; + + /* Input collation derived for aggregate */ + Oid aggCollation; + + /* number of sorting columns */ + int numSortCols; + + /* number of sorting columns to consider in DISTINCT comparisons */ + /* (this is either zero or the same as numSortCols) */ + int numDistinctCols; + + /* deconstructed sorting information (arrays of length numSortCols) */ + AttrNumber *sortColIdx; + Oid *sortOperators; + Oid *sortCollations; + bool *sortNullsFirst; + + /* + * Comparators for input columns --- only set/used when aggregate has + * DISTINCT flag. equalfnOne version is used for single-column + * comparisons, equalfnMulti for the case of multiple columns. + */ + FmgrInfo equalfnOne; + ExprState *equalfnMulti; + + /* + * initial value from pg_aggregate entry + */ + Datum initValue; + bool initValueIsNull; + + /* + * We need the len and byval info for the agg's input and transition data + * types in order to know how to copy/delete values. + * + * Note that the info for the input type is used only when handling + * DISTINCT aggs with just one argument, so there is only one input type. + */ + int16 inputtypeLen, + transtypeLen; + bool inputtypeByVal, + transtypeByVal; + + /* + * Slots for holding the evaluated input arguments. These are set up + * during ExecInitAgg() and then used for each input row requiring either + * FILTER or ORDER BY/DISTINCT processing. + */ + TupleTableSlot *sortslot; /* current input tuple */ + TupleTableSlot *uniqslot; /* used for multi-column DISTINCT */ + TupleDesc sortdesc; /* descriptor of input tuples */ + + /* + * These values are working state that is initialized at the start of an + * input tuple group and updated for each input tuple. + * + * For a simple (non DISTINCT/ORDER BY) aggregate, we just feed the input + * values straight to the transition function. If it's DISTINCT or + * requires ORDER BY, we pass the input values into a Tuplesort object; + * then at completion of the input tuple group, we scan the sorted values, + * eliminate duplicates if needed, and run the transition function on the + * rest. + * + * We need a separate tuplesort for each grouping set. + */ + + Tuplesortstate **sortstates; /* sort objects, if DISTINCT or ORDER BY */ + + /* + * This field is a pre-initialized FunctionCallInfo struct used for + * calling this aggregate's transfn. We save a few cycles per row by not + * re-initializing the unchanging fields; which isn't much, but it seems + * worth the extra space consumption. 
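A sketch (not from the patch) of the kind of one-time setup this pre-initialized FunctionCallInfo refers to: during ExecInitAgg() the call info can be filled in once, so per-row code only refreshes the argument slots before invoking the transition function ("aggstate" is an assumed AggState pointer from the surrounding initialization code):

InitFunctionCallInfoData(pertrans->transfn_fcinfo,
                         &pertrans->transfn,
                         pertrans->numTransInputs + 1,  /* args + trans value */
                         pertrans->aggCollation,
                         (void *) aggstate, NULL);
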
+ */ + FunctionCallInfoData transfn_fcinfo; + + /* Likewise for serialization and deserialization functions */ + FunctionCallInfoData serialfn_fcinfo; + + FunctionCallInfoData deserialfn_fcinfo; +} AggStatePerTransData; + +/* + * AggStatePerAggData - per-aggregate information + * + * This contains the information needed to call the final function, to produce + * a final aggregate result from the state value. If there are multiple + * identical Aggrefs in the query, they can all share the same per-agg data. + * + * These values are set up during ExecInitAgg() and do not change thereafter. + */ +typedef struct AggStatePerAggData +{ + /* + * Link to an Aggref expr this state value is for. + * + * There can be multiple identical Aggref's sharing the same per-agg. This + * points to the first one of them. + */ + Aggref *aggref; + + /* index to the state value which this agg should use */ + int transno; + + /* Optional Oid of final function (may be InvalidOid) */ + Oid finalfn_oid; + + /* + * fmgr lookup data for final function --- only valid when finalfn_oid is + * not InvalidOid. + */ + FmgrInfo finalfn; + + /* + * Number of arguments to pass to the finalfn. This is always at least 1 + * (the transition state value) plus any ordered-set direct args. If the + * finalfn wants extra args then we pass nulls corresponding to the + * aggregated input columns. + */ + int numFinalArgs; + + /* ExprStates for any direct-argument expressions */ + List *aggdirectargs; + + /* + * We need the len and byval info for the agg's result data type in order + * to know how to copy/delete values. + */ + int16 resulttypeLen; + bool resulttypeByVal; + + /* + * "shareable" is false if this agg cannot share state values with other + * aggregates because the final function is read-write. + */ + bool shareable; +} AggStatePerAggData; + +/* + * AggStatePerGroupData - per-aggregate-per-group working state + * + * These values are working state that is initialized at the start of + * an input tuple group and updated for each input tuple. + * + * In AGG_PLAIN and AGG_SORTED modes, we have a single array of these + * structs (pointed to by aggstate->pergroup); we re-use the array for + * each input group, if it's AGG_SORTED mode. In AGG_HASHED mode, the + * hash table contains an array of these structs for each tuple group. + * + * Logically, the sortstate field belongs in this struct, but we do not + * keep it here for space reasons: we don't support DISTINCT aggregates + * in AGG_HASHED mode, so there's no reason to use up a pointer field + * in every entry of the hashtable. + */ +typedef struct AggStatePerGroupData +{ +#define FIELDNO_AGGSTATEPERGROUPDATA_TRANSVALUE 0 + Datum transValue; /* current transition value */ +#define FIELDNO_AGGSTATEPERGROUPDATA_TRANSVALUEISNULL 1 + bool transValueIsNull; + +#define FIELDNO_AGGSTATEPERGROUPDATA_NOTRANSVALUE 2 + bool noTransValue; /* true if transValue not set yet */ + + /* + * Note: noTransValue initially has the same value as transValueIsNull, + * and if true both are cleared to false at the same time. They are not + * the same though: if transfn later returns a NULL, we want to keep that + * NULL and not auto-replace it with a later input value. Only the first + * non-NULL input will be auto-substituted. + */ +} AggStatePerGroupData; + +/* + * AggStatePerPhaseData - per-grouping-set-phase state + * + * Grouping sets are divided into "phases", where a single phase can be + * processed in one pass over the input. 
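A rough sketch (not from the patch) of the first-input substitution described in the AggStatePerGroupData note above, along the lines of what nodeAgg.c does for a strict transition function; "newValue", "isNull", "pertrans" and "pergroup" are assumed locals, and real code also copies the datum into the proper aggregate memory context:

if (pergroup->noTransValue)
{
    if (!isNull)
    {
        /* adopt the first non-NULL input as the transition value */
        pergroup->transValue = datumCopy(newValue,
                                         pertrans->transtypeByVal,
                                         pertrans->transtypeLen);
        pergroup->transValueIsNull = false;
        pergroup->noTransValue = false;
    }
    return;                 /* in either case the transfn is not called */
}
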
If there is more than one phase, then + * at the end of input from the current phase, state is reset and another pass + * taken over the data which has been re-sorted in the mean time. + * + * Accordingly, each phase specifies a list of grouping sets and group clause + * information, plus each phase after the first also has a sort order. + */ +typedef struct AggStatePerPhaseData +{ + AggStrategy aggstrategy; /* strategy for this phase */ + int numsets; /* number of grouping sets (or 0) */ + int *gset_lengths; /* lengths of grouping sets */ + Bitmapset **grouped_cols; /* column groupings for rollup */ + ExprState **eqfunctions; /* expression returning equality, indexed by + * nr of cols to compare */ + Agg *aggnode; /* Agg node for phase data */ + Sort *sortnode; /* Sort node for input ordering for phase */ + + ExprState *evaltrans; /* evaluation of transition functions */ +} AggStatePerPhaseData; + +/* + * AggStatePerHashData - per-hashtable state + * + * When doing grouping sets with hashing, we have one of these for each + * grouping set. (When doing hashing without grouping sets, we have just one of + * them.) + */ +typedef struct AggStatePerHashData +{ + TupleHashTable hashtable; /* hash table with one entry per group */ + TupleHashIterator hashiter; /* for iterating through hash table */ + TupleTableSlot *hashslot; /* slot for loading hash table */ + FmgrInfo *hashfunctions; /* per-grouping-field hash fns */ + Oid *eqfuncoids; /* per-grouping-field equality fns */ + int numCols; /* number of hash key columns */ + int numhashGrpCols; /* number of columns in hash table */ + int largestGrpColIdx; /* largest col required for hashing */ + AttrNumber *hashGrpColIdxInput; /* hash col indices in input slot */ + AttrNumber *hashGrpColIdxHash; /* indices in hashtbl tuples */ + Agg *aggnode; /* original Agg node, for numGroups etc. 
*/ +} AggStatePerHashData; + + extern AggState *ExecInitAgg(Agg *node, EState *estate, int eflags); extern void ExecEndAgg(AggState *node); extern void ExecReScanAgg(AggState *node); diff --git a/src/include/executor/nodeAppend.h b/src/include/executor/nodeAppend.h index 4e38a1380e..4e31a9fcd8 100644 --- a/src/include/executor/nodeAppend.h +++ b/src/include/executor/nodeAppend.h @@ -4,7 +4,7 @@ * * * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/include/executor/nodeAppend.h @@ -14,10 +14,15 @@ #ifndef NODEAPPEND_H #define NODEAPPEND_H +#include "access/parallel.h" #include "nodes/execnodes.h" extern AppendState *ExecInitAppend(Append *node, EState *estate, int eflags); extern void ExecEndAppend(AppendState *node); extern void ExecReScanAppend(AppendState *node); +extern void ExecAppendEstimate(AppendState *node, ParallelContext *pcxt); +extern void ExecAppendInitializeDSM(AppendState *node, ParallelContext *pcxt); +extern void ExecAppendReInitializeDSM(AppendState *node, ParallelContext *pcxt); +extern void ExecAppendInitializeWorker(AppendState *node, ParallelWorkerContext *pwcxt); #endif /* NODEAPPEND_H */ diff --git a/src/include/executor/nodeBitmapAnd.h b/src/include/executor/nodeBitmapAnd.h index 5d848b61af..029e5b600d 100644 --- a/src/include/executor/nodeBitmapAnd.h +++ b/src/include/executor/nodeBitmapAnd.h @@ -4,7 +4,7 @@ * * * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/include/executor/nodeBitmapAnd.h diff --git a/src/include/executor/nodeBitmapHeapscan.h b/src/include/executor/nodeBitmapHeapscan.h index c77694cf22..e86d3e10d4 100644 --- a/src/include/executor/nodeBitmapHeapscan.h +++ b/src/include/executor/nodeBitmapHeapscan.h @@ -4,7 +4,7 @@ * * * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/include/executor/nodeBitmapHeapscan.h @@ -24,7 +24,9 @@ extern void ExecBitmapHeapEstimate(BitmapHeapScanState *node, ParallelContext *pcxt); extern void ExecBitmapHeapInitializeDSM(BitmapHeapScanState *node, ParallelContext *pcxt); +extern void ExecBitmapHeapReInitializeDSM(BitmapHeapScanState *node, + ParallelContext *pcxt); extern void ExecBitmapHeapInitializeWorker(BitmapHeapScanState *node, - shm_toc *toc); + ParallelWorkerContext *pwcxt); #endif /* NODEBITMAPHEAPSCAN_H */ diff --git a/src/include/executor/nodeBitmapIndexscan.h b/src/include/executor/nodeBitmapIndexscan.h index 842193f4df..8b93baabea 100644 --- a/src/include/executor/nodeBitmapIndexscan.h +++ b/src/include/executor/nodeBitmapIndexscan.h @@ -4,7 +4,7 @@ * * * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/include/executor/nodeBitmapIndexscan.h diff --git a/src/include/executor/nodeBitmapOr.h b/src/include/executor/nodeBitmapOr.h index 526904eb4d..96f84d22ed 100644 --- a/src/include/executor/nodeBitmapOr.h +++ b/src/include/executor/nodeBitmapOr.h @@ -4,7 +4,7 @@ * * * - * Portions Copyright (c) 1996-2017, 
PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/include/executor/nodeBitmapOr.h diff --git a/src/include/executor/nodeCtescan.h b/src/include/executor/nodeCtescan.h index d2fbcbd586..21f4f191cd 100644 --- a/src/include/executor/nodeCtescan.h +++ b/src/include/executor/nodeCtescan.h @@ -4,7 +4,7 @@ * * * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/include/executor/nodeCtescan.h diff --git a/src/include/executor/nodeCustom.h b/src/include/executor/nodeCustom.h index a1cc63ae1f..454a684a40 100644 --- a/src/include/executor/nodeCustom.h +++ b/src/include/executor/nodeCustom.h @@ -4,7 +4,7 @@ * * prototypes for CustomScan nodes * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * ------------------------------------------------------------------------ @@ -34,8 +34,10 @@ extern void ExecCustomScanEstimate(CustomScanState *node, ParallelContext *pcxt); extern void ExecCustomScanInitializeDSM(CustomScanState *node, ParallelContext *pcxt); +extern void ExecCustomScanReInitializeDSM(CustomScanState *node, + ParallelContext *pcxt); extern void ExecCustomScanInitializeWorker(CustomScanState *node, - shm_toc *toc); + ParallelWorkerContext *pwcxt); extern void ExecShutdownCustomScan(CustomScanState *node); #endif /* NODECUSTOM_H */ diff --git a/src/include/executor/nodeForeignscan.h b/src/include/executor/nodeForeignscan.h index 0b662597d8..ccb66be733 100644 --- a/src/include/executor/nodeForeignscan.h +++ b/src/include/executor/nodeForeignscan.h @@ -4,7 +4,7 @@ * * * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/include/executor/nodeForeignscan.h @@ -25,8 +25,10 @@ extern void ExecForeignScanEstimate(ForeignScanState *node, ParallelContext *pcxt); extern void ExecForeignScanInitializeDSM(ForeignScanState *node, ParallelContext *pcxt); +extern void ExecForeignScanReInitializeDSM(ForeignScanState *node, + ParallelContext *pcxt); extern void ExecForeignScanInitializeWorker(ForeignScanState *node, - shm_toc *toc); + ParallelWorkerContext *pwcxt); extern void ExecShutdownForeignScan(ForeignScanState *node); #endif /* NODEFOREIGNSCAN_H */ diff --git a/src/include/executor/nodeFunctionscan.h b/src/include/executor/nodeFunctionscan.h index aaa9d8c316..5de8d15a5f 100644 --- a/src/include/executor/nodeFunctionscan.h +++ b/src/include/executor/nodeFunctionscan.h @@ -4,7 +4,7 @@ * * * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/include/executor/nodeFunctionscan.h diff --git a/src/include/executor/nodeGather.h b/src/include/executor/nodeGather.h index 189bd70041..4477b855c4 100644 --- a/src/include/executor/nodeGather.h +++ b/src/include/executor/nodeGather.h @@ -4,7 +4,7 @@ * prototypes for nodeGather.c * * - * Portions Copyright (c) 1996-2017, PostgreSQL Global 
Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/include/executor/nodeGather.h diff --git a/src/include/executor/nodeGatherMerge.h b/src/include/executor/nodeGatherMerge.h index 0154d73312..1e514ff090 100644 --- a/src/include/executor/nodeGatherMerge.h +++ b/src/include/executor/nodeGatherMerge.h @@ -4,7 +4,7 @@ * prototypes for nodeGatherMerge.c * * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/include/executor/nodeGatherMerge.h diff --git a/src/include/executor/nodeGroup.h b/src/include/executor/nodeGroup.h index b0d7e312c9..483390db8c 100644 --- a/src/include/executor/nodeGroup.h +++ b/src/include/executor/nodeGroup.h @@ -4,7 +4,7 @@ * prototypes for nodeGroup.c * * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/include/executor/nodeGroup.h diff --git a/src/include/executor/nodeHash.h b/src/include/executor/nodeHash.h index 3ae556fb6c..8d700c06c5 100644 --- a/src/include/executor/nodeHash.h +++ b/src/include/executor/nodeHash.h @@ -4,7 +4,7 @@ * prototypes for nodeHash.c * * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/include/executor/nodeHash.h @@ -14,19 +14,35 @@ #ifndef NODEHASH_H #define NODEHASH_H +#include "access/parallel.h" #include "nodes/execnodes.h" +struct SharedHashJoinBatch; + extern HashState *ExecInitHash(Hash *node, EState *estate, int eflags); extern Node *MultiExecHash(HashState *node); extern void ExecEndHash(HashState *node); extern void ExecReScanHash(HashState *node); -extern HashJoinTable ExecHashTableCreate(Hash *node, List *hashOperators, +extern HashJoinTable ExecHashTableCreate(HashState *state, List *hashOperators, bool keepNulls); +extern void ExecParallelHashTableAlloc(HashJoinTable hashtable, + int batchno); extern void ExecHashTableDestroy(HashJoinTable hashtable); +extern void ExecHashTableDetach(HashJoinTable hashtable); +extern void ExecHashTableDetachBatch(HashJoinTable hashtable); +extern void ExecParallelHashTableSetCurrentBatch(HashJoinTable hashtable, + int batchno); + extern void ExecHashTableInsert(HashJoinTable hashtable, TupleTableSlot *slot, uint32 hashvalue); +extern void ExecParallelHashTableInsert(HashJoinTable hashtable, + TupleTableSlot *slot, + uint32 hashvalue); +extern void ExecParallelHashTableInsertCurrentBatch(HashJoinTable hashtable, + TupleTableSlot *slot, + uint32 hashvalue); extern bool ExecHashGetHashValue(HashJoinTable hashtable, ExprContext *econtext, List *hashkeys, @@ -38,15 +54,26 @@ extern void ExecHashGetBucketAndBatch(HashJoinTable hashtable, int *bucketno, int *batchno); extern bool ExecScanHashBucket(HashJoinState *hjstate, ExprContext *econtext); +extern bool ExecParallelScanHashBucket(HashJoinState *hjstate, ExprContext *econtext); extern void ExecPrepHashTableForUnmatched(HashJoinState *hjstate); extern bool ExecScanHashTableForUnmatched(HashJoinState *hjstate, ExprContext *econtext); extern void ExecHashTableReset(HashJoinTable hashtable); extern void 
ExecHashTableResetMatchFlags(HashJoinTable hashtable); extern void ExecChooseHashTableSize(double ntuples, int tupwidth, bool useskew, + bool try_combined_work_mem, + int parallel_workers, + size_t *space_allowed, int *numbuckets, int *numbatches, int *num_skew_mcvs); extern int ExecHashGetSkewBucket(HashJoinTable hashtable, uint32 hashvalue); +extern void ExecHashEstimate(HashState *node, ParallelContext *pcxt); +extern void ExecHashInitializeDSM(HashState *node, ParallelContext *pcxt); +extern void ExecHashInitializeWorker(HashState *node, ParallelWorkerContext *pwcxt); +extern void ExecHashRetrieveInstrumentation(HashState *node); +extern void ExecShutdownHash(HashState *node); +extern void ExecHashGetInstrumentation(HashInstrumentation *instrument, + HashJoinTable hashtable); #endif /* NODEHASH_H */ diff --git a/src/include/executor/nodeHashjoin.h b/src/include/executor/nodeHashjoin.h index 7469bfbf60..4086dd5382 100644 --- a/src/include/executor/nodeHashjoin.h +++ b/src/include/executor/nodeHashjoin.h @@ -4,7 +4,7 @@ * prototypes for nodeHashjoin.c * * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/include/executor/nodeHashjoin.h @@ -14,12 +14,19 @@ #ifndef NODEHASHJOIN_H #define NODEHASHJOIN_H +#include "access/parallel.h" #include "nodes/execnodes.h" #include "storage/buffile.h" extern HashJoinState *ExecInitHashJoin(HashJoin *node, EState *estate, int eflags); extern void ExecEndHashJoin(HashJoinState *node); extern void ExecReScanHashJoin(HashJoinState *node); +extern void ExecShutdownHashJoin(HashJoinState *node); +extern void ExecHashJoinEstimate(HashJoinState *state, ParallelContext *pcxt); +extern void ExecHashJoinInitializeDSM(HashJoinState *state, ParallelContext *pcxt); +extern void ExecHashJoinReInitializeDSM(HashJoinState *state, ParallelContext *pcxt); +extern void ExecHashJoinInitializeWorker(HashJoinState *state, + ParallelWorkerContext *pwcxt); extern void ExecHashJoinSaveTuple(MinimalTuple tuple, uint32 hashvalue, BufFile **fileptr); diff --git a/src/include/executor/nodeIndexonlyscan.h b/src/include/executor/nodeIndexonlyscan.h index c8a709c26e..8f6c5a8d09 100644 --- a/src/include/executor/nodeIndexonlyscan.h +++ b/src/include/executor/nodeIndexonlyscan.h @@ -4,7 +4,7 @@ * * * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/include/executor/nodeIndexonlyscan.h @@ -28,7 +28,9 @@ extern void ExecIndexOnlyScanEstimate(IndexOnlyScanState *node, ParallelContext *pcxt); extern void ExecIndexOnlyScanInitializeDSM(IndexOnlyScanState *node, ParallelContext *pcxt); +extern void ExecIndexOnlyScanReInitializeDSM(IndexOnlyScanState *node, + ParallelContext *pcxt); extern void ExecIndexOnlyScanInitializeWorker(IndexOnlyScanState *node, - shm_toc *toc); + ParallelWorkerContext *pwcxt); #endif /* NODEINDEXONLYSCAN_H */ diff --git a/src/include/executor/nodeIndexscan.h b/src/include/executor/nodeIndexscan.h index 1668e347ee..822a9c9fad 100644 --- a/src/include/executor/nodeIndexscan.h +++ b/src/include/executor/nodeIndexscan.h @@ -4,7 +4,7 @@ * * * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, 
Regents of the University of California * * src/include/executor/nodeIndexscan.h @@ -24,7 +24,9 @@ extern void ExecIndexRestrPos(IndexScanState *node); extern void ExecReScanIndexScan(IndexScanState *node); extern void ExecIndexScanEstimate(IndexScanState *node, ParallelContext *pcxt); extern void ExecIndexScanInitializeDSM(IndexScanState *node, ParallelContext *pcxt); -extern void ExecIndexScanInitializeWorker(IndexScanState *node, shm_toc *toc); +extern void ExecIndexScanReInitializeDSM(IndexScanState *node, ParallelContext *pcxt); +extern void ExecIndexScanInitializeWorker(IndexScanState *node, + ParallelWorkerContext *pwcxt); /* * These routines are exported to share code with nodeIndexonlyscan.c and diff --git a/src/include/executor/nodeLimit.h b/src/include/executor/nodeLimit.h index db65b5524c..160ae5b026 100644 --- a/src/include/executor/nodeLimit.h +++ b/src/include/executor/nodeLimit.h @@ -4,7 +4,7 @@ * * * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/include/executor/nodeLimit.h diff --git a/src/include/executor/nodeLockRows.h b/src/include/executor/nodeLockRows.h index c9d05b87f1..e3d77dff56 100644 --- a/src/include/executor/nodeLockRows.h +++ b/src/include/executor/nodeLockRows.h @@ -4,7 +4,7 @@ * * * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/include/executor/nodeLockRows.h diff --git a/src/include/executor/nodeMaterial.h b/src/include/executor/nodeMaterial.h index 4b3c2578c9..84004efd5e 100644 --- a/src/include/executor/nodeMaterial.h +++ b/src/include/executor/nodeMaterial.h @@ -4,7 +4,7 @@ * * * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/include/executor/nodeMaterial.h diff --git a/src/include/executor/nodeMergeAppend.h b/src/include/executor/nodeMergeAppend.h index a0ccbae965..e3bb7d91ff 100644 --- a/src/include/executor/nodeMergeAppend.h +++ b/src/include/executor/nodeMergeAppend.h @@ -4,7 +4,7 @@ * * * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/include/executor/nodeMergeAppend.h diff --git a/src/include/executor/nodeMergejoin.h b/src/include/executor/nodeMergejoin.h index d20e41505d..456a39d914 100644 --- a/src/include/executor/nodeMergejoin.h +++ b/src/include/executor/nodeMergejoin.h @@ -4,7 +4,7 @@ * * * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/include/executor/nodeMergejoin.h diff --git a/src/include/executor/nodeModifyTable.h b/src/include/executor/nodeModifyTable.h index a2e7af98de..0d7e579e1c 100644 --- a/src/include/executor/nodeModifyTable.h +++ b/src/include/executor/nodeModifyTable.h @@ -3,7 +3,7 @@ * nodeModifyTable.h * * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global 
Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/include/executor/nodeModifyTable.h diff --git a/src/include/executor/nodeNamedtuplestorescan.h b/src/include/executor/nodeNamedtuplestorescan.h index 395d978f62..6c7300bcb8 100644 --- a/src/include/executor/nodeNamedtuplestorescan.h +++ b/src/include/executor/nodeNamedtuplestorescan.h @@ -4,7 +4,7 @@ * * * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/include/executor/nodeNamedtuplestorescan.h diff --git a/src/include/executor/nodeNestloop.h b/src/include/executor/nodeNestloop.h index 0d6486cc57..06b90d150e 100644 --- a/src/include/executor/nodeNestloop.h +++ b/src/include/executor/nodeNestloop.h @@ -4,7 +4,7 @@ * * * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/include/executor/nodeNestloop.h diff --git a/src/include/executor/nodeProjectSet.h b/src/include/executor/nodeProjectSet.h index a0b0521f8d..c365589754 100644 --- a/src/include/executor/nodeProjectSet.h +++ b/src/include/executor/nodeProjectSet.h @@ -4,7 +4,7 @@ * * * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/include/executor/nodeProjectSet.h diff --git a/src/include/executor/nodeRecursiveunion.h b/src/include/executor/nodeRecursiveunion.h index e6ce1b4783..09f211c500 100644 --- a/src/include/executor/nodeRecursiveunion.h +++ b/src/include/executor/nodeRecursiveunion.h @@ -4,7 +4,7 @@ * * * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/include/executor/nodeRecursiveunion.h diff --git a/src/include/executor/nodeResult.h b/src/include/executor/nodeResult.h index 20e0063410..422a2ffd47 100644 --- a/src/include/executor/nodeResult.h +++ b/src/include/executor/nodeResult.h @@ -4,7 +4,7 @@ * * * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/include/executor/nodeResult.h diff --git a/src/include/executor/nodeSamplescan.h b/src/include/executor/nodeSamplescan.h index 607bbd9412..0d489496cd 100644 --- a/src/include/executor/nodeSamplescan.h +++ b/src/include/executor/nodeSamplescan.h @@ -4,7 +4,7 @@ * * * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/include/executor/nodeSamplescan.h diff --git a/src/include/executor/nodeSeqscan.h b/src/include/executor/nodeSeqscan.h index 0fba79f8de..020b40c6b7 100644 --- a/src/include/executor/nodeSeqscan.h +++ b/src/include/executor/nodeSeqscan.h @@ -4,7 +4,7 @@ * * * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 
1994, Regents of the University of California * * src/include/executor/nodeSeqscan.h @@ -24,6 +24,8 @@ extern void ExecReScanSeqScan(SeqScanState *node); /* parallel scan support */ extern void ExecSeqScanEstimate(SeqScanState *node, ParallelContext *pcxt); extern void ExecSeqScanInitializeDSM(SeqScanState *node, ParallelContext *pcxt); -extern void ExecSeqScanInitializeWorker(SeqScanState *node, shm_toc *toc); +extern void ExecSeqScanReInitializeDSM(SeqScanState *node, ParallelContext *pcxt); +extern void ExecSeqScanInitializeWorker(SeqScanState *node, + ParallelWorkerContext *pwcxt); #endif /* NODESEQSCAN_H */ diff --git a/src/include/executor/nodeSetOp.h b/src/include/executor/nodeSetOp.h index c15f945046..d41fcbdc6e 100644 --- a/src/include/executor/nodeSetOp.h +++ b/src/include/executor/nodeSetOp.h @@ -4,7 +4,7 @@ * * * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/include/executor/nodeSetOp.h diff --git a/src/include/executor/nodeSort.h b/src/include/executor/nodeSort.h index ed0e9dbb53..22f69ee1ea 100644 --- a/src/include/executor/nodeSort.h +++ b/src/include/executor/nodeSort.h @@ -4,7 +4,7 @@ * * * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/include/executor/nodeSort.h @@ -14,6 +14,7 @@ #ifndef NODESORT_H #define NODESORT_H +#include "access/parallel.h" #include "nodes/execnodes.h" extern SortState *ExecInitSort(Sort *node, EState *estate, int eflags); @@ -22,4 +23,10 @@ extern void ExecSortMarkPos(SortState *node); extern void ExecSortRestrPos(SortState *node); extern void ExecReScanSort(SortState *node); +/* parallel instrumentation support */ +extern void ExecSortEstimate(SortState *node, ParallelContext *pcxt); +extern void ExecSortInitializeDSM(SortState *node, ParallelContext *pcxt); +extern void ExecSortInitializeWorker(SortState *node, ParallelWorkerContext *pwcxt); +extern void ExecSortRetrieveInstrumentation(SortState *node); + #endif /* NODESORT_H */ diff --git a/src/include/executor/nodeSubplan.h b/src/include/executor/nodeSubplan.h index 5dbaeeb29a..fd21b5df8f 100644 --- a/src/include/executor/nodeSubplan.h +++ b/src/include/executor/nodeSubplan.h @@ -4,7 +4,7 @@ * * * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/include/executor/nodeSubplan.h @@ -28,4 +28,6 @@ extern void ExecReScanSetParamPlan(SubPlanState *node, PlanState *parent); extern void ExecSetParamPlan(SubPlanState *node, ExprContext *econtext); +extern void ExecSetParamPlanMulti(const Bitmapset *params, ExprContext *econtext); + #endif /* NODESUBPLAN_H */ diff --git a/src/include/executor/nodeSubqueryscan.h b/src/include/executor/nodeSubqueryscan.h index 710e050285..4bd292159c 100644 --- a/src/include/executor/nodeSubqueryscan.h +++ b/src/include/executor/nodeSubqueryscan.h @@ -4,7 +4,7 @@ * * * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/include/executor/nodeSubqueryscan.h diff --git 
a/src/include/executor/nodeTableFuncscan.h b/src/include/executor/nodeTableFuncscan.h index c4672c0ac0..06ebbf31eb 100644 --- a/src/include/executor/nodeTableFuncscan.h +++ b/src/include/executor/nodeTableFuncscan.h @@ -4,7 +4,7 @@ * * * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/include/executor/nodeTableFuncscan.h diff --git a/src/include/executor/nodeTidscan.h b/src/include/executor/nodeTidscan.h index e68aaf3829..30d21ff229 100644 --- a/src/include/executor/nodeTidscan.h +++ b/src/include/executor/nodeTidscan.h @@ -4,7 +4,7 @@ * * * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/include/executor/nodeTidscan.h diff --git a/src/include/executor/nodeUnique.h b/src/include/executor/nodeUnique.h index 008774ae0f..b8a44b73c1 100644 --- a/src/include/executor/nodeUnique.h +++ b/src/include/executor/nodeUnique.h @@ -4,7 +4,7 @@ * * * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/include/executor/nodeUnique.h diff --git a/src/include/executor/nodeValuesscan.h b/src/include/executor/nodeValuesscan.h index 772a5e9705..b4f384b111 100644 --- a/src/include/executor/nodeValuesscan.h +++ b/src/include/executor/nodeValuesscan.h @@ -4,7 +4,7 @@ * * * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/include/executor/nodeValuesscan.h diff --git a/src/include/executor/nodeWindowAgg.h b/src/include/executor/nodeWindowAgg.h index 1c177309ae..677e2d8d07 100644 --- a/src/include/executor/nodeWindowAgg.h +++ b/src/include/executor/nodeWindowAgg.h @@ -4,7 +4,7 @@ * prototypes for nodeWindowAgg.c * * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/include/executor/nodeWindowAgg.h diff --git a/src/include/executor/nodeWorktablescan.h b/src/include/executor/nodeWorktablescan.h index df05e75111..7daee1e40a 100644 --- a/src/include/executor/nodeWorktablescan.h +++ b/src/include/executor/nodeWorktablescan.h @@ -4,7 +4,7 @@ * * * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/include/executor/nodeWorktablescan.h diff --git a/src/include/executor/spi.h b/src/include/executor/spi.h index acade7e92e..b16440cf00 100644 --- a/src/include/executor/spi.h +++ b/src/include/executor/spi.h @@ -3,7 +3,7 @@ * spi.h * Server Programming Interface public declarations * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/include/executor/spi.h @@ -65,6 +65,8 @@ typedef struct _SPI_plan *SPIPlanPtr; 
#define SPI_OK_REL_UNREGISTER 16 #define SPI_OK_TD_REGISTER 17 +#define SPI_OPT_NONATOMIC (1 << 0) + /* These used to be functions, now just no-ops for backwards compatibility */ #define SPI_push() ((void) 0) #define SPI_pop() ((void) 0) @@ -78,6 +80,7 @@ extern PGDLLIMPORT SPITupleTable *SPI_tuptable; extern PGDLLIMPORT int SPI_result; extern int SPI_connect(void); +extern int SPI_connect_ext(int options); extern int SPI_finish(void); extern int SPI_execute(const char *src, bool read_only, long tcount); extern int SPI_execute_plan(SPIPlanPtr plan, Datum *Values, const char *Nulls, @@ -156,7 +159,13 @@ extern int SPI_register_relation(EphemeralNamedRelation enr); extern int SPI_unregister_relation(const char *name); extern int SPI_register_trigger_data(TriggerData *tdata); +extern void SPI_start_transaction(void); +extern void SPI_commit(void); +extern void SPI_rollback(void); + +extern void SPICleanup(void); extern void AtEOXact_SPI(bool isCommit); extern void AtEOSubXact_SPI(bool isCommit, SubTransactionId mySubid); +extern bool SPI_inside_nonatomic_context(void); #endif /* SPI_H */ diff --git a/src/include/executor/spi_priv.h b/src/include/executor/spi_priv.h index ba7fb98875..0da3a41050 100644 --- a/src/include/executor/spi_priv.h +++ b/src/include/executor/spi_priv.h @@ -3,7 +3,7 @@ * spi_priv.h * Server Programming Interface private declarations * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/include/executor/spi_priv.h @@ -26,6 +26,9 @@ typedef struct Oid lastoid; SPITupleTable *tuptable; /* tuptable currently being built */ + /* subtransaction in which current Executor call was started */ + SubTransactionId execSubid; + /* resources of this execution context */ slist_head tuptables; /* list of all live SPITupleTables */ MemoryContext procCxt; /* procedure context */ @@ -33,6 +36,18 @@ typedef struct MemoryContext savedcxt; /* context of SPI_connect's caller */ SubTransactionId connectSubid; /* ID of connecting subtransaction */ QueryEnvironment *queryEnv; /* query environment setup for SPI level */ + + /* transaction management support */ + bool atomic; /* atomic execution context, does not allow + * transactions */ + bool internal_xact; /* SPI-managed transaction boundary, skip + * cleanup */ + + /* saved values of API global variables for previous nesting level */ + uint64 outer_processed; + Oid outer_lastoid; + SPITupleTable *outer_tuptable; + int outer_result; } _SPI_connection; /* @@ -79,6 +94,7 @@ typedef struct _SPI_plan int magic; /* should equal _SPI_PLAN_MAGIC */ bool saved; /* saved or unsaved plan? */ bool oneshot; /* one-shot plan? 
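A minimal sketch (not from the patch) of the new nonatomic SPI session: a procedure running in a nonatomic context can now end and restart transactions between statements ("audit_log" is a hypothetical table):

if (SPI_connect_ext(SPI_OPT_NONATOMIC) != SPI_OK_CONNECT)
    elog(ERROR, "SPI_connect_ext failed");

SPI_execute("INSERT INTO audit_log VALUES (now())", false, 0);

SPI_commit();               /* commit the surrounding transaction ... */
SPI_start_transaction();    /* ... and begin a new one before continuing */

SPI_finish();
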
*/ + bool no_snapshots; /* let the caller handle the snapshots */ List *plancache_list; /* one CachedPlanSource per parsetree */ MemoryContext plancxt; /* Context containing _SPI_plan and data */ int cursor_options; /* Cursor options used for planning */ diff --git a/src/include/executor/tablefunc.h b/src/include/executor/tablefunc.h index a24a555b75..52424ae2ff 100644 --- a/src/include/executor/tablefunc.h +++ b/src/include/executor/tablefunc.h @@ -3,7 +3,7 @@ * tablefunc.h * interface for TableFunc executor node * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/include/executor/tablefunc.h @@ -47,17 +47,17 @@ struct TableFuncScanState; * * DestroyBuilder shall release all resources associated with a table builder * context. It may be called either because all rows have been consumed, or - * because an error ocurred while processing the table expression. + * because an error occurred while processing the table expression. */ typedef struct TableFuncRoutine { void (*InitOpaque) (struct TableFuncScanState *state, int natts); void (*SetDocument) (struct TableFuncScanState *state, Datum value); - void (*SetNamespace) (struct TableFuncScanState *state, char *name, - char *uri); - void (*SetRowFilter) (struct TableFuncScanState *state, char *path); + void (*SetNamespace) (struct TableFuncScanState *state, const char *name, + const char *uri); + void (*SetRowFilter) (struct TableFuncScanState *state, const char *path); void (*SetColumnFilter) (struct TableFuncScanState *state, - char *path, int colnum); + const char *path, int colnum); bool (*FetchRow) (struct TableFuncScanState *state); Datum (*GetValue) (struct TableFuncScanState *state, int colnum, Oid typid, int32 typmod, bool *isnull); diff --git a/src/include/executor/tqueue.h b/src/include/executor/tqueue.h index a717ac6184..0fe3639252 100644 --- a/src/include/executor/tqueue.h +++ b/src/include/executor/tqueue.h @@ -3,7 +3,7 @@ * tqueue.h * Use shm_mq to send & receive tuples between parallel backends * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/include/executor/tqueue.h @@ -24,8 +24,7 @@ typedef struct TupleQueueReader TupleQueueReader; extern DestReceiver *CreateTupleQueueDestReceiver(shm_mq_handle *handle); /* Use these to receive tuples from a shm_mq. 
*/ -extern TupleQueueReader *CreateTupleQueueReader(shm_mq_handle *handle, - TupleDesc tupledesc); +extern TupleQueueReader *CreateTupleQueueReader(shm_mq_handle *handle); extern void DestroyTupleQueueReader(TupleQueueReader *reader); extern HeapTuple TupleQueueReaderNext(TupleQueueReader *reader, bool nowait, bool *done); diff --git a/src/include/executor/tstoreReceiver.h b/src/include/executor/tstoreReceiver.h index ac4de3a663..5e2f83123c 100644 --- a/src/include/executor/tstoreReceiver.h +++ b/src/include/executor/tstoreReceiver.h @@ -4,7 +4,7 @@ * prototypes for tstoreReceiver.c * * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/include/executor/tstoreReceiver.h diff --git a/src/include/executor/tuptable.h b/src/include/executor/tuptable.h index 55f4cce4ee..b41b400ef1 100644 --- a/src/include/executor/tuptable.h +++ b/src/include/executor/tuptable.h @@ -4,7 +4,7 @@ * tuple table support stuff * * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/include/executor/tuptable.h @@ -65,11 +65,11 @@ * ie, only as needed. This serves to avoid repeated extraction of data * from the physical tuple. * - * A TupleTableSlot can also be "empty", holding no valid data. This is - * the only valid state for a freshly-created slot that has not yet had a - * tuple descriptor assigned to it. In this state, tts_isempty must be - * TRUE, tts_shouldFree FALSE, tts_tuple NULL, tts_buffer InvalidBuffer, - * and tts_nvalid zero. + * A TupleTableSlot can also be "empty", indicated by flag TTS_EMPTY set in + * tts_flags, holding no valid data. This is the only valid state for a + * freshly-created slot that has not yet had a tuple descriptor assigned to it. + * In this state, TTS_SHOULDFREE should not be set in tts_flag, tts_tuple must + * be NULL, tts_buffer InvalidBuffer, and tts_nvalid zero. * * The tupleDescriptor is simply referenced, not copied, by the TupleTableSlot * code. The caller of ExecSetSlotDescriptor() is responsible for providing @@ -79,8 +79,9 @@ * mechanism to do more. However, the slot will increment the tupdesc * reference count if a reference-counted tupdesc is supplied.) * - * When tts_shouldFree is true, the physical tuple is "owned" by the slot - * and should be freed when the slot's reference to the tuple is dropped. + * When TTS_SHOULDFREE is set in tts_flags, the physical tuple is "owned" by + * the slot and should be freed when the slot's reference to the tuple is + * dropped. * * If tts_buffer is not InvalidBuffer, then the slot is holding a pin * on the indicated buffer page; drop the pin when we release the @@ -106,27 +107,52 @@ * MINIMAL_TUPLE_OFFSET bytes before tts_mintuple. This allows column * extraction to treat the case identically to regular physical tuples. * - * tts_slow/tts_off are saved state for slot_deform_tuple, and should not - * be touched by any other code. + * TTS_SLOW flag in tts_flags and tts_off are saved state for + * slot_deform_tuple, and should not be touched by any other code. *---------- */ + +/* true = slot is empty */ +#define TTS_FLAG_EMPTY (1 << 1) +#define TTS_EMPTY(slot) (((slot)->tts_flags & TTS_FLAG_EMPTY) != 0) + +/* should pfree tts_tuple? 
*/ +#define TTS_FLAG_SHOULDFREE (1 << 2) +#define TTS_SHOULDFREE(slot) (((slot)->tts_flags & TTS_FLAG_SHOULDFREE) != 0) + +/* should pfree tts_mintuple? */ +#define TTS_FLAG_SHOULDFREEMIN (1 << 3) +#define TTS_SHOULDFREEMIN(slot) (((slot)->tts_flags & TTS_FLAG_SHOULDFREEMIN) != 0) + +/* saved state for slot_deform_tuple */ +#define TTS_FLAG_SLOW (1 << 4) +#define TTS_SLOW(slot) (((slot)->tts_flags & TTS_FLAG_SLOW) != 0) + +/* fixed tuple descriptor */ +#define TTS_FLAG_FIXED (1 << 5) +#define TTS_FIXED(slot) (((slot)->tts_flags & TTS_FLAG_FIXED) != 0) + typedef struct TupleTableSlot { NodeTag type; - bool tts_isempty; /* true = slot is empty */ - bool tts_shouldFree; /* should pfree tts_tuple? */ - bool tts_shouldFreeMin; /* should pfree tts_mintuple? */ - bool tts_slow; /* saved state for slot_deform_tuple */ +#define FIELDNO_TUPLETABLESLOT_FLAGS 1 + uint16 tts_flags; /* Boolean states */ +#define FIELDNO_TUPLETABLESLOT_NVALID 2 + AttrNumber tts_nvalid; /* # of valid values in tts_values */ +#define FIELDNO_TUPLETABLESLOT_TUPLE 3 HeapTuple tts_tuple; /* physical tuple, or NULL if virtual */ +#define FIELDNO_TUPLETABLESLOT_TUPLEDESCRIPTOR 4 TupleDesc tts_tupleDescriptor; /* slot's tuple descriptor */ MemoryContext tts_mcxt; /* slot itself is in this context */ Buffer tts_buffer; /* tuple's buffer, or InvalidBuffer */ - int tts_nvalid; /* # of valid values in tts_values */ +#define FIELDNO_TUPLETABLESLOT_OFF 7 + uint32 tts_off; /* saved state for slot_deform_tuple */ +#define FIELDNO_TUPLETABLESLOT_VALUES 8 Datum *tts_values; /* current per-attribute values */ +#define FIELDNO_TUPLETABLESLOT_ISNULL 9 bool *tts_isnull; /* current per-attribute isnull flags */ MinimalTuple tts_mintuple; /* minimal tuple, or NULL if none */ HeapTupleData tts_minhdr; /* workspace for minimal-tuple-only case */ - long tts_off; /* saved state for slot_deform_tuple */ } TupleTableSlot; #define TTS_HAS_PHYSICAL_TUPLE(slot) \ @@ -136,19 +162,21 @@ typedef struct TupleTableSlot * TupIsNull -- is a TupleTableSlot empty? 
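For code updating to the flag-based representation, the old boolean fields map onto the macros above roughly as follows (a sketch, not from the patch):

/* was: if (slot->tts_isempty) ... */
if (TTS_EMPTY(slot))
    return NULL;

/* was: slot->tts_shouldFree = true / false */
slot->tts_flags |= TTS_FLAG_SHOULDFREE;     /* the slot now owns tts_tuple */
slot->tts_flags &= ~TTS_FLAG_SHOULDFREE;    /* the slot no longer owns it */
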
*/ #define TupIsNull(slot) \ - ((slot) == NULL || (slot)->tts_isempty) + ((slot) == NULL || TTS_EMPTY(slot)) /* in executor/execTuples.c */ -extern TupleTableSlot *MakeTupleTableSlot(void); -extern TupleTableSlot *ExecAllocTableSlot(List **tupleTable); +extern TupleTableSlot *MakeTupleTableSlot(TupleDesc desc); +extern TupleTableSlot *ExecAllocTableSlot(List **tupleTable, TupleDesc desc); extern void ExecResetTupleTable(List *tupleTable, bool shouldFree); extern TupleTableSlot *MakeSingleTupleTableSlot(TupleDesc tupdesc); extern void ExecDropSingleTupleTableSlot(TupleTableSlot *slot); extern void ExecSetSlotDescriptor(TupleTableSlot *slot, TupleDesc tupdesc); -extern TupleTableSlot *ExecStoreTuple(HeapTuple tuple, - TupleTableSlot *slot, - Buffer buffer, - bool shouldFree); +extern TupleTableSlot *ExecStoreHeapTuple(HeapTuple tuple, + TupleTableSlot *slot, + bool shouldFree); +extern TupleTableSlot *ExecStoreBufferHeapTuple(HeapTuple tuple, + TupleTableSlot *slot, + Buffer buffer); extern TupleTableSlot *ExecStoreMinimalTuple(MinimalTuple mtup, TupleTableSlot *slot, bool shouldFree); @@ -163,11 +191,33 @@ extern Datum ExecFetchSlotTupleDatum(TupleTableSlot *slot); extern HeapTuple ExecMaterializeSlot(TupleTableSlot *slot); extern TupleTableSlot *ExecCopySlot(TupleTableSlot *dstslot, TupleTableSlot *srcslot); +extern void slot_getmissingattrs(TupleTableSlot *slot, int startAttNum, + int lastAttNum); +extern Datum slot_getattr(TupleTableSlot *slot, int attnum, + bool *isnull); +extern void slot_getsomeattrs(TupleTableSlot *slot, int attnum); /* in access/common/heaptuple.c */ -extern Datum slot_getattr(TupleTableSlot *slot, int attnum, bool *isnull); -extern void slot_getallattrs(TupleTableSlot *slot); -extern void slot_getsomeattrs(TupleTableSlot *slot, int attnum); extern bool slot_attisnull(TupleTableSlot *slot, int attnum); +extern bool slot_getsysattr(TupleTableSlot *slot, int attnum, + Datum *value, bool *isnull); +extern Datum getmissingattr(TupleDesc tupleDesc, + int attnum, bool *isnull); + +#ifndef FRONTEND + +/* + * slot_getallattrs + * This function forces all the entries of the slot's Datum/isnull + * arrays to be valid. The caller may then extract data directly + * from those arrays instead of using slot_getattr. + */ +static inline void +slot_getallattrs(TupleTableSlot *slot) +{ + slot_getsomeattrs(slot, slot->tts_tupleDescriptor->natts); +} + +#endif #endif /* TUPTABLE_H */ diff --git a/src/bin/psql/conditional.h b/src/include/fe_utils/conditional.h similarity index 75% rename from src/bin/psql/conditional.h rename to src/include/fe_utils/conditional.h index 0957627742..9b91de5a3d 100644 --- a/src/bin/psql/conditional.h +++ b/src/include/fe_utils/conditional.h @@ -1,9 +1,24 @@ -/* - * psql - the PostgreSQL interactive terminal +/*------------------------------------------------------------------------- + * A stack of automaton states to handle nested conditionals. + * + * This file describes a stack of automaton states which + * allow a manage nested conditionals. + * + * It is used by: + * - "psql" interpretor for handling \if ... \endif + * - "pgbench" interpretor for handling \if ... \endif + * - "pgbench" syntax checker to test for proper nesting * - * Copyright (c) 2000-2017, PostgreSQL Global Development Group + * The stack holds the state of enclosing conditionals (are we in + * a true branch? in a false branch? have we already encountered + * a true branch?) so that the interpreter knows whether to execute + * code and whether to evaluate conditions. 
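[Editor's note, not part of the patch: the tuptable.h hunk above replaces ExecStoreTuple() with two entry points, one for local heap tuples and one for tuples pinned on a buffer page. A hedged sketch of how a caller might adapt; the helper and its arguments are illustrative:]

static void
store_scanned_tuple(TupleTableSlot *slot, HeapTuple tuple, Buffer buffer)
{
    if (BufferIsValid(buffer))
        ExecStoreBufferHeapTuple(tuple, slot, buffer);  /* was: ExecStoreTuple(tuple, slot, buffer, false) */
    else
        ExecStoreHeapTuple(tuple, slot, true);          /* was: ExecStoreTuple(tuple, slot, InvalidBuffer, true) */
}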
* - * src/bin/psql/conditional.h + * Copyright (c) 2000-2018, PostgreSQL Global Development Group + * + * src/include/fe_utils/conditional.h + * + *------------------------------------------------------------------------- */ #ifndef CONDITIONAL_H #define CONDITIONAL_H @@ -60,6 +75,8 @@ extern ConditionalStack conditional_stack_create(void); extern void conditional_stack_destroy(ConditionalStack cstack); +extern int conditional_stack_depth(ConditionalStack cstack); + extern void conditional_stack_push(ConditionalStack cstack, ifState new_state); extern bool conditional_stack_pop(ConditionalStack cstack); diff --git a/src/include/fe_utils/connect.h b/src/include/fe_utils/connect.h new file mode 100644 index 0000000000..d62f5a3724 --- /dev/null +++ b/src/include/fe_utils/connect.h @@ -0,0 +1,28 @@ +/*------------------------------------------------------------------------- + * + * Interfaces in support of FE/BE connections. + * + * + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group + * Portions Copyright (c) 1994, Regents of the University of California + * + * src/include/fe_utils/connect.h + * + *------------------------------------------------------------------------- + */ +#ifndef CONNECT_H +#define CONNECT_H + +/* + * This SQL statement installs an always-secure search path, so malicious + * users can't take control. CREATE of an unqualified name will fail, because + * this selects no creation schema. This does not demote pg_temp, so it is + * suitable where we control the entire FE/BE connection but not suitable in + * SECURITY DEFINER functions. This is portable to PostgreSQL 7.3, which + * introduced schemas. When connected to an older version from code that + * might work with the old server, skip this. + */ +#define ALWAYS_SECURE_SEARCH_PATH_SQL \ + "SELECT pg_catalog.set_config('search_path', '', false);" + +#endif /* CONNECT_H */ diff --git a/src/include/fe_utils/mbprint.h b/src/include/fe_utils/mbprint.h index e3cfaf3ddd..7d8019c203 100644 --- a/src/include/fe_utils/mbprint.h +++ b/src/include/fe_utils/mbprint.h @@ -3,7 +3,7 @@ * Multibyte character printing support for frontend code * * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/include/fe_utils/mbprint.h diff --git a/src/include/fe_utils/print.h b/src/include/fe_utils/print.h index 36b89e7d57..b761349bc7 100644 --- a/src/include/fe_utils/print.h +++ b/src/include/fe_utils/print.h @@ -3,7 +3,7 @@ * Query-result printing support for frontend code * * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/include/fe_utils/print.h @@ -26,14 +26,14 @@ enum printFormat { PRINT_NOTHING = 0, /* to make sure someone initializes this */ - PRINT_UNALIGNED, PRINT_ALIGNED, - PRINT_WRAPPED, - PRINT_HTML, PRINT_ASCIIDOC, + PRINT_HTML, PRINT_LATEX, PRINT_LATEX_LONGTABLE, - PRINT_TROFF_MS + PRINT_TROFF_MS, + PRINT_UNALIGNED, + PRINT_WRAPPED /* add your favourite output format here ... */ }; diff --git a/src/include/fe_utils/psqlscan.h b/src/include/fe_utils/psqlscan.h index c199a2917e..2d58c071f3 100644 --- a/src/include/fe_utils/psqlscan.h +++ b/src/include/fe_utils/psqlscan.h @@ -10,7 +10,7 @@ * backslash commands. 
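[Editor's note, not part of the patch: a hedged usage sketch for the new fe_utils/connect.h above. Frontend tools would run the statement right after connecting, before any other query; the connection variable, helper name, and error handling here are illustrative, and libpq-fe.h is assumed for the PQexec() calls.]

static bool
secure_session(PGconn *conn)
{
    PGresult   *res = PQexec(conn, ALWAYS_SECURE_SEARCH_PATH_SQL);
    bool        ok = (PQresultStatus(res) == PGRES_TUPLES_OK);

    PQclear(res);
    return ok;
}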
* * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/include/fe_utils/psqlscan.h diff --git a/src/include/fe_utils/psqlscan_int.h b/src/include/fe_utils/psqlscan_int.h index c70ff29f4e..0be0db69ab 100644 --- a/src/include/fe_utils/psqlscan_int.h +++ b/src/include/fe_utils/psqlscan_int.h @@ -34,7 +34,7 @@ * same flex version, or if they don't use the same flex options. * * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/include/fe_utils/psqlscan_int.h @@ -142,5 +142,7 @@ extern char *psqlscan_extract_substring(PsqlScanState state, extern void psqlscan_escape_variable(PsqlScanState state, const char *txt, int len, PsqlScanQuoteType quote); +extern void psqlscan_test_variable(PsqlScanState state, + const char *txt, int len); #endif /* PSQLSCAN_INT_H */ diff --git a/src/include/fe_utils/simple_list.h b/src/include/fe_utils/simple_list.h index 97bb34f191..9785489128 100644 --- a/src/include/fe_utils/simple_list.h +++ b/src/include/fe_utils/simple_list.h @@ -7,7 +7,7 @@ * it's all we need in, eg, pg_dump. * * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/include/fe_utils/simple_list.h diff --git a/src/include/fe_utils/string_utils.h b/src/include/fe_utils/string_utils.h index bc6b87d6f1..8199682e63 100644 --- a/src/include/fe_utils/string_utils.h +++ b/src/include/fe_utils/string_utils.h @@ -6,7 +6,7 @@ * assorted contexts. * * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/include/fe_utils/string_utils.h @@ -25,8 +25,7 @@ extern PQExpBuffer (*getLocalPQExpBuffer) (void); /* Functions */ extern const char *fmtId(const char *identifier); -extern const char *fmtQualifiedId(int remoteVersion, - const char *schema, const char *id); +extern const char *fmtQualifiedId(const char *schema, const char *id); extern char *formatPGVersionNumber(int version_number, bool include_minor, char *buf, size_t buflen); diff --git a/src/include/fmgr.h b/src/include/fmgr.h index 0216965bfc..101f513ba6 100644 --- a/src/include/fmgr.h +++ b/src/include/fmgr.h @@ -8,7 +8,7 @@ * or call fmgr-callable functions. 
* * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/include/fmgr.h @@ -80,9 +80,12 @@ typedef struct FunctionCallInfoData fmNodePtr context; /* pass info about context of call */ fmNodePtr resultinfo; /* pass or return extra info about result */ Oid fncollation; /* collation for function to use */ +#define FIELDNO_FUNCTIONCALLINFODATA_ISNULL 4 bool isnull; /* function must set true if result is NULL */ short nargs; /* # arguments actually passed */ +#define FIELDNO_FUNCTIONCALLINFODATA_ARG 6 Datum arg[FUNC_MAX_ARGS]; /* Arguments passed to function */ +#define FIELDNO_FUNCTIONCALLINFODATA_ARGNULL 7 bool argnull[FUNC_MAX_ARGS]; /* T if arg[i] is actually NULL */ } FunctionCallInfoData; @@ -110,6 +113,8 @@ extern void fmgr_info_cxt(Oid functionId, FmgrInfo *finfo, extern void fmgr_info_copy(FmgrInfo *dstinfo, FmgrInfo *srcinfo, MemoryContext destcxt); +extern void fmgr_symbol(Oid functionId, char **mod, char **fn); + /* * This macro initializes all the fields of a FunctionCallInfoData except * for the arg[] and argnull[] arrays. Performance testing has shown that @@ -325,6 +330,7 @@ extern struct varlena *pg_detoast_datum_packed(struct varlena *datum); #define PG_RETURN_FLOAT4(x) return Float4GetDatum(x) #define PG_RETURN_FLOAT8(x) return Float8GetDatum(x) #define PG_RETURN_INT64(x) return Int64GetDatum(x) +#define PG_RETURN_UINT64(x) return UInt64GetDatum(x) /* RETURN macros for other pass-by-ref types will typically look like this: */ #define PG_RETURN_BYTEA_P(x) PG_RETURN_POINTER(x) #define PG_RETURN_TEXT_P(x) PG_RETURN_POINTER(x) @@ -697,6 +703,7 @@ extern int AggCheckCallContext(FunctionCallInfo fcinfo, MemoryContext *aggcontext); extern fmAggrefPtr AggGetAggref(FunctionCallInfo fcinfo); extern MemoryContext AggGetTempMemoryContext(FunctionCallInfo fcinfo); +extern bool AggStateIsShared(FunctionCallInfo fcinfo); extern void AggRegisterCallback(FunctionCallInfo fcinfo, fmExprContextCallbackFunction func, Datum arg); @@ -728,19 +735,4 @@ extern PGDLLIMPORT fmgr_hook_type fmgr_hook; #define FmgrHookIsNeeded(fn_oid) \ (!needs_fmgr_hook ? false : (*needs_fmgr_hook)(fn_oid)) -/* - * !!! OLD INTERFACE !!! - * - * fmgr() is the only remaining vestige of the old-style caller support - * functions. It's no longer used anywhere in the Postgres distribution, - * but we should leave it around for a release or two to ease the transition - * for user-supplied C functions. OidFunctionCallN() replaces it for new - * code. 
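[Editor's note, not part of the patch: the fmgr.h hunk above adds PG_RETURN_UINT64. An illustrative C-language function using it; the function name and body are hypothetical.]

PG_FUNCTION_INFO_V1(my_row_counter);

Datum
my_row_counter(PG_FUNCTION_ARGS)
{
    uint64      count = 0;

    /* ... compute count here ... */
    PG_RETURN_UINT64(count);
}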
- */ - -/* - * DEPRECATED, DO NOT USE IN NEW CODE - */ -extern char *fmgr(Oid procedureId,...); - #endif /* FMGR_H */ diff --git a/src/include/foreign/fdwapi.h b/src/include/foreign/fdwapi.h index e391f20fb8..c14eb546c6 100644 --- a/src/include/foreign/fdwapi.h +++ b/src/include/foreign/fdwapi.h @@ -3,7 +3,7 @@ * fdwapi.h * API for foreign-data wrappers * - * Copyright (c) 2010-2017, PostgreSQL Global Development Group + * Copyright (c) 2010-2018, PostgreSQL Global Development Group * * src/include/foreign/fdwapi.h * @@ -62,7 +62,8 @@ typedef void (*GetForeignJoinPaths_function) (PlannerInfo *root, typedef void (*GetForeignUpperPaths_function) (PlannerInfo *root, UpperRelationKind stage, RelOptInfo *input_rel, - RelOptInfo *output_rel); + RelOptInfo *output_rel, + void *extra); typedef void (*AddForeignUpdateTargets_function) (Query *parsetree, RangeTblEntry *target_rte, @@ -97,6 +98,12 @@ typedef TupleTableSlot *(*ExecForeignDelete_function) (EState *estate, typedef void (*EndForeignModify_function) (EState *estate, ResultRelInfo *rinfo); +typedef void (*BeginForeignInsert_function) (ModifyTableState *mtstate, + ResultRelInfo *rinfo); + +typedef void (*EndForeignInsert_function) (EState *estate, + ResultRelInfo *rinfo); + typedef int (*IsForeignRelUpdatable_function) (Relation rel); typedef bool (*PlanDirectModify_function) (PlannerInfo *root, @@ -148,6 +155,9 @@ typedef Size (*EstimateDSMForeignScan_function) (ForeignScanState *node, typedef void (*InitializeDSMForeignScan_function) (ForeignScanState *node, ParallelContext *pcxt, void *coordinate); +typedef void (*ReInitializeDSMForeignScan_function) (ForeignScanState *node, + ParallelContext *pcxt, + void *coordinate); typedef void (*InitializeWorkerForeignScan_function) (ForeignScanState *node, shm_toc *toc, void *coordinate); @@ -155,6 +165,9 @@ typedef void (*ShutdownForeignScan_function) (ForeignScanState *node); typedef bool (*IsForeignScanParallelSafe_function) (PlannerInfo *root, RelOptInfo *rel, RangeTblEntry *rte); +typedef List *(*ReparameterizeForeignPathByChild_function) (PlannerInfo *root, + List *fdw_private, + RelOptInfo *child_rel); /* * FdwRoutine is the struct returned by a foreign-data wrapper's handler @@ -198,6 +211,8 @@ typedef struct FdwRoutine ExecForeignUpdate_function ExecForeignUpdate; ExecForeignDelete_function ExecForeignDelete; EndForeignModify_function EndForeignModify; + BeginForeignInsert_function BeginForeignInsert; + EndForeignInsert_function EndForeignInsert; IsForeignRelUpdatable_function IsForeignRelUpdatable; PlanDirectModify_function PlanDirectModify; BeginDirectModify_function BeginDirectModify; @@ -224,8 +239,12 @@ typedef struct FdwRoutine IsForeignScanParallelSafe_function IsForeignScanParallelSafe; EstimateDSMForeignScan_function EstimateDSMForeignScan; InitializeDSMForeignScan_function InitializeDSMForeignScan; + ReInitializeDSMForeignScan_function ReInitializeDSMForeignScan; InitializeWorkerForeignScan_function InitializeWorkerForeignScan; ShutdownForeignScan_function ShutdownForeignScan; + + /* Support functions for path reparameterization. */ + ReparameterizeForeignPathByChild_function ReparameterizeForeignPathByChild; } FdwRoutine; diff --git a/src/include/foreign/foreign.h b/src/include/foreign/foreign.h index 2f4c569d1d..3ca12e64d2 100644 --- a/src/include/foreign/foreign.h +++ b/src/include/foreign/foreign.h @@ -4,7 +4,7 @@ * support for foreign-data wrappers, servers and user mappings. 
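[Editor's note, not part of the patch: sketch of the caller side of the fdwapi.h additions above. An FDW's handler fills the new optional FdwRoutine fields and may leave them NULL when the feature is not supported; all my*/my_* names are hypothetical placeholders.]

static void myBeginForeignInsert(ModifyTableState *mtstate, ResultRelInfo *rinfo);
static void myEndForeignInsert(EState *estate, ResultRelInfo *rinfo);

Datum
my_fdw_handler(PG_FUNCTION_ARGS)
{
    FdwRoutine *routine = makeNode(FdwRoutine);

    /* ... required scan/modify callbacks would be set here ... */
    routine->BeginForeignInsert = myBeginForeignInsert; /* e.g. COPY into a foreign table */
    routine->EndForeignInsert = myEndForeignInsert;
    routine->ReInitializeDSMForeignScan = NULL;         /* optional */
    routine->ReparameterizeForeignPathByChild = NULL;   /* optional */

    PG_RETURN_POINTER(routine);
}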
* * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * * src/include/foreign/foreign.h * diff --git a/src/include/funcapi.h b/src/include/funcapi.h index 951af2aad3..6a9eb4366c 100644 --- a/src/include/funcapi.h +++ b/src/include/funcapi.h @@ -2,12 +2,13 @@ * * funcapi.h * Definitions for functions which return composite type and/or sets + * or work on VARIADIC inputs. * * This file must be included by all Postgres modules that either define * or call FUNCAPI-callable functions or macros. * * - * Copyright (c) 2002-2017, PostgreSQL Global Development Group + * Copyright (c) 2002-2018, PostgreSQL Global Development Group * * src/include/funcapi.h * @@ -73,14 +74,6 @@ typedef struct FuncCallContext */ uint64 max_calls; - /* - * OPTIONAL pointer to result slot - * - * This is obsolete and only present for backwards compatibility, viz, - * user-defined SRFs that use the deprecated TupleDescGetSlot(). - */ - TupleTableSlot *slot; - /* * OPTIONAL pointer to miscellaneous user-provided context information * @@ -143,6 +136,10 @@ typedef struct FuncCallContext * get_call_result_type. Note: the cases in which rowtypes cannot be * determined are different from the cases for get_call_result_type. * Do *not* use this if you can use one of the others. + * + * See also get_expr_result_tupdesc(), which is a convenient wrapper around + * get_expr_result_type() for use when the caller only cares about + * determinable-rowtype cases. *---------- */ @@ -151,6 +148,7 @@ typedef enum TypeFuncClass { TYPEFUNC_SCALAR, /* scalar result type */ TYPEFUNC_COMPOSITE, /* determinable rowtype result */ + TYPEFUNC_COMPOSITE_DOMAIN, /* domain over determinable rowtype result */ TYPEFUNC_RECORD, /* indeterminate rowtype result */ TYPEFUNC_OTHER /* bogus type, eg pseudotype */ } TypeFuncClass; @@ -165,6 +163,8 @@ extern TypeFuncClass get_func_result_type(Oid functionId, Oid *resultTypeId, TupleDesc *resultTupleDesc); +extern TupleDesc get_expr_result_tupdesc(Node *expr, bool noError); + extern bool resolve_polymorphic_argtypes(int numargs, Oid *argtypes, char *argmodes, Node *call_expr); @@ -179,7 +179,8 @@ extern int get_func_input_arg_names(Datum proargnames, Datum proargmodes, extern int get_func_trftypes(HeapTuple procTup, Oid **p_trftypes); extern char *get_func_result_name(Oid functionId); -extern TupleDesc build_function_result_tupdesc_d(Datum proallargtypes, +extern TupleDesc build_function_result_tupdesc_d(char prokind, + Datum proallargtypes, Datum proargmodes, Datum proargnames); extern TupleDesc build_function_result_tupdesc_t(HeapTuple procTuple); @@ -212,8 +213,6 @@ extern TupleDesc build_function_result_tupdesc_t(HeapTuple procTuple); * TupleDesc based on a named relation. * TupleDesc TypeGetTupleDesc(Oid typeoid, List *colaliases) - Use to get a * TupleDesc based on a type OID. - * TupleTableSlot *TupleDescGetSlot(TupleDesc tupdesc) - Builds a - * TupleTableSlot, which is not needed anymore. * TupleGetDatum(TupleTableSlot *slot, HeapTuple tuple) - get a Datum * given a tuple and a slot. 
*---------- @@ -231,7 +230,6 @@ extern TupleDesc BlessTupleDesc(TupleDesc tupdesc); extern AttInMetadata *TupleDescGetAttInMetadata(TupleDesc tupdesc); extern HeapTuple BuildTupleFromCStrings(AttInMetadata *attinmeta, char **values); extern Datum HeapTupleHeaderGetDatum(HeapTupleHeader tuple); -extern TupleTableSlot *TupleDescGetSlot(TupleDesc tupdesc); /*---------- @@ -315,4 +313,26 @@ extern void end_MultiFuncCall(PG_FUNCTION_ARGS, FuncCallContext *funcctx); PG_RETURN_NULL(); \ } while (0) +/*---------- + * Support to ease writing of functions dealing with VARIADIC inputs + *---------- + * + * This function extracts a set of argument values, types and NULL markers + * for a given input function. This returns a set of data: + * - **values includes the set of Datum values extracted. + * - **types the data type OID for each element. + * - **nulls tracks if an element is NULL. + * + * variadic_start indicates the argument number where the VARIADIC argument + * starts. + * convert_unknown set to true will enforce the conversion of arguments + * with unknown data type to text. + * + * The return result is the number of elements stored, or -1 in the case of + * "VARIADIC NULL". + */ +extern int extract_variadic_args(FunctionCallInfo fcinfo, int variadic_start, + bool convert_unknown, Datum **values, + Oid **types, bool **nulls); + #endif /* FUNCAPI_H */ diff --git a/src/include/getaddrinfo.h b/src/include/getaddrinfo.h index 3dcfc1fa25..1b460a3e5f 100644 --- a/src/include/getaddrinfo.h +++ b/src/include/getaddrinfo.h @@ -13,7 +13,7 @@ * This code will also work on platforms where struct addrinfo is defined * in the system headers but no getaddrinfo() can be located. * - * Copyright (c) 2003-2017, PostgreSQL Global Development Group + * Copyright (c) 2003-2018, PostgreSQL Global Development Group * * src/include/getaddrinfo.h * diff --git a/src/include/getopt_long.h b/src/include/getopt_long.h index c55d45348a..a9013b40f7 100644 --- a/src/include/getopt_long.h +++ b/src/include/getopt_long.h @@ -2,7 +2,7 @@ * Portions Copyright (c) 1987, 1993, 1994 * The Regents of the University of California. All rights reserved. * - * Portions Copyright (c) 2003-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 2003-2018, PostgreSQL Global Development Group * * src/include/getopt_long.h */ diff --git a/src/include/jit/jit.h b/src/include/jit/jit.h new file mode 100644 index 0000000000..6cb3bdc89f --- /dev/null +++ b/src/include/jit/jit.h @@ -0,0 +1,105 @@ +/*------------------------------------------------------------------------- + * jit.h + * Provider independent JIT infrastructure. 
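[Editor's note, not part of the patch: the funcapi.h comment above describes the new extract_variadic_args() helper. A hedged sketch of a VARIADIC "any" C function using it; the function itself is illustrative.]

PG_FUNCTION_INFO_V1(count_not_null);

Datum
count_not_null(PG_FUNCTION_ARGS)
{
    Datum      *values;
    Oid        *types;
    bool       *nulls;
    int         nargs,
                i,
                result = 0;

    /* variadic arguments start at position 0; convert "unknown" literals to text */
    nargs = extract_variadic_args(fcinfo, 0, true, &values, &types, &nulls);
    if (nargs < 0)
        PG_RETURN_NULL();       /* VARIADIC NULL was passed */

    for (i = 0; i < nargs; i++)
        if (!nulls[i])
            result++;

    PG_RETURN_INT32(result);
}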
+ * + * Copyright (c) 2016-2018, PostgreSQL Global Development Group + * + * src/include/jit/jit.h + * + *------------------------------------------------------------------------- + */ +#ifndef JIT_H +#define JIT_H + +#include "executor/instrument.h" +#include "utils/resowner.h" + + +/* Flags determining what kind of JIT operations to perform */ +#define PGJIT_NONE 0 +#define PGJIT_PERFORM (1 << 0) +#define PGJIT_OPT3 (1 << 1) +#define PGJIT_INLINE (1 << 2) +#define PGJIT_EXPR (1 << 3) +#define PGJIT_DEFORM (1 << 4) + + +typedef struct JitInstrumentation +{ + /* number of emitted functions */ + size_t created_functions; + + /* accumulated time to generate code */ + instr_time generation_counter; + + /* accumulated time for inlining */ + instr_time inlining_counter; + + /* accumulated time for optimization */ + instr_time optimization_counter; + + /* accumulated time for code emission */ + instr_time emission_counter; +} JitInstrumentation; + +/* + * DSM structure for accumulating jit instrumentation of all workers. + */ +typedef struct SharedJitInstrumentation +{ + int num_workers; + JitInstrumentation jit_instr[FLEXIBLE_ARRAY_MEMBER]; +} SharedJitInstrumentation; + +typedef struct JitContext +{ + /* see PGJIT_* above */ + int flags; + + ResourceOwner resowner; + + JitInstrumentation instr; +} JitContext; + +typedef struct JitProviderCallbacks JitProviderCallbacks; + +extern void _PG_jit_provider_init(JitProviderCallbacks *cb); +typedef void (*JitProviderInit) (JitProviderCallbacks *cb); +typedef void (*JitProviderResetAfterErrorCB) (void); +typedef void (*JitProviderReleaseContextCB) (JitContext *context); +struct ExprState; +typedef bool (*JitProviderCompileExprCB) (struct ExprState *state); + +struct JitProviderCallbacks +{ + JitProviderResetAfterErrorCB reset_after_error; + JitProviderReleaseContextCB release_context; + JitProviderCompileExprCB compile_expr; +}; + + +/* GUCs */ +extern bool jit_enabled; +extern char *jit_provider; +extern bool jit_debugging_support; +extern bool jit_dump_bitcode; +extern bool jit_expressions; +extern bool jit_profiling_support; +extern bool jit_tuple_deforming; +extern double jit_above_cost; +extern double jit_inline_above_cost; +extern double jit_optimize_above_cost; + + +extern void jit_reset_after_error(void); +extern void jit_release_context(JitContext *context); + +/* + * Functions for attempting to JIT code. Callers must accept that these might + * not be able to perform JIT (i.e. return false). + */ +extern bool jit_compile_expr(struct ExprState *state); +extern void InstrJitAgg(JitInstrumentation *dst, JitInstrumentation *add); + + +#endif /* JIT_H */ diff --git a/src/include/jit/llvmjit.h b/src/include/jit/llvmjit.h new file mode 100644 index 0000000000..f3ea249283 --- /dev/null +++ b/src/include/jit/llvmjit.h @@ -0,0 +1,138 @@ +/*------------------------------------------------------------------------- + * llvmjit.h + * LLVM JIT provider. + * + * Copyright (c) 2016-2018, PostgreSQL Global Development Group + * + * src/include/jit/llvmjit.h + * + *------------------------------------------------------------------------- + */ +#ifndef LLVMJIT_H +#define LLVMJIT_H + +#ifndef USE_LLVM +#error "llvmjit.h should only be included by code dealing with llvm" +#endif + +#include + + +/* + * File needs to be includable by both C and C++ code, and include other + * headers doing the same. Therefore wrap C portion in our own extern "C" if + * in C++ mode. 
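[Editor's note, not part of the patch: a hedged sketch of the provider side of the jit.h interface above. A JIT provider library exposes _PG_jit_provider_init() and fills in the callback struct; the my_* functions are placeholders, and the PGJIT_* flags carried in JitContext->flags then tell compile_expr which operations were requested.]

static void my_reset_after_error(void);
static void my_release_context(JitContext *context);
static bool my_compile_expr(struct ExprState *state);

void
_PG_jit_provider_init(JitProviderCallbacks *cb)
{
    cb->reset_after_error = my_reset_after_error;
    cb->release_context = my_release_context;
    cb->compile_expr = my_compile_expr;
}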
+ */ +#ifdef __cplusplus +extern "C" +{ +#endif + + +#include "fmgr.h" +#include "jit/jit.h" +#include "nodes/pg_list.h" +#include "access/tupdesc.h" + + +typedef struct LLVMJitContext +{ + JitContext base; + + /* number of modules created */ + size_t module_generation; + + /* current, "open for write", module */ + LLVMModuleRef module; + + /* is there any pending code that needs to be emitted */ + bool compiled; + + /* # of objects emitted, used to generate non-conflicting names */ + int counter; + + /* list of handles for code emitted via Orc */ + List *handles; +} LLVMJitContext; + + +/* type and struct definitions */ +extern LLVMTypeRef TypeParamBool; +extern LLVMTypeRef TypePGFunction; +extern LLVMTypeRef TypeSizeT; +extern LLVMTypeRef TypeStorageBool; + +extern LLVMTypeRef StructtupleDesc; +extern LLVMTypeRef StructHeapTupleData; +extern LLVMTypeRef StructTupleTableSlot; +extern LLVMTypeRef StructMemoryContextData; +extern LLVMTypeRef StructFunctionCallInfoData; +extern LLVMTypeRef StructExprContext; +extern LLVMTypeRef StructExprEvalStep; +extern LLVMTypeRef StructExprState; +extern LLVMTypeRef StructAggState; +extern LLVMTypeRef StructAggStatePerTransData; +extern LLVMTypeRef StructAggStatePerGroupData; + +extern LLVMValueRef AttributeTemplate; +extern LLVMValueRef FuncStrlen; +extern LLVMValueRef FuncVarsizeAny; +extern LLVMValueRef FuncSlotGetsomeattrs; +extern LLVMValueRef FuncSlotGetmissingattrs; +extern LLVMValueRef FuncMakeExpandedObjectReadOnlyInternal; +extern LLVMValueRef FuncExecEvalArrayRefSubscript; +extern LLVMValueRef FuncExecEvalSysVar; +extern LLVMValueRef FuncExecAggTransReparent; +extern LLVMValueRef FuncExecAggInitGroup; + + +extern void llvm_enter_fatal_on_oom(void); +extern void llvm_leave_fatal_on_oom(void); +extern void llvm_reset_after_error(void); +extern void llvm_assert_in_fatal_section(void); + +extern LLVMJitContext *llvm_create_context(int jitFlags); +extern LLVMModuleRef llvm_mutable_module(LLVMJitContext *context); +extern char *llvm_expand_funcname(LLVMJitContext *context, const char *basename); +extern void *llvm_get_function(LLVMJitContext *context, const char *funcname); +extern void llvm_split_symbol_name(const char *name, char **modname, char **funcname); +extern LLVMValueRef llvm_get_decl(LLVMModuleRef mod, LLVMValueRef f); +extern void llvm_copy_attributes(LLVMValueRef from, LLVMValueRef to); +extern LLVMValueRef llvm_function_reference(LLVMJitContext *context, + LLVMBuilderRef builder, + LLVMModuleRef mod, + FunctionCallInfo fcinfo); + +extern void llvm_inline(LLVMModuleRef mod); + +/* + **************************************************************************** + * Code generation functions. + **************************************************************************** + */ +extern bool llvm_compile_expr(struct ExprState *state); +extern LLVMValueRef slot_compile_deform(struct LLVMJitContext *context, TupleDesc desc, int natts); + +/* + **************************************************************************** + * Extensions / Backward compatibility section of the LLVM C API + * Error handling related functions. + **************************************************************************** + */ +#if defined(HAVE_DECL_LLVMGETHOSTCPUNAME) && !HAVE_DECL_LLVMGETHOSTCPUNAME +/** Get the host CPU as a string. The result needs to be disposed with + LLVMDisposeMessage. 
*/ +extern char *LLVMGetHostCPUName(void); +#endif + +#if defined(HAVE_DECL_LLVMGETHOSTCPUFEATURES) && !HAVE_DECL_LLVMGETHOSTCPUFEATURES +/** Get the host CPU features as a string. The result needs to be disposed + with LLVMDisposeMessage. */ +extern char *LLVMGetHostCPUFeatures(void); +#endif + +#ifdef __cplusplus +} /* extern "C" */ +#endif + +#endif /* LLVMJIT_H */ diff --git a/src/include/jit/llvmjit_emit.h b/src/include/jit/llvmjit_emit.h new file mode 100644 index 0000000000..0d1b246f42 --- /dev/null +++ b/src/include/jit/llvmjit_emit.h @@ -0,0 +1,211 @@ +/* + * llvmjit_emit.h + * Helpers to make emitting LLVM IR a it more concise and pgindent proof. + * + * Copyright (c) 2018, PostgreSQL Global Development Group + * + * src/include/lib/llvmjit_emit.h + */ +#ifndef LLVMJIT_EMIT_H +#define LLVMJIT_EMIT_H + + +#include + + +/* + * Emit a non-LLVM pointer as an LLVM constant. + */ +static inline LLVMValueRef +l_ptr_const(void *ptr, LLVMTypeRef type) +{ + LLVMValueRef c = LLVMConstInt(TypeSizeT, (uintptr_t) ptr, false); + + return LLVMConstIntToPtr(c, type); +} + +/* + * Emit pointer. + */ +static inline LLVMTypeRef +l_ptr(LLVMTypeRef t) +{ + return LLVMPointerType(t, 0); +} + +/* + * Emit constant integer. + */ +static inline LLVMValueRef +l_int8_const(int8 i) +{ + return LLVMConstInt(LLVMInt8Type(), i, false); +} + +/* + * Emit constant integer. + */ +static inline LLVMValueRef +l_int16_const(int16 i) +{ + return LLVMConstInt(LLVMInt16Type(), i, false); +} + +/* + * Emit constant integer. + */ +static inline LLVMValueRef +l_int32_const(int32 i) +{ + return LLVMConstInt(LLVMInt32Type(), i, false); +} + +/* + * Emit constant integer. + */ +static inline LLVMValueRef +l_int64_const(int64 i) +{ + return LLVMConstInt(LLVMInt64Type(), i, false); +} + +/* + * Emit constant integer. + */ +static inline LLVMValueRef +l_sizet_const(size_t i) +{ + return LLVMConstInt(TypeSizeT, i, false); +} + +/* + * Emit constant boolean, as used for storage (e.g. global vars, structs). + */ +static inline LLVMValueRef +l_sbool_const(bool i) +{ + return LLVMConstInt(TypeStorageBool, (int) i, false); +} + +/* + * Emit constant boolean, as used for parameters (e.g. function parameters). + */ +static inline LLVMValueRef +l_pbool_const(bool i) +{ + return LLVMConstInt(TypeParamBool, (int) i, false); +} + +/* + * Load a pointer member idx from a struct. + */ +static inline LLVMValueRef +l_load_struct_gep(LLVMBuilderRef b, LLVMValueRef v, int32 idx, const char *name) +{ + LLVMValueRef v_ptr = LLVMBuildStructGEP(b, v, idx, ""); + + return LLVMBuildLoad(b, v_ptr, name); +} + +/* + * Load value of a pointer, after applying one index operation. + */ +static inline LLVMValueRef +l_load_gep1(LLVMBuilderRef b, LLVMValueRef v, LLVMValueRef idx, const char *name) +{ + LLVMValueRef v_ptr = LLVMBuildGEP(b, v, &idx, 1, ""); + + return LLVMBuildLoad(b, v_ptr, name); +} + +/* separate, because pg_attribute_printf(2, 3) can't appear in definition */ +static inline LLVMBasicBlockRef l_bb_before_v(LLVMBasicBlockRef r, const char *fmt,...) pg_attribute_printf(2, 3); + +/* + * Insert a new basic block, just before r, the name being determined by fmt + * and arguments. + */ +static inline LLVMBasicBlockRef +l_bb_before_v(LLVMBasicBlockRef r, const char *fmt,...) 
+{ + char buf[512]; + va_list args; + + va_start(args, fmt); + vsnprintf(buf, sizeof(buf), fmt, args); + va_end(args); + + return LLVMInsertBasicBlock(r, buf); +} + +/* separate, because pg_attribute_printf(2, 3) can't appear in definition */ +static inline LLVMBasicBlockRef l_bb_append_v(LLVMValueRef f, const char *fmt,...) pg_attribute_printf(2, 3); + +/* + * Insert a new basic block after previous basic blocks, the name being + * determined by fmt and arguments. + */ +static inline LLVMBasicBlockRef +l_bb_append_v(LLVMValueRef f, const char *fmt,...) +{ + char buf[512]; + va_list args; + + va_start(args, fmt); + vsnprintf(buf, sizeof(buf), fmt, args); + va_end(args); + + return LLVMAppendBasicBlock(f, buf); +} + +/* + * Mark a callsite as readonly. + */ +static inline void +l_callsite_ro(LLVMValueRef f) +{ + const char argname[] = "readonly"; + LLVMAttributeRef ref; + + ref = LLVMCreateStringAttribute(LLVMGetGlobalContext(), + argname, + sizeof(argname) - 1, + NULL, 0); + + LLVMAddCallSiteAttribute(f, LLVMAttributeFunctionIndex, ref); +} + +/* + * Mark a callsite as alwaysinline. + */ +static inline void +l_callsite_alwaysinline(LLVMValueRef f) +{ + const char argname[] = "alwaysinline"; + int id; + LLVMAttributeRef attr; + + id = LLVMGetEnumAttributeKindForName(argname, + sizeof(argname) - 1); + attr = LLVMCreateEnumAttribute(LLVMGetGlobalContext(), id, 0); + LLVMAddCallSiteAttribute(f, LLVMAttributeFunctionIndex, attr); +} + +/* + * Emit code to switch memory context. + */ +static inline LLVMValueRef +l_mcxt_switch(LLVMModuleRef mod, LLVMBuilderRef b, LLVMValueRef nc) +{ + const char *cmc = "CurrentMemoryContext"; + LLVMValueRef cur; + LLVMValueRef ret; + + if (!(cur = LLVMGetNamedGlobal(mod, cmc))) + cur = LLVMAddGlobal(mod, l_ptr(StructMemoryContextData), cmc); + ret = LLVMBuildLoad(b, cur, cmc); + LLVMBuildStore(b, nc, cur); + + return ret; +} +#endif diff --git a/src/include/lib/binaryheap.h b/src/include/lib/binaryheap.h index da7504bd55..9399e0d60b 100644 --- a/src/include/lib/binaryheap.h +++ b/src/include/lib/binaryheap.h @@ -3,7 +3,7 @@ * * A simple binary heap implementation * - * Portions Copyright (c) 2012-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 2012-2018, PostgreSQL Global Development Group * * src/include/lib/binaryheap.h */ diff --git a/src/include/lib/bipartite_match.h b/src/include/lib/bipartite_match.h index 8f580bbd97..c184c0d38e 100644 --- a/src/include/lib/bipartite_match.h +++ b/src/include/lib/bipartite_match.h @@ -1,7 +1,7 @@ /* * bipartite_match.h * - * Copyright (c) 2015-2017, PostgreSQL Global Development Group + * Copyright (c) 2015-2018, PostgreSQL Global Development Group * * src/include/lib/bipartite_match.h */ diff --git a/src/include/lib/bloomfilter.h b/src/include/lib/bloomfilter.h new file mode 100644 index 0000000000..6cbdd9bfd9 --- /dev/null +++ b/src/include/lib/bloomfilter.h @@ -0,0 +1,27 @@ +/*------------------------------------------------------------------------- + * + * bloomfilter.h + * Space-efficient set membership testing + * + * Copyright (c) 2018, PostgreSQL Global Development Group + * + * IDENTIFICATION + * src/include/lib/bloomfilter.h + * + *------------------------------------------------------------------------- + */ +#ifndef BLOOMFILTER_H +#define BLOOMFILTER_H + +typedef struct bloom_filter bloom_filter; + +extern bloom_filter *bloom_create(int64 total_elems, int bloom_work_mem, + uint64 seed); +extern void bloom_free(bloom_filter *filter); +extern void bloom_add_element(bloom_filter *filter, 
unsigned char *elem, + size_t len); +extern bool bloom_lacks_element(bloom_filter *filter, unsigned char *elem, + size_t len); +extern double bloom_prop_bits_set(bloom_filter *filter); + +#endif /* BLOOMFILTER_H */ diff --git a/src/include/lib/dshash.h b/src/include/lib/dshash.h new file mode 100644 index 0000000000..8c733bfe25 --- /dev/null +++ b/src/include/lib/dshash.h @@ -0,0 +1,90 @@ +/*------------------------------------------------------------------------- + * + * dshash.h + * Concurrent hash tables backed by dynamic shared memory areas. + * + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group + * Portions Copyright (c) 1994, Regents of the University of California + * + * IDENTIFICATION + * src/include/lib/dshash.h + * + *------------------------------------------------------------------------- + */ +#ifndef DSHASH_H +#define DSHASH_H + +#include "utils/dsa.h" + +/* The opaque type representing a hash table. */ +struct dshash_table; +typedef struct dshash_table dshash_table; + +/* A handle for a dshash_table which can be shared with other processes. */ +typedef dsa_pointer dshash_table_handle; + +/* The type for hash values. */ +typedef uint32 dshash_hash; + +/* A function type for comparing keys. */ +typedef int (*dshash_compare_function) (const void *a, const void *b, + size_t size, void *arg); + +/* A function type for computing hash values for keys. */ +typedef dshash_hash (*dshash_hash_function) (const void *v, size_t size, + void *arg); + +/* + * The set of parameters needed to create or attach to a hash table. The + * members tranche_id and tranche_name do not need to be initialized when + * attaching to an existing hash table. + * + * Compare and hash functions must be supplied even when attaching, because we + * can't safely share function pointers between backends in general. Either + * the arg variants or the non-arg variants should be supplied; the other + * function pointers should be NULL. If the arg variants are supplied then the + * user data pointer supplied to the create and attach functions will be + * passed to the hash and compare functions. + */ +typedef struct dshash_parameters +{ + size_t key_size; /* Size of the key (initial bytes of entry) */ + size_t entry_size; /* Total size of entry */ + dshash_compare_function compare_function; /* Compare function */ + dshash_hash_function hash_function; /* Hash function */ + int tranche_id; /* The tranche ID to use for locks */ +} dshash_parameters; + +/* Forward declaration of private types for use only by dshash.c. */ +struct dshash_table_item; +typedef struct dshash_table_item dshash_table_item; + +/* Creating, sharing and destroying from hash tables. */ +extern dshash_table *dshash_create(dsa_area *area, + const dshash_parameters *params, + void *arg); +extern dshash_table *dshash_attach(dsa_area *area, + const dshash_parameters *params, + dshash_table_handle handle, + void *arg); +extern void dshash_detach(dshash_table *hash_table); +extern dshash_table_handle dshash_get_hash_table_handle(dshash_table *hash_table); +extern void dshash_destroy(dshash_table *hash_table); + +/* Finding, creating, deleting entries. 
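[Editor's note, not part of the patch: hedged usage sketch for the dshash API above. The entry layout, tranche id, and the surrounding dsa_area/table setup are assumptions of this example.]

typedef struct MyEntry
{
    uint32      key;            /* first key_size bytes are the key */
    int         counter;
} MyEntry;

static const dshash_parameters my_params = {
    sizeof(uint32),                 /* key_size */
    sizeof(MyEntry),                /* entry_size */
    dshash_memcmp,                  /* compare_function */
    dshash_memhash,                 /* hash_function */
    LWTRANCHE_FIRST_USER_DEFINED    /* tranche_id (placeholder) */
};

static void
bump_counter(dshash_table *ht, uint32 key)  /* ht from dshash_create(area, &my_params, NULL) */
{
    bool        found;
    MyEntry    *entry = dshash_find_or_insert(ht, &key, &found);

    if (!found)
        entry->counter = 0;
    entry->counter++;
    dshash_release_lock(ht, entry);     /* entries come back locked */
}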
*/ +extern void *dshash_find(dshash_table *hash_table, + const void *key, bool exclusive); +extern void *dshash_find_or_insert(dshash_table *hash_table, + const void *key, bool *found); +extern bool dshash_delete_key(dshash_table *hash_table, const void *key); +extern void dshash_delete_entry(dshash_table *hash_table, void *entry); +extern void dshash_release_lock(dshash_table *hash_table, void *entry); + +/* Convenience hash and compare functions wrapping memcmp and tag_hash. */ +extern int dshash_memcmp(const void *a, const void *b, size_t size, void *arg); +extern dshash_hash dshash_memhash(const void *v, size_t size, void *arg); + +/* Debugging support. */ +extern void dshash_dump(dshash_table *hash_table); + +#endif /* DSHASH_H */ diff --git a/src/include/lib/hyperloglog.h b/src/include/lib/hyperloglog.h index 7a249cd252..f735111f91 100644 --- a/src/include/lib/hyperloglog.h +++ b/src/include/lib/hyperloglog.h @@ -3,7 +3,7 @@ * * A simple HyperLogLog cardinality estimator implementation * - * Portions Copyright (c) 2014-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 2014-2018, PostgreSQL Global Development Group * * Based on Hideaki Ohno's C++ implementation. The copyright terms of Ohno's * original version (the MIT license) follow. diff --git a/src/include/lib/ilist.h b/src/include/lib/ilist.h index e5ac5c218a..fc9d6b3ee4 100644 --- a/src/include/lib/ilist.h +++ b/src/include/lib/ilist.h @@ -96,7 +96,7 @@ * } * * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * IDENTIFICATION diff --git a/src/include/lib/knapsack.h b/src/include/lib/knapsack.h index 4485738e2a..9f17004d48 100644 --- a/src/include/lib/knapsack.h +++ b/src/include/lib/knapsack.h @@ -1,14 +1,13 @@ /* * knapsack.h * - * Copyright (c) 2017, PostgreSQL Global Development Group + * Copyright (c) 2017-2018, PostgreSQL Global Development Group * * src/include/lib/knapsack.h */ #ifndef KNAPSACK_H #define KNAPSACK_H -#include "postgres.h" #include "nodes/bitmapset.h" extern Bitmapset *DiscreteKnapsack(int max_weight, int num_items, diff --git a/src/include/lib/pairingheap.h b/src/include/lib/pairingheap.h index e3a75f51f7..9d3de79601 100644 --- a/src/include/lib/pairingheap.h +++ b/src/include/lib/pairingheap.h @@ -3,7 +3,7 @@ * * A Pairing Heap implementation * - * Portions Copyright (c) 2012-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 2012-2018, PostgreSQL Global Development Group * * src/include/lib/pairingheap.h */ diff --git a/src/include/lib/rbtree.h b/src/include/lib/rbtree.h index a7183bb0b4..51e4ed321c 100644 --- a/src/include/lib/rbtree.h +++ b/src/include/lib/rbtree.h @@ -3,7 +3,7 @@ * rbtree.h * interface for PostgreSQL generic Red-Black binary tree package * - * Copyright (c) 2009-2017, PostgreSQL Global Development Group + * Copyright (c) 2009-2018, PostgreSQL Global Development Group * * IDENTIFICATION * src/include/lib/rbtree.h @@ -14,31 +14,29 @@ #define RBTREE_H /* - * RBNode is intended to be used as the first field of a larger struct, + * RBTNode is intended to be used as the first field of a larger struct, * whose additional fields carry whatever payload data the caller needs * for a tree entry. (The total size of that larger struct is passed to - * rb_create.) RBNode is declared here to support this usage, but + * rbt_create.) 
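[Editor's note, not part of the patch: caller-side sketch of the rbtree renaming above. Payload structs still embed the node as their first field, but types and functions now carry the RBT/rbt_ prefix; the IntNode type and lookup helper are illustrative.]

typedef struct IntNode
{
    RBTNode     rbtnode;        /* must be the first field (was RBNode) */
    int         value;
} IntNode;

static IntNode *
lookup_value(RBTree *tree, int value)   /* tree built earlier with rbt_create() */
{
    IntNode     probe;

    probe.value = value;
    return (IntNode *) rbt_find(tree, &probe.rbtnode);  /* was rb_find() */
}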
RBTNode is declared here to support this usage, but * callers must treat it as an opaque struct. */ -typedef struct RBNode +typedef struct RBTNode { char color; /* node's current color, red or black */ - struct RBNode *left; /* left child, or RBNIL if none */ - struct RBNode *right; /* right child, or RBNIL if none */ - struct RBNode *parent; /* parent, or NULL (not RBNIL!) if none */ -} RBNode; + struct RBTNode *left; /* left child, or RBTNIL if none */ + struct RBTNode *right; /* right child, or RBTNIL if none */ + struct RBTNode *parent; /* parent, or NULL (not RBTNIL!) if none */ +} RBTNode; /* Opaque struct representing a whole tree */ typedef struct RBTree RBTree; /* Available tree iteration orderings */ -typedef enum RBOrderControl +typedef enum RBTOrderControl { LeftRightWalk, /* inorder: left child, node, right child */ - RightLeftWalk, /* reverse inorder: right, node, left */ - DirectWalk, /* preorder: node, left child, right child */ - InvertedWalk /* postorder: left child, right child, node */ -} RBOrderControl; + RightLeftWalk /* reverse inorder: right, node, left */ +} RBTOrderControl; /* * RBTreeIterator holds state while traversing a tree. This is declared @@ -49,34 +47,33 @@ typedef struct RBTreeIterator RBTreeIterator; struct RBTreeIterator { - RBTree *rb; - RBNode *(*iterate) (RBTreeIterator *iter); - RBNode *last_visited; - char next_step; + RBTree *rbt; + RBTNode *(*iterate) (RBTreeIterator *iter); + RBTNode *last_visited; bool is_over; }; /* Support functions to be provided by caller */ -typedef int (*rb_comparator) (const RBNode *a, const RBNode *b, void *arg); -typedef void (*rb_combiner) (RBNode *existing, const RBNode *newdata, void *arg); -typedef RBNode *(*rb_allocfunc) (void *arg); -typedef void (*rb_freefunc) (RBNode *x, void *arg); +typedef int (*rbt_comparator) (const RBTNode *a, const RBTNode *b, void *arg); +typedef void (*rbt_combiner) (RBTNode *existing, const RBTNode *newdata, void *arg); +typedef RBTNode *(*rbt_allocfunc) (void *arg); +typedef void (*rbt_freefunc) (RBTNode *x, void *arg); -extern RBTree *rb_create(Size node_size, - rb_comparator comparator, - rb_combiner combiner, - rb_allocfunc allocfunc, - rb_freefunc freefunc, - void *arg); +extern RBTree *rbt_create(Size node_size, + rbt_comparator comparator, + rbt_combiner combiner, + rbt_allocfunc allocfunc, + rbt_freefunc freefunc, + void *arg); -extern RBNode *rb_find(RBTree *rb, const RBNode *data); -extern RBNode *rb_leftmost(RBTree *rb); +extern RBTNode *rbt_find(RBTree *rbt, const RBTNode *data); +extern RBTNode *rbt_leftmost(RBTree *rbt); -extern RBNode *rb_insert(RBTree *rb, const RBNode *data, bool *isNew); -extern void rb_delete(RBTree *rb, RBNode *node); +extern RBTNode *rbt_insert(RBTree *rbt, const RBTNode *data, bool *isNew); +extern void rbt_delete(RBTree *rbt, RBTNode *node); -extern void rb_begin_iterate(RBTree *rb, RBOrderControl ctrl, - RBTreeIterator *iter); -extern RBNode *rb_iterate(RBTreeIterator *iter); +extern void rbt_begin_iterate(RBTree *rbt, RBTOrderControl ctrl, + RBTreeIterator *iter); +extern RBTNode *rbt_iterate(RBTreeIterator *iter); #endif /* RBTREE_H */ diff --git a/src/include/lib/simplehash.h b/src/include/lib/simplehash.h index c5af5b96a7..3b30c185cd 100644 --- a/src/include/lib/simplehash.h +++ b/src/include/lib/simplehash.h @@ -32,7 +32,7 @@ * - SH_STORE_HASH - if defined the hash is stored in the elements * - SH_GET_HASH(tb, a) - return the field to store the hash in * - * For examples of usage look at simplehash.c (file local definition) and + * For 
examples of usage look at tidbitmap.c (file local definition) and * execnodes.h/execGrouping.c (exposed declaration, file local * implementation). * @@ -65,8 +65,8 @@ /* type declarations */ #define SH_TYPE SH_MAKE_NAME(hash) #define SH_STATUS SH_MAKE_NAME(status) -#define SH_STATUS_EMPTY SH_MAKE_NAME(EMPTY) -#define SH_STATUS_IN_USE SH_MAKE_NAME(IN_USE) +#define SH_STATUS_EMPTY SH_MAKE_NAME(SH_EMPTY) +#define SH_STATUS_IN_USE SH_MAKE_NAME(SH_IN_USE) #define SH_ITERATOR SH_MAKE_NAME(iterator) /* function declarations */ @@ -174,6 +174,10 @@ SH_SCOPE void SH_STAT(SH_TYPE * tb); #ifndef SH_GROW_MAX_MOVE #define SH_GROW_MAX_MOVE 150 #endif +#ifndef SH_GROW_MIN_FILLFACTOR +/* but do not grow due to SH_GROW_MAX_* if below */ +#define SH_GROW_MIN_FILLFACTOR 0.1 +#endif #ifdef SH_STORE_HASH #define SH_COMPARE_KEYS(tb, ahash, akey, b) (ahash == SH_GET_HASH(tb, b) && SH_EQUAL(tb, b->SH_KEY, akey)) @@ -181,6 +185,16 @@ SH_SCOPE void SH_STAT(SH_TYPE * tb); #define SH_COMPARE_KEYS(tb, ahash, akey, b) (SH_EQUAL(tb, b->SH_KEY, akey)) #endif +/* + * Wrap the following definitions in include guards, to avoid multiple + * definition errors if this header is included more than once. The rest of + * the file deliberately has no include guards, because it can be included + * with different parameters to define functions and types with non-colliding + * names. + */ +#ifndef SIMPLEHASH_H +#define SIMPLEHASH_H + /* FIXME: can we move these to a central location? */ /* calculate ceil(log base 2) of num */ @@ -202,6 +216,8 @@ sh_pow2(uint64 num) return ((uint64) 1) << sh_log2(num); } +#endif + /* * Compute sizing parameters for hashtable. Called when creating and growing * the hashtable. @@ -574,9 +590,12 @@ SH_INSERT(SH_TYPE * tb, SH_KEY_TYPE key, bool *found) * hashtables, grow the hashtable if collisions would require * us to move a lot of entries. The most likely cause of such * imbalance is filling a (currently) small table, from a - * currently big one, in hash-table order. + * currently big one, in hash-table order. Don't grow if the + * hashtable would be too empty, to prevent quick space + * explosion for some weird edge cases. */ - if (++emptydist > SH_GROW_MAX_MOVE) + if (unlikely(++emptydist > SH_GROW_MAX_MOVE) && + ((double) tb->members / tb->size) >= SH_GROW_MIN_FILLFACTOR) { tb->grow_threshold = 0; goto restart; @@ -621,9 +640,12 @@ SH_INSERT(SH_TYPE * tb, SH_KEY_TYPE key, bool *found) * To avoid negative consequences from overly imbalanced hashtables, * grow the hashtable if collisions lead to large runs. The most * likely cause of such imbalance is filling a (currently) small - * table, from a currently big one, in hash-table order. + * table, from a currently big one, in hash-table order. Don't grow + * if the hashtable would be too empty, to prevent quick space + * explosion for some weird edge cases. 
*/ - if (insertdist > SH_GROW_MAX_DIB) + if (unlikely(insertdist > SH_GROW_MAX_DIB) && + ((double) tb->members / tb->size) >= SH_GROW_MIN_FILLFACTOR) { tb->grow_threshold = 0; goto restart; @@ -914,6 +936,7 @@ SH_STAT(SH_TYPE * tb) #undef SH_GET_HASH #undef SH_STORE_HASH #undef SH_USE_NONDEFAULT_ALLOCATOR +#undef SH_EQUAL /* undefine locally declared macros */ #undef SH_MAKE_PREFIX @@ -923,6 +946,7 @@ SH_STAT(SH_TYPE * tb) #undef SH_MAX_FILLFACTOR #undef SH_GROW_MAX_DIB #undef SH_GROW_MAX_MOVE +#undef SH_GROW_MIN_FILLFACTOR #undef SH_MAX_SIZE /* types */ diff --git a/src/include/lib/stringinfo.h b/src/include/lib/stringinfo.h index 9694ea3f21..8551237fc6 100644 --- a/src/include/lib/stringinfo.h +++ b/src/include/lib/stringinfo.h @@ -7,7 +7,7 @@ * It can be used to buffer either ordinary C strings (null-terminated text) * or arbitrary binary data. All storage is allocated with palloc(). * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/include/lib/stringinfo.h @@ -143,6 +143,14 @@ extern void appendStringInfoSpaces(StringInfo str, int count); extern void appendBinaryStringInfo(StringInfo str, const char *data, int datalen); +/*------------------------ + * appendBinaryStringInfoNT + * Append arbitrary binary data to a StringInfo, allocating more space + * if necessary. Does not ensure a trailing null-byte exists. + */ +extern void appendBinaryStringInfoNT(StringInfo str, + const char *data, int datalen); + /*------------------------ * enlargeStringInfo * Make sure a StringInfo's buffer can hold at least 'needed' more bytes. diff --git a/src/include/libpq/auth.h b/src/include/libpq/auth.h index 871cc03add..e8a1dc14ff 100644 --- a/src/include/libpq/auth.h +++ b/src/include/libpq/auth.h @@ -4,7 +4,7 @@ * Definitions for network authentication routines * * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/include/libpq/auth.h diff --git a/src/include/libpq/be-fsstubs.h b/src/include/libpq/be-fsstubs.h index 96bcaa0f08..ed31e54323 100644 --- a/src/include/libpq/be-fsstubs.h +++ b/src/include/libpq/be-fsstubs.h @@ -4,7 +4,7 @@ * * * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/include/libpq/be-fsstubs.h @@ -14,11 +14,6 @@ #ifndef BE_FSSTUBS_H #define BE_FSSTUBS_H -/* - * compatibility option for access control - */ -extern bool lo_compat_privileges; - /* * These are not fmgr-callable, but are available to C code. 
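[Editor's note, not part of the patch: illustrative contrast for the stringinfo.h addition above. The NT variant appends raw bytes without forcing a trailing NUL, which is the point when binary data is appended in bulk; the helper and its arguments are placeholders.]

static void
append_payload(StringInfo buf, const char *data, int len)
{
    appendBinaryStringInfo(buf, data, len);     /* keeps buf->data NUL-terminated */
    appendBinaryStringInfoNT(buf, data, len);   /* raw bytes only; no terminator appended */
}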
* Probably these should have had the underscore-free names, diff --git a/src/include/libpq/crypt.h b/src/include/libpq/crypt.h index 9bad67c890..4279a4a9ed 100644 --- a/src/include/libpq/crypt.h +++ b/src/include/libpq/crypt.h @@ -3,7 +3,7 @@ * crypt.h * Interface to libpq/crypt.c * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/include/libpq/crypt.h diff --git a/src/include/libpq/hba.h b/src/include/libpq/hba.h index 07d92d4f9f..5f68f4c666 100644 --- a/src/include/libpq/hba.h +++ b/src/include/libpq/hba.h @@ -75,11 +75,13 @@ typedef struct HbaLine char *pamservice; bool pam_use_hostname; bool ldaptls; + char *ldapscheme; char *ldapserver; int ldapport; char *ldapbinddn; char *ldapbindpasswd; char *ldapsearchattribute; + char *ldapsearchfilter; char *ldapbasedn; int ldapscope; char *ldapprefix; diff --git a/src/include/libpq/ifaddr.h b/src/include/libpq/ifaddr.h index be19ff8823..1d35597a7f 100644 --- a/src/include/libpq/ifaddr.h +++ b/src/include/libpq/ifaddr.h @@ -3,7 +3,7 @@ * ifaddr.h * IP netmask calculations, and enumerating network interfaces. * - * Copyright (c) 2003-2017, PostgreSQL Global Development Group + * Copyright (c) 2003-2018, PostgreSQL Global Development Group * * src/include/libpq/ifaddr.h * diff --git a/src/include/libpq/libpq-be.h b/src/include/libpq/libpq-be.h index 7bde744d51..b2c59df54e 100644 --- a/src/include/libpq/libpq-be.h +++ b/src/include/libpq/libpq-be.h @@ -8,7 +8,7 @@ * Structs that need to be client-visible are in pqcomm.h. * * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/include/libpq/libpq-be.h @@ -139,16 +139,16 @@ typedef struct Port List *guc_options; /* - * Information that needs to be held during the authentication cycle. + * The startup packet application name, only used here for the "connection + * authorized" log message. We shouldn't use this post-startup, instead + * the GUC should be used as application can change it afterward. */ - HbaLine *hba; + char *application_name; /* - * Information that really has no business at all being in struct Port, - * but since it gets used by elog.c in the same way as database_name and - * other members of this struct, we may as well keep it here. + * Information that needs to be held during the authentication cycle. */ - TimestampTz SessionStartTime; /* backend start time */ + HbaLine *hba; /* * TCP keepalive settings. @@ -193,24 +193,90 @@ typedef struct Port } Port; #ifdef USE_SSL +/* + * Hardcoded DH parameters, used in ephemeral DH keying. (See also + * README.SSL for more details on EDH.) + * + * If you want to create your own hardcoded DH parameters + * for fun and profit, review "Assigned Number for SKIP + * Protocols" (http://www.skip-vpn.org/spec/numbers.html) + * for suggestions. 
+ */ +#define FILE_DH2048 \ +"-----BEGIN DH PARAMETERS-----\n\ +MIIBCAKCAQEA9kJXtwh/CBdyorrWqULzBej5UxE5T7bxbrlLOCDaAadWoxTpj0BV\n\ +89AHxstDqZSt90xkhkn4DIO9ZekX1KHTUPj1WV/cdlJPPT2N286Z4VeSWc39uK50\n\ +T8X8dryDxUcwYc58yWb/Ffm7/ZFexwGq01uejaClcjrUGvC/RgBYK+X0iP1YTknb\n\ +zSC0neSRBzZrM2w4DUUdD3yIsxx8Wy2O9vPJI8BD8KVbGI2Ou1WMuF040zT9fBdX\n\ +Q6MdGGzeMyEstSr/POGxKUAYEY18hKcKctaGxAMZyAcpesqVDNmWn6vQClCbAkbT\n\ +CD1mpF1Bn5x8vYlLIhkmuquiXsNV6TILOwIBAg==\n\ +-----END DH PARAMETERS-----\n" + /* * These functions are implemented by the glue code specific to each * SSL implementation (e.g. be-secure-openssl.c) */ + +/* + * Initialize global SSL context. + * + * If isServerStart is true, report any errors as FATAL (so we don't return). + * Otherwise, log errors at LOG level and return -1 to indicate trouble, + * preserving the old SSL state if any. Returns 0 if OK. + */ extern int be_tls_init(bool isServerStart); + +/* + * Destroy global SSL context, if any. + */ extern void be_tls_destroy(void); + +/* + * Attempt to negotiate SSL connection. + */ extern int be_tls_open_server(Port *port); + +/* + * Close SSL connection. + */ extern void be_tls_close(Port *port); + +/* + * Read data from a secure connection. + */ extern ssize_t be_tls_read(Port *port, void *ptr, size_t len, int *waitfor); + +/* + * Write data to a secure connection. + */ extern ssize_t be_tls_write(Port *port, void *ptr, size_t len, int *waitfor); +/* + * Return information about the SSL connection. + */ extern int be_tls_get_cipher_bits(Port *port); extern bool be_tls_get_compression(Port *port); -extern void be_tls_get_version(Port *port, char *ptr, size_t len); -extern void be_tls_get_cipher(Port *port, char *ptr, size_t len); +extern const char *be_tls_get_version(Port *port); +extern const char *be_tls_get_cipher(Port *port); extern void be_tls_get_peerdn_name(Port *port, char *ptr, size_t len); + +/* + * Get the server certificate hash for SCRAM channel binding type + * tls-server-end-point. + * + * The result is a palloc'd hash of the server certificate with its + * size, and NULL if there is no certificate available. + * + * This is not supported with old versions of OpenSSL that don't have + * the X509_get_signature_nid() function. + */ +#if defined(USE_OPENSSL) && defined(HAVE_X509_GET_SIGNATURE_NID) +#define HAVE_BE_TLS_GET_CERTIFICATE_HASH +extern char *be_tls_get_certificate_hash(Port *port, size_t *len); #endif +#endif /* USE_SSL */ + extern ProtocolVersion FrontendProtocol; /* TCP keepalives configuration. These are no-ops on an AF_UNIX socket. */ diff --git a/src/include/libpq/libpq-fs.h b/src/include/libpq/libpq-fs.h index ce4b2a1892..e63d11ef19 100644 --- a/src/include/libpq/libpq-fs.h +++ b/src/include/libpq/libpq-fs.h @@ -4,7 +4,7 @@ * definitions for using Inversion file system routines (ie, large objects) * * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/include/libpq/libpq-fs.h diff --git a/src/include/libpq/libpq.h b/src/include/libpq/libpq.h index fd2dd5853c..c7762f68a6 100644 --- a/src/include/libpq/libpq.h +++ b/src/include/libpq/libpq.h @@ -4,7 +4,7 @@ * POSTGRES LIBPQ buffer structure definitions. 
* * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/include/libpq/libpq.h @@ -33,7 +33,7 @@ typedef struct void (*endcopyout) (bool errorAbort); } PQcommMethods; -extern PGDLLIMPORT PQcommMethods *PqCommMethods; +extern const PGDLLIMPORT PQcommMethods *PqCommMethods; #define pq_comm_reset() (PqCommMethods->comm_reset()) #define pq_flush() (PqCommMethods->flush()) @@ -75,11 +75,14 @@ extern int pq_putbytes(const char *s, size_t len); /* * prototypes for functions in be-secure.c */ +extern char *ssl_library; extern char *ssl_cert_file; extern char *ssl_key_file; extern char *ssl_ca_file; extern char *ssl_crl_file; extern char *ssl_dh_params_file; +extern char *ssl_passphrase_command; +extern bool ssl_passphrase_command_supports_reload; extern int secure_initialize(bool isServerStart); extern bool secure_loaded_verify_locations(void); @@ -100,4 +103,12 @@ extern char *SSLCipherSuites; extern char *SSLECDHCurve; extern bool SSLPreferServerCiphers; +/* + * prototypes for functions in be-secure-common.c + */ +extern int run_ssl_passphrase_command(const char *prompt, bool is_server_start, + char *buf, int size); +extern bool check_ssl_key_file_permissions(const char *ssl_key_file, + bool isServerStart); + #endif /* LIBPQ_H */ diff --git a/src/include/libpq/pqcomm.h b/src/include/libpq/pqcomm.h index 10c7434c41..cc0e0b32c7 100644 --- a/src/include/libpq/pqcomm.h +++ b/src/include/libpq/pqcomm.h @@ -6,7 +6,7 @@ * NOTE: for historical reasons, this does not correspond to pqcomm.c. * pqcomm.c's routines are declared in libpq.h. * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/include/libpq/pqcomm.h diff --git a/src/include/libpq/pqformat.h b/src/include/libpq/pqformat.h index 32112547a0..f0337325bb 100644 --- a/src/include/libpq/pqformat.h +++ b/src/include/libpq/pqformat.h @@ -3,7 +3,7 @@ * pqformat.h * Definitions for formatting and parsing frontend/backend messages * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/include/libpq/pqformat.h @@ -14,20 +14,180 @@ #define PQFORMAT_H #include "lib/stringinfo.h" +#include "mb/pg_wchar.h" +#include "port/pg_bswap.h" extern void pq_beginmessage(StringInfo buf, char msgtype); -extern void pq_sendbyte(StringInfo buf, int byt); +extern void pq_beginmessage_reuse(StringInfo buf, char msgtype); +extern void pq_endmessage(StringInfo buf); +extern void pq_endmessage_reuse(StringInfo buf); + extern void pq_sendbytes(StringInfo buf, const char *data, int datalen); extern void pq_sendcountedtext(StringInfo buf, const char *str, int slen, bool countincludesself); extern void pq_sendtext(StringInfo buf, const char *str, int slen); extern void pq_sendstring(StringInfo buf, const char *str); extern void pq_send_ascii_string(StringInfo buf, const char *str); -extern void pq_sendint(StringInfo buf, int i, int b); -extern void pq_sendint64(StringInfo buf, int64 i); extern void pq_sendfloat4(StringInfo buf, float4 f); extern void pq_sendfloat8(StringInfo buf, float8 f); -extern void pq_endmessage(StringInfo buf); + +/* + * Append a [u]int8 to a 
StringInfo buffer, which already has enough space + * preallocated. + * + * The use of pg_restrict allows the compiler to optimize the code based on + * the assumption that buf, buf->len, buf->data and *buf->data don't + * overlap. Without the annotation buf->len etc cannot be kept in a register + * over subsequent pq_writeintN calls. + * + * The use of StringInfoData * rather than StringInfo is due to MSVC being + * overly picky and demanding a * before a restrict. + */ +static inline void +pq_writeint8(StringInfoData *pg_restrict buf, uint8 i) +{ + uint8 ni = i; + + Assert(buf->len + (int) sizeof(uint8) <= buf->maxlen); + memcpy((char *pg_restrict) (buf->data + buf->len), &ni, sizeof(uint8)); + buf->len += sizeof(uint8); +} + +/* + * Append a [u]int16 to a StringInfo buffer, which already has enough space + * preallocated. + */ +static inline void +pq_writeint16(StringInfoData *pg_restrict buf, uint16 i) +{ + uint16 ni = pg_hton16(i); + + Assert(buf->len + (int) sizeof(uint16) <= buf->maxlen); + memcpy((char *pg_restrict) (buf->data + buf->len), &ni, sizeof(uint16)); + buf->len += sizeof(uint16); +} + +/* + * Append a [u]int32 to a StringInfo buffer, which already has enough space + * preallocated. + */ +static inline void +pq_writeint32(StringInfoData *pg_restrict buf, uint32 i) +{ + uint32 ni = pg_hton32(i); + + Assert(buf->len + (int) sizeof(uint32) <= buf->maxlen); + memcpy((char *pg_restrict) (buf->data + buf->len), &ni, sizeof(uint32)); + buf->len += sizeof(uint32); +} + +/* + * Append a [u]int64 to a StringInfo buffer, which already has enough space + * preallocated. + */ +static inline void +pq_writeint64(StringInfoData *pg_restrict buf, uint64 i) +{ + uint64 ni = pg_hton64(i); + + Assert(buf->len + (int) sizeof(uint64) <= buf->maxlen); + memcpy((char *pg_restrict) (buf->data + buf->len), &ni, sizeof(uint64)); + buf->len += sizeof(uint64); +} + +/* + * Append a null-terminated text string (with conversion) to a buffer with + * preallocated space. + * + * NB: The pre-allocated space needs to be sufficient for the string after + * converting to client encoding. + * + * NB: passed text string must be null-terminated, and so is the data + * sent to the frontend. + */ +static inline void +pq_writestring(StringInfoData *pg_restrict buf, const char *pg_restrict str) +{ + int slen = strlen(str); + char *p; + + p = pg_server_to_client(str, slen); + if (p != str) /* actual conversion has been done? 
*/ + slen = strlen(p); + + Assert(buf->len + slen + 1 <= buf->maxlen); + + memcpy(((char *pg_restrict) buf->data + buf->len), p, slen + 1); + buf->len += slen + 1; + + if (p != str) + pfree(p); +} + +/* append a binary [u]int8 to a StringInfo buffer */ +static inline void +pq_sendint8(StringInfo buf, uint8 i) +{ + enlargeStringInfo(buf, sizeof(uint8)); + pq_writeint8(buf, i); +} + +/* append a binary [u]int16 to a StringInfo buffer */ +static inline void +pq_sendint16(StringInfo buf, uint16 i) +{ + enlargeStringInfo(buf, sizeof(uint16)); + pq_writeint16(buf, i); +} + +/* append a binary [u]int32 to a StringInfo buffer */ +static inline void +pq_sendint32(StringInfo buf, uint32 i) +{ + enlargeStringInfo(buf, sizeof(uint32)); + pq_writeint32(buf, i); +} + +/* append a binary [u]int64 to a StringInfo buffer */ +static inline void +pq_sendint64(StringInfo buf, uint64 i) +{ + enlargeStringInfo(buf, sizeof(uint64)); + pq_writeint64(buf, i); +} + +/* append a binary byte to a StringInfo buffer */ +static inline void +pq_sendbyte(StringInfo buf, uint8 byt) +{ + pq_sendint8(buf, byt); +} + +/* + * Append a binary integer to a StringInfo buffer + * + * This function is deprecated; prefer use of the functions above. + */ +static inline void +pq_sendint(StringInfo buf, uint32 i, int b) +{ + switch (b) + { + case 1: + pq_sendint8(buf, (uint8) i); + break; + case 2: + pq_sendint16(buf, (uint16) i); + break; + case 4: + pq_sendint32(buf, (uint32) i); + break; + default: + elog(ERROR, "unsupported integer size %d", b); + break; + } +} + extern void pq_begintypsend(StringInfo buf); extern bytea *pq_endtypsend(StringInfo buf); diff --git a/src/include/libpq/pqmq.h b/src/include/libpq/pqmq.h index 86436d6753..e273656fde 100644 --- a/src/include/libpq/pqmq.h +++ b/src/include/libpq/pqmq.h @@ -3,7 +3,7 @@ * pqmq.h * Use the frontend/backend protocol for communication over a shm_mq * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/include/libpq/pqmq.h diff --git a/src/include/libpq/pqsignal.h b/src/include/libpq/pqsignal.h index af4e61ba4d..f292591dfc 100644 --- a/src/include/libpq/pqsignal.h +++ b/src/include/libpq/pqsignal.h @@ -3,7 +3,7 @@ * pqsignal.h * Backend signal(2) support (see also src/port/pqsignal.c) * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/include/libpq/pqsignal.h diff --git a/src/include/libpq/scram.h b/src/include/libpq/scram.h index 0166e1945d..f7865ca5fc 100644 --- a/src/include/libpq/scram.h +++ b/src/include/libpq/scram.h @@ -3,7 +3,7 @@ * scram.h * Interface to libpq/scram.c * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/include/libpq/scram.h @@ -13,8 +13,8 @@ #ifndef PG_SCRAM_H #define PG_SCRAM_H -/* Name of SCRAM-SHA-256 per IANA */ -#define SCRAM_SHA256_NAME "SCRAM-SHA-256" +#include "lib/stringinfo.h" +#include "libpq/libpq-be.h" /* Status codes for message exchange */ #define SASL_EXCHANGE_CONTINUE 0 @@ -22,7 +22,8 @@ #define SASL_EXCHANGE_FAILURE 2 /* Routines dedicated to authentication */ -extern void *pg_be_scram_init(const char *username, 
const char *shadow_pass); +extern void pg_be_scram_get_mechanisms(Port *port, StringInfo buf); +extern void *pg_be_scram_init(Port *port, const char *selected_mech, const char *shadow_pass); extern int pg_be_scram_exchange(void *opaq, char *input, int inputlen, char **output, int *outputlen, char **logdetail); diff --git a/src/include/mb/pg_wchar.h b/src/include/mb/pg_wchar.h index d57ef017cb..748db5ba5f 100644 --- a/src/include/mb/pg_wchar.h +++ b/src/include/mb/pg_wchar.h @@ -3,7 +3,7 @@ * pg_wchar.h * multibyte-character support * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/include/mb/pg_wchar.h @@ -304,6 +304,17 @@ typedef enum pg_enc /* On FE are possible all encodings */ #define PG_VALID_FE_ENCODING(_enc) PG_VALID_ENCODING(_enc) +/* + * When converting strings between different encodings, we assume that space + * for converted result is 4-to-1 growth in the worst case. The rates for + * currently supported encoding pairs are within 3 (SJIS JIS X0201 half width + * kana -> UTF8 is the worst case). So "4" should be enough for the moment. + * + * Note that this is not the same as the maximum character width in any + * particular encoding. + */ +#define MAX_CONVERSION_GROWTH 4 + /* * Table for mapping an encoding number to official encoding name and * possibly other subsidiary data. Be careful to check encoding number @@ -399,7 +410,7 @@ extern const pg_wchar_tbl pg_wchar_table[]; * points to a lookup table for the second byte. And so on. * * Physically, all the trees are stored in one big array, in 'chars16' or - * 'chars32', depending on the maximum value that needs to be reprented. For + * 'chars32', depending on the maximum value that needs to be represented. For * each level in each tree, we also store lower and upper bound of allowed * values - values outside those bounds are considered invalid, and are left * out of the tables. diff --git a/src/include/miscadmin.h b/src/include/miscadmin.h index dad98de98d..d6b32c070c 100644 --- a/src/include/miscadmin.h +++ b/src/include/miscadmin.h @@ -10,7 +10,7 @@ * Over time, this has also become the preferred place for widely known * resource-limitation stuff, such as work_mem and check_stack_depth().
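/*
 * Illustrative sketch (not part of this patch): using the new fixed-width
 * senders added earlier in pqformat.h together with MAX_CONVERSION_GROWTH
 * above.  pq_sendint32() enlarges the StringInfo as needed, while the
 * pq_write*() variants assume the caller already reserved enough space with
 * enlargeStringInfo(); for pq_writestring() that reservation must cover the
 * worst-case encoding-conversion growth.  The function names below are
 * hypothetical.
 */
#include "postgres.h"
#include "libpq/pqformat.h"
#include "mb/pg_wchar.h"

/* Growing path: each pq_send* call may enlarge the buffer as it goes. */
static void
send_example_message(int32 pid, int32 key)
{
	StringInfoData buf;

	pq_beginmessage(&buf, 'K');
	pq_sendint32(&buf, (uint32) pid);
	pq_sendint32(&buf, (uint32) key);
	pq_endmessage(&buf);
}

/* Preallocating path: reserve space once, then use the write variants. */
static void
write_labelled_int(StringInfo buf, const char *label, uint32 value)
{
	enlargeStringInfo(buf, strlen(label) * MAX_CONVERSION_GROWTH + 1 +
					  sizeof(uint32));
	pq_writestring(buf, label);
	pq_writeint32(buf, value);
}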
* - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/include/miscadmin.h @@ -25,6 +25,7 @@ #include +#include "datatype/timestamp.h" /* for TimestampTZ */ #include "pgtime.h" /* for pg_time_t */ @@ -77,13 +78,13 @@ /* in globals.c */ /* these are marked volatile because they are set by signal handlers: */ -extern PGDLLIMPORT volatile bool InterruptPending; -extern PGDLLIMPORT volatile bool QueryCancelPending; -extern PGDLLIMPORT volatile bool ProcDiePending; -extern PGDLLIMPORT volatile bool IdleInTransactionSessionTimeoutPending; +extern PGDLLIMPORT volatile sig_atomic_t InterruptPending; +extern PGDLLIMPORT volatile sig_atomic_t QueryCancelPending; +extern PGDLLIMPORT volatile sig_atomic_t ProcDiePending; +extern PGDLLIMPORT volatile sig_atomic_t IdleInTransactionSessionTimeoutPending; extern PGDLLIMPORT volatile sig_atomic_t ConfigReloadPending; -extern volatile bool ClientConnectionLost; +extern PGDLLIMPORT volatile sig_atomic_t ClientConnectionLost; /* these are marked volatile because they are examined by signal handlers: */ extern PGDLLIMPORT volatile uint32 InterruptHoldoffCount; @@ -150,18 +151,20 @@ extern PGDLLIMPORT bool IsUnderPostmaster; extern PGDLLIMPORT bool IsBackgroundWorker; extern PGDLLIMPORT bool IsBinaryUpgrade; -extern bool ExitOnAnyError; +extern PGDLLIMPORT bool ExitOnAnyError; extern PGDLLIMPORT char *DataDir; +extern PGDLLIMPORT int data_directory_mode; extern PGDLLIMPORT int NBuffers; -extern int MaxBackends; -extern int MaxConnections; -extern int max_worker_processes; -extern int max_parallel_workers; +extern PGDLLIMPORT int MaxBackends; +extern PGDLLIMPORT int MaxConnections; +extern PGDLLIMPORT int max_worker_processes; +extern PGDLLIMPORT int max_parallel_workers; extern PGDLLIMPORT int MyProcPid; extern PGDLLIMPORT pg_time_t MyStartTime; +extern PGDLLIMPORT TimestampTz MyStartTimestamp; extern PGDLLIMPORT struct Port *MyProcPort; extern PGDLLIMPORT struct Latch *MyLatch; extern int32 MyCancelKey; @@ -238,10 +241,10 @@ extern PGDLLIMPORT int IntervalStyle; #define MAXTZLEN 10 /* max TZ name len, not counting tr. 
null */ extern bool enableFsync; -extern bool allowSystemTableMods; +extern PGDLLIMPORT bool allowSystemTableMods; extern PGDLLIMPORT int work_mem; extern PGDLLIMPORT int maintenance_work_mem; -extern PGDLLIMPORT int replacement_sort_tuples; +extern PGDLLIMPORT int max_parallel_maintenance_workers; extern int VacuumCostPageHit; extern int VacuumCostPageMiss; @@ -256,6 +259,8 @@ extern int VacuumPageDirty; extern int VacuumCostBalance; extern bool VacuumCostActive; +extern double vacuum_cleanup_index_scale_factor; + /* in tcop/postgres.c */ @@ -321,6 +326,7 @@ extern void SetSessionAuthorization(Oid userid, bool is_superuser); extern Oid GetCurrentRoleId(void); extern void SetCurrentRoleId(Oid roleid, bool is_superuser); +extern void checkDataDir(void); extern void SetDataDir(const char *dir); extern void ChangeToDataDir(void); @@ -419,7 +425,7 @@ extern AuxProcType MyAuxProcType; extern void pg_split_opts(char **argv, int *argcp, const char *optstr); extern void InitializeMaxBackends(void); extern void InitPostgres(const char *in_dbname, Oid dboid, const char *username, - Oid useroid, char *out_dbname); + Oid useroid, char *out_dbname, bool override_allow_connections); extern void BaseInit(void); /* in utils/init/miscinit.c */ diff --git a/src/include/nodes/bitmapset.h b/src/include/nodes/bitmapset.h index aa3fb253c2..b6f1a9e6e5 100644 --- a/src/include/nodes/bitmapset.h +++ b/src/include/nodes/bitmapset.h @@ -11,7 +11,7 @@ * bms_is_empty() in preference to testing for NULL.) * * - * Copyright (c) 2003-2017, PostgreSQL Global Development Group + * Copyright (c) 2003-2018, PostgreSQL Global Development Group * * src/include/nodes/bitmapset.h * @@ -65,6 +65,7 @@ typedef enum extern Bitmapset *bms_copy(const Bitmapset *a); extern bool bms_equal(const Bitmapset *a, const Bitmapset *b); +extern int bms_compare(const Bitmapset *a, const Bitmapset *b); extern Bitmapset *bms_make_singleton(int x); extern void bms_free(Bitmapset *a); @@ -90,6 +91,7 @@ extern bool bms_is_empty(const Bitmapset *a); extern Bitmapset *bms_add_member(Bitmapset *a, int x); extern Bitmapset *bms_del_member(Bitmapset *a, int x); extern Bitmapset *bms_add_members(Bitmapset *a, const Bitmapset *b); +extern Bitmapset *bms_add_range(Bitmapset *a, int lower, int upper); extern Bitmapset *bms_int_members(Bitmapset *a, const Bitmapset *b); extern Bitmapset *bms_del_members(Bitmapset *a, const Bitmapset *b); extern Bitmapset *bms_join(Bitmapset *a, Bitmapset *b); @@ -97,6 +99,7 @@ extern Bitmapset *bms_join(Bitmapset *a, Bitmapset *b); /* support for iterating through the integer elements of a set: */ extern int bms_first_member(Bitmapset *a); extern int bms_next_member(const Bitmapset *a, int prevbit); +extern int bms_prev_member(const Bitmapset *a, int prevbit); /* support for hashtables using Bitmapsets as keys: */ extern uint32 bms_hash_value(const Bitmapset *a); diff --git a/src/include/nodes/execnodes.h b/src/include/nodes/execnodes.h index 577499465d..18544566f7 100644 --- a/src/include/nodes/execnodes.h +++ b/src/include/nodes/execnodes.h @@ -4,7 +4,7 @@ * definitions for executor state nodes * * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/include/nodes/execnodes.h @@ -24,6 +24,7 @@ #include "utils/hsearch.h" #include "utils/queryenvironment.h" #include "utils/reltrigger.h" +#include "utils/sharedtuplestore.h" #include "utils/sortsupport.h" 
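/*
 * Illustrative sketch (not part of this patch): the new bitmapset helpers
 * declared above.  bms_add_range() adds a contiguous run of members, and
 * bms_prev_member() iterates in reverse (start from -1; a result of -2 means
 * the set is exhausted).  walk_members_backwards() is a hypothetical example
 * and assumes nmembers > 0.
 */
#include "postgres.h"
#include "nodes/bitmapset.h"

static void
walk_members_backwards(int nmembers)
{
	Bitmapset  *set = bms_add_range(NULL, 0, nmembers - 1);
	int			x = -1;

	while ((x = bms_prev_member(set, x)) >= 0)
		elog(DEBUG1, "visiting member %d", x);

	bms_free(set);
}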
#include "utils/tuplestore.h" #include "utils/tuplesort.h" @@ -31,6 +32,15 @@ #include "storage/condition_variable.h" +struct PlanState; /* forward references in this file */ +struct ParallelHashJoinState; +struct ExecRowMark; +struct ExprState; +struct ExprContext; +struct RangeTblEntry; /* avoid including parsenodes.h here */ +struct ExprEvalStep; /* avoid including execExpr.h everywhere */ + + /* ---------------- * ExprState node * @@ -38,10 +48,6 @@ * It contains instructions (in ->steps) to evaluate the expression. * ---------------- */ -struct ExprState; /* forward references in this file */ -struct ExprContext; -struct ExprEvalStep; /* avoid including execExpr.h everywhere */ - typedef Datum (*ExprStateEvalFunc) (struct ExprState *expression, struct ExprContext *econtext, bool *isNull); @@ -60,12 +66,15 @@ typedef struct ExprState * Storage for result value of a scalar expression, or for individual * column results within expressions built by ExecBuildProjectionInfo(). */ +#define FIELDNO_EXPRSTATE_RESNULL 2 bool resnull; +#define FIELDNO_EXPRSTATE_RESVALUE 3 Datum resvalue; /* * If projecting a tuple result, this slot holds the result; else NULL. */ +#define FIELDNO_EXPRSTATE_RESULTSLOT 4 TupleTableSlot *resultslot; /* @@ -82,13 +91,20 @@ typedef struct ExprState /* original expression tree, for debugging only */ Expr *expr; + /* private state for an evalfunc */ + void *evalfunc_private; + /* - * XXX: following only needed during "compilation", could be thrown away. + * XXX: following fields only needed during "compilation" (ExecInitExpr); + * could be thrown away afterwards. */ int steps_len; /* number of steps currently */ int steps_alloc; /* allocated length of steps array */ + struct PlanState *parent; /* parent PlanState node, if any */ + ParamListInfo ext_params; /* for compiling PARAM_EXTERN nodes */ + Datum *innermost_caseval; bool *innermost_casenull; @@ -104,9 +120,11 @@ typedef struct ExprState * entries for a particular index. Used for both index_build and * retail creation of index entries. * - * NumIndexAttrs number of columns in this index - * KeyAttrNumbers underlying-rel attribute numbers used as keys - * (zeroes indicate expressions) + * NumIndexAttrs total number of columns in this index + * NumIndexKeyAttrs number of key columns in index + * IndexAttrNumbers underlying-rel attribute numbers used as keys + * (zeroes indicate expressions). It also contains + * info about included columns. * Expressions expr trees for expression entries, or NIL if none * ExpressionsState exec state for expressions, or NIL if none * Predicate partial-index predicate, or NIL if none @@ -121,18 +139,21 @@ typedef struct ExprState * ReadyForInserts is it valid for inserts? * Concurrent are we doing a concurrent index build? * BrokenHotChain did we detect any broken HOT chains? + * ParallelWorkers # of workers requested (excludes leader) + * Am Oid of index AM * AmCache private cache area for index AM * Context memory context holding this IndexInfo * - * ii_Concurrent and ii_BrokenHotChain are used only during index build; - * they're conventionally set to false otherwise. + * ii_Concurrent, ii_BrokenHotChain, and ii_ParallelWorkers are used only + * during index build; they're conventionally zeroed otherwise. 
* ---------------- */ typedef struct IndexInfo { NodeTag type; - int ii_NumIndexAttrs; - AttrNumber ii_KeyAttrNumbers[INDEX_MAX_KEYS]; + int ii_NumIndexAttrs; /* total number of columns in index */ + int ii_NumIndexKeyAttrs; /* number of key columns in index */ + AttrNumber ii_IndexAttrNumbers[INDEX_MAX_KEYS]; List *ii_Expressions; /* list of Expr */ List *ii_ExpressionsState; /* list of ExprState */ List *ii_Predicate; /* list of Expr */ @@ -147,6 +168,8 @@ typedef struct IndexInfo bool ii_ReadyForInserts; bool ii_Concurrent; bool ii_BrokenHotChain; + int ii_ParallelWorkers; + Oid ii_Am; void *ii_AmCache; MemoryContext ii_Context; } IndexInfo; @@ -194,8 +217,11 @@ typedef struct ExprContext NodeTag type; /* Tuples that Var nodes in expression may refer to */ +#define FIELDNO_EXPRCONTEXT_SCANTUPLE 1 TupleTableSlot *ecxt_scantuple; +#define FIELDNO_EXPRCONTEXT_INNERTUPLE 2 TupleTableSlot *ecxt_innertuple; +#define FIELDNO_EXPRCONTEXT_OUTERTUPLE 3 TupleTableSlot *ecxt_outertuple; /* Memory contexts for expression evaluation --- see notes above */ @@ -210,15 +236,21 @@ typedef struct ExprContext * Values to substitute for Aggref nodes in the expressions of an Agg * node, or for WindowFunc nodes within a WindowAgg node. */ +#define FIELDNO_EXPRCONTEXT_AGGVALUES 8 Datum *ecxt_aggvalues; /* precomputed values for aggs/windowfuncs */ +#define FIELDNO_EXPRCONTEXT_AGGNULLS 9 bool *ecxt_aggnulls; /* null flags for aggs/windowfuncs */ /* Value to substitute for CaseTestExpr nodes in expression */ +#define FIELDNO_EXPRCONTEXT_CASEDATUM 10 Datum caseValue_datum; +#define FIELDNO_EXPRCONTEXT_CASENULL 11 bool caseValue_isNull; /* Value to substitute for CoerceToDomainValue nodes in expression */ +#define FIELDNO_EXPRCONTEXT_DOMAINDATUM 12 Datum domainValue_datum; +#define FIELDNO_EXPRCONTEXT_DOMAINNULL 13 bool domainValue_isNull; /* Link to containing EState (NULL if a standalone ExprContext) */ @@ -336,18 +368,39 @@ typedef struct JunkFilter AttrNumber jf_junkAttNo; } JunkFilter; +/* + * OnConflictSetState + * + * Executor state of an ON CONFLICT DO UPDATE operation. + */ +typedef struct OnConflictSetState +{ + NodeTag type; + + ProjectionInfo *oc_ProjInfo; /* for ON CONFLICT DO UPDATE SET */ + TupleDesc oc_ProjTupdesc; /* TupleDesc for the above projection */ + ExprState *oc_WhereClause; /* state for the WHERE clause */ +} OnConflictSetState; + /* * ResultRelInfo * * Whenever we update an existing relation, we have to update indexes on the * relation, and perhaps also fire triggers. ResultRelInfo holds all the * information needed about a result relation, including indexes. + * + * Normally, a ResultRelInfo refers to a table that is in the query's + * range table; then ri_RangeTableIndex is the RT index and ri_RelationDesc + * is just a copy of the relevant es_relations[] entry. But sometimes, + * in ResultRelInfos used only for triggers, ri_RangeTableIndex is zero + * and ri_RelationDesc is a separately-opened relcache pointer that needs + * to be separately closed. See ExecGetTriggerResultRel. 
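/*
 * Illustrative sketch (not part of this patch): a simplified view of how the
 * ON CONFLICT DO UPDATE path might consume the new OnConflictSetState hung
 * off ResultRelInfo->ri_onConflict (the real logic lives in nodeModifyTable.c
 * and also handles tuple locking, EvalPlanQual, etc.).
 * apply_on_conflict_update() is a hypothetical name.
 */
#include "postgres.h"
#include "executor/executor.h"
#include "nodes/execnodes.h"

static TupleTableSlot *
apply_on_conflict_update(ResultRelInfo *rri, ExprContext *econtext)
{
	OnConflictSetState *oc = rri->ri_onConflict;

	/* Skip the update if the optional WHERE clause rejects the row. */
	if (oc->oc_WhereClause != NULL &&
		!ExecQual(oc->oc_WhereClause, econtext))
		return NULL;

	/* Compute the SET targetlist into the projection's result slot. */
	return ExecProject(oc->oc_ProjInfo);
}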
*/ typedef struct ResultRelInfo { NodeTag type; - /* result relation's range table index */ + /* result relation's range table index, or 0 if not in range table */ Index ri_RangeTableIndex; /* relation descriptor for result relation */ @@ -395,14 +448,17 @@ typedef struct ResultRelInfo /* for removing junk attributes from tuples */ JunkFilter *ri_junkFilter; + /* list of RETURNING expressions */ + List *ri_returningList; + /* for computing a RETURNING list */ ProjectionInfo *ri_projectReturning; - /* for computing ON CONFLICT DO UPDATE SET */ - ProjectionInfo *ri_onConflictSetProj; + /* list of arbiter indexes to use to check conflicts */ + List *ri_onConflictArbiterIndexes; - /* list of ON CONFLICT DO UPDATE exprs (qual) */ - ExprState *ri_onConflictSetWhere; + /* ON CONFLICT evaluation state */ + OnConflictSetState *ri_onConflict; /* partition check expression */ List *ri_PartitionCheck; @@ -412,6 +468,9 @@ typedef struct ResultRelInfo /* relation descriptor for root partitioned table */ Relation ri_PartitionRoot; + + /* true if ready for tuple routing */ + bool ri_PartitionReadyForRouting; } ResultRelInfo; /* ---------------- @@ -429,6 +488,12 @@ typedef struct EState Snapshot es_snapshot; /* time qual to use */ Snapshot es_crosscheck_snapshot; /* crosscheck time qual for RI */ List *es_range_table; /* List of RangeTblEntry */ + struct RangeTblEntry **es_range_table_array; /* equivalent array */ + Index es_range_table_size; /* size of the range table arrays */ + Relation *es_relations; /* Array of per-range-table-entry Relation + * pointers, or NULL if not yet opened */ + struct ExecRowMark **es_rowmarks; /* Array of per-range-table-entry + * ExecRowMarks, or NULL if none */ PlannedStmt *es_plannedstmt; /* link to top of plan tree */ const char *es_sourceText; /* Source text from QueryDesc */ @@ -443,15 +508,20 @@ typedef struct EState ResultRelInfo *es_result_relation_info; /* currently active array elt */ /* - * Info about the target partitioned target table root(s) for - * update/delete queries. They required only to fire any per-statement - * triggers defined on the table. It exists separately from - * es_result_relations, because partitioned tables don't appear in the - * plan tree for the update/delete cases. + * Info about the partition root table(s) for insert/update/delete queries + * targeting partitioned tables. Only leaf partitions are mentioned in + * es_result_relations, but we need access to the roots for firing + * triggers and for runtime tuple routing. */ ResultRelInfo *es_root_result_relations; /* array of ResultRelInfos */ int es_num_root_result_relations; /* length of the array */ + /* + * The following list contains ResultRelInfos created by the tuple routing + * code for partitions that don't already have one. + */ + List *es_tuple_routing_result_relations; + /* Stuff used for firing triggers: */ List *es_trig_target_relations; /* trigger-only ResultRelInfos */ TupleTableSlot *es_trig_tuple_slot; /* for trigger output tuples */ @@ -469,8 +539,6 @@ typedef struct EState List *es_tupleTable; /* List of TupleTableSlots */ - List *es_rowMarks; /* List of ExecRowMarks */ - uint64 es_processed; /* # of tuples processed */ Oid es_lastoid; /* last oid processed (by INSERT) */ @@ -498,14 +566,29 @@ typedef struct EState * return, or NULL if nothing to return; es_epqTupleSet[] is true if a * particular array entry is valid; and es_epqScanDone[] is state to * remember if the tuple has been returned already. 
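/*
 * Illustrative sketch (not part of this patch): because the range table is
 * now mirrored into es_range_table_array (with es_relations and es_rowmarks
 * indexed the same way), an RTE lookup becomes a constant-time array fetch
 * rather than a list walk.  fetch_rte() is a hypothetical helper; the real
 * executor exposes similar accessors.
 */
#include "postgres.h"
#include "nodes/execnodes.h"

static inline struct RangeTblEntry *
fetch_rte(EState *estate, Index rti)
{
	Assert(rti > 0 && rti <= estate->es_range_table_size);
	return estate->es_range_table_array[rti - 1];
}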
Arrays are of size - * list_length(es_range_table) and are indexed by scan node scanrelid - 1. + * es_range_table_size and are indexed by scan node scanrelid - 1. */ HeapTuple *es_epqTuple; /* array of EPQ substitute tuples */ bool *es_epqTupleSet; /* true if EPQ tuple is provided */ bool *es_epqScanDone; /* true if EPQ tuple has been fetched */ + bool es_use_parallel_mode; /* can we use parallel workers? */ + /* The per-query shared memory area to use for parallel execution. */ struct dsa_area *es_query_dsa; + + /* + * JIT information. es_jit_flags indicates whether JIT should be performed + * and with which options. es_jit is created on-demand when JITing is + * performed. + * + * es_jit_worker_instr is the combined, on demand allocated, + * instrumentation from all workers. The leader's instrumentation is kept + * separate, and is combined on demand by ExplainPrintJITSummary(). + */ + int es_jit_flags; + struct JitContext *es_jit; + struct JitInstrumentation *es_jit_worker_instr; } EState; @@ -524,7 +607,9 @@ typedef struct EState * node that sources the relation (e.g., for a foreign table the FDW can use * ermExtra to hold information). * - * EState->es_rowMarks is a list of these structs. + * EState->es_rowmarks is an array of these structs, indexed by RT index, + * with NULLs for irrelevant RT indexes. es_rowmarks itself is NULL if + * there are no rowmarks. */ typedef struct ExecRowMark { @@ -546,7 +631,7 @@ typedef struct ExecRowMark * additional runtime representation of FOR [KEY] UPDATE/SHARE clauses * * Each LockRows and ModifyTable node keeps a list of the rowmarks it needs to - * deal with. In addition to a pointer to the related entry in es_rowMarks, + * deal with. In addition to a pointer to the related entry in es_rowmarks, * this struct carries the column number(s) of the resjunk columns associated * with the rowmark (see comments for PlanRowMark for more detail). In the * case of ModifyTable, there has to be a separate ExecAuxRowMark list for @@ -555,7 +640,7 @@ typedef struct ExecRowMark */ typedef struct ExecAuxRowMark { - ExecRowMark *rowmark; /* related entry in es_rowMarks */ + ExecRowMark *rowmark; /* related entry in es_rowmarks */ AttrNumber ctidAttNo; /* resno of ctid junk attribute, if any */ AttrNumber toidAttNo; /* resno of tableoid junk attribute, if any */ AttrNumber wholeAttNo; /* resno of whole-row junk attribute, if any */ @@ -572,10 +657,10 @@ typedef struct ExecAuxRowMark * Normally these are the only functions used, but FindTupleHashEntry() * supports searching a hashtable using cross-data-type hashing. For that, * the caller must supply hash functions for the LHS datatype as well as - * the cross-type equality operators to use. in_hash_funcs and cur_eq_funcs + * the cross-type equality operators to use. in_hash_funcs and cur_eq_func * are set to point to the caller's function arrays while doing such a search. * During LookupTupleHashEntry(), they point to tab_hash_funcs and - * tab_eq_funcs respectively. + * tab_eq_func respectively.
* ---------------------------------------------------------------- */ typedef struct TupleHashEntryData *TupleHashEntry; @@ -603,7 +688,7 @@ typedef struct TupleHashTableData int numCols; /* number of columns in lookup key */ AttrNumber *keyColIdx; /* attr numbers of key columns */ FmgrInfo *tab_hash_funcs; /* hash functions for table datatype(s) */ - FmgrInfo *tab_eq_funcs; /* equality functions for table datatype(s) */ + ExprState *tab_eq_func; /* comparator for table datatype(s) */ MemoryContext tablecxt; /* memory context containing table */ MemoryContext tempcxt; /* context for function evaluations */ Size entrysize; /* actual size to make each hash entry */ @@ -611,8 +696,9 @@ typedef struct TupleHashTableData /* The following fields are set transiently for each table search: */ TupleTableSlot *inputslot; /* current input tuple's slot */ FmgrInfo *in_hash_funcs; /* hash functions for input datatype(s) */ - FmgrInfo *cur_eq_funcs; /* equality functions for input vs. table */ + ExprState *cur_eq_func; /* comparator for input vs. table */ uint32 hash_iv; /* hash-function IV */ + ExprContext *exprcontext; /* expression context */ } TupleHashTableData; typedef tuplehash_iterator TupleHashIterator; @@ -759,20 +845,24 @@ typedef struct SubPlanState HeapTuple curTuple; /* copy of most recent tuple from subplan */ Datum curArray; /* most recent array from ARRAY() subplan */ /* these are used when hashing the subselect's output: */ + TupleDesc descRight; /* subselect desc after projection */ ProjectionInfo *projLeft; /* for projecting lefthand exprs */ ProjectionInfo *projRight; /* for projecting subselect output */ TupleHashTable hashtable; /* hash table for no-nulls subselect rows */ TupleHashTable hashnulls; /* hash table for rows with null(s) */ - bool havehashrows; /* TRUE if hashtable is not empty */ - bool havenullrows; /* TRUE if hashnulls is not empty */ + bool havehashrows; /* true if hashtable is not empty */ + bool havenullrows; /* true if hashnulls is not empty */ MemoryContext hashtablecxt; /* memory context containing hash tables */ MemoryContext hashtempcxt; /* temp memory context for hash tables */ ExprContext *innerecontext; /* econtext for computing inner tuples */ AttrNumber *keyColIdx; /* control data for hash tables */ + Oid *tab_eq_funcoids; /* equality func oids for table + * datatype(s) */ FmgrInfo *tab_hash_funcs; /* hash functions for table datatype(s) */ FmgrInfo *tab_eq_funcs; /* equality functions for table datatype(s) */ FmgrInfo *lhs_hash_funcs; /* hash functions for lefthand datatype(s) */ FmgrInfo *cur_eq_funcs; /* equality functions for LHS vs. table */ + ExprState *cur_eq_comp; /* equality comparator for LHS vs. table */ } SubPlanState; /* ---------------- @@ -818,8 +908,6 @@ typedef struct DomainConstraintState * ---------------------------------------------------------------- */ -struct PlanState; - /* ---------------- * ExecProcNodeMtd * @@ -854,6 +942,9 @@ typedef struct PlanState Instrumentation *instrument; /* Optional runtime stats for this node */ WorkerInstrumentation *worker_instrument; /* per-worker instrumentation */ + /* Per-worker JIT instrumentation */ + struct SharedJitInstrumentation *worker_jit_instrument; + /* * Common structural data for all Plan types. 
These links to subsidiary * state trees parallel links in the associated plan tree (except for the @@ -862,6 +953,7 @@ typedef struct PlanState ExprState *qual; /* boolean qual condition */ struct PlanState *lefttree; /* input plan tree(s) */ struct PlanState *righttree; + List *initPlan; /* Init SubPlanState nodes (un-correlated expr * subselects) */ List *subPlan; /* SubPlanState nodes in my expressions */ @@ -874,9 +966,17 @@ typedef struct PlanState /* * Other run-time state needed by most if not all node types. */ + TupleDesc ps_ResultTupleDesc; /* node's return type */ TupleTableSlot *ps_ResultTupleSlot; /* slot for my result tuples */ ExprContext *ps_ExprContext; /* node's expression-evaluation context */ ProjectionInfo *ps_ProjInfo; /* info for doing tuple projection */ + + /* + * Scanslot's descriptor if known. This is a bit of a hack, but otherwise + * it's hard for expression compilation to optimize based on the + * descriptor, without encoding knowledge about all executor nodes. + */ + TupleDesc scandesc; } PlanState; /* ---------------- @@ -890,6 +990,11 @@ typedef struct PlanState #define outerPlanState(node) (((PlanState *)(node))->lefttree) /* Macros for inline access to certain instrumentation counters */ +#define InstrCountTuples2(node, delta) \ + do { \ + if (((PlanState *)(node))->instrument) \ + ((PlanState *)(node))->instrument->ntuples2 += (delta); \ + } while (0) #define InstrCountFiltered1(node, delta) \ do { \ if (((PlanState *)(node))->instrument) \ @@ -943,6 +1048,7 @@ typedef struct ProjectSetState ExprDoneCond *elemdone; /* array of per-SRF is-done states */ int nelems; /* length of elemdone[] array */ bool pending_srf_tuples; /* still evaluating srfs in tlist? */ + MemoryContext argcontext; /* context for SRF arguments */ } ProjectSetState; /* ---------------- @@ -964,41 +1070,56 @@ typedef struct ModifyTableState List **mt_arowmarks; /* per-subplan ExecAuxRowMark lists */ EPQState mt_epqstate; /* for evaluating EvalPlanQual rechecks */ bool fireBSTriggers; /* do we need to fire stmt triggers? */ - OnConflictAction mt_onconflict; /* ON CONFLICT type */ - List *mt_arbiterindexes; /* unique index OIDs to arbitrate taking - * alt path */ TupleTableSlot *mt_existing; /* slot to store existing target tuple in */ List *mt_excludedtlist; /* the excluded pseudo relation's tlist */ TupleTableSlot *mt_conflproj; /* CONFLICT ... SET ... 
projection target */ - struct PartitionDispatchData **mt_partition_dispatch_info; + /* Tuple-routing support info */ - int mt_num_dispatch; /* Number of entries in the above array */ - int mt_num_partitions; /* Number of members in the following - * arrays */ - ResultRelInfo *mt_partitions; /* Per partition result relation */ - TupleConversionMap **mt_partition_tupconv_maps; - /* Per partition tuple conversion map */ - TupleTableSlot *mt_partition_tuple_slot; + struct PartitionTupleRouting *mt_partition_tuple_routing; + + /* controls transition table population for specified operation */ struct TransitionCaptureState *mt_transition_capture; - /* controls transition table population */ - TupleConversionMap **mt_transition_tupconv_maps; - /* Per plan/partition tuple conversion */ + + /* controls transition table population for INSERT...ON CONFLICT UPDATE */ + struct TransitionCaptureState *mt_oc_transition_capture; + + /* Per plan map for tuple conversion from child to root */ + TupleConversionMap **mt_per_subplan_tupconv_maps; } ModifyTableState; /* ---------------- * AppendState information * - * nplans how many plans are in the array - * whichplan which plan is being executed (0 .. n-1) + * nplans how many plans are in the array + * whichplan which plan is being executed (0 .. n-1), or a + * special negative value. See nodeAppend.c. + * pruningstate details required to allow partitions to be + * eliminated from the scan, or NULL if not possible. + * valid_subplans for runtime pruning, valid appendplans indexes to + * scan. * ---------------- */ -typedef struct AppendState + +struct AppendState; +typedef struct AppendState AppendState; +struct ParallelAppendState; +typedef struct ParallelAppendState ParallelAppendState; +struct PartitionPruneState; + +struct AppendState { PlanState ps; /* its first field is NodeTag */ PlanState **appendplans; /* array of PlanStates for my inputs */ int as_nplans; int as_whichplan; -} AppendState; + int as_first_partial_plan; /* Index of 'appendplans' containing + * the first partial plan */ + ParallelAppendState *as_pstate; /* parallel coordination info */ + Size pstate_len; /* size of parallel coordination info */ + struct PartitionPruneState *as_prune_state; + Bitmapset *as_valid_subplans; + bool (*choose_next_subplan) (AppendState *); +}; /* ---------------- * MergeAppendState information @@ -1009,6 +1130,12 @@ typedef struct AppendState * slots current output tuple of each subplan * heap heap of active tuples * initialized true if we have fetched first tuple from each subplan + * noopscan true if partition pruning proved that none of the + * mergeplans can contain a record to satisfy this query. + * prune_state details required to allow partitions to be + * eliminated from the scan, or NULL if not possible. + * valid_subplans for runtime pruning, valid mergeplans indexes to + * scan. * ---------------- */ typedef struct MergeAppendState @@ -1021,6 +1148,9 @@ typedef struct MergeAppendState TupleTableSlot **ms_slots; /* array of length ms_nplans */ struct binaryheap *ms_heap; /* binary heap of slot indices */ bool ms_initialized; /* are subplans started? 
*/ + bool ms_noopscan; + struct PartitionPruneState *ms_prune_state; + Bitmapset *ms_valid_subplans; } MergeAppendState; /* ---------------- @@ -1042,7 +1172,7 @@ typedef struct RecursiveUnionState Tuplestorestate *working_table; Tuplestorestate *intermediate_table; /* Remaining fields are unused in UNION ALL case */ - FmgrInfo *eqfunctions; /* per-grouping-field equality fns */ + Oid *eqfuncoids; /* per-grouping-field equality fns */ FmgrInfo *hashfunctions; /* per-grouping-field hash fns */ MemoryContext tempContext; /* short-term context for comparisons */ TupleHashTable hashtable; /* hash table for tuples already seen */ @@ -1216,7 +1346,6 @@ typedef struct IndexScanState * RelationDesc index relation descriptor * ScanDesc index scan descriptor * VMBuffer buffer in use for visibility map testing, if any - * HeapFetches number of tuples we were forced to fetch from heap * ioss_PscanLen Size of parallel index-only scan descriptor * ---------------- */ @@ -1235,7 +1364,6 @@ typedef struct IndexOnlyScanState Relation ioss_RelationDesc; IndexScanDesc ioss_ScanDesc; Buffer ioss_VMBuffer; - long ioss_HeapFetches; Size ioss_PscanLen; } IndexOnlyScanState; @@ -1323,6 +1451,10 @@ typedef struct ParallelBitmapHeapState * tbm bitmap obtained from child index scan(s) * tbmiterator iterator for scanning current pages * tbmres current-page data + * can_skip_fetch can we potentially skip tuple fetches in this scan? + * skip_fetch are we skipping tuple fetches on this page? + * vmbuffer buffer for visibility-map lookups + * pvmbuffer ditto, for prefetched pages * exact_pages total number of exact pages retrieved * lossy_pages total number of lossy pages retrieved * prefetch_iterator iterator for prefetching ahead of current page @@ -1343,6 +1475,10 @@ typedef struct BitmapHeapScanState TIDBitmap *tbm; TBMIterator *tbmiterator; TBMIterateResult *tbmres; + bool can_skip_fetch; + bool skip_fetch; + Buffer vmbuffer; + Buffer pvmbuffer; long exact_pages; long lossy_pages; TBMIterator *prefetch_iterator; @@ -1460,15 +1596,15 @@ typedef struct TableFuncScanState ExprState *rowexpr; /* state for row-generating expression */ List *colexprs; /* state for column-generating expression */ List *coldefexprs; /* state for column default expressions */ - List *ns_names; /* list of str nodes with namespace names */ - List *ns_uris; /* list of states of namespace uri exprs */ + List *ns_names; /* same as TableFunc.ns_names */ + List *ns_uris; /* list of states of namespace URI exprs */ Bitmapset *notnulls; /* nullability flag for each output column */ void *opaque; /* table builder private space */ const struct TableFuncRoutine *routine; /* table builder methods */ FmgrInfo *in_functions; /* input function for each column */ Oid *typioparams; /* typioparam for each column */ int64 ordinal; /* row number to be output next */ - MemoryContext perValueCxt; /* short life context for value evaluation */ + MemoryContext perTableCxt; /* per-table context */ Tuplestorestate *tupstore; /* output tuple store */ } TableFuncScanState; @@ -1727,6 +1863,16 @@ typedef struct MaterialState Tuplestorestate *tuplestorestate; } MaterialState; +/* ---------------- + * Shared memory container for per-worker sort information + * ---------------- + */ +typedef struct SharedSortInfo +{ + int num_workers; + TuplesortInstrumentation sinstrument[FLEXIBLE_ARRAY_MEMBER]; +} SharedSortInfo; + /* ---------------- * SortState information * ---------------- @@ -1741,6 +1887,8 @@ typedef struct SortState bool bounded_Done; /* value of bounded we did the 
sort with */ int64 bound_Done; /* value of bound we did the sort with */ void *tuplesortstate; /* private state of tuplesort.c */ + bool am_worker; /* are we a worker? */ + SharedSortInfo *shared_info; /* one entry per worker */ } SortState; /* --------------------- @@ -1750,7 +1898,7 @@ typedef struct SortState typedef struct GroupState { ScanState ss; /* its first field is NodeTag */ - FmgrInfo *eqfunctions; /* per-field lookup data for equality fns */ + ExprState *eqfunction; /* equality function */ bool grp_done; /* indicates completion of Group scan */ } GroupState; @@ -1789,11 +1937,15 @@ typedef struct AggState ExprContext *hashcontext; /* econtexts for long-lived data (hashtable) */ ExprContext **aggcontexts; /* econtexts for long-lived data (per GS) */ ExprContext *tmpcontext; /* econtext for input expressions */ +#define FIELDNO_AGGSTATE_CURAGGCONTEXT 14 ExprContext *curaggcontext; /* currently active aggcontext */ - AggStatePerTrans curpertrans; /* currently active trans state */ + AggStatePerAgg curperagg; /* currently active aggregate, if any */ +#define FIELDNO_AGGSTATE_CURPERTRANS 16 + AggStatePerTrans curpertrans; /* currently active trans state, if any */ bool input_done; /* indicates end of input */ bool agg_done; /* indicates completion of Agg scan */ int projected_set; /* The last projected grouping set */ +#define FIELDNO_AGGSTATE_CURRENT_SET 20 int current_set; /* The current grouping set being evaluated */ Bitmapset *grouped_cols; /* grouped cols in current projection */ List *all_grouped_cols; /* list of all grouped cols in DESC order */ @@ -1804,17 +1956,21 @@ typedef struct AggState Tuplesortstate *sort_out; /* input is copied here for next phase */ TupleTableSlot *sort_slot; /* slot for sort results */ /* these fields are used in AGG_PLAIN and AGG_SORTED modes: */ - AggStatePerGroup pergroup; /* per-Aggref-per-group working state */ + AggStatePerGroup *pergroups; /* grouping set indexed array of per-group + * pointers */ HeapTuple grp_firstTuple; /* copy of first tuple of current group */ /* these fields are used in AGG_HASHED and AGG_MIXED modes: */ bool table_filled; /* hash table filled yet? 
*/ int num_hashes; - AggStatePerHash perhash; - AggStatePerGroup *hash_pergroup; /* array of per-group pointers */ - /* support for evaluation of agg inputs */ - TupleTableSlot *evalslot; /* slot for agg inputs */ - ProjectionInfo *evalproj; /* projection machinery */ - TupleDesc evaldesc; /* descriptor of input tuples */ + AggStatePerHash perhash; /* array of per-hashtable data */ + AggStatePerGroup *hash_pergroup; /* grouping set indexed array of + * per-group pointers */ + + /* support for evaluation of agg input expressions: */ +#define FIELDNO_AGGSTATE_ALL_PERGROUPS 34 + AggStatePerGroup *all_pergroups; /* array of first ->pergroups, than + * ->hash_pergroup */ + ProjectionInfo *combinedproj; /* projection machinery */ } AggState; /* ---------------- @@ -1836,14 +1992,17 @@ typedef struct WindowAggState WindowStatePerFunc perfunc; /* per-window-function information */ WindowStatePerAgg peragg; /* per-plain-aggregate information */ - FmgrInfo *partEqfunctions; /* equality funcs for partition columns */ - FmgrInfo *ordEqfunctions; /* equality funcs for ordering columns */ + ExprState *partEqfunction; /* equality funcs for partition columns */ + ExprState *ordEqfunction; /* equality funcs for ordering columns */ Tuplestorestate *buffer; /* stores rows of current partition */ - int current_ptr; /* read pointer # for current */ + int current_ptr; /* read pointer # for current row */ + int framehead_ptr; /* read pointer # for frame head, if used */ + int frametail_ptr; /* read pointer # for frame tail, if used */ + int grouptail_ptr; /* read pointer # for group tail, if used */ int64 spooled_rows; /* total # of rows in buffer */ int64 currentpos; /* position of current row in partition */ int64 frameheadpos; /* current frame head position */ - int64 frametailpos; /* current frame tail position */ + int64 frametailpos; /* current frame tail position (frame end+1) */ /* use struct pointer to avoid including windowapi.h here */ struct WindowObjectData *agg_winobj; /* winobj for aggregate fetches */ int64 aggregatedbase; /* start row for current aggregates */ @@ -1855,6 +2014,20 @@ typedef struct WindowAggState Datum startOffsetValue; /* result of startOffset evaluation */ Datum endOffsetValue; /* result of endOffset evaluation */ + /* these fields are used with RANGE offset PRECEDING/FOLLOWING: */ + FmgrInfo startInRangeFunc; /* in_range function for startOffset */ + FmgrInfo endInRangeFunc; /* in_range function for endOffset */ + Oid inRangeColl; /* collation for in_range tests */ + bool inRangeAsc; /* use ASC sort order for in_range tests? */ + bool inRangeNullsFirst; /* nulls sort first for in_range tests? 
*/ + + /* these fields are used in GROUPS mode: */ + int64 currentgroup; /* peer group # of current row in partition */ + int64 frameheadgroup; /* peer group # of frame head row */ + int64 frametailgroup; /* peer group # of frame tail row */ + int64 groupheadpos; /* current row's peer group head position */ + int64 grouptailpos; /* " " " " tail position (group end+1) */ + MemoryContext partcontext; /* context for partition-lifespan data */ MemoryContext aggcontext; /* shared context for aggregate working data */ MemoryContext curaggcontext; /* current aggregate's working data */ @@ -1870,9 +2043,13 @@ typedef struct WindowAggState * date for current row */ bool frametail_valid; /* true if frametailpos is known up to * date for current row */ + bool grouptail_valid; /* true if grouptailpos is known up to + * date for current row */ TupleTableSlot *first_part_slot; /* first tuple of current or next * partition */ + TupleTableSlot *framehead_slot; /* first tuple of current frame */ + TupleTableSlot *frametail_slot; /* first tuple after current frame */ /* temporary slots for tuples fetched back from tuplestore */ TupleTableSlot *agg_row_slot; @@ -1894,8 +2071,7 @@ typedef struct WindowAggState typedef struct UniqueState { PlanState ps; /* its first field is NodeTag */ - FmgrInfo *eqfunctions; /* per-field lookup data for equality fns */ - MemoryContext tempContext; /* short-term context for comparisons */ + ExprState *eqfunction; /* tuple equality qual */ } UniqueState; /* ---------------- @@ -1908,14 +2084,17 @@ typedef struct UniqueState typedef struct GatherState { PlanState ps; /* its first field is NodeTag */ - bool initialized; - struct ParallelExecutorInfo *pei; - int nreaders; - int nextreader; - int nworkers_launched; - struct TupleQueueReader **reader; + bool initialized; /* workers launched? */ + bool need_to_scan_locally; /* need to read from local plan? */ + int64 tuples_needed; /* tuple bound, see ExecSetTupleBound */ + /* these fields are set up once: */ TupleTableSlot *funnel_slot; - bool need_to_scan_locally; + struct ParallelExecutorInfo *pei; + /* all remaining fields are reinitialized during a rescan: */ + int nworkers_launched; /* original number of workers */ + int nreaders; /* number of still-active workers */ + int nextreader; /* next one to try to read from */ + struct TupleQueueReader **reader; /* array with nreaders active entries */ } GatherState; /* ---------------- @@ -1926,26 +2105,53 @@ typedef struct GatherState * merge the results into a single sorted stream. * ---------------- */ -struct GMReaderTuple; +struct GMReaderTupleBuffer; /* private in nodeGatherMerge.c */ typedef struct GatherMergeState { PlanState ps; /* its first field is NodeTag */ - bool initialized; + bool initialized; /* workers launched? */ + bool gm_initialized; /* gather_merge_init() done? */ + bool need_to_scan_locally; /* need to read from local plan? 
*/ + int64 tuples_needed; /* tuple bound, see ExecSetTupleBound */ + /* these fields are set up once: */ + TupleDesc tupDesc; /* descriptor for subplan result tuples */ + int gm_nkeys; /* number of sort columns */ + SortSupport gm_sortkeys; /* array of length gm_nkeys */ struct ParallelExecutorInfo *pei; - int nreaders; - int nworkers_launched; - struct TupleQueueReader **reader; - TupleDesc tupDesc; - TupleTableSlot **gm_slots; + /* all remaining fields are reinitialized during a rescan */ + /* (but the arrays are not reallocated, just cleared) */ + int nworkers_launched; /* original number of workers */ + int nreaders; /* number of active workers */ + TupleTableSlot **gm_slots; /* array with nreaders+1 entries */ + struct TupleQueueReader **reader; /* array with nreaders active entries */ + struct GMReaderTupleBuffer *gm_tuple_buffers; /* nreaders tuple buffers */ struct binaryheap *gm_heap; /* binary heap of slot indices */ - bool gm_initialized; /* gather merge initilized ? */ - bool need_to_scan_locally; - int gm_nkeys; - SortSupport gm_sortkeys; /* array of length ms_nkeys */ - struct GMReaderTupleBuffer *gm_tuple_buffers; /* tuple buffer per reader */ } GatherMergeState; +/* ---------------- + * Values displayed by EXPLAIN ANALYZE + * ---------------- + */ +typedef struct HashInstrumentation +{ + int nbuckets; /* number of buckets at end of execution */ + int nbuckets_original; /* planned number of buckets */ + int nbatch; /* number of batches at end of execution */ + int nbatch_original; /* planned number of batches */ + size_t space_peak; /* peak memory usage in bytes */ +} HashInstrumentation; + +/* ---------------- + * Shared memory container for per-worker hash information + * ---------------- + */ +typedef struct SharedHashInfo +{ + int num_workers; + HashInstrumentation hinstrument[FLEXIBLE_ARRAY_MEMBER]; +} SharedHashInfo; + /* ---------------- * HashState information * ---------------- */ @@ -1956,6 +2162,12 @@ typedef struct HashState HashJoinTable hashtable; /* hash table for the hashjoin */ List *hashkeys; /* list of ExprState nodes */ /* hashkeys is same as parent's hj_InnerHashKeys */ + + SharedHashInfo *shared_info; /* one entry per worker */ + HashInstrumentation *hinstrument; /* this worker's entry */ + + /* Parallel hash state.
*/ + struct ParallelHashJoinState *parallel_state; } HashState; /* ---------------- @@ -1973,11 +2185,11 @@ typedef struct SetOpStatePerGroupData *SetOpStatePerGroup; typedef struct SetOpState { PlanState ps; /* its first field is NodeTag */ - FmgrInfo *eqfunctions; /* per-grouping-field equality fns */ + ExprState *eqfunction; /* equality comparator */ + Oid *eqfuncoids; /* per-grouping-field equality fns */ FmgrInfo *hashfunctions; /* per-grouping-field hash fns */ bool setop_done; /* indicates completion of output scan */ long numOutput; /* number of dups left to output */ - MemoryContext tempContext; /* short-term context for comparisons */ /* these fields are used in SETOP_SORTED mode: */ SetOpStatePerGroup pergroup; /* per-group working state */ HeapTuple grp_firstTuple; /* copy of first tuple of current group */ diff --git a/src/include/nodes/extensible.h b/src/include/nodes/extensible.h index 7325bf536a..3f909fb459 100644 --- a/src/include/nodes/extensible.h +++ b/src/include/nodes/extensible.h @@ -4,7 +4,7 @@ * Definitions for extensible nodes and custom scans * * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/include/nodes/extensible.h @@ -96,6 +96,9 @@ typedef struct CustomPathMethods List *tlist, List *clauses, List *custom_plans); + struct List *(*ReparameterizeCustomPathByChild) (PlannerInfo *root, + List *custom_private, + RelOptInfo *child_rel); } CustomPathMethods; /* @@ -136,6 +139,9 @@ typedef struct CustomExecMethods void (*InitializeDSMCustomScan) (CustomScanState *node, ParallelContext *pcxt, void *coordinate); + void (*ReInitializeDSMCustomScan) (CustomScanState *node, + ParallelContext *pcxt, + void *coordinate); void (*InitializeWorkerCustomScan) (CustomScanState *node, shm_toc *toc, void *coordinate); diff --git a/src/include/nodes/lockoptions.h b/src/include/nodes/lockoptions.h index e0981dac6f..24afd6efd4 100644 --- a/src/include/nodes/lockoptions.h +++ b/src/include/nodes/lockoptions.h @@ -4,7 +4,7 @@ * Common header for some locking-related declarations. * * - * Copyright (c) 2014-2017, PostgreSQL Global Development Group + * Copyright (c) 2014-2018, PostgreSQL Global Development Group * * src/include/nodes/lockoptions.h * diff --git a/src/include/nodes/makefuncs.h b/src/include/nodes/makefuncs.h index 46a79b1817..57bd52ff24 100644 --- a/src/include/nodes/makefuncs.h +++ b/src/include/nodes/makefuncs.h @@ -4,7 +4,7 @@ * prototypes for the creator functions (for primitive nodes) * * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/include/nodes/makefuncs.h @@ -86,4 +86,6 @@ extern DefElem *makeDefElemExtended(char *nameSpace, char *name, Node *arg, extern GroupingSet *makeGroupingSet(GroupingSetKind kind, List *content, int location); +extern VacuumRelation *makeVacuumRelation(RangeVar *relation, Oid oid, List *va_cols); + #endif /* MAKEFUNC_H */ diff --git a/src/include/nodes/memnodes.h b/src/include/nodes/memnodes.h index 7a0c6763df..2a8d83f3d0 100644 --- a/src/include/nodes/memnodes.h +++ b/src/include/nodes/memnodes.h @@ -4,7 +4,7 @@ * POSTGRES memory context node definitions. 
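/*
 * Illustrative sketch (not part of this patch): structs such as
 * SharedSortInfo and SharedHashInfo above end in a FLEXIBLE_ARRAY_MEMBER, so
 * the shared-memory space reserved for them is the struct header plus one
 * instrumentation slot per worker.  estimate_shared_hash_info_size() is a
 * hypothetical name; the real estimates live in the corresponding executor
 * nodes and typically route the arithmetic through add_size()/mul_size() to
 * guard against overflow.
 */
#include "postgres.h"
#include "nodes/execnodes.h"

static Size
estimate_shared_hash_info_size(int nworkers)
{
	return offsetof(SharedHashInfo, hinstrument) +
		nworkers * sizeof(HashInstrumentation);
}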
* * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/include/nodes/memnodes.h @@ -39,30 +39,33 @@ typedef struct MemoryContextCounters * A logical context in which memory allocations occur. * * MemoryContext itself is an abstract type that can have multiple - * implementations, though for now we have only AllocSetContext. + * implementations. * The function pointers in MemoryContextMethods define one specific * implementation of MemoryContext --- they are a virtual function table * in C++ terms. * * Node types that are actual implementations of memory contexts must - * begin with the same fields as MemoryContext. + * begin with the same fields as MemoryContextData. * * Note: for largely historical reasons, typedef MemoryContext is a pointer * to the context struct rather than the struct type itself. */ +typedef void (*MemoryStatsPrintFunc) (MemoryContext context, void *passthru, + const char *stats_string); + typedef struct MemoryContextMethods { void *(*alloc) (MemoryContext context, Size size); /* call this free_p in case someone #define's free() */ void (*free_p) (MemoryContext context, void *pointer); void *(*realloc) (MemoryContext context, void *pointer, Size size); - void (*init) (MemoryContext context); void (*reset) (MemoryContext context); void (*delete_context) (MemoryContext context); Size (*get_chunk_space) (MemoryContext context, void *pointer); bool (*is_empty) (MemoryContext context); - void (*stats) (MemoryContext context, int level, bool print, + void (*stats) (MemoryContext context, + MemoryStatsPrintFunc printfunc, void *passthru, MemoryContextCounters *totals); #ifdef MEMORY_CONTEXT_CHECKING void (*check) (MemoryContext context); @@ -76,12 +79,13 @@ typedef struct MemoryContextData /* these two fields are placed here to minimize alignment wastage: */ bool isReset; /* T = no space alloced since last reset */ bool allowInCritSection; /* allow palloc in critical section */ - MemoryContextMethods *methods; /* virtual function table */ + const MemoryContextMethods *methods; /* virtual function table */ MemoryContext parent; /* NULL if no parent (toplevel context) */ MemoryContext firstchild; /* head of linked list of children */ MemoryContext prevchild; /* previous child of same parent */ MemoryContext nextchild; /* next child of same parent */ - char *name; /* context name (just for debugging) */ + const char *name; /* context name (just for debugging) */ + const char *ident; /* context ID if any (just for debugging) */ MemoryContextCallback *reset_cbs; /* list of reset/delete callbacks */ } MemoryContextData; @@ -96,6 +100,8 @@ typedef struct MemoryContextData */ #define MemoryContextIsValid(context) \ ((context) != NULL && \ - (IsA((context), AllocSetContext) || IsA((context), SlabContext))) + (IsA((context), AllocSetContext) || \ + IsA((context), SlabContext) || \ + IsA((context), GenerationContext))) #endif /* MEMNODES_H */ diff --git a/src/include/nodes/nodeFuncs.h b/src/include/nodes/nodeFuncs.h index 3366983936..849f34d2a8 100644 --- a/src/include/nodes/nodeFuncs.h +++ b/src/include/nodes/nodeFuncs.h @@ -3,7 +3,7 @@ * nodeFuncs.h * Various general-purpose manipulations of Node trees * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of 
California * * src/include/nodes/nodeFuncs.h diff --git a/src/include/nodes/nodes.h b/src/include/nodes/nodes.h index 27bd4f3363..cac6ff0eda 100644 --- a/src/include/nodes/nodes.h +++ b/src/include/nodes/nodes.h @@ -4,7 +4,7 @@ * Definitions for tagged nodes. * * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/include/nodes/nodes.h @@ -34,6 +34,7 @@ typedef enum NodeTag T_ExprContext, T_ProjectionInfo, T_JunkFilter, + T_OnConflictSetState, T_ResultRelInfo, T_EState, T_TupleTableSlot, @@ -86,6 +87,10 @@ typedef enum NodeTag /* these aren't subclasses of Plan: */ T_NestLoopParam, T_PlanRowMark, + T_PartitionPruneInfo, + T_PartitionedRelPruneInfo, + T_PartitionPruneStepOp, + T_PartitionPruneStepCombine, T_PlanInvalItem, /* @@ -260,7 +265,6 @@ typedef enum NodeTag T_PlaceHolderVar, T_SpecialJoinInfo, T_AppendRelInfo, - T_PartitionedChildRelInfo, T_PlaceHolderInfo, T_MinMaxAggInfo, T_PlannerParamItem, @@ -274,6 +278,7 @@ typedef enum NodeTag T_MemoryContext, T_AllocSetContext, T_SlabContext, + T_GenerationContext, /* * TAGS FOR VALUE NODES (value.h) @@ -413,6 +418,7 @@ typedef enum NodeTag T_DropSubscriptionStmt, T_CreateStatsStmt, T_AlterCollationStmt, + T_CallStmt, /* * TAGS FOR PARSE TREE NODES (parsenodes.h) @@ -468,6 +474,7 @@ typedef enum NodeTag T_PartitionBoundSpec, T_PartitionRangeDatum, T_PartitionCmd, + T_VacuumRelation, /* * TAGS FOR REPLICATION GRAMMAR PARSE NODES (replnodes.h) @@ -497,7 +504,8 @@ typedef enum NodeTag T_FdwRoutine, /* in foreign/fdwapi.h */ T_IndexAmRoutine, /* in access/amapi.h */ T_TsmRoutine, /* in access/tsmapi.h */ - T_ForeignKeyCacheInfo /* in utils/rel.h */ + T_ForeignKeyCacheInfo, /* in utils/rel.h */ + T_CallContext /* in nodes/parsenodes.h */ } NodeTag; /* @@ -602,7 +610,10 @@ extern char *bmsToString(const struct Bitmapset *bms); /* * nodes/{readfuncs.c,read.c} */ -extern void *stringToNode(char *str); +extern void *stringToNode(const char *str); +#ifdef WRITE_READ_PARSE_PLAN_TREES +extern void *stringToNodeWithLocations(const char *str); +#endif extern struct Bitmapset *readBitmapset(void); extern uintptr_t readDatum(bool typbyval); extern bool *readBoolCols(int numCols); diff --git a/src/include/nodes/params.h b/src/include/nodes/params.h index 55219dab6e..04b03c7303 100644 --- a/src/include/nodes/params.h +++ b/src/include/nodes/params.h @@ -4,7 +4,7 @@ * Support for finding the values associated with Param nodes. * * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/include/nodes/params.h @@ -16,16 +16,23 @@ /* Forward declarations, to avoid including other headers */ struct Bitmapset; +struct ExprState; +struct Param; struct ParseState; -/* ---------------- +/* * ParamListInfo * - * ParamListInfo arrays are used to pass parameters into the executor - * for parameterized plans. Each entry in the array defines the value - * to be substituted for a PARAM_EXTERN parameter. The "paramid" - * of a PARAM_EXTERN Param can range from 1 to numParams. + * ParamListInfo structures are used to pass parameters into the executor + * for parameterized plans. We support two basic approaches to supplying + * parameter values, the "static" way and the "dynamic" way. 
+ * + * In the static approach, per-parameter data is stored in an array of + * ParamExternData structs appended to the ParamListInfo struct. + * Each entry in the array defines the value to be substituted for a + * PARAM_EXTERN parameter. The "paramid" of a PARAM_EXTERN Param + * can range from 1 to numParams. * * Although parameter numbers are normally consecutive, we allow * ptype == InvalidOid to signal an unused array entry. @@ -35,18 +42,47 @@ struct ParseState; * as a constant (i.e., generate a plan that works only for this value * of the parameter). * - * There are two hook functions that can be associated with a ParamListInfo - * array to support dynamic parameter handling. First, if paramFetch - * isn't null and the executor requires a value for an invalid parameter - * (one with ptype == InvalidOid), the paramFetch hook is called to give - * it a chance to fill in the parameter value. Second, a parserSetup - * hook can be supplied to re-instantiate the original parsing hooks if - * a query needs to be re-parsed/planned (as a substitute for supposing - * that the current ptype values represent a fixed set of parameter types). - + * In the dynamic approach, all access to parameter values is done through + * hook functions found in the ParamListInfo struct. In this case, + * the ParamExternData array is typically unused and not allocated; + * but the legal range of paramid is still 1 to numParams. + * * Although the data structure is really an array, not a list, we keep * the old typedef name to avoid unnecessary code changes. - * ---------------- + * + * There are 3 hook functions that can be associated with a ParamListInfo + * structure: + * + * If paramFetch isn't null, it is called to fetch the ParamExternData + * for a particular param ID, rather than accessing the relevant element + * of the ParamExternData array. This supports the case where the array + * isn't there at all, as well as cases where the data in the array + * might be obsolete or lazily evaluated. paramFetch must return the + * address of a ParamExternData struct describing the specified param ID; + * the convention above about ptype == InvalidOid signaling an invalid + * param ID still applies. The returned struct can either be placed in + * the "workspace" supplied by the caller, or it can be in storage + * controlled by the paramFetch hook if that's more convenient. + * (In either case, the struct is not expected to be long-lived.) + * If "speculative" is true, the paramFetch hook should not risk errors + * in trying to fetch the parameter value, and should report an invalid + * parameter instead. + * + * If paramCompile isn't null, then it controls what execExpr.c compiles + * for PARAM_EXTERN Param nodes --- typically, this hook would emit a + * EEOP_PARAM_CALLBACK step. This allows unnecessary work to be + * optimized away in compiled expressions. + * + * If parserSetup isn't null, then it is called to re-instantiate the + * original parsing hooks when a query needs to be re-parsed/planned. + * This is especially useful if the types of parameters might change + * from time to time, since it can replace the need to supply a fixed + * list of parameter types to the parser. + * + * Notice that the paramFetch and paramCompile hooks are actually passed + * the ParamListInfo struct's address; they can therefore access all + * three of the "arg" fields, and the distinction between paramFetchArg + * and paramCompileArg is rather arbitrary. 
*/ #define PARAM_FLAG_CONST 0x0001 /* parameter is constant */ @@ -61,7 +97,13 @@ typedef struct ParamExternData typedef struct ParamListInfoData *ParamListInfo; -typedef void (*ParamFetchHook) (ParamListInfo params, int paramid); +typedef ParamExternData *(*ParamFetchHook) (ParamListInfo params, + int paramid, bool speculative, + ParamExternData *workspace); + +typedef void (*ParamCompileHook) (ParamListInfo params, struct Param *param, + struct ExprState *state, + Datum *resv, bool *resnull); typedef void (*ParserSetupHook) (struct ParseState *pstate, void *arg); @@ -69,10 +111,16 @@ typedef struct ParamListInfoData { ParamFetchHook paramFetch; /* parameter fetch hook */ void *paramFetchArg; + ParamCompileHook paramCompile; /* parameter compile hook */ + void *paramCompileArg; ParserSetupHook parserSetup; /* parser setup hook */ void *parserSetupArg; - int numParams; /* number of ParamExternDatas following */ - struct Bitmapset *paramMask; /* if non-NULL, can ignore omitted params */ + int numParams; /* nominal/maximum # of Params represented */ + + /* + * params[] may be of length zero if paramFetch is supplied; otherwise it + * must be of length numParams. + */ ParamExternData params[FLEXIBLE_ARRAY_MEMBER]; } ParamListInfoData; diff --git a/src/include/nodes/parsenodes.h b/src/include/nodes/parsenodes.h index 5f2a4a75da..9da8bf2f88 100644 --- a/src/include/nodes/parsenodes.h +++ b/src/include/nodes/parsenodes.h @@ -12,7 +12,7 @@ * identifying statement boundaries in multi-statement source strings. * * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/include/nodes/parsenodes.h @@ -26,6 +26,8 @@ #include "nodes/lockoptions.h" #include "nodes/primnodes.h" #include "nodes/value.h" +#include "partitioning/partdefs.h" + typedef enum OverridingKind { @@ -111,7 +113,7 @@ typedef struct Query QuerySource querySource; /* where did I come from? */ - uint32 queryId; /* query identifier (can be set by plugins) */ + uint64 queryId; /* query identifier (can be set by plugins) */ bool canSetTag; /* do I set the command result tag? */ @@ -166,9 +168,8 @@ typedef struct Query List *constraintDeps; /* a list of pg_constraint OIDs that the query * depends on to be semantically valid */ - List *withCheckOptions; /* a list of WithCheckOption's, which are - * only added during rewrite and therefore - * are not written out as part of Query. */ + List *withCheckOptions; /* a list of WithCheckOption's (added + * during rewrite) */ /* * The following two fields identify the portion of the source text string @@ -198,7 +199,7 @@ typedef struct Query * Similarly, if "typmods" is NIL then the actual typmod is expected to * be prespecified in typemod, otherwise typemod is unused. * - * If pct_type is TRUE, then names is actually a field name and we look up + * If pct_type is true, then names is actually a field name and we look up * the type of that field. Otherwise (the normal case), names is a type * name possibly qualified with schema and database name. */ @@ -499,27 +500,33 @@ typedef struct WindowDef * which were defaulted; the correct behavioral bits must be set either way. * The START_foo and END_foo options must come in pairs of adjacent bits for * the convenience of gram.y, even though some of them are useless/invalid. - * We will need more bits (and fields) to cover the full SQL:2008 option set. 
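[Editor's sketch] Under the revised dynamic-parameter interface above, a paramFetch hook now returns a ParamExternData pointer instead of filling in the shared array. A minimal sketch of such a hook, assuming the parameter values live in a plain int array hung off paramFetchArg; the hook name and that storage scheme are illustrative, not part of the patch:

#include "postgres.h"
#include "catalog/pg_type.h"
#include "nodes/params.h"

static ParamExternData *
my_param_fetch(ParamListInfo params, int paramid,
               bool speculative, ParamExternData *workspace)
{
    int        *values = (int *) params->paramFetchArg;

    if (paramid < 1 || paramid > params->numParams)
    {
        /* out of range: report "invalid parameter" via InvalidOid ptype */
        workspace->value = (Datum) 0;
        workspace->isnull = true;
        workspace->pflags = 0;
        workspace->ptype = InvalidOid;
        return workspace;
    }

    /* return the result in the caller-supplied workspace */
    workspace->value = Int32GetDatum(values[paramid - 1]);
    workspace->isnull = false;
    workspace->pflags = PARAM_FLAG_CONST;
    workspace->ptype = INT4OID;
    return workspace;
}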
*/ #define FRAMEOPTION_NONDEFAULT 0x00001 /* any specified? */ #define FRAMEOPTION_RANGE 0x00002 /* RANGE behavior */ #define FRAMEOPTION_ROWS 0x00004 /* ROWS behavior */ -#define FRAMEOPTION_BETWEEN 0x00008 /* BETWEEN given? */ -#define FRAMEOPTION_START_UNBOUNDED_PRECEDING 0x00010 /* start is U. P. */ -#define FRAMEOPTION_END_UNBOUNDED_PRECEDING 0x00020 /* (disallowed) */ -#define FRAMEOPTION_START_UNBOUNDED_FOLLOWING 0x00040 /* (disallowed) */ -#define FRAMEOPTION_END_UNBOUNDED_FOLLOWING 0x00080 /* end is U. F. */ -#define FRAMEOPTION_START_CURRENT_ROW 0x00100 /* start is C. R. */ -#define FRAMEOPTION_END_CURRENT_ROW 0x00200 /* end is C. R. */ -#define FRAMEOPTION_START_VALUE_PRECEDING 0x00400 /* start is V. P. */ -#define FRAMEOPTION_END_VALUE_PRECEDING 0x00800 /* end is V. P. */ -#define FRAMEOPTION_START_VALUE_FOLLOWING 0x01000 /* start is V. F. */ -#define FRAMEOPTION_END_VALUE_FOLLOWING 0x02000 /* end is V. F. */ - -#define FRAMEOPTION_START_VALUE \ - (FRAMEOPTION_START_VALUE_PRECEDING | FRAMEOPTION_START_VALUE_FOLLOWING) -#define FRAMEOPTION_END_VALUE \ - (FRAMEOPTION_END_VALUE_PRECEDING | FRAMEOPTION_END_VALUE_FOLLOWING) +#define FRAMEOPTION_GROUPS 0x00008 /* GROUPS behavior */ +#define FRAMEOPTION_BETWEEN 0x00010 /* BETWEEN given? */ +#define FRAMEOPTION_START_UNBOUNDED_PRECEDING 0x00020 /* start is U. P. */ +#define FRAMEOPTION_END_UNBOUNDED_PRECEDING 0x00040 /* (disallowed) */ +#define FRAMEOPTION_START_UNBOUNDED_FOLLOWING 0x00080 /* (disallowed) */ +#define FRAMEOPTION_END_UNBOUNDED_FOLLOWING 0x00100 /* end is U. F. */ +#define FRAMEOPTION_START_CURRENT_ROW 0x00200 /* start is C. R. */ +#define FRAMEOPTION_END_CURRENT_ROW 0x00400 /* end is C. R. */ +#define FRAMEOPTION_START_OFFSET_PRECEDING 0x00800 /* start is O. P. */ +#define FRAMEOPTION_END_OFFSET_PRECEDING 0x01000 /* end is O. P. */ +#define FRAMEOPTION_START_OFFSET_FOLLOWING 0x02000 /* start is O. F. */ +#define FRAMEOPTION_END_OFFSET_FOLLOWING 0x04000 /* end is O. F. */ +#define FRAMEOPTION_EXCLUDE_CURRENT_ROW 0x08000 /* omit C.R. */ +#define FRAMEOPTION_EXCLUDE_GROUP 0x10000 /* omit C.R. & peers */ +#define FRAMEOPTION_EXCLUDE_TIES 0x20000 /* omit C.R.'s peers */ + +#define FRAMEOPTION_START_OFFSET \ + (FRAMEOPTION_START_OFFSET_PRECEDING | FRAMEOPTION_START_OFFSET_FOLLOWING) +#define FRAMEOPTION_END_OFFSET \ + (FRAMEOPTION_END_OFFSET_PRECEDING | FRAMEOPTION_END_OFFSET_FOLLOWING) +#define FRAMEOPTION_EXCLUSION \ + (FRAMEOPTION_EXCLUDE_CURRENT_ROW | FRAMEOPTION_EXCLUDE_GROUP | \ + FRAMEOPTION_EXCLUDE_TIES) #define FRAMEOPTION_DEFAULTS \ (FRAMEOPTION_RANGE | FRAMEOPTION_START_UNBOUNDED_PRECEDING | \ @@ -642,11 +649,12 @@ typedef struct ColumnDef bool is_local; /* column has local (non-inherited) def'n */ bool is_not_null; /* NOT NULL constraint specified? */ bool is_from_type; /* column definition came from table type */ - bool is_from_parent; /* column def came from partition parent */ char storage; /* attstorage setting, or 0 for default */ Node *raw_default; /* default value (untransformed parse tree) */ Node *cooked_default; /* default value (transformed expr tree) */ char identity; /* attidentity setting */ + RangeVar *identitySequence; /* to store identity sequence name for + * ALTER TABLE ... 
ADD COLUMN */ CollateClause *collClause; /* untransformed COLLATE spec, if any */ Oid collOid; /* collation OID (InvalidOid if not set) */ List *constraints; /* other constraints on column */ @@ -666,12 +674,13 @@ typedef struct TableLikeClause typedef enum TableLikeOption { - CREATE_TABLE_LIKE_DEFAULTS = 1 << 0, + CREATE_TABLE_LIKE_COMMENTS = 1 << 0, CREATE_TABLE_LIKE_CONSTRAINTS = 1 << 1, - CREATE_TABLE_LIKE_IDENTITY = 1 << 2, - CREATE_TABLE_LIKE_INDEXES = 1 << 3, - CREATE_TABLE_LIKE_STORAGE = 1 << 4, - CREATE_TABLE_LIKE_COMMENTS = 1 << 5, + CREATE_TABLE_LIKE_DEFAULTS = 1 << 2, + CREATE_TABLE_LIKE_IDENTITY = 1 << 3, + CREATE_TABLE_LIKE_INDEXES = 1 << 4, + CREATE_TABLE_LIKE_STATISTICS = 1 << 5, + CREATE_TABLE_LIKE_STORAGE = 1 << 6, CREATE_TABLE_LIKE_ALL = PG_INT32_MAX } TableLikeOption; @@ -777,12 +786,14 @@ typedef struct PartitionElem typedef struct PartitionSpec { NodeTag type; - char *strategy; /* partitioning strategy ('list' or 'range') */ + char *strategy; /* partitioning strategy ('hash', 'list' or + * 'range') */ List *partParams; /* List of PartitionElems */ int location; /* token location, or -1 if unknown */ } PartitionSpec; /* Internal codes for partitioning strategies */ +#define PARTITION_STRATEGY_HASH 'h' #define PARTITION_STRATEGY_LIST 'l' #define PARTITION_STRATEGY_RANGE 'r' @@ -792,11 +803,16 @@ typedef struct PartitionSpec * This represents the portion of the partition key space assigned to a * particular partition. These are stored on disk in pg_class.relpartbound. */ -typedef struct PartitionBoundSpec +struct PartitionBoundSpec { NodeTag type; char strategy; /* see PARTITION_STRATEGY codes above */ + bool is_default; /* is it a default partition bound? */ + + /* Partitioning info for HASH strategy: */ + int modulus; + int remainder; /* Partitioning info for LIST strategy: */ List *listdatums; /* List of Consts (or A_Consts in raw tree) */ @@ -806,7 +822,7 @@ typedef struct PartitionBoundSpec List *upperdatums; /* List of PartitionRangeDatums */ int location; /* token location, or -1 if unknown */ -} PartitionBoundSpec; +}; /* * PartitionRangeDatum - one of the values in a range partition bound @@ -832,7 +848,7 @@ typedef struct PartitionRangeDatum } PartitionRangeDatum; /* - * PartitionCmd - info for ALTER TABLE ATTACH/DETACH PARTITION commands + * PartitionCmd - info for ALTER TABLE/INDEX ATTACH/DETACH PARTITION commands */ typedef struct PartitionCmd { @@ -887,8 +903,8 @@ typedef struct PartitionCmd * them from the joinaliasvars list, because that would affect the attnums * of Vars referencing the rest of the list.) * - * inh is TRUE for relation references that should be expanded to include - * inheritance children, if the rel has any. This *must* be FALSE for + * inh is true for relation references that should be expanded to include + * inheritance children, if the rel has any. This *must* be false for * RTEs other than RTE_RELATION entries. * * inFromCl marks those range variables that are listed in the FROM clause. @@ -956,9 +972,21 @@ typedef struct RangeTblEntry * that the tuple format of the tuplestore is the same as the referenced * relation. This allows plans referencing AFTER trigger transition * tables to be invalidated if the underlying table is altered. + * + * rellockmode is really LOCKMODE, but it's declared int to avoid having + * to include lock-related headers here. It must be RowExclusiveLock if + * the RTE is an INSERT/UPDATE/DELETE target, else RowShareLock if the RTE + * is a SELECT FOR UPDATE/FOR SHARE target, else AccessShareLock. 
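[Editor's sketch] The rellockmode rule spelled out above (RowExclusiveLock for DML targets, RowShareLock for FOR UPDATE/SHARE targets, AccessShareLock otherwise) can be restated as a small helper; this is only an illustration of the comment, not code from the patch:

#include "postgres.h"
#include "storage/lockdefs.h"

/* Hypothetical helper: pick the lock level an RTE should record. */
static int
choose_rellockmode(bool is_dml_target, bool is_for_update_or_share_target)
{
    if (is_dml_target)
        return RowExclusiveLock;    /* INSERT/UPDATE/DELETE target */
    if (is_for_update_or_share_target)
        return RowShareLock;        /* SELECT FOR UPDATE/FOR SHARE target */
    return AccessShareLock;         /* plain reference */
}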
+ * + * Note: in some cases, rule expansion may result in RTEs that are marked + * with RowExclusiveLock even though they are not the target of the + * current query; this happens if a DO ALSO rule simply scans the original + * target table. We leave such RTEs with their original lockmode so as to + * avoid getting an additional, lesser lock. */ Oid relid; /* OID of the relation */ char relkind; /* relation kind (see pg_class.relkind) */ + int rellockmode; /* lock level that query requires on the rel */ struct TableSampleClause *tablesample; /* sampling info, or NULL */ /* @@ -1016,7 +1044,7 @@ typedef struct RangeTblEntry bool self_reference; /* is this a recursive self-reference? */ /* - * Fields valid for table functions, values, CTE and ENR RTEs (else NIL): + * Fields valid for CTE, VALUES, ENR, and TableFunc RTEs (else NIL): * * We need these for CTE RTEs so that the types of self-referential * columns are well-defined. For VALUES RTEs, storing these explicitly @@ -1024,7 +1052,14 @@ typedef struct RangeTblEntry * ENRs, we store the types explicitly here (we could get the information * from the catalogs if 'relid' was supplied, but we'd still need these * for TupleDesc-based ENRs, so we might as well always store the type - * info here). + * info here). For TableFuncs, these fields are redundant with data in + * the TableFunc node, but keeping them here allows some code sharing with + * the other cases. + * + * For ENRs only, we have to consider the possibility of dropped columns. + * A dropped column is included in these lists, but it will have zeroes in + * all three lists (as well as an empty-string entry in eref). Testing + * for zero coltype is the standard way to detect a dropped column. */ List *coltypes; /* OID list of column type OIDs */ List *coltypmods; /* integer list of column typmods */ @@ -1141,7 +1176,7 @@ typedef struct WithCheckOption * or InvalidOid if not available. * nulls_first means about what you'd expect. If sortop is InvalidOid * then nulls_first is meaningless and should be set to false. - * hashable is TRUE if eqop is hashable (note this condition also depends + * hashable is true if eqop is hashable (note this condition also depends * on the datatype of the input expression). * * In an ORDER BY item, all fields must be valid. (The eqop isn't essential @@ -1263,6 +1298,9 @@ typedef struct GroupingSet * if the clause originally came from WINDOW, and is NULL if it originally * was an OVER clause (but note that we collapse out duplicate OVERs). * partitionClause and orderClause are lists of SortGroupClause structs. + * If we have RANGE with offset PRECEDING/FOLLOWING, the semantics of that are + * specified by startInRangeFunc/inRangeColl/inRangeAsc/inRangeNullsFirst + * for the start offset, or endInRangeFunc/inRange* for the end offset. * winref is an ID number referenced by WindowFunc nodes; it must be unique * among the members of a Query's windowClause list. * When refname isn't null, the partitionClause is always copied from there; @@ -1279,6 +1317,11 @@ typedef struct WindowClause int frameOptions; /* frame_clause options, see WindowDef */ Node *startOffset; /* expression for starting bound, if any */ Node *endOffset; /* expression for ending bound, if any */ + Oid startInRangeFunc; /* in_range function for startOffset */ + Oid endInRangeFunc; /* in_range function for endOffset */ + Oid inRangeColl; /* collation for in_range tests */ + bool inRangeAsc; /* use ASC sort order for in_range tests? 
*/ + bool inRangeNullsFirst; /* nulls sort first for in_range tests? */ Index winref; /* ID referenced by window functions */ bool copiedOrder; /* did we copy orderClause from refname? */ } WindowClause; @@ -1630,9 +1673,11 @@ typedef enum ObjectType OBJECT_OPERATOR, OBJECT_OPFAMILY, OBJECT_POLICY, + OBJECT_PROCEDURE, OBJECT_PUBLICATION, OBJECT_PUBLICATION_REL, OBJECT_ROLE, + OBJECT_ROUTINE, OBJECT_RULE, OBJECT_SCHEMA, OBJECT_SEQUENCE, @@ -1707,6 +1752,7 @@ typedef enum AlterTableType AT_AddConstraint, /* add constraint */ AT_AddConstraintRecurse, /* internal to commands/tablecmds.c */ AT_ReAddConstraint, /* internal to commands/tablecmds.c */ + AT_ReAddDomainConstraint, /* internal to commands/tablecmds.c */ AT_AlterConstraint, /* alter constraint */ AT_ValidateConstraint, /* validate constraint */ AT_ValidateConstraintRecurse, /* internal to commands/tablecmds.c */ @@ -1772,6 +1818,8 @@ typedef struct AlterTableCmd /* one subcommand of an ALTER TABLE */ AlterTableType subtype; /* Type of table alteration to apply */ char *name; /* column, constraint, or trigger to act on, * or tablespace */ + int16 num; /* attribute number for columns referenced by + * number */ RoleSpec *newowner; Node *def; /* definition of new column, index, * constraint, or parent table */ @@ -1828,29 +1876,12 @@ typedef enum GrantTargetType ACL_TARGET_DEFAULTS /* ALTER DEFAULT PRIVILEGES */ } GrantTargetType; -typedef enum GrantObjectType -{ - ACL_OBJECT_COLUMN, /* column */ - ACL_OBJECT_RELATION, /* table, view */ - ACL_OBJECT_SEQUENCE, /* sequence */ - ACL_OBJECT_DATABASE, /* database */ - ACL_OBJECT_DOMAIN, /* domain */ - ACL_OBJECT_FDW, /* foreign-data wrapper */ - ACL_OBJECT_FOREIGN_SERVER, /* foreign server */ - ACL_OBJECT_FUNCTION, /* function */ - ACL_OBJECT_LANGUAGE, /* procedural language */ - ACL_OBJECT_LARGEOBJECT, /* largeobject */ - ACL_OBJECT_NAMESPACE, /* namespace */ - ACL_OBJECT_TABLESPACE, /* tablespace */ - ACL_OBJECT_TYPE /* type */ -} GrantObjectType; - typedef struct GrantStmt { NodeTag type; bool is_grant; /* true = GRANT, false = REVOKE */ GrantTargetType targtype; /* type of the grant target */ - GrantObjectType objtype; /* kind of object being operated on */ + ObjectType objtype; /* kind of object being operated on */ List *objects; /* list of RangeVar nodes, ObjectWithArgs * nodes, or plain names (as Value strings) */ List *privileges; /* list of AccessPriv nodes */ @@ -2085,7 +2116,10 @@ typedef struct Constraint char generated_when; /* Fields used for unique constraints (UNIQUE and PRIMARY KEY): */ - List *keys; /* String nodes naming referenced column(s) */ + List *keys; /* String nodes naming referenced key + * column(s) */ + List *including; /* String nodes naming referenced nonkey + * column(s) */ /* Fields used for EXCLUSION constraints: */ List *exclusions; /* list of (IndexElem, operator name) pairs */ @@ -2671,7 +2705,7 @@ typedef struct FetchStmt FetchDirection direction; /* see above */ long howMany; /* number of rows, or position argument */ char *portalname; /* name of portal (cursor) */ - bool ismove; /* TRUE if MOVE */ + bool ismove; /* true if MOVE */ } FetchStmt; /* ---------------------- @@ -2683,6 +2717,10 @@ typedef struct FetchStmt * index, just a UNIQUE/PKEY constraint using an existing index. isconstraint * must always be true in this case, and the fields describing the index * properties are empty. 
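[Editor's sketch] Returning to the frame-option and WindowClause changes above: a frame clause such as GROUPS BETWEEN 1 PRECEDING AND CURRENT ROW EXCLUDE TIES would be encoded roughly as follows. This is an illustrative fragment showing how the reshuffled flag bits compose, not code from the patch:

#include "postgres.h"
#include "nodes/parsenodes.h"

static void
frame_flags_example(void)
{
    int         frameOptions;

    /* GROUPS BETWEEN 1 PRECEDING AND CURRENT ROW EXCLUDE TIES */
    frameOptions = FRAMEOPTION_NONDEFAULT |
        FRAMEOPTION_GROUPS |
        FRAMEOPTION_BETWEEN |
        FRAMEOPTION_START_OFFSET_PRECEDING |
        FRAMEOPTION_END_CURRENT_ROW |
        FRAMEOPTION_EXCLUDE_TIES;

    /* the grouped helper masks make common tests one-liners */
    Assert((frameOptions & FRAMEOPTION_START_OFFSET) != 0);
    Assert((frameOptions & FRAMEOPTION_EXCLUSION) != 0);

    /* The WindowClause's startOffset would carry the Const "1"; the
     * startInRangeFunc/inRangeColl/inRangeAsc/inRangeNullsFirst fields are
     * only filled in for RANGE offset PRECEDING/FOLLOWING frames, per the
     * comment above. */
}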
+ * + * The relation to build the index on can be represented either by name + * (in which case the RangeVar indicates whether to recurse or not) or by OID + * (in which case the command is always recursive). * ---------------------- */ typedef struct IndexStmt @@ -2690,9 +2728,12 @@ typedef struct IndexStmt NodeTag type; char *idxname; /* name of new index, or NULL for default */ RangeVar *relation; /* relation to build index on */ + Oid relationId; /* OID of relation to build index on */ char *accessMethod; /* name of access method (eg. btree) */ char *tableSpace; /* tablespace, or NULL for default */ List *indexParams; /* columns to index: a list of IndexElem */ + List *indexIncludingParams; /* additional columns to index: a list + * of IndexElem */ List *options; /* WITH clause options: a list of DefElem */ Node *whereClause; /* qualification (partial-index predicate) */ List *excludeOpNames; /* exclusion operator names, or NIL if none */ @@ -2720,6 +2761,7 @@ typedef struct CreateStatsStmt List *stat_types; /* stat types (list of Value strings) */ List *exprs; /* expressions to build statistics on */ List *relations; /* rels to build stats on (list of RangeVar) */ + char *stxcomment; /* comment to apply to stats, or NULL */ bool if_not_exists; /* do nothing if stats name already exists */ } CreateStatsStmt; @@ -2730,12 +2772,12 @@ typedef struct CreateStatsStmt typedef struct CreateFunctionStmt { NodeTag type; + bool is_procedure; /* it's really CREATE PROCEDURE */ bool replace; /* T => replace if already exists */ List *funcname; /* qualified name of function to create */ List *parameters; /* a list of FunctionParameter */ TypeName *returnType; /* the return type */ List *options; /* a list of DefElem */ - List *withClause; /* a list of DefElem */ } CreateFunctionStmt; typedef enum FunctionParameterMode @@ -2760,6 +2802,7 @@ typedef struct FunctionParameter typedef struct AlterFunctionStmt { NodeTag type; + ObjectType objtype; ObjectWithArgs *func; /* name and args of function */ List *actions; /* list of DefElem */ } AlterFunctionStmt; @@ -2782,8 +2825,26 @@ typedef struct InlineCodeBlock char *source_text; /* source text of anonymous code block */ Oid langOid; /* OID of selected language */ bool langIsTrusted; /* trusted property of the language */ + bool atomic; /* atomic execution context */ } InlineCodeBlock; +/* ---------------------- + * CALL statement + * ---------------------- + */ +typedef struct CallStmt +{ + NodeTag type; + FuncCall *funccall; /* from the parser */ + FuncExpr *funcexpr; /* transformed */ +} CallStmt; + +typedef struct CallContext +{ + NodeTag type; + bool atomic; +} CallContext; + /* ---------------------- * Alter Object Rename Statement * ---------------------- @@ -2924,7 +2985,8 @@ typedef struct TransactionStmt { NodeTag type; TransactionStmtKind kind; /* see above */ - List *options; /* for BEGIN/START and savepoint commands */ + List *options; /* for BEGIN/START commands */ + char *savepoint_name; /* for savepoint commands */ char *gid; /* for two-phase-commit related commands */ } TransactionStmt; @@ -3062,12 +3124,18 @@ typedef struct AlterSystemStmt * Cluster Statement (support pbrown's cluster index implementation) * ---------------------- */ +typedef enum ClusterOption +{ + CLUOPT_RECHECK = 1 << 0, /* recheck relation state */ + CLUOPT_VERBOSE = 1 << 1 /* print progress info */ +} ClusterOption; + typedef struct ClusterStmt { NodeTag type; RangeVar *relation; /* relation being indexed, or NULL if all */ char *indexname; /* original index defined 
*/ - bool verbose; /* print progress info */ + int options; /* OR of ClusterOption flags */ } ClusterStmt; /* ---------------------- @@ -3085,17 +3153,31 @@ typedef enum VacuumOption VACOPT_VERBOSE = 1 << 2, /* print progress info */ VACOPT_FREEZE = 1 << 3, /* FREEZE option */ VACOPT_FULL = 1 << 4, /* FULL (non-concurrent) vacuum */ - VACOPT_NOWAIT = 1 << 5, /* don't wait to get lock (autovacuum only) */ + VACOPT_SKIP_LOCKED = 1 << 5, /* skip if cannot get lock */ VACOPT_SKIPTOAST = 1 << 6, /* don't process the TOAST table, if any */ VACOPT_DISABLE_PAGE_SKIPPING = 1 << 7 /* don't skip any pages */ } VacuumOption; +/* + * Info about a single target table of VACUUM/ANALYZE. + * + * If the OID field is set, it always identifies the table to process. + * Then the relation field can be NULL; if it isn't, it's used only to report + * failure to open/lock the relation. + */ +typedef struct VacuumRelation +{ + NodeTag type; + RangeVar *relation; /* table name to process, or NULL */ + Oid oid; /* table's OID; InvalidOid if not looked up */ + List *va_cols; /* list of column names, or NIL for all */ +} VacuumRelation; + typedef struct VacuumStmt { NodeTag type; int options; /* OR of VacuumOption flags */ - RangeVar *relation; /* single table to process, or NULL */ - List *va_cols; /* list of column names, or NIL for all */ + List *rels; /* list of VacuumRelation, or NIL for all */ } VacuumStmt; /* ---------------------- @@ -3368,7 +3450,7 @@ typedef struct AlterTSConfigurationStmt typedef struct CreatePublicationStmt { NodeTag type; - char *pubname; /* Name of of the publication */ + char *pubname; /* Name of the publication */ List *options; /* List of DefElem nodes */ List *tables; /* Optional list of tables to add */ bool for_all_tables; /* Special publication for all tables in db */ @@ -3377,7 +3459,7 @@ typedef struct CreatePublicationStmt typedef struct AlterPublicationStmt { NodeTag type; - char *pubname; /* Name of of the publication */ + char *pubname; /* Name of the publication */ /* parameters used for ALTER PUBLICATION ... WITH */ List *options; /* List of DefElem nodes */ @@ -3391,7 +3473,7 @@ typedef struct AlterPublicationStmt typedef struct CreateSubscriptionStmt { NodeTag type; - char *subname; /* Name of of the subscription */ + char *subname; /* Name of the subscription */ char *conninfo; /* Connection string to publisher */ List *publication; /* One or more publication to subscribe to */ List *options; /* List of DefElem nodes */ @@ -3410,7 +3492,7 @@ typedef struct AlterSubscriptionStmt { NodeTag type; AlterSubscriptionType kind; /* ALTER_SUBSCRIPTION_OPTIONS, etc */ - char *subname; /* Name of of the subscription */ + char *subname; /* Name of the subscription */ char *conninfo; /* Connection string to publisher */ List *publication; /* One or more publication to subscribe to */ List *options; /* List of DefElem nodes */ @@ -3419,7 +3501,7 @@ typedef struct AlterSubscriptionStmt typedef struct DropSubscriptionStmt { NodeTag type; - char *subname; /* Name of of the subscription */ + char *subname; /* Name of the subscription */ bool missing_ok; /* Skip error if missing? */ DropBehavior behavior; /* RESTRICT or CASCADE behavior */ } DropSubscriptionStmt; diff --git a/src/include/nodes/pg_list.h b/src/include/nodes/pg_list.h index 667d5e269c..e6cd2cdfba 100644 --- a/src/include/nodes/pg_list.h +++ b/src/include/nodes/pg_list.h @@ -27,7 +27,7 @@ * always be so; try to be careful to maintain the distinction.) 
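[Editor's sketch] With the VacuumRelation node above (and the makeVacuumRelation constructor added to makefuncs.h earlier in this patch), a statement such as VACUUM (VERBOSE) public.some_table would be assembled roughly like this; the table name and the helper function are illustrative only:

#include "postgres.h"
#include "nodes/makefuncs.h"
#include "nodes/parsenodes.h"
#include "nodes/pg_list.h"

static VacuumStmt *
build_vacuum_stmt_example(void)
{
    VacuumRelation *vrel;
    VacuumStmt *stmt;

    vrel = makeVacuumRelation(makeRangeVar("public", "some_table", -1),
                              InvalidOid,   /* OID not looked up yet */
                              NIL);         /* NIL = all columns */

    stmt = makeNode(VacuumStmt);
    stmt->options = VACOPT_VACUUM | VACOPT_VERBOSE;
    stmt->rels = list_make1(vrel);          /* one VacuumRelation per target */
    return stmt;
}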
* * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/include/nodes/pg_list.h @@ -269,6 +269,9 @@ extern void list_free_deep(List *list); extern List *list_copy(const List *list); extern List *list_copy_tail(const List *list, int nskip); +typedef int (*list_qsort_comparator) (const void *a, const void *b); +extern List *list_qsort(const List *list, list_qsort_comparator cmp); + /* * To ease migration to the new list API, a set of compatibility * macros are provided that reduce the impact of the list API changes diff --git a/src/include/nodes/plannodes.h b/src/include/nodes/plannodes.h index 7c51e7f9d2..5e3d4cdc58 100644 --- a/src/include/nodes/plannodes.h +++ b/src/include/nodes/plannodes.h @@ -4,7 +4,7 @@ * definitions for query plan nodes * * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/include/nodes/plannodes.h @@ -15,6 +15,7 @@ #define PLANNODES_H #include "access/sdir.h" +#include "access/stratnum.h" #include "lib/stringinfo.h" #include "nodes/bitmapset.h" #include "nodes/lockoptions.h" @@ -44,7 +45,7 @@ typedef struct PlannedStmt CmdType commandType; /* select|insert|update|delete|utility */ - uint32 queryId; /* query identifier (copied from Query) */ + uint64 queryId; /* query identifier (copied from Query) */ bool hasReturning; /* is it insert|update|delete RETURNING? */ @@ -58,6 +59,8 @@ typedef struct PlannedStmt bool parallelModeNeeded; /* parallel mode required to execute? */ + int jitFlags; /* which forms of JIT should be performed */ + struct Plan *planTree; /* tree of Plan nodes */ List *rtable; /* list of RangeTblEntry nodes */ @@ -66,15 +69,8 @@ typedef struct PlannedStmt List *resultRelations; /* integer list of RT indexes, or NIL */ /* - * rtable indexes of non-leaf target relations for UPDATE/DELETE on all - * the partitioned tables mentioned in the query. - */ - List *nonleafResultRelations; - - /* - * rtable indexes of root target relations for UPDATE/DELETE; this list - * maintains a subset of the RT indexes in nonleafResultRelations, - * indicating the roots of the respective partition hierarchies. + * rtable indexes of partitioned table roots that are UPDATE/DELETE + * targets; needed for trigger firing. */ List *rootResultRelations; @@ -89,7 +85,7 @@ typedef struct PlannedStmt List *invalItems; /* other dependencies, as PlanInvalItems */ - int nParamExec; /* number of PARAM_EXEC Params used */ + List *paramExecTypes; /* type OIDs for PARAM_EXEC Params */ Node *utilityStmt; /* non-null if this is utility stmt */ @@ -207,6 +203,12 @@ typedef struct ProjectSet * Apply rows produced by subplan(s) to result table(s), * by inserting, updating, or deleting. * + * If the originally named target table is a partitioned table, both + * nominalRelation and rootRelation contain the RT index of the partition + * root, which is not otherwise mentioned in the plan. Otherwise rootRelation + * is zero. However, nominalRelation will always be set, as it's the rel that + * EXPLAIN should claim is the INSERT/UPDATE/DELETE target. + * * Note that rowMarks and epqParam are presumed to be valid for all the * subplan(s); they can't contain any info that varies across subplans. 
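[Editor's sketch] The list_qsort addition to pg_list.h above sorts a copy of a List with a qsort-style comparator. A sketch under the assumption that, as with qsort, the comparator receives pointers to the array elements being sorted, which here are the list's ListCell pointers; the function names are made up:

#include "postgres.h"
#include "nodes/pg_list.h"

/* Order integer list cells ascending. */
static int
int_cell_cmp(const void *a, const void *b)
{
    int         va = lfirst_int(*(ListCell *const *) a);
    int         vb = lfirst_int(*(ListCell *const *) b);

    return (va > vb) - (va < vb);
}

static List *
sorted_copy_example(void)
{
    List       *unsorted = list_make3_int(3, 1, 2);

    return list_qsort(unsorted, int_cell_cmp);  /* yields (1, 2, 3) */
}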
* ---------------- @@ -217,8 +219,8 @@ typedef struct ModifyTable CmdType operation; /* INSERT, UPDATE, or DELETE */ bool canSetTag; /* do we set the command tag/es_processed? */ Index nominalRelation; /* Parent RT index for use of EXPLAIN */ - /* RT indexes of non-leaf tables in a partition tree */ - List *partitioned_rels; + Index rootRelation; /* Root RT index, if target is partitioned */ + bool partColsUpdated; /* some part key in hierarchy updated */ List *resultRelations; /* integer list of RT indexes */ int resultRelIndex; /* index of first resultRel in plan's list */ int rootResultRelIndex; /* index of the partitioned table root */ @@ -237,6 +239,8 @@ typedef struct ModifyTable List *exclRelTlist; /* tlist of the EXCLUDED pseudo relation */ } ModifyTable; +struct PartitionPruneInfo; /* forward reference to struct below */ + /* ---------------- * Append node - * Generate the concatenation of the results of sub-plans. @@ -245,9 +249,16 @@ typedef struct ModifyTable typedef struct Append { Plan plan; - /* RT indexes of non-leaf tables in a partition tree */ - List *partitioned_rels; List *appendplans; + + /* + * All 'appendplans' preceding this index are non-partial plans. All + * 'appendplans' from this index onwards are partial plans. + */ + int first_partial_plan; + + /* Info for run-time subplan pruning; NULL if we're not doing that */ + struct PartitionPruneInfo *part_prune_info; } Append; /* ---------------- @@ -258,15 +269,15 @@ typedef struct Append typedef struct MergeAppend { Plan plan; - /* RT indexes of non-leaf tables in a partition tree */ - List *partitioned_rels; List *mergeplans; - /* remaining fields are just like the sort-key info in struct Sort */ + /* these fields are just like the sort-key info in struct Sort: */ int numCols; /* number of sort-key columns */ AttrNumber *sortColIdx; /* their indexes in the target list */ Oid *sortOperators; /* OIDs of operators to sort them by */ Oid *collations; /* OIDs of collations */ bool *nullsFirst; /* NULLS FIRST/LAST directions */ + /* Info for run-time subplan pruning; NULL if we're not doing that */ + struct PartitionPruneInfo *part_prune_info; } MergeAppend; /* ---------------- @@ -809,6 +820,12 @@ typedef struct WindowAgg int frameOptions; /* frame_clause options, see WindowDef */ Node *startOffset; /* expression for starting bound, if any */ Node *endOffset; /* expression for ending bound, if any */ + /* these fields are used with RANGE offset PRECEDING/FOLLOWING: */ + Oid startInRangeFunc; /* in_range function for startOffset */ + Oid endInRangeFunc; /* in_range function for endOffset */ + Oid inRangeColl; /* collation for in_range tests */ + bool inRangeAsc; /* use ASC sort order for in_range tests? */ + bool inRangeNullsFirst; /* nulls sort first for in_range tests? */ } WindowAgg; /* ---------------- @@ -825,14 +842,24 @@ typedef struct Unique /* ------------ * gather node + * + * Note: rescan_param is the ID of a PARAM_EXEC parameter slot. That slot + * will never actually contain a value, but the Gather node must flag it as + * having changed whenever it is rescanned. The child parallel-aware scan + * nodes are marked as depending on that parameter, so that the rescan + * machinery is aware that their output is likely to change across rescans. + * In some cases we don't need a rescan Param, so rescan_param is set to -1. 
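[Editor's sketch] Stepping back to the reworked Append node above: first_partial_plan partitions the appendplans list by position, with non-partial subplans first. A hypothetical walk over the list, not executor code:

#include "postgres.h"
#include "nodes/plannodes.h"

static void
walk_append_subplans(Append *aplan)
{
    ListCell   *lc;
    int         i = 0;

    foreach(lc, aplan->appendplans)
    {
        Plan       *subplan = (Plan *) lfirst(lc);
        bool        is_partial = (i >= aplan->first_partial_plan);

        /* non-partial subplans come first, partial ones after the cutoff */
        elog(DEBUG1, "subplan %d (%s): plan node tag %d",
             i, is_partial ? "partial" : "non-partial",
             (int) nodeTag(subplan));
        i++;
    }
}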
* ------------ */ typedef struct Gather { Plan plan; - int num_workers; - bool single_copy; + int num_workers; /* planned number of worker processes */ + int rescan_param; /* ID of Param that signals a rescan, or -1 */ + bool single_copy; /* don't execute plan more than once */ bool invisible; /* suppress EXPLAIN display (for testing)? */ + Bitmapset *initParam; /* param id's of initplans which are referred + * at gather or one of it's child node */ } Gather; /* ------------ @@ -842,13 +869,16 @@ typedef struct Gather typedef struct GatherMerge { Plan plan; - int num_workers; + int num_workers; /* planned number of worker processes */ + int rescan_param; /* ID of Param that signals a rescan, or -1 */ /* remaining fields are just like the sort-key info in struct Sort */ int numCols; /* number of sort-key columns */ AttrNumber *sortColIdx; /* their indexes in the target list */ Oid *sortOperators; /* OIDs of operators to sort them by */ Oid *collations; /* OIDs of collations */ bool *nullsFirst; /* NULLS FIRST/LAST directions */ + Bitmapset *initParam; /* param id's of initplans which are referred + * at gather merge or one of it's child node */ } GatherMerge; /* ---------------- @@ -866,6 +896,7 @@ typedef struct Hash AttrNumber skewColumn; /* outer join key's column #, or zero */ bool skewInherit; /* is outer join rel an inheritance tree? */ /* all other info is in the parent HashJoin node */ + double rows_total; /* estimate total rows if parallel_aware */ } Hash; /* ---------------- @@ -1012,6 +1043,148 @@ typedef struct PlanRowMark } PlanRowMark; +/* + * Node types to represent partition pruning information. + */ + +/* + * PartitionPruneInfo - Details required to allow the executor to prune + * partitions. + * + * Here we store mapping details to allow translation of a partitioned table's + * index as returned by the partition pruning code into subplan indexes for + * plan types which support arbitrary numbers of subplans, such as Append. + * We also store various details to tell the executor when it should be + * performing partition pruning. + * + * Each PartitionedRelPruneInfo describes the partitioning rules for a single + * partitioned table (a/k/a level of partitioning). Since a partitioning + * hierarchy could contain multiple levels, we represent it by a List of + * PartitionedRelPruneInfos, where the first entry represents the topmost + * partitioned table and additional entries represent non-leaf child + * partitions, ordered such that parents appear before their children. + * Then, since an Append-type node could have multiple partitioning + * hierarchies among its children, we have an unordered List of those Lists. + * + * prune_infos List of Lists containing PartitionedRelPruneInfo nodes, + * one sublist per run-time-prunable partition hierarchy + * appearing in the parent plan node's subplans. + * other_subplans Indexes of any subplans that are not accounted for + * by any of the PartitionedRelPruneInfo nodes in + * "prune_infos". These subplans must not be pruned. + */ +typedef struct PartitionPruneInfo +{ + NodeTag type; + List *prune_infos; + Bitmapset *other_subplans; +} PartitionPruneInfo; + +/* + * PartitionedRelPruneInfo - Details required to allow the executor to prune + * partitions for a single partitioned table. + * + * subplan_map[] and subpart_map[] are indexed by partition index (where + * zero is the topmost partition, and non-leaf partitions must come before + * their children). 
For a leaf partition p, subplan_map[p] contains the + * zero-based index of the partition's subplan in the parent plan's subplan + * list; it is -1 if the partition is non-leaf or has been pruned. For a + * non-leaf partition p, subpart_map[p] contains the zero-based index of + * that sub-partition's PartitionedRelPruneInfo in the hierarchy's + * PartitionedRelPruneInfo list; it is -1 if the partition is a leaf or has + * been pruned. Note that subplan indexes are global across the parent plan + * node, but partition indexes are valid only within a particular hierarchy. + */ +typedef struct PartitionedRelPruneInfo +{ + NodeTag type; + Index rtindex; /* RT index of partition rel for this level */ + List *pruning_steps; /* List of PartitionPruneStep, see below */ + Bitmapset *present_parts; /* Indexes of all partitions which subplans or + * subparts are present for. */ + int nparts; /* Length of subplan_map[] and subpart_map[] */ + int nexprs; /* Length of hasexecparam[] */ + int *subplan_map; /* subplan index by partition index, or -1 */ + int *subpart_map; /* subpart index by partition index, or -1 */ + bool *hasexecparam; /* true if corresponding pruning_step contains + * any PARAM_EXEC Params. */ + bool do_initial_prune; /* true if pruning should be performed + * during executor startup. */ + bool do_exec_prune; /* true if pruning should be performed during + * executor run. */ + Bitmapset *execparamids; /* All PARAM_EXEC Param IDs in pruning_steps */ +} PartitionedRelPruneInfo; + +/* + * Abstract Node type for partition pruning steps (there are no concrete + * Nodes of this type). + * + * step_id is the global identifier of the step within its pruning context. + */ +typedef struct PartitionPruneStep +{ + NodeTag type; + int step_id; +} PartitionPruneStep; + +/* + * PartitionPruneStepOp - Information to prune using a set of mutually AND'd + * OpExpr clauses + * + * This contains information extracted from up to partnatts OpExpr clauses, + * where partnatts is the number of partition key columns. 'opstrategy' is the + * strategy of the operator in the clause matched to the last partition key. + * 'exprs' contains expressions which comprise the lookup key to be passed to + * the partition bound search function. 'cmpfns' contains the OIDs of + * comparison functions used to compare aforementioned expressions with + * partition bounds. Both 'exprs' and 'cmpfns' contain the same number of + * items, up to partnatts items. + * + * Once we find the offset of a partition bound using the lookup key, we + * determine which partitions to include in the result based on the value of + * 'opstrategy'. For example, if it were equality, we'd return just the + * partition that would contain that key or a set of partitions if the key + * didn't consist of all partitioning columns. For non-equality strategies, + * we'd need to include other partitions as appropriate. + * + * 'nullkeys' is the set containing the offset of the partition keys (0 to + * partnatts - 1) that were matched to an IS NULL clause. This is only + * considered for hash partitioning as we need to pass which keys are null + * to the hash partition bound search function. It is never possible to + * have an expression be present in 'exprs' for a given partition key and + * the corresponding bit set in 'nullkeys'. 
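[Editor's sketch] The subplan_map/subpart_map convention documented above can be made concrete with a small recursive walk that collects every unpruned subplan index reachable from one PartitionedRelPruneInfo within its hierarchy's list. This is an illustrative helper, not the executor's actual pruning code:

#include "postgres.h"
#include "nodes/bitmapset.h"
#include "nodes/plannodes.h"

static void
collect_unpruned_subplans(List *prunerel_list,
                          PartitionedRelPruneInfo *pinfo,
                          Bitmapset **subplans)
{
    int         i;

    for (i = 0; i < pinfo->nparts; i++)
    {
        if (pinfo->subplan_map[i] >= 0)
        {
            /* leaf partition: remember its subplan index */
            *subplans = bms_add_member(*subplans, pinfo->subplan_map[i]);
        }
        else if (pinfo->subpart_map[i] >= 0)
        {
            /* sub-partitioned child: recurse into its PruneInfo */
            PartitionedRelPruneInfo *child = (PartitionedRelPruneInfo *)
                list_nth(prunerel_list, pinfo->subpart_map[i]);

            collect_unpruned_subplans(prunerel_list, child, subplans);
        }
        /* both entries -1: the partition was pruned away */
    }
}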
+ */ +typedef struct PartitionPruneStepOp +{ + PartitionPruneStep step; + + StrategyNumber opstrategy; + List *exprs; + List *cmpfns; + Bitmapset *nullkeys; +} PartitionPruneStepOp; + +/* + * PartitionPruneStepCombine - Information to prune using a BoolExpr clause + * + * For BoolExpr clauses, we combine the set of partitions determined for each + * of the argument clauses. + */ +typedef enum PartitionPruneCombineOp +{ + PARTPRUNE_COMBINE_UNION, + PARTPRUNE_COMBINE_INTERSECT +} PartitionPruneCombineOp; + +typedef struct PartitionPruneStepCombine +{ + PartitionPruneStep step; + + PartitionPruneCombineOp combineOp; + List *source_stepids; +} PartitionPruneStepCombine; + + /* * Plan invalidation info * diff --git a/src/include/nodes/primnodes.h b/src/include/nodes/primnodes.h index 8c536a8d38..b886ed3534 100644 --- a/src/include/nodes/primnodes.h +++ b/src/include/nodes/primnodes.h @@ -7,7 +7,7 @@ * and join trees. * * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/include/nodes/primnodes.h @@ -75,12 +75,15 @@ typedef struct RangeVar /* * TableFunc - node for a table function, such as XMLTABLE. + * + * Entries in the ns_names list are either string Value nodes containing + * literal namespace names, or NULL pointers to represent DEFAULT. */ typedef struct TableFunc { NodeTag type; - List *ns_uris; /* list of namespace uri */ - List *ns_names; /* list of namespace names */ + List *ns_uris; /* list of namespace URI expressions */ + List *ns_names; /* list of namespace names or NULL */ Node *docexpr; /* input document expression */ Node *rowexpr; /* row filter expression */ List *colnames; /* column names (list of String) */ @@ -166,7 +169,7 @@ typedef struct Var Index varno; /* index of this var's relation in the range * table, or INNER_VAR/OUTER_VAR/INDEX_VAR */ AttrNumber varattno; /* attribute number of this var, or zero for - * all */ + * all attrs ("whole-row Var") */ Oid vartype; /* pg_type OID for the type of this var */ int32 vartypmod; /* pg_attribute typmod value */ Oid varcollid; /* OID of collation, or InvalidOid if none */ @@ -302,7 +305,7 @@ typedef struct Aggref List *aggorder; /* ORDER BY (list of SortGroupClause) */ List *aggdistinct; /* DISTINCT (list of SortGroupClause) */ Expr *aggfilter; /* FILTER expression, if any */ - bool aggstar; /* TRUE if argument list was really '*' */ + bool aggstar; /* true if argument list was really '*' */ bool aggvariadic; /* true if variadic arguments have been * combined into an array last argument */ char aggkind; /* aggregate kind (see pg_aggregate.h) */ @@ -359,7 +362,7 @@ typedef struct WindowFunc List *args; /* arguments to the window function */ Expr *aggfilter; /* FILTER expression, if any */ Index winref; /* index of associated WindowClause */ - bool winstar; /* TRUE if argument list was really '*' */ + bool winstar; /* true if argument list was really '*' */ bool winagg; /* is function a simple aggregate? 
*/ int location; /* token location, or -1 if unknown */ } WindowFunc; @@ -695,9 +698,9 @@ typedef struct SubPlan Oid firstColCollation; /* Collation of first column of subplan * result */ /* Information about execution strategy: */ - bool useHashTable; /* TRUE to store subselect output in a hash + bool useHashTable; /* true to store subselect output in a hash * table (implies we are doing "IN") */ - bool unknownEqFalse; /* TRUE if it's okay to return FALSE when the + bool unknownEqFalse; /* true if it's okay to return FALSE when the * spec result is UNKNOWN; this allows much * simpler handling of null values */ bool parallel_safe; /* is the subplan parallel-safe? */ @@ -755,6 +758,9 @@ typedef struct FieldSelect * the assign case of ArrayRef, this is used to implement UPDATE of a * portion of a column. * + * resulttype is always a named composite type (not a domain). To update + * a composite domain value, apply CoerceToDomain to the FieldStore. + * * A single FieldStore can actually represent updates of several different * fields. The parser only generates FieldStores with single-element lists, * but the planner will collapse multiple updates of the same base column @@ -820,11 +826,12 @@ typedef struct CoerceViaIO * ArrayCoerceExpr * * ArrayCoerceExpr represents a type coercion from one array type to another, - * which is implemented by applying the indicated element-type coercion - * function to each element of the source array. If elemfuncid is InvalidOid - * then the element types are binary-compatible, but the coercion still - * requires some effort (we have to fix the element type ID stored in the - * array header). + * which is implemented by applying the per-element coercion expression + * "elemexpr" to each element of the source array. Within elemexpr, the + * source element is represented by a CaseTestExpr node. Note that even if + * elemexpr is a no-op (that is, just CaseTestExpr + RelabelType), the + * coercion still requires some effort: we have to fix the element type OID + * stored in the array header. * ---------------- */ @@ -832,11 +839,10 @@ typedef struct ArrayCoerceExpr { Expr xpr; Expr *arg; /* input expression (yields an array) */ - Oid elemfuncid; /* OID of element coercion function, or 0 */ + Expr *elemexpr; /* expression representing per-element work */ Oid resulttype; /* output type of coercion (an array type) */ int32 resulttypmod; /* output typmod (also element typmod) */ Oid resultcollid; /* OID of collation, or InvalidOid if none */ - bool isExplicit; /* conversion semantics flag to pass to func */ CoercionForm coerceformat; /* how to display this node */ int location; /* token location, or -1 if unknown */ } ArrayCoerceExpr; @@ -849,7 +855,8 @@ typedef struct ArrayCoerceExpr * needed for the destination type plus possibly others; the columns need not * be in the same positions, but are matched up by name. This is primarily * used to convert a whole-row value of an inheritance child table into a - * valid whole-row value of its parent table's rowtype. + * valid whole-row value of its parent table's rowtype. Both resulttype + * and the exposed type of "arg" must be named composite types (not domains). * ---------------- */ @@ -927,8 +934,20 @@ typedef struct CaseWhen * This is effectively like a Param, but can be implemented more simply * since we need only one replacement value at a time. * - * We also use this in nested UPDATE expressions. - * See transformAssignmentIndirection(). 
+ * We also abuse this node type for some other purposes, including: + * * Placeholder for the current array element value in ArrayCoerceExpr; + * see build_coercion_expression(). + * * Nested FieldStore/ArrayRef assignment expressions in INSERT/UPDATE; + * see transformAssignmentIndirection(). + * + * The uses in CaseExpr and ArrayCoerceExpr are safe only to the extent that + * there is not any other CaseExpr or ArrayCoerceExpr between the value source + * node and its child CaseTestExpr(s). This is true in the parse analysis + * output, but the planner's function-inlining logic has to be careful not to + * break it. + * + * The nested-assignment-expression case is safe because the only node types + * that can be above such CaseTestExprs are FieldStore and ArrayRef. */ typedef struct CaseTestExpr { @@ -987,6 +1006,9 @@ typedef struct RowExpr Oid row_typeid; /* RECORDOID or a composite type's ID */ /* + * row_typeid cannot be a domain over composite, only plain composite. To + * create a composite domain value, apply CoerceToDomain to the RowExpr. + * * Note: we deliberately do NOT store a typmod. Although a typmod will be * associated with specific RECORD types at runtime, it will differ for * different backends, and so cannot safely be stored in stored diff --git a/src/include/nodes/print.h b/src/include/nodes/print.h index fa01c2ad84..fba87c6c13 100644 --- a/src/include/nodes/print.h +++ b/src/include/nodes/print.h @@ -4,7 +4,7 @@ * definitions for nodes/print.c * * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/include/nodes/print.h diff --git a/src/include/nodes/readfuncs.h b/src/include/nodes/readfuncs.h index c80fae2311..4f0d3c2192 100644 --- a/src/include/nodes/readfuncs.h +++ b/src/include/nodes/readfuncs.h @@ -4,7 +4,7 @@ * header file for read.c and readfuncs.c. These functions are internal * to the stringToNode interface and should not be used by anyone else. * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/include/nodes/readfuncs.h @@ -16,12 +16,19 @@ #include "nodes/nodes.h" +/* + * variable in read.c that needs to be accessible to readfuncs.c + */ +#ifdef WRITE_READ_PARSE_PLAN_TREES +extern bool restore_location_fields; +#endif + /* * prototypes for functions in read.c (the lisp token parser) */ -extern char *pg_strtok(int *length); -extern char *debackslash(char *token, int length); -extern void *nodeRead(char *token, int tok_len); +extern const char *pg_strtok(int *length); +extern char *debackslash(const char *token, int length); +extern void *nodeRead(const char *token, int tok_len); /* * prototypes for functions in readfuncs.c diff --git a/src/include/nodes/relation.h b/src/include/nodes/relation.h index 3ccc9d1b03..6fd24203dd 100644 --- a/src/include/nodes/relation.h +++ b/src/include/nodes/relation.h @@ -4,7 +4,7 @@ * Definitions for planner's internal data structures. 
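[Editor's sketch] As a concrete illustration of the reworked ArrayCoerceExpr above: the "no-op" per-element expression mentioned there, a CaseTestExpr for the current source element relabeled to the target element type, could be built as below. The helper name is made up; the real construction lives in the parser's coercion code:

#include "postgres.h"
#include "nodes/makefuncs.h"
#include "nodes/primnodes.h"

static Expr *
make_noop_elemexpr(Oid srcelemtype, int32 srcelemtypmod, Oid srcelemcollid,
                   Oid tgtelemtype, int32 tgtelemtypmod, Oid tgtelemcollid)
{
    CaseTestExpr *ctest = makeNode(CaseTestExpr);

    /* the CaseTestExpr stands in for "the current source array element" */
    ctest->typeId = srcelemtype;
    ctest->typeMod = srcelemtypmod;
    ctest->collation = srcelemcollid;

    /* binary-compatible element types: just relabel, no function call */
    return (Expr *) makeRelabelType((Expr *) ctest,
                                    tgtelemtype, tgtelemtypmod, tgtelemcollid,
                                    COERCE_IMPLICIT_CAST);
}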
* * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/include/nodes/relation.h @@ -15,6 +15,7 @@ #define RELATION_H #include "access/sdir.h" +#include "fmgr.h" #include "lib/stringinfo.h" #include "nodes/params.h" #include "nodes/parsenodes.h" @@ -71,6 +72,8 @@ typedef struct AggClauseCosts typedef enum UpperRelationKind { UPPERREL_SETOP, /* result of UNION/INTERSECT/EXCEPT, if any */ + UPPERREL_PARTIAL_GROUP_AGG, /* result of partial grouping/aggregation, if + * any */ UPPERREL_GROUP_AGG, /* result of grouping/aggregation, if any */ UPPERREL_WINDOW, /* result of window functions, if any */ UPPERREL_DISTINCT, /* result of "SELECT DISTINCT", if any */ @@ -79,6 +82,17 @@ typedef enum UpperRelationKind /* NB: UPPERREL_FINAL must be last enum entry; it's used to size arrays */ } UpperRelationKind; +/* + * This enum identifies which type of relation is being planned through the + * inheritance planner. INHKIND_NONE indicates the inheritance planner + * was not used. + */ +typedef enum InheritanceKind +{ + INHKIND_NONE, + INHKIND_INHERITED, + INHKIND_PARTITIONED +} InheritanceKind; /*---------- * PlannerGlobal @@ -107,14 +121,13 @@ typedef struct PlannerGlobal List *resultRelations; /* "flat" list of integer RT indexes */ - List *nonleafResultRelations; /* "flat" list of integer RT indexes */ List *rootResultRelations; /* "flat" list of integer RT indexes */ List *relationOids; /* OIDs of relations the plan depends on */ List *invalItems; /* other dependencies, as PlanInvalItems */ - int nParamExec; /* number of PARAM_EXEC Params used */ + List *paramExecTypes; /* type OIDs for PARAM_EXEC Params */ Index lastPHId; /* highest PlaceHolderVar ID assigned */ @@ -148,6 +161,8 @@ typedef struct PlannerGlobal * the passed-in Query data structure; someday that should stop. *---------- */ +struct AppendRelInfo; + typedef struct PlannerInfo { NodeTag type; @@ -187,6 +202,14 @@ typedef struct PlannerInfo */ RangeTblEntry **simple_rte_array; /* rangetable as an array */ + /* + * append_rel_array is the same length as the above arrays, and holds + * pointers to the corresponding AppendRelInfo entry indexed by + * child_relid, or NULL if none. The array itself is not allocated if + * append_rel_list is empty. + */ + struct AppendRelInfo **append_rel_array; + /* * all_baserels is a Relids set of all base relids (but not "other" * relids) in the query; that is, the Relids identifier of the final join @@ -251,8 +274,6 @@ typedef struct PlannerInfo List *append_rel_list; /* list of AppendRelInfos */ - List *pcinfo_list; /* list of PartitionedChildRelInfos */ - List *rowMarks; /* list of PlanRowMarks */ List *placeholder_list; /* list of PlaceHolderInfos */ @@ -266,6 +287,9 @@ typedef struct PlannerInfo List *distinct_pathkeys; /* distinctClause pathkeys, if any */ List *sort_pathkeys; /* sortClause pathkeys, if any */ + List *part_schemes; /* Canonicalised partition schemes used in the + * query. 
*/ + List *initial_rels; /* RelOptInfos we are now trying to join */ /* Use fetch_upper_rel() to get any particular upper rel */ @@ -286,7 +310,8 @@ typedef struct PlannerInfo MemoryContext planner_cxt; /* context holding PlannerInfo */ - double total_table_pages; /* # of pages in all tables of query */ + double total_table_pages; /* # of pages in all non-dummy tables of + * query */ double tuple_fraction; /* tuple_fraction passed to query_planner */ double limit_tuples; /* limit_tuples passed to query_planner */ @@ -294,8 +319,9 @@ typedef struct PlannerInfo Index qual_security_level; /* minimum security_level for quals */ /* Note: qual_security_level is zero if there are no securityQuals */ - bool hasInheritedTarget; /* true if parse->resultRelation is an - * inheritance child rel */ + InheritanceKind inhTargetKind; /* indicates if the target relation is an + * inheritance child or partition or a + * partitioned table */ bool hasJoinRTEs; /* true if any RTEs are RTE_JOIN kind */ bool hasLateralRTEs; /* true if any RTEs are marked LATERAL */ bool hasDeletedRTEs; /* true if any RTE was deleted from jointree */ @@ -314,6 +340,9 @@ typedef struct PlannerInfo /* optional private data for join_search_hook, e.g., GEQO */ void *join_search_private; + + /* Does this query modify any partition key columns? */ + bool partColsUpdated; } PlannerInfo; @@ -326,6 +355,37 @@ typedef struct PlannerInfo ((root)->simple_rte_array ? (root)->simple_rte_array[rti] : \ rt_fetch(rti, (root)->parse->rtable)) +/* + * If multiple relations are partitioned the same way, all such partitions + * will have a pointer to the same PartitionScheme. A list of PartitionScheme + * objects is attached to the PlannerInfo. By design, the partition scheme + * incorporates only the general properties of the partition method (LIST vs. + * RANGE, number of partitioning columns and the type information for each) + * and not the specific bounds. + * + * We store the opclass-declared input data types instead of the partition key + * datatypes since the former rather than the latter are used to compare + * partition bounds. Since partition key data types and the opclass declared + * input data types are expected to be binary compatible (per ResolveOpClass), + * both of those should have same byval and length properties. + */ +typedef struct PartitionSchemeData +{ + char strategy; /* partition strategy */ + int16 partnatts; /* number of partition attributes */ + Oid *partopfamily; /* OIDs of operator families */ + Oid *partopcintype; /* OIDs of opclass declared input data types */ + Oid *partcollation; /* OIDs of partitioning collations */ + + /* Cached information about partition key data types. */ + int16 *parttyplen; + bool *parttypbyval; + + /* Cached information about partition comparison functions. */ + FmgrInfo *partsupfunc; +} PartitionSchemeData; + +typedef struct PartitionSchemeData *PartitionScheme; /*---------- * RelOptInfo @@ -360,6 +420,11 @@ typedef struct PlannerInfo * handling join alias Vars. Currently this is not needed because all join * alias Vars are expanded to non-aliased form during preprocess_expression. * + * We also have relations representing joins between child relations of + * different partitioned tables. These relations are not added to + * join_rel_level lists as they are not joined directly by the dynamic + * programming algorithm. + * * There is also a RelOptKind for "upper" relations, which are RelOptInfos * that describe post-scan/join processing steps, such as aggregation. 
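/*
 * Illustrative sketch (not part of the upstream patch): because partition
 * schemes are canonicalized in root->part_schemes, two relations are
 * partitioned "the same way" exactly when their part_scheme pointers are
 * equal; only the partition bounds still need to be compared.  The helper
 * name is hypothetical; rel1/rel2 are assumed to be RelOptInfos under
 * consideration for a partitionwise join.
 */
#include "postgres.h"
#include "nodes/relation.h"

static bool
have_matching_partition_scheme(RelOptInfo *rel1, RelOptInfo *rel2)
{
    return rel1->part_scheme != NULL &&
        rel1->part_scheme == rel2->part_scheme;
}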
* Many of the fields in these RelOptInfos are meaningless, but their Path @@ -456,7 +521,7 @@ typedef struct PlannerInfo * other rels for which we have tried and failed to prove * this one unique * - * The presence of the remaining fields depends on the restrictions + * The presence of the following fields depends on the restrictions * and joins that the relation participates in: * * baserestrictinfo - List of RestrictInfo nodes, containing info about @@ -487,6 +552,29 @@ typedef struct PlannerInfo * We store baserestrictcost in the RelOptInfo (for base relations) because * we know we will need it at least once (to price the sequential scan) * and may need it multiple times to price index scans. + * + * If the relation is partitioned, these fields will be set: + * + * part_scheme - Partitioning scheme of the relation + * nparts - Number of partitions + * boundinfo - Partition bounds + * partition_qual - Partition constraint if not the root + * part_rels - RelOptInfos for each partition + * partexprs, nullable_partexprs - Partition key expressions + * partitioned_child_rels - RT indexes of unpruned partitions of + * this relation that are partitioned tables + * themselves, in hierarchical order + * + * Note: A base relation always has only one set of partition keys, but a join + * relation may have as many sets of partition keys as the number of relations + * being joined. partexprs and nullable_partexprs are arrays containing + * part_scheme->partnatts elements each. Each of these elements is a list of + * partition key expressions. For a base relation each list in partexprs + * contains only one expression and nullable_partexprs is not populated. For a + * join relation, partexprs and nullable_partexprs contain partition key + * expressions from non-nullable and nullable relations resp. Lists at any + * given position in those arrays together contain as many elements as the + * number of joining relations. *---------- */ typedef enum RelOptKind @@ -494,7 +582,9 @@ typedef enum RelOptKind RELOPT_BASEREL, RELOPT_JOINREL, RELOPT_OTHER_MEMBER_REL, + RELOPT_OTHER_JOINREL, RELOPT_UPPER_REL, + RELOPT_OTHER_UPPER_REL, RELOPT_DEADREL } RelOptKind; @@ -507,13 +597,20 @@ typedef enum RelOptKind (rel)->reloptkind == RELOPT_OTHER_MEMBER_REL) /* Is the given relation a join relation? */ -#define IS_JOIN_REL(rel) ((rel)->reloptkind == RELOPT_JOINREL) +#define IS_JOIN_REL(rel) \ + ((rel)->reloptkind == RELOPT_JOINREL || \ + (rel)->reloptkind == RELOPT_OTHER_JOINREL) /* Is the given relation an upper relation? */ -#define IS_UPPER_REL(rel) ((rel)->reloptkind == RELOPT_UPPER_REL) +#define IS_UPPER_REL(rel) \ + ((rel)->reloptkind == RELOPT_UPPER_REL || \ + (rel)->reloptkind == RELOPT_OTHER_UPPER_REL) /* Is the given relation an "other" relation? */ -#define IS_OTHER_REL(rel) ((rel)->reloptkind == RELOPT_OTHER_MEMBER_REL) +#define IS_OTHER_REL(rel) \ + ((rel)->reloptkind == RELOPT_OTHER_MEMBER_REL || \ + (rel)->reloptkind == RELOPT_OTHER_JOINREL || \ + (rel)->reloptkind == RELOPT_OTHER_UPPER_REL) typedef struct RelOptInfo { @@ -590,19 +687,57 @@ typedef struct RelOptInfo * involving this rel */ bool has_eclass_joins; /* T means joininfo is incomplete */ - /* used by "other" relations */ - Relids top_parent_relids; /* Relids of topmost parents */ + /* used by partitionwise joins: */ + bool consider_partitionwise_join; /* consider partitionwise join + * paths? 
(if partitioned rel) */ + Relids top_parent_relids; /* Relids of topmost parents (if "other" + * rel) */ + + /* used for partitioned relations */ + PartitionScheme part_scheme; /* Partitioning scheme. */ + int nparts; /* number of partitions */ + struct PartitionBoundInfoData *boundinfo; /* Partition bounds */ + List *partition_qual; /* partition constraint */ + struct RelOptInfo **part_rels; /* Array of RelOptInfos of partitions, + * stored in the same order of bounds */ + List **partexprs; /* Non-nullable partition key expressions. */ + List **nullable_partexprs; /* Nullable partition key expressions. */ + List *partitioned_child_rels; /* List of RT indexes. */ } RelOptInfo; +/* + * Is given relation partitioned? + * + * It's not enough to test whether rel->part_scheme is set, because it might + * be that the basic partitioning properties of the input relations matched + * but the partition bounds did not. + * + * We treat dummy relations as unpartitioned. We could alternatively + * treat them as partitioned, but it's not clear whether that's a useful thing + * to do. + */ +#define IS_PARTITIONED_REL(rel) \ + ((rel)->part_scheme && (rel)->boundinfo && (rel)->nparts > 0 && \ + (rel)->part_rels && !(IS_DUMMY_REL(rel))) + +/* + * Convenience macro to make sure that a partitioned relation has all the + * required members set. + */ +#define REL_HAS_ALL_PART_PROPS(rel) \ + ((rel)->part_scheme && (rel)->boundinfo && (rel)->nparts > 0 && \ + (rel)->part_rels && (rel)->partexprs && (rel)->nullable_partexprs) + /* * IndexOptInfo * Per-index information for planning/optimization * - * indexkeys[], indexcollations[], opfamily[], and opcintype[] - * each have ncolumns entries. + * indexkeys[], indexcollations[] each have ncolumns entries. + * opfamily[], and opcintype[] each have nkeycolumns entries. They do + * not contain any information about included attributes. * - * sortopfamily[], reverse_sort[], and nulls_first[] likewise have - * ncolumns entries, if the index is ordered; but if it is unordered, + * sortopfamily[], reverse_sort[], and nulls_first[] have + * nkeycolumns entries, if the index is ordered; but if it is unordered, * those pointers are NULL. * * Zeroes in the indexkeys[] array indicate index columns that are @@ -639,7 +774,9 @@ typedef struct IndexOptInfo /* index descriptor information */ int ncolumns; /* number of columns in index */ - int *indexkeys; /* column numbers of index's keys, or 0 */ + int nkeycolumns; /* number of key columns in index */ + int *indexkeys; /* column numbers of index's attributes both + * key and included columns, or 0 */ Oid *indexcollations; /* OIDs of collations of index columns */ Oid *opfamily; /* OIDs of operator families for columns */ Oid *opcintype; /* OIDs of opclass declared input data types */ @@ -1167,6 +1304,9 @@ typedef struct CustomPath * AppendPath represents an Append plan, ie, successive execution of * several member plans. * + * For partial Append, 'subpaths' contains non-partial subpaths followed by + * partial subpaths. + * * Note: it is possible for "subpaths" to contain only one, or even no, * elements. These cases are optimized during create_append_plan. 
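/*
 * Illustrative sketch (not part of the upstream patch): typical use of the
 * new IS_PARTITIONED_REL() test together with the nparts/part_rels fields;
 * the loop body is a placeholder and the function name is hypothetical.
 */
#include "postgres.h"
#include "nodes/relation.h"

static void
walk_partitions(RelOptInfo *rel)
{
    int     i;

    if (!IS_PARTITIONED_REL(rel))
        return;                 /* unpartitioned, incomplete, or dummy rel */

    for (i = 0; i < rel->nparts; i++)
    {
        RelOptInfo *partrel = rel->part_rels[i];

        /* partrel follows the same order as rel->boundinfo's bounds */
        (void) partrel;
    }
}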
* In particular, an AppendPath with no subpaths is a "dummy" path that @@ -1178,6 +1318,9 @@ typedef struct AppendPath /* RT indexes of non-leaf tables in a partition tree */ List *partitioned_rels; List *subpaths; /* list of component Paths */ + + /* Index of first partial path in subpaths */ + int first_partial_path; } AppendPath; #define IS_DUMMY_PATH(p) \ @@ -1268,9 +1411,9 @@ typedef struct GatherPath } GatherPath; /* - * GatherMergePath runs several copies of a plan in parallel and - * collects the results. For gather merge parallel leader always execute the - * plan. + * GatherMergePath runs several copies of a plan in parallel and collects + * the results, preserving their common sort order. For gather merge, the + * parallel leader always executes the plan too, so we don't need single_copy. */ typedef struct GatherMergePath { @@ -1335,14 +1478,14 @@ typedef JoinPath NestPath; * mergejoin. If it is not NIL then it is a PathKeys list describing * the ordering that must be created by an explicit Sort node. * - * skip_mark_restore is TRUE if the executor need not do mark/restore calls. + * skip_mark_restore is true if the executor need not do mark/restore calls. * Mark/restore overhead is usually required, but can be skipped if we know * that the executor need find only one match per outer tuple, and that the * mergeclauses are sufficient to identify a match. In such cases the * executor can immediately advance the outer relation after processing a - * match, and therefoere it need never back up the inner relation. + * match, and therefore it need never back up the inner relation. * - * materialize_inner is TRUE if a Material node should be placed atop the + * materialize_inner is true if a Material node should be placed atop the * inner input. This may appear with or without an inner Sort step. */ @@ -1370,6 +1513,7 @@ typedef struct HashPath JoinPath jpath; List *path_hashclauses; /* join clauses used for hashing */ int num_batches; /* number of batches expected */ + double inner_rows_total; /* total inner rows expected */ } HashPath; /* @@ -1512,17 +1656,12 @@ typedef struct MinMaxAggPath /* * WindowAggPath represents generic computation of window functions - * - * Note: winpathkeys is separate from path.pathkeys because the actual sort - * order might be an extension of winpathkeys; but createplan.c needs to - * know exactly how many pathkeys match the window clause. */ typedef struct WindowAggPath { Path path; Path *subpath; /* path representing input source */ WindowClause *winclause; /* WindowClause we'll be using */ - List *winpathkeys; /* PathKeys for PARTITION keys + ORDER keys */ } WindowAggPath; /* @@ -1577,8 +1716,8 @@ typedef struct ModifyTablePath CmdType operation; /* INSERT, UPDATE, or DELETE */ bool canSetTag; /* do we set the command tag/es_processed? */ Index nominalRelation; /* Parent RT index for use of EXPLAIN */ - /* RT indexes of non-leaf tables in a partition tree */ - List *partitioned_rels; + Index rootRelation; /* Root RT index, if target is partitioned */ + bool partColsUpdated; /* some part key in hierarchy updated */ List *resultRelations; /* integer list of RT indexes */ List *subpaths; /* Path(s) producing source data */ List *subroots; /* per-target-table PlannerInfos */ @@ -1669,7 +1808,8 @@ typedef struct LimitPath * if we decide that it can be pushed down into the nullable side of the join. * In that case it acts as a plain filter qual for wherever it gets evaluated. 
* (In short, is_pushed_down is only false for non-degenerate outer join - * conditions. Possibly we should rename it to reflect that meaning?) + * conditions. Possibly we should rename it to reflect that meaning? But + * see also the comments for RINFO_IS_PUSHED_DOWN, below.) * * RestrictInfo nodes also contain an outerjoin_delayed flag, which is true * if the clause's applicability must be delayed due to any outer joins @@ -1746,15 +1886,15 @@ typedef struct RestrictInfo Expr *clause; /* the represented clause of WHERE or JOIN */ - bool is_pushed_down; /* TRUE if clause was pushed down in level */ + bool is_pushed_down; /* true if clause was pushed down in level */ - bool outerjoin_delayed; /* TRUE if delayed by lower outer join */ + bool outerjoin_delayed; /* true if delayed by lower outer join */ bool can_join; /* see comment above */ bool pseudoconstant; /* see comment above */ - bool leakproof; /* TRUE if known to contain no leaked Vars */ + bool leakproof; /* true if known to contain no leaked Vars */ Index security_level; /* see comment above */ @@ -1811,6 +1951,20 @@ typedef struct RestrictInfo Selectivity right_mcvfreq; /* right side's most common val's freq */ } RestrictInfo; +/* + * This macro embodies the correct way to test whether a RestrictInfo is + * "pushed down" to a given outer join, that is, should be treated as a filter + * clause rather than a join clause at that outer join. This is certainly so + * if is_pushed_down is true; but examining that is not sufficient anymore, + * because outer-join clauses will get pushed down to lower outer joins when + * we generate a path for the lower outer join that is parameterized by the + * LHS of the upper one. We can detect such a clause by noting that its + * required_relids exceed the scope of the join. + */ +#define RINFO_IS_PUSHED_DOWN(rinfo, joinrelids) \ + ((rinfo)->is_pushed_down || \ + !bms_is_subset((rinfo)->required_relids, joinrelids)) + /* * Since mergejoinscansel() is a relatively expensive function, and would * otherwise be invoked many times while planning a large join tree, @@ -1885,7 +2039,7 @@ typedef struct PlaceHolderVar * syntactically below this special join. (These are needed to help compute * min_lefthand and min_righthand for higher joins.) * - * delay_upper_joins is set TRUE if we detect a pushed-down clause that has + * delay_upper_joins is set true if we detect a pushed-down clause that has * to be evaluated after this join is formed (because it references the RHS). * Any outer joins that have such a clause and this join in their RHS cannot * commute with this join, because that would leave noplace to check the @@ -1935,10 +2089,10 @@ typedef struct SpecialJoinInfo * * When we expand an inheritable table or a UNION-ALL subselect into an * "append relation" (essentially, a list of child RTEs), we build an - * AppendRelInfo for each non-partitioned child RTE. The list of - * AppendRelInfos indicates which child RTEs must be included when expanding - * the parent, and each node carries information needed to translate Vars - * referencing the parent into Vars referencing that child. + * AppendRelInfo for each child RTE. The list of AppendRelInfos indicates + * which child RTEs must be included when expanding the parent, and each node + * carries information needed to translate Vars referencing the parent into + * Vars referencing that child. * * These structs are kept in the PlannerInfo node's append_rel_list. 
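/*
 * Illustrative sketch (not part of the upstream patch): how a caller might
 * use the new RINFO_IS_PUSHED_DOWN() macro to separate filter quals from
 * genuine join quals at an outer join.  The function name is hypothetical;
 * 'restrictlist' and 'joinrelids' are assumed to be supplied by the
 * surrounding join-planning code.
 */
#include "postgres.h"
#include "nodes/relation.h"

static void
classify_join_clauses(List *restrictlist, Relids joinrelids,
                      List **joinquals, List **filterquals)
{
    ListCell   *lc;

    *joinquals = *filterquals = NIL;
    foreach(lc, restrictlist)
    {
        RestrictInfo *rinfo = (RestrictInfo *) lfirst(lc);

        if (RINFO_IS_PUSHED_DOWN(rinfo, joinrelids))
            *filterquals = lappend(*filterquals, rinfo);
        else
            *joinquals = lappend(*joinquals, rinfo);
    }
}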
* Note that we just throw all the structs into one list, and scan the @@ -2012,25 +2166,6 @@ typedef struct AppendRelInfo Oid parent_reloid; /* OID of parent relation */ } AppendRelInfo; -/* - * For a partitioned table, this maps its RT index to the list of RT indexes - * of the partitioned child tables in the partition tree. We need to - * separately store this information, because we do not create AppendRelInfos - * for the partitioned child tables of a parent table, since AppendRelInfos - * contain information that is unnecessary for the partitioned child tables. - * The child_rels list must contain at least one element, because the parent - * partitioned table is itself counted as a child. - * - * These structs are kept in the PlannerInfo node's pcinfo_list. - */ -typedef struct PartitionedChildRelInfo -{ - NodeTag type; - - Index parent_relid; - List *child_rels; -} PartitionedChildRelInfo; - /* * For each distinct placeholder expression generated during planning, we * store a PlaceHolderInfo node in the PlannerInfo node's placeholder_list. @@ -2131,8 +2266,8 @@ typedef struct MinMaxAggInfo * from subplans (values that are setParam items for those subplans). These * IDs need not be tracked via PlannerParamItems, since we do not need any * duplicate-elimination nor later processing of the represented expressions. - * Instead, we just record the assignment of the slot number by incrementing - * root->glob->nParamExec. + * Instead, we just record the assignment of the slot number by appending to + * root->glob->paramExecTypes. */ typedef struct PlannerParamItem { @@ -2187,6 +2322,71 @@ typedef struct JoinPathExtraData Relids param_source_rels; } JoinPathExtraData; +/* + * Various flags indicating what kinds of grouping are possible. + * + * GROUPING_CAN_USE_SORT should be set if it's possible to perform + * sort-based implementations of grouping. When grouping sets are in use, + * this will be true if sorting is potentially usable for any of the grouping + * sets, even if it's not usable for all of them. + * + * GROUPING_CAN_USE_HASH should be set if it's possible to perform + * hash-based implementations of grouping. + * + * GROUPING_CAN_PARTIAL_AGG should be set if the aggregation is of a type + * for which we support partial aggregation (not, for example, grouping sets). + * It says nothing about parallel-safety or the availability of suitable paths. + */ +#define GROUPING_CAN_USE_SORT 0x0001 +#define GROUPING_CAN_USE_HASH 0x0002 +#define GROUPING_CAN_PARTIAL_AGG 0x0004 + +/* + * What kind of partitionwise aggregation is in use? + * + * PARTITIONWISE_AGGREGATE_NONE: Not used. + * + * PARTITIONWISE_AGGREGATE_FULL: Aggregate each partition separately, and + * append the results. + * + * PARTITIONWISE_AGGREGATE_PARTIAL: Partially aggregate each partition + * separately, append the results, and then finalize aggregation. + */ +typedef enum +{ + PARTITIONWISE_AGGREGATE_NONE, + PARTITIONWISE_AGGREGATE_FULL, + PARTITIONWISE_AGGREGATE_PARTIAL +} PartitionwiseAggregateType; + +/* + * Struct for extra information passed to subroutines of create_grouping_paths + * + * flags indicating what kinds of grouping are possible. + * partial_costs_set is true if the agg_partial_costs and agg_final_costs + * have been initialized. + * agg_partial_costs gives partial aggregation costs. + * agg_final_costs gives finalization costs. + * target_parallel_safe is true if target is parallel safe. + * havingQual gives list of quals to be applied after aggregation. 
+ * targetList gives list of columns to be projected. + * patype is the type of partitionwise aggregation that is being performed. + */ +typedef struct +{ + /* Data which remains constant once set. */ + int flags; + bool partial_costs_set; + AggClauseCosts agg_partial_costs; + AggClauseCosts agg_final_costs; + + /* Data which may differ across partitions. */ + bool target_parallel_safe; + Node *havingQual; + List *targetList; + PartitionwiseAggregateType patype; +} GroupPathExtraData; + /* * For speed reasons, cost estimation for join paths is performed in two * phases: the first phase tries to quickly derive a lower bound for the @@ -2221,6 +2421,7 @@ typedef struct JoinCostWorkspace /* private for cost_hashjoin code */ int numbuckets; int numbatches; + double inner_rows_total; } JoinCostWorkspace; #endif /* RELATION_H */ diff --git a/src/include/nodes/replnodes.h b/src/include/nodes/replnodes.h index dea61e90e9..66c948d85e 100644 --- a/src/include/nodes/replnodes.h +++ b/src/include/nodes/replnodes.h @@ -4,7 +4,7 @@ * definitions for replication grammar parse nodes * * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/include/nodes/replnodes.h @@ -68,6 +68,7 @@ typedef struct DropReplicationSlotCmd { NodeTag type; char *slotname; + bool wait; } DropReplicationSlotCmd; diff --git a/src/include/nodes/tidbitmap.h b/src/include/nodes/tidbitmap.h index f9a1902da8..31532e9769 100644 --- a/src/include/nodes/tidbitmap.h +++ b/src/include/nodes/tidbitmap.h @@ -13,7 +13,7 @@ * fact that a particular page needs to be visited. * * - * Copyright (c) 2003-2017, PostgreSQL Global Development Group + * Copyright (c) 2003-2018, PostgreSQL Global Development Group * * src/include/nodes/tidbitmap.h * @@ -70,5 +70,6 @@ extern void tbm_end_iterate(TBMIterator *iterator); extern void tbm_end_shared_iterate(TBMSharedIterator *iterator); extern TBMSharedIterator *tbm_attach_shared_iterate(dsa_area *dsa, dsa_pointer dp); +extern long tbm_calculate_entries(double maxbytes); #endif /* TIDBITMAP_H */ diff --git a/src/include/nodes/value.h b/src/include/nodes/value.h index 83f5a19fe9..1665714515 100644 --- a/src/include/nodes/value.h +++ b/src/include/nodes/value.h @@ -4,7 +4,7 @@ * interface for Value nodes * * - * Copyright (c) 2003-2017, PostgreSQL Global Development Group + * Copyright (c) 2003-2018, PostgreSQL Global Development Group * * src/include/nodes/value.h * @@ -34,7 +34,7 @@ * better to use the more general representation.) * * Note that an integer-looking string will get lexed as T_Float if - * the value is too large to fit in a 'long'. + * the value is too large to fit in an 'int'. * * Nulls, of course, don't need the value part at all. *---------------------- @@ -44,7 +44,7 @@ typedef struct Value NodeTag type; /* tag appropriately (eg. 
T_String) */ union ValUnion { - long ival; /* machine integer */ + int ival; /* machine integer */ char *str; /* string */ } val; } Value; @@ -53,7 +53,7 @@ typedef struct Value #define floatVal(v) atof(((Value *)(v))->val.str) #define strVal(v) (((Value *)(v))->val.str) -extern Value *makeInteger(long i); +extern Value *makeInteger(int i); extern Value *makeFloat(char *numericStr); extern Value *makeString(char *str); extern Value *makeBitString(char *str); diff --git a/src/include/optimizer/clauses.h b/src/include/optimizer/clauses.h index e3672218f3..ed854fdd40 100644 --- a/src/include/optimizer/clauses.h +++ b/src/include/optimizer/clauses.h @@ -4,7 +4,7 @@ * prototypes for clauses.c. * * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/include/optimizer/clauses.h @@ -14,9 +14,9 @@ #ifndef CLAUSES_H #define CLAUSES_H +#include "access/htup.h" #include "nodes/relation.h" - #define is_opclause(clause) ((clause) != NULL && IsA(clause, OpExpr)) #define is_funcclause(clause) ((clause) != NULL && IsA(clause, FuncExpr)) @@ -85,4 +85,7 @@ extern Node *estimate_expression_value(PlannerInfo *root, Node *node); extern Query *inline_set_returning_function(PlannerInfo *root, RangeTblEntry *rte); +extern List *expand_function_arguments(List *args, Oid result_type, + HeapTuple func_tuple); + #endif /* CLAUSES_H */ diff --git a/src/include/optimizer/cost.h b/src/include/optimizer/cost.h index 63feba06e7..77ca7ff837 100644 --- a/src/include/optimizer/cost.h +++ b/src/include/optimizer/cost.h @@ -4,7 +4,7 @@ * prototypes for costsize.c and clausesel.c. * * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/include/optimizer/cost.h @@ -53,21 +53,26 @@ extern PGDLLIMPORT double cpu_operator_cost; extern PGDLLIMPORT double parallel_tuple_cost; extern PGDLLIMPORT double parallel_setup_cost; extern PGDLLIMPORT int effective_cache_size; -extern Cost disable_cost; -extern int max_parallel_workers_per_gather; -extern bool enable_seqscan; -extern bool enable_indexscan; -extern bool enable_indexonlyscan; -extern bool enable_bitmapscan; -extern bool enable_tidscan; -extern bool enable_sort; -extern bool enable_hashagg; -extern bool enable_nestloop; -extern bool enable_material; -extern bool enable_mergejoin; -extern bool enable_hashjoin; -extern bool enable_gathermerge; -extern int constraint_exclusion; +extern PGDLLIMPORT Cost disable_cost; +extern PGDLLIMPORT int max_parallel_workers_per_gather; +extern PGDLLIMPORT bool enable_seqscan; +extern PGDLLIMPORT bool enable_indexscan; +extern PGDLLIMPORT bool enable_indexonlyscan; +extern PGDLLIMPORT bool enable_bitmapscan; +extern PGDLLIMPORT bool enable_tidscan; +extern PGDLLIMPORT bool enable_sort; +extern PGDLLIMPORT bool enable_hashagg; +extern PGDLLIMPORT bool enable_nestloop; +extern PGDLLIMPORT bool enable_material; +extern PGDLLIMPORT bool enable_mergejoin; +extern PGDLLIMPORT bool enable_hashjoin; +extern PGDLLIMPORT bool enable_gathermerge; +extern PGDLLIMPORT bool enable_partitionwise_join; +extern PGDLLIMPORT bool enable_partitionwise_aggregate; +extern PGDLLIMPORT bool enable_parallel_append; +extern PGDLLIMPORT bool enable_parallel_hash; +extern PGDLLIMPORT bool enable_partition_pruning; +extern PGDLLIMPORT 
int constraint_exclusion; extern double clamp_row_est(double nrows); extern double index_pages_fetched(double tuples_fetched, BlockNumber pages, @@ -105,6 +110,7 @@ extern void cost_sort(Path *path, PlannerInfo *root, List *pathkeys, Cost input_cost, double tuples, int width, Cost comparison_cost, int sort_mem, double limit_tuples); +extern void cost_append(AppendPath *path); extern void cost_merge_append(Path *path, PlannerInfo *root, List *pathkeys, int n_streams, Cost input_startup_cost, Cost input_total_cost, @@ -115,6 +121,7 @@ extern void cost_material(Path *path, extern void cost_agg(Path *path, PlannerInfo *root, AggStrategy aggstrategy, const AggClauseCosts *aggcosts, int numGroupCols, double numGroups, + List *quals, Cost input_startup_cost, Cost input_total_cost, double input_tuples); extern void cost_windowagg(Path *path, PlannerInfo *root, @@ -123,6 +130,7 @@ extern void cost_windowagg(Path *path, PlannerInfo *root, double input_tuples); extern void cost_group(Path *path, PlannerInfo *root, int numGroupCols, double numGroups, + List *quals, Cost input_startup_cost, Cost input_total_cost, double input_tuples); extern void initial_cost_nestloop(PlannerInfo *root, @@ -148,7 +156,8 @@ extern void initial_cost_hashjoin(PlannerInfo *root, JoinType jointype, List *hashclauses, Path *outer_path, Path *inner_path, - JoinPathExtraData *extra); + JoinPathExtraData *extra, + bool parallel_hash); extern void final_cost_hashjoin(PlannerInfo *root, HashPath *path, JoinCostWorkspace *workspace, JoinPathExtraData *extra); @@ -158,6 +167,7 @@ extern void cost_subplan(PlannerInfo *root, SubPlan *subplan, Plan *plan); extern void cost_qual_eval(QualCost *cost, List *quals, PlannerInfo *root); extern void cost_qual_eval_node(QualCost *cost, Node *qual, PlannerInfo *root); extern void compute_semi_anti_join_factors(PlannerInfo *root, + RelOptInfo *joinrel, RelOptInfo *outerrel, RelOptInfo *innerrel, JoinType jointype, diff --git a/src/include/optimizer/geqo.h b/src/include/optimizer/geqo.h index d0158d7adf..4ae4b6374a 100644 --- a/src/include/optimizer/geqo.h +++ b/src/include/optimizer/geqo.h @@ -3,7 +3,7 @@ * geqo.h * prototypes for various files in optimizer/geqo * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/include/optimizer/geqo.h diff --git a/src/include/optimizer/geqo_copy.h b/src/include/optimizer/geqo_copy.h index 4035b4ff13..f70786bef4 100644 --- a/src/include/optimizer/geqo_copy.h +++ b/src/include/optimizer/geqo_copy.h @@ -3,7 +3,7 @@ * geqo_copy.h * prototypes for copy functions in optimizer/geqo * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/include/optimizer/geqo_copy.h diff --git a/src/include/optimizer/geqo_gene.h b/src/include/optimizer/geqo_gene.h index 3282193284..3ddd268449 100644 --- a/src/include/optimizer/geqo_gene.h +++ b/src/include/optimizer/geqo_gene.h @@ -3,7 +3,7 @@ * geqo_gene.h * genome representation in optimizer/geqo * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/include/optimizer/geqo_gene.h diff --git 
a/src/include/optimizer/geqo_misc.h b/src/include/optimizer/geqo_misc.h index 89f91b3a4d..26a3669006 100644 --- a/src/include/optimizer/geqo_misc.h +++ b/src/include/optimizer/geqo_misc.h @@ -3,7 +3,7 @@ * geqo_misc.h * prototypes for printout routines in optimizer/geqo * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/include/optimizer/geqo_misc.h diff --git a/src/include/optimizer/geqo_mutation.h b/src/include/optimizer/geqo_mutation.h index c5a94e03cf..c9a0025523 100644 --- a/src/include/optimizer/geqo_mutation.h +++ b/src/include/optimizer/geqo_mutation.h @@ -3,7 +3,7 @@ * geqo_mutation.h * prototypes for mutation functions in optimizer/geqo * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/include/optimizer/geqo_mutation.h diff --git a/src/include/optimizer/geqo_pool.h b/src/include/optimizer/geqo_pool.h index 9fac307d69..eb343412f8 100644 --- a/src/include/optimizer/geqo_pool.h +++ b/src/include/optimizer/geqo_pool.h @@ -3,7 +3,7 @@ * geqo_pool.h * pool representation in optimizer/geqo * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/include/optimizer/geqo_pool.h diff --git a/src/include/optimizer/geqo_random.h b/src/include/optimizer/geqo_random.h index 2665a096b3..03bd0ae8eb 100644 --- a/src/include/optimizer/geqo_random.h +++ b/src/include/optimizer/geqo_random.h @@ -3,7 +3,7 @@ * geqo_random.h * random number generator * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/include/optimizer/geqo_random.h diff --git a/src/include/optimizer/geqo_recombination.h b/src/include/optimizer/geqo_recombination.h index 60286c6c27..3ca89d8091 100644 --- a/src/include/optimizer/geqo_recombination.h +++ b/src/include/optimizer/geqo_recombination.h @@ -3,7 +3,7 @@ * geqo_recombination.h * prototypes for recombination in the genetic query optimizer * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/include/optimizer/geqo_recombination.h diff --git a/src/include/optimizer/geqo_selection.h b/src/include/optimizer/geqo_selection.h index 69ce2b4b8a..d6bea3c2bd 100644 --- a/src/include/optimizer/geqo_selection.h +++ b/src/include/optimizer/geqo_selection.h @@ -3,7 +3,7 @@ * geqo_selection.h * prototypes for selection routines in optimizer/geqo * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/include/optimizer/geqo_selection.h diff --git a/src/include/optimizer/joininfo.h b/src/include/optimizer/joininfo.h index 4450d81408..48f6d625e2 100644 --- a/src/include/optimizer/joininfo.h +++ b/src/include/optimizer/joininfo.h @@ -4,7 +4,7 @@ * prototypes for 
joininfo.c. * * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/include/optimizer/joininfo.h diff --git a/src/include/optimizer/orclauses.h b/src/include/optimizer/orclauses.h index a3d8e1d9f9..2154e66746 100644 --- a/src/include/optimizer/orclauses.h +++ b/src/include/optimizer/orclauses.h @@ -4,7 +4,7 @@ * prototypes for orclauses.c. * * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/include/optimizer/orclauses.h diff --git a/src/include/optimizer/pathnode.h b/src/include/optimizer/pathnode.h index e372f8862b..81abcf53a8 100644 --- a/src/include/optimizer/pathnode.h +++ b/src/include/optimizer/pathnode.h @@ -4,7 +4,7 @@ * prototypes for pathnode.c, relnode.c. * * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/include/optimizer/pathnode.h @@ -14,6 +14,7 @@ #ifndef PATHNODE_H #define PATHNODE_H +#include "nodes/bitmapset.h" #include "nodes/relation.h" @@ -63,9 +64,11 @@ extern BitmapOrPath *create_bitmap_or_path(PlannerInfo *root, List *bitmapquals); extern TidPath *create_tidscan_path(PlannerInfo *root, RelOptInfo *rel, List *tidquals, Relids required_outer); -extern AppendPath *create_append_path(RelOptInfo *rel, List *subpaths, - Relids required_outer, int parallel_workers, - List *partitioned_rels); +extern AppendPath *create_append_path(PlannerInfo *root, RelOptInfo *rel, + List *subpaths, List *partial_subpaths, + Relids required_outer, + int parallel_workers, bool parallel_aware, + List *partitioned_rels, double rows); extern MergeAppendPath *create_merge_append_path(PlannerInfo *root, RelOptInfo *rel, List *subpaths, @@ -150,6 +153,7 @@ extern HashPath *create_hashjoin_path(PlannerInfo *root, JoinPathExtraData *extra, Path *outer_path, Path *inner_path, + bool parallel_hash, List *restrict_clauses, Relids required_outer, List *hashclauses); @@ -174,7 +178,6 @@ extern SortPath *create_sort_path(PlannerInfo *root, extern GroupPath *create_group_path(PlannerInfo *root, RelOptInfo *rel, Path *subpath, - PathTarget *target, List *groupClause, List *qual, double numGroups); @@ -196,7 +199,6 @@ extern AggPath *create_agg_path(PlannerInfo *root, extern GroupingSetsPath *create_groupingsets_path(PlannerInfo *root, RelOptInfo *rel, Path *subpath, - PathTarget *target, List *having_qual, AggStrategy aggstrategy, List *rollups, @@ -212,8 +214,7 @@ extern WindowAggPath *create_windowagg_path(PlannerInfo *root, Path *subpath, PathTarget *target, List *windowFuncs, - WindowClause *winclause, - List *winpathkeys); + WindowClause *winclause); extern SetOpPath *create_setop_path(PlannerInfo *root, RelOptInfo *rel, Path *subpath, @@ -237,7 +238,8 @@ extern LockRowsPath *create_lockrows_path(PlannerInfo *root, RelOptInfo *rel, extern ModifyTablePath *create_modifytable_path(PlannerInfo *root, RelOptInfo *rel, CmdType operation, bool canSetTag, - Index nominalRelation, List *partitioned_rels, + Index nominalRelation, Index rootRelation, + bool partColsUpdated, List *resultRelations, List *subpaths, List *subroots, List *withCheckOptionLists, List *returningLists, @@ -251,11 
+253,14 @@ extern LimitPath *create_limit_path(PlannerInfo *root, RelOptInfo *rel, extern Path *reparameterize_path(PlannerInfo *root, Path *path, Relids required_outer, double loop_count); +extern Path *reparameterize_path_by_child(PlannerInfo *root, Path *path, + RelOptInfo *child_rel); /* * prototypes for relnode.c */ extern void setup_simple_rel_arrays(PlannerInfo *root); +extern void setup_append_rel_array(PlannerInfo *root); extern RelOptInfo *build_simple_rel(PlannerInfo *root, int relid, RelOptInfo *parent); extern RelOptInfo *find_base_rel(PlannerInfo *root, int relid); @@ -273,8 +278,6 @@ extern Relids min_join_parameterization(PlannerInfo *root, extern RelOptInfo *build_empty_join_rel(PlannerInfo *root); extern RelOptInfo *fetch_upper_rel(PlannerInfo *root, UpperRelationKind kind, Relids relids); -extern AppendRelInfo *find_childrel_appendrelinfo(PlannerInfo *root, - RelOptInfo *rel); extern Relids find_childrel_parents(PlannerInfo *root, RelOptInfo *rel); extern ParamPathInfo *get_baserel_parampathinfo(PlannerInfo *root, RelOptInfo *baserel, @@ -290,5 +293,9 @@ extern ParamPathInfo *get_appendrel_parampathinfo(RelOptInfo *appendrel, Relids required_outer); extern ParamPathInfo *find_param_path_info(RelOptInfo *rel, Relids required_outer); +extern RelOptInfo *build_child_join_rel(PlannerInfo *root, + RelOptInfo *outer_rel, RelOptInfo *inner_rel, + RelOptInfo *parent_joinrel, List *restrictlist, + SpecialJoinInfo *sjinfo, JoinType jointype); #endif /* PATHNODE_H */ diff --git a/src/include/optimizer/paths.h b/src/include/optimizer/paths.h index 4e06b2e299..cafde307ad 100644 --- a/src/include/optimizer/paths.h +++ b/src/include/optimizer/paths.h @@ -4,7 +4,7 @@ * prototypes for various files in optimizer/path * * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/include/optimizer/paths.h @@ -20,10 +20,10 @@ /* * allpaths.c */ -extern bool enable_geqo; -extern int geqo_threshold; -extern int min_parallel_table_scan_size; -extern int min_parallel_index_scan_size; +extern PGDLLIMPORT bool enable_geqo; +extern PGDLLIMPORT int geqo_threshold; +extern PGDLLIMPORT int min_parallel_table_scan_size; +extern PGDLLIMPORT int min_parallel_index_scan_size; /* Hook for plugins to get control in set_rel_pathlist() */ typedef void (*set_rel_pathlist_hook_type) (PlannerInfo *root, @@ -53,11 +53,14 @@ extern void set_dummy_rel_pathlist(RelOptInfo *rel); extern RelOptInfo *standard_join_search(PlannerInfo *root, int levels_needed, List *initial_rels); -extern void generate_gather_paths(PlannerInfo *root, RelOptInfo *rel); +extern void generate_gather_paths(PlannerInfo *root, RelOptInfo *rel, + bool override_rows); extern int compute_parallel_worker(RelOptInfo *rel, double heap_pages, - double index_pages); + double index_pages, int max_workers); extern void create_partial_bitmap_paths(PlannerInfo *root, RelOptInfo *rel, Path *bitmapqual); +extern void generate_partitionwise_join_paths(PlannerInfo *root, + RelOptInfo *rel); #ifdef OPTIMIZER_DEBUG extern void debug_print_rel(PlannerInfo *root, RelOptInfo *rel); @@ -111,6 +114,10 @@ extern bool have_join_order_restriction(PlannerInfo *root, RelOptInfo *rel1, RelOptInfo *rel2); extern bool have_dangerous_phv(PlannerInfo *root, Relids outer_relids, Relids inner_params); +extern void mark_dummy_rel(RelOptInfo *rel); +extern bool have_partkey_equi_join(RelOptInfo *joinrel, + 
RelOptInfo *rel1, RelOptInfo *rel2, + JoinType jointype, List *restrictlist); /* * equivclass.c @@ -122,7 +129,8 @@ typedef bool (*ec_matches_callback_type) (PlannerInfo *root, EquivalenceMember *em, void *arg); -extern bool process_equivalence(PlannerInfo *root, RestrictInfo *restrictinfo, +extern bool process_equivalence(PlannerInfo *root, + RestrictInfo **p_restrictinfo, bool below_outer_join); extern Expr *canonicalize_ec_expression(Expr *expr, Oid req_type, Oid req_collation); @@ -210,16 +218,18 @@ extern void initialize_mergeclause_eclasses(PlannerInfo *root, RestrictInfo *restrictinfo); extern void update_mergeclause_eclasses(PlannerInfo *root, RestrictInfo *restrictinfo); -extern List *find_mergeclauses_for_pathkeys(PlannerInfo *root, - List *pathkeys, - bool outer_keys, - List *restrictinfos); +extern List *find_mergeclauses_for_outer_pathkeys(PlannerInfo *root, + List *pathkeys, + List *restrictinfos); extern List *select_outer_pathkeys_for_merge(PlannerInfo *root, List *mergeclauses, RelOptInfo *joinrel); extern List *make_inner_pathkeys_for_merge(PlannerInfo *root, List *mergeclauses, List *outer_pathkeys); +extern List *trim_mergeclauses_for_inner_pathkeys(PlannerInfo *root, + List *mergeclauses, + List *pathkeys); extern List *truncate_useless_pathkeys(PlannerInfo *root, RelOptInfo *rel, List *pathkeys); @@ -227,5 +237,7 @@ extern bool has_useful_pathkeys(PlannerInfo *root, RelOptInfo *rel); extern PathKey *make_canonical_pathkey(PlannerInfo *root, EquivalenceClass *eclass, Oid opfamily, int strategy, bool nulls_first); +extern void add_paths_to_append_rel(PlannerInfo *root, RelOptInfo *rel, + List *live_childrels); #endif /* PATHS_H */ diff --git a/src/include/optimizer/placeholder.h b/src/include/optimizer/placeholder.h index 5a4d46ba9d..91ebdb90fc 100644 --- a/src/include/optimizer/placeholder.h +++ b/src/include/optimizer/placeholder.h @@ -4,7 +4,7 @@ * prototypes for optimizer/util/placeholder.c. * * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/include/optimizer/placeholder.h @@ -28,5 +28,7 @@ extern void fix_placeholder_input_needed_levels(PlannerInfo *root); extern void add_placeholders_to_base_rels(PlannerInfo *root); extern void add_placeholders_to_joinrel(PlannerInfo *root, RelOptInfo *joinrel, RelOptInfo *outer_rel, RelOptInfo *inner_rel); +extern void add_placeholders_to_child_joinrel(PlannerInfo *root, + RelOptInfo *childrel, RelOptInfo *parentrel); #endif /* PLACEHOLDER_H */ diff --git a/src/include/optimizer/plancat.h b/src/include/optimizer/plancat.h index 71f0faf938..7d53cbbb87 100644 --- a/src/include/optimizer/plancat.h +++ b/src/include/optimizer/plancat.h @@ -4,7 +4,7 @@ * prototypes for plancat.c. 
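/*
 * Illustrative sketch (not part of the upstream patch): with the extra
 * max_workers argument to compute_parallel_worker(), a caller sizing a
 * parallel sequential scan now passes the cap explicitly.  The wrapper name
 * is hypothetical; 'rel' is assumed to be the baserel being scanned.
 */
#include "postgres.h"
#include "optimizer/cost.h"
#include "optimizer/paths.h"

static int
workers_for_seqscan(RelOptInfo *rel)
{
    /* -1 means "no index pages"; cap at the per-Gather GUC limit */
    return compute_parallel_worker(rel, rel->pages, -1,
                                   max_parallel_workers_per_gather);
}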
* * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/include/optimizer/plancat.h diff --git a/src/include/optimizer/planmain.h b/src/include/optimizer/planmain.h index f1d16cffab..c8ab0280d2 100644 --- a/src/include/optimizer/planmain.h +++ b/src/include/optimizer/planmain.h @@ -4,7 +4,7 @@ * prototypes for various files in optimizer/plan * * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/include/optimizer/planmain.h @@ -29,6 +29,7 @@ typedef enum #define DEFAULT_CURSOR_TUPLE_FRACTION 0.1 extern double cursor_tuple_fraction; extern int force_parallel_mode; +extern bool parallel_leader_participation; /* query_planner callback to compute query_pathkeys */ typedef void (*query_pathkeys_callback) (PlannerInfo *root, void *extra); @@ -107,7 +108,7 @@ extern void reduce_unique_semijoins(PlannerInfo *root); extern bool query_supports_distinctness(Query *query); extern bool query_is_distinct_for(Query *query, List *colnos, List *opids); extern bool innerrel_is_unique(PlannerInfo *root, - Relids outerrelids, RelOptInfo *innerrel, + Relids joinrelids, Relids outerrelids, RelOptInfo *innerrel, JoinType jointype, List *restrictlist, bool force_cache); /* diff --git a/src/include/optimizer/planner.h b/src/include/optimizer/planner.h index 2a4cf71e10..3e733b34ed 100644 --- a/src/include/optimizer/planner.h +++ b/src/include/optimizer/planner.h @@ -4,7 +4,7 @@ * prototypes for planner.c. * * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/include/optimizer/planner.h @@ -28,7 +28,8 @@ extern PGDLLIMPORT planner_hook_type planner_hook; typedef void (*create_upper_paths_hook_type) (PlannerInfo *root, UpperRelationKind stage, RelOptInfo *input_rel, - RelOptInfo *output_rel); + RelOptInfo *output_rel, + void *extra); extern PGDLLIMPORT create_upper_paths_hook_type create_upper_paths_hook; @@ -46,6 +47,8 @@ extern bool is_dummy_plan(Plan *plan); extern RowMarkType select_rowmark_type(RangeTblEntry *rte, LockClauseStrength strength); +extern bool limit_needed(Query *parse); + extern void mark_partial_aggref(Aggref *agg, AggSplit aggsplit); extern Path *get_cheapest_fractional_path(RelOptInfo *rel, @@ -56,7 +59,6 @@ extern Expr *expression_planner(Expr *expr); extern Expr *preprocess_phv_expression(PlannerInfo *root, Expr *expr); extern bool plan_cluster_use_sort(Oid tableOid, Oid indexOid); - -extern List *get_partitioned_child_rels(PlannerInfo *root, Index rti); +extern int plan_create_index_workers(Oid tableOid, Oid indexOid); #endif /* PLANNER_H */ diff --git a/src/include/optimizer/predtest.h b/src/include/optimizer/predtest.h index bccb8d63a9..69d87ea5c5 100644 --- a/src/include/optimizer/predtest.h +++ b/src/include/optimizer/predtest.h @@ -4,7 +4,7 @@ * prototypes for predtest.c * * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/include/optimizer/predtest.h @@ -18,8 +18,8 @@ extern bool 
predicate_implied_by(List *predicate_list, List *clause_list, - bool clause_is_check); + bool weak); extern bool predicate_refuted_by(List *predicate_list, List *clause_list, - bool clause_is_check); + bool weak); #endif /* PREDTEST_H */ diff --git a/src/include/optimizer/prep.h b/src/include/optimizer/prep.h index 4be0afd566..38608770a2 100644 --- a/src/include/optimizer/prep.h +++ b/src/include/optimizer/prep.h @@ -4,7 +4,7 @@ * prototypes for files in optimizer/prep/ * * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/include/optimizer/prep.h @@ -33,15 +33,12 @@ extern Relids get_relids_for_join(PlannerInfo *root, int joinrelid); * prototypes for prepqual.c */ extern Node *negate_clause(Node *node); -extern Expr *canonicalize_qual(Expr *qual); +extern Expr *canonicalize_qual(Expr *qual, bool is_check); /* * prototypes for preptlist.c */ -extern List *preprocess_targetlist(PlannerInfo *root, List *tlist); - -extern List *preprocess_onconflict_targetlist(List *tlist, - int result_relation, List *range_table); +extern List *preprocess_targetlist(PlannerInfo *root); extern PlanRowMark *get_plan_rowmark(List *rowmarks, Index rtindex); @@ -62,4 +59,10 @@ extern Node *adjust_appendrel_attrs_multilevel(PlannerInfo *root, Node *node, extern AppendRelInfo **find_appinfos_by_relids(PlannerInfo *root, Relids relids, int *nappinfos); +extern SpecialJoinInfo *build_child_join_sjinfo(PlannerInfo *root, + SpecialJoinInfo *parent_sjinfo, + Relids left_relids, Relids right_relids); +extern Relids adjust_child_relids_multilevel(PlannerInfo *root, Relids relids, + Relids child_relids, Relids top_parent_relids); + #endif /* PREP_H */ diff --git a/src/include/optimizer/restrictinfo.h b/src/include/optimizer/restrictinfo.h index b2a69998fd..a734d798c1 100644 --- a/src/include/optimizer/restrictinfo.h +++ b/src/include/optimizer/restrictinfo.h @@ -4,7 +4,7 @@ * prototypes for restrictinfo.c. * * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/include/optimizer/restrictinfo.h @@ -36,6 +36,7 @@ extern List *get_actual_clauses(List *restrictinfo_list); extern List *extract_actual_clauses(List *restrictinfo_list, bool pseudoconstant); extern void extract_actual_join_clauses(List *restrictinfo_list, + Relids joinrelids, List **joinquals, List **otherquals); extern bool join_clause_is_movable_to(RestrictInfo *rinfo, RelOptInfo *baserel); diff --git a/src/include/optimizer/subselect.h b/src/include/optimizer/subselect.h index ecd2011d54..d28c993b3a 100644 --- a/src/include/optimizer/subselect.h +++ b/src/include/optimizer/subselect.h @@ -2,7 +2,7 @@ * * subselect.h * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/include/optimizer/subselect.h diff --git a/src/include/optimizer/tlist.h b/src/include/optimizer/tlist.h index 0d3ec920dd..9fa52e1278 100644 --- a/src/include/optimizer/tlist.h +++ b/src/include/optimizer/tlist.h @@ -4,7 +4,7 @@ * prototypes for tlist.c. 
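/*
 * Illustrative sketch (not part of the upstream patch): the renamed boolean
 * now selects the flavour of implication -- false requests the "strong"
 * form, true the "weak" form.  The wrapper name is hypothetical; 'indexpred'
 * and 'clauses' are assumed to be expression/RestrictInfo lists supplied by
 * the caller.
 */
#include "postgres.h"
#include "optimizer/predtest.h"

static bool
index_predicate_is_satisfied(List *indexpred, List *clauses)
{
    /* partial-index predicate proofs want strong implication, so weak = false */
    return predicate_implied_by(indexpred, clauses, false);
}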
* * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/include/optimizer/tlist.h diff --git a/src/include/optimizer/var.h b/src/include/optimizer/var.h index 61861528af..43c53b5344 100644 --- a/src/include/optimizer/var.h +++ b/src/include/optimizer/var.h @@ -4,7 +4,7 @@ * prototypes for optimizer/util/var.c. * * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/include/optimizer/var.h diff --git a/src/include/parser/analyze.h b/src/include/parser/analyze.h index 40f22339f1..7b5b90c4b3 100644 --- a/src/include/parser/analyze.h +++ b/src/include/parser/analyze.h @@ -4,7 +4,7 @@ * parse analysis for optimizable statements * * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/include/parser/analyze.h @@ -43,4 +43,7 @@ extern void applyLockingClause(Query *qry, Index rtindex, LockClauseStrength strength, LockWaitPolicy waitPolicy, bool pushedDown); +extern List *BuildOnConflictExcludedTargetlist(Relation targetrel, + Index exclRelIndex); + #endif /* ANALYZE_H */ diff --git a/src/include/parser/gramparse.h b/src/include/parser/gramparse.h index b6b67fb92c..42e7edee6d 100644 --- a/src/include/parser/gramparse.h +++ b/src/include/parser/gramparse.h @@ -8,7 +8,7 @@ * Definitions that are needed outside the core parser should be in parser.h. * * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/include/parser/gramparse.h diff --git a/src/include/parser/kwlist.h b/src/include/parser/kwlist.h index f50e45e886..23db40147b 100644 --- a/src/include/parser/kwlist.h +++ b/src/include/parser/kwlist.h @@ -7,7 +7,7 @@ * by the PG_KEYWORD macro, which is not defined in this file; it can * be defined by the caller for special purposes. 
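/*
 * Illustrative sketch (not part of the upstream patch): the newly exported
 * BuildOnConflictExcludedTargetlist() builds the targetlist for the EXCLUDED
 * pseudo-relation of ON CONFLICT DO UPDATE.  The wrapper name is
 * hypothetical; 'targetrel' is assumed to be the already-opened result
 * relation and 'exclRelIndex' its EXCLUDED RT index.
 */
#include "postgres.h"
#include "parser/analyze.h"

static List *
excluded_tlist(Relation targetrel, Index exclRelIndex)
{
    return BuildOnConflictExcludedTargetlist(targetrel, exclRelIndex);
}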
* - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * IDENTIFICATION @@ -63,6 +63,7 @@ PG_KEYWORD("boolean", BOOLEAN_P, COL_NAME_KEYWORD) PG_KEYWORD("both", BOTH, RESERVED_KEYWORD) PG_KEYWORD("by", BY, UNRESERVED_KEYWORD) PG_KEYWORD("cache", CACHE, UNRESERVED_KEYWORD) +PG_KEYWORD("call", CALL, UNRESERVED_KEYWORD) PG_KEYWORD("called", CALLED, UNRESERVED_KEYWORD) PG_KEYWORD("cascade", CASCADE, UNRESERVED_KEYWORD) PG_KEYWORD("cascaded", CASCADED, UNRESERVED_KEYWORD) @@ -181,6 +182,7 @@ PG_KEYWORD("granted", GRANTED, UNRESERVED_KEYWORD) PG_KEYWORD("greatest", GREATEST, COL_NAME_KEYWORD) PG_KEYWORD("group", GROUP_P, RESERVED_KEYWORD) PG_KEYWORD("grouping", GROUPING, COL_NAME_KEYWORD) +PG_KEYWORD("groups", GROUPS, UNRESERVED_KEYWORD) PG_KEYWORD("handler", HANDLER, UNRESERVED_KEYWORD) PG_KEYWORD("having", HAVING, RESERVED_KEYWORD) PG_KEYWORD("header", HEADER_P, UNRESERVED_KEYWORD) @@ -194,6 +196,7 @@ PG_KEYWORD("immutable", IMMUTABLE, UNRESERVED_KEYWORD) PG_KEYWORD("implicit", IMPLICIT_P, UNRESERVED_KEYWORD) PG_KEYWORD("import", IMPORT_P, UNRESERVED_KEYWORD) PG_KEYWORD("in", IN_P, RESERVED_KEYWORD) +PG_KEYWORD("include", INCLUDE, UNRESERVED_KEYWORD) PG_KEYWORD("including", INCLUDING, UNRESERVED_KEYWORD) PG_KEYWORD("increment", INCREMENT, UNRESERVED_KEYWORD) PG_KEYWORD("index", INDEX, UNRESERVED_KEYWORD) @@ -282,6 +285,7 @@ PG_KEYWORD("options", OPTIONS, UNRESERVED_KEYWORD) PG_KEYWORD("or", OR, RESERVED_KEYWORD) PG_KEYWORD("order", ORDER, RESERVED_KEYWORD) PG_KEYWORD("ordinality", ORDINALITY, UNRESERVED_KEYWORD) +PG_KEYWORD("others", OTHERS, UNRESERVED_KEYWORD) PG_KEYWORD("out", OUT_P, COL_NAME_KEYWORD) PG_KEYWORD("outer", OUTER_P, TYPE_FUNC_NAME_KEYWORD) PG_KEYWORD("over", OVER, UNRESERVED_KEYWORD) @@ -310,6 +314,7 @@ PG_KEYWORD("prior", PRIOR, UNRESERVED_KEYWORD) PG_KEYWORD("privileges", PRIVILEGES, UNRESERVED_KEYWORD) PG_KEYWORD("procedural", PROCEDURAL, UNRESERVED_KEYWORD) PG_KEYWORD("procedure", PROCEDURE, UNRESERVED_KEYWORD) +PG_KEYWORD("procedures", PROCEDURES, UNRESERVED_KEYWORD) PG_KEYWORD("program", PROGRAM, UNRESERVED_KEYWORD) PG_KEYWORD("publication", PUBLICATION, UNRESERVED_KEYWORD) PG_KEYWORD("quote", QUOTE, UNRESERVED_KEYWORD) @@ -340,6 +345,8 @@ PG_KEYWORD("right", RIGHT, TYPE_FUNC_NAME_KEYWORD) PG_KEYWORD("role", ROLE, UNRESERVED_KEYWORD) PG_KEYWORD("rollback", ROLLBACK, UNRESERVED_KEYWORD) PG_KEYWORD("rollup", ROLLUP, UNRESERVED_KEYWORD) +PG_KEYWORD("routine", ROUTINE, UNRESERVED_KEYWORD) +PG_KEYWORD("routines", ROUTINES, UNRESERVED_KEYWORD) PG_KEYWORD("row", ROW, COL_NAME_KEYWORD) PG_KEYWORD("rows", ROWS, UNRESERVED_KEYWORD) PG_KEYWORD("rule", RULE, UNRESERVED_KEYWORD) @@ -393,6 +400,7 @@ PG_KEYWORD("template", TEMPLATE, UNRESERVED_KEYWORD) PG_KEYWORD("temporary", TEMPORARY, UNRESERVED_KEYWORD) PG_KEYWORD("text", TEXT_P, UNRESERVED_KEYWORD) PG_KEYWORD("then", THEN, RESERVED_KEYWORD) +PG_KEYWORD("ties", TIES, UNRESERVED_KEYWORD) PG_KEYWORD("time", TIME, COL_NAME_KEYWORD) PG_KEYWORD("timestamp", TIMESTAMP, COL_NAME_KEYWORD) PG_KEYWORD("to", TO, RESERVED_KEYWORD) diff --git a/src/include/parser/parse_agg.h b/src/include/parser/parse_agg.h index 6947a0186e..0e6adffe57 100644 --- a/src/include/parser/parse_agg.h +++ b/src/include/parser/parse_agg.h @@ -3,7 +3,7 @@ * parse_agg.h * handle aggregates and window functions in parser * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions 
Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/include/parser/parse_agg.h diff --git a/src/include/parser/parse_clause.h b/src/include/parser/parse_clause.h index 1d205c6327..2c0e092862 100644 --- a/src/include/parser/parse_clause.h +++ b/src/include/parser/parse_clause.h @@ -4,7 +4,7 @@ * handle clauses in parser * * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/include/parser/parse_clause.h diff --git a/src/include/parser/parse_coerce.h b/src/include/parser/parse_coerce.h index 06f65293cb..93c1ab4ef8 100644 --- a/src/include/parser/parse_coerce.h +++ b/src/include/parser/parse_coerce.h @@ -4,7 +4,7 @@ * Routines for type coercion. * * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/include/parser/parse_coerce.h @@ -41,16 +41,15 @@ extern Node *coerce_to_target_type(ParseState *pstate, CoercionContext ccontext, CoercionForm cformat, int location); -extern bool can_coerce_type(int nargs, Oid *input_typeids, Oid *target_typeids, +extern bool can_coerce_type(int nargs, const Oid *input_typeids, const Oid *target_typeids, CoercionContext ccontext); extern Node *coerce_type(ParseState *pstate, Node *node, Oid inputTypeId, Oid targetTypeId, int32 targetTypeMod, CoercionContext ccontext, CoercionForm cformat, int location); extern Node *coerce_to_domain(Node *arg, Oid baseTypeId, int32 baseTypeMod, Oid typeId, - CoercionForm cformat, int location, - bool hideInputCoercion, - bool lengthCoercionDone); + CoercionContext ccontext, CoercionForm cformat, int location, + bool hideInputCoercion); extern Node *coerce_to_boolean(ParseState *pstate, Node *node, const char *constructName); @@ -72,10 +71,10 @@ extern Node *coerce_to_common_type(ParseState *pstate, Node *node, Oid targetTypeId, const char *context); -extern bool check_generic_type_consistency(Oid *actual_arg_types, - Oid *declared_arg_types, +extern bool check_generic_type_consistency(const Oid *actual_arg_types, + const Oid *declared_arg_types, int nargs); -extern Oid enforce_generic_type_consistency(Oid *actual_arg_types, +extern Oid enforce_generic_type_consistency(const Oid *actual_arg_types, Oid *declared_arg_types, int nargs, Oid rettype, diff --git a/src/include/parser/parse_collate.h b/src/include/parser/parse_collate.h index 7279fa4e7c..aea297ce38 100644 --- a/src/include/parser/parse_collate.h +++ b/src/include/parser/parse_collate.h @@ -4,7 +4,7 @@ * Routines for assigning collation information. 
* * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/include/parser/parse_collate.h diff --git a/src/include/parser/parse_cte.h b/src/include/parser/parse_cte.h index 695e88c7ed..7e862d1906 100644 --- a/src/include/parser/parse_cte.h +++ b/src/include/parser/parse_cte.h @@ -4,7 +4,7 @@ * handle CTEs (common table expressions) in parser * * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/include/parser/parse_cte.h diff --git a/src/include/parser/parse_enr.h b/src/include/parser/parse_enr.h index 9c68ddbb04..e8af457e02 100644 --- a/src/include/parser/parse_enr.h +++ b/src/include/parser/parse_enr.h @@ -4,7 +4,7 @@ * Internal definitions for parser * * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/include/parser/parse_enr.h diff --git a/src/include/parser/parse_expr.h b/src/include/parser/parse_expr.h index 3af09b0056..e5aff61b8f 100644 --- a/src/include/parser/parse_expr.h +++ b/src/include/parser/parse_expr.h @@ -3,7 +3,7 @@ * parse_expr.h * handle expressions in parser * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/include/parser/parse_expr.h diff --git a/src/include/parser/parse_func.h b/src/include/parser/parse_func.h index b4b6084b1b..11f9046e38 100644 --- a/src/include/parser/parse_func.h +++ b/src/include/parser/parse_func.h @@ -4,7 +4,7 @@ * * * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/include/parser/parse_func.h @@ -24,6 +24,7 @@ typedef enum FUNCDETAIL_NOTFOUND, /* no matching function */ FUNCDETAIL_MULTIPLE, /* too many matching functions */ FUNCDETAIL_NORMAL, /* found a matching regular function */ + FUNCDETAIL_PROCEDURE, /* found a matching procedure */ FUNCDETAIL_AGGREGATE, /* found a matching aggregate function */ FUNCDETAIL_WINDOWFUNC, /* found a matching window function */ FUNCDETAIL_COERCION /* it's a type coercion request */ @@ -31,7 +32,8 @@ typedef enum extern Node *ParseFuncOrColumn(ParseState *pstate, List *funcname, List *fargs, - Node *last_srf, FuncCall *fn, int location); + Node *last_srf, FuncCall *fn, bool proc_call, + int location); extern FuncDetailCode func_get_detail(List *funcname, List *fargs, List *fargnames, @@ -62,10 +64,8 @@ extern const char *func_signature_string(List *funcname, int nargs, extern Oid LookupFuncName(List *funcname, int nargs, const Oid *argtypes, bool noError); -extern Oid LookupFuncWithArgs(ObjectWithArgs *func, +extern Oid LookupFuncWithArgs(ObjectType objtype, ObjectWithArgs *func, bool noError); -extern Oid LookupAggWithArgs(ObjectWithArgs *agg, - bool noError); extern void check_srf_call_placement(ParseState *pstate, Node *last_srf, int location); diff --git a/src/include/parser/parse_node.h b/src/include/parser/parse_node.h index 
68930c1f4a..0230543810 100644 --- a/src/include/parser/parse_node.h +++ b/src/include/parser/parse_node.h @@ -4,7 +4,7 @@ * Internal definitions for parser * * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/include/parser/parse_node.h @@ -45,6 +45,7 @@ typedef enum ParseExprKind EXPR_KIND_WINDOW_ORDER, /* window definition ORDER BY */ EXPR_KIND_WINDOW_FRAME_RANGE, /* window frame clause with RANGE */ EXPR_KIND_WINDOW_FRAME_ROWS, /* window frame clause with ROWS */ + EXPR_KIND_WINDOW_FRAME_GROUPS, /* window frame clause with GROUPS */ EXPR_KIND_SELECT_TARGET, /* SELECT target list item */ EXPR_KIND_INSERT_TARGET, /* INSERT target list item */ EXPR_KIND_UPDATE_SOURCE, /* UPDATE assignment source item */ @@ -67,7 +68,8 @@ typedef enum ParseExprKind EXPR_KIND_EXECUTE_PARAMETER, /* parameter value in EXECUTE */ EXPR_KIND_TRIGGER_WHEN, /* WHEN condition in CREATE TRIGGER */ EXPR_KIND_POLICY, /* USING or WITH CHECK expr in policy */ - EXPR_KIND_PARTITION_EXPRESSION /* PARTITION BY expression */ + EXPR_KIND_PARTITION_EXPRESSION, /* PARTITION BY expression */ + EXPR_KIND_CALL_ARGUMENT /* procedure argument in CALL */ } ParseExprKind; @@ -111,7 +113,7 @@ typedef Node *(*CoerceParamHook) (ParseState *pstate, Param *param, * namespace for table and column lookup. (The RTEs listed here may be just * a subset of the whole rtable. See ParseNamespaceItem comments below.) * - * p_lateral_active: TRUE if we are currently parsing a LATERAL subexpression + * p_lateral_active: true if we are currently parsing a LATERAL subexpression * of this parse level. This makes p_lateral_only namespace items visible, * whereas they are not visible when p_lateral_active is FALSE. * diff --git a/src/include/parser/parse_oper.h b/src/include/parser/parse_oper.h index 3cab732b1f..da9cd3877c 100644 --- a/src/include/parser/parse_oper.h +++ b/src/include/parser/parse_oper.h @@ -4,7 +4,7 @@ * handle operator things for parser * * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/include/parser/parse_oper.h diff --git a/src/include/parser/parse_param.h b/src/include/parser/parse_param.h index 50c21cfc9d..fd6c695ebf 100644 --- a/src/include/parser/parse_param.h +++ b/src/include/parser/parse_param.h @@ -3,7 +3,7 @@ * parse_param.h * handle parameters in parser * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/include/parser/parse_param.h diff --git a/src/include/parser/parse_relation.h b/src/include/parser/parse_relation.h index 91542d4f15..bf0c3b057b 100644 --- a/src/include/parser/parse_relation.h +++ b/src/include/parser/parse_relation.h @@ -4,7 +4,7 @@ * prototypes for parse_relation.c. 
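
The parse_func.h and parse_node.h hunks above add a FUNCDETAIL_PROCEDURE result code, a proc_call flag on ParseFuncOrColumn(), and an EXPR_KIND_CALL_ARGUMENT expression kind, all in support of the CALL statement. A minimal sketch of how backend code might dispatch on the enlarged FuncDetailCode enum follows; only the enum values come from the header, while the helper name resolve_routine_kind() and its strings are made up for illustration.

    #include "postgres.h"
    #include "parser/parse_func.h"

    /* Hypothetical helper: describe what func_get_detail() reported. */
    static const char *
    resolve_routine_kind(FuncDetailCode fdresult)
    {
        switch (fdresult)
        {
            case FUNCDETAIL_NORMAL:
                return "regular function";
            case FUNCDETAIL_PROCEDURE:   /* new in this patch; invoke with CALL */
                return "procedure";
            case FUNCDETAIL_AGGREGATE:
                return "aggregate function";
            case FUNCDETAIL_WINDOWFUNC:
                return "window function";
            case FUNCDETAIL_COERCION:
                return "type coercion request";
            case FUNCDETAIL_MULTIPLE:
                return "ambiguous: multiple candidates";
            case FUNCDETAIL_NOTFOUND:
            default:
                return "no matching routine";
        }
    }
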
* * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/include/parser/parse_relation.h @@ -54,9 +54,9 @@ extern RangeTblEntry *GetRTEByRangeTablePosn(ParseState *pstate, extern CommonTableExpr *GetCTEForRTE(ParseState *pstate, RangeTblEntry *rte, int rtelevelsup); extern Node *scanRTEForColumn(ParseState *pstate, RangeTblEntry *rte, - char *colname, int location, + const char *colname, int location, int fuzzy_rte_penalty, FuzzyAttrMatchState *fuzzystate); -extern Node *colNameToVar(ParseState *pstate, char *colname, bool localonly, +extern Node *colNameToVar(ParseState *pstate, const char *colname, bool localonly, int location); extern void markVarForSelectPriv(ParseState *pstate, Var *var, RangeTblEntry *rte); @@ -69,6 +69,7 @@ extern RangeTblEntry *addRangeTableEntry(ParseState *pstate, bool inFromCl); extern RangeTblEntry *addRangeTableEntryForRelation(ParseState *pstate, Relation rel, + int lockmode, Alias *alias, bool inh, bool inFromCl); @@ -117,14 +118,14 @@ extern void addRTEtoQuery(ParseState *pstate, RangeTblEntry *rte, bool addToRelNameSpace, bool addToVarNameSpace); extern void errorMissingRTE(ParseState *pstate, RangeVar *relation) pg_attribute_noreturn(); extern void errorMissingColumn(ParseState *pstate, - char *relname, char *colname, int location) pg_attribute_noreturn(); + const char *relname, const char *colname, int location) pg_attribute_noreturn(); extern void expandRTE(RangeTblEntry *rte, int rtindex, int sublevels_up, int location, bool include_dropped, List **colnames, List **colvars); extern List *expandRelAttrs(ParseState *pstate, RangeTblEntry *rte, int rtindex, int sublevels_up, int location); extern int attnameAttNum(Relation rd, const char *attname, bool sysColOK); -extern Name attnumAttName(Relation rd, int attid); +extern const NameData *attnumAttName(Relation rd, int attid); extern Oid attnumTypeId(Relation rd, int attid); extern Oid attnumCollationId(Relation rd, int attid); extern bool isQueryUsingTempRelation(Query *query); diff --git a/src/include/parser/parse_target.h b/src/include/parser/parse_target.h index 44af46b1aa..ec6e0c102f 100644 --- a/src/include/parser/parse_target.h +++ b/src/include/parser/parse_target.h @@ -4,7 +4,7 @@ * handle target lists * * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/include/parser/parse_target.h @@ -28,7 +28,7 @@ extern TargetEntry *transformTargetEntry(ParseState *pstate, char *colname, bool resjunk); extern Expr *transformAssignedExpr(ParseState *pstate, Expr *expr, ParseExprKind exprKind, - char *colname, + const char *colname, int attrno, List *indirection, int location); diff --git a/src/include/parser/parse_type.h b/src/include/parser/parse_type.h index 7b843d0b9d..ab16737d57 100644 --- a/src/include/parser/parse_type.h +++ b/src/include/parser/parse_type.h @@ -3,7 +3,7 @@ * parse_type.h * handle type operations for parser * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/include/parser/parse_type.h @@ -46,10 +46,12 @@ extern Oid typeTypeCollation(Type typ); extern Datum 
stringTypeDatum(Type tp, char *string, int32 atttypmod); extern Oid typeidTypeRelid(Oid type_id); +extern Oid typeOrDomainTypeRelid(Oid type_id); extern TypeName *typeStringToTypeName(const char *str); extern void parseTypeString(const char *str, Oid *typeid_p, int32 *typmod_p, bool missing_ok); -#define ISCOMPLEX(typeid) (typeidTypeRelid(typeid) != InvalidOid) +/* true if typeid is composite, or domain over composite, but not RECORD */ +#define ISCOMPLEX(typeid) (typeOrDomainTypeRelid(typeid) != InvalidOid) #endif /* PARSE_TYPE_H */ diff --git a/src/include/parser/parse_utilcmd.h b/src/include/parser/parse_utilcmd.h index e749432ef0..35ac97940a 100644 --- a/src/include/parser/parse_utilcmd.h +++ b/src/include/parser/parse_utilcmd.h @@ -4,7 +4,7 @@ * parse analysis for utility commands * * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/include/parser/parse_utilcmd.h @@ -27,5 +27,9 @@ extern void transformRuleStmt(RuleStmt *stmt, const char *queryString, extern List *transformCreateSchemaStmt(CreateSchemaStmt *stmt); extern PartitionBoundSpec *transformPartitionBound(ParseState *pstate, Relation parent, PartitionBoundSpec *spec); +extern IndexStmt *generateClonedIndexStmt(RangeVar *heapRel, Oid heapOid, + Relation source_idx, + const AttrNumber *attmap, int attmap_length, + Oid *constraintOid); #endif /* PARSE_UTILCMD_H */ diff --git a/src/include/parser/parser.h b/src/include/parser/parser.h index 8370df6bbb..f26f6e22e1 100644 --- a/src/include/parser/parser.h +++ b/src/include/parser/parser.h @@ -5,7 +5,7 @@ * * This is the external API for the raw lexing/parsing functions. * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/include/parser/parser.h diff --git a/src/include/parser/parsetree.h b/src/include/parser/parsetree.h index 96cb2e2f67..fe16d7d1fa 100644 --- a/src/include/parser/parsetree.h +++ b/src/include/parser/parsetree.h @@ -5,7 +5,7 @@ * parse trees. * * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/include/parser/parsetree.h @@ -31,16 +31,6 @@ #define rt_fetch(rangetable_index, rangetable) \ ((RangeTblEntry *) list_nth(rangetable, (rangetable_index)-1)) -/* - * getrelid - * - * Given the range index of a relation, return the corresponding - * relation OID. Note that InvalidOid will be returned if the - * RTE is for a non-relation-type RTE. - */ -#define getrelid(rangeindex,rangetable) \ - (rt_fetch(rangeindex, rangetable)->relid) - /* * Given an RTE and an attribute number, return the appropriate * variable name or alias for that attribute of that RTE. diff --git a/src/include/parser/scanner.h b/src/include/parser/scanner.h index bb95de730b..20bf1e6672 100644 --- a/src/include/parser/scanner.h +++ b/src/include/parser/scanner.h @@ -8,7 +8,7 @@ * higher-level API provided by parser.h. 
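
The parsetree.h hunk above removes the getrelid() macro; its former body shows that callers get the same OID by going through rt_fetch() directly. A small sketch of the equivalent spelling, assuming the range-table index and list are already in hand:

    #include "postgres.h"
    #include "nodes/parsenodes.h"
    #include "parser/parsetree.h"

    /*
     * Where code previously wrote getrelid(rti, rtable), the relation OID is
     * now read from the fetched RTE.  As before, InvalidOid results for a
     * non-relation RTE.
     */
    static Oid
    relation_oid_for_rti(int rti, List *rtable)
    {
        RangeTblEntry *rte = rt_fetch(rti, rtable);

        return rte->relid;
    }
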
* * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/include/parser/scanner.h diff --git a/src/include/parser/scansup.h b/src/include/parser/scansup.h index f9a36b5cbd..766b3f908a 100644 --- a/src/include/parser/scansup.h +++ b/src/include/parser/scansup.h @@ -4,7 +4,7 @@ * scanner support routines. used by both the bootstrap lexer * as well as the normal lexer * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/include/parser/scansup.h diff --git a/src/include/partitioning/partbounds.h b/src/include/partitioning/partbounds.h new file mode 100644 index 0000000000..c7535e32fc --- /dev/null +++ b/src/include/partitioning/partbounds.h @@ -0,0 +1,150 @@ +/*------------------------------------------------------------------------- + * + * partbounds.h + * + * Copyright (c) 2007-2018, PostgreSQL Global Development Group + * + * src/include/partitioning/partbounds.h + * + *------------------------------------------------------------------------- + */ +#ifndef PARTBOUNDS_H +#define PARTBOUNDS_H + +#include "fmgr.h" +#include "nodes/parsenodes.h" +#include "nodes/pg_list.h" +#include "partitioning/partdefs.h" +#include "utils/relcache.h" + + +/* + * PartitionBoundInfoData encapsulates a set of partition bounds. It is + * usually associated with partitioned tables as part of its partition + * descriptor, but may also be used to represent a virtual partitioned + * table such as a partitioned joinrel within the planner. + * + * A list partition datum that is known to be NULL is never put into the + * datums array. Instead, it is tracked using the null_index field. + * + * In the case of range partitioning, ndatums will typically be far less than + * 2 * nparts, because a partition's upper bound and the next partition's lower + * bound are the same in most common cases, and we only store one of them (the + * upper bound). In case of hash partitioning, ndatums will be same as the + * number of partitions. + * + * For range and list partitioned tables, datums is an array of datum-tuples + * with key->partnatts datums each. For hash partitioned tables, it is an array + * of datum-tuples with 2 datums, modulus and remainder, corresponding to a + * given partition. + * + * The datums in datums array are arranged in increasing order as defined by + * functions qsort_partition_rbound_cmp(), qsort_partition_list_value_cmp() and + * qsort_partition_hbound_cmp() for range, list and hash partitioned tables + * respectively. For range and list partitions this simply means that the + * datums in the datums array are arranged in increasing order as defined by + * the partition key's operator classes and collations. + * + * In the case of list partitioning, the indexes array stores one entry for + * every datum, which is the index of the partition that accepts a given datum. + * In case of range partitioning, it stores one entry per distinct range + * datum, which is the index of the partition for which a given datum + * is an upper bound. In the case of hash partitioning, the number of the + * entries in the indexes array is same as the greatest modulus amongst all + * partitions. 
For a given partition key datum-tuple, the index of the + * partition which would accept that datum-tuple would be given by the entry + * pointed by remainder produced when hash value of the datum-tuple is divided + * by the greatest modulus. + */ + +typedef struct PartitionBoundInfoData +{ + char strategy; /* hash, list or range? */ + int ndatums; /* Length of the datums following array */ + Datum **datums; + PartitionRangeDatumKind **kind; /* The kind of each range bound datum; + * NULL for hash and list partitioned + * tables */ + int *indexes; /* Partition indexes */ + int null_index; /* Index of the null-accepting partition; -1 + * if there isn't one */ + int default_index; /* Index of the default partition; -1 if there + * isn't one */ +} PartitionBoundInfoData; + +#define partition_bound_accepts_nulls(bi) ((bi)->null_index != -1) +#define partition_bound_has_default(bi) ((bi)->default_index != -1) + +/* + * When qsort'ing partition bounds after reading from the catalog, each bound + * is represented with one of the following structs. + */ + +/* One bound of a hash partition */ +typedef struct PartitionHashBound +{ + int modulus; + int remainder; + int index; +} PartitionHashBound; + +/* One value coming from some (index'th) list partition */ +typedef struct PartitionListValue +{ + int index; + Datum value; +} PartitionListValue; + +/* One bound of a range partition */ +typedef struct PartitionRangeBound +{ + int index; + Datum *datums; /* range bound datums */ + PartitionRangeDatumKind *kind; /* the kind of each datum */ + bool lower; /* this is the lower (vs upper) bound */ +} PartitionRangeBound; + +extern int get_hash_partition_greatest_modulus(PartitionBoundInfo b); +extern uint64 compute_partition_hash_value(int partnatts, FmgrInfo *partsupfunc, + Datum *values, bool *isnull); +extern List *get_qual_from_partbound(Relation rel, Relation parent, + PartitionBoundSpec *spec); +extern bool partition_bounds_equal(int partnatts, int16 *parttyplen, + bool *parttypbyval, PartitionBoundInfo b1, + PartitionBoundInfo b2); +extern PartitionBoundInfo partition_bounds_copy(PartitionBoundInfo src, + PartitionKey key); +extern void check_new_partition_bound(char *relname, Relation parent, + PartitionBoundSpec *spec); +extern void check_default_partition_contents(Relation parent, + Relation defaultRel, + PartitionBoundSpec *new_spec); + +extern PartitionRangeBound *make_one_partition_rbound(PartitionKey key, int index, + List *datums, bool lower); +extern int32 partition_hbound_cmp(int modulus1, int remainder1, int modulus2, + int remainder2); +extern int32 partition_rbound_cmp(int partnatts, FmgrInfo *partsupfunc, + Oid *partcollation, Datum *datums1, + PartitionRangeDatumKind *kind1, bool lower1, + PartitionRangeBound *b2); +extern int32 partition_rbound_datum_cmp(FmgrInfo *partsupfunc, + Oid *partcollation, + Datum *rb_datums, PartitionRangeDatumKind *rb_kind, + Datum *tuple_datums, int n_tuple_datums); +extern int partition_list_bsearch(FmgrInfo *partsupfunc, + Oid *partcollation, + PartitionBoundInfo boundinfo, + Datum value, bool *is_equal); +extern int partition_range_bsearch(int partnatts, FmgrInfo *partsupfunc, + Oid *partcollation, + PartitionBoundInfo boundinfo, + PartitionRangeBound *probe, bool *is_equal); +extern int partition_range_datum_bsearch(FmgrInfo *partsupfunc, + Oid *partcollation, + PartitionBoundInfo boundinfo, + int nvalues, Datum *values, bool *is_equal); +extern int partition_hash_bsearch(PartitionBoundInfo boundinfo, + int modulus, int remainder); + +#endif /* 
PARTBOUNDS_H */ diff --git a/src/include/partitioning/partdefs.h b/src/include/partitioning/partdefs.h new file mode 100644 index 0000000000..1fe1b4868e --- /dev/null +++ b/src/include/partitioning/partdefs.h @@ -0,0 +1,24 @@ +/*------------------------------------------------------------------------- + * + * partdefs.h + * Base definitions for partitioned table handling + * + * Copyright (c) 2007-2018, PostgreSQL Global Development Group + * + * src/include/partitioning/partdefs.h + * + *------------------------------------------------------------------------- + */ +#ifndef PARTDEFS_H +#define PARTDEFS_H + + +typedef struct PartitionBoundInfoData *PartitionBoundInfo; + +typedef struct PartitionKeyData *PartitionKey; + +typedef struct PartitionBoundSpec PartitionBoundSpec; + +typedef struct PartitionDescData *PartitionDesc; + +#endif /* PARTDEFS_H */ diff --git a/src/include/partitioning/partprune.h b/src/include/partitioning/partprune.h new file mode 100644 index 0000000000..e07aaaf798 --- /dev/null +++ b/src/include/partitioning/partprune.h @@ -0,0 +1,83 @@ +/*------------------------------------------------------------------------- + * + * partprune.h + * prototypes for partprune.c + * + * + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group + * Portions Copyright (c) 1994, Regents of the University of California + * + * src/include/partitioning/partprune.h + * + *------------------------------------------------------------------------- + */ +#ifndef PARTPRUNE_H +#define PARTPRUNE_H + +#include "nodes/execnodes.h" +#include "nodes/relation.h" + + +/* + * PartitionPruneContext + * Stores information needed at runtime for pruning computations + * related to a single partitioned table. + * + * strategy Partition strategy, e.g. LIST, RANGE, HASH. + * partnatts Number of columns in the partition key. + * nparts Number of partitions in this partitioned table. + * boundinfo Partition boundary info for the partitioned table. + * partcollation Array of partnatts elements, storing the collations of the + * partition key columns. + * partsupfunc Array of FmgrInfos for the comparison or hashing functions + * associated with the partition keys (partnatts elements). + * (This points into the partrel's partition key, typically.) + * stepcmpfuncs Array of FmgrInfos for the comparison or hashing function + * for each pruning step and partition key. + * ppccontext Memory context holding this PartitionPruneContext's + * subsidiary data, such as the FmgrInfos. + * planstate Points to the parent plan node's PlanState when called + * during execution; NULL when called from the planner. + * exprstates Array of ExprStates, indexed as per PruneCtxStateIdx; one + * for each partition key in each pruning step. Allocated if + * planstate is non-NULL, otherwise NULL. + * exprhasexecparam Array of bools, each true if corresponding 'exprstate' + * expression contains any PARAM_EXEC Params. (Can be NULL + * if planstate is NULL.) + * evalexecparams True if it's safe to evaluate PARAM_EXEC Params. 
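
The PartitionBoundInfoData commentary in the new partbounds.h above describes the hash-partitioning layout: ndatums equals the number of partitions, each datum-tuple stores (modulus, remainder), and the indexes array has one slot per remainder value up to the greatest modulus. The following is a minimal sketch of the lookup that comment describes, with the hash value assumed to come from compute_partition_hash_value(); it illustrates the data structure rather than reproducing the backend's actual tuple-routing code.

    #include "postgres.h"
    #include "partitioning/partbounds.h"

    /*
     * Given a hash-partitioned table's bound info and the hash of a partition
     * key tuple, find the accepting partition: take the remainder against the
     * greatest modulus and look it up in the indexes array.  A -1 entry means
     * no partition accepts that remainder.
     */
    static int
    hash_partition_index_for(PartitionBoundInfo boundinfo, uint64 hashvalue)
    {
        int     greatest_modulus = get_hash_partition_greatest_modulus(boundinfo);
        int     remainder = (int) (hashvalue % greatest_modulus);

        return boundinfo->indexes[remainder];
    }
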
+ */ +typedef struct PartitionPruneContext +{ + char strategy; + int partnatts; + int nparts; + PartitionBoundInfo boundinfo; + Oid *partcollation; + FmgrInfo *partsupfunc; + FmgrInfo *stepcmpfuncs; + MemoryContext ppccontext; + PlanState *planstate; + ExprState **exprstates; + bool *exprhasexecparam; + bool evalexecparams; +} PartitionPruneContext; + +/* + * PruneCxtStateIdx() computes the correct index into the stepcmpfuncs[], + * exprstates[] and exprhasexecparam[] arrays for step step_id and + * partition key column keyno. (Note: there is code that assumes the + * entries for a given step are sequential, so this is not chosen freely.) + */ +#define PruneCxtStateIdx(partnatts, step_id, keyno) \ + ((partnatts) * (step_id) + (keyno)) + +extern PartitionPruneInfo *make_partition_pruneinfo(PlannerInfo *root, + RelOptInfo *parentrel, + List *subpaths, + List *partitioned_rels, + List *prunequal); +extern Relids prune_append_rel_partitions(RelOptInfo *rel); +extern Bitmapset *get_matching_partitions(PartitionPruneContext *context, + List *pruning_steps); + +#endif /* PARTPRUNE_H */ diff --git a/src/include/pg_config.h.in b/src/include/pg_config.h.in index dcb7a1a320..6ac75cd02c 100644 --- a/src/include/pg_config.h.in +++ b/src/include/pg_config.h.in @@ -27,6 +27,9 @@ /* The normal alignment of `long long int', in bytes. */ #undef ALIGNOF_LONG_LONG_INT +/* The normal alignment of `PG_INT128_TYPE', in bytes. */ +#undef ALIGNOF_PG_INT128_TYPE + /* The normal alignment of `short', in bytes. */ #undef ALIGNOF_SHORT @@ -111,6 +114,12 @@ /* Define to 1 if your compiler handles computed gotos. */ #undef HAVE_COMPUTED_GOTO +/* Define to 1 if you have the `copyfile' function. */ +#undef HAVE_COPYFILE + +/* Define to 1 if you have the header file. */ +#undef HAVE_COPYFILE_H + /* Define to 1 if you have the header file. */ #undef HAVE_CRTDEFS_H @@ -131,13 +140,37 @@ don't. */ #undef HAVE_DECL_F_FULLFSYNC +/* Define to 1 if you have the declaration of + `LLVMCreateGDBRegistrationListener', and to 0 if you don't. */ +#undef HAVE_DECL_LLVMCREATEGDBREGISTRATIONLISTENER + +/* Define to 1 if you have the declaration of + `LLVMCreatePerfJITEventListener', and to 0 if you don't. */ +#undef HAVE_DECL_LLVMCREATEPERFJITEVENTLISTENER + +/* Define to 1 if you have the declaration of `LLVMGetHostCPUFeatures', and to + 0 if you don't. */ +#undef HAVE_DECL_LLVMGETHOSTCPUFEATURES + +/* Define to 1 if you have the declaration of `LLVMGetHostCPUName', and to 0 + if you don't. */ +#undef HAVE_DECL_LLVMGETHOSTCPUNAME + +/* Define to 1 if you have the declaration of `LLVMOrcGetSymbolAddressIn', and + to 0 if you don't. */ +#undef HAVE_DECL_LLVMORCGETSYMBOLADDRESSIN + /* Define to 1 if you have the declaration of `posix_fadvise', and to 0 if you don't. */ #undef HAVE_DECL_POSIX_FADVISE -/* Define to 1 if you have the declaration of `snprintf', and to 0 if you +/* Define to 1 if you have the declaration of `RTLD_GLOBAL', and to 0 if you + don't. */ +#undef HAVE_DECL_RTLD_GLOBAL + +/* Define to 1 if you have the declaration of `RTLD_NOW', and to 0 if you don't. */ -#undef HAVE_DECL_SNPRINTF +#undef HAVE_DECL_RTLD_NOW /* Define to 1 if you have the declaration of `strlcat', and to 0 if you don't. */ @@ -147,16 +180,21 @@ don't. */ #undef HAVE_DECL_STRLCPY -/* Define to 1 if you have the declaration of `sys_siglist', and to 0 if you +/* Define to 1 if you have the declaration of `strnlen', and to 0 if you don't. 
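
PruneCxtStateIdx() in the partprune.h hunk above flattens a step-major two-dimensional layout into the stepcmpfuncs[], exprstates[] and exprhasexecparam[] arrays: the entries for one pruning step are contiguous, one per partition key column. A short sketch of how an executor-side caller might address the per-key ExprState for a given step; the function name here is illustrative only.

    #include "postgres.h"
    #include "partitioning/partprune.h"

    /*
     * Entries for step step_id occupy exprstates[step_id * partnatts] through
     * exprstates[step_id * partnatts + partnatts - 1].
     */
    static ExprState *
    pruning_step_keyexpr(PartitionPruneContext *context, int step_id, int keyno)
    {
        int     idx = PruneCxtStateIdx(context->partnatts, step_id, keyno);

        return context->exprstates[idx];
    }
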
*/ -#undef HAVE_DECL_SYS_SIGLIST +#undef HAVE_DECL_STRNLEN + +/* Define to 1 if you have the declaration of `strtoll', and to 0 if you + don't. */ +#undef HAVE_DECL_STRTOLL -/* Define to 1 if you have the declaration of `vsnprintf', and to 0 if you +/* Define to 1 if you have the declaration of `strtoull', and to 0 if you don't. */ -#undef HAVE_DECL_VSNPRINTF +#undef HAVE_DECL_STRTOULL -/* Define to 1 if you have the header file. */ -#undef HAVE_DLD_H +/* Define to 1 if you have the declaration of `sys_siglist', and to 0 if you + don't. */ +#undef HAVE_DECL_SYS_SIGLIST /* Define to 1 if you have the `dlopen' function. */ #undef HAVE_DLOPEN @@ -197,20 +235,21 @@ /* Define to 1 if you have __atomic_compare_exchange_n(int *, int *, int). */ #undef HAVE_GCC__ATOMIC_INT32_CAS -/* Define to 1 if you have __atomic_compare_exchange_n(int64 *, int *, int64). - */ +/* Define to 1 if you have __atomic_compare_exchange_n(int64 *, int64 *, + int64). */ #undef HAVE_GCC__ATOMIC_INT64_CAS /* Define to 1 if you have __sync_lock_test_and_set(char *) and friends. */ #undef HAVE_GCC__SYNC_CHAR_TAS -/* Define to 1 if you have __sync_compare_and_swap(int *, int, int). */ +/* Define to 1 if you have __sync_val_compare_and_swap(int *, int, int). */ #undef HAVE_GCC__SYNC_INT32_CAS /* Define to 1 if you have __sync_lock_test_and_set(int *) and friends. */ #undef HAVE_GCC__SYNC_INT32_TAS -/* Define to 1 if you have __sync_compare_and_swap(int64 *, int64, int64). */ +/* Define to 1 if you have __sync_val_compare_and_swap(int64 *, int64, int64). + */ #undef HAVE_GCC__SYNC_INT64_CAS /* Define to 1 if you have the `getaddrinfo' function. */ @@ -303,6 +342,9 @@ /* Define to 1 if you have the header file. */ #undef HAVE_LDAP_H +/* Define to 1 if you have the `ldap_initialize' function. */ +#undef HAVE_LDAP_INITIALIZE + /* Define to 1 if you have the `crypto' library (-lcrypto). */ #undef HAVE_LIBCRYPTO @@ -339,10 +381,6 @@ /* Define to 1 if you have the `z' library (-lz). */ #undef HAVE_LIBZ -/* Define to 1 if constants of type 'long long int' should have the suffix LL. - */ -#undef HAVE_LL_CONSTANTS - /* Define to 1 if the system has the type `locale_t'. */ #undef HAVE_LOCALE_T @@ -397,9 +435,18 @@ /* Define to 1 if you have the `posix_fadvise' function. */ #undef HAVE_POSIX_FADVISE +/* Define to 1 if you have the `posix_fallocate' function. */ +#undef HAVE_POSIX_FALLOCATE + /* Define to 1 if the assembler supports PPC's LWARX mutex hint bit. */ #undef HAVE_PPC_LWARX_MUTEX_HINT +/* Define to 1 if you have the `ppoll' function. */ +#undef HAVE_PPOLL + +/* Define to 1 if you have the `pread' function. */ +#undef HAVE_PREAD + /* Define to 1 if you have the `pstat' function. */ #undef HAVE_PSTAT @@ -415,6 +462,9 @@ /* Have PTHREAD_PRIO_INHERIT. */ #undef HAVE_PTHREAD_PRIO_INHERIT +/* Define to 1 if you have the `pwrite' function. */ +#undef HAVE_PWRITE + /* Define to 1 if you have the `random' function. */ #undef HAVE_RANDOM @@ -455,32 +505,38 @@ /* Define to 1 if you have the `setproctitle' function. */ #undef HAVE_SETPROCTITLE +/* Define to 1 if you have the `setproctitle_fast' function. */ +#undef HAVE_SETPROCTITLE_FAST + /* Define to 1 if you have the `setsid' function. */ #undef HAVE_SETSID /* Define to 1 if you have the `shm_open' function. */ #undef HAVE_SHM_OPEN -/* Define to 1 if you have the `snprintf' function. */ -#undef HAVE_SNPRINTF - /* Define to 1 if you have spinlocks. */ #undef HAVE_SPINLOCKS /* Define to 1 if you have the `srandom' function. 
*/ #undef HAVE_SRANDOM +/* Define to 1 if you have the `SSL_clear_options' function. */ +#undef HAVE_SSL_CLEAR_OPTIONS + /* Define to 1 if you have the `SSL_get_current_compression' function. */ #undef HAVE_SSL_GET_CURRENT_COMPRESSION +/* Define to 1 if stdbool.h conforms to C99. */ +#undef HAVE_STDBOOL_H + /* Define to 1 if you have the header file. */ #undef HAVE_STDINT_H /* Define to 1 if you have the header file. */ #undef HAVE_STDLIB_H -/* Define to 1 if you have the `strerror' function. */ -#undef HAVE_STRERROR +/* Define to 1 if you have the `strchrnul' function. */ +#undef HAVE_STRCHRNUL /* Define to 1 if you have the `strerror_r' function. */ #undef HAVE_STRERROR_R @@ -497,6 +553,9 @@ /* Define to 1 if you have the `strlcpy' function. */ #undef HAVE_STRLCPY +/* Define to 1 if you have the `strnlen' function. */ +#undef HAVE_STRNLEN + /* Define to use have a strong random number source */ #undef HAVE_STRONG_RANDOM @@ -557,6 +616,12 @@ /* Define to 1 if you have the header file. */ #undef HAVE_SYS_IPC_H +/* Define to 1 if you have the header file. */ +#undef HAVE_SYS_PRCTL_H + +/* Define to 1 if you have the header file. */ +#undef HAVE_SYS_PROCCTL_H + /* Define to 1 if you have the header file. */ #undef HAVE_SYS_PSTAT_H @@ -597,9 +662,6 @@ `HAVE_STRUCT_TM_TM_ZONE' instead. */ #undef HAVE_TM_ZONE -/* Define to 1 if you have the `towlower' function. */ -#undef HAVE_TOWLOWER - /* Define to 1 if your compiler understands `typeof' or something similar. */ #undef HAVE_TYPEOF @@ -657,15 +719,9 @@ /* Define to 1 if you have the header file. */ #undef HAVE_UUID_UUID_H -/* Define to 1 if you have the `vsnprintf' function. */ -#undef HAVE_VSNPRINTF - /* Define to 1 if you have the header file. */ #undef HAVE_WCHAR_H -/* Define to 1 if you have the `wcstombs' function. */ -#undef HAVE_WCSTOMBS - /* Define to 1 if you have the `wcstombs_l' function. */ #undef HAVE_WCSTOMBS_L @@ -675,6 +731,15 @@ /* Define to 1 if you have the header file. */ #undef HAVE_WINLDAP_H +/* Define to 1 if you have the `X509_get_signature_nid' function. */ +#undef HAVE_X509_GET_SIGNATURE_NID + +/* Define to 1 if the system has the type `_Bool'. */ +#undef HAVE__BOOL + +/* Define to 1 if your compiler understands __builtin_bswap16. */ +#undef HAVE__BUILTIN_BSWAP16 + /* Define to 1 if your compiler understands __builtin_bswap32. */ #undef HAVE__BUILTIN_BSWAP32 @@ -684,6 +749,9 @@ /* Define to 1 if your compiler understands __builtin_constant_p. */ #undef HAVE__BUILTIN_CONSTANT_P +/* Define to 1 if your compiler understands __builtin_$op_overflow. */ +#undef HAVE__BUILTIN_OP_OVERFLOW + /* Define to 1 if your compiler understands __builtin_types_compatible_p. */ #undef HAVE__BUILTIN_TYPES_COMPATIBLE_P @@ -699,10 +767,13 @@ /* Define to 1 if your compiler understands _Static_assert. */ #undef HAVE__STATIC_ASSERT -/* Define to 1 if your compiler understands __VA_ARGS__ in macros. */ -#undef HAVE__VA_ARGS +/* Define to 1 if you have the `__strtoll' function. */ +#undef HAVE___STRTOLL + +/* Define to 1 if you have the `__strtoull' function. */ +#undef HAVE___STRTOULL -/* Define to the appropriate snprintf length modifier for 64-bit ints. */ +/* Define to the appropriate printf length modifier for 64-bit ints. */ #undef INT64_MODIFIER /* Define to 1 if `locale_t' requires . */ @@ -745,7 +816,7 @@ /* PostgreSQL major version as a string */ #undef PG_MAJORVERSION -/* Define to gnu_printf if compiler supports it, else printf. */ +/* Define to best printf format archetype, usually gnu_printf if available. 
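
Among the new probes added to pg_config.h.in above is HAVE__BUILTIN_OP_OVERFLOW, set when the compiler provides __builtin_add_overflow() and friends. The standalone sketch below shows the kind of overflow-checked arithmetic such a probe enables; the widening fallback branch is illustrative only and is not claimed to be the one PostgreSQL itself uses.

    #include <stdbool.h>
    #include <stdint.h>

    /*
     * Add two 32-bit integers, reporting overflow instead of relying on
     * undefined behavior.  With HAVE__BUILTIN_OP_OVERFLOW the compiler
     * builtin does the check; otherwise widen to 64 bits and range-check.
     * Returns true on overflow, false on success.
     */
    static bool
    add_s32_overflow(int32_t a, int32_t b, int32_t *result)
    {
    #if defined(HAVE__BUILTIN_OP_OVERFLOW)
        return __builtin_add_overflow(a, b, result);
    #else
        int64_t sum = (int64_t) a + (int64_t) b;

        if (sum < INT32_MIN || sum > INT32_MAX)
            return true;
        *result = (int32_t) sum;
        return false;
    #endif
    }
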
*/ #undef PG_PRINTF_ATTRIBUTE /* PostgreSQL version as a string */ @@ -778,6 +849,9 @@ RELSEG_SIZE requires an initdb. */ #undef RELSEG_SIZE +/* The size of `bool', as computed by sizeof. */ +#undef SIZEOF_BOOL + /* The size of `long', as computed by sizeof. */ #undef SIZEOF_LONG @@ -793,12 +867,18 @@ /* Define to 1 if you have the ANSI C header files. */ #undef STDC_HEADERS -/* Define to 1 if strerror_r() returns a int. */ +/* Define to 1 if strerror_r() returns int. */ #undef STRERROR_R_INT /* Define to 1 if your declares `struct tm'. */ #undef TM_IN_SYS_TIME +/* Define to 1 to use ARMv8 CRC Extension. */ +#undef USE_ARMV8_CRC32C + +/* Define to 1 to use ARMv8 CRC Extension with a runtime check. */ +#undef USE_ARMV8_CRC32C_WITH_RUNTIME_CHECK + /* Define to 1 to build with assertion checks. (--enable-cassert) */ #undef USE_ASSERT_CHECKING @@ -832,6 +912,9 @@ (--with-libxslt) */ #undef USE_LIBXSLT +/* Define to 1 to build with LLVM based JIT support. (--with-llvm) */ +#undef USE_LLVM + /* Define to select named POSIX semaphores. */ #undef USE_NAMED_POSIX_SEMAPHORES @@ -844,16 +927,13 @@ /* Define to 1 to build with PAM support. (--with-pam) */ #undef USE_PAM -/* Use replacement snprintf() functions. */ -#undef USE_REPL_SNPRINTF - -/* Define to 1 to use Intel SSE 4.2 CRC instructions with a runtime check. */ +/* Define to 1 to use software CRC-32C implementation (slicing-by-8). */ #undef USE_SLICING_BY_8_CRC32C /* Define to 1 use Intel SSE 4.2 CRC instructions. */ #undef USE_SSE42_CRC32C -/* Define to 1 to use Intel SSSE 4.2 CRC instructions with a runtime check. */ +/* Define to 1 to use Intel SSE 4.2 CRC instructions with a runtime check. */ #undef USE_SSE42_CRC32C_WITH_RUNTIME_CHECK /* Define to build with systemd support. (--with-systemd) */ @@ -899,11 +979,6 @@ */ #undef XLOG_BLCKSZ -/* XLOG_SEG_SIZE is the size of a single WAL file. This must be a power of 2 - and larger than XLOG_BLCKSZ (preferably, a great deal larger than - XLOG_BLCKSZ). Changing XLOG_SEG_SIZE requires an initdb. */ -#undef XLOG_SEG_SIZE - /* Number of bits in a file offset, on hosts where this is settable. */ @@ -925,6 +1000,24 @@ if such a type exists, and if the system does not define it. */ #undef intptr_t +/* Define to keyword to use for C99 restrict support, or to nothing if not + supported */ +#undef pg_restrict + +/* Define to the equivalent of the C99 'restrict' keyword, or to + nothing if this is not supported. Do not define if restrict is + supported directly. */ +#undef restrict +/* Work around a bug in Sun C++: it does not support _Restrict or + __restrict__, even though the corresponding Sun C compiler ends up with + "#define restrict _Restrict" or "#define restrict __restrict__" in the + previous line. Perhaps some future version of Sun C++ will work with + restrict; if so, hopefully it defines __RESTRICT like Sun C does. */ +#if defined __SUNPRO_CC && !defined __RESTRICT +# define _Restrict +# define __restrict__ +#endif + /* Define to empty if the C compiler does not understand signed types. */ #undef signed diff --git a/src/include/pg_config.h.win32 b/src/include/pg_config.h.win32 index 0b4110472e..894d658a20 100644 --- a/src/include/pg_config.h.win32 +++ b/src/include/pg_config.h.win32 @@ -34,6 +34,9 @@ /* The alignment requirement of a `long long int'. */ #define ALIGNOF_LONG_LONG_INT 8 +/* The normal alignment of `PG_INT128_TYPE', in bytes. */ +#undef ALIGNOF_PG_INT128_TYPE + /* The alignment requirement of a `short'. 
*/ #define ALIGNOF_SHORT 2 @@ -69,6 +72,15 @@ # define gettimeofday(a,b) gettimeofday(a) #endif +/* Define to 1 if you have the `ASN1_STRING_get0_data' function. */ +/* #undef HAVE_ASN1_STRING_GET0_DATA */ + +/* Define to 1 if you have the `BIO_get_data' function. */ +/* #undef HAVE_BIO_GET_DATA */ + +/* Define to 1 if you have the `BIO_meth_new' function. */ +/* #undef HAVE_BIO_METH_NEW */ + /* Define to 1 if you have the `cbrt' function. */ //#define HAVE_CBRT 1 @@ -95,16 +107,45 @@ don't. */ #define HAVE_DECL_F_FULLFSYNC 0 -/* Define to 1 if you have the declaration of `snprintf', and to 0 if you +/* Define to 1 if you have the declaration of + `LLVMCreateGDBRegistrationListener', and to 0 if you don't. */ +#undef HAVE_DECL_LLVMCREATEGDBREGISTRATIONLISTENER + +/* Define to 1 if you have the declaration of + `LLVMCreatePerfJITEventListener', and to 0 if you don't. */ +#undef HAVE_DECL_LLVMCREATEPERFJITEVENTLISTENER + +/* Define to 1 if you have the declaration of `LLVMGetHostCPUName', and to 0 + if you don't. */ +#define HAVE_DECL_LLVMGETHOSTCPUNAME 0 + +/* Define to 1 if you have the declaration of `LLVMGetHostCPUFeatures', and to 0 + if you don't. */ +#define HAVE_DECL_LLVMGETHOSTCPUFEATURES 0 + +/* Define to 1 if you have the declaration of `LLVMOrcGetSymbolAddressIn', and + to 0 if you don't. */ +#define HAVE_DECL_LLVMORCGETSYMBOLADDRESSIN 0 + +/* Define to 1 if you have the declaration of `RTLD_GLOBAL', and to 0 if you + don't. */ +#define HAVE_DECL_RTLD_GLOBAL 0 + +/* Define to 1 if you have the declaration of `RTLD_NOW', and to 0 if you don't. */ -#define HAVE_DECL_SNPRINTF 1 +#define HAVE_DECL_RTLD_NOW 0 -/* Define to 1 if you have the declaration of `vsnprintf', and to 0 if you +/* Define to 1 if you have the declaration of `strnlen', and to 0 if you don't. */ -#define HAVE_DECL_VSNPRINTF 1 +#define HAVE_DECL_STRNLEN 1 -/* Define to 1 if you have the header file. */ -/* #undef HAVE_DLD_H */ +/* Define to 1 if you have the declaration of `strtoll', and to 0 if you + don't. */ +#define HAVE_DECL_STRTOLL 1 + +/* Define to 1 if you have the declaration of `strtoull', and to 0 if you + don't. */ +#define HAVE_DECL_STRTOULL 1 /* Define to 1 if you have the `dlopen' function. */ /* #undef HAVE_DLOPEN */ @@ -202,6 +243,9 @@ /* Define to 1 if you have the header file. */ /* #undef HAVE_LDAP_H */ +/* Define to 1 if you have the `ldap_initialize' function. */ +/* #undef HAVE_LDAP_INITIALIZE */ + /* Define to 1 if you have the `crypto' library (-lcrypto). */ /* #undef HAVE_LIBCRYPTO */ @@ -223,21 +267,20 @@ /* Define to 1 if you have the `z' library (-lz). */ /* #undef HAVE_LIBZ */ -/* Define to 1 if constants of type 'long long int' should have the suffix LL. - */ -#if (_MSC_VER > 1200) -#define HAVE_LL_CONSTANTS 1 -#endif - /* Define to 1 if the system has the type `locale_t'. */ #define HAVE_LOCALE_T 1 /* Define to 1 if `long int' works and is 64 bits. */ /* #undef HAVE_LONG_INT_64 */ +/* Define to 1 if the system has the type `long long int'. */ +#if (_MSC_VER > 1200) +#define HAVE_LONG_LONG_INT 1 +#endif + /* Define to 1 if `long long int' works and is 64 bits. */ #if (_MSC_VER > 1200) -#define HAVE_LONG_LONG_INT_64 +#define HAVE_LONG_LONG_INT_64 1 #endif /* Define to 1 if you have the `mbstowcs_l' function. */ @@ -258,21 +301,39 @@ /* Define to 1 if you have the header file. */ /* #undef HAVE_NETINET_TCP_H */ +/* Define to 1 if you have the `OPENSSL_init_ssl' function. */ +/* #undef HAVE_OPENSSL_INIT_SSL */ + /* Define to 1 if you have the header file. 
*/ /* #undef HAVE_PAM_PAM_APPL_H */ +/* Define to 1 if you have the `strnlen' function. */ +#define HAVE_STRNLEN 1 + /* Define to 1 if you have the `poll' function. */ /* #undef HAVE_POLL */ /* Define to 1 if you have the header file. */ /* #undef HAVE_POLL_H */ +/* Define to 1 if you have the `posix_fallocate' function. */ +/* #undef HAVE_POSIX_FALLOCATE */ + +/* Define to 1 if you have the `ppoll' function. */ +/* #undef HAVE_PPOLL */ + +/* Define to 1 if you have the `pread' function. */ +/* #undef HAVE_PREAD */ + /* Define to 1 if you have the `pstat' function. */ /* #undef HAVE_PSTAT */ /* Define to 1 if the PS_STRINGS thing exists. */ /* #undef HAVE_PS_STRINGS */ +/* Define to 1 if you have the `pwrite' function. */ +/* #undef HAVE_PWRITE */ + /* Define to 1 if you have the `random' function. */ /* #undef HAVE_RANDOM */ @@ -313,9 +374,6 @@ /* Define to 1 if you have the `setsid' function. */ /* #undef HAVE_SETSID */ -/* Define to 1 if you have the `snprintf' function. */ -/* #undef HAVE_SNPRINTF */ - /* Define to 1 if you have spinlocks. */ #define HAVE_SPINLOCKS 1 @@ -325,19 +383,25 @@ /* Define to 1 if you have the `srandom' function. */ /* #undef HAVE_SRANDOM */ +/* Define to 1 if you have the `SSL_clear_options' function. */ +#define HAVE_SSL_CLEAR_OPTIONS 1 + /* Define to 1 if you have the `SSL_get_current_compression' function. */ #define HAVE_SSL_GET_CURRENT_COMPRESSION 1 +/* Define to 1 if stdbool.h conforms to C99. */ +#if (_MSC_VER >= 1800) +#define HAVE_STDBOOL_H 1 +#endif + /* Define to 1 if you have the header file. */ /* #undef HAVE_STDINT_H */ /* Define to 1 if you have the header file. */ #define HAVE_STDLIB_H 1 -/* Define to 1 if you have the `strerror' function. */ -#ifndef HAVE_STRERROR -#define HAVE_STRERROR 1 -#endif +/* Define to 1 if you have the `strchrnul' function. */ +/* #undef HAVE_STRCHRNUL */ /* Define to 1 if you have the `strerror_r' function. */ /* #undef HAVE_STRERROR_R */ @@ -352,16 +416,22 @@ #define HAVE_STRONG_RANDOM 1 /* Define to 1 if you have the `strtoll' function. */ -//#define HAVE_STRTOLL 1 - -/* Define to 1 if you have the `strtoq' function. */ -/* #undef HAVE_STRTOQ */ +#ifdef HAVE_LONG_LONG_INT_64 +#define HAVE_STRTOLL 1 +/* Before VS2013, use Microsoft's nonstandard equivalent function */ +#if (_MSC_VER < 1800) +#define strtoll _strtoi64 +#endif +#endif /* Define to 1 if you have the `strtoull' function. */ -//#define HAVE_STRTOULL 1 - -/* Define to 1 if you have the `strtouq' function. */ -/* #undef HAVE_STRTOUQ */ +#ifdef HAVE_LONG_LONG_INT_64 +#define HAVE_STRTOULL 1 +/* Before VS2013, use Microsoft's nonstandard equivalent function */ +#if (_MSC_VER < 1800) +#define strtoull _strtoui64 +#endif +#endif /* Define to 1 if the system has the type `struct addrinfo'. */ #if (_MSC_VER > 1200) @@ -417,6 +487,9 @@ /* Define to 1 if you have the header file. */ /* #undef HAVE_SYS_IPC_H */ +/* Define to 1 if you have the header file. */ +/* #undef HAVE_SYS_PRCTL_H */ + /* Define to 1 if you have the header file. */ /* #undef HAVE_SYS_PSTAT_H */ @@ -448,9 +521,6 @@ `HAVE_STRUCT_TM_TM_ZONE' instead. */ /* #undef HAVE_TM_ZONE */ -/* Define to 1 if you have the `towlower' function. */ -#define HAVE_TOWLOWER 1 - /* Define to 1 if your compiler understands `typeof' or something similar. */ /* #undef HAVE_TYPEOF */ @@ -484,15 +554,9 @@ /* Define to 1 if you have the header file. */ #define HAVE_UTIME_H 1 -/* Define to 1 if you have the `vsnprintf' function. */ -#define HAVE_VSNPRINTF 1 - /* Define to 1 if you have the header file. 
*/ #define HAVE_WCHAR_H 1 -/* Define to 1 if you have the `wcstombs' function. */ -#define HAVE_WCSTOMBS 1 - /* Define to 1 if you have the `wcstombs_l' function. */ #define HAVE_WCSTOMBS_L 1 @@ -502,6 +566,15 @@ /* Define to 1 if you have the header file. */ /* #undef HAVE_WINLDAP_H */ +/* Define to 1 if you have the `X509_get_signature_nid' function. */ +#define HAVE_X509_GET_SIGNATURE_NID 1 + +/* Define to 1 if the system has the type `_Bool'. */ +/* #undef HAVE__BOOL */ + +/* Define to 1 if your compiler understands __builtin_bswap16. */ +/* #undef HAVE__BUILTIN_BSWAP16 */ + /* Define to 1 if your compiler understands __builtin_bswap32. */ /* #undef HAVE__BUILTIN_BSWAP32 */ @@ -511,6 +584,9 @@ /* Define to 1 if your compiler understands __builtin_constant_p. */ /* #undef HAVE__BUILTIN_CONSTANT_P */ +/* Define to 1 if your compiler understands __builtin_$op_overflow. */ +/* #undef HAVE__BUILTIN_OP_OVERFLOW */ + /* Define to 1 if your compiler understands __builtin_types_compatible_p. */ /* #undef HAVE__BUILTIN_TYPES_COMPATIBLE_P */ @@ -526,10 +602,7 @@ /* Define to 1 if your compiler understands _Static_assert. */ /* #undef HAVE__STATIC_ASSERT */ -/* Define to 1 if your compiler understands __VA_ARGS__ in macros. */ -#define HAVE__VA_ARGS 1 - -/* Define to the appropriate snprintf length modifier for 64-bit ints. */ +/* Define to the appropriate printf length modifier for 64-bit ints. */ #define INT64_MODIFIER "ll" /* Define to 1 if `locale_t' requires . */ @@ -551,10 +624,10 @@ #define PACKAGE_NAME "PostgreSQL" /* Define to the full name and version of this package. */ -#define PACKAGE_STRING "PostgreSQL 11devel" +#define PACKAGE_STRING "PostgreSQL 12devel" /* Define to the version of this package. */ -#define PACKAGE_VERSION "11devel" +#define PACKAGE_VERSION "12devel" /* Define to the name of a signed 128-bit integer type. */ #undef PG_INT128_TYPE @@ -563,10 +636,10 @@ #define PG_INT64_TYPE long long int /* PostgreSQL version as a string */ -#define PG_VERSION "11devel" +#define PG_VERSION "12devel" /* PostgreSQL version as a number */ -#define PG_VERSION_NUM 110000 +#define PG_VERSION_NUM 120000 /* Define to the one symbol short name of this package. */ #define PACKAGE_TARNAME "postgresql" @@ -578,6 +651,9 @@ /* A string containing the version number, platform, and C compiler */ #define PG_VERSION_STR "Uninitialized version string (win32)" +/* The size of `bool', as computed by sizeof. */ +#define SIZEOF_BOOL 1 + /* The size of `long', as computed by sizeof. */ #define SIZEOF_LONG 4 @@ -598,7 +674,7 @@ /* Define to 1 if you have the ANSI C header files. */ #define STDC_HEADERS 1 -/* Define to 1 if strerror_r() returns a int. */ +/* Define to 1 if strerror_r() returns int. */ /* #undef STRERROR_R_INT */ /* Define to 1 if your declares `struct tm'. */ @@ -619,6 +695,9 @@ /* Define to 1 to build with LDAP support. (--with-ldap) */ /* #undef USE_LDAP */ +/* Define to 1 to build with LLVM based JIT support. (--with-llvm) */ +/* #undef USE_LLVM */ + /* Define to select named POSIX semaphores. */ /* #undef USE_NAMED_POSIX_SEMAPHORES */ @@ -631,10 +710,7 @@ /* Define to 1 to build with PAM support. (--with-pam) */ /* #undef USE_PAM */ -/* Use replacement snprintf() functions. */ -#define USE_REPL_SNPRINTF 1 - -/* Define to 1 to use Intel SSE 4.2 CRC instructions with a runtime check. */ +/* Define to 1 to use software CRC-32C implementation (slicing-by-8). */ #if (_MSC_VER < 1500) #define USE_SLICING_BY_8_CRC32C 1 #endif @@ -642,7 +718,7 @@ /* Define to 1 use Intel SSE 4.2 CRC instructions. 
*/ /* #undef USE_SSE42_CRC32C */ -/* Define to 1 to use Intel SSSE 4.2 CRC instructions with a runtime check. */ +/* Define to 1 to use Intel SSE 4.2 CRC instructions with a runtime check. */ #if (_MSC_VER >= 1500) #define USE_SSE42_CRC32C_WITH_RUNTIME_CHECK #endif @@ -680,6 +756,20 @@ #define inline __inline #endif +/* Define to keyword to use for C99 restrict support, or to nothing if this is + not supported */ +/* Works for C and C++ in Visual Studio 2008 and upwards */ +#if (_MSC_VER >= 1500) +#define pg_restrict __restrict +#else +#define pg_restrict +#endif + +/* Define to the equivalent of the C99 'restrict' keyword, or to + nothing if this is not supported. Do not define if restrict is + supported directly. */ +/* not defined, because it'd conflict with __declspec(restrict) */ + /* Define to empty if the C compiler does not understand signed types. */ /* #undef signed */ diff --git a/src/include/pg_config_manual.h b/src/include/pg_config_manual.h index f3b35297d1..cc5eedfc41 100644 --- a/src/include/pg_config_manual.h +++ b/src/include/pg_config_manual.h @@ -6,13 +6,19 @@ * for developers. If you edit any of these, be sure to do a *full* * rebuild (and an initdb if noted). * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/include/pg_config_manual.h *------------------------------------------------------------------------ */ +/* + * This is default value for wal_segment_size to be used at initdb when run + * without --walsegsize option. Must be a valid segment size. + */ +#define DEFAULT_XLOG_SEG_SIZE (16*1024*1024) + /* * Maximum length for identifiers (e.g. table names, column names, * function names). Names actually are limited to one less byte than this, @@ -66,16 +72,6 @@ */ #define NUM_ATOMICS_SEMAPHORES 64 -/* - * Define this if you want to allow the lo_import and lo_export SQL - * functions to be executed by ordinary users. By default these - * functions are only available to the Postgres superuser. CAUTION: - * These functions are SECURITY HOLES since they can read and write - * any file that the PostgreSQL server has permission to access. If - * you turn this on, don't say we didn't warn you. - */ -/* #define ALLOW_DANGEROUS_LO_FUNCTIONS */ - /* * MAXPGPATH: standard size of a pathname buffer in PostgreSQL (hence, * maximum usable pathname length is one less). @@ -140,7 +136,9 @@ /* * USE_PREFETCH code should be compiled only if we have a way to implement * prefetching. (This is decoupled from USE_POSIX_FADVISE because there - * might in future be support for alternative low-level prefetch APIs.) + * might in future be support for alternative low-level prefetch APIs. + * If you change this, you probably need to adjust the error message in + * check_effective_io_concurrency.) */ #ifdef USE_POSIX_FADVISE #define USE_PREFETCH @@ -290,6 +288,13 @@ */ /* #define COPY_PARSE_PLAN_TREES */ +/* + * Define this to force all parse and plan trees to be passed through + * outfuncs.c/readfuncs.c, to facilitate catching errors and omissions in + * those modules. 
+ */ +/* #define WRITE_READ_PARSE_PLAN_TREES */ + /* * Define this to force all raw parse trees for DML statements to be scanned * by raw_expression_tree_walker(), to facilitate catching errors and diff --git a/src/include/pg_getopt.h b/src/include/pg_getopt.h index 16d5a326f9..c050f2025f 100644 --- a/src/include/pg_getopt.h +++ b/src/include/pg_getopt.h @@ -2,7 +2,7 @@ * Portions Copyright (c) 1987, 1993, 1994 * The Regents of the University of California. All rights reserved. * - * Portions Copyright (c) 2003-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 2003-2018, PostgreSQL Global Development Group * * src/include/pg_getopt.h */ diff --git a/src/include/pg_trace.h b/src/include/pg_trace.h index 29bc95d0d3..e00e39da4e 100644 --- a/src/include/pg_trace.h +++ b/src/include/pg_trace.h @@ -3,7 +3,7 @@ * * Definitions for the PostgreSQL tracing framework * - * Copyright (c) 2006-2017, PostgreSQL Global Development Group + * Copyright (c) 2006-2018, PostgreSQL Global Development Group * * src/include/pg_trace.h * ---------- diff --git a/src/include/pgstat.h b/src/include/pgstat.h index cb05d9b81e..f1c10d16b8 100644 --- a/src/include/pgstat.h +++ b/src/include/pgstat.h @@ -3,7 +3,7 @@ * * Definitions for the PostgreSQL statistics collector daemon. * - * Copyright (c) 2001-2017, PostgreSQL Global Development Group + * Copyright (c) 2001-2018, PostgreSQL Global Development Group * * src/include/pgstat.h * ---------- @@ -759,8 +759,8 @@ typedef enum WAIT_EVENT_BGWRITER_HIBERNATE, WAIT_EVENT_BGWRITER_MAIN, WAIT_EVENT_CHECKPOINTER_MAIN, - WAIT_EVENT_LOGICAL_LAUNCHER_MAIN, WAIT_EVENT_LOGICAL_APPLY_MAIN, + WAIT_EVENT_LOGICAL_LAUNCHER_MAIN, WAIT_EVENT_PGSTAT_MAIN, WAIT_EVENT_RECOVERY_WAL_ALL, WAIT_EVENT_RECOVERY_WAL_STREAM, @@ -802,16 +802,34 @@ typedef enum WAIT_EVENT_BGWORKER_SHUTDOWN = PG_WAIT_IPC, WAIT_EVENT_BGWORKER_STARTUP, WAIT_EVENT_BTREE_PAGE, + WAIT_EVENT_CLOG_GROUP_UPDATE, WAIT_EVENT_EXECUTE_GATHER, + WAIT_EVENT_HASH_BATCH_ALLOCATING, + WAIT_EVENT_HASH_BATCH_ELECTING, + WAIT_EVENT_HASH_BATCH_LOADING, + WAIT_EVENT_HASH_BUILD_ALLOCATING, + WAIT_EVENT_HASH_BUILD_ELECTING, + WAIT_EVENT_HASH_BUILD_HASHING_INNER, + WAIT_EVENT_HASH_BUILD_HASHING_OUTER, + WAIT_EVENT_HASH_GROW_BATCHES_ALLOCATING, + WAIT_EVENT_HASH_GROW_BATCHES_DECIDING, + WAIT_EVENT_HASH_GROW_BATCHES_ELECTING, + WAIT_EVENT_HASH_GROW_BATCHES_FINISHING, + WAIT_EVENT_HASH_GROW_BATCHES_REPARTITIONING, + WAIT_EVENT_HASH_GROW_BUCKETS_ALLOCATING, + WAIT_EVENT_HASH_GROW_BUCKETS_ELECTING, + WAIT_EVENT_HASH_GROW_BUCKETS_REINSERTING, WAIT_EVENT_LOGICAL_SYNC_DATA, WAIT_EVENT_LOGICAL_SYNC_STATE_CHANGE, WAIT_EVENT_MQ_INTERNAL, WAIT_EVENT_MQ_PUT_MESSAGE, WAIT_EVENT_MQ_RECEIVE, WAIT_EVENT_MQ_SEND, - WAIT_EVENT_PARALLEL_FINISH, WAIT_EVENT_PARALLEL_BITMAP_SCAN, + WAIT_EVENT_PARALLEL_CREATE_INDEX_SCAN, + WAIT_EVENT_PARALLEL_FINISH, WAIT_EVENT_PROCARRAY_GROUP_UPDATE, + WAIT_EVENT_PROMOTE, WAIT_EVENT_REPLICATION_ORIGIN_DROP, WAIT_EVENT_REPLICATION_SLOT_DROP, WAIT_EVENT_SAFE_SNAPSHOT, @@ -904,6 +922,7 @@ typedef enum WAIT_EVENT_WAL_INIT_SYNC, WAIT_EVENT_WAL_INIT_WRITE, WAIT_EVENT_WAL_READ, + WAIT_EVENT_WAL_SYNC, WAIT_EVENT_WAL_SYNC_METHOD_ASSIGN, WAIT_EVENT_WAL_WRITE } WaitEventIO; @@ -1002,8 +1021,14 @@ typedef struct PgBackendStatus /* application name; MUST be null-terminated */ char *st_appname; - /* current command string; MUST be null-terminated */ - char *st_activity; + /* + * Current command string; MUST be null-terminated. Note that this string + * possibly is truncated in the middle of a multi-byte character. 
As + * activity strings are stored more frequently than read, that allows to + * move the cost of correct truncation to the display side. Use + * pgstat_clip_activity() to truncate correctly. + */ + char *st_activity_raw; /* * Command progress reporting. Any command which wishes can advertise @@ -1192,6 +1217,8 @@ extern PgStat_BackendFunctionEntry *find_funcstat_entry(Oid func_id); extern void pgstat_initstats(Relation rel); +extern char *pgstat_clip_activity(const char *raw_activity); + /* ---------- * pgstat_report_wait_start() - * diff --git a/src/include/pgtar.h b/src/include/pgtar.h index 9a1be4c9f6..9eaa8bf15d 100644 --- a/src/include/pgtar.h +++ b/src/include/pgtar.h @@ -4,7 +4,7 @@ * Functions for manipulating tarfile datastructures (src/port/tar.c) * * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/include/pgtar.h diff --git a/src/include/pgtime.h b/src/include/pgtime.h index 4fd8f75ef9..5f18b04b20 100644 --- a/src/include/pgtime.h +++ b/src/include/pgtime.h @@ -3,7 +3,7 @@ * pgtime.h * PostgreSQL internal timezone library * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * * IDENTIFICATION * src/include/pgtime.h @@ -70,7 +70,7 @@ extern size_t pg_strftime(char *s, size_t max, const char *format, /* these functions and variables are in pgtz.c */ -extern pg_tz *session_timezone; +extern PGDLLIMPORT pg_tz *session_timezone; extern pg_tz *log_timezone; extern void pg_timezone_initialize(void); diff --git a/src/include/port.h b/src/include/port.h index b1ba645655..81583d557c 100644 --- a/src/include/port.h +++ b/src/include/port.h @@ -3,7 +3,7 @@ * port.h * Header for src/port/ compatibility functions. * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/include/port.h @@ -17,6 +17,15 @@ #include #include +/* + * Windows has enough specialized port stuff that we push most of it off + * into another file. + * Note: Some CYGWIN includes might #define WIN32. + */ +#if defined(WIN32) && !defined(__CYGWIN__) +#include "port/win32_port.h" +#endif + /* socket has a different definition on WIN32 */ #ifndef WIN32 typedef int pgsocket; @@ -101,11 +110,6 @@ extern int find_other_exec(const char *argv0, const char *target, /* Doesn't belong here, but this is used with find_other_exec(), so... */ #define PG_BACKEND_VERSIONSTR "postgres (PostgreSQL) " PG_VERSION "\n" -/* Windows security token manipulation (in exec.c) */ -#ifdef WIN32 -extern BOOL AddUserToTokenDacl(HANDLE hToken); -#endif - #if defined(WIN32) || defined(__CYGWIN__) #define EXE ".exe" @@ -130,7 +134,12 @@ extern unsigned char pg_tolower(unsigned char ch); extern unsigned char pg_ascii_toupper(unsigned char ch); extern unsigned char pg_ascii_tolower(unsigned char ch); -#ifdef USE_REPL_SNPRINTF +/* + * Beginning in v12, we always replace snprintf() and friends with our own + * implementation. This symbol is no longer consulted by the core code, + * but keep it defined anyway in case any extensions are looking at it. 
+ */ +#define USE_REPL_SNPRINTF 1 /* * Versions of libintl >= 0.13 try to replace printf() and friends with @@ -143,6 +152,9 @@ extern unsigned char pg_ascii_tolower(unsigned char ch); #ifdef snprintf #undef snprintf #endif +#ifdef vsprintf +#undef vsprintf +#endif #ifdef sprintf #undef sprintf #endif @@ -152,69 +164,55 @@ extern unsigned char pg_ascii_tolower(unsigned char ch); #ifdef fprintf #undef fprintf #endif +#ifdef vprintf +#undef vprintf +#endif #ifdef printf #undef printf #endif extern int pg_vsnprintf(char *str, size_t count, const char *fmt, va_list args); extern int pg_snprintf(char *str, size_t count, const char *fmt,...) pg_attribute_printf(3, 4); +extern int pg_vsprintf(char *str, const char *fmt, va_list args); extern int pg_sprintf(char *str, const char *fmt,...) pg_attribute_printf(2, 3); extern int pg_vfprintf(FILE *stream, const char *fmt, va_list args); extern int pg_fprintf(FILE *stream, const char *fmt,...) pg_attribute_printf(2, 3); +extern int pg_vprintf(const char *fmt, va_list args); extern int pg_printf(const char *fmt,...) pg_attribute_printf(1, 2); /* - * The GCC-specific code below prevents the pg_attribute_printf above from - * being replaced, and this is required because gcc doesn't know anything - * about pg_printf. + * We use __VA_ARGS__ for printf to prevent replacing references to + * the "printf" format archetype in format() attribute declarations. + * That unfortunately means that taking a function pointer to printf + * will not do what we'd wish. (If you need to do that, you must name + * pg_printf explicitly.) For printf's sibling functions, use + * parameterless macros so that function pointers will work unsurprisingly. */ -#ifdef __GNUC__ -#define vsnprintf(...) pg_vsnprintf(__VA_ARGS__) -#define snprintf(...) pg_snprintf(__VA_ARGS__) -#define sprintf(...) pg_sprintf(__VA_ARGS__) -#define vfprintf(...) pg_vfprintf(__VA_ARGS__) -#define fprintf(...) pg_fprintf(__VA_ARGS__) -#define printf(...) pg_printf(__VA_ARGS__) -#else #define vsnprintf pg_vsnprintf #define snprintf pg_snprintf +#define vsprintf pg_vsprintf #define sprintf pg_sprintf #define vfprintf pg_vfprintf #define fprintf pg_fprintf -#define printf pg_printf -#endif -#endif /* USE_REPL_SNPRINTF */ +#define vprintf pg_vprintf +#define printf(...) pg_printf(__VA_ARGS__) -#if defined(WIN32) -/* - * Versions of libintl >= 0.18? try to replace setlocale() with a macro - * to their own versions. Remove the macro, if it exists, because it - * ends up calling the wrong version when the backend and libintl use - * different versions of msvcrt. - */ -#if defined(setlocale) -#undef setlocale -#endif +/* This is also provided by snprintf.c */ +extern int pg_strfromd(char *str, size_t count, int precision, double value); -/* - * Define our own wrapper macro around setlocale() to work around bugs in - * Windows' native setlocale() function. 
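The new comment above explains why printf alone is wrapped as a variadic macro while its siblings become object-like macros: a function-like macro expands only where the name is followed by an argument list, so format(printf, ...) attribute declarations are left untouched, at the cost of a bare function pointer to printf not picking up the wrapper. A standalone illustration of that trade-off; my_printf and my_fprintf are stand-ins, not the tree's pg_printf family:

#include <stdarg.h>
#include <stdio.h>

static int
my_printf(const char *fmt, ...)
{
    va_list args;
    int     n;

    va_start(args, fmt);
    n = vfprintf(stdout, fmt, args);    /* pretend this is the replacement */
    va_end(args);
    return n;
}

static int
my_fprintf(FILE *stream, const char *fmt, ...)
{
    va_list args;
    int     n;

    va_start(args, fmt);
    n = vfprintf(stream, fmt, args);
    va_end(args);
    return n;
}

#define printf(...) my_printf(__VA_ARGS__) /* expands at call sites only */
#define fprintf my_fprintf                 /* expands everywhere, pointers included */

int
main(void)
{
    int (*fp) (FILE *, const char *, ...) = fprintf;    /* really my_fprintf */

    printf("routed through my_printf\n");   /* call syntax, so the macro fires */
    fp(stderr, "routed through my_fprintf\n");
    return 0;
}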
- */ -extern char *pgwin32_setlocale(int category, const char *locale); +/* Replace strerror() with our own, somewhat more robust wrapper */ +extern char *pg_strerror(int errnum); +#define strerror pg_strerror -#define setlocale(a,b) pgwin32_setlocale(a,b) -#endif /* WIN32 */ +/* Likewise for strerror_r(); note we prefer the GNU API for that */ +extern char *pg_strerror_r(int errnum, char *buf, size_t buflen); +#define strerror_r pg_strerror_r +#define PG_STRERROR_R_BUFLEN 256 /* Recommended buffer size for strerror_r */ /* Portable prompt handling */ extern void simple_prompt(const char *prompt, char *destination, size_t destlen, bool echo); -#ifdef WIN32 -#define PG_SIGNAL_COUNT 32 -#define kill(pid,sig) pgkill(pid,sig) -extern int pgkill(int pid, int sig); -#endif - extern int pclose_check(FILE *stream); /* Global variable holding time zone information. */ @@ -262,23 +260,6 @@ extern bool pgwin32_is_junction(const char *path); extern bool rmtree(const char *path, bool rmtopdir); -/* - * stat() is not guaranteed to set the st_size field on win32, so we - * redefine it to our own implementation that is. - * - * We must pull in sys/stat.h here so the system header definition - * goes in first, and we redefine that, and not the other way around. - * - * Some frontends don't need the size from stat, so if UNSAFE_STAT_OK - * is defined we don't bother with this. - */ -#if defined(WIN32) && !defined(__CYGWIN__) && !defined(UNSAFE_STAT_OK) -#include -extern int pgwin32_safestat(const char *path, struct stat *buf); - -#define stat(a,b) pgwin32_safestat(a,b) -#endif - #if defined(WIN32) && !defined(__CYGWIN__) /* @@ -288,11 +269,8 @@ extern int pgwin32_safestat(const char *path, struct stat *buf); #define O_DIRECT 0x80000000 extern int pgwin32_open(const char *, int,...); extern FILE *pgwin32_fopen(const char *, const char *); - -#ifndef FRONTEND #define open(a,b,c) pgwin32_open(a,b,c) #define fopen(a,b) pgwin32_fopen(a,b) -#endif /* * Mingw-w64 headers #define popen and pclose to _popen and _pclose. We want @@ -353,7 +331,7 @@ extern int gettimeofday(struct timeval *tp, struct timezone *tzp); extern char *crypt(const char *key, const char *setting); #endif -/* WIN32 handled in port/win32.h */ +/* WIN32 handled in port/win32_port.h */ #ifndef WIN32 #define pgoff_t off_t #ifdef __NetBSD__ @@ -382,7 +360,23 @@ extern int getpeereid(int sock, uid_t *uid, gid_t *gid); #ifndef HAVE_ISINF extern int isinf(double x); -#endif +#else +/* + * Glibc doesn't use the builtin for clang due to a *gcc* bug in a version + * newer than the gcc compatibility clang claims to have. This would cause a + * *lot* of superfluous function calls, therefore revert when using clang. In + * C++ there's issues with libc++ (not libstdc++), so disable as well. + */ +#if defined(__clang__) && !defined(__cplusplus) +/* needs to be separate to not confuse other compilers */ +#if __has_builtin(__builtin_isinf) +/* need to include before, to avoid getting overwritten */ +#include +#undef isinf +#define isinf __builtin_isinf +#endif /* __has_builtin(isinf) */ +#endif /* __clang__ && !__cplusplus */ +#endif /* !HAVE_ISINF */ #ifndef HAVE_MKDTEMP extern char *mkdtemp(char *path); @@ -398,6 +392,23 @@ extern double rint(double x); extern int inet_aton(const char *cp, struct in_addr *addr); #endif +/* + * Windows and older Unix don't have pread(2) and pwrite(2). We have + * replacement functions, but they have slightly different semantics so we'll + * use a name with a pg_ prefix to avoid confusion. 
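The comment above introduces the pg_-prefixed pread()/pwrite() wrappers whose declarations follow; the distinct name signals that the replacements may not behave exactly like POSIX pread(2)/pwrite(2). Purely as an illustration of such a difference (this is not claimed to be the tree's src/port implementation), an emulation built from lseek() plus read() moves the file position as a side effect:

#include <sys/types.h>
#include <unistd.h>

/*
 * Hypothetical fallback: emulate pread()/pwrite() with lseek().  Unlike the
 * native calls, these change the current file offset, which is exactly the
 * kind of semantic drift that justifies a separate pg_ name.
 */
ssize_t
fallback_pread(int fd, void *buf, size_t nbyte, off_t offset)
{
    if (lseek(fd, offset, SEEK_SET) < 0)
        return -1;
    return read(fd, buf, nbyte);
}

ssize_t
fallback_pwrite(int fd, const void *buf, size_t nbyte, off_t offset)
{
    if (lseek(fd, offset, SEEK_SET) < 0)
        return -1;
    return write(fd, buf, nbyte);
}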
+ */ +#ifdef HAVE_PREAD +#define pg_pread pread +#else +extern ssize_t pg_pread(int fd, void *buf, size_t nbyte, off_t offset); +#endif + +#ifdef HAVE_PWRITE +#define pg_pwrite pwrite +#else +extern ssize_t pg_pwrite(int fd, const void *buf, size_t nbyte, off_t offset); +#endif + #if !HAVE_DECL_STRLCAT extern size_t strlcat(char *dst, const char *src, size_t siz); #endif @@ -406,6 +417,10 @@ extern size_t strlcat(char *dst, const char *src, size_t siz); extern size_t strlcpy(char *dst, const char *src, size_t siz); #endif +#if !HAVE_DECL_STRNLEN +extern size_t strnlen(const char *str, size_t maxlen); +#endif + #if !defined(HAVE_RANDOM) extern long random(void); #endif @@ -422,9 +437,30 @@ extern void srandom(unsigned int seed); #define SSL_get_current_compression(x) 0 #endif -/* thread.h */ -extern char *pqStrerror(int errnum, char *strerrbuf, size_t buflen); +#ifndef HAVE_DLOPEN +extern void *dlopen(const char *file, int mode); +extern void *dlsym(void *handle, const char *symbol); +extern int dlclose(void *handle); +extern char *dlerror(void); +#endif + +/* + * In some older systems, the RTLD_NOW flag isn't defined and the mode + * argument to dlopen must always be 1. + */ +#if !HAVE_DECL_RTLD_NOW +#define RTLD_NOW 1 +#endif +/* + * The RTLD_GLOBAL flag is wanted if available, but it doesn't exist + * everywhere. If it doesn't exist, set it to 0 so it has no effect. + */ +#if !HAVE_DECL_RTLD_GLOBAL +#define RTLD_GLOBAL 0 +#endif + +/* thread.h */ #ifndef WIN32 extern int pqGetpwuid(uid_t uid, struct passwd *resultbuf, char *buffer, size_t buflen, struct passwd **result); diff --git a/src/include/port/atomics.h b/src/include/port/atomics.h index 2bfd1ed728..0470391675 100644 --- a/src/include/port/atomics.h +++ b/src/include/port/atomics.h @@ -28,7 +28,7 @@ * For an introduction to using memory barriers within the PostgreSQL backend, * see src/backend/storage/lmgr/README.barrier * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/include/port/atomics.h @@ -174,8 +174,6 @@ static inline void pg_atomic_init_flag(volatile pg_atomic_flag *ptr) { - AssertPointerAlignment(ptr, sizeof(*ptr)); - pg_atomic_init_flag_impl(ptr); } @@ -189,8 +187,6 @@ pg_atomic_init_flag(volatile pg_atomic_flag *ptr) static inline bool pg_atomic_test_set_flag(volatile pg_atomic_flag *ptr) { - AssertPointerAlignment(ptr, sizeof(*ptr)); - return pg_atomic_test_set_flag_impl(ptr); } @@ -204,8 +200,6 @@ pg_atomic_test_set_flag(volatile pg_atomic_flag *ptr) static inline bool pg_atomic_unlocked_test_flag(volatile pg_atomic_flag *ptr) { - AssertPointerAlignment(ptr, sizeof(*ptr)); - return pg_atomic_unlocked_test_flag_impl(ptr); } @@ -217,8 +211,6 @@ pg_atomic_unlocked_test_flag(volatile pg_atomic_flag *ptr) static inline void pg_atomic_clear_flag(volatile pg_atomic_flag *ptr) { - AssertPointerAlignment(ptr, sizeof(*ptr)); - pg_atomic_clear_flag_impl(ptr); } diff --git a/src/include/port/atomics/arch-arm.h b/src/include/port/atomics/arch-arm.h index 58614ae2ca..99fe3bbfd2 100644 --- a/src/include/port/atomics/arch-arm.h +++ b/src/include/port/atomics/arch-arm.h @@ -3,7 +3,7 @@ * arch-arm.h * Atomic operations considerations specific to ARM * - * Portions Copyright (c) 2013-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 2013-2018, PostgreSQL Global Development Group * * NOTES: * diff --git a/src/include/port/atomics/arch-hppa.h 
b/src/include/port/atomics/arch-hppa.h index c4176c6f42..818a3e0c87 100644 --- a/src/include/port/atomics/arch-hppa.h +++ b/src/include/port/atomics/arch-hppa.h @@ -3,7 +3,7 @@ * arch-hppa.h * Atomic operations considerations specific to HPPA * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * NOTES: diff --git a/src/include/port/atomics/arch-ia64.h b/src/include/port/atomics/arch-ia64.h index 3dc4b298e1..571a79ee67 100644 --- a/src/include/port/atomics/arch-ia64.h +++ b/src/include/port/atomics/arch-ia64.h @@ -3,7 +3,7 @@ * arch-ia64.h * Atomic operations considerations specific to intel itanium * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * NOTES: diff --git a/src/include/port/atomics/arch-ppc.h b/src/include/port/atomics/arch-ppc.h index ed30468398..faeb7a5d15 100644 --- a/src/include/port/atomics/arch-ppc.h +++ b/src/include/port/atomics/arch-ppc.h @@ -3,7 +3,7 @@ * arch-ppc.h * Atomic operations considerations specific to PowerPC * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * NOTES: diff --git a/src/include/port/atomics/arch-x86.h b/src/include/port/atomics/arch-x86.h index bf8152573d..5a3d95e056 100644 --- a/src/include/port/atomics/arch-x86.h +++ b/src/include/port/atomics/arch-x86.h @@ -7,7 +7,7 @@ * support for xadd and cmpxchg. Given that the 386 isn't supported anywhere * anymore that's not much of a restriction luckily. * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * NOTES: diff --git a/src/include/port/atomics/fallback.h b/src/include/port/atomics/fallback.h index 4e07add0a4..88a967ad5b 100644 --- a/src/include/port/atomics/fallback.h +++ b/src/include/port/atomics/fallback.h @@ -4,7 +4,7 @@ * Fallback for platforms without spinlock and/or atomics support. Slower * than native atomics support, but not unusably slow. * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/include/port/atomics/fallback.h @@ -80,6 +80,7 @@ typedef struct pg_atomic_flag #else int sema; #endif + volatile bool value; } pg_atomic_flag; #endif /* PG_HAVE_ATOMIC_FLAG_SUPPORT */ @@ -132,17 +133,7 @@ extern bool pg_atomic_test_set_flag_impl(volatile pg_atomic_flag *ptr); extern void pg_atomic_clear_flag_impl(volatile pg_atomic_flag *ptr); #define PG_HAVE_ATOMIC_UNLOCKED_TEST_FLAG -static inline bool -pg_atomic_unlocked_test_flag_impl(volatile pg_atomic_flag *ptr) -{ - /* - * Can't do this efficiently in the semaphore based implementation - we'd - * have to try to acquire the semaphore - so always return true. That's - * correct, because this is only an unlocked test anyway. Do this in the - * header so compilers can optimize the test away. 
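The fallback.h hunk above adds a value member to the semaphore-based pg_atomic_flag and moves pg_atomic_unlocked_test_flag_impl() out of line, presumably so the unlocked test can consult that mirrored state instead of unconditionally returning true. A self-contained model of the idea, with a mutex standing in for the PGSemaphore and names that are illustrative rather than the tree's:

#include <pthread.h>
#include <stdbool.h>

typedef struct model_atomic_flag
{
    pthread_mutex_t lock;       /* plays the role of the semaphore */
    volatile bool   value;      /* mirrors the flag for unlocked reads */
} model_atomic_flag;

void
model_init_flag(model_atomic_flag *ptr)
{
    pthread_mutex_init(&ptr->lock, NULL);
    ptr->value = false;
}

bool
model_test_set_flag(model_atomic_flag *ptr)
{
    bool    was_clear;

    pthread_mutex_lock(&ptr->lock);
    was_clear = !ptr->value;
    ptr->value = true;
    pthread_mutex_unlock(&ptr->lock);
    return was_clear;           /* true if we acquired the flag */
}

bool
model_unlocked_test_flag(model_atomic_flag *ptr)
{
    /* May be momentarily stale, which is all an "unlocked" test promises. */
    return !ptr->value;
}

void
model_clear_flag(model_atomic_flag *ptr)
{
    pthread_mutex_lock(&ptr->lock);
    ptr->value = false;
    pthread_mutex_unlock(&ptr->lock);
}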
- */ - return true; -} +extern bool pg_atomic_unlocked_test_flag_impl(volatile pg_atomic_flag *ptr); #endif /* PG_HAVE_ATOMIC_FLAG_SIMULATION */ diff --git a/src/include/port/atomics/generic-acc.h b/src/include/port/atomics/generic-acc.h index 4ea3ed3fc7..bd6caaf817 100644 --- a/src/include/port/atomics/generic-acc.h +++ b/src/include/port/atomics/generic-acc.h @@ -3,7 +3,7 @@ * generic-acc.h * Atomic operations support when using HPs acc on HPUX * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * NOTES: diff --git a/src/include/port/atomics/generic-gcc.h b/src/include/port/atomics/generic-gcc.h index 7efc0861e7..3a1dc88377 100644 --- a/src/include/port/atomics/generic-gcc.h +++ b/src/include/port/atomics/generic-gcc.h @@ -3,7 +3,7 @@ * generic-gcc.h * Atomic operations, implemented using gcc (or compatible) intrinsics. * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * NOTES: @@ -176,6 +176,8 @@ pg_atomic_compare_exchange_u32_impl(volatile pg_atomic_uint32 *ptr, } #endif +/* if we have 32-bit __sync_val_compare_and_swap, assume we have these too: */ + #if !defined(PG_HAVE_ATOMIC_FETCH_ADD_U32) && defined(HAVE_GCC__SYNC_INT32_CAS) #define PG_HAVE_ATOMIC_FETCH_ADD_U32 static inline uint32 @@ -185,6 +187,33 @@ pg_atomic_fetch_add_u32_impl(volatile pg_atomic_uint32 *ptr, int32 add_) } #endif +#if !defined(PG_HAVE_ATOMIC_FETCH_SUB_U32) && defined(HAVE_GCC__SYNC_INT32_CAS) +#define PG_HAVE_ATOMIC_FETCH_SUB_U32 +static inline uint32 +pg_atomic_fetch_sub_u32_impl(volatile pg_atomic_uint32 *ptr, int32 sub_) +{ + return __sync_fetch_and_sub(&ptr->value, sub_); +} +#endif + +#if !defined(PG_HAVE_ATOMIC_FETCH_AND_U32) && defined(HAVE_GCC__SYNC_INT32_CAS) +#define PG_HAVE_ATOMIC_FETCH_AND_U32 +static inline uint32 +pg_atomic_fetch_and_u32_impl(volatile pg_atomic_uint32 *ptr, uint32 and_) +{ + return __sync_fetch_and_and(&ptr->value, and_); +} +#endif + +#if !defined(PG_HAVE_ATOMIC_FETCH_OR_U32) && defined(HAVE_GCC__SYNC_INT32_CAS) +#define PG_HAVE_ATOMIC_FETCH_OR_U32 +static inline uint32 +pg_atomic_fetch_or_u32_impl(volatile pg_atomic_uint32 *ptr, uint32 or_) +{ + return __sync_fetch_and_or(&ptr->value, or_); +} +#endif + #if !defined(PG_DISABLE_64_BIT_ATOMICS) @@ -214,6 +243,8 @@ pg_atomic_compare_exchange_u64_impl(volatile pg_atomic_uint64 *ptr, } #endif +/* if we have 64-bit __sync_val_compare_and_swap, assume we have these too: */ + #if !defined(PG_HAVE_ATOMIC_FETCH_ADD_U64) && defined(HAVE_GCC__SYNC_INT64_CAS) #define PG_HAVE_ATOMIC_FETCH_ADD_U64 static inline uint64 @@ -223,6 +254,33 @@ pg_atomic_fetch_add_u64_impl(volatile pg_atomic_uint64 *ptr, int64 add_) } #endif +#if !defined(PG_HAVE_ATOMIC_FETCH_SUB_U64) && defined(HAVE_GCC__SYNC_INT64_CAS) +#define PG_HAVE_ATOMIC_FETCH_SUB_U64 +static inline uint64 +pg_atomic_fetch_sub_u64_impl(volatile pg_atomic_uint64 *ptr, int64 sub_) +{ + return __sync_fetch_and_sub(&ptr->value, sub_); +} +#endif + +#if !defined(PG_HAVE_ATOMIC_FETCH_AND_U64) && defined(HAVE_GCC__SYNC_INT64_CAS) +#define PG_HAVE_ATOMIC_FETCH_AND_U64 +static inline uint64 +pg_atomic_fetch_and_u64_impl(volatile pg_atomic_uint64 *ptr, uint64 and_) +{ + return __sync_fetch_and_and(&ptr->value, and_); +} +#endif + +#if 
!defined(PG_HAVE_ATOMIC_FETCH_OR_U64) && defined(HAVE_GCC__SYNC_INT64_CAS) +#define PG_HAVE_ATOMIC_FETCH_OR_U64 +static inline uint64 +pg_atomic_fetch_or_u64_impl(volatile pg_atomic_uint64 *ptr, uint64 or_) +{ + return __sync_fetch_and_or(&ptr->value, or_); +} +#endif + #endif /* !defined(PG_DISABLE_64_BIT_ATOMICS) */ #endif /* defined(HAVE_ATOMICS) */ diff --git a/src/include/port/atomics/generic-msvc.h b/src/include/port/atomics/generic-msvc.h index f82cfec8ec..59211c2203 100644 --- a/src/include/port/atomics/generic-msvc.h +++ b/src/include/port/atomics/generic-msvc.h @@ -3,7 +3,7 @@ * generic-msvc.h * Atomic operations support when using MSVC * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * NOTES: diff --git a/src/include/port/atomics/generic-sunpro.h b/src/include/port/atomics/generic-sunpro.h index a58e8e3bad..e903243b68 100644 --- a/src/include/port/atomics/generic-sunpro.h +++ b/src/include/port/atomics/generic-sunpro.h @@ -3,7 +3,7 @@ * generic-sunpro.h * Atomic operations for solaris' CC * - * Portions Copyright (c) 2013-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 2013-2018, PostgreSQL Global Development Group * * NOTES: * diff --git a/src/include/port/atomics/generic-xlc.h b/src/include/port/atomics/generic-xlc.h index f854612d39..f18207568c 100644 --- a/src/include/port/atomics/generic-xlc.h +++ b/src/include/port/atomics/generic-xlc.h @@ -3,7 +3,7 @@ * generic-xlc.h * Atomic operations for IBM's CC * - * Portions Copyright (c) 2013-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 2013-2018, PostgreSQL Global Development Group * * NOTES: * diff --git a/src/include/port/atomics/generic.h b/src/include/port/atomics/generic.h index 424543604a..ea11698a35 100644 --- a/src/include/port/atomics/generic.h +++ b/src/include/port/atomics/generic.h @@ -4,7 +4,7 @@ * Implement higher level operations based on some lower level atomic * operations. 
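The generic-gcc.h additions above fill in fetch-and-sub, fetch-and-and and fetch-and-or for both widths whenever the matching __sync compare-and-swap is reported available, on the stated assumption that a compiler providing one legacy __sync builtin provides the rest. A minimal GCC/Clang-only demonstration of those builtins, outside any PostgreSQL infrastructure:

#include <stdio.h>

#define STATUS_DIRTY   (1u << 0)
#define STATUS_PINNED  (1u << 1)

int
main(void)
{
    unsigned int status = 0;
    unsigned int old;

    old = __sync_fetch_and_or(&status, STATUS_DIRTY);   /* atomically set a bit */
    printf("set:   before %#x, after %#x\n", old, status);

    old = __sync_fetch_and_and(&status, ~STATUS_DIRTY); /* atomically clear it */
    printf("clear: before %#x, after %#x\n", old, status);

    old = __sync_fetch_and_sub(&status, 0);             /* atomic read via no-op sub */
    printf("now:   %#x\n", old);
    return 0;
}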
* - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/include/port/atomics/generic.h @@ -45,7 +45,7 @@ typedef pg_atomic_uint32 pg_atomic_flag; static inline uint32 pg_atomic_read_u32_impl(volatile pg_atomic_uint32 *ptr) { - return *(&ptr->value); + return ptr->value; } #endif @@ -170,12 +170,9 @@ static inline uint32 pg_atomic_exchange_u32_impl(volatile pg_atomic_uint32 *ptr, uint32 xchg_) { uint32 old; - while (true) - { - old = pg_atomic_read_u32_impl(ptr); - if (pg_atomic_compare_exchange_u32_impl(ptr, &old, xchg_)) - break; - } + old = ptr->value; /* ok if read is not atomic */ + while (!pg_atomic_compare_exchange_u32_impl(ptr, &old, xchg_)) + /* skip */; return old; } #endif @@ -186,12 +183,9 @@ static inline uint32 pg_atomic_fetch_add_u32_impl(volatile pg_atomic_uint32 *ptr, int32 add_) { uint32 old; - while (true) - { - old = pg_atomic_read_u32_impl(ptr); - if (pg_atomic_compare_exchange_u32_impl(ptr, &old, old + add_)) - break; - } + old = ptr->value; /* ok if read is not atomic */ + while (!pg_atomic_compare_exchange_u32_impl(ptr, &old, old + add_)) + /* skip */; return old; } #endif @@ -211,12 +205,9 @@ static inline uint32 pg_atomic_fetch_and_u32_impl(volatile pg_atomic_uint32 *ptr, uint32 and_) { uint32 old; - while (true) - { - old = pg_atomic_read_u32_impl(ptr); - if (pg_atomic_compare_exchange_u32_impl(ptr, &old, old & and_)) - break; - } + old = ptr->value; /* ok if read is not atomic */ + while (!pg_atomic_compare_exchange_u32_impl(ptr, &old, old & and_)) + /* skip */; return old; } #endif @@ -227,12 +218,9 @@ static inline uint32 pg_atomic_fetch_or_u32_impl(volatile pg_atomic_uint32 *ptr, uint32 or_) { uint32 old; - while (true) - { - old = pg_atomic_read_u32_impl(ptr); - if (pg_atomic_compare_exchange_u32_impl(ptr, &old, old | or_)) - break; - } + old = ptr->value; /* ok if read is not atomic */ + while (!pg_atomic_compare_exchange_u32_impl(ptr, &old, old | or_)) + /* skip */; return old; } #endif @@ -261,12 +249,9 @@ static inline uint64 pg_atomic_exchange_u64_impl(volatile pg_atomic_uint64 *ptr, uint64 xchg_) { uint64 old; - while (true) - { - old = ptr->value; - if (pg_atomic_compare_exchange_u64_impl(ptr, &old, xchg_)) - break; - } + old = ptr->value; /* ok if read is not atomic */ + while (!pg_atomic_compare_exchange_u64_impl(ptr, &old, xchg_)) + /* skip */; return old; } #endif @@ -314,12 +299,10 @@ static inline uint64 pg_atomic_read_u64_impl(volatile pg_atomic_uint64 *ptr) { /* - * On this platform aligned 64bit reads are guaranteed to be atomic, - * except if using the fallback implementation, where can't guarantee the - * required alignment. + * On this platform aligned 64-bit reads are guaranteed to be atomic. */ AssertPointerAlignment(ptr, 8); - return *(&ptr->value); + return ptr->value; } #else @@ -330,10 +313,10 @@ pg_atomic_read_u64_impl(volatile pg_atomic_uint64 *ptr) uint64 old = 0; /* - * 64 bit reads aren't safe on all platforms. In the generic + * 64-bit reads aren't atomic on all platforms. In the generic * implementation implement them as a compare/exchange with 0. That'll - * fail or succeed, but always return the old value. Possible might store - * a 0, but only if the prev. value also was a 0 - i.e. harmless. + * fail or succeed, but always return the old value. Possibly might store + * a 0, but only if the previous value also was a 0 - i.e. harmless. 
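The generic.h rewrite above drops the while (true) { read; try CAS; } shape in favour of one plain initial read followed by a compare-and-exchange loop; that works because a failed compare-exchange refreshes the caller's expected value with what it actually found, so a possibly non-atomic first read is harmless. The same pattern extends to operations the header does not provide, for example an atomic fetch-max, shown here with C11 atomics as a stand-in for the pg_atomic_* layer:

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

/*
 * Raise *target to at least candidate and return the last value observed.
 * On failure the compare-exchange rewrites 'old', so no explicit re-read is
 * needed inside the loop.
 */
static uint32_t
atomic_fetch_max_u32(_Atomic uint32_t *target, uint32_t candidate)
{
    uint32_t old = atomic_load_explicit(target, memory_order_relaxed);

    while (old < candidate &&
           !atomic_compare_exchange_weak(target, &old, candidate))
        /* 'old' now holds the current value; just retry */ ;

    return old;
}

int
main(void)
{
    _Atomic uint32_t watermark = 10;

    atomic_fetch_max_u32(&watermark, 42);
    printf("watermark = %u\n", (unsigned) atomic_load(&watermark));
    return 0;
}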
*/ pg_atomic_compare_exchange_u64_impl(ptr, &old, 0); @@ -357,12 +340,9 @@ static inline uint64 pg_atomic_fetch_add_u64_impl(volatile pg_atomic_uint64 *ptr, int64 add_) { uint64 old; - while (true) - { - old = pg_atomic_read_u64_impl(ptr); - if (pg_atomic_compare_exchange_u64_impl(ptr, &old, old + add_)) - break; - } + old = ptr->value; /* ok if read is not atomic */ + while (!pg_atomic_compare_exchange_u64_impl(ptr, &old, old + add_)) + /* skip */; return old; } #endif @@ -382,12 +362,9 @@ static inline uint64 pg_atomic_fetch_and_u64_impl(volatile pg_atomic_uint64 *ptr, uint64 and_) { uint64 old; - while (true) - { - old = pg_atomic_read_u64_impl(ptr); - if (pg_atomic_compare_exchange_u64_impl(ptr, &old, old & and_)) - break; - } + old = ptr->value; /* ok if read is not atomic */ + while (!pg_atomic_compare_exchange_u64_impl(ptr, &old, old & and_)) + /* skip */; return old; } #endif @@ -398,12 +375,9 @@ static inline uint64 pg_atomic_fetch_or_u64_impl(volatile pg_atomic_uint64 *ptr, uint64 or_) { uint64 old; - while (true) - { - old = pg_atomic_read_u64_impl(ptr); - if (pg_atomic_compare_exchange_u64_impl(ptr, &old, old | or_)) - break; - } + old = ptr->value; /* ok if read is not atomic */ + while (!pg_atomic_compare_exchange_u64_impl(ptr, &old, old | or_)) + /* skip */; return old; } #endif diff --git a/src/include/port/pg_bswap.h b/src/include/port/pg_bswap.h index 50a6bd106b..5b05a6d297 100644 --- a/src/include/port/pg_bswap.h +++ b/src/include/port/pg_bswap.h @@ -3,17 +3,15 @@ * pg_bswap.h * Byte swapping. * - * Macros for reversing the byte order of 32-bit and 64-bit unsigned integers. + * Macros for reversing the byte order of 16, 32 and 64-bit unsigned integers. * For example, 0xAABBCCDD becomes 0xDDCCBBAA. These are just wrappers for * built-in functions provided by the compiler where support exists. - * Elsewhere, beware of multiple evaluations of the arguments! * - * Note that the GCC built-in functions __builtin_bswap32() and - * __builtin_bswap64() are documented as accepting single arguments of type - * uint32_t and uint64_t respectively (these are also the respective return - * types). Use caution when using these wrapper macros with signed integers. + * Note that all of these functions accept unsigned integers as arguments and + * return the same. Use caution when using these wrapper macros with signed + * integers. * - * Copyright (c) 2015-2017, PostgreSQL Global Development Group + * Copyright (c) 2015-2018, PostgreSQL Global Development Group * * src/include/port/pg_bswap.h * @@ -22,28 +20,114 @@ #ifndef PG_BSWAP_H #define PG_BSWAP_H -#ifdef HAVE__BUILTIN_BSWAP32 -#define BSWAP32(x) __builtin_bswap32(x) + +/* + * In all supported versions msvc provides _byteswap_* functions in stdlib.h, + * already included by c.h. 
+ */ + + +/* implementation of uint16 pg_bswap16(uint16) */ +#if defined(HAVE__BUILTIN_BSWAP16) + +#define pg_bswap16(x) __builtin_bswap16(x) + +#elif defined(_MSC_VER) + +#define pg_bswap16(x) _byteswap_ushort(x) + #else -#define BSWAP32(x) ((((x) << 24) & 0xff000000) | \ - (((x) << 8) & 0x00ff0000) | \ - (((x) >> 8) & 0x0000ff00) | \ - (((x) >> 24) & 0x000000ff)) + +static inline uint16 +pg_bswap16(uint16 x) +{ + return + ((x << 8) & 0xff00) | + ((x >> 8) & 0x00ff); +} + +#endif /* HAVE__BUILTIN_BSWAP16 */ + + +/* implementation of uint32 pg_bswap32(uint32) */ +#if defined(HAVE__BUILTIN_BSWAP32) + +#define pg_bswap32(x) __builtin_bswap32(x) + +#elif defined(_MSC_VER) + +#define pg_bswap32(x) _byteswap_ulong(x) + +#else + +static inline uint32 +pg_bswap32(uint32 x) +{ + return + ((x << 24) & 0xff000000) | + ((x << 8) & 0x00ff0000) | + ((x >> 8) & 0x0000ff00) | + ((x >> 24) & 0x000000ff); +} + #endif /* HAVE__BUILTIN_BSWAP32 */ -#ifdef HAVE__BUILTIN_BSWAP64 -#define BSWAP64(x) __builtin_bswap64(x) + +/* implementation of uint64 pg_bswap64(uint64) */ +#if defined(HAVE__BUILTIN_BSWAP64) + +#define pg_bswap64(x) __builtin_bswap64(x) + + +#elif defined(_MSC_VER) + +#define pg_bswap64(x) _byteswap_uint64(x) + #else -#define BSWAP64(x) ((((x) << 56) & UINT64CONST(0xff00000000000000)) | \ - (((x) << 40) & UINT64CONST(0x00ff000000000000)) | \ - (((x) << 24) & UINT64CONST(0x0000ff0000000000)) | \ - (((x) << 8) & UINT64CONST(0x000000ff00000000)) | \ - (((x) >> 8) & UINT64CONST(0x00000000ff000000)) | \ - (((x) >> 24) & UINT64CONST(0x0000000000ff0000)) | \ - (((x) >> 40) & UINT64CONST(0x000000000000ff00)) | \ - (((x) >> 56) & UINT64CONST(0x00000000000000ff))) + +static inline uint64 +pg_bswap64(uint64 x) +{ + return + ((x << 56) & UINT64CONST(0xff00000000000000)) | + ((x << 40) & UINT64CONST(0x00ff000000000000)) | + ((x << 24) & UINT64CONST(0x0000ff0000000000)) | + ((x << 8) & UINT64CONST(0x000000ff00000000)) | + ((x >> 8) & UINT64CONST(0x00000000ff000000)) | + ((x >> 24) & UINT64CONST(0x0000000000ff0000)) | + ((x >> 40) & UINT64CONST(0x000000000000ff00)) | + ((x >> 56) & UINT64CONST(0x00000000000000ff)); +} #endif /* HAVE__BUILTIN_BSWAP64 */ + +/* + * Portable and fast equivalents for ntohs, ntohl, htons, htonl, + * additionally extended to 64 bits. + */ +#ifdef WORDS_BIGENDIAN + +#define pg_hton16(x) (x) +#define pg_hton32(x) (x) +#define pg_hton64(x) (x) + +#define pg_ntoh16(x) (x) +#define pg_ntoh32(x) (x) +#define pg_ntoh64(x) (x) + +#else + +#define pg_hton16(x) pg_bswap16(x) +#define pg_hton32(x) pg_bswap32(x) +#define pg_hton64(x) pg_bswap64(x) + +#define pg_ntoh16(x) pg_bswap16(x) +#define pg_ntoh32(x) pg_bswap32(x) +#define pg_ntoh64(x) pg_bswap64(x) + +#endif /* WORDS_BIGENDIAN */ + + /* * Rearrange the bytes of a Datum from big-endian order into the native byte * order. On big-endian machines, this does nothing at all. Note that the C @@ -60,9 +144,9 @@ #define DatumBigEndianToNative(x) (x) #else /* !WORDS_BIGENDIAN */ #if SIZEOF_DATUM == 8 -#define DatumBigEndianToNative(x) BSWAP64(x) +#define DatumBigEndianToNative(x) pg_bswap64(x) #else /* SIZEOF_DATUM != 8 */ -#define DatumBigEndianToNative(x) BSWAP32(x) +#define DatumBigEndianToNative(x) pg_bswap32(x) #endif /* SIZEOF_DATUM == 8 */ #endif /* WORDS_BIGENDIAN */ diff --git a/src/include/port/pg_crc32c.h b/src/include/port/pg_crc32c.h index cd58ecc988..9a26295c8e 100644 --- a/src/include/port/pg_crc32c.h +++ b/src/include/port/pg_crc32c.h @@ -23,7 +23,7 @@ * EQ_CRC32C(c1, c2) * Check for equality of two CRCs. 
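The reworked pg_bswap.h above turns the old BSWAP32/BSWAP64 macros into typed pg_bswap16/32/64() operations and layers pg_hton*/pg_ntoh* on top, giving in-tree code single-evaluation, endian-aware helpers without pulling in <arpa/inet.h>. A usage sketch for backend-side code, assuming nothing beyond what the hunk itself defines:

#include "postgres.h"

#include <string.h>

#include "port/pg_bswap.h"

/* Write and read a length word in network byte order, as wire-protocol code does. */
static void
put_uint32_be(char *buf, uint32 value)
{
    uint32  wire = pg_hton32(value);

    memcpy(buf, &wire, sizeof(wire));
}

static uint32
get_uint32_be(const char *buf)
{
    uint32  wire;

    memcpy(&wire, buf, sizeof(wire));
    return pg_ntoh32(wire);
}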
* - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/include/port/pg_crc32c.h @@ -42,26 +42,42 @@ typedef uint32 pg_crc32c; #define EQ_CRC32C(c1, c2) ((c1) == (c2)) #if defined(USE_SSE42_CRC32C) -/* Use SSE4.2 instructions. */ +/* Use Intel SSE4.2 instructions. */ #define COMP_CRC32C(crc, data, len) \ ((crc) = pg_comp_crc32c_sse42((crc), (data), (len))) #define FIN_CRC32C(crc) ((crc) ^= 0xFFFFFFFF) extern pg_crc32c pg_comp_crc32c_sse42(pg_crc32c crc, const void *data, size_t len); -#elif defined(USE_SSE42_CRC32C_WITH_RUNTIME_CHECK) +#elif defined(USE_ARMV8_CRC32C) +/* Use ARMv8 CRC Extension instructions. */ + +#define COMP_CRC32C(crc, data, len) \ + ((crc) = pg_comp_crc32c_armv8((crc), (data), (len))) +#define FIN_CRC32C(crc) ((crc) ^= 0xFFFFFFFF) + +extern pg_crc32c pg_comp_crc32c_armv8(pg_crc32c crc, const void *data, size_t len); + +#elif defined(USE_SSE42_CRC32C_WITH_RUNTIME_CHECK) || defined(USE_ARMV8_CRC32C_WITH_RUNTIME_CHECK) + /* - * Use SSE4.2 instructions, but perform a runtime check first to check that - * they are available. + * Use Intel SSE 4.2 or ARMv8 instructions, but perform a runtime check first + * to check that they are available. */ #define COMP_CRC32C(crc, data, len) \ ((crc) = pg_comp_crc32c((crc), (data), (len))) #define FIN_CRC32C(crc) ((crc) ^= 0xFFFFFFFF) -extern pg_crc32c pg_comp_crc32c_sse42(pg_crc32c crc, const void *data, size_t len); extern pg_crc32c pg_comp_crc32c_sb8(pg_crc32c crc, const void *data, size_t len); extern pg_crc32c (*pg_comp_crc32c) (pg_crc32c crc, const void *data, size_t len); +#ifdef USE_SSE42_CRC32C_WITH_RUNTIME_CHECK +extern pg_crc32c pg_comp_crc32c_sse42(pg_crc32c crc, const void *data, size_t len); +#endif +#ifdef USE_ARMV8_CRC32C_WITH_RUNTIME_CHECK +extern pg_crc32c pg_comp_crc32c_armv8(pg_crc32c crc, const void *data, size_t len); +#endif + #else /* * Use slicing-by-8 algorithm. @@ -73,7 +89,7 @@ extern pg_crc32c (*pg_comp_crc32c) (pg_crc32c crc, const void *data, size_t len) #define COMP_CRC32C(crc, data, len) \ ((crc) = pg_comp_crc32c_sb8((crc), (data), (len))) #ifdef WORDS_BIGENDIAN -#define FIN_CRC32C(crc) ((crc) = BSWAP32(crc) ^ 0xFFFFFFFF) +#define FIN_CRC32C(crc) ((crc) = pg_bswap32(crc) ^ 0xFFFFFFFF) #else #define FIN_CRC32C(crc) ((crc) ^= 0xFFFFFFFF) #endif diff --git a/src/include/port/win32.h b/src/include/port/win32.h index 23f89748ac..9f48a58aed 100644 --- a/src/include/port/win32.h +++ b/src/include/port/win32.h @@ -1,16 +1,22 @@ /* src/include/port/win32.h */ +/* + * We always rely on the WIN32 macro being set by our build system, + * but _WIN32 is the compiler pre-defined macro. So make sure we define + * WIN32 whenever _WIN32 is set, to facilitate standalone building. + */ +#if defined(_WIN32) && !defined(WIN32) +#define WIN32 +#endif + /* * Make sure _WIN32_WINNT has the minimum required value. * Leave a higher value in place. When building with at least Visual * Studio 2015 the minimum requirement is Windows Vista (0x0600) to * get support for GetLocaleInfoEx() with locales. For everything else * the minimum version is Windows XP (0x0501). - * Also for VS2015, add a define that stops compiler complaints about - * using the old Winsock API. 
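Whichever CRC-32C implementation the pg_crc32c.h changes above end up selecting, Intel SSE 4.2, the new ARMv8 path, a runtime-dispatched function pointer, or plain slicing-by-8, callers drive it through the same macro protocol from that header. A usage sketch for in-tree code:

#include "postgres.h"

#include "port/pg_crc32c.h"

/* Compute the CRC-32C of a buffer with the header's INIT/COMP/FIN protocol. */
static pg_crc32c
checksum_buffer(const char *data, size_t len)
{
    pg_crc32c   crc;

    INIT_CRC32C(crc);
    COMP_CRC32C(crc, data, len);
    FIN_CRC32C(crc);
    return crc;
}

EQ_CRC32C() can then compare the result against a stored checksum without the caller knowing which backend produced it.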
*/ #if defined(_MSC_VER) && _MSC_VER >= 1900 -#define _WINSOCK_DEPRECATED_NO_WARNINGS #define MIN_WINNT 0x0600 #else #define MIN_WINNT 0x0501 @@ -25,64 +31,23 @@ #endif /* - * Always build with SSPI support. Keep it as a #define in case - * we want a switch to disable it sometime in the future. + * We need to prevent from defining a symbol conflicting with + * our errcode() function. Since it's likely to get included by standard + * system headers, pre-emptively include it now. */ -#define ENABLE_SSPI 1 - -/* undefine and redefine after #include */ -#undef mkdir - -#undef ERROR - -/* - * The Mingw64 headers choke if this is already defined - they - * define it themselves. - */ -#if !defined(__MINGW64_VERSION_MAJOR) || defined(_MSC_VER) -#define _WINSOCKAPI_ +#if _MSC_VER >= 1400 || defined(HAVE_CRTDEFS_H) +#define errcode __msvc_errcode +#include +#undef errcode #endif -#include -#include -#include -#undef small -#include -#include -#include -#include -#include /* for non-unicode version */ -#undef near - -/* Must be here to avoid conflicting with prototype in windows.h */ -#define mkdir(a,b) mkdir(a) - -#define ftruncate(a,b) chsize(a,b) - -/* Windows doesn't have fsync() as such, use _commit() */ -#define fsync(fd) _commit(fd) /* - * For historical reasons, we allow setting wal_sync_method to - * fsync_writethrough on Windows, even though it's really identical to fsync - * (both code paths wind up at _commit()). + * defines for dynamic linking on Win32 platform */ -#define HAVE_FSYNC_WRITETHROUGH -#define FSYNC_WRITETHROUGH_IS_FSYNC - -#define USES_WINSOCK - -/* defines for dynamic linking on Win32 platform - * - * http://support.microsoft.com/kb/132044 - * http://msdn.microsoft.com/en-us/library/8fskxacy(v=vs.80).aspx - * http://msdn.microsoft.com/en-us/library/a90k134d(v=vs.80).aspx - */ - -#if defined(WIN32) || defined(__CYGWIN__) #ifdef BUILDING_DLL #define PGDLLIMPORT __declspec (dllexport) -#else /* not BUILDING_DLL */ +#else #define PGDLLIMPORT __declspec (dllimport) #endif @@ -91,365 +56,3 @@ #else #define PGDLLEXPORT #endif -#else /* not CYGWIN, not MSVC, not MingW */ -#define PGDLLIMPORT -#define PGDLLEXPORT -#endif - - -/* - * IPC defines - */ -#undef HAVE_UNION_SEMUN -#define HAVE_UNION_SEMUN 1 - -#define IPC_RMID 256 -#define IPC_CREAT 512 -#define IPC_EXCL 1024 -#define IPC_PRIVATE 234564 -#define IPC_NOWAIT 2048 -#define IPC_STAT 4096 - -#define EACCESS 2048 -#ifndef EIDRM -#define EIDRM 4096 -#endif - -#define SETALL 8192 -#define GETNCNT 16384 -#define GETVAL 65536 -#define SETVAL 131072 -#define GETPID 262144 - - -/* - * Signal stuff - * - * For WIN32, there is no wait() call so there are no wait() macros - * to interpret the return value of system(). Instead, system() - * return values < 0x100 are used for exit() termination, and higher - * values are used to indicated non-exit() termination, which is - * similar to a unix-style signal exit (think SIGSEGV == - * STATUS_ACCESS_VIOLATION). Return values are broken up into groups: - * - * http://msdn2.microsoft.com/en-gb/library/aa489609.aspx - * - * NT_SUCCESS 0 - 0x3FFFFFFF - * NT_INFORMATION 0x40000000 - 0x7FFFFFFF - * NT_WARNING 0x80000000 - 0xBFFFFFFF - * NT_ERROR 0xC0000000 - 0xFFFFFFFF - * - * Effectively, we don't care on the severity of the return value from - * system(), we just need to know if it was because of exit() or generated - * by the system, and it seems values >= 0x100 are system-generated. 
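The large block being removed here is relocated, essentially verbatim, into the new src/include/port/win32_port.h later in this patch. Its comment explains that Windows has no wait(), so a system() return value is classified purely by magnitude: anything below 0x100 is treated as an exit() status and anything larger as a system-generated, signal-like termination. A self-contained restatement of the classification that the header's WIFEXITED()/WEXITSTATUS() macros implement:

#include <stdio.h>

/* Same shape as the header's definitions, renamed to stay standalone. */
#define MY_WIFEXITED(w)   (((w) & 0xFFFFFF00) == 0)
#define MY_WIFSIGNALED(w) (!MY_WIFEXITED(w))
#define MY_WEXITSTATUS(w) (w)
#define MY_WTERMSIG(w)    (w)

static void
classify(unsigned int rc)
{
    if (MY_WIFEXITED(rc))
        printf("%#x: exit() status %u\n", rc, MY_WEXITSTATUS(rc));
    else
        printf("%#x: system-generated termination code %#x\n", rc, MY_WTERMSIG(rc));
}

int
main(void)
{
    classify(0);            /* normal exit */
    classify(3);            /* exit(3) */
    classify(0xC0000005u);  /* STATUS_ACCESS_VIOLATION-style crash */
    return 0;
}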
- * See this URL for a list of WIN32 STATUS_* values: - * - * Wine (URL used in our error messages) - - * http://source.winehq.org/source/include/ntstatus.h - * Descriptions - http://www.comp.nus.edu.sg/~wuyongzh/my_doc/ntstatus.txt - * MS SDK - http://www.nologs.com/ntstatus.html - * - * It seems the exception lists are in both ntstatus.h and winnt.h, but - * ntstatus.h has a more comprehensive list, and it only contains - * exception values, rather than winnt, which contains lots of other - * things: - * - * http://www.microsoft.com/msj/0197/exception/exception.aspx - * - * The ExceptionCode parameter is the number that the operating system - * assigned to the exception. You can see a list of various exception codes - * in WINNT.H by searching for #defines that start with "STATUS_". For - * example, the code for the all-too-familiar STATUS_ACCESS_VIOLATION is - * 0xC0000005. A more complete set of exception codes can be found in - * NTSTATUS.H from the Windows NT DDK. - * - * Some day we might want to print descriptions for the most common - * exceptions, rather than printing an include file name. We could use - * RtlNtStatusToDosError() and pass to FormatMessage(), which can print - * the text of error values, but MinGW does not support - * RtlNtStatusToDosError(). - */ -#define WIFEXITED(w) (((w) & 0XFFFFFF00) == 0) -#define WIFSIGNALED(w) (!WIFEXITED(w)) -#define WEXITSTATUS(w) (w) -#define WTERMSIG(w) (w) - -#define sigmask(sig) ( 1 << ((sig)-1) ) - -/* Signal function return values */ -#undef SIG_DFL -#undef SIG_ERR -#undef SIG_IGN -#define SIG_DFL ((pqsigfunc)0) -#define SIG_ERR ((pqsigfunc)-1) -#define SIG_IGN ((pqsigfunc)1) - -/* Some extra signals */ -#define SIGHUP 1 -#define SIGQUIT 3 -#define SIGTRAP 5 -#define SIGABRT 22 /* Set to match W32 value -- not UNIX value */ -#define SIGKILL 9 -#define SIGPIPE 13 -#define SIGALRM 14 -#define SIGSTOP 17 -#define SIGTSTP 18 -#define SIGCONT 19 -#define SIGCHLD 20 -#define SIGTTIN 21 -#define SIGTTOU 22 /* Same as SIGABRT -- no problem, I hope */ -#define SIGWINCH 28 -#define SIGUSR1 30 -#define SIGUSR2 31 - -/* - * New versions of mingw have gettimeofday() and also declare - * struct timezone to support it. - */ -#ifndef HAVE_GETTIMEOFDAY -struct timezone -{ - int tz_minuteswest; /* Minutes west of GMT. */ - int tz_dsttime; /* Nonzero if DST is ever in effect. */ -}; -#endif - -/* for setitimer in backend/port/win32/timer.c */ -#define ITIMER_REAL 0 -struct itimerval -{ - struct timeval it_interval; - struct timeval it_value; -}; - -int setitimer(int which, const struct itimerval *value, struct itimerval *ovalue); - -/* - * WIN32 does not provide 64-bit off_t, but does provide the functions operating - * with 64-bit offsets. - */ -#define pgoff_t __int64 -#ifdef _MSC_VER -#define fseeko(stream, offset, origin) _fseeki64(stream, offset, origin) -#define ftello(stream) _ftelli64(stream) -#else -#ifndef fseeko -#define fseeko(stream, offset, origin) fseeko64(stream, offset, origin) -#endif -#ifndef ftello -#define ftello(stream) ftello64(stream) -#endif -#endif - -/* - * Supplement to . - * - * Perl already has typedefs for uid_t and gid_t. - */ -#ifndef PLPERL_HAVE_UID_GID -typedef int uid_t; -typedef int gid_t; -#endif -typedef long key_t; - -#ifdef _MSC_VER -typedef int pid_t; -#endif - -/* - * Supplement to . - */ -#define lstat(path, sb) stat((path), (sb)) - -/* - * Supplement to . - * This is the same value as _O_NOINHERIT in the MS header file. This is - * to ensure that we don't collide with a future definition. 
It means - * we cannot use _O_NOINHERIT ourselves. - */ -#define O_DSYNC 0x0080 - -/* - * Supplement to . - * - * We redefine network-related Berkeley error symbols as the corresponding WSA - * constants. This allows elog.c to recognize them as being in the Winsock - * error code range and pass them off to pgwin32_socket_strerror(), since - * Windows' version of plain strerror() won't cope. Note that this will break - * if these names are used for anything else besides Windows Sockets errors. - * See TranslateSocketError() when changing this list. - */ -#undef EAGAIN -#define EAGAIN WSAEWOULDBLOCK -#undef EINTR -#define EINTR WSAEINTR -#undef EMSGSIZE -#define EMSGSIZE WSAEMSGSIZE -#undef EAFNOSUPPORT -#define EAFNOSUPPORT WSAEAFNOSUPPORT -#undef EWOULDBLOCK -#define EWOULDBLOCK WSAEWOULDBLOCK -#undef ECONNABORTED -#define ECONNABORTED WSAECONNABORTED -#undef ECONNRESET -#define ECONNRESET WSAECONNRESET -#undef EINPROGRESS -#define EINPROGRESS WSAEINPROGRESS -#undef EISCONN -#define EISCONN WSAEISCONN -#undef ENOBUFS -#define ENOBUFS WSAENOBUFS -#undef EPROTONOSUPPORT -#define EPROTONOSUPPORT WSAEPROTONOSUPPORT -#undef ECONNREFUSED -#define ECONNREFUSED WSAECONNREFUSED -#undef ENOTSOCK -#define ENOTSOCK WSAENOTSOCK -#undef EOPNOTSUPP -#define EOPNOTSUPP WSAEOPNOTSUPP -#undef EADDRINUSE -#define EADDRINUSE WSAEADDRINUSE -#undef EADDRNOTAVAIL -#define EADDRNOTAVAIL WSAEADDRNOTAVAIL -#undef EHOSTUNREACH -#define EHOSTUNREACH WSAEHOSTUNREACH -#undef ENOTCONN -#define ENOTCONN WSAENOTCONN - -/* - * Extended locale functions with gratuitous underscore prefixes. - * (These APIs are nevertheless fully documented by Microsoft.) - */ -#define locale_t _locale_t -#define tolower_l _tolower_l -#define toupper_l _toupper_l -#define towlower_l _towlower_l -#define towupper_l _towupper_l -#define isdigit_l _isdigit_l -#define iswdigit_l _iswdigit_l -#define isalpha_l _isalpha_l -#define iswalpha_l _iswalpha_l -#define isalnum_l _isalnum_l -#define iswalnum_l _iswalnum_l -#define isupper_l _isupper_l -#define iswupper_l _iswupper_l -#define islower_l _islower_l -#define iswlower_l _iswlower_l -#define isgraph_l _isgraph_l -#define iswgraph_l _iswgraph_l -#define isprint_l _isprint_l -#define iswprint_l _iswprint_l -#define ispunct_l _ispunct_l -#define iswpunct_l _iswpunct_l -#define isspace_l _isspace_l -#define iswspace_l _iswspace_l -#define strcoll_l _strcoll_l -#define strxfrm_l _strxfrm_l -#define wcscoll_l _wcscoll_l -#define wcstombs_l _wcstombs_l -#define mbstowcs_l _mbstowcs_l - - -/* In backend/port/win32/signal.c */ -extern PGDLLIMPORT volatile int pg_signal_queue; -extern PGDLLIMPORT int pg_signal_mask; -extern HANDLE pgwin32_signal_event; -extern HANDLE pgwin32_initial_signal_pipe; - -#define UNBLOCKED_SIGNAL_QUEUE() (pg_signal_queue & ~pg_signal_mask) - - -void pgwin32_signal_initialize(void); -HANDLE pgwin32_create_signal_listener(pid_t pid); -void pgwin32_dispatch_queued_signals(void); -void pg_queue_signal(int signum); - -/* In backend/port/win32/socket.c */ -#ifndef FRONTEND -#define socket(af, type, protocol) pgwin32_socket(af, type, protocol) -#define bind(s, addr, addrlen) pgwin32_bind(s, addr, addrlen) -#define listen(s, backlog) pgwin32_listen(s, backlog) -#define accept(s, addr, addrlen) pgwin32_accept(s, addr, addrlen) -#define connect(s, name, namelen) pgwin32_connect(s, name, namelen) -#define select(n, r, w, e, timeout) pgwin32_select(n, r, w, e, timeout) -#define recv(s, buf, len, flags) pgwin32_recv(s, buf, len, flags) -#define send(s, buf, len, flags) pgwin32_send(s, buf, 
len, flags) - -SOCKET pgwin32_socket(int af, int type, int protocol); -int pgwin32_bind(SOCKET s, struct sockaddr *addr, int addrlen); -int pgwin32_listen(SOCKET s, int backlog); -SOCKET pgwin32_accept(SOCKET s, struct sockaddr *addr, int *addrlen); -int pgwin32_connect(SOCKET s, const struct sockaddr *name, int namelen); -int pgwin32_select(int nfds, fd_set *readfs, fd_set *writefds, fd_set *exceptfds, const struct timeval *timeout); -int pgwin32_recv(SOCKET s, char *buf, int len, int flags); -int pgwin32_send(SOCKET s, const void *buf, int len, int flags); - -const char *pgwin32_socket_strerror(int err); -int pgwin32_waitforsinglesocket(SOCKET s, int what, int timeout); - -extern int pgwin32_noblock; - -#endif - -/* in backend/port/win32_shmem.c */ -extern int pgwin32_ReserveSharedMemoryRegion(HANDLE); - -/* in backend/port/win32/crashdump.c */ -extern void pgwin32_install_crashdump_handler(void); - -/* in port/win32error.c */ -extern void _dosmaperr(unsigned long); - -/* in port/win32env.c */ -extern int pgwin32_putenv(const char *); -extern void pgwin32_unsetenv(const char *); - -/* in port/win32security.c */ -extern int pgwin32_is_service(void); -extern int pgwin32_is_admin(void); - -#define putenv(x) pgwin32_putenv(x) -#define unsetenv(x) pgwin32_unsetenv(x) - -/* Things that exist in MingW headers, but need to be added to MSVC */ -#ifdef _MSC_VER - -#ifndef _WIN64 -typedef long ssize_t; -#else -typedef __int64 ssize_t; -#endif - -typedef unsigned short mode_t; - -#define S_IRUSR _S_IREAD -#define S_IWUSR _S_IWRITE -#define S_IXUSR _S_IEXEC -#define S_IRWXU (S_IRUSR | S_IWUSR | S_IXUSR) -/* see also S_IRGRP etc below */ -#define S_ISDIR(m) (((m) & S_IFMT) == S_IFDIR) -#define S_ISREG(m) (((m) & S_IFMT) == S_IFREG) - -#define F_OK 0 -#define W_OK 2 -#define R_OK 4 - -#if (_MSC_VER < 1800) -#define isinf(x) ((_fpclass(x) == _FPCLASS_PINF) || (_fpclass(x) == _FPCLASS_NINF)) -#define isnan(x) _isnan(x) -#endif - -/* Pulled from Makefile.port in mingw */ -#define DLSUFFIX ".dll" - -#endif /* _MSC_VER */ - -/* These aren't provided by either MingW or MSVC */ -#define S_IRGRP 0 -#define S_IWGRP 0 -#define S_IXGRP 0 -#define S_IRWXG 0 -#define S_IROTH 0 -#define S_IWOTH 0 -#define S_IXOTH 0 -#define S_IRWXO 0 diff --git a/src/include/port/win32_port.h b/src/include/port/win32_port.h new file mode 100644 index 0000000000..360dbdf3a7 --- /dev/null +++ b/src/include/port/win32_port.h @@ -0,0 +1,520 @@ +/*------------------------------------------------------------------------- + * + * win32_port.h + * Windows-specific compatibility stuff. + * + * Note this is read in MinGW as well as native Windows builds, + * but not in Cygwin builds. + * + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group + * Portions Copyright (c) 1994, Regents of the University of California + * + * src/include/port/win32_port.h + * + *------------------------------------------------------------------------- + */ +#ifndef PG_WIN32_PORT_H +#define PG_WIN32_PORT_H + +/* + * Always build with SSPI support. Keep it as a #define in case + * we want a switch to disable it sometime in the future. + */ +#define ENABLE_SSPI 1 + +/* undefine and redefine after #include */ +#undef mkdir + +#undef ERROR + +/* + * VS2013 and later issue warnings about using the old Winsock API, + * which we don't really want to hear about. + */ +#ifdef _MSC_VER +#define _WINSOCK_DEPRECATED_NO_WARNINGS +#endif + +/* + * The MinGW64 headers choke if this is already defined - they + * define it themselves. 
+ */ +#if !defined(__MINGW64_VERSION_MAJOR) || defined(_MSC_VER) +#define _WINSOCKAPI_ +#endif + +#include +#include +#include +#undef small +#include +#include +#include +#include /* for non-unicode version */ +#undef near +#include /* needed before sys/stat hacking below */ + +/* Must be here to avoid conflicting with prototype in windows.h */ +#define mkdir(a,b) mkdir(a) + +#define ftruncate(a,b) chsize(a,b) + +/* Windows doesn't have fsync() as such, use _commit() */ +#define fsync(fd) _commit(fd) + +/* + * For historical reasons, we allow setting wal_sync_method to + * fsync_writethrough on Windows, even though it's really identical to fsync + * (both code paths wind up at _commit()). + */ +#define HAVE_FSYNC_WRITETHROUGH +#define FSYNC_WRITETHROUGH_IS_FSYNC + +#define USES_WINSOCK + +/* + * IPC defines + */ +#undef HAVE_UNION_SEMUN +#define HAVE_UNION_SEMUN 1 + +#define IPC_RMID 256 +#define IPC_CREAT 512 +#define IPC_EXCL 1024 +#define IPC_PRIVATE 234564 +#define IPC_NOWAIT 2048 +#define IPC_STAT 4096 + +#define EACCESS 2048 +#ifndef EIDRM +#define EIDRM 4096 +#endif + +#define SETALL 8192 +#define GETNCNT 16384 +#define GETVAL 65536 +#define SETVAL 131072 +#define GETPID 262144 + + +/* + * Signal stuff + * + * For WIN32, there is no wait() call so there are no wait() macros + * to interpret the return value of system(). Instead, system() + * return values < 0x100 are used for exit() termination, and higher + * values are used to indicated non-exit() termination, which is + * similar to a unix-style signal exit (think SIGSEGV == + * STATUS_ACCESS_VIOLATION). Return values are broken up into groups: + * + * http://msdn2.microsoft.com/en-gb/library/aa489609.aspx + * + * NT_SUCCESS 0 - 0x3FFFFFFF + * NT_INFORMATION 0x40000000 - 0x7FFFFFFF + * NT_WARNING 0x80000000 - 0xBFFFFFFF + * NT_ERROR 0xC0000000 - 0xFFFFFFFF + * + * Effectively, we don't care on the severity of the return value from + * system(), we just need to know if it was because of exit() or generated + * by the system, and it seems values >= 0x100 are system-generated. + * See this URL for a list of WIN32 STATUS_* values: + * + * Wine (URL used in our error messages) - + * http://source.winehq.org/source/include/ntstatus.h + * Descriptions - http://www.comp.nus.edu.sg/~wuyongzh/my_doc/ntstatus.txt + * MS SDK - http://www.nologs.com/ntstatus.html + * + * It seems the exception lists are in both ntstatus.h and winnt.h, but + * ntstatus.h has a more comprehensive list, and it only contains + * exception values, rather than winnt, which contains lots of other + * things: + * + * http://www.microsoft.com/msj/0197/exception/exception.aspx + * + * The ExceptionCode parameter is the number that the operating system + * assigned to the exception. You can see a list of various exception codes + * in WINNT.H by searching for #defines that start with "STATUS_". For + * example, the code for the all-too-familiar STATUS_ACCESS_VIOLATION is + * 0xC0000005. A more complete set of exception codes can be found in + * NTSTATUS.H from the Windows NT DDK. + * + * Some day we might want to print descriptions for the most common + * exceptions, rather than printing an include file name. We could use + * RtlNtStatusToDosError() and pass to FormatMessage(), which can print + * the text of error values, but MinGW does not support + * RtlNtStatusToDosError(). 
+ */ +#define WIFEXITED(w) (((w) & 0XFFFFFF00) == 0) +#define WIFSIGNALED(w) (!WIFEXITED(w)) +#define WEXITSTATUS(w) (w) +#define WTERMSIG(w) (w) + +#define sigmask(sig) ( 1 << ((sig)-1) ) + +/* Signal function return values */ +#undef SIG_DFL +#undef SIG_ERR +#undef SIG_IGN +#define SIG_DFL ((pqsigfunc)0) +#define SIG_ERR ((pqsigfunc)-1) +#define SIG_IGN ((pqsigfunc)1) + +/* Some extra signals */ +#define SIGHUP 1 +#define SIGQUIT 3 +#define SIGTRAP 5 +#define SIGABRT 22 /* Set to match W32 value -- not UNIX value */ +#define SIGKILL 9 +#define SIGPIPE 13 +#define SIGALRM 14 +#define SIGSTOP 17 +#define SIGTSTP 18 +#define SIGCONT 19 +#define SIGCHLD 20 +#define SIGTTIN 21 +#define SIGTTOU 22 /* Same as SIGABRT -- no problem, I hope */ +#define SIGWINCH 28 +#define SIGUSR1 30 +#define SIGUSR2 31 + +/* + * New versions of MinGW have gettimeofday() and also declare + * struct timezone to support it. + */ +#ifndef HAVE_GETTIMEOFDAY +struct timezone +{ + int tz_minuteswest; /* Minutes west of GMT. */ + int tz_dsttime; /* Nonzero if DST is ever in effect. */ +}; +#endif + +/* for setitimer in backend/port/win32/timer.c */ +#define ITIMER_REAL 0 +struct itimerval +{ + struct timeval it_interval; + struct timeval it_value; +}; + +int setitimer(int which, const struct itimerval *value, struct itimerval *ovalue); + +/* + * WIN32 does not provide 64-bit off_t, but does provide the functions operating + * with 64-bit offsets. + */ +#define pgoff_t __int64 +#ifdef _MSC_VER +#define fseeko(stream, offset, origin) _fseeki64(stream, offset, origin) +#define ftello(stream) _ftelli64(stream) +#else +#ifndef fseeko +#define fseeko(stream, offset, origin) fseeko64(stream, offset, origin) +#endif +#ifndef ftello +#define ftello(stream) ftello64(stream) +#endif +#endif + +/* + * Win32 also doesn't have symlinks, but we can emulate them with + * junction points on newer Win32 versions. + * + * Cygwin has its own symlinks which work on Win95/98/ME where + * junction points don't, so use those instead. We have no way of + * knowing what type of system Cygwin binaries will be run on. + * Note: Some CYGWIN includes might #define WIN32. + */ +extern int pgsymlink(const char *oldpath, const char *newpath); +extern int pgreadlink(const char *path, char *buf, size_t size); +extern bool pgwin32_is_junction(const char *path); + +#define symlink(oldpath, newpath) pgsymlink(oldpath, newpath) +#define readlink(path, buf, size) pgreadlink(path, buf, size) + +/* + * Supplement to . + * + * Perl already has typedefs for uid_t and gid_t. + */ +#ifndef PLPERL_HAVE_UID_GID +typedef int uid_t; +typedef int gid_t; +#endif +typedef long key_t; + +#ifdef _MSC_VER +typedef int pid_t; +#endif + +/* + * Supplement to . + * + * We must pull in sys/stat.h before this part, else our overrides lose. + */ +#define lstat(path, sb) stat(path, sb) + +/* + * stat() is not guaranteed to set the st_size field on win32, so we + * redefine it to our own implementation that is. + * + * Some frontends don't need the size from stat, so if UNSAFE_STAT_OK + * is defined we don't bother with this. 
+ */ +#ifndef UNSAFE_STAT_OK +extern int pgwin32_safestat(const char *path, struct stat *buf); +#define stat(a,b) pgwin32_safestat(a,b) +#endif + +/* These macros are not provided by older MinGW, nor by MSVC */ +#ifndef S_IRUSR +#define S_IRUSR _S_IREAD +#endif +#ifndef S_IWUSR +#define S_IWUSR _S_IWRITE +#endif +#ifndef S_IXUSR +#define S_IXUSR _S_IEXEC +#endif +#ifndef S_IRWXU +#define S_IRWXU (S_IRUSR | S_IWUSR | S_IXUSR) +#endif +#ifndef S_IRGRP +#define S_IRGRP 0 +#endif +#ifndef S_IWGRP +#define S_IWGRP 0 +#endif +#ifndef S_IXGRP +#define S_IXGRP 0 +#endif +#ifndef S_IRWXG +#define S_IRWXG 0 +#endif +#ifndef S_IROTH +#define S_IROTH 0 +#endif +#ifndef S_IWOTH +#define S_IWOTH 0 +#endif +#ifndef S_IXOTH +#define S_IXOTH 0 +#endif +#ifndef S_IRWXO +#define S_IRWXO 0 +#endif +#ifndef S_ISDIR +#define S_ISDIR(m) (((m) & S_IFMT) == S_IFDIR) +#endif +#ifndef S_ISREG +#define S_ISREG(m) (((m) & S_IFMT) == S_IFREG) +#endif + +/* + * Supplement to . + * This is the same value as _O_NOINHERIT in the MS header file. This is + * to ensure that we don't collide with a future definition. It means + * we cannot use _O_NOINHERIT ourselves. + */ +#define O_DSYNC 0x0080 + +/* + * Supplement to . + * + * We redefine network-related Berkeley error symbols as the corresponding WSA + * constants. This allows strerror.c to recognize them as being in the Winsock + * error code range and pass them off to win32_socket_strerror(), since + * Windows' version of plain strerror() won't cope. Note that this will break + * if these names are used for anything else besides Windows Sockets errors. + * See TranslateSocketError() when changing this list. + */ +#undef EAGAIN +#define EAGAIN WSAEWOULDBLOCK +#undef EINTR +#define EINTR WSAEINTR +#undef EMSGSIZE +#define EMSGSIZE WSAEMSGSIZE +#undef EAFNOSUPPORT +#define EAFNOSUPPORT WSAEAFNOSUPPORT +#undef EWOULDBLOCK +#define EWOULDBLOCK WSAEWOULDBLOCK +#undef ECONNABORTED +#define ECONNABORTED WSAECONNABORTED +#undef ECONNRESET +#define ECONNRESET WSAECONNRESET +#undef EINPROGRESS +#define EINPROGRESS WSAEINPROGRESS +#undef EISCONN +#define EISCONN WSAEISCONN +#undef ENOBUFS +#define ENOBUFS WSAENOBUFS +#undef EPROTONOSUPPORT +#define EPROTONOSUPPORT WSAEPROTONOSUPPORT +#undef ECONNREFUSED +#define ECONNREFUSED WSAECONNREFUSED +#undef ENOTSOCK +#define ENOTSOCK WSAENOTSOCK +#undef EOPNOTSUPP +#define EOPNOTSUPP WSAEOPNOTSUPP +#undef EADDRINUSE +#define EADDRINUSE WSAEADDRINUSE +#undef EADDRNOTAVAIL +#define EADDRNOTAVAIL WSAEADDRNOTAVAIL +#undef EHOSTUNREACH +#define EHOSTUNREACH WSAEHOSTUNREACH +#undef ENOTCONN +#define ENOTCONN WSAENOTCONN + +/* + * Locale stuff. + * + * Extended locale functions with gratuitous underscore prefixes. + * (These APIs are nevertheless fully documented by Microsoft.) 
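Because the header above re-points EAGAIN, EINTR, EWOULDBLOCK and the other Berkeley error names at their WSA counterparts, socket code written against the POSIX names compiles unchanged, and comparisons against those names match the Winsock values that the port's socket wrappers leave in errno. A POSIX-flavoured sketch of the kind of retry loop that benefits; the callback indirection just keeps the example free of real sockets:

#include <errno.h>
#include <stddef.h>

typedef long (*send_fn) (int sock, const void *buf, size_t len);

/*
 * Retry on EINTR and report would-block to the caller.  With the
 * redefinitions above, the same comparisons also catch WSAEINTR and
 * WSAEWOULDBLOCK on Windows.
 */
long
send_with_retry(send_fn dosend, int sock, const void *buf, size_t len)
{
    long    rc;

    do
    {
        rc = dosend(sock, buf, len);
    } while (rc < 0 && errno == EINTR);

    if (rc < 0 && (errno == EAGAIN || errno == EWOULDBLOCK))
        return 0;               /* caller should wait for socket readiness */
    return rc;
}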
+ */ +#define locale_t _locale_t +#define tolower_l _tolower_l +#define toupper_l _toupper_l +#define towlower_l _towlower_l +#define towupper_l _towupper_l +#define isdigit_l _isdigit_l +#define iswdigit_l _iswdigit_l +#define isalpha_l _isalpha_l +#define iswalpha_l _iswalpha_l +#define isalnum_l _isalnum_l +#define iswalnum_l _iswalnum_l +#define isupper_l _isupper_l +#define iswupper_l _iswupper_l +#define islower_l _islower_l +#define iswlower_l _iswlower_l +#define isgraph_l _isgraph_l +#define iswgraph_l _iswgraph_l +#define isprint_l _isprint_l +#define iswprint_l _iswprint_l +#define ispunct_l _ispunct_l +#define iswpunct_l _iswpunct_l +#define isspace_l _isspace_l +#define iswspace_l _iswspace_l +#define strcoll_l _strcoll_l +#define strxfrm_l _strxfrm_l +#define wcscoll_l _wcscoll_l +#define wcstombs_l _wcstombs_l +#define mbstowcs_l _mbstowcs_l + +/* + * Versions of libintl >= 0.18? try to replace setlocale() with a macro + * to their own versions. Remove the macro, if it exists, because it + * ends up calling the wrong version when the backend and libintl use + * different versions of msvcrt. + */ +#if defined(setlocale) +#undef setlocale +#endif + +/* + * Define our own wrapper macro around setlocale() to work around bugs in + * Windows' native setlocale() function. + */ +extern char *pgwin32_setlocale(int category, const char *locale); + +#define setlocale(a,b) pgwin32_setlocale(a,b) + + +/* In backend/port/win32/signal.c */ +extern PGDLLIMPORT volatile int pg_signal_queue; +extern PGDLLIMPORT int pg_signal_mask; +extern HANDLE pgwin32_signal_event; +extern HANDLE pgwin32_initial_signal_pipe; + +#define UNBLOCKED_SIGNAL_QUEUE() (pg_signal_queue & ~pg_signal_mask) +#define PG_SIGNAL_COUNT 32 + +void pgwin32_signal_initialize(void); +HANDLE pgwin32_create_signal_listener(pid_t pid); +void pgwin32_dispatch_queued_signals(void); +void pg_queue_signal(int signum); + +/* In src/port/kill.c */ +#define kill(pid,sig) pgkill(pid,sig) +extern int pgkill(int pid, int sig); + +/* In backend/port/win32/socket.c */ +#ifndef FRONTEND +#define socket(af, type, protocol) pgwin32_socket(af, type, protocol) +#define bind(s, addr, addrlen) pgwin32_bind(s, addr, addrlen) +#define listen(s, backlog) pgwin32_listen(s, backlog) +#define accept(s, addr, addrlen) pgwin32_accept(s, addr, addrlen) +#define connect(s, name, namelen) pgwin32_connect(s, name, namelen) +#define select(n, r, w, e, timeout) pgwin32_select(n, r, w, e, timeout) +#define recv(s, buf, len, flags) pgwin32_recv(s, buf, len, flags) +#define send(s, buf, len, flags) pgwin32_send(s, buf, len, flags) + +SOCKET pgwin32_socket(int af, int type, int protocol); +int pgwin32_bind(SOCKET s, struct sockaddr *addr, int addrlen); +int pgwin32_listen(SOCKET s, int backlog); +SOCKET pgwin32_accept(SOCKET s, struct sockaddr *addr, int *addrlen); +int pgwin32_connect(SOCKET s, const struct sockaddr *name, int namelen); +int pgwin32_select(int nfds, fd_set *readfs, fd_set *writefds, fd_set *exceptfds, const struct timeval *timeout); +int pgwin32_recv(SOCKET s, char *buf, int len, int flags); +int pgwin32_send(SOCKET s, const void *buf, int len, int flags); +int pgwin32_waitforsinglesocket(SOCKET s, int what, int timeout); + +extern int pgwin32_noblock; + +#endif /* FRONTEND */ + +/* in backend/port/win32_shmem.c */ +extern int pgwin32_ReserveSharedMemoryRegion(HANDLE); + +/* in backend/port/win32/crashdump.c */ +extern void pgwin32_install_crashdump_handler(void); + +/* in port/win32error.c */ +extern void _dosmaperr(unsigned long); + +/* in 
port/win32env.c */ +extern int pgwin32_putenv(const char *); +extern void pgwin32_unsetenv(const char *); + +/* in port/win32security.c */ +extern int pgwin32_is_service(void); +extern int pgwin32_is_admin(void); + +/* Windows security token manipulation (in src/common/exec.c) */ +extern BOOL AddUserToTokenDacl(HANDLE hToken); + +#define putenv(x) pgwin32_putenv(x) +#define unsetenv(x) pgwin32_unsetenv(x) + +/* Things that exist in MinGW headers, but need to be added to MSVC */ +#ifdef _MSC_VER + +#ifndef _WIN64 +typedef long ssize_t; +#else +typedef __int64 ssize_t; +#endif + +typedef unsigned short mode_t; + +#define F_OK 0 +#define W_OK 2 +#define R_OK 4 + +/* + * isinf() and isnan() should per spec be in <math.h>, but MSVC older than + * 2013 does not have them there. It does have _fpclass() and _isnan(), but + * they're in <float.h>, so include that here even though it means float.h + * percolates to our whole tree. Recent versions don't require any of this. + */ +#if (_MSC_VER < 1800) +#include <float.h> +#define isinf(x) ((_fpclass(x) == _FPCLASS_PINF) || (_fpclass(x) == _FPCLASS_NINF)) +#define isnan(x) _isnan(x) +#endif + +/* Pulled from Makefile.port in MinGW */ +#define DLSUFFIX ".dll" + +#endif /* _MSC_VER */ + +#endif /* PG_WIN32_PORT_H */ diff --git a/src/include/portability/instr_time.h b/src/include/portability/instr_time.h index b6e8c58d44..f968444671 100644 --- a/src/include/portability/instr_time.h +++ b/src/include/portability/instr_time.h @@ -43,7 +43,7 @@ * Beware of multiple evaluations of the macro arguments. * * - * Copyright (c) 2001-2017, PostgreSQL Global Development Group + * Copyright (c) 2001-2018, PostgreSQL Global Development Group * * src/include/portability/instr_time.h * diff --git a/src/include/portability/mem.h b/src/include/portability/mem.h index fa507c2581..ba183f5368 100644 --- a/src/include/portability/mem.h +++ b/src/include/portability/mem.h @@ -3,7 +3,7 @@ * mem.h * portability definitions for various memory operations * - * Copyright (c) 2001-2017, PostgreSQL Global Development Group + * Copyright (c) 2001-2018, PostgreSQL Global Development Group * * src/include/portability/mem.h * diff --git a/src/include/postgres.h b/src/include/postgres.h index 1ca9b60ea1..b596fcb513 100644 --- a/src/include/postgres.h +++ b/src/include/postgres.h @@ -7,7 +7,7 @@ * Client-side code should include postgres_fe.h instead.
* * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1995, Regents of the University of California * * src/include/postgres.h @@ -24,8 +24,7 @@ * section description * ------- ------------------------------------------------ * 1) variable-length datatypes (TOAST support) - * 2) datum type + support macros - * 3) exception handling backend support + * 2) Datum type + support macros * * NOTES * @@ -322,6 +321,8 @@ typedef struct (VARATT_IS_EXTERNAL(PTR) && VARTAG_EXTERNAL(PTR) == VARTAG_EXPANDED_RW) #define VARATT_IS_EXTERNAL_EXPANDED(PTR) \ (VARATT_IS_EXTERNAL(PTR) && VARTAG_IS_EXPANDED(VARTAG_EXTERNAL(PTR))) +#define VARATT_IS_EXTERNAL_NON_EXPANDED(PTR) \ + (VARATT_IS_EXTERNAL(PTR) && !VARTAG_IS_EXPANDED(VARTAG_EXTERNAL(PTR))) #define VARATT_IS_SHORT(PTR) VARATT_IS_1B(PTR) #define VARATT_IS_EXTENDED(PTR) (!VARATT_IS_4B_U(PTR)) @@ -349,60 +350,38 @@ typedef struct /* ---------------------------------------------------------------- - * Section 2: datum type + support macros + * Section 2: Datum type + support macros * ---------------------------------------------------------------- */ /* - * Port Notes: - * Postgres makes the following assumptions about datatype sizes: + * A Datum contains either a value of a pass-by-value type or a pointer to a + * value of a pass-by-reference type. Therefore, we require: * - * sizeof(Datum) == sizeof(void *) == 4 or 8 - * sizeof(char) == 1 - * sizeof(short) == 2 + * sizeof(Datum) == sizeof(void *) == 4 or 8 * - * When a type narrower than Datum is stored in a Datum, we place it in the - * low-order bits and are careful that the DatumGetXXX macro for it discards - * the unused high-order bits (as opposed to, say, assuming they are zero). - * This is needed to support old-style user-defined functions, since depending - * on architecture and compiler, the return value of a function returning char - * or short may contain garbage when called as if it returned Datum. + * The macros below and the analogous macros for other types should be used to + * convert between a Datum and the appropriate C type. */ typedef uintptr_t Datum; #define SIZEOF_DATUM SIZEOF_VOID_P -typedef Datum *DatumPtr; - -#define GET_1_BYTE(datum) (((Datum) (datum)) & 0x000000ff) -#define GET_2_BYTES(datum) (((Datum) (datum)) & 0x0000ffff) -#define GET_4_BYTES(datum) (((Datum) (datum)) & 0xffffffff) -#if SIZEOF_DATUM == 8 -#define GET_8_BYTES(datum) ((Datum) (datum)) -#endif -#define SET_1_BYTE(value) (((Datum) (value)) & 0x000000ff) -#define SET_2_BYTES(value) (((Datum) (value)) & 0x0000ffff) -#define SET_4_BYTES(value) (((Datum) (value)) & 0xffffffff) -#if SIZEOF_DATUM == 8 -#define SET_8_BYTES(value) ((Datum) (value)) -#endif - /* * DatumGetBool * Returns boolean value of a datum. * - * Note: any nonzero value will be considered TRUE, but we ignore bits to - * the left of the width of bool, per comment above. + * Note: any nonzero value will be considered true. */ -#define DatumGetBool(X) ((bool) (GET_1_BYTE(X) != 0)) +#define DatumGetBool(X) ((bool) ((X) != 0)) /* * BoolGetDatum * Returns datum representation for a boolean. * - * Note: any nonzero value will be considered TRUE. + * Note: any nonzero value will be considered true. */ #define BoolGetDatum(X) ((Datum) ((X) ? 1 : 0)) @@ -412,140 +391,140 @@ typedef Datum *DatumPtr; * Returns character value of a datum. 
*/ -#define DatumGetChar(X) ((char) GET_1_BYTE(X)) +#define DatumGetChar(X) ((char) (X)) /* * CharGetDatum * Returns datum representation for a character. */ -#define CharGetDatum(X) ((Datum) SET_1_BYTE(X)) +#define CharGetDatum(X) ((Datum) (X)) /* * Int8GetDatum * Returns datum representation for an 8-bit integer. */ -#define Int8GetDatum(X) ((Datum) SET_1_BYTE(X)) +#define Int8GetDatum(X) ((Datum) (X)) /* * DatumGetUInt8 * Returns 8-bit unsigned integer value of a datum. */ -#define DatumGetUInt8(X) ((uint8) GET_1_BYTE(X)) +#define DatumGetUInt8(X) ((uint8) (X)) /* * UInt8GetDatum * Returns datum representation for an 8-bit unsigned integer. */ -#define UInt8GetDatum(X) ((Datum) SET_1_BYTE(X)) +#define UInt8GetDatum(X) ((Datum) (X)) /* * DatumGetInt16 * Returns 16-bit integer value of a datum. */ -#define DatumGetInt16(X) ((int16) GET_2_BYTES(X)) +#define DatumGetInt16(X) ((int16) (X)) /* * Int16GetDatum * Returns datum representation for a 16-bit integer. */ -#define Int16GetDatum(X) ((Datum) SET_2_BYTES(X)) +#define Int16GetDatum(X) ((Datum) (X)) /* * DatumGetUInt16 * Returns 16-bit unsigned integer value of a datum. */ -#define DatumGetUInt16(X) ((uint16) GET_2_BYTES(X)) +#define DatumGetUInt16(X) ((uint16) (X)) /* * UInt16GetDatum * Returns datum representation for a 16-bit unsigned integer. */ -#define UInt16GetDatum(X) ((Datum) SET_2_BYTES(X)) +#define UInt16GetDatum(X) ((Datum) (X)) /* * DatumGetInt32 * Returns 32-bit integer value of a datum. */ -#define DatumGetInt32(X) ((int32) GET_4_BYTES(X)) +#define DatumGetInt32(X) ((int32) (X)) /* * Int32GetDatum * Returns datum representation for a 32-bit integer. */ -#define Int32GetDatum(X) ((Datum) SET_4_BYTES(X)) +#define Int32GetDatum(X) ((Datum) (X)) /* * DatumGetUInt32 * Returns 32-bit unsigned integer value of a datum. */ -#define DatumGetUInt32(X) ((uint32) GET_4_BYTES(X)) +#define DatumGetUInt32(X) ((uint32) (X)) /* * UInt32GetDatum * Returns datum representation for a 32-bit unsigned integer. */ -#define UInt32GetDatum(X) ((Datum) SET_4_BYTES(X)) +#define UInt32GetDatum(X) ((Datum) (X)) /* * DatumGetObjectId * Returns object identifier value of a datum. */ -#define DatumGetObjectId(X) ((Oid) GET_4_BYTES(X)) +#define DatumGetObjectId(X) ((Oid) (X)) /* * ObjectIdGetDatum * Returns datum representation for an object identifier. */ -#define ObjectIdGetDatum(X) ((Datum) SET_4_BYTES(X)) +#define ObjectIdGetDatum(X) ((Datum) (X)) /* * DatumGetTransactionId * Returns transaction identifier value of a datum. */ -#define DatumGetTransactionId(X) ((TransactionId) GET_4_BYTES(X)) +#define DatumGetTransactionId(X) ((TransactionId) (X)) /* * TransactionIdGetDatum * Returns datum representation for a transaction identifier. */ -#define TransactionIdGetDatum(X) ((Datum) SET_4_BYTES((X))) +#define TransactionIdGetDatum(X) ((Datum) (X)) /* * MultiXactIdGetDatum * Returns datum representation for a multixact identifier. */ -#define MultiXactIdGetDatum(X) ((Datum) SET_4_BYTES((X))) +#define MultiXactIdGetDatum(X) ((Datum) (X)) /* * DatumGetCommandId * Returns command identifier value of a datum. */ -#define DatumGetCommandId(X) ((CommandId) GET_4_BYTES(X)) +#define DatumGetCommandId(X) ((CommandId) (X)) /* * CommandIdGetDatum * Returns datum representation for a command identifier. 
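/*
 * Illustrative sketch, not part of the patch: with the simplified
 * definitions above, converting between C values and Datums is a plain
 * cast in each direction. The function name is hypothetical.
 */
#include "postgres.h"

static void
datum_round_trip_example(void)
{
	int32		val = -42;
	Datum		d = Int32GetDatum(val); /* pack the value into a Datum */
	int32		back = DatumGetInt32(d);	/* and recover it unchanged */

	Assert(back == val);

	/* for booleans, any nonzero value is considered true */
	Assert(DatumGetBool(BoolGetDatum(true)));
}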
*/ -#define CommandIdGetDatum(X) ((Datum) SET_4_BYTES(X)) +#define CommandIdGetDatum(X) ((Datum) (X)) /* * DatumGetPointer @@ -608,7 +587,7 @@ typedef Datum *DatumPtr; */ #ifdef USE_FLOAT8_BYVAL -#define DatumGetInt64(X) ((int64) GET_8_BYTES(X)) +#define DatumGetInt64(X) ((int64) (X)) #else #define DatumGetInt64(X) (* ((int64 *) DatumGetPointer(X))) #endif @@ -622,7 +601,7 @@ typedef Datum *DatumPtr; */ #ifdef USE_FLOAT8_BYVAL -#define Int64GetDatum(X) ((Datum) SET_8_BYTES(X)) +#define Int64GetDatum(X) ((Datum) (X)) #else extern Datum Int64GetDatum(int64 X); #endif @@ -635,7 +614,7 @@ extern Datum Int64GetDatum(int64 X); */ #ifdef USE_FLOAT8_BYVAL -#define DatumGetUInt64(X) ((uint64) GET_8_BYTES(X)) +#define DatumGetUInt64(X) ((uint64) (X)) #else #define DatumGetUInt64(X) (* ((uint64 *) DatumGetPointer(X))) #endif @@ -649,7 +628,7 @@ extern Datum Int64GetDatum(int64 X); */ #ifdef USE_FLOAT8_BYVAL -#define UInt64GetDatum(X) ((Datum) SET_8_BYTES(X)) +#define UInt64GetDatum(X) ((Datum) (X)) #else #define UInt64GetDatum(X) Int64GetDatum((int64) (X)) #endif @@ -788,19 +767,4 @@ extern Datum Float8GetDatum(float8 X); #define Float4GetDatumFast(X) PointerGetDatum(&(X)) #endif - -/* ---------------------------------------------------------------- - * Section 3: exception handling backend support - * ---------------------------------------------------------------- - */ - -/* - * Backend only infrastructure for the assertion-related macros in c.h. - * - * ExceptionalCondition must be present even when assertions are not enabled. - */ -extern void ExceptionalCondition(const char *conditionName, - const char *errorType, - const char *fileName, int lineNumber) pg_attribute_noreturn(); - #endif /* POSTGRES_H */ diff --git a/src/include/postgres_fe.h b/src/include/postgres_fe.h index 1dd01d0283..14567953f2 100644 --- a/src/include/postgres_fe.h +++ b/src/include/postgres_fe.h @@ -8,7 +8,7 @@ * postgres.h. * * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1995, Regents of the University of California * * src/include/postgres_fe.h diff --git a/src/include/postmaster/autovacuum.h b/src/include/postmaster/autovacuum.h index 3469915ae2..96752caed8 100644 --- a/src/include/postmaster/autovacuum.h +++ b/src/include/postmaster/autovacuum.h @@ -4,7 +4,7 @@ * header file for integrated autovacuum daemon * * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/include/postmaster/autovacuum.h @@ -71,7 +71,7 @@ extern void AutovacuumWorkerIAm(void); extern void AutovacuumLauncherIAm(void); #endif -extern void AutoVacuumRequestWork(AutoVacuumWorkItemType type, +extern bool AutoVacuumRequestWork(AutoVacuumWorkItemType type, Oid relationId, BlockNumber blkno); /* shared memory stuff */ diff --git a/src/include/postmaster/bgworker.h b/src/include/postmaster/bgworker.h index e2ecd3c9eb..de16d11c2f 100644 --- a/src/include/postmaster/bgworker.h +++ b/src/include/postmaster/bgworker.h @@ -31,7 +31,7 @@ * different) code. 
* * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * IDENTIFICATION @@ -82,12 +82,13 @@ typedef enum #define BGW_DEFAULT_RESTART_INTERVAL 60 #define BGW_NEVER_RESTART -1 -#define BGW_MAXLEN 64 +#define BGW_MAXLEN 96 #define BGW_EXTRALEN 128 typedef struct BackgroundWorker { char bgw_name[BGW_MAXLEN]; + char bgw_type[BGW_MAXLEN]; int bgw_flags; BgWorkerStartTime bgw_start_time; int bgw_restart_time; /* in seconds, or BGW_NEVER_RESTART */ @@ -122,6 +123,7 @@ extern BgwHandleStatus GetBackgroundWorkerPid(BackgroundWorkerHandle *handle, extern BgwHandleStatus WaitForBackgroundWorkerStartup(BackgroundWorkerHandle *handle, pid_t *pid); extern BgwHandleStatus WaitForBackgroundWorkerShutdown(BackgroundWorkerHandle *); +extern const char *GetBackgroundWorkerTypeByPid(pid_t pid); /* Terminate a bgworker */ extern void TerminateBackgroundWorker(BackgroundWorkerHandle *handle); @@ -138,10 +140,19 @@ extern PGDLLIMPORT BackgroundWorker *MyBgworkerEntry; * If dbname is NULL, connection is made to no specific database; * only shared catalogs can be accessed. */ -extern void BackgroundWorkerInitializeConnection(char *dbname, char *username); +extern void BackgroundWorkerInitializeConnection(const char *dbname, const char *username, uint32 flags); /* Just like the above, but specifying database and user by OID. */ -extern void BackgroundWorkerInitializeConnectionByOid(Oid dboid, Oid useroid); +extern void BackgroundWorkerInitializeConnectionByOid(Oid dboid, Oid useroid, uint32 flags); + +/* + * Flags to BackgroundWorkerInitializeConnection et al + * + * + * Allow bypassing datallowconn restrictions when connecting to database + */ +#define BGWORKER_BYPASS_ALLOWCONN 1 + /* Block/unblock signals in a background worker process */ extern void BackgroundWorkerBlockSignals(void); diff --git a/src/include/postmaster/bgworker_internals.h b/src/include/postmaster/bgworker_internals.h index bff10ba53c..548dc1c146 100644 --- a/src/include/postmaster/bgworker_internals.h +++ b/src/include/postmaster/bgworker_internals.h @@ -2,7 +2,7 @@ * bgworker_internals.h * POSTGRES pluggable background workers internals * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * IDENTIFICATION diff --git a/src/include/postmaster/bgwriter.h b/src/include/postmaster/bgwriter.h index a9b8bc7e8e..941c6aba7d 100644 --- a/src/include/postmaster/bgwriter.h +++ b/src/include/postmaster/bgwriter.h @@ -6,7 +6,7 @@ * The bgwriter process used to handle checkpointing duties too. Now * there is a separate process, but we did not bother to split this header. * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * * src/include/postmaster/bgwriter.h * diff --git a/src/include/postmaster/fork_process.h b/src/include/postmaster/fork_process.h index 9d5b97aca7..d552e0297c 100644 --- a/src/include/postmaster/fork_process.h +++ b/src/include/postmaster/fork_process.h @@ -3,7 +3,7 @@ * fork_process.h * Exports from postmaster/fork_process.c. 
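/*
 * Illustrative sketch, not part of the patch: an extension registering a
 * background worker might fill in the new bgw_type field (a generic type
 * shared by all workers of this kind) alongside bgw_name, and later pass
 * the new flags argument when connecting. All names below (library,
 * functions, database) are hypothetical; field names follow bgworker.h.
 */
#include "postgres.h"
#include "postmaster/bgworker.h"

void		example_worker_main(Datum main_arg);

static void
register_example_worker(void)
{
	BackgroundWorker worker;

	memset(&worker, 0, sizeof(worker));
	worker.bgw_flags = BGWORKER_SHMEM_ACCESS | BGWORKER_BACKEND_DATABASE_CONNECTION;
	worker.bgw_start_time = BgWorkerStart_RecoveryFinished;
	worker.bgw_restart_time = BGW_NEVER_RESTART;
	snprintf(worker.bgw_name, BGW_MAXLEN, "example worker for db1");
	snprintf(worker.bgw_type, BGW_MAXLEN, "example worker");
	snprintf(worker.bgw_library_name, BGW_MAXLEN, "example_ext");
	snprintf(worker.bgw_function_name, BGW_MAXLEN, "example_worker_main");
	RegisterBackgroundWorker(&worker);
}

/*
 * Inside example_worker_main(), the worker could then connect even to a
 * database with datallowconn = false:
 *
 *		BackgroundWorkerInitializeConnection("db1", NULL, BGWORKER_BYPASS_ALLOWCONN);
 */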
* - * Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Copyright (c) 1996-2018, PostgreSQL Global Development Group * * src/include/postmaster/fork_process.h * diff --git a/src/include/postmaster/pgarch.h b/src/include/postmaster/pgarch.h index ab1ddcf52c..292e63a26a 100644 --- a/src/include/postmaster/pgarch.h +++ b/src/include/postmaster/pgarch.h @@ -3,7 +3,7 @@ * pgarch.h * Exports from postmaster/pgarch.c. * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/include/postmaster/pgarch.h diff --git a/src/include/postmaster/postmaster.h b/src/include/postmaster/postmaster.h index 0f85908b09..a40d66e890 100644 --- a/src/include/postmaster/postmaster.h +++ b/src/include/postmaster/postmaster.h @@ -3,7 +3,7 @@ * postmaster.h * Exports from postmaster/postmaster.c. * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/include/postmaster/postmaster.h @@ -16,7 +16,7 @@ /* GUC options */ extern bool EnableSSL; extern int ReservedBackends; -extern int PostPortNumber; +extern PGDLLIMPORT int PostPortNumber; extern int Unix_socket_permissions; extern char *Unix_socket_group; extern char *Unix_socket_directories; @@ -44,10 +44,11 @@ extern int postmaster_alive_fds[2]; #define POSTMASTER_FD_OWN 1 /* kept open by postmaster only */ #endif -extern const char *progname; +extern PGDLLIMPORT const char *progname; extern void PostmasterMain(int argc, char *argv[]) pg_attribute_noreturn(); extern void ClosePostmasterPorts(bool am_syslogger); +extern void InitProcessGlobals(void); extern int MaxLivePostmasterChildren(void); diff --git a/src/include/postmaster/startup.h b/src/include/postmaster/startup.h index 883bd395bd..1bef7239b2 100644 --- a/src/include/postmaster/startup.h +++ b/src/include/postmaster/startup.h @@ -3,7 +3,7 @@ * startup.h * Exports from postmaster/startup.c. * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * * src/include/postmaster/startup.h * diff --git a/src/include/postmaster/syslogger.h b/src/include/postmaster/syslogger.h index f4248ef5d4..3fcb26cdb8 100644 --- a/src/include/postmaster/syslogger.h +++ b/src/include/postmaster/syslogger.h @@ -3,7 +3,7 @@ * syslogger.h * Exports from postmaster/syslogger.c. * - * Copyright (c) 2004-2017, PostgreSQL Global Development Group + * Copyright (c) 2004-2018, PostgreSQL Global Development Group * * src/include/postmaster/syslogger.h * @@ -87,6 +87,9 @@ extern void write_syslogger_file(const char *buffer, int count, int dest); extern void SysLoggerMain(int argc, char *argv[]) pg_attribute_noreturn(); #endif +extern bool CheckLogrotateSignal(void); +extern void RemoveLogrotateSignalFiles(void); + /* * Name of files saving meta-data information about the log * files currently in use by the syslogger diff --git a/src/include/postmaster/walwriter.h b/src/include/postmaster/walwriter.h index f0eb425802..26ec256b96 100644 --- a/src/include/postmaster/walwriter.h +++ b/src/include/postmaster/walwriter.h @@ -3,7 +3,7 @@ * walwriter.h * Exports from postmaster/walwriter.c. 
* * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * * src/include/postmaster/walwriter.h * diff --git a/src/include/regex/regexport.h b/src/include/regex/regexport.h index 1c84d15d55..ff6ff3d791 100644 --- a/src/include/regex/regexport.h +++ b/src/include/regex/regexport.h @@ -17,7 +17,7 @@ * line and start/end of string. Colors are numbered 0..C-1, but note that * color 0 is "white" (all unused characters) and can generally be ignored. * - * Portions Copyright (c) 2013-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 2013-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1998, 1999 Henry Spencer * * IDENTIFICATION diff --git a/src/include/replication/basebackup.h b/src/include/replication/basebackup.h index 1a165c860b..941d07b99f 100644 --- a/src/include/replication/basebackup.h +++ b/src/include/replication/basebackup.h @@ -3,7 +3,7 @@ * basebackup.h * Exports from replication/basebackup.c. * - * Portions Copyright (c) 2010-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 2010-2018, PostgreSQL Global Development Group * * src/include/replication/basebackup.h * diff --git a/src/include/replication/decode.h b/src/include/replication/decode.h index f9d81d77d0..0e63c0b296 100644 --- a/src/include/replication/decode.h +++ b/src/include/replication/decode.h @@ -2,7 +2,7 @@ * decode.h * PostgreSQL WAL to logical transformation * - * Portions Copyright (c) 2012-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 2012-2018, PostgreSQL Global Development Group * *------------------------------------------------------------------------- */ diff --git a/src/include/replication/logical.h b/src/include/replication/logical.h index 7f0e0fa881..c25ac1fa85 100644 --- a/src/include/replication/logical.h +++ b/src/include/replication/logical.h @@ -2,7 +2,7 @@ * logical.h * PostgreSQL logical decoding coordination * - * Copyright (c) 2012-2017, PostgreSQL Global Development Group + * Copyright (c) 2012-2018, PostgreSQL Global Development Group * *------------------------------------------------------------------------- */ @@ -45,6 +45,13 @@ typedef struct LogicalDecodingContext struct ReorderBuffer *reorder; struct SnapBuild *snapshot_builder; + /* + * Marks the logical decoding context as fast forward decoding one. Such a + * context does not have plugin loaded so most of the following + * properties are unused.
+ */ + bool fast_forward; + OutputPluginCallbacks callbacks; OutputPluginOptions options; @@ -97,6 +104,7 @@ extern LogicalDecodingContext *CreateInitDecodingContext(char *plugin, extern LogicalDecodingContext *CreateDecodingContext( XLogRecPtr start_lsn, List *output_plugin_options, + bool fast_forward, XLogPageReadCB read_page, LogicalOutputPluginWriterPrepareWrite prepare_write, LogicalOutputPluginWriterWrite do_write, diff --git a/src/include/replication/logicalfuncs.h b/src/include/replication/logicalfuncs.h index 71faee18cf..4236b40bc6 100644 --- a/src/include/replication/logicalfuncs.h +++ b/src/include/replication/logicalfuncs.h @@ -2,7 +2,7 @@ * logicalfuncs.h * PostgreSQL WAL to logical transformation support functions * - * Copyright (c) 2012-2017, PostgreSQL Global Development Group + * Copyright (c) 2012-2018, PostgreSQL Global Development Group * *------------------------------------------------------------------------- */ diff --git a/src/include/replication/logicallauncher.h b/src/include/replication/logicallauncher.h index 78016c448f..9f840b7bc1 100644 --- a/src/include/replication/logicallauncher.h +++ b/src/include/replication/logicallauncher.h @@ -3,7 +3,7 @@ * logicallauncher.h * Exports for logical replication launcher. * - * Portions Copyright (c) 2016-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 2016-2018, PostgreSQL Global Development Group * * src/include/replication/logicallauncher.h * @@ -24,6 +24,7 @@ extern void ApplyLauncherShmemInit(void); extern void ApplyLauncherWakeupAtCommit(void); extern bool XactManipulatesLogicalReplicationWorkers(void); extern void AtEOXact_ApplyLauncher(bool isCommit); +extern void AtEOSubXact_ApplyLauncher(bool isCommit, int nestDepth); extern bool IsLogicalLauncher(void); diff --git a/src/include/replication/logicalproto.h b/src/include/replication/logicalproto.h index a9736e1bf6..8192f79ce3 100644 --- a/src/include/replication/logicalproto.h +++ b/src/include/replication/logicalproto.h @@ -3,7 +3,7 @@ * logicalproto.h * logical replication protocol * - * Copyright (c) 2015-2017, PostgreSQL Global Development Group + * Copyright (c) 2015-2018, PostgreSQL Global Development Group * * IDENTIFICATION * src/include/replication/logicalproto.h @@ -55,10 +55,9 @@ typedef struct LogicalRepRelation /* Type mapping info */ typedef struct LogicalRepTyp { - Oid remoteid; /* unique id of the type */ - char *nspname; /* schema name */ - char *typname; /* name of the type */ - Oid typoid; /* local type Oid */ + Oid remoteid; /* unique id of the remote type */ + char *nspname; /* schema name of remote type */ + char *typname; /* name of the remote type */ } LogicalRepTyp; /* Transaction info */ @@ -98,6 +97,10 @@ extern void logicalrep_write_delete(StringInfo out, Relation rel, HeapTuple oldtuple); extern LogicalRepRelId logicalrep_read_delete(StringInfo in, LogicalRepTupleData *oldtup); +extern void logicalrep_write_truncate(StringInfo out, int nrelids, Oid relids[], + bool cascade, bool restart_seqs); +extern List *logicalrep_read_truncate(StringInfo in, + bool *cascade, bool *restart_seqs); extern void logicalrep_write_rel(StringInfo out, Relation rel); extern LogicalRepRelation *logicalrep_read_rel(StringInfo in); extern void logicalrep_write_typ(StringInfo out, Oid typoid); diff --git a/src/include/replication/logicalrelation.h b/src/include/replication/logicalrelation.h index 8352705650..73e4805827 100644 --- a/src/include/replication/logicalrelation.h +++ b/src/include/replication/logicalrelation.h @@ -3,7 +3,7 @@ * 
logicalrelation.h * Relation definitions for logical replication relation mapping. * - * Portions Copyright (c) 2016-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 2016-2018, PostgreSQL Global Development Group * * src/include/replication/logicalrelation.h * @@ -37,6 +37,6 @@ extern void logicalrep_rel_close(LogicalRepRelMapEntry *rel, LOCKMODE lockmode); extern void logicalrep_typmap_update(LogicalRepTyp *remotetyp); -extern Oid logicalrep_typmap_getid(Oid remoteid); +extern char *logicalrep_typmap_gettypname(Oid remoteid); #endif /* LOGICALRELATION_H */ diff --git a/src/include/replication/logicalworker.h b/src/include/replication/logicalworker.h index 2557d5a23b..6379aae7ce 100644 --- a/src/include/replication/logicalworker.h +++ b/src/include/replication/logicalworker.h @@ -3,7 +3,7 @@ * logicalworker.h * Exports for logical replication workers. * - * Portions Copyright (c) 2016-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 2016-2018, PostgreSQL Global Development Group * * src/include/replication/logicalworker.h * diff --git a/src/include/replication/message.h b/src/include/replication/message.h index 1f2d0bb535..37e1bd32d6 100644 --- a/src/include/replication/message.h +++ b/src/include/replication/message.h @@ -2,7 +2,7 @@ * message.h * Exports from replication/logical/message.c * - * Copyright (c) 2013-2017, PostgreSQL Global Development Group + * Copyright (c) 2013-2018, PostgreSQL Global Development Group * * src/include/replication/message.h *------------------------------------------------------------------------- diff --git a/src/include/replication/origin.h b/src/include/replication/origin.h index a9595c3c3d..34fc01207d 100644 --- a/src/include/replication/origin.h +++ b/src/include/replication/origin.h @@ -2,7 +2,7 @@ * origin.h * Exports from replication/logical/origin.c * - * Copyright (c) 2013-2017, PostgreSQL Global Development Group + * Copyright (c) 2013-2018, PostgreSQL Global Development Group * * src/include/replication/origin.h *------------------------------------------------------------------------- diff --git a/src/include/replication/output_plugin.h b/src/include/replication/output_plugin.h index 26ff024882..1ee0a56f03 100644 --- a/src/include/replication/output_plugin.h +++ b/src/include/replication/output_plugin.h @@ -2,7 +2,7 @@ * output_plugin.h * PostgreSQL Logical Decode Plugin Interface * - * Copyright (c) 2012-2017, PostgreSQL Global Development Group + * Copyright (c) 2012-2018, PostgreSQL Global Development Group * *------------------------------------------------------------------------- */ @@ -26,6 +26,7 @@ typedef enum OutputPluginOutputType typedef struct OutputPluginOptions { OutputPluginOutputType output_type; + bool receive_rewrites; } OutputPluginOptions; /* @@ -60,6 +61,15 @@ typedef void (*LogicalDecodeChangeCB) (struct LogicalDecodingContext *ctx, Relation relation, ReorderBufferChange *change); +/* + * Callback for every TRUNCATE in a successful transaction. + */ +typedef void (*LogicalDecodeTruncateCB) (struct LogicalDecodingContext *ctx, + ReorderBufferTXN *txn, + int nrelations, + Relation relations[], + ReorderBufferChange *change); + /* * Called for every (explicit or implicit) COMMIT of a successful transaction. 
*/ @@ -97,6 +107,7 @@ typedef struct OutputPluginCallbacks LogicalDecodeStartupCB startup_cb; LogicalDecodeBeginCB begin_cb; LogicalDecodeChangeCB change_cb; + LogicalDecodeTruncateCB truncate_cb; LogicalDecodeCommitCB commit_cb; LogicalDecodeMessageCB message_cb; LogicalDecodeFilterByOriginCB filter_by_origin_cb; diff --git a/src/include/replication/pgoutput.h b/src/include/replication/pgoutput.h index 1ec4500d9d..29178da1d1 100644 --- a/src/include/replication/pgoutput.h +++ b/src/include/replication/pgoutput.h @@ -3,7 +3,7 @@ * pgoutput.h * Logical Replication output plugin * - * Copyright (c) 2015-2017, PostgreSQL Global Development Group + * Copyright (c) 2015-2018, PostgreSQL Global Development Group * * IDENTIFICATION * pgoutput.h diff --git a/src/include/replication/reorderbuffer.h b/src/include/replication/reorderbuffer.h index 86effe106b..7787edf7b6 100644 --- a/src/include/replication/reorderbuffer.h +++ b/src/include/replication/reorderbuffer.h @@ -2,7 +2,7 @@ * reorderbuffer.h * PostgreSQL logical replay/reorder buffer management. * - * Copyright (c) 2012-2017, PostgreSQL Global Development Group + * Copyright (c) 2012-2018, PostgreSQL Global Development Group * * src/include/replication/reorderbuffer.h */ @@ -59,7 +59,8 @@ enum ReorderBufferChangeType REORDER_BUFFER_CHANGE_INTERNAL_COMMAND_ID, REORDER_BUFFER_CHANGE_INTERNAL_TUPLECID, REORDER_BUFFER_CHANGE_INTERNAL_SPEC_INSERT, - REORDER_BUFFER_CHANGE_INTERNAL_SPEC_CONFIRM + REORDER_BUFFER_CHANGE_INTERNAL_SPEC_CONFIRM, + REORDER_BUFFER_CHANGE_TRUNCATE }; /* @@ -99,6 +100,18 @@ typedef struct ReorderBufferChange ReorderBufferTupleBuf *newtuple; } tp; + /* + * Truncate data for REORDER_BUFFER_CHANGE_TRUNCATE representing one + * set of relations to be truncated. + */ + struct + { + Size nrelids; + bool cascade; + bool restart_seqs; + Oid *relids; + } truncate; + /* Message with arbitrary data. */ struct { @@ -147,10 +160,9 @@ typedef struct ReorderBufferTXN /* did the TX have catalog changes */ bool has_catalog_changes; - /* - * Do we know this is a subxact? - */ + /* Do we know this is a subxact? Xid of top-level txn if so */ bool is_known_as_subxact; + TransactionId toplevel_xid; /* * LSN of the first data carrying, WAL record with knowledge about this @@ -168,6 +180,8 @@ typedef struct ReorderBufferTXN * * plain abort record * * prepared transaction abort * * error during decoding + * * for a crashed transaction, the LSN of the last change, regardless of + * what it was. * ---- */ XLogRecPtr final_lsn; @@ -194,10 +208,13 @@ typedef struct ReorderBufferTXN TimestampTz commit_time; /* - * Base snapshot or NULL. + * The base snapshot is used to decode all changes until either this + * transaction modifies the catalog, or another catalog-modifying + * transaction commits. */ Snapshot base_snapshot; XLogRecPtr base_snapshot_lsn; + dlist_node base_snapshot_node; /* link in txns_by_base_snapshot_lsn */ /* * How many ReorderBufferChange's do we have in this txn. 
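/*
 * Illustrative sketch, not part of the patch: an output plugin that wants
 * the new TRUNCATE events would provide a callback matching the
 * LogicalDecodeTruncateCB signature above and hook it into truncate_cb.
 * The plugin and callback names below are hypothetical.
 */
#include "postgres.h"
#include "replication/logical.h"
#include "utils/rel.h"

static void
example_truncate_cb(LogicalDecodingContext *ctx, ReorderBufferTXN *txn,
					int nrelations, Relation relations[],
					ReorderBufferChange *change)
{
	int			i;

	for (i = 0; i < nrelations; i++)
		elog(DEBUG1, "decoded TRUNCATE of \"%s\"",
			 RelationGetRelationName(relations[i]));
}

/*
 * ... and in the plugin's _PG_output_plugin_init(OutputPluginCallbacks *cb):
 *
 *		cb->truncate_cb = example_truncate_cb;
 */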
@@ -264,7 +281,7 @@ typedef struct ReorderBufferTXN * Position in one of three lists: * * list of subtransactions if we are *known* to be subxact * * list of toplevel xacts (can be an as-yet unknown subxact) - * * list of preallocated ReorderBufferTXNs + * * list of preallocated ReorderBufferTXNs (if unused) * --- */ dlist_node node; @@ -281,6 +298,14 @@ typedef void (*ReorderBufferApplyChangeCB) ( Relation relation, ReorderBufferChange *change); +/* truncate callback signature */ +typedef void (*ReorderBufferApplyTruncateCB) ( + ReorderBuffer *rb, + ReorderBufferTXN *txn, + int nrelations, + Relation relations[], + ReorderBufferChange *change); + /* begin callback signature */ typedef void (*ReorderBufferBeginCB) ( ReorderBuffer *rb, @@ -314,6 +339,15 @@ struct ReorderBuffer */ dlist_head toplevel_by_lsn; + /* + * Transactions and subtransactions that have a base snapshot, ordered by + * LSN of the record which caused us to first obtain the base snapshot. + * This is not the same as toplevel_by_lsn, because we only set the base + * snapshot on the first logical-decoding-relevant record (eg. heap + * writes), whereas the initial LSN could be set by other operations. + */ + dlist_head txns_by_base_snapshot_lsn; + /* * one-entry sized cache for by_txn. Very frequently the same txn gets * looked up over and over again. @@ -326,6 +360,7 @@ struct ReorderBuffer */ ReorderBufferBeginCB begin; ReorderBufferApplyChangeCB apply_change; + ReorderBufferApplyTruncateCB apply_truncate; ReorderBufferCommitCB commit; ReorderBufferMessageCB message; @@ -334,6 +369,11 @@ struct ReorderBuffer */ void *private_data; + /* + * Saved output plugin option + */ + bool output_rewrites; + /* * Private memory context. */ @@ -344,20 +384,7 @@ struct ReorderBuffer */ MemoryContext change_context; MemoryContext txn_context; - - /* - * Data structure slab cache. - * - * We allocate/deallocate some structures very frequently, to avoid bigger - * overhead we cache some unused ones here. - * - * The maximum number of cached entries is controlled by const variables - * on top of reorderbuffer.c - */ - - /* cached ReorderBufferTupleBufs */ - slist_head cached_tuplebufs; - Size nr_cached_tuplebufs; + MemoryContext tup_context; XLogRecPtr current_restart_decoding_lsn; @@ -375,6 +402,9 @@ void ReorderBufferReturnTupleBuf(ReorderBuffer *, ReorderBufferTupleBuf *tuple) ReorderBufferChange *ReorderBufferGetChange(ReorderBuffer *); void ReorderBufferReturnChange(ReorderBuffer *, ReorderBufferChange *); +Oid * ReorderBufferGetRelids(ReorderBuffer *, int nrelids); +void ReorderBufferReturnRelids(ReorderBuffer *, Oid *relids); + void ReorderBufferQueueChange(ReorderBuffer *, TransactionId, XLogRecPtr lsn, ReorderBufferChange *); void ReorderBufferQueueMessage(ReorderBuffer *, TransactionId, Snapshot snapshot, XLogRecPtr lsn, bool transactional, const char *prefix, @@ -406,6 +436,7 @@ bool ReorderBufferXidHasCatalogChanges(ReorderBuffer *, TransactionId xid); bool ReorderBufferXidHasBaseSnapshot(ReorderBuffer *, TransactionId xid); ReorderBufferTXN *ReorderBufferGetOldestTXN(ReorderBuffer *); +TransactionId ReorderBufferGetOldestXmin(ReorderBuffer *rb); void ReorderBufferSetRestartPoint(ReorderBuffer *, XLogRecPtr ptr); diff --git a/src/include/replication/slot.h b/src/include/replication/slot.h index 0bf2611fe9..7964ae254f 100644 --- a/src/include/replication/slot.h +++ b/src/include/replication/slot.h @@ -2,7 +2,7 @@ * slot.h * Replication slot management. 
* - * Copyright (c) 2012-2017, PostgreSQL Global Development Group + * Copyright (c) 2012-2018, PostgreSQL Global Development Group * *------------------------------------------------------------------------- */ @@ -22,9 +22,13 @@ * * Slots marked as PERSISTENT are crash-safe and will not be dropped when * released. Slots marked as EPHEMERAL will be dropped when released or after - * restarts. + * restarts. Slots marked TEMPORARY will be dropped at the end of a session + * or on error. * - * EPHEMERAL slots can be made PERSISTENT by calling ReplicationSlotPersist(). + * EPHEMERAL is used as a not-quite-ready state when creating persistent + * slots. EPHEMERAL slots can be made PERSISTENT by calling + * ReplicationSlotPersist(). For a slot that goes away at the end of a + * session, TEMPORARY is the appropriate choice. */ typedef enum ReplicationSlotPersistency { @@ -82,6 +86,19 @@ typedef struct ReplicationSlotPersistentData /* * Shared memory state of a single replication slot. + * + * The in-memory data of replication slots follows a locking model based + * on two linked concepts: + * - A replication slot's in_use flag is switched when added or discarded using + * the LWLock ReplicationSlotControlLock, which needs to be held in exclusive + * mode when updating the flag by the backend owning the slot and doing the + * operation, while readers (concurrent backends not owning the slot) need + * to hold it in shared mode when looking at replication slot data. + * - Individual fields are protected by mutex where only the backend owning + * the slot is authorized to update the fields from its own slot. The + * backend owning the slot does not need to take this lock when reading its + * own fields, while concurrent backends not owning this slot should take the + * lock when reading this slot's data. */ typedef struct ReplicationSlot { diff --git a/src/include/replication/snapbuild.h b/src/include/replication/snapbuild.h index 7653717f83..56257430ae 100644 --- a/src/include/replication/snapbuild.h +++ b/src/include/replication/snapbuild.h @@ -3,7 +3,7 @@ * snapbuild.h * Exports from replication/logical/snapbuild.c. * - * Copyright (c) 2012-2017, PostgreSQL Global Development Group + * Copyright (c) 2012-2018, PostgreSQL Global Development Group * * src/include/replication/snapbuild.h * diff --git a/src/include/replication/syncrep.h b/src/include/replication/syncrep.h index ceafe2cbea..bc43b4e109 100644 --- a/src/include/replication/syncrep.h +++ b/src/include/replication/syncrep.h @@ -3,7 +3,7 @@ * syncrep.h * Exports from replication/syncrep.c. * - * Portions Copyright (c) 2010-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 2010-2018, PostgreSQL Global Development Group * * IDENTIFICATION * src/include/replication/syncrep.h diff --git a/src/include/replication/walreceiver.h b/src/include/replication/walreceiver.h index 9a8b2e207e..5913b580c2 100644 --- a/src/include/replication/walreceiver.h +++ b/src/include/replication/walreceiver.h @@ -3,7 +3,7 @@ * walreceiver.h * Exports from replication/walreceiverfuncs.c.
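/*
 * Illustrative sketch, not part of the patch, of the slot locking rules
 * described above, for a backend that does not own the slots it inspects.
 * The function name is hypothetical; field names follow those in slot.h.
 */
#include "postgres.h"
#include "replication/slot.h"
#include "storage/lwlock.h"
#include "storage/spin.h"

static void
scan_slots_example(void)
{
	int			i;

	/* readers take ReplicationSlotControlLock in shared mode */
	LWLockAcquire(ReplicationSlotControlLock, LW_SHARED);
	for (i = 0; i < max_replication_slots; i++)
	{
		ReplicationSlot *s = &ReplicationSlotCtl->replication_slots[i];

		if (!s->in_use)
			continue;

		/* individual fields of someone else's slot are read under its mutex */
		SpinLockAcquire(&s->mutex);
		elog(DEBUG1, "slot \"%s\" restart_lsn %X/%X",
			 NameStr(s->data.name),
			 (uint32) (s->data.restart_lsn >> 32),
			 (uint32) s->data.restart_lsn);
		SpinLockRelease(&s->mutex);
	}
	LWLockRelease(ReplicationSlotControlLock);
}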
* - * Portions Copyright (c) 2010-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 2010-2018, PostgreSQL Global Development Group * * src/include/replication/walreceiver.h * @@ -15,6 +15,7 @@ #include "access/xlog.h" #include "access/xlogdefs.h" #include "fmgr.h" +#include "getaddrinfo.h" /* for NI_MAXHOST */ #include "replication/logicalproto.h" #include "replication/walsender.h" #include "storage/latch.h" @@ -108,6 +109,13 @@ typedef struct */ char conninfo[MAXCONNINFO]; + /* + * Host name (this can be a host name, an IP address, or a directory path) + * and port number of the active replication connection. + */ + char sender_host[NI_MAXHOST]; + int sender_port; + /* * replication slot name; is also used for walreceiver to connect with the * primary @@ -117,14 +125,6 @@ typedef struct /* set true once conninfo is ready to display (obfuscated pwds etc) */ bool ready_to_display; - slock_t mutex; /* locks shared variables shown above */ - - /* - * force walreceiver reply? This doesn't need to be locked; memory - * barriers for ordering are sufficient. - */ - bool force_reply; - /* * Latch used by startup process to wake up walreceiver after telling it * where to start streaming (after setting receiveStart and @@ -133,6 +133,15 @@ typedef struct * normally mapped to procLatch when walreceiver is running. */ Latch *latch; + + slock_t mutex; /* locks shared variables shown above */ + + /* + * force walreceiver reply? This doesn't need to be locked; memory + * barriers for ordering are sufficient. But we do need atomic fetch and + * store semantics, so use sig_atomic_t. + */ + sig_atomic_t force_reply; /* used as a bool */ } WalRcvData; extern WalRcvData *WalRcv; @@ -196,6 +205,9 @@ typedef WalReceiverConn *(*walrcv_connect_fn) (const char *conninfo, bool logica char **err); typedef void (*walrcv_check_conninfo_fn) (const char *conninfo); typedef char *(*walrcv_get_conninfo_fn) (WalReceiverConn *conn); +typedef void (*walrcv_get_senderinfo_fn) (WalReceiverConn *conn, + char **sender_host, + int *sender_port); typedef char *(*walrcv_identify_system_fn) (WalReceiverConn *conn, TimeLineID *primary_tli, int *server_version); @@ -226,6 +238,7 @@ typedef struct WalReceiverFunctionsType walrcv_connect_fn walrcv_connect; walrcv_check_conninfo_fn walrcv_check_conninfo; walrcv_get_conninfo_fn walrcv_get_conninfo; + walrcv_get_senderinfo_fn walrcv_get_senderinfo; walrcv_identify_system_fn walrcv_identify_system; walrcv_readtimelinehistoryfile_fn walrcv_readtimelinehistoryfile; walrcv_startstreaming_fn walrcv_startstreaming; @@ -245,6 +258,8 @@ extern PGDLLIMPORT WalReceiverFunctionsType *WalReceiverFunctions; WalReceiverFunctions->walrcv_check_conninfo(conninfo) #define walrcv_get_conninfo(conn) \ WalReceiverFunctions->walrcv_get_conninfo(conn) +#define walrcv_get_senderinfo(conn, sender_host, sender_port) \ + WalReceiverFunctions->walrcv_get_senderinfo(conn, sender_host, sender_port) #define walrcv_identify_system(conn, primary_tli, server_version) \ WalReceiverFunctions->walrcv_identify_system(conn, primary_tli, server_version) #define walrcv_readtimelinehistoryfile(conn, tli, filename, content, size) \ diff --git a/src/include/replication/walsender.h b/src/include/replication/walsender.h index 1f20db827a..45b72a76db 100644 --- a/src/include/replication/walsender.h +++ b/src/include/replication/walsender.h @@ -3,7 +3,7 @@ * walsender.h * Exports from replication/walsender.c. 
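/*
 * Illustrative sketch, not part of the patch: walreceiver code could use the
 * new walrcv_get_senderinfo() callback to populate the new sender_host /
 * sender_port fields under the shared-memory mutex. The helper name is
 * hypothetical, and the returned host string is assumed to be palloc'd.
 */
#include "postgres.h"
#include "replication/walreceiver.h"
#include "storage/spin.h"

static void
record_sender_info_example(WalReceiverConn *conn)
{
	WalRcvData *walrcv = WalRcv;
	char	   *sender_host = NULL;
	int			sender_port = 0;

	walrcv_get_senderinfo(conn, &sender_host, &sender_port);

	SpinLockAcquire(&walrcv->mutex);
	if (sender_host)
		strlcpy(walrcv->sender_host, sender_host, NI_MAXHOST);
	else
		walrcv->sender_host[0] = '\0';
	walrcv->sender_port = sender_port;
	SpinLockRelease(&walrcv->mutex);

	if (sender_host)
		pfree(sender_host);
}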
* - * Portions Copyright (c) 2010-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 2010-2018, PostgreSQL Global Development Group * * src/include/replication/walsender.h * diff --git a/src/include/replication/walsender_private.h b/src/include/replication/walsender_private.h index 17c68cba23..4b90477936 100644 --- a/src/include/replication/walsender_private.h +++ b/src/include/replication/walsender_private.h @@ -3,7 +3,7 @@ * walsender_private.h * Private definitions from replication/walsender.c. * - * Portions Copyright (c) 2010-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 2010-2018, PostgreSQL Global Development Group * * src/include/replication/walsender_private.h * diff --git a/src/include/replication/worker_internal.h b/src/include/replication/worker_internal.h index 7b8728cced..ef079111cd 100644 --- a/src/include/replication/worker_internal.h +++ b/src/include/replication/worker_internal.h @@ -3,7 +3,7 @@ * worker_internal.h * Internal headers shared by logical replication workers. * - * Portions Copyright (c) 2016-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 2016-2018, PostgreSQL Global Development Group * * src/include/replication/worker_internal.h * @@ -27,7 +27,7 @@ typedef struct LogicalRepWorker /* Indicates if this slot is used or free. */ bool in_use; - /* Increased everytime the slot is taken by new worker. */ + /* Increased every time the slot is taken by new worker. */ uint16 generation; /* Pointer to proc array. NULL if not running. */ diff --git a/src/include/rewrite/prs2lock.h b/src/include/rewrite/prs2lock.h index 419d140bd5..9e7c87a8c1 100644 --- a/src/include/rewrite/prs2lock.h +++ b/src/include/rewrite/prs2lock.h @@ -3,7 +3,7 @@ * prs2lock.h * data structures for POSTGRES Rule System II (rewrite rules only) * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/include/rewrite/prs2lock.h diff --git a/src/include/rewrite/rewriteDefine.h b/src/include/rewrite/rewriteDefine.h index 2e25288bb4..4ceaaffee7 100644 --- a/src/include/rewrite/rewriteDefine.h +++ b/src/include/rewrite/rewriteDefine.h @@ -4,7 +4,7 @@ * * * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/include/rewrite/rewriteDefine.h @@ -25,7 +25,7 @@ extern ObjectAddress DefineRule(RuleStmt *stmt, const char *queryString); -extern ObjectAddress DefineQueryRewrite(char *rulename, +extern ObjectAddress DefineQueryRewrite(const char *rulename, Oid event_relid, Node *event_qual, CmdType event_type, diff --git a/src/include/rewrite/rewriteHandler.h b/src/include/rewrite/rewriteHandler.h index 494fa29f10..8128199fc3 100644 --- a/src/include/rewrite/rewriteHandler.h +++ b/src/include/rewrite/rewriteHandler.h @@ -4,7 +4,7 @@ * External interface to query rewriter. 
* * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/include/rewrite/rewriteHandler.h @@ -23,6 +23,9 @@ extern void AcquireRewriteLocks(Query *parsetree, bool forUpdatePushedDown); extern Node *build_column_default(Relation rel, int attrno); +extern void rewriteTargetListUD(Query *parsetree, RangeTblEntry *target_rte, + Relation target_relation); + extern Query *get_view_query(Relation view); extern const char *view_query_is_auto_updatable(Query *viewquery, bool check_cols); diff --git a/src/include/rewrite/rewriteManip.h b/src/include/rewrite/rewriteManip.h index f0a7a8b2cd..f0299bc703 100644 --- a/src/include/rewrite/rewriteManip.h +++ b/src/include/rewrite/rewriteManip.h @@ -4,7 +4,7 @@ * Querytree manipulation subroutines for query rewriter. * * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/include/rewrite/rewriteManip.h diff --git a/src/include/rewrite/rewriteRemove.h b/src/include/rewrite/rewriteRemove.h index d7d53b0137..351e26c307 100644 --- a/src/include/rewrite/rewriteRemove.h +++ b/src/include/rewrite/rewriteRemove.h @@ -4,7 +4,7 @@ * * * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/include/rewrite/rewriteRemove.h diff --git a/src/include/rewrite/rewriteSupport.h b/src/include/rewrite/rewriteSupport.h index 60800aae25..f34bfda05f 100644 --- a/src/include/rewrite/rewriteSupport.h +++ b/src/include/rewrite/rewriteSupport.h @@ -4,7 +4,7 @@ * * * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/include/rewrite/rewriteSupport.h diff --git a/src/include/rewrite/rowsecurity.h b/src/include/rewrite/rowsecurity.h index 0dbc1a4bee..f2f5251c71 100644 --- a/src/include/rewrite/rowsecurity.h +++ b/src/include/rewrite/rowsecurity.h @@ -5,7 +5,7 @@ * prototypes for rewrite/rowsecurity.c and the structures for managing * the row security policies for relations in relcache. * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * ------------------------------------------------------------------------- diff --git a/src/include/rusagestub.h b/src/include/rusagestub.h index f54f66e6f3..1423e2699e 100644 --- a/src/include/rusagestub.h +++ b/src/include/rusagestub.h @@ -4,7 +4,7 @@ * Stubs for getrusage(3). * * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/include/rusagestub.h diff --git a/src/include/snowball/header.h b/src/include/snowball/header.h index 10acf4c53d..c0450ae4eb 100644 --- a/src/include/snowball/header.h +++ b/src/include/snowball/header.h @@ -13,7 +13,7 @@ * * NOTE: this file should not be included into any non-snowball sources! 
* - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * * src/include/snowball/header.h * diff --git a/src/include/snowball/libstemmer/header.h b/src/include/snowball/libstemmer/header.h index 4d3078f50f..6bccf0f2c7 100644 --- a/src/include/snowball/libstemmer/header.h +++ b/src/include/snowball/libstemmer/header.h @@ -54,5 +54,7 @@ extern int insert_v(struct SN_env * z, int bra, int ket, const symbol * p); extern symbol * slice_to(struct SN_env * z, symbol * p); extern symbol * assign_to(struct SN_env * z, symbol * p); +extern int len_utf8(const symbol * p); + extern void debug(struct SN_env * z, int number, int line_count); diff --git a/src/include/snowball/libstemmer/stem_ISO_8859_1_danish.h b/src/include/snowball/libstemmer/stem_ISO_8859_1_danish.h index 49c5559cdf..57f16f8960 100644 --- a/src/include/snowball/libstemmer/stem_ISO_8859_1_danish.h +++ b/src/include/snowball/libstemmer/stem_ISO_8859_1_danish.h @@ -1,5 +1,5 @@ - -/* This file was generated automatically by the Snowball to ANSI C compiler */ +/* This file was generated automatically by the Snowball to ISO C compiler */ +/* http://snowballstem.org/ */ #ifdef __cplusplus extern "C" { diff --git a/src/include/snowball/libstemmer/stem_ISO_8859_1_dutch.h b/src/include/snowball/libstemmer/stem_ISO_8859_1_dutch.h index e67d11152c..5423842969 100644 --- a/src/include/snowball/libstemmer/stem_ISO_8859_1_dutch.h +++ b/src/include/snowball/libstemmer/stem_ISO_8859_1_dutch.h @@ -1,5 +1,5 @@ - -/* This file was generated automatically by the Snowball to ANSI C compiler */ +/* This file was generated automatically by the Snowball to ISO C compiler */ +/* http://snowballstem.org/ */ #ifdef __cplusplus extern "C" { diff --git a/src/include/snowball/libstemmer/stem_ISO_8859_1_english.h b/src/include/snowball/libstemmer/stem_ISO_8859_1_english.h index e685dcf7ef..d5a56a98f0 100644 --- a/src/include/snowball/libstemmer/stem_ISO_8859_1_english.h +++ b/src/include/snowball/libstemmer/stem_ISO_8859_1_english.h @@ -1,5 +1,5 @@ - -/* This file was generated automatically by the Snowball to ANSI C compiler */ +/* This file was generated automatically by the Snowball to ISO C compiler */ +/* http://snowballstem.org/ */ #ifdef __cplusplus extern "C" { diff --git a/src/include/snowball/libstemmer/stem_ISO_8859_1_finnish.h b/src/include/snowball/libstemmer/stem_ISO_8859_1_finnish.h index c67b67b944..ba197d8a70 100644 --- a/src/include/snowball/libstemmer/stem_ISO_8859_1_finnish.h +++ b/src/include/snowball/libstemmer/stem_ISO_8859_1_finnish.h @@ -1,5 +1,5 @@ - -/* This file was generated automatically by the Snowball to ANSI C compiler */ +/* This file was generated automatically by the Snowball to ISO C compiler */ +/* http://snowballstem.org/ */ #ifdef __cplusplus extern "C" { diff --git a/src/include/snowball/libstemmer/stem_ISO_8859_1_french.h b/src/include/snowball/libstemmer/stem_ISO_8859_1_french.h index 21244d6162..29321023a2 100644 --- a/src/include/snowball/libstemmer/stem_ISO_8859_1_french.h +++ b/src/include/snowball/libstemmer/stem_ISO_8859_1_french.h @@ -1,5 +1,5 @@ - -/* This file was generated automatically by the Snowball to ANSI C compiler */ +/* This file was generated automatically by the Snowball to ISO C compiler */ +/* http://snowballstem.org/ */ #ifdef __cplusplus extern "C" { diff --git a/src/include/snowball/libstemmer/stem_ISO_8859_1_german.h b/src/include/snowball/libstemmer/stem_ISO_8859_1_german.h index 8525389227..0887ac8164 
100644 --- a/src/include/snowball/libstemmer/stem_ISO_8859_1_german.h +++ b/src/include/snowball/libstemmer/stem_ISO_8859_1_german.h @@ -1,5 +1,5 @@ - -/* This file was generated automatically by the Snowball to ANSI C compiler */ +/* This file was generated automatically by the Snowball to ISO C compiler */ +/* http://snowballstem.org/ */ #ifdef __cplusplus extern "C" { diff --git a/src/include/snowball/libstemmer/stem_ISO_8859_1_hungarian.h b/src/include/snowball/libstemmer/stem_ISO_8859_1_hungarian.h deleted file mode 100644 index c3177e5019..0000000000 --- a/src/include/snowball/libstemmer/stem_ISO_8859_1_hungarian.h +++ /dev/null @@ -1,16 +0,0 @@ - -/* This file was generated automatically by the Snowball to ANSI C compiler */ - -#ifdef __cplusplus -extern "C" { -#endif - -extern struct SN_env * hungarian_ISO_8859_1_create_env(void); -extern void hungarian_ISO_8859_1_close_env(struct SN_env * z); - -extern int hungarian_ISO_8859_1_stem(struct SN_env * z); - -#ifdef __cplusplus -} -#endif - diff --git a/src/include/snowball/libstemmer/stem_ISO_8859_1_indonesian.h b/src/include/snowball/libstemmer/stem_ISO_8859_1_indonesian.h new file mode 100644 index 0000000000..aac192cace --- /dev/null +++ b/src/include/snowball/libstemmer/stem_ISO_8859_1_indonesian.h @@ -0,0 +1,16 @@ +/* This file was generated automatically by the Snowball to ISO C compiler */ +/* http://snowballstem.org/ */ + +#ifdef __cplusplus +extern "C" { +#endif + +extern struct SN_env * indonesian_ISO_8859_1_create_env(void); +extern void indonesian_ISO_8859_1_close_env(struct SN_env * z); + +extern int indonesian_ISO_8859_1_stem(struct SN_env * z); + +#ifdef __cplusplus +} +#endif + diff --git a/src/include/snowball/libstemmer/stem_ISO_8859_1_irish.h b/src/include/snowball/libstemmer/stem_ISO_8859_1_irish.h new file mode 100644 index 0000000000..0e146373ae --- /dev/null +++ b/src/include/snowball/libstemmer/stem_ISO_8859_1_irish.h @@ -0,0 +1,16 @@ +/* This file was generated automatically by the Snowball to ISO C compiler */ +/* http://snowballstem.org/ */ + +#ifdef __cplusplus +extern "C" { +#endif + +extern struct SN_env * irish_ISO_8859_1_create_env(void); +extern void irish_ISO_8859_1_close_env(struct SN_env * z); + +extern int irish_ISO_8859_1_stem(struct SN_env * z); + +#ifdef __cplusplus +} +#endif + diff --git a/src/include/snowball/libstemmer/stem_ISO_8859_1_italian.h b/src/include/snowball/libstemmer/stem_ISO_8859_1_italian.h index dccbfd5e97..9e3fcf7f78 100644 --- a/src/include/snowball/libstemmer/stem_ISO_8859_1_italian.h +++ b/src/include/snowball/libstemmer/stem_ISO_8859_1_italian.h @@ -1,5 +1,5 @@ - -/* This file was generated automatically by the Snowball to ANSI C compiler */ +/* This file was generated automatically by the Snowball to ISO C compiler */ +/* http://snowballstem.org/ */ #ifdef __cplusplus extern "C" { diff --git a/src/include/snowball/libstemmer/stem_ISO_8859_1_norwegian.h b/src/include/snowball/libstemmer/stem_ISO_8859_1_norwegian.h index e09e34e52f..424c8cd468 100644 --- a/src/include/snowball/libstemmer/stem_ISO_8859_1_norwegian.h +++ b/src/include/snowball/libstemmer/stem_ISO_8859_1_norwegian.h @@ -1,5 +1,5 @@ - -/* This file was generated automatically by the Snowball to ANSI C compiler */ +/* This file was generated automatically by the Snowball to ISO C compiler */ +/* http://snowballstem.org/ */ #ifdef __cplusplus extern "C" { diff --git a/src/include/snowball/libstemmer/stem_ISO_8859_1_porter.h b/src/include/snowball/libstemmer/stem_ISO_8859_1_porter.h index 5c8fd01db1..99bc54ff0d 
100644 --- a/src/include/snowball/libstemmer/stem_ISO_8859_1_porter.h +++ b/src/include/snowball/libstemmer/stem_ISO_8859_1_porter.h @@ -1,5 +1,5 @@ - -/* This file was generated automatically by the Snowball to ANSI C compiler */ +/* This file was generated automatically by the Snowball to ISO C compiler */ +/* http://snowballstem.org/ */ #ifdef __cplusplus extern "C" { diff --git a/src/include/snowball/libstemmer/stem_ISO_8859_1_portuguese.h b/src/include/snowball/libstemmer/stem_ISO_8859_1_portuguese.h index 0279bc94da..e9abc0de5f 100644 --- a/src/include/snowball/libstemmer/stem_ISO_8859_1_portuguese.h +++ b/src/include/snowball/libstemmer/stem_ISO_8859_1_portuguese.h @@ -1,5 +1,5 @@ - -/* This file was generated automatically by the Snowball to ANSI C compiler */ +/* This file was generated automatically by the Snowball to ISO C compiler */ +/* http://snowballstem.org/ */ #ifdef __cplusplus extern "C" { diff --git a/src/include/snowball/libstemmer/stem_ISO_8859_1_spanish.h b/src/include/snowball/libstemmer/stem_ISO_8859_1_spanish.h index 83f1498403..1e661aacfe 100644 --- a/src/include/snowball/libstemmer/stem_ISO_8859_1_spanish.h +++ b/src/include/snowball/libstemmer/stem_ISO_8859_1_spanish.h @@ -1,5 +1,5 @@ - -/* This file was generated automatically by the Snowball to ANSI C compiler */ +/* This file was generated automatically by the Snowball to ISO C compiler */ +/* http://snowballstem.org/ */ #ifdef __cplusplus extern "C" { diff --git a/src/include/snowball/libstemmer/stem_ISO_8859_1_swedish.h b/src/include/snowball/libstemmer/stem_ISO_8859_1_swedish.h index 4184e5ca39..5e6f6ac8af 100644 --- a/src/include/snowball/libstemmer/stem_ISO_8859_1_swedish.h +++ b/src/include/snowball/libstemmer/stem_ISO_8859_1_swedish.h @@ -1,5 +1,5 @@ - -/* This file was generated automatically by the Snowball to ANSI C compiler */ +/* This file was generated automatically by the Snowball to ISO C compiler */ +/* http://snowballstem.org/ */ #ifdef __cplusplus extern "C" { diff --git a/src/include/snowball/libstemmer/stem_ISO_8859_2_hungarian.h b/src/include/snowball/libstemmer/stem_ISO_8859_2_hungarian.h new file mode 100644 index 0000000000..c019a7d456 --- /dev/null +++ b/src/include/snowball/libstemmer/stem_ISO_8859_2_hungarian.h @@ -0,0 +1,16 @@ +/* This file was generated automatically by the Snowball to ISO C compiler */ +/* http://snowballstem.org/ */ + +#ifdef __cplusplus +extern "C" { +#endif + +extern struct SN_env * hungarian_ISO_8859_2_create_env(void); +extern void hungarian_ISO_8859_2_close_env(struct SN_env * z); + +extern int hungarian_ISO_8859_2_stem(struct SN_env * z); + +#ifdef __cplusplus +} +#endif + diff --git a/src/include/snowball/libstemmer/stem_ISO_8859_2_romanian.h b/src/include/snowball/libstemmer/stem_ISO_8859_2_romanian.h index 931f269ceb..ef9bd3f3a4 100644 --- a/src/include/snowball/libstemmer/stem_ISO_8859_2_romanian.h +++ b/src/include/snowball/libstemmer/stem_ISO_8859_2_romanian.h @@ -1,5 +1,5 @@ - -/* This file was generated automatically by the Snowball to ANSI C compiler */ +/* This file was generated automatically by the Snowball to ISO C compiler */ +/* http://snowballstem.org/ */ #ifdef __cplusplus extern "C" { diff --git a/src/include/snowball/libstemmer/stem_KOI8_R_russian.h b/src/include/snowball/libstemmer/stem_KOI8_R_russian.h index de2179d29f..cc58324c66 100644 --- a/src/include/snowball/libstemmer/stem_KOI8_R_russian.h +++ b/src/include/snowball/libstemmer/stem_KOI8_R_russian.h @@ -1,5 +1,5 @@ - -/* This file was generated automatically by the Snowball to 
ANSI C compiler */ +/* This file was generated automatically by the Snowball to ISO C compiler */ +/* http://snowballstem.org/ */ #ifdef __cplusplus extern "C" { diff --git a/src/include/snowball/libstemmer/stem_UTF_8_arabic.h b/src/include/snowball/libstemmer/stem_UTF_8_arabic.h new file mode 100644 index 0000000000..5ef61a1031 --- /dev/null +++ b/src/include/snowball/libstemmer/stem_UTF_8_arabic.h @@ -0,0 +1,16 @@ +/* This file was generated automatically by the Snowball to ISO C compiler */ +/* http://snowballstem.org/ */ + +#ifdef __cplusplus +extern "C" { +#endif + +extern struct SN_env * arabic_UTF_8_create_env(void); +extern void arabic_UTF_8_close_env(struct SN_env * z); + +extern int arabic_UTF_8_stem(struct SN_env * z); + +#ifdef __cplusplus +} +#endif + diff --git a/src/include/snowball/libstemmer/stem_UTF_8_danish.h b/src/include/snowball/libstemmer/stem_UTF_8_danish.h index ed744d454f..71f93155ed 100644 --- a/src/include/snowball/libstemmer/stem_UTF_8_danish.h +++ b/src/include/snowball/libstemmer/stem_UTF_8_danish.h @@ -1,5 +1,5 @@ - -/* This file was generated automatically by the Snowball to ANSI C compiler */ +/* This file was generated automatically by the Snowball to ISO C compiler */ +/* http://snowballstem.org/ */ #ifdef __cplusplus extern "C" { diff --git a/src/include/snowball/libstemmer/stem_UTF_8_dutch.h b/src/include/snowball/libstemmer/stem_UTF_8_dutch.h index a99646452b..6af7d0adf3 100644 --- a/src/include/snowball/libstemmer/stem_UTF_8_dutch.h +++ b/src/include/snowball/libstemmer/stem_UTF_8_dutch.h @@ -1,5 +1,5 @@ - -/* This file was generated automatically by the Snowball to ANSI C compiler */ +/* This file was generated automatically by the Snowball to ISO C compiler */ +/* http://snowballstem.org/ */ #ifdef __cplusplus extern "C" { diff --git a/src/include/snowball/libstemmer/stem_UTF_8_english.h b/src/include/snowball/libstemmer/stem_UTF_8_english.h index 619a8bc72a..d0ea10ef9f 100644 --- a/src/include/snowball/libstemmer/stem_UTF_8_english.h +++ b/src/include/snowball/libstemmer/stem_UTF_8_english.h @@ -1,5 +1,5 @@ - -/* This file was generated automatically by the Snowball to ANSI C compiler */ +/* This file was generated automatically by the Snowball to ISO C compiler */ +/* http://snowballstem.org/ */ #ifdef __cplusplus extern "C" { diff --git a/src/include/snowball/libstemmer/stem_UTF_8_finnish.h b/src/include/snowball/libstemmer/stem_UTF_8_finnish.h index d2f2fd9638..7d07fbaf4a 100644 --- a/src/include/snowball/libstemmer/stem_UTF_8_finnish.h +++ b/src/include/snowball/libstemmer/stem_UTF_8_finnish.h @@ -1,5 +1,5 @@ - -/* This file was generated automatically by the Snowball to ANSI C compiler */ +/* This file was generated automatically by the Snowball to ISO C compiler */ +/* http://snowballstem.org/ */ #ifdef __cplusplus extern "C" { diff --git a/src/include/snowball/libstemmer/stem_UTF_8_french.h b/src/include/snowball/libstemmer/stem_UTF_8_french.h index 08e341846d..b19139e538 100644 --- a/src/include/snowball/libstemmer/stem_UTF_8_french.h +++ b/src/include/snowball/libstemmer/stem_UTF_8_french.h @@ -1,5 +1,5 @@ - -/* This file was generated automatically by the Snowball to ANSI C compiler */ +/* This file was generated automatically by the Snowball to ISO C compiler */ +/* http://snowballstem.org/ */ #ifdef __cplusplus extern "C" { diff --git a/src/include/snowball/libstemmer/stem_UTF_8_german.h b/src/include/snowball/libstemmer/stem_UTF_8_german.h index 5bd84d431f..2a95132e9f 100644 --- a/src/include/snowball/libstemmer/stem_UTF_8_german.h 
+++ b/src/include/snowball/libstemmer/stem_UTF_8_german.h @@ -1,5 +1,5 @@ - -/* This file was generated automatically by the Snowball to ANSI C compiler */ +/* This file was generated automatically by the Snowball to ISO C compiler */ +/* http://snowballstem.org/ */ #ifdef __cplusplus extern "C" { diff --git a/src/include/snowball/libstemmer/stem_UTF_8_hungarian.h b/src/include/snowball/libstemmer/stem_UTF_8_hungarian.h index d81bd23469..d662a83635 100644 --- a/src/include/snowball/libstemmer/stem_UTF_8_hungarian.h +++ b/src/include/snowball/libstemmer/stem_UTF_8_hungarian.h @@ -1,5 +1,5 @@ - -/* This file was generated automatically by the Snowball to ANSI C compiler */ +/* This file was generated automatically by the Snowball to ISO C compiler */ +/* http://snowballstem.org/ */ #ifdef __cplusplus extern "C" { diff --git a/src/include/snowball/libstemmer/stem_UTF_8_indonesian.h b/src/include/snowball/libstemmer/stem_UTF_8_indonesian.h new file mode 100644 index 0000000000..7e07e73c34 --- /dev/null +++ b/src/include/snowball/libstemmer/stem_UTF_8_indonesian.h @@ -0,0 +1,16 @@ +/* This file was generated automatically by the Snowball to ISO C compiler */ +/* http://snowballstem.org/ */ + +#ifdef __cplusplus +extern "C" { +#endif + +extern struct SN_env * indonesian_UTF_8_create_env(void); +extern void indonesian_UTF_8_close_env(struct SN_env * z); + +extern int indonesian_UTF_8_stem(struct SN_env * z); + +#ifdef __cplusplus +} +#endif + diff --git a/src/include/snowball/libstemmer/stem_UTF_8_irish.h b/src/include/snowball/libstemmer/stem_UTF_8_irish.h new file mode 100644 index 0000000000..ea8b81860e --- /dev/null +++ b/src/include/snowball/libstemmer/stem_UTF_8_irish.h @@ -0,0 +1,16 @@ +/* This file was generated automatically by the Snowball to ISO C compiler */ +/* http://snowballstem.org/ */ + +#ifdef __cplusplus +extern "C" { +#endif + +extern struct SN_env * irish_UTF_8_create_env(void); +extern void irish_UTF_8_close_env(struct SN_env * z); + +extern int irish_UTF_8_stem(struct SN_env * z); + +#ifdef __cplusplus +} +#endif + diff --git a/src/include/snowball/libstemmer/stem_UTF_8_italian.h b/src/include/snowball/libstemmer/stem_UTF_8_italian.h index 3bee080d52..4177c3ebf8 100644 --- a/src/include/snowball/libstemmer/stem_UTF_8_italian.h +++ b/src/include/snowball/libstemmer/stem_UTF_8_italian.h @@ -1,5 +1,5 @@ - -/* This file was generated automatically by the Snowball to ANSI C compiler */ +/* This file was generated automatically by the Snowball to ISO C compiler */ +/* http://snowballstem.org/ */ #ifdef __cplusplus extern "C" { diff --git a/src/include/snowball/libstemmer/stem_UTF_8_lithuanian.h b/src/include/snowball/libstemmer/stem_UTF_8_lithuanian.h new file mode 100644 index 0000000000..8a9b1241a8 --- /dev/null +++ b/src/include/snowball/libstemmer/stem_UTF_8_lithuanian.h @@ -0,0 +1,16 @@ +/* This file was generated automatically by the Snowball to ISO C compiler */ +/* http://snowballstem.org/ */ + +#ifdef __cplusplus +extern "C" { +#endif + +extern struct SN_env * lithuanian_UTF_8_create_env(void); +extern void lithuanian_UTF_8_close_env(struct SN_env * z); + +extern int lithuanian_UTF_8_stem(struct SN_env * z); + +#ifdef __cplusplus +} +#endif + diff --git a/src/include/snowball/libstemmer/stem_UTF_8_nepali.h b/src/include/snowball/libstemmer/stem_UTF_8_nepali.h new file mode 100644 index 0000000000..20b2b919ac --- /dev/null +++ b/src/include/snowball/libstemmer/stem_UTF_8_nepali.h @@ -0,0 +1,16 @@ +/* This file was generated automatically by the Snowball to ISO C compiler 
*/ +/* http://snowballstem.org/ */ + +#ifdef __cplusplus +extern "C" { +#endif + +extern struct SN_env * nepali_UTF_8_create_env(void); +extern void nepali_UTF_8_close_env(struct SN_env * z); + +extern int nepali_UTF_8_stem(struct SN_env * z); + +#ifdef __cplusplus +} +#endif + diff --git a/src/include/snowball/libstemmer/stem_UTF_8_norwegian.h b/src/include/snowball/libstemmer/stem_UTF_8_norwegian.h index c75444bcd9..b155ebb718 100644 --- a/src/include/snowball/libstemmer/stem_UTF_8_norwegian.h +++ b/src/include/snowball/libstemmer/stem_UTF_8_norwegian.h @@ -1,5 +1,5 @@ - -/* This file was generated automatically by the Snowball to ANSI C compiler */ +/* This file was generated automatically by the Snowball to ISO C compiler */ +/* http://snowballstem.org/ */ #ifdef __cplusplus extern "C" { diff --git a/src/include/snowball/libstemmer/stem_UTF_8_porter.h b/src/include/snowball/libstemmer/stem_UTF_8_porter.h index 82d469ac45..54efb6f130 100644 --- a/src/include/snowball/libstemmer/stem_UTF_8_porter.h +++ b/src/include/snowball/libstemmer/stem_UTF_8_porter.h @@ -1,5 +1,5 @@ - -/* This file was generated automatically by the Snowball to ANSI C compiler */ +/* This file was generated automatically by the Snowball to ISO C compiler */ +/* http://snowballstem.org/ */ #ifdef __cplusplus extern "C" { diff --git a/src/include/snowball/libstemmer/stem_UTF_8_portuguese.h b/src/include/snowball/libstemmer/stem_UTF_8_portuguese.h index 9fe7f9aa81..68dc2554c3 100644 --- a/src/include/snowball/libstemmer/stem_UTF_8_portuguese.h +++ b/src/include/snowball/libstemmer/stem_UTF_8_portuguese.h @@ -1,5 +1,5 @@ - -/* This file was generated automatically by the Snowball to ANSI C compiler */ +/* This file was generated automatically by the Snowball to ISO C compiler */ +/* http://snowballstem.org/ */ #ifdef __cplusplus extern "C" { diff --git a/src/include/snowball/libstemmer/stem_UTF_8_romanian.h b/src/include/snowball/libstemmer/stem_UTF_8_romanian.h index d01e8132e2..2cc1ad011d 100644 --- a/src/include/snowball/libstemmer/stem_UTF_8_romanian.h +++ b/src/include/snowball/libstemmer/stem_UTF_8_romanian.h @@ -1,5 +1,5 @@ - -/* This file was generated automatically by the Snowball to ANSI C compiler */ +/* This file was generated automatically by the Snowball to ISO C compiler */ +/* http://snowballstem.org/ */ #ifdef __cplusplus extern "C" { diff --git a/src/include/snowball/libstemmer/stem_UTF_8_russian.h b/src/include/snowball/libstemmer/stem_UTF_8_russian.h index 4ef774ddcc..2bc621bff9 100644 --- a/src/include/snowball/libstemmer/stem_UTF_8_russian.h +++ b/src/include/snowball/libstemmer/stem_UTF_8_russian.h @@ -1,5 +1,5 @@ - -/* This file was generated automatically by the Snowball to ANSI C compiler */ +/* This file was generated automatically by the Snowball to ISO C compiler */ +/* http://snowballstem.org/ */ #ifdef __cplusplus extern "C" { diff --git a/src/include/snowball/libstemmer/stem_UTF_8_spanish.h b/src/include/snowball/libstemmer/stem_UTF_8_spanish.h index 10572ecc37..075ba876ed 100644 --- a/src/include/snowball/libstemmer/stem_UTF_8_spanish.h +++ b/src/include/snowball/libstemmer/stem_UTF_8_spanish.h @@ -1,5 +1,5 @@ - -/* This file was generated automatically by the Snowball to ANSI C compiler */ +/* This file was generated automatically by the Snowball to ISO C compiler */ +/* http://snowballstem.org/ */ #ifdef __cplusplus extern "C" { diff --git a/src/include/snowball/libstemmer/stem_UTF_8_swedish.h b/src/include/snowball/libstemmer/stem_UTF_8_swedish.h index 1444ebb49a..095623db80 100644 
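For reference, every one of these regenerated libstemmer headers exposes the same three-function pattern (create_env / stem / close_env). Below is a minimal sketch of driving one of the newly added stemmers through that interface; it is not part of the patch, and SN_set_current(), the symbol type and the SN_env fields (p, l) are assumptions taken from the Snowball runtime rather than from this diff.

    #include <stdio.h>
    #include <string.h>
    #include "header.h"                     /* Snowball runtime: SN_env, SN_set_current (assumed) */
    #include "stem_UTF_8_indonesian.h"      /* one of the headers added above */

    /* Hypothetical helper: stem a single UTF-8 word and print the result. */
    static void
    stem_one_word(const char *word)
    {
        struct SN_env *z = indonesian_UTF_8_create_env();

        /* Load the input into the stemmer environment (Snowball runtime call). */
        SN_set_current(z, (int) strlen(word), (const symbol *) word);

        if (indonesian_UTF_8_stem(z) >= 0)
            printf("%.*s\n", z->l, (const char *) z->p);    /* z->p / z->l: stemmed form */

        indonesian_UTF_8_close_env(z);
    }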
--- a/src/include/snowball/libstemmer/stem_UTF_8_swedish.h +++ b/src/include/snowball/libstemmer/stem_UTF_8_swedish.h @@ -1,5 +1,5 @@ - -/* This file was generated automatically by the Snowball to ANSI C compiler */ +/* This file was generated automatically by the Snowball to ISO C compiler */ +/* http://snowballstem.org/ */ #ifdef __cplusplus extern "C" { diff --git a/src/include/snowball/libstemmer/stem_UTF_8_tamil.h b/src/include/snowball/libstemmer/stem_UTF_8_tamil.h new file mode 100644 index 0000000000..65505dc028 --- /dev/null +++ b/src/include/snowball/libstemmer/stem_UTF_8_tamil.h @@ -0,0 +1,16 @@ +/* This file was generated automatically by the Snowball to ISO C compiler */ +/* http://snowballstem.org/ */ + +#ifdef __cplusplus +extern "C" { +#endif + +extern struct SN_env * tamil_UTF_8_create_env(void); +extern void tamil_UTF_8_close_env(struct SN_env * z); + +extern int tamil_UTF_8_stem(struct SN_env * z); + +#ifdef __cplusplus +} +#endif + diff --git a/src/include/snowball/libstemmer/stem_UTF_8_turkish.h b/src/include/snowball/libstemmer/stem_UTF_8_turkish.h index 8173a17486..7d94a2f9ef 100644 --- a/src/include/snowball/libstemmer/stem_UTF_8_turkish.h +++ b/src/include/snowball/libstemmer/stem_UTF_8_turkish.h @@ -1,5 +1,5 @@ - -/* This file was generated automatically by the Snowball to ANSI C compiler */ +/* This file was generated automatically by the Snowball to ISO C compiler */ +/* http://snowballstem.org/ */ #ifdef __cplusplus extern "C" { diff --git a/src/include/statistics/extended_stats_internal.h b/src/include/statistics/extended_stats_internal.h index 738ff3fadc..b3ca0c1229 100644 --- a/src/include/statistics/extended_stats_internal.h +++ b/src/include/statistics/extended_stats_internal.h @@ -3,7 +3,7 @@ * extended_stats_internal.h * POSTGRES extended statistics internal declarations * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * IDENTIFICATION diff --git a/src/include/statistics/statistics.h b/src/include/statistics/statistics.h index 1d68c39df0..8009fee322 100644 --- a/src/include/statistics/statistics.h +++ b/src/include/statistics/statistics.h @@ -3,7 +3,7 @@ * statistics.h * Extended statistics and selectivity estimation functions. * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/include/statistics/statistics.h diff --git a/src/include/storage/backendid.h b/src/include/storage/backendid.h index bf31ba4f48..e000bb848c 100644 --- a/src/include/storage/backendid.h +++ b/src/include/storage/backendid.h @@ -4,7 +4,7 @@ * POSTGRES backend id communication definitions * * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/include/storage/backendid.h diff --git a/src/include/storage/barrier.h b/src/include/storage/barrier.h new file mode 100644 index 0000000000..8b5a3751ea --- /dev/null +++ b/src/include/storage/barrier.h @@ -0,0 +1,45 @@ +/*------------------------------------------------------------------------- + * + * barrier.h + * Barriers for synchronizing cooperating processes. 
+ * + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group + * Portions Copyright (c) 1994, Regents of the University of California + * + * src/include/storage/barrier.h + * + *------------------------------------------------------------------------- + */ +#ifndef BARRIER_H +#define BARRIER_H + +/* + * For the header previously known as "barrier.h", please include + * "port/atomics.h", which deals with atomics, compiler barriers and memory + * barriers. + */ + +#include "storage/condition_variable.h" +#include "storage/spin.h" + +typedef struct Barrier +{ + slock_t mutex; + int phase; /* phase counter */ + int participants; /* the number of participants attached */ + int arrived; /* the number of participants that have + * arrived */ + int elected; /* highest phase elected */ + bool static_party; /* used only for assertions */ + ConditionVariable condition_variable; +} Barrier; + +extern void BarrierInit(Barrier *barrier, int num_workers); +extern bool BarrierArriveAndWait(Barrier *barrier, uint32 wait_event_info); +extern bool BarrierArriveAndDetach(Barrier *barrier); +extern int BarrierAttach(Barrier *barrier); +extern bool BarrierDetach(Barrier *barrier); +extern int BarrierPhase(Barrier *barrier); +extern int BarrierParticipants(Barrier *barrier); + +#endif /* BARRIER_H */ diff --git a/src/include/storage/block.h b/src/include/storage/block.h index 33840798a8..182aa8d453 100644 --- a/src/include/storage/block.h +++ b/src/include/storage/block.h @@ -4,7 +4,7 @@ * POSTGRES disk block definitions. * * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/include/storage/block.h @@ -22,7 +22,7 @@ * contains exactly one disk block). the blocks are numbered * sequentially, 0 to 0xFFFFFFFE. * - * InvalidBlockNumber is the same thing as P_NEW in buf.h. + * InvalidBlockNumber is the same thing as P_NEW in bufmgr.h. * * the access methods, the buffer manager and the storage manager are * more or less the only pieces of code that should be accessing disk diff --git a/src/include/storage/buf.h b/src/include/storage/buf.h index 054f482bd7..3b89983b42 100644 --- a/src/include/storage/buf.h +++ b/src/include/storage/buf.h @@ -4,7 +4,7 @@ * Basic buffer manager data types. * * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/include/storage/buf.h diff --git a/src/include/storage/buf_internals.h b/src/include/storage/buf_internals.h index b768b6fc96..5370035f0c 100644 --- a/src/include/storage/buf_internals.h +++ b/src/include/storage/buf_internals.h @@ -5,7 +5,7 @@ * strategy. 
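To make the new Barrier API in src/include/storage/barrier.h (added above) concrete, here is a minimal sketch of a dynamic party of workers synchronizing phase by phase; it is not part of the patch, and the phase numbers and the zero wait_event_info are placeholders.

    #include "postgres.h"
    #include "storage/barrier.h"

    #define PHASE_BUILD 0       /* hypothetical phases of a two-step job */
    #define PHASE_PROBE 1

    /* Leader, at shared-memory setup time: start with no participants. */
    void
    example_barrier_init(Barrier *barrier)
    {
        BarrierInit(barrier, 0);
    }

    /* Each cooperating process: */
    void
    example_barrier_worker(Barrier *barrier)
    {
        int     phase = BarrierAttach(barrier);

        if (phase == PHASE_BUILD)
        {
            /* ... contribute to the build step ... */

            /*
             * Wait for all attached participants.  The return value reports
             * whether this process was elected to do any one-time serial
             * work for the phase (cf. the "elected" field above).
             */
            if (BarrierArriveAndWait(barrier, 0 /* wait_event_info */ ))
            {
                /* serial follow-up work, done by exactly one participant */
            }
        }

        /* ... probe step ... */

        BarrierDetach(barrier);
    }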
* * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/include/storage/buf_internals.h @@ -317,6 +317,7 @@ extern void StrategyNotifyBgWriter(int bgwprocno); extern Size StrategyShmemSize(void); extern void StrategyInitialize(bool init); +extern bool have_free_buffer(void); /* buf_table.c */ extern Size BufTableShmemSize(int size); diff --git a/src/include/storage/buffile.h b/src/include/storage/buffile.h index fafcb3f089..a6cdeb451c 100644 --- a/src/include/storage/buffile.h +++ b/src/include/storage/buffile.h @@ -1,7 +1,7 @@ /*------------------------------------------------------------------------- * * buffile.h - * Management of large buffered files, primarily temporary files. + * Management of large buffered temporary files. * * The BufFile routines provide a partial replacement for stdio atop * virtual file descriptors managed by fd.c. Currently they only support @@ -15,7 +15,7 @@ * but currently we have no need for oversize temp files without buffered * access. * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/include/storage/buffile.h @@ -26,6 +26,8 @@ #ifndef BUFFILE_H #define BUFFILE_H +#include "storage/sharedfileset.h" + /* BufFile is an opaque type whose details are not known outside buffile.c. */ typedef struct BufFile BufFile; @@ -41,5 +43,12 @@ extern size_t BufFileWrite(BufFile *file, void *ptr, size_t size); extern int BufFileSeek(BufFile *file, int fileno, off_t offset, int whence); extern void BufFileTell(BufFile *file, int *fileno, off_t *offset); extern int BufFileSeekBlock(BufFile *file, long blknum); +extern off_t BufFileSize(BufFile *file); +extern long BufFileAppend(BufFile *target, BufFile *source); + +extern BufFile *BufFileCreateShared(SharedFileSet *fileset, const char *name); +extern void BufFileExportShared(BufFile *file); +extern BufFile *BufFileOpenShared(SharedFileSet *fileset, const char *name); +extern void BufFileDeleteShared(SharedFileSet *fileset, const char *name); #endif /* BUFFILE_H */ diff --git a/src/include/storage/bufmgr.h b/src/include/storage/bufmgr.h index 98b63fc5ba..3cce3906a0 100644 --- a/src/include/storage/bufmgr.h +++ b/src/include/storage/bufmgr.h @@ -4,7 +4,7 @@ * POSTGRES buffer manager definitions. * * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/include/storage/bufmgr.h diff --git a/src/include/storage/bufpage.h b/src/include/storage/bufpage.h index 50c72a3c8d..85dd10c45a 100644 --- a/src/include/storage/bufpage.h +++ b/src/include/storage/bufpage.h @@ -4,7 +4,7 @@ * Standard POSTGRES buffer page definitions. 
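The shared-fileset additions to buffile.h above let one participant create a named temporary file and another open it read-only after export. A rough sketch of that handoff follows; it is not part of the patch, BufFileRead() and BufFileClose() come from the unchanged parts of buffile.h, and the file name is invented.

    #include "postgres.h"
    #include "storage/buffile.h"
    #include "storage/sharedfileset.h"

    /* Writer: create a named file in the shared fileset, fill it, export it. */
    void
    example_shared_write(SharedFileSet *fileset, const char *data, size_t len)
    {
        BufFile    *file = BufFileCreateShared(fileset, "example-spool");

        BufFileWrite(file, (void *) data, len);
        BufFileExportShared(file);      /* flush; file becomes read-only */
        BufFileClose(file);
    }

    /* Reader (possibly another backend): open the exported file by name. */
    size_t
    example_shared_read(SharedFileSet *fileset, char *buf, size_t bufsize)
    {
        BufFile    *file = BufFileOpenShared(fileset, "example-spool");
        size_t      nread = BufFileRead(file, buf, bufsize);

        BufFileClose(file);
        return nread;
    }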
* * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/include/storage/bufpage.h diff --git a/src/include/storage/checksum.h b/src/include/storage/checksum.h index b85f714712..433755e279 100644 --- a/src/include/storage/checksum.h +++ b/src/include/storage/checksum.h @@ -3,7 +3,7 @@ * checksum.h * Checksum implementation for data pages. * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/include/storage/checksum.h diff --git a/src/include/storage/checksum_impl.h b/src/include/storage/checksum_impl.h index bffd061de8..a49d27febb 100644 --- a/src/include/storage/checksum_impl.h +++ b/src/include/storage/checksum_impl.h @@ -8,7 +8,7 @@ * referenced by storage/checksum.h. (Note: you may need to redefine * Assert() as empty to compile this successfully externally.) * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/include/storage/checksum_impl.h @@ -107,6 +107,13 @@ /* prime multiplier of FNV-1a hash */ #define FNV_PRIME 16777619 +/* Use a union so that this code is valid under strict aliasing */ +typedef union +{ + PageHeaderData phdr; + uint32 data[BLCKSZ / (sizeof(uint32) * N_SUMS)][N_SUMS]; +} PGChecksummablePage; + /* * Base offsets to initialize each of the parallel FNV hashes into a * different initial state. @@ -132,28 +139,27 @@ do { \ } while (0) /* - * Block checksum algorithm. The data argument must be aligned on a 4-byte - * boundary. + * Block checksum algorithm. The page must be adequately aligned + * (at least on 4-byte boundary). */ static uint32 -pg_checksum_block(char *data, uint32 size) +pg_checksum_block(const PGChecksummablePage *page) { uint32 sums[N_SUMS]; - uint32 (*dataArr)[N_SUMS] = (uint32 (*)[N_SUMS]) data; uint32 result = 0; uint32 i, j; /* ensure that the size is compatible with the algorithm */ - Assert((size % (sizeof(uint32) * N_SUMS)) == 0); + Assert(sizeof(PGChecksummablePage) == BLCKSZ); /* initialize partial checksums to their corresponding offsets */ memcpy(sums, checksumBaseOffsets, sizeof(checksumBaseOffsets)); /* main checksum calculation */ - for (i = 0; i < size / sizeof(uint32) / N_SUMS; i++) + for (i = 0; i < (uint32) (BLCKSZ / (sizeof(uint32) * N_SUMS)); i++) for (j = 0; j < N_SUMS; j++) - CHECKSUM_COMP(sums[j], dataArr[i][j]); + CHECKSUM_COMP(sums[j], page->data[i][j]); /* finally add in two rounds of zeroes for additional mixing */ for (i = 0; i < 2; i++) @@ -168,8 +174,10 @@ pg_checksum_block(char *data, uint32 size) } /* - * Compute the checksum for a Postgres page. The page must be aligned on a - * 4-byte boundary. + * Compute the checksum for a Postgres page. + * + * The page must be adequately aligned (at least on a 4-byte boundary). + * Beware also that the checksum field of the page is transiently zeroed. 
* * The checksum includes the block number (to detect the case where a page is * somehow moved to a different location), the page header (excluding the @@ -178,12 +186,12 @@ pg_checksum_block(char *data, uint32 size) uint16 pg_checksum_page(char *page, BlockNumber blkno) { - PageHeader phdr = (PageHeader) page; + PGChecksummablePage *cpage = (PGChecksummablePage *) page; uint16 save_checksum; uint32 checksum; /* We only calculate the checksum for properly-initialized pages */ - Assert(!PageIsNew(page)); + Assert(!PageIsNew(&cpage->phdr)); /* * Save pd_checksum and temporarily set it to zero, so that the checksum @@ -191,10 +199,10 @@ pg_checksum_page(char *page, BlockNumber blkno) * Restore it after, because actually updating the checksum is NOT part of * the API of this function. */ - save_checksum = phdr->pd_checksum; - phdr->pd_checksum = 0; - checksum = pg_checksum_block(page, BLCKSZ); - phdr->pd_checksum = save_checksum; + save_checksum = cpage->phdr.pd_checksum; + cpage->phdr.pd_checksum = 0; + checksum = pg_checksum_block(cpage); + cpage->phdr.pd_checksum = save_checksum; /* Mix in the block number to detect transposed pages */ checksum ^= blkno; diff --git a/src/include/storage/condition_variable.h b/src/include/storage/condition_variable.h index f77c0b22ad..32e645c02a 100644 --- a/src/include/storage/condition_variable.h +++ b/src/include/storage/condition_variable.h @@ -12,7 +12,7 @@ * can be cancelled prior to the fulfillment of the condition) and do not * use pointers internally (so that they are safe to use within DSMs). * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/include/storage/condition_variable.h @@ -27,33 +27,34 @@ typedef struct { - slock_t mutex; - proclist_head wakeup; + slock_t mutex; /* spinlock protecting the wakeup list */ + proclist_head wakeup; /* list of wake-able processes */ } ConditionVariable; /* Initialize a condition variable. */ -extern void ConditionVariableInit(ConditionVariable *); +extern void ConditionVariableInit(ConditionVariable *cv); /* * To sleep on a condition variable, a process should use a loop which first * checks the condition, exiting the loop if it is met, and then calls * ConditionVariableSleep. Spurious wakeups are possible, but should be - * infrequent. After exiting the loop, ConditionVariableCancelSleep should + * infrequent. After exiting the loop, ConditionVariableCancelSleep must * be called to ensure that the process is no longer in the wait list for * the condition variable. */ -extern void ConditionVariableSleep(ConditionVariable *, uint32 wait_event_info); +extern void ConditionVariableSleep(ConditionVariable *cv, uint32 wait_event_info); extern void ConditionVariableCancelSleep(void); /* - * The use of this function is optional and not necessary for correctness; - * for efficiency, it should be called prior entering the loop described above - * if it is thought that the condition is unlikely to hold immediately. + * Optionally, ConditionVariablePrepareToSleep can be called before entering + * the test-and-sleep loop described above. Doing so is more efficient if + * at least one sleep is needed, whereas not doing so is more efficient when + * no sleep is needed because the test condition is true the first time. 
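As the revised comment notes, pg_checksum_page() computes the checksum without storing it (pd_checksum is only transiently zeroed). A minimal sketch of a caller that stamps a page before write-out, not part of the patch:

    #include "postgres.h"
    #include "storage/bufpage.h"
    #include "storage/checksum.h"

    /* Store the checksum of a properly initialized, 4-byte-aligned page. */
    void
    example_set_page_checksum(Page page, BlockNumber blkno)
    {
        if (!PageIsNew(page))
            ((PageHeader) page)->pd_checksum = pg_checksum_page((char *) page, blkno);
    }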
*/ -extern void ConditionVariablePrepareToSleep(ConditionVariable *); +extern void ConditionVariablePrepareToSleep(ConditionVariable *cv); /* Wake up a single waiter (via signal) or all waiters (via broadcast). */ -extern bool ConditionVariableSignal(ConditionVariable *); -extern int ConditionVariableBroadcast(ConditionVariable *); +extern void ConditionVariableSignal(ConditionVariable *cv); +extern void ConditionVariableBroadcast(ConditionVariable *cv); #endif /* CONDITION_VARIABLE_H */ diff --git a/src/include/storage/copydir.h b/src/include/storage/copydir.h index f88a044509..4fef3e2107 100644 --- a/src/include/storage/copydir.h +++ b/src/include/storage/copydir.h @@ -3,7 +3,7 @@ * copydir.h * Copy a directory. * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/include/storage/copydir.h diff --git a/src/include/storage/dsm.h b/src/include/storage/dsm.h index 31b1f4da9c..b4654cb5ca 100644 --- a/src/include/storage/dsm.h +++ b/src/include/storage/dsm.h @@ -3,7 +3,7 @@ * dsm.h * manage dynamic shared memory segments * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/include/storage/dsm.h @@ -33,11 +33,9 @@ extern void dsm_detach_all(void); extern void dsm_set_control_handle(dsm_handle h); #endif -/* Functions that create, update, or remove mappings. */ +/* Functions that create or remove mappings. */ extern dsm_segment *dsm_create(Size size, int flags); extern dsm_segment *dsm_attach(dsm_handle h); -extern void *dsm_resize(dsm_segment *seg, Size size); -extern void *dsm_remap(dsm_segment *seg); extern void dsm_detach(dsm_segment *seg); /* Resource management functions. */ diff --git a/src/include/storage/dsm_impl.h b/src/include/storage/dsm_impl.h index c2060431ba..9485446c91 100644 --- a/src/include/storage/dsm_impl.h +++ b/src/include/storage/dsm_impl.h @@ -3,7 +3,7 @@ * dsm_impl.h * low-level dynamic shared memory primitives * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/include/storage/dsm_impl.h @@ -14,7 +14,6 @@ #define DSM_IMPL_H /* Dynamic shared memory implementations. */ -#define DSM_IMPL_NONE 0 #define DSM_IMPL_POSIX 1 #define DSM_IMPL_SYSV 2 #define DSM_IMPL_WINDOWS 3 @@ -60,7 +59,6 @@ typedef enum DSM_OP_CREATE, DSM_OP_ATTACH, DSM_OP_DETACH, - DSM_OP_RESIZE, DSM_OP_DESTROY } dsm_op; @@ -69,9 +67,6 @@ extern bool dsm_impl_op(dsm_op op, dsm_handle handle, Size request_size, void **impl_private, void **mapped_address, Size *mapped_size, int elevel); -/* Some implementations cannot resize segments. Can this one? */ -extern bool dsm_impl_can_resize(void); - /* Implementation-dependent actions required to keep segment until shutdown. */ extern void dsm_impl_pin_segment(dsm_handle handle, void *impl_private, void **impl_private_pm_handle); diff --git a/src/include/storage/fd.h b/src/include/storage/fd.h index faef39e78d..1289589a46 100644 --- a/src/include/storage/fd.h +++ b/src/include/storage/fd.h @@ -4,7 +4,7 @@ * Virtual file descriptor definitions. 
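The rewritten comments above describe the canonical test-and-sleep loop for condition variables. In code, the pattern looks roughly like the sketch below; it is not part of the patch, and the shared flag and the zero wait_event_info are placeholders.

    #include "postgres.h"
    #include "storage/condition_variable.h"

    /* Wait until a shared flag becomes true; wakeups may be spurious. */
    void
    example_wait(ConditionVariable *cv, volatile bool *flag)
    {
        /* Optional: worthwhile when we expect to sleep at least once. */
        ConditionVariablePrepareToSleep(cv);

        while (!*flag)
            ConditionVariableSleep(cv, 0 /* wait_event_info */ );

        /* Mandatory after the loop, per the comment above. */
        ConditionVariableCancelSleep();
    }

    /* Whoever makes the condition true wakes the waiters afterwards. */
    void
    example_satisfy(ConditionVariable *cv, volatile bool *flag)
    {
        *flag = true;
        ConditionVariableBroadcast(cv);
    }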
* * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/include/storage/fd.h @@ -15,14 +15,14 @@ /* * calls: * - * File {Close, Read, Write, Seek, Tell, Sync} + * File {Close, Read, Write, Size, Sync} * {Path Name Open, Allocate, Free} File * * These are NOT JUST RENAMINGS OF THE UNIX ROUTINES. * Use them for all file activity... * * File fd; - * fd = PathNameOpenFile("foo", O_RDONLY, 0600); + * fd = PathNameOpenFile("foo", O_RDONLY); * * AllocateFile(); * FreeFile(); @@ -42,17 +42,11 @@ #include -/* - * FileSeek uses the standard UNIX lseek(2) flags. - */ - -typedef char *FileName; - typedef int File; /* GUC parameter */ -extern int max_files_per_process; +extern PGDLLIMPORT int max_files_per_process; /* * This is private to fd.c, but exported for save/restore_backend_variables() @@ -65,20 +59,29 @@ extern int max_safe_fds; */ /* Operations on virtual Files --- equivalent to Unix kernel file ops */ -extern File PathNameOpenFile(FileName fileName, int fileFlags, int fileMode); +extern File PathNameOpenFile(const char *fileName, int fileFlags); +extern File PathNameOpenFilePerm(const char *fileName, int fileFlags, mode_t fileMode); extern File OpenTemporaryFile(bool interXact); extern void FileClose(File file); extern int FilePrefetch(File file, off_t offset, int amount, uint32 wait_event_info); -extern int FileRead(File file, char *buffer, int amount, uint32 wait_event_info); -extern int FileWrite(File file, char *buffer, int amount, uint32 wait_event_info); +extern int FileRead(File file, char *buffer, int amount, off_t offset, uint32 wait_event_info); +extern int FileWrite(File file, char *buffer, int amount, off_t offset, uint32 wait_event_info); extern int FileSync(File file, uint32 wait_event_info); -extern off_t FileSeek(File file, off_t offset, int whence); +extern off_t FileSize(File file); extern int FileTruncate(File file, off_t offset, uint32 wait_event_info); extern void FileWriteback(File file, off_t offset, off_t nbytes, uint32 wait_event_info); extern char *FilePathName(File file); extern int FileGetRawDesc(File file); extern int FileGetRawFlags(File file); -extern int FileGetRawMode(File file); +extern mode_t FileGetRawMode(File file); + +/* Operations used for sharing named temporary files */ +extern File PathNameCreateTemporaryFile(const char *name, bool error_on_failure); +extern File PathNameOpenTemporaryFile(const char *name); +extern bool PathNameDeleteTemporaryFile(const char *name, bool error_on_failure); +extern void PathNameCreateTemporaryDir(const char *base, const char *name); +extern void PathNameDeleteTemporaryDir(const char *name); +extern void TempTablespacePath(char *path, Oid tablespace); /* Operations that allow use of regular stdio --- USE WITH CAUTION */ extern FILE *AllocateFile(const char *name, const char *mode); @@ -91,14 +94,21 @@ extern int ClosePipeStream(FILE *file); /* Operations to allow use of the library routines */ extern DIR *AllocateDir(const char *dirname); extern struct dirent *ReadDir(DIR *dir, const char *dirname); +extern struct dirent *ReadDirExtended(DIR *dir, const char *dirname, + int elevel); extern int FreeDir(DIR *dir); /* Operations to allow use of a plain kernel FD, with automatic cleanup */ -extern int OpenTransientFile(FileName fileName, int fileFlags, int fileMode); +extern int OpenTransientFile(const char *fileName, int fileFlags); +extern int 
OpenTransientFilePerm(const char *fileName, int fileFlags, mode_t fileMode); extern int CloseTransientFile(int fd); /* If you've really really gotta have a plain kernel FD, use this */ -extern int BasicOpenFile(FileName fileName, int fileFlags, int fileMode); +extern int BasicOpenFile(const char *fileName, int fileFlags); +extern int BasicOpenFilePerm(const char *fileName, int fileFlags, mode_t fileMode); + + /* Make a directory with default permissions */ +extern int MakePGDirectory(const char *directoryName); /* Miscellaneous support routines */ extern void InitFileAccess(void); @@ -106,11 +116,13 @@ extern void set_max_safe_fds(void); extern void closeAllVfds(void); extern void SetTempTablespaces(Oid *tableSpaces, int numSpaces); extern bool TempTablespacesAreSet(void); +extern int GetTempTablespaces(Oid *tableSpaces, int numSpaces); extern Oid GetNextTempTableSpace(void); -extern void AtEOXact_Files(void); +extern void AtEOXact_Files(bool isCommit); extern void AtEOSubXact_Files(bool isCommit, SubTransactionId mySubid, SubTransactionId parentSubid); extern void RemovePgTempFiles(void); +extern bool looks_like_temp_rel_name(const char *name); extern int pg_fsync(int fd); extern int pg_fsync_no_writethrough(int fd); @@ -123,7 +135,7 @@ extern int durable_unlink(const char *fname, int loglevel); extern int durable_link_or_rename(const char *oldfile, const char *newfile, int loglevel); extern void SyncDataDirectory(void); -/* Filename components for OpenTemporaryFile */ +/* Filename components */ #define PG_TEMP_FILES_DIR "pgsql_tmp" #define PG_TEMP_FILE_PREFIX "pgsql_tmp" diff --git a/src/include/storage/freespace.h b/src/include/storage/freespace.h index d110f006af..726eb30fb8 100644 --- a/src/include/storage/freespace.h +++ b/src/include/storage/freespace.h @@ -4,7 +4,7 @@ * POSTGRES free space map for quickly finding free space in relations * * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/include/storage/freespace.h @@ -32,9 +32,7 @@ extern void XLogRecordPageWithFreeSpace(RelFileNode rnode, BlockNumber heapBlk, extern void FreeSpaceMapTruncateRel(Relation rel, BlockNumber nblocks); extern void FreeSpaceMapVacuum(Relation rel); -extern void UpdateFreeSpaceMap(Relation rel, - BlockNumber startBlkNum, - BlockNumber endBlkNum, - Size freespace); +extern void FreeSpaceMapVacuumRange(Relation rel, BlockNumber start, + BlockNumber end); #endif /* FREESPACE_H_ */ diff --git a/src/include/storage/fsm_internals.h b/src/include/storage/fsm_internals.h index 722e649123..d6b8187861 100644 --- a/src/include/storage/fsm_internals.h +++ b/src/include/storage/fsm_internals.h @@ -4,7 +4,7 @@ * internal functions for free space map * * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/include/storage/fsm_internals.h diff --git a/src/include/storage/indexfsm.h b/src/include/storage/indexfsm.h index f8045f0df8..07cbef4106 100644 --- a/src/include/storage/indexfsm.h +++ b/src/include/storage/indexfsm.h @@ -4,7 +4,7 @@ * POSTGRES free space map for quickly finding an unused page in index * * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions 
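Several fd.h entry points change shape above: PathNameOpenFile() drops its mode argument (PathNameOpenFilePerm() keeps it), FileRead()/FileWrite() take an explicit offset, and FileSeek() is replaced by FileSize(). A sketch of a caller using the updated interface, not part of the patch (the path and the zero wait_event_info are placeholders):

    #include "postgres.h"
    #include <fcntl.h>
    #include "storage/fd.h"

    /* Read up to bufsize bytes from the start of a file. */
    int
    example_read_head(char *buf, int bufsize)
    {
        File    fd = PathNameOpenFile("some/relative/path", O_RDONLY | PG_BINARY);
        off_t   size = FileSize(fd);    /* where FileSeek(fd, 0, SEEK_END) was used before */
        int     nread;

        /* Reads now carry their offset instead of relying on a prior seek. */
        nread = FileRead(fd, buf, Min(bufsize, (int) size), 0 /* offset */ ,
                         0 /* wait_event_info */ );

        FileClose(fd);
        return nread;
    }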
Copyright (c) 1994, Regents of the University of California * * src/include/storage/indexfsm.h diff --git a/src/include/storage/ipc.h b/src/include/storage/ipc.h index bde635f502..6a05a89349 100644 --- a/src/include/storage/ipc.h +++ b/src/include/storage/ipc.h @@ -8,7 +8,7 @@ * exit-time cleanup for either a postmaster or a backend. * * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/include/storage/ipc.h @@ -63,6 +63,7 @@ typedef void (*shmem_startup_hook_type) (void); /* ipc.c */ extern PGDLLIMPORT bool proc_exit_inprogress; +extern PGDLLIMPORT bool shmem_exit_inprogress; extern void proc_exit(int code) pg_attribute_noreturn(); extern void shmem_exit(int code); diff --git a/src/include/storage/item.h b/src/include/storage/item.h index 72426a2d48..44a52fbdfb 100644 --- a/src/include/storage/item.h +++ b/src/include/storage/item.h @@ -4,7 +4,7 @@ * POSTGRES disk item definitions. * * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/include/storage/item.h diff --git a/src/include/storage/itemid.h b/src/include/storage/itemid.h index 2ec86b57fc..60570cc53d 100644 --- a/src/include/storage/itemid.h +++ b/src/include/storage/itemid.h @@ -4,7 +4,7 @@ * Standard POSTGRES buffer page item identifier definitions. * * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/include/storage/itemid.h diff --git a/src/include/storage/itemptr.h b/src/include/storage/itemptr.h index 8f8e22444a..d87101f270 100644 --- a/src/include/storage/itemptr.h +++ b/src/include/storage/itemptr.h @@ -4,7 +4,7 @@ * POSTGRES disk item pointer definitions. * * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/include/storage/itemptr.h @@ -48,6 +48,28 @@ ItemPointerData; typedef ItemPointerData *ItemPointer; +/* ---------------- + * special values used in heap tuples (t_ctid) + * ---------------- + */ + +/* + * If a heap tuple holds a speculative insertion token rather than a real + * TID, ip_posid is set to SpecTokenOffsetNumber, and the token is stored in + * ip_blkid. SpecTokenOffsetNumber must be higher than MaxOffsetNumber, so + * that it can be distinguished from a valid offset number in a regular item + * pointer. + */ +#define SpecTokenOffsetNumber 0xfffe + +/* + * When a tuple is moved to a different partition by UPDATE, the t_ctid of + * the old tuple version is set to this magic value. + */ +#define MovedPartitionsOffsetNumber 0xfffd +#define MovedPartitionsBlockNumber InvalidBlockNumber + + /* ---------------- * support macros * ---------------- @@ -154,6 +176,25 @@ typedef ItemPointerData *ItemPointer; (pointer)->ip_posid = InvalidOffsetNumber \ ) +/* + * ItemPointerIndicatesMovedPartitions + * True iff the block number indicates the tuple has moved to another + * partition. 
+ */ +#define ItemPointerIndicatesMovedPartitions(pointer) \ +( \ + ItemPointerGetOffsetNumber(pointer) == MovedPartitionsOffsetNumber && \ + ItemPointerGetBlockNumberNoCheck(pointer) == MovedPartitionsBlockNumber \ +) + +/* + * ItemPointerSetMovedPartitions + * Indicate that the item referenced by the itempointer has moved into a + * different partition. + */ +#define ItemPointerSetMovedPartitions(pointer) \ + ItemPointerSet((pointer), MovedPartitionsBlockNumber, MovedPartitionsOffsetNumber) + /* ---------------- * externs * ---------------- diff --git a/src/include/storage/large_object.h b/src/include/storage/large_object.h index 796a8fdeea..a234d5cf8e 100644 --- a/src/include/storage/large_object.h +++ b/src/include/storage/large_object.h @@ -5,7 +5,7 @@ * zillions of large objects (internal, external, jaquith, inversion). * Now we only support inversion. * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/include/storage/large_object.h @@ -27,9 +27,9 @@ * offset is the current seek offset within the LO * flags contains some flag bits * - * NOTE: in current usage, flag bit IFS_RDLOCK is *always* set, and we don't - * bother to test for it. Permission checks are made at first read or write - * attempt, not during inv_open(), so we have other bits to remember that. + * NOTE: as of v11, permission checks are made when the large object is + * opened; therefore IFS_RDLOCK/IFS_WRLOCK indicate that read or write mode + * has been requested *and* the corresponding permission has been checked. * * NOTE: before 7.1, we also had to store references to the separate table * and index of a specific large object. Now they all live in pg_largeobject @@ -47,8 +47,6 @@ typedef struct LargeObjectDesc /* bits in flags: */ #define IFS_RDLOCK (1 << 0) /* LO was opened for reading */ #define IFS_WRLOCK (1 << 1) /* LO was opened for writing */ -#define IFS_RD_PERM_OK (1 << 2) /* read permission has been verified */ -#define IFS_WR_PERM_OK (1 << 3) /* write permission has been verified */ } LargeObjectDesc; @@ -78,6 +76,11 @@ typedef struct LargeObjectDesc #define MAX_LARGE_OBJECT_SIZE ((int64) INT_MAX * LOBLKSIZE) +/* + * GUC: backwards-compatibility flag to suppress LO permission checks + */ +extern bool lo_compat_privileges; + /* * Function definitions... */ diff --git a/src/include/storage/latch.h b/src/include/storage/latch.h index a43193c916..fd8735b7f5 100644 --- a/src/include/storage/latch.h +++ b/src/include/storage/latch.h @@ -90,7 +90,7 @@ * efficient than using WaitLatch or WaitLatchOrSocket. * * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/include/storage/latch.h @@ -129,7 +129,7 @@ typedef struct Latch #ifdef WIN32 #define WL_SOCKET_CONNECTED (1 << 5) #else -/* avoid having to to deal with case on platforms not requiring it */ +/* avoid having to deal with case on platforms not requiring it */ #define WL_SOCKET_CONNECTED WL_SOCKET_WRITEABLE #endif diff --git a/src/include/storage/lmgr.h b/src/include/storage/lmgr.h index 0b923227a2..e5356b7d54 100644 --- a/src/include/storage/lmgr.h +++ b/src/include/storage/lmgr.h @@ -4,7 +4,7 @@ * POSTGRES lock manager definitions. 
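To illustrate the new t_ctid conventions above: when an UPDATE moves a row to another partition, the superseded version's ctid is stamped with the magic block/offset pair, and code that chases ctid chains is expected to test for that marker. A hedged sketch, not part of the patch:

    #include "postgres.h"
    #include "storage/itemptr.h"

    /* Stamp the old tuple version's ctid as "moved to another partition". */
    void
    example_mark_moved(ItemPointer old_ctid)
    {
        ItemPointerSetMovedPartitions(old_ctid);
    }

    /* A ctid-chasing reader checks the marker before trying to follow it. */
    bool
    example_can_follow(ItemPointer ctid)
    {
        if (ItemPointerIndicatesMovedPartitions(ctid))
            return false;       /* successor lives in a different partition */

        /* ... otherwise fetch the next tuple version as usual ... */
        return true;
    }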
* * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/include/storage/lmgr.h @@ -45,6 +45,8 @@ extern void UnlockRelationOid(Oid relid, LOCKMODE lockmode); extern void LockRelation(Relation relation, LOCKMODE lockmode); extern bool ConditionalLockRelation(Relation relation, LOCKMODE lockmode); extern void UnlockRelation(Relation relation, LOCKMODE lockmode); +extern bool CheckRelationLockedByMe(Relation relation, LOCKMODE lockmode, + bool orstronger); extern bool LockHasWaitersRelation(Relation relation, LOCKMODE lockmode); extern void LockRelationIdForSession(LockRelId *relid, LOCKMODE lockmode); diff --git a/src/include/storage/lock.h b/src/include/storage/lock.h index 765431e299..a37fda7b63 100644 --- a/src/include/storage/lock.h +++ b/src/include/storage/lock.h @@ -4,7 +4,7 @@ * POSTGRES low-level lock mechanism * * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/include/storage/lock.h @@ -408,9 +408,10 @@ typedef struct LOCALLOCK PROCLOCK *proclock; /* associated PROCLOCK object, if any */ uint32 hashcode; /* copy of LOCKTAG's hash value */ int64 nLocks; /* total number of times lock is held */ + bool holdsStrongLockCount; /* bumped FastPathStrongRelationLocks */ + bool lockCleared; /* we read all sinval msgs for lock */ int numLockOwners; /* # of relevant ResourceOwners */ int maxLockOwners; /* allocated size of array */ - bool holdsStrongLockCount; /* bumped FastPathStrongRelationLocks */ LOCALLOCKOWNER *lockOwners; /* dynamically resizable array */ } LOCALLOCK; @@ -472,7 +473,8 @@ typedef enum { LOCKACQUIRE_NOT_AVAIL, /* lock not available, and dontWait=true */ LOCKACQUIRE_OK, /* lock successfully acquired */ - LOCKACQUIRE_ALREADY_HELD /* incremented count for lock already held */ + LOCKACQUIRE_ALREADY_HELD, /* incremented count for lock already held */ + LOCKACQUIRE_ALREADY_CLEAR /* incremented count for lock already clear */ } LockAcquireResult; /* Deadlock states identified by DeadLockCheck() */ @@ -528,14 +530,17 @@ extern LockAcquireResult LockAcquireExtended(const LOCKTAG *locktag, LOCKMODE lockmode, bool sessionLock, bool dontWait, - bool report_memory_error); + bool reportMemoryError, + LOCALLOCK **locallockp); extern void AbortStrongLockAcquire(void); +extern void MarkLockClear(LOCALLOCK *locallock); extern bool LockRelease(const LOCKTAG *locktag, LOCKMODE lockmode, bool sessionLock); extern void LockReleaseAll(LOCKMETHODID lockmethodid, bool allLocks); extern void LockReleaseSession(LOCKMETHODID lockmethodid); extern void LockReleaseCurrentOwner(LOCALLOCK **locallocks, int nlocks); extern void LockReassignCurrentOwner(LOCALLOCK **locallocks, int nlocks); +extern bool LockHeldByMe(const LOCKTAG *locktag, LOCKMODE lockmode); extern bool LockHasWaiters(const LOCKTAG *locktag, LOCKMODE lockmode, bool sessionLock); extern VirtualTransactionId *GetLockConflicts(const LOCKTAG *locktag, diff --git a/src/include/storage/lockdefs.h b/src/include/storage/lockdefs.h index fe9f7cb310..9c933fab3f 100644 --- a/src/include/storage/lockdefs.h +++ b/src/include/storage/lockdefs.h @@ -7,7 +7,7 @@ * contains definition that have to (indirectly) be available when included by * FRONTEND code. 
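The lock.h changes above let a caller retrieve the LOCALLOCK from LockAcquireExtended() and, after it has absorbed the pending shared-invalidation messages, call MarkLockClear() so that a later re-acquisition can return LOCKACQUIRE_ALREADY_CLEAR and skip that work. A sketch of the expected calling pattern, not part of the patch; AcceptInvalidationMessages() is the existing sinval entry point from utils/inval.h and is assumed here.

    #include "postgres.h"
    #include "storage/lock.h"
    #include "utils/inval.h"

    void
    example_lock_and_clear(const LOCKTAG *tag, LOCKMODE mode)
    {
        LOCALLOCK          *locallock;
        LockAcquireResult   res;

        res = LockAcquireExtended(tag, mode,
                                  false,    /* sessionLock */
                                  false,    /* dontWait */
                                  true,     /* reportMemoryError */
                                  &locallock);

        if (res == LOCKACQUIRE_ALREADY_CLEAR)
            return;             /* sinval already read for this lock */

        AcceptInvalidationMessages();
        if (locallock != NULL)
            MarkLockClear(locallock);
    }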
* - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/include/storage/lockdefs.h @@ -45,11 +45,15 @@ typedef int LOCKMODE; #define AccessExclusiveLock 8 /* ALTER TABLE, DROP TABLE, VACUUM FULL, * and unqualified LOCK TABLE */ +#define MaxLockMode 8 + + +/* WAL representation of an AccessExclusiveLock on a table */ typedef struct xl_standby_lock { TransactionId xid; /* xid of holder of AccessExclusiveLock */ - Oid dbOid; - Oid relOid; + Oid dbOid; /* DB containing table */ + Oid relOid; /* OID of table */ } xl_standby_lock; #endif /* LOCKDEF_H_ */ diff --git a/src/include/storage/lwlock.h b/src/include/storage/lwlock.h index 3d16132c88..b2dcb73287 100644 --- a/src/include/storage/lwlock.h +++ b/src/include/storage/lwlock.h @@ -4,7 +4,7 @@ * Lightweight lock manager * * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/include/storage/lwlock.h @@ -88,7 +88,7 @@ typedef union LWLockMinimallyPadded } LWLockMinimallyPadded; extern PGDLLIMPORT LWLockPadded *MainLWLockArray; -extern char *MainLWLockNames[]; +extern const char *const MainLWLockNames[]; /* struct for storing named tranche information */ typedef struct NamedLWLockTranche @@ -184,7 +184,7 @@ extern LWLockPadded *GetNamedLWLockTranche(const char *tranche_name); * registration in the main shared memory segment wouldn't work for that case. */ extern int LWLockNewTrancheId(void); -extern void LWLockRegisterTranche(int tranche_id, char *tranche_name); +extern void LWLockRegisterTranche(int tranche_id, const char *tranche_name); extern void LWLockInitialize(LWLock *lock, int tranche_id); /* @@ -211,8 +211,14 @@ typedef enum BuiltinTrancheIds LWTRANCHE_BUFFER_MAPPING, LWTRANCHE_LOCK_MANAGER, LWTRANCHE_PREDICATE_LOCK_MANAGER, + LWTRANCHE_PARALLEL_HASH_JOIN, LWTRANCHE_PARALLEL_QUERY_DSA, + LWTRANCHE_SESSION_DSA, + LWTRANCHE_SESSION_RECORD_TABLE, + LWTRANCHE_SESSION_TYPMOD_TABLE, + LWTRANCHE_SHARED_TUPLESTORE, LWTRANCHE_TBM, + LWTRANCHE_PARALLEL_APPEND, LWTRANCHE_FIRST_USER_DEFINED } BuiltinTrancheIds; diff --git a/src/include/storage/off.h b/src/include/storage/off.h index 7228808b94..6179f2f854 100644 --- a/src/include/storage/off.h +++ b/src/include/storage/off.h @@ -4,7 +4,7 @@ * POSTGRES disk "offset" definitions. * * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/include/storage/off.h diff --git a/src/include/storage/pg_sema.h b/src/include/storage/pg_sema.h index 65db86f578..2072cf0e8f 100644 --- a/src/include/storage/pg_sema.h +++ b/src/include/storage/pg_sema.h @@ -10,7 +10,7 @@ * be provided by each port. * * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/include/storage/pg_sema.h diff --git a/src/include/storage/pg_shmem.h b/src/include/storage/pg_shmem.h index e3ee096229..6b1e040251 100644 --- a/src/include/storage/pg_shmem.h +++ b/src/include/storage/pg_shmem.h @@ -14,7 +14,7 @@ * only one ID number. 
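Because LWLockRegisterTranche() above now takes a const char *, string literals can be passed without casts. The usual registration pattern for a custom tranche looks like this sketch (not part of the patch; the tranche name and lock variable are invented, and the lock would normally live in shared memory):

    #include "postgres.h"
    #include "storage/lwlock.h"

    static LWLock example_lock;

    /* Run once during shared-memory initialization. */
    void
    example_register_tranche(void)
    {
        int     tranche_id = LWLockNewTrancheId();

        LWLockRegisterTranche(tranche_id, "example_tranche");
        LWLockInitialize(&example_lock, tranche_id);
    }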
* * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/include/storage/pg_shmem.h diff --git a/src/include/storage/pmsignal.h b/src/include/storage/pmsignal.h index 4b954d7614..074734156b 100644 --- a/src/include/storage/pmsignal.h +++ b/src/include/storage/pmsignal.h @@ -4,7 +4,7 @@ * routines for signaling the postmaster from its child processes * * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/include/storage/pmsignal.h @@ -14,6 +14,16 @@ #ifndef PMSIGNAL_H #define PMSIGNAL_H +#include + +#ifdef HAVE_SYS_PRCTL_H +#include "sys/prctl.h" +#endif + +#ifdef HAVE_SYS_PROCCTL_H +#include "sys/procctl.h" +#endif + /* * Reasons for signaling the postmaster. We can cope with simultaneous * signals for different reasons. If the same reason is signaled multiple @@ -51,6 +61,34 @@ extern bool IsPostmasterChildWalSender(int slot); extern void MarkPostmasterChildActive(void); extern void MarkPostmasterChildInactive(void); extern void MarkPostmasterChildWalSender(void); -extern bool PostmasterIsAlive(void); +extern bool PostmasterIsAliveInternal(void); +extern void PostmasterDeathSignalInit(void); + + +/* + * Do we have a way to ask for a signal on parent death? + * + * If we do, pmsignal.c will set up a signal handler, that sets a flag when + * the parent dies. Checking the flag first makes PostmasterIsAlive() a lot + * cheaper in usual case that the postmaster is alive. + */ +#if (defined(HAVE_SYS_PRCTL_H) && defined(PR_SET_PDEATHSIG)) || \ + (defined(HAVE_SYS_PROCCTL_H) && defined(PROC_PDEATHSIG_CTL)) +#define USE_POSTMASTER_DEATH_SIGNAL +#endif + +#ifdef USE_POSTMASTER_DEATH_SIGNAL +extern volatile sig_atomic_t postmaster_possibly_dead; + +static inline bool +PostmasterIsAlive(void) +{ + if (likely(!postmaster_possibly_dead)) + return true; + return PostmasterIsAliveInternal(); +} +#else +#define PostmasterIsAlive() PostmasterIsAliveInternal() +#endif #endif /* PMSIGNAL_H */ diff --git a/src/include/storage/predicate.h b/src/include/storage/predicate.h index 06bcbf2471..6a3464daa1 100644 --- a/src/include/storage/predicate.h +++ b/src/include/storage/predicate.h @@ -4,7 +4,7 @@ * POSTGRES public predicate locking definitions. * * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/include/storage/predicate.h diff --git a/src/include/storage/predicate_internals.h b/src/include/storage/predicate_internals.h index 89874a5c3b..0f736d37df 100644 --- a/src/include/storage/predicate_internals.h +++ b/src/include/storage/predicate_internals.h @@ -4,7 +4,7 @@ * POSTGRES internal predicate locking definitions. 
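With the pmsignal.h change above, PostmasterIsAlive() becomes a cheap flag test on platforms that can deliver a parent-death signal, falling back to PostmasterIsAliveInternal() otherwise. Callers keep the same shape; a sketch of a polling loop, not part of the patch, with the unit of work left abstract:

    #include "postgres.h"
    #include "storage/ipc.h"
    #include "storage/pmsignal.h"

    /* Hypothetical worker loop that exits promptly if the postmaster dies. */
    void
    example_poll_loop(void)
    {
        for (;;)
        {
            if (!PostmasterIsAlive())
                proc_exit(1);   /* postmaster gone: shut down */

            /* ... do one unit of work, then sleep or wait on a latch ... */
        }
    }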
* * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/include/storage/predicate_internals.h diff --git a/src/include/storage/proc.h b/src/include/storage/proc.h index 7dbaa81a8f..cb613c8076 100644 --- a/src/include/storage/proc.h +++ b/src/include/storage/proc.h @@ -4,7 +4,7 @@ * per-process shared memory data structures * * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/include/storage/proc.h @@ -14,6 +14,7 @@ #ifndef _PROC_H_ #define _PROC_H_ +#include "access/clog.h" #include "access/xlogdefs.h" #include "lib/ilist.h" #include "storage/latch.h" @@ -113,6 +114,9 @@ struct PGPROC Oid databaseId; /* OID of database this backend is using */ Oid roleId; /* OID of role using this backend */ + Oid tempNamespaceId; /* OID of temp schema this backend is + * using */ + bool isBackgroundWorker; /* true if background worker. */ /* @@ -171,6 +175,17 @@ struct PGPROC uint32 wait_event_info; /* proc's wait information */ + /* Support for group transaction status update. */ + bool clogGroupMember; /* true, if member of clog group */ + pg_atomic_uint32 clogGroupNext; /* next clog group member */ + TransactionId clogGroupMemberXid; /* transaction id of clog group member */ + XidStatus clogGroupMemberXidStatus; /* transaction status of clog + * group member */ + int clogGroupMemberPage; /* clog page corresponding to + * transaction id of clog group member */ + XLogRecPtr clogGroupMemberLsn; /* WAL location of commit record for clog + * group member */ + /* Per-backend LWLock. Protects fields below (but not group fields). */ LWLock backendLock; @@ -242,6 +257,8 @@ typedef struct PROC_HDR PGPROC *bgworkerFreeProcs; /* First pgproc waiting for group XID clear */ pg_atomic_uint32 procArrayGroupFirst; + /* First pgproc waiting for group transaction status update */ + pg_atomic_uint32 clogGroupFirst; /* WALWriter process's latch */ Latch *walwriterLatch; /* Checkpointer process's latch */ @@ -255,7 +272,7 @@ typedef struct PROC_HDR int startupBufferPinWaitBufId; } PROC_HDR; -extern PROC_HDR *ProcGlobal; +extern PGDLLIMPORT PROC_HDR *ProcGlobal; extern PGPROC *PreparedXactProcs; @@ -273,7 +290,7 @@ extern PGPROC *PreparedXactProcs; #define NUM_AUXILIARY_PROCS 4 /* configurable options */ -extern int DeadlockTimeout; +extern PGDLLIMPORT int DeadlockTimeout; extern int StatementTimeout; extern int LockTimeout; extern int IdleInTransactionSessionTimeout; diff --git a/src/include/storage/procarray.h b/src/include/storage/procarray.h index 174c537be4..75bab2985f 100644 --- a/src/include/storage/procarray.h +++ b/src/include/storage/procarray.h @@ -4,7 +4,7 @@ * POSTGRES process array definitions. * * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/include/storage/procarray.h diff --git a/src/include/storage/proclist.h b/src/include/storage/proclist.h index 9f22f3fd48..59a478e1f6 100644 --- a/src/include/storage/proclist.h +++ b/src/include/storage/proclist.h @@ -10,7 +10,7 @@ * See proclist_types.h for the structs that these functions operate on. 
They * are separated to break a header dependency cycle with proc.h. * - * Portions Copyright (c) 2016-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 2016-2018, PostgreSQL Global Development Group * * IDENTIFICATION * src/include/storage/proclist.h @@ -42,7 +42,7 @@ proclist_is_empty(proclist_head *list) /* * Get a pointer to a proclist_node inside a given PGPROC, given a procno and - * an offset. + * the proclist_node field's offset within struct PGPROC. */ static inline proclist_node * proclist_node_get(int procno, size_t node_offset) @@ -53,13 +53,15 @@ proclist_node_get(int procno, size_t node_offset) } /* - * Insert a node at the beginning of a list. + * Insert a process at the beginning of a list. */ static inline void proclist_push_head_offset(proclist_head *list, int procno, size_t node_offset) { proclist_node *node = proclist_node_get(procno, node_offset); + Assert(node->next == 0 && node->prev == 0); + if (list->head == INVALID_PGPROCNO) { Assert(list->tail == INVALID_PGPROCNO); @@ -79,13 +81,15 @@ proclist_push_head_offset(proclist_head *list, int procno, size_t node_offset) } /* - * Insert a node at the end of a list. + * Insert a process at the end of a list. */ static inline void proclist_push_tail_offset(proclist_head *list, int procno, size_t node_offset) { proclist_node *node = proclist_node_get(procno, node_offset); + Assert(node->next == 0 && node->prev == 0); + if (list->tail == INVALID_PGPROCNO) { Assert(list->head == INVALID_PGPROCNO); @@ -105,30 +109,38 @@ proclist_push_tail_offset(proclist_head *list, int procno, size_t node_offset) } /* - * Delete a node. The node must be in the list. + * Delete a process from a list --- it must be in the list! */ static inline void proclist_delete_offset(proclist_head *list, int procno, size_t node_offset) { proclist_node *node = proclist_node_get(procno, node_offset); + Assert(node->next != 0 || node->prev != 0); + if (node->prev == INVALID_PGPROCNO) + { + Assert(list->head == procno); list->head = node->next; + } else proclist_node_get(node->prev, node_offset)->next = node->next; if (node->next == INVALID_PGPROCNO) + { + Assert(list->tail == procno); list->tail = node->prev; + } else proclist_node_get(node->next, node_offset)->prev = node->prev; - node->next = node->prev = INVALID_PGPROCNO; + node->next = node->prev = 0; } /* - * Check if a node is currently in a list. It must be known that the node is - * not in any _other_ proclist that uses the same proclist_node, so that the - * only possibilities are that it is in this list or none. + * Check if a process is currently in a list. It must be known that the + * process is not in any _other_ proclist that uses the same proclist_node, + * so that the only possibilities are that it is in this list or none. */ static inline bool proclist_contains_offset(proclist_head *list, int procno, @@ -136,27 +148,26 @@ proclist_contains_offset(proclist_head *list, int procno, { proclist_node *node = proclist_node_get(procno, node_offset); - /* - * If this is not a member of a proclist, then the next and prev pointers - * should be 0. Circular lists are not allowed so this condition is not - * confusable with a real pgprocno 0. - */ + /* If it's not in any list, it's definitely not in this one. */ if (node->prev == 0 && node->next == 0) return false; - /* If there is a previous node, then this node must be in the list. 
*/ - if (node->prev != INVALID_PGPROCNO) - return true; - /* - * There is no previous node, so the only way this node can be in the list - * is if it's the head node. + * It must, in fact, be in this list. Ideally, in assert-enabled builds, + * we'd verify that. But since this function is typically used while + * holding a spinlock, crawling the whole list is unacceptable. However, + * we can verify matters in O(1) time when the node is a list head or + * tail, and that seems worth doing, since in practice that should often + * be enough to catch mistakes. */ - return list->head == procno; + Assert(node->prev != INVALID_PGPROCNO || list->head == procno); + Assert(node->next != INVALID_PGPROCNO || list->tail == procno); + + return true; } /* - * Remove and return the first node from a list (there must be one). + * Remove and return the first process from a list (there must be one). */ static inline PGPROC * proclist_pop_head_node_offset(proclist_head *list, size_t node_offset) @@ -205,4 +216,4 @@ proclist_pop_head_node_offset(proclist_head *list, size_t node_offset) proclist_node_get((iter).cur, \ offsetof(PGPROC, link_member))->next) -#endif +#endif /* PROCLIST_H */ diff --git a/src/include/storage/proclist_types.h b/src/include/storage/proclist_types.h index 716c4498d5..f4dac10fb6 100644 --- a/src/include/storage/proclist_types.h +++ b/src/include/storage/proclist_types.h @@ -5,7 +5,7 @@ * * See proclist.h for functions that operate on these types. * - * Portions Copyright (c) 2016-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 2016-2018, PostgreSQL Global Development Group * * IDENTIFICATION * src/include/storage/proclist_types.h @@ -16,7 +16,12 @@ #define PROCLIST_TYPES_H /* - * A node in a list of processes. + * A node in a doubly-linked list of processes. The link fields contain + * the 0-based PGPROC indexes of the next and previous process, or + * INVALID_PGPROCNO in the next-link of the last node and the prev-link + * of the first node. A node that is currently not in any list + * should have next == prev == 0; this is not a possible state for a node + * that is in a list, because we disallow circularity. */ typedef struct proclist_node { @@ -25,7 +30,8 @@ typedef struct proclist_node } proclist_node; /* - * Head of a doubly-linked list of PGPROCs, identified by pgprocno. + * Header of a doubly-linked list of PGPROCs, identified by pgprocno. + * An empty list is represented by head == tail == INVALID_PGPROCNO. 
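The rewritten comments above spell out the invariants of these lists: links are 0-based PGPROC indexes, INVALID_PGPROCNO terminates each end, a node that is in no list has next == prev == 0, and circular lists are disallowed (which is what keeps the zero/zero test unambiguous). The self-contained sketch below, with invented names (node_t, list_t, push_tail and so on), shows the same index-linked intrusive list shape outside of PostgreSQL; it illustrates the invariants, not the proclist code itself.

#include <assert.h>
#include <stdbool.h>

#define NNODES	8
#define INVALID (-1)			/* plays the role of INVALID_PGPROCNO */

typedef struct { int next, prev; } node_t;	/* like proclist_node */
typedef struct { int head, tail; } list_t;	/* like proclist_head */

static node_t nodes[NNODES];	/* like the shared PGPROC array; statically
								 * zeroed, so every node starts "in no list" */

static void
list_init(list_t *l)
{
	l->head = l->tail = INVALID;
}

static void
push_tail(list_t *l, int i)
{
	assert(nodes[i].next == 0 && nodes[i].prev == 0);	/* not in a list */
	if (l->tail == INVALID)
	{
		l->head = l->tail = i;
		nodes[i].next = nodes[i].prev = INVALID;
	}
	else
	{
		nodes[i].prev = l->tail;
		nodes[i].next = INVALID;
		nodes[l->tail].next = i;
		l->tail = i;
	}
}

static void
list_delete(list_t *l, int i)
{
	assert(nodes[i].next != 0 || nodes[i].prev != 0);	/* must be in a list */
	if (nodes[i].prev == INVALID)
		l->head = nodes[i].next;
	else
		nodes[nodes[i].prev].next = nodes[i].next;
	if (nodes[i].next == INVALID)
		l->tail = nodes[i].prev;
	else
		nodes[nodes[i].next].prev = nodes[i].prev;
	nodes[i].next = nodes[i].prev = 0;	/* back to "in no list" */
}

static bool
list_contains(const list_t *l, int i)
{
	(void) l;
	/* A linked node can never have next == prev == 0: that would make node 0
	 * both its predecessor and successor, i.e. a circular list, which the
	 * invariants forbid. */
	return !(nodes[i].next == 0 && nodes[i].prev == 0);
}

As in the PostgreSQL version, list_contains() can only answer "in this list or in none", so callers must already know the node is not sitting in some other list that reuses the same link fields; that is exactly the caveat the rewritten proclist_contains_offset() comment makes.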
*/ typedef struct proclist_head { @@ -42,4 +48,4 @@ typedef struct proclist_mutable_iter int next; /* pgprocno of the next PGPROC */ } proclist_mutable_iter; -#endif +#endif /* PROCLIST_TYPES_H */ diff --git a/src/include/storage/procsignal.h b/src/include/storage/procsignal.h index 20bb05b177..6db0d69b71 100644 --- a/src/include/storage/procsignal.h +++ b/src/include/storage/procsignal.h @@ -4,7 +4,7 @@ * Routines for interprocess signalling * * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/include/storage/procsignal.h diff --git a/src/include/storage/reinit.h b/src/include/storage/reinit.h index 90e494e933..a62703c647 100644 --- a/src/include/storage/reinit.h +++ b/src/include/storage/reinit.h @@ -4,7 +4,7 @@ * Reinitialization of unlogged relations * * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/include/storage/reinit.h @@ -15,7 +15,12 @@ #ifndef REINIT_H #define REINIT_H +#include "common/relpath.h" + + extern void ResetUnloggedRelations(int op); +extern bool parse_filename_for_nontemp_relation(const char *name, + int *oidchars, ForkNumber *fork); #define UNLOGGED_RELATION_CLEANUP 0x0001 #define UNLOGGED_RELATION_INIT 0x0002 diff --git a/src/include/storage/relfilenode.h b/src/include/storage/relfilenode.h index fb596e2ee7..abffd84a1c 100644 --- a/src/include/storage/relfilenode.h +++ b/src/include/storage/relfilenode.h @@ -4,7 +4,7 @@ * Physical access information for relations. * * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/include/storage/relfilenode.h diff --git a/src/include/storage/s_lock.h b/src/include/storage/s_lock.h index bbf505e246..04175dbaaa 100644 --- a/src/include/storage/s_lock.h +++ b/src/include/storage/s_lock.h @@ -22,7 +22,7 @@ * Unlock a previously acquired lock. * * bool S_LOCK_FREE(slock_t *lock) - * Tests if the lock is free. Returns TRUE if free, FALSE if locked. + * Tests if the lock is free. Returns true if free, false if locked. * This does *not* change the state of the lock. * * void SPIN_DELAY(void) @@ -86,7 +86,7 @@ * when using the SysV semaphore code. 
* * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/include/storage/s_lock.h @@ -543,6 +543,30 @@ tas(volatile slock_t *lock) #endif /* (__mc68000__ || __m68k__) && __linux__ */ +/* Motorola 88k */ +#if defined(__m88k__) +#define HAS_TEST_AND_SET + +typedef unsigned int slock_t; + +#define TAS(lock) tas(lock) + +static __inline__ int +tas(volatile slock_t *lock) +{ + register slock_t _res = 1; + + __asm__ __volatile__( + " xmem %0, %2, %%r0 \n" +: "+r"(_res), "+m"(*lock) +: "r"(lock) +: "memory"); + return (int) _res; +} + +#endif /* __m88k__ */ + + /* * VAXen -- even multiprocessor ones * (thanks to Tom Ivar Helbekkmo) diff --git a/src/include/storage/sharedfileset.h b/src/include/storage/sharedfileset.h new file mode 100644 index 0000000000..ed6b178aed --- /dev/null +++ b/src/include/storage/sharedfileset.h @@ -0,0 +1,45 @@ +/*------------------------------------------------------------------------- + * + * sharedfileset.h + * Shared temporary file management. + * + * + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group + * Portions Copyright (c) 1994, Regents of the University of California + * + * src/include/storage/sharedfileset.h + * + *------------------------------------------------------------------------- + */ + +#ifndef SHAREDFILESET_H +#define SHAREDFILESET_H + +#include "storage/dsm.h" +#include "storage/fd.h" +#include "storage/spin.h" + +/* + * A set of temporary files that can be shared by multiple backends. + */ +typedef struct SharedFileSet +{ + pid_t creator_pid; /* PID of the creating process */ + uint32 number; /* per-PID identifier */ + slock_t mutex; /* mutex protecting the reference count */ + int refcnt; /* number of attached backends */ + int ntablespaces; /* number of tablespaces to use */ + Oid tablespaces[8]; /* OIDs of tablespaces to use. Assumes that + * it's rare that there more than temp + * tablespaces. */ +} SharedFileSet; + +extern void SharedFileSetInit(SharedFileSet *fileset, dsm_segment *seg); +extern void SharedFileSetAttach(SharedFileSet *fileset, dsm_segment *seg); +extern File SharedFileSetCreate(SharedFileSet *fileset, const char *name); +extern File SharedFileSetOpen(SharedFileSet *fileset, const char *name); +extern bool SharedFileSetDelete(SharedFileSet *fileset, const char *name, + bool error_on_failure); +extern void SharedFileSetDeleteAll(SharedFileSet *fileset); + +#endif diff --git a/src/include/storage/shm_mq.h b/src/include/storage/shm_mq.h index 02a93e0222..f85f2eb7d1 100644 --- a/src/include/storage/shm_mq.h +++ b/src/include/storage/shm_mq.h @@ -3,7 +3,7 @@ * shm_mq.h * single-reader, single-writer shared memory message queue * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/include/storage/shm_mq.h @@ -62,8 +62,8 @@ extern shm_mq_handle *shm_mq_attach(shm_mq *mq, dsm_segment *seg, /* Associate worker handle with shm_mq. */ extern void shm_mq_set_handle(shm_mq_handle *, BackgroundWorkerHandle *); -/* Break connection. */ -extern void shm_mq_detach(shm_mq *); +/* Break connection, release handle resources. */ +extern void shm_mq_detach(shm_mq_handle *mqh); /* Get the shm_mq from handle. 
*/ extern shm_mq *shm_mq_get_queue(shm_mq_handle *mqh); diff --git a/src/include/storage/shm_toc.h b/src/include/storage/shm_toc.h index 8ccd35d96b..4efe1723b4 100644 --- a/src/include/storage/shm_toc.h +++ b/src/include/storage/shm_toc.h @@ -12,7 +12,7 @@ * other data structure within the segment and only put the pointer to * the data structure itself in the table of contents. * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/include/storage/shm_toc.h diff --git a/src/include/storage/shmem.h b/src/include/storage/shmem.h index c6993387ff..b84d104347 100644 --- a/src/include/storage/shmem.h +++ b/src/include/storage/shmem.h @@ -11,7 +11,7 @@ * at the same address. This means shared memory pointers can be passed * around directly between different processes. * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/include/storage/shmem.h diff --git a/src/include/storage/sinval.h b/src/include/storage/sinval.h index 84c0b02da0..7156780215 100644 --- a/src/include/storage/sinval.h +++ b/src/include/storage/sinval.h @@ -4,7 +4,7 @@ * POSTGRES shared cache invalidation communication definitions. * * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/include/storage/sinval.h diff --git a/src/include/storage/sinvaladt.h b/src/include/storage/sinvaladt.h index 751735fc9a..2652b87b60 100644 --- a/src/include/storage/sinvaladt.h +++ b/src/include/storage/sinvaladt.h @@ -12,7 +12,7 @@ * The struct type SharedInvalidationMessage, defining the contents of * a single message, is defined in sinval.h. * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/include/storage/sinvaladt.h diff --git a/src/include/storage/smgr.h b/src/include/storage/smgr.h index 2279134588..c843bbc969 100644 --- a/src/include/storage/smgr.h +++ b/src/include/storage/smgr.h @@ -4,7 +4,7 @@ * storage manager switch public interface declarations. * * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/include/storage/smgr.h @@ -143,5 +143,6 @@ extern void RememberFsyncRequest(RelFileNode rnode, ForkNumber forknum, BlockNumber segno); extern void ForgetRelationFsyncRequests(RelFileNode rnode, ForkNumber forknum); extern void ForgetDatabaseFsyncRequests(Oid dbid); +extern void DropRelationFiles(RelFileNode *delrels, int ndelrels, bool isRedo); #endif /* SMGR_H */ diff --git a/src/include/storage/spin.h b/src/include/storage/spin.h index 66698645c2..b49fc10aa4 100644 --- a/src/include/storage/spin.h +++ b/src/include/storage/spin.h @@ -19,7 +19,7 @@ * Unlock a previously acquired lock. * * bool SpinLockFree(slock_t *lock) - * Tests if the lock is free. Returns TRUE if free, FALSE if locked. + * Tests if the lock is free. Returns true if free, false if locked. 
* This does *not* change the state of the lock. * * Callers must beware that the macro argument may be evaluated multiple @@ -41,7 +41,7 @@ * be again. * * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/include/storage/spin.h diff --git a/src/include/storage/standby.h b/src/include/storage/standby.h index f5404b4c1f..1fcd8cf1b5 100644 --- a/src/include/storage/standby.h +++ b/src/include/storage/standby.h @@ -4,7 +4,7 @@ * Definitions for hot standby mode. * * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/include/storage/standby.h @@ -50,7 +50,7 @@ extern void StandbyAcquireAccessExclusiveLock(TransactionId xid, Oid dbOid, Oid extern void StandbyReleaseLockTree(TransactionId xid, int nsubxids, TransactionId *subxids); extern void StandbyReleaseAllLocks(void); -extern void StandbyReleaseOldLocks(int nxids, TransactionId *xids); +extern void StandbyReleaseOldLocks(TransactionId oldxid); #define MinSizeOfXactRunningXacts offsetof(xl_running_xacts, xids) diff --git a/src/include/storage/standbydefs.h b/src/include/storage/standbydefs.h index a0af6788e9..17b74ca3b6 100644 --- a/src/include/storage/standbydefs.h +++ b/src/include/storage/standbydefs.h @@ -4,7 +4,7 @@ * Frontend exposed definitions for hot standby mode. * * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/include/storage/standbydefs.h @@ -64,7 +64,7 @@ typedef struct xl_invalidations { Oid dbId; /* MyDatabaseId */ Oid tsId; /* MyDatabaseTableSpace */ - bool relcacheInitFileInval; /* invalidate relcache init file */ + bool relcacheInitFileInval; /* invalidate relcache init files */ int nmsgs; /* number of shared inval msgs */ SharedInvalidationMessage msgs[FLEXIBLE_ARRAY_MEMBER]; } xl_invalidations; diff --git a/src/include/tcop/deparse_utility.h b/src/include/tcop/deparse_utility.h index 9c4e608934..766332f6a5 100644 --- a/src/include/tcop/deparse_utility.h +++ b/src/include/tcop/deparse_utility.h @@ -2,7 +2,7 @@ * * deparse_utility.h * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/include/tcop/deparse_utility.h @@ -44,6 +44,7 @@ typedef struct CollectedATSubcmd typedef struct CollectedCommand { CollectedCommandType type; + bool in_extension; Node *parsetree; @@ -97,9 +98,11 @@ typedef struct CollectedCommand /* ALTER DEFAULT PRIVILEGES */ struct { - GrantObjectType objtype; + ObjectType objtype; } defprivs; } d; + + struct CollectedCommand *parent; /* when nested */ } CollectedCommand; #endif /* DEPARSE_UTILITY_H */ diff --git a/src/include/tcop/dest.h b/src/include/tcop/dest.h index c990544a16..82f0f2e741 100644 --- a/src/include/tcop/dest.h +++ b/src/include/tcop/dest.h @@ -57,7 +57,7 @@ * calls in portal and cursor manipulations. 
* * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/include/tcop/dest.h @@ -129,7 +129,8 @@ struct _DestReceiver /* Private fields might appear beyond this point... */ }; -extern DestReceiver *None_Receiver; /* permanent receiver for DestNone */ +extern PGDLLIMPORT DestReceiver *None_Receiver; /* permanent receiver for + * DestNone */ /* The primary destination management functions */ diff --git a/src/include/tcop/fastpath.h b/src/include/tcop/fastpath.h index 4a7c35f1a9..6e1608a4fa 100644 --- a/src/include/tcop/fastpath.h +++ b/src/include/tcop/fastpath.h @@ -3,7 +3,7 @@ * fastpath.h * * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/include/tcop/fastpath.h diff --git a/src/include/tcop/pquery.h b/src/include/tcop/pquery.h index 6abfe7b282..507d89cf69 100644 --- a/src/include/tcop/pquery.h +++ b/src/include/tcop/pquery.h @@ -4,7 +4,7 @@ * prototypes for pquery.c. * * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/include/tcop/pquery.h diff --git a/src/include/tcop/tcopprot.h b/src/include/tcop/tcopprot.h index f8c535c91e..63b4e4864d 100644 --- a/src/include/tcop/tcopprot.h +++ b/src/include/tcop/tcopprot.h @@ -4,7 +4,7 @@ * prototypes for postgres.c. * * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/include/tcop/tcopprot.h @@ -45,7 +45,7 @@ typedef enum LOGSTMT_ALL /* log all statements */ } LogStmtLevel; -extern int log_statement; +extern PGDLLIMPORT int log_statement; extern List *pg_parse_query(const char *query_string); extern List *pg_analyze_and_rewrite(RawStmt *parsetree, diff --git a/src/include/tcop/utility.h b/src/include/tcop/utility.h index 5bd386ddaa..da6767917c 100644 --- a/src/include/tcop/utility.h +++ b/src/include/tcop/utility.h @@ -4,7 +4,7 @@ * prototypes for utility.c. 
* * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/include/tcop/utility.h @@ -20,6 +20,8 @@ typedef enum { PROCESS_UTILITY_TOPLEVEL, /* toplevel interactive command */ PROCESS_UTILITY_QUERY, /* a complete query, but not toplevel */ + PROCESS_UTILITY_QUERY_NONATOMIC, /* a complete query, nonatomic + * execution context */ PROCESS_UTILITY_SUBCOMMAND /* a portion of a query */ } ProcessUtilityContext; diff --git a/src/include/tsearch/dicts/regis.h b/src/include/tsearch/dicts/regis.h index 7fdf82af65..15dba3a505 100644 --- a/src/include/tsearch/dicts/regis.h +++ b/src/include/tsearch/dicts/regis.h @@ -4,7 +4,7 @@ * * Declarations for fast regex subset, used by ISpell * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * * src/include/tsearch/dicts/regis.h * diff --git a/src/include/tsearch/dicts/spell.h b/src/include/tsearch/dicts/spell.h index 3032d0b508..210f97dda9 100644 --- a/src/include/tsearch/dicts/spell.h +++ b/src/include/tsearch/dicts/spell.h @@ -4,7 +4,7 @@ * * Declarations for ISpell dictionary * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * * src/include/tsearch/dicts/spell.h * diff --git a/src/include/tsearch/ts_cache.h b/src/include/tsearch/ts_cache.h index abff0fdfcc..410f1d54af 100644 --- a/src/include/tsearch/ts_cache.h +++ b/src/include/tsearch/ts_cache.h @@ -3,7 +3,7 @@ * ts_cache.h * Tsearch related object caches. * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/include/tsearch/ts_cache.h diff --git a/src/include/tsearch/ts_locale.h b/src/include/tsearch/ts_locale.h index c32f0743aa..02809539dd 100644 --- a/src/include/tsearch/ts_locale.h +++ b/src/include/tsearch/ts_locale.h @@ -3,7 +3,7 @@ * ts_locale.h * locale compatibility layer for tsearch * - * Copyright (c) 1998-2017, PostgreSQL Global Development Group + * Copyright (c) 1998-2018, PostgreSQL Global Development Group * * src/include/tsearch/ts_locale.h * @@ -41,27 +41,15 @@ typedef struct #define TOUCHAR(x) (*((const unsigned char *) (x))) -#ifdef USE_WIDE_UPPER_LOWER - -extern int t_isdigit(const char *ptr); -extern int t_isspace(const char *ptr); -extern int t_isalpha(const char *ptr); -extern int t_isprint(const char *ptr); - /* The second argument of t_iseq() must be a plain ASCII character */ #define t_iseq(x,c) (TOUCHAR(x) == (unsigned char) (c)) #define COPYCHAR(d,s) memcpy(d, s, pg_mblen(s)) -#else /* not USE_WIDE_UPPER_LOWER */ -#define t_isdigit(x) isdigit(TOUCHAR(x)) -#define t_isspace(x) isspace(TOUCHAR(x)) -#define t_isalpha(x) isalpha(TOUCHAR(x)) -#define t_isprint(x) isprint(TOUCHAR(x)) -#define t_iseq(x,c) (TOUCHAR(x) == (unsigned char) (c)) - -#define COPYCHAR(d,s) (*((unsigned char *) (d)) = TOUCHAR(s)) -#endif /* USE_WIDE_UPPER_LOWER */ +extern int t_isdigit(const char *ptr); +extern int t_isspace(const char *ptr); +extern int t_isalpha(const char *ptr); +extern int t_isprint(const char *ptr); extern char *lowerstr(const char *str); extern char *lowerstr_with_len(const char *str, int len); diff --git a/src/include/tsearch/ts_public.h 
b/src/include/tsearch/ts_public.h index 94ba7fcb20..0b7a5aa68e 100644 --- a/src/include/tsearch/ts_public.h +++ b/src/include/tsearch/ts_public.h @@ -4,7 +4,7 @@ * Public interface to various tsearch modules, such as * parsers and dictionaries. * - * Copyright (c) 1998-2017, PostgreSQL Global Development Group + * Copyright (c) 1998-2018, PostgreSQL Global Development Group * * src/include/tsearch/ts_public.h * diff --git a/src/include/tsearch/ts_type.h b/src/include/tsearch/ts_type.h index 30d7c4bccd..ccf5701aa3 100644 --- a/src/include/tsearch/ts_type.h +++ b/src/include/tsearch/ts_type.h @@ -3,7 +3,7 @@ * ts_type.h * Definitions for the tsvector and tsquery types * - * Copyright (c) 1998-2017, PostgreSQL Global Development Group + * Copyright (c) 1998-2018, PostgreSQL Global Development Group * * src/include/tsearch/ts_type.h * diff --git a/src/include/tsearch/ts_utils.h b/src/include/tsearch/ts_utils.h index 3312353026..d59e38c36b 100644 --- a/src/include/tsearch/ts_utils.h +++ b/src/include/tsearch/ts_utils.h @@ -3,7 +3,7 @@ * ts_utils.h * helper utilities for tsearch * - * Copyright (c) 1998-2017, PostgreSQL Global Development Group + * Copyright (c) 1998-2018, PostgreSQL Global Development Group * * src/include/tsearch/ts_utils.h * @@ -25,9 +25,11 @@ struct TSVectorParseStateData; /* opaque struct in tsvector_parser.c */ typedef struct TSVectorParseStateData *TSVectorParseState; -extern TSVectorParseState init_tsvector_parser(char *input, - bool oprisdelim, - bool is_tsquery); +#define P_TSV_OPR_IS_DELIM (1 << 0) +#define P_TSV_IS_TSQUERY (1 << 1) +#define P_TSV_IS_WEB (1 << 2) + +extern TSVectorParseState init_tsvector_parser(char *input, int flags); extern void reset_tsvector_parser(TSVectorParseState state, char *input); extern bool gettoken_tsvector(TSVectorParseState state, char **token, int *len, @@ -35,6 +37,16 @@ extern bool gettoken_tsvector(TSVectorParseState state, char **endptr); extern void close_tsvector_parser(TSVectorParseState state); +/* phrase operator begins with '<' */ +#define ISOPERATOR(x) \ + ( pg_mblen(x) == 1 && ( *(x) == '!' || \ + *(x) == '&' || \ + *(x) == '|' || \ + *(x) == '(' || \ + *(x) == ')' || \ + *(x) == '<' \ + ) ) + /* parse_tsquery */ struct TSQueryParserStateData; /* private in backend/utils/adt/tsquery.c */ @@ -46,9 +58,13 @@ typedef void (*PushFunction) (Datum opaque, TSQueryParserState state, * QueryOperand struct */ bool prefix); +#define P_TSQ_PLAIN (1 << 0) +#define P_TSQ_WEB (1 << 1) + extern TSQuery parse_tsquery(char *buf, PushFunction pushval, - Datum opaque, bool isplain); + Datum opaque, + int flags); /* Functions for use by PushFunction implementations */ extern void pushValue(TSQueryParserState state, @@ -146,7 +162,7 @@ typedef struct ExecPhraseData * val: lexeme to test for presence of * data: to be filled with lexeme positions; NULL if position data not needed * - * Return TRUE if lexeme is present in data, else FALSE. If data is not + * Return true if lexeme is present in data, else false. If data is not * NULL, it should be filled with lexeme positions, but function can leave * it as zeroes if position data is not available. */ @@ -167,7 +183,7 @@ typedef bool (*TSExecuteCallback) (void *arg, QueryOperand *val, #define TS_EXEC_CALC_NOT (0x01) /* * If TS_EXEC_PHRASE_NO_POS is set, allow OP_PHRASE to be executed lossily - * in the absence of position information: a TRUE result indicates that the + * in the absence of position information: a true result indicates that the * phrase might be present. 
Without this flag, OP_PHRASE always returns * false if lexeme position information is not available. */ diff --git a/src/include/utils/.gitignore b/src/include/utils/.gitignore index 25db658da5..05cfa7a8d6 100644 --- a/src/include/utils/.gitignore +++ b/src/include/utils/.gitignore @@ -2,3 +2,4 @@ /fmgrprotos.h /probes.h /errcodes.h +/header-stamp diff --git a/src/include/utils/acl.h b/src/include/utils/acl.h index 43273eaab5..72936eeb4d 100644 --- a/src/include/utils/acl.h +++ b/src/include/utils/acl.h @@ -4,7 +4,7 @@ * Definition of (and support for) access control list data structures. * * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/include/utils/acl.h @@ -12,9 +12,17 @@ * NOTES * An ACL array is simply an array of AclItems, representing the union * of the privileges represented by the individual items. A zero-length - * array represents "no privileges". There are no assumptions about the - * ordering of the items, but we do expect that there are no two entries - * in the array with the same grantor and grantee. + * array represents "no privileges". + * + * The order of items in the array is important as client utilities (in + * particular, pg_dump, though possibly other clients) expect to be able + * to issue GRANTs in the ordering of the items in the array. The reason + * this matters is that GRANTs WITH GRANT OPTION must be before any GRANTs + * which depend on it. This happens naturally in the backend during + * operations as we update ACLs in-place, new items are appended, and + * existing entries are only removed if there's no dependency on them (no + * GRANT can been based on it, or, if there was, those GRANTs are also + * removed). * * For backward-compatibility purposes we have to allow null ACL entries * in system catalogs. 
A null ACL will be treated as meaning "default @@ -155,7 +163,7 @@ typedef ArrayType Acl; #define ACL_ALL_RIGHTS_FUNCTION (ACL_EXECUTE) #define ACL_ALL_RIGHTS_LANGUAGE (ACL_USAGE) #define ACL_ALL_RIGHTS_LARGEOBJECT (ACL_SELECT|ACL_UPDATE) -#define ACL_ALL_RIGHTS_NAMESPACE (ACL_USAGE|ACL_CREATE) +#define ACL_ALL_RIGHTS_SCHEMA (ACL_USAGE|ACL_CREATE) #define ACL_ALL_RIGHTS_TABLESPACE (ACL_CREATE) #define ACL_ALL_RIGHTS_TYPE (ACL_USAGE) @@ -174,44 +182,15 @@ typedef enum ACLCHECK_NOT_OWNER } AclResult; -/* this enum covers all object types that can have privilege errors */ -/* currently it's only used to tell aclcheck_error what to say */ -typedef enum AclObjectKind -{ - ACL_KIND_COLUMN, /* pg_attribute */ - ACL_KIND_CLASS, /* pg_class */ - ACL_KIND_SEQUENCE, /* pg_sequence */ - ACL_KIND_DATABASE, /* pg_database */ - ACL_KIND_PROC, /* pg_proc */ - ACL_KIND_OPER, /* pg_operator */ - ACL_KIND_TYPE, /* pg_type */ - ACL_KIND_LANGUAGE, /* pg_language */ - ACL_KIND_LARGEOBJECT, /* pg_largeobject */ - ACL_KIND_NAMESPACE, /* pg_namespace */ - ACL_KIND_OPCLASS, /* pg_opclass */ - ACL_KIND_OPFAMILY, /* pg_opfamily */ - ACL_KIND_COLLATION, /* pg_collation */ - ACL_KIND_CONVERSION, /* pg_conversion */ - ACL_KIND_STATISTICS, /* pg_statistic_ext */ - ACL_KIND_TABLESPACE, /* pg_tablespace */ - ACL_KIND_TSDICTIONARY, /* pg_ts_dict */ - ACL_KIND_TSCONFIGURATION, /* pg_ts_config */ - ACL_KIND_FDW, /* pg_foreign_data_wrapper */ - ACL_KIND_FOREIGN_SERVER, /* pg_foreign_server */ - ACL_KIND_EVENT_TRIGGER, /* pg_event_trigger */ - ACL_KIND_EXTENSION, /* pg_extension */ - ACL_KIND_PUBLICATION, /* pg_publication */ - ACL_KIND_SUBSCRIPTION, /* pg_subscription */ - MAX_ACL_KIND /* MUST BE LAST */ -} AclObjectKind; - /* * routines used internally */ -extern Acl *acldefault(GrantObjectType objtype, Oid ownerId); -extern Acl *get_user_default_acl(GrantObjectType objtype, Oid ownerId, +extern Acl *acldefault(ObjectType objtype, Oid ownerId); +extern Acl *get_user_default_acl(ObjectType objtype, Oid ownerId, Oid nsp_oid); +extern void recordDependencyOnNewAcl(Oid classId, Oid objectId, int32 objsubId, + Oid ownerId, Acl *acl); extern Acl *aclupdate(const Acl *old_acl, const AclItem *mod_aip, int modechg, Oid ownerId, DropBehavior behavior); @@ -293,10 +272,10 @@ extern AclResult pg_foreign_data_wrapper_aclcheck(Oid fdw_oid, Oid roleid, AclMo extern AclResult pg_foreign_server_aclcheck(Oid srv_oid, Oid roleid, AclMode mode); extern AclResult pg_type_aclcheck(Oid type_oid, Oid roleid, AclMode mode); -extern void aclcheck_error(AclResult aclerr, AclObjectKind objectkind, +extern void aclcheck_error(AclResult aclerr, ObjectType objtype, const char *objectname); -extern void aclcheck_error_col(AclResult aclerr, AclObjectKind objectkind, +extern void aclcheck_error_col(AclResult aclerr, ObjectType objtype, const char *objectname, const char *colname); extern void aclcheck_error_type(AclResult aclerr, Oid typeOid); diff --git a/src/include/utils/aclchk_internal.h b/src/include/utils/aclchk_internal.h index 3374edb638..f7c44fcd4b 100644 --- a/src/include/utils/aclchk_internal.h +++ b/src/include/utils/aclchk_internal.h @@ -2,7 +2,7 @@ * * aclchk_internal.h * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/include/utils/aclchk_internal.h @@ -26,12 +26,12 @@ * Note: 'all_privs' and 'privileges' represent object-level privileges only. 
* There might also be column-level privilege specifications, which are * represented in col_privs (this is a list of untransformed AccessPriv nodes). - * Column privileges are only valid for objtype ACL_OBJECT_RELATION. + * Column privileges are only valid for objtype OBJECT_TABLE. */ typedef struct { bool is_grant; - GrantObjectType objtype; + ObjectType objtype; List *objects; bool all_privs; AclMode privileges; diff --git a/src/include/utils/array.h b/src/include/utils/array.h index 61a67a21e3..afbb532e9c 100644 --- a/src/include/utils/array.h +++ b/src/include/utils/array.h @@ -51,7 +51,7 @@ * arrays holding the elements. * * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/include/utils/array.h @@ -64,6 +64,10 @@ #include "fmgr.h" #include "utils/expandeddatum.h" +/* avoid including execnodes.h here */ +struct ExprState; +struct ExprContext; + /* * Arrays are varlena objects, so must meet the varlena convention that @@ -252,7 +256,7 @@ typedef struct ArrayIteratorData *ArrayIterator; #define PG_RETURN_EXPANDED_ARRAY(x) PG_RETURN_DATUM(EOHPGetRWDatum(&(x)->hdr)) /* fmgr macros for AnyArrayType (ie, get either varlena or expanded form) */ -#define PG_GETARG_ANY_ARRAY(n) DatumGetAnyArray(PG_GETARG_DATUM(n)) +#define PG_GETARG_ANY_ARRAY_P(n) DatumGetAnyArrayP(PG_GETARG_DATUM(n)) /* * Access macros for varlena array header fields. @@ -360,8 +364,9 @@ extern ArrayType *array_set(ArrayType *array, int nSubscripts, int *indx, Datum dataValue, bool isNull, int arraytyplen, int elmlen, bool elmbyval, char elmalign); -extern Datum array_map(FunctionCallInfo fcinfo, Oid retType, - ArrayMapState *amstate); +extern Datum array_map(Datum arrayd, + struct ExprState *exprstate, struct ExprContext *econtext, + Oid retType, ArrayMapState *amstate); extern void array_bitmap_copy(bits8 *destbitmap, int destoffset, const bits8 *srcbitmap, int srcoffset, @@ -440,7 +445,7 @@ extern Datum expand_array(Datum arraydatum, MemoryContext parentcontext, extern ExpandedArrayHeader *DatumGetExpandedArray(Datum d); extern ExpandedArrayHeader *DatumGetExpandedArrayX(Datum d, ArrayMetaState *metacache); -extern AnyArrayType *DatumGetAnyArray(Datum d); +extern AnyArrayType *DatumGetAnyArrayP(Datum d); extern void deconstruct_expanded_array(ExpandedArrayHeader *eah); #endif /* ARRAY_H */ diff --git a/src/include/utils/arrayaccess.h b/src/include/utils/arrayaccess.h index 7655c80bed..f04752213e 100644 --- a/src/include/utils/arrayaccess.h +++ b/src/include/utils/arrayaccess.h @@ -4,7 +4,7 @@ * Declarations for element-by-element access to Postgres arrays. 
* * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/include/utils/arrayaccess.h diff --git a/src/include/utils/ascii.h b/src/include/utils/ascii.h index d3b183f11f..9ecf164047 100644 --- a/src/include/utils/ascii.h +++ b/src/include/utils/ascii.h @@ -1,7 +1,7 @@ /*----------------------------------------------------------------------- * ascii.h * - * Portions Copyright (c) 1999-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1999-2018, PostgreSQL Global Development Group * * src/include/utils/ascii.h * diff --git a/src/include/utils/attoptcache.h b/src/include/utils/attoptcache.h index 4eef793181..f36ce279ac 100644 --- a/src/include/utils/attoptcache.h +++ b/src/include/utils/attoptcache.h @@ -3,7 +3,7 @@ * attoptcache.h * Attribute options cache. * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/include/utils/attoptcache.h diff --git a/src/include/utils/backend_random.h b/src/include/utils/backend_random.h index 9781aea0ac..99ea2cb9fb 100644 --- a/src/include/utils/backend_random.h +++ b/src/include/utils/backend_random.h @@ -3,7 +3,7 @@ * backend_random.h * Declarations for backend random number generation * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * * src/include/utils/backend_random.h * diff --git a/src/include/utils/builtins.h b/src/include/utils/builtins.h index 762532f636..61785a2433 100644 --- a/src/include/utils/builtins.h +++ b/src/include/utils/builtins.h @@ -4,7 +4,7 @@ * Declarations for operations on built-in types. 
* * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/include/utils/builtins.h @@ -37,12 +37,14 @@ extern unsigned hex_decode(const char *src, unsigned len, char *dst); extern int2vector *buildint2vector(const int16 *int2s, int n); /* name.c */ -extern int namecpy(Name n1, Name n2); +extern int namecpy(Name n1, const NameData *n2); extern int namestrcpy(Name name, const char *str); extern int namestrcmp(Name name, const char *str); /* numutils.c */ extern int32 pg_atoi(const char *s, int size, int c); +extern int16 pg_strtoint16(const char *s); +extern int32 pg_strtoint32(const char *s); extern void pg_itoa(int16 i, char *a); extern void pg_ltoa(int32 l, char *a); extern void pg_lltoa(int64 ll, char *a); @@ -50,20 +52,6 @@ extern char *pg_ltostr_zeropad(char *str, int32 value, int32 minwidth); extern char *pg_ltostr(char *str, int32 value); extern uint64 pg_strtouint64(const char *str, char **endptr, int base); -/* float.c */ -extern PGDLLIMPORT int extra_float_digits; - -extern double get_float8_infinity(void); -extern float get_float4_infinity(void); -extern double get_float8_nan(void); -extern float get_float4_nan(void); -extern int is_infinite(double val); -extern double float8in_internal(char *num, char **endptr_p, - const char *type_name, const char *orig_string); -extern char *float8out_internal(double num); -extern int float4_cmp_internal(float4 a, float4 b); -extern int float8_cmp_internal(float8 a, float8 b); - /* oid.c */ extern oidvector *buildoidvector(const Oid *oids, int n); extern Oid oidparse(Node *node); @@ -78,6 +66,10 @@ extern bool quote_all_identifiers; extern const char *quote_identifier(const char *ident); extern char *quote_qualified_identifier(const char *qualifier, const char *ident); +extern void generate_operator_clause(fmStringInfo buf, + const char *leftop, Oid leftoptype, + Oid opoid, + const char *rightop, Oid rightoptype); /* varchar.c */ extern int bpchartruelen(char *s, int len); @@ -103,7 +95,7 @@ extern int inet_net_pton(int af, const char *src, void *dst, size_t size); /* network.c */ -extern double convert_network_to_scalar(Datum value, Oid typid); +extern double convert_network_to_scalar(Datum value, Oid typid, bool *failure); extern Datum network_scan_first(Datum in); extern Datum network_scan_last(Datum in); extern void clean_ipv6_addr(int addr_family, char *addr); @@ -112,10 +104,17 @@ extern void clean_ipv6_addr(int addr_family, char *addr); extern Datum numeric_float8_no_overflow(PG_FUNCTION_ARGS); /* format_type.c */ + +/* Control flags for format_type_extended */ +#define FORMAT_TYPE_TYPEMOD_GIVEN 0x01 /* typemod defined by caller */ +#define FORMAT_TYPE_ALLOW_INVALID 0x02 /* allow invalid types */ +#define FORMAT_TYPE_FORCE_QUALIFY 0x04 /* force qualification of type */ +extern char *format_type_extended(Oid type_oid, int32 typemod, bits16 flags); + extern char *format_type_be(Oid type_oid); extern char *format_type_be_qualified(Oid type_oid); extern char *format_type_with_typemod(Oid type_oid, int32 typemod); -extern char *format_type_with_typemod_qualified(Oid type_oid, int32 typemod); + extern int32 type_maximum_size(Oid type_oid, int32 typemod); /* quote.c */ diff --git a/src/include/utils/bytea.h b/src/include/utils/bytea.h index d7bd30842e..a959dde791 100644 --- a/src/include/utils/bytea.h +++ b/src/include/utils/bytea.h @@ -4,7 +4,7 @@ * Declarations for BYTEA 
data type support. * * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/include/utils/bytea.h diff --git a/src/include/utils/catcache.h b/src/include/utils/catcache.h index 200a3022e7..7b22f9c7bc 100644 --- a/src/include/utils/catcache.h +++ b/src/include/utils/catcache.h @@ -10,7 +10,7 @@ * guarantee that there can only be one matching row for a key combination. * * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/include/utils/catcache.h @@ -34,25 +34,33 @@ #define CATCACHE_MAXKEYS 4 + +/* function computing a datum's hash */ +typedef uint32 (*CCHashFN) (Datum datum); + +/* function computing equality of two datums */ +typedef bool (*CCFastEqualFN) (Datum a, Datum b); + typedef struct catcache { int id; /* cache identifier --- see syscache.h */ - slist_node cc_next; /* list link */ + int cc_nbuckets; /* # of hash buckets in this cache */ + TupleDesc cc_tupdesc; /* tuple descriptor (copied from reldesc) */ + dlist_head *cc_bucket; /* hash buckets */ + CCHashFN cc_hashfunc[CATCACHE_MAXKEYS]; /* hash function for each key */ + CCFastEqualFN cc_fastequal[CATCACHE_MAXKEYS]; /* fast equal function for + * each key */ + int cc_keyno[CATCACHE_MAXKEYS]; /* AttrNumber of each key */ + dlist_head cc_lists; /* list of CatCList structs */ + int cc_ntup; /* # of tuples currently in this cache */ + int cc_nkeys; /* # of keys (1..CATCACHE_MAXKEYS) */ const char *cc_relname; /* name of relation the tuples come from */ Oid cc_reloid; /* OID of relation the tuples come from */ Oid cc_indexoid; /* OID of index matching cache keys */ bool cc_relisshared; /* is relation shared across databases? */ - TupleDesc cc_tupdesc; /* tuple descriptor (copied from reldesc) */ - int cc_ntup; /* # of tuples currently in this cache */ - int cc_nbuckets; /* # of hash buckets in this cache */ - int cc_nkeys; /* # of keys (1..CATCACHE_MAXKEYS) */ - int cc_key[CATCACHE_MAXKEYS]; /* AttrNumber of each key */ - PGFunction cc_hashfunc[CATCACHE_MAXKEYS]; /* hash function for each key */ + slist_node cc_next; /* list link */ ScanKeyData cc_skey[CATCACHE_MAXKEYS]; /* precomputed key info for heap * scans */ - bool cc_isname[CATCACHE_MAXKEYS]; /* flag "name" key columns */ - dlist_head cc_lists; /* list of CatCList structs */ - dlist_head *cc_bucket; /* hash buckets */ /* * Keep these at the end, so that compiling catcache.c with CATCACHE_STATS @@ -79,7 +87,14 @@ typedef struct catctup { int ct_magic; /* for identifying CatCTup entries */ #define CT_MAGIC 0x57261502 - CatCache *my_cache; /* link to owning catcache */ + + uint32 hash_value; /* hash value for this tuple's keys */ + + /* + * Lookup keys for the entry. By-reference datums point into the tuple for + * positive cache entries, and are separately allocated for negative ones. + */ + Datum keys[CATCACHE_MAXKEYS]; /* * Each tuple in a cache is a member of a dlist that stores the elements @@ -88,15 +103,6 @@ typedef struct catctup */ dlist_node cache_elem; /* list member of per-bucket list */ - /* - * The tuple may also be a member of at most one CatCList. (If a single - * catcache is list-searched with varying numbers of keys, we may have to - * make multiple entries for the same tuple because of this restriction. 
- * Currently, that's not expected to be common, so we accept the potential - * inefficiency.) - */ - struct catclist *c_list; /* containing CatCList, or NULL if none */ - /* * A tuple marked "dead" must not be returned by subsequent searches. * However, it won't be physically deleted from the cache until its @@ -112,46 +118,63 @@ typedef struct catctup int refcount; /* number of active references */ bool dead; /* dead but not yet removed? */ bool negative; /* negative cache entry? */ - uint32 hash_value; /* hash value for this tuple's keys */ HeapTupleData tuple; /* tuple management header */ + + /* + * The tuple may also be a member of at most one CatCList. (If a single + * catcache is list-searched with varying numbers of keys, we may have to + * make multiple entries for the same tuple because of this restriction. + * Currently, that's not expected to be common, so we accept the potential + * inefficiency.) + */ + struct catclist *c_list; /* containing CatCList, or NULL if none */ + + CatCache *my_cache; /* link to owning catcache */ + /* properly aligned tuple data follows, unless a negative entry */ } CatCTup; +/* + * A CatCList describes the result of a partial search, ie, a search using + * only the first K key columns of an N-key cache. We store the keys used + * into the keys attribute to represent the stored key set. The CatCList + * object contains links to cache entries for all the table rows satisfying + * the partial key. (Note: none of these will be negative cache entries.) + * + * A CatCList is only a member of a per-cache list; we do not currently + * divide them into hash buckets. + * + * A list marked "dead" must not be returned by subsequent searches. + * However, it won't be physically deleted from the cache until its + * refcount goes to zero. (A list should be marked dead if any of its + * member entries are dead.) + * + * If "ordered" is true then the member tuples appear in the order of the + * cache's underlying index. This will be true in normal operation, but + * might not be true during bootstrap or recovery operations. (namespace.c + * is able to save some cycles when it is true.) + */ typedef struct catclist { int cl_magic; /* for identifying CatCList entries */ #define CL_MAGIC 0x52765103 - CatCache *my_cache; /* link to owning catcache */ + + uint32 hash_value; /* hash value for lookup keys */ + + dlist_node cache_elem; /* list member of per-catcache list */ /* - * A CatCList describes the result of a partial search, ie, a search using - * only the first K key columns of an N-key cache. We form the keys used - * into a tuple (with other attributes NULL) to represent the stored key - * set. The CatCList object contains links to cache entries for all the - * table rows satisfying the partial key. (Note: none of these will be - * negative cache entries.) - * - * A CatCList is only a member of a per-cache list; we do not currently - * divide them into hash buckets. - * - * A list marked "dead" must not be returned by subsequent searches. - * However, it won't be physically deleted from the cache until its - * refcount goes to zero. (A list should be marked dead if any of its - * member entries are dead.) - * - * If "ordered" is true then the member tuples appear in the order of the - * cache's underlying index. This will be true in normal operation, but - * might not be true during bootstrap or recovery operations. (namespace.c - * is able to save some cycles when it is true.) + * Lookup keys for the entry, with the first nkeys elements being valid. 
+ * All by-reference are separately allocated. */ - dlist_node cache_elem; /* list member of per-catcache list */ + Datum keys[CATCACHE_MAXKEYS]; + int refcount; /* number of active references */ bool dead; /* dead but not yet removed? */ bool ordered; /* members listed in index order? */ short nkeys; /* number of lookup keys specified */ - uint32 hash_value; /* hash value for lookup keys */ - HeapTupleData tuple; /* header for tuple holding keys */ int n_members; /* number of member tuples */ + CatCache *my_cache; /* link to owning catcache */ CatCTup *members[FLEXIBLE_ARRAY_MEMBER]; /* members */ } CatCList; @@ -174,8 +197,15 @@ extern CatCache *InitCatCache(int id, Oid reloid, Oid indexoid, extern void InitCatCachePhase2(CatCache *cache, bool touch_index); extern HeapTuple SearchCatCache(CatCache *cache, - Datum v1, Datum v2, - Datum v3, Datum v4); + Datum v1, Datum v2, Datum v3, Datum v4); +extern HeapTuple SearchCatCache1(CatCache *cache, + Datum v1); +extern HeapTuple SearchCatCache2(CatCache *cache, + Datum v1, Datum v2); +extern HeapTuple SearchCatCache3(CatCache *cache, + Datum v1, Datum v2, Datum v3); +extern HeapTuple SearchCatCache4(CatCache *cache, + Datum v1, Datum v2, Datum v3, Datum v4); extern void ReleaseCatCache(HeapTuple tuple); extern uint32 GetCatCacheHashValue(CatCache *cache, @@ -184,7 +214,7 @@ extern uint32 GetCatCacheHashValue(CatCache *cache, extern CatCList *SearchCatCacheList(CatCache *cache, int nkeys, Datum v1, Datum v2, - Datum v3, Datum v4); + Datum v3); extern void ReleaseCatCacheList(CatCList *list); extern void ResetCatalogCaches(void); diff --git a/src/include/utils/combocid.h b/src/include/utils/combocid.h index 6d8be8bf0f..094a9cf98b 100644 --- a/src/include/utils/combocid.h +++ b/src/include/utils/combocid.h @@ -4,7 +4,7 @@ * Combo command ID support routines * * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/include/utils/combocid.h diff --git a/src/include/utils/date.h b/src/include/utils/date.h index 0736a72946..eb6d2a16fe 100644 --- a/src/include/utils/date.h +++ b/src/include/utils/date.h @@ -4,7 +4,7 @@ * Definitions for the SQL "date" and "time" types. * * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/include/utils/date.h @@ -17,7 +17,8 @@ #include #include "fmgr.h" - +#include "pgtime.h" +#include "datatype/timestamp.h" typedef int32 DateADT; @@ -73,5 +74,7 @@ extern void EncodeSpecialDate(DateADT dt, char *str); extern DateADT GetSQLCurrentDate(void); extern TimeTzADT *GetSQLCurrentTime(int32 typmod); extern TimeADT GetSQLLocalTime(int32 typmod); +extern int time2tm(TimeADT time, struct pg_tm *tm, fsec_t *fsec); +extern int timetz2tm(TimeTzADT *time, struct pg_tm *tm, fsec_t *fsec, int *tzp); #endif /* DATE_H */ diff --git a/src/include/utils/datetime.h b/src/include/utils/datetime.h index 7968569fda..de9e9ade5c 100644 --- a/src/include/utils/datetime.h +++ b/src/include/utils/datetime.h @@ -3,10 +3,10 @@ * datetime.h * Definitions for date/time support code. * The support code is shared with other date data types, - * including abstime, reltime, date, and time. + * including date, and time. 
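The catcache.h changes above replace the old tuple-of-keys representation with a per-entry Datum keys[] array and introduce typed per-key callbacks (CCHashFN, CCFastEqualFN) plus arity-specific SearchCatCache1() .. SearchCatCache4() entry points. The sketch below shows the general shape of that per-key-callback pattern in isolation; every name (cache_desc, datum_t, uint32_hash, ...) is invented for illustration and the hash mixing is arbitrary, so this is not the catcache algorithm itself.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define MAXKEYS 4

typedef uintptr_t datum_t;							/* stand-in for Datum */
typedef uint32_t (*hash_fn) (datum_t d);			/* like CCHashFN */
typedef bool (*fasteq_fn) (datum_t a, datum_t b);	/* like CCFastEqualFN */

typedef struct
{
	int			nkeys;
	hash_fn		hashfunc[MAXKEYS];	/* hash function for each key */
	fasteq_fn	fastequal[MAXKEYS]; /* fast equality check for each key */
} cache_desc;

typedef struct
{
	uint32_t	hash_value;			/* combined hash of the entry's keys */
	datum_t		keys[MAXKEYS];		/* lookup keys stored with the entry */
} cache_entry;

static uint32_t
uint32_hash(datum_t d)
{
	uint32_t	x = (uint32_t) d;

	x ^= x >> 16;				/* arbitrary integer mixing, enough for a demo */
	x *= 0x7feb352dU;
	x ^= x >> 15;
	return x;
}

static bool
uint32_eq(datum_t a, datum_t b)
{
	return (uint32_t) a == (uint32_t) b;
}

static uint32_t
compute_hash(const cache_desc *desc, const datum_t *keys)
{
	uint32_t	h = 0;
	int			i;

	for (i = 0; i < desc->nkeys; i++)
		h = (h << 5) ^ (h >> 27) ^ desc->hashfunc[i] (keys[i]);
	return h;
}

static bool
entry_matches(const cache_desc *desc, const cache_entry *e,
			  uint32_t hash, const datum_t *keys)
{
	int			i;

	if (e->hash_value != hash)	/* cheap rejection on the stored hash */
		return false;
	for (i = 0; i < desc->nkeys; i++)
		if (!desc->fastequal[i] (e->keys[i], keys[i]))
			return false;
	return true;
}

int
main(void)
{
	cache_desc	desc = {2, {uint32_hash, uint32_hash}, {uint32_eq, uint32_eq}};
	cache_entry e = {0, {42, 7}};
	datum_t		probe[MAXKEYS] = {42, 7};

	e.hash_value = compute_hash(&desc, e.keys);
	printf("match: %d\n",
		   entry_matches(&desc, &e, compute_hash(&desc, probe), probe));
	return 0;
}

Keeping the keys as a plain Datum-like array rather than a heap tuple is what lets the comparison path above stay a handful of indirect calls, which mirrors the motivation for the restructuring shown in the hunk.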
* * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/include/utils/datetime.h diff --git a/src/include/utils/datum.h b/src/include/utils/datum.h index d2f0f9ed51..90ab5381aa 100644 --- a/src/include/utils/datum.h +++ b/src/include/utils/datum.h @@ -8,7 +8,7 @@ * of the Datum. (We do it this way because in most situations the caller * can look up the info just once and use it for many per-datum operations.) * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/include/utils/datum.h diff --git a/src/include/utils/dsa.h b/src/include/utils/dsa.h index 516ef610f0..5c6792e525 100644 --- a/src/include/utils/dsa.h +++ b/src/include/utils/dsa.h @@ -3,7 +3,7 @@ * dsa.h * Dynamic shared memory areas. * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * IDENTIFICATION @@ -22,7 +22,7 @@ struct dsa_area; typedef struct dsa_area dsa_area; /* - * If this system only uses a 32-bit value for Size, then use the 32-bit + * If this system only uses a 32-bit value for size_t, then use the 32-bit * implementation of DSA. This limits the amount of DSA that can be created * to something significantly less than the entire 4GB address space because * the DSA pointer must encode both a segment identifier and an offset, but @@ -102,7 +102,7 @@ typedef dsm_handle dsa_handle; extern void dsa_startup(void); extern dsa_area *dsa_create(int tranche_id); -extern dsa_area *dsa_create_in_place(void *place, Size size, +extern dsa_area *dsa_create_in_place(void *place, size_t size, int tranche_id, dsm_segment *segment); extern dsa_area *dsa_attach(dsa_handle handle); extern dsa_area *dsa_attach_in_place(void *place, dsm_segment *segment); @@ -113,10 +113,10 @@ extern void dsa_pin_mapping(dsa_area *area); extern void dsa_detach(dsa_area *area); extern void dsa_pin(dsa_area *area); extern void dsa_unpin(dsa_area *area); -extern void dsa_set_size_limit(dsa_area *area, Size limit); -extern Size dsa_minimum_size(void); +extern void dsa_set_size_limit(dsa_area *area, size_t limit); +extern size_t dsa_minimum_size(void); extern dsa_handle dsa_get_handle(dsa_area *area); -extern dsa_pointer dsa_allocate_extended(dsa_area *area, Size size, int flags); +extern dsa_pointer dsa_allocate_extended(dsa_area *area, size_t size, int flags); extern void dsa_free(dsa_area *area, dsa_pointer dp); extern void *dsa_get_address(dsa_area *area, dsa_pointer dp); extern void dsa_trim(dsa_area *area); diff --git a/src/include/utils/dynahash.h b/src/include/utils/dynahash.h index 8e03245a03..4365e1b439 100644 --- a/src/include/utils/dynahash.h +++ b/src/include/utils/dynahash.h @@ -4,7 +4,7 @@ * POSTGRES dynahash.h file definitions * * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/include/utils/dynahash.h diff --git a/src/include/utils/dynamic_loader.h b/src/include/utils/dynamic_loader.h deleted file mode 100644 index 6c9287b611..0000000000 --- 
a/src/include/utils/dynamic_loader.h +++ /dev/null @@ -1,25 +0,0 @@ -/*------------------------------------------------------------------------- - * - * dynamic_loader.h - * - * - * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group - * Portions Copyright (c) 1994, Regents of the University of California - * - * src/include/utils/dynamic_loader.h - * - *------------------------------------------------------------------------- - */ -#ifndef DYNAMIC_LOADER_H -#define DYNAMIC_LOADER_H - -#include "fmgr.h" - - -extern void *pg_dlopen(char *filename); -extern PGFunction pg_dlsym(void *handle, char *funcname); -extern void pg_dlclose(void *handle); -extern char *pg_dlerror(void); - -#endif /* DYNAMIC_LOADER_H */ diff --git a/src/include/utils/elog.h b/src/include/utils/elog.h index 7bfd25a9e9..33c6b53e27 100644 --- a/src/include/utils/elog.h +++ b/src/include/utils/elog.h @@ -4,7 +4,7 @@ * POSTGRES error reporting/logging definitions. * * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/include/utils/elog.h @@ -70,6 +70,23 @@ /* SQLSTATE codes for errors are defined in a separate file */ #include "utils/errcodes.h" +/* + * Provide a way to prevent "errno" from being accidentally used inside an + * elog() or ereport() invocation. Since we know that some operating systems + * define errno as something involving a function call, we'll put a local + * variable of the same name as that function in the local scope to force a + * compile error. On platforms that don't define errno in that way, nothing + * happens, so we get no warning ... but we can live with that as long as it + * happens on some popular platforms. + */ +#if defined(errno) && defined(__linux__) +#define pg_prevent_errno_in_scope() int __errno_location pg_attribute_unused() +#elif defined(errno) && (defined(__darwin__) || defined(__freebsd__)) +#define pg_prevent_errno_in_scope() int __error pg_attribute_unused() +#else +#define pg_prevent_errno_in_scope() +#endif + /*---------- * New-style error reporting API: to be used in this way: @@ -103,6 +120,7 @@ #ifdef HAVE__BUILTIN_CONSTANT_P #define ereport_domain(elevel, domain, rest) \ do { \ + pg_prevent_errno_in_scope(); \ if (errstart(elevel, __FILE__, __LINE__, PG_FUNCNAME_MACRO, domain)) \ errfinish rest; \ if (__builtin_constant_p(elevel) && (elevel) >= ERROR) \ @@ -112,6 +130,7 @@ #define ereport_domain(elevel, domain, rest) \ do { \ const int elevel_ = (elevel); \ + pg_prevent_errno_in_scope(); \ if (errstart(elevel_, __FILE__, __LINE__, PG_FUNCNAME_MACRO, domain)) \ errfinish rest; \ if (elevel_ >= ERROR) \ @@ -188,9 +207,8 @@ extern int getinternalerrposition(void); * elog(ERROR, "portal \"%s\" not found", stmt->portalname); *---------- */ -#ifdef HAVE__VA_ARGS /* - * If we have variadic macros, we can give the compiler a hint about the + * Using variadic macros, we can give the compiler a hint about the * call not returning when elevel >= ERROR. See comments for ereport(). * Note that historically elog() has called elog_start (which saves errno) * before evaluating "elevel", so we preserve that behavior here. @@ -198,6 +216,7 @@ extern int getinternalerrposition(void); #ifdef HAVE__BUILTIN_CONSTANT_P #define elog(elevel, ...) 
\ do { \ + pg_prevent_errno_in_scope(); \ elog_start(__FILE__, __LINE__, PG_FUNCNAME_MACRO); \ elog_finish(elevel, __VA_ARGS__); \ if (__builtin_constant_p(elevel) && (elevel) >= ERROR) \ @@ -206,6 +225,7 @@ extern int getinternalerrposition(void); #else /* !HAVE__BUILTIN_CONSTANT_P */ #define elog(elevel, ...) \ do { \ + pg_prevent_errno_in_scope(); \ elog_start(__FILE__, __LINE__, PG_FUNCNAME_MACRO); \ { \ const int elevel_ = (elevel); \ @@ -215,11 +235,6 @@ extern int getinternalerrposition(void); } \ } while(0) #endif /* HAVE__BUILTIN_CONSTANT_P */ -#else /* !HAVE__VA_ARGS */ -#define elog \ - elog_start(__FILE__, __LINE__, PG_FUNCNAME_MACRO), \ - elog_finish -#endif /* HAVE__VA_ARGS */ extern void elog_start(const char *filename, int lineno, const char *funcname); extern void elog_finish(int elevel, const char *fmt,...) pg_attribute_printf(2, 3); diff --git a/src/include/utils/evtcache.h b/src/include/utils/evtcache.h index 9774eac5a6..0d0dd3d817 100644 --- a/src/include/utils/evtcache.h +++ b/src/include/utils/evtcache.h @@ -1,13 +1,13 @@ /*------------------------------------------------------------------------- * - * evtcache.c + * evtcache.h * Special-purpose cache for event trigger data. * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * IDENTIFICATION - * src/backend/utils/cache/evtcache.c + * src/include/utils/evtcache.h * *------------------------------------------------------------------------- */ diff --git a/src/include/utils/expandeddatum.h b/src/include/utils/expandeddatum.h index 7116b860cc..3361bb25ad 100644 --- a/src/include/utils/expandeddatum.h +++ b/src/include/utils/expandeddatum.h @@ -34,7 +34,7 @@ * value if they fail partway through. * * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/include/utils/expandeddatum.h diff --git a/src/include/utils/expandedrecord.h b/src/include/utils/expandedrecord.h new file mode 100644 index 0000000000..c999f44f38 --- /dev/null +++ b/src/include/utils/expandedrecord.h @@ -0,0 +1,231 @@ +/*------------------------------------------------------------------------- + * + * expandedrecord.h + * Declarations for composite expanded objects. + * + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group + * Portions Copyright (c) 1994, Regents of the University of California + * + * src/include/utils/expandedrecord.h + * + *------------------------------------------------------------------------- + */ +#ifndef EXPANDEDRECORD_H +#define EXPANDEDRECORD_H + +#include "access/htup.h" +#include "access/tupdesc.h" +#include "fmgr.h" +#include "utils/expandeddatum.h" + + +/* + * An expanded record is contained within a private memory context (as + * all expanded objects must be) and has a control structure as below. + * + * The expanded record might contain a regular "flat" tuple if that was the + * original input and we've not modified it. Otherwise, the contents are + * represented by Datum/isnull arrays plus type information. We could also + * have both forms, if we've deconstructed the original tuple for access + * purposes but not yet changed it. For pass-by-reference field types, the + * Datums would point into the flat tuple in this situation. 
Once we start + * modifying tuple fields, new pass-by-ref fields are separately palloc'd + * within the memory context. + * + * It's possible to build an expanded record that references a "flat" tuple + * stored externally, if the caller can guarantee that that tuple will not + * change for the lifetime of the expanded record. (This frammish is mainly + * meant to avoid unnecessary data copying in trigger functions.) + */ +#define ER_MAGIC 1384727874 /* ID for debugging crosschecks */ + +typedef struct ExpandedRecordHeader +{ + /* Standard header for expanded objects */ + ExpandedObjectHeader hdr; + + /* Magic value identifying an expanded record (for debugging only) */ + int er_magic; + + /* Assorted flag bits */ + int flags; +#define ER_FLAG_FVALUE_VALID 0x0001 /* fvalue is up to date? */ +#define ER_FLAG_FVALUE_ALLOCED 0x0002 /* fvalue is local storage? */ +#define ER_FLAG_DVALUES_VALID 0x0004 /* dvalues/dnulls are up to date? */ +#define ER_FLAG_DVALUES_ALLOCED 0x0008 /* any field values local storage? */ +#define ER_FLAG_HAVE_EXTERNAL 0x0010 /* any field values are external? */ +#define ER_FLAG_TUPDESC_ALLOCED 0x0020 /* tupdesc is local storage? */ +#define ER_FLAG_IS_DOMAIN 0x0040 /* er_decltypeid is domain? */ +#define ER_FLAG_IS_DUMMY 0x0080 /* this header is dummy (see below) */ +/* flag bits that are not to be cleared when replacing tuple data: */ +#define ER_FLAGS_NON_DATA \ + (ER_FLAG_TUPDESC_ALLOCED | ER_FLAG_IS_DOMAIN | ER_FLAG_IS_DUMMY) + + /* Declared type of the record variable (could be a domain type) */ + Oid er_decltypeid; + + /* + * Actual composite type/typmod; never a domain (if ER_FLAG_IS_DOMAIN, + * these identify the composite base type). These will match + * er_tupdesc->tdtypeid/tdtypmod, as well as the header fields of + * composite datums made from or stored in this expanded record. + */ + Oid er_typeid; /* type OID of the composite type */ + int32 er_typmod; /* typmod of the composite type */ + + /* + * Tuple descriptor, if we have one, else NULL. This may point to a + * reference-counted tupdesc originally belonging to the typcache, in + * which case we use a memory context reset callback to release the + * refcount. It can also be locally allocated in this object's private + * context (in which case ER_FLAG_TUPDESC_ALLOCED is set). + */ + TupleDesc er_tupdesc; + + /* + * Unique-within-process identifier for the tupdesc (see typcache.h). This + * field will never be equal to INVALID_TUPLEDESC_IDENTIFIER. + */ + uint64 er_tupdesc_id; + + /* + * If we have a Datum-array representation of the record, it's kept here; + * else ER_FLAG_DVALUES_VALID is not set, and dvalues/dnulls may be NULL + * if they've not yet been allocated. If allocated, the dvalues and + * dnulls arrays are palloc'd within the object private context, and are + * of length matching er_tupdesc->natts. For pass-by-ref field types, + * dvalues entries might point either into the fstartptr..fendptr area, or + * to separately palloc'd chunks. + */ + Datum *dvalues; /* array of Datums */ + bool *dnulls; /* array of is-null flags for Datums */ + int nfields; /* length of above arrays */ + + /* + * flat_size is the current space requirement for the flat equivalent of + * the expanded record, if known; otherwise it's 0. We store this to make + * consecutive calls of get_flat_size cheap. If flat_size is not 0, the + * component values data_len, hoff, and hasnull must be valid too. 
+ */ + Size flat_size; + + Size data_len; /* data len within flat_size */ + int hoff; /* header offset */ + bool hasnull; /* null bitmap needed? */ + + /* + * fvalue points to the flat representation if we have one, else it is + * NULL. If the flat representation is valid (up to date) then + * ER_FLAG_FVALUE_VALID is set. Even if we've outdated the flat + * representation due to changes of user fields, it can still be used to + * fetch system column values. If we have a flat representation then + * fstartptr/fendptr point to the start and end+1 of its data area; this + * is so that we can tell which Datum pointers point into the flat + * representation rather than being pointers to separately palloc'd data. + */ + HeapTuple fvalue; /* might or might not be private storage */ + char *fstartptr; /* start of its data area */ + char *fendptr; /* end+1 of its data area */ + + /* Some operations on the expanded record need a short-lived context */ + MemoryContext er_short_term_cxt; /* short-term memory context */ + + /* Working state for domain checking, used if ER_FLAG_IS_DOMAIN is set */ + struct ExpandedRecordHeader *er_dummy_header; /* dummy record header */ + void *er_domaininfo; /* cache space for domain_check() */ + + /* Callback info (it's active if er_mcb.arg is not NULL) */ + MemoryContextCallback er_mcb; +} ExpandedRecordHeader; + +/* fmgr macros for expanded record objects */ +#define PG_GETARG_EXPANDED_RECORD(n) DatumGetExpandedRecord(PG_GETARG_DATUM(n)) +#define ExpandedRecordGetDatum(erh) EOHPGetRWDatum(&(erh)->hdr) +#define ExpandedRecordGetRODatum(erh) EOHPGetRODatum(&(erh)->hdr) +#define PG_RETURN_EXPANDED_RECORD(x) PG_RETURN_DATUM(ExpandedRecordGetDatum(x)) + +/* assorted other macros */ +#define ExpandedRecordIsEmpty(erh) \ + (((erh)->flags & (ER_FLAG_DVALUES_VALID | ER_FLAG_FVALUE_VALID)) == 0) +#define ExpandedRecordIsDomain(erh) \ + (((erh)->flags & ER_FLAG_IS_DOMAIN) != 0) + +/* this can substitute for TransferExpandedObject() when we already have erh */ +#define TransferExpandedRecord(erh, cxt) \ + MemoryContextSetParent((erh)->hdr.eoh_context, cxt) + +/* information returned by expanded_record_lookup_field() */ +typedef struct ExpandedRecordFieldInfo +{ + int fnumber; /* field's attr number in record */ + Oid ftypeid; /* field's type/typmod info */ + int32 ftypmod; + Oid fcollation; /* field's collation if any */ +} ExpandedRecordFieldInfo; + +/* + * prototypes for functions defined in expandedrecord.c + */ +extern ExpandedRecordHeader *make_expanded_record_from_typeid(Oid type_id, int32 typmod, + MemoryContext parentcontext); +extern ExpandedRecordHeader *make_expanded_record_from_tupdesc(TupleDesc tupdesc, + MemoryContext parentcontext); +extern ExpandedRecordHeader *make_expanded_record_from_exprecord(ExpandedRecordHeader *olderh, + MemoryContext parentcontext); +extern void expanded_record_set_tuple(ExpandedRecordHeader *erh, + HeapTuple tuple, bool copy, bool expand_external); +extern Datum make_expanded_record_from_datum(Datum recorddatum, + MemoryContext parentcontext); +extern TupleDesc expanded_record_fetch_tupdesc(ExpandedRecordHeader *erh); +extern HeapTuple expanded_record_get_tuple(ExpandedRecordHeader *erh); +extern ExpandedRecordHeader *DatumGetExpandedRecord(Datum d); +extern void deconstruct_expanded_record(ExpandedRecordHeader *erh); +extern bool expanded_record_lookup_field(ExpandedRecordHeader *erh, + const char *fieldname, + ExpandedRecordFieldInfo *finfo); +extern Datum expanded_record_fetch_field(ExpandedRecordHeader *erh, int fnumber, + bool 
*isnull); +extern void expanded_record_set_field_internal(ExpandedRecordHeader *erh, + int fnumber, + Datum newValue, bool isnull, + bool expand_external, + bool check_constraints); +extern void expanded_record_set_fields(ExpandedRecordHeader *erh, + const Datum *newValues, const bool *isnulls, + bool expand_external); + +/* outside code should never call expanded_record_set_field_internal as such */ +#define expanded_record_set_field(erh, fnumber, newValue, isnull, expand_external) \ + expanded_record_set_field_internal(erh, fnumber, newValue, isnull, expand_external, true) + +/* + * Inline-able fast cases. The expanded_record_fetch_xxx functions above + * handle the general cases. + */ + +/* Get the tupdesc for the expanded record's actual type */ +static inline TupleDesc +expanded_record_get_tupdesc(ExpandedRecordHeader *erh) +{ + if (likely(erh->er_tupdesc != NULL)) + return erh->er_tupdesc; + else + return expanded_record_fetch_tupdesc(erh); +} + +/* Get value of record field */ +static inline Datum +expanded_record_get_field(ExpandedRecordHeader *erh, int fnumber, + bool *isnull) +{ + if ((erh->flags & ER_FLAG_DVALUES_VALID) && + likely(fnumber > 0 && fnumber <= erh->nfields)) + { + *isnull = erh->dnulls[fnumber - 1]; + return erh->dvalues[fnumber - 1]; + } + else + return expanded_record_fetch_field(erh, fnumber, isnull); +} + +#endif /* EXPANDEDRECORD_H */ diff --git a/src/include/utils/float.h b/src/include/utils/float.h new file mode 100644 index 0000000000..05e1b27637 --- /dev/null +++ b/src/include/utils/float.h @@ -0,0 +1,376 @@ +/*------------------------------------------------------------------------- + * + * float.h + * Definitions for the built-in floating-point types + * + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group + * Portions Copyright (c) 1994, Regents of the University of California + * + * + * IDENTIFICATION + * src/include/utils/float.h + * + *------------------------------------------------------------------------- + */ +#ifndef FLOAT_H +#define FLOAT_H + +#include + +#ifndef M_PI +/* From my RH5.2 gcc math.h file - thomas 2000-04-03 */ +#define M_PI 3.14159265358979323846 +#endif + +/* Radians per degree, a.k.a. PI / 180 */ +#define RADIANS_PER_DEGREE 0.0174532925199432957692 + +/* Visual C++ etc lacks NAN, and won't accept 0.0/0.0. */ +#if defined(WIN32) && !defined(NAN) +static const uint32 nan[2] = {0xffffffff, 0x7fffffff}; + +#define NAN (*(const float8 *) nan) +#endif + +extern PGDLLIMPORT int extra_float_digits; + +/* + * Utility functions in float.c + */ +extern int is_infinite(float8 val); +extern float8 float8in_internal(char *num, char **endptr_p, + const char *type_name, const char *orig_string); +extern char *float8out_internal(float8 num); +extern int float4_cmp_internal(float4 a, float4 b); +extern int float8_cmp_internal(float8 a, float8 b); + +/* + * Routines to provide reasonably platform-independent handling of + * infinity and NaN + * + * We assume that isinf() and isnan() are available and work per spec. + * (On some platforms, we have to supply our own; see src/port.) However, + * generating an Infinity or NaN in the first place is less well standardized; + * pre-C99 systems tend not to have C99's INFINITY and NaN macros. We + * centralize our workarounds for this here. + */ + +/* + * The funny placements of the two #pragmas is necessary because of a + * long lived bug in the Microsoft compilers. 
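As a usage illustration for the expandedrecord.h API above (not part of the patch): build_int_record, its rectypid argument, and the assumption that field 1 of that composite type is int4 are hypothetical; the functions and macros are the ones declared in the new header.

/* Sketch: create an expanded record, set field 1, and read it back. */
static Datum
build_int_record(Oid rectypid, MemoryContext cxt)
{
	ExpandedRecordHeader *erh;
	bool		isnull;

	erh = make_expanded_record_from_typeid(rectypid, -1, cxt);

	/* assumes field 1 of the hypothetical type is int4 */
	expanded_record_set_field(erh, 1, Int32GetDatum(42), false, false);

	/* the inline fast path reads straight from the dvalues[]/dnulls[] arrays */
	(void) expanded_record_get_field(erh, 1, &isnull);

	return ExpandedRecordGetDatum(erh);	/* read-write expanded datum */
}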
+ * See http://support.microsoft.com/kb/120968/en-us for details + */ +#if (_MSC_VER >= 1800) +#pragma warning(disable:4756) +#endif +static inline float4 +get_float4_infinity(void) +{ +#ifdef INFINITY + /* C99 standard way */ + return (float4) INFINITY; +#else +#if (_MSC_VER >= 1800) +#pragma warning(default:4756) +#endif + + /* + * On some platforms, HUGE_VAL is an infinity, elsewhere it's just the + * largest normal float8. We assume forcing an overflow will get us a + * true infinity. + */ + return (float4) (HUGE_VAL * HUGE_VAL); +#endif +} + +static inline float8 +get_float8_infinity(void) +{ +#ifdef INFINITY + /* C99 standard way */ + return (float8) INFINITY; +#else + + /* + * On some platforms, HUGE_VAL is an infinity, elsewhere it's just the + * largest normal float8. We assume forcing an overflow will get us a + * true infinity. + */ + return (float8) (HUGE_VAL * HUGE_VAL); +#endif +} + +static inline float4 +get_float4_nan(void) +{ +#ifdef NAN + /* C99 standard way */ + return (float4) NAN; +#else + /* Assume we can get a NAN via zero divide */ + return (float4) (0.0 / 0.0); +#endif +} + +static inline float8 +get_float8_nan(void) +{ + /* (float8) NAN doesn't work on some NetBSD/MIPS releases */ +#if defined(NAN) && !(defined(__NetBSD__) && defined(__mips__)) + /* C99 standard way */ + return (float8) NAN; +#else + /* Assume we can get a NaN via zero divide */ + return (float8) (0.0 / 0.0); +#endif +} + +/* + * Checks to see if a float4/8 val has underflowed or overflowed + */ + +static inline void +check_float4_val(const float4 val, const bool inf_is_valid, + const bool zero_is_valid) +{ + if (!inf_is_valid && unlikely(isinf(val))) + ereport(ERROR, + (errcode(ERRCODE_NUMERIC_VALUE_OUT_OF_RANGE), + errmsg("value out of range: overflow"))); + + if (!zero_is_valid && unlikely(val == 0.0)) + ereport(ERROR, + (errcode(ERRCODE_NUMERIC_VALUE_OUT_OF_RANGE), + errmsg("value out of range: underflow"))); +} + +static inline void +check_float8_val(const float8 val, const bool inf_is_valid, + const bool zero_is_valid) +{ + if (!inf_is_valid && unlikely(isinf(val))) + ereport(ERROR, + (errcode(ERRCODE_NUMERIC_VALUE_OUT_OF_RANGE), + errmsg("value out of range: overflow"))); + + if (!zero_is_valid && unlikely(val == 0.0)) + ereport(ERROR, + (errcode(ERRCODE_NUMERIC_VALUE_OUT_OF_RANGE), + errmsg("value out of range: underflow"))); +} + +/* + * Routines for operations with the checks above + * + * There isn't any way to check for underflow of addition/subtraction + * because numbers near the underflow value have already been rounded to + * the point where we can't detect that the two values were originally + * different, e.g. on x86, '1e-45'::float4 == '2e-45'::float4 == + * 1.4013e-45. 
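A standalone demonstration (not part of the patch) of the underflow caveat described in the comment above; it relies only on IEEE-754 single-precision rounding, matching the comment's own x86 example.

#include <stdio.h>

int
main(void)
{
	float		a = 1e-45f;		/* both literals round to the smallest */
	float		b = 2e-45f;		/* denormal, ~1.4013e-45 */

	printf("a == b: %d\n", a == b);					/* prints 1 */
	printf("(a - b) == 0: %d\n", (a - b) == 0.0f);	/* prints 1: underflow is invisible */
	return 0;
}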
+ */ + +static inline float4 +float4_pl(const float4 val1, const float4 val2) +{ + float4 result; + + result = val1 + val2; + check_float4_val(result, isinf(val1) || isinf(val2), true); + + return result; +} + +static inline float8 +float8_pl(const float8 val1, const float8 val2) +{ + float8 result; + + result = val1 + val2; + check_float8_val(result, isinf(val1) || isinf(val2), true); + + return result; +} + +static inline float4 +float4_mi(const float4 val1, const float4 val2) +{ + float4 result; + + result = val1 - val2; + check_float4_val(result, isinf(val1) || isinf(val2), true); + + return result; +} + +static inline float8 +float8_mi(const float8 val1, const float8 val2) +{ + float8 result; + + result = val1 - val2; + check_float8_val(result, isinf(val1) || isinf(val2), true); + + return result; +} + +static inline float4 +float4_mul(const float4 val1, const float4 val2) +{ + float4 result; + + result = val1 * val2; + check_float4_val(result, isinf(val1) || isinf(val2), + val1 == 0.0f || val2 == 0.0f); + + return result; +} + +static inline float8 +float8_mul(const float8 val1, const float8 val2) +{ + float8 result; + + result = val1 * val2; + check_float8_val(result, isinf(val1) || isinf(val2), + val1 == 0.0 || val2 == 0.0); + + return result; +} + +static inline float4 +float4_div(const float4 val1, const float4 val2) +{ + float4 result; + + if (val2 == 0.0f) + ereport(ERROR, + (errcode(ERRCODE_DIVISION_BY_ZERO), + errmsg("division by zero"))); + + result = val1 / val2; + check_float4_val(result, isinf(val1) || isinf(val2), val1 == 0.0f); + + return result; +} + +static inline float8 +float8_div(const float8 val1, const float8 val2) +{ + float8 result; + + if (val2 == 0.0) + ereport(ERROR, + (errcode(ERRCODE_DIVISION_BY_ZERO), + errmsg("division by zero"))); + + result = val1 / val2; + check_float8_val(result, isinf(val1) || isinf(val2), val1 == 0.0); + + return result; +} + +/* + * Routines for NaN-aware comparisons + * + * We consider all NaNs to be equal and larger than any non-NaN. This is + * somewhat arbitrary; the important thing is to have a consistent sort + * order. + */ + +static inline bool +float4_eq(const float4 val1, const float4 val2) +{ + return isnan(val1) ? isnan(val2) : !isnan(val2) && val1 == val2; +} + +static inline bool +float8_eq(const float8 val1, const float8 val2) +{ + return isnan(val1) ? isnan(val2) : !isnan(val2) && val1 == val2; +} + +static inline bool +float4_ne(const float4 val1, const float4 val2) +{ + return isnan(val1) ? !isnan(val2) : isnan(val2) || val1 != val2; +} + +static inline bool +float8_ne(const float8 val1, const float8 val2) +{ + return isnan(val1) ? 
!isnan(val2) : isnan(val2) || val1 != val2; +} + +static inline bool +float4_lt(const float4 val1, const float4 val2) +{ + return !isnan(val1) && (isnan(val2) || val1 < val2); +} + +static inline bool +float8_lt(const float8 val1, const float8 val2) +{ + return !isnan(val1) && (isnan(val2) || val1 < val2); +} + +static inline bool +float4_le(const float4 val1, const float4 val2) +{ + return isnan(val2) || (!isnan(val1) && val1 <= val2); +} + +static inline bool +float8_le(const float8 val1, const float8 val2) +{ + return isnan(val2) || (!isnan(val1) && val1 <= val2); +} + +static inline bool +float4_gt(const float4 val1, const float4 val2) +{ + return !isnan(val2) && (isnan(val1) || val1 > val2); +} + +static inline bool +float8_gt(const float8 val1, const float8 val2) +{ + return !isnan(val2) && (isnan(val1) || val1 > val2); +} + +static inline bool +float4_ge(const float4 val1, const float4 val2) +{ + return isnan(val1) || (!isnan(val2) && val1 >= val2); +} + +static inline bool +float8_ge(const float8 val1, const float8 val2) +{ + return isnan(val1) || (!isnan(val2) && val1 >= val2); +} + +static inline float4 +float4_min(const float4 val1, const float4 val2) +{ + return float4_lt(val1, val2) ? val1 : val2; +} + +static inline float8 +float8_min(const float8 val1, const float8 val2) +{ + return float8_lt(val1, val2) ? val1 : val2; +} + +static inline float4 +float4_max(const float4 val1, const float4 val2) +{ + return float4_gt(val1, val2) ? val1 : val2; +} + +static inline float8 +float8_max(const float8 val1, const float8 val2) +{ + return float8_gt(val1, val2) ? val1 : val2; +} + +#endif /* FLOAT_H */ diff --git a/src/include/utils/fmgrtab.h b/src/include/utils/fmgrtab.h index 6130ef8f9c..6122ed3e97 100644 --- a/src/include/utils/fmgrtab.h +++ b/src/include/utils/fmgrtab.h @@ -3,7 +3,7 @@ * fmgrtab.h * The function manager's table of internal functions. * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/include/utils/fmgrtab.h @@ -13,22 +13,22 @@ #ifndef FMGRTAB_H #define FMGRTAB_H +#include "access/transam.h" #include "fmgr.h" /* * This table stores info about all the built-in functions (ie, functions - * that are compiled into the Postgres executable). The table entries are - * required to appear in Oid order, so that binary search can be used. + * that are compiled into the Postgres executable). */ typedef struct { Oid foid; /* OID of the function */ - const char *funcName; /* C name of the function */ short nargs; /* 0..FUNC_MAX_ARGS, or -1 if variable count */ bool strict; /* T if function is "strict" */ bool retset; /* T if function returns a set */ + const char *funcName; /* C name of the function */ PGFunction func; /* pointer to compiled function */ } FmgrBuiltin; @@ -36,4 +36,11 @@ extern const FmgrBuiltin fmgr_builtins[]; extern const int fmgr_nbuiltins; /* number of entries in table */ +/* + * Mapping from a builtin function's oid to the index in the fmgr_builtins + * array. 
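The NaN-aware helpers above give a total order in which all NaNs compare equal and sort after every non-NaN. A sketch (not part of the patch) of a qsort(3) comparator built on float8_lt()/float8_gt(); the comparator name is hypothetical.

static int
float8_qsort_cmp(const void *a, const void *b)
{
	float8		fa = *(const float8 *) a;
	float8		fb = *(const float8 *) b;

	if (float8_lt(fa, fb))
		return -1;
	if (float8_gt(fa, fb))
		return 1;
	return 0;			/* equal, including NaN vs NaN */
}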
+ */ +#define InvalidOidBuiltinMapping PG_UINT16_MAX +extern const uint16 fmgr_builtin_oid_index[FirstBootstrapObjectId]; + #endif /* FMGRTAB_H */ diff --git a/src/include/utils/formatting.h b/src/include/utils/formatting.h index 8eaf2c3052..a9f5548b46 100644 --- a/src/include/utils/formatting.h +++ b/src/include/utils/formatting.h @@ -4,7 +4,7 @@ * src/include/utils/formatting.h * * - * Portions Copyright (c) 1999-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1999-2018, PostgreSQL Global Development Group * * The PostgreSQL routines for a DateTime/int/float/numeric formatting, * inspire with Oracle TO_CHAR() / TO_DATE() / TO_NUMBER() routines. diff --git a/src/include/utils/freepage.h b/src/include/utils/freepage.h index c370c733ee..cbbf267fb3 100644 --- a/src/include/utils/freepage.h +++ b/src/include/utils/freepage.h @@ -3,7 +3,7 @@ * freepage.h * Management of page-organized free memory. * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/include/utils/freepage.h diff --git a/src/include/utils/geo_decls.h b/src/include/utils/geo_decls.h index 44c6381b85..9f8505804e 100644 --- a/src/include/utils/geo_decls.h +++ b/src/include/utils/geo_decls.h @@ -3,14 +3,11 @@ * geo_decls.h - Declarations for various 2D constructs. * * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/include/utils/geo_decls.h * - * NOTE - * These routines do *not* use the float types from adt/. - * * XXX These routines were not written by a numerical analyst. * * XXX I have made some attempt to flesh out the operators @@ -21,14 +18,14 @@ #ifndef GEO_DECLS_H #define GEO_DECLS_H -#include - #include "fmgr.h" /*-------------------------------------------------------------------- * Useful floating point utilities and constants. - *-------------------------------------------------------------------*/ - + *------------------------------------------------------------------- + * + * XXX: They are not NaN-aware. + */ #define EPSILON 1.0E-06 @@ -57,7 +54,7 @@ *-------------------------------------------------------------------*/ typedef struct { - double x, + float8 x, y; } Point; @@ -89,7 +86,7 @@ typedef struct *-------------------------------------------------------------------*/ typedef struct { - double A, + float8 A, B, C; } LINE; @@ -124,7 +121,7 @@ typedef struct typedef struct { Point center; - double radius; + float8 radius; } CIRCLE; /* @@ -178,9 +175,6 @@ typedef struct * in geo_ops.c */ -/* private point routines */ -extern double point_dt(Point *pt1, Point *pt2); -extern double point_sl(Point *pt1, Point *pt2); -extern double pg_hypot(double x, double y); +extern float8 pg_hypot(float8 x, float8 y); #endif /* GEO_DECLS_H */ diff --git a/src/include/utils/guc.h b/src/include/utils/guc.h index c1870d2130..f462eabe59 100644 --- a/src/include/utils/guc.h +++ b/src/include/utils/guc.h @@ -4,7 +4,7 @@ * External declarations pertaining to backend/utils/misc/guc.c and * backend/utils/misc/guc-file.l * - * Copyright (c) 2000-2017, PostgreSQL Global Development Group + * Copyright (c) 2000-2018, PostgreSQL Global Development Group * Written by Peter Eisentraut . 
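A sketch (not part of the patch) of the O(1) lookup that the new fmgr_builtin_oid_index[] array enables, replacing the old requirement that fmgr_builtins[] be binary-searched in OID order; builtin_by_oid is a hypothetical name, while the array, the sentinel, and the FmgrBuiltin fields come from fmgrtab.h above.

static const FmgrBuiltin *
builtin_by_oid(Oid foid)
{
	uint16		index;

	if (foid >= FirstBootstrapObjectId)
		return NULL;			/* out of range of the index array */

	index = fmgr_builtin_oid_index[foid];
	if (index == InvalidOidBuiltinMapping)
		return NULL;			/* OID not assigned to a built-in */

	return &fmgr_builtins[index];
}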
* * src/include/utils/guc.h @@ -219,6 +219,7 @@ typedef enum #define GUC_UNIT_BLOCKS 0x2000 /* value is in blocks */ #define GUC_UNIT_XBLOCKS 0x3000 /* value is in xlog blocks */ #define GUC_UNIT_MB 0x4000 /* value is in megabytes */ +#define GUC_UNIT_BYTE 0x8000 /* value is in bytes */ #define GUC_UNIT_MEMORY 0xF000 /* mask for size-related units */ #define GUC_UNIT_MS 0x10000 /* value is in milliseconds */ @@ -244,10 +245,11 @@ extern bool log_btree_build_stats; extern PGDLLIMPORT bool check_function_bodies; extern bool default_with_oids; +extern bool session_auth_is_superuser; extern int log_min_error_statement; -extern int log_min_messages; -extern int client_min_messages; +extern PGDLLIMPORT int log_min_messages; +extern PGDLLIMPORT int client_min_messages; extern int log_min_duration_statement; extern int log_temp_files; @@ -256,12 +258,12 @@ extern int temp_file_limit; extern int num_temp_buffers; extern char *cluster_name; -extern char *ConfigFileName; +extern PGDLLIMPORT char *ConfigFileName; extern char *HbaFileName; extern char *IdentFileName; extern char *external_pid_file; -extern char *application_name; +extern PGDLLIMPORT char *application_name; extern int tcp_keepalives_idle; extern int tcp_keepalives_interval; @@ -345,8 +347,9 @@ extern void DefineCustomEnumVariable( extern void EmitWarningsOnPlaceholders(const char *className); extern const char *GetConfigOption(const char *name, bool missing_ok, - bool restrict_superuser); + bool restrict_privileged); extern const char *GetConfigOptionResetString(const char *name); +extern int GetConfigOptionFlags(const char *name, bool missing_ok); extern void ProcessConfigFile(GucContext context); extern void InitializeGUCOptions(void); extern bool SelectConfigFiles(const char *userDoption, const char *progname); diff --git a/src/include/utils/guc_tables.h b/src/include/utils/guc_tables.h index 042f7a0152..668d9efd35 100644 --- a/src/include/utils/guc_tables.h +++ b/src/include/utils/guc_tables.h @@ -5,7 +5,7 @@ * * See src/backend/utils/misc/README for design notes. * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * * src/include/utils/guc_tables.h * @@ -56,7 +56,8 @@ enum config_group FILE_LOCATIONS, CONN_AUTH, CONN_AUTH_SETTINGS, - CONN_AUTH_SECURITY, + CONN_AUTH_AUTH, + CONN_AUTH_SSL, RESOURCES, RESOURCES_MEM, RESOURCES_DISK, diff --git a/src/include/utils/hashutils.h b/src/include/utils/hashutils.h new file mode 100644 index 0000000000..5e9fe65012 --- /dev/null +++ b/src/include/utils/hashutils.h @@ -0,0 +1,53 @@ +/* + * Utilities for working with hash values. + * + * Portions Copyright (c) 2017-2018, PostgreSQL Global Development Group + */ + +#ifndef HASHUTILS_H +#define HASHUTILS_H + +/* + * Combine two 32-bit hash values, resulting in another hash value, with + * decent bit mixing. + * + * Similar to boost's hash_combine(). + */ +static inline uint32 +hash_combine(uint32 a, uint32 b) +{ + a ^= b + 0x9e3779b9 + (a << 6) + (a >> 2); + return a; +} + +/* + * Combine two 64-bit hash values, resulting in another hash value, using the + * same kind of technique as hash_combine(). Testing shows that this also + * produces good bit mixing. + */ +static inline uint64 +hash_combine64(uint64 a, uint64 b) +{ + /* 0x49a0f4dd15e5a8e3 is 64bit random data */ + a ^= b + UINT64CONST(0x49a0f4dd15e5a8e3) + (a << 54) + (a >> 7); + return a; +} + +/* + * Simple inline murmur hash implementation hashing a 32 bit integer, for + * performance. 
+ */ +static inline uint32 +murmurhash32(uint32 data) +{ + uint32 h = data; + + h ^= h >> 16; + h *= 0x85ebca6b; + h ^= h >> 13; + h *= 0xc2b2ae35; + h ^= h >> 16; + return h; +} + +#endif /* HASHUTILS_H */ diff --git a/src/include/utils/help_config.h b/src/include/utils/help_config.h index 3f433d10d7..f35d4da8a8 100644 --- a/src/include/utils/help_config.h +++ b/src/include/utils/help_config.h @@ -3,7 +3,7 @@ * help_config.h * Interface to the --help-config option of main.c * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * * src/include/utils/help_config.h * diff --git a/src/include/utils/hsearch.h b/src/include/utils/hsearch.h index bc5873ed20..8357faac5a 100644 --- a/src/include/utils/hsearch.h +++ b/src/include/utils/hsearch.h @@ -4,7 +4,7 @@ * exported definitions for utils/hash/dynahash.c; see notes therein * * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/include/utils/hsearch.h diff --git a/src/include/utils/index_selfuncs.h b/src/include/utils/index_selfuncs.h index 24f2f3a866..ae2b96943d 100644 --- a/src/include/utils/index_selfuncs.h +++ b/src/include/utils/index_selfuncs.h @@ -9,7 +9,7 @@ * If you make it depend on anything besides access/amapi.h, that's likely * a mistake. * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/include/utils/index_selfuncs.h diff --git a/src/include/utils/inet.h b/src/include/utils/inet.h index f2aa864a35..e3bec398b1 100644 --- a/src/include/utils/inet.h +++ b/src/include/utils/inet.h @@ -4,7 +4,7 @@ * Declarations for operations on INET datatypes. * * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/include/utils/inet.h diff --git a/src/include/utils/int8.h b/src/include/utils/int8.h index 8b78983fee..91171ee4cc 100644 --- a/src/include/utils/int8.h +++ b/src/include/utils/int8.h @@ -4,7 +4,7 @@ * Declarations for operations on 64-bit integers. * * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/include/utils/int8.h diff --git a/src/include/utils/inval.h b/src/include/utils/inval.h index 361543f412..7a66d466f7 100644 --- a/src/include/utils/inval.h +++ b/src/include/utils/inval.h @@ -4,7 +4,7 @@ * POSTGRES cache invalidation dispatcher definitions. * * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/include/utils/inval.h diff --git a/src/include/utils/json.h b/src/include/utils/json.h index e3ffe6fc44..549bd4d287 100644 --- a/src/include/utils/json.h +++ b/src/include/utils/json.h @@ -3,7 +3,7 @@ * json.h * Declarations for JSON data type support. 
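A sketch (not part of the patch) combining the two hashutils.h helpers shown above to hash a hypothetical two-field key.

static uint32
hash_two_fields(uint32 a, uint32 b)
{
	uint32		h = murmurhash32(a);			/* mix the first field */

	return hash_combine(h, murmurhash32(b));	/* fold in the second */
}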
* - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/include/utils/json.h diff --git a/src/include/utils/jsonapi.h b/src/include/utils/jsonapi.h index 4336823de2..6b483a15a6 100644 --- a/src/include/utils/jsonapi.h +++ b/src/include/utils/jsonapi.h @@ -3,7 +3,7 @@ * jsonapi.h * Declarations for JSON API support. * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/include/utils/jsonapi.h @@ -132,19 +132,35 @@ extern JsonLexContext *makeJsonLexContextCstringLen(char *json, */ extern bool IsValidJsonNumber(const char *str, int len); -/* an action that will be applied to each value in iterate_json(b)_string_vaues functions */ +/* + * Flag types for iterate_json(b)_values to specify what elements from a + * json(b) document we want to iterate. + */ +typedef enum JsonToIndex +{ + jtiKey = 0x01, + jtiString = 0x02, + jtiNumeric = 0x04, + jtiBool = 0x08, + jtiAll = jtiKey | jtiString | jtiNumeric | jtiBool +} JsonToIndex; + +/* an action that will be applied to each value in iterate_json(b)_vaues functions */ typedef void (*JsonIterateStringValuesAction) (void *state, char *elem_value, int elem_len); -/* an action that will be applied to each value in transform_json(b)_string_values functions */ +/* an action that will be applied to each value in transform_json(b)_values functions */ typedef text *(*JsonTransformStringValuesAction) (void *state, char *elem_value, int elem_len); -extern void iterate_jsonb_string_values(Jsonb *jb, void *state, - JsonIterateStringValuesAction action); -extern void iterate_json_string_values(text *json, void *action_state, - JsonIterateStringValuesAction action); +extern uint32 parse_jsonb_index_flags(Jsonb *jb); +extern void iterate_jsonb_values(Jsonb *jb, uint32 flags, void *state, + JsonIterateStringValuesAction action); +extern void iterate_json_values(text *json, uint32 flags, void *action_state, + JsonIterateStringValuesAction action); extern Jsonb *transform_jsonb_string_values(Jsonb *jsonb, void *action_state, JsonTransformStringValuesAction transform_action); extern text *transform_json_string_values(text *json, void *action_state, JsonTransformStringValuesAction transform_action); +extern char *JsonEncodeDateTime(char *buf, Datum value, Oid typid); + #endif /* JSONAPI_H */ diff --git a/src/include/utils/jsonb.h b/src/include/utils/jsonb.h index ea9dd17540..27873d4d10 100644 --- a/src/include/utils/jsonb.h +++ b/src/include/utils/jsonb.h @@ -3,7 +3,7 @@ * jsonb.h * Declarations for jsonb data type support. 
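A sketch (not part of the patch) of the new flag-driven iteration API in jsonapi.h above: count_elem and count_keys_and_strings are hypothetical, while JsonToIndex, iterate_jsonb_values() and the callback typedef are taken from the header.

static void
count_elem(void *state, char *elem_value, int elem_len)
{
	(*(int *) state)++;			/* count every visited key/string; value unused */
}

static int
count_keys_and_strings(Jsonb *jb)
{
	int			count = 0;

	iterate_jsonb_values(jb, jtiKey | jtiString, &count, count_elem);
	return count;
}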
* - * Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Copyright (c) 1996-2018, PostgreSQL Global Development Group * * src/include/utils/jsonb.h * @@ -65,10 +65,10 @@ typedef enum #define JGIN_MAXLENGTH 125 /* max length of text part before hashing */ /* Convenience macros */ -#define DatumGetJsonb(d) ((Jsonb *) PG_DETOAST_DATUM(d)) -#define JsonbGetDatum(p) PointerGetDatum(p) -#define PG_GETARG_JSONB(x) DatumGetJsonb(PG_GETARG_DATUM(x)) -#define PG_RETURN_JSONB(x) PG_RETURN_POINTER(x) +#define DatumGetJsonbP(d) ((Jsonb *) PG_DETOAST_DATUM(d)) +#define JsonbPGetDatum(p) PointerGetDatum(p) +#define PG_GETARG_JSONB_P(x) DatumGetJsonbP(PG_GETARG_DATUM(x)) +#define PG_RETURN_JSONB_P(x) PG_RETURN_POINTER(x) typedef struct JsonbPair JsonbPair; typedef struct JsonbValue JsonbValue; @@ -370,6 +370,8 @@ extern Jsonb *JsonbValueToJsonb(JsonbValue *val); extern bool JsonbDeepContains(JsonbIterator **val, JsonbIterator **mContained); extern void JsonbHashScalarValue(const JsonbValue *scalarVal, uint32 *hash); +extern void JsonbHashScalarValueExtended(const JsonbValue *scalarVal, + uint64 *hash, uint64 seed); /* jsonb.c support functions */ extern char *JsonbToCString(StringInfo out, JsonbContainer *in, diff --git a/src/include/utils/logtape.h b/src/include/utils/logtape.h index a1e869b80c..06dc734eb6 100644 --- a/src/include/utils/logtape.h +++ b/src/include/utils/logtape.h @@ -5,7 +5,7 @@ * * See logtape.c for explanations. * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/include/utils/logtape.h @@ -16,15 +16,46 @@ #ifndef LOGTAPE_H #define LOGTAPE_H +#include "storage/sharedfileset.h" + /* LogicalTapeSet is an opaque type whose details are not known outside logtape.c. */ typedef struct LogicalTapeSet LogicalTapeSet; +/* + * The approach tuplesort.c takes to parallel external sorts is that workers, + * whose state is almost the same as independent serial sorts, are made to + * produce a final materialized tape of sorted output in all cases. This is + * frozen, just like any case requiring a final materialized tape. However, + * there is one difference, which is that freezing will also export an + * underlying shared fileset BufFile for sharing. Freezing produces TapeShare + * metadata for the worker when this happens, which is passed along through + * shared memory to leader. + * + * The leader process can then pass an array of TapeShare metadata (one per + * worker participant) to LogicalTapeSetCreate(), alongside a handle to a + * shared fileset, which is sufficient to construct a new logical tapeset that + * consists of each of the tapes materialized by workers. + * + * Note that while logtape.c does create an empty leader tape at the end of the + * tapeset in the leader case, it can never be written to due to a restriction + * in the shared buffile infrastructure. + */ +typedef struct TapeShare +{ + /* + * Currently, all the leader process needs is the location of the + * materialized tape's first block. 
+ */ + long firstblocknumber; +} TapeShare; + /* * prototypes for functions in logtape.c */ -extern LogicalTapeSet *LogicalTapeSetCreate(int ntapes); +extern LogicalTapeSet *LogicalTapeSetCreate(int ntapes, TapeShare *shared, + SharedFileSet *fileset, int worker); extern void LogicalTapeSetClose(LogicalTapeSet *lts); extern void LogicalTapeSetForgetFreeSpace(LogicalTapeSet *lts); extern size_t LogicalTapeRead(LogicalTapeSet *lts, int tapenum, @@ -34,7 +65,8 @@ extern void LogicalTapeWrite(LogicalTapeSet *lts, int tapenum, extern void LogicalTapeRewindForRead(LogicalTapeSet *lts, int tapenum, size_t buffer_size); extern void LogicalTapeRewindForWrite(LogicalTapeSet *lts, int tapenum); -extern void LogicalTapeFreeze(LogicalTapeSet *lts, int tapenum); +extern void LogicalTapeFreeze(LogicalTapeSet *lts, int tapenum, + TapeShare *share); extern size_t LogicalTapeBackspace(LogicalTapeSet *lts, int tapenum, size_t size); extern void LogicalTapeSeek(LogicalTapeSet *lts, int tapenum, diff --git a/src/include/utils/lsyscache.h b/src/include/utils/lsyscache.h index 07208b56ce..ff1705ad2b 100644 --- a/src/include/utils/lsyscache.h +++ b/src/include/utils/lsyscache.h @@ -3,7 +3,7 @@ * lsyscache.h * Convenience routines for common queries in the system catalog cache. * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/include/utils/lsyscache.h @@ -83,12 +83,9 @@ extern List *get_op_btree_interpretation(Oid opno); extern bool equality_ops_are_compatible(Oid opno1, Oid opno2); extern Oid get_opfamily_proc(Oid opfamily, Oid lefttype, Oid righttype, int16 procnum); -extern char *get_attname(Oid relid, AttrNumber attnum); -extern char *get_relid_attribute_name(Oid relid, AttrNumber attnum); +extern char *get_attname(Oid relid, AttrNumber attnum, bool missing_ok); extern AttrNumber get_attnum(Oid relid, const char *attname); -extern char get_attidentity(Oid relid, AttrNumber attnum); extern Oid get_atttype(Oid relid, AttrNumber attnum); -extern int32 get_atttypmod(Oid relid, AttrNumber attnum); extern void get_atttypetypmodcoll(Oid relid, AttrNumber attnum, Oid *typid, int32 *typmod, Oid *collid); extern char *get_collation_name(Oid colloid); @@ -96,6 +93,8 @@ extern char *get_constraint_name(Oid conoid); extern char *get_language_name(Oid langoid, bool missing_ok); extern Oid get_opclass_family(Oid opclass); extern Oid get_opclass_input_type(Oid opclass); +extern bool get_opclass_opfamily_and_input_type(Oid opclass, + Oid *opfamily, Oid *opcintype); extern RegProcedure get_opcode(Oid opno); extern char *get_opname(Oid opno); extern Oid get_op_rettype(Oid opno); @@ -118,6 +117,7 @@ extern bool get_func_retset(Oid funcid); extern bool func_strict(Oid funcid); extern char func_volatile(Oid funcid); extern char func_parallel(Oid funcid); +extern char get_func_prokind(Oid funcid); extern bool get_func_leakproof(Oid funcid); extern float4 get_func_cost(Oid funcid); extern float4 get_func_rows(Oid funcid); @@ -126,6 +126,7 @@ extern char *get_rel_name(Oid relid); extern Oid get_rel_namespace(Oid relid); extern Oid get_rel_type_id(Oid relid); extern char get_rel_relkind(Oid relid); +extern bool get_rel_relispartition(Oid relid); extern Oid get_rel_tablespace(Oid relid); extern char get_rel_persistence(Oid relid); extern Oid get_transform_fromsql(Oid typid, Oid langid, List *trftypes); @@ -176,6 +177,7 @@ extern void 
free_attstatsslot(AttStatsSlot *sslot); extern char *get_namespace_name(Oid nspid); extern char *get_namespace_name_or_temp(Oid nspid); extern Oid get_range_subtype(Oid rangeOid); +extern Oid get_index_column_opclass(Oid index_oid, int attno); #define type_is_array(typid) (get_element_type(typid) != InvalidOid) /* type_is_array_domain accepts both plain arrays and domains over arrays */ diff --git a/src/include/utils/memdebug.h b/src/include/utils/memdebug.h index a73d505be8..c2c58b9cdd 100644 --- a/src/include/utils/memdebug.h +++ b/src/include/utils/memdebug.h @@ -7,7 +7,7 @@ * empty definitions for Valgrind client request macros we use. * * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/include/utils/memdebug.h diff --git a/src/include/utils/memutils.h b/src/include/utils/memutils.h index c553349066..d68010f977 100644 --- a/src/include/utils/memutils.h +++ b/src/include/utils/memutils.h @@ -7,7 +7,7 @@ * of the API of the memory management subsystem. * * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/include/utils/memutils.h @@ -41,7 +41,7 @@ #define AllocSizeIsValid(size) ((Size) (size) <= MaxAllocSize) -#define MaxAllocHugeSize ((Size) -1 >> 1) /* SIZE_MAX / 2 */ +#define MaxAllocHugeSize (SIZE_MAX / 2) #define AllocHugeSizeIsValid(size) ((Size) (size) <= MaxAllocHugeSize) @@ -76,6 +76,7 @@ extern void MemoryContextDelete(MemoryContext context); extern void MemoryContextResetOnly(MemoryContext context); extern void MemoryContextResetChildren(MemoryContext context); extern void MemoryContextDeleteChildren(MemoryContext context); +extern void MemoryContextSetIdentifier(MemoryContext context, const char *id); extern void MemoryContextSetParent(MemoryContext context, MemoryContext new_parent); extern Size GetMemoryChunkSpace(void *pointer); @@ -91,6 +92,10 @@ extern void MemoryContextCheck(MemoryContext context); #endif extern bool MemoryContextContains(MemoryContext context, void *pointer); +/* Handy macro for copying and assigning context ID ... but note double eval */ +#define MemoryContextCopyAndSetIdentifier(cxt, id) \ + MemoryContextSetIdentifier(cxt, MemoryContextStrdup(cxt, id)) + /* * GetMemoryChunkContext * Given a currently-allocated chunk, determine the context @@ -132,8 +137,9 @@ GetMemoryChunkContext(void *pointer) * context creation. It's intended to be called from context-type- * specific creation routines, and noplace else. */ -extern MemoryContext MemoryContextCreate(NodeTag tag, Size size, - MemoryContextMethods *methods, +extern void MemoryContextCreate(MemoryContext node, + NodeTag tag, + const MemoryContextMethods *methods, MemoryContext parent, const char *name); @@ -143,11 +149,26 @@ extern MemoryContext MemoryContextCreate(NodeTag tag, Size size, */ /* aset.c */ -extern MemoryContext AllocSetContextCreate(MemoryContext parent, - const char *name, - Size minContextSize, - Size initBlockSize, - Size maxBlockSize); +extern MemoryContext AllocSetContextCreateInternal(MemoryContext parent, + const char *name, + Size minContextSize, + Size initBlockSize, + Size maxBlockSize); + +/* + * This wrapper macro exists to check for non-constant strings used as context + * names; that's no longer supported. 
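A sketch (not part of the patch) of the new missing_ok argument to get_attname() from the lsyscache.h hunk above; attname_or_placeholder is hypothetical, and it assumes the usual lsyscache convention that NULL is returned when the attribute cannot be found and missing_ok is true.

static const char *
attname_or_placeholder(Oid relid, AttrNumber attnum)
{
	char	   *name = get_attname(relid, attnum, true);	/* missing_ok */

	return name ? name : "(unknown attribute)";
}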
(Use MemoryContextSetIdentifier if you + * want to provide a variable identifier.) + */ +#ifdef HAVE__BUILTIN_CONSTANT_P +#define AllocSetContextCreate(parent, name, ...) \ + (StaticAssertExpr(__builtin_constant_p(name), \ + "memory context names must be constant strings"), \ + AllocSetContextCreateInternal(parent, name, __VA_ARGS__)) +#else +#define AllocSetContextCreate \ + AllocSetContextCreateInternal +#endif /* slab.c */ extern MemoryContext SlabContextCreate(MemoryContext parent, @@ -155,6 +176,11 @@ extern MemoryContext SlabContextCreate(MemoryContext parent, Size blockSize, Size chunkSize); +/* generation.c */ +extern MemoryContext GenerationContextCreate(MemoryContext parent, + const char *name, + Size blockSize); + /* * Recommended default alloc parameters, suitable for "ordinary" contexts * that might hold quite a lot of data. diff --git a/src/include/utils/nabstime.h b/src/include/utils/nabstime.h deleted file mode 100644 index 69133952d1..0000000000 --- a/src/include/utils/nabstime.h +++ /dev/null @@ -1,103 +0,0 @@ -/*------------------------------------------------------------------------- - * - * nabstime.h - * Definitions for the "new" abstime code. - * - * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group - * Portions Copyright (c) 1994, Regents of the University of California - * - * src/include/utils/nabstime.h - * - *------------------------------------------------------------------------- - */ -#ifndef NABSTIME_H -#define NABSTIME_H - -#include - -#include "fmgr.h" -#include "pgtime.h" - - -/* ---------------------------------------------------------------- - * - * time types + support macros - * - * ---------------------------------------------------------------- - */ - -/* - * Although time_t generally is a long int on 64 bit systems, these two - * types must be 4 bytes, because that's what pg_type.h assumes. They - * should be yanked (long) before 2038 and be replaced by timestamp and - * interval. - */ -typedef int32 AbsoluteTime; -typedef int32 RelativeTime; - -typedef struct -{ - int32 status; - AbsoluteTime data[2]; -} TimeIntervalData; - -typedef TimeIntervalData *TimeInterval; - -/* - * Macros for fmgr-callable functions. - */ -#define DatumGetAbsoluteTime(X) ((AbsoluteTime) DatumGetInt32(X)) -#define DatumGetRelativeTime(X) ((RelativeTime) DatumGetInt32(X)) -#define DatumGetTimeInterval(X) ((TimeInterval) DatumGetPointer(X)) - -#define AbsoluteTimeGetDatum(X) Int32GetDatum(X) -#define RelativeTimeGetDatum(X) Int32GetDatum(X) -#define TimeIntervalGetDatum(X) PointerGetDatum(X) - -#define PG_GETARG_ABSOLUTETIME(n) DatumGetAbsoluteTime(PG_GETARG_DATUM(n)) -#define PG_GETARG_RELATIVETIME(n) DatumGetRelativeTime(PG_GETARG_DATUM(n)) -#define PG_GETARG_TIMEINTERVAL(n) DatumGetTimeInterval(PG_GETARG_DATUM(n)) - -#define PG_RETURN_ABSOLUTETIME(x) return AbsoluteTimeGetDatum(x) -#define PG_RETURN_RELATIVETIME(x) return RelativeTimeGetDatum(x) -#define PG_RETURN_TIMEINTERVAL(x) return TimeIntervalGetDatum(x) - -/* - * Reserved values - * Epoch is Unix system time zero, but needs to be kept as a reserved - * value rather than converting to time since timezone calculations - * might move it away from 1970-01-01 00:00:00Z - tgl 97/02/20 - * - * Pre-v6.1 code had large decimal numbers for reserved values. - * These were chosen as special 32-bit bit patterns, - * so redefine them explicitly using these bit patterns. 
- tgl 97/02/24 - */ -#define INVALID_ABSTIME ((AbsoluteTime) 0x7FFFFFFE) /* 2147483647 (2^31 - 1) */ -#define NOEND_ABSTIME ((AbsoluteTime) 0x7FFFFFFC) /* 2147483645 (2^31 - 3) */ -#define NOSTART_ABSTIME ((AbsoluteTime) INT_MIN) /* -2147483648 */ - -#define INVALID_RELTIME ((RelativeTime) 0x7FFFFFFE) /* 2147483647 (2^31 - 1) */ - -#define AbsoluteTimeIsValid(time) \ - ((bool) ((time) != INVALID_ABSTIME)) - -/* - * Because NOSTART_ABSTIME is defined as INT_MIN, there can't be any - * AbsoluteTime values less than it. Therefore, we can code the test - * "time > NOSTART_ABSTIME" as "time != NOSTART_ABSTIME", which avoids - * compiler bugs on some platforms. --- tgl & az, 11/2000 - */ -#define AbsoluteTimeIsReal(time) \ - ((bool) (((AbsoluteTime) (time)) < NOEND_ABSTIME && \ - ((AbsoluteTime) (time)) != NOSTART_ABSTIME)) - -#define RelativeTimeIsValid(time) \ - ((bool) (((RelativeTime) (time)) != INVALID_RELTIME)) - - -/* non-fmgr-callable support routines */ -extern AbsoluteTime GetCurrentAbsoluteTime(void); -extern void abstime2tm(AbsoluteTime time, int *tzp, struct pg_tm *tm, char **tzn); - -#endif /* NABSTIME_H */ diff --git a/src/include/utils/numeric.h b/src/include/utils/numeric.h index 3aa7fef947..cd8da8bdc2 100644 --- a/src/include/utils/numeric.h +++ b/src/include/utils/numeric.h @@ -5,7 +5,7 @@ * * Original coding 1998, Jan Wieck. Heavily revised 2003, Tom Lane. * - * Copyright (c) 1998-2017, PostgreSQL Global Development Group + * Copyright (c) 1998-2018, PostgreSQL Global Development Group * * src/include/utils/numeric.h * diff --git a/src/include/utils/palloc.h b/src/include/utils/palloc.h index a7dc837724..781e948f69 100644 --- a/src/include/utils/palloc.h +++ b/src/include/utils/palloc.h @@ -18,7 +18,7 @@ * everything that should be freed. See utils/mmgr/README for more info. 
* * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/include/utils/palloc.h diff --git a/src/include/utils/partcache.h b/src/include/utils/partcache.h new file mode 100644 index 0000000000..873c60fafd --- /dev/null +++ b/src/include/utils/partcache.h @@ -0,0 +1,96 @@ +/*------------------------------------------------------------------------- + * + * partcache.h + * + * Copyright (c) 1996-2018, PostgreSQL Global Development Group + * + * src/include/utils/partcache.h + * + *------------------------------------------------------------------------- + */ +#ifndef PARTCACHE_H +#define PARTCACHE_H + +#include "access/attnum.h" +#include "fmgr.h" +#include "nodes/pg_list.h" +#include "nodes/primnodes.h" +#include "partitioning/partdefs.h" +#include "utils/relcache.h" + +/* + * Information about the partition key of a relation + */ +typedef struct PartitionKeyData +{ + char strategy; /* partitioning strategy */ + int16 partnatts; /* number of columns in the partition key */ + AttrNumber *partattrs; /* attribute numbers of columns in the + * partition key */ + List *partexprs; /* list of expressions in the partitioning + * key, or NIL */ + + Oid *partopfamily; /* OIDs of operator families */ + Oid *partopcintype; /* OIDs of opclass declared input data types */ + FmgrInfo *partsupfunc; /* lookup info for support funcs */ + + /* Partitioning collation per attribute */ + Oid *partcollation; + + /* Type information per attribute */ + Oid *parttypid; + int32 *parttypmod; + int16 *parttyplen; + bool *parttypbyval; + char *parttypalign; + Oid *parttypcoll; +} PartitionKeyData; + +extern void RelationBuildPartitionKey(Relation relation); +extern void RelationBuildPartitionDesc(Relation rel); +extern List *RelationGetPartitionQual(Relation rel); +extern Expr *get_partition_qual_relid(Oid relid); + +/* + * PartitionKey inquiry functions + */ +static inline int +get_partition_strategy(PartitionKey key) +{ + return key->strategy; +} + +static inline int +get_partition_natts(PartitionKey key) +{ + return key->partnatts; +} + +static inline List * +get_partition_exprs(PartitionKey key) +{ + return key->partexprs; +} + +/* + * PartitionKey inquiry functions - one column + */ +static inline int16 +get_partition_col_attnum(PartitionKey key, int col) +{ + return key->partattrs[col]; +} + +static inline Oid +get_partition_col_typid(PartitionKey key, int col) +{ + return key->parttypid[col]; +} + +static inline int32 +get_partition_col_typmod(PartitionKey key, int col) +{ + return key->parttypmod[col]; +} + +#endif /* PARTCACHE_H */ diff --git a/src/include/utils/pg_crc.h b/src/include/utils/pg_crc.h index 9ea0622321..48bca045c2 100644 --- a/src/include/utils/pg_crc.h +++ b/src/include/utils/pg_crc.h @@ -26,7 +26,7 @@ * * The CRC-32C variant is in port/pg_crc32c.h. 
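A sketch (not part of the patch) that walks a partition key with the inline accessors declared in partcache.h above; print_partition_key is hypothetical, and key is assumed to have been obtained from the owning relation's relcache entry.

static void
print_partition_key(PartitionKey key)
{
	int			natts = get_partition_natts(key);
	int			i;

	elog(DEBUG1, "strategy %c, %d column(s)",
		 get_partition_strategy(key), natts);

	for (i = 0; i < natts; i++)
		elog(DEBUG1, "column %d: attnum %d, type %u",
			 i, get_partition_col_attnum(key, i),
			 get_partition_col_typid(key, i));
}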
* - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/include/utils/pg_crc.h diff --git a/src/include/utils/pg_locale.h b/src/include/utils/pg_locale.h index f3e04d4d8c..88a3134862 100644 --- a/src/include/utils/pg_locale.h +++ b/src/include/utils/pg_locale.h @@ -4,7 +4,7 @@ * * src/include/utils/pg_locale.h * - * Copyright (c) 2002-2017, PostgreSQL Global Development Group + * Copyright (c) 2002-2018, PostgreSQL Global Development Group * *----------------------------------------------------------------------- */ @@ -110,11 +110,9 @@ extern int32_t icu_from_uchar(char **result, const UChar *buff_uchar, int32_t le #endif /* These functions convert from/to libc's wchar_t, *not* pg_wchar_t */ -#ifdef USE_WIDE_UPPER_LOWER extern size_t wchar2char(char *to, const wchar_t *from, size_t tolen, pg_locale_t locale); extern size_t char2wchar(wchar_t *to, size_t tolen, const char *from, size_t fromlen, pg_locale_t locale); -#endif #endif /* _PG_LOCALE_ */ diff --git a/src/include/utils/pg_lsn.h b/src/include/utils/pg_lsn.h index cc51b2a078..0db478a259 100644 --- a/src/include/utils/pg_lsn.h +++ b/src/include/utils/pg_lsn.h @@ -5,7 +5,7 @@ * PostgreSQL. * * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/include/utils/pg_lsn.h diff --git a/src/include/utils/pg_rusage.h b/src/include/utils/pg_rusage.h index cea51f0cb2..5768caab81 100644 --- a/src/include/utils/pg_rusage.h +++ b/src/include/utils/pg_rusage.h @@ -4,7 +4,7 @@ * header file for resource usage measurement support routines * * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/include/utils/pg_rusage.h diff --git a/src/include/utils/pidfile.h b/src/include/utils/pidfile.h index c3db4c46e3..d3c47aea42 100644 --- a/src/include/utils/pidfile.h +++ b/src/include/utils/pidfile.h @@ -3,7 +3,7 @@ * pidfile.h * Declarations describing the data directory lock file (postmaster.pid) * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/include/utils/pidfile.h diff --git a/src/include/utils/plancache.h b/src/include/utils/plancache.h index 87fab19f3c..5fc7903a06 100644 --- a/src/include/utils/plancache.h +++ b/src/include/utils/plancache.h @@ -5,7 +5,7 @@ * * See plancache.c for comments. 
* - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/include/utils/plancache.h @@ -182,4 +182,15 @@ extern CachedPlan *GetCachedPlan(CachedPlanSource *plansource, QueryEnvironment *queryEnv); extern void ReleaseCachedPlan(CachedPlan *plan, bool useResOwner); +/* possible values for plan_cache_mode */ +typedef enum +{ + PLAN_CACHE_MODE_AUTO, + PLAN_CACHE_MODE_FORCE_GENERIC_PLAN, + PLAN_CACHE_MODE_FORCE_CUSTOM_PLAN +} PlanCacheMode; + +/* GUC parameter */ +extern int plan_cache_mode; + #endif /* PLANCACHE_H */ diff --git a/src/include/utils/portal.h b/src/include/utils/portal.h index cb6f00081d..e4929b936e 100644 --- a/src/include/utils/portal.h +++ b/src/include/utils/portal.h @@ -36,7 +36,7 @@ * to look like NO SCROLL cursors. * * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/include/utils/portal.h @@ -116,7 +116,7 @@ typedef struct PortalData /* Bookkeeping data */ const char *name; /* portal's name */ const char *prepStmtName; /* source prepared statement (NULL if none) */ - MemoryContext heap; /* subsidiary memory for portal */ + MemoryContext portalContext; /* subsidiary memory for portal */ ResourceOwner resowner; /* resources owned by portal */ void (*cleanup) (Portal portal); /* cleanup hook */ @@ -147,6 +147,8 @@ typedef struct PortalData /* Status data */ PortalStatus status; /* see above */ bool portalPinned; /* a pinned portal can't be dropped */ + bool autoHeld; /* was automatically converted from pinned to + * held (see HoldPinnedPortals()) */ /* If not NULL, Executor is active; call ExecutorEnd eventually: */ QueryDesc *queryDesc; /* info needed for executor invocation */ @@ -198,18 +200,13 @@ typedef struct PortalData */ #define PortalIsValid(p) PointerIsValid(p) -/* - * Access macros for Portal ... use these in preference to field access. - */ -#define PortalGetQueryDesc(portal) ((portal)->queryDesc) -#define PortalGetHeapMemory(portal) ((portal)->heap) - /* Prototypes for functions in utils/mmgr/portalmem.c */ extern void EnablePortalManager(void); extern bool PreCommit_Portals(bool isPrepare); extern void AtAbort_Portals(void); extern void AtCleanup_Portals(void); +extern void PortalErrorCleanup(void); extern void AtSubCommit_Portals(SubTransactionId mySubid, SubTransactionId parentSubid, ResourceOwner parentXactOwner); @@ -237,5 +234,6 @@ extern PlannedStmt *PortalGetPrimaryStmt(Portal portal); extern void PortalCreateHoldStore(Portal portal); extern void PortalHashTableDeleteAll(void); extern bool ThereAreNoReadyPortals(void); +extern void HoldPinnedPortals(void); #endif /* PORTAL_H */ diff --git a/src/include/utils/queryenvironment.h b/src/include/utils/queryenvironment.h index 08e6051b4e..34fde6401f 100644 --- a/src/include/utils/queryenvironment.h +++ b/src/include/utils/queryenvironment.h @@ -4,7 +4,7 @@ * Access to functions to mutate the query environment and retrieve the * actual data related to entries (if any). 
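The new plan_cache_mode GUC above steers the choice between generic and custom plans. A minimal sketch, with a hypothetical helper, of how code might consult it; the real decision logic lives in plancache.c:

#include "postgres.h"
#include "utils/plancache.h"

/* Hypothetical helper; returns whether a custom plan should be forced. */
static bool
force_custom_plan(void)
{
    if (plan_cache_mode == PLAN_CACHE_MODE_FORCE_CUSTOM_PLAN)
        return true;
    if (plan_cache_mode == PLAN_CACHE_MODE_FORCE_GENERIC_PLAN)
        return false;

    return false;               /* PLAN_CACHE_MODE_AUTO: defer to cost heuristics */
}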
* - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/include/utils/queryenvironment.h diff --git a/src/include/utils/rangetypes.h b/src/include/utils/rangetypes.h index 5544889317..83e94e005b 100644 --- a/src/include/utils/rangetypes.h +++ b/src/include/utils/rangetypes.h @@ -4,7 +4,7 @@ * Declarations for Postgres range types. * * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/include/utils/rangetypes.h @@ -68,12 +68,12 @@ typedef struct /* * fmgr macros for range type objects */ -#define DatumGetRangeType(X) ((RangeType *) PG_DETOAST_DATUM(X)) -#define DatumGetRangeTypeCopy(X) ((RangeType *) PG_DETOAST_DATUM_COPY(X)) -#define RangeTypeGetDatum(X) PointerGetDatum(X) -#define PG_GETARG_RANGE(n) DatumGetRangeType(PG_GETARG_DATUM(n)) -#define PG_GETARG_RANGE_COPY(n) DatumGetRangeTypeCopy(PG_GETARG_DATUM(n)) -#define PG_RETURN_RANGE(x) return RangeTypeGetDatum(x) +#define DatumGetRangeTypeP(X) ((RangeType *) PG_DETOAST_DATUM(X)) +#define DatumGetRangeTypePCopy(X) ((RangeType *) PG_DETOAST_DATUM_COPY(X)) +#define RangeTypePGetDatum(X) PointerGetDatum(X) +#define PG_GETARG_RANGE_P(n) DatumGetRangeTypeP(PG_GETARG_DATUM(n)) +#define PG_GETARG_RANGE_P_COPY(n) DatumGetRangeTypePCopy(PG_GETARG_DATUM(n)) +#define PG_RETURN_RANGE_P(x) return RangeTypePGetDatum(x) /* Operator strategy numbers used in the GiST and SP-GiST range opclasses */ /* Numbers are chosen to match up operator names with existing usages */ diff --git a/src/include/utils/regproc.h b/src/include/utils/regproc.h index ba46bd7d58..5b9a8cbee8 100644 --- a/src/include/utils/regproc.h +++ b/src/include/utils/regproc.h @@ -3,7 +3,7 @@ * regproc.h * Functions for the built-in types regproc, regclass, regtype, etc. * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/include/utils/regproc.h diff --git a/src/include/utils/rel.h b/src/include/utils/rel.h index 4bc61e5380..84469f5715 100644 --- a/src/include/utils/rel.h +++ b/src/include/utils/rel.h @@ -4,7 +4,7 @@ * POSTGRES relation descriptor (a/k/a relcache entry) definitions. 
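With the fmgr range macros renamed to the *_P convention above, a SQL-callable C function is written as in this minimal sketch (the function name is hypothetical):

#include "postgres.h"
#include "fmgr.h"
#include "utils/rangetypes.h"

PG_FUNCTION_INFO_V1(range_passthrough);

/* Hypothetical function: detoast a range argument and return it unchanged. */
Datum
range_passthrough(PG_FUNCTION_ARGS)
{
    RangeType  *r = PG_GETARG_RANGE_P(0);

    PG_RETURN_RANGE_P(r);
}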
* * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/include/utils/rel.h @@ -46,36 +46,6 @@ typedef struct LockInfoData typedef LockInfoData *LockInfo; -/* - * Information about the partition key of a relation - */ -typedef struct PartitionKeyData -{ - char strategy; /* partitioning strategy */ - int16 partnatts; /* number of columns in the partition key */ - AttrNumber *partattrs; /* attribute numbers of columns in the - * partition key */ - List *partexprs; /* list of expressions in the partitioning - * key, or NIL */ - - Oid *partopfamily; /* OIDs of operator families */ - Oid *partopcintype; /* OIDs of opclass declared input data types */ - FmgrInfo *partsupfunc; /* lookup info for support funcs */ - - /* Partitioning collation per attribute */ - Oid *partcollation; - - /* Type information per attribute */ - Oid *parttypid; - int32 *parttypmod; - int16 *parttyplen; - bool *parttypbyval; - char *parttypalign; - Oid *parttypcoll; -} PartitionKeyData; - -typedef struct PartitionKeyData *PartitionKey; - /* * Here are the contents of a relation cache entry. */ @@ -141,10 +111,12 @@ typedef struct RelationData List *rd_statlist; /* list of OIDs of extended stats */ /* data managed by RelationGetIndexAttrBitmap: */ - Bitmapset *rd_indexattr; /* identifies columns used in indexes */ + Bitmapset *rd_indexattr; /* columns used in non-projection indexes */ + Bitmapset *rd_projindexattr; /* columns used in projection indexes */ Bitmapset *rd_keyattr; /* cols that can be ref'd by foreign keys */ Bitmapset *rd_pkattr; /* cols included in primary key */ Bitmapset *rd_idattr; /* included in replica identity index */ + Bitmapset *rd_projidx; /* Oids of projection indexes */ PublicationActions *rd_pubactions; /* publication actions */ @@ -230,12 +202,13 @@ typedef struct RelationData * The per-FK-column arrays can be fixed-size because we allow at most * INDEX_MAX_KEYS columns in a foreign key constraint. * - * Currently, we only cache fields of interest to the planner, but the - * set of fields could be expanded in future. + * Currently, we mostly cache fields of interest to the planner, but the set + * of fields has already grown the constraint OID for other uses. */ typedef struct ForeignKeyCacheInfo { NodeTag type; + Oid conoid; /* oid of the constraint itself */ Oid conrelid; /* relation constrained by the foreign key */ Oid confrelid; /* relation referenced by the foreign key */ int nkeys; /* number of columns in the foreign key */ @@ -245,6 +218,14 @@ typedef struct ForeignKeyCacheInfo Oid conpfeqop[INDEX_MAX_KEYS]; /* PK = FK operator OIDs */ } ForeignKeyCacheInfo; +/* + * Options common for all indexes + */ +typedef struct GenericIndexOpts +{ + int32 vl_len_; + bool recheck_on_update; +} GenericIndexOpts; /* * StdRdOptions @@ -277,6 +258,9 @@ typedef struct StdRdOptions { int32 vl_len_; /* varlena header (do not touch directly!) 
*/ int fillfactor; /* page fill factor in percent (0..100) */ + /* fraction of newly inserted tuples prior to trigger index cleanup */ + float8 vacuum_cleanup_index_scale_factor; + int toast_tuple_target; /* target for tuple toasting */ AutoVacOpts autovacuum; /* autovacuum-related options */ bool user_catalog_table; /* use as an additional catalog relation */ int parallel_workers; /* max number of parallel workers */ @@ -285,6 +269,14 @@ typedef struct StdRdOptions #define HEAP_MIN_FILLFACTOR 10 #define HEAP_DEFAULT_FILLFACTOR 100 +/* + * RelationGetToastTupleTarget + * Returns the relation's toast_tuple_target. Note multiple eval of argument! + */ +#define RelationGetToastTupleTarget(relation, defaulttarg) \ + ((relation)->rd_options ? \ + ((StdRdOptions *) (relation)->rd_options)->toast_tuple_target : (defaulttarg)) + /* * RelationGetFillFactor * Returns the relation's fillfactor. Note multiple eval of argument! @@ -417,10 +409,24 @@ typedef struct ViewOptions /* * RelationGetNumberOfAttributes - * Returns the number of attributes in a relation. + * Returns the total number of attributes in a relation. */ #define RelationGetNumberOfAttributes(relation) ((relation)->rd_rel->relnatts) +/* + * IndexRelationGetNumberOfAttributes + * Returns the number of attributes in an index. + */ +#define IndexRelationGetNumberOfAttributes(relation) \ + ((relation)->rd_index->indnatts) + +/* + * IndexRelationGetNumberOfKeyAttributes + * Returns the number of key attributes in an index. + */ +#define IndexRelationGetNumberOfKeyAttributes(relation) \ + ((relation)->rd_index->indnkeyatts) + /* * RelationGetDescr * Returns tuple descriptor for a relation. @@ -583,48 +589,6 @@ typedef struct ViewOptions */ #define RelationGetPartitionKey(relation) ((relation)->rd_partkey) -/* - * PartitionKey inquiry functions - */ -static inline int -get_partition_strategy(PartitionKey key) -{ - return key->strategy; -} - -static inline int -get_partition_natts(PartitionKey key) -{ - return key->partnatts; -} - -static inline List * -get_partition_exprs(PartitionKey key) -{ - return key->partexprs; -} - -/* - * PartitionKey inquiry functions - one column - */ -static inline int16 -get_partition_col_attnum(PartitionKey key, int col) -{ - return key->partattrs[col]; -} - -static inline Oid -get_partition_col_typid(PartitionKey key, int col) -{ - return key->parttypid[col]; -} - -static inline int32 -get_partition_col_typmod(PartitionKey key, int col) -{ - return key->parttypmod[col]; -} - /* * RelationGetPartitionDesc * Returns partition descriptor for a relation. diff --git a/src/include/utils/relcache.h b/src/include/utils/relcache.h index 3c53cefe4b..dbbf41b0c1 100644 --- a/src/include/utils/relcache.h +++ b/src/include/utils/relcache.h @@ -4,7 +4,7 @@ * Relation descriptor cache definitions. 
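A minimal sketch of the new accessor macros above, assuming an open heap relation and an open index relation in backend context; the fallback toast target passed here is an arbitrary value for illustration only:

#include "postgres.h"
#include "utils/rel.h"
#include "utils/relcache.h"

/* Hypothetical helper: report reloption and index-shape details. */
static void
show_rel_settings(Relation heapRel, Relation indexRel)
{
    int     toast_target = RelationGetToastTupleTarget(heapRel, 2048);
    int     natts = IndexRelationGetNumberOfAttributes(indexRel);
    int     nkeyatts = IndexRelationGetNumberOfKeyAttributes(indexRel);

    elog(DEBUG1, "toast target %d bytes; index has %d columns (%d key columns)",
         toast_target, natts, nkeyatts);
}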
* * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/include/utils/relcache.h @@ -18,6 +18,11 @@ #include "nodes/bitmapset.h" +/* + * Name of relcache init file(s), used to speed up backend startup + */ +#define RELCACHE_INIT_FILENAME "pg_internal.init" + typedef struct RelationData *Relation; /* ---------------- @@ -48,7 +53,8 @@ extern List *RelationGetIndexPredicate(Relation relation); typedef enum IndexAttrBitmapKind { - INDEX_ATTR_BITMAP_ALL, + INDEX_ATTR_BITMAP_HOT, + INDEX_ATTR_BITMAP_PROJ, INDEX_ATTR_BITMAP_KEY, INDEX_ATTR_BITMAP_PRIMARY_KEY, INDEX_ATTR_BITMAP_IDENTITY_KEY diff --git a/src/include/utils/relfilenodemap.h b/src/include/utils/relfilenodemap.h index b3ee555fb7..e79115d34f 100644 --- a/src/include/utils/relfilenodemap.h +++ b/src/include/utils/relfilenodemap.h @@ -3,7 +3,7 @@ * relfilenodemap.h * relfilenode to oid mapping cache. * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/include/utils/relfilenodemap.h diff --git a/src/include/utils/relmapper.h b/src/include/utils/relmapper.h index 7af69ba6cf..fb51943498 100644 --- a/src/include/utils/relmapper.h +++ b/src/include/utils/relmapper.h @@ -4,7 +4,7 @@ * Catalog-to-filenode mapping * * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/include/utils/relmapper.h @@ -48,7 +48,7 @@ extern void RelationMapInvalidate(bool shared); extern void RelationMapInvalidateAll(void); extern void AtCCI_RelationMap(void); -extern void AtEOXact_RelationMap(bool isCommit); +extern void AtEOXact_RelationMap(bool isCommit, bool isParallelWorker); extern void AtPrepare_RelationMap(void); extern void CheckPointRelationMap(void); @@ -59,6 +59,10 @@ extern void RelationMapInitialize(void); extern void RelationMapInitializePhase2(void); extern void RelationMapInitializePhase3(void); +extern Size EstimateRelationMapSpace(void); +extern void SerializeRelationMap(Size maxSize, char *startAddress); +extern void RestoreRelationMap(char *startAddress); + extern void relmap_redo(XLogReaderState *record); extern void relmap_desc(StringInfo buf, XLogReaderState *record); extern const char *relmap_identify(uint8 info); diff --git a/src/include/utils/relptr.h b/src/include/utils/relptr.h index 06e592e0c6..4b74571e91 100644 --- a/src/include/utils/relptr.h +++ b/src/include/utils/relptr.h @@ -3,7 +3,7 @@ * relptr.h * This file contains basic declarations for relative pointers. * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/include/utils/relptr.h diff --git a/src/include/utils/reltrigger.h b/src/include/utils/reltrigger.h index 2169b0306b..9b4dc7f810 100644 --- a/src/include/utils/reltrigger.h +++ b/src/include/utils/reltrigger.h @@ -4,7 +4,7 @@ * POSTGRES relation trigger definitions. 
* * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/include/utils/reltrigger.h diff --git a/src/include/utils/resowner.h b/src/include/utils/resowner.h index 07d30d93bc..fa03942b6c 100644 --- a/src/include/utils/resowner.h +++ b/src/include/utils/resowner.h @@ -9,7 +9,7 @@ * See utils/resowner/README for more info. * * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/include/utils/resowner.h @@ -33,6 +33,7 @@ typedef struct ResourceOwnerData *ResourceOwner; extern PGDLLIMPORT ResourceOwner CurrentResourceOwner; extern PGDLLIMPORT ResourceOwner CurTransactionResourceOwner; extern PGDLLIMPORT ResourceOwner TopTransactionResourceOwner; +extern PGDLLIMPORT ResourceOwner AuxProcessResourceOwner; /* * Resource releasing is done in three phases: pre-locks, locks, and @@ -78,5 +79,7 @@ extern void RegisterResourceReleaseCallback(ResourceReleaseCallback callback, void *arg); extern void UnregisterResourceReleaseCallback(ResourceReleaseCallback callback, void *arg); +extern void CreateAuxProcessResourceOwner(void); +extern void ReleaseAuxProcessResources(bool isCommit); #endif /* RESOWNER_H */ diff --git a/src/include/utils/resowner_private.h b/src/include/utils/resowner_private.h index 2420b651b3..a6e8eb71ab 100644 --- a/src/include/utils/resowner_private.h +++ b/src/include/utils/resowner_private.h @@ -6,7 +6,7 @@ * See utils/resowner/README for more info. * * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/include/utils/resowner_private.h @@ -88,4 +88,11 @@ extern void ResourceOwnerRememberDSM(ResourceOwner owner, extern void ResourceOwnerForgetDSM(ResourceOwner owner, dsm_segment *); +/* support for JITContext management */ +extern void ResourceOwnerEnlargeJIT(ResourceOwner owner); +extern void ResourceOwnerRememberJIT(ResourceOwner owner, + Datum handle); +extern void ResourceOwnerForgetJIT(ResourceOwner owner, + Datum handle); + #endif /* RESOWNER_PRIVATE_H */ diff --git a/src/include/utils/rls.h b/src/include/utils/rls.h index f9780ad0c0..f3b4858333 100644 --- a/src/include/utils/rls.h +++ b/src/include/utils/rls.h @@ -4,7 +4,7 @@ * Header file for Row Level Security (RLS) utility commands to be used * with the rowsecurity feature. 
* - * Copyright (c) 2007-2017, PostgreSQL Global Development Group + * Copyright (c) 2007-2018, PostgreSQL Global Development Group * * src/include/utils/rls.h * diff --git a/src/include/utils/ruleutils.h b/src/include/utils/ruleutils.h index e9c5193855..9f9b029ab8 100644 --- a/src/include/utils/ruleutils.h +++ b/src/include/utils/ruleutils.h @@ -3,7 +3,7 @@ * ruleutils.h * Declarations for ruleutils.c * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/include/utils/ruleutils.h diff --git a/src/include/utils/sampling.h b/src/include/utils/sampling.h index f566e0b866..c82148515d 100644 --- a/src/include/utils/sampling.h +++ b/src/include/utils/sampling.h @@ -3,7 +3,7 @@ * sampling.h * definitions for sampling functions * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/include/utils/sampling.h diff --git a/src/include/utils/selfuncs.h b/src/include/utils/selfuncs.h index dc6069d435..95e44280c4 100644 --- a/src/include/utils/selfuncs.h +++ b/src/include/utils/selfuncs.h @@ -5,7 +5,7 @@ * infrastructure for selectivity and cost estimation. * * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/include/utils/selfuncs.h @@ -81,14 +81,17 @@ typedef struct VariableStatData #define ReleaseVariableStats(vardata) \ do { \ if (HeapTupleIsValid((vardata).statsTuple)) \ - (* (vardata).freefunc) ((vardata).statsTuple); \ + (vardata).freefunc((vardata).statsTuple); \ } while(0) typedef enum { - Pattern_Type_Like, Pattern_Type_Like_IC, - Pattern_Type_Regex, Pattern_Type_Regex_IC + Pattern_Type_Like, + Pattern_Type_Like_IC, + Pattern_Type_Regex, + Pattern_Type_Regex_IC, + Pattern_Type_Prefix } Pattern_Type; typedef enum diff --git a/src/include/utils/sharedtuplestore.h b/src/include/utils/sharedtuplestore.h new file mode 100644 index 0000000000..834773511d --- /dev/null +++ b/src/include/utils/sharedtuplestore.h @@ -0,0 +1,61 @@ +/*------------------------------------------------------------------------- + * + * sharedtuplestore.h + * Simple mechinism for sharing tuples between backends. + * + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group + * Portions Copyright (c) 1994, Regents of the University of California + * + * src/include/utils/sharedtuplestore.h + * + *------------------------------------------------------------------------- + */ +#ifndef SHAREDTUPLESTORE_H +#define SHAREDTUPLESTORE_H + +#include "access/htup.h" +#include "storage/fd.h" +#include "storage/sharedfileset.h" + +struct SharedTuplestore; +typedef struct SharedTuplestore SharedTuplestore; + +struct SharedTuplestoreAccessor; +typedef struct SharedTuplestoreAccessor SharedTuplestoreAccessor; + +/* + * A flag indicating that the tuplestore will only be scanned once, so backing + * files can be unlinked early. 
+ */ +#define SHARED_TUPLESTORE_SINGLE_PASS 0x01 + +extern size_t sts_estimate(int participants); + +extern SharedTuplestoreAccessor *sts_initialize(SharedTuplestore *sts, + int participants, + int my_participant_number, + size_t meta_data_size, + int flags, + SharedFileSet *fileset, + const char *name); + +extern SharedTuplestoreAccessor *sts_attach(SharedTuplestore *sts, + int my_participant_number, + SharedFileSet *fileset); + +extern void sts_end_write(SharedTuplestoreAccessor *accessor); + +extern void sts_reinitialize(SharedTuplestoreAccessor *accessor); + +extern void sts_begin_parallel_scan(SharedTuplestoreAccessor *accessor); + +extern void sts_end_parallel_scan(SharedTuplestoreAccessor *accessor); + +extern void sts_puttuple(SharedTuplestoreAccessor *accessor, + void *meta_data, + MinimalTuple tuple); + +extern MinimalTuple sts_parallel_scan_next(SharedTuplestoreAccessor *accessor, + void *meta_data); + +#endif /* SHAREDTUPLESTORE_H */ diff --git a/src/include/utils/snapmgr.h b/src/include/utils/snapmgr.h index fc64153780..83806f3040 100644 --- a/src/include/utils/snapmgr.h +++ b/src/include/utils/snapmgr.h @@ -3,7 +3,7 @@ * snapmgr.h * POSTGRES snapshot manager * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/include/utils/snapmgr.h @@ -56,10 +56,10 @@ extern TimestampTz GetOldSnapshotThresholdTimestamp(void); extern bool FirstSnapshotSet; -extern TransactionId TransactionXmin; -extern TransactionId RecentXmin; +extern PGDLLIMPORT TransactionId TransactionXmin; +extern PGDLLIMPORT TransactionId RecentXmin; extern PGDLLIMPORT TransactionId RecentGlobalXmin; -extern TransactionId RecentGlobalDataXmin; +extern PGDLLIMPORT TransactionId RecentGlobalDataXmin; extern Snapshot GetTransactionSnapshot(void); extern Snapshot GetLatestSnapshot(void); diff --git a/src/include/utils/snapshot.h b/src/include/utils/snapshot.h index 074cc81864..a8a5a8f4c0 100644 --- a/src/include/utils/snapshot.h +++ b/src/include/utils/snapshot.h @@ -3,7 +3,7 @@ * snapshot.h * POSTGRES snapshot definition * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/include/utils/snapshot.h @@ -41,6 +41,7 @@ typedef bool (*SnapshotSatisfiesFunc) (HeapTuple htup, * * MVCC snapshots taken during recovery (in Hot-Standby mode) * * Historic MVCC snapshots used during logical decoding * * snapshots passed to HeapTupleSatisfiesDirty() + * * snapshots passed to HeapTupleSatisfiesNonVacuumable() * * snapshots used for SatisfiesAny, Toast, Self where no members are * accessed. * @@ -56,7 +57,8 @@ typedef struct SnapshotData /* * The remaining fields are used only for MVCC snapshots, and are normally * just zeroes in special snapshots. (But xmin and xmax are used - * specially by HeapTupleSatisfiesDirty.) + * specially by HeapTupleSatisfiesDirty, and xmin is used specially by + * HeapTupleSatisfiesNonVacuumable.) * * An MVCC snapshot can never see the effects of XIDs >= xmax. It can see * the effects of all older XIDs except those listed in the snapshot. 
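The sts_* declarations above make up the whole shared tuplestore API. A heavily simplified sketch, assuming the SharedTuplestore and SharedFileSet live in dynamic shared memory set up elsewhere and that participants synchronize between the write and scan phases (names other than the sts_* calls are hypothetical):

#include "postgres.h"
#include "storage/sharedfileset.h"
#include "utils/sharedtuplestore.h"

/*
 * Hypothetical participant routine.  "sts" is assumed to point at
 * sts_estimate(nparticipants) bytes of shared memory.
 */
static void
share_and_rescan(SharedTuplestore *sts, SharedFileSet *fileset,
                 int nparticipants, int my_number, MinimalTuple tuple)
{
    SharedTuplestoreAccessor *acc;

    if (my_number == 0)
        acc = sts_initialize(sts, nparticipants, my_number,
                             0, /* no per-tuple metadata */
                             SHARED_TUPLESTORE_SINGLE_PASS,
                             fileset, "sketch");
    else
        acc = sts_attach(sts, my_number, fileset);

    sts_puttuple(acc, NULL, tuple);     /* each participant writes */
    sts_end_write(acc);

    /* ... all participants must finish writing before anyone scans ... */

    sts_begin_parallel_scan(acc);
    while (sts_parallel_scan_next(acc, NULL) != NULL)
        ;                       /* consume tuples */
    sts_end_parallel_scan(acc);
}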
xmin diff --git a/src/include/utils/sortsupport.h b/src/include/utils/sortsupport.h index 6e8444b4ff..818e0b1843 100644 --- a/src/include/utils/sortsupport.h +++ b/src/include/utils/sortsupport.h @@ -42,7 +42,7 @@ * function for such cases, but probably not any other acceleration method. * * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/include/utils/sortsupport.h @@ -96,8 +96,7 @@ typedef struct SortSupportData * Comparator function has the same API as the traditional btree * comparison function, ie, return <0, 0, or >0 according as x is less * than, equal to, or greater than y. Note that x and y are guaranteed - * not null, and there is no way to return null either. Do not return - * INT_MIN, as callers are allowed to negate the result before using it. + * not null, and there is no way to return null either. * * This may be either the authoritative comparator, or the abbreviated * comparator. Core code may switch this over the initial preference of @@ -222,9 +221,9 @@ ApplySortComparator(Datum datum1, bool isNull1, } else { - compare = (*ssup->comparator) (datum1, datum2, ssup); + compare = ssup->comparator(datum1, datum2, ssup); if (ssup->ssup_reverse) - compare = -compare; + INVERT_COMPARE_RESULT(compare); } return compare; @@ -260,9 +259,9 @@ ApplySortAbbrevFullComparator(Datum datum1, bool isNull1, } else { - compare = (*ssup->abbrev_full_comparator) (datum1, datum2, ssup); + compare = ssup->abbrev_full_comparator(datum1, datum2, ssup); if (ssup->ssup_reverse) - compare = -compare; + INVERT_COMPARE_RESULT(compare); } return compare; diff --git a/src/include/utils/spccache.h b/src/include/utils/spccache.h index 7c45bb11eb..2f30c982b2 100644 --- a/src/include/utils/spccache.h +++ b/src/include/utils/spccache.h @@ -3,7 +3,7 @@ * spccache.h * Tablespace cache. * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/include/utils/spccache.h diff --git a/src/include/utils/syscache.h b/src/include/utils/syscache.h index 8352b40f4e..4f333586ee 100644 --- a/src/include/utils/syscache.h +++ b/src/include/utils/syscache.h @@ -6,7 +6,7 @@ * See also lsyscache.h, which provides convenience routines for * common cache-lookup operations. * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/include/utils/syscache.h @@ -117,6 +117,20 @@ extern void InitCatalogCachePhase2(void); extern HeapTuple SearchSysCache(int cacheId, Datum key1, Datum key2, Datum key3, Datum key4); + +/* + * The use of argument specific numbers is encouraged. They're faster, and + * insulates the caller from changes in the maximum number of keys. 
+ */ +extern HeapTuple SearchSysCache1(int cacheId, + Datum key1); +extern HeapTuple SearchSysCache2(int cacheId, + Datum key1, Datum key2); +extern HeapTuple SearchSysCache3(int cacheId, + Datum key1, Datum key2, Datum key3); +extern HeapTuple SearchSysCache4(int cacheId, + Datum key1, Datum key2, Datum key3, Datum key4); + extern void ReleaseSysCache(HeapTuple tuple); /* convenience routines */ @@ -131,6 +145,9 @@ extern HeapTuple SearchSysCacheAttName(Oid relid, const char *attname); extern HeapTuple SearchSysCacheCopyAttName(Oid relid, const char *attname); extern bool SearchSysCacheExistsAttName(Oid relid, const char *attname); +extern HeapTuple SearchSysCacheAttNum(Oid relid, int16 attnum); +extern HeapTuple SearchSysCacheCopyAttNum(Oid relid, int16 attnum); + extern Datum SysCacheGetAttr(int cacheId, HeapTuple tup, AttrNumber attributeNumber, bool *isNull); @@ -140,7 +157,7 @@ extern uint32 GetSysCacheHashValue(int cacheId, /* list-search interface. Users of this must import catcache.h too */ struct catclist; extern struct catclist *SearchSysCacheList(int cacheId, int nkeys, - Datum key1, Datum key2, Datum key3, Datum key4); + Datum key1, Datum key2, Datum key3); extern void SysCacheInvalidate(int cacheId, uint32 hashValue); @@ -153,15 +170,6 @@ extern bool RelationSupportsSysCache(Oid relid); * functions is encouraged, as it insulates the caller from changes in the * maximum number of keys. */ -#define SearchSysCache1(cacheId, key1) \ - SearchSysCache(cacheId, key1, 0, 0, 0) -#define SearchSysCache2(cacheId, key1, key2) \ - SearchSysCache(cacheId, key1, key2, 0, 0) -#define SearchSysCache3(cacheId, key1, key2, key3) \ - SearchSysCache(cacheId, key1, key2, key3, 0) -#define SearchSysCache4(cacheId, key1, key2, key3, key4) \ - SearchSysCache(cacheId, key1, key2, key3, key4) - #define SearchSysCacheCopy1(cacheId, key1) \ SearchSysCacheCopy(cacheId, key1, 0, 0, 0) #define SearchSysCacheCopy2(cacheId, key1, key2) \ @@ -199,13 +207,11 @@ extern bool RelationSupportsSysCache(Oid relid); GetSysCacheHashValue(cacheId, key1, key2, key3, key4) #define SearchSysCacheList1(cacheId, key1) \ - SearchSysCacheList(cacheId, 1, key1, 0, 0, 0) + SearchSysCacheList(cacheId, 1, key1, 0, 0) #define SearchSysCacheList2(cacheId, key1, key2) \ - SearchSysCacheList(cacheId, 2, key1, key2, 0, 0) + SearchSysCacheList(cacheId, 2, key1, key2, 0) #define SearchSysCacheList3(cacheId, key1, key2, key3) \ - SearchSysCacheList(cacheId, 3, key1, key2, key3, 0) -#define SearchSysCacheList4(cacheId, key1, key2, key3, key4) \ - SearchSysCacheList(cacheId, 4, key1, key2, key3, key4) + SearchSysCacheList(cacheId, 3, key1, key2, key3) #define ReleaseSysCacheList(x) ReleaseCatCacheList(x) diff --git a/src/include/utils/timeout.h b/src/include/utils/timeout.h index 5a2efc0dd9..dcc7307c16 100644 --- a/src/include/utils/timeout.h +++ b/src/include/utils/timeout.h @@ -4,7 +4,7 @@ * Routines to multiplex SIGALRM interrupts for multiple timeout reasons. * * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/include/utils/timeout.h diff --git a/src/include/utils/timestamp.h b/src/include/utils/timestamp.h index 3f2d31dec8..2b3b35703e 100644 --- a/src/include/utils/timestamp.h +++ b/src/include/utils/timestamp.h @@ -3,7 +3,7 @@ * timestamp.h * Definitions for the SQL "timestamp" and "interval" types. 
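SearchSysCache1() through SearchSysCache4() above are now real functions rather than macros, and callers use them exactly as before. A minimal sketch with a hypothetical helper:

#include "postgres.h"
#include "access/htup.h"
#include "utils/syscache.h"

/* Hypothetical helper: does a pg_type row with this OID exist? */
static bool
type_exists(Oid typid)
{
    HeapTuple   tup = SearchSysCache1(TYPEOID, ObjectIdGetDatum(typid));

    if (!HeapTupleIsValid(tup))
        return false;

    ReleaseSysCache(tup);
    return true;
}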
* - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/include/utils/timestamp.h diff --git a/src/include/utils/tqual.h b/src/include/utils/tqual.h index 036d9898d6..d3b6e99bb4 100644 --- a/src/include/utils/tqual.h +++ b/src/include/utils/tqual.h @@ -5,7 +5,7 @@ * * Should be moved/renamed... - vadim 07/28/98 * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/include/utils/tqual.h @@ -66,6 +66,8 @@ extern bool HeapTupleSatisfiesToast(HeapTuple htup, Snapshot snapshot, Buffer buffer); extern bool HeapTupleSatisfiesDirty(HeapTuple htup, Snapshot snapshot, Buffer buffer); +extern bool HeapTupleSatisfiesNonVacuumable(HeapTuple htup, + Snapshot snapshot, Buffer buffer); extern bool HeapTupleSatisfiesHistoricMVCC(HeapTuple htup, Snapshot snapshot, Buffer buffer); @@ -76,6 +78,7 @@ extern HTSV_Result HeapTupleSatisfiesVacuum(HeapTuple htup, TransactionId OldestXmin, Buffer buffer); extern bool HeapTupleIsSurelyDead(HeapTuple htup, TransactionId OldestXmin); +extern bool XidInMVCCSnapshot(TransactionId xid, Snapshot snapshot); extern void HeapTupleSetHintBits(HeapTupleHeader tuple, Buffer buffer, uint16 infomask, TransactionId xid); @@ -100,6 +103,14 @@ extern bool ResolveCminCmaxDuringDecoding(struct HTAB *tuplecid_data, #define InitDirtySnapshot(snapshotdata) \ ((snapshotdata).satisfies = HeapTupleSatisfiesDirty) +/* + * Similarly, some initialization is required for a NonVacuumable snapshot. + * The caller must supply the xmin horizon to use (e.g., RecentGlobalXmin). + */ +#define InitNonVacuumableSnapshot(snapshotdata, xmin_horizon) \ + ((snapshotdata).satisfies = HeapTupleSatisfiesNonVacuumable, \ + (snapshotdata).xmin = (xmin_horizon)) + /* * Similarly, some initialization is required for SnapshotToast. We need * to set lsn and whenTaken correctly to support snapshot_too_old. diff --git a/src/include/utils/tuplesort.h b/src/include/utils/tuplesort.h index 28c168a801..32908b6625 100644 --- a/src/include/utils/tuplesort.h +++ b/src/include/utils/tuplesort.h @@ -8,9 +8,10 @@ * if necessary). It works efficiently for both small and large amounts * of data. Small amounts are sorted in-memory using qsort(). Large * amounts are sorted using temporary files and a standard external sort - * algorithm. + * algorithm. Parallel sorts use a variant of this external sort + * algorithm, and are typically only used for large amounts of data. * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/include/utils/tuplesort.h @@ -23,13 +24,67 @@ #include "access/itup.h" #include "executor/tuptable.h" #include "fmgr.h" +#include "storage/dsm.h" #include "utils/relcache.h" -/* Tuplesortstate is an opaque type whose details are not known outside - * tuplesort.c. +/* + * Tuplesortstate and Sharedsort are opaque types whose details are not + * known outside tuplesort.c. */ typedef struct Tuplesortstate Tuplesortstate; +typedef struct Sharedsort Sharedsort; + +/* + * Tuplesort parallel coordination state, allocated by each participant in + * local memory. Participant caller initializes everything. 
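The InitNonVacuumableSnapshot() macro above pairs with the new HeapTupleSatisfiesNonVacuumable() routine. A minimal sketch, assuming backend context, of initializing such a snapshot with RecentGlobalXmin as the horizon:

#include "postgres.h"
#include "utils/snapmgr.h"
#include "utils/snapshot.h"
#include "utils/tqual.h"

/* Hypothetical helper: set up a snapshot that sees anything not yet vacuumable. */
static void
init_nonvacuumable_snapshot(SnapshotData *snap)
{
    InitNonVacuumableSnapshot(*snap, RecentGlobalXmin);
    /* "snap" can now be passed to scan routines that take a Snapshot */
}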
See usage notes + * below. + */ +typedef struct SortCoordinateData +{ + /* Worker process? If not, must be leader. */ + bool isWorker; + + /* + * Leader-process-passed number of participants known launched (workers + * set this to -1). Includes state within leader needed for it to + * participate as a worker, if any. + */ + int nParticipants; + + /* Private opaque state (points to shared memory) */ + Sharedsort *sharedsort; +} SortCoordinateData; + +typedef struct SortCoordinateData *SortCoordinate; + +/* + * Data structures for reporting sort statistics. Note that + * TuplesortInstrumentation can't contain any pointers because we + * sometimes put it in shared memory. + */ +typedef enum +{ + SORT_TYPE_STILL_IN_PROGRESS = 0, + SORT_TYPE_TOP_N_HEAPSORT, + SORT_TYPE_QUICKSORT, + SORT_TYPE_EXTERNAL_SORT, + SORT_TYPE_EXTERNAL_MERGE +} TuplesortMethod; + +typedef enum +{ + SORT_SPACE_TYPE_DISK, + SORT_SPACE_TYPE_MEMORY +} TuplesortSpaceType; + +typedef struct TuplesortInstrumentation +{ + TuplesortMethod sortMethod; /* sort algorithm used */ + TuplesortSpaceType spaceType; /* type of space spaceUsed represents */ + long spaceUsed; /* space consumption, in kB */ +} TuplesortInstrumentation; + /* * We provide multiple interfaces to what is essentially the same code, @@ -38,6 +93,8 @@ typedef struct Tuplesortstate Tuplesortstate; * sorting HeapTuples and two more for sorting IndexTuples. Yet another * API supports sorting bare Datums. * + * Serial sort callers should pass NULL for their coordinate argument. + * * The "heap" API actually stores/sorts MinimalTuples, which means it doesn't * preserve the system columns (tuple identity and transaction visibility * info). The sort keys are specified by column numbers within the tuples @@ -56,30 +113,107 @@ typedef struct Tuplesortstate Tuplesortstate; * * The "index_hash" API is similar to index_btree, but the tuples are * actually sorted by their hash codes not the raw data. + * + * Parallel sort callers are required to coordinate multiple tuplesort states + * in a leader process and one or more worker processes. The leader process + * must launch workers, and have each perform an independent "partial" + * tuplesort, typically fed by the parallel heap interface. The leader later + * produces the final output (internally, it merges runs output by workers). + * + * Callers must do the following to perform a sort in parallel using multiple + * worker processes: + * + * 1. Request tuplesort-private shared memory for n workers. Use + * tuplesort_estimate_shared() to get the required size. + * 2. Have leader process initialize allocated shared memory using + * tuplesort_initialize_shared(). Launch workers. + * 3. Initialize a coordinate argument within both the leader process, and + * for each worker process. This has a pointer to the shared + * tuplesort-private structure, as well as some caller-initialized fields. + * Leader's coordinate argument reliably indicates number of workers + * launched (this is unused by workers). + * 4. Begin a tuplesort using some appropriate tuplesort_begin* routine, + * (passing the coordinate argument) within each worker. The workMem + * arguments need not be identical. All other arguments should match + * exactly, though. + * 5. tuplesort_attach_shared() should be called by all workers. Feed tuples + * to each worker, and call tuplesort_performsort() within each when input + * is exhausted. + * 6. Call tuplesort_end() in each worker process. Worker processes can shut + * down once tuplesort_end() returns. + * 7. 
Begin a tuplesort in the leader using the same tuplesort_begin* + * routine, passing a leader-appropriate coordinate argument (this can + * happen as early as during step 3, actually, since we only need to know + * the number of workers successfully launched). The leader must now wait + * for workers to finish. Caller must use own mechanism for ensuring that + * next step isn't reached until all workers have called and returned from + * tuplesort_performsort(). (Note that it's okay if workers have already + * also called tuplesort_end() by then.) + * 8. Call tuplesort_performsort() in leader. Consume output using the + * appropriate tuplesort_get* routine. Leader can skip this step if + * tuplesort turns out to be unnecessary. + * 9. Call tuplesort_end() in leader. + * + * This division of labor assumes nothing about how input tuples are produced, + * but does require that caller combine the state of multiple tuplesorts for + * any purpose other than producing the final output. For example, callers + * must consider that tuplesort_get_stats() reports on only one worker's role + * in a sort (or the leader's role), and not statistics for the sort as a + * whole. + * + * Note that callers may use the leader process to sort runs as if it was an + * independent worker process (prior to the process performing a leader sort + * to produce the final sorted output). Doing so only requires a second + * "partial" tuplesort within the leader process, initialized like that of a + * worker process. The steps above don't touch on this directly. The only + * difference is that the tuplesort_attach_shared() call is never needed within + * leader process, because the backend as a whole holds the shared fileset + * reference. A worker Tuplesortstate in leader is expected to do exactly the + * same amount of total initial processing work as a worker process + * Tuplesortstate, since the leader process has nothing else to do before + * workers finish. + * + * Note that only a very small amount of memory will be allocated prior to + * the leader state first consuming input, and that workers will free the + * vast majority of their memory upon returning from tuplesort_performsort(). + * Callers can rely on this to arrange for memory to be used in a way that + * respects a workMem-style budget across an entire parallel sort operation. + * + * Callers are responsible for parallel safety in general. However, they + * can at least rely on there being no parallel safety hazards within + * tuplesort, because tuplesort thinks of the sort as several independent + * sorts whose results are combined. Since, in general, the behavior of + * sort operators is immutable, caller need only worry about the parallel + * safety of whatever the process is through which input tuples are + * generated (typically, caller uses a parallel heap scan). 
*/ extern Tuplesortstate *tuplesort_begin_heap(TupleDesc tupDesc, int nkeys, AttrNumber *attNums, Oid *sortOperators, Oid *sortCollations, bool *nullsFirstFlags, - int workMem, bool randomAccess); + int workMem, SortCoordinate coordinate, + bool randomAccess); extern Tuplesortstate *tuplesort_begin_cluster(TupleDesc tupDesc, - Relation indexRel, - int workMem, bool randomAccess); + Relation indexRel, int workMem, + SortCoordinate coordinate, bool randomAccess); extern Tuplesortstate *tuplesort_begin_index_btree(Relation heapRel, Relation indexRel, bool enforceUnique, - int workMem, bool randomAccess); + int workMem, SortCoordinate coordinate, + bool randomAccess); extern Tuplesortstate *tuplesort_begin_index_hash(Relation heapRel, Relation indexRel, uint32 high_mask, uint32 low_mask, uint32 max_buckets, - int workMem, bool randomAccess); + int workMem, SortCoordinate coordinate, + bool randomAccess); extern Tuplesortstate *tuplesort_begin_datum(Oid datumType, Oid sortOperator, Oid sortCollation, bool nullsFirstFlag, - int workMem, bool randomAccess); + int workMem, SortCoordinate coordinate, + bool randomAccess); extern void tuplesort_set_bound(Tuplesortstate *state, int64 bound); @@ -107,16 +241,22 @@ extern bool tuplesort_skiptuples(Tuplesortstate *state, int64 ntuples, extern void tuplesort_end(Tuplesortstate *state); extern void tuplesort_get_stats(Tuplesortstate *state, - const char **sortMethod, - const char **spaceType, - long *spaceUsed); + TuplesortInstrumentation *stats); +extern const char *tuplesort_method_name(TuplesortMethod m); +extern const char *tuplesort_space_type_name(TuplesortSpaceType t); extern int tuplesort_merge_order(int64 allowedMem); +extern Size tuplesort_estimate_shared(int nworkers); +extern void tuplesort_initialize_shared(Sharedsort *shared, int nWorkers, + dsm_segment *seg); +extern void tuplesort_attach_shared(Sharedsort *shared, dsm_segment *seg); + /* * These routines may only be called if randomAccess was specified 'true'. * Likewise, backwards scan in gettuple/getdatum is only allowed if - * randomAccess was specified. + * randomAccess was specified. Note that parallel sorts do not support + * randomAccess. */ extern void tuplesort_rescan(Tuplesortstate *state); diff --git a/src/include/utils/tuplestore.h b/src/include/utils/tuplestore.h index 7f4e1e318f..2c0c3f8219 100644 --- a/src/include/utils/tuplestore.h +++ b/src/include/utils/tuplestore.h @@ -21,7 +21,7 @@ * Also, we have changed the API to return tuples in TupleTableSlots, * so that there is a check to prevent attempted access to system columns. * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/include/utils/tuplestore.h diff --git a/src/include/utils/typcache.h b/src/include/utils/typcache.h index c12631dafe..217d064da5 100644 --- a/src/include/utils/typcache.h +++ b/src/include/utils/typcache.h @@ -6,7 +6,7 @@ * The type cache exists to speed lookup of certain information about data * types that is not directly available from a type's pg_type row. 
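A condensed sketch of the worker/leader protocol laid out in the comment above, using the coordinate-taking tuplesort_begin_heap() and the shared-state helpers declared here. DSM segment creation, worker launch, tuple feeding, and the wait for workers are assumed to happen elsewhere, and all names other than the tuplesort API are hypothetical:

#include "postgres.h"
#include "storage/dsm.h"
#include "utils/tuplesort.h"

/*
 * Leader side (steps 1-3): initialize shared state inside an already-created
 * DSM segment, then launch workers (launch code elided).
 */
static void
leader_setup(Sharedsort *shared, int nworkers, dsm_segment *seg)
{
    /* "shared" points at tuplesort_estimate_shared(nworkers) bytes in "seg" */
    tuplesort_initialize_shared(shared, nworkers, seg);
}

/*
 * Worker side (steps 4-6): attach, sort a partial input, and shut down.
 */
static void
worker_sort(Sharedsort *shared, dsm_segment *seg, TupleDesc tupDesc,
            int nkeys, AttrNumber *attNums, Oid *sortOperators,
            Oid *sortCollations, bool *nullsFirst, int workMem)
{
    SortCoordinateData coord;
    Tuplesortstate *state;

    tuplesort_attach_shared(shared, seg);

    coord.isWorker = true;
    coord.nParticipants = -1;   /* workers pass -1 */
    coord.sharedsort = shared;

    state = tuplesort_begin_heap(tupDesc, nkeys, attNums, sortOperators,
                                 sortCollations, nullsFirst, workMem,
                                 &coord, false);    /* no randomAccess */

    /* ... feed this worker's tuples, e.g. with tuplesort_puttupleslot() ... */

    tuplesort_performsort(state);
    tuplesort_end(state);
}

/*
 * Leader side (steps 7-9): after all workers have returned from
 * tuplesort_performsort(), merge their runs and consume the output.
 */
static void
leader_merge(Sharedsort *shared, int nworkers_launched, TupleDesc tupDesc,
             int nkeys, AttrNumber *attNums, Oid *sortOperators,
             Oid *sortCollations, bool *nullsFirst, int workMem)
{
    SortCoordinateData coord;
    TuplesortInstrumentation stats;
    Tuplesortstate *state;

    coord.isWorker = false;
    coord.nParticipants = nworkers_launched;
    coord.sharedsort = shared;

    state = tuplesort_begin_heap(tupDesc, nkeys, attNums, sortOperators,
                                 sortCollations, nullsFirst, workMem,
                                 &coord, false);

    tuplesort_performsort(state);

    /* optional: report how the merge was done, using the new stats API */
    tuplesort_get_stats(state, &stats);
    elog(DEBUG1, "leader sort: %s, %ld kB (%s)",
         tuplesort_method_name(stats.sortMethod),
         stats.spaceUsed,
         tuplesort_space_type_name(stats.spaceType));

    /* ... fetch results, e.g. with tuplesort_gettupleslot() ... */
    tuplesort_end(state);
}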
* - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/include/utils/typcache.h @@ -18,6 +18,8 @@ #include "access/tupdesc.h" #include "fmgr.h" +#include "storage/dsm.h" +#include "utils/dsa.h" /* DomainConstraintCache is an opaque struct known only within typcache.c */ @@ -38,6 +40,7 @@ typedef struct TypeCacheEntry char typstorage; char typtype; Oid typrelid; + Oid typelem; /* * Information obtained from opfamily entries @@ -56,6 +59,7 @@ typedef struct TypeCacheEntry Oid gt_opr; /* the greater-than operator */ Oid cmp_proc; /* the btree comparison function */ Oid hash_proc; /* the hash calculation function */ + Oid hash_extended_proc; /* the extended hash calculation function */ /* * Pre-set-up fmgr call info for the equality operator, the btree @@ -67,13 +71,19 @@ typedef struct TypeCacheEntry FmgrInfo eq_opr_finfo; FmgrInfo cmp_proc_finfo; FmgrInfo hash_proc_finfo; + FmgrInfo hash_extended_proc_finfo; /* * Tuple descriptor if it's a composite type (row type). NULL if not * composite or information hasn't yet been requested. (NOTE: this is a * reference-counted tupledesc.) + * + * To simplify caching dependent info, tupDesc_identifier is an identifier + * for this tupledesc that is unique for the life of the process, and + * changes anytime the tupledesc does. Zero if not yet determined. */ TupleDesc tupDesc; + uint64 tupDesc_identifier; /* * Fields computed when TYPECACHE_RANGE_INFO is requested. Zeroes if not @@ -87,6 +97,13 @@ typedef struct TypeCacheEntry FmgrInfo rng_canonical_finfo; /* canonicalization function, if any */ FmgrInfo rng_subdiff_finfo; /* difference function, if any */ + /* + * Domain's base type and typmod if it's a domain type. Zeroes if not + * domain, or if information hasn't been requested. + */ + Oid domainBaseType; + int32 domainBaseTypmod; + /* * Domain constraint data if it's a domain type. NULL if not domain, or * if domain has no constraints, or if information hasn't been requested. 
@@ -119,7 +136,13 @@ typedef struct TypeCacheEntry #define TYPECACHE_BTREE_OPFAMILY 0x0200 #define TYPECACHE_HASH_OPFAMILY 0x0400 #define TYPECACHE_RANGE_INFO 0x0800 -#define TYPECACHE_DOMAIN_INFO 0x1000 +#define TYPECACHE_DOMAIN_BASE_INFO 0x1000 +#define TYPECACHE_DOMAIN_CONSTR_INFO 0x2000 +#define TYPECACHE_HASH_EXTENDED_PROC 0x4000 +#define TYPECACHE_HASH_EXTENDED_PROC_FINFO 0x8000 + +/* This value will not equal any valid tupledesc identifier, nor 0 */ +#define INVALID_TUPLEDESC_IDENTIFIER ((uint64) 1) /* * Callers wishing to maintain a long-lived reference to a domain's constraint @@ -139,6 +162,7 @@ typedef struct DomainConstraintRef MemoryContextCallback callback; /* used to release refcount when done */ } DomainConstraintRef; +typedef struct SharedRecordTypmodRegistry SharedRecordTypmodRegistry; extern TypeCacheEntry *lookup_type_cache(Oid type_id, int flags); @@ -156,8 +180,20 @@ extern TupleDesc lookup_rowtype_tupdesc_noerror(Oid type_id, int32 typmod, extern TupleDesc lookup_rowtype_tupdesc_copy(Oid type_id, int32 typmod); +extern TupleDesc lookup_rowtype_tupdesc_domain(Oid type_id, int32 typmod, + bool noError); + extern void assign_record_type_typmod(TupleDesc tupDesc); +extern uint64 assign_record_type_identifier(Oid type_id, int32 typmod); + extern int compare_values_of_enum(TypeCacheEntry *tcache, Oid arg1, Oid arg2); +extern size_t SharedRecordTypmodRegistryEstimate(void); + +extern void SharedRecordTypmodRegistryInit(SharedRecordTypmodRegistry *, + dsm_segment *segment, dsa_area *area); + +extern void SharedRecordTypmodRegistryAttach(SharedRecordTypmodRegistry *); + #endif /* TYPCACHE_H */ diff --git a/src/include/utils/tzparser.h b/src/include/utils/tzparser.h index 1e444e1159..5a16d12103 100644 --- a/src/include/utils/tzparser.h +++ b/src/include/utils/tzparser.h @@ -3,7 +3,7 @@ * tzparser.h * Timezone offset file parsing definitions. * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/include/utils/tzparser.h diff --git a/src/include/utils/uuid.h b/src/include/utils/uuid.h index ed3ec28959..312c8c48c6 100644 --- a/src/include/utils/uuid.h +++ b/src/include/utils/uuid.h @@ -5,7 +5,7 @@ * to avoid conflicts with any uuid_t type that might be defined by * the system headers. * - * Copyright (c) 2007-2017, PostgreSQL Global Development Group + * Copyright (c) 2007-2018, PostgreSQL Global Development Group * * src/include/utils/uuid.h * diff --git a/src/include/utils/varbit.h b/src/include/utils/varbit.h index 2a4ec67698..3278cb5461 100644 --- a/src/include/utils/varbit.h +++ b/src/include/utils/varbit.h @@ -5,7 +5,7 @@ * * Code originally contributed by Adriaan Joubert. * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/include/utils/varbit.h diff --git a/src/include/utils/varlena.h b/src/include/utils/varlena.h index cab82ee888..c776931bc4 100644 --- a/src/include/utils/varlena.h +++ b/src/include/utils/varlena.h @@ -3,7 +3,7 @@ * varlena.h * Functions for the variable-length built-in types. 
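A minimal sketch of requesting the new typcache fields above via lookup_type_cache(), assuming backend context and a hypothetical helper:

#include "postgres.h"
#include "utils/typcache.h"

/* Hypothetical helper: inspect the new typcache fields for a type. */
static void
show_type_cache_info(Oid typid)
{
    TypeCacheEntry *tce = lookup_type_cache(typid,
                                            TYPECACHE_HASH_EXTENDED_PROC |
                                            TYPECACHE_DOMAIN_BASE_INFO);

    if (OidIsValid(tce->hash_extended_proc))
        elog(DEBUG1, "extended hash function: %u", tce->hash_extended_proc);

    if (OidIsValid(tce->domainBaseType))
        elog(DEBUG1, "domain over type %u (typmod %d)",
             tce->domainBaseType, tce->domainBaseTypmod);
}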
* - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/include/utils/varlena.h @@ -16,7 +16,7 @@ #include "nodes/pg_list.h" #include "utils/sortsupport.h" -extern int varstr_cmp(char *arg1, int len1, char *arg2, int len2, Oid collid); +extern int varstr_cmp(const char *arg1, int len1, const char *arg2, int len2, Oid collid); extern void varstr_sortsupport(SortSupport ssup, Oid collid, bool bpchar); extern int varstr_levenshtein(const char *source, int slen, const char *target, int tlen, @@ -31,6 +31,8 @@ extern bool SplitIdentifierString(char *rawstring, char separator, List **namelist); extern bool SplitDirectoriesString(char *rawstring, char separator, List **namelist); +extern bool SplitGUCList(char *rawstring, char separator, + List **namelist); extern text *replace_text_regexp(text *src_text, void *regexp, text *replace_text, bool glob); diff --git a/src/include/utils/xml.h b/src/include/utils/xml.h index e6fa0e2051..fe488da42e 100644 --- a/src/include/utils/xml.h +++ b/src/include/utils/xml.h @@ -4,7 +4,7 @@ * Declarations for XML data type support. * * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/include/utils/xml.h @@ -65,14 +65,14 @@ extern xmltype *xmlelement(XmlExpr *xexpr, Datum *named_argvalue, bool *named_argnull, Datum *argvalue, bool *argnull); extern xmltype *xmlparse(text *data, XmlOptionType xmloption, bool preserve_whitespace); -extern xmltype *xmlpi(char *target, text *arg, bool arg_is_null, bool *result_is_null); +extern xmltype *xmlpi(const char *target, text *arg, bool arg_is_null, bool *result_is_null); extern xmltype *xmlroot(xmltype *data, text *version, int standalone); extern bool xml_is_document(xmltype *arg); extern text *xmltotext_with_xmloption(xmltype *data, XmlOptionType xmloption_arg); extern char *escape_xml(const char *str); -extern char *map_sql_identifier_to_xml_name(char *ident, bool fully_escaped, bool escape_period); -extern char *map_xml_name_to_sql_identifier(char *name); +extern char *map_sql_identifier_to_xml_name(const char *ident, bool fully_escaped, bool escape_period); +extern char *map_xml_name_to_sql_identifier(const char *name); extern char *map_sql_value_to_xml_value(Datum value, Oid type, bool xml_escape_strings); extern int xmlbinary; /* XmlBinaryType, but int for guc enum */ diff --git a/src/include/windowapi.h b/src/include/windowapi.h index 0aa23ef2f5..f6f18e5159 100644 --- a/src/include/windowapi.h +++ b/src/include/windowapi.h @@ -19,7 +19,7 @@ * function in nodeWindowAgg.c for details. 
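SplitGUCList() above parses GUC-style lists much like the existing SplitIdentifierString(). A minimal sketch with a hypothetical helper, assuming (as the non-const argument suggests) that the input string is modified in place, so the caller works on a copy:

#include "postgres.h"
#include "nodes/pg_list.h"
#include "utils/varlena.h"

/* Hypothetical helper: split a GUC-style list into a List of names. */
static List *
parse_guc_name_list(const char *value)
{
    char       *raw = pstrdup(value);
    List       *namelist = NIL;

    if (!SplitGUCList(raw, ',', &namelist))
        elog(ERROR, "invalid list syntax in value \"%s\"", value);

    /* the returned list points into "raw", so don't free it here */
    return namelist;
}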
* * - * Portions Copyright (c) 2000-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 2000-2018, PostgreSQL Global Development Group * * src/include/windowapi.h * diff --git a/src/interfaces/ecpg/compatlib/.gitignore b/src/interfaces/ecpg/compatlib/.gitignore index 6eb8a0dc06..926385c6b9 100644 --- a/src/interfaces/ecpg/compatlib/.gitignore +++ b/src/interfaces/ecpg/compatlib/.gitignore @@ -1,4 +1,3 @@ /compatlib.def /blibecpg_compatdll.def /exports.list -/snprintf.c diff --git a/src/interfaces/ecpg/compatlib/Makefile b/src/interfaces/ecpg/compatlib/Makefile index 04ddcfeab2..092e456f42 100644 --- a/src/interfaces/ecpg/compatlib/Makefile +++ b/src/interfaces/ecpg/compatlib/Makefile @@ -2,7 +2,7 @@ # # Makefile for ecpg compatibility library # -# Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group +# Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group # Portions Copyright (c) 1994, Regents of the University of California # # src/interfaces/ecpg/compatlib/Makefile @@ -22,16 +22,13 @@ override CPPFLAGS := -I../include -I$(top_srcdir)/src/interfaces/ecpg/include \ -I$(libpq_srcdir) -DFRONTEND $(CPPFLAGS) override CFLAGS += $(PTHREAD_CFLAGS) -SHLIB_LINK = -L../ecpglib -lecpg -L../pgtypeslib -lpgtypes $(libpq) \ - $(filter -lintl -lm, $(LIBS)) $(PTHREAD_LIBS) +SHLIB_LINK_INTERNAL = -L../ecpglib -lecpg -L../pgtypeslib -lpgtypes $(libpq_pgport_shlib) +SHLIB_LINK = $(filter -lintl -lm, $(LIBS)) $(PTHREAD_LIBS) SHLIB_PREREQS = submake-ecpglib submake-pgtypeslib SHLIB_EXPORTS = exports.txt -# Need to recompile any libpgport object files -LIBS := $(filter-out -lpgport, $(LIBS)) - -OBJS= informix.o $(filter snprintf.o, $(LIBOBJS)) $(WIN32RES) +OBJS= informix.o $(WIN32RES) PKG_CONFIG_REQUIRES_PRIVATE = libecpg libpgtypes @@ -48,9 +45,6 @@ submake-pgtypeslib: # Shared library stuff include $(top_srcdir)/src/Makefile.shlib -snprintf.c: % : $(top_srcdir)/src/port/% - rm -f $@ && $(LN_S) $< . 
- install: all installdirs install-lib installdirs: installdirs-lib @@ -58,6 +52,6 @@ installdirs: installdirs-lib uninstall: uninstall-lib clean distclean: clean-lib - rm -f $(OBJS) snprintf.c + rm -f $(OBJS) maintainer-clean: distclean maintainer-clean-lib diff --git a/src/interfaces/ecpg/compatlib/informix.c b/src/interfaces/ecpg/compatlib/informix.c index 2508ed9b8f..13058cf7bf 100644 --- a/src/interfaces/ecpg/compatlib/informix.c +++ b/src/interfaces/ecpg/compatlib/informix.c @@ -79,7 +79,7 @@ deccall2(decimal *arg1, decimal *arg2, int (*ptr) (numeric *, numeric *)) PGTYPESnumeric_free(a1); PGTYPESnumeric_free(a2); - return (i); + return i; } static int @@ -143,7 +143,7 @@ deccall3(decimal *arg1, decimal *arg2, decimal *result, int (*ptr) (numeric *, n PGTYPESnumeric_free(a1); PGTYPESnumeric_free(a2); - return (i); + return i; } /* we start with the numeric functions */ @@ -166,7 +166,7 @@ decadd(decimal *arg1, decimal *arg2, decimal *sum) int deccmp(decimal *arg1, decimal *arg2) { - return (deccall2(arg1, arg2, PGTYPESnumeric_cmp)); + return deccall2(arg1, arg2, PGTYPESnumeric_cmp); } void @@ -195,7 +195,7 @@ ecpg_strndup(const char *str, size_t len) } int -deccvasc(char *cp, int len, decimal *np) +deccvasc(const char *cp, int len, decimal *np) { char *str; int ret = 0; @@ -261,7 +261,7 @@ deccvdbl(double dbl, decimal *np) result = PGTYPESnumeric_to_decimal(nres, np); PGTYPESnumeric_free(nres); - return (result); + return result; } int @@ -283,7 +283,7 @@ deccvint(int in, decimal *np) result = PGTYPESnumeric_to_decimal(nres, np); PGTYPESnumeric_free(nres); - return (result); + return result; } int @@ -305,7 +305,7 @@ deccvlong(long lng, decimal *np) result = PGTYPESnumeric_to_decimal(nres, np); PGTYPESnumeric_free(nres); - return (result); + return result; } int @@ -520,7 +520,7 @@ rdatestr(date d, char *str) * */ int -rstrdate(char *str, date * d) +rstrdate(const char *str, date * d) { return rdefmtdate(d, "mm/dd/yyyy", str); } @@ -545,7 +545,7 @@ rjulmdy(date d, short mdy[3]) } int -rdefmtdate(date * d, char *fmt, char *str) +rdefmtdate(date * d, const char *fmt, const char *str) { /* TODO: take care of DBCENTURY environment variable */ /* PGSQL functions allow all centuries */ @@ -571,7 +571,7 @@ rdefmtdate(date * d, char *fmt, char *str) } int -rfmtdate(date d, char *fmt, char *str) +rfmtdate(date d, const char *fmt, char *str) { errno = 0; if (PGTYPESdate_fmt_asc(d, fmt, str) == 0) @@ -598,7 +598,7 @@ rmdyjul(short mdy[3], date * d) int rdayofweek(date d) { - return (PGTYPESdate_dayofweek(d)); + return PGTYPESdate_dayofweek(d); } /* And the datetime stuff */ @@ -747,7 +747,7 @@ initValue(long lng_val) /* return the position oft the right-most dot in some string */ static int -getRightMostDot(char *str) +getRightMostDot(const char *str) { size_t len = strlen(str); int i, @@ -765,7 +765,7 @@ getRightMostDot(char *str) /* And finally some misc functions */ int -rfmtlong(long lng_val, char *fmt, char *outbuf) +rfmtlong(long lng_val, const char *fmt, char *outbuf) { size_t fmt_len = strlen(fmt); size_t temp_len; @@ -1047,7 +1047,7 @@ rsetnull(int t, char *ptr) } int -risnull(int t, char *ptr) +risnull(int t, const char *ptr) { - return (ECPGis_noind_null(t, ptr)); + return ECPGis_noind_null(t, ptr); } diff --git a/src/interfaces/ecpg/ecpglib/.gitignore b/src/interfaces/ecpg/ecpglib/.gitignore index 8ef6401dd0..f2bf3e7a4a 100644 --- a/src/interfaces/ecpg/ecpglib/.gitignore +++ b/src/interfaces/ecpg/ecpglib/.gitignore @@ -1,10 +1,3 @@ /ecpglib.def /blibecpgdll.def /exports.list 
-/path.c -/pgstrcasecmp.c -/snprintf.c -/strlcpy.c -/thread.c -/win32setlocale.c -/isinf.c diff --git a/src/interfaces/ecpg/ecpglib/Makefile b/src/interfaces/ecpg/ecpglib/Makefile index fbb14073ce..3b0f39c671 100644 --- a/src/interfaces/ecpg/ecpglib/Makefile +++ b/src/interfaces/ecpg/ecpglib/Makefile @@ -2,7 +2,7 @@ # # Makefile for ecpg library # -# Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group +# Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group # Portions Copyright (c) 1994, Regents of the University of California # # src/interfaces/ecpg/ecpglib/Makefile @@ -22,19 +22,11 @@ override CPPFLAGS := -I../include -I$(top_srcdir)/src/interfaces/ecpg/include \ -I$(libpq_srcdir) -I$(top_builddir)/src/port -DFRONTEND $(CPPFLAGS) override CFLAGS += $(PTHREAD_CFLAGS) -# Need to recompile any libpgport object files -LIBS := $(filter-out -lpgport, $(LIBS)) +OBJS= execute.o typename.o descriptor.o sqlda.o data.o error.o prepare.o \ + memory.o connect.o misc.o $(WIN32RES) -OBJS= execute.o typename.o descriptor.o sqlda.o data.o error.o prepare.o memory.o \ - connect.o misc.o path.o pgstrcasecmp.o \ - $(filter snprintf.o strlcpy.o win32setlocale.o isinf.o, $(LIBOBJS)) $(WIN32RES) - -# thread.c is needed only for non-WIN32 implementation of path.c -ifneq ($(PORTNAME), win32) -OBJS += thread.o -endif - -SHLIB_LINK = -L../pgtypeslib -lpgtypes $(libpq) $(filter -lintl -lm, $(LIBS)) $(PTHREAD_LIBS) +SHLIB_LINK_INTERNAL = -L../pgtypeslib -lpgtypes $(libpq_pgport_shlib) +SHLIB_LINK = $(filter -lintl -lm, $(LIBS)) $(PTHREAD_LIBS) SHLIB_PREREQS = submake-libpq submake-pgtypeslib SHLIB_EXPORTS = exports.txt @@ -50,16 +42,8 @@ submake-pgtypeslib: # Shared library stuff include $(top_srcdir)/src/Makefile.shlib -# We use some port modules verbatim, but since we need to -# compile with appropriate options to build a shared lib, we can't -# necessarily use the same object files as the backend uses. Instead, -# symlink the source files in here and build our own object file. - -path.c pgstrcasecmp.c snprintf.c strlcpy.c thread.c win32setlocale.c isinf.c: % : $(top_srcdir)/src/port/% - rm -f $@ && $(LN_S) $< . - +# Make dependency on pg_config_paths.h visible. 
misc.o: misc.c $(top_builddir)/src/port/pg_config_paths.h -path.o: path.c $(top_builddir)/src/port/pg_config_paths.h $(top_builddir)/src/port/pg_config_paths.h: $(MAKE) -C $(top_builddir)/src/port pg_config_paths.h @@ -72,6 +56,5 @@ uninstall: uninstall-lib clean distclean: clean-lib rm -f $(OBJS) - rm -f path.c pgstrcasecmp.c snprintf.c strlcpy.c thread.c win32setlocale.c isinf.c maintainer-clean: distclean maintainer-clean-lib diff --git a/src/interfaces/ecpg/ecpglib/connect.c b/src/interfaces/ecpg/ecpglib/connect.c index 0716abdd7e..71fa275363 100644 --- a/src/interfaces/ecpg/ecpglib/connect.c +++ b/src/interfaces/ecpg/ecpglib/connect.c @@ -67,7 +67,7 @@ ecpg_get_connection_nr(const char *connection_name) ret = con; } - return (ret); + return ret; } struct connection * @@ -106,7 +106,7 @@ ecpg_get_connection(const char *connection_name) #endif } - return (ret); + return ret; } static void @@ -168,7 +168,7 @@ ECPGsetcommit(int lineno, const char *mode, const char *connection_name) PGresult *results; if (!ecpg_init(con, connection_name, lineno)) - return (false); + return false; ecpg_log("ECPGsetcommit on line %d: action \"%s\"; connection \"%s\"\n", lineno, mode, con->name); @@ -204,7 +204,7 @@ ECPGsetconn(int lineno, const char *connection_name) struct connection *con = ecpg_get_connection(connection_name); if (!ecpg_init(con, connection_name, lineno)) - return (false); + return false; #ifdef ENABLE_THREAD_SAFETY pthread_setspecific(actual_connection_key, con); @@ -675,7 +675,7 @@ ECPGdisconnect(int lineno, const char *connection_name) { ecpg_raise(lineno, ECPG_OUT_OF_MEMORY, ECPG_SQLSTATE_ECPG_OUT_OF_MEMORY, NULL); - return (false); + return false; } #ifdef ENABLE_THREAD_SAFETY @@ -702,7 +702,7 @@ ECPGdisconnect(int lineno, const char *connection_name) #ifdef ENABLE_THREAD_SAFETY pthread_mutex_unlock(&connections_mutex); #endif - return (false); + return false; } else ecpg_finish(con); diff --git a/src/interfaces/ecpg/ecpglib/data.c b/src/interfaces/ecpg/ecpglib/data.c index 5dbfded873..f3d326a50b 100644 --- a/src/interfaces/ecpg/ecpglib/data.c +++ b/src/interfaces/ecpg/ecpglib/data.c @@ -3,7 +3,6 @@ #define POSTGRES_ECPG_INTERNAL #include "postgres_fe.h" -#include #include #include "ecpgtype.h" @@ -44,7 +43,7 @@ array_boundary(enum ARRAY_TYPE isarray, char c) /* returns true if some garbage is found at the end of the scanned string */ static bool -garbage_left(enum ARRAY_TYPE isarray, char *scan_length, enum COMPAT_MODE compat) +garbage_left(enum ARRAY_TYPE isarray, char **scan_length, enum COMPAT_MODE compat) { /* * INFORMIX allows for selecting a numeric into an int, the result is @@ -52,13 +51,19 @@ garbage_left(enum ARRAY_TYPE isarray, char *scan_length, enum COMPAT_MODE compat */ if (isarray == ECPG_ARRAY_NONE) { - if (INFORMIX_MODE(compat) && *scan_length == '.') - return false; + if (INFORMIX_MODE(compat) && **scan_length == '.') + { + /* skip invalid characters */ + do + { + (*scan_length)++; + } while (isdigit((unsigned char) **scan_length)); + } - if (*scan_length != ' ' && *scan_length != '\0') + if (**scan_length != ' ' && **scan_length != '\0') return true; } - else if (ECPG_IS_ARRAY(isarray) && !array_delimiter(isarray, *scan_length) && !array_boundary(isarray, *scan_length)) + else if (ECPG_IS_ARRAY(isarray) && !array_delimiter(isarray, **scan_length) && !array_boundary(isarray, **scan_length)) return true; return false; @@ -134,7 +139,7 @@ ecpg_get_data(const PGresult *results, int act_tuple, int act_field, int lineno, { ecpg_raise(lineno, ECPG_OUT_OF_MEMORY, 
ECPG_SQLSTATE_ECPG_OUT_OF_MEMORY, NULL); - return (false); + return false; } /* @@ -156,7 +161,7 @@ ecpg_get_data(const PGresult *results, int act_tuple, int act_field, int lineno, * at least one tuple, but let's play it safe. */ ecpg_raise(lineno, ECPG_NOT_FOUND, ECPG_SQLSTATE_NO_DATA, NULL); - return (false); + return false; } /* We will have to decode the value */ @@ -204,7 +209,7 @@ ecpg_get_data(const PGresult *results, int act_tuple, int act_field, int lineno, ecpg_raise(lineno, ECPG_MISSING_INDICATOR, ECPG_SQLSTATE_NULL_VALUE_NO_INDICATOR_PARAMETER, NULL); - return (false); + return false; } } break; @@ -212,12 +217,12 @@ ecpg_get_data(const PGresult *results, int act_tuple, int act_field, int lineno, ecpg_raise(lineno, ECPG_UNSUPPORTED, ECPG_SQLSTATE_ECPG_INTERNAL_ERROR, ecpg_type_name(ind_type)); - return (false); + return false; break; } if (value_for_indicator == -1) - return (true); + return true; /* let's check if it really is an array if it should be one */ if (isarray == ECPG_ARRAY_ARRAY) @@ -226,7 +231,7 @@ ecpg_get_data(const PGresult *results, int act_tuple, int act_field, int lineno, { ecpg_raise(lineno, ECPG_DATA_NOT_ARRAY, ECPG_SQLSTATE_DATATYPE_MISMATCH, NULL); - return (false); + return false; } switch (type) @@ -303,11 +308,11 @@ ecpg_get_data(const PGresult *results, int act_tuple, int act_field, int lineno, case ECPGt_int: case ECPGt_long: res = strtol(pval, &scan_length, 10); - if (garbage_left(isarray, scan_length, compat)) + if (garbage_left(isarray, &scan_length, compat)) { ecpg_raise(lineno, ECPG_INT_FORMAT, ECPG_SQLSTATE_DATATYPE_MISMATCH, pval); - return (false); + return false; } pval = scan_length; @@ -332,11 +337,11 @@ ecpg_get_data(const PGresult *results, int act_tuple, int act_field, int lineno, case ECPGt_unsigned_int: case ECPGt_unsigned_long: ures = strtoul(pval, &scan_length, 10); - if (garbage_left(isarray, scan_length, compat)) + if (garbage_left(isarray, &scan_length, compat)) { ecpg_raise(lineno, ECPG_UINT_FORMAT, ECPG_SQLSTATE_DATATYPE_MISMATCH, pval); - return (false); + return false; } pval = scan_length; @@ -361,10 +366,10 @@ ecpg_get_data(const PGresult *results, int act_tuple, int act_field, int lineno, #ifdef HAVE_STRTOLL case ECPGt_long_long: *((long long int *) (var + offset * act_tuple)) = strtoll(pval, &scan_length, 10); - if (garbage_left(isarray, scan_length, compat)) + if (garbage_left(isarray, &scan_length, compat)) { ecpg_raise(lineno, ECPG_INT_FORMAT, ECPG_SQLSTATE_DATATYPE_MISMATCH, pval); - return (false); + return false; } pval = scan_length; @@ -373,10 +378,10 @@ ecpg_get_data(const PGresult *results, int act_tuple, int act_field, int lineno, #ifdef HAVE_STRTOULL case ECPGt_unsigned_long_long: *((unsigned long long int *) (var + offset * act_tuple)) = strtoull(pval, &scan_length, 10); - if (garbage_left(isarray, scan_length, compat)) + if (garbage_left(isarray, &scan_length, compat)) { ecpg_raise(lineno, ECPG_UINT_FORMAT, ECPG_SQLSTATE_DATATYPE_MISMATCH, pval); - return (false); + return false; } pval = scan_length; @@ -395,11 +400,12 @@ ecpg_get_data(const PGresult *results, int act_tuple, int act_field, int lineno, if (isarray && *scan_length == '"') scan_length++; - if (garbage_left(isarray, scan_length, compat)) + /* no special INFORMIX treatment for floats */ + if (garbage_left(isarray, &scan_length, ECPG_COMPAT_PGSQL)) { ecpg_raise(lineno, ECPG_FLOAT_FORMAT, ECPG_SQLSTATE_DATATYPE_MISMATCH, pval); - return (false); + return false; } pval = scan_length; @@ -438,7 +444,7 @@ ecpg_get_data(const PGresult *results, int 
act_tuple, int act_field, int lineno, ecpg_raise(lineno, ECPG_CONVERT_BOOL, ECPG_SQLSTATE_DATATYPE_MISMATCH, pval); - return (false); + return false; break; case ECPGt_char: @@ -457,7 +463,52 @@ ecpg_get_data(const PGresult *results, int act_tuple, int act_field, int lineno, if (varcharsize == 0 || varcharsize > size) { - strncpy(str, pval, size + 1); + /* + * compatibility mode, blank pad and null + * terminate char array + */ + if (ORACLE_MODE(compat) && (type == ECPGt_char || type == ECPGt_unsigned_char)) + { + memset(str, ' ', varcharsize); + memcpy(str, pval, size); + str[varcharsize - 1] = '\0'; + + /* + * compatibility mode empty string gets -1 + * indicator but no warning + */ + if (size == 0) + { + /* truncation */ + switch (ind_type) + { + case ECPGt_short: + case ECPGt_unsigned_short: + *((short *) (ind + ind_offset * act_tuple)) = -1; + break; + case ECPGt_int: + case ECPGt_unsigned_int: + *((int *) (ind + ind_offset * act_tuple)) = -1; + break; + case ECPGt_long: + case ECPGt_unsigned_long: + *((long *) (ind + ind_offset * act_tuple)) = -1; + break; +#ifdef HAVE_LONG_LONG_INT + case ECPGt_long_long: + case ECPGt_unsigned_long_long: + *((long long int *) (ind + ind_offset * act_tuple)) = -1; + break; +#endif /* HAVE_LONG_LONG_INT */ + default: + break; + } + } + } + else + { + strncpy(str, pval, size + 1); + } /* do the rtrim() */ if (type == ECPGt_string) { @@ -474,7 +525,14 @@ ecpg_get_data(const PGresult *results, int act_tuple, int act_field, int lineno, { strncpy(str, pval, varcharsize); - if (varcharsize < size) + /* compatibility mode, null terminate char array */ + if (ORACLE_MODE(compat) && (varcharsize - 1) < size) + { + if (type == ECPGt_char || type == ECPGt_unsigned_char) + str[varcharsize - 1] = '\0'; + } + + if (varcharsize < size || (ORACLE_MODE(compat) && (varcharsize - 1) < size)) { /* truncation */ switch (ind_type) @@ -581,24 +639,24 @@ ecpg_get_data(const PGresult *results, int act_tuple, int act_field, int lineno, { ecpg_raise(lineno, ECPG_OUT_OF_MEMORY, ECPG_SQLSTATE_ECPG_OUT_OF_MEMORY, NULL); - return (false); + return false; } } else { ecpg_raise(lineno, ECPG_NUMERIC_FORMAT, ECPG_SQLSTATE_DATATYPE_MISMATCH, pval); - return (false); + return false; } } else { - if (!isarray && garbage_left(isarray, scan_length, compat)) + if (!isarray && garbage_left(isarray, &scan_length, compat)) { free(nres); ecpg_raise(lineno, ECPG_NUMERIC_FORMAT, ECPG_SQLSTATE_DATATYPE_MISMATCH, pval); - return (false); + return false; } } pval = scan_length; @@ -635,7 +693,7 @@ ecpg_get_data(const PGresult *results, int act_tuple, int act_field, int lineno, */ ires = (interval *) ecpg_alloc(sizeof(interval), lineno); if (!ires) - return (false); + return false; ECPGset_noind_null(ECPGt_interval, ires); } @@ -643,7 +701,7 @@ ecpg_get_data(const PGresult *results, int act_tuple, int act_field, int lineno, { ecpg_raise(lineno, ECPG_INTERVAL_FORMAT, ECPG_SQLSTATE_DATATYPE_MISMATCH, pval); - return (false); + return false; } } else @@ -651,12 +709,12 @@ ecpg_get_data(const PGresult *results, int act_tuple, int act_field, int lineno, if (*scan_length == '"') scan_length++; - if (!isarray && garbage_left(isarray, scan_length, compat)) + if (!isarray && garbage_left(isarray, &scan_length, compat)) { free(ires); ecpg_raise(lineno, ECPG_INTERVAL_FORMAT, ECPG_SQLSTATE_DATATYPE_MISMATCH, pval); - return (false); + return false; } } pval = scan_length; @@ -693,7 +751,7 @@ ecpg_get_data(const PGresult *results, int act_tuple, int act_field, int lineno, { ecpg_raise(lineno, ECPG_DATE_FORMAT, 
ECPG_SQLSTATE_DATATYPE_MISMATCH, pval); - return (false); + return false; } } else @@ -701,11 +759,11 @@ ecpg_get_data(const PGresult *results, int act_tuple, int act_field, int lineno, if (*scan_length == '"') scan_length++; - if (!isarray && garbage_left(isarray, scan_length, compat)) + if (!isarray && garbage_left(isarray, &scan_length, compat)) { ecpg_raise(lineno, ECPG_DATE_FORMAT, ECPG_SQLSTATE_DATATYPE_MISMATCH, pval); - return (false); + return false; } } @@ -741,7 +799,7 @@ ecpg_get_data(const PGresult *results, int act_tuple, int act_field, int lineno, { ecpg_raise(lineno, ECPG_TIMESTAMP_FORMAT, ECPG_SQLSTATE_DATATYPE_MISMATCH, pval); - return (false); + return false; } } else @@ -749,11 +807,11 @@ ecpg_get_data(const PGresult *results, int act_tuple, int act_field, int lineno, if (*scan_length == '"') scan_length++; - if (!isarray && garbage_left(isarray, scan_length, compat)) + if (!isarray && garbage_left(isarray, &scan_length, compat)) { ecpg_raise(lineno, ECPG_TIMESTAMP_FORMAT, ECPG_SQLSTATE_DATATYPE_MISMATCH, pval); - return (false); + return false; } } @@ -765,7 +823,7 @@ ecpg_get_data(const PGresult *results, int act_tuple, int act_field, int lineno, ecpg_raise(lineno, ECPG_UNSUPPORTED, ECPG_SQLSTATE_ECPG_INTERNAL_ERROR, ecpg_type_name(type)); - return (false); + return false; break; } if (ECPG_IS_ARRAY(isarray)) @@ -791,5 +849,5 @@ ecpg_get_data(const PGresult *results, int act_tuple, int act_field, int lineno, } } while (*pval != '\0' && !array_boundary(isarray, *pval)); - return (true); + return true; } diff --git a/src/interfaces/ecpg/ecpglib/descriptor.c b/src/interfaces/ecpg/ecpglib/descriptor.c index 1fa00b892f..8fdf5606c0 100644 --- a/src/interfaces/ecpg/ecpglib/descriptor.c +++ b/src/interfaces/ecpg/ecpglib/descriptor.c @@ -5,7 +5,8 @@ #define POSTGRES_ECPG_INTERNAL #include "postgres_fe.h" -#include "pg_type.h" + +#include "catalog/pg_type_d.h" #include "ecpg-pthread-win32.h" #include "ecpgtype.h" @@ -150,10 +151,10 @@ get_int_item(int lineno, void *var, enum ECPGttype vartype, int value) break; default: ecpg_raise(lineno, ECPG_VAR_NOT_NUMERIC, ECPG_SQLSTATE_RESTRICTED_DATA_TYPE_ATTRIBUTE_VIOLATION, NULL); - return (false); + return false; } - return (true); + return true; } static bool @@ -195,7 +196,7 @@ set_int_item(int lineno, int *target, const void *var, enum ECPGttype vartype) break; default: ecpg_raise(lineno, ECPG_VAR_NOT_NUMERIC, ECPG_SQLSTATE_RESTRICTED_DATA_TYPE_ATTRIBUTE_VIOLATION, NULL); - return (false); + return false; } return true; @@ -217,7 +218,7 @@ get_char_item(int lineno, void *var, enum ECPGttype vartype, char *value, int va (struct ECPGgeneric_varchar *) var; if (varcharsize == 0) - strncpy(variable->arr, value, strlen(value)); + memcpy(variable->arr, value, strlen(value)); else strncpy(variable->arr, value, varcharsize); @@ -228,17 +229,17 @@ get_char_item(int lineno, void *var, enum ECPGttype vartype, char *value, int va break; default: ecpg_raise(lineno, ECPG_VAR_NOT_CHAR, ECPG_SQLSTATE_RESTRICTED_DATA_TYPE_ATTRIBUTE_VIOLATION, NULL); - return (false); + return false; } - return (true); + return true; } #define RETURN_IF_NO_DATA if (ntuples < 1) \ { \ va_end(args); \ ecpg_raise(lineno, ECPG_NOT_FOUND, ECPG_SQLSTATE_NO_DATA, NULL); \ - return (false); \ + return false; \ } bool @@ -265,7 +266,7 @@ ECPGget_desc(int lineno, const char *desc_name, int index,...) 
if (!ECPGresult) { va_end(args); - return (false); + return false; } ntuples = PQntuples(ECPGresult); @@ -274,7 +275,7 @@ ECPGget_desc(int lineno, const char *desc_name, int index,...) { ecpg_raise(lineno, ECPG_INVALID_DESCRIPTOR_INDEX, ECPG_SQLSTATE_INVALID_DESCRIPTOR_INDEX, NULL); va_end(args); - return (false); + return false; } ecpg_log("ECPGget_desc: reading items for tuple %d\n", index); @@ -333,7 +334,7 @@ ECPGget_desc(int lineno, const char *desc_name, int index,...) if (!get_char_item(lineno, var, vartype, PQfname(ECPGresult, index), varcharsize)) { va_end(args); - return (false); + return false; } ecpg_log("ECPGget_desc: NAME = %s\n", PQfname(ECPGresult, index)); @@ -343,7 +344,7 @@ ECPGget_desc(int lineno, const char *desc_name, int index,...) if (!get_int_item(lineno, var, vartype, 1)) { va_end(args); - return (false); + return false; } break; @@ -352,7 +353,7 @@ ECPGget_desc(int lineno, const char *desc_name, int index,...) if (!get_int_item(lineno, var, vartype, 0)) { va_end(args); - return (false); + return false; } break; @@ -361,7 +362,7 @@ ECPGget_desc(int lineno, const char *desc_name, int index,...) if (!get_int_item(lineno, var, vartype, (PQfmod(ECPGresult, index) - VARHDRSZ) & 0xffff)) { va_end(args); - return (false); + return false; } ecpg_log("ECPGget_desc: SCALE = %d\n", (PQfmod(ECPGresult, index) - VARHDRSZ) & 0xffff); @@ -371,7 +372,7 @@ ECPGget_desc(int lineno, const char *desc_name, int index,...) if (!get_int_item(lineno, var, vartype, PQfmod(ECPGresult, index) >> 16)) { va_end(args); - return (false); + return false; } ecpg_log("ECPGget_desc: PRECISION = %d\n", PQfmod(ECPGresult, index) >> 16); @@ -381,7 +382,7 @@ ECPGget_desc(int lineno, const char *desc_name, int index,...) if (!get_int_item(lineno, var, vartype, PQfsize(ECPGresult, index))) { va_end(args); - return (false); + return false; } ecpg_log("ECPGget_desc: OCTET_LENGTH = %d\n", PQfsize(ECPGresult, index)); @@ -391,7 +392,7 @@ ECPGget_desc(int lineno, const char *desc_name, int index,...) if (!get_int_item(lineno, var, vartype, PQfmod(ECPGresult, index) - VARHDRSZ)) { va_end(args); - return (false); + return false; } ecpg_log("ECPGget_desc: LENGTH = %d\n", PQfmod(ECPGresult, index) - VARHDRSZ); @@ -401,7 +402,7 @@ ECPGget_desc(int lineno, const char *desc_name, int index,...) if (!get_int_item(lineno, var, vartype, ecpg_dynamic_type(PQftype(ECPGresult, index)))) { va_end(args); - return (false); + return false; } ecpg_log("ECPGget_desc: TYPE = %d\n", ecpg_dynamic_type(PQftype(ECPGresult, index))); @@ -411,7 +412,7 @@ ECPGget_desc(int lineno, const char *desc_name, int index,...) if (!get_int_item(lineno, var, vartype, ecpg_dynamic_type_DDT(PQftype(ECPGresult, index)))) { va_end(args); - return (false); + return false; } ecpg_log("ECPGget_desc: TYPE = %d\n", ecpg_dynamic_type_DDT(PQftype(ECPGresult, index))); @@ -421,7 +422,7 @@ ECPGget_desc(int lineno, const char *desc_name, int index,...) if (!get_int_item(lineno, var, vartype, PQntuples(ECPGresult))) { va_end(args); - return (false); + return false; } ecpg_log("ECPGget_desc: CARDINALITY = %d\n", PQntuples(ECPGresult)); @@ -462,7 +463,7 @@ ECPGget_desc(int lineno, const char *desc_name, int index,...) if (!get_int_item(lineno, var, vartype, PQgetlength(ECPGresult, act_tuple, index))) { va_end(args); - return (false); + return false; } var = (char *) var + offset; ecpg_log("ECPGget_desc: RETURNED[%d] = %d\n", act_tuple, PQgetlength(ECPGresult, act_tuple, index)); @@ -473,7 +474,7 @@ ECPGget_desc(int lineno, const char *desc_name, int index,...) 
snprintf(type_str, sizeof(type_str), "%d", type); ecpg_raise(lineno, ECPG_UNKNOWN_DESCRIPTOR_ITEM, ECPG_SQLSTATE_ECPG_INTERNAL_ERROR, type_str); va_end(args); - return (false); + return false; } type = va_arg(args, enum ECPGdtype); @@ -539,7 +540,7 @@ ECPGget_desc(int lineno, const char *desc_name, int index,...) if (!get_int_item(lineno, data_var.ind_value, data_var.ind_type, -PQgetisnull(ECPGresult, act_tuple, index))) { va_end(args); - return (false); + return false; } data_var.ind_value = (char *) data_var.ind_value + data_var.ind_offset; ecpg_log("ECPGget_desc: INDICATOR[%d] = %d\n", act_tuple, -PQgetisnull(ECPGresult, act_tuple, index)); @@ -547,7 +548,7 @@ ECPGget_desc(int lineno, const char *desc_name, int index,...) } sqlca->sqlerrd[2] = ntuples; va_end(args); - return (true); + return true; } #undef RETURN_IF_NO_DATA diff --git a/src/interfaces/ecpg/ecpglib/error.c b/src/interfaces/ecpg/ecpglib/error.c index 77d6cc2dae..f34ae4afb8 100644 --- a/src/interfaces/ecpg/ecpglib/error.c +++ b/src/interfaces/ecpg/ecpglib/error.c @@ -286,23 +286,23 @@ ecpg_check_PQresult(PGresult *results, int lineno, PGconn *connection, enum COMP { ecpg_log("ecpg_check_PQresult on line %d: no result - %s", lineno, PQerrorMessage(connection)); ecpg_raise_backend(lineno, NULL, connection, compat); - return (false); + return false; } switch (PQresultStatus(results)) { case PGRES_TUPLES_OK: - return (true); + return true; break; case PGRES_EMPTY_QUERY: /* do nothing */ ecpg_raise(lineno, ECPG_EMPTY, ECPG_SQLSTATE_ECPG_INTERNAL_ERROR, NULL); PQclear(results); - return (false); + return false; break; case PGRES_COMMAND_OK: - return (true); + return true; break; case PGRES_NONFATAL_ERROR: case PGRES_FATAL_ERROR: @@ -310,23 +310,23 @@ ecpg_check_PQresult(PGresult *results, int lineno, PGconn *connection, enum COMP ecpg_log("ecpg_check_PQresult on line %d: bad response - %s", lineno, PQresultErrorMessage(results)); ecpg_raise_backend(lineno, results, connection, compat); PQclear(results); - return (false); + return false; break; case PGRES_COPY_OUT: - return (true); + return true; break; case PGRES_COPY_IN: ecpg_log("ecpg_check_PQresult on line %d: COPY IN data transfer in progress\n", lineno); PQendcopy(connection); PQclear(results); - return (false); + return false; break; default: ecpg_log("ecpg_check_PQresult on line %d: unknown execution status type\n", lineno); ecpg_raise_backend(lineno, results, connection, compat); PQclear(results); - return (false); + return false; break; } } diff --git a/src/interfaces/ecpg/ecpglib/execute.c b/src/interfaces/ecpg/ecpglib/execute.c index 03c55d3593..42640ba9be 100644 --- a/src/interfaces/ecpg/ecpglib/execute.c +++ b/src/interfaces/ecpg/ecpglib/execute.c @@ -16,10 +16,9 @@ #define POSTGRES_ECPG_INTERNAL #include "postgres_fe.h" -#include #include -#include "pg_type.h" +#include "catalog/pg_type_d.h" #include "ecpgtype.h" #include "ecpglib.h" @@ -58,7 +57,7 @@ quote_postgres(char *arg, bool quote, int lineno) buffer_len = 2 * length + 1; res = (char *) ecpg_alloc(buffer_len + 3, lineno); if (!res) - return (res); + return res; escaped_len = PQescapeString(res + 1, arg, buffer_len); if (length == escaped_len) { @@ -108,14 +107,14 @@ free_statement(struct statement *stmt) } static int -next_insert(char *text, int pos, bool questionmarks) +next_insert(char *text, int pos, bool questionmarks, bool std_strings) { bool string = false; int p = pos; for (; text[p] != '\0'; p++) { - if (text[p] == '\\') /* escape character */ + if (string && !std_strings && text[p] == '\\') /* 
escape character */ p++; else if (text[p] == '\'') string = string ? false : true; @@ -151,13 +150,13 @@ ecpg_type_infocache_push(struct ECPGtype_information_cache **cache, int oid, enu = (struct ECPGtype_information_cache *) ecpg_alloc(sizeof(struct ECPGtype_information_cache), lineno); if (new_entry == NULL) - return (false); + return false; new_entry->oid = oid; new_entry->isarray = isarray; new_entry->next = *cache; *cache = new_entry; - return (true); + return true; } static enum ARRAY_TYPE @@ -178,89 +177,83 @@ ecpg_is_type_an_array(int type, const struct statement *stmt, const struct varia /* populate cache with well known types to speed things up */ if (!ecpg_type_infocache_push(&(stmt->connection->cache_head), BOOLOID, ECPG_ARRAY_NONE, stmt->lineno)) - return (ECPG_ARRAY_ERROR); + return ECPG_ARRAY_ERROR; if (!ecpg_type_infocache_push(&(stmt->connection->cache_head), BYTEAOID, ECPG_ARRAY_NONE, stmt->lineno)) - return (ECPG_ARRAY_ERROR); + return ECPG_ARRAY_ERROR; if (!ecpg_type_infocache_push(&(stmt->connection->cache_head), CHAROID, ECPG_ARRAY_NONE, stmt->lineno)) - return (ECPG_ARRAY_ERROR); + return ECPG_ARRAY_ERROR; if (!ecpg_type_infocache_push(&(stmt->connection->cache_head), NAMEOID, not_an_array_in_ecpg, stmt->lineno)) - return (ECPG_ARRAY_ERROR); + return ECPG_ARRAY_ERROR; if (!ecpg_type_infocache_push(&(stmt->connection->cache_head), INT8OID, ECPG_ARRAY_NONE, stmt->lineno)) - return (ECPG_ARRAY_ERROR); + return ECPG_ARRAY_ERROR; if (!ecpg_type_infocache_push(&(stmt->connection->cache_head), INT2OID, ECPG_ARRAY_NONE, stmt->lineno)) - return (ECPG_ARRAY_ERROR); + return ECPG_ARRAY_ERROR; if (!ecpg_type_infocache_push(&(stmt->connection->cache_head), INT2VECTOROID, ECPG_ARRAY_VECTOR, stmt->lineno)) - return (ECPG_ARRAY_ERROR); + return ECPG_ARRAY_ERROR; if (!ecpg_type_infocache_push(&(stmt->connection->cache_head), INT4OID, ECPG_ARRAY_NONE, stmt->lineno)) - return (ECPG_ARRAY_ERROR); + return ECPG_ARRAY_ERROR; if (!ecpg_type_infocache_push(&(stmt->connection->cache_head), REGPROCOID, ECPG_ARRAY_NONE, stmt->lineno)) - return (ECPG_ARRAY_ERROR); + return ECPG_ARRAY_ERROR; if (!ecpg_type_infocache_push(&(stmt->connection->cache_head), TEXTOID, ECPG_ARRAY_NONE, stmt->lineno)) - return (ECPG_ARRAY_ERROR); + return ECPG_ARRAY_ERROR; if (!ecpg_type_infocache_push(&(stmt->connection->cache_head), OIDOID, ECPG_ARRAY_NONE, stmt->lineno)) - return (ECPG_ARRAY_ERROR); + return ECPG_ARRAY_ERROR; if (!ecpg_type_infocache_push(&(stmt->connection->cache_head), TIDOID, ECPG_ARRAY_NONE, stmt->lineno)) - return (ECPG_ARRAY_ERROR); + return ECPG_ARRAY_ERROR; if (!ecpg_type_infocache_push(&(stmt->connection->cache_head), XIDOID, ECPG_ARRAY_NONE, stmt->lineno)) - return (ECPG_ARRAY_ERROR); + return ECPG_ARRAY_ERROR; if (!ecpg_type_infocache_push(&(stmt->connection->cache_head), CIDOID, ECPG_ARRAY_NONE, stmt->lineno)) - return (ECPG_ARRAY_ERROR); + return ECPG_ARRAY_ERROR; if (!ecpg_type_infocache_push(&(stmt->connection->cache_head), OIDVECTOROID, ECPG_ARRAY_VECTOR, stmt->lineno)) - return (ECPG_ARRAY_ERROR); + return ECPG_ARRAY_ERROR; if (!ecpg_type_infocache_push(&(stmt->connection->cache_head), POINTOID, ECPG_ARRAY_VECTOR, stmt->lineno)) - return (ECPG_ARRAY_ERROR); + return ECPG_ARRAY_ERROR; if (!ecpg_type_infocache_push(&(stmt->connection->cache_head), LSEGOID, ECPG_ARRAY_VECTOR, stmt->lineno)) - return (ECPG_ARRAY_ERROR); + return ECPG_ARRAY_ERROR; if (!ecpg_type_infocache_push(&(stmt->connection->cache_head), PATHOID, ECPG_ARRAY_NONE, stmt->lineno)) - return (ECPG_ARRAY_ERROR); + return 
ECPG_ARRAY_ERROR; if (!ecpg_type_infocache_push(&(stmt->connection->cache_head), BOXOID, ECPG_ARRAY_VECTOR, stmt->lineno)) - return (ECPG_ARRAY_ERROR); + return ECPG_ARRAY_ERROR; if (!ecpg_type_infocache_push(&(stmt->connection->cache_head), POLYGONOID, ECPG_ARRAY_NONE, stmt->lineno)) - return (ECPG_ARRAY_ERROR); + return ECPG_ARRAY_ERROR; if (!ecpg_type_infocache_push(&(stmt->connection->cache_head), LINEOID, ECPG_ARRAY_VECTOR, stmt->lineno)) - return (ECPG_ARRAY_ERROR); + return ECPG_ARRAY_ERROR; if (!ecpg_type_infocache_push(&(stmt->connection->cache_head), FLOAT4OID, ECPG_ARRAY_NONE, stmt->lineno)) - return (ECPG_ARRAY_ERROR); + return ECPG_ARRAY_ERROR; if (!ecpg_type_infocache_push(&(stmt->connection->cache_head), FLOAT8OID, ECPG_ARRAY_NONE, stmt->lineno)) - return (ECPG_ARRAY_ERROR); - if (!ecpg_type_infocache_push(&(stmt->connection->cache_head), ABSTIMEOID, ECPG_ARRAY_NONE, stmt->lineno)) - return (ECPG_ARRAY_ERROR); - if (!ecpg_type_infocache_push(&(stmt->connection->cache_head), RELTIMEOID, ECPG_ARRAY_NONE, stmt->lineno)) - return (ECPG_ARRAY_ERROR); - if (!ecpg_type_infocache_push(&(stmt->connection->cache_head), TINTERVALOID, ECPG_ARRAY_NONE, stmt->lineno)) - return (ECPG_ARRAY_ERROR); + return ECPG_ARRAY_ERROR; if (!ecpg_type_infocache_push(&(stmt->connection->cache_head), UNKNOWNOID, ECPG_ARRAY_NONE, stmt->lineno)) - return (ECPG_ARRAY_ERROR); + return ECPG_ARRAY_ERROR; if (!ecpg_type_infocache_push(&(stmt->connection->cache_head), CIRCLEOID, ECPG_ARRAY_NONE, stmt->lineno)) - return (ECPG_ARRAY_ERROR); + return ECPG_ARRAY_ERROR; if (!ecpg_type_infocache_push(&(stmt->connection->cache_head), CASHOID, ECPG_ARRAY_NONE, stmt->lineno)) - return (ECPG_ARRAY_ERROR); + return ECPG_ARRAY_ERROR; if (!ecpg_type_infocache_push(&(stmt->connection->cache_head), INETOID, ECPG_ARRAY_NONE, stmt->lineno)) - return (ECPG_ARRAY_ERROR); + return ECPG_ARRAY_ERROR; if (!ecpg_type_infocache_push(&(stmt->connection->cache_head), CIDROID, ECPG_ARRAY_NONE, stmt->lineno)) - return (ECPG_ARRAY_ERROR); + return ECPG_ARRAY_ERROR; if (!ecpg_type_infocache_push(&(stmt->connection->cache_head), BPCHAROID, ECPG_ARRAY_NONE, stmt->lineno)) - return (ECPG_ARRAY_ERROR); + return ECPG_ARRAY_ERROR; if (!ecpg_type_infocache_push(&(stmt->connection->cache_head), VARCHAROID, ECPG_ARRAY_NONE, stmt->lineno)) - return (ECPG_ARRAY_ERROR); + return ECPG_ARRAY_ERROR; if (!ecpg_type_infocache_push(&(stmt->connection->cache_head), DATEOID, ECPG_ARRAY_NONE, stmt->lineno)) - return (ECPG_ARRAY_ERROR); + return ECPG_ARRAY_ERROR; if (!ecpg_type_infocache_push(&(stmt->connection->cache_head), TIMEOID, ECPG_ARRAY_NONE, stmt->lineno)) - return (ECPG_ARRAY_ERROR); + return ECPG_ARRAY_ERROR; if (!ecpg_type_infocache_push(&(stmt->connection->cache_head), TIMESTAMPOID, ECPG_ARRAY_NONE, stmt->lineno)) - return (ECPG_ARRAY_ERROR); + return ECPG_ARRAY_ERROR; if (!ecpg_type_infocache_push(&(stmt->connection->cache_head), TIMESTAMPTZOID, ECPG_ARRAY_NONE, stmt->lineno)) - return (ECPG_ARRAY_ERROR); + return ECPG_ARRAY_ERROR; if (!ecpg_type_infocache_push(&(stmt->connection->cache_head), INTERVALOID, ECPG_ARRAY_NONE, stmt->lineno)) - return (ECPG_ARRAY_ERROR); + return ECPG_ARRAY_ERROR; if (!ecpg_type_infocache_push(&(stmt->connection->cache_head), TIMETZOID, ECPG_ARRAY_NONE, stmt->lineno)) - return (ECPG_ARRAY_ERROR); - if (!ecpg_type_infocache_push(&(stmt->connection->cache_head), ZPBITOID, ECPG_ARRAY_NONE, stmt->lineno)) - return (ECPG_ARRAY_ERROR); + return ECPG_ARRAY_ERROR; + if (!ecpg_type_infocache_push(&(stmt->connection->cache_head), 
BITOID, ECPG_ARRAY_NONE, stmt->lineno)) + return ECPG_ARRAY_ERROR; if (!ecpg_type_infocache_push(&(stmt->connection->cache_head), VARBITOID, ECPG_ARRAY_NONE, stmt->lineno)) - return (ECPG_ARRAY_ERROR); + return ECPG_ARRAY_ERROR; if (!ecpg_type_infocache_push(&(stmt->connection->cache_head), NUMERICOID, ECPG_ARRAY_NONE, stmt->lineno)) - return (ECPG_ARRAY_ERROR); + return ECPG_ARRAY_ERROR; } for (cache_entry = (stmt->connection->cache_head); cache_entry != NULL; cache_entry = cache_entry->next) @@ -271,13 +264,13 @@ ecpg_is_type_an_array(int type, const struct statement *stmt, const struct varia array_query = (char *) ecpg_alloc(strlen("select typlen from pg_type where oid= and typelem<>0") + 11, stmt->lineno); if (array_query == NULL) - return (ECPG_ARRAY_ERROR); + return ECPG_ARRAY_ERROR; sprintf(array_query, "select typlen from pg_type where oid=%d and typelem<>0", type); query = PQexec(stmt->connection->connection, array_query); ecpg_free(array_query); if (!ecpg_check_PQresult(query, stmt->lineno, stmt->connection->connection, stmt->compat)) - return (ECPG_ARRAY_ERROR); + return ECPG_ARRAY_ERROR; else if (PQresultStatus(query) == PGRES_TUPLES_OK) { if (PQntuples(query) == 0) @@ -297,7 +290,7 @@ ecpg_is_type_an_array(int type, const struct statement *stmt, const struct varia PQclear(query); } else - return (ECPG_ARRAY_ERROR); + return ECPG_ARRAY_ERROR; ecpg_type_infocache_push(&(stmt->connection->cache_head), type, isarray, stmt->lineno); ecpg_log("ecpg_is_type_an_array on line %d: type (%d); C (%d); array (%s)\n", stmt->lineno, type, var->type, ECPG_IS_ARRAY(isarray) ? "yes" : "no"); @@ -1109,6 +1102,13 @@ ecpg_build_params(struct statement *stmt) struct variable *var; int desc_counter = 0; int position = 0; + const char *value; + bool std_strings = false; + + /* Get standard_conforming_strings setting. */ + value = PQparameterStatus(stmt->connection->connection, "standard_conforming_strings"); + if (value && strcmp(value, "on") == 0) + std_strings = true; /* * If the type is one of the fill in types then we take the argument and @@ -1299,11 +1299,11 @@ ecpg_build_params(struct statement *stmt) * now tobeinserted points to an area that contains the next * parameter; now find the position in the string where it belongs */ - if ((position = next_insert(stmt->command, position, stmt->questionmarks) + 1) == 0) + if ((position = next_insert(stmt->command, position, stmt->questionmarks, std_strings) + 1) == 0) { /* - * We have an argument but we dont have the matched up placeholder - * in the string + * We have an argument but we don't have the matched up + * placeholder in the string */ ecpg_raise(stmt->lineno, ECPG_TOO_MANY_ARGUMENTS, ECPG_SQLSTATE_USING_CLAUSE_DOES_NOT_MATCH_PARAMETERS, @@ -1386,7 +1386,7 @@ ecpg_build_params(struct statement *stmt) } /* Check if there are unmatched things left. 
*/ - if (next_insert(stmt->command, position, stmt->questionmarks) >= 0) + if (next_insert(stmt->command, position, stmt->questionmarks, std_strings) >= 0) { ecpg_raise(stmt->lineno, ECPG_TOO_FEW_ARGUMENTS, ECPG_SQLSTATE_USING_CLAUSE_DOES_NOT_MATCH_PARAMETERS, NULL); @@ -1486,7 +1486,7 @@ ecpg_process_output(struct statement *stmt, bool clear_result) { ecpg_raise(stmt->lineno, ECPG_OUT_OF_MEMORY, ECPG_SQLSTATE_ECPG_OUT_OF_MEMORY, NULL); - return (false); + return false; } var = stmt->outlist; @@ -1654,7 +1654,7 @@ ecpg_process_output(struct statement *stmt, bool clear_result) else if (!INFORMIX_MODE(stmt->compat)) { ecpg_raise(stmt->lineno, ECPG_TOO_FEW_ARGUMENTS, ECPG_SQLSTATE_USING_CLAUSE_DOES_NOT_MATCH_TARGETS, NULL); - return (false); + return false; } } @@ -1722,12 +1722,13 @@ ecpg_process_output(struct statement *stmt, bool clear_result) } /* check for asynchronous returns */ - notify = PQnotifies(stmt->connection->connection); - if (notify) + PQconsumeInput(stmt->connection->connection); + while ((notify = PQnotifies(stmt->connection->connection)) != NULL) { ecpg_log("ecpg_process_output on line %d: asynchronous notification of \"%s\" from backend PID %d received\n", stmt->lineno, notify->relname, notify->be_pid); PQfreemem(notify); + PQconsumeInput(stmt->connection->connection); } return status; @@ -1830,7 +1831,7 @@ ecpg_do_prologue(int lineno, const int compat, const int force_indicator, { ecpg_raise(lineno, ECPG_INVALID_STMT, ECPG_SQLSTATE_INVALID_SQL_STATEMENT_NAME, stmt->command); ecpg_do_epilogue(stmt); - return (false); + return false; } } diff --git a/src/interfaces/ecpg/ecpglib/extern.h b/src/interfaces/ecpg/ecpglib/extern.h index 91c7367b8b..a88f34106c 100644 --- a/src/interfaces/ecpg/ecpglib/extern.h +++ b/src/interfaces/ecpg/ecpglib/extern.h @@ -15,12 +15,13 @@ enum COMPAT_MODE { - ECPG_COMPAT_PGSQL = 0, ECPG_COMPAT_INFORMIX, ECPG_COMPAT_INFORMIX_SE + ECPG_COMPAT_PGSQL = 0, ECPG_COMPAT_INFORMIX, ECPG_COMPAT_INFORMIX_SE, ECPG_COMPAT_ORACLE }; extern bool ecpg_internal_regression_mode; #define INFORMIX_MODE(X) ((X) == ECPG_COMPAT_INFORMIX || (X) == ECPG_COMPAT_INFORMIX_SE) +#define ORACLE_MODE(X) ((X) == ECPG_COMPAT_ORACLE) enum ARRAY_TYPE { diff --git a/src/interfaces/ecpg/ecpglib/memory.c b/src/interfaces/ecpg/ecpglib/memory.c index a7268bb0f6..dc548a4cda 100644 --- a/src/interfaces/ecpg/ecpglib/memory.c +++ b/src/interfaces/ecpg/ecpglib/memory.c @@ -26,7 +26,7 @@ ecpg_alloc(long size, int lineno) return NULL; } - return (new); + return new; } char * @@ -40,7 +40,7 @@ ecpg_realloc(void *ptr, long size, int lineno) return NULL; } - return (new); + return new; } char * @@ -58,7 +58,7 @@ ecpg_strdup(const char *string, int lineno) return NULL; } - return (new); + return new; } /* keep a list of memory we allocated for the user */ diff --git a/src/interfaces/ecpg/ecpglib/misc.c b/src/interfaces/ecpg/ecpglib/misc.c index edd7302d54..be9cac6e7b 100644 --- a/src/interfaces/ecpg/ecpglib/misc.c +++ b/src/interfaces/ecpg/ecpglib/misc.c @@ -110,7 +110,7 @@ ecpg_init(const struct connection *con, const char *connection_name, const int l { ecpg_raise(lineno, ECPG_OUT_OF_MEMORY, ECPG_SQLSTATE_ECPG_OUT_OF_MEMORY, NULL); - return (false); + return false; } ecpg_init_sqlca(sqlca); @@ -118,10 +118,10 @@ ecpg_init(const struct connection *con, const char *connection_name, const int l { ecpg_raise(lineno, ECPG_NO_CONN, ECPG_SQLSTATE_CONNECTION_DOES_NOT_EXIST, connection_name ? 
connection_name : ecpg_gettext("NULL")); - return (false); + return false; } - return (true); + return true; } #ifdef ENABLE_THREAD_SAFETY @@ -155,9 +155,9 @@ ECPGget_sqlca(void) ecpg_init_sqlca(sqlca); pthread_setspecific(sqlca_key, sqlca); } - return (sqlca); + return sqlca; #else - return (&sqlca); + return &sqlca; #endif } @@ -167,7 +167,7 @@ ECPGstatus(int lineno, const char *connection_name) struct connection *con = ecpg_get_connection(connection_name); if (!ecpg_init(con, connection_name, lineno)) - return (false); + return false; /* are we connected? */ if (con->connection == NULL) @@ -176,7 +176,7 @@ ECPGstatus(int lineno, const char *connection_name) return false; } - return (true); + return true; } PGTransactionStatusType @@ -202,7 +202,7 @@ ECPGtrans(int lineno, const char *connection_name, const char *transaction) struct connection *con = ecpg_get_connection(connection_name); if (!ecpg_init(con, connection_name, lineno)) - return (false); + return false; ecpg_log("ECPGtrans on line %d: action \"%s\"; connection \"%s\"\n", lineno, transaction, con ? con->name : "null"); @@ -225,13 +225,13 @@ ECPGtrans(int lineno, const char *connection_name, const char *transaction) { res = PQexec(con->connection, "begin transaction"); if (!ecpg_check_PQresult(res, lineno, con->connection, ECPG_COMPAT_PGSQL)) - return FALSE; + return false; PQclear(res); } res = PQexec(con->connection, transaction); if (!ecpg_check_PQresult(res, lineno, con->connection, ECPG_COMPAT_PGSQL)) - return FALSE; + return false; PQclear(res); } @@ -375,7 +375,7 @@ ECPGset_noind_null(enum ECPGttype type, void *ptr) } static bool -_check(unsigned char *ptr, int length) +_check(const unsigned char *ptr, int length) { for (length--; length >= 0; length--) if (ptr[length] != 0xff) @@ -385,62 +385,62 @@ _check(unsigned char *ptr, int length) } bool -ECPGis_noind_null(enum ECPGttype type, void *ptr) +ECPGis_noind_null(enum ECPGttype type, const void *ptr) { switch (type) { case ECPGt_char: case ECPGt_unsigned_char: case ECPGt_string: - if (*((char *) ptr) == '\0') + if (*((const char *) ptr) == '\0') return true; break; case ECPGt_short: case ECPGt_unsigned_short: - if (*((short int *) ptr) == SHRT_MIN) + if (*((const short int *) ptr) == SHRT_MIN) return true; break; case ECPGt_int: case ECPGt_unsigned_int: - if (*((int *) ptr) == INT_MIN) + if (*((const int *) ptr) == INT_MIN) return true; break; case ECPGt_long: case ECPGt_unsigned_long: case ECPGt_date: - if (*((long *) ptr) == LONG_MIN) + if (*((const long *) ptr) == LONG_MIN) return true; break; #ifdef HAVE_LONG_LONG_INT case ECPGt_long_long: case ECPGt_unsigned_long_long: - if (*((long long *) ptr) == LONG_LONG_MIN) + if (*((const long long *) ptr) == LONG_LONG_MIN) return true; break; #endif /* HAVE_LONG_LONG_INT */ case ECPGt_float: - return (_check(ptr, sizeof(float))); + return _check(ptr, sizeof(float)); break; case ECPGt_double: - return (_check(ptr, sizeof(double))); + return _check(ptr, sizeof(double)); break; case ECPGt_varchar: - if (*(((struct ECPGgeneric_varchar *) ptr)->arr) == 0x00) + if (*(((const struct ECPGgeneric_varchar *) ptr)->arr) == 0x00) return true; break; case ECPGt_decimal: - if (((decimal *) ptr)->sign == NUMERIC_NULL) + if (((const decimal *) ptr)->sign == NUMERIC_NULL) return true; break; case ECPGt_numeric: - if (((numeric *) ptr)->sign == NUMERIC_NULL) + if (((const numeric *) ptr)->sign == NUMERIC_NULL) return true; break; case ECPGt_interval: - return (_check(ptr, sizeof(interval))); + return _check(ptr, sizeof(interval)); break; case 
ECPGt_timestamp: - return (_check(ptr, sizeof(timestamp))); + return _check(ptr, sizeof(timestamp)); break; default: break; diff --git a/src/interfaces/ecpg/ecpglib/nls.mk b/src/interfaces/ecpg/ecpglib/nls.mk index 3057a00b32..5ce97ead8e 100644 --- a/src/interfaces/ecpg/ecpglib/nls.mk +++ b/src/interfaces/ecpg/ecpglib/nls.mk @@ -1,6 +1,6 @@ # src/interfaces/ecpg/ecpglib/nls.mk CATALOG_NAME = ecpglib -AVAIL_LANGUAGES = cs de es fr it ja ko pl pt_BR ru tr zh_CN +AVAIL_LANGUAGES = cs de es fr it ja ko pl pt_BR ru sv tr vi zh_CN GETTEXT_FILES = connect.c descriptor.c error.c execute.c misc.c GETTEXT_TRIGGERS = ecpg_gettext GETTEXT_FLAGS = ecpg_gettext:1:pass-c-format diff --git a/src/interfaces/ecpg/ecpglib/pg_type.h b/src/interfaces/ecpg/ecpglib/pg_type.h deleted file mode 100644 index 94d2d9287b..0000000000 --- a/src/interfaces/ecpg/ecpglib/pg_type.h +++ /dev/null @@ -1,79 +0,0 @@ -/*------------------------------------------------------------------------- - * - * pg_type.h - * Hard-wired knowledge about some standard type OIDs. - * - * XXX keep this in sync with src/include/catalog/pg_type.h - * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group - * Portions Copyright (c) 1994, Regents of the University of California - * - * src/interfaces/ecpg/ecpglib/pg_type.h - * - *------------------------------------------------------------------------- - */ -#ifndef PG_TYPE_H -#define PG_TYPE_H - -#define BOOLOID 16 -#define BYTEAOID 17 -#define CHAROID 18 -#define NAMEOID 19 -#define INT8OID 20 -#define INT2OID 21 -#define INT2VECTOROID 22 -#define INT4OID 23 -#define REGPROCOID 24 -#define TEXTOID 25 -#define OIDOID 26 -#define TIDOID 27 -#define XIDOID 28 -#define CIDOID 29 -#define OIDVECTOROID 30 -#define POINTOID 600 -#define LSEGOID 601 -#define PATHOID 602 -#define BOXOID 603 -#define POLYGONOID 604 -#define LINEOID 628 -#define FLOAT4OID 700 -#define FLOAT8OID 701 -#define ABSTIMEOID 702 -#define RELTIMEOID 703 -#define TINTERVALOID 704 -#define UNKNOWNOID 705 -#define CIRCLEOID 718 -#define CASHOID 790 -#define INETOID 869 -#define CIDROID 650 -#define BPCHAROID 1042 -#define VARCHAROID 1043 -#define DATEOID 1082 -#define TIMEOID 1083 -#define TIMESTAMPOID 1114 -#define TIMESTAMPTZOID 1184 -#define INTERVALOID 1186 -#define TIMETZOID 1266 -#define ZPBITOID 1560 -#define VARBITOID 1562 -#define NUMERICOID 1700 -#define REFCURSOROID 1790 -#define REGPROCEDUREOID 2202 -#define REGOPEROID 2203 -#define REGOPERATOROID 2204 -#define REGCLASSOID 2205 -#define REGTYPEOID 2206 -#define REGROLEOID 4096 -#define REGNAMESPACEOID 4089 -#define REGTYPEARRAYOID 2211 -#define UUIDOID 2950 -#define LSNOID 3220 -#define TSVECTOROID 3614 -#define GTSVECTOROID 3642 -#define TSQUERYOID 3615 -#define REGCONFIGOID 3734 -#define REGDICTIONARYOID 3769 -#define JSONBOID 3802 -#define INT4RANGEOID 3904 - -#endif /* PG_TYPE_H */ diff --git a/src/interfaces/ecpg/ecpglib/po/it.po b/src/interfaces/ecpg/ecpglib/po/it.po index 20f4494c8d..24d7435f2f 100644 --- a/src/interfaces/ecpg/ecpglib/po/it.po +++ b/src/interfaces/ecpg/ecpglib/po/it.po @@ -1,27 +1,26 @@ # -# Translation of ecpglib to Italian -# PostgreSQL Project +# ecpglib.po +# Italian message translation file for ecpglib # -# Associazione Culturale ITPUG - Italian PostgreSQL Users Group -# http://www.itpug.org/ - info@itpug.org +# For development and bug report please use: +# https://github.com/dvarrazzo/postgresql-it # -# Traduttori: -# * Maurizio Totti +# Copyright (C) 2012-2017 PostgreSQL Global Development Group +# Copyright (C) 2010, 
Associazione Culturale ITPUG # -# Revisori: -# * Gabriele Bartolini +# Daniele Varrazzo , 2012-2017 +# Maurizio Totti , 2010 # -# Copyright (c) 2010, Associazione Culturale ITPUG -# Distributed under the same license of the PostgreSQL project +# This file is distributed under the same license as the PostgreSQL package. # msgid "" msgstr "" -"Project-Id-Version: ecpglib (PostgreSQL) 9.3\n" +"Project-Id-Version: ecpglib (PostgreSQL) 10\n" "Report-Msgid-Bugs-To: pgsql-bugs@postgresql.org\n" -"POT-Creation-Date: 2013-03-15 22:10+0000\n" +"POT-Creation-Date: 2016-04-17 00:07+0000\n" "PO-Revision-Date: 2012-10-30 13:08+0100\n" "Last-Translator: Daniele Varrazzo \n" -"Language-Team: Gruppo traduzioni ITPUG \n" +"Language-Team: https://github.com/dvarrazzo/postgresql-it\n" "Language: it\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" @@ -30,163 +29,189 @@ msgstr "" "Plural-Forms: nplurals=2; plural=n != 1;\n" "X-Generator: Poedit 1.5.4\n" -#: connect.c:231 +#: connect.c:237 msgid "empty message text" msgstr "messaggio di testo vuoto" -#: connect.c:384 connect.c:413 connect.c:618 +#: connect.c:401 connect.c:430 connect.c:638 msgid "" msgstr "" -#: descriptor.c:807 misc.c:113 +#: descriptor.c:833 misc.c:120 msgid "NULL" msgstr "NULL" -#: error.c:29 +#. translator: this string will be truncated at 149 characters expanded. +#: error.c:33 #, c-format msgid "no data found on line %d" msgstr "non ci sono dati alla riga %d" # Utilizzerei 'memoria esaurita' al posto di 'errore di memoria' (GB) -#: error.c:39 +#. translator: this string will be truncated at 149 characters expanded. +#: error.c:40 #, c-format msgid "out of memory on line %d" msgstr "memoria esaurita alla riga %d" -#: error.c:49 +#. translator: this string will be truncated at 149 characters expanded. +#: error.c:47 #, c-format msgid "unsupported type \"%s\" on line %d" msgstr "tipo \"%s\" non supportato alla riga %d" -#: error.c:59 +#. translator: this string will be truncated at 149 characters expanded. +#: error.c:54 #, c-format msgid "too many arguments on line %d" msgstr "troppi argomenti alla riga %d" -#: error.c:69 +#. translator: this string will be truncated at 149 characters expanded. +#: error.c:61 #, c-format msgid "too few arguments on line %d" msgstr "numero di argomenti non sufficiente alla riga %d" -#: error.c:79 +#. translator: this string will be truncated at 149 characters expanded. +#: error.c:68 #, c-format msgid "invalid input syntax for type int: \"%s\", on line %d" msgstr "sintassi in input non valida per il tipo int: \"%s\", alla riga %d" -#: error.c:89 +#. translator: this string will be truncated at 149 characters expanded. +#: error.c:75 #, c-format msgid "invalid input syntax for type unsigned int: \"%s\", on line %d" msgstr "sintassi in input non valida per il tipo unsigned int: \"%s\", alla riga %d" -#: error.c:99 +#. translator: this string will be truncated at 149 characters expanded. +#: error.c:82 #, c-format msgid "invalid input syntax for floating-point type: \"%s\", on line %d" msgstr "sintassi in input non valida per il tipo floating-point: \"%s\", alla riga %d" -#: error.c:110 +#. translator: this string will be truncated at 149 characters expanded. +#: error.c:90 #, c-format msgid "invalid syntax for type boolean: \"%s\", on line %d" msgstr "sintassi in input non valida per il tipo boolean: \"%s\", alla riga %d" # Originariamente da MT: non si può convertire il valore booleano: la dimensione è sbagliata (disallineata), alla riga %d -#: error.c:118 +#. 
translator: this string will be truncated at 149 characters expanded. +#: error.c:95 #, c-format msgid "could not convert boolean value: size mismatch, on line %d" msgstr "conversione fallita per il valore booleano: dimensione incompatibile, alla riga %d" -#: error.c:128 +#. translator: this string will be truncated at 149 characters expanded. +#: error.c:102 #, c-format msgid "empty query on line %d" msgstr "query vuota alla riga %d" -#: error.c:138 +#. translator: this string will be truncated at 149 characters expanded. +#: error.c:109 #, c-format msgid "null value without indicator on line %d" msgstr "valore nullo senza variabile 'indicatore' alla riga %d" # è difficile da tradurre diversamente (GB) -#: error.c:148 +#. translator: this string will be truncated at 149 characters expanded. +#: error.c:116 #, c-format msgid "variable does not have an array type on line %d" msgstr "la variabile non è di tipo array alla riga %d" -#: error.c:158 +#. translator: this string will be truncated at 149 characters expanded. +#: error.c:123 #, c-format msgid "data read from server is not an array on line %d" msgstr "i dati letti dal server non sono di tipo array alla riga %d" -#: error.c:168 +#. translator: this string will be truncated at 149 characters expanded. +#: error.c:130 #, c-format msgid "inserting an array of variables is not supported on line %d" msgstr "inserire un array di variabili non è supportato alla riga %d" -#: error.c:178 +#. translator: this string will be truncated at 149 characters expanded. +#: error.c:137 #, c-format msgid "connection \"%s\" does not exist on line %d" msgstr "la connessione \"%s\" non esiste alla riga %d" # Inizialmente (MT): non si è connessi alla connessione \"%s\" alla riga %d -#: error.c:188 +#. translator: this string will be truncated at 149 characters expanded. +#: error.c:144 #, c-format msgid "not connected to connection \"%s\" on line %d" msgstr "connessione \"%s\" non attiva alla riga %d" -#: error.c:198 +#. translator: this string will be truncated at 149 characters expanded. +#: error.c:151 #, c-format msgid "invalid statement name \"%s\" on line %d" msgstr "nome di istruzione non valido \"%s\" alla riga %d" -#: error.c:208 +#. translator: this string will be truncated at 149 characters expanded. +#: error.c:158 #, c-format msgid "descriptor \"%s\" not found on line %d" msgstr "il descrittore \"%s\" non esiste alla riga %d" # userei intervallo al posto di range (GB) -#: error.c:218 +#. translator: this string will be truncated at 149 characters expanded. +#: error.c:165 #, c-format msgid "descriptor index out of range on line %d" msgstr "l'indice del descrittore è fuori intervallo alla riga %d" -#: error.c:228 +#. translator: this string will be truncated at 149 characters expanded. +#: error.c:172 #, c-format msgid "unrecognized descriptor item \"%s\" on line %d" msgstr "elemento del descrittore \"%s\" sconosciuto alla riga %d" -#: error.c:238 +#. translator: this string will be truncated at 149 characters expanded. +#: error.c:179 #, c-format msgid "variable does not have a numeric type on line %d" msgstr "la variabile non è di tipo numerico alla riga %d" -#: error.c:248 +#. translator: this string will be truncated at 149 characters expanded. +#: error.c:186 #, c-format msgid "variable does not have a character type on line %d" msgstr "la variabile non è di tipo carattere alla riga %d" -#: error.c:258 +#. translator: this string will be truncated at 149 characters expanded. 
+#: error.c:193 #, c-format msgid "error in transaction processing on line %d" msgstr "errore nel processare la transazione alla riga %d" # Inizialmente (MT): non posso connettermi al database \"%s\" alla riga %d -#: error.c:268 +#. translator: this string will be truncated at 149 characters expanded. +#: error.c:200 #, c-format msgid "could not connect to database \"%s\" on line %d" msgstr "connessione fallita al database \"%s\" alla riga %d" -#: error.c:278 +#. translator: this string will be truncated at 149 characters expanded. +#: error.c:207 #, c-format msgid "SQL error %d on line %d" msgstr "errore SQL %d alla riga %d" -#: error.c:318 +#: error.c:254 msgid "the connection to the server was lost" msgstr "la connessione con il server è andata persa" -#: error.c:405 +#: error.c:347 #, c-format msgid "SQL error: %s\n" msgstr "errore SQL: %s\n" -#: execute.c:1921 +#: execute.c:1962 msgid "" msgstr "" diff --git a/src/interfaces/ecpg/ecpglib/po/ko.po b/src/interfaces/ecpg/ecpglib/po/ko.po index 69bffb8e17..a059c47757 100644 --- a/src/interfaces/ecpg/ecpglib/po/ko.po +++ b/src/interfaces/ecpg/ecpglib/po/ko.po @@ -5,10 +5,10 @@ # msgid "" msgstr "" -"Project-Id-Version: ecpglib (PostgreSQL) 9.6\n" +"Project-Id-Version: ecpglib (PostgreSQL) 10\n" "Report-Msgid-Bugs-To: pgsql-bugs@postgresql.org\n" "POT-Creation-Date: 2016-09-26 14:02+0900\n" -"PO-Revision-Date: 2016-09-26 15:37+0900\n" +"PO-Revision-Date: 2017-08-17 13:20+0900\n" "Last-Translator: Ioseph Kim \n" "Language-Team: Korean \n" "Language: ko\n" diff --git a/src/interfaces/ecpg/ecpglib/po/ru.po b/src/interfaces/ecpg/ecpglib/po/ru.po index 1fc65d93ad..c5fa23d5fb 100644 --- a/src/interfaces/ecpg/ecpglib/po/ru.po +++ b/src/interfaces/ecpg/ecpglib/po/ru.po @@ -2,12 +2,11 @@ # Copyright (C) 2012-2016 PostgreSQL Global Development Group # This file is distributed under the same license as the PostgreSQL package. # Alexander Lakhin , 2012-2017. -# msgid "" msgstr "" "Project-Id-Version: ecpglib (PostgreSQL current)\n" "Report-Msgid-Bugs-To: pgsql-bugs@postgresql.org\n" -"POT-Creation-Date: 2017-03-27 12:38+0000\n" +"POT-Creation-Date: 2017-10-30 19:30+0300\n" "PO-Revision-Date: 2016-09-20 12:00+0300\n" "Last-Translator: Alexander Lakhin \n" "Language-Team: Russian \n" @@ -197,6 +196,6 @@ msgstr "подключение к серверу потеряно" msgid "SQL error: %s\n" msgstr "ошибка SQL: %s\n" -#: execute.c:1961 +#: execute.c:1968 msgid "" msgstr "<>" diff --git a/src/interfaces/ecpg/ecpglib/po/sv.po b/src/interfaces/ecpg/ecpglib/po/sv.po new file mode 100644 index 0000000000..72754fb0f4 --- /dev/null +++ b/src/interfaces/ecpg/ecpglib/po/sv.po @@ -0,0 +1,198 @@ +# SWEDISH message translation file for ecpglib +# Copyright (C) 2017 PostgreSQL Global Development Group +# This file is distributed under the same license as the PostgreSQL package. +# Dennis Björklund , 2017. +# +msgid "" +msgstr "" +"Project-Id-Version: ecpglib (PostgreSQL) 10\n" +"Report-Msgid-Bugs-To: pgsql-bugs@postgresql.org\n" +"POT-Creation-Date: 2017-09-08 23:08+0000\n" +"PO-Revision-Date: 2017-09-10 11:27+0200\n" +"Last-Translator: Dennis Björklund , 2017\n" +"Language-Team: Swedish \n" +"Language: sv\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=UTF-8\n" +"Content-Transfer-Encoding: 8bit\n" + +#: connect.c:237 +msgid "empty message text" +msgstr "tom meddelandetext" + +#: connect.c:401 connect.c:430 connect.c:638 +msgid "" +msgstr "" + +#: descriptor.c:833 misc.c:120 +msgid "NULL" +msgstr "NULL" + +#. translator: this string will be truncated at 149 characters expanded. 
+#: error.c:33 +#, c-format +msgid "no data found on line %d" +msgstr "ingen data hittad på rad %d" + +#. translator: this string will be truncated at 149 characters expanded. +#: error.c:40 +#, c-format +msgid "out of memory on line %d" +msgstr "slut på minne på rad %d" + +#. translator: this string will be truncated at 149 characters expanded. +#: error.c:47 +#, c-format +msgid "unsupported type \"%s\" on line %d" +msgstr "ej stöd för typ \"%s\" på rad %d" + +#. translator: this string will be truncated at 149 characters expanded. +#: error.c:54 +#, c-format +msgid "too many arguments on line %d" +msgstr "för många argument på rad %d" + +#. translator: this string will be truncated at 149 characters expanded. +#: error.c:61 +#, c-format +msgid "too few arguments on line %d" +msgstr "för få argument på rad %d" + +#. translator: this string will be truncated at 149 characters expanded. +#: error.c:68 +#, c-format +msgid "invalid input syntax for type int: \"%s\", on line %d" +msgstr "ogiltig inputsyntax för typ int: \"%s\", på rad %d" + +#. translator: this string will be truncated at 149 characters expanded. +#: error.c:75 +#, c-format +msgid "invalid input syntax for type unsigned int: \"%s\", on line %d" +msgstr "ogiltig inputsyntax för typ unsigned int: \"%s\", på rad %d" + +#. translator: this string will be truncated at 149 characters expanded. +#: error.c:82 +#, c-format +msgid "invalid input syntax for floating-point type: \"%s\", on line %d" +msgstr "ogiltig inputsyntaxc för flyttalstyp: \"%s\", på rad %d" + +#. translator: this string will be truncated at 149 characters expanded. +#: error.c:90 +#, c-format +msgid "invalid syntax for type boolean: \"%s\", on line %d" +msgstr "ogiltig syntax för typ boolean: \"%s\", på rad %d" + +#. translator: this string will be truncated at 149 characters expanded. +#: error.c:95 +#, c-format +msgid "could not convert boolean value: size mismatch, on line %d" +msgstr "kunde inte konvertera booleanskt värde: storlekarna matchar inte, på rad %d" + +#. translator: this string will be truncated at 149 characters expanded. +#: error.c:102 +#, c-format +msgid "empty query on line %d" +msgstr "tom fråga på rad %d" + +#. translator: this string will be truncated at 149 characters expanded. +#: error.c:109 +#, c-format +msgid "null value without indicator on line %d" +msgstr "null-värde utan indikator på rad %d" + +#. translator: this string will be truncated at 149 characters expanded. +#: error.c:116 +#, c-format +msgid "variable does not have an array type on line %d" +msgstr "variabel har inte array-typ på rad %d" + +#. translator: this string will be truncated at 149 characters expanded. +#: error.c:123 +#, c-format +msgid "data read from server is not an array on line %d" +msgstr "data inläst från servern är inte en array på rad %d" + +#. translator: this string will be truncated at 149 characters expanded. +#: error.c:130 +#, c-format +msgid "inserting an array of variables is not supported on line %d" +msgstr "sätta in en array med variabler stöds inte på rad %d" + +#. translator: this string will be truncated at 149 characters expanded. +#: error.c:137 +#, c-format +msgid "connection \"%s\" does not exist on line %d" +msgstr "anslutning \"%s\" funns inte på rad %d" + +#. translator: this string will be truncated at 149 characters expanded. +#: error.c:144 +#, c-format +msgid "not connected to connection \"%s\" on line %d" +msgstr "ej ansluten till anslutning \"%s\" på rad %d" + +#. 
translator: this string will be truncated at 149 characters expanded. +#: error.c:151 +#, c-format +msgid "invalid statement name \"%s\" on line %d" +msgstr "ogiltigt satsnamn \"%s\" på rad %d" + +#. translator: this string will be truncated at 149 characters expanded. +#: error.c:158 +#, c-format +msgid "descriptor \"%s\" not found on line %d" +msgstr "deskriptor \"%s\" hittades inte på rad %d" + +#. translator: this string will be truncated at 149 characters expanded. +#: error.c:165 +#, c-format +msgid "descriptor index out of range on line %d" +msgstr "deskriptor-index utanför sitt intervall på rad %d" + +#. translator: this string will be truncated at 149 characters expanded. +#: error.c:172 +#, c-format +msgid "unrecognized descriptor item \"%s\" on line %d" +msgstr "okänd deskriptor-post \"%s\" på rad %d" + +#. translator: this string will be truncated at 149 characters expanded. +#: error.c:179 +#, c-format +msgid "variable does not have a numeric type on line %d" +msgstr "variabel har ej numerisk typ på rad %d" + +#. translator: this string will be truncated at 149 characters expanded. +#: error.c:186 +#, c-format +msgid "variable does not have a character type on line %d" +msgstr "variabel har ej character-typ på rad %d" + +#. translator: this string will be truncated at 149 characters expanded. +#: error.c:193 +#, c-format +msgid "error in transaction processing on line %d" +msgstr "fel i transaktionsprocessande på rad %d" + +#. translator: this string will be truncated at 149 characters expanded. +#: error.c:200 +#, c-format +msgid "could not connect to database \"%s\" on line %d" +msgstr "kunde inte ansluta till databas \"%s\" på rad %d" + +#. translator: this string will be truncated at 149 characters expanded. +#: error.c:207 +#, c-format +msgid "SQL error %d on line %d" +msgstr "SQL-fel %d på rad %d" + +#: error.c:254 +msgid "the connection to the server was lost" +msgstr "anslutningen till servern tappades" + +#: error.c:347 +#, c-format +msgid "SQL error: %s\n" +msgstr "SQL-fel: %s\n" + +#: execute.c:1961 +msgid "" +msgstr "" diff --git a/src/interfaces/ecpg/ecpglib/po/vi.po b/src/interfaces/ecpg/ecpglib/po/vi.po new file mode 100644 index 0000000000..850b54f26c --- /dev/null +++ b/src/interfaces/ecpg/ecpglib/po/vi.po @@ -0,0 +1,200 @@ +# LANGUAGE message translation file for ecpglib +# Copyright (C) 2018 PostgreSQL Global Development Group +# This file is distributed under the same license as the ecpglib (PostgreSQL) package. +# FIRST AUTHOR , 2018. +# +msgid "" +msgstr "" +"Project-Id-Version: ecpglib (PostgreSQL) 11\n" +"Report-Msgid-Bugs-To: pgsql-bugs@postgresql.org\n" +"POT-Creation-Date: 2018-04-22 12:08+0000\n" +"PO-Revision-Date: 2018-04-23 21:34+0900\n" +"Language-Team: \n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=UTF-8\n" +"Content-Transfer-Encoding: 8bit\n" +"X-Generator: Poedit 2.0.6\n" +"Last-Translator: Dang Minh Huong \n" +"Plural-Forms: nplurals=1; plural=0;\n" +"Language: vi_VN\n" + +#: connect.c:237 +msgid "empty message text" +msgstr "văn bản tin nhắn trống" + +#: connect.c:401 connect.c:430 connect.c:638 +msgid "" +msgstr "" + +#: descriptor.c:834 misc.c:120 +msgid "NULL" +msgstr "NULL" + +#. translator: this string will be truncated at 149 characters expanded. +#: error.c:33 +#, c-format +msgid "no data found on line %d" +msgstr "không tìm thấy dữ liệu trên dòng %d" + +#. translator: this string will be truncated at 149 characters expanded. 
+#: error.c:40
+#, c-format
+msgid "out of memory on line %d"
+msgstr "hết bộ nhớ trên dòng %d"
+
+#. translator: this string will be truncated at 149 characters expanded.
+#: error.c:47
+#, c-format
+msgid "unsupported type \"%s\" on line %d"
+msgstr "không hỗ trợ kiểu \"%s\" trên dòng %d"
+
+#. translator: this string will be truncated at 149 characters expanded.
+#: error.c:54
+#, c-format
+msgid "too many arguments on line %d"
+msgstr "quá nhiều đối số trên dòng %d"
+
+#. translator: this string will be truncated at 149 characters expanded.
+#: error.c:61
+#, c-format
+msgid "too few arguments on line %d"
+msgstr "quá ít đối số trên dòng %d"
+
+#. translator: this string will be truncated at 149 characters expanded.
+#: error.c:68
+#, c-format
+msgid "invalid input syntax for type int: \"%s\", on line %d"
+msgstr "cú pháp nhập không hợp lệ cho kiểu int: \"%s\", trên dòng %d"
+
+#. translator: this string will be truncated at 149 characters expanded.
+#: error.c:75
+#, c-format
+msgid "invalid input syntax for type unsigned int: \"%s\", on line %d"
+msgstr "cú pháp nhập không hợp lệ cho kiểu unsigned int: \"%s\", trên dòng %d"
+
+#. translator: this string will be truncated at 149 characters expanded.
+#: error.c:82
+#, c-format
+msgid "invalid input syntax for floating-point type: \"%s\", on line %d"
+msgstr "cú pháp nhập không hợp lệ cho kiểu dấu phẩy động: \"%s\", trên dòng %d"
+
+#. translator: this string will be truncated at 149 characters expanded.
+#: error.c:90
+#, c-format
+msgid "invalid syntax for type boolean: \"%s\", on line %d"
+msgstr "cú pháp không hợp lệ cho kiểu boolean: \"%s\", trên dòng %d"
+
+#. translator: this string will be truncated at 149 characters expanded.
+#: error.c:95
+#, c-format
+msgid "could not convert boolean value: size mismatch, on line %d"
+msgstr "không thể chuyển đổi giá trị boolean: kích thước không khớp, trên dòng %d"
+
+#. translator: this string will be truncated at 149 characters expanded.
+#: error.c:102
+#, c-format
+msgid "empty query on line %d"
+msgstr "truy vấn trống trên dòng %d"
+
+#. translator: this string will be truncated at 149 characters expanded.
+#: error.c:109
+#, c-format
+msgid "null value without indicator on line %d"
+msgstr "giá trị null không có chỉ báo (indicator) trên dòng %d"
+
+#. translator: this string will be truncated at 149 characters expanded.
+#: error.c:116
+#, c-format
+msgid "variable does not have an array type on line %d"
+msgstr "biến không có kiểu mảng trên dòng %d"
+
+#. translator: this string will be truncated at 149 characters expanded.
+#: error.c:123
+#, c-format
+msgid "data read from server is not an array on line %d"
+msgstr "dữ liệu đọc từ server không phải là một mảng trên dòng %d"
+
+#. translator: this string will be truncated at 149 characters expanded.
+#: error.c:130
+#, c-format
+msgid "inserting an array of variables is not supported on line %d"
+msgstr "chèn một mảng các biến không được hỗ trợ trên dòng %d"
+
+#. translator: this string will be truncated at 149 characters expanded.
+#: error.c:137
+#, c-format
+msgid "connection \"%s\" does not exist on line %d"
+msgstr "kết nối \"%s\" không tồn tại trên dòng %d"
+
+#. translator: this string will be truncated at 149 characters expanded.
+#: error.c:144
+#, c-format
+msgid "not connected to connection \"%s\" on line %d"
+msgstr "chưa kết nối tới kết nối \"%s\" trên dòng %d"
+
+#. translator: this string will be truncated at 149 characters expanded.
+#: error.c:151 +#, c-format +msgid "invalid statement name \"%s\" on line %d" +msgstr "tên statement không hợp lệ \"%s\" trên dòng %d" + +#. translator: this string will be truncated at 149 characters expanded. +#: error.c:158 +#, c-format +msgid "descriptor \"%s\" not found on line %d" +msgstr "không tìm thấy descriptor \"%s\" trên dòng %d" + +#. translator: this string will be truncated at 149 characters expanded. +#: error.c:165 +#, c-format +msgid "descriptor index out of range on line %d" +msgstr "chỉ mục của descriptor nằm ngoài phạm vi trên dòng %d" + +#. translator: this string will be truncated at 149 characters expanded. +#: error.c:172 +#, c-format +msgid "unrecognized descriptor item \"%s\" on line %d" +msgstr "không nhận ra descriptor item \"%s\" trên dòng %d" + +#. translator: this string will be truncated at 149 characters expanded. +#: error.c:179 +#, c-format +msgid "variable does not have a numeric type on line %d" +msgstr "biến số không có kiểu numeric trên dòng %d" + +#. translator: this string will be truncated at 149 characters expanded. +#: error.c:186 +#, c-format +msgid "variable does not have a character type on line %d" +msgstr "biến số không có kiểu ký tự trên dòng %d" + +#. translator: this string will be truncated at 149 characters expanded. +#: error.c:193 +#, c-format +msgid "error in transaction processing on line %d" +msgstr "lỗi trong xử lý giao dịch trên dòng %d" + +#. translator: this string will be truncated at 149 characters expanded. +#: error.c:200 +#, c-format +msgid "could not connect to database \"%s\" on line %d" +msgstr "không thể kết nối với cơ sở dữ liệu \"%s\" trên dòng %d" + +#. translator: this string will be truncated at 149 characters expanded. +#: error.c:207 +#, c-format +msgid "SQL error %d on line %d" +msgstr "lỗi SQL %d trên dòng %d" + +#: error.c:254 +msgid "the connection to the server was lost" +msgstr "kết nối với server bị mất" + +#: error.c:347 +#, c-format +msgid "SQL error: %s\n" +msgstr "lỗi SQL: %s\n" + +#: execute.c:1968 +msgid "" +msgstr "" diff --git a/src/interfaces/ecpg/ecpglib/prepare.c b/src/interfaces/ecpg/ecpglib/prepare.c index 151aa80dc6..5368886f67 100644 --- a/src/interfaces/ecpg/ecpglib/prepare.c +++ b/src/interfaces/ecpg/ecpglib/prepare.c @@ -13,19 +13,29 @@ #define STMTID_SIZE 32 +/* + * The statement cache contains stmtCacheNBuckets hash buckets, each + * having stmtCacheEntPerBucket entries, which we recycle as needed, + * giving up the least-executed entry in the bucket. + * stmtCacheEntries[0] is never used, so that zero can be a "not found" + * indicator. 
+ */ +#define stmtCacheNBuckets 2039 /* should be a prime number */ +#define stmtCacheEntPerBucket 8 + +#define stmtCacheArraySize (stmtCacheNBuckets * stmtCacheEntPerBucket + 1) + typedef struct { int lineno; char stmtID[STMTID_SIZE]; char *ecpgQuery; - long execs; /* # of executions */ - const char *connection; /* connection for the statement */ + long execs; /* # of executions */ + const char *connection; /* connection for the statement */ } stmtCacheEntry; static int nextStmtID = 1; -static const int stmtCacheNBuckets = 2039; /* # buckets - a prime # */ -static const int stmtCacheEntPerBucket = 8; /* # entries/bucket */ -static stmtCacheEntry stmtCacheEntries[16384] = {{0, {0}, 0, 0, 0}}; +static stmtCacheEntry *stmtCacheEntries = NULL; static bool deallocate_one(int lineno, enum COMPAT_MODE c, struct connection *con, struct prepared_statement *prev, struct prepared_statement *this); @@ -42,7 +52,7 @@ isvarchar(unsigned char c) if (c >= 128) return true; - return (false); + return false; } static bool @@ -64,9 +74,9 @@ replace_variables(char **text, int lineno) ptr += 2; /* skip '::' */ else { + /* a rough guess of the size we need: */ + int buffersize = sizeof(int) * CHAR_BIT * 10 / 3; int len; - int buffersize = sizeof(int) * CHAR_BIT * 10 / 3; /* a rough guess of the - * size we need */ char *buffer, *newcopy; @@ -75,7 +85,8 @@ replace_variables(char **text, int lineno) snprintf(buffer, buffersize, "$%d", counter++); - for (len = 1; (*text)[ptr + len] && isvarchar((*text)[ptr + len]); len++); + for (len = 1; (*text)[ptr + len] && isvarchar((*text)[ptr + len]); len++) + /* skip */ ; if (!(newcopy = (char *) ecpg_alloc(strlen(*text) -len + strlen(buffer) + 1, lineno))) { ecpg_free(buffer); @@ -158,13 +169,15 @@ prepare_common(int lineno, struct connection *con, const char *name, const char /* handle the EXEC SQL PREPARE statement */ /* questionmarks is not needed but remains in there for the time being to not change the API */ bool -ECPGprepare(int lineno, const char *connection_name, const bool questionmarks, const char *name, const char *variable) +ECPGprepare(int lineno, const char *connection_name, const bool questionmarks, + const char *name, const char *variable) { struct connection *con; struct prepared_statement *this, *prev; (void) questionmarks; /* quiet the compiler */ + con = ecpg_get_connection(connection_name); if (!ecpg_init(con, connection_name, lineno)) @@ -185,7 +198,9 @@ ecpg_find_prepared_statement(const char *name, struct prepared_statement *this, *prev; - for (this = con->prep_stmts, prev = NULL; this != NULL; prev = this, this = this->next) + for (this = con->prep_stmts, prev = NULL; + this != NULL; + prev = this, this = this->next) { if (strcmp(this->name, name) == 0) { @@ -198,7 +213,8 @@ ecpg_find_prepared_statement(const char *name, } static bool -deallocate_one(int lineno, enum COMPAT_MODE c, struct connection *con, struct prepared_statement *prev, struct prepared_statement *this) +deallocate_one(int lineno, enum COMPAT_MODE c, struct connection *con, + struct prepared_statement *prev, struct prepared_statement *this) { bool r = false; @@ -217,7 +233,9 @@ deallocate_one(int lineno, enum COMPAT_MODE c, struct connection *con, struct pr sprintf(text, "deallocate \"%s\"", this->name); query = PQexec(this->stmt->connection->connection, text); ecpg_free(text); - if (ecpg_check_PQresult(query, lineno, this->stmt->connection->connection, this->stmt->compat)) + if (ecpg_check_PQresult(query, lineno, + this->stmt->connection->connection, + this->stmt->compat)) { 
PQclear(query); r = true; @@ -288,7 +306,8 @@ ecpg_deallocate_all_conn(int lineno, enum COMPAT_MODE c, struct connection *con) bool ECPGdeallocate_all(int lineno, int compat, const char *connection_name) { - return ecpg_deallocate_all_conn(lineno, compat, ecpg_get_connection(connection_name)); + return ecpg_deallocate_all_conn(lineno, compat, + ecpg_get_connection(connection_name)); } char * @@ -319,27 +338,28 @@ HashStmt(const char *ecpgQuery) bucketNo, hashLeng, stmtLeng; - long long hashVal, + uint64 hashVal, rotVal; stmtLeng = strlen(ecpgQuery); - hashLeng = 50; /* use 1st 50 characters of statement */ - if (hashLeng > stmtLeng) /* if the statement isn't that long */ - hashLeng = stmtLeng; /* use its actual length */ + hashLeng = 50; /* use 1st 50 characters of statement */ + if (hashLeng > stmtLeng) /* if the statement isn't that long */ + hashLeng = stmtLeng; /* use its actual length */ hashVal = 0; for (stmtIx = 0; stmtIx < hashLeng; ++stmtIx) { - hashVal = hashVal + (int) ecpgQuery[stmtIx]; + hashVal = hashVal + (unsigned char) ecpgQuery[stmtIx]; + /* rotate 32-bit hash value left 13 bits */ hashVal = hashVal << 13; - rotVal = (hashVal & 0x1fff00000000LL) >> 32; - hashVal = (hashVal & 0xffffffffLL) | rotVal; + rotVal = (hashVal & UINT64CONST(0x1fff00000000)) >> 32; + hashVal = (hashVal & UINT64CONST(0xffffffff)) | rotVal; } bucketNo = hashVal % stmtCacheNBuckets; - bucketNo += 1; /* don't use bucket # 0 */ - return (bucketNo * stmtCacheEntPerBucket); + /* Add 1 so that array entry 0 is never used */ + return bucketNo * stmtCacheEntPerBucket + 1; } /* @@ -353,25 +373,29 @@ SearchStmtCache(const char *ecpgQuery) int entNo, entIx; -/* hash the statement */ + /* quick failure if cache not set up */ + if (stmtCacheEntries == NULL) + return 0; + + /* hash the statement */ entNo = HashStmt(ecpgQuery); -/* search the cache */ + /* search the cache */ for (entIx = 0; entIx < stmtCacheEntPerBucket; ++entIx) { - if (stmtCacheEntries[entNo].stmtID[0]) /* check if entry is in use */ + if (stmtCacheEntries[entNo].stmtID[0]) /* check if entry is in use */ { if (strcmp(ecpgQuery, stmtCacheEntries[entNo].ecpgQuery) == 0) - break; /* found it */ + break; /* found it */ } - ++entNo; /* incr entry # */ + ++entNo; /* incr entry # */ } -/* if entry wasn't found - set entry # to zero */ + /* if entry wasn't found - set entry # to zero */ if (entIx >= stmtCacheEntPerBucket) entNo = 0; - return (entNo); + return entNo; } /* @@ -380,34 +404,39 @@ SearchStmtCache(const char *ecpgQuery) * OR negative error code */ static int -ecpg_freeStmtCacheEntry(int lineno, int compat, int entNo) /* entry # to free */ +ecpg_freeStmtCacheEntry(int lineno, int compat, + int entNo) /* entry # to free */ { stmtCacheEntry *entry; struct connection *con; struct prepared_statement *this, *prev; + /* fail if cache isn't set up */ + if (stmtCacheEntries == NULL) + return -1; + entry = &stmtCacheEntries[entNo]; - if (!entry->stmtID[0]) /* return if the entry isn't in use */ - return (0); + if (!entry->stmtID[0]) /* return if the entry isn't in use */ + return 0; con = ecpg_get_connection(entry->connection); - /* free the 'prepared_statement' list entry */ + /* free the 'prepared_statement' list entry */ this = ecpg_find_prepared_statement(entry->stmtID, con, &prev); if (this && !deallocate_one(lineno, compat, con, prev, this)) - return (-1); + return -1; entry->stmtID[0] = '\0'; - /* free the memory used by the cache entry */ + /* free the memory used by the cache entry */ if (entry->ecpgQuery) { ecpg_free(entry->ecpgQuery); 
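
The prepare.c hunks above replace the fixed 16384-entry static cache with a lazily allocated array of stmtCacheNBuckets (2039) buckets of stmtCacheEntPerBucket (8) slots each, plus an unused slot 0 that serves as the "not found" value. The following standalone sketch only mirrors that bucket arithmetic and the sum-and-rotate hash HashStmt applies to the first 50 bytes of a statement; it is an illustrative aside, not part of the patch, and the helper name bucket_start is invented.

#include <stdint.h>
#include <string.h>

#define stmtCacheNBuckets     2039   /* prime, as in prepare.c */
#define stmtCacheEntPerBucket 8

static int
bucket_start(const char *query)
{
    uint64_t hash = 0;
    size_t   len = strlen(query);
    size_t   i;

    if (len > 50)
        len = 50;               /* HashStmt only hashes the first 50 bytes */

    for (i = 0; i < len; i++)
    {
        hash += (unsigned char) query[i];
        hash <<= 13;            /* rotate the 32-bit value left by 13 bits */
        hash = (hash & UINT64_C(0xffffffff)) |
               ((hash & UINT64_C(0x1fff00000000)) >> 32);
    }

    /* the +1 keeps array slot 0 free as the "not found" indicator */
    return (int) (hash % stmtCacheNBuckets) * stmtCacheEntPerBucket + 1;
}

A lookup scans the eight slots starting at this index; on a miss, AddStmtToCache recycles whichever of those eight slots has the lowest execution count.
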
entry->ecpgQuery = 0; } - return (entNo); + return entNo; } /* @@ -415,11 +444,11 @@ ecpg_freeStmtCacheEntry(int lineno, int compat, int entNo) /* entry # to free */ * returns entry # in cache used OR negative error code */ static int -AddStmtToCache(int lineno, /* line # of statement */ - const char *stmtID, /* statement ID */ - const char *connection, /* connection */ +AddStmtToCache(int lineno, /* line # of statement */ + const char *stmtID, /* statement ID */ + const char *connection, /* connection */ int compat, /* compatibility level */ - const char *ecpgQuery) /* query */ + const char *ecpgQuery) /* query */ { int ix, initEntNo, @@ -427,32 +456,44 @@ AddStmtToCache(int lineno, /* line # of statement */ entNo; stmtCacheEntry *entry; -/* hash the statement */ + /* allocate and zero cache array if we haven't already */ + if (stmtCacheEntries == NULL) + { + stmtCacheEntries = (stmtCacheEntry *) + ecpg_alloc(sizeof(stmtCacheEntry) * stmtCacheArraySize, lineno); + if (stmtCacheEntries == NULL) + return -1; + } + + /* hash the statement */ initEntNo = HashStmt(ecpgQuery); -/* search for an unused entry */ + /* search for an unused entry */ entNo = initEntNo; /* start with the initial entry # for the - * bucket */ - luEntNo = initEntNo; /* use it as the initial 'least used' entry */ + * bucket */ + luEntNo = initEntNo; /* use it as the initial 'least used' entry */ for (ix = 0; ix < stmtCacheEntPerBucket; ++ix) { entry = &stmtCacheEntries[entNo]; - if (!entry->stmtID[0]) /* unused entry - use it */ + if (!entry->stmtID[0]) /* unused entry - use it */ break; if (entry->execs < stmtCacheEntries[luEntNo].execs) - luEntNo = entNo; /* save new 'least used' entry */ - ++entNo; /* increment entry # */ + luEntNo = entNo; /* save new 'least used' entry */ + ++entNo; /* increment entry # */ } -/* if no unused entries were found - use the 'least used' entry found in the bucket */ - if (ix >= stmtCacheEntPerBucket) /* if no unused entries were found */ - entNo = luEntNo; /* re-use the 'least used' entry */ + /* + * if no unused entries were found, re-use the 'least used' entry found in + * the bucket + */ + if (ix >= stmtCacheEntPerBucket) + entNo = luEntNo; -/* 'entNo' is the entry to use - make sure its free */ + /* 'entNo' is the entry to use - make sure its free */ if (ecpg_freeStmtCacheEntry(lineno, compat, entNo) < 0) - return (-1); + return -1; -/* add the query to the entry */ + /* add the query to the entry */ entry = &stmtCacheEntries[entNo]; entry->lineno = lineno; entry->ecpgQuery = ecpg_strdup(ecpgQuery, lineno); @@ -460,7 +501,7 @@ AddStmtToCache(int lineno, /* line # of statement */ entry->execs = 0; memcpy(entry->stmtID, stmtID, sizeof(entry->stmtID)); - return (entNo); + return entNo; } /* handle cache and preparation of statements in auto-prepare mode */ @@ -469,10 +510,10 @@ ecpg_auto_prepare(int lineno, const char *connection_name, const int compat, cha { int entNo; - /* search the statement cache for this statement */ + /* search the statement cache for this statement */ entNo = SearchStmtCache(query); - /* if not found - add the statement to the cache */ + /* if not found - add the statement to the cache */ if (entNo) { char *stmtID; @@ -487,7 +528,7 @@ ecpg_auto_prepare(int lineno, const char *connection_name, const int compat, cha prep = ecpg_find_prepared_statement(stmtID, con, NULL); /* This prepared name doesn't exist on this connection. 
*/ if (!prep && !prepare_common(lineno, con, stmtID, query)) - return (false); + return false; *name = ecpg_strdup(stmtID, lineno); } @@ -501,9 +542,11 @@ ecpg_auto_prepare(int lineno, const char *connection_name, const int compat, cha sprintf(stmtID, "ecpg%d", nextStmtID++); if (!ECPGprepare(lineno, connection_name, 0, stmtID, query)) - return (false); - if (AddStmtToCache(lineno, stmtID, connection_name, compat, query) < 0) - return (false); + return false; + + entNo = AddStmtToCache(lineno, stmtID, connection_name, compat, query); + if (entNo < 0) + return false; *name = ecpg_strdup(stmtID, lineno); } @@ -511,5 +554,5 @@ ecpg_auto_prepare(int lineno, const char *connection_name, const int compat, cha /* increase usage counter */ stmtCacheEntries[entNo].execs++; - return (true); + return true; } diff --git a/src/interfaces/ecpg/ecpglib/sqlda.c b/src/interfaces/ecpg/ecpglib/sqlda.c index c1ba989166..317d22fa4e 100644 --- a/src/interfaces/ecpg/ecpglib/sqlda.c +++ b/src/interfaces/ecpg/ecpglib/sqlda.c @@ -8,7 +8,8 @@ #define POSTGRES_ECPG_INTERNAL #include "postgres_fe.h" -#include "pg_type.h" + +#include "catalog/pg_type_d.h" #include "ecpg-pthread-win32.h" #include "decimal.h" diff --git a/src/interfaces/ecpg/ecpglib/typename.c b/src/interfaces/ecpg/ecpglib/typename.c index 48587e49c7..9da1cdf4c2 100644 --- a/src/interfaces/ecpg/ecpglib/typename.c +++ b/src/interfaces/ecpg/ecpglib/typename.c @@ -3,12 +3,13 @@ #define POSTGRES_ECPG_INTERNAL #include "postgres_fe.h" +#include "catalog/pg_type_d.h" + #include "ecpgtype.h" #include "ecpglib.h" #include "extern.h" #include "sqltypes.h" #include "sql3types.h" -#include "pg_type.h" /* * This function is used to generate the correct type names. diff --git a/src/interfaces/ecpg/include/Makefile b/src/interfaces/ecpg/include/Makefile index e92e56f26f..9c68bf3c47 100644 --- a/src/interfaces/ecpg/include/Makefile +++ b/src/interfaces/ecpg/include/Makefile @@ -14,7 +14,7 @@ install: all installdirs install-headers .PHONY: install-headers ecpg_headers = ecpgerrno.h ecpglib.h ecpgtype.h sqlca.h sql3types.h ecpg_informix.h \ - pgtypes_error.h pgtypes_numeric.h pgtypes_timestamp.h pgtypes_date.h pgtypes_interval.h \ + pgtypes_error.h pgtypes_numeric.h pgtypes_timestamp.h pgtypes_date.h pgtypes_interval.h pgtypes.h \ sqlda.h sqlda-compat.h sqlda-native.h informix_headers = datetime.h decimal.h sqltypes.h diff --git a/src/interfaces/ecpg/include/ecpg_informix.h b/src/interfaces/ecpg/include/ecpg_informix.h index dd6258152a..a5260a5542 100644 --- a/src/interfaces/ecpg/include/ecpg_informix.h +++ b/src/interfaces/ecpg/include/ecpg_informix.h @@ -36,15 +36,15 @@ extern "C" extern int rdatestr(date, char *); extern void rtoday(date *); extern int rjulmdy(date, short *); -extern int rdefmtdate(date *, char *, char *); -extern int rfmtdate(date, char *, char *); +extern int rdefmtdate(date *, const char *, const char *); +extern int rfmtdate(date, const char *, char *); extern int rmdyjul(short *, date *); -extern int rstrdate(char *, date *); +extern int rstrdate(const char *, date *); extern int rdayofweek(date); -extern int rfmtlong(long, char *, char *); +extern int rfmtlong(long, const char *, char *); extern int rgetmsg(int, char *, int); -extern int risnull(int, char *); +extern int risnull(int, const char *); extern int rsetnull(int, char *); extern int rtypalign(int, int); extern int rtypmsize(int, int); @@ -62,7 +62,7 @@ extern void ECPG_informix_reset_sqlca(void); int decadd(decimal *, decimal *, decimal *); int deccmp(decimal *, decimal *); void 
deccopy(decimal *, decimal *); -int deccvasc(char *, int, decimal *); +int deccvasc(const char *, int, decimal *); int deccvdbl(double, decimal *); int deccvint(int, decimal *); int deccvlong(long, decimal *); diff --git a/src/interfaces/ecpg/include/ecpglib.h b/src/interfaces/ecpg/include/ecpglib.h index 536b7506ff..8a601996d2 100644 --- a/src/interfaces/ecpg/include/ecpglib.h +++ b/src/interfaces/ecpg/include/ecpglib.h @@ -80,7 +80,7 @@ bool ECPGset_desc_header(int, const char *, int); bool ECPGset_desc(int, const char *, int,...); void ECPGset_noind_null(enum ECPGttype, void *); -bool ECPGis_noind_null(enum ECPGttype, void *); +bool ECPGis_noind_null(enum ECPGttype, const void *); bool ECPGdescribe(int, int, bool, const char *, const char *,...); void ECPGset_var(int, void *, int); diff --git a/src/interfaces/ecpg/include/pgtypes.h b/src/interfaces/ecpg/include/pgtypes.h new file mode 100644 index 0000000000..dbf759b45f --- /dev/null +++ b/src/interfaces/ecpg/include/pgtypes.h @@ -0,0 +1,17 @@ +/* src/interfaces/ecpg/include/pgtypes.h */ + +#ifndef PGTYPES_H +#define PGTYPES_H + +#ifdef __cplusplus +extern "C" +{ +#endif + +extern void PGTYPESchar_free(char *ptr); + +#ifdef __cplusplus +} +#endif + +#endif /* PGTYPES_H */ diff --git a/src/interfaces/ecpg/include/pgtypes_date.h b/src/interfaces/ecpg/include/pgtypes_date.h index 3d1a181b2b..c668097466 100644 --- a/src/interfaces/ecpg/include/pgtypes_date.h +++ b/src/interfaces/ecpg/include/pgtypes_date.h @@ -3,6 +3,7 @@ #ifndef PGTYPES_DATETIME #define PGTYPES_DATETIME +#include #include typedef long date; @@ -21,7 +22,7 @@ extern void PGTYPESdate_julmdy(date, int *); extern void PGTYPESdate_mdyjul(int *, date *); extern int PGTYPESdate_dayofweek(date); extern void PGTYPESdate_today(date *); -extern int PGTYPESdate_defmt_asc(date *, const char *, char *); +extern int PGTYPESdate_defmt_asc(date *, const char *, const char *); extern int PGTYPESdate_fmt_asc(date, const char *, char *); #ifdef __cplusplus diff --git a/src/interfaces/ecpg/include/pgtypes_interval.h b/src/interfaces/ecpg/include/pgtypes_interval.h index 5747736fe1..3b17cd1d11 100644 --- a/src/interfaces/ecpg/include/pgtypes_interval.h +++ b/src/interfaces/ecpg/include/pgtypes_interval.h @@ -4,6 +4,7 @@ #define PGTYPES_INTERVAL #include +#include #ifndef C_H diff --git a/src/interfaces/ecpg/include/pgtypes_numeric.h b/src/interfaces/ecpg/include/pgtypes_numeric.h index 56c46ea272..5c763a9eb6 100644 --- a/src/interfaces/ecpg/include/pgtypes_numeric.h +++ b/src/interfaces/ecpg/include/pgtypes_numeric.h @@ -1,6 +1,8 @@ #ifndef PGTYPES_NUMERIC #define PGTYPES_NUMERIC +#include + #define NUMERIC_POS 0x0000 #define NUMERIC_NEG 0x4000 #define NUMERIC_NAN 0xC000 diff --git a/src/interfaces/ecpg/include/pgtypes_timestamp.h b/src/interfaces/ecpg/include/pgtypes_timestamp.h index 283ecca25e..3e29837891 100644 --- a/src/interfaces/ecpg/include/pgtypes_timestamp.h +++ b/src/interfaces/ecpg/include/pgtypes_timestamp.h @@ -3,6 +3,7 @@ #ifndef PGTYPES_TIMESTAMP #define PGTYPES_TIMESTAMP +#include /* pgtypes_interval.h includes ecpg_config.h */ #include @@ -19,7 +20,7 @@ extern char *PGTYPEStimestamp_to_asc(timestamp); extern int PGTYPEStimestamp_sub(timestamp *, timestamp *, interval *); extern int PGTYPEStimestamp_fmt_asc(timestamp *, char *, int, const char *); extern void PGTYPEStimestamp_current(timestamp *); -extern int PGTYPEStimestamp_defmt_asc(char *, const char *, timestamp *); +extern int PGTYPEStimestamp_defmt_asc(const char *, const char *, timestamp *); extern int 
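
The new pgtypes.h above exports PGTYPESchar_free(), so strings handed out by pgtypeslib can be released by the library that allocated them; per the common.c comment later in this patch, that mainly matters on Windows, where the application and the DLL may link different C runtimes. A hedged usage sketch (not part of the patch; the function name, include paths, and literal timestamp are illustrative only):

#include <stdio.h>
#include <pgtypes.h>            /* header added by this patch */
#include <pgtypes_timestamp.h>

static void
show_timestamp(void)
{
    char      in[] = "2018-11-11 22:59:12";
    timestamp ts = PGTYPEStimestamp_from_asc(in, NULL);
    char     *out = PGTYPEStimestamp_to_asc(ts);   /* allocated inside pgtypeslib */

    if (out != NULL)
    {
        printf("parsed: %s\n", out);
        PGTYPESchar_free(out);                     /* not plain free() */
    }
}
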
PGTYPEStimestamp_add_interval(timestamp * tin, interval * span, timestamp * tout); extern int PGTYPEStimestamp_sub_interval(timestamp * tin, interval * span, timestamp * tout); diff --git a/src/interfaces/ecpg/include/sqlda-compat.h b/src/interfaces/ecpg/include/sqlda-compat.h index 7393182aa9..7b0ac45c42 100644 --- a/src/interfaces/ecpg/include/sqlda-compat.h +++ b/src/interfaces/ecpg/include/sqlda-compat.h @@ -1,5 +1,5 @@ /* - * pgsql/src/interfaces/ecpg/include/sqlda-infx-compat.h + * src/interfaces/ecpg/include/sqlda-compat.h */ #ifndef ECPG_SQLDA_COMPAT_H diff --git a/src/interfaces/ecpg/pgtypeslib/.gitignore b/src/interfaces/ecpg/pgtypeslib/.gitignore index fbcd68d7d3..91402ad88b 100644 --- a/src/interfaces/ecpg/pgtypeslib/.gitignore +++ b/src/interfaces/ecpg/pgtypeslib/.gitignore @@ -1,6 +1,3 @@ /pgtypeslib.def /blibpgtypesdll.def /exports.list -/pgstrcasecmp.c -/rint.c -/snprintf.c diff --git a/src/interfaces/ecpg/pgtypeslib/Makefile b/src/interfaces/ecpg/pgtypeslib/Makefile index 9fc75661b5..c1b41636ac 100644 --- a/src/interfaces/ecpg/pgtypeslib/Makefile +++ b/src/interfaces/ecpg/pgtypeslib/Makefile @@ -2,7 +2,7 @@ # # Makefile for ecpg pgtypes library # -# Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group +# Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group # Portions Copyright (c) 1994, Regents of the University of California # # src/interfaces/ecpg/pgtypeslib/Makefile @@ -22,30 +22,19 @@ override CPPFLAGS := -I../include -I$(top_srcdir)/src/interfaces/ecpg/include \ -DFRONTEND $(CPPFLAGS) override CFLAGS += $(PTHREAD_CFLAGS) -# Need to recompile any libpgport object files -LIBS := $(filter-out -lpgport, $(LIBS)) - -SHLIB_LINK += -lm +SHLIB_LINK_INTERNAL = -lpgcommon_shlib -lpgport_shlib +SHLIB_LINK += $(filter -lintl -lm, $(LIBS)) SHLIB_EXPORTS = exports.txt OBJS= numeric.o datetime.o common.o dt_common.o timestamp.o interval.o \ - pgstrcasecmp.o \ - $(filter rint.o snprintf.o, $(LIBOBJS)) $(WIN32RES) + $(WIN32RES) all: all-lib # Shared library stuff include $(top_srcdir)/src/Makefile.shlib -# We use some port modules verbatim, but since we need to -# compile with appropriate options to build a shared lib, we can't -# necessarily use the same object files as the backend uses. Instead, -# symlink the source files in here and build our own object file. - -pgstrcasecmp.c rint.c snprintf.c: % : $(top_srcdir)/src/port/% - rm -f $@ && $(LN_S) $< . - install: all installdirs install-lib installdirs: installdirs-lib @@ -53,6 +42,6 @@ installdirs: installdirs-lib uninstall: uninstall-lib clean distclean: clean-lib - rm -f $(OBJS) pgstrcasecmp.c rint.c snprintf.c + rm -f $(OBJS) maintainer-clean: distclean maintainer-clean-lib diff --git a/src/interfaces/ecpg/pgtypeslib/common.c b/src/interfaces/ecpg/pgtypeslib/common.c index 9084fd06b4..a8a7e02be0 100644 --- a/src/interfaces/ecpg/pgtypeslib/common.c +++ b/src/interfaces/ecpg/pgtypeslib/common.c @@ -3,6 +3,7 @@ #include "postgres_fe.h" #include "extern.h" +#include "pgtypes.h" /* Return value is zero-filled. 
*/ char * @@ -12,7 +13,7 @@ pgtypes_alloc(long size) if (!new) errno = ENOMEM; - return (new); + return new; } char * @@ -22,7 +23,7 @@ pgtypes_strdup(const char *str) if (!new) errno = ENOMEM; - return (new); + return new; } int @@ -42,10 +43,8 @@ pgtypes_fmt_replace(union un_fmt_comb replace_val, int replace_type, char **outp i = strlen(replace_val.str_val); if (i + 1 <= *pstr_len) { - /* - * copy over i + 1 bytes, that includes the tailing terminator - */ - strncpy(*output, replace_val.str_val, i + 1); + /* include trailing terminator in what we copy */ + memcpy(*output, replace_val.str_val, i + 1); *pstr_len -= i; *output += i; if (replace_type == PGTYPES_TYPE_STRING_MALLOCED) @@ -111,7 +110,7 @@ pgtypes_fmt_replace(union un_fmt_comb replace_val, int replace_type, char **outp break; } - if (i < 0) + if (i < 0 || i >= PGTYPES_FMT_NUM_MAX_DIGITS) { free(t); return -1; @@ -138,3 +137,12 @@ pgtypes_fmt_replace(union un_fmt_comb replace_val, int replace_type, char **outp } return 0; } + +/* Functions declared in pgtypes.h. */ + +/* Just frees memory (mostly needed for Windows) */ +void +PGTYPESchar_free(char *ptr) +{ + free(ptr); +} diff --git a/src/interfaces/ecpg/pgtypeslib/datetime.c b/src/interfaces/ecpg/pgtypeslib/datetime.c index 33c9011a71..ed321febf2 100644 --- a/src/interfaces/ecpg/pgtypeslib/datetime.c +++ b/src/interfaces/ecpg/pgtypeslib/datetime.c @@ -4,7 +4,6 @@ #include #include -#include #include #include "extern.h" @@ -59,7 +58,7 @@ PGTYPESdate_from_asc(char *str, char **endptr) char *realptr; char **ptr = (endptr != NULL) ? endptr : &realptr; - bool EuroDates = FALSE; + bool EuroDates = false; errno = 0; if (strlen(str) > MAXDATELEN) @@ -105,7 +104,7 @@ PGTYPESdate_to_asc(date dDate) *tm = &tt; char buf[MAXDATELEN + 1]; int DateStyle = 1; - bool EuroDates = FALSE; + bool EuroDates = false; j2date(dDate + date2j(2000, 1, 1), &(tm->tm_year), &(tm->tm_mon), &(tm->tm_mday)); EncodeDateOnly(tm, DateStyle, buf, EuroDates); @@ -329,7 +328,7 @@ PGTYPESdate_fmt_asc(date dDate, const char *fmtstring, char *outbuf) #define PGTYPES_DATE_MONTH_MAXLENGTH 20 /* probably even less :-) */ int -PGTYPESdate_defmt_asc(date * d, const char *fmt, char *str) +PGTYPESdate_defmt_asc(date * d, const char *fmt, const char *str) { /* * token[2] = { 4,6 } means that token 2 starts at position 4 and ends at diff --git a/src/interfaces/ecpg/pgtypeslib/dt.h b/src/interfaces/ecpg/pgtypeslib/dt.h index 5a192ddc45..2c967b303a 100644 --- a/src/interfaces/ecpg/pgtypeslib/dt.h +++ b/src/interfaces/ecpg/pgtypeslib/dt.h @@ -313,12 +313,12 @@ do { \ int DecodeInterval(char **, int *, int, int *, struct tm *, fsec_t *); int DecodeTime(char *, int *, struct tm *, fsec_t *); -int EncodeDateTime(struct tm *tm, fsec_t fsec, bool print_tz, int tz, const char *tzn, int style, char *str, bool EuroDates); -int EncodeInterval(struct tm *tm, fsec_t fsec, int style, char *str); +void EncodeDateTime(struct tm *tm, fsec_t fsec, bool print_tz, int tz, const char *tzn, int style, char *str, bool EuroDates); +void EncodeInterval(struct tm *tm, fsec_t fsec, int style, char *str); int tm2timestamp(struct tm *, fsec_t, int *, timestamp *); int DecodeUnits(int field, char *lowtoken, int *val); bool CheckDateTokenTables(void); -int EncodeDateOnly(struct tm *tm, int style, char *str, bool EuroDates); +void EncodeDateOnly(struct tm *tm, int style, char *str, bool EuroDates); int GetEpochTime(struct tm *); int ParseDateTime(char *, char *, char **, int *, int *, char **); int DecodeDateTime(char **, int *, int, int *, struct tm *, fsec_t 
*, bool); @@ -336,6 +336,6 @@ extern char *pgtypes_date_weekdays_short[]; extern char *pgtypes_date_months[]; extern char *months[]; extern char *days[]; -extern int day_tab[2][13]; +extern const int day_tab[2][13]; #endif /* DT_H */ diff --git a/src/interfaces/ecpg/pgtypeslib/dt_common.c b/src/interfaces/ecpg/pgtypeslib/dt_common.c index a26d61b32c..ba4dd3a924 100644 --- a/src/interfaces/ecpg/pgtypeslib/dt_common.c +++ b/src/interfaces/ecpg/pgtypeslib/dt_common.c @@ -10,13 +10,13 @@ #include "dt.h" #include "pgtypes_timestamp.h" -int day_tab[2][13] = { +const int day_tab[2][13] = { {31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31, 0}, {31, 29, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31, 0}}; typedef long AbsoluteTime; -static datetkn datetktbl[] = { +static const datetkn datetktbl[] = { /* text, token, lexval */ {EARLY, RESERV, DTK_EARLY}, /* "-infinity" reserved for "early time" */ {"acsst", DTZ, 37800}, /* Cent. Australia */ @@ -420,7 +420,7 @@ static datetkn datetktbl[] = { {ZULU, TZ, 0}, /* UTC */ }; -static datetkn deltatktbl[] = { +static const datetkn deltatktbl[] = { /* text, token, lexval */ {"@", IGNORE_DTF, 0}, /* postgres relative prefix */ {DAGO, AGO, 0}, /* "ago" indicates negative time offset */ @@ -490,9 +490,9 @@ static datetkn deltatktbl[] = { static const unsigned int szdatetktbl = lengthof(datetktbl); static const unsigned int szdeltatktbl = lengthof(deltatktbl); -static datetkn *datecache[MAXDATEFIELDS] = {NULL}; +static const datetkn *datecache[MAXDATEFIELDS] = {NULL}; -static datetkn *deltacache[MAXDATEFIELDS] = {NULL}; +static const datetkn *deltacache[MAXDATEFIELDS] = {NULL}; char *months[] = {"Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sep", "Oct", "Nov", "Dec", NULL}; @@ -502,12 +502,12 @@ char *pgtypes_date_weekdays_short[] = {"Sun", "Mon", "Tue", "Wed", "Thu", "Fr char *pgtypes_date_months[] = {"January", "February", "March", "April", "May", "June", "July", "August", "September", "October", "November", "December", NULL}; -static datetkn * -datebsearch(char *key, datetkn *base, unsigned int nel) +static const datetkn * +datebsearch(const char *key, const datetkn *base, unsigned int nel) { if (nel > 0) { - datetkn *last = base + nel - 1, + const datetkn *last = base + nel - 1, *position; int result; @@ -540,7 +540,7 @@ int DecodeUnits(int field, char *lowtoken, int *val) { int type; - datetkn *tp; + const datetkn *tp; /* use strncmp so that we match truncated tokens */ if (deltacache[field] != NULL && @@ -641,7 +641,7 @@ static int DecodeSpecial(int field, char *lowtoken, int *val) { int type; - datetkn *tp; + const datetkn *tp; /* use strncmp so that we match truncated tokens */ if (datecache[field] != NULL && @@ -671,11 +671,10 @@ DecodeSpecial(int field, char *lowtoken, int *val) /* EncodeDateOnly() * Encode date as local time. 
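
dt_common.c above marks day_tab and the datetime keyword tables const. For orientation, this is how such a month-length table is typically consulted; the days_in_month helper and the inlined leap-year rule are assumptions made for this sketch, not code from the patch:

/* illustrative only: row 0 = common years, row 1 = leap years */
extern const int day_tab[2][13];

static int
days_in_month(int year, int month)      /* month runs 1..12 */
{
    int leap = (year % 4 == 0 && (year % 100 != 0 || year % 400 == 0));

    return day_tab[leap][month - 1];
}
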
*/ -int +void EncodeDateOnly(struct tm *tm, int style, char *str, bool EuroDates) { - if (tm->tm_mon < 1 || tm->tm_mon > MONTHS_PER_YEAR) - return -1; + Assert(tm->tm_mon >= 1 && tm->tm_mon <= MONTHS_PER_YEAR); switch (style) { @@ -723,9 +722,7 @@ EncodeDateOnly(struct tm *tm, int style, char *str, bool EuroDates) sprintf(str + 5, "-%04d %s", -(tm->tm_year - 1), "BC"); break; } - - return TRUE; -} /* EncodeDateOnly() */ +} void TrimTrailingZeros(char *str) @@ -758,7 +755,7 @@ TrimTrailingZeros(char *str) * US - mm/dd/yyyy * European - dd/mm/yyyy */ -int +void EncodeDateTime(struct tm *tm, fsec_t fsec, bool print_tz, int tz, const char *tzn, int style, char *str, bool EuroDates) { int day, @@ -836,7 +833,7 @@ EncodeDateTime(struct tm *tm, fsec_t fsec, bool print_tz, int tz, const char *tz /* * Note: the uses of %.*s in this function would be risky if the * timezone names ever contain non-ASCII characters. However, all - * TZ abbreviations in the Olson database are plain ASCII. + * TZ abbreviations in the IANA database are plain ASCII. */ if (print_tz) @@ -951,9 +948,7 @@ EncodeDateTime(struct tm *tm, fsec_t fsec, bool print_tz, int tz, const char *tz } break; } - - return TRUE; -} /* EncodeDateTime() */ +} int GetEpochTime(struct tm *tm) @@ -1094,7 +1089,7 @@ dt2time(double jd, int *hour, int *min, int *sec, fsec_t *fsec) */ static int DecodeNumberField(int len, char *str, int fmask, - int *tmask, struct tm *tm, fsec_t *fsec, int *is2digits) + int *tmask, struct tm *tm, fsec_t *fsec, bool *is2digits) { char *cp; @@ -1149,7 +1144,7 @@ DecodeNumberField(int len, char *str, int fmask, tm->tm_mon = atoi(str + 2); *(str + 2) = '\0'; tm->tm_year = atoi(str + 0); - *is2digits = TRUE; + *is2digits = true; return DTK_DATE; } @@ -1161,7 +1156,7 @@ DecodeNumberField(int len, char *str, int fmask, *(str + 2) = '\0'; tm->tm_mon = 1; tm->tm_year = atoi(str + 0); - *is2digits = TRUE; + *is2digits = true; return DTK_DATE; } @@ -1204,7 +1199,7 @@ DecodeNumberField(int len, char *str, int fmask, */ static int DecodeNumber(int flen, char *str, int fmask, - int *tmask, struct tm *tm, fsec_t *fsec, int *is2digits, bool EuroDates) + int *tmask, struct tm *tm, fsec_t *fsec, bool *is2digits, bool EuroDates) { int val; char *cp; @@ -1319,8 +1314,8 @@ DecodeDate(char *str, int fmask, int *tmask, struct tm *tm, bool EuroDates) int nf = 0; int i, len; - int bc = FALSE; - int is2digits = FALSE; + bool bc = false; + bool is2digits = false; int type, val, dmask = 0; @@ -1797,9 +1792,9 @@ DecodeDateTime(char **field, int *ftype, int nf, int i; int val; int mer = HR24; - int haveTextMonth = FALSE; - int is2digits = FALSE; - int bc = FALSE; + bool haveTextMonth = false; + bool is2digits = false; + bool bc = false; int t = 0; int *tzp = &t; @@ -2205,7 +2200,7 @@ DecodeDateTime(char **field, int *ftype, int nf, tm->tm_mday = tm->tm_mon; tmask = DTK_M(DAY); } - haveTextMonth = TRUE; + haveTextMonth = true; tm->tm_mon = val; break; diff --git a/src/interfaces/ecpg/pgtypeslib/exports.txt b/src/interfaces/ecpg/pgtypeslib/exports.txt index 70ef01a8a7..2d5ec17656 100644 --- a/src/interfaces/ecpg/pgtypeslib/exports.txt +++ b/src/interfaces/ecpg/pgtypeslib/exports.txt @@ -45,3 +45,4 @@ PGTYPEStimestamp_from_asc 42 PGTYPEStimestamp_sub 43 PGTYPEStimestamp_sub_interval 44 PGTYPEStimestamp_to_asc 45 +PGTYPESchar_free 46 diff --git a/src/interfaces/ecpg/pgtypeslib/interval.c b/src/interfaces/ecpg/pgtypeslib/interval.c index 30f2ccbcb7..4fdbcd01cc 100644 --- a/src/interfaces/ecpg/pgtypeslib/interval.c +++ 
b/src/interfaces/ecpg/pgtypeslib/interval.c @@ -9,25 +9,13 @@ #error -ffast-math is known to break this code #endif +#include "common/string.h" + #include "extern.h" #include "dt.h" #include "pgtypes_error.h" #include "pgtypes_interval.h" -/* copy&pasted from .../src/backend/utils/adt/datetime.c */ -static int -strtoint(const char *nptr, char **endptr, int base) -{ - long val; - - val = strtol(nptr, endptr, base); -#ifdef HAVE_LONG_INT_64 - if (val != (long) ((int32) val)) - errno = ERANGE; -#endif - return (int) val; -} - /* copy&pasted from .../src/backend/utils/adt/datetime.c * and changesd struct pg_tm to struct tm */ @@ -65,7 +53,7 @@ AdjustFractDays(double frac, struct /* pg_ */ tm *tm, fsec_t *fsec, int scale) /* copy&pasted from .../src/backend/utils/adt/datetime.c */ static int -ParseISO8601Number(char *str, char **endptr, int *ipart, double *fpart) +ParseISO8601Number(const char *str, char **endptr, int *ipart, double *fpart) { double val; @@ -90,7 +78,7 @@ ParseISO8601Number(char *str, char **endptr, int *ipart, double *fpart) /* copy&pasted from .../src/backend/utils/adt/datetime.c */ static int -ISO8601IntegerWidth(char *fieldstart) +ISO8601IntegerWidth(const char *fieldstart) { /* We might have had a leading '-' */ if (*fieldstart == '-') @@ -196,6 +184,7 @@ DecodeISO8601Interval(char *str, continue; } /* Else fall through to extended alternative format */ + /* FALLTHROUGH */ case '-': /* ISO 8601 4.4.3.3 Alternative Format, * Extended */ if (havefield) @@ -274,6 +263,7 @@ DecodeISO8601Interval(char *str, return 0; } /* Else fall through to extended alternative format */ + /* FALLTHROUGH */ case ':': /* ISO 8601 4.4.3.3 Alternative Format, * Extended */ if (havefield) @@ -328,11 +318,9 @@ DecodeISO8601Interval(char *str, * places where DecodeTime is called; and added * int range = INTERVAL_FULL_RANGE; * - * * ECPG semes not to have a global IntervalStyle + * * ECPG seems not to have a global IntervalStyle * so added * int IntervalStyle = INTSTYLE_POSTGRES; - * - * * Assert wasn't available so removed it. */ int DecodeInterval(char **field, int *ftype, int nf, /* int range, */ @@ -340,7 +328,7 @@ DecodeInterval(char **field, int *ftype, int nf, /* int range, */ { int IntervalStyle = INTSTYLE_POSTGRES_VERBOSE; int range = INTERVAL_FULL_RANGE; - bool is_before = FALSE; + bool is_before = false; char *cp; int fmask = 0, tmask, @@ -374,7 +362,7 @@ DecodeInterval(char **field, int *ftype, int nf, /* int range, */ * least one digit; there could be ':', '.', '-' embedded in * it as well. */ - /* Assert(*field[i] == '-' || *field[i] == '+'); */ + Assert(*field[i] == '-' || *field[i] == '+'); /* * Try for hh:mm or hh:mm:ss. If not, fall through to @@ -585,7 +573,7 @@ DecodeInterval(char **field, int *ftype, int nf, /* int range, */ break; case AGO: - is_before = TRUE; + is_before = true; type = val; break; @@ -707,7 +695,7 @@ AddVerboseIntPart(char *cp, int value, const char *units, else if (*is_before) value = -value; sprintf(cp, " %d %s%s", value, units, (value == 1) ? "" : "s"); - *is_zero = FALSE; + *is_zero = false; return cp + strlen(cp); } @@ -730,7 +718,7 @@ AddPostgresIntPart(char *cp, int value, const char *units, * tad bizarre but it's how it worked before... 
*/ *is_before = (value < 0); - *is_zero = FALSE; + *is_zero = false; return cp + strlen(cp); } @@ -771,7 +759,7 @@ AppendSeconds(char *cp, int sec, fsec_t fsec, int precision, bool fillzeros) * Change pg_tm to tm */ -int +void EncodeInterval(struct /* pg_ */ tm *tm, fsec_t fsec, int style, char *str) { char *cp = str; @@ -781,8 +769,8 @@ EncodeInterval(struct /* pg_ */ tm *tm, fsec_t fsec, int style, char *str) int hour = tm->tm_hour; int min = tm->tm_min; int sec = tm->tm_sec; - bool is_before = FALSE; - bool is_zero = TRUE; + bool is_before = false; + bool is_zero = true; /* * The sign of year and month are guaranteed to match, since they are @@ -928,7 +916,7 @@ EncodeInterval(struct /* pg_ */ tm *tm, fsec_t fsec, int style, char *str) if (sec < 0 || (sec == 0 && fsec < 0)) { if (is_zero) - is_before = TRUE; + is_before = true; else if (!is_before) *cp++ = '-'; } @@ -938,7 +926,7 @@ EncodeInterval(struct /* pg_ */ tm *tm, fsec_t fsec, int style, char *str) cp += strlen(cp); sprintf(cp, " sec%s", (abs(sec) != 1 || fsec != 0) ? "s" : ""); - is_zero = FALSE; + is_zero = false; } /* identically zero? then put in a unitless zero... */ if (is_zero) @@ -947,9 +935,7 @@ EncodeInterval(struct /* pg_ */ tm *tm, fsec_t fsec, int style, char *str) strcat(cp, " ago"); break; } - - return 0; -} /* EncodeInterval() */ +} /* interval2tm() @@ -1091,11 +1077,7 @@ PGTYPESinterval_to_asc(interval * span) return NULL; } - if (EncodeInterval(tm, fsec, IntervalStyle, buf) != 0) - { - errno = PGTYPES_INTVL_BAD_INTERVAL; - return NULL; - } + EncodeInterval(tm, fsec, IntervalStyle, buf); return pgtypes_strdup(buf); } diff --git a/src/interfaces/ecpg/pgtypeslib/numeric.c b/src/interfaces/ecpg/pgtypeslib/numeric.c index a93d074de2..6643242ab1 100644 --- a/src/interfaces/ecpg/pgtypeslib/numeric.c +++ b/src/interfaces/ecpg/pgtypeslib/numeric.c @@ -40,7 +40,7 @@ apply_typmod(numeric *var, long typmod) /* Do nothing if we have a default typmod (-1) */ if (typmod < (long) (VARHDRSZ)) - return (0); + return 0; typmod -= VARHDRSZ; precision = (typmod >> 16) & 0xffff; @@ -100,7 +100,7 @@ apply_typmod(numeric *var, long typmod) var->rscale = scale; var->dscale = scale; - return (0); + return 0; } #endif @@ -162,7 +162,7 @@ PGTYPESdecimal_new(void) static int set_var_from_str(char *str, char **ptr, numeric *dest) { - bool have_dp = FALSE; + bool have_dp = false; int i = 0; errno = 0; @@ -214,7 +214,7 @@ set_var_from_str(char *str, char **ptr, numeric *dest) if (*(*ptr) == '.') { - have_dp = TRUE; + have_dp = true; (*ptr)++; } @@ -241,7 +241,7 @@ set_var_from_str(char *str, char **ptr, numeric *dest) errno = PGTYPES_NUM_BAD_NUMERIC; return -1; } - have_dp = TRUE; + have_dp = true; (*ptr)++; } else @@ -296,7 +296,7 @@ set_var_from_str(char *str, char **ptr, numeric *dest) dest->weight = 0; dest->rscale = dest->dscale; - return (0); + return 0; } @@ -412,16 +412,16 @@ PGTYPESnumeric_from_asc(char *str, char **endptr) char **ptr = (endptr != NULL) ? 
endptr : &realptr; if (!value) - return (NULL); + return NULL; ret = set_var_from_str(str, ptr, value); if (ret) { PGTYPESnumeric_free(value); - return (NULL); + return NULL; } - return (value); + return value; } char * @@ -445,7 +445,7 @@ PGTYPESnumeric_to_asc(numeric *num, int dscale) /* get_str_from_var may change its argument */ s = get_str_from_var(numcopy, dscale); PGTYPESnumeric_free(numcopy); - return (s); + return s; } /* ---------- diff --git a/src/interfaces/ecpg/pgtypeslib/timestamp.c b/src/interfaces/ecpg/pgtypeslib/timestamp.c index 78931399e6..4cd4fe2da2 100644 --- a/src/interfaces/ecpg/pgtypeslib/timestamp.c +++ b/src/interfaces/ecpg/pgtypeslib/timestamp.c @@ -4,7 +4,6 @@ #include "postgres_fe.h" #include -#include #include #include @@ -192,7 +191,7 @@ timestamp2tm(timestamp dt, int *tzp, struct tm *tm, fsec_t *fsec, const char **t /* EncodeSpecialTimestamp() * * Convert reserved timestamp data type to string. * */ -static int +static void EncodeSpecialTimestamp(timestamp dt, char *str) { if (TIMESTAMP_IS_NOBEGIN(dt)) @@ -200,10 +199,8 @@ EncodeSpecialTimestamp(timestamp dt, char *str) else if (TIMESTAMP_IS_NOEND(dt)) strcpy(str, LATE); else - return FALSE; - - return TRUE; -} /* EncodeSpecialTimestamp() */ + abort(); /* shouldn't happen */ +} timestamp PGTYPEStimestamp_from_asc(char *str, char **endptr) @@ -224,14 +221,14 @@ PGTYPEStimestamp_from_asc(char *str, char **endptr) if (strlen(str) > MAXDATELEN) { errno = PGTYPES_TS_BAD_TIMESTAMP; - return (noresult); + return noresult; } if (ParseDateTime(str, lowstr, field, ftype, &nf, ptr) != 0 || DecodeDateTime(field, ftype, nf, &dtype, tm, &fsec, 0) != 0) { errno = PGTYPES_TS_BAD_TIMESTAMP; - return (noresult); + return noresult; } switch (dtype) @@ -240,7 +237,7 @@ PGTYPEStimestamp_from_asc(char *str, char **endptr) if (tm2timestamp(tm, fsec, NULL, &result) != 0) { errno = PGTYPES_TS_BAD_TIMESTAMP; - return (noresult); + return noresult; } break; @@ -258,11 +255,11 @@ PGTYPEStimestamp_from_asc(char *str, char **endptr) case DTK_INVALID: errno = PGTYPES_TS_BAD_TIMESTAMP; - return (noresult); + return noresult; default: errno = PGTYPES_TS_BAD_TIMESTAMP; - return (noresult); + return noresult; } /* AdjustTimestampForTypmod(&result, typmod); */ @@ -815,7 +812,7 @@ PGTYPEStimestamp_sub(timestamp * ts1, timestamp * ts2, interval * iv) } int -PGTYPEStimestamp_defmt_asc(char *str, const char *fmt, timestamp * d) +PGTYPEStimestamp_defmt_asc(const char *str, const char *fmt, timestamp * d) { int year, month, diff --git a/src/interfaces/ecpg/preproc/Makefile b/src/interfaces/ecpg/preproc/Makefile index 02a6e65daf..07721752c8 100644 --- a/src/interfaces/ecpg/preproc/Makefile +++ b/src/interfaces/ecpg/preproc/Makefile @@ -2,7 +2,7 @@ # # Makefile for src/interfaces/ecpg/preproc # -# Copyright (c) 1998-2017, PostgreSQL Global Development Group +# Copyright (c) 1998-2018, PostgreSQL Global Development Group # # src/interfaces/ecpg/preproc/Makefile # @@ -39,7 +39,10 @@ ecpg: $(OBJS) | submake-libpgport ../ecpglib/typename.o: ../ecpglib/typename.c $(MAKE) -C $(dir $@) $(notdir $@) -preproc.h: preproc.c ; +# See notes in src/backend/parser/Makefile about the following two rules +preproc.h: preproc.c + touch $@ + preproc.c: BISONFLAGS += -d preproc.y: ../../../backend/parser/gram.y parse.pl ecpg.addons ecpg.header ecpg.tokens ecpg.trailer ecpg.type @@ -61,8 +64,6 @@ uninstall: clean distclean: rm -f *.o ecpg$(X) -# garbage from development - @rm -f core a.out # `make distclean' must not remove preproc.y, preproc.c, preproc.h, or pgc.c # since 
we want to ship those files in the distribution for people with diff --git a/src/interfaces/ecpg/preproc/check_rules.pl b/src/interfaces/ecpg/preproc/check_rules.pl index e681943856..8b06bd8368 100644 --- a/src/interfaces/ecpg/preproc/check_rules.pl +++ b/src/interfaces/ecpg/preproc/check_rules.pl @@ -1,9 +1,9 @@ #!/usr/bin/perl # src/interfaces/ecpg/preproc/check_rules.pl -# test parser generater for ecpg +# test parser generator for ecpg # call with backend parser as stdin # -# Copyright (c) 2009-2017, PostgreSQL Global Development Group +# Copyright (c) 2009-2018, PostgreSQL Global Development Group # # Written by Michael Meskes # Andy Colson @@ -39,7 +39,7 @@ 'ExecuteStmtEXECUTEnameexecute_param_clause' => 'EXECUTE prepared_name execute_param_clause execute_rest', -'ExecuteStmtCREATEOptTempTABLEcreate_as_targetASEXECUTEnameexecute_param_clause' + 'ExecuteStmtCREATEOptTempTABLEcreate_as_targetASEXECUTEnameexecute_param_clause' => 'CREATE OptTemp TABLE create_as_target AS EXECUTE prepared_name execute_param_clause', 'PrepareStmtPREPAREnameprep_type_clauseASPreparableStmt' => diff --git a/src/interfaces/ecpg/preproc/ecpg.c b/src/interfaces/ecpg/preproc/ecpg.c index bad0a667fc..f39bf697d6 100644 --- a/src/interfaces/ecpg/preproc/ecpg.c +++ b/src/interfaces/ecpg/preproc/ecpg.c @@ -1,7 +1,7 @@ /* src/interfaces/ecpg/preproc/ecpg.c */ /* Main for ecpg, the PostgreSQL embedded SQL precompiler. */ -/* Copyright (c) 1996-2017, PostgreSQL Global Development Group */ +/* Copyright (c) 1996-2018, PostgreSQL Global Development Group */ #include "postgres_fe.h" @@ -41,7 +41,7 @@ help(const char *progname) printf(_(" -c automatically generate C code from embedded SQL code;\n" " this affects EXEC SQL TYPE\n")); printf(_(" -C MODE set compatibility mode; MODE can be one of\n" - " \"INFORMIX\", \"INFORMIX_SE\"\n")); + " \"INFORMIX\", \"INFORMIX_SE\", \"ORACLE\"\n")); #ifdef YYDEBUG printf(_(" -d generate parser debug output\n")); #endif @@ -137,7 +137,7 @@ main(int argc, char *const argv[]) if (find_my_exec(argv[0], my_exec_path) < 0) { fprintf(stderr, _("%s: could not locate my own executable path\n"), argv[0]); - return (ILLEGAL_OPTION); + return ILLEGAL_OPTION; } if (argc > 1) @@ -149,7 +149,7 @@ main(int argc, char *const argv[]) } if (strcmp(argv[1], "--version") == 0 || strcmp(argv[1], "-V") == 0) { - printf("ecpg %s\n", PG_VERSION); + printf("ecpg (PostgreSQL) %s\n", PG_VERSION); exit(0); } } @@ -189,8 +189,8 @@ main(int argc, char *const argv[]) break; case 'h': header_mode = true; - /* this must include "-c" to make sense */ - /* so do not place a "break;" here */ + /* this must include "-c" to make sense, so fall through */ + /* FALLTHROUGH */ case 'c': auto_create_c = true; break; @@ -198,16 +198,20 @@ main(int argc, char *const argv[]) system_includes = true; break; case 'C': - if (strncmp(optarg, "INFORMIX", strlen("INFORMIX")) == 0) + if (pg_strcasecmp(optarg, "INFORMIX") == 0 || pg_strcasecmp(optarg, "INFORMIX_SE") == 0) { char pkginclude_path[MAXPGPATH]; char informix_path[MAXPGPATH]; - compat = (strcmp(optarg, "INFORMIX") == 0) ? ECPG_COMPAT_INFORMIX : ECPG_COMPAT_INFORMIX_SE; + compat = (pg_strcasecmp(optarg, "INFORMIX") == 0) ? 
ECPG_COMPAT_INFORMIX : ECPG_COMPAT_INFORMIX_SE; get_pkginclude_path(my_exec_path, pkginclude_path); snprintf(informix_path, MAXPGPATH, "%s/informix/esql", pkginclude_path); add_include_path(informix_path); } + else if (strncmp(optarg, "ORACLE", strlen("ORACLE")) == 0) + { + compat = ECPG_COMPAT_ORACLE; + } else { fprintf(stderr, _("Try \"%s --help\" for more information.\n"), argv[0]); @@ -266,7 +270,7 @@ main(int argc, char *const argv[]) { fprintf(stderr, _("%s: no input files specified\n"), progname); fprintf(stderr, _("Try \"%s --help\" for more information.\n"), argv[0]); - return (ILLEGAL_OPTION); + return ILLEGAL_OPTION; } else { @@ -327,6 +331,7 @@ main(int argc, char *const argv[]) fprintf(stderr, _("%s: could not open file \"%s\": %s\n"), progname, output_filename, strerror(errno)); free(output_filename); + output_filename = NULL; free(input_filename); continue; } @@ -475,7 +480,10 @@ main(int argc, char *const argv[]) } if (output_filename && out_option == 0) + { free(output_filename); + output_filename = NULL; + } free(input_filename); } diff --git a/src/interfaces/ecpg/preproc/ecpg.header b/src/interfaces/ecpg/preproc/ecpg.header index 2562366bbe..8921bcbeae 100644 --- a/src/interfaces/ecpg/preproc/ecpg.header +++ b/src/interfaces/ecpg/preproc/ecpg.header @@ -142,7 +142,7 @@ cat2_str(char *str1, char *str2) strcat(res_str, str2); free(str1); free(str2); - return(res_str); + return res_str; } static char * @@ -162,7 +162,7 @@ cat_str(int count, ...) va_end(args); - return(res_str); + return res_str; } static char * @@ -174,7 +174,7 @@ make2_str(char *str1, char *str2) strcat(res_str, str2); free(str1); free(str2); - return(res_str); + return res_str; } static char * @@ -188,7 +188,7 @@ make3_str(char *str1, char *str2, char *str3) free(str1); free(str2); free(str3); - return(res_str); + return res_str; } /* and the rest */ @@ -233,7 +233,7 @@ create_questionmarks(char *name, bool array) /* removed the trailing " ," */ result[strlen(result)-3] = '\0'; - return(result); + return result; } static char * @@ -352,7 +352,7 @@ adjust_outofscope_cursor_vars(struct cursor *cur) else { newvar = new_variable(cat_str(4, mm_strdup("("), - mm_strdup(ecpg_type_name(ptr->variable->type->type)), + mm_strdup(ecpg_type_name(ptr->variable->type->u.element->type)), mm_strdup(" *)(ECPGget_var("), mm_strdup(var_text)), ECPGmake_array_type(ECPGmake_simple_type(ptr->variable->type->u.element->type, diff --git a/src/interfaces/ecpg/preproc/ecpg.tokens b/src/interfaces/ecpg/preproc/ecpg.tokens index 68ba925efe..1d613af02f 100644 --- a/src/interfaces/ecpg/preproc/ecpg.tokens +++ b/src/interfaces/ecpg/preproc/ecpg.tokens @@ -2,7 +2,7 @@ /* special embedded SQL tokens */ %token SQL_ALLOCATE SQL_AUTOCOMMIT SQL_BOOL SQL_BREAK - SQL_CALL SQL_CARDINALITY SQL_CONNECT + SQL_CARDINALITY SQL_CONNECT SQL_COUNT SQL_DATETIME_INTERVAL_CODE SQL_DATETIME_INTERVAL_PRECISION SQL_DESCRIBE diff --git a/src/interfaces/ecpg/preproc/ecpg.trailer b/src/interfaces/ecpg/preproc/ecpg.trailer index d273070dab..19dc781885 100644 --- a/src/interfaces/ecpg/preproc/ecpg.trailer +++ b/src/interfaces/ecpg/preproc/ecpg.trailer @@ -1454,13 +1454,19 @@ action : CONTINUE_P $$.command = NULL; $$.str = mm_strdup("break"); } - | SQL_CALL name '(' c_args ')' + | DO CONTINUE_P + { + $$.code = W_CONTINUE; + $$.command = NULL; + $$.str = mm_strdup("continue"); + } + | CALL name '(' c_args ')' { $$.code = W_DO; $$.command = cat_str(4, $2, mm_strdup("("), $4, mm_strdup(")")); $$.str = cat2_str(mm_strdup("call"), mm_strdup($$.command)); } - | SQL_CALL 
name + | CALL name { $$.code = W_DO; $$.command = cat2_str($2, mm_strdup("()")); @@ -1476,7 +1482,6 @@ ECPGKeywords: ECPGKeywords_vanames { $$ = $1; } ; ECPGKeywords_vanames: SQL_BREAK { $$ = mm_strdup("break"); } - | SQL_CALL { $$ = mm_strdup("call"); } | SQL_CARDINALITY { $$ = mm_strdup("cardinality"); } | SQL_COUNT { $$ = mm_strdup("count"); } | SQL_DATETIME_INTERVAL_CODE { $$ = mm_strdup("datetime_interval_code"); } diff --git a/src/interfaces/ecpg/preproc/ecpg_keywords.c b/src/interfaces/ecpg/preproc/ecpg_keywords.c index 3b52b8f3a2..848b2d4849 100644 --- a/src/interfaces/ecpg/preproc/ecpg_keywords.c +++ b/src/interfaces/ecpg/preproc/ecpg_keywords.c @@ -33,7 +33,6 @@ static const ScanKeyword ECPGScanKeywords[] = { {"autocommit", SQL_AUTOCOMMIT, 0}, {"bool", SQL_BOOL, 0}, {"break", SQL_BREAK, 0}, - {"call", SQL_CALL, 0}, {"cardinality", SQL_CARDINALITY, 0}, {"connect", SQL_CONNECT, 0}, {"count", SQL_COUNT, 0}, diff --git a/src/interfaces/ecpg/preproc/extern.h b/src/interfaces/ecpg/preproc/extern.h index 2c35426b7f..d0c4e47016 100644 --- a/src/interfaces/ecpg/preproc/extern.h +++ b/src/interfaces/ecpg/preproc/extern.h @@ -122,10 +122,12 @@ extern int filtered_base_yylex(void); enum COMPAT_MODE { - ECPG_COMPAT_PGSQL = 0, ECPG_COMPAT_INFORMIX, ECPG_COMPAT_INFORMIX_SE + ECPG_COMPAT_PGSQL = 0, ECPG_COMPAT_INFORMIX, ECPG_COMPAT_INFORMIX_SE, ECPG_COMPAT_ORACLE }; extern enum COMPAT_MODE compat; #define INFORMIX_MODE (compat == ECPG_COMPAT_INFORMIX || compat == ECPG_COMPAT_INFORMIX_SE) +#define ORACLE_MODE (compat == ECPG_COMPAT_ORACLE) + #endif /* _ECPG_PREPROC_EXTERN_H */ diff --git a/src/interfaces/ecpg/preproc/keywords.c b/src/interfaces/ecpg/preproc/keywords.c index f016d7fc6f..21e1f928fd 100644 --- a/src/interfaces/ecpg/preproc/keywords.c +++ b/src/interfaces/ecpg/preproc/keywords.c @@ -4,7 +4,7 @@ * lexical token lookup for key words in PostgreSQL * * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * diff --git a/src/interfaces/ecpg/preproc/nls.mk b/src/interfaces/ecpg/preproc/nls.mk index 91297a2323..06f6d3192d 100644 --- a/src/interfaces/ecpg/preproc/nls.mk +++ b/src/interfaces/ecpg/preproc/nls.mk @@ -1,6 +1,6 @@ # src/interfaces/ecpg/preproc/nls.mk CATALOG_NAME = ecpg -AVAIL_LANGUAGES = cs de es fr it ja ko pl pt_BR ru tr zh_CN zh_TW +AVAIL_LANGUAGES = cs de es fr it ja ko pl pt_BR ru sv tr vi zh_CN zh_TW GETTEXT_FILES = descriptor.c ecpg.c pgc.c preproc.c type.c variable.c GETTEXT_TRIGGERS = mmerror:3 mmfatal:2 GETTEXT_FLAGS = mmerror:3:c-format mmfatal:2:c-format diff --git a/src/interfaces/ecpg/preproc/output.c b/src/interfaces/ecpg/preproc/output.c index 0479c93c99..0465857eb6 100644 --- a/src/interfaces/ecpg/preproc/output.c +++ b/src/interfaces/ecpg/preproc/output.c @@ -51,6 +51,9 @@ print_action(struct when *w) case W_BREAK: fprintf(base_yyout, "break;"); break; + case W_CONTINUE: + fprintf(base_yyout, "continue;"); + break; default: fprintf(base_yyout, "{/* %d not implemented yet */}", w->code); break; @@ -155,6 +158,7 @@ output_statement(char *stmt, int whenever_mode, enum ECPG_statement_type st) free(stmt); if (connection != NULL) free(connection); + connection = NULL; } void @@ -169,6 +173,7 @@ output_prepare_statement(char *name, char *stmt) free(name); if (connection != NULL) free(connection); + connection = NULL; } void @@ -189,6 +194,7 @@ output_deallocate_prepare_statement(char *name) free(name); 
if (connection != NULL) free(connection); + connection = NULL; } static void diff --git a/src/interfaces/ecpg/preproc/parse.pl b/src/interfaces/ecpg/preproc/parse.pl index 768df3a6b1..e1c0a2ccb7 100644 --- a/src/interfaces/ecpg/preproc/parse.pl +++ b/src/interfaces/ecpg/preproc/parse.pl @@ -1,9 +1,9 @@ #!/usr/bin/perl # src/interfaces/ecpg/preproc/parse.pl -# parser generater for ecpg version 2 +# parser generator for ecpg version 2 # call with backend parser as stdin # -# Copyright (c) 2007-2017, PostgreSQL Global Development Group +# Copyright (c) 2007-2018, PostgreSQL Global Development Group # # Written by Mike Aubury # Michael Meskes @@ -101,7 +101,7 @@ 'RETURNING target_list opt_ecpg_into', 'ExecuteStmtEXECUTEnameexecute_param_clause' => 'EXECUTE prepared_name execute_param_clause execute_rest', -'ExecuteStmtCREATEOptTempTABLEcreate_as_targetASEXECUTEnameexecute_param_clause' + 'ExecuteStmtCREATEOptTempTABLEcreate_as_targetASEXECUTEnameexecute_param_clause' => 'CREATE OptTemp TABLE create_as_target AS EXECUTE prepared_name execute_param_clause', 'PrepareStmtPREPAREnameprep_type_clauseASPreparableStmt' => 'PREPARE prepared_name prep_type_clause AS PreparableStmt', @@ -134,20 +134,20 @@ sub main chomp; - # comment out the line below to make the result file match (blank line wise) - # the prior version. - #next if ($_ eq ''); - - # Dump the action for a rule - - # stmt_mode indicates if we are processing the 'stmt:' - # rule (mode==0 means normal, mode==1 means stmt:) - # flds are the fields to use. These may start with a '$' - in - # which case they are the result of a previous non-terminal - # - # if they dont start with a '$' then they are token name - # - # len is the number of fields in flds... - # leadin is the padding to apply at the beginning (just use for formatting) + # comment out the line below to make the result file match (blank line wise) + # the prior version. + #next if ($_ eq ''); + + # Dump the action for a rule - + # stmt_mode indicates if we are processing the 'stmt:' + # rule (mode==0 means normal, mode==1 means stmt:) + # flds are the fields to use. These may start with a '$' - in + # which case they are the result of a previous non-terminal + # + # if they don't start with a '$' then they are token name + # + # len is the number of fields in flds... + # leadin is the padding to apply at the beginning (just use for formatting) if (/^%%/) { @@ -223,7 +223,7 @@ sub main next line; } - # Dont worry about anything if we're not in the right section of gram.y + # Don't worry about anything if we're not in the right section of gram.y if ($yaccmode != 1) { next line; @@ -415,6 +415,7 @@ sub main } } } + return; } @@ -431,6 +432,7 @@ sub include_file add_to_buffer($buffer, $_); } close($fh); + return; } sub include_addon @@ -472,6 +474,7 @@ sub include_addon sub add_to_buffer { push(@{ $buff{ $_[0] } }, "$_[1]\n"); + return; } sub dump_buffer @@ -480,6 +483,7 @@ sub dump_buffer print '/* ', $buffer, ' */', "\n"; my $ref = $buff{$buffer}; print @$ref; + return; } sub dump_fields @@ -501,7 +505,7 @@ sub dump_fields if ($flds->[0] ne 'create' || $flds->[2] ne 'table') { add_to_buffer('rules', -'mmerror(PARSE_ERROR, ET_WARNING, "unsupported feature will be passed to server");' + 'mmerror(PARSE_ERROR, ET_WARNING, "unsupported feature will be passed to server");' ); } $feature_not_supported = 0; @@ -582,6 +586,7 @@ sub dump_fields add_to_buffer('rules', ' { $$ = NULL; }'); } } + return; } @@ -632,8 +637,8 @@ sub preload_addons my $filename = $path . 
"/ecpg.addons"; open(my $fh, '<', $filename) or die; - # there may be multiple lines starting ECPG: and then multiple lines of code. - # the code need to be add to all prior ECPG records. + # there may be multiple lines starting ECPG: and then multiple lines of code. + # the code need to be add to all prior ECPG records. my (@needsRules, @code, $record); # there may be comments before the first ECPG line, skip them @@ -673,4 +678,5 @@ sub preload_addons push(@{ $x->{lines} }, @code); } } + return; } diff --git a/src/interfaces/ecpg/preproc/parser.c b/src/interfaces/ecpg/preproc/parser.c index 0c2705cd2b..e5a8f9d170 100644 --- a/src/interfaces/ecpg/preproc/parser.c +++ b/src/interfaces/ecpg/preproc/parser.c @@ -7,7 +7,7 @@ * need to bother with re-entrant interfaces. * * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * IDENTIFICATION diff --git a/src/interfaces/ecpg/preproc/pgc.l b/src/interfaces/ecpg/preproc/pgc.l index 3598a200d0..0792118cfe 100644 --- a/src/interfaces/ecpg/preproc/pgc.l +++ b/src/interfaces/ecpg/preproc/pgc.l @@ -7,7 +7,7 @@ * This is a modified version of src/backend/parser/scan.l * * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * @@ -21,6 +21,8 @@ #include #include +#include "common/string.h" + #include "extern.h" #include "preproc.h" } @@ -79,6 +81,8 @@ static struct _if_value short else_branch; } stacked_if_value[MAX_NESTED_IF]; +/* LCOV_EXCL_START */ + %} %option 8bit @@ -241,6 +245,15 @@ array ({ident_cont}|{whitespace}|[\[\]\+\-\*\%\/\(\)\>\.])* typecast "::" dot_dot \.\. colon_equals ":=" + +/* + * These operator-like tokens (unlike the above ones) also match the {operator} + * rule, which means that they might be overridden by a longer match if they + * are followed by a comment start or a + or - character. Accordingly, if you + * add to this list, you must also add corresponding code to the {operator} + * block to return the correct token in such cases. (This is not needed in + * psqlscan.l since the token value is ignored there.) + */ equals_greater "=>" less_equals "<=" greater_equals ">=" @@ -686,20 +699,33 @@ cppline {space}*#([^i][A-Za-z]*|{if}|{ifdef}|{ifndef}|{import})((\/\*[^*/]*\*+ * to forbid operator names like '?-' that could not be * sequences of SQL operators. */ - while (nchars > 1 && - (yytext[nchars-1] == '+' || - yytext[nchars-1] == '-')) + if (nchars > 1 && + (yytext[nchars - 1] == '+' || + yytext[nchars - 1] == '-')) { int ic; - for (ic = nchars-2; ic >= 0; ic--) + for (ic = nchars - 2; ic >= 0; ic--) { - if (strchr("~!@#^&|`?%", yytext[ic])) + char c = yytext[ic]; + if (c == '~' || c == '!' || c == '@' || + c == '#' || c == '^' || c == '&' || + c == '|' || c == '`' || c == '?' 
|| + c == '%') break; } - if (ic >= 0) - break; /* found a char that makes it OK */ - nchars--; /* else remove the +/-, and check again */ + if (ic < 0) + { + /* + * didn't find a qualifying character, so remove + * all trailing [+-] + */ + do { + nchars--; + } while (nchars > 1 && + (yytext[nchars - 1] == '+' || + yytext[nchars - 1] == '-')); + } } if (nchars < yyleng) @@ -715,6 +741,25 @@ cppline {space}*#([^i][A-Za-z]*|{if}|{ifdef}|{ifndef}|{import})((\/\*[^*/]*\*+ if (nchars == 1 && strchr(",()[].;:+-*/%^<>=", yytext[0])) return yytext[0]; + /* + * Likewise, if what we have left is two chars, and + * those match the tokens ">=", "<=", "=>", "<>" or + * "!=", then we must return the appropriate token + * rather than the generic Op. + */ + if (nchars == 2) + { + if (yytext[0] == '=' && yytext[1] == '>') + return EQUALS_GREATER; + if (yytext[0] == '>' && yytext[1] == '=') + return GREATER_EQUALS; + if (yytext[0] == '<' && yytext[1] == '=') + return LESS_EQUALS; + if (yytext[0] == '<' && yytext[1] == '>') + return NOT_EQUALS; + if (yytext[0] == '!' && yytext[1] == '=') + return NOT_EQUALS; + } } base_yylval.str = mm_strdup(yytext); @@ -725,17 +770,12 @@ cppline {space}*#([^i][A-Za-z]*|{if}|{ifdef}|{ifndef}|{import})((\/\*[^*/]*\*+ return PARAM; } {integer} { - long val; + int val; char* endptr; errno = 0; - val = strtol((char *)yytext, &endptr,10); - if (*endptr != '\0' || errno == ERANGE -#ifdef HAVE_LONG_INT_64 - /* if long > 32 bits, check for overflow of int4 */ - || val != (long) ((int32) val) -#endif - ) + val = strtoint(yytext, &endptr, 10); + if (*endptr != '\0' || errno == ERANGE) { errno = 0; base_yylval.str = mm_strdup(yytext); @@ -768,7 +808,7 @@ cppline {space}*#([^i][A-Za-z]*|{if}|{ifdef}|{ifndef}|{import})((\/\*[^*/]*\*+ } :{identifier}((("->"|\.){identifier})|(\[{array}\]))* { base_yylval.str = mm_strdup(yytext+1); - return(CVARIABLE); + return CVARIABLE; } {identifier} { const ScanKeyword *keyword; @@ -832,7 +872,7 @@ cppline {space}*#([^i][A-Za-z]*|{if}|{ifdef}|{ifndef}|{import})((\/\*[^*/]*\*+ else { base_yylval.str = mm_strdup(yytext); - return(CPP_LINE); + return CPP_LINE; } } {cppinclude_next} { @@ -844,12 +884,12 @@ cppline {space}*#([^i][A-Za-z]*|{if}|{ifdef}|{ifndef}|{import})((\/\*[^*/]*\*+ else { base_yylval.str = mm_strdup(yytext); - return(CPP_LINE); + return CPP_LINE; } } {cppline} { base_yylval.str = mm_strdup(yytext); - return(CPP_LINE); + return CPP_LINE; } {identifier} { const ScanKeyword *keyword; @@ -879,38 +919,38 @@ cppline {space}*#([^i][A-Za-z]*|{if}|{ifdef}|{ifndef}|{import})((\/\*[^*/]*\*+ } } {xcstop} { mmerror(PARSE_ERROR, ET_ERROR, "nested /* ... 
*/ comments"); } -":" { return(':'); } -";" { return(';'); } -"," { return(','); } -"*" { return('*'); } -"%" { return('%'); } -"/" { return('/'); } -"+" { return('+'); } -"-" { return('-'); } -"(" { parenths_open++; return('('); } -")" { parenths_open--; return(')'); } +":" { return ':'; } +";" { return ';'; } +"," { return ','; } +"*" { return '*'; } +"%" { return '%'; } +"/" { return '/'; } +"+" { return '+'; } +"-" { return '-'; } +"(" { parenths_open++; return '('; } +")" { parenths_open--; return ')'; } {space} { ECHO; } -\{ { return('{'); } -\} { return('}'); } -\[ { return('['); } -\] { return(']'); } -\= { return('='); } -"->" { return(S_MEMBER); } -">>" { return(S_RSHIFT); } -"<<" { return(S_LSHIFT); } -"||" { return(S_OR); } -"&&" { return(S_AND); } -"++" { return(S_INC); } -"--" { return(S_DEC); } -"==" { return(S_EQUAL); } -"!=" { return(S_NEQUAL); } -"+=" { return(S_ADD); } -"-=" { return(S_SUB); } -"*=" { return(S_MUL); } -"/=" { return(S_DIV); } -"%=" { return(S_MOD); } -"->*" { return(S_MEMPOINT); } -".*" { return(S_DOTPOINT); } +\{ { return '{'; } +\} { return '}'; } +\[ { return '['; } +\] { return ']'; } +\= { return '='; } +"->" { return S_MEMBER; } +">>" { return S_RSHIFT; } +"<<" { return S_LSHIFT; } +"||" { return S_OR; } +"&&" { return S_AND; } +"++" { return S_INC; } +"--" { return S_DEC; } +"==" { return S_EQUAL; } +"!=" { return S_NEQUAL; } +"+=" { return S_ADD; } +"-=" { return S_SUB; } +"*=" { return S_MUL; } +"/=" { return S_DIV; } +"%=" { return S_MOD; } +"->*" { return S_MEMPOINT; } +".*" { return S_DOTPOINT; } {other} { return S_ANYTHING; } {exec_sql}{define}{space}* { BEGIN(def_ident); } {informix_special}{define}{space}* { @@ -922,7 +962,7 @@ cppline {space}*#([^i][A-Za-z]*|{if}|{ifdef}|{ifndef}|{import})((\/\*[^*/]*\*+ else { yyless(1); - return (S_ANYTHING); + return S_ANYTHING; } } {exec_sql}{undef}{space}* { BEGIN(undef); } @@ -935,7 +975,7 @@ cppline {space}*#([^i][A-Za-z]*|{if}|{ifdef}|{ifndef}|{import})((\/\*[^*/]*\*+ else { yyless(1); - return (S_ANYTHING); + return S_ANYTHING; } } {identifier}{space}*";" { @@ -984,35 +1024,35 @@ cppline {space}*#([^i][A-Za-z]*|{if}|{ifdef}|{ifndef}|{import})((\/\*[^*/]*\*+ else { yyless(1); - return (S_ANYTHING); + return S_ANYTHING; } } -{exec_sql}{ifdef}{space}* { ifcond = TRUE; BEGIN(xcond); } +{exec_sql}{ifdef}{space}* { ifcond = true; BEGIN(xcond); } {informix_special}{ifdef}{space}* { /* are we simulating Informix? */ if (INFORMIX_MODE) { - ifcond = TRUE; + ifcond = true; BEGIN(xcond); } else { yyless(1); - return (S_ANYTHING); + return S_ANYTHING; } } -{exec_sql}{ifndef}{space}* { ifcond = FALSE; BEGIN(xcond); } +{exec_sql}{ifndef}{space}* { ifcond = false; BEGIN(xcond); } {informix_special}{ifndef}{space}* { /* are we simulating Informix? */ if (INFORMIX_MODE) { - ifcond = FALSE; + ifcond = false; BEGIN(xcond); } else { yyless(1); - return (S_ANYTHING); + return S_ANYTHING; } } {exec_sql}{elif}{space}* { /* pop stack */ @@ -1024,7 +1064,7 @@ cppline {space}*#([^i][A-Za-z]*|{if}|{ifdef}|{ifndef}|{import})((\/\*[^*/]*\*+ else preproc_tos--; - ifcond = TRUE; BEGIN(xcond); + ifcond = true; BEGIN(xcond); } {informix_special}{elif}{space}* { /* are we simulating Informix? 
*/ @@ -1037,13 +1077,13 @@ cppline {space}*#([^i][A-Za-z]*|{if}|{ifdef}|{ifndef}|{import})((\/\*[^*/]*\*+ else preproc_tos--; - ifcond = TRUE; + ifcond = true; BEGIN(xcond); } else { yyless(1); - return (S_ANYTHING); + return S_ANYTHING; } } @@ -1052,7 +1092,7 @@ cppline {space}*#([^i][A-Za-z]*|{if}|{ifdef}|{ifndef}|{import})((\/\*[^*/]*\*+ mmfatal(PARSE_ERROR, "more than one EXEC SQL ELSE"); else { - stacked_if_value[preproc_tos].else_branch = TRUE; + stacked_if_value[preproc_tos].else_branch = true; stacked_if_value[preproc_tos].condition = (stacked_if_value[preproc_tos-1].condition && !stacked_if_value[preproc_tos].condition); @@ -1071,7 +1111,7 @@ cppline {space}*#([^i][A-Za-z]*|{if}|{ifdef}|{ifndef}|{import})((\/\*[^*/]*\*+ mmfatal(PARSE_ERROR, "more than one EXEC SQL ELSE"); else { - stacked_if_value[preproc_tos].else_branch = TRUE; + stacked_if_value[preproc_tos].else_branch = true; stacked_if_value[preproc_tos].condition = (stacked_if_value[preproc_tos-1].condition && !stacked_if_value[preproc_tos].condition); @@ -1085,7 +1125,7 @@ cppline {space}*#([^i][A-Za-z]*|{if}|{ifdef}|{ifndef}|{import})((\/\*[^*/]*\*+ else { yyless(1); - return (S_ANYTHING); + return S_ANYTHING; } } {exec_sql}{endif}{space}*";" { @@ -1116,7 +1156,7 @@ cppline {space}*#([^i][A-Za-z]*|{if}|{ifdef}|{ifndef}|{import})((\/\*[^*/]*\*+ else { yyless(1); - return (S_ANYTHING); + return S_ANYTHING; } } @@ -1145,7 +1185,7 @@ cppline {space}*#([^i][A-Za-z]*|{if}|{ifdef}|{ifndef}|{import})((\/\*[^*/]*\*+ defptr = defptr->next); preproc_tos++; - stacked_if_value[preproc_tos].else_branch = FALSE; + stacked_if_value[preproc_tos].else_branch = false; stacked_if_value[preproc_tos].condition = (defptr ? ifcond : !ifcond) && stacked_if_value[preproc_tos-1].condition; } @@ -1249,7 +1289,11 @@ cppline {space}*#([^i][A-Za-z]*|{if}|{ifdef}|{ifndef}|{import})((\/\*[^*/]*\*+ } } {other}|\n { mmfatal(PARSE_ERROR, "internal error: unreachable state; please report this to "); } + %% + +/* LCOV_EXCL_STOP */ + void lex_init(void) { @@ -1259,9 +1303,9 @@ lex_init(void) preproc_tos = 0; yylineno = 1; - ifcond = TRUE; + ifcond = true; stacked_if_value[preproc_tos].condition = ifcond; - stacked_if_value[preproc_tos].else_branch = FALSE; + stacked_if_value[preproc_tos].else_branch = false; /* initialize literal buffer to a reasonable but expansible size */ if (literalbuf == NULL) @@ -1406,7 +1450,7 @@ parse_include(void) } /* - * ecpg_isspace() --- return TRUE if flex scanner considers char whitespace + * ecpg_isspace() --- return true if flex scanner considers char whitespace */ static bool ecpg_isspace(char ch) diff --git a/src/interfaces/ecpg/preproc/po/de.po b/src/interfaces/ecpg/preproc/po/de.po index 8cd9678b1d..c729ea5c99 100644 --- a/src/interfaces/ecpg/preproc/po/de.po +++ b/src/interfaces/ecpg/preproc/po/de.po @@ -1,16 +1,16 @@ # German message translation file for ecpg -# Copyright (C) 2009-2017 PostgreSQL Global Development Group +# Copyright (C) 2009-2018 PostgreSQL Global Development Group # This file is distributed under the same license as the PostgreSQL package. -# Peter Eisentraut , 2009-2017. +# Peter Eisentraut , 2009-2018. 
# # Use these quotes: »%s« # msgid "" msgstr "" -"Project-Id-Version: PostgreSQL 10\n" +"Project-Id-Version: PostgreSQL 11\n" "Report-Msgid-Bugs-To: pgsql-bugs@postgresql.org\n" -"POT-Creation-Date: 2017-02-10 21:38+0000\n" -"PO-Revision-Date: 2017-02-10 18:33-0500\n" +"POT-Creation-Date: 2018-05-07 00:38+0000\n" +"PO-Revision-Date: 2018-05-06 21:09-0400\n" "Last-Translator: Peter Eisentraut \n" "Language-Team: German \n" "Language: de\n" @@ -92,10 +92,10 @@ msgstr "" #, c-format msgid "" " -C MODE set compatibility mode; MODE can be one of\n" -" \"INFORMIX\", \"INFORMIX_SE\"\n" +" \"INFORMIX\", \"INFORMIX_SE\", \"ORACLE\"\n" msgstr "" " -C MODUS Kompatibilitätsmodus setzen; MODUS kann sein:\n" -" »INFORMIX« oder »INFORMIX_SE«\n" +" »INFORMIX«, »INFORMIX_SE«, »ORACLE«\n" #: ecpg.c:46 #, c-format @@ -181,147 +181,147 @@ msgstr "" msgid "%s: could not locate my own executable path\n" msgstr "%s: konnte Pfad des eigenen Programs nicht finden\n" -#: ecpg.c:174 ecpg.c:327 ecpg.c:337 +#: ecpg.c:174 ecpg.c:331 ecpg.c:342 #, c-format msgid "%s: could not open file \"%s\": %s\n" msgstr "%s: konnte Datei »%s« nicht öffnen: %s\n" -#: ecpg.c:213 ecpg.c:226 ecpg.c:242 ecpg.c:268 +#: ecpg.c:217 ecpg.c:230 ecpg.c:246 ecpg.c:272 #, c-format msgid "Try \"%s --help\" for more information.\n" msgstr "Versuchen Sie »%s --help« für weitere Informationen.\n" -#: ecpg.c:237 +#: ecpg.c:241 #, c-format msgid "%s: parser debug support (-d) not available\n" msgstr "%s: Unterstützung für Parserdebugging (-d) nicht verfügbar\n" -#: ecpg.c:256 +#: ecpg.c:260 #, c-format msgid "%s, the PostgreSQL embedded C preprocessor, version %s\n" msgstr "%s, der PostgreSQL-Embedded-C-Präprozessor, Version %s\n" -#: ecpg.c:258 +#: ecpg.c:262 #, c-format msgid "EXEC SQL INCLUDE ... search starts here:\n" msgstr "EXEC SQL INCLUDE ... Suche beginnt hier:\n" -#: ecpg.c:261 +#: ecpg.c:265 #, c-format msgid "end of search list\n" msgstr "Ende der Suchliste\n" -#: ecpg.c:267 +#: ecpg.c:271 #, c-format msgid "%s: no input files specified\n" msgstr "%s: keine Eingabedateien angegeben\n" -#: ecpg.c:460 +#: ecpg.c:465 #, c-format msgid "cursor \"%s\" has been declared but not opened" msgstr "Cursor »%s« wurde deklariert aber nicht geöffnet" -#: ecpg.c:473 preproc.y:127 +#: ecpg.c:478 preproc.y:127 #, c-format msgid "could not remove output file \"%s\"\n" msgstr "konnte Ausgabedatei »%s« nicht entfernen\n" -#: pgc.l:432 +#: pgc.l:435 #, c-format msgid "unterminated /* comment" msgstr "/*-Kommentar nicht abgeschlossen" -#: pgc.l:445 +#: pgc.l:448 #, c-format msgid "invalid bit string literal" msgstr "ungültige Bitkettenkonstante" -#: pgc.l:454 +#: pgc.l:457 #, c-format msgid "unterminated bit string literal" msgstr "Bitkettenkonstante nicht abgeschlossen" -#: pgc.l:470 +#: pgc.l:473 #, c-format msgid "unterminated hexadecimal string literal" msgstr "hexadezimale Zeichenkette nicht abgeschlossen" -#: pgc.l:548 +#: pgc.l:551 #, c-format msgid "unterminated quoted string" msgstr "Zeichenkette in Anführungszeichen nicht abgeschlossen" -#: pgc.l:605 pgc.l:618 +#: pgc.l:609 pgc.l:622 #, c-format msgid "zero-length delimited identifier" msgstr "Bezeichner in Anführungszeichen hat Länge null" -#: pgc.l:626 +#: pgc.l:630 #, c-format msgid "unterminated quoted identifier" msgstr "Bezeichner in Anführungszeichen nicht abgeschlossen" -#: pgc.l:881 +#: pgc.l:880 #, c-format msgid "nested /* ... */ comments" msgstr "geschachtelte /* ... 
*/-Kommentare" -#: pgc.l:974 +#: pgc.l:973 #, c-format msgid "missing identifier in EXEC SQL UNDEF command" msgstr "fehlender Bezeichner im Befehl EXEC SQL UNDEF" -#: pgc.l:1020 pgc.l:1034 +#: pgc.l:1019 pgc.l:1033 #, c-format msgid "missing matching \"EXEC SQL IFDEF\" / \"EXEC SQL IFNDEF\"" msgstr "passendes »EXEC SQL IFDEF« / »EXEC SQL IFNDEF« fehlt" -#: pgc.l:1023 pgc.l:1036 pgc.l:1212 +#: pgc.l:1022 pgc.l:1035 pgc.l:1211 #, c-format msgid "missing \"EXEC SQL ENDIF;\"" msgstr "»EXEC SQL ENDIF;« fehlt" -#: pgc.l:1052 pgc.l:1071 +#: pgc.l:1051 pgc.l:1070 #, c-format msgid "more than one EXEC SQL ELSE" msgstr "mehr als ein EXEC SQL ENDIF" -#: pgc.l:1093 pgc.l:1107 +#: pgc.l:1092 pgc.l:1106 #, c-format msgid "unmatched EXEC SQL ENDIF" msgstr "unzusammenhängendes EXEC SQL ENDIF" -#: pgc.l:1127 +#: pgc.l:1126 #, c-format msgid "too many nested EXEC SQL IFDEF conditions" msgstr "zu viele verschachtelte EXEC SQL IFDEF-Bedingungen" -#: pgc.l:1160 +#: pgc.l:1159 #, c-format msgid "missing identifier in EXEC SQL IFDEF command" msgstr "fehlender Bezeichner im Befehl EXEC SQL IFDEF" -#: pgc.l:1169 +#: pgc.l:1168 #, c-format msgid "missing identifier in EXEC SQL DEFINE command" msgstr "fehlender Bezeichner im Befehl EXEC SQL DEFINE" -#: pgc.l:1202 +#: pgc.l:1201 #, c-format msgid "syntax error in EXEC SQL INCLUDE command" msgstr "Syntaxfehler im Befehl EXEC SQL INCLUDE" -#: pgc.l:1251 +#: pgc.l:1250 #, c-format msgid "internal error: unreachable state; please report this to " msgstr "interner Fehler: unerreichbarer Zustand; bitte an berichten" -#: pgc.l:1375 +#: pgc.l:1379 #, c-format msgid "Error: include path \"%s/%s\" is too long on line %d, skipping\n" msgstr "Fehler: Include-Pfad »%s/%s« ist zu lang auf Zeile %d, wird übersprungen\n" -#: pgc.l:1398 +#: pgc.l:1402 #, c-format msgid "could not open include file \"%s\" on line %d" msgstr "konnte Include-Datei »%s« nicht öffnen auf Zeile %d" @@ -355,185 +355,185 @@ msgstr "Initialisierungswert nicht erlaubt in Typdefinition" msgid "type name \"string\" is reserved in Informix mode" msgstr "Typname »string« ist im Informix-Modus reserviert" -#: preproc.y:546 preproc.y:15083 +#: preproc.y:546 preproc.y:15722 #, c-format msgid "type \"%s\" is already defined" msgstr "Typ »%s« ist bereits definiert" -#: preproc.y:570 preproc.y:15741 preproc.y:16061 variable.c:620 +#: preproc.y:570 preproc.y:16380 preproc.y:16705 variable.c:620 #, c-format msgid "multidimensional arrays for simple data types are not supported" msgstr "mehrdimensionale Arrays für einfache Datentypen werden nicht unterstützt" -#: preproc.y:1667 +#: preproc.y:1693 #, c-format msgid "AT option not allowed in CLOSE DATABASE statement" msgstr "AT-Option ist nicht erlaubt im Befehl CLOSE DATABASE" -#: preproc.y:1888 +#: preproc.y:1902 #, c-format msgid "AT option not allowed in CONNECT statement" msgstr "AT-Option ist nicht erlaubt im Befehl CONNECT" -#: preproc.y:1922 +#: preproc.y:1936 #, c-format msgid "AT option not allowed in DISCONNECT statement" msgstr "AT-Option ist nicht erlaubt im Befehl DISCONNECT" -#: preproc.y:1977 +#: preproc.y:1991 #, c-format msgid "AT option not allowed in SET CONNECTION statement" msgstr "AT-Option ist nicht erlaubt im Befehl SET CONNECTION" -#: preproc.y:1999 +#: preproc.y:2013 #, c-format msgid "AT option not allowed in TYPE statement" msgstr "AT-Option ist nicht erlaubt im TYPE-Befehl" -#: preproc.y:2008 +#: preproc.y:2022 #, c-format msgid "AT option not allowed in VAR statement" msgstr "AT-Option ist nicht erlaubt im VAR-Befehl" -#: preproc.y:2015 +#: 
preproc.y:2029 #, c-format msgid "AT option not allowed in WHENEVER statement" msgstr "AT-Option ist nicht erlaubt im WHENEVER-Befehl" -#: preproc.y:2267 preproc.y:2272 preproc.y:2388 preproc.y:3925 preproc.y:5484 -#: preproc.y:5493 preproc.y:5793 preproc.y:7233 preproc.y:8613 preproc.y:8618 -#: preproc.y:11229 preproc.y:11850 +#: preproc.y:2106 preproc.y:2278 preproc.y:2283 preproc.y:2399 preproc.y:4044 +#: preproc.y:5602 preproc.y:5611 preproc.y:5911 preproc.y:7510 preproc.y:9003 +#: preproc.y:9008 preproc.y:11790 #, c-format msgid "unsupported feature will be passed to server" msgstr "nicht mehr unterstütztes Feature wird an Server weitergereicht werden" -#: preproc.y:2646 +#: preproc.y:2657 #, c-format msgid "SHOW ALL is not implemented" msgstr "SHOW ALL ist nicht implementiert" -#: preproc.y:3270 +#: preproc.y:3385 #, c-format msgid "COPY FROM STDIN is not implemented" msgstr "COPY FROM STDIN ist nicht implementiert" -#: preproc.y:9488 preproc.y:14672 +#: preproc.y:9956 preproc.y:15311 #, c-format msgid "using variable \"%s\" in different declare statements is not supported" msgstr "Verwendung der Variable »%s« in verschiedenen DECLARE-Anweisungen wird nicht unterstützt" -#: preproc.y:9490 preproc.y:14674 +#: preproc.y:9958 preproc.y:15313 #, c-format msgid "cursor \"%s\" is already defined" msgstr "Cursor »%s« ist bereits definiert" -#: preproc.y:9920 +#: preproc.y:10388 #, c-format msgid "no longer supported LIMIT #,# syntax passed to server" msgstr "nicht mehr unterstützte Syntax LIMIT x,y wird an Server weitergereicht" -#: preproc.y:10228 preproc.y:10235 +#: preproc.y:10704 preproc.y:10711 #, c-format msgid "subquery in FROM must have an alias" msgstr "Unteranfrage in FROM muss Aliasnamen erhalten" -#: preproc.y:14402 +#: preproc.y:15041 #, c-format msgid "CREATE TABLE AS cannot specify INTO" msgstr "CREATE TABLE AS kann INTO nicht verwenden" -#: preproc.y:14438 +#: preproc.y:15077 #, c-format msgid "expected \"@\", found \"%s\"" msgstr "»@« erwartet, »%s« gefunden" -#: preproc.y:14450 +#: preproc.y:15089 #, c-format msgid "only protocols \"tcp\" and \"unix\" and database type \"postgresql\" are supported" msgstr "er werden nur die Protokolle »tcp« und »unix« und der Datenbanktyp »postgresql« unterstützt" -#: preproc.y:14453 +#: preproc.y:15092 #, c-format msgid "expected \"://\", found \"%s\"" msgstr "»://« erwartet, »%s« gefunden" -#: preproc.y:14458 +#: preproc.y:15097 #, c-format msgid "Unix-domain sockets only work on \"localhost\" but not on \"%s\"" msgstr "Unix-Domain-Sockets funktionieren nur mit »localhost«, aber nicht mit »%s«" -#: preproc.y:14484 +#: preproc.y:15123 #, c-format msgid "expected \"postgresql\", found \"%s\"" msgstr "»postgresql« erwartet, »%s« gefunden" -#: preproc.y:14487 +#: preproc.y:15126 #, c-format msgid "invalid connection type: %s" msgstr "ungültiger Verbindungstyp: %s" -#: preproc.y:14496 +#: preproc.y:15135 #, c-format msgid "expected \"@\" or \"://\", found \"%s\"" msgstr "»@« oder »://« erwartet, »%s« gefunden" -#: preproc.y:14571 preproc.y:14589 +#: preproc.y:15210 preproc.y:15228 #, c-format msgid "invalid data type" msgstr "ungültiger Datentyp" -#: preproc.y:14600 preproc.y:14617 +#: preproc.y:15239 preproc.y:15256 #, c-format msgid "incomplete statement" msgstr "unvollständige Anweisung" -#: preproc.y:14603 preproc.y:14620 +#: preproc.y:15242 preproc.y:15259 #, c-format msgid "unrecognized token \"%s\"" msgstr "nicht erkanntes Token »%s«" -#: preproc.y:14894 +#: preproc.y:15533 #, c-format msgid "only data types numeric and decimal have 
precision/scale argument" msgstr "nur die Datentypen NUMERIC und DECIMAL haben Argumente für Präzision und Skala" -#: preproc.y:14906 +#: preproc.y:15545 #, c-format msgid "interval specification not allowed here" msgstr "Intervallangabe hier nicht erlaubt" -#: preproc.y:15058 preproc.y:15110 +#: preproc.y:15697 preproc.y:15749 #, c-format msgid "too many levels in nested structure/union definition" msgstr "zu viele Ebenen in verschachtelter Definition von Struktur/Union" -#: preproc.y:15249 +#: preproc.y:15888 #, c-format msgid "pointers to varchar are not implemented" msgstr "Zeiger auf varchar sind nicht implementiert" -#: preproc.y:15436 preproc.y:15461 +#: preproc.y:16075 preproc.y:16100 #, c-format msgid "using unsupported DESCRIBE statement" msgstr "nicht unterstützter DESCRIBE-Befehl wird verwendet" -#: preproc.y:15708 +#: preproc.y:16347 #, c-format msgid "initializer not allowed in EXEC SQL VAR command" msgstr "Initialisierungswert nicht erlaubt in Befehl EXEC SQL VAR" -#: preproc.y:16019 +#: preproc.y:16663 #, c-format msgid "arrays of indicators are not allowed on input" msgstr "Array aus Indikatoren bei der Eingabe nicht erlaubt" -#: preproc.y:16240 +#: preproc.y:16884 #, c-format msgid "operator not allowed in variable definition" msgstr "Operator nicht erlaubt in Variablendefinition" #. translator: %s is typically the translation of "syntax error" -#: preproc.y:16278 +#: preproc.y:16925 #, c-format msgid "%s at or near \"%s\"" msgstr "%s bei »%s«" @@ -543,7 +543,7 @@ msgstr "%s bei »%s«" msgid "out of memory" msgstr "Speicher aufgebraucht" -#: type.c:212 type.c:664 +#: type.c:212 type.c:676 #, c-format msgid "unrecognized variable type code %d" msgstr "unbekannter Variablentypcode %d" @@ -588,7 +588,17 @@ msgstr "Indikator für struct muss ein struct sein" msgid "indicator for simple data type has to be simple" msgstr "Indikator für einfachen Typ muss einfachen Typ haben" -#: type.c:723 +#: type.c:616 +#, c-format +msgid "indicator struct \"%s\" has too few members" +msgstr "Indikator-Struct »%s« hat zu wenige Mitglieder" + +#: type.c:624 +#, c-format +msgid "indicator struct \"%s\" has too many members" +msgstr "Indikator-Struct »%s« hat zu viele Mitglieder" + +#: type.c:735 #, c-format msgid "unrecognized descriptor item code %d" msgstr "unbekannter Deskriptorelementcode %d" diff --git a/src/interfaces/ecpg/preproc/po/fr.po b/src/interfaces/ecpg/preproc/po/fr.po index d408bd36a9..100ba084f6 100644 --- a/src/interfaces/ecpg/preproc/po/fr.po +++ b/src/interfaces/ecpg/preproc/po/fr.po @@ -9,8 +9,8 @@ msgid "" msgstr "" "Project-Id-Version: PostgreSQL 9.6\n" "Report-Msgid-Bugs-To: pgsql-bugs@postgresql.org\n" -"POT-Creation-Date: 2017-07-02 04:38+0000\n" -"PO-Revision-Date: 2017-07-02 17:45+0200\n" +"POT-Creation-Date: 2018-02-05 14:08+0000\n" +"PO-Revision-Date: 2018-02-10 17:46+0100\n" "Last-Translator: Guillaume Lelarge \n" "Language-Team: PostgreSQLfr \n" "Language: fr\n" @@ -18,7 +18,7 @@ msgstr "" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" "Plural-Forms: nplurals=2; plural=(n > 1);\n" -"X-Generator: Poedit 1.8.12\n" +"X-Generator: Poedit 2.0.3\n" #: descriptor.c:64 #, c-format @@ -320,14 +320,14 @@ msgstr "" "erreur interne : l'état ne peut être atteint ; merci de rapporter ceci à\n" "" -#: pgc.l:1375 +#: pgc.l:1376 #, c-format msgid "Error: include path \"%s/%s\" is too long on line %d, skipping\n" msgstr "" "Erreur : le chemin d'en-tête « %s/%s » est trop long sur la ligne %d,\n" "ignoré\n" -#: pgc.l:1398 +#: pgc.l:1399 #, c-format 
msgid "could not open include file \"%s\" on line %d" msgstr "n'a pas pu ouvrir le fichier d'en-tête « %s » sur la ligne %d" @@ -361,193 +361,193 @@ msgstr "initialiseur non autorisé dans la définition du type" msgid "type name \"string\" is reserved in Informix mode" msgstr "le nom du type « string » est réservé dans le mode Informix" -#: preproc.y:546 preproc.y:15412 +#: preproc.y:546 preproc.y:15393 #, c-format msgid "type \"%s\" is already defined" msgstr "le type « %s » est déjà défini" -#: preproc.y:570 preproc.y:16070 preproc.y:16390 variable.c:620 +#: preproc.y:570 preproc.y:16051 preproc.y:16371 variable.c:620 #, c-format msgid "multidimensional arrays for simple data types are not supported" msgstr "" "les tableaux multi-dimensionnels pour les types de données simples ne sont\n" "pas supportés" -#: preproc.y:1681 +#: preproc.y:1674 #, c-format msgid "AT option not allowed in CLOSE DATABASE statement" msgstr "option AT non autorisée dans une instruction CLOSE DATABASE" -#: preproc.y:1894 +#: preproc.y:1883 #, c-format msgid "AT option not allowed in CONNECT statement" msgstr "option AT non autorisée dans une instruction CONNECT" -#: preproc.y:1928 +#: preproc.y:1917 #, c-format msgid "AT option not allowed in DISCONNECT statement" msgstr "option AT non autorisée dans une instruction DISCONNECT" -#: preproc.y:1983 +#: preproc.y:1972 #, c-format msgid "AT option not allowed in SET CONNECTION statement" msgstr "option AT non autorisée dans une instruction SET CONNECTION" -#: preproc.y:2005 +#: preproc.y:1994 #, c-format msgid "AT option not allowed in TYPE statement" msgstr "option AT non autorisée dans une instruction TYPE" -#: preproc.y:2014 +#: preproc.y:2003 #, c-format msgid "AT option not allowed in VAR statement" msgstr "option AT non autorisée dans une instruction VAR" -#: preproc.y:2021 +#: preproc.y:2010 #, c-format msgid "AT option not allowed in WHENEVER statement" msgstr "option AT non autorisée dans une instruction WHENEVER" -#: preproc.y:2090 preproc.y:2274 preproc.y:2279 preproc.y:2395 preproc.y:3969 preproc.y:5519 -#: preproc.y:5528 preproc.y:5828 preproc.y:7364 preproc.y:8801 preproc.y:8806 preproc.y:11538 -#: preproc.y:12159 +#: preproc.y:2079 preproc.y:2251 preproc.y:2256 preproc.y:2372 preproc.y:3950 preproc.y:5500 +#: preproc.y:5509 preproc.y:5809 preproc.y:7345 preproc.y:8782 preproc.y:8787 preproc.y:11519 +#: preproc.y:12140 #, c-format msgid "unsupported feature will be passed to server" msgstr "la fonctionnalité non supportée sera passée au serveur" -#: preproc.y:2653 +#: preproc.y:2630 #, c-format msgid "SHOW ALL is not implemented" msgstr "SHOW ALL n'est pas implanté" -#: preproc.y:3325 +#: preproc.y:3306 #, c-format msgid "COPY FROM STDIN is not implemented" msgstr "COPY FROM STDIN n'est pas implanté" -#: preproc.y:9704 preproc.y:15001 +#: preproc.y:9685 preproc.y:14982 #, c-format msgid "using variable \"%s\" in different declare statements is not supported" msgstr "" "l'utilisation de la variable « %s » dans différentes instructions de déclaration\n" "n'est pas supportée" -#: preproc.y:9706 preproc.y:15003 +#: preproc.y:9687 preproc.y:14984 #, c-format msgid "cursor \"%s\" is already defined" msgstr "le curseur « %s » est déjà défini" -#: preproc.y:10136 +#: preproc.y:10117 #, c-format msgid "no longer supported LIMIT #,# syntax passed to server" msgstr "la syntaxe obsolète LIMIT #,# a été passée au serveur" -#: preproc.y:10452 preproc.y:10459 +#: preproc.y:10433 preproc.y:10440 #, c-format msgid "subquery in FROM must have an alias" msgstr "la 
sous-requête du FROM doit avoir un alias" -#: preproc.y:14731 +#: preproc.y:14712 #, c-format msgid "CREATE TABLE AS cannot specify INTO" msgstr "CREATE TABLE AS ne peut pas indiquer INTO" -#: preproc.y:14767 +#: preproc.y:14748 #, c-format msgid "expected \"@\", found \"%s\"" msgstr "« @ » attendu, « %s » trouvé" -#: preproc.y:14779 +#: preproc.y:14760 #, c-format msgid "only protocols \"tcp\" and \"unix\" and database type \"postgresql\" are supported" msgstr "" "seuls les protocoles « tcp » et « unix » et les types de base de données\n" "« postgresql » sont supportés" -#: preproc.y:14782 +#: preproc.y:14763 #, c-format msgid "expected \"://\", found \"%s\"" msgstr "« :// » attendu, « %s » trouvé" -#: preproc.y:14787 +#: preproc.y:14768 #, c-format msgid "Unix-domain sockets only work on \"localhost\" but not on \"%s\"" msgstr "les sockets de domaine Unix fonctionnent seulement sur « localhost », mais pas sur « %s »" -#: preproc.y:14813 +#: preproc.y:14794 #, c-format msgid "expected \"postgresql\", found \"%s\"" msgstr "« postgresql » attendu, « %s » trouvé" -#: preproc.y:14816 +#: preproc.y:14797 #, c-format msgid "invalid connection type: %s" msgstr "type de connexion invalide : %s" -#: preproc.y:14825 +#: preproc.y:14806 #, c-format msgid "expected \"@\" or \"://\", found \"%s\"" msgstr "« @ » ou « :// » attendu, « %s » trouvé" -#: preproc.y:14900 preproc.y:14918 +#: preproc.y:14881 preproc.y:14899 #, c-format msgid "invalid data type" msgstr "type de données invalide" -#: preproc.y:14929 preproc.y:14946 +#: preproc.y:14910 preproc.y:14927 #, c-format msgid "incomplete statement" msgstr "instruction incomplète" -#: preproc.y:14932 preproc.y:14949 +#: preproc.y:14913 preproc.y:14930 #, c-format msgid "unrecognized token \"%s\"" msgstr "jeton « %s » non reconnu" -#: preproc.y:15223 +#: preproc.y:15204 #, c-format msgid "only data types numeric and decimal have precision/scale argument" msgstr "" "seuls les types de données numeric et decimal ont des arguments de\n" "précision et d'échelle" -#: preproc.y:15235 +#: preproc.y:15216 #, c-format msgid "interval specification not allowed here" msgstr "interval de spécification non autorisé ici" -#: preproc.y:15387 preproc.y:15439 +#: preproc.y:15368 preproc.y:15420 #, c-format msgid "too many levels in nested structure/union definition" msgstr "trop de niveaux dans la définition de structure/union imbriquée" -#: preproc.y:15578 +#: preproc.y:15559 #, c-format msgid "pointers to varchar are not implemented" msgstr "les pointeurs sur des chaînes de caractères (varchar) ne sont pas implantés" -#: preproc.y:15765 preproc.y:15790 +#: preproc.y:15746 preproc.y:15771 #, c-format msgid "using unsupported DESCRIBE statement" msgstr "utilisation de l'instruction DESCRIBE non supporté" -#: preproc.y:16037 +#: preproc.y:16018 #, c-format msgid "initializer not allowed in EXEC SQL VAR command" msgstr "initialiseur non autorisé dans la commande EXEC SQL VAR" -#: preproc.y:16348 +#: preproc.y:16329 #, c-format msgid "arrays of indicators are not allowed on input" msgstr "les tableaux d'indicateurs ne sont pas autorisés en entrée" -#: preproc.y:16569 +#: preproc.y:16550 #, c-format msgid "operator not allowed in variable definition" msgstr "opérateur non autorisé dans la définition de la variable" #. 
translator: %s is typically the translation of "syntax error" -#: preproc.y:16607 +#: preproc.y:16591 #, c-format msgid "%s at or near \"%s\"" msgstr "%s sur ou près de « %s »" @@ -557,7 +557,7 @@ msgstr "%s sur ou près de « %s »" msgid "out of memory" msgstr "mémoire épuisée" -#: type.c:212 type.c:664 +#: type.c:212 type.c:674 #, c-format msgid "unrecognized variable type code %d" msgstr "code %d du type de variable non reconnu" @@ -606,7 +606,17 @@ msgstr "l'indicateur d'un struct doit être un struct" msgid "indicator for simple data type has to be simple" msgstr "l'indicateur d'un type de données simple doit être simple" -#: type.c:723 +#: type.c:615 +#, c-format +msgid "indicator struct \"%s\" has too few members" +msgstr "le struct indicateur « %s » a trop peu de membres" + +#: type.c:622 +#, c-format +msgid "indicator struct \"%s\" has too many members" +msgstr "le struct indicateur « %s » a trop de membres" + +#: type.c:733 #, c-format msgid "unrecognized descriptor item code %d" msgstr "code %d de l'élément du descripteur non reconnu" @@ -677,23 +687,23 @@ msgstr "ce type de données ne supporte pas les pointeurs de pointeur" msgid "multidimensional arrays for structures are not supported" msgstr "les tableaux multidimensionnels ne sont pas supportés pour les structures" -#~ msgid " --version output version information, then exit\n" -#~ msgstr " --version affiche la version et quitte\n" +#~ msgid "COPY TO STDIN is not possible" +#~ msgstr "COPY TO STDIN n'est pas possible" -#~ msgid "AT option not allowed in DEALLOCATE statement" -#~ msgstr "option AT non autorisée dans une instruction DEALLOCATE" +#~ msgid "COPY FROM STDOUT is not possible" +#~ msgstr "COPY FROM STDOUT n'est pas possible" -#~ msgid "constraint declared INITIALLY DEFERRED must be DEFERRABLE" -#~ msgstr "une contrainte déclarée INITIALLY DEFERRED doit être DEFERRABLE" +#~ msgid "NEW used in query that is not in a rule" +#~ msgstr "NEW utilisé dans une requête qui n'est pas dans une règle" #~ msgid "OLD used in query that is not in a rule" #~ msgstr "OLD utilisé dans une requête qui n'est pas dans une règle" -#~ msgid "NEW used in query that is not in a rule" -#~ msgstr "NEW utilisé dans une requête qui n'est pas dans une règle" +#~ msgid "constraint declared INITIALLY DEFERRED must be DEFERRABLE" +#~ msgstr "une contrainte déclarée INITIALLY DEFERRED doit être DEFERRABLE" -#~ msgid "COPY FROM STDOUT is not possible" -#~ msgstr "COPY FROM STDOUT n'est pas possible" +#~ msgid "AT option not allowed in DEALLOCATE statement" +#~ msgstr "option AT non autorisée dans une instruction DEALLOCATE" -#~ msgid "COPY TO STDIN is not possible" -#~ msgstr "COPY TO STDIN n'est pas possible" +#~ msgid " --version output version information, then exit\n" +#~ msgstr " --version affiche la version et quitte\n" diff --git a/src/interfaces/ecpg/preproc/po/it.po b/src/interfaces/ecpg/preproc/po/it.po index 19c77755ea..1b47698688 100644 --- a/src/interfaces/ecpg/preproc/po/it.po +++ b/src/interfaces/ecpg/preproc/po/it.po @@ -1,20 +1,17 @@ # -# Translation of ecpg to Italian -# PostgreSQL Project +# ecpg.po +# Italian message translation file for ecpg # -# Associazione Culturale ITPUG - Italian PostgreSQL Users Group -# http://www.itpug.org/ - info@itpug.org +# For development and bug report please use: +# https://github.com/dvarrazzo/postgresql-it # -# Traduttori: -# * Daniele Varrazzo -# * Maurizio Totti +# Copyright (C) 2012-2017 PostgreSQL Global Development Group +# Copyright (C) 2010, Associazione Culturale ITPUG # -# Revisori: -# * 
Emanuele Zamprogno +# Daniele Varrazzo , 2012-2017. +# Maurizio Totti , 2010. # -# -# Copyright (c) 2010, Associazione Culturale ITPUG -# Distributed under the same license of the PostgreSQL project +# This file is distributed under the same license as the PostgreSQL package. # msgid "" msgstr "" @@ -23,7 +20,7 @@ msgstr "" "POT-Creation-Date: 2017-04-22 22:38+0000\n" "PO-Revision-Date: 2017-04-23 04:44+0100\n" "Last-Translator: Daniele Varrazzo \n" -"Language-Team: Gruppo traduzioni ITPUG \n" +"Language-Team: https://github.com/dvarrazzo/postgresql-it\n" "Language: it\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" diff --git a/src/interfaces/ecpg/preproc/po/ko.po b/src/interfaces/ecpg/preproc/po/ko.po index 124482f963..787058e059 100644 --- a/src/interfaces/ecpg/preproc/po/ko.po +++ b/src/interfaces/ecpg/preproc/po/ko.po @@ -1,9 +1,14 @@ +# LANGUAGE message translation file for ecpg +# Copyright (C) 2017 PostgreSQL Global Development Group +# This file is distributed under the same license as the PostgreSQL package. +# Ioseph Kim , 2017. +# msgid "" msgstr "" -"Project-Id-Version: PostgreSQL 9.6\n" +"Project-Id-Version: ecpg (PostgreSQL) 10\n" "Report-Msgid-Bugs-To: pgsql-bugs@postgresql.org\n" -"POT-Creation-Date: 2016-09-26 14:02+0900\n" -"PO-Revision-Date: 2016-09-26 15:33+0900\n" +"POT-Creation-Date: 2017-08-02 13:57+0900\n" +"PO-Revision-Date: 2017-08-17 13:21+0900\n" "Last-Translator: Ioseph Kim \n" "Language-Team: Korean Team \n" "Language: ko\n" @@ -22,7 +27,7 @@ msgstr "\"%s\" 변수는 숫자 형식이어야 함" msgid "descriptor \"%s\" does not exist" msgstr "\"%s\" 설명자가 없음" -#: descriptor.c:161 descriptor.c:212 +#: descriptor.c:161 descriptor.c:213 #, c-format msgid "descriptor header item \"%d\" does not exist" msgstr "설명자 헤더 항목 \"%d\"이(가) 없음" @@ -37,12 +42,12 @@ msgstr "null 허용 여부는 항상 1" msgid "key_member is always 0" msgstr "key_member는 항상 0" -#: descriptor.c:279 +#: descriptor.c:280 #, c-format msgid "descriptor item \"%s\" is not implemented" msgstr "설명자 항목 \"%s\"이(가) 구현되지 않음" -#: descriptor.c:289 +#: descriptor.c:290 #, c-format msgid "descriptor item \"%s\" cannot be set" msgstr "설명자 항목 \"%s\"을(를) 설정할 수 없음" @@ -144,8 +149,8 @@ msgstr " -t 트랜잭션 자동 커밋 설정\n" #: ecpg.c:57 #, c-format -msgid " --version output version information, then exit\n" -msgstr " --version 버전 정보를 출력하고 종료\n" +msgid " -V, --version output version information, then exit\n" +msgstr " -V, --version 버전 정보 보여주고 마침\n" #: ecpg.c:58 #, c-format @@ -172,142 +177,142 @@ msgstr "" "\n" "오류보고: .\n" -#: ecpg.c:143 +#: ecpg.c:139 #, c-format msgid "%s: could not locate my own executable path\n" msgstr "%s: 실행 가능한 경로를 지정할 수 없습니다\n" -#: ecpg.c:186 ecpg.c:337 ecpg.c:347 +#: ecpg.c:174 ecpg.c:327 ecpg.c:337 #, c-format msgid "%s: could not open file \"%s\": %s\n" msgstr "%s: \"%s\" 파일 열 수 없음: %s\n" -#: ecpg.c:225 ecpg.c:238 ecpg.c:254 ecpg.c:279 +#: ecpg.c:213 ecpg.c:226 ecpg.c:242 ecpg.c:268 #, c-format msgid "Try \"%s --help\" for more information.\n" msgstr "자제한 사항은 \"%s --help\" 명령으로 살펴보십시오.\n" -#: ecpg.c:249 +#: ecpg.c:237 #, c-format msgid "%s: parser debug support (-d) not available\n" msgstr "%s: 파서 디버그 지원(-d)을 사용할 수 없음\n" -#: ecpg.c:267 +#: ecpg.c:256 #, c-format -msgid "%s, the PostgreSQL embedded C preprocessor, version %d.%d.%d\n" -msgstr "PostgreSQL 포함 C 전처리기 %s의 버전 %d.%d.%d\n" +msgid "%s, the PostgreSQL embedded C preprocessor, version %s\n" +msgstr "%s, PostgreSQL 포함 C 전처리기, 버전 %s\n" -#: ecpg.c:269 +#: ecpg.c:258 #, c-format msgid "EXEC SQL INCLUDE ... search starts here:\n" msgstr "EXEC SQL INCLUDE ... 
여기서 검색 시작:\n" -#: ecpg.c:272 +#: ecpg.c:261 #, c-format msgid "end of search list\n" msgstr "검색 목록의 끝\n" -#: ecpg.c:278 +#: ecpg.c:267 #, c-format msgid "%s: no input files specified\n" msgstr "%s: 지정된 입력 파일 없음\n" -#: ecpg.c:470 +#: ecpg.c:460 #, c-format msgid "cursor \"%s\" has been declared but not opened" msgstr "\"%s\" 커서가 선언되었지만 열리지 않음" -#: ecpg.c:483 preproc.y:127 +#: ecpg.c:473 preproc.y:127 #, c-format msgid "could not remove output file \"%s\"\n" msgstr "출력 파일 \"%s\"을(를) 제거할 수 없음\n" -#: pgc.l:440 +#: pgc.l:431 #, c-format msgid "unterminated /* comment" msgstr "마무리 안된 /* 주석" -#: pgc.l:453 +#: pgc.l:444 #, c-format msgid "invalid bit string literal" msgstr "잘못된 비트 문자열 리터럴" -#: pgc.l:462 +#: pgc.l:453 #, c-format msgid "unterminated bit string literal" msgstr "마무리 안된 비트 문자열 문자" -#: pgc.l:478 +#: pgc.l:469 #, c-format msgid "unterminated hexadecimal string literal" msgstr "마무리 안된 16진수 문자열 문자" -#: pgc.l:556 +#: pgc.l:547 #, c-format msgid "unterminated quoted string" msgstr "마무리 안된 따옴표 안의 문자열" -#: pgc.l:613 pgc.l:626 +#: pgc.l:605 pgc.l:618 #, c-format msgid "zero-length delimited identifier" msgstr "길이가 0인 구분 식별자" -#: pgc.l:634 +#: pgc.l:626 #, c-format msgid "unterminated quoted identifier" msgstr "마무리 안된 따옴표 안의 식별자" -#: pgc.l:889 +#: pgc.l:881 #, c-format msgid "nested /* ... */ comments" msgstr "중첩된 /* ... */ 주석" -#: pgc.l:982 +#: pgc.l:974 #, c-format msgid "missing identifier in EXEC SQL UNDEF command" msgstr "EXEC SQL UNDEF 명령에 식별자 누락" -#: pgc.l:1028 pgc.l:1042 +#: pgc.l:1020 pgc.l:1034 #, c-format msgid "missing matching \"EXEC SQL IFDEF\" / \"EXEC SQL IFNDEF\"" msgstr "일치하는 \"EXEC SQL IFDEF\" / \"EXEC SQL IFNDEF\" 누락" -#: pgc.l:1031 pgc.l:1044 pgc.l:1220 +#: pgc.l:1023 pgc.l:1036 pgc.l:1212 #, c-format msgid "missing \"EXEC SQL ENDIF;\"" msgstr "\"EXEC SQL ENDIF;\" 누락" -#: pgc.l:1060 pgc.l:1079 +#: pgc.l:1052 pgc.l:1071 #, c-format msgid "more than one EXEC SQL ELSE" msgstr "두 개 이상의 EXEC SQL ELSE" -#: pgc.l:1101 pgc.l:1115 +#: pgc.l:1093 pgc.l:1107 #, c-format msgid "unmatched EXEC SQL ENDIF" msgstr "일치하지 않는 EXEC SQL ENDIF" -#: pgc.l:1135 +#: pgc.l:1127 #, c-format msgid "too many nested EXEC SQL IFDEF conditions" msgstr "중첩된 EXEC SQL IFDEF 조건이 너무 많음" -#: pgc.l:1168 +#: pgc.l:1160 #, c-format msgid "missing identifier in EXEC SQL IFDEF command" msgstr "EXEC SQL IFDEF 명령에 식별자 누락" -#: pgc.l:1177 +#: pgc.l:1169 #, c-format msgid "missing identifier in EXEC SQL DEFINE command" msgstr "EXEC SQL DEFINE 명령에 식별자 누락" -#: pgc.l:1210 +#: pgc.l:1202 #, c-format msgid "syntax error in EXEC SQL INCLUDE command" msgstr "EXEC SQL INCLUDE 명령에 구문 오류 발생" -#: pgc.l:1259 +#: pgc.l:1251 #, c-format msgid "" "internal error: unreachable state; please report this to \n" "Language-Team: Brazilian Portuguese \n" "Language: pt_BR\n" @@ -58,9 +58,7 @@ msgstr "item do descritor \"%s\" não pode ser definido" msgid "" "%s is the PostgreSQL embedded SQL preprocessor for C programs.\n" "\n" -msgstr "" -"%s é o pré-processador SQL embutido do PostgeSQL para programas em C.\n" -"\n" +msgstr "%s é o pré-processador SQL embutido do PostgreSQL para programas em C.\n\n" #: ecpg.c:37 #, c-format diff --git a/src/interfaces/ecpg/preproc/po/ru.po b/src/interfaces/ecpg/preproc/po/ru.po index e91843c39f..ffcaf90a16 100644 --- a/src/interfaces/ecpg/preproc/po/ru.po +++ b/src/interfaces/ecpg/preproc/po/ru.po @@ -1,14 +1,13 @@ # Russian message translation file for ecpg # Copyright (C) 2012-2016 PostgreSQL Global Development Group # This file is distributed under the same license as the PostgreSQL package. 
-# Alexander Lakhin , 2012-2017. -# +# Alexander Lakhin , 2012-2017, 2018. msgid "" msgstr "" "Project-Id-Version: ecpg (PostgreSQL current)\n" "Report-Msgid-Bugs-To: pgsql-bugs@postgresql.org\n" -"POT-Creation-Date: 2017-04-02 23:38+0000\n" -"PO-Revision-Date: 2017-03-27 16:26+0300\n" +"POT-Creation-Date: 2018-01-31 07:53+0300\n" +"PO-Revision-Date: 2018-01-31 08:18+0300\n" "Last-Translator: Alexander Lakhin \n" "Language-Team: Russian \n" "Language: ru\n" @@ -321,14 +320,14 @@ msgstr "" "внутренняя ошибка: недостижимое состояние; пожалуйста, сообщите в " -#: pgc.l:1375 +#: pgc.l:1376 #, c-format msgid "Error: include path \"%s/%s\" is too long on line %d, skipping\n" msgstr "" "Ошибка: путь включаемых файлов \"%s/%s\" в строке %d слишком длинный, " "пропускается\n" -#: pgc.l:1398 +#: pgc.l:1399 #, c-format msgid "could not open include file \"%s\" on line %d" msgstr "не удалось открыть включаемый файл \"%s\" (строка %d)" @@ -362,100 +361,100 @@ msgstr "определение типа не может включать ини msgid "type name \"string\" is reserved in Informix mode" msgstr "имя типа \"string\" в режиме Informix зарезервировано" -#: preproc.y:546 preproc.y:15336 +#: preproc.y:546 preproc.y:15393 #, c-format msgid "type \"%s\" is already defined" msgstr "тип \"%s\" уже определён" -#: preproc.y:570 preproc.y:15994 preproc.y:16314 variable.c:620 +#: preproc.y:570 preproc.y:16051 preproc.y:16371 variable.c:620 #, c-format msgid "multidimensional arrays for simple data types are not supported" msgstr "многомерные массивы с простыми типами данных не поддерживаются" -#: preproc.y:1679 +#: preproc.y:1674 #, c-format msgid "AT option not allowed in CLOSE DATABASE statement" msgstr "оператор CLOSE DATABASE с параметром AT не поддерживается" -#: preproc.y:1892 +#: preproc.y:1883 #, c-format msgid "AT option not allowed in CONNECT statement" msgstr "оператор CONNECT с параметром AT не поддерживается" -#: preproc.y:1926 +#: preproc.y:1917 #, c-format msgid "AT option not allowed in DISCONNECT statement" msgstr "оператор DISCONNECT с параметром AT не поддерживается" -#: preproc.y:1981 +#: preproc.y:1972 #, c-format msgid "AT option not allowed in SET CONNECTION statement" msgstr "оператор SET CONNECTION с параметром AT не поддерживается" -#: preproc.y:2003 +#: preproc.y:1994 #, c-format msgid "AT option not allowed in TYPE statement" msgstr "оператор TYPE с параметром AT не поддерживается" -#: preproc.y:2012 +#: preproc.y:2003 #, c-format msgid "AT option not allowed in VAR statement" msgstr "оператор VAR с параметром AT не поддерживается" -#: preproc.y:2019 +#: preproc.y:2010 #, c-format msgid "AT option not allowed in WHENEVER statement" msgstr "оператор WHENEVER с параметром AT не поддерживается" -#: preproc.y:2271 preproc.y:2276 preproc.y:2392 preproc.y:3929 preproc.y:5460 -#: preproc.y:5469 preproc.y:5777 preproc.y:7313 preproc.y:8745 preproc.y:8750 -#: preproc.y:11462 preproc.y:12083 +#: preproc.y:2079 preproc.y:2251 preproc.y:2256 preproc.y:2372 preproc.y:3950 +#: preproc.y:5500 preproc.y:5509 preproc.y:5809 preproc.y:7345 preproc.y:8782 +#: preproc.y:8787 preproc.y:11519 preproc.y:12140 #, c-format msgid "unsupported feature will be passed to server" msgstr "неподдерживаемая функция будет передана серверу" -#: preproc.y:2650 +#: preproc.y:2630 #, c-format msgid "SHOW ALL is not implemented" msgstr "SHOW ALL не реализовано" -#: preproc.y:3274 +#: preproc.y:3306 #, c-format msgid "COPY FROM STDIN is not implemented" msgstr "операция COPY FROM STDIN не реализована" -#: preproc.y:9628 preproc.y:14925 +#: 
preproc.y:9685 preproc.y:14982 #, c-format msgid "using variable \"%s\" in different declare statements is not supported" msgstr "" "использование переменной \"%s\" в разных операторах DECLARE не поддерживается" -#: preproc.y:9630 preproc.y:14927 +#: preproc.y:9687 preproc.y:14984 #, c-format msgid "cursor \"%s\" is already defined" msgstr "курсор \"%s\" уже определён" -#: preproc.y:10060 +#: preproc.y:10117 #, c-format msgid "no longer supported LIMIT #,# syntax passed to server" msgstr "не поддерживаемое более предложение LIMIT #,# передано на сервер" -#: preproc.y:10376 preproc.y:10383 +#: preproc.y:10433 preproc.y:10440 #, c-format msgid "subquery in FROM must have an alias" msgstr "подзапрос во FROM должен иметь псевдоним" -#: preproc.y:14655 +#: preproc.y:14712 #, c-format msgid "CREATE TABLE AS cannot specify INTO" msgstr "в CREATE TABLE AS нельзя указать INTO" -#: preproc.y:14691 +#: preproc.y:14748 #, c-format msgid "expected \"@\", found \"%s\"" msgstr "ожидался знак \"@\", но на этом месте \"%s\"" -#: preproc.y:14703 +#: preproc.y:14760 #, c-format msgid "" "only protocols \"tcp\" and \"unix\" and database type \"postgresql\" are " @@ -464,90 +463,89 @@ msgstr "" "поддерживаются только протоколы \"tcp\" и \"unix\", а тип базы данных - " "\"postgresql\"" -#: preproc.y:14706 +#: preproc.y:14763 #, c-format msgid "expected \"://\", found \"%s\"" msgstr "ожидалось \"://\", но на этом месте \"%s\"" -#: preproc.y:14711 +#: preproc.y:14768 #, c-format msgid "Unix-domain sockets only work on \"localhost\" but not on \"%s\"" -msgstr "" -"Доменные сокеты Unix работают только с \"localhost\", но не с адресом \"%s\"" +msgstr "Unix-сокеты работают только с \"localhost\", но не с адресом \"%s\"" -#: preproc.y:14737 +#: preproc.y:14794 #, c-format msgid "expected \"postgresql\", found \"%s\"" msgstr "ожидался тип \"postgresql\", но на этом месте \"%s\"" -#: preproc.y:14740 +#: preproc.y:14797 #, c-format msgid "invalid connection type: %s" msgstr "неверный тип подключения: %s" -#: preproc.y:14749 +#: preproc.y:14806 #, c-format msgid "expected \"@\" or \"://\", found \"%s\"" msgstr "ожидалось \"@\" или \"://\", но на этом месте \"%s\"" -#: preproc.y:14824 preproc.y:14842 +#: preproc.y:14881 preproc.y:14899 #, c-format msgid "invalid data type" msgstr "неверный тип данных" -#: preproc.y:14853 preproc.y:14870 +#: preproc.y:14910 preproc.y:14927 #, c-format msgid "incomplete statement" msgstr "неполный оператор" -#: preproc.y:14856 preproc.y:14873 +#: preproc.y:14913 preproc.y:14930 #, c-format msgid "unrecognized token \"%s\"" msgstr "нераспознанное ключевое слово \"%s\"" -#: preproc.y:15147 +#: preproc.y:15204 #, c-format msgid "only data types numeric and decimal have precision/scale argument" msgstr "" "точность/масштаб можно указать только для типов данных numeric и decimal" -#: preproc.y:15159 +#: preproc.y:15216 #, c-format msgid "interval specification not allowed here" msgstr "определение интервала здесь не допускается" -#: preproc.y:15311 preproc.y:15363 +#: preproc.y:15368 preproc.y:15420 #, c-format msgid "too many levels in nested structure/union definition" msgstr "слишком много уровней в определении вложенной структуры/объединения" -#: preproc.y:15502 +#: preproc.y:15559 #, c-format msgid "pointers to varchar are not implemented" msgstr "указатели на varchar не реализованы" -#: preproc.y:15689 preproc.y:15714 +#: preproc.y:15746 preproc.y:15771 #, c-format msgid "using unsupported DESCRIBE statement" msgstr "используется неподдерживаемый оператор DESCRIBE" -#: preproc.y:15961 +#: 
preproc.y:16018 #, c-format msgid "initializer not allowed in EXEC SQL VAR command" msgstr "команда EXEC SQL VAR не может включать инициализатор" -#: preproc.y:16272 +#: preproc.y:16329 #, c-format msgid "arrays of indicators are not allowed on input" msgstr "массивы индикаторов на входе недопустимы" -#: preproc.y:16493 +#: preproc.y:16550 #, c-format msgid "operator not allowed in variable definition" msgstr "недопустимый оператор в определении переменной" #. translator: %s is typically the translation of "syntax error" -#: preproc.y:16531 +#: preproc.y:16591 #, c-format msgid "%s at or near \"%s\"" msgstr "%s (примерное положение: \"%s\")" @@ -557,7 +555,7 @@ msgstr "%s (примерное положение: \"%s\")" msgid "out of memory" msgstr "нехватка памяти" -#: type.c:212 type.c:664 +#: type.c:212 type.c:674 #, c-format msgid "unrecognized variable type code %d" msgstr "нераспознанный код типа переменной %d" @@ -603,7 +601,17 @@ msgstr "индикатор структуры должен быть структ msgid "indicator for simple data type has to be simple" msgstr "индикатор простого типа должен быть простым" -#: type.c:723 +#: type.c:615 +#, c-format +msgid "indicator struct \"%s\" has too few members" +msgstr "в структуре индикаторе \"%s\" слишком мало членов" + +#: type.c:622 +#, c-format +msgid "indicator struct \"%s\" has too many members" +msgstr "в структуре индикаторе \"%s\" слишком много членов" + +#: type.c:733 #, c-format msgid "unrecognized descriptor item code %d" msgstr "нераспознанный код элемента дескриптора %d" diff --git a/src/interfaces/ecpg/preproc/po/sv.po b/src/interfaces/ecpg/preproc/po/sv.po new file mode 100644 index 0000000000..e9fb14a015 --- /dev/null +++ b/src/interfaces/ecpg/preproc/po/sv.po @@ -0,0 +1,664 @@ +# SWEDISHE message translation file for ecpg +# Copyright (C) 2017 PostgreSQL Global Development Group +# This file is distributed under the same license as the PostgreSQL package. +# Dennis Björklund , 2017, 2018. 
+# +msgid "" +msgstr "" +"Project-Id-Version: ecpg (PostgreSQL) 10\n" +"Report-Msgid-Bugs-To: pgsql-bugs@postgresql.org\n" +"POT-Creation-Date: 2018-04-28 22:08+0000\n" +"PO-Revision-Date: 2018-04-29 12:28+0200\n" +"Last-Translator: Dennis Björklund \n" +"Language-Team: Swedish \n" +"Language: sv\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=UTF-8\n" +"Content-Transfer-Encoding: 8bit\n" +"Plural-Forms: nplurals=2; plural=n != 1;\n" + +#: descriptor.c:64 +#, c-format +msgid "variable \"%s\" must have a numeric type" +msgstr "variabel \"%s\" måste ha en numerisk typ" + +#: descriptor.c:124 descriptor.c:146 +#, c-format +msgid "descriptor \"%s\" does not exist" +msgstr "deskriptor \"%s\" finns inte" + +#: descriptor.c:161 descriptor.c:213 +#, c-format +msgid "descriptor header item \"%d\" does not exist" +msgstr "deskriptor-header-post \"%d\" finns inte" + +#: descriptor.c:183 +#, c-format +msgid "nullable is always 1" +msgstr "nullable är alltid 1" + +#: descriptor.c:186 +#, c-format +msgid "key_member is always 0" +msgstr "key_member är alltid 0" + +#: descriptor.c:280 +#, c-format +msgid "descriptor item \"%s\" is not implemented" +msgstr "deskriptor-post \"%s\" är inte implementerad" + +#: descriptor.c:290 +#, c-format +msgid "descriptor item \"%s\" cannot be set" +msgstr "deskriptor-post \"%s\" kan inte sättas" + +#: ecpg.c:35 +#, c-format +msgid "" +"%s is the PostgreSQL embedded SQL preprocessor for C programs.\n" +"\n" +msgstr "" +"%s är PostgreSQLs inbäddade SQL-preprocessor för C-program.\n" +"\n" + +#: ecpg.c:37 +#, c-format +msgid "" +"Usage:\n" +" %s [OPTION]... FILE...\n" +"\n" +msgstr "" +"Användning:\n" +" %s [FLAGGA]... FIL...\n" +"\n" + +#: ecpg.c:40 +#, c-format +msgid "Options:\n" +msgstr "Flaggor:\n" + +#: ecpg.c:41 +#, c-format +msgid "" +" -c automatically generate C code from embedded SQL code;\n" +" this affects EXEC SQL TYPE\n" +msgstr "" +" -c generera automatiskt C-kod från inbäddad SQL-kod;\n" +" detta påverkar EXEC SQL TYPE\n" + +#: ecpg.c:43 +#, c-format +msgid "" +" -C MODE set compatibility mode; MODE can be one of\n" +" \"INFORMIX\", \"INFORMIX_SE\", \"ORACLE\"\n" +msgstr "" +" -C LÄGE sätt kompabilitetsläge; LÄGE kan vara en av\n" +" \"INFORMIX\", \"INFORMIX_SE\", \"ORACLE\"\n" + +#: ecpg.c:46 +#, c-format +msgid " -d generate parser debug output\n" +msgstr " -d generera parser-debug-utmatning\n" + +#: ecpg.c:48 +#, c-format +msgid " -D SYMBOL define SYMBOL\n" +msgstr " -D SYMBOL definiera SYMBOL\n" + +#: ecpg.c:49 +#, c-format +msgid " -h parse a header file, this option includes option \"-c\"\n" +msgstr " -h parsa en header-fil, denna flagga inkluderar flaggan \"-c\"\n" + +#: ecpg.c:50 +#, c-format +msgid " -i parse system include files as well\n" +msgstr " -i parsa system-include-filer dessutom\n" + +#: ecpg.c:51 +#, c-format +msgid " -I DIRECTORY search DIRECTORY for include files\n" +msgstr " -I KATALOG sök i KATALOG efter include-filer\n" + +#: ecpg.c:52 +#, c-format +msgid " -o OUTFILE write result to OUTFILE\n" +msgstr " -o UTFIL skriv resultat till UTFIL\n" + +#: ecpg.c:53 +#, c-format +msgid "" +" -r OPTION specify run-time behavior; OPTION can be:\n" +" \"no_indicator\", \"prepare\", \"questionmarks\"\n" +msgstr "" +" -r FLAGGA ange runtime-beteende; FLAGGA kan vara en av:\n" +" \"no_indicator\", \"prepare\", \"questionmarks\"\n" + +#: ecpg.c:55 +#, c-format +msgid " --regression run in regression testing mode\n" +msgstr " --regression kör i regressions-test-läge\n" + +#: ecpg.c:56 +#, c-format +msgid " -t turn on autocommit of 
transactions\n" +msgstr " -t slå på auto-commit av transaktioner\n" + +#: ecpg.c:57 +#, c-format +msgid " -V, --version output version information, then exit\n" +msgstr " -V, --version visa versionsinformation, avsluta sedan\n" + +#: ecpg.c:58 +#, c-format +msgid " -?, --help show this help, then exit\n" +msgstr " -?, --help visa denna hjälp, avsluta sedan\n" + +#: ecpg.c:59 +#, c-format +msgid "" +"\n" +"If no output file is specified, the name is formed by adding .c to the\n" +"input file name, after stripping off .pgc if present.\n" +msgstr "" +"\n" +"Om ingen utdatafil anges så skapas namnet genom att lägga till .c till\n" +"indatafilnamnet, detta efter att .pgc strippats bort om det var med.\n" + +#: ecpg.c:61 +#, c-format +msgid "" +"\n" +"Report bugs to .\n" +msgstr "" +"\n" +"Rapportera fel till .\n" + +#: ecpg.c:139 +#, c-format +msgid "%s: could not locate my own executable path\n" +msgstr "%s: kunde inte hitta min egna körbara fils sökväg\n" + +#: ecpg.c:174 ecpg.c:331 ecpg.c:342 +#, c-format +msgid "%s: could not open file \"%s\": %s\n" +msgstr "%s: kunde inte öppna fil \"%s\": %s\n" + +#: ecpg.c:217 ecpg.c:230 ecpg.c:246 ecpg.c:272 +#, c-format +msgid "Try \"%s --help\" for more information.\n" +msgstr "Försök med \"%s --help\" för mer information.\n" + +#: ecpg.c:241 +#, c-format +msgid "%s: parser debug support (-d) not available\n" +msgstr "%s: parser-debug-stöd (-d) är inte tillgängligt\n" + +#: ecpg.c:260 +#, c-format +msgid "%s, the PostgreSQL embedded C preprocessor, version %s\n" +msgstr "%s, PostgreSQLs inbäddade C-preprocessor, version %s\n" + +#: ecpg.c:262 +#, c-format +msgid "EXEC SQL INCLUDE ... search starts here:\n" +msgstr "EXEC SQL INCLUDE ... sökning startar här:\n" + +#: ecpg.c:265 +#, c-format +msgid "end of search list\n" +msgstr "slut på söklista\n" + +#: ecpg.c:271 +#, c-format +msgid "%s: no input files specified\n" +msgstr "%s: inga indatafiler angivna\n" + +#: ecpg.c:465 +#, c-format +msgid "cursor \"%s\" has been declared but not opened" +msgstr "markören \"%s\" har deklarerats men inte öppnats" + +#: ecpg.c:478 preproc.y:127 +#, c-format +msgid "could not remove output file \"%s\"\n" +msgstr "kunde inte ta bort utdatafil \"%s\"\n" + +#: pgc.l:435 +#, c-format +msgid "unterminated /* comment" +msgstr "ej avslutad /*-kommentar" + +#: pgc.l:448 +#, c-format +msgid "invalid bit string literal" +msgstr "ogiltig bit-sträng-literal" + +#: pgc.l:457 +#, c-format +msgid "unterminated bit string literal" +msgstr "ej avslutad bitsträngslitteral" + +#: pgc.l:473 +#, c-format +msgid "unterminated hexadecimal string literal" +msgstr "ej avslutad hexadecimal stränglitteral" + +#: pgc.l:551 +#, c-format +msgid "unterminated quoted string" +msgstr "icketerminerad citerad sträng" + +#: pgc.l:609 pgc.l:622 +#, c-format +msgid "zero-length delimited identifier" +msgstr "noll-längds avdelad identifierare" + +#: pgc.l:630 +#, c-format +msgid "unterminated quoted identifier" +msgstr "ej avslutad citerad identifierare" + +#: pgc.l:880 +#, c-format +msgid "nested /* ... */ comments" +msgstr "nästlade /* ... 
*/-kommentarer" + +#: pgc.l:973 +#, c-format +msgid "missing identifier in EXEC SQL UNDEF command" +msgstr "saknar identifierare i EXEC SQL UNDEF-kommando" + +#: pgc.l:1019 pgc.l:1033 +#, c-format +msgid "missing matching \"EXEC SQL IFDEF\" / \"EXEC SQL IFNDEF\"" +msgstr "saknar matchande \"EXEC SQL IFDEF\" / \"EXEC SQL IFNDEF\"" + +#: pgc.l:1022 pgc.l:1035 pgc.l:1211 +#, c-format +msgid "missing \"EXEC SQL ENDIF;\"" +msgstr "saknar \"EXEC SQL ENDIF;\"" + +#: pgc.l:1051 pgc.l:1070 +#, c-format +msgid "more than one EXEC SQL ELSE" +msgstr "mer än en EXEC SQL ELSE" + +#: pgc.l:1092 pgc.l:1106 +#, c-format +msgid "unmatched EXEC SQL ENDIF" +msgstr "ej matchad EXEC SQL ENDIF" + +#: pgc.l:1126 +#, c-format +msgid "too many nested EXEC SQL IFDEF conditions" +msgstr "för många nästlade EXEC SQL IFDEF-villkor" + +#: pgc.l:1159 +#, c-format +msgid "missing identifier in EXEC SQL IFDEF command" +msgstr "saknar identifierare i EXEC SQL IFDEF-kommando" + +#: pgc.l:1168 +#, c-format +msgid "missing identifier in EXEC SQL DEFINE command" +msgstr "saknar identifierare i EXEC SQL DEFINE-kommando" + +#: pgc.l:1201 +#, c-format +msgid "syntax error in EXEC SQL INCLUDE command" +msgstr "syntaxfel i EXEC SQL INCLUDE-kommando" + +#: pgc.l:1250 +#, c-format +msgid "internal error: unreachable state; please report this to " +msgstr "internt fel: state som ej skall kunna nås; vänligen rapportera detta till " + +#: pgc.l:1379 +#, c-format +msgid "Error: include path \"%s/%s\" is too long on line %d, skipping\n" +msgstr "Fel: include-sökväg \"%s/%s\" är för lång på rad %d, hoppar över\n" + +#: pgc.l:1402 +#, c-format +msgid "could not open include file \"%s\" on line %d" +msgstr "kunde inte öppna inkludefil \"%s\" på rad %d" + +#: preproc.y:31 +msgid "syntax error" +msgstr "syntaxfel" + +#: preproc.y:81 +#, c-format +msgid "WARNING: " +msgstr "VARNING: " + +#: preproc.y:84 +#, c-format +msgid "ERROR: " +msgstr "FEL: " + +#: preproc.y:508 +#, c-format +msgid "cursor \"%s\" does not exist" +msgstr "markör \"%s\" existerar inte" + +#: preproc.y:537 +#, c-format +msgid "initializer not allowed in type definition" +msgstr "initialiserare tillåts inte i typdefinition" + +#: preproc.y:539 +#, c-format +msgid "type name \"string\" is reserved in Informix mode" +msgstr "typnamn \"string\" är reserverat i Informix-läge" + +#: preproc.y:546 preproc.y:15722 +#, c-format +msgid "type \"%s\" is already defined" +msgstr "typen \"%s\" är redan definierad" + +#: preproc.y:570 preproc.y:16380 preproc.y:16705 variable.c:620 +#, c-format +msgid "multidimensional arrays for simple data types are not supported" +msgstr "multidimensionella array:er för enkla datatyper stöds inte" + +#: preproc.y:1693 +#, c-format +msgid "AT option not allowed in CLOSE DATABASE statement" +msgstr "AT-flaggan tillåts inte i CLOSE DATABASE-sats" + +#: preproc.y:1902 +#, c-format +msgid "AT option not allowed in CONNECT statement" +msgstr "AT-flaggan tillåts inte i CONNECT-sats" + +#: preproc.y:1936 +#, c-format +msgid "AT option not allowed in DISCONNECT statement" +msgstr "AT-flaggan tillåts inte i DISCONNECT-sats" + +#: preproc.y:1991 +#, c-format +msgid "AT option not allowed in SET CONNECTION statement" +msgstr "AT-flaggan tillåts inte i SET CONNECTION-sats" + +#: preproc.y:2013 +#, c-format +msgid "AT option not allowed in TYPE statement" +msgstr "AT-flaggan tillåts inte i TYPE-sats" + +#: preproc.y:2022 +#, c-format +msgid "AT option not allowed in VAR statement" +msgstr "AT-flaggan tillåts inte i VAR-sats" + +#: preproc.y:2029 +#, c-format +msgid "AT 
option not allowed in WHENEVER statement" +msgstr "AT-flaggan tillåts inte i WHENEVER-sats" + +#: preproc.y:2106 preproc.y:2278 preproc.y:2283 preproc.y:2399 preproc.y:4044 +#: preproc.y:5602 preproc.y:5611 preproc.y:5911 preproc.y:7510 preproc.y:9003 +#: preproc.y:9008 preproc.y:11790 +#, c-format +msgid "unsupported feature will be passed to server" +msgstr "ej stödd funktion skickass till servern" + +#: preproc.y:2657 +#, c-format +msgid "SHOW ALL is not implemented" +msgstr "SHOW ALL är inte implementerad" + +#: preproc.y:3385 +#, c-format +msgid "COPY FROM STDIN is not implemented" +msgstr "COPY FROM STDIN är inte implementerad" + +#: preproc.y:9956 preproc.y:15311 +#, c-format +msgid "using variable \"%s\" in different declare statements is not supported" +msgstr "använda variabel \"%s\" i olika deklarationssatser stöds inte" + +#: preproc.y:9958 preproc.y:15313 +#, c-format +msgid "cursor \"%s\" is already defined" +msgstr "markören \"%s\" är redan definierad" + +#: preproc.y:10388 +#, c-format +msgid "no longer supported LIMIT #,# syntax passed to server" +msgstr "ej längre stödd syntax LIMIT #,# har skickats till servern" + +#: preproc.y:10704 preproc.y:10711 +#, c-format +msgid "subquery in FROM must have an alias" +msgstr "subfråga i FROM måste ha ett alias" + +#: preproc.y:15041 +#, c-format +msgid "CREATE TABLE AS cannot specify INTO" +msgstr "CREATE TABLE AS kan inte ange INTO" + +#: preproc.y:15077 +#, c-format +msgid "expected \"@\", found \"%s\"" +msgstr "förväntade \"@\", hittade \"%s\"" + +#: preproc.y:15089 +#, c-format +msgid "only protocols \"tcp\" and \"unix\" and database type \"postgresql\" are supported" +msgstr "bara protokoll \"tcp\" och \"unix\" samt databastyp \"postgresql\" stöds" + +#: preproc.y:15092 +#, c-format +msgid "expected \"://\", found \"%s\"" +msgstr "förväntade \"://\", hittade \"%s\"" + +#: preproc.y:15097 +#, c-format +msgid "Unix-domain sockets only work on \"localhost\" but not on \"%s\"" +msgstr "Unix-domän-socket fungerart bara på \"localhost\" men inte på \"%s\"" + +#: preproc.y:15123 +#, c-format +msgid "expected \"postgresql\", found \"%s\"" +msgstr "förväntade \"postgresql\", hittade \"%s\"" + +#: preproc.y:15126 +#, c-format +msgid "invalid connection type: %s" +msgstr "ogiltig anslutningstyp: %s" + +#: preproc.y:15135 +#, c-format +msgid "expected \"@\" or \"://\", found \"%s\"" +msgstr "förväntade \"@\" eller \"://\", hittade \"%s\"" + +#: preproc.y:15210 preproc.y:15228 +#, c-format +msgid "invalid data type" +msgstr "ogiltig datatyp" + +#: preproc.y:15239 preproc.y:15256 +#, c-format +msgid "incomplete statement" +msgstr "ofullständig sats" + +#: preproc.y:15242 preproc.y:15259 +#, c-format +msgid "unrecognized token \"%s\"" +msgstr "okänd symbol \"%s\"" + +#: preproc.y:15533 +#, c-format +msgid "only data types numeric and decimal have precision/scale argument" +msgstr "bara datatyperna numeric och decimal har precision/skala-argument" + +#: preproc.y:15545 +#, c-format +msgid "interval specification not allowed here" +msgstr "intervallspecifikation tillåts inte här" + +#: preproc.y:15697 preproc.y:15749 +#, c-format +msgid "too many levels in nested structure/union definition" +msgstr "för många nästlade nivåer i struktur/union-definition" + +#: preproc.y:15888 +#, c-format +msgid "pointers to varchar are not implemented" +msgstr "pekare till varchar är inte implementerat" + +#: preproc.y:16075 preproc.y:16100 +#, c-format +msgid "using unsupported DESCRIBE statement" +msgstr "använder ej stödd DESCRIBE-sats" + +#: 
preproc.y:16347 +#, c-format +msgid "initializer not allowed in EXEC SQL VAR command" +msgstr "initialiserare tillåts inte i EXEC SQL VAR-kommando" + +#: preproc.y:16663 +#, c-format +msgid "arrays of indicators are not allowed on input" +msgstr "array:er av indikatorer tillåts inte vid indata" + +#: preproc.y:16884 +#, c-format +msgid "operator not allowed in variable definition" +msgstr "operator tillåts inte i variabeldefinition" + +#. translator: %s is typically the translation of "syntax error" +#: preproc.y:16925 +#, c-format +msgid "%s at or near \"%s\"" +msgstr "%s vid eller nära \"%s\"" + +#: type.c:18 type.c:30 +#, c-format +msgid "out of memory" +msgstr "slut på minne" + +#: type.c:212 type.c:676 +#, c-format +msgid "unrecognized variable type code %d" +msgstr "okänd variabeltypkod %d" + +#: type.c:261 +#, c-format +msgid "variable \"%s\" is hidden by a local variable of a different type" +msgstr "variabel \"%s\" döljs av en lokal variabel av annan typ" + +#: type.c:263 +#, c-format +msgid "variable \"%s\" is hidden by a local variable" +msgstr "variabel \"%s\" döljs av en lokal variabel" + +#: type.c:275 +#, c-format +msgid "indicator variable \"%s\" is hidden by a local variable of a different type" +msgstr "indikatorvariabel \"%s\" döljs av en lokal variabel av annan typ" + +#: type.c:277 +#, c-format +msgid "indicator variable \"%s\" is hidden by a local variable" +msgstr "indikatorvariabel \"%s\" döljs av en lokal variabel" + +#: type.c:285 +#, c-format +msgid "indicator for array/pointer has to be array/pointer" +msgstr "indikator för array/pekare måste vara en array/pekare" + +#: type.c:289 +#, c-format +msgid "nested arrays are not supported (except strings)" +msgstr "nästlade array:er stöds inte (förutom strängar)" + +#: type.c:331 +#, c-format +msgid "indicator for struct has to be a struct" +msgstr "indikator för en struktur måste vara en struktur" + +#: type.c:351 type.c:372 type.c:392 +#, c-format +msgid "indicator for simple data type has to be simple" +msgstr "indikator för enkla datatyper måste vara enkel" + +#: type.c:616 +#, c-format +msgid "indicator struct \"%s\" has too few members" +msgstr "indikatorstruktur \"%s\" har för få medlemmar" + +#: type.c:624 +#, c-format +msgid "indicator struct \"%s\" has too many members" +msgstr "indikatorstruktur \"%s\" har för många medlemmar" + +#: type.c:735 +#, c-format +msgid "unrecognized descriptor item code %d" +msgstr "okänd deskriptor-post-kod %d" + +#: variable.c:89 variable.c:116 +#, c-format +msgid "incorrectly formed variable \"%s\"" +msgstr "inkorrekt formatterad variabel \"%s\"" + +#: variable.c:139 +#, c-format +msgid "variable \"%s\" is not a pointer" +msgstr "variabel \"%s\" är inte en pekare" + +#: variable.c:142 variable.c:167 +#, c-format +msgid "variable \"%s\" is not a pointer to a structure or a union" +msgstr "variabel \"%s\" är inte en pekare till en struktur eller union" + +#: variable.c:154 +#, c-format +msgid "variable \"%s\" is neither a structure nor a union" +msgstr "variabel \"%s\" är varken en struktur eller en union" + +#: variable.c:164 +#, c-format +msgid "variable \"%s\" is not an array" +msgstr "variabel \"%s\" är inte en array" + +#: variable.c:233 variable.c:255 +#, c-format +msgid "variable \"%s\" is not declared" +msgstr "variabel \"%s\" är inte deklarerad" + +#: variable.c:494 +#, c-format +msgid "indicator variable must have an integer type" +msgstr "indikatorvariabel måste ha en heltalstyp" + +#: variable.c:506 +#, c-format +msgid "unrecognized data type name \"%s\"" +msgstr 
"okänt datatypsnamn \"%s\"" + +#: variable.c:517 variable.c:525 variable.c:542 variable.c:545 +#, c-format +msgid "multidimensional arrays are not supported" +msgstr "multidimensionella array:er stöds inte" + +#: variable.c:534 +#, c-format +msgid "multilevel pointers (more than 2 levels) are not supported; found %d level" +msgid_plural "multilevel pointers (more than 2 levels) are not supported; found %d levels" +msgstr[0] "multinivåpekare (mer än 2 nivåer) stöds inte; hittade %d nivå" +msgstr[1] "multinivåpekare (mer än 2 nivåer) stöds inte; hittade %d nivåer" + +#: variable.c:539 +#, c-format +msgid "pointer to pointer is not supported for this data type" +msgstr "pekare till pekare stöds inte för denna datatyp" + +#: variable.c:559 +#, c-format +msgid "multidimensional arrays for structures are not supported" +msgstr "multidimensionella array:er av strukturer stöds inte" diff --git a/src/interfaces/ecpg/preproc/po/vi.po b/src/interfaces/ecpg/preproc/po/vi.po new file mode 100644 index 0000000000..5c7c35758a --- /dev/null +++ b/src/interfaces/ecpg/preproc/po/vi.po @@ -0,0 +1,684 @@ +# LANGUAGE message translation file for ecpg +# Copyright (C) 2018 PostgreSQL Global Development Group +# This file is distributed under the same license as the ecpg (PostgreSQL) package. +# FIRST AUTHOR , 2018. +# +msgid "" +msgstr "" +"Project-Id-Version: ecpg (PostgreSQL) 11\n" +"Report-Msgid-Bugs-To: pgsql-bugs@postgresql.org\n" +"POT-Creation-Date: 2018-04-22 12:08+0000\n" +"PO-Revision-Date: 2018-05-04 22:20+0900\n" +"Language-Team: \n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=UTF-8\n" +"Content-Transfer-Encoding: 8bit\n" +"Plural-Forms: nplurals=1; plural=0;\n" +"X-Generator: Poedit 2.0.6\n" +"Last-Translator: Dang Minh Huong \n" +"Language: vi_VN\n" + +#: descriptor.c:64 +#, c-format +msgid "variable \"%s\" must have a numeric type" +msgstr "biến \"%s\" phải là kiểu numeric" + +#: descriptor.c:124 descriptor.c:146 +#, c-format +msgid "descriptor \"%s\" does not exist" +msgstr "descriptor \"%s\" không tồn tại" + +#: descriptor.c:161 descriptor.c:213 +#, c-format +msgid "descriptor header item \"%d\" does not exist" +msgstr "descriptor header item \"%d\" không tồn tại" + +#: descriptor.c:183 +#, c-format +msgid "nullable is always 1" +msgstr "giá trị có thể là NULL luôn là 1" + +#: descriptor.c:186 +#, c-format +msgid "key_member is always 0" +msgstr "key_member luôn là 0" + +#: descriptor.c:280 +#, c-format +msgid "descriptor item \"%s\" is not implemented" +msgstr "descriptor item \"%s\" chưa được thực thi" + +#: descriptor.c:290 +#, c-format +msgid "descriptor item \"%s\" cannot be set" +msgstr "descriptor item \"%s\" không thể thiết lập được" + +#: ecpg.c:35 +#, c-format +msgid "" +"%s is the PostgreSQL embedded SQL preprocessor for C programs.\n" +"\n" +msgstr "" +"%s là tiền xử lý nhúng SQL của PostgreSQL cho ngôn ngữ C.\n" +"\n" + +#: ecpg.c:37 +#, c-format +msgid "" +"Usage:\n" +" %s [OPTION]... FILE...\n" +"\n" +msgstr "" +"Cách sử dụng:\n" +" %s [TÙY CHỌN]... 
TỆP...\n" +"\n" + +#: ecpg.c:40 +#, c-format +msgid "Options:\n" +msgstr "Tùy chọn:\n" + +#: ecpg.c:41 +#, c-format +msgid "" +" -c automatically generate C code from embedded SQL code;\n" +" this affects EXEC SQL TYPE\n" +msgstr "" +" -c tự động sinh mã C từ mã SQL nhúng;\n" +" điều này ảnh hưởng tới EXEC SQL TYPE\n" + +#: ecpg.c:43 +#, c-format +msgid "" +" -C MODE set compatibility mode; MODE can be one of\n" +" \"INFORMIX\", \"INFORMIX_SE\", \"ORACLE\"\n" +msgstr "" +" -C MODE thiết lập chế độ tương thích; MODE có thể là \n" +" \"INFORMIX\", \"INFORMIX_SE\" hoặc \"ORACLE\"\n" + +#: ecpg.c:46 +#, c-format +msgid " -d generate parser debug output\n" +msgstr " -d xuất debug log cho trình phân tích cú pháp\n" + +#: ecpg.c:48 +#, c-format +msgid " -D SYMBOL define SYMBOL\n" +msgstr " -D SYMBOL định nghĩa SYMBOL\n" + +#: ecpg.c:49 +#, c-format +msgid "" +" -h parse a header file, this option includes option \"-c\"\n" +msgstr "" +" -h phân tích cú pháp tệp header, tùy chọn này bao gồm \n" +" cả tùy chọn \"-c\"\n" + +#: ecpg.c:50 +#, c-format +msgid " -i parse system include files as well\n" +msgstr " -i phân tích cả cú pháp của các tệp include files\n" + +#: ecpg.c:51 +#, c-format +msgid " -I DIRECTORY search DIRECTORY for include files\n" +msgstr " -I DIRECTORY tìm kiếm THƯ MỤC cho các include files\n" + +#: ecpg.c:52 +#, c-format +msgid " -o OUTFILE write result to OUTFILE\n" +msgstr " -o OUTFILE ghi kết quả ra OUTFILE\n" + +#: ecpg.c:53 +#, c-format +msgid "" +" -r OPTION specify run-time behavior; OPTION can be:\n" +" \"no_indicator\", \"prepare\", \"questionmarks\"\n" +msgstr "" +" -r OPTION chỉ định cách thức run-time; OPTION có thể là:\n" +" \"no_indicator\", \"prepare\" hay \"questionmarks\"\n" + +#: ecpg.c:55 +#, c-format +msgid " --regression run in regression testing mode\n" +msgstr " --regression chạy trong chế độ regression test\n" + +#: ecpg.c:56 +#, c-format +msgid " -t turn on autocommit of transactions\n" +msgstr "" +" -t thiết lập autocommit sang on trong các transactions\n" + +#: ecpg.c:57 +#, c-format +msgid " -V, --version output version information, then exit\n" +msgstr " -V, --version xuât thông tin phiên bản, sau đó thoát\n" + +#: ecpg.c:58 +#, c-format +msgid " -?, --help show this help, then exit\n" +msgstr " -?, --help hiển thị nội dung hướng dẫn này, sau đó thoát\n" + +#: ecpg.c:59 +#, c-format +msgid "" +"\n" +"If no output file is specified, the name is formed by adding .c to the\n" +"input file name, after stripping off .pgc if present.\n" +msgstr "" +"\n" +"Nếu tệp xuất ra (OUTFILE) không được chỉ định, tên của tệp .c sẽ được \n" +"lấy từ tên tệp .pgc đầu vào sau khi đã bỏ đi phần mở rộng .pgc\n" + +#: ecpg.c:61 +#, c-format +msgid "" +"\n" +"Report bugs to .\n" +msgstr "" +"\n" +"Báo cáo lỗi tới .\n" + +#: ecpg.c:139 +#, c-format +msgid "%s: could not locate my own executable path\n" +msgstr "%s: không thể xác định ví trí đường dẫn để thực thi\n" + +#: ecpg.c:174 ecpg.c:331 ecpg.c:342 +#, c-format +msgid "%s: could not open file \"%s\": %s\n" +msgstr "%s: không thể mở file \"%s\": %s\n" + +#: ecpg.c:217 ecpg.c:230 ecpg.c:246 ecpg.c:272 +#, c-format +msgid "Try \"%s --help\" for more information.\n" +msgstr "Thử \"%s --help\" để biết thêm thông tin.\n" + +#: ecpg.c:241 +#, c-format +msgid "%s: parser debug support (-d) not available\n" +msgstr "" +"%s: chế độ hỗ trợ debug (-d) cho trình phân tích cú pháp chưa sẵn sàng\n" + +#: ecpg.c:260 +#, c-format +msgid "%s, the PostgreSQL embedded C preprocessor, version %s\n" +msgstr "%s, tiền xử lý nhúng ngôn ngữ C cho 
PostgreSQL, phiên bản %s\n" + +#: ecpg.c:262 +#, c-format +msgid "EXEC SQL INCLUDE ... search starts here:\n" +msgstr "EXEC SQL INCLUDE ... tìm kiếm bắt đầu từ đây:\n" + +#: ecpg.c:265 +#, c-format +msgid "end of search list\n" +msgstr "cuối danh sách tìm kiếm\n" + +#: ecpg.c:271 +#, c-format +msgid "%s: no input files specified\n" +msgstr "%s: không có tệp đầu vào nào được chỉ định\n" + +#: ecpg.c:465 +#, c-format +msgid "cursor \"%s\" has been declared but not opened" +msgstr "con trỏ \"%s\" đã được định nghĩa nhưng chưa được open" + +#: ecpg.c:478 preproc.y:127 +#, c-format +msgid "could not remove output file \"%s\"\n" +msgstr "không thể xóa tệp đầu ra \"%s\"\n" + +#: pgc.l:435 +#, c-format +msgid "unterminated /* comment" +msgstr "dấu bình luận /* chưa được kết thúc" + +#: pgc.l:448 +#, c-format +msgid "invalid bit string literal" +msgstr "chuỗi ký tự bit không hợp lệ" + +#: pgc.l:457 +#, c-format +msgid "unterminated bit string literal" +msgstr "chuỗi bít ký tự chưa hoàn chỉnh" + +#: pgc.l:473 +#, c-format +msgid "unterminated hexadecimal string literal" +msgstr "chuỗi ký tự thập lục phân chưa hoàn chỉnh" + +#: pgc.l:551 +#, c-format +msgid "unterminated quoted string" +msgstr "chuỗi trích dẫn chưa hoàn chỉnh" + +#: pgc.l:609 pgc.l:622 +#, c-format +msgid "zero-length delimited identifier" +msgstr "ký tự phân cách có độ dài bằng 0" + +#: pgc.l:630 +#, c-format +msgid "unterminated quoted identifier" +msgstr "identifier trích dẫn chưa hoàn chỉnh" + +#: pgc.l:880 +#, c-format +msgid "nested /* ... */ comments" +msgstr "dấu bình luận /* ... */ lồng nhau" + +#: pgc.l:973 +#, c-format +msgid "missing identifier in EXEC SQL UNDEF command" +msgstr "thiếu identifier trong câu lệnh EXEC SQL UNDEF" + +#: pgc.l:1019 pgc.l:1033 +#, c-format +msgid "missing matching \"EXEC SQL IFDEF\" / \"EXEC SQL IFNDEF\"" +msgstr "không khớp giữa \"EXEC SQL IFDEF\" / \"EXEC SQL IFNDEF\"" + +#: pgc.l:1022 pgc.l:1035 pgc.l:1211 +#, c-format +msgid "missing \"EXEC SQL ENDIF;\"" +msgstr "thiếu câu lệnh \"EXEC SQL ENDIF;\"" + +#: pgc.l:1051 pgc.l:1070 +#, c-format +msgid "more than one EXEC SQL ELSE" +msgstr "có nhiều hơn một câu lệnh EXEC SQL ELSE" + +#: pgc.l:1092 pgc.l:1106 +#, c-format +msgid "unmatched EXEC SQL ENDIF" +msgstr "câu lệnh EXEC SQL ENDIF không khớp" + +#: pgc.l:1126 +#, c-format +msgid "too many nested EXEC SQL IFDEF conditions" +msgstr "quá nhiều điều kiện lồng EXEC SQL IFDEF" + +#: pgc.l:1159 +#, c-format +msgid "missing identifier in EXEC SQL IFDEF command" +msgstr "thiếu identifier trong câu lệnh EXEC SQL IFDEF" + +#: pgc.l:1168 +#, c-format +msgid "missing identifier in EXEC SQL DEFINE command" +msgstr "thiếu identifier trong câu lệnh EXEC SQL DEFINE" + +#: pgc.l:1201 +#, c-format +msgid "syntax error in EXEC SQL INCLUDE command" +msgstr "lỗi cú pháp trong câu lệnh EXEC SQL INCLUDE" + +#: pgc.l:1250 +#, c-format +msgid "" +"internal error: unreachable state; please report this to " +msgstr "" +"lỗi nội bộ: trạng thái không thể truy cập; vui lòng báo cáo điều này với " +"" + +#: pgc.l:1379 +#, c-format +msgid "Error: include path \"%s/%s\" is too long on line %d, skipping\n" +msgstr "Lỗi: đường dẫn include \"%s/%s\" quá dài trên dòng %d, bỏ qua\n" + +#: pgc.l:1402 +#, c-format +msgid "could not open include file \"%s\" on line %d" +msgstr "không thể mở tệp include \"%s\" trên dòng %d" + +#: preproc.y:31 +msgid "syntax error" +msgstr "lỗi cú pháp" + +#: preproc.y:81 +#, c-format +msgid "WARNING: " +msgstr "CẢNH BÁO: " + +#: preproc.y:84 +#, c-format +msgid "ERROR: " +msgstr "LỖI: " + +#: 
preproc.y:508 +#, c-format +msgid "cursor \"%s\" does not exist" +msgstr "on trỏ \"%s\" không tồn tại" + +#: preproc.y:537 +#, c-format +msgid "initializer not allowed in type definition" +msgstr "initializer không được cho phép trong định nghĩa kiểu" + +#: preproc.y:539 +#, c-format +msgid "type name \"string\" is reserved in Informix mode" +msgstr "kiểu \"string\" được dành riêng trong chế độ Informix" + +#: preproc.y:546 preproc.y:15714 +#, c-format +msgid "type \"%s\" is already defined" +msgstr "kiểu \"%s\" đã được định nghĩa" + +#: preproc.y:570 preproc.y:16372 preproc.y:16697 variable.c:620 +#, c-format +msgid "multidimensional arrays for simple data types are not supported" +msgstr "mảng đa chiều cho các kiểu dữ liệu đơn giản không được hỗ trợ" + +#: preproc.y:1693 +#, c-format +msgid "AT option not allowed in CLOSE DATABASE statement" +msgstr "Tùy chọn AT không được phép trong câu lệnh CLOSE DATABASE" + +#: preproc.y:1902 +#, c-format +msgid "AT option not allowed in CONNECT statement" +msgstr "Tùy chọn AT không được phép trong câu lệnh CONNECT" + +#: preproc.y:1936 +#, c-format +msgid "AT option not allowed in DISCONNECT statement" +msgstr "Tùy chọn AT không được phép trong câu lệnh DISCONNECT" + +#: preproc.y:1991 +#, c-format +msgid "AT option not allowed in SET CONNECTION statement" +msgstr "Tùy chọn AT không được cho phép trong câu lệnh SET CONNECTION" + +#: preproc.y:2013 +#, c-format +msgid "AT option not allowed in TYPE statement" +msgstr "Tùy chọn AT không được phép trong câu lệnh TYPE" + +#: preproc.y:2022 +#, c-format +msgid "AT option not allowed in VAR statement" +msgstr "Tùy chọn AT không được phép trong câu lệnh VAR" + +#: preproc.y:2029 +#, c-format +msgid "AT option not allowed in WHENEVER statement" +msgstr "Tùy chọn AT không được phép trong câu lệnh WHENEVER" + +#: preproc.y:2106 preproc.y:2278 preproc.y:2283 preproc.y:2399 preproc.y:4036 +#: preproc.y:5594 preproc.y:5603 preproc.y:5903 preproc.y:7502 preproc.y:8995 +#: preproc.y:9000 preproc.y:11782 +#, c-format +msgid "unsupported feature will be passed to server" +msgstr "tính năng không được hỗ trợ sẽ được chuyển đến server" + +#: preproc.y:2657 +#, c-format +msgid "SHOW ALL is not implemented" +msgstr "Lệnh SHOW ALL chưa được thực thi" + +#: preproc.y:3377 +#, c-format +msgid "COPY FROM STDIN is not implemented" +msgstr "Lệnh COPY FROM STDIN chưa được thực thi" + +#: preproc.y:9948 preproc.y:15303 +#, c-format +msgid "using variable \"%s\" in different declare statements is not supported" +msgstr "" +"sử dụng biến \"%s\" trong các câu lệnh khai báo khác nhau không được hỗ trợ" + +#: preproc.y:9950 preproc.y:15305 +#, c-format +msgid "cursor \"%s\" is already defined" +msgstr "con trỏ \"%s\" đã được định nghĩa" + +#: preproc.y:10380 +#, c-format +msgid "no longer supported LIMIT #,# syntax passed to server" +msgstr "cú pháp không còn được hỗ trợ LIMIT #, # được chuyển đến server" + +#: preproc.y:10696 preproc.y:10703 +#, c-format +msgid "subquery in FROM must have an alias" +msgstr "truy vấn phụ trong FROM phải có bí danh" + +#: preproc.y:15033 +#, c-format +msgid "CREATE TABLE AS cannot specify INTO" +msgstr "Không thể sử dụng INTO trong câu lệnh CREATE TABLE AS" + +#: preproc.y:15069 +#, c-format +msgid "expected \"@\", found \"%s\"" +msgstr "kì vọng \"@\", tìm thấy \"%s\"" + +#: preproc.y:15081 +#, c-format +msgid "" +"only protocols \"tcp\" and \"unix\" and database type \"postgresql\" are " +"supported" +msgstr "" +"chỉ các giao thức \"tcp\" và \"unix\" và kiểu cơ sở dữ liệu \"postgresql\" " +"được hỗ 
trợ" + +#: preproc.y:15084 +#, c-format +msgid "expected \"://\", found \"%s\"" +msgstr "kỳ vọng \"://\", tìm thấy \"%s\"" + +#: preproc.y:15089 +#, c-format +msgid "Unix-domain sockets only work on \"localhost\" but not on \"%s\"" +msgstr "" +"Unix-domain sockets chỉ hoạt động trên \"localhost\" nhưng không hoạt động " +"trên \"%s\"" + +#: preproc.y:15115 +#, c-format +msgid "expected \"postgresql\", found \"%s\"" +msgstr "kỳ vọng \"postgresql\", tìm thấy \"%s\"" + +#: preproc.y:15118 +#, c-format +msgid "invalid connection type: %s" +msgstr "loại kết nối không hợp lệ: %s" + +#: preproc.y:15127 +#, c-format +msgid "expected \"@\" or \"://\", found \"%s\"" +msgstr "kỳ vọng \"@\" or \"://\", tìm được \"%s\"" + +#: preproc.y:15202 preproc.y:15220 +#, c-format +msgid "invalid data type" +msgstr "kiểu dữ liệu không hợp lệ" + +#: preproc.y:15231 preproc.y:15248 +#, c-format +msgid "incomplete statement" +msgstr "câu lệnh chưa hoàn chỉnh" + +#: preproc.y:15234 preproc.y:15251 +#, c-format +msgid "unrecognized token \"%s\"" +msgstr "không nhận ra token \"%s\"" + +#: preproc.y:15525 +#, c-format +msgid "only data types numeric and decimal have precision/scale argument" +msgstr "chỉ các kiểu dữ liệu numeric và decimal có đối số precision và scale" + +#: preproc.y:15537 +#, c-format +msgid "interval specification not allowed here" +msgstr "chỉ định interval không được phép ở đây" + +#: preproc.y:15689 preproc.y:15741 +#, c-format +msgid "too many levels in nested structure/union definition" +msgstr "quá nhiều cấp độ lồng nhau trong định nghĩa structure/union" + +#: preproc.y:15880 +#, c-format +msgid "pointers to varchar are not implemented" +msgstr "con trỏ đến kiểu varchar chưa được thực thi" + +#: preproc.y:16067 preproc.y:16092 +#, c-format +msgid "using unsupported DESCRIBE statement" +msgstr "sử dụng câu lệnh chưa được hỗ trợ DESCRIBE" + +#: preproc.y:16339 +#, c-format +msgid "initializer not allowed in EXEC SQL VAR command" +msgstr "initializer không được phép trong câu lệnh EXEC SQL VAR" + +#: preproc.y:16655 +#, c-format +msgid "arrays of indicators are not allowed on input" +msgstr "đầu vào là mảng của các indicators không được cho phép" + +#: preproc.y:16876 +#, c-format +msgid "operator not allowed in variable definition" +msgstr "toán tử không được phép trong định nghĩa biến" + +#. 
translator: %s is typically the translation of "syntax error" +#: preproc.y:16917 +#, c-format +msgid "%s at or near \"%s\"" +msgstr "%s tại hoặc gần \"%s\"" + +#: type.c:18 type.c:30 +#, c-format +msgid "out of memory" +msgstr "hết bộ nhớ" + +#: type.c:212 type.c:674 +#, c-format +msgid "unrecognized variable type code %d" +msgstr "không nhận ra kiểu mã biến %d" + +#: type.c:261 +#, c-format +msgid "variable \"%s\" is hidden by a local variable of a different type" +msgstr "biến \"%s\" bị ẩn bởi biến cục bộ thuộc một kiểu khác" + +#: type.c:263 +#, c-format +msgid "variable \"%s\" is hidden by a local variable" +msgstr "biến \"%s\" bị ẩn bởi biến cục bộ" + +#: type.c:275 +#, c-format +msgid "" +"indicator variable \"%s\" is hidden by a local variable of a different type" +msgstr "biến indicator \"%s\" bị ẩn bởi một biến cục bộ thuộc một kiểu khác" + +#: type.c:277 +#, c-format +msgid "indicator variable \"%s\" is hidden by a local variable" +msgstr "biến indicator \"%s\" bị ẩn bởi biến cục bộ" + +#: type.c:285 +#, c-format +msgid "indicator for array/pointer has to be array/pointer" +msgstr "mảng/con trỏ cho indicator phải là mảng/con trỏ" + +#: type.c:289 +#, c-format +msgid "nested arrays are not supported (except strings)" +msgstr "mảng lồng nhau không được hỗ trợ (ngoại trừ chuỗi)" + +#: type.c:331 +#, c-format +msgid "indicator for struct has to be a struct" +msgstr "indicator cho structure phải là một structure" + +#: type.c:351 type.c:372 type.c:392 +#, c-format +msgid "indicator for simple data type has to be simple" +msgstr "indicator cho kiểu dữ liệu đơn giản phải đơn giản" + +#: type.c:615 +#, c-format +msgid "indicator struct \"%s\" has too few members" +msgstr "cấu trúc của indicator \"%s\" có quá ít thành viên" + +#: type.c:622 +#, c-format +msgid "indicator struct \"%s\" has too many members" +msgstr "cấu trúc của indicator \"%s\" có quá nhiều thành viên" + +#: type.c:733 +#, c-format +msgid "unrecognized descriptor item code %d" +msgstr "không nhận ra mã descriptor item %d" + +#: variable.c:89 variable.c:116 +#, c-format +msgid "incorrectly formed variable \"%s\"" +msgstr "biến được định dạng không chính xác \"%s\"" + +#: variable.c:139 +#, c-format +msgid "variable \"%s\" is not a pointer" +msgstr "biến \"%s\" không phải là con trỏ" + +#: variable.c:142 variable.c:167 +#, c-format +msgid "variable \"%s\" is not a pointer to a structure or a union" +msgstr "biến \"%s\" không phải là con trỏ trỏ tới structure hoặc union" + +#: variable.c:154 +#, c-format +msgid "variable \"%s\" is neither a structure nor a union" +msgstr "biến \"%s\" không phải là kiểu structure hay union" + +#: variable.c:164 +#, c-format +msgid "variable \"%s\" is not an array" +msgstr "biến \"%s\" không phải là mảng" + +#: variable.c:233 variable.c:255 +#, c-format +msgid "variable \"%s\" is not declared" +msgstr "biến \"%s\" chưa được định nghĩa" + +#: variable.c:494 +#, c-format +msgid "indicator variable must have an integer type" +msgstr "biến indicator phải có kiểu integer" + +#: variable.c:506 +#, c-format +msgid "unrecognized data type name \"%s\"" +msgstr "không nhận ra kiểu dữ liệu \"%s\"" + +#: variable.c:517 variable.c:525 variable.c:542 variable.c:545 +#, c-format +msgid "multidimensional arrays are not supported" +msgstr "mảng đa chiều không được hỗ trợ" + +#: variable.c:534 +#, c-format +msgid "" +"multilevel pointers (more than 2 levels) are not supported; found %d level" +msgid_plural "" +"multilevel pointers (more than 2 levels) are not supported; found %d levels" +msgstr[0] "" +"các con 
trỏ đa cấp (nhiều hơn 2 cấp) không được hỗ trợ; đã tìm thấy %d cấp" + +#: variable.c:539 +#, c-format +msgid "pointer to pointer is not supported for this data type" +msgstr "con trỏ đến con trỏ không được hỗ trợ cho kiểu dữ liệu này" + +#: variable.c:559 +#, c-format +msgid "multidimensional arrays for structures are not supported" +msgstr "mảng đa chiều cho kiểu cấu trúc không được hỗ trợ" diff --git a/src/interfaces/ecpg/preproc/type.c b/src/interfaces/ecpg/preproc/type.c index 750cdf9c7c..253873dd4e 100644 --- a/src/interfaces/ecpg/preproc/type.c +++ b/src/interfaces/ecpg/preproc/type.c @@ -69,12 +69,12 @@ ECPGstruct_member_dup(struct ECPGstruct_member *rm) rm = rm->next; } - return (new); + return new; } /* The NAME argument is copied. The type argument is preserved as a pointer. */ void -ECPGmake_struct_member(char *name, struct ECPGtype *type, struct ECPGstruct_member **start) +ECPGmake_struct_member(const char *name, struct ECPGtype *type, struct ECPGstruct_member **start) { struct ECPGstruct_member *ptr, *ne = @@ -135,78 +135,78 @@ get_type(enum ECPGttype type) switch (type) { case ECPGt_char: - return ("ECPGt_char"); + return "ECPGt_char"; break; case ECPGt_unsigned_char: - return ("ECPGt_unsigned_char"); + return "ECPGt_unsigned_char"; break; case ECPGt_short: - return ("ECPGt_short"); + return "ECPGt_short"; break; case ECPGt_unsigned_short: - return ("ECPGt_unsigned_short"); + return "ECPGt_unsigned_short"; break; case ECPGt_int: - return ("ECPGt_int"); + return "ECPGt_int"; break; case ECPGt_unsigned_int: - return ("ECPGt_unsigned_int"); + return "ECPGt_unsigned_int"; break; case ECPGt_long: - return ("ECPGt_long"); + return "ECPGt_long"; break; case ECPGt_unsigned_long: - return ("ECPGt_unsigned_long"); + return "ECPGt_unsigned_long"; break; case ECPGt_long_long: - return ("ECPGt_long_long"); + return "ECPGt_long_long"; break; case ECPGt_unsigned_long_long: - return ("ECPGt_unsigned_long_long"); + return "ECPGt_unsigned_long_long"; break; case ECPGt_float: - return ("ECPGt_float"); + return "ECPGt_float"; break; case ECPGt_double: - return ("ECPGt_double"); + return "ECPGt_double"; break; case ECPGt_bool: - return ("ECPGt_bool"); + return "ECPGt_bool"; break; case ECPGt_varchar: - return ("ECPGt_varchar"); + return "ECPGt_varchar"; case ECPGt_NO_INDICATOR: /* no indicator */ - return ("ECPGt_NO_INDICATOR"); + return "ECPGt_NO_INDICATOR"; break; case ECPGt_char_variable: /* string that should not be quoted */ - return ("ECPGt_char_variable"); + return "ECPGt_char_variable"; break; case ECPGt_const: /* constant string quoted */ - return ("ECPGt_const"); + return "ECPGt_const"; break; case ECPGt_decimal: - return ("ECPGt_decimal"); + return "ECPGt_decimal"; break; case ECPGt_numeric: - return ("ECPGt_numeric"); + return "ECPGt_numeric"; break; case ECPGt_interval: - return ("ECPGt_interval"); + return "ECPGt_interval"; break; case ECPGt_descriptor: - return ("ECPGt_descriptor"); + return "ECPGt_descriptor"; break; case ECPGt_sqlda: - return ("ECPGt_sqlda"); + return "ECPGt_sqlda"; break; case ECPGt_date: - return ("ECPGt_date"); + return "ECPGt_date"; break; case ECPGt_timestamp: - return ("ECPGt_timestamp"); + return "ECPGt_timestamp"; break; case ECPGt_string: - return ("ECPGt_string"); + return "ECPGt_string"; break; default: mmerror(PARSE_ERROR, ET_ERROR, "unrecognized variable type code %d", type); @@ -609,7 +609,19 @@ ECPGdump_a_struct(FILE *o, const char *name, const char *ind_name, char *arrsize prefix, ind_prefix, arrsize, type->struct_sizeof, (ind_p != NULL) ? 
ind_type->struct_sizeof : NULL); if (ind_p != NULL && ind_p != &struct_no_indicator) + { ind_p = ind_p->next; + if (ind_p == NULL && p->next != NULL) + { + mmerror(PARSE_ERROR, ET_WARNING, "indicator struct \"%s\" has too few members", ind_name); + ind_p = &struct_no_indicator; + } + } + } + + if (ind_type != NULL && ind_p != NULL && ind_p != &struct_no_indicator) + { + mmerror(PARSE_ERROR, ET_WARNING, "indicator struct \"%s\" has too many members", ind_name); } free(pbuf); @@ -674,51 +686,51 @@ get_dtype(enum ECPGdtype type) switch (type) { case ECPGd_count: - return ("ECPGd_countr"); + return "ECPGd_countr"; break; case ECPGd_data: - return ("ECPGd_data"); + return "ECPGd_data"; break; case ECPGd_di_code: - return ("ECPGd_di_code"); + return "ECPGd_di_code"; break; case ECPGd_di_precision: - return ("ECPGd_di_precision"); + return "ECPGd_di_precision"; break; case ECPGd_indicator: - return ("ECPGd_indicator"); + return "ECPGd_indicator"; break; case ECPGd_key_member: - return ("ECPGd_key_member"); + return "ECPGd_key_member"; break; case ECPGd_length: - return ("ECPGd_length"); + return "ECPGd_length"; break; case ECPGd_name: - return ("ECPGd_name"); + return "ECPGd_name"; break; case ECPGd_nullable: - return ("ECPGd_nullable"); + return "ECPGd_nullable"; break; case ECPGd_octet: - return ("ECPGd_octet"); + return "ECPGd_octet"; break; case ECPGd_precision: - return ("ECPGd_precision"); + return "ECPGd_precision"; break; case ECPGd_ret_length: - return ("ECPGd_ret_length"); + return "ECPGd_ret_length"; case ECPGd_ret_octet: - return ("ECPGd_ret_octet"); + return "ECPGd_ret_octet"; break; case ECPGd_scale: - return ("ECPGd_scale"); + return "ECPGd_scale"; break; case ECPGd_type: - return ("ECPGd_type"); + return "ECPGd_type"; break; case ECPGd_cardinality: - return ("ECPGd_cardinality"); + return "ECPGd_cardinality"; default: mmerror(PARSE_ERROR, ET_ERROR, "unrecognized descriptor item code %d", type); } diff --git a/src/interfaces/ecpg/preproc/type.h b/src/interfaces/ecpg/preproc/type.h index 4b93336480..fc70d7d218 100644 --- a/src/interfaces/ecpg/preproc/type.h +++ b/src/interfaces/ecpg/preproc/type.h @@ -33,7 +33,7 @@ struct ECPGtype }; /* Everything is malloced. 
*/ -void ECPGmake_struct_member(char *, struct ECPGtype *, struct ECPGstruct_member **); +void ECPGmake_struct_member(const char *, struct ECPGtype *, struct ECPGstruct_member **); struct ECPGtype *ECPGmake_simple_type(enum ECPGttype, char *, int); struct ECPGtype *ECPGmake_array_type(struct ECPGtype *, char *); struct ECPGtype *ECPGmake_struct_type(struct ECPGstruct_member *, enum ECPGttype, char *, char *); diff --git a/src/interfaces/ecpg/preproc/variable.c b/src/interfaces/ecpg/preproc/variable.c index 31225738e0..39bf3b2474 100644 --- a/src/interfaces/ecpg/preproc/variable.c +++ b/src/interfaces/ecpg/preproc/variable.c @@ -18,7 +18,7 @@ new_variable(const char *name, struct ECPGtype *type, int brace_level) p->next = allvariables; allvariables = p; - return (p); + return p; } static struct variable * @@ -44,12 +44,12 @@ find_struct_member(char *name, char *str, struct ECPGstruct_member *members, int switch (members->type->type) { case ECPGt_array: - return (new_variable(name, ECPGmake_array_type(ECPGmake_simple_type(members->type->u.element->type, members->type->u.element->size, members->type->u.element->counter), members->type->size), brace_level)); + return new_variable(name, ECPGmake_array_type(ECPGmake_simple_type(members->type->u.element->type, members->type->u.element->size, members->type->u.element->counter), members->type->size), brace_level); case ECPGt_struct: case ECPGt_union: - return (new_variable(name, ECPGmake_struct_type(members->type->u.members, members->type->type, members->type->type_name, members->type->struct_sizeof), brace_level)); + return new_variable(name, ECPGmake_struct_type(members->type->u.members, members->type->type, members->type->type_name, members->type->struct_sizeof), brace_level); default: - return (new_variable(name, ECPGmake_simple_type(members->type->type, members->type->size, members->type->counter), brace_level)); + return new_variable(name, ECPGmake_simple_type(members->type->type, members->type->size, members->type->counter), brace_level); } } else @@ -91,26 +91,26 @@ find_struct_member(char *name, char *str, struct ECPGstruct_member *members, int switch (members->type->u.element->type) { case ECPGt_array: - return (new_variable(name, ECPGmake_array_type(ECPGmake_simple_type(members->type->u.element->u.element->type, members->type->u.element->u.element->size, members->type->u.element->u.element->counter), members->type->u.element->size), brace_level)); + return new_variable(name, ECPGmake_array_type(ECPGmake_simple_type(members->type->u.element->u.element->type, members->type->u.element->u.element->size, members->type->u.element->u.element->counter), members->type->u.element->size), brace_level); case ECPGt_struct: case ECPGt_union: - return (new_variable(name, ECPGmake_struct_type(members->type->u.element->u.members, members->type->u.element->type, members->type->u.element->type_name, members->type->u.element->struct_sizeof), brace_level)); + return new_variable(name, ECPGmake_struct_type(members->type->u.element->u.members, members->type->u.element->type, members->type->u.element->type_name, members->type->u.element->struct_sizeof), brace_level); default: - return (new_variable(name, ECPGmake_simple_type(members->type->u.element->type, members->type->u.element->size, members->type->u.element->counter), brace_level)); + return new_variable(name, ECPGmake_simple_type(members->type->u.element->type, members->type->u.element->size, members->type->u.element->counter), brace_level); } break; case '-': if (members->type->type == ECPGt_array) - 
return (find_struct_member(name, ++end, members->type->u.element->u.members, brace_level)); + return find_struct_member(name, ++end, members->type->u.element->u.members, brace_level); else - return (find_struct_member(name, ++end, members->type->u.members, brace_level)); + return find_struct_member(name, ++end, members->type->u.members, brace_level); break; break; case '.': if (members->type->type == ECPGt_array) - return (find_struct_member(name, end, members->type->u.element->u.members, brace_level)); + return find_struct_member(name, end, members->type->u.element->u.members, brace_level); else - return (find_struct_member(name, end, members->type->u.members, brace_level)); + return find_struct_member(name, end, members->type->u.members, brace_level); break; default: mmfatal(PARSE_ERROR, "incorrectly formed variable \"%s\"", name); @@ -120,7 +120,7 @@ find_struct_member(char *name, char *str, struct ECPGstruct_member *members, int } } - return (NULL); + return NULL; } static struct variable * @@ -185,7 +185,7 @@ find_simple(char *name) return p; } - return (NULL); + return NULL; } /* Note that this function will end the program in case of an unknown */ @@ -236,12 +236,12 @@ find_variable(char *name) switch (p->type->u.element->type) { case ECPGt_array: - return (new_variable(name, ECPGmake_array_type(ECPGmake_simple_type(p->type->u.element->u.element->type, p->type->u.element->u.element->size, p->type->u.element->u.element->counter), p->type->u.element->size), p->brace_level)); + return new_variable(name, ECPGmake_array_type(ECPGmake_simple_type(p->type->u.element->u.element->type, p->type->u.element->u.element->size, p->type->u.element->u.element->counter), p->type->u.element->size), p->brace_level); case ECPGt_struct: case ECPGt_union: - return (new_variable(name, ECPGmake_struct_type(p->type->u.element->u.members, p->type->u.element->type, p->type->u.element->type_name, p->type->u.element->struct_sizeof), p->brace_level)); + return new_variable(name, ECPGmake_struct_type(p->type->u.element->u.members, p->type->u.element->type, p->type->u.element->type_name, p->type->u.element->struct_sizeof), p->brace_level); default: - return (new_variable(name, ECPGmake_simple_type(p->type->u.element->type, p->type->u.element->size, p->type->u.element->counter), p->brace_level)); + return new_variable(name, ECPGmake_simple_type(p->type->u.element->type, p->type->u.element->size, p->type->u.element->counter), p->brace_level); } } } @@ -254,7 +254,7 @@ find_variable(char *name) if (p == NULL) mmfatal(PARSE_ERROR, "variable \"%s\" is not declared", name); - return (p); + return p; } void @@ -505,7 +505,7 @@ get_typedef(char *name) if (!this) mmfatal(PARSE_ERROR, "unrecognized data type name \"%s\"", name); - return (this); + return this; } void diff --git a/src/interfaces/ecpg/test/Makefile b/src/interfaces/ecpg/test/Makefile index 73ac9e2ac0..c761a4dcb0 100644 --- a/src/interfaces/ecpg/test/Makefile +++ b/src/interfaces/ecpg/test/Makefile @@ -30,6 +30,7 @@ all install installdirs uninstall distprep: $(MAKE) -C pgtypeslib $@ $(MAKE) -C preproc $@ $(MAKE) -C compat_informix $@ + $(MAKE) -C compat_oracle $@ $(MAKE) -C thread $@ clean distclean maintainer-clean: @@ -38,6 +39,7 @@ clean distclean maintainer-clean: $(MAKE) -C pgtypeslib $@ $(MAKE) -C preproc $@ $(MAKE) -C compat_informix $@ + $(MAKE) -C compat_oracle $@ $(MAKE) -C thread $@ rm -rf tmp_check results log rm -f pg_regress regression.diffs regression.out pg_regress_ecpg.o $(WIN32RES) @@ -66,7 +68,7 @@ $(top_builddir)/src/port/pg_config_paths.h: 
$(top_builddir)/src/Makefile.global # $(srcdir) to the build directory. ifdef VPATH -remaining_files_src := $(wildcard $(srcdir)/*/*.pgc) $(wildcard $(srcdir)/expected/*.c) $(wildcard $(srcdir)/expected/*.stdout) $(wildcard $(srcdir)/expected/*.stderr) $(srcdir)/resultmap +remaining_files_src := $(wildcard $(srcdir)/*/*.pgc) $(wildcard $(srcdir)/expected/*.c) $(wildcard $(srcdir)/expected/*.stdout) $(wildcard $(srcdir)/expected/*.stderr) remaining_files_build := $(patsubst $(srcdir)/%, $(abs_builddir)/%, $(remaining_files_src)) all: $(remaining_files_build) @@ -80,9 +82,9 @@ REGRESS_OPTS = --dbname=ecpg1_regression,ecpg2_regression --create-role=regress_ check: all $(with_temp_install) ./pg_regress $(REGRESS_OPTS) --temp-instance=./tmp_check $(TEMP_CONF) --bindir= $(pg_regress_locale_flags) $(THREAD) --schedule=$(srcdir)/ecpg_schedule sql/twophase -# the same options, but with --listen-on-tcp -checktcp: all - $(with_temp_install) ./pg_regress $(REGRESS_OPTS) --temp-instance=./tmp_check $(TEMP_CONF) --bindir= $(pg_regress_locale_flags) $(THREAD) --schedule=$(srcdir)/ecpg_schedule_tcp --host=localhost +# Connect to the server using TCP, and add a TCP-specific test. +checktcp: all | temp-install + $(with_temp_install) ./pg_regress $(REGRESS_OPTS) --temp-instance=./tmp_check $(TEMP_CONF) --bindir= $(pg_regress_locale_flags) $(THREAD) --schedule=$(srcdir)/ecpg_schedule --host=localhost sql/twophase connect/test1 installcheck: all ./pg_regress $(REGRESS_OPTS) --bindir='$(bindir)' $(pg_regress_locale_flags) $(THREAD) --schedule=$(srcdir)/ecpg_schedule @@ -95,5 +97,5 @@ installcheck: all installcheck-prepared-txns: all ./pg_regress $(REGRESS_OPTS) --bindir='$(bindir)' $(pg_regress_locale_flags) $(THREAD) --schedule=$(srcdir)/ecpg_schedule sql/twophase -check-prepared-txns: all +check-prepared-txns: all | temp-install $(with_temp_install) ./pg_regress $(REGRESS_OPTS) --temp-instance=./tmp_check $(TEMP_CONF) --bindir= $(pg_regress_locale_flags) $(THREAD) --schedule=$(srcdir)/ecpg_schedule sql/twophase diff --git a/src/interfaces/ecpg/test/Makefile.regress b/src/interfaces/ecpg/test/Makefile.regress index b3d7c1e874..4da1bb8a03 100644 --- a/src/interfaces/ecpg/test/Makefile.regress +++ b/src/interfaces/ecpg/test/Makefile.regress @@ -5,8 +5,9 @@ override CPPFLAGS := -I../../include -I$(top_srcdir)/src/interfaces/ecpg/include -I$(libpq_srcdir) $(CPPFLAGS) override CFLAGS += $(PTHREAD_CFLAGS) -override LDFLAGS := -L../../ecpglib -L../../pgtypeslib $(filter-out -l%, $(libpq)) $(LDFLAGS) -override LIBS := -lecpg -lpgtypes $(filter -l%, $(libpq)) $(LIBS) $(PTHREAD_LIBS) +LDFLAGS_INTERNAL += -L../../ecpglib -lecpg -L../../pgtypeslib -lpgtypes $(libpq) + +override LIBS += $(PTHREAD_LIBS) # Standard way to invoke the ecpg preprocessor ECPG = ../../preproc/ecpg --regression -I$(srcdir)/../../include -I$(srcdir) @@ -14,6 +15,7 @@ ECPG = ../../preproc/ecpg --regression -I$(srcdir)/../../include -I$(srcdir) # Files that most or all ecpg preprocessor test outputs depend on ECPG_TEST_DEPENDENCIES = ../../preproc/ecpg$(X) \ $(srcdir)/../regression.h \ + $(srcdir)/../printf_hack.h \ $(srcdir)/../../include/sqlca.h \ $(srcdir)/../../include/sqlda.h \ $(srcdir)/../../include/sqltypes.h \ diff --git a/src/interfaces/ecpg/test/compat_informix/Makefile b/src/interfaces/ecpg/test/compat_informix/Makefile index 8a5e854496..d50fdc29fd 100644 --- a/src/interfaces/ecpg/test/compat_informix/Makefile +++ b/src/interfaces/ecpg/test/compat_informix/Makefile @@ -6,8 +6,7 @@ include $(top_srcdir)/$(subdir)/../Makefile.regress # Use 
special informix compatibility switch for all tests in this directory ECPG += -C INFORMIX -override LDFLAGS := -L../../compatlib $(LDFLAGS) -override LIBS := -lecpg_compat $(LIBS) +LDFLAGS_INTERNAL += -L../../compatlib -lecpg_compat TESTS = test_informix test_informix.c \ test_informix2 test_informix2.c \ diff --git a/src/interfaces/ecpg/test/compat_informix/dec_test.pgc b/src/interfaces/ecpg/test/compat_informix/dec_test.pgc index b374bda724..f6a9f425d6 100644 --- a/src/interfaces/ecpg/test/compat_informix/dec_test.pgc +++ b/src/interfaces/ecpg/test/compat_informix/dec_test.pgc @@ -7,14 +7,7 @@ exec sql include ../regression; - -/* - -NOTE: This file has a different expect file for regression tests on MinGW32 - -*/ - - +exec sql include ../printf_hack; /* @@ -115,7 +108,9 @@ main(void) /* this is a libc problem since we only call strtod() */ r = dectodbl(dec, &dbl); if (r) check_errno(); - printf("dec[%d,10]: %g (r: %d)\n", i, r?0.0:dbl, r); + printf("dec[%d,10]: ", i); + print_double(r ? 0.0 : dbl); + printf(" (r: %d)\n", r); } PGTYPESdecimal_free(din); @@ -206,7 +201,7 @@ main(void) } free(decarr); - return (0); + return 0; } static void diff --git a/src/interfaces/ecpg/test/compat_informix/describe.pgc b/src/interfaces/ecpg/test/compat_informix/describe.pgc index 6fcccc6ab4..4ee7254dff 100644 --- a/src/interfaces/ecpg/test/compat_informix/describe.pgc +++ b/src/interfaces/ecpg/test/compat_informix/describe.pgc @@ -195,5 +195,5 @@ exec sql end declare section; strcpy(msg, "disconnect"); exec sql disconnect; - return (0); + return 0; } diff --git a/src/interfaces/ecpg/test/compat_informix/rfmtdate.pgc b/src/interfaces/ecpg/test/compat_informix/rfmtdate.pgc index c799de6762..a147f405ab 100644 --- a/src/interfaces/ecpg/test/compat_informix/rfmtdate.pgc +++ b/src/interfaces/ecpg/test/compat_informix/rfmtdate.pgc @@ -13,7 +13,7 @@ static void check_return(int ret); static void -date_test_strdate(char *input) +date_test_strdate(const char *input) { static int i; date d; @@ -38,7 +38,7 @@ date_test_strdate(char *input) } static void -date_test_defmt(char *fmt, char *input) +date_test_defmt(const char *fmt, const char *input) { static int i; char dbuf[11]; @@ -63,7 +63,7 @@ date_test_defmt(char *fmt, char *input) } static void -date_test_fmt(date d, char *fmt) +date_test_fmt(date d, const char *fmt) { static int i; char buf[200]; @@ -147,7 +147,7 @@ main(void) /* ECPG_INFORMIX_BAD_YEAR */ /* ??? 
*/ - return (0); + return 0; } static void diff --git a/src/interfaces/ecpg/test/compat_informix/rfmtlong.pgc b/src/interfaces/ecpg/test/compat_informix/rfmtlong.pgc index 162b42505f..2ecf09c837 100644 --- a/src/interfaces/ecpg/test/compat_informix/rfmtlong.pgc +++ b/src/interfaces/ecpg/test/compat_informix/rfmtlong.pgc @@ -13,7 +13,7 @@ static void check_return(int ret); static void -fmtlong(long lng, char *fmt) +fmtlong(long lng, const char *fmt) { static int i; int r; @@ -45,7 +45,7 @@ main(void) fmtlong(-8494493, "abc: ################+-+"); fmtlong(-8494493, "+<<<<,<<<,<<<,<<<"); - return (0); + return 0; } static void diff --git a/src/interfaces/ecpg/test/compat_informix/sqlda.pgc b/src/interfaces/ecpg/test/compat_informix/sqlda.pgc index e1142d2b22..87e0110aed 100644 --- a/src/interfaces/ecpg/test/compat_informix/sqlda.pgc +++ b/src/interfaces/ecpg/test/compat_informix/sqlda.pgc @@ -37,7 +37,7 @@ dump_sqlda(sqlda_t *sqlda) printf("name sqlda descriptor: '%s' value %d\n", sqlda->sqlvar[i].sqlname, *(int *)sqlda->sqlvar[i].sqldata); break; case SQLFLOAT: - printf("name sqlda descriptor: '%s' value %lf\n", sqlda->sqlvar[i].sqlname, *(double *)sqlda->sqlvar[i].sqldata); + printf("name sqlda descriptor: '%s' value %f\n", sqlda->sqlvar[i].sqlname, *(double *)sqlda->sqlvar[i].sqldata); break; case SQLDECIMAL: { @@ -246,5 +246,5 @@ exec sql end declare section; strcpy(msg, "disconnect"); exec sql disconnect; - return (0); + return 0; } diff --git a/src/interfaces/ecpg/test/compat_informix/test_informix2.pgc b/src/interfaces/ecpg/test/compat_informix/test_informix2.pgc index 0386093d70..5380f9eb5a 100644 --- a/src/interfaces/ecpg/test/compat_informix/test_informix2.pgc +++ b/src/interfaces/ecpg/test/compat_informix/test_informix2.pgc @@ -7,7 +7,7 @@ EXEC SQL include ../regression; EXEC SQL DEFINE MAXDBLEN 30; /* Check SQLCODE, and produce a "standard error" if it's wrong! */ -static void sql_check(char *fn, char *caller, int ignore) +static void sql_check(const char *fn, const char *caller, int ignore) { char errorstring[255]; diff --git a/src/interfaces/ecpg/test/compat_oracle/.gitignore b/src/interfaces/ecpg/test/compat_oracle/.gitignore new file mode 100644 index 0000000000..63b37660ea --- /dev/null +++ b/src/interfaces/ecpg/test/compat_oracle/.gitignore @@ -0,0 +1,2 @@ +/char_array +/char_array.c diff --git a/src/interfaces/ecpg/test/compat_oracle/Makefile b/src/interfaces/ecpg/test/compat_oracle/Makefile new file mode 100644 index 0000000000..cd4e7e8ac4 --- /dev/null +++ b/src/interfaces/ecpg/test/compat_oracle/Makefile @@ -0,0 +1,11 @@ +subdir = src/interfaces/ecpg/test/compat_oracle +top_builddir = ../../../../.. +include $(top_builddir)/src/Makefile.global +include $(top_srcdir)/$(subdir)/../Makefile.regress + +# Use special oracle compatibility switch for all tests in this directory +ECPG += -C ORACLE + +TESTS = char_array char_array.c + +all: $(TESTS) diff --git a/src/interfaces/ecpg/test/compat_oracle/char_array.pgc b/src/interfaces/ecpg/test/compat_oracle/char_array.pgc new file mode 100644 index 0000000000..8d7d9bf2d1 --- /dev/null +++ b/src/interfaces/ecpg/test/compat_oracle/char_array.pgc @@ -0,0 +1,64 @@ +#include +#include +#include + +EXEC SQL INCLUDE ../regression; + +static void warn(void) +{ + fprintf(stderr, "Warning: At least one column was truncated\n"); +} + +/* Compatible handling of char array to retrieve varchar field to char array + should be fixed-length, blank-padded, then null-terminated. + Conforms to the ANSI Fixed Character type. 
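+   For example, fetching 'ABCDE' into a char[5] host variable yields "ABCD"
+   with indicator 5 (the untruncated length), while 'AB' comes back
+   blank-padded as "AB  " with indicator 0; see compat_oracle-char_array.stdout.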
*/ + +int main() { + + EXEC SQL WHENEVER SQLWARNING do warn(); + EXEC SQL WHENEVER SQLERROR SQLPRINT; + + const char *ppppp = "XXXXX"; + + EXEC SQL BEGIN DECLARE SECTION; + char shortstr[5]; + char bigstr[11]; + short shstr_ind = 0; + short bigstr_ind = 0; + EXEC SQL END DECLARE SECTION; + + ECPGdebug(1, stderr); + EXEC SQL CONNECT TO REGRESSDB1; + + EXEC SQL CREATE TABLE strdbase (strval varchar(10)); + EXEC SQL INSERT INTO strdbase values (''); + EXEC SQL INSERT INTO strdbase values ('AB'); + EXEC SQL INSERT INTO strdbase values ('ABCD'); + EXEC SQL INSERT INTO strdbase values ('ABCDE'); + EXEC SQL INSERT INTO strdbase values ('ABCDEF'); + EXEC SQL INSERT INTO strdbase values ('ABCDEFGHIJ'); + + EXEC SQL declare C cursor for select strval, strval from strdbase; + EXEC SQL OPEN C; + + EXEC SQL WHENEVER NOT FOUND DO BREAK; + + printf("Full Str. : Short Ind.\n"); + while(1) { + strncpy(shortstr, ppppp, sizeof shortstr); + memset(bigstr, 0, sizeof bigstr); + EXEC SQL FETCH C into :bigstr :bigstr_ind, :shortstr :shstr_ind; + printf("\"%s\": \"%s\" %d\n", bigstr, shortstr, shstr_ind); + } + + EXEC SQL CLOSE C; + EXEC SQL DROP TABLE strdbase; + + printf("\nGOOD-BYE!!\n\n"); + + EXEC SQL COMMIT WORK; + + EXEC SQL DISCONNECT ALL; + + return 0; +} diff --git a/src/interfaces/ecpg/test/connect/test1.pgc b/src/interfaces/ecpg/test/connect/test1.pgc index 4868b3dd81..101b806d5b 100644 --- a/src/interfaces/ecpg/test/connect/test1.pgc +++ b/src/interfaces/ecpg/test/connect/test1.pgc @@ -54,12 +54,12 @@ exec sql end declare section; exec sql disconnect; /* wrong port */ - exec sql connect to tcp:postgresql://localhost:20/ecpg2_regression user regress_ecpg_user1 identified by connectpw; + exec sql connect to tcp:postgresql://127.0.0.1:20/ecpg2_regression user regress_ecpg_user1 identified by connectpw; /* no disconnect necessary */ /* wrong password */ exec sql connect to unix:postgresql://localhost/ecpg2_regression user regress_ecpg_user1 identified by "wrongpw"; /* no disconnect necessary */ - return (0); + return 0; } diff --git a/src/interfaces/ecpg/test/connect/test2.pgc b/src/interfaces/ecpg/test/connect/test2.pgc index 0ced76ec6e..f31a7f9bb0 100644 --- a/src/interfaces/ecpg/test/connect/test2.pgc +++ b/src/interfaces/ecpg/test/connect/test2.pgc @@ -42,5 +42,5 @@ exec sql end declare section; /* disconnect from "second" */ exec sql disconnect; - return (0); + return 0; } diff --git a/src/interfaces/ecpg/test/connect/test3.pgc b/src/interfaces/ecpg/test/connect/test3.pgc index ecf68d42ac..5d075f0e99 100644 --- a/src/interfaces/ecpg/test/connect/test3.pgc +++ b/src/interfaces/ecpg/test/connect/test3.pgc @@ -48,5 +48,5 @@ exec sql end declare section; * are used in other tests */ - return (0); + return 0; } diff --git a/src/interfaces/ecpg/test/connect/test4.pgc b/src/interfaces/ecpg/test/connect/test4.pgc index 185582ca2a..b20b17471c 100644 --- a/src/interfaces/ecpg/test/connect/test4.pgc +++ b/src/interfaces/ecpg/test/connect/test4.pgc @@ -16,5 +16,5 @@ main(void) exec sql disconnect DEFAULT; - return (0); + return 0; } diff --git a/src/interfaces/ecpg/test/connect/test5.pgc b/src/interfaces/ecpg/test/connect/test5.pgc index d64ca50c93..53b86556b1 100644 --- a/src/interfaces/ecpg/test/connect/test5.pgc +++ b/src/interfaces/ecpg/test/connect/test5.pgc @@ -72,5 +72,5 @@ exec sql end declare section; /* not connected */ exec sql disconnect nonexistant; - return (0); + return 0; } diff --git a/src/interfaces/ecpg/test/ecpg_schedule b/src/interfaces/ecpg/test/ecpg_schedule index c3ec125c36..991b8cb5e2 
100644 --- a/src/interfaces/ecpg/test/ecpg_schedule +++ b/src/interfaces/ecpg/test/ecpg_schedule @@ -7,6 +7,7 @@ test: compat_informix/sqlda test: compat_informix/describe test: compat_informix/test_informix test: compat_informix/test_informix2 +test: compat_oracle/char_array test: connect/test2 test: connect/test3 test: connect/test4 @@ -28,6 +29,7 @@ test: preproc/type test: preproc/variable test: preproc/outofscope test: preproc/whenever +test: preproc/whenever_do_continue test: sql/array test: sql/binary test: sql/code100 diff --git a/src/interfaces/ecpg/test/ecpg_schedule_tcp b/src/interfaces/ecpg/test/ecpg_schedule_tcp deleted file mode 100644 index 77481b5147..0000000000 --- a/src/interfaces/ecpg/test/ecpg_schedule_tcp +++ /dev/null @@ -1,55 +0,0 @@ -test: compat_informix/dec_test -test: compat_informix/charfuncs -test: compat_informix/rfmtdate -test: compat_informix/rfmtlong -test: compat_informix/rnull -test: compat_informix/sqlda -test: compat_informix/describe -test: compat_informix/test_informix -test: compat_informix/test_informix2 -test: connect/test2 -test: connect/test3 -test: connect/test4 -test: connect/test5 -test: pgtypeslib/dt_test -test: pgtypeslib/dt_test2 -test: pgtypeslib/num_test -test: pgtypeslib/num_test2 -test: pgtypeslib/nan_test -test: preproc/array_of_struct -test: preproc/autoprep -test: preproc/comment -test: preproc/cursor -test: preproc/define -test: preproc/init -test: preproc/strings -test: preproc/type -test: preproc/variable -test: preproc/outofscope -test: preproc/whenever -test: sql/array -test: sql/binary -test: sql/code100 -test: sql/copystdout -test: sql/define -test: sql/desc -test: sql/sqlda -test: sql/describe -test: sql/dynalloc -test: sql/dynalloc2 -test: sql/dyntest -test: sql/execute -test: sql/fetch -test: sql/func -test: sql/indicators -test: sql/oldexec -test: sql/quote -test: sql/show -test: sql/insupd -test: sql/parser -test: thread/thread -test: thread/thread_implicit -test: thread/prep -test: thread/alloc -test: thread/descriptor -test: connect/test1 diff --git a/src/interfaces/ecpg/test/expected/compat_informix-dec_test-MinGW32.stdout b/src/interfaces/ecpg/test/expected/compat_informix-dec_test-MinGW32.stdout deleted file mode 100644 index 91faa2c04c..0000000000 --- a/src/interfaces/ecpg/test/expected/compat_informix-dec_test-MinGW32.stdout +++ /dev/null @@ -1,1293 +0,0 @@ -(no errno set) - dec[0,1]: r: -1, * -(no errno set) - dec[0,2]: r: -1, * -(no errno set) - dec[0,3]: r: -1, * -(no errno set) - dec[0,4]: r: -1, * -dec[0,5]: r: 0, 0.00 -(errno == PGTYPES_NUM_OVERFLOW) - dec[0,6]: 0 (r: -1) -(errno == PGTYPES_NUM_OVERFLOW) - dec[0,8]: 0 (r: -1) -(errno == PGTYPES_NUM_OVERFLOW) - dec[0,10]: 0 (r: -1) - -dec[1,1]: r: 0, -2 -dec[1,2]: r: 0, -2 -dec[1,3]: r: 0, -2.0 -dec[1,4]: r: 0, -2.00 -dec[1,5]: r: 0, 0.00 -dec[1,6]: -2 (r: 0) -dec[1,7]: -2.00 (r: 0 - cmp: 0) -dec[1,8]: -2 (r: 0) -dec[1,9]: -2.00 (r: 0 - cmp: 0) -dec[1,10]: -2 (r: 0) - -dec[2,1]: r: 0, 0.794 -dec[2,2]: r: 0, 1 -dec[2,3]: r: 0, 0.8 -dec[2,4]: r: 0, 0.79 -dec[2,5]: r: 0, 0.00 -dec[2,6]: 1 (r: 0) -dec[2,7]: 1.00 (r: 0 - cmp: -1) -dec[2,8]: 1 (r: 0) -dec[2,9]: 1.00 (r: 0 - cmp: -1) -dec[2,10]: 0.794 (r: 0) - -dec[3,1]: r: 0, 3.44 -dec[3,2]: r: 0, 3 -dec[3,3]: r: 0, 3.4 -dec[3,4]: r: 0, 3.44 -dec[3,5]: r: 0, 0.00 -dec[3,6]: 3 (r: 0) -dec[3,7]: 3.00 (r: 0 - cmp: 1) -dec[3,8]: 3 (r: 0) -dec[3,9]: 3.00 (r: 0 - cmp: 1) -dec[3,10]: 3.44 (r: 0) - -dec[4,1]: r: 0, 592490000000000000000000 -dec[4,2]: r: 0, 592490000000000000000000 -dec[4,3]: r: 0, 592490000000000000000000.0 
-dec[4,4]: r: 0, 592490000000000000000000.00 -dec[4,5]: r: 0, 0.00 -(errno == PGTYPES_NUM_OVERFLOW) - dec[4,6]: 0 (r: -1) -(errno == PGTYPES_NUM_OVERFLOW) - dec[4,8]: 0 (r: -1) -dec[4,10]: 5.9249e+023 (r: 0) - -dec[5,1]: r: 0, -328400 -dec[5,2]: r: 0, -328400 -dec[5,3]: r: 0, -328400.0 -dec[5,4]: r: 0, -328400.00 -dec[5,5]: r: 0, 0.00 -dec[5,6]: -328400 (r: 0) -dec[5,7]: -328400.00 (r: 0 - cmp: 0) -dec[5,8]: -328400 (r: 0) -dec[5,9]: -328400.00 (r: 0 - cmp: 0) -dec[5,10]: -328400 (r: 0) - -(no errno set) - dec[6,1]: r: -1, * -dec[6,2]: r: 0, 0 -dec[6,3]: r: 0, 0.0 -dec[6,4]: r: 0, 0.00 -dec[6,5]: r: 0, 0.00 -dec[6,6]: 0 (r: 0) -dec[6,7]: 0.00 (r: 0 - cmp: 1) -dec[6,8]: 0 (r: 0) -dec[6,9]: 0.00 (r: 0 - cmp: 1) - -dec[7,1]: r: 0, 0.001 -dec[7,2]: r: 0, 0 -dec[7,3]: r: 0, 0.0 -dec[7,4]: r: 0, 0.00 -dec[7,5]: r: 0, 0.00 -dec[7,6]: 0 (r: 0) -dec[7,7]: 0.00 (r: 0 - cmp: 1) -dec[7,8]: 0 (r: 0) -dec[7,9]: 0.00 (r: 0 - cmp: 1) -dec[7,10]: 0.001 (r: 0) - -dec[8,1]: r: 0, 0.0 -dec[8,2]: r: 0, 0 -dec[8,3]: r: 0, 0.0 -dec[8,4]: r: 0, 0.00 -dec[8,5]: r: 0, 0.00 -dec[8,6]: 0 (r: 0) -dec[8,7]: 0.00 (r: 0 - cmp: 0) -dec[8,8]: 0 (r: 0) -dec[8,9]: 0.00 (r: 0 - cmp: 0) -dec[8,10]: 0 (r: 0) - -dec[9,1]: r: 0, -0.000059249 -dec[9,2]: r: 0, -0 -dec[9,3]: r: 0, -0.0 -dec[9,4]: r: 0, -0.00 -dec[9,5]: r: 0, 0.00 -dec[9,6]: 0 (r: 0) -dec[9,7]: 0.00 (r: 0 - cmp: -1) -dec[9,8]: 0 (r: 0) -dec[9,9]: 0.00 (r: 0 - cmp: -1) -dec[9,10]: -5.9249e-005 (r: 0) - -dec[10,1]: r: 0, 0.003284 -dec[10,2]: r: 0, 0 -dec[10,3]: r: 0, 0.0 -dec[10,4]: r: 0, 0.00 -dec[10,5]: r: 0, 0.00 -dec[10,6]: 0 (r: 0) -dec[10,7]: 0.00 (r: 0 - cmp: 1) -dec[10,8]: 0 (r: 0) -dec[10,9]: 0.00 (r: 0 - cmp: 1) -dec[10,10]: 0.003284 (r: 0) - -dec[11,1]: r: 0, 0.500001 -dec[11,2]: r: 0, 1 -dec[11,3]: r: 0, 0.5 -dec[11,4]: r: 0, 0.50 -dec[11,5]: r: 0, 0.00 -dec[11,6]: 1 (r: 0) -dec[11,7]: 1.00 (r: 0 - cmp: -1) -dec[11,8]: 1 (r: 0) -dec[11,9]: 1.00 (r: 0 - cmp: -1) -dec[11,10]: 0.500001 (r: 0) - -dec[12,1]: r: 0, -0.5000001 -dec[12,2]: r: 0, -1 -dec[12,3]: r: 0, -0.5 -dec[12,4]: r: 0, -0.50 -dec[12,5]: r: 0, 0.00 -dec[12,6]: -1 (r: 0) -dec[12,7]: -1.00 (r: 0 - cmp: 1) -dec[12,8]: -1 (r: 0) -dec[12,9]: -1.00 (r: 0 - cmp: 1) -dec[12,10]: -0.5 (r: 0) - -dec[13,1]: r: 0, 1234567890123456789012345678.91 -dec[13,2]: r: 0, 1234567890123456789012345679 -dec[13,3]: r: 0, 1234567890123456789012345678.9 -dec[13,4]: r: 0, 1234567890123456789012345678.91 -dec[13,5]: r: 0, 0.00 -(errno == PGTYPES_NUM_OVERFLOW) - dec[13,6]: 0 (r: -1) -(errno == PGTYPES_NUM_OVERFLOW) - dec[13,8]: 0 (r: -1) -dec[13,10]: 1.23457e+027 (r: 0) - -(errno == PGTYPES_NUM_OVERFLOW) - dec[14,0]: r: -1200 -(errno == PGTYPES_NUM_BAD_NUMERIC) - dec[15,0]: r: -1213 -dec[14]: NULL -dec[0]: NOT NULL -(errno == PGTYPES_NUM_BAD_NUMERIC) - dectoasc with len == -1: r: -1 -(errno == PGTYPES_NUM_BAD_NUMERIC) - dectoasc with len == 0: r: -1 -dec[c,0,0]: 0 -dec[a,0,0]: * -dec[s,0,0]: 0 -dec[m,0,0]: * -dec[d,0,0]: 1.00000000000000000 -dec[c,0,1]: 1 -(errno == PGTYPES_NUM_OVERFLOW) - r: -1200 -dec[s,0,1]: -dec[m,0,1]: * -dec[d,0,1]: * -dec[c,0,2]: 1 -(errno == PGTYPES_NUM_OVERFLOW) - r: -1200 -dec[s,0,2]: -dec[m,0,2]: * -dec[d,0,2]: -dec[c,0,3]: 1 -(errno == PGTYPES_NUM_OVERFLOW) - r: -1200 -dec[s,0,3]: -dec[m,0,3]: * -dec[d,0,3]: -dec[c,0,4]: 1 -(errno == PGTYPES_NUM_OVERFLOW) - r: -1200 -dec[s,0,4]: -dec[m,0,4]: * -dec[d,0,4]: -dec[c,0,5]: 1 -(errno == PGTYPES_NUM_OVERFLOW) - r: -1200 -dec[s,0,5]: -dec[m,0,5]: * -dec[d,0,5]: -dec[c,0,6]: 1 -(errno == PGTYPES_NUM_OVERFLOW) - r: -1200 -dec[s,0,6]: -dec[m,0,6]: * 
-dec[d,0,6]: * -dec[c,0,7]: 1 -(errno == PGTYPES_NUM_OVERFLOW) - r: -1200 -dec[s,0,7]: -dec[m,0,7]: * -dec[d,0,7]: * -dec[c,0,8]: 1 -dec[a,0,8]: * -dec[s,0,8]: * -dec[m,0,8]: 0.0 -(errno == PGTYPES_NUM_DIVIDE_ZERO) - r: -1202 -dec[c,0,9]: 1 -(errno == PGTYPES_NUM_OVERFLOW) - r: -1200 -dec[s,0,9]: -dec[m,0,9]: * -dec[d,0,9]: -dec[c,0,10]: 1 -(errno == PGTYPES_NUM_OVERFLOW) - r: -1200 -dec[s,0,10]: -dec[m,0,10]: * -dec[d,0,10]: -dec[c,0,11]: 1 -(errno == PGTYPES_NUM_OVERFLOW) - r: -1200 -dec[s,0,11]: -dec[m,0,11]: * -dec[d,0,11]: -dec[c,0,12]: 1 -(errno == PGTYPES_NUM_OVERFLOW) - r: -1200 -dec[s,0,12]: -dec[m,0,12]: * -dec[d,0,12]: -dec[c,0,13]: 1 -(errno == PGTYPES_NUM_OVERFLOW) - r: -1200 -dec[s,0,13]: -dec[m,0,13]: * -dec[d,0,13]: -dec[c,0,14]: 2147483647 -dec[a,0,14]: -dec[s,0,14]: -dec[m,0,14]: * -dec[d,0,14]: -dec[c,1,0]: -1 -(errno == PGTYPES_NUM_OVERFLOW) - r: -1200 -dec[s,1,0]: -dec[m,1,0]: * -dec[d,1,0]: * -dec[c,1,1]: 0 -dec[a,1,1]: -4 -dec[s,1,1]: 0 -dec[m,1,1]: 4 -dec[d,1,1]: 1.00000000000000000 -dec[c,1,2]: -1 -dec[a,1,2]: -1.206 -dec[s,1,2]: -2.794 -dec[m,1,2]: -1.588 -dec[d,1,2]: -2.5188916876574307 -dec[c,1,3]: -1 -dec[a,1,3]: 1.44 -dec[s,1,3]: -5.44 -dec[m,1,3]: -6.88 -dec[d,1,3]: -0.58139534883720930 -dec[c,1,4]: -1 -dec[a,1,4]: 592489999999999999999998 -dec[s,1,4]: -592490000000000000000002 -dec[m,1,4]: -1184980000000000000000000 -dec[d,1,4]: -0.0000000000000000000000033755843980489122 -dec[c,1,5]: 1 -dec[a,1,5]: -328402 -dec[s,1,5]: 328398 -dec[m,1,5]: 656800 -dec[d,1,5]: 0.0000060901339829476248 -dec[c,1,6]: -1 -(errno == PGTYPES_NUM_OVERFLOW) - r: -1200 -dec[s,1,6]: -dec[m,1,6]: * -dec[d,1,6]: * -dec[c,1,7]: -1 -dec[a,1,7]: -1.999 -dec[s,1,7]: -2.001 -dec[m,1,7]: -0.002 -dec[d,1,7]: -2000.0000000000000 -dec[c,1,8]: -1 -dec[a,1,8]: -2.0 -dec[s,1,8]: -2.0 -dec[m,1,8]: 0.0 -(errno == PGTYPES_NUM_DIVIDE_ZERO) - r: -1202 -dec[c,1,9]: -1 -dec[a,1,9]: -2.000059249 -dec[s,1,9]: -1.999940751 -dec[m,1,9]: 0.000118498 -dec[d,1,9]: 33755.843980489122 -dec[c,1,10]: -1 -dec[a,1,10]: -1.996716 -dec[s,1,10]: -2.003284 -dec[m,1,10]: -0.006568 -dec[d,1,10]: -609.01339829476248 -dec[c,1,11]: -1 -dec[a,1,11]: -1.499999 -dec[s,1,11]: -2.500001 -dec[m,1,11]: -1.000002 -dec[d,1,11]: -3.9999920000160000 -dec[c,1,12]: -1 -dec[a,1,12]: -2.5000001 -dec[s,1,12]: -1.4999999 -dec[m,1,12]: 1.0000002 -dec[d,1,12]: 3.9999992000001600 -dec[c,1,13]: -1 -dec[a,1,13]: 1234567890123456789012345676.91 -dec[s,1,13]: -1234567890123456789012345680.91 -dec[m,1,13]: -2469135780246913578024691357.82 -dec[d,1,13]: -0.0000000000000000000000000016200000145800001 -dec[c,1,14]: 2147483647 -dec[a,1,14]: 1234567890123456789012345676.91 -dec[s,1,14]: -1234567890123456789012345680.91 -dec[m,1,14]: -2469135780246913578024691357.82 -dec[d,1,14]: -0.0000000000000000000000000016200000145800001 -dec[c,2,0]: -1 -(errno == PGTYPES_NUM_OVERFLOW) - r: -1200 -dec[s,2,0]: -dec[m,2,0]: * -dec[d,2,0]: * -dec[c,2,1]: 1 -dec[a,2,1]: -1.206 -dec[s,2,1]: 2.794 -dec[m,2,1]: -1.588 -dec[d,2,1]: -0.39700000000000000 -dec[c,2,2]: 0 -dec[a,2,2]: 1.588 -dec[s,2,2]: 0.000 -dec[m,2,2]: 0.630436 -dec[d,2,2]: 1.00000000000000000 -dec[c,2,3]: -1 -dec[a,2,3]: 4.234 -dec[s,2,3]: -2.646 -dec[m,2,3]: 2.73136 -dec[d,2,3]: 0.23081395348837209 -dec[c,2,4]: -1 -dec[a,2,4]: 592490000000000000000000.794 -dec[s,2,4]: -592489999999999999999999.206 -dec[m,2,4]: 470437060000000000000000.000 -dec[d,2,4]: 0.0000000000000000000000013401070060254182 -dec[c,2,5]: 1 -dec[a,2,5]: -328399.206 -dec[s,2,5]: 328400.794 -dec[m,2,5]: -260749.600 -dec[d,2,5]: 
-0.0000024177831912302071 -dec[c,2,6]: 1 -(errno == PGTYPES_NUM_OVERFLOW) - r: -1200 -dec[s,2,6]: -dec[m,2,6]: * -dec[d,2,6]: * -dec[c,2,7]: 1 -dec[a,2,7]: 0.795 -dec[s,2,7]: 0.793 -dec[m,2,7]: 0.000794 -dec[d,2,7]: 794.00000000000000 -dec[c,2,8]: 1 -dec[a,2,8]: 0.794 -dec[s,2,8]: 0.794 -dec[m,2,8]: 0.0000 -(errno == PGTYPES_NUM_DIVIDE_ZERO) - r: -1202 -dec[c,2,9]: 1 -dec[a,2,9]: 0.793940751 -dec[s,2,9]: 0.794059249 -dec[m,2,9]: -0.000047043706 -dec[d,2,9]: -13401.070060254182 -dec[c,2,10]: 1 -dec[a,2,10]: 0.797284 -dec[s,2,10]: 0.790716 -dec[m,2,10]: 0.002607496 -dec[d,2,10]: 241.77831912302071 -dec[c,2,11]: 1 -dec[a,2,11]: 1.294001 -dec[s,2,11]: 0.293999 -dec[m,2,11]: 0.397000794 -dec[d,2,11]: 1.5879968240063520 -dec[c,2,12]: 1 -dec[a,2,12]: 0.2939999 -dec[s,2,12]: 1.2940001 -dec[m,2,12]: -0.3970000794 -dec[d,2,12]: -1.5879996824000635 -dec[c,2,13]: -1 -(errno == PGTYPES_NUM_OVERFLOW) - r: -1200 -dec[s,2,13]: -dec[m,2,13]: -dec[d,2,13]: 0.00000000000000000000000000064314000578826005 -dec[c,2,14]: 2147483647 -dec[a,2,14]: -dec[s,2,14]: -dec[m,2,14]: -dec[d,2,14]: 0.00000000000000000000000000064314000578826005 -dec[c,3,0]: -1 -(errno == PGTYPES_NUM_OVERFLOW) - r: -1200 -dec[s,3,0]: -dec[m,3,0]: * -dec[d,3,0]: * -dec[c,3,1]: 1 -dec[a,3,1]: 1.44 -dec[s,3,1]: 5.44 -dec[m,3,1]: -6.88 -dec[d,3,1]: -1.7200000000000000 -dec[c,3,2]: 1 -dec[a,3,2]: 4.234 -dec[s,3,2]: 2.646 -dec[m,3,2]: 2.73136 -dec[d,3,2]: 4.3324937027707809 -dec[c,3,3]: 0 -dec[a,3,3]: 6.88 -dec[s,3,3]: 0.00 -dec[m,3,3]: 11.8336 -dec[d,3,3]: 1.00000000000000000 -dec[c,3,4]: -1 -dec[a,3,4]: 592490000000000000000003.44 -dec[s,3,4]: -592489999999999999999996.56 -dec[m,3,4]: 2038165600000000000000000.00 -dec[d,3,4]: 0.0000000000000000000000058060051646441290 -dec[c,3,5]: 1 -dec[a,3,5]: -328396.56 -dec[s,3,5]: 328403.44 -dec[m,3,5]: -1129696.00 -dec[d,3,5]: -0.0000104750304506699147 -dec[c,3,6]: 1 -(errno == PGTYPES_NUM_OVERFLOW) - r: -1200 -dec[s,3,6]: -dec[m,3,6]: * -dec[d,3,6]: * -dec[c,3,7]: 1 -dec[a,3,7]: 3.441 -dec[s,3,7]: 3.439 -dec[m,3,7]: 0.00344 -dec[d,3,7]: 3440.0000000000000 -dec[c,3,8]: 1 -dec[a,3,8]: 3.44 -dec[s,3,8]: 3.44 -dec[m,3,8]: 0.000 -(errno == PGTYPES_NUM_DIVIDE_ZERO) - r: -1202 -dec[c,3,9]: 1 -dec[a,3,9]: 3.439940751 -dec[s,3,9]: 3.440059249 -dec[m,3,9]: -0.00020381656 -dec[d,3,9]: -58060.051646441290 -dec[c,3,10]: 1 -dec[a,3,10]: 3.443284 -dec[s,3,10]: 3.436716 -dec[m,3,10]: 0.01129696 -dec[d,3,10]: 1047.50304506699147 -dec[c,3,11]: 1 -dec[a,3,11]: 3.940001 -dec[s,3,11]: 2.939999 -dec[m,3,11]: 1.72000344 -dec[d,3,11]: 6.8799862400275199 -dec[c,3,12]: 1 -dec[a,3,12]: 2.9399999 -dec[s,3,12]: 3.9400001 -dec[m,3,12]: -1.720000344 -dec[d,3,12]: -6.8799986240002752 -dec[c,3,13]: -1 -dec[a,3,13]: 1234567890123456789012345682.35 -dec[s,3,13]: -1234567890123456789012345675.47 -dec[m,3,13]: -dec[d,3,13]: 0.0000000000000000000000000027864000250776002 -dec[c,3,14]: 2147483647 -dec[a,3,14]: 1234567890123456789012345682.35 -dec[s,3,14]: -1234567890123456789012345675.47 -dec[m,3,14]: -dec[d,3,14]: 0.0000000000000000000000000027864000250776002 -dec[c,4,0]: -1 -(errno == PGTYPES_NUM_OVERFLOW) - r: -1200 -dec[s,4,0]: -dec[m,4,0]: * -dec[d,4,0]: * -dec[c,4,1]: 1 -dec[a,4,1]: 592489999999999999999998 -dec[s,4,1]: 592490000000000000000002 -dec[m,4,1]: -1184980000000000000000000 -dec[d,4,1]: -296245000000000000000000 -dec[c,4,2]: 1 -dec[a,4,2]: 592490000000000000000000.794 -dec[s,4,2]: 592489999999999999999999.206 -dec[m,4,2]: 470437060000000000000000.000 -dec[d,4,2]: -dec[c,4,3]: 1 -dec[a,4,3]: 592490000000000000000003.44 
-dec[s,4,3]: 592489999999999999999996.56 -dec[m,4,3]: 2038165600000000000000000.00 -dec[d,4,3]: 172235465116279069767441.86 -dec[c,4,4]: 0 -dec[a,4,4]: 1184980000000000000000000 -dec[s,4,4]: 0 -dec[m,4,4]: 351044400100000000000000000000000000000000000000 -dec[d,4,4]: 1.00000000000000000 -dec[c,4,5]: 1 -dec[a,4,5]: 592489999999999999671600 -dec[s,4,5]: 592490000000000000328400 -dec[m,4,5]: -194573716000000000000000000000 -dec[d,4,5]: -1804171741778319123 -dec[c,4,6]: 1 -(errno == PGTYPES_NUM_OVERFLOW) - r: -1200 -dec[s,4,6]: -dec[m,4,6]: * -dec[d,4,6]: * -dec[c,4,7]: 1 -dec[a,4,7]: 592490000000000000000000.001 -dec[s,4,7]: 592489999999999999999999.999 -dec[m,4,7]: 592490000000000000000.000 -dec[d,4,7]: 592490000000000000000000000.000 -dec[c,4,8]: 1 -dec[a,4,8]: 592490000000000000000000.0 -dec[s,4,8]: 592490000000000000000000.0 -dec[m,4,8]: 0.0 -(errno == PGTYPES_NUM_DIVIDE_ZERO) - r: -1202 -dec[c,4,9]: 1 -(errno == PGTYPES_NUM_OVERFLOW) - r: -1200 -dec[s,4,9]: -dec[m,4,9]: -35104440010000000000.000000000 -dec[d,4,9]: -10000000000000000000000000000.000000000 -dec[c,4,10]: 1 -dec[a,4,10]: 592490000000000000000000.003284 -dec[s,4,10]: 592489999999999999999999.996716 -dec[m,4,10]: 1945737160000000000000.000000 -dec[d,4,10]: -dec[c,4,11]: 1 -dec[a,4,11]: 592490000000000000000000.500001 -dec[s,4,11]: 592489999999999999999999.499999 -dec[m,4,11]: 296245592490000000000000.000000 -dec[d,4,11]: -dec[c,4,12]: 1 -(errno == PGTYPES_NUM_OVERFLOW) - r: -1200 -dec[s,4,12]: -dec[m,4,12]: -296245059249000000000000.0000000 -dec[d,4,12]: -dec[c,4,13]: -1 -dec[a,4,13]: 1235160380123456789012345678.91 -dec[s,4,13]: -1233975400123456789012345678.91 -dec[m,4,13]: -dec[d,4,13]: 0.00047991690431925214 -dec[c,4,14]: 2147483647 -dec[a,4,14]: 1235160380123456789012345678.91 -dec[s,4,14]: -1233975400123456789012345678.91 -dec[m,4,14]: -dec[d,4,14]: 0.00047991690431925214 -dec[c,5,0]: -1 -(errno == PGTYPES_NUM_OVERFLOW) - r: -1200 -dec[s,5,0]: -dec[m,5,0]: * -dec[d,5,0]: * -dec[c,5,1]: -1 -dec[a,5,1]: -328402 -dec[s,5,1]: -328398 -dec[m,5,1]: 656800 -dec[d,5,1]: 164200.00000000000 -dec[c,5,2]: -1 -dec[a,5,2]: -328399.206 -dec[s,5,2]: -328400.794 -dec[m,5,2]: -260749.600 -dec[d,5,2]: -413602.01511335013 -dec[c,5,3]: -1 -dec[a,5,3]: -328396.56 -dec[s,5,3]: -328403.44 -dec[m,5,3]: -1129696.00 -dec[d,5,3]: -95465.116279069767 -dec[c,5,4]: -1 -dec[a,5,4]: 592489999999999999671600 -dec[s,5,4]: -592490000000000000328400 -dec[m,5,4]: -194573716000000000000000000000 -dec[d,5,4]: -0.00000000000000000055427095815963139 -dec[c,5,5]: 0 -dec[a,5,5]: -656800 -dec[s,5,5]: 0 -dec[m,5,5]: 107846560000 -dec[d,5,5]: 1.00000000000000000 -dec[c,5,6]: -1 -(errno == PGTYPES_NUM_OVERFLOW) - r: -1200 -dec[s,5,6]: -dec[m,5,6]: * -dec[d,5,6]: * -dec[c,5,7]: -1 -dec[a,5,7]: -328399.999 -dec[s,5,7]: -328400.001 -dec[m,5,7]: -328.400 -dec[d,5,7]: -328400000.00000000 -dec[c,5,8]: -1 -dec[a,5,8]: -328400.0 -dec[s,5,8]: -328400.0 -dec[m,5,8]: 0.0 -(errno == PGTYPES_NUM_DIVIDE_ZERO) - r: -1202 -dec[c,5,9]: -1 -dec[a,5,9]: -328400.000059249 -dec[s,5,9]: -328399.999940751 -dec[m,5,9]: 19.457371600 -dec[d,5,9]: 5542709581.596313862 -dec[c,5,10]: -1 -dec[a,5,10]: -328399.996716 -dec[s,5,10]: -328400.003284 -dec[m,5,10]: -1078.465600 -dec[d,5,10]: -100000000.000000000 -dec[c,5,11]: -1 -dec[a,5,11]: -328399.499999 -dec[s,5,11]: -328400.500001 -dec[m,5,11]: -164200.328400 -dec[d,5,11]: -656798.68640262719 -dec[c,5,12]: -1 -dec[a,5,12]: -328400.5000001 -dec[s,5,12]: -328399.4999999 -dec[m,5,12]: 164200.0328400 -dec[d,5,12]: 656799.86864002627 -dec[c,5,13]: -1 
-dec[a,5,13]: 1234567890123456789012017278.91 -dec[s,5,13]: -1234567890123456789012674078.91 -dec[m,5,13]: -dec[d,5,13]: -0.00000000000000000000026600400239403602 -dec[c,5,14]: 2147483647 -dec[a,5,14]: 1234567890123456789012017278.91 -dec[s,5,14]: -1234567890123456789012674078.91 -dec[m,5,14]: -dec[d,5,14]: -0.00000000000000000000026600400239403602 -dec[c,6,0]: -1 -(errno == PGTYPES_NUM_OVERFLOW) - r: -1200 -dec[s,6,0]: -dec[m,6,0]: * -dec[d,6,0]: * -dec[c,6,1]: 1 -(errno == PGTYPES_NUM_OVERFLOW) - r: -1200 -dec[s,6,1]: -dec[m,6,1]: * -dec[d,6,1]: * -dec[c,6,2]: -1 -(errno == PGTYPES_NUM_OVERFLOW) - r: -1200 -dec[s,6,2]: -dec[m,6,2]: * -dec[d,6,2]: * -dec[c,6,3]: -1 -(errno == PGTYPES_NUM_OVERFLOW) - r: -1200 -dec[s,6,3]: -dec[m,6,3]: * -dec[d,6,3]: * -dec[c,6,4]: -1 -(errno == PGTYPES_NUM_OVERFLOW) - r: -1200 -dec[s,6,4]: -dec[m,6,4]: * -dec[d,6,4]: * -dec[c,6,5]: 1 -(errno == PGTYPES_NUM_OVERFLOW) - r: -1200 -dec[s,6,5]: -dec[m,6,5]: * -dec[d,6,5]: * -dec[c,6,6]: 0 -dec[a,6,6]: * -dec[s,6,6]: * -dec[m,6,6]: * -dec[d,6,6]: * -dec[c,6,7]: -1 -(errno == PGTYPES_NUM_OVERFLOW) - r: -1200 -dec[s,6,7]: -dec[m,6,7]: * -dec[d,6,7]: * -dec[c,6,8]: 1 -dec[a,6,8]: * -dec[s,6,8]: * -dec[m,6,8]: * -(errno == PGTYPES_NUM_DIVIDE_ZERO) - r: -1202 -dec[c,6,9]: 1 -(errno == PGTYPES_NUM_OVERFLOW) - r: -1200 -dec[s,6,9]: -dec[m,6,9]: * -dec[d,6,9]: * -dec[c,6,10]: -1 -(errno == PGTYPES_NUM_OVERFLOW) - r: -1200 -dec[s,6,10]: -dec[m,6,10]: * -dec[d,6,10]: * -dec[c,6,11]: -1 -(errno == PGTYPES_NUM_OVERFLOW) - r: -1200 -dec[s,6,11]: -dec[m,6,11]: * -dec[d,6,11]: * -dec[c,6,12]: 1 -(errno == PGTYPES_NUM_OVERFLOW) - r: -1200 -dec[s,6,12]: -dec[m,6,12]: * -dec[d,6,12]: * -dec[c,6,13]: -1 -(errno == PGTYPES_NUM_OVERFLOW) - r: -1200 -dec[s,6,13]: -dec[m,6,13]: * -dec[d,6,13]: * -dec[c,6,14]: 2147483647 -dec[a,6,14]: -dec[s,6,14]: -dec[m,6,14]: * -dec[d,6,14]: * -dec[c,7,0]: -1 -(errno == PGTYPES_NUM_OVERFLOW) - r: -1200 -dec[s,7,0]: -dec[m,7,0]: * -dec[d,7,0]: * -dec[c,7,1]: 1 -dec[a,7,1]: -1.999 -dec[s,7,1]: 2.001 -dec[m,7,1]: -0.002 -dec[d,7,1]: -0.00050000000000000000 -dec[c,7,2]: -1 -dec[a,7,2]: 0.795 -dec[s,7,2]: -0.793 -dec[m,7,2]: 0.000794 -dec[d,7,2]: 0.0012594458438287154 -dec[c,7,3]: -1 -dec[a,7,3]: 3.441 -dec[s,7,3]: -3.439 -dec[m,7,3]: 0.00344 -dec[d,7,3]: 0.00029069767441860465 -dec[c,7,4]: -1 -dec[a,7,4]: 592490000000000000000000.001 -dec[s,7,4]: -592489999999999999999999.999 -dec[m,7,4]: 592490000000000000000.000 -dec[d,7,4]: 0.0000000000000000000000000016877921990244561 -dec[c,7,5]: 1 -dec[a,7,5]: -328399.999 -dec[s,7,5]: 328400.001 -dec[m,7,5]: -328.400 -dec[d,7,5]: -0.0000000030450669914738124 -dec[c,7,6]: 1 -(errno == PGTYPES_NUM_OVERFLOW) - r: -1200 -dec[s,7,6]: -dec[m,7,6]: * -dec[d,7,6]: * -dec[c,7,7]: 0 -dec[a,7,7]: 0.002 -dec[s,7,7]: 0.000 -dec[m,7,7]: 0.000001 -dec[d,7,7]: 1.00000000000000000 -dec[c,7,8]: 1 -dec[a,7,8]: 0.001 -dec[s,7,8]: 0.001 -dec[m,7,8]: 0.0000 -(errno == PGTYPES_NUM_DIVIDE_ZERO) - r: -1202 -dec[c,7,9]: 1 -dec[a,7,9]: 0.000940751 -dec[s,7,9]: 0.001059249 -dec[m,7,9]: -0.000000059249 -dec[d,7,9]: -16.877921990244561 -dec[c,7,10]: -1 -dec[a,7,10]: 0.004284 -dec[s,7,10]: -0.002284 -dec[m,7,10]: 0.000003284 -dec[d,7,10]: 0.30450669914738124 -dec[c,7,11]: -1 -dec[a,7,11]: 0.501001 -dec[s,7,11]: -0.499001 -dec[m,7,11]: 0.000500001 -dec[d,7,11]: 0.0019999960000080000 -dec[c,7,12]: 1 -dec[a,7,12]: -0.4990001 -dec[s,7,12]: 0.5010001 -dec[m,7,12]: -0.0005000001 -dec[d,7,12]: -0.0019999996000000800 -dec[c,7,13]: -1 -(errno == PGTYPES_NUM_OVERFLOW) - r: -1200 -dec[s,7,13]: 
-dec[m,7,13]: 1234567890123456789012345.67891 -dec[d,7,13]: 0.00000000000000000000000000000081000000729000007 -dec[c,7,14]: 2147483647 -dec[a,7,14]: -dec[s,7,14]: -dec[m,7,14]: 1234567890123456789012345.67891 -dec[d,7,14]: 0.00000000000000000000000000000081000000729000007 -dec[c,8,0]: -1 -dec[a,8,0]: * -dec[s,8,0]: * -dec[m,8,0]: 0.0 -dec[d,8,0]: 0 -dec[c,8,1]: 1 -dec[a,8,1]: -2.0 -dec[s,8,1]: 2.0 -dec[m,8,1]: 0.0 -dec[d,8,1]: 0 -dec[c,8,2]: -1 -dec[a,8,2]: 0.794 -dec[s,8,2]: -0.794 -dec[m,8,2]: 0.0000 -dec[d,8,2]: 0 -dec[c,8,3]: -1 -dec[a,8,3]: 3.44 -dec[s,8,3]: -3.44 -dec[m,8,3]: 0.000 -dec[d,8,3]: 0 -dec[c,8,4]: -1 -dec[a,8,4]: 592490000000000000000000.0 -dec[s,8,4]: -592490000000000000000000.0 -dec[m,8,4]: 0.0 -dec[d,8,4]: 0 -dec[c,8,5]: 1 -dec[a,8,5]: -328400.0 -dec[s,8,5]: 328400.0 -dec[m,8,5]: 0.0 -dec[d,8,5]: 0 -dec[c,8,6]: -1 -dec[a,8,6]: * -dec[s,8,6]: * -dec[m,8,6]: * -dec[d,8,6]: 0 -dec[c,8,7]: -1 -dec[a,8,7]: 0.001 -dec[s,8,7]: -0.001 -dec[m,8,7]: 0.0000 -dec[d,8,7]: 0 -dec[c,8,8]: 0 -dec[a,8,8]: 0.0 -dec[s,8,8]: 0.0 -dec[m,8,8]: 0.00 -(errno == PGTYPES_NUM_DIVIDE_ZERO) - r: -1202 -dec[c,8,9]: 1 -dec[a,8,9]: -0.000059249 -dec[s,8,9]: 0.000059249 -dec[m,8,9]: 0.0000000000 -dec[d,8,9]: 0 -dec[c,8,10]: -1 -dec[a,8,10]: 0.003284 -dec[s,8,10]: -0.003284 -dec[m,8,10]: 0.0000000 -dec[d,8,10]: 0 -dec[c,8,11]: -1 -dec[a,8,11]: 0.500001 -dec[s,8,11]: -0.500001 -dec[m,8,11]: 0.0000000 -dec[d,8,11]: 0 -dec[c,8,12]: 1 -dec[a,8,12]: -0.5000001 -dec[s,8,12]: 0.5000001 -dec[m,8,12]: 0.00000000 -dec[d,8,12]: 0 -dec[c,8,13]: -1 -dec[a,8,13]: 1234567890123456789012345678.91 -dec[s,8,13]: -1234567890123456789012345678.91 -dec[m,8,13]: 0.000 -dec[d,8,13]: 0 -dec[c,8,14]: 2147483647 -dec[a,8,14]: 1234567890123456789012345678.91 -dec[s,8,14]: -1234567890123456789012345678.91 -dec[m,8,14]: 0.000 -dec[d,8,14]: 0 -dec[c,9,0]: -1 -(errno == PGTYPES_NUM_OVERFLOW) - r: -1200 -dec[s,9,0]: -dec[m,9,0]: * -dec[d,9,0]: * -dec[c,9,1]: 1 -dec[a,9,1]: -2.000059249 -dec[s,9,1]: 1.999940751 -dec[m,9,1]: 0.000118498 -dec[d,9,1]: 0.000029624500000000000 -dec[c,9,2]: -1 -dec[a,9,2]: 0.793940751 -dec[s,9,2]: -0.794059249 -dec[m,9,2]: -0.000047043706 -dec[d,9,2]: -0.000074620906801007557 -dec[c,9,3]: -1 -dec[a,9,3]: 3.439940751 -dec[s,9,3]: -3.440059249 -dec[m,9,3]: -0.00020381656 -dec[d,9,3]: -0.000017223546511627907 -dec[c,9,4]: -1 -(errno == PGTYPES_NUM_OVERFLOW) - r: -1200 -dec[s,9,4]: -dec[m,9,4]: -35104440010000000000.000000000 -dec[d,9,4]: -0.000000000000000000000000000100000000000000000 -dec[c,9,5]: 1 -dec[a,9,5]: -328400.000059249 -dec[s,9,5]: 328399.999940751 -dec[m,9,5]: 19.457371600 -dec[d,9,5]: 0.00000000018041717417783191 -dec[c,9,6]: -1 -(errno == PGTYPES_NUM_OVERFLOW) - r: -1200 -dec[s,9,6]: -dec[m,9,6]: * -dec[d,9,6]: * -dec[c,9,7]: -1 -dec[a,9,7]: 0.000940751 -dec[s,9,7]: -0.001059249 -dec[m,9,7]: -0.000000059249 -dec[d,9,7]: -0.059249000000000000 -dec[c,9,8]: -1 -dec[a,9,8]: -0.000059249 -dec[s,9,8]: -0.000059249 -dec[m,9,8]: 0.0000000000 -(errno == PGTYPES_NUM_DIVIDE_ZERO) - r: -1202 -dec[c,9,9]: 0 -dec[a,9,9]: -0.000118498 -dec[s,9,9]: 0.000000000 -dec[m,9,9]: 0.000000003510444001 -dec[d,9,9]: 1.00000000000000000 -dec[c,9,10]: -1 -dec[a,9,10]: 0.003224751 -dec[s,9,10]: -0.003343249 -dec[m,9,10]: -0.000000194573716 -dec[d,9,10]: -0.018041717417783191 -dec[c,9,11]: -1 -dec[a,9,11]: 0.499941751 -dec[s,9,11]: -0.500060249 -dec[m,9,11]: -0.000029624559249 -dec[d,9,11]: -0.000118497763004473991 -dec[c,9,12]: 1 -dec[a,9,12]: -0.500059349 -dec[s,9,12]: 0.499940851 -dec[m,9,12]: 0.0000296245059249 
-dec[d,9,12]: 0.000118497976300404740 -dec[c,9,13]: -1 -(errno == PGTYPES_NUM_OVERFLOW) - r: -1200 -dec[s,9,13]: -dec[m,9,13]: -dec[d,9,13]: -0.000000000000000000000000000000047991690431925214 -dec[c,9,14]: 2147483647 -dec[a,9,14]: -dec[s,9,14]: -dec[m,9,14]: -dec[d,9,14]: -0.000000000000000000000000000000047991690431925214 -dec[c,10,0]: -1 -(errno == PGTYPES_NUM_OVERFLOW) - r: -1200 -dec[s,10,0]: -dec[m,10,0]: * -dec[d,10,0]: * -dec[c,10,1]: 1 -dec[a,10,1]: -1.996716 -dec[s,10,1]: 2.003284 -dec[m,10,1]: -0.006568 -dec[d,10,1]: -0.0016420000000000000 -dec[c,10,2]: -1 -dec[a,10,2]: 0.797284 -dec[s,10,2]: -0.790716 -dec[m,10,2]: 0.002607496 -dec[d,10,2]: 0.0041360201511335013 -dec[c,10,3]: -1 -dec[a,10,3]: 3.443284 -dec[s,10,3]: -3.436716 -dec[m,10,3]: 0.01129696 -dec[d,10,3]: 0.00095465116279069767 -dec[c,10,4]: -1 -dec[a,10,4]: 592490000000000000000000.003284 -dec[s,10,4]: -592489999999999999999999.996716 -dec[m,10,4]: 1945737160000000000000.000000 -dec[d,10,4]: 0.0000000000000000000000000055427095815963139 -dec[c,10,5]: 1 -dec[a,10,5]: -328399.996716 -dec[s,10,5]: 328400.003284 -dec[m,10,5]: -1078.465600 -dec[d,10,5]: -0.0000000100000000000000000 -dec[c,10,6]: 1 -(errno == PGTYPES_NUM_OVERFLOW) - r: -1200 -dec[s,10,6]: -dec[m,10,6]: * -dec[d,10,6]: * -dec[c,10,7]: 1 -dec[a,10,7]: 0.004284 -dec[s,10,7]: 0.002284 -dec[m,10,7]: 0.000003284 -dec[d,10,7]: 3.2840000000000000 -dec[c,10,8]: 1 -dec[a,10,8]: 0.003284 -dec[s,10,8]: 0.003284 -dec[m,10,8]: 0.0000000 -(errno == PGTYPES_NUM_DIVIDE_ZERO) - r: -1202 -dec[c,10,9]: 1 -dec[a,10,9]: 0.003224751 -dec[s,10,9]: 0.003343249 -dec[m,10,9]: -0.000000194573716 -dec[d,10,9]: -55.427095815963139 -dec[c,10,10]: 0 -dec[a,10,10]: 0.006568 -dec[s,10,10]: 0.000000 -dec[m,10,10]: 0.000010784656 -dec[d,10,10]: 1.00000000000000000 -dec[c,10,11]: -1 -dec[a,10,11]: 0.503285 -dec[s,10,11]: -0.496717 -dec[m,10,11]: 0.001642003284 -dec[d,10,11]: 0.0065679868640262719 -dec[c,10,12]: 1 -dec[a,10,12]: -0.4967161 -dec[s,10,12]: 0.5032841 -dec[m,10,12]: -0.0016420003284 -dec[d,10,12]: -0.0065679986864002627 -dec[c,10,13]: -1 -(errno == PGTYPES_NUM_OVERFLOW) - r: -1200 -dec[s,10,13]: -dec[m,10,13]: -dec[d,10,13]: 0.0000000000000000000000000000026600400239403602 -dec[c,10,14]: 2147483647 -dec[a,10,14]: -dec[s,10,14]: -dec[m,10,14]: -dec[d,10,14]: 0.0000000000000000000000000000026600400239403602 -dec[c,11,0]: -1 -(errno == PGTYPES_NUM_OVERFLOW) - r: -1200 -dec[s,11,0]: -dec[m,11,0]: * -dec[d,11,0]: * -dec[c,11,1]: 1 -dec[a,11,1]: -1.499999 -dec[s,11,1]: 2.500001 -dec[m,11,1]: -1.000002 -dec[d,11,1]: -0.25000050000000000 -dec[c,11,2]: -1 -dec[a,11,2]: 1.294001 -dec[s,11,2]: -0.293999 -dec[m,11,2]: 0.397000794 -dec[d,11,2]: 0.62972418136020151 -dec[c,11,3]: -1 -dec[a,11,3]: 3.940001 -dec[s,11,3]: -2.939999 -dec[m,11,3]: 1.72000344 -dec[d,11,3]: 0.14534912790697674 -dec[c,11,4]: -1 -dec[a,11,4]: 592490000000000000000000.500001 -dec[s,11,4]: -592489999999999999999999.499999 -dec[m,11,4]: 296245592490000000000000.000000 -dec[d,11,4]: 0.00000000000000000000000084389778730442708 -dec[c,11,5]: 1 -dec[a,11,5]: -328399.499999 -dec[s,11,5]: 328400.500001 -dec[m,11,5]: -164200.328400 -dec[d,11,5]: -0.0000015225365408038977 -dec[c,11,6]: 1 -(errno == PGTYPES_NUM_OVERFLOW) - r: -1200 -dec[s,11,6]: -dec[m,11,6]: * -dec[d,11,6]: * -dec[c,11,7]: 1 -dec[a,11,7]: 0.501001 -dec[s,11,7]: 0.499001 -dec[m,11,7]: 0.000500001 -dec[d,11,7]: 500.00100000000000 -dec[c,11,8]: 1 -dec[a,11,8]: 0.500001 -dec[s,11,8]: 0.500001 -dec[m,11,8]: 0.0000000 -(errno == PGTYPES_NUM_DIVIDE_ZERO) - r: -1202 
-dec[c,11,9]: 1 -dec[a,11,9]: 0.499941751 -dec[s,11,9]: 0.500060249 -dec[m,11,9]: -0.000029624559249 -dec[d,11,9]: -8438.9778730442708 -dec[c,11,10]: 1 -dec[a,11,10]: 0.503285 -dec[s,11,10]: 0.496717 -dec[m,11,10]: 0.001642003284 -dec[d,11,10]: 152.25365408038977 -dec[c,11,11]: 0 -dec[a,11,11]: 1.000002 -dec[s,11,11]: 0.000000 -dec[m,11,11]: 0.250001000001 -dec[d,11,11]: 1.00000000000000000 -dec[c,11,12]: 1 -dec[a,11,12]: 0.0000009 -dec[s,11,12]: 1.0000011 -dec[m,11,12]: -0.2500005500001 -dec[d,11,12]: -1.00000179999964000 -dec[c,11,13]: -1 -(errno == PGTYPES_NUM_OVERFLOW) - r: -1200 -dec[s,11,13]: -dec[m,11,13]: -dec[d,11,13]: 0.00000000000000000000000000040500081364500732 -dec[c,11,14]: 2147483647 -dec[a,11,14]: -dec[s,11,14]: -dec[m,11,14]: -dec[d,11,14]: 0.00000000000000000000000000040500081364500732 -dec[c,12,0]: -1 -(errno == PGTYPES_NUM_OVERFLOW) - r: -1200 -dec[s,12,0]: -dec[m,12,0]: * -dec[d,12,0]: * -dec[c,12,1]: 1 -dec[a,12,1]: -2.5000001 -dec[s,12,1]: 1.4999999 -dec[m,12,1]: 1.0000002 -dec[d,12,1]: 0.25000005000000000 -dec[c,12,2]: -1 -dec[a,12,2]: 0.2939999 -dec[s,12,2]: -1.2940001 -dec[m,12,2]: -0.3970000794 -dec[d,12,2]: -0.62972304785894207 -dec[c,12,3]: -1 -dec[a,12,3]: 2.9399999 -dec[s,12,3]: -3.9400001 -dec[m,12,3]: -1.720000344 -dec[d,12,3]: -0.14534886627906977 -dec[c,12,4]: -1 -(errno == PGTYPES_NUM_OVERFLOW) - r: -1200 -dec[s,12,4]: -dec[m,12,4]: -296245059249000000000000.0000000 -dec[d,12,4]: -0.00000000000000000000000084389626829144796 -dec[c,12,5]: 1 -dec[a,12,5]: -328400.5000001 -dec[s,12,5]: 328399.4999999 -dec[m,12,5]: 164200.0328400 -dec[d,12,5]: 0.0000015225338002436054 -dec[c,12,6]: -1 -(errno == PGTYPES_NUM_OVERFLOW) - r: -1200 -dec[s,12,6]: -dec[m,12,6]: * -dec[d,12,6]: * -dec[c,12,7]: -1 -dec[a,12,7]: -0.4990001 -dec[s,12,7]: -0.5010001 -dec[m,12,7]: -0.0005000001 -dec[d,12,7]: -500.00010000000000 -dec[c,12,8]: -1 -dec[a,12,8]: -0.5000001 -dec[s,12,8]: -0.5000001 -dec[m,12,8]: 0.00000000 -(errno == PGTYPES_NUM_DIVIDE_ZERO) - r: -1202 -dec[c,12,9]: -1 -dec[a,12,9]: -0.500059349 -dec[s,12,9]: -0.499940851 -dec[m,12,9]: 0.0000296245059249 -dec[d,12,9]: 8438.9626829144796 -dec[c,12,10]: -1 -dec[a,12,10]: -0.4967161 -dec[s,12,10]: -0.5032841 -dec[m,12,10]: -0.0016420003284 -dec[d,12,10]: -152.25338002436054 -dec[c,12,11]: -1 -dec[a,12,11]: 0.0000009 -dec[s,12,11]: -1.0000011 -dec[m,12,11]: -0.2500005500001 -dec[d,12,11]: -0.99999820000359999 -dec[c,12,12]: 0 -dec[a,12,12]: -1.0000002 -dec[s,12,12]: 0.0000000 -dec[m,12,12]: 0.25000010000001 -dec[d,12,12]: 1.00000000000000000 -dec[c,12,13]: -1 -(errno == PGTYPES_NUM_OVERFLOW) - r: -1200 -dec[s,12,13]: -dec[m,12,13]: -dec[d,12,13]: -0.00000000000000000000000000040500008464500076 -dec[c,12,14]: 2147483647 -dec[a,12,14]: -dec[s,12,14]: -dec[m,12,14]: -dec[d,12,14]: -0.00000000000000000000000000040500008464500076 -dec[c,13,0]: -1 -(errno == PGTYPES_NUM_OVERFLOW) - r: -1200 -dec[s,13,0]: -dec[m,13,0]: * -dec[d,13,0]: * -dec[c,13,1]: 1 -dec[a,13,1]: 1234567890123456789012345676.91 -dec[s,13,1]: 1234567890123456789012345680.91 -dec[m,13,1]: -2469135780246913578024691357.82 -dec[d,13,1]: -617283945061728394506172839.46 -dec[c,13,2]: 1 -(errno == PGTYPES_NUM_OVERFLOW) - r: -1200 -dec[s,13,2]: -dec[m,13,2]: -dec[d,13,2]: -dec[c,13,3]: 1 -dec[a,13,3]: 1234567890123456789012345682.35 -dec[s,13,3]: 1234567890123456789012345675.47 -dec[m,13,3]: -dec[d,13,3]: -dec[c,13,4]: 1 -dec[a,13,4]: 1235160380123456789012345678.91 -dec[s,13,4]: 1233975400123456789012345678.91 -dec[m,13,4]: -dec[d,13,4]: 2083.6940541164522 -dec[c,13,5]: 1 
-dec[a,13,5]: 1234567890123456789012017278.91 -dec[s,13,5]: 1234567890123456789012674078.91 -dec[m,13,5]: -dec[d,13,5]: -3759341930948406787491.92 -dec[c,13,6]: 1 -(errno == PGTYPES_NUM_OVERFLOW) - r: -1200 -dec[s,13,6]: -dec[m,13,6]: * -dec[d,13,6]: * -dec[c,13,7]: 1 -(errno == PGTYPES_NUM_OVERFLOW) - r: -1200 -dec[s,13,7]: -dec[m,13,7]: 1234567890123456789012345.67891 -dec[d,13,7]: 1234567890123456789012345678910.000 -dec[c,13,8]: 1 -dec[a,13,8]: 1234567890123456789012345678.91 -dec[s,13,8]: 1234567890123456789012345678.91 -dec[m,13,8]: 0.000 -(errno == PGTYPES_NUM_DIVIDE_ZERO) - r: -1202 -dec[c,13,9]: 1 -(errno == PGTYPES_NUM_OVERFLOW) - r: -1200 -dec[s,13,9]: -dec[m,13,9]: -dec[d,13,9]: -dec[c,13,10]: 1 -(errno == PGTYPES_NUM_OVERFLOW) - r: -1200 -dec[s,13,10]: -dec[m,13,10]: -dec[d,13,10]: -dec[c,13,11]: 1 -(errno == PGTYPES_NUM_OVERFLOW) - r: -1200 -dec[s,13,11]: -dec[m,13,11]: -dec[d,13,11]: -dec[c,13,12]: 1 -(errno == PGTYPES_NUM_OVERFLOW) - r: -1200 -dec[s,13,12]: -dec[m,13,12]: -dec[d,13,12]: -dec[c,13,13]: 0 -dec[a,13,13]: 2469135780246913578024691357.82 -dec[s,13,13]: 0.00 -dec[m,13,13]: -dec[d,13,13]: 1.00000000000000000 -dec[c,13,14]: 2147483647 -dec[a,13,14]: 2469135780246913578024691357.82 -dec[s,13,14]: 0.00 -dec[m,13,14]: -dec[d,13,14]: 1.00000000000000000 -dec[c,14,0]: 2147483647 -dec[a,14,0]: 2469135780246913578024691357.82 -dec[s,14,0]: 0.00 -dec[m,14,0]: -dec[d,14,0]: 1.00000000000000000 -dec[c,14,1]: 2147483647 -dec[a,14,1]: 2469135780246913578024691357.82 -dec[s,14,1]: 0.00 -dec[m,14,1]: -dec[d,14,1]: 1.00000000000000000 -dec[c,14,2]: 2147483647 -dec[a,14,2]: 2469135780246913578024691357.82 -dec[s,14,2]: 0.00 -dec[m,14,2]: -dec[d,14,2]: 1.00000000000000000 -dec[c,14,3]: 2147483647 -dec[a,14,3]: 2469135780246913578024691357.82 -dec[s,14,3]: 0.00 -dec[m,14,3]: -dec[d,14,3]: 1.00000000000000000 -dec[c,14,4]: 2147483647 -dec[a,14,4]: 2469135780246913578024691357.82 -dec[s,14,4]: 0.00 -dec[m,14,4]: -dec[d,14,4]: 1.00000000000000000 -dec[c,14,5]: 2147483647 -dec[a,14,5]: 2469135780246913578024691357.82 -dec[s,14,5]: 0.00 -dec[m,14,5]: -dec[d,14,5]: 1.00000000000000000 -dec[c,14,6]: 2147483647 -dec[a,14,6]: 2469135780246913578024691357.82 -dec[s,14,6]: 0.00 -dec[m,14,6]: -dec[d,14,6]: 1.00000000000000000 -dec[c,14,7]: 2147483647 -dec[a,14,7]: 2469135780246913578024691357.82 -dec[s,14,7]: 0.00 -dec[m,14,7]: -dec[d,14,7]: 1.00000000000000000 -dec[c,14,8]: 2147483647 -dec[a,14,8]: 2469135780246913578024691357.82 -dec[s,14,8]: 0.00 -dec[m,14,8]: -dec[d,14,8]: 1.00000000000000000 -dec[c,14,9]: 2147483647 -dec[a,14,9]: 2469135780246913578024691357.82 -dec[s,14,9]: 0.00 -dec[m,14,9]: -dec[d,14,9]: 1.00000000000000000 -dec[c,14,10]: 2147483647 -dec[a,14,10]: 2469135780246913578024691357.82 -dec[s,14,10]: 0.00 -dec[m,14,10]: -dec[d,14,10]: 1.00000000000000000 -dec[c,14,11]: 2147483647 -dec[a,14,11]: 2469135780246913578024691357.82 -dec[s,14,11]: 0.00 -dec[m,14,11]: -dec[d,14,11]: 1.00000000000000000 -dec[c,14,12]: 2147483647 -dec[a,14,12]: 2469135780246913578024691357.82 -dec[s,14,12]: 0.00 -dec[m,14,12]: -dec[d,14,12]: 1.00000000000000000 -dec[c,14,13]: 2147483647 -dec[a,14,13]: 2469135780246913578024691357.82 -dec[s,14,13]: 0.00 -dec[m,14,13]: -dec[d,14,13]: 1.00000000000000000 -dec[c,14,14]: 2147483647 -dec[a,14,14]: 2469135780246913578024691357.82 -dec[s,14,14]: 0.00 -dec[m,14,14]: -dec[d,14,14]: 1.00000000000000000 -0: * -1: -2 -2: 0.794 -3: 3.44 -4: 592490000000000000000000 -5: -328400 -6: * -7: 0.001 -8: 0.0 -9: -0.000059249 -10: 0.003284 -11: 0.500001 -12: -0.5000001 -13: 
1234567890123456789012345678.91 -14: diff --git a/src/interfaces/ecpg/test/expected/compat_informix-dec_test.c b/src/interfaces/ecpg/test/expected/compat_informix-dec_test.c index 3b443e3ffd..8586650e87 100644 --- a/src/interfaces/ecpg/test/expected/compat_informix-dec_test.c +++ b/src/interfaces/ecpg/test/expected/compat_informix-dec_test.c @@ -28,12 +28,38 @@ +#line 1 "printf_hack.h" /* + * print_double(x) has the same effect as printf("%g", x), but is intended + * to produce the same formatting across all platforms. + */ +static void +print_double(double x) +{ +#ifdef WIN32 + /* Change Windows' 3-digit exponents to look like everyone else's */ + char convert[128]; + int vallen; -NOTE: This file has a different expect file for regression tests on MinGW32 + sprintf(convert, "%g", x); + vallen = strlen(convert); -*/ + if (vallen >= 6 && + convert[vallen - 5] == 'e' && + convert[vallen - 3] == '0') + { + convert[vallen - 3] = convert[vallen - 2]; + convert[vallen - 2] = convert[vallen - 1]; + convert[vallen - 1] = '\0'; + } + + printf("%s", convert); +#else + printf("%g", x); +#endif +} +#line 10 "dec_test.pgc" @@ -135,7 +161,9 @@ main(void) /* this is a libc problem since we only call strtod() */ r = dectodbl(dec, &dbl); if (r) check_errno(); - printf("dec[%d,10]: %g (r: %d)\n", i, r?0.0:dbl, r); + printf("dec[%d,10]: ", i); + print_double(r ? 0.0 : dbl); + printf(" (r: %d)\n", r); } PGTYPESdecimal_free(din); @@ -226,7 +254,7 @@ main(void) } free(decarr); - return (0); + return 0; } static void diff --git a/src/interfaces/ecpg/test/expected/compat_informix-describe.c b/src/interfaces/ecpg/test/expected/compat_informix-describe.c index 1b5aae0df7..031a2d776c 100644 --- a/src/interfaces/ecpg/test/expected/compat_informix-describe.c +++ b/src/interfaces/ecpg/test/expected/compat_informix-describe.c @@ -463,5 +463,5 @@ if (sqlca.sqlcode < 0) exit (1);} #line 196 "describe.pgc" - return (0); + return 0; } diff --git a/src/interfaces/ecpg/test/expected/compat_informix-rfmtdate.c b/src/interfaces/ecpg/test/expected/compat_informix-rfmtdate.c index ac133c52ef..68be08276d 100644 --- a/src/interfaces/ecpg/test/expected/compat_informix-rfmtdate.c +++ b/src/interfaces/ecpg/test/expected/compat_informix-rfmtdate.c @@ -24,7 +24,7 @@ static void check_return(int ret); static void -date_test_strdate(char *input) +date_test_strdate(const char *input) { static int i; date d; @@ -49,7 +49,7 @@ date_test_strdate(char *input) } static void -date_test_defmt(char *fmt, char *input) +date_test_defmt(const char *fmt, const char *input) { static int i; char dbuf[11]; @@ -74,7 +74,7 @@ date_test_defmt(char *fmt, char *input) } static void -date_test_fmt(date d, char *fmt) +date_test_fmt(date d, const char *fmt) { static int i; char buf[200]; @@ -158,7 +158,7 @@ main(void) /* ECPG_INFORMIX_BAD_YEAR */ /* ??? 
*/ - return (0); + return 0; } static void diff --git a/src/interfaces/ecpg/test/expected/compat_informix-rfmtlong.c b/src/interfaces/ecpg/test/expected/compat_informix-rfmtlong.c index 5f44b35ee7..b2e397e38c 100644 --- a/src/interfaces/ecpg/test/expected/compat_informix-rfmtlong.c +++ b/src/interfaces/ecpg/test/expected/compat_informix-rfmtlong.c @@ -24,7 +24,7 @@ static void check_return(int ret); static void -fmtlong(long lng, char *fmt) +fmtlong(long lng, const char *fmt) { static int i; int r; @@ -56,7 +56,7 @@ main(void) fmtlong(-8494493, "abc: ################+-+"); fmtlong(-8494493, "+<<<<,<<<,<<<,<<<"); - return (0); + return 0; } static void diff --git a/src/interfaces/ecpg/test/expected/compat_informix-sqlda.c b/src/interfaces/ecpg/test/expected/compat_informix-sqlda.c index 1df87f83ef..ad3188d1e6 100644 --- a/src/interfaces/ecpg/test/expected/compat_informix-sqlda.c +++ b/src/interfaces/ecpg/test/expected/compat_informix-sqlda.c @@ -142,7 +142,7 @@ dump_sqlda(sqlda_t *sqlda) printf("name sqlda descriptor: '%s' value %d\n", sqlda->sqlvar[i].sqlname, *(int *)sqlda->sqlvar[i].sqldata); break; case SQLFLOAT: - printf("name sqlda descriptor: '%s' value %lf\n", sqlda->sqlvar[i].sqlname, *(double *)sqlda->sqlvar[i].sqldata); + printf("name sqlda descriptor: '%s' value %f\n", sqlda->sqlvar[i].sqlname, *(double *)sqlda->sqlvar[i].sqldata); break; case SQLDECIMAL: { @@ -526,5 +526,5 @@ if (sqlca.sqlcode < 0) exit (1);} #line 247 "sqlda.pgc" - return (0); + return 0; } diff --git a/src/interfaces/ecpg/test/expected/compat_informix-test_informix2.c b/src/interfaces/ecpg/test/expected/compat_informix-test_informix2.c index 4e372a5799..eeb9b62ab4 100644 --- a/src/interfaces/ecpg/test/expected/compat_informix-test_informix2.c +++ b/src/interfaces/ecpg/test/expected/compat_informix-test_informix2.c @@ -97,7 +97,7 @@ struct sqlca_t *ECPGget_sqlca(void); /* Check SQLCODE, and produce a "standard error" if it's wrong! */ -static void sql_check(char *fn, char *caller, int ignore) +static void sql_check(const char *fn, const char *caller, int ignore) { char errorstring[255]; diff --git a/src/interfaces/ecpg/test/expected/compat_oracle-char_array.c b/src/interfaces/ecpg/test/expected/compat_oracle-char_array.c new file mode 100644 index 0000000000..c9d2badb6a --- /dev/null +++ b/src/interfaces/ecpg/test/expected/compat_oracle-char_array.c @@ -0,0 +1,223 @@ +/* Processed by ecpg (regression mode) */ +/* These include files are added by the preprocessor */ +#include +#include +#include +/* End of automatic include section */ +#define ECPGdebug(X,Y) ECPGdebug((X)+100,(Y)) + +#line 1 "char_array.pgc" +#include +#include +#include + + +#line 1 "regression.h" + + + + + + +#line 5 "char_array.pgc" + + +static void warn(void) +{ + fprintf(stderr, "Warning: At least one column was truncated\n"); +} + +/* Compatible handling of char array to retrieve varchar field to char array + should be fixed-length, blank-padded, then null-terminated. + Conforms to the ANSI Fixed Character type. 
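+   For example, fetching 'ABCDE' into a char[5] host variable yields "ABCD"
+   with indicator 5 (the untruncated length), while 'AB' comes back
+   blank-padded as "AB  " with indicator 0; see compat_oracle-char_array.stdout.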
*/ + +int main() { + + /* exec sql whenever sql_warning do warn ( ) ; */ +#line 18 "char_array.pgc" + + /* exec sql whenever sqlerror sqlprint ; */ +#line 19 "char_array.pgc" + + + const char *ppppp = "XXXXX"; + + /* exec sql begin declare section */ + + + + + +#line 24 "char_array.pgc" + char shortstr [ 5 ] ; + +#line 25 "char_array.pgc" + char bigstr [ 11 ] ; + +#line 26 "char_array.pgc" + short shstr_ind = 0 ; + +#line 27 "char_array.pgc" + short bigstr_ind = 0 ; +/* exec sql end declare section */ +#line 28 "char_array.pgc" + + + ECPGdebug(1, stderr); + { ECPGconnect(__LINE__, 3, "ecpg1_regression" , NULL, NULL , NULL, 0); +#line 31 "char_array.pgc" + +if (sqlca.sqlwarn[0] == 'W') warn ( ); +#line 31 "char_array.pgc" + +if (sqlca.sqlcode < 0) sqlprint();} +#line 31 "char_array.pgc" + + + { ECPGdo(__LINE__, 3, 1, NULL, 0, ECPGst_normal, "create table strdbase ( strval varchar ( 10 ) )", ECPGt_EOIT, ECPGt_EORT); +#line 33 "char_array.pgc" + +if (sqlca.sqlwarn[0] == 'W') warn ( ); +#line 33 "char_array.pgc" + +if (sqlca.sqlcode < 0) sqlprint();} +#line 33 "char_array.pgc" + + { ECPGdo(__LINE__, 3, 1, NULL, 0, ECPGst_normal, "insert into strdbase values ( '' )", ECPGt_EOIT, ECPGt_EORT); +#line 34 "char_array.pgc" + +if (sqlca.sqlwarn[0] == 'W') warn ( ); +#line 34 "char_array.pgc" + +if (sqlca.sqlcode < 0) sqlprint();} +#line 34 "char_array.pgc" + + { ECPGdo(__LINE__, 3, 1, NULL, 0, ECPGst_normal, "insert into strdbase values ( 'AB' )", ECPGt_EOIT, ECPGt_EORT); +#line 35 "char_array.pgc" + +if (sqlca.sqlwarn[0] == 'W') warn ( ); +#line 35 "char_array.pgc" + +if (sqlca.sqlcode < 0) sqlprint();} +#line 35 "char_array.pgc" + + { ECPGdo(__LINE__, 3, 1, NULL, 0, ECPGst_normal, "insert into strdbase values ( 'ABCD' )", ECPGt_EOIT, ECPGt_EORT); +#line 36 "char_array.pgc" + +if (sqlca.sqlwarn[0] == 'W') warn ( ); +#line 36 "char_array.pgc" + +if (sqlca.sqlcode < 0) sqlprint();} +#line 36 "char_array.pgc" + + { ECPGdo(__LINE__, 3, 1, NULL, 0, ECPGst_normal, "insert into strdbase values ( 'ABCDE' )", ECPGt_EOIT, ECPGt_EORT); +#line 37 "char_array.pgc" + +if (sqlca.sqlwarn[0] == 'W') warn ( ); +#line 37 "char_array.pgc" + +if (sqlca.sqlcode < 0) sqlprint();} +#line 37 "char_array.pgc" + + { ECPGdo(__LINE__, 3, 1, NULL, 0, ECPGst_normal, "insert into strdbase values ( 'ABCDEF' )", ECPGt_EOIT, ECPGt_EORT); +#line 38 "char_array.pgc" + +if (sqlca.sqlwarn[0] == 'W') warn ( ); +#line 38 "char_array.pgc" + +if (sqlca.sqlcode < 0) sqlprint();} +#line 38 "char_array.pgc" + + { ECPGdo(__LINE__, 3, 1, NULL, 0, ECPGst_normal, "insert into strdbase values ( 'ABCDEFGHIJ' )", ECPGt_EOIT, ECPGt_EORT); +#line 39 "char_array.pgc" + +if (sqlca.sqlwarn[0] == 'W') warn ( ); +#line 39 "char_array.pgc" + +if (sqlca.sqlcode < 0) sqlprint();} +#line 39 "char_array.pgc" + + + /* declare C cursor for select strval , strval from strdbase */ +#line 41 "char_array.pgc" + + { ECPGdo(__LINE__, 3, 1, NULL, 0, ECPGst_normal, "declare C cursor for select strval , strval from strdbase", ECPGt_EOIT, ECPGt_EORT); +#line 42 "char_array.pgc" + +if (sqlca.sqlwarn[0] == 'W') warn ( ); +#line 42 "char_array.pgc" + +if (sqlca.sqlcode < 0) sqlprint();} +#line 42 "char_array.pgc" + + + /* exec sql whenever not found break ; */ +#line 44 "char_array.pgc" + + + printf("Full Str. 
: Short Ind.\n"); + while(1) { + strncpy(shortstr, ppppp, sizeof shortstr); + memset(bigstr, 0, sizeof bigstr); + { ECPGdo(__LINE__, 3, 1, NULL, 0, ECPGst_normal, "fetch C", ECPGt_EOIT, + ECPGt_char,(bigstr),(long)11,(long)1,(11)*sizeof(char), + ECPGt_short,&(bigstr_ind),(long)1,(long)1,sizeof(short), + ECPGt_char,(shortstr),(long)5,(long)1,(5)*sizeof(char), + ECPGt_short,&(shstr_ind),(long)1,(long)1,sizeof(short), ECPGt_EORT); +#line 50 "char_array.pgc" + +if (sqlca.sqlcode == ECPG_NOT_FOUND) break; +#line 50 "char_array.pgc" + +if (sqlca.sqlwarn[0] == 'W') warn ( ); +#line 50 "char_array.pgc" + +if (sqlca.sqlcode < 0) sqlprint();} +#line 50 "char_array.pgc" + + printf("\"%s\": \"%s\" %d\n", bigstr, shortstr, shstr_ind); + } + + { ECPGdo(__LINE__, 3, 1, NULL, 0, ECPGst_normal, "close C", ECPGt_EOIT, ECPGt_EORT); +#line 54 "char_array.pgc" + +if (sqlca.sqlwarn[0] == 'W') warn ( ); +#line 54 "char_array.pgc" + +if (sqlca.sqlcode < 0) sqlprint();} +#line 54 "char_array.pgc" + + { ECPGdo(__LINE__, 3, 1, NULL, 0, ECPGst_normal, "drop table strdbase", ECPGt_EOIT, ECPGt_EORT); +#line 55 "char_array.pgc" + +if (sqlca.sqlwarn[0] == 'W') warn ( ); +#line 55 "char_array.pgc" + +if (sqlca.sqlcode < 0) sqlprint();} +#line 55 "char_array.pgc" + + + printf("\nGOOD-BYE!!\n\n"); + + { ECPGtrans(__LINE__, NULL, "commit work"); +#line 59 "char_array.pgc" + +if (sqlca.sqlwarn[0] == 'W') warn ( ); +#line 59 "char_array.pgc" + +if (sqlca.sqlcode < 0) sqlprint();} +#line 59 "char_array.pgc" + + + { ECPGdisconnect(__LINE__, "ALL"); +#line 61 "char_array.pgc" + +if (sqlca.sqlwarn[0] == 'W') warn ( ); +#line 61 "char_array.pgc" + +if (sqlca.sqlcode < 0) sqlprint();} +#line 61 "char_array.pgc" + + + return 0; +} diff --git a/src/interfaces/ecpg/test/expected/compat_oracle-char_array.stderr b/src/interfaces/ecpg/test/expected/compat_oracle-char_array.stderr new file mode 100644 index 0000000000..40d9f7ddb0 --- /dev/null +++ b/src/interfaces/ecpg/test/expected/compat_oracle-char_array.stderr @@ -0,0 +1,139 @@ +[NO_PID]: ECPGdebug: set to 1 +[NO_PID]: sqlca: code: 0, state: 00000 +[NO_PID]: ECPGconnect: opening database ecpg1_regression on port +[NO_PID]: sqlca: code: 0, state: 00000 +[NO_PID]: ecpg_execute on line 33: query: create table strdbase ( strval varchar ( 10 ) ); with 0 parameter(s) on connection ecpg1_regression +[NO_PID]: sqlca: code: 0, state: 00000 +[NO_PID]: ecpg_execute on line 33: using PQexec +[NO_PID]: sqlca: code: 0, state: 00000 +[NO_PID]: ecpg_process_output on line 33: OK: CREATE TABLE +[NO_PID]: sqlca: code: 0, state: 00000 +[NO_PID]: ecpg_execute on line 34: query: insert into strdbase values ( '' ); with 0 parameter(s) on connection ecpg1_regression +[NO_PID]: sqlca: code: 0, state: 00000 +[NO_PID]: ecpg_execute on line 34: using PQexec +[NO_PID]: sqlca: code: 0, state: 00000 +[NO_PID]: ecpg_process_output on line 34: OK: INSERT 0 1 +[NO_PID]: sqlca: code: 0, state: 00000 +[NO_PID]: ecpg_execute on line 35: query: insert into strdbase values ( 'AB' ); with 0 parameter(s) on connection ecpg1_regression +[NO_PID]: sqlca: code: 0, state: 00000 +[NO_PID]: ecpg_execute on line 35: using PQexec +[NO_PID]: sqlca: code: 0, state: 00000 +[NO_PID]: ecpg_process_output on line 35: OK: INSERT 0 1 +[NO_PID]: sqlca: code: 0, state: 00000 +[NO_PID]: ecpg_execute on line 36: query: insert into strdbase values ( 'ABCD' ); with 0 parameter(s) on connection ecpg1_regression +[NO_PID]: sqlca: code: 0, state: 00000 +[NO_PID]: ecpg_execute on line 36: using PQexec +[NO_PID]: sqlca: code: 0, state: 00000 
+[NO_PID]: ecpg_process_output on line 36: OK: INSERT 0 1 +[NO_PID]: sqlca: code: 0, state: 00000 +[NO_PID]: ecpg_execute on line 37: query: insert into strdbase values ( 'ABCDE' ); with 0 parameter(s) on connection ecpg1_regression +[NO_PID]: sqlca: code: 0, state: 00000 +[NO_PID]: ecpg_execute on line 37: using PQexec +[NO_PID]: sqlca: code: 0, state: 00000 +[NO_PID]: ecpg_process_output on line 37: OK: INSERT 0 1 +[NO_PID]: sqlca: code: 0, state: 00000 +[NO_PID]: ecpg_execute on line 38: query: insert into strdbase values ( 'ABCDEF' ); with 0 parameter(s) on connection ecpg1_regression +[NO_PID]: sqlca: code: 0, state: 00000 +[NO_PID]: ecpg_execute on line 38: using PQexec +[NO_PID]: sqlca: code: 0, state: 00000 +[NO_PID]: ecpg_process_output on line 38: OK: INSERT 0 1 +[NO_PID]: sqlca: code: 0, state: 00000 +[NO_PID]: ecpg_execute on line 39: query: insert into strdbase values ( 'ABCDEFGHIJ' ); with 0 parameter(s) on connection ecpg1_regression +[NO_PID]: sqlca: code: 0, state: 00000 +[NO_PID]: ecpg_execute on line 39: using PQexec +[NO_PID]: sqlca: code: 0, state: 00000 +[NO_PID]: ecpg_process_output on line 39: OK: INSERT 0 1 +[NO_PID]: sqlca: code: 0, state: 00000 +[NO_PID]: ecpg_execute on line 42: query: declare C cursor for select strval , strval from strdbase; with 0 parameter(s) on connection ecpg1_regression +[NO_PID]: sqlca: code: 0, state: 00000 +[NO_PID]: ecpg_execute on line 42: using PQexec +[NO_PID]: sqlca: code: 0, state: 00000 +[NO_PID]: ecpg_process_output on line 42: OK: DECLARE CURSOR +[NO_PID]: sqlca: code: 0, state: 00000 +[NO_PID]: ecpg_execute on line 50: query: fetch C; with 0 parameter(s) on connection ecpg1_regression +[NO_PID]: sqlca: code: 0, state: 00000 +[NO_PID]: ecpg_execute on line 50: using PQexec +[NO_PID]: sqlca: code: 0, state: 00000 +[NO_PID]: ecpg_process_output on line 50: correctly got 1 tuples with 2 fields +[NO_PID]: sqlca: code: 0, state: 00000 +[NO_PID]: ecpg_get_data on line 50: RESULT: offset: -1; array: no +[NO_PID]: sqlca: code: 0, state: 00000 +[NO_PID]: ecpg_get_data on line 50: RESULT: offset: -1; array: no +[NO_PID]: sqlca: code: 0, state: 00000 +[NO_PID]: ecpg_execute on line 50: query: fetch C; with 0 parameter(s) on connection ecpg1_regression +[NO_PID]: sqlca: code: 0, state: 00000 +[NO_PID]: ecpg_execute on line 50: using PQexec +[NO_PID]: sqlca: code: 0, state: 00000 +[NO_PID]: ecpg_process_output on line 50: correctly got 1 tuples with 2 fields +[NO_PID]: sqlca: code: 0, state: 00000 +[NO_PID]: ecpg_get_data on line 50: RESULT: AB offset: -1; array: no +[NO_PID]: sqlca: code: 0, state: 00000 +[NO_PID]: ecpg_get_data on line 50: RESULT: AB offset: -1; array: no +[NO_PID]: sqlca: code: 0, state: 00000 +[NO_PID]: ecpg_execute on line 50: query: fetch C; with 0 parameter(s) on connection ecpg1_regression +[NO_PID]: sqlca: code: 0, state: 00000 +[NO_PID]: ecpg_execute on line 50: using PQexec +[NO_PID]: sqlca: code: 0, state: 00000 +[NO_PID]: ecpg_process_output on line 50: correctly got 1 tuples with 2 fields +[NO_PID]: sqlca: code: 0, state: 00000 +[NO_PID]: ecpg_get_data on line 50: RESULT: ABCD offset: -1; array: no +[NO_PID]: sqlca: code: 0, state: 00000 +[NO_PID]: ecpg_get_data on line 50: RESULT: ABCD offset: -1; array: no +[NO_PID]: sqlca: code: 0, state: 00000 +[NO_PID]: ecpg_execute on line 50: query: fetch C; with 0 parameter(s) on connection ecpg1_regression +[NO_PID]: sqlca: code: 0, state: 00000 +[NO_PID]: ecpg_execute on line 50: using PQexec +[NO_PID]: sqlca: code: 0, state: 00000 +[NO_PID]: ecpg_process_output on 
line 50: correctly got 1 tuples with 2 fields +[NO_PID]: sqlca: code: 0, state: 00000 +[NO_PID]: ecpg_get_data on line 50: RESULT: ABCDE offset: -1; array: no +[NO_PID]: sqlca: code: 0, state: 00000 +[NO_PID]: ecpg_get_data on line 50: RESULT: ABCDE offset: -1; array: no +[NO_PID]: sqlca: code: 0, state: 00000 +Warning: At least one column was truncated +[NO_PID]: ecpg_execute on line 50: query: fetch C; with 0 parameter(s) on connection ecpg1_regression +[NO_PID]: sqlca: code: 0, state: 00000 +[NO_PID]: ecpg_execute on line 50: using PQexec +[NO_PID]: sqlca: code: 0, state: 00000 +[NO_PID]: ecpg_process_output on line 50: correctly got 1 tuples with 2 fields +[NO_PID]: sqlca: code: 0, state: 00000 +[NO_PID]: ecpg_get_data on line 50: RESULT: ABCDEF offset: -1; array: no +[NO_PID]: sqlca: code: 0, state: 00000 +[NO_PID]: ecpg_get_data on line 50: RESULT: ABCDEF offset: -1; array: no +[NO_PID]: sqlca: code: 0, state: 00000 +Warning: At least one column was truncated +[NO_PID]: ecpg_execute on line 50: query: fetch C; with 0 parameter(s) on connection ecpg1_regression +[NO_PID]: sqlca: code: 0, state: 00000 +[NO_PID]: ecpg_execute on line 50: using PQexec +[NO_PID]: sqlca: code: 0, state: 00000 +[NO_PID]: ecpg_process_output on line 50: correctly got 1 tuples with 2 fields +[NO_PID]: sqlca: code: 0, state: 00000 +[NO_PID]: ecpg_get_data on line 50: RESULT: ABCDEFGHIJ offset: -1; array: no +[NO_PID]: sqlca: code: 0, state: 00000 +[NO_PID]: ecpg_get_data on line 50: RESULT: ABCDEFGHIJ offset: -1; array: no +[NO_PID]: sqlca: code: 0, state: 00000 +Warning: At least one column was truncated +[NO_PID]: ecpg_execute on line 50: query: fetch C; with 0 parameter(s) on connection ecpg1_regression +[NO_PID]: sqlca: code: 0, state: 00000 +[NO_PID]: ecpg_execute on line 50: using PQexec +[NO_PID]: sqlca: code: 0, state: 00000 +[NO_PID]: ecpg_process_output on line 50: correctly got 0 tuples with 2 fields +[NO_PID]: sqlca: code: 0, state: 00000 +[NO_PID]: raising sqlcode 100 on line 50: no data found on line 50 +[NO_PID]: sqlca: code: 100, state: 02000 +[NO_PID]: ecpg_execute on line 54: query: close C; with 0 parameter(s) on connection ecpg1_regression +[NO_PID]: sqlca: code: 0, state: 00000 +[NO_PID]: ecpg_execute on line 54: using PQexec +[NO_PID]: sqlca: code: 0, state: 00000 +[NO_PID]: ecpg_process_output on line 54: OK: CLOSE CURSOR +[NO_PID]: sqlca: code: 0, state: 00000 +[NO_PID]: ecpg_execute on line 55: query: drop table strdbase; with 0 parameter(s) on connection ecpg1_regression +[NO_PID]: sqlca: code: 0, state: 00000 +[NO_PID]: ecpg_execute on line 55: using PQexec +[NO_PID]: sqlca: code: 0, state: 00000 +[NO_PID]: ecpg_process_output on line 55: OK: DROP TABLE +[NO_PID]: sqlca: code: 0, state: 00000 +[NO_PID]: ECPGtrans on line 59: action "commit work"; connection "ecpg1_regression" +[NO_PID]: sqlca: code: 0, state: 00000 +[NO_PID]: ecpg_finish: connection ecpg1_regression closed +[NO_PID]: sqlca: code: 0, state: 00000 diff --git a/src/interfaces/ecpg/test/expected/compat_oracle-char_array.stdout b/src/interfaces/ecpg/test/expected/compat_oracle-char_array.stdout new file mode 100644 index 0000000000..d58b3c7be4 --- /dev/null +++ b/src/interfaces/ecpg/test/expected/compat_oracle-char_array.stdout @@ -0,0 +1,10 @@ +Full Str. : Short Ind. +" ": " " -1 +"AB ": "AB " 0 +"ABCD ": "ABCD" 0 +"ABCDE ": "ABCD" 5 +"ABCDEF ": "ABCD" 6 +"ABCDEFGHIJ": "ABCD" 10 + +GOOD-BYE!! 
+ diff --git a/src/interfaces/ecpg/test/expected/connect-test1.c b/src/interfaces/ecpg/test/expected/connect-test1.c index 6471abb623..98b7e717c7 100644 --- a/src/interfaces/ecpg/test/expected/connect-test1.c +++ b/src/interfaces/ecpg/test/expected/connect-test1.c @@ -109,7 +109,7 @@ main(void) /* wrong port */ - { ECPGconnect(__LINE__, 0, "tcp:postgresql://localhost:20/ecpg2_regression" , "regress_ecpg_user1" , "connectpw" , NULL, 0); } + { ECPGconnect(__LINE__, 0, "tcp:postgresql://127.0.0.1:20/ecpg2_regression" , "regress_ecpg_user1" , "connectpw" , NULL, 0); } #line 57 "test1.pgc" /* no disconnect necessary */ @@ -120,5 +120,5 @@ main(void) /* no disconnect necessary */ - return (0); + return 0; } diff --git a/src/interfaces/ecpg/test/expected/connect-test1.stderr b/src/interfaces/ecpg/test/expected/connect-test1.stderr index 0e43a1a398..ad806a0225 100644 --- a/src/interfaces/ecpg/test/expected/connect-test1.stderr +++ b/src/interfaces/ecpg/test/expected/connect-test1.stderr @@ -63,13 +63,10 @@ [NO_PID]: sqlca: code: -402, state: 08001 [NO_PID]: raising sqlcode -220 on line 54: connection "CURRENT" does not exist on line 54 [NO_PID]: sqlca: code: -220, state: 08003 -[NO_PID]: ECPGconnect: opening database ecpg2_regression on localhost port for user regress_ecpg_user1 +[NO_PID]: ECPGconnect: opening database ecpg2_regression on 127.0.0.1 port for user regress_ecpg_user1 [NO_PID]: sqlca: code: 0, state: 00000 [NO_PID]: ECPGconnect: could not open database: could not connect to server: Connection refused - Is the server running on host "localhost" (::1) and accepting - TCP/IP connections on port 20? -could not connect to server: Connection refused - Is the server running on host "localhost" (127.0.0.1) and accepting + Is the server running on host "127.0.0.1" and accepting TCP/IP connections on port 20? 
[NO_PID]: sqlca: code: 0, state: 00000 diff --git a/src/interfaces/ecpg/test/expected/connect-test2.c b/src/interfaces/ecpg/test/expected/connect-test2.c index cf87c63386..deb7f19170 100644 --- a/src/interfaces/ecpg/test/expected/connect-test2.c +++ b/src/interfaces/ecpg/test/expected/connect-test2.c @@ -100,5 +100,5 @@ main(void) #line 43 "test2.pgc" - return (0); + return 0; } diff --git a/src/interfaces/ecpg/test/expected/connect-test3.c b/src/interfaces/ecpg/test/expected/connect-test3.c index 5bab6ba8f0..1a74a06973 100644 --- a/src/interfaces/ecpg/test/expected/connect-test3.c +++ b/src/interfaces/ecpg/test/expected/connect-test3.c @@ -102,5 +102,5 @@ main(void) * are used in other tests */ - return (0); + return 0; } diff --git a/src/interfaces/ecpg/test/expected/connect-test4.c b/src/interfaces/ecpg/test/expected/connect-test4.c index e1ae3e9a66..ff13e4ec41 100644 --- a/src/interfaces/ecpg/test/expected/connect-test4.c +++ b/src/interfaces/ecpg/test/expected/connect-test4.c @@ -40,5 +40,5 @@ main(void) #line 17 "test4.pgc" - return (0); + return 0; } diff --git a/src/interfaces/ecpg/test/expected/connect-test5.c b/src/interfaces/ecpg/test/expected/connect-test5.c index e991ee79b6..4b6569e763 100644 --- a/src/interfaces/ecpg/test/expected/connect-test5.c +++ b/src/interfaces/ecpg/test/expected/connect-test5.c @@ -158,5 +158,5 @@ main(void) #line 73 "test5.pgc" - return (0); + return 0; } diff --git a/src/interfaces/ecpg/test/expected/pgtypeslib-dt_test.c b/src/interfaces/ecpg/test/expected/pgtypeslib-dt_test.c index 00d43915b2..1509a56fd8 100644 --- a/src/interfaces/ecpg/test/expected/pgtypeslib-dt_test.c +++ b/src/interfaces/ecpg/test/expected/pgtypeslib-dt_test.c @@ -113,18 +113,18 @@ if (sqlca.sqlcode < 0) sqlprint ( );} text = PGTYPESdate_to_asc(date1); printf ("Date: %s\n", text); - free(text); + PGTYPESchar_free(text); text = PGTYPEStimestamp_to_asc(ts1); printf ("timestamp: %s\n", text); - free(text); + PGTYPESchar_free(text); iv1 = PGTYPESinterval_from_asc("13556 days 12 hours 34 minutes 14 seconds ", NULL); PGTYPESinterval_copy(iv1, &iv2); text = PGTYPESinterval_to_asc(&iv2); printf ("interval: %s\n", text); PGTYPESinterval_free(iv1); - free(text); + PGTYPESchar_free(text); PGTYPESdate_mdyjul(mdy, &date2); printf("m: %d, d: %d, y: %d\n", mdy[0], mdy[1], mdy[2]); @@ -144,7 +144,7 @@ if (sqlca.sqlcode < 0) sqlprint ( );} PGTYPESdate_fmt_asc(date1, fmt, out); printf("date_day of %s is %d\n", text, PGTYPESdate_dayofweek(date1)); printf("Above date in format \"%s\" is \"%s\"\n", fmt, out); - free(text); + PGTYPESchar_free(text); free(out); out = (char*) malloc(48); @@ -155,7 +155,7 @@ if (sqlca.sqlcode < 0) sqlprint ( );} /* rdate_defmt_asc() */ - date1 = 0; text = ""; + date1 = 0; fmt = "yy/mm/dd"; in = "In the year 1995, the month of December, it is the 25th day"; /* 0123456789012345678901234567890123456789012345678901234567890 @@ -164,108 +164,108 @@ if (sqlca.sqlcode < 0) sqlprint ( );} PGTYPESdate_defmt_asc(&date1, fmt, in); text = PGTYPESdate_to_asc(date1); printf("date_defmt_asc1: %s\n", text); - free(text); + PGTYPESchar_free(text); - date1 = 0; text = ""; + date1 = 0; fmt = "mmmm. dd. 
yyyy"; in = "12/25/95"; PGTYPESdate_defmt_asc(&date1, fmt, in); text = PGTYPESdate_to_asc(date1); printf("date_defmt_asc2: %s\n", text); - free(text); + PGTYPESchar_free(text); - date1 = 0; text = ""; + date1 = 0; fmt = "yy/mm/dd"; in = "95/12/25"; PGTYPESdate_defmt_asc(&date1, fmt, in); text = PGTYPESdate_to_asc(date1); printf("date_defmt_asc3: %s\n", text); - free(text); + PGTYPESchar_free(text); - date1 = 0; text = ""; + date1 = 0; fmt = "yy/mm/dd"; in = "1995, December 25th"; PGTYPESdate_defmt_asc(&date1, fmt, in); text = PGTYPESdate_to_asc(date1); printf("date_defmt_asc4: %s\n", text); - free(text); + PGTYPESchar_free(text); - date1 = 0; text = ""; + date1 = 0; fmt = "dd-mm-yy"; in = "This is 25th day of December, 1995"; PGTYPESdate_defmt_asc(&date1, fmt, in); text = PGTYPESdate_to_asc(date1); printf("date_defmt_asc5: %s\n", text); - free(text); + PGTYPESchar_free(text); - date1 = 0; text = ""; + date1 = 0; fmt = "mmddyy"; in = "Dec. 25th, 1995"; PGTYPESdate_defmt_asc(&date1, fmt, in); text = PGTYPESdate_to_asc(date1); printf("date_defmt_asc6: %s\n", text); - free(text); + PGTYPESchar_free(text); - date1 = 0; text = ""; + date1 = 0; fmt = "mmm. dd. yyyy"; in = "dec 25th 1995"; PGTYPESdate_defmt_asc(&date1, fmt, in); text = PGTYPESdate_to_asc(date1); printf("date_defmt_asc7: %s\n", text); - free(text); + PGTYPESchar_free(text); - date1 = 0; text = ""; + date1 = 0; fmt = "mmm. dd. yyyy"; in = "DEC-25-1995"; PGTYPESdate_defmt_asc(&date1, fmt, in); text = PGTYPESdate_to_asc(date1); printf("date_defmt_asc8: %s\n", text); - free(text); + PGTYPESchar_free(text); - date1 = 0; text = ""; + date1 = 0; fmt = "mm yy dd."; in = "12199525"; PGTYPESdate_defmt_asc(&date1, fmt, in); text = PGTYPESdate_to_asc(date1); printf("date_defmt_asc9: %s\n", text); - free(text); + PGTYPESchar_free(text); - date1 = 0; text = ""; + date1 = 0; fmt = "yyyy fierj mm dd."; in = "19951225"; PGTYPESdate_defmt_asc(&date1, fmt, in); text = PGTYPESdate_to_asc(date1); printf("date_defmt_asc10: %s\n", text); - free(text); + PGTYPESchar_free(text); - date1 = 0; text = ""; + date1 = 0; fmt = "mm/dd/yy"; in = "122595"; PGTYPESdate_defmt_asc(&date1, fmt, in); text = PGTYPESdate_to_asc(date1); printf("date_defmt_asc12: %s\n", text); - free(text); + PGTYPESchar_free(text); PGTYPEStimestamp_current(&ts1); text = PGTYPEStimestamp_to_asc(ts1); /* can't output this in regression mode */ /* printf("timestamp_current: Now: %s\n", text); */ - free(text); + PGTYPESchar_free(text); ts1 = PGTYPEStimestamp_from_asc("96-02-29", NULL); text = PGTYPEStimestamp_to_asc(ts1); printf("timestamp_to_asc1: %s\n", text); - free(text); + PGTYPESchar_free(text); ts1 = PGTYPEStimestamp_from_asc("1994-02-11 3:10:35", NULL); text = PGTYPEStimestamp_to_asc(ts1); printf("timestamp_to_asc2: %s\n", text); - free(text); + PGTYPESchar_free(text); ts1 = PGTYPEStimestamp_from_asc("1994-02-11 26:10:35", NULL); text = PGTYPEStimestamp_to_asc(ts1); printf("timestamp_to_asc3: %s\n", text); - free(text); + PGTYPESchar_free(text); /* abc-03:10:35-def-02/11/94-gh */ /* 12345678901234567890123456789 */ @@ -280,161 +280,161 @@ if (sqlca.sqlcode < 0) sqlprint ( );} i = PGTYPEStimestamp_defmt_asc(in, fmt, &ts1); text = PGTYPEStimestamp_to_asc(ts1); printf("timestamp_defmt_asc(%s, %s) = %s, error: %d\n", in, fmt, text, i); - free(text); + PGTYPESchar_free(text); fmt = "%a %b %d %H:%M:%S %z %Y"; in = "Tue Jul 22 17:28:44 +0200 2003"; i = PGTYPEStimestamp_defmt_asc(in, fmt, &ts1); text = PGTYPEStimestamp_to_asc(ts1); printf("timestamp_defmt_asc(%s, %s) = %s, error: %d\n", in, 
fmt, text, i); - free(text); + PGTYPESchar_free(text); fmt = "%a %b %d %H:%M:%S %z %Y"; in = "Tue Feb 29 17:28:44 +0200 2000"; i = PGTYPEStimestamp_defmt_asc(in, fmt, &ts1); text = PGTYPEStimestamp_to_asc(ts1); printf("timestamp_defmt_asc(%s, %s) = %s, error: %d\n", in, fmt, text, i); - free(text); + PGTYPESchar_free(text); fmt = "%a %b %d %H:%M:%S %z %Y"; in = "Tue Feb 29 17:28:44 +0200 1900"; i = PGTYPEStimestamp_defmt_asc(in, fmt, &ts1); text = PGTYPEStimestamp_to_asc(ts1); printf("timestamp_defmt_asc(%s, %s) = %s, error (should be error!): %d\n", in, fmt, text, i); - free(text); + PGTYPESchar_free(text); fmt = "%a %b %d %H:%M:%S %z %Y"; in = "Tue Feb 29 17:28:44 +0200 1996"; i = PGTYPEStimestamp_defmt_asc(in, fmt, &ts1); text = PGTYPEStimestamp_to_asc(ts1); printf("timestamp_defmt_asc(%s, %s) = %s, error: %d\n", in, fmt, text, i); - free(text); + PGTYPESchar_free(text); fmt = "%b %d %H:%M:%S %z %Y"; in = " Jul 31 17:28:44 +0200 1996"; i = PGTYPEStimestamp_defmt_asc(in, fmt, &ts1); text = PGTYPEStimestamp_to_asc(ts1); printf("timestamp_defmt_asc(%s, %s) = %s, error: %d\n", in, fmt, text, i); - free(text); + PGTYPESchar_free(text); fmt = "%b %d %H:%M:%S %z %Y"; in = " Jul 32 17:28:44 +0200 1996"; i = PGTYPEStimestamp_defmt_asc(in, fmt, &ts1); text = PGTYPEStimestamp_to_asc(ts1); printf("timestamp_defmt_asc(%s, %s) = %s, error (should be error!): %d\n", in, fmt, text, i); - free(text); + PGTYPESchar_free(text); fmt = "%a %b %d %H:%M:%S %z %Y"; in = "Tue Feb 29 17:28:44 +0200 1997"; i = PGTYPEStimestamp_defmt_asc(in, fmt, &ts1); text = PGTYPEStimestamp_to_asc(ts1); printf("timestamp_defmt_asc(%s, %s) = %s, error (should be error!): %d\n", in, fmt, text, i); - free(text); + PGTYPESchar_free(text); fmt = "%"; in = "Tue Jul 22 17:28:44 +0200 2003"; i = PGTYPEStimestamp_defmt_asc(in, fmt, &ts1); text = PGTYPEStimestamp_to_asc(ts1); printf("timestamp_defmt_asc(%s, %s) = %s, error (should be error!): %d\n", in, fmt, text, i); - free(text); + PGTYPESchar_free(text); fmt = "a %"; in = "Tue Jul 22 17:28:44 +0200 2003"; i = PGTYPEStimestamp_defmt_asc(in, fmt, &ts1); text = PGTYPEStimestamp_to_asc(ts1); printf("timestamp_defmt_asc(%s, %s) = %s, error (should be error!): %d\n", in, fmt, text, i); - free(text); + PGTYPESchar_free(text); fmt = "%b, %d %H_%M`%S %z %Y"; in = " Jul, 22 17_28 `44 +0200 2003 "; i = PGTYPEStimestamp_defmt_asc(in, fmt, &ts1); text = PGTYPEStimestamp_to_asc(ts1); printf("timestamp_defmt_asc(%s, %s) = %s, error: %d\n", in, fmt, text, i); - free(text); + PGTYPESchar_free(text); fmt = "%a %b %%%d %H:%M:%S %Z %Y"; in = "Tue Jul %22 17:28:44 CEST 2003"; i = PGTYPEStimestamp_defmt_asc(in, fmt, &ts1); text = PGTYPEStimestamp_to_asc(ts1); printf("timestamp_defmt_asc(%s, %s) = %s, error: %d\n", in, fmt, text, i); - free(text); + PGTYPESchar_free(text); fmt = "%a %b %%%d %H:%M:%S %Z %Y"; in = "Tue Jul %22 17:28:44 CEST 2003"; i = PGTYPEStimestamp_defmt_asc(in, fmt, &ts1); text = PGTYPEStimestamp_to_asc(ts1); printf("timestamp_defmt_asc(%s, %s) = %s, error: %d\n", in, fmt, text, i); - free(text); + PGTYPESchar_free(text); fmt = "abc%n %C %B %%%d %H:%M:%S %Z %Y"; in = "abc\n 19 October %22 17:28:44 CEST 2003"; i = PGTYPEStimestamp_defmt_asc(in, fmt, &ts1); text = PGTYPEStimestamp_to_asc(ts1); printf("timestamp_defmt_asc(%s, %s) = %s, error: %d\n", in, fmt, text, i); - free(text); + PGTYPESchar_free(text); fmt = "abc%n %C %B %%%d %H:%M:%S %Z %y"; in = "abc\n 18 October %34 17:28:44 CEST 80"; i = PGTYPEStimestamp_defmt_asc(in, fmt, &ts1); text = PGTYPEStimestamp_to_asc(ts1); 
printf("timestamp_defmt_asc(%s, %s) = %s, error (should be error!): %d\n", in, fmt, text, i); - free(text); + PGTYPESchar_free(text); fmt = ""; in = "abc\n 18 October %34 17:28:44 CEST 80"; i = PGTYPEStimestamp_defmt_asc(in, fmt, &ts1); text = PGTYPEStimestamp_to_asc(ts1); printf("timestamp_defmt_asc(%s, %s) = %s, error (should be error!): %d\n", in, fmt, text, i); - free(text); + PGTYPESchar_free(text); fmt = NULL; in = "1980-04-12 3:49:44 "; i = PGTYPEStimestamp_defmt_asc(in, fmt, &ts1); text = PGTYPEStimestamp_to_asc(ts1); printf("timestamp_defmt_asc(%s, NULL) = %s, error: %d\n", in, text, i); - free(text); + PGTYPESchar_free(text); fmt = "%B %d, %Y. Time: %I:%M%p"; in = "July 14, 1988. Time: 9:15am"; i = PGTYPEStimestamp_defmt_asc(in, fmt, &ts1); text = PGTYPEStimestamp_to_asc(ts1); printf("timestamp_defmt_asc(%s, %s) = %s, error: %d\n", in, fmt, text, i); - free(text); + PGTYPESchar_free(text); in = "September 6 at 01:30 pm in the year 1983"; fmt = "%B %d at %I:%M %p in the year %Y"; i = PGTYPEStimestamp_defmt_asc(in, fmt, &ts1); text = PGTYPEStimestamp_to_asc(ts1); printf("timestamp_defmt_asc(%s, %s) = %s, error: %d\n", in, fmt, text, i); - free(text); + PGTYPESchar_free(text); in = " 1976, July 14. Time: 9:15am"; fmt = "%Y, %B %d. Time: %I:%M %p"; i = PGTYPEStimestamp_defmt_asc(in, fmt, &ts1); text = PGTYPEStimestamp_to_asc(ts1); printf("timestamp_defmt_asc(%s, %s) = %s, error: %d\n", in, fmt, text, i); - free(text); + PGTYPESchar_free(text); in = " 1976, July 14. Time: 9:15 am"; fmt = "%Y, %B %d. Time: %I:%M%p"; i = PGTYPEStimestamp_defmt_asc(in, fmt, &ts1); text = PGTYPEStimestamp_to_asc(ts1); printf("timestamp_defmt_asc(%s, %s) = %s, error: %d\n", in, fmt, text, i); - free(text); + PGTYPESchar_free(text); in = " 1976, P.M. July 14. Time: 9:15"; fmt = "%Y, %P %B %d. Time: %I:%M"; i = PGTYPEStimestamp_defmt_asc(in, fmt, &ts1); text = PGTYPEStimestamp_to_asc(ts1); printf("timestamp_defmt_asc(%s, %s) = %s, error: %d\n", in, fmt, text, i); - free(text); + PGTYPESchar_free(text); in = "1234567890"; fmt = "%s"; i = PGTYPEStimestamp_defmt_asc(in, fmt, &ts1); text = PGTYPEStimestamp_to_asc(ts1); printf("timestamp_defmt_asc(%s, %s) = %s, error: %d\n", in, fmt, text, i); - free(text); + PGTYPESchar_free(text); { ECPGtrans(__LINE__, NULL, "rollback"); #line 365 "dt_test.pgc" @@ -449,5 +449,5 @@ if (sqlca.sqlcode < 0) sqlprint ( );} #line 366 "dt_test.pgc" - return (0); + return 0; } diff --git a/src/interfaces/ecpg/test/expected/pgtypeslib-dt_test2.c b/src/interfaces/ecpg/test/expected/pgtypeslib-dt_test2.c index b6e77562b2..b3e1e7519d 100644 --- a/src/interfaces/ecpg/test/expected/pgtypeslib-dt_test2.c +++ b/src/interfaces/ecpg/test/expected/pgtypeslib-dt_test2.c @@ -110,14 +110,14 @@ main(void) text = PGTYPEStimestamp_to_asc(ts1); printf("timestamp: %s\n", text); - free(text); + PGTYPESchar_free(text); date1 = PGTYPESdate_from_timestamp(ts1); dc = PGTYPESdate_new(); *dc = date1; text = PGTYPESdate_to_asc(*dc); printf("Date of timestamp: %s\n", text); - free(text); + PGTYPESchar_free(text); PGTYPESdate_free(dc); for (i = 0; dates[i]; i++) @@ -132,7 +132,7 @@ main(void) i, err ? "-" : text, endptr ? 'N' : 'Y', err ? 'T' : 'F'); - free(text); + PGTYPESchar_free(text); if (!err) { for (j = 0; times[j]; j++) @@ -147,7 +147,7 @@ main(void) text = PGTYPEStimestamp_to_asc(ts1); printf("TS[%d,%d]: %s\n", i, j, errno ? "-" : text); - free(text); + PGTYPESchar_free(text); free(t); } } @@ -171,16 +171,16 @@ main(void) continue; text = PGTYPESinterval_to_asc(i1); printf("interval[%d]: %s\n", i, text ? 
text : "-"); - free(text); + PGTYPESchar_free(text); ic = PGTYPESinterval_new(); PGTYPESinterval_copy(i1, ic); text = PGTYPESinterval_to_asc(i1); printf("interval_copy[%d]: %s\n", i, text ? text : "-"); - free(text); + PGTYPESchar_free(text); PGTYPESinterval_free(ic); PGTYPESinterval_free(i1); } - return (0); + return 0; } diff --git a/src/interfaces/ecpg/test/expected/pgtypeslib-nan_test.c b/src/interfaces/ecpg/test/expected/pgtypeslib-nan_test.c index ecd2343117..c831284059 100644 --- a/src/interfaces/ecpg/test/expected/pgtypeslib-nan_test.c +++ b/src/interfaces/ecpg/test/expected/pgtypeslib-nan_test.c @@ -269,5 +269,5 @@ if (sqlca.sqlcode < 0) sqlprint ( );} #line 91 "nan_test.pgc" - return (0); + return 0; } diff --git a/src/interfaces/ecpg/test/expected/pgtypeslib-num_test-MinGW32.stdout b/src/interfaces/ecpg/test/expected/pgtypeslib-num_test-MinGW32.stdout deleted file mode 100644 index 185a00ea59..0000000000 --- a/src/interfaces/ecpg/test/expected/pgtypeslib-num_test-MinGW32.stdout +++ /dev/null @@ -1,6 +0,0 @@ -from int = 1407.0 -add = 2379.7 -sub = 2369.7 -mul = 13306998429.873000000 -div = 1330699.84298730000 1.330700e+006 -to long(0) = 20000000 14 diff --git a/src/interfaces/ecpg/test/expected/pgtypeslib-num_test.c b/src/interfaces/ecpg/test/expected/pgtypeslib-num_test.c index 8019a8f63e..bf312549b4 100644 --- a/src/interfaces/ecpg/test/expected/pgtypeslib-num_test.c +++ b/src/interfaces/ecpg/test/expected/pgtypeslib-num_test.c @@ -24,11 +24,39 @@ +#line 1 "printf_hack.h" /* + * print_double(x) has the same effect as printf("%g", x), but is intended + * to produce the same formatting across all platforms. + */ +static void +print_double(double x) +{ +#ifdef WIN32 + /* Change Windows' 3-digit exponents to look like everyone else's */ + char convert[128]; + int vallen; + + sprintf(convert, "%g", x); + vallen = strlen(convert); + + if (vallen >= 6 && + convert[vallen - 5] == 'e' && + convert[vallen - 3] == '0') + { + convert[vallen - 3] = convert[vallen - 2]; + convert[vallen - 2] = convert[vallen - 1]; + convert[vallen - 1] = '\0'; + } + + printf("%s", convert); +#else + printf("%g", x); +#endif +} -NOTE: This file has a different expect file for regression tests on MinGW32 +#line 8 "num_test.pgc" -*/ int @@ -40,10 +68,10 @@ main(void) /* = {0, 0, 0, 0, 0, NULL, NULL} ; */ -#line 22 "num_test.pgc" +#line 17 "num_test.pgc" numeric * des ; /* exec sql end declare section */ -#line 24 "num_test.pgc" +#line 19 "num_test.pgc" double d; long l1, l2; @@ -51,34 +79,34 @@ main(void) ECPGdebug(1, stderr); /* exec sql whenever sqlerror do sqlprint ( ) ; */ -#line 30 "num_test.pgc" +#line 25 "num_test.pgc" { ECPGconnect(__LINE__, 0, "ecpg1_regression" , NULL, NULL , NULL, 0); -#line 32 "num_test.pgc" +#line 27 "num_test.pgc" if (sqlca.sqlcode < 0) sqlprint ( );} -#line 32 "num_test.pgc" +#line 27 "num_test.pgc" { ECPGsetcommit(__LINE__, "off", NULL); -#line 34 "num_test.pgc" +#line 29 "num_test.pgc" if (sqlca.sqlcode < 0) sqlprint ( );} -#line 34 "num_test.pgc" +#line 29 "num_test.pgc" { ECPGdo(__LINE__, 0, 1, NULL, 0, ECPGst_normal, "create table test ( text char ( 5 ) , num numeric ( 14 , 7 ) )", ECPGt_EOIT, ECPGt_EORT); -#line 35 "num_test.pgc" +#line 30 "num_test.pgc" if (sqlca.sqlcode < 0) sqlprint ( );} -#line 35 "num_test.pgc" +#line 30 "num_test.pgc" value1 = PGTYPESnumeric_new(); PGTYPESnumeric_from_int(1407, value1); text = PGTYPESnumeric_to_asc(value1, -1); printf("from int = %s\n", text); - free(text); + PGTYPESchar_free(text); PGTYPESnumeric_free(value1); value1 = 
PGTYPESnumeric_from_asc("2369.7", NULL); @@ -87,12 +115,12 @@ if (sqlca.sqlcode < 0) sqlprint ( );} PGTYPESnumeric_add(value1, value2, res); text = PGTYPESnumeric_to_asc(res, -1); printf("add = %s\n", text); - free(text); + PGTYPESchar_free(text); PGTYPESnumeric_sub(res, value2, res); text = PGTYPESnumeric_to_asc(res, -1); printf("sub = %s\n", text); - free(text); + PGTYPESchar_free(text); PGTYPESnumeric_free(value2); des = PGTYPESnumeric_new(); @@ -100,10 +128,10 @@ if (sqlca.sqlcode < 0) sqlprint ( );} { ECPGdo(__LINE__, 0, 1, NULL, 0, ECPGst_normal, "insert into test ( text , num ) values ( 'test' , $1 )", ECPGt_numeric,&(des),(long)1,(long)0,sizeof(numeric), ECPGt_NO_INDICATOR, NULL , 0L, 0L, 0L, ECPGt_EOIT, ECPGt_EORT); -#line 60 "num_test.pgc" +#line 55 "num_test.pgc" if (sqlca.sqlcode < 0) sqlprint ( );} -#line 60 "num_test.pgc" +#line 55 "num_test.pgc" value2 = PGTYPESnumeric_from_asc("2369.7", NULL); @@ -113,23 +141,25 @@ if (sqlca.sqlcode < 0) sqlprint ( );} { ECPGdo(__LINE__, 0, 1, NULL, 0, ECPGst_normal, "select num from test where text = 'test'", ECPGt_EOIT, ECPGt_numeric,&(des),(long)1,(long)0,sizeof(numeric), ECPGt_NO_INDICATOR, NULL , 0L, 0L, 0L, ECPGt_EORT); -#line 66 "num_test.pgc" +#line 61 "num_test.pgc" if (sqlca.sqlcode < 0) sqlprint ( );} -#line 66 "num_test.pgc" +#line 61 "num_test.pgc" PGTYPESnumeric_mul(res, des, res); text = PGTYPESnumeric_to_asc(res, -1); printf("mul = %s\n", text); - free(text); + PGTYPESchar_free(text); PGTYPESnumeric_free(des); value2 = PGTYPESnumeric_from_asc("10000", NULL); PGTYPESnumeric_div(res, value2, res); text = PGTYPESnumeric_to_asc(res, -1); PGTYPESnumeric_to_double(res, &d); - printf("div = %s %e\n", text, d); + printf("div = %s ", text); + print_double(d); + printf("\n"); PGTYPESnumeric_free(value1); PGTYPESnumeric_free(value2); @@ -139,23 +169,23 @@ if (sqlca.sqlcode < 0) sqlprint ( );} i = PGTYPESnumeric_to_long(value1, &l1) | PGTYPESnumeric_to_long(value2, &l2); printf("to long(%d) = %ld %ld\n", i, l1, l2); - free(text); + PGTYPESchar_free(text); PGTYPESnumeric_free(value1); PGTYPESnumeric_free(value2); PGTYPESnumeric_free(res); { ECPGtrans(__LINE__, NULL, "rollback"); -#line 93 "num_test.pgc" +#line 90 "num_test.pgc" if (sqlca.sqlcode < 0) sqlprint ( );} -#line 93 "num_test.pgc" +#line 90 "num_test.pgc" { ECPGdisconnect(__LINE__, "CURRENT"); -#line 94 "num_test.pgc" +#line 91 "num_test.pgc" if (sqlca.sqlcode < 0) sqlprint ( );} -#line 94 "num_test.pgc" +#line 91 "num_test.pgc" - return (0); + return 0; } diff --git a/src/interfaces/ecpg/test/expected/pgtypeslib-num_test.stderr b/src/interfaces/ecpg/test/expected/pgtypeslib-num_test.stderr index d834c22aab..a7d125402a 100644 --- a/src/interfaces/ecpg/test/expected/pgtypeslib-num_test.stderr +++ b/src/interfaces/ecpg/test/expected/pgtypeslib-num_test.stderr @@ -2,31 +2,31 @@ [NO_PID]: sqlca: code: 0, state: 00000 [NO_PID]: ECPGconnect: opening database ecpg1_regression on port [NO_PID]: sqlca: code: 0, state: 00000 -[NO_PID]: ECPGsetcommit on line 34: action "off"; connection "ecpg1_regression" +[NO_PID]: ECPGsetcommit on line 29: action "off"; connection "ecpg1_regression" [NO_PID]: sqlca: code: 0, state: 00000 -[NO_PID]: ecpg_execute on line 35: query: create table test ( text char ( 5 ) , num numeric ( 14 , 7 ) ); with 0 parameter(s) on connection ecpg1_regression +[NO_PID]: ecpg_execute on line 30: query: create table test ( text char ( 5 ) , num numeric ( 14 , 7 ) ); with 0 parameter(s) on connection ecpg1_regression [NO_PID]: sqlca: code: 0, state: 00000 -[NO_PID]: 
ecpg_execute on line 35: using PQexec +[NO_PID]: ecpg_execute on line 30: using PQexec [NO_PID]: sqlca: code: 0, state: 00000 -[NO_PID]: ecpg_process_output on line 35: OK: CREATE TABLE +[NO_PID]: ecpg_process_output on line 30: OK: CREATE TABLE [NO_PID]: sqlca: code: 0, state: 00000 -[NO_PID]: ecpg_execute on line 60: query: insert into test ( text , num ) values ( 'test' , $1 ); with 1 parameter(s) on connection ecpg1_regression +[NO_PID]: ecpg_execute on line 55: query: insert into test ( text , num ) values ( 'test' , $1 ); with 1 parameter(s) on connection ecpg1_regression [NO_PID]: sqlca: code: 0, state: 00000 -[NO_PID]: ecpg_execute on line 60: using PQexecParams +[NO_PID]: ecpg_execute on line 55: using PQexecParams [NO_PID]: sqlca: code: 0, state: 00000 -[NO_PID]: ecpg_free_params on line 60: parameter 1 = 2369.7 +[NO_PID]: ecpg_free_params on line 55: parameter 1 = 2369.7 [NO_PID]: sqlca: code: 0, state: 00000 -[NO_PID]: ecpg_process_output on line 60: OK: INSERT 0 1 +[NO_PID]: ecpg_process_output on line 55: OK: INSERT 0 1 [NO_PID]: sqlca: code: 0, state: 00000 -[NO_PID]: ecpg_execute on line 66: query: select num from test where text = 'test'; with 0 parameter(s) on connection ecpg1_regression +[NO_PID]: ecpg_execute on line 61: query: select num from test where text = 'test'; with 0 parameter(s) on connection ecpg1_regression [NO_PID]: sqlca: code: 0, state: 00000 -[NO_PID]: ecpg_execute on line 66: using PQexec +[NO_PID]: ecpg_execute on line 61: using PQexec [NO_PID]: sqlca: code: 0, state: 00000 -[NO_PID]: ecpg_process_output on line 66: correctly got 1 tuples with 1 fields +[NO_PID]: ecpg_process_output on line 61: correctly got 1 tuples with 1 fields [NO_PID]: sqlca: code: 0, state: 00000 -[NO_PID]: ecpg_get_data on line 66: RESULT: 2369.7000000 offset: -1; array: no +[NO_PID]: ecpg_get_data on line 61: RESULT: 2369.7000000 offset: -1; array: no [NO_PID]: sqlca: code: 0, state: 00000 -[NO_PID]: ECPGtrans on line 93: action "rollback"; connection "ecpg1_regression" +[NO_PID]: ECPGtrans on line 90: action "rollback"; connection "ecpg1_regression" [NO_PID]: sqlca: code: 0, state: 00000 [NO_PID]: ecpg_finish: connection ecpg1_regression closed [NO_PID]: sqlca: code: 0, state: 00000 diff --git a/src/interfaces/ecpg/test/expected/pgtypeslib-num_test.stdout b/src/interfaces/ecpg/test/expected/pgtypeslib-num_test.stdout index 52515ebde2..204c3cf6c0 100644 --- a/src/interfaces/ecpg/test/expected/pgtypeslib-num_test.stdout +++ b/src/interfaces/ecpg/test/expected/pgtypeslib-num_test.stdout @@ -2,5 +2,5 @@ from int = 1407.0 add = 2379.7 sub = 2369.7 mul = 13306998429.873000000 -div = 1330699.84298730000 1.330700e+06 +div = 1330699.84298730000 1.3307e+06 to long(0) = 20000000 14 diff --git a/src/interfaces/ecpg/test/expected/pgtypeslib-num_test2-MinGW32.stdout b/src/interfaces/ecpg/test/expected/pgtypeslib-num_test2-MinGW32.stdout deleted file mode 100644 index 2dad8a91a1..0000000000 --- a/src/interfaces/ecpg/test/expected/pgtypeslib-num_test2-MinGW32.stdout +++ /dev/null @@ -1,1117 +0,0 @@ -endptr of 0 is not NULL -num[0,1]: 20000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000 -num[0,2]: 
20000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000 -num[0,3]: 20000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000.0 -num[0,4]: 20000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000.00 -num[0,5]: 0.00 -(errno == PGTYPES_NUM_OVERFLOW) - num[0,6]: 0 (r: -1) -(errno == PGTYPES_NUM_OVERFLOW) - num[0,8]: 0 (r: -1) -(errno == PGTYPES_NUM_OVERFLOW) - num[0,10]: 0 (r: -1) -num[0,11]: - (r: 0) -num[0,12]: 20000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000.00 (r: 0 - cmp: 0) - -endptr of 1 is not NULL -num[1,1]: -2 -num[1,2]: -2 -num[1,3]: -2.0 -num[1,4]: -2.00 -num[1,5]: 0.00 -num[1,6]: -2 (r: 0) -num[1,7]: -2.00 (r: 0 - cmp: 0) -num[1,8]: -2 (r: 0) -num[1,9]: -2.00 (r: 0 - cmp: 0) -num[1,10]: -2 (r: 0) -num[1,11]: - (r: 0) -num[1,12]: -2.00 (r: 0 - cmp: 0) - -endptr of 2 is not NULL -num[2,1]: 0.794 -num[2,2]: 1 -num[2,3]: 0.8 -num[2,4]: 0.79 -num[2,5]: 0.00 -num[2,6]: 1 (r: 0) -num[2,7]: 1.00 (r: 0 - cmp: -1) -num[2,8]: 1 (r: 0) -num[2,9]: 1.00 (r: 0 - cmp: -1) -num[2,10]: 0.794 (r: 0) -num[2,11]: - (r: 0) -num[2,12]: 0.79 (r: 0 - cmp: 0) - -endptr of 3 is not NULL -num[3,1]: 3.44 -num[3,2]: 3 -num[3,3]: 3.4 -num[3,4]: 3.44 -num[3,5]: 0.00 -num[3,6]: 3 (r: 0) -num[3,7]: 3.00 (r: 0 - cmp: 1) -num[3,8]: 3 (r: 0) -num[3,9]: 3.00 (r: 0 - cmp: 1) -num[3,10]: 3.44 (r: 0) -num[3,11]: - (r: 0) -num[3,12]: 3.44 (r: 0 - cmp: 0) - -endptr of 4 is not NULL -num[4,1]: 592490000000000000000000 -num[4,2]: 592490000000000000000000 -num[4,3]: 592490000000000000000000.0 -num[4,4]: 592490000000000000000000.00 -num[4,5]: 0.00 -(errno == PGTYPES_NUM_OVERFLOW) - num[4,6]: 0 (r: -1) -(errno == PGTYPES_NUM_OVERFLOW) - num[4,8]: 0 (r: -1) -num[4,10]: 5.9249e+023 (r: 0) -num[4,11]: - (r: 0) -num[4,12]: 592490000000000000000000.00 (r: 0 - cmp: 0) - -endptr of 5 is not NULL -num[5,1]: -328400 -num[5,2]: -328400 -num[5,3]: -328400.0 -num[5,4]: -328400.00 -num[5,5]: 0.00 -num[5,6]: -328400 (r: 0) -num[5,7]: -328400.00 (r: 0 - cmp: 0) -num[5,8]: -328400 (r: 0) -num[5,9]: -328400.00 (r: 0 - cmp: 0) -num[5,10]: -328400 (r: 0) -num[5,11]: - (r: 0) -num[5,12]: -328400.00 (r: 0 - cmp: 0) - -endptr of 6 is not NULL -num[6,1]: 
0.0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000002 -num[6,2]: 0 -num[6,3]: 0.0 -num[6,4]: 0.00 -num[6,5]: 0.00 -num[6,6]: 0 (r: 0) -num[6,7]: 0.00 (r: 0 - cmp: 1) -num[6,8]: 0 (r: 0) -num[6,9]: 0.00 (r: 0 - cmp: 1) -num[6,11]: - (r: 0) -num[6,12]: 0.00 (r: 0 - cmp: 0) - -endptr of 7 is not NULL -num[7,1]: 0.001 -num[7,2]: 0 -num[7,3]: 0.0 -num[7,4]: 0.00 -num[7,5]: 0.00 -num[7,6]: 0 (r: 0) -num[7,7]: 0.00 (r: 0 - cmp: 1) -num[7,8]: 0 (r: 0) -num[7,9]: 0.00 (r: 0 - cmp: 1) -num[7,10]: 0.001 (r: 0) -num[7,11]: - (r: 0) -num[7,12]: 0.00 (r: 0 - cmp: 0) - -endptr of 8 is not NULL -num[8,1]: 0.0 -num[8,2]: 0 -num[8,3]: 0.0 -num[8,4]: 0.00 -num[8,5]: 0.00 -num[8,6]: 0 (r: 0) -num[8,7]: 0.00 (r: 0 - cmp: 0) -num[8,8]: 0 (r: 0) -num[8,9]: 0.00 (r: 0 - cmp: 0) -num[8,10]: 0 (r: 0) -num[8,11]: - (r: 0) -num[8,12]: 0.00 (r: 0 - cmp: 0) - -endptr of 9 is not NULL -num[9,1]: -0.000059249 -num[9,2]: -0 -num[9,3]: -0.0 -num[9,4]: -0.00 -num[9,5]: 0.00 -num[9,6]: 0 (r: 0) -num[9,7]: 0.00 (r: 0 - cmp: -1) -num[9,8]: 0 (r: 0) -num[9,9]: 0.00 (r: 0 - cmp: -1) -num[9,10]: -5.9249e-005 (r: 0) -num[9,11]: - (r: 0) -num[9,12]: -0.00 (r: 0 - cmp: 0) - -endptr of 10 is not NULL -num[10,1]: 0.003284 -num[10,2]: 0 -num[10,3]: 0.0 -num[10,4]: 0.00 -num[10,5]: 0.00 -num[10,6]: 0 (r: 0) -num[10,7]: 0.00 (r: 0 - cmp: 1) -num[10,8]: 0 (r: 0) -num[10,9]: 0.00 (r: 0 - cmp: 1) -num[10,10]: 0.003284 (r: 0) -num[10,11]: - (r: 0) -num[10,12]: 0.00 (r: 0 - cmp: 0) - -endptr of 11 is not NULL -num[11,1]: 0.500001 -num[11,2]: 1 -num[11,3]: 0.5 -num[11,4]: 0.50 -num[11,5]: 0.00 -num[11,6]: 1 (r: 0) -num[11,7]: 1.00 (r: 0 - cmp: -1) -num[11,8]: 1 (r: 0) -num[11,9]: 1.00 (r: 0 - cmp: -1) -num[11,10]: 0.500001 (r: 0) -num[11,11]: - (r: 0) -num[11,12]: 0.50 (r: 0 - cmp: 0) - -endptr of 12 is not NULL -num[12,1]: -0.5000001 -num[12,2]: -1 -num[12,3]: -0.5 -num[12,4]: -0.50 -num[12,5]: 0.00 -num[12,6]: -1 (r: 0) -num[12,7]: -1.00 (r: 0 - cmp: 1) -num[12,8]: -1 (r: 0) -num[12,9]: -1.00 (r: 0 - cmp: 1) -num[12,10]: -0.5 (r: 0) -num[12,11]: - (r: 0) -num[12,12]: -0.50 (r: 0 - cmp: 0) - -endptr of 13 is not NULL -num[13,1]: 1234567890123456789012345678.91 -num[13,2]: 1234567890123456789012345679 -num[13,3]: 1234567890123456789012345678.9 -num[13,4]: 1234567890123456789012345678.91 -num[13,5]: 0.00 -(errno == PGTYPES_NUM_OVERFLOW) - num[13,6]: 0 (r: -1) -(errno == PGTYPES_NUM_OVERFLOW) - num[13,8]: 0 (r: -1) -num[13,10]: 1.23457e+027 (r: 0) -num[13,11]: - (r: 0) -num[13,12]: 1234567890123456789012345678.91 (r: 0 - cmp: 0) - -endptr of 14 is not NULL -num[14,1]: 1234567890123456789012345678.921 -num[14,2]: 1234567890123456789012345679 -num[14,3]: 1234567890123456789012345678.9 -num[14,4]: 1234567890123456789012345678.92 -num[14,5]: 0.00 -(errno == PGTYPES_NUM_OVERFLOW) - num[14,6]: 0 (r: -1) -(errno == PGTYPES_NUM_OVERFLOW) - num[14,8]: 0 (r: -1) -num[14,10]: 1.23457e+027 (r: 0) -(errno == PGTYPES_NUM_OVERFLOW) - num[14,11]: - (r: -1) - -(errno == PGTYPES_NUM_BAD_NUMERIC) - endptr of 15 is not NULL -*endptr of 15 is not \0 -num[a,0,0]: 
40000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000.0000000000 -num[s,0,0]: 0.0000000000 -num[m,0,0]: 400000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000.0000000000 -num[d,0,0]: 1.0000000000 -num[a,0,1]: 19999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999998.0000000000 -num[s,0,1]: 20000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000002.0000000000 -num[m,0,1]: -40000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000.0000000000 -num[d,0,1]: -10000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000.0000000000 -num[a,0,2]: 20000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000.7940000000 -num[s,0,2]: 
19999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999.2060000000 -num[m,0,2]: 15880000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000.0000000000 -num[d,0,2]: 25188916876574307304785894206549118387909319899244332493702770780856423173803526448362720403022670025188916876574307304785894206549118387909319899244332493702770780856423173803526448362720403022670025188916876574307304785894206549118387909319899244332493702770780856423173803526448362720403022670025188916876574307304785894206549118387909319899244332493702770780856423173803526448362720403022670.0251889000 -num[a,0,3]: 20000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000003.4400000000 -num[s,0,3]: 19999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999996.5600000000 -num[m,0,3]: 68800000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000.0000000000 -num[d,0,3]: 5813953488372093023255813953488372093023255813953488372093023255813953488372093023255813953488372093023255813953488372093023255813953488372093023255813953488372093023255813953488372093023255813953488372093023255813953488372093023255813953488372093023255813953488372093023255813953488372093023255813953488372093023255813953488372093023255813953488372093023255813953488372093023255813953488372093.0232560000 -num[a,0,4]: 20000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000592490000000000000000000.0000000000 -num[s,0,4]: 
19999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999407510000000000000000000.0000000000 -num[m,0,4]: 11849800000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000.0000000000 -num[d,0,4]: 33755843980489122179277287380377727894141673277186112845786426775135445323971712602744350115613765633175243464024709277793718037435230974362436496818511704838900234603115664399399145977147293625208864284629276443484278215666087191345001603402589073233303515671150567942074971729480666340360174855271818933652888656348630356630491653867575824064541173690695201606778173471.2822000000 -num[a,0,5]: 19999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999671600.0000000000 -num[s,0,5]: 20000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000328400.0000000000 -num[m,0,5]: -6568000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000.0000000000 -num[d,0,5]: -60901339829476248477466504263093788063337393422655298416565164433617539585870889159561510353227771010962241169305724725943970767356881851400730816077953714981729598051157125456760048721071863580998781973203410475030450669914738124238733252131546894031668696711327649208282582216808769792935444579780755176613885505481120584652862362971985383678440925700365408038976857490864799025578562728.3800000000 -num[a,0,6]: 20000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000.0000000000 -num[s,0,6]: 
20000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000.0000000000 -num[m,0,6]: 4.0000000000 -num[d,0,6]: 100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000.0000000000 -num[a,0,7]: 20000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000.0010000000 -num[s,0,7]: 19999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999.9990000000 -num[m,0,7]: 20000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000.0000000000 -num[d,0,7]: 20000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000.0000000000 -num[a,0,8]: 20000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000.0000000000 -num[s,0,8]: 
20000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000.0000000000 -num[m,0,8]: 0.0000000000 -(errno == PGTYPES_NUM_DIVIDE_ZERO) - r: -1 -num[a,0,9]: 19999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999.9999407510 -num[s,0,9]: 20000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000.0000592490 -num[m,0,9]: -1184980000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000.0000000000 -num[d,0,9]: -337558439804891221792772873803777278941416732771861128457864267751354453239717126027443501156137656331752434640247092777937180374352309743624364968185117048389002346031156643993991459771472936252088642846292764434842782156660871913450016034025890732333035156711505679420749717294806663403601748552718189336528886563486303566304916538675758240645411736906952016067781734712822157335988793059798477611.4364799406 -num[a,0,10]: 20000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000.0032840000 -num[s,0,10]: 19999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999.9967160000 -num[m,0,10]: 65680000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000.0000000000 -num[d,0,10]: 
6090133982947624847746650426309378806333739342265529841656516443361753958587088915956151035322777101096224116930572472594397076735688185140073081607795371498172959805115712545676004872107186358099878197320341047503045066991473812423873325213154689403166869671132764920828258221680876979293544457978075517661388550548112058465286236297198538367844092570036540803897685749086479902557856272838002436.0535931790 -num[a,0,11]: 20000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000.5000010000 -num[s,0,11]: 19999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999.4999990000 -num[m,0,11]: 10000020000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000.0000000000 -num[d,0,11]: 39999920000159999680000639998720002559994880010239979520040959918080163839672320655358689282621434757130485739028521942956114087771824456351087297825404349191301617396765206469587060825878348243303513392973214053571892856214287571424857150285699428601142797714404571190857618284763430473139053721892556214887570224859550280899438201123597752804494391011217977564044871910256179487641024717950564.0988718023 -num[a,0,12]: 19999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999.4999999000 -num[s,0,12]: 20000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000.5000001000 -num[m,0,12]: -10000002000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000.0000000000 -num[d,0,12]: 
-39999992000001599999680000063999987200002559999488000102399979520004095999180800163839967232006553598689280262143947571210485757902848419430316113936777212644557471088505782298843540231291953741609251678149664370067125986574802685039462992107401578519684296063140787371842525631494873701025259794948041010391797921640415671916865616626876674624665075066984986603002679399464120107175978564804287.0391425922 -num[a,0,13]: 20000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001234567890123456789012345678.9100000000 -num[s,0,13]: 19999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999998765432109876543210987654321.0900000000 -num[m,0,13]: 24691357802469135780246913578200000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000.0000000000 -num[d,0,13]: 16200000145800001326780012073583089868436639792144602011794432069173427871944627216535488452104037734402488812766825268543092076559937753306592463425498481952238394402210698594884666469792189859469175471551488967831431174229215285702844063565115973562205366453218447154498968406694142978268167639546201917272112583939026078314516169317627127550253962230423465305471042.6305370000 -num[a,0,14]: 20000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001234567890123456789012345678.9210000000 -num[s,0,14]: 19999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999998765432109876543210987654321.0790000000 -num[m,0,14]: 24691357802469135780246913578420000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000.0000000000 -num[d,0,14]: 
16200000145800001326780012073438747865838483756809679983855488190421495112429743605252505619668606245113387993074870449075163411519793685453399903975484793505772589916586682299621591493014451434717743879521702761509433606809200816287108380036339147553262638409152127519194255768451119299228069799803818551847328103399274511743550433402484767562070619244777160576307850.4589877000 -num[a,1,0]: 19999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999998.0000000000 -num[s,1,0]: -20000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000002.0000000000 -num[m,1,0]: -40000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000.0000000000 -num[d,1,0]: -0.0000000000 -num[a,1,1]: -4.0000000000 -num[s,1,1]: 0.0000000000 -num[m,1,1]: 4.0000000000 -num[d,1,1]: 1.0000000000 -num[a,1,2]: -1.2060000000 -num[s,1,2]: -2.7940000000 -num[m,1,2]: -1.5880000000 -num[d,1,2]: -2.5188916877 -num[a,1,3]: 1.4400000000 -num[s,1,3]: -5.4400000000 -num[m,1,3]: -6.8800000000 -num[d,1,3]: -0.5813953488 -num[a,1,4]: 592489999999999999999998.0000000000 -num[s,1,4]: -592490000000000000000002.0000000000 -num[m,1,4]: -1184980000000000000000000.0000000000 -num[d,1,4]: -0.0000000000 -num[a,1,5]: -328402.0000000000 -num[s,1,5]: 328398.0000000000 -num[m,1,5]: 656800.0000000000 -num[d,1,5]: 0.0000060901 -num[a,1,6]: -2.0000000000 -num[s,1,6]: -2.0000000000 -num[m,1,6]: -0.0000000000 -num[d,1,6]: -10000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000.0000000000 -num[a,1,7]: -1.9990000000 -num[s,1,7]: -2.0010000000 -num[m,1,7]: -0.0020000000 -num[d,1,7]: -2000.0000000000 -num[a,1,8]: -2.0000000000 -num[s,1,8]: -2.0000000000 -num[m,1,8]: 0.0000000000 -(errno == PGTYPES_NUM_DIVIDE_ZERO) - r: -1 -num[a,1,9]: -2.0000592490 -num[s,1,9]: -1.9999407510 -num[m,1,9]: 0.0001184980 -num[d,1,9]: 33755.8439804891 -num[a,1,10]: -1.9967160000 -num[s,1,10]: -2.0032840000 -num[m,1,10]: -0.0065680000 -num[d,1,10]: -609.0133982948 -num[a,1,11]: -1.4999990000 -num[s,1,11]: -2.5000010000 -num[m,1,11]: -1.0000020000 -num[d,1,11]: -3.9999920000 -num[a,1,12]: -2.5000001000 -num[s,1,12]: -1.4999999000 -num[m,1,12]: 1.0000002000 -num[d,1,12]: 3.9999992000 -num[a,1,13]: 1234567890123456789012345676.9100000000 -num[s,1,13]: 
-1234567890123456789012345680.9100000000 -num[m,1,13]: -2469135780246913578024691357.8200000000 -num[d,1,13]: -0.0000000000 -num[a,1,14]: 1234567890123456789012345676.9210000000 -num[s,1,14]: -1234567890123456789012345680.9210000000 -num[m,1,14]: -2469135780246913578024691357.8420000000 -num[d,1,14]: -0.0000000000 -num[a,2,0]: 20000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000.7940000000 -num[s,2,0]: -19999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999.2060000000 -num[m,2,0]: 15880000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000.0000000000 -num[d,2,0]: 0.0000000000 -num[a,2,1]: -1.2060000000 -num[s,2,1]: 2.7940000000 -num[m,2,1]: -1.5880000000 -num[d,2,1]: -0.3970000000 -num[a,2,2]: 1.5880000000 -num[s,2,2]: 0.0000000000 -num[m,2,2]: 0.6304360000 -num[d,2,2]: 1.0000000000 -num[a,2,3]: 4.2340000000 -num[s,2,3]: -2.6460000000 -num[m,2,3]: 2.7313600000 -num[d,2,3]: 0.2308139535 -num[a,2,4]: 592490000000000000000000.7940000000 -num[s,2,4]: -592489999999999999999999.2060000000 -num[m,2,4]: 470437060000000000000000.0000000000 -num[d,2,4]: 0.0000000000 -num[a,2,5]: -328399.2060000000 -num[s,2,5]: 328400.7940000000 -num[m,2,5]: -260749.6000000000 -num[d,2,5]: -0.0000024178 -num[a,2,6]: 0.7940000000 -num[s,2,6]: 0.7940000000 -num[m,2,6]: 0.0000000000 -num[d,2,6]: 3970000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000.0000000000 -num[a,2,7]: 0.7950000000 -num[s,2,7]: 0.7930000000 -num[m,2,7]: 0.0007940000 -num[d,2,7]: 794.0000000000 -num[a,2,8]: 0.7940000000 -num[s,2,8]: 0.7940000000 -num[m,2,8]: 0.0000000000 -(errno == PGTYPES_NUM_DIVIDE_ZERO) - r: -1 -num[a,2,9]: 0.7939407510 -num[s,2,9]: 0.7940592490 -num[m,2,9]: -0.0000470437 -num[d,2,9]: -13401.0700602542 -num[a,2,10]: 0.7972840000 -num[s,2,10]: 0.7907160000 -num[m,2,10]: 0.0026074960 -num[d,2,10]: 241.7783191230 -num[a,2,11]: 1.2940010000 -num[s,2,11]: 0.2939990000 -num[m,2,11]: 0.3970007940 -num[d,2,11]: 1.5879968240 -num[a,2,12]: 0.2939999000 -num[s,2,12]: 1.2940001000 -num[m,2,12]: -0.3970000794 -num[d,2,12]: -1.5879996824 -num[a,2,13]: 1234567890123456789012345679.7040000000 -num[s,2,13]: -1234567890123456789012345678.1160000000 -num[m,2,13]: 980246904758024690475802469.0545400000 -num[d,2,13]: 
0.0000000000 -num[a,2,14]: 1234567890123456789012345679.7150000000 -num[s,2,14]: -1234567890123456789012345678.1270000000 -num[m,2,14]: 980246904758024690475802469.0632740000 -num[d,2,14]: 0.0000000000 -num[a,3,0]: 20000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000003.4400000000 -num[s,3,0]: -19999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999996.5600000000 -num[m,3,0]: 68800000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000.0000000000 -num[d,3,0]: 0.0000000000 -num[a,3,1]: 1.4400000000 -num[s,3,1]: 5.4400000000 -num[m,3,1]: -6.8800000000 -num[d,3,1]: -1.7200000000 -num[a,3,2]: 4.2340000000 -num[s,3,2]: 2.6460000000 -num[m,3,2]: 2.7313600000 -num[d,3,2]: 4.3324937028 -num[a,3,3]: 6.8800000000 -num[s,3,3]: 0.0000000000 -num[m,3,3]: 11.8336000000 -num[d,3,3]: 1.0000000000 -num[a,3,4]: 592490000000000000000003.4400000000 -num[s,3,4]: -592489999999999999999996.5600000000 -num[m,3,4]: 2038165600000000000000000.0000000000 -num[d,3,4]: 0.0000000000 -num[a,3,5]: -328396.5600000000 -num[s,3,5]: 328403.4400000000 -num[m,3,5]: -1129696.0000000000 -num[d,3,5]: -0.0000104750 -num[a,3,6]: 3.4400000000 -num[s,3,6]: 3.4400000000 -num[m,3,6]: 0.0000000000 -num[d,3,6]: 17200000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000.0000000000 -num[a,3,7]: 3.4410000000 -num[s,3,7]: 3.4390000000 -num[m,3,7]: 0.0034400000 -num[d,3,7]: 3440.0000000000 -num[a,3,8]: 3.4400000000 -num[s,3,8]: 3.4400000000 -num[m,3,8]: 0.0000000000 -(errno == PGTYPES_NUM_DIVIDE_ZERO) - r: -1 -num[a,3,9]: 3.4399407510 -num[s,3,9]: 3.4400592490 -num[m,3,9]: -0.0002038166 -num[d,3,9]: -58060.0516464413 -num[a,3,10]: 3.4432840000 -num[s,3,10]: 3.4367160000 -num[m,3,10]: 0.0112969600 -num[d,3,10]: 1047.5030450670 -num[a,3,11]: 3.9400010000 -num[s,3,11]: 2.9399990000 -num[m,3,11]: 1.7200034400 -num[d,3,11]: 6.8799862400 -num[a,3,12]: 2.9399999000 -num[s,3,12]: 3.9400001000 -num[m,3,12]: -1.7200003440 -num[d,3,12]: -6.8799986240 -num[a,3,13]: 1234567890123456789012345682.3500000000 -num[s,3,13]: -1234567890123456789012345675.4700000000 -num[m,3,13]: 4246913542024691354202469135.4504000000 -num[d,3,13]: 0.0000000000 -num[a,3,14]: 1234567890123456789012345682.3610000000 -num[s,3,14]: 
-1234567890123456789012345675.4810000000 -num[m,3,14]: 4246913542024691354202469135.4882400000 -num[d,3,14]: 0.0000000000 -num[a,4,0]: 20000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000592490000000000000000000.0000000000 -num[s,4,0]: -19999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999407510000000000000000000.0000000000 -num[m,4,0]: 11849800000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000.0000000000 -num[d,4,0]: 0.0000000000 -num[a,4,1]: 592489999999999999999998.0000000000 -num[s,4,1]: 592490000000000000000002.0000000000 -num[m,4,1]: -1184980000000000000000000.0000000000 -num[d,4,1]: -296245000000000000000000.0000000000 -num[a,4,2]: 592490000000000000000000.7940000000 -num[s,4,2]: 592489999999999999999999.2060000000 -num[m,4,2]: 470437060000000000000000.0000000000 -num[d,4,2]: 746209068010075566750629.7229219000 -num[a,4,3]: 592490000000000000000003.4400000000 -num[s,4,3]: 592489999999999999999996.5600000000 -num[m,4,3]: 2038165600000000000000000.0000000000 -num[d,4,3]: 172235465116279069767441.8604650000 -num[a,4,4]: 1184980000000000000000000.0000000000 -num[s,4,4]: 0.0000000000 -num[m,4,4]: 351044400100000000000000000000000000000000000000.0000000000 -num[d,4,4]: 1.0000000000 -num[a,4,5]: 592489999999999999671600.0000000000 -num[s,4,5]: 592490000000000000328400.0000000000 -num[m,4,5]: -194573716000000000000000000000.0000000000 -num[d,4,5]: -1804171741778319123.0207000000 -num[a,4,6]: 592490000000000000000000.0000000000 -num[s,4,6]: 592490000000000000000000.0000000000 -num[m,4,6]: 0.0000000000 -num[d,4,6]: 2962450000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000.0000000000 -num[a,4,7]: 592490000000000000000000.0010000000 -num[s,4,7]: 592489999999999999999999.9990000000 -num[m,4,7]: 592490000000000000000.0000000000 -num[d,4,7]: 592490000000000000000000000.0000000000 -num[a,4,8]: 592490000000000000000000.0000000000 -num[s,4,8]: 592490000000000000000000.0000000000 -num[m,4,8]: 0.0000000000 -(errno == PGTYPES_NUM_DIVIDE_ZERO) - r: -1 -num[a,4,9]: 592489999999999999999999.9999407510 -num[s,4,9]: 592490000000000000000000.0000592490 -num[m,4,9]: -35104440010000000000.0000000000 -num[d,4,9]: -10000000000000000000000000000.0000000000 -num[a,4,10]: 
592490000000000000000000.0032840000 -num[s,4,10]: 592489999999999999999999.9967160000 -num[m,4,10]: 1945737160000000000000.0000000000 -num[d,4,10]: 180417174177831912302070645.5542021924 -num[a,4,11]: 592490000000000000000000.5000010000 -num[s,4,11]: 592489999999999999999999.4999990000 -num[m,4,11]: 296245592490000000000000.0000000000 -num[d,4,11]: 1184977630044739910520178.9596420807 -num[a,4,12]: 592489999999999999999999.4999999000 -num[s,4,12]: 592490000000000000000000.5000001000 -num[m,4,12]: -296245059249000000000000.0000000000 -num[d,4,12]: -1184979763004047399190520.1618959676 -num[a,4,13]: 1235160380123456789012345678.9100000000 -num[s,4,13]: -1233975400123456789012345678.9100000000 -num[m,4,13]: 731469129219246912921924691297385900000000000000000.0000000000 -num[d,4,13]: 0.0004799169 -num[a,4,14]: 1235160380123456789012345678.9210000000 -num[s,4,14]: -1233975400123456789012345678.9210000000 -num[m,4,14]: 731469129219246912921924691303903290000000000000000.0000000000 -num[d,4,14]: 0.0004799169 -num[a,5,0]: 19999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999671600.0000000000 -num[s,5,0]: -20000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000328400.0000000000 -num[m,5,0]: -6568000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000.0000000000 -num[d,5,0]: -0.0000000000 -num[a,5,1]: -328402.0000000000 -num[s,5,1]: -328398.0000000000 -num[m,5,1]: 656800.0000000000 -num[d,5,1]: 164200.0000000000 -num[a,5,2]: -328399.2060000000 -num[s,5,2]: -328400.7940000000 -num[m,5,2]: -260749.6000000000 -num[d,5,2]: -413602.0151133501 -num[a,5,3]: -328396.5600000000 -num[s,5,3]: -328403.4400000000 -num[m,5,3]: -1129696.0000000000 -num[d,5,3]: -95465.1162790698 -num[a,5,4]: 592489999999999999671600.0000000000 -num[s,5,4]: -592490000000000000328400.0000000000 -num[m,5,4]: -194573716000000000000000000000.0000000000 -num[d,5,4]: -0.0000000000 -num[a,5,5]: -656800.0000000000 -num[s,5,5]: 0.0000000000 -num[m,5,5]: 107846560000.0000000000 -num[d,5,5]: 1.0000000000 -num[a,5,6]: -328400.0000000000 -num[s,5,6]: -328400.0000000000 -num[m,5,6]: -0.0000000000 -num[d,5,6]: -1642000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000.0000000000 -num[a,5,7]: -328399.9990000000 
-num[s,5,7]: -328400.0010000000 -num[m,5,7]: -328.4000000000 -num[d,5,7]: -328400000.0000000000 -num[a,5,8]: -328400.0000000000 -num[s,5,8]: -328400.0000000000 -num[m,5,8]: 0.0000000000 -(errno == PGTYPES_NUM_DIVIDE_ZERO) - r: -1 -num[a,5,9]: -328400.0000592490 -num[s,5,9]: -328399.9999407510 -num[m,5,9]: 19.4573716000 -num[d,5,9]: 5542709581.5963138618 -num[a,5,10]: -328399.9967160000 -num[s,5,10]: -328400.0032840000 -num[m,5,10]: -1078.4656000000 -num[d,5,10]: -100000000.0000000000 -num[a,5,11]: -328399.4999990000 -num[s,5,11]: -328400.5000010000 -num[m,5,11]: -164200.3284000000 -num[d,5,11]: -656798.6864026272 -num[a,5,12]: -328400.5000001000 -num[s,5,12]: -328399.4999999000 -num[m,5,12]: 164200.0328400000 -num[d,5,12]: 656799.8686400263 -num[a,5,13]: 1234567890123456789012017278.9100000000 -num[s,5,13]: -1234567890123456789012674078.9100000000 -num[m,5,13]: -405432095116543209511654320954044.0000000000 -num[d,5,13]: -0.0000000000 -num[a,5,14]: 1234567890123456789012017278.9210000000 -num[s,5,14]: -1234567890123456789012674078.9210000000 -num[m,5,14]: -405432095116543209511654320957656.4000000000 -num[d,5,14]: -0.0000000000 -num[a,6,0]: 20000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000.0000000000 -num[s,6,0]: -20000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000.0000000000 -num[m,6,0]: 4.0000000000 -num[d,6,0]: 0.0000000000 -num[a,6,1]: -2.0000000000 -num[s,6,1]: 2.0000000000 -num[m,6,1]: -0.0000000000 -num[d,6,1]: -0.0000000000 -num[a,6,2]: 0.7940000000 -num[s,6,2]: -0.7940000000 -num[m,6,2]: 0.0000000000 -num[d,6,2]: 0.0000000000 -num[a,6,3]: 3.4400000000 -num[s,6,3]: -3.4400000000 -num[m,6,3]: 0.0000000000 -num[d,6,3]: 0.0000000000 -num[a,6,4]: 592490000000000000000000.0000000000 -num[s,6,4]: -592490000000000000000000.0000000000 -num[m,6,4]: 0.0000000000 -num[d,6,4]: 0.0000000000 -num[a,6,5]: -328400.0000000000 -num[s,6,5]: 328400.0000000000 -num[m,6,5]: -0.0000000000 -num[d,6,5]: -0.0000000000 -num[a,6,6]: 0.0000000000 -num[s,6,6]: 0.0000000000 -num[m,6,6]: 0.0000000000 -num[d,6,6]: 1.0000000000 -num[a,6,7]: 0.0010000000 -num[s,6,7]: -0.0010000000 -num[m,6,7]: 0.0000000000 -num[d,6,7]: 0.0000000000 -num[a,6,8]: 0.0000000000 -num[s,6,8]: 0.0000000000 -num[m,6,8]: 0.0000000000 -(errno == PGTYPES_NUM_DIVIDE_ZERO) - r: -1 -num[a,6,9]: -0.0000592490 -num[s,6,9]: 0.0000592490 -num[m,6,9]: -0.0000000000 -num[d,6,9]: -0.0000000000 -num[a,6,10]: 0.0032840000 -num[s,6,10]: -0.0032840000 -num[m,6,10]: 0.0000000000 -num[d,6,10]: 0.0000000000 -num[a,6,11]: 0.5000010000 -num[s,6,11]: -0.5000010000 -num[m,6,11]: 0.0000000000 -num[d,6,11]: 0.0000000000 -num[a,6,12]: -0.5000001000 -num[s,6,12]: 0.5000001000 -num[m,6,12]: -0.0000000000 -num[d,6,12]: -0.0000000000 -num[a,6,13]: 1234567890123456789012345678.9100000000 -num[s,6,13]: -1234567890123456789012345678.9100000000 -num[m,6,13]: 0.0000000000 -num[d,6,13]: 0.0000000000 
-num[a,6,14]: 1234567890123456789012345678.9210000000 -num[s,6,14]: -1234567890123456789012345678.9210000000 -num[m,6,14]: 0.0000000000 -num[d,6,14]: 0.0000000000 -num[a,7,0]: 20000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000.0010000000 -num[s,7,0]: -19999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999.9990000000 -num[m,7,0]: 20000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000.0000000000 -num[d,7,0]: 0.0000000000 -num[a,7,1]: -1.9990000000 -num[s,7,1]: 2.0010000000 -num[m,7,1]: -0.0020000000 -num[d,7,1]: -0.0005000000 -num[a,7,2]: 0.7950000000 -num[s,7,2]: -0.7930000000 -num[m,7,2]: 0.0007940000 -num[d,7,2]: 0.0012594458 -num[a,7,3]: 3.4410000000 -num[s,7,3]: -3.4390000000 -num[m,7,3]: 0.0034400000 -num[d,7,3]: 0.0002906977 -num[a,7,4]: 592490000000000000000000.0010000000 -num[s,7,4]: -592489999999999999999999.9990000000 -num[m,7,4]: 592490000000000000000.0000000000 -num[d,7,4]: 0.0000000000 -num[a,7,5]: -328399.9990000000 -num[s,7,5]: 328400.0010000000 -num[m,7,5]: -328.4000000000 -num[d,7,5]: -0.0000000030 -num[a,7,6]: 0.0010000000 -num[s,7,6]: 0.0010000000 -num[m,7,6]: 0.0000000000 -num[d,7,6]: 5000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000.0000000000 -num[a,7,7]: 0.0020000000 -num[s,7,7]: 0.0000000000 -num[m,7,7]: 0.0000010000 -num[d,7,7]: 1.0000000000 -num[a,7,8]: 0.0010000000 -num[s,7,8]: 0.0010000000 -num[m,7,8]: 0.0000000000 -(errno == PGTYPES_NUM_DIVIDE_ZERO) - r: -1 -num[a,7,9]: 0.0009407510 -num[s,7,9]: 0.0010592490 -num[m,7,9]: -0.0000000592 -num[d,7,9]: -16.8779219902 -num[a,7,10]: 0.0042840000 -num[s,7,10]: -0.0022840000 -num[m,7,10]: 0.0000032840 -num[d,7,10]: 0.3045066991 -num[a,7,11]: 0.5010010000 -num[s,7,11]: -0.4990010000 -num[m,7,11]: 0.0005000010 -num[d,7,11]: 0.0019999960 -num[a,7,12]: -0.4990001000 -num[s,7,12]: 0.5010001000 -num[m,7,12]: -0.0005000001 -num[d,7,12]: -0.0019999996 -num[a,7,13]: 1234567890123456789012345678.9110000000 -num[s,7,13]: -1234567890123456789012345678.9090000000 -num[m,7,13]: 1234567890123456789012345.6789100000 -num[d,7,13]: 0.0000000000 -num[a,7,14]: 1234567890123456789012345678.9220000000 -num[s,7,14]: -1234567890123456789012345678.9200000000 -num[m,7,14]: 
1234567890123456789012345.6789210000 -num[d,7,14]: 0.0000000000 -num[a,8,0]: 20000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000.0000000000 -num[s,8,0]: -20000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000.0000000000 -num[m,8,0]: 0.0000000000 -num[d,8,0]: 0.0000000000 -num[a,8,1]: -2.0000000000 -num[s,8,1]: 2.0000000000 -num[m,8,1]: 0.0000000000 -num[d,8,1]: 0.0000000000 -num[a,8,2]: 0.7940000000 -num[s,8,2]: -0.7940000000 -num[m,8,2]: 0.0000000000 -num[d,8,2]: 0.0000000000 -num[a,8,3]: 3.4400000000 -num[s,8,3]: -3.4400000000 -num[m,8,3]: 0.0000000000 -num[d,8,3]: 0.0000000000 -num[a,8,4]: 592490000000000000000000.0000000000 -num[s,8,4]: -592490000000000000000000.0000000000 -num[m,8,4]: 0.0000000000 -num[d,8,4]: 0.0000000000 -num[a,8,5]: -328400.0000000000 -num[s,8,5]: 328400.0000000000 -num[m,8,5]: 0.0000000000 -num[d,8,5]: 0.0000000000 -num[a,8,6]: 0.0000000000 -num[s,8,6]: -0.0000000000 -num[m,8,6]: 0.0000000000 -num[d,8,6]: 0.0000000000 -num[a,8,7]: 0.0010000000 -num[s,8,7]: -0.0010000000 -num[m,8,7]: 0.0000000000 -num[d,8,7]: 0.0000000000 -num[a,8,8]: 0.0000000000 -num[s,8,8]: 0.0000000000 -num[m,8,8]: 0.0000000000 -(errno == PGTYPES_NUM_DIVIDE_ZERO) - r: -1 -num[a,8,9]: -0.0000592490 -num[s,8,9]: 0.0000592490 -num[m,8,9]: 0.0000000000 -num[d,8,9]: 0.0000000000 -num[a,8,10]: 0.0032840000 -num[s,8,10]: -0.0032840000 -num[m,8,10]: 0.0000000000 -num[d,8,10]: 0.0000000000 -num[a,8,11]: 0.5000010000 -num[s,8,11]: -0.5000010000 -num[m,8,11]: 0.0000000000 -num[d,8,11]: 0.0000000000 -num[a,8,12]: -0.5000001000 -num[s,8,12]: 0.5000001000 -num[m,8,12]: 0.0000000000 -num[d,8,12]: 0.0000000000 -num[a,8,13]: 1234567890123456789012345678.9100000000 -num[s,8,13]: -1234567890123456789012345678.9100000000 -num[m,8,13]: 0.0000000000 -num[d,8,13]: 0.0000000000 -num[a,8,14]: 1234567890123456789012345678.9210000000 -num[s,8,14]: -1234567890123456789012345678.9210000000 -num[m,8,14]: 0.0000000000 -num[d,8,14]: 0.0000000000 -num[a,9,0]: 19999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999.9999407510 -num[s,9,0]: -20000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000.0000592490 -num[m,9,0]: 
-1184980000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000.0000000000 -num[d,9,0]: -0.0000000000 -num[a,9,1]: -2.0000592490 -num[s,9,1]: 1.9999407510 -num[m,9,1]: 0.0001184980 -num[d,9,1]: 0.0000296245 -num[a,9,2]: 0.7939407510 -num[s,9,2]: -0.7940592490 -num[m,9,2]: -0.0000470437 -num[d,9,2]: -0.0000746209 -num[a,9,3]: 3.4399407510 -num[s,9,3]: -3.4400592490 -num[m,9,3]: -0.0002038166 -num[d,9,3]: -0.0000172235 -num[a,9,4]: 592489999999999999999999.9999407510 -num[s,9,4]: -592490000000000000000000.0000592490 -num[m,9,4]: -35104440010000000000.0000000000 -num[d,9,4]: -0.0000000000 -num[a,9,5]: -328400.0000592490 -num[s,9,5]: 328399.9999407510 -num[m,9,5]: 19.4573716000 -num[d,9,5]: 0.0000000002 -num[a,9,6]: -0.0000592490 -num[s,9,6]: -0.0000592490 -num[m,9,6]: -0.0000000000 -num[d,9,6]: -296245000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000.0000000000 -num[a,9,7]: 0.0009407510 -num[s,9,7]: -0.0010592490 -num[m,9,7]: -0.0000000592 -num[d,9,7]: -0.0592490000 -num[a,9,8]: -0.0000592490 -num[s,9,8]: -0.0000592490 -num[m,9,8]: 0.0000000000 -(errno == PGTYPES_NUM_DIVIDE_ZERO) - r: -1 -num[a,9,9]: -0.0001184980 -num[s,9,9]: 0.0000000000 -num[m,9,9]: 0.0000000035 -num[d,9,9]: 1.0000000000 -num[a,9,10]: 0.0032247510 -num[s,9,10]: -0.0033432490 -num[m,9,10]: -0.0000001946 -num[d,9,10]: -0.0180417174 -num[a,9,11]: 0.4999417510 -num[s,9,11]: -0.5000602490 -num[m,9,11]: -0.0000296246 -num[d,9,11]: -0.0001184978 -num[a,9,12]: -0.5000593490 -num[s,9,12]: 0.4999408510 -num[m,9,12]: 0.0000296245 -num[d,9,12]: 0.0001184980 -num[a,9,13]: 1234567890123456789012345678.9099407510 -num[s,9,13]: -1234567890123456789012345678.9100592490 -num[m,9,13]: -73146912921924691292192.4691297386 -num[d,9,13]: -0.0000000000 -num[a,9,14]: 1234567890123456789012345678.9209407510 -num[s,9,14]: -1234567890123456789012345678.9210592490 -num[m,9,14]: -73146912921924691292192.4691303903 -num[d,9,14]: -0.0000000000 -num[a,10,0]: 20000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000.0032840000 -num[s,10,0]: -19999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999.9967160000 -num[m,10,0]: 
65680000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000.0000000000 -num[d,10,0]: 0.0000000000 -num[a,10,1]: -1.9967160000 -num[s,10,1]: 2.0032840000 -num[m,10,1]: -0.0065680000 -num[d,10,1]: -0.0016420000 -num[a,10,2]: 0.7972840000 -num[s,10,2]: -0.7907160000 -num[m,10,2]: 0.0026074960 -num[d,10,2]: 0.0041360202 -num[a,10,3]: 3.4432840000 -num[s,10,3]: -3.4367160000 -num[m,10,3]: 0.0112969600 -num[d,10,3]: 0.0009546512 -num[a,10,4]: 592490000000000000000000.0032840000 -num[s,10,4]: -592489999999999999999999.9967160000 -num[m,10,4]: 1945737160000000000000.0000000000 -num[d,10,4]: 0.0000000000 -num[a,10,5]: -328399.9967160000 -num[s,10,5]: 328400.0032840000 -num[m,10,5]: -1078.4656000000 -num[d,10,5]: -0.0000000100 -num[a,10,6]: 0.0032840000 -num[s,10,6]: 0.0032840000 -num[m,10,6]: 0.0000000000 -num[d,10,6]: 16420000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000.0000000000 -num[a,10,7]: 0.0042840000 -num[s,10,7]: 0.0022840000 -num[m,10,7]: 0.0000032840 -num[d,10,7]: 3.2840000000 -num[a,10,8]: 0.0032840000 -num[s,10,8]: 0.0032840000 -num[m,10,8]: 0.0000000000 -(errno == PGTYPES_NUM_DIVIDE_ZERO) - r: -1 -num[a,10,9]: 0.0032247510 -num[s,10,9]: 0.0033432490 -num[m,10,9]: -0.0000001946 -num[d,10,9]: -55.4270958160 -num[a,10,10]: 0.0065680000 -num[s,10,10]: 0.0000000000 -num[m,10,10]: 0.0000107847 -num[d,10,10]: 1.0000000000 -num[a,10,11]: 0.5032850000 -num[s,10,11]: -0.4967170000 -num[m,10,11]: 0.0016420033 -num[d,10,11]: 0.0065679869 -num[a,10,12]: -0.4967161000 -num[s,10,12]: 0.5032841000 -num[m,10,12]: -0.0016420003 -num[d,10,12]: -0.0065679987 -num[a,10,13]: 1234567890123456789012345678.9132840000 -num[s,10,13]: -1234567890123456789012345678.9067160000 -num[m,10,13]: 4054320951165432095116543.2095404400 -num[d,10,13]: 0.0000000000 -num[a,10,14]: 1234567890123456789012345678.9242840000 -num[s,10,14]: -1234567890123456789012345678.9177160000 -num[m,10,14]: 4054320951165432095116543.2095765640 -num[d,10,14]: 0.0000000000 -num[a,11,0]: 20000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000.5000010000 -num[s,11,0]: -19999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999.4999990000 -num[m,11,0]: 
10000020000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000.0000000000 -num[d,11,0]: 0.0000000000 -num[a,11,1]: -1.4999990000 -num[s,11,1]: 2.5000010000 -num[m,11,1]: -1.0000020000 -num[d,11,1]: -0.2500005000 -num[a,11,2]: 1.2940010000 -num[s,11,2]: -0.2939990000 -num[m,11,2]: 0.3970007940 -num[d,11,2]: 0.6297241814 -num[a,11,3]: 3.9400010000 -num[s,11,3]: -2.9399990000 -num[m,11,3]: 1.7200034400 -num[d,11,3]: 0.1453491279 -num[a,11,4]: 592490000000000000000000.5000010000 -num[s,11,4]: -592489999999999999999999.4999990000 -num[m,11,4]: 296245592490000000000000.0000000000 -num[d,11,4]: 0.0000000000 -num[a,11,5]: -328399.4999990000 -num[s,11,5]: 328400.5000010000 -num[m,11,5]: -164200.3284000000 -num[d,11,5]: -0.0000015225 -num[a,11,6]: 0.5000010000 -num[s,11,6]: 0.5000010000 -num[m,11,6]: 0.0000000000 -num[d,11,6]: 2500005000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000.0000000000 -num[a,11,7]: 0.5010010000 -num[s,11,7]: 0.4990010000 -num[m,11,7]: 0.0005000010 -num[d,11,7]: 500.0010000000 -num[a,11,8]: 0.5000010000 -num[s,11,8]: 0.5000010000 -num[m,11,8]: 0.0000000000 -(errno == PGTYPES_NUM_DIVIDE_ZERO) - r: -1 -num[a,11,9]: 0.4999417510 -num[s,11,9]: 0.5000602490 -num[m,11,9]: -0.0000296246 -num[d,11,9]: -8438.9778730443 -num[a,11,10]: 0.5032850000 -num[s,11,10]: 0.4967170000 -num[m,11,10]: 0.0016420033 -num[d,11,10]: 152.2536540804 -num[a,11,11]: 1.0000020000 -num[s,11,11]: 0.0000000000 -num[m,11,11]: 0.2500010000 -num[d,11,11]: 1.0000000000 -num[a,11,12]: 0.0000009000 -num[s,11,12]: 1.0000011000 -num[m,11,12]: -0.2500005500 -num[d,11,12]: -1.0000018000 -num[a,11,13]: 1234567890123456789012345679.4100010000 -num[s,11,13]: -1234567890123456789012345678.4099990000 -num[m,11,13]: 617285179629618517962961851.8006789100 -num[d,11,13]: 0.0000000000 -num[a,11,14]: 1234567890123456789012345679.4210010000 -num[s,11,14]: -1234567890123456789012345678.4209990000 -num[m,11,14]: 617285179629618517962961851.8061789210 -num[d,11,14]: 0.0000000000 -num[a,12,0]: 19999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999.4999999000 -num[s,12,0]: -20000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000.5000001000 -num[m,12,0]: 
-10000002000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000.0000000000 -num[d,12,0]: -0.0000000000 -num[a,12,1]: -2.5000001000 -num[s,12,1]: 1.4999999000 -num[m,12,1]: 1.0000002000 -num[d,12,1]: 0.2500000500 -num[a,12,2]: 0.2939999000 -num[s,12,2]: -1.2940001000 -num[m,12,2]: -0.3970000794 -num[d,12,2]: -0.6297230479 -num[a,12,3]: 2.9399999000 -num[s,12,3]: -3.9400001000 -num[m,12,3]: -1.7200003440 -num[d,12,3]: -0.1453488663 -num[a,12,4]: 592489999999999999999999.4999999000 -num[s,12,4]: -592490000000000000000000.5000001000 -num[m,12,4]: -296245059249000000000000.0000000000 -num[d,12,4]: -0.0000000000 -num[a,12,5]: -328400.5000001000 -num[s,12,5]: 328399.4999999000 -num[m,12,5]: 164200.0328400000 -num[d,12,5]: 0.0000015225 -num[a,12,6]: -0.5000001000 -num[s,12,6]: -0.5000001000 -num[m,12,6]: -0.0000000000 -num[d,12,6]: -2500000500000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000.0000000000 -num[a,12,7]: -0.4990001000 -num[s,12,7]: -0.5010001000 -num[m,12,7]: -0.0005000001 -num[d,12,7]: -500.0001000000 -num[a,12,8]: -0.5000001000 -num[s,12,8]: -0.5000001000 -num[m,12,8]: 0.0000000000 -(errno == PGTYPES_NUM_DIVIDE_ZERO) - r: -1 -num[a,12,9]: -0.5000593490 -num[s,12,9]: -0.4999408510 -num[m,12,9]: 0.0000296245 -num[d,12,9]: 8438.9626829145 -num[a,12,10]: -0.4967161000 -num[s,12,10]: -0.5032841000 -num[m,12,10]: -0.0016420003 -num[d,12,10]: -152.2533800244 -num[a,12,11]: 0.0000009000 -num[s,12,11]: -1.0000011000 -num[m,12,11]: -0.2500005500 -num[d,12,11]: -0.9999982000 -num[a,12,12]: -1.0000002000 -num[s,12,12]: 0.0000000000 -num[m,12,12]: 0.2500001000 -num[d,12,12]: 1.0000000000 -num[a,12,13]: 1234567890123456789012345678.4099999000 -num[s,12,13]: -1234567890123456789012345679.4100001000 -num[m,12,13]: -617284068518517406851851740.6895678910 -num[d,12,13]: -0.0000000000 -num[a,12,14]: 1234567890123456789012345678.4209999000 -num[s,12,14]: -1234567890123456789012345679.4210001000 -num[m,12,14]: -617284068518517406851851740.6950678921 -num[d,12,14]: -0.0000000000 -num[a,13,0]: 20000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001234567890123456789012345678.9100000000 -num[s,13,0]: -19999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999998765432109876543210987654321.0900000000 -num[m,13,0]: 
24691357802469135780246913578200000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000.0000000000 -num[d,13,0]: 0.0000000000 -num[a,13,1]: 1234567890123456789012345676.9100000000 -num[s,13,1]: 1234567890123456789012345680.9100000000 -num[m,13,1]: -2469135780246913578024691357.8200000000 -num[d,13,1]: -617283945061728394506172839.4550000000 -num[a,13,2]: 1234567890123456789012345679.7040000000 -num[s,13,2]: 1234567890123456789012345678.1160000000 -num[m,13,2]: 980246904758024690475802469.0545400000 -num[d,13,2]: 1554871398140373789688092794.5969773000 -num[a,13,3]: 1234567890123456789012345682.3500000000 -num[s,13,3]: 1234567890123456789012345675.4700000000 -num[m,13,3]: 4246913542024691354202469135.4504000000 -num[d,13,3]: 358886014570772322387309790.3808140000 -num[a,13,4]: 1235160380123456789012345678.9100000000 -num[s,13,4]: 1233975400123456789012345678.9100000000 -num[m,13,4]: 731469129219246912921924691297385900000000000000000.0000000000 -num[d,13,4]: 2083.6940541165 -num[a,13,5]: 1234567890123456789012017278.9100000000 -num[s,13,5]: 1234567890123456789012674078.9100000000 -num[m,13,5]: -405432095116543209511654320954044.0000000000 -num[d,13,5]: -3759341930948406787491.9174140000 -num[a,13,6]: 1234567890123456789012345678.9100000000 -num[s,13,6]: 1234567890123456789012345678.9100000000 -num[m,13,6]: 0.0000000000 -num[d,13,6]: 6172839450617283945061728394550000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000.0000000000 -num[a,13,7]: 1234567890123456789012345678.9110000000 -num[s,13,7]: 1234567890123456789012345678.9090000000 -num[m,13,7]: 1234567890123456789012345.6789100000 -num[d,13,7]: 1234567890123456789012345678910.0000000000 -num[a,13,8]: 1234567890123456789012345678.9100000000 -num[s,13,8]: 1234567890123456789012345678.9100000000 -num[m,13,8]: 0.0000000000 -(errno == PGTYPES_NUM_DIVIDE_ZERO) - r: -1 -num[a,13,9]: 1234567890123456789012345678.9099407510 -num[s,13,9]: 1234567890123456789012345678.9100592490 -num[m,13,9]: -73146912921924691292192.4691297386 -num[d,13,9]: -20836940541164522422527733445458.9950885247 -num[a,13,10]: 1234567890123456789012345678.9132840000 -num[s,13,10]: 1234567890123456789012345678.9067160000 -num[m,13,10]: 4054320951165432095116543.2095404400 -num[d,13,10]: 375934193094840678749191741446.4068209501 -num[a,13,11]: 1234567890123456789012345679.4100010000 -num[s,13,11]: 1234567890123456789012345678.4099990000 -num[m,13,11]: 617285179629618517962961851.8006789100 -num[d,13,11]: 2469130841985229607565476226.8675462649 -num[a,13,12]: 1234567890123456789012345678.4099999000 -num[s,13,12]: 1234567890123456789012345679.4100001000 -num[m,13,12]: -617284068518517406851851740.6895678910 -num[d,13,12]: -2469135286419856294053432547.1334905733 -num[a,13,13]: 2469135780246913578024691357.8200000000 -num[s,13,13]: 0.0000000000 -num[m,13,13]: 
1524157875323883675049535156278311236552659655767748818.7881000000 -num[d,13,13]: 1.0000000000 -num[a,13,14]: 2469135780246913578024691357.8310000000 -num[s,13,14]: -0.0110000000 -num[m,13,14]: 1524157875323883675049535156291891483344017680446884621.2561100000 -num[d,13,14]: 1.0000000000 -num[a,14,0]: 20000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001234567890123456789012345678.9210000000 -num[s,14,0]: -19999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999998765432109876543210987654321.0790000000 -num[m,14,0]: 24691357802469135780246913578420000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000.0000000000 -num[d,14,0]: 0.0000000000 -num[a,14,1]: 1234567890123456789012345676.9210000000 -num[s,14,1]: 1234567890123456789012345680.9210000000 -num[m,14,1]: -2469135780246913578024691357.8420000000 -num[d,14,1]: -617283945061728394506172839.4605000000 -num[a,14,2]: 1234567890123456789012345679.7150000000 -num[s,14,2]: 1234567890123456789012345678.1270000000 -num[m,14,2]: 980246904758024690475802469.0632740000 -num[d,14,2]: 1554871398140373789688092794.6108312000 -num[a,14,3]: 1234567890123456789012345682.3610000000 -num[s,14,3]: 1234567890123456789012345675.4810000000 -num[m,14,3]: 4246913542024691354202469135.4882400000 -num[d,14,3]: 358886014570772322387309790.3840116000 -num[a,14,4]: 1235160380123456789012345678.9210000000 -num[s,14,4]: 1233975400123456789012345678.9210000000 -num[m,14,4]: 731469129219246912921924691303903290000000000000000.0000000000 -num[d,14,4]: 2083.6940541165 -num[a,14,5]: 1234567890123456789012017278.9210000000 -num[s,14,5]: 1234567890123456789012674078.9210000000 -num[m,14,5]: -405432095116543209511654320957656.4000000000 -num[d,14,5]: -3759341930948406787491.9174145000 -num[a,14,6]: 1234567890123456789012345678.9210000000 -num[s,14,6]: 1234567890123456789012345678.9210000000 -num[m,14,6]: 0.0000000000 -num[d,14,6]: 6172839450617283945061728394605000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000.0000000000 -num[a,14,7]: 1234567890123456789012345678.9220000000 -num[s,14,7]: 1234567890123456789012345678.9200000000 -num[m,14,7]: 1234567890123456789012345.6789210000 -num[d,14,7]: 1234567890123456789012345678921.0000000000 -num[a,14,8]: 1234567890123456789012345678.9210000000 -num[s,14,8]: 
1234567890123456789012345678.9210000000 -num[m,14,8]: 0.0000000000 -(errno == PGTYPES_NUM_DIVIDE_ZERO) - r: -1 -num[a,14,9]: 1234567890123456789012345678.9209407510 -num[s,14,9]: 1234567890123456789012345678.9210592490 -num[m,14,9]: -73146912921924691292192.4691303903 -num[d,14,9]: -20836940541164522422527733445644.6522304174 -num[a,14,10]: 1234567890123456789012345678.9242840000 -num[s,14,10]: 1234567890123456789012345678.9177160000 -num[m,14,10]: 4054320951165432095116543.2095765640 -num[d,14,10]: 375934193094840678749191741449.7563946407 -num[a,14,11]: 1234567890123456789012345679.4210010000 -num[s,14,11]: 1234567890123456789012345678.4209990000 -num[m,14,11]: 617285179629618517962961851.8061789210 -num[d,14,11]: 2469130841985229607565476226.8895462209 -num[a,14,12]: 1234567890123456789012345678.4209999000 -num[s,14,12]: 1234567890123456789012345679.4210001000 -num[m,14,12]: -617284068518517406851851740.6950678921 -num[d,14,12]: -2469135286419856294053432547.1554905689 -num[a,14,13]: 2469135780246913578024691357.8310000000 -num[s,14,13]: 0.0110000000 -num[m,14,13]: 1524157875323883675049535156291891483344017680446884621.2561100000 -num[d,14,13]: 1.0000000000 -num[a,14,14]: 2469135780246913578024691357.8420000000 -num[s,14,14]: 0.0000000000 -num[m,14,14]: 1524157875323883675049535156305471730135375705126020423.7242410000 -num[d,14,14]: 1.0000000000 -0: 20000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000 -1: -2 -2: 0.794 -3: 3.44 -4: 592490000000000000000000 -5: -328400 -6: 0.0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000002 -7: 0.001 -8: 0.0 -9: -0.000059249 -10: 0.003284 -11: 0.500001 -12: -0.5000001 -13: 1234567890123456789012345678.91 -14: 1234567890123456789012345678.921 diff --git a/src/interfaces/ecpg/test/expected/pgtypeslib-num_test2.c b/src/interfaces/ecpg/test/expected/pgtypeslib-num_test2.c index 83636ad880..9debc34e79 100644 --- a/src/interfaces/ecpg/test/expected/pgtypeslib-num_test2.c +++ b/src/interfaces/ecpg/test/expected/pgtypeslib-num_test2.c @@ -25,11 +25,39 @@ +#line 1 "printf_hack.h" /* + * print_double(x) has the same effect as printf("%g", x), but is intended + * to produce the same formatting across all platforms. 
+ */ +static void +print_double(double x) +{ +#ifdef WIN32 + /* Change Windows' 3-digit exponents to look like everyone else's */ + char convert[128]; + int vallen; + + sprintf(convert, "%g", x); + vallen = strlen(convert); + + if (vallen >= 6 && + convert[vallen - 5] == 'e' && + convert[vallen - 3] == '0') + { + convert[vallen - 3] = convert[vallen - 2]; + convert[vallen - 2] = convert[vallen - 1]; + convert[vallen - 1] = '\0'; + } + + printf("%s", convert); +#else + printf("%g", x); +#endif +} -NOTE: This file has a different expect file for regression tests on MinGW32 +#line 9 "num_test2.pgc" -*/ char* nums[] = { "2E394", "-2", ".794", "3.44", "592.49E21", "-32.84e4", @@ -77,21 +105,21 @@ main(void) text = PGTYPESnumeric_to_asc(num, -1); if (!text) check_errno(); - printf("num[%d,1]: %s\n", i, text); free(text); + printf("num[%d,1]: %s\n", i, text); PGTYPESchar_free(text); text = PGTYPESnumeric_to_asc(num, 0); if (!text) check_errno(); - printf("num[%d,2]: %s\n", i, text); free(text); + printf("num[%d,2]: %s\n", i, text); PGTYPESchar_free(text); text = PGTYPESnumeric_to_asc(num, 1); if (!text) check_errno(); - printf("num[%d,3]: %s\n", i, text); free(text); + printf("num[%d,3]: %s\n", i, text); PGTYPESchar_free(text); text = PGTYPESnumeric_to_asc(num, 2); if (!text) check_errno(); - printf("num[%d,4]: %s\n", i, text); free(text); + printf("num[%d,4]: %s\n", i, text); PGTYPESchar_free(text); nin = PGTYPESnumeric_new(); text = PGTYPESnumeric_to_asc(nin, 2); if (!text) check_errno(); - printf("num[%d,5]: %s\n", i, text); free(text); + printf("num[%d,5]: %s\n", i, text); PGTYPESchar_free(text); r = PGTYPESnumeric_to_long(num, &l); if (r) check_errno(); @@ -103,7 +131,7 @@ main(void) text = PGTYPESnumeric_to_asc(nin, 2); q = PGTYPESnumeric_cmp(num, nin); printf("num[%d,7]: %s (r: %d - cmp: %d)\n", i, text, r, q); - free(text); + PGTYPESchar_free(text); } r = PGTYPESnumeric_to_int(num, &k); @@ -116,7 +144,7 @@ main(void) text = PGTYPESnumeric_to_asc(nin, 2); q = PGTYPESnumeric_cmp(num, nin); printf("num[%d,9]: %s (r: %d - cmp: %d)\n", i, text, r, q); - free(text); + PGTYPESchar_free(text); } if (i != 6) @@ -126,7 +154,9 @@ main(void) r = PGTYPESnumeric_to_double(num, &d); if (r) check_errno(); - printf("num[%d,10]: %g (r: %d)\n", i, r?0.0:d, r); + printf("num[%d,10]: ", i); + print_double(r ? 
0.0 : d); + printf(" (r: %d)\n", r); } /* do not test double to numeric because @@ -147,7 +177,7 @@ main(void) text = PGTYPESnumeric_to_asc(nin, 2); q = PGTYPESnumeric_cmp(num, nin); printf("num[%d,12]: %s (r: %d - cmp: %d)\n", i, text, r, q); - free(text); + PGTYPESchar_free(text); } PGTYPESdecimal_free(dec); @@ -173,7 +203,7 @@ main(void) { text = PGTYPESnumeric_to_asc(a, 10); printf("num[a,%d,%d]: %s\n", i, j, text); - free(text); + PGTYPESchar_free(text); } r = PGTYPESnumeric_sub(numarr[i], numarr[j], s); if (r) @@ -185,7 +215,7 @@ main(void) { text = PGTYPESnumeric_to_asc(s, 10); printf("num[s,%d,%d]: %s\n", i, j, text); - free(text); + PGTYPESchar_free(text); } r = PGTYPESnumeric_mul(numarr[i], numarr[j], m); if (r) @@ -197,7 +227,7 @@ main(void) { text = PGTYPESnumeric_to_asc(m, 10); printf("num[m,%d,%d]: %s\n", i, j, text); - free(text); + PGTYPESchar_free(text); } r = PGTYPESnumeric_div(numarr[i], numarr[j], d); if (r) @@ -209,7 +239,7 @@ main(void) { text = PGTYPESnumeric_to_asc(d, 10); printf("num[d,%d,%d]: %s\n", i, j, text); - free(text); + PGTYPESchar_free(text); } PGTYPESnumeric_free(a); @@ -223,12 +253,12 @@ main(void) { text = PGTYPESnumeric_to_asc(numarr[i], -1); printf("%d: %s\n", i, text); - free(text); + PGTYPESchar_free(text); PGTYPESnumeric_free(numarr[i]); } free(numarr); - return (0); + return 0; } static void diff --git a/src/interfaces/ecpg/test/expected/preproc-array_of_struct.c b/src/interfaces/ecpg/test/expected/preproc-array_of_struct.c index c4ae862b49..1cf371092f 100644 --- a/src/interfaces/ecpg/test/expected/preproc-array_of_struct.c +++ b/src/interfaces/ecpg/test/expected/preproc-array_of_struct.c @@ -284,5 +284,5 @@ if (sqlca.sqlcode < 0) sqlprint();} #line 92 "array_of_struct.pgc" - return( 0 ); + return 0; } diff --git a/src/interfaces/ecpg/test/expected/preproc-autoprep.stderr b/src/interfaces/ecpg/test/expected/preproc-autoprep.stderr index ea21e82ca6..bfeea59a75 100644 --- a/src/interfaces/ecpg/test/expected/preproc-autoprep.stderr +++ b/src/interfaces/ecpg/test/expected/preproc-autoprep.stderr @@ -30,7 +30,7 @@ [NO_PID]: sqlca: code: 0, state: 00000 [NO_PID]: ecpg_process_output on line 24: OK: INSERT 0 1 [NO_PID]: sqlca: code: 0, state: 00000 -[NO_PID]: ecpg_auto_prepare on line 26: statement found in cache; entry 1640 +[NO_PID]: ecpg_auto_prepare on line 26: statement found in cache; entry 1633 [NO_PID]: sqlca: code: 0, state: 00000 [NO_PID]: ecpg_execute on line 26: query: insert into T values ( 1 , $1 ); with 1 parameter(s) on connection ecpg1_regression [NO_PID]: sqlca: code: 0, state: 00000 @@ -168,7 +168,7 @@ [NO_PID]: sqlca: code: 0, state: 00000 [NO_PID]: ecpg_process_output on line 21: OK: CREATE TABLE [NO_PID]: sqlca: code: 0, state: 00000 -[NO_PID]: ecpg_auto_prepare on line 23: statement found in cache; entry 15328 +[NO_PID]: ecpg_auto_prepare on line 23: statement found in cache; entry 15321 [NO_PID]: sqlca: code: 0, state: 00000 [NO_PID]: prepare_common on line 23: name ecpg1; query: "insert into T values ( 1 , null )" [NO_PID]: sqlca: code: 0, state: 00000 @@ -178,7 +178,7 @@ [NO_PID]: sqlca: code: 0, state: 00000 [NO_PID]: ecpg_process_output on line 23: OK: INSERT 0 1 [NO_PID]: sqlca: code: 0, state: 00000 -[NO_PID]: ecpg_auto_prepare on line 24: statement found in cache; entry 1640 +[NO_PID]: ecpg_auto_prepare on line 24: statement found in cache; entry 1633 [NO_PID]: sqlca: code: 0, state: 00000 [NO_PID]: prepare_common on line 24: name ecpg2; query: "insert into T values ( 1 , $1 )" [NO_PID]: sqlca: code: 0, state: 00000 @@ 
-190,7 +190,7 @@ [NO_PID]: sqlca: code: 0, state: 00000 [NO_PID]: ecpg_process_output on line 24: OK: INSERT 0 1 [NO_PID]: sqlca: code: 0, state: 00000 -[NO_PID]: ecpg_auto_prepare on line 26: statement found in cache; entry 1640 +[NO_PID]: ecpg_auto_prepare on line 26: statement found in cache; entry 1633 [NO_PID]: sqlca: code: 0, state: 00000 [NO_PID]: ecpg_execute on line 26: query: insert into T values ( 1 , $1 ); with 1 parameter(s) on connection ecpg1_regression [NO_PID]: sqlca: code: 0, state: 00000 @@ -208,7 +208,7 @@ [NO_PID]: sqlca: code: 0, state: 00000 [NO_PID]: ecpg_process_output on line 28: OK: INSERT 0 1 [NO_PID]: sqlca: code: 0, state: 00000 -[NO_PID]: ecpg_auto_prepare on line 30: statement found in cache; entry 13056 +[NO_PID]: ecpg_auto_prepare on line 30: statement found in cache; entry 13049 [NO_PID]: sqlca: code: 0, state: 00000 [NO_PID]: prepare_common on line 30: name ecpg3; query: "select Item2 from T order by Item2 nulls last" [NO_PID]: sqlca: code: 0, state: 00000 diff --git a/src/interfaces/ecpg/test/expected/preproc-cursor.c b/src/interfaces/ecpg/test/expected/preproc-cursor.c index f7da753a3d..4822901742 100644 --- a/src/interfaces/ecpg/test/expected/preproc-cursor.c +++ b/src/interfaces/ecpg/test/expected/preproc-cursor.c @@ -830,5 +830,5 @@ if (sqlca.sqlcode < 0) exit (1);} #line 253 "cursor.pgc" - return (0); + return 0; } diff --git a/src/interfaces/ecpg/test/expected/preproc-define.c b/src/interfaces/ecpg/test/expected/preproc-define.c index c8ae6f98dc..bde15b74a0 100644 --- a/src/interfaces/ecpg/test/expected/preproc-define.c +++ b/src/interfaces/ecpg/test/expected/preproc-define.c @@ -164,5 +164,5 @@ if (sqlca.sqlcode < 0) sqlprint();} #line 59 "define.pgc" - return (0); + return 0; } diff --git a/src/interfaces/ecpg/test/expected/preproc-describe.c b/src/interfaces/ecpg/test/expected/preproc-describe.c index 1a9dd85438..143e966261 100644 --- a/src/interfaces/ecpg/test/expected/preproc-describe.c +++ b/src/interfaces/ecpg/test/expected/preproc-describe.c @@ -477,5 +477,5 @@ if (sqlca.sqlcode < 0) exit (1);} #line 144 "describe.pgc" - return (0); + return 0; } diff --git a/src/interfaces/ecpg/test/expected/preproc-init.c b/src/interfaces/ecpg/test/expected/preproc-init.c index ca23d348d6..b0e04731fe 100644 --- a/src/interfaces/ecpg/test/expected/preproc-init.c +++ b/src/interfaces/ecpg/test/expected/preproc-init.c @@ -114,7 +114,7 @@ static int fe(enum e x) return (int)x; } -static void sqlnotice(char *notice, short trans) +static void sqlnotice(const char *notice, short trans) { if (!notice) notice = "-empty-"; diff --git a/src/interfaces/ecpg/test/expected/preproc-outofscope.c b/src/interfaces/ecpg/test/expected/preproc-outofscope.c index b3deb221d7..b23107714c 100644 --- a/src/interfaces/ecpg/test/expected/preproc-outofscope.c +++ b/src/interfaces/ecpg/test/expected/preproc-outofscope.c @@ -28,6 +28,8 @@ #ifndef PGTYPES_NUMERIC #define PGTYPES_NUMERIC +#include + #define NUMERIC_POS 0x0000 #define NUMERIC_NEG 0x4000 #define NUMERIC_NAN 0xC000 @@ -337,7 +339,7 @@ if (sqlca.sqlcode < 0) exit (1);} get_record1(); if (sqlca.sqlcode == ECPG_NOT_FOUND) break; - printf("id=%d%s t='%s'%s d1=%lf%s d2=%lf%s c = '%s'%s\n", + printf("id=%d%s t='%s'%s d1=%f%s d2=%f%s c = '%s'%s\n", myvar->id, mynullvar->id ? " (NULL)" : "", myvar->t, mynullvar->t ? " (NULL)" : "", myvar->d1, mynullvar->d1 ? 
" (NULL)" : "", @@ -374,5 +376,5 @@ if (sqlca.sqlcode < 0) exit (1);} #line 124 "outofscope.pgc" - return (0); + return 0; } diff --git a/src/interfaces/ecpg/test/expected/preproc-pointer_to_struct.c b/src/interfaces/ecpg/test/expected/preproc-pointer_to_struct.c index 5a0f9caee3..7b1f58e835 100644 --- a/src/interfaces/ecpg/test/expected/preproc-pointer_to_struct.c +++ b/src/interfaces/ecpg/test/expected/preproc-pointer_to_struct.c @@ -289,5 +289,5 @@ if (sqlca.sqlcode < 0) sqlprint();} /* All the memory will anyway be freed at the end */ - return( 0 ); + return 0; } diff --git a/src/interfaces/ecpg/test/expected/preproc-strings.c b/src/interfaces/ecpg/test/expected/preproc-strings.c index 89d17e96c9..2053443e81 100644 --- a/src/interfaces/ecpg/test/expected/preproc-strings.c +++ b/src/interfaces/ecpg/test/expected/preproc-strings.c @@ -66,5 +66,5 @@ int main(void) { ECPGdisconnect(__LINE__, "CURRENT");} #line 25 "strings.pgc" - return (0); + return 0; } diff --git a/src/interfaces/ecpg/test/expected/preproc-variable.c b/src/interfaces/ecpg/test/expected/preproc-variable.c index 7fd03ba7d3..08e2355d16 100644 --- a/src/interfaces/ecpg/test/expected/preproc-variable.c +++ b/src/interfaces/ecpg/test/expected/preproc-variable.c @@ -272,5 +272,5 @@ if (sqlca.sqlcode < 0) exit (1);} #line 98 "variable.pgc" - return (0); + return 0; } diff --git a/src/interfaces/ecpg/test/expected/preproc-whenever.c b/src/interfaces/ecpg/test/expected/preproc-whenever.c index 922ef76b92..332ef85b10 100644 --- a/src/interfaces/ecpg/test/expected/preproc-whenever.c +++ b/src/interfaces/ecpg/test/expected/preproc-whenever.c @@ -24,7 +24,7 @@ #line 5 "whenever.pgc" -static void print(char *msg) +static void print(const char *msg) { fprintf(stderr, "Error in statement '%s':\n", msg); sqlprint(); diff --git a/src/interfaces/ecpg/test/expected/preproc-whenever_do_continue.c b/src/interfaces/ecpg/test/expected/preproc-whenever_do_continue.c new file mode 100644 index 0000000000..a367af00f3 --- /dev/null +++ b/src/interfaces/ecpg/test/expected/preproc-whenever_do_continue.c @@ -0,0 +1,161 @@ +/* Processed by ecpg (regression mode) */ +/* These include files are added by the preprocessor */ +#include +#include +#include +/* End of automatic include section */ +#define ECPGdebug(X,Y) ECPGdebug((X)+100,(Y)) + +#line 1 "whenever_do_continue.pgc" +#include + + +#line 1 "regression.h" + + + + + + +#line 3 "whenever_do_continue.pgc" + + +/* exec sql whenever sqlerror sqlprint ; */ +#line 5 "whenever_do_continue.pgc" + + +int main(void) +{ + /* exec sql begin declare section */ + + + + + + + + + +#line 15 "whenever_do_continue.pgc" + struct { +#line 12 "whenever_do_continue.pgc" + char ename [ 12 ] ; + +#line 13 "whenever_do_continue.pgc" + float sal ; + +#line 14 "whenever_do_continue.pgc" + float comm ; + } emp ; + +#line 17 "whenever_do_continue.pgc" + char msg [ 128 ] ; +/* exec sql end declare section */ +#line 18 "whenever_do_continue.pgc" + + + ECPGdebug(1, stderr); + + strcpy(msg, "connect"); + { ECPGconnect(__LINE__, 0, "ecpg1_regression" , NULL, NULL , NULL, 0); +#line 23 "whenever_do_continue.pgc" + +if (sqlca.sqlcode < 0) sqlprint();} +#line 23 "whenever_do_continue.pgc" + + + strcpy(msg, "create"); + { ECPGdo(__LINE__, 0, 1, NULL, 0, ECPGst_normal, "create table emp ( ename varchar , sal double precision , comm double precision )", ECPGt_EOIT, ECPGt_EORT); +#line 26 "whenever_do_continue.pgc" + +if (sqlca.sqlcode < 0) sqlprint();} +#line 26 "whenever_do_continue.pgc" + + + strcpy(msg, "insert"); + { ECPGdo(__LINE__, 
0, 1, NULL, 0, ECPGst_normal, "insert into emp values ( 'Ram' , 111100 , 21 )", ECPGt_EOIT, ECPGt_EORT); +#line 29 "whenever_do_continue.pgc" + +if (sqlca.sqlcode < 0) sqlprint();} +#line 29 "whenever_do_continue.pgc" + + { ECPGdo(__LINE__, 0, 1, NULL, 0, ECPGst_normal, "insert into emp values ( 'aryan' , 11110 , null )", ECPGt_EOIT, ECPGt_EORT); +#line 30 "whenever_do_continue.pgc" + +if (sqlca.sqlcode < 0) sqlprint();} +#line 30 "whenever_do_continue.pgc" + + { ECPGdo(__LINE__, 0, 1, NULL, 0, ECPGst_normal, "insert into emp values ( 'josh' , 10000 , 10 )", ECPGt_EOIT, ECPGt_EORT); +#line 31 "whenever_do_continue.pgc" + +if (sqlca.sqlcode < 0) sqlprint();} +#line 31 "whenever_do_continue.pgc" + + { ECPGdo(__LINE__, 0, 1, NULL, 0, ECPGst_normal, "insert into emp values ( 'tom' , 20000 , null )", ECPGt_EOIT, ECPGt_EORT); +#line 32 "whenever_do_continue.pgc" + +if (sqlca.sqlcode < 0) sqlprint();} +#line 32 "whenever_do_continue.pgc" + + + /* declare c cursor for select ename , sal , comm from emp order by ename collate \"C\" asc */ +#line 34 "whenever_do_continue.pgc" + + + { ECPGdo(__LINE__, 0, 1, NULL, 0, ECPGst_normal, "declare c cursor for select ename , sal , comm from emp order by ename collate \"C\" asc", ECPGt_EOIT, ECPGt_EORT); +#line 36 "whenever_do_continue.pgc" + +if (sqlca.sqlcode < 0) sqlprint();} +#line 36 "whenever_do_continue.pgc" + + + /* The 'BREAK' condition to exit the loop. */ + /* exec sql whenever not found break ; */ +#line 39 "whenever_do_continue.pgc" + + + /* The DO CONTINUE makes the loop start at the next iteration when an error occurs.*/ + /* exec sql whenever sqlerror continue ; */ +#line 42 "whenever_do_continue.pgc" + + + while (1) + { + { ECPGdo(__LINE__, 0, 1, NULL, 0, ECPGst_normal, "fetch c", ECPGt_EOIT, + ECPGt_char,&(emp.ename),(long)12,(long)1,(12)*sizeof(char), + ECPGt_NO_INDICATOR, NULL , 0L, 0L, 0L, + ECPGt_float,&(emp.sal),(long)1,(long)1,sizeof(float), + ECPGt_NO_INDICATOR, NULL , 0L, 0L, 0L, + ECPGt_float,&(emp.comm),(long)1,(long)1,sizeof(float), + ECPGt_NO_INDICATOR, NULL , 0L, 0L, 0L, ECPGt_EORT); +#line 46 "whenever_do_continue.pgc" + +if (sqlca.sqlcode == ECPG_NOT_FOUND) break; +#line 46 "whenever_do_continue.pgc" + +if (sqlca.sqlcode < 0) continue;} +#line 46 "whenever_do_continue.pgc" + + /* The employees with non-NULL commissions will be displayed. */ + printf("%s %7.2f %9.2f\n", emp.ename, emp.sal, emp.comm); + } + + /* + * This 'CONTINUE' shuts off the 'DO CONTINUE' and allow the program to + * proceed if any further errors do occur. 
+ */ + /* exec sql whenever sqlerror continue ; */ +#line 55 "whenever_do_continue.pgc" + + + { ECPGdo(__LINE__, 0, 1, NULL, 0, ECPGst_normal, "close c", ECPGt_EOIT, ECPGt_EORT);} +#line 57 "whenever_do_continue.pgc" + + + strcpy(msg, "drop"); + { ECPGdo(__LINE__, 0, 1, NULL, 0, ECPGst_normal, "drop table emp", ECPGt_EOIT, ECPGt_EORT);} +#line 60 "whenever_do_continue.pgc" + + + exit(0); +} diff --git a/src/interfaces/ecpg/test/expected/preproc-whenever_do_continue.stderr b/src/interfaces/ecpg/test/expected/preproc-whenever_do_continue.stderr new file mode 100644 index 0000000000..46bc4a5600 --- /dev/null +++ b/src/interfaces/ecpg/test/expected/preproc-whenever_do_continue.stderr @@ -0,0 +1,112 @@ +[NO_PID]: ECPGdebug: set to 1 +[NO_PID]: sqlca: code: 0, state: 00000 +[NO_PID]: ECPGconnect: opening database ecpg1_regression on port +[NO_PID]: sqlca: code: 0, state: 00000 +[NO_PID]: ecpg_execute on line 26: query: create table emp ( ename varchar , sal double precision , comm double precision ); with 0 parameter(s) on connection ecpg1_regression +[NO_PID]: sqlca: code: 0, state: 00000 +[NO_PID]: ecpg_execute on line 26: using PQexec +[NO_PID]: sqlca: code: 0, state: 00000 +[NO_PID]: ecpg_process_output on line 26: OK: CREATE TABLE +[NO_PID]: sqlca: code: 0, state: 00000 +[NO_PID]: ecpg_execute on line 29: query: insert into emp values ( 'Ram' , 111100 , 21 ); with 0 parameter(s) on connection ecpg1_regression +[NO_PID]: sqlca: code: 0, state: 00000 +[NO_PID]: ecpg_execute on line 29: using PQexec +[NO_PID]: sqlca: code: 0, state: 00000 +[NO_PID]: ecpg_process_output on line 29: OK: INSERT 0 1 +[NO_PID]: sqlca: code: 0, state: 00000 +[NO_PID]: ecpg_execute on line 30: query: insert into emp values ( 'aryan' , 11110 , null ); with 0 parameter(s) on connection ecpg1_regression +[NO_PID]: sqlca: code: 0, state: 00000 +[NO_PID]: ecpg_execute on line 30: using PQexec +[NO_PID]: sqlca: code: 0, state: 00000 +[NO_PID]: ecpg_process_output on line 30: OK: INSERT 0 1 +[NO_PID]: sqlca: code: 0, state: 00000 +[NO_PID]: ecpg_execute on line 31: query: insert into emp values ( 'josh' , 10000 , 10 ); with 0 parameter(s) on connection ecpg1_regression +[NO_PID]: sqlca: code: 0, state: 00000 +[NO_PID]: ecpg_execute on line 31: using PQexec +[NO_PID]: sqlca: code: 0, state: 00000 +[NO_PID]: ecpg_process_output on line 31: OK: INSERT 0 1 +[NO_PID]: sqlca: code: 0, state: 00000 +[NO_PID]: ecpg_execute on line 32: query: insert into emp values ( 'tom' , 20000 , null ); with 0 parameter(s) on connection ecpg1_regression +[NO_PID]: sqlca: code: 0, state: 00000 +[NO_PID]: ecpg_execute on line 32: using PQexec +[NO_PID]: sqlca: code: 0, state: 00000 +[NO_PID]: ecpg_process_output on line 32: OK: INSERT 0 1 +[NO_PID]: sqlca: code: 0, state: 00000 +[NO_PID]: ecpg_execute on line 36: query: declare c cursor for select ename , sal , comm from emp order by ename collate "C" asc; with 0 parameter(s) on connection ecpg1_regression +[NO_PID]: sqlca: code: 0, state: 00000 +[NO_PID]: ecpg_execute on line 36: using PQexec +[NO_PID]: sqlca: code: 0, state: 00000 +[NO_PID]: ecpg_process_output on line 36: OK: DECLARE CURSOR +[NO_PID]: sqlca: code: 0, state: 00000 +[NO_PID]: ecpg_execute on line 46: query: fetch c; with 0 parameter(s) on connection ecpg1_regression +[NO_PID]: sqlca: code: 0, state: 00000 +[NO_PID]: ecpg_execute on line 46: using PQexec +[NO_PID]: sqlca: code: 0, state: 00000 +[NO_PID]: ecpg_process_output on line 46: correctly got 1 tuples with 3 fields +[NO_PID]: sqlca: code: 0, state: 00000 +[NO_PID]: 
ecpg_get_data on line 46: RESULT: Ram offset: -1; array: no +[NO_PID]: sqlca: code: 0, state: 00000 +[NO_PID]: ecpg_get_data on line 46: RESULT: 111100 offset: -1; array: no +[NO_PID]: sqlca: code: 0, state: 00000 +[NO_PID]: ecpg_get_data on line 46: RESULT: 21 offset: -1; array: no +[NO_PID]: sqlca: code: 0, state: 00000 +[NO_PID]: ecpg_execute on line 46: query: fetch c; with 0 parameter(s) on connection ecpg1_regression +[NO_PID]: sqlca: code: 0, state: 00000 +[NO_PID]: ecpg_execute on line 46: using PQexec +[NO_PID]: sqlca: code: 0, state: 00000 +[NO_PID]: ecpg_process_output on line 46: correctly got 1 tuples with 3 fields +[NO_PID]: sqlca: code: 0, state: 00000 +[NO_PID]: ecpg_get_data on line 46: RESULT: aryan offset: -1; array: no +[NO_PID]: sqlca: code: 0, state: 00000 +[NO_PID]: ecpg_get_data on line 46: RESULT: 11110 offset: -1; array: no +[NO_PID]: sqlca: code: 0, state: 00000 +[NO_PID]: ecpg_get_data on line 46: RESULT: offset: -1; array: no +[NO_PID]: sqlca: code: 0, state: 00000 +[NO_PID]: raising sqlcode -213 on line 46: null value without indicator on line 46 +[NO_PID]: sqlca: code: -213, state: 22002 +[NO_PID]: ecpg_execute on line 46: query: fetch c; with 0 parameter(s) on connection ecpg1_regression +[NO_PID]: sqlca: code: 0, state: 00000 +[NO_PID]: ecpg_execute on line 46: using PQexec +[NO_PID]: sqlca: code: 0, state: 00000 +[NO_PID]: ecpg_process_output on line 46: correctly got 1 tuples with 3 fields +[NO_PID]: sqlca: code: 0, state: 00000 +[NO_PID]: ecpg_get_data on line 46: RESULT: josh offset: -1; array: no +[NO_PID]: sqlca: code: 0, state: 00000 +[NO_PID]: ecpg_get_data on line 46: RESULT: 10000 offset: -1; array: no +[NO_PID]: sqlca: code: 0, state: 00000 +[NO_PID]: ecpg_get_data on line 46: RESULT: 10 offset: -1; array: no +[NO_PID]: sqlca: code: 0, state: 00000 +[NO_PID]: ecpg_execute on line 46: query: fetch c; with 0 parameter(s) on connection ecpg1_regression +[NO_PID]: sqlca: code: 0, state: 00000 +[NO_PID]: ecpg_execute on line 46: using PQexec +[NO_PID]: sqlca: code: 0, state: 00000 +[NO_PID]: ecpg_process_output on line 46: correctly got 1 tuples with 3 fields +[NO_PID]: sqlca: code: 0, state: 00000 +[NO_PID]: ecpg_get_data on line 46: RESULT: tom offset: -1; array: no +[NO_PID]: sqlca: code: 0, state: 00000 +[NO_PID]: ecpg_get_data on line 46: RESULT: 20000 offset: -1; array: no +[NO_PID]: sqlca: code: 0, state: 00000 +[NO_PID]: ecpg_get_data on line 46: RESULT: offset: -1; array: no +[NO_PID]: sqlca: code: 0, state: 00000 +[NO_PID]: raising sqlcode -213 on line 46: null value without indicator on line 46 +[NO_PID]: sqlca: code: -213, state: 22002 +[NO_PID]: ecpg_execute on line 46: query: fetch c; with 0 parameter(s) on connection ecpg1_regression +[NO_PID]: sqlca: code: 0, state: 00000 +[NO_PID]: ecpg_execute on line 46: using PQexec +[NO_PID]: sqlca: code: 0, state: 00000 +[NO_PID]: ecpg_process_output on line 46: correctly got 0 tuples with 3 fields +[NO_PID]: sqlca: code: 0, state: 00000 +[NO_PID]: raising sqlcode 100 on line 46: no data found on line 46 +[NO_PID]: sqlca: code: 100, state: 02000 +[NO_PID]: ecpg_execute on line 57: query: close c; with 0 parameter(s) on connection ecpg1_regression +[NO_PID]: sqlca: code: 0, state: 00000 +[NO_PID]: ecpg_execute on line 57: using PQexec +[NO_PID]: sqlca: code: 0, state: 00000 +[NO_PID]: ecpg_process_output on line 57: OK: CLOSE CURSOR +[NO_PID]: sqlca: code: 0, state: 00000 +[NO_PID]: ecpg_execute on line 60: query: drop table emp; with 0 parameter(s) on connection ecpg1_regression +[NO_PID]: sqlca: 
code: 0, state: 00000 +[NO_PID]: ecpg_execute on line 60: using PQexec +[NO_PID]: sqlca: code: 0, state: 00000 +[NO_PID]: ecpg_process_output on line 60: OK: DROP TABLE +[NO_PID]: sqlca: code: 0, state: 00000 diff --git a/src/interfaces/ecpg/test/expected/preproc-whenever_do_continue.stdout b/src/interfaces/ecpg/test/expected/preproc-whenever_do_continue.stdout new file mode 100644 index 0000000000..d6ac5a0280 --- /dev/null +++ b/src/interfaces/ecpg/test/expected/preproc-whenever_do_continue.stdout @@ -0,0 +1,2 @@ +Ram 111100.00 21.00 +josh 10000.00 10.00 diff --git a/src/interfaces/ecpg/test/expected/sql-array.c b/src/interfaces/ecpg/test/expected/sql-array.c index 781c426771..f5eb73d185 100644 --- a/src/interfaces/ecpg/test/expected/sql-array.c +++ b/src/interfaces/ecpg/test/expected/sql-array.c @@ -351,5 +351,5 @@ if (sqlca.sqlcode < 0) sqlprint();} free(t); - return (0); + return 0; } diff --git a/src/interfaces/ecpg/test/expected/sql-describe.c b/src/interfaces/ecpg/test/expected/sql-describe.c index 155e206f29..b79a6f4016 100644 --- a/src/interfaces/ecpg/test/expected/sql-describe.c +++ b/src/interfaces/ecpg/test/expected/sql-describe.c @@ -461,5 +461,5 @@ if (sqlca.sqlcode < 0) exit (1);} #line 196 "describe.pgc" - return (0); + return 0; } diff --git a/src/interfaces/ecpg/test/expected/sql-execute.c b/src/interfaces/ecpg/test/expected/sql-execute.c index aee3c1bcb7..cac91dc599 100644 --- a/src/interfaces/ecpg/test/expected/sql-execute.c +++ b/src/interfaces/ecpg/test/expected/sql-execute.c @@ -327,5 +327,5 @@ if (sqlca.sqlcode < 0) sqlprint();} #line 110 "execute.pgc" - return (0); + return 0; } diff --git a/src/interfaces/ecpg/test/expected/sql-oldexec.c b/src/interfaces/ecpg/test/expected/sql-oldexec.c index 5b74dda9b5..d6a661e3fb 100644 --- a/src/interfaces/ecpg/test/expected/sql-oldexec.c +++ b/src/interfaces/ecpg/test/expected/sql-oldexec.c @@ -247,5 +247,5 @@ if (sqlca.sqlcode < 0) sqlprint();} #line 87 "oldexec.pgc" - return (0); + return 0; } diff --git a/src/interfaces/ecpg/test/expected/sql-sqlda.c b/src/interfaces/ecpg/test/expected/sql-sqlda.c index 15c81c6b12..398dcedccf 100644 --- a/src/interfaces/ecpg/test/expected/sql-sqlda.c +++ b/src/interfaces/ecpg/test/expected/sql-sqlda.c @@ -10,6 +10,7 @@ #include #include #include +#include "ecpg_config.h" #line 1 "regression.h" @@ -19,8 +20,7 @@ -#line 5 "sqlda.pgc" - +#line 6 "sqlda.pgc" #line 1 "sqlda.h" @@ -50,6 +50,8 @@ typedef struct sqlda_struct sqlda_t; #ifndef PGTYPES_NUMERIC #define PGTYPES_NUMERIC +#include + #define NUMERIC_POS 0x0000 #define NUMERIC_NEG 0x4000 #define NUMERIC_NAN 0xC000 @@ -149,8 +151,16 @@ dump_sqlda(sqlda_t *sqlda) case ECPGt_int: printf("name sqlda descriptor: '%s' value %d\n", sqlda->sqlvar[i].sqlname.data, *(int *)sqlda->sqlvar[i].sqldata); break; + case ECPGt_long: + printf("name sqlda descriptor: '%s' value %ld\n", sqlda->sqlvar[i].sqlname.data, *(long int *)sqlda->sqlvar[i].sqldata); + break; +#ifdef HAVE_LONG_LONG_INT + case ECPGt_long_long: + printf("name sqlda descriptor: '%s' value %lld\n", sqlda->sqlvar[i].sqlname.data, *(long long int *)sqlda->sqlvar[i].sqldata); + break; +#endif case ECPGt_double: - printf("name sqlda descriptor: '%s' value %lf\n", sqlda->sqlvar[i].sqlname.data, *(double *)sqlda->sqlvar[i].sqldata); + printf("name sqlda descriptor: '%s' value %f\n", sqlda->sqlvar[i].sqlname.data, *(double *)sqlda->sqlvar[i].sqldata); break; case ECPGt_numeric: { @@ -158,7 +168,7 @@ dump_sqlda(sqlda_t *sqlda) val = PGTYPESnumeric_to_asc((numeric*)sqlda->sqlvar[i].sqldata, -1); 
printf("name sqlda descriptor: '%s' value NUMERIC '%s'\n", sqlda->sqlvar[i].sqlname.data, val); - free(val); + PGTYPESchar_free(val); break; } } @@ -174,19 +184,19 @@ main (void) -#line 59 "sqlda.pgc" +#line 67 "sqlda.pgc" char * stmt1 = "SELECT * FROM t1" ; -#line 60 "sqlda.pgc" +#line 68 "sqlda.pgc" char * stmt2 = "SELECT * FROM t1 WHERE id = ?" ; -#line 61 "sqlda.pgc" +#line 69 "sqlda.pgc" int rec ; -#line 62 "sqlda.pgc" +#line 70 "sqlda.pgc" int id ; /* exec sql end declare section */ -#line 63 "sqlda.pgc" +#line 71 "sqlda.pgc" char msg[128]; @@ -195,42 +205,42 @@ main (void) strcpy(msg, "connect"); { ECPGconnect(__LINE__, 0, "ecpg1_regression" , NULL, NULL , "regress1", 0); -#line 70 "sqlda.pgc" +#line 78 "sqlda.pgc" if (sqlca.sqlcode < 0) exit (1);} -#line 70 "sqlda.pgc" +#line 78 "sqlda.pgc" strcpy(msg, "set"); { ECPGdo(__LINE__, 0, 1, NULL, 0, ECPGst_normal, "set datestyle to iso", ECPGt_EOIT, ECPGt_EORT); -#line 73 "sqlda.pgc" +#line 81 "sqlda.pgc" if (sqlca.sqlcode < 0) exit (1);} -#line 73 "sqlda.pgc" +#line 81 "sqlda.pgc" strcpy(msg, "create"); - { ECPGdo(__LINE__, 0, 1, NULL, 0, ECPGst_normal, "create table t1 ( id integer , t text , d1 numeric , d2 float8 , c char ( 10 ) )", ECPGt_EOIT, ECPGt_EORT); -#line 81 "sqlda.pgc" + { ECPGdo(__LINE__, 0, 1, NULL, 0, ECPGst_normal, "create table t1 ( id integer , t text , d1 numeric , d2 float8 , c char ( 10 ) , big bigint )", ECPGt_EOIT, ECPGt_EORT); +#line 91 "sqlda.pgc" if (sqlca.sqlcode < 0) exit (1);} -#line 81 "sqlda.pgc" +#line 91 "sqlda.pgc" strcpy(msg, "insert"); - { ECPGdo(__LINE__, 0, 1, NULL, 0, ECPGst_normal, "insert into t1 values ( 1 , 'a' , 1.0 , 1 , 'a' ) , ( 2 , null , null , null , null ) , ( 4 , 'd' , 4.0 , 4 , 'd' )", ECPGt_EOIT, ECPGt_EORT); -#line 87 "sqlda.pgc" + { ECPGdo(__LINE__, 0, 1, NULL, 0, ECPGst_normal, "insert into t1 values ( 1 , 'a' , 1.0 , 1 , 'a' , 1111111111111111111 ) , ( 2 , null , null , null , null , null ) , ( 4 , 'd' , 4.0 , 4 , 'd' , 4444444444444444444 )", ECPGt_EOIT, ECPGt_EORT); +#line 97 "sqlda.pgc" if (sqlca.sqlcode < 0) exit (1);} -#line 87 "sqlda.pgc" +#line 97 "sqlda.pgc" strcpy(msg, "commit"); { ECPGtrans(__LINE__, NULL, "commit"); -#line 90 "sqlda.pgc" +#line 100 "sqlda.pgc" if (sqlca.sqlcode < 0) exit (1);} -#line 90 "sqlda.pgc" +#line 100 "sqlda.pgc" /* SQLDA test for getting all records from a table */ @@ -239,29 +249,29 @@ if (sqlca.sqlcode < 0) exit (1);} strcpy(msg, "prepare"); { ECPGprepare(__LINE__, NULL, 0, "st_id1", stmt1); -#line 97 "sqlda.pgc" +#line 107 "sqlda.pgc" if (sqlca.sqlcode < 0) exit (1);} -#line 97 "sqlda.pgc" +#line 107 "sqlda.pgc" strcpy(msg, "declare"); /* declare mycur1 cursor for $1 */ -#line 100 "sqlda.pgc" +#line 110 "sqlda.pgc" strcpy(msg, "open"); { ECPGdo(__LINE__, 0, 1, NULL, 0, ECPGst_normal, "declare mycur1 cursor for $1", ECPGt_char_variable,(ECPGprepared_statement(NULL, "st_id1", __LINE__)),(long)1,(long)1,(1)*sizeof(char), ECPGt_NO_INDICATOR, NULL , 0L, 0L, 0L, ECPGt_EOIT, ECPGt_EORT); -#line 103 "sqlda.pgc" +#line 113 "sqlda.pgc" if (sqlca.sqlcode < 0) exit (1);} -#line 103 "sqlda.pgc" +#line 113 "sqlda.pgc" /* exec sql whenever not found break ; */ -#line 105 "sqlda.pgc" +#line 115 "sqlda.pgc" rec = 0; @@ -271,13 +281,13 @@ if (sqlca.sqlcode < 0) exit (1);} { ECPGdo(__LINE__, 0, 1, NULL, 0, ECPGst_normal, "fetch 1 from mycur1", ECPGt_EOIT, ECPGt_sqlda, &outp_sqlda, 0L, 0L, 0L, ECPGt_NO_INDICATOR, NULL , 0L, 0L, 0L, ECPGt_EORT); -#line 111 "sqlda.pgc" +#line 121 "sqlda.pgc" if (sqlca.sqlcode == ECPG_NOT_FOUND) break; -#line 111 "sqlda.pgc" 
+#line 121 "sqlda.pgc" if (sqlca.sqlcode < 0) exit (1);} -#line 111 "sqlda.pgc" +#line 121 "sqlda.pgc" printf("FETCH RECORD %d\n", ++rec); @@ -285,23 +295,23 @@ if (sqlca.sqlcode < 0) exit (1);} } /* exec sql whenever not found continue ; */ -#line 117 "sqlda.pgc" +#line 127 "sqlda.pgc" strcpy(msg, "close"); { ECPGdo(__LINE__, 0, 1, NULL, 0, ECPGst_normal, "close mycur1", ECPGt_EOIT, ECPGt_EORT); -#line 120 "sqlda.pgc" +#line 130 "sqlda.pgc" if (sqlca.sqlcode < 0) exit (1);} -#line 120 "sqlda.pgc" +#line 130 "sqlda.pgc" strcpy(msg, "deallocate"); { ECPGdeallocate(__LINE__, 0, NULL, "st_id1"); -#line 123 "sqlda.pgc" +#line 133 "sqlda.pgc" if (sqlca.sqlcode < 0) exit (1);} -#line 123 "sqlda.pgc" +#line 133 "sqlda.pgc" free(outp_sqlda); @@ -312,35 +322,35 @@ if (sqlca.sqlcode < 0) exit (1);} strcpy(msg, "prepare"); { ECPGprepare(__LINE__, NULL, 0, "st_id2", stmt1); -#line 132 "sqlda.pgc" +#line 142 "sqlda.pgc" if (sqlca.sqlcode < 0) exit (1);} -#line 132 "sqlda.pgc" +#line 142 "sqlda.pgc" strcpy(msg, "declare"); /* declare mycur2 cursor for $1 */ -#line 135 "sqlda.pgc" +#line 145 "sqlda.pgc" strcpy(msg, "open"); { ECPGdo(__LINE__, 0, 1, NULL, 0, ECPGst_normal, "declare mycur2 cursor for $1", ECPGt_char_variable,(ECPGprepared_statement(NULL, "st_id2", __LINE__)),(long)1,(long)1,(1)*sizeof(char), ECPGt_NO_INDICATOR, NULL , 0L, 0L, 0L, ECPGt_EOIT, ECPGt_EORT); -#line 138 "sqlda.pgc" +#line 148 "sqlda.pgc" if (sqlca.sqlcode < 0) exit (1);} -#line 138 "sqlda.pgc" +#line 148 "sqlda.pgc" strcpy(msg, "fetch"); { ECPGdo(__LINE__, 0, 1, NULL, 0, ECPGst_normal, "fetch all from mycur2", ECPGt_EOIT, ECPGt_sqlda, &outp_sqlda, 0L, 0L, 0L, ECPGt_NO_INDICATOR, NULL , 0L, 0L, 0L, ECPGt_EORT); -#line 141 "sqlda.pgc" +#line 151 "sqlda.pgc" if (sqlca.sqlcode < 0) exit (1);} -#line 141 "sqlda.pgc" +#line 151 "sqlda.pgc" outp_sqlda1 = outp_sqlda; @@ -358,18 +368,18 @@ if (sqlca.sqlcode < 0) exit (1);} strcpy(msg, "close"); { ECPGdo(__LINE__, 0, 1, NULL, 0, ECPGst_normal, "close mycur2", ECPGt_EOIT, ECPGt_EORT); -#line 157 "sqlda.pgc" +#line 167 "sqlda.pgc" if (sqlca.sqlcode < 0) exit (1);} -#line 157 "sqlda.pgc" +#line 167 "sqlda.pgc" strcpy(msg, "deallocate"); { ECPGdeallocate(__LINE__, 0, NULL, "st_id2"); -#line 160 "sqlda.pgc" +#line 170 "sqlda.pgc" if (sqlca.sqlcode < 0) exit (1);} -#line 160 "sqlda.pgc" +#line 170 "sqlda.pgc" /* SQLDA test for getting one record using an input descriptor */ @@ -393,10 +403,10 @@ if (sqlca.sqlcode < 0) exit (1);} strcpy(msg, "prepare"); { ECPGprepare(__LINE__, NULL, 0, "st_id3", stmt2); -#line 182 "sqlda.pgc" +#line 192 "sqlda.pgc" if (sqlca.sqlcode < 0) exit (1);} -#line 182 "sqlda.pgc" +#line 192 "sqlda.pgc" strcpy(msg, "execute"); @@ -405,20 +415,20 @@ if (sqlca.sqlcode < 0) exit (1);} ECPGt_NO_INDICATOR, NULL , 0L, 0L, 0L, ECPGt_EOIT, ECPGt_sqlda, &outp_sqlda, 0L, 0L, 0L, ECPGt_NO_INDICATOR, NULL , 0L, 0L, 0L, ECPGt_EORT); -#line 185 "sqlda.pgc" +#line 195 "sqlda.pgc" if (sqlca.sqlcode < 0) exit (1);} -#line 185 "sqlda.pgc" +#line 195 "sqlda.pgc" dump_sqlda(outp_sqlda); strcpy(msg, "deallocate"); { ECPGdeallocate(__LINE__, 0, NULL, "st_id3"); -#line 190 "sqlda.pgc" +#line 200 "sqlda.pgc" if (sqlca.sqlcode < 0) exit (1);} -#line 190 "sqlda.pgc" +#line 200 "sqlda.pgc" free(inp_sqlda); @@ -429,10 +439,10 @@ if (sqlca.sqlcode < 0) exit (1);} */ { ECPGconnect(__LINE__, 0, "ecpg1_regression" , NULL, NULL , "con2", 0); -#line 199 "sqlda.pgc" +#line 209 "sqlda.pgc" if (sqlca.sqlcode < 0) exit (1);} -#line 199 "sqlda.pgc" +#line 209 "sqlda.pgc" /* @@ -454,10 +464,10 @@ if 
(sqlca.sqlcode < 0) exit (1);} strcpy(msg, "prepare"); { ECPGprepare(__LINE__, "con2", 0, "st_id4", stmt2); -#line 219 "sqlda.pgc" +#line 229 "sqlda.pgc" if (sqlca.sqlcode < 0) exit (1);} -#line 219 "sqlda.pgc" +#line 229 "sqlda.pgc" strcpy(msg, "execute"); @@ -466,28 +476,28 @@ if (sqlca.sqlcode < 0) exit (1);} ECPGt_NO_INDICATOR, NULL , 0L, 0L, 0L, ECPGt_EOIT, ECPGt_sqlda, &outp_sqlda, 0L, 0L, 0L, ECPGt_NO_INDICATOR, NULL , 0L, 0L, 0L, ECPGt_EORT); -#line 222 "sqlda.pgc" +#line 232 "sqlda.pgc" if (sqlca.sqlcode < 0) exit (1);} -#line 222 "sqlda.pgc" +#line 232 "sqlda.pgc" dump_sqlda(outp_sqlda); strcpy(msg, "commit"); { ECPGtrans(__LINE__, "con2", "commit"); -#line 227 "sqlda.pgc" +#line 237 "sqlda.pgc" if (sqlca.sqlcode < 0) exit (1);} -#line 227 "sqlda.pgc" +#line 237 "sqlda.pgc" strcpy(msg, "deallocate"); { ECPGdeallocate(__LINE__, 0, NULL, "st_id4"); -#line 230 "sqlda.pgc" +#line 240 "sqlda.pgc" if (sqlca.sqlcode < 0) exit (1);} -#line 230 "sqlda.pgc" +#line 240 "sqlda.pgc" free(inp_sqlda); @@ -495,37 +505,37 @@ if (sqlca.sqlcode < 0) exit (1);} strcpy(msg, "disconnect"); { ECPGdisconnect(__LINE__, "con2"); -#line 236 "sqlda.pgc" +#line 246 "sqlda.pgc" if (sqlca.sqlcode < 0) exit (1);} -#line 236 "sqlda.pgc" +#line 246 "sqlda.pgc" /* End test */ strcpy(msg, "drop"); { ECPGdo(__LINE__, 0, 1, NULL, 0, ECPGst_normal, "drop table t1", ECPGt_EOIT, ECPGt_EORT); -#line 241 "sqlda.pgc" +#line 251 "sqlda.pgc" if (sqlca.sqlcode < 0) exit (1);} -#line 241 "sqlda.pgc" +#line 251 "sqlda.pgc" strcpy(msg, "commit"); { ECPGtrans(__LINE__, NULL, "commit"); -#line 244 "sqlda.pgc" +#line 254 "sqlda.pgc" if (sqlca.sqlcode < 0) exit (1);} -#line 244 "sqlda.pgc" +#line 254 "sqlda.pgc" strcpy(msg, "disconnect"); { ECPGdisconnect(__LINE__, "CURRENT"); -#line 247 "sqlda.pgc" +#line 257 "sqlda.pgc" if (sqlca.sqlcode < 0) exit (1);} -#line 247 "sqlda.pgc" +#line 257 "sqlda.pgc" - return (0); + return 0; } diff --git a/src/interfaces/ecpg/test/expected/sql-sqlda.stderr b/src/interfaces/ecpg/test/expected/sql-sqlda.stderr index fdddf9ea31..8c70100bde 100644 --- a/src/interfaces/ecpg/test/expected/sql-sqlda.stderr +++ b/src/interfaces/ecpg/test/expected/sql-sqlda.stderr @@ -2,307 +2,335 @@ [NO_PID]: sqlca: code: 0, state: 00000 [NO_PID]: ECPGconnect: opening database ecpg1_regression on port [NO_PID]: sqlca: code: 0, state: 00000 -[NO_PID]: ecpg_execute on line 73: query: set datestyle to iso; with 0 parameter(s) on connection regress1 +[NO_PID]: ecpg_execute on line 81: query: set datestyle to iso; with 0 parameter(s) on connection regress1 [NO_PID]: sqlca: code: 0, state: 00000 -[NO_PID]: ecpg_execute on line 73: using PQexec +[NO_PID]: ecpg_execute on line 81: using PQexec [NO_PID]: sqlca: code: 0, state: 00000 -[NO_PID]: ecpg_process_output on line 73: OK: SET +[NO_PID]: ecpg_process_output on line 81: OK: SET [NO_PID]: sqlca: code: 0, state: 00000 -[NO_PID]: ecpg_execute on line 76: query: create table t1 ( id integer , t text , d1 numeric , d2 float8 , c char ( 10 ) ); with 0 parameter(s) on connection regress1 +[NO_PID]: ecpg_execute on line 84: query: create table t1 ( id integer , t text , d1 numeric , d2 float8 , c char ( 10 ) , big bigint ); with 0 parameter(s) on connection regress1 [NO_PID]: sqlca: code: 0, state: 00000 -[NO_PID]: ecpg_execute on line 76: using PQexec +[NO_PID]: ecpg_execute on line 84: using PQexec [NO_PID]: sqlca: code: 0, state: 00000 -[NO_PID]: ecpg_process_output on line 76: OK: CREATE TABLE +[NO_PID]: ecpg_process_output on line 84: OK: CREATE TABLE [NO_PID]: sqlca: code: 0, 
state: 00000 -[NO_PID]: ecpg_execute on line 84: query: insert into t1 values ( 1 , 'a' , 1.0 , 1 , 'a' ) , ( 2 , null , null , null , null ) , ( 4 , 'd' , 4.0 , 4 , 'd' ); with 0 parameter(s) on connection regress1 +[NO_PID]: ecpg_execute on line 94: query: insert into t1 values ( 1 , 'a' , 1.0 , 1 , 'a' , 1111111111111111111 ) , ( 2 , null , null , null , null , null ) , ( 4 , 'd' , 4.0 , 4 , 'd' , 4444444444444444444 ); with 0 parameter(s) on connection regress1 [NO_PID]: sqlca: code: 0, state: 00000 -[NO_PID]: ecpg_execute on line 84: using PQexec +[NO_PID]: ecpg_execute on line 94: using PQexec +[NO_PID]: sqlca: code: 0, state: 00000 +[NO_PID]: ecpg_process_output on line 94: OK: INSERT 0 3 +[NO_PID]: sqlca: code: 0, state: 00000 +[NO_PID]: ECPGtrans on line 100: action "commit"; connection "regress1" [NO_PID]: sqlca: code: 0, state: 00000 -[NO_PID]: ecpg_process_output on line 84: OK: INSERT 0 3 +[NO_PID]: prepare_common on line 107: name st_id1; query: "SELECT * FROM t1" [NO_PID]: sqlca: code: 0, state: 00000 -[NO_PID]: ECPGtrans on line 90: action "commit"; connection "regress1" +[NO_PID]: ecpg_execute on line 113: query: declare mycur1 cursor for SELECT * FROM t1; with 0 parameter(s) on connection regress1 [NO_PID]: sqlca: code: 0, state: 00000 -[NO_PID]: prepare_common on line 97: name st_id1; query: "SELECT * FROM t1" +[NO_PID]: ecpg_execute on line 113: using PQexec [NO_PID]: sqlca: code: 0, state: 00000 -[NO_PID]: ecpg_execute on line 103: query: declare mycur1 cursor for SELECT * FROM t1; with 0 parameter(s) on connection regress1 +[NO_PID]: ecpg_process_output on line 113: OK: DECLARE CURSOR [NO_PID]: sqlca: code: 0, state: 00000 -[NO_PID]: ecpg_execute on line 103: using PQexec +[NO_PID]: ecpg_execute on line 121: query: fetch 1 from mycur1; with 0 parameter(s) on connection regress1 [NO_PID]: sqlca: code: 0, state: 00000 -[NO_PID]: ecpg_process_output on line 103: OK: DECLARE CURSOR +[NO_PID]: ecpg_execute on line 121: using PQexec [NO_PID]: sqlca: code: 0, state: 00000 -[NO_PID]: ecpg_execute on line 111: query: fetch 1 from mycur1; with 0 parameter(s) on connection regress1 +[NO_PID]: ecpg_process_output on line 121: correctly got 1 tuples with 6 fields [NO_PID]: sqlca: code: 0, state: 00000 -[NO_PID]: ecpg_execute on line 111: using PQexec +[NO_PID]: ecpg_build_native_sqlda on line 121 sqld = 6 [NO_PID]: sqlca: code: 0, state: 00000 -[NO_PID]: ecpg_process_output on line 111: correctly got 1 tuples with 5 fields +[NO_PID]: ecpg_process_output on line 121: new sqlda was built [NO_PID]: sqlca: code: 0, state: 00000 -[NO_PID]: ecpg_build_native_sqlda on line 111 sqld = 5 +[NO_PID]: ecpg_set_native_sqlda on line 121 row 0 col 0 IS NOT NULL [NO_PID]: sqlca: code: 0, state: 00000 -[NO_PID]: ecpg_process_output on line 111: new sqlda was built +[NO_PID]: ecpg_get_data on line 121: RESULT: 1 offset: -1; array: no [NO_PID]: sqlca: code: 0, state: 00000 -[NO_PID]: ecpg_set_native_sqlda on line 111 row 0 col 0 IS NOT NULL +[NO_PID]: ecpg_set_native_sqlda on line 121 row 0 col 1 IS NOT NULL [NO_PID]: sqlca: code: 0, state: 00000 -[NO_PID]: ecpg_get_data on line 111: RESULT: 1 offset: -1; array: no +[NO_PID]: ecpg_get_data on line 121: RESULT: a offset: -1; array: no [NO_PID]: sqlca: code: 0, state: 00000 -[NO_PID]: ecpg_set_native_sqlda on line 111 row 0 col 1 IS NOT NULL +[NO_PID]: ecpg_set_native_sqlda on line 121 row 0 col 2 IS NOT NULL [NO_PID]: sqlca: code: 0, state: 00000 -[NO_PID]: ecpg_get_data on line 111: RESULT: a offset: -1; array: no +[NO_PID]: ecpg_set_native_sqlda on 
line 121 row 0 col 3 IS NOT NULL [NO_PID]: sqlca: code: 0, state: 00000 -[NO_PID]: ecpg_set_native_sqlda on line 111 row 0 col 2 IS NOT NULL +[NO_PID]: ecpg_get_data on line 121: RESULT: 1 offset: -1; array: no [NO_PID]: sqlca: code: 0, state: 00000 -[NO_PID]: ecpg_set_native_sqlda on line 111 row 0 col 3 IS NOT NULL +[NO_PID]: ecpg_set_native_sqlda on line 121 row 0 col 4 IS NOT NULL [NO_PID]: sqlca: code: 0, state: 00000 -[NO_PID]: ecpg_get_data on line 111: RESULT: 1 offset: -1; array: no +[NO_PID]: ecpg_get_data on line 121: RESULT: a offset: -1; array: no [NO_PID]: sqlca: code: 0, state: 00000 -[NO_PID]: ecpg_set_native_sqlda on line 111 row 0 col 4 IS NOT NULL +[NO_PID]: ecpg_set_native_sqlda on line 121 row 0 col 5 IS NOT NULL [NO_PID]: sqlca: code: 0, state: 00000 -[NO_PID]: ecpg_get_data on line 111: RESULT: a offset: -1; array: no +[NO_PID]: ecpg_get_data on line 121: RESULT: 1111111111111111111 offset: -1; array: no [NO_PID]: sqlca: code: 0, state: 00000 -[NO_PID]: ecpg_process_output on line 111: putting result (1 tuple 5 fields) into sqlda descriptor +[NO_PID]: ecpg_process_output on line 121: putting result (1 tuple 6 fields) into sqlda descriptor [NO_PID]: sqlca: code: 0, state: 00000 -[NO_PID]: ecpg_execute on line 111: query: fetch 1 from mycur1; with 0 parameter(s) on connection regress1 +[NO_PID]: ecpg_execute on line 121: query: fetch 1 from mycur1; with 0 parameter(s) on connection regress1 [NO_PID]: sqlca: code: 0, state: 00000 -[NO_PID]: ecpg_execute on line 111: using PQexec +[NO_PID]: ecpg_execute on line 121: using PQexec [NO_PID]: sqlca: code: 0, state: 00000 -[NO_PID]: ecpg_process_output on line 111: correctly got 1 tuples with 5 fields +[NO_PID]: ecpg_process_output on line 121: correctly got 1 tuples with 6 fields [NO_PID]: sqlca: code: 0, state: 00000 -[NO_PID]: ecpg_build_native_sqlda on line 111 sqld = 5 +[NO_PID]: ecpg_build_native_sqlda on line 121 sqld = 6 [NO_PID]: sqlca: code: 0, state: 00000 -[NO_PID]: ecpg_process_output on line 111: new sqlda was built +[NO_PID]: ecpg_process_output on line 121: new sqlda was built [NO_PID]: sqlca: code: 0, state: 00000 -[NO_PID]: ecpg_set_native_sqlda on line 111 row 0 col 0 IS NOT NULL +[NO_PID]: ecpg_set_native_sqlda on line 121 row 0 col 0 IS NOT NULL [NO_PID]: sqlca: code: 0, state: 00000 -[NO_PID]: ecpg_get_data on line 111: RESULT: 2 offset: -1; array: no +[NO_PID]: ecpg_get_data on line 121: RESULT: 2 offset: -1; array: no [NO_PID]: sqlca: code: 0, state: 00000 -[NO_PID]: ecpg_set_native_sqlda on line 111 row 0 col 1 IS NULL +[NO_PID]: ecpg_set_native_sqlda on line 121 row 0 col 1 IS NULL [NO_PID]: sqlca: code: 0, state: 00000 -[NO_PID]: ecpg_set_native_sqlda on line 111 row 0 col 2 IS NULL +[NO_PID]: ecpg_set_native_sqlda on line 121 row 0 col 2 IS NULL [NO_PID]: sqlca: code: 0, state: 00000 -[NO_PID]: ecpg_set_native_sqlda on line 111 row 0 col 3 IS NULL +[NO_PID]: ecpg_set_native_sqlda on line 121 row 0 col 3 IS NULL [NO_PID]: sqlca: code: 0, state: 00000 -[NO_PID]: ecpg_set_native_sqlda on line 111 row 0 col 4 IS NULL +[NO_PID]: ecpg_set_native_sqlda on line 121 row 0 col 4 IS NULL [NO_PID]: sqlca: code: 0, state: 00000 -[NO_PID]: ecpg_process_output on line 111: putting result (1 tuple 5 fields) into sqlda descriptor +[NO_PID]: ecpg_set_native_sqlda on line 121 row 0 col 5 IS NULL [NO_PID]: sqlca: code: 0, state: 00000 -[NO_PID]: ecpg_execute on line 111: query: fetch 1 from mycur1; with 0 parameter(s) on connection regress1 +[NO_PID]: ecpg_process_output on line 121: putting result (1 tuple 6 fields) 
into sqlda descriptor [NO_PID]: sqlca: code: 0, state: 00000 -[NO_PID]: ecpg_execute on line 111: using PQexec +[NO_PID]: ecpg_execute on line 121: query: fetch 1 from mycur1; with 0 parameter(s) on connection regress1 [NO_PID]: sqlca: code: 0, state: 00000 -[NO_PID]: ecpg_process_output on line 111: correctly got 1 tuples with 5 fields +[NO_PID]: ecpg_execute on line 121: using PQexec [NO_PID]: sqlca: code: 0, state: 00000 -[NO_PID]: ecpg_build_native_sqlda on line 111 sqld = 5 +[NO_PID]: ecpg_process_output on line 121: correctly got 1 tuples with 6 fields [NO_PID]: sqlca: code: 0, state: 00000 -[NO_PID]: ecpg_process_output on line 111: new sqlda was built +[NO_PID]: ecpg_build_native_sqlda on line 121 sqld = 6 [NO_PID]: sqlca: code: 0, state: 00000 -[NO_PID]: ecpg_set_native_sqlda on line 111 row 0 col 0 IS NOT NULL +[NO_PID]: ecpg_process_output on line 121: new sqlda was built [NO_PID]: sqlca: code: 0, state: 00000 -[NO_PID]: ecpg_get_data on line 111: RESULT: 4 offset: -1; array: no +[NO_PID]: ecpg_set_native_sqlda on line 121 row 0 col 0 IS NOT NULL [NO_PID]: sqlca: code: 0, state: 00000 -[NO_PID]: ecpg_set_native_sqlda on line 111 row 0 col 1 IS NOT NULL +[NO_PID]: ecpg_get_data on line 121: RESULT: 4 offset: -1; array: no [NO_PID]: sqlca: code: 0, state: 00000 -[NO_PID]: ecpg_get_data on line 111: RESULT: d offset: -1; array: no +[NO_PID]: ecpg_set_native_sqlda on line 121 row 0 col 1 IS NOT NULL [NO_PID]: sqlca: code: 0, state: 00000 -[NO_PID]: ecpg_set_native_sqlda on line 111 row 0 col 2 IS NOT NULL +[NO_PID]: ecpg_get_data on line 121: RESULT: d offset: -1; array: no [NO_PID]: sqlca: code: 0, state: 00000 -[NO_PID]: ecpg_set_native_sqlda on line 111 row 0 col 3 IS NOT NULL +[NO_PID]: ecpg_set_native_sqlda on line 121 row 0 col 2 IS NOT NULL [NO_PID]: sqlca: code: 0, state: 00000 -[NO_PID]: ecpg_get_data on line 111: RESULT: 4 offset: -1; array: no +[NO_PID]: ecpg_set_native_sqlda on line 121 row 0 col 3 IS NOT NULL [NO_PID]: sqlca: code: 0, state: 00000 -[NO_PID]: ecpg_set_native_sqlda on line 111 row 0 col 4 IS NOT NULL +[NO_PID]: ecpg_get_data on line 121: RESULT: 4 offset: -1; array: no [NO_PID]: sqlca: code: 0, state: 00000 -[NO_PID]: ecpg_get_data on line 111: RESULT: d offset: -1; array: no +[NO_PID]: ecpg_set_native_sqlda on line 121 row 0 col 4 IS NOT NULL [NO_PID]: sqlca: code: 0, state: 00000 -[NO_PID]: ecpg_process_output on line 111: putting result (1 tuple 5 fields) into sqlda descriptor +[NO_PID]: ecpg_get_data on line 121: RESULT: d offset: -1; array: no [NO_PID]: sqlca: code: 0, state: 00000 -[NO_PID]: ecpg_execute on line 111: query: fetch 1 from mycur1; with 0 parameter(s) on connection regress1 +[NO_PID]: ecpg_set_native_sqlda on line 121 row 0 col 5 IS NOT NULL [NO_PID]: sqlca: code: 0, state: 00000 -[NO_PID]: ecpg_execute on line 111: using PQexec +[NO_PID]: ecpg_get_data on line 121: RESULT: 4444444444444444444 offset: -1; array: no [NO_PID]: sqlca: code: 0, state: 00000 -[NO_PID]: ecpg_process_output on line 111: correctly got 0 tuples with 5 fields +[NO_PID]: ecpg_process_output on line 121: putting result (1 tuple 6 fields) into sqlda descriptor [NO_PID]: sqlca: code: 0, state: 00000 -[NO_PID]: raising sqlcode 100 on line 111: no data found on line 111 +[NO_PID]: ecpg_execute on line 121: query: fetch 1 from mycur1; with 0 parameter(s) on connection regress1 +[NO_PID]: sqlca: code: 0, state: 00000 +[NO_PID]: ecpg_execute on line 121: using PQexec +[NO_PID]: sqlca: code: 0, state: 00000 +[NO_PID]: ecpg_process_output on line 121: correctly got 0 tuples 
with 6 fields +[NO_PID]: sqlca: code: 0, state: 00000 +[NO_PID]: raising sqlcode 100 on line 121: no data found on line 121 [NO_PID]: sqlca: code: 100, state: 02000 -[NO_PID]: ecpg_execute on line 120: query: close mycur1; with 0 parameter(s) on connection regress1 +[NO_PID]: ecpg_execute on line 130: query: close mycur1; with 0 parameter(s) on connection regress1 +[NO_PID]: sqlca: code: 0, state: 00000 +[NO_PID]: ecpg_execute on line 130: using PQexec [NO_PID]: sqlca: code: 0, state: 00000 -[NO_PID]: ecpg_execute on line 120: using PQexec +[NO_PID]: ecpg_process_output on line 130: OK: CLOSE CURSOR [NO_PID]: sqlca: code: 0, state: 00000 -[NO_PID]: ecpg_process_output on line 120: OK: CLOSE CURSOR +[NO_PID]: deallocate_one on line 133: name st_id1 [NO_PID]: sqlca: code: 0, state: 00000 -[NO_PID]: deallocate_one on line 123: name st_id1 +[NO_PID]: prepare_common on line 142: name st_id2; query: "SELECT * FROM t1" [NO_PID]: sqlca: code: 0, state: 00000 -[NO_PID]: prepare_common on line 132: name st_id2; query: "SELECT * FROM t1" +[NO_PID]: ecpg_execute on line 148: query: declare mycur2 cursor for SELECT * FROM t1; with 0 parameter(s) on connection regress1 [NO_PID]: sqlca: code: 0, state: 00000 -[NO_PID]: ecpg_execute on line 138: query: declare mycur2 cursor for SELECT * FROM t1; with 0 parameter(s) on connection regress1 +[NO_PID]: ecpg_execute on line 148: using PQexec [NO_PID]: sqlca: code: 0, state: 00000 -[NO_PID]: ecpg_execute on line 138: using PQexec +[NO_PID]: ecpg_process_output on line 148: OK: DECLARE CURSOR [NO_PID]: sqlca: code: 0, state: 00000 -[NO_PID]: ecpg_process_output on line 138: OK: DECLARE CURSOR +[NO_PID]: ecpg_execute on line 151: query: fetch all from mycur2; with 0 parameter(s) on connection regress1 [NO_PID]: sqlca: code: 0, state: 00000 -[NO_PID]: ecpg_execute on line 141: query: fetch all from mycur2; with 0 parameter(s) on connection regress1 +[NO_PID]: ecpg_execute on line 151: using PQexec [NO_PID]: sqlca: code: 0, state: 00000 -[NO_PID]: ecpg_execute on line 141: using PQexec +[NO_PID]: ecpg_process_output on line 151: correctly got 3 tuples with 6 fields [NO_PID]: sqlca: code: 0, state: 00000 -[NO_PID]: ecpg_process_output on line 141: correctly got 3 tuples with 5 fields +[NO_PID]: ecpg_build_native_sqlda on line 151 sqld = 6 [NO_PID]: sqlca: code: 0, state: 00000 -[NO_PID]: ecpg_build_native_sqlda on line 141 sqld = 5 +[NO_PID]: ecpg_process_output on line 151: new sqlda was built [NO_PID]: sqlca: code: 0, state: 00000 -[NO_PID]: ecpg_process_output on line 141: new sqlda was built +[NO_PID]: ecpg_set_native_sqlda on line 151 row 2 col 0 IS NOT NULL [NO_PID]: sqlca: code: 0, state: 00000 -[NO_PID]: ecpg_set_native_sqlda on line 141 row 2 col 0 IS NOT NULL +[NO_PID]: ecpg_get_data on line 151: RESULT: 4 offset: -1; array: no [NO_PID]: sqlca: code: 0, state: 00000 -[NO_PID]: ecpg_get_data on line 141: RESULT: 4 offset: -1; array: no +[NO_PID]: ecpg_set_native_sqlda on line 151 row 2 col 1 IS NOT NULL [NO_PID]: sqlca: code: 0, state: 00000 -[NO_PID]: ecpg_set_native_sqlda on line 141 row 2 col 1 IS NOT NULL +[NO_PID]: ecpg_get_data on line 151: RESULT: d offset: -1; array: no [NO_PID]: sqlca: code: 0, state: 00000 -[NO_PID]: ecpg_get_data on line 141: RESULT: d offset: -1; array: no +[NO_PID]: ecpg_set_native_sqlda on line 151 row 2 col 2 IS NOT NULL [NO_PID]: sqlca: code: 0, state: 00000 -[NO_PID]: ecpg_set_native_sqlda on line 141 row 2 col 2 IS NOT NULL +[NO_PID]: ecpg_set_native_sqlda on line 151 row 2 col 3 IS NOT NULL [NO_PID]: sqlca: code: 0, 
state: 00000 -[NO_PID]: ecpg_set_native_sqlda on line 141 row 2 col 3 IS NOT NULL +[NO_PID]: ecpg_get_data on line 151: RESULT: 4 offset: -1; array: no [NO_PID]: sqlca: code: 0, state: 00000 -[NO_PID]: ecpg_get_data on line 141: RESULT: 4 offset: -1; array: no +[NO_PID]: ecpg_set_native_sqlda on line 151 row 2 col 4 IS NOT NULL [NO_PID]: sqlca: code: 0, state: 00000 -[NO_PID]: ecpg_set_native_sqlda on line 141 row 2 col 4 IS NOT NULL +[NO_PID]: ecpg_get_data on line 151: RESULT: d offset: -1; array: no [NO_PID]: sqlca: code: 0, state: 00000 -[NO_PID]: ecpg_get_data on line 141: RESULT: d offset: -1; array: no +[NO_PID]: ecpg_set_native_sqlda on line 151 row 2 col 5 IS NOT NULL [NO_PID]: sqlca: code: 0, state: 00000 -[NO_PID]: ecpg_process_output on line 141: putting result (1 tuple 5 fields) into sqlda descriptor +[NO_PID]: ecpg_get_data on line 151: RESULT: 4444444444444444444 offset: -1; array: no [NO_PID]: sqlca: code: 0, state: 00000 -[NO_PID]: ecpg_build_native_sqlda on line 141 sqld = 5 +[NO_PID]: ecpg_process_output on line 151: putting result (1 tuple 6 fields) into sqlda descriptor [NO_PID]: sqlca: code: 0, state: 00000 -[NO_PID]: ecpg_process_output on line 141: new sqlda was built +[NO_PID]: ecpg_build_native_sqlda on line 151 sqld = 6 [NO_PID]: sqlca: code: 0, state: 00000 -[NO_PID]: ecpg_set_native_sqlda on line 141 row 1 col 0 IS NOT NULL +[NO_PID]: ecpg_process_output on line 151: new sqlda was built [NO_PID]: sqlca: code: 0, state: 00000 -[NO_PID]: ecpg_get_data on line 141: RESULT: 2 offset: -1; array: no +[NO_PID]: ecpg_set_native_sqlda on line 151 row 1 col 0 IS NOT NULL [NO_PID]: sqlca: code: 0, state: 00000 -[NO_PID]: ecpg_set_native_sqlda on line 141 row 1 col 1 IS NULL +[NO_PID]: ecpg_get_data on line 151: RESULT: 2 offset: -1; array: no [NO_PID]: sqlca: code: 0, state: 00000 -[NO_PID]: ecpg_set_native_sqlda on line 141 row 1 col 2 IS NULL +[NO_PID]: ecpg_set_native_sqlda on line 151 row 1 col 1 IS NULL [NO_PID]: sqlca: code: 0, state: 00000 -[NO_PID]: ecpg_set_native_sqlda on line 141 row 1 col 3 IS NULL +[NO_PID]: ecpg_set_native_sqlda on line 151 row 1 col 2 IS NULL [NO_PID]: sqlca: code: 0, state: 00000 -[NO_PID]: ecpg_set_native_sqlda on line 141 row 1 col 4 IS NULL +[NO_PID]: ecpg_set_native_sqlda on line 151 row 1 col 3 IS NULL [NO_PID]: sqlca: code: 0, state: 00000 -[NO_PID]: ecpg_process_output on line 141: putting result (1 tuple 5 fields) into sqlda descriptor +[NO_PID]: ecpg_set_native_sqlda on line 151 row 1 col 4 IS NULL [NO_PID]: sqlca: code: 0, state: 00000 -[NO_PID]: ecpg_build_native_sqlda on line 141 sqld = 5 +[NO_PID]: ecpg_set_native_sqlda on line 151 row 1 col 5 IS NULL [NO_PID]: sqlca: code: 0, state: 00000 -[NO_PID]: ecpg_process_output on line 141: new sqlda was built +[NO_PID]: ecpg_process_output on line 151: putting result (1 tuple 6 fields) into sqlda descriptor [NO_PID]: sqlca: code: 0, state: 00000 -[NO_PID]: ecpg_set_native_sqlda on line 141 row 0 col 0 IS NOT NULL +[NO_PID]: ecpg_build_native_sqlda on line 151 sqld = 6 [NO_PID]: sqlca: code: 0, state: 00000 -[NO_PID]: ecpg_get_data on line 141: RESULT: 1 offset: -1; array: no +[NO_PID]: ecpg_process_output on line 151: new sqlda was built [NO_PID]: sqlca: code: 0, state: 00000 -[NO_PID]: ecpg_set_native_sqlda on line 141 row 0 col 1 IS NOT NULL +[NO_PID]: ecpg_set_native_sqlda on line 151 row 0 col 0 IS NOT NULL [NO_PID]: sqlca: code: 0, state: 00000 -[NO_PID]: ecpg_get_data on line 141: RESULT: a offset: -1; array: no +[NO_PID]: ecpg_get_data on line 151: RESULT: 1 offset: -1; array: 
no [NO_PID]: sqlca: code: 0, state: 00000 -[NO_PID]: ecpg_set_native_sqlda on line 141 row 0 col 2 IS NOT NULL +[NO_PID]: ecpg_set_native_sqlda on line 151 row 0 col 1 IS NOT NULL [NO_PID]: sqlca: code: 0, state: 00000 -[NO_PID]: ecpg_set_native_sqlda on line 141 row 0 col 3 IS NOT NULL +[NO_PID]: ecpg_get_data on line 151: RESULT: a offset: -1; array: no [NO_PID]: sqlca: code: 0, state: 00000 -[NO_PID]: ecpg_get_data on line 141: RESULT: 1 offset: -1; array: no +[NO_PID]: ecpg_set_native_sqlda on line 151 row 0 col 2 IS NOT NULL [NO_PID]: sqlca: code: 0, state: 00000 -[NO_PID]: ecpg_set_native_sqlda on line 141 row 0 col 4 IS NOT NULL +[NO_PID]: ecpg_set_native_sqlda on line 151 row 0 col 3 IS NOT NULL [NO_PID]: sqlca: code: 0, state: 00000 -[NO_PID]: ecpg_get_data on line 141: RESULT: a offset: -1; array: no +[NO_PID]: ecpg_get_data on line 151: RESULT: 1 offset: -1; array: no [NO_PID]: sqlca: code: 0, state: 00000 -[NO_PID]: ecpg_process_output on line 141: putting result (1 tuple 5 fields) into sqlda descriptor +[NO_PID]: ecpg_set_native_sqlda on line 151 row 0 col 4 IS NOT NULL [NO_PID]: sqlca: code: 0, state: 00000 -[NO_PID]: ecpg_execute on line 157: query: close mycur2; with 0 parameter(s) on connection regress1 +[NO_PID]: ecpg_get_data on line 151: RESULT: a offset: -1; array: no [NO_PID]: sqlca: code: 0, state: 00000 -[NO_PID]: ecpg_execute on line 157: using PQexec +[NO_PID]: ecpg_set_native_sqlda on line 151 row 0 col 5 IS NOT NULL [NO_PID]: sqlca: code: 0, state: 00000 -[NO_PID]: ecpg_process_output on line 157: OK: CLOSE CURSOR +[NO_PID]: ecpg_get_data on line 151: RESULT: 1111111111111111111 offset: -1; array: no [NO_PID]: sqlca: code: 0, state: 00000 -[NO_PID]: deallocate_one on line 160: name st_id2 +[NO_PID]: ecpg_process_output on line 151: putting result (1 tuple 6 fields) into sqlda descriptor [NO_PID]: sqlca: code: 0, state: 00000 -[NO_PID]: prepare_common on line 182: name st_id3; query: "SELECT * FROM t1 WHERE id = $1" +[NO_PID]: ecpg_execute on line 167: query: close mycur2; with 0 parameter(s) on connection regress1 [NO_PID]: sqlca: code: 0, state: 00000 -[NO_PID]: ecpg_execute on line 185: query: SELECT * FROM t1 WHERE id = $1; with 1 parameter(s) on connection regress1 +[NO_PID]: ecpg_execute on line 167: using PQexec [NO_PID]: sqlca: code: 0, state: 00000 -[NO_PID]: ecpg_execute on line 185: using PQexecPrepared for "SELECT * FROM t1 WHERE id = $1" +[NO_PID]: ecpg_process_output on line 167: OK: CLOSE CURSOR [NO_PID]: sqlca: code: 0, state: 00000 -[NO_PID]: ecpg_free_params on line 185: parameter 1 = 4 +[NO_PID]: deallocate_one on line 170: name st_id2 [NO_PID]: sqlca: code: 0, state: 00000 -[NO_PID]: ecpg_process_output on line 185: correctly got 1 tuples with 5 fields +[NO_PID]: prepare_common on line 192: name st_id3; query: "SELECT * FROM t1 WHERE id = $1" [NO_PID]: sqlca: code: 0, state: 00000 -[NO_PID]: ecpg_build_native_sqlda on line 185 sqld = 5 +[NO_PID]: ecpg_execute on line 195: query: SELECT * FROM t1 WHERE id = $1; with 1 parameter(s) on connection regress1 [NO_PID]: sqlca: code: 0, state: 00000 -[NO_PID]: ecpg_process_output on line 185: new sqlda was built +[NO_PID]: ecpg_execute on line 195: using PQexecPrepared for "SELECT * FROM t1 WHERE id = $1" [NO_PID]: sqlca: code: 0, state: 00000 -[NO_PID]: ecpg_set_native_sqlda on line 185 row 0 col 0 IS NOT NULL +[NO_PID]: ecpg_free_params on line 195: parameter 1 = 4 [NO_PID]: sqlca: code: 0, state: 00000 -[NO_PID]: ecpg_get_data on line 185: RESULT: 4 offset: -1; array: no +[NO_PID]: 
ecpg_process_output on line 195: correctly got 1 tuples with 6 fields [NO_PID]: sqlca: code: 0, state: 00000 -[NO_PID]: ecpg_set_native_sqlda on line 185 row 0 col 1 IS NOT NULL +[NO_PID]: ecpg_build_native_sqlda on line 195 sqld = 6 [NO_PID]: sqlca: code: 0, state: 00000 -[NO_PID]: ecpg_get_data on line 185: RESULT: d offset: -1; array: no +[NO_PID]: ecpg_process_output on line 195: new sqlda was built [NO_PID]: sqlca: code: 0, state: 00000 -[NO_PID]: ecpg_set_native_sqlda on line 185 row 0 col 2 IS NOT NULL +[NO_PID]: ecpg_set_native_sqlda on line 195 row 0 col 0 IS NOT NULL [NO_PID]: sqlca: code: 0, state: 00000 -[NO_PID]: ecpg_set_native_sqlda on line 185 row 0 col 3 IS NOT NULL +[NO_PID]: ecpg_get_data on line 195: RESULT: 4 offset: -1; array: no [NO_PID]: sqlca: code: 0, state: 00000 -[NO_PID]: ecpg_get_data on line 185: RESULT: 4 offset: -1; array: no +[NO_PID]: ecpg_set_native_sqlda on line 195 row 0 col 1 IS NOT NULL [NO_PID]: sqlca: code: 0, state: 00000 -[NO_PID]: ecpg_set_native_sqlda on line 185 row 0 col 4 IS NOT NULL +[NO_PID]: ecpg_get_data on line 195: RESULT: d offset: -1; array: no [NO_PID]: sqlca: code: 0, state: 00000 -[NO_PID]: ecpg_get_data on line 185: RESULT: d offset: -1; array: no +[NO_PID]: ecpg_set_native_sqlda on line 195 row 0 col 2 IS NOT NULL [NO_PID]: sqlca: code: 0, state: 00000 -[NO_PID]: ecpg_process_output on line 185: putting result (1 tuple 5 fields) into sqlda descriptor +[NO_PID]: ecpg_set_native_sqlda on line 195 row 0 col 3 IS NOT NULL [NO_PID]: sqlca: code: 0, state: 00000 -[NO_PID]: deallocate_one on line 190: name st_id3 +[NO_PID]: ecpg_get_data on line 195: RESULT: 4 offset: -1; array: no +[NO_PID]: sqlca: code: 0, state: 00000 +[NO_PID]: ecpg_set_native_sqlda on line 195 row 0 col 4 IS NOT NULL +[NO_PID]: sqlca: code: 0, state: 00000 +[NO_PID]: ecpg_get_data on line 195: RESULT: d offset: -1; array: no +[NO_PID]: sqlca: code: 0, state: 00000 +[NO_PID]: ecpg_set_native_sqlda on line 195 row 0 col 5 IS NOT NULL +[NO_PID]: sqlca: code: 0, state: 00000 +[NO_PID]: ecpg_get_data on line 195: RESULT: 4444444444444444444 offset: -1; array: no +[NO_PID]: sqlca: code: 0, state: 00000 +[NO_PID]: ecpg_process_output on line 195: putting result (1 tuple 6 fields) into sqlda descriptor +[NO_PID]: sqlca: code: 0, state: 00000 +[NO_PID]: deallocate_one on line 200: name st_id3 [NO_PID]: sqlca: code: 0, state: 00000 [NO_PID]: ECPGconnect: opening database ecpg1_regression on port [NO_PID]: sqlca: code: 0, state: 00000 -[NO_PID]: prepare_common on line 219: name st_id4; query: "SELECT * FROM t1 WHERE id = $1" +[NO_PID]: prepare_common on line 229: name st_id4; query: "SELECT * FROM t1 WHERE id = $1" +[NO_PID]: sqlca: code: 0, state: 00000 +[NO_PID]: ecpg_execute on line 232: query: SELECT * FROM t1 WHERE id = $1; with 1 parameter(s) on connection con2 +[NO_PID]: sqlca: code: 0, state: 00000 +[NO_PID]: ecpg_execute on line 232: using PQexecPrepared for "SELECT * FROM t1 WHERE id = $1" [NO_PID]: sqlca: code: 0, state: 00000 -[NO_PID]: ecpg_execute on line 222: query: SELECT * FROM t1 WHERE id = $1; with 1 parameter(s) on connection con2 +[NO_PID]: ecpg_free_params on line 232: parameter 1 = 4 [NO_PID]: sqlca: code: 0, state: 00000 -[NO_PID]: ecpg_execute on line 222: using PQexecPrepared for "SELECT * FROM t1 WHERE id = $1" +[NO_PID]: ecpg_process_output on line 232: correctly got 1 tuples with 6 fields [NO_PID]: sqlca: code: 0, state: 00000 -[NO_PID]: ecpg_free_params on line 222: parameter 1 = 4 +[NO_PID]: ecpg_build_native_sqlda on line 232 sqld = 6 
[NO_PID]: sqlca: code: 0, state: 00000 -[NO_PID]: ecpg_process_output on line 222: correctly got 1 tuples with 5 fields +[NO_PID]: ecpg_process_output on line 232: new sqlda was built [NO_PID]: sqlca: code: 0, state: 00000 -[NO_PID]: ecpg_build_native_sqlda on line 222 sqld = 5 +[NO_PID]: ecpg_set_native_sqlda on line 232 row 0 col 0 IS NOT NULL [NO_PID]: sqlca: code: 0, state: 00000 -[NO_PID]: ecpg_process_output on line 222: new sqlda was built +[NO_PID]: ecpg_get_data on line 232: RESULT: 4 offset: -1; array: no [NO_PID]: sqlca: code: 0, state: 00000 -[NO_PID]: ecpg_set_native_sqlda on line 222 row 0 col 0 IS NOT NULL +[NO_PID]: ecpg_set_native_sqlda on line 232 row 0 col 1 IS NOT NULL [NO_PID]: sqlca: code: 0, state: 00000 -[NO_PID]: ecpg_get_data on line 222: RESULT: 4 offset: -1; array: no +[NO_PID]: ecpg_get_data on line 232: RESULT: d offset: -1; array: no [NO_PID]: sqlca: code: 0, state: 00000 -[NO_PID]: ecpg_set_native_sqlda on line 222 row 0 col 1 IS NOT NULL +[NO_PID]: ecpg_set_native_sqlda on line 232 row 0 col 2 IS NOT NULL [NO_PID]: sqlca: code: 0, state: 00000 -[NO_PID]: ecpg_get_data on line 222: RESULT: d offset: -1; array: no +[NO_PID]: ecpg_set_native_sqlda on line 232 row 0 col 3 IS NOT NULL [NO_PID]: sqlca: code: 0, state: 00000 -[NO_PID]: ecpg_set_native_sqlda on line 222 row 0 col 2 IS NOT NULL +[NO_PID]: ecpg_get_data on line 232: RESULT: 4 offset: -1; array: no [NO_PID]: sqlca: code: 0, state: 00000 -[NO_PID]: ecpg_set_native_sqlda on line 222 row 0 col 3 IS NOT NULL +[NO_PID]: ecpg_set_native_sqlda on line 232 row 0 col 4 IS NOT NULL [NO_PID]: sqlca: code: 0, state: 00000 -[NO_PID]: ecpg_get_data on line 222: RESULT: 4 offset: -1; array: no +[NO_PID]: ecpg_get_data on line 232: RESULT: d offset: -1; array: no [NO_PID]: sqlca: code: 0, state: 00000 -[NO_PID]: ecpg_set_native_sqlda on line 222 row 0 col 4 IS NOT NULL +[NO_PID]: ecpg_set_native_sqlda on line 232 row 0 col 5 IS NOT NULL [NO_PID]: sqlca: code: 0, state: 00000 -[NO_PID]: ecpg_get_data on line 222: RESULT: d offset: -1; array: no +[NO_PID]: ecpg_get_data on line 232: RESULT: 4444444444444444444 offset: -1; array: no [NO_PID]: sqlca: code: 0, state: 00000 -[NO_PID]: ecpg_process_output on line 222: putting result (1 tuple 5 fields) into sqlda descriptor +[NO_PID]: ecpg_process_output on line 232: putting result (1 tuple 6 fields) into sqlda descriptor [NO_PID]: sqlca: code: 0, state: 00000 -[NO_PID]: ECPGtrans on line 227: action "commit"; connection "con2" +[NO_PID]: ECPGtrans on line 237: action "commit"; connection "con2" [NO_PID]: sqlca: code: 0, state: 00000 -[NO_PID]: deallocate_one on line 230: name st_id4 +[NO_PID]: deallocate_one on line 240: name st_id4 [NO_PID]: sqlca: code: 0, state: 00000 [NO_PID]: ecpg_finish: connection con2 closed [NO_PID]: sqlca: code: 0, state: 00000 -[NO_PID]: ecpg_execute on line 241: query: drop table t1; with 0 parameter(s) on connection regress1 +[NO_PID]: ecpg_execute on line 251: query: drop table t1; with 0 parameter(s) on connection regress1 [NO_PID]: sqlca: code: 0, state: 00000 -[NO_PID]: ecpg_execute on line 241: using PQexec +[NO_PID]: ecpg_execute on line 251: using PQexec [NO_PID]: sqlca: code: 0, state: 00000 -[NO_PID]: ecpg_process_output on line 241: OK: DROP TABLE +[NO_PID]: ecpg_process_output on line 251: OK: DROP TABLE [NO_PID]: sqlca: code: 0, state: 00000 -[NO_PID]: ECPGtrans on line 244: action "commit"; connection "regress1" +[NO_PID]: ECPGtrans on line 254: action "commit"; connection "regress1" [NO_PID]: sqlca: code: 0, state: 00000 [NO_PID]: 
ecpg_finish: connection regress1 closed [NO_PID]: sqlca: code: 0, state: 00000 diff --git a/src/interfaces/ecpg/test/expected/sql-sqlda.stdout b/src/interfaces/ecpg/test/expected/sql-sqlda.stdout index a59c2e32e2..26390df915 100644 --- a/src/interfaces/ecpg/test/expected/sql-sqlda.stdout +++ b/src/interfaces/ecpg/test/expected/sql-sqlda.stdout @@ -4,45 +4,53 @@ name sqlda descriptor: 't' value 'a' name sqlda descriptor: 'd1' value NUMERIC '1.0' name sqlda descriptor: 'd2' value 1.000000 name sqlda descriptor: 'c' value 'a ' +name sqlda descriptor: 'big' value 1111111111111111111 FETCH RECORD 2 name sqlda descriptor: 'id' value 2 name sqlda descriptor: 't' value NULL' name sqlda descriptor: 'd1' value NULL' name sqlda descriptor: 'd2' value NULL' name sqlda descriptor: 'c' value NULL' +name sqlda descriptor: 'big' value NULL' FETCH RECORD 3 name sqlda descriptor: 'id' value 4 name sqlda descriptor: 't' value 'd' name sqlda descriptor: 'd1' value NUMERIC '4.0' name sqlda descriptor: 'd2' value 4.000000 name sqlda descriptor: 'c' value 'd ' +name sqlda descriptor: 'big' value 4444444444444444444 FETCH RECORD 1 name sqlda descriptor: 'id' value 1 name sqlda descriptor: 't' value 'a' name sqlda descriptor: 'd1' value NUMERIC '1.0' name sqlda descriptor: 'd2' value 1.000000 name sqlda descriptor: 'c' value 'a ' +name sqlda descriptor: 'big' value 1111111111111111111 FETCH RECORD 2 name sqlda descriptor: 'id' value 2 name sqlda descriptor: 't' value NULL' name sqlda descriptor: 'd1' value NULL' name sqlda descriptor: 'd2' value NULL' name sqlda descriptor: 'c' value NULL' +name sqlda descriptor: 'big' value NULL' FETCH RECORD 3 name sqlda descriptor: 'id' value 4 name sqlda descriptor: 't' value 'd' name sqlda descriptor: 'd1' value NUMERIC '4.0' name sqlda descriptor: 'd2' value 4.000000 name sqlda descriptor: 'c' value 'd ' +name sqlda descriptor: 'big' value 4444444444444444444 EXECUTE RECORD 4 name sqlda descriptor: 'id' value 4 name sqlda descriptor: 't' value 'd' name sqlda descriptor: 'd1' value NUMERIC '4.0' name sqlda descriptor: 'd2' value 4.000000 name sqlda descriptor: 'c' value 'd ' +name sqlda descriptor: 'big' value 4444444444444444444 EXECUTE RECORD 4 name sqlda descriptor: 'id' value 4 name sqlda descriptor: 't' value 'd' name sqlda descriptor: 'd1' value NUMERIC '4.0' name sqlda descriptor: 'd2' value 4.000000 name sqlda descriptor: 'c' value 'd ' +name sqlda descriptor: 'big' value 4444444444444444444 diff --git a/src/interfaces/ecpg/test/expected/sql-twophase.c b/src/interfaces/ecpg/test/expected/sql-twophase.c index cf491fc078..20b54d35e5 100644 --- a/src/interfaces/ecpg/test/expected/sql-twophase.c +++ b/src/interfaces/ecpg/test/expected/sql-twophase.c @@ -110,5 +110,5 @@ if (sqlca.sqlcode < 0) sqlprint();} #line 41 "twophase.pgc" - return (0); + return 0; } diff --git a/src/interfaces/ecpg/test/expected/thread-alloc.c b/src/interfaces/ecpg/test/expected/thread-alloc.c index 9f8ac59430..e7b69b387f 100644 --- a/src/interfaces/ecpg/test/expected/thread-alloc.c +++ b/src/interfaces/ecpg/test/expected/thread-alloc.c @@ -22,6 +22,7 @@ main(void) #define WIN32_LEAN_AND_MEAN #include #include +#include #else #include #endif @@ -99,7 +100,7 @@ struct sqlca_t *ECPGget_sqlca(void); #endif -#line 24 "alloc.pgc" +#line 25 "alloc.pgc" #line 1 "regression.h" @@ -109,14 +110,14 @@ struct sqlca_t *ECPGget_sqlca(void); -#line 25 "alloc.pgc" +#line 26 "alloc.pgc" /* exec sql whenever sqlerror sqlprint ; */ -#line 27 "alloc.pgc" +#line 28 "alloc.pgc" /* exec sql whenever not found sqlprint ; 
*/ -#line 28 "alloc.pgc" +#line 29 "alloc.pgc" #ifdef WIN32 @@ -132,54 +133,60 @@ static void* fn(void* arg) -#line 39 "alloc.pgc" +#line 40 "alloc.pgc" int value ; -#line 40 "alloc.pgc" +#line 41 "alloc.pgc" char name [ 100 ] ; -#line 41 "alloc.pgc" +#line 42 "alloc.pgc" char ** r = NULL ; /* exec sql end declare section */ -#line 42 "alloc.pgc" +#line 43 "alloc.pgc" + +#ifdef WIN32 +#ifdef _MSC_VER /* requires MSVC */ + _configthreadlocale(_ENABLE_PER_THREAD_LOCALE); +#endif +#endif value = (long)arg; sprintf(name, "Connection: %d", value); { ECPGconnect(__LINE__, 0, "ecpg1_regression" , NULL, NULL , name, 0); -#line 47 "alloc.pgc" +#line 54 "alloc.pgc" if (sqlca.sqlcode < 0) sqlprint();} -#line 47 "alloc.pgc" +#line 54 "alloc.pgc" { ECPGsetcommit(__LINE__, "on", NULL); -#line 48 "alloc.pgc" +#line 55 "alloc.pgc" if (sqlca.sqlcode < 0) sqlprint();} -#line 48 "alloc.pgc" +#line 55 "alloc.pgc" for (i = 1; i <= REPEATS; ++i) { { ECPGdo(__LINE__, 0, 1, NULL, 0, ECPGst_normal, "select relname from pg_class where relname = 'pg_class'", ECPGt_EOIT, ECPGt_char,&(r),(long)0,(long)0,(1)*sizeof(char), ECPGt_NO_INDICATOR, NULL , 0L, 0L, 0L, ECPGt_EORT); -#line 51 "alloc.pgc" +#line 58 "alloc.pgc" if (sqlca.sqlcode == ECPG_NOT_FOUND) sqlprint(); -#line 51 "alloc.pgc" +#line 58 "alloc.pgc" if (sqlca.sqlcode < 0) sqlprint();} -#line 51 "alloc.pgc" +#line 58 "alloc.pgc" free(r); r = NULL; } { ECPGdisconnect(__LINE__, name); -#line 55 "alloc.pgc" +#line 62 "alloc.pgc" if (sqlca.sqlcode < 0) sqlprint();} -#line 55 "alloc.pgc" +#line 62 "alloc.pgc" return 0; diff --git a/src/interfaces/ecpg/test/expected/thread-descriptor.c b/src/interfaces/ecpg/test/expected/thread-descriptor.c index 607df7ce24..03cebad603 100644 --- a/src/interfaces/ecpg/test/expected/thread-descriptor.c +++ b/src/interfaces/ecpg/test/expected/thread-descriptor.c @@ -12,6 +12,7 @@ #define WIN32_LEAN_AND_MEAN #include #include +#include #else #include #endif @@ -90,13 +91,13 @@ struct sqlca_t *ECPGget_sqlca(void); #endif -#line 15 "descriptor.pgc" +#line 16 "descriptor.pgc" /* exec sql whenever sqlerror sqlprint ; */ -#line 16 "descriptor.pgc" +#line 17 "descriptor.pgc" /* exec sql whenever not found sqlprint ; */ -#line 17 "descriptor.pgc" +#line 18 "descriptor.pgc" #if defined(ENABLE_THREAD_SAFETY) && defined(WIN32) @@ -107,19 +108,25 @@ static void* fn(void* arg) { int i; +#ifdef WIN32 +#ifdef _MSC_VER /* requires MSVC */ + _configthreadlocale(_ENABLE_PER_THREAD_LOCALE); +#endif +#endif + for (i = 1; i <= REPEATS; ++i) { ECPGallocate_desc(__LINE__, "mydesc"); -#line 29 "descriptor.pgc" +#line 36 "descriptor.pgc" if (sqlca.sqlcode < 0) sqlprint(); -#line 29 "descriptor.pgc" +#line 36 "descriptor.pgc" ECPGdeallocate_desc(__LINE__, "mydesc"); -#line 30 "descriptor.pgc" +#line 37 "descriptor.pgc" if (sqlca.sqlcode < 0) sqlprint(); -#line 30 "descriptor.pgc" +#line 37 "descriptor.pgc" } diff --git a/src/interfaces/ecpg/test/expected/thread-prep.c b/src/interfaces/ecpg/test/expected/thread-prep.c index 72ca568151..94e02933cd 100644 --- a/src/interfaces/ecpg/test/expected/thread-prep.c +++ b/src/interfaces/ecpg/test/expected/thread-prep.c @@ -22,6 +22,7 @@ main(void) #define WIN32_LEAN_AND_MEAN #include #include +#include #else #include #endif @@ -99,7 +100,7 @@ struct sqlca_t *ECPGget_sqlca(void); #endif -#line 24 "prep.pgc" +#line 25 "prep.pgc" #line 1 "regression.h" @@ -109,14 +110,14 @@ struct sqlca_t *ECPGget_sqlca(void); -#line 25 "prep.pgc" +#line 26 "prep.pgc" /* exec sql whenever sqlerror sqlprint ; */ -#line 27 "prep.pgc" +#line 28 
"prep.pgc" /* exec sql whenever not found sqlprint ; */ -#line 28 "prep.pgc" +#line 29 "prep.pgc" #ifdef WIN32 @@ -132,64 +133,70 @@ static void* fn(void* arg) -#line 39 "prep.pgc" +#line 40 "prep.pgc" int value ; -#line 40 "prep.pgc" +#line 41 "prep.pgc" char name [ 100 ] ; -#line 41 "prep.pgc" +#line 42 "prep.pgc" char query [ 256 ] = "INSERT INTO T VALUES ( ? )" ; /* exec sql end declare section */ -#line 42 "prep.pgc" +#line 43 "prep.pgc" + +#ifdef WIN32 +#ifdef _MSC_VER /* requires MSVC */ + _configthreadlocale(_ENABLE_PER_THREAD_LOCALE); +#endif +#endif value = (long)arg; sprintf(name, "Connection: %d", value); { ECPGconnect(__LINE__, 0, "ecpg1_regression" , NULL, NULL , name, 0); -#line 47 "prep.pgc" +#line 54 "prep.pgc" if (sqlca.sqlcode < 0) sqlprint();} -#line 47 "prep.pgc" +#line 54 "prep.pgc" { ECPGsetcommit(__LINE__, "on", NULL); -#line 48 "prep.pgc" +#line 55 "prep.pgc" if (sqlca.sqlcode < 0) sqlprint();} -#line 48 "prep.pgc" +#line 55 "prep.pgc" for (i = 1; i <= REPEATS; ++i) { { ECPGprepare(__LINE__, NULL, 0, "i", query); -#line 51 "prep.pgc" +#line 58 "prep.pgc" if (sqlca.sqlcode < 0) sqlprint();} -#line 51 "prep.pgc" +#line 58 "prep.pgc" { ECPGdo(__LINE__, 0, 1, NULL, 0, ECPGst_execute, "i", ECPGt_int,&(value),(long)1,(long)1,sizeof(int), ECPGt_NO_INDICATOR, NULL , 0L, 0L, 0L, ECPGt_EOIT, ECPGt_EORT); -#line 52 "prep.pgc" +#line 59 "prep.pgc" if (sqlca.sqlcode == ECPG_NOT_FOUND) sqlprint(); -#line 52 "prep.pgc" +#line 59 "prep.pgc" if (sqlca.sqlcode < 0) sqlprint();} -#line 52 "prep.pgc" +#line 59 "prep.pgc" } { ECPGdeallocate(__LINE__, 0, NULL, "i"); -#line 54 "prep.pgc" +#line 61 "prep.pgc" if (sqlca.sqlcode < 0) sqlprint();} -#line 54 "prep.pgc" +#line 61 "prep.pgc" { ECPGdisconnect(__LINE__, name); -#line 55 "prep.pgc" +#line 62 "prep.pgc" if (sqlca.sqlcode < 0) sqlprint();} -#line 55 "prep.pgc" +#line 62 "prep.pgc" return 0; @@ -205,34 +212,34 @@ int main () #endif { ECPGconnect(__LINE__, 0, "ecpg1_regression" , NULL, NULL , NULL, 0); -#line 69 "prep.pgc" +#line 76 "prep.pgc" if (sqlca.sqlcode < 0) sqlprint();} -#line 69 "prep.pgc" +#line 76 "prep.pgc" { ECPGsetcommit(__LINE__, "on", NULL); -#line 70 "prep.pgc" +#line 77 "prep.pgc" if (sqlca.sqlcode < 0) sqlprint();} -#line 70 "prep.pgc" +#line 77 "prep.pgc" { ECPGdo(__LINE__, 0, 1, NULL, 0, ECPGst_normal, "drop table if exists T", ECPGt_EOIT, ECPGt_EORT); -#line 71 "prep.pgc" +#line 78 "prep.pgc" if (sqlca.sqlcode < 0) sqlprint();} -#line 71 "prep.pgc" +#line 78 "prep.pgc" { ECPGdo(__LINE__, 0, 1, NULL, 0, ECPGst_normal, "create table T ( i int )", ECPGt_EOIT, ECPGt_EORT); -#line 72 "prep.pgc" +#line 79 "prep.pgc" if (sqlca.sqlcode < 0) sqlprint();} -#line 72 "prep.pgc" +#line 79 "prep.pgc" { ECPGdisconnect(__LINE__, "CURRENT"); -#line 73 "prep.pgc" +#line 80 "prep.pgc" if (sqlca.sqlcode < 0) sqlprint();} -#line 73 "prep.pgc" +#line 80 "prep.pgc" #ifdef WIN32 diff --git a/src/interfaces/ecpg/test/expected/thread-thread.c b/src/interfaces/ecpg/test/expected/thread-thread.c index 61d3c5c6e4..420bbf194a 100644 --- a/src/interfaces/ecpg/test/expected/thread-thread.c +++ b/src/interfaces/ecpg/test/expected/thread-thread.c @@ -26,6 +26,7 @@ main(void) #include #else #include +#include #endif @@ -36,7 +37,7 @@ main(void) -#line 22 "thread.pgc" +#line 23 "thread.pgc" void *test_thread(void *arg); @@ -55,10 +56,10 @@ int main() /* exec sql begin declare section */ -#line 38 "thread.pgc" +#line 39 "thread.pgc" int l_rows ; /* exec sql end declare section */ -#line 39 "thread.pgc" +#line 40 "thread.pgc" /* Do not switch on 
debug output for regression tests. The threads get executed in @@ -67,22 +68,22 @@ int main() /* setup test_thread table */ { ECPGconnect(__LINE__, 0, "ecpg1_regression" , NULL, NULL , NULL, 0); } -#line 46 "thread.pgc" +#line 47 "thread.pgc" { ECPGdo(__LINE__, 0, 1, NULL, 0, ECPGst_normal, "drop table test_thread", ECPGt_EOIT, ECPGt_EORT);} -#line 47 "thread.pgc" +#line 48 "thread.pgc" /* DROP might fail */ { ECPGtrans(__LINE__, NULL, "commit");} -#line 48 "thread.pgc" +#line 49 "thread.pgc" { ECPGdo(__LINE__, 0, 1, NULL, 0, ECPGst_normal, "create table test_thread ( tstamp timestamp not null default cast ( timeofday ( ) as timestamp ) , thread text not null , iteration integer not null , primary key ( thread , iteration ) )", ECPGt_EOIT, ECPGt_EORT);} -#line 53 "thread.pgc" +#line 54 "thread.pgc" { ECPGtrans(__LINE__, NULL, "commit");} -#line 54 "thread.pgc" +#line 55 "thread.pgc" { ECPGdisconnect(__LINE__, "CURRENT");} -#line 55 "thread.pgc" +#line 56 "thread.pgc" /* create, and start, threads */ @@ -90,7 +91,7 @@ int main() if( threads == NULL ) { fprintf(stderr, "Cannot alloc memory\n"); - return( 1 ); + return 1; } for( n = 0; n < nthreads; n++ ) { @@ -114,42 +115,49 @@ int main() /* and check results */ { ECPGconnect(__LINE__, 0, "ecpg1_regression" , NULL, NULL , NULL, 0); } -#line 85 "thread.pgc" +#line 86 "thread.pgc" { ECPGdo(__LINE__, 0, 1, NULL, 0, ECPGst_normal, "select count ( * ) from test_thread", ECPGt_EOIT, ECPGt_int,&(l_rows),(long)1,(long)1,sizeof(int), ECPGt_NO_INDICATOR, NULL , 0L, 0L, 0L, ECPGt_EORT);} -#line 86 "thread.pgc" +#line 87 "thread.pgc" { ECPGtrans(__LINE__, NULL, "commit");} -#line 87 "thread.pgc" +#line 88 "thread.pgc" { ECPGdisconnect(__LINE__, "CURRENT");} -#line 88 "thread.pgc" +#line 89 "thread.pgc" if( l_rows == (nthreads * iterations) ) printf("Success.\n"); else printf("ERROR: Failure - expecting %d rows, got %d.\n", nthreads * iterations, l_rows); - return( 0 ); + return 0; } void *test_thread(void *arg) { long threadnum = (long)arg; + /* exec sql begin declare section */ -#line 101 "thread.pgc" +#line 103 "thread.pgc" int l_i ; -#line 102 "thread.pgc" +#line 104 "thread.pgc" char l_connection [ 128 ] ; /* exec sql end declare section */ -#line 103 "thread.pgc" +#line 105 "thread.pgc" + +#ifdef WIN32 +#ifdef _MSC_VER /* requires MSVC */ + _configthreadlocale(_ENABLE_PER_THREAD_LOCALE); +#endif +#endif /* build up connection name, and connect to database */ #ifndef _MSC_VER @@ -158,24 +166,24 @@ void *test_thread(void *arg) _snprintf(l_connection, sizeof(l_connection), "thread_%03ld", threadnum); #endif /* exec sql whenever sqlerror sqlprint ; */ -#line 111 "thread.pgc" +#line 119 "thread.pgc" { ECPGconnect(__LINE__, 0, "ecpg1_regression" , NULL, NULL , l_connection, 0); -#line 112 "thread.pgc" +#line 120 "thread.pgc" if (sqlca.sqlcode < 0) sqlprint();} -#line 112 "thread.pgc" +#line 120 "thread.pgc" if( sqlca.sqlcode != 0 ) { printf("%s: ERROR: cannot connect to database!\n", l_connection); - return( NULL ); + return NULL; } { ECPGtrans(__LINE__, l_connection, "begin"); -#line 118 "thread.pgc" +#line 126 "thread.pgc" if (sqlca.sqlcode < 0) sqlprint();} -#line 118 "thread.pgc" +#line 126 "thread.pgc" /* insert into test_thread table */ @@ -186,10 +194,10 @@ if (sqlca.sqlcode < 0) sqlprint();} ECPGt_NO_INDICATOR, NULL , 0L, 0L, 0L, ECPGt_int,&(l_i),(long)1,(long)1,sizeof(int), ECPGt_NO_INDICATOR, NULL , 0L, 0L, 0L, ECPGt_EOIT, ECPGt_EORT); -#line 123 "thread.pgc" +#line 131 "thread.pgc" if (sqlca.sqlcode < 0) sqlprint();} -#line 123 "thread.pgc" 
+#line 131 "thread.pgc" if( sqlca.sqlcode != 0 ) printf("%s: ERROR: insert failed!\n", l_connection); @@ -197,17 +205,17 @@ if (sqlca.sqlcode < 0) sqlprint();} /* all done */ { ECPGtrans(__LINE__, l_connection, "commit"); -#line 129 "thread.pgc" +#line 137 "thread.pgc" if (sqlca.sqlcode < 0) sqlprint();} -#line 129 "thread.pgc" +#line 137 "thread.pgc" { ECPGdisconnect(__LINE__, l_connection); -#line 130 "thread.pgc" +#line 138 "thread.pgc" if (sqlca.sqlcode < 0) sqlprint();} -#line 130 "thread.pgc" +#line 138 "thread.pgc" - return( NULL ); + return NULL; } #endif /* ENABLE_THREAD_SAFETY */ diff --git a/src/interfaces/ecpg/test/expected/thread-thread_implicit.c b/src/interfaces/ecpg/test/expected/thread-thread_implicit.c index c43c1ada46..4bddca9fb9 100644 --- a/src/interfaces/ecpg/test/expected/thread-thread_implicit.c +++ b/src/interfaces/ecpg/test/expected/thread-thread_implicit.c @@ -27,6 +27,7 @@ main(void) #include #else #include +#include #endif @@ -37,7 +38,7 @@ main(void) -#line 23 "thread_implicit.pgc" +#line 24 "thread_implicit.pgc" void *test_thread(void *arg); @@ -56,10 +57,10 @@ int main() /* exec sql begin declare section */ -#line 39 "thread_implicit.pgc" +#line 40 "thread_implicit.pgc" int l_rows ; /* exec sql end declare section */ -#line 40 "thread_implicit.pgc" +#line 41 "thread_implicit.pgc" /* Do not switch on debug output for regression tests. The threads get executed in @@ -68,22 +69,22 @@ int main() /* setup test_thread table */ { ECPGconnect(__LINE__, 0, "ecpg1_regression" , NULL, NULL , NULL, 0); } -#line 47 "thread_implicit.pgc" +#line 48 "thread_implicit.pgc" { ECPGdo(__LINE__, 0, 1, NULL, 0, ECPGst_normal, "drop table test_thread", ECPGt_EOIT, ECPGt_EORT);} -#line 48 "thread_implicit.pgc" +#line 49 "thread_implicit.pgc" /* DROP might fail */ { ECPGtrans(__LINE__, NULL, "commit");} -#line 49 "thread_implicit.pgc" +#line 50 "thread_implicit.pgc" { ECPGdo(__LINE__, 0, 1, NULL, 0, ECPGst_normal, "create table test_thread ( tstamp timestamp not null default cast ( timeofday ( ) as timestamp ) , thread text not null , iteration integer not null , primary key ( thread , iteration ) )", ECPGt_EOIT, ECPGt_EORT);} -#line 54 "thread_implicit.pgc" +#line 55 "thread_implicit.pgc" { ECPGtrans(__LINE__, NULL, "commit");} -#line 55 "thread_implicit.pgc" +#line 56 "thread_implicit.pgc" { ECPGdisconnect(__LINE__, "CURRENT");} -#line 56 "thread_implicit.pgc" +#line 57 "thread_implicit.pgc" /* create, and start, threads */ @@ -91,7 +92,7 @@ int main() if( threads == NULL ) { fprintf(stderr, "Cannot alloc memory\n"); - return( 1 ); + return 1; } for( n = 0; n < nthreads; n++ ) { @@ -115,42 +116,49 @@ int main() /* and check results */ { ECPGconnect(__LINE__, 0, "ecpg1_regression" , NULL, NULL , NULL, 0); } -#line 86 "thread_implicit.pgc" +#line 87 "thread_implicit.pgc" { ECPGdo(__LINE__, 0, 1, NULL, 0, ECPGst_normal, "select count ( * ) from test_thread", ECPGt_EOIT, ECPGt_int,&(l_rows),(long)1,(long)1,sizeof(int), ECPGt_NO_INDICATOR, NULL , 0L, 0L, 0L, ECPGt_EORT);} -#line 87 "thread_implicit.pgc" +#line 88 "thread_implicit.pgc" { ECPGtrans(__LINE__, NULL, "commit");} -#line 88 "thread_implicit.pgc" +#line 89 "thread_implicit.pgc" { ECPGdisconnect(__LINE__, "CURRENT");} -#line 89 "thread_implicit.pgc" +#line 90 "thread_implicit.pgc" if( l_rows == (nthreads * iterations) ) printf("Success.\n"); else printf("ERROR: Failure - expecting %d rows, got %d.\n", nthreads * iterations, l_rows); - return( 0 ); + return 0; } void *test_thread(void *arg) { long threadnum = (long)arg; + /* exec 
sql begin declare section */ -#line 102 "thread_implicit.pgc" +#line 104 "thread_implicit.pgc" int l_i ; -#line 103 "thread_implicit.pgc" +#line 105 "thread_implicit.pgc" char l_connection [ 128 ] ; /* exec sql end declare section */ -#line 104 "thread_implicit.pgc" +#line 106 "thread_implicit.pgc" + +#ifdef WIN32 +#ifdef _MSC_VER /* requires MSVC */ + _configthreadlocale(_ENABLE_PER_THREAD_LOCALE); +#endif +#endif /* build up connection name, and connect to database */ #ifndef _MSC_VER @@ -159,24 +167,24 @@ void *test_thread(void *arg) _snprintf(l_connection, sizeof(l_connection), "thread_%03ld", threadnum); #endif /* exec sql whenever sqlerror sqlprint ; */ -#line 112 "thread_implicit.pgc" +#line 120 "thread_implicit.pgc" { ECPGconnect(__LINE__, 0, "ecpg1_regression" , NULL, NULL , l_connection, 0); -#line 113 "thread_implicit.pgc" +#line 121 "thread_implicit.pgc" if (sqlca.sqlcode < 0) sqlprint();} -#line 113 "thread_implicit.pgc" +#line 121 "thread_implicit.pgc" if( sqlca.sqlcode != 0 ) { printf("%s: ERROR: cannot connect to database!\n", l_connection); - return( NULL ); + return NULL; } { ECPGtrans(__LINE__, NULL, "begin"); -#line 119 "thread_implicit.pgc" +#line 127 "thread_implicit.pgc" if (sqlca.sqlcode < 0) sqlprint();} -#line 119 "thread_implicit.pgc" +#line 127 "thread_implicit.pgc" /* insert into test_thread table */ @@ -187,10 +195,10 @@ if (sqlca.sqlcode < 0) sqlprint();} ECPGt_NO_INDICATOR, NULL , 0L, 0L, 0L, ECPGt_int,&(l_i),(long)1,(long)1,sizeof(int), ECPGt_NO_INDICATOR, NULL , 0L, 0L, 0L, ECPGt_EOIT, ECPGt_EORT); -#line 124 "thread_implicit.pgc" +#line 132 "thread_implicit.pgc" if (sqlca.sqlcode < 0) sqlprint();} -#line 124 "thread_implicit.pgc" +#line 132 "thread_implicit.pgc" if( sqlca.sqlcode != 0 ) printf("%s: ERROR: insert failed!\n", l_connection); @@ -198,17 +206,17 @@ if (sqlca.sqlcode < 0) sqlprint();} /* all done */ { ECPGtrans(__LINE__, NULL, "commit"); -#line 130 "thread_implicit.pgc" +#line 138 "thread_implicit.pgc" if (sqlca.sqlcode < 0) sqlprint();} -#line 130 "thread_implicit.pgc" +#line 138 "thread_implicit.pgc" { ECPGdisconnect(__LINE__, l_connection); -#line 131 "thread_implicit.pgc" +#line 139 "thread_implicit.pgc" if (sqlca.sqlcode < 0) sqlprint();} -#line 131 "thread_implicit.pgc" +#line 139 "thread_implicit.pgc" - return( NULL ); + return NULL; } #endif /* ENABLE_THREAD_SAFETY */ diff --git a/src/interfaces/ecpg/test/performance/perftest.pgc b/src/interfaces/ecpg/test/performance/perftest.pgc index 3ed2ba0f5e..c8a9934986 100644 --- a/src/interfaces/ecpg/test/performance/perftest.pgc +++ b/src/interfaces/ecpg/test/performance/perftest.pgc @@ -140,5 +140,5 @@ exec sql end declare section; exec sql disconnect; - return (0); + return 0; } diff --git a/src/interfaces/ecpg/test/pg_regress_ecpg.c b/src/interfaces/ecpg/test/pg_regress_ecpg.c index b6ecb618e6..a975a7e4e4 100644 --- a/src/interfaces/ecpg/test/pg_regress_ecpg.c +++ b/src/interfaces/ecpg/test/pg_regress_ecpg.c @@ -8,7 +8,7 @@ * * This code is released under the terms of the PostgreSQL License. 
* - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/interfaces/ecpg/test/pg_regress_ecpg.c diff --git a/src/interfaces/ecpg/test/pgtypeslib/dt_test.pgc b/src/interfaces/ecpg/test/pgtypeslib/dt_test.pgc index 768cbd5e6f..95632fffee 100644 --- a/src/interfaces/ecpg/test/pgtypeslib/dt_test.pgc +++ b/src/interfaces/ecpg/test/pgtypeslib/dt_test.pgc @@ -39,18 +39,18 @@ main(void) text = PGTYPESdate_to_asc(date1); printf ("Date: %s\n", text); - free(text); + PGTYPESchar_free(text); text = PGTYPEStimestamp_to_asc(ts1); printf ("timestamp: %s\n", text); - free(text); + PGTYPESchar_free(text); iv1 = PGTYPESinterval_from_asc("13556 days 12 hours 34 minutes 14 seconds ", NULL); PGTYPESinterval_copy(iv1, &iv2); text = PGTYPESinterval_to_asc(&iv2); printf ("interval: %s\n", text); PGTYPESinterval_free(iv1); - free(text); + PGTYPESchar_free(text); PGTYPESdate_mdyjul(mdy, &date2); printf("m: %d, d: %d, y: %d\n", mdy[0], mdy[1], mdy[2]); @@ -70,7 +70,7 @@ main(void) PGTYPESdate_fmt_asc(date1, fmt, out); printf("date_day of %s is %d\n", text, PGTYPESdate_dayofweek(date1)); printf("Above date in format \"%s\" is \"%s\"\n", fmt, out); - free(text); + PGTYPESchar_free(text); free(out); out = (char*) malloc(48); @@ -81,7 +81,7 @@ main(void) /* rdate_defmt_asc() */ - date1 = 0; text = ""; + date1 = 0; fmt = "yy/mm/dd"; in = "In the year 1995, the month of December, it is the 25th day"; /* 0123456789012345678901234567890123456789012345678901234567890 @@ -90,108 +90,108 @@ main(void) PGTYPESdate_defmt_asc(&date1, fmt, in); text = PGTYPESdate_to_asc(date1); printf("date_defmt_asc1: %s\n", text); - free(text); + PGTYPESchar_free(text); - date1 = 0; text = ""; + date1 = 0; fmt = "mmmm. dd. yyyy"; in = "12/25/95"; PGTYPESdate_defmt_asc(&date1, fmt, in); text = PGTYPESdate_to_asc(date1); printf("date_defmt_asc2: %s\n", text); - free(text); + PGTYPESchar_free(text); - date1 = 0; text = ""; + date1 = 0; fmt = "yy/mm/dd"; in = "95/12/25"; PGTYPESdate_defmt_asc(&date1, fmt, in); text = PGTYPESdate_to_asc(date1); printf("date_defmt_asc3: %s\n", text); - free(text); + PGTYPESchar_free(text); - date1 = 0; text = ""; + date1 = 0; fmt = "yy/mm/dd"; in = "1995, December 25th"; PGTYPESdate_defmt_asc(&date1, fmt, in); text = PGTYPESdate_to_asc(date1); printf("date_defmt_asc4: %s\n", text); - free(text); + PGTYPESchar_free(text); - date1 = 0; text = ""; + date1 = 0; fmt = "dd-mm-yy"; in = "This is 25th day of December, 1995"; PGTYPESdate_defmt_asc(&date1, fmt, in); text = PGTYPESdate_to_asc(date1); printf("date_defmt_asc5: %s\n", text); - free(text); + PGTYPESchar_free(text); - date1 = 0; text = ""; + date1 = 0; fmt = "mmddyy"; in = "Dec. 25th, 1995"; PGTYPESdate_defmt_asc(&date1, fmt, in); text = PGTYPESdate_to_asc(date1); printf("date_defmt_asc6: %s\n", text); - free(text); + PGTYPESchar_free(text); - date1 = 0; text = ""; + date1 = 0; fmt = "mmm. dd. yyyy"; in = "dec 25th 1995"; PGTYPESdate_defmt_asc(&date1, fmt, in); text = PGTYPESdate_to_asc(date1); printf("date_defmt_asc7: %s\n", text); - free(text); + PGTYPESchar_free(text); - date1 = 0; text = ""; + date1 = 0; fmt = "mmm. dd. 
yyyy"; in = "DEC-25-1995"; PGTYPESdate_defmt_asc(&date1, fmt, in); text = PGTYPESdate_to_asc(date1); printf("date_defmt_asc8: %s\n", text); - free(text); + PGTYPESchar_free(text); - date1 = 0; text = ""; + date1 = 0; fmt = "mm yy dd."; in = "12199525"; PGTYPESdate_defmt_asc(&date1, fmt, in); text = PGTYPESdate_to_asc(date1); printf("date_defmt_asc9: %s\n", text); - free(text); + PGTYPESchar_free(text); - date1 = 0; text = ""; + date1 = 0; fmt = "yyyy fierj mm dd."; in = "19951225"; PGTYPESdate_defmt_asc(&date1, fmt, in); text = PGTYPESdate_to_asc(date1); printf("date_defmt_asc10: %s\n", text); - free(text); + PGTYPESchar_free(text); - date1 = 0; text = ""; + date1 = 0; fmt = "mm/dd/yy"; in = "122595"; PGTYPESdate_defmt_asc(&date1, fmt, in); text = PGTYPESdate_to_asc(date1); printf("date_defmt_asc12: %s\n", text); - free(text); + PGTYPESchar_free(text); PGTYPEStimestamp_current(&ts1); text = PGTYPEStimestamp_to_asc(ts1); /* can't output this in regression mode */ /* printf("timestamp_current: Now: %s\n", text); */ - free(text); + PGTYPESchar_free(text); ts1 = PGTYPEStimestamp_from_asc("96-02-29", NULL); text = PGTYPEStimestamp_to_asc(ts1); printf("timestamp_to_asc1: %s\n", text); - free(text); + PGTYPESchar_free(text); ts1 = PGTYPEStimestamp_from_asc("1994-02-11 3:10:35", NULL); text = PGTYPEStimestamp_to_asc(ts1); printf("timestamp_to_asc2: %s\n", text); - free(text); + PGTYPESchar_free(text); ts1 = PGTYPEStimestamp_from_asc("1994-02-11 26:10:35", NULL); text = PGTYPEStimestamp_to_asc(ts1); printf("timestamp_to_asc3: %s\n", text); - free(text); + PGTYPESchar_free(text); /* abc-03:10:35-def-02/11/94-gh */ /* 12345678901234567890123456789 */ @@ -206,164 +206,164 @@ main(void) i = PGTYPEStimestamp_defmt_asc(in, fmt, &ts1); text = PGTYPEStimestamp_to_asc(ts1); printf("timestamp_defmt_asc(%s, %s) = %s, error: %d\n", in, fmt, text, i); - free(text); + PGTYPESchar_free(text); fmt = "%a %b %d %H:%M:%S %z %Y"; in = "Tue Jul 22 17:28:44 +0200 2003"; i = PGTYPEStimestamp_defmt_asc(in, fmt, &ts1); text = PGTYPEStimestamp_to_asc(ts1); printf("timestamp_defmt_asc(%s, %s) = %s, error: %d\n", in, fmt, text, i); - free(text); + PGTYPESchar_free(text); fmt = "%a %b %d %H:%M:%S %z %Y"; in = "Tue Feb 29 17:28:44 +0200 2000"; i = PGTYPEStimestamp_defmt_asc(in, fmt, &ts1); text = PGTYPEStimestamp_to_asc(ts1); printf("timestamp_defmt_asc(%s, %s) = %s, error: %d\n", in, fmt, text, i); - free(text); + PGTYPESchar_free(text); fmt = "%a %b %d %H:%M:%S %z %Y"; in = "Tue Feb 29 17:28:44 +0200 1900"; i = PGTYPEStimestamp_defmt_asc(in, fmt, &ts1); text = PGTYPEStimestamp_to_asc(ts1); printf("timestamp_defmt_asc(%s, %s) = %s, error (should be error!): %d\n", in, fmt, text, i); - free(text); + PGTYPESchar_free(text); fmt = "%a %b %d %H:%M:%S %z %Y"; in = "Tue Feb 29 17:28:44 +0200 1996"; i = PGTYPEStimestamp_defmt_asc(in, fmt, &ts1); text = PGTYPEStimestamp_to_asc(ts1); printf("timestamp_defmt_asc(%s, %s) = %s, error: %d\n", in, fmt, text, i); - free(text); + PGTYPESchar_free(text); fmt = "%b %d %H:%M:%S %z %Y"; in = " Jul 31 17:28:44 +0200 1996"; i = PGTYPEStimestamp_defmt_asc(in, fmt, &ts1); text = PGTYPEStimestamp_to_asc(ts1); printf("timestamp_defmt_asc(%s, %s) = %s, error: %d\n", in, fmt, text, i); - free(text); + PGTYPESchar_free(text); fmt = "%b %d %H:%M:%S %z %Y"; in = " Jul 32 17:28:44 +0200 1996"; i = PGTYPEStimestamp_defmt_asc(in, fmt, &ts1); text = PGTYPEStimestamp_to_asc(ts1); printf("timestamp_defmt_asc(%s, %s) = %s, error (should be error!): %d\n", in, fmt, text, i); - free(text); + PGTYPESchar_free(text); 
fmt = "%a %b %d %H:%M:%S %z %Y"; in = "Tue Feb 29 17:28:44 +0200 1997"; i = PGTYPEStimestamp_defmt_asc(in, fmt, &ts1); text = PGTYPEStimestamp_to_asc(ts1); printf("timestamp_defmt_asc(%s, %s) = %s, error (should be error!): %d\n", in, fmt, text, i); - free(text); + PGTYPESchar_free(text); fmt = "%"; in = "Tue Jul 22 17:28:44 +0200 2003"; i = PGTYPEStimestamp_defmt_asc(in, fmt, &ts1); text = PGTYPEStimestamp_to_asc(ts1); printf("timestamp_defmt_asc(%s, %s) = %s, error (should be error!): %d\n", in, fmt, text, i); - free(text); + PGTYPESchar_free(text); fmt = "a %"; in = "Tue Jul 22 17:28:44 +0200 2003"; i = PGTYPEStimestamp_defmt_asc(in, fmt, &ts1); text = PGTYPEStimestamp_to_asc(ts1); printf("timestamp_defmt_asc(%s, %s) = %s, error (should be error!): %d\n", in, fmt, text, i); - free(text); + PGTYPESchar_free(text); fmt = "%b, %d %H_%M`%S %z %Y"; in = " Jul, 22 17_28 `44 +0200 2003 "; i = PGTYPEStimestamp_defmt_asc(in, fmt, &ts1); text = PGTYPEStimestamp_to_asc(ts1); printf("timestamp_defmt_asc(%s, %s) = %s, error: %d\n", in, fmt, text, i); - free(text); + PGTYPESchar_free(text); fmt = "%a %b %%%d %H:%M:%S %Z %Y"; in = "Tue Jul %22 17:28:44 CEST 2003"; i = PGTYPEStimestamp_defmt_asc(in, fmt, &ts1); text = PGTYPEStimestamp_to_asc(ts1); printf("timestamp_defmt_asc(%s, %s) = %s, error: %d\n", in, fmt, text, i); - free(text); + PGTYPESchar_free(text); fmt = "%a %b %%%d %H:%M:%S %Z %Y"; in = "Tue Jul %22 17:28:44 CEST 2003"; i = PGTYPEStimestamp_defmt_asc(in, fmt, &ts1); text = PGTYPEStimestamp_to_asc(ts1); printf("timestamp_defmt_asc(%s, %s) = %s, error: %d\n", in, fmt, text, i); - free(text); + PGTYPESchar_free(text); fmt = "abc%n %C %B %%%d %H:%M:%S %Z %Y"; in = "abc\n 19 October %22 17:28:44 CEST 2003"; i = PGTYPEStimestamp_defmt_asc(in, fmt, &ts1); text = PGTYPEStimestamp_to_asc(ts1); printf("timestamp_defmt_asc(%s, %s) = %s, error: %d\n", in, fmt, text, i); - free(text); + PGTYPESchar_free(text); fmt = "abc%n %C %B %%%d %H:%M:%S %Z %y"; in = "abc\n 18 October %34 17:28:44 CEST 80"; i = PGTYPEStimestamp_defmt_asc(in, fmt, &ts1); text = PGTYPEStimestamp_to_asc(ts1); printf("timestamp_defmt_asc(%s, %s) = %s, error (should be error!): %d\n", in, fmt, text, i); - free(text); + PGTYPESchar_free(text); fmt = ""; in = "abc\n 18 October %34 17:28:44 CEST 80"; i = PGTYPEStimestamp_defmt_asc(in, fmt, &ts1); text = PGTYPEStimestamp_to_asc(ts1); printf("timestamp_defmt_asc(%s, %s) = %s, error (should be error!): %d\n", in, fmt, text, i); - free(text); + PGTYPESchar_free(text); fmt = NULL; in = "1980-04-12 3:49:44 "; i = PGTYPEStimestamp_defmt_asc(in, fmt, &ts1); text = PGTYPEStimestamp_to_asc(ts1); printf("timestamp_defmt_asc(%s, NULL) = %s, error: %d\n", in, text, i); - free(text); + PGTYPESchar_free(text); fmt = "%B %d, %Y. Time: %I:%M%p"; in = "July 14, 1988. Time: 9:15am"; i = PGTYPEStimestamp_defmt_asc(in, fmt, &ts1); text = PGTYPEStimestamp_to_asc(ts1); printf("timestamp_defmt_asc(%s, %s) = %s, error: %d\n", in, fmt, text, i); - free(text); + PGTYPESchar_free(text); in = "September 6 at 01:30 pm in the year 1983"; fmt = "%B %d at %I:%M %p in the year %Y"; i = PGTYPEStimestamp_defmt_asc(in, fmt, &ts1); text = PGTYPEStimestamp_to_asc(ts1); printf("timestamp_defmt_asc(%s, %s) = %s, error: %d\n", in, fmt, text, i); - free(text); + PGTYPESchar_free(text); in = " 1976, July 14. Time: 9:15am"; fmt = "%Y, %B %d. 
Time: %I:%M %p"; i = PGTYPEStimestamp_defmt_asc(in, fmt, &ts1); text = PGTYPEStimestamp_to_asc(ts1); printf("timestamp_defmt_asc(%s, %s) = %s, error: %d\n", in, fmt, text, i); - free(text); + PGTYPESchar_free(text); in = " 1976, July 14. Time: 9:15 am"; fmt = "%Y, %B %d. Time: %I:%M%p"; i = PGTYPEStimestamp_defmt_asc(in, fmt, &ts1); text = PGTYPEStimestamp_to_asc(ts1); printf("timestamp_defmt_asc(%s, %s) = %s, error: %d\n", in, fmt, text, i); - free(text); + PGTYPESchar_free(text); in = " 1976, P.M. July 14. Time: 9:15"; fmt = "%Y, %P %B %d. Time: %I:%M"; i = PGTYPEStimestamp_defmt_asc(in, fmt, &ts1); text = PGTYPEStimestamp_to_asc(ts1); printf("timestamp_defmt_asc(%s, %s) = %s, error: %d\n", in, fmt, text, i); - free(text); + PGTYPESchar_free(text); in = "1234567890"; fmt = "%s"; i = PGTYPEStimestamp_defmt_asc(in, fmt, &ts1); text = PGTYPEStimestamp_to_asc(ts1); printf("timestamp_defmt_asc(%s, %s) = %s, error: %d\n", in, fmt, text, i); - free(text); + PGTYPESchar_free(text); exec sql rollback; exec sql disconnect; - return (0); + return 0; } diff --git a/src/interfaces/ecpg/test/pgtypeslib/dt_test2.pgc b/src/interfaces/ecpg/test/pgtypeslib/dt_test2.pgc index d519305e18..62b934b07e 100644 --- a/src/interfaces/ecpg/test/pgtypeslib/dt_test2.pgc +++ b/src/interfaces/ecpg/test/pgtypeslib/dt_test2.pgc @@ -75,14 +75,14 @@ main(void) text = PGTYPEStimestamp_to_asc(ts1); printf("timestamp: %s\n", text); - free(text); + PGTYPESchar_free(text); date1 = PGTYPESdate_from_timestamp(ts1); dc = PGTYPESdate_new(); *dc = date1; text = PGTYPESdate_to_asc(*dc); printf("Date of timestamp: %s\n", text); - free(text); + PGTYPESchar_free(text); PGTYPESdate_free(dc); for (i = 0; dates[i]; i++) @@ -97,7 +97,7 @@ main(void) i, err ? "-" : text, endptr ? 'N' : 'Y', err ? 'T' : 'F'); - free(text); + PGTYPESchar_free(text); if (!err) { for (j = 0; times[j]; j++) @@ -112,7 +112,7 @@ main(void) text = PGTYPEStimestamp_to_asc(ts1); printf("TS[%d,%d]: %s\n", i, j, errno ? "-" : text); - free(text); + PGTYPESchar_free(text); free(t); } } @@ -136,16 +136,16 @@ main(void) continue; text = PGTYPESinterval_to_asc(i1); printf("interval[%d]: %s\n", i, text ? text : "-"); - free(text); + PGTYPESchar_free(text); ic = PGTYPESinterval_new(); PGTYPESinterval_copy(i1, ic); text = PGTYPESinterval_to_asc(i1); printf("interval_copy[%d]: %s\n", i, text ? 
text : "-"); - free(text); + PGTYPESchar_free(text); PGTYPESinterval_free(ic); PGTYPESinterval_free(i1); } - return (0); + return 0; } diff --git a/src/interfaces/ecpg/test/pgtypeslib/nan_test.pgc b/src/interfaces/ecpg/test/pgtypeslib/nan_test.pgc index 3b5781632e..bc682b93d5 100644 --- a/src/interfaces/ecpg/test/pgtypeslib/nan_test.pgc +++ b/src/interfaces/ecpg/test/pgtypeslib/nan_test.pgc @@ -90,5 +90,5 @@ main(void) exec sql rollback; exec sql disconnect; - return (0); + return 0; } diff --git a/src/interfaces/ecpg/test/pgtypeslib/num_test.pgc b/src/interfaces/ecpg/test/pgtypeslib/num_test.pgc index a024c8702f..254aeb4129 100644 --- a/src/interfaces/ecpg/test/pgtypeslib/num_test.pgc +++ b/src/interfaces/ecpg/test/pgtypeslib/num_test.pgc @@ -5,12 +5,7 @@ exec sql include ../regression; - -/* - -NOTE: This file has a different expect file for regression tests on MinGW32 - -*/ +exec sql include ../printf_hack; int @@ -38,7 +33,7 @@ main(void) PGTYPESnumeric_from_int(1407, value1); text = PGTYPESnumeric_to_asc(value1, -1); printf("from int = %s\n", text); - free(text); + PGTYPESchar_free(text); PGTYPESnumeric_free(value1); value1 = PGTYPESnumeric_from_asc("2369.7", NULL); @@ -47,12 +42,12 @@ main(void) PGTYPESnumeric_add(value1, value2, res); text = PGTYPESnumeric_to_asc(res, -1); printf("add = %s\n", text); - free(text); + PGTYPESchar_free(text); PGTYPESnumeric_sub(res, value2, res); text = PGTYPESnumeric_to_asc(res, -1); printf("sub = %s\n", text); - free(text); + PGTYPESchar_free(text); PGTYPESnumeric_free(value2); des = PGTYPESnumeric_new(); @@ -68,14 +63,16 @@ main(void) PGTYPESnumeric_mul(res, des, res); text = PGTYPESnumeric_to_asc(res, -1); printf("mul = %s\n", text); - free(text); + PGTYPESchar_free(text); PGTYPESnumeric_free(des); value2 = PGTYPESnumeric_from_asc("10000", NULL); PGTYPESnumeric_div(res, value2, res); text = PGTYPESnumeric_to_asc(res, -1); PGTYPESnumeric_to_double(res, &d); - printf("div = %s %e\n", text, d); + printf("div = %s ", text); + print_double(d); + printf("\n"); PGTYPESnumeric_free(value1); PGTYPESnumeric_free(value2); @@ -85,7 +82,7 @@ main(void) i = PGTYPESnumeric_to_long(value1, &l1) | PGTYPESnumeric_to_long(value2, &l2); printf("to long(%d) = %ld %ld\n", i, l1, l2); - free(text); + PGTYPESchar_free(text); PGTYPESnumeric_free(value1); PGTYPESnumeric_free(value2); PGTYPESnumeric_free(res); @@ -93,5 +90,5 @@ main(void) exec sql rollback; exec sql disconnect; - return (0); + return 0; } diff --git a/src/interfaces/ecpg/test/pgtypeslib/num_test2.pgc b/src/interfaces/ecpg/test/pgtypeslib/num_test2.pgc index 2ac666f7c0..8241d45ca5 100644 --- a/src/interfaces/ecpg/test/pgtypeslib/num_test2.pgc +++ b/src/interfaces/ecpg/test/pgtypeslib/num_test2.pgc @@ -6,12 +6,7 @@ exec sql include ../regression; - -/* - -NOTE: This file has a different expect file for regression tests on MinGW32 - -*/ +exec sql include ../printf_hack; char* nums[] = { "2E394", "-2", ".794", "3.44", "592.49E21", "-32.84e4", @@ -59,21 +54,21 @@ main(void) text = PGTYPESnumeric_to_asc(num, -1); if (!text) check_errno(); - printf("num[%d,1]: %s\n", i, text); free(text); + printf("num[%d,1]: %s\n", i, text); PGTYPESchar_free(text); text = PGTYPESnumeric_to_asc(num, 0); if (!text) check_errno(); - printf("num[%d,2]: %s\n", i, text); free(text); + printf("num[%d,2]: %s\n", i, text); PGTYPESchar_free(text); text = PGTYPESnumeric_to_asc(num, 1); if (!text) check_errno(); - printf("num[%d,3]: %s\n", i, text); free(text); + printf("num[%d,3]: %s\n", i, text); PGTYPESchar_free(text); text = 
PGTYPESnumeric_to_asc(num, 2); if (!text) check_errno(); - printf("num[%d,4]: %s\n", i, text); free(text); + printf("num[%d,4]: %s\n", i, text); PGTYPESchar_free(text); nin = PGTYPESnumeric_new(); text = PGTYPESnumeric_to_asc(nin, 2); if (!text) check_errno(); - printf("num[%d,5]: %s\n", i, text); free(text); + printf("num[%d,5]: %s\n", i, text); PGTYPESchar_free(text); r = PGTYPESnumeric_to_long(num, &l); if (r) check_errno(); @@ -85,7 +80,7 @@ main(void) text = PGTYPESnumeric_to_asc(nin, 2); q = PGTYPESnumeric_cmp(num, nin); printf("num[%d,7]: %s (r: %d - cmp: %d)\n", i, text, r, q); - free(text); + PGTYPESchar_free(text); } r = PGTYPESnumeric_to_int(num, &k); @@ -98,7 +93,7 @@ main(void) text = PGTYPESnumeric_to_asc(nin, 2); q = PGTYPESnumeric_cmp(num, nin); printf("num[%d,9]: %s (r: %d - cmp: %d)\n", i, text, r, q); - free(text); + PGTYPESchar_free(text); } if (i != 6) @@ -108,7 +103,9 @@ main(void) r = PGTYPESnumeric_to_double(num, &d); if (r) check_errno(); - printf("num[%d,10]: %g (r: %d)\n", i, r?0.0:d, r); + printf("num[%d,10]: ", i); + print_double(r ? 0.0 : d); + printf(" (r: %d)\n", r); } /* do not test double to numeric because @@ -129,7 +126,7 @@ main(void) text = PGTYPESnumeric_to_asc(nin, 2); q = PGTYPESnumeric_cmp(num, nin); printf("num[%d,12]: %s (r: %d - cmp: %d)\n", i, text, r, q); - free(text); + PGTYPESchar_free(text); } PGTYPESdecimal_free(dec); @@ -155,7 +152,7 @@ main(void) { text = PGTYPESnumeric_to_asc(a, 10); printf("num[a,%d,%d]: %s\n", i, j, text); - free(text); + PGTYPESchar_free(text); } r = PGTYPESnumeric_sub(numarr[i], numarr[j], s); if (r) @@ -167,7 +164,7 @@ main(void) { text = PGTYPESnumeric_to_asc(s, 10); printf("num[s,%d,%d]: %s\n", i, j, text); - free(text); + PGTYPESchar_free(text); } r = PGTYPESnumeric_mul(numarr[i], numarr[j], m); if (r) @@ -179,7 +176,7 @@ main(void) { text = PGTYPESnumeric_to_asc(m, 10); printf("num[m,%d,%d]: %s\n", i, j, text); - free(text); + PGTYPESchar_free(text); } r = PGTYPESnumeric_div(numarr[i], numarr[j], d); if (r) @@ -191,7 +188,7 @@ main(void) { text = PGTYPESnumeric_to_asc(d, 10); printf("num[d,%d,%d]: %s\n", i, j, text); - free(text); + PGTYPESchar_free(text); } PGTYPESnumeric_free(a); @@ -205,12 +202,12 @@ main(void) { text = PGTYPESnumeric_to_asc(numarr[i], -1); printf("%d: %s\n", i, text); - free(text); + PGTYPESchar_free(text); PGTYPESnumeric_free(numarr[i]); } free(numarr); - return (0); + return 0; } static void diff --git a/src/interfaces/ecpg/test/preproc/.gitignore b/src/interfaces/ecpg/test/preproc/.gitignore index ffca98e8c0..fd63e645a3 100644 --- a/src/interfaces/ecpg/test/preproc/.gitignore +++ b/src/interfaces/ecpg/test/preproc/.gitignore @@ -22,3 +22,5 @@ /variable.c /whenever /whenever.c +/whenever_do_continue +/whenever_do_continue.c diff --git a/src/interfaces/ecpg/test/preproc/Makefile b/src/interfaces/ecpg/test/preproc/Makefile index d658a4d6b2..39b1974f5f 100644 --- a/src/interfaces/ecpg/test/preproc/Makefile +++ b/src/interfaces/ecpg/test/preproc/Makefile @@ -15,6 +15,7 @@ TESTS = array_of_struct array_of_struct.c \ type type.c \ variable variable.c \ whenever whenever.c \ + whenever_do_continue whenever_do_continue.c \ pointer_to_struct pointer_to_struct.c all: $(TESTS) diff --git a/src/interfaces/ecpg/test/preproc/array_of_struct.pgc b/src/interfaces/ecpg/test/preproc/array_of_struct.pgc index f9e1946b3f..69f5758474 100644 --- a/src/interfaces/ecpg/test/preproc/array_of_struct.pgc +++ b/src/interfaces/ecpg/test/preproc/array_of_struct.pgc @@ -91,5 +91,5 @@ int main() EXEC SQL disconnect all; 
- return( 0 ); + return 0; } diff --git a/src/interfaces/ecpg/test/preproc/cursor.pgc b/src/interfaces/ecpg/test/preproc/cursor.pgc index 4247912989..8a286ad523 100644 --- a/src/interfaces/ecpg/test/preproc/cursor.pgc +++ b/src/interfaces/ecpg/test/preproc/cursor.pgc @@ -252,5 +252,5 @@ exec sql end declare section; strcpy(msg, "disconnect"); exec sql disconnect all; - return (0); + return 0; } diff --git a/src/interfaces/ecpg/test/preproc/define.pgc b/src/interfaces/ecpg/test/preproc/define.pgc index 2161733f49..0d07ebfe63 100644 --- a/src/interfaces/ecpg/test/preproc/define.pgc +++ b/src/interfaces/ecpg/test/preproc/define.pgc @@ -58,5 +58,5 @@ exec sql end declare section; exec sql commit; exec sql disconnect; - return (0); + return 0; } diff --git a/src/interfaces/ecpg/test/preproc/init.pgc b/src/interfaces/ecpg/test/preproc/init.pgc index 11dc01ade4..b1f71997a2 100644 --- a/src/interfaces/ecpg/test/preproc/init.pgc +++ b/src/interfaces/ecpg/test/preproc/init.pgc @@ -35,7 +35,7 @@ static int fe(enum e x) return (int)x; } -static void sqlnotice(char *notice, short trans) +static void sqlnotice(const char *notice, short trans) { if (!notice) notice = "-empty-"; diff --git a/src/interfaces/ecpg/test/preproc/outofscope.pgc b/src/interfaces/ecpg/test/preproc/outofscope.pgc index 6b5d2707ce..b03743c991 100644 --- a/src/interfaces/ecpg/test/preproc/outofscope.pgc +++ b/src/interfaces/ecpg/test/preproc/outofscope.pgc @@ -101,7 +101,7 @@ main (void) get_record1(); if (sqlca.sqlcode == ECPG_NOT_FOUND) break; - printf("id=%d%s t='%s'%s d1=%lf%s d2=%lf%s c = '%s'%s\n", + printf("id=%d%s t='%s'%s d1=%f%s d2=%f%s c = '%s'%s\n", myvar->id, mynullvar->id ? " (NULL)" : "", myvar->t, mynullvar->t ? " (NULL)" : "", myvar->d1, mynullvar->d1 ? " (NULL)" : "", @@ -123,5 +123,5 @@ main (void) strcpy(msg, "disconnect"); exec sql disconnect; - return (0); + return 0; } diff --git a/src/interfaces/ecpg/test/preproc/pointer_to_struct.pgc b/src/interfaces/ecpg/test/preproc/pointer_to_struct.pgc index ec94273408..1ec651e3fc 100644 --- a/src/interfaces/ecpg/test/preproc/pointer_to_struct.pgc +++ b/src/interfaces/ecpg/test/preproc/pointer_to_struct.pgc @@ -96,5 +96,5 @@ int main() EXEC SQL disconnect all; /* All the memory will anyway be freed at the end */ - return( 0 ); + return 0; } diff --git a/src/interfaces/ecpg/test/preproc/strings.pgc b/src/interfaces/ecpg/test/preproc/strings.pgc index d6ec9a4cb8..f004ddf6dc 100644 --- a/src/interfaces/ecpg/test/preproc/strings.pgc +++ b/src/interfaces/ecpg/test/preproc/strings.pgc @@ -23,5 +23,5 @@ int main(void) printf("%s %s %s %s %s %s\n", s1, s2, s3, s4, s5, s6); exec sql disconnect; - return (0); + return 0; } diff --git a/src/interfaces/ecpg/test/preproc/variable.pgc b/src/interfaces/ecpg/test/preproc/variable.pgc index 05420afdb2..697a7dc814 100644 --- a/src/interfaces/ecpg/test/preproc/variable.pgc +++ b/src/interfaces/ecpg/test/preproc/variable.pgc @@ -97,5 +97,5 @@ exec sql end declare section; strcpy(msg, "disconnect"); exec sql disconnect; - return (0); + return 0; } diff --git a/src/interfaces/ecpg/test/preproc/whenever.pgc b/src/interfaces/ecpg/test/preproc/whenever.pgc index 9b3ae9e9ec..14cf571e6a 100644 --- a/src/interfaces/ecpg/test/preproc/whenever.pgc +++ b/src/interfaces/ecpg/test/preproc/whenever.pgc @@ -4,7 +4,7 @@ exec sql include ../regression; exec sql whenever sqlerror sqlprint; -static void print(char *msg) +static void print(const char *msg) { fprintf(stderr, "Error in statement '%s':\n", msg); sqlprint(); diff --git 
a/src/interfaces/ecpg/test/preproc/whenever_do_continue.pgc b/src/interfaces/ecpg/test/preproc/whenever_do_continue.pgc new file mode 100644 index 0000000000..2a925a3c54 --- /dev/null +++ b/src/interfaces/ecpg/test/preproc/whenever_do_continue.pgc @@ -0,0 +1,63 @@ +#include + +exec sql include ../regression; + +exec sql whenever sqlerror sqlprint; + +int main(void) +{ + exec sql begin declare section; + struct + { + char ename[12]; + float sal; + float comm; + } emp; + + char msg[128]; + exec sql end declare section; + + ECPGdebug(1, stderr); + + strcpy(msg, "connect"); + exec sql connect to REGRESSDB1; + + strcpy(msg, "create"); + exec sql create table emp(ename varchar,sal double precision, comm double precision); + + strcpy(msg, "insert"); + exec sql insert into emp values ('Ram',111100,21); + exec sql insert into emp values ('aryan',11110,null); + exec sql insert into emp values ('josh',10000,10); + exec sql insert into emp values ('tom',20000,null); + + exec sql declare c cursor for select ename, sal, comm from emp order by ename collate "C" asc; + + exec sql open c; + + /* The 'BREAK' condition to exit the loop. */ + exec sql whenever not found do break; + + /* The DO CONTINUE makes the loop start at the next iteration when an error occurs.*/ + exec sql whenever sqlerror do continue; + + while (1) + { + exec sql fetch c into :emp; + /* The employees with non-NULL commissions will be displayed. */ + printf("%s %7.2f %9.2f\n", emp.ename, emp.sal, emp.comm); + } + + /* + * This 'CONTINUE' shuts off the 'DO CONTINUE' and allow the program to + * proceed if any further errors do occur. + */ + exec sql whenever sqlerror continue; + + exec sql close c; + + strcpy(msg, "drop"); + exec sql drop table emp; + + exit(0); +} diff --git a/src/interfaces/ecpg/test/printf_hack.h b/src/interfaces/ecpg/test/printf_hack.h new file mode 100644 index 0000000000..ef584c0d54 --- /dev/null +++ b/src/interfaces/ecpg/test/printf_hack.h @@ -0,0 +1,29 @@ +/* + * print_double(x) has the same effect as printf("%g", x), but is intended + * to produce the same formatting across all platforms. 
+ */ +static void +print_double(double x) +{ +#ifdef WIN32 + /* Change Windows' 3-digit exponents to look like everyone else's */ + char convert[128]; + int vallen; + + sprintf(convert, "%g", x); + vallen = strlen(convert); + + if (vallen >= 6 && + convert[vallen - 5] == 'e' && + convert[vallen - 3] == '0') + { + convert[vallen - 3] = convert[vallen - 2]; + convert[vallen - 2] = convert[vallen - 1]; + convert[vallen - 1] = '\0'; + } + + printf("%s", convert); +#else + printf("%g", x); +#endif +} diff --git a/src/interfaces/ecpg/test/resultmap b/src/interfaces/ecpg/test/resultmap deleted file mode 100644 index aef7338e81..0000000000 --- a/src/interfaces/ecpg/test/resultmap +++ /dev/null @@ -1,12 +0,0 @@ -compat_informix/dec_test:stdout:i.86-pc-win32vc=compat_informix-dec_test-MinGW32.stdout -compat_informix/dec_test:stdout:i.86-pc-mingw32=compat_informix-dec_test-MinGW32.stdout -compat_informix/dec_test:stdout:x86_64-w64-mingw32=compat_informix-dec_test-MinGW32.stdout -compat_informix/dec_test:stdout:i.86-w64-mingw32=compat_informix-dec_test-MinGW32.stdout -pgtypeslib/num_test:stdout:i.86-pc-win32vc=pgtypeslib-num_test-MinGW32.stdout -pgtypeslib/num_test:stdout:i.86-pc-mingw32=pgtypeslib-num_test-MinGW32.stdout -pgtypeslib/num_test:stdout:x86_64-w64-mingw32=pgtypeslib-num_test-MinGW32.stdout -pgtypeslib/num_test:stdout:i.86-w64-mingw32=pgtypeslib-num_test-MinGW32.stdout -pgtypeslib/num_test2:stdout:i.86-pc-win32vc=pgtypeslib-num_test2-MinGW32.stdout -pgtypeslib/num_test2:stdout:i.86-pc-mingw32=pgtypeslib-num_test2-MinGW32.stdout -pgtypeslib/num_test2:stdout:x86_64-w64-mingw32=pgtypeslib-num_test2-MinGW32.stdout -pgtypeslib/num_test2:stdout:i.86-w64-mingw32=pgtypeslib-num_test2-MinGW32.stdout diff --git a/src/interfaces/ecpg/test/sql/array.pgc b/src/interfaces/ecpg/test/sql/array.pgc index 5f12c472c9..15c9cfa5f7 100644 --- a/src/interfaces/ecpg/test/sql/array.pgc +++ b/src/interfaces/ecpg/test/sql/array.pgc @@ -107,5 +107,5 @@ EXEC SQL END DECLARE SECTION; free(t); - return (0); + return 0; } diff --git a/src/interfaces/ecpg/test/sql/describe.pgc b/src/interfaces/ecpg/test/sql/describe.pgc index b95ab351bd..87d6bd9a29 100644 --- a/src/interfaces/ecpg/test/sql/describe.pgc +++ b/src/interfaces/ecpg/test/sql/describe.pgc @@ -195,5 +195,5 @@ exec sql end declare section; strcpy(msg, "disconnect"); exec sql disconnect; - return (0); + return 0; } diff --git a/src/interfaces/ecpg/test/sql/execute.pgc b/src/interfaces/ecpg/test/sql/execute.pgc index b8364c78bb..cc9814e9be 100644 --- a/src/interfaces/ecpg/test/sql/execute.pgc +++ b/src/interfaces/ecpg/test/sql/execute.pgc @@ -109,5 +109,5 @@ exec sql end declare section; exec sql commit; exec sql disconnect; - return (0); + return 0; } diff --git a/src/interfaces/ecpg/test/sql/oldexec.pgc b/src/interfaces/ecpg/test/sql/oldexec.pgc index 2988f2ab8a..4f94a18aa1 100644 --- a/src/interfaces/ecpg/test/sql/oldexec.pgc +++ b/src/interfaces/ecpg/test/sql/oldexec.pgc @@ -86,5 +86,5 @@ exec sql end declare section; exec sql commit; exec sql disconnect; - return (0); + return 0; } diff --git a/src/interfaces/ecpg/test/sql/sqlda.pgc b/src/interfaces/ecpg/test/sql/sqlda.pgc index 29774b5909..ec4d256b39 100644 --- a/src/interfaces/ecpg/test/sql/sqlda.pgc +++ b/src/interfaces/ecpg/test/sql/sqlda.pgc @@ -1,9 +1,9 @@ #include #include #include +#include "ecpg_config.h" exec sql include ../regression; - exec sql include sqlda.h; exec sql include pgtypes_numeric.h; @@ -36,8 +36,16 @@ dump_sqlda(sqlda_t *sqlda) case ECPGt_int: printf("name sqlda descriptor: '%s' value 
%d\n", sqlda->sqlvar[i].sqlname.data, *(int *)sqlda->sqlvar[i].sqldata); break; + case ECPGt_long: + printf("name sqlda descriptor: '%s' value %ld\n", sqlda->sqlvar[i].sqlname.data, *(long int *)sqlda->sqlvar[i].sqldata); + break; +#ifdef HAVE_LONG_LONG_INT + case ECPGt_long_long: + printf("name sqlda descriptor: '%s' value %lld\n", sqlda->sqlvar[i].sqlname.data, *(long long int *)sqlda->sqlvar[i].sqldata); + break; +#endif case ECPGt_double: - printf("name sqlda descriptor: '%s' value %lf\n", sqlda->sqlvar[i].sqlname.data, *(double *)sqlda->sqlvar[i].sqldata); + printf("name sqlda descriptor: '%s' value %f\n", sqlda->sqlvar[i].sqlname.data, *(double *)sqlda->sqlvar[i].sqldata); break; case ECPGt_numeric: { @@ -45,7 +53,7 @@ dump_sqlda(sqlda_t *sqlda) val = PGTYPESnumeric_to_asc((numeric*)sqlda->sqlvar[i].sqldata, -1); printf("name sqlda descriptor: '%s' value NUMERIC '%s'\n", sqlda->sqlvar[i].sqlname.data, val); - free(val); + PGTYPESchar_free(val); break; } } @@ -78,13 +86,15 @@ exec sql end declare section; t text, d1 numeric, d2 float8, - c char(10)); + c char(10), + big bigint + ); strcpy(msg, "insert"); exec sql insert into t1 values - (1, 'a', 1.0, 1, 'a'), - (2, null, null, null, null), - (4, 'd', 4.0, 4, 'd'); + (1, 'a', 1.0, 1, 'a',1111111111111111111), + (2, null, null, null, null,null), + (4, 'd', 4.0, 4, 'd',4444444444444444444); strcpy(msg, "commit"); exec sql commit; @@ -246,5 +256,5 @@ exec sql end declare section; strcpy(msg, "disconnect"); exec sql disconnect; - return (0); + return 0; } diff --git a/src/interfaces/ecpg/test/sql/twophase.pgc b/src/interfaces/ecpg/test/sql/twophase.pgc index 867a28e4c4..38913d7af2 100644 --- a/src/interfaces/ecpg/test/sql/twophase.pgc +++ b/src/interfaces/ecpg/test/sql/twophase.pgc @@ -40,5 +40,5 @@ int main(void) strcpy(msg, "disconnect"); exec sql disconnect current; - return (0); + return 0; } diff --git a/src/interfaces/ecpg/test/thread/alloc.pgc b/src/interfaces/ecpg/test/thread/alloc.pgc index ea98495be4..b13bcb860b 100644 --- a/src/interfaces/ecpg/test/thread/alloc.pgc +++ b/src/interfaces/ecpg/test/thread/alloc.pgc @@ -13,6 +13,7 @@ main(void) #define WIN32_LEAN_AND_MEAN #include #include +#include #else #include #endif @@ -41,6 +42,12 @@ static void* fn(void* arg) char **r = NULL; EXEC SQL END DECLARE SECTION; +#ifdef WIN32 +#ifdef _MSC_VER /* requires MSVC */ + _configthreadlocale(_ENABLE_PER_THREAD_LOCALE); +#endif +#endif + value = (long)arg; sprintf(name, "Connection: %d", value); diff --git a/src/interfaces/ecpg/test/thread/descriptor.pgc b/src/interfaces/ecpg/test/thread/descriptor.pgc index e07a5e22b7..3f28c6d329 100644 --- a/src/interfaces/ecpg/test/thread/descriptor.pgc +++ b/src/interfaces/ecpg/test/thread/descriptor.pgc @@ -3,6 +3,7 @@ #define WIN32_LEAN_AND_MEAN #include #include +#include #else #include #endif @@ -24,6 +25,12 @@ static void* fn(void* arg) { int i; +#ifdef WIN32 +#ifdef _MSC_VER /* requires MSVC */ + _configthreadlocale(_ENABLE_PER_THREAD_LOCALE); +#endif +#endif + for (i = 1; i <= REPEATS; ++i) { EXEC SQL ALLOCATE DESCRIPTOR mydesc; diff --git a/src/interfaces/ecpg/test/thread/prep.pgc b/src/interfaces/ecpg/test/thread/prep.pgc index 45205ddc8b..3a2467c9ab 100644 --- a/src/interfaces/ecpg/test/thread/prep.pgc +++ b/src/interfaces/ecpg/test/thread/prep.pgc @@ -13,6 +13,7 @@ main(void) #define WIN32_LEAN_AND_MEAN #include #include +#include #else #include #endif @@ -41,6 +42,12 @@ static void* fn(void* arg) char query[256] = "INSERT INTO T VALUES ( ? 
)"; EXEC SQL END DECLARE SECTION; +#ifdef WIN32 +#ifdef _MSC_VER /* requires MSVC */ + _configthreadlocale(_ENABLE_PER_THREAD_LOCALE); +#endif +#endif + value = (long)arg; sprintf(name, "Connection: %d", value); diff --git a/src/interfaces/ecpg/test/thread/thread.pgc b/src/interfaces/ecpg/test/thread/thread.pgc index cc23b82484..ae6b229962 100644 --- a/src/interfaces/ecpg/test/thread/thread.pgc +++ b/src/interfaces/ecpg/test/thread/thread.pgc @@ -17,6 +17,7 @@ main(void) #include #else #include +#include #endif exec sql include ../regression; @@ -59,7 +60,7 @@ int main() if( threads == NULL ) { fprintf(stderr, "Cannot alloc memory\n"); - return( 1 ); + return 1; } for( n = 0; n < nthreads; n++ ) { @@ -91,17 +92,24 @@ int main() else printf("ERROR: Failure - expecting %d rows, got %d.\n", nthreads * iterations, l_rows); - return( 0 ); + return 0; } void *test_thread(void *arg) { long threadnum = (long)arg; + EXEC SQL BEGIN DECLARE SECTION; int l_i; char l_connection[128]; EXEC SQL END DECLARE SECTION; +#ifdef WIN32 +#ifdef _MSC_VER /* requires MSVC */ + _configthreadlocale(_ENABLE_PER_THREAD_LOCALE); +#endif +#endif + /* build up connection name, and connect to database */ #ifndef _MSC_VER snprintf(l_connection, sizeof(l_connection), "thread_%03ld", threadnum); @@ -113,7 +121,7 @@ void *test_thread(void *arg) if( sqlca.sqlcode != 0 ) { printf("%s: ERROR: cannot connect to database!\n", l_connection); - return( NULL ); + return NULL; } EXEC SQL AT :l_connection BEGIN; @@ -128,6 +136,6 @@ void *test_thread(void *arg) /* all done */ EXEC SQL AT :l_connection COMMIT; EXEC SQL DISCONNECT :l_connection; - return( NULL ); + return NULL; } #endif /* ENABLE_THREAD_SAFETY */ diff --git a/src/interfaces/ecpg/test/thread/thread_implicit.pgc b/src/interfaces/ecpg/test/thread/thread_implicit.pgc index 96e0e993ac..0dfcb7172b 100644 --- a/src/interfaces/ecpg/test/thread/thread_implicit.pgc +++ b/src/interfaces/ecpg/test/thread/thread_implicit.pgc @@ -18,6 +18,7 @@ main(void) #include #else #include +#include #endif exec sql include ../regression; @@ -60,7 +61,7 @@ int main() if( threads == NULL ) { fprintf(stderr, "Cannot alloc memory\n"); - return( 1 ); + return 1; } for( n = 0; n < nthreads; n++ ) { @@ -92,17 +93,24 @@ int main() else printf("ERROR: Failure - expecting %d rows, got %d.\n", nthreads * iterations, l_rows); - return( 0 ); + return 0; } void *test_thread(void *arg) { long threadnum = (long)arg; + EXEC SQL BEGIN DECLARE SECTION; int l_i; char l_connection[128]; EXEC SQL END DECLARE SECTION; +#ifdef WIN32 +#ifdef _MSC_VER /* requires MSVC */ + _configthreadlocale(_ENABLE_PER_THREAD_LOCALE); +#endif +#endif + /* build up connection name, and connect to database */ #ifndef _MSC_VER snprintf(l_connection, sizeof(l_connection), "thread_%03ld", threadnum); @@ -114,7 +122,7 @@ void *test_thread(void *arg) if( sqlca.sqlcode != 0 ) { printf("%s: ERROR: cannot connect to database!\n", l_connection); - return( NULL ); + return NULL; } EXEC SQL BEGIN; @@ -129,6 +137,6 @@ void *test_thread(void *arg) /* all done */ EXEC SQL COMMIT; EXEC SQL DISCONNECT :l_connection; - return( NULL ); + return NULL; } #endif /* ENABLE_THREAD_SAFETY */ diff --git a/src/interfaces/libpq/.gitignore b/src/interfaces/libpq/.gitignore index 6c02dc7055..9be338dec8 100644 --- a/src/interfaces/libpq/.gitignore +++ b/src/interfaces/libpq/.gitignore @@ -1,33 +1,5 @@ /exports.list /libpq.rc # .c files that are symlinked in from elsewhere -/chklocale.c -/crypt.c -/erand48.c -/getaddrinfo.c -/getpeereid.c -/inet_aton.c -/inet_net_ntop.c 
-/noblock.c -/open.c -/system.c -/pgsleep.c -/pg_strong_random.c -/pgstrcasecmp.c -/pqsignal.c -/snprintf.c -/strerror.c -/strlcpy.c -/thread.c -/win32error.c -/win32setlocale.c -/ip.c -/md5.c -/base64.c -/scram-common.c -/sha2.c -/sha2_openssl.c -/saslprep.c -/unicode_norm.c /encnames.c /wchar.c diff --git a/src/interfaces/libpq/Makefile b/src/interfaces/libpq/Makefile index 87f22d242f..c2171d0856 100644 --- a/src/interfaces/libpq/Makefile +++ b/src/interfaces/libpq/Makefile @@ -2,7 +2,7 @@ # # Makefile for src/interfaces/libpq library # -# Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group +# Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group # Portions Copyright (c) 1994, Regents of the University of California # # src/interfaces/libpq/Makefile @@ -24,37 +24,18 @@ ifneq ($(PORTNAME), win32) override CFLAGS += $(PTHREAD_CFLAGS) endif -# Need to recompile any external C files because we need -# all object files to use the same compile flags as libpq; some -# platforms require special flags. -LIBS := $(LIBS:-lpgport=) +# The MSVC build system scrapes OBJS from this file. If you change any of +# the conditional additions of files to OBJS, update Mkvcbuild.pm to match. -# We can't use Makefile variables here because the MSVC build system scrapes -# OBJS from this file. OBJS= fe-auth.o fe-auth-scram.o fe-connect.o fe-exec.o fe-misc.o fe-print.o fe-lobj.o \ fe-protocol2.o fe-protocol3.o pqexpbuffer.o fe-secure.o \ libpq-events.o -# libpgport C files we always use -OBJS += chklocale.o inet_net_ntop.o noblock.o pgstrcasecmp.o pqsignal.o \ - thread.o -# libpgport C files that are needed if identified by configure -OBJS += $(filter crypt.o getaddrinfo.o getpeereid.o inet_aton.o open.o system.o snprintf.o strerror.o strlcpy.o win32error.o win32setlocale.o, $(LIBOBJS)) - -ifeq ($(enable_strong_random), yes) -OBJS += pg_strong_random.o -else -OBJS += erand48.o -endif # src/backend/utils/mb OBJS += encnames.o wchar.o -# src/common -OBJS += base64.o ip.o md5.o scram-common.o saslprep.o unicode_norm.o ifeq ($(with_openssl),yes) -OBJS += fe-secure-openssl.o sha2_openssl.o -else -OBJS += sha2.o +OBJS += fe-secure-openssl.o fe-secure-common.o endif ifeq ($(PORTNAME), cygwin) @@ -62,8 +43,7 @@ override shlib = cyg$(NAME)$(DLSUFFIX) endif ifeq ($(PORTNAME), win32) -# pgsleep.o is from libpgport -OBJS += pgsleep.o win32.o libpqrc.o +OBJS += win32.o libpqrc.o libpqrc.o: libpq.rc $(WINDRES) -i $< -o $@ @@ -76,11 +56,14 @@ endif # Add libraries that libpq depends (or might depend) on into the # shared library link. (The order in which you list them here doesn't -# matter.) +# matter.) Note that we filter out -lpgcommon and -lpgport from LIBS and +# instead link with -lpgcommon_shlib and -lpgport_shlib, to get object files +# that are built correctly for use in a shlib. 
+SHLIB_LINK_INTERNAL = -lpgcommon_shlib -lpgport_shlib ifneq ($(PORTNAME), win32) -SHLIB_LINK += $(filter -lcrypt -ldes -lcom_err -lcrypto -lk5crypto -lkrb5 -lgssapi_krb5 -lgss -lgssapi -lssl -lsocket -lnsl -lresolv -lintl, $(LIBS)) $(LDAP_LIBS_FE) $(PTHREAD_LIBS) +SHLIB_LINK += $(filter -lcrypt -ldes -lcom_err -lcrypto -lk5crypto -lkrb5 -lgssapi_krb5 -lgss -lgssapi -lssl -lsocket -lnsl -lresolv -lintl -lm, $(LIBS)) $(LDAP_LIBS_FE) $(PTHREAD_LIBS) else -SHLIB_LINK += $(filter -lcrypt -ldes -lcom_err -lcrypto -lk5crypto -lkrb5 -lgssapi32 -lssl -lsocket -lnsl -lresolv -lintl $(PTHREAD_LIBS), $(LIBS)) $(LDAP_LIBS_FE) +SHLIB_LINK += $(filter -lcrypt -ldes -lcom_err -lcrypto -lk5crypto -lkrb5 -lgssapi32 -lssl -lsocket -lnsl -lresolv -lintl -lm $(PTHREAD_LIBS), $(LIBS)) $(LDAP_LIBS_FE) endif ifeq ($(PORTNAME), win32) SHLIB_LINK += -lshell32 -lws2_32 -lsecur32 $(filter -leay32 -lssleay32 -lcomerr32 -lkrb5_32, $(LIBS)) @@ -90,25 +73,19 @@ SHLIB_EXPORTS = exports.txt all: all-lib +all-lib: | submake-libpgport + # Shared library stuff include $(top_srcdir)/src/Makefile.shlib backend_src = $(top_srcdir)/src/backend -# We use several libpgport and backend modules verbatim, but since we need +# We use a few backend modules verbatim, but since we need # to compile with appropriate options to build a shared lib, we can't -# necessarily use the same object files built for libpgport and the backend. +# use the same object files built for the backend. # Instead, symlink the source files in here and build our own object files. -# For some libpgport modules, this only happens if configure decides -# the module is needed (see filter hack in OBJS, above). # When you add a file here, remember to add it in the "clean" target below. -chklocale.c crypt.c erand48.c getaddrinfo.c getpeereid.c inet_aton.c inet_net_ntop.c noblock.c open.c system.c pgsleep.c pg_strong_random.c pgstrcasecmp.c pqsignal.c snprintf.c strerror.c strlcpy.c thread.c win32error.c win32setlocale.c: % : $(top_srcdir)/src/port/% - rm -f $@ && $(LN_S) $< . - -ip.c md5.c base64.c scram-common.c sha2.c sha2_openssl.c saslprep.c unicode_norm.c: % : $(top_srcdir)/src/common/% - rm -f $@ && $(LN_S) $< . - encnames.c wchar.c: % : $(backend_src)/utils/mb/% rm -f $@ && $(LN_S) $< . @@ -123,6 +100,7 @@ libpq.rc libpq-dist.rc: libpq.rc.in # installations and is only updated by distprep.) libpq.rc: $(top_builddir)/src/Makefile.global +# Make dependencies on pg_config_paths.h visible, too. 
fe-connect.o: fe-connect.c $(top_builddir)/src/port/pg_config_paths.h fe-misc.o: fe-misc.c $(top_builddir)/src/port/pg_config_paths.h @@ -154,9 +132,7 @@ clean distclean: clean-lib rm -f $(OBJS) pthread.h libpq.rc # Might be left over from a Win32 client-only build rm -f pg_config_paths.h -# Remove files we (may have) symlinked in from src/port and other places - rm -f chklocale.c crypt.c erand48.c getaddrinfo.c getpeereid.c inet_aton.c inet_net_ntop.c noblock.c open.c system.c pgsleep.c pg_strong_random.c pgstrcasecmp.c pqsignal.c snprintf.c strerror.c strlcpy.c thread.c win32error.c win32setlocale.c - rm -f ip.c md5.c base64.c scram-common.c sha2.c sha2_openssl.c saslprep.c unicode_norm.c +# Remove files we (may have) symlinked in from other places rm -f encnames.c wchar.c maintainer-clean: distclean maintainer-clean-lib diff --git a/src/interfaces/libpq/exports.txt b/src/interfaces/libpq/exports.txt index d6a38d0df8..4359fae30d 100644 --- a/src/interfaces/libpq/exports.txt +++ b/src/interfaces/libpq/exports.txt @@ -116,7 +116,7 @@ PQserverVersion 113 PQgetssl 114 pg_char_to_encoding 115 pg_valid_server_encoding 116 -pqsignal 117 +# pqsignal 117 # no longer exported, see libpgport instead PQprepare 118 PQsendPrepare 119 PQgetCancel 120 @@ -172,3 +172,4 @@ PQsslAttribute 169 PQsetErrorContextVisibility 170 PQresultVerboseErrorMessage 171 PQencryptPasswordConn 172 +PQresultMemorySize 173 diff --git a/src/interfaces/libpq/fe-auth-scram.c b/src/interfaces/libpq/fe-auth-scram.c index d1c7037101..603ef4c002 100644 --- a/src/interfaces/libpq/fe-auth-scram.c +++ b/src/interfaces/libpq/fe-auth-scram.c @@ -3,7 +3,7 @@ * fe-auth-scram.c * The front-end (client) implementation of SCRAM authentication. * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * IDENTIFICATION @@ -42,8 +42,9 @@ typedef struct fe_scram_state_enum state; /* These are supplied by the user */ - const char *username; + PGconn *conn; char *password; + char *sasl_mechanism; /* We construct these */ uint8 SaltedPassword[SCRAM_KEY_LEN]; @@ -63,14 +64,10 @@ typedef struct char ServerSignature[SCRAM_KEY_LEN]; } fe_scram_state; -static bool read_server_first_message(fe_scram_state *state, char *input, - PQExpBuffer errormessage); -static bool read_server_final_message(fe_scram_state *state, char *input, - PQExpBuffer errormessage); -static char *build_client_first_message(fe_scram_state *state, - PQExpBuffer errormessage); -static char *build_client_final_message(fe_scram_state *state, - PQExpBuffer errormessage); +static bool read_server_first_message(fe_scram_state *state, char *input); +static bool read_server_final_message(fe_scram_state *state, char *input); +static char *build_client_first_message(fe_scram_state *state); +static char *build_client_final_message(fe_scram_state *state); static bool verify_server_signature(fe_scram_state *state); static void calculate_client_proof(fe_scram_state *state, const char *client_final_message_without_proof, @@ -81,23 +78,35 @@ static bool pg_frontend_random(char *dst, int len); * Initialize SCRAM exchange status. 
*/ void * -pg_fe_scram_init(const char *username, const char *password) +pg_fe_scram_init(PGconn *conn, + const char *password, + const char *sasl_mechanism) { fe_scram_state *state; char *prep_password; pg_saslprep_rc rc; + Assert(sasl_mechanism != NULL); + state = (fe_scram_state *) malloc(sizeof(fe_scram_state)); if (!state) return NULL; memset(state, 0, sizeof(fe_scram_state)); + state->conn = conn; state->state = FE_SCRAM_INIT; - state->username = username; + state->sasl_mechanism = strdup(sasl_mechanism); + + if (!state->sasl_mechanism) + { + free(state); + return NULL; + } /* Normalize the password with SASLprep, if possible */ rc = pg_saslprep(password, &prep_password); if (rc == SASLPREP_OOM) { + free(state->sasl_mechanism); free(state); return NULL; } @@ -106,6 +115,7 @@ pg_fe_scram_init(const char *username, const char *password) prep_password = strdup(password); if (!prep_password) { + free(state->sasl_mechanism); free(state); return NULL; } @@ -125,6 +135,8 @@ pg_fe_scram_free(void *opaq) if (state->password) free(state->password); + if (state->sasl_mechanism) + free(state->sasl_mechanism); /* client messages */ if (state->client_nonce) @@ -155,9 +167,10 @@ pg_fe_scram_free(void *opaq) void pg_fe_scram_exchange(void *opaq, char *input, int inputlen, char **output, int *outputlen, - bool *done, bool *success, PQExpBuffer errorMessage) + bool *done, bool *success) { fe_scram_state *state = (fe_scram_state *) opaq; + PGconn *conn = state->conn; *done = false; *success = false; @@ -172,13 +185,13 @@ pg_fe_scram_exchange(void *opaq, char *input, int inputlen, { if (inputlen == 0) { - printfPQExpBuffer(errorMessage, + printfPQExpBuffer(&conn->errorMessage, libpq_gettext("malformed SCRAM message (empty message)\n")); goto error; } if (inputlen != strlen(input)) { - printfPQExpBuffer(errorMessage, + printfPQExpBuffer(&conn->errorMessage, libpq_gettext("malformed SCRAM message (length mismatch)\n")); goto error; } @@ -188,7 +201,7 @@ pg_fe_scram_exchange(void *opaq, char *input, int inputlen, { case FE_SCRAM_INIT: /* Begin the SCRAM handshake, by sending client nonce */ - *output = build_client_first_message(state, errorMessage); + *output = build_client_first_message(state); if (*output == NULL) goto error; @@ -199,10 +212,10 @@ pg_fe_scram_exchange(void *opaq, char *input, int inputlen, case FE_SCRAM_NONCE_SENT: /* Receive salt and server nonce, send response. 
*/ - if (!read_server_first_message(state, input, errorMessage)) + if (!read_server_first_message(state, input)) goto error; - *output = build_client_final_message(state, errorMessage); + *output = build_client_final_message(state); if (*output == NULL) goto error; @@ -213,7 +226,7 @@ pg_fe_scram_exchange(void *opaq, char *input, int inputlen, case FE_SCRAM_PROOF_SENT: /* Receive server signature */ - if (!read_server_final_message(state, input, errorMessage)) + if (!read_server_final_message(state, input)) goto error; /* @@ -227,8 +240,8 @@ pg_fe_scram_exchange(void *opaq, char *input, int inputlen, else { *success = false; - printfPQExpBuffer(errorMessage, - libpq_gettext("invalid server signature\n")); + printfPQExpBuffer(&conn->errorMessage, + libpq_gettext("incorrect server signature\n")); } *done = true; state->state = FE_SCRAM_FINISHED; @@ -236,7 +249,7 @@ pg_fe_scram_exchange(void *opaq, char *input, int inputlen, default: /* shouldn't happen */ - printfPQExpBuffer(errorMessage, + printfPQExpBuffer(&conn->errorMessage, libpq_gettext("invalid SCRAM exchange state\n")); goto error; } @@ -249,7 +262,7 @@ pg_fe_scram_exchange(void *opaq, char *input, int inputlen, } /* - * Read value for an attribute part of a SASL message. + * Read value for an attribute part of a SCRAM message. */ static char * read_attr_value(char **input, char attr, PQExpBuffer errorMessage) @@ -260,7 +273,7 @@ read_attr_value(char **input, char attr, PQExpBuffer errorMessage) if (*begin != attr) { printfPQExpBuffer(errorMessage, - libpq_gettext("malformed SCRAM message (%c expected)\n"), + libpq_gettext("malformed SCRAM message (attribute \"%c\" expected)\n"), attr); return NULL; } @@ -269,7 +282,7 @@ read_attr_value(char **input, char attr, PQExpBuffer errorMessage) if (*begin != '=') { printfPQExpBuffer(errorMessage, - libpq_gettext("malformed SCRAM message (expected = in attr '%c')\n"), + libpq_gettext("malformed SCRAM message (expected character \"=\" for attribute \"%c\")\n"), attr); return NULL; } @@ -294,12 +307,14 @@ read_attr_value(char **input, char attr, PQExpBuffer errorMessage) * Build the first exchange message sent by the client. */ static char * -build_client_first_message(fe_scram_state *state, PQExpBuffer errormessage) +build_client_first_message(fe_scram_state *state) { + PGconn *conn = state->conn; char raw_nonce[SCRAM_RAW_NONCE_LEN + 1]; - char *buf; - char buflen; + char *result; + int channel_info_len; int encoded_len; + PQExpBufferData buf; /* * Generate a "raw" nonce. This is converted to ASCII-printable form by @@ -307,7 +322,7 @@ build_client_first_message(fe_scram_state *state, PQExpBuffer errormessage) */ if (!pg_frontend_random(raw_nonce, SCRAM_RAW_NONCE_LEN)) { - printfPQExpBuffer(errormessage, + printfPQExpBuffer(&conn->errorMessage, libpq_gettext("could not generate nonce\n")); return NULL; } @@ -315,7 +330,7 @@ build_client_first_message(fe_scram_state *state, PQExpBuffer errormessage) state->client_nonce = malloc(pg_b64_enc_len(SCRAM_RAW_NONCE_LEN) + 1); if (state->client_nonce == NULL) { - printfPQExpBuffer(errormessage, + printfPQExpBuffer(&conn->errorMessage, libpq_gettext("out of memory\n")); return NULL; } @@ -328,35 +343,73 @@ build_client_first_message(fe_scram_state *state, PQExpBuffer errormessage) * prepared with SASLprep, the message parsing would fail if it includes * '=' or ',' characters. 
*/ - buflen = 8 + strlen(state->client_nonce) + 1; - buf = malloc(buflen); - if (buf == NULL) + + initPQExpBuffer(&buf); + + /* + * First build the gs2-header with channel binding information. + */ + if (strcmp(state->sasl_mechanism, SCRAM_SHA_256_PLUS_NAME) == 0) { - printfPQExpBuffer(errormessage, - libpq_gettext("out of memory\n")); - return NULL; + Assert(conn->ssl_in_use); + appendPQExpBuffer(&buf, "p=tls-server-end-point"); } - snprintf(buf, buflen, "n,,n=,r=%s", state->client_nonce); - - state->client_first_message_bare = strdup(buf + 3); - if (!state->client_first_message_bare) +#ifdef HAVE_PGTLS_GET_PEER_CERTIFICATE_HASH + else if (conn->ssl_in_use) { - free(buf); - printfPQExpBuffer(errormessage, - libpq_gettext("out of memory\n")); - return NULL; + /* + * Client supports channel binding, but thinks the server does not. + */ + appendPQExpBuffer(&buf, "y"); } +#endif + else + { + /* + * Client does not support channel binding. + */ + appendPQExpBuffer(&buf, "n"); + } + + if (PQExpBufferDataBroken(buf)) + goto oom_error; + + channel_info_len = buf.len; + + appendPQExpBuffer(&buf, ",,n=,r=%s", state->client_nonce); + if (PQExpBufferDataBroken(buf)) + goto oom_error; + + /* + * The first message content needs to be saved without channel binding + * information. + */ + state->client_first_message_bare = strdup(buf.data + channel_info_len + 2); + if (!state->client_first_message_bare) + goto oom_error; + + result = strdup(buf.data); + if (result == NULL) + goto oom_error; - return buf; + termPQExpBuffer(&buf); + return result; + +oom_error: + termPQExpBuffer(&buf); + printfPQExpBuffer(&conn->errorMessage, + libpq_gettext("out of memory\n")); + return NULL; } /* * Build the final exchange message sent from the client. */ static char * -build_client_final_message(fe_scram_state *state, PQExpBuffer errormessage) +build_client_final_message(fe_scram_state *state) { PQExpBufferData buf; + PGconn *conn = state->conn; uint8 client_proof[SCRAM_KEY_LEN]; char *result; @@ -365,8 +418,78 @@ build_client_final_message(fe_scram_state *state, PQExpBuffer errormessage) /* * Construct client-final-message-without-proof. We need to remember it * for verifying the server proof in the final step of authentication. + * + * The channel binding flag handling (p/y/n) must be consistent with + * build_client_first_message(), because the server will check that it's + * the same flag both times. 
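To make the flow of build_client_first_message() above easier to follow, here is a stripped-down sketch of how the gs2 header flag (p, y, or n) and the bare first message fit together. It ignores out-of-memory handling, uses a fixed-size buffer instead of PQExpBuffer, and the function and parameter names (build_first_message, use_plus, ssl_in_use) are illustrative only; the nonce is the well-known RFC 5802 example value.

#include <stdio.h>
#include <string.h>
#include <stdbool.h>

/* Sketch: choose the SCRAM gs2 header and build client-first-message.
 * 'use_plus' stands in for "SCRAM-SHA-256-PLUS was selected";
 * 'ssl_in_use' for an SSL-encrypted connection. */
static void
build_first_message(bool use_plus, bool ssl_in_use,
                    const char *client_nonce,
                    char *out, size_t outlen)
{
    const char *gs2;

    if (use_plus)
        gs2 = "p=tls-server-end-point";   /* we will send channel-binding data */
    else if (ssl_in_use)
        gs2 = "y";                        /* we could bind, but didn't pick -PLUS */
    else
        gs2 = "n";                        /* no channel binding */

    /* client-first-message = gs2-header ",," client-first-message-bare */
    snprintf(out, outlen, "%s,,n=,r=%s", gs2, client_nonce);
}

int
main(void)
{
    char msg[256];

    build_first_message(true, true, "fyko+d2lbbFgONRv9qkxdawL", msg, sizeof(msg));
    printf("%s\n", msg);   /* p=tls-server-end-point,,n=,r=... */
    return 0;
}

In the real code the part after the ",," separator is what gets saved as client_first_message_bare for later proof computation.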
*/ - appendPQExpBuffer(&buf, "c=biws,r=%s", state->nonce); + if (strcmp(state->sasl_mechanism, SCRAM_SHA_256_PLUS_NAME) == 0) + { +#ifdef HAVE_PGTLS_GET_PEER_CERTIFICATE_HASH + char *cbind_data = NULL; + size_t cbind_data_len = 0; + size_t cbind_header_len; + char *cbind_input; + size_t cbind_input_len; + + /* Fetch hash data of server's SSL certificate */ + cbind_data = + pgtls_get_peer_certificate_hash(state->conn, + &cbind_data_len); + if (cbind_data == NULL) + { + /* error message is already set on error */ + termPQExpBuffer(&buf); + return NULL; + } + + appendPQExpBuffer(&buf, "c="); + + /* p=type,, */ + cbind_header_len = strlen("p=tls-server-end-point,,"); + cbind_input_len = cbind_header_len + cbind_data_len; + cbind_input = malloc(cbind_input_len); + if (!cbind_input) + { + free(cbind_data); + goto oom_error; + } + memcpy(cbind_input, "p=tls-server-end-point,,", cbind_header_len); + memcpy(cbind_input + cbind_header_len, cbind_data, cbind_data_len); + + if (!enlargePQExpBuffer(&buf, pg_b64_enc_len(cbind_input_len))) + { + free(cbind_data); + free(cbind_input); + goto oom_error; + } + buf.len += pg_b64_encode(cbind_input, cbind_input_len, buf.data + buf.len); + buf.data[buf.len] = '\0'; + + free(cbind_data); + free(cbind_input); +#else + /* + * Chose channel binding, but the SSL library doesn't support it. + * Shouldn't happen. + */ + termPQExpBuffer(&buf); + printfPQExpBuffer(&conn->errorMessage, + "channel binding not supported by this build\n"); + return NULL; +#endif /* HAVE_PGTLS_GET_PEER_CERTIFICATE_HASH */ + } +#ifdef HAVE_PGTLS_GET_PEER_CERTIFICATE_HASH + else if (conn->ssl_in_use) + appendPQExpBuffer(&buf, "c=eSws"); /* base64 of "y,," */ +#endif + else + appendPQExpBuffer(&buf, "c=biws"); /* base64 of "n,," */ + + if (PQExpBufferDataBroken(buf)) + goto oom_error; + + appendPQExpBuffer(&buf, ",r=%s", state->nonce); if (PQExpBufferDataBroken(buf)) goto oom_error; @@ -396,7 +519,7 @@ build_client_final_message(fe_scram_state *state, PQExpBuffer errormessage) oom_error: termPQExpBuffer(&buf); - printfPQExpBuffer(errormessage, + printfPQExpBuffer(&conn->errorMessage, libpq_gettext("out of memory\n")); return NULL; } @@ -405,9 +528,9 @@ build_client_final_message(fe_scram_state *state, PQExpBuffer errormessage) * Read the first exchange message coming from the server. 
*/ static bool -read_server_first_message(fe_scram_state *state, char *input, - PQExpBuffer errormessage) +read_server_first_message(fe_scram_state *state, char *input) { + PGconn *conn = state->conn; char *iterations_str; char *endptr; char *encoded_salt; @@ -416,13 +539,14 @@ read_server_first_message(fe_scram_state *state, char *input, state->server_first_message = strdup(input); if (state->server_first_message == NULL) { - printfPQExpBuffer(errormessage, + printfPQExpBuffer(&conn->errorMessage, libpq_gettext("out of memory\n")); return false; } /* parse the message */ - nonce = read_attr_value(&input, 'r', errormessage); + nonce = read_attr_value(&input, 'r', + &conn->errorMessage); if (nonce == NULL) { /* read_attr_value() has generated an error string */ @@ -433,7 +557,7 @@ read_server_first_message(fe_scram_state *state, char *input, if (strlen(nonce) < strlen(state->client_nonce) || memcmp(nonce, state->client_nonce, strlen(state->client_nonce)) != 0) { - printfPQExpBuffer(errormessage, + printfPQExpBuffer(&conn->errorMessage, libpq_gettext("invalid SCRAM response (nonce mismatch)\n")); return false; } @@ -441,12 +565,12 @@ read_server_first_message(fe_scram_state *state, char *input, state->nonce = strdup(nonce); if (state->nonce == NULL) { - printfPQExpBuffer(errormessage, + printfPQExpBuffer(&conn->errorMessage, libpq_gettext("out of memory\n")); return false; } - encoded_salt = read_attr_value(&input, 's', errormessage); + encoded_salt = read_attr_value(&input, 's', &conn->errorMessage); if (encoded_salt == NULL) { /* read_attr_value() has generated an error string */ @@ -455,7 +579,7 @@ read_server_first_message(fe_scram_state *state, char *input, state->salt = malloc(pg_b64_dec_len(strlen(encoded_salt))); if (state->salt == NULL) { - printfPQExpBuffer(errormessage, + printfPQExpBuffer(&conn->errorMessage, libpq_gettext("out of memory\n")); return false; } @@ -463,7 +587,7 @@ read_server_first_message(fe_scram_state *state, char *input, strlen(encoded_salt), state->salt); - iterations_str = read_attr_value(&input, 'i', errormessage); + iterations_str = read_attr_value(&input, 'i', &conn->errorMessage); if (iterations_str == NULL) { /* read_attr_value() has generated an error string */ @@ -472,13 +596,13 @@ read_server_first_message(fe_scram_state *state, char *input, state->iterations = strtol(iterations_str, &endptr, 10); if (*endptr != '\0' || state->iterations < 1) { - printfPQExpBuffer(errormessage, + printfPQExpBuffer(&conn->errorMessage, libpq_gettext("malformed SCRAM message (invalid iteration count)\n")); return false; } if (*input != '\0') - printfPQExpBuffer(errormessage, + printfPQExpBuffer(&conn->errorMessage, libpq_gettext("malformed SCRAM message (garbage at end of server-first-message)\n")); return true; @@ -488,16 +612,16 @@ read_server_first_message(fe_scram_state *state, char *input, * Read the final exchange message coming from the server. */ static bool -read_server_final_message(fe_scram_state *state, char *input, - PQExpBuffer errormessage) +read_server_final_message(fe_scram_state *state, char *input) { + PGconn *conn = state->conn; char *encoded_server_signature; int server_signature_len; state->server_final_message = strdup(input); if (!state->server_final_message) { - printfPQExpBuffer(errormessage, + printfPQExpBuffer(&conn->errorMessage, libpq_gettext("out of memory\n")); return false; } @@ -505,16 +629,18 @@ read_server_final_message(fe_scram_state *state, char *input, /* Check for error result. 
*/ if (*input == 'e') { - char *errmsg = read_attr_value(&input, 'e', errormessage); + char *errmsg = read_attr_value(&input, 'e', + &conn->errorMessage); - printfPQExpBuffer(errormessage, - libpq_gettext("error received from server in SASL exchange: %s\n"), + printfPQExpBuffer(&conn->errorMessage, + libpq_gettext("error received from server in SCRAM exchange: %s\n"), errmsg); return false; } /* Parse the message. */ - encoded_server_signature = read_attr_value(&input, 'v', errormessage); + encoded_server_signature = read_attr_value(&input, 'v', + &conn->errorMessage); if (encoded_server_signature == NULL) { /* read_attr_value() has generated an error message */ @@ -522,7 +648,7 @@ read_server_final_message(fe_scram_state *state, char *input, } if (*input != '\0') - printfPQExpBuffer(errormessage, + printfPQExpBuffer(&conn->errorMessage, libpq_gettext("malformed SCRAM message (garbage at end of server-final-message)\n")); server_signature_len = pg_b64_decode(encoded_server_signature, @@ -530,7 +656,7 @@ read_server_final_message(fe_scram_state *state, char *input, state->ServerSignature); if (server_signature_len != SCRAM_KEY_LEN) { - printfPQExpBuffer(errormessage, + printfPQExpBuffer(&conn->errorMessage, libpq_gettext("malformed SCRAM message (invalid server signature)\n")); return false; } @@ -621,7 +747,7 @@ verify_server_signature(fe_scram_state *state) char * pg_fe_scram_build_verifier(const char *password) { - char *prep_password = NULL; + char *prep_password; pg_saslprep_rc rc; char saltbuf[SCRAM_DEFAULT_SALT_LEN]; char *result; diff --git a/src/interfaces/libpq/fe-auth.c b/src/interfaces/libpq/fe-auth.c index 382558f3f8..92641fe5e9 100644 --- a/src/interfaces/libpq/fe-auth.c +++ b/src/interfaces/libpq/fe-auth.c @@ -3,7 +3,7 @@ * fe-auth.c * The front-end (client) authorization routines * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * IDENTIFICATION @@ -39,8 +39,8 @@ #endif #include "common/md5.h" +#include "common/scram-common.h" #include "libpq-fe.h" -#include "libpq/scram.h" #include "fe-auth.h" @@ -199,7 +199,7 @@ pg_GSS_startup(PGconn *conn, int payloadlen) min_stat; int maxlen; gss_buffer_desc temp_gbuf; - char *host = PQhost(conn); + char *host = conn->connhost[conn->whichhost].host; if (!(host && host[0] != '\0')) { @@ -414,7 +414,7 @@ pg_SSPI_startup(PGconn *conn, int use_negotiate, int payloadlen) { SECURITY_STATUS r; TimeStamp expire; - char *host = PQhost(conn); + char *host = conn->connhost[conn->whichhost].host; if (conn->sspictx) { @@ -491,6 +491,7 @@ pg_SASL_init(PGconn *conn, int payloadlen) bool success; const char *selected_mechanism; PQExpBufferData mechanism_buf; + char *password; initPQExpBuffer(&mechanism_buf); @@ -504,7 +505,8 @@ pg_SASL_init(PGconn *conn, int payloadlen) /* * Parse the list of SASL authentication mechanisms in the * AuthenticationSASL message, and select the best mechanism that we - * support. (Only SCRAM-SHA-256 is supported at the moment.) + * support. SCRAM-SHA-256-PLUS and SCRAM-SHA-256 are the only ones + * supported at the moment, listed by order of decreasing importance. */ selected_mechanism = NULL; for (;;) @@ -523,35 +525,34 @@ pg_SASL_init(PGconn *conn, int payloadlen) break; /* - * If we have already selected a mechanism, just skip through the rest - * of the list. + * Select the mechanism to use. 
Pick SCRAM-SHA-256-PLUS over anything + * else if a channel binding type is set. Pick SCRAM-SHA-256 if + * nothing else has already been picked. If we add more mechanisms, a + * more refined priority mechanism might become necessary. */ - if (selected_mechanism) - continue; - - /* - * Do we support this mechanism? - */ - if (strcmp(mechanism_buf.data, SCRAM_SHA256_NAME) == 0) + if (strcmp(mechanism_buf.data, SCRAM_SHA_256_PLUS_NAME) == 0) { - char *password; - - conn->password_needed = true; - password = conn->connhost[conn->whichhost].password; - if (password == NULL) - password = conn->pgpass; - if (password == NULL || password[0] == '\0') + if (conn->ssl_in_use) + selected_mechanism = SCRAM_SHA_256_PLUS_NAME; + else { + /* + * The server offered SCRAM-SHA-256-PLUS, but the connection + * is not SSL-encrypted. That's not sane. Perhaps SSL was + * stripped by a proxy? There's no point in continuing, + * because the server will reject the connection anyway if we + * try authenticate without channel binding even though both + * the client and server supported it. The SCRAM exchange + * checks for that, to prevent downgrade attacks. + */ printfPQExpBuffer(&conn->errorMessage, - PQnoPasswordSupplied); + libpq_gettext("server offered SCRAM-SHA-256-PLUS authentication over a non-SSL connection\n")); goto error; } - - conn->sasl_state = pg_fe_scram_init(conn->pguser, password); - if (!conn->sasl_state) - goto oom_error; - selected_mechanism = SCRAM_SHA256_NAME; } + else if (strcmp(mechanism_buf.data, SCRAM_SHA_256_NAME) == 0 && + !selected_mechanism) + selected_mechanism = SCRAM_SHA_256_NAME; } if (!selected_mechanism) @@ -561,11 +562,44 @@ pg_SASL_init(PGconn *conn, int payloadlen) goto error; } + /* + * Now that the SASL mechanism has been chosen for the exchange, + * initialize its state information. + */ + + /* + * First, select the password to use for the exchange, complaining if + * there isn't one. Currently, all supported SASL mechanisms require a + * password, so we can just go ahead here without further distinction. + */ + conn->password_needed = true; + password = conn->connhost[conn->whichhost].password; + if (password == NULL) + password = conn->pgpass; + if (password == NULL || password[0] == '\0') + { + printfPQExpBuffer(&conn->errorMessage, + PQnoPasswordSupplied); + goto error; + } + + /* + * Initialize the SASL state information with all the information gathered + * during the initial exchange. + * + * Note: Only tls-unique is supported for the moment. 
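The selection loop above prefers SCRAM-SHA-256-PLUS whenever the connection is SSL-encrypted, falls back to SCRAM-SHA-256 otherwise, and treats a -PLUS offer on a non-SSL connection as a possible downgrade attack. A compact sketch of that priority rule over a list of advertised mechanism names, with error handling reduced to a NULL return; choose_mechanism() is an illustrative name, not the patch's API.

#include <stdio.h>
#include <string.h>
#include <stdbool.h>

#define SCRAM_SHA_256_NAME      "SCRAM-SHA-256"
#define SCRAM_SHA_256_PLUS_NAME "SCRAM-SHA-256-PLUS"

/* Pick the best SASL mechanism from the server's list: -PLUS wins if SSL
 * is in use, plain SCRAM-SHA-256 otherwise; a -PLUS offer without SSL is
 * treated as suspicious and rejected outright. */
static const char *
choose_mechanism(const char **offered, int n, bool ssl_in_use)
{
    const char *selected = NULL;
    int i;

    for (i = 0; i < n; i++)
    {
        if (strcmp(offered[i], SCRAM_SHA_256_PLUS_NAME) == 0)
        {
            if (!ssl_in_use)
                return NULL;    /* possible SSL stripping; give up */
            selected = SCRAM_SHA_256_PLUS_NAME;
        }
        else if (strcmp(offered[i], SCRAM_SHA_256_NAME) == 0 && !selected)
            selected = SCRAM_SHA_256_NAME;
    }
    return selected;
}

int
main(void)
{
    const char *offered[] = {SCRAM_SHA_256_PLUS_NAME, SCRAM_SHA_256_NAME};
    const char *pick_ssl = choose_mechanism(offered, 2, true);
    const char *pick_plain = choose_mechanism(offered, 2, false);

    printf("with SSL:    %s\n", pick_ssl ? pick_ssl : "(none)");
    printf("without SSL: %s\n", pick_plain ? pick_plain : "(rejected)");
    return 0;
}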
+ */ + conn->sasl_state = pg_fe_scram_init(conn, + password, + selected_mechanism); + if (!conn->sasl_state) + goto oom_error; + /* Get the mechanism-specific Initial Client Response, if any */ pg_fe_scram_exchange(conn->sasl_state, NULL, -1, &initialresponse, &initialresponselen, - &done, &success, &conn->errorMessage); + &done, &success); if (done && !success) goto error; @@ -646,7 +680,7 @@ pg_SASL_continue(PGconn *conn, int payloadlen, bool final) pg_fe_scram_exchange(conn->sasl_state, challenge, payloadlen, &output, &outputlen, - &done, &success, &conn->errorMessage); + &done, &success); free(challenge); /* don't need the input anymore */ if (final && !done) @@ -722,11 +756,11 @@ pg_local_sendauth(PGconn *conn) if (sendmsg(conn->sock, &msg, 0) == -1) { - char sebuf[256]; + char sebuf[PG_STRERROR_R_BUFLEN]; printfPQExpBuffer(&conn->errorMessage, "pg_local_sendauth: sendmsg: %s\n", - pqStrerror(errno, sebuf, sizeof(sebuf))); + strerror_r(errno, sebuf, sizeof(sebuf))); return STATUS_ERROR; } return STATUS_OK; @@ -1064,7 +1098,7 @@ pg_fe_getauthname(PQExpBuffer errorMessage) printfPQExpBuffer(errorMessage, libpq_gettext("could not look up local user ID %d: %s\n"), (int) user_id, - pqStrerror(pwerr, pwdbuf, sizeof(pwdbuf))); + strerror_r(pwerr, pwdbuf, sizeof(pwdbuf))); else printfPQExpBuffer(errorMessage, libpq_gettext("local user with ID %d does not exist\n"), diff --git a/src/interfaces/libpq/fe-auth.h b/src/interfaces/libpq/fe-auth.h index 5dc6bb5341..a8a27c24a6 100644 --- a/src/interfaces/libpq/fe-auth.h +++ b/src/interfaces/libpq/fe-auth.h @@ -4,7 +4,7 @@ * * Definitions for network authentication routines * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/interfaces/libpq/fe-auth.h @@ -23,11 +23,13 @@ extern int pg_fe_sendauth(AuthRequest areq, int payloadlen, PGconn *conn); extern char *pg_fe_getauthname(PQExpBuffer errorMessage); /* Prototypes for functions in fe-auth-scram.c */ -extern void *pg_fe_scram_init(const char *username, const char *password); +extern void *pg_fe_scram_init(PGconn *conn, + const char *password, + const char *sasl_mechanism); extern void pg_fe_scram_free(void *opaq); extern void pg_fe_scram_exchange(void *opaq, char *input, int inputlen, char **output, int *outputlen, - bool *done, bool *success, PQExpBuffer errorMessage); + bool *done, bool *success); extern char *pg_fe_scram_build_verifier(const char *password); #endif /* FE_AUTH_H */ diff --git a/src/interfaces/libpq/fe-connect.c b/src/interfaces/libpq/fe-connect.c index d0e97ecdd4..d001bc513d 100644 --- a/src/interfaces/libpq/fe-connect.c +++ b/src/interfaces/libpq/fe-connect.c @@ -3,7 +3,7 @@ * fe-connect.c * functions related to setting up a connection to the backend * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * @@ -47,7 +47,6 @@ #ifdef HAVE_NETINET_TCP_H #include #endif -#include #endif #ifdef ENABLE_THREAD_SAFETY @@ -72,7 +71,10 @@ static int ldapServiceLookup(const char *purl, PQconninfoOption *options, #endif #include "common/ip.h" +#include "common/link-canary.h" +#include "common/scram-common.h" #include "mb/pg_wchar.h" +#include "port/pg_bswap.h" #ifndef WIN32 @@ -235,7 +237,7 @@ static const internalPQconninfoOption PQconninfoOptions[] = { 
offsetof(struct pg_conn, pgtty)}, {"options", "PGOPTIONS", DefaultOption, NULL, - "Backend-Debug-Options", "D", 40, + "Backend-Options", "", 40, offsetof(struct pg_conn, pgoptions)}, {"application_name", "PGAPPNAME", NULL, NULL, @@ -272,7 +274,7 @@ static const internalPQconninfoOption PQconninfoOptions[] = { "SSL-Mode", "", 12, /* sizeof("verify-full") == 12 */ offsetof(struct pg_conn, sslmode)}, - {"sslcompression", "PGSSLCOMPRESSION", "1", NULL, + {"sslcompression", "PGSSLCOMPRESSION", "0", NULL, "SSL-Compression", "", 1, offsetof(struct pg_conn, sslcompression)}, @@ -359,7 +361,7 @@ static PGconn *makeEmptyPGconn(void); static bool fillPGconn(PGconn *conn, PQconninfoOption *connOptions); static void freePGconn(PGconn *conn); static void closePGconn(PGconn *conn); -static void release_all_addrinfo(PGconn *conn); +static void release_conn_addrinfo(PGconn *conn); static void sendTerminateConn(PGconn *conn); static PQconninfoOption *conninfo_init(PQExpBuffer errorMessage); static PQconninfoOption *parse_connection_string(const char *conninfo, @@ -398,9 +400,9 @@ static int parseServiceFile(const char *serviceFile, PQconninfoOption *options, PQExpBuffer errorMessage, bool *group_found); -static char *pwdfMatchesString(char *buf, char *token); -static char *passwordFromFile(char *hostname, char *port, char *dbname, - char *username, char *pgpassfile); +static char *pwdfMatchesString(char *buf, const char *token); +static char *passwordFromFile(const char *hostname, const char *port, const char *dbname, + const char *username, const char *pgpassfile); static void pgpassfileWarning(PGconn *conn); static void default_threadlock(int acquire); @@ -414,7 +416,8 @@ pgthreadlock_t pg_g_threadlock = default_threadlock; * * Close any physical connection to the server, and reset associated * state inside the connection object. We don't release state that - * would be needed to reconnect, though. + * would be needed to reconnect, though, nor local state that might still + * be useful later. * * We can always flush the output buffer, since there's no longer any hope * of sending that data. However, unprocessed input data might still be @@ -481,6 +484,64 @@ pqDropConnection(PGconn *conn, bool flushInput) } +/* + * pqDropServerData + * + * Clear all connection state data that was received from (or deduced about) + * the server. This is essential to do between connection attempts to + * different servers, else we may incorrectly hold over some data from the + * old server. + * + * It would be better to merge this into pqDropConnection, perhaps, but + * right now we cannot because that function is called immediately on + * detection of connection loss (cf. pqReadData, for instance). This data + * should be kept until we are actually starting a new connection. 
+ */ +static void +pqDropServerData(PGconn *conn) +{ + PGnotify *notify; + pgParameterStatus *pstatus; + + /* Forget pending notifies */ + notify = conn->notifyHead; + while (notify != NULL) + { + PGnotify *prev = notify; + + notify = notify->next; + free(prev); + } + conn->notifyHead = conn->notifyTail = NULL; + + /* Reset ParameterStatus data, as well as variables deduced from it */ + pstatus = conn->pstatus; + while (pstatus != NULL) + { + pgParameterStatus *prev = pstatus; + + pstatus = pstatus->next; + free(prev); + } + conn->pstatus = NULL; + conn->client_encoding = PG_SQL_ASCII; + conn->std_strings = false; + conn->sversion = 0; + + /* Drop large-object lookup data */ + if (conn->lobjfuncs) + free(conn->lobjfuncs); + conn->lobjfuncs = NULL; + + /* Reset assorted other per-connection state */ + conn->last_sqlstate[0] = '\0'; + conn->auth_req_received = false; + conn->password_needed = false; + conn->be_pid = 0; + conn->be_key = 0; +} + + /* * Connecting to a Database * @@ -894,6 +955,8 @@ parse_comma_separated_list(char **startptr, bool *more) static bool connectOptions2(PGconn *conn) { + int i; + /* * Allocate memory for details about each host to which we might possibly * try to connect. For that, count the number of elements in the hostaddr @@ -913,11 +976,10 @@ connectOptions2(PGconn *conn) /* * We now have one pg_conn_host structure per possible host. Fill in the - * host details for each one. + * host and hostaddr fields for each, by splitting the parameter strings. */ if (conn->pghostaddr != NULL && conn->pghostaddr[0] != '\0') { - int i; char *s = conn->pghostaddr; bool more = true; @@ -926,8 +988,6 @@ connectOptions2(PGconn *conn) conn->connhost[i].hostaddr = parse_comma_separated_list(&s, &more); if (conn->connhost[i].hostaddr == NULL) goto oom_error; - - conn->connhost[i].type = CHT_HOST_ADDRESS; } /* @@ -941,7 +1001,6 @@ connectOptions2(PGconn *conn) if (conn->pghost != NULL && conn->pghost[0] != '\0') { - int i; char *s = conn->pghost; bool more = true; @@ -950,51 +1009,62 @@ connectOptions2(PGconn *conn) conn->connhost[i].host = parse_comma_separated_list(&s, &more); if (conn->connhost[i].host == NULL) goto oom_error; - - /* Identify the type of host. */ - if (conn->pghostaddr == NULL || conn->pghostaddr[0] == '\0') - { - conn->connhost[i].type = CHT_HOST_NAME; -#ifdef HAVE_UNIX_SOCKETS - if (is_absolute_path(conn->connhost[i].host)) - conn->connhost[i].type = CHT_UNIX_SOCKET; -#endif - } } + + /* Check for wrong number of host items. */ if (more || i != conn->nconnhost) { conn->status = CONNECTION_BAD; printfPQExpBuffer(&conn->errorMessage, - libpq_gettext("could not match %d host names to %d hostaddrs\n"), + libpq_gettext("could not match %d host names to %d hostaddr values\n"), count_comma_separated_elems(conn->pghost), conn->nconnhost); return false; } } /* - * If neither host or hostaddr options was given, connect to default host. + * Now, for each host slot, identify the type of address spec, and fill in + * the default address if nothing was given. 
*/ - if ((conn->pghostaddr == NULL || conn->pghostaddr[0] == '\0') && - (conn->pghost == NULL || conn->pghost[0] == '\0')) + for (i = 0; i < conn->nconnhost; i++) { - Assert(conn->nconnhost == 1); + pg_conn_host *ch = &conn->connhost[i]; + + if (ch->hostaddr != NULL && ch->hostaddr[0] != '\0') + ch->type = CHT_HOST_ADDRESS; + else if (ch->host != NULL && ch->host[0] != '\0') + { + ch->type = CHT_HOST_NAME; +#ifdef HAVE_UNIX_SOCKETS + if (is_absolute_path(ch->host)) + ch->type = CHT_UNIX_SOCKET; +#endif + } + else + { + if (ch->host) + free(ch->host); #ifdef HAVE_UNIX_SOCKETS - conn->connhost[0].host = strdup(DEFAULT_PGSOCKET_DIR); - conn->connhost[0].type = CHT_UNIX_SOCKET; + ch->host = strdup(DEFAULT_PGSOCKET_DIR); + ch->type = CHT_UNIX_SOCKET; #else - conn->connhost[0].host = strdup(DefaultHost); - conn->connhost[0].type = CHT_HOST_NAME; + ch->host = strdup(DefaultHost); + ch->type = CHT_HOST_NAME; #endif - if (conn->connhost[0].host == NULL) - goto oom_error; + if (ch->host == NULL) + goto oom_error; + } } /* * Next, work out the port number corresponding to each host name. + * + * Note: unlike the above for host names, this could leave the port fields + * as null or empty strings. We will substitute DEF_PGPORT whenever we + * read such a port field. */ if (conn->pgport != NULL && conn->pgport[0] != '\0') { - int i; char *s = conn->pgport; bool more = true; @@ -1058,57 +1128,49 @@ connectOptions2(PGconn *conn) } /* - * Supply default password if none given. Note that the password might be - * different for each host/port pair. + * If password was not given, try to look it up in password file. Note + * that the result might be different for each host/port pair. */ if (conn->pgpass == NULL || conn->pgpass[0] == '\0') { - int i; - + /* If password file wasn't specified, use ~/PGPASSFILE */ if (conn->pgpassfile == NULL || conn->pgpassfile[0] == '\0') { - /* Identify password file to use; fail if we can't */ char homedir[MAXPGPATH]; - if (!pqGetHomeDirectory(homedir, sizeof(homedir))) + if (pqGetHomeDirectory(homedir, sizeof(homedir))) { - conn->status = CONNECTION_BAD; - printfPQExpBuffer(&conn->errorMessage, - libpq_gettext("could not get home directory to locate password file\n")); - return false; + if (conn->pgpassfile) + free(conn->pgpassfile); + conn->pgpassfile = malloc(MAXPGPATH); + if (!conn->pgpassfile) + goto oom_error; + snprintf(conn->pgpassfile, MAXPGPATH, "%s/%s", + homedir, PGPASSFILE); } - - if (conn->pgpassfile) - free(conn->pgpassfile); - conn->pgpassfile = malloc(MAXPGPATH); - if (!conn->pgpassfile) - goto oom_error; - - snprintf(conn->pgpassfile, MAXPGPATH, "%s/%s", homedir, PGPASSFILE); } - for (i = 0; i < conn->nconnhost; i++) + if (conn->pgpassfile != NULL && conn->pgpassfile[0] != '\0') { - /* - * Try to get a password for this host from pgpassfile. We use - * host name rather than host address in the same manner to - * PQhost(). - */ - char *pwhost = conn->connhost[i].host; - - if (conn->connhost[i].type == CHT_HOST_ADDRESS && - conn->connhost[i].host != NULL && conn->connhost[i].host[0] != '\0') - pwhost = conn->connhost[i].hostaddr; - - conn->connhost[i].password = - passwordFromFile(pwhost, - conn->connhost[i].port, - conn->dbName, - conn->pguser, - conn->pgpassfile); - /* If we got one, set pgpassfile_used */ - if (conn->connhost[i].password != NULL) - conn->pgpassfile_used = true; + for (i = 0; i < conn->nconnhost; i++) + { + /* + * Try to get a password for this host from file. 
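Each connhost slot above ends up classified as a numeric address, a host name, or (for an absolute path, where Unix sockets are available) a socket directory, with the platform default substituted when nothing was given. A rough sketch of that classification for a single slot; classify_host() is an illustrative name, is_absolute_path() is simplified, and "/tmp" merely stands in for DEFAULT_PGSOCKET_DIR.

#include <stdio.h>

typedef enum
{
    CHT_HOST_NAME,
    CHT_HOST_ADDRESS,
    CHT_UNIX_SOCKET
} ConnHostType;

/* Simplified stand-in for PostgreSQL's is_absolute_path() */
static int
is_absolute_path(const char *p)
{
    return p[0] == '/';
}

/* Classify one host slot the way connectOptions2() does above:
 * hostaddr wins, then a host name or socket path, else the default. */
static ConnHostType
classify_host(const char **host, const char *hostaddr)
{
    if (hostaddr && hostaddr[0] != '\0')
        return CHT_HOST_ADDRESS;
    if (*host && (*host)[0] != '\0')
        return is_absolute_path(*host) ? CHT_UNIX_SOCKET : CHT_HOST_NAME;
    *host = "/tmp";             /* stand-in for DEFAULT_PGSOCKET_DIR */
    return CHT_UNIX_SOCKET;
}

int
main(void)
{
    const char *h = "db.example.com";

    printf("type = %d, host = %s\n", (int) classify_host(&h, NULL), h);
    return 0;
}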
We use host + * for the hostname search key if given, else hostaddr (at + * least one of them is guaranteed nonempty by now). + */ + const char *pwhost = conn->connhost[i].host; + + if (pwhost == NULL || pwhost[0] == '\0') + pwhost = conn->connhost[i].hostaddr; + + conn->connhost[i].password = + passwordFromFile(pwhost, + conn->connhost[i].port, + conn->dbName, + conn->pguser, + conn->pgpassfile); + } } } @@ -1397,7 +1459,7 @@ connectNoDelay(PGconn *conn) (char *) &on, sizeof(on)) < 0) { - char sebuf[256]; + char sebuf[PG_STRERROR_R_BUFLEN]; appendPQExpBuffer(&conn->errorMessage, libpq_gettext("could not set socket to TCP no delay mode: %s\n"), @@ -1418,7 +1480,7 @@ connectNoDelay(PGconn *conn) static void connectFailureMessage(PGconn *conn, int errorno) { - char sebuf[256]; + char sebuf[PG_STRERROR_R_BUFLEN]; #ifdef HAVE_UNIX_SOCKETS if (IS_AF_UNIX(conn->raddr.addr.ss_family)) @@ -1525,6 +1587,34 @@ useKeepalives(PGconn *conn) return val != 0 ? 1 : 0; } +/* + * Parse and try to interpret "value" as an integer value, and if successful, + * store it in *result, complaining if there is any trailing garbage or an + * overflow. + */ +static bool +parse_int_param(const char *value, int *result, PGconn *conn, + const char *context) +{ + char *end; + long numval; + + *result = 0; + + errno = 0; + numval = strtol(value, &end, 10); + if (errno == 0 && *end == '\0' && numval == (int) numval) + { + *result = numval; + return true; + } + + appendPQExpBuffer(&conn->errorMessage, + libpq_gettext("invalid integer value \"%s\" for keyword \"%s\"\n"), + value, context); + return false; +} + #ifndef WIN32 /* * Set the keepalive idle timer. @@ -1537,7 +1627,9 @@ setKeepalivesIdle(PGconn *conn) if (conn->keepalives_idle == NULL) return 1; - idle = atoi(conn->keepalives_idle); + if (!parse_int_param(conn->keepalives_idle, &idle, conn, + "keepalives_idle")) + return 0; if (idle < 0) idle = 0; @@ -1545,7 +1637,7 @@ setKeepalivesIdle(PGconn *conn) if (setsockopt(conn->sock, IPPROTO_TCP, PG_TCP_KEEPALIVE_IDLE, (char *) &idle, sizeof(idle)) < 0) { - char sebuf[256]; + char sebuf[PG_STRERROR_R_BUFLEN]; appendPQExpBuffer(&conn->errorMessage, libpq_gettext("setsockopt(%s) failed: %s\n"), @@ -1569,7 +1661,9 @@ setKeepalivesInterval(PGconn *conn) if (conn->keepalives_interval == NULL) return 1; - interval = atoi(conn->keepalives_interval); + if (!parse_int_param(conn->keepalives_interval, &interval, conn, + "keepalives_interval")) + return 0; if (interval < 0) interval = 0; @@ -1577,7 +1671,7 @@ setKeepalivesInterval(PGconn *conn) if (setsockopt(conn->sock, IPPROTO_TCP, TCP_KEEPINTVL, (char *) &interval, sizeof(interval)) < 0) { - char sebuf[256]; + char sebuf[PG_STRERROR_R_BUFLEN]; appendPQExpBuffer(&conn->errorMessage, libpq_gettext("setsockopt(%s) failed: %s\n"), @@ -1602,7 +1696,9 @@ setKeepalivesCount(PGconn *conn) if (conn->keepalives_count == NULL) return 1; - count = atoi(conn->keepalives_count); + if (!parse_int_param(conn->keepalives_count, &count, conn, + "keepalives_count")) + return 0; if (count < 0) count = 0; @@ -1610,7 +1706,7 @@ setKeepalivesCount(PGconn *conn) if (setsockopt(conn->sock, IPPROTO_TCP, TCP_KEEPCNT, (char *) &count, sizeof(count)) < 0) { - char sebuf[256]; + char sebuf[PG_STRERROR_R_BUFLEN]; appendPQExpBuffer(&conn->errorMessage, libpq_gettext("setsockopt(%s) failed: %s\n"), @@ -1636,13 +1732,17 @@ setKeepalivesWin32(PGconn *conn) int idle = 0; int interval = 0; - if (conn->keepalives_idle) - idle = atoi(conn->keepalives_idle); + if (conn->keepalives_idle && + 
!parse_int_param(conn->keepalives_idle, &idle, conn, + "keepalives_idle")) + return 0; if (idle <= 0) idle = 2 * 60 * 60; /* 2 hours = default */ - if (conn->keepalives_interval) - interval = atoi(conn->keepalives_interval); + if (conn->keepalives_interval && + !parse_int_param(conn->keepalives_interval, &interval, conn, + "keepalives_interval")) + return 0; if (interval <= 0) interval = 1; /* 1 second = default */ @@ -1681,130 +1781,45 @@ setKeepalivesWin32(PGconn *conn) static int connectDBStart(PGconn *conn) { - char portstr[MAXPGPATH]; - int ret; - int i; - if (!conn) return 0; if (!conn->options_valid) goto connect_errReturn; + /* + * Check for bad linking to backend-internal versions of src/common + * functions (see comments in link-canary.c for the reason we need this). + * Nobody but developers should see this message, so we don't bother + * translating it. + */ + if (!pg_link_canary_is_frontend()) + { + printfPQExpBuffer(&conn->errorMessage, + "libpq is incorrectly linked to backend functions\n"); + goto connect_errReturn; + } + /* Ensure our buffers are empty */ conn->inStart = conn->inCursor = conn->inEnd = 0; conn->outCount = 0; /* - * Look up socket addresses for each possible host using - * pg_getaddrinfo_all. + * Ensure errorMessage is empty, too. PQconnectPoll will append messages + * to it in the process of scanning for a working server. Thus, if we + * fail to connect to multiple hosts, the final error message will include + * details about each failure. */ - for (i = 0; i < conn->nconnhost; ++i) - { - pg_conn_host *ch = &conn->connhost[i]; - struct addrinfo hint; - int thisport; - - /* Initialize hint structure */ - MemSet(&hint, 0, sizeof(hint)); - hint.ai_socktype = SOCK_STREAM; - hint.ai_family = AF_UNSPEC; - - /* Figure out the port number we're going to use. */ - if (ch->port == NULL || ch->port[0] == '\0') - thisport = DEF_PGPORT; - else - { - thisport = atoi(ch->port); - if (thisport < 1 || thisport > 65535) - { - appendPQExpBuffer(&conn->errorMessage, - libpq_gettext("invalid port number: \"%s\"\n"), - ch->port); - conn->options_valid = false; - goto connect_errReturn; - } - } - snprintf(portstr, sizeof(portstr), "%d", thisport); - - /* Use pg_getaddrinfo_all() to resolve the address */ - ret = 1; - switch (ch->type) - { - case CHT_HOST_NAME: - ret = pg_getaddrinfo_all(ch->host, portstr, &hint, &ch->addrlist); - if (ret || !ch->addrlist) - appendPQExpBuffer(&conn->errorMessage, - libpq_gettext("could not translate host name \"%s\" to address: %s\n"), - ch->host, gai_strerror(ret)); - break; - - case CHT_HOST_ADDRESS: - hint.ai_flags = AI_NUMERICHOST; - ret = pg_getaddrinfo_all(ch->hostaddr, portstr, &hint, &ch->addrlist); - if (ret || !ch->addrlist) - appendPQExpBuffer(&conn->errorMessage, - libpq_gettext("could not parse network address \"%s\": %s\n"), - ch->host, gai_strerror(ret)); - break; - - case CHT_UNIX_SOCKET: -#ifdef HAVE_UNIX_SOCKETS - hint.ai_family = AF_UNIX; - UNIXSOCK_PATH(portstr, thisport, ch->host); - if (strlen(portstr) >= UNIXSOCK_PATH_BUFLEN) - { - appendPQExpBuffer(&conn->errorMessage, - libpq_gettext("Unix-domain socket path \"%s\" is too long (maximum %d bytes)\n"), - portstr, - (int) (UNIXSOCK_PATH_BUFLEN - 1)); - conn->options_valid = false; - goto connect_errReturn; - } - - /* - * NULL hostname tells pg_getaddrinfo_all to parse the service - * name as a Unix-domain socket path. 
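The new parse_int_param() above replaces bare atoi() calls for the keepalive, port, and connect_timeout settings, so trailing garbage and values that overflow an int are reported rather than silently accepted. The essential strtol pattern, outside of the PGconn error plumbing and with an extra empty-string guard added for this standalone sketch, is roughly:

#include <stdio.h>
#include <stdlib.h>
#include <errno.h>
#include <stdbool.h>

/* Parse 'value' as an int, rejecting empty strings, trailing garbage,
 * and values that do not fit in an int. */
static bool
parse_int_strict(const char *value, int *result)
{
    char *end;
    long numval;

    *result = 0;
    errno = 0;
    numval = strtol(value, &end, 10);
    if (errno == 0 && end != value && *end == '\0' && numval == (int) numval)
    {
        *result = (int) numval;
        return true;
    }
    return false;
}

int
main(void)
{
    int v;

    printf("\"5432\" -> %s (%d)\n", parse_int_strict("5432", &v) ? "ok" : "bad", v);
    printf("\"54x2\" -> %s\n", parse_int_strict("54x2", &v) ? "ok" : "bad");
    return 0;
}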
- */ - ret = pg_getaddrinfo_all(NULL, portstr, &hint, &ch->addrlist); - if (ret || !ch->addrlist) - appendPQExpBuffer(&conn->errorMessage, - libpq_gettext("could not translate Unix-domain socket path \"%s\" to address: %s\n"), - portstr, gai_strerror(ret)); - break; -#else - Assert(false); - conn->options_valid = false; - goto connect_errReturn; -#endif - } - if (ret || !ch->addrlist) - { - if (ch->addrlist) - { - pg_freeaddrinfo_all(hint.ai_family, ch->addrlist); - ch->addrlist = NULL; - } - conn->options_valid = false; - goto connect_errReturn; - } - } - -#ifdef USE_SSL - /* setup values based on SSL mode */ - if (conn->sslmode[0] == 'd') /* "disable" */ - conn->allow_ssl_try = false; - else if (conn->sslmode[0] == 'a') /* "allow" */ - conn->wait_ssl_try = true; -#endif + resetPQExpBuffer(&conn->errorMessage); /* - * Set up to try to connect, with protocol 3.0 as the first attempt. + * Set up to try to connect to the first host. (Setting whichhost = -1 is + * a bit of a cheat, but PQconnectPoll will advance it to 0 before + * anything else looks at it.) */ - conn->whichhost = 0; - conn->addr_cur = conn->connhost[0].addrlist; - conn->pversion = PG_PROTOCOL(3, 0); - conn->send_appname = true; + conn->whichhost = -1; + conn->try_next_addr = false; + conn->try_next_host = true; conn->status = CONNECTION_NEEDED; /* @@ -1818,6 +1833,12 @@ connectDBStart(PGconn *conn) return 1; connect_errReturn: + + /* + * If we managed to open a socket, close it immediately rather than + * waiting till PQfinish. (The application cannot have gotten the socket + * from PQsocket yet, so this doesn't risk breaking anything.) + */ pqDropConnection(conn, true); conn->status = CONNECTION_BAD; return 0; @@ -1837,6 +1858,8 @@ connectDBComplete(PGconn *conn) PostgresPollingStatusType flag = PGRES_POLLING_WRITING; time_t finish_time = ((time_t) -1); int timeout = 0; + int last_whichhost = -2; /* certainly different from whichhost */ + struct addrinfo *last_addr_cur = NULL; if (conn == NULL || conn->status == CONNECTION_BAD) return 0; @@ -1846,23 +1869,43 @@ connectDBComplete(PGconn *conn) */ if (conn->connect_timeout != NULL) { - timeout = atoi(conn->connect_timeout); + if (!parse_int_param(conn->connect_timeout, &timeout, conn, + "connect_timeout")) + return 0; + if (timeout > 0) { /* - * Rounding could cause connection to fail; need at least 2 secs + * Rounding could cause connection to fail unexpectedly quickly; + * to prevent possibly waiting hardly-at-all, insist on at least + * two seconds. */ if (timeout < 2) timeout = 2; - /* calculate the finish time based on start + timeout */ - finish_time = time(NULL) + timeout; } + else /* negative means 0 */ + timeout = 0; } for (;;) { int ret = 0; + /* + * (Re)start the connect_timeout timer if it's active and we are + * considering a different host than we were last time through. If + * we've already succeeded, though, needn't recalculate. + */ + if (flag != PGRES_POLLING_OK && + timeout > 0 && + (conn->whichhost != last_whichhost || + conn->addr_cur != last_addr_cur)) + { + finish_time = time(NULL) + timeout; + last_whichhost = conn->whichhost; + last_addr_cur = conn->addr_cur; + } + /* * Wait, if necessary. Note that the initial state (just after * PQconnectStart) is to wait for the socket to select for writing. 
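connectDBComplete() above now clamps connect_timeout to at least two seconds and restarts the deadline whenever polling moves on to a different host or address, instead of spending a single time budget on the whole list. A minimal sketch of that bookkeeping, with the real wait loop omitted; the function and variable names are illustrative, not the patch's.

#include <stdio.h>
#include <time.h>

/* Recompute the per-attempt deadline whenever the (host, address) pair
 * being tried changes, as the loop above does. */
static time_t
maybe_restart_deadline(int timeout, int whichhost, const void *addr_cur,
                       int *last_whichhost, const void **last_addr_cur,
                       time_t old_deadline)
{
    if (timeout <= 0)
        return (time_t) -1;     /* no timeout requested */
    if (timeout < 2)
        timeout = 2;            /* insist on at least two seconds */
    if (whichhost != *last_whichhost || addr_cur != *last_addr_cur)
    {
        *last_whichhost = whichhost;
        *last_addr_cur = addr_cur;
        return time(NULL) + timeout;
    }
    return old_deadline;
}

int
main(void)
{
    int last_host = -2;
    const void *last_addr = NULL;
    time_t deadline = (time_t) -1;

    deadline = maybe_restart_deadline(5, 0, (const void *) 1,
                                      &last_host, &last_addr, deadline);
    printf("deadline is %ld seconds from now\n",
           (long) (deadline - time(NULL)));
    return 0;
}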
@@ -1882,6 +1925,7 @@ connectDBComplete(PGconn *conn) ret = pqWaitTimed(1, 0, conn, finish_time); if (ret == -1) { + /* hard failure, eg select() problem, aborts everything */ conn->status = CONNECTION_BAD; return 0; } @@ -1891,6 +1935,7 @@ connectDBComplete(PGconn *conn) ret = pqWaitTimed(0, 1, conn, finish_time); if (ret == -1) { + /* hard failure, eg select() problem, aborts everything */ conn->status = CONNECTION_BAD; return 0; } @@ -1905,25 +1950,10 @@ connectDBComplete(PGconn *conn) if (ret == 1) /* connect_timeout elapsed */ { /* - * If there are no more hosts, return (the error message is - * already set) + * Give up on current server/address, try the next one. */ - if (++conn->whichhost >= conn->nconnhost) - { - conn->whichhost = 0; - conn->status = CONNECTION_BAD; - return 0; - } - - /* - * Attempt connection to the next host, starting the - * connect_timeout timer - */ - pqDropConnection(conn, true); - conn->addr_cur = conn->connhost[conn->whichhost].addrlist; + conn->try_next_addr = true; conn->status = CONNECTION_NEEDED; - if (conn->connect_timeout != NULL) - finish_time = time(NULL) + timeout; } /* @@ -1935,27 +1965,29 @@ connectDBComplete(PGconn *conn) /* * This subroutine saves conn->errorMessage, which will be restored back by - * restoreErrorMessage subroutine. + * restoreErrorMessage subroutine. Returns false on OOM failure. */ static bool saveErrorMessage(PGconn *conn, PQExpBuffer savedMessage) { initPQExpBuffer(savedMessage); + appendPQExpBufferStr(savedMessage, + conn->errorMessage.data); if (PQExpBufferBroken(savedMessage)) { printfPQExpBuffer(&conn->errorMessage, libpq_gettext("out of memory\n")); return false; } - - appendPQExpBufferStr(savedMessage, - conn->errorMessage.data); + /* Clear whatever is in errorMessage now */ resetPQExpBuffer(&conn->errorMessage); return true; } /* - * Restores saved error messages back to conn->errorMessage. + * Restores saved error messages back to conn->errorMessage, prepending them + * to whatever is in conn->errorMessage already. (This does the right thing + * if anything's been added to conn->errorMessage since saveErrorMessage.) */ static void restoreErrorMessage(PGconn *conn, PQExpBuffer savedMessage) @@ -1963,6 +1995,11 @@ restoreErrorMessage(PGconn *conn, PQExpBuffer savedMessage) appendPQExpBufferStr(savedMessage, conn->errorMessage.data); resetPQExpBuffer(&conn->errorMessage); appendPQExpBufferStr(&conn->errorMessage, savedMessage->data); + /* If any step above hit OOM, just report that */ + if (PQExpBufferBroken(savedMessage) || + PQExpBufferBroken(&conn->errorMessage)) + printfPQExpBuffer(&conn->errorMessage, + libpq_gettext("out of memory\n")); termPQExpBuffer(savedMessage); } @@ -1996,8 +2033,10 @@ restoreErrorMessage(PGconn *conn, PQExpBuffer savedMessage) PostgresPollingStatusType PQconnectPoll(PGconn *conn) { + bool reset_connection_state_machine = false; + bool need_new_connection = false; PGresult *res; - char sebuf[256]; + char sebuf[PG_STRERROR_R_BUFLEN]; int optval; PQExpBufferData savedMessage; @@ -2059,6 +2098,180 @@ PQconnectPoll(PGconn *conn) keep_going: /* We will come back to here until there is * nothing left to do. */ + + /* Time to advance to next address, or next host if no more addresses? */ + if (conn->try_next_addr) + { + if (conn->addr_cur && conn->addr_cur->ai_next) + { + conn->addr_cur = conn->addr_cur->ai_next; + reset_connection_state_machine = true; + } + else + conn->try_next_host = true; + conn->try_next_addr = false; + } + + /* Time to advance to next connhost[] entry? 
*/ + if (conn->try_next_host) + { + pg_conn_host *ch; + struct addrinfo hint; + int thisport; + int ret; + char portstr[MAXPGPATH]; + + if (conn->whichhost + 1 >= conn->nconnhost) + { + /* + * Oops, no more hosts. An appropriate error message is already + * set up, so just set the right status. + */ + goto error_return; + } + conn->whichhost++; + + /* Drop any address info for previous host */ + release_conn_addrinfo(conn); + + /* + * Look up info for the new host. On failure, log the problem in + * conn->errorMessage, then loop around to try the next host. (Note + * we don't clear try_next_host until we've succeeded.) + */ + ch = &conn->connhost[conn->whichhost]; + + /* Initialize hint structure */ + MemSet(&hint, 0, sizeof(hint)); + hint.ai_socktype = SOCK_STREAM; + conn->addrlist_family = hint.ai_family = AF_UNSPEC; + + /* Figure out the port number we're going to use. */ + if (ch->port == NULL || ch->port[0] == '\0') + thisport = DEF_PGPORT; + else + { + if (!parse_int_param(ch->port, &thisport, conn, "port")) + goto error_return; + + if (thisport < 1 || thisport > 65535) + { + appendPQExpBuffer(&conn->errorMessage, + libpq_gettext("invalid port number: \"%s\"\n"), + ch->port); + goto keep_going; + } + } + snprintf(portstr, sizeof(portstr), "%d", thisport); + + /* Use pg_getaddrinfo_all() to resolve the address */ + switch (ch->type) + { + case CHT_HOST_NAME: + ret = pg_getaddrinfo_all(ch->host, portstr, &hint, + &conn->addrlist); + if (ret || !conn->addrlist) + { + appendPQExpBuffer(&conn->errorMessage, + libpq_gettext("could not translate host name \"%s\" to address: %s\n"), + ch->host, gai_strerror(ret)); + goto keep_going; + } + break; + + case CHT_HOST_ADDRESS: + hint.ai_flags = AI_NUMERICHOST; + ret = pg_getaddrinfo_all(ch->hostaddr, portstr, &hint, + &conn->addrlist); + if (ret || !conn->addrlist) + { + appendPQExpBuffer(&conn->errorMessage, + libpq_gettext("could not parse network address \"%s\": %s\n"), + ch->hostaddr, gai_strerror(ret)); + goto keep_going; + } + break; + + case CHT_UNIX_SOCKET: +#ifdef HAVE_UNIX_SOCKETS + conn->addrlist_family = hint.ai_family = AF_UNIX; + UNIXSOCK_PATH(portstr, thisport, ch->host); + if (strlen(portstr) >= UNIXSOCK_PATH_BUFLEN) + { + appendPQExpBuffer(&conn->errorMessage, + libpq_gettext("Unix-domain socket path \"%s\" is too long (maximum %d bytes)\n"), + portstr, + (int) (UNIXSOCK_PATH_BUFLEN - 1)); + goto keep_going; + } + + /* + * NULL hostname tells pg_getaddrinfo_all to parse the service + * name as a Unix-domain socket path. + */ + ret = pg_getaddrinfo_all(NULL, portstr, &hint, + &conn->addrlist); + if (ret || !conn->addrlist) + { + appendPQExpBuffer(&conn->errorMessage, + libpq_gettext("could not translate Unix-domain socket path \"%s\" to address: %s\n"), + portstr, gai_strerror(ret)); + goto keep_going; + } +#else + Assert(false); +#endif + break; + } + + /* OK, scan this addrlist for a working server address */ + conn->addr_cur = conn->addrlist; + reset_connection_state_machine = true; + conn->try_next_host = false; + } + + /* Reset connection state machine? */ + if (reset_connection_state_machine) + { + /* + * (Re) initialize our connection control variables for a set of + * connection attempts to a single server address. These variables + * must persist across individual connection attempts, but we must + * reset them when we start to consider a new server. 
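PQconnectPoll() above is now driven by a pair of request flags: try_next_addr advances within the current address list and falls through to try_next_host when the list is exhausted, and moving to a new host resets the per-server state (protocol version, SSL retry flags) before any connection attempt. The same control flow, reduced to plain loops over hypothetical arrays with connect_ok() standing in for the real nonblocking connect dance:

#include <stdio.h>
#include <stdbool.h>

#define NHOSTS 2

/* Pretend only host 1, address 0 accepts the connection. */
static bool
connect_ok(int host, int addr)
{
    return host == 1 && addr == 0;
}

int
main(void)
{
    static const int naddrs[NHOSTS] = {2, 1};
    int whichhost;

    for (whichhost = 0; whichhost < NHOSTS; whichhost++)
    {
        int addr;

        /* per-server state would be reset here, as the
         * reset_connection_state_machine block above does */
        for (addr = 0; addr < naddrs[whichhost]; addr++)
        {
            if (connect_ok(whichhost, addr))
            {
                printf("connected to host %d, address %d\n", whichhost, addr);
                return 0;
            }
            /* failure: equivalent of setting try_next_addr */
        }
        /* addresses exhausted: equivalent of setting try_next_host */
    }
    fprintf(stderr, "all hosts failed\n");
    return 1;
}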
+ */ + conn->pversion = PG_PROTOCOL(3, 0); + conn->send_appname = true; +#ifdef USE_SSL + /* initialize these values based on SSL mode */ + conn->allow_ssl_try = (conn->sslmode[0] != 'd'); /* "disable" */ + conn->wait_ssl_try = (conn->sslmode[0] == 'a'); /* "allow" */ +#endif + + reset_connection_state_machine = false; + need_new_connection = true; + } + + /* Force a new connection (perhaps to the same server as before)? */ + if (need_new_connection) + { + /* Drop any existing connection */ + pqDropConnection(conn, true); + + /* Reset all state obtained from old server */ + pqDropServerData(conn); + + /* Drop any PGresult we might have, too */ + conn->asyncStatus = PGASYNC_IDLE; + conn->xactStatus = PQTRANS_IDLE; + pqClearAsyncResult(conn); + + /* Reset conn->status to put the state machine in the right state */ + conn->status = CONNECTION_NEEDED; + + need_new_connection = false; + } + + /* Now try to advance the state machine for this connection */ switch (conn->status) { case CONNECTION_NEEDED: @@ -2066,29 +2279,25 @@ PQconnectPoll(PGconn *conn) /* * Try to initiate a connection to one of the addresses * returned by pg_getaddrinfo_all(). conn->addr_cur is the - * next one to try. We fail when we run out of addresses. + * next one to try. + * + * The extra level of braces here is historical. It's not + * worth reindenting this whole switch case to remove 'em. */ - for (;;) { - struct addrinfo *addr_cur; + struct addrinfo *addr_cur = conn->addr_cur; /* * Advance to next possible host, if we've tried all of * the addresses for the current host. */ - if (conn->addr_cur == NULL) + if (addr_cur == NULL) { - if (++conn->whichhost >= conn->nconnhost) - { - conn->whichhost = 0; - break; - } - conn->addr_cur = - conn->connhost[conn->whichhost].addrlist; + conn->try_next_host = true; + goto keep_going; } /* Remember current address for possible error msg */ - addr_cur = conn->addr_cur; memcpy(&conn->raddr.addr, addr_cur->ai_addr, addr_cur->ai_addrlen); conn->raddr.salen = addr_cur->ai_addrlen; @@ -2097,33 +2306,35 @@ PQconnectPoll(PGconn *conn) if (conn->sock == PGINVALID_SOCKET) { /* - * ignore socket() failure if we have more addresses - * to try + * Silently ignore socket() failure if we have more + * addresses to try; this reduces useless chatter in + * cases where the address list includes both IPv4 and + * IPv6 but kernel only accepts one family. */ if (addr_cur->ai_next != NULL || conn->whichhost + 1 < conn->nconnhost) { - conn->addr_cur = addr_cur->ai_next; - continue; + conn->try_next_addr = true; + goto keep_going; } appendPQExpBuffer(&conn->errorMessage, libpq_gettext("could not create socket: %s\n"), SOCK_STRERROR(SOCK_ERRNO, sebuf, sizeof(sebuf))); - break; + goto error_return; } /* * Select socket options: no delay of outgoing data for - * TCP sockets, nonblock mode, close-on-exec. Fail if any - * of this fails. + * TCP sockets, nonblock mode, close-on-exec. Try the + * next address if any of this fails. 
*/ if (!IS_AF_UNIX(addr_cur->ai_family)) { if (!connectNoDelay(conn)) { - pqDropConnection(conn, true); - conn->addr_cur = addr_cur->ai_next; - continue; + /* error message already created */ + conn->try_next_addr = true; + goto keep_going; } } if (!pg_set_noblock(conn->sock)) @@ -2131,9 +2342,8 @@ PQconnectPoll(PGconn *conn) appendPQExpBuffer(&conn->errorMessage, libpq_gettext("could not set socket to nonblocking mode: %s\n"), SOCK_STRERROR(SOCK_ERRNO, sebuf, sizeof(sebuf))); - pqDropConnection(conn, true); - conn->addr_cur = addr_cur->ai_next; - continue; + conn->try_next_addr = true; + goto keep_going; } #ifdef F_SETFD @@ -2142,9 +2352,8 @@ PQconnectPoll(PGconn *conn) appendPQExpBuffer(&conn->errorMessage, libpq_gettext("could not set socket to close-on-exec mode: %s\n"), SOCK_STRERROR(SOCK_ERRNO, sebuf, sizeof(sebuf))); - pqDropConnection(conn, true); - conn->addr_cur = addr_cur->ai_next; - continue; + conn->try_next_addr = true; + goto keep_going; } #endif /* F_SETFD */ @@ -2190,9 +2399,8 @@ PQconnectPoll(PGconn *conn) if (err) { - pqDropConnection(conn, true); - conn->addr_cur = addr_cur->ai_next; - continue; + conn->try_next_addr = true; + goto keep_going; } } @@ -2271,25 +2479,13 @@ PQconnectPoll(PGconn *conn) } /* - * This connection failed --- set up error report, then - * close socket (do it this way in case close() affects - * the value of errno...). We will ignore the connect() - * failure and keep going if there are more addresses. + * This connection failed. Add the error report to + * conn->errorMessage, then try the next address if any. */ connectFailureMessage(conn, SOCK_ERRNO); - pqDropConnection(conn, true); - - /* - * Try the next address, if any. - */ - conn->addr_cur = addr_cur->ai_next; - } /* loop over addresses */ - - /* - * Oops, no more addresses. An appropriate error message is - * already set up, so just set the right status. - */ - goto error_return; + conn->try_next_addr = true; + goto keep_going; + } } case CONNECTION_STARTED: @@ -2322,20 +2518,13 @@ PQconnectPoll(PGconn *conn) * error message. */ connectFailureMessage(conn, optval); - pqDropConnection(conn, true); /* - * If more addresses remain, keep trying, just as in the - * case where connect() returned failure immediately. + * Try the next address if any, just as in the case where + * connect() returned failure immediately. */ - if (conn->addr_cur->ai_next != NULL || - conn->whichhost + 1 < conn->nconnhost) - { - conn->addr_cur = conn->addr_cur->ai_next; - conn->status = CONNECTION_NEEDED; - goto keep_going; - } - goto error_return; + conn->try_next_addr = true; + goto keep_going; } /* Fill in the client address */ @@ -2391,7 +2580,7 @@ PQconnectPoll(PGconn *conn) else appendPQExpBuffer(&conn->errorMessage, libpq_gettext("could not get peer credentials: %s\n"), - pqStrerror(errno, sebuf, sizeof(sebuf))); + strerror_r(errno, sebuf, sizeof(sebuf))); goto error_return; } @@ -2402,7 +2591,7 @@ PQconnectPoll(PGconn *conn) appendPQExpBuffer(&conn->errorMessage, libpq_gettext("could not look up local user ID %d: %s\n"), (int) uid, - pqStrerror(passerr, sebuf, sizeof(sebuf))); + strerror_r(passerr, sebuf, sizeof(sebuf))); else appendPQExpBuffer(&conn->errorMessage, libpq_gettext("local user with ID %d does not exist\n"), @@ -2443,7 +2632,7 @@ PQconnectPoll(PGconn *conn) * shouldn't since we only got here if the socket is * write-ready. 
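/*
 * Illustrative sketch (not part of the patch): the hunks above replace the
 * inline "advance to the next address/host" logic with the try_next_addr /
 * try_next_host flags, but the polling contract that applications see is
 * unchanged.  Assuming a hypothetical conninfo string, a caller of the
 * nonblocking API still drives the state machine roughly like this:
 */
#include <stdio.h>
#include <sys/select.h>
#include <libpq-fe.h>

static PGconn *
connect_async(const char *conninfo)
{
	PGconn	   *conn = PQconnectStart(conninfo);

	if (conn == NULL || PQstatus(conn) == CONNECTION_BAD)
		return conn;			/* out of memory, or options were unusable */

	for (;;)
	{
		PostgresPollingStatusType st = PQconnectPoll(conn);
		int			sock = PQsocket(conn);
		fd_set		fds;

		if (st == PGRES_POLLING_OK || st == PGRES_POLLING_FAILED)
			break;

		FD_ZERO(&fds);
		FD_SET(sock, &fds);
		if (st == PGRES_POLLING_READING)
			(void) select(sock + 1, &fds, NULL, NULL, NULL);
		else					/* PGRES_POLLING_WRITING */
			(void) select(sock + 1, NULL, &fds, NULL, NULL);
	}

	if (PQstatus(conn) != CONNECTION_OK)
		fprintf(stderr, "connection failed: %s", PQerrorMessage(conn));
	return conn;
}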
*/ - pv = htonl(NEGOTIATE_SSL_CODE); + pv = pg_hton32(NEGOTIATE_SSL_CODE); if (pqPacketSend(conn, 0, &pv, sizeof(pv)) != STATUS_OK) { appendPQExpBuffer(&conn->errorMessage, @@ -2610,12 +2799,13 @@ PQconnectPoll(PGconn *conn) { /* only retry once */ conn->allow_ssl_try = false; - /* Must drop the old connection */ - pqDropConnection(conn, true); - conn->status = CONNECTION_NEEDED; + need_new_connection = true; goto keep_going; } + /* Else it's a hard failure */ + goto error_return; } + /* Else, return POLLING_READING or POLLING_WRITING status */ return pollres; #else /* !USE_SSL */ /* can't get here */ @@ -2722,9 +2912,7 @@ PQconnectPoll(PGconn *conn) if (PG_PROTOCOL_MAJOR(conn->pversion) >= 3) { conn->pversion = PG_PROTOCOL(2, 0); - /* Must drop the old connection */ - pqDropConnection(conn, true); - conn->status = CONNECTION_NEEDED; + need_new_connection = true; goto keep_going; } @@ -2775,6 +2963,9 @@ PQconnectPoll(PGconn *conn) /* OK, we read the message; mark data consumed */ conn->inStart = conn->inCursor; + /* Check to see if we should mention pgpassfile */ + pgpassfileWarning(conn); + #ifdef USE_SSL /* @@ -2788,9 +2979,7 @@ PQconnectPoll(PGconn *conn) { /* only retry once */ conn->wait_ssl_try = false; - /* Must drop the old connection */ - pqDropConnection(conn, true); - conn->status = CONNECTION_NEEDED; + need_new_connection = true; goto keep_going; } @@ -2799,14 +2988,13 @@ PQconnectPoll(PGconn *conn) * then do a non-SSL retry */ if (conn->sslmode[0] == 'p' /* "prefer" */ - && conn->allow_ssl_try + && conn->ssl_in_use + && conn->allow_ssl_try /* redundant? */ && !conn->wait_ssl_try) /* redundant? */ { /* only retry once */ conn->allow_ssl_try = false; - /* Must drop the old connection */ - pqDropConnection(conn, true); - conn->status = CONNECTION_NEEDED; + need_new_connection = true; goto keep_going; } #endif @@ -2942,9 +3130,7 @@ PQconnectPoll(PGconn *conn) { PQclear(res); conn->send_appname = false; - /* Must drop the old connection */ - pqDropConnection(conn, true); - conn->status = CONNECTION_NEEDED; + need_new_connection = true; goto keep_going; } } @@ -2973,17 +3159,21 @@ PQconnectPoll(PGconn *conn) /* * If a read-write connection is required, see if we have one. + * + * Servers before 7.4 lack the transaction_read_only GUC, but + * by the same token they don't have any read-only mode, so we + * may just skip the test in that case. */ - if (conn->target_session_attrs != NULL && + if (conn->sversion >= 70400 && + conn->target_session_attrs != NULL && strcmp(conn->target_session_attrs, "read-write") == 0) { /* - * We are yet to make a connection. Save all existing - * error messages until we make a successful connection - * state. This is important because PQsendQuery is going - * to reset conn->errorMessage and we will lose error - * messages related to previous hosts we have tried to - * connect and failed. + * Save existing error messages across the PQsendQuery + * attempt. This is necessary because PQsendQuery is + * going to reset conn->errorMessage, so we would lose + * error messages related to previous hosts we have tried + * and failed to connect to. */ if (!saveErrorMessage(conn, &savedMessage)) goto error_return; @@ -3000,8 +3190,8 @@ PQconnectPoll(PGconn *conn) return PGRES_POLLING_READING; } - /* We can release the address lists now. */ - release_all_addrinfo(conn); + /* We can release the address list now. */ + release_conn_addrinfo(conn); /* We are open for business! 
*/ conn->status = CONNECTION_OK; @@ -3036,9 +3226,16 @@ PQconnectPoll(PGconn *conn) } /* - * If a read-write connection is requested check for same. + * If a read-write connection is required, see if we have one. + * (This should match the stanza in the CONNECTION_AUTH_OK case + * above.) + * + * Servers before 7.4 lack the transaction_read_only GUC, but by + * the same token they don't have any read-only mode, so we may + * just skip the test in that case. */ - if (conn->target_session_attrs != NULL && + if (conn->sversion >= 70400 && + conn->target_session_attrs != NULL && strcmp(conn->target_session_attrs, "read-write") == 0) { if (!saveErrorMessage(conn, &savedMessage)) @@ -3056,8 +3253,8 @@ PQconnectPoll(PGconn *conn) return PGRES_POLLING_READING; } - /* We can release the address lists now. */ - release_all_addrinfo(conn); + /* We can release the address list now. */ + release_conn_addrinfo(conn); /* We are open for business! */ conn->status = CONNECTION_OK; @@ -3072,7 +3269,6 @@ PQconnectPoll(PGconn *conn) if (PQisBusy(conn)) { conn->status = CONNECTION_CONSUME; - restoreErrorMessage(conn, &savedMessage); return PGRES_POLLING_READING; } @@ -3087,6 +3283,9 @@ PQconnectPoll(PGconn *conn) goto keep_going; } + /* We can release the address list now. */ + release_conn_addrinfo(conn); + /* We are open for business! */ conn->status = CONNECTION_OK; return PGRES_POLLING_OK; @@ -3122,9 +3321,14 @@ PQconnectPoll(PGconn *conn) val = PQgetvalue(res, 0, 0); if (strncmp(val, "on", 2) == 0) { + /* Not writable; fail this connection. */ const char *displayed_host; const char *displayed_port; + PQclear(res); + restoreErrorMessage(conn, &savedMessage); + + /* Append error report to conn->errorMessage. */ if (conn->connhost[conn->whichhost].type == CHT_HOST_ADDRESS) displayed_host = conn->connhost[conn->whichhost].hostaddr; else @@ -3133,36 +3337,28 @@ PQconnectPoll(PGconn *conn) if (displayed_port == NULL || displayed_port[0] == '\0') displayed_port = DEF_PGPORT_STR; - PQclear(res); - restoreErrorMessage(conn, &savedMessage); - - /* Not writable; close connection. */ appendPQExpBuffer(&conn->errorMessage, libpq_gettext("could not make a writable " "connection to server " "\"%s:%s\"\n"), displayed_host, displayed_port); + + /* Close connection politely. */ conn->status = CONNECTION_OK; sendTerminateConn(conn); - pqDropConnection(conn, true); - /* Skip any remaining addresses for this host. */ - conn->addr_cur = NULL; - if (conn->whichhost + 1 < conn->nconnhost) - { - conn->status = CONNECTION_NEEDED; - goto keep_going; - } - - /* No more addresses to try. So we fail. */ - goto error_return; + /* + * Try next host if any, but we don't want to consider + * additional addresses for this host. + */ + conn->try_next_host = true; + goto keep_going; } + + /* Session is read-write, so we're good. */ PQclear(res); termPQExpBuffer(&savedMessage); - /* We can release the address lists now. */ - release_all_addrinfo(conn); - /* * Finish reading any remaining messages before being * considered as ready. @@ -3179,6 +3375,7 @@ PQconnectPoll(PGconn *conn) PQclear(res); restoreErrorMessage(conn, &savedMessage); + /* Append error report to conn->errorMessage. */ if (conn->connhost[conn->whichhost].type == CHT_HOST_ADDRESS) displayed_host = conn->connhost[conn->whichhost].hostaddr; else @@ -3190,20 +3387,14 @@ PQconnectPoll(PGconn *conn) libpq_gettext("test \"SHOW transaction_read_only\" failed " "on server \"%s:%s\"\n"), displayed_host, displayed_port); + + /* Close connection politely. 
*/ conn->status = CONNECTION_OK; sendTerminateConn(conn); - pqDropConnection(conn, true); - - if (conn->addr_cur->ai_next != NULL || - conn->whichhost + 1 < conn->nconnhost) - { - conn->addr_cur = conn->addr_cur->ai_next; - conn->status = CONNECTION_NEEDED; - goto keep_going; - } - /* No more addresses to try. So we fail. */ - goto error_return; + /* Try next address */ + conn->try_next_addr = true; + goto keep_going; } default: @@ -3218,8 +3409,6 @@ PQconnectPoll(PGconn *conn) error_return: - pgpassfileWarning(conn); - /* * We used to close the socket at this point, but that makes it awkward * for those above us if they wish to remove this socket from their own @@ -3347,14 +3536,6 @@ makeEmptyPGconn(void) conn->verbosity = PQERRORS_DEFAULT; conn->show_context = PQSHOW_CONTEXT_ERRORS; conn->sock = PGINVALID_SOCKET; - conn->auth_req_received = false; - conn->password_needed = false; - conn->pgpassfile_used = false; -#ifdef USE_SSL - conn->allow_ssl_try = true; - conn->wait_ssl_try = false; - conn->ssl_in_use = false; -#endif /* * We try to send at least 8K at a time, which is the usual size of pipe @@ -3514,32 +3695,18 @@ freePGconn(PGconn *conn) } /* - * release_all_addrinfo - * - free addrinfo of all hostconn elements. + * release_conn_addrinfo + * - Free any addrinfo list in the PGconn. */ - static void -release_all_addrinfo(PGconn *conn) +release_conn_addrinfo(PGconn *conn) { - if (conn->connhost != NULL) + if (conn->addrlist) { - int i; - - for (i = 0; i < conn->nconnhost; ++i) - { - int family = AF_UNSPEC; - -#ifdef HAVE_UNIX_SOCKETS - if (conn->connhost[i].type == CHT_UNIX_SOCKET) - family = AF_UNIX; -#endif - - pg_freeaddrinfo_all(family, - conn->connhost[i].addrlist); - conn->connhost[i].addrlist = NULL; - } + pg_freeaddrinfo_all(conn->addrlist_family, conn->addrlist); + conn->addrlist = NULL; + conn->addr_cur = NULL; /* for safety */ } - conn->addr_cur = NULL; } /* @@ -3576,9 +3743,9 @@ sendTerminateConn(PGconn *conn) static void closePGconn(PGconn *conn) { - PGnotify *notify; - pgParameterStatus *pstatus; - + /* + * If possible, send Terminate message to close the connection politely. + */ sendTerminateConn(conn); /* @@ -3587,7 +3754,7 @@ closePGconn(PGconn *conn) * Don't call PQsetnonblocking() because it will fail if it's unable to * flush the connection. */ - conn->nonblocking = FALSE; + conn->nonblocking = false; /* * Close the connection, reset all transient state, flush I/O buffers. @@ -3595,31 +3762,13 @@ closePGconn(PGconn *conn) pqDropConnection(conn, true); conn->status = CONNECTION_BAD; /* Well, not really _bad_ - just absent */ conn->asyncStatus = PGASYNC_IDLE; + conn->xactStatus = PQTRANS_IDLE; pqClearAsyncResult(conn); /* deallocate result */ resetPQExpBuffer(&conn->errorMessage); - release_all_addrinfo(conn); - - notify = conn->notifyHead; - while (notify != NULL) - { - PGnotify *prev = notify; - - notify = notify->next; - free(prev); - } - conn->notifyHead = conn->notifyTail = NULL; - pstatus = conn->pstatus; - while (pstatus != NULL) - { - pgParameterStatus *prev = pstatus; + release_conn_addrinfo(conn); - pstatus = pstatus->next; - free(prev); - } - conn->pstatus = NULL; - if (conn->lobjfuncs) - free(conn->lobjfuncs); - conn->lobjfuncs = NULL; + /* Reset all state obtained from server, too */ + pqDropServerData(conn); } /* @@ -3782,8 +3931,8 @@ PQfreeCancel(PGcancel *cancel) * PQcancel and PQrequestCancel: attempt to request cancellation of the * current operation. 
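/*
 * Illustrative sketch (not part of the patch): the transaction_read_only
 * probe above is what backs target_session_attrs=read-write across a host
 * list.  Host names and ports below are placeholders.  With the PQhost() /
 * PQport() changes further down, the values printed are those of the host
 * that was actually chosen.
 */
#include <stdio.h>
#include <libpq-fe.h>

int
main(void)
{
	PGconn	   *conn = PQconnectdb("host=pg1.example,pg2.example "
								   "port=5432,5432 dbname=app "
								   "target_session_attrs=read-write");

	if (PQstatus(conn) != CONNECTION_OK)
		fprintf(stderr, "no writable server: %s", PQerrorMessage(conn));
	else
		printf("connected to %s:%s\n", PQhost(conn), PQport(conn));
	PQfinish(conn);
	return 0;
}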
* - * The return value is TRUE if the cancel request was successfully - * dispatched, FALSE if not (in which case an error message is available). + * The return value is true if the cancel request was successfully + * dispatched, false if not (in which case an error message is available). * Note: successful dispatch is no guarantee that there will be any effect at * the backend. The application must read the operation result as usual. * @@ -3804,7 +3953,7 @@ internal_cancel(SockAddr *raddr, int be_pid, int be_key, { int save_errno = SOCK_ERRNO; pgsocket tmpsock = PGINVALID_SOCKET; - char sebuf[256]; + char sebuf[PG_STRERROR_R_BUFLEN]; int maxlen; struct { @@ -3838,10 +3987,10 @@ internal_cancel(SockAddr *raddr, int be_pid, int be_key, /* Create and send the cancel request packet. */ - crp.packetlen = htonl((uint32) sizeof(crp)); - crp.cp.cancelRequestCode = (MsgType) htonl(CANCEL_REQUEST_CODE); - crp.cp.backendPID = htonl(be_pid); - crp.cp.cancelAuthCode = htonl(be_key); + crp.packetlen = pg_hton32((uint32) sizeof(crp)); + crp.cp.cancelRequestCode = (MsgType) pg_hton32(CANCEL_REQUEST_CODE); + crp.cp.backendPID = pg_hton32(be_pid); + crp.cp.cancelAuthCode = pg_hton32(be_key); retry4: if (send(tmpsock, (char *) &crp, sizeof(crp), 0) != (int) sizeof(crp)) @@ -3872,7 +4021,7 @@ internal_cancel(SockAddr *raddr, int be_pid, int be_key, /* All done */ closesocket(tmpsock); SOCK_ERRNO_SET(save_errno); - return TRUE; + return true; cancel_errReturn: @@ -3890,13 +4039,13 @@ internal_cancel(SockAddr *raddr, int be_pid, int be_key, if (tmpsock != PGINVALID_SOCKET) closesocket(tmpsock); SOCK_ERRNO_SET(save_errno); - return FALSE; + return false; } /* * PQcancel: request query cancel * - * Returns TRUE if able to send the cancel request, FALSE if not. + * Returns true if able to send the cancel request, false if not. * * On failure, an error message is stored in *errbuf, which must be of size * errbufsize (recommended size is 256 bytes). *errbuf is not changed on @@ -3908,7 +4057,7 @@ PQcancel(PGcancel *cancel, char *errbuf, int errbufsize) if (!cancel) { strlcpy(errbuf, "PQcancel() -- no cancel object supplied", errbufsize); - return FALSE; + return false; } return internal_cancel(&cancel->raddr, cancel->be_pid, cancel->be_key, @@ -3918,7 +4067,7 @@ PQcancel(PGcancel *cancel, char *errbuf, int errbufsize) /* * PQrequestCancel: old, not thread-safe function for requesting query cancel * - * Returns TRUE if able to send the cancel request, FALSE if not. + * Returns true if able to send the cancel request, false if not. * * On failure, the error message is saved in conn->errorMessage; this means * that this can't be used when there might be other active operations on @@ -3934,7 +4083,7 @@ PQrequestCancel(PGconn *conn) /* Check we have an open connection */ if (!conn) - return FALSE; + return false; if (conn->sock == PGINVALID_SOCKET) { @@ -3943,7 +4092,7 @@ PQrequestCancel(PGconn *conn) conn->errorMessage.maxlen); conn->errorMessage.len = strlen(conn->errorMessage.data); - return FALSE; + return false; } r = internal_cancel(&conn->raddr, conn->be_pid, conn->be_key, @@ -4469,6 +4618,16 @@ ldapServiceLookup(const char *purl, PQconninfoOption *options, #define MAXBUFSIZE 256 +/* + * parseServiceInfo: if a service name has been given, look it up and absorb + * connection options from it into *options. + * + * Returns 0 on success, nonzero on failure. On failure, if errorMessage + * isn't null, also store an error message there. 
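/*
 * Illustrative sketch (not part of the patch): PQcancel() is the thread-safe
 * entry point into internal_cancel() shown above.  A signal handler or a
 * second thread could use it like this; "conn" is assumed to be an
 * established connection with a query in flight.
 */
#include <stdio.h>
#include <libpq-fe.h>

static void
cancel_current_query(PGconn *conn)
{
	PGcancel   *cancel = PQgetCancel(conn);
	char		errbuf[256];	/* recommended size per the comment above */

	if (cancel == NULL)
		return;					/* connection not in a cancellable state */

	if (!PQcancel(cancel, errbuf, sizeof(errbuf)))
		fprintf(stderr, "could not send cancel request: %s\n", errbuf);

	PQfreeCancel(cancel);
	/* Whatever happens, the pending PQgetResult() must still be consumed. */
}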
(Note: the only reason + * this function and related ones don't dump core on errorMessage == NULL + * is the undocumented fact that printfPQExpBuffer does nothing when passed + * a null PQExpBuffer pointer.) + */ static int parseServiceInfo(PQconninfoOption *options, PQExpBuffer errorMessage) { @@ -4487,9 +4646,14 @@ parseServiceInfo(PQconninfoOption *options, PQExpBuffer errorMessage) if (service == NULL) service = getenv("PGSERVICE"); + /* If no service name given, nothing to do */ if (service == NULL) return 0; + /* + * Try PGSERVICEFILE if specified, else try ~/.pg_service.conf (if that + * exists). + */ if ((env = getenv("PGSERVICEFILE")) != NULL) strlcpy(serviceFile, env, sizeof(serviceFile)); else @@ -4497,13 +4661,9 @@ parseServiceInfo(PQconninfoOption *options, PQExpBuffer errorMessage) char homedir[MAXPGPATH]; if (!pqGetHomeDirectory(homedir, sizeof(homedir))) - { - printfPQExpBuffer(errorMessage, libpq_gettext("could not get home directory to locate service definition file")); - return 1; - } + goto next_file; snprintf(serviceFile, MAXPGPATH, "%s/%s", homedir, ".pg_service.conf"); - errno = 0; - if (stat(serviceFile, &stat_buf) != 0 && errno == ENOENT) + if (stat(serviceFile, &stat_buf) != 0) goto next_file; } @@ -4519,8 +4679,7 @@ parseServiceInfo(PQconninfoOption *options, PQExpBuffer errorMessage) */ snprintf(serviceFile, MAXPGPATH, "%s/pg_service.conf", getenv("PGSYSCONFDIR") ? getenv("PGSYSCONFDIR") : SYSCONFDIR); - errno = 0; - if (stat(serviceFile, &stat_buf) != 0 && errno == ENOENT) + if (stat(serviceFile, &stat_buf) != 0) goto last_file; status = parseServiceFile(serviceFile, service, options, errorMessage, &group_found); @@ -4772,7 +4931,7 @@ conninfo_init(PQExpBuffer errorMessage) * Returns a malloc'd PQconninfoOption array, if parsing is successful. * Otherwise, NULL is returned and an error message is left in errorMessage. * - * If use_defaults is TRUE, default values are filled in (from a service file, + * If use_defaults is true, default values are filled in (from a service file, * environment variables, etc). */ static PQconninfoOption * @@ -4995,7 +5154,7 @@ conninfo_parse(const char *conninfo, PQExpBuffer errorMessage, * If not successful, NULL is returned and an error message is * left in errorMessage. * Defaults are supplied (from a service file, environment variables, etc) - * for unspecified options, but only if use_defaults is TRUE. + * for unspecified options, but only if use_defaults is true. * * If expand_dbname is non-zero, and the value passed for the first occurrence * of "dbname" keyword is a connection string (as indicated by @@ -5166,7 +5325,7 @@ conninfo_array_parse(const char *const *keywords, const char *const *values, * * Defaults are obtained from a service file, environment variables, etc. * - * Returns TRUE if successful, otherwise FALSE; errorMessage, if supplied, + * Returns true if successful, otherwise false; errorMessage, if supplied, * is filled in upon failure. Note that failure to locate a default value * is not an error condition here --- we just leave the option's value as * NULL. @@ -5817,7 +5976,7 @@ conninfo_getval(PQconninfoOption *connOptions, * * If not successful, returns NULL and fills errorMessage accordingly. * However, if the reason of failure is an invalid keyword being passed and - * ignoreMissing is TRUE, errorMessage will be left untouched. + * ignoreMissing is true, errorMessage will be left untouched. 
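/*
 * Illustrative sketch (not part of the patch): parseServiceInfo() above looks
 * up sections like the following from PGSERVICEFILE, ~/.pg_service.conf, or
 * pg_service.conf under PGSYSCONFDIR/SYSCONFDIR.  The service name and its
 * settings here are made up.
 *
 *     [reporting]
 *     host=pg1.example
 *     port=5432
 *     dbname=reports
 *
 * An application then only names the service; the other options are absorbed
 * from the file as defaults:
 */
#include <libpq-fe.h>

PGconn *
connect_to_reporting(void)
{
	return PQconnectdb("service=reporting user=report_reader");
}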
*/ static PQconninfoOption * conninfo_storeval(PQconninfoOption *connOptions, @@ -5999,19 +6158,18 @@ PQhost(const PGconn *conn) { if (!conn) return NULL; - if (conn->connhost != NULL && - conn->connhost[conn->whichhost].type != CHT_HOST_ADDRESS) - return conn->connhost[conn->whichhost].host; - else if (conn->pghost != NULL && conn->pghost[0] != '\0') - return conn->pghost; - else + + if (conn->connhost != NULL) { -#ifdef HAVE_UNIX_SOCKETS - return DEFAULT_PGSOCKET_DIR; -#else - return DefaultHost; -#endif + if (conn->connhost[conn->whichhost].host != NULL && + conn->connhost[conn->whichhost].host[0] != '\0') + return conn->connhost[conn->whichhost].host; + else if (conn->connhost[conn->whichhost].hostaddr != NULL && + conn->connhost[conn->whichhost].hostaddr[0] != '\0') + return conn->connhost[conn->whichhost].hostaddr; } + + return ""; } char * @@ -6019,9 +6177,11 @@ PQport(const PGconn *conn) { if (!conn) return NULL; + if (conn->connhost != NULL) return conn->connhost[conn->whichhost].port; - return conn->pgport; + + return ""; } char * @@ -6297,8 +6457,8 @@ defaultNoticeReceiver(void *arg, const PGresult *res) { (void) arg; /* not used */ if (res->noticeHooks.noticeProc != NULL) - (*res->noticeHooks.noticeProc) (res->noticeHooks.noticeProcArg, - PQresultErrorMessage(res)); + res->noticeHooks.noticeProc(res->noticeHooks.noticeProcArg, + PQresultErrorMessage(res)); } /* @@ -6320,10 +6480,10 @@ defaultNoticeProcessor(void *arg, const char *message) * token doesn't match */ static char * -pwdfMatchesString(char *buf, char *token) +pwdfMatchesString(char *buf, const char *token) { - char *tbuf, - *ttok; + char *tbuf; + const char *ttok; bool bslash = false; if (buf == NULL || token == NULL) @@ -6357,8 +6517,8 @@ pwdfMatchesString(char *buf, char *token) /* Get a password from the password file. Return value is malloc'd. */ static char * -passwordFromFile(char *hostname, char *port, char *dbname, - char *username, char *pgpassfile) +passwordFromFile(const char *hostname, const char *port, const char *dbname, + const char *username, const char *pgpassfile) { FILE *fp; struct stat stat_buf; @@ -6366,14 +6526,14 @@ passwordFromFile(char *hostname, char *port, char *dbname, #define LINELEN NAMEDATALEN*5 char buf[LINELEN]; - if (dbname == NULL || strlen(dbname) == 0) + if (dbname == NULL || dbname[0] == '\0') return NULL; - if (username == NULL || strlen(username) == 0) + if (username == NULL || username[0] == '\0') return NULL; /* 'localhost' matches pghost of '' or the default socket directory */ - if (hostname == NULL) + if (hostname == NULL || hostname[0] == '\0') hostname = DefaultHost; else if (is_absolute_path(hostname)) @@ -6384,7 +6544,7 @@ passwordFromFile(char *hostname, char *port, char *dbname, if (strcmp(hostname, DEFAULT_PGSOCKET_DIR) == 0) hostname = DefaultHost; - if (port == NULL) + if (port == NULL || port[0] == '\0') port = DEF_PGPORT_STR; /* If password file cannot be opened, ignore it. 
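/*
 * Illustrative note (not part of the patch): passwordFromFile() above scans
 * the password file for lines of the form
 *
 *     hostname:port:database:username:password
 *
 * where any of the first four fields may be '*', and embedded ':' or '\'
 * must be backslash-escaped (that is what pwdfMatchesString() handles).  For
 * example, the made-up entry
 *
 *     pg1.example:5432:app:alice:s3cret
 *
 * lets a conninfo omit the password entirely:
 */
#include <libpq-fe.h>

PGconn *
connect_with_pgpass(void)
{
	/* password deliberately omitted; libpq fills it in from the file */
	return PQconnectdb("host=pg1.example port=5432 dbname=app user=alice");
}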
*/ @@ -6489,7 +6649,9 @@ pgpassfileWarning(PGconn *conn) { /* If it was 'invalid authorization', add pgpassfile mention */ /* only works with >= 9.0 servers */ - if (conn->pgpassfile_used && conn->password_needed && conn->result) + if (conn->password_needed && + conn->connhost[conn->whichhost].password != NULL && + conn->result) { const char *sqlstate = PQresultErrorField(conn->result, PG_DIAG_SQLSTATE); @@ -6510,7 +6672,15 @@ pgpassfileWarning(PGconn *conn) * * This is essentially the same as get_home_path(), but we don't use that * because we don't want to pull path.c into libpq (it pollutes application - * namespace) + * namespace). + * + * Returns true on success, false on failure to obtain the directory name. + * + * CAUTION: although in most situations failure is unexpected, there are users + * who like to run applications in a home-directory-less environment. On + * failure, you almost certainly DO NOT want to report an error. Just act as + * though whatever file you were hoping to find in the home directory isn't + * there (which it isn't). */ bool pqGetHomeDirectory(char *buf, int bufsize) diff --git a/src/interfaces/libpq/fe-exec.c b/src/interfaces/libpq/fe-exec.c index e1e2d18e3a..6aed8c87c7 100644 --- a/src/interfaces/libpq/fe-exec.c +++ b/src/interfaces/libpq/fe-exec.c @@ -3,7 +3,7 @@ * fe-exec.c * functions related to sending a query down to the backend * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * @@ -16,6 +16,7 @@ #include #include +#include #include "libpq-fe.h" #include "libpq-int.h" @@ -50,8 +51,9 @@ static int static_client_encoding = PG_SQL_ASCII; static bool static_std_strings = false; -static PGEvent *dupEvents(PGEvent *events, int count); -static bool pqAddTuple(PGresult *res, PGresAttValue *tup); +static PGEvent *dupEvents(PGEvent *events, int count, size_t *memSize); +static bool pqAddTuple(PGresult *res, PGresAttValue *tup, + const char **errmsgp); static bool PQsendQueryStart(PGconn *conn); static int PQsendQueryGuts(PGconn *conn, const char *command, @@ -164,6 +166,7 @@ PQmakeEmptyPGresult(PGconn *conn, ExecStatusType status) result->curBlock = NULL; result->curOffset = 0; result->spaceLeft = 0; + result->memorySize = sizeof(PGresult); if (conn) { @@ -191,7 +194,8 @@ PQmakeEmptyPGresult(PGconn *conn, ExecStatusType status) /* copy events last; result must be valid if we need to PQclear */ if (conn->nEvents > 0) { - result->events = dupEvents(conn->events, conn->nEvents); + result->events = dupEvents(conn->events, conn->nEvents, + &result->memorySize); if (!result->events) { PQclear(result); @@ -229,17 +233,17 @@ PQsetResultAttrs(PGresult *res, int numAttributes, PGresAttDesc *attDescs) /* If attrs already exist, they cannot be overwritten. 
*/ if (!res || res->numAttributes > 0) - return FALSE; + return false; /* ignore no-op request */ if (numAttributes <= 0 || !attDescs) - return TRUE; + return true; res->attDescs = (PGresAttDesc *) PQresultAlloc(res, numAttributes * sizeof(PGresAttDesc)); if (!res->attDescs) - return FALSE; + return false; res->numAttributes = numAttributes; memcpy(res->attDescs, attDescs, numAttributes * sizeof(PGresAttDesc)); @@ -254,13 +258,13 @@ PQsetResultAttrs(PGresult *res, int numAttributes, PGresAttDesc *attDescs) res->attDescs[i].name = res->null_field; if (!res->attDescs[i].name) - return FALSE; + return false; if (res->attDescs[i].format == 0) res->binary = 0; } - return TRUE; + return true; } /* @@ -342,7 +346,8 @@ PQcopyResult(const PGresult *src, int flags) /* Wants to copy PGEvents? */ if ((flags & PG_COPYRES_EVENTS) && src->nEvents > 0) { - dest->events = dupEvents(src->events, src->nEvents); + dest->events = dupEvents(src->events, src->nEvents, + &dest->memorySize); if (!dest->events) { PQclear(dest); @@ -366,7 +371,7 @@ PQcopyResult(const PGresult *src, int flags) PQclear(dest); return NULL; } - dest->events[i].resultInitialized = TRUE; + dest->events[i].resultInitialized = true; } } @@ -377,17 +382,20 @@ PQcopyResult(const PGresult *src, int flags) * Copy an array of PGEvents (with no extra space for more). * Does not duplicate the event instance data, sets this to NULL. * Also, the resultInitialized flags are all cleared. + * The total space allocated is added to *memSize. */ static PGEvent * -dupEvents(PGEvent *events, int count) +dupEvents(PGEvent *events, int count, size_t *memSize) { PGEvent *newEvents; + size_t msize; int i; if (!events || count <= 0) return NULL; - newEvents = (PGEvent *) malloc(count * sizeof(PGEvent)); + msize = count * sizeof(PGEvent); + newEvents = (PGEvent *) malloc(msize); if (!newEvents) return NULL; @@ -396,7 +404,7 @@ dupEvents(PGEvent *events, int count) newEvents[i].proc = events[i].proc; newEvents[i].passThrough = events[i].passThrough; newEvents[i].data = NULL; - newEvents[i].resultInitialized = FALSE; + newEvents[i].resultInitialized = false; newEvents[i].name = strdup(events[i].name); if (!newEvents[i].name) { @@ -405,8 +413,10 @@ dupEvents(PGEvent *events, int count) free(newEvents); return NULL; } + msize += strlen(events[i].name) + 1; } + *memSize += msize; return newEvents; } @@ -416,18 +426,26 @@ dupEvents(PGEvent *events, int count) * equal to PQntuples(res). If it is equal, a new tuple is created and * added to the result. * Returns a non-zero value for success and zero for failure. + * (On failure, we report the specific problem via pqInternalNotice.) */ int PQsetvalue(PGresult *res, int tup_num, int field_num, char *value, int len) { PGresAttValue *attval; + const char *errmsg = NULL; + /* Note that this check also protects us against null "res" */ if (!check_field_number(res, field_num)) - return FALSE; + return false; /* Invalid tup_num, must be <= ntups */ if (tup_num < 0 || tup_num > res->ntups) - return FALSE; + { + pqInternalNotice(&res->noticeHooks, + "row number %d is out of range 0..%d", + tup_num, res->ntups); + return false; + } /* need to allocate a new tuple? 
*/ if (tup_num == res->ntups) @@ -437,10 +455,10 @@ PQsetvalue(PGresult *res, int tup_num, int field_num, char *value, int len) tup = (PGresAttValue *) pqResultAlloc(res, res->numAttributes * sizeof(PGresAttValue), - TRUE); + true); if (!tup) - return FALSE; + goto fail; /* initialize each column to NULL */ for (i = 0; i < res->numAttributes; i++) @@ -450,8 +468,8 @@ PQsetvalue(PGresult *res, int tup_num, int field_num, char *value, int len) } /* add it to the array */ - if (!pqAddTuple(res, tup)) - return FALSE; + if (!pqAddTuple(res, tup, &errmsg)) + goto fail; } attval = &res->tuples[tup_num][field_num]; @@ -469,15 +487,26 @@ PQsetvalue(PGresult *res, int tup_num, int field_num, char *value, int len) } else { - attval->value = (char *) pqResultAlloc(res, len + 1, TRUE); + attval->value = (char *) pqResultAlloc(res, len + 1, true); if (!attval->value) - return FALSE; + goto fail; attval->len = len; memcpy(attval->value, value, len); attval->value[len] = '\0'; } - return TRUE; + return true; + + /* + * Report failure via pqInternalNotice. If preceding code didn't provide + * an error message, assume "out of memory" was meant. + */ +fail: + if (!errmsg) + errmsg = libpq_gettext("out of memory"); + pqInternalNotice(&res->noticeHooks, "%s", errmsg); + + return false; } /* @@ -489,7 +518,7 @@ PQsetvalue(PGresult *res, int tup_num, int field_num, char *value, int len) void * PQresultAlloc(PGresult *res, size_t nBytes) { - return pqResultAlloc(res, nBytes, TRUE); + return pqResultAlloc(res, nBytes, true); } /* @@ -546,9 +575,12 @@ pqResultAlloc(PGresult *res, size_t nBytes, bool isBinary) */ if (nBytes >= PGRESULT_SEP_ALLOC_THRESHOLD) { - block = (PGresult_data *) malloc(nBytes + PGRESULT_BLOCK_OVERHEAD); + size_t alloc_size = nBytes + PGRESULT_BLOCK_OVERHEAD; + + block = (PGresult_data *) malloc(alloc_size); if (!block) return NULL; + res->memorySize += alloc_size; space = block->space + PGRESULT_BLOCK_OVERHEAD; if (res->curBlock) { @@ -573,6 +605,7 @@ pqResultAlloc(PGresult *res, size_t nBytes, bool isBinary) block = (PGresult_data *) malloc(PGRESULT_DATA_BLOCKSIZE); if (!block) return NULL; + res->memorySize += PGRESULT_DATA_BLOCKSIZE; block->next = res->curBlock; res->curBlock = block; if (isBinary) @@ -594,6 +627,18 @@ pqResultAlloc(PGresult *res, size_t nBytes, bool isBinary) return space; } +/* + * PQresultMemorySize - + * Returns total space allocated for the PGresult. + */ +size_t +PQresultMemorySize(const PGresult *res) +{ + if (!res) + return 0; + return res->memorySize; +} + /* * pqResultStrdup - * Like strdup, but the space is subsidiary PGresult space. @@ -601,7 +646,7 @@ pqResultAlloc(PGresult *res, size_t nBytes, bool isBinary) char * pqResultStrdup(PGresult *res, const char *str) { - char *space = (char *) pqResultAlloc(res, strlen(str) + 1, FALSE); + char *space = (char *) pqResultAlloc(res, strlen(str) + 1, false); if (space) strcpy(space, str); @@ -831,7 +876,7 @@ pqInternalNotice(const PGNoticeHooks *hooks, const char *fmt,...) * Result text is always just the primary message + newline. If we can't * allocate it, don't bother invoking the receiver. */ - res->errMsg = (char *) pqResultAlloc(res, strlen(msgBuf) + 2, FALSE); + res->errMsg = (char *) pqResultAlloc(res, strlen(msgBuf) + 2, false); if (res->errMsg) { sprintf(res->errMsg, "%s\n", msgBuf); @@ -839,7 +884,7 @@ pqInternalNotice(const PGNoticeHooks *hooks, const char *fmt,...) /* * Pass to receiver, then free it. 
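/*
 * Illustrative sketch (not part of the patch): PQresultMemorySize() is the
 * public accessor for the memorySize bookkeeping threaded through the
 * allocators above.  Building a tiny synthetic result and asking how much
 * space it consumed could look like this (the column definition is made up):
 */
#include <stdio.h>
#include <libpq-fe.h>

static void
show_result_size(void)
{
	PGresult   *res = PQmakeEmptyPGresult(NULL, PGRES_TUPLES_OK);
	PGresAttDesc col = {0};

	col.name = "n";
	col.typid = 23;				/* int4, purely for illustration */
	col.typlen = 4;
	col.format = 0;				/* text */

	if (PQsetResultAttrs(res, 1, &col) &&
		PQsetvalue(res, 0, 0, "42", 2))
		printf("result uses %zu bytes\n", PQresultMemorySize(res));

	PQclear(res);
}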
*/ - (*res->noticeHooks.noticeRec) (res->noticeHooks.noticeRecArg, res); + res->noticeHooks.noticeRec(res->noticeHooks.noticeRecArg, res); } PQclear(res); } @@ -847,10 +892,13 @@ pqInternalNotice(const PGNoticeHooks *hooks, const char *fmt,...) /* * pqAddTuple * add a row pointer to the PGresult structure, growing it if necessary - * Returns TRUE if OK, FALSE if not enough memory to add the row + * Returns true if OK, false if an error prevented adding the row + * + * On error, *errmsgp can be set to an error string to be returned. + * If it is left NULL, the error is presumed to be "out of memory". */ static bool -pqAddTuple(PGresult *res, PGresAttValue *tup) +pqAddTuple(PGresult *res, PGresAttValue *tup, const char **errmsgp) { if (res->ntups >= res->tupArrSize) { @@ -865,9 +913,36 @@ pqAddTuple(PGresult *res, PGresAttValue *tup) * existing allocation. Note that the positions beyond res->ntups are * garbage, not necessarily NULL. */ - int newSize = (res->tupArrSize > 0) ? res->tupArrSize * 2 : 128; + int newSize; PGresAttValue **newTuples; + /* + * Since we use integers for row numbers, we can't support more than + * INT_MAX rows. Make sure we allow that many, though. + */ + if (res->tupArrSize <= INT_MAX / 2) + newSize = (res->tupArrSize > 0) ? res->tupArrSize * 2 : 128; + else if (res->tupArrSize < INT_MAX) + newSize = INT_MAX; + else + { + *errmsgp = libpq_gettext("PGresult cannot support more than INT_MAX tuples"); + return false; + } + + /* + * Also, on 32-bit platforms we could, in theory, overflow size_t even + * before newSize gets to INT_MAX. (In practice we'd doubtless hit + * OOM long before that, but let's check.) + */ +#if INT_MAX >= (SIZE_MAX / 2) + if (newSize > SIZE_MAX / sizeof(PGresAttValue *)) + { + *errmsgp = libpq_gettext("size_t overflow"); + return false; + } +#endif + if (res->tuples == NULL) newTuples = (PGresAttValue **) malloc(newSize * sizeof(PGresAttValue *)); @@ -875,13 +950,15 @@ pqAddTuple(PGresult *res, PGresAttValue *tup) newTuples = (PGresAttValue **) realloc(res->tuples, newSize * sizeof(PGresAttValue *)); if (!newTuples) - return FALSE; /* malloc or realloc failed */ + return false; /* malloc or realloc failed */ + res->memorySize += + (newSize - res->tupArrSize) * sizeof(PGresAttValue *); res->tupArrSize = newSize; res->tuples = newTuples; } res->tuples[res->ntups] = tup; res->ntups++; - return TRUE; + return true; } /* @@ -896,7 +973,7 @@ pqSaveMessageField(PGresult *res, char code, const char *value) pqResultAlloc(res, offsetof(PGMessageField, contents) + strlen(value) + 1, - TRUE); + true); if (!pfield) return; /* out of memory? */ pfield->code = code; @@ -1060,7 +1137,7 @@ pqRowProcessor(PGconn *conn, const char **errmsgp) * memory for gettext() to do anything. */ tup = (PGresAttValue *) - pqResultAlloc(res, nfields * sizeof(PGresAttValue), TRUE); + pqResultAlloc(res, nfields * sizeof(PGresAttValue), true); if (tup == NULL) goto fail; @@ -1093,7 +1170,7 @@ pqRowProcessor(PGconn *conn, const char **errmsgp) } /* And add the tuple to the PGresult's tuple array */ - if (!pqAddTuple(res, tup)) + if (!pqAddTuple(res, tup, errmsgp)) goto fail; /* @@ -1674,14 +1751,14 @@ parseInput(PGconn *conn) /* * PQisBusy - * Return TRUE if PQgetResult would block waiting for input. + * Return true if PQgetResult would block waiting for input. */ int PQisBusy(PGconn *conn) { if (!conn) - return FALSE; + return false; /* Parse any available data, if our state permits. 
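/*
 * Illustrative sketch (not part of the patch): as the new comment on
 * PQnotifies() above says, that function only inspects data already read, so
 * callers normally pair it with PQconsumeInput().  A typical non-blocking
 * drain loop for a connection that has issued LISTEN might be:
 */
#include <stdio.h>
#include <libpq-fe.h>

static void
drain_notifications(PGconn *conn)
{
	PGnotify   *note;

	if (!PQconsumeInput(conn))	/* pull in whatever the socket has */
	{
		fprintf(stderr, "lost connection: %s", PQerrorMessage(conn));
		return;
	}

	while ((note = PQnotifies(conn)) != NULL)
	{
		printf("NOTIFY \"%s\" from backend %d: %s\n",
			   note->relname, note->be_pid, note->extra);
		PQfreemem(note);
	}
}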
*/ parseInput(conn); @@ -1720,7 +1797,7 @@ PQgetResult(PGconn *conn) */ while ((flushResult = pqFlush(conn)) > 0) { - if (pqWait(FALSE, TRUE, conn)) + if (pqWait(false, true, conn)) { flushResult = -1; break; @@ -1729,7 +1806,7 @@ PQgetResult(PGconn *conn) /* Wait for some more data, and load it. */ if (flushResult || - pqWait(TRUE, FALSE, conn) || + pqWait(true, false, conn) || pqReadData(conn) < 0) { /* @@ -1793,7 +1870,7 @@ PQgetResult(PGconn *conn) res->resultStatus = PGRES_FATAL_ERROR; break; } - res->events[i].resultInitialized = TRUE; + res->events[i].resultInitialized = true; } } @@ -2188,6 +2265,9 @@ PQsendDescribe(PGconn *conn, char desc_type, const char *desc_target) * no unhandled async notification from the backend * * the CALLER is responsible for FREE'ing the structure returned + * + * Note that this function does not read any new data from the socket; + * so usually, caller should call PQconsumeInput() first. */ PGnotify * PQnotifies(PGconn *conn) @@ -2695,22 +2775,22 @@ PQbinaryTuples(const PGresult *res) /* * Helper routines to range-check field numbers and tuple numbers. - * Return TRUE if OK, FALSE if not + * Return true if OK, false if not */ static int check_field_number(const PGresult *res, int field_num) { if (!res) - return FALSE; /* no way to display error message... */ + return false; /* no way to display error message... */ if (field_num < 0 || field_num >= res->numAttributes) { pqInternalNotice(&res->noticeHooks, "column number %d is out of range 0..%d", field_num, res->numAttributes - 1); - return FALSE; + return false; } - return TRUE; + return true; } static int @@ -2718,38 +2798,38 @@ check_tuple_field_number(const PGresult *res, int tup_num, int field_num) { if (!res) - return FALSE; /* no way to display error message... */ + return false; /* no way to display error message... */ if (tup_num < 0 || tup_num >= res->ntups) { pqInternalNotice(&res->noticeHooks, "row number %d is out of range 0..%d", tup_num, res->ntups - 1); - return FALSE; + return false; } if (field_num < 0 || field_num >= res->numAttributes) { pqInternalNotice(&res->noticeHooks, "column number %d is out of range 0..%d", field_num, res->numAttributes - 1); - return FALSE; + return false; } - return TRUE; + return true; } static int check_param_number(const PGresult *res, int param_num) { if (!res) - return FALSE; /* no way to display error message... */ + return false; /* no way to display error message... */ if (param_num < 0 || param_num >= res->numParameters) { pqInternalNotice(&res->noticeHooks, "parameter number %d is out of range 0..%d", param_num, res->numParameters - 1); - return FALSE; + return false; } - return TRUE; + return true; } /* @@ -3126,8 +3206,8 @@ PQparamtype(const PGresult *res, int param_num) /* PQsetnonblocking: - * sets the PGconn's database connection non-blocking if the arg is TRUE - * or makes it blocking if the arg is FALSE, this will not protect + * sets the PGconn's database connection non-blocking if the arg is true + * or makes it blocking if the arg is false, this will not protect * you from PQexec(), you'll only be safe when using the non-blocking API. * Needs to be called only on a connected database connection. */ @@ -3139,7 +3219,7 @@ PQsetnonblocking(PGconn *conn, int arg) if (!conn || conn->status == CONNECTION_BAD) return -1; - barg = (arg ? TRUE : FALSE); + barg = (arg ? 
true : false); /* early out if the socket is already in the state requested */ if (barg == conn->nonblocking) @@ -3162,7 +3242,7 @@ PQsetnonblocking(PGconn *conn, int arg) /* * return the blocking status of the database connection - * TRUE == nonblocking, FALSE == blocking + * true == nonblocking, false == blocking */ int PQisnonblocking(const PGconn *conn) diff --git a/src/interfaces/libpq/fe-lobj.c b/src/interfaces/libpq/fe-lobj.c index 343e5303d9..b9caa22966 100644 --- a/src/interfaces/libpq/fe-lobj.c +++ b/src/interfaces/libpq/fe-lobj.c @@ -3,7 +3,7 @@ * fe-lobj.c * Front-end large object interface * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * @@ -33,12 +33,11 @@ #include #include #include -#include /* for ntohl/htonl */ -#include #include "libpq-fe.h" #include "libpq-int.h" #include "libpq/libpq-fs.h" /* must come after sys/stat.h */ +#include "port/pg_bswap.h" #define LO_BUFSIZE 8192 @@ -695,7 +694,7 @@ lo_import_internal(PGconn *conn, const char *filename, Oid oid) char buf[LO_BUFSIZE]; Oid lobjOid; int lobj; - char sebuf[256]; + char sebuf[PG_STRERROR_R_BUFLEN]; /* * open the file to be read in @@ -705,7 +704,7 @@ lo_import_internal(PGconn *conn, const char *filename, Oid oid) { /* error */ printfPQExpBuffer(&conn->errorMessage, libpq_gettext("could not open file \"%s\": %s\n"), - filename, pqStrerror(errno, sebuf, sizeof(sebuf))); + filename, strerror_r(errno, sebuf, sizeof(sebuf))); return InvalidOid; } @@ -761,7 +760,7 @@ lo_import_internal(PGconn *conn, const char *filename, Oid oid) printfPQExpBuffer(&conn->errorMessage, libpq_gettext("could not read from file \"%s\": %s\n"), filename, - pqStrerror(save_errno, sebuf, sizeof(sebuf))); + strerror_r(save_errno, sebuf, sizeof(sebuf))); return InvalidOid; } @@ -790,7 +789,7 @@ lo_export(PGconn *conn, Oid lobjId, const char *filename) tmp; char buf[LO_BUFSIZE]; int lobj; - char sebuf[256]; + char sebuf[PG_STRERROR_R_BUFLEN]; /* * open the large object. 
@@ -815,7 +814,7 @@ lo_export(PGconn *conn, Oid lobjId, const char *filename) printfPQExpBuffer(&conn->errorMessage, libpq_gettext("could not open file \"%s\": %s\n"), filename, - pqStrerror(save_errno, sebuf, sizeof(sebuf))); + strerror_r(save_errno, sebuf, sizeof(sebuf))); return -1; } @@ -835,7 +834,7 @@ lo_export(PGconn *conn, Oid lobjId, const char *filename) printfPQExpBuffer(&conn->errorMessage, libpq_gettext("could not write to file \"%s\": %s\n"), filename, - pqStrerror(save_errno, sebuf, sizeof(sebuf))); + strerror_r(save_errno, sebuf, sizeof(sebuf))); return -1; } } @@ -858,7 +857,7 @@ lo_export(PGconn *conn, Oid lobjId, const char *filename) { printfPQExpBuffer(&conn->errorMessage, libpq_gettext("could not write to file \"%s\": %s\n"), - filename, pqStrerror(errno, sebuf, sizeof(sebuf))); + filename, strerror_r(errno, sebuf, sizeof(sebuf))); result = -1; } @@ -1070,11 +1069,11 @@ lo_hton64(pg_int64 host64) /* High order half first, since we're doing MSB-first */ t = (uint32) (host64 >> 32); - swap.i32[0] = htonl(t); + swap.i32[0] = pg_hton32(t); /* Now the low order half */ t = (uint32) host64; - swap.i32[1] = htonl(t); + swap.i32[1] = pg_hton32(t); return swap.i64; } @@ -1095,9 +1094,9 @@ lo_ntoh64(pg_int64 net64) swap.i64 = net64; - result = (uint32) ntohl(swap.i32[0]); + result = (uint32) pg_ntoh32(swap.i32[0]); result <<= 32; - result |= (uint32) ntohl(swap.i32[1]); + result |= (uint32) pg_ntoh32(swap.i32[1]); return result; } diff --git a/src/interfaces/libpq/fe-misc.c b/src/interfaces/libpq/fe-misc.c index cac6359585..46ece1a14c 100644 --- a/src/interfaces/libpq/fe-misc.c +++ b/src/interfaces/libpq/fe-misc.c @@ -19,7 +19,7 @@ * routines. * * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * IDENTIFICATION @@ -33,9 +33,6 @@ #include #include -#include -#include - #ifdef WIN32 #include "win32.h" #else @@ -53,6 +50,7 @@ #include "libpq-fe.h" #include "libpq-int.h" #include "mb/pg_wchar.h" +#include "port/pg_bswap.h" #include "pg_config_paths.h" @@ -278,14 +276,14 @@ pqGetInt(int *result, size_t bytes, PGconn *conn) return EOF; memcpy(&tmp2, conn->inBuffer + conn->inCursor, 2); conn->inCursor += 2; - *result = (int) ntohs(tmp2); + *result = (int) pg_ntoh16(tmp2); break; case 4: if (conn->inCursor + 4 > conn->inEnd) return EOF; memcpy(&tmp4, conn->inBuffer + conn->inCursor, 4); conn->inCursor += 4; - *result = (int) ntohl(tmp4); + *result = (int) pg_ntoh32(tmp4); break; default: pqInternalNotice(&conn->noticeHooks, @@ -314,12 +312,12 @@ pqPutInt(int value, size_t bytes, PGconn *conn) switch (bytes) { case 2: - tmp2 = htons((uint16) value); + tmp2 = pg_hton16((uint16) value); if (pqPutMsgBytes((const char *) &tmp2, 2, conn)) return EOF; break; case 4: - tmp4 = htonl((uint32) value); + tmp4 = pg_hton32((uint32) value); if (pqPutMsgBytes((const char *) &tmp4, 4, conn)) return EOF; break; @@ -597,7 +595,7 @@ pqPutMsgEnd(PGconn *conn) { uint32 msgLen = conn->outMsgEnd - conn->outMsgStart; - msgLen = htonl(msgLen); + msgLen = pg_hton32(msgLen); memcpy(conn->outBuffer + conn->outMsgStart, &msgLen, 4); } @@ -936,7 +934,7 @@ pqSendSome(PGconn *conn, int len) break; } - if (pqWait(TRUE, TRUE, conn)) + if (pqWait(true, true, conn)) { result = -1; break; @@ -1073,7 +1071,7 @@ pqSocketCheck(PGconn *conn, int forRead, int forWrite, time_t end_time) if (result < 0) { - char sebuf[256]; + char sebuf[PG_STRERROR_R_BUFLEN]; 
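/*
 * Illustrative sketch (not part of the patch): pg_hton32()/pg_ntoh32() above
 * are internal macros from port/pg_bswap.h, so out-of-tree code keeps using
 * the ordinary socket macros.  The 64-bit helper used by the large-object
 * code is byte-for-byte equivalent to something like the following sketch,
 * built on htonl and assuming <arpa/inet.h> is available:
 */
#include <stdint.h>
#include <string.h>
#include <arpa/inet.h>

static int64_t
example_hton64(int64_t host64)
{
	uint32_t	hi = htonl((uint32_t) (host64 >> 32));	/* MSB half first */
	uint32_t	lo = htonl((uint32_t) host64);
	unsigned char buf[8];
	int64_t		net64;

	memcpy(buf, &hi, 4);
	memcpy(buf + 4, &lo, 4);
	memcpy(&net64, buf, 8);
	return net64;
}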
printfPQExpBuffer(&conn->errorMessage, libpq_gettext("select() failed: %s\n"), diff --git a/src/interfaces/libpq/fe-print.c b/src/interfaces/libpq/fe-print.c index 89bc4c5429..95de270b93 100644 --- a/src/interfaces/libpq/fe-print.c +++ b/src/interfaces/libpq/fe-print.c @@ -3,7 +3,7 @@ * fe-print.c * functions for pretty-printing query results * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * These functions were formerly part of fe-exec.c, but they @@ -165,6 +165,13 @@ PQprint(FILE *fout, const PGresult *res, const PQprintOpt *po) screen_size.ws_row = 24; screen_size.ws_col = 80; #endif + + /* + * Since this function is no longer used by psql, we don't examine + * PSQL_PAGER. It's possible that the hypothetical external users + * of the function would like that to happen, but in the name of + * backwards compatibility, we'll stick to just examining PAGER. + */ pagerenv = getenv("PAGER"); /* if PAGER is unset, empty or all-white-space, don't use pager */ if (pagerenv != NULL && diff --git a/src/interfaces/libpq/fe-protocol2.c b/src/interfaces/libpq/fe-protocol2.c index a58f701e18..53e5083702 100644 --- a/src/interfaces/libpq/fe-protocol2.c +++ b/src/interfaces/libpq/fe-protocol2.c @@ -3,7 +3,7 @@ * fe-protocol2.c * functions that are specific to frontend/backend protocol version 2 * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * @@ -19,17 +19,16 @@ #include "libpq-fe.h" #include "libpq-int.h" +#include "port/pg_bswap.h" #ifdef WIN32 #include "win32.h" #else #include -#include #ifdef HAVE_NETINET_TCP_H #include #endif -#include #endif @@ -584,7 +583,7 @@ pqParseInput2(PGconn *conn) if (conn->result != NULL) { /* Read another tuple of a normal query response */ - if (getAnotherTuple(conn, FALSE)) + if (getAnotherTuple(conn, false)) return; /* getAnotherTuple() moves inStart itself */ continue; @@ -602,7 +601,7 @@ pqParseInput2(PGconn *conn) if (conn->result != NULL) { /* Read another tuple of a normal query response */ - if (getAnotherTuple(conn, TRUE)) + if (getAnotherTuple(conn, true)) return; /* getAnotherTuple() moves inStart itself */ continue; @@ -680,7 +679,7 @@ getRowDescriptions(PGconn *conn) if (nfields > 0) { result->attDescs = (PGresAttDesc *) - pqResultAlloc(result, nfields * sizeof(PGresAttDesc), TRUE); + pqResultAlloc(result, nfields * sizeof(PGresAttDesc), true); if (!result->attDescs) { errmsg = NULL; /* means "out of memory", see below */ @@ -967,6 +966,14 @@ pqGetErrorNotice2(PGconn *conn, bool isError) char *startp; char *splitp; + /* + * If this is an error message, pre-emptively clear any incomplete query + * result we may have. We'd just throw it away below anyway, and + * releasing it before collecting the error might avoid out-of-memory. + */ + if (isError) + pqClearAsyncResult(conn); + /* * Since the message might be pretty long, we create a temporary * PQExpBuffer rather than using conn->workBuffer. 
workBuffer is intended @@ -1039,7 +1046,7 @@ pqGetErrorNotice2(PGconn *conn, bool isError) */ if (isError) { - pqClearAsyncResult(conn); + pqClearAsyncResult(conn); /* redundant, but be safe */ conn->result = res; resetPQExpBuffer(&conn->errorMessage); if (res && !PQExpBufferDataBroken(workBuf) && res->errMsg) @@ -1055,7 +1062,7 @@ pqGetErrorNotice2(PGconn *conn, bool isError) if (res) { if (res->noticeHooks.noticeRec != NULL) - (*res->noticeHooks.noticeRec) (res->noticeHooks.noticeRecArg, res); + res->noticeHooks.noticeRec(res->noticeHooks.noticeRecArg, res); PQclear(res); } } @@ -1219,7 +1226,7 @@ pqGetCopyData2(PGconn *conn, char **buffer, int async) if (async) return 0; /* Need to load more data */ - if (pqWait(TRUE, FALSE, conn) || + if (pqWait(true, false, conn) || pqReadData(conn) < 0) return -2; } @@ -1264,7 +1271,7 @@ pqGetline2(PGconn *conn, char *s, int maxlen) else { /* need to load more data */ - if (pqWait(TRUE, FALSE, conn) || + if (pqWait(true, false, conn) || pqReadData(conn) < 0) { result = EOF; @@ -1485,7 +1492,7 @@ pqFunctionCall2(PGconn *conn, Oid fnid, if (needInput) { /* Wait for some data to arrive (or for the channel to close) */ - if (pqWait(TRUE, FALSE, conn) || + if (pqWait(true, false, conn) || pqReadData(conn) < 0) break; } @@ -1609,7 +1616,7 @@ pqBuildStartupPacket2(PGconn *conn, int *packetlen, MemSet(startpacket, 0, sizeof(StartupPacket)); - startpacket->protoVersion = htonl(conn->pversion); + startpacket->protoVersion = pg_hton32(conn->pversion); /* strncpy is safe here: postmaster will handle full fields correctly */ strncpy(startpacket->user, conn->pguser, SM_USER); diff --git a/src/interfaces/libpq/fe-protocol3.c b/src/interfaces/libpq/fe-protocol3.c index a484fe80a1..8345faface 100644 --- a/src/interfaces/libpq/fe-protocol3.c +++ b/src/interfaces/libpq/fe-protocol3.c @@ -3,7 +3,7 @@ * fe-protocol3.c * functions that are specific to frontend/backend protocol version 3 * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * @@ -21,16 +21,15 @@ #include "libpq-int.h" #include "mb/pg_wchar.h" +#include "port/pg_bswap.h" #ifdef WIN32 #include "win32.h" #else #include -#include #ifdef HAVE_NETINET_TCP_H #include #endif -#include #endif @@ -511,7 +510,7 @@ getRowDescriptions(PGconn *conn, int msgLength) if (nfields > 0) { result->attDescs = (PGresAttDesc *) - pqResultAlloc(result, nfields * sizeof(PGresAttDesc), TRUE); + pqResultAlloc(result, nfields * sizeof(PGresAttDesc), true); if (!result->attDescs) { errmsg = NULL; /* means "out of memory", see below */ @@ -669,7 +668,7 @@ getParamDescriptions(PGconn *conn, int msgLength) if (nparams > 0) { result->paramDescs = (PGresParamDesc *) - pqResultAlloc(result, nparams * sizeof(PGresParamDesc), TRUE); + pqResultAlloc(result, nparams * sizeof(PGresParamDesc), true); if (!result->paramDescs) goto advance_and_error; MemSet(result->paramDescs, 0, nparams * sizeof(PGresParamDesc)); @@ -880,6 +879,14 @@ pqGetErrorNotice3(PGconn *conn, bool isError) PQExpBufferData workBuf; char id; + /* + * If this is an error message, pre-emptively clear any incomplete query + * result we may have. We'd just throw it away below anyway, and + * releasing it before collecting the error might avoid out-of-memory. 
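/*
 * Illustrative sketch (not part of the patch): the noticeRec hook being
 * invoked above is whatever PQsetNoticeReceiver() installed.  An application
 * that wants structured access to NOTICE/WARNING messages can register a
 * receiver like this right after connecting:
 */
#include <stdio.h>
#include <libpq-fe.h>

static void
my_notice_receiver(void *arg, const PGresult *res)
{
	const char *severity = PQresultErrorField(res, PG_DIAG_SEVERITY);
	const char *primary = PQresultErrorField(res, PG_DIAG_MESSAGE_PRIMARY);

	(void) arg;
	fprintf(stderr, "[%s] %s\n",
			severity ? severity : "NOTICE",
			primary ? primary : PQresultErrorMessage(res));
}

/* usage: PQsetNoticeReceiver(conn, my_notice_receiver, NULL); */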
+ */ + if (isError) + pqClearAsyncResult(conn); + /* * Since the fields might be pretty long, we create a temporary * PQExpBuffer rather than using conn->workBuffer. workBuffer is intended @@ -944,7 +951,7 @@ pqGetErrorNotice3(PGconn *conn, bool isError) { if (res) res->errMsg = pqResultStrdup(res, workBuf.data); - pqClearAsyncResult(conn); + pqClearAsyncResult(conn); /* redundant, but be safe */ conn->result = res; if (PQExpBufferDataBroken(workBuf)) printfPQExpBuffer(&conn->errorMessage, @@ -960,7 +967,7 @@ pqGetErrorNotice3(PGconn *conn, bool isError) /* We can cheat a little here and not copy the message. */ res->errMsg = workBuf.data; if (res->noticeHooks.noticeRec != NULL) - (*res->noticeHooks.noticeRec) (res->noticeHooks.noticeRecArg, res); + res->noticeHooks.noticeRec(res->noticeHooks.noticeRecArg, res); PQclear(res); } } @@ -1467,7 +1474,7 @@ getCopyStart(PGconn *conn, ExecStatusType copytype) if (nfields > 0) { result->attDescs = (PGresAttDesc *) - pqResultAlloc(result, nfields * sizeof(PGresAttDesc), TRUE); + pqResultAlloc(result, nfields * sizeof(PGresAttDesc), true); if (!result->attDescs) goto failure; MemSet(result->attDescs, 0, nfields * sizeof(PGresAttDesc)); @@ -1658,7 +1665,7 @@ pqGetCopyData3(PGconn *conn, char **buffer, int async) if (async) return 0; /* Need to load more data */ - if (pqWait(TRUE, FALSE, conn) || + if (pqWait(true, false, conn) || pqReadData(conn) < 0) return -2; continue; @@ -1716,7 +1723,7 @@ pqGetline3(PGconn *conn, char *s, int maxlen) while ((status = PQgetlineAsync(conn, s, maxlen - 1)) == 0) { /* need to load more data */ - if (pqWait(TRUE, FALSE, conn) || + if (pqWait(true, false, conn) || pqReadData(conn) < 0) { *s = '\0'; @@ -1969,7 +1976,7 @@ pqFunctionCall3(PGconn *conn, Oid fnid, if (needInput) { /* Wait for some data to arrive (or for the channel to close) */ - if (pqWait(TRUE, FALSE, conn) || + if (pqWait(true, false, conn) || pqReadData(conn) < 0) break; } @@ -2148,7 +2155,7 @@ build_startup_packet(const PGconn *conn, char *packet, /* Protocol version comes first. */ if (packet) { - ProtocolVersion pv = htonl(conn->pversion); + ProtocolVersion pv = pg_hton32(conn->pversion); memcpy(packet + packet_len, &pv, sizeof(ProtocolVersion)); } diff --git a/src/interfaces/libpq/fe-secure-common.c b/src/interfaces/libpq/fe-secure-common.c new file mode 100644 index 0000000000..b3f580f595 --- /dev/null +++ b/src/interfaces/libpq/fe-secure-common.c @@ -0,0 +1,211 @@ +/*------------------------------------------------------------------------- + * + * fe-secure-common.c + * + * common implementation-independent SSL support code + * + * While fe-secure.c contains the interfaces that the rest of libpq call, this + * file contains support routines that are used by the library-specific + * implementations such as fe-secure-openssl.c. + * + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group + * Portions Copyright (c) 1994, Regents of the University of California + * + * IDENTIFICATION + * src/interfaces/libpq/fe-secure-common.c + * + *------------------------------------------------------------------------- + */ + +#include "postgres_fe.h" + +#include "fe-secure-common.h" + +#include "libpq-int.h" +#include "pqexpbuffer.h" + +/* + * Check if a wildcard certificate matches the server hostname. + * + * The rule for this is: + * 1. We only match the '*' character as wildcard + * 2. We match only wildcards at the start of the string + * 3. The '*' character does *not* match '.', meaning that we match only + * a single pathname component. 
+ * 4. We don't support more than one '*' in a single pattern. + * + * This is roughly in line with RFC2818, but contrary to what most browsers + * appear to be implementing (point 3 being the difference) + * + * Matching is always case-insensitive, since DNS is case insensitive. + */ +static bool +wildcard_certificate_match(const char *pattern, const char *string) +{ + int lenpat = strlen(pattern); + int lenstr = strlen(string); + + /* If we don't start with a wildcard, it's not a match (rule 1 & 2) */ + if (lenpat < 3 || + pattern[0] != '*' || + pattern[1] != '.') + return false; + + /* If pattern is longer than the string, we can never match */ + if (lenpat > lenstr) + return false; + + /* + * If string does not end in pattern (minus the wildcard), we don't match + */ + if (pg_strcasecmp(pattern + 1, string + lenstr - lenpat + 1) != 0) + return false; + + /* + * If there is a dot left of where the pattern started to match, we don't + * match (rule 3) + */ + if (strchr(string, '.') < string + lenstr - lenpat) + return false; + + /* String ended with pattern, and didn't have a dot before, so we match */ + return true; +} + +/* + * Check if a name from a server's certificate matches the peer's hostname. + * + * Returns 1 if the name matches, and 0 if it does not. On error, returns + * -1, and sets the libpq error message. + * + * The name extracted from the certificate is returned in *store_name. The + * caller is responsible for freeing it. + */ +int +pq_verify_peer_name_matches_certificate_name(PGconn *conn, + const char *namedata, size_t namelen, + char **store_name) +{ + char *name; + int result; + char *host = conn->connhost[conn->whichhost].host; + + *store_name = NULL; + + if (!(host && host[0] != '\0')) + { + printfPQExpBuffer(&conn->errorMessage, + libpq_gettext("host name must be specified\n")); + return -1; + } + + /* + * There is no guarantee the string returned from the certificate is + * NULL-terminated, so make a copy that is. + */ + name = malloc(namelen + 1); + if (name == NULL) + { + printfPQExpBuffer(&conn->errorMessage, + libpq_gettext("out of memory\n")); + return -1; + } + memcpy(name, namedata, namelen); + name[namelen] = '\0'; + + /* + * Reject embedded NULLs in certificate common or alternative name to + * prevent attacks like CVE-2009-4034. + */ + if (namelen != strlen(name)) + { + free(name); + printfPQExpBuffer(&conn->errorMessage, + libpq_gettext("SSL certificate's name contains embedded null\n")); + return -1; + } + + if (pg_strcasecmp(name, host) == 0) + { + /* Exact name match */ + result = 1; + } + else if (wildcard_certificate_match(name, host)) + { + /* Matched wildcard name */ + result = 1; + } + else + { + result = 0; + } + + *store_name = name; + return result; +} + +/* + * Verify that the server certificate matches the hostname we connected to. + * + * The certificate's Common Name and Subject Alternative Names are considered. + */ +bool +pq_verify_peer_name_matches_certificate(PGconn *conn) +{ + char *host = conn->connhost[conn->whichhost].host; + int rc; + int names_examined = 0; + char *first_name = NULL; + + /* + * If told not to verify the peer name, don't do it. Return true + * indicating that the verification was successful. + */ + if (strcmp(conn->sslmode, "verify-full") != 0) + return true; + + /* Check that we have a hostname to compare with. 
*/ + if (!(host && host[0] != '\0')) + { + printfPQExpBuffer(&conn->errorMessage, + libpq_gettext("host name must be specified for a verified SSL connection\n")); + return false; + } + + rc = pgtls_verify_peer_name_matches_certificate_guts(conn, &names_examined, &first_name); + + if (rc == 0) + { + /* + * No match. Include the name from the server certificate in the error + * message, to aid debugging broken configurations. If there are + * multiple names, only print the first one to avoid an overly long + * error message. + */ + if (names_examined > 1) + { + printfPQExpBuffer(&conn->errorMessage, + libpq_ngettext("server certificate for \"%s\" (and %d other name) does not match host name \"%s\"\n", + "server certificate for \"%s\" (and %d other names) does not match host name \"%s\"\n", + names_examined - 1), + first_name, names_examined - 1, host); + } + else if (names_examined == 1) + { + printfPQExpBuffer(&conn->errorMessage, + libpq_gettext("server certificate for \"%s\" does not match host name \"%s\"\n"), + first_name, host); + } + else + { + printfPQExpBuffer(&conn->errorMessage, + libpq_gettext("could not get server's host name from server certificate\n")); + } + } + + /* clean up */ + if (first_name) + free(first_name); + + return (rc == 1); +} diff --git a/src/interfaces/libpq/fe-secure-common.h b/src/interfaces/libpq/fe-secure-common.h new file mode 100644 index 0000000000..980a58af25 --- /dev/null +++ b/src/interfaces/libpq/fe-secure-common.h @@ -0,0 +1,26 @@ +/*------------------------------------------------------------------------- + * + * fe-secure-common.h + * + * common implementation-independent SSL support code + * + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group + * Portions Copyright (c) 1994, Regents of the University of California + * + * IDENTIFICATION + * src/interfaces/libpq/fe-secure-common.h + * + *------------------------------------------------------------------------- + */ + +#ifndef FE_SECURE_COMMON_H +#define FE_SECURE_COMMON_H + +#include "libpq-fe.h" + +extern int pq_verify_peer_name_matches_certificate_name(PGconn *conn, + const char *namedata, size_t namelen, + char **store_name); +extern bool pq_verify_peer_name_matches_certificate(PGconn *conn); + +#endif /* FE_SECURE_COMMON_H */ diff --git a/src/interfaces/libpq/fe-secure-openssl.c b/src/interfaces/libpq/fe-secure-openssl.c index 2f29820e82..beca3492e8 100644 --- a/src/interfaces/libpq/fe-secure-openssl.c +++ b/src/interfaces/libpq/fe-secure-openssl.c @@ -4,7 +4,7 @@ * OpenSSL support * * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * @@ -28,6 +28,7 @@ #include "libpq-fe.h" #include "fe-auth.h" +#include "fe-secure-common.h" #include "libpq-int.h" #ifdef WIN32 @@ -60,11 +61,10 @@ #endif #include -static bool verify_peer_name_matches_certificate(PGconn *); static int verify_cb(int ok, X509_STORE_CTX *ctx); -static int verify_peer_name_matches_certificate_name(PGconn *conn, - ASN1_STRING *name, - char **store_name); +static int openssl_verify_peer_name_matches_certificate_name(PGconn *conn, + ASN1_STRING *name, + char **store_name); static void destroy_ssl_system(void); static int initialize_SSL(PGconn *conn); static PostgresPollingStatusType open_client_SSL(PGconn *); @@ -98,10 +98,6 @@ static long win32_ssl_create_mutex = 0; /* Procedures common to all secure sessions */ /* 
------------------------------------------------------------ */ -/* - * Exported function to allow application to tell us it's already - * initialized OpenSSL and/or libcrypto. - */ void pgtls_init_library(bool do_ssl, int do_crypto) { @@ -119,9 +115,6 @@ pgtls_init_library(bool do_ssl, int do_crypto) pq_init_crypto_lib = do_crypto; } -/* - * Begin or continue negotiating a secure session. - */ PostgresPollingStatusType pgtls_open_client(PGconn *conn) { @@ -144,28 +137,12 @@ pgtls_open_client(PGconn *conn) return open_client_SSL(conn); } -/* - * Is there unread data waiting in the SSL read buffer? - */ -bool -pgtls_read_pending(PGconn *conn) -{ - return SSL_pending(conn->ssl); -} - -/* - * Read data from a secure connection. - * - * On failure, this function is responsible for putting a suitable message - * into conn->errorMessage. The caller must still inspect errno, but only - * to determine whether to continue/retry after error. - */ ssize_t pgtls_read(PGconn *conn, void *ptr, size_t len) { ssize_t n; int result_errno = 0; - char sebuf[256]; + char sebuf[PG_STRERROR_R_BUFLEN]; int err; unsigned long ecode; @@ -284,19 +261,18 @@ pgtls_read(PGconn *conn, void *ptr, size_t len) return n; } -/* - * Write data to a secure connection. - * - * On failure, this function is responsible for putting a suitable message - * into conn->errorMessage. The caller must still inspect errno, but only - * to determine whether to continue/retry after error. - */ +bool +pgtls_read_pending(PGconn *conn) +{ + return SSL_pending(conn->ssl); +} + ssize_t pgtls_write(PGconn *conn, const void *ptr, size_t len) { ssize_t n; int result_errno = 0; - char sebuf[256]; + char sebuf[PG_STRERROR_R_BUFLEN]; int err; unsigned long ecode; @@ -393,6 +369,82 @@ pgtls_write(PGconn *conn, const void *ptr, size_t len) return n; } +#ifdef HAVE_X509_GET_SIGNATURE_NID +char * +pgtls_get_peer_certificate_hash(PGconn *conn, size_t *len) +{ + X509 *peer_cert; + const EVP_MD *algo_type; + unsigned char hash[EVP_MAX_MD_SIZE]; /* size for SHA-512 */ + unsigned int hash_size; + int algo_nid; + char *cert_hash; + + *len = 0; + + if (!conn->peer) + return NULL; + + peer_cert = conn->peer; + + /* + * Get the signature algorithm of the certificate to determine the hash + * algorithm to use for the result. + */ + if (!OBJ_find_sigid_algs(X509_get_signature_nid(peer_cert), + &algo_nid, NULL)) + { + printfPQExpBuffer(&conn->errorMessage, + libpq_gettext("could not determine server certificate signature algorithm\n")); + return NULL; + } + + /* + * The TLS server's certificate bytes need to be hashed with SHA-256 if + * its signature algorithm is MD5 or SHA-1 as per RFC 5929 + * (https://tools.ietf.org/html/rfc5929#section-4.1). If something else + * is used, the same hash as the signature algorithm is used. 
+ */ + switch (algo_nid) + { + case NID_md5: + case NID_sha1: + algo_type = EVP_sha256(); + break; + default: + algo_type = EVP_get_digestbynid(algo_nid); + if (algo_type == NULL) + { + printfPQExpBuffer(&conn->errorMessage, + libpq_gettext("could not find digest for NID %s\n"), + OBJ_nid2sn(algo_nid)); + return NULL; + } + break; + } + + if (!X509_digest(peer_cert, algo_type, hash, &hash_size)) + { + printfPQExpBuffer(&conn->errorMessage, + libpq_gettext("could not generate peer certificate hash\n")); + return NULL; + } + + /* save result */ + cert_hash = malloc(hash_size); + if (cert_hash == NULL) + { + printfPQExpBuffer(&conn->errorMessage, + libpq_gettext("out of memory\n")); + return NULL; + } + memcpy(cert_hash, hash, hash_size); + *len = hash_size; + + return cert_hash; +} +#endif /* HAVE_X509_GET_SIGNATURE_NID */ + /* ------------------------------------------------------------ */ /* OpenSSL specific code */ /* ------------------------------------------------------------ */ @@ -416,76 +468,16 @@ verify_cb(int ok, X509_STORE_CTX *ctx) /* - * Check if a wildcard certificate matches the server hostname. - * - * The rule for this is: - * 1. We only match the '*' character as wildcard - * 2. We match only wildcards at the start of the string - * 3. The '*' character does *not* match '.', meaning that we match only - * a single pathname component. - * 4. We don't support more than one '*' in a single pattern. - * - * This is roughly in line with RFC2818, but contrary to what most browsers - * appear to be implementing (point 3 being the difference) - * - * Matching is always case-insensitive, since DNS is case insensitive. + * OpenSSL-specific wrapper around + * pq_verify_peer_name_matches_certificate_name(), converting the ASN1_STRING + * into a plain C string. */ static int -wildcard_certificate_match(const char *pattern, const char *string) -{ - int lenpat = strlen(pattern); - int lenstr = strlen(string); - - /* If we don't start with a wildcard, it's not a match (rule 1 & 2) */ - if (lenpat < 3 || - pattern[0] != '*' || - pattern[1] != '.') - return 0; - - if (lenpat > lenstr) - /* If pattern is longer than the string, we can never match */ - return 0; - - if (pg_strcasecmp(pattern + 1, string + lenstr - lenpat + 1) != 0) - - /* - * If string does not end in pattern (minus the wildcard), we don't - * match - */ - return 0; - - if (strchr(string, '.') < string + lenstr - lenpat) - - /* - * If there is a dot left of where the pattern started to match, we - * don't match (rule 3) - */ - return 0; - - /* String ended with pattern, and didn't have a dot before, so we match */ - return 1; -} - -/* - * Check if a name from a server's certificate matches the peer's hostname. - * - * Returns 1 if the name matches, and 0 if it does not. On error, returns - * -1, and sets the libpq error message. - * - * The name extracted from the certificate is returned in *store_name. The - * caller is responsible for freeing it. - */ -static int -verify_peer_name_matches_certificate_name(PGconn *conn, ASN1_STRING *name_entry, - char **store_name) +openssl_verify_peer_name_matches_certificate_name(PGconn *conn, ASN1_STRING *name_entry, + char **store_name) { int len; - char *name; const unsigned char *namedata; - int result; - char *host = PQhost(conn); - - *store_name = NULL; /* Should not happen... */ if (name_entry == NULL) @@ -497,9 +489,6 @@ verify_peer_name_matches_certificate_name(PGconn *conn, ASN1_STRING *name_entry, /* * GEN_DNS can be only IA5String, equivalent to US ASCII. 
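[Editor's note, not part of the patch: for context on the digest selection above, here is a hedged standalone sketch, not libpq code, that computes an RFC 5929 tls-server-end-point hash for a PEM certificate file named on the command line, folding MD5 and SHA-1 signatures to SHA-256 the same way pgtls_get_peer_certificate_hash() does. It uses only stock OpenSSL calls (PEM_read_X509, X509_get_signature_nid, OBJ_find_sigid_algs, X509_digest); error handling is minimal.]

/* Sketch: compute an RFC 5929 tls-server-end-point hash for a PEM cert. */
#include <stdio.h>
#include <openssl/pem.h>
#include <openssl/x509.h>
#include <openssl/evp.h>
#include <openssl/objects.h>

int
main(int argc, char **argv)
{
    FILE       *fp;
    X509       *cert;
    int         algo_nid;
    const EVP_MD *md;
    unsigned char hash[EVP_MAX_MD_SIZE];
    unsigned int hash_len;

    if (argc != 2 || (fp = fopen(argv[1], "r")) == NULL)
        return 1;
    cert = PEM_read_X509(fp, NULL, NULL, NULL);
    fclose(fp);
    if (cert == NULL)
        return 1;

    /* Find the digest that goes with the certificate's signature algorithm */
    if (!OBJ_find_sigid_algs(X509_get_signature_nid(cert), &algo_nid, NULL))
        return 1;

    /* RFC 5929: MD5 and SHA-1 signatures are hashed with SHA-256 instead */
    if (algo_nid == NID_md5 || algo_nid == NID_sha1)
        md = EVP_sha256();
    else
        md = EVP_get_digestbynid(algo_nid);
    if (md == NULL || !X509_digest(cert, md, hash, &hash_len))
        return 1;

    for (unsigned int i = 0; i < hash_len; i++)
        printf("%02x", hash[i]);
    putchar('\n');
    X509_free(cert);
    return 0;
}

[End of editor's note.]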
- * - * There is no guarantee the string returned from the certificate is - * NULL-terminated, so make a copy that is. */ #ifdef HAVE_ASN1_STRING_GET0_DATA namedata = ASN1_STRING_get0_data(name_entry); @@ -507,45 +496,9 @@ verify_peer_name_matches_certificate_name(PGconn *conn, ASN1_STRING *name_entry, namedata = ASN1_STRING_data(name_entry); #endif len = ASN1_STRING_length(name_entry); - name = malloc(len + 1); - if (name == NULL) - { - printfPQExpBuffer(&conn->errorMessage, - libpq_gettext("out of memory\n")); - return -1; - } - memcpy(name, namedata, len); - name[len] = '\0'; - /* - * Reject embedded NULLs in certificate common or alternative name to - * prevent attacks like CVE-2009-4034. - */ - if (len != strlen(name)) - { - free(name); - printfPQExpBuffer(&conn->errorMessage, - libpq_gettext("SSL certificate's name contains embedded null\n")); - return -1; - } - - if (pg_strcasecmp(name, host) == 0) - { - /* Exact name match */ - result = 1; - } - else if (wildcard_certificate_match(name, host)) - { - /* Matched wildcard name */ - result = 1; - } - else - { - result = 0; - } - - *store_name = name; - return result; + /* OK to cast from unsigned to plain char, since it's all ASCII. */ + return pq_verify_peer_name_matches_certificate_name(conn, (const char *) namedata, len, store_name); } /* @@ -553,33 +506,14 @@ verify_peer_name_matches_certificate_name(PGconn *conn, ASN1_STRING *name_entry, * * The certificate's Common Name and Subject Alternative Names are considered. */ -static bool -verify_peer_name_matches_certificate(PGconn *conn) +int +pgtls_verify_peer_name_matches_certificate_guts(PGconn *conn, + int *names_examined, + char **first_name) { - int names_examined = 0; - bool found_match = false; - bool got_error = false; - char *first_name = NULL; - STACK_OF(GENERAL_NAME) *peer_san; int i; - int rc; - char *host = PQhost(conn); - - /* - * If told not to verify the peer name, don't do it. Return true - * indicating that the verification was successful. - */ - if (strcmp(conn->sslmode, "verify-full") != 0) - return true; - - /* Check that we have a hostname to compare with. */ - if (!(host && host[0] != '\0')) - { - printfPQExpBuffer(&conn->errorMessage, - libpq_gettext("host name must be specified for a verified SSL connection\n")); - return false; - } + int rc = 0; /* * First, get the Subject Alternative Names (SANs) from the certificate, @@ -600,24 +534,20 @@ verify_peer_name_matches_certificate(PGconn *conn) { char *alt_name; - names_examined++; - rc = verify_peer_name_matches_certificate_name(conn, - name->d.dNSName, - &alt_name); - if (rc == -1) - got_error = true; - if (rc == 1) - found_match = true; + (*names_examined)++; + rc = openssl_verify_peer_name_matches_certificate_name(conn, + name->d.dNSName, + &alt_name); if (alt_name) { - if (!first_name) - first_name = alt_name; + if (!*first_name) + *first_name = alt_name; else free(alt_name); } } - if (found_match || got_error) + if (rc != 0) break; } sk_GENERAL_NAME_free(peer_san); @@ -630,7 +560,7 @@ verify_peer_name_matches_certificate(PGconn *conn) * (Per RFC 2818 and RFC 6125, if the subjectAltName extension of type * dNSName is present, the CN must be ignored.) 
*/ - if (names_examined == 0) + if (*names_examined == 0) { X509_NAME *subject_name; @@ -643,55 +573,17 @@ verify_peer_name_matches_certificate(PGconn *conn) NID_commonName, -1); if (cn_index >= 0) { - names_examined++; - rc = verify_peer_name_matches_certificate_name( - conn, - X509_NAME_ENTRY_get_data( - X509_NAME_get_entry(subject_name, cn_index)), - &first_name); - - if (rc == -1) - got_error = true; - else if (rc == 1) - found_match = true; + (*names_examined)++; + rc = openssl_verify_peer_name_matches_certificate_name( + conn, + X509_NAME_ENTRY_get_data( + X509_NAME_get_entry(subject_name, cn_index)), + first_name); } } } - if (!found_match && !got_error) - { - /* - * No match. Include the name from the server certificate in the error - * message, to aid debugging broken configurations. If there are - * multiple names, only print the first one to avoid an overly long - * error message. - */ - if (names_examined > 1) - { - printfPQExpBuffer(&conn->errorMessage, - libpq_ngettext("server certificate for \"%s\" (and %d other name) does not match host name \"%s\"\n", - "server certificate for \"%s\" (and %d other names) does not match host name \"%s\"\n", - names_examined - 1), - first_name, names_examined - 1, host); - } - else if (names_examined == 1) - { - printfPQExpBuffer(&conn->errorMessage, - libpq_gettext("server certificate for \"%s\" does not match host name \"%s\"\n"), - first_name, host); - } - else - { - printfPQExpBuffer(&conn->errorMessage, - libpq_gettext("could not get server's host name from server certificate\n")); - } - } - - /* clean up */ - if (first_name) - free(first_name); - - return found_match && !got_error; + return rc; } #if defined(ENABLE_THREAD_SAFETY) && defined(HAVE_CRYPTO_LOCK) @@ -741,11 +633,6 @@ pq_lockingcallback(int mode, int n, const char *file, int line) * If the caller has told us (through PQinitOpenSSL) that he's taking care * of libcrypto, we expect that callbacks are already set, and won't try to * override it. - * - * The conn parameter is only used to be able to pass back an error - * message - no connection-local setup is made here. - * - * Returns 0 if OK, -1 on failure (with a message in conn->errorMessage). */ int pgtls_init(PGconn *conn) @@ -893,7 +780,7 @@ initialize_SSL(PGconn *conn) struct stat buf; char homedir[MAXPGPATH]; char fnbuf[MAXPGPATH]; - char sebuf[256]; + char sebuf[PG_STRERROR_R_BUFLEN]; bool have_homedir; bool have_cert; bool have_rootcert; @@ -1054,7 +941,7 @@ initialize_SSL(PGconn *conn) { printfPQExpBuffer(&conn->errorMessage, libpq_gettext("could not open certificate file \"%s\": %s\n"), - fnbuf, pqStrerror(errno, sebuf, sizeof(sebuf))); + fnbuf, strerror_r(errno, sebuf, sizeof(sebuf))); SSL_CTX_free(SSL_context); return -1; } @@ -1277,14 +1164,23 @@ initialize_SSL(PGconn *conn) SSL_set_verify(conn->ssl, SSL_VERIFY_PEER, verify_cb); /* - * If the OpenSSL version used supports it (from 1.0.0 on) and the user - * requested it, disable SSL compression. + * Set compression option if the OpenSSL version used supports it (from + * 1.0.0 on). */ #ifdef SSL_OP_NO_COMPRESSION if (conn->sslcompression && conn->sslcompression[0] == '0') - { SSL_set_options(conn->ssl, SSL_OP_NO_COMPRESSION); - } + + /* + * Mainline OpenSSL introduced SSL_clear_options() before + * SSL_OP_NO_COMPRESSION, so this following #ifdef should not be + * necessary, but some old NetBSD version have a locally modified libssl + * that has SSL_OP_NO_COMPRESSION but not SSL_clear_options(). 
+ */ +#ifdef HAVE_SSL_CLEAR_OPTIONS + else + SSL_clear_options(conn->ssl, SSL_OP_NO_COMPRESSION); +#endif #endif return 0; @@ -1316,7 +1212,7 @@ open_client_SSL(PGconn *conn) case SSL_ERROR_SYSCALL: { - char sebuf[256]; + char sebuf[PG_STRERROR_R_BUFLEN]; if (r == -1) printfPQExpBuffer(&conn->errorMessage, @@ -1370,7 +1266,7 @@ open_client_SSL(PGconn *conn) return PGRES_POLLING_FAILED; } - if (!verify_peer_name_matches_certificate(conn)) + if (!pq_verify_peer_name_matches_certificate(conn)) { pgtls_close(conn); return PGRES_POLLING_FAILED; @@ -1380,9 +1276,6 @@ open_client_SSL(PGconn *conn) return PGRES_POLLING_OK; } -/* - * Close SSL connection. - */ void pgtls_close(PGconn *conn) { @@ -1479,14 +1372,6 @@ SSLerrfree(char *buf) /* SSL information functions */ /* ------------------------------------------------------------ */ -int -PQsslInUse(PGconn *conn) -{ - if (!conn) - return 0; - return conn->ssl_in_use; -} - /* * Return pointer to OpenSSL object. */ @@ -1536,7 +1421,7 @@ PQsslAttribute(PGconn *conn, const char *attribute_name) if (strcmp(attribute_name, "key_bits") == 0) { - static char sslbits_str[10]; + static char sslbits_str[12]; int sslbits; SSL_get_cipher_bits(conn->ssl, &sslbits); @@ -1679,7 +1564,7 @@ my_BIO_s_socket(void) return my_bio_methods; } -/* This should exactly match openssl's SSL_set_fd except for using my BIO */ +/* This should exactly match OpenSSL's SSL_set_fd except for using my BIO */ static int my_SSL_set_fd(PGconn *conn, int fd) { diff --git a/src/interfaces/libpq/fe-secure.c b/src/interfaces/libpq/fe-secure.c index 7c2d0cb4e6..a06fc7dc82 100644 --- a/src/interfaces/libpq/fe-secure.c +++ b/src/interfaces/libpq/fe-secure.c @@ -6,7 +6,7 @@ * message integrity and endpoint authentication. * * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * @@ -129,6 +129,14 @@ struct sigpipe_info /* ------------------------------------------------------------ */ +int +PQsslInUse(PGconn *conn) +{ + if (!conn) + return 0; + return conn->ssl_in_use; +} + /* * Exported function to allow application to tell us it's already * initialized OpenSSL. @@ -225,7 +233,7 @@ pqsecure_raw_read(PGconn *conn, void *ptr, size_t len) { ssize_t n; int result_errno = 0; - char sebuf[256]; + char sebuf[PG_STRERROR_R_BUFLEN]; n = recv(conn->sock, ptr, len, 0); @@ -303,7 +311,7 @@ pqsecure_raw_write(PGconn *conn, const void *ptr, size_t len) ssize_t n; int flags = 0; int result_errno = 0; - char sebuf[256]; + char sebuf[PG_STRERROR_R_BUFLEN]; DECLARE_SIGPIPE_INFO(spinfo); @@ -352,9 +360,10 @@ pqsecure_raw_write(PGconn *conn, const void *ptr, size_t len) case EPIPE: /* Set flag for EPIPE */ REMEMBER_EPIPE(spinfo, true); - /* FALL THRU */ #ifdef ECONNRESET + /* FALL THRU */ + case ECONNRESET: #endif printfPQExpBuffer(&conn->errorMessage, @@ -384,12 +393,6 @@ pqsecure_raw_write(PGconn *conn, const void *ptr, size_t len) /* Dummy versions of SSL info functions, when built without SSL support */ #ifndef USE_SSL -int -PQsslInUse(PGconn *conn) -{ - return 0; -} - void * PQgetssl(PGconn *conn) { @@ -465,10 +468,10 @@ pq_block_sigpipe(sigset_t *osigset, bool *sigpipe_pending) * As long as it doesn't queue multiple events, we're OK because the caller * can't tell the difference. 
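[Editor's note, not part of the patch: since this hunk relocates PQsslInUse() into fe-secure.c and widens the key_bits buffer used by PQsslAttribute(), here is a brief client-side sketch of those two exported functions; the connection string is a placeholder.]

/* Sketch: report the SSL status of a connection via libpq's exported API. */
#include <stdio.h>
#include <libpq-fe.h>

int
main(void)
{
    PGconn     *conn = PQconnectdb("host=localhost dbname=postgres sslmode=prefer");

    if (PQstatus(conn) != CONNECTION_OK)
    {
        fprintf(stderr, "connection failed: %s", PQerrorMessage(conn));
        PQfinish(conn);
        return 1;
    }

    if (PQsslInUse(conn))
        printf("SSL in use: protocol=%s cipher=%s key_bits=%s\n",
               PQsslAttribute(conn, "protocol"),
               PQsslAttribute(conn, "cipher"),
               PQsslAttribute(conn, "key_bits"));
    else
        printf("connection is not encrypted\n");

    PQfinish(conn);
    return 0;
}

[End of editor's note.]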
* - * The caller should say got_epipe = FALSE if it is certain that it + * The caller should say got_epipe = false if it is certain that it * didn't get an EPIPE error; in that case we'll skip the clear operation * and things are definitely OK, queuing or no. If it got one or might have - * gotten one, pass got_epipe = TRUE. + * gotten one, pass got_epipe = true. * * We do not want this to change errno, since if it did that could lose * the error code from a preceding send(). We essentially assume that if diff --git a/src/interfaces/libpq/libpq-events.c b/src/interfaces/libpq/libpq-events.c index e533017a03..09f9c7f9bb 100644 --- a/src/interfaces/libpq/libpq-events.c +++ b/src/interfaces/libpq/libpq-events.c @@ -3,7 +3,7 @@ * libpq-events.c * functions for supporting the libpq "events" API * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * @@ -44,12 +44,12 @@ PQregisterEventProc(PGconn *conn, PGEventProc proc, PGEventRegister regevt; if (!proc || !conn || !name || !*name) - return FALSE; /* bad arguments */ + return false; /* bad arguments */ for (i = 0; i < conn->nEvents; i++) { if (conn->events[i].proc == proc) - return FALSE; /* already registered */ + return false; /* already registered */ } if (conn->nEvents >= conn->eventArraySize) @@ -64,7 +64,7 @@ PQregisterEventProc(PGconn *conn, PGEventProc proc, e = (PGEvent *) malloc(newSize * sizeof(PGEvent)); if (!e) - return FALSE; + return false; conn->eventArraySize = newSize; conn->events = e; @@ -73,10 +73,10 @@ PQregisterEventProc(PGconn *conn, PGEventProc proc, conn->events[conn->nEvents].proc = proc; conn->events[conn->nEvents].name = strdup(name); if (!conn->events[conn->nEvents].name) - return FALSE; + return false; conn->events[conn->nEvents].passThrough = passThrough; conn->events[conn->nEvents].data = NULL; - conn->events[conn->nEvents].resultInitialized = FALSE; + conn->events[conn->nEvents].resultInitialized = false; conn->nEvents++; regevt.conn = conn; @@ -84,10 +84,10 @@ PQregisterEventProc(PGconn *conn, PGEventProc proc, { conn->nEvents--; free(conn->events[conn->nEvents].name); - return FALSE; + return false; } - return TRUE; + return true; } /* @@ -100,18 +100,18 @@ PQsetInstanceData(PGconn *conn, PGEventProc proc, void *data) int i; if (!conn || !proc) - return FALSE; + return false; for (i = 0; i < conn->nEvents; i++) { if (conn->events[i].proc == proc) { conn->events[i].data = data; - return TRUE; + return true; } } - return FALSE; + return false; } /* @@ -144,18 +144,18 @@ PQresultSetInstanceData(PGresult *result, PGEventProc proc, void *data) int i; if (!result || !proc) - return FALSE; + return false; for (i = 0; i < result->nEvents; i++) { if (result->events[i].proc == proc) { result->events[i].data = data; - return TRUE; + return true; } } - return FALSE; + return false; } /* @@ -187,7 +187,7 @@ PQfireResultCreateEvents(PGconn *conn, PGresult *res) int i; if (!res) - return FALSE; + return false; for (i = 0; i < res->nEvents; i++) { @@ -199,11 +199,11 @@ PQfireResultCreateEvents(PGconn *conn, PGresult *res) evt.result = res; if (!res->events[i].proc(PGEVT_RESULTCREATE, &evt, res->events[i].passThrough)) - return FALSE; + return false; - res->events[i].resultInitialized = TRUE; + res->events[i].resultInitialized = true; } } - return TRUE; + return true; } diff --git a/src/interfaces/libpq/libpq-events.h b/src/interfaces/libpq/libpq-events.h 
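[Editor's note, not part of the patch: the TRUE/FALSE-to-true/false conversion above runs through libpq's events API; as a reminder of how applications consume that API, a hedged sketch follows. The procedure name trace_event_proc and its behaviour are invented for illustration.]

/* Sketch: registering a libpq event procedure (names here are examples). */
#include <stdio.h>
#include <libpq-fe.h>
#include <libpq-events.h>

static int
trace_event_proc(PGEventId evtId, void *evtInfo, void *passThrough)
{
    switch (evtId)
    {
        case PGEVT_REGISTER:
            printf("event procedure registered on connection\n");
            break;
        case PGEVT_RESULTCREATE:
            {
                PGEventResultCreate *e = (PGEventResultCreate *) evtInfo;

                /* associate the registration's passThrough with this result (demo only) */
                PQresultSetInstanceData(e->result, trace_event_proc, passThrough);
                break;
            }
        default:
            break;
    }
    return 1;                   /* nonzero means success */
}

int
main(void)
{
    PGconn     *conn = PQconnectdb("dbname=postgres");

    if (PQstatus(conn) == CONNECTION_OK &&
        PQregisterEventProc(conn, trace_event_proc, "trace_event_proc", NULL))
    {
        PGresult   *res = PQexec(conn, "SELECT 1");

        PQclear(res);           /* fires PGEVT_RESULTDESTROY internally */
    }
    PQfinish(conn);
    return 0;
}

[End of editor's note.]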
index 20af1ffe6d..7d0726a839 100644 --- a/src/interfaces/libpq/libpq-events.h +++ b/src/interfaces/libpq/libpq-events.h @@ -5,7 +5,7 @@ * that invoke the libpq "events" API, but are not interesting to * ordinary users of libpq. * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/interfaces/libpq/libpq-events.h diff --git a/src/interfaces/libpq/libpq-fe.h b/src/interfaces/libpq/libpq-fe.h index 1d915e7915..52bd5d2cd8 100644 --- a/src/interfaces/libpq/libpq-fe.h +++ b/src/interfaces/libpq/libpq-fe.h @@ -4,7 +4,7 @@ * This file contains definitions for structures and * externs for functions used by frontend postgres applications. * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/interfaces/libpq/libpq-fe.h @@ -516,6 +516,7 @@ extern PGresult *PQmakeEmptyPGresult(PGconn *conn, ExecStatusType status); extern PGresult *PQcopyResult(const PGresult *src, int flags); extern int PQsetResultAttrs(PGresult *res, int numAttributes, PGresAttDesc *attDescs); extern void *PQresultAlloc(PGresult *res, size_t nBytes); +extern size_t PQresultMemorySize(const PGresult *res); extern int PQsetvalue(PGresult *res, int tup_num, int field_num, char *value, int len); /* Quoting strings before inclusion in queries. */ diff --git a/src/interfaces/libpq/libpq-int.h b/src/interfaces/libpq/libpq-int.h index 42913604e3..975ab33d02 100644 --- a/src/interfaces/libpq/libpq-int.h +++ b/src/interfaces/libpq/libpq-int.h @@ -9,7 +9,7 @@ * more likely to break across PostgreSQL releases than code that uses * only the official API. * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/interfaces/libpq/libpq-int.h @@ -208,6 +208,8 @@ struct pg_result PGresult_data *curBlock; /* most recently allocated block */ int curOffset; /* start offset of free space in block */ int spaceLeft; /* number of free bytes remaining in block */ + + size_t memorySize; /* total space allocated for this PGresult */ }; /* PGAsyncStatusType defines the state of the query-execution state machine */ @@ -290,6 +292,7 @@ typedef struct pgDataValue const char *value; /* data value, without zero-termination */ } PGdataValue; +/* Host address type enum for struct pg_conn_host */ typedef enum pg_conn_host_type { CHT_HOST_NAME, @@ -298,21 +301,19 @@ typedef enum pg_conn_host_type } pg_conn_host_type; /* - * pg_conn_host stores all information about one of possibly several hosts - * mentioned in the connection string. Derived by splitting the pghost - * on the comma character and then parsing each segment. + * pg_conn_host stores all information about each of possibly several hosts + * mentioned in the connection string. Most fields are derived by splitting + * the relevant connection parameter (e.g., pghost) at commas. 
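[Editor's note, not part of the patch: given the PQresultMemorySize() export added to libpq-fe.h and the memorySize accounting field added to struct pg_result above, a short usage sketch; the query text is arbitrary.]

/* Sketch: inspect how much memory libpq allocated for a result set. */
#include <stdio.h>
#include <libpq-fe.h>

int
main(void)
{
    PGconn     *conn = PQconnectdb("dbname=postgres");
    PGresult   *res;

    if (PQstatus(conn) != CONNECTION_OK)
    {
        fprintf(stderr, "%s", PQerrorMessage(conn));
        PQfinish(conn);
        return 1;
    }

    res = PQexec(conn, "SELECT relname FROM pg_class");
    if (PQresultStatus(res) == PGRES_TUPLES_OK)
        printf("%d rows, PGresult uses %zu bytes\n",
               PQntuples(res), PQresultMemorySize(res));

    PQclear(res);
    PQfinish(conn);
    return 0;
}

[End of editor's note.]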
*/ typedef struct pg_conn_host { - pg_conn_host_type type; /* type of host */ + pg_conn_host_type type; /* type of host address */ char *host; /* host name or socket path */ - char *hostaddr; /* host address */ - char *port; /* port number for this host; if not NULL, - * overrides the PGConn's pgport */ + char *hostaddr; /* host numeric IP address */ + char *port; /* port number (always provided) */ char *password; /* password for this host, read from the - * password file. only set if the PGconn's - * pgpass field is NULL. */ - struct addrinfo *addrlist; /* list of possible backend addresses */ + * password file; NULL if not sought or not + * found in password file. */ } pg_conn_host; /* @@ -325,12 +326,13 @@ struct pg_conn char *pghost; /* the machine on which the server is running, * or a path to a UNIX-domain socket, or a * comma-separated list of machines and/or - * paths, optionally with port suffixes; if - * NULL, use DEFAULT_PGSOCKET_DIR */ + * paths; if NULL, use DEFAULT_PGSOCKET_DIR */ char *pghostaddr; /* the numeric IP address of the machine on - * which the server is running. Takes - * precedence over above. */ - char *pgport; /* the server's communication port number */ + * which the server is running, or a + * comma-separated list of same. Takes + * precedence over pghost. */ + char *pgport; /* the server's communication port number, or + * a comma-separated list of ports */ char *pgtty; /* tty on which the backend messages is * displayed (OBSOLETE, NOT USED) */ char *connect_timeout; /* connection timeout (numeric string) */ @@ -392,9 +394,9 @@ struct pg_conn PGnotify *notifyTail; /* newest unreported Notify msg */ /* Support for multiple hosts in connection string */ - int nconnhost; /* # of possible hosts */ - int whichhost; /* host we're currently considering */ - pg_conn_host *connhost; /* details about each possible host */ + int nconnhost; /* # of hosts named in conn string */ + int whichhost; /* host we're currently trying/connected to */ + pg_conn_host *connhost; /* details about each named host */ /* Connection data */ pgsocket sock; /* FD for socket, PGINVALID_SOCKET if @@ -405,12 +407,15 @@ struct pg_conn int sversion; /* server version, e.g. 70401 for 7.4.1 */ bool auth_req_received; /* true if any type of auth req received */ bool password_needed; /* true if server demanded a password */ - bool pgpassfile_used; /* true if password is from pgpassfile */ bool sigpipe_so; /* have we masked SIGPIPE via SO_NOSIGPIPE? */ bool sigpipe_flag; /* can we mask SIGPIPE via MSG_NOSIGNAL? */ /* Transient state needed while establishing connection */ - struct addrinfo *addr_cur; /* backend address currently being tried */ + bool try_next_addr; /* time to advance to next address/host? */ + bool try_next_host; /* time to advance to next connhost[]? */ + struct addrinfo *addrlist; /* list of addresses for current connhost */ + struct addrinfo *addr_cur; /* the one currently being tried */ + int addrlist_family; /* needed to know how to free addrlist */ PGSetenvStatusType setenv_state; /* for 2.0 protocol only */ const PQEnvironmentOption *next_eo; bool send_appname; /* okay to send application_name? 
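[Editor's note, not part of the patch: the struct comments above describe libpq's multi-host connection handling; the sketch below shows how that surfaces to applications. Host names and ports are placeholders. With target_session_attrs=read-write in the connection string, libpq additionally issues SHOW transaction_read_only against each candidate server and moves on from servers that report a read-only session.]

/* Sketch: a comma-separated host list; libpq tries each host in turn. */
#include <stdio.h>
#include <libpq-fe.h>

int
main(void)
{
    PGconn     *conn = PQconnectdb(
        "host=db1.example.com,db2.example.com port=5432,5433 "
        "dbname=postgres connect_timeout=5");

    if (PQstatus(conn) == CONNECTION_OK)
        printf("connected to %s:%s\n", PQhost(conn), PQport(conn));
    else
        fprintf(stderr, "all hosts failed: %s", PQerrorMessage(conn));

    PQfinish(conn);
    return 0;
}

[End of editor's note.]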
*/ @@ -453,11 +458,13 @@ struct pg_conn /* Assorted state for SASL, SSL, GSS, etc */ void *sasl_state; + /* SSL structures */ + bool ssl_in_use; + #ifdef USE_SSL bool allow_ssl_try; /* Allowed to try SSL negotiation */ bool wait_ssl_try; /* Delay SSL negotiation until after * attempting normal connection */ - bool ssl_in_use; #ifdef USE_OPENSSL SSL *ssl; /* SSL status, if have SSL connection */ X509 *peer; /* X509 cert of server */ @@ -658,17 +665,90 @@ extern void pq_reset_sigpipe(sigset_t *osigset, bool sigpipe_pending, bool got_epipe); #endif +/* === SSL === */ + /* - * The SSL implementation provides these functions (fe-secure-openssl.c) + * The SSL implementation provides these functions. + */ + +/* + * Implementation of PQinitSSL(). */ extern void pgtls_init_library(bool do_ssl, int do_crypto); + +/* + * Initialize SSL library. + * + * The conn parameter is only used to be able to pass back an error + * message - no connection-local setup is made here. + * + * Returns 0 if OK, -1 on failure (with a message in conn->errorMessage). + */ extern int pgtls_init(PGconn *conn); + +/* + * Begin or continue negotiating a secure session. + */ extern PostgresPollingStatusType pgtls_open_client(PGconn *conn); + +/* + * Close SSL connection. + */ extern void pgtls_close(PGconn *conn); + +/* + * Read data from a secure connection. + * + * On failure, this function is responsible for putting a suitable message + * into conn->errorMessage. The caller must still inspect errno, but only + * to determine whether to continue/retry after error. + */ extern ssize_t pgtls_read(PGconn *conn, void *ptr, size_t len); + +/* + * Is there unread data waiting in the SSL read buffer? + */ extern bool pgtls_read_pending(PGconn *conn); + +/* + * Write data to a secure connection. + * + * On failure, this function is responsible for putting a suitable message + * into conn->errorMessage. The caller must still inspect errno, but only + * to determine whether to continue/retry after error. + */ extern ssize_t pgtls_write(PGconn *conn, const void *ptr, size_t len); +/* + * Get the hash of the server certificate, for SCRAM channel binding type + * tls-server-end-point. + * + * NULL is sent back to the caller in the event of an error, with an + * error message for the caller to consume. + * + * This is not supported with old versions of OpenSSL that don't have + * the X509_get_signature_nid() function. + */ +#if defined(USE_OPENSSL) && defined(HAVE_X509_GET_SIGNATURE_NID) +#define HAVE_PGTLS_GET_PEER_CERTIFICATE_HASH +extern char *pgtls_get_peer_certificate_hash(PGconn *conn, size_t *len); +#endif + +/* + * Verify that the server certificate matches the host name we connected to. + * + * The certificate's Common Name and Subject Alternative Names are considered. + * + * Returns 1 if the name matches, and 0 if it does not. On error, returns + * -1, and sets the libpq error message. 
+ * + */ +extern int pgtls_verify_peer_name_matches_certificate_guts(PGconn *conn, + int *names_examined, + char **first_name); + +/* === miscellaneous macros === */ + /* * this is so that we can check if a connection is non-blocking internally * without the overhead of a function call @@ -693,7 +773,7 @@ extern char *libpq_ngettext(const char *msgid, const char *msgid_plural, unsigne #define SOCK_ERRNO_SET(e) WSASetLastError(e) #else #define SOCK_ERRNO errno -#define SOCK_STRERROR pqStrerror +#define SOCK_STRERROR strerror_r #define SOCK_ERRNO_SET(e) (errno = (e)) #endif diff --git a/src/interfaces/libpq/libpq.rc.in b/src/interfaces/libpq/libpq.rc.in index 437c45d602..de2985c4d4 100644 --- a/src/interfaces/libpq/libpq.rc.in +++ b/src/interfaces/libpq/libpq.rc.in @@ -1,8 +1,8 @@ #include VS_VERSION_INFO VERSIONINFO - FILEVERSION 11,0,0,0 - PRODUCTVERSION 11,0,0,0 + FILEVERSION 12,0,0,0 + PRODUCTVERSION 12,0,0,0 FILEFLAGSMASK 0x3fL FILEFLAGS 0 FILEOS VOS__WINDOWS32 @@ -15,13 +15,13 @@ BEGIN BEGIN VALUE "CompanyName", "\0" VALUE "FileDescription", "PostgreSQL Access Library\0" - VALUE "FileVersion", "11.0\0" + VALUE "FileVersion", "12.0\0" VALUE "InternalName", "libpq\0" - VALUE "LegalCopyright", "Copyright (C) 2017\0" + VALUE "LegalCopyright", "Copyright (C) 2018\0" VALUE "LegalTrademarks", "\0" VALUE "OriginalFilename", "libpq.dll\0" VALUE "ProductName", "PostgreSQL\0" - VALUE "ProductVersion", "11.0\0" + VALUE "ProductVersion", "12.0\0" END END BLOCK "VarFileInfo" diff --git a/src/interfaces/libpq/nls.mk b/src/interfaces/libpq/nls.mk index 2c5659e262..4196870b49 100644 --- a/src/interfaces/libpq/nls.mk +++ b/src/interfaces/libpq/nls.mk @@ -1,6 +1,6 @@ # src/interfaces/libpq/nls.mk CATALOG_NAME = libpq AVAIL_LANGUAGES = cs de es fr he it ja ko pl pt_BR ru sv tr zh_CN zh_TW -GETTEXT_FILES = fe-auth.c fe-auth-scram.c fe-connect.c fe-exec.c fe-lobj.c fe-misc.c fe-protocol2.c fe-protocol3.c fe-secure.c fe-secure-openssl.c win32.c +GETTEXT_FILES = fe-auth.c fe-auth-scram.c fe-connect.c fe-exec.c fe-lobj.c fe-misc.c fe-protocol2.c fe-protocol3.c fe-secure.c fe-secure-common.c fe-secure-openssl.c win32.c GETTEXT_TRIGGERS = libpq_gettext pqInternalNotice:2 GETTEXT_FLAGS = libpq_gettext:1:pass-c-format pqInternalNotice:2:c-format diff --git a/src/interfaces/libpq/po/de.po b/src/interfaces/libpq/po/de.po index 738a88f0ee..3dda9a2bce 100644 --- a/src/interfaces/libpq/po/de.po +++ b/src/interfaces/libpq/po/de.po @@ -1,14 +1,14 @@ # German message translation file for libpq -# Peter Eisentraut , 2001 - 2017. +# Peter Eisentraut , 2001 - 2018. 
# # Use these quotes: »%s« # msgid "" msgstr "" -"Project-Id-Version: PostgreSQL 10\n" +"Project-Id-Version: PostgreSQL 11\n" "Report-Msgid-Bugs-To: pgsql-bugs@postgresql.org\n" -"POT-Creation-Date: 2017-08-04 16:38+0000\n" -"PO-Revision-Date: 2017-08-04 17:31-0400\n" +"POT-Creation-Date: 2018-06-25 02:38+0000\n" +"PO-Revision-Date: 2018-06-25 09:27+0200\n" "Last-Translator: Peter Eisentraut \n" "Language-Team: German \n" "Language: de\n" @@ -16,73 +16,82 @@ msgstr "" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" -#: fe-auth-scram.c:176 +#: fe-auth-scram.c:189 msgid "malformed SCRAM message (empty message)\n" msgstr "fehlerhafte SCRAM-Nachricht (leere Nachricht)\n" -#: fe-auth-scram.c:182 +#: fe-auth-scram.c:195 msgid "malformed SCRAM message (length mismatch)\n" msgstr "fehlerhafte SCRAM-Nachricht (Länge stimmt nicht überein)\n" -#: fe-auth-scram.c:231 -msgid "invalid server signature\n" -msgstr "ungültige Serversignatur\n" +#: fe-auth-scram.c:244 +msgid "incorrect server signature\n" +msgstr "falsche Serversignatur\n" -#: fe-auth-scram.c:240 +#: fe-auth-scram.c:253 msgid "invalid SCRAM exchange state\n" msgstr "ungültiger Zustand des SCRAM-Austauschs\n" -#: fe-auth-scram.c:263 +#: fe-auth-scram.c:276 #, c-format -msgid "malformed SCRAM message (%c expected)\n" -msgstr "" +msgid "malformed SCRAM message (attribute \"%c\" expected)\n" +msgstr "fehlerhafte SCRAM-Nachricht (Attribut »%c« erwartet)\n" -#: fe-auth-scram.c:272 +#: fe-auth-scram.c:285 #, c-format -msgid "malformed SCRAM message (expected = in attr '%c')\n" -msgstr "" +msgid "malformed SCRAM message (expected character \"=\" for attribute \"%c\")\n" +msgstr "fehlerhafte SCRAM-Nachricht (Zeichen »=« für Attribut »%c« erwartet)\n" -#: fe-auth-scram.c:311 -msgid "failed to generate nonce\n" +#: fe-auth-scram.c:326 +msgid "could not generate nonce\n" msgstr "konnte Nonce nicht erzeugen\n" -#: fe-auth-scram.c:319 fe-auth-scram.c:336 fe-auth-scram.c:346 -#: fe-auth-scram.c:400 fe-auth-scram.c:420 fe-auth-scram.c:445 -#: fe-auth-scram.c:459 fe-auth-scram.c:501 fe-auth.c:227 fe-auth.c:362 -#: fe-auth.c:432 fe-auth.c:467 fe-auth.c:609 fe-auth.c:768 fe-auth.c:1080 -#: fe-auth.c:1228 fe-connect.c:775 fe-connect.c:1203 fe-connect.c:1379 -#: fe-connect.c:1947 fe-connect.c:2476 fe-connect.c:4062 fe-connect.c:4314 -#: fe-connect.c:4433 fe-connect.c:4673 fe-connect.c:4753 fe-connect.c:4852 -#: fe-connect.c:5108 fe-connect.c:5137 fe-connect.c:5209 fe-connect.c:5233 -#: fe-connect.c:5251 fe-connect.c:5352 fe-connect.c:5361 fe-connect.c:5717 -#: fe-connect.c:5867 fe-exec.c:2651 fe-exec.c:3398 fe-exec.c:3563 -#: fe-lobj.c:896 fe-protocol2.c:1206 fe-protocol3.c:992 fe-protocol3.c:1678 -#: fe-secure-openssl.c:514 fe-secure-openssl.c:1138 +#: fe-auth-scram.c:334 fe-auth-scram.c:408 fe-auth-scram.c:554 +#: fe-auth-scram.c:574 fe-auth-scram.c:600 fe-auth-scram.c:614 +#: fe-auth-scram.c:656 fe-auth.c:227 fe-auth.c:362 fe-auth.c:432 fe-auth.c:467 +#: fe-auth.c:628 fe-auth.c:787 fe-auth.c:1099 fe-auth.c:1247 fe-connect.c:782 +#: fe-connect.c:1209 fe-connect.c:1385 fe-connect.c:1953 fe-connect.c:2482 +#: fe-connect.c:4070 fe-connect.c:4322 fe-connect.c:4441 fe-connect.c:4691 +#: fe-connect.c:4771 fe-connect.c:4870 fe-connect.c:5126 fe-connect.c:5155 +#: fe-connect.c:5227 fe-connect.c:5251 fe-connect.c:5269 fe-connect.c:5370 +#: fe-connect.c:5379 fe-connect.c:5735 fe-connect.c:5885 fe-exec.c:2702 +#: fe-exec.c:3449 fe-exec.c:3614 fe-lobj.c:895 fe-protocol2.c:1213 +#: fe-protocol3.c:999 fe-protocol3.c:1685 
fe-secure-common.c:103 +#: fe-secure-openssl.c:458 fe-secure-openssl.c:1049 msgid "out of memory\n" msgstr "Speicher aufgebraucht\n" -#: fe-auth-scram.c:437 +#: fe-auth-scram.c:469 +msgid "invalid channel binding type\n" +msgstr "ungültiger Channel-Binding-Typ\n" + +#: fe-auth-scram.c:480 +#, c-format +msgid "empty channel binding data for channel binding type \"%s\"\n" +msgstr "leere Channel-Binding-Daten für Channel-Binding-Typ »%s«\n" + +#: fe-auth-scram.c:592 msgid "invalid SCRAM response (nonce mismatch)\n" msgstr "ungültige SCRAM-Antwort (Nonce stimmt nicht überein)\n" -#: fe-auth-scram.c:476 +#: fe-auth-scram.c:631 msgid "malformed SCRAM message (invalid iteration count)\n" -msgstr "" +msgstr "fehlerhafte SCRAM-Nachricht (ungültige Iterationszahl)\n" -#: fe-auth-scram.c:482 +#: fe-auth-scram.c:637 msgid "malformed SCRAM message (garbage at end of server-first-message)\n" -msgstr "" +msgstr "fehlerhafte SCRAM-Nachricht (Müll am Ende der »server-first-message«)\n" -#: fe-auth-scram.c:511 +#: fe-auth-scram.c:667 #, c-format -msgid "error received from server in SASL exchange: %s\n" -msgstr "Fehler vom Server empfangen im SASL-Austausch: %s\n" +msgid "error received from server in SCRAM exchange: %s\n" +msgstr "Fehler vom Server empfangen im SCRAM-Austausch: %s\n" -#: fe-auth-scram.c:526 +#: fe-auth-scram.c:683 msgid "malformed SCRAM message (garbage at end of server-final-message)\n" -msgstr "" +msgstr "fehlerhafte SCRAM-Nachricht (Müll am Ende der »server-final-message«)\n" -#: fe-auth-scram.c:534 +#: fe-auth-scram.c:691 msgid "malformed SCRAM message (invalid server signature)\n" msgstr "fehlerhafte SCRAM-Nachricht (ungültige Serversignatur)\n" @@ -124,115 +133,111 @@ msgstr "doppelte SSPI-Authentifizierungsanfrage\n" msgid "could not acquire SSPI credentials" msgstr "konnte SSPI-Credentials nicht erhalten" -#: fe-auth.c:500 +#: fe-auth.c:501 msgid "duplicate SASL authentication request\n" msgstr "doppelte SASL-Authentifizierungsanfrage\n" -#: fe-auth.c:560 +#: fe-auth.c:546 msgid "none of the server's SASL authentication mechanisms are supported\n" msgstr "keine der SASL-Authentifizierungsmechanismen des Servers werden unterstützt\n" -#: fe-auth.c:633 +#: fe-auth.c:652 #, c-format msgid "out of memory allocating SASL buffer (%d)\n" msgstr "Speicher aufgebraucht beim Anlegen des SASL-Puffers (%d)\n" -#: fe-auth.c:658 +#: fe-auth.c:677 msgid "AuthenticationSASLFinal received from server, but SASL authentication was not completed\n" msgstr "AuthenticationSASLFinal vom Server empfangen, aber SASL-Authentifizierung war noch nicht abgeschlossen\n" -#: fe-auth.c:735 +#: fe-auth.c:754 msgid "SCM_CRED authentication method not supported\n" msgstr "SCM_CRED-Authentifizierungsmethode nicht unterstützt\n" -#: fe-auth.c:826 +#: fe-auth.c:845 msgid "Kerberos 4 authentication not supported\n" msgstr "Authentifizierung mit Kerberos 4 nicht unterstützt\n" -#: fe-auth.c:831 +#: fe-auth.c:850 msgid "Kerberos 5 authentication not supported\n" msgstr "Authentifizierung mit Kerberos 5 nicht unterstützt\n" -#: fe-auth.c:902 +#: fe-auth.c:921 msgid "GSSAPI authentication not supported\n" msgstr "Authentifizierung mit GSSAPI nicht unterstützt\n" -#: fe-auth.c:934 +#: fe-auth.c:953 msgid "SSPI authentication not supported\n" msgstr "Authentifizierung mit SSPI nicht unterstützt\n" -#: fe-auth.c:942 +#: fe-auth.c:961 msgid "Crypt authentication not supported\n" msgstr "Authentifizierung mit Crypt nicht unterstützt\n" -#: fe-auth.c:1008 +#: fe-auth.c:1027 #, c-format msgid "authentication method %u not 
supported\n" msgstr "Authentifizierungsmethode %u nicht unterstützt\n" -#: fe-auth.c:1055 +#: fe-auth.c:1074 #, c-format msgid "user name lookup failure: error code %lu\n" msgstr "Fehler beim Nachschlagen des Benutzernamens: Fehlercode %lu\n" -#: fe-auth.c:1065 fe-connect.c:2403 +#: fe-auth.c:1084 fe-connect.c:2409 #, c-format msgid "could not look up local user ID %d: %s\n" msgstr "konnte lokale Benutzer-ID %d nicht nachschlagen: %s\n" -#: fe-auth.c:1070 fe-connect.c:2408 +#: fe-auth.c:1089 fe-connect.c:2414 #, c-format msgid "local user with ID %d does not exist\n" msgstr "lokaler Benutzer mit ID %d existiert nicht\n" -#: fe-auth.c:1172 +#: fe-auth.c:1191 msgid "unexpected shape of result set returned for SHOW\n" msgstr "unerwartete Form der Ergebnismenge von SHOW\n" -#: fe-auth.c:1181 +#: fe-auth.c:1200 msgid "password_encryption value too long\n" msgstr "Wert von password_encryption ist zu lang\n" -#: fe-auth.c:1221 +#: fe-auth.c:1240 #, c-format msgid "unrecognized password encryption algorithm \"%s\"\n" msgstr "unbekannter Passwortverschlüsselungsalgorithmus »%s«\n" -#: fe-connect.c:968 +#: fe-connect.c:975 #, c-format -msgid "could not match %d host names to %d hostaddrs\n" +msgid "could not match %d host names to %d hostaddr values\n" msgstr "fehlerhafte Angabe: %d Hostnamen und %d hostaddr-Angaben\n" -#: fe-connect.c:1025 +#: fe-connect.c:1032 #, c-format msgid "could not match %d port numbers to %d hosts\n" msgstr "fehlerhafte Angabe: %d Portnummern und %d Hosts\n" -#: fe-connect.c:1077 -msgid "could not get home directory to locate password file\n" -msgstr "konnte Home-Verzeichnis nicht ermitteln, um Passwortdatei zu finden\n" - -#: fe-connect.c:1129 +#: fe-connect.c:1135 #, c-format msgid "invalid sslmode value: \"%s\"\n" msgstr "ungültiger sslmode-Wert: »%s«\n" -#: fe-connect.c:1150 +#: fe-connect.c:1156 #, c-format msgid "sslmode value \"%s\" invalid when SSL support is not compiled in\n" msgstr "sslmode-Wert »%s« ist ungültig, wenn SSL-Unterstützung nicht einkompiliert worden ist\n" -#: fe-connect.c:1185 +#: fe-connect.c:1191 #, c-format msgid "invalid target_session_attrs value: \"%s\"\n" msgstr "ungültiger target_session_attrs-Wert: »%s«\n" -#: fe-connect.c:1403 +#: fe-connect.c:1409 #, c-format msgid "could not set socket to TCP no delay mode: %s\n" msgstr "konnte Socket nicht auf TCP »No Delay«-Modus umstellen: %s\n" -#: fe-connect.c:1433 +#: fe-connect.c:1439 #, c-format msgid "" "could not connect to server: %s\n" @@ -243,7 +248,7 @@ msgstr "" "\tLäuft der Server lokal und akzeptiert er Verbindungen\n" "\tauf dem Unix-Domain-Socket »%s«?\n" -#: fe-connect.c:1491 +#: fe-connect.c:1497 #, c-format msgid "" "could not connect to server: %s\n" @@ -254,7 +259,7 @@ msgstr "" "\tLäuft der Server auf dem Host »%s« (%s) und akzeptiert er\n" "\tTCP/IP-Verbindungen auf Port %s?\n" -#: fe-connect.c:1500 +#: fe-connect.c:1506 #, c-format msgid "" "could not connect to server: %s\n" @@ -265,492 +270,507 @@ msgstr "" "\tLäuft der Server auf dem Host »%s« und akzeptiert er\n" "\tTCP/IP-Verbindungen auf Port %s?\n" -#: fe-connect.c:1551 fe-connect.c:1583 fe-connect.c:1616 fe-connect.c:2175 +#: fe-connect.c:1557 fe-connect.c:1589 fe-connect.c:1622 fe-connect.c:2181 #, c-format msgid "setsockopt(%s) failed: %s\n" msgstr "setsockopt(%s) fehlgeschlagen: %s\n" -#: fe-connect.c:1665 +#: fe-connect.c:1671 #, c-format msgid "WSAIoctl(SIO_KEEPALIVE_VALS) failed: %ui\n" msgstr "WSAIoctl(SIO_KEEPALIVE_VALS) fehlgeschlagen: %ui\n" -#: fe-connect.c:1722 +#: fe-connect.c:1728 #, c-format msgid 
"invalid port number: \"%s\"\n" msgstr "ungültige Portnummer: »%s«\n" -#: fe-connect.c:1738 +#: fe-connect.c:1744 #, c-format msgid "could not translate host name \"%s\" to address: %s\n" msgstr "konnte Hostnamen »%s« nicht in Adresse übersetzen: %s\n" -#: fe-connect.c:1747 +#: fe-connect.c:1753 #, c-format msgid "could not parse network address \"%s\": %s\n" msgstr "konnte Netzwerkadresse »%s« nicht interpretieren: %s\n" -#: fe-connect.c:1758 +#: fe-connect.c:1764 #, c-format msgid "Unix-domain socket path \"%s\" is too long (maximum %d bytes)\n" msgstr "Unix-Domain-Socket-Pfad »%s« ist zu lang (maximal %d Bytes)\n" -#: fe-connect.c:1772 +#: fe-connect.c:1778 #, c-format msgid "could not translate Unix-domain socket path \"%s\" to address: %s\n" msgstr "konnte Unix-Domain-Socket-Pfad »%s« nicht in Adresse übersetzen: %s\n" -#: fe-connect.c:2053 +#: fe-connect.c:2059 msgid "invalid connection state, probably indicative of memory corruption\n" msgstr "ungültiger Verbindungszustand, möglicherweise ein Speicherproblem\n" -#: fe-connect.c:2110 +#: fe-connect.c:2116 #, c-format msgid "could not create socket: %s\n" msgstr "konnte Socket nicht erzeugen: %s\n" -#: fe-connect.c:2132 +#: fe-connect.c:2138 #, c-format msgid "could not set socket to nonblocking mode: %s\n" msgstr "konnte Socket nicht auf nicht-blockierenden Modus umstellen: %s\n" -#: fe-connect.c:2143 +#: fe-connect.c:2149 #, c-format msgid "could not set socket to close-on-exec mode: %s\n" msgstr "konnte Socket nicht auf »Close on exec«-Modus umstellen: %s\n" -#: fe-connect.c:2162 +#: fe-connect.c:2168 msgid "keepalives parameter must be an integer\n" msgstr "Parameter »keepalives« muss eine ganze Zahl sein\n" -#: fe-connect.c:2313 +#: fe-connect.c:2319 #, c-format msgid "could not get socket error status: %s\n" msgstr "konnte Socket-Fehlerstatus nicht ermitteln: %s\n" -#: fe-connect.c:2348 +#: fe-connect.c:2354 #, c-format msgid "could not get client address from socket: %s\n" msgstr "konnte Client-Adresse vom Socket nicht ermitteln: %s\n" -#: fe-connect.c:2390 +#: fe-connect.c:2396 msgid "requirepeer parameter is not supported on this platform\n" msgstr "Parameter »requirepeer« wird auf dieser Plattform nicht unterstützt\n" -#: fe-connect.c:2393 +#: fe-connect.c:2399 #, c-format msgid "could not get peer credentials: %s\n" msgstr "konnte Credentials von Gegenstelle nicht ermitteln: %s\n" -#: fe-connect.c:2416 +#: fe-connect.c:2422 #, c-format msgid "requirepeer specifies \"%s\", but actual peer user name is \"%s\"\n" msgstr "requirepeer gibt »%s« an, aber tatsächlicher Benutzername der Gegenstelle ist »%s«\n" -#: fe-connect.c:2450 +#: fe-connect.c:2456 #, c-format msgid "could not send SSL negotiation packet: %s\n" msgstr "konnte Paket zur SSL-Verhandlung nicht senden: %s\n" -#: fe-connect.c:2489 +#: fe-connect.c:2495 #, c-format msgid "could not send startup packet: %s\n" msgstr "konnte Startpaket nicht senden: %s\n" -#: fe-connect.c:2559 +#: fe-connect.c:2565 msgid "server does not support SSL, but SSL was required\n" msgstr "Server unterstützt kein SSL, aber SSL wurde verlangt\n" -#: fe-connect.c:2585 +#: fe-connect.c:2591 #, c-format msgid "received invalid response to SSL negotiation: %c\n" msgstr "ungültige Antwort auf SSL-Verhandlungspaket empfangen: %c\n" -#: fe-connect.c:2661 fe-connect.c:2694 +#: fe-connect.c:2667 fe-connect.c:2700 #, c-format msgid "expected authentication request from server, but received %c\n" msgstr "Authentifizierungsanfrage wurde vom Server erwartet, aber %c wurde empfangen\n" -#: fe-connect.c:2923 
+#: fe-connect.c:2929 msgid "unexpected message from server during startup\n" msgstr "unerwartete Nachricht vom Server beim Start\n" -#: fe-connect.c:3141 +#: fe-connect.c:3147 #, c-format msgid "could not make a writable connection to server \"%s:%s\"\n" msgstr "konnte keine schreibbare Verbindung zum Server »%s:%s« aufbauen\n" -#: fe-connect.c:3190 +#: fe-connect.c:3196 #, c-format msgid "test \"SHOW transaction_read_only\" failed on server \"%s:%s\"\n" msgstr "Test »SHOW transaction_read_only« fehlgeschlagen auf Server »%s:%s«\n" -#: fe-connect.c:3211 +#: fe-connect.c:3217 #, c-format msgid "invalid connection state %d, probably indicative of memory corruption\n" msgstr "ungültiger Verbindungszustand %d, möglicherweise ein Speicherproblem\n" -#: fe-connect.c:3668 fe-connect.c:3728 +#: fe-connect.c:3676 fe-connect.c:3736 #, c-format msgid "PGEventProc \"%s\" failed during PGEVT_CONNRESET event\n" msgstr "PGEventProc »%s« während PGEVT_CONNRESET-Ereignis fehlgeschlagen\n" -#: fe-connect.c:4075 +#: fe-connect.c:4083 #, c-format msgid "invalid LDAP URL \"%s\": scheme must be ldap://\n" msgstr "ungültige LDAP-URL »%s«: Schema muss ldap:// sein\n" -#: fe-connect.c:4090 +#: fe-connect.c:4098 #, c-format msgid "invalid LDAP URL \"%s\": missing distinguished name\n" msgstr "ungültige LDAP-URL »%s«: Distinguished Name fehlt\n" -#: fe-connect.c:4101 fe-connect.c:4154 +#: fe-connect.c:4109 fe-connect.c:4162 #, c-format msgid "invalid LDAP URL \"%s\": must have exactly one attribute\n" msgstr "ungültige LDAP-URL »%s«: muss genau ein Attribut haben\n" -#: fe-connect.c:4111 fe-connect.c:4168 +#: fe-connect.c:4119 fe-connect.c:4176 #, c-format msgid "invalid LDAP URL \"%s\": must have search scope (base/one/sub)\n" msgstr "ungültige LDAP-URL »%s«: Suchbereich fehlt (base/one/sub)\n" -#: fe-connect.c:4122 +#: fe-connect.c:4130 #, c-format msgid "invalid LDAP URL \"%s\": no filter\n" msgstr "ungültige LDAP-URL »%s«: kein Filter\n" -#: fe-connect.c:4143 +#: fe-connect.c:4151 #, c-format msgid "invalid LDAP URL \"%s\": invalid port number\n" msgstr "ungültige LDAP-URL »%s«: ungültige Portnummer\n" -#: fe-connect.c:4177 +#: fe-connect.c:4185 msgid "could not create LDAP structure\n" msgstr "konnte LDAP-Struktur nicht erzeugen\n" -#: fe-connect.c:4253 +#: fe-connect.c:4261 #, c-format msgid "lookup on LDAP server failed: %s\n" msgstr "Suche auf LDAP-Server fehlgeschlagen: %s\n" -#: fe-connect.c:4264 +#: fe-connect.c:4272 msgid "more than one entry found on LDAP lookup\n" msgstr "LDAP-Suche ergab mehr als einen Eintrag\n" -#: fe-connect.c:4265 fe-connect.c:4277 +#: fe-connect.c:4273 fe-connect.c:4285 msgid "no entry found on LDAP lookup\n" msgstr "kein Eintrag gefunden bei LDAP-Suche\n" -#: fe-connect.c:4288 fe-connect.c:4301 +#: fe-connect.c:4296 fe-connect.c:4309 msgid "attribute has no values on LDAP lookup\n" msgstr "Attribut hat keine Werte bei LDAP-Suche\n" -#: fe-connect.c:4353 fe-connect.c:4372 fe-connect.c:4891 +#: fe-connect.c:4361 fe-connect.c:4380 fe-connect.c:4909 #, c-format msgid "missing \"=\" after \"%s\" in connection info string\n" msgstr "fehlendes »=« nach »%s« in der Zeichenkette der Verbindungsdaten\n" -#: fe-connect.c:4445 fe-connect.c:5076 fe-connect.c:5850 +#: fe-connect.c:4453 fe-connect.c:5094 fe-connect.c:5868 #, c-format msgid "invalid connection option \"%s\"\n" msgstr "ungültige Verbindungsoption »%s«\n" -#: fe-connect.c:4461 fe-connect.c:4940 +#: fe-connect.c:4469 fe-connect.c:4958 msgid "unterminated quoted string in connection info string\n" msgstr "fehlendes schließendes 
Anführungszeichen (\") in der Zeichenkette der Verbindungsdaten\n" -#: fe-connect.c:4501 -msgid "could not get home directory to locate service definition file" -msgstr "konnte Home-Verzeichnis nicht ermitteln, um Servicedefinitionsdatei zu finden" - -#: fe-connect.c:4534 +#: fe-connect.c:4552 #, c-format msgid "definition of service \"%s\" not found\n" msgstr "Definition von Service »%s« nicht gefunden\n" -#: fe-connect.c:4557 +#: fe-connect.c:4575 #, c-format msgid "service file \"%s\" not found\n" msgstr "Servicedatei »%s« nicht gefunden\n" -#: fe-connect.c:4570 +#: fe-connect.c:4588 #, c-format msgid "line %d too long in service file \"%s\"\n" msgstr "Zeile %d zu lang in Servicedatei »%s«\n" -#: fe-connect.c:4641 fe-connect.c:4685 +#: fe-connect.c:4659 fe-connect.c:4703 #, c-format msgid "syntax error in service file \"%s\", line %d\n" msgstr "Syntaxfehler in Servicedatei »%s«, Zeile %d\n" -#: fe-connect.c:4652 +#: fe-connect.c:4670 #, c-format msgid "nested service specifications not supported in service file \"%s\", line %d\n" msgstr "geschachtelte »service«-Definitionen werden nicht unterstützt in Servicedatei »%s«, Zeile %d\n" -#: fe-connect.c:5372 +#: fe-connect.c:5390 #, c-format msgid "invalid URI propagated to internal parser routine: \"%s\"\n" msgstr "ungültige URI an interne Parserroutine weitergeleitet: »%s«\n" -#: fe-connect.c:5449 +#: fe-connect.c:5467 #, c-format msgid "end of string reached when looking for matching \"]\" in IPv6 host address in URI: \"%s\"\n" msgstr "Ende der Eingabezeichenkette gefunden beim Suchen nach passendem »]« in IPv6-Hostadresse in URI: »%s«\n" -#: fe-connect.c:5456 +#: fe-connect.c:5474 #, c-format msgid "IPv6 host address may not be empty in URI: \"%s\"\n" msgstr "IPv6-Hostadresse darf nicht leer sein in URI: »%s«\n" -#: fe-connect.c:5471 +#: fe-connect.c:5489 #, c-format msgid "unexpected character \"%c\" at position %d in URI (expected \":\" or \"/\"): \"%s\"\n" msgstr "unerwartetes Zeichen »%c« an Position %d in URI (»:« oder »/« erwartet): »%s«\n" -#: fe-connect.c:5600 +#: fe-connect.c:5618 #, c-format msgid "extra key/value separator \"=\" in URI query parameter: \"%s\"\n" msgstr "zusätzliches Schlüssel/Wert-Trennzeichen »=« in URI-Query-Parameter: »%s«\n" -#: fe-connect.c:5620 +#: fe-connect.c:5638 #, c-format msgid "missing key/value separator \"=\" in URI query parameter: \"%s\"\n" msgstr "fehlendes Schlüssel/Wert-Trennzeichen »=« in URI-Query-Parameter: »%s«\n" -#: fe-connect.c:5671 +#: fe-connect.c:5689 #, c-format msgid "invalid URI query parameter: \"%s\"\n" msgstr "ungültiger URI-Query-Parameter: »%s«\n" -#: fe-connect.c:5745 +#: fe-connect.c:5763 #, c-format msgid "invalid percent-encoded token: \"%s\"\n" msgstr "ungültiges Prozent-kodiertes Token: »%s«\n" -#: fe-connect.c:5755 +#: fe-connect.c:5773 #, c-format msgid "forbidden value %%00 in percent-encoded value: \"%s\"\n" msgstr "verbotener Wert %%00 in Prozent-kodiertem Wert: »%s«\n" -#: fe-connect.c:6100 +#: fe-connect.c:6119 msgid "connection pointer is NULL\n" msgstr "Verbindung ist ein NULL-Zeiger\n" -#: fe-connect.c:6398 +#: fe-connect.c:6417 #, c-format msgid "WARNING: password file \"%s\" is not a plain file\n" msgstr "WARNUNG: Passwortdatei »%s« ist keine normale Datei\n" -#: fe-connect.c:6407 +#: fe-connect.c:6426 #, c-format msgid "WARNING: password file \"%s\" has group or world access; permissions should be u=rw (0600) or less\n" msgstr "WARNUNG: Passwortdatei »%s« erlaubt Lesezugriff für Gruppe oder Andere; Rechte sollten u=rw (0600) oder weniger sein\n" -#: 
fe-connect.c:6499 +#: fe-connect.c:6518 #, c-format msgid "password retrieved from file \"%s\"\n" msgstr "Passwort wurde aus Datei »%s« gelesen\n" -#: fe-exec.c:826 +#: fe-exec.c:437 fe-exec.c:2776 +#, c-format +msgid "row number %d is out of range 0..%d" +msgstr "Zeilennummer %d ist außerhalb des zulässigen Bereichs 0..%d" + +#: fe-exec.c:498 fe-protocol2.c:502 fe-protocol2.c:537 fe-protocol2.c:1056 +#: fe-protocol3.c:208 fe-protocol3.c:235 fe-protocol3.c:252 fe-protocol3.c:332 +#: fe-protocol3.c:727 fe-protocol3.c:958 +msgid "out of memory" +msgstr "Speicher aufgebraucht" + +#: fe-exec.c:499 fe-protocol2.c:1402 fe-protocol3.c:1893 +#, c-format +msgid "%s" +msgstr "%s" + +#: fe-exec.c:847 msgid "NOTICE" msgstr "HINWEIS" -#: fe-exec.c:1141 fe-exec.c:1199 fe-exec.c:1245 +#: fe-exec.c:905 +msgid "PGresult cannot support more than INT_MAX tuples" +msgstr "PGresult kann nicht mehr als INT_MAX Tupel enthalten" + +#: fe-exec.c:917 +msgid "size_t overflow" +msgstr "Überlauf von size_t" + +#: fe-exec.c:1192 fe-exec.c:1250 fe-exec.c:1296 msgid "command string is a null pointer\n" msgstr "Befehlszeichenkette ist ein NULL-Zeiger\n" -#: fe-exec.c:1205 fe-exec.c:1251 fe-exec.c:1346 +#: fe-exec.c:1256 fe-exec.c:1302 fe-exec.c:1397 msgid "number of parameters must be between 0 and 65535\n" msgstr "Anzahl der Parameter muss zwischen 0 und 65535 sein\n" -#: fe-exec.c:1239 fe-exec.c:1340 +#: fe-exec.c:1290 fe-exec.c:1391 msgid "statement name is a null pointer\n" msgstr "Anweisungsname ist ein NULL-Zeiger\n" -#: fe-exec.c:1259 fe-exec.c:1422 fe-exec.c:2140 fe-exec.c:2339 +#: fe-exec.c:1310 fe-exec.c:1473 fe-exec.c:2191 fe-exec.c:2390 msgid "function requires at least protocol version 3.0\n" msgstr "Funktion erfordert mindestens Protokollversion 3.0\n" -#: fe-exec.c:1377 +#: fe-exec.c:1428 msgid "no connection to the server\n" msgstr "keine Verbindung mit dem Server\n" -#: fe-exec.c:1384 +#: fe-exec.c:1435 msgid "another command is already in progress\n" msgstr "ein anderer Befehl ist bereits in Ausführung\n" -#: fe-exec.c:1498 +#: fe-exec.c:1549 msgid "length must be given for binary parameter\n" msgstr "für binäre Parameter muss eine Länge angegeben werden\n" -#: fe-exec.c:1770 +#: fe-exec.c:1821 #, c-format msgid "unexpected asyncStatus: %d\n" msgstr "unerwarteter asyncStatus: %d\n" -#: fe-exec.c:1790 +#: fe-exec.c:1841 #, c-format msgid "PGEventProc \"%s\" failed during PGEVT_RESULTCREATE event\n" msgstr "PGEventProc »%s« während PGEVT_RESULTCREATE-Ereignis fehlgeschlagen\n" -#: fe-exec.c:1950 +#: fe-exec.c:2001 msgid "COPY terminated by new PQexec" msgstr "COPY von neuem PQexec beendet" -#: fe-exec.c:1958 +#: fe-exec.c:2009 msgid "COPY IN state must be terminated first\n" msgstr "COPY-IN-Zustand muss erst beendet werden\n" -#: fe-exec.c:1978 +#: fe-exec.c:2029 msgid "COPY OUT state must be terminated first\n" msgstr "COPY-OUT-Zustand muss erst beendet werden\n" -#: fe-exec.c:1986 +#: fe-exec.c:2037 msgid "PQexec not allowed during COPY BOTH\n" msgstr "PQexec ist während COPY BOTH nicht erlaubt\n" -#: fe-exec.c:2229 fe-exec.c:2296 fe-exec.c:2386 fe-protocol2.c:1352 -#: fe-protocol3.c:1817 +#: fe-exec.c:2280 fe-exec.c:2347 fe-exec.c:2437 fe-protocol2.c:1359 +#: fe-protocol3.c:1824 msgid "no COPY in progress\n" msgstr "keine COPY in Ausführung\n" -#: fe-exec.c:2576 +#: fe-exec.c:2627 msgid "connection in wrong state\n" msgstr "Verbindung im falschen Zustand\n" -#: fe-exec.c:2607 +#: fe-exec.c:2658 msgid "invalid ExecStatusType code" msgstr "ungültiger ExecStatusType-Kode" -#: fe-exec.c:2634 +#: 
fe-exec.c:2685 msgid "PGresult is not an error result\n" msgstr "PGresult ist kein Fehlerresultat\n" -#: fe-exec.c:2709 fe-exec.c:2732 +#: fe-exec.c:2760 fe-exec.c:2783 #, c-format msgid "column number %d is out of range 0..%d" msgstr "Spaltennummer %d ist außerhalb des zulässigen Bereichs 0..%d" -#: fe-exec.c:2725 -#, c-format -msgid "row number %d is out of range 0..%d" -msgstr "Zeilennummer %d ist außerhalb des zulässigen Bereichs 0..%d" - -#: fe-exec.c:2747 +#: fe-exec.c:2798 #, c-format msgid "parameter number %d is out of range 0..%d" msgstr "Parameternummer %d ist außerhalb des zulässigen Bereichs 0..%d" -#: fe-exec.c:3057 +#: fe-exec.c:3108 #, c-format msgid "could not interpret result from server: %s" msgstr "konnte Ergebnis vom Server nicht interpretieren: %s" -#: fe-exec.c:3296 fe-exec.c:3380 +#: fe-exec.c:3347 fe-exec.c:3431 msgid "incomplete multibyte character\n" msgstr "unvollständiges Mehrbyte-Zeichen\n" -#: fe-lobj.c:155 +#: fe-lobj.c:154 msgid "cannot determine OID of function lo_truncate\n" msgstr "kann OID der Funktion lo_truncate nicht ermitteln\n" -#: fe-lobj.c:171 +#: fe-lobj.c:170 msgid "argument of lo_truncate exceeds integer range\n" msgstr "Argument von lo_truncate überschreitet Bereich für ganze Zahlen\n" -#: fe-lobj.c:222 +#: fe-lobj.c:221 msgid "cannot determine OID of function lo_truncate64\n" msgstr "kann OID der Funktion lo_truncate64 nicht ermitteln\n" -#: fe-lobj.c:280 +#: fe-lobj.c:279 msgid "argument of lo_read exceeds integer range\n" msgstr "Argument von lo_read überschreitet Bereich für ganze Zahlen\n" -#: fe-lobj.c:335 +#: fe-lobj.c:334 msgid "argument of lo_write exceeds integer range\n" msgstr "Argument von lo_write überschreitet Bereich für ganze Zahlen\n" -#: fe-lobj.c:426 +#: fe-lobj.c:425 msgid "cannot determine OID of function lo_lseek64\n" msgstr "kann OID der Funktion lo_lseek64 nicht ermitteln\n" -#: fe-lobj.c:522 +#: fe-lobj.c:521 msgid "cannot determine OID of function lo_create\n" msgstr "kann OID der Funktion lo_create nicht ermitteln\n" -#: fe-lobj.c:601 +#: fe-lobj.c:600 msgid "cannot determine OID of function lo_tell64\n" msgstr "kann OID der Funktion lo_tell64 nicht ermitteln\n" -#: fe-lobj.c:707 fe-lobj.c:816 +#: fe-lobj.c:706 fe-lobj.c:815 #, c-format msgid "could not open file \"%s\": %s\n" msgstr "konnte Datei »%s« nicht öffnen: %s\n" -#: fe-lobj.c:762 +#: fe-lobj.c:761 #, c-format msgid "could not read from file \"%s\": %s\n" -msgstr "konnte nicht aus Datei »%s« nicht lesen: %s\n" +msgstr "konnte nicht aus Datei »%s« lesen: %s\n" -#: fe-lobj.c:836 fe-lobj.c:860 +#: fe-lobj.c:835 fe-lobj.c:859 #, c-format msgid "could not write to file \"%s\": %s\n" msgstr "konnte nicht in Datei »%s« schreiben: %s\n" -#: fe-lobj.c:947 +#: fe-lobj.c:946 msgid "query to initialize large object functions did not return data\n" msgstr "Abfrage zur Initialisierung der Large-Object-Funktionen ergab keine Daten\n" -#: fe-lobj.c:996 +#: fe-lobj.c:995 msgid "cannot determine OID of function lo_open\n" msgstr "kann OID der Funktion lo_open nicht ermitteln\n" -#: fe-lobj.c:1003 +#: fe-lobj.c:1002 msgid "cannot determine OID of function lo_close\n" msgstr "kann OID der Funktion lo_close nicht ermitteln\n" -#: fe-lobj.c:1010 +#: fe-lobj.c:1009 msgid "cannot determine OID of function lo_creat\n" msgstr "kann OID der Funktion lo_creat nicht ermitteln\n" -#: fe-lobj.c:1017 +#: fe-lobj.c:1016 msgid "cannot determine OID of function lo_unlink\n" msgstr "kann OID der Funktion lo_unlink nicht ermitteln\n" -#: fe-lobj.c:1024 +#: fe-lobj.c:1023 msgid "cannot 
determine OID of function lo_lseek\n" msgstr "kann OID der Funktion lo_lseek nicht ermitteln\n" -#: fe-lobj.c:1031 +#: fe-lobj.c:1030 msgid "cannot determine OID of function lo_tell\n" msgstr "kann OID der Funktion lo_tell nicht ermitteln\n" -#: fe-lobj.c:1038 +#: fe-lobj.c:1037 msgid "cannot determine OID of function loread\n" msgstr "kann OID der Funktion loread nicht ermitteln\n" -#: fe-lobj.c:1045 +#: fe-lobj.c:1044 msgid "cannot determine OID of function lowrite\n" msgstr "kann OID der Funktion lowrite nicht ermitteln\n" -#: fe-misc.c:292 +#: fe-misc.c:290 #, c-format msgid "integer of size %lu not supported by pqGetInt" msgstr "Integer der Größe %lu wird von pqGetInt nicht unterstützt" -#: fe-misc.c:328 +#: fe-misc.c:326 #, c-format msgid "integer of size %lu not supported by pqPutInt" msgstr "Integer der Größe %lu wird von pqPutInt nicht unterstützt" -#: fe-misc.c:639 fe-misc.c:840 +#: fe-misc.c:637 fe-misc.c:838 msgid "connection not open\n" msgstr "Verbindung nicht offen\n" -#: fe-misc.c:809 fe-secure-openssl.c:229 fe-secure-openssl.c:338 -#: fe-secure.c:253 fe-secure.c:362 +#: fe-misc.c:807 fe-secure-openssl.c:206 fe-secure-openssl.c:314 +#: fe-secure.c:261 fe-secure.c:371 msgid "" "server closed the connection unexpectedly\n" "\tThis probably means the server terminated abnormally\n" @@ -760,255 +780,261 @@ msgstr "" "\tDas heißt wahrscheinlich, dass der Server abnormal beendete\n" "\tbevor oder während die Anweisung bearbeitet wurde.\n" -#: fe-misc.c:1011 +#: fe-misc.c:1009 msgid "timeout expired\n" msgstr "Timeout abgelaufen\n" -#: fe-misc.c:1056 +#: fe-misc.c:1054 msgid "invalid socket\n" msgstr "ungültiges Socket\n" -#: fe-misc.c:1079 +#: fe-misc.c:1077 #, c-format msgid "select() failed: %s\n" msgstr "select() fehlgeschlagen: %s\n" -#: fe-protocol2.c:91 +#: fe-protocol2.c:90 #, c-format msgid "invalid setenv state %c, probably indicative of memory corruption\n" msgstr "ungültiger Setenv-Zustand %c, möglicherweise ein Speicherproblem\n" -#: fe-protocol2.c:390 +#: fe-protocol2.c:389 #, c-format msgid "invalid state %c, probably indicative of memory corruption\n" msgstr "ungültiger Zustand %c, möglicherweise ein Speicherproblem\n" -#: fe-protocol2.c:479 fe-protocol3.c:186 +#: fe-protocol2.c:478 fe-protocol3.c:185 #, c-format msgid "message type 0x%02x arrived from server while idle" msgstr "Nachricht vom Typ 0x%02x kam vom Server im Ruhezustand" -#: fe-protocol2.c:503 fe-protocol2.c:538 fe-protocol2.c:1049 -#: fe-protocol3.c:209 fe-protocol3.c:236 fe-protocol3.c:253 fe-protocol3.c:333 -#: fe-protocol3.c:728 fe-protocol3.c:951 -msgid "out of memory" -msgstr "Speicher aufgebraucht" - -#: fe-protocol2.c:529 +#: fe-protocol2.c:528 #, c-format msgid "unexpected character %c following empty query response (\"I\" message)" msgstr "unerwartetes Zeichen %c kam nach Antwort auf leere Anfrage (»I«-Nachricht)" -#: fe-protocol2.c:595 +#: fe-protocol2.c:594 #, c-format msgid "server sent data (\"D\" message) without prior row description (\"T\" message)" msgstr "Server sendete Daten (»D«-Nachricht) ohne vorherige Zeilenbeschreibung (»T«-Nachricht)" -#: fe-protocol2.c:613 +#: fe-protocol2.c:612 #, c-format msgid "server sent binary data (\"B\" message) without prior row description (\"T\" message)" msgstr "Server sendete binäre Daten (»B«-Nachricht) ohne vorherige Zeilenbeschreibung (»T«-Nachricht)" -#: fe-protocol2.c:633 fe-protocol3.c:412 +#: fe-protocol2.c:632 fe-protocol3.c:411 #, c-format msgid "unexpected response from server; first received character was \"%c\"\n" msgstr "unerwartete 
Antwort vom Server; erstes empfangenes Zeichen war »%c«\n" -#: fe-protocol2.c:762 fe-protocol2.c:937 fe-protocol3.c:627 fe-protocol3.c:854 +#: fe-protocol2.c:761 fe-protocol2.c:936 fe-protocol3.c:626 fe-protocol3.c:853 msgid "out of memory for query result" msgstr "Speicher für Anfrageergebnis aufgebraucht" -#: fe-protocol2.c:1395 fe-protocol3.c:1886 -#, c-format -msgid "%s" -msgstr "%s" - -#: fe-protocol2.c:1407 +#: fe-protocol2.c:1414 #, c-format msgid "lost synchronization with server, resetting connection" msgstr "Synchronisation mit Server verloren, Verbindung wird zurückgesetzt" -#: fe-protocol2.c:1541 fe-protocol2.c:1573 fe-protocol3.c:2089 +#: fe-protocol2.c:1548 fe-protocol2.c:1580 fe-protocol3.c:2096 #, c-format msgid "protocol error: id=0x%x\n" msgstr "Protokollfehler: id=0x%x\n" -#: fe-protocol3.c:368 +#: fe-protocol3.c:367 msgid "server sent data (\"D\" message) without prior row description (\"T\" message)\n" msgstr "Server sendete Daten (»D«-Nachricht) ohne vorherige Zeilenbeschreibung (»T«-Nachricht)\n" -#: fe-protocol3.c:433 +#: fe-protocol3.c:432 #, c-format msgid "message contents do not agree with length in message type \"%c\"\n" msgstr "Nachrichteninhalt stimmt nicht mit Länge in Nachrichtentyp »%c« überein\n" -#: fe-protocol3.c:454 +#: fe-protocol3.c:453 #, c-format msgid "lost synchronization with server: got message type \"%c\", length %d\n" msgstr "Synchronisation mit Server verloren: Nachrichtentyp »%c« empfangen, Länge %d\n" -#: fe-protocol3.c:505 fe-protocol3.c:545 +#: fe-protocol3.c:504 fe-protocol3.c:544 msgid "insufficient data in \"T\" message" msgstr "nicht genug Daten in »T«-Nachricht" -#: fe-protocol3.c:578 +#: fe-protocol3.c:577 msgid "extraneous data in \"T\" message" msgstr "zu viele Daten in »T«-Nachricht" -#: fe-protocol3.c:691 +#: fe-protocol3.c:690 msgid "extraneous data in \"t\" message" msgstr "zu viele Daten in »t«-Nachricht" -#: fe-protocol3.c:762 fe-protocol3.c:794 fe-protocol3.c:812 +#: fe-protocol3.c:761 fe-protocol3.c:793 fe-protocol3.c:811 msgid "insufficient data in \"D\" message" msgstr "nicht genug Daten in »D«-Nachricht" -#: fe-protocol3.c:768 +#: fe-protocol3.c:767 msgid "unexpected field count in \"D\" message" msgstr "unerwartete Feldzahl in »D«-Nachricht" -#: fe-protocol3.c:821 +#: fe-protocol3.c:820 msgid "extraneous data in \"D\" message" msgstr "zu viele Daten in »D«-Nachricht" -#: fe-protocol3.c:1005 +#: fe-protocol3.c:1012 msgid "no error message available\n" msgstr "keine Fehlermeldung verfügbar\n" #. 
translator: %s represents a digit string -#: fe-protocol3.c:1035 fe-protocol3.c:1054 +#: fe-protocol3.c:1042 fe-protocol3.c:1061 #, c-format msgid " at character %s" msgstr " bei Zeichen %s" -#: fe-protocol3.c:1067 +#: fe-protocol3.c:1074 #, c-format msgid "DETAIL: %s\n" msgstr "DETAIL: %s\n" -#: fe-protocol3.c:1070 +#: fe-protocol3.c:1077 #, c-format msgid "HINT: %s\n" msgstr "TIP: %s\n" -#: fe-protocol3.c:1073 +#: fe-protocol3.c:1080 #, c-format msgid "QUERY: %s\n" msgstr "ANFRAGE: %s\n" -#: fe-protocol3.c:1080 +#: fe-protocol3.c:1087 #, c-format msgid "CONTEXT: %s\n" msgstr "KONTEXT: %s\n" -#: fe-protocol3.c:1089 +#: fe-protocol3.c:1096 #, c-format msgid "SCHEMA NAME: %s\n" msgstr "SCHEMANAME: %s\n" -#: fe-protocol3.c:1093 +#: fe-protocol3.c:1100 #, c-format msgid "TABLE NAME: %s\n" msgstr "TABELLENNAME: %s\n" -#: fe-protocol3.c:1097 +#: fe-protocol3.c:1104 #, c-format msgid "COLUMN NAME: %s\n" msgstr "SPALTENNAME: %s\n" -#: fe-protocol3.c:1101 +#: fe-protocol3.c:1108 #, c-format msgid "DATATYPE NAME: %s\n" msgstr "DATENTYPNAME: %s\n" -#: fe-protocol3.c:1105 +#: fe-protocol3.c:1112 #, c-format msgid "CONSTRAINT NAME: %s\n" msgstr "CONSTRAINT-NAME: %s\n" -#: fe-protocol3.c:1117 +#: fe-protocol3.c:1124 msgid "LOCATION: " msgstr "ORT: " -#: fe-protocol3.c:1119 +#: fe-protocol3.c:1126 #, c-format msgid "%s, " msgstr "%s, " -#: fe-protocol3.c:1121 +#: fe-protocol3.c:1128 #, c-format msgid "%s:%s" msgstr "%s:%s" -#: fe-protocol3.c:1316 +#: fe-protocol3.c:1323 #, c-format msgid "LINE %d: " msgstr "ZEILE %d: " -#: fe-protocol3.c:1711 +#: fe-protocol3.c:1718 msgid "PQgetline: not doing text COPY OUT\n" msgstr "PQgetline: Text COPY OUT nicht ausgeführt\n" -#: fe-secure-openssl.c:234 fe-secure-openssl.c:343 fe-secure-openssl.c:1323 +#: fe-secure-common.c:117 +msgid "SSL certificate's name contains embedded null\n" +msgstr "Name im SSL-Zertifikat enthält Null-Byte\n" + +#: fe-secure-common.c:164 +msgid "host name must be specified for a verified SSL connection\n" +msgstr "Hostname muss angegeben werden für eine verifizierte SSL-Verbindung\n" + +#: fe-secure-common.c:189 +#, c-format +msgid "server certificate for \"%s\" does not match host name \"%s\"\n" +msgstr "Server-Zertifikat für »%s« stimmt nicht mit dem Hostnamen »%s« überein\n" + +#: fe-secure-common.c:195 +msgid "could not get server's host name from server certificate\n" +msgstr "konnte Hostnamen des Servers nicht aus dem Serverzertifikat ermitteln\n" + +#: fe-secure-openssl.c:211 fe-secure-openssl.c:319 fe-secure-openssl.c:1243 #, c-format msgid "SSL SYSCALL error: %s\n" msgstr "SSL-SYSCALL-Fehler: %s\n" -#: fe-secure-openssl.c:241 fe-secure-openssl.c:350 fe-secure-openssl.c:1327 +#: fe-secure-openssl.c:218 fe-secure-openssl.c:326 fe-secure-openssl.c:1247 msgid "SSL SYSCALL error: EOF detected\n" msgstr "SSL-SYSCALL-Fehler: Dateiende entdeckt\n" -#: fe-secure-openssl.c:252 fe-secure-openssl.c:361 fe-secure-openssl.c:1336 +#: fe-secure-openssl.c:229 fe-secure-openssl.c:337 fe-secure-openssl.c:1256 #, c-format msgid "SSL error: %s\n" msgstr "SSL-Fehler: %s\n" -#: fe-secure-openssl.c:267 fe-secure-openssl.c:376 +#: fe-secure-openssl.c:244 fe-secure-openssl.c:352 msgid "SSL connection has been closed unexpectedly\n" msgstr "SSL-Verbindung wurde unerwartet geschlossen\n" -#: fe-secure-openssl.c:273 fe-secure-openssl.c:382 fe-secure-openssl.c:1345 +#: fe-secure-openssl.c:250 fe-secure-openssl.c:358 fe-secure-openssl.c:1265 #, c-format msgid "unrecognized SSL error code: %d\n" msgstr "unbekannter SSL-Fehlercode: %d\n" -#: 
fe-secure-openssl.c:494 -msgid "SSL certificate's name entry is missing\n" -msgstr "Namenseintrag fehlt im SSL-Zertifikat\n" +#: fe-secure-openssl.c:418 +msgid "could not determine server certificate signature algorithm\n" +msgstr "konnte Signaturalgorithmus des Serverzertifikats nicht ermitteln\n" -#: fe-secure-openssl.c:528 -msgid "SSL certificate's name contains embedded null\n" -msgstr "Name im SSL-Zertifikat enthält Null-Byte\n" +#: fe-secure-openssl.c:439 +#, c-format +msgid "could not find digest for NID %s\n" +msgstr "konnte Digest für NID %s nicht finden\n" -#: fe-secure-openssl.c:580 -msgid "host name must be specified for a verified SSL connection\n" -msgstr "Hostname muss angegeben werden für eine verifizierte SSL-Verbindung\n" +#: fe-secure-openssl.c:449 +msgid "could not generate peer certificate hash\n" +msgstr "konnte Hash des Zertifikats der Gegenstelle nicht erzeugen\n" -#: fe-secure-openssl.c:680 -#, c-format -msgid "server certificate for \"%s\" does not match host name \"%s\"\n" -msgstr "Server-Zertifikat für »%s« stimmt nicht mit dem Hostnamen »%s« überein\n" +#: fe-secure-openssl.c:467 +msgid "channel binding type \"tls-server-end-point\" is not supported by this build\n" +msgstr "Channel-Binding-Typ »tls-server-end-point« wird von dieser Installation nicht unterstützt\n" -#: fe-secure-openssl.c:686 -msgid "could not get server's host name from server certificate\n" -msgstr "konnte Hostnamen des Servers nicht aus dem Serverzertifikat ermitteln\n" +#: fe-secure-openssl.c:510 +msgid "SSL certificate's name entry is missing\n" +msgstr "Namenseintrag fehlt im SSL-Zertifikat\n" -#: fe-secure-openssl.c:928 +#: fe-secure-openssl.c:839 #, c-format msgid "could not create SSL context: %s\n" msgstr "konnte SSL-Kontext nicht erzeugen: %s\n" -#: fe-secure-openssl.c:965 +#: fe-secure-openssl.c:876 #, c-format msgid "could not read root certificate file \"%s\": %s\n" msgstr "konnte Root-Zertifikat-Datei »%s« nicht lesen: %s\n" -#: fe-secure-openssl.c:993 +#: fe-secure-openssl.c:904 #, c-format msgid "SSL library does not support CRL certificates (file \"%s\")\n" msgstr "SSL-Bibliothek unterstützt keine CRL-Zertifikate (Datei »%s«)\n" -#: fe-secure-openssl.c:1021 +#: fe-secure-openssl.c:932 msgid "" "could not get home directory to locate root certificate file\n" "Either provide the file or change sslmode to disable server certificate verification.\n" @@ -1016,7 +1042,7 @@ msgstr "" "konnte Home-Verzeichnis nicht ermitteln, um Root-Zertifikat-Datei zu finden\n" "Legen Sie entweder die Datei an oder ändern Sie sslmode, um die Überprüfung der Serverzertifikate abzuschalten.\n" -#: fe-secure-openssl.c:1025 +#: fe-secure-openssl.c:936 #, c-format msgid "" "root certificate file \"%s\" does not exist\n" @@ -1025,82 +1051,82 @@ msgstr "" "Root-Zertifikat-Datei »%s« existiert nicht\n" "Legen Sie entweder die Datei an oder ändern Sie sslmode, um die Überprüfung der Serverzertifikate abzuschalten.\n" -#: fe-secure-openssl.c:1056 +#: fe-secure-openssl.c:967 #, c-format msgid "could not open certificate file \"%s\": %s\n" msgstr "konnte Zertifikatdatei »%s« nicht öffnen: %s\n" -#: fe-secure-openssl.c:1075 +#: fe-secure-openssl.c:986 #, c-format msgid "could not read certificate file \"%s\": %s\n" msgstr "konnte Zertifikatdatei »%s« nicht lesen: %s\n" -#: fe-secure-openssl.c:1100 +#: fe-secure-openssl.c:1011 #, c-format msgid "could not establish SSL connection: %s\n" msgstr "konnte SSL-Verbindung nicht aufbauen: %s\n" -#: fe-secure-openssl.c:1154 +#: fe-secure-openssl.c:1065 #, c-format msgid 
"could not load SSL engine \"%s\": %s\n" msgstr "konnte SSL-Engine »%s« nicht laden: %s\n" -#: fe-secure-openssl.c:1166 +#: fe-secure-openssl.c:1077 #, c-format msgid "could not initialize SSL engine \"%s\": %s\n" msgstr "konnte SSL-Engine »%s« nicht initialisieren: %s\n" -#: fe-secure-openssl.c:1182 +#: fe-secure-openssl.c:1093 #, c-format msgid "could not read private SSL key \"%s\" from engine \"%s\": %s\n" msgstr "konnte privaten SSL-Schlüssel »%s« nicht von Engine »%s« lesen: %s\n" -#: fe-secure-openssl.c:1196 +#: fe-secure-openssl.c:1107 #, c-format msgid "could not load private SSL key \"%s\" from engine \"%s\": %s\n" msgstr "konnte privaten SSL-Schlüssel »%s« nicht von Engine »%s« laden: %s\n" -#: fe-secure-openssl.c:1233 +#: fe-secure-openssl.c:1144 #, c-format msgid "certificate present, but not private key file \"%s\"\n" msgstr "Zertifikat vorhanden, aber keine private Schlüsseldatei »%s«\n" -#: fe-secure-openssl.c:1241 +#: fe-secure-openssl.c:1152 #, c-format msgid "private key file \"%s\" has group or world access; permissions should be u=rw (0600) or less\n" msgstr "WARNUNG: private Schlüsseldatei »%s« erlaubt Lesezugriff für Gruppe oder Andere; Rechte sollten u=rw (0600) oder weniger sein\n" -#: fe-secure-openssl.c:1252 +#: fe-secure-openssl.c:1163 #, c-format msgid "could not load private key file \"%s\": %s\n" msgstr "konnte private Schlüsseldatei »%s« nicht laden: %s\n" -#: fe-secure-openssl.c:1266 +#: fe-secure-openssl.c:1177 #, c-format msgid "certificate does not match private key file \"%s\": %s\n" msgstr "Zertifikat passt nicht zur privaten Schlüsseldatei »%s«: %s\n" -#: fe-secure-openssl.c:1366 +#: fe-secure-openssl.c:1286 #, c-format msgid "certificate could not be obtained: %s\n" msgstr "Zertifikat konnte nicht ermittelt werden: %s\n" -#: fe-secure-openssl.c:1458 +#: fe-secure-openssl.c:1375 #, c-format msgid "no SSL error reported" msgstr "kein SSL-Fehler berichtet" -#: fe-secure-openssl.c:1467 +#: fe-secure-openssl.c:1384 #, c-format msgid "SSL error code %lu" msgstr "SSL-Fehlercode %lu" -#: fe-secure.c:261 +#: fe-secure.c:269 #, c-format msgid "could not receive data from server: %s\n" msgstr "konnte keine Daten vom Server empfangen: %s\n" -#: fe-secure.c:369 +#: fe-secure.c:378 #, c-format msgid "could not send data to server: %s\n" msgstr "konnte keine Daten an den Server senden: %s\n" diff --git a/src/interfaces/libpq/po/es.po b/src/interfaces/libpq/po/es.po index 7e335da1f2..d88f0a1a02 100644 --- a/src/interfaces/libpq/po/es.po +++ b/src/interfaces/libpq/po/es.po @@ -6,20 +6,98 @@ # Karim , 2002. 
# Alvaro Herrera , 2003-2013 # Mario González , 2005 +# Carlos Chapi , 2017 # msgid "" msgstr "" "Project-Id-Version: libpq (PostgreSQL) 10\n" "Report-Msgid-Bugs-To: pgsql-bugs@postgresql.org\n" -"POT-Creation-Date: 2017-07-10 20:38+0000\n" -"PO-Revision-Date: 2017-07-11 11:22-0500\n" +"POT-Creation-Date: 2017-08-29 20:38+0000\n" +"PO-Revision-Date: 2017-08-29 21:07-0500\n" "Last-Translator: Carlos Chapi \n" "Language-Team: PgSQL-es-Ayuda \n" "Language: es\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" -"X-Generator: Poedit 2.0.2\n" +"X-Generator: BlackCAT 1.0\n" + +#: fe-auth-scram.c:176 +msgid "malformed SCRAM message (empty message)\n" +msgstr "mensaje SCRAM no es válido (mensaje vacío)\n" + +#: fe-auth-scram.c:182 +msgid "malformed SCRAM message (length mismatch)\n" +msgstr "mensaje SCRAM no es válido (longitud no coincide)\n" + +#: fe-auth-scram.c:231 +msgid "incorrect server signature\n" +msgstr "signatura de servidor incorrecta\n" + +#: fe-auth-scram.c:240 +msgid "invalid SCRAM exchange state\n" +msgstr "estado de intercambio SCRAM no es válido\n" + +#: fe-auth-scram.c:263 +#, c-format +msgid "malformed SCRAM message (attribute \"%c\" expected)\n" +msgstr "mensaje SCRAM no es válido (se esperaba atributo «%c»)\n" + +#: fe-auth-scram.c:272 +#, c-format +msgid "malformed SCRAM message (expected character \"=\" for attribute \"%c\")\n" +msgstr "" +"mensaje SCRAM no es válido (se esperaba el carácter «=» para el atributo " +"«%c»)\n" + +#: fe-auth-scram.c:311 +msgid "could not generate nonce\n" +msgstr "no se pude generar nonce\n" + +#: fe-auth-scram.c:319 fe-auth-scram.c:336 fe-auth-scram.c:346 +#: fe-auth-scram.c:400 fe-auth-scram.c:420 fe-auth-scram.c:445 +#: fe-auth-scram.c:459 fe-auth-scram.c:501 fe-auth.c:227 fe-auth.c:362 +#: fe-auth.c:432 fe-auth.c:467 fe-auth.c:609 fe-auth.c:768 fe-auth.c:1080 +#: fe-auth.c:1228 fe-connect.c:775 fe-connect.c:1203 fe-connect.c:1379 +#: fe-connect.c:1947 fe-connect.c:2476 fe-connect.c:4062 fe-connect.c:4314 +#: fe-connect.c:4433 fe-connect.c:4673 fe-connect.c:4753 fe-connect.c:4852 +#: fe-connect.c:5108 fe-connect.c:5137 fe-connect.c:5209 fe-connect.c:5233 +#: fe-connect.c:5251 fe-connect.c:5352 fe-connect.c:5361 fe-connect.c:5717 +#: fe-connect.c:5867 fe-exec.c:2702 fe-exec.c:3449 fe-exec.c:3614 +#: fe-lobj.c:896 fe-protocol2.c:1206 fe-protocol3.c:992 fe-protocol3.c:1678 +#: fe-secure-openssl.c:514 fe-secure-openssl.c:1138 +msgid "out of memory\n" +msgstr "memoria agotada\n" + +#: fe-auth-scram.c:437 +msgid "invalid SCRAM response (nonce mismatch)\n" +msgstr "respuesta SCRAM no es válida (nonce no coincide)\n" + +#: fe-auth-scram.c:476 +msgid "malformed SCRAM message (invalid iteration count)\n" +msgstr "mensaje SCRAM no es válido (el conteo de iteración no es válido)\n" + +#: fe-auth-scram.c:482 +msgid "malformed SCRAM message (garbage at end of server-first-message)\n" +msgstr "" +"mensaje SCRAM no es válido (se encontró basura al final de server-first-" +"message)\n" + +#: fe-auth-scram.c:511 +#, c-format +msgid "error received from server in SCRAM exchange: %s\n" +msgstr "" +"se recibió un error desde el servidor durante el intercambio SCRAM: %s\n" + +#: fe-auth-scram.c:526 +msgid "malformed SCRAM message (garbage at end of server-final-message)\n" +msgstr "" +"mensaje SCRAM no válido (se encontró basura al final de server-final-" +"message)\n" + +#: fe-auth-scram.c:534 +msgid "malformed SCRAM message (invalid server signature)\n" +msgstr "mensaje SCRAM no es válido (la signatura del 
servidor no es válida)\n" #: fe-auth.c:122 #, c-format @@ -38,19 +116,6 @@ msgstr "el nombre de servidor debe ser especificado\n" msgid "duplicate GSS authentication request\n" msgstr "petición de autentificación GSS duplicada\n" -#: fe-auth.c:227 fe-auth.c:362 fe-auth.c:432 fe-auth.c:467 fe-auth.c:609 -#: fe-auth.c:768 fe-auth.c:1080 fe-auth.c:1228 fe-connect.c:775 -#: fe-connect.c:1203 fe-connect.c:1379 fe-connect.c:1947 fe-connect.c:2476 -#: fe-connect.c:4062 fe-connect.c:4314 fe-connect.c:4433 fe-connect.c:4673 -#: fe-connect.c:4753 fe-connect.c:4852 fe-connect.c:5108 fe-connect.c:5137 -#: fe-connect.c:5209 fe-connect.c:5233 fe-connect.c:5251 fe-connect.c:5352 -#: fe-connect.c:5361 fe-connect.c:5717 fe-connect.c:5867 fe-exec.c:2651 -#: fe-exec.c:3398 fe-exec.c:3563 fe-lobj.c:896 fe-protocol2.c:1206 -#: fe-protocol3.c:992 fe-protocol3.c:1678 fe-secure-openssl.c:514 -#: fe-secure-openssl.c:1138 -msgid "out of memory\n" -msgstr "memoria agotada\n" - #: fe-auth.c:240 msgid "GSSAPI name import error" msgstr "error en conversión de nombre GSSAPI" @@ -86,8 +151,12 @@ msgid "out of memory allocating SASL buffer (%d)\n" msgstr "memoria agotada creando el búfer SASL (%d)\n" #: fe-auth.c:658 -msgid "AuthenticationSASLFinal received from server, but SASL authentication was not completed\n" -msgstr "Se recibió AuthenticationSASLFinal desde el servidor, pero la autentificación SASL no se completó\n" +msgid "" +"AuthenticationSASLFinal received from server, but SASL authentication was " +"not completed\n" +msgstr "" +"Se recibió AuthenticationSASLFinal desde el servidor, pero la " +"autentificación SASL no se completó\n" #: fe-auth.c:735 msgid "SCM_CRED authentication method not supported\n" @@ -158,7 +227,9 @@ msgstr "no se pudo emparejar %d números de puertos a %d hosts\n" #: fe-connect.c:1077 msgid "could not get home directory to locate password file\n" -msgstr "no se pudo obtener el directorio home para localizar el archivo de contraseña\n" +msgstr "" +"no se pudo obtener el directorio home para localizar el archivo de " +"contraseña\n" #: fe-connect.c:1129 #, c-format @@ -168,7 +239,9 @@ msgstr "valor sslmode no válido: «%s»\n" #: fe-connect.c:1150 #, c-format msgid "sslmode value \"%s\" invalid when SSL support is not compiled in\n" -msgstr "el valor sslmode «%s» no es válido cuando no se ha compilado con soporte SSL\n" +msgstr "" +"el valor sslmode «%s» no es válido cuando no se ha compilado con soporte " +"SSL\n" #: fe-connect.c:1185 #, c-format @@ -241,7 +314,9 @@ msgstr "no se pudo interpretar la dirección de red «%s»: %s\n" #: fe-connect.c:1758 #, c-format msgid "Unix-domain socket path \"%s\" is too long (maximum %d bytes)\n" -msgstr "la ruta del socket de dominio Unix «%s» es demasiado larga (máximo %d bytes)\n" +msgstr "" +"la ruta del socket de dominio Unix «%s» es demasiado larga (máximo %d " +"bytes)\n" #: fe-connect.c:1772 #, c-format @@ -250,7 +325,9 @@ msgstr "no se pudo traducir la ruta del socket Unix «%s» a una dirección: %s\ #: fe-connect.c:2053 msgid "invalid connection state, probably indicative of memory corruption\n" -msgstr "el estado de conexión no es válido, probablemente por corrupción de memoria\n" +msgstr "" +"el estado de conexión no es válido, probablemente por corrupción de " +"memoria\n" #: fe-connect.c:2110 #, c-format @@ -293,7 +370,9 @@ msgstr "no se pudo obtener credenciales de la contraparte: %s\n" #: fe-connect.c:2416 #, c-format msgid "requirepeer specifies \"%s\", but actual peer user name is \"%s\"\n" -msgstr "requirepeer especifica «%s», pero el nombre 
de usuario de la contraparte es «%s»\n" +msgstr "" +"requirepeer especifica «%s», pero el nombre de usuario de la contraparte es " +"«%s»\n" #: fe-connect.c:2450 #, c-format @@ -317,7 +396,9 @@ msgstr "se ha recibido una respuesta no válida en la negociación SSL: %c\n" #: fe-connect.c:2661 fe-connect.c:2694 #, c-format msgid "expected authentication request from server, but received %c\n" -msgstr "se esperaba una petición de autentificación desde el servidor, pero se ha recibido %c\n" +msgstr "" +"se esperaba una petición de autentificación desde el servidor, pero se ha " +"recibido %c\n" #: fe-connect.c:2923 msgid "unexpected message from server during startup\n" @@ -336,7 +417,8 @@ msgstr "la prueba «SHOW transaction_read_only» falló en el servidor «%s:%s» #: fe-connect.c:3211 #, c-format msgid "invalid connection state %d, probably indicative of memory corruption\n" -msgstr "estado de conexión no válido %d, probablemente por corrupción de memoria\n" +msgstr "" +"estado de conexión no válido %d, probablemente por corrupción de memoria\n" #: fe-connect.c:3668 fe-connect.c:3728 #, c-format @@ -406,11 +488,15 @@ msgstr "opción de conexión no válida «%s»\n" #: fe-connect.c:4461 fe-connect.c:4940 msgid "unterminated quoted string in connection info string\n" -msgstr "cadena de caracteres entre comillas sin terminar en la cadena de información de conexión\n" +msgstr "" +"cadena de caracteres entre comillas sin terminar en la cadena de información" +" de conexión\n" #: fe-connect.c:4501 msgid "could not get home directory to locate service definition file" -msgstr "no se pudo obtener el directorio home para localizar el archivo de definición de servicio" +msgstr "" +"no se pudo obtener el directorio home para localizar el archivo de " +"definición de servicio" #: fe-connect.c:4534 #, c-format @@ -434,8 +520,12 @@ msgstr "error de sintaxis en archivo de servicio «%s», línea %d\n" #: fe-connect.c:4652 #, c-format -msgid "nested service specifications not supported in service file \"%s\", line %d\n" -msgstr "especificaciones de servicio anidadas no soportadas en archivo de servicio «%s», línea %d\n" +msgid "" +"nested service specifications not supported in service file \"%s\", line " +"%d\n" +msgstr "" +"especificaciones de servicio anidadas no soportadas en archivo de servicio " +"«%s», línea %d\n" #: fe-connect.c:5372 #, c-format @@ -444,8 +534,12 @@ msgstr "URI no válida propagada a rutina interna de procesamiento: «%s»\n" #: fe-connect.c:5449 #, c-format -msgid "end of string reached when looking for matching \"]\" in IPv6 host address in URI: \"%s\"\n" -msgstr "se encontró el fin de la cadena mientras se buscaba el «]» correspondiente en dirección IPv6 en URI: «%s»\n" +msgid "" +"end of string reached when looking for matching \"]\" in IPv6 host address " +"in URI: \"%s\"\n" +msgstr "" +"se encontró el fin de la cadena mientras se buscaba el «]» correspondiente " +"en dirección IPv6 en URI: «%s»\n" #: fe-connect.c:5456 #, c-format @@ -455,7 +549,9 @@ msgstr "la dirección IPv6 no puede ser vacía en la URI: «%s»\n" #: fe-connect.c:5471 #, c-format msgid "unexpected character \"%c\" at position %d in URI (expected \":\" or \"/\"): \"%s\"\n" -msgstr "carácter «%c» inesperado en la posición %d en URI (se esperaba «:» o «/»): «%s»\n" +msgstr "" +"carácter «%c» inesperado en la posición %d en URI (se esperaba «:» o «/»): " +"«%s»\n" #: fe-connect.c:5600 #, c-format @@ -493,110 +589,133 @@ msgstr "ADVERTENCIA: El archivo de claves «%s» no es un archivo plano\n" #: fe-connect.c:6407 #, c-format 
-msgid "WARNING: password file \"%s\" has group or world access; permissions should be u=rw (0600) or less\n" -msgstr "ADVERTENCIA: El archivo de claves «%s» tiene permiso de lectura para el grupo u otros; los permisos deberían ser u=rw (0600) o menos\n" +msgid "" +"WARNING: password file \"%s\" has group or world access; permissions should " +"be u=rw (0600) or less\n" +msgstr "" +"ADVERTENCIA: El archivo de claves «%s» tiene permiso de lectura para el " +"grupo u otros; los permisos deberían ser u=rw (0600) o menos\n" #: fe-connect.c:6499 #, c-format msgid "password retrieved from file \"%s\"\n" msgstr "contraseña obtenida desde el archivo «%s»\n" -#: fe-exec.c:826 +#: fe-exec.c:437 fe-exec.c:2776 +#, c-format +msgid "row number %d is out of range 0..%d" +msgstr "el número de fila %d está fuera del rango 0..%d" + +#: fe-exec.c:498 fe-protocol2.c:503 fe-protocol2.c:538 fe-protocol2.c:1049 +#: fe-protocol3.c:209 fe-protocol3.c:236 fe-protocol3.c:253 fe-protocol3.c:333 +#: fe-protocol3.c:728 fe-protocol3.c:951 +msgid "out of memory" +msgstr "memoria agotada" + +#: fe-exec.c:499 fe-protocol2.c:1395 fe-protocol3.c:1886 +#, c-format +msgid "%s" +msgstr "%s" + +#: fe-exec.c:847 msgid "NOTICE" msgstr "AVISO" -#: fe-exec.c:1141 fe-exec.c:1199 fe-exec.c:1245 +#: fe-exec.c:905 +msgid "PGresult cannot support more than INT_MAX tuples" +msgstr "PGresult no puede soportar un número de tuplas mayor que INT_MAX" + +#: fe-exec.c:917 +msgid "size_t overflow" +msgstr "desbordamiento de size_t" + +#: fe-exec.c:1192 fe-exec.c:1250 fe-exec.c:1296 msgid "command string is a null pointer\n" msgstr "la cadena de orden es un puntero nulo\n" -#: fe-exec.c:1205 fe-exec.c:1251 fe-exec.c:1346 +#: fe-exec.c:1256 fe-exec.c:1302 fe-exec.c:1397 msgid "number of parameters must be between 0 and 65535\n" msgstr "el número de parámetros debe estar entre 0 y 65535\n" -#: fe-exec.c:1239 fe-exec.c:1340 +#: fe-exec.c:1290 fe-exec.c:1391 msgid "statement name is a null pointer\n" msgstr "el nombre de sentencia es un puntero nulo\n" -#: fe-exec.c:1259 fe-exec.c:1422 fe-exec.c:2140 fe-exec.c:2339 +#: fe-exec.c:1310 fe-exec.c:1473 fe-exec.c:2191 fe-exec.c:2390 msgid "function requires at least protocol version 3.0\n" msgstr "la función requiere protocolo 3.0 o superior\n" -#: fe-exec.c:1377 +#: fe-exec.c:1428 msgid "no connection to the server\n" msgstr "no hay conexión con el servidor\n" -#: fe-exec.c:1384 +#: fe-exec.c:1435 msgid "another command is already in progress\n" msgstr "hay otra orden en ejecución\n" -#: fe-exec.c:1498 +#: fe-exec.c:1549 msgid "length must be given for binary parameter\n" msgstr "el largo debe ser especificado para un parámetro binario\n" -#: fe-exec.c:1770 +#: fe-exec.c:1821 #, c-format msgid "unexpected asyncStatus: %d\n" msgstr "asyncStatus no esperado: %d\n" -#: fe-exec.c:1790 +#: fe-exec.c:1841 #, c-format msgid "PGEventProc \"%s\" failed during PGEVT_RESULTCREATE event\n" msgstr "PGEventProc «%s» falló durante el evento PGEVT_RESULTCREATE\n" -#: fe-exec.c:1950 +#: fe-exec.c:2001 msgid "COPY terminated by new PQexec" msgstr "COPY terminado por un nuevo PQexec" -#: fe-exec.c:1958 +#: fe-exec.c:2009 msgid "COPY IN state must be terminated first\n" msgstr "el estado COPY IN debe ser terminado primero\n" -#: fe-exec.c:1978 +#: fe-exec.c:2029 msgid "COPY OUT state must be terminated first\n" msgstr "el estado COPY OUT debe ser terminado primero\n" -#: fe-exec.c:1986 +#: fe-exec.c:2037 msgid "PQexec not allowed during COPY BOTH\n" msgstr "PQexec no está permitido durante COPY BOTH\n" -#: fe-exec.c:2229 
fe-exec.c:2296 fe-exec.c:2386 fe-protocol2.c:1352 +#: fe-exec.c:2280 fe-exec.c:2347 fe-exec.c:2437 fe-protocol2.c:1352 #: fe-protocol3.c:1817 msgid "no COPY in progress\n" msgstr "no hay COPY alguno en ejecución\n" -#: fe-exec.c:2576 +#: fe-exec.c:2627 msgid "connection in wrong state\n" msgstr "la conexión está en un estado incorrecto\n" -#: fe-exec.c:2607 +#: fe-exec.c:2658 msgid "invalid ExecStatusType code" msgstr "el código de ExecStatusType no es válido" -#: fe-exec.c:2634 +#: fe-exec.c:2685 msgid "PGresult is not an error result\n" msgstr "PGresult no es un resultado de error\n" -#: fe-exec.c:2709 fe-exec.c:2732 +#: fe-exec.c:2760 fe-exec.c:2783 #, c-format msgid "column number %d is out of range 0..%d" msgstr "el número de columna %d está fuera del rango 0..%d" -#: fe-exec.c:2725 -#, c-format -msgid "row number %d is out of range 0..%d" -msgstr "el número de fila %d está fuera del rango 0..%d" - -#: fe-exec.c:2747 +#: fe-exec.c:2798 #, c-format msgid "parameter number %d is out of range 0..%d" msgstr "el número de parámetro %d está fuera del rango 0..%d" -#: fe-exec.c:3057 +#: fe-exec.c:3108 #, c-format msgid "could not interpret result from server: %s" msgstr "no se pudo interpretar el resultado del servidor: %s" -#: fe-exec.c:3296 fe-exec.c:3380 +#: fe-exec.c:3347 fe-exec.c:3431 msgid "incomplete multibyte character\n" msgstr "carácter multibyte incompleto\n" @@ -649,7 +768,9 @@ msgstr "no se pudo escribir a archivo «%s»: %s\n" #: fe-lobj.c:947 msgid "query to initialize large object functions did not return data\n" -msgstr "la consulta para inicializar las funciones de objetos grandes no devuelve datos\n" +msgstr "" +"la consulta para inicializar las funciones de objetos grandes no devuelve " +"datos\n" #: fe-lobj.c:996 msgid "cannot determine OID of function lo_open\n" @@ -724,7 +845,9 @@ msgstr "select() fallida: %s\n" #: fe-protocol2.c:91 #, c-format msgid "invalid setenv state %c, probably indicative of memory corruption\n" -msgstr "el estado de setenv %c no es válido, probablemente por corrupción de memoria\n" +msgstr "" +"el estado de setenv %c no es válido, probablemente por corrupción de " +"memoria\n" #: fe-protocol2.c:390 #, c-format @@ -736,41 +859,40 @@ msgstr "el estado %c no es válido, probablemente por corrupción de memoria\n" msgid "message type 0x%02x arrived from server while idle" msgstr "un mensaje de tipo 0x%02x llegó del servidor estando inactivo" -#: fe-protocol2.c:503 fe-protocol2.c:538 fe-protocol2.c:1049 fe-protocol3.c:209 -#: fe-protocol3.c:236 fe-protocol3.c:253 fe-protocol3.c:333 fe-protocol3.c:728 -#: fe-protocol3.c:951 -msgid "out of memory" -msgstr "memoria agotada" - #: fe-protocol2.c:529 #, c-format msgid "unexpected character %c following empty query response (\"I\" message)" -msgstr "carácter %c no esperado, siguiendo una respuesta de consulta vacía (mensaje «I»)" +msgstr "" +"carácter %c no esperado, siguiendo una respuesta de consulta vacía (mensaje " +"«I»)" #: fe-protocol2.c:595 #, c-format msgid "server sent data (\"D\" message) without prior row description (\"T\" message)" -msgstr "el servidor envió datos (mensaje «D») sin precederlos con una descripción de fila (mensaje «T»)" +msgstr "" +"el servidor envió datos (mensaje «D») sin precederlos con una descripción de" +" fila (mensaje «T»)" #: fe-protocol2.c:613 #, c-format -msgid "server sent binary data (\"B\" message) without prior row description (\"T\" message)" -msgstr "el servidor envió datos binarios (mensaje «B») sin precederlos con una description de fila (mensaje «T»)" +msgid 
"" +"server sent binary data (\"B\" message) without prior row description (\"T\"" +" message)" +msgstr "" +"el servidor envió datos binarios (mensaje «B») sin precederlos con una " +"description de fila (mensaje «T»)" #: fe-protocol2.c:633 fe-protocol3.c:412 #, c-format msgid "unexpected response from server; first received character was \"%c\"\n" -msgstr "se ha recibido una respuesta inesperada del servidor; el primer carácter recibido fue «%c»\n" +msgstr "" +"se ha recibido una respuesta inesperada del servidor; el primer carácter " +"recibido fue «%c»\n" #: fe-protocol2.c:762 fe-protocol2.c:937 fe-protocol3.c:627 fe-protocol3.c:854 msgid "out of memory for query result" msgstr "no hay suficiente memoria para el resultado de la consulta" -#: fe-protocol2.c:1395 fe-protocol3.c:1886 -#, c-format -msgid "%s" -msgstr "%s" - #: fe-protocol2.c:1407 #, c-format msgid "lost synchronization with server, resetting connection" @@ -783,17 +905,23 @@ msgstr "error de protocolo: id=0x%x\n" #: fe-protocol3.c:368 msgid "server sent data (\"D\" message) without prior row description (\"T\" message)\n" -msgstr "el servidor envió datos (mensaje «D») sin precederlos con una descripción de fila (mensaje «T»)\n" +msgstr "" +"el servidor envió datos (mensaje «D») sin precederlos con una descripción de" +" fila (mensaje «T»)\n" #: fe-protocol3.c:433 #, c-format msgid "message contents do not agree with length in message type \"%c\"\n" -msgstr "el contenido del mensaje no concuerda con el largo, en el mensaje tipo «%c»\n" +msgstr "" +"el contenido del mensaje no concuerda con el largo, en el mensaje tipo " +"«%c»\n" #: fe-protocol3.c:454 #, c-format msgid "lost synchronization with server: got message type \"%c\", length %d\n" -msgstr "se perdió la sincronía con el servidor: se recibió un mensaje de tipo «%c», largo %d\n" +msgstr "" +"se perdió la sincronía con el servidor: se recibió un mensaje de tipo «%c», " +"largo %d\n" #: fe-protocol3.c:505 fe-protocol3.c:545 msgid "insufficient data in \"T\" message" @@ -930,16 +1058,21 @@ msgstr "el elemento de nombre en el certificado SSL contiene un carácter null\n #: fe-secure-openssl.c:580 msgid "host name must be specified for a verified SSL connection\n" -msgstr "el nombre de servidor debe ser especificado para una conexión SSL verificada\n" +msgstr "" +"el nombre de servidor debe ser especificado para una conexión SSL " +"verificada\n" #: fe-secure-openssl.c:680 #, c-format msgid "server certificate for \"%s\" does not match host name \"%s\"\n" -msgstr "el certificado de servidor para «%s» no coincide con el nombre de servidor «%s»\n" +msgstr "" +"el certificado de servidor para «%s» no coincide con el nombre de servidor " +"«%s»\n" #: fe-secure-openssl.c:686 msgid "could not get server's host name from server certificate\n" -msgstr "no se pudo obtener el nombre de servidor desde el certificado del servidor\n" +msgstr "" +"no se pudo obtener el nombre de servidor desde el certificado del servidor\n" #: fe-secure-openssl.c:928 #, c-format @@ -1001,7 +1134,9 @@ msgstr "no se pudo inicializar el motor SSL «%s»: %s\n" #: fe-secure-openssl.c:1182 #, c-format msgid "could not read private SSL key \"%s\" from engine \"%s\": %s\n" -msgstr "no se pudo leer el archivo de la llave privada SSL «%s» desde el motor «%s»: %s\n" +msgstr "" +"no se pudo leer el archivo de la llave privada SSL «%s» desde el motor «%s»:" +" %s\n" #: fe-secure-openssl.c:1196 #, c-format @@ -1015,8 +1150,12 @@ msgstr "el certificado está presente, pero no la llave privada «%s»\n" #: 
fe-secure-openssl.c:1241 #, c-format -msgid "private key file \"%s\" has group or world access; permissions should be u=rw (0600) or less\n" -msgstr "el archivo de la llave privada «%s» tiene permiso de lectura para el grupo u otros; los permisos deberían ser u=rw (0600) o menos\n" +msgid "" +"private key file \"%s\" has group or world access; permissions should be " +"u=rw (0600) or less\n" +msgstr "" +"el archivo de la llave privada «%s» tiene permiso de lectura para el grupo u" +" otros; los permisos deberían ser u=rw (0600) o menos\n" #: fe-secure-openssl.c:1252 #, c-format diff --git a/src/interfaces/libpq/po/fr.po b/src/interfaces/libpq/po/fr.po index 755f0e982a..eed8fa53cc 100644 --- a/src/interfaces/libpq/po/fr.po +++ b/src/interfaces/libpq/po/fr.po @@ -9,15 +9,15 @@ msgid "" msgstr "" "Project-Id-Version: PostgreSQL 9.6\n" "Report-Msgid-Bugs-To: pgsql-bugs@postgresql.org\n" -"POT-Creation-Date: 2017-08-04 02:38+0000\n" -"PO-Revision-Date: 2017-08-04 19:53+0200\n" +"POT-Creation-Date: 2018-02-05 14:08+0000\n" +"PO-Revision-Date: 2018-02-10 17:49+0100\n" "Last-Translator: Guillaume Lelarge \n" "Language-Team: PostgreSQLfr \n" "Language: fr\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" -"X-Generator: Poedit 2.0.2\n" +"X-Generator: Poedit 2.0.3\n" #: fe-auth-scram.c:176 msgid "malformed SCRAM message (empty message)\n" @@ -28,7 +28,7 @@ msgid "malformed SCRAM message (length mismatch)\n" msgstr "message SCRAM malformé (pas de correspondance sur la longueur)\n" #: fe-auth-scram.c:231 -msgid "invalid server signature\n" +msgid "incorrect server signature\n" msgstr "signature invalide du serveur\n" #: fe-auth-scram.c:240 @@ -37,28 +37,28 @@ msgstr "état d'échange SCRAM invalide\n" #: fe-auth-scram.c:263 #, c-format -msgid "malformed SCRAM message (%c expected)\n" -msgstr "message SCRAM malformé (%c attendu)\n" +msgid "malformed SCRAM message (attribute \"%c\" expected)\n" +msgstr "message SCRAM malformé (attribut « %c » attendu)\n" #: fe-auth-scram.c:272 #, c-format -msgid "malformed SCRAM message (expected = in attr '%c')\n" -msgstr "message SCRAM malformé (= attendu dans l'attribut '%c')\n" +msgid "malformed SCRAM message (expected character \"=\" for attribute \"%c\")\n" +msgstr "message SCRAM malformé (caractère « = » attendu pour l'attribut « %c »)\n" #: fe-auth-scram.c:311 -msgid "failed to generate nonce\n" -msgstr "échec pour la génération de nonce\n" +msgid "could not generate nonce\n" +msgstr "n'a pas pu générer le nonce\n" #: fe-auth-scram.c:319 fe-auth-scram.c:336 fe-auth-scram.c:346 #: fe-auth-scram.c:400 fe-auth-scram.c:420 fe-auth-scram.c:445 #: fe-auth-scram.c:459 fe-auth-scram.c:501 fe-auth.c:227 fe-auth.c:362 #: fe-auth.c:432 fe-auth.c:467 fe-auth.c:609 fe-auth.c:768 fe-auth.c:1080 -#: fe-auth.c:1228 fe-connect.c:775 fe-connect.c:1203 fe-connect.c:1379 -#: fe-connect.c:1947 fe-connect.c:2476 fe-connect.c:4062 fe-connect.c:4314 -#: fe-connect.c:4433 fe-connect.c:4673 fe-connect.c:4753 fe-connect.c:4852 -#: fe-connect.c:5108 fe-connect.c:5137 fe-connect.c:5209 fe-connect.c:5233 -#: fe-connect.c:5251 fe-connect.c:5352 fe-connect.c:5361 fe-connect.c:5717 -#: fe-connect.c:5867 fe-exec.c:2651 fe-exec.c:3398 fe-exec.c:3563 fe-lobj.c:896 +#: fe-auth.c:1228 fe-connect.c:775 fe-connect.c:1202 fe-connect.c:1378 +#: fe-connect.c:1946 fe-connect.c:2475 fe-connect.c:4061 fe-connect.c:4313 +#: fe-connect.c:4432 fe-connect.c:4682 fe-connect.c:4762 fe-connect.c:4861 +#: fe-connect.c:5117 fe-connect.c:5146 fe-connect.c:5218 
fe-connect.c:5242 +#: fe-connect.c:5260 fe-connect.c:5361 fe-connect.c:5370 fe-connect.c:5726 +#: fe-connect.c:5876 fe-exec.c:2702 fe-exec.c:3449 fe-exec.c:3614 fe-lobj.c:896 #: fe-protocol2.c:1206 fe-protocol3.c:992 fe-protocol3.c:1678 #: fe-secure-openssl.c:514 fe-secure-openssl.c:1138 msgid "out of memory\n" @@ -78,8 +78,8 @@ msgstr "message SCRAM malformé (problème à la fin du server-first-message)\n" #: fe-auth-scram.c:511 #, c-format -msgid "error received from server in SASL exchange: %s\n" -msgstr "réception d'une erreur du serveur dans l'échange SASL : %s\n" +msgid "error received from server in SCRAM exchange: %s\n" +msgstr "réception d'une erreur du serveur dans l'échange SCRAM : %s\n" #: fe-auth-scram.c:526 msgid "malformed SCRAM message (garbage at end of server-final-message)\n" @@ -180,12 +180,12 @@ msgstr "méthode d'authentification %u non supportée\n" msgid "user name lookup failure: error code %lu\n" msgstr "échec de la recherche du nom d'utilisateur : code erreur %lu\n" -#: fe-auth.c:1065 fe-connect.c:2403 +#: fe-auth.c:1065 fe-connect.c:2402 #, c-format msgid "could not look up local user ID %d: %s\n" msgstr "n'a pas pu rechercher l'identifiant de l'utilisateur local %d : %s\n" -#: fe-auth.c:1070 fe-connect.c:2408 +#: fe-auth.c:1070 fe-connect.c:2407 #, c-format msgid "local user with ID %d does not exist\n" msgstr "l'utilisateur local dont l'identifiant est %d n'existe pas\n" @@ -205,41 +205,35 @@ msgstr "algorithme de chiffrement du mot de passe « %s » non reconnu\n" #: fe-connect.c:968 #, c-format -msgid "could not match %d host names to %d hostaddrs\n" -msgstr "n'a pas pu faire correspondre les %d noms d'hôte aux %d adresses\n" +msgid "could not match %d host names to %d hostaddr values\n" +msgstr "n'a pas pu faire correspondre les %d noms d'hôte aux %d valeurs hostaddr\n" #: fe-connect.c:1025 #, c-format msgid "could not match %d port numbers to %d hosts\n" msgstr "n'a pas pu faire correspondre les %d numéros de port aux %d hôtes\n" -#: fe-connect.c:1077 -msgid "could not get home directory to locate password file\n" -msgstr "" -"n'a pas pu obtenir le répertoire personnel pour trouver le fichier de\n" -"mot de passe\n" - -#: fe-connect.c:1129 +#: fe-connect.c:1128 #, c-format msgid "invalid sslmode value: \"%s\"\n" msgstr "valeur sslmode invalide : « %s »\n" -#: fe-connect.c:1150 +#: fe-connect.c:1149 #, c-format msgid "sslmode value \"%s\" invalid when SSL support is not compiled in\n" msgstr "valeur sslmode « %s » invalide si le support SSL n'est pas compilé initialement\n" -#: fe-connect.c:1185 +#: fe-connect.c:1184 #, c-format msgid "invalid target_session_attrs value: \"%s\"\n" msgstr "valeur target_session_attrs invalide : « %s »\n" -#: fe-connect.c:1403 +#: fe-connect.c:1402 #, c-format msgid "could not set socket to TCP no delay mode: %s\n" msgstr "n'a pas pu activer le mode TCP sans délai pour la socket : %s\n" -#: fe-connect.c:1433 +#: fe-connect.c:1432 #, c-format msgid "" "could not connect to server: %s\n" @@ -250,7 +244,7 @@ msgstr "" "\tLe serveur est-il actif localement et accepte-t-il les connexions sur la\n" " \tsocket Unix « %s » ?\n" -#: fe-connect.c:1491 +#: fe-connect.c:1490 #, c-format msgid "" "could not connect to server: %s\n" @@ -261,7 +255,7 @@ msgstr "" "\tLe serveur est-il actif sur l'hôte « %s » (%s)\n" "\tet accepte-t-il les connexionsTCP/IP sur le port %s ?\n" -#: fe-connect.c:1500 +#: fe-connect.c:1499 #, c-format msgid "" "could not connect to server: %s\n" @@ -272,297 +266,291 @@ msgstr "" "\tLe serveur est-il actif sur l'hôte « 
%s » et accepte-t-il les connexions\n" "\tTCP/IP sur le port %s ?\n" -#: fe-connect.c:1551 fe-connect.c:1583 fe-connect.c:1616 fe-connect.c:2175 +#: fe-connect.c:1550 fe-connect.c:1582 fe-connect.c:1615 fe-connect.c:2174 #, c-format msgid "setsockopt(%s) failed: %s\n" msgstr "setsockopt(%s) a échoué : %s\n" -#: fe-connect.c:1665 +#: fe-connect.c:1664 #, c-format msgid "WSAIoctl(SIO_KEEPALIVE_VALS) failed: %ui\n" msgstr "WSAIoctl(SIO_KEEPALIVE_VALS) a échoué : %ui\n" -#: fe-connect.c:1722 +#: fe-connect.c:1721 #, c-format msgid "invalid port number: \"%s\"\n" msgstr "numéro de port invalide : « %s »\n" -#: fe-connect.c:1738 +#: fe-connect.c:1737 #, c-format msgid "could not translate host name \"%s\" to address: %s\n" msgstr "n'a pas pu traduire le nom d'hôte « %s » en adresse : %s\n" -#: fe-connect.c:1747 +#: fe-connect.c:1746 #, c-format msgid "could not parse network address \"%s\": %s\n" msgstr "n'a pas pu analyser l'adresse réseau « %s » : %s\n" -#: fe-connect.c:1758 +#: fe-connect.c:1757 #, c-format msgid "Unix-domain socket path \"%s\" is too long (maximum %d bytes)\n" msgstr "Le chemin du socket de domaine Unix, « %s », est trop (maximum %d octets)\n" -#: fe-connect.c:1772 +#: fe-connect.c:1771 #, c-format msgid "could not translate Unix-domain socket path \"%s\" to address: %s\n" msgstr "" "n'a pas pu traduire le chemin de la socket du domaine Unix « %s » en adresse :\n" "%s\n" -#: fe-connect.c:2053 +#: fe-connect.c:2052 msgid "invalid connection state, probably indicative of memory corruption\n" msgstr "état de connexion invalide, indique probablement une corruption de mémoire\n" -#: fe-connect.c:2110 +#: fe-connect.c:2109 #, c-format msgid "could not create socket: %s\n" msgstr "n'a pas pu créer la socket : %s\n" -#: fe-connect.c:2132 +#: fe-connect.c:2131 #, c-format msgid "could not set socket to nonblocking mode: %s\n" msgstr "n'a pas pu activer le mode non-bloquant pour la socket : %s\n" -#: fe-connect.c:2143 +#: fe-connect.c:2142 #, c-format msgid "could not set socket to close-on-exec mode: %s\n" msgstr "n'a pas pu paramétrer la socket en mode close-on-exec : %s\n" -#: fe-connect.c:2162 +#: fe-connect.c:2161 msgid "keepalives parameter must be an integer\n" msgstr "le paramètre keepalives doit être un entier\n" -#: fe-connect.c:2313 +#: fe-connect.c:2312 #, c-format msgid "could not get socket error status: %s\n" msgstr "n'a pas pu déterminer le statut d'erreur de la socket : %s\n" -#: fe-connect.c:2348 +#: fe-connect.c:2347 #, c-format msgid "could not get client address from socket: %s\n" msgstr "n'a pas pu obtenir l'adresse du client depuis la socket : %s\n" -#: fe-connect.c:2390 +#: fe-connect.c:2389 msgid "requirepeer parameter is not supported on this platform\n" msgstr "le paramètre requirepeer n'est pas supporté sur cette plateforme\n" -#: fe-connect.c:2393 +#: fe-connect.c:2392 #, c-format msgid "could not get peer credentials: %s\n" msgstr "n'a pas pu obtenir l'authentification de l'autre : %s\n" -#: fe-connect.c:2416 +#: fe-connect.c:2415 #, c-format msgid "requirepeer specifies \"%s\", but actual peer user name is \"%s\"\n" msgstr "requirepeer indique « %s » mais le nom de l'utilisateur réel est « %s »\n" -#: fe-connect.c:2450 +#: fe-connect.c:2449 #, c-format msgid "could not send SSL negotiation packet: %s\n" msgstr "n'a pas pu transmettre le paquet de négociation SSL : %s\n" -#: fe-connect.c:2489 +#: fe-connect.c:2488 #, c-format msgid "could not send startup packet: %s\n" msgstr "n'a pas pu transmettre le paquet de démarrage : %s\n" -#: fe-connect.c:2559 +#: 
fe-connect.c:2558 msgid "server does not support SSL, but SSL was required\n" msgstr "le serveur ne supporte pas SSL alors que SSL était réclamé\n" -#: fe-connect.c:2585 +#: fe-connect.c:2584 #, c-format msgid "received invalid response to SSL negotiation: %c\n" msgstr "a reçu une réponse invalide à la négociation SSL : %c\n" -#: fe-connect.c:2661 fe-connect.c:2694 +#: fe-connect.c:2660 fe-connect.c:2693 #, c-format msgid "expected authentication request from server, but received %c\n" msgstr "" "attendait une requête d'authentification en provenance du serveur, mais a\n" " reçu %c\n" -#: fe-connect.c:2923 +#: fe-connect.c:2922 msgid "unexpected message from server during startup\n" msgstr "message inattendu du serveur lors du démarrage\n" -#: fe-connect.c:3141 +#: fe-connect.c:3140 #, c-format msgid "could not make a writable connection to server \"%s:%s\"\n" msgstr "n'a pas pu réaliser une connexion en écriture au serveur « %s » : %s\n" -#: fe-connect.c:3190 +#: fe-connect.c:3189 #, c-format msgid "test \"SHOW transaction_read_only\" failed on server \"%s:%s\"\n" msgstr "le test \"SHOW transaction_read_only\" a échoué sur le serveur \"%s:%s\"\n" -#: fe-connect.c:3211 +#: fe-connect.c:3210 #, c-format msgid "invalid connection state %d, probably indicative of memory corruption\n" msgstr "" "état de connexion invalide (%d), indiquant probablement une corruption de\n" " mémoire\n" -#: fe-connect.c:3668 fe-connect.c:3728 +#: fe-connect.c:3667 fe-connect.c:3727 #, c-format msgid "PGEventProc \"%s\" failed during PGEVT_CONNRESET event\n" msgstr "échec de PGEventProc « %s » lors de l'événement PGEVT_CONNRESET\n" -#: fe-connect.c:4075 +#: fe-connect.c:4074 #, c-format msgid "invalid LDAP URL \"%s\": scheme must be ldap://\n" msgstr "URL LDAP « %s » invalide : le schéma doit être ldap://\n" -#: fe-connect.c:4090 +#: fe-connect.c:4089 #, c-format msgid "invalid LDAP URL \"%s\": missing distinguished name\n" msgstr "URL LDAP « %s » invalide : le « distinguished name » manque\n" -#: fe-connect.c:4101 fe-connect.c:4154 +#: fe-connect.c:4100 fe-connect.c:4153 #, c-format msgid "invalid LDAP URL \"%s\": must have exactly one attribute\n" msgstr "URL LDAP « %s » invalide : doit avoir exactement un attribut\n" -#: fe-connect.c:4111 fe-connect.c:4168 +#: fe-connect.c:4110 fe-connect.c:4167 #, c-format msgid "invalid LDAP URL \"%s\": must have search scope (base/one/sub)\n" msgstr "URL LDAP « %s » invalide : doit avoir une échelle de recherche (base/un/sous)\n" -#: fe-connect.c:4122 +#: fe-connect.c:4121 #, c-format msgid "invalid LDAP URL \"%s\": no filter\n" msgstr "URL LDAP « %s » invalide : aucun filtre\n" -#: fe-connect.c:4143 +#: fe-connect.c:4142 #, c-format msgid "invalid LDAP URL \"%s\": invalid port number\n" msgstr "URL LDAP « %s » invalide : numéro de port invalide\n" -#: fe-connect.c:4177 +#: fe-connect.c:4176 msgid "could not create LDAP structure\n" msgstr "n'a pas pu créer la structure LDAP\n" -#: fe-connect.c:4253 +#: fe-connect.c:4252 #, c-format msgid "lookup on LDAP server failed: %s\n" msgstr "échec de la recherche sur le serveur LDAP : %s\n" -#: fe-connect.c:4264 +#: fe-connect.c:4263 msgid "more than one entry found on LDAP lookup\n" msgstr "plusieurs entrées trouvées pendant la recherche LDAP\n" -#: fe-connect.c:4265 fe-connect.c:4277 +#: fe-connect.c:4264 fe-connect.c:4276 msgid "no entry found on LDAP lookup\n" msgstr "aucune entrée trouvée pendant la recherche LDAP\n" -#: fe-connect.c:4288 fe-connect.c:4301 +#: fe-connect.c:4287 fe-connect.c:4300 msgid "attribute has no values on 
LDAP lookup\n" msgstr "l'attribut n'a pas de valeur après la recherche LDAP\n" -#: fe-connect.c:4353 fe-connect.c:4372 fe-connect.c:4891 +#: fe-connect.c:4352 fe-connect.c:4371 fe-connect.c:4900 #, c-format msgid "missing \"=\" after \"%s\" in connection info string\n" msgstr "« = » manquant après « %s » dans la chaîne des paramètres de connexion\n" -#: fe-connect.c:4445 fe-connect.c:5076 fe-connect.c:5850 +#: fe-connect.c:4444 fe-connect.c:5085 fe-connect.c:5859 #, c-format msgid "invalid connection option \"%s\"\n" msgstr "option de connexion « %s » invalide\n" -#: fe-connect.c:4461 fe-connect.c:4940 +#: fe-connect.c:4460 fe-connect.c:4949 msgid "unterminated quoted string in connection info string\n" msgstr "guillemets non refermés dans la chaîne des paramètres de connexion\n" -#: fe-connect.c:4501 -msgid "could not get home directory to locate service definition file" -msgstr "" -"n'a pas pu obtenir le répertoire personnel pour trouver le certificat de\n" -"définition du service" - -#: fe-connect.c:4534 +#: fe-connect.c:4543 #, c-format msgid "definition of service \"%s\" not found\n" msgstr "définition du service « %s » introuvable\n" -#: fe-connect.c:4557 +#: fe-connect.c:4566 #, c-format msgid "service file \"%s\" not found\n" msgstr "fichier de service « %s » introuvable\n" -#: fe-connect.c:4570 +#: fe-connect.c:4579 #, c-format msgid "line %d too long in service file \"%s\"\n" msgstr "ligne %d trop longue dans le fichier service « %s »\n" -#: fe-connect.c:4641 fe-connect.c:4685 +#: fe-connect.c:4650 fe-connect.c:4694 #, c-format msgid "syntax error in service file \"%s\", line %d\n" msgstr "erreur de syntaxe dans le fichier service « %s », ligne %d\n" -#: fe-connect.c:4652 +#: fe-connect.c:4661 #, c-format msgid "nested service specifications not supported in service file \"%s\", line %d\n" msgstr "spécifications imbriquées de service non supportées dans le fichier service « %s », ligne %d\n" -#: fe-connect.c:5372 +#: fe-connect.c:5381 #, c-format msgid "invalid URI propagated to internal parser routine: \"%s\"\n" msgstr "URI invalide propagée à la routine d'analyse interne : « %s »\n" -#: fe-connect.c:5449 +#: fe-connect.c:5458 #, c-format msgid "end of string reached when looking for matching \"]\" in IPv6 host address in URI: \"%s\"\n" msgstr "" "fin de chaîne atteinte lors de la recherche du « ] » correspondant dans\n" "l'adresse IPv6 de l'hôte indiquée dans l'URI : « %s »\n" -#: fe-connect.c:5456 +#: fe-connect.c:5465 #, c-format msgid "IPv6 host address may not be empty in URI: \"%s\"\n" msgstr "l'adresse IPv6 de l'hôte ne peut pas être vide dans l'URI : « %s »\n" -#: fe-connect.c:5471 +#: fe-connect.c:5480 #, c-format msgid "unexpected character \"%c\" at position %d in URI (expected \":\" or \"/\"): \"%s\"\n" msgstr "" "caractère « %c » inattendu à la position %d de l'URI (caractère « : » ou\n" "« / » attendu) : « %s »\n" -#: fe-connect.c:5600 +#: fe-connect.c:5609 #, c-format msgid "extra key/value separator \"=\" in URI query parameter: \"%s\"\n" msgstr "séparateur « = » de clé/valeur en trop dans le paramètre de requête URI : « %s »\n" -#: fe-connect.c:5620 +#: fe-connect.c:5629 #, c-format msgid "missing key/value separator \"=\" in URI query parameter: \"%s\"\n" msgstr "séparateur « = » de clé/valeur manquant dans le paramètre de requête URI : « %s »\n" -#: fe-connect.c:5671 +#: fe-connect.c:5680 #, c-format msgid "invalid URI query parameter: \"%s\"\n" msgstr "paramètre de la requête URI invalide : « %s »\n" -#: fe-connect.c:5745 +#: fe-connect.c:5754 #, c-format 
msgid "invalid percent-encoded token: \"%s\"\n" msgstr "jeton encodé en pourcentage invalide : « %s »\n" -#: fe-connect.c:5755 +#: fe-connect.c:5764 #, c-format msgid "forbidden value %%00 in percent-encoded value: \"%s\"\n" msgstr "valeur %%00 interdite dans la valeur codée en pourcentage : « %s »\n" -#: fe-connect.c:6100 +#: fe-connect.c:6109 msgid "connection pointer is NULL\n" msgstr "le pointeur de connexion est NULL\n" -#: fe-connect.c:6398 +#: fe-connect.c:6407 #, c-format msgid "WARNING: password file \"%s\" is not a plain file\n" msgstr "ATTENTION : le fichier de mots de passe « %s » n'est pas un fichier texte\n" -#: fe-connect.c:6407 +#: fe-connect.c:6416 #, c-format msgid "WARNING: password file \"%s\" has group or world access; permissions should be u=rw (0600) or less\n" msgstr "" @@ -570,107 +558,126 @@ msgstr "" "lecture pour le groupe ou universel ; les droits devraient être u=rw (0600)\n" "ou inférieur\n" -#: fe-connect.c:6499 +#: fe-connect.c:6508 #, c-format msgid "password retrieved from file \"%s\"\n" msgstr "mot de passe récupéré dans le fichier fichier « %s »\n" -#: fe-exec.c:826 +#: fe-exec.c:437 fe-exec.c:2776 +#, c-format +msgid "row number %d is out of range 0..%d" +msgstr "le numéro de ligne %d est en dehors des limites 0..%d" + +#: fe-exec.c:498 fe-protocol2.c:503 fe-protocol2.c:538 fe-protocol2.c:1049 +#: fe-protocol3.c:209 fe-protocol3.c:236 fe-protocol3.c:253 fe-protocol3.c:333 +#: fe-protocol3.c:728 fe-protocol3.c:951 +msgid "out of memory" +msgstr "mémoire épuisée" + +#: fe-exec.c:499 fe-protocol2.c:1395 fe-protocol3.c:1886 +#, c-format +msgid "%s" +msgstr "%s" + +#: fe-exec.c:847 msgid "NOTICE" msgstr "NOTICE" -#: fe-exec.c:1141 fe-exec.c:1199 fe-exec.c:1245 +#: fe-exec.c:905 +msgid "PGresult cannot support more than INT_MAX tuples" +msgstr "PGresult ne supporte pas plus de INT_MAX lignes" + +#: fe-exec.c:917 +msgid "size_t overflow" +msgstr "saturation de size_t" + +#: fe-exec.c:1192 fe-exec.c:1250 fe-exec.c:1296 msgid "command string is a null pointer\n" msgstr "la chaîne de commande est un pointeur nul\n" -#: fe-exec.c:1205 fe-exec.c:1251 fe-exec.c:1346 +#: fe-exec.c:1256 fe-exec.c:1302 fe-exec.c:1397 msgid "number of parameters must be between 0 and 65535\n" msgstr "le nombre de paramètres doit être compris entre 0 et 65535\n" -#: fe-exec.c:1239 fe-exec.c:1340 +#: fe-exec.c:1290 fe-exec.c:1391 msgid "statement name is a null pointer\n" msgstr "le nom de l'instruction est un pointeur nul\n" -#: fe-exec.c:1259 fe-exec.c:1422 fe-exec.c:2140 fe-exec.c:2339 +#: fe-exec.c:1310 fe-exec.c:1473 fe-exec.c:2191 fe-exec.c:2390 msgid "function requires at least protocol version 3.0\n" msgstr "la fonction nécessite au minimum le protocole 3.0\n" -#: fe-exec.c:1377 +#: fe-exec.c:1428 msgid "no connection to the server\n" msgstr "aucune connexion au serveur\n" -#: fe-exec.c:1384 +#: fe-exec.c:1435 msgid "another command is already in progress\n" msgstr "une autre commande est déjà en cours\n" -#: fe-exec.c:1498 +#: fe-exec.c:1549 msgid "length must be given for binary parameter\n" msgstr "la longueur doit être indiquée pour les paramètres binaires\n" -#: fe-exec.c:1770 +#: fe-exec.c:1821 #, c-format msgid "unexpected asyncStatus: %d\n" msgstr "asyncStatus inattendu : %d\n" -#: fe-exec.c:1790 +#: fe-exec.c:1841 #, c-format msgid "PGEventProc \"%s\" failed during PGEVT_RESULTCREATE event\n" msgstr "échec de PGEventProc « %s » lors de l'événement PGEVT_RESULTCREATE\n" -#: fe-exec.c:1950 +#: fe-exec.c:2001 msgid "COPY terminated by new PQexec" msgstr "COPY terminé par un 
nouveau PQexec" -#: fe-exec.c:1958 +#: fe-exec.c:2009 msgid "COPY IN state must be terminated first\n" msgstr "l'état COPY IN doit d'abord être terminé\n" -#: fe-exec.c:1978 +#: fe-exec.c:2029 msgid "COPY OUT state must be terminated first\n" msgstr "l'état COPY OUT doit d'abord être terminé\n" -#: fe-exec.c:1986 +#: fe-exec.c:2037 msgid "PQexec not allowed during COPY BOTH\n" msgstr "PQexec non autorisé pendant COPY BOTH\n" -#: fe-exec.c:2229 fe-exec.c:2296 fe-exec.c:2386 fe-protocol2.c:1352 +#: fe-exec.c:2280 fe-exec.c:2347 fe-exec.c:2437 fe-protocol2.c:1352 #: fe-protocol3.c:1817 msgid "no COPY in progress\n" msgstr "aucun COPY en cours\n" -#: fe-exec.c:2576 +#: fe-exec.c:2627 msgid "connection in wrong state\n" msgstr "connexion dans un état erroné\n" -#: fe-exec.c:2607 +#: fe-exec.c:2658 msgid "invalid ExecStatusType code" msgstr "code ExecStatusType invalide" -#: fe-exec.c:2634 +#: fe-exec.c:2685 msgid "PGresult is not an error result\n" msgstr "PGresult n'est pas un résultat d'erreur\n" -#: fe-exec.c:2709 fe-exec.c:2732 +#: fe-exec.c:2760 fe-exec.c:2783 #, c-format msgid "column number %d is out of range 0..%d" msgstr "le numéro de colonne %d est en dehors des limites 0..%d" -#: fe-exec.c:2725 -#, c-format -msgid "row number %d is out of range 0..%d" -msgstr "le numéro de ligne %d est en dehors des limites 0..%d" - -#: fe-exec.c:2747 +#: fe-exec.c:2798 #, c-format msgid "parameter number %d is out of range 0..%d" msgstr "le numéro de paramètre %d est en dehors des limites 0..%d" -#: fe-exec.c:3057 +#: fe-exec.c:3108 #, c-format msgid "could not interpret result from server: %s" msgstr "n'a pas pu interpréter la réponse du serveur : %s" -#: fe-exec.c:3296 fe-exec.c:3380 +#: fe-exec.c:3347 fe-exec.c:3431 msgid "incomplete multibyte character\n" msgstr "caractère multi-octet incomplet\n" @@ -812,12 +819,6 @@ msgstr "état %c invalide, indiquant probablement une corruption de la mémoire\ msgid "message type 0x%02x arrived from server while idle" msgstr "le message de type 0x%02x est arrivé alors que le serveur était en attente" -#: fe-protocol2.c:503 fe-protocol2.c:538 fe-protocol2.c:1049 fe-protocol3.c:209 -#: fe-protocol3.c:236 fe-protocol3.c:253 fe-protocol3.c:333 fe-protocol3.c:728 -#: fe-protocol3.c:951 -msgid "out of memory" -msgstr "mémoire épuisée" - #: fe-protocol2.c:529 #, c-format msgid "unexpected character %c following empty query response (\"I\" message)" @@ -848,11 +849,6 @@ msgstr "réponse inattendue du serveur, le premier caractère reçu étant « %c msgid "out of memory for query result" msgstr "mémoire épuisée pour le résultat de la requête" -#: fe-protocol2.c:1395 fe-protocol3.c:1886 -#, c-format -msgid "%s" -msgstr "%s" - #: fe-protocol2.c:1407 #, c-format msgid "lost synchronization with server, resetting connection" @@ -1151,6 +1147,9 @@ msgstr "n'a pas pu transmettre les données au serveur : %s\n" msgid "unrecognized socket error: 0x%08X/%d" msgstr "erreur de socket non reconnue : 0x%08X/%d" +#~ msgid "failed to generate nonce\n" +#~ msgstr "échec pour la génération de nonce\n" + #~ msgid "socket not open\n" #~ msgstr "socket non ouvert\n" @@ -1205,3 +1204,13 @@ msgstr "erreur de socket non reconnue : 0x%08X/%d" #~ msgid "setsockopt(TCP_KEEPIDLE) failed: %s\n" #~ msgstr "setsockopt(TCP_KEEPIDLE) a échoué : %s\n" + +#~ msgid "could not get home directory to locate service definition file" +#~ msgstr "" +#~ "n'a pas pu obtenir le répertoire personnel pour trouver le certificat de\n" +#~ "définition du service" + +#~ msgid "could not get home directory to locate 
password file\n" +#~ msgstr "" +#~ "n'a pas pu obtenir le répertoire personnel pour trouver le fichier de\n" +#~ "mot de passe\n" diff --git a/src/interfaces/libpq/po/it.po b/src/interfaces/libpq/po/it.po index 87c3562893..21f6c8ed07 100644 --- a/src/interfaces/libpq/po/it.po +++ b/src/interfaces/libpq/po/it.po @@ -1,56 +1,105 @@ # -# Translation of libpq to Italian -# PostgreSQL Project +# libpq.po +# Italian message translation file for libpq # -# Associazione Culturale ITPUG - Italian PostgreSQL Users Group -# http://www.itpug.org/ - info@itpug.org +# For development and bug report please use: +# https://github.com/dvarrazzo/postgresql-it # -# Traduttori: -# * Daniele Varrazzo -# * Maurizio Totti +# Copyright (C) 2012-2017 PostgreSQL Global Development Group +# Copyright (C) 2010, Associazione Culturale ITPUG # -# Revisori: -# * Emanuele Zamprogno -# -# Copyright (c) 2010, Associazione Culturale ITPUG -# Distributed under the same license of the PostgreSQL project -# -# Translation of libpq to Italian -# PostgreSQL Project -# -# Associazione Culturale ITPUG - Italian PostgreSQL Users Group -# http://www.itpug.org/ - info@itpug.org -# -# Traduttori: -# * Maurizio Totti -# -# Revisori: -# * Emanuele Zamprogno -# -# Copyright (c) 2009, Associazione Culturale ITPUG -# Distributed under the same license of the PostgreSQL project -# -# Italian translation file for libpq. +# Daniele Varrazzo , 2012-2017 +# Maurizio Totti , 2010 # Fabrizio Mazzoni , 2003. +# Gaetano Mendola , 2003. # -# Versione 1.00 del 12 Ottobre 2003 -# Revisione 1.01 del 14 Ottobre 2003 a cura di Gaetano Mendola +# This file is distributed under the same license as the PostgreSQL package. # msgid "" msgstr "" "Project-Id-Version: libpq (PostgreSQL) 10\n" "Report-Msgid-Bugs-To: pgsql-bugs@postgresql.org\n" -"POT-Creation-Date: 2017-05-22 07:38+0000\n" -"PO-Revision-Date: 2017-05-23 01:18+0100\n" +"POT-Creation-Date: 2017-11-07 19:08+0000\n" +"PO-Revision-Date: 2017-08-31 01:08+0100\n" "Last-Translator: Daniele Varrazzo \n" -"Language-Team: Gruppo traduzioni ITPUG \n" +"Language-Team: https://github.com/dvarrazzo/postgresql-it\n" "Language: it\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" "Plural-Forms: nplurals=2; plural=n != 1;\n" "X-Poedit-SourceCharset: UTF-8\n" -"X-Generator: Poedit 1.8.7.1\n" +"X-Generator: Poedit 1.5.4\n" + +#: fe-auth-scram.c:176 +msgid "malformed SCRAM message (empty message)\n" +msgstr "messaggio SCRAM malformato (messaggio vuoto)\n" + +#: fe-auth-scram.c:182 +msgid "malformed SCRAM message (length mismatch)\n" +msgstr "messaggio SCRAM malformato (lunghezza errata)\n" + +#: fe-auth-scram.c:231 +msgid "incorrect server signature\n" +msgstr "firma del server non corretta\n" + +#: fe-auth-scram.c:240 +msgid "invalid SCRAM exchange state\n" +msgstr "stato di scambio SCRAM non valido\n" + +#: fe-auth-scram.c:263 +#, c-format +msgid "malformed SCRAM message (attribute \"%c\" expected)\n" +msgstr "messaggio SCRAM malformato (atteso attributo \"%c\")\n" + +#: fe-auth-scram.c:272 +#, c-format +msgid "malformed SCRAM message (expected character \"=\" for attribute \"%c\")\n" +msgstr "messaggio SCRAM malformato (atteso carattere \"=\" per l'attributo \"%c\")\n" + +#: fe-auth-scram.c:311 +msgid "could not generate nonce\n" +msgstr "generazione del nonce fallita\n" + +#: fe-auth-scram.c:319 fe-auth-scram.c:336 fe-auth-scram.c:346 +#: fe-auth-scram.c:400 fe-auth-scram.c:420 fe-auth-scram.c:445 +#: fe-auth-scram.c:459 fe-auth-scram.c:501 fe-auth.c:227 
fe-auth.c:362 +#: fe-auth.c:432 fe-auth.c:467 fe-auth.c:609 fe-auth.c:768 fe-auth.c:1080 +#: fe-auth.c:1228 fe-connect.c:775 fe-connect.c:1202 fe-connect.c:1378 +#: fe-connect.c:1946 fe-connect.c:2475 fe-connect.c:4061 fe-connect.c:4313 +#: fe-connect.c:4432 fe-connect.c:4682 fe-connect.c:4762 fe-connect.c:4861 +#: fe-connect.c:5117 fe-connect.c:5146 fe-connect.c:5218 fe-connect.c:5242 +#: fe-connect.c:5260 fe-connect.c:5361 fe-connect.c:5370 fe-connect.c:5726 +#: fe-connect.c:5876 fe-exec.c:2702 fe-exec.c:3449 fe-exec.c:3614 +#: fe-lobj.c:896 fe-protocol2.c:1206 fe-protocol3.c:992 fe-protocol3.c:1678 +#: fe-secure-openssl.c:514 fe-secure-openssl.c:1138 +msgid "out of memory\n" +msgstr "memoria esaurita\n" + +#: fe-auth-scram.c:437 +msgid "invalid SCRAM response (nonce mismatch)\n" +msgstr "risposta SCRAM non valida (il nonce non combacia)\n" + +#: fe-auth-scram.c:476 +msgid "malformed SCRAM message (invalid iteration count)\n" +msgstr "messaggio SCRAM malformato (numero di iterazione non valido)\n" + +#: fe-auth-scram.c:482 +msgid "malformed SCRAM message (garbage at end of server-first-message)\n" +msgstr "messaggio SCRAM malformato (dati non riconosciuti dopo il primo messaggio del server)\n" + +#: fe-auth-scram.c:511 +#, c-format +msgid "error received from server in SCRAM exchange: %s\n" +msgstr "errore ricevuto dal server durante lo scambio SCRAM: %s\n" + +#: fe-auth-scram.c:526 +msgid "malformed SCRAM message (garbage at end of server-final-message)\n" +msgstr "messaggio SCRAM malformato (dati non riconosciuti dopo il messaggio finale del server)\n" + +#: fe-auth-scram.c:534 +msgid "malformed SCRAM message (invalid server signature)\n" +msgstr "messaggio SCRAM malformato (firma del server non valida)\n" #: fe-auth.c:122 #, c-format @@ -58,152 +107,145 @@ msgid "out of memory allocating GSSAPI buffer (%d)\n" msgstr "memoria esaurita nell'allocazione del buffer GSSAPI (%d)\n" # DV: non ne sono convinto -#: fe-auth.c:172 +#: fe-auth.c:177 msgid "GSSAPI continuation error" msgstr "GSSAPI errore di continuazione" -#: fe-auth.c:202 fe-auth.c:451 +#: fe-auth.c:207 fe-auth.c:461 msgid "host name must be specified\n" msgstr "il nome dell'host deve essere specificato\n" -#: fe-auth.c:209 +#: fe-auth.c:214 msgid "duplicate GSS authentication request\n" msgstr "richiesta di autenticazione GSS duplicata\n" -#: fe-auth.c:222 fe-auth.c:357 fe-auth.c:422 fe-auth.c:457 fe-auth.c:599 -#: fe-auth.c:758 fe-auth.c:1070 fe-auth.c:1217 fe-connect.c:712 -#: fe-connect.c:1091 fe-connect.c:1267 fe-connect.c:1824 fe-connect.c:2352 -#: fe-connect.c:3953 fe-connect.c:4205 fe-connect.c:4324 fe-connect.c:4564 -#: fe-connect.c:4644 fe-connect.c:4743 fe-connect.c:4999 fe-connect.c:5028 -#: fe-connect.c:5100 fe-connect.c:5124 fe-connect.c:5142 fe-connect.c:5243 -#: fe-connect.c:5252 fe-connect.c:5608 fe-connect.c:5758 fe-exec.c:2651 -#: fe-exec.c:3398 fe-exec.c:3563 fe-lobj.c:896 fe-protocol2.c:1206 -#: fe-protocol3.c:992 fe-protocol3.c:1678 fe-secure-openssl.c:514 -#: fe-secure-openssl.c:1138 -msgid "out of memory\n" -msgstr "memoria esaurita\n" - # non è che mi torni tanto così -#: fe-auth.c:235 +#: fe-auth.c:240 msgid "GSSAPI name import error" msgstr "errore di importazione del nome GSSAPI" -#: fe-auth.c:298 +#: fe-auth.c:303 #, c-format msgid "out of memory allocating SSPI buffer (%d)\n" msgstr "memoria esaurita nell'allocazione del buffer SSPI (%d)\n" -#: fe-auth.c:346 +#: fe-auth.c:351 msgid "SSPI continuation error" msgstr "SSPI errore di continuazione" -#: fe-auth.c:437 +#: fe-auth.c:422 +msgid "duplicate 
SSPI authentication request\n" +msgstr "richiesta di autenticazione SSPI duplicata\n" + +#: fe-auth.c:447 msgid "could not acquire SSPI credentials" msgstr "non è stato possibile ottenere le credenziali SSPI" -#: fe-auth.c:490 +#: fe-auth.c:500 msgid "duplicate SASL authentication request\n" msgstr "doppia richiesta di autenticazione SASL\n" -#: fe-auth.c:550 +#: fe-auth.c:560 msgid "none of the server's SASL authentication mechanisms are supported\n" msgstr "nessuno dei meccanismi di autenticazione SASL del server è supportato\n" -#: fe-auth.c:623 +#: fe-auth.c:633 #, c-format msgid "out of memory allocating SASL buffer (%d)\n" msgstr "memoria esaurita nell'allocazione del buffer SASL (%d)\n" -#: fe-auth.c:648 +#: fe-auth.c:658 msgid "AuthenticationSASLFinal received from server, but SASL authentication was not completed\n" msgstr "ricevuto AuthenticationSASLFinal dal server, ma l'autenticazione SASL non è stata completata\n" -#: fe-auth.c:725 +#: fe-auth.c:735 msgid "SCM_CRED authentication method not supported\n" msgstr "il metodo di autenticazione SCM_CRED non è supportato\n" -#: fe-auth.c:816 +#: fe-auth.c:826 msgid "Kerberos 4 authentication not supported\n" msgstr "l'autenticazione Kerberos 4 non è supportata\n" -#: fe-auth.c:821 +#: fe-auth.c:831 msgid "Kerberos 5 authentication not supported\n" msgstr "l'autenticazione Kerberos 5 non è supportata\n" -#: fe-auth.c:892 +#: fe-auth.c:902 msgid "GSSAPI authentication not supported\n" msgstr "l'autenticazione GSSAPI non è supportata\n" -#: fe-auth.c:924 +#: fe-auth.c:934 msgid "SSPI authentication not supported\n" msgstr "l'autenticazione SSPI non è supportata\n" -#: fe-auth.c:932 +#: fe-auth.c:942 msgid "Crypt authentication not supported\n" msgstr "l'autenticazione Crypt non è supportata\n" -#: fe-auth.c:998 +#: fe-auth.c:1008 #, c-format msgid "authentication method %u not supported\n" msgstr "l'autenticazione %u non è supportata\n" -#: fe-auth.c:1045 +#: fe-auth.c:1055 #, c-format msgid "user name lookup failure: error code %lu\n" msgstr "ricerca del nome utente fallita: codice di errore %lu\n" -#: fe-auth.c:1055 fe-connect.c:2279 +#: fe-auth.c:1065 fe-connect.c:2402 #, c-format msgid "could not look up local user ID %d: %s\n" msgstr "ricerca dell'ID utente locale %d non riuscita: %s\n" -#: fe-auth.c:1060 fe-connect.c:2284 +#: fe-auth.c:1070 fe-connect.c:2407 #, c-format msgid "local user with ID %d does not exist\n" msgstr "l'utente locale con ID %d non esiste\n" -#: fe-auth.c:1162 +#: fe-auth.c:1172 msgid "unexpected shape of result set returned for SHOW\n" msgstr "il risultato restituito da SHOW ha una forma imprevista\n" -#: fe-auth.c:1171 +#: fe-auth.c:1181 msgid "password_encryption value too long\n" msgstr "valore di password_encryption troppo lungo\n" -#: fe-auth.c:1211 -msgid "unknown password encryption algorithm\n" -msgstr "algoritmo di criptaggio della password sconosciuto\n" +#: fe-auth.c:1221 +#, c-format +msgid "unrecognized password encryption algorithm \"%s\"\n" +msgstr "algoritmo di criptaggio della password \"%s\" sconosciuto\n" + +#: fe-connect.c:968 +#, c-format +msgid "could not match %d host names to %d hostaddrs\n" +msgstr "non è possibile far combaciare %d nomi host con %d indirizzi host\n" -#: fe-connect.c:913 +#: fe-connect.c:1025 #, c-format msgid "could not match %d port numbers to %d hosts\n" msgstr "non è possibile far combaciare %d numeri di porta con %d host\n" -#: fe-connect.c:965 -msgid "could not get home directory to locate password file\n" -msgstr "errore nel raggiungere la directory home per 
trovare il file delle password\n" - -#: fe-connect.c:1017 +#: fe-connect.c:1128 #, c-format msgid "invalid sslmode value: \"%s\"\n" msgstr "valore sslmode errato: \"%s\"\n" -#: fe-connect.c:1038 +#: fe-connect.c:1149 #, c-format msgid "sslmode value \"%s\" invalid when SSL support is not compiled in\n" msgstr "valore sslmode \"%s\" non valido quando il supporto SSL non è compilato\n" -#: fe-connect.c:1073 +#: fe-connect.c:1184 #, c-format msgid "invalid target_session_attrs value: \"%s\"\n" msgstr "valore per target_session_attrs non valido: \"%s\"\n" -#: fe-connect.c:1291 +#: fe-connect.c:1402 #, c-format msgid "could not set socket to TCP no delay mode: %s\n" msgstr "impostazione del socket in modalità TCP no delay fallita: %s\n" -#: fe-connect.c:1321 +#: fe-connect.c:1432 #, c-format msgid "" "could not connect to server: %s\n" @@ -214,7 +256,7 @@ msgstr "" "\tVerifica che il server locale sia in funzione e che\n" "\taccetti connessioni sul socket di dominio Unix \"%s\"\n" -#: fe-connect.c:1376 +#: fe-connect.c:1490 #, c-format msgid "" "could not connect to server: %s\n" @@ -225,7 +267,7 @@ msgstr "" "\tVerifica che il server all'indirizzo \"%s\" (%s) sia in funzione\n" "\te che accetti connessioni TCP/IP sulla porta %s\n" -#: fe-connect.c:1385 +#: fe-connect.c:1499 #, c-format msgid "" "could not connect to server: %s\n" @@ -236,408 +278,408 @@ msgstr "" "\tVerifica che il server all'indirizzo \"%s\" sia in funzione\n" "\te che accetti connessioni TCP/IP sulla porta %s\n" -#: fe-connect.c:1436 -#, c-format -msgid "setsockopt(TCP_KEEPIDLE) failed: %s\n" -msgstr "chiamata setsockopt(TCP_KEEPIDLE) fallita: %s\n" - -#: fe-connect.c:1449 -#, c-format -msgid "setsockopt(TCP_KEEPALIVE) failed: %s\n" -msgstr "chiamata setsockopt(TCP_KEEPALIVE) fallita: %s\n" - -#: fe-connect.c:1481 -#, c-format -msgid "setsockopt(TCP_KEEPINTVL) failed: %s\n" -msgstr "chiamata setsockopt(TCP_KEEPINTVL) fallita: %s\n" - -#: fe-connect.c:1513 +#: fe-connect.c:1550 fe-connect.c:1582 fe-connect.c:1615 fe-connect.c:2174 #, c-format -msgid "setsockopt(TCP_KEEPCNT) failed: %s\n" -msgstr "chiamata setsockopt(TCP_KEEPCNT) fallita: %s\n" +msgid "setsockopt(%s) failed: %s\n" +msgstr "setsockopt(%s) fallita: %s\n" -#: fe-connect.c:1561 +#: fe-connect.c:1664 #, c-format msgid "WSAIoctl(SIO_KEEPALIVE_VALS) failed: %ui\n" msgstr "chiamata WSAIoctl(SIO_KEEPALIVE_VALS) fallito: %ui\n" -#: fe-connect.c:1619 +#: fe-connect.c:1721 #, c-format msgid "invalid port number: \"%s\"\n" msgstr "numero di porta non valido: \"%s\"\n" -#: fe-connect.c:1643 -#, c-format -msgid "Unix-domain socket path \"%s\" is too long (maximum %d bytes)\n" -msgstr "Il percorso del socket di dominio unix \"%s\" è troppo lungo (massimo %d byte)\n" - -#: fe-connect.c:1661 +#: fe-connect.c:1737 #, c-format msgid "could not translate host name \"%s\" to address: %s\n" msgstr "conversione del nome host \"%s\" in indirizzo fallita: %s\n" -#: fe-connect.c:1665 +#: fe-connect.c:1746 +#, c-format +msgid "could not parse network address \"%s\": %s\n" +msgstr "interpretazione dell'indirizzo di rete \"%s\" fallita: %s\n" + +#: fe-connect.c:1757 +#, c-format +msgid "Unix-domain socket path \"%s\" is too long (maximum %d bytes)\n" +msgstr "Il percorso del socket di dominio unix \"%s\" è troppo lungo (massimo %d byte)\n" + +#: fe-connect.c:1771 #, c-format msgid "could not translate Unix-domain socket path \"%s\" to address: %s\n" msgstr "conversione del percorso del socket di dominio Unix \"%s\" in indirizzo fallita: %s\n" -#: fe-connect.c:1930 +#: fe-connect.c:2052 
msgid "invalid connection state, probably indicative of memory corruption\n" msgstr "stato della connessione non valido, probabilmente indica una corruzione della memoria\n" -#: fe-connect.c:1987 +#: fe-connect.c:2109 #, c-format msgid "could not create socket: %s\n" msgstr "creazione del socket fallita: %s\n" -#: fe-connect.c:2009 +#: fe-connect.c:2131 #, c-format msgid "could not set socket to nonblocking mode: %s\n" msgstr "impostazione del socket in modalità non bloccante fallita: %s\n" -#: fe-connect.c:2020 +#: fe-connect.c:2142 #, c-format msgid "could not set socket to close-on-exec mode: %s\n" msgstr "impostazione del socket in modalità close-on-exec fallita: %s\n" -#: fe-connect.c:2039 +#: fe-connect.c:2161 msgid "keepalives parameter must be an integer\n" msgstr "il parametro keepalives dev'essere un intero\n" -#: fe-connect.c:2052 -#, c-format -msgid "setsockopt(SO_KEEPALIVE) failed: %s\n" -msgstr "chiamata setsockopt(SO_KEEPALIVE) fallita: %s\n" - -#: fe-connect.c:2189 +#: fe-connect.c:2312 #, c-format msgid "could not get socket error status: %s\n" msgstr "lettura dello stato di errore del socket fallita: %s\n" -#: fe-connect.c:2224 +#: fe-connect.c:2347 #, c-format msgid "could not get client address from socket: %s\n" msgstr "non è stato possibile ottenere l'indirizzo del client dal socket: %s\n" -#: fe-connect.c:2266 +#: fe-connect.c:2389 msgid "requirepeer parameter is not supported on this platform\n" msgstr "il parametro requirepeer non è supportato su questa piattaforma\n" -#: fe-connect.c:2269 +#: fe-connect.c:2392 #, c-format msgid "could not get peer credentials: %s\n" msgstr "non è stato possibile ottenere le credenziali del peer: %s\n" -#: fe-connect.c:2292 +#: fe-connect.c:2415 #, c-format msgid "requirepeer specifies \"%s\", but actual peer user name is \"%s\"\n" msgstr "requirepeer specifica \"%s\", ma il vero nome utente del peer è \"%s\"\n" -#: fe-connect.c:2326 +#: fe-connect.c:2449 #, c-format msgid "could not send SSL negotiation packet: %s\n" msgstr "invio del pacchetto di negoziazione SSL fallito: %s\n" -#: fe-connect.c:2365 +#: fe-connect.c:2488 #, c-format msgid "could not send startup packet: %s\n" msgstr "invio del pacchetto di avvio fallito: %s\n" -#: fe-connect.c:2435 +#: fe-connect.c:2558 msgid "server does not support SSL, but SSL was required\n" msgstr "il server non supporta SSL, ma SSL è stato richiesto\n" -#: fe-connect.c:2461 +#: fe-connect.c:2584 #, c-format msgid "received invalid response to SSL negotiation: %c\n" msgstr "ricevuta risposta errata alla negoziazione SSL: %c\n" -#: fe-connect.c:2537 fe-connect.c:2570 +#: fe-connect.c:2660 fe-connect.c:2693 #, c-format msgid "expected authentication request from server, but received %c\n" msgstr "prevista richiesta di autenticazione dal server, ma è stato ricevuto %c\n" -#: fe-connect.c:2799 +#: fe-connect.c:2922 msgid "unexpected message from server during startup\n" msgstr "messaggio imprevisto dal server durante l'avvio\n" -#: fe-connect.c:3003 +#: fe-connect.c:3140 #, c-format msgid "could not make a writable connection to server \"%s:%s\"\n" msgstr "errore nello stabilire una connessione scrivibile col server \"%s:%s\"\n" -#: fe-connect.c:3045 +#: fe-connect.c:3189 #, c-format -msgid "test \"SHOW transaction_read_only\" failed on \"%s:%s\"\n" -msgstr "test \"SHOW transaction_read_only\" fallito su \"%s:%s\"\n" +msgid "test \"SHOW transaction_read_only\" failed on server \"%s:%s\"\n" +msgstr "test \"SHOW transaction_read_only\" fallito sul server \"%s:%s\"\n" -#: fe-connect.c:3067 +#: 
fe-connect.c:3210 #, c-format msgid "invalid connection state %d, probably indicative of memory corruption\n" msgstr "stato connessione errato %d, probabilmente indica una corruzione di memoria\n" -#: fe-connect.c:3559 fe-connect.c:3619 +#: fe-connect.c:3667 fe-connect.c:3727 #, c-format msgid "PGEventProc \"%s\" failed during PGEVT_CONNRESET event\n" msgstr "PGEventProc \"%s\" fallito durante l'evento PGEVT_CONNRESET\n" -#: fe-connect.c:3966 +#: fe-connect.c:4074 #, c-format msgid "invalid LDAP URL \"%s\": scheme must be ldap://\n" msgstr "URL LDAP \"%s\" non corretta: lo schema deve essere ldap://\n" -#: fe-connect.c:3981 +#: fe-connect.c:4089 #, c-format msgid "invalid LDAP URL \"%s\": missing distinguished name\n" msgstr "URL LDAP \"%s\" non corretta: distinguished name non trovato\n" -#: fe-connect.c:3992 fe-connect.c:4045 +#: fe-connect.c:4100 fe-connect.c:4153 #, c-format msgid "invalid LDAP URL \"%s\": must have exactly one attribute\n" msgstr "URL LDAP \"%s\" non corretta: deve avere esattamente un attributo\n" -#: fe-connect.c:4002 fe-connect.c:4059 +#: fe-connect.c:4110 fe-connect.c:4167 #, c-format msgid "invalid LDAP URL \"%s\": must have search scope (base/one/sub)\n" msgstr "URL LDAP \"%s\" non corretta: deve essere specificato la portata della ricerca (base/one/sub)\n" -#: fe-connect.c:4013 +#: fe-connect.c:4121 #, c-format msgid "invalid LDAP URL \"%s\": no filter\n" msgstr "URL LDAP \"%s\" non corretta: filtro non specificato\n" -#: fe-connect.c:4034 +#: fe-connect.c:4142 #, c-format msgid "invalid LDAP URL \"%s\": invalid port number\n" msgstr "URL LDAP \"%s\" non corretta: numero di porta non valido\n" -#: fe-connect.c:4068 +#: fe-connect.c:4176 msgid "could not create LDAP structure\n" msgstr "creazione della struttura dati LDAP fallita\n" -#: fe-connect.c:4144 +#: fe-connect.c:4252 #, c-format msgid "lookup on LDAP server failed: %s\n" msgstr "ricerca del server LDAP fallita: %s\n" -#: fe-connect.c:4155 +#: fe-connect.c:4263 msgid "more than one entry found on LDAP lookup\n" msgstr "trovata più di una voce nella ricerca LDAP\n" -#: fe-connect.c:4156 fe-connect.c:4168 +#: fe-connect.c:4264 fe-connect.c:4276 msgid "no entry found on LDAP lookup\n" msgstr "nessun elemento trovato per la ricerca LDAP\n" -#: fe-connect.c:4179 fe-connect.c:4192 +#: fe-connect.c:4287 fe-connect.c:4300 msgid "attribute has no values on LDAP lookup\n" msgstr "l'attributo non ha valori nella ricerca LDAP\n" -#: fe-connect.c:4244 fe-connect.c:4263 fe-connect.c:4782 +#: fe-connect.c:4352 fe-connect.c:4371 fe-connect.c:4900 #, c-format msgid "missing \"=\" after \"%s\" in connection info string\n" msgstr "manca \"=\" dopo \"%s\" nella stringa di connessione\n" -#: fe-connect.c:4336 fe-connect.c:4967 fe-connect.c:5741 +#: fe-connect.c:4444 fe-connect.c:5085 fe-connect.c:5859 #, c-format msgid "invalid connection option \"%s\"\n" msgstr "opzione di connessione errata \"%s\"\n" -#: fe-connect.c:4352 fe-connect.c:4831 +#: fe-connect.c:4460 fe-connect.c:4949 msgid "unterminated quoted string in connection info string\n" msgstr "stringa tra virgolette non terminata nella stringa di connessione\n" -#: fe-connect.c:4392 -msgid "could not get home directory to locate service definition file" -msgstr "directory home non trovata per la localizzazione del file di definizione di servizio" - -#: fe-connect.c:4425 +#: fe-connect.c:4543 #, c-format msgid "definition of service \"%s\" not found\n" msgstr "il file di definizione di servizio \"%s\" non è stato trovato\n" -#: fe-connect.c:4448 +#: fe-connect.c:4566 
#, c-format msgid "service file \"%s\" not found\n" msgstr "il file di servizio \"%s\" non è stato trovato\n" -#: fe-connect.c:4461 +#: fe-connect.c:4579 #, c-format msgid "line %d too long in service file \"%s\"\n" msgstr "la riga %d nel file di servizio \"%s\" è troppo lunga\n" -#: fe-connect.c:4532 fe-connect.c:4576 +#: fe-connect.c:4650 fe-connect.c:4694 #, c-format msgid "syntax error in service file \"%s\", line %d\n" msgstr "errore di sintassi del file di servizio \"%s\", alla riga %d\n" -#: fe-connect.c:4543 +#: fe-connect.c:4661 #, c-format msgid "nested service specifications not supported in service file \"%s\", line %d\n" msgstr "specifiche di servizio annidate non supportate nel file di servizio \"%s\", linea %d\n" -#: fe-connect.c:5263 +#: fe-connect.c:5381 #, c-format msgid "invalid URI propagated to internal parser routine: \"%s\"\n" msgstr "URI invalida propagata alla routine di parsing interna: \"%s\"\n" -#: fe-connect.c:5340 +#: fe-connect.c:5458 #, c-format msgid "end of string reached when looking for matching \"]\" in IPv6 host address in URI: \"%s\"\n" msgstr "fine stringa raggiunta cercando un \"]\" corrispondente nell'indirizzo host IPv6 nella URI: \"%s\"\n" -#: fe-connect.c:5347 +#: fe-connect.c:5465 #, c-format msgid "IPv6 host address may not be empty in URI: \"%s\"\n" msgstr "l'indirizzo host IPv6 non dev'essere assente nella URI: \"%s\"\n" -#: fe-connect.c:5362 +#: fe-connect.c:5480 #, c-format msgid "unexpected character \"%c\" at position %d in URI (expected \":\" or \"/\"): \"%s\"\n" msgstr "carattere inatteso \"%c\" in posizione %d nella uri URI (atteso \":\" oppure \"/\"): \"%s\"\n" -#: fe-connect.c:5491 +#: fe-connect.c:5609 #, c-format msgid "extra key/value separator \"=\" in URI query parameter: \"%s\"\n" msgstr "separatore chiave/valore \"=\" in eccesso nei parametri della URI: \"%s\"\n" -#: fe-connect.c:5511 +#: fe-connect.c:5629 #, c-format msgid "missing key/value separator \"=\" in URI query parameter: \"%s\"\n" msgstr "separatore chiave/valore \"=\" mancante nei parametri della URI: \"%s\"\n" -#: fe-connect.c:5562 +#: fe-connect.c:5680 #, c-format msgid "invalid URI query parameter: \"%s\"\n" msgstr "parametro URI non valido: \"%s\"\n" -#: fe-connect.c:5636 +#: fe-connect.c:5754 #, c-format msgid "invalid percent-encoded token: \"%s\"\n" msgstr "simbolo percent-encoded non valido \"%s\"\n" -#: fe-connect.c:5646 +#: fe-connect.c:5764 #, c-format msgid "forbidden value %%00 in percent-encoded value: \"%s\"\n" msgstr "valore non ammesso %%00 nel valore percent-encoded: \"%s\"\n" -#: fe-connect.c:5991 +#: fe-connect.c:6109 msgid "connection pointer is NULL\n" msgstr "il puntatore della connessione è NULL\n" -#: fe-connect.c:6289 +#: fe-connect.c:6407 #, c-format msgid "WARNING: password file \"%s\" is not a plain file\n" msgstr "ATTENZIONE: il file delle password \"%s\" non è un file regolare\n" -#: fe-connect.c:6298 +#: fe-connect.c:6416 #, c-format msgid "WARNING: password file \"%s\" has group or world access; permissions should be u=rw (0600) or less\n" msgstr "" "ATTENZIONE: Il file delle password %s ha privilegi di accesso in lettura e scrittura per tutti;\n" "i permessi dovrebbero essere u=rw (0600) o inferiori\n" -#: fe-connect.c:6390 +#: fe-connect.c:6508 #, c-format msgid "password retrieved from file \"%s\"\n" msgstr "password ottenuta dal file \"%s\"\n" -#: fe-exec.c:826 +#: fe-exec.c:437 fe-exec.c:2776 +#, c-format +msgid "row number %d is out of range 0..%d" +msgstr "la riga numero %d non è compreso tra 0 e %d" + +#: fe-exec.c:498 
fe-protocol2.c:503 fe-protocol2.c:538 fe-protocol2.c:1049 +#: fe-protocol3.c:209 fe-protocol3.c:236 fe-protocol3.c:253 fe-protocol3.c:333 +#: fe-protocol3.c:728 fe-protocol3.c:951 +msgid "out of memory" +msgstr "memoria esaurita" + +#: fe-exec.c:499 fe-protocol2.c:1395 fe-protocol3.c:1886 +#, c-format +msgid "%s" +msgstr "%s" + +#: fe-exec.c:847 msgid "NOTICE" msgstr "NOTIFICA" -#: fe-exec.c:1141 fe-exec.c:1199 fe-exec.c:1245 +#: fe-exec.c:905 +msgid "PGresult cannot support more than INT_MAX tuples" +msgstr "PGresult non può supportare più di INT_MAX tuple" + +#: fe-exec.c:917 +msgid "size_t overflow" +msgstr "overflow size_t" + +#: fe-exec.c:1192 fe-exec.c:1250 fe-exec.c:1296 msgid "command string is a null pointer\n" msgstr "il testo del comando è un puntatore nullo\n" -#: fe-exec.c:1205 fe-exec.c:1251 fe-exec.c:1346 +#: fe-exec.c:1256 fe-exec.c:1302 fe-exec.c:1397 msgid "number of parameters must be between 0 and 65535\n" msgstr "il numero di parametri deve essere tra 0 e 65535\n" -#: fe-exec.c:1239 fe-exec.c:1340 +#: fe-exec.c:1290 fe-exec.c:1391 msgid "statement name is a null pointer\n" msgstr "il nome dell'istruzione è un puntatore nullo\n" -#: fe-exec.c:1259 fe-exec.c:1422 fe-exec.c:2140 fe-exec.c:2339 +#: fe-exec.c:1310 fe-exec.c:1473 fe-exec.c:2191 fe-exec.c:2390 msgid "function requires at least protocol version 3.0\n" msgstr "la funzione richiede almeno il protocollo versione 3.0\n" -#: fe-exec.c:1377 +#: fe-exec.c:1428 msgid "no connection to the server\n" msgstr "nessuna connessione al server\n" -#: fe-exec.c:1384 +#: fe-exec.c:1435 msgid "another command is already in progress\n" msgstr "un altro comando è in esecuzione\n" -#: fe-exec.c:1498 +#: fe-exec.c:1549 msgid "length must be given for binary parameter\n" msgstr "la lunghezza deve essere fornita per i parametri binari\n" -#: fe-exec.c:1770 +#: fe-exec.c:1821 #, c-format msgid "unexpected asyncStatus: %d\n" msgstr "asyncStatus imprevisto: %d\n" -#: fe-exec.c:1790 +#: fe-exec.c:1841 #, c-format msgid "PGEventProc \"%s\" failed during PGEVT_RESULTCREATE event\n" msgstr "PGEventProc \"%s\" fallito durante l'evento PGEVT_RESULTCREATE\n" -#: fe-exec.c:1950 +#: fe-exec.c:2001 msgid "COPY terminated by new PQexec" msgstr "COPY terminato da una nuova PQexec" -#: fe-exec.c:1958 +#: fe-exec.c:2009 msgid "COPY IN state must be terminated first\n" msgstr "lo stato COPY IN deve prima essere terminato\n" -#: fe-exec.c:1978 +#: fe-exec.c:2029 msgid "COPY OUT state must be terminated first\n" msgstr "lo stato COPY OUT deve prima essere terminato\n" # NON SONO ASSOLUTAMENTE CONVINTO! 
-#: fe-exec.c:1986 +#: fe-exec.c:2037 msgid "PQexec not allowed during COPY BOTH\n" msgstr "PQexec not consentito durante COPY BOTH\n" -#: fe-exec.c:2229 fe-exec.c:2296 fe-exec.c:2386 fe-protocol2.c:1352 +#: fe-exec.c:2280 fe-exec.c:2347 fe-exec.c:2437 fe-protocol2.c:1352 #: fe-protocol3.c:1817 msgid "no COPY in progress\n" msgstr "nessun comando COPY in corso\n" -#: fe-exec.c:2576 +#: fe-exec.c:2627 msgid "connection in wrong state\n" msgstr "la connessione è in uno stato errato\n" -#: fe-exec.c:2607 +#: fe-exec.c:2658 msgid "invalid ExecStatusType code" msgstr "codice ExecStatusType errato" -#: fe-exec.c:2634 +#: fe-exec.c:2685 msgid "PGresult is not an error result\n" msgstr "PGresult non è un risultato di errore\n" -#: fe-exec.c:2709 fe-exec.c:2732 +#: fe-exec.c:2760 fe-exec.c:2783 #, c-format msgid "column number %d is out of range 0..%d" msgstr "la colonna numero %d non è compreso tra 0 e %d" -#: fe-exec.c:2725 -#, c-format -msgid "row number %d is out of range 0..%d" -msgstr "la riga numero %d non è compreso tra 0 e %d" - -#: fe-exec.c:2747 +#: fe-exec.c:2798 #, c-format msgid "parameter number %d is out of range 0..%d" msgstr "il parametro numero %d non è compreso tra 0 e %d" -#: fe-exec.c:3057 +#: fe-exec.c:3108 #, c-format msgid "could not interpret result from server: %s" msgstr "errore nell'interpretazione del risultato dal server: %s" -#: fe-exec.c:3296 fe-exec.c:3380 +#: fe-exec.c:3347 fe-exec.c:3431 msgid "incomplete multibyte character\n" msgstr "carattere multibyte incompleto\n" @@ -777,12 +819,6 @@ msgstr "stato %c non valido, probabilmente indica una corruzione di memoria\n" msgid "message type 0x%02x arrived from server while idle" msgstr "messaggio tipo 0x%02x arrivato dal server mentre era inattivo" -#: fe-protocol2.c:503 fe-protocol2.c:538 fe-protocol2.c:1049 fe-protocol3.c:209 -#: fe-protocol3.c:236 fe-protocol3.c:253 fe-protocol3.c:333 fe-protocol3.c:728 -#: fe-protocol3.c:951 -msgid "out of memory" -msgstr "memoria esaurita" - #: fe-protocol2.c:529 #, c-format msgid "unexpected character %c following empty query response (\"I\" message)" @@ -807,11 +843,6 @@ msgstr "risposta inattesa dal server; il primo carattere ricevuto era \"%c\"\n" msgid "out of memory for query result" msgstr "memoria esaurita per il risultato della query" -#: fe-protocol2.c:1395 fe-protocol3.c:1886 -#, c-format -msgid "%s" -msgstr "%s" - #: fe-protocol2.c:1407 #, c-format msgid "lost synchronization with server, resetting connection" diff --git a/src/interfaces/libpq/po/ko.po b/src/interfaces/libpq/po/ko.po index 43c53cca76..82f77deb92 100644 --- a/src/interfaces/libpq/po/ko.po +++ b/src/interfaces/libpq/po/ko.po @@ -3,10 +3,10 @@ # msgid "" msgstr "" -"Project-Id-Version: PostgreSQL 9.6 libpq\n" +"Project-Id-Version: libpq (PostgreSQL) 10\n" "Report-Msgid-Bugs-To: pgsql-bugs@postgresql.org\n" -"POT-Creation-Date: 2016-09-26 14:02+0900\n" -"PO-Revision-Date: 2016-09-26 16:37+0900\n" +"POT-Creation-Date: 2017-09-19 09:51+0900\n" +"PO-Revision-Date: 2017-09-19 10:25+0900\n" "Last-Translator: Ioseph Kim \n" "Language-Team: Korean \n" "Language: ko\n" @@ -15,103 +15,231 @@ msgstr "" "Content-Transfer-Encoding: 8bit\n" "Plural-Forms: nplurals=1; plural=0;\n" -#: fe-auth.c:148 +#: fe-auth-scram.c:176 +msgid "malformed SCRAM message (empty message)\n" +msgstr "SCRAM 메시지가 형식에 안맞음 (메시지 비었음)\n" + +#: fe-auth-scram.c:182 +msgid "malformed SCRAM message (length mismatch)\n" +msgstr "SCRAM 메시지가 형식에 안맞음 (길이 불일치)\n" + +#: fe-auth-scram.c:231 +msgid "incorrect server signature\n" +msgstr "잘못된 서버 서명\n" + 
+#: fe-auth-scram.c:240 +msgid "invalid SCRAM exchange state\n" +msgstr "SCRAM 교화 상태가 바르지 않음\n" + +#: fe-auth-scram.c:263 +#, c-format +msgid "malformed SCRAM message (attribute \"%c\" expected)\n" +msgstr "SCRAM 메시지가 형식에 안맞음 (\"%c\" 속성이 예상됨)\n" + +#: fe-auth-scram.c:272 +#, c-format +msgid "" +"malformed SCRAM message (expected character \"=\" for attribute \"%c\")\n" +msgstr "SCRAM 메시지가 형식에 안맞음 (\"%c\" 속성 예상값은 \"=\")\n" + +#: fe-auth-scram.c:311 +msgid "could not generate nonce\n" +msgstr "암호화 토큰(nonce)을 만들 수 없음\n" + +#: fe-auth-scram.c:319 fe-auth-scram.c:336 fe-auth-scram.c:346 +#: fe-auth-scram.c:400 fe-auth-scram.c:420 fe-auth-scram.c:445 +#: fe-auth-scram.c:459 fe-auth-scram.c:501 fe-auth.c:227 fe-auth.c:362 +#: fe-auth.c:432 fe-auth.c:467 fe-auth.c:609 fe-auth.c:768 fe-auth.c:1080 +#: fe-auth.c:1228 fe-connect.c:775 fe-connect.c:1203 fe-connect.c:1379 +#: fe-connect.c:1947 fe-connect.c:2476 fe-connect.c:4062 fe-connect.c:4314 +#: fe-connect.c:4433 fe-connect.c:4673 fe-connect.c:4753 fe-connect.c:4852 +#: fe-connect.c:5108 fe-connect.c:5137 fe-connect.c:5209 fe-connect.c:5233 +#: fe-connect.c:5251 fe-connect.c:5352 fe-connect.c:5361 fe-connect.c:5717 +#: fe-connect.c:5867 fe-exec.c:2702 fe-exec.c:3449 fe-exec.c:3614 +#: fe-lobj.c:896 fe-protocol2.c:1206 fe-protocol3.c:992 fe-protocol3.c:1678 +#: fe-secure-openssl.c:514 fe-secure-openssl.c:1138 +msgid "out of memory\n" +msgstr "메모리 부족\n" + +#: fe-auth-scram.c:437 +msgid "invalid SCRAM response (nonce mismatch)\n" +msgstr "잘못된 SCRAM 응답 (토큰 불일치)\n" + +#: fe-auth-scram.c:476 +msgid "malformed SCRAM message (invalid iteration count)\n" +msgstr "형식에 맞지 않은 SCRAM 메시지 (나열 숫자가 이상함)\n" + +#: fe-auth-scram.c:482 +msgid "malformed SCRAM message (garbage at end of server-first-message)\n" +msgstr "" +"형식에 맞지 않은 SCRAM 메시지 (서버 첫 메시지 끝에 쓸모 없는 값이 있음)\n" + +#: fe-auth-scram.c:511 +#, c-format +msgid "error received from server in SCRAM exchange: %s\n" +msgstr "SCRAM 교환작업에서 서버로부터 데이터를 받지 못했음: %s\n" + +#: fe-auth-scram.c:526 +msgid "malformed SCRAM message (garbage at end of server-final-message)\n" +msgstr "" +"형식에 맞지 않은 SCRAM 메시지 (서버 끝 메시지 뒤에 쓸모 없는 값이 있음)\n" + +#: fe-auth-scram.c:534 +msgid "malformed SCRAM message (invalid server signature)\n" +msgstr "형식에 맞지 않은 SCRAM 메시지 (서버 사인이 이상함)\n" + +#: fe-auth.c:122 +#, c-format +msgid "out of memory allocating GSSAPI buffer (%d)\n" +msgstr "GSSAPI 버퍼(%d)에 할당할 메모리 부족\n" + +#: fe-auth.c:177 msgid "GSSAPI continuation error" msgstr "GSSAPI 연속 오류" -#: fe-auth.c:177 fe-auth.c:412 +#: fe-auth.c:207 fe-auth.c:461 msgid "host name must be specified\n" msgstr "호스트 이름을 지정해야 함\n" -#: fe-auth.c:184 +#: fe-auth.c:214 msgid "duplicate GSS authentication request\n" msgstr "중복된 GSS 인증 요청\n" -#: fe-auth.c:197 fe-auth.c:309 fe-auth.c:383 fe-auth.c:418 fe-auth.c:514 -#: fe-auth.c:780 fe-connect.c:707 fe-connect.c:904 fe-connect.c:1080 -#: fe-connect.c:2091 fe-connect.c:3484 fe-connect.c:3736 fe-connect.c:3855 -#: fe-connect.c:4095 fe-connect.c:4175 fe-connect.c:4274 fe-connect.c:4530 -#: fe-connect.c:4559 fe-connect.c:4631 fe-connect.c:4649 fe-connect.c:4745 -#: fe-connect.c:5079 fe-connect.c:5229 fe-exec.c:2652 fe-exec.c:3399 -#: fe-exec.c:3564 fe-lobj.c:896 fe-protocol2.c:1206 fe-protocol3.c:992 -#: fe-protocol3.c:1678 fe-secure-openssl.c:552 fe-secure-openssl.c:1094 -msgid "out of memory\n" -msgstr "메모리 부족\n" - -#: fe-auth.c:210 +#: fe-auth.c:240 msgid "GSSAPI name import error" msgstr "GSSAPI 이름 가져오기 오류" -#: fe-auth.c:298 +#: fe-auth.c:303 +#, c-format +msgid "out of memory allocating SSPI buffer (%d)\n" +msgstr "SSPI 
버퍼(%d)에 할당할 메모리 부족\n" + +#: fe-auth.c:351 msgid "SSPI continuation error" msgstr "SSPI 연속 오류" -#: fe-auth.c:398 +#: fe-auth.c:422 +msgid "duplicate SSPI authentication request\n" +msgstr "중복된 SSPI 인증 요청\n" + +#: fe-auth.c:447 msgid "could not acquire SSPI credentials" msgstr "SSPI 자격 증명을 가져올 수 없음" -#: fe-auth.c:489 +#: fe-auth.c:500 +msgid "duplicate SASL authentication request\n" +msgstr "중복된 SASL 인증 요청\n" + +#: fe-auth.c:560 +msgid "none of the server's SASL authentication mechanisms are supported\n" +msgstr "SASL 인증 메커니즘을 지원하는 서버가 없습니다.\n" + +#: fe-auth.c:633 +#, c-format +msgid "out of memory allocating SASL buffer (%d)\n" +msgstr "SASL 버퍼(%d)에 할당할 메모리 부족\n" + +#: fe-auth.c:658 +msgid "" +"AuthenticationSASLFinal received from server, but SASL authentication was " +"not completed\n" +msgstr "" +"서버에서 AuthenticationSASLFinal 응답을 받았지만, SASL 인증이 끝나지 않았음\n" + +#: fe-auth.c:735 msgid "SCM_CRED authentication method not supported\n" msgstr "SCM_CRED 인증 방법이 지원되지 않음\n" -#: fe-auth.c:565 +#: fe-auth.c:826 msgid "Kerberos 4 authentication not supported\n" msgstr "Kerberos 4 인증 방법이 지원되지 않음\n" -#: fe-auth.c:570 +#: fe-auth.c:831 msgid "Kerberos 5 authentication not supported\n" msgstr "Kerberos 5 인증 방법이 지원되지 않음\n" -#: fe-auth.c:641 +#: fe-auth.c:902 msgid "GSSAPI authentication not supported\n" msgstr "GSSAPI 인증은 지원되지 않음\n" -#: fe-auth.c:673 +#: fe-auth.c:934 msgid "SSPI authentication not supported\n" msgstr "SSPI 인증은 지원되지 않음\n" -#: fe-auth.c:681 +#: fe-auth.c:942 msgid "Crypt authentication not supported\n" msgstr "암호화 인증은 지원되지 않음\n" -#: fe-auth.c:708 +#: fe-auth.c:1008 #, c-format msgid "authentication method %u not supported\n" msgstr "%u 인증 방법이 지원되지 않음\n" -#: fe-auth.c:755 +#: fe-auth.c:1055 #, c-format msgid "user name lookup failure: error code %lu\n" msgstr "사용자 이름 찾기 실패: 오류 코드 %lu\n" -#: fe-auth.c:765 fe-connect.c:2018 +#: fe-auth.c:1065 fe-connect.c:2403 #, c-format msgid "could not look up local user ID %d: %s\n" msgstr "UID %d 해당하는 사용자를 찾을 수 없음: %s\n" -#: fe-auth.c:770 fe-connect.c:2023 +#: fe-auth.c:1070 fe-connect.c:2408 #, c-format msgid "local user with ID %d does not exist\n" msgstr "ID %d 로컬 사용자 없음\n" -#: fe-connect.c:846 +#: fe-auth.c:1172 +msgid "unexpected shape of result set returned for SHOW\n" +msgstr "SHOW 명령의 결과 자료가 비정상임\n" + +#: fe-auth.c:1181 +msgid "password_encryption value too long\n" +msgstr "password_encryption 너무 긺\n" + +#: fe-auth.c:1221 +#, c-format +msgid "unrecognized password encryption algorithm \"%s\"\n" +msgstr "알 수 없는 비밀번호 암호화 알고리즘: \"%s\"\n" + +#: fe-connect.c:968 +#, c-format +msgid "could not match %d host names to %d hostaddrs\n" +msgstr "호스트 이름은 %d개인데, 호스트 주소는 %d개임\n" + +#: fe-connect.c:1025 +#, c-format +msgid "could not match %d port numbers to %d hosts\n" +msgstr "포트 번호는 %d개인데, 호스트는 %d개입니다.\n" + +#: fe-connect.c:1077 +msgid "could not get home directory to locate password file\n" +msgstr "비밀번호 파일이 있는 홈 디렉토리를 찾을 수 없음\n" + +#: fe-connect.c:1129 #, c-format msgid "invalid sslmode value: \"%s\"\n" msgstr "잘못된 sslmode 값: \"%s\"\n" -#: fe-connect.c:867 +#: fe-connect.c:1150 #, c-format msgid "sslmode value \"%s\" invalid when SSL support is not compiled in\n" msgstr "" "SSL 연결 기능을 지원하지 않고 컴파일 된 경우는 sslmode 값으로 \"%s\" 값은 타" "당치 않습니다\n" -#: fe-connect.c:1104 +#: fe-connect.c:1185 +#, c-format +msgid "invalid target_session_attrs value: \"%s\"\n" +msgstr "잘못된 target_session_attrs 값: \"%s\"\n" + +#: fe-connect.c:1403 #, c-format msgid "could not set socket to TCP no delay mode: %s\n" msgstr "소켓을 TCP에 no delay 모드로 지정할 수 없음: %s\n" -#: fe-connect.c:1134 +#: 
fe-connect.c:1433 #, c-format msgid "" "could not connect to server: %s\n" @@ -122,7 +250,7 @@ msgstr "" "\t로컬호스트에 서버가 가동 중인지,\n" "\t\"%s\" 유닉스 도메인 소켓 접근이 가능한지 살펴보십시오.\n" -#: fe-connect.c:1189 +#: fe-connect.c:1491 #, c-format msgid "" "could not connect to server: %s\n" @@ -133,7 +261,7 @@ msgstr "" "\t\"%s\" (%s) 호스트에 서버가 가동 중인지,\n" "\t%s 포트로 TCP/IP 연결이 가능한지 살펴보십시오.\n" -#: fe-connect.c:1198 +#: fe-connect.c:1500 #, c-format msgid "" "could not connect to server: %s\n" @@ -144,247 +272,238 @@ msgstr "" "\t\"%s\" 호스트에 서버가 가동 중인지,\n" "\t%s 포트로 TCP/IP 연결이 가능한지 살펴보십시오.\n" -#: fe-connect.c:1249 -#, c-format -msgid "setsockopt(TCP_KEEPIDLE) failed: %s\n" -msgstr "setsockopt(TCP_KEEPIDLE) 실패: %s\n" - -#: fe-connect.c:1262 -#, c-format -msgid "setsockopt(TCP_KEEPALIVE) failed: %s\n" -msgstr "setsockopt(TCP_KEEPALIVE) 실패: %s\n" - -#: fe-connect.c:1294 -#, c-format -msgid "setsockopt(TCP_KEEPINTVL) failed: %s\n" -msgstr "setsockopt(TCP_KEEPINTVL) 실패: %s\n" - -#: fe-connect.c:1326 +#: fe-connect.c:1551 fe-connect.c:1583 fe-connect.c:1616 fe-connect.c:2175 #, c-format -msgid "setsockopt(TCP_KEEPCNT) failed: %s\n" -msgstr "setsockopt(TCP_KEEPCNT) 실패: %s\n" +msgid "setsockopt(%s) failed: %s\n" +msgstr "setsockopt(%s) 실패: %s\n" -#: fe-connect.c:1374 +#: fe-connect.c:1665 #, c-format msgid "WSAIoctl(SIO_KEEPALIVE_VALS) failed: %ui\n" msgstr "WSAIoctl(SIO_KEEPALIVE_VALS) 실패: %ui\n" -#: fe-connect.c:1426 +#: fe-connect.c:1722 #, c-format msgid "invalid port number: \"%s\"\n" msgstr "잘못된 포트 번호: \"%s\"\n" -#: fe-connect.c:1459 -#, c-format -msgid "Unix-domain socket path \"%s\" is too long (maximum %d bytes)\n" -msgstr "\"%s\" 유닉스 도메인 소켓 경로가 너무 깁니다 (최대 %d 바이트)\n" - -#: fe-connect.c:1478 +#: fe-connect.c:1738 #, c-format msgid "could not translate host name \"%s\" to address: %s\n" msgstr "\"%s\" 호스트 이름을 전송할 수 없습니다: 대상 주소: %s\n" -#: fe-connect.c:1482 +#: fe-connect.c:1747 +#, c-format +msgid "could not parse network address \"%s\": %s\n" +msgstr "\"%s\" 네트워크 주소를 해석할 수 없음: %s\n" + +#: fe-connect.c:1758 +#, c-format +msgid "Unix-domain socket path \"%s\" is too long (maximum %d bytes)\n" +msgstr "\"%s\" 유닉스 도메인 소켓 경로가 너무 깁니다 (최대 %d 바이트)\n" + +#: fe-connect.c:1772 #, c-format msgid "could not translate Unix-domain socket path \"%s\" to address: %s\n" msgstr "\"%s\" 유닉스 도메인 소켓 경로를 전송할 수 없습니다: 대상 주소: %s\n" -#: fe-connect.c:1687 +#: fe-connect.c:2053 msgid "invalid connection state, probably indicative of memory corruption\n" msgstr "잘못된 연결 상태, 메모리 손상일 가능성이 큼\n" -#: fe-connect.c:1727 +#: fe-connect.c:2110 #, c-format msgid "could not create socket: %s\n" msgstr "소켓을 만들 수 없음: %s\n" -#: fe-connect.c:1749 +#: fe-connect.c:2132 #, c-format msgid "could not set socket to nonblocking mode: %s\n" msgstr "소켓을 nonblocking 모드로 지정할 수 없음: %s\n" -#: fe-connect.c:1760 +#: fe-connect.c:2143 #, c-format msgid "could not set socket to close-on-exec mode: %s\n" msgstr "소켓을 close-on-exec 모드로 지정할 수 없음: %s\n" -#: fe-connect.c:1779 +#: fe-connect.c:2162 msgid "keepalives parameter must be an integer\n" msgstr "keepalives 매개변수값은 정수여야 합니다.\n" -#: fe-connect.c:1792 -#, c-format -msgid "setsockopt(SO_KEEPALIVE) failed: %s\n" -msgstr "setsockopt(SO_KEEPALIVE) 실패: %s\n" - -#: fe-connect.c:1929 +#: fe-connect.c:2313 #, c-format msgid "could not get socket error status: %s\n" msgstr "소켓 오류 상태를 구할 수 없음: %s\n" -#: fe-connect.c:1963 +#: fe-connect.c:2348 #, c-format msgid "could not get client address from socket: %s\n" msgstr "소켓에서 클라이언트 주소를 구할 수 없음: %s\n" -#: fe-connect.c:2005 +#: fe-connect.c:2390 msgid "requirepeer parameter is not supported 
on this platform\n" msgstr "requirepeer 매개변수는 이 운영체제에서 지원하지 않음\n" -#: fe-connect.c:2008 +#: fe-connect.c:2393 #, c-format msgid "could not get peer credentials: %s\n" msgstr "신뢰성 피어를 얻을 수 없습니다: %s\n" -#: fe-connect.c:2031 +#: fe-connect.c:2416 #, c-format msgid "requirepeer specifies \"%s\", but actual peer user name is \"%s\"\n" -msgstr "\"%s\" 이름으로 requirepeer를 지정했지만, 실재 사용자 이름은 \"%s\" 입니다\n" +msgstr "" +"\"%s\" 이름으로 requirepeer를 지정했지만, 실재 사용자 이름은 \"%s\" 입니다\n" -#: fe-connect.c:2065 +#: fe-connect.c:2450 #, c-format msgid "could not send SSL negotiation packet: %s\n" msgstr "SSL 교섭 패킷을 보낼 수 없음: %s\n" -#: fe-connect.c:2104 +#: fe-connect.c:2489 #, c-format msgid "could not send startup packet: %s\n" msgstr "시작 패킷을 보낼 수 없음: %s\n" -#: fe-connect.c:2174 +#: fe-connect.c:2559 msgid "server does not support SSL, but SSL was required\n" msgstr "서버가 SSL 기능을 지원하지 않는데, SSL 기능을 요구했음\n" -#: fe-connect.c:2200 +#: fe-connect.c:2585 #, c-format msgid "received invalid response to SSL negotiation: %c\n" msgstr "SSL 교섭에 대한 잘못된 응답을 감지했음: %c\n" -#: fe-connect.c:2275 fe-connect.c:2308 +#: fe-connect.c:2661 fe-connect.c:2694 #, c-format msgid "expected authentication request from server, but received %c\n" msgstr "서버가 인증을 요구했지만, %c 받았음\n" -#: fe-connect.c:2475 -#, c-format -msgid "out of memory allocating GSSAPI buffer (%d)" -msgstr "GSSAPI 버퍼(%d)에 할당할 메모리 부족" - -#: fe-connect.c:2560 +#: fe-connect.c:2923 msgid "unexpected message from server during startup\n" msgstr "시작하는 동안 서버로부터 기대되지 않는 메시지\n" -#: fe-connect.c:2654 +#: fe-connect.c:3141 +#, c-format +msgid "could not make a writable connection to server \"%s:%s\"\n" +msgstr "\"%s:%s\" 서버에 쓰기 가능한 연결을 맺을 수 없음\n" + +#: fe-connect.c:3190 +#, c-format +msgid "test \"SHOW transaction_read_only\" failed on server \"%s:%s\"\n" +msgstr "\"%s:%s\" 서버에서 \"SHOW transaction_read_only\" 검사가 실패함\n" + +#: fe-connect.c:3211 #, c-format msgid "invalid connection state %d, probably indicative of memory corruption\n" msgstr "잘못된 연결 상태 %d, 메모리 손상일 가능성이 큼\n" -#: fe-connect.c:3090 fe-connect.c:3150 +#: fe-connect.c:3668 fe-connect.c:3728 #, c-format msgid "PGEventProc \"%s\" failed during PGEVT_CONNRESET event\n" msgstr "PGEVT_CONNRESET 이벤트 동안 PGEventProc \"%s\"이(가) 실패함\n" -#: fe-connect.c:3497 +#: fe-connect.c:4075 #, c-format msgid "invalid LDAP URL \"%s\": scheme must be ldap://\n" msgstr "잘못된 LDAP URL \"%s\": 스키마는 ldap:// 여야함\n" -#: fe-connect.c:3512 +#: fe-connect.c:4090 #, c-format msgid "invalid LDAP URL \"%s\": missing distinguished name\n" msgstr "잘못된 LDAP URL \"%s\": 식별자 이름이 빠졌음\n" -#: fe-connect.c:3523 fe-connect.c:3576 +#: fe-connect.c:4101 fe-connect.c:4154 #, c-format msgid "invalid LDAP URL \"%s\": must have exactly one attribute\n" msgstr "잘못된 LDAP URL \"%s\": 단 하나의 속성만 가져야함\n" -#: fe-connect.c:3533 fe-connect.c:3590 +#: fe-connect.c:4111 fe-connect.c:4168 #, c-format msgid "invalid LDAP URL \"%s\": must have search scope (base/one/sub)\n" msgstr "잘못된 LDAP URL \"%s\": 검색범위(base/one/sub)를 지정해야함\n" -#: fe-connect.c:3544 +#: fe-connect.c:4122 #, c-format msgid "invalid LDAP URL \"%s\": no filter\n" msgstr "잘못된 LDAP URL \"%s\": 필터 없음\n" -#: fe-connect.c:3565 +#: fe-connect.c:4143 #, c-format msgid "invalid LDAP URL \"%s\": invalid port number\n" msgstr "잘못된 LDAP URL \"%s\": 포트번호가 잘못됨\n" -#: fe-connect.c:3599 +#: fe-connect.c:4177 msgid "could not create LDAP structure\n" msgstr "LDAP 구조를 만들 수 없음\n" -#: fe-connect.c:3675 +#: fe-connect.c:4253 #, c-format msgid "lookup on LDAP server failed: %s\n" msgstr "LDAP 서버를 찾을 수 없음: %s\n" -#: fe-connect.c:3686 +#: 
fe-connect.c:4264 msgid "more than one entry found on LDAP lookup\n" msgstr "LDAP 검색에서 하나 이상의 엔트리가 발견되었음\n" -#: fe-connect.c:3687 fe-connect.c:3699 +#: fe-connect.c:4265 fe-connect.c:4277 msgid "no entry found on LDAP lookup\n" msgstr "LDAP 검색에서 해당 항목 없음\n" -#: fe-connect.c:3710 fe-connect.c:3723 +#: fe-connect.c:4288 fe-connect.c:4301 msgid "attribute has no values on LDAP lookup\n" msgstr "LDAP 검색에서 속성의 값이 없음\n" -#: fe-connect.c:3775 fe-connect.c:3794 fe-connect.c:4313 +#: fe-connect.c:4353 fe-connect.c:4372 fe-connect.c:4891 #, c-format msgid "missing \"=\" after \"%s\" in connection info string\n" msgstr "연결문자열에서 \"%s\" 다음에 \"=\" 문자 빠졌음\n" -#: fe-connect.c:3867 fe-connect.c:4498 fe-connect.c:5212 +#: fe-connect.c:4445 fe-connect.c:5076 fe-connect.c:5850 #, c-format msgid "invalid connection option \"%s\"\n" msgstr "잘못된 연결 옵션 \"%s\"\n" -#: fe-connect.c:3883 fe-connect.c:4362 +#: fe-connect.c:4461 fe-connect.c:4940 msgid "unterminated quoted string in connection info string\n" msgstr "연결문자열에서 완성되지 못한 따옴표문자열이 있음\n" -#: fe-connect.c:3923 +#: fe-connect.c:4501 msgid "could not get home directory to locate service definition file" msgstr "서비스 정의 파일이 있는 홈 디렉토리를 찾을 수 없음" -#: fe-connect.c:3956 +#: fe-connect.c:4534 #, c-format msgid "definition of service \"%s\" not found\n" msgstr "\"%s\" 서비스 정의를 찾을 수 없음\n" -#: fe-connect.c:3979 +#: fe-connect.c:4557 #, c-format msgid "service file \"%s\" not found\n" msgstr "\"%s\" 서비스 파일을 찾을 수 없음\n" -#: fe-connect.c:3992 +#: fe-connect.c:4570 #, c-format msgid "line %d too long in service file \"%s\"\n" msgstr "%d번째 줄이 \"%s\" 서비스 파일에서 너무 깁니다\n" -#: fe-connect.c:4063 fe-connect.c:4107 +#: fe-connect.c:4641 fe-connect.c:4685 #, c-format msgid "syntax error in service file \"%s\", line %d\n" msgstr "\"%s\" 서비스 파일의 %d번째 줄에 구문 오류 있음\n" -#: fe-connect.c:4074 +#: fe-connect.c:4652 #, c-format msgid "" "nested service specifications not supported in service file \"%s\", line %d\n" msgstr "\"%s\" 서비스 파일의 %d번째 줄에 설정을 지원하지 않음\n" -#: fe-connect.c:4756 +#: fe-connect.c:5372 #, c-format msgid "invalid URI propagated to internal parser routine: \"%s\"\n" msgstr "URI 구문 분석을 할 수 없음: \"%s\"\n" -#: fe-connect.c:4826 +#: fe-connect.c:5449 #, c-format msgid "" "end of string reached when looking for matching \"]\" in IPv6 host address " @@ -392,55 +511,55 @@ msgid "" msgstr "" "URI의 IPv6 호스트 주소에서 \"]\" 매칭 검색을 실패했습니다, 해당 URI: \"%s\"\n" -#: fe-connect.c:4833 +#: fe-connect.c:5456 #, c-format msgid "IPv6 host address may not be empty in URI: \"%s\"\n" msgstr "IPv6 호스트 주소가 없습니다, 해당 URI: \"%s\"\n" -#: fe-connect.c:4848 +#: fe-connect.c:5471 #, c-format msgid "" "unexpected character \"%c\" at position %d in URI (expected \":\" or \"/\"): " "\"%s\"\n" msgstr "" -"잘못된 \"%c\" 문자가 URI 문자열 가운데 %d 번째 있습니다(\":\" 또는 \"/\" 문자가 있어야 함): " -"\"%s\"\n" +"잘못된 \"%c\" 문자가 URI 문자열 가운데 %d 번째 있습니다(\":\" 또는 \"/\" 문자" +"가 있어야 함): \"%s\"\n" -#: fe-connect.c:4962 +#: fe-connect.c:5600 #, c-format msgid "extra key/value separator \"=\" in URI query parameter: \"%s\"\n" msgstr "키/밸류 구분자 \"=\" 문자가 필요함, 해당 URI 쿼리 매개변수: \"%s\"\n" -#: fe-connect.c:4982 +#: fe-connect.c:5620 #, c-format msgid "missing key/value separator \"=\" in URI query parameter: \"%s\"\n" msgstr "키/밸류 구분자 \"=\" 문자가 필요함, 해당 URI 쿼리 매개변수: \"%s\"\n" -#: fe-connect.c:5033 +#: fe-connect.c:5671 #, c-format msgid "invalid URI query parameter: \"%s\"\n" msgstr "잘못된 URL 쿼리 매개변수값: \"%s\"\n" -#: fe-connect.c:5107 +#: fe-connect.c:5745 #, c-format msgid "invalid percent-encoded token: \"%s\"\n" msgstr "잘못된 퍼센트 인코드 토큰: \"%s\"\n" -#: fe-connect.c:5117 +#: 
fe-connect.c:5755 #, c-format msgid "forbidden value %%00 in percent-encoded value: \"%s\"\n" msgstr "퍼센트 인코드 값에 %%00 숨김 값이 있음: \"%s\"\n" -#: fe-connect.c:5451 +#: fe-connect.c:6100 msgid "connection pointer is NULL\n" msgstr "연결 포인터가 NULL\n" -#: fe-connect.c:5749 +#: fe-connect.c:6398 #, c-format msgid "WARNING: password file \"%s\" is not a plain file\n" msgstr "경고: \"%s\" 패스워드 파일이 plain 파일이 아님\n" -#: fe-connect.c:5758 +#: fe-connect.c:6407 #, c-format msgid "" "WARNING: password file \"%s\" has group or world access; permissions should " @@ -449,107 +568,126 @@ msgstr "" "경고: 패스워드 파일 \"%s\"에 그룹 또는 범용 액세스 권한이 있습니다. 권한은 " "u=rw(0600) 이하여야 합니다.\n" -#: fe-connect.c:5864 +#: fe-connect.c:6499 #, c-format msgid "password retrieved from file \"%s\"\n" msgstr "\"%s\" 파일에서 암호를 찾을 수 없음\n" -#: fe-exec.c:826 +#: fe-exec.c:437 fe-exec.c:2776 +#, c-format +msgid "row number %d is out of range 0..%d" +msgstr "%d 번째 행(row)은 0..%d 범위를 벗어났음" + +#: fe-exec.c:498 fe-protocol2.c:503 fe-protocol2.c:538 fe-protocol2.c:1049 +#: fe-protocol3.c:209 fe-protocol3.c:236 fe-protocol3.c:253 fe-protocol3.c:333 +#: fe-protocol3.c:728 fe-protocol3.c:951 +msgid "out of memory" +msgstr "메모리 부족" + +#: fe-exec.c:499 fe-protocol2.c:1395 fe-protocol3.c:1886 +#, c-format +msgid "%s" +msgstr "%s" + +#: fe-exec.c:847 msgid "NOTICE" msgstr "알림" -#: fe-exec.c:1141 fe-exec.c:1199 fe-exec.c:1245 +#: fe-exec.c:905 +msgid "PGresult cannot support more than INT_MAX tuples" +msgstr "PGresult 함수는 INT_MAX 튜플보다 많은 경우를 지원하지 않음" + +#: fe-exec.c:917 +msgid "size_t overflow" +msgstr "size_t 초과" + +#: fe-exec.c:1192 fe-exec.c:1250 fe-exec.c:1296 msgid "command string is a null pointer\n" msgstr "명령 문자열이 null 포인터\n" -#: fe-exec.c:1205 fe-exec.c:1251 fe-exec.c:1346 +#: fe-exec.c:1256 fe-exec.c:1302 fe-exec.c:1397 msgid "number of parameters must be between 0 and 65535\n" msgstr "매개변수값으로 숫자는 0에서 65535까지만 쓸 수 있음\n" -#: fe-exec.c:1239 fe-exec.c:1340 +#: fe-exec.c:1290 fe-exec.c:1391 msgid "statement name is a null pointer\n" msgstr "실행 구문 이름이 null 포인트(값이 없음)입니다\n" -#: fe-exec.c:1259 fe-exec.c:1423 fe-exec.c:2141 fe-exec.c:2340 +#: fe-exec.c:1310 fe-exec.c:1473 fe-exec.c:2191 fe-exec.c:2390 msgid "function requires at least protocol version 3.0\n" msgstr "함수는 적어도 버전 3의 프로토콜을 요구하고 있습니다\n" -#: fe-exec.c:1377 +#: fe-exec.c:1428 msgid "no connection to the server\n" msgstr "서버에 대한 연결이 없음\n" -#: fe-exec.c:1384 +#: fe-exec.c:1435 msgid "another command is already in progress\n" msgstr "처리 중에 이미 다른 명령이 존재함\n" -#: fe-exec.c:1499 +#: fe-exec.c:1549 msgid "length must be given for binary parameter\n" msgstr "바이너리 자료 매개 변수를 사용할 때는 그 길이를 지정해야 함\n" -#: fe-exec.c:1771 +#: fe-exec.c:1821 #, c-format msgid "unexpected asyncStatus: %d\n" msgstr "기대되지 않은 동기화상태: %d\n" -#: fe-exec.c:1791 +#: fe-exec.c:1841 #, c-format msgid "PGEventProc \"%s\" failed during PGEVT_RESULTCREATE event\n" msgstr "PGEVT_RESULTCREATE 이벤트 동안 PGEventProc \"%s\" 실패함\n" -#: fe-exec.c:1951 +#: fe-exec.c:2001 msgid "COPY terminated by new PQexec" msgstr "새 PQexec 호출로 COPY 작업이 중지 되었습니다" -#: fe-exec.c:1959 +#: fe-exec.c:2009 msgid "COPY IN state must be terminated first\n" msgstr "COPY IN 상태가 먼저 끝나야함\n" -#: fe-exec.c:1979 +#: fe-exec.c:2029 msgid "COPY OUT state must be terminated first\n" msgstr "COPY OUT 상태가 먼저 끝나야함\n" -#: fe-exec.c:1987 +#: fe-exec.c:2037 msgid "PQexec not allowed during COPY BOTH\n" msgstr "COPY BOTH 작업 중에는 PQexec 사용할 수 없음\n" -#: fe-exec.c:2230 fe-exec.c:2297 fe-exec.c:2387 fe-protocol2.c:1352 +#: fe-exec.c:2280 fe-exec.c:2347 fe-exec.c:2437 fe-protocol2.c:1352 #: 
fe-protocol3.c:1817 msgid "no COPY in progress\n" msgstr "처리 가운데 COPY가 없음\n" -#: fe-exec.c:2577 +#: fe-exec.c:2627 msgid "connection in wrong state\n" msgstr "잘못된 상태의 연결\n" -#: fe-exec.c:2608 +#: fe-exec.c:2658 msgid "invalid ExecStatusType code" msgstr "잘못된 ExecStatusType 코드" -#: fe-exec.c:2635 +#: fe-exec.c:2685 msgid "PGresult is not an error result\n" msgstr "PGresult가 오류 결과가 아님\n" -#: fe-exec.c:2710 fe-exec.c:2733 +#: fe-exec.c:2760 fe-exec.c:2783 #, c-format msgid "column number %d is out of range 0..%d" msgstr "%d 번째 열은 0..%d 범위를 벗어났음" -#: fe-exec.c:2726 -#, c-format -msgid "row number %d is out of range 0..%d" -msgstr "%d 번째 행(row)은 0..%d 범위를 벗어났음" - -#: fe-exec.c:2748 +#: fe-exec.c:2798 #, c-format msgid "parameter number %d is out of range 0..%d" msgstr "%d개의 매개 변수는 0..%d 범위를 벗어났음" -#: fe-exec.c:3058 +#: fe-exec.c:3108 #, c-format msgid "could not interpret result from server: %s" msgstr "서버로부터 결과처리를 중지 시킬 수 없음: %s" -#: fe-exec.c:3297 fe-exec.c:3381 +#: fe-exec.c:3347 fe-exec.c:3431 msgid "incomplete multibyte character\n" msgstr "완성되지 않은 멀티바이트 문자\n" @@ -636,21 +774,21 @@ msgstr "loread 함수의 OID 조사를 할 수 없음\n" msgid "cannot determine OID of function lowrite\n" msgstr "lowrite 함수의 OID 조사를 할 수 없음\n" -#: fe-misc.c:295 +#: fe-misc.c:292 #, c-format msgid "integer of size %lu not supported by pqGetInt" msgstr "%lu 정수형 크기는 pqGetInt 함수에서 지원하지 않음" -#: fe-misc.c:331 +#: fe-misc.c:328 #, c-format msgid "integer of size %lu not supported by pqPutInt" msgstr "%lu 정수형 크기는 pqPutInt 함수에서 지원하지 않음" -#: fe-misc.c:642 fe-misc.c:843 +#: fe-misc.c:639 fe-misc.c:840 msgid "connection not open\n" msgstr "연결 열기 실패\n" -#: fe-misc.c:812 fe-secure-openssl.c:271 fe-secure-openssl.c:380 +#: fe-misc.c:809 fe-secure-openssl.c:229 fe-secure-openssl.c:338 #: fe-secure.c:253 fe-secure.c:362 msgid "" "server closed the connection unexpectedly\n" @@ -661,15 +799,15 @@ msgstr "" "\t이런 처리는 클라이언트의 요구를 처리하는 동안이나\n" "\t처리하기 전에 서버가 갑자기 종료되었음을 의미함\n" -#: fe-misc.c:1016 +#: fe-misc.c:1011 msgid "timeout expired\n" msgstr "시간 초과\n" -#: fe-misc.c:1061 +#: fe-misc.c:1056 msgid "invalid socket\n" msgstr "잘못된 소켓\n" -#: fe-misc.c:1084 +#: fe-misc.c:1079 #, c-format msgid "select() failed: %s\n" msgstr "select() 실패: %s\n" @@ -689,12 +827,6 @@ msgstr "잘못된 상태 %c, 메모리 손상일 가능성이 큼\n" msgid "message type 0x%02x arrived from server while idle" msgstr "휴지(idle)동안 서버로 부터 0x%02x 형태 메시지를 받았음" -#: fe-protocol2.c:503 fe-protocol2.c:538 fe-protocol2.c:1049 -#: fe-protocol3.c:209 fe-protocol3.c:236 fe-protocol3.c:253 fe-protocol3.c:333 -#: fe-protocol3.c:728 fe-protocol3.c:951 -msgid "out of memory" -msgstr "메모리 부족" - #: fe-protocol2.c:529 #, c-format msgid "unexpected character %c following empty query response (\"I\" message)" @@ -726,11 +858,6 @@ msgstr "서버로부터 예상치 못한 응답을 받았음; \"%c\" 문자를 msgid "out of memory for query result" msgstr "쿼리 결과 처리를 위한 메모리 부족" -#: fe-protocol2.c:1395 fe-protocol3.c:1886 -#, c-format -msgid "%s" -msgstr "%s" - #: fe-protocol2.c:1407 #, c-format msgid "lost synchronization with server, resetting connection" @@ -860,102 +987,128 @@ msgstr "줄 %d: " msgid "PQgetline: not doing text COPY OUT\n" msgstr "PQgetline: text COPY OUT 작업을 할 수 없음\n" -#: fe-secure-openssl.c:146 fe-secure-openssl.c:1031 fe-secure-openssl.c:1251 -#, c-format -msgid "could not acquire mutex: %s\n" -msgstr "mutex 취득 실패: %s\n" - -#: fe-secure-openssl.c:158 -#, c-format -msgid "could not establish SSL connection: %s\n" -msgstr "SSL 연결을 확립할 수 없음: %s\n" - -#: fe-secure-openssl.c:276 fe-secure-openssl.c:385 fe-secure-openssl.c:1377 +#: 
fe-secure-openssl.c:234 fe-secure-openssl.c:343 fe-secure-openssl.c:1323 #, c-format msgid "SSL SYSCALL error: %s\n" msgstr "SSL SYSCALL 오류: %s\n" -#: fe-secure-openssl.c:283 fe-secure-openssl.c:392 fe-secure-openssl.c:1381 +#: fe-secure-openssl.c:241 fe-secure-openssl.c:350 fe-secure-openssl.c:1327 msgid "SSL SYSCALL error: EOF detected\n" msgstr "SSL SYSCALL 오류: EOF 감지됨\n" -#: fe-secure-openssl.c:294 fe-secure-openssl.c:403 fe-secure-openssl.c:1390 +#: fe-secure-openssl.c:252 fe-secure-openssl.c:361 fe-secure-openssl.c:1336 #, c-format msgid "SSL error: %s\n" msgstr "SSL 오류: %s\n" -#: fe-secure-openssl.c:309 fe-secure-openssl.c:418 +#: fe-secure-openssl.c:267 fe-secure-openssl.c:376 msgid "SSL connection has been closed unexpectedly\n" msgstr "SSL 연결이 예상치 못하게 끊김\n" -#: fe-secure-openssl.c:315 fe-secure-openssl.c:424 fe-secure-openssl.c:1399 +#: fe-secure-openssl.c:273 fe-secure-openssl.c:382 fe-secure-openssl.c:1345 #, c-format msgid "unrecognized SSL error code: %d\n" msgstr "알 수 없는 SSL 오류 코드: %d\n" -#: fe-secure-openssl.c:536 +#: fe-secure-openssl.c:494 msgid "SSL certificate's name entry is missing\n" msgstr "SSL 인증서의 이름 항목이 잘못됨\n" -#: fe-secure-openssl.c:566 +#: fe-secure-openssl.c:528 msgid "SSL certificate's name contains embedded null\n" msgstr "SSL 인증서의 이름에 null 문자가 있음\n" -#: fe-secure-openssl.c:617 +#: fe-secure-openssl.c:580 msgid "host name must be specified for a verified SSL connection\n" msgstr "인증된 SSL 접속을 위해서는 호스트 이름을 지정해야 함\n" -#: fe-secure-openssl.c:717 +#: fe-secure-openssl.c:680 #, c-format msgid "server certificate for \"%s\" does not match host name \"%s\"\n" msgstr "" "서버 인증서의 이름 \"%s\"이(가) 호스트 이름 \"%s\"과(와) 일치하지 않음\n" -#: fe-secure-openssl.c:723 +#: fe-secure-openssl.c:686 msgid "could not get server's host name from server certificate\n" msgstr "서버 인증서에서 서버 호스트 이름을 찾을 수 없음\n" -#: fe-secure-openssl.c:870 +#: fe-secure-openssl.c:928 #, c-format msgid "could not create SSL context: %s\n" msgstr "SSL context를 만들 수 없음: %s\n" -#: fe-secure-openssl.c:1001 +#: fe-secure-openssl.c:965 +#, c-format +msgid "could not read root certificate file \"%s\": %s\n" +msgstr "\"%s\" 루트 인증서 파일을 읽을 수 없음: %s\n" + +#: fe-secure-openssl.c:993 +#, c-format +msgid "SSL library does not support CRL certificates (file \"%s\")\n" +msgstr "SSL 라이브러리가 CRL 인증서 (\"%s\" 파일)를 지원하지 않음\n" + +#: fe-secure-openssl.c:1021 +msgid "" +"could not get home directory to locate root certificate file\n" +"Either provide the file or change sslmode to disable server certificate " +"verification.\n" +msgstr "" +"루트 인증서 파일이 있는 홈 디렉터리를 찾을 수 없음\n" +"해당 파일을 제공하거나 서버 인증서 확인을 사용하지 않도록 sslmode를 변경하십" +"시오.\n" + +#: fe-secure-openssl.c:1025 +#, c-format +msgid "" +"root certificate file \"%s\" does not exist\n" +"Either provide the file or change sslmode to disable server certificate " +"verification.\n" +msgstr "" +"루트 인증서 파일 \"%s\"이(가) 없습니다.\n" +"해당 파일을 제공하거나 서버 인증서 확인을 사용하지 않도록 sslmode를 변경하십" +"시오.\n" + +#: fe-secure-openssl.c:1056 #, c-format msgid "could not open certificate file \"%s\": %s\n" msgstr "\"%s\" 인증서 파일을 열수 없음: %s\n" -#: fe-secure-openssl.c:1040 fe-secure-openssl.c:1055 +#: fe-secure-openssl.c:1075 #, c-format msgid "could not read certificate file \"%s\": %s\n" msgstr "\"%s\" 인증서 파일을 읽을 수 없음: %s\n" -#: fe-secure-openssl.c:1110 +#: fe-secure-openssl.c:1100 +#, c-format +msgid "could not establish SSL connection: %s\n" +msgstr "SSL 연결을 확립할 수 없음: %s\n" + +#: fe-secure-openssl.c:1154 #, c-format msgid "could not load SSL engine \"%s\": %s\n" msgstr "SSL 엔진 \"%s\"을(를) 로드할 수 없음: %s\n" -#: 
fe-secure-openssl.c:1122 +#: fe-secure-openssl.c:1166 #, c-format msgid "could not initialize SSL engine \"%s\": %s\n" msgstr "SSL 엔진 \"%s\"을(를) 초기화할 수 없음: %s\n" -#: fe-secure-openssl.c:1138 +#: fe-secure-openssl.c:1182 #, c-format msgid "could not read private SSL key \"%s\" from engine \"%s\": %s\n" msgstr "개인 SSL 키 \"%s\"을(를) \"%s\" 엔진에서 읽을 수 없음: %s\n" -#: fe-secure-openssl.c:1152 +#: fe-secure-openssl.c:1196 #, c-format msgid "could not load private SSL key \"%s\" from engine \"%s\": %s\n" msgstr "개인 SSL 키 \"%s\"을(를) \"%s\" 엔진에서 읽을 수 없음: %s\n" -#: fe-secure-openssl.c:1189 +#: fe-secure-openssl.c:1233 #, c-format msgid "certificate present, but not private key file \"%s\"\n" msgstr "인증서가 있지만, \"%s\" 개인키가 아닙니다.\n" -#: fe-secure-openssl.c:1197 +#: fe-secure-openssl.c:1241 #, c-format msgid "" "private key file \"%s\" has group or world access; permissions should be " @@ -964,58 +1117,27 @@ msgstr "" "개인 키 파일 \"%s\"에 그룹 또는 범용 액세스 권한이 있습니다. 권한은 " "u=rw(0600) 이하여야 합니다.\n" -#: fe-secure-openssl.c:1208 +#: fe-secure-openssl.c:1252 #, c-format msgid "could not load private key file \"%s\": %s\n" msgstr "\"%s\" 개인키 파일을 불러들일 수 없습니다: %s\n" -#: fe-secure-openssl.c:1222 +#: fe-secure-openssl.c:1266 #, c-format msgid "certificate does not match private key file \"%s\": %s\n" msgstr "인증서가 \"%s\" 개인키 파일과 맞지 않습니다: %s\n" -#: fe-secure-openssl.c:1260 -#, c-format -msgid "could not read root certificate file \"%s\": %s\n" -msgstr "\"%s\" 루트 인증서 파일을 읽을 수 없음: %s\n" - -#: fe-secure-openssl.c:1290 -#, c-format -msgid "SSL library does not support CRL certificates (file \"%s\")\n" -msgstr "SSL 라이브러리가 CRL 인증서 (\"%s\" 파일)를 지원하지 않음\n" - -#: fe-secure-openssl.c:1323 -msgid "" -"could not get home directory to locate root certificate file\n" -"Either provide the file or change sslmode to disable server certificate " -"verification.\n" -msgstr "" -"루트 인증서 파일이 있는 홈 디렉터리를 찾을 수 없음\n" -"해당 파일을 제공하거나 서버 인증서 확인을 사용하지 않도록 sslmode를 변경하십" -"시오.\n" - -#: fe-secure-openssl.c:1327 -#, c-format -msgid "" -"root certificate file \"%s\" does not exist\n" -"Either provide the file or change sslmode to disable server certificate " -"verification.\n" -msgstr "" -"루트 인증서 파일 \"%s\"이(가) 없습니다.\n" -"해당 파일을 제공하거나 서버 인증서 확인을 사용하지 않도록 sslmode를 변경하십" -"시오.\n" - -#: fe-secure-openssl.c:1420 +#: fe-secure-openssl.c:1366 #, c-format msgid "certificate could not be obtained: %s\n" msgstr "인증서를 구하질 못했습니다: %s\n" -#: fe-secure-openssl.c:1512 +#: fe-secure-openssl.c:1458 #, c-format msgid "no SSL error reported" msgstr "SSL 오류 없음이 보고됨" -#: fe-secure-openssl.c:1521 +#: fe-secure-openssl.c:1467 #, c-format msgid "SSL error code %lu" msgstr "SSL 오류 번호 %lu" diff --git a/src/interfaces/libpq/po/ru.po b/src/interfaces/libpq/po/ru.po index de6e91fc72..7e80be00fe 100644 --- a/src/interfaces/libpq/po/ru.po +++ b/src/interfaces/libpq/po/ru.po @@ -4,14 +4,14 @@ # Serguei A. Mokhov , 2001-2004. # Oleg Bartunov , 2005. # Andrey Sudnik , 2010. -# Alexander Lakhin , 2012-2017. -# +# Alexander Lakhin , 2012-2017, 2018. msgid "" msgstr "" "Project-Id-Version: libpq (PostgreSQL current)\n" "Report-Msgid-Bugs-To: pgsql-bugs@postgresql.org\n" -"POT-Creation-Date: 2017-03-27 12:38+0000\n" -"PO-Revision-Date: 2017-03-29 06:55+0300\n" +"POT-Creation-Date: 2018-01-31 07:53+0300\n" +"PO-Revision-Date: 2018-01-31 08:16+0300\n" +"Last-Translator: Alexander Lakhin \n" "Language-Team: Russian \n" "Language: ru\n" "MIME-Version: 1.0\n" @@ -19,123 +19,229 @@ msgstr "" "Content-Transfer-Encoding: 8bit\n" "Plural-Forms: nplurals=3; plural=(n%10==1 && n%100!=11 ? 
0 : n%10>=2 && n" "%10<=4 && (n%100<10 || n%100>=20) ? 1 : 2);\n" -"Last-Translator: Alexander Lakhin \n" -#: fe-auth.c:149 +#: fe-auth-scram.c:176 +msgid "malformed SCRAM message (empty message)\n" +msgstr "неправильное сообщение SCRAM (пустое содержимое)\n" + +#: fe-auth-scram.c:182 +msgid "malformed SCRAM message (length mismatch)\n" +msgstr "неправильное сообщение SCRAM (некорректная длина)\n" + +#: fe-auth-scram.c:231 +msgid "incorrect server signature\n" +msgstr "некорректная сигнатура сервера\n" + +#: fe-auth-scram.c:240 +msgid "invalid SCRAM exchange state\n" +msgstr "ошибочное состояние обмена SCRAM\n" + +#: fe-auth-scram.c:263 +#, c-format +msgid "malformed SCRAM message (attribute \"%c\" expected)\n" +msgstr "неправильное сообщение SCRAM (ожидался атрибут \"%c\")\n" + +#: fe-auth-scram.c:272 +#, c-format +msgid "" +"malformed SCRAM message (expected character \"=\" for attribute \"%c\")\n" +msgstr "" +"неправильное сообщение SCRAM (для атрибута \"%c\" ожидался символ \"=\")\n" + +#: fe-auth-scram.c:311 +msgid "could not generate nonce\n" +msgstr "не удалось сгенерировать разовый код\n" + +#: fe-auth-scram.c:319 fe-auth-scram.c:336 fe-auth-scram.c:346 +#: fe-auth-scram.c:400 fe-auth-scram.c:420 fe-auth-scram.c:445 +#: fe-auth-scram.c:459 fe-auth-scram.c:501 fe-auth.c:227 fe-auth.c:362 +#: fe-auth.c:432 fe-auth.c:467 fe-auth.c:609 fe-auth.c:768 fe-auth.c:1080 +#: fe-auth.c:1228 fe-connect.c:775 fe-connect.c:1202 fe-connect.c:1378 +#: fe-connect.c:1946 fe-connect.c:2475 fe-connect.c:4061 fe-connect.c:4313 +#: fe-connect.c:4432 fe-connect.c:4682 fe-connect.c:4762 fe-connect.c:4861 +#: fe-connect.c:5117 fe-connect.c:5146 fe-connect.c:5218 fe-connect.c:5242 +#: fe-connect.c:5260 fe-connect.c:5361 fe-connect.c:5370 fe-connect.c:5726 +#: fe-connect.c:5876 fe-exec.c:2702 fe-exec.c:3449 fe-exec.c:3614 fe-lobj.c:896 +#: fe-protocol2.c:1206 fe-protocol3.c:992 fe-protocol3.c:1678 +#: fe-secure-openssl.c:514 fe-secure-openssl.c:1138 +msgid "out of memory\n" +msgstr "нехватка памяти\n" + +#: fe-auth-scram.c:437 +msgid "invalid SCRAM response (nonce mismatch)\n" +msgstr "неверный ответ SCRAM (несовпадение проверочного кода)\n" + +#: fe-auth-scram.c:476 +msgid "malformed SCRAM message (invalid iteration count)\n" +msgstr "неправильное сообщение SCRAM (некорректное число итераций)\n" + +#: fe-auth-scram.c:482 +msgid "malformed SCRAM message (garbage at end of server-first-message)\n" +msgstr "" +"неправильное сообщение SCRAM (мусор в конце первого сообщения сервера)\n" + +#: fe-auth-scram.c:511 +#, c-format +msgid "error received from server in SCRAM exchange: %s\n" +msgstr "в ходе обмена SCRAM от сервера получена ошибка: %s\n" + +#: fe-auth-scram.c:526 +msgid "malformed SCRAM message (garbage at end of server-final-message)\n" +msgstr "" +"неправильное сообщение SCRAM (мусор в конце последнего сообщения сервера)\n" + +#: fe-auth-scram.c:534 +msgid "malformed SCRAM message (invalid server signature)\n" +msgstr "неправильное сообщение SCRAM (неверная сигнатура сервера)\n" + +#: fe-auth.c:122 +#, c-format +msgid "out of memory allocating GSSAPI buffer (%d)\n" +msgstr "недостаточно памяти для буфера GSSAPI (%d)\n" + +#: fe-auth.c:177 msgid "GSSAPI continuation error" msgstr "ошибка продолжения в GSSAPI" -#: fe-auth.c:179 fe-auth.c:415 +#: fe-auth.c:207 fe-auth.c:461 msgid "host name must be specified\n" msgstr "требуется указать имя сервера\n" -#: fe-auth.c:186 +#: fe-auth.c:214 msgid "duplicate GSS authentication request\n" msgstr "повторный запрос аутентификации GSS\n" -#: fe-auth.c:199 
fe-auth.c:311 fe-auth.c:386 fe-auth.c:421 fe-auth.c:465 -#: fe-auth.c:599 fe-auth.c:902 fe-connect.c:716 fe-connect.c:1086 -#: fe-connect.c:1262 fe-connect.c:1798 fe-connect.c:2326 fe-connect.c:4000 -#: fe-connect.c:4252 fe-connect.c:4371 fe-connect.c:4611 fe-connect.c:4691 -#: fe-connect.c:4790 fe-connect.c:5046 fe-connect.c:5075 fe-connect.c:5147 -#: fe-connect.c:5165 fe-connect.c:5266 fe-connect.c:5275 fe-connect.c:5631 -#: fe-connect.c:5781 fe-exec.c:2651 fe-exec.c:3398 fe-exec.c:3563 fe-lobj.c:896 -#: fe-protocol2.c:1206 fe-protocol3.c:992 fe-protocol3.c:1678 -#: fe-secure-openssl.c:514 fe-secure-openssl.c:1137 -msgid "out of memory\n" -msgstr "нехватка памяти\n" - -#: fe-auth.c:212 +#: fe-auth.c:240 msgid "GSSAPI name import error" msgstr "ошибка импорта имени в GSSAPI" -#: fe-auth.c:300 +#: fe-auth.c:303 +#, c-format +msgid "out of memory allocating SSPI buffer (%d)\n" +msgstr "недостаточно памяти для буфера SSPI (%d)\n" + +#: fe-auth.c:351 msgid "SSPI continuation error" msgstr "ошибка продолжения в SSPI" -#: fe-auth.c:401 +#: fe-auth.c:422 +msgid "duplicate SSPI authentication request\n" +msgstr "повторный запрос аутентификации SSPI\n" + +#: fe-auth.c:447 msgid "could not acquire SSPI credentials" msgstr "не удалось получить удостоверение SSPI" -#: fe-auth.c:474 +#: fe-auth.c:500 +msgid "duplicate SASL authentication request\n" +msgstr "повторный запрос аутентификации SASL\n" + +#: fe-auth.c:560 +msgid "none of the server's SASL authentication mechanisms are supported\n" +msgstr "" +"ни один из серверных механизмов аутентификации SASL не поддерживается\n" + +#: fe-auth.c:633 #, c-format -msgid "SASL authentication mechanism %s not supported\n" -msgstr "механизм аутентификации SASL %s не поддерживается\n" +msgid "out of memory allocating SASL buffer (%d)\n" +msgstr "недостаточно памяти для буфера SASL (%d)\n" -#: fe-auth.c:574 +#: fe-auth.c:658 +msgid "" +"AuthenticationSASLFinal received from server, but SASL authentication was " +"not completed\n" +msgstr "" +"c сервера получено сообщение AuthenticationSASLFinal, но аутентификация SASL " +"ещё не завершена\n" + +#: fe-auth.c:735 msgid "SCM_CRED authentication method not supported\n" msgstr "аутентификация SCM_CRED не поддерживается\n" -#: fe-auth.c:650 +#: fe-auth.c:826 msgid "Kerberos 4 authentication not supported\n" msgstr "аутентификация Kerberos 4 не поддерживается\n" -#: fe-auth.c:655 +#: fe-auth.c:831 msgid "Kerberos 5 authentication not supported\n" msgstr "аутентификация Kerberos 5 не поддерживается\n" -#: fe-auth.c:726 +#: fe-auth.c:902 msgid "GSSAPI authentication not supported\n" msgstr "аутентификация через GSSAPI не поддерживается\n" -#: fe-auth.c:758 +#: fe-auth.c:934 msgid "SSPI authentication not supported\n" msgstr "аутентификация через SSPI не поддерживается\n" -#: fe-auth.c:766 +#: fe-auth.c:942 msgid "Crypt authentication not supported\n" msgstr "аутентификация Crypt не поддерживается\n" -#: fe-auth.c:830 +#: fe-auth.c:1008 #, c-format msgid "authentication method %u not supported\n" msgstr "метод аутентификации %u не поддерживается\n" -#: fe-auth.c:877 +#: fe-auth.c:1055 #, c-format msgid "user name lookup failure: error code %lu\n" msgstr "распознать имя пользователя не удалось (код ошибки: %lu)\n" -#: fe-auth.c:887 fe-connect.c:2253 +#: fe-auth.c:1065 fe-connect.c:2402 #, c-format msgid "could not look up local user ID %d: %s\n" msgstr "найти локального пользователя по идентификатору (%d) не удалось: %s\n" -#: fe-auth.c:892 fe-connect.c:2258 +#: fe-auth.c:1070 fe-connect.c:2407 #, c-format msgid "local 
user with ID %d does not exist\n" msgstr "локальный пользователь с ID %d не существует\n" -#: fe-connect.c:918 +#: fe-auth.c:1172 +msgid "unexpected shape of result set returned for SHOW\n" +msgstr "неожиданная форма набора результатов, возвращённого для SHOW\n" + +#: fe-auth.c:1181 +msgid "password_encryption value too long\n" +msgstr "слишком длинное значение password_encryption\n" + +#: fe-auth.c:1221 +#, c-format +msgid "unrecognized password encryption algorithm \"%s\"\n" +msgstr "нераспознанный алгоритм шифрования пароля \"%s\"\n" + +#: fe-connect.c:968 +#, c-format +msgid "could not match %d host names to %d hostaddr values\n" +msgstr "не удалось сопоставить имена узлов (%d) со значениями hostaddr (%d)\n" + +#: fe-connect.c:1025 #, c-format msgid "could not match %d port numbers to %d hosts\n" msgstr "не удалось сопоставить номера портов (%d) с узлами (%d)\n" -#: fe-connect.c:970 -msgid "could not get home directory to locate password file\n" -msgstr "не удалось получить домашний каталог для загрузки файла паролей\n" - -#: fe-connect.c:1012 +#: fe-connect.c:1128 #, c-format msgid "invalid sslmode value: \"%s\"\n" msgstr "неверное значение sslmode: \"%s\"\n" -#: fe-connect.c:1033 +#: fe-connect.c:1149 #, c-format msgid "sslmode value \"%s\" invalid when SSL support is not compiled in\n" msgstr "значение sslmode \"%s\" недопустимо для сборки без поддержки SSL\n" -#: fe-connect.c:1068 +#: fe-connect.c:1184 #, c-format msgid "invalid target_session_attrs value: \"%s\"\n" msgstr "неверное значение target_session_attrs: \"%s\"\n" -#: fe-connect.c:1286 +#: fe-connect.c:1402 #, c-format msgid "could not set socket to TCP no delay mode: %s\n" msgstr "не удалось перевести сокет в режим TCP-передачи без задержки: %s\n" -#: fe-connect.c:1316 +#: fe-connect.c:1432 #, c-format msgid "" "could not connect to server: %s\n" @@ -144,9 +250,9 @@ msgid "" msgstr "" "не удалось подключиться к серверу: %s\n" "\tОн действительно работает локально и принимает\n" -"\tсоединения через доменный сокет \"%s\"?\n" +"\tсоединения через Unix-сокет \"%s\"?\n" -#: fe-connect.c:1371 +#: fe-connect.c:1490 #, c-format msgid "" "could not connect to server: %s\n" @@ -157,7 +263,7 @@ msgstr "" "\tОн действительно работает по адресу \"%s\" (%s)\n" "\t и принимает TCP-соединения (порт %s)?\n" -#: fe-connect.c:1380 +#: fe-connect.c:1499 #, c-format msgid "" "could not connect to server: %s\n" @@ -168,259 +274,229 @@ msgstr "" "\tОн действительно работает по адресу \"%s\"\n" "\t и принимает TCP-соединения (порт %s)?\n" -#: fe-connect.c:1431 -#, c-format -msgid "setsockopt(TCP_KEEPIDLE) failed: %s\n" -msgstr "ошибка в setsockopt(TCP_KEEPIDLE): %s\n" - -#: fe-connect.c:1444 -#, c-format -msgid "setsockopt(TCP_KEEPALIVE) failed: %s\n" -msgstr "ошибка в setsockopt(TCP_KEEPALIVE): %s\n" - -#: fe-connect.c:1476 +#: fe-connect.c:1550 fe-connect.c:1582 fe-connect.c:1615 fe-connect.c:2174 #, c-format -msgid "setsockopt(TCP_KEEPINTVL) failed: %s\n" -msgstr "ошибка в setsockopt(TCP_KEEPINTVL): %s\n" +msgid "setsockopt(%s) failed: %s\n" +msgstr "ошибка в setsockopt(%s): %s\n" -#: fe-connect.c:1508 -#, c-format -msgid "setsockopt(TCP_KEEPCNT) failed: %s\n" -msgstr "ошибка в setsockopt(TCP_KEEPCNT): %s\n" - -#: fe-connect.c:1556 +#: fe-connect.c:1664 #, c-format msgid "WSAIoctl(SIO_KEEPALIVE_VALS) failed: %ui\n" msgstr "ошибка в WSAIoctl(SIO_KEEPALIVE_VALS): %ui\n" -#: fe-connect.c:1614 +#: fe-connect.c:1721 #, c-format msgid "invalid port number: \"%s\"\n" msgstr "неверный номер порта: \"%s\"\n" -#: fe-connect.c:1638 -#, c-format -msgid 
"Unix-domain socket path \"%s\" is too long (maximum %d bytes)\n" -msgstr "длина пути доменного сокета \"%s\" превышает предел (%d байт)\n" - -#: fe-connect.c:1656 +#: fe-connect.c:1737 #, c-format msgid "could not translate host name \"%s\" to address: %s\n" msgstr "преобразовать имя \"%s\" в адрес не удалось: %s\n" -#: fe-connect.c:1660 +#: fe-connect.c:1746 +#, c-format +msgid "could not parse network address \"%s\": %s\n" +msgstr "не удалось разобрать сетевой адрес \"%s\": %s\n" + +#: fe-connect.c:1757 +#, c-format +msgid "Unix-domain socket path \"%s\" is too long (maximum %d bytes)\n" +msgstr "длина пути Unix-сокета \"%s\" превышает предел (%d байт)\n" + +#: fe-connect.c:1771 #, c-format msgid "could not translate Unix-domain socket path \"%s\" to address: %s\n" -msgstr "" -"преобразовать путь к доменному сокету UNIX \"%s\" в адрес не удалось: %s\n" +msgstr "преобразовать путь Unix-сокета \"%s\" в адрес не удалось: %s\n" -#: fe-connect.c:1904 +#: fe-connect.c:2052 msgid "invalid connection state, probably indicative of memory corruption\n" msgstr "неверное состояние соединения - возможно разрушение памяти\n" -#: fe-connect.c:1961 +#: fe-connect.c:2109 #, c-format msgid "could not create socket: %s\n" msgstr "не удалось создать сокет: %s\n" -#: fe-connect.c:1983 +#: fe-connect.c:2131 #, c-format msgid "could not set socket to nonblocking mode: %s\n" msgstr "не удалось перевести сокет в неблокирующий режим: %s\n" -#: fe-connect.c:1994 +#: fe-connect.c:2142 #, c-format msgid "could not set socket to close-on-exec mode: %s\n" msgstr "" "не удалось перевести сокет в режим закрытия при выполнении (close-on-exec): " "%s\n" -#: fe-connect.c:2013 +#: fe-connect.c:2161 msgid "keepalives parameter must be an integer\n" msgstr "параметр keepalives должен быть целым числом\n" -#: fe-connect.c:2026 -#, c-format -msgid "setsockopt(SO_KEEPALIVE) failed: %s\n" -msgstr "ошибка в setsockopt(SO_KEEPALIVE): %s\n" - -#: fe-connect.c:2163 +#: fe-connect.c:2312 #, c-format msgid "could not get socket error status: %s\n" msgstr "не удалось получить статус ошибки сокета: %s\n" -#: fe-connect.c:2198 +#: fe-connect.c:2347 #, c-format msgid "could not get client address from socket: %s\n" msgstr "не удалось получить адрес клиента из сокета: %s\n" -#: fe-connect.c:2240 +#: fe-connect.c:2389 msgid "requirepeer parameter is not supported on this platform\n" msgstr "параметр requirepeer не поддерживается в этой ОС\n" -#: fe-connect.c:2243 +#: fe-connect.c:2392 #, c-format msgid "could not get peer credentials: %s\n" msgstr "не удалось получить учётные данные сервера: %s\n" -#: fe-connect.c:2266 +#: fe-connect.c:2415 #, c-format msgid "requirepeer specifies \"%s\", but actual peer user name is \"%s\"\n" msgstr "" "requirepeer допускает подключение только к \"%s\", но сервер работает под " "именем \"%s\"\n" -#: fe-connect.c:2300 +#: fe-connect.c:2449 #, c-format msgid "could not send SSL negotiation packet: %s\n" msgstr "не удалось отправить пакет согласования SSL: %s\n" -#: fe-connect.c:2339 +#: fe-connect.c:2488 #, c-format msgid "could not send startup packet: %s\n" msgstr "не удалось отправить стартовый пакет: %s\n" -#: fe-connect.c:2409 +#: fe-connect.c:2558 msgid "server does not support SSL, but SSL was required\n" msgstr "затребовано подключение через SSL, но сервер не поддерживает SSL\n" -#: fe-connect.c:2435 +#: fe-connect.c:2584 #, c-format msgid "received invalid response to SSL negotiation: %c\n" msgstr "получен неверный ответ на согласование SSL: %c\n" -#: fe-connect.c:2510 fe-connect.c:2543 +#: 
fe-connect.c:2660 fe-connect.c:2693 #, c-format msgid "expected authentication request from server, but received %c\n" msgstr "ожидался запрос аутентификации от сервера, но получено: %c\n" -#: fe-connect.c:2710 -#, c-format -msgid "out of memory allocating GSSAPI buffer (%d)" -msgstr "недостаточно памяти для буфера GSSAPI (%d)" - -#: fe-connect.c:2748 -#, c-format -msgid "out of memory allocating SASL buffer (%d)" -msgstr "недостаточно памяти для буфера SASL (%d)" - -#: fe-connect.c:2838 +#: fe-connect.c:2922 msgid "unexpected message from server during startup\n" msgstr "неожиданное сообщение от сервера в начале работы\n" -#: fe-connect.c:3042 +#: fe-connect.c:3140 #, c-format msgid "could not make a writable connection to server \"%s:%s\"\n" msgstr "" "не удалось установить подключение для чтения/записи к серверу \"%s:%s\"\n" -#: fe-connect.c:3084 +#: fe-connect.c:3189 #, c-format -msgid "test \"show transaction_read_only\" failed on \"%s:%s\" \n" -msgstr "проверка \"show transaction_read_only\" не пройдена на \"%s:%s\" \n" +msgid "test \"SHOW transaction_read_only\" failed on server \"%s:%s\"\n" +msgstr "" +"проверка \"SHOW transaction_read_only\" не пройдена на сервере \"%s:%s\"\n" -#: fe-connect.c:3106 +#: fe-connect.c:3210 #, c-format msgid "invalid connection state %d, probably indicative of memory corruption\n" msgstr "неверное состояние соединения %d - возможно разрушение памяти\n" -#: fe-connect.c:3606 fe-connect.c:3666 +#: fe-connect.c:3667 fe-connect.c:3727 #, c-format msgid "PGEventProc \"%s\" failed during PGEVT_CONNRESET event\n" msgstr "ошибка в PGEventProc \"%s\" при обработке события PGEVT_CONNRESET\n" -#: fe-connect.c:4013 +#: fe-connect.c:4074 #, c-format msgid "invalid LDAP URL \"%s\": scheme must be ldap://\n" msgstr "некорректный адрес LDAP \"%s\": схема должна быть ldap://\n" -#: fe-connect.c:4028 +#: fe-connect.c:4089 #, c-format msgid "invalid LDAP URL \"%s\": missing distinguished name\n" msgstr "некорректный адрес LDAP \"%s\": отсутствует уникальное имя\n" -#: fe-connect.c:4039 fe-connect.c:4092 +#: fe-connect.c:4100 fe-connect.c:4153 #, c-format msgid "invalid LDAP URL \"%s\": must have exactly one attribute\n" msgstr "некорректный адрес LDAP \"%s\": должен быть только один атрибут\n" -#: fe-connect.c:4049 fe-connect.c:4106 +#: fe-connect.c:4110 fe-connect.c:4167 #, c-format msgid "invalid LDAP URL \"%s\": must have search scope (base/one/sub)\n" msgstr "" "некорректный адрес LDAP \"%s\": не указана область поиска (base/one/sub)\n" -#: fe-connect.c:4060 +#: fe-connect.c:4121 #, c-format msgid "invalid LDAP URL \"%s\": no filter\n" msgstr "некорректный адрес LDAP \"%s\": нет фильтра\n" -#: fe-connect.c:4081 +#: fe-connect.c:4142 #, c-format msgid "invalid LDAP URL \"%s\": invalid port number\n" msgstr "некорректный адрес LDAP \"%s\": неверный номер порта\n" -#: fe-connect.c:4115 +#: fe-connect.c:4176 msgid "could not create LDAP structure\n" msgstr "не удалось создать структуру LDAP\n" -#: fe-connect.c:4191 +#: fe-connect.c:4252 #, c-format msgid "lookup on LDAP server failed: %s\n" msgstr "ошибка поиска на сервере LDAP: %s\n" -#: fe-connect.c:4202 +#: fe-connect.c:4263 msgid "more than one entry found on LDAP lookup\n" msgstr "при поиске LDAP найдено более одного вхождения\n" -#: fe-connect.c:4203 fe-connect.c:4215 +#: fe-connect.c:4264 fe-connect.c:4276 msgid "no entry found on LDAP lookup\n" msgstr "при поиске LDAP ничего не найдено\n" -#: fe-connect.c:4226 fe-connect.c:4239 +#: fe-connect.c:4287 fe-connect.c:4300 msgid "attribute has no values on LDAP 
lookup\n" msgstr "атрибут не содержит значений при поиске LDAP\n" -#: fe-connect.c:4291 fe-connect.c:4310 fe-connect.c:4829 +#: fe-connect.c:4352 fe-connect.c:4371 fe-connect.c:4900 #, c-format msgid "missing \"=\" after \"%s\" in connection info string\n" msgstr "в строке соединения нет \"=\" после \"%s\"\n" -#: fe-connect.c:4383 fe-connect.c:5014 fe-connect.c:5764 +#: fe-connect.c:4444 fe-connect.c:5085 fe-connect.c:5859 #, c-format msgid "invalid connection option \"%s\"\n" msgstr "неверный параметр соединения \"%s\"\n" -#: fe-connect.c:4399 fe-connect.c:4878 +#: fe-connect.c:4460 fe-connect.c:4949 msgid "unterminated quoted string in connection info string\n" msgstr "в строке соединения не хватает закрывающей кавычки\n" -#: fe-connect.c:4439 -msgid "could not get home directory to locate service definition file" -msgstr "" -"не удалось получить домашний каталог для загрузки файла определений служб" - -#: fe-connect.c:4472 +#: fe-connect.c:4543 #, c-format msgid "definition of service \"%s\" not found\n" msgstr "определение службы \"%s\" не найдено\n" -#: fe-connect.c:4495 +#: fe-connect.c:4566 #, c-format msgid "service file \"%s\" not found\n" msgstr "файл определений служб \"%s\" не найден\n" -#: fe-connect.c:4508 +#: fe-connect.c:4579 #, c-format msgid "line %d too long in service file \"%s\"\n" msgstr "слишком длинная строка (%d) в файле определений служб \"%s\"\n" -#: fe-connect.c:4579 fe-connect.c:4623 +#: fe-connect.c:4650 fe-connect.c:4694 #, c-format msgid "syntax error in service file \"%s\", line %d\n" msgstr "синтаксическая ошибка в файле определения служб \"%s\" (строка %d)\n" -#: fe-connect.c:4590 +#: fe-connect.c:4661 #, c-format msgid "" "nested service specifications not supported in service file \"%s\", line %d\n" @@ -428,24 +504,24 @@ msgstr "" "рекурсивные определения служб не поддерживаются (файл определения служб \"%s" "\", строка %d)\n" -#: fe-connect.c:5286 +#: fe-connect.c:5381 #, c-format msgid "invalid URI propagated to internal parser routine: \"%s\"\n" msgstr "во внутреннюю процедуру разбора строки передан ошибочный URI: \"%s\"\n" -#: fe-connect.c:5363 +#: fe-connect.c:5458 #, c-format msgid "" "end of string reached when looking for matching \"]\" in IPv6 host address " "in URI: \"%s\"\n" msgstr "URI не содержит символ \"]\" после адреса IPv6: \"%s\"\n" -#: fe-connect.c:5370 +#: fe-connect.c:5465 #, c-format msgid "IPv6 host address may not be empty in URI: \"%s\"\n" msgstr "IPv6, содержащийся в URI, не может быть пустым: \"%s\"\n" -#: fe-connect.c:5385 +#: fe-connect.c:5480 #, c-format msgid "" "unexpected character \"%c\" at position %d in URI (expected \":\" or \"/\"): " @@ -454,41 +530,41 @@ msgstr "" "неожиданный символ \"%c\" в позиции %d в URI (ожидалось \":\" или \"/\"): " "\"%s\"\n" -#: fe-connect.c:5514 +#: fe-connect.c:5609 #, c-format msgid "extra key/value separator \"=\" in URI query parameter: \"%s\"\n" msgstr "лишний разделитель ключа/значения \"=\" в параметрах URI: \"%s\"\n" -#: fe-connect.c:5534 +#: fe-connect.c:5629 #, c-format msgid "missing key/value separator \"=\" in URI query parameter: \"%s\"\n" msgstr "в параметрах URI не хватает разделителя ключа/значения \"=\": \"%s\"\n" -#: fe-connect.c:5585 +#: fe-connect.c:5680 #, c-format msgid "invalid URI query parameter: \"%s\"\n" msgstr "неверный параметр в URI: \"%s\"\n" -#: fe-connect.c:5659 +#: fe-connect.c:5754 #, c-format msgid "invalid percent-encoded token: \"%s\"\n" msgstr "неверный символ, закодированный с %%: \"%s\"\n" -#: fe-connect.c:5669 +#: fe-connect.c:5764 #, c-format 
msgid "forbidden value %%00 in percent-encoded value: \"%s\"\n" msgstr "недопустимое значение %%00 для символа, закодированного с %%: \"%s\"\n" -#: fe-connect.c:6014 +#: fe-connect.c:6109 msgid "connection pointer is NULL\n" msgstr "нулевой указатель соединения\n" -#: fe-connect.c:6312 +#: fe-connect.c:6407 #, c-format msgid "WARNING: password file \"%s\" is not a plain file\n" msgstr "ПРЕДУПРЕЖДЕНИЕ: файл паролей \"%s\" - не обычный файл\n" -#: fe-connect.c:6321 +#: fe-connect.c:6416 #, c-format msgid "" "WARNING: password file \"%s\" has group or world access; permissions should " @@ -497,107 +573,126 @@ msgstr "" "ПРЕДУПРЕЖДЕНИЕ: к файлу паролей \"%s\" имеют доступ все или группа; права " "должны быть u=rw (0600) или более ограниченные\n" -#: fe-connect.c:6413 +#: fe-connect.c:6508 #, c-format msgid "password retrieved from file \"%s\"\n" msgstr "пароль получен из файла \"%s\"\n" -#: fe-exec.c:826 +#: fe-exec.c:437 fe-exec.c:2776 +#, c-format +msgid "row number %d is out of range 0..%d" +msgstr "номер записи %d вне диапазона 0..%d" + +#: fe-exec.c:498 fe-protocol2.c:503 fe-protocol2.c:538 fe-protocol2.c:1049 +#: fe-protocol3.c:209 fe-protocol3.c:236 fe-protocol3.c:253 fe-protocol3.c:333 +#: fe-protocol3.c:728 fe-protocol3.c:951 +msgid "out of memory" +msgstr "нехватка памяти" + +#: fe-exec.c:499 fe-protocol2.c:1395 fe-protocol3.c:1886 +#, c-format +msgid "%s" +msgstr "%s" + +#: fe-exec.c:847 msgid "NOTICE" msgstr "ЗАМЕЧАНИЕ" -#: fe-exec.c:1141 fe-exec.c:1199 fe-exec.c:1245 +#: fe-exec.c:905 +msgid "PGresult cannot support more than INT_MAX tuples" +msgstr "PGresult не может вместить больше чем INT_MAX кортежей" + +#: fe-exec.c:917 +msgid "size_t overflow" +msgstr "переполнение size_t" + +#: fe-exec.c:1192 fe-exec.c:1250 fe-exec.c:1296 msgid "command string is a null pointer\n" msgstr "указатель на командную строку нулевой\n" -#: fe-exec.c:1205 fe-exec.c:1251 fe-exec.c:1346 +#: fe-exec.c:1256 fe-exec.c:1302 fe-exec.c:1397 msgid "number of parameters must be between 0 and 65535\n" msgstr "число параметров должно быть от 0 до 65535\n" -#: fe-exec.c:1239 fe-exec.c:1340 +#: fe-exec.c:1290 fe-exec.c:1391 msgid "statement name is a null pointer\n" msgstr "указатель на имя оператора нулевой\n" -#: fe-exec.c:1259 fe-exec.c:1422 fe-exec.c:2140 fe-exec.c:2339 +#: fe-exec.c:1310 fe-exec.c:1473 fe-exec.c:2191 fe-exec.c:2390 msgid "function requires at least protocol version 3.0\n" msgstr "функция требует протокол минимум версии 3.0\n" -#: fe-exec.c:1377 +#: fe-exec.c:1428 msgid "no connection to the server\n" msgstr "нет соединения с сервером\n" -#: fe-exec.c:1384 +#: fe-exec.c:1435 msgid "another command is already in progress\n" msgstr "уже выполняется другая команда\n" -#: fe-exec.c:1498 +#: fe-exec.c:1549 msgid "length must be given for binary parameter\n" msgstr "для двоичного параметра должна быть указана длина\n" -#: fe-exec.c:1770 +#: fe-exec.c:1821 #, c-format msgid "unexpected asyncStatus: %d\n" msgstr "неожиданный asyncStatus: %d\n" -#: fe-exec.c:1790 +#: fe-exec.c:1841 #, c-format msgid "PGEventProc \"%s\" failed during PGEVT_RESULTCREATE event\n" msgstr "ошибка в PGEventProc \"%s\" при обработке события PGEVT_RESULTCREATE\n" -#: fe-exec.c:1950 +#: fe-exec.c:2001 msgid "COPY terminated by new PQexec" msgstr "операция COPY прервана вызовом PQexec" -#: fe-exec.c:1958 +#: fe-exec.c:2009 msgid "COPY IN state must be terminated first\n" msgstr "сначала должно завершиться состояние COPY IN\n" -#: fe-exec.c:1978 +#: fe-exec.c:2029 msgid "COPY OUT state must be terminated first\n" msgstr 
"сначала должно завершиться состояние COPY OUT\n" -#: fe-exec.c:1986 +#: fe-exec.c:2037 msgid "PQexec not allowed during COPY BOTH\n" msgstr "вызов PQexec не допускается в процессе COPY BOTH\n" -#: fe-exec.c:2229 fe-exec.c:2296 fe-exec.c:2386 fe-protocol2.c:1352 +#: fe-exec.c:2280 fe-exec.c:2347 fe-exec.c:2437 fe-protocol2.c:1352 #: fe-protocol3.c:1817 msgid "no COPY in progress\n" msgstr "операция COPY не выполняется\n" -#: fe-exec.c:2576 +#: fe-exec.c:2627 msgid "connection in wrong state\n" msgstr "соединение в неправильном состоянии\n" -#: fe-exec.c:2607 +#: fe-exec.c:2658 msgid "invalid ExecStatusType code" msgstr "неверный код ExecStatusType" -#: fe-exec.c:2634 +#: fe-exec.c:2685 msgid "PGresult is not an error result\n" msgstr "В PGresult не передан результат ошибки\n" -#: fe-exec.c:2709 fe-exec.c:2732 +#: fe-exec.c:2760 fe-exec.c:2783 #, c-format msgid "column number %d is out of range 0..%d" msgstr "номер столбца %d вне диапазона 0..%d" -#: fe-exec.c:2725 -#, c-format -msgid "row number %d is out of range 0..%d" -msgstr "номер записи %d вне диапазона 0..%d" - -#: fe-exec.c:2747 +#: fe-exec.c:2798 #, c-format msgid "parameter number %d is out of range 0..%d" msgstr "номер параметра %d вне диапазона 0..%d" -#: fe-exec.c:3057 +#: fe-exec.c:3108 #, c-format msgid "could not interpret result from server: %s" msgstr "не удалось интерпретировать ответ сервера: %s" -#: fe-exec.c:3296 fe-exec.c:3380 +#: fe-exec.c:3347 fe-exec.c:3431 msgid "incomplete multibyte character\n" msgstr "неполный многобайтный символ\n" @@ -684,21 +779,21 @@ msgstr "не удалось определить OID функции loread\n" msgid "cannot determine OID of function lowrite\n" msgstr "не удалось определить OID функции lowrite\n" -#: fe-misc.c:295 +#: fe-misc.c:292 #, c-format msgid "integer of size %lu not supported by pqGetInt" msgstr "функция pqGetInt не поддерживает integer размером %lu байт" -#: fe-misc.c:331 +#: fe-misc.c:328 #, c-format msgid "integer of size %lu not supported by pqPutInt" msgstr "функция pqPutInt не поддерживает integer размером %lu байт" -#: fe-misc.c:642 fe-misc.c:843 +#: fe-misc.c:639 fe-misc.c:840 msgid "connection not open\n" msgstr "соединение не открыто\n" -#: fe-misc.c:812 fe-secure-openssl.c:229 fe-secure-openssl.c:338 +#: fe-misc.c:809 fe-secure-openssl.c:229 fe-secure-openssl.c:338 #: fe-secure.c:253 fe-secure.c:362 msgid "" "server closed the connection unexpectedly\n" @@ -709,15 +804,15 @@ msgstr "" "\tСкорее всего сервер прекратил работу из-за сбоя\n" "\tдо или в процессе выполнения запроса.\n" -#: fe-misc.c:1016 +#: fe-misc.c:1011 msgid "timeout expired\n" msgstr "таймаут\n" -#: fe-misc.c:1061 +#: fe-misc.c:1056 msgid "invalid socket\n" msgstr "неверный сокет\n" -#: fe-misc.c:1084 +#: fe-misc.c:1079 #, c-format msgid "select() failed: %s\n" msgstr "ошибка в select(): %s\n" @@ -737,12 +832,6 @@ msgstr "неверное состояние %c - возможно разруше msgid "message type 0x%02x arrived from server while idle" msgstr "от сервера во время простоя получено сообщение типа 0x%02x" -#: fe-protocol2.c:503 fe-protocol2.c:538 fe-protocol2.c:1049 fe-protocol3.c:209 -#: fe-protocol3.c:236 fe-protocol3.c:253 fe-protocol3.c:333 fe-protocol3.c:728 -#: fe-protocol3.c:951 -msgid "out of memory" -msgstr "нехватка памяти" - #: fe-protocol2.c:529 #, c-format msgid "unexpected character %c following empty query response (\"I\" message)" @@ -775,11 +864,6 @@ msgstr "неожиданный ответ сервера; первый полу msgid "out of memory for query result" msgstr "недостаточно памяти для результата запроса" -#: fe-protocol2.c:1395 
fe-protocol3.c:1886 -#, c-format -msgid "%s" -msgstr "%s" - #: fe-protocol2.c:1407 #, c-format msgid "lost synchronization with server, resetting connection" @@ -911,16 +995,16 @@ msgstr "СТРОКА %d: " msgid "PQgetline: not doing text COPY OUT\n" msgstr "PQgetline можно вызывать только во время COPY OUT с текстом\n" -#: fe-secure-openssl.c:234 fe-secure-openssl.c:343 fe-secure-openssl.c:1321 +#: fe-secure-openssl.c:234 fe-secure-openssl.c:343 fe-secure-openssl.c:1323 #, c-format msgid "SSL SYSCALL error: %s\n" msgstr "ошибка SSL SYSCALL: %s\n" -#: fe-secure-openssl.c:241 fe-secure-openssl.c:350 fe-secure-openssl.c:1325 +#: fe-secure-openssl.c:241 fe-secure-openssl.c:350 fe-secure-openssl.c:1327 msgid "SSL SYSCALL error: EOF detected\n" msgstr "ошибка SSL SYSCALL: конец файла (EOF)\n" -#: fe-secure-openssl.c:252 fe-secure-openssl.c:361 fe-secure-openssl.c:1334 +#: fe-secure-openssl.c:252 fe-secure-openssl.c:361 fe-secure-openssl.c:1336 #, c-format msgid "SSL error: %s\n" msgstr "ошибка SSL: %s\n" @@ -929,7 +1013,7 @@ msgstr "ошибка SSL: %s\n" msgid "SSL connection has been closed unexpectedly\n" msgstr "SSL-соединение было неожиданно закрыто\n" -#: fe-secure-openssl.c:273 fe-secure-openssl.c:382 fe-secure-openssl.c:1343 +#: fe-secure-openssl.c:273 fe-secure-openssl.c:382 fe-secure-openssl.c:1345 #, c-format msgid "unrecognized SSL error code: %d\n" msgstr "нераспознанный код ошибки SSL: %d\n" @@ -1002,37 +1086,37 @@ msgstr "не удалось открыть файл сертификата \"%s\ msgid "could not read certificate file \"%s\": %s\n" msgstr "не удалось прочитать файл сертификата \"%s\": %s\n" -#: fe-secure-openssl.c:1099 +#: fe-secure-openssl.c:1100 #, c-format msgid "could not establish SSL connection: %s\n" msgstr "не удалось установить SSL-соединение: %s\n" -#: fe-secure-openssl.c:1153 +#: fe-secure-openssl.c:1154 #, c-format msgid "could not load SSL engine \"%s\": %s\n" msgstr "не удалось загрузить модуль SSL ENGINE \"%s\": %s\n" -#: fe-secure-openssl.c:1165 +#: fe-secure-openssl.c:1166 #, c-format msgid "could not initialize SSL engine \"%s\": %s\n" msgstr "не удалось инициализировать модуль SSL ENGINE \"%s\": %s\n" -#: fe-secure-openssl.c:1181 +#: fe-secure-openssl.c:1182 #, c-format msgid "could not read private SSL key \"%s\" from engine \"%s\": %s\n" msgstr "не удалось прочитать закрытый ключ SSL \"%s\" из модуля \"%s\": %s\n" -#: fe-secure-openssl.c:1195 +#: fe-secure-openssl.c:1196 #, c-format msgid "could not load private SSL key \"%s\" from engine \"%s\": %s\n" msgstr "не удалось загрузить закрытый ключ SSL \"%s\" из модуля \"%s\": %s\n" -#: fe-secure-openssl.c:1232 +#: fe-secure-openssl.c:1233 #, c-format msgid "certificate present, but not private key file \"%s\"\n" msgstr "сертификат присутствует, но файла закрытого ключа \"%s\" нет\n" -#: fe-secure-openssl.c:1240 +#: fe-secure-openssl.c:1241 #, c-format msgid "" "private key file \"%s\" has group or world access; permissions should be " @@ -1041,27 +1125,27 @@ msgstr "" "к файлу закрытого ключа \"%s\" имеют доступ все или группа; права должны " "быть u=rw (0600) или более ограниченные\n" -#: fe-secure-openssl.c:1251 +#: fe-secure-openssl.c:1252 #, c-format msgid "could not load private key file \"%s\": %s\n" msgstr "не удалось загрузить файл закрытого ключа \"%s\": %s\n" -#: fe-secure-openssl.c:1265 +#: fe-secure-openssl.c:1266 #, c-format msgid "certificate does not match private key file \"%s\": %s\n" msgstr "сертификат не соответствует файлу закрытого ключа \"%s\": %s\n" -#: fe-secure-openssl.c:1364 +#: fe-secure-openssl.c:1366 #, 
c-format msgid "certificate could not be obtained: %s\n" msgstr "не удалось получить сертификат: %s\n" -#: fe-secure-openssl.c:1456 +#: fe-secure-openssl.c:1458 #, c-format msgid "no SSL error reported" msgstr "нет сообщения об ошибке SSL" -#: fe-secure-openssl.c:1465 +#: fe-secure-openssl.c:1467 #, c-format msgid "SSL error code %lu" msgstr "код ошибки SSL: %lu" @@ -1081,6 +1165,25 @@ msgstr "не удалось передать данные серверу: %s\n" msgid "unrecognized socket error: 0x%08X/%d" msgstr "нераспознанная ошибка сокета: 0x%08X/%d" +#~ msgid "could not get home directory to locate password file\n" +#~ msgstr "не удалось получить домашний каталог для загрузки файла паролей\n" + +#~ msgid "could not get home directory to locate service definition file" +#~ msgstr "" +#~ "не удалось получить домашний каталог для загрузки файла определений служб" + +#~ msgid "setsockopt(TCP_KEEPIDLE) failed: %s\n" +#~ msgstr "ошибка в setsockopt(TCP_KEEPIDLE): %s\n" + +#~ msgid "setsockopt(TCP_KEEPALIVE) failed: %s\n" +#~ msgstr "ошибка в setsockopt(TCP_KEEPALIVE): %s\n" + +#~ msgid "setsockopt(TCP_KEEPINTVL) failed: %s\n" +#~ msgstr "ошибка в setsockopt(TCP_KEEPINTVL): %s\n" + +#~ msgid "setsockopt(SO_KEEPALIVE) failed: %s\n" +#~ msgstr "ошибка в setsockopt(SO_KEEPALIVE): %s\n" + #~ msgid "could not acquire mutex: %s\n" #~ msgstr "не удалось заблокировать семафор: %s\n" diff --git a/src/interfaces/libpq/po/sv.po b/src/interfaces/libpq/po/sv.po index 0c76f0f546..87a2582777 100644 --- a/src/interfaces/libpq/po/sv.po +++ b/src/interfaces/libpq/po/sv.po @@ -1,6 +1,6 @@ # Swedish message translation file for libpq # Peter Eisentraut , 2001, 2010. -# Dennis Björklund , 2002, 2003, 2004, 2005, 2006, 2017. +# Dennis Björklund , 2002, 2003, 2004, 2005, 2006, 2017, 2018. # # Use these quotes: "%s" # @@ -8,8 +8,8 @@ msgid "" msgstr "" "Project-Id-Version: PostgreSQL 10\n" "Report-Msgid-Bugs-To: pgsql-bugs@postgresql.org\n" -"POT-Creation-Date: 2017-08-05 02:08+0000\n" -"PO-Revision-Date: 2017-08-05 07:54+0200\n" +"POT-Creation-Date: 2018-04-28 22:08+0000\n" +"PO-Revision-Date: 2018-04-29 12:39+0200\n" "Last-Translator: Dennis Björklund \n" "Language-Team: Swedish \n" "Language: sv\n" @@ -17,73 +17,82 @@ msgstr "" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" -#: fe-auth-scram.c:176 +#: fe-auth-scram.c:189 msgid "malformed SCRAM message (empty message)\n" msgstr "felaktigt SCRAM-meddelande (tomt meddelande)\n" -#: fe-auth-scram.c:182 +#: fe-auth-scram.c:195 msgid "malformed SCRAM message (length mismatch)\n" msgstr "felaktigt SCRAM-meddelande (längden stämmer inte)\n" -#: fe-auth-scram.c:231 -msgid "invalid server signature\n" -msgstr "ogiltig serversignatur\n" +#: fe-auth-scram.c:244 +msgid "incorrect server signature\n" +msgstr "felaktig serversignatur\n" -#: fe-auth-scram.c:240 +#: fe-auth-scram.c:253 msgid "invalid SCRAM exchange state\n" msgstr "ogiltig SCRAM-utbytesstatus\n" -#: fe-auth-scram.c:263 +#: fe-auth-scram.c:276 #, c-format -msgid "malformed SCRAM message (%c expected)\n" -msgstr "felaktigt SCRAM-meddelande (förväntade %c)\n" +msgid "malformed SCRAM message (attribute \"%c\" expected)\n" +msgstr "felaktigt SCRAM-meddelande (förväntade attribut %c)\n" -#: fe-auth-scram.c:272 +#: fe-auth-scram.c:285 #, c-format -msgid "malformed SCRAM message (expected = in attr '%c')\n" -msgstr "felaktigt SCRAM-meddelande (förväntade = i attr '%c')\n" +msgid "malformed SCRAM message (expected character \"=\" for attribute \"%c\")\n" +msgstr "felaktigt SCRAM-meddelande (förväntade tecken \"=\" 
för attribut '%c')\n" -#: fe-auth-scram.c:311 +#: fe-auth-scram.c:326 msgid "could not generate nonce\n" msgstr "kunde inte skapa engångsnummer\n" -#: fe-auth-scram.c:319 fe-auth-scram.c:336 fe-auth-scram.c:346 -#: fe-auth-scram.c:400 fe-auth-scram.c:420 fe-auth-scram.c:445 -#: fe-auth-scram.c:459 fe-auth-scram.c:501 fe-auth.c:227 fe-auth.c:362 -#: fe-auth.c:432 fe-auth.c:467 fe-auth.c:609 fe-auth.c:768 fe-auth.c:1080 -#: fe-auth.c:1228 fe-connect.c:775 fe-connect.c:1203 fe-connect.c:1379 -#: fe-connect.c:1947 fe-connect.c:2476 fe-connect.c:4062 fe-connect.c:4314 -#: fe-connect.c:4433 fe-connect.c:4673 fe-connect.c:4753 fe-connect.c:4852 -#: fe-connect.c:5108 fe-connect.c:5137 fe-connect.c:5209 fe-connect.c:5233 -#: fe-connect.c:5251 fe-connect.c:5352 fe-connect.c:5361 fe-connect.c:5717 -#: fe-connect.c:5867 fe-exec.c:2651 fe-exec.c:3398 fe-exec.c:3563 -#: fe-lobj.c:896 fe-protocol2.c:1206 fe-protocol3.c:992 fe-protocol3.c:1678 -#: fe-secure-openssl.c:514 fe-secure-openssl.c:1138 +#: fe-auth-scram.c:334 fe-auth-scram.c:408 fe-auth-scram.c:554 +#: fe-auth-scram.c:574 fe-auth-scram.c:600 fe-auth-scram.c:614 +#: fe-auth-scram.c:656 fe-auth.c:227 fe-auth.c:362 fe-auth.c:432 fe-auth.c:467 +#: fe-auth.c:628 fe-auth.c:787 fe-auth.c:1099 fe-auth.c:1247 fe-connect.c:782 +#: fe-connect.c:1209 fe-connect.c:1385 fe-connect.c:1953 fe-connect.c:2482 +#: fe-connect.c:4070 fe-connect.c:4322 fe-connect.c:4441 fe-connect.c:4691 +#: fe-connect.c:4771 fe-connect.c:4870 fe-connect.c:5126 fe-connect.c:5155 +#: fe-connect.c:5227 fe-connect.c:5251 fe-connect.c:5269 fe-connect.c:5370 +#: fe-connect.c:5379 fe-connect.c:5735 fe-connect.c:5885 fe-exec.c:2702 +#: fe-exec.c:3449 fe-exec.c:3614 fe-lobj.c:895 fe-protocol2.c:1213 +#: fe-protocol3.c:999 fe-protocol3.c:1685 fe-secure-common.c:103 +#: fe-secure-openssl.c:458 fe-secure-openssl.c:1049 msgid "out of memory\n" msgstr "slut på minne\n" -#: fe-auth-scram.c:437 +#: fe-auth-scram.c:469 +msgid "invalid channel binding type\n" +msgstr "ogiltig kanalbindningstyp\n" + +#: fe-auth-scram.c:480 +#, c-format +msgid "empty channel binding data for channel binding type \"%s\"\n" +msgstr "tom kanalbindningsdata för kanalbindningstyp \"%s\"\n" + +#: fe-auth-scram.c:592 msgid "invalid SCRAM response (nonce mismatch)\n" msgstr "ogiltigt SCRAM-svar (engångsnummer matchar inte)\n" -#: fe-auth-scram.c:476 +#: fe-auth-scram.c:631 msgid "malformed SCRAM message (invalid iteration count)\n" msgstr "felaktigt SCRAM meddelande (ogiltig iterationsräknare)\n" -#: fe-auth-scram.c:482 +#: fe-auth-scram.c:637 msgid "malformed SCRAM message (garbage at end of server-first-message)\n" msgstr "felaktigt SCRAM-meddelande (skräp i slutet på server-first-message)\n" -#: fe-auth-scram.c:511 +#: fe-auth-scram.c:667 #, c-format -msgid "error received from server in SASL exchange: %s\n" -msgstr "fel mottaget från server i SASL-utbyte: %s\n" +msgid "error received from server in SCRAM exchange: %s\n" +msgstr "fel mottaget från server i SCRAM-utbyte: %s\n" -#: fe-auth-scram.c:526 +#: fe-auth-scram.c:683 msgid "malformed SCRAM message (garbage at end of server-final-message)\n" msgstr "felaktigt SCRAM-meddelande (skräp i slutet av server-final-message)\n" -#: fe-auth-scram.c:534 +#: fe-auth-scram.c:691 msgid "malformed SCRAM message (invalid server signature)\n" msgstr "felaktigt SCRAM-meddelande (ogiltigt serversignatur)\n" @@ -125,115 +134,111 @@ msgstr "duplicerad autentiseringsbegäran från SSPI\n" msgid "could not acquire SSPI credentials" msgstr "kunde inte hämta SSPI-referenser" -#: fe-auth.c:500 
+#: fe-auth.c:501 msgid "duplicate SASL authentication request\n" msgstr "duplicerad autentiseringsbegäran från SASL\n" -#: fe-auth.c:560 +#: fe-auth.c:546 msgid "none of the server's SASL authentication mechanisms are supported\n" msgstr "ingen av serverns SASL-autentiseringsmekanismer stöds\n" -#: fe-auth.c:633 +#: fe-auth.c:652 #, c-format msgid "out of memory allocating SASL buffer (%d)\n" msgstr "slut på minne vid allokering av buffer till SASL (%d)\n" -#: fe-auth.c:658 +#: fe-auth.c:677 msgid "AuthenticationSASLFinal received from server, but SASL authentication was not completed\n" msgstr "mottog AuthenticationSASLFinal från server, men SASL-autentisering slutfördes ej\n" -#: fe-auth.c:735 +#: fe-auth.c:754 msgid "SCM_CRED authentication method not supported\n" msgstr "autentiseringsmetoden SCM_CRED stöds ej\n" -#: fe-auth.c:826 +#: fe-auth.c:845 msgid "Kerberos 4 authentication not supported\n" msgstr "Kerberos-4-autentisering stöds ej\n" -#: fe-auth.c:831 +#: fe-auth.c:850 msgid "Kerberos 5 authentication not supported\n" msgstr "Kerberos-5-autentisering stöds ej\n" -#: fe-auth.c:902 +#: fe-auth.c:921 msgid "GSSAPI authentication not supported\n" msgstr "GSSAPI-autentisering stöds ej\n" -#: fe-auth.c:934 +#: fe-auth.c:953 msgid "SSPI authentication not supported\n" msgstr "SSPI-autentisering stöds ej\n" -#: fe-auth.c:942 +#: fe-auth.c:961 msgid "Crypt authentication not supported\n" msgstr "Crypt-autentisering stöds ej\n" -#: fe-auth.c:1008 +#: fe-auth.c:1027 #, c-format msgid "authentication method %u not supported\n" msgstr "autentiseringsmetod %u stöds ej\n" -#: fe-auth.c:1055 +#: fe-auth.c:1074 #, c-format msgid "user name lookup failure: error code %lu\n" msgstr "misslyckad sökning efter användarnamn: felkod %lu\n" -#: fe-auth.c:1065 fe-connect.c:2403 +#: fe-auth.c:1084 fe-connect.c:2409 #, c-format msgid "could not look up local user ID %d: %s\n" msgstr "kunde inte slå upp lokalt användar-id %d: %s\n" -#: fe-auth.c:1070 fe-connect.c:2408 +#: fe-auth.c:1089 fe-connect.c:2414 #, c-format msgid "local user with ID %d does not exist\n" msgstr "lokal användare med ID %d existerar inte\n" -#: fe-auth.c:1172 +#: fe-auth.c:1191 msgid "unexpected shape of result set returned for SHOW\n" msgstr "oväntad form på resultatmängden som returnerades för SHOW\n" -#: fe-auth.c:1181 +#: fe-auth.c:1200 msgid "password_encryption value too long\n" msgstr "password_encryption-värdet är för långt\n" -#: fe-auth.c:1221 +#: fe-auth.c:1240 #, c-format msgid "unrecognized password encryption algorithm \"%s\"\n" msgstr "okänd lösenordskrypteringsalgoritm \"%s\"\n" -#: fe-connect.c:968 +#: fe-connect.c:975 #, c-format -msgid "could not match %d host names to %d hostaddrs\n" +msgid "could not match %d host names to %d hostaddr values\n" msgstr "kunde inte matcha %d värdnamn till %d värdadresser\n" -#: fe-connect.c:1025 +#: fe-connect.c:1032 #, c-format msgid "could not match %d port numbers to %d hosts\n" msgstr "kunde inte matcha %d portnummer med %d värdar\n" -#: fe-connect.c:1077 -msgid "could not get home directory to locate password file\n" -msgstr "kunde inte hämta hemkatalogen för att lokalisera lösenordsfilen\n" - -#: fe-connect.c:1129 +#: fe-connect.c:1135 #, c-format msgid "invalid sslmode value: \"%s\"\n" msgstr "ogiltigt värde för ssl-läge: \"%s\"\n" -#: fe-connect.c:1150 +#: fe-connect.c:1156 #, c-format msgid "sslmode value \"%s\" invalid when SSL support is not compiled in\n" msgstr "värde för ssl-läge, \"%s\", är ogiltigt när SSL-stöd inte kompilerats in\n" -#: fe-connect.c:1185 +#: 
fe-connect.c:1191 #, c-format msgid "invalid target_session_attrs value: \"%s\"\n" msgstr "ogiltigt target_session_attrs-värde: \"%s\"\n" -#: fe-connect.c:1403 +#: fe-connect.c:1409 #, c-format msgid "could not set socket to TCP no delay mode: %s\n" msgstr "kunde inte sätta uttag (socket) till läget TCP-ingen-fördröjning: %s\n" -#: fe-connect.c:1433 +#: fe-connect.c:1439 #, c-format msgid "" "could not connect to server: %s\n" @@ -244,7 +249,7 @@ msgstr "" "\tKör servern på lokalt och accepterar den\n" "\tanslutningar på Unix-uttaget \"%s\"?\n" -#: fe-connect.c:1491 +#: fe-connect.c:1497 #, c-format msgid "" "could not connect to server: %s\n" @@ -255,7 +260,7 @@ msgstr "" "\tKör servern på värden \"%s\" (%s) och accepterar\n" "\tden TCP/IP-uppkopplingar på port %s?\n" -#: fe-connect.c:1500 +#: fe-connect.c:1506 #, c-format msgid "" "could not connect to server: %s\n" @@ -266,492 +271,507 @@ msgstr "" "\tKör servern på värden \"%s\" och accepterar\n" "\tden TCP/IP-uppkopplingar på porten %s?\n" -#: fe-connect.c:1551 fe-connect.c:1583 fe-connect.c:1616 fe-connect.c:2175 +#: fe-connect.c:1557 fe-connect.c:1589 fe-connect.c:1622 fe-connect.c:2181 #, c-format msgid "setsockopt(%s) failed: %s\n" msgstr "setsockopt(%s) misslyckades: %s\n" -#: fe-connect.c:1665 +#: fe-connect.c:1671 #, c-format msgid "WSAIoctl(SIO_KEEPALIVE_VALS) failed: %ui\n" msgstr "WSAIoctl(SIO_KEEPALIVE_VALS) misslyckades: %ui\n" -#: fe-connect.c:1722 +#: fe-connect.c:1728 #, c-format msgid "invalid port number: \"%s\"\n" msgstr "ogiltigt portnummer \"%s\"\n" -#: fe-connect.c:1738 +#: fe-connect.c:1744 #, c-format msgid "could not translate host name \"%s\" to address: %s\n" msgstr "kunde inte översätta värdnamn \"%s\" till adress: %s\n" -#: fe-connect.c:1747 +#: fe-connect.c:1753 #, c-format msgid "could not parse network address \"%s\": %s\n" msgstr "kunde inte parsa nätverksadress \"%s\": %s\n" -#: fe-connect.c:1758 +#: fe-connect.c:1764 #, c-format msgid "Unix-domain socket path \"%s\" is too long (maximum %d bytes)\n" msgstr "Sökväg till unixdomänuttag \"%s\" är för lång (maximalt %d byte)\n" -#: fe-connect.c:1772 +#: fe-connect.c:1778 #, c-format msgid "could not translate Unix-domain socket path \"%s\" to address: %s\n" msgstr "kunde inte översätta sökväg till unix-uttag (socket) \"%s\" till adress: %s\n" -#: fe-connect.c:2053 +#: fe-connect.c:2059 msgid "invalid connection state, probably indicative of memory corruption\n" msgstr "ogiltigt förbindelsetillstånd, antagligen korrupt minne\n" -#: fe-connect.c:2110 +#: fe-connect.c:2116 #, c-format msgid "could not create socket: %s\n" msgstr "kan inte skapa uttag: %s\n" -#: fe-connect.c:2132 +#: fe-connect.c:2138 #, c-format msgid "could not set socket to nonblocking mode: %s\n" msgstr "kunde inte sätta uttag (socket) till ickeblockerande läge: %s\n" -#: fe-connect.c:2143 +#: fe-connect.c:2149 #, c-format msgid "could not set socket to close-on-exec mode: %s\n" msgstr "kunde inte ställa in uttag (socket) i \"close-on-exec\"-läge: %s\n" -#: fe-connect.c:2162 +#: fe-connect.c:2168 msgid "keepalives parameter must be an integer\n" msgstr "keepalives-parameter måste vara ett heltal\n" -#: fe-connect.c:2313 +#: fe-connect.c:2319 #, c-format msgid "could not get socket error status: %s\n" msgstr "kunde inte hämta felstatus för uttag (socket): %s\n" -#: fe-connect.c:2348 +#: fe-connect.c:2354 #, c-format msgid "could not get client address from socket: %s\n" msgstr "kunde inte få tag på klientadressen från uttag (socket): %s\n" -#: fe-connect.c:2390 +#: fe-connect.c:2396 msgid 
"requirepeer parameter is not supported on this platform\n" msgstr "requirepeer-parameter stöds inte på denna plattform\n" -#: fe-connect.c:2393 +#: fe-connect.c:2399 #, c-format msgid "could not get peer credentials: %s\n" msgstr "kunde inte hämta andra sidans referenser: %s\n" -#: fe-connect.c:2416 +#: fe-connect.c:2422 #, c-format msgid "requirepeer specifies \"%s\", but actual peer user name is \"%s\"\n" msgstr "requirepeer anger \"%s\", men andra sidans användarnamn är \"%s\"\n" -#: fe-connect.c:2450 +#: fe-connect.c:2456 #, c-format msgid "could not send SSL negotiation packet: %s\n" msgstr "kunde inte skicka SSL-paket för uppkopplingsförhandling: %s\n" -#: fe-connect.c:2489 +#: fe-connect.c:2495 #, c-format msgid "could not send startup packet: %s\n" msgstr "kan inte skicka startpaketet: %s\n" -#: fe-connect.c:2559 +#: fe-connect.c:2565 msgid "server does not support SSL, but SSL was required\n" msgstr "SSL stöds inte av servern, men SSL krävdes\n" -#: fe-connect.c:2585 +#: fe-connect.c:2591 #, c-format msgid "received invalid response to SSL negotiation: %c\n" msgstr "tog emot ogiltigt svar till SSL-uppkopplingsförhandling: %c\n" -#: fe-connect.c:2661 fe-connect.c:2694 +#: fe-connect.c:2667 fe-connect.c:2700 #, c-format msgid "expected authentication request from server, but received %c\n" msgstr "förväntade autentiseringsförfrågan från servern, men fick %c\n" -#: fe-connect.c:2923 +#: fe-connect.c:2929 msgid "unexpected message from server during startup\n" msgstr "oväntat meddelande från servern under starten\n" -#: fe-connect.c:3141 +#: fe-connect.c:3147 #, c-format msgid "could not make a writable connection to server \"%s:%s\"\n" msgstr "kunde inte upprätta en skrivbar anslutning till server \"%s:%s\"\n" -#: fe-connect.c:3190 +#: fe-connect.c:3196 #, c-format msgid "test \"SHOW transaction_read_only\" failed on server \"%s:%s\"\n" msgstr "test \"SHOW transaction_read_only\" misslyckades på server \"%s:%s\"\n" -#: fe-connect.c:3211 +#: fe-connect.c:3217 #, c-format msgid "invalid connection state %d, probably indicative of memory corruption\n" msgstr "ogiltigt förbindelsetillstånd %d, antagligen korrupt minne\n" -#: fe-connect.c:3668 fe-connect.c:3728 +#: fe-connect.c:3676 fe-connect.c:3736 #, c-format msgid "PGEventProc \"%s\" failed during PGEVT_CONNRESET event\n" msgstr "PGEventProc \"%s\" misslyckades under PGEVT_CONNRESET-händelse\n" -#: fe-connect.c:4075 +#: fe-connect.c:4083 #, c-format msgid "invalid LDAP URL \"%s\": scheme must be ldap://\n" msgstr "ogiltig LDAP URL \"%s\": schemat måste vara ldap://\n" -#: fe-connect.c:4090 +#: fe-connect.c:4098 #, c-format msgid "invalid LDAP URL \"%s\": missing distinguished name\n" msgstr "ogiltig LDAP URL \"%s\": saknar urskiljbart namn\n" -#: fe-connect.c:4101 fe-connect.c:4154 +#: fe-connect.c:4109 fe-connect.c:4162 #, c-format msgid "invalid LDAP URL \"%s\": must have exactly one attribute\n" msgstr "ogiltig LDAP URL \"%s\": måste finnas exakt ett attribut\n" -#: fe-connect.c:4111 fe-connect.c:4168 +#: fe-connect.c:4119 fe-connect.c:4176 #, c-format msgid "invalid LDAP URL \"%s\": must have search scope (base/one/sub)\n" msgstr "ogiltig LDAP URL \"%s\": måste ha sök-scope (base/one/sub)\n" -#: fe-connect.c:4122 +#: fe-connect.c:4130 #, c-format msgid "invalid LDAP URL \"%s\": no filter\n" msgstr "ogiltigt LDAP URL \"%s\": inget filter\n" -#: fe-connect.c:4143 +#: fe-connect.c:4151 #, c-format msgid "invalid LDAP URL \"%s\": invalid port number\n" msgstr "ogiltig LDAP URL \"%s\": ogiltigt portnummer\n" -#: fe-connect.c:4177 +#: 
fe-connect.c:4185 msgid "could not create LDAP structure\n" msgstr "kunde inte skapa LDAP-struktur\n" -#: fe-connect.c:4253 +#: fe-connect.c:4261 #, c-format msgid "lookup on LDAP server failed: %s\n" msgstr "uppslagning av LDAP-server misslyckades: %s\n" -#: fe-connect.c:4264 +#: fe-connect.c:4272 msgid "more than one entry found on LDAP lookup\n" msgstr "mer än en post hittad i LDAP-uppslagning\n" -#: fe-connect.c:4265 fe-connect.c:4277 +#: fe-connect.c:4273 fe-connect.c:4285 msgid "no entry found on LDAP lookup\n" msgstr "ingen post hittad i LDAP-uppslagning\n" -#: fe-connect.c:4288 fe-connect.c:4301 +#: fe-connect.c:4296 fe-connect.c:4309 msgid "attribute has no values on LDAP lookup\n" msgstr "attributet har inga värden i LDAP-uppslagning\n" -#: fe-connect.c:4353 fe-connect.c:4372 fe-connect.c:4891 +#: fe-connect.c:4361 fe-connect.c:4380 fe-connect.c:4909 #, c-format msgid "missing \"=\" after \"%s\" in connection info string\n" msgstr "\"=\" efter \"%s\" saknas i förbindelseinfosträng\n" -#: fe-connect.c:4445 fe-connect.c:5076 fe-connect.c:5850 +#: fe-connect.c:4453 fe-connect.c:5094 fe-connect.c:5868 #, c-format msgid "invalid connection option \"%s\"\n" msgstr "ogiltig förbindelseparameter \"%s\"\n" -#: fe-connect.c:4461 fe-connect.c:4940 +#: fe-connect.c:4469 fe-connect.c:4958 msgid "unterminated quoted string in connection info string\n" msgstr "icke terminerad sträng i uppkopplingsinformationen\n" -#: fe-connect.c:4501 -msgid "could not get home directory to locate service definition file" -msgstr "kunde inte hämta hemkatalogen för att lokalisera servicedefinitionsfilen" - -#: fe-connect.c:4534 +#: fe-connect.c:4552 #, c-format msgid "definition of service \"%s\" not found\n" msgstr "definition av service \"%s\" hittades inte\n" -#: fe-connect.c:4557 +#: fe-connect.c:4575 #, c-format msgid "service file \"%s\" not found\n" msgstr "servicefil \"%s\" hittades inte\n" -#: fe-connect.c:4570 +#: fe-connect.c:4588 #, c-format msgid "line %d too long in service file \"%s\"\n" msgstr "rad %d för lång i servicefil \"%s\"\n" -#: fe-connect.c:4641 fe-connect.c:4685 +#: fe-connect.c:4659 fe-connect.c:4703 #, c-format msgid "syntax error in service file \"%s\", line %d\n" msgstr "syntaxfel i servicefel \"%s\", rad %d\n" -#: fe-connect.c:4652 +#: fe-connect.c:4670 #, c-format msgid "nested service specifications not supported in service file \"%s\", line %d\n" msgstr "nästlade servicespecifikationer stöds inte i servicefil \"%s\", rad %d\n" -#: fe-connect.c:5372 +#: fe-connect.c:5390 #, c-format msgid "invalid URI propagated to internal parser routine: \"%s\"\n" msgstr "ogiltig URI propagerad till intern parsningsrutin: \"%s\"\n" -#: fe-connect.c:5449 +#: fe-connect.c:5467 #, c-format msgid "end of string reached when looking for matching \"]\" in IPv6 host address in URI: \"%s\"\n" msgstr "nådde slutet på strängen när vi letade efter matchande \"]\" i IPv6-värdadress i URI: \"%s\"\n" -#: fe-connect.c:5456 +#: fe-connect.c:5474 #, c-format msgid "IPv6 host address may not be empty in URI: \"%s\"\n" msgstr "IPv6-värdadress får ej vara tom i URI: \"%s\"\n" -#: fe-connect.c:5471 +#: fe-connect.c:5489 #, c-format msgid "unexpected character \"%c\" at position %d in URI (expected \":\" or \"/\"): \"%s\"\n" msgstr "oväntat tecken \"%c\" vid position %d i URI (förväntade \":\" eller \"/\"): \"%s\"\n" -#: fe-connect.c:5600 +#: fe-connect.c:5618 #, c-format msgid "extra key/value separator \"=\" in URI query parameter: \"%s\"\n" msgstr "extra nyckel/värde-separator \"=\" i URI-frågeparameter: 
\"%s\"\n" -#: fe-connect.c:5620 +#: fe-connect.c:5638 #, c-format msgid "missing key/value separator \"=\" in URI query parameter: \"%s\"\n" msgstr "saknar nyckel/värde-separator \"=\" i URI-frågeparameter: \"%s\"\n" -#: fe-connect.c:5671 +#: fe-connect.c:5689 #, c-format msgid "invalid URI query parameter: \"%s\"\n" msgstr "ogiltig URI-frågeparameter: \"%s\"\n" -#: fe-connect.c:5745 +#: fe-connect.c:5763 #, c-format msgid "invalid percent-encoded token: \"%s\"\n" msgstr "ogiltigt procent-kodad symbol: \"%s\"\n" -#: fe-connect.c:5755 +#: fe-connect.c:5773 #, c-format msgid "forbidden value %%00 in percent-encoded value: \"%s\"\n" msgstr "förbjudet värde %%00 i procentkodat värde: \"%s\"\n" -#: fe-connect.c:6100 +#: fe-connect.c:6119 msgid "connection pointer is NULL\n" msgstr "anslutningspekare är NULL\n" -#: fe-connect.c:6398 +#: fe-connect.c:6417 #, c-format msgid "WARNING: password file \"%s\" is not a plain file\n" msgstr "FEL: lösenordsfil \"%s\" är inte en vanlig fil\n" -#: fe-connect.c:6407 +#: fe-connect.c:6426 #, c-format msgid "WARNING: password file \"%s\" has group or world access; permissions should be u=rw (0600) or less\n" msgstr "VARNING: lösenordsfilen \"%s\" har läsrättigheter för gruppen eller världen; rättigheten skall vara u=rw (0600) eller mindre\n" -#: fe-connect.c:6499 +#: fe-connect.c:6518 #, c-format msgid "password retrieved from file \"%s\"\n" msgstr "lösenord hämtat från fil \"%s\"\n" -#: fe-exec.c:826 +#: fe-exec.c:437 fe-exec.c:2776 +#, c-format +msgid "row number %d is out of range 0..%d" +msgstr "radnummer %d är utanför giltigt intervall 0..%d" + +#: fe-exec.c:498 fe-protocol2.c:502 fe-protocol2.c:537 fe-protocol2.c:1056 +#: fe-protocol3.c:208 fe-protocol3.c:235 fe-protocol3.c:252 fe-protocol3.c:332 +#: fe-protocol3.c:727 fe-protocol3.c:958 +msgid "out of memory" +msgstr "slut på minne" + +#: fe-exec.c:499 fe-protocol2.c:1402 fe-protocol3.c:1893 +#, c-format +msgid "%s" +msgstr "%s" + +#: fe-exec.c:847 msgid "NOTICE" msgstr "NOTIS" -#: fe-exec.c:1141 fe-exec.c:1199 fe-exec.c:1245 +#: fe-exec.c:905 +msgid "PGresult cannot support more than INT_MAX tuples" +msgstr "PGresult stöder inte mer än INT_MAX tupler" + +#: fe-exec.c:917 +msgid "size_t overflow" +msgstr "size_t-överspill" + +#: fe-exec.c:1192 fe-exec.c:1250 fe-exec.c:1296 msgid "command string is a null pointer\n" msgstr "kommandosträngen är en null-pekare\n" -#: fe-exec.c:1205 fe-exec.c:1251 fe-exec.c:1346 +#: fe-exec.c:1256 fe-exec.c:1302 fe-exec.c:1397 msgid "number of parameters must be between 0 and 65535\n" msgstr "antal parametrar måste bara mellan 0 och 65535\n" -#: fe-exec.c:1239 fe-exec.c:1340 +#: fe-exec.c:1290 fe-exec.c:1391 msgid "statement name is a null pointer\n" msgstr "satsens namn är en null-pekare\n" -#: fe-exec.c:1259 fe-exec.c:1422 fe-exec.c:2140 fe-exec.c:2339 +#: fe-exec.c:1310 fe-exec.c:1473 fe-exec.c:2191 fe-exec.c:2390 msgid "function requires at least protocol version 3.0\n" msgstr "funktionen kräver minst protokollversion 3.0\n" -#: fe-exec.c:1377 +#: fe-exec.c:1428 msgid "no connection to the server\n" msgstr "inte förbunden till servern\n" -#: fe-exec.c:1384 +#: fe-exec.c:1435 msgid "another command is already in progress\n" msgstr "ett annat kommando pågår redan\n" -#: fe-exec.c:1498 +#: fe-exec.c:1549 msgid "length must be given for binary parameter\n" msgstr "längden måste anges för en binär parameter\n" -#: fe-exec.c:1770 +#: fe-exec.c:1821 #, c-format msgid "unexpected asyncStatus: %d\n" msgstr "oväntad asyncStatus: %d\n" -#: fe-exec.c:1790 +#: fe-exec.c:1841 #, 
c-format msgid "PGEventProc \"%s\" failed during PGEVT_RESULTCREATE event\n" msgstr "PGEventProc \"%s\" misslyckades under PGEVT_RESULTCREATE-händelse\n" -#: fe-exec.c:1950 +#: fe-exec.c:2001 msgid "COPY terminated by new PQexec" msgstr "COPY terminerad av ny PQexec" -#: fe-exec.c:1958 +#: fe-exec.c:2009 msgid "COPY IN state must be terminated first\n" msgstr "COPY IN-läge måste avslutas först\n" -#: fe-exec.c:1978 +#: fe-exec.c:2029 msgid "COPY OUT state must be terminated first\n" msgstr "COPY OUT-läge måste avslutas först\n" -#: fe-exec.c:1986 +#: fe-exec.c:2037 msgid "PQexec not allowed during COPY BOTH\n" msgstr "PQexec tillåts inte under COPY BOTH\n" -#: fe-exec.c:2229 fe-exec.c:2296 fe-exec.c:2386 fe-protocol2.c:1352 -#: fe-protocol3.c:1817 +#: fe-exec.c:2280 fe-exec.c:2347 fe-exec.c:2437 fe-protocol2.c:1359 +#: fe-protocol3.c:1824 msgid "no COPY in progress\n" msgstr "ingen COPY pågår\n" -#: fe-exec.c:2576 +#: fe-exec.c:2627 msgid "connection in wrong state\n" msgstr "förbindelse i felaktigt tillstånd\n" -#: fe-exec.c:2607 +#: fe-exec.c:2658 msgid "invalid ExecStatusType code" msgstr "ogiltig ExecStatusType-kod" -#: fe-exec.c:2634 +#: fe-exec.c:2685 msgid "PGresult is not an error result\n" msgstr "PGresult är inte ett felresultat\n" -#: fe-exec.c:2709 fe-exec.c:2732 +#: fe-exec.c:2760 fe-exec.c:2783 #, c-format msgid "column number %d is out of range 0..%d" msgstr "kolumnnummer %d är utanför giltigt intervall 0..%d" -#: fe-exec.c:2725 -#, c-format -msgid "row number %d is out of range 0..%d" -msgstr "radnummer %d är utanför giltigt intervall 0..%d" - -#: fe-exec.c:2747 +#: fe-exec.c:2798 #, c-format msgid "parameter number %d is out of range 0..%d" msgstr "parameter nummer %d är utanför giltigt intervall 0..%d" -#: fe-exec.c:3057 +#: fe-exec.c:3108 #, c-format msgid "could not interpret result from server: %s" msgstr "kunde inte tolka svaret från servern: %s" -#: fe-exec.c:3296 fe-exec.c:3380 +#: fe-exec.c:3347 fe-exec.c:3431 msgid "incomplete multibyte character\n" msgstr "ofullständigt multibyte-tecken\n" -#: fe-lobj.c:155 +#: fe-lobj.c:154 msgid "cannot determine OID of function lo_truncate\n" msgstr "kan inte ta reda på OID för funktionen lo_truncate\n" -#: fe-lobj.c:171 +#: fe-lobj.c:170 msgid "argument of lo_truncate exceeds integer range\n" msgstr "argumentet till lo_truncate överskrider heltalsintervallet\n" -#: fe-lobj.c:222 +#: fe-lobj.c:221 msgid "cannot determine OID of function lo_truncate64\n" msgstr "kan inte ta reda på OID för funktionen lo_truncate64\n" -#: fe-lobj.c:280 +#: fe-lobj.c:279 msgid "argument of lo_read exceeds integer range\n" msgstr "ett argument till lo_read överskriver heltalsintervallet\n" -#: fe-lobj.c:335 +#: fe-lobj.c:334 msgid "argument of lo_write exceeds integer range\n" msgstr "ett argument till lo_write överskriver heltalsintervallet\n" -#: fe-lobj.c:426 +#: fe-lobj.c:425 msgid "cannot determine OID of function lo_lseek64\n" msgstr "kan inte ta reda på OID för funktionen lo_lseek64\n" -#: fe-lobj.c:522 +#: fe-lobj.c:521 msgid "cannot determine OID of function lo_create\n" msgstr "kan inte ta reda på OID för funktionen lo_create\n" -#: fe-lobj.c:601 +#: fe-lobj.c:600 msgid "cannot determine OID of function lo_tell64\n" msgstr "kan inte ta reda på OID för funktionen lo_tell64\n" -#: fe-lobj.c:707 fe-lobj.c:816 +#: fe-lobj.c:706 fe-lobj.c:815 #, c-format msgid "could not open file \"%s\": %s\n" msgstr "kan inte öppna fil \"%s\": %s\n" -#: fe-lobj.c:762 +#: fe-lobj.c:761 #, c-format msgid "could not read from file \"%s\": %s\n" msgstr "kunde 
inte läsa från fil \"%s\": %s\n" -#: fe-lobj.c:836 fe-lobj.c:860 +#: fe-lobj.c:835 fe-lobj.c:859 #, c-format msgid "could not write to file \"%s\": %s\n" msgstr "kan inte skriva till fil \"%s\": %s\n" -#: fe-lobj.c:947 +#: fe-lobj.c:946 msgid "query to initialize large object functions did not return data\n" msgstr "fråga för att initiera stort objekt-funktion returnerade ingen data\n" -#: fe-lobj.c:996 +#: fe-lobj.c:995 msgid "cannot determine OID of function lo_open\n" msgstr "kan inte ta reda på OID för funktionen lo_open\n" -#: fe-lobj.c:1003 +#: fe-lobj.c:1002 msgid "cannot determine OID of function lo_close\n" msgstr "kan inte ta reda på OID för funktionen lo_close\n" -#: fe-lobj.c:1010 +#: fe-lobj.c:1009 msgid "cannot determine OID of function lo_creat\n" msgstr "kan inte ta reda på OID för funktionen lo_create\n" -#: fe-lobj.c:1017 +#: fe-lobj.c:1016 msgid "cannot determine OID of function lo_unlink\n" msgstr "kan inte ta reda på OID för funktionen lo_unlink\n" -#: fe-lobj.c:1024 +#: fe-lobj.c:1023 msgid "cannot determine OID of function lo_lseek\n" msgstr "kan inte ta reda på OID för funktionen lo_lseek\n" -#: fe-lobj.c:1031 +#: fe-lobj.c:1030 msgid "cannot determine OID of function lo_tell\n" msgstr "kan inte ta reda på OID för funktionen lo_tell\n" -#: fe-lobj.c:1038 +#: fe-lobj.c:1037 msgid "cannot determine OID of function loread\n" msgstr "kan inte ta reda på OID för funktionen loread\n" -#: fe-lobj.c:1045 +#: fe-lobj.c:1044 msgid "cannot determine OID of function lowrite\n" msgstr "kan inte ta reda på OID för funktionen lowrite\n" -#: fe-misc.c:292 +#: fe-misc.c:290 #, c-format msgid "integer of size %lu not supported by pqGetInt" msgstr "heltal med storlek %lu stöds inte av pqGetInt" -#: fe-misc.c:328 +#: fe-misc.c:326 #, c-format msgid "integer of size %lu not supported by pqPutInt" msgstr "heltal med storlek %lu stöds inte av pqPutInt" -#: fe-misc.c:639 fe-misc.c:840 +#: fe-misc.c:637 fe-misc.c:838 msgid "connection not open\n" msgstr "förbindelse inte öppen\n" -#: fe-misc.c:809 fe-secure-openssl.c:229 fe-secure-openssl.c:338 -#: fe-secure.c:253 fe-secure.c:362 +#: fe-misc.c:807 fe-secure-openssl.c:206 fe-secure-openssl.c:314 +#: fe-secure.c:261 fe-secure.c:370 msgid "" "server closed the connection unexpectedly\n" "\tThis probably means the server terminated abnormally\n" @@ -761,255 +781,261 @@ msgstr "" "\tTroligen så terminerade servern pga något fel antingen\n" "\tinnan eller under tiden den bearbetade en förfrågan.\n" -#: fe-misc.c:1011 +#: fe-misc.c:1009 msgid "timeout expired\n" msgstr "timeout utgången\n" -#: fe-misc.c:1056 +#: fe-misc.c:1054 msgid "invalid socket\n" msgstr "ogiltigt uttag\n" -#: fe-misc.c:1079 +#: fe-misc.c:1077 #, c-format msgid "select() failed: %s\n" msgstr "select() misslyckades: %s\n" -#: fe-protocol2.c:91 +#: fe-protocol2.c:90 #, c-format msgid "invalid setenv state %c, probably indicative of memory corruption\n" msgstr "ogiltigt setenv-tillstånd %c, indikerar troligen ett minnesfel\n" -#: fe-protocol2.c:390 +#: fe-protocol2.c:389 #, c-format msgid "invalid state %c, probably indicative of memory corruption\n" msgstr "ogiltigt tillstånd %c, indikerar troligen ett minnesfel\n" -#: fe-protocol2.c:479 fe-protocol3.c:186 +#: fe-protocol2.c:478 fe-protocol3.c:185 #, c-format msgid "message type 0x%02x arrived from server while idle" msgstr "meddelandetyp 0x%02x kom från server under viloperiod" -#: fe-protocol2.c:503 fe-protocol2.c:538 fe-protocol2.c:1049 -#: fe-protocol3.c:209 fe-protocol3.c:236 fe-protocol3.c:253 fe-protocol3.c:333 -#: 
fe-protocol3.c:728 fe-protocol3.c:951 -msgid "out of memory" -msgstr "slut på minne" - -#: fe-protocol2.c:529 +#: fe-protocol2.c:528 #, c-format msgid "unexpected character %c following empty query response (\"I\" message)" msgstr "oväntat tecken %c följer på ett tomt frågesvar (meddelande \"I\")" -#: fe-protocol2.c:595 +#: fe-protocol2.c:594 #, c-format msgid "server sent data (\"D\" message) without prior row description (\"T\" message)" msgstr "servern skickade data (meddelande \"D\") utan föregående radbeskrivning (meddelande \"T\")" -#: fe-protocol2.c:613 +#: fe-protocol2.c:612 #, c-format msgid "server sent binary data (\"B\" message) without prior row description (\"T\" message)" msgstr "servern skickade binärdata (meddelande \"B\") utan föregående radbeskrivning (meddelande \"T\")" -#: fe-protocol2.c:633 fe-protocol3.c:412 +#: fe-protocol2.c:632 fe-protocol3.c:411 #, c-format msgid "unexpected response from server; first received character was \"%c\"\n" msgstr "oväntat svar för servern; första mottagna tecknet var \"%c\"\n" -#: fe-protocol2.c:762 fe-protocol2.c:937 fe-protocol3.c:627 fe-protocol3.c:854 +#: fe-protocol2.c:761 fe-protocol2.c:936 fe-protocol3.c:626 fe-protocol3.c:853 msgid "out of memory for query result" msgstr "slut på minnet för frågeresultat" -#: fe-protocol2.c:1395 fe-protocol3.c:1886 -#, c-format -msgid "%s" -msgstr "%s" - -#: fe-protocol2.c:1407 +#: fe-protocol2.c:1414 #, c-format msgid "lost synchronization with server, resetting connection" msgstr "tappade synkronisering med servern, startar o, uppkopplingen" -#: fe-protocol2.c:1541 fe-protocol2.c:1573 fe-protocol3.c:2089 +#: fe-protocol2.c:1548 fe-protocol2.c:1580 fe-protocol3.c:2096 #, c-format msgid "protocol error: id=0x%x\n" msgstr "protokollfel: id=0x%x\n" -#: fe-protocol3.c:368 +#: fe-protocol3.c:367 msgid "server sent data (\"D\" message) without prior row description (\"T\" message)\n" msgstr "servern skickade data (meddelande \"D\") utan att först skicka en radbeskrivning (meddelande \"T\")\n" -#: fe-protocol3.c:433 +#: fe-protocol3.c:432 #, c-format msgid "message contents do not agree with length in message type \"%c\"\n" msgstr "meddelandeinnehåll stämmer inte med längden för meddelandetyp \"%c\"\n" -#: fe-protocol3.c:454 +#: fe-protocol3.c:453 #, c-format msgid "lost synchronization with server: got message type \"%c\", length %d\n" msgstr "tappade synkronisering med servern: fick meddelandetyp \"%c\", längd %d\n" -#: fe-protocol3.c:505 fe-protocol3.c:545 +#: fe-protocol3.c:504 fe-protocol3.c:544 msgid "insufficient data in \"T\" message" msgstr "otillräckligt med data i \"T\"-meddelande" -#: fe-protocol3.c:578 +#: fe-protocol3.c:577 msgid "extraneous data in \"T\" message" msgstr "extra data i \"T\"-meddelande" -#: fe-protocol3.c:691 +#: fe-protocol3.c:690 msgid "extraneous data in \"t\" message" msgstr "extra data i \"t\"-meddelande" -#: fe-protocol3.c:762 fe-protocol3.c:794 fe-protocol3.c:812 +#: fe-protocol3.c:761 fe-protocol3.c:793 fe-protocol3.c:811 msgid "insufficient data in \"D\" message" msgstr "otillräckligt med data i \"D\"-meddelande" -#: fe-protocol3.c:768 +#: fe-protocol3.c:767 msgid "unexpected field count in \"D\" message" msgstr "oväntat fältantal i \"D\"-meddelande" -#: fe-protocol3.c:821 +#: fe-protocol3.c:820 msgid "extraneous data in \"D\" message" msgstr "extra data i \"D\"-meddelande" -#: fe-protocol3.c:1005 +#: fe-protocol3.c:1012 msgid "no error message available\n" msgstr "inget felmeddelande finns tillgängligt\n" #. 
translator: %s represents a digit string -#: fe-protocol3.c:1035 fe-protocol3.c:1054 +#: fe-protocol3.c:1042 fe-protocol3.c:1061 #, c-format msgid " at character %s" msgstr " vid tecken %s" -#: fe-protocol3.c:1067 +#: fe-protocol3.c:1074 #, c-format msgid "DETAIL: %s\n" msgstr "DETALJ: %s\n" -#: fe-protocol3.c:1070 +#: fe-protocol3.c:1077 #, c-format msgid "HINT: %s\n" msgstr "TIPS: %s\n" -#: fe-protocol3.c:1073 +#: fe-protocol3.c:1080 #, c-format msgid "QUERY: %s\n" msgstr "FRÅGA: %s\n" -#: fe-protocol3.c:1080 +#: fe-protocol3.c:1087 #, c-format msgid "CONTEXT: %s\n" msgstr "KONTEXT: %s\n" -#: fe-protocol3.c:1089 +#: fe-protocol3.c:1096 #, c-format msgid "SCHEMA NAME: %s\n" msgstr "SCHEMANAMN: %s\n" -#: fe-protocol3.c:1093 +#: fe-protocol3.c:1100 #, c-format msgid "TABLE NAME: %s\n" msgstr "TABELLNAMN: %s\n" -#: fe-protocol3.c:1097 +#: fe-protocol3.c:1104 #, c-format msgid "COLUMN NAME: %s\n" msgstr "KOLUMNNAMN: %s\n" -#: fe-protocol3.c:1101 +#: fe-protocol3.c:1108 #, c-format msgid "DATATYPE NAME: %s\n" msgstr "DATATYPNAMN: %s\n" -#: fe-protocol3.c:1105 +#: fe-protocol3.c:1112 #, c-format msgid "CONSTRAINT NAME: %s\n" msgstr "VILLKORSNAMN: %s\n" -#: fe-protocol3.c:1117 +#: fe-protocol3.c:1124 msgid "LOCATION: " msgstr "PLATS: " -#: fe-protocol3.c:1119 +#: fe-protocol3.c:1126 #, c-format msgid "%s, " msgstr "%s, " -#: fe-protocol3.c:1121 +#: fe-protocol3.c:1128 #, c-format msgid "%s:%s" msgstr "%s:%s" -#: fe-protocol3.c:1316 +#: fe-protocol3.c:1323 #, c-format msgid "LINE %d: " msgstr "RAD %d: " -#: fe-protocol3.c:1711 +#: fe-protocol3.c:1718 msgid "PQgetline: not doing text COPY OUT\n" msgstr "PQgetline: utför inte text-COPY OUT\n" -#: fe-secure-openssl.c:234 fe-secure-openssl.c:343 fe-secure-openssl.c:1323 +#: fe-secure-common.c:117 +msgid "SSL certificate's name contains embedded null\n" +msgstr "SSL-certifikatets namn innehåller null-värden\n" + +#: fe-secure-common.c:164 +msgid "host name must be specified for a verified SSL connection\n" +msgstr "värdnamn måste anges för en verifierad SSL-anslutning\n" + +#: fe-secure-common.c:189 +#, c-format +msgid "server certificate for \"%s\" does not match host name \"%s\"\n" +msgstr "servercertifikat för \"%s\" matchar inte värdnamn \"%s\"\n" + +#: fe-secure-common.c:195 +msgid "could not get server's host name from server certificate\n" +msgstr "kan inte hämta ut serverns värdnamn från servercertifikatet\n" + +#: fe-secure-openssl.c:211 fe-secure-openssl.c:319 fe-secure-openssl.c:1243 #, c-format msgid "SSL SYSCALL error: %s\n" msgstr "SSL SYSCALL fel: %s\n" -#: fe-secure-openssl.c:241 fe-secure-openssl.c:350 fe-secure-openssl.c:1327 +#: fe-secure-openssl.c:218 fe-secure-openssl.c:326 fe-secure-openssl.c:1247 msgid "SSL SYSCALL error: EOF detected\n" msgstr "SSL SYSCALL-fel: EOF upptäckt\n" -#: fe-secure-openssl.c:252 fe-secure-openssl.c:361 fe-secure-openssl.c:1336 +#: fe-secure-openssl.c:229 fe-secure-openssl.c:337 fe-secure-openssl.c:1256 #, c-format msgid "SSL error: %s\n" msgstr "SSL-fel: %s\n" -#: fe-secure-openssl.c:267 fe-secure-openssl.c:376 +#: fe-secure-openssl.c:244 fe-secure-openssl.c:352 msgid "SSL connection has been closed unexpectedly\n" msgstr "SSL-anslutning har oväntat stängts\n" -#: fe-secure-openssl.c:273 fe-secure-openssl.c:382 fe-secure-openssl.c:1345 +#: fe-secure-openssl.c:250 fe-secure-openssl.c:358 fe-secure-openssl.c:1265 #, c-format msgid "unrecognized SSL error code: %d\n" msgstr "okänd SSL-felkod: %d\n" -#: fe-secure-openssl.c:494 -msgid "SSL certificate's name entry is missing\n" -msgstr "SSL-certifikatets 
namn saknas\n" +#: fe-secure-openssl.c:418 +msgid "could not determine server certificate signature algorithm\n" +msgstr "kunde inte lista ut serverns algoritm för certifikatsignering\n" -#: fe-secure-openssl.c:528 -msgid "SSL certificate's name contains embedded null\n" -msgstr "SSL-certifikatets namn innehåller null-värden\n" +#: fe-secure-openssl.c:439 +#, c-format +msgid "could not find digest for NID %s\n" +msgstr "kunde inte hitta kontrollsumma för NID %s\n" -#: fe-secure-openssl.c:580 -msgid "host name must be specified for a verified SSL connection\n" -msgstr "värdnamn måste anges för en verifierad SSL-anslutning\n" +#: fe-secure-openssl.c:449 +msgid "could not generate peer certificate hash\n" +msgstr "kunde inte generera peer-certifikathash\n" -#: fe-secure-openssl.c:680 -#, c-format -msgid "server certificate for \"%s\" does not match host name \"%s\"\n" -msgstr "servercertifikat för \"%s\" matchar inte värdnamn \"%s\"\n" +#: fe-secure-openssl.c:467 +msgid "channel binding type \"tls-server-end-point\" is not supported by this build\n" +msgstr "kanalbindningstyp \"tls-server-end-point\" stöds inte av detta bygge\n" -#: fe-secure-openssl.c:686 -msgid "could not get server's host name from server certificate\n" -msgstr "kan inte hämta ut serverns värdnamn från servercertifikatet\n" +#: fe-secure-openssl.c:510 +msgid "SSL certificate's name entry is missing\n" +msgstr "SSL-certifikatets namn saknas\n" -#: fe-secure-openssl.c:928 +#: fe-secure-openssl.c:839 #, c-format msgid "could not create SSL context: %s\n" msgstr "kan inte skapa SSL-omgivning: %s\n" -#: fe-secure-openssl.c:965 +#: fe-secure-openssl.c:876 #, c-format msgid "could not read root certificate file \"%s\": %s\n" msgstr "kunde inte läsa root-certifikatfilen \"%s\": %s\n" -#: fe-secure-openssl.c:993 +#: fe-secure-openssl.c:904 #, c-format msgid "SSL library does not support CRL certificates (file \"%s\")\n" msgstr "SSL-bibliotek stöder inte CRL-certifikat (fil \"%s\")\n" -#: fe-secure-openssl.c:1021 +#: fe-secure-openssl.c:932 msgid "" "could not get home directory to locate root certificate file\n" "Either provide the file or change sslmode to disable server certificate verification.\n" @@ -1017,7 +1043,7 @@ msgstr "" "kunde inte hämta hemkatalogen för att lokalisera root-certifikatfilen\n" "Antingen tillhandahåll filen eller ändra sslmode för att stänga av serverns certifikatverifiering.\n" -#: fe-secure-openssl.c:1025 +#: fe-secure-openssl.c:936 #, c-format msgid "" "root certificate file \"%s\" does not exist\n" @@ -1026,82 +1052,82 @@ msgstr "" "root-certifikatfilen \"%s\" finns inte\n" "Antingen tillhandahåll filen eller ändra sslmode för att stänga av serverns certifikatverifiering.\n" -#: fe-secure-openssl.c:1056 +#: fe-secure-openssl.c:967 #, c-format msgid "could not open certificate file \"%s\": %s\n" msgstr "kunde inte öppna certifikatfil \"%s\": %s\n" -#: fe-secure-openssl.c:1075 +#: fe-secure-openssl.c:986 #, c-format msgid "could not read certificate file \"%s\": %s\n" msgstr "kunde inte läsa certifikatfil \"%s\": %s\n" -#: fe-secure-openssl.c:1100 +#: fe-secure-openssl.c:1011 #, c-format msgid "could not establish SSL connection: %s\n" msgstr "kan inte skapa SSL-förbindelse: %s\n" -#: fe-secure-openssl.c:1154 +#: fe-secure-openssl.c:1065 #, c-format msgid "could not load SSL engine \"%s\": %s\n" msgstr "kunde inte ladda SSL-motor \"%s\": %s\n" -#: fe-secure-openssl.c:1166 +#: fe-secure-openssl.c:1077 #, c-format msgid "could not initialize SSL engine \"%s\": %s\n" msgstr "kunde inte initiera SSL-motor 
\"%s\": %s\n" -#: fe-secure-openssl.c:1182 +#: fe-secure-openssl.c:1093 #, c-format msgid "could not read private SSL key \"%s\" from engine \"%s\": %s\n" msgstr "kunde inte läsa privat SSL-nyckel \"%s\" från motor \"%s\": %s\n" -#: fe-secure-openssl.c:1196 +#: fe-secure-openssl.c:1107 #, c-format msgid "could not load private SSL key \"%s\" from engine \"%s\": %s\n" msgstr "kunde inte ladda privat SSL-nyckel \"%s\" från motor \"%s\": %s\n" -#: fe-secure-openssl.c:1233 +#: fe-secure-openssl.c:1144 #, c-format msgid "certificate present, but not private key file \"%s\"\n" msgstr "certifikat tillgängligt, men inte den privata nyckelfilen \"%s\"\n" -#: fe-secure-openssl.c:1241 +#: fe-secure-openssl.c:1152 #, c-format msgid "private key file \"%s\" has group or world access; permissions should be u=rw (0600) or less\n" msgstr "privata nyckelfilen \"%s\" har läsrättigheter för gruppen eller världen; rättigheten skall vara u=rw (0600) eller mindre\n" -#: fe-secure-openssl.c:1252 +#: fe-secure-openssl.c:1163 #, c-format msgid "could not load private key file \"%s\": %s\n" msgstr "kunde inte ladda privata nyckelfilen \"%s\": %s\n" -#: fe-secure-openssl.c:1266 +#: fe-secure-openssl.c:1177 #, c-format msgid "certificate does not match private key file \"%s\": %s\n" msgstr "certifikatet matchar inte den privata nyckelfilen \"%s\": %s\n" -#: fe-secure-openssl.c:1366 +#: fe-secure-openssl.c:1286 #, c-format msgid "certificate could not be obtained: %s\n" msgstr "certifikatet kunde inte hämtas: %s\n" -#: fe-secure-openssl.c:1458 +#: fe-secure-openssl.c:1375 #, c-format msgid "no SSL error reported" msgstr "inget SSL-fel rapporterat" -#: fe-secure-openssl.c:1467 +#: fe-secure-openssl.c:1384 #, c-format msgid "SSL error code %lu" msgstr "SSL-felkod %lu" -#: fe-secure.c:261 +#: fe-secure.c:269 #, c-format msgid "could not receive data from server: %s\n" msgstr "kan inte ta emot data från servern: %s\n" -#: fe-secure.c:369 +#: fe-secure.c:377 #, c-format msgid "could not send data to server: %s\n" msgstr "kan inte skicka data till servern: %s\n" @@ -1111,63 +1137,69 @@ msgstr "kan inte skicka data till servern: %s\n" msgid "unrecognized socket error: 0x%08X/%d" msgstr "okänt uttagsfel: 0x%08X/%d" -#~ msgid "could not set socket to blocking mode: %s\n" -#~ msgstr "kunde inte ställa in uttag (socket) i blockerande läge: %s\n" +#~ msgid "could not get home directory to locate password file\n" +#~ msgstr "kunde inte hämta hemkatalogen för att lokalisera lösenordsfilen\n" -#~ msgid "Kerberos 5 authentication rejected: %*s\n" -#~ msgstr "Kerberos-5-autentisering vägras: %*s\n" +#~ msgid "could not get home directory to locate service definition file" +#~ msgstr "kunde inte hämta hemkatalogen för att lokalisera servicedefinitionsfilen" -#~ msgid "could not restore non-blocking mode on socket: %s\n" -#~ msgstr "kunde inte återställa ickeblockerande läge för uttag (socket): %s\n" +#~ msgid "certificate could not be validated: %s\n" +#~ msgstr "certifikatet kunde inte valideras: %s\n" -#~ msgid "setsockopt(TCP_KEEPIDLE) failed: %s\n" -#~ msgstr "setsockopt(TCP_KEEPIDLE) misslyckades: %s\n" +#~ msgid "could not read private key file \"%s\": %s\n" +#~ msgstr "kunde inte läsa privat nyckelfil \"%s\": %s\n" -#~ msgid "setsockopt(TCP_KEEPALIVE) failed: %s\n" -#~ msgstr "setsockopt(TCP_KEEPALIVE) misslyckades: %s\n" +#~ msgid "private key file \"%s\" changed during execution\n" +#~ msgstr "privata nyckelfilen \"%s\" har ändrats under körning\n" -#~ msgid "setsockopt(TCP_KEEPINTVL) failed: %s\n" -#~ msgstr 
"setsockopt(TCP_KEEPINTVL) misslyckades: %s\n" +#~ msgid "could not open private key file \"%s\": %s\n" +#~ msgstr "kan inte öppna privat nyckelfil \"%s\": %s\n" -#~ msgid "setsockopt(SO_KEEPALIVE) failed: %s\n" -#~ msgstr "setsockopt(SO_KEEPALIVE) misslyckades: %s\n" +#~ msgid "private key file \"%s\" has wrong permissions\n" +#~ msgstr "privata nyckelfilen \"%s\" har fel rättigheter\n" -#~ msgid "socket not open\n" -#~ msgstr "uttag (socket) ej öppen\n" +#~ msgid "invalid value of PGSSLKEY environment variable\n" +#~ msgstr "felaktigt värde på miljövariabeln PGSSLKEY\n" -#, fuzzy -#~ msgid "could not get home directory to locate client certificate files\n" -#~ msgstr "kunde inte hämta hemkatalogen: %s\n" +#~ msgid "could not get user information\n" +#~ msgstr "kunde inte hämta användarinformation\n" -#~ msgid "error querying socket: %s\n" -#~ msgstr "fel vid förfrågan till uttag (socket): %s\n" +#~ msgid "server common name \"%s\" does not resolve to %ld.%ld.%ld.%ld\n" +#~ msgstr "värdens namn \"%s\" är inte %ld.%ld.%ld.%ld efter uppslagning\n" + +#~ msgid "unsupported protocol\n" +#~ msgstr "protokoll stöds inte\n" #~ msgid "could not get information about host \"%s\": %s\n" #~ msgstr "kunde inte få information om värd \"%s\": %s\n" -#~ msgid "unsupported protocol\n" -#~ msgstr "protokoll stöds inte\n" +#~ msgid "error querying socket: %s\n" +#~ msgstr "fel vid förfrågan till uttag (socket): %s\n" -#~ msgid "server common name \"%s\" does not resolve to %ld.%ld.%ld.%ld\n" -#~ msgstr "värdens namn \"%s\" är inte %ld.%ld.%ld.%ld efter uppslagning\n" +#, fuzzy +#~ msgid "could not get home directory to locate client certificate files\n" +#~ msgstr "kunde inte hämta hemkatalogen: %s\n" -#~ msgid "could not get user information\n" -#~ msgstr "kunde inte hämta användarinformation\n" +#~ msgid "socket not open\n" +#~ msgstr "uttag (socket) ej öppen\n" -#~ msgid "invalid value of PGSSLKEY environment variable\n" -#~ msgstr "felaktigt värde på miljövariabeln PGSSLKEY\n" +#~ msgid "setsockopt(SO_KEEPALIVE) failed: %s\n" +#~ msgstr "setsockopt(SO_KEEPALIVE) misslyckades: %s\n" -#~ msgid "private key file \"%s\" has wrong permissions\n" -#~ msgstr "privata nyckelfilen \"%s\" har fel rättigheter\n" +#~ msgid "setsockopt(TCP_KEEPINTVL) failed: %s\n" +#~ msgstr "setsockopt(TCP_KEEPINTVL) misslyckades: %s\n" -#~ msgid "could not open private key file \"%s\": %s\n" -#~ msgstr "kan inte öppna privat nyckelfil \"%s\": %s\n" +#~ msgid "setsockopt(TCP_KEEPALIVE) failed: %s\n" +#~ msgstr "setsockopt(TCP_KEEPALIVE) misslyckades: %s\n" -#~ msgid "private key file \"%s\" changed during execution\n" -#~ msgstr "privata nyckelfilen \"%s\" har ändrats under körning\n" +#~ msgid "setsockopt(TCP_KEEPIDLE) failed: %s\n" +#~ msgstr "setsockopt(TCP_KEEPIDLE) misslyckades: %s\n" -#~ msgid "could not read private key file \"%s\": %s\n" -#~ msgstr "kunde inte läsa privat nyckelfil \"%s\": %s\n" +#~ msgid "could not restore non-blocking mode on socket: %s\n" +#~ msgstr "kunde inte återställa ickeblockerande läge för uttag (socket): %s\n" -#~ msgid "certificate could not be validated: %s\n" -#~ msgstr "certifikatet kunde inte valideras: %s\n" +#~ msgid "Kerberos 5 authentication rejected: %*s\n" +#~ msgstr "Kerberos-5-autentisering vägras: %*s\n" + +#~ msgid "could not set socket to blocking mode: %s\n" +#~ msgstr "kunde inte ställa in uttag (socket) i blockerande läge: %s\n" diff --git a/src/interfaces/libpq/po/tr.po b/src/interfaces/libpq/po/tr.po index bed2592092..f47c7f7cbd 100644 --- a/src/interfaces/libpq/po/tr.po +++ 
b/src/interfaces/libpq/po/tr.po @@ -5,129 +5,231 @@ msgid "" msgstr "" "Project-Id-Version: libpq-tr\n" "Report-Msgid-Bugs-To: pgsql-bugs@postgresql.org\n" -"POT-Creation-Date: 2011-08-30 22:43+0000\n" -"PO-Revision-Date: 2011-08-31 13:21+0200\n" +"POT-Creation-Date: 2017-10-26 09:08+0000\n" +"PO-Revision-Date: 2017-12-22 15:13+0300\n" "Last-Translator: Devrim GÜNDÜZ \n" "Language-Team: Turkish \n" "Language: tr\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" -"X-Generator: KBabel 1.9.1\n" +"X-Generator: Poedit 2.0.4\n" "X-Poedit-Basepath: /home/ntufar/pg/pgsql/src/interfaces/libpq\n" "X-Poedit-SearchPath-0: /home/ntufar/pg/pgsql/src/interfaces/libpq\n" -#: fe-auth.c:210 -#: fe-auth.c:429 -#: fe-auth.c:656 -msgid "host name must be specified\n" -msgstr "sunucu adı belirtilmelidir\n" +#: fe-auth-scram.c:176 +msgid "malformed SCRAM message (empty message)\n" +msgstr "hatalı SCRAM mesajı (boş mesaj)\n" -#: fe-auth.c:240 +#: fe-auth-scram.c:182 +msgid "malformed SCRAM message (length mismatch)\n" +msgstr "hatalı SCRAM mesajı (uzunluk uyuşmazlığı)\n" + +#: fe-auth-scram.c:231 +msgid "incorrect server signature\n" +msgstr "sunucu imzası yanlış\n" + +#: fe-auth-scram.c:240 +msgid "invalid SCRAM exchange state\n" +msgstr "geçersiz SCRAM değişim durumu\n" + +#: fe-auth-scram.c:263 #, c-format -msgid "could not set socket to blocking mode: %s\n" -msgstr "soket engelleme moduna ayarlanamadı: %s\n" +msgid "malformed SCRAM message (attribute \"%c\" expected)\n" +msgstr "hatalı SCRAM mesajı (\"%c\" niteliği bekleniyor)\n" -#: fe-auth.c:258 -#: fe-auth.c:262 +#: fe-auth-scram.c:272 #, c-format -msgid "Kerberos 5 authentication rejected: %*s\n" -msgstr "Kerberos 5 yetkilendirmesi kabul edilmedi: %*s\n" +msgid "malformed SCRAM message (expected character \"=\" for attribute \"%c\")\n" +msgstr "hatalı SCRAM mesajı (\"%c\" niteliği için \"=\" karakteri bekleniyor)\n" + +#: fe-auth-scram.c:311 +msgid "could not generate nonce\n" +msgstr "nonce oluşturulamadı\n" + +#: fe-auth-scram.c:319 fe-auth-scram.c:336 fe-auth-scram.c:346 +#: fe-auth-scram.c:400 fe-auth-scram.c:420 fe-auth-scram.c:445 +#: fe-auth-scram.c:459 fe-auth-scram.c:501 fe-auth.c:227 fe-auth.c:362 +#: fe-auth.c:432 fe-auth.c:467 fe-auth.c:609 fe-auth.c:768 fe-auth.c:1080 +#: fe-auth.c:1228 fe-connect.c:775 fe-connect.c:1202 fe-connect.c:1378 +#: fe-connect.c:1946 fe-connect.c:2475 fe-connect.c:4061 fe-connect.c:4313 +#: fe-connect.c:4432 fe-connect.c:4682 fe-connect.c:4762 fe-connect.c:4861 +#: fe-connect.c:5117 fe-connect.c:5146 fe-connect.c:5218 fe-connect.c:5242 +#: fe-connect.c:5260 fe-connect.c:5361 fe-connect.c:5370 fe-connect.c:5726 +#: fe-connect.c:5876 fe-exec.c:2702 fe-exec.c:3449 fe-exec.c:3614 fe-lobj.c:896 +#: fe-protocol2.c:1206 fe-protocol3.c:992 fe-protocol3.c:1678 +#: fe-secure-openssl.c:514 fe-secure-openssl.c:1138 +msgid "out of memory\n" +msgstr "yetersiz bellek\n" + +#: fe-auth-scram.c:437 +msgid "invalid SCRAM response (nonce mismatch)\n" +msgstr "geçersiz SCRAM cevabı (nonce uyuşmazlığı)\n" + +#: fe-auth-scram.c:476 +msgid "malformed SCRAM message (invalid iteration count)\n" +msgstr "hatalı SCRAM mesajı (geçersiz iterasyon sayısı)\n" + +#: fe-auth-scram.c:482 +msgid "malformed SCRAM message (garbage at end of server-first-message)\n" +msgstr "hatalı SCRAM mesajı (sunucu-ilk-mesajı sonunda anlamsız değer)\n" + +#: fe-auth-scram.c:511 +#, c-format +msgid "error received from server in SCRAM exchange: %s\n" +msgstr "SCRAM değişimi işleminde sunucudan hata alındı: %s\n" + 
+#: fe-auth-scram.c:526 +msgid "malformed SCRAM message (garbage at end of server-final-message)\n" +msgstr "hatalı SCRAM mesajı (sunucu-son-mesajı sonunda anlamsız değer)\n" + +#: fe-auth-scram.c:534 +msgid "malformed SCRAM message (invalid server signature)\n" +msgstr "hatalı SCRAM mesajı (geçersiz sunucu imzası)\n" -#: fe-auth.c:288 +#: fe-auth.c:122 #, c-format -msgid "could not restore non-blocking mode on socket: %s\n" -msgstr "could not restore non-blocking mode on socket: %s\n" +msgid "out of memory allocating GSSAPI buffer (%d)\n" +msgstr "GSSAPI tamponu ayrılırken yetersiz bellek hatası (%d)\n" -#: fe-auth.c:400 +#: fe-auth.c:177 msgid "GSSAPI continuation error" msgstr "GSSAPI devam hatası" -#: fe-auth.c:436 +#: fe-auth.c:207 fe-auth.c:461 +msgid "host name must be specified\n" +msgstr "sunucu adı belirtilmelidir\n" + +#: fe-auth.c:214 msgid "duplicate GSS authentication request\n" msgstr "çift GSS yetkilendirme isteği\n" -#: fe-auth.c:456 +#: fe-auth.c:240 msgid "GSSAPI name import error" msgstr "GSSAPI ad aktarma hatası" -#: fe-auth.c:542 +#: fe-auth.c:303 +#, c-format +msgid "out of memory allocating SSPI buffer (%d)\n" +msgstr "GSSAPI tamponu ayrılırken yetersiz bellek hatası (%d)\n" + +#: fe-auth.c:351 msgid "SSPI continuation error" msgstr "SSPI devam hatası" -#: fe-auth.c:553 -#: fe-auth.c:627 -#: fe-auth.c:662 -#: fe-auth.c:757 -#: fe-connect.c:1961 -#: fe-connect.c:3368 -#: fe-connect.c:3586 -#: fe-connect.c:4007 -#: fe-connect.c:4016 -#: fe-connect.c:4153 -#: fe-connect.c:4199 -#: fe-connect.c:4217 -#: fe-connect.c:4296 -#: fe-connect.c:4366 -#: fe-connect.c:4412 -#: fe-connect.c:4430 -#: fe-exec.c:3121 -#: fe-exec.c:3286 -#: fe-lobj.c:696 -#: fe-protocol2.c:1092 -#: fe-protocol3.c:1433 -msgid "out of memory\n" -msgstr "yetersiz bellek\n" +#: fe-auth.c:422 +msgid "duplicate SSPI authentication request\n" +msgstr "çift SSPI yetkilendirme isteği\n" -#: fe-auth.c:642 +#: fe-auth.c:447 msgid "could not acquire SSPI credentials" -msgstr "SSPI kimlik bilgileri alınamadı: %m" +msgstr "SSPI kimlik bilgileri alınamadı" -#: fe-auth.c:733 +#: fe-auth.c:500 +msgid "duplicate SASL authentication request\n" +msgstr "çift SASL yetkilendirme isteği\n" + +#: fe-auth.c:560 +msgid "none of the server's SASL authentication mechanisms are supported\n" +msgstr "sunucunun SASL yetkilendirme mekanizmalarından hiçbiri desteklenmiyor\n" + +#: fe-auth.c:633 +#, c-format +msgid "out of memory allocating SASL buffer (%d)\n" +msgstr "SASL tamponu ayrılırken yetersiz bellek hatası (%d)\n" + +#: fe-auth.c:658 +msgid "AuthenticationSASLFinal received from server, but SASL authentication was not completed\n" +msgstr "sunucudan AuthenticationSASLFinal alındı, fakat SASL yetkilendirmesi tamamlanmadı\n" + +#: fe-auth.c:735 msgid "SCM_CRED authentication method not supported\n" msgstr "SCM_CRED yetkilendirme yöntemi desteklenmiyor.\n" -#: fe-auth.c:807 +#: fe-auth.c:826 msgid "Kerberos 4 authentication not supported\n" msgstr "Kerberos 4 yetkilendirmesi desteklenmiyor\n" -#: fe-auth.c:823 +#: fe-auth.c:831 msgid "Kerberos 5 authentication not supported\n" msgstr "Kerberos 5 yetkilendirmesi desteklenmiyor\n" -#: fe-auth.c:895 +#: fe-auth.c:902 msgid "GSSAPI authentication not supported\n" msgstr "GSSAPI yetkilendirmesi desteklenmiyor\n" -#: fe-auth.c:927 +#: fe-auth.c:934 msgid "SSPI authentication not supported\n" msgstr "SSPI yetkilendirmesi desteklenmiyor\n" -#: fe-auth.c:935 +#: fe-auth.c:942 msgid "Crypt authentication not supported\n" msgstr "Crypt yetkilendirmesi desteklenmiyor\n" -#: fe-auth.c:962 
+#: fe-auth.c:1008 #, c-format msgid "authentication method %u not supported\n" msgstr "%u yetkilendirme sistemi desteklenmiyor\n" -#: fe-connect.c:758 +#: fe-auth.c:1055 +#, c-format +msgid "user name lookup failure: error code %lu\n" +msgstr "kullanıcı adı arama başarısız: hata kodu %lu\n" + +#: fe-auth.c:1065 fe-connect.c:2402 +#, c-format +msgid "could not look up local user ID %d: %s\n" +msgstr "yerel kullanıcı ID %d bulunamadı: %s\n" + +#: fe-auth.c:1070 fe-connect.c:2407 +#, c-format +msgid "local user with ID %d does not exist\n" +msgstr "yerel kullanıcı ID %d mevcut değildir\n" + +#: fe-auth.c:1172 +msgid "unexpected shape of result set returned for SHOW\n" +msgstr "SHOW için döndürülen sonuç kümesi beklenmeyen şekilde \n" + +#: fe-auth.c:1181 +msgid "password_encryption value too long\n" +msgstr "Parola şifreleme (password_encryption) değeri çok uzun\n" + +#: fe-auth.c:1221 +#, c-format +msgid "unrecognized password encryption algorithm \"%s\"\n" +msgstr "tanımlanamayan exception durumu \"%s\"\n" + +#: fe-connect.c:968 +#, c-format +msgid "could not match %d host names to %d hostaddrs\n" +msgstr "%d sunucu adları %d sunucu adresleriyle eşleştirilemedi\n" + +#: fe-connect.c:1025 +#, c-format +msgid "could not match %d port numbers to %d hosts\n" +msgstr "%d kapı (port) numaraları %d sunucuları ile eşleştirilemedi\n" + +#: fe-connect.c:1128 #, c-format msgid "invalid sslmode value: \"%s\"\n" msgstr "geçersiz sslmode değeri: \"%s\"\n" -#: fe-connect.c:779 +#: fe-connect.c:1149 #, c-format msgid "sslmode value \"%s\" invalid when SSL support is not compiled in\n" msgstr "\"%s\" ssl modu, SSL desteği derlenmeyince geçersizdir.\n" -#: fe-connect.c:972 +#: fe-connect.c:1184 +#, c-format +msgid "invalid target_session_attrs value: \"%s\"\n" +msgstr "geçersiz target_session_attrs değeri: \"%s\"\n" + +#: fe-connect.c:1402 #, c-format msgid "could not set socket to TCP no delay mode: %s\n" msgstr "could not set socket to TCP no delay mode: %s\n" -#: fe-connect.c:1002 +#: fe-connect.c:1432 #, c-format msgid "" "could not connect to server: %s\n" @@ -138,7 +240,7 @@ msgstr "" "\tSunucu yerelde çalışıyor ve Unix domain\n" "\tsoketleri üzerinden bağlantılara izin veriyor mu? 
\"%s\"?\n" -#: fe-connect.c:1057 +#: fe-connect.c:1490 #, c-format msgid "" "could not connect to server: %s\n" @@ -149,7 +251,7 @@ msgstr "" "\tSunucu \"%s\" (%s) sunucusunda çalışıyor ve\n" "\t %s portundan TCP/IP bağlantılarına izin veriyor mu?\n" -#: fe-connect.c:1066 +#: fe-connect.c:1499 #, c-format msgid "" "could not connect to server: %s\n" @@ -160,449 +262,507 @@ msgstr "" "\tls Sunucu \"%s\" sunucunda çalışıyor ve\n" "\t %s portundan bağlantılara izin veriyor mu?\n" -#: fe-connect.c:1117 -#, c-format -msgid "setsockopt(TCP_KEEPIDLE) failed: %s\n" -msgstr "setsockopt(TCP_KEEPIDLE) başarısız oldu: %s\n" - -#: fe-connect.c:1130 +#: fe-connect.c:1550 fe-connect.c:1582 fe-connect.c:1615 fe-connect.c:2174 #, c-format -msgid "setsockopt(TCP_KEEPALIVE) failed: %s\n" -msgstr "setsockopt(SO_REUSEADDR) başarısız oldu: %s\n" +msgid "setsockopt(%s) failed: %s\n" +msgstr "setsockopt(%s) başarısız oldu: %s\n" -#: fe-connect.c:1162 -#, c-format -msgid "setsockopt(TCP_KEEPINTVL) failed: %s\n" -msgstr "setsockopt(TCP_KEEPINTVL) başarısız oldu: %s\n" - -#: fe-connect.c:1194 -#, c-format -msgid "setsockopt(TCP_KEEPCNT) failed: %s\n" -msgstr "setsockopt(TCP_KEEPCNT) başarısız oldu: %s\n" - -#: fe-connect.c:1242 +#: fe-connect.c:1664 #, c-format msgid "WSAIoctl(SIO_KEEPALIVE_VALS) failed: %ui\n" msgstr "WSAIoctl(SIO_KEEPALIVE_VALS) başarısız oldu: %ui\n" -#: fe-connect.c:1294 +#: fe-connect.c:1721 #, c-format msgid "invalid port number: \"%s\"\n" -msgstr "Geçersiz port numarası: \"%s\"\n" +msgstr "geçersiz port numarası: \"%s\"\n" -#: fe-connect.c:1337 +#: fe-connect.c:1737 #, c-format msgid "could not translate host name \"%s\" to address: %s\n" msgstr "\"%s\" makine adı bir adrese çevirilemedi: %s\n" -#: fe-connect.c:1341 +#: fe-connect.c:1746 +#, c-format +msgid "could not parse network address \"%s\": %s\n" +msgstr "ağ adresi \"%s\" ayrıştırılamadı: %s\n" + +#: fe-connect.c:1757 +#, c-format +msgid "Unix-domain socket path \"%s\" is too long (maximum %d bytes)\n" +msgstr "Unix-domain soket ypolu \"%s\" çok UZUN (azami %d bayt)\n" + +#: fe-connect.c:1771 #, c-format msgid "could not translate Unix-domain socket path \"%s\" to address: %s\n" msgstr "\"%s\" Unix domain soket yolu adrese çevirilemedi: %s\n" -#: fe-connect.c:1551 +#: fe-connect.c:2052 msgid "invalid connection state, probably indicative of memory corruption\n" -msgstr "Geçersiz bağlantı durumu, hafızanın zarar görmüş olmasının işareti olabilir\n" +msgstr "geçersiz bağlantı durumu, hafızanın zarar görmüş olmasının işareti olabilir\n" -#: fe-connect.c:1592 +#: fe-connect.c:2109 #, c-format msgid "could not create socket: %s\n" msgstr "soket yaratılamadı: %s\n" -#: fe-connect.c:1615 +#: fe-connect.c:2131 #, c-format -msgid "could not set socket to non-blocking mode: %s\n" -msgstr "could not set socket to non-blocking mode: %s\n" +msgid "could not set socket to nonblocking mode: %s\n" +msgstr "soket bloklamasız ( non-blocking ) moda ayarlanamadı: %s\n" -#: fe-connect.c:1627 +#: fe-connect.c:2142 #, c-format msgid "could not set socket to close-on-exec mode: %s\n" msgstr "soket close-on-exec moduna ayarlanamadı: %s\n" -#: fe-connect.c:1647 +#: fe-connect.c:2161 msgid "keepalives parameter must be an integer\n" msgstr "keepalives parametresi tamsayı olmalıdır\n" -#: fe-connect.c:1660 -#, c-format -msgid "setsockopt(SO_KEEPALIVE) failed: %s\n" -msgstr "setsockopt(SO_KEEPALIVE) başarısız oldu: %s\n" - -#: fe-connect.c:1801 +#: fe-connect.c:2312 #, c-format msgid "could not get socket error status: %s\n" msgstr "soket hata durumu alınamadı: 
%s\n" -#: fe-connect.c:1839 +#: fe-connect.c:2347 #, c-format msgid "could not get client address from socket: %s\n" msgstr "soketten istemci adresi alınamadı: %s\n" -#: fe-connect.c:1880 +#: fe-connect.c:2389 msgid "requirepeer parameter is not supported on this platform\n" msgstr "bu platformda requirepeer parametresi desteklenmiyor \n" -#: fe-connect.c:1883 +#: fe-connect.c:2392 #, c-format msgid "could not get peer credentials: %s\n" msgstr "karşı tarafın kimlik bilgileri alınamadı: %s \n" -#: fe-connect.c:1893 -#, c-format -msgid "local user with ID %d does not exist\n" -msgstr "yerel kullanıcı ID %d mevcut değildir\n" - -#: fe-connect.c:1901 +#: fe-connect.c:2415 #, c-format msgid "requirepeer specifies \"%s\", but actual peer user name is \"%s\"\n" msgstr "requirepeer \"%s\" belirtiyor, ancak gerçek peer kullanıcı aıd \"%s\"\n" -#: fe-connect.c:1935 +#: fe-connect.c:2449 #, c-format msgid "could not send SSL negotiation packet: %s\n" msgstr "could not send SSL negotiation packet: %s\n" -#: fe-connect.c:1974 +#: fe-connect.c:2488 #, c-format msgid "could not send startup packet: %s\n" msgstr "başlangıç paketi gönderilemedi: %s\n" -#: fe-connect.c:2044 +#: fe-connect.c:2558 msgid "server does not support SSL, but SSL was required\n" msgstr "sunucu SSL desteklemiyor, ama SSL gerekli idi\n" -#: fe-connect.c:2070 +#: fe-connect.c:2584 #, c-format msgid "received invalid response to SSL negotiation: %c\n" msgstr "ssl görüşmesine geçersiz yanıt alındı: %c\n" -#: fe-connect.c:2149 -#: fe-connect.c:2182 +#: fe-connect.c:2660 fe-connect.c:2693 #, c-format msgid "expected authentication request from server, but received %c\n" msgstr "sunucudan yetkilendirme isteği beklendi ancak %c alındı\n" -#: fe-connect.c:2363 -#, c-format -msgid "out of memory allocating GSSAPI buffer (%d)" -msgstr "GSSAPI tamponu ayrılırken yetersiz bellek hatası (%d)" - -#: fe-connect.c:2448 +#: fe-connect.c:2922 msgid "unexpected message from server during startup\n" msgstr "başlangıç sırasında sunucudan beklenmeyen bir mesaj alındı\n" -#: fe-connect.c:2547 +#: fe-connect.c:3140 +#, c-format +msgid "could not make a writable connection to server \"%s:%s\"\n" +msgstr "sunucuya yazılabilir (writable) bağlantı sağlanamadı \"%s:%s\"\n" + +#: fe-connect.c:3189 +#, c-format +msgid "test \"SHOW transaction_read_only\" failed on server \"%s:%s\"\n" +msgstr "\"SHOW transaction_read_only\" testi sunucuda başarısız oldu \"%s:%s\"\n" + +#: fe-connect.c:3210 #, c-format msgid "invalid connection state %d, probably indicative of memory corruption\n" msgstr "%d - geçersiz bağlantı durumu, bellekteki veri zarar görmüş olabilir\n" -#: fe-connect.c:2976 -#: fe-connect.c:3036 +#: fe-connect.c:3667 fe-connect.c:3727 #, c-format msgid "PGEventProc \"%s\" failed during PGEVT_CONNRESET event\n" msgstr "PGEventProc \"%s\" işlemi PGEVT_CONNRESET işlemi sırasında başarısız oldu\n" -#: fe-connect.c:3381 +#: fe-connect.c:4074 #, c-format msgid "invalid LDAP URL \"%s\": scheme must be ldap://\n" msgstr "geçersiz LDAP URL \"%s\": şema, ldap:// ile başlamalıdir\n" -#: fe-connect.c:3396 +#: fe-connect.c:4089 #, c-format msgid "invalid LDAP URL \"%s\": missing distinguished name\n" msgstr "geçersiz LDAP URL \"%s\": distinguished name eksik\n" -#: fe-connect.c:3407 -#: fe-connect.c:3460 +#: fe-connect.c:4100 fe-connect.c:4153 #, c-format msgid "invalid LDAP URL \"%s\": must have exactly one attribute\n" msgstr "geçersiz LDAP URL \"%s\": tam bir attribute içermelidir\n" -#: fe-connect.c:3417 -#: fe-connect.c:3474 +#: fe-connect.c:4110 fe-connect.c:4167 
#, c-format msgid "invalid LDAP URL \"%s\": must have search scope (base/one/sub)\n" msgstr "geçersiz LDAP URL \"%s\": arama kapsamı içermelidir (base/one/sub)\n" -#: fe-connect.c:3428 +#: fe-connect.c:4121 #, c-format msgid "invalid LDAP URL \"%s\": no filter\n" msgstr "geçersiz LDAP URL \"%s\": filtere eksik\n" -#: fe-connect.c:3449 +#: fe-connect.c:4142 #, c-format msgid "invalid LDAP URL \"%s\": invalid port number\n" msgstr "geçersiz LDAP URL \"%s\": geçersiz port numarası\n" -#: fe-connect.c:3483 +#: fe-connect.c:4176 msgid "could not create LDAP structure\n" msgstr "LDAP yapısı oluşturma hatası\n" -#: fe-connect.c:3525 +#: fe-connect.c:4252 #, c-format msgid "lookup on LDAP server failed: %s\n" msgstr "LDAP sonucunda sorgulama hatası: %s\n" -#: fe-connect.c:3536 +#: fe-connect.c:4263 msgid "more than one entry found on LDAP lookup\n" msgstr "LDAP sorgusu sonucunda birden fazla giriş bulundu\n" -#: fe-connect.c:3537 -#: fe-connect.c:3549 +#: fe-connect.c:4264 fe-connect.c:4276 msgid "no entry found on LDAP lookup\n" msgstr "LDAP sorgusu sonucunda hiçbir giriş bulunamadı\n" -#: fe-connect.c:3560 -#: fe-connect.c:3573 +#: fe-connect.c:4287 fe-connect.c:4300 msgid "attribute has no values on LDAP lookup\n" msgstr "LDAP sorgusu sonucunda bulunan attribute, hiçbir değer içermiyor\n" -#: fe-connect.c:3625 -#: fe-connect.c:3644 -#: fe-connect.c:4055 +#: fe-connect.c:4352 fe-connect.c:4371 fe-connect.c:4900 #, c-format msgid "missing \"=\" after \"%s\" in connection info string\n" msgstr "bağlantı bilgi katarında \"%s\" bilgisinden sonra \"=\" işareti eksik\n" -#: fe-connect.c:3708 -#: fe-connect.c:4137 -#: fe-connect.c:4321 +#: fe-connect.c:4444 fe-connect.c:5085 fe-connect.c:5859 #, c-format msgid "invalid connection option \"%s\"\n" msgstr "geçersiz bağlantı seçeneği \"%s\"\n" -#: fe-connect.c:3724 -#: fe-connect.c:4104 +#: fe-connect.c:4460 fe-connect.c:4949 msgid "unterminated quoted string in connection info string\n" msgstr "bağlantı bilgi katarında sonlandırılmamış tırnaklı katar\n" -#: fe-connect.c:3763 -msgid "could not get home directory to locate service definition file" -msgstr "servis dosyasının olduğu ev dizini bulunamadı" - -#: fe-connect.c:3796 +#: fe-connect.c:4543 #, c-format msgid "definition of service \"%s\" not found\n" msgstr "\"%s\" servisinin tanımı bulunamadı\n" -#: fe-connect.c:3819 +#: fe-connect.c:4566 #, c-format msgid "service file \"%s\" not found\n" msgstr "\"%s\" servis dosyası bulunamadı\n" -#: fe-connect.c:3832 +#: fe-connect.c:4579 #, c-format msgid "line %d too long in service file \"%s\"\n" -msgstr " \"%2$s\" servis dosyasında %1$d no'lu satır çok uzun \n" +msgstr "\"%2$s\" servis dosyasında %1$d no'lu satır çok uzun \n" -#: fe-connect.c:3903 -#: fe-connect.c:3930 +#: fe-connect.c:4650 fe-connect.c:4694 #, c-format msgid "syntax error in service file \"%s\", line %d\n" msgstr "\"%s\" servis dosyasında yazım hatası, satır no %d\n" -#: fe-connect.c:4597 +#: fe-connect.c:4661 +#, c-format +msgid "nested service specifications not supported in service file \"%s\", line %d\n" +msgstr "\"%s\" servis dosyası satır no %d , desteklenmeyen içiçe servis tanımlamaları\n" + +#: fe-connect.c:5381 +#, c-format +msgid "invalid URI propagated to internal parser routine: \"%s\"\n" +msgstr "dahili çözümleyici yordamına aktarılan geçersiz URI: \"%s\"\n" + +#: fe-connect.c:5458 +#, c-format +msgid "end of string reached when looking for matching \"]\" in IPv6 host address in URI: \"%s\"\n" +msgstr "URI içinde IPv6 sunucu adresinde eşleşen \"]\" aranırken dize sonuna 
ulaşıldı: \"%s\"\n" + +#: fe-connect.c:5465 +#, c-format +msgid "IPv6 host address may not be empty in URI: \"%s\"\n" +msgstr "URI içinde IPv6 sunuu adresi boş olamaz: \"%s\"\n" + +#: fe-connect.c:5480 +#, c-format +msgid "unexpected character \"%c\" at position %d in URI (expected \":\" or \"/\"): \"%s\"\n" +msgstr "URI içinde %2$d pozisyonunda beklenmeyen karakter \"%1$c\" (\":\" veya \"/\" bekleniyordu): \"%3$s\"\n" + +#: fe-connect.c:5609 +#, c-format +msgid "extra key/value separator \"=\" in URI query parameter: \"%s\"\n" +msgstr "URI sorgu parametresinde fazla anahtar/değer ayıracı \"=\": \"%s\"\n" + +#: fe-connect.c:5629 +#, c-format +msgid "missing key/value separator \"=\" in URI query parameter: \"%s\"\n" +msgstr "URI sorgu parametresinde eksik anahtar/değer ayıracı \"=\": \"%s\"\n" + +#: fe-connect.c:5680 +#, c-format +msgid "invalid URI query parameter: \"%s\"\n" +msgstr "geçersiz URI sorgu parametresi: \"%s\"\n" + +#: fe-connect.c:5754 +#, c-format +msgid "invalid percent-encoded token: \"%s\"\n" +msgstr "geçersiz percent-encoded andacı (token)\"%s\"\n" + +#: fe-connect.c:5764 +#, c-format +msgid "forbidden value %%00 in percent-encoded value: \"%s\"\n" +msgstr "percent-encoded değeri içinde yasak değer %%00: \"%s\"\n" + +#: fe-connect.c:6109 msgid "connection pointer is NULL\n" msgstr "bağlantı belirteci NULL'dur\n" -#: fe-connect.c:4874 +#: fe-connect.c:6407 #, c-format msgid "WARNING: password file \"%s\" is not a plain file\n" msgstr "UYARI: \"%s\" password dosyası düz metin dosyası değildir\n" -#: fe-connect.c:4883 +#: fe-connect.c:6416 #, c-format msgid "WARNING: password file \"%s\" has group or world access; permissions should be u=rw (0600) or less\n" msgstr "UYARI: \"%s\" şifre dosyası herkes ya da grup tarafından erişilebilir durumda; dosyanın izinleri u=rw (0600) ya da daha az olmalı\n" -#: fe-connect.c:4971 +#: fe-connect.c:6508 #, c-format msgid "password retrieved from file \"%s\"\n" msgstr "\"%s\" dosyasından parola okundu\n" -#: fe-exec.c:810 +#: fe-exec.c:437 fe-exec.c:2776 +#, c-format +msgid "row number %d is out of range 0..%d" +msgstr "%d satır numarası, 0..%d sınırının dışında" + +#: fe-exec.c:498 fe-protocol2.c:503 fe-protocol2.c:538 fe-protocol2.c:1049 +#: fe-protocol3.c:209 fe-protocol3.c:236 fe-protocol3.c:253 fe-protocol3.c:333 +#: fe-protocol3.c:728 fe-protocol3.c:951 +msgid "out of memory" +msgstr "yetersiz bellek" + +#: fe-exec.c:499 fe-protocol2.c:1395 fe-protocol3.c:1886 +#, c-format +msgid "%s" +msgstr "%s" + +#: fe-exec.c:847 msgid "NOTICE" msgstr "BİLGİ" -#: fe-exec.c:997 -#: fe-exec.c:1054 -#: fe-exec.c:1094 +#: fe-exec.c:905 +msgid "PGresult cannot support more than INT_MAX tuples" +msgstr "PGresult INT_MAX değerinden daha fazla satır (Tuple) destekleyemez" + +#: fe-exec.c:917 +msgid "size_t overflow" +msgstr "size_t taşması" + +#: fe-exec.c:1192 fe-exec.c:1250 fe-exec.c:1296 msgid "command string is a null pointer\n" msgstr "komut katarı null belirteçtir\n" -#: fe-exec.c:1087 -#: fe-exec.c:1182 +#: fe-exec.c:1256 fe-exec.c:1302 fe-exec.c:1397 +msgid "number of parameters must be between 0 and 65535\n" +msgstr "parametrelerin sayısı 0 ve 65535 arasında olmalı\n" + +#: fe-exec.c:1290 fe-exec.c:1391 msgid "statement name is a null pointer\n" msgstr "durum adı null belirteçtir\n" -#: fe-exec.c:1102 -#: fe-exec.c:1256 -#: fe-exec.c:1925 -#: fe-exec.c:2123 +#: fe-exec.c:1310 fe-exec.c:1473 fe-exec.c:2191 fe-exec.c:2390 msgid "function requires at least protocol version 3.0\n" msgstr "fonksiyon en az 3.0 prokolüne gereksinim duyuyor.\n" -#: 
fe-exec.c:1213 +#: fe-exec.c:1428 msgid "no connection to the server\n" msgstr "sunucuya bağlantı yok\n" -#: fe-exec.c:1220 +#: fe-exec.c:1435 msgid "another command is already in progress\n" msgstr "şu anda işlenen başka bir komut var\n" -#: fe-exec.c:1332 +#: fe-exec.c:1549 msgid "length must be given for binary parameter\n" msgstr "binary parametresinin uzunluğu belirtilmelidir\n" -#: fe-exec.c:1585 +#: fe-exec.c:1821 #, c-format msgid "unexpected asyncStatus: %d\n" msgstr "beklenmeyen asyncStatus: %d\n" -#: fe-exec.c:1605 +#: fe-exec.c:1841 #, c-format msgid "PGEventProc \"%s\" failed during PGEVT_RESULTCREATE event\n" msgstr "PGEventProc \"%s\" işlemi PGEVT_RESULTCREATE işlemi sırasında başarısız oldu\n" -#: fe-exec.c:1735 +#: fe-exec.c:2001 msgid "COPY terminated by new PQexec" msgstr "COPY, yeni PQexec tarafından sonlandırıldı" -#: fe-exec.c:1743 +#: fe-exec.c:2009 msgid "COPY IN state must be terminated first\n" msgstr "Öncelikle COPY IN durumu sonlandırılmalıdır\n" -#: fe-exec.c:1763 +#: fe-exec.c:2029 msgid "COPY OUT state must be terminated first\n" msgstr "Öncelikle COPY OUT durumu sonlandırılmalıdır\n" -#: fe-exec.c:1771 +#: fe-exec.c:2037 msgid "PQexec not allowed during COPY BOTH\n" msgstr "PQexec COPY BOTH sırasında izin verilmiyor\n" -#: fe-exec.c:2014 -#: fe-exec.c:2080 -#: fe-exec.c:2167 -#: fe-protocol2.c:1237 -#: fe-protocol3.c:1569 +#: fe-exec.c:2280 fe-exec.c:2347 fe-exec.c:2437 fe-protocol2.c:1352 +#: fe-protocol3.c:1817 msgid "no COPY in progress\n" msgstr "çalışan COPY süreci yok\n" -#: fe-exec.c:2359 +#: fe-exec.c:2627 msgid "connection in wrong state\n" msgstr "bağlantı yanlış durumda\n" -#: fe-exec.c:2390 +#: fe-exec.c:2658 msgid "invalid ExecStatusType code" msgstr "geçersiz ExecStatusType kodu" -#: fe-exec.c:2454 -#: fe-exec.c:2477 +#: fe-exec.c:2685 +msgid "PGresult is not an error result\n" +msgstr "PGresult bir hata sonucu değildir\n" + +#: fe-exec.c:2760 fe-exec.c:2783 #, c-format msgid "column number %d is out of range 0..%d" msgstr "%d kolon numarası, 0..%d sınırının dışında" -#: fe-exec.c:2470 -#, c-format -msgid "row number %d is out of range 0..%d" -msgstr "%d satır numarası, 0..%d sınırının dışında" - -#: fe-exec.c:2492 +#: fe-exec.c:2798 #, c-format msgid "parameter number %d is out of range 0..%d" msgstr "%d parametre sıra dışı: 0..%d" -#: fe-exec.c:2780 +#: fe-exec.c:3108 #, c-format msgid "could not interpret result from server: %s" msgstr "sunucudan gelen yanıt yorumlanamadı: %s" -#: fe-exec.c:3019 -#: fe-exec.c:3103 +#: fe-exec.c:3347 fe-exec.c:3431 msgid "incomplete multibyte character\n" msgstr "tamamlanmamış çoklu bayt karakteri\n" -#: fe-lobj.c:152 +#: fe-lobj.c:155 msgid "cannot determine OID of function lo_truncate\n" msgstr "lo_truncate fonksiyonunun OID'i belirlenemiyor\n" -#: fe-lobj.c:380 +#: fe-lobj.c:171 +msgid "argument of lo_truncate exceeds integer range\n" +msgstr "lo_truncate argümanı tamsayı aralığını aşıyor\n" + +#: fe-lobj.c:222 +msgid "cannot determine OID of function lo_truncate64\n" +msgstr "lo_truncate64 fonksiyonunun OID'i belirlenemiyor\n" + +#: fe-lobj.c:280 +msgid "argument of lo_read exceeds integer range\n" +msgstr "lo_read argümanı tamsayı aralığını aşıyor\n" + +#: fe-lobj.c:335 +msgid "argument of lo_write exceeds integer range\n" +msgstr "lo_write argümanı tamsayı aralığını aşıyor\n" + +#: fe-lobj.c:426 +msgid "cannot determine OID of function lo_lseek64\n" +msgstr "lo_lseek64 fonksiyonunun OID'i belirlenemiyor\n" + +#: fe-lobj.c:522 msgid "cannot determine OID of function lo_create\n" msgstr "lo_create 
fonksiyonunun OID'i belirlenemiyor\n" -#: fe-lobj.c:525 -#: fe-lobj.c:624 +#: fe-lobj.c:601 +msgid "cannot determine OID of function lo_tell64\n" +msgstr "lo_tell64 fonksiyonunun OID'i belirlenemiyor\n" + +#: fe-lobj.c:707 fe-lobj.c:816 #, c-format msgid "could not open file \"%s\": %s\n" msgstr "\"%s\" dosyası açılamadı: %s\n" -#: fe-lobj.c:575 +#: fe-lobj.c:762 #, c-format msgid "could not read from file \"%s\": %s\n" msgstr "\"%s\" dosyasından okuma hatası: %s\n" -#: fe-lobj.c:639 -#: fe-lobj.c:663 +#: fe-lobj.c:836 fe-lobj.c:860 #, c-format msgid "could not write to file \"%s\": %s\n" msgstr "\"%s\" dosyasına yazılamadı: %s\n" -#: fe-lobj.c:744 +#: fe-lobj.c:947 msgid "query to initialize large object functions did not return data\n" msgstr "large object fonksiyonlarını ilklendirecek sorgu veri döndürmedi\n" -#: fe-lobj.c:785 +#: fe-lobj.c:996 msgid "cannot determine OID of function lo_open\n" msgstr "lo_open fonksiyonunun OID'i belirlenemiyor\n" -#: fe-lobj.c:792 +#: fe-lobj.c:1003 msgid "cannot determine OID of function lo_close\n" msgstr "lo_close fonksiyonunun OID'i belirlenemiyor\n" -#: fe-lobj.c:799 +#: fe-lobj.c:1010 msgid "cannot determine OID of function lo_creat\n" msgstr "lo_create fonksiyonunun OID'i belirlenemiyor\n" -#: fe-lobj.c:806 +#: fe-lobj.c:1017 msgid "cannot determine OID of function lo_unlink\n" msgstr "lo_unlink fonksiyonunun OID'i belirlenemiyor\n" -#: fe-lobj.c:813 +#: fe-lobj.c:1024 msgid "cannot determine OID of function lo_lseek\n" msgstr "lo_lseek fonksiyonunun OID'i belirlenemiyor\n" -#: fe-lobj.c:820 +#: fe-lobj.c:1031 msgid "cannot determine OID of function lo_tell\n" msgstr "lo_tell fonksiyonunun OID'i belirlenemiyor\n" -#: fe-lobj.c:827 +#: fe-lobj.c:1038 msgid "cannot determine OID of function loread\n" msgstr "loread fonksiyonunun OID'i belirlenemiyor\n" -#: fe-lobj.c:834 +#: fe-lobj.c:1045 msgid "cannot determine OID of function lowrite\n" msgstr "lowrite fonksiyonunun OID'i belirlenemiyor\n" -#: fe-misc.c:270 +#: fe-misc.c:292 #, c-format msgid "integer of size %lu not supported by pqGetInt" -msgstr "%lu büyüklüğündeki tamsayılar pqGetInt tarafından desteklenmez." +msgstr "%lu büyüklüğündeki tamsayılar pqGetInt tarafından desteklenmez" -#: fe-misc.c:306 +#: fe-misc.c:328 #, c-format msgid "integer of size %lu not supported by pqPutInt" -msgstr "%lu büyüklüğündeki tamsayılar pqPutInt tarafından desteklenmez." 
+msgstr "%lu büyüklüğündeki tamsayılar pqPutInt tarafından desteklenmez" -#: fe-misc.c:585 -#: fe-misc.c:784 +#: fe-misc.c:639 fe-misc.c:840 msgid "connection not open\n" msgstr "bağlantı açık değil\n" -#: fe-misc.c:711 -#: fe-secure.c:364 -#: fe-secure.c:443 -#: fe-secure.c:524 -#: fe-secure.c:632 +#: fe-misc.c:809 fe-secure-openssl.c:229 fe-secure-openssl.c:338 +#: fe-secure.c:253 fe-secure.c:362 msgid "" "server closed the connection unexpectedly\n" "\tThis probably means the server terminated abnormally\n" @@ -612,15 +772,15 @@ msgstr "" "\tBu ileti sunucunun isteği işlemeden hemen önce ya da \n" "\tisteği işlerken kapatıldığı anlamına gelir.\n" -#: fe-misc.c:948 +#: fe-misc.c:1011 msgid "timeout expired\n" msgstr "zamanaşımı süresi sona derdi\n" -#: fe-misc.c:993 -msgid "socket not open\n" -msgstr "soket açık değil\n" +#: fe-misc.c:1056 +msgid "invalid socket\n" +msgstr "geçersiz sembol\n" -#: fe-misc.c:1016 +#: fe-misc.c:1079 #, c-format msgid "select() failed: %s\n" msgstr "select() başarısız oldu: %s\n" @@ -628,334 +788,399 @@ msgstr "select() başarısız oldu: %s\n" #: fe-protocol2.c:91 #, c-format msgid "invalid setenv state %c, probably indicative of memory corruption\n" -msgstr "Geçersiz setenv durumu %c, belleğin zarar görmesinin bir işareti olabilir\n" +msgstr "geçersiz setenv durumu %c, belleğin zarar görmesinin bir işareti olabilir\n" #: fe-protocol2.c:390 #, c-format msgid "invalid state %c, probably indicative of memory corruption\n" -msgstr "Geçersiz %c durumu, belleğin zarar görmesinin bir işareti olabilir\n" +msgstr "geçersiz %c durumu, belleğin zarar görmesinin bir işareti olabilir\n" -#: fe-protocol2.c:479 -#: fe-protocol3.c:186 +#: fe-protocol2.c:479 fe-protocol3.c:186 #, c-format msgid "message type 0x%02x arrived from server while idle" -msgstr "Sunucu boş durumdayken sunucudan 0x%02x ileti tipi geldi" +msgstr "sunucu boş durumdayken sunucudan 0x%02x ileti tipi geldi" -#: fe-protocol2.c:522 +#: fe-protocol2.c:529 #, c-format msgid "unexpected character %c following empty query response (\"I\" message)" -msgstr "Boş sorgu yanıtını takip eden geçersiz karakter:%c (\"I\" ileti)" +msgstr "boş sorgu yanıtını takip eden geçersiz karakter:%c (\"I\" ileti)" -#: fe-protocol2.c:576 +#: fe-protocol2.c:595 #, c-format msgid "server sent data (\"D\" message) without prior row description (\"T\" message)" -msgstr "Sunucu öncelikli satır tanımı olmadan veri (\"D\" ileti) gönderdi (\"T\" ileti)" +msgstr "sunucu öncelikli satır tanımı olmadan veri (\"D\" ileti) gönderdi (\"T\" ileti)" -#: fe-protocol2.c:592 +#: fe-protocol2.c:613 #, c-format msgid "server sent binary data (\"B\" message) without prior row description (\"T\" message)" -msgstr "Sunucu öncelikli satır tanımı olmadan ikili veri (\"D\" ileti) gönderdi (\"T\" ileti)" +msgstr "sunucu öncelikli satır tanımı olmadan ikili veri (\"B\" ileti) gönderdi (\"T\" ileti)" -#: fe-protocol2.c:612 -#: fe-protocol3.c:388 +#: fe-protocol2.c:633 fe-protocol3.c:412 #, c-format msgid "unexpected response from server; first received character was \"%c\"\n" msgstr "sunucudan beklenmeyen bir yanıt alındı; alınan ilk karakter\"%c\" idi\n" -#: fe-protocol2.c:833 -#: fe-protocol3.c:707 -msgid "out of memory for query result\n" -msgstr "Sorgu sonucu için yetersiz bellek\n" - -#: fe-protocol2.c:1280 -#: fe-protocol3.c:1637 -#, c-format -msgid "%s" -msgstr "%s" +#: fe-protocol2.c:762 fe-protocol2.c:937 fe-protocol3.c:627 fe-protocol3.c:854 +msgid "out of memory for query result" +msgstr "sorgu sonucu için yetersiz bellek" -#: fe-protocol2.c:1292 +#: 
fe-protocol2.c:1407 #, c-format msgid "lost synchronization with server, resetting connection" msgstr "sunucu ile eşzamanlama kayboldu, bağlantı yeniden açılıyor" -#: fe-protocol2.c:1426 -#: fe-protocol2.c:1458 -#: fe-protocol3.c:1840 +#: fe-protocol2.c:1541 fe-protocol2.c:1573 fe-protocol3.c:2089 #, c-format msgid "protocol error: id=0x%x\n" msgstr "protokol hatası: id=0x%x\n" -#: fe-protocol3.c:344 +#: fe-protocol3.c:368 msgid "server sent data (\"D\" message) without prior row description (\"T\" message)\n" -msgstr "Sunucu öncelikli satır tanımı olmadan veri (\"D\" ileti) gönderdi (\"T\" ileti)\n" +msgstr "sunucu öncelikli satır tanımı olmadan veri (\"D\" ileti) gönderdi (\"T\" ileti)\n" -#: fe-protocol3.c:409 +#: fe-protocol3.c:433 #, c-format msgid "message contents do not agree with length in message type \"%c\"\n" -msgstr "İleti içeriği,\"%c\" ileti tipinin içindeki uzunlukla aynı değil\n" +msgstr "ileti içeriği,\"%c\" ileti tipinin içindeki uzunlukla aynı değil\n" -#: fe-protocol3.c:430 +#: fe-protocol3.c:454 #, c-format msgid "lost synchronization with server: got message type \"%c\", length %d\n" msgstr "sunucu ile eşzamanlılık kayboldu: \"%c\" ileti tipi alındı, uzunluğu %d\n" -#: fe-protocol3.c:652 -msgid "unexpected field count in \"D\" message\n" -msgstr "\"D\" iletisinde beklenmeyen alan sayısı\n" +#: fe-protocol3.c:505 fe-protocol3.c:545 +msgid "insufficient data in \"T\" message" +msgstr "\"T\" mesajında yetersiz veri" + +#: fe-protocol3.c:578 +msgid "extraneous data in \"T\" message" +msgstr "\"T\" mesajında ilgisiz veri" + +#: fe-protocol3.c:691 +msgid "extraneous data in \"t\" message" +msgstr "\"t\" mesajında ilgisiz veri" + +#: fe-protocol3.c:762 fe-protocol3.c:794 fe-protocol3.c:812 +msgid "insufficient data in \"D\" message" +msgstr "\"D\" mesajında yetersiz veri" + +#: fe-protocol3.c:768 +msgid "unexpected field count in \"D\" message" +msgstr "\"D\" mesajında beklenmeyen alan sayısı" + +#: fe-protocol3.c:821 +msgid "extraneous data in \"D\" message" +msgstr "\"D\" mesajında ilgisiz veri" + +#: fe-protocol3.c:1005 +msgid "no error message available\n" +msgstr "hata mesajı bulunmuyor\n" #. translator: %s represents a digit string -#: fe-protocol3.c:798 -#: fe-protocol3.c:817 +#: fe-protocol3.c:1035 fe-protocol3.c:1054 #, c-format msgid " at character %s" -msgstr "%s. karakterde" +msgstr " %s. 
karakterde" -#: fe-protocol3.c:830 +#: fe-protocol3.c:1067 #, c-format msgid "DETAIL: %s\n" msgstr "AYRINTI: %s\n" -#: fe-protocol3.c:833 +#: fe-protocol3.c:1070 #, c-format msgid "HINT: %s\n" msgstr "İPUCU: %s\n" -#: fe-protocol3.c:836 +#: fe-protocol3.c:1073 #, c-format msgid "QUERY: %s\n" msgstr "SORGU: %s\n" -#: fe-protocol3.c:839 +#: fe-protocol3.c:1080 #, c-format msgid "CONTEXT: %s\n" msgstr "BAĞLAM: %s\n" -#: fe-protocol3.c:851 +#: fe-protocol3.c:1089 +#, c-format +msgid "SCHEMA NAME: %s\n" +msgstr "ŞEMA ADI: %s\n" + +#: fe-protocol3.c:1093 +#, c-format +msgid "TABLE NAME: %s\n" +msgstr "TABLO ADI: %s\n" + +#: fe-protocol3.c:1097 +#, c-format +msgid "COLUMN NAME: %s\n" +msgstr "SÜTUN ADI: %s\n" + +#: fe-protocol3.c:1101 +#, c-format +msgid "DATATYPE NAME: %s\n" +msgstr "VERİ TİPİ ADI: %s\n" + +#: fe-protocol3.c:1105 +#, c-format +msgid "CONSTRAINT NAME: %s\n" +msgstr "KISITLAMA ADI: %s\n" + +#: fe-protocol3.c:1117 msgid "LOCATION: " msgstr "YER: " -#: fe-protocol3.c:853 +#: fe-protocol3.c:1119 #, c-format msgid "%s, " msgstr "%s, " -#: fe-protocol3.c:855 +#: fe-protocol3.c:1121 #, c-format msgid "%s:%s" msgstr "%s:%s" -#: fe-protocol3.c:1079 +#: fe-protocol3.c:1316 #, c-format msgid "LINE %d: " msgstr "SATIR %d: " -#: fe-protocol3.c:1465 +#: fe-protocol3.c:1711 msgid "PQgetline: not doing text COPY OUT\n" msgstr "PQgetline: COPY OUT metnini yapmıyor\n" -#: fe-secure.c:265 -#, c-format -msgid "could not establish SSL connection: %s\n" -msgstr "SSL bağlantısı sağlanamadı: %s\n" - -#: fe-secure.c:369 -#: fe-secure.c:529 -#: fe-secure.c:1331 +#: fe-secure-openssl.c:234 fe-secure-openssl.c:343 fe-secure-openssl.c:1323 #, c-format msgid "SSL SYSCALL error: %s\n" msgstr "SSL SYSCALL hatası: %s\n" -#: fe-secure.c:376 -#: fe-secure.c:536 -#: fe-secure.c:1335 +#: fe-secure-openssl.c:241 fe-secure-openssl.c:350 fe-secure-openssl.c:1327 msgid "SSL SYSCALL error: EOF detected\n" msgstr "SSL SYSCALL hatası: EOF bulundu\n" -#: fe-secure.c:387 -#: fe-secure.c:547 -#: fe-secure.c:1344 +#: fe-secure-openssl.c:252 fe-secure-openssl.c:361 fe-secure-openssl.c:1336 #, c-format msgid "SSL error: %s\n" msgstr "SSL hatası: %s\n" -#: fe-secure.c:401 -#: fe-secure.c:561 +#: fe-secure-openssl.c:267 fe-secure-openssl.c:376 msgid "SSL connection has been closed unexpectedly\n" msgstr "SSL bağlantısı beklenmeyen şekilde sonlandırıldı\n" -#: fe-secure.c:407 -#: fe-secure.c:567 -#: fe-secure.c:1353 +#: fe-secure-openssl.c:273 fe-secure-openssl.c:382 fe-secure-openssl.c:1345 #, c-format msgid "unrecognized SSL error code: %d\n" msgstr "tanımlanamayan SSL hata kodu: %d\n" -#: fe-secure.c:451 -#, c-format -msgid "could not receive data from server: %s\n" -msgstr "Sunucudan veri alınamadı: %s\n" +#: fe-secure-openssl.c:494 +msgid "SSL certificate's name entry is missing\n" +msgstr "" +"SSL sertifikasının isim girişi eksik\n" +"\n" -#: fe-secure.c:639 -#, c-format -msgid "could not send data to server: %s\n" -msgstr "Sunucuya veri gönderilemedi: %s\n" +#: fe-secure-openssl.c:528 +msgid "SSL certificate's name contains embedded null\n" +msgstr "SSL sertifikasının ismi gömülü olarak null içeriyor\n" -#: fe-secure.c:746 +#: fe-secure-openssl.c:580 msgid "host name must be specified for a verified SSL connection\n" -msgstr "onaylı SSL bağlantısı için sunucu adı belirtilmelidir\n" +msgstr "onaylı SSL bağlantısı için makina adı belirtilmelidir\n" -#: fe-secure.c:765 +#: fe-secure-openssl.c:680 #, c-format -msgid "server common name \"%s\" does not match host name \"%s\"\n" -msgstr "Sunucu ortak adı olan \"%s\", \"%s\" olan 
host adı ile eşleşmiyor\n" +msgid "server certificate for \"%s\" does not match host name \"%s\"\n" +msgstr "\"%s\" için olan sunucu sertifikası \"%s\" olan makina adı ile eşleşmiyor\n" -#: fe-secure.c:897 +#: fe-secure-openssl.c:686 +msgid "could not get server's host name from server certificate\n" +msgstr "sunucunun makina adısunucu sertifikasından alınamadı\n" + +#: fe-secure-openssl.c:928 #, c-format msgid "could not create SSL context: %s\n" -msgstr "SSL içeriği yaratılamadı: %s\n" +msgstr "SSL bağlamı oluşturulamadı: %s\n" + +#: fe-secure-openssl.c:965 +#, c-format +msgid "could not read root certificate file \"%s\": %s\n" +msgstr "\"%s\"kök sertifika dosyası okunamadı: %s\n" + +#: fe-secure-openssl.c:993 +#, c-format +msgid "SSL library does not support CRL certificates (file \"%s\")\n" +msgstr "kurulu SSL kütüphanesi CRL sertifikalarını desteklemiyor (dosya adı \"%s\")\n" + +#: fe-secure-openssl.c:1021 +msgid "" +"could not get home directory to locate root certificate file\n" +"Either provide the file or change sslmode to disable server certificate verification.\n" +msgstr "" +"kök sertifika dosyasının ev dizini bulunamadı\n" +"Ya bir dosya adı belirtin, ya da sunucu sertifika onaylamasını kapatmak için sslmode'u kapatın.\n" + +#: fe-secure-openssl.c:1025 +#, c-format +msgid "" +"root certificate file \"%s\" does not exist\n" +"Either provide the file or change sslmode to disable server certificate verification.\n" +msgstr "" +"\"%s\" kök sertifika dosyası mevcut değil\n" +"Ya bu dosyayı oluşturun ya da sslmode ayarını sunucu sertifika onaylamasını kapatmak için değiştirin.\n" -#: fe-secure.c:1019 +#: fe-secure-openssl.c:1056 #, c-format msgid "could not open certificate file \"%s\": %s\n" msgstr "\"%s\" sertifikası açılamadı: %s\n" -#: fe-secure.c:1044 -#: fe-secure.c:1054 +#: fe-secure-openssl.c:1075 #, c-format msgid "could not read certificate file \"%s\": %s\n" msgstr "\"%s\" sertifikası okunamadı: %s\n" -#: fe-secure.c:1091 +#: fe-secure-openssl.c:1100 +#, c-format +msgid "could not establish SSL connection: %s\n" +msgstr "SSL bağlantısı sağlanamadı: %s\n" + +#: fe-secure-openssl.c:1154 #, c-format msgid "could not load SSL engine \"%s\": %s\n" msgstr "\"%s\" SSL motoru yüklenemedi: %s\n" -#: fe-secure.c:1103 +#: fe-secure-openssl.c:1166 #, c-format msgid "could not initialize SSL engine \"%s\": %s\n" msgstr "\"%s\" SSL motoru ilklendirilemedi: %s\n" -#: fe-secure.c:1119 +#: fe-secure-openssl.c:1182 #, c-format msgid "could not read private SSL key \"%s\" from engine \"%s\": %s\n" msgstr "\"%2$s\" motorundan \"%1$s\" SSL özel anahtarı okunamadı: %3$s\n" -#: fe-secure.c:1133 +#: fe-secure-openssl.c:1196 #, c-format msgid "could not load private SSL key \"%s\" from engine \"%s\": %s\n" msgstr "\"%2$s\" motorundan \"%1$s\" SSL özel anahtarı yüklenemedi: %3$s\n" -#: fe-secure.c:1170 +#: fe-secure-openssl.c:1233 #, c-format msgid "certificate present, but not private key file \"%s\"\n" -msgstr "Sertifika mevcut ancak özel anahtar mevcut değil \"%s\"\n" +msgstr "sertifika mevcut ancak özel anahtar mevcut değil \"%s\"\n" -#: fe-secure.c:1178 +#: fe-secure-openssl.c:1241 #, c-format msgid "private key file \"%s\" has group or world access; permissions should be u=rw (0600) or less\n" msgstr "\"%s\" özel anahtar dosyası herkes ya da grup tarafından erişilebilir durumda; dosyanın izinleri u=rw (0600) ya da daha az olmalı\n" -#: fe-secure.c:1189 +#: fe-secure-openssl.c:1252 #, c-format msgid "could not load private key file \"%s\": %s\n" msgstr "private key dosyası \"%s\" okunamıyor: 
%s\n" -#: fe-secure.c:1203 +#: fe-secure-openssl.c:1266 #, c-format msgid "certificate does not match private key file \"%s\": %s\n" -msgstr "Sertifika, \"%s\" özel anahtar dosyası ile uyuşmuyor: %s\n" +msgstr "sertifika, \"%s\" özel anahtar dosyası ile uyuşmuyor: %s\n" -#: fe-secure.c:1231 -#, c-format -msgid "could not read root certificate file \"%s\": %s\n" -msgstr "\"%s\"kök sertifika dosyası okunamadı: %s\n" - -#: fe-secure.c:1258 -#, c-format -msgid "SSL library does not support CRL certificates (file \"%s\")\n" -msgstr "Kurulu SSL kütüphanesi CRL sertifikalarını desteklemiyor (dosya adı \"%s\")\n" - -#: fe-secure.c:1285 -msgid "" -"could not get home directory to locate root certificate file\n" -"Either provide the file or change sslmode to disable server certificate verification.\n" -msgstr "" -"kök sertifika dosyasının ev dizini bulunamadı\n" -"Ya bir dosya adı belirtin, ya da sunucu sertifika onaylamasını kapatmak için sslmode'u kapatın.\n" - -#: fe-secure.c:1289 -#, c-format -msgid "" -"root certificate file \"%s\" does not exist\n" -"Either provide the file or change sslmode to disable server certificate verification.\n" -msgstr "" -"\"%s\" kök sertifika dosyası mevcut değil\n" -"Ya bu dosyayı oluşturun ya da sslmode ayarını sunucu sertifika onaylamasını kapatmak için değiştirin.\n" - -#: fe-secure.c:1372 +#: fe-secure-openssl.c:1366 #, c-format msgid "certificate could not be obtained: %s\n" msgstr "sertifika elde edilemedi: %s\n" -#: fe-secure.c:1400 -msgid "SSL certificate's common name contains embedded null\n" -msgstr "SSL sertifikasının ortak adı (common name) gömülü olarak null içeriyor\n" - -#: fe-secure.c:1476 +#: fe-secure-openssl.c:1458 #, c-format msgid "no SSL error reported" msgstr "SSL hatası raporlanmadı" -#: fe-secure.c:1485 +#: fe-secure-openssl.c:1467 #, c-format msgid "SSL error code %lu" msgstr "SSL hata kodu: %lu" -#~ msgid "could not get home directory to locate client certificate files\n" -#~ msgstr "İstemci sertifika dosyalarının olduğu ev dizini bulunamadı\n" +#: fe-secure.c:261 +#, c-format +msgid "could not receive data from server: %s\n" +msgstr "sunucudan veri alınamadı: %s\n" -#~ msgid "" -#~ "verified SSL connections are only supported when connecting to a host name" -#~ msgstr "" -#~ "Onaylanmış SSL bağlantıları sadece bir sunucu adına bağlanıldığı zaman " -#~ "geçerlidir" +#: fe-secure.c:369 +#, c-format +msgid "could not send data to server: %s\n" +msgstr "sunucuya veri gönderilemedi: %s\n" -#~ msgid "could not open private key file \"%s\": %s\n" -#~ msgstr "\"%s\" özel anahtar dosyası açılamadı: %s\n" +#: win32.c:317 +#, c-format +msgid "unrecognized socket error: 0x%08X/%d" +msgstr "bilinmeyen soket hatası: 0x%08X/%d" -#~ msgid "private key file \"%s\" changed during execution\n" -#~ msgstr "\"%s\" özel anahtar dosyası çalışma anında açılamadı\n" +#~ msgid "certificate could not be validated: %s\n" +#~ msgstr "sertifika doğrulanamadı: %s\n" -#~ msgid "could not read private key file \"%s\": %s\n" -#~ msgstr "\"%s\" özel anahtar dosyası okunamadı: %s\n" +#~ msgid "private key file \"%s\" has wrong permissions\n" +#~ msgstr "\"%s\" özel anahtarı yanlış izinlere sahip\n" -#~ msgid "invalid sslverify value: \"%s\"\n" -#~ msgstr "geçersiz sslverify değeri: \"%s\"\n" +#~ msgid "invalid value of PGSSLKEY environment variable\n" +#~ msgstr "PGSSLKEY ortam değişkeni için geçersiz değer\n" -#~ msgid "root certificate file \"%s\" does not exist" -#~ msgstr "kök sertifika dosyası \"%s\" mevcut değildir" +#~ msgid "could not get user information\n" +#~ 
msgstr "kullanıcı bilgisi alınamadı\n" -#~ msgid "error querying socket: %s\n" -#~ msgstr "soketi sorgularken hata oluştu: %s\n" +#~ msgid "server common name \"%s\" does not resolve to %ld.%ld.%ld.%ld\n" +#~ msgstr "Sunucu ortak adı olan \"%s\" %ld.%ld.%ld.%ld adresine çözülemiyor\n" + +#~ msgid "unsupported protocol\n" +#~ msgstr "desteklenmeyen protokol\n" #~ msgid "could not get information about host \"%s\": %s\n" #~ msgstr "\"%s\" sunucusu hakkında bilgi alınamadı: %s\n" -#~ msgid "unsupported protocol\n" -#~ msgstr "desteklenmeyen protokol\n" +#~ msgid "error querying socket: %s\n" +#~ msgstr "soketi sorgularken hata oluştu: %s\n" -#~ msgid "server common name \"%s\" does not resolve to %ld.%ld.%ld.%ld\n" -#~ msgstr "" -#~ "Sunucu ortak adı olan \"%s\" %ld.%ld.%ld.%ld adresine çözülemiyor\n" +#~ msgid "root certificate file \"%s\" does not exist" +#~ msgstr "kök sertifika dosyası \"%s\" mevcut değildir" -#~ msgid "could not get user information\n" -#~ msgstr "kullanıcı bilgisi alınamadı\n" +#~ msgid "invalid sslverify value: \"%s\"\n" +#~ msgstr "geçersiz sslverify değeri: \"%s\"\n" -#~ msgid "invalid value of PGSSLKEY environment variable\n" -#~ msgstr "PGSSLKEY ortam değişkeni için geçersiz değer\n" +#~ msgid "could not read private key file \"%s\": %s\n" +#~ msgstr "\"%s\" özel anahtar dosyası okunamadı: %s\n" -#~ msgid "private key file \"%s\" has wrong permissions\n" -#~ msgstr "\"%s\" özel anahtarı yanlış izinlere sahip\n" +#~ msgid "private key file \"%s\" changed during execution\n" +#~ msgstr "\"%s\" özel anahtar dosyası çalışma anında açılamadı\n" -#~ msgid "certificate could not be validated: %s\n" -#~ msgstr "sertifika doğrulanamadı: %s\n" +#~ msgid "could not open private key file \"%s\": %s\n" +#~ msgstr "\"%s\" özel anahtar dosyası açılamadı: %s\n" + +#~ msgid "verified SSL connections are only supported when connecting to a host name" +#~ msgstr "Onaylanmış SSL bağlantıları sadece bir sunucu adına bağlanıldığı zaman geçerlidir" + +#~ msgid "could not get home directory to locate client certificate files\n" +#~ msgstr "İstemci sertifika dosyalarının olduğu ev dizini bulunamadı\n" + +#~ msgid "socket not open\n" +#~ msgstr "soket açık değil\n" + +#~ msgid "could not get home directory to locate service definition file" +#~ msgstr "servis dosyasının olduğu ev dizini bulunamadı" + +#~ msgid "setsockopt(SO_KEEPALIVE) failed: %s\n" +#~ msgstr "setsockopt(SO_KEEPALIVE) başarısız oldu: %s\n" + +#~ msgid "setsockopt(TCP_KEEPINTVL) failed: %s\n" +#~ msgstr "setsockopt(TCP_KEEPINTVL) başarısız oldu: %s\n" + +#~ msgid "setsockopt(TCP_KEEPALIVE) failed: %s\n" +#~ msgstr "setsockopt(SO_REUSEADDR) başarısız oldu: %s\n" + +#~ msgid "setsockopt(TCP_KEEPIDLE) failed: %s\n" +#~ msgstr "setsockopt(TCP_KEEPIDLE) başarısız oldu: %s\n" + +#~ msgid "could not restore non-blocking mode on socket: %s\n" +#~ msgstr "could not restore non-blocking mode on socket: %s\n" + +#~ msgid "Kerberos 5 authentication rejected: %*s\n" +#~ msgstr "Kerberos 5 yetkilendirmesi kabul edilmedi: %*s\n" + +#~ msgid "could not set socket to blocking mode: %s\n" +#~ msgstr "soket engelleme moduna ayarlanamadı: %s\n" diff --git a/src/interfaces/libpq/pqexpbuffer.c b/src/interfaces/libpq/pqexpbuffer.c index f4aa7c9cef..43c36c3bff 100644 --- a/src/interfaces/libpq/pqexpbuffer.c +++ b/src/interfaces/libpq/pqexpbuffer.c @@ -15,7 +15,7 @@ * a usable vsnprintf(), then a copy of our own implementation of it will * be linked into libpq. 
* - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/interfaces/libpq/pqexpbuffer.c @@ -233,6 +233,7 @@ enlargePQExpBuffer(PQExpBuffer str, size_t needed) void printfPQExpBuffer(PQExpBuffer str, const char *fmt,...) { + int save_errno = errno; va_list args; bool done; @@ -244,6 +245,7 @@ printfPQExpBuffer(PQExpBuffer str, const char *fmt,...) /* Loop in case we have to retry after enlarging the buffer. */ do { + errno = save_errno; va_start(args, fmt); done = appendPQExpBufferVA(str, fmt, args); va_end(args); @@ -261,6 +263,7 @@ printfPQExpBuffer(PQExpBuffer str, const char *fmt,...) void appendPQExpBuffer(PQExpBuffer str, const char *fmt,...) { + int save_errno = errno; va_list args; bool done; @@ -270,6 +273,7 @@ appendPQExpBuffer(PQExpBuffer str, const char *fmt,...) /* Loop in case we have to retry after enlarging the buffer. */ do { + errno = save_errno; va_start(args, fmt); done = appendPQExpBufferVA(str, fmt, args); va_end(args); @@ -281,6 +285,9 @@ appendPQExpBuffer(PQExpBuffer str, const char *fmt,...) * Shared guts of printfPQExpBuffer/appendPQExpBuffer. * Attempt to format data and append it to str. Returns true if done * (either successful or hard failure), false if need to retry. + * + * Caution: callers must be sure to preserve their entry-time errno + * when looping, in case the fmt contains "%m". */ static bool appendPQExpBufferVA(PQExpBuffer str, const char *fmt, va_list args) @@ -295,76 +302,50 @@ appendPQExpBufferVA(PQExpBuffer str, const char *fmt, va_list args) */ if (str->maxlen > str->len + 16) { - /* - * Note: we intentionally leave one byte unused, as a guard against - * old broken versions of vsnprintf. - */ - avail = str->maxlen - str->len - 1; - - errno = 0; + avail = str->maxlen - str->len; nprinted = vsnprintf(str->data + str->len, avail, fmt, args); /* - * If vsnprintf reports an error other than ENOMEM, fail. + * If vsnprintf reports an error, fail (we assume this means there's + * something wrong with the format string). */ - if (nprinted < 0 && errno != 0 && errno != ENOMEM) + if (unlikely(nprinted < 0)) { markPQExpBufferBroken(str); return true; } - /* - * Note: some versions of vsnprintf return the number of chars - * actually stored, not the total space needed as C99 specifies. And - * at least one returns -1 on failure. Be conservative about - * believing whether the print worked. - */ - if (nprinted >= 0 && (size_t) nprinted < avail - 1) + if ((size_t) nprinted < avail) { /* Success. Note nprinted does not include trailing null. */ str->len += nprinted; return true; } - if (nprinted >= 0 && (size_t) nprinted > avail) - { - /* - * This appears to be a C99-compliant vsnprintf, so believe its - * estimate of the required space. (If it's wrong, the logic will - * still work, but we may loop multiple times.) Note that the - * space needed should be only nprinted+1 bytes, but we'd better - * allocate one more than that so that the test above will succeed - * next time. - * - * In the corner case where the required space just barely - * overflows, fail. - */ - if (nprinted > INT_MAX - 2) - { - markPQExpBufferBroken(str); - return true; - } - needed = nprinted + 2; - } - else + /* + * We assume a C99-compliant vsnprintf, so believe its estimate of the + * required space, and add one for the trailing null. (If it's wrong, + * the logic will still work, but we may loop multiple times.) 
+ * + * Choke if the required space would exceed INT_MAX, since str->maxlen + * can't represent more than that. + */ + if (unlikely(nprinted > INT_MAX - 1)) { - /* - * Buffer overrun, and we don't know how much space is needed. - * Estimate twice the previous buffer size, but not more than - * INT_MAX. - */ - if (avail >= INT_MAX / 2) - needed = INT_MAX; - else - needed = avail * 2; + markPQExpBufferBroken(str); + return true; } + needed = nprinted + 1; } else { /* * We have to guess at how much to enlarge, since we're skipping the - * formatting work. + * formatting work. Fortunately, because of enlargePQExpBuffer's + * preference for power-of-2 sizes, this number isn't very sensitive; + * the net effect is that we'll double the buffer size before trying + * to run vsnprintf, which seems sensible. */ needed = 32; } diff --git a/src/interfaces/libpq/pqexpbuffer.h b/src/interfaces/libpq/pqexpbuffer.h index 19633f9b79..771602af09 100644 --- a/src/interfaces/libpq/pqexpbuffer.h +++ b/src/interfaces/libpq/pqexpbuffer.h @@ -15,7 +15,7 @@ * a usable vsnprintf(), then a copy of our own implementation of it will * be linked into libpq. * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/interfaces/libpq/pqexpbuffer.h diff --git a/src/interfaces/libpq/pthread-win32.c b/src/interfaces/libpq/pthread-win32.c index 0e0d3eeb88..f6d675d5c4 100644 --- a/src/interfaces/libpq/pthread-win32.c +++ b/src/interfaces/libpq/pthread-win32.c @@ -3,7 +3,7 @@ * pthread-win32.c * partial pthread implementation for win32 * -* Copyright (c) 2004-2017, PostgreSQL Global Development Group +* Copyright (c) 2004-2018, PostgreSQL Global Development Group * IDENTIFICATION * src/interfaces/libpq/pthread-win32.c * diff --git a/src/interfaces/libpq/test/Makefile b/src/interfaces/libpq/test/Makefile index 01041fb15f..4832fab9d2 100644 --- a/src/interfaces/libpq/test/Makefile +++ b/src/interfaces/libpq/test/Makefile @@ -3,11 +3,11 @@ top_builddir = ../../../.. include $(top_builddir)/src/Makefile.global ifeq ($(PORTNAME), win32) -LDLIBS += -lws2_32 +LDFLAGS += -lws2_32 endif override CPPFLAGS := -I$(libpq_srcdir) $(CPPFLAGS) -override LDLIBS := $(libpq_pgport) $(LDLIBS) +LDFLAGS_INTERNAL += $(libpq_pgport) PROGS = uri-regress diff --git a/src/interfaces/libpq/test/uri-regress.c b/src/interfaces/libpq/test/uri-regress.c index fac849bab2..4590f37008 100644 --- a/src/interfaces/libpq/test/uri-regress.c +++ b/src/interfaces/libpq/test/uri-regress.c @@ -7,7 +7,7 @@ * prints out the values from the parsed PQconninfoOption struct that differ * from the defaults (obtained from PQconndefaults). * - * Portions Copyright (c) 2012-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 2012-2018, PostgreSQL Global Development Group * * IDENTIFICATION * src/interfaces/libpq/test/uri-regress.c diff --git a/src/interfaces/libpq/win32.c b/src/interfaces/libpq/win32.c index 11abb0be04..79768e4e0b 100644 --- a/src/interfaces/libpq/win32.c +++ b/src/interfaces/libpq/win32.c @@ -15,7 +15,7 @@ * The error constants are taken from the Frambak Bakfram LGSOCKET * library guys who in turn took them from the Winsock FAQ. 
* - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * */ diff --git a/src/makefiles/pgxs.mk b/src/makefiles/pgxs.mk index c27004ecfb..070d151018 100644 --- a/src/makefiles/pgxs.mk +++ b/src/makefiles/pgxs.mk @@ -38,6 +38,12 @@ # SCRIPTS -- script files (not binaries) to install into $PREFIX/bin # SCRIPTS_built -- script files (not binaries) to install into $PREFIX/bin, # which need to be built first +# HEADERS -- files to install into $(includedir_server)/$MODULEDIR/$MODULE_big +# HEADERS_built -- as above but built first (but NOT cleaned) +# HEADERS_$(MODULE) -- files to install into +# $(includedir_server)/$MODULEDIR/$MODULE; the value of $MODULE must be +# listed in MODULES or MODULE_big +# HEADERS_built_$(MODULE) -- as above but built first (also NOT cleaned) # REGRESS -- list of regression test cases (without suffix) # REGRESS_OPTS -- additional switches to pass to pg_regress # NO_INSTALLCHECK -- don't define an installcheck target, useful e.g. if @@ -45,7 +51,9 @@ # EXTRA_CLEAN -- extra files to remove in 'make clean' # PG_CPPFLAGS -- will be added to CPPFLAGS # PG_LIBS -- will be added to PROGRAM link line +# PG_LIBS_INTERNAL -- same, for references to libraries within build tree # SHLIB_LINK -- will be added to MODULE_big link line +# SHLIB_LINK_INTERNAL -- same, for references to libraries within build tree # PG_CONFIG -- path to pg_config program for the PostgreSQL installation # to build against (typically just "pg_config" to use the first one in # your PATH) @@ -60,6 +68,12 @@ endif ifdef PGXS + +# External extensions must assume generated headers are available +NO_GENERATED_HEADERS=yes +# The temp-install rule won't work, either +NO_TEMP_INSTALL=yes + # We assume that we are in src/makefiles/, so top is ... top_builddir := $(dir $(PGXS))../.. include $(top_builddir)/src/Makefile.global @@ -73,7 +87,8 @@ endif ifeq ($(FLEX),) FLEX = flex endif -endif + +endif # PGXS override CPPFLAGS := -I. -I$(srcdir) $(CPPFLAGS) @@ -85,13 +100,16 @@ endif ifdef MODULEDIR datamoduledir := $(MODULEDIR) docmoduledir := $(MODULEDIR) +incmoduledir := $(MODULEDIR) else ifdef EXTENSION datamoduledir := extension docmoduledir := extension +incmoduledir := extension else datamoduledir := contrib docmoduledir := contrib +incmoduledir := contrib endif endif @@ -99,7 +117,90 @@ ifdef PG_CPPFLAGS override CPPFLAGS := $(PG_CPPFLAGS) $(CPPFLAGS) endif -all: $(PROGRAM) $(DATA_built) $(SCRIPTS_built) $(addsuffix $(DLSUFFIX), $(MODULES)) $(addsuffix .control, $(EXTENSION)) +# logic for HEADERS_* stuff + +# get list of all names used with or without built_ prefix +# note that use of HEADERS_built_foo will get both "foo" and "built_foo", +# we cope with that later when filtering this list against MODULES. +# If someone wants to name a module "built_foo", they can do that and it +# works, but if they have MODULES = foo built_foo then they will need to +# force building of all headers and use HEADERS_built_foo and +# HEADERS_built_built_foo. 
+HEADER_alldirs := $(patsubst HEADERS_%,%,$(filter HEADERS_%, $(.VARIABLES))) +HEADER_alldirs += $(patsubst HEADERS_built_%,%,$(filter HEADERS_built_%, $(.VARIABLES))) + +# collect all names of built headers to use as a dependency +HEADER_allbuilt := + +ifdef MODULE_big + +# we can unconditionally add $(MODULE_big) here, because we will strip it +# back out below if it turns out not to actually define any headers. +HEADER_dirs := $(MODULE_big) +HEADER_unbuilt_$(MODULE_big) = $(HEADERS) +HEADER_built_$(MODULE_big) = $(HEADERS_built) +HEADER_allbuilt += $(HEADERS_built) +# treat "built" as an exclusion below as well as "built_foo" +HEADER_xdirs := built built_$(MODULE_big) + +else # not MODULE_big, so check MODULES + +# HEADERS is an error in the absence of MODULE_big to provide a dir name +ifdef HEADERS +$(error HEADERS requires MODULE_big to be set) +endif +# make list of modules that have either HEADERS_foo or HEADERS_built_foo +HEADER_dirs := $(foreach m,$(MODULES),$(if $(filter $(m) built_$(m),$(HEADER_alldirs)),$(m))) +# make list of conflicting names to exclude +HEADER_xdirs := $(addprefix built_,$(HEADER_dirs)) + +endif # MODULE_big or MODULES + +# HEADERS_foo requires that "foo" is in MODULES as a sanity check +ifneq (,$(filter-out $(HEADER_dirs) $(HEADER_xdirs),$(HEADER_alldirs))) +$(error $(patsubst %,HEADERS_%,$(filter-out $(HEADER_dirs) $(HEADER_xdirs),$(HEADER_alldirs))) defined with no module) +endif + +# assign HEADER_unbuilt_foo and HEADER_built_foo, but make sure +# that "built" takes precedence in the case of conflict, by removing +# conflicting module names when matching the unbuilt name +$(foreach m,$(filter-out $(HEADER_xdirs),$(HEADER_dirs)),$(eval HEADER_unbuilt_$(m) += $$(HEADERS_$(m)))) +$(foreach m,$(HEADER_dirs),$(eval HEADER_built_$(m) += $$(HEADERS_built_$(m)))) +$(foreach m,$(HEADER_dirs),$(eval HEADER_allbuilt += $$(HEADERS_built_$(m)))) + +# expand out the list of headers for each dir, attaching source prefixes +header_file_list = $(HEADER_built_$(1)) $(addprefix $(srcdir)/,$(HEADER_unbuilt_$(1))) +$(foreach m,$(HEADER_dirs),$(eval HEADER_files_$(m) := $$(call header_file_list,$$(m)))) + +# note that the caller's HEADERS* vars have all been expanded now, and +# later changes will have no effect. 
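[Editorial note — illustrative sketch, not part of the patch] The consumer-side view of the new HEADERS_* support may be easier to follow than the filtering logic above. Assuming a hypothetical extension that builds two plain modules (my_ext.c and my_ext_gen.c), a PGXS Makefile could request per-module header installation as sketched below; every name here (my_ext, my_ext_gen, the .h files, the @EXTVERSION@ template) is invented for illustration. Note that, per the "already expanded" comment just above, the HEADERS_* assignments must appear before the include of $(PGXS).

# hypothetical extension Makefile exercising the new HEADERS_* support (sketch only)
MODULES = my_ext my_ext_gen
EXTENSION = my_ext
DATA = my_ext--1.0.sql

HEADERS_my_ext = my_ext.h                  # installed under $(includedir_server)/$(MODULEDIR)/my_ext/
HEADERS_built_my_ext_gen = my_ext_gen.h    # built before install; deliberately not removed by 'make clean'

PG_CONFIG = pg_config
PGXS := $(shell $(PG_CONFIG) --pgxs)
include $(PGXS)

# extra rules conventionally follow the include; the recipe line must be indented with a tab
my_ext_gen.h: my_ext_gen.h.in
	sed 's/@EXTVERSION@/1.0/' $< > $@

In this sketch HEADERS_my_ext is accepted only because my_ext is listed in MODULES (or would be MODULE_big), a bare HEADERS without MODULE_big is rejected with an error, and my_ext_gen.h is built by "make all" because it ends up in HEADER_allbuilt.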
+ +# remove entries in HEADER_dirs that produced an empty list of files, +# to ensure we don't try and install them +HEADER_dirs := $(foreach m,$(HEADER_dirs),$(if $(strip $(HEADER_files_$(m))),$(m))) + +# Functions for generating install/uninstall commands; the blank lines +# before the "endef" are required, don't lose them +# $(call install_headers,dir,headers) +define install_headers +$(MKDIR_P) '$(DESTDIR)$(includedir_server)/$(incmoduledir)/$(1)/' +$(INSTALL_DATA) $(2) '$(DESTDIR)$(includedir_server)/$(incmoduledir)/$(1)/' + +endef +# $(call uninstall_headers,dir,headers) +define uninstall_headers +rm -f $(addprefix '$(DESTDIR)$(includedir_server)/$(incmoduledir)/$(1)'/, $(notdir $(2))) + +endef + +# end of HEADERS_* stuff + + +all: $(PROGRAM) $(DATA_built) $(HEADER_allbuilt) $(SCRIPTS_built) $(addsuffix $(DLSUFFIX), $(MODULES)) $(addsuffix .control, $(EXTENSION)) + +ifeq ($(with_llvm), yes) +all: $(addsuffix .bc, $(MODULES)) $(patsubst %.o,%.bc, $(OBJS)) +endif ifdef MODULE_big # shared library parameters @@ -123,6 +224,9 @@ ifneq (,$(DATA_TSEARCH)) endif # DATA_TSEARCH ifdef MODULES $(INSTALL_SHLIB) $(addsuffix $(DLSUFFIX), $(MODULES)) '$(DESTDIR)$(pkglibdir)/' +ifeq ($(with_llvm), yes) + $(foreach mod, $(MODULES), $(call install_llvm_module,$(mod),$(mod).bc)) +endif # with_llvm endif # MODULES ifdef DOCS ifdef docdir @@ -138,8 +242,14 @@ endif # SCRIPTS ifdef SCRIPTS_built $(INSTALL_SCRIPT) $(SCRIPTS_built) '$(DESTDIR)$(bindir)/' endif # SCRIPTS_built - +ifneq (,$(strip $(HEADER_dirs))) + $(foreach dir,$(HEADER_dirs),$(call install_headers,$(dir),$(HEADER_files_$(dir)))) +endif # HEADERS ifdef MODULE_big +ifeq ($(with_llvm), yes) + $(call install_llvm_module,$(MODULE_big),$(OBJS)) +endif # with_llvm + install: install-lib endif # MODULE_big @@ -183,7 +293,10 @@ ifneq (,$(DATA_TSEARCH)) endif ifdef MODULES rm -f $(addprefix '$(DESTDIR)$(pkglibdir)'/, $(addsuffix $(DLSUFFIX), $(MODULES))) -endif +ifeq ($(with_llvm), yes) + $(foreach mod, $(MODULES), $(call uninstall_llvm_module,$(mod))) +endif # with_llvm +endif # MODULES ifdef DOCS rm -f $(addprefix '$(DESTDIR)$(docdir)/$(docmoduledir)'/, $(DOCS)) endif @@ -196,15 +309,23 @@ endif ifdef SCRIPTS_built rm -f $(addprefix '$(DESTDIR)$(bindir)'/, $(SCRIPTS_built)) endif +ifneq (,$(strip $(HEADER_dirs))) + $(foreach dir,$(HEADER_dirs),$(call uninstall_headers,$(dir),$(HEADER_files_$(dir)))) +endif # HEADERS ifdef MODULE_big +ifeq ($(with_llvm), yes) + $(call uninstall_llvm_module,$(MODULE_big)) +endif # with_llvm + uninstall: uninstall-lib endif # MODULE_big clean: ifdef MODULES - rm -f $(addsuffix $(DLSUFFIX), $(MODULES)) $(addsuffix .o, $(MODULES)) $(if $(PGFILEDESC),$(WIN32RES)) + rm -f $(addsuffix $(DLSUFFIX), $(MODULES)) $(addsuffix .o, $(MODULES)) $(if $(PGFILEDESC),$(WIN32RES)) \ + $(addsuffix .bc, $(MODULES)) endif ifdef DATA_built rm -f $(DATA_built) @@ -216,7 +337,7 @@ ifdef PROGRAM rm -f $(PROGRAM)$(X) endif ifdef OBJS - rm -f $(OBJS) + rm -f $(OBJS) $(patsubst %.o,%.bc, $(OBJS)) endif ifdef EXTRA_CLEAN rm -rf $(EXTRA_CLEAN) @@ -282,10 +403,12 @@ check: else check: submake $(REGRESS_PREP) $(pg_regress_check) $(REGRESS_OPTS) $(REGRESS) +endif +endif # REGRESS +ifndef NO_TEMP_INSTALL temp-install: EXTRA_INSTALL+=$(subdir) endif -endif # REGRESS # STANDARD RULES @@ -297,5 +420,5 @@ endif ifdef PROGRAM $(PROGRAM): $(OBJS) - $(CC) $(CFLAGS) $(OBJS) $(PG_LIBS) $(LDFLAGS) $(LDFLAGS_EX) $(LIBS) -o $@$(X) + $(CC) $(CFLAGS) $(OBJS) $(PG_LIBS_INTERNAL) $(LDFLAGS) $(LDFLAGS_EX) $(PG_LIBS) $(LIBS) -o $@$(X) endif diff --git 
a/src/pl/plperl/GNUmakefile b/src/pl/plperl/GNUmakefile index 191f74067a..9b1c514101 100644 --- a/src/pl/plperl/GNUmakefile +++ b/src/pl/plperl/GNUmakefile @@ -12,12 +12,13 @@ override CPPFLAGS += -DPLPERL_HAVE_UID_GID override CPPFLAGS += -Wno-comment endif -# Note: we need to make sure that the CORE directory is included last, +# Note: we need to include the perl_includespec directory last, # probably because it sometimes contains some header files with names # that clash with some of ours, or with some that we include, notably on # Windows. -override CPPFLAGS := -I. -I$(srcdir) $(CPPFLAGS) $(perl_embed_ccflags) -I$(perl_archlibexp)/CORE +override CPPFLAGS := -I. -I$(srcdir) $(CPPFLAGS) $(perl_embed_ccflags) $(perl_includespec) +# this is often, but not always, the same directory named by perl_includespec rpathdir = $(perl_archlibexp)/CORE PGFILEDESC = "PL/Perl - procedural language" @@ -55,7 +56,7 @@ endif # win32 SHLIB_LINK = $(perl_embed_ldflags) REGRESS_OPTS = --dbname=$(PL_TESTDB) --load-extension=plperl --load-extension=plperlu -REGRESS = plperl plperl_lc plperl_trigger plperl_shared plperl_elog plperl_util plperl_init plperlu plperl_array +REGRESS = plperl plperl_lc plperl_trigger plperl_shared plperl_elog plperl_util plperl_init plperlu plperl_array plperl_call plperl_transaction # if Perl can support two interpreters in one backend, # test plperl-and-plperlu cases ifneq ($(PERL),) @@ -81,13 +82,14 @@ perlchunks.h: $(PERLCHUNKS) all: all-lib -SPI.c: SPI.xs plperl_helpers.h - @if [ x"$(perl_privlibexp)" = x"" ]; then echo "configure switch --with-perl was not specified."; exit 1; fi - $(PERL) $(XSUBPPDIR)/ExtUtils/xsubpp -typemap $(perl_privlibexp)/ExtUtils/typemap $< >$@ - -Util.c: Util.xs plperl_helpers.h +%.c: %.xs @if [ x"$(perl_privlibexp)" = x"" ]; then echo "configure switch --with-perl was not specified."; exit 1; fi +# xsubpp -output option is required for coverage+vpath, but requires Perl 5.9.3 +ifeq ($(enable_coverage)$(vpath_build),yesyes) + $(PERL) $(XSUBPPDIR)/ExtUtils/xsubpp -typemap $(perl_privlibexp)/ExtUtils/typemap -output $@ $< +else $(PERL) $(XSUBPPDIR)/ExtUtils/xsubpp -typemap $(perl_privlibexp)/ExtUtils/typemap $< >$@ +endif install: all install-lib install-data @@ -99,7 +101,7 @@ uninstall: uninstall-lib uninstall-data install-data: installdirs $(INSTALL_DATA) $(addprefix $(srcdir)/, $(DATA)) '$(DESTDIR)$(datadir)/extension/' - $(INSTALL_DATA) $(srcdir)/plperl.h $(srcdir)/ppport.h '$(DESTDIR)$(includedir_server)' + $(INSTALL_DATA) $(srcdir)/plperl.h $(srcdir)/ppport.h $(srcdir)/plperl_helpers.h '$(DESTDIR)$(includedir_server)' uninstall-data: rm -f $(addprefix '$(DESTDIR)$(datadir)/extension'/, $(notdir $(DATA))) diff --git a/src/pl/plperl/SPI.xs b/src/pl/plperl/SPI.xs index d9e6f579d4..b98c547e8b 100644 --- a/src/pl/plperl/SPI.xs +++ b/src/pl/plperl/SPI.xs @@ -152,6 +152,15 @@ spi_spi_cursor_close(sv) plperl_spi_cursor_close(cursor); pfree(cursor); +void +spi_spi_commit() + CODE: + plperl_spi_commit(); + +void +spi_spi_rollback() + CODE: + plperl_spi_rollback(); BOOT: items = 0; /* avoid 'unused variable' warning */ diff --git a/src/pl/plperl/expected/plperl.out b/src/pl/plperl/expected/plperl.out index 14df5f42df..d8a1ff5dd8 100644 --- a/src/pl/plperl/expected/plperl.out +++ b/src/pl/plperl/expected/plperl.out @@ -214,8 +214,10 @@ CREATE OR REPLACE FUNCTION perl_record_set() RETURNS SETOF record AS $$ return undef; $$ LANGUAGE plperl; SELECT perl_record_set(); -ERROR: set-valued function called in context that cannot accept a set -CONTEXT: PL/Perl 
function "perl_record_set" + perl_record_set +----------------- +(0 rows) + SELECT * FROM perl_record_set(); ERROR: a column definition list is required for functions returning "record" LINE 1: SELECT * FROM perl_record_set(); @@ -233,7 +235,7 @@ CREATE OR REPLACE FUNCTION perl_record_set() RETURNS SETOF record AS $$ ]; $$ LANGUAGE plperl; SELECT perl_record_set(); -ERROR: set-valued function called in context that cannot accept a set +ERROR: function returning record called in context that cannot accept type record CONTEXT: PL/Perl function "perl_record_set" SELECT * FROM perl_record_set(); ERROR: a column definition list is required for functions returning "record" @@ -250,7 +252,7 @@ CREATE OR REPLACE FUNCTION perl_record_set() RETURNS SETOF record AS $$ ]; $$ LANGUAGE plperl; SELECT perl_record_set(); -ERROR: set-valued function called in context that cannot accept a set +ERROR: function returning record called in context that cannot accept type record CONTEXT: PL/Perl function "perl_record_set" SELECT * FROM perl_record_set(); ERROR: a column definition list is required for functions returning "record" @@ -387,6 +389,44 @@ $$ LANGUAGE plperl; SELECT * FROM foo_set_bad(); ERROR: Perl hash contains nonexistent column "z" CONTEXT: PL/Perl function "foo_set_bad" +CREATE DOMAIN orderedfootype AS footype CHECK ((VALUE).x <= (VALUE).y); +CREATE OR REPLACE FUNCTION foo_ordered() RETURNS orderedfootype AS $$ + return {x => 3, y => 4}; +$$ LANGUAGE plperl; +SELECT * FROM foo_ordered(); + x | y +---+--- + 3 | 4 +(1 row) + +CREATE OR REPLACE FUNCTION foo_ordered() RETURNS orderedfootype AS $$ + return {x => 5, y => 4}; +$$ LANGUAGE plperl; +SELECT * FROM foo_ordered(); -- fail +ERROR: value for domain orderedfootype violates check constraint "orderedfootype_check" +CONTEXT: PL/Perl function "foo_ordered" +CREATE OR REPLACE FUNCTION foo_ordered_set() RETURNS SETOF orderedfootype AS $$ +return [ + {x => 3, y => 4}, + {x => 4, y => 7} +]; +$$ LANGUAGE plperl; +SELECT * FROM foo_ordered_set(); + x | y +---+--- + 3 | 4 + 4 | 7 +(2 rows) + +CREATE OR REPLACE FUNCTION foo_ordered_set() RETURNS SETOF orderedfootype AS $$ +return [ + {x => 3, y => 4}, + {x => 9, y => 7} +]; +$$ LANGUAGE plperl; +SELECT * FROM foo_ordered_set(); -- fail +ERROR: value for domain orderedfootype violates check constraint "orderedfootype_check" +CONTEXT: PL/Perl function "foo_ordered_set" -- -- Check passing a tuple argument -- @@ -411,6 +451,46 @@ SELECT perl_get_field((11,12), 'z'); (1 row) +CREATE OR REPLACE FUNCTION perl_get_cfield(orderedfootype, text) RETURNS integer AS $$ + return $_[0]->{$_[1]}; +$$ LANGUAGE plperl; +SELECT perl_get_cfield((11,12), 'x'); + perl_get_cfield +----------------- + 11 +(1 row) + +SELECT perl_get_cfield((11,12), 'y'); + perl_get_cfield +----------------- + 12 +(1 row) + +SELECT perl_get_cfield((12,11), 'x'); -- fail +ERROR: value for domain orderedfootype violates check constraint "orderedfootype_check" +CREATE OR REPLACE FUNCTION perl_get_rfield(record, text) RETURNS integer AS $$ + return $_[0]->{$_[1]}; +$$ LANGUAGE plperl; +SELECT perl_get_rfield((11,12), 'f1'); + perl_get_rfield +----------------- + 11 +(1 row) + +SELECT perl_get_rfield((11,12)::footype, 'y'); + perl_get_rfield +----------------- + 12 +(1 row) + +SELECT perl_get_rfield((11,12)::orderedfootype, 'x'); + perl_get_rfield +----------------- + 11 +(1 row) + +SELECT perl_get_rfield((12,11)::orderedfootype, 'x'); -- fail +ERROR: value for domain orderedfootype violates check constraint "orderedfootype_check" -- -- Test 
return_next -- @@ -683,14 +763,17 @@ $$ LANGUAGE plperl; SELECT text_obj(); ERROR: cannot convert Perl hash to non-composite type text CONTEXT: PL/Perl function "text_obj" ------ make sure we can't return a scalar ref +-- test looking through a scalar ref CREATE OR REPLACE FUNCTION text_scalarref() RETURNS text AS $$ my $str = 'str'; return \$str; $$ LANGUAGE plperl; SELECT text_scalarref(); -ERROR: PL/Perl function must return reference to hash or array -CONTEXT: PL/Perl function "text_scalarref" + text_scalarref +---------------- + str +(1 row) + -- check safe behavior when a function body is replaced during execution CREATE OR REPLACE FUNCTION self_modify(INTEGER) RETURNS INTEGER AS $$ spi_exec_query('CREATE OR REPLACE FUNCTION self_modify(INTEGER) RETURNS INTEGER AS \'return $_[0] * 3;\' LANGUAGE plperl;'); diff --git a/src/pl/plperl/expected/plperl_call.out b/src/pl/plperl/expected/plperl_call.out new file mode 100644 index 0000000000..c55c59cbce --- /dev/null +++ b/src/pl/plperl/expected/plperl_call.out @@ -0,0 +1,54 @@ +CREATE PROCEDURE test_proc1() +LANGUAGE plperl +AS $$ +undef; +$$; +CALL test_proc1(); +CREATE PROCEDURE test_proc2() +LANGUAGE plperl +AS $$ +return 5 +$$; +CALL test_proc2(); +CREATE TABLE test1 (a int); +CREATE PROCEDURE test_proc3(x int) +LANGUAGE plperl +AS $$ +spi_exec_query("INSERT INTO test1 VALUES ($_[0])"); +$$; +CALL test_proc3(55); +SELECT * FROM test1; + a +---- + 55 +(1 row) + +-- output arguments +CREATE PROCEDURE test_proc5(INOUT a text) +LANGUAGE plperl +AS $$ +my ($a) = @_; +return { a => "$a+$a" }; +$$; +CALL test_proc5('abc'); + a +--------- + abc+abc +(1 row) + +CREATE PROCEDURE test_proc6(a int, INOUT b int, INOUT c int) +LANGUAGE plperl +AS $$ +my ($a, $b, $c) = @_; +return { b => $b * $a, c => $c * $a }; +$$; +CALL test_proc6(2, 3, 4); + b | c +---+--- + 6 | 8 +(1 row) + +DROP PROCEDURE test_proc1; +DROP PROCEDURE test_proc2; +DROP PROCEDURE test_proc3; +DROP TABLE test1; diff --git a/src/pl/plperl/expected/plperl_transaction.out b/src/pl/plperl/expected/plperl_transaction.out new file mode 100644 index 0000000000..7ca0ef35fb --- /dev/null +++ b/src/pl/plperl/expected/plperl_transaction.out @@ -0,0 +1,196 @@ +CREATE TABLE test1 (a int, b text); +CREATE PROCEDURE transaction_test1() +LANGUAGE plperl +AS $$ +foreach my $i (0..9) { + spi_exec_query("INSERT INTO test1 (a) VALUES ($i)"); + if ($i % 2 == 0) { + spi_commit(); + } else { + spi_rollback(); + } +} +$$; +CALL transaction_test1(); +SELECT * FROM test1; + a | b +---+--- + 0 | + 2 | + 4 | + 6 | + 8 | +(5 rows) + +TRUNCATE test1; +DO +LANGUAGE plperl +$$ +foreach my $i (0..9) { + spi_exec_query("INSERT INTO test1 (a) VALUES ($i)"); + if ($i % 2 == 0) { + spi_commit(); + } else { + spi_rollback(); + } +} +$$; +SELECT * FROM test1; + a | b +---+--- + 0 | + 2 | + 4 | + 6 | + 8 | +(5 rows) + +TRUNCATE test1; +-- not allowed in a function +CREATE FUNCTION transaction_test2() RETURNS int +LANGUAGE plperl +AS $$ +foreach my $i (0..9) { + spi_exec_query("INSERT INTO test1 (a) VALUES ($i)"); + if ($i % 2 == 0) { + spi_commit(); + } else { + spi_rollback(); + } +} +return 1; +$$; +SELECT transaction_test2(); +ERROR: invalid transaction termination at line 5. 
+CONTEXT: PL/Perl function "transaction_test2" +SELECT * FROM test1; + a | b +---+--- +(0 rows) + +-- also not allowed if procedure is called from a function +CREATE FUNCTION transaction_test3() RETURNS int +LANGUAGE plperl +AS $$ +spi_exec_query("CALL transaction_test1()"); +return 1; +$$; +SELECT transaction_test3(); +ERROR: invalid transaction termination at line 5. at line 2. +CONTEXT: PL/Perl function "transaction_test3" +SELECT * FROM test1; + a | b +---+--- +(0 rows) + +-- DO block inside function +CREATE FUNCTION transaction_test4() RETURNS int +LANGUAGE plperl +AS $$ +spi_exec_query('DO LANGUAGE plperl $x$ spi_commit(); $x$'); +return 1; +$$; +SELECT transaction_test4(); +ERROR: invalid transaction termination at line 1. at line 2. +CONTEXT: PL/Perl function "transaction_test4" +-- commit inside cursor loop +CREATE TABLE test2 (x int); +INSERT INTO test2 VALUES (0), (1), (2), (3), (4); +TRUNCATE test1; +DO LANGUAGE plperl $$ +my $sth = spi_query("SELECT * FROM test2 ORDER BY x"); +my $row; +while (defined($row = spi_fetchrow($sth))) { + spi_exec_query("INSERT INTO test1 (a) VALUES (" . $row->{x} . ")"); + spi_commit(); +} +$$; +SELECT * FROM test1; + a | b +---+--- + 0 | + 1 | + 2 | + 3 | + 4 | +(5 rows) + +-- check that this doesn't leak a holdable portal +SELECT * FROM pg_cursors; + name | statement | is_holdable | is_binary | is_scrollable | creation_time +------+-----------+-------------+-----------+---------------+--------------- +(0 rows) + +-- error in cursor loop with commit +TRUNCATE test1; +DO LANGUAGE plperl $$ +my $sth = spi_query("SELECT * FROM test2 ORDER BY x"); +my $row; +while (defined($row = spi_fetchrow($sth))) { + spi_exec_query("INSERT INTO test1 (a) VALUES (12/(" . $row->{x} . "-2))"); + spi_commit(); +} +$$; +ERROR: division by zero at line 5. +CONTEXT: PL/Perl anonymous code block +SELECT * FROM test1; + a | b +-----+--- + -6 | + -12 | +(2 rows) + +SELECT * FROM pg_cursors; + name | statement | is_holdable | is_binary | is_scrollable | creation_time +------+-----------+-------------+-----------+---------------+--------------- +(0 rows) + +-- rollback inside cursor loop +TRUNCATE test1; +DO LANGUAGE plperl $$ +my $sth = spi_query("SELECT * FROM test2 ORDER BY x"); +my $row; +while (defined($row = spi_fetchrow($sth))) { + spi_exec_query("INSERT INTO test1 (a) VALUES (" . $row->{x} . ")"); + spi_rollback(); +} +$$; +SELECT * FROM test1; + a | b +---+--- +(0 rows) + +SELECT * FROM pg_cursors; + name | statement | is_holdable | is_binary | is_scrollable | creation_time +------+-----------+-------------+-----------+---------------+--------------- +(0 rows) + +-- first commit then rollback inside cursor loop +TRUNCATE test1; +DO LANGUAGE plperl $$ +my $sth = spi_query("SELECT * FROM test2 ORDER BY x"); +my $row; +while (defined($row = spi_fetchrow($sth))) { + spi_exec_query("INSERT INTO test1 (a) VALUES (" . $row->{x} . 
")"); + if ($row->{x} % 2 == 0) { + spi_commit(); + } else { + spi_rollback(); + } +} +$$; +SELECT * FROM test1; + a | b +---+--- + 0 | + 2 | + 4 | +(3 rows) + +SELECT * FROM pg_cursors; + name | statement | is_holdable | is_binary | is_scrollable | creation_time +------+-----------+-------------+-----------+---------------+--------------- +(0 rows) + +DROP TABLE test1; +DROP TABLE test2; diff --git a/src/pl/plperl/expected/plperl_util.out b/src/pl/plperl/expected/plperl_util.out index 7cd027f33e..698a8a17fe 100644 --- a/src/pl/plperl/expected/plperl_util.out +++ b/src/pl/plperl/expected/plperl_util.out @@ -172,11 +172,13 @@ select perl_looks_like_number(); -- test encode_typed_literal create type perl_foo as (a integer, b text[]); create type perl_bar as (c perl_foo[]); +create domain perl_foo_pos as perl_foo check((value).a > 0); create or replace function perl_encode_typed_literal() returns setof text language plperl as $$ return_next encode_typed_literal(undef, 'text'); return_next encode_typed_literal([[1,2,3],[3,2,1],[1,3,2]], 'integer[]'); return_next encode_typed_literal({a => 1, b => ['PL','/','Perl']}, 'perl_foo'); return_next encode_typed_literal({c => [{a => 9, b => ['PostgreSQL']}, {b => ['Postgres'], a => 1}]}, 'perl_bar'); + return_next encode_typed_literal({a => 1, b => ['PL','/','Perl']}, 'perl_foo_pos'); $$; select perl_encode_typed_literal(); perl_encode_typed_literal @@ -185,5 +187,12 @@ select perl_encode_typed_literal(); {{1,2,3},{3,2,1},{1,3,2}} (1,"{PL,/,Perl}") ("{""(9,{PostgreSQL})"",""(1,{Postgres})""}") -(4 rows) + (1,"{PL,/,Perl}") +(5 rows) +create or replace function perl_encode_typed_literal() returns setof text language plperl as $$ + return_next encode_typed_literal({a => 0, b => ['PL','/','Perl']}, 'perl_foo_pos'); +$$; +select perl_encode_typed_literal(); -- fail +ERROR: value for domain perl_foo_pos violates check constraint "perl_foo_pos_check" +CONTEXT: PL/Perl function "perl_encode_typed_literal" diff --git a/src/pl/plperl/nls.mk b/src/pl/plperl/nls.mk index 9b4b429270..f33e325c76 100644 --- a/src/pl/plperl/nls.mk +++ b/src/pl/plperl/nls.mk @@ -1,6 +1,6 @@ # src/pl/plperl/nls.mk CATALOG_NAME = plperl -AVAIL_LANGUAGES = cs de es fr it ja ko pl pt_BR ro ru sv tr zh_CN zh_TW +AVAIL_LANGUAGES = cs de es fr it ja ko pl pt_BR ro ru sv tr vi zh_CN zh_TW GETTEXT_FILES = plperl.c SPI.c GETTEXT_TRIGGERS = $(BACKEND_COMMON_GETTEXT_TRIGGERS) GETTEXT_FLAGS = $(BACKEND_COMMON_GETTEXT_FLAGS) diff --git a/src/pl/plperl/plc_perlboot.pl b/src/pl/plperl/plc_perlboot.pl index ff05964869..f41aa80e80 100644 --- a/src/pl/plperl/plc_perlboot.pl +++ b/src/pl/plperl/plc_perlboot.pl @@ -51,9 +51,9 @@ sub ::encode_array_constructor } { - - package PostgreSQL::InServer - ; ## no critic (RequireFilenameMatchesPackage); +#<<< protect next line from perltidy so perlcritic annotation works + package PostgreSQL::InServer; ## no critic (RequireFilenameMatchesPackage) +#>>> use strict; use warnings; @@ -62,6 +62,7 @@ sub ::encode_array_constructor (my $msg = shift) =~ s/\(eval \d+\) //g; chomp $msg; &::elog(&::WARNING, $msg); + return; } $SIG{__WARN__} = \&plperl_warn; diff --git a/src/pl/plperl/plc_trusted.pl b/src/pl/plperl/plc_trusted.pl index 7b11a3f52b..dea3727682 100644 --- a/src/pl/plperl/plc_trusted.pl +++ b/src/pl/plperl/plc_trusted.pl @@ -1,7 +1,8 @@ # src/pl/plperl/plc_trusted.pl -package PostgreSQL::InServer::safe - ; ## no critic (RequireFilenameMatchesPackage); +#<<< protect next line from perltidy so perlcritic annotation works +package PostgreSQL::InServer::safe; ## no 
critic (RequireFilenameMatchesPackage) +#>>> # Load widely useful pragmas into plperl to make them available. # diff --git a/src/pl/plperl/plperl.c b/src/pl/plperl/plperl.c index afebec910d..4cfc506253 100644 --- a/src/pl/plperl/plperl.c +++ b/src/pl/plperl/plperl.c @@ -21,7 +21,6 @@ #include "access/xact.h" #include "catalog/pg_language.h" #include "catalog/pg_proc.h" -#include "catalog/pg_proc_fn.h" #include "catalog/pg_type.h" #include "commands/event_trigger.h" #include "commands/trigger.h" @@ -179,8 +178,11 @@ typedef struct plperl_call_data { plperl_proc_desc *prodesc; FunctionCallInfo fcinfo; + /* remaining fields are used only in a function returning set: */ Tuplestorestate *tuple_store; TupleDesc ret_tdesc; + Oid cdomain_oid; /* 0 unless returning domain-over-composite */ + void *cdomain_info; MemoryContext tmp_cxt; } plperl_call_data; @@ -290,7 +292,7 @@ static void plperl_return_next_internal(SV *sv); static char *hek2cstr(HE *he); static SV **hv_store_string(HV *hv, const char *key, SV *val); static SV **hv_fetch_string(HV *hv, const char *key); -static void plperl_create_sub(plperl_proc_desc *desc, char *s, Oid fn_oid); +static void plperl_create_sub(plperl_proc_desc *desc, const char *s, Oid fn_oid); static SV *plperl_call_perl_func(plperl_proc_desc *desc, FunctionCallInfo fcinfo); static void plperl_compile_callback(void *arg); @@ -1095,6 +1097,7 @@ plperl_build_tuple_result(HV *perlhash, TupleDesc td) SV *val = HeVAL(he); char *key = hek2cstr(he); int attn = SPI_fnumber(td, key); + Form_pg_attribute attr = TupleDescAttr(td, attn - 1); if (attn == SPI_ERROR_NOATTRIBUTE) ereport(ERROR, @@ -1108,8 +1111,8 @@ plperl_build_tuple_result(HV *perlhash, TupleDesc td) key))); values[attn - 1] = plperl_sv_to_datum(val, - td->attrs[attn - 1]->atttypid, - td->attrs[attn - 1]->atttypmod, + attr->atttypid, + attr->atttypmod, NULL, NULL, InvalidOid, @@ -1355,6 +1358,7 @@ plperl_sv_to_datum(SV *sv, Oid typid, int32 typmod, /* handle a hashref */ Datum ret; TupleDesc td; + bool isdomain; if (!type_is_rowtype(typid)) ereport(ERROR, @@ -1362,31 +1366,49 @@ plperl_sv_to_datum(SV *sv, Oid typid, int32 typmod, errmsg("cannot convert Perl hash to non-composite type %s", format_type_be(typid)))); - td = lookup_rowtype_tupdesc_noerror(typid, typmod, true); - if (td == NULL) + td = lookup_rowtype_tupdesc_domain(typid, typmod, true); + if (td != NULL) { - /* Try to look it up based on our result type */ - if (fcinfo == NULL || - get_call_result_type(fcinfo, NULL, &td) != TYPEFUNC_COMPOSITE) + /* Did we look through a domain? */ + isdomain = (typid != td->tdtypeid); + } + else + { + /* Must be RECORD, try to resolve based on call info */ + TypeFuncClass funcclass; + + if (fcinfo) + funcclass = get_call_result_type(fcinfo, &typid, &td); + else + funcclass = TYPEFUNC_OTHER; + if (funcclass != TYPEFUNC_COMPOSITE && + funcclass != TYPEFUNC_COMPOSITE_DOMAIN) ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), errmsg("function returning record called in context " "that cannot accept type record"))); + Assert(td); + isdomain = (funcclass == TYPEFUNC_COMPOSITE_DOMAIN); } ret = plperl_hash_to_datum(sv, td); + if (isdomain) + domain_check(ret, false, typid, NULL, NULL); + /* Release on the result of get_call_result_type is harmless */ ReleaseTupleDesc(td); return ret; } - /* Reference, but not reference to hash or array ... 
*/ - ereport(ERROR, - (errcode(ERRCODE_DATATYPE_MISMATCH), - errmsg("PL/Perl function must return reference to hash or array"))); - return (Datum) 0; /* shut up compiler */ + /* + * If it's a reference to something else, such as a scalar, just + * recursively look through the reference. + */ + return plperl_sv_to_datum(SvRV(sv), typid, typmod, + fcinfo, finfo, typioparam, + isnull); } else { @@ -1757,6 +1779,7 @@ plperl_modify_tuple(HV *hvTD, TriggerData *tdata, HeapTuple otup) char *key = hek2cstr(he); SV *val = HeVAL(he); int attn = SPI_fnumber(tupdesc, key); + Form_pg_attribute attr = TupleDescAttr(tupdesc, attn - 1); if (attn == SPI_ERROR_NOATTRIBUTE) ereport(ERROR, @@ -1770,8 +1793,8 @@ plperl_modify_tuple(HV *hvTD, TriggerData *tdata, HeapTuple otup) key))); modvalues[attn - 1] = plperl_sv_to_datum(val, - tupdesc->attrs[attn - 1]->atttypid, - tupdesc->attrs[attn - 1]->atttypmod, + attr->atttypid, + attr->atttypmod, NULL, NULL, InvalidOid, @@ -1893,7 +1916,7 @@ plperl_inline_handler(PG_FUNCTION_ARGS) desc.fn_retistuple = false; desc.fn_retisset = false; desc.fn_retisarray = false; - desc.result_oid = VOIDOID; + desc.result_oid = InvalidOid; desc.nargs = 0; desc.reference = NULL; @@ -1907,7 +1930,7 @@ plperl_inline_handler(PG_FUNCTION_ARGS) current_call_data = &this_call_data; - if (SPI_connect() != SPI_OK_CONNECT) + if (SPI_connect_ext(codeblock->atomic ? 0 : SPI_OPT_NONATOMIC) != SPI_OK_CONNECT) elog(ERROR, "could not connect to SPI manager"); select_perl_context(desc.lanpltrusted); @@ -2061,7 +2084,7 @@ plperlu_validator(PG_FUNCTION_ARGS) * supplied in s, and returns a reference to it */ static void -plperl_create_sub(plperl_proc_desc *prodesc, char *s, Oid fn_oid) +plperl_create_sub(plperl_proc_desc *prodesc, const char *s, Oid fn_oid) { dTHX; dSP; @@ -2374,13 +2397,18 @@ plperl_call_perl_event_trigger_func(plperl_proc_desc *desc, static Datum plperl_func_handler(PG_FUNCTION_ARGS) { + bool nonatomic; plperl_proc_desc *prodesc; SV *perlret; Datum retval = 0; ReturnSetInfo *rsi; ErrorContextCallback pl_error_context; - if (SPI_connect() != SPI_OK_CONNECT) + nonatomic = fcinfo->context && + IsA(fcinfo->context, CallContext) && + !castNode(CallContext, fcinfo->context)->atomic; + + if (SPI_connect_ext(nonatomic ? SPI_OPT_NONATOMIC : 0) != SPI_OK_CONNECT) elog(ERROR, "could not connect to SPI manager"); prodesc = compile_plperl_function(fcinfo->flinfo->fn_oid, false, false); @@ -2399,8 +2427,7 @@ plperl_func_handler(PG_FUNCTION_ARGS) { /* Check context before allowing the call to go through */ if (!rsi || !IsA(rsi, ReturnSetInfo) || - (rsi->allowedModes & SFRM_Materialize) == 0 || - rsi->expectedDesc == NULL) + (rsi->allowedModes & SFRM_Materialize) == 0) ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), errmsg("set-valued function called in context that " @@ -2460,7 +2487,7 @@ plperl_func_handler(PG_FUNCTION_ARGS) } retval = (Datum) 0; } - else + else if (prodesc->result_oid) { retval = plperl_sv_to_datum(perlret, prodesc->result_oid, @@ -2757,7 +2784,7 @@ compile_plperl_function(Oid fn_oid, bool is_trigger, bool is_event_trigger) * Allocate a context that will hold all PG data for the procedure. 
************************************************************/ proc_cxt = AllocSetContextCreate(TopMemoryContext, - NameStr(procStruct->proname), + "PL/Perl function", ALLOCSET_SMALL_SIZES); /************************************************************ @@ -2767,6 +2794,7 @@ compile_plperl_function(Oid fn_oid, bool is_trigger, bool is_event_trigger) oldcontext = MemoryContextSwitchTo(proc_cxt); prodesc = (plperl_proc_desc *) palloc0(sizeof(plperl_proc_desc)); prodesc->proname = pstrdup(NameStr(procStruct->proname)); + MemoryContextSetIdentifier(proc_cxt, prodesc->proname); prodesc->fn_cxt = proc_cxt; prodesc->fn_refcount = 0; prodesc->fn_xmin = HeapTupleHeaderGetRawXmin(procTup->t_data); @@ -2807,22 +2835,21 @@ compile_plperl_function(Oid fn_oid, bool is_trigger, bool is_event_trigger) ************************************************************/ if (!is_trigger && !is_event_trigger) { - typeTup = - SearchSysCache1(TYPEOID, - ObjectIdGetDatum(procStruct->prorettype)); + Oid rettype = procStruct->prorettype; + + typeTup = SearchSysCache1(TYPEOID, ObjectIdGetDatum(rettype)); if (!HeapTupleIsValid(typeTup)) - elog(ERROR, "cache lookup failed for type %u", - procStruct->prorettype); + elog(ERROR, "cache lookup failed for type %u", rettype); typeStruct = (Form_pg_type) GETSTRUCT(typeTup); /* Disallow pseudotype result, except VOID or RECORD */ if (typeStruct->typtype == TYPTYPE_PSEUDO) { - if (procStruct->prorettype == VOIDOID || - procStruct->prorettype == RECORDOID) + if (rettype == VOIDOID || + rettype == RECORDOID) /* okay */ ; - else if (procStruct->prorettype == TRIGGEROID || - procStruct->prorettype == EVTTRIGGEROID) + else if (rettype == TRIGGEROID || + rettype == EVTTRIGGEROID) ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), errmsg("trigger functions can only be called " @@ -2831,13 +2858,12 @@ compile_plperl_function(Oid fn_oid, bool is_trigger, bool is_event_trigger) ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), errmsg("PL/Perl functions cannot return type %s", - format_type_be(procStruct->prorettype)))); + format_type_be(rettype)))); } - prodesc->result_oid = procStruct->prorettype; + prodesc->result_oid = rettype; prodesc->fn_retisset = procStruct->proretset; - prodesc->fn_retistuple = (procStruct->prorettype == RECORDOID || - typeStruct->typtype == TYPTYPE_COMPOSITE); + prodesc->fn_retistuple = type_is_rowtype(rettype); prodesc->fn_retisarray = (typeStruct->typlen == -1 && typeStruct->typelem); @@ -2860,23 +2886,22 @@ compile_plperl_function(Oid fn_oid, bool is_trigger, bool is_event_trigger) for (i = 0; i < prodesc->nargs; i++) { - typeTup = SearchSysCache1(TYPEOID, - ObjectIdGetDatum(procStruct->proargtypes.values[i])); + Oid argtype = procStruct->proargtypes.values[i]; + + typeTup = SearchSysCache1(TYPEOID, ObjectIdGetDatum(argtype)); if (!HeapTupleIsValid(typeTup)) - elog(ERROR, "cache lookup failed for type %u", - procStruct->proargtypes.values[i]); + elog(ERROR, "cache lookup failed for type %u", argtype); typeStruct = (Form_pg_type) GETSTRUCT(typeTup); - /* Disallow pseudotype argument */ + /* Disallow pseudotype argument, except RECORD */ if (typeStruct->typtype == TYPTYPE_PSEUDO && - procStruct->proargtypes.values[i] != RECORDOID) + argtype != RECORDOID) ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), errmsg("PL/Perl functions cannot accept type %s", - format_type_be(procStruct->proargtypes.values[i])))); + format_type_be(argtype)))); - if (typeStruct->typtype == TYPTYPE_COMPOSITE || - procStruct->proargtypes.values[i] == RECORDOID) + if 
(type_is_rowtype(argtype)) prodesc->arg_is_rowtype[i] = true; else { @@ -2886,9 +2911,9 @@ compile_plperl_function(Oid fn_oid, bool is_trigger, bool is_event_trigger) proc_cxt); } - /* Identify array attributes */ + /* Identify array-type arguments */ if (typeStruct->typelem != 0 && typeStruct->typlen == -1) - prodesc->arg_arraytype[i] = procStruct->proargtypes.values[i]; + prodesc->arg_arraytype[i] = argtype; else prodesc->arg_arraytype[i] = InvalidOid; @@ -3014,11 +3039,12 @@ plperl_hash_from_tuple(HeapTuple tuple, TupleDesc tupdesc) typisvarlena; char *attname; Oid typoutput; + Form_pg_attribute att = TupleDescAttr(tupdesc, i); - if (tupdesc->attrs[i]->attisdropped) + if (att->attisdropped) continue; - attname = NameStr(tupdesc->attrs[i]->attname); + attname = NameStr(att->attname); attr = heap_getattr(tuple, i + 1, tupdesc, &isnull); if (isnull) @@ -3032,7 +3058,7 @@ plperl_hash_from_tuple(HeapTuple tuple, TupleDesc tupdesc) continue; } - if (type_is_rowtype(tupdesc->attrs[i]->atttypid)) + if (type_is_rowtype(att->atttypid)) { SV *sv = plperl_hash_from_datum(attr); @@ -3043,17 +3069,16 @@ plperl_hash_from_tuple(HeapTuple tuple, TupleDesc tupdesc) SV *sv; Oid funcid; - if (OidIsValid(get_base_element_type(tupdesc->attrs[i]->atttypid))) - sv = plperl_ref_from_pg_array(attr, tupdesc->attrs[i]->atttypid); - else if ((funcid = get_transform_fromsql(tupdesc->attrs[i]->atttypid, current_call_data->prodesc->lang_oid, current_call_data->prodesc->trftypes))) + if (OidIsValid(get_base_element_type(att->atttypid))) + sv = plperl_ref_from_pg_array(attr, att->atttypid); + else if ((funcid = get_transform_fromsql(att->atttypid, current_call_data->prodesc->lang_oid, current_call_data->prodesc->trftypes))) sv = (SV *) DatumGetPointer(OidFunctionCall1(funcid, attr)); else { char *outputstr; /* XXX should have a way to cache these lookups */ - getTypeOutputInfo(tupdesc->attrs[i]->atttypid, - &typoutput, &typisvarlena); + getTypeOutputInfo(att->atttypid, &typoutput, &typisvarlena); outputstr = OidOutputFunctionCall(typoutput, attr); sv = cstr2sv(outputstr); @@ -3247,11 +3272,25 @@ plperl_return_next_internal(SV *sv) /* * This is the first call to return_next in the current PL/Perl - * function call, so identify the output tuple descriptor and create a + * function call, so identify the output tuple type and create a * tuplestore to hold the result rows. 
*/ if (prodesc->fn_retistuple) - (void) get_call_result_type(fcinfo, NULL, &tupdesc); + { + TypeFuncClass funcclass; + Oid typid; + + funcclass = get_call_result_type(fcinfo, &typid, &tupdesc); + if (funcclass != TYPEFUNC_COMPOSITE && + funcclass != TYPEFUNC_COMPOSITE_DOMAIN) + ereport(ERROR, + (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), + errmsg("function returning record called in context " + "that cannot accept type record"))); + /* if domain-over-composite, remember the domain's type OID */ + if (funcclass == TYPEFUNC_COMPOSITE_DOMAIN) + current_call_data->cdomain_oid = typid; + } else { tupdesc = rsi->expectedDesc; @@ -3302,9 +3341,16 @@ plperl_return_next_internal(SV *sv) tuple = plperl_build_tuple_result((HV *) SvRV(sv), current_call_data->ret_tdesc); + + if (OidIsValid(current_call_data->cdomain_oid)) + domain_check(HeapTupleGetDatum(tuple), false, + current_call_data->cdomain_oid, + &current_call_data->cdomain_info, + rsi->econtext->ecxt_per_query_memory); + tuplestore_puttuple(current_call_data->tuple_store, tuple); } - else + else if (prodesc->result_oid) { Datum ret[1]; bool isNull[1]; @@ -3366,6 +3412,8 @@ plperl_spi_query(char *query) SPI_result_code_string(SPI_result)); cursor = cstr2sv(portal->name); + PinPortal(portal); + /* Commit the inner transaction, return to outer xact context */ ReleaseCurrentSubTransaction(); MemoryContextSwitchTo(oldcontext); @@ -3429,6 +3477,7 @@ plperl_spi_fetchrow(char *cursor) SPI_cursor_fetch(p, true, 1); if (SPI_processed == 0) { + UnpinPortal(p); SPI_cursor_close(p); row = &PL_sv_undef; } @@ -3480,7 +3529,10 @@ plperl_spi_cursor_close(char *cursor) p = SPI_cursor_find(cursor); if (p) + { + UnpinPortal(p); SPI_cursor_close(p); + } } SV * @@ -3844,6 +3896,8 @@ plperl_spi_query_prepared(char *query, int argc, SV **argv) cursor = cstr2sv(portal->name); + PinPortal(portal); + /* Commit the inner transaction, return to outer xact context */ ReleaseCurrentSubTransaction(); MemoryContextSwitchTo(oldcontext); @@ -3905,6 +3959,60 @@ plperl_spi_freeplan(char *query) SPI_freeplan(plan); } +void +plperl_spi_commit(void) +{ + MemoryContext oldcontext = CurrentMemoryContext; + + PG_TRY(); + { + HoldPinnedPortals(); + + SPI_commit(); + SPI_start_transaction(); + } + PG_CATCH(); + { + ErrorData *edata; + + /* Save error info */ + MemoryContextSwitchTo(oldcontext); + edata = CopyErrorData(); + FlushErrorState(); + + /* Punt the error to Perl */ + croak_cstr(edata->message); + } + PG_END_TRY(); +} + +void +plperl_spi_rollback(void) +{ + MemoryContext oldcontext = CurrentMemoryContext; + + PG_TRY(); + { + HoldPinnedPortals(); + + SPI_rollback(); + SPI_start_transaction(); + } + PG_CATCH(); + { + ErrorData *edata; + + /* Save error info */ + MemoryContextSwitchTo(oldcontext); + edata = CopyErrorData(); + FlushErrorState(); + + /* Punt the error to Perl */ + croak_cstr(edata->message); + } + PG_END_TRY(); +} + /* * Implementation of plperl's elog() function * diff --git a/src/pl/plperl/plperl.h b/src/pl/plperl/plperl.h index c4e06d089f..12fbad9787 100644 --- a/src/pl/plperl/plperl.h +++ b/src/pl/plperl/plperl.h @@ -5,7 +5,7 @@ * * This should be included _AFTER_ postgres.h and system include files * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1995, Regents of the University of California * * src/pl/plperl/plperl.h @@ -17,13 +17,6 @@ /* stop perl headers from hijacking stdio and other stuff on Windows */ #ifdef WIN32 #define WIN32IO_IS_STDIO -/*
- * isnan is defined in both the perl and mingw headers. We don't use it, - * so this just clears up the compile warning. - */ -#ifdef isnan -#undef isnan -#endif #endif /* WIN32 */ /* @@ -36,10 +29,33 @@ * Sometimes perl carefully scribbles on our *printf macros. * So we undefine them here and redefine them after it's done its dirty deed. */ - -#ifdef USE_REPL_SNPRINTF -#undef snprintf #undef vsnprintf +#undef snprintf +#undef vsprintf +#undef sprintf +#undef vfprintf +#undef fprintf +#undef vprintf +#undef printf + +/* + * ActivePerl 5.18 and later are MinGW-built, and their headers use GCC's + * __inline__. Translate to something MSVC recognizes. + */ +#ifdef _MSC_VER +#define __inline__ inline +#endif + +/* + * Regarding bool, both PostgreSQL and Perl might use stdbool.h or not, + * depending on configuration. If both agree, things are relatively harmless. + * If not, things get tricky. If PostgreSQL does but Perl does not, define + * HAS_BOOL here so that Perl does not redefine bool; this avoids compiler + * warnings. If PostgreSQL does not but Perl does, we need to undefine bool + * after we include the Perl headers; see below. + */ +#ifdef USE_STDBOOL +#define HAS_BOOL 1 #endif @@ -57,25 +73,67 @@ * before ppport.h, so use a #define flag to control inclusion here. */ #ifdef PG_NEED_PERL_XSUB_H +/* + * On Windows, port_win32.h defines macros for a lot of these same functions. + * To avoid compiler warnings when XSUB.h redefines them, #undef our versions. + */ +#ifdef WIN32 +#undef accept +#undef bind +#undef connect +#undef fopen +#undef kill +#undef listen +#undef lstat +#undef mkdir +#undef open +#undef putenv +#undef recv +#undef rename +#undef select +#undef send +#undef socket +#undef stat +#undef unlink +#endif + #include "XSUB.h" #endif -/* put back our snprintf and vsnprintf */ -#ifdef USE_REPL_SNPRINTF +/* put back our *printf macros ... this must match src/include/port.h */ +#ifdef vsnprintf +#undef vsnprintf +#endif #ifdef snprintf #undef snprintf #endif -#ifdef vsnprintf -#undef vsnprintf +#ifdef vsprintf +#undef vsprintf #endif -#ifdef __GNUC__ -#define vsnprintf(...) pg_vsnprintf(__VA_ARGS__) -#define snprintf(...) pg_snprintf(__VA_ARGS__) -#else +#ifdef sprintf +#undef sprintf +#endif +#ifdef vfprintf +#undef vfprintf +#endif +#ifdef fprintf +#undef fprintf +#endif +#ifdef vprintf +#undef vprintf +#endif +#ifdef printf +#undef printf +#endif + #define vsnprintf pg_vsnprintf #define snprintf pg_snprintf -#endif /* __GNUC__ */ -#endif /* USE_REPL_SNPRINTF */ +#define vsprintf pg_vsprintf +#define sprintf pg_sprintf +#define vfprintf pg_vfprintf +#define fprintf pg_fprintf +#define vprintf pg_vprintf +#define printf(...) pg_printf(__VA_ARGS__) /* perl version and platform portability */ #define NEED_eval_pv @@ -83,10 +141,18 @@ #define NEED_sv_2pv_flags #include "ppport.h" -/* perl may have a different width of "bool", don't buy it */ +/* + * perl might have included stdbool.h. If we also did that earlier (see c.h), + * then that's fine. If not, we probably rejected it for some reason. In + * that case, undef bool and proceed with our own bool. (Note that stdbool.h + * makes bool a macro, but our own replacement is a typedef, so the undef + * makes ours visible again). 
+ */ +#ifndef USE_STDBOOL #ifdef bool #undef bool #endif +#endif /* supply HeUTF8 if it's missing - ppport.h doesn't supply it, unfortunately */ #ifndef HeUTF8 @@ -117,6 +183,8 @@ HV *plperl_spi_exec_prepared(char *, HV *, int, SV **); SV *plperl_spi_query_prepared(char *, int, SV **); void plperl_spi_freeplan(char *); void plperl_spi_cursor_close(char *); +void plperl_spi_commit(void); +void plperl_spi_rollback(void); char *plperl_sv_to_literal(SV *, char *); void plperl_util_elog(int level, SV *msg); diff --git a/src/pl/plperl/plperl_opmask.pl b/src/pl/plperl/plperl_opmask.pl index 61e5cac148..e4e64b843f 100644 --- a/src/pl/plperl/plperl_opmask.pl +++ b/src/pl/plperl/plperl_opmask.pl @@ -38,11 +38,11 @@ # (included in :default) but aren't considered sufficiently safe qw[!dbmopen !setpgrp !setpriority], - # custom is not deemed a likely security risk as it can't be generated from - # perl so would only be seen if the DBA had chosen to load a module that - # used it. Even then it's unlikely to be seen because it's typically - # generated by compiler plugins that operate after PL_op_mask checks. - # But we err on the side of caution and disable it + # custom is not deemed a likely security risk as it can't be generated from + # perl so would only be seen if the DBA had chosen to load a module that + # used it. Even then it's unlikely to be seen because it's typically + # generated by compiler plugins that operate after PL_op_mask checks. + # But we err on the side of caution and disable it qw[!custom],); printf $fh " /* ALLOWED: @allowed_ops */ \\\n"; diff --git a/src/pl/plperl/po/it.po b/src/pl/plperl/po/it.po index 134dc44584..f56d851ad3 100644 --- a/src/pl/plperl/po/it.po +++ b/src/pl/plperl/po/it.po @@ -1,19 +1,17 @@ # -# Translation of plperl to Italian -# PostgreSQL Project +# plperl.po +# Italian message translation file for plperl # -# Associazione Culturale ITPUG - Italian PostgreSQL Users Group -# http://www.itpug.org/ - info@itpug.org +# For development and bug report please use: +# https://github.com/dvarrazzo/postgresql-it # -# Traduttori: -# * Emanuele Zamprogno -# * Daniele Varrazzo +# Copyright (C) 2012-2017 PostgreSQL Global Development Group +# Copyright (C) 2010, Associazione Culturale ITPUG # -# Revisori: -# * Gabriele Bartolini +# Daniele Varrazzo , 2012-2017. +# Emanuele Zamprogno # -# Copyright (c) 2010, Associazione Culturale ITPUG -# Distributed under the same license of the PostgreSQL project +# This file is distributed under the same license as the PostgreSQL package. # msgid "" msgstr "" @@ -22,7 +20,7 @@ msgstr "" "POT-Creation-Date: 2017-04-22 22:37+0000\n" "PO-Revision-Date: 2017-04-23 04:42+0100\n" "Last-Translator: Daniele Varrazzo \n" -"Language-Team: Gruppo traduzioni ITPUG \n" +"Language-Team: https://github.com/dvarrazzo/postgresql-it\n" "Language: it\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" diff --git a/src/pl/plperl/po/ja.po b/src/pl/plperl/po/ja.po index cab0253534..517eadce9c 100644 --- a/src/pl/plperl/po/ja.po +++ b/src/pl/plperl/po/ja.po @@ -1,193 +1,242 @@ # LANGUAGE message translation file for plperl # Copyright (C) 2009 PostgreSQL Global Development Group # This file is distributed under the same license as the PostgreSQL package. -# FIRST AUTHOR , 2009. +# Honda Shigehiro , 2012. 
# msgid "" msgstr "" -"Project-Id-Version: PostgreSQL 9.1 beta 2\n" +"Project-Id-Version: plperl (PostgreSQL) 10\n" "Report-Msgid-Bugs-To: pgsql-bugs@postgresql.org\n" -"POT-Creation-Date: 2012-08-11 17:49+0900\n" -"PO-Revision-Date: 2012-08-11 17:51+0900\n" -"Last-Translator: Honda Shigehiro\n" +"POT-Creation-Date: 2018-01-29 10:46+0900\n" +"PO-Revision-Date: 2018-02-13 11:13+0900\n" +"Last-Translator: Michihide Hotta \n" "Language-Team: jpug-doc \n" "Language: ja\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" "Plural-Forms: nplurals=1; plural=0;\n" +"X-Generator: Poedit 2.0.6\n" -#: plperl.c:365 -msgid "If true, trusted and untrusted Perl code will be compiled in strict mode." -msgstr "真ならば信頼し、信頼されないPerlのコードはstrictモードでコンパイルされます。" +#: plperl.c:407 +msgid "" +"If true, trusted and untrusted Perl code will be compiled in strict mode." +msgstr "" +"true の場合、trusted および untrusted な Perl のコードはいずれも strict モー" +"ドでコンパイルされます。" -#: plperl.c:379 -msgid "Perl initialization code to execute when a Perl interpreter is initialized." -msgstr "Perl のインタプリタが初期化される際に実行されるべき Perl 初期化コード" +#: plperl.c:421 +msgid "" +"Perl initialization code to execute when a Perl interpreter is initialized." +msgstr "" +"Perl のインタプリタが初期化される際に実行されるべき Perl の初期化コード。" -#: plperl.c:401 +#: plperl.c:443 msgid "Perl initialization code to execute once when plperl is first used." -msgstr "plperl が最初に使用される際に一度実行されるべき Perl 初期化コード" +msgstr "plperl が最初に使用される際に一度だけ実行される Perl の初期化コード。" -#: plperl.c:409 +#: plperl.c:451 msgid "Perl initialization code to execute once when plperlu is first used." -msgstr "plperlu が最初に使用される際に一度実行されるべき Perl 初期化コード" +msgstr "plperlu が最初に使用される際に一度だけ実行される Perl の初期化コード。" -#: plperl.c:626 plperl.c:788 plperl.c:793 plperl.c:897 plperl.c:908 -#: plperl.c:949 plperl.c:970 plperl.c:1943 plperl.c:2038 plperl.c:2100 +#: plperl.c:648 +#, c-format +msgid "cannot allocate multiple Perl interpreters on this platform" +msgstr "このプラットフォームでは複数の Perl インタプリタを設定できません" + +#: plperl.c:671 plperl.c:855 plperl.c:861 plperl.c:978 plperl.c:990 +#: plperl.c:1033 plperl.c:1056 plperl.c:2120 plperl.c:2230 plperl.c:2298 +#: plperl.c:2361 #, c-format msgid "%s" msgstr "%s" -#: plperl.c:627 +#: plperl.c:672 #, c-format msgid "while executing PostgreSQL::InServer::SPI::bootstrap" -msgstr "PostgreSQL::InServer::SPI::bootstrap の実行中に" +msgstr "PostgreSQL::InServer::SPI::bootstrap の実行中" -#: plperl.c:789 +#: plperl.c:856 #, c-format msgid "while parsing Perl initialization" -msgstr "Perl 初期化処理のパース中に" +msgstr "Perl 初期化処理のパース中" -#: plperl.c:794 +#: plperl.c:862 #, c-format msgid "while running Perl initialization" -msgstr "Perl 初期化処理の実行中に" +msgstr "Perl 初期化処理の実行中" -#: plperl.c:898 +#: plperl.c:979 #, c-format msgid "while executing PLC_TRUSTED" -msgstr "PLC_TRUSTED の実行中に" +msgstr "PLC_TRUSTED の実行中" -#: plperl.c:909 +#: plperl.c:991 #, c-format msgid "while executing utf8fix" -msgstr "utf8fix の実行中に" +msgstr "utf8fix の実行中" -#: plperl.c:950 +#: plperl.c:1034 #, c-format msgid "while executing plperl.on_plperl_init" -msgstr "plperl.on_plperl_init の実行中に" +msgstr "plperl.on_plperl_init の実行中" -#: plperl.c:971 +#: plperl.c:1057 #, c-format msgid "while executing plperl.on_plperlu_init" -msgstr "plperl.on_plperlu_init の実行中に" +msgstr "plperl.on_plperlu_init の実行中" -#: plperl.c:1015 plperl.c:1615 +#: plperl.c:1102 plperl.c:1764 #, c-format msgid "Perl hash contains nonexistent column \"%s\"" -msgstr "Perlハッシュに存在しない列\"%s\"が含まれます" +msgstr "Perl ハッシュに存在しない列 \"%s\" があります" + +#: plperl.c:1107 plperl.c:1769 
+#, c-format +msgid "cannot set system attribute \"%s\"" +msgstr "システム属性 \"%s\" をセットできません" -#: plperl.c:1100 +#: plperl.c:1195 #, c-format msgid "number of array dimensions (%d) exceeds the maximum allowed (%d)" -msgstr "配列の次数(%d)が上限(%d)を超えています" +msgstr "配列の次元数(%d)が制限値(%d)を超えています" -#: plperl.c:1112 plperl.c:1129 +#: plperl.c:1207 plperl.c:1224 #, c-format -msgid "multidimensional arrays must have array expressions with matching dimensions" -msgstr "多次元配列は次数に合った配列式を持たなければなりません" +msgid "" +"multidimensional arrays must have array expressions with matching dimensions" +msgstr "多次元配列は次元数に合った配列式を持たなければなりません" -#: plperl.c:1166 +#: plperl.c:1260 #, c-format msgid "cannot convert Perl array to non-array type %s" -msgstr "Perl配列を非配列型%sに変換できません" +msgstr "Perl 配列を非配列型 %s に変換できません" -#: plperl.c:1262 +#: plperl.c:1362 #, c-format msgid "cannot convert Perl hash to non-composite type %s" -msgstr "Perlハッシュを非複合型%sに変換できません" +msgstr "Perl ハッシュを非複合型 %s に変換できません" -#: plperl.c:1273 +#: plperl.c:1373 #, c-format -msgid "function returning record called in context that cannot accept type record" -msgstr "レコード型を受け付けられないコンテキストでレコードを返す関数が呼び出されました" +msgid "" +"function returning record called in context that cannot accept type record" +msgstr "" +"レコード型を受け付けられないコンテキストでレコードを返す関数が呼び出されまし" +"た" -#: plperl.c:1288 +#: plperl.c:1388 #, c-format msgid "PL/Perl function must return reference to hash or array" -msgstr "PL/Perl関数はハッシュまたは配列への参照を返す必要があります" +msgstr "PL/Perl 関数はハッシュまたは配列への参照を返す必要があります" -#: plperl.c:1592 +#: plperl.c:1425 +#, c-format +msgid "lookup failed for type %s" +msgstr "型 %s の検索に失敗しました" + +#: plperl.c:1740 #, c-format msgid "$_TD->{new} does not exist" -msgstr "$_TD->{new}は存在しません" +msgstr "$_TD->{new} は存在しません" -#: plperl.c:1596 +#: plperl.c:1744 #, c-format msgid "$_TD->{new} is not a hash reference" -msgstr "$_TD->{new}はハッシュへの参照ではありません" +msgstr "$_TD->{new} はハッシュへの参照ではありません" -#: plperl.c:1820 plperl.c:2518 +#: plperl.c:1995 plperl.c:2833 #, c-format msgid "PL/Perl functions cannot return type %s" -msgstr "PL/Perl関数は%s型を返すことができません" +msgstr "PL/Perl 関数は %s 型を返すことができません" -#: plperl.c:1833 plperl.c:2565 +#: plperl.c:2008 plperl.c:2875 #, c-format msgid "PL/Perl functions cannot accept type %s" -msgstr "PL/Perl関数は%s型を受け付けられません" +msgstr "PL/Perl 関数は %s 型を受け付けられません" -#: plperl.c:1947 +#: plperl.c:2125 #, c-format msgid "didn't get a CODE reference from compiling function \"%s\"" msgstr "関数 \"%s\" のコンパイルからはコード参照を取得しませんでした" -#: plperl.c:2151 +#: plperl.c:2218 #, c-format -msgid "set-valued function called in context that cannot accept a set" -msgstr "このコンテキストで集合値の関数は集合を受け付けられません" +msgid "didn't get a return item from function" +msgstr "関数からは戻り項目を取得しませんでした" -#: plperl.c:2195 +#: plperl.c:2262 plperl.c:2329 #, c-format -msgid "set-returning PL/Perl function must return reference to array or use return_next" -msgstr "集合を返すPL/Perl関数は配列への参照を返す、または、return_nextを使用する必要があります" +msgid "couldn't fetch $_TD" +msgstr "$_TD を取り出せませんでした" -#: plperl.c:2315 +#: plperl.c:2286 plperl.c:2349 #, c-format -msgid "ignoring modified row in DELETE trigger" -msgstr "DELETEトリガにて変更された行を無視します" +msgid "didn't get a return item from trigger function" +msgstr "トリガー関数から項目を取得しませんでした" -#: plperl.c:2323 +#: plperl.c:2406 #, c-format -msgid "result of PL/Perl trigger function must be undef, \"SKIP\", or \"MODIFY\"" -msgstr "PL/Perlトリガ関数の結果は\"SKIP\"または\"MODIFY\"でなければなりません" +msgid "set-valued function called in context that cannot accept a set" +msgstr "集合を受け付けられないコンテキストで集合値関数が呼ばれました" + +#: plperl.c:2451 +#, c-format +msgid "" +"set-returning PL/Perl 
function must return reference to array or use " +"return_next" +msgstr "" +"集合を返す PL/Perl 関数は、配列への参照を返すかまたは return_next を使う必要" +"があります" -#: plperl.c:2449 plperl.c:2455 +#: plperl.c:2572 #, c-format -msgid "out of memory" -msgstr "メモリ不足です" +msgid "ignoring modified row in DELETE trigger" +msgstr "DELETE トリガーで変更された行を無視しています" -#: plperl.c:2509 +#: plperl.c:2580 +#, c-format +msgid "" +"result of PL/Perl trigger function must be undef, \"SKIP\", or \"MODIFY\"" +msgstr "" +"PL/Perl のトリガー関数の結果は undef、\"SKIP\"、\"MODIFY\" のいずれかでなけ" +"ればなりません" + +#: plperl.c:2828 #, c-format msgid "trigger functions can only be called as triggers" msgstr "トリガー関数はトリガーとしてのみコールできます" -#: plperl.c:2885 +#: plperl.c:3170 +#, c-format +msgid "query result has too many rows to fit in a Perl array" +msgstr "問い合わせの結果に含まれる行数が Perl の配列に対して多すぎます" + +#: plperl.c:3240 #, c-format msgid "cannot use return_next in a non-SETOF function" -msgstr "SETOF関数以外ではreturn_nextを使用することはできません" +msgstr "集合を返す関数以外で return_next を使うことはできません" -#: plperl.c:2941 +#: plperl.c:3300 #, c-format -msgid "SETOF-composite-returning PL/Perl function must call return_next with reference to hash" -msgstr "複合型のSETOFを返すPL/Perl関数はハッシュへの参照を持つreturn_nextを呼び出さなければなりません" +msgid "" +"SETOF-composite-returning PL/Perl function must call return_next with " +"reference to hash" +msgstr "" +"複合型の集合を返す PL/Perl 関数は、ハッシュへの参照を持つ return_next を呼び" +"出さなければなりません" -#: plperl.c:3652 +#: plperl.c:4009 #, c-format msgid "PL/Perl function \"%s\"" msgstr "PL/Perl 関数 \"%s\"" -#: plperl.c:3664 +#: plperl.c:4021 #, c-format msgid "compilation of PL/Perl function \"%s\"" msgstr "PL/Perl 関数 \"%s\" のコンパイル" -#: plperl.c:3673 +#: plperl.c:4030 #, c-format msgid "PL/Perl anonymous code block" msgstr "PL/Perl の無名コードブロック" - -#~ msgid "composite-returning PL/Perl function must return reference to hash" -#~ msgstr "複合型を返すPL/Perl関数はハッシュへの参照を返す必要があります" diff --git a/src/pl/plperl/po/ko.po b/src/pl/plperl/po/ko.po index 2f32d9fcbf..75c4202b4c 100644 --- a/src/pl/plperl/po/ko.po +++ b/src/pl/plperl/po/ko.po @@ -5,10 +5,10 @@ # msgid "" msgstr "" -"Project-Id-Version: plperl (PostgreSQL) 9.6\n" +"Project-Id-Version: plperl (PostgreSQL) 10\n" "Report-Msgid-Bugs-To: pgsql-bugs@postgresql.org\n" -"POT-Creation-Date: 2016-09-26 14:02+0900\n" -"PO-Revision-Date: 2016-09-26 18:54+0900\n" +"POT-Creation-Date: 2017-08-16 10:59+0900\n" +"PO-Revision-Date: 2017-08-16 17:47+0900\n" "Last-Translator: Ioseph Kim \n" "Language-Team: Korean Team \n" "Language: ko\n" @@ -16,200 +16,200 @@ msgstr "" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" -#: plperl.c:405 +#: plperl.c:407 msgid "" "If true, trusted and untrusted Perl code will be compiled in strict mode." msgstr "true로 지정하면, Perl 코드가 엄격한 구문 검사로 컴파일 됨" -#: plperl.c:419 +#: plperl.c:421 msgid "" "Perl initialization code to execute when a Perl interpreter is initialized." msgstr "Perl 인터프리터가 초기화 될 때 실행할 Perl 초기화 코드" -#: plperl.c:441 +#: plperl.c:443 msgid "Perl initialization code to execute once when plperl is first used." msgstr "plperl 모듈이 처음 사용될 때 실행할 Perl 초기화 코드" -#: plperl.c:449 +#: plperl.c:451 msgid "Perl initialization code to execute once when plperlu is first used." 
msgstr "plperlu 모듈이 처음 사용될 때 실행할 Perl 초기화 코드" -#: plperl.c:646 +#: plperl.c:648 #, c-format msgid "cannot allocate multiple Perl interpreters on this platform" msgstr "이 플랫폼에 여러 Perl 인터프리터를 사용할 수 없음" -#: plperl.c:666 plperl.c:841 plperl.c:847 plperl.c:961 plperl.c:973 -#: plperl.c:1016 plperl.c:1037 plperl.c:2080 plperl.c:2189 plperl.c:2256 -#: plperl.c:2318 +#: plperl.c:671 plperl.c:855 plperl.c:861 plperl.c:978 plperl.c:990 +#: plperl.c:1033 plperl.c:1056 plperl.c:2120 plperl.c:2230 plperl.c:2298 +#: plperl.c:2361 #, c-format msgid "%s" msgstr "%s" -#: plperl.c:667 +#: plperl.c:672 #, c-format msgid "while executing PostgreSQL::InServer::SPI::bootstrap" msgstr "PostgreSQL::InServer::SPI::bootstrap 실행 중" -#: plperl.c:842 +#: plperl.c:856 #, c-format msgid "while parsing Perl initialization" msgstr "Perl 초기화 구문 분석 중" -#: plperl.c:848 +#: plperl.c:862 #, c-format msgid "while running Perl initialization" msgstr "Perl 초기화 실행 중" -#: plperl.c:962 +#: plperl.c:979 #, c-format msgid "while executing PLC_TRUSTED" msgstr "PLC_TRUSTED 실행 중" -#: plperl.c:974 +#: plperl.c:991 #, c-format msgid "while executing utf8fix" msgstr "utf8fix 실행 중" -#: plperl.c:1017 +#: plperl.c:1034 #, c-format msgid "while executing plperl.on_plperl_init" msgstr "plperl.on_plperl_init 실행 중" -#: plperl.c:1038 +#: plperl.c:1057 #, c-format msgid "while executing plperl.on_plperlu_init" msgstr "plperl.on_plperlu_init 실행 중" -#: plperl.c:1082 plperl.c:1722 +#: plperl.c:1102 plperl.c:1764 #, c-format msgid "Perl hash contains nonexistent column \"%s\"" msgstr "Perl 해시에 존재하지 않는 \"%s\" 칼럼이 포함되었습니다" -#: plperl.c:1167 +#: plperl.c:1107 plperl.c:1769 +#, c-format +msgid "cannot set system attribute \"%s\"" +msgstr "\"%s\" 시스템 속성을 지정할 수 없음" + +#: plperl.c:1195 #, c-format msgid "number of array dimensions (%d) exceeds the maximum allowed (%d)" msgstr "지정한 배열 크기(%d)가 최대치(%d)를 초과했습니다" -#: plperl.c:1179 plperl.c:1196 +#: plperl.c:1207 plperl.c:1224 #, c-format msgid "" "multidimensional arrays must have array expressions with matching dimensions" msgstr "다차원 배열에는 일치하는 차원이 포함된 배열 식이 있어야 함" -#: plperl.c:1231 +#: plperl.c:1260 #, c-format msgid "cannot convert Perl array to non-array type %s" msgstr "Perl 배열형을 비배열형 %s 자료형으로 변환할 수 없음" -#: plperl.c:1333 +#: plperl.c:1362 #, c-format msgid "cannot convert Perl hash to non-composite type %s" msgstr "Perl 해시 자료형을 비복합 %s 자료형으로 변환할 수 없음" -#: plperl.c:1344 +#: plperl.c:1373 #, c-format msgid "" "function returning record called in context that cannot accept type record" msgstr "반환 자료형이 record인데 함수가 그 자료형으로 반환하지 않음" -#: plperl.c:1359 +#: plperl.c:1388 #, c-format msgid "PL/Perl function must return reference to hash or array" msgstr "PL/Perl 함수는 해시나 배열 자료형을 참조하게 반환해야 함" -#: plperl.c:1396 +#: plperl.c:1425 #, c-format msgid "lookup failed for type %s" msgstr "%s 자료형 찾기 실패" -#: plperl.c:1699 +#: plperl.c:1740 #, c-format msgid "$_TD->{new} does not exist" msgstr "$_TD->{new} 없음" -#: plperl.c:1703 +#: plperl.c:1744 #, c-format msgid "$_TD->{new} is not a hash reference" msgstr "$_TD->{new} 자료형이 해시 참조가 아님" -#: plperl.c:1956 plperl.c:2790 +#: plperl.c:1995 plperl.c:2833 #, c-format msgid "PL/Perl functions cannot return type %s" msgstr "PL/Perl 함수는 %s 자료형을 반환할 수 없음" -#: plperl.c:1969 plperl.c:2835 +#: plperl.c:2008 plperl.c:2875 #, c-format msgid "PL/Perl functions cannot accept type %s" msgstr "PL/Perl 함수는 %s 자료형을 사용할 수 없음" -#: plperl.c:2085 +#: plperl.c:2125 #, c-format msgid "didn't get a CODE reference from compiling function \"%s\"" msgstr "\"%s\" 함수를 컴파일 하면서 코드 참조를 구할 수 없음" -#: plperl.c:2177 +#: 
plperl.c:2218 #, c-format msgid "didn't get a return item from function" msgstr "함수에서 반환할 항목을 못 찾음" -#: plperl.c:2220 plperl.c:2286 +#: plperl.c:2262 plperl.c:2329 #, c-format msgid "couldn't fetch $_TD" msgstr "$_TD 못 구함" -#: plperl.c:2244 plperl.c:2306 +#: plperl.c:2286 plperl.c:2349 #, c-format msgid "didn't get a return item from trigger function" msgstr "트리거 함수에서 반환할 항목을 못 찾음" -#: plperl.c:2363 +#: plperl.c:2406 #, c-format msgid "set-valued function called in context that cannot accept a set" msgstr "" "set-values 함수(테이블 리턴 함수)가 set 정의 없이 사용되었습니다 (테이블과 해" "당 열 alias 지정하세요)" -#: plperl.c:2407 +#: plperl.c:2451 #, c-format msgid "" "set-returning PL/Perl function must return reference to array or use " "return_next" msgstr "집합 반환 PL/Perl 함수는 배열 또는 return_next 를 사용해서 반환해야 함" -#: plperl.c:2521 +#: plperl.c:2572 #, c-format msgid "ignoring modified row in DELETE trigger" msgstr "DELETE 트리거에서는 변경된 로우는 무시 함" -#: plperl.c:2529 +#: plperl.c:2580 #, c-format msgid "" "result of PL/Perl trigger function must be undef, \"SKIP\", or \"MODIFY\"" msgstr "" "PL/Perl 트리거 함수의 결과는 undef, \"SKIP\", \"MODIFY\" 중 하나여야 함" -#: plperl.c:2708 plperl.c:2718 -#, c-format -msgid "out of memory" -msgstr "메모리 부족" - -#: plperl.c:2782 +#: plperl.c:2828 #, c-format msgid "trigger functions can only be called as triggers" msgstr "트리거 함수는 트리거로만 호출될 수 있음" -#: plperl.c:3121 +#: plperl.c:3170 #, c-format msgid "query result has too many rows to fit in a Perl array" msgstr "쿼리 결과가 Perl 배열에 담기에는 너무 많습니다" -#: plperl.c:3166 +#: plperl.c:3240 #, c-format msgid "cannot use return_next in a non-SETOF function" msgstr "SETOF 함수가 아닌 경우에는 return_next 구문을 쓸 수 없음" -#: plperl.c:3220 +#: plperl.c:3300 #, c-format msgid "" "SETOF-composite-returning PL/Perl function must call return_next with " @@ -218,17 +218,17 @@ msgstr "" "SETOF-composite-returning PL/Perl 함수는 return_next 에서 해시 자료형을 참조" "해야 함" -#: plperl.c:3948 +#: plperl.c:4009 #, c-format msgid "PL/Perl function \"%s\"" msgstr "\"%s\" PL/Perl 함수" -#: plperl.c:3960 +#: plperl.c:4021 #, c-format msgid "compilation of PL/Perl function \"%s\"" msgstr "\"%s\" PL/Perl 함수 컴필레이션" -#: plperl.c:3969 +#: plperl.c:4030 #, c-format msgid "PL/Perl anonymous code block" msgstr "PL/Perl 익명 코드 블럭" diff --git a/src/pl/plperl/po/ru.po b/src/pl/plperl/po/ru.po index ba7e6b7a7d..243767b728 100644 --- a/src/pl/plperl/po/ru.po +++ b/src/pl/plperl/po/ru.po @@ -2,13 +2,13 @@ # Copyright (C) 2012-2016 PostgreSQL Global Development Group # This file is distributed under the same license as the PostgreSQL package. # Alexander Lakhin , 2012-2017. -# msgid "" msgstr "" "Project-Id-Version: plperl (PostgreSQL current)\n" "Report-Msgid-Bugs-To: pgsql-bugs@postgresql.org\n" -"POT-Creation-Date: 2017-03-27 12:37+0000\n" +"POT-Creation-Date: 2017-08-17 23:07+0000\n" "PO-Revision-Date: 2017-03-29 13:41+0300\n" +"Last-Translator: Alexander Lakhin \n" "Language-Team: Russian \n" "Language: ru\n" "MIME-Version: 1.0\n" @@ -16,97 +16,96 @@ msgstr "" "Content-Transfer-Encoding: 8bit\n" "Plural-Forms: nplurals=3; plural=(n%10==1 && n%100!=11 ? 0 : n%10>=2 && n" "%10<=4 && (n%100<10 || n%100>=20) ? 1 : 2);\n" -"Last-Translator: Alexander Lakhin \n" -#: plperl.c:390 +#: plperl.c:407 msgid "" "If true, trusted and untrusted Perl code will be compiled in strict mode." msgstr "" "Если этот параметр равен true, доверенный и недоверенный код Perl будет " "компилироваться в строгом режиме." -#: plperl.c:404 +#: plperl.c:421 msgid "" "Perl initialization code to execute when a Perl interpreter is initialized." 
msgstr "" "Код инициализации Perl, который выполняется при инициализации интерпретатора " "Perl." -#: plperl.c:426 +#: plperl.c:443 msgid "Perl initialization code to execute once when plperl is first used." msgstr "" "Код инициализации Perl, который выполняется один раз, при первом " "использовании plperl." -#: plperl.c:434 +#: plperl.c:451 msgid "Perl initialization code to execute once when plperlu is first used." msgstr "" "Код инициализации Perl, который выполняется один раз, при первом " "использовании plperlu." -#: plperl.c:631 +#: plperl.c:648 #, c-format msgid "cannot allocate multiple Perl interpreters on this platform" msgstr "на этой платформе нельзя запустить множество интерпретаторов Perl" -#: plperl.c:651 plperl.c:826 plperl.c:832 plperl.c:946 plperl.c:958 -#: plperl.c:1001 plperl.c:1022 plperl.c:2074 plperl.c:2183 plperl.c:2250 -#: plperl.c:2312 +#: plperl.c:671 plperl.c:855 plperl.c:861 plperl.c:978 plperl.c:990 +#: plperl.c:1033 plperl.c:1056 plperl.c:2120 plperl.c:2230 plperl.c:2298 +#: plperl.c:2361 #, c-format msgid "%s" msgstr "%s" -#: plperl.c:652 +#: plperl.c:672 #, c-format msgid "while executing PostgreSQL::InServer::SPI::bootstrap" msgstr "при выполнении PostgreSQL::InServer::SPI::bootstrap" -#: plperl.c:827 +#: plperl.c:856 #, c-format msgid "while parsing Perl initialization" msgstr "при разборе параметров инициализации Perl" -#: plperl.c:833 +#: plperl.c:862 #, c-format msgid "while running Perl initialization" msgstr "при выполнении инициализации Perl" -#: plperl.c:947 +#: plperl.c:979 #, c-format msgid "while executing PLC_TRUSTED" msgstr "при выполнении PLC_TRUSTED" -#: plperl.c:959 +#: plperl.c:991 #, c-format msgid "while executing utf8fix" msgstr "при выполнении utf8fix" -#: plperl.c:1002 +#: plperl.c:1034 #, c-format msgid "while executing plperl.on_plperl_init" msgstr "при выполнении plperl.on_plperl_init" -#: plperl.c:1023 +#: plperl.c:1057 #, c-format msgid "while executing plperl.on_plperlu_init" msgstr "при выполнении plperl.on_plperlu_init" -#: plperl.c:1067 plperl.c:1719 +#: plperl.c:1102 plperl.c:1764 #, c-format msgid "Perl hash contains nonexistent column \"%s\"" msgstr "Perl-хеш содержит несуществующий столбец \"%s\"" -#: plperl.c:1072 plperl.c:1724 +#: plperl.c:1107 plperl.c:1769 #, c-format msgid "cannot set system attribute \"%s\"" msgstr "установить системный атрибут \"%s\" нельзя" -#: plperl.c:1157 +#: plperl.c:1195 #, c-format msgid "number of array dimensions (%d) exceeds the maximum allowed (%d)" msgstr "число размерностей массива (%d) превышает предел (%d)" -#: plperl.c:1169 plperl.c:1186 +#: plperl.c:1207 plperl.c:1224 #, c-format msgid "" "multidimensional arrays must have array expressions with matching dimensions" @@ -114,80 +113,80 @@ msgstr "" "для многомерных массивов должны задаваться выражения с соответствующими " "размерностями" -#: plperl.c:1221 +#: plperl.c:1260 #, c-format msgid "cannot convert Perl array to non-array type %s" msgstr "Perl-массив нельзя преобразовать в тип не массива %s" -#: plperl.c:1323 +#: plperl.c:1362 #, c-format msgid "cannot convert Perl hash to non-composite type %s" msgstr "Perl-хеш нельзя преобразовать в не составной тип %s" -#: plperl.c:1334 +#: plperl.c:1373 #, c-format msgid "" "function returning record called in context that cannot accept type record" msgstr "" "функция, возвращающая запись, вызвана в контексте, не допускающем этот тип" -#: plperl.c:1349 +#: plperl.c:1388 #, c-format msgid "PL/Perl function must return reference to hash or array" msgstr "функция PL/Perl должна возвращать 
ссылку на хеш или массив" -#: plperl.c:1386 +#: plperl.c:1425 #, c-format msgid "lookup failed for type %s" msgstr "найти тип %s не удалось" -#: plperl.c:1695 +#: plperl.c:1740 #, c-format msgid "$_TD->{new} does not exist" msgstr "$_TD->{new} не существует" -#: plperl.c:1699 +#: plperl.c:1744 #, c-format msgid "$_TD->{new} is not a hash reference" msgstr "$_TD->{new} - не ссылка на хеш" -#: plperl.c:1950 plperl.c:2778 +#: plperl.c:1995 plperl.c:2833 #, c-format msgid "PL/Perl functions cannot return type %s" msgstr "функции PL/Perl не могут возвращать тип %s" -#: plperl.c:1963 plperl.c:2820 +#: plperl.c:2008 plperl.c:2875 #, c-format msgid "PL/Perl functions cannot accept type %s" msgstr "функции PL/Perl не могут принимать тип %s" -#: plperl.c:2079 +#: plperl.c:2125 #, c-format msgid "didn't get a CODE reference from compiling function \"%s\"" msgstr "не удалось получить ссылку на код после компиляции функции \"%s\"" -#: plperl.c:2171 +#: plperl.c:2218 #, c-format msgid "didn't get a return item from function" msgstr "не удалось получить возвращаемый элемент от функции" -#: plperl.c:2214 plperl.c:2280 +#: plperl.c:2262 plperl.c:2329 #, c-format msgid "couldn't fetch $_TD" msgstr "не удалось получить $_TD" -#: plperl.c:2238 plperl.c:2300 +#: plperl.c:2286 plperl.c:2349 #, c-format msgid "didn't get a return item from trigger function" msgstr "не удалось получить возвращаемый элемент от триггерной функции" -#: plperl.c:2357 +#: plperl.c:2406 #, c-format msgid "set-valued function called in context that cannot accept a set" msgstr "" "функция, возвращающая множество, вызвана в контексте, где ему нет места" -#: plperl.c:2401 +#: plperl.c:2451 #, c-format msgid "" "set-returning PL/Perl function must return reference to array or use " @@ -196,12 +195,12 @@ msgstr "" "функция PL/Perl, возвращающая множество, должна возвращать ссылку на массив " "или вызывать return_next" -#: plperl.c:2515 +#: plperl.c:2572 #, c-format msgid "ignoring modified row in DELETE trigger" msgstr "в триггере DELETE изменённая строка игнорируется" -#: plperl.c:2523 +#: plperl.c:2580 #, c-format msgid "" "result of PL/Perl trigger function must be undef, \"SKIP\", or \"MODIFY\"" @@ -209,24 +208,24 @@ msgstr "" "результатом триггерной функции PL/Perl должен быть undef, \"SKIP\" или " "\"MODIFY\"" -#: plperl.c:2773 +#: plperl.c:2828 #, c-format msgid "trigger functions can only be called as triggers" msgstr "триггерные функции могут вызываться только в триггерах" -#: plperl.c:3113 +#: plperl.c:3170 #, c-format msgid "query result has too many rows to fit in a Perl array" msgstr "" "результат запроса содержит слишком много строк для передачи в массиве Perl" -#: plperl.c:3158 +#: plperl.c:3240 #, c-format msgid "cannot use return_next in a non-SETOF function" msgstr "" "return_next можно использовать только в функциях, возвращающих множества" -#: plperl.c:3212 +#: plperl.c:3300 #, c-format msgid "" "SETOF-composite-returning PL/Perl function must call return_next with " @@ -235,17 +234,17 @@ msgstr "" "функция PL/Perl, возвращающая составное множество, должна вызывать " "return_next со ссылкой на хеш" -#: plperl.c:3875 +#: plperl.c:4009 #, c-format msgid "PL/Perl function \"%s\"" msgstr "функция PL/Perl \"%s\"" -#: plperl.c:3887 +#: plperl.c:4021 #, c-format msgid "compilation of PL/Perl function \"%s\"" msgstr "компиляция функции PL/Perl \"%s\"" -#: plperl.c:3896 +#: plperl.c:4030 #, c-format msgid "PL/Perl anonymous code block" msgstr "анонимный блок кода PL/Perl" diff --git a/src/pl/plperl/po/tr.po 
b/src/pl/plperl/po/tr.po index 8c46740be9..9a831e9956 100644 --- a/src/pl/plperl/po/tr.po +++ b/src/pl/plperl/po/tr.po @@ -7,198 +7,227 @@ msgid "" msgstr "" "Project-Id-Version: PostgreSQL 8.4\n" "Report-Msgid-Bugs-To: pgsql-bugs@postgresql.org\n" -"POT-Creation-Date: 2011-08-29 20:38+0000\n" -"PO-Revision-Date: 2011-08-30 01:44+0200\n" +"POT-Creation-Date: 2018-02-22 00:08+0000\n" +"PO-Revision-Date: 2018-02-22 11:49+0300\n" "Last-Translator: Devrim GÜNDÜZ \n" "Language-Team: Turkish \n" "Language: tr\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" -"X-Poedit-Language: Turkish\n" -"X-Poedit-Country: Turkey\n" +"X-Generator: Poedit 1.8.7.1\n" -#: plperl.c:362 +#: plperl.c:407 msgid "If true, trusted and untrusted Perl code will be compiled in strict mode." msgstr "Doğru ise, trusted ve untrusted Perl kodları strict modda derlenecektir" -#: plperl.c:376 +#: plperl.c:421 msgid "Perl initialization code to execute when a Perl interpreter is initialized." msgstr "Perl yorumlayıcısı ilklendirildiğinde çalışacak Perl ilklendirme kodu." -#: plperl.c:398 +#: plperl.c:443 msgid "Perl initialization code to execute once when plperl is first used." msgstr "plperl ilk kez kullanıldığında çalışacak Perl ilklendirme kodu" -#: plperl.c:406 +#: plperl.c:451 msgid "Perl initialization code to execute once when plperlu is first used." msgstr "plperlu ilk kez kullanıldığında çalışacak Perl ilklendirme kodu" -#: plperl.c:623 -#: plperl.c:785 -#: plperl.c:790 -#: plperl.c:894 -#: plperl.c:905 -#: plperl.c:946 -#: plperl.c:967 -#: plperl.c:1868 -#: plperl.c:1963 -#: plperl.c:2025 +#: plperl.c:648 +#, c-format +msgid "cannot allocate multiple Perl interpreters on this platform" +msgstr "bu platformda birden fazla Perl interpreter ayrılamıyor" + +#: plperl.c:671 plperl.c:855 plperl.c:861 plperl.c:978 plperl.c:990 +#: plperl.c:1033 plperl.c:1056 plperl.c:2120 plperl.c:2230 plperl.c:2298 +#: plperl.c:2361 #, c-format msgid "%s" msgstr "%s" -#: plperl.c:624 +#: plperl.c:672 #, c-format msgid "while executing PostgreSQL::InServer::SPI::bootstrap" msgstr "PostgreSQL::InServer::SPI::bootstrap çalıştırılırken" -#: plperl.c:786 +#: plperl.c:856 #, c-format msgid "while parsing Perl initialization" msgstr "Perl ilklendirmesi ayrıştırılırken" -#: plperl.c:791 +#: plperl.c:862 #, c-format msgid "while running Perl initialization" msgstr "Perl ilklendirmesi sırasında" -#: plperl.c:895 +#: plperl.c:979 #, c-format msgid "while executing PLC_TRUSTED" msgstr " PLC_TRUSTED çalıştırılırken" -#: plperl.c:906 +#: plperl.c:991 #, c-format msgid "while executing utf8fix" msgstr "utf8fix çalıştırılırken" -#: plperl.c:947 +#: plperl.c:1034 #, c-format msgid "while executing plperl.on_plperl_init" msgstr "plperl.on_plperl_init çalıştırılırken" -#: plperl.c:968 +#: plperl.c:1057 #, c-format msgid "while executing plperl.on_plperlu_init" msgstr "plperl.on_plperlu_init çalıştırılırken" -#: plperl.c:1014 -#: plperl.c:1541 +#: plperl.c:1102 plperl.c:1764 #, c-format msgid "Perl hash contains nonexistent column \"%s\"" msgstr "Perl hashi olmayan kolonu içeriyor: \"%s\"" -#: plperl.c:1097 +#: plperl.c:1107 plperl.c:1769 +#, c-format +msgid "cannot set system attribute \"%s\"" +msgstr "\"%s\" sistem niteliği ayarlanamıyor" + +#: plperl.c:1195 #, c-format msgid "number of array dimensions (%d) exceeds the maximum allowed (%d)" msgstr "dizin boyut sayısı (%d), izin verilern en yüksek değerini (%d) aşmaktadır" -#: plperl.c:1111 +#: plperl.c:1207 plperl.c:1224 #, c-format msgid 
"multidimensional arrays must have array expressions with matching dimensions" msgstr "çok boyutlu dizinler boyut sayısı kadar dizin ifade sayısına sahip olmalıdırlar" -#: plperl.c:1219 +#: plperl.c:1260 +#, c-format +msgid "cannot convert Perl array to non-array type %s" +msgstr "Perl dizisi (array) dizi olmayan %s tipine dönüştürülemiyor" + +#: plperl.c:1362 +#, c-format +msgid "cannot convert Perl hash to non-composite type %s" +msgstr "Perl hash'i kompozit olmayan %s tipine dönüştürülemez" + +#: plperl.c:1373 +#, c-format +msgid "function returning record called in context that cannot accept type record" +msgstr "tip kaydı içermeyen alanda çağırılan ve kayıt döndüren fonksiyon" + +#: plperl.c:1388 #, c-format msgid "PL/Perl function must return reference to hash or array" msgstr "PL/Perl fonksiyonu hash ya da dizine referans dönmelidir" -#: plperl.c:1518 +#: plperl.c:1425 +#, c-format +msgid "lookup failed for type %s" +msgstr "%s tipi için arama (lookup) başarısız oldu" + +#: plperl.c:1740 #, c-format msgid "$_TD->{new} does not exist" msgstr "$_TD->{new} mevcut değil" -#: plperl.c:1522 +#: plperl.c:1744 #, c-format msgid "$_TD->{new} is not a hash reference" msgstr "$_TD->{new} hash referansı değil" -#: plperl.c:1745 -#: plperl.c:2476 +#: plperl.c:1995 plperl.c:2833 #, c-format msgid "PL/Perl functions cannot return type %s" msgstr "PL/Perl fonksiyonları %s veri tipini döndüremezler" -#: plperl.c:1758 -#: plperl.c:2523 +#: plperl.c:2008 plperl.c:2875 #, c-format msgid "PL/Perl functions cannot accept type %s" msgstr "PL/Perl fonksiyonları %s tipini kabul etmez" -#: plperl.c:1872 +#: plperl.c:2125 #, c-format msgid "didn't get a CODE reference from compiling function \"%s\"" msgstr "\"%s\" fonksiyonu derlenirken CODE referansı alınamadı" -#: plperl.c:2077 +#: plperl.c:2218 #, c-format -msgid "set-valued function called in context that cannot accept a set" -msgstr "set değerini kabul etmediği ortamda set değeri alan fonksiyon çağırılmış" +msgid "didn't get a return item from function" +msgstr "fonksiyonden dönüş (return) değeri alınamadı" -#: plperl.c:2121 +#: plperl.c:2262 plperl.c:2329 #, c-format -msgid "set-returning PL/Perl function must return reference to array or use return_next" -msgstr "se dönen PL/Perl fonksiyonu return_next kullanmalı ya da bir diziye referans dönmelidir" +msgid "couldn't fetch $_TD" +msgstr "$_TD getirilemedi" -#: plperl.c:2150 +#: plperl.c:2286 plperl.c:2349 #, c-format -msgid "composite-returning PL/Perl function must return reference to hash" -msgstr "composite döndüren PL/Perl fonksiyonu hash'e referans dönmelidir" +msgid "didn't get a return item from trigger function" +msgstr "trigger fonksiyonundan dönüş (return) değeri alınamadı" -#: plperl.c:2159 +#: plperl.c:2406 #, c-format -msgid "function returning record called in context that cannot accept type record" -msgstr "tip kaydı içermeyen alanda çağırılan ve kayıt döndüren fonksiyon" +msgid "set-valued function called in context that cannot accept a set" +msgstr "set değerini kabul etmediği ortamda set değeri alan fonksiyon çağırılmış" -#: plperl.c:2273 +#: plperl.c:2451 +#, c-format +msgid "set-returning PL/Perl function must return reference to array or use return_next" +msgstr "se dönen PL/Perl fonksiyonu return_next kullanmalı ya da bir diziye referans dönmelidir" + +#: plperl.c:2572 #, c-format msgid "ignoring modified row in DELETE trigger" msgstr "DELETE triggerındaki değiştirilmiş satır gözardı ediliyor" -#: plperl.c:2281 +#: plperl.c:2580 #, c-format msgid "result of PL/Perl trigger 
function must be undef, \"SKIP\", or \"MODIFY\"" msgstr "PL/Perl trigger fonksiyonun sonucu undef, \"SKIP\" ya da \"MODIFY\" olmalıdır" -#: plperl.c:2407 -#: plperl.c:2413 -#, c-format -msgid "out of memory" -msgstr "yetersiz bellek" - -#: plperl.c:2467 +#: plperl.c:2828 #, c-format msgid "trigger functions can only be called as triggers" msgstr "trigger fonksiyonları sadece trigger olarak çağırılabilirler" -#: plperl.c:2843 +#: plperl.c:3170 +#, c-format +msgid "query result has too many rows to fit in a Perl array" +msgstr "sorgu sonucunda bir Perl dizisine (array) sığabilecekten çok fazla satır var" + +#: plperl.c:3240 #, c-format msgid "cannot use return_next in a non-SETOF function" msgstr "SETOF olmayan bir fonksiyonda return_next kullanılamaz" -#: plperl.c:2849 +#: plperl.c:3300 #, c-format msgid "SETOF-composite-returning PL/Perl function must call return_next with reference to hash" msgstr "SETOF-composite döndüren PL/Perl fonksiyonları return_next'i hash'e referans olarak çağırmalıdır" -#: plperl.c:3615 +#: plperl.c:4009 #, c-format msgid "PL/Perl function \"%s\"" msgstr "\"%s\" PL/Perl fonksiyonu" -#: plperl.c:3627 +#: plperl.c:4021 #, c-format msgid "compilation of PL/Perl function \"%s\"" msgstr "\"%s\" PL/Perl fonksiyonunun derlenmesi" -#: plperl.c:3636 +#: plperl.c:4030 #, c-format msgid "PL/Perl anonymous code block" msgstr "PL/Perl anonim kod bloğu" +#~ msgid "error from Perl function \"%s\": %s" +#~ msgstr "Perl fonksiyonunda hata: \"%s\": %s" + #~ msgid "creation of Perl function \"%s\" failed: %s" #~ msgstr " \"%s\" Perl fonksiyonunun yaratılması başarısız oldu: %s" -#~ msgid "error from Perl function \"%s\": %s" -#~ msgstr "Perl fonksiyonunda hata: \"%s\": %s" +#~ msgid "out of memory" +#~ msgstr "yetersiz bellek" + +#~ msgid "composite-returning PL/Perl function must return reference to hash" +#~ msgstr "composite döndüren PL/Perl fonksiyonu hash'e referans dönmelidir" diff --git a/src/pl/plperl/po/vi.po b/src/pl/plperl/po/vi.po new file mode 100644 index 0000000000..15ba87477c --- /dev/null +++ b/src/pl/plperl/po/vi.po @@ -0,0 +1,242 @@ +# LANGUAGE message translation file for plperl +# Copyright (C) 2018 PostgreSQL Global Development Group +# This file is distributed under the same license as the plperl (PostgreSQL) package. +# FIRST AUTHOR , 2018. +# +msgid "" +msgstr "" +"Project-Id-Version: plperl (PostgreSQL) 11\n" +"Report-Msgid-Bugs-To: pgsql-bugs@postgresql.org\n" +"POT-Creation-Date: 2018-04-22 12:08+0000\n" +"PO-Revision-Date: 2018-04-29 23:57+0900\n" +"Language-Team: \n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=UTF-8\n" +"Content-Transfer-Encoding: 8bit\n" +"X-Generator: Poedit 2.0.6\n" +"Last-Translator: Dang Minh Huong \n" +"Plural-Forms: nplurals=1; plural=0;\n" +"Language: vi_VN\n" + +#: plperl.c:409 +msgid "" +"If true, trusted and untrusted Perl code will be compiled in strict mode." +msgstr "" +"Nếu đúng, mã perl đáng tin cậy(PL/Perl ) và không đáng tin cậy(PL/PerlU) sẽ " +"được biên dịch trong chế độ strict." + +#: plperl.c:423 +msgid "" +"Perl initialization code to execute when a Perl interpreter is initialized." +msgstr "Mã Perl được thực thi khi trình thông dịch Perl được khởi tạo." + +#: plperl.c:445 +msgid "Perl initialization code to execute once when plperl is first used." +msgstr "Mã Perl được thực thi khi plperl được sử dụng lần đầu tiên." + +#: plperl.c:453 +msgid "Perl initialization code to execute once when plperlu is first used." +msgstr "Mã Perl được thực thi khi plperlu được sử dụng lần đầu tiên." 
+ +#: plperl.c:650 +#, c-format +msgid "cannot allocate multiple Perl interpreters on this platform" +msgstr "không thể cấp phát nhiều trình thông dịch Perl trên hệ điều hành này" + +#: plperl.c:673 plperl.c:857 plperl.c:863 plperl.c:980 plperl.c:992 +#: plperl.c:1035 plperl.c:1058 plperl.c:2141 plperl.c:2251 plperl.c:2319 +#: plperl.c:2382 +#, c-format +msgid "%s" +msgstr "%s" + +#: plperl.c:674 +#, c-format +msgid "while executing PostgreSQL::InServer::SPI::bootstrap" +msgstr "trong khi thực thi PostgreSQL::InServer::SPI::bootstrap" + +#: plperl.c:858 +#, c-format +msgid "while parsing Perl initialization" +msgstr "trong khi phân tích cú pháp khởi tạo Perl" + +#: plperl.c:864 +#, c-format +msgid "while running Perl initialization" +msgstr "trong khi chạy cú pháp khởi tạo Perl" + +#: plperl.c:981 +#, c-format +msgid "while executing PLC_TRUSTED" +msgstr "trong khi chạy PLC_TRUSTED" + +#: plperl.c:993 +#, c-format +msgid "while executing utf8fix" +msgstr "trong khi thực thi utf8fix" + +#: plperl.c:1036 +#, c-format +msgid "while executing plperl.on_plperl_init" +msgstr "trong khi thực thi plperl.on_plperl_init" + +#: plperl.c:1059 +#, c-format +msgid "while executing plperl.on_plperlu_init" +msgstr "trong khi thực thi plperl.plperlu_init" + +#: plperl.c:1105 plperl.c:1785 +#, c-format +msgid "Perl hash contains nonexistent column \"%s\"" +msgstr "Giá trị băm Perl chứa cột không tồn tại \"%s\"" + +#: plperl.c:1110 plperl.c:1790 +#, c-format +msgid "cannot set system attribute \"%s\"" +msgstr "không thể thiết lập attribute hệ thống \"%s\"" + +#: plperl.c:1198 +#, c-format +msgid "number of array dimensions (%d) exceeds the maximum allowed (%d)" +msgstr "số lượng chiều của mảng (%d) vượt quá số lượng tối đa cho phép (%d)" + +#: plperl.c:1210 plperl.c:1227 +#, c-format +msgid "" +"multidimensional arrays must have array expressions with matching dimensions" +msgstr "mảng đa chiều phải có biểu thức mảng tương ứng với các chiều" + +#: plperl.c:1263 +#, c-format +msgid "cannot convert Perl array to non-array type %s" +msgstr "không thể chuyển đổi mảng Perl thành kiểu không phải mảng %s" + +#: plperl.c:1366 +#, c-format +msgid "cannot convert Perl hash to non-composite type %s" +msgstr "" +"không thể chuyển đổi giá trị băm Perl thành kiểu không phải-composite %s" + +#: plperl.c:1388 plperl.c:3286 +#, c-format +msgid "" +"function returning record called in context that cannot accept type record" +msgstr "" +"hàm trả về bản ghi được gọi trong ngữ cảnh không thể chấp nhận kiểu bản ghi" + +#: plperl.c:1408 +#, c-format +msgid "PL/Perl function must return reference to hash or array" +msgstr "Hàm PL/Perl phải trả về tham thiếu tới giá trị băm hoặc mảng" + +#: plperl.c:1445 +#, c-format +msgid "lookup failed for type %s" +msgstr "không tìm thấy kiểu dữ liệu %s" + +#: plperl.c:1760 +#, c-format +msgid "$_TD->{new} does not exist" +msgstr "$_TD->{new} không tồn tại" + +#: plperl.c:1764 +#, c-format +msgid "$_TD->{new} is not a hash reference" +msgstr "$_TD->{new} không phải là một tham chiếu giá trị băm" + +#: plperl.c:2016 plperl.c:2858 +#, c-format +msgid "PL/Perl functions cannot return type %s" +msgstr "Hàm PL/Perl không thể trả về kiểu %s" + +#: plperl.c:2029 plperl.c:2899 +#, c-format +msgid "PL/Perl functions cannot accept type %s" +msgstr "Hàm PL/Perl không thể chấp nhận kiểu %s" + +#: plperl.c:2146 +#, c-format +msgid "didn't get a CODE reference from compiling function \"%s\"" +msgstr "không nhận được tham chiếu CODE từ hàm biên dịch \"%s\"" + +#: plperl.c:2239 +#, c-format +msgid "didn't get a 
return item from function" +msgstr "không nhận được một mục trả về từ hàm" + +#: plperl.c:2283 plperl.c:2350 +#, c-format +msgid "couldn't fetch $_TD" +msgstr "không thể fetch $_TD" + +#: plperl.c:2307 plperl.c:2370 +#, c-format +msgid "didn't get a return item from trigger function" +msgstr "không nhận được một mục trả về từ hàm trigger" + +#: plperl.c:2431 +#, c-format +msgid "set-valued function called in context that cannot accept a set" +msgstr "" +"hàm thiết lập giá trị được gọi trong ngữ cảnh không thể chấp nhận một tập " +"hợp" + +#: plperl.c:2476 +#, c-format +msgid "" +"set-returning PL/Perl function must return reference to array or use " +"return_next" +msgstr "" +"hàm thiết lập-trả về PL/Perl phải trả về tham chiếu tới mảng hay sử dụng " +"return_next" + +#: plperl.c:2597 +#, c-format +msgid "ignoring modified row in DELETE trigger" +msgstr "bỏ qua hàng đã sửa đổi trong trigger DELETE" + +#: plperl.c:2605 +#, c-format +msgid "" +"result of PL/Perl trigger function must be undef, \"SKIP\", or \"MODIFY\"" +msgstr "" +"kết quả của hàm trigger PL/Perl phải là undef, \"SKIP\" hoặc \"MODIFY\"" + +#: plperl.c:2853 +#, c-format +msgid "trigger functions can only be called as triggers" +msgstr "các hàm trigger chỉ có thể được gọi như những trigger" + +#: plperl.c:3193 +#, c-format +msgid "query result has too many rows to fit in a Perl array" +msgstr "kết quả truy vấn có quá nhiều hàng có thể vừa với một mảng Perl" + +#: plperl.c:3263 +#, c-format +msgid "cannot use return_next in a non-SETOF function" +msgstr "không thể sử dụng return_next trong hàm không phải-SETOF" + +#: plperl.c:3337 +#, c-format +msgid "" +"SETOF-composite-returning PL/Perl function must call return_next with " +"reference to hash" +msgstr "" +"Hàm PL/Perl trả về SETOF-composite phải gọi return_next với tham chiếu tới " +"giá trị băm" + +#: plperl.c:4115 +#, c-format +msgid "PL/Perl function \"%s\"" +msgstr "Hàm PL/Perl \"%s\"" + +#: plperl.c:4127 +#, c-format +msgid "compilation of PL/Perl function \"%s\"" +msgstr "biên dịch hàm PL/Perl \"%s\"" + +#: plperl.c:4136 +#, c-format +msgid "PL/Perl anonymous code block" +msgstr "Khối mã ẩn danh PL/Perl" diff --git a/src/pl/plperl/sql/plperl.sql b/src/pl/plperl/sql/plperl.sql index dc6b169464..b0d950b230 100644 --- a/src/pl/plperl/sql/plperl.sql +++ b/src/pl/plperl/sql/plperl.sql @@ -231,6 +231,38 @@ $$ LANGUAGE plperl; SELECT * FROM foo_set_bad(); +CREATE DOMAIN orderedfootype AS footype CHECK ((VALUE).x <= (VALUE).y); + +CREATE OR REPLACE FUNCTION foo_ordered() RETURNS orderedfootype AS $$ + return {x => 3, y => 4}; +$$ LANGUAGE plperl; + +SELECT * FROM foo_ordered(); + +CREATE OR REPLACE FUNCTION foo_ordered() RETURNS orderedfootype AS $$ + return {x => 5, y => 4}; +$$ LANGUAGE plperl; + +SELECT * FROM foo_ordered(); -- fail + +CREATE OR REPLACE FUNCTION foo_ordered_set() RETURNS SETOF orderedfootype AS $$ +return [ + {x => 3, y => 4}, + {x => 4, y => 7} +]; +$$ LANGUAGE plperl; + +SELECT * FROM foo_ordered_set(); + +CREATE OR REPLACE FUNCTION foo_ordered_set() RETURNS SETOF orderedfootype AS $$ +return [ + {x => 3, y => 4}, + {x => 9, y => 7} +]; +$$ LANGUAGE plperl; + +SELECT * FROM foo_ordered_set(); -- fail + -- -- Check passing a tuple argument -- @@ -243,6 +275,23 @@ SELECT perl_get_field((11,12), 'x'); SELECT perl_get_field((11,12), 'y'); SELECT perl_get_field((11,12), 'z'); +CREATE OR REPLACE FUNCTION perl_get_cfield(orderedfootype, text) RETURNS integer AS $$ + return $_[0]->{$_[1]}; +$$ LANGUAGE plperl; + +SELECT perl_get_cfield((11,12), 'x'); +SELECT 
perl_get_cfield((11,12), 'y'); +SELECT perl_get_cfield((12,11), 'x'); -- fail + +CREATE OR REPLACE FUNCTION perl_get_rfield(record, text) RETURNS integer AS $$ + return $_[0]->{$_[1]}; +$$ LANGUAGE plperl; + +SELECT perl_get_rfield((11,12), 'f1'); +SELECT perl_get_rfield((11,12)::footype, 'y'); +SELECT perl_get_rfield((11,12)::orderedfootype, 'x'); +SELECT perl_get_rfield((12,11)::orderedfootype, 'x'); -- fail + -- -- Test return_next -- @@ -455,7 +504,7 @@ $$ LANGUAGE plperl; SELECT text_obj(); ------ make sure we can't return a scalar ref +-- test looking through a scalar ref CREATE OR REPLACE FUNCTION text_scalarref() RETURNS text AS $$ my $str = 'str'; return \$str; diff --git a/src/pl/plperl/sql/plperl_call.sql b/src/pl/plperl/sql/plperl_call.sql new file mode 100644 index 0000000000..2cf5461fef --- /dev/null +++ b/src/pl/plperl/sql/plperl_call.sql @@ -0,0 +1,58 @@ +CREATE PROCEDURE test_proc1() +LANGUAGE plperl +AS $$ +undef; +$$; + +CALL test_proc1(); + + +CREATE PROCEDURE test_proc2() +LANGUAGE plperl +AS $$ +return 5 +$$; + +CALL test_proc2(); + + +CREATE TABLE test1 (a int); + +CREATE PROCEDURE test_proc3(x int) +LANGUAGE plperl +AS $$ +spi_exec_query("INSERT INTO test1 VALUES ($_[0])"); +$$; + +CALL test_proc3(55); + +SELECT * FROM test1; + + +-- output arguments + +CREATE PROCEDURE test_proc5(INOUT a text) +LANGUAGE plperl +AS $$ +my ($a) = @_; +return { a => "$a+$a" }; +$$; + +CALL test_proc5('abc'); + + +CREATE PROCEDURE test_proc6(a int, INOUT b int, INOUT c int) +LANGUAGE plperl +AS $$ +my ($a, $b, $c) = @_; +return { b => $b * $a, c => $c * $a }; +$$; + +CALL test_proc6(2, 3, 4); + + +DROP PROCEDURE test_proc1; +DROP PROCEDURE test_proc2; +DROP PROCEDURE test_proc3; + +DROP TABLE test1; diff --git a/src/pl/plperl/sql/plperl_transaction.sql b/src/pl/plperl/sql/plperl_transaction.sql new file mode 100644 index 0000000000..0a60799805 --- /dev/null +++ b/src/pl/plperl/sql/plperl_transaction.sql @@ -0,0 +1,163 @@ +CREATE TABLE test1 (a int, b text); + + +CREATE PROCEDURE transaction_test1() +LANGUAGE plperl +AS $$ +foreach my $i (0..9) { + spi_exec_query("INSERT INTO test1 (a) VALUES ($i)"); + if ($i % 2 == 0) { + spi_commit(); + } else { + spi_rollback(); + } +} +$$; + +CALL transaction_test1(); + +SELECT * FROM test1; + + +TRUNCATE test1; + +DO +LANGUAGE plperl +$$ +foreach my $i (0..9) { + spi_exec_query("INSERT INTO test1 (a) VALUES ($i)"); + if ($i % 2 == 0) { + spi_commit(); + } else { + spi_rollback(); + } +} +$$; + +SELECT * FROM test1; + + +TRUNCATE test1; + +-- not allowed in a function +CREATE FUNCTION transaction_test2() RETURNS int +LANGUAGE plperl +AS $$ +foreach my $i (0..9) { + spi_exec_query("INSERT INTO test1 (a) VALUES ($i)"); + if ($i % 2 == 0) { + spi_commit(); + } else { + spi_rollback(); + } +} +return 1; +$$; + +SELECT transaction_test2(); + +SELECT * FROM test1; + + +-- also not allowed if procedure is called from a function +CREATE FUNCTION transaction_test3() RETURNS int +LANGUAGE plperl +AS $$ +spi_exec_query("CALL transaction_test1()"); +return 1; +$$; + +SELECT transaction_test3(); + +SELECT * FROM test1; + + +-- DO block inside function +CREATE FUNCTION transaction_test4() RETURNS int +LANGUAGE plperl +AS $$ +spi_exec_query('DO LANGUAGE plperl $x$ spi_commit(); $x$'); +return 1; +$$; + +SELECT transaction_test4(); + + +-- commit inside cursor loop +CREATE TABLE test2 (x int); +INSERT INTO test2 VALUES (0), (1), (2), (3), (4); + +TRUNCATE test1; + +DO LANGUAGE plperl $$ +my $sth = spi_query("SELECT * FROM test2 ORDER BY x"); +my $row; +while 
(defined($row = spi_fetchrow($sth))) { + spi_exec_query("INSERT INTO test1 (a) VALUES (" . $row->{x} . ")"); + spi_commit(); +} +$$; + +SELECT * FROM test1; + +-- check that this doesn't leak a holdable portal +SELECT * FROM pg_cursors; + + +-- error in cursor loop with commit +TRUNCATE test1; + +DO LANGUAGE plperl $$ +my $sth = spi_query("SELECT * FROM test2 ORDER BY x"); +my $row; +while (defined($row = spi_fetchrow($sth))) { + spi_exec_query("INSERT INTO test1 (a) VALUES (12/(" . $row->{x} . "-2))"); + spi_commit(); +} +$$; + +SELECT * FROM test1; + +SELECT * FROM pg_cursors; + + +-- rollback inside cursor loop +TRUNCATE test1; + +DO LANGUAGE plperl $$ +my $sth = spi_query("SELECT * FROM test2 ORDER BY x"); +my $row; +while (defined($row = spi_fetchrow($sth))) { + spi_exec_query("INSERT INTO test1 (a) VALUES (" . $row->{x} . ")"); + spi_rollback(); +} +$$; + +SELECT * FROM test1; + +SELECT * FROM pg_cursors; + + +-- first commit then rollback inside cursor loop +TRUNCATE test1; + +DO LANGUAGE plperl $$ +my $sth = spi_query("SELECT * FROM test2 ORDER BY x"); +my $row; +while (defined($row = spi_fetchrow($sth))) { + spi_exec_query("INSERT INTO test1 (a) VALUES (" . $row->{x} . ")"); + if ($row->{x} % 2 == 0) { + spi_commit(); + } else { + spi_rollback(); + } +} +$$; + +SELECT * FROM test1; + +SELECT * FROM pg_cursors; + + +DROP TABLE test1; +DROP TABLE test2; diff --git a/src/pl/plperl/sql/plperl_util.sql b/src/pl/plperl/sql/plperl_util.sql index 143d047802..5b31605ccd 100644 --- a/src/pl/plperl/sql/plperl_util.sql +++ b/src/pl/plperl/sql/plperl_util.sql @@ -102,11 +102,20 @@ select perl_looks_like_number(); -- test encode_typed_literal create type perl_foo as (a integer, b text[]); create type perl_bar as (c perl_foo[]); +create domain perl_foo_pos as perl_foo check((value).a > 0); + create or replace function perl_encode_typed_literal() returns setof text language plperl as $$ return_next encode_typed_literal(undef, 'text'); return_next encode_typed_literal([[1,2,3],[3,2,1],[1,3,2]], 'integer[]'); return_next encode_typed_literal({a => 1, b => ['PL','/','Perl']}, 'perl_foo'); return_next encode_typed_literal({c => [{a => 9, b => ['PostgreSQL']}, {b => ['Postgres'], a => 1}]}, 'perl_bar'); + return_next encode_typed_literal({a => 1, b => ['PL','/','Perl']}, 'perl_foo_pos'); $$; select perl_encode_typed_literal(); + +create or replace function perl_encode_typed_literal() returns setof text language plperl as $$ + return_next encode_typed_literal({a => 0, b => ['PL','/','Perl']}, 'perl_foo_pos'); +$$; + +select perl_encode_typed_literal(); -- fail diff --git a/src/pl/plperl/text2macro.pl b/src/pl/plperl/text2macro.pl index e681fca21a..52fcbe1be1 100644 --- a/src/pl/plperl/text2macro.pl +++ b/src/pl/plperl/text2macro.pl @@ -40,7 +40,7 @@ =head1 DESCRIPTION print qq{ /* * DO NOT EDIT - THIS FILE IS AUTOGENERATED - CHANGES WILL BE LOST - * Written by $0 from @ARGV + * Generated by src/pl/plperl/text2macro.pl */ }; @@ -99,4 +99,5 @@ sub selftest warn "Test string: $string\n"; warn "Result : $result"; die "Failed!" 
if $result ne "$string\n"; + return; } diff --git a/src/pl/plpgsql/src/.gitignore b/src/pl/plpgsql/src/.gitignore index 92387fa3cb..ff6ac965fd 100644 --- a/src/pl/plpgsql/src/.gitignore +++ b/src/pl/plpgsql/src/.gitignore @@ -1,3 +1,6 @@ /pl_gram.c /pl_gram.h /plerrcodes.h +/log/ +/results/ +/tmp_check/ diff --git a/src/pl/plpgsql/src/Makefile b/src/pl/plpgsql/src/Makefile index 95348179ac..25a5a9d448 100644 --- a/src/pl/plpgsql/src/Makefile +++ b/src/pl/plpgsql/src/Makefile @@ -24,6 +24,11 @@ OBJS = pl_gram.o pl_handler.o pl_comp.o pl_exec.o \ DATA = plpgsql.control plpgsql--1.0.sql plpgsql--unpackaged--1.0.sql +REGRESS_OPTS = --dbname=$(PL_TESTDB) + +REGRESS = plpgsql_call plpgsql_control plpgsql_domain plpgsql_record \ + plpgsql_cache plpgsql_transaction plpgsql_varprops + all: all-lib # Shared library stuff @@ -58,19 +63,34 @@ uninstall-headers: pl_gram.o pl_handler.o pl_comp.o pl_exec.o pl_funcs.o pl_scanner.o: plpgsql.h pl_gram.h plerrcodes.h # See notes in src/backend/parser/Makefile about the following two rules -pl_gram.h: pl_gram.c ; +pl_gram.h: pl_gram.c + touch $@ + pl_gram.c: BISONFLAGS += -d # generate plerrcodes.h from src/backend/utils/errcodes.txt plerrcodes.h: $(top_srcdir)/src/backend/utils/errcodes.txt generate-plerrcodes.pl $(PERL) $(srcdir)/generate-plerrcodes.pl $< > $@ + +check: submake + $(pg_regress_check) $(REGRESS_OPTS) $(REGRESS) + +installcheck: submake + $(pg_regress_installcheck) $(REGRESS_OPTS) $(REGRESS) + +.PHONY: submake +submake: + $(MAKE) -C $(top_builddir)/src/test/regress pg_regress$(X) + + distprep: pl_gram.h pl_gram.c plerrcodes.h # pl_gram.c, pl_gram.h and plerrcodes.h are in the distribution tarball, # so they are not cleaned here. clean distclean: clean-lib rm -f $(OBJS) + rm -rf $(pg_regress_clean_files) -maintainer-clean: clean +maintainer-clean: distclean rm -f pl_gram.c pl_gram.h plerrcodes.h diff --git a/src/pl/plpgsql/src/expected/plpgsql_cache.out b/src/pl/plpgsql/src/expected/plpgsql_cache.out new file mode 100644 index 0000000000..c2cf013605 --- /dev/null +++ b/src/pl/plpgsql/src/expected/plpgsql_cache.out @@ -0,0 +1,67 @@ +-- +-- Cache-behavior-dependent test cases +-- +-- These tests logically belong in plpgsql_record.sql, and perhaps someday +-- can be merged back into it. For now, however, their results are different +-- between regular and CLOBBER_CACHE_ALWAYS builds, so we must have two +-- expected-output files to cover both cases. To minimize the maintenance +-- effort resulting from that, this file should contain only tests that +-- do have different results under CLOBBER_CACHE_ALWAYS. 
+-- +-- check behavior with changes of a named rowtype +create table c_mutable(f1 int, f2 text); +create function c_sillyaddone(int) returns int language plpgsql as +$$ declare r c_mutable; begin r.f1 := $1; return r.f1 + 1; end $$; +select c_sillyaddone(42); + c_sillyaddone +--------------- + 43 +(1 row) + +alter table c_mutable drop column f1; +alter table c_mutable add column f1 float8; +-- currently, this fails due to cached plan for "r.f1 + 1" expression +-- (but a CLOBBER_CACHE_ALWAYS build will succeed) +select c_sillyaddone(42); +ERROR: type of parameter 4 (double precision) does not match that when preparing the plan (integer) +CONTEXT: PL/pgSQL function c_sillyaddone(integer) line 1 at RETURN +-- but it's OK if we force plan rebuilding +discard plans; +select c_sillyaddone(42); + c_sillyaddone +--------------- + 43 +(1 row) + +-- check behavior with changes in a record rowtype +create function show_result_type(text) returns text language plpgsql as +$$ + declare + r record; + t text; + begin + execute $1 into r; + select pg_typeof(r.a) into t; + return format('type %s value %s', t, r.a::text); + end; +$$; +select show_result_type('select 1 as a'); + show_result_type +---------------------- + type integer value 1 +(1 row) + +-- currently this fails due to cached plan for pg_typeof expression +-- (but a CLOBBER_CACHE_ALWAYS build will succeed) +select show_result_type('select 2.0 as a'); +ERROR: type of parameter 5 (numeric) does not match that when preparing the plan (integer) +CONTEXT: SQL statement "select pg_typeof(r.a)" +PL/pgSQL function show_result_type(text) line 7 at SQL statement +-- but it's OK if we force plan rebuilding +discard plans; +select show_result_type('select 2.0 as a'); + show_result_type +------------------------ + type numeric value 2.0 +(1 row) + diff --git a/src/pl/plpgsql/src/expected/plpgsql_cache_1.out b/src/pl/plpgsql/src/expected/plpgsql_cache_1.out new file mode 100644 index 0000000000..0ac2c64a15 --- /dev/null +++ b/src/pl/plpgsql/src/expected/plpgsql_cache_1.out @@ -0,0 +1,72 @@ +-- +-- Cache-behavior-dependent test cases +-- +-- These tests logically belong in plpgsql_record.sql, and perhaps someday +-- can be merged back into it. For now, however, their results are different +-- between regular and CLOBBER_CACHE_ALWAYS builds, so we must have two +-- expected-output files to cover both cases. To minimize the maintenance +-- effort resulting from that, this file should contain only tests that +-- do have different results under CLOBBER_CACHE_ALWAYS. 
+-- +-- check behavior with changes of a named rowtype +create table c_mutable(f1 int, f2 text); +create function c_sillyaddone(int) returns int language plpgsql as +$$ declare r c_mutable; begin r.f1 := $1; return r.f1 + 1; end $$; +select c_sillyaddone(42); + c_sillyaddone +--------------- + 43 +(1 row) + +alter table c_mutable drop column f1; +alter table c_mutable add column f1 float8; +-- currently, this fails due to cached plan for "r.f1 + 1" expression +-- (but a CLOBBER_CACHE_ALWAYS build will succeed) +select c_sillyaddone(42); + c_sillyaddone +--------------- + 43 +(1 row) + +-- but it's OK if we force plan rebuilding +discard plans; +select c_sillyaddone(42); + c_sillyaddone +--------------- + 43 +(1 row) + +-- check behavior with changes in a record rowtype +create function show_result_type(text) returns text language plpgsql as +$$ + declare + r record; + t text; + begin + execute $1 into r; + select pg_typeof(r.a) into t; + return format('type %s value %s', t, r.a::text); + end; +$$; +select show_result_type('select 1 as a'); + show_result_type +---------------------- + type integer value 1 +(1 row) + +-- currently this fails due to cached plan for pg_typeof expression +-- (but a CLOBBER_CACHE_ALWAYS build will succeed) +select show_result_type('select 2.0 as a'); + show_result_type +------------------------ + type numeric value 2.0 +(1 row) + +-- but it's OK if we force plan rebuilding +discard plans; +select show_result_type('select 2.0 as a'); + show_result_type +------------------------ + type numeric value 2.0 +(1 row) + diff --git a/src/pl/plpgsql/src/expected/plpgsql_call.out b/src/pl/plpgsql/src/expected/plpgsql_call.out new file mode 100644 index 0000000000..d9c88e85c8 --- /dev/null +++ b/src/pl/plpgsql/src/expected/plpgsql_call.out @@ -0,0 +1,340 @@ +-- +-- Tests for procedures / CALL syntax +-- +CREATE PROCEDURE test_proc1() +LANGUAGE plpgsql +AS $$ +BEGIN + NULL; +END; +$$; +CALL test_proc1(); +-- error: can't return non-NULL +CREATE PROCEDURE test_proc2() +LANGUAGE plpgsql +AS $$ +BEGIN + RETURN 5; +END; +$$; +ERROR: RETURN cannot have a parameter in a procedure +LINE 5: RETURN 5; + ^ +CREATE TABLE test1 (a int); +CREATE PROCEDURE test_proc3(x int) +LANGUAGE plpgsql +AS $$ +BEGIN + INSERT INTO test1 VALUES (x); +END; +$$; +CALL test_proc3(55); +SELECT * FROM test1; + a +---- + 55 +(1 row) + +-- nested CALL +TRUNCATE TABLE test1; +CREATE PROCEDURE test_proc4(y int) +LANGUAGE plpgsql +AS $$ +BEGIN + CALL test_proc3(y); + CALL test_proc3($1); +END; +$$; +CALL test_proc4(66); +SELECT * FROM test1; + a +---- + 66 + 66 +(2 rows) + +CALL test_proc4(66); +SELECT * FROM test1; + a +---- + 66 + 66 + 66 + 66 +(4 rows) + +-- output arguments +CREATE PROCEDURE test_proc5(INOUT a text) +LANGUAGE plpgsql +AS $$ +BEGIN + a := a || '+' || a; +END; +$$; +CALL test_proc5('abc'); + a +--------- + abc+abc +(1 row) + +CREATE PROCEDURE test_proc6(a int, INOUT b int, INOUT c int) +LANGUAGE plpgsql +AS $$ +BEGIN + b := b * a; + c := c * a; +END; +$$; +CALL test_proc6(2, 3, 4); + b | c +---+--- + 6 | 8 +(1 row) + +DO +LANGUAGE plpgsql +$$ +DECLARE + x int := 3; + y int := 4; +BEGIN + CALL test_proc6(2, x, y); + RAISE INFO 'x = %, y = %', x, y; +END; +$$; +INFO: x = 6, y = 8 +DO +LANGUAGE plpgsql +$$ +DECLARE + x int := 3; + y int := 4; +BEGIN + CALL test_proc6(2, x + 1, y); -- error + RAISE INFO 'x = %, y = %', x, y; +END; +$$; +ERROR: procedure parameter "b" is an output parameter but corresponding argument is not writable +CONTEXT: PL/pgSQL function inline_code_block line 6 at CALL +DO 
+LANGUAGE plpgsql +$$ +DECLARE + x int := 3; + y int := 4; +BEGIN + FOR i IN 1..5 LOOP + CALL test_proc6(i, x, y); + RAISE INFO 'x = %, y = %', x, y; + END LOOP; +END; +$$; +INFO: x = 3, y = 4 +INFO: x = 6, y = 8 +INFO: x = 18, y = 24 +INFO: x = 72, y = 96 +INFO: x = 360, y = 480 +-- recursive with output arguments +CREATE PROCEDURE test_proc7(x int, INOUT a int, INOUT b numeric) +LANGUAGE plpgsql +AS $$ +BEGIN +IF x > 1 THEN + a := x / 10; + b := x / 2; + CALL test_proc7(b::int, a, b); +END IF; +END; +$$; +CALL test_proc7(100, -1, -1); + a | b +---+--- + 0 | 1 +(1 row) + +-- named parameters and defaults +CREATE PROCEDURE test_proc8a(INOUT a int, INOUT b int) +LANGUAGE plpgsql +AS $$ +BEGIN + RAISE NOTICE 'a: %, b: %', a, b; + a := a * 10; + b := b + 10; +END; +$$; +CALL test_proc8a(10, 20); +NOTICE: a: 10, b: 20 + a | b +-----+---- + 100 | 30 +(1 row) + +CALL test_proc8a(b => 20, a => 10); +NOTICE: a: 10, b: 20 + a | b +-----+---- + 100 | 30 +(1 row) + +DO $$ +DECLARE _a int; _b int; +BEGIN + _a := 10; _b := 30; + CALL test_proc8a(_a, _b); + RAISE NOTICE '_a: %, _b: %', _a, _b; + CALL test_proc8a(b => _b, a => _a); + RAISE NOTICE '_a: %, _b: %', _a, _b; +END +$$; +NOTICE: a: 10, b: 30 +NOTICE: _a: 100, _b: 40 +NOTICE: a: 100, b: 40 +NOTICE: _a: 1000, _b: 50 +CREATE PROCEDURE test_proc8b(INOUT a int, INOUT b int, INOUT c int) +LANGUAGE plpgsql +AS $$ +BEGIN + RAISE NOTICE 'a: %, b: %, c: %', a, b, c; + a := a * 10; + b := b + 10; + c := c * -10; +END; +$$; +DO $$ +DECLARE _a int; _b int; _c int; +BEGIN + _a := 10; _b := 30; _c := 50; + CALL test_proc8b(_a, _b, _c); + RAISE NOTICE '_a: %, _b: %, _c: %', _a, _b, _c; + CALL test_proc8b(_a, c => _c, b => _b); + RAISE NOTICE '_a: %, _b: %, _c: %', _a, _b, _c; +END +$$; +NOTICE: a: 10, b: 30, c: 50 +NOTICE: _a: 100, _b: 40, _c: -500 +NOTICE: a: 100, b: 40, c: -500 +NOTICE: _a: 1000, _b: 50, _c: 5000 +CREATE PROCEDURE test_proc8c(INOUT a int, INOUT b int, INOUT c int DEFAULT 11) +LANGUAGE plpgsql +AS $$ +BEGIN + RAISE NOTICE 'a: %, b: %, c: %', a, b, c; + a := a * 10; + b := b + 10; + c := c * -10; +END; +$$; +DO $$ +DECLARE _a int; _b int; _c int; +BEGIN + _a := 10; _b := 30; _c := 50; + CALL test_proc8c(_a, _b, _c); + RAISE NOTICE '_a: %, _b: %, _c: %', _a, _b, _c; + _a := 10; _b := 30; _c := 50; + CALL test_proc8c(_a, c => _c, b => _b); + RAISE NOTICE '_a: %, _b: %, _c: %', _a, _b, _c; + _a := 10; _b := 30; _c := 50; + CALL test_proc8c(c => _c, b => _b, a => _a); + RAISE NOTICE '_a: %, _b: %, _c: %', _a, _b, _c; +END +$$; +NOTICE: a: 10, b: 30, c: 50 +NOTICE: _a: 100, _b: 40, _c: -500 +NOTICE: a: 10, b: 30, c: 50 +NOTICE: _a: 100, _b: 40, _c: -500 +NOTICE: a: 10, b: 30, c: 50 +NOTICE: _a: 100, _b: 40, _c: -500 +DO $$ +DECLARE _a int; _b int; _c int; +BEGIN + _a := 10; _b := 30; _c := 50; + CALL test_proc8c(_a, _b); -- fail, no output argument for c + RAISE NOTICE '_a: %, _b: %, _c: %', _a, _b, _c; +END +$$; +ERROR: procedure parameter "c" is an output parameter but corresponding argument is not writable +CONTEXT: PL/pgSQL function inline_code_block line 5 at CALL +DO $$ +DECLARE _a int; _b int; _c int; +BEGIN + _a := 10; _b := 30; _c := 50; + CALL test_proc8c(_a, b => _b); -- fail, no output argument for c + RAISE NOTICE '_a: %, _b: %, _c: %', _a, _b, _c; +END +$$; +ERROR: procedure parameter "c" is an output parameter but corresponding argument is not writable +CONTEXT: PL/pgSQL function inline_code_block line 5 at CALL +-- transition variable assignment +TRUNCATE test1; +CREATE FUNCTION triggerfunc1() RETURNS trigger +LANGUAGE plpgsql +AS 
$$ +DECLARE + z int := 0; +BEGIN + CALL test_proc6(2, NEW.a, NEW.a); + RETURN NEW; +END; +$$; +CREATE TRIGGER t1 BEFORE INSERT ON test1 EXECUTE PROCEDURE triggerfunc1(); +INSERT INTO test1 VALUES (1), (2), (3); +UPDATE test1 SET a = 22 WHERE a = 2; +SELECT * FROM test1 ORDER BY a; + a +---- + 1 + 3 + 22 +(3 rows) + +DROP PROCEDURE test_proc1; +DROP PROCEDURE test_proc3; +DROP PROCEDURE test_proc4; +DROP TABLE test1; +-- more checks for named-parameter handling +CREATE PROCEDURE p1(v_cnt int, v_Text inout text = NULL) +AS $$ +BEGIN + v_Text := 'v_cnt = ' || v_cnt; +END +$$ LANGUAGE plpgsql; +DO $$ +DECLARE + v_Text text; + v_cnt integer := 42; +BEGIN + CALL p1(v_cnt := v_cnt); -- error, must supply something for v_Text + RAISE NOTICE '%', v_Text; +END; +$$; +ERROR: procedure parameter "v_text" is an output parameter but corresponding argument is not writable +CONTEXT: PL/pgSQL function inline_code_block line 6 at CALL +DO $$ +DECLARE + v_Text text; + v_cnt integer := 42; +BEGIN + CALL p1(v_cnt := v_cnt, v_Text := v_Text); + RAISE NOTICE '%', v_Text; +END; +$$; +NOTICE: v_cnt = 42 +DO $$ +DECLARE + v_Text text; +BEGIN + CALL p1(10, v_Text := v_Text); + RAISE NOTICE '%', v_Text; +END; +$$; +NOTICE: v_cnt = 10 +DO $$ +DECLARE + v_Text text; + v_cnt integer; +BEGIN + CALL p1(v_Text := v_Text, v_cnt := v_cnt); + RAISE NOTICE '%', v_Text; +END; +$$; +NOTICE: diff --git a/src/pl/plpgsql/src/expected/plpgsql_control.out b/src/pl/plpgsql/src/expected/plpgsql_control.out new file mode 100644 index 0000000000..73b23a35e5 --- /dev/null +++ b/src/pl/plpgsql/src/expected/plpgsql_control.out @@ -0,0 +1,672 @@ +-- +-- Tests for PL/pgSQL control structures +-- +-- integer FOR loop +do $$ +begin + -- basic case + for i in 1..3 loop + raise notice '1..3: i = %', i; + end loop; + -- with BY, end matches exactly + for i in 1..10 by 3 loop + raise notice '1..10 by 3: i = %', i; + end loop; + -- with BY, end does not match + for i in 1..11 by 3 loop + raise notice '1..11 by 3: i = %', i; + end loop; + -- zero iterations + for i in 1..0 by 3 loop + raise notice '1..0 by 3: i = %', i; + end loop; + -- REVERSE + for i in reverse 10..0 by 3 loop + raise notice 'reverse 10..0 by 3: i = %', i; + end loop; + -- potential overflow + for i in 2147483620..2147483647 by 10 loop + raise notice '2147483620..2147483647 by 10: i = %', i; + end loop; + -- potential overflow, reverse direction + for i in reverse -2147483620..-2147483647 by 10 loop + raise notice 'reverse -2147483620..-2147483647 by 10: i = %', i; + end loop; +end$$; +NOTICE: 1..3: i = 1 +NOTICE: 1..3: i = 2 +NOTICE: 1..3: i = 3 +NOTICE: 1..10 by 3: i = 1 +NOTICE: 1..10 by 3: i = 4 +NOTICE: 1..10 by 3: i = 7 +NOTICE: 1..10 by 3: i = 10 +NOTICE: 1..11 by 3: i = 1 +NOTICE: 1..11 by 3: i = 4 +NOTICE: 1..11 by 3: i = 7 +NOTICE: 1..11 by 3: i = 10 +NOTICE: reverse 10..0 by 3: i = 10 +NOTICE: reverse 10..0 by 3: i = 7 +NOTICE: reverse 10..0 by 3: i = 4 +NOTICE: reverse 10..0 by 3: i = 1 +NOTICE: 2147483620..2147483647 by 10: i = 2147483620 +NOTICE: 2147483620..2147483647 by 10: i = 2147483630 +NOTICE: 2147483620..2147483647 by 10: i = 2147483640 +NOTICE: reverse -2147483620..-2147483647 by 10: i = -2147483620 +NOTICE: reverse -2147483620..-2147483647 by 10: i = -2147483630 +NOTICE: reverse -2147483620..-2147483647 by 10: i = -2147483640 +-- BY can't be zero or negative +do $$ +begin + for i in 1..3 by 0 loop + raise notice '1..3 by 0: i = %', i; + end loop; +end$$; +ERROR: BY value of FOR loop must be greater than zero +CONTEXT: PL/pgSQL function inline_code_block line 
3 at FOR with integer loop variable +do $$ +begin + for i in 1..3 by -1 loop + raise notice '1..3 by -1: i = %', i; + end loop; +end$$; +ERROR: BY value of FOR loop must be greater than zero +CONTEXT: PL/pgSQL function inline_code_block line 3 at FOR with integer loop variable +do $$ +begin + for i in reverse 1..3 by -1 loop + raise notice 'reverse 1..3 by -1: i = %', i; + end loop; +end$$; +ERROR: BY value of FOR loop must be greater than zero +CONTEXT: PL/pgSQL function inline_code_block line 3 at FOR with integer loop variable +-- CONTINUE statement +create table conttesttbl(idx serial, v integer); +insert into conttesttbl(v) values(10); +insert into conttesttbl(v) values(20); +insert into conttesttbl(v) values(30); +insert into conttesttbl(v) values(40); +create function continue_test1() returns void as $$ +declare _i integer = 0; _r record; +begin + raise notice '---1---'; + loop + _i := _i + 1; + raise notice '%', _i; + continue when _i < 10; + exit; + end loop; + + raise notice '---2---'; + <> + loop + _i := _i - 1; + loop + raise notice '%', _i; + continue lbl when _i > 0; + exit lbl; + end loop; + end loop; + + raise notice '---3---'; + <> + while _i < 10 loop + _i := _i + 1; + continue the_loop when _i % 2 = 0; + raise notice '%', _i; + end loop; + + raise notice '---4---'; + for _i in 1..10 loop + begin + -- applies to outer loop, not the nested begin block + continue when _i < 5; + raise notice '%', _i; + end; + end loop; + + raise notice '---5---'; + for _r in select * from conttesttbl loop + continue when _r.v <= 20; + raise notice '%', _r.v; + end loop; + + raise notice '---6---'; + for _r in execute 'select * from conttesttbl' loop + continue when _r.v <= 20; + raise notice '%', _r.v; + end loop; + + raise notice '---7---'; + <> + for _i in 1..3 loop + continue looplabel when _i = 2; + raise notice '%', _i; + end loop; + + raise notice '---8---'; + _i := 1; + while _i <= 3 loop + raise notice '%', _i; + _i := _i + 1; + continue when _i = 3; + end loop; + + raise notice '---9---'; + for _r in select * from conttesttbl order by v limit 1 loop + raise notice '%', _r.v; + continue; + end loop; + + raise notice '---10---'; + for _r in execute 'select * from conttesttbl order by v limit 1' loop + raise notice '%', _r.v; + continue; + end loop; + + raise notice '---11---'; + <> + for _i in 1..2 loop + raise notice 'outer %', _i; + <> + for _j in 1..3 loop + continue outerlooplabel when _j = 2; + raise notice 'inner %', _j; + end loop; + end loop; +end; $$ language plpgsql; +select continue_test1(); +NOTICE: ---1--- +NOTICE: 1 +NOTICE: 2 +NOTICE: 3 +NOTICE: 4 +NOTICE: 5 +NOTICE: 6 +NOTICE: 7 +NOTICE: 8 +NOTICE: 9 +NOTICE: 10 +NOTICE: ---2--- +NOTICE: 9 +NOTICE: 8 +NOTICE: 7 +NOTICE: 6 +NOTICE: 5 +NOTICE: 4 +NOTICE: 3 +NOTICE: 2 +NOTICE: 1 +NOTICE: 0 +NOTICE: ---3--- +NOTICE: 1 +NOTICE: 3 +NOTICE: 5 +NOTICE: 7 +NOTICE: 9 +NOTICE: ---4--- +NOTICE: 5 +NOTICE: 6 +NOTICE: 7 +NOTICE: 8 +NOTICE: 9 +NOTICE: 10 +NOTICE: ---5--- +NOTICE: 30 +NOTICE: 40 +NOTICE: ---6--- +NOTICE: 30 +NOTICE: 40 +NOTICE: ---7--- +NOTICE: 1 +NOTICE: 3 +NOTICE: ---8--- +NOTICE: 1 +NOTICE: 2 +NOTICE: 3 +NOTICE: ---9--- +NOTICE: 10 +NOTICE: ---10--- +NOTICE: 10 +NOTICE: ---11--- +NOTICE: outer 1 +NOTICE: inner 1 +NOTICE: outer 2 +NOTICE: inner 1 + continue_test1 +---------------- + +(1 row) + +-- should fail: CONTINUE is only legal inside a loop +create function continue_error1() returns void as $$ +begin + begin + continue; + end; +end; +$$ language plpgsql; +ERROR: CONTINUE cannot be used outside a loop +LINE 4: 
continue; + ^ +-- should fail: unlabeled EXIT is only legal inside a loop +create function exit_error1() returns void as $$ +begin + begin + exit; + end; +end; +$$ language plpgsql; +ERROR: EXIT cannot be used outside a loop, unless it has a label +LINE 4: exit; + ^ +-- should fail: no such label +create function continue_error2() returns void as $$ +begin + begin + loop + continue no_such_label; + end loop; + end; +end; +$$ language plpgsql; +ERROR: there is no label "no_such_label" attached to any block or loop enclosing this statement +LINE 5: continue no_such_label; + ^ +-- should fail: no such label +create function exit_error2() returns void as $$ +begin + begin + loop + exit no_such_label; + end loop; + end; +end; +$$ language plpgsql; +ERROR: there is no label "no_such_label" attached to any block or loop enclosing this statement +LINE 5: exit no_such_label; + ^ +-- should fail: CONTINUE can't reference the label of a named block +create function continue_error3() returns void as $$ +begin + <> + begin + loop + continue begin_block1; + end loop; + end; +end; +$$ language plpgsql; +ERROR: block label "begin_block1" cannot be used in CONTINUE +LINE 6: continue begin_block1; + ^ +-- On the other hand, EXIT *can* reference the label of a named block +create function exit_block1() returns void as $$ +begin + <> + begin + loop + exit begin_block1; + raise exception 'should not get here'; + end loop; + end; +end; +$$ language plpgsql; +select exit_block1(); + exit_block1 +------------- + +(1 row) + +-- verbose end block and end loop +create function end_label1() returns void as $$ +<> +begin + <> + for i in 1 .. 10 loop + raise notice 'i = %', i; + exit flbl1; + end loop flbl1; + <> + for j in 1 .. 10 loop + raise notice 'j = %', j; + exit flbl2; + end loop; +end blbl; +$$ language plpgsql; +select end_label1(); +NOTICE: i = 1 +NOTICE: j = 1 + end_label1 +------------ + +(1 row) + +-- should fail: undefined end label +create function end_label2() returns void as $$ +begin + for _i in 1 .. 10 loop + exit; + end loop flbl1; +end; +$$ language plpgsql; +ERROR: end label "flbl1" specified for unlabelled block +LINE 5: end loop flbl1; + ^ +-- should fail: end label does not match start label +create function end_label3() returns void as $$ +<> +begin + <> + for _i in 1 .. 10 loop + exit; + end loop outer_label; +end; +$$ language plpgsql; +ERROR: end label "outer_label" differs from block's label "inner_label" +LINE 7: end loop outer_label; + ^ +-- should fail: end label on a block without a start label +create function end_label4() returns void as $$ +<> +begin + for _i in 1 .. 
10 loop + exit; + end loop outer_label; +end; +$$ language plpgsql; +ERROR: end label "outer_label" specified for unlabelled block +LINE 6: end loop outer_label; + ^ +-- unlabeled exit matches no blocks +do $$ +begin +for i in 1..10 loop + <> + begin + begin -- unlabeled block + exit; + raise notice 'should not get here'; + end; + raise notice 'should not get here, either'; + end; + raise notice 'nor here'; +end loop; +raise notice 'should get here'; +end$$; +NOTICE: should get here +-- check exit out of an unlabeled block to a labeled one +do $$ +<> +begin + <> + begin + <> + begin + begin -- unlabeled block + exit innerblock; + raise notice 'should not get here'; + end; + raise notice 'should not get here, either'; + end; + raise notice 'nor here'; + end; + raise notice 'should get here'; +end$$; +NOTICE: should get here +-- unlabeled exit does match a while loop +do $$ +begin + <> + while 1 > 0 loop + <> + while 1 > 0 loop + <> + while 1 > 0 loop + exit; + raise notice 'should not get here'; + end loop; + raise notice 'should get here'; + exit outermostwhile; + raise notice 'should not get here, either'; + end loop; + raise notice 'nor here'; + end loop; + raise notice 'should get here, too'; +end$$; +NOTICE: should get here +NOTICE: should get here, too +-- check exit out of an unlabeled while to a labeled one +do $$ +begin + <> + while 1 > 0 loop + while 1 > 0 loop + exit outerwhile; + raise notice 'should not get here'; + end loop; + raise notice 'should not get here, either'; + end loop; + raise notice 'should get here'; +end$$; +NOTICE: should get here +-- continue to an outer while +do $$ +declare i int := 0; +begin + <> + while i < 2 loop + raise notice 'outermostwhile, i = %', i; + i := i + 1; + <> + while 1 > 0 loop + <> + while 1 > 0 loop + continue outermostwhile; + raise notice 'should not get here'; + end loop; + raise notice 'should not get here, either'; + end loop; + raise notice 'nor here'; + end loop; + raise notice 'out of outermostwhile, i = %', i; +end$$; +NOTICE: outermostwhile, i = 0 +NOTICE: outermostwhile, i = 1 +NOTICE: out of outermostwhile, i = 2 +-- return out of a while +create function return_from_while() returns int language plpgsql as $$ +declare i int := 0; +begin + while i < 10 loop + if i > 2 then + return i; + end if; + i := i + 1; + end loop; + return null; +end$$; +select return_from_while(); + return_from_while +------------------- + 3 +(1 row) + +-- using list of scalars in fori and fore stmts +create function for_vect() returns void as $proc$ +<>declare a integer; b varchar; c varchar; r record; +begin + -- fori + for i in 1 .. 
3 loop + raise notice '%', i; + end loop; + -- fore with record var + for r in select gs as aa, 'BB' as bb, 'CC' as cc from generate_series(1,4) gs loop + raise notice '% % %', r.aa, r.bb, r.cc; + end loop; + -- fore with single scalar + for a in select gs from generate_series(1,4) gs loop + raise notice '%', a; + end loop; + -- fore with multiple scalars + for a,b,c in select gs, 'BB','CC' from generate_series(1,4) gs loop + raise notice '% % %', a, b, c; + end loop; + -- using qualified names in fors, fore is enabled, disabled only for fori + for lbl.a, lbl.b, lbl.c in execute $$select gs, 'bb','cc' from generate_series(1,4) gs$$ loop + raise notice '% % %', a, b, c; + end loop; +end; +$proc$ language plpgsql; +select for_vect(); +NOTICE: 1 +NOTICE: 2 +NOTICE: 3 +NOTICE: 1 BB CC +NOTICE: 2 BB CC +NOTICE: 3 BB CC +NOTICE: 4 BB CC +NOTICE: 1 +NOTICE: 2 +NOTICE: 3 +NOTICE: 4 +NOTICE: 1 BB CC +NOTICE: 2 BB CC +NOTICE: 3 BB CC +NOTICE: 4 BB CC +NOTICE: 1 bb cc +NOTICE: 2 bb cc +NOTICE: 3 bb cc +NOTICE: 4 bb cc + for_vect +---------- + +(1 row) + +-- CASE statement +create or replace function case_test(bigint) returns text as $$ +declare a int = 10; + b int = 1; +begin + case $1 + when 1 then + return 'one'; + when 2 then + return 'two'; + when 3,4,3+5 then + return 'three, four or eight'; + when a then + return 'ten'; + when a+b, a+b+1 then + return 'eleven, twelve'; + end case; +end; +$$ language plpgsql immutable; +select case_test(1); + case_test +----------- + one +(1 row) + +select case_test(2); + case_test +----------- + two +(1 row) + +select case_test(3); + case_test +---------------------- + three, four or eight +(1 row) + +select case_test(4); + case_test +---------------------- + three, four or eight +(1 row) + +select case_test(5); -- fails +ERROR: case not found +HINT: CASE statement is missing ELSE part. +CONTEXT: PL/pgSQL function case_test(bigint) line 5 at CASE +select case_test(8); + case_test +---------------------- + three, four or eight +(1 row) + +select case_test(10); + case_test +----------- + ten +(1 row) + +select case_test(11); + case_test +---------------- + eleven, twelve +(1 row) + +select case_test(12); + case_test +---------------- + eleven, twelve +(1 row) + +select case_test(13); -- fails +ERROR: case not found +HINT: CASE statement is missing ELSE part. 
+CONTEXT: PL/pgSQL function case_test(bigint) line 5 at CASE +create or replace function catch() returns void as $$ +begin + raise notice '%', case_test(6); +exception + when case_not_found then + raise notice 'caught case_not_found % %', SQLSTATE, SQLERRM; +end +$$ language plpgsql; +select catch(); +NOTICE: caught case_not_found 20000 case not found + catch +------- + +(1 row) + +-- test the searched variant too, as well as ELSE +create or replace function case_test(bigint) returns text as $$ +declare a int = 10; +begin + case + when $1 = 1 then + return 'one'; + when $1 = a + 2 then + return 'twelve'; + else + return 'other'; + end case; +end; +$$ language plpgsql immutable; +select case_test(1); + case_test +----------- + one +(1 row) + +select case_test(2); + case_test +----------- + other +(1 row) + +select case_test(12); + case_test +----------- + twelve +(1 row) + +select case_test(13); + case_test +----------- + other +(1 row) + diff --git a/src/pl/plpgsql/src/expected/plpgsql_domain.out b/src/pl/plpgsql/src/expected/plpgsql_domain.out new file mode 100644 index 0000000000..efc877cdd1 --- /dev/null +++ b/src/pl/plpgsql/src/expected/plpgsql_domain.out @@ -0,0 +1,397 @@ +-- +-- Tests for PL/pgSQL's behavior with domain types +-- +CREATE DOMAIN booltrue AS bool CHECK (VALUE IS TRUE OR VALUE IS NULL); +CREATE FUNCTION test_argresult_booltrue(x booltrue, y bool) RETURNS booltrue AS $$ +begin +return y; +end +$$ LANGUAGE plpgsql; +SELECT * FROM test_argresult_booltrue(true, true); + test_argresult_booltrue +------------------------- + t +(1 row) + +SELECT * FROM test_argresult_booltrue(false, true); +ERROR: value for domain booltrue violates check constraint "booltrue_check" +SELECT * FROM test_argresult_booltrue(true, false); +ERROR: value for domain booltrue violates check constraint "booltrue_check" +CONTEXT: PL/pgSQL function test_argresult_booltrue(booltrue,boolean) while casting return value to function's return type +CREATE FUNCTION test_assign_booltrue(x bool, y bool) RETURNS booltrue AS $$ +declare v booltrue := x; +begin +v := y; +return v; +end +$$ LANGUAGE plpgsql; +SELECT * FROM test_assign_booltrue(true, true); + test_assign_booltrue +---------------------- + t +(1 row) + +SELECT * FROM test_assign_booltrue(false, true); +ERROR: value for domain booltrue violates check constraint "booltrue_check" +CONTEXT: PL/pgSQL function test_assign_booltrue(boolean,boolean) line 3 during statement block local variable initialization +SELECT * FROM test_assign_booltrue(true, false); +ERROR: value for domain booltrue violates check constraint "booltrue_check" +CONTEXT: PL/pgSQL function test_assign_booltrue(boolean,boolean) line 4 at assignment +CREATE DOMAIN uint2 AS int2 CHECK (VALUE >= 0); +CREATE FUNCTION test_argresult_uint2(x uint2, y int) RETURNS uint2 AS $$ +begin +return y; +end +$$ LANGUAGE plpgsql; +SELECT * FROM test_argresult_uint2(100::uint2, 50); + test_argresult_uint2 +---------------------- + 50 +(1 row) + +SELECT * FROM test_argresult_uint2(100::uint2, -50); +ERROR: value for domain uint2 violates check constraint "uint2_check" +CONTEXT: PL/pgSQL function test_argresult_uint2(uint2,integer) while casting return value to function's return type +SELECT * FROM test_argresult_uint2(null, 1); + test_argresult_uint2 +---------------------- + 1 +(1 row) + +CREATE FUNCTION test_assign_uint2(x int, y int) RETURNS uint2 AS $$ +declare v uint2 := x; +begin +v := y; +return v; +end +$$ LANGUAGE plpgsql; +SELECT * FROM test_assign_uint2(100, 50); + test_assign_uint2 
+------------------- + 50 +(1 row) + +SELECT * FROM test_assign_uint2(100, -50); +ERROR: value for domain uint2 violates check constraint "uint2_check" +CONTEXT: PL/pgSQL function test_assign_uint2(integer,integer) line 4 at assignment +SELECT * FROM test_assign_uint2(-100, 50); +ERROR: value for domain uint2 violates check constraint "uint2_check" +CONTEXT: PL/pgSQL function test_assign_uint2(integer,integer) line 3 during statement block local variable initialization +SELECT * FROM test_assign_uint2(null, 1); + test_assign_uint2 +------------------- + 1 +(1 row) + +CREATE DOMAIN nnint AS int NOT NULL; +CREATE FUNCTION test_argresult_nnint(x nnint, y int) RETURNS nnint AS $$ +begin +return y; +end +$$ LANGUAGE plpgsql; +SELECT * FROM test_argresult_nnint(10, 20); + test_argresult_nnint +---------------------- + 20 +(1 row) + +SELECT * FROM test_argresult_nnint(null, 20); +ERROR: domain nnint does not allow null values +SELECT * FROM test_argresult_nnint(10, null); +ERROR: domain nnint does not allow null values +CONTEXT: PL/pgSQL function test_argresult_nnint(nnint,integer) while casting return value to function's return type +CREATE FUNCTION test_assign_nnint(x int, y int) RETURNS nnint AS $$ +declare v nnint := x; +begin +v := y; +return v; +end +$$ LANGUAGE plpgsql; +SELECT * FROM test_assign_nnint(10, 20); + test_assign_nnint +------------------- + 20 +(1 row) + +SELECT * FROM test_assign_nnint(null, 20); +ERROR: domain nnint does not allow null values +CONTEXT: PL/pgSQL function test_assign_nnint(integer,integer) line 3 during statement block local variable initialization +SELECT * FROM test_assign_nnint(10, null); +ERROR: domain nnint does not allow null values +CONTEXT: PL/pgSQL function test_assign_nnint(integer,integer) line 4 at assignment +-- +-- Domains over arrays +-- +CREATE DOMAIN ordered_pair_domain AS integer[] CHECK (array_length(VALUE,1)=2 AND VALUE[1] < VALUE[2]); +CREATE FUNCTION test_argresult_array_domain(x ordered_pair_domain) + RETURNS ordered_pair_domain AS $$ +begin +return x; +end +$$ LANGUAGE plpgsql; +SELECT * FROM test_argresult_array_domain(ARRAY[0, 100]::ordered_pair_domain); + test_argresult_array_domain +----------------------------- + {0,100} +(1 row) + +SELECT * FROM test_argresult_array_domain(NULL::ordered_pair_domain); + test_argresult_array_domain +----------------------------- + +(1 row) + +CREATE FUNCTION test_argresult_array_domain_check_violation() + RETURNS ordered_pair_domain AS $$ +begin +return array[2,1]; +end +$$ LANGUAGE plpgsql; +SELECT * FROM test_argresult_array_domain_check_violation(); +ERROR: value for domain ordered_pair_domain violates check constraint "ordered_pair_domain_check" +CONTEXT: PL/pgSQL function test_argresult_array_domain_check_violation() while casting return value to function's return type +CREATE FUNCTION test_assign_ordered_pair_domain(x int, y int, z int) RETURNS ordered_pair_domain AS $$ +declare v ordered_pair_domain := array[x, y]; +begin +v[2] := z; +return v; +end +$$ LANGUAGE plpgsql; +SELECT * FROM test_assign_ordered_pair_domain(1,2,3); + test_assign_ordered_pair_domain +--------------------------------- + {1,3} +(1 row) + +SELECT * FROM test_assign_ordered_pair_domain(1,2,0); +ERROR: value for domain ordered_pair_domain violates check constraint "ordered_pair_domain_check" +CONTEXT: PL/pgSQL function test_assign_ordered_pair_domain(integer,integer,integer) line 4 at assignment +SELECT * FROM test_assign_ordered_pair_domain(2,1,3); +ERROR: value for domain ordered_pair_domain violates check constraint 
"ordered_pair_domain_check" +CONTEXT: PL/pgSQL function test_assign_ordered_pair_domain(integer,integer,integer) line 3 during statement block local variable initialization +-- +-- Arrays of domains +-- +CREATE FUNCTION test_read_uint2_array(x uint2[]) RETURNS uint2 AS $$ +begin +return x[1]; +end +$$ LANGUAGE plpgsql; +select test_read_uint2_array(array[1::uint2]); + test_read_uint2_array +----------------------- + 1 +(1 row) + +CREATE FUNCTION test_build_uint2_array(x int2) RETURNS uint2[] AS $$ +begin +return array[x, x]; +end +$$ LANGUAGE plpgsql; +select test_build_uint2_array(1::int2); + test_build_uint2_array +------------------------ + {1,1} +(1 row) + +select test_build_uint2_array(-1::int2); -- fail +ERROR: value for domain uint2 violates check constraint "uint2_check" +CONTEXT: PL/pgSQL function test_build_uint2_array(smallint) while casting return value to function's return type +CREATE FUNCTION test_argresult_domain_array(x integer[]) + RETURNS ordered_pair_domain[] AS $$ +begin +return array[x::ordered_pair_domain, x::ordered_pair_domain]; +end +$$ LANGUAGE plpgsql; +select test_argresult_domain_array(array[2,4]); + test_argresult_domain_array +----------------------------- + {"{2,4}","{2,4}"} +(1 row) + +select test_argresult_domain_array(array[4,2]); -- fail +ERROR: value for domain ordered_pair_domain violates check constraint "ordered_pair_domain_check" +CONTEXT: PL/pgSQL function test_argresult_domain_array(integer[]) line 3 at RETURN +CREATE FUNCTION test_argresult_domain_array2(x ordered_pair_domain) + RETURNS integer AS $$ +begin +return x[1]; +end +$$ LANGUAGE plpgsql; +select test_argresult_domain_array2(array[2,4]); + test_argresult_domain_array2 +------------------------------ + 2 +(1 row) + +select test_argresult_domain_array2(array[4,2]); -- fail +ERROR: value for domain ordered_pair_domain violates check constraint "ordered_pair_domain_check" +CREATE FUNCTION test_argresult_array_domain_array(x ordered_pair_domain[]) + RETURNS ordered_pair_domain AS $$ +begin +return x[1]; +end +$$ LANGUAGE plpgsql; +select test_argresult_array_domain_array(array[array[2,4]::ordered_pair_domain]); + test_argresult_array_domain_array +----------------------------------- + {2,4} +(1 row) + +-- +-- Domains within composite +-- +CREATE TYPE nnint_container AS (f1 int, f2 nnint); +CREATE FUNCTION test_result_nnint_container(x int, y int) + RETURNS nnint_container AS $$ +begin +return row(x, y)::nnint_container; +end +$$ LANGUAGE plpgsql; +SELECT test_result_nnint_container(null, 3); + test_result_nnint_container +----------------------------- + (,3) +(1 row) + +SELECT test_result_nnint_container(3, null); -- fail +ERROR: domain nnint does not allow null values +CONTEXT: PL/pgSQL function test_result_nnint_container(integer,integer) line 3 at RETURN +CREATE FUNCTION test_assign_nnint_container(x int, y int, z int) + RETURNS nnint_container AS $$ +declare v nnint_container := row(x, y); +begin +v.f2 := z; +return v; +end +$$ LANGUAGE plpgsql; +SELECT * FROM test_assign_nnint_container(1,2,3); + f1 | f2 +----+---- + 1 | 3 +(1 row) + +SELECT * FROM test_assign_nnint_container(1,2,null); +ERROR: domain nnint does not allow null values +CONTEXT: PL/pgSQL function test_assign_nnint_container(integer,integer,integer) line 4 at assignment +SELECT * FROM test_assign_nnint_container(1,null,3); +ERROR: domain nnint does not allow null values +CONTEXT: PL/pgSQL function test_assign_nnint_container(integer,integer,integer) line 3 during statement block local variable initialization +-- Since core 
system allows this: +SELECT null::nnint_container; + nnint_container +----------------- + +(1 row) + +-- so should PL/PgSQL +CREATE FUNCTION test_assign_nnint_container2(x int, y int, z int) + RETURNS nnint_container AS $$ +declare v nnint_container; +begin +v.f2 := z; +return v; +end +$$ LANGUAGE plpgsql; +SELECT * FROM test_assign_nnint_container2(1,2,3); + f1 | f2 +----+---- + | 3 +(1 row) + +SELECT * FROM test_assign_nnint_container2(1,2,null); +ERROR: domain nnint does not allow null values +CONTEXT: PL/pgSQL function test_assign_nnint_container2(integer,integer,integer) line 4 at assignment +-- +-- Domains of composite +-- +CREATE TYPE named_pair AS ( + i integer, + j integer +); +CREATE DOMAIN ordered_named_pair AS named_pair CHECK((VALUE).i <= (VALUE).j); +CREATE FUNCTION read_ordered_named_pair(p ordered_named_pair) RETURNS integer AS $$ +begin +return p.i + p.j; +end +$$ LANGUAGE plpgsql; +SELECT read_ordered_named_pair(row(1, 2)); + read_ordered_named_pair +------------------------- + 3 +(1 row) + +SELECT read_ordered_named_pair(row(2, 1)); -- fail +ERROR: value for domain ordered_named_pair violates check constraint "ordered_named_pair_check" +CREATE FUNCTION build_ordered_named_pair(i int, j int) RETURNS ordered_named_pair AS $$ +begin +return row(i, j); +end +$$ LANGUAGE plpgsql; +SELECT build_ordered_named_pair(1,2); + build_ordered_named_pair +-------------------------- + (1,2) +(1 row) + +SELECT build_ordered_named_pair(2,1); -- fail +ERROR: value for domain ordered_named_pair violates check constraint "ordered_named_pair_check" +CONTEXT: PL/pgSQL function build_ordered_named_pair(integer,integer) while casting return value to function's return type +CREATE FUNCTION test_assign_ordered_named_pair(x int, y int, z int) + RETURNS ordered_named_pair AS $$ +declare v ordered_named_pair := row(x, y); +begin +v.j := z; +return v; +end +$$ LANGUAGE plpgsql; +SELECT * FROM test_assign_ordered_named_pair(1,2,3); + i | j +---+--- + 1 | 3 +(1 row) + +SELECT * FROM test_assign_ordered_named_pair(1,2,0); +ERROR: value for domain ordered_named_pair violates check constraint "ordered_named_pair_check" +CONTEXT: PL/pgSQL function test_assign_ordered_named_pair(integer,integer,integer) line 4 at assignment +SELECT * FROM test_assign_ordered_named_pair(2,1,3); +ERROR: value for domain ordered_named_pair violates check constraint "ordered_named_pair_check" +CONTEXT: PL/pgSQL function test_assign_ordered_named_pair(integer,integer,integer) line 3 during statement block local variable initialization +CREATE FUNCTION build_ordered_named_pairs(i int, j int) RETURNS ordered_named_pair[] AS $$ +begin +return array[row(i, j), row(i, j+1)]; +end +$$ LANGUAGE plpgsql; +SELECT build_ordered_named_pairs(1,2); + build_ordered_named_pairs +--------------------------- + {"(1,2)","(1,3)"} +(1 row) + +SELECT build_ordered_named_pairs(2,1); -- fail +ERROR: value for domain ordered_named_pair violates check constraint "ordered_named_pair_check" +CONTEXT: PL/pgSQL function build_ordered_named_pairs(integer,integer) while casting return value to function's return type +CREATE FUNCTION test_assign_ordered_named_pairs(x int, y int, z int) + RETURNS ordered_named_pair[] AS $$ +declare v ordered_named_pair[] := array[row(x, y)]; +begin +-- ideally this would work, but it doesn't yet: +-- v[1].j := z; +return v; +end +$$ LANGUAGE plpgsql; +SELECT * FROM test_assign_ordered_named_pairs(1,2,3); + test_assign_ordered_named_pairs +--------------------------------- + {"(1,2)"} +(1 row) + +SELECT * FROM 
test_assign_ordered_named_pairs(2,1,3); +ERROR: value for domain ordered_named_pair violates check constraint "ordered_named_pair_check" +CONTEXT: PL/pgSQL function test_assign_ordered_named_pairs(integer,integer,integer) line 3 during statement block local variable initialization +SELECT * FROM test_assign_ordered_named_pairs(1,2,0); -- should fail someday + test_assign_ordered_named_pairs +--------------------------------- + {"(1,2)"} +(1 row) + diff --git a/src/pl/plpgsql/src/expected/plpgsql_record.out b/src/pl/plpgsql/src/expected/plpgsql_record.out new file mode 100644 index 0000000000..cc36231aef --- /dev/null +++ b/src/pl/plpgsql/src/expected/plpgsql_record.out @@ -0,0 +1,656 @@ +-- +-- Tests for PL/pgSQL handling of composite (record) variables +-- +create type two_int4s as (f1 int4, f2 int4); +create type two_int8s as (q1 int8, q2 int8); +-- base-case return of a composite type +create function retc(int) returns two_int8s language plpgsql as +$$ begin return row($1,1)::two_int8s; end $$; +select retc(42); + retc +-------- + (42,1) +(1 row) + +-- ok to return a matching record type +create or replace function retc(int) returns two_int8s language plpgsql as +$$ begin return row($1::int8, 1::int8); end $$; +select retc(42); + retc +-------- + (42,1) +(1 row) + +-- we don't currently support implicit casting +create or replace function retc(int) returns two_int8s language plpgsql as +$$ begin return row($1,1); end $$; +select retc(42); +ERROR: returned record type does not match expected record type +DETAIL: Returned type integer does not match expected type bigint in column 1. +CONTEXT: PL/pgSQL function retc(integer) while casting return value to function's return type +-- nor extra columns +create or replace function retc(int) returns two_int8s language plpgsql as +$$ begin return row($1::int8, 1::int8, 42); end $$; +select retc(42); +ERROR: returned record type does not match expected record type +DETAIL: Number of returned columns (3) does not match expected column count (2). +CONTEXT: PL/pgSQL function retc(integer) while casting return value to function's return type +-- same cases with an intermediate "record" variable +create or replace function retc(int) returns two_int8s language plpgsql as +$$ declare r record; begin r := row($1::int8, 1::int8); return r; end $$; +select retc(42); + retc +-------- + (42,1) +(1 row) + +create or replace function retc(int) returns two_int8s language plpgsql as +$$ declare r record; begin r := row($1,1); return r; end $$; +select retc(42); +ERROR: returned record type does not match expected record type +DETAIL: Returned type integer does not match expected type bigint in column 1. +CONTEXT: PL/pgSQL function retc(integer) while casting return value to function's return type +create or replace function retc(int) returns two_int8s language plpgsql as +$$ declare r record; begin r := row($1::int8, 1::int8, 42); return r; end $$; +select retc(42); +ERROR: returned record type does not match expected record type +DETAIL: Number of returned columns (3) does not match expected column count (2). 
+CONTEXT: PL/pgSQL function retc(integer) while casting return value to function's return type +-- but, for mostly historical reasons, we do convert when assigning +-- to a named-composite-type variable +create or replace function retc(int) returns two_int8s language plpgsql as +$$ declare r two_int8s; begin r := row($1::int8, 1::int8, 42); return r; end $$; +select retc(42); + retc +-------- + (42,1) +(1 row) + +do $$ declare c two_int8s; +begin c := row(1,2); raise notice 'c = %', c; end$$; +NOTICE: c = (1,2) +do $$ declare c two_int8s; +begin for c in select 1,2 loop raise notice 'c = %', c; end loop; end$$; +NOTICE: c = (1,2) +do $$ declare c4 two_int4s; c8 two_int8s; +begin + c8 := row(1,2); + c4 := c8; + c8 := c4; + raise notice 'c4 = %', c4; + raise notice 'c8 = %', c8; +end$$; +NOTICE: c4 = (1,2) +NOTICE: c8 = (1,2) +-- check passing composite result to another function +create function getq1(two_int8s) returns int8 language plpgsql as $$ +declare r two_int8s; begin r := $1; return r.q1; end $$; +select getq1(retc(344)); + getq1 +------- + 344 +(1 row) + +select getq1(row(1,2)); + getq1 +------- + 1 +(1 row) + +do $$ +declare r1 two_int8s; r2 record; x int8; +begin + r1 := retc(345); + perform getq1(r1); + x := getq1(r1); + raise notice 'x = %', x; + r2 := retc(346); + perform getq1(r2); + x := getq1(r2); + raise notice 'x = %', x; +end$$; +NOTICE: x = 345 +NOTICE: x = 346 +-- check assignments of composites +do $$ +declare r1 two_int8s; r2 two_int8s; r3 record; r4 record; +begin + r1 := row(1,2); + raise notice 'r1 = %', r1; + r1 := r1; -- shouldn't do anything + raise notice 'r1 = %', r1; + r2 := r1; + raise notice 'r1 = %', r1; + raise notice 'r2 = %', r2; + r2.q2 = r1.q1 + 3; -- check that r2 has distinct storage + raise notice 'r1 = %', r1; + raise notice 'r2 = %', r2; + r1 := null; + raise notice 'r1 = %', r1; + raise notice 'r2 = %', r2; + r1 := row(7,11)::two_int8s; + r2 := r1; + raise notice 'r1 = %', r1; + raise notice 'r2 = %', r2; + r3 := row(1,2); + r4 := r3; + raise notice 'r3 = %', r3; + raise notice 'r4 = %', r4; + r4.f1 := r4.f1 + 3; -- check that r4 has distinct storage + raise notice 'r3 = %', r3; + raise notice 'r4 = %', r4; + r1 := r3; + raise notice 'r1 = %', r1; + r4 := r1; + raise notice 'r4 = %', r4; + r4.q2 := r4.q2 + 1; -- r4's field names have changed + raise notice 'r4 = %', r4; +end$$; +NOTICE: r1 = (1,2) +NOTICE: r1 = (1,2) +NOTICE: r1 = (1,2) +NOTICE: r2 = (1,2) +NOTICE: r1 = (1,2) +NOTICE: r2 = (1,4) +NOTICE: r1 = +NOTICE: r2 = (1,4) +NOTICE: r1 = (7,11) +NOTICE: r2 = (7,11) +NOTICE: r3 = (1,2) +NOTICE: r4 = (1,2) +NOTICE: r3 = (1,2) +NOTICE: r4 = (4,2) +NOTICE: r1 = (1,2) +NOTICE: r4 = (1,2) +NOTICE: r4 = (1,3) +-- fields of named-type vars read as null if uninitialized +do $$ +declare r1 two_int8s; +begin + raise notice 'r1 = %', r1; + raise notice 'r1.q1 = %', r1.q1; + raise notice 'r1.q2 = %', r1.q2; + raise notice 'r1 = %', r1; +end$$; +NOTICE: r1 = +NOTICE: r1.q1 = +NOTICE: r1.q2 = +NOTICE: r1 = +do $$ +declare r1 two_int8s; +begin + raise notice 'r1.q1 = %', r1.q1; + raise notice 'r1.q2 = %', r1.q2; + raise notice 'r1 = %', r1; + raise notice 'r1.nosuchfield = %', r1.nosuchfield; +end$$; +NOTICE: r1.q1 = +NOTICE: r1.q2 = +NOTICE: r1 = +ERROR: record "r1" has no field "nosuchfield" +CONTEXT: SQL statement "SELECT r1.nosuchfield" +PL/pgSQL function inline_code_block line 7 at RAISE +-- records, not so much +do $$ +declare r1 record; +begin + raise notice 'r1 = %', r1; + raise notice 'r1.f1 = %', r1.f1; + raise notice 'r1.f2 = %', r1.f2; + raise 
notice 'r1 = %', r1; +end$$; +NOTICE: r1 = +ERROR: record "r1" is not assigned yet +DETAIL: The tuple structure of a not-yet-assigned record is indeterminate. +CONTEXT: SQL statement "SELECT r1.f1" +PL/pgSQL function inline_code_block line 5 at RAISE +-- but OK if you assign first +do $$ +declare r1 record; +begin + raise notice 'r1 = %', r1; + r1 := row(1,2); + raise notice 'r1.f1 = %', r1.f1; + raise notice 'r1.f2 = %', r1.f2; + raise notice 'r1 = %', r1; + raise notice 'r1.nosuchfield = %', r1.nosuchfield; +end$$; +NOTICE: r1 = +NOTICE: r1.f1 = 1 +NOTICE: r1.f2 = 2 +NOTICE: r1 = (1,2) +ERROR: record "r1" has no field "nosuchfield" +CONTEXT: SQL statement "SELECT r1.nosuchfield" +PL/pgSQL function inline_code_block line 9 at RAISE +-- check repeated assignments to composite fields +create table some_table (id int, data text); +do $$ +declare r some_table; +begin + r := (23, 'skidoo'); + for i in 1 .. 10 loop + r.id := r.id + i; + r.data := r.data || ' ' || i; + end loop; + raise notice 'r = %', r; +end$$; +NOTICE: r = (78,"skidoo 1 2 3 4 5 6 7 8 9 10") +-- check behavior of function declared to return "record" +create function returnsrecord(int) returns record language plpgsql as +$$ begin return row($1,$1+1); end $$; +select returnsrecord(42); + returnsrecord +--------------- + (42,43) +(1 row) + +select * from returnsrecord(42) as r(x int, y int); + x | y +----+---- + 42 | 43 +(1 row) + +select * from returnsrecord(42) as r(x int, y int, z int); -- fail +ERROR: returned record type does not match expected record type +DETAIL: Number of returned columns (2) does not match expected column count (3). +CONTEXT: PL/pgSQL function returnsrecord(integer) while casting return value to function's return type +select * from returnsrecord(42) as r(x int, y bigint); -- fail +ERROR: returned record type does not match expected record type +DETAIL: Returned type integer does not match expected type bigint in column 2. +CONTEXT: PL/pgSQL function returnsrecord(integer) while casting return value to function's return type +-- same with an intermediate record variable +create or replace function returnsrecord(int) returns record language plpgsql as +$$ declare r record; begin r := row($1,$1+1); return r; end $$; +select returnsrecord(42); + returnsrecord +--------------- + (42,43) +(1 row) + +select * from returnsrecord(42) as r(x int, y int); + x | y +----+---- + 42 | 43 +(1 row) + +select * from returnsrecord(42) as r(x int, y int, z int); -- fail +ERROR: returned record type does not match expected record type +DETAIL: Number of returned columns (2) does not match expected column count (3). +CONTEXT: PL/pgSQL function returnsrecord(integer) while casting return value to function's return type +select * from returnsrecord(42) as r(x int, y bigint); -- fail +ERROR: returned record type does not match expected record type +DETAIL: Returned type integer does not match expected type bigint in column 2. 
+CONTEXT: PL/pgSQL function returnsrecord(integer) while casting return value to function's return type +-- should work the same with a missing column in the actual result value +create table has_hole(f1 int, f2 int, f3 int); +alter table has_hole drop column f2; +create or replace function returnsrecord(int) returns record language plpgsql as +$$ begin return row($1,$1+1)::has_hole; end $$; +select returnsrecord(42); + returnsrecord +--------------- + (42,43) +(1 row) + +select * from returnsrecord(42) as r(x int, y int); + x | y +----+---- + 42 | 43 +(1 row) + +select * from returnsrecord(42) as r(x int, y int, z int); -- fail +ERROR: returned record type does not match expected record type +DETAIL: Number of returned columns (2) does not match expected column count (3). +CONTEXT: PL/pgSQL function returnsrecord(integer) while casting return value to function's return type +select * from returnsrecord(42) as r(x int, y bigint); -- fail +ERROR: returned record type does not match expected record type +DETAIL: Returned type integer does not match expected type bigint in column 2. +CONTEXT: PL/pgSQL function returnsrecord(integer) while casting return value to function's return type +-- same with an intermediate record variable +create or replace function returnsrecord(int) returns record language plpgsql as +$$ declare r record; begin r := row($1,$1+1)::has_hole; return r; end $$; +select returnsrecord(42); + returnsrecord +--------------- + (42,43) +(1 row) + +select * from returnsrecord(42) as r(x int, y int); + x | y +----+---- + 42 | 43 +(1 row) + +select * from returnsrecord(42) as r(x int, y int, z int); -- fail +ERROR: returned record type does not match expected record type +DETAIL: Number of returned columns (2) does not match expected column count (3). +CONTEXT: PL/pgSQL function returnsrecord(integer) while casting return value to function's return type +select * from returnsrecord(42) as r(x int, y bigint); -- fail +ERROR: returned record type does not match expected record type +DETAIL: Returned type integer does not match expected type bigint in column 2. +CONTEXT: PL/pgSQL function returnsrecord(integer) while casting return value to function's return type +-- check access to a field of an argument declared "record" +create function getf1(x record) returns int language plpgsql as +$$ begin return x.f1; end $$; +select getf1(1); +ERROR: function getf1(integer) does not exist +LINE 1: select getf1(1); + ^ +HINT: No function matches the given name and argument types. You might need to add explicit type casts. 
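The preceding failure is about signature resolution, not field access: a parameter of pseudo-type record accepts only composite values, so a bare integer finds no matching function, while a field reference such as x.f1 is looked up against whatever row is actually supplied at call time. A minimal sketch of that rule, using a hypothetical getf2 that is not part of this test suite:

    create function getf2(x record) returns int language plpgsql as
    $$ begin return x.f2; end $$;          -- x.f2 is resolved per call, not at CREATE time
    select getf2(row(1,2));                -- anonymous record exposes f1/f2, returns 2
    select getf2(row(1,2)::two_int8s);     -- fails: two_int8s has q1/q2, no field f2

The getf1 calls that follow exercise the same point: the anonymous row provides f1, while the named two_int8s row does not.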
+select getf1(row(1,2)); + getf1 +------- + 1 +(1 row) + +-- a CLOBBER_CACHE_ALWAYS build will report this error with a different +-- context stack than other builds, so suppress context output +\set SHOW_CONTEXT never +select getf1(row(1,2)::two_int8s); +ERROR: record "x" has no field "f1" +\set SHOW_CONTEXT errors +select getf1(row(1,2)); + getf1 +------- + 1 +(1 row) + +-- check behavior when assignment to FOR-loop variable requires coercion +do $$ +declare r two_int8s; +begin + for r in select i, i+1 from generate_series(1,4) i + loop + raise notice 'r = %', r; + end loop; +end$$; +NOTICE: r = (1,2) +NOTICE: r = (2,3) +NOTICE: r = (3,4) +NOTICE: r = (4,5) +-- check behavior when returning setof composite +create function returnssetofholes() returns setof has_hole language plpgsql as +$$ +declare r record; + h has_hole; +begin + return next h; + r := (1,2); + h := (3,4); + return next r; + return next h; + return next row(5,6); + return next row(7,8)::has_hole; +end$$; +select returnssetofholes(); + returnssetofholes +------------------- + (,) + (1,2) + (3,4) + (5,6) + (7,8) +(5 rows) + +create or replace function returnssetofholes() returns setof has_hole language plpgsql as +$$ +declare r record; +begin + return next r; -- fails, not assigned yet +end$$; +select returnssetofholes(); +ERROR: record "r" is not assigned yet +DETAIL: The tuple structure of a not-yet-assigned record is indeterminate. +CONTEXT: PL/pgSQL function returnssetofholes() line 4 at RETURN NEXT +create or replace function returnssetofholes() returns setof has_hole language plpgsql as +$$ +begin + return next row(1,2,3); -- fails +end$$; +select returnssetofholes(); +ERROR: returned record type does not match expected record type +DETAIL: Number of returned columns (3) does not match expected column count (2). 
+CONTEXT: PL/pgSQL function returnssetofholes() line 3 at RETURN NEXT +-- check behavior with changes of a named rowtype +create table mutable(f1 int, f2 text); +create function sillyaddone(int) returns int language plpgsql as +$$ declare r mutable; begin r.f1 := $1; return r.f1 + 1; end $$; +select sillyaddone(42); + sillyaddone +------------- + 43 +(1 row) + +-- test for change of type of column f1 should be here someday; +-- for now see plpgsql_cache test +alter table mutable drop column f1; +select sillyaddone(42); -- fail +ERROR: record "r" has no field "f1" +CONTEXT: PL/pgSQL function sillyaddone(integer) line 1 at assignment +create function getf3(x mutable) returns int language plpgsql as +$$ begin return x.f3; end $$; +select getf3(null::mutable); -- doesn't work yet +ERROR: record "x" has no field "f3" +CONTEXT: SQL statement "SELECT x.f3" +PL/pgSQL function getf3(mutable) line 1 at RETURN +alter table mutable add column f3 int; +select getf3(null::mutable); -- now it works + getf3 +------- + +(1 row) + +alter table mutable drop column f3; +-- a CLOBBER_CACHE_ALWAYS build will report this error with a different +-- context stack than other builds, so suppress context output +\set SHOW_CONTEXT never +select getf3(null::mutable); -- fails again +ERROR: record "x" has no field "f3" +\set SHOW_CONTEXT errors +-- check access to system columns in a record variable +create function sillytrig() returns trigger language plpgsql as +$$begin + raise notice 'old.ctid = %', old.ctid; + raise notice 'old.tableoid = %', old.tableoid::regclass; + return new; +end$$; +create trigger mutable_trig before update on mutable for each row +execute procedure sillytrig(); +insert into mutable values ('foo'), ('bar'); +update mutable set f2 = f2 || ' baz'; +NOTICE: old.ctid = (0,1) +NOTICE: old.tableoid = mutable +NOTICE: old.ctid = (0,2) +NOTICE: old.tableoid = mutable +table mutable; + f2 +--------- + foo baz + bar baz +(2 rows) + +-- check returning a composite datum from a trigger +create or replace function sillytrig() returns trigger language plpgsql as +$$begin + return row(new.*); +end$$; +update mutable set f2 = f2 || ' baz'; +table mutable; + f2 +------------- + foo baz baz + bar baz baz +(2 rows) + +create or replace function sillytrig() returns trigger language plpgsql as +$$declare r record; +begin + r := row(new.*); + return r; +end$$; +update mutable set f2 = f2 || ' baz'; +table mutable; + f2 +----------------- + foo baz baz baz + bar baz baz baz +(2 rows) + +-- +-- Domains of composite +-- +create domain ordered_int8s as two_int8s check((value).q1 <= (value).q2); +create function read_ordered_int8s(p ordered_int8s) returns int8 as $$ +begin return p.q1 + p.q2; end +$$ language plpgsql; +select read_ordered_int8s(row(1, 2)); + read_ordered_int8s +-------------------- + 3 +(1 row) + +select read_ordered_int8s(row(2, 1)); -- fail +ERROR: value for domain ordered_int8s violates check constraint "ordered_int8s_check" +create function build_ordered_int8s(i int8, j int8) returns ordered_int8s as $$ +begin return row(i,j); end +$$ language plpgsql; +select build_ordered_int8s(1,2); + build_ordered_int8s +--------------------- + (1,2) +(1 row) + +select build_ordered_int8s(2,1); -- fail +ERROR: value for domain ordered_int8s violates check constraint "ordered_int8s_check" +CONTEXT: PL/pgSQL function build_ordered_int8s(bigint,bigint) while casting return value to function's return type +create function build_ordered_int8s_2(i int8, j int8) returns ordered_int8s as $$ +declare r record; begin r := 
row(i,j); return r; end +$$ language plpgsql; +select build_ordered_int8s_2(1,2); + build_ordered_int8s_2 +----------------------- + (1,2) +(1 row) + +select build_ordered_int8s_2(2,1); -- fail +ERROR: value for domain ordered_int8s violates check constraint "ordered_int8s_check" +CONTEXT: PL/pgSQL function build_ordered_int8s_2(bigint,bigint) while casting return value to function's return type +create function build_ordered_int8s_3(i int8, j int8) returns ordered_int8s as $$ +declare r two_int8s; begin r := row(i,j); return r; end +$$ language plpgsql; +select build_ordered_int8s_3(1,2); + build_ordered_int8s_3 +----------------------- + (1,2) +(1 row) + +select build_ordered_int8s_3(2,1); -- fail +ERROR: value for domain ordered_int8s violates check constraint "ordered_int8s_check" +CONTEXT: PL/pgSQL function build_ordered_int8s_3(bigint,bigint) while casting return value to function's return type +create function build_ordered_int8s_4(i int8, j int8) returns ordered_int8s as $$ +declare r ordered_int8s; begin r := row(i,j); return r; end +$$ language plpgsql; +select build_ordered_int8s_4(1,2); + build_ordered_int8s_4 +----------------------- + (1,2) +(1 row) + +select build_ordered_int8s_4(2,1); -- fail +ERROR: value for domain ordered_int8s violates check constraint "ordered_int8s_check" +CONTEXT: PL/pgSQL function build_ordered_int8s_4(bigint,bigint) line 2 at assignment +create function build_ordered_int8s_a(i int8, j int8) returns ordered_int8s[] as $$ +begin return array[row(i,j), row(i,j+1)]; end +$$ language plpgsql; +select build_ordered_int8s_a(1,2); + build_ordered_int8s_a +----------------------- + {"(1,2)","(1,3)"} +(1 row) + +select build_ordered_int8s_a(2,1); -- fail +ERROR: value for domain ordered_int8s violates check constraint "ordered_int8s_check" +CONTEXT: PL/pgSQL function build_ordered_int8s_a(bigint,bigint) while casting return value to function's return type +-- check field assignment +do $$ +declare r ordered_int8s; +begin + r.q1 := null; + r.q2 := 43; + r.q1 := 42; + r.q2 := 41; -- fail +end$$; +ERROR: value for domain ordered_int8s violates check constraint "ordered_int8s_check" +CONTEXT: PL/pgSQL function inline_code_block line 7 at assignment +-- check whole-row assignment +do $$ +declare r ordered_int8s; +begin + r := null; + r := row(null,null); + r := row(1,2); + r := row(2,1); -- fail +end$$; +ERROR: value for domain ordered_int8s violates check constraint "ordered_int8s_check" +CONTEXT: PL/pgSQL function inline_code_block line 7 at assignment +-- check assignment in for-loop +do $$ +declare r ordered_int8s; +begin + for r in values (1,2),(3,4),(6,5) loop + raise notice 'r = %', r; + end loop; +end$$; +NOTICE: r = (1,2) +NOTICE: r = (3,4) +ERROR: value for domain ordered_int8s violates check constraint "ordered_int8s_check" +CONTEXT: PL/pgSQL function inline_code_block line 4 at FOR over SELECT rows +-- check behavior with toastable fields, too +create type two_texts as (f1 text, f2 text); +create domain ordered_texts as two_texts check((value).f1 <= (value).f2); +create table sometable (id int, a text, b text); +-- b should be compressed, but in-line +insert into sometable values (1, 'a', repeat('ffoob',1000)); +-- this b should be out-of-line +insert into sometable values (2, 'a', repeat('ffoob',100000)); +-- this pair should fail the domain check +insert into sometable values (3, 'z', repeat('ffoob',100000)); +do $$ +declare d ordered_texts; +begin + for d in select a, b from sometable loop + raise notice 'succeeded at "%"', d.f1; + end loop; 
+end$$; +NOTICE: succeeded at "a" +NOTICE: succeeded at "a" +ERROR: value for domain ordered_texts violates check constraint "ordered_texts_check" +CONTEXT: PL/pgSQL function inline_code_block line 4 at FOR over SELECT rows +do $$ +declare r record; d ordered_texts; +begin + for r in select * from sometable loop + raise notice 'processing row %', r.id; + d := row(r.a, r.b); + end loop; +end$$; +NOTICE: processing row 1 +NOTICE: processing row 2 +NOTICE: processing row 3 +ERROR: value for domain ordered_texts violates check constraint "ordered_texts_check" +CONTEXT: PL/pgSQL function inline_code_block line 6 at assignment +do $$ +declare r record; d ordered_texts; +begin + for r in select * from sometable loop + raise notice 'processing row %', r.id; + d := null; + d.f1 := r.a; + d.f2 := r.b; + end loop; +end$$; +NOTICE: processing row 1 +NOTICE: processing row 2 +NOTICE: processing row 3 +ERROR: value for domain ordered_texts violates check constraint "ordered_texts_check" +CONTEXT: PL/pgSQL function inline_code_block line 8 at assignment diff --git a/src/pl/plpgsql/src/expected/plpgsql_transaction.out b/src/pl/plpgsql/src/expected/plpgsql_transaction.out new file mode 100644 index 0000000000..6eedb215a4 --- /dev/null +++ b/src/pl/plpgsql/src/expected/plpgsql_transaction.out @@ -0,0 +1,528 @@ +CREATE TABLE test1 (a int, b text); +CREATE PROCEDURE transaction_test1(x int, y text) +LANGUAGE plpgsql +AS $$ +BEGIN + FOR i IN 0..x LOOP + INSERT INTO test1 (a, b) VALUES (i, y); + IF i % 2 = 0 THEN + COMMIT; + ELSE + ROLLBACK; + END IF; + END LOOP; +END +$$; +CALL transaction_test1(9, 'foo'); +SELECT * FROM test1; + a | b +---+----- + 0 | foo + 2 | foo + 4 | foo + 6 | foo + 8 | foo +(5 rows) + +TRUNCATE test1; +DO +LANGUAGE plpgsql +$$ +BEGIN + FOR i IN 0..9 LOOP + INSERT INTO test1 (a) VALUES (i); + IF i % 2 = 0 THEN + COMMIT; + ELSE + ROLLBACK; + END IF; + END LOOP; +END +$$; +SELECT * FROM test1; + a | b +---+--- + 0 | + 2 | + 4 | + 6 | + 8 | +(5 rows) + +-- transaction commands not allowed when called in transaction block +START TRANSACTION; +CALL transaction_test1(9, 'error'); +ERROR: invalid transaction termination +CONTEXT: PL/pgSQL function transaction_test1(integer,text) line 6 at COMMIT +COMMIT; +START TRANSACTION; +DO LANGUAGE plpgsql $$ BEGIN COMMIT; END $$; +ERROR: invalid transaction termination +CONTEXT: PL/pgSQL function inline_code_block line 1 at COMMIT +COMMIT; +TRUNCATE test1; +-- not allowed in a function +CREATE FUNCTION transaction_test2() RETURNS int +LANGUAGE plpgsql +AS $$ +BEGIN + FOR i IN 0..9 LOOP + INSERT INTO test1 (a) VALUES (i); + IF i % 2 = 0 THEN + COMMIT; + ELSE + ROLLBACK; + END IF; + END LOOP; + RETURN 1; +END +$$; +SELECT transaction_test2(); +ERROR: invalid transaction termination +CONTEXT: PL/pgSQL function transaction_test2() line 6 at COMMIT +SELECT * FROM test1; + a | b +---+--- +(0 rows) + +-- also not allowed if procedure is called from a function +CREATE FUNCTION transaction_test3() RETURNS int +LANGUAGE plpgsql +AS $$ +BEGIN + CALL transaction_test1(9, 'error'); + RETURN 1; +END; +$$; +SELECT transaction_test3(); +ERROR: invalid transaction termination +CONTEXT: PL/pgSQL function transaction_test1(integer,text) line 6 at COMMIT +SQL statement "CALL transaction_test1(9, 'error')" +PL/pgSQL function transaction_test3() line 3 at CALL +SELECT * FROM test1; + a | b +---+--- +(0 rows) + +-- DO block inside function +CREATE FUNCTION transaction_test4() RETURNS int +LANGUAGE plpgsql +AS $$ +BEGIN + EXECUTE 'DO LANGUAGE plpgsql $x$ BEGIN COMMIT; END $x$'; + 
RETURN 1; +END; +$$; +SELECT transaction_test4(); +ERROR: invalid transaction termination +CONTEXT: PL/pgSQL function inline_code_block line 1 at COMMIT +SQL statement "DO LANGUAGE plpgsql $x$ BEGIN COMMIT; END $x$" +PL/pgSQL function transaction_test4() line 3 at EXECUTE +-- proconfig settings currently disallow transaction statements +CREATE PROCEDURE transaction_test5() +LANGUAGE plpgsql +SET work_mem = 555 +AS $$ +BEGIN + COMMIT; +END; +$$; +CALL transaction_test5(); +ERROR: invalid transaction termination +CONTEXT: PL/pgSQL function transaction_test5() line 3 at COMMIT +-- SECURITY DEFINER currently disallow transaction statements +CREATE PROCEDURE transaction_test5b() +LANGUAGE plpgsql +SECURITY DEFINER +AS $$ +BEGIN + COMMIT; +END; +$$; +CALL transaction_test5b(); +ERROR: invalid transaction termination +CONTEXT: PL/pgSQL function transaction_test5b() line 3 at COMMIT +TRUNCATE test1; +-- nested procedure calls +CREATE PROCEDURE transaction_test6(c text) +LANGUAGE plpgsql +AS $$ +BEGIN + CALL transaction_test1(9, c); +END; +$$; +CALL transaction_test6('bar'); +SELECT * FROM test1; + a | b +---+----- + 0 | bar + 2 | bar + 4 | bar + 6 | bar + 8 | bar +(5 rows) + +TRUNCATE test1; +CREATE PROCEDURE transaction_test7() +LANGUAGE plpgsql +AS $$ +BEGIN + DO 'BEGIN CALL transaction_test1(9, $x$baz$x$); END;'; +END; +$$; +CALL transaction_test7(); +SELECT * FROM test1; + a | b +---+----- + 0 | baz + 2 | baz + 4 | baz + 6 | baz + 8 | baz +(5 rows) + +CREATE PROCEDURE transaction_test8() +LANGUAGE plpgsql +AS $$ +BEGIN + EXECUTE 'CALL transaction_test1(10, $x$baz$x$)'; +END; +$$; +CALL transaction_test8(); +ERROR: invalid transaction termination +CONTEXT: PL/pgSQL function transaction_test1(integer,text) line 6 at COMMIT +SQL statement "CALL transaction_test1(10, $x$baz$x$)" +PL/pgSQL function transaction_test8() line 3 at EXECUTE +-- commit inside cursor loop +CREATE TABLE test2 (x int); +INSERT INTO test2 VALUES (0), (1), (2), (3), (4); +TRUNCATE test1; +DO LANGUAGE plpgsql $$ +DECLARE + r RECORD; +BEGIN + FOR r IN SELECT * FROM test2 ORDER BY x LOOP + INSERT INTO test1 (a) VALUES (r.x); + COMMIT; + END LOOP; +END; +$$; +SELECT * FROM test1; + a | b +---+--- + 0 | + 1 | + 2 | + 3 | + 4 | +(5 rows) + +-- check that this doesn't leak a holdable portal +SELECT * FROM pg_cursors; + name | statement | is_holdable | is_binary | is_scrollable | creation_time +------+-----------+-------------+-----------+---------------+--------------- +(0 rows) + +-- error in cursor loop with commit +TRUNCATE test1; +DO LANGUAGE plpgsql $$ +DECLARE + r RECORD; +BEGIN + FOR r IN SELECT * FROM test2 ORDER BY x LOOP + INSERT INTO test1 (a) VALUES (12/(r.x-2)); + COMMIT; + END LOOP; +END; +$$; +ERROR: division by zero +CONTEXT: SQL statement "INSERT INTO test1 (a) VALUES (12/(r.x-2))" +PL/pgSQL function inline_code_block line 6 at SQL statement +SELECT * FROM test1; + a | b +-----+--- + -6 | + -12 | +(2 rows) + +SELECT * FROM pg_cursors; + name | statement | is_holdable | is_binary | is_scrollable | creation_time +------+-----------+-------------+-----------+---------------+--------------- +(0 rows) + +-- rollback inside cursor loop +TRUNCATE test1; +DO LANGUAGE plpgsql $$ +DECLARE + r RECORD; +BEGIN + FOR r IN SELECT * FROM test2 ORDER BY x LOOP + INSERT INTO test1 (a) VALUES (r.x); + ROLLBACK; + END LOOP; +END; +$$; +SELECT * FROM test1; + a | b +---+--- +(0 rows) + +SELECT * FROM pg_cursors; + name | statement | is_holdable | is_binary | is_scrollable | creation_time 
+------+-----------+-------------+-----------+---------------+--------------- +(0 rows) + +-- first commit then rollback inside cursor loop +TRUNCATE test1; +DO LANGUAGE plpgsql $$ +DECLARE + r RECORD; +BEGIN + FOR r IN SELECT * FROM test2 ORDER BY x LOOP + INSERT INTO test1 (a) VALUES (r.x); + IF r.x % 2 = 0 THEN + COMMIT; + ELSE + ROLLBACK; + END IF; + END LOOP; +END; +$$; +SELECT * FROM test1; + a | b +---+--- + 0 | + 2 | + 4 | +(3 rows) + +SELECT * FROM pg_cursors; + name | statement | is_holdable | is_binary | is_scrollable | creation_time +------+-----------+-------------+-----------+---------------+--------------- +(0 rows) + +-- rollback inside cursor loop +TRUNCATE test1; +DO LANGUAGE plpgsql $$ +DECLARE + r RECORD; +BEGIN + FOR r IN UPDATE test2 SET x = x * 2 RETURNING x LOOP + INSERT INTO test1 (a) VALUES (r.x); + ROLLBACK; + END LOOP; +END; +$$; +ERROR: cannot perform transaction commands inside a cursor loop that is not read-only +CONTEXT: PL/pgSQL function inline_code_block line 7 at ROLLBACK +SELECT * FROM test1; + a | b +---+--- +(0 rows) + +SELECT * FROM test2; + x +--- + 0 + 1 + 2 + 3 + 4 +(5 rows) + +SELECT * FROM pg_cursors; + name | statement | is_holdable | is_binary | is_scrollable | creation_time +------+-----------+-------------+-----------+---------------+--------------- +(0 rows) + +-- commit inside block with exception handler +TRUNCATE test1; +DO LANGUAGE plpgsql $$ +BEGIN + BEGIN + INSERT INTO test1 (a) VALUES (1); + COMMIT; + INSERT INTO test1 (a) VALUES (1/0); + COMMIT; + EXCEPTION + WHEN division_by_zero THEN + RAISE NOTICE 'caught division_by_zero'; + END; +END; +$$; +ERROR: cannot commit while a subtransaction is active +CONTEXT: PL/pgSQL function inline_code_block line 5 at COMMIT +SELECT * FROM test1; + a | b +---+--- +(0 rows) + +-- rollback inside block with exception handler +TRUNCATE test1; +DO LANGUAGE plpgsql $$ +BEGIN + BEGIN + INSERT INTO test1 (a) VALUES (1); + ROLLBACK; + INSERT INTO test1 (a) VALUES (1/0); + ROLLBACK; + EXCEPTION + WHEN division_by_zero THEN + RAISE NOTICE 'caught division_by_zero'; + END; +END; +$$; +ERROR: cannot roll back while a subtransaction is active +CONTEXT: PL/pgSQL function inline_code_block line 5 at ROLLBACK +SELECT * FROM test1; + a | b +---+--- +(0 rows) + +-- COMMIT failures +DO LANGUAGE plpgsql $$ +BEGIN + CREATE TABLE test3 (y int UNIQUE DEFERRABLE INITIALLY DEFERRED); + COMMIT; + INSERT INTO test3 (y) VALUES (1); + COMMIT; + INSERT INTO test3 (y) VALUES (1); + INSERT INTO test3 (y) VALUES (2); + COMMIT; + INSERT INTO test3 (y) VALUES (3); -- won't get here +END; +$$; +ERROR: duplicate key value violates unique constraint "test3_y_key" +DETAIL: Key (y)=(1) already exists. 
+CONTEXT: PL/pgSQL function inline_code_block line 9 at COMMIT +SELECT * FROM test3; + y +--- + 1 +(1 row) + +-- SET TRANSACTION +DO LANGUAGE plpgsql $$ +BEGIN + PERFORM 1; + RAISE INFO '%', current_setting('transaction_isolation'); + COMMIT; + SET TRANSACTION ISOLATION LEVEL REPEATABLE READ; + PERFORM 1; + RAISE INFO '%', current_setting('transaction_isolation'); + COMMIT; + SET TRANSACTION ISOLATION LEVEL REPEATABLE READ; + RESET TRANSACTION ISOLATION LEVEL; + PERFORM 1; + RAISE INFO '%', current_setting('transaction_isolation'); + COMMIT; +END; +$$; +INFO: read committed +INFO: repeatable read +INFO: read committed +-- error cases +DO LANGUAGE plpgsql $$ +BEGIN + SET TRANSACTION ISOLATION LEVEL REPEATABLE READ; +END; +$$; +ERROR: SET TRANSACTION ISOLATION LEVEL must be called before any query +CONTEXT: SQL statement "SET TRANSACTION ISOLATION LEVEL REPEATABLE READ" +PL/pgSQL function inline_code_block line 3 at SET +DO LANGUAGE plpgsql $$ +BEGIN + SAVEPOINT foo; +END; +$$; +ERROR: unsupported transaction command in PL/pgSQL +CONTEXT: PL/pgSQL function inline_code_block line 3 at SQL statement +DO LANGUAGE plpgsql $$ +BEGIN + EXECUTE 'COMMIT'; +END; +$$; +ERROR: EXECUTE of transaction commands is not implemented +CONTEXT: PL/pgSQL function inline_code_block line 3 at EXECUTE +-- snapshot handling test +TRUNCATE test2; +CREATE PROCEDURE transaction_test9() +LANGUAGE SQL +AS $$ +INSERT INTO test2 VALUES (42); +$$; +DO LANGUAGE plpgsql $$ +BEGIN + ROLLBACK; + CALL transaction_test9(); +END +$$; +SELECT * FROM test2; + x +---- + 42 +(1 row) + +-- Test transaction in procedure with output parameters. This uses a +-- different portal strategy and different code paths in pquery.c. +CREATE PROCEDURE transaction_test10a(INOUT x int) +LANGUAGE plpgsql +AS $$ +BEGIN + x := x + 1; + COMMIT; +END; +$$; +CALL transaction_test10a(10); + x +---- + 11 +(1 row) + +CREATE PROCEDURE transaction_test10b(INOUT x int) +LANGUAGE plpgsql +AS $$ +BEGIN + x := x - 1; + ROLLBACK; +END; +$$; +CALL transaction_test10b(10); + x +--- + 9 +(1 row) + +-- transaction timestamp vs. 
statement timestamp +CREATE PROCEDURE transaction_test11() +LANGUAGE plpgsql +AS $$ +DECLARE + s1 timestamp with time zone; + s2 timestamp with time zone; + s3 timestamp with time zone; + t1 timestamp with time zone; + t2 timestamp with time zone; + t3 timestamp with time zone; +BEGIN + s1 := statement_timestamp(); + t1 := transaction_timestamp(); + ASSERT s1 = t1; + PERFORM pg_sleep(0.001); + COMMIT; + s2 := statement_timestamp(); + t2 := transaction_timestamp(); + ASSERT s2 = s1; + ASSERT t2 > t1; + PERFORM pg_sleep(0.001); + ROLLBACK; + s3 := statement_timestamp(); + t3 := transaction_timestamp(); + ASSERT s3 = s1; + ASSERT t3 > t2; +END; +$$; +CALL transaction_test11(); +DROP TABLE test1; +DROP TABLE test2; +DROP TABLE test3; diff --git a/src/pl/plpgsql/src/expected/plpgsql_varprops.out b/src/pl/plpgsql/src/expected/plpgsql_varprops.out new file mode 100644 index 0000000000..18f03d75b4 --- /dev/null +++ b/src/pl/plpgsql/src/expected/plpgsql_varprops.out @@ -0,0 +1,298 @@ +-- +-- Tests for PL/pgSQL variable properties: CONSTANT, NOT NULL, initializers +-- +create type var_record as (f1 int4, f2 int4); +create domain int_nn as int not null; +create domain var_record_nn as var_record not null; +create domain var_record_colnn as var_record check((value).f2 is not null); +-- CONSTANT +do $$ +declare x constant int := 42; +begin + raise notice 'x = %', x; +end$$; +NOTICE: x = 42 +do $$ +declare x constant int; +begin + x := 42; -- fail +end$$; +ERROR: variable "x" is declared CONSTANT +LINE 4: x := 42; -- fail + ^ +do $$ +declare x constant int; y int; +begin + for x, y in select 1, 2 loop -- fail + end loop; +end$$; +ERROR: variable "x" is declared CONSTANT +LINE 4: for x, y in select 1, 2 loop -- fail + ^ +do $$ +declare x constant int[]; +begin + x[1] := 42; -- fail +end$$; +ERROR: variable "x" is declared CONSTANT +LINE 4: x[1] := 42; -- fail + ^ +do $$ +declare x constant int[]; y int; +begin + for x[1], y in select 1, 2 loop -- fail (currently, unsupported syntax) + end loop; +end$$; +ERROR: syntax error at or near "[" +LINE 4: for x[1], y in select 1, 2 loop -- fail (currently, unsup... 
+ ^ +do $$ +declare x constant var_record; +begin + x.f1 := 42; -- fail +end$$; +ERROR: variable "x" is declared CONSTANT +LINE 4: x.f1 := 42; -- fail + ^ +do $$ +declare x constant var_record; y int; +begin + for x.f1, y in select 1, 2 loop -- fail + end loop; +end$$; +ERROR: variable "x" is declared CONSTANT +LINE 4: for x.f1, y in select 1, 2 loop -- fail + ^ +-- initializer expressions +do $$ +declare x int := sin(0); +begin + raise notice 'x = %', x; +end$$; +NOTICE: x = 0 +do $$ +declare x int := 1/0; -- fail +begin + raise notice 'x = %', x; +end$$; +ERROR: division by zero +CONTEXT: SQL statement "SELECT 1/0" +PL/pgSQL function inline_code_block line 3 during statement block local variable initialization +do $$ +declare x bigint[] := array[1,3,5]; +begin + raise notice 'x = %', x; +end$$; +NOTICE: x = {1,3,5} +do $$ +declare x record := row(1,2,3); +begin + raise notice 'x = %', x; +end$$; +NOTICE: x = (1,2,3) +do $$ +declare x var_record := row(1,2); +begin + raise notice 'x = %', x; +end$$; +NOTICE: x = (1,2) +-- NOT NULL +do $$ +declare x int not null; -- fail +begin + raise notice 'x = %', x; +end$$; +ERROR: variable "x" must have a default value, since it's declared NOT NULL +LINE 2: declare x int not null; -- fail + ^ +do $$ +declare x int not null := 42; +begin + raise notice 'x = %', x; + x := null; -- fail +end$$; +NOTICE: x = 42 +ERROR: null value cannot be assigned to variable "x" declared NOT NULL +CONTEXT: PL/pgSQL function inline_code_block line 5 at assignment +do $$ +declare x int not null := null; -- fail +begin + raise notice 'x = %', x; +end$$; +ERROR: null value cannot be assigned to variable "x" declared NOT NULL +CONTEXT: PL/pgSQL function inline_code_block line 3 during statement block local variable initialization +do $$ +declare x record not null; -- fail +begin + raise notice 'x = %', x; +end$$; +ERROR: variable "x" must have a default value, since it's declared NOT NULL +LINE 2: declare x record not null; -- fail + ^ +do $$ +declare x record not null := row(42); +begin + raise notice 'x = %', x; + x := row(null); -- ok + raise notice 'x = %', x; + x := null; -- fail +end$$; +NOTICE: x = (42) +NOTICE: x = () +ERROR: null value cannot be assigned to variable "x" declared NOT NULL +CONTEXT: PL/pgSQL function inline_code_block line 7 at assignment +do $$ +declare x record not null := null; -- fail +begin + raise notice 'x = %', x; +end$$; +ERROR: null value cannot be assigned to variable "x" declared NOT NULL +CONTEXT: PL/pgSQL function inline_code_block line 3 during statement block local variable initialization +do $$ +declare x var_record not null; -- fail +begin + raise notice 'x = %', x; +end$$; +ERROR: variable "x" must have a default value, since it's declared NOT NULL +LINE 2: declare x var_record not null; -- fail + ^ +do $$ +declare x var_record not null := row(41,42); +begin + raise notice 'x = %', x; + x := row(null,null); -- ok + raise notice 'x = %', x; + x := null; -- fail +end$$; +NOTICE: x = (41,42) +NOTICE: x = (,) +ERROR: null value cannot be assigned to variable "x" declared NOT NULL +CONTEXT: PL/pgSQL function inline_code_block line 7 at assignment +do $$ +declare x var_record not null := null; -- fail +begin + raise notice 'x = %', x; +end$$; +ERROR: null value cannot be assigned to variable "x" declared NOT NULL +CONTEXT: PL/pgSQL function inline_code_block line 3 during statement block local variable initialization +-- Check that variables are reinitialized on block re-entry. 
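The next block exercises the rule that declarations in an inner block are re-initialized every time control enters the block: a variable with a default expression gets a freshly evaluated value, and one without an initializer reverts to null, so assignments made in one loop iteration do not carry over to the next. A minimal sketch of the same rule (hypothetical, separate from the regression output below):

    do $$
    begin
      for i in 1..2 loop
        declare
          n int := i * 10;   -- default expression is re-evaluated on each entry
          m int;             -- no initializer, so m starts out null each time
        begin
          raise notice 'n = %, m = %', n, m;
          m := 99;           -- this assignment does not survive into the next iteration
        end;
      end loop;
    end$$;
    -- n reports 10, then 20; m is reported as null in both iterations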
+do $$ +begin + for i in 1..3 loop + declare + x int; + y int := i; + r record; + c var_record; + begin + if i = 1 then + x := 42; + r := row(i, i+1); + c := row(i, i+1); + end if; + raise notice 'x = %', x; + raise notice 'y = %', y; + raise notice 'r = %', r; + raise notice 'c = %', c; + end; + end loop; +end$$; +NOTICE: x = 42 +NOTICE: y = 1 +NOTICE: r = (1,2) +NOTICE: c = (1,2) +NOTICE: x = +NOTICE: y = 2 +NOTICE: r = +NOTICE: c = +NOTICE: x = +NOTICE: y = 3 +NOTICE: r = +NOTICE: c = +-- Check enforcement of domain constraints during initialization +do $$ +declare x int_nn; -- fail +begin + raise notice 'x = %', x; +end$$; +ERROR: domain int_nn does not allow null values +CONTEXT: PL/pgSQL function inline_code_block line 3 during statement block local variable initialization +do $$ +declare x int_nn := null; -- fail +begin + raise notice 'x = %', x; +end$$; +ERROR: domain int_nn does not allow null values +CONTEXT: PL/pgSQL function inline_code_block line 3 during statement block local variable initialization +do $$ +declare x int_nn := 42; +begin + raise notice 'x = %', x; + x := null; -- fail +end$$; +NOTICE: x = 42 +ERROR: domain int_nn does not allow null values +CONTEXT: PL/pgSQL function inline_code_block line 5 at assignment +do $$ +declare x var_record_nn; -- fail +begin + raise notice 'x = %', x; +end$$; +ERROR: domain var_record_nn does not allow null values +CONTEXT: PL/pgSQL function inline_code_block line 3 during statement block local variable initialization +do $$ +declare x var_record_nn := null; -- fail +begin + raise notice 'x = %', x; +end$$; +ERROR: domain var_record_nn does not allow null values +CONTEXT: PL/pgSQL function inline_code_block line 3 during statement block local variable initialization +do $$ +declare x var_record_nn := row(1,2); +begin + raise notice 'x = %', x; + x := row(null,null); -- ok + x := null; -- fail +end$$; +NOTICE: x = (1,2) +ERROR: domain var_record_nn does not allow null values +CONTEXT: PL/pgSQL function inline_code_block line 6 at assignment +do $$ +declare x var_record_colnn; -- fail +begin + raise notice 'x = %', x; +end$$; +ERROR: value for domain var_record_colnn violates check constraint "var_record_colnn_check" +CONTEXT: PL/pgSQL function inline_code_block line 3 during statement block local variable initialization +do $$ +declare x var_record_colnn := null; -- fail +begin + raise notice 'x = %', x; +end$$; +ERROR: value for domain var_record_colnn violates check constraint "var_record_colnn_check" +CONTEXT: PL/pgSQL function inline_code_block line 3 during statement block local variable initialization +do $$ +declare x var_record_colnn := row(1,null); -- fail +begin + raise notice 'x = %', x; +end$$; +ERROR: value for domain var_record_colnn violates check constraint "var_record_colnn_check" +CONTEXT: PL/pgSQL function inline_code_block line 3 during statement block local variable initialization +do $$ +declare x var_record_colnn := row(1,2); +begin + raise notice 'x = %', x; + x := null; -- fail +end$$; +NOTICE: x = (1,2) +ERROR: value for domain var_record_colnn violates check constraint "var_record_colnn_check" +CONTEXT: PL/pgSQL function inline_code_block line 5 at assignment +do $$ +declare x var_record_colnn := row(1,2); +begin + raise notice 'x = %', x; + x := row(null,null); -- fail +end$$; +NOTICE: x = (1,2) +ERROR: value for domain var_record_colnn violates check constraint "var_record_colnn_check" +CONTEXT: PL/pgSQL function inline_code_block line 5 at assignment diff --git 
a/src/pl/plpgsql/src/generate-plerrcodes.pl b/src/pl/plpgsql/src/generate-plerrcodes.pl index eb135bc25e..834cd5058f 100644 --- a/src/pl/plpgsql/src/generate-plerrcodes.pl +++ b/src/pl/plpgsql/src/generate-plerrcodes.pl @@ -1,7 +1,7 @@ #!/usr/bin/perl # # Generate the plerrcodes.h header from errcodes.txt -# Copyright (c) 2000-2017, PostgreSQL Global Development Group +# Copyright (c) 2000-2018, PostgreSQL Global Development Group use warnings; use strict; diff --git a/src/pl/plpgsql/src/nls.mk b/src/pl/plpgsql/src/nls.mk index 1133668fc7..05512678e8 100644 --- a/src/pl/plpgsql/src/nls.mk +++ b/src/pl/plpgsql/src/nls.mk @@ -1,6 +1,6 @@ # src/pl/plpgsql/src/nls.mk CATALOG_NAME = plpgsql -AVAIL_LANGUAGES = cs de es fr it ja ko pl pt_BR ro ru sv zh_CN zh_TW +AVAIL_LANGUAGES = cs de es fr it ja ko pl pt_BR ro ru sv tr vi zh_CN zh_TW GETTEXT_FILES = pl_comp.c pl_exec.c pl_gram.c pl_funcs.c pl_handler.c pl_scanner.c GETTEXT_TRIGGERS = $(BACKEND_COMMON_GETTEXT_TRIGGERS) yyerror plpgsql_yyerror GETTEXT_FLAGS = $(BACKEND_COMMON_GETTEXT_FLAGS) diff --git a/src/pl/plpgsql/src/pl_comp.c b/src/pl/plpgsql/src/pl_comp.c index 662b3c97d7..59460d2643 100644 --- a/src/pl/plpgsql/src/pl_comp.c +++ b/src/pl/plpgsql/src/pl_comp.c @@ -3,7 +3,7 @@ * pl_comp.c - Compiler part of the PL/pgSQL * procedural language * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * @@ -20,7 +20,6 @@ #include "access/htup_details.h" #include "catalog/namespace.h" #include "catalog/pg_proc.h" -#include "catalog/pg_proc_fn.h" #include "catalog/pg_type.h" #include "funcapi.h" #include "nodes/makefuncs.h" @@ -32,6 +31,7 @@ #include "utils/regproc.h" #include "utils/rel.h" #include "utils/syscache.h" +#include "utils/typcache.h" #include "plpgsql.h" @@ -104,7 +104,6 @@ static Node *plpgsql_param_ref(ParseState *pstate, ParamRef *pref); static Node *resolve_column_ref(ParseState *pstate, PLpgSQL_expr *expr, ColumnRef *cref, bool error_if_no_field); static Node *make_datum_param(PLpgSQL_expr *expr, int dno, int location); -static PLpgSQL_row *build_row_from_class(Oid classOid); static PLpgSQL_row *build_row_from_vars(PLpgSQL_variable **vars, int numvars); static PLpgSQL_type *build_datatype(HeapTuple typeTup, int32 typmod, Oid collation); static void plpgsql_start_datums(void); @@ -342,11 +341,12 @@ do_compile(FunctionCallInfo fcinfo, * per-function memory context, so it can be reclaimed easily. */ func_cxt = AllocSetContextCreate(TopMemoryContext, - "PL/pgSQL function context", + "PL/pgSQL function", ALLOCSET_DEFAULT_SIZES); plpgsql_compile_tmp_cxt = MemoryContextSwitchTo(func_cxt); function->fn_signature = format_procedure(fcinfo->flinfo->fn_oid); + MemoryContextSetIdentifier(func_cxt, function->fn_signature); function->fn_oid = fcinfo->flinfo->fn_oid; function->fn_xmin = HeapTupleHeaderGetRawXmin(procTup->t_data); function->fn_tid = procTup->t_self; @@ -366,6 +366,8 @@ do_compile(FunctionCallInfo fcinfo, else function->fn_is_trigger = PLPGSQL_NOT_TRIGGER; + function->fn_prokind = procStruct->prokind; + /* * Initialize the compiler, particularly the namespace stack. 
The * outermost namespace contains function parameters and other special @@ -426,16 +428,20 @@ do_compile(FunctionCallInfo fcinfo, /* Disallow pseudotype argument */ /* (note we already replaced polymorphic types) */ /* (build_variable would do this, but wrong message) */ - if (argdtype->ttype != PLPGSQL_TTYPE_SCALAR && - argdtype->ttype != PLPGSQL_TTYPE_ROW) + if (argdtype->ttype == PLPGSQL_TTYPE_PSEUDO) ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), errmsg("PL/pgSQL functions cannot accept type %s", format_type_be(argtypeid)))); - /* Build variable and add to datum list */ - argvariable = plpgsql_build_variable(buf, 0, - argdtype, false); + /* + * Build variable and add to datum list. If there's a name + * for the argument, use that as refname, else use $n name. + */ + argvariable = plpgsql_build_variable((argnames && + argnames[i][0] != '\0') ? + argnames[i] : buf, + 0, argdtype, false); if (argvariable->dtype == PLPGSQL_DTYPE_VAR) { @@ -443,8 +449,8 @@ do_compile(FunctionCallInfo fcinfo, } else { - Assert(argvariable->dtype == PLPGSQL_DTYPE_ROW); - argitemtype = PLPGSQL_NSTYPE_ROW; + Assert(argvariable->dtype == PLPGSQL_DTYPE_REC); + argitemtype = PLPGSQL_NSTYPE_REC; } /* Remember arguments in appropriate arrays */ @@ -469,11 +475,11 @@ do_compile(FunctionCallInfo fcinfo, /* * If there's just one OUT parameter, out_param_varno points * directly to it. If there's more than one, build a row that - * holds all of them. + * holds all of them. Procedures return a row even for one OUT + * parameter. */ - if (num_out_args == 1) - function->out_param_varno = out_arg_variables[0]->dno; - else if (num_out_args > 1) + if (num_out_args > 1 || + (num_out_args == 1 && function->fn_prokind == PROKIND_PROCEDURE)) { PLpgSQL_row *row = build_row_from_vars(out_arg_variables, num_out_args); @@ -481,6 +487,8 @@ do_compile(FunctionCallInfo fcinfo, plpgsql_adddatum((PLpgSQL_datum *) row); function->out_param_varno = row->dno; } + else if (num_out_args == 1) + function->out_param_varno = out_arg_variables[0]->dno; /* * Check for a polymorphic returntype. If found, use the actual @@ -549,29 +557,26 @@ do_compile(FunctionCallInfo fcinfo, format_type_be(rettypeid)))); } - if (typeStruct->typrelid != InvalidOid || - rettypeid == RECORDOID) - function->fn_retistuple = true; - else - { - function->fn_retbyval = typeStruct->typbyval; - function->fn_rettyplen = typeStruct->typlen; + function->fn_retistuple = type_is_rowtype(rettypeid); + function->fn_retisdomain = (typeStruct->typtype == TYPTYPE_DOMAIN); + function->fn_retbyval = typeStruct->typbyval; + function->fn_rettyplen = typeStruct->typlen; - /* - * install $0 reference, but only for polymorphic return - * types, and not when the return is specified through an - * output parameter. - */ - if (IsPolymorphicType(procStruct->prorettype) && - num_out_args == 0) - { - (void) plpgsql_build_variable("$0", 0, - build_datatype(typeTup, - -1, - function->fn_input_collation), - true); - } + /* + * install $0 reference, but only for polymorphic return types, + * and not when the return is specified through an output + * parameter. 
+ */ + if (IsPolymorphicType(procStruct->prorettype) && + num_out_args == 0) + { + (void) plpgsql_build_variable("$0", 0, + build_datatype(typeTup, + -1, + function->fn_input_collation), + true); } + ReleaseSysCache(typeTup); break; @@ -580,6 +585,7 @@ do_compile(FunctionCallInfo fcinfo, function->fn_rettype = InvalidOid; function->fn_retbyval = false; function->fn_retistuple = true; + function->fn_retisdomain = false; function->fn_retset = false; /* shouldn't be any declared arguments */ @@ -590,11 +596,11 @@ do_compile(FunctionCallInfo fcinfo, errhint("The arguments of the trigger can be accessed through TG_NARGS and TG_ARGV instead."))); /* Add the record for referencing NEW ROW */ - rec = plpgsql_build_record("new", 0, true); + rec = plpgsql_build_record("new", 0, NULL, RECORDOID, true); function->new_varno = rec->dno; /* Add the record for referencing OLD ROW */ - rec = plpgsql_build_record("old", 0, true); + rec = plpgsql_build_record("old", 0, NULL, RECORDOID, true); function->old_varno = rec->dno; /* Add the variable tg_name */ @@ -603,7 +609,9 @@ do_compile(FunctionCallInfo fcinfo, -1, InvalidOid), true); - function->tg_name_varno = var->dno; + Assert(var->dtype == PLPGSQL_DTYPE_VAR); + var->dtype = PLPGSQL_DTYPE_PROMISE; + ((PLpgSQL_var *) var)->promise = PLPGSQL_PROMISE_TG_NAME; /* Add the variable tg_when */ var = plpgsql_build_variable("tg_when", 0, @@ -611,7 +619,9 @@ do_compile(FunctionCallInfo fcinfo, -1, function->fn_input_collation), true); - function->tg_when_varno = var->dno; + Assert(var->dtype == PLPGSQL_DTYPE_VAR); + var->dtype = PLPGSQL_DTYPE_PROMISE; + ((PLpgSQL_var *) var)->promise = PLPGSQL_PROMISE_TG_WHEN; /* Add the variable tg_level */ var = plpgsql_build_variable("tg_level", 0, @@ -619,7 +629,9 @@ do_compile(FunctionCallInfo fcinfo, -1, function->fn_input_collation), true); - function->tg_level_varno = var->dno; + Assert(var->dtype == PLPGSQL_DTYPE_VAR); + var->dtype = PLPGSQL_DTYPE_PROMISE; + ((PLpgSQL_var *) var)->promise = PLPGSQL_PROMISE_TG_LEVEL; /* Add the variable tg_op */ var = plpgsql_build_variable("tg_op", 0, @@ -627,7 +639,9 @@ do_compile(FunctionCallInfo fcinfo, -1, function->fn_input_collation), true); - function->tg_op_varno = var->dno; + Assert(var->dtype == PLPGSQL_DTYPE_VAR); + var->dtype = PLPGSQL_DTYPE_PROMISE; + ((PLpgSQL_var *) var)->promise = PLPGSQL_PROMISE_TG_OP; /* Add the variable tg_relid */ var = plpgsql_build_variable("tg_relid", 0, @@ -635,7 +649,9 @@ do_compile(FunctionCallInfo fcinfo, -1, InvalidOid), true); - function->tg_relid_varno = var->dno; + Assert(var->dtype == PLPGSQL_DTYPE_VAR); + var->dtype = PLPGSQL_DTYPE_PROMISE; + ((PLpgSQL_var *) var)->promise = PLPGSQL_PROMISE_TG_RELID; /* Add the variable tg_relname */ var = plpgsql_build_variable("tg_relname", 0, @@ -643,7 +659,9 @@ do_compile(FunctionCallInfo fcinfo, -1, InvalidOid), true); - function->tg_relname_varno = var->dno; + Assert(var->dtype == PLPGSQL_DTYPE_VAR); + var->dtype = PLPGSQL_DTYPE_PROMISE; + ((PLpgSQL_var *) var)->promise = PLPGSQL_PROMISE_TG_TABLE_NAME; /* tg_table_name is now preferred to tg_relname */ var = plpgsql_build_variable("tg_table_name", 0, @@ -651,7 +669,9 @@ do_compile(FunctionCallInfo fcinfo, -1, InvalidOid), true); - function->tg_table_name_varno = var->dno; + Assert(var->dtype == PLPGSQL_DTYPE_VAR); + var->dtype = PLPGSQL_DTYPE_PROMISE; + ((PLpgSQL_var *) var)->promise = PLPGSQL_PROMISE_TG_TABLE_NAME; /* add the variable tg_table_schema */ var = plpgsql_build_variable("tg_table_schema", 0, @@ -659,7 +679,9 @@ do_compile(FunctionCallInfo 
fcinfo, -1, InvalidOid), true); - function->tg_table_schema_varno = var->dno; + Assert(var->dtype == PLPGSQL_DTYPE_VAR); + var->dtype = PLPGSQL_DTYPE_PROMISE; + ((PLpgSQL_var *) var)->promise = PLPGSQL_PROMISE_TG_TABLE_SCHEMA; /* Add the variable tg_nargs */ var = plpgsql_build_variable("tg_nargs", 0, @@ -667,7 +689,9 @@ do_compile(FunctionCallInfo fcinfo, -1, InvalidOid), true); - function->tg_nargs_varno = var->dno; + Assert(var->dtype == PLPGSQL_DTYPE_VAR); + var->dtype = PLPGSQL_DTYPE_PROMISE; + ((PLpgSQL_var *) var)->promise = PLPGSQL_PROMISE_TG_NARGS; /* Add the variable tg_argv */ var = plpgsql_build_variable("tg_argv", 0, @@ -675,7 +699,9 @@ do_compile(FunctionCallInfo fcinfo, -1, function->fn_input_collation), true); - function->tg_argv_varno = var->dno; + Assert(var->dtype == PLPGSQL_DTYPE_VAR); + var->dtype = PLPGSQL_DTYPE_PROMISE; + ((PLpgSQL_var *) var)->promise = PLPGSQL_PROMISE_TG_ARGV; break; @@ -683,6 +709,7 @@ do_compile(FunctionCallInfo fcinfo, function->fn_rettype = VOIDOID; function->fn_retbyval = false; function->fn_retistuple = true; + function->fn_retisdomain = false; function->fn_retset = false; /* shouldn't be any declared arguments */ @@ -697,7 +724,9 @@ do_compile(FunctionCallInfo fcinfo, -1, function->fn_input_collation), true); - function->tg_event_varno = var->dno; + Assert(var->dtype == PLPGSQL_DTYPE_VAR); + var->dtype = PLPGSQL_DTYPE_PROMISE; + ((PLpgSQL_var *) var)->promise = PLPGSQL_PROMISE_TG_EVENT; /* Add the variable tg_tag */ var = plpgsql_build_variable("tg_tag", 0, @@ -705,7 +734,9 @@ do_compile(FunctionCallInfo fcinfo, -1, function->fn_input_collation), true); - function->tg_tag_varno = var->dno; + Assert(var->dtype == PLPGSQL_DTYPE_VAR); + var->dtype = PLPGSQL_DTYPE_PROMISE; + ((PLpgSQL_var *) var)->promise = PLPGSQL_PROMISE_TG_TAG; break; @@ -858,13 +889,15 @@ plpgsql_compile_inline(char *proc_source) function->fn_rettype = VOIDOID; function->fn_retset = false; function->fn_retistuple = false; + function->fn_retisdomain = false; + function->fn_prokind = PROKIND_FUNCTION; /* a bit of hardwired knowledge about type VOID here */ function->fn_retbyval = true; function->fn_rettyplen = sizeof(int32); /* * Remember if function is STABLE/IMMUTABLE. XXX would it be better to - * set this TRUE inside a read-only transaction? Not clear. + * set this true inside a read-only transaction? Not clear. 
*/ function->fn_readonly = false; @@ -1231,19 +1264,22 @@ resolve_column_ref(ParseState *pstate, PLpgSQL_expr *expr, if (nnames == nnames_field) { /* colname could be a field in this record */ + PLpgSQL_rec *rec = (PLpgSQL_rec *) estate->datums[nse->itemno]; int i; /* search for a datum referencing this field */ - for (i = 0; i < estate->ndatums; i++) + i = rec->firstfield; + while (i >= 0) { PLpgSQL_recfield *fld = (PLpgSQL_recfield *) estate->datums[i]; - if (fld->dtype == PLPGSQL_DTYPE_RECFIELD && - fld->recparentno == nse->itemno && - strcmp(fld->fieldname, colname) == 0) + Assert(fld->dtype == PLPGSQL_DTYPE_RECFIELD && + fld->recparentno == nse->itemno); + if (strcmp(fld->fieldname, colname) == 0) { return make_datum_param(expr, i, cref->location); } + i = fld->nextfield; } /* @@ -1261,34 +1297,6 @@ resolve_column_ref(ParseState *pstate, PLpgSQL_expr *expr, parser_errposition(pstate, cref->location))); } break; - case PLPGSQL_NSTYPE_ROW: - if (nnames == nnames_wholerow) - return make_datum_param(expr, nse->itemno, cref->location); - if (nnames == nnames_field) - { - /* colname could be a field in this row */ - PLpgSQL_row *row = (PLpgSQL_row *) estate->datums[nse->itemno]; - int i; - - for (i = 0; i < row->nfields; i++) - { - if (row->fieldnames[i] && - strcmp(row->fieldnames[i], colname) == 0) - { - return make_datum_param(expr, row->varnos[i], - cref->location); - } - } - /* Not found, so throw error or return NULL */ - if (error_if_no_field) - ereport(ERROR, - (errcode(ERRCODE_UNDEFINED_COLUMN), - errmsg("record \"%s\" has no field \"%s\"", - (nnames_field == 1) ? name1 : name2, - colname), - parser_errposition(pstate, cref->location))); - } - break; default: elog(ERROR, "unrecognized plpgsql itemtype: %d", nse->itemtype); } @@ -1345,8 +1353,8 @@ make_datum_param(PLpgSQL_expr *expr, int dno, int location) * yytxt is the original token text; we need this to check for quoting, * so that later checks for unreserved keywords work properly. * - * If recognized as a variable, fill in *wdatum and return TRUE; - * if not recognized, fill in *word and return FALSE. + * If recognized as a variable, fill in *wdatum and return true; + * if not recognized, fill in *word and return false. * (Note: those two pointers actually point to members of the same union, * but for notational reasons we pass them separately.) * ---------- @@ -1376,7 +1384,6 @@ plpgsql_parse_word(char *word1, const char *yytxt, switch (ns->itemtype) { case PLPGSQL_NSTYPE_VAR: - case PLPGSQL_NSTYPE_ROW: case PLPGSQL_NSTYPE_REC: wdatum->datum = plpgsql_Datums[ns->itemno]; wdatum->ident = word1; @@ -1452,14 +1459,11 @@ plpgsql_parse_dblword(char *word1, char *word2, * datum whether it is or not --- any error will be * detected later. */ + PLpgSQL_rec *rec; PLpgSQL_recfield *new; - new = palloc(sizeof(PLpgSQL_recfield)); - new->dtype = PLPGSQL_DTYPE_RECFIELD; - new->fieldname = pstrdup(word2); - new->recparentno = ns->itemno; - - plpgsql_adddatum((PLpgSQL_datum *) new); + rec = (PLpgSQL_rec *) (plpgsql_Datums[ns->itemno]); + new = plpgsql_build_recfield(rec, word2); wdatum->datum = (PLpgSQL_datum *) new; } @@ -1473,43 +1477,6 @@ plpgsql_parse_dblword(char *word1, char *word2, wdatum->idents = idents; return true; - case PLPGSQL_NSTYPE_ROW: - if (nnames == 1) - { - /* - * First word is a row name, so second word could be a - * field in this row. Again, no error now if it - * isn't. 
- */ - PLpgSQL_row *row; - int i; - - row = (PLpgSQL_row *) (plpgsql_Datums[ns->itemno]); - for (i = 0; i < row->nfields; i++) - { - if (row->fieldnames[i] && - strcmp(row->fieldnames[i], word2) == 0) - { - wdatum->datum = plpgsql_Datums[row->varnos[i]]; - wdatum->ident = NULL; - wdatum->quoted = false; /* not used */ - wdatum->idents = idents; - return true; - } - } - /* fall through to return CWORD */ - } - else - { - /* Block-qualified reference to row variable. */ - wdatum->datum = plpgsql_Datums[ns->itemno]; - wdatum->ident = NULL; - wdatum->quoted = false; /* not used */ - wdatum->idents = idents; - return true; - } - break; - default: break; } @@ -1563,14 +1530,11 @@ plpgsql_parse_tripword(char *word1, char *word2, char *word3, * words 1/2 are a record name, so third word could be * a field in this record. */ + PLpgSQL_rec *rec; PLpgSQL_recfield *new; - new = palloc(sizeof(PLpgSQL_recfield)); - new->dtype = PLPGSQL_DTYPE_RECFIELD; - new->fieldname = pstrdup(word3); - new->recparentno = ns->itemno; - - plpgsql_adddatum((PLpgSQL_datum *) new); + rec = (PLpgSQL_rec *) (plpgsql_Datums[ns->itemno]); + new = plpgsql_build_recfield(rec, word3); wdatum->datum = (PLpgSQL_datum *) new; wdatum->ident = NULL; @@ -1579,32 +1543,6 @@ plpgsql_parse_tripword(char *word1, char *word2, char *word3, return true; } - case PLPGSQL_NSTYPE_ROW: - { - /* - * words 1/2 are a row name, so third word could be a - * field in this row. - */ - PLpgSQL_row *row; - int i; - - row = (PLpgSQL_row *) (plpgsql_Datums[ns->itemno]); - for (i = 0; i < row->nfields; i++) - { - if (row->fieldnames[i] && - strcmp(row->fieldnames[i], word3) == 0) - { - wdatum->datum = plpgsql_Datums[row->varnos[i]]; - wdatum->ident = NULL; - wdatum->quoted = false; /* not used */ - wdatum->idents = idents; - return true; - } - } - /* fall through to return CWORD */ - break; - } - default: break; } @@ -1855,8 +1793,8 @@ plpgsql_parse_cwordrowtype(List *idents) * plpgsql_build_variable - build a datum-array entry of a given * datatype * - * The returned struct may be a PLpgSQL_var, PLpgSQL_row, or - * PLpgSQL_rec depending on the given datatype, and is allocated via + * The returned struct may be a PLpgSQL_var or PLpgSQL_rec + * depending on the given datatype, and is allocated via * palloc. The struct is automatically added to the current datum * array, and optionally to the current namespace. 
*/ @@ -1878,7 +1816,7 @@ plpgsql_build_variable(const char *refname, int lineno, PLpgSQL_type *dtype, var->refname = pstrdup(refname); var->lineno = lineno; var->datatype = dtype; - /* other fields might be filled by caller */ + /* other fields are left as 0, might be changed by caller */ /* preset to NULL */ var->value = 0; @@ -1893,31 +1831,14 @@ plpgsql_build_variable(const char *refname, int lineno, PLpgSQL_type *dtype, result = (PLpgSQL_variable *) var; break; } - case PLPGSQL_TTYPE_ROW: - { - /* Composite type -- build a row variable */ - PLpgSQL_row *row; - - row = build_row_from_class(dtype->typrelid); - - row->dtype = PLPGSQL_DTYPE_ROW; - row->refname = pstrdup(refname); - row->lineno = lineno; - - plpgsql_adddatum((PLpgSQL_datum *) row); - if (add2namespace) - plpgsql_ns_additem(PLPGSQL_NSTYPE_ROW, - row->dno, - refname); - result = (PLpgSQL_variable *) row; - break; - } case PLPGSQL_TTYPE_REC: { - /* "record" type -- build a record variable */ + /* Composite type -- build a record variable */ PLpgSQL_rec *rec; - rec = plpgsql_build_record(refname, lineno, add2namespace); + rec = plpgsql_build_record(refname, lineno, + dtype, dtype->typoid, + add2namespace); result = (PLpgSQL_variable *) rec; break; } @@ -1941,7 +1862,9 @@ plpgsql_build_variable(const char *refname, int lineno, PLpgSQL_type *dtype, * Build empty named record variable, and optionally add it to namespace */ PLpgSQL_rec * -plpgsql_build_record(const char *refname, int lineno, bool add2namespace) +plpgsql_build_record(const char *refname, int lineno, + PLpgSQL_type *dtype, Oid rectypeid, + bool add2namespace) { PLpgSQL_rec *rec; @@ -1949,10 +1872,11 @@ plpgsql_build_record(const char *refname, int lineno, bool add2namespace) rec->dtype = PLPGSQL_DTYPE_REC; rec->refname = pstrdup(refname); rec->lineno = lineno; - rec->tup = NULL; - rec->tupdesc = NULL; - rec->freetup = false; - rec->freetupdesc = false; + /* other fields are left as 0, might be changed by caller */ + rec->datatype = dtype; + rec->rectypeid = rectypeid; + rec->firstfield = -1; + rec->erh = NULL; plpgsql_adddatum((PLpgSQL_datum *) rec); if (add2namespace) plpgsql_ns_additem(PLPGSQL_NSTYPE_REC, rec->dno, rec->refname); @@ -1960,104 +1884,9 @@ plpgsql_build_record(const char *refname, int lineno, bool add2namespace) return rec; } -/* - * Build a row-variable data structure given the pg_class OID. - */ -static PLpgSQL_row * -build_row_from_class(Oid classOid) -{ - PLpgSQL_row *row; - Relation rel; - Form_pg_class classStruct; - const char *relname; - int i; - - /* - * Open the relation to get info. - */ - rel = relation_open(classOid, AccessShareLock); - classStruct = RelationGetForm(rel); - relname = RelationGetRelationName(rel); - - /* - * Accept relation, sequence, view, materialized view, composite type, or - * foreign table. - */ - if (classStruct->relkind != RELKIND_RELATION && - classStruct->relkind != RELKIND_SEQUENCE && - classStruct->relkind != RELKIND_VIEW && - classStruct->relkind != RELKIND_MATVIEW && - classStruct->relkind != RELKIND_COMPOSITE_TYPE && - classStruct->relkind != RELKIND_FOREIGN_TABLE && - classStruct->relkind != RELKIND_PARTITIONED_TABLE) - ereport(ERROR, - (errcode(ERRCODE_WRONG_OBJECT_TYPE), - errmsg("relation \"%s\" is not a table", relname))); - - /* - * Create a row datum entry and all the required variables that it will - * point to. 
- */ - row = palloc0(sizeof(PLpgSQL_row)); - row->dtype = PLPGSQL_DTYPE_ROW; - row->rowtupdesc = CreateTupleDescCopy(RelationGetDescr(rel)); - row->nfields = classStruct->relnatts; - row->fieldnames = palloc(sizeof(char *) * row->nfields); - row->varnos = palloc(sizeof(int) * row->nfields); - - for (i = 0; i < row->nfields; i++) - { - Form_pg_attribute attrStruct; - - /* - * Get the attribute and check for dropped column - */ - attrStruct = row->rowtupdesc->attrs[i]; - - if (!attrStruct->attisdropped) - { - char *attname; - char refname[(NAMEDATALEN * 2) + 100]; - PLpgSQL_variable *var; - - attname = NameStr(attrStruct->attname); - snprintf(refname, sizeof(refname), "%s.%s", relname, attname); - - /* - * Create the internal variable for the field - * - * We know if the table definitions contain a default value or if - * the field is declared in the table as NOT NULL. But it's - * possible to create a table field as NOT NULL without a default - * value and that would lead to problems later when initializing - * the variables due to entering a block at execution time. Thus - * we ignore this information for now. - */ - var = plpgsql_build_variable(refname, 0, - plpgsql_build_datatype(attrStruct->atttypid, - attrStruct->atttypmod, - attrStruct->attcollation), - false); - - /* Add the variable to the row */ - row->fieldnames[i] = attname; - row->varnos[i] = var->dno; - } - else - { - /* Leave a hole in the row structure for the dropped col */ - row->fieldnames[i] = NULL; - row->varnos[i] = -1; - } - } - - relation_close(rel, AccessShareLock); - - return row; -} - /* * Build a row-variable data structure given the component variables. + * Include a rowtupdesc, since we will need to materialize the row result. */ static PLpgSQL_row * build_row_from_vars(PLpgSQL_variable **vars, int numvars) @@ -2067,6 +1896,8 @@ build_row_from_vars(PLpgSQL_variable **vars, int numvars) row = palloc0(sizeof(PLpgSQL_row)); row->dtype = PLPGSQL_DTYPE_ROW; + row->refname = "(unnamed row)"; + row->lineno = -1; row->rowtupdesc = CreateTemplateTupleDesc(numvars, false); row->nfields = numvars; row->fieldnames = palloc(numvars * sizeof(char *)); @@ -2075,32 +1906,34 @@ build_row_from_vars(PLpgSQL_variable **vars, int numvars) for (i = 0; i < numvars; i++) { PLpgSQL_variable *var = vars[i]; - Oid typoid = RECORDOID; - int32 typmod = -1; - Oid typcoll = InvalidOid; + Oid typoid; + int32 typmod; + Oid typcoll; + + /* Member vars of a row should never be const */ + Assert(!var->isconst); switch (var->dtype) { case PLPGSQL_DTYPE_VAR: + case PLPGSQL_DTYPE_PROMISE: typoid = ((PLpgSQL_var *) var)->datatype->typoid; typmod = ((PLpgSQL_var *) var)->datatype->atttypmod; typcoll = ((PLpgSQL_var *) var)->datatype->collation; break; case PLPGSQL_DTYPE_REC: - break; - - case PLPGSQL_DTYPE_ROW: - if (((PLpgSQL_row *) var)->rowtupdesc) - { - typoid = ((PLpgSQL_row *) var)->rowtupdesc->tdtypeid; - typmod = ((PLpgSQL_row *) var)->rowtupdesc->tdtypmod; - /* composite types have no collation */ - } + typoid = ((PLpgSQL_rec *) var)->rectypeid; + typmod = -1; /* don't know typmod, if it's used at all */ + typcoll = InvalidOid; /* composite types have no collation */ break; default: elog(ERROR, "unrecognized dtype: %d", var->dtype); + typoid = InvalidOid; /* keep compiler quiet */ + typmod = 0; + typcoll = InvalidOid; + break; } row->fieldnames[i] = var->refname; @@ -2116,6 +1949,46 @@ build_row_from_vars(PLpgSQL_variable **vars, int numvars) return row; } +/* + * Build a RECFIELD datum for the named field of the specified record variable + * 
+ * If there's already such a datum, just return it; we don't need duplicates. + */ +PLpgSQL_recfield * +plpgsql_build_recfield(PLpgSQL_rec *rec, const char *fldname) +{ + PLpgSQL_recfield *recfield; + int i; + + /* search for an existing datum referencing this field */ + i = rec->firstfield; + while (i >= 0) + { + PLpgSQL_recfield *fld = (PLpgSQL_recfield *) plpgsql_Datums[i]; + + Assert(fld->dtype == PLPGSQL_DTYPE_RECFIELD && + fld->recparentno == rec->dno); + if (strcmp(fld->fieldname, fldname) == 0) + return fld; + i = fld->nextfield; + } + + /* nope, so make a new one */ + recfield = palloc0(sizeof(PLpgSQL_recfield)); + recfield->dtype = PLPGSQL_DTYPE_RECFIELD; + recfield->fieldname = pstrdup(fldname); + recfield->recparentno = rec->dno; + recfield->rectupledescid = INVALID_TUPLEDESC_IDENTIFIER; + + plpgsql_adddatum((PLpgSQL_datum *) recfield); + + /* now we can link it into the parent's chain */ + recfield->nextfield = rec->firstfield; + rec->firstfield = recfield->dno; + + return recfield; +} + /* * plpgsql_build_datatype * Build PLpgSQL_type struct given type OID, typmod, and collation. @@ -2162,14 +2035,18 @@ build_datatype(HeapTuple typeTup, int32 typmod, Oid collation) switch (typeStruct->typtype) { case TYPTYPE_BASE: - case TYPTYPE_DOMAIN: case TYPTYPE_ENUM: case TYPTYPE_RANGE: typ->ttype = PLPGSQL_TTYPE_SCALAR; break; case TYPTYPE_COMPOSITE: - Assert(OidIsValid(typeStruct->typrelid)); - typ->ttype = PLPGSQL_TTYPE_ROW; + typ->ttype = PLPGSQL_TTYPE_REC; + break; + case TYPTYPE_DOMAIN: + if (type_is_rowtype(typeStruct->typbasetype)) + typ->ttype = PLPGSQL_TTYPE_REC; + else + typ->ttype = PLPGSQL_TTYPE_SCALAR; break; case TYPTYPE_PSEUDO: if (typ->typoid == RECORDOID) @@ -2185,7 +2062,6 @@ build_datatype(HeapTuple typeTup, int32 typmod, Oid collation) typ->typlen = typeStruct->typlen; typ->typbyval = typeStruct->typbyval; typ->typtype = typeStruct->typtype; - typ->typrelid = typeStruct->typrelid; typ->collation = typeStruct->typcollation; if (OidIsValid(collation) && OidIsValid(typ->collation)) typ->collation = collation; @@ -2341,15 +2217,12 @@ plpgsql_adddatum(PLpgSQL_datum *new) /* ---------- * plpgsql_finish_datums Copy completed datum info into function struct. - * - * This is also responsible for building resettable_datums, a bitmapset - * of the dnos of all ROW, REC, and RECFIELD datums in the function. * ---------- */ static void plpgsql_finish_datums(PLpgSQL_function *function) { - Bitmapset *resettable_datums = NULL; + Size copiable_size = 0; int i; function->ndatums = plpgsql_nDatums; @@ -2357,32 +2230,35 @@ plpgsql_finish_datums(PLpgSQL_function *function) for (i = 0; i < plpgsql_nDatums; i++) { function->datums[i] = plpgsql_Datums[i]; + + /* This must agree with copy_plpgsql_datums on what is copiable */ switch (function->datums[i]->dtype) { - case PLPGSQL_DTYPE_ROW: + case PLPGSQL_DTYPE_VAR: + case PLPGSQL_DTYPE_PROMISE: + copiable_size += MAXALIGN(sizeof(PLpgSQL_var)); + break; case PLPGSQL_DTYPE_REC: - case PLPGSQL_DTYPE_RECFIELD: - resettable_datums = bms_add_member(resettable_datums, i); + copiable_size += MAXALIGN(sizeof(PLpgSQL_rec)); break; - default: break; } } - function->resettable_datums = resettable_datums; + function->copiable_size = copiable_size; } /* ---------- * plpgsql_add_initdatums Make an array of the datum numbers of - * all the simple VAR datums created since the last call + * all the initializable datums created since the last call * to this function. * * If varnos is NULL, we just forget any datum entries created since the * last call. 
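
plpgsql_build_recfield() above keeps every RECFIELD datum belonging to a record on a singly linked chain (firstfield/nextfield) and searches that chain by name before allocating anything, so repeated references to the same field share one datum. Below is a minimal standalone sketch of that search-then-prepend pattern; FieldEntry, RecVar, datum_array and build_recfield are illustrative names, not the real PL/pgSQL structures.

    #include <stdlib.h>
    #include <string.h>

    /* Simplified stand-ins for the real datum structures (illustrative only) */
    typedef struct FieldEntry
    {
        char   *fieldname;
        int     dno;           /* index in the global datum array */
        int     nextfield;     /* next field of the same record, or -1 */
    } FieldEntry;

    typedef struct RecVar
    {
        int     dno;
        int     firstfield;    /* head of this record's field chain, or -1 */
    } RecVar;

    static FieldEntry *datum_array[1024];
    static int         n_datums = 0;

    /* Return the existing entry for "fldname", or create and chain a new one */
    static FieldEntry *
    build_recfield(RecVar *rec, const char *fldname)
    {
        /* search the record's chain for an existing entry */
        for (int i = rec->firstfield; i >= 0; i = datum_array[i]->nextfield)
        {
            if (strcmp(datum_array[i]->fieldname, fldname) == 0)
                return datum_array[i];
        }

        /* none found: allocate, register, and push onto the head of the chain */
        FieldEntry *fld = calloc(1, sizeof(FieldEntry));

        fld->fieldname = strdup(fldname);
        fld->dno = n_datums;
        datum_array[n_datums++] = fld;

        fld->nextfield = rec->firstfield;
        rec->firstfield = fld->dno;
        return fld;
    }

    int
    main(void)
    {
        RecVar rec = {0, -1};

        build_recfield(&rec, "x");
        build_recfield(&rec, "y");
        return build_recfield(&rec, "x")->dno;   /* 0: reuses the first entry */
    }
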
* - * This is used around a DECLARE section to create a list of the VARs - * that have to be initialized at block entry. Note that VARs can also + * This is used around a DECLARE section to create a list of the datums + * that have to be initialized at block entry. Note that datums can also * be created elsewhere than DECLARE, eg by a FOR-loop, but it is then * the responsibility of special-purpose code to initialize them. * ---------- @@ -2393,11 +2269,16 @@ plpgsql_add_initdatums(int **varnos) int i; int n = 0; + /* + * The set of dtypes recognized here must match what exec_stmt_block() + * cares about (re)initializing at block entry. + */ for (i = datums_last; i < plpgsql_nDatums; i++) { switch (plpgsql_Datums[i]->dtype) { case PLPGSQL_DTYPE_VAR: + case PLPGSQL_DTYPE_REC: n++; break; @@ -2418,6 +2299,7 @@ plpgsql_add_initdatums(int **varnos) switch (plpgsql_Datums[i]->dtype) { case PLPGSQL_DTYPE_VAR: + case PLPGSQL_DTYPE_REC: (*varnos)[n++] = plpgsql_Datums[i]->dno; default: @@ -2573,7 +2455,7 @@ plpgsql_HashTableInit(void) memset(&ctl, 0, sizeof(ctl)); ctl.keysize = sizeof(PLpgSQL_func_hashkey); ctl.entrysize = sizeof(plpgsql_HashEnt); - plpgsql_HashTable = hash_create("PLpgSQL function cache", + plpgsql_HashTable = hash_create("PLpgSQL function hash", FUNCS_PER_USER, &ctl, HASH_ELEM | HASH_BLOBS); diff --git a/src/pl/plpgsql/src/pl_exec.c b/src/pl/plpgsql/src/pl_exec.c index 616f5e30f8..d5694d3d08 100644 --- a/src/pl/plpgsql/src/pl_exec.c +++ b/src/pl/plpgsql/src/pl_exec.c @@ -3,7 +3,7 @@ * pl_exec.c - Executor for the PL/pgSQL * procedural language * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * @@ -20,17 +20,23 @@ #include "access/htup_details.h" #include "access/transam.h" #include "access/tupconvert.h" +#include "access/tuptoaster.h" #include "catalog/pg_proc.h" #include "catalog/pg_type.h" +#include "commands/defrem.h" +#include "executor/execExpr.h" #include "executor/spi.h" +#include "executor/spi_priv.h" #include "funcapi.h" #include "miscadmin.h" #include "nodes/nodeFuncs.h" +#include "optimizer/clauses.h" #include "optimizer/planner.h" #include "parser/parse_coerce.h" #include "parser/scansup.h" #include "storage/proc.h" #include "tcop/tcopprot.h" +#include "tcop/utility.h" #include "utils/array.h" #include "utils/builtins.h" #include "utils/datum.h" @@ -39,6 +45,7 @@ #include "utils/memutils.h" #include "utils/rel.h" #include "utils/snapmgr.h" +#include "utils/syscache.h" #include "utils/typcache.h" #include "plpgsql.h" @@ -154,11 +161,90 @@ typedef struct /* cast_hash table entry */ static MemoryContext shared_cast_context = NULL; static HTAB *shared_cast_hash = NULL; +/* + * LOOP_RC_PROCESSING encapsulates common logic for looping statements to + * handle return/exit/continue result codes from the loop body statement(s). + * It's meant to be used like this: + * + * int rc = PLPGSQL_RC_OK; + * for (...) + * { + * ... + * rc = exec_stmts(estate, stmt->body); + * LOOP_RC_PROCESSING(stmt->label, break); + * ... + * } + * return rc; + * + * If execution of the loop should terminate, LOOP_RC_PROCESSING will execute + * "exit_action" (typically a "break" or "goto"), after updating "rc" to the + * value the current statement should return. If execution should continue, + * LOOP_RC_PROCESSING will do nothing except reset "rc" to PLPGSQL_RC_OK. + * + * estate and rc are implicit arguments to the macro. 
+ * estate->exitlabel is examined and possibly updated. + */ +#define LOOP_RC_PROCESSING(looplabel, exit_action) \ + if (rc == PLPGSQL_RC_RETURN) \ + { \ + /* RETURN, so propagate RC_RETURN out */ \ + exit_action; \ + } \ + else if (rc == PLPGSQL_RC_EXIT) \ + { \ + if (estate->exitlabel == NULL) \ + { \ + /* unlabelled EXIT terminates this loop */ \ + rc = PLPGSQL_RC_OK; \ + exit_action; \ + } \ + else if ((looplabel) != NULL && \ + strcmp(looplabel, estate->exitlabel) == 0) \ + { \ + /* labelled EXIT matching this loop, so terminate loop */ \ + estate->exitlabel = NULL; \ + rc = PLPGSQL_RC_OK; \ + exit_action; \ + } \ + else \ + { \ + /* non-matching labelled EXIT, propagate RC_EXIT out */ \ + exit_action; \ + } \ + } \ + else if (rc == PLPGSQL_RC_CONTINUE) \ + { \ + if (estate->exitlabel == NULL) \ + { \ + /* unlabelled CONTINUE matches this loop, so continue in loop */ \ + rc = PLPGSQL_RC_OK; \ + } \ + else if ((looplabel) != NULL && \ + strcmp(looplabel, estate->exitlabel) == 0) \ + { \ + /* labelled CONTINUE matching this loop, so continue in loop */ \ + estate->exitlabel = NULL; \ + rc = PLPGSQL_RC_OK; \ + } \ + else \ + { \ + /* non-matching labelled CONTINUE, propagate RC_CONTINUE out */ \ + exit_action; \ + } \ + } \ + else \ + Assert(rc == PLPGSQL_RC_OK) + /************************************************************ * Local function forward declarations ************************************************************/ +static void coerce_function_result_tuple(PLpgSQL_execstate *estate, + TupleDesc tupdesc); static void plpgsql_exec_error_callback(void *arg); -static PLpgSQL_datum *copy_plpgsql_datum(PLpgSQL_datum *datum); +static void copy_plpgsql_datums(PLpgSQL_execstate *estate, + PLpgSQL_function *func); +static void plpgsql_fulfill_promise(PLpgSQL_execstate *estate, + PLpgSQL_var *var); static MemoryContext get_stmt_mcontext(PLpgSQL_execstate *estate); static void push_stmt_mcontext(PLpgSQL_execstate *estate); static void pop_stmt_mcontext(PLpgSQL_execstate *estate); @@ -173,6 +259,8 @@ static int exec_stmt_assign(PLpgSQL_execstate *estate, PLpgSQL_stmt_assign *stmt); static int exec_stmt_perform(PLpgSQL_execstate *estate, PLpgSQL_stmt_perform *stmt); +static int exec_stmt_call(PLpgSQL_execstate *estate, + PLpgSQL_stmt_call *stmt); static int exec_stmt_getdiag(PLpgSQL_execstate *estate, PLpgSQL_stmt_getdiag *stmt); static int exec_stmt_if(PLpgSQL_execstate *estate, @@ -215,6 +303,12 @@ static int exec_stmt_dynexecute(PLpgSQL_execstate *estate, PLpgSQL_stmt_dynexecute *stmt); static int exec_stmt_dynfors(PLpgSQL_execstate *estate, PLpgSQL_stmt_dynfors *stmt); +static int exec_stmt_commit(PLpgSQL_execstate *estate, + PLpgSQL_stmt_commit *stmt); +static int exec_stmt_rollback(PLpgSQL_execstate *estate, + PLpgSQL_stmt_rollback *stmt); +static int exec_stmt_set(PLpgSQL_execstate *estate, + PLpgSQL_stmt_set *stmt); static void plpgsql_estate_setup(PLpgSQL_execstate *estate, PLpgSQL_function *func, @@ -223,7 +317,8 @@ static void plpgsql_estate_setup(PLpgSQL_execstate *estate, static void exec_eval_cleanup(PLpgSQL_execstate *estate); static void exec_prepare_plan(PLpgSQL_execstate *estate, - PLpgSQL_expr *expr, int cursorOptions); + PLpgSQL_expr *expr, int cursorOptions, + bool keepplan); static void exec_simple_check_plan(PLpgSQL_execstate *estate, PLpgSQL_expr *expr); static void exec_save_simple_expr(PLpgSQL_expr *expr, CachedPlan *cplan); static void exec_check_rw_parameter(PLpgSQL_expr *expr, int target_dno); @@ -268,22 +363,45 @@ static int exec_for_query(PLpgSQL_execstate 
*estate, PLpgSQL_stmt_forq *stmt, Portal portal, bool prefetch_ok); static ParamListInfo setup_param_list(PLpgSQL_execstate *estate, PLpgSQL_expr *expr); -static ParamListInfo setup_unshared_param_list(PLpgSQL_execstate *estate, - PLpgSQL_expr *expr); -static void plpgsql_param_fetch(ParamListInfo params, int paramid); +static ParamExternData *plpgsql_param_fetch(ParamListInfo params, + int paramid, bool speculative, + ParamExternData *workspace); +static void plpgsql_param_compile(ParamListInfo params, Param *param, + ExprState *state, + Datum *resv, bool *resnull); +static void plpgsql_param_eval_var(ExprState *state, ExprEvalStep *op, + ExprContext *econtext); +static void plpgsql_param_eval_var_ro(ExprState *state, ExprEvalStep *op, + ExprContext *econtext); +static void plpgsql_param_eval_recfield(ExprState *state, ExprEvalStep *op, + ExprContext *econtext); +static void plpgsql_param_eval_generic(ExprState *state, ExprEvalStep *op, + ExprContext *econtext); +static void plpgsql_param_eval_generic_ro(ExprState *state, ExprEvalStep *op, + ExprContext *econtext); static void exec_move_row(PLpgSQL_execstate *estate, - PLpgSQL_rec *rec, - PLpgSQL_row *row, + PLpgSQL_variable *target, HeapTuple tup, TupleDesc tupdesc); +static ExpandedRecordHeader *make_expanded_record_for_rec(PLpgSQL_execstate *estate, + PLpgSQL_rec *rec, + TupleDesc srctupdesc, + ExpandedRecordHeader *srcerh); +static void exec_move_row_from_fields(PLpgSQL_execstate *estate, + PLpgSQL_variable *target, + ExpandedRecordHeader *newerh, + Datum *values, bool *nulls, + TupleDesc tupdesc); +static bool compatible_tupdescs(TupleDesc src_tupdesc, TupleDesc dst_tupdesc); static HeapTuple make_tuple_from_row(PLpgSQL_execstate *estate, PLpgSQL_row *row, TupleDesc tupdesc); -static HeapTuple get_tuple_from_datum(Datum value); -static TupleDesc get_tupdesc_from_datum(Datum value); +static TupleDesc deconstruct_composite_datum(Datum value, + HeapTupleData *tmptup); static void exec_move_row_from_datum(PLpgSQL_execstate *estate, - PLpgSQL_rec *rec, - PLpgSQL_row *row, + PLpgSQL_variable *target, Datum value); +static void instantiate_empty_record_variable(PLpgSQL_execstate *estate, + PLpgSQL_rec *rec); static char *convert_value_to_string(PLpgSQL_execstate *estate, Datum value, Oid valtype); static Datum exec_cast_value(PLpgSQL_execstate *estate, @@ -301,6 +419,8 @@ static void assign_simple_var(PLpgSQL_execstate *estate, PLpgSQL_var *var, Datum newvalue, bool isnull, bool freeable); static void assign_text_var(PLpgSQL_execstate *estate, PLpgSQL_var *var, const char *str); +static void assign_record_var(PLpgSQL_execstate *estate, PLpgSQL_rec *rec, + ExpandedRecordHeader *erh); static PreparedParamsData *exec_eval_using_params(PLpgSQL_execstate *estate, List *params); static Portal exec_dynquery_with_params(PLpgSQL_execstate *estate, @@ -327,7 +447,7 @@ static char *format_preparedparamsdata(PLpgSQL_execstate *estate, */ Datum plpgsql_exec_function(PLpgSQL_function *func, FunctionCallInfo fcinfo, - EState *simple_eval_estate) + EState *simple_eval_estate, bool atomic) { PLpgSQL_execstate estate; ErrorContextCallback plerrcontext; @@ -339,6 +459,7 @@ plpgsql_exec_function(PLpgSQL_function *func, FunctionCallInfo fcinfo, */ plpgsql_estate_setup(&estate, func, (ReturnSetInfo *) fcinfo->resultinfo, simple_eval_estate); + estate.atomic = atomic; /* * Setup error traceback support for ereport() @@ -352,8 +473,7 @@ plpgsql_exec_function(PLpgSQL_function *func, FunctionCallInfo fcinfo, * Make local execution copies of all the datums */ 
estate.err_text = gettext_noop("during initialization of execution state"); - for (i = 0; i < estate.ndatums; i++) - estate.datums[i] = copy_plpgsql_datum(func->datums[i]); + copy_plpgsql_datums(&estate, func); /* * Store the actual call argument values into the appropriate variables @@ -396,7 +516,7 @@ plpgsql_exec_function(PLpgSQL_function *func, FunctionCallInfo fcinfo, /* take ownership of R/W object */ assign_simple_var(&estate, var, TransferExpandedObject(var->value, - CurrentMemoryContext), + estate.datum_context), false, true); } @@ -409,7 +529,7 @@ plpgsql_exec_function(PLpgSQL_function *func, FunctionCallInfo fcinfo, /* flat array, so force to expanded form */ assign_simple_var(&estate, var, expand_array(var->value, - CurrentMemoryContext, + estate.datum_context, NULL), false, true); @@ -418,20 +538,22 @@ plpgsql_exec_function(PLpgSQL_function *func, FunctionCallInfo fcinfo, } break; - case PLPGSQL_DTYPE_ROW: + case PLPGSQL_DTYPE_REC: { - PLpgSQL_row *row = (PLpgSQL_row *) estate.datums[n]; + PLpgSQL_rec *rec = (PLpgSQL_rec *) estate.datums[n]; if (!fcinfo->argnull[i]) { /* Assign row value from composite datum */ - exec_move_row_from_datum(&estate, NULL, row, + exec_move_row_from_datum(&estate, + (PLpgSQL_variable *) rec, fcinfo->arg[i]); } else { - /* If arg is null, treat it as an empty row */ - exec_move_row(&estate, NULL, row, NULL, NULL); + /* If arg is null, set variable to null */ + exec_move_row(&estate, (PLpgSQL_variable *) rec, + NULL, NULL); } /* clean up after exec_move_row() */ exec_eval_cleanup(&estate); @@ -439,6 +561,7 @@ plpgsql_exec_function(PLpgSQL_function *func, FunctionCallInfo fcinfo, break; default: + /* Anything else should not be an argument variable */ elog(ERROR, "unrecognized dtype: %d", func->datums[i]->dtype); } } @@ -494,72 +617,89 @@ plpgsql_exec_function(PLpgSQL_function *func, FunctionCallInfo fcinfo, /* If we produced any tuples, send back the result */ if (estate.tuple_store) { - rsi->setResult = estate.tuple_store; - if (estate.rettupdesc) - { - MemoryContext oldcxt; + MemoryContext oldcxt; - oldcxt = MemoryContextSwitchTo(estate.tuple_store_cxt); - rsi->setDesc = CreateTupleDescCopy(estate.rettupdesc); - MemoryContextSwitchTo(oldcxt); - } + rsi->setResult = estate.tuple_store; + oldcxt = MemoryContextSwitchTo(estate.tuple_store_cxt); + rsi->setDesc = CreateTupleDescCopy(estate.tuple_store_desc); + MemoryContextSwitchTo(oldcxt); } estate.retval = (Datum) 0; fcinfo->isnull = true; } else if (!estate.retisnull) { + /* + * Cast result value to function's declared result type, and copy it + * out to the upper executor memory context. We must treat tuple + * results specially in order to deal with cases like rowtypes + * involving dropped columns. + */ if (estate.retistuple) { - /* - * We have to check that the returned tuple actually matches the - * expected result type. 
XXX would be better to cache the tupdesc - * instead of repeating get_call_result_type() - */ - HeapTuple rettup = (HeapTuple) DatumGetPointer(estate.retval); - TupleDesc tupdesc; - TupleConversionMap *tupmap; - - switch (get_call_result_type(fcinfo, NULL, &tupdesc)) + /* Don't need coercion if rowtype is known to match */ + if (func->fn_rettype == estate.rettype && + func->fn_rettype != RECORDOID) { - case TYPEFUNC_COMPOSITE: - /* got the expected result rowtype, now check it */ - tupmap = convert_tuples_by_position(estate.rettupdesc, - tupdesc, - gettext_noop("returned record type does not match expected record type")); - /* it might need conversion */ - if (tupmap) - rettup = do_convert_tuple(rettup, tupmap); - /* no need to free map, we're about to return anyway */ - break; - case TYPEFUNC_RECORD: + /* + * Copy the tuple result into upper executor memory context. + * However, if we have a R/W expanded datum, we can just + * transfer its ownership out to the upper context. + */ + estate.retval = SPI_datumTransfer(estate.retval, + false, + -1); + } + else + { + /* + * Need to look up the expected result type. XXX would be + * better to cache the tupdesc instead of repeating + * get_call_result_type(), but the only easy place to save it + * is in the PLpgSQL_function struct, and that's too + * long-lived: composite types could change during the + * existence of a PLpgSQL_function. + */ + Oid resultTypeId; + TupleDesc tupdesc; - /* - * Failed to determine actual type of RECORD. We could - * raise an error here, but what this means in practice is - * that the caller is expecting any old generic rowtype, - * so we don't really need to be restrictive. Pass back - * the generated result type, instead. - */ - tupdesc = estate.rettupdesc; - if (tupdesc == NULL) /* shouldn't happen */ + switch (get_call_result_type(fcinfo, &resultTypeId, &tupdesc)) + { + case TYPEFUNC_COMPOSITE: + /* got the expected result rowtype, now coerce it */ + coerce_function_result_tuple(&estate, tupdesc); + break; + case TYPEFUNC_COMPOSITE_DOMAIN: + /* got the expected result rowtype, now coerce it */ + coerce_function_result_tuple(&estate, tupdesc); + /* and check domain constraints */ + /* XXX allowing caching here would be good, too */ + domain_check(estate.retval, false, resultTypeId, + NULL, NULL); + break; + case TYPEFUNC_RECORD: + + /* + * Failed to determine actual type of RECORD. We + * could raise an error here, but what this means in + * practice is that the caller is expecting any old + * generic rowtype, so we don't really need to be + * restrictive. Pass back the generated result as-is. + */ + estate.retval = SPI_datumTransfer(estate.retval, + false, + -1); + break; + default: + /* shouldn't get here if retistuple is true ... */ elog(ERROR, "return type must be a row type"); - break; - default: - /* shouldn't get here if retistuple is true ... */ - elog(ERROR, "return type must be a row type"); - break; + break; + } } - - /* - * Copy tuple to upper executor memory, as a tuple Datum. Make - * sure it is labeled with the caller-supplied tuple type. - */ - estate.retval = PointerGetDatum(SPI_returntuple(rettup, tupdesc)); } else { - /* Cast value to proper type */ + /* Scalar case: use exec_cast_value */ estate.retval = exec_cast_value(&estate, estate.retval, &fcinfo->isnull, @@ -580,6 +720,22 @@ plpgsql_exec_function(PLpgSQL_function *func, FunctionCallInfo fcinfo, func->fn_rettyplen); } } + else + { + /* + * We're returning a NULL, which normally requires no conversion work + * regardless of datatypes. 
But, if we are casting it to a domain + * return type, we'd better check that the domain's constraints pass. + */ + if (func->fn_retisdomain) + estate.retval = exec_cast_value(&estate, + estate.retval, + &fcinfo->isnull, + estate.rettype, + -1, + func->fn_rettype, + -1); + } estate.err_text = gettext_noop("during function exit"); @@ -605,6 +761,94 @@ plpgsql_exec_function(PLpgSQL_function *func, FunctionCallInfo fcinfo, return estate.retval; } +/* + * Helper for plpgsql_exec_function: coerce composite result to the specified + * tuple descriptor, and copy it out to upper executor memory. This is split + * out mostly for cosmetic reasons --- the logic would be very deeply nested + * otherwise. + * + * estate->retval is updated in-place. + */ +static void +coerce_function_result_tuple(PLpgSQL_execstate *estate, TupleDesc tupdesc) +{ + HeapTuple rettup; + TupleDesc retdesc; + TupleConversionMap *tupmap; + + /* We assume exec_stmt_return verified that result is composite */ + Assert(type_is_rowtype(estate->rettype)); + + /* We can special-case expanded records for speed */ + if (VARATT_IS_EXTERNAL_EXPANDED(DatumGetPointer(estate->retval))) + { + ExpandedRecordHeader *erh = (ExpandedRecordHeader *) DatumGetEOHP(estate->retval); + + Assert(erh->er_magic == ER_MAGIC); + + /* Extract record's TupleDesc */ + retdesc = expanded_record_get_tupdesc(erh); + + /* check rowtype compatibility */ + tupmap = convert_tuples_by_position(retdesc, + tupdesc, + gettext_noop("returned record type does not match expected record type")); + + /* it might need conversion */ + if (tupmap) + { + rettup = expanded_record_get_tuple(erh); + Assert(rettup); + rettup = execute_attr_map_tuple(rettup, tupmap); + + /* + * Copy tuple to upper executor memory, as a tuple Datum. Make + * sure it is labeled with the caller-supplied tuple type. + */ + estate->retval = PointerGetDatum(SPI_returntuple(rettup, tupdesc)); + /* no need to free map, we're about to return anyway */ + } + else + { + /* + * We need only copy result into upper executor memory context. + * However, if we have a R/W expanded datum, we can just transfer + * its ownership out to the upper executor context. + */ + estate->retval = SPI_datumTransfer(estate->retval, + false, + -1); + } + } + else + { + /* Convert composite datum to a HeapTuple and TupleDesc */ + HeapTupleData tmptup; + + retdesc = deconstruct_composite_datum(estate->retval, &tmptup); + rettup = &tmptup; + + /* check rowtype compatibility */ + tupmap = convert_tuples_by_position(retdesc, + tupdesc, + gettext_noop("returned record type does not match expected record type")); + + /* it might need conversion */ + if (tupmap) + rettup = execute_attr_map_tuple(rettup, tupmap); + + /* + * Copy tuple to upper executor memory, as a tuple Datum. Make sure + * it is labeled with the caller-supplied tuple type. 
+ */ + estate->retval = PointerGetDatum(SPI_returntuple(rettup, tupdesc)); + + /* no need to free map, we're about to return anyway */ + + ReleaseTupleDesc(retdesc); + } +} + /* ---------- * plpgsql_exec_trigger Called by the call handler for @@ -617,9 +861,8 @@ plpgsql_exec_trigger(PLpgSQL_function *func, { PLpgSQL_execstate estate; ErrorContextCallback plerrcontext; - int i; int rc; - PLpgSQL_var *var; + TupleDesc tupdesc; PLpgSQL_rec *rec_new, *rec_old; HeapTuple rettup; @@ -628,6 +871,7 @@ plpgsql_exec_trigger(PLpgSQL_function *func, * Setup the execution state */ plpgsql_estate_setup(&estate, func, NULL, NULL); + estate.trigdata = trigdata; /* * Setup error traceback support for ereport() @@ -641,8 +885,7 @@ plpgsql_exec_trigger(PLpgSQL_function *func, * Make local execution copies of all the datums */ estate.err_text = gettext_noop("during initialization of execution state"); - for (i = 0; i < estate.ndatums; i++) - estate.datums[i] = copy_plpgsql_datum(func->datums[i]); + copy_plpgsql_datums(&estate, func); /* * Put the OLD and NEW tuples into record variables @@ -653,37 +896,38 @@ plpgsql_exec_trigger(PLpgSQL_function *func, * might have a test like "if (TG_OP = 'INSERT' and NEW.foo = 'xyz')", * which should parse regardless of the current trigger type. */ + tupdesc = RelationGetDescr(trigdata->tg_relation); + rec_new = (PLpgSQL_rec *) (estate.datums[func->new_varno]); - rec_new->freetup = false; - rec_new->tupdesc = trigdata->tg_relation->rd_att; - rec_new->freetupdesc = false; rec_old = (PLpgSQL_rec *) (estate.datums[func->old_varno]); - rec_old->freetup = false; - rec_old->tupdesc = trigdata->tg_relation->rd_att; - rec_old->freetupdesc = false; + + rec_new->erh = make_expanded_record_from_tupdesc(tupdesc, + estate.datum_context); + rec_old->erh = make_expanded_record_from_exprecord(rec_new->erh, + estate.datum_context); if (!TRIGGER_FIRED_FOR_ROW(trigdata->tg_event)) { /* * Per-statement triggers don't use OLD/NEW variables */ - rec_new->tup = NULL; - rec_old->tup = NULL; } else if (TRIGGER_FIRED_BY_INSERT(trigdata->tg_event)) { - rec_new->tup = trigdata->tg_trigtuple; - rec_old->tup = NULL; + expanded_record_set_tuple(rec_new->erh, trigdata->tg_trigtuple, + false, false); } else if (TRIGGER_FIRED_BY_UPDATE(trigdata->tg_event)) { - rec_new->tup = trigdata->tg_newtuple; - rec_old->tup = trigdata->tg_trigtuple; + expanded_record_set_tuple(rec_new->erh, trigdata->tg_newtuple, + false, false); + expanded_record_set_tuple(rec_old->erh, trigdata->tg_trigtuple, + false, false); } else if (TRIGGER_FIRED_BY_DELETE(trigdata->tg_event)) { - rec_new->tup = NULL; - rec_old->tup = trigdata->tg_trigtuple; + expanded_record_set_tuple(rec_old->erh, trigdata->tg_trigtuple, + false, false); } else elog(ERROR, "unrecognized trigger action: not INSERT, DELETE, or UPDATE"); @@ -692,106 +936,6 @@ plpgsql_exec_trigger(PLpgSQL_function *func, rc = SPI_register_trigger_data(trigdata); Assert(rc >= 0); - /* - * Assign the special tg_ variables - */ - - var = (PLpgSQL_var *) (estate.datums[func->tg_op_varno]); - if (TRIGGER_FIRED_BY_INSERT(trigdata->tg_event)) - assign_text_var(&estate, var, "INSERT"); - else if (TRIGGER_FIRED_BY_UPDATE(trigdata->tg_event)) - assign_text_var(&estate, var, "UPDATE"); - else if (TRIGGER_FIRED_BY_DELETE(trigdata->tg_event)) - assign_text_var(&estate, var, "DELETE"); - else if (TRIGGER_FIRED_BY_TRUNCATE(trigdata->tg_event)) - assign_text_var(&estate, var, "TRUNCATE"); - else - elog(ERROR, "unrecognized trigger action: not INSERT, DELETE, UPDATE, or TRUNCATE"); - - var = 
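
coerce_function_result_tuple() above takes a fast path when the returned rowtype is already known to match the declared result type, and otherwise builds a positional conversion map (skipping holes left by dropped columns) before copying the tuple out. Below is a rough standalone sketch of that decide-then-map step; ColDesc and build_position_map are hypothetical stand-ins for the TupleDesc machinery, and the error path is reduced to a message.

    #include <stdio.h>
    #include <stdlib.h>
    #include <stdbool.h>

    typedef struct ColDesc
    {
        int     typid;
        bool    dropped;       /* dropped columns leave holes in a rowtype */
    } ColDesc;

    /*
     * Decide whether a returned row needs positional conversion to match the
     * expected layout.  Returns a malloc'd dest->source position map, or NULL
     * when the layouts already match exactly (the fast path).  Bails out when
     * the layouts are incompatible.
     */
    static int *
    build_position_map(const ColDesc *src, int nsrc,
                       const ColDesc *dst, int ndst)
    {
        int    *map = malloc(sizeof(int) * ndst);
        bool    identity = true;
        int     s = 0;

        for (int d = 0; d < ndst; d++)
        {
            while (s < nsrc && src[s].dropped)   /* skip dropped-column holes */
            {
                identity = false;
                s++;
            }
            if (s >= nsrc || src[s].typid != dst[d].typid)
            {
                fprintf(stderr, "returned record type does not match expected record type\n");
                exit(1);
            }
            map[d] = s++;
        }
        if (s != nsrc)
            identity = false;                    /* extra source columns */

        if (identity)
        {
            free(map);
            return NULL;                         /* use the tuple as-is */
        }
        return map;
    }

    int
    main(void)
    {
        ColDesc src[] = {{23, false}, {25, true}, {25, false}};
        ColDesc dst[] = {{23, false}, {25, false}};
        int    *map = build_position_map(src, 3, dst, 2);

        printf(map ? "conversion needed\n" : "fast path\n");
        free(map);
        return 0;
    }
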
(PLpgSQL_var *) (estate.datums[func->tg_name_varno]); - assign_simple_var(&estate, var, - DirectFunctionCall1(namein, - CStringGetDatum(trigdata->tg_trigger->tgname)), - false, true); - - var = (PLpgSQL_var *) (estate.datums[func->tg_when_varno]); - if (TRIGGER_FIRED_BEFORE(trigdata->tg_event)) - assign_text_var(&estate, var, "BEFORE"); - else if (TRIGGER_FIRED_AFTER(trigdata->tg_event)) - assign_text_var(&estate, var, "AFTER"); - else if (TRIGGER_FIRED_INSTEAD(trigdata->tg_event)) - assign_text_var(&estate, var, "INSTEAD OF"); - else - elog(ERROR, "unrecognized trigger execution time: not BEFORE, AFTER, or INSTEAD OF"); - - var = (PLpgSQL_var *) (estate.datums[func->tg_level_varno]); - if (TRIGGER_FIRED_FOR_ROW(trigdata->tg_event)) - assign_text_var(&estate, var, "ROW"); - else if (TRIGGER_FIRED_FOR_STATEMENT(trigdata->tg_event)) - assign_text_var(&estate, var, "STATEMENT"); - else - elog(ERROR, "unrecognized trigger event type: not ROW or STATEMENT"); - - var = (PLpgSQL_var *) (estate.datums[func->tg_relid_varno]); - assign_simple_var(&estate, var, - ObjectIdGetDatum(trigdata->tg_relation->rd_id), - false, false); - - var = (PLpgSQL_var *) (estate.datums[func->tg_relname_varno]); - assign_simple_var(&estate, var, - DirectFunctionCall1(namein, - CStringGetDatum(RelationGetRelationName(trigdata->tg_relation))), - false, true); - - var = (PLpgSQL_var *) (estate.datums[func->tg_table_name_varno]); - assign_simple_var(&estate, var, - DirectFunctionCall1(namein, - CStringGetDatum(RelationGetRelationName(trigdata->tg_relation))), - false, true); - - var = (PLpgSQL_var *) (estate.datums[func->tg_table_schema_varno]); - assign_simple_var(&estate, var, - DirectFunctionCall1(namein, - CStringGetDatum(get_namespace_name( - RelationGetNamespace( - trigdata->tg_relation)))), - false, true); - - var = (PLpgSQL_var *) (estate.datums[func->tg_nargs_varno]); - assign_simple_var(&estate, var, - Int16GetDatum(trigdata->tg_trigger->tgnargs), - false, false); - - var = (PLpgSQL_var *) (estate.datums[func->tg_argv_varno]); - if (trigdata->tg_trigger->tgnargs > 0) - { - /* - * For historical reasons, tg_argv[] subscripts start at zero not one. - * So we can't use construct_array(). 
- */ - int nelems = trigdata->tg_trigger->tgnargs; - Datum *elems; - int dims[1]; - int lbs[1]; - - elems = palloc(sizeof(Datum) * nelems); - for (i = 0; i < nelems; i++) - elems[i] = CStringGetTextDatum(trigdata->tg_trigger->tgargs[i]); - dims[0] = nelems; - lbs[0] = 0; - - assign_simple_var(&estate, var, - PointerGetDatum(construct_md_array(elems, NULL, - 1, dims, lbs, - TEXTOID, - -1, false, 'i')), - false, true); - } - else - { - assign_simple_var(&estate, var, (Datum) 0, true, false); - } - estate.err_text = gettext_noop("during function entry"); /* @@ -842,20 +986,68 @@ plpgsql_exec_trigger(PLpgSQL_function *func, rettup = NULL; else { + TupleDesc retdesc; TupleConversionMap *tupmap; - rettup = (HeapTuple) DatumGetPointer(estate.retval); - /* check rowtype compatibility */ - tupmap = convert_tuples_by_position(estate.rettupdesc, - trigdata->tg_relation->rd_att, - gettext_noop("returned row structure does not match the structure of the triggering table")); - /* it might need conversion */ - if (tupmap) - rettup = do_convert_tuple(rettup, tupmap); - /* no need to free map, we're about to return anyway */ + /* We assume exec_stmt_return verified that result is composite */ + Assert(type_is_rowtype(estate.rettype)); + + /* We can special-case expanded records for speed */ + if (VARATT_IS_EXTERNAL_EXPANDED(DatumGetPointer(estate.retval))) + { + ExpandedRecordHeader *erh = (ExpandedRecordHeader *) DatumGetEOHP(estate.retval); + + Assert(erh->er_magic == ER_MAGIC); + + /* Extract HeapTuple and TupleDesc */ + rettup = expanded_record_get_tuple(erh); + Assert(rettup); + retdesc = expanded_record_get_tupdesc(erh); + + if (retdesc != RelationGetDescr(trigdata->tg_relation)) + { + /* check rowtype compatibility */ + tupmap = convert_tuples_by_position(retdesc, + RelationGetDescr(trigdata->tg_relation), + gettext_noop("returned row structure does not match the structure of the triggering table")); + /* it might need conversion */ + if (tupmap) + rettup = execute_attr_map_tuple(rettup, tupmap); + /* no need to free map, we're about to return anyway */ + } + + /* + * Copy tuple to upper executor memory. But if user just did + * "return new" or "return old" without changing anything, there's + * no need to copy; we can return the original tuple (which will + * save a few cycles in trigger.c as well as here). 
+ */ + if (rettup != trigdata->tg_newtuple && + rettup != trigdata->tg_trigtuple) + rettup = SPI_copytuple(rettup); + } + else + { + /* Convert composite datum to a HeapTuple and TupleDesc */ + HeapTupleData tmptup; + + retdesc = deconstruct_composite_datum(estate.retval, &tmptup); + rettup = &tmptup; + + /* check rowtype compatibility */ + tupmap = convert_tuples_by_position(retdesc, + RelationGetDescr(trigdata->tg_relation), + gettext_noop("returned row structure does not match the structure of the triggering table")); + /* it might need conversion */ + if (tupmap) + rettup = execute_attr_map_tuple(rettup, tupmap); + + ReleaseTupleDesc(retdesc); + /* no need to free map, we're about to return anyway */ - /* Copy tuple to upper executor memory */ - rettup = SPI_copytuple(rettup); + /* Copy tuple to upper executor memory */ + rettup = SPI_copytuple(rettup); + } } /* @@ -890,14 +1082,13 @@ plpgsql_exec_event_trigger(PLpgSQL_function *func, EventTriggerData *trigdata) { PLpgSQL_execstate estate; ErrorContextCallback plerrcontext; - int i; int rc; - PLpgSQL_var *var; /* * Setup the execution state */ plpgsql_estate_setup(&estate, func, NULL, NULL); + estate.evtrigdata = trigdata; /* * Setup error traceback support for ereport() @@ -911,17 +1102,7 @@ plpgsql_exec_event_trigger(PLpgSQL_function *func, EventTriggerData *trigdata) * Make local execution copies of all the datums */ estate.err_text = gettext_noop("during initialization of execution state"); - for (i = 0; i < estate.ndatums; i++) - estate.datums[i] = copy_plpgsql_datum(func->datums[i]); - - /* - * Assign the special tg_ variables - */ - var = (PLpgSQL_var *) (estate.datums[func->tg_event_varno]); - assign_text_var(&estate, var, trigdata->event); - - var = (PLpgSQL_var *) (estate.datums[func->tg_tag_varno]); - assign_text_var(&estate, var, trigdata->tag); + copy_plpgsql_datums(&estate, func); /* * Let the instrumentation plugin peek at this function @@ -1027,60 +1208,234 @@ plpgsql_exec_error_callback(void *arg) * Support function for initializing local execution variables * ---------- */ -static PLpgSQL_datum * -copy_plpgsql_datum(PLpgSQL_datum *datum) +static void +copy_plpgsql_datums(PLpgSQL_execstate *estate, + PLpgSQL_function *func) { - PLpgSQL_datum *result; + int ndatums = estate->ndatums; + PLpgSQL_datum **indatums; + PLpgSQL_datum **outdatums; + char *workspace; + char *ws_next; + int i; - switch (datum->dtype) + /* Allocate local datum-pointer array */ + estate->datums = (PLpgSQL_datum **) + palloc(sizeof(PLpgSQL_datum *) * ndatums); + + /* + * To reduce palloc overhead, we make a single palloc request for all the + * space needed for locally-instantiated datums. 
+ */ + workspace = palloc(func->copiable_size); + ws_next = workspace; + + /* Fill datum-pointer array, copying datums into workspace as needed */ + indatums = func->datums; + outdatums = estate->datums; + for (i = 0; i < ndatums; i++) { - case PLPGSQL_DTYPE_VAR: - { - PLpgSQL_var *new = palloc(sizeof(PLpgSQL_var)); + PLpgSQL_datum *indatum = indatums[i]; + PLpgSQL_datum *outdatum; - memcpy(new, datum, sizeof(PLpgSQL_var)); - /* should be preset to null/non-freeable */ - Assert(new->isnull); - Assert(!new->freeval); + /* This must agree with plpgsql_finish_datums on what is copiable */ + switch (indatum->dtype) + { + case PLPGSQL_DTYPE_VAR: + case PLPGSQL_DTYPE_PROMISE: + outdatum = (PLpgSQL_datum *) ws_next; + memcpy(outdatum, indatum, sizeof(PLpgSQL_var)); + ws_next += MAXALIGN(sizeof(PLpgSQL_var)); + break; - result = (PLpgSQL_datum *) new; - } + case PLPGSQL_DTYPE_REC: + outdatum = (PLpgSQL_datum *) ws_next; + memcpy(outdatum, indatum, sizeof(PLpgSQL_rec)); + ws_next += MAXALIGN(sizeof(PLpgSQL_rec)); + break; + + case PLPGSQL_DTYPE_ROW: + case PLPGSQL_DTYPE_RECFIELD: + case PLPGSQL_DTYPE_ARRAYELEM: + + /* + * These datum records are read-only at runtime, so no need to + * copy them (well, RECFIELD and ARRAYELEM contain cached + * data, but we'd just as soon centralize the caching anyway). + */ + outdatum = indatum; + break; + + default: + elog(ERROR, "unrecognized dtype: %d", indatum->dtype); + outdatum = NULL; /* keep compiler quiet */ + break; + } + + outdatums[i] = outdatum; + } + + Assert(ws_next == workspace + func->copiable_size); +} + +/* + * If the variable has an armed "promise", compute the promised value + * and assign it to the variable. + * The assignment automatically disarms the promise. + */ +static void +plpgsql_fulfill_promise(PLpgSQL_execstate *estate, + PLpgSQL_var *var) +{ + MemoryContext oldcontext; + + if (var->promise == PLPGSQL_PROMISE_NONE) + return; /* nothing to do */ + + /* + * This will typically be invoked in a short-lived context such as the + * mcontext. We must create variable values in the estate's datum + * context. This quick-and-dirty solution risks leaking some additional + * cruft there, but since any one promise is honored at most once per + * function call, it's probably not worth being more careful. 
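
plpgsql_finish_datums() records copiable_size up front so that copy_plpgsql_datums() can carve all per-call variable copies out of a single palloc request, while read-only datum kinds are shared rather than copied. Below is a compilable standalone sketch of that measure-then-carve pattern, with malloc standing in for palloc and hypothetical Template/Datum_ types.

    #include <stdlib.h>
    #include <string.h>

    #define ALIGNOF_MAX  8
    #define MAXALIGN(sz) (((sz) + (ALIGNOF_MAX - 1)) & ~((size_t) (ALIGNOF_MAX - 1)))

    typedef enum { D_VAR, D_READONLY } DKind;

    typedef struct Datum_
    {
        DKind   kind;
        int     value;             /* payload for the mutable kind */
    } Datum_;

    typedef struct Template
    {
        int      ndatums;
        Datum_ **datums;
        size_t   copiable_size;    /* measured once, when compilation finishes */
    } Template;

    /* Phase 1: measure how much space the per-call copies will need */
    static void
    finish_template(Template *tmpl)
    {
        tmpl->copiable_size = 0;
        for (int i = 0; i < tmpl->ndatums; i++)
            if (tmpl->datums[i]->kind == D_VAR)
                tmpl->copiable_size += MAXALIGN(sizeof(Datum_));
    }

    /* Phase 2: carve every per-call copy out of one allocation */
    static Datum_ **
    instantiate(const Template *tmpl, char **workspace_out)
    {
        Datum_ **out = malloc(sizeof(Datum_ *) * tmpl->ndatums);
        char    *workspace = malloc(tmpl->copiable_size);
        char    *ws_next = workspace;

        for (int i = 0; i < tmpl->ndatums; i++)
        {
            Datum_ *in = tmpl->datums[i];

            if (in->kind == D_VAR)
            {
                out[i] = (Datum_ *) ws_next;
                memcpy(out[i], in, sizeof(Datum_));
                ws_next += MAXALIGN(sizeof(Datum_));
            }
            else
                out[i] = in;       /* read-only at runtime: share, don't copy */
        }
        *workspace_out = workspace;    /* caller frees workspace and out */
        return out;
    }

    int
    main(void)
    {
        Datum_    a = {D_VAR, 1};
        Datum_    b = {D_READONLY, 2};
        Datum_   *tmpl_datums[] = {&a, &b};
        Template  tmpl = {2, tmpl_datums, 0};
        char     *workspace;
        Datum_  **local;

        finish_template(&tmpl);
        local = instantiate(&tmpl, &workspace);
        local[0]->value = 42;          /* private copy; template stays at 1 */
        free(workspace);
        free(local);
        return 0;
    }
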
+ */ + oldcontext = MemoryContextSwitchTo(estate->datum_context); + + switch (var->promise) + { + case PLPGSQL_PROMISE_TG_NAME: + if (estate->trigdata == NULL) + elog(ERROR, "trigger promise is not in a trigger function"); + assign_simple_var(estate, var, + DirectFunctionCall1(namein, + CStringGetDatum(estate->trigdata->tg_trigger->tgname)), + false, true); break; - case PLPGSQL_DTYPE_REC: - { - PLpgSQL_rec *new = palloc(sizeof(PLpgSQL_rec)); + case PLPGSQL_PROMISE_TG_WHEN: + if (estate->trigdata == NULL) + elog(ERROR, "trigger promise is not in a trigger function"); + if (TRIGGER_FIRED_BEFORE(estate->trigdata->tg_event)) + assign_text_var(estate, var, "BEFORE"); + else if (TRIGGER_FIRED_AFTER(estate->trigdata->tg_event)) + assign_text_var(estate, var, "AFTER"); + else if (TRIGGER_FIRED_INSTEAD(estate->trigdata->tg_event)) + assign_text_var(estate, var, "INSTEAD OF"); + else + elog(ERROR, "unrecognized trigger execution time: not BEFORE, AFTER, or INSTEAD OF"); + break; + + case PLPGSQL_PROMISE_TG_LEVEL: + if (estate->trigdata == NULL) + elog(ERROR, "trigger promise is not in a trigger function"); + if (TRIGGER_FIRED_FOR_ROW(estate->trigdata->tg_event)) + assign_text_var(estate, var, "ROW"); + else if (TRIGGER_FIRED_FOR_STATEMENT(estate->trigdata->tg_event)) + assign_text_var(estate, var, "STATEMENT"); + else + elog(ERROR, "unrecognized trigger event type: not ROW or STATEMENT"); + break; + + case PLPGSQL_PROMISE_TG_OP: + if (estate->trigdata == NULL) + elog(ERROR, "trigger promise is not in a trigger function"); + if (TRIGGER_FIRED_BY_INSERT(estate->trigdata->tg_event)) + assign_text_var(estate, var, "INSERT"); + else if (TRIGGER_FIRED_BY_UPDATE(estate->trigdata->tg_event)) + assign_text_var(estate, var, "UPDATE"); + else if (TRIGGER_FIRED_BY_DELETE(estate->trigdata->tg_event)) + assign_text_var(estate, var, "DELETE"); + else if (TRIGGER_FIRED_BY_TRUNCATE(estate->trigdata->tg_event)) + assign_text_var(estate, var, "TRUNCATE"); + else + elog(ERROR, "unrecognized trigger action: not INSERT, DELETE, UPDATE, or TRUNCATE"); + break; + + case PLPGSQL_PROMISE_TG_RELID: + if (estate->trigdata == NULL) + elog(ERROR, "trigger promise is not in a trigger function"); + assign_simple_var(estate, var, + ObjectIdGetDatum(estate->trigdata->tg_relation->rd_id), + false, false); + break; - memcpy(new, datum, sizeof(PLpgSQL_rec)); - /* should be preset to null/non-freeable */ - Assert(new->tup == NULL); - Assert(new->tupdesc == NULL); - Assert(!new->freetup); - Assert(!new->freetupdesc); + case PLPGSQL_PROMISE_TG_TABLE_NAME: + if (estate->trigdata == NULL) + elog(ERROR, "trigger promise is not in a trigger function"); + assign_simple_var(estate, var, + DirectFunctionCall1(namein, + CStringGetDatum(RelationGetRelationName(estate->trigdata->tg_relation))), + false, true); + break; + + case PLPGSQL_PROMISE_TG_TABLE_SCHEMA: + if (estate->trigdata == NULL) + elog(ERROR, "trigger promise is not in a trigger function"); + assign_simple_var(estate, var, + DirectFunctionCall1(namein, + CStringGetDatum(get_namespace_name(RelationGetNamespace(estate->trigdata->tg_relation)))), + false, true); + break; + + case PLPGSQL_PROMISE_TG_NARGS: + if (estate->trigdata == NULL) + elog(ERROR, "trigger promise is not in a trigger function"); + assign_simple_var(estate, var, + Int16GetDatum(estate->trigdata->tg_trigger->tgnargs), + false, false); + break; + + case PLPGSQL_PROMISE_TG_ARGV: + if (estate->trigdata == NULL) + elog(ERROR, "trigger promise is not in a trigger function"); + if (estate->trigdata->tg_trigger->tgnargs > 0) + 
{ + /* + * For historical reasons, tg_argv[] subscripts start at zero + * not one. So we can't use construct_array(). + */ + int nelems = estate->trigdata->tg_trigger->tgnargs; + Datum *elems; + int dims[1]; + int lbs[1]; + int i; - result = (PLpgSQL_datum *) new; + elems = palloc(sizeof(Datum) * nelems); + for (i = 0; i < nelems; i++) + elems[i] = CStringGetTextDatum(estate->trigdata->tg_trigger->tgargs[i]); + dims[0] = nelems; + lbs[0] = 0; + + assign_simple_var(estate, var, + PointerGetDatum(construct_md_array(elems, NULL, + 1, dims, lbs, + TEXTOID, + -1, false, 'i')), + false, true); + } + else + { + assign_simple_var(estate, var, (Datum) 0, true, false); } break; - case PLPGSQL_DTYPE_ROW: - case PLPGSQL_DTYPE_RECFIELD: - case PLPGSQL_DTYPE_ARRAYELEM: + case PLPGSQL_PROMISE_TG_EVENT: + if (estate->evtrigdata == NULL) + elog(ERROR, "event trigger promise is not in an event trigger function"); + assign_text_var(estate, var, estate->evtrigdata->event); + break; - /* - * These datum records are read-only at runtime, so no need to - * copy them (well, ARRAYELEM contains some cached type data, but - * we'd just as soon centralize the caching anyway) - */ - result = datum; + case PLPGSQL_PROMISE_TG_TAG: + if (estate->evtrigdata == NULL) + elog(ERROR, "event trigger promise is not in an event trigger function"); + assign_text_var(estate, var, estate->evtrigdata->tag); break; default: - elog(ERROR, "unrecognized dtype: %d", datum->dtype); - result = NULL; /* keep compiler quiet */ - break; + elog(ERROR, "unrecognized promise type: %d", var->promise); } - return result; + MemoryContextSwitchTo(oldcontext); } /* @@ -1178,7 +1533,6 @@ exec_stmt_block(PLpgSQL_execstate *estate, PLpgSQL_stmt_block *block) { volatile int rc = -1; int i; - int n; /* * First initialize all variables declared in this block @@ -1187,13 +1541,21 @@ exec_stmt_block(PLpgSQL_execstate *estate, PLpgSQL_stmt_block *block) for (i = 0; i < block->n_initvars; i++) { - n = block->initvarnos[i]; + int n = block->initvarnos[i]; + PLpgSQL_datum *datum = estate->datums[n]; - switch (estate->datums[n]->dtype) + /* + * The set of dtypes handled here must match plpgsql_add_initdatums(). + * + * Note that we currently don't support promise datums within blocks, + * only at a function's outermost scope, so we needn't handle those + * here. + */ + switch (datum->dtype) { case PLPGSQL_DTYPE_VAR: { - PLpgSQL_var *var = (PLpgSQL_var *) (estate->datums[n]); + PLpgSQL_var *var = (PLpgSQL_var *) datum; /* * Free any old value, in case re-entering block, and @@ -1205,11 +1567,9 @@ exec_stmt_block(PLpgSQL_execstate *estate, PLpgSQL_stmt_block *block) { /* * If needed, give the datatype a chance to reject - * NULLs, by assigning a NULL to the variable. We + * NULLs, by assigning a NULL to the variable. We * claim the value is of type UNKNOWN, not the var's - * datatype, else coercion will be skipped. (Do this - * before the notnull check to be consistent with - * exec_assign_value.) + * datatype, else coercion will be skipped. 
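
With the promise mechanism above, the tg_* trigger variables are no longer assigned eagerly on every trigger call; each carries a promise tag that plpgsql_fulfill_promise() resolves only when the variable is actually read, and the assignment disarms the promise. Below is a small standalone sketch of that lazy-initialization idea; PromiseKind, LazyVar, CallContext and read_var are hypothetical names, not the PL/pgSQL API.

    #include <stdio.h>
    #include <string.h>

    typedef enum
    {
        PROMISE_NONE,               /* value already materialized */
        PROMISE_TRIGGER_NAME,
        PROMISE_TRIGGER_OP
    } PromiseKind;

    typedef struct LazyVar
    {
        PromiseKind promise;
        char        value[64];
    } LazyVar;

    typedef struct CallContext      /* stands in for the trigger data */
    {
        const char *trigger_name;
        const char *operation;
    } CallContext;

    /* Compute the promised value on first use; assignment disarms the promise */
    static void
    fulfill_promise(LazyVar *var, const CallContext *ctx)
    {
        if (var->promise == PROMISE_NONE)
            return;                 /* nothing to do */

        switch (var->promise)
        {
            case PROMISE_TRIGGER_NAME:
                snprintf(var->value, sizeof(var->value), "%s", ctx->trigger_name);
                break;
            case PROMISE_TRIGGER_OP:
                snprintf(var->value, sizeof(var->value), "%s", ctx->operation);
                break;
            default:
                break;
        }
        var->promise = PROMISE_NONE;
    }

    static const char *
    read_var(LazyVar *var, const CallContext *ctx)
    {
        fulfill_promise(var, ctx);  /* pay the cost only if referenced */
        return var->value;
    }

    int
    main(void)
    {
        CallContext ctx = {"my_trigger", "INSERT"};
        LazyVar     tg_op = {PROMISE_TRIGGER_OP, ""};

        printf("%s\n", read_var(&tg_op, &ctx));   /* prints INSERT */
        return 0;
    }
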
*/ if (var->datatype->typtype == TYPTYPE_DOMAIN) exec_assign_value(estate, @@ -1219,11 +1579,8 @@ exec_stmt_block(PLpgSQL_execstate *estate, PLpgSQL_stmt_block *block) UNKNOWNOID, -1); - if (var->notnull) - ereport(ERROR, - (errcode(ERRCODE_NULL_VALUE_NOT_ALLOWED), - errmsg("variable \"%s\" declared NOT NULL cannot default to NULL", - var->refname))); + /* parser should have rejected NOT NULL */ + Assert(!var->notnull); } else { @@ -1235,30 +1592,35 @@ exec_stmt_block(PLpgSQL_execstate *estate, PLpgSQL_stmt_block *block) case PLPGSQL_DTYPE_REC: { - PLpgSQL_rec *rec = (PLpgSQL_rec *) (estate->datums[n]); + PLpgSQL_rec *rec = (PLpgSQL_rec *) datum; - if (rec->freetup) + /* + * Deletion of any existing object will be handled during + * the assignments below, and in some cases it's more + * efficient for us not to get rid of it beforehand. + */ + if (rec->default_val == NULL) { - heap_freetuple(rec->tup); - rec->freetup = false; + /* + * If needed, give the datatype a chance to reject + * NULLs, by assigning a NULL to the variable. + */ + exec_move_row(estate, (PLpgSQL_variable *) rec, + NULL, NULL); + + /* parser should have rejected NOT NULL */ + Assert(!rec->notnull); } - if (rec->freetupdesc) + else { - FreeTupleDesc(rec->tupdesc); - rec->freetupdesc = false; + exec_assign_expr(estate, (PLpgSQL_datum *) rec, + rec->default_val); } - rec->tup = NULL; - rec->tupdesc = NULL; } break; - case PLPGSQL_DTYPE_RECFIELD: - case PLPGSQL_DTYPE_ARRAYELEM: - break; - default: - elog(ERROR, "unrecognized dtype: %d", - estate->datums[n]->dtype); + elog(ERROR, "unrecognized dtype: %d", datum->dtype); } } @@ -1309,16 +1671,12 @@ exec_stmt_block(PLpgSQL_execstate *estate, PLpgSQL_stmt_block *block) /* * If the block ended with RETURN, we may need to copy the return - * value out of the subtransaction eval_context. This is - * currently only needed for scalar result types --- rowtype - * values will always exist in the function's main memory context, - * cf. exec_stmt_return(). We can avoid a physical copy if the - * value happens to be a R/W expanded object. + * value out of the subtransaction eval_context. We can avoid a + * physical copy if the value happens to be a R/W expanded object. */ if (rc == PLPGSQL_RC_RETURN && !estate->retisset && - !estate->retisnull && - estate->rettupdesc == NULL) + !estate->retisnull) { int16 resTypLen; bool resTypByVal; @@ -1462,7 +1820,9 @@ exec_stmt_block(PLpgSQL_execstate *estate, PLpgSQL_stmt_block *block) estate->err_text = NULL; /* - * Handle the return code. + * Handle the return code. This is intentionally different from + * LOOP_RC_PROCESSING(): CONTINUE never matches a block, and EXIT matches + * a block only if there is a label match. */ switch (rc) { @@ -1472,11 +1832,6 @@ exec_stmt_block(PLpgSQL_execstate *estate, PLpgSQL_stmt_block *block) return rc; case PLPGSQL_RC_EXIT: - - /* - * This is intentionally different from the handling of RC_EXIT - * for loops: to match a block, we require a match by label. 
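
The block-exit handling above now copies any scalar or composite RETURN value out of the subtransaction's evaluation context before that context goes away, since composite results are no longer guaranteed to live in the function's main context. Below is a generic standalone sketch of that copy-out-of-a-short-lived-arena pattern; the toy Arena type and run_block are illustrative, not the PostgreSQL memory-context API.

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    /* A toy bump allocator standing in for a short-lived memory context */
    typedef struct Arena
    {
        char   *buf;
        size_t  used;
        size_t  size;
    } Arena;

    static void *
    arena_alloc(Arena *a, size_t n)
    {
        void *p = a->buf + a->used;

        a->used += n;              /* no bounds check: toy code only */
        return p;
    }

    static void
    arena_reset(Arena *a)
    {
        a->used = 0;               /* everything allocated here is now gone */
    }

    /*
     * Compute a result inside the short-lived arena, then copy it to
     * caller-owned storage before the arena is reset.
     */
    static char *
    run_block(Arena *scratch)
    {
        char   *tmp = arena_alloc(scratch, 32);

        strcpy(tmp, "computed inside the block");

        /* The value must survive the reset below, so copy it out first */
        char   *result = strdup(tmp);

        arena_reset(scratch);      /* tmp is invalid after this point */
        return result;             /* caller frees */
    }

    int
    main(void)
    {
        char    backing[256];
        Arena   scratch = {backing, 0, sizeof(backing)};
        char   *result = run_block(&scratch);

        puts(result);
        free(result);
        return 0;
    }
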
- */ if (estate->exitlabel == NULL) return PLPGSQL_RC_EXIT; if (block->label == NULL) @@ -1562,6 +1917,10 @@ exec_stmt(PLpgSQL_execstate *estate, PLpgSQL_stmt *stmt) rc = exec_stmt_perform(estate, (PLpgSQL_stmt_perform *) stmt); break; + case PLPGSQL_STMT_CALL: + rc = exec_stmt_call(estate, (PLpgSQL_stmt_call *) stmt); + break; + case PLPGSQL_STMT_GETDIAG: rc = exec_stmt_getdiag(estate, (PLpgSQL_stmt_getdiag *) stmt); break; @@ -1646,10 +2005,22 @@ exec_stmt(PLpgSQL_execstate *estate, PLpgSQL_stmt *stmt) rc = exec_stmt_close(estate, (PLpgSQL_stmt_close *) stmt); break; - default: - estate->err_stmt = save_estmt; - elog(ERROR, "unrecognized cmdtype: %d", stmt->cmd_type); - } + case PLPGSQL_STMT_COMMIT: + rc = exec_stmt_commit(estate, (PLpgSQL_stmt_commit *) stmt); + break; + + case PLPGSQL_STMT_ROLLBACK: + rc = exec_stmt_rollback(estate, (PLpgSQL_stmt_rollback *) stmt); + break; + + case PLPGSQL_STMT_SET: + rc = exec_stmt_set(estate, (PLpgSQL_stmt_set *) stmt); + break; + + default: + estate->err_stmt = save_estmt; + elog(ERROR, "unrecognized cmd_type: %d", stmt->cmd_type); + } /* Let the plugin know that we have finished executing this statement */ if (*plpgsql_plugin_ptr && (*plpgsql_plugin_ptr)->stmt_end) @@ -1694,6 +2065,239 @@ exec_stmt_perform(PLpgSQL_execstate *estate, PLpgSQL_stmt_perform *stmt) return PLPGSQL_RC_OK; } +/* + * exec_stmt_call + */ +static int +exec_stmt_call(PLpgSQL_execstate *estate, PLpgSQL_stmt_call *stmt) +{ + PLpgSQL_expr *expr = stmt->expr; + volatile LocalTransactionId before_lxid; + LocalTransactionId after_lxid; + volatile bool pushed_active_snap = false; + volatile int rc; + + /* PG_TRY to ensure we clear the plan link, if needed, on failure */ + PG_TRY(); + { + SPIPlanPtr plan = expr->plan; + ParamListInfo paramLI; + + if (plan == NULL) + { + + /* + * Don't save the plan if not in atomic context. Otherwise, + * transaction ends would cause errors about plancache leaks. + * + * XXX This would be fixable with some plancache/resowner surgery + * elsewhere, but for now we'll just work around this here. + */ + exec_prepare_plan(estate, expr, 0, estate->atomic); + + /* + * The procedure call could end transactions, which would upset + * the snapshot management in SPI_execute*, so don't let it do it. + * Instead, we set the snapshots ourselves below. + */ + plan = expr->plan; + plan->no_snapshots = true; + + /* + * Force target to be recalculated whenever the plan changes, in + * case the procedure's argument list has changed. + */ + stmt->target = NULL; + } + + /* + * We construct a DTYPE_ROW datum representing the plpgsql variables + * associated with the procedure's output arguments. Then we can use + * exec_move_row() to do the assignments. 
+ */ + if (stmt->is_call && stmt->target == NULL) + { + Node *node; + FuncExpr *funcexpr; + HeapTuple func_tuple; + List *funcargs; + Oid *argtypes; + char **argnames; + char *argmodes; + MemoryContext oldcontext; + PLpgSQL_row *row; + int nfields; + int i; + ListCell *lc; + + /* + * Get the parsed CallStmt, and look up the called procedure + */ + node = linitial_node(Query, + ((CachedPlanSource *) linitial(plan->plancache_list))->query_list)->utilityStmt; + if (node == NULL || !IsA(node, CallStmt)) + elog(ERROR, "query for CALL statement is not a CallStmt"); + + funcexpr = ((CallStmt *) node)->funcexpr; + + func_tuple = SearchSysCache1(PROCOID, + ObjectIdGetDatum(funcexpr->funcid)); + if (!HeapTupleIsValid(func_tuple)) + elog(ERROR, "cache lookup failed for function %u", + funcexpr->funcid); + + /* + * Extract function arguments, and expand any named-arg notation + */ + funcargs = expand_function_arguments(funcexpr->args, + funcexpr->funcresulttype, + func_tuple); + + /* + * Get the argument names and modes, too + */ + get_func_arg_info(func_tuple, &argtypes, &argnames, &argmodes); + + ReleaseSysCache(func_tuple); + + /* + * Begin constructing row Datum + */ + oldcontext = MemoryContextSwitchTo(estate->func->fn_cxt); + + row = (PLpgSQL_row *) palloc0(sizeof(PLpgSQL_row)); + row->dtype = PLPGSQL_DTYPE_ROW; + row->refname = "(unnamed row)"; + row->lineno = -1; + row->varnos = (int *) palloc(sizeof(int) * list_length(funcargs)); + + MemoryContextSwitchTo(oldcontext); + + /* + * Examine procedure's argument list. Each output arg position + * should be an unadorned plpgsql variable (Datum), which we can + * insert into the row Datum. + */ + nfields = 0; + i = 0; + foreach(lc, funcargs) + { + Node *n = lfirst(lc); + + if (argmodes && + (argmodes[i] == PROARGMODE_INOUT || + argmodes[i] == PROARGMODE_OUT)) + { + if (IsA(n, Param)) + { + Param *param = (Param *) n; + + /* paramid is offset by 1 (see make_datum_param()) */ + row->varnos[nfields++] = param->paramid - 1; + } + else + { + /* report error using parameter name, if available */ + if (argnames && argnames[i] && argnames[i][0]) + ereport(ERROR, + (errcode(ERRCODE_SYNTAX_ERROR), + errmsg("procedure parameter \"%s\" is an output parameter but corresponding argument is not writable", + argnames[i]))); + else + ereport(ERROR, + (errcode(ERRCODE_SYNTAX_ERROR), + errmsg("procedure parameter %d is an output parameter but corresponding argument is not writable", + i + 1))); + } + } + i++; + } + + row->nfields = nfields; + + stmt->target = (PLpgSQL_variable *) row; + } + + paramLI = setup_param_list(estate, expr); + + before_lxid = MyProc->lxid; + + /* + * Set snapshot only for non-read-only procedures, similar to SPI + * behavior. + */ + if (!estate->readonly_func) + { + PushActiveSnapshot(GetTransactionSnapshot()); + pushed_active_snap = true; + } + + rc = SPI_execute_plan_with_paramlist(expr->plan, paramLI, + estate->readonly_func, 0); + } + PG_CATCH(); + { + /* + * If we aren't saving the plan, unset the pointer. Note that it + * could have been unset already, in case of a recursive call. 
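
exec_stmt_call() above walks the expanded argument list in step with the argument-mode array and records, for each OUT or INOUT position, which local variable should receive that output column, rejecting output positions that are not plain variables. Below is a simplified standalone sketch of that scan; ArgMode, ArgRef and collect_out_targets are hypothetical names, and the error path is reduced to a message.

    #include <stdio.h>
    #include <stdlib.h>
    #include <stdbool.h>

    typedef enum { MODE_IN, MODE_OUT, MODE_INOUT } ArgMode;

    typedef struct ArgRef
    {
        bool is_variable;   /* does this argument name a writable local variable? */
        int  varno;         /* its slot number, if so */
    } ArgRef;

    /*
     * Fill targets[] with the variable numbers that should receive the
     * procedure's output columns, in output-column order; returns how many.
     */
    static int
    collect_out_targets(const ArgRef *args, const ArgMode *modes, int nargs,
                        int *targets)
    {
        int nfields = 0;

        for (int i = 0; i < nargs; i++)
        {
            if (modes[i] != MODE_OUT && modes[i] != MODE_INOUT)
                continue;
            if (!args[i].is_variable)
            {
                fprintf(stderr,
                        "argument %d is an output parameter but is not writable\n",
                        i + 1);
                exit(1);
            }
            targets[nfields++] = args[i].varno;
        }
        return nfields;
    }

    int
    main(void)
    {
        ArgRef  args[]  = {{false, 0}, {true, 7}, {true, 9}};
        ArgMode modes[] = {MODE_IN, MODE_INOUT, MODE_OUT};
        int     targets[3];
        int     n = collect_out_targets(args, modes, 3, targets);

        printf("%d output targets: %d %d\n", n, targets[0], targets[1]);
        return 0;
    }
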
+ */ + if (expr->plan && !expr->plan->saved) + expr->plan = NULL; + PG_RE_THROW(); + } + PG_END_TRY(); + + if (expr->plan && !expr->plan->saved) + expr->plan = NULL; + + if (rc < 0) + elog(ERROR, "SPI_execute_plan_with_paramlist failed executing query \"%s\": %s", + expr->query, SPI_result_code_string(rc)); + + after_lxid = MyProc->lxid; + + if (before_lxid == after_lxid) + { + /* + * If we are still in the same transaction after the call, pop the + * snapshot that we might have pushed. (If it's a new transaction, + * then all the snapshots are gone already.) + */ + if (pushed_active_snap) + PopActiveSnapshot(); + } + else + { + /* + * If we are in a new transaction after the call, we need to reset + * some internal state. + */ + estate->simple_eval_estate = NULL; + plpgsql_create_econtext(estate); + } + + /* + * Check result rowcount; if there's one row, assign procedure's output + * values back to the appropriate variables. + */ + if (SPI_processed == 1) + { + SPITupleTable *tuptab = SPI_tuptable; + + if (!stmt->target) + elog(ERROR, "DO statement returned a row"); + + exec_move_row(estate, stmt->target, tuptab->vals[0], tuptab->tupdesc); + } + else if (SPI_processed > 1) + elog(ERROR, "procedure call returned more than one row"); + + exec_eval_cleanup(estate); + SPI_freetuptable(SPI_tuptable); + + return PLPGSQL_RC_OK; +} + /* ---------- * exec_stmt_getdiag Put internal PG information into * specified variables. @@ -1934,45 +2538,16 @@ exec_stmt_case(PLpgSQL_execstate *estate, PLpgSQL_stmt_case *stmt) static int exec_stmt_loop(PLpgSQL_execstate *estate, PLpgSQL_stmt_loop *stmt) { + int rc = PLPGSQL_RC_OK; + for (;;) { - int rc = exec_stmts(estate, stmt->body); - - switch (rc) - { - case PLPGSQL_RC_OK: - break; - - case PLPGSQL_RC_EXIT: - if (estate->exitlabel == NULL) - return PLPGSQL_RC_OK; - if (stmt->label == NULL) - return PLPGSQL_RC_EXIT; - if (strcmp(stmt->label, estate->exitlabel) != 0) - return PLPGSQL_RC_EXIT; - estate->exitlabel = NULL; - return PLPGSQL_RC_OK; - - case PLPGSQL_RC_CONTINUE: - if (estate->exitlabel == NULL) - /* anonymous continue, so re-run the loop */ - break; - else if (stmt->label != NULL && - strcmp(stmt->label, estate->exitlabel) == 0) - /* label matches named continue, so re-run loop */ - estate->exitlabel = NULL; - else - /* label doesn't match named continue, so propagate upward */ - return PLPGSQL_RC_CONTINUE; - break; - - case PLPGSQL_RC_RETURN: - return rc; + rc = exec_stmts(estate, stmt->body); - default: - elog(ERROR, "unrecognized rc: %d", rc); - } + LOOP_RC_PROCESSING(stmt->label, break); } + + return rc; } @@ -1985,9 +2560,10 @@ exec_stmt_loop(PLpgSQL_execstate *estate, PLpgSQL_stmt_loop *stmt) static int exec_stmt_while(PLpgSQL_execstate *estate, PLpgSQL_stmt_while *stmt) { + int rc = PLPGSQL_RC_OK; + for (;;) { - int rc; bool value; bool isnull; @@ -1999,43 +2575,10 @@ exec_stmt_while(PLpgSQL_execstate *estate, PLpgSQL_stmt_while *stmt) rc = exec_stmts(estate, stmt->body); - switch (rc) - { - case PLPGSQL_RC_OK: - break; - - case PLPGSQL_RC_EXIT: - if (estate->exitlabel == NULL) - return PLPGSQL_RC_OK; - if (stmt->label == NULL) - return PLPGSQL_RC_EXIT; - if (strcmp(stmt->label, estate->exitlabel) != 0) - return PLPGSQL_RC_EXIT; - estate->exitlabel = NULL; - return PLPGSQL_RC_OK; - - case PLPGSQL_RC_CONTINUE: - if (estate->exitlabel == NULL) - /* anonymous continue, so re-run loop */ - break; - else if (stmt->label != NULL && - strcmp(stmt->label, estate->exitlabel) == 0) - /* label matches named continue, so re-run loop */ - 
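
exec_stmt_loop() above (and the other looping statements that follow) now delegates its EXIT/CONTINUE/RETURN bookkeeping to the shared LOOP_RC_PROCESSING macro instead of repeating the same switch in every loop body. Below is a compilable standalone sketch of that centralize-the-result-code-handling idea, with simplified RC_* codes and a global exitlabel standing in for estate->exitlabel.

    #include <stdio.h>
    #include <string.h>

    enum { RC_OK, RC_EXIT, RC_CONTINUE, RC_RETURN };

    static const char *exitlabel;   /* label named by EXIT/CONTINUE, or NULL */

    /*
     * Shared result-code handling for all looping statements: decide whether
     * the loop keeps iterating, stops here, or propagates the code outward.
     */
    #define LOOP_RC_PROCESSING(looplabel, exit_action) \
        if (rc == RC_RETURN) \
        { \
            exit_action;            /* propagate RETURN out of the loop */ \
        } \
        else if (rc == RC_EXIT) \
        { \
            if (exitlabel == NULL || \
                ((looplabel) != NULL && strcmp((looplabel), exitlabel) == 0)) \
            { \
                /* this loop is the EXIT target: stop it, swallow the code */ \
                exitlabel = NULL; \
                rc = RC_OK; \
                exit_action; \
            } \
            else \
            { \
                exit_action;        /* some outer loop is the target */ \
            } \
        } \
        else if (rc == RC_CONTINUE) \
        { \
            if (exitlabel == NULL || \
                ((looplabel) != NULL && strcmp((looplabel), exitlabel) == 0)) \
            { \
                exitlabel = NULL; \
                rc = RC_OK;         /* just fall through to the next iteration */ \
            } \
            else \
            { \
                exit_action;        /* some outer loop is the target */ \
            } \
        }

    static int
    run_body(int iteration)
    {
        return (iteration == 3) ? RC_EXIT : RC_OK;   /* pretend body says EXIT */
    }

    static int
    exec_loop(const char *label)
    {
        int rc = RC_OK;

        for (int i = 0;; i++)
        {
            rc = run_body(i);
            LOOP_RC_PROCESSING(label, break);
            printf("iteration %d completed\n", i);
        }
        return rc;
    }

    int
    main(void)
    {
        return exec_loop("outer");
    }
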
estate->exitlabel = NULL; - else - /* label doesn't match named continue, propagate upward */ - return PLPGSQL_RC_CONTINUE; - break; - - case PLPGSQL_RC_RETURN: - return rc; - - default: - elog(ERROR, "unrecognized rc: %d", rc); - } + LOOP_RC_PROCESSING(stmt->label, break); } - return PLPGSQL_RC_OK; + return rc; } @@ -2149,50 +2692,7 @@ exec_stmt_fori(PLpgSQL_execstate *estate, PLpgSQL_stmt_fori *stmt) */ rc = exec_stmts(estate, stmt->body); - if (rc == PLPGSQL_RC_RETURN) - break; /* break out of the loop */ - else if (rc == PLPGSQL_RC_EXIT) - { - if (estate->exitlabel == NULL) - /* unlabelled exit, finish the current loop */ - rc = PLPGSQL_RC_OK; - else if (stmt->label != NULL && - strcmp(stmt->label, estate->exitlabel) == 0) - { - /* labelled exit, matches the current stmt's label */ - estate->exitlabel = NULL; - rc = PLPGSQL_RC_OK; - } - - /* - * otherwise, this is a labelled exit that does not match the - * current statement's label, if any: return RC_EXIT so that the - * EXIT continues to propagate up the stack. - */ - break; - } - else if (rc == PLPGSQL_RC_CONTINUE) - { - if (estate->exitlabel == NULL) - /* unlabelled continue, so re-run the current loop */ - rc = PLPGSQL_RC_OK; - else if (stmt->label != NULL && - strcmp(stmt->label, estate->exitlabel) == 0) - { - /* label matches named continue, so re-run loop */ - estate->exitlabel = NULL; - rc = PLPGSQL_RC_OK; - } - else - { - /* - * otherwise, this is a named continue that does not match the - * current statement's label, if any: return RC_CONTINUE so - * that the CONTINUE will propagate up the stack. - */ - break; - } - } + LOOP_RC_PROCESSING(stmt->label, break); /* * Increase/decrease loop value, unless it would overflow, in which @@ -2200,13 +2700,13 @@ exec_stmt_fori(PLpgSQL_execstate *estate, PLpgSQL_stmt_fori *stmt) */ if (stmt->reverse) { - if ((int32) (loop_value - step_value) > loop_value) + if (loop_value < (PG_INT32_MIN + step_value)) break; loop_value -= step_value; } else { - if ((int32) (loop_value + step_value) < loop_value) + if (loop_value > (PG_INT32_MAX - step_value)) break; loop_value += step_value; } @@ -2321,7 +2821,7 @@ exec_stmt_forc(PLpgSQL_execstate *estate, PLpgSQL_stmt_forc *stmt) set_args.sqlstmt = stmt->argquery; set_args.into = true; /* XXX historically this has not been STRICT */ - set_args.row = (PLpgSQL_row *) + set_args.target = (PLpgSQL_variable *) (estate->datums[curvar->cursor_explicit_argrow]); if (exec_stmt_execsql(estate, &set_args) != PLPGSQL_RC_OK) @@ -2339,12 +2839,12 @@ exec_stmt_forc(PLpgSQL_execstate *estate, PLpgSQL_stmt_forc *stmt) Assert(query); if (query->plan == NULL) - exec_prepare_plan(estate, query, curvar->cursor_options); + exec_prepare_plan(estate, query, curvar->cursor_options, true); /* - * Set up short-lived ParamListInfo + * Set up ParamListInfo for this query */ - paramLI = setup_unshared_param_list(estate, query); + paramLI = setup_param_list(estate, query); /* * Open the cursor (the paramlist will get copied into the portal) @@ -2522,51 +3022,7 @@ exec_stmt_foreach_a(PLpgSQL_execstate *estate, PLpgSQL_stmt_foreach_a *stmt) */ rc = exec_stmts(estate, stmt->body); - /* Handle the return code */ - if (rc == PLPGSQL_RC_RETURN) - break; /* break out of the loop */ - else if (rc == PLPGSQL_RC_EXIT) - { - if (estate->exitlabel == NULL) - /* unlabelled exit, finish the current loop */ - rc = PLPGSQL_RC_OK; - else if (stmt->label != NULL && - strcmp(stmt->label, estate->exitlabel) == 0) - { - /* labelled exit, matches the current stmt's label */ - estate->exitlabel = NULL; - 
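
The integer FOR-loop above now tests against PG_INT32_MAX / PG_INT32_MIN before adjusting the loop counter, instead of adding first and checking for wraparound afterwards, which relies on signed-overflow behaviour that C leaves undefined. Below is a minimal standalone illustration of that pre-check, assuming a positive step as the real code does.

    #include <stdint.h>
    #include <stdio.h>

    /*
     * Advance a 32-bit loop counter by a positive step, reporting when the
     * next step would overflow (so the loop terminates instead of wrapping).
     */
    static int
    step_forward(int32_t *loop_value, int32_t step_value)
    {
        /* test before adding: signed overflow is undefined behaviour in C */
        if (*loop_value > INT32_MAX - step_value)
            return 0;               /* would overflow: stop looping */
        *loop_value += step_value;
        return 1;
    }

    static int
    step_reverse(int32_t *loop_value, int32_t step_value)
    {
        if (*loop_value < INT32_MIN + step_value)
            return 0;               /* would underflow: stop looping */
        *loop_value -= step_value;
        return 1;
    }

    int
    main(void)
    {
        int32_t up = INT32_MAX - 1;
        int32_t down = INT32_MIN + 1;

        while (step_forward(&up, 10))
            ;
        while (step_reverse(&down, 10))
            ;
        printf("stopped at %d and %d\n", (int) up, (int) down);   /* no wraparound */
        return 0;
    }
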
rc = PLPGSQL_RC_OK; - } - - /* - * otherwise, this is a labelled exit that does not match the - * current statement's label, if any: return RC_EXIT so that the - * EXIT continues to propagate up the stack. - */ - break; - } - else if (rc == PLPGSQL_RC_CONTINUE) - { - if (estate->exitlabel == NULL) - /* unlabelled continue, so re-run the current loop */ - rc = PLPGSQL_RC_OK; - else if (stmt->label != NULL && - strcmp(stmt->label, estate->exitlabel) == 0) - { - /* label matches named continue, so re-run loop */ - estate->exitlabel = NULL; - rc = PLPGSQL_RC_OK; - } - else - { - /* - * otherwise, this is a named continue that does not match the - * current statement's label, if any: return RC_CONTINUE so - * that the CONTINUE will propagate up the stack. - */ - break; - } - } + LOOP_RC_PROCESSING(stmt->label, break); MemoryContextSwitchTo(stmt_mcontext); } @@ -2625,12 +3081,8 @@ exec_stmt_exit(PLpgSQL_execstate *estate, PLpgSQL_stmt_exit *stmt) * exec_stmt_return Evaluate an expression and start * returning from the function. * - * Note: in the retistuple code paths, the returned tuple is always in the - * function's main context, whereas for non-tuple data types the result may - * be in the eval_mcontext. The former case is not a memory leak since we're - * about to exit the function anyway. (If you want to change it, note that - * exec_stmt_block() knows about this behavior.) The latter case means that - * we must not do exec_eval_cleanup while unwinding the control stack. + * Note: The result may be in the eval_mcontext. Therefore, we must not + * do exec_eval_cleanup while unwinding the control stack. * ---------- */ static int @@ -2644,9 +3096,8 @@ exec_stmt_return(PLpgSQL_execstate *estate, PLpgSQL_stmt_return *stmt) if (estate->retisset) return PLPGSQL_RC_RETURN; - /* initialize for null result (possibly a tuple) */ + /* initialize for null result */ estate->retval = (Datum) 0; - estate->rettupdesc = NULL; estate->retisnull = true; estate->rettype = InvalidOid; @@ -2668,6 +3119,12 @@ exec_stmt_return(PLpgSQL_execstate *estate, PLpgSQL_stmt_return *stmt) switch (retvar->dtype) { + case PLPGSQL_DTYPE_PROMISE: + /* fulfill promise if needed, then handle like regular var */ + plpgsql_fulfill_promise(estate, (PLpgSQL_var *) retvar); + + /* FALL THRU */ + case PLPGSQL_DTYPE_VAR: { PLpgSQL_var *var = (PLpgSQL_var *) retvar; @@ -2677,10 +3134,12 @@ exec_stmt_return(PLpgSQL_execstate *estate, PLpgSQL_stmt_return *stmt) estate->rettype = var->datatype->typoid; /* - * Cope with retistuple case. A PLpgSQL_var could not be - * of composite type, so we needn't make any effort to - * convert. However, for consistency with the expression - * code path, don't throw error if the result is NULL. + * A PLpgSQL_var could not be of composite type, so + * conversion must fail if retistuple. We throw a custom + * error mainly for consistency with historical behavior. + * For the same reason, we don't throw error if the result + * is NULL. (Note that plpgsql_exec_trigger assumes that + * any non-null result has been verified to be composite.) 
*/ if (estate->retistuple && !estate->retisnull) ereport(ERROR, @@ -2692,23 +3151,13 @@ exec_stmt_return(PLpgSQL_execstate *estate, PLpgSQL_stmt_return *stmt) case PLPGSQL_DTYPE_REC: { PLpgSQL_rec *rec = (PLpgSQL_rec *) retvar; - int32 rettypmod; - if (HeapTupleIsValid(rec->tup)) + /* If record is empty, we return NULL not a row of nulls */ + if (rec->erh && !ExpandedRecordIsEmpty(rec->erh)) { - if (estate->retistuple) - { - estate->retval = PointerGetDatum(rec->tup); - estate->rettupdesc = rec->tupdesc; - estate->retisnull = false; - } - else - exec_eval_datum(estate, - retvar, - &estate->rettype, - &rettypmod, - &estate->retval, - &estate->retisnull); + estate->retval = ExpandedRecordGetDatum(rec->erh); + estate->retisnull = false; + estate->rettype = rec->rectypeid; } } break; @@ -2718,26 +3167,13 @@ exec_stmt_return(PLpgSQL_execstate *estate, PLpgSQL_stmt_return *stmt) PLpgSQL_row *row = (PLpgSQL_row *) retvar; int32 rettypmod; - if (estate->retistuple) - { - HeapTuple tup; - - if (!row->rowtupdesc) /* should not happen */ - elog(ERROR, "row variable has no tupdesc"); - tup = make_tuple_from_row(estate, row, row->rowtupdesc); - if (tup == NULL) /* should not happen */ - elog(ERROR, "row not compatible with its own tupdesc"); - estate->retval = PointerGetDatum(tup); - estate->rettupdesc = row->rowtupdesc; - estate->retisnull = false; - } - else - exec_eval_datum(estate, - retvar, - &estate->rettype, - &rettypmod, - &estate->retval, - &estate->retisnull); + /* We get here if there are multiple OUT parameters */ + exec_eval_datum(estate, + (PLpgSQL_datum *) row, + &estate->rettype, + &rettypmod, + &estate->retval, + &estate->retisnull); } break; @@ -2757,23 +3193,15 @@ exec_stmt_return(PLpgSQL_execstate *estate, PLpgSQL_stmt_return *stmt) &(estate->rettype), &rettypmod); - if (estate->retistuple && !estate->retisnull) - { - /* Convert composite datum to a HeapTuple and TupleDesc */ - HeapTuple tuple; - TupleDesc tupdesc; - - /* Source must be of RECORD or composite type */ - if (!type_is_rowtype(estate->rettype)) - ereport(ERROR, - (errcode(ERRCODE_DATATYPE_MISMATCH), - errmsg("cannot return non-composite value from function returning composite type"))); - tuple = get_tuple_from_datum(estate->retval); - tupdesc = get_tupdesc_from_datum(estate->retval); - estate->retval = PointerGetDatum(tuple); - estate->rettupdesc = CreateTupleDescCopy(tupdesc); - ReleaseTupleDesc(tupdesc); - } + /* + * As in the DTYPE_VAR case above, throw a custom error if a non-null, + * non-composite value is returned in a function returning tuple. + */ + if (estate->retistuple && !estate->retisnull && + !type_is_rowtype(estate->rettype)) + ereport(ERROR, + (errcode(ERRCODE_DATATYPE_MISMATCH), + errmsg("cannot return non-composite value from function returning composite type"))); return PLPGSQL_RC_RETURN; } @@ -2781,9 +3209,10 @@ exec_stmt_return(PLpgSQL_execstate *estate, PLpgSQL_stmt_return *stmt) /* * Special hack for function returning VOID: instead of NULL, return a * non-null VOID value. This is of dubious importance but is kept for - * backwards compatibility. + * backwards compatibility. We don't do it for procedures, though. 
*/ - if (estate->fn_rettype == VOIDOID) + if (estate->fn_rettype == VOIDOID && + estate->func->fn_prokind != PROKIND_PROCEDURE) { estate->retval = (Datum) 0; estate->retisnull = false; @@ -2816,8 +3245,8 @@ exec_stmt_return_next(PLpgSQL_execstate *estate, if (estate->tuple_store == NULL) exec_init_tuple_store(estate); - /* rettupdesc will be filled by exec_init_tuple_store */ - tupdesc = estate->rettupdesc; + /* tuple_store_desc will be filled by exec_init_tuple_store */ + tupdesc = estate->tuple_store_desc; natts = tupdesc->natts; /* @@ -2836,11 +3265,18 @@ exec_stmt_return_next(PLpgSQL_execstate *estate, switch (retvar->dtype) { + case PLPGSQL_DTYPE_PROMISE: + /* fulfill promise if needed, then handle like regular var */ + plpgsql_fulfill_promise(estate, (PLpgSQL_var *) retvar); + + /* FALL THRU */ + case PLPGSQL_DTYPE_VAR: { PLpgSQL_var *var = (PLpgSQL_var *) retvar; Datum retval = var->value; bool isNull = var->isnull; + Form_pg_attribute attr = TupleDescAttr(tupdesc, 0); if (natts != 1) ereport(ERROR, @@ -2858,8 +3294,8 @@ exec_stmt_return_next(PLpgSQL_execstate *estate, &isNull, var->datatype->typoid, var->datatype->atttypmod, - tupdesc->attrs[0]->atttypid, - tupdesc->attrs[0]->atttypmod); + attr->atttypid, + attr->atttypmod); tuplestore_putvalues(estate->tuple_store, tupdesc, &retval, &isNull); @@ -2869,24 +3305,24 @@ exec_stmt_return_next(PLpgSQL_execstate *estate, case PLPGSQL_DTYPE_REC: { PLpgSQL_rec *rec = (PLpgSQL_rec *) retvar; + TupleDesc rec_tupdesc; TupleConversionMap *tupmap; - if (!HeapTupleIsValid(rec->tup)) - ereport(ERROR, - (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE), - errmsg("record \"%s\" is not assigned yet", - rec->refname), - errdetail("The tuple structure of a not-yet-assigned" - " record is indeterminate."))); + /* If rec is null, try to convert it to a row of nulls */ + if (rec->erh == NULL) + instantiate_empty_record_variable(estate, rec); + if (ExpandedRecordIsEmpty(rec->erh)) + deconstruct_expanded_record(rec->erh); /* Use eval_mcontext for tuple conversion work */ oldcontext = MemoryContextSwitchTo(get_eval_mcontext(estate)); - tupmap = convert_tuples_by_position(rec->tupdesc, + rec_tupdesc = expanded_record_get_tupdesc(rec->erh); + tupmap = convert_tuples_by_position(rec_tupdesc, tupdesc, gettext_noop("wrong record type supplied in RETURN NEXT")); - tuple = rec->tup; + tuple = expanded_record_get_tuple(rec->erh); if (tupmap) - tuple = do_convert_tuple(tuple, tupmap); + tuple = execute_attr_map_tuple(tuple, tupmap); tuplestore_puttuple(estate->tuple_store, tuple); MemoryContextSwitchTo(oldcontext); } @@ -2896,10 +3332,12 @@ exec_stmt_return_next(PLpgSQL_execstate *estate, { PLpgSQL_row *row = (PLpgSQL_row *) retvar; + /* We get here if there are multiple OUT parameters */ + /* Use eval_mcontext for tuple conversion work */ oldcontext = MemoryContextSwitchTo(get_eval_mcontext(estate)); tuple = make_tuple_from_row(estate, row, tupdesc); - if (tuple == NULL) + if (tuple == NULL) /* should not happen */ ereport(ERROR, (errcode(ERRCODE_DATATYPE_MISMATCH), errmsg("wrong record type supplied in RETURN NEXT"))); @@ -2931,6 +3369,7 @@ exec_stmt_return_next(PLpgSQL_execstate *estate, /* Expression should be of RECORD or composite type */ if (!isNull) { + HeapTupleData tmptup; TupleDesc retvaldesc; TupleConversionMap *tupmap; @@ -2941,12 +3380,12 @@ exec_stmt_return_next(PLpgSQL_execstate *estate, /* Use eval_mcontext for tuple conversion work */ oldcontext = MemoryContextSwitchTo(get_eval_mcontext(estate)); - tuple = get_tuple_from_datum(retval); - 
retvaldesc = get_tupdesc_from_datum(retval); + retvaldesc = deconstruct_composite_datum(retval, &tmptup); + tuple = &tmptup; tupmap = convert_tuples_by_position(retvaldesc, tupdesc, gettext_noop("returned record type does not match expected record type")); if (tupmap) - tuple = do_convert_tuple(tuple, tupmap); + tuple = execute_attr_map_tuple(tuple, tupmap); tuplestore_puttuple(estate->tuple_store, tuple); ReleaseTupleDesc(retvaldesc); MemoryContextSwitchTo(oldcontext); @@ -2968,6 +3407,8 @@ exec_stmt_return_next(PLpgSQL_execstate *estate, } else { + Form_pg_attribute attr = TupleDescAttr(tupdesc, 0); + /* Simple scalar result */ if (natts != 1) ereport(ERROR, @@ -2980,8 +3421,8 @@ exec_stmt_return_next(PLpgSQL_execstate *estate, &isNull, rettype, rettypmod, - tupdesc->attrs[0]->atttypid, - tupdesc->attrs[0]->atttypmod); + attr->atttypid, + attr->atttypmod); tuplestore_putvalues(estate->tuple_store, tupdesc, &retval, &isNull); @@ -3040,7 +3481,7 @@ exec_stmt_return_query(PLpgSQL_execstate *estate, oldcontext = MemoryContextSwitchTo(get_eval_mcontext(estate)); tupmap = convert_tuples_by_position(portal->tupDesc, - estate->rettupdesc, + estate->tuple_store_desc, gettext_noop("structure of query does not match function result type")); while (true) @@ -3060,7 +3501,7 @@ exec_stmt_return_query(PLpgSQL_execstate *estate, HeapTuple tuple = SPI_tuptable->vals[i]; if (tupmap) - tuple = do_convert_tuple(tuple, tupmap); + tuple = execute_attr_map_tuple(tuple, tupmap); tuplestore_puttuple(estate->tuple_store, tuple); if (tupmap) heap_freetuple(tuple); @@ -3117,7 +3558,7 @@ exec_init_tuple_store(PLpgSQL_execstate *estate) CurrentResourceOwner = oldowner; MemoryContextSwitchTo(oldcxt); - estate->rettupdesc = rsi->expectedDesc; + estate->tuple_store_desc = rsi->expectedDesc; } #define SET_RAISE_OPTION_TEXT(opt, name) \ @@ -3400,6 +3841,8 @@ plpgsql_estate_setup(PLpgSQL_execstate *estate, func->cur_estate = estate; estate->func = func; + estate->trigdata = NULL; + estate->evtrigdata = NULL; estate->retval = (Datum) 0; estate->retisnull = true; @@ -3410,12 +3853,13 @@ plpgsql_estate_setup(PLpgSQL_execstate *estate, estate->retisset = func->fn_retset; estate->readonly_func = func->fn_readonly; + estate->atomic = true; - estate->rettupdesc = NULL; estate->exitlabel = NULL; estate->cur_error = NULL; estate->tuple_store = NULL; + estate->tuple_store_desc = NULL; if (rsi) { estate->tuple_store_cxt = rsi->econtext->ecxt_per_query_memory; @@ -3430,20 +3874,20 @@ plpgsql_estate_setup(PLpgSQL_execstate *estate, estate->found_varno = func->found_varno; estate->ndatums = func->ndatums; - estate->datums = palloc(sizeof(PLpgSQL_datum *) * estate->ndatums); - /* caller is expected to fill the datums array */ + estate->datums = NULL; + /* the datums array will be filled by copy_plpgsql_datums() */ + estate->datum_context = CurrentMemoryContext; - /* initialize ParamListInfo with one entry per datum, all invalid */ + /* initialize our ParamListInfo with appropriate hook functions */ estate->paramLI = (ParamListInfo) - palloc0(offsetof(ParamListInfoData, params) + - estate->ndatums * sizeof(ParamExternData)); + palloc(offsetof(ParamListInfoData, params)); estate->paramLI->paramFetch = plpgsql_param_fetch; estate->paramLI->paramFetchArg = (void *) estate; + estate->paramLI->paramCompile = plpgsql_param_compile; + estate->paramLI->paramCompileArg = NULL; /* not needed */ estate->paramLI->parserSetup = (ParserSetupHook) plpgsql_parser_setup; estate->paramLI->parserSetupArg = NULL; /* filled during use */ 
estate->paramLI->numParams = estate->ndatums; - estate->paramLI->paramMask = NULL; - estate->params_dirty = false; /* set up for use of appropriate simple-expression EState and cast hash */ if (simple_eval_estate) @@ -3554,7 +3998,8 @@ exec_eval_cleanup(PLpgSQL_execstate *estate) */ static void exec_prepare_plan(PLpgSQL_execstate *estate, - PLpgSQL_expr *expr, int cursorOptions) + PLpgSQL_expr *expr, int cursorOptions, + bool keepplan) { SPIPlanPtr plan; @@ -3572,25 +4017,10 @@ exec_prepare_plan(PLpgSQL_execstate *estate, (void *) expr, cursorOptions); if (plan == NULL) - { - /* Some SPI errors deserve specific error messages */ - switch (SPI_result) - { - case SPI_ERROR_COPY: - ereport(ERROR, - (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("cannot COPY to/from client in PL/pgSQL"))); - case SPI_ERROR_TRANSACTION: - ereport(ERROR, - (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("cannot begin/end transactions in PL/pgSQL"), - errhint("Use a BEGIN block with an EXCEPTION clause instead."))); - default: - elog(ERROR, "SPI_prepare_params failed for \"%s\": %s", - expr->query, SPI_result_code_string(SPI_result)); - } - } - SPI_keepplan(plan); + elog(ERROR, "SPI_prepare_params failed for \"%s\": %s", + expr->query, SPI_result_code_string(SPI_result)); + if (keepplan) + SPI_keepplan(plan); expr->plan = plan; /* Check to see if it's a simple expression */ @@ -3620,6 +4050,12 @@ exec_stmt_execsql(PLpgSQL_execstate *estate, long tcount; int rc; PLpgSQL_expr *expr = stmt->sqlstmt; + int too_many_rows_level = 0; + + if (plpgsql_extra_errors & PLPGSQL_XCHECK_TOOMANYROWS) + too_many_rows_level = ERROR; + else if (plpgsql_extra_warnings & PLPGSQL_XCHECK_TOOMANYROWS) + too_many_rows_level = WARNING; /* * On the first call for this statement generate the plan, and detect @@ -3629,24 +4065,25 @@ exec_stmt_execsql(PLpgSQL_execstate *estate, { ListCell *l; - exec_prepare_plan(estate, expr, CURSOR_OPT_PARALLEL_OK); + exec_prepare_plan(estate, expr, CURSOR_OPT_PARALLEL_OK, true); stmt->mod_stmt = false; foreach(l, SPI_plan_get_plan_sources(expr->plan)) { CachedPlanSource *plansource = (CachedPlanSource *) lfirst(l); - ListCell *l2; - foreach(l2, plansource->query_list) + /* + * We could look at the raw_parse_tree, but it seems simpler to + * check the command tag. Note we should *not* look at the Query + * tree(s), since those are the result of rewriting and could have + * been transmogrified into something else entirely. + */ + if (plansource->commandTag && + (strcmp(plansource->commandTag, "INSERT") == 0 || + strcmp(plansource->commandTag, "UPDATE") == 0 || + strcmp(plansource->commandTag, "DELETE") == 0)) { - Query *q = lfirst_node(Query, l2); - - if (q->canSetTag) - { - if (q->commandType == CMD_INSERT || - q->commandType == CMD_UPDATE || - q->commandType == CMD_DELETE) - stmt->mod_stmt = true; - } + stmt->mod_stmt = true; + break; } } } @@ -3658,9 +4095,10 @@ exec_stmt_execsql(PLpgSQL_execstate *estate, /* * If we have INTO, then we only need one row back ... but if we have INTO - * STRICT, ask for two rows, so that we can verify the statement returns - * only one. INSERT/UPDATE/DELETE are always treated strictly. Without - * INTO, just run the statement to completion (tcount = 0). + * STRICT or extra check too_many_rows, ask for two rows, so that we can + * verify the statement returns only one. INSERT/UPDATE/DELETE are always + * treated strictly. Without INTO, just run the statement to completion + * (tcount = 0). 
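too_many_rows_level wires a new extra check into exec_stmt_execsql: when it is active, even a non-STRICT INTO asks for a second row so that multi-row results can be detected. Assuming the check is exposed through the existing plpgsql.extra_errors / plpgsql.extra_warnings settings under the name too_many_rows (as PLPGSQL_XCHECK_TOOMANYROWS suggests), a session-level sketch:

    SET plpgsql.extra_errors TO 'too_many_rows';

    DO $$
    DECLARE
        x int;
    BEGIN
        SELECT g INTO x FROM generate_series(1, 2) AS g;  -- now fails: query returned more than one row
    END $$;

With plpgsql.extra_warnings instead, the same assignment only emits a WARNING and x still receives the first row.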
* * We could just ask for two rows always when using INTO, but there are * some cases where demanding the extra row costs significant time, eg by @@ -3669,7 +4107,7 @@ exec_stmt_execsql(PLpgSQL_execstate *estate, */ if (stmt->into) { - if (stmt->strict || stmt->mod_stmt) + if (stmt->strict || stmt->mod_stmt || too_many_rows_level) tcount = 2; else tcount = 1; @@ -3711,12 +4149,12 @@ exec_stmt_execsql(PLpgSQL_execstate *estate, break; case SPI_OK_REWRITTEN: - Assert(!stmt->mod_stmt); /* * The command was rewritten into another kind of command. It's * not clear what FOUND would mean in that case (and SPI doesn't - * return the row count either), so just set it to false. + * return the row count either), so just set it to false. Note + * that we can't assert anything about mod_stmt here. */ exec_set_found(estate, false); break; @@ -3726,15 +4164,18 @@ exec_stmt_execsql(PLpgSQL_execstate *estate, ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), errmsg("cannot COPY to/from client in PL/pgSQL"))); + break; + case SPI_ERROR_TRANSACTION: ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("cannot begin/end transactions in PL/pgSQL"), - errhint("Use a BEGIN block with an EXCEPTION clause instead."))); + errmsg("unsupported transaction command in PL/pgSQL"))); + break; default: elog(ERROR, "SPI_execute_plan_with_paramlist failed executing query \"%s\": %s", expr->query, SPI_result_code_string(rc)); + break; } /* All variants should save result info for GET DIAGNOSTICS */ @@ -3746,8 +4187,7 @@ exec_stmt_execsql(PLpgSQL_execstate *estate, { SPITupleTable *tuptab = SPI_tuptable; uint64 n = SPI_processed; - PLpgSQL_rec *rec = NULL; - PLpgSQL_row *row = NULL; + PLpgSQL_variable *target; /* If the statement did not return a tuple table, complain */ if (tuptab == NULL) @@ -3755,13 +4195,8 @@ exec_stmt_execsql(PLpgSQL_execstate *estate, (errcode(ERRCODE_SYNTAX_ERROR), errmsg("INTO used with a command that cannot return data"))); - /* Determine if we assign to a record or a row */ - if (stmt->rec != NULL) - rec = (PLpgSQL_rec *) (estate->datums[stmt->rec->dno]); - else if (stmt->row != NULL) - row = (PLpgSQL_row *) (estate->datums[stmt->row->dno]); - else - elog(ERROR, "unsupported target"); + /* Fetch target's datum entry */ + target = (PLpgSQL_variable *) estate->datums[stmt->target->dno]; /* * If SELECT ... INTO specified STRICT, and the query didn't find @@ -3785,26 +4220,30 @@ exec_stmt_execsql(PLpgSQL_execstate *estate, errdetail ? errdetail_internal("parameters: %s", errdetail) : 0)); } /* set the target to NULL(s) */ - exec_move_row(estate, rec, row, NULL, tuptab->tupdesc); + exec_move_row(estate, target, NULL, tuptab->tupdesc); } else { - if (n > 1 && (stmt->strict || stmt->mod_stmt)) + if (n > 1 && (stmt->strict || stmt->mod_stmt || too_many_rows_level)) { char *errdetail; + int errlevel; if (estate->func->print_strict_params) errdetail = format_expr_params(estate, expr); else errdetail = NULL; - ereport(ERROR, + errlevel = (stmt->strict || stmt->mod_stmt) ? ERROR : too_many_rows_level; + + ereport(errlevel, (errcode(ERRCODE_TOO_MANY_ROWS), errmsg("query returned more than one row"), - errdetail ? errdetail_internal("parameters: %s", errdetail) : 0)); + errdetail ? 
errdetail_internal("parameters: %s", errdetail) : 0, + errhint("Make sure the query returns a single row, or use LIMIT 1"))); } /* Put the first result row into the target */ - exec_move_row(estate, rec, row, tuptab->vals[0], tuptab->tupdesc); + exec_move_row(estate, target, tuptab->vals[0], tuptab->tupdesc); } /* Clean up */ @@ -3916,11 +4355,13 @@ exec_stmt_dynexecute(PLpgSQL_execstate *estate, ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), errmsg("cannot COPY to/from client in PL/pgSQL"))); + break; + case SPI_ERROR_TRANSACTION: ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("cannot begin/end transactions in PL/pgSQL"), - errhint("Use a BEGIN block with an EXCEPTION clause instead."))); + errmsg("EXECUTE of transaction commands is not implemented"))); + break; default: elog(ERROR, "SPI_execute failed executing query \"%s\": %s", @@ -3937,8 +4378,7 @@ exec_stmt_dynexecute(PLpgSQL_execstate *estate, { SPITupleTable *tuptab = SPI_tuptable; uint64 n = SPI_processed; - PLpgSQL_rec *rec = NULL; - PLpgSQL_row *row = NULL; + PLpgSQL_variable *target; /* If the statement did not return a tuple table, complain */ if (tuptab == NULL) @@ -3946,13 +4386,8 @@ exec_stmt_dynexecute(PLpgSQL_execstate *estate, (errcode(ERRCODE_SYNTAX_ERROR), errmsg("INTO used with a command that cannot return data"))); - /* Determine if we assign to a record or a row */ - if (stmt->rec != NULL) - rec = (PLpgSQL_rec *) (estate->datums[stmt->rec->dno]); - else if (stmt->row != NULL) - row = (PLpgSQL_row *) (estate->datums[stmt->row->dno]); - else - elog(ERROR, "unsupported target"); + /* Fetch target's datum entry */ + target = (PLpgSQL_variable *) estate->datums[stmt->target->dno]; /* * If SELECT ... INTO specified STRICT, and the query didn't find @@ -3976,7 +4411,7 @@ exec_stmt_dynexecute(PLpgSQL_execstate *estate, errdetail ? 
errdetail_internal("parameters: %s", errdetail) : 0)); } /* set the target to NULL(s) */ - exec_move_row(estate, rec, row, NULL, tuptab->tupdesc); + exec_move_row(estate, target, NULL, tuptab->tupdesc); } else { @@ -3996,7 +4431,7 @@ exec_stmt_dynexecute(PLpgSQL_execstate *estate, } /* Put the first result row into the target */ - exec_move_row(estate, rec, row, tuptab->vals[0], tuptab->tupdesc); + exec_move_row(estate, target, tuptab->vals[0], tuptab->tupdesc); } /* clean up after exec_move_row() */ exec_eval_cleanup(estate); @@ -4099,7 +4534,7 @@ exec_stmt_open(PLpgSQL_execstate *estate, PLpgSQL_stmt_open *stmt) */ query = stmt->query; if (query->plan == NULL) - exec_prepare_plan(estate, query, stmt->cursor_options); + exec_prepare_plan(estate, query, stmt->cursor_options, true); } else if (stmt->dynquery != NULL) { @@ -4154,7 +4589,7 @@ exec_stmt_open(PLpgSQL_execstate *estate, PLpgSQL_stmt_open *stmt) set_args.sqlstmt = stmt->argquery; set_args.into = true; /* XXX historically this has not been STRICT */ - set_args.row = (PLpgSQL_row *) + set_args.target = (PLpgSQL_variable *) (estate->datums[curvar->cursor_explicit_argrow]); if (exec_stmt_execsql(estate, &set_args) != PLPGSQL_RC_OK) @@ -4170,16 +4605,16 @@ exec_stmt_open(PLpgSQL_execstate *estate, PLpgSQL_stmt_open *stmt) query = curvar->cursor_explicit_expr; if (query->plan == NULL) - exec_prepare_plan(estate, query, curvar->cursor_options); + exec_prepare_plan(estate, query, curvar->cursor_options, true); } /* - * Set up short-lived ParamListInfo + * Set up ParamListInfo for this query */ - paramLI = setup_unshared_param_list(estate, query); + paramLI = setup_param_list(estate, query); /* - * Open the cursor + * Open the cursor (the paramlist will get copied into the portal) */ portal = SPI_cursor_open_with_paramlist(curname, query->plan, paramLI, @@ -4212,8 +4647,6 @@ static int exec_stmt_fetch(PLpgSQL_execstate *estate, PLpgSQL_stmt_fetch *stmt) { PLpgSQL_var *curvar; - PLpgSQL_rec *rec = NULL; - PLpgSQL_row *row = NULL; long how_many = stmt->how_many; SPITupleTable *tuptab; Portal portal; @@ -4260,16 +4693,7 @@ exec_stmt_fetch(PLpgSQL_execstate *estate, PLpgSQL_stmt_fetch *stmt) if (!stmt->is_move) { - /* ---------- - * Determine if we fetch into a record or a row - * ---------- - */ - if (stmt->rec != NULL) - rec = (PLpgSQL_rec *) (estate->datums[stmt->rec->dno]); - else if (stmt->row != NULL) - row = (PLpgSQL_row *) (estate->datums[stmt->row->dno]); - else - elog(ERROR, "unsupported target"); + PLpgSQL_variable *target; /* ---------- * Fetch 1 tuple from the cursor @@ -4283,10 +4707,11 @@ exec_stmt_fetch(PLpgSQL_execstate *estate, PLpgSQL_stmt_fetch *stmt) * Set the target appropriately. * ---------- */ + target = (PLpgSQL_variable *) estate->datums[stmt->target->dno]; if (n == 0) - exec_move_row(estate, rec, row, NULL, tuptab->tupdesc); + exec_move_row(estate, target, NULL, tuptab->tupdesc); else - exec_move_row(estate, rec, row, tuptab->vals[0], tuptab->tupdesc); + exec_move_row(estate, target, tuptab->vals[0], tuptab->tupdesc); exec_eval_cleanup(estate); SPI_freetuptable(tuptab); @@ -4347,29 +4772,95 @@ exec_stmt_close(PLpgSQL_execstate *estate, PLpgSQL_stmt_close *stmt) return PLPGSQL_RC_OK; } - -/* ---------- - * exec_assign_expr Put an expression's result into a variable. - * ---------- +/* + * exec_stmt_commit + * + * Commit the transaction. 
*/ -static void -exec_assign_expr(PLpgSQL_execstate *estate, PLpgSQL_datum *target, - PLpgSQL_expr *expr) +static int +exec_stmt_commit(PLpgSQL_execstate *estate, PLpgSQL_stmt_commit *stmt) { - Datum value; - bool isnull; - Oid valtype; - int32 valtypmod; + HoldPinnedPortals(); - /* - * If first time through, create a plan for this expression, and then see - * if we can pass the target variable as a read-write parameter to the - * expression. (This is a bit messy, but it seems cleaner than modifying + SPI_commit(); + SPI_start_transaction(); + + estate->simple_eval_estate = NULL; + plpgsql_create_econtext(estate); + + return PLPGSQL_RC_OK; +} + +/* + * exec_stmt_rollback + * + * Abort the transaction. + */ +static int +exec_stmt_rollback(PLpgSQL_execstate *estate, PLpgSQL_stmt_rollback *stmt) +{ + HoldPinnedPortals(); + + SPI_rollback(); + SPI_start_transaction(); + + estate->simple_eval_estate = NULL; + plpgsql_create_econtext(estate); + + return PLPGSQL_RC_OK; +} + +/* + * exec_stmt_set + * + * Execute SET/RESET statement. + * + * We just parse and execute the statement normally, but we have to do it + * without setting a snapshot, for things like SET TRANSACTION. + */ +static int +exec_stmt_set(PLpgSQL_execstate *estate, PLpgSQL_stmt_set *stmt) +{ + PLpgSQL_expr *expr = stmt->expr; + int rc; + + if (expr->plan == NULL) + { + exec_prepare_plan(estate, expr, 0, true); + expr->plan->no_snapshots = true; + } + + rc = SPI_execute_plan(expr->plan, NULL, NULL, estate->readonly_func, 0); + + if (rc != SPI_OK_UTILITY) + elog(ERROR, "SPI_execute_plan failed executing query \"%s\": %s", + expr->query, SPI_result_code_string(rc)); + + return PLPGSQL_RC_OK; +} + +/* ---------- + * exec_assign_expr Put an expression's result into a variable. + * ---------- + */ +static void +exec_assign_expr(PLpgSQL_execstate *estate, PLpgSQL_datum *target, + PLpgSQL_expr *expr) +{ + Datum value; + bool isnull; + Oid valtype; + int32 valtypmod; + + /* + * If first time through, create a plan for this expression, and then see + * if we can pass the target variable as a read-write parameter to the + * expression. (This is a bit messy, but it seems cleaner than modifying * the API of exec_eval_expr for the purpose.) */ if (expr->plan == NULL) { - exec_prepare_plan(estate, expr, 0); + exec_prepare_plan(estate, expr, 0, true); if (target->dtype == PLPGSQL_DTYPE_VAR) exec_check_rw_parameter(expr, target->dno); } @@ -4426,6 +4917,7 @@ exec_assign_value(PLpgSQL_execstate *estate, switch (target->dtype) { case PLPGSQL_DTYPE_VAR: + case PLPGSQL_DTYPE_PROMISE: { /* * Target is a variable @@ -4469,7 +4961,7 @@ exec_assign_value(PLpgSQL_execstate *estate, { /* array and not already R/W, so apply expand_array */ newvalue = expand_array(newvalue, - CurrentMemoryContext, + estate->datum_context, NULL); } else @@ -4488,10 +4980,16 @@ exec_assign_value(PLpgSQL_execstate *estate, * cannot reliably be made any earlier; we have to be looking * at the object's standard R/W pointer to be sure pointer * equality is meaningful. + * + * Also, if it's a promise variable, we should disarm the + * promise in any case --- otherwise, assigning null to an + * armed promise variable would fail to disarm the promise. 
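exec_stmt_commit and exec_stmt_rollback end the current transaction through SPI, start a new one, and rebuild the simple-expression EState, which is what lets a procedure running in a non-atomic context commit work as it goes. A usage sketch, under the assumption that a batch_log table with matching columns exists:

    CREATE PROCEDURE load_batches()
    LANGUAGE plpgsql
    AS $$
    BEGIN
        FOR i IN 1..10 LOOP
            INSERT INTO batch_log VALUES (i, now());
            COMMIT;    -- legal only when the procedure is CALLed outside an explicit transaction block
        END LOOP;
    END;
    $$;

    CALL load_batches();

The companion exec_stmt_set runs SET/RESET without taking a snapshot, so a statement such as SET TRANSACTION ISOLATION LEVEL REPEATABLE READ can follow one of these COMMIT/ROLLBACK calls before any query has been executed in the new transaction.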
*/ if (var->value != newvalue || var->isnull || isNull) assign_simple_var(estate, var, newvalue, isNull, (!var->datatype->typbyval && !isNull)); + else + var->promise = PLPGSQL_PROMISE_NONE; break; } @@ -4505,7 +5003,8 @@ exec_assign_value(PLpgSQL_execstate *estate, if (isNull) { /* If source is null, just assign nulls to the row */ - exec_move_row(estate, NULL, row, NULL, NULL); + exec_move_row(estate, (PLpgSQL_variable *) row, + NULL, NULL); } else { @@ -4514,7 +5013,8 @@ exec_assign_value(PLpgSQL_execstate *estate, ereport(ERROR, (errcode(ERRCODE_DATATYPE_MISMATCH), errmsg("cannot assign non-composite value to a row variable"))); - exec_move_row_from_datum(estate, NULL, row, value); + exec_move_row_from_datum(estate, (PLpgSQL_variable *) row, + value); } break; } @@ -4528,8 +5028,15 @@ exec_assign_value(PLpgSQL_execstate *estate, if (isNull) { - /* If source is null, just assign nulls to the record */ - exec_move_row(estate, rec, NULL, NULL, NULL); + if (rec->notnull) + ereport(ERROR, + (errcode(ERRCODE_NULL_VALUE_NOT_ALLOWED), + errmsg("null value cannot be assigned to variable \"%s\" declared NOT NULL", + rec->refname))); + + /* Set variable to a simple NULL */ + exec_move_row(estate, (PLpgSQL_variable *) rec, + NULL, NULL); } else { @@ -4538,7 +5045,8 @@ exec_assign_value(PLpgSQL_execstate *estate, ereport(ERROR, (errcode(ERRCODE_DATATYPE_MISMATCH), errmsg("cannot assign non-composite value to a record variable"))); - exec_move_row_from_datum(estate, rec, NULL, value); + exec_move_row_from_datum(estate, (PLpgSQL_variable *) rec, + value); } break; } @@ -4550,64 +5058,58 @@ exec_assign_value(PLpgSQL_execstate *estate, */ PLpgSQL_recfield *recfield = (PLpgSQL_recfield *) target; PLpgSQL_rec *rec; - int fno; - HeapTuple newtup; - int colnums[1]; - Datum values[1]; - bool nulls[1]; - Oid atttype; - int32 atttypmod; + ExpandedRecordHeader *erh; rec = (PLpgSQL_rec *) (estate->datums[recfield->recparentno]); + erh = rec->erh; /* - * Check that there is already a tuple in the record. We need - * that because records don't have any predefined field - * structure. - */ - if (!HeapTupleIsValid(rec->tup)) - ereport(ERROR, - (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE), - errmsg("record \"%s\" is not assigned yet", - rec->refname), - errdetail("The tuple structure of a not-yet-assigned record is indeterminate."))); - - /* - * Get the number of the record field to change. Disallow - * system columns because the code below won't cope. + * If record variable is NULL, instantiate it if it has a + * named composite type, else complain. (This won't change + * the logical state of the record, but if we successfully + * assign below, the unassigned fields will all become NULLs.) */ - fno = SPI_fnumber(rec->tupdesc, recfield->fieldname); - if (fno <= 0) - ereport(ERROR, - (errcode(ERRCODE_UNDEFINED_COLUMN), - errmsg("record \"%s\" has no field \"%s\"", - rec->refname, recfield->fieldname))); - colnums[0] = fno; + if (erh == NULL) + { + instantiate_empty_record_variable(estate, rec); + erh = rec->erh; + } /* - * Now insert the new value, being careful to cast it to the - * right type. + * Look up the field's properties if we have not already, or + * if the tuple descriptor ID changed since last time. 
*/ - atttype = rec->tupdesc->attrs[fno - 1]->atttypid; - atttypmod = rec->tupdesc->attrs[fno - 1]->atttypmod; - values[0] = exec_cast_value(estate, - value, - &isNull, - valtype, - valtypmod, - atttype, - atttypmod); - nulls[0] = isNull; - - newtup = heap_modify_tuple_by_cols(rec->tup, rec->tupdesc, - 1, colnums, values, nulls); - - if (rec->freetup) - heap_freetuple(rec->tup); - - rec->tup = newtup; - rec->freetup = true; + if (unlikely(recfield->rectupledescid != erh->er_tupdesc_id)) + { + if (!expanded_record_lookup_field(erh, + recfield->fieldname, + &recfield->finfo)) + ereport(ERROR, + (errcode(ERRCODE_UNDEFINED_COLUMN), + errmsg("record \"%s\" has no field \"%s\"", + rec->refname, recfield->fieldname))); + recfield->rectupledescid = erh->er_tupdesc_id; + } + /* We don't support assignments to system columns. */ + if (recfield->finfo.fnumber <= 0) + ereport(ERROR, + (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), + errmsg("cannot assign to system column \"%s\"", + recfield->fieldname))); + + /* Cast the new value to the right type, if needed. */ + value = exec_cast_value(estate, + value, + &isNull, + valtype, + valtypmod, + recfield->finfo.ftypeid, + recfield->finfo.ftypmod); + + /* And assign it. */ + expanded_record_set_field(erh, recfield->finfo.fnumber, + value, isNull, !estate->atomic); break; } @@ -4837,6 +5339,12 @@ exec_eval_datum(PLpgSQL_execstate *estate, switch (datum->dtype) { + case PLPGSQL_DTYPE_PROMISE: + /* fulfill promise if needed, then handle like regular var */ + plpgsql_fulfill_promise(estate, (PLpgSQL_var *) datum); + + /* FALL THRU */ + case PLPGSQL_DTYPE_VAR: { PLpgSQL_var *var = (PLpgSQL_var *) datum; @@ -4853,6 +5361,7 @@ exec_eval_datum(PLpgSQL_execstate *estate, PLpgSQL_row *row = (PLpgSQL_row *) datum; HeapTuple tup; + /* We get here if there are multiple OUT parameters */ if (!row->rowtupdesc) /* should not happen */ elog(ERROR, "row variable has no tupdesc"); /* Make sure we have a valid type/typmod setting */ @@ -4873,22 +5382,41 @@ exec_eval_datum(PLpgSQL_execstate *estate, { PLpgSQL_rec *rec = (PLpgSQL_rec *) datum; - if (!HeapTupleIsValid(rec->tup)) - ereport(ERROR, - (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE), - errmsg("record \"%s\" is not assigned yet", - rec->refname), - errdetail("The tuple structure of a not-yet-assigned record is indeterminate."))); - Assert(rec->tupdesc != NULL); - /* Make sure we have a valid type/typmod setting */ - BlessTupleDesc(rec->tupdesc); - - oldcontext = MemoryContextSwitchTo(get_eval_mcontext(estate)); - *typeid = rec->tupdesc->tdtypeid; - *typetypmod = rec->tupdesc->tdtypmod; - *value = heap_copy_tuple_as_datum(rec->tup, rec->tupdesc); - *isnull = false; - MemoryContextSwitchTo(oldcontext); + if (rec->erh == NULL) + { + /* Treat uninstantiated record as a simple NULL */ + *value = (Datum) 0; + *isnull = true; + /* Report variable's declared type */ + *typeid = rec->rectypeid; + *typetypmod = -1; + } + else + { + if (ExpandedRecordIsEmpty(rec->erh)) + { + /* Empty record is also a NULL */ + *value = (Datum) 0; + *isnull = true; + } + else + { + *value = ExpandedRecordGetDatum(rec->erh); + *isnull = false; + } + if (rec->rectypeid != RECORDOID) + { + /* Report variable's declared type, if not RECORD */ + *typeid = rec->rectypeid; + *typetypmod = -1; + } + else + { + /* Report record's actual type if declared RECORD */ + *typeid = rec->erh->er_typeid; + *typetypmod = rec->erh->er_typmod; + } + } break; } @@ -4896,27 +5424,46 @@ exec_eval_datum(PLpgSQL_execstate *estate, { PLpgSQL_recfield *recfield = 
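The rewritten RECFIELD assignment path works on expanded records: a NULL variable of a named composite type is instantiated on first use, the field lookup is cached by tuple descriptor identity, and assignments to system columns are rejected outright. A minimal illustration of the assignment path, assuming a composite type pair(a int, b text):

    CREATE TYPE pair AS (a int, b text);

    DO $$
    DECLARE
        p pair;                    -- starts out NULL
    BEGIN
        p.a := 1;                  -- instantiates the record; unassigned fields become NULL
        p.b := 'one';
        RAISE NOTICE 'p = %', p;   -- prints (1,one)
    END $$;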
(PLpgSQL_recfield *) datum; PLpgSQL_rec *rec; - int fno; + ExpandedRecordHeader *erh; rec = (PLpgSQL_rec *) (estate->datums[recfield->recparentno]); - if (!HeapTupleIsValid(rec->tup)) - ereport(ERROR, - (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE), - errmsg("record \"%s\" is not assigned yet", - rec->refname), - errdetail("The tuple structure of a not-yet-assigned record is indeterminate."))); - fno = SPI_fnumber(rec->tupdesc, recfield->fieldname); - if (fno == SPI_ERROR_NOATTRIBUTE) - ereport(ERROR, - (errcode(ERRCODE_UNDEFINED_COLUMN), - errmsg("record \"%s\" has no field \"%s\"", - rec->refname, recfield->fieldname))); - *typeid = SPI_gettypeid(rec->tupdesc, fno); - if (fno > 0) - *typetypmod = rec->tupdesc->attrs[fno - 1]->atttypmod; - else - *typetypmod = -1; - *value = SPI_getbinval(rec->tup, rec->tupdesc, fno, isnull); + erh = rec->erh; + + /* + * If record variable is NULL, instantiate it if it has a + * named composite type, else complain. (This won't change + * the logical state of the record: it's still NULL.) + */ + if (erh == NULL) + { + instantiate_empty_record_variable(estate, rec); + erh = rec->erh; + } + + /* + * Look up the field's properties if we have not already, or + * if the tuple descriptor ID changed since last time. + */ + if (unlikely(recfield->rectupledescid != erh->er_tupdesc_id)) + { + if (!expanded_record_lookup_field(erh, + recfield->fieldname, + &recfield->finfo)) + ereport(ERROR, + (errcode(ERRCODE_UNDEFINED_COLUMN), + errmsg("record \"%s\" has no field \"%s\"", + rec->refname, recfield->fieldname))); + recfield->rectupledescid = erh->er_tupdesc_id; + } + + /* Report type data. */ + *typeid = recfield->finfo.ftypeid; + *typetypmod = recfield->finfo.ftypmod; + + /* And fetch the field value. */ + *value = expanded_record_get_field(erh, + recfield->finfo.fnumber, + isnull); break; } @@ -4928,10 +5475,8 @@ exec_eval_datum(PLpgSQL_execstate *estate, /* * plpgsql_exec_get_datum_type Get datatype of a PLpgSQL_datum * - * This is the same logic as in exec_eval_datum, except that it can handle - * some cases where exec_eval_datum has to fail; specifically, we may have - * a tupdesc but no row value for a record variable. (This currently can - * happen only for a trigger's NEW/OLD records.) + * This is the same logic as in exec_eval_datum, but we skip acquiring + * the actual value of the variable. Also, needn't support DTYPE_ROW. 
*/ Oid plpgsql_exec_get_datum_type(PLpgSQL_execstate *estate, @@ -4942,6 +5487,7 @@ plpgsql_exec_get_datum_type(PLpgSQL_execstate *estate, switch (datum->dtype) { case PLPGSQL_DTYPE_VAR: + case PLPGSQL_DTYPE_PROMISE: { PLpgSQL_var *var = (PLpgSQL_var *) datum; @@ -4949,31 +5495,20 @@ plpgsql_exec_get_datum_type(PLpgSQL_execstate *estate, break; } - case PLPGSQL_DTYPE_ROW: - { - PLpgSQL_row *row = (PLpgSQL_row *) datum; - - if (!row->rowtupdesc) /* should not happen */ - elog(ERROR, "row variable has no tupdesc"); - /* Make sure we have a valid type/typmod setting */ - BlessTupleDesc(row->rowtupdesc); - typeid = row->rowtupdesc->tdtypeid; - break; - } - case PLPGSQL_DTYPE_REC: { PLpgSQL_rec *rec = (PLpgSQL_rec *) datum; - if (rec->tupdesc == NULL) - ereport(ERROR, - (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE), - errmsg("record \"%s\" is not assigned yet", - rec->refname), - errdetail("The tuple structure of a not-yet-assigned record is indeterminate."))); - /* Make sure we have a valid type/typmod setting */ - BlessTupleDesc(rec->tupdesc); - typeid = rec->tupdesc->tdtypeid; + if (rec->erh == NULL || rec->rectypeid != RECORDOID) + { + /* Report variable's declared type */ + typeid = rec->rectypeid; + } + else + { + /* Report record's actual type if declared RECORD */ + typeid = rec->erh->er_typeid; + } break; } @@ -4981,22 +5516,34 @@ plpgsql_exec_get_datum_type(PLpgSQL_execstate *estate, { PLpgSQL_recfield *recfield = (PLpgSQL_recfield *) datum; PLpgSQL_rec *rec; - int fno; rec = (PLpgSQL_rec *) (estate->datums[recfield->recparentno]); - if (rec->tupdesc == NULL) - ereport(ERROR, - (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE), - errmsg("record \"%s\" is not assigned yet", - rec->refname), - errdetail("The tuple structure of a not-yet-assigned record is indeterminate."))); - fno = SPI_fnumber(rec->tupdesc, recfield->fieldname); - if (fno == SPI_ERROR_NOATTRIBUTE) - ereport(ERROR, - (errcode(ERRCODE_UNDEFINED_COLUMN), - errmsg("record \"%s\" has no field \"%s\"", - rec->refname, recfield->fieldname))); - typeid = SPI_gettypeid(rec->tupdesc, fno); + + /* + * If record variable is NULL, instantiate it if it has a + * named composite type, else complain. (This won't change + * the logical state of the record: it's still NULL.) + */ + if (rec->erh == NULL) + instantiate_empty_record_variable(estate, rec); + + /* + * Look up the field's properties if we have not already, or + * if the tuple descriptor ID changed since last time. + */ + if (unlikely(recfield->rectupledescid != rec->erh->er_tupdesc_id)) + { + if (!expanded_record_lookup_field(rec->erh, + recfield->fieldname, + &recfield->finfo)) + ereport(ERROR, + (errcode(ERRCODE_UNDEFINED_COLUMN), + errmsg("record \"%s\" has no field \"%s\"", + rec->refname, recfield->fieldname))); + recfield->rectupledescid = rec->erh->er_tupdesc_id; + } + + typeid = recfield->finfo.ftypeid; break; } @@ -5013,7 +5560,8 @@ plpgsql_exec_get_datum_type(PLpgSQL_execstate *estate, * plpgsql_exec_get_datum_type_info Get datatype etc of a PLpgSQL_datum * * An extended version of plpgsql_exec_get_datum_type, which also retrieves the - * typmod and collation of the datum. + * typmod and collation of the datum. Note however that we don't report the + * possibly-mutable typmod of RECORD values, but say -1 always. 
*/ void plpgsql_exec_get_datum_type_info(PLpgSQL_execstate *estate, @@ -5023,6 +5571,7 @@ plpgsql_exec_get_datum_type_info(PLpgSQL_execstate *estate, switch (datum->dtype) { case PLPGSQL_DTYPE_VAR: + case PLPGSQL_DTYPE_PROMISE: { PLpgSQL_var *var = (PLpgSQL_var *) datum; @@ -5032,37 +5581,23 @@ plpgsql_exec_get_datum_type_info(PLpgSQL_execstate *estate, break; } - case PLPGSQL_DTYPE_ROW: - { - PLpgSQL_row *row = (PLpgSQL_row *) datum; - - if (!row->rowtupdesc) /* should not happen */ - elog(ERROR, "row variable has no tupdesc"); - /* Make sure we have a valid type/typmod setting */ - BlessTupleDesc(row->rowtupdesc); - *typeid = row->rowtupdesc->tdtypeid; - /* do NOT return the mutable typmod of a RECORD variable */ - *typmod = -1; - /* composite types are never collatable */ - *collation = InvalidOid; - break; - } - case PLPGSQL_DTYPE_REC: { PLpgSQL_rec *rec = (PLpgSQL_rec *) datum; - if (rec->tupdesc == NULL) - ereport(ERROR, - (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE), - errmsg("record \"%s\" is not assigned yet", - rec->refname), - errdetail("The tuple structure of a not-yet-assigned record is indeterminate."))); - /* Make sure we have a valid type/typmod setting */ - BlessTupleDesc(rec->tupdesc); - *typeid = rec->tupdesc->tdtypeid; - /* do NOT return the mutable typmod of a RECORD variable */ - *typmod = -1; + if (rec->erh == NULL || rec->rectypeid != RECORDOID) + { + /* Report variable's declared type */ + *typeid = rec->rectypeid; + *typmod = -1; + } + else + { + /* Report record's actual type if declared RECORD */ + *typeid = rec->erh->er_typeid; + /* do NOT return the mutable typmod of a RECORD variable */ + *typmod = -1; + } /* composite types are never collatable */ *collation = InvalidOid; break; @@ -5072,30 +5607,36 @@ plpgsql_exec_get_datum_type_info(PLpgSQL_execstate *estate, { PLpgSQL_recfield *recfield = (PLpgSQL_recfield *) datum; PLpgSQL_rec *rec; - int fno; rec = (PLpgSQL_rec *) (estate->datums[recfield->recparentno]); - if (rec->tupdesc == NULL) - ereport(ERROR, - (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE), - errmsg("record \"%s\" is not assigned yet", - rec->refname), - errdetail("The tuple structure of a not-yet-assigned record is indeterminate."))); - fno = SPI_fnumber(rec->tupdesc, recfield->fieldname); - if (fno == SPI_ERROR_NOATTRIBUTE) - ereport(ERROR, - (errcode(ERRCODE_UNDEFINED_COLUMN), - errmsg("record \"%s\" has no field \"%s\"", - rec->refname, recfield->fieldname))); - *typeid = SPI_gettypeid(rec->tupdesc, fno); - if (fno > 0) - *typmod = rec->tupdesc->attrs[fno - 1]->atttypmod; - else - *typmod = -1; - if (fno > 0) - *collation = rec->tupdesc->attrs[fno - 1]->attcollation; - else /* no system column types have collation */ - *collation = InvalidOid; + + /* + * If record variable is NULL, instantiate it if it has a + * named composite type, else complain. (This won't change + * the logical state of the record: it's still NULL.) + */ + if (rec->erh == NULL) + instantiate_empty_record_variable(estate, rec); + + /* + * Look up the field's properties if we have not already, or + * if the tuple descriptor ID changed since last time. 
+ */ + if (unlikely(recfield->rectupledescid != rec->erh->er_tupdesc_id)) + { + if (!expanded_record_lookup_field(rec->erh, + recfield->fieldname, + &recfield->finfo)) + ereport(ERROR, + (errcode(ERRCODE_UNDEFINED_COLUMN), + errmsg("record \"%s\" has no field \"%s\"", + rec->refname, recfield->fieldname))); + recfield->rectupledescid = rec->erh->er_tupdesc_id; + } + + *typeid = recfield->finfo.ftypeid; + *typmod = recfield->finfo.ftypmod; + *collation = recfield->finfo.fcollation; break; } @@ -5172,12 +5713,13 @@ exec_eval_expr(PLpgSQL_execstate *estate, { Datum result = 0; int rc; + Form_pg_attribute attr; /* * If first time through, create a plan for this expression. */ if (expr->plan == NULL) - exec_prepare_plan(estate, expr, CURSOR_OPT_PARALLEL_OK); + exec_prepare_plan(estate, expr, CURSOR_OPT_PARALLEL_OK, true); /* * If this is a simple expression, bypass SPI and use the executor @@ -5211,8 +5753,9 @@ exec_eval_expr(PLpgSQL_execstate *estate, /* * ... and get the column's datatype. */ - *rettype = estate->eval_tuptable->tupdesc->attrs[0]->atttypid; - *rettypmod = estate->eval_tuptable->tupdesc->attrs[0]->atttypmod; + attr = TupleDescAttr(estate->eval_tuptable->tupdesc, 0); + *rettype = attr->atttypid; + *rettypmod = attr->atttypmod; /* * If there are no rows selected, the result is a NULL of that type. @@ -5262,18 +5805,18 @@ exec_run_select(PLpgSQL_execstate *estate, */ if (expr->plan == NULL) exec_prepare_plan(estate, expr, - portalP == NULL ? CURSOR_OPT_PARALLEL_OK : 0); + portalP == NULL ? CURSOR_OPT_PARALLEL_OK : 0, true); + + /* + * Set up ParamListInfo to pass to executor + */ + paramLI = setup_param_list(estate, expr); /* - * If a portal was requested, put the query into the portal + * If a portal was requested, put the query and paramlist into the portal */ if (portalP != NULL) { - /* - * Set up short-lived ParamListInfo - */ - paramLI = setup_unshared_param_list(estate, expr); - *portalP = SPI_cursor_open_with_paramlist(NULL, expr->plan, paramLI, estate->readonly_func); @@ -5284,11 +5827,6 @@ exec_run_select(PLpgSQL_execstate *estate, return SPI_OK_CURSOR; } - /* - * Set up ParamListInfo to pass to executor - */ - paramLI = setup_param_list(estate, expr); - /* * Execute the query */ @@ -5318,22 +5856,16 @@ static int exec_for_query(PLpgSQL_execstate *estate, PLpgSQL_stmt_forq *stmt, Portal portal, bool prefetch_ok) { - PLpgSQL_rec *rec = NULL; - PLpgSQL_row *row = NULL; + PLpgSQL_variable *var; SPITupleTable *tuptab; bool found = false; int rc = PLPGSQL_RC_OK; + uint64 previous_id = INVALID_TUPLEDESC_IDENTIFIER; + bool tupdescs_match = true; uint64 n; - /* - * Determine if we assign to a record or a row - */ - if (stmt->rec != NULL) - rec = (PLpgSQL_rec *) (estate->datums[stmt->rec->dno]); - else if (stmt->row != NULL) - row = (PLpgSQL_row *) (estate->datums[stmt->row->dno]); - else - elog(ERROR, "unsupported target"); + /* Fetch loop variable's datum entry */ + var = (PLpgSQL_variable *) estate->datums[stmt->var->dno]; /* * Make sure the portal doesn't get closed by the user statements we @@ -5356,7 +5888,7 @@ exec_for_query(PLpgSQL_execstate *estate, PLpgSQL_stmt_forq *stmt, */ if (n == 0) { - exec_move_row(estate, rec, row, NULL, tuptab->tupdesc); + exec_move_row(estate, var, NULL, tuptab->tupdesc); exec_eval_cleanup(estate); } else @@ -5372,70 +5904,65 @@ exec_for_query(PLpgSQL_execstate *estate, PLpgSQL_stmt_forq *stmt, for (i = 0; i < n; i++) { /* - * Assign the tuple to the target - */ - exec_move_row(estate, rec, row, tuptab->vals[i], tuptab->tupdesc); - 
exec_eval_cleanup(estate); - - /* - * Execute the statements + * Assign the tuple to the target. Here, because we know that all + * loop iterations should be assigning the same tupdesc, we can + * optimize away repeated creations of expanded records with + * identical tupdescs. Testing for changes of er_tupdesc_id is + * reliable even if the loop body contains assignments that + * replace the target's value entirely, because it's assigned from + * a process-global counter. The case where the tupdescs don't + * match could possibly be handled more efficiently than this + * coding does, but it's not clear extra effort is worthwhile. */ - rc = exec_stmts(estate, stmt->body); - - if (rc != PLPGSQL_RC_OK) + if (var->dtype == PLPGSQL_DTYPE_REC) { - if (rc == PLPGSQL_RC_EXIT) - { - if (estate->exitlabel == NULL) - { - /* unlabelled exit, so exit the current loop */ - rc = PLPGSQL_RC_OK; - } - else if (stmt->label != NULL && - strcmp(stmt->label, estate->exitlabel) == 0) - { - /* label matches this loop, so exit loop */ - estate->exitlabel = NULL; - rc = PLPGSQL_RC_OK; - } + PLpgSQL_rec *rec = (PLpgSQL_rec *) var; - /* - * otherwise, we processed a labelled exit that does not - * match the current statement's label, if any; return - * RC_EXIT so that the EXIT continues to recurse upward. - */ + if (rec->erh && + rec->erh->er_tupdesc_id == previous_id && + tupdescs_match) + { + /* Only need to assign a new tuple value */ + expanded_record_set_tuple(rec->erh, tuptab->vals[i], + true, !estate->atomic); } - else if (rc == PLPGSQL_RC_CONTINUE) + else { - if (estate->exitlabel == NULL) - { - /* unlabelled continue, so re-run the current loop */ - rc = PLPGSQL_RC_OK; - continue; - } - else if (stmt->label != NULL && - strcmp(stmt->label, estate->exitlabel) == 0) - { - /* label matches this loop, so re-run loop */ - estate->exitlabel = NULL; - rc = PLPGSQL_RC_OK; - continue; - } + /* + * First time through, or var's tupdesc changed in loop, + * or we have to do it the hard way because type coercion + * is needed. + */ + exec_move_row(estate, var, + tuptab->vals[i], tuptab->tupdesc); /* - * otherwise, we process a labelled continue that does not - * match the current statement's label, if any; return - * RC_CONTINUE so that the CONTINUE will propagate up the - * stack. + * Check to see if physical assignment is OK next time. + * Once the tupdesc comparison has failed once, we don't + * bother rechecking in subsequent loop iterations. */ + if (tupdescs_match) + { + tupdescs_match = + (rec->rectypeid == RECORDOID || + rec->rectypeid == tuptab->tupdesc->tdtypeid || + compatible_tupdescs(tuptab->tupdesc, + expanded_record_get_tupdesc(rec->erh))); + } + previous_id = rec->erh->er_tupdesc_id; } - - /* - * We're aborting the loop. Need a goto to get out of two - * levels of loop... - */ - goto loop_exit; } + else + exec_move_row(estate, var, tuptab->vals[i], tuptab->tupdesc); + + exec_eval_cleanup(estate); + + /* + * Execute the statements + */ + rc = exec_stmts(estate, stmt->body); + + LOOP_RC_PROCESSING(stmt->label, goto loop_exit); } SPI_freetuptable(tuptab); @@ -5474,12 +6001,12 @@ exec_for_query(PLpgSQL_execstate *estate, PLpgSQL_stmt_forq *stmt, * a Datum by directly calling ExecEvalExpr(). * * If successful, store results into *result, *isNull, *rettype, *rettypmod - * and return TRUE. If the expression cannot be handled by simple evaluation, - * return FALSE. + * and return true. If the expression cannot be handled by simple evaluation, + * return false. 
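When the loop target of exec_for_query is a record variable, successive rows that share a tuple descriptor are now installed with expanded_record_set_tuple rather than rebuilding the expanded record each time, and loop exits go through the same LOOP_RC_PROCESSING macro as the other loop statements. The pattern that benefits is the ordinary query FOR loop (illustrative only):

    DO $$
    DECLARE
        r record;
    BEGIN
        FOR r IN SELECT relname, relkind FROM pg_class LOOP
            -- every row carries the same tuple descriptor, so only the tuple
            -- stored inside r has to be replaced on each iteration
            EXIT WHEN r.relkind = 'i';
        END LOOP;
    END $$;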
* * Because we only store one execution tree for a simple expression, we * can't handle recursion cases. So, if we see the tree is already busy - * with an evaluation in the current xact, we just return FALSE and let the + * with an evaluation in the current xact, we just return false and let the * caller run the expression the hard way. (Other alternatives such as * creating a new tree for a recursive call either introduce memory leaks, * or add enough bookkeeping to be doubtful wins anyway.) Another case that @@ -5509,7 +6036,6 @@ exec_eval_simple_expr(PLpgSQL_execstate *estate, ExprContext *econtext = estate->eval_econtext; LocalTransactionId curlxid = MyProc->lxid; CachedPlan *cplan; - ParamListInfo paramLI; void *save_setup_arg; MemoryContext oldcontext; @@ -5556,6 +6082,14 @@ exec_eval_simple_expr(PLpgSQL_execstate *estate, *rettype = expr->expr_simple_type; *rettypmod = expr->expr_simple_typmod; + /* + * Set up ParamListInfo to pass to executor. For safety, save and restore + * estate->paramLI->parserSetupArg around our use of the param list. + */ + save_setup_arg = estate->paramLI->parserSetupArg; + + econtext->ecxt_param_list_info = setup_param_list(estate, expr); + /* * Prepare the expression for execution, if it's not been done already in * the current transaction. (This will be forced to happen if we called @@ -5564,7 +6098,9 @@ exec_eval_simple_expr(PLpgSQL_execstate *estate, if (expr->expr_simple_lxid != curlxid) { oldcontext = MemoryContextSwitchTo(estate->simple_eval_estate->es_query_cxt); - expr->expr_simple_state = ExecInitExpr(expr->expr_simple_expr, NULL); + expr->expr_simple_state = + ExecInitExprWithParams(expr->expr_simple_expr, + econtext->ecxt_param_list_info); expr->expr_simple_in_use = false; expr->expr_simple_lxid = curlxid; MemoryContextSwitchTo(oldcontext); @@ -5583,21 +6119,6 @@ exec_eval_simple_expr(PLpgSQL_execstate *estate, PushActiveSnapshot(GetTransactionSnapshot()); } - /* - * Set up ParamListInfo to pass to executor. We need an unshared list if - * it's going to include any R/W expanded-object pointer. For safety, - * save and restore estate->paramLI->parserSetupArg around our use of the - * param list. - */ - save_setup_arg = estate->paramLI->parserSetupArg; - - if (expr->rwparam >= 0) - paramLI = setup_unshared_param_list(estate, expr); - else - paramLI = setup_param_list(estate, expr); - - econtext->ecxt_param_list_info = paramLI; - /* * Mark expression as busy for the duration of the ExecEvalExpr call. */ @@ -5637,35 +6158,17 @@ exec_eval_simple_expr(PLpgSQL_execstate *estate, /* * Create a ParamListInfo to pass to SPI * - * We share a single ParamListInfo array across all SPI calls made from this - * estate, except calls creating cursors, which use setup_unshared_param_list - * (see its comments for reasons why), and calls that pass a R/W expanded - * object pointer. A shared array is generally OK since any given slot in - * the array would need to contain the same current datum value no matter - * which query or expression we're evaluating; but of course that doesn't - * hold when a specific variable is being passed as a R/W pointer, because - * other expressions in the same function probably don't want to do that. - * - * Note that paramLI->parserSetupArg points to the specific PLpgSQL_expr - * being evaluated. This is not an issue for statement-level callers, but - * lower-level callers must save and restore estate->paramLI->parserSetupArg - * just in case there's an active evaluation at an outer call level. 
+ * We use a single ParamListInfo struct for all SPI calls made from this + * estate; it contains no per-param data, just hook functions, so it's + * effectively read-only for SPI. * - * The general plan for passing parameters to SPI is that plain VAR datums - * always have valid images in the shared param list. This is ensured by - * assign_simple_var(), which also marks those params as PARAM_FLAG_CONST, - * allowing the planner to use those values in custom plans. However, non-VAR - * datums cannot conveniently be managed that way. For one thing, they could - * throw errors (for example "no such record field") and we do not want that - * to happen in a part of the expression that might never be evaluated at - * runtime. For another thing, exec_eval_datum() may return short-lived - * values stored in the estate's eval_mcontext, which will not necessarily - * survive to the next SPI operation. And for a third thing, ROW - * and RECFIELD datums' values depend on other datums, and we don't have a - * cheap way to track that. Therefore, param slots for non-VAR datum types - * are always reset here and then filled on-demand by plpgsql_param_fetch(). - * We can save a few cycles by not bothering with the reset loop unless at - * least one such param has actually been filled by plpgsql_param_fetch(). + * An exception from pure read-only-ness is that the parserSetupArg points + * to the specific PLpgSQL_expr being evaluated. This is not an issue for + * statement-level callers, but lower-level callers must save and restore + * estate->paramLI->parserSetupArg just in case there's an active evaluation + * at an outer call level. (A plausible alternative design would be to + * create a ParamListInfo struct for each PLpgSQL_expr, but for the moment + * that seems like a waste of memory.) */ static ParamListInfo setup_param_list(PLpgSQL_execstate *estate, PLpgSQL_expr *expr) @@ -5678,11 +6181,6 @@ setup_param_list(PLpgSQL_execstate *estate, PLpgSQL_expr *expr) */ Assert(expr->plan != NULL); - /* - * Expressions with R/W parameters can't use the shared param list. - */ - Assert(expr->rwparam == -1); - /* * We only need a ParamListInfo if the expression has parameters. In * principle we should test with bms_is_empty(), but we use a not-null @@ -5694,25 +6192,6 @@ setup_param_list(PLpgSQL_execstate *estate, PLpgSQL_expr *expr) /* Use the common ParamListInfo */ paramLI = estate->paramLI; - /* - * If any resettable parameters have been passed to the executor since - * last time, we need to reset those param slots to "invalid", for the - * reasons mentioned in the comment above. - */ - if (estate->params_dirty) - { - Bitmapset *resettable_datums = estate->func->resettable_datums; - int dno = -1; - - while ((dno = bms_next_member(resettable_datums, dno)) >= 0) - { - ParamExternData *prm = ¶mLI->params[dno]; - - prm->ptype = InvalidOid; - } - estate->params_dirty = false; - } - /* * Set up link to active expr where the hook functions can find it. * Callers must save and restore parserSetupArg if there is any chance @@ -5720,12 +6199,6 @@ setup_param_list(PLpgSQL_execstate *estate, PLpgSQL_expr *expr) */ paramLI->parserSetupArg = (void *) expr; - /* - * Allow parameters that aren't needed by this expression to be - * ignored. - */ - paramLI->paramMask = expr->paramnos; - /* * Also make sure this is set before parser hooks need it. 
There is * no need to save and restore, since the value is always correct once @@ -5746,275 +6219,837 @@ setup_param_list(PLpgSQL_execstate *estate, PLpgSQL_expr *expr) } /* - * Create an unshared, short-lived ParamListInfo to pass to SPI - * - * When creating a cursor, we do not use the shared ParamListInfo array - * but create a short-lived one that will contain only params actually - * referenced by the query. The reason for this is that copyParamList() will - * be used to copy the parameters into cursor-lifespan storage, and we don't - * want it to copy anything that's not used by the specific cursor; that - * could result in uselessly copying some large values. - * - * We also use this for expressions that are passing a R/W object pointer - * to some trusted function. We don't want the R/W pointer to get into the - * shared param list, where it could get passed to some less-trusted function. + * plpgsql_param_fetch paramFetch callback for dynamic parameter fetch * - * The result, if not NULL, is in the estate's eval_mcontext. + * We always use the caller's workspace to construct the returned struct. * - * XXX. Could we use ParamListInfo's new paramMask to avoid creating unshared - * parameter lists? + * Note: this is no longer used during query execution. It is used during + * planning (with speculative == true) and when the ParamListInfo we supply + * to the executor is copied into a cursor portal or transferred to a + * parallel child process. */ -static ParamListInfo -setup_unshared_param_list(PLpgSQL_execstate *estate, PLpgSQL_expr *expr) +static ParamExternData * +plpgsql_param_fetch(ParamListInfo params, + int paramid, bool speculative, + ParamExternData *prm) { - ParamListInfo paramLI; + int dno; + PLpgSQL_execstate *estate; + PLpgSQL_expr *expr; + PLpgSQL_datum *datum; + bool ok = true; + int32 prmtypmod; + + /* paramid's are 1-based, but dnos are 0-based */ + dno = paramid - 1; + Assert(dno >= 0 && dno < params->numParams); + + /* fetch back the hook data */ + estate = (PLpgSQL_execstate *) params->paramFetchArg; + expr = (PLpgSQL_expr *) params->parserSetupArg; + Assert(params->numParams == estate->ndatums); + + /* now we can access the target datum */ + datum = estate->datums[dno]; /* - * We must have created the SPIPlan already (hence, query text has been - * parsed/analyzed at least once); else we cannot rely on expr->paramnos. + * Since copyParamList() or SerializeParamList() will try to materialize + * every single parameter slot, it's important to return a dummy param + * when asked for a datum that's not supposed to be used by this SQL + * expression. Otherwise we risk failures in exec_eval_datum(), or + * copying a lot more data than necessary. */ - Assert(expr->plan != NULL); + if (!bms_is_member(dno, expr->paramnos)) + ok = false; /* - * We only need a ParamListInfo if the expression has parameters. In - * principle we should test with bms_is_empty(), but we use a not-null - * test because it's faster. In current usage bits are never removed from - * expr->paramnos, only added, so this test is correct anyway. + * If the access is speculative, we prefer to return no data rather than + * to fail in exec_eval_datum(). Check the likely failure cases. 
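A rough illustration of why the dummy-param behavior matters (object names are ours; the internal claims are only what the comments above state): when a cursor is opened, the parameter list handed to SPI is copied into the cursor's portal, and copyParamList() materializes every slot. Datums the cursor's query never references, such as the large buffer below, should come back as dummies rather than being evaluated and copied.

    CREATE FUNCTION cursor_copy_demo() RETURNS int
    LANGUAGE plpgsql
    AS $$
    DECLARE
        big_buf text := repeat('x', 10 * 1024 * 1024);  -- never used by the cursor
        base    int  := 41;
        c       refcursor;
        result  int;
    BEGIN
        OPEN c FOR SELECT base + 1;   -- the portal copies the param list here
        FETCH c INTO result;
        CLOSE c;
        RETURN result;                -- 42; big_buf is not dragged into the portal
    END
    $$;

    SELECT cursor_copy_demo();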
*/ - if (expr->paramnos) + else if (speculative) { - int dno; + switch (datum->dtype) + { + case PLPGSQL_DTYPE_VAR: + case PLPGSQL_DTYPE_PROMISE: + /* always safe */ + break; - /* initialize ParamListInfo with one entry per datum, all invalid */ - paramLI = (ParamListInfo) - eval_mcontext_alloc0(estate, - offsetof(ParamListInfoData, params) + - estate->ndatums * sizeof(ParamExternData)); - paramLI->paramFetch = plpgsql_param_fetch; - paramLI->paramFetchArg = (void *) estate; - paramLI->parserSetup = (ParserSetupHook) plpgsql_parser_setup; - paramLI->parserSetupArg = (void *) expr; - paramLI->numParams = estate->ndatums; - paramLI->paramMask = NULL; + case PLPGSQL_DTYPE_ROW: + /* should be safe in all interesting cases */ + break; - /* - * Instantiate values for "safe" parameters of the expression. We - * could skip this and leave them to be filled by plpgsql_param_fetch; - * but then the values would not be available for query planning, - * since the planner doesn't call the paramFetch hook. - */ - dno = -1; - while ((dno = bms_next_member(expr->paramnos, dno)) >= 0) - { - PLpgSQL_datum *datum = estate->datums[dno]; + case PLPGSQL_DTYPE_REC: + /* always safe (might return NULL, that's fine) */ + break; - if (datum->dtype == PLPGSQL_DTYPE_VAR) - { - PLpgSQL_var *var = (PLpgSQL_var *) datum; - ParamExternData *prm = ¶mLI->params[dno]; + case PLPGSQL_DTYPE_RECFIELD: + { + PLpgSQL_recfield *recfield = (PLpgSQL_recfield *) datum; + PLpgSQL_rec *rec; - if (dno == expr->rwparam) - prm->value = var->value; - else - prm->value = MakeExpandedObjectReadOnly(var->value, - var->isnull, - var->datatype->typlen); - prm->isnull = var->isnull; - prm->pflags = PARAM_FLAG_CONST; - prm->ptype = var->datatype->typoid; - } + rec = (PLpgSQL_rec *) (estate->datums[recfield->recparentno]); + + /* + * If record variable is NULL, don't risk anything. + */ + if (rec->erh == NULL) + ok = false; + + /* + * Look up the field's properties if we have not already, + * or if the tuple descriptor ID changed since last time. + */ + else if (unlikely(recfield->rectupledescid != rec->erh->er_tupdesc_id)) + { + if (expanded_record_lookup_field(rec->erh, + recfield->fieldname, + &recfield->finfo)) + recfield->rectupledescid = rec->erh->er_tupdesc_id; + else + ok = false; + } + break; + } + + default: + ok = false; + break; } + } - /* - * Also make sure this is set before parser hooks need it. There is - * no need to save and restore, since the value is always correct once - * set. (Should be set already, but let's be sure.) - */ - expr->func = estate->func; + /* Return "no such parameter" if not ok */ + if (!ok) + { + prm->value = (Datum) 0; + prm->isnull = true; + prm->pflags = 0; + prm->ptype = InvalidOid; + return prm; } - else + + /* OK, evaluate the value and store into the return struct */ + exec_eval_datum(estate, datum, + &prm->ptype, &prmtypmod, + &prm->value, &prm->isnull); + /* We can always mark params as "const" for executor's purposes */ + prm->pflags = PARAM_FLAG_CONST; + + /* + * If it's a read/write expanded datum, convert reference to read-only, + * unless it's safe to pass as read-write. + */ + if (dno != expr->rwparam) { - /* - * Expression requires no parameters. Be sure we represent this case - * as a NULL ParamListInfo, so that plancache.c knows there is no - * point in a custom plan. 
- */ - paramLI = NULL; + if (datum->dtype == PLPGSQL_DTYPE_VAR) + prm->value = MakeExpandedObjectReadOnly(prm->value, + prm->isnull, + ((PLpgSQL_var *) datum)->datatype->typlen); + else if (datum->dtype == PLPGSQL_DTYPE_REC) + prm->value = MakeExpandedObjectReadOnly(prm->value, + prm->isnull, + -1); } - return paramLI; + + return prm; } /* - * plpgsql_param_fetch paramFetch callback for dynamic parameter fetch + * plpgsql_param_compile paramCompile callback for plpgsql parameters */ static void -plpgsql_param_fetch(ParamListInfo params, int paramid) +plpgsql_param_compile(ParamListInfo params, Param *param, + ExprState *state, + Datum *resv, bool *resnull) { - int dno; PLpgSQL_execstate *estate; PLpgSQL_expr *expr; + int dno; PLpgSQL_datum *datum; - ParamExternData *prm; - int32 prmtypmod; + ExprEvalStep scratch; + + /* fetch back the hook data */ + estate = (PLpgSQL_execstate *) params->paramFetchArg; + expr = (PLpgSQL_expr *) params->parserSetupArg; /* paramid's are 1-based, but dnos are 0-based */ - dno = paramid - 1; - Assert(dno >= 0 && dno < params->numParams); + dno = param->paramid - 1; + Assert(dno >= 0 && dno < estate->ndatums); + + /* now we can access the target datum */ + datum = estate->datums[dno]; + + scratch.opcode = EEOP_PARAM_CALLBACK; + scratch.resvalue = resv; + scratch.resnull = resnull; + + /* + * Select appropriate eval function. It seems worth special-casing + * DTYPE_VAR and DTYPE_RECFIELD for performance. Also, we can determine + * in advance whether MakeExpandedObjectReadOnly() will be required. + * Currently, only VAR/PROMISE and REC datums could contain read/write + * expanded objects. + */ + if (datum->dtype == PLPGSQL_DTYPE_VAR) + { + if (dno != expr->rwparam && + ((PLpgSQL_var *) datum)->datatype->typlen == -1) + scratch.d.cparam.paramfunc = plpgsql_param_eval_var_ro; + else + scratch.d.cparam.paramfunc = plpgsql_param_eval_var; + } + else if (datum->dtype == PLPGSQL_DTYPE_RECFIELD) + scratch.d.cparam.paramfunc = plpgsql_param_eval_recfield; + else if (datum->dtype == PLPGSQL_DTYPE_PROMISE) + { + if (dno != expr->rwparam && + ((PLpgSQL_var *) datum)->datatype->typlen == -1) + scratch.d.cparam.paramfunc = plpgsql_param_eval_generic_ro; + else + scratch.d.cparam.paramfunc = plpgsql_param_eval_generic; + } + else if (datum->dtype == PLPGSQL_DTYPE_REC && + dno != expr->rwparam) + scratch.d.cparam.paramfunc = plpgsql_param_eval_generic_ro; + else + scratch.d.cparam.paramfunc = plpgsql_param_eval_generic; + + /* + * Note: it's tempting to use paramarg to store the estate pointer and + * thereby save an indirection or two in the eval functions. But that + * doesn't work because the compiled expression might be used with + * different estates for the same PL/pgSQL function. + */ + scratch.d.cparam.paramarg = NULL; + scratch.d.cparam.paramid = param->paramid; + scratch.d.cparam.paramtype = param->paramtype; + ExprEvalPushStep(state, &scratch); +} + +/* + * plpgsql_param_eval_var evaluation of EEOP_PARAM_CALLBACK step + * + * This is specialized to the case of DTYPE_VAR variables for which + * we do not need to invoke MakeExpandedObjectReadOnly. 
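As a user-level sketch of the read-write versus read-only distinction made in plpgsql_param_compile (the function name build_array is ours): in an assignment such as a := array_append(a, i), the target variable can be passed as a read-write expanded-object pointer and grown in place, while in any other expression the same variable goes through MakeExpandedObjectReadOnly so the callee cannot modify it.

    CREATE FUNCTION build_array(n int) RETURNS int[]
    LANGUAGE plpgsql
    AS $$
    DECLARE
        a int[] := '{}';
    BEGIN
        FOR i IN 1 .. n LOOP
            a := array_append(a, i);   -- "a" can be passed read-write here
        END LOOP;
        RETURN a;                      -- elsewhere "a" is passed read-only
    END
    $$;

    SELECT array_length(build_array(100000), 1);   -- 100000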
+ */ +static void +plpgsql_param_eval_var(ExprState *state, ExprEvalStep *op, + ExprContext *econtext) +{ + ParamListInfo params; + PLpgSQL_execstate *estate; + int dno = op->d.cparam.paramid - 1; + PLpgSQL_var *var; /* fetch back the hook data */ + params = econtext->ecxt_param_list_info; estate = (PLpgSQL_execstate *) params->paramFetchArg; - expr = (PLpgSQL_expr *) params->parserSetupArg; - Assert(params->numParams == estate->ndatums); + Assert(dno >= 0 && dno < estate->ndatums); + + /* now we can access the target datum */ + var = (PLpgSQL_var *) estate->datums[dno]; + Assert(var->dtype == PLPGSQL_DTYPE_VAR); + + /* inlined version of exec_eval_datum() */ + *op->resvalue = var->value; + *op->resnull = var->isnull; + + /* safety check -- an assertion should be sufficient */ + Assert(var->datatype->typoid == op->d.cparam.paramtype); +} + +/* + * plpgsql_param_eval_var_ro evaluation of EEOP_PARAM_CALLBACK step + * + * This is specialized to the case of DTYPE_VAR variables for which + * we need to invoke MakeExpandedObjectReadOnly. + */ +static void +plpgsql_param_eval_var_ro(ExprState *state, ExprEvalStep *op, + ExprContext *econtext) +{ + ParamListInfo params; + PLpgSQL_execstate *estate; + int dno = op->d.cparam.paramid - 1; + PLpgSQL_var *var; + + /* fetch back the hook data */ + params = econtext->ecxt_param_list_info; + estate = (PLpgSQL_execstate *) params->paramFetchArg; + Assert(dno >= 0 && dno < estate->ndatums); + + /* now we can access the target datum */ + var = (PLpgSQL_var *) estate->datums[dno]; + Assert(var->dtype == PLPGSQL_DTYPE_VAR); + + /* + * Inlined version of exec_eval_datum() ... and while we're at it, force + * expanded datums to read-only. + */ + *op->resvalue = MakeExpandedObjectReadOnly(var->value, + var->isnull, + -1); + *op->resnull = var->isnull; + + /* safety check -- an assertion should be sufficient */ + Assert(var->datatype->typoid == op->d.cparam.paramtype); +} + +/* + * plpgsql_param_eval_recfield evaluation of EEOP_PARAM_CALLBACK step + * + * This is specialized to the case of DTYPE_RECFIELD variables, for which + * we never need to invoke MakeExpandedObjectReadOnly. + */ +static void +plpgsql_param_eval_recfield(ExprState *state, ExprEvalStep *op, + ExprContext *econtext) +{ + ParamListInfo params; + PLpgSQL_execstate *estate; + int dno = op->d.cparam.paramid - 1; + PLpgSQL_recfield *recfield; + PLpgSQL_rec *rec; + ExpandedRecordHeader *erh; + + /* fetch back the hook data */ + params = econtext->ecxt_param_list_info; + estate = (PLpgSQL_execstate *) params->paramFetchArg; + Assert(dno >= 0 && dno < estate->ndatums); + + /* now we can access the target datum */ + recfield = (PLpgSQL_recfield *) estate->datums[dno]; + Assert(recfield->dtype == PLPGSQL_DTYPE_RECFIELD); + + /* inline the relevant part of exec_eval_datum */ + rec = (PLpgSQL_rec *) (estate->datums[recfield->recparentno]); + erh = rec->erh; + + /* + * If record variable is NULL, instantiate it if it has a named composite + * type, else complain. (This won't change the logical state of the + * record: it's still NULL.) + */ + if (erh == NULL) + { + instantiate_empty_record_variable(estate, rec); + erh = rec->erh; + } + + /* + * Look up the field's properties if we have not already, or if the tuple + * descriptor ID changed since last time. 
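The cached field lookup keyed by the tuple descriptor ID is exercised whenever a single RECORD variable is reused for rows of different shapes; a minimal sketch (column names are ours):

    DO $$
    DECLARE
        r record;
    BEGIN
        SELECT 1 AS id, 'fido' AS name INTO r;
        RAISE NOTICE 'id = %', r.id;   -- field position looked up and cached

        SELECT 'bone' AS name, 42 AS id, 9.99 AS price INTO r;
        RAISE NOTICE 'id = %', r.id;   -- tuple descriptor changed: lookup redone
    END
    $$;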
+ */ + if (unlikely(recfield->rectupledescid != erh->er_tupdesc_id)) + { + if (!expanded_record_lookup_field(erh, + recfield->fieldname, + &recfield->finfo)) + ereport(ERROR, + (errcode(ERRCODE_UNDEFINED_COLUMN), + errmsg("record \"%s\" has no field \"%s\"", + rec->refname, recfield->fieldname))); + recfield->rectupledescid = erh->er_tupdesc_id; + } + + /* OK to fetch the field value. */ + *op->resvalue = expanded_record_get_field(erh, + recfield->finfo.fnumber, + op->resnull); + + /* safety check -- needed for, eg, record fields */ + if (unlikely(recfield->finfo.ftypeid != op->d.cparam.paramtype)) + ereport(ERROR, + (errcode(ERRCODE_DATATYPE_MISMATCH), + errmsg("type of parameter %d (%s) does not match that when preparing the plan (%s)", + op->d.cparam.paramid, + format_type_be(recfield->finfo.ftypeid), + format_type_be(op->d.cparam.paramtype)))); +} + +/* + * plpgsql_param_eval_generic evaluation of EEOP_PARAM_CALLBACK step + * + * This handles all variable types, but assumes we do not need to invoke + * MakeExpandedObjectReadOnly. + */ +static void +plpgsql_param_eval_generic(ExprState *state, ExprEvalStep *op, + ExprContext *econtext) +{ + ParamListInfo params; + PLpgSQL_execstate *estate; + int dno = op->d.cparam.paramid - 1; + PLpgSQL_datum *datum; + Oid datumtype; + int32 datumtypmod; + + /* fetch back the hook data */ + params = econtext->ecxt_param_list_info; + estate = (PLpgSQL_execstate *) params->paramFetchArg; + Assert(dno >= 0 && dno < estate->ndatums); /* now we can access the target datum */ datum = estate->datums[dno]; + /* fetch datum's value */ + exec_eval_datum(estate, datum, + &datumtype, &datumtypmod, + op->resvalue, op->resnull); + + /* safety check -- needed for, eg, record fields */ + if (unlikely(datumtype != op->d.cparam.paramtype)) + ereport(ERROR, + (errcode(ERRCODE_DATATYPE_MISMATCH), + errmsg("type of parameter %d (%s) does not match that when preparing the plan (%s)", + op->d.cparam.paramid, + format_type_be(datumtype), + format_type_be(op->d.cparam.paramtype)))); +} + +/* + * plpgsql_param_eval_generic_ro evaluation of EEOP_PARAM_CALLBACK step + * + * This handles all variable types, but assumes we need to invoke + * MakeExpandedObjectReadOnly (hence, variable must be of a varlena type). 
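The "does not match that when preparing the plan" check in these eval functions is reachable from SQL when a record field changes type between executions of an already-planned expression. A sketch of the kind of case it guards against (names are ours; the exact parameter number in the message depends on the function's datums):

    DO $$
    DECLARE
        r record;
        x int;
    BEGIN
        SELECT 1 AS a INTO r;
        x := r.a + 1;                -- expression planned with r.a as integer

        SELECT now() AS a INTO r;    -- same field name, different type
        x := r.a + 1;                -- expected to fail: parameter type no longer
                                     -- matches the prepared plan
    END
    $$;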
+ */ +static void +plpgsql_param_eval_generic_ro(ExprState *state, ExprEvalStep *op, + ExprContext *econtext) +{ + ParamListInfo params; + PLpgSQL_execstate *estate; + int dno = op->d.cparam.paramid - 1; + PLpgSQL_datum *datum; + Oid datumtype; + int32 datumtypmod; + + /* fetch back the hook data */ + params = econtext->ecxt_param_list_info; + estate = (PLpgSQL_execstate *) params->paramFetchArg; + Assert(dno >= 0 && dno < estate->ndatums); + + /* now we can access the target datum */ + datum = estate->datums[dno]; + + /* fetch datum's value */ + exec_eval_datum(estate, datum, + &datumtype, &datumtypmod, + op->resvalue, op->resnull); + + /* safety check -- needed for, eg, record fields */ + if (unlikely(datumtype != op->d.cparam.paramtype)) + ereport(ERROR, + (errcode(ERRCODE_DATATYPE_MISMATCH), + errmsg("type of parameter %d (%s) does not match that when preparing the plan (%s)", + op->d.cparam.paramid, + format_type_be(datumtype), + format_type_be(op->d.cparam.paramtype)))); + + /* force the value to read-only */ + *op->resvalue = MakeExpandedObjectReadOnly(*op->resvalue, + *op->resnull, + -1); +} + + +/* + * exec_move_row Move one tuple's values into a record or row + * + * tup and tupdesc may both be NULL if we're just assigning an indeterminate + * composite NULL to the target. Alternatively, can have tup be NULL and + * tupdesc not NULL, in which case we assign a row of NULLs to the target. + * + * Since this uses the mcontext for workspace, caller should eventually call + * exec_eval_cleanup to prevent long-term memory leaks. + */ +static void +exec_move_row(PLpgSQL_execstate *estate, + PLpgSQL_variable *target, + HeapTuple tup, TupleDesc tupdesc) +{ + ExpandedRecordHeader *newerh = NULL; + /* - * Since copyParamList() or SerializeParamList() will try to materialize - * every single parameter slot, it's important to do nothing when asked - * for a datum that's not supposed to be used by this SQL expression. - * Otherwise we risk failures in exec_eval_datum(), or copying a lot more - * data than necessary. + * If target is RECORD, we may be able to avoid field-by-field processing. */ - if (!bms_is_member(dno, expr->paramnos)) - return; + if (target->dtype == PLPGSQL_DTYPE_REC) + { + PLpgSQL_rec *rec = (PLpgSQL_rec *) target; + + /* + * If we have no source tupdesc, just set the record variable to NULL. + * (If we have a source tupdesc but not a tuple, we'll set the + * variable to a row of nulls, instead. This is odd perhaps, but + * backwards compatible.) + */ + if (tupdesc == NULL) + { + if (rec->datatype && + rec->datatype->typtype == TYPTYPE_DOMAIN) + { + /* + * If it's a composite domain, NULL might not be a legal + * value, so we instead need to make an empty expanded record + * and ensure that domain type checking gets done. If there + * is already an expanded record, piggyback on its lookups. + */ + newerh = make_expanded_record_for_rec(estate, rec, + NULL, rec->erh); + expanded_record_set_tuple(newerh, NULL, false, false); + assign_record_var(estate, rec, newerh); + } + else + { + /* Just clear it to NULL */ + if (rec->erh) + DeleteExpandedObject(ExpandedRecordGetDatum(rec->erh)); + rec->erh = NULL; + } + return; + } + + /* + * Build a new expanded record with appropriate tupdesc. + */ + newerh = make_expanded_record_for_rec(estate, rec, tupdesc, NULL); + + /* + * If the rowtypes match, or if we have no tuple anyway, we can + * complete the assignment without field-by-field processing. + * + * The tests here are ordered more or less in order of cheapness. 
We + * can easily detect it will work if the target is declared RECORD or + * has the same typeid as the source. But when assigning from a query + * result, it's common to have a source tupdesc that's labeled RECORD + * but is actually physically compatible with a named-composite-type + * target, so it's worth spending extra cycles to check for that. + */ + if (rec->rectypeid == RECORDOID || + rec->rectypeid == tupdesc->tdtypeid || + !HeapTupleIsValid(tup) || + compatible_tupdescs(tupdesc, expanded_record_get_tupdesc(newerh))) + { + if (!HeapTupleIsValid(tup)) + { + /* No data, so force the record into all-nulls state */ + deconstruct_expanded_record(newerh); + } + else + { + /* No coercion is needed, so just assign the row value */ + expanded_record_set_tuple(newerh, tup, true, !estate->atomic); + } + + /* Complete the assignment */ + assign_record_var(estate, rec, newerh); + + return; + } + } + + /* + * Otherwise, deconstruct the tuple and do field-by-field assignment, + * using exec_move_row_from_fields. + */ + if (tupdesc && HeapTupleIsValid(tup)) + { + int td_natts = tupdesc->natts; + Datum *values; + bool *nulls; + Datum values_local[64]; + bool nulls_local[64]; + + /* + * Need workspace arrays. If td_natts is small enough, use local + * arrays to save doing a palloc. Even if it's not small, we can + * allocate both the Datum and isnull arrays in one palloc chunk. + */ + if (td_natts <= lengthof(values_local)) + { + values = values_local; + nulls = nulls_local; + } + else + { + char *chunk; + + chunk = eval_mcontext_alloc(estate, + td_natts * (sizeof(Datum) + sizeof(bool))); + values = (Datum *) chunk; + nulls = (bool *) (chunk + td_natts * sizeof(Datum)); + } + + heap_deform_tuple(tup, tupdesc, values, nulls); + + exec_move_row_from_fields(estate, target, newerh, + values, nulls, tupdesc); + } + else + { + /* + * Assign all-nulls. + */ + exec_move_row_from_fields(estate, target, newerh, + NULL, NULL, NULL); + } +} + +/* + * Build an expanded record object suitable for assignment to "rec". + * + * Caller must supply either a source tuple descriptor or a source expanded + * record (not both). If the record variable has declared type RECORD, + * it'll adopt the source's rowtype. Even if it doesn't, we may be able to + * piggyback on a source expanded record to save a typcache lookup. + * + * Caller must fill the object with data, then do assign_record_var(). + * + * The new record is initially put into the mcontext, so it will be cleaned up + * if we fail before reaching assign_record_var(). + */ +static ExpandedRecordHeader * +make_expanded_record_for_rec(PLpgSQL_execstate *estate, + PLpgSQL_rec *rec, + TupleDesc srctupdesc, + ExpandedRecordHeader *srcerh) +{ + ExpandedRecordHeader *newerh; + MemoryContext mcontext = get_eval_mcontext(estate); - if (params == estate->paramLI) + if (rec->rectypeid != RECORDOID) { /* - * We need to mark the shared params array dirty if we're about to - * evaluate a resettable datum. + * New record must be of desired type, but maybe srcerh has already + * done all the same lookups. */ - switch (datum->dtype) + if (srcerh && rec->rectypeid == srcerh->er_decltypeid) + newerh = make_expanded_record_from_exprecord(srcerh, + mcontext); + else + newerh = make_expanded_record_from_typeid(rec->rectypeid, -1, + mcontext); + } + else + { + /* + * We'll adopt the input tupdesc. We can still use + * make_expanded_record_from_exprecord, if srcerh isn't a composite + * domain. (If it is, we effectively adopt its base type.) 
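Because make_expanded_record_for_rec keeps the declared domain type when the variable is a composite domain, assignments to such variables go through domain checking, including (per the exec_move_row comment above) assignment of NULL via an empty expanded record. A small sketch, with type and domain names of our choosing:

    CREATE TYPE pair AS (a int, b int);
    CREATE DOMAIN ordered_pair AS pair
        CHECK (VALUE IS NULL OR (VALUE).a <= (VALUE).b);

    DO $$
    DECLARE
        p ordered_pair;
    BEGIN
        p := ROW(1, 2);    -- passes the domain CHECK
        p := ROW(2, 1);    -- expected to fail the domain CHECK
    END
    $$;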
+ */ + if (srcerh && !ExpandedRecordIsDomain(srcerh)) + newerh = make_expanded_record_from_exprecord(srcerh, + mcontext); + else { - case PLPGSQL_DTYPE_ROW: - case PLPGSQL_DTYPE_REC: - case PLPGSQL_DTYPE_RECFIELD: - estate->params_dirty = true; - break; - - default: - break; + if (!srctupdesc) + srctupdesc = expanded_record_get_tupdesc(srcerh); + newerh = make_expanded_record_from_tupdesc(srctupdesc, + mcontext); } } - /* OK, evaluate the value and store into the appropriate paramlist slot */ - prm = ¶ms->params[dno]; - exec_eval_datum(estate, datum, - &prm->ptype, &prmtypmod, - &prm->value, &prm->isnull); - /* We can always mark params as "const" for executor's purposes */ - prm->pflags = PARAM_FLAG_CONST; - - /* - * If it's a read/write expanded datum, convert reference to read-only, - * unless it's safe to pass as read-write. - */ - if (datum->dtype == PLPGSQL_DTYPE_VAR && dno != expr->rwparam) - prm->value = MakeExpandedObjectReadOnly(prm->value, - prm->isnull, - ((PLpgSQL_var *) datum)->datatype->typlen); + return newerh; } - -/* ---------- - * exec_move_row Move one tuple's values into a record or row +/* + * exec_move_row_from_fields Move arrays of field values into a record or row + * + * When assigning to a record, the caller must have already created a suitable + * new expanded record object, newerh. Pass NULL when assigning to a row. * - * Since this uses exec_assign_value, caller should eventually call + * tupdesc describes the input row, which might have different column + * types and/or different dropped-column positions than the target. + * values/nulls/tupdesc can all be NULL if we just want to assign nulls to + * all fields of the record or row. + * + * Since this uses the mcontext for workspace, caller should eventually call * exec_eval_cleanup to prevent long-term memory leaks. - * ---------- */ static void -exec_move_row(PLpgSQL_execstate *estate, - PLpgSQL_rec *rec, - PLpgSQL_row *row, - HeapTuple tup, TupleDesc tupdesc) +exec_move_row_from_fields(PLpgSQL_execstate *estate, + PLpgSQL_variable *target, + ExpandedRecordHeader *newerh, + Datum *values, bool *nulls, + TupleDesc tupdesc) { + int td_natts = tupdesc ? tupdesc->natts : 0; + int fnum; + int anum; + int strict_multiassignment_level = 0; + /* - * Record is simple - just copy the tuple and its descriptor into the - * record variable + * The extra check strict strict_multi_assignment can be active, + * only when input tupdesc is specified. */ - if (rec != NULL) + if (tupdesc != NULL) + { + if (plpgsql_extra_errors & PLPGSQL_XCHECK_STRICTMULTIASSIGNMENT) + strict_multiassignment_level = ERROR; + else if (plpgsql_extra_warnings & PLPGSQL_XCHECK_STRICTMULTIASSIGNMENT) + strict_multiassignment_level = WARNING; + } + + /* Handle RECORD-target case */ + if (target->dtype == PLPGSQL_DTYPE_REC) { + PLpgSQL_rec *rec = (PLpgSQL_rec *) target; + TupleDesc var_tupdesc; + Datum newvalues_local[64]; + bool newnulls_local[64]; + + Assert(newerh != NULL); /* caller must have built new object */ + + var_tupdesc = expanded_record_get_tupdesc(newerh); + /* - * Copy input first, just in case it is pointing at variable's value + * Coerce field values if needed. This might involve dealing with + * different sets of dropped columns and/or coercing individual column + * types. That's sort of a pain, but historically plpgsql has allowed + * it, so we preserve the behavior. However, it's worth a quick check + * to see if the tupdescs are identical. 
(Since expandedrecord.c + * prefers to use refcounted tupdescs from the typcache, expanded + * records with the same rowtype will have pointer-equal tupdescs.) */ - if (HeapTupleIsValid(tup)) - tup = heap_copytuple(tup); - else if (tupdesc) + if (var_tupdesc != tupdesc) { - /* If we have a tupdesc but no data, form an all-nulls tuple */ - bool *nulls; + int vtd_natts = var_tupdesc->natts; + Datum *newvalues; + bool *newnulls; - nulls = (bool *) - eval_mcontext_alloc(estate, tupdesc->natts * sizeof(bool)); - memset(nulls, true, tupdesc->natts * sizeof(bool)); + /* + * Need workspace arrays. If vtd_natts is small enough, use local + * arrays to save doing a palloc. Even if it's not small, we can + * allocate both the Datum and isnull arrays in one palloc chunk. + */ + if (vtd_natts <= lengthof(newvalues_local)) + { + newvalues = newvalues_local; + newnulls = newnulls_local; + } + else + { + char *chunk; - tup = heap_form_tuple(tupdesc, NULL, nulls); - } + chunk = eval_mcontext_alloc(estate, + vtd_natts * (sizeof(Datum) + sizeof(bool))); + newvalues = (Datum *) chunk; + newnulls = (bool *) (chunk + vtd_natts * sizeof(Datum)); + } - if (tupdesc) - tupdesc = CreateTupleDescCopy(tupdesc); + /* Walk over destination columns */ + anum = 0; + for (fnum = 0; fnum < vtd_natts; fnum++) + { + Form_pg_attribute attr = TupleDescAttr(var_tupdesc, fnum); + Datum value; + bool isnull; + Oid valtype; + int32 valtypmod; - /* Free the old value ... */ - if (rec->freetup) - { - heap_freetuple(rec->tup); - rec->freetup = false; - } - if (rec->freetupdesc) - { - FreeTupleDesc(rec->tupdesc); - rec->freetupdesc = false; - } + if (attr->attisdropped) + { + /* expanded_record_set_fields should ignore this column */ + continue; /* skip dropped column in record */ + } - /* ... and install the new */ - if (HeapTupleIsValid(tup)) - { - rec->tup = tup; - rec->freetup = true; - } - else - rec->tup = NULL; + while (anum < td_natts && + TupleDescAttr(tupdesc, anum)->attisdropped) + anum++; /* skip dropped column in tuple */ - if (tupdesc) - { - rec->tupdesc = tupdesc; - rec->freetupdesc = true; + if (anum < td_natts) + { + value = values[anum]; + isnull = nulls[anum]; + valtype = TupleDescAttr(tupdesc, anum)->atttypid; + valtypmod = TupleDescAttr(tupdesc, anum)->atttypmod; + anum++; + } + else + { + /* no source for destination column */ + value = (Datum) 0; + isnull = true; + valtype = UNKNOWNOID; + valtypmod = -1; + + /* When source value is missing */ + if (strict_multiassignment_level) + ereport(strict_multiassignment_level, + (errcode(ERRCODE_DATATYPE_MISMATCH), + errmsg("number of source and target fields in assignment do not match"), + /* translator: %s represents a name of an extra check */ + errdetail("%s check of %s is active.", + "strict_multi_assignment", + strict_multiassignment_level == ERROR ? "extra_errors" : + "extra_warnings"), + errhint("Make sure the query returns the exact list of columns."))); + } + + /* Cast the new value to the right type, if needed. */ + newvalues[fnum] = exec_cast_value(estate, + value, + &isnull, + valtype, + valtypmod, + attr->atttypid, + attr->atttypmod); + newnulls[fnum] = isnull; + } + + /* + * When strict_multiassignment extra check is active, then ensure + * there are no unassigned source attributes. 
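The strict_multiassignment extra check enforced here is driven by the existing plpgsql.extra_errors and plpgsql.extra_warnings settings; a minimal way to trigger the error path (variable names are ours):

    SET plpgsql.extra_errors = 'strict_multi_assignment';

    DO $$
    DECLARE
        a int;
        b text;
    BEGIN
        SELECT 1 INTO a, b;    -- one source column, two target fields
    END
    $$;
    -- ERROR:  number of source and target fields in assignment do not match
    -- DETAIL:  strict_multi_assignment check of extra_errors is active.
    -- HINT:  Make sure the query returns the exact list of columns.

With plpgsql.extra_warnings instead, the same condition is reported as a WARNING and the leftover target is set to NULL, which is the historical behavior.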
+ */ + if (strict_multiassignment_level && anum < td_natts) + { + /* skip dropped columns in the source descriptor */ + while (anum < td_natts && + TupleDescAttr(tupdesc, anum)->attisdropped) + anum++; + + if (anum < td_natts) + ereport(strict_multiassignment_level, + (errcode(ERRCODE_DATATYPE_MISMATCH), + errmsg("number of source and target fields in assignment do not match"), + /* translator: %s represents a name of an extra check */ + errdetail("%s check of %s is active.", + "strict_multi_assignment", + strict_multiassignment_level == ERROR ? "extra_errors" : + "extra_warnings"), + errhint("Make sure the query returns the exact list of columns."))); + } + + values = newvalues; + nulls = newnulls; } - else - rec->tupdesc = NULL; + + /* Insert the coerced field values into the new expanded record */ + expanded_record_set_fields(newerh, values, nulls, !estate->atomic); + + /* Complete the assignment */ + assign_record_var(estate, rec, newerh); return; } + /* newerh should not have been passed in non-RECORD cases */ + Assert(newerh == NULL); + /* - * Row is a bit more complicated in that we assign the individual - * attributes of the tuple to the variables the row points to. + * For a row, we assign the individual field values to the variables the + * row points to. * - * NOTE: this code used to demand row->nfields == - * HeapTupleHeaderGetNatts(tup->t_data), but that's wrong. The tuple - * might have more fields than we expected if it's from an - * inheritance-child table of the current table, or it might have fewer if - * the table has had columns added by ALTER TABLE. Ignore extra columns - * and assume NULL for missing columns, the same as heap_getattr would do. - * We also have to skip over dropped columns in either the source or - * destination. + * NOTE: both this code and the record code above silently ignore extra + * columns in the source and assume NULL for missing columns. This is + * pretty dubious but it's the historical behavior. * - * If we have no tuple data at all, we'll assign NULL to all columns of + * If we have no input data at all, we'll assign NULL to all columns of * the row variable. */ - if (row != NULL) + if (target->dtype == PLPGSQL_DTYPE_ROW) { - int td_natts = tupdesc ? 
tupdesc->natts : 0; - int t_natts; - int fnum; - int anum; - - if (HeapTupleIsValid(tup)) - t_natts = HeapTupleHeaderGetNatts(tup->t_data); - else - t_natts = 0; + PLpgSQL_row *row = (PLpgSQL_row *) target; anum = 0; for (fnum = 0; fnum < row->nfields; fnum++) @@ -6025,43 +7060,111 @@ exec_move_row(PLpgSQL_execstate *estate, Oid valtype; int32 valtypmod; - if (row->varnos[fnum] < 0) - continue; /* skip dropped column in row struct */ - var = (PLpgSQL_var *) (estate->datums[row->varnos[fnum]]); - while (anum < td_natts && tupdesc->attrs[anum]->attisdropped) + while (anum < td_natts && + TupleDescAttr(tupdesc, anum)->attisdropped) anum++; /* skip dropped column in tuple */ if (anum < td_natts) { - if (anum < t_natts) - value = SPI_getbinval(tup, tupdesc, anum + 1, &isnull); - else - { - value = (Datum) 0; - isnull = true; - } - valtype = tupdesc->attrs[anum]->atttypid; - valtypmod = tupdesc->attrs[anum]->atttypmod; + value = values[anum]; + isnull = nulls[anum]; + valtype = TupleDescAttr(tupdesc, anum)->atttypid; + valtypmod = TupleDescAttr(tupdesc, anum)->atttypmod; anum++; } else { + /* no source for destination column */ value = (Datum) 0; isnull = true; valtype = UNKNOWNOID; valtypmod = -1; + + if (strict_multiassignment_level) + ereport(strict_multiassignment_level, + (errcode(ERRCODE_DATATYPE_MISMATCH), + errmsg("number of source and target fields in assignment do not match"), + /* translator: %s represents a name of an extra check */ + errdetail("%s check of %s is active.", + "strict_multi_assignment", + strict_multiassignment_level == ERROR ? "extra_errors" : + "extra_warnings"), + errhint("Make sure the query returns the exact list of columns."))); } exec_assign_value(estate, (PLpgSQL_datum *) var, value, isnull, valtype, valtypmod); } + /* + * When strict_multiassignment extra check is active, ensure there + * are no unassigned source attributes. + */ + if (strict_multiassignment_level && anum < td_natts) + { + while (anum < td_natts && + TupleDescAttr(tupdesc, anum)->attisdropped) + anum++; /* skip dropped column in tuple */ + + if (anum < td_natts) + ereport(strict_multiassignment_level, + (errcode(ERRCODE_DATATYPE_MISMATCH), + errmsg("number of source and target fields in assignment do not match"), + /* translator: %s represents a name of an extra check */ + errdetail("%s check of %s is active.", + "strict_multi_assignment", + strict_multiassignment_level == ERROR ? "extra_errors" : + "extra_warnings"), + errhint("Make sure the query returns the exact list of columns."))); + } + return; } - elog(ERROR, "unsupported target"); + elog(ERROR, "unsupported target type: %d", target->dtype); +} + +/* + * compatible_tupdescs: detect whether two tupdescs are physically compatible + * + * TRUE indicates that a tuple satisfying src_tupdesc can be used directly as + * a value for a composite variable using dst_tupdesc. + */ +static bool +compatible_tupdescs(TupleDesc src_tupdesc, TupleDesc dst_tupdesc) +{ + int i; + + /* Possibly we could allow src_tupdesc to have extra columns? 
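A sketch of the situation compatible_tupdescs is meant to catch (table and variable names are ours): the query result's rowtype is labeled RECORD, but it is physically identical to the named composite target, so when the comparison succeeds the row can be adopted wholesale instead of being coerced field by field.

    CREATE TABLE events(id int, payload text);

    DO $$
    DECLARE
        e events;
    BEGIN
        SELECT 1, 'hello'::text INTO e;   -- int/text, same layout as "events"
        RAISE NOTICE '% / %', e.id, e.payload;
    END
    $$;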
*/ + if (dst_tupdesc->natts != src_tupdesc->natts) + return false; + + for (i = 0; i < dst_tupdesc->natts; i++) + { + Form_pg_attribute dattr = TupleDescAttr(dst_tupdesc, i); + Form_pg_attribute sattr = TupleDescAttr(src_tupdesc, i); + + if (dattr->attisdropped != sattr->attisdropped) + return false; + if (!dattr->attisdropped) + { + /* Normal columns must match by type and typmod */ + if (dattr->atttypid != sattr->atttypid || + (dattr->atttypmod >= 0 && + dattr->atttypmod != sattr->atttypmod)) + return false; + } + else + { + /* Dropped columns are OK as long as length/alignment match */ + if (dattr->attlen != sattr->attlen || + dattr->attalign != sattr->attalign) + return false; + } + } + return true; } /* ---------- @@ -6095,18 +7198,16 @@ make_tuple_from_row(PLpgSQL_execstate *estate, Oid fieldtypeid; int32 fieldtypmod; - if (tupdesc->attrs[i]->attisdropped) + if (TupleDescAttr(tupdesc, i)->attisdropped) { nulls[i] = true; /* leave the column as null */ continue; } - if (row->varnos[i] < 0) /* should not happen */ - elog(ERROR, "dropped rowtype entry for non-dropped column"); exec_eval_datum(estate, estate->datums[row->varnos[i]], &fieldtypeid, &fieldtypmod, &dvalues[i], &nulls[i]); - if (fieldtypeid != tupdesc->attrs[i]->atttypid) + if (fieldtypeid != TupleDescAttr(tupdesc, i)->atttypid) return NULL; /* XXX should we insist on typmod match, too? */ } @@ -6116,87 +7217,294 @@ make_tuple_from_row(PLpgSQL_execstate *estate, return tuple; } -/* ---------- - * get_tuple_from_datum extract a tuple from a composite Datum - * - * Returns a HeapTuple, freshly palloc'd in caller's context. +/* + * deconstruct_composite_datum extract tuple+tupdesc from composite Datum * - * Note: it's caller's responsibility to be sure value is of composite type. - * ---------- - */ -static HeapTuple -get_tuple_from_datum(Datum value) -{ - HeapTupleHeader td = DatumGetHeapTupleHeader(value); - HeapTupleData tmptup; - - /* Build a temporary HeapTuple control structure */ - tmptup.t_len = HeapTupleHeaderGetDatumLength(td); - ItemPointerSetInvalid(&(tmptup.t_self)); - tmptup.t_tableOid = InvalidOid; - tmptup.t_data = td; - - /* Build a copy and return it */ - return heap_copytuple(&tmptup); -} - -/* ---------- - * get_tupdesc_from_datum get a tuple descriptor for a composite Datum + * The caller must supply a HeapTupleData variable, in which we set up a + * tuple header pointing to the composite datum's body. To make the tuple + * value outlive that variable, caller would need to apply heap_copytuple... + * but current callers only need a short-lived tuple value anyway. * - * Returns a pointer to the TupleDesc of the tuple's rowtype. + * Returns a pointer to the TupleDesc of the datum's rowtype. * Caller is responsible for calling ReleaseTupleDesc when done with it. * * Note: it's caller's responsibility to be sure value is of composite type. - * ---------- + * Also, best to call this in a short-lived context, as it might leak memory. 
*/ static TupleDesc -get_tupdesc_from_datum(Datum value) +deconstruct_composite_datum(Datum value, HeapTupleData *tmptup) { - HeapTupleHeader td = DatumGetHeapTupleHeader(value); + HeapTupleHeader td; Oid tupType; int32 tupTypmod; + /* Get tuple body (note this could involve detoasting) */ + td = DatumGetHeapTupleHeader(value); + + /* Build a temporary HeapTuple control structure */ + tmptup->t_len = HeapTupleHeaderGetDatumLength(td); + ItemPointerSetInvalid(&(tmptup->t_self)); + tmptup->t_tableOid = InvalidOid; + tmptup->t_data = td; + /* Extract rowtype info and find a tupdesc */ tupType = HeapTupleHeaderGetTypeId(td); tupTypmod = HeapTupleHeaderGetTypMod(td); return lookup_rowtype_tupdesc(tupType, tupTypmod); } -/* ---------- +/* * exec_move_row_from_datum Move a composite Datum into a record or row * - * This is equivalent to get_tuple_from_datum() followed by exec_move_row(), - * but we avoid constructing an intermediate physical copy of the tuple. - * ---------- + * This is equivalent to deconstruct_composite_datum() followed by + * exec_move_row(), but we can optimize things if the Datum is an + * expanded-record reference. + * + * Note: it's caller's responsibility to be sure value is of composite type. */ static void exec_move_row_from_datum(PLpgSQL_execstate *estate, - PLpgSQL_rec *rec, - PLpgSQL_row *row, + PLpgSQL_variable *target, Datum value) { - HeapTupleHeader td = DatumGetHeapTupleHeader(value); - Oid tupType; - int32 tupTypmod; - TupleDesc tupdesc; - HeapTupleData tmptup; + /* Check to see if source is an expanded record */ + if (VARATT_IS_EXTERNAL_EXPANDED(DatumGetPointer(value))) + { + ExpandedRecordHeader *erh = (ExpandedRecordHeader *) DatumGetEOHP(value); + ExpandedRecordHeader *newerh = NULL; - /* Extract rowtype info and find a tupdesc */ - tupType = HeapTupleHeaderGetTypeId(td); - tupTypmod = HeapTupleHeaderGetTypMod(td); - tupdesc = lookup_rowtype_tupdesc(tupType, tupTypmod); + Assert(erh->er_magic == ER_MAGIC); - /* Build a temporary HeapTuple control structure */ - tmptup.t_len = HeapTupleHeaderGetDatumLength(td); - ItemPointerSetInvalid(&(tmptup.t_self)); - tmptup.t_tableOid = InvalidOid; - tmptup.t_data = td; + /* These cases apply if the target is record not row... */ + if (target->dtype == PLPGSQL_DTYPE_REC) + { + PLpgSQL_rec *rec = (PLpgSQL_rec *) target; + + /* + * If it's the same record already stored in the variable, do + * nothing. This would happen only in silly cases like "r := r", + * but we need some check to avoid possibly freeing the variable's + * live value below. Note that this applies even if what we have + * is a R/O pointer. + */ + if (erh == rec->erh) + return; + + /* + * If we have a R/W pointer, we're allowed to just commandeer + * ownership of the expanded record. If it's of the right type to + * put into the record variable, do that. (Note we don't accept + * an expanded record of a composite-domain type as a RECORD + * value. We'll treat it as the base composite type instead; + * compare logic in make_expanded_record_for_rec.) 
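The record-to-record cases discussed above, including the "silly" self-assignment the code must not mishandle, look like this at the SQL level (variable names are ours; which internal shortcut applies is exactly what the comments above describe):

    DO $$
    DECLARE
        r1 record;
        r2 record;
    BEGIN
        SELECT 1 AS a, 'x' AS b INTO r1;

        r2 := r1;    -- source is an expanded record held by another variable
        r1 := r1;    -- self-assignment: must leave r1's live value alone
        RAISE NOTICE '% %', r1.a, r2.b;
    END
    $$;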
+ */ + if (VARATT_IS_EXTERNAL_EXPANDED_RW(DatumGetPointer(value)) && + (rec->rectypeid == erh->er_decltypeid || + (rec->rectypeid == RECORDOID && + !ExpandedRecordIsDomain(erh)))) + { + assign_record_var(estate, rec, erh); + return; + } + + /* + * If we already have an expanded record object in the target + * variable, and the source record contains a valid tuple + * representation with the right rowtype, then we can skip making + * a new expanded record and just assign the tuple with + * expanded_record_set_tuple. (We can't do the equivalent if we + * have to do field-by-field assignment, since that wouldn't be + * atomic if there's an error.) We consider that there's a + * rowtype match only if it's the same named composite type or + * same registered rowtype; checking for matches of anonymous + * rowtypes would be more expensive than this is worth. + */ + if (rec->erh && + (erh->flags & ER_FLAG_FVALUE_VALID) && + erh->er_typeid == rec->erh->er_typeid && + (erh->er_typeid != RECORDOID || + (erh->er_typmod == rec->erh->er_typmod && + erh->er_typmod >= 0))) + { + expanded_record_set_tuple(rec->erh, erh->fvalue, + true, !estate->atomic); + return; + } + + /* + * Otherwise we're gonna need a new expanded record object. Make + * it here in hopes of piggybacking on the source object's + * previous typcache lookup. + */ + newerh = make_expanded_record_for_rec(estate, rec, NULL, erh); + + /* + * If the expanded record contains a valid tuple representation, + * and we don't need rowtype conversion, then just copying the + * tuple is probably faster than field-by-field processing. (This + * isn't duplicative of the previous check, since here we will + * catch the case where the record variable was previously empty.) + */ + if ((erh->flags & ER_FLAG_FVALUE_VALID) && + (rec->rectypeid == RECORDOID || + rec->rectypeid == erh->er_typeid)) + { + expanded_record_set_tuple(newerh, erh->fvalue, + true, !estate->atomic); + assign_record_var(estate, rec, newerh); + return; + } + + /* + * Need to special-case empty source record, else code below would + * leak newerh. + */ + if (ExpandedRecordIsEmpty(erh)) + { + /* Set newerh to a row of NULLs */ + deconstruct_expanded_record(newerh); + assign_record_var(estate, rec, newerh); + return; + } + } /* end of record-target-only cases */ + + /* + * If the source expanded record is empty, we should treat that like a + * NULL tuple value. (We're unlikely to see such a case, but we must + * check this; deconstruct_expanded_record would cause a change of + * logical state, which is not OK.) + */ + if (ExpandedRecordIsEmpty(erh)) + { + exec_move_row(estate, target, NULL, + expanded_record_get_tupdesc(erh)); + return; + } + + /* + * Otherwise, ensure that the source record is deconstructed, and + * assign from its field values. + */ + deconstruct_expanded_record(erh); + exec_move_row_from_fields(estate, target, newerh, + erh->dvalues, erh->dnulls, + expanded_record_get_tupdesc(erh)); + } + else + { + /* + * Nope, we've got a plain composite Datum. Deconstruct it; but we + * don't use deconstruct_composite_datum(), because we may be able to + * skip calling lookup_rowtype_tupdesc(). 
+ */ + HeapTupleHeader td; + HeapTupleData tmptup; + Oid tupType; + int32 tupTypmod; + TupleDesc tupdesc; + MemoryContext oldcontext; + + /* Ensure that any detoasted data winds up in the eval_mcontext */ + oldcontext = MemoryContextSwitchTo(get_eval_mcontext(estate)); + /* Get tuple body (note this could involve detoasting) */ + td = DatumGetHeapTupleHeader(value); + MemoryContextSwitchTo(oldcontext); + + /* Build a temporary HeapTuple control structure */ + tmptup.t_len = HeapTupleHeaderGetDatumLength(td); + ItemPointerSetInvalid(&(tmptup.t_self)); + tmptup.t_tableOid = InvalidOid; + tmptup.t_data = td; + + /* Extract rowtype info */ + tupType = HeapTupleHeaderGetTypeId(td); + tupTypmod = HeapTupleHeaderGetTypMod(td); + + /* Now, if the target is record not row, maybe we can optimize ... */ + if (target->dtype == PLPGSQL_DTYPE_REC) + { + PLpgSQL_rec *rec = (PLpgSQL_rec *) target; + + /* + * If we already have an expanded record object in the target + * variable, and the source datum has a matching rowtype, then we + * can skip making a new expanded record and just assign the tuple + * with expanded_record_set_tuple. We consider that there's a + * rowtype match only if it's the same named composite type or + * same registered rowtype. (Checking to reject an anonymous + * rowtype here should be redundant, but let's be safe.) + */ + if (rec->erh && + tupType == rec->erh->er_typeid && + (tupType != RECORDOID || + (tupTypmod == rec->erh->er_typmod && + tupTypmod >= 0))) + { + expanded_record_set_tuple(rec->erh, &tmptup, + true, !estate->atomic); + return; + } + + /* + * If the source datum has a rowtype compatible with the target + * variable, just build a new expanded record and assign the tuple + * into it. Using make_expanded_record_from_typeid() here saves + * one typcache lookup compared to the code below. + */ + if (rec->rectypeid == RECORDOID || rec->rectypeid == tupType) + { + ExpandedRecordHeader *newerh; + MemoryContext mcontext = get_eval_mcontext(estate); + + newerh = make_expanded_record_from_typeid(tupType, tupTypmod, + mcontext); + expanded_record_set_tuple(newerh, &tmptup, + true, !estate->atomic); + assign_record_var(estate, rec, newerh); + return; + } + + /* + * Otherwise, we're going to need conversion, so fall through to + * do it the hard way. + */ + } + + /* + * ROW target, or unoptimizable RECORD target, so we have to expend a + * lookup to obtain the source datum's tupdesc. + */ + tupdesc = lookup_rowtype_tupdesc(tupType, tupTypmod); + + /* Do the move */ + exec_move_row(estate, target, &tmptup, tupdesc); + + /* Release tupdesc usage count */ + ReleaseTupleDesc(tupdesc); + } +} + +/* + * If we have not created an expanded record to hold the record variable's + * value, do so. The expanded record will be "empty", so this does not + * change the logical state of the record variable: it's still NULL. + * However, now we'll have a tupdesc with which we can e.g. look up fields. 
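The user-visible consequence of instantiate_empty_record_variable is the difference between an unassigned variable of a named composite type, whose fields read as NULL once an empty expanded record supplies the tupdesc, and an unassigned variable declared RECORD, which cannot be instantiated and raises the error below (table and variable names are ours):

    CREATE TABLE items(id int, label text);

    DO $$
    DECLARE
        v items;      -- named composite type: can be instantiated empty
        r record;     -- declared RECORD: structure is indeterminate
    BEGIN
        RAISE NOTICE 'v.id = %', v.id;   -- NULL
        RAISE NOTICE 'r.id = %', r.id;   -- ERROR: record "r" is not assigned yet
    END
    $$;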
+ */ +static void +instantiate_empty_record_variable(PLpgSQL_execstate *estate, PLpgSQL_rec *rec) +{ + Assert(rec->erh == NULL); /* else caller error */ - /* Do the move */ - exec_move_row(estate, rec, row, &tmptup, tupdesc); + /* If declared type is RECORD, we can't instantiate */ + if (rec->rectypeid == RECORDOID) + ereport(ERROR, + (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE), + errmsg("record \"%s\" is not assigned yet", rec->refname), + errdetail("The tuple structure of a not-yet-assigned record is indeterminate."))); - /* Release tupdesc usage count */ - ReleaseTupleDesc(tupdesc); + /* OK, do it */ + rec->erh = make_expanded_record_from_typeid(rec->rectypeid, -1, + estate->datum_context); } /* ---------- @@ -6291,7 +7599,7 @@ exec_cast_value(PLpgSQL_execstate *estate, * or NULL if the cast is a mere no-op relabeling. If there's work to be * done, the cast_exprstate field contains an expression evaluation tree * based on a CaseTestExpr input, and the cast_in_use field should be set - * TRUE while executing it. + * true while executing it. * ---------- */ static plpgsql_CastHashEntry * @@ -6570,8 +7878,8 @@ exec_save_simple_expr(PLpgSQL_expr *expr, CachedPlan *cplan) * force_parallel_mode is on, the planner might've stuck a Gather node * atop that. The simplest way to deal with this is to look through the * Gather node. The Gather node's tlist would normally contain a Var - * referencing the child node's output ... but setrefs.c might also have - * copied a Const as-is. + * referencing the child node's output, but it could also be a Param, or + * it could be a Const that setrefs.c copied as-is. */ plan = stmt->planTree; for (;;) @@ -6598,9 +7906,9 @@ exec_save_simple_expr(PLpgSQL_expr *expr, CachedPlan *cplan) /* If setrefs.c copied up a Const, no need to look further */ if (IsA(tle_expr, Const)) break; - /* Otherwise, it better be an outer Var */ - Assert(IsA(tle_expr, Var)); - Assert(((Var *) tle_expr)->varno == OUTER_VAR); + /* Otherwise, it had better be a Param or an outer Var */ + Assert(IsA(tle_expr, Param) ||(IsA(tle_expr, Var) && + ((Var *) tle_expr)->varno == OUTER_VAR)); /* Descend to the child node */ plan = plan->lefttree; } @@ -6767,11 +8075,13 @@ plpgsql_create_econtext(PLpgSQL_execstate *estate) { MemoryContext oldcontext; - Assert(shared_simple_eval_estate == NULL); - oldcontext = MemoryContextSwitchTo(TopTransactionContext); - shared_simple_eval_estate = CreateExecutorState(); + if (shared_simple_eval_estate == NULL) + { + oldcontext = MemoryContextSwitchTo(TopTransactionContext); + shared_simple_eval_estate = CreateExecutorState(); + MemoryContextSwitchTo(oldcontext); + } estate->simple_eval_estate = shared_simple_eval_estate; - MemoryContextSwitchTo(oldcontext); } /* @@ -6833,8 +8143,7 @@ plpgsql_xact_cb(XactEvent event, void *arg) */ if (event == XACT_EVENT_COMMIT || event == XACT_EVENT_PREPARE) { - /* Shouldn't be any econtext stack entries left at commit */ - Assert(simple_econtext_stack == NULL); + simple_econtext_stack = NULL; if (shared_simple_eval_estate) FreeExecutorState(shared_simple_eval_estate); @@ -6878,15 +8187,50 @@ plpgsql_subxact_cb(SubXactEvent event, SubTransactionId mySubid, * assign_simple_var --- assign a new value to any VAR datum. * * This should be the only mechanism for assignment to simple variables, - * lest we forget to update the paramLI image. + * lest we do the release of the old value incorrectly (not to mention + * the detoasting business). 
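The non-atomic detoasting added to assign_simple_var below matters once procedures can commit mid-execution. A sketch using the new transaction-control support (object names are ours; the claim about why the value stays valid simply restates the comment's rationale):

    CREATE TABLE audit_log(msg text);

    CREATE PROCEDURE batch_insert(n int)
    LANGUAGE plpgsql
    AS $$
    DECLARE
        note text;
    BEGIN
        FOR i IN 1 .. n LOOP
            note := 'row ' || i;
            INSERT INTO audit_log VALUES (note);
            COMMIT;                  -- allowed in a procedure (non-atomic context)
        END LOOP;
        -- "note" must remain usable here even though the transaction that
        -- produced it has committed; hence the eager detoasting below.
        RAISE NOTICE 'last note was %', note;
    END
    $$;

    CALL batch_insert(3);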
*/ static void assign_simple_var(PLpgSQL_execstate *estate, PLpgSQL_var *var, Datum newvalue, bool isnull, bool freeable) { - ParamExternData *prm; + Assert(var->dtype == PLPGSQL_DTYPE_VAR || + var->dtype == PLPGSQL_DTYPE_PROMISE); + + /* + * In non-atomic contexts, we do not want to store TOAST pointers in + * variables, because such pointers might become stale after a commit. + * Forcibly detoast in such cases. We don't want to detoast (flatten) + * expanded objects, however; those should be OK across a transaction + * boundary since they're just memory-resident objects. (Elsewhere in + * this module, operations on expanded records likewise need to request + * detoasting of record fields when !estate->atomic. Expanded arrays are + * not a problem since all array entries are always detoasted.) + */ + if (!estate->atomic && !isnull && var->datatype->typlen == -1 && + VARATT_IS_EXTERNAL_NON_EXPANDED(DatumGetPointer(newvalue))) + { + MemoryContext oldcxt; + Datum detoasted; + + /* + * Do the detoasting in the eval_mcontext to avoid long-term leakage + * of whatever memory toast fetching might leak. Then we have to copy + * the detoasted datum to the function's main context, which is a + * pain, but there's little choice. + */ + oldcxt = MemoryContextSwitchTo(get_eval_mcontext(estate)); + detoasted = PointerGetDatum(heap_tuple_fetch_attr((struct varlena *) DatumGetPointer(newvalue))); + MemoryContextSwitchTo(oldcxt); + /* Now's a good time to not leak the input value if it's freeable */ + if (freeable) + pfree(DatumGetPointer(newvalue)); + /* Once we copy the value, it's definitely freeable */ + newvalue = datumCopy(detoasted, false, -1); + freeable = true; + /* Can't clean up eval_mcontext here, but it'll happen before long */ + } - Assert(var->dtype == PLPGSQL_DTYPE_VAR); /* Free the old value if needed */ if (var->freeval) { @@ -6901,15 +8245,13 @@ assign_simple_var(PLpgSQL_execstate *estate, PLpgSQL_var *var, var->value = newvalue; var->isnull = isnull; var->freeval = freeable; - /* And update the image in the common parameter list */ - prm = &estate->paramLI->params[var->dno]; - prm->value = MakeExpandedObjectReadOnly(newvalue, - isnull, - var->datatype->typlen); - prm->isnull = isnull; - /* these might be set already, but let's be sure */ - prm->pflags = PARAM_FLAG_CONST; - prm->ptype = var->datatype->typoid; + + /* + * If it's a promise variable, then either we just assigned the promised + * value, or the user explicitly assigned an overriding value. Either + * way, cancel the promise. + */ + var->promise = PLPGSQL_PROMISE_NONE; } /* @@ -6921,6 +8263,26 @@ assign_text_var(PLpgSQL_execstate *estate, PLpgSQL_var *var, const char *str) assign_simple_var(estate, var, CStringGetTextDatum(str), false, true); } +/* + * assign_record_var --- assign a new value to any REC datum. + */ +static void +assign_record_var(PLpgSQL_execstate *estate, PLpgSQL_rec *rec, + ExpandedRecordHeader *erh) +{ + Assert(rec->dtype == PLPGSQL_DTYPE_REC); + + /* Transfer new record object into datum_context */ + TransferExpandedRecord(erh, estate->datum_context); + + /* Free the old value ... */ + if (rec->erh) + DeleteExpandedObject(ExpandedRecordGetDatum(rec->erh)); + + /* ... 
and install the new */ + rec->erh = erh; +} + /* * exec_eval_using_params --- evaluate params of USING clause * diff --git a/src/pl/plpgsql/src/pl_funcs.c b/src/pl/plpgsql/src/pl_funcs.c index cd44a8e9a3..b93f866223 100644 --- a/src/pl/plpgsql/src/pl_funcs.c +++ b/src/pl/plpgsql/src/pl_funcs.c @@ -3,7 +3,7 @@ * pl_funcs.c - Misc functions for the PL/pgSQL * procedural language * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * @@ -113,7 +113,7 @@ plpgsql_ns_additem(PLpgSQL_nsitem_type itemtype, int itemno, const char *name) * * Note that this only searches for variables, not labels. * - * If localmode is TRUE, only the topmost block level is searched. + * If localmode is true, only the topmost block level is searched. * * name1 must be non-NULL. Pass NULL for name2 and/or name3 if parsing a name * with fewer than three components. @@ -284,6 +284,14 @@ plpgsql_stmt_typename(PLpgSQL_stmt *stmt) return "CLOSE"; case PLPGSQL_STMT_PERFORM: return "PERFORM"; + case PLPGSQL_STMT_CALL: + return ((PLpgSQL_stmt_call *) stmt)->is_call ? "CALL" : "DO"; + case PLPGSQL_STMT_COMMIT: + return "COMMIT"; + case PLPGSQL_STMT_ROLLBACK: + return "ROLLBACK"; + case PLPGSQL_STMT_SET: + return "SET"; } return "unknown"; @@ -363,6 +371,10 @@ static void free_open(PLpgSQL_stmt_open *stmt); static void free_fetch(PLpgSQL_stmt_fetch *stmt); static void free_close(PLpgSQL_stmt_close *stmt); static void free_perform(PLpgSQL_stmt_perform *stmt); +static void free_call(PLpgSQL_stmt_call *stmt); +static void free_commit(PLpgSQL_stmt_commit *stmt); +static void free_rollback(PLpgSQL_stmt_rollback *stmt); +static void free_set(PLpgSQL_stmt_set *stmt); static void free_expr(PLpgSQL_expr *expr); @@ -443,6 +455,18 @@ free_stmt(PLpgSQL_stmt *stmt) case PLPGSQL_STMT_PERFORM: free_perform((PLpgSQL_stmt_perform *) stmt); break; + case PLPGSQL_STMT_CALL: + free_call((PLpgSQL_stmt_call *) stmt); + break; + case PLPGSQL_STMT_COMMIT: + free_commit((PLpgSQL_stmt_commit *) stmt); + break; + case PLPGSQL_STMT_ROLLBACK: + free_rollback((PLpgSQL_stmt_rollback *) stmt); + break; + case PLPGSQL_STMT_SET: + free_set((PLpgSQL_stmt_set *) stmt); + break; default: elog(ERROR, "unrecognized cmd_type: %d", stmt->cmd_type); break; @@ -590,6 +614,28 @@ free_perform(PLpgSQL_stmt_perform *stmt) free_expr(stmt->expr); } +static void +free_call(PLpgSQL_stmt_call *stmt) +{ + free_expr(stmt->expr); +} + +static void +free_commit(PLpgSQL_stmt_commit *stmt) +{ +} + +static void +free_rollback(PLpgSQL_stmt_rollback *stmt) +{ +} + +static void +free_set(PLpgSQL_stmt_set *stmt) +{ + free_expr(stmt->expr); +} + static void free_exit(PLpgSQL_stmt_exit *stmt) { @@ -707,6 +753,7 @@ plpgsql_free_function_memory(PLpgSQL_function *func) switch (d->dtype) { case PLPGSQL_DTYPE_VAR: + case PLPGSQL_DTYPE_PROMISE: { PLpgSQL_var *var = (PLpgSQL_var *) d; @@ -717,6 +764,11 @@ plpgsql_free_function_memory(PLpgSQL_function *func) case PLPGSQL_DTYPE_ROW: break; case PLPGSQL_DTYPE_REC: + { + PLpgSQL_rec *rec = (PLpgSQL_rec *) d; + + free_expr(rec->default_val); + } break; case PLPGSQL_DTYPE_RECFIELD: break; @@ -777,6 +829,10 @@ static void dump_fetch(PLpgSQL_stmt_fetch *stmt); static void dump_cursor_direction(PLpgSQL_stmt_fetch *stmt); static void dump_close(PLpgSQL_stmt_close *stmt); static void dump_perform(PLpgSQL_stmt_perform *stmt); +static void dump_call(PLpgSQL_stmt_call *stmt); +static void 
dump_commit(PLpgSQL_stmt_commit *stmt); +static void dump_rollback(PLpgSQL_stmt_rollback *stmt); +static void dump_set(PLpgSQL_stmt_set *stmt); static void dump_expr(PLpgSQL_expr *expr); @@ -867,6 +923,18 @@ dump_stmt(PLpgSQL_stmt *stmt) case PLPGSQL_STMT_PERFORM: dump_perform((PLpgSQL_stmt_perform *) stmt); break; + case PLPGSQL_STMT_CALL: + dump_call((PLpgSQL_stmt_call *) stmt); + break; + case PLPGSQL_STMT_COMMIT: + dump_commit((PLpgSQL_stmt_commit *) stmt); + break; + case PLPGSQL_STMT_ROLLBACK: + dump_rollback((PLpgSQL_stmt_rollback *) stmt); + break; + case PLPGSQL_STMT_SET: + dump_set((PLpgSQL_stmt_set *) stmt); + break; default: elog(ERROR, "unrecognized cmd_type: %d", stmt->cmd_type); break; @@ -1062,7 +1130,7 @@ static void dump_fors(PLpgSQL_stmt_fors *stmt) { dump_ind(); - printf("FORS %s ", (stmt->rec != NULL) ? stmt->rec->refname : stmt->row->refname); + printf("FORS %s ", stmt->var->refname); dump_expr(stmt->query); printf("\n"); @@ -1076,7 +1144,7 @@ static void dump_forc(PLpgSQL_stmt_forc *stmt) { dump_ind(); - printf("FORC %s ", stmt->rec->refname); + printf("FORC %s ", stmt->var->refname); printf("curvar=%d\n", stmt->curvar); dump_indent += 2; @@ -1174,15 +1242,11 @@ dump_fetch(PLpgSQL_stmt_fetch *stmt) dump_cursor_direction(stmt); dump_indent += 2; - if (stmt->rec != NULL) - { - dump_ind(); - printf(" target = %d %s\n", stmt->rec->dno, stmt->rec->refname); - } - if (stmt->row != NULL) + if (stmt->target != NULL) { dump_ind(); - printf(" target = %d %s\n", stmt->row->dno, stmt->row->refname); + printf(" target = %d %s\n", + stmt->target->dno, stmt->target->refname); } dump_indent -= 2; } @@ -1243,6 +1307,36 @@ dump_perform(PLpgSQL_stmt_perform *stmt) printf("\n"); } +static void +dump_call(PLpgSQL_stmt_call *stmt) +{ + dump_ind(); + printf("%s expr = ", stmt->is_call ? "CALL" : "DO"); + dump_expr(stmt->expr); + printf("\n"); +} + +static void +dump_commit(PLpgSQL_stmt_commit *stmt) +{ + dump_ind(); + printf("COMMIT\n"); +} + +static void +dump_rollback(PLpgSQL_stmt_rollback *stmt) +{ + dump_ind(); + printf("ROLLBACK\n"); +} + +static void +dump_set(PLpgSQL_stmt_set *stmt) +{ + dump_ind(); + printf("%s\n", stmt->expr->query); +} + static void dump_exit(PLpgSQL_stmt_exit *stmt) { @@ -1420,19 +1514,12 @@ dump_execsql(PLpgSQL_stmt_execsql *stmt) printf("\n"); dump_indent += 2; - if (stmt->rec != NULL) - { - dump_ind(); - printf(" INTO%s target = %d %s\n", - stmt->strict ? " STRICT" : "", - stmt->rec->dno, stmt->rec->refname); - } - if (stmt->row != NULL) + if (stmt->target != NULL) { dump_ind(); printf(" INTO%s target = %d %s\n", stmt->strict ? " STRICT" : "", - stmt->row->dno, stmt->row->refname); + stmt->target->dno, stmt->target->refname); } dump_indent -= 2; } @@ -1446,19 +1533,12 @@ dump_dynexecute(PLpgSQL_stmt_dynexecute *stmt) printf("\n"); dump_indent += 2; - if (stmt->rec != NULL) + if (stmt->target != NULL) { dump_ind(); printf(" INTO%s target = %d %s\n", stmt->strict ? " STRICT" : "", - stmt->rec->dno, stmt->rec->refname); - } - if (stmt->row != NULL) - { - dump_ind(); - printf(" INTO%s target = %d %s\n", - stmt->strict ? " STRICT" : "", - stmt->row->dno, stmt->row->refname); + stmt->target->dno, stmt->target->refname); } if (stmt->params != NIL) { @@ -1485,8 +1565,7 @@ static void dump_dynfors(PLpgSQL_stmt_dynfors *stmt) { dump_ind(); - printf("FORS %s EXECUTE ", - (stmt->rec != NULL) ? 
stmt->rec->refname : stmt->row->refname); + printf("FORS %s EXECUTE ", stmt->var->refname); dump_expr(stmt->query); printf("\n"); if (stmt->params != NIL) @@ -1557,6 +1636,7 @@ plpgsql_dumptree(PLpgSQL_function *func) switch (d->dtype) { case PLPGSQL_DTYPE_VAR: + case PLPGSQL_DTYPE_PROMISE: { PLpgSQL_var *var = (PLpgSQL_var *) d; @@ -1583,6 +1663,9 @@ plpgsql_dumptree(PLpgSQL_function *func) dump_expr(var->cursor_explicit_expr); printf("\n"); } + if (var->promise != PLPGSQL_PROMISE_NONE) + printf(" PROMISE %d\n", + (int) var->promise); } break; case PLPGSQL_DTYPE_ROW: @@ -1593,15 +1676,26 @@ plpgsql_dumptree(PLpgSQL_function *func) printf("ROW %-16s fields", row->refname); for (i = 0; i < row->nfields; i++) { - if (row->fieldnames[i]) - printf(" %s=var %d", row->fieldnames[i], - row->varnos[i]); + printf(" %s=var %d", row->fieldnames[i], + row->varnos[i]); } printf("\n"); } break; case PLPGSQL_DTYPE_REC: - printf("REC %s\n", ((PLpgSQL_rec *) d)->refname); + printf("REC %-16s typoid %u\n", + ((PLpgSQL_rec *) d)->refname, + ((PLpgSQL_rec *) d)->rectypeid); + if (((PLpgSQL_rec *) d)->isconst) + printf(" CONSTANT\n"); + if (((PLpgSQL_rec *) d)->notnull) + printf(" NOT NULL\n"); + if (((PLpgSQL_rec *) d)->default_val != NULL) + { + printf(" DEFAULT "); + dump_expr(((PLpgSQL_rec *) d)->default_val); + printf("\n"); + } break; case PLPGSQL_DTYPE_RECFIELD: printf("RECFIELD %-16s of REC %d\n", diff --git a/src/pl/plpgsql/src/pl_gram.y b/src/pl/plpgsql/src/pl_gram.y index 94f1f58593..68e399f9cf 100644 --- a/src/pl/plpgsql/src/pl_gram.y +++ b/src/pl/plpgsql/src/pl_gram.y @@ -3,7 +3,7 @@ * * pl_gram.y - Parser for the PL/pgSQL procedural language * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * @@ -16,6 +16,7 @@ #include "postgres.h" #include "catalog/namespace.h" +#include "catalog/pg_proc.h" #include "catalog/pg_type.h" #include "parser/parser.h" #include "parser/parse_type.h" @@ -90,7 +91,7 @@ static PLpgSQL_stmt *make_case(int location, PLpgSQL_expr *t_expr, List *case_when_list, List *else_stmts); static char *NameOfDatum(PLwdatum *wdatum); static void check_assignable(PLpgSQL_datum *datum, int location); -static void read_into_target(PLpgSQL_rec **rec, PLpgSQL_row **row, +static void read_into_target(PLpgSQL_variable **target, bool *strict); static PLpgSQL_row *read_into_scalar_list(char *initial_name, PLpgSQL_datum *initial_datum, @@ -138,8 +139,7 @@ static void check_raise_parameters(PLpgSQL_stmt_raise *stmt); char *name; int lineno; PLpgSQL_datum *scalar; - PLpgSQL_rec *rec; - PLpgSQL_row *row; + PLpgSQL_datum *row; } forvariable; struct { @@ -197,8 +197,9 @@ static void check_raise_parameters(PLpgSQL_stmt_raise *stmt); %type proc_stmt pl_block %type stmt_assign stmt_if stmt_loop stmt_while stmt_exit %type stmt_return stmt_raise stmt_assert stmt_execsql -%type stmt_dynexecute stmt_for stmt_perform stmt_getdiag +%type stmt_dynexecute stmt_for stmt_perform stmt_call stmt_getdiag %type stmt_open stmt_fetch stmt_move stmt_close stmt_null +%type stmt_commit stmt_rollback stmt_set %type stmt_case stmt_foreach_a %type proc_exceptions @@ -256,11 +257,13 @@ static void check_raise_parameters(PLpgSQL_stmt_raise *stmt); %token K_BACKWARD %token K_BEGIN %token K_BY +%token K_CALL %token K_CASE %token K_CLOSE %token K_COLLATE %token K_COLUMN %token K_COLUMN_NAME +%token K_COMMIT %token K_CONSTANT %token K_CONSTRAINT %token 
K_CONSTRAINT_NAME @@ -273,6 +276,7 @@ static void check_raise_parameters(PLpgSQL_stmt_raise *stmt); %token K_DEFAULT %token K_DETAIL %token K_DIAGNOSTICS +%token K_DO %token K_DUMP %token K_ELSE %token K_ELSIF @@ -322,15 +326,18 @@ static void check_raise_parameters(PLpgSQL_stmt_raise *stmt); %token K_QUERY %token K_RAISE %token K_RELATIVE +%token K_RESET %token K_RESULT_OID %token K_RETURN %token K_RETURNED_SQLSTATE %token K_REVERSE +%token K_ROLLBACK %token K_ROW_COUNT %token K_ROWTYPE %token K_SCHEMA %token K_SCHEMA_NAME %token K_SCROLL +%token K_SET %token K_SLICE %token K_SQLSTATE %token K_STACKED @@ -503,37 +510,20 @@ decl_statement : decl_varname decl_const decl_datatype decl_collate decl_notnull var = plpgsql_build_variable($1.name, $1.lineno, $3, true); - if ($2) - { - if (var->dtype == PLPGSQL_DTYPE_VAR) - ((PLpgSQL_var *) var)->isconst = $2; - else - ereport(ERROR, - (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("row or record variable cannot be CONSTANT"), - parser_errposition(@2))); - } - if ($5) - { - if (var->dtype == PLPGSQL_DTYPE_VAR) - ((PLpgSQL_var *) var)->notnull = $5; - else - ereport(ERROR, - (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("row or record variable cannot be NOT NULL"), - parser_errposition(@4))); + var->isconst = $2; + var->notnull = $5; + var->default_val = $6; - } - if ($6 != NULL) - { - if (var->dtype == PLPGSQL_DTYPE_VAR) - ((PLpgSQL_var *) var)->default_val = $6; - else - ereport(ERROR, - (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("default value for row or record variable is not supported"), - parser_errposition(@5))); - } + /* + * The combination of NOT NULL without an initializer + * can't work, so let's reject it at compile time. + */ + if (var->notnull && var->default_val == NULL) + ereport(ERROR, + (errcode(ERRCODE_NULL_VALUE_NOT_ALLOWED), + errmsg("variable \"%s\" must have a default value, since it's declared NOT NULL", + var->refname), + parser_errposition(@5))); } | decl_varname K_ALIAS K_FOR decl_aliasitem ';' { @@ -562,7 +552,6 @@ decl_statement : decl_varname decl_const decl_datatype decl_collate decl_notnull curname_def = palloc0(sizeof(PLpgSQL_expr)); - curname_def->dtype = PLPGSQL_DTYPE_EXPR; strcpy(buf, "SELECT "); cp1 = new->refname; cp2 = buf + strlen(buf); @@ -624,6 +613,7 @@ decl_cursor_args : new = palloc0(sizeof(PLpgSQL_row)); new->dtype = PLPGSQL_DTYPE_ROW; + new->refname = "(unnamed row)"; new->lineno = plpgsql_location_to_lineno(@1); new->rowtupdesc = NULL; new->nfields = list_length($2); @@ -634,6 +624,7 @@ decl_cursor_args : foreach (l, $2) { PLpgSQL_variable *arg = (PLpgSQL_variable *) lfirst(l); + Assert(!arg->isconst); new->fieldnames[i] = arg->refname; new->varnos[i] = arg->dno; i++; @@ -886,6 +877,8 @@ proc_stmt : pl_block ';' { $$ = $1; } | stmt_perform { $$ = $1; } + | stmt_call + { $$ = $1; } | stmt_getdiag { $$ = $1; } | stmt_open @@ -898,6 +891,12 @@ proc_stmt : pl_block ';' { $$ = $1; } | stmt_null { $$ = $1; } + | stmt_commit + { $$ = $1; } + | stmt_rollback + { $$ = $1; } + | stmt_set + { $$ = $1; } ; stmt_perform : K_PERFORM expr_until_semi @@ -913,6 +912,35 @@ stmt_perform : K_PERFORM expr_until_semi } ; +stmt_call : K_CALL + { + PLpgSQL_stmt_call *new; + + new = palloc0(sizeof(PLpgSQL_stmt_call)); + new->cmd_type = PLPGSQL_STMT_CALL; + new->lineno = plpgsql_location_to_lineno(@1); + new->expr = read_sql_stmt("CALL "); + new->is_call = true; + + $$ = (PLpgSQL_stmt *)new; + + } + | K_DO + { + /* use the same structures as for CALL, for simplicity */ + PLpgSQL_stmt_call *new; + + new = 
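The reworked decl_statement hunk above moves CONSTANT, NOT NULL and DEFAULT onto the common variable fields and rejects NOT NULL without an initializer at compile time. A minimal PL/pgSQL sketch of the behavior that hunk enforces (the DO block and variable names are purely illustrative, not part of the patch):

    DO $$
    DECLARE
        n integer NOT NULL := 0;   -- fine: NOT NULL with a default
        m integer NOT NULL;        -- now rejected at compile time:
        -- "variable "m" must have a default value, since it's declared NOT NULL"
    BEGIN
        NULL;
    END;
    $$;
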
palloc0(sizeof(PLpgSQL_stmt_call)); + new->cmd_type = PLPGSQL_STMT_CALL; + new->lineno = plpgsql_location_to_lineno(@1); + new->expr = read_sql_stmt("DO "); + new->is_call = false; + + $$ = (PLpgSQL_stmt *)new; + + } + ; + stmt_assign : assign_var assign_operator expr_until_semi { PLpgSQL_stmt_assign *new; @@ -1310,28 +1338,24 @@ for_control : for_variable K_IN new = palloc0(sizeof(PLpgSQL_stmt_dynfors)); new->cmd_type = PLPGSQL_STMT_DYNFORS; - if ($1.rec) + if ($1.row) { - new->rec = $1.rec; - check_assignable((PLpgSQL_datum *) new->rec, @1); - } - else if ($1.row) - { - new->row = $1.row; - check_assignable((PLpgSQL_datum *) new->row, @1); + new->var = (PLpgSQL_variable *) $1.row; + check_assignable($1.row, @1); } else if ($1.scalar) { /* convert single scalar to list */ - new->row = make_scalar_list1($1.name, $1.scalar, - $1.lineno, @1); - /* no need for check_assignable */ + new->var = (PLpgSQL_variable *) + make_scalar_list1($1.name, $1.scalar, + $1.lineno, @1); + /* make_scalar_list1 did check_assignable */ } else { ereport(ERROR, (errcode(ERRCODE_DATATYPE_MISMATCH), - errmsg("loop variable of loop over rows must be a record or row variable or list of scalar variables"), + errmsg("loop variable of loop over rows must be a record variable or list of scalar variables"), parser_errposition(@1))); } new->query = expr; @@ -1381,9 +1405,12 @@ for_control : for_variable K_IN "LOOP"); /* create loop's private RECORD variable */ - new->rec = plpgsql_build_record($1.name, - $1.lineno, - true); + new->var = (PLpgSQL_variable *) + plpgsql_build_record($1.name, + $1.lineno, + NULL, + RECORDOID, + true); $$ = (PLpgSQL_stmt *) new; } @@ -1504,28 +1531,24 @@ for_control : for_variable K_IN new = palloc0(sizeof(PLpgSQL_stmt_fors)); new->cmd_type = PLPGSQL_STMT_FORS; - if ($1.rec) + if ($1.row) { - new->rec = $1.rec; - check_assignable((PLpgSQL_datum *) new->rec, @1); - } - else if ($1.row) - { - new->row = $1.row; - check_assignable((PLpgSQL_datum *) new->row, @1); + new->var = (PLpgSQL_variable *) $1.row; + check_assignable($1.row, @1); } else if ($1.scalar) { /* convert single scalar to list */ - new->row = make_scalar_list1($1.name, $1.scalar, - $1.lineno, @1); - /* no need for check_assignable */ + new->var = (PLpgSQL_variable *) + make_scalar_list1($1.name, $1.scalar, + $1.lineno, @1); + /* make_scalar_list1 did check_assignable */ } else { ereport(ERROR, (errcode(ERRCODE_SYNTAX_ERROR), - errmsg("loop variable of loop over rows must be a record or row variable or list of scalar variables"), + errmsg("loop variable of loop over rows must be a record variable or list of scalar variables"), parser_errposition(@1))); } @@ -1558,32 +1581,26 @@ for_variable : T_DATUM { $$.name = NameOfDatum(&($1)); $$.lineno = plpgsql_location_to_lineno(@1); - if ($1.datum->dtype == PLPGSQL_DTYPE_ROW) + if ($1.datum->dtype == PLPGSQL_DTYPE_ROW || + $1.datum->dtype == PLPGSQL_DTYPE_REC) { $$.scalar = NULL; - $$.rec = NULL; - $$.row = (PLpgSQL_row *) $1.datum; - } - else if ($1.datum->dtype == PLPGSQL_DTYPE_REC) - { - $$.scalar = NULL; - $$.rec = (PLpgSQL_rec *) $1.datum; - $$.row = NULL; + $$.row = $1.datum; } else { int tok; $$.scalar = $1.datum; - $$.rec = NULL; $$.row = NULL; /* check for comma-separated list */ tok = yylex(); plpgsql_push_back_token(tok); if (tok == ',') - $$.row = read_into_scalar_list($$.name, - $$.scalar, - @1); + $$.row = (PLpgSQL_datum *) + read_into_scalar_list($$.name, + $$.scalar, + @1); } } | T_WORD @@ -1593,7 +1610,6 @@ for_variable : T_DATUM $$.name = $1.ident; $$.lineno = 
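The stmt_call rule added above makes CALL (and DO, via the same node with is_call = false) a first-class PL/pgSQL statement. An illustrative use, assuming a hypothetical event_log table and log_event procedure:

    CREATE TABLE event_log (message text);

    CREATE PROCEDURE log_event(msg text)
    LANGUAGE plpgsql
    AS $$
    BEGIN
        INSERT INTO event_log (message) VALUES (msg);
    END;
    $$;

    CREATE PROCEDURE run_job()
    LANGUAGE plpgsql
    AS $$
    BEGIN
        CALL log_event('job started');   -- handled by the new PLPGSQL_STMT_CALL node
    END;
    $$;

    CALL run_job();
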
plpgsql_location_to_lineno(@1); $$.scalar = NULL; - $$.rec = NULL; $$.row = NULL; /* check for comma-separated list */ tok = yylex(); @@ -1620,15 +1636,10 @@ stmt_foreach_a : opt_loop_label K_FOREACH for_variable foreach_slice K_IN K_ARRA new->expr = $7; new->body = $8.stmts; - if ($3.rec) - { - new->varno = $3.rec->dno; - check_assignable((PLpgSQL_datum *) $3.rec, @3); - } - else if ($3.row) + if ($3.row) { new->varno = $3.row->dno; - check_assignable((PLpgSQL_datum *) $3.row, @3); + check_assignable($3.row, @3); } else if ($3.scalar) { @@ -1981,8 +1992,7 @@ stmt_dynexecute : K_EXECUTE new->query = expr; new->into = false; new->strict = false; - new->rec = NULL; - new->row = NULL; + new->target = NULL; new->params = NIL; /* @@ -1999,7 +2009,7 @@ stmt_dynexecute : K_EXECUTE if (new->into) /* multiple INTO */ yyerror("syntax error"); new->into = true; - read_into_target(&new->rec, &new->row, &new->strict); + read_into_target(&new->target, &new->strict); endtoken = yylex(); } else if (endtoken == K_USING) @@ -2107,11 +2117,10 @@ stmt_open : K_OPEN cursor_variable stmt_fetch : K_FETCH opt_fetch_direction cursor_variable K_INTO { PLpgSQL_stmt_fetch *fetch = $2; - PLpgSQL_rec *rec; - PLpgSQL_row *row; + PLpgSQL_variable *target; /* We have already parsed everything through the INTO keyword */ - read_into_target(&rec, &row, NULL); + read_into_target(&target, NULL); if (yylex() != ';') yyerror("syntax error"); @@ -2127,8 +2136,7 @@ stmt_fetch : K_FETCH opt_fetch_direction cursor_variable K_INTO parser_errposition(@1))); fetch->lineno = plpgsql_location_to_lineno(@1); - fetch->rec = rec; - fetch->row = row; + fetch->target = target; fetch->curvar = $3->dno; fetch->is_move = false; @@ -2174,6 +2182,55 @@ stmt_null : K_NULL ';' } ; +stmt_commit : K_COMMIT ';' + { + PLpgSQL_stmt_commit *new; + + new = palloc(sizeof(PLpgSQL_stmt_commit)); + new->cmd_type = PLPGSQL_STMT_COMMIT; + new->lineno = plpgsql_location_to_lineno(@1); + + $$ = (PLpgSQL_stmt *)new; + } + ; + +stmt_rollback : K_ROLLBACK ';' + { + PLpgSQL_stmt_rollback *new; + + new = palloc(sizeof(PLpgSQL_stmt_rollback)); + new->cmd_type = PLPGSQL_STMT_ROLLBACK; + new->lineno = plpgsql_location_to_lineno(@1); + + $$ = (PLpgSQL_stmt *)new; + } + ; + +stmt_set : K_SET + { + PLpgSQL_stmt_set *new; + + new = palloc0(sizeof(PLpgSQL_stmt_set)); + new->cmd_type = PLPGSQL_STMT_SET; + new->lineno = plpgsql_location_to_lineno(@1); + new->expr = read_sql_stmt("SET "); + + $$ = (PLpgSQL_stmt *)new; + } + | K_RESET + { + PLpgSQL_stmt_set *new; + + new = palloc0(sizeof(PLpgSQL_stmt_set)); + new->cmd_type = PLPGSQL_STMT_SET; + new->lineno = plpgsql_location_to_lineno(@1); + new->expr = read_sql_stmt("RESET "); + + $$ = (PLpgSQL_stmt *)new; + } + ; + + cursor_variable : T_DATUM { /* @@ -2228,7 +2285,7 @@ exception_sect : -1, plpgsql_curr_compile->fn_input_collation), true); - ((PLpgSQL_var *) var)->isconst = true; + var->isconst = true; new->sqlstate_varno = var->dno; var = plpgsql_build_variable("sqlerrm", lineno, @@ -2236,7 +2293,7 @@ exception_sect : -1, plpgsql_curr_compile->fn_input_collation), true); - ((PLpgSQL_var *) var)->isconst = true; + var->isconst = true; new->sqlerrm_varno = var->dno; $$ = new; @@ -2406,10 +2463,12 @@ unreserved_keyword : | K_ARRAY | K_ASSERT | K_BACKWARD + | K_CALL | K_CLOSE | K_COLLATE | K_COLUMN | K_COLUMN_NAME + | K_COMMIT | K_CONSTANT | K_CONSTRAINT | K_CONSTRAINT_NAME @@ -2421,6 +2480,7 @@ unreserved_keyword : | K_DEFAULT | K_DETAIL | K_DIAGNOSTICS + | K_DO | K_DUMP | K_ELSIF | K_ERRCODE @@ -2457,15 +2517,18 @@ 
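The stmt_commit, stmt_rollback and stmt_set rules above give transaction control and SET/RESET their own statement types. A hedged sketch of the intended usage, assuming a placeholder batch_log table and PostgreSQL 11 semantics: COMMIT only works when the procedure is invoked with CALL outside an explicit transaction block, and the SET TRANSACTION line assumes each COMMIT starts a fresh transaction with default characteristics.

    CREATE TABLE batch_log (n integer);

    CREATE PROCEDURE load_batches()
    LANGUAGE plpgsql
    AS $$
    BEGIN
        FOR i IN 1..3 LOOP
            INSERT INTO batch_log VALUES (i);
            COMMIT;                                          -- PLPGSQL_STMT_COMMIT
            SET TRANSACTION ISOLATION LEVEL READ COMMITTED;  -- PLPGSQL_STMT_SET
        END LOOP;
    END;
    $$;

    CALL load_batches();
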
unreserved_keyword : | K_QUERY | K_RAISE | K_RELATIVE + | K_RESET | K_RESULT_OID | K_RETURN | K_RETURNED_SQLSTATE | K_REVERSE + | K_ROLLBACK | K_ROW_COUNT | K_ROWTYPE | K_SCHEMA | K_SCHEMA_NAME | K_SCROLL + | K_SET | K_SLICE | K_SQLSTATE | K_STACKED @@ -2685,7 +2748,6 @@ read_sql_construct(int until, } expr = palloc0(sizeof(PLpgSQL_expr)); - expr->dtype = PLPGSQL_DTYPE_EXPR; expr->query = pstrdup(ds.data); expr->plan = NULL; expr->paramnos = NULL; @@ -2842,8 +2904,7 @@ make_execsql_stmt(int firsttoken, int location) IdentifierLookup save_IdentifierLookup; PLpgSQL_stmt_execsql *execsql; PLpgSQL_expr *expr; - PLpgSQL_row *row = NULL; - PLpgSQL_rec *rec = NULL; + PLpgSQL_variable *target = NULL; int tok; int prev_tok; bool have_into = false; @@ -2907,7 +2968,7 @@ make_execsql_stmt(int firsttoken, int location) have_into = true; into_start_loc = yylloc; plpgsql_IdentifierLookup = IDENTIFIER_LOOKUP_NORMAL; - read_into_target(&rec, &row, &have_strict); + read_into_target(&target, &have_strict); plpgsql_IdentifierLookup = IDENTIFIER_LOOKUP_EXPR; } } @@ -2933,7 +2994,6 @@ make_execsql_stmt(int firsttoken, int location) ds.data[--ds.len] = '\0'; expr = palloc0(sizeof(PLpgSQL_expr)); - expr->dtype = PLPGSQL_DTYPE_EXPR; expr->query = pstrdup(ds.data); expr->plan = NULL; expr->paramnos = NULL; @@ -2949,8 +3009,7 @@ make_execsql_stmt(int firsttoken, int location) execsql->sqlstmt = expr; execsql->into = have_into; execsql->strict = have_strict; - execsql->rec = rec; - execsql->row = row; + execsql->target = target; return (PLpgSQL_stmt *) execsql; } @@ -3136,22 +3195,30 @@ make_return_stmt(int location) errhint("Use RETURN NEXT or RETURN QUERY."), parser_errposition(yylloc))); } - else if (plpgsql_curr_compile->out_param_varno >= 0) + else if (plpgsql_curr_compile->fn_rettype == VOIDOID) { if (yylex() != ';') - ereport(ERROR, - (errcode(ERRCODE_DATATYPE_MISMATCH), - errmsg("RETURN cannot have a parameter in function with OUT parameters"), - parser_errposition(yylloc))); - new->retvarno = plpgsql_curr_compile->out_param_varno; + { + if (plpgsql_curr_compile->fn_prokind == PROKIND_PROCEDURE) + ereport(ERROR, + (errcode(ERRCODE_SYNTAX_ERROR), + errmsg("RETURN cannot have a parameter in a procedure"), + parser_errposition(yylloc))); + else + ereport(ERROR, + (errcode(ERRCODE_DATATYPE_MISMATCH), + errmsg("RETURN cannot have a parameter in function returning void"), + parser_errposition(yylloc))); + } } - else if (plpgsql_curr_compile->fn_rettype == VOIDOID) + else if (plpgsql_curr_compile->out_param_varno >= 0) { if (yylex() != ';') ereport(ERROR, (errcode(ERRCODE_DATATYPE_MISMATCH), - errmsg("RETURN cannot have a parameter in function returning void"), + errmsg("RETURN cannot have a parameter in function with OUT parameters"), parser_errposition(yylloc))); + new->retvarno = plpgsql_curr_compile->out_param_varno; } else { @@ -3163,6 +3230,7 @@ make_return_stmt(int location) if (tok == T_DATUM && plpgsql_peek() == ';' && (yylval.wdatum.datum->dtype == PLPGSQL_DTYPE_VAR || + yylval.wdatum.datum->dtype == PLPGSQL_DTYPE_PROMISE || yylval.wdatum.datum->dtype == PLPGSQL_DTYPE_ROW || yylval.wdatum.datum->dtype == PLPGSQL_DTYPE_REC)) { @@ -3224,6 +3292,7 @@ make_return_next_stmt(int location) if (tok == T_DATUM && plpgsql_peek() == ';' && (yylval.wdatum.datum->dtype == PLPGSQL_DTYPE_VAR || + yylval.wdatum.datum->dtype == PLPGSQL_DTYPE_PROMISE || yylval.wdatum.datum->dtype == PLPGSQL_DTYPE_ROW || yylval.wdatum.datum->dtype == PLPGSQL_DTYPE_REC)) { @@ -3311,24 +3380,27 @@ check_assignable(PLpgSQL_datum *datum, int 
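make_return_stmt now reports a procedure-specific error before falling through to the OUT-parameter and void-function cases. For illustration (the procedure name is arbitrary):

    CREATE PROCEDURE finish_early(flag boolean)
    LANGUAGE plpgsql
    AS $$
    BEGIN
        IF flag THEN
            RETURN;       -- a bare RETURN exits the procedure
        END IF;
        RAISE NOTICE 'doing more work';
        -- RETURN 42;     -- would now fail with
        --                -- "RETURN cannot have a parameter in a procedure"
    END;
    $$;
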
location) switch (datum->dtype) { case PLPGSQL_DTYPE_VAR: - if (((PLpgSQL_var *) datum)->isconst) + case PLPGSQL_DTYPE_PROMISE: + case PLPGSQL_DTYPE_REC: + if (((PLpgSQL_variable *) datum)->isconst) ereport(ERROR, (errcode(ERRCODE_ERROR_IN_ASSIGNMENT), - errmsg("\"%s\" is declared CONSTANT", - ((PLpgSQL_var *) datum)->refname), + errmsg("variable \"%s\" is declared CONSTANT", + ((PLpgSQL_variable *) datum)->refname), parser_errposition(location))); break; case PLPGSQL_DTYPE_ROW: - /* always assignable? */ - break; - case PLPGSQL_DTYPE_REC: - /* always assignable? What about NEW/OLD? */ + /* always assignable; member vars were checked at compile time */ break; case PLPGSQL_DTYPE_RECFIELD: - /* always assignable? */ + /* assignable if parent record is */ + check_assignable(plpgsql_Datums[((PLpgSQL_recfield *) datum)->recparentno], + location); break; case PLPGSQL_DTYPE_ARRAYELEM: - /* always assignable? */ + /* assignable if parent array is */ + check_assignable(plpgsql_Datums[((PLpgSQL_arrayelem *) datum)->arrayparentno], + location); break; default: elog(ERROR, "unrecognized dtype: %d", datum->dtype); @@ -3341,13 +3413,12 @@ check_assignable(PLpgSQL_datum *datum, int location) * INTO keyword. */ static void -read_into_target(PLpgSQL_rec **rec, PLpgSQL_row **row, bool *strict) +read_into_target(PLpgSQL_variable **target, bool *strict) { int tok; /* Set default results */ - *rec = NULL; - *row = NULL; + *target = NULL; if (strict) *strict = false; @@ -3368,34 +3439,24 @@ read_into_target(PLpgSQL_rec **rec, PLpgSQL_row **row, bool *strict) switch (tok) { case T_DATUM: - if (yylval.wdatum.datum->dtype == PLPGSQL_DTYPE_ROW) - { - check_assignable(yylval.wdatum.datum, yylloc); - *row = (PLpgSQL_row *) yylval.wdatum.datum; - - if ((tok = yylex()) == ',') - ereport(ERROR, - (errcode(ERRCODE_SYNTAX_ERROR), - errmsg("record or row variable cannot be part of multiple-item INTO list"), - parser_errposition(yylloc))); - plpgsql_push_back_token(tok); - } - else if (yylval.wdatum.datum->dtype == PLPGSQL_DTYPE_REC) + if (yylval.wdatum.datum->dtype == PLPGSQL_DTYPE_ROW || + yylval.wdatum.datum->dtype == PLPGSQL_DTYPE_REC) { check_assignable(yylval.wdatum.datum, yylloc); - *rec = (PLpgSQL_rec *) yylval.wdatum.datum; + *target = (PLpgSQL_variable *) yylval.wdatum.datum; if ((tok = yylex()) == ',') ereport(ERROR, (errcode(ERRCODE_SYNTAX_ERROR), - errmsg("record or row variable cannot be part of multiple-item INTO list"), + errmsg("record variable cannot be part of multiple-item INTO list"), parser_errposition(yylloc))); plpgsql_push_back_token(tok); } else { - *row = read_into_scalar_list(NameOfDatum(&(yylval.wdatum)), - yylval.wdatum.datum, yylloc); + *target = (PLpgSQL_variable *) + read_into_scalar_list(NameOfDatum(&(yylval.wdatum)), + yylval.wdatum.datum, yylloc); } break; @@ -3464,9 +3525,9 @@ read_into_scalar_list(char *initial_name, */ plpgsql_push_back_token(tok); - row = palloc(sizeof(PLpgSQL_row)); + row = palloc0(sizeof(PLpgSQL_row)); row->dtype = PLPGSQL_DTYPE_ROW; - row->refname = pstrdup("*internal*"); + row->refname = "(unnamed row)"; row->lineno = plpgsql_location_to_lineno(initial_location); row->rowtupdesc = NULL; row->nfields = nfields; @@ -3499,9 +3560,9 @@ make_scalar_list1(char *initial_name, check_assignable(initial_datum, location); - row = palloc(sizeof(PLpgSQL_row)); + row = palloc0(sizeof(PLpgSQL_row)); row->dtype = PLPGSQL_DTYPE_ROW; - row->refname = pstrdup("*internal*"); + row->refname = "(unnamed row)"; row->lineno = lineno; row->rowtupdesc = NULL; row->nfields = 1; @@ -3817,7 
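check_assignable above now applies the CONSTANT test to scalar, promise and record variables alike, and recurses from record fields and array elements to their parents, so writes into part of a CONSTANT variable are caught as well. A small sketch of that error path (variable name is arbitrary):

    DO $$
    DECLARE
        lims CONSTANT integer[] := ARRAY[1, 10];
    BEGIN
        lims[1] := 5;   -- ERROR: variable "lims" is declared CONSTANT
    END;
    $$;
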
+3878,6 @@ read_cursor_args(PLpgSQL_var *cursor, int until, const char *expected) appendStringInfoChar(&ds, ';'); expr = palloc0(sizeof(PLpgSQL_expr)); - expr->dtype = PLPGSQL_DTYPE_EXPR; expr->query = pstrdup(ds.data); expr->plan = NULL; expr->paramnos = NULL; diff --git a/src/pl/plpgsql/src/pl_handler.c b/src/pl/plpgsql/src/pl_handler.c index 1ebb7a7b5e..7d3647a12d 100644 --- a/src/pl/plpgsql/src/pl_handler.c +++ b/src/pl/plpgsql/src/pl_handler.c @@ -3,7 +3,7 @@ * pl_handler.c - Handler for the PL/pgSQL * procedural language * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * @@ -92,6 +92,10 @@ plpgsql_extra_checks_check_hook(char **newvalue, void **extra, GucSource source) if (pg_strcasecmp(tok, "shadowed_variables") == 0) extrachecks |= PLPGSQL_XCHECK_SHADOWVAR; + else if (pg_strcasecmp(tok, "too_many_rows") == 0) + extrachecks |= PLPGSQL_XCHECK_TOOMANYROWS; + else if (pg_strcasecmp(tok, "strict_multi_assignment") == 0) + extrachecks |= PLPGSQL_XCHECK_STRICTMULTIASSIGNMENT; else if (pg_strcasecmp(tok, "all") == 0 || pg_strcasecmp(tok, "none") == 0) { GUC_check_errdetail("Key word \"%s\" cannot be combined with other key words.", tok); @@ -219,15 +223,20 @@ PG_FUNCTION_INFO_V1(plpgsql_call_handler); Datum plpgsql_call_handler(PG_FUNCTION_ARGS) { + bool nonatomic; PLpgSQL_function *func; PLpgSQL_execstate *save_cur_estate; Datum retval; int rc; + nonatomic = fcinfo->context && + IsA(fcinfo->context, CallContext) && + !castNode(CallContext, fcinfo->context)->atomic; + /* * Connect to SPI manager */ - if ((rc = SPI_connect()) != SPI_OK_CONNECT) + if ((rc = SPI_connect_ext(nonatomic ? SPI_OPT_NONATOMIC : 0)) != SPI_OK_CONNECT) elog(ERROR, "SPI_connect failed: %s", SPI_result_code_string(rc)); /* Find or compile the function */ @@ -255,7 +264,7 @@ plpgsql_call_handler(PG_FUNCTION_ARGS) retval = (Datum) 0; } else - retval = plpgsql_exec_function(func, fcinfo, NULL); + retval = plpgsql_exec_function(func, fcinfo, NULL, !nonatomic); } PG_CATCH(); { @@ -301,7 +310,7 @@ plpgsql_inline_handler(PG_FUNCTION_ARGS) /* * Connect to SPI manager */ - if ((rc = SPI_connect()) != SPI_OK_CONNECT) + if ((rc = SPI_connect_ext(codeblock->atomic ? 
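The check hook above accepts two new keywords for plpgsql.extra_warnings / plpgsql.extra_errors alongside shadowed_variables. They are plain GUC settings, for example:

    -- warn when an INTO clause without STRICT silently discards extra rows
    SET plpgsql.extra_warnings = 'too_many_rows';

    -- error out when a multi-assignment (e.g. SELECT ... INTO a, b) has a
    -- different number of source and target values
    SET plpgsql.extra_errors = 'strict_multi_assignment';

    -- or enable every check at once
    SET plpgsql.extra_errors = 'all';
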
0 : SPI_OPT_NONATOMIC)) != SPI_OK_CONNECT) elog(ERROR, "SPI_connect failed: %s", SPI_result_code_string(rc)); /* Compile the anonymous code block */ @@ -327,7 +336,7 @@ plpgsql_inline_handler(PG_FUNCTION_ARGS) /* And run the function */ PG_TRY(); { - retval = plpgsql_exec_function(func, &fake_fcinfo, simple_eval_estate); + retval = plpgsql_exec_function(func, &fake_fcinfo, simple_eval_estate, codeblock->atomic); } PG_CATCH(); { @@ -438,14 +447,15 @@ plpgsql_validator(PG_FUNCTION_ARGS) } /* Disallow pseudotypes in arguments (either IN or OUT) */ - /* except for polymorphic */ + /* except for RECORD and polymorphic */ numargs = get_func_arg_info(tuple, &argtypes, &argnames, &argmodes); for (i = 0; i < numargs; i++) { if (get_typtype(argtypes[i]) == TYPTYPE_PSEUDO) { - if (!IsPolymorphicType(argtypes[i])) + if (argtypes[i] != RECORDOID && + !IsPolymorphicType(argtypes[i])) ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), errmsg("PL/pgSQL functions cannot accept type %s", diff --git a/src/pl/plpgsql/src/pl_scanner.c b/src/pl/plpgsql/src/pl_scanner.c index 553be8c93c..fc4ba3054a 100644 --- a/src/pl/plpgsql/src/pl_scanner.c +++ b/src/pl/plpgsql/src/pl_scanner.c @@ -4,7 +4,7 @@ * lexical scanning for PL/pgSQL * * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * @@ -102,10 +102,12 @@ static const ScanKeyword unreserved_keywords[] = { PG_KEYWORD("array", K_ARRAY, UNRESERVED_KEYWORD) PG_KEYWORD("assert", K_ASSERT, UNRESERVED_KEYWORD) PG_KEYWORD("backward", K_BACKWARD, UNRESERVED_KEYWORD) + PG_KEYWORD("call", K_CALL, UNRESERVED_KEYWORD) PG_KEYWORD("close", K_CLOSE, UNRESERVED_KEYWORD) PG_KEYWORD("collate", K_COLLATE, UNRESERVED_KEYWORD) PG_KEYWORD("column", K_COLUMN, UNRESERVED_KEYWORD) PG_KEYWORD("column_name", K_COLUMN_NAME, UNRESERVED_KEYWORD) + PG_KEYWORD("commit", K_COMMIT, UNRESERVED_KEYWORD) PG_KEYWORD("constant", K_CONSTANT, UNRESERVED_KEYWORD) PG_KEYWORD("constraint", K_CONSTRAINT, UNRESERVED_KEYWORD) PG_KEYWORD("constraint_name", K_CONSTRAINT_NAME, UNRESERVED_KEYWORD) @@ -117,6 +119,7 @@ static const ScanKeyword unreserved_keywords[] = { PG_KEYWORD("default", K_DEFAULT, UNRESERVED_KEYWORD) PG_KEYWORD("detail", K_DETAIL, UNRESERVED_KEYWORD) PG_KEYWORD("diagnostics", K_DIAGNOSTICS, UNRESERVED_KEYWORD) + PG_KEYWORD("do", K_DO, UNRESERVED_KEYWORD) PG_KEYWORD("dump", K_DUMP, UNRESERVED_KEYWORD) PG_KEYWORD("elseif", K_ELSIF, UNRESERVED_KEYWORD) PG_KEYWORD("elsif", K_ELSIF, UNRESERVED_KEYWORD) @@ -154,15 +157,18 @@ static const ScanKeyword unreserved_keywords[] = { PG_KEYWORD("query", K_QUERY, UNRESERVED_KEYWORD) PG_KEYWORD("raise", K_RAISE, UNRESERVED_KEYWORD) PG_KEYWORD("relative", K_RELATIVE, UNRESERVED_KEYWORD) + PG_KEYWORD("reset", K_RESET, UNRESERVED_KEYWORD) PG_KEYWORD("result_oid", K_RESULT_OID, UNRESERVED_KEYWORD) PG_KEYWORD("return", K_RETURN, UNRESERVED_KEYWORD) PG_KEYWORD("returned_sqlstate", K_RETURNED_SQLSTATE, UNRESERVED_KEYWORD) PG_KEYWORD("reverse", K_REVERSE, UNRESERVED_KEYWORD) + PG_KEYWORD("rollback", K_ROLLBACK, UNRESERVED_KEYWORD) PG_KEYWORD("row_count", K_ROW_COUNT, UNRESERVED_KEYWORD) PG_KEYWORD("rowtype", K_ROWTYPE, UNRESERVED_KEYWORD) PG_KEYWORD("schema", K_SCHEMA, UNRESERVED_KEYWORD) PG_KEYWORD("schema_name", K_SCHEMA_NAME, UNRESERVED_KEYWORD) PG_KEYWORD("scroll", K_SCROLL, UNRESERVED_KEYWORD) + PG_KEYWORD("set", K_SET, UNRESERVED_KEYWORD) PG_KEYWORD("slice", K_SLICE, UNRESERVED_KEYWORD) 
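With the validator change just above, a PL/pgSQL function argument may be declared as record, not only as a polymorphic pseudotype. A hedged example (the function name is chosen for illustration only):

    CREATE FUNCTION describe_row(r record) RETURNS text
    LANGUAGE plpgsql
    AS $$
    BEGIN
        RETURN format('row value: %s', r);
    END;
    $$;

    SELECT describe_row(ROW(1, 'abc'));
    SELECT describe_row(c.*) FROM pg_class AS c LIMIT 1;
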
PG_KEYWORD("sqlstate", K_SQLSTATE, UNRESERVED_KEYWORD) PG_KEYWORD("stacked", K_STACKED, UNRESERVED_KEYWORD) diff --git a/src/pl/plpgsql/src/plpgsql.h b/src/pl/plpgsql/src/plpgsql.h index 2b19948562..f6c35a5049 100644 --- a/src/pl/plpgsql/src/plpgsql.h +++ b/src/pl/plpgsql/src/plpgsql.h @@ -3,7 +3,7 @@ * plpgsql.h - Definitions for the PL/pgSQL * procedural language * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * @@ -20,6 +20,8 @@ #include "commands/event_trigger.h" #include "commands/trigger.h" #include "executor/spi.h" +#include "utils/expandedrecord.h" + /********************************************************************** * Definitions @@ -37,10 +39,9 @@ */ typedef enum PLpgSQL_nsitem_type { - PLPGSQL_NSTYPE_LABEL, - PLPGSQL_NSTYPE_VAR, - PLPGSQL_NSTYPE_ROW, - PLPGSQL_NSTYPE_REC + PLPGSQL_NSTYPE_LABEL, /* block label */ + PLPGSQL_NSTYPE_VAR, /* scalar variable */ + PLPGSQL_NSTYPE_REC /* composite variable */ } PLpgSQL_nsitem_type; /* @@ -63,18 +64,36 @@ typedef enum PLpgSQL_datum_type PLPGSQL_DTYPE_REC, PLPGSQL_DTYPE_RECFIELD, PLPGSQL_DTYPE_ARRAYELEM, - PLPGSQL_DTYPE_EXPR + PLPGSQL_DTYPE_PROMISE } PLpgSQL_datum_type; +/* + * DTYPE_PROMISE datums have these possible ways of computing the promise + */ +typedef enum PLpgSQL_promise_type +{ + PLPGSQL_PROMISE_NONE = 0, /* not a promise, or promise satisfied */ + PLPGSQL_PROMISE_TG_NAME, + PLPGSQL_PROMISE_TG_WHEN, + PLPGSQL_PROMISE_TG_LEVEL, + PLPGSQL_PROMISE_TG_OP, + PLPGSQL_PROMISE_TG_RELID, + PLPGSQL_PROMISE_TG_TABLE_NAME, + PLPGSQL_PROMISE_TG_TABLE_SCHEMA, + PLPGSQL_PROMISE_TG_NARGS, + PLPGSQL_PROMISE_TG_ARGV, + PLPGSQL_PROMISE_TG_EVENT, + PLPGSQL_PROMISE_TG_TAG +} PLpgSQL_promise_type; + /* * Variants distinguished in PLpgSQL_type structs */ typedef enum PLpgSQL_type_type { PLPGSQL_TTYPE_SCALAR, /* scalar types and domains */ - PLPGSQL_TTYPE_ROW, /* composite types */ - PLPGSQL_TTYPE_REC, /* RECORD pseudotype */ - PLPGSQL_TTYPE_PSEUDO /* other pseudotypes */ + PLPGSQL_TTYPE_REC, /* composite types, including RECORD */ + PLPGSQL_TTYPE_PSEUDO /* pseudotypes */ } PLpgSQL_type_type; /* @@ -105,7 +124,11 @@ typedef enum PLpgSQL_stmt_type PLPGSQL_STMT_OPEN, PLPGSQL_STMT_FETCH, PLPGSQL_STMT_CLOSE, - PLPGSQL_STMT_PERFORM + PLPGSQL_STMT_PERFORM, + PLPGSQL_STMT_CALL, + PLPGSQL_STMT_COMMIT, + PLPGSQL_STMT_ROLLBACK, + PLPGSQL_STMT_SET } PLpgSQL_stmt_type; /* @@ -181,45 +204,16 @@ typedef struct PLpgSQL_type int16 typlen; /* stuff copied from its pg_type entry */ bool typbyval; char typtype; - Oid typrelid; Oid collation; /* from pg_type, but can be overridden */ bool typisarray; /* is "true" array, or domain over one */ int32 atttypmod; /* typmod (taken from someplace else) */ } PLpgSQL_type; -/* - * Generic datum array item - * - * PLpgSQL_datum is the common supertype for PLpgSQL_expr, PLpgSQL_var, - * PLpgSQL_row, PLpgSQL_rec, PLpgSQL_recfield, and PLpgSQL_arrayelem - */ -typedef struct PLpgSQL_datum -{ - PLpgSQL_datum_type dtype; - int dno; -} PLpgSQL_datum; - -/* - * Scalar or composite variable - * - * The variants PLpgSQL_var, PLpgSQL_row, and PLpgSQL_rec share these - * fields - */ -typedef struct PLpgSQL_variable -{ - PLpgSQL_datum_type dtype; - int dno; - char *refname; - int lineno; -} PLpgSQL_variable; - /* * SQL Query to plan and execute */ typedef struct PLpgSQL_expr { - PLpgSQL_datum_type dtype; - int dno; char *query; SPIPlanPtr plan; Bitmapset *paramnos; /* 
all dnos referenced by this query */ @@ -248,8 +242,45 @@ typedef struct PLpgSQL_expr LocalTransactionId expr_simple_lxid; } PLpgSQL_expr; +/* + * Generic datum array item + * + * PLpgSQL_datum is the common supertype for PLpgSQL_var, PLpgSQL_row, + * PLpgSQL_rec, PLpgSQL_recfield, and PLpgSQL_arrayelem. + */ +typedef struct PLpgSQL_datum +{ + PLpgSQL_datum_type dtype; + int dno; +} PLpgSQL_datum; + +/* + * Scalar or composite variable + * + * The variants PLpgSQL_var, PLpgSQL_row, and PLpgSQL_rec share these + * fields. + */ +typedef struct PLpgSQL_variable +{ + PLpgSQL_datum_type dtype; + int dno; + char *refname; + int lineno; + bool isconst; + bool notnull; + PLpgSQL_expr *default_val; +} PLpgSQL_variable; + /* * Scalar variable + * + * DTYPE_VAR and DTYPE_PROMISE datums both use this struct type. + * A PROMISE datum works exactly like a VAR datum for most purposes, + * but if it is read without having previously been assigned to, then + * a special "promised" value is computed and assigned to the datum + * before the read is performed. This technique avoids the overhead of + * computing the variable's value in cases where we expect that many + * functions will never read it. */ typedef struct PLpgSQL_var { @@ -257,22 +288,53 @@ typedef struct PLpgSQL_var int dno; char *refname; int lineno; + bool isconst; + bool notnull; + PLpgSQL_expr *default_val; + /* end of PLpgSQL_variable fields */ PLpgSQL_type *datatype; - int isconst; - int notnull; - PLpgSQL_expr *default_val; + + /* + * Variables declared as CURSOR FOR are mostly like ordinary + * scalar variables of type refcursor, but they have these additional + * properties: + */ PLpgSQL_expr *cursor_explicit_expr; int cursor_explicit_argrow; int cursor_options; + /* Fields below here can change at runtime */ + Datum value; bool isnull; bool freeval; + + /* + * The promise field records which "promised" value to assign if the + * promise must be honored. If it's a normal variable, or the promise has + * been fulfilled, this is PLPGSQL_PROMISE_NONE. + */ + PLpgSQL_promise_type promise; } PLpgSQL_var; /* - * Row variable + * Row variable - this represents one or more variables that are listed in an + * INTO clause, FOR-loop targetlist, cursor argument list, etc. We also use + * a row to represent a function's OUT parameters when there's more than one. + * + * Note that there's no way to name the row as such from PL/pgSQL code, + * so many functions don't need to support these. + * + * That also means that there's no real name for the row variable, so we + * conventionally set refname to "(unnamed row)". We could leave it NULL, + * but it's too convenient to be able to assume that refname is valid in + * all variants of PLpgSQL_variable. + * + * isconst, notnull, and default_val are unsupported (and hence + * always zero/null) for a row. The member variables of a row should have + * been checked to be writable at compile time, so isconst is correctly set + * to false. notnull and default_val aren't applicable. */ typedef struct PLpgSQL_row { @@ -280,22 +342,25 @@ typedef struct PLpgSQL_row int dno; char *refname; int lineno; - - /* Note: TupleDesc is only set up for named rowtypes, else it is NULL. */ - TupleDesc rowtupdesc; + bool isconst; + bool notnull; + PLpgSQL_expr *default_val; + /* end of PLpgSQL_variable fields */ /* - * Note: if the underlying rowtype contains a dropped column, the - * corresponding fieldnames[] entry will be NULL, and there is no - * corresponding var (varnos[] will be -1). 
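The promise mechanism described in the comment above replaces the per-trigger dnos that used to live in PLpgSQL_function: TG_OP, TG_TABLE_NAME and the other trigger variables become ordinary datums whose values are computed only when first read. A sketch of a trigger that touches two of them (table and trigger names are illustrative):

    CREATE TABLE audited (x integer);

    CREATE FUNCTION audit_trg() RETURNS trigger
    LANGUAGE plpgsql
    AS $$
    BEGIN
        -- TG_OP and TG_TABLE_NAME are DTYPE_PROMISE datums; their values are
        -- materialized here, on first read, rather than at function entry
        RAISE NOTICE '% on %', TG_OP, TG_TABLE_NAME;
        RETURN NEW;
    END;
    $$;

    CREATE TRIGGER audited_trg
        BEFORE INSERT ON audited
        FOR EACH ROW EXECUTE FUNCTION audit_trg();
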
+ * rowtupdesc is only set up if we might need to convert the row into a + * composite datum, which currently only happens for OUT parameters. + * Otherwise it is NULL. */ + TupleDesc rowtupdesc; + int nfields; char **fieldnames; int *varnos; } PLpgSQL_row; /* - * Record variable (non-fixed structure) + * Record variable (any composite type, including RECORD) */ typedef struct PLpgSQL_rec { @@ -303,11 +368,20 @@ typedef struct PLpgSQL_rec int dno; char *refname; int lineno; + bool isconst; + bool notnull; + PLpgSQL_expr *default_val; + /* end of PLpgSQL_variable fields */ - HeapTuple tup; - TupleDesc tupdesc; - bool freetup; - bool freetupdesc; + PLpgSQL_type *datatype; /* can be NULL, if rectypeid is RECORDOID */ + Oid rectypeid; /* declared type of variable */ + /* RECFIELDs for this record are chained together for easy access */ + int firstfield; /* dno of first RECFIELD, or -1 if none */ + + /* Fields below here can change at runtime */ + + /* We always store record variables as "expanded" records */ + ExpandedRecordHeader *erh; } PLpgSQL_rec; /* @@ -317,8 +391,14 @@ typedef struct PLpgSQL_recfield { PLpgSQL_datum_type dtype; int dno; - char *fieldname; + /* end of PLpgSQL_datum fields */ + + char *fieldname; /* name of field */ int recparentno; /* dno of parent record */ + int nextfield; /* dno of next child, or -1 if none */ + uint64 rectupledescid; /* record's tupledesc ID as of last lookup */ + ExpandedRecordFieldInfo finfo; /* field's attnum and type info */ + /* if rectupledescid == INVALID_TUPLEDESC_IDENTIFIER, finfo isn't valid */ } PLpgSQL_recfield; /* @@ -328,6 +408,8 @@ typedef struct PLpgSQL_arrayelem { PLpgSQL_datum_type dtype; int dno; + /* end of PLpgSQL_datum fields */ + PLpgSQL_expr *subscript; int arrayparentno; /* dno of parent array variable */ @@ -407,8 +489,8 @@ typedef struct PLpgSQL_stmt_block int lineno; char *label; List *body; /* List of statements */ - int n_initvars; - int *initvarnos; + int n_initvars; /* Length of initvarnos[] */ + int *initvarnos; /* dnos of variables declared in this block */ PLpgSQL_exception_block *exceptions; } PLpgSQL_stmt_block; @@ -433,6 +515,46 @@ typedef struct PLpgSQL_stmt_perform PLpgSQL_expr *expr; } PLpgSQL_stmt_perform; +/* + * CALL statement + */ +typedef struct PLpgSQL_stmt_call +{ + PLpgSQL_stmt_type cmd_type; + int lineno; + PLpgSQL_expr *expr; + bool is_call; + PLpgSQL_variable *target; +} PLpgSQL_stmt_call; + +/* + * COMMIT statement + */ +typedef struct PLpgSQL_stmt_commit +{ + PLpgSQL_stmt_type cmd_type; + int lineno; +} PLpgSQL_stmt_commit; + +/* + * ROLLBACK statement + */ +typedef struct PLpgSQL_stmt_rollback +{ + PLpgSQL_stmt_type cmd_type; + int lineno; +} PLpgSQL_stmt_rollback; + +/* + * SET statement + */ +typedef struct PLpgSQL_stmt_set +{ + PLpgSQL_stmt_type cmd_type; + int lineno; + PLpgSQL_expr *expr; +} PLpgSQL_stmt_set; + /* * GET DIAGNOSTICS item */ @@ -549,8 +671,7 @@ typedef struct PLpgSQL_stmt_forq PLpgSQL_stmt_type cmd_type; int lineno; char *label; - PLpgSQL_rec *rec; - PLpgSQL_row *row; + PLpgSQL_variable *var; /* Loop variable (record or row) */ List *body; /* List of statements */ } PLpgSQL_stmt_forq; @@ -562,8 +683,7 @@ typedef struct PLpgSQL_stmt_fors PLpgSQL_stmt_type cmd_type; int lineno; char *label; - PLpgSQL_rec *rec; - PLpgSQL_row *row; + PLpgSQL_variable *var; /* Loop variable (record or row) */ List *body; /* List of statements */ /* end of fields that must match PLpgSQL_stmt_forq */ PLpgSQL_expr *query; @@ -577,8 +697,7 @@ typedef struct PLpgSQL_stmt_forc PLpgSQL_stmt_type cmd_type; 
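Under the reworked PLpgSQL_rec above, variables of a named composite type and of RECORD share one representation (an expanded record), with field access going through chained PLpgSQL_recfield entries. A small usage sketch, assuming a placeholder emp table:

    CREATE TABLE emp (id integer, name text);

    DO $$
    DECLARE
        e emp;        -- composite-type variable: a PLPGSQL_DTYPE_REC datum
        r record;     -- RECORD variable: same representation, rectypeid = RECORDOID
    BEGIN
        SELECT 1, 'Alice' INTO e;
        r := e;                          -- record-to-record assignment
        RAISE NOTICE 'id=% name=%', e.id, r.name;
    END;
    $$;
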
int lineno; char *label; - PLpgSQL_rec *rec; - PLpgSQL_row *row; + PLpgSQL_variable *var; /* Loop variable (record or row) */ List *body; /* List of statements */ /* end of fields that must match PLpgSQL_stmt_forq */ int curvar; @@ -593,8 +712,7 @@ typedef struct PLpgSQL_stmt_dynfors PLpgSQL_stmt_type cmd_type; int lineno; char *label; - PLpgSQL_rec *rec; - PLpgSQL_row *row; + PLpgSQL_variable *var; /* Loop variable (record or row) */ List *body; /* List of statements */ /* end of fields that must match PLpgSQL_stmt_forq */ PLpgSQL_expr *query; @@ -624,7 +742,6 @@ typedef struct PLpgSQL_stmt_open int lineno; int curvar; int cursor_options; - PLpgSQL_row *returntype; PLpgSQL_expr *argquery; PLpgSQL_expr *query; PLpgSQL_expr *dynquery; @@ -638,8 +755,7 @@ typedef struct PLpgSQL_stmt_fetch { PLpgSQL_stmt_type cmd_type; int lineno; - PLpgSQL_rec *rec; /* target, as record or row */ - PLpgSQL_row *row; + PLpgSQL_variable *target; /* target (record or row) */ int curvar; /* cursor variable to fetch from */ FetchDirection direction; /* fetch direction */ long how_many; /* count, if constant (expr is NULL) */ @@ -750,8 +866,7 @@ typedef struct PLpgSQL_stmt_execsql * mod_stmt is set when we plan the query */ bool into; /* INTO supplied? */ bool strict; /* INTO STRICT flag */ - PLpgSQL_rec *rec; /* INTO target, if record */ - PLpgSQL_row *row; /* INTO target, if row */ + PLpgSQL_variable *target; /* INTO target (record or row) */ } PLpgSQL_stmt_execsql; /* @@ -764,8 +879,7 @@ typedef struct PLpgSQL_stmt_dynexecute PLpgSQL_expr *query; /* string expression */ bool into; /* INTO supplied? */ bool strict; /* INTO STRICT flag */ - PLpgSQL_rec *rec; /* INTO target, if record */ - PLpgSQL_row *row; /* INTO target, if row */ + PLpgSQL_variable *target; /* INTO target (record or row) */ List *params; /* USING expressions */ } PLpgSQL_stmt_dynexecute; @@ -830,8 +944,10 @@ typedef struct PLpgSQL_function int fn_rettyplen; bool fn_retbyval; bool fn_retistuple; + bool fn_retisdomain; bool fn_retset; bool fn_readonly; + char fn_prokind; int fn_nargs; int fn_argvarnos[FUNC_MAX_ARGS]; @@ -839,20 +955,6 @@ typedef struct PLpgSQL_function int found_varno; int new_varno; int old_varno; - int tg_name_varno; - int tg_when_varno; - int tg_level_varno; - int tg_op_varno; - int tg_relid_varno; - int tg_relname_varno; - int tg_table_name_varno; - int tg_table_schema_varno; - int tg_nargs_varno; - int tg_argv_varno; - - /* for event triggers */ - int tg_event_varno; - int tg_tag_varno; PLpgSQL_resolve_option resolve_option; @@ -865,7 +967,7 @@ typedef struct PLpgSQL_function /* the datums representing the function's local variables */ int ndatums; PLpgSQL_datum **datums; - Bitmapset *resettable_datums; /* dnos of non-simple vars */ + Size copiable_size; /* space for locally instantiated datums */ /* function body parsetree */ PLpgSQL_stmt_block *action; @@ -882,6 +984,9 @@ typedef struct PLpgSQL_execstate { PLpgSQL_function *func; /* function being executed */ + TriggerData *trigdata; /* if regular trigger, data about firing */ + EventTriggerData *evtrigdata; /* if event trigger, data about firing */ + Datum retval; bool retisnull; Oid rettype; /* type of current retval */ @@ -891,25 +996,38 @@ typedef struct PLpgSQL_execstate bool retisset; bool readonly_func; + bool atomic; - TupleDesc rettupdesc; char *exitlabel; /* the "target" label of the current EXIT or * CONTINUE stmt, if any */ ErrorData *cur_error; /* current exception handler's error */ Tuplestorestate *tuple_store; /* SRFs accumulate results here */ + TupleDesc 
tuple_store_desc; /* descriptor for tuples in tuple_store */ MemoryContext tuple_store_cxt; ResourceOwner tuple_store_owner; ReturnSetInfo *rsi; - /* the datums representing the function's local variables */ int found_varno; + + /* + * The datums representing the function's local variables. Some of these + * are local storage in this execstate, but some just point to the shared + * copy belonging to the PLpgSQL_function, depending on whether or not we + * need any per-execution state for the datum's dtype. + */ int ndatums; PLpgSQL_datum **datums; + /* context containing variable values (same as func's SPI_proc context) */ + MemoryContext datum_context; - /* we pass datums[i] to the executor, when needed, in paramLI->params[i] */ + /* + * paramLI is what we use to pass local variable values to the executor. + * It does not have a ParamExternData array; we just dynamically + * instantiate parameter data as needed. By convention, PARAM_EXTERN + * Params have paramid equal to the dno of the referenced local variable. + */ ParamListInfo paramLI; - bool params_dirty; /* T if any resettable datum has been passed */ /* EState to use for "simple" expression evaluation */ EState *simple_eval_estate; @@ -1022,10 +1140,12 @@ extern bool plpgsql_print_strict_params; extern bool plpgsql_check_asserts; -/* extra compile-time checks */ -#define PLPGSQL_XCHECK_NONE 0 -#define PLPGSQL_XCHECK_SHADOWVAR 1 -#define PLPGSQL_XCHECK_ALL ((int) ~0) +/* extra compile-time and run-time checks */ +#define PLPGSQL_XCHECK_NONE 0 +#define PLPGSQL_XCHECK_SHADOWVAR (1 << 1) +#define PLPGSQL_XCHECK_TOOMANYROWS (1 << 2) +#define PLPGSQL_XCHECK_STRICTMULTIASSIGNMENT (1 << 3) +#define PLPGSQL_XCHECK_ALL ((int) ~0) extern int plpgsql_extra_warnings; extern int plpgsql_extra_errors; @@ -1073,7 +1193,10 @@ extern PLpgSQL_variable *plpgsql_build_variable(const char *refname, int lineno, PLpgSQL_type *dtype, bool add2namespace); extern PLpgSQL_rec *plpgsql_build_record(const char *refname, int lineno, + PLpgSQL_type *dtype, Oid rectypeid, bool add2namespace); +extern PLpgSQL_recfield *plpgsql_build_recfield(PLpgSQL_rec *rec, + const char *fldname); extern int plpgsql_recognize_err_condition(const char *condname, bool allow_sqlstate); extern PLpgSQL_condition *plpgsql_parse_err_condition(char *condname); @@ -1091,7 +1214,8 @@ extern void _PG_init(void); */ extern Datum plpgsql_exec_function(PLpgSQL_function *func, FunctionCallInfo fcinfo, - EState *simple_eval_estate); + EState *simple_eval_estate, + bool atomic); extern HeapTuple plpgsql_exec_trigger(PLpgSQL_function *func, TriggerData *trigdata); extern void plpgsql_exec_event_trigger(PLpgSQL_function *func, diff --git a/src/pl/plpgsql/src/po/de.po b/src/pl/plpgsql/src/po/de.po index 7cb006ff7b..f7baa228f2 100644 --- a/src/pl/plpgsql/src/po/de.po +++ b/src/pl/plpgsql/src/po/de.po @@ -1,16 +1,16 @@ # German message translation file for plpgsql -# Copyright (C) 2009 - 2015 PostgreSQL Global Development Group +# Copyright (C) 2009 - 2018 PostgreSQL Global Development Group # This file is distributed under the same license as the PostgreSQL package. -# Peter Eisentraut , 2009 - 2015. +# Peter Eisentraut , 2009 - 2018. 
# # Use these quotes: »%s« # msgid "" msgstr "" -"Project-Id-Version: PostgreSQL 9.5\n" +"Project-Id-Version: PostgreSQL 11\n" "Report-Msgid-Bugs-To: pgsql-bugs@postgresql.org\n" -"POT-Creation-Date: 2016-04-28 16:07+0000\n" -"PO-Revision-Date: 2016-04-29 13:20-0400\n" +"POT-Creation-Date: 2018-05-07 00:38+0000\n" +"PO-Revision-Date: 2018-05-06 21:25-0400\n" "Last-Translator: Peter Eisentraut \n" "Language-Team: German \n" "Language: de\n" @@ -19,149 +19,144 @@ msgstr "" "Content-Transfer-Encoding: 8bit\n" "Plural-Forms: nplurals=2; plural=n != 1;\n" -#: pl_comp.c:432 pl_handler.c:448 +#: pl_comp.c:434 pl_handler.c:457 #, c-format msgid "PL/pgSQL functions cannot accept type %s" msgstr "PL/pgSQL-Funktionen können Typ %s nicht annehmen" -#: pl_comp.c:513 +#: pl_comp.c:522 #, c-format msgid "could not determine actual return type for polymorphic function \"%s\"" msgstr "konnte den tatsächlichen Rückgabetyp der polymorphischen Funktion »%s« nicht ermitteln" -#: pl_comp.c:543 +#: pl_comp.c:552 #, c-format msgid "trigger functions can only be called as triggers" msgstr "Triggerfunktionen können nur als Trigger aufgerufen werden" -#: pl_comp.c:547 pl_handler.c:433 +#: pl_comp.c:556 pl_handler.c:441 #, c-format msgid "PL/pgSQL functions cannot return type %s" msgstr "PL/pgSQL-Funktionen können keinen Rückgabetyp %s haben" -#: pl_comp.c:588 +#: pl_comp.c:595 #, c-format msgid "trigger functions cannot have declared arguments" msgstr "Triggerfunktionen können keine deklarierten Argumente haben" -#: pl_comp.c:589 +#: pl_comp.c:596 #, c-format msgid "The arguments of the trigger can be accessed through TG_NARGS and TG_ARGV instead." msgstr "Auf die Argumente des Triggers kann stattdessen über TG_NARGS und TG_ARGV zugegriffen werden." -#: pl_comp.c:691 +#: pl_comp.c:719 #, c-format msgid "event trigger functions cannot have declared arguments" msgstr "Ereignistriggerfunktionen können keine deklarierten Argumente haben" -#: pl_comp.c:944 +#: pl_comp.c:976 #, c-format msgid "compilation of PL/pgSQL function \"%s\" near line %d" msgstr "Kompilierung der PL/pgSQL-Funktion »%s« nahe Zeile %d" -#: pl_comp.c:967 +#: pl_comp.c:999 #, c-format msgid "parameter name \"%s\" used more than once" msgstr "Parametername »%s« mehrmals angegeben" -#: pl_comp.c:1077 +#: pl_comp.c:1109 #, c-format msgid "column reference \"%s\" is ambiguous" msgstr "Spaltenverweis »%s« ist nicht eindeutig" -#: pl_comp.c:1079 +#: pl_comp.c:1111 #, c-format msgid "It could refer to either a PL/pgSQL variable or a table column." msgstr "Er könnte sich entweder auf eine PL/pgSQL-Variable oder eine Tabellenspalte beziehen." 
-#: pl_comp.c:1259 pl_comp.c:1287 pl_exec.c:4395 pl_exec.c:4744 pl_exec.c:4829 -#: pl_exec.c:4920 +#: pl_comp.c:1294 pl_exec.c:5041 pl_exec.c:5406 pl_exec.c:5493 pl_exec.c:5584 +#: pl_exec.c:6501 #, c-format msgid "record \"%s\" has no field \"%s\"" msgstr "Record »%s« hat kein Feld »%s«" -#: pl_comp.c:1818 +#: pl_comp.c:1756 #, c-format msgid "relation \"%s\" does not exist" msgstr "Relation »%s« existiert nicht" -#: pl_comp.c:1927 +#: pl_comp.c:1848 #, c-format msgid "variable \"%s\" has pseudo-type %s" msgstr "Variable »%s« hat Pseudotyp %s" -#: pl_comp.c:1994 -#, c-format -msgid "relation \"%s\" is not a table" -msgstr "Relation »%s« ist keine Tabelle" - -#: pl_comp.c:2154 +#: pl_comp.c:2026 #, c-format msgid "type \"%s\" is only a shell" msgstr "Typ »%s« ist nur eine Hülle" -#: pl_comp.c:2243 pl_comp.c:2296 +#: pl_comp.c:2123 pl_comp.c:2176 #, c-format msgid "unrecognized exception condition \"%s\"" msgstr "unbekannte Ausnahmebedingung »%s«" -#: pl_comp.c:2503 +#: pl_comp.c:2390 #, c-format msgid "could not determine actual argument type for polymorphic function \"%s\"" msgstr "konnte den tatsächlichen Argumenttyp der polymorphischen Funktion »%s« nicht ermitteln" -#: pl_exec.c:324 pl_exec.c:612 pl_exec.c:872 +#: pl_exec.c:473 pl_exec.c:885 pl_exec.c:1098 msgid "during initialization of execution state" msgstr "bei der Initialisierung des Ausführungszustandes" -#: pl_exec.c:331 +#: pl_exec.c:479 msgid "while storing call arguments into local variables" msgstr "beim Abspeichern der Aufrufargumente in lokale Variablen" -#: pl_exec.c:416 pl_exec.c:760 +#: pl_exec.c:567 pl_exec.c:933 msgid "during function entry" msgstr "beim Eintritts in die Funktion" -#: pl_exec.c:441 +#: pl_exec.c:592 #, c-format msgid "control reached end of function without RETURN" msgstr "Kontrollfluss erreichte das Ende der Funktion ohne RETURN" -#: pl_exec.c:448 +#: pl_exec.c:599 msgid "while casting return value to function's return type" msgstr "bei der Umwandlung des Rückgabewerts in den Rückgabetyp der Funktion" -#: pl_exec.c:461 pl_exec.c:2938 +#: pl_exec.c:612 pl_exec.c:3484 #, c-format msgid "set-valued function called in context that cannot accept a set" msgstr "Funktion mit Mengenergebnis in einem Zusammenhang aufgerufen, der keine Mengenergebnisse verarbeiten kann" -#: pl_exec.c:499 pl_exec.c:2779 -msgid "returned record type does not match expected record type" -msgstr "zurückgegebener Record-Typ stimmt nicht mit erwartetem Record-Typ überein" - -#: pl_exec.c:554 pl_exec.c:789 pl_exec.c:907 +#: pl_exec.c:738 pl_exec.c:962 pl_exec.c:1123 msgid "during function exit" msgstr "beim Verlassen der Funktion" -#: pl_exec.c:785 pl_exec.c:903 +#: pl_exec.c:793 pl_exec.c:832 pl_exec.c:3329 +msgid "returned record type does not match expected record type" +msgstr "zurückgegebener Record-Typ stimmt nicht mit erwartetem Record-Typ überein" + +#: pl_exec.c:958 pl_exec.c:1119 #, c-format msgid "control reached end of trigger procedure without RETURN" msgstr "Kontrollfluss erreichte das Ende der Triggerprozedur ohne RETURN" -#: pl_exec.c:794 +#: pl_exec.c:967 #, c-format msgid "trigger procedure cannot return a set" msgstr "Triggerprozedur kann keine Ergebnismenge zurückgeben" -#: pl_exec.c:816 +#: pl_exec.c:1006 pl_exec.c:1034 msgid "returned row structure does not match the structure of the triggering table" msgstr "zurückgegebene Zeilenstruktur stimmt nicht mit der Struktur der Tabelle, die den Trigger ausgelöst hat, überein" #. translator: last %s is a phrase such as "during statement block #. 
local variable initialization" #. -#: pl_exec.c:954 +#: pl_exec.c:1171 #, c-format msgid "PL/pgSQL function %s line %d %s" msgstr "PL/pgSQL-Funktion %s Zeile %d %s" @@ -169,654 +164,657 @@ msgstr "PL/pgSQL-Funktion %s Zeile %d %s" #. translator: last %s is a phrase such as "while storing call #. arguments into local variables" #. -#: pl_exec.c:965 +#: pl_exec.c:1182 #, c-format msgid "PL/pgSQL function %s %s" msgstr "PL/pgSQL-Funktion %s %s" #. translator: last %s is a plpgsql statement type name -#: pl_exec.c:973 +#: pl_exec.c:1190 #, c-format msgid "PL/pgSQL function %s line %d at %s" msgstr "PL/pgSQL-Funktion %s Zeile %d bei %s" -#: pl_exec.c:979 +#: pl_exec.c:1196 #, c-format msgid "PL/pgSQL function %s" msgstr "PL/pgSQL-Funktion %s" -#: pl_exec.c:1089 +#: pl_exec.c:1534 msgid "during statement block local variable initialization" msgstr "bei der Initialisierung der lokalen Variablen des Anweisungsblocks" -#: pl_exec.c:1128 -#, c-format -msgid "variable \"%s\" declared NOT NULL cannot default to NULL" -msgstr "Variable »%s« ist als NOT NULL deklariert und kann daher nicht den Ausgangswert NULL haben" - -#: pl_exec.c:1178 +#: pl_exec.c:1632 msgid "during statement block entry" msgstr "beim Eintreten in den Anweisungsblock" -#: pl_exec.c:1199 +#: pl_exec.c:1664 msgid "during statement block exit" msgstr "beim Verlassen des Anweisungsblocks" -#: pl_exec.c:1242 +#: pl_exec.c:1702 msgid "during exception cleanup" msgstr "beim Aufräumen der Ausnahme" -#: pl_exec.c:1593 +#: pl_exec.c:2207 pl_exec.c:2221 +#, c-format +msgid "argument %d is an output argument but is not writable" +msgstr "Argument %d ist ein Ausgabeargument aber ist nicht schreibbar" + +#: pl_exec.c:2263 #, c-format msgid "GET STACKED DIAGNOSTICS cannot be used outside an exception handler" msgstr "GET STACKED DIAGNOSTICS kann nicht außerhalb einer Ausnahmebehandlung verwendet werden" -#: pl_exec.c:1789 +#: pl_exec.c:2468 #, c-format msgid "case not found" msgstr "Fall nicht gefunden" -#: pl_exec.c:1790 +#: pl_exec.c:2469 #, c-format msgid "CASE statement is missing ELSE part." msgstr "Der CASE-Anweisung fehlt ein ELSE-Teil." 
-#: pl_exec.c:1944 +#: pl_exec.c:2562 #, c-format msgid "lower bound of FOR loop cannot be null" msgstr "Untergrenze einer FOR-Schleife darf nicht NULL sein" -#: pl_exec.c:1960 +#: pl_exec.c:2578 #, c-format msgid "upper bound of FOR loop cannot be null" msgstr "Obergrenze einer FOR-Schleife darf nicht NULL sein" -#: pl_exec.c:1978 +#: pl_exec.c:2596 #, c-format msgid "BY value of FOR loop cannot be null" msgstr "BY-Wert einer FOR-Schleife darf nicht NULL sein" -#: pl_exec.c:1984 +#: pl_exec.c:2602 #, c-format msgid "BY value of FOR loop must be greater than zero" msgstr "BY-Wert einer FOR-Schleife muss größer als null sein" -#: pl_exec.c:2153 pl_exec.c:3912 +#: pl_exec.c:2736 pl_exec.c:4471 #, c-format msgid "cursor \"%s\" already in use" msgstr "Cursor »%s« ist bereits in Verwendung" -#: pl_exec.c:2176 pl_exec.c:3974 +#: pl_exec.c:2759 pl_exec.c:4536 #, c-format msgid "arguments given for cursor without arguments" msgstr "einem Cursor ohne Argumente wurden Argumente übergeben" -#: pl_exec.c:2195 pl_exec.c:3993 +#: pl_exec.c:2778 pl_exec.c:4555 #, c-format msgid "arguments required for cursor" msgstr "Cursor benötigt Argumente" -#: pl_exec.c:2280 +#: pl_exec.c:2865 #, c-format msgid "FOREACH expression must not be null" msgstr "FOREACH-Ausdruck darf nicht NULL sein" -#: pl_exec.c:2286 +#: pl_exec.c:2880 #, c-format msgid "FOREACH expression must yield an array, not type %s" msgstr "FOREACH-Ausdruck muss ein Array ergeben, nicht Typ %s" -#: pl_exec.c:2303 +#: pl_exec.c:2897 #, c-format msgid "slice dimension (%d) is out of the valid range 0..%d" msgstr "Slice-Dimension (%d) ist außerhalb des gültigen Bereichs 0..%d" -#: pl_exec.c:2330 +#: pl_exec.c:2924 #, c-format msgid "FOREACH ... SLICE loop variable must be of an array type" msgstr "FOREACH ... SLICE Schleifenvariable muss einen Arraytyp haben" -#: pl_exec.c:2334 +#: pl_exec.c:2928 #, c-format msgid "FOREACH loop variable must not be of an array type" msgstr "FOREACH-Schleifenvariable darf keinen Array-Typ haben" -#: pl_exec.c:2522 pl_exec.c:2604 pl_exec.c:2771 +#: pl_exec.c:3090 pl_exec.c:3147 pl_exec.c:3322 #, c-format msgid "cannot return non-composite value from function returning composite type" msgstr "kann keinen nicht zusammengesetzten Wert aus einer Funktion zurückgeben, die einen zusammengesetzten Typ zurückgibt" -#: pl_exec.c:2648 pl_gram.y:3161 +#: pl_exec.c:3186 pl_gram.y:3266 #, c-format msgid "cannot use RETURN NEXT in a non-SETOF function" msgstr "RETURN NEXT kann nur in einer Funktion mit SETOF-Rückgabetyp verwendet werden" -#: pl_exec.c:2682 pl_exec.c:2813 +#: pl_exec.c:3227 pl_exec.c:3359 #, c-format msgid "wrong result type supplied in RETURN NEXT" msgstr "falscher Ergebnistyp angegeben in RETURN NEXT" -#: pl_exec.c:2711 pl_exec.c:4382 pl_exec.c:4711 pl_exec.c:4737 pl_exec.c:4803 -#: pl_exec.c:4822 pl_exec.c:4890 pl_exec.c:4913 -#, c-format -msgid "record \"%s\" is not assigned yet" -msgstr "Record »%s« hat noch keinen Wert" - -#: pl_exec.c:2713 pl_exec.c:4384 pl_exec.c:4713 pl_exec.c:4739 pl_exec.c:4805 -#: pl_exec.c:4824 pl_exec.c:4892 pl_exec.c:4915 -#, c-format -msgid "The tuple structure of a not-yet-assigned record is indeterminate." -msgstr "Die Tupelstruktur eines Records ohne Wert ist unbestimmt." 
- -#: pl_exec.c:2717 pl_exec.c:2737 +#: pl_exec.c:3265 pl_exec.c:3286 #, c-format msgid "wrong record type supplied in RETURN NEXT" msgstr "falscher Record-Typ angegeben in RETURN NEXT" -#: pl_exec.c:2832 +#: pl_exec.c:3378 #, c-format msgid "RETURN NEXT must have a parameter" msgstr "RETURN NEXT muss einen Parameter haben" -#: pl_exec.c:2865 pl_gram.y:3223 +#: pl_exec.c:3404 pl_gram.y:3329 #, c-format msgid "cannot use RETURN QUERY in a non-SETOF function" msgstr "RETURN QUERY kann nur in einer Funktion mit SETOF-Rückgabetyp verwendet werden" -#: pl_exec.c:2886 +#: pl_exec.c:3428 msgid "structure of query does not match function result type" msgstr "Struktur der Anfrage stimmt nicht mit Rückgabetyp der Funktion überein" -#: pl_exec.c:2966 pl_exec.c:3096 +#: pl_exec.c:3512 pl_exec.c:3650 #, c-format msgid "RAISE option already specified: %s" msgstr "RAISE-Option bereits angegeben: %s" -#: pl_exec.c:2999 +#: pl_exec.c:3546 #, c-format msgid "RAISE without parameters cannot be used outside an exception handler" msgstr "RAISE ohne Parameter kann nicht außerhalb einer Ausnahmebehandlung verwendet werden" -#: pl_exec.c:3086 +#: pl_exec.c:3640 #, c-format msgid "RAISE statement option cannot be null" msgstr "Option einer RAISE-Anweisung darf nicht NULL sein" -#: pl_exec.c:3155 +#: pl_exec.c:3710 #, c-format msgid "%s" msgstr "%s" -#: pl_exec.c:3226 +#: pl_exec.c:3765 #, c-format msgid "assertion failed" msgstr "Assertion fehlgeschlagen" -#: pl_exec.c:3418 pl_exec.c:3562 pl_exec.c:3751 +#: pl_exec.c:3970 pl_exec.c:4120 pl_exec.c:4308 #, c-format msgid "cannot COPY to/from client in PL/pgSQL" msgstr "COPY vom/zum Client funktioniert in PL/pgSQL nicht" -#: pl_exec.c:3422 pl_exec.c:3566 pl_exec.c:3755 +#: pl_exec.c:3975 pl_exec.c:4126 pl_exec.c:4314 #, c-format msgid "cannot begin/end transactions in PL/pgSQL" msgstr "Transaktionen können in PL/pgSQL nicht begonnen/beendet werden" -#: pl_exec.c:3423 pl_exec.c:3567 pl_exec.c:3756 +#: pl_exec.c:3976 pl_exec.c:4127 pl_exec.c:4315 #, c-format msgid "Use a BEGIN block with an EXCEPTION clause instead." msgstr "Verwenden Sie stattdessen einen BEGIN-Block mit einer EXCEPTION-Klausel." -#: pl_exec.c:3590 pl_exec.c:3780 +#: pl_exec.c:4151 pl_exec.c:4339 #, c-format msgid "INTO used with a command that cannot return data" msgstr "INTO mit einem Befehl verwendet, der keine Daten zurückgeben kann" -#: pl_exec.c:3618 pl_exec.c:3808 +#: pl_exec.c:4174 pl_exec.c:4362 #, c-format msgid "query returned no rows" msgstr "Anfrage gab keine Zeilen zurück" -#: pl_exec.c:3637 pl_exec.c:3827 +#: pl_exec.c:4193 pl_exec.c:4381 #, c-format msgid "query returned more than one row" msgstr "Anfrage gab mehr als eine Zeile zurück" -#: pl_exec.c:3654 +#: pl_exec.c:4210 #, c-format msgid "query has no destination for result data" msgstr "Anfrage hat keinen Bestimmungsort für die Ergebnisdaten" -#: pl_exec.c:3655 +#: pl_exec.c:4211 #, c-format msgid "If you want to discard the results of a SELECT, use PERFORM instead." msgstr "Wenn Sie die Ergebnisse eines SELECT verwerfen wollen, verwenden Sie stattdessen PERFORM." -#: pl_exec.c:3687 pl_exec.c:7128 +#: pl_exec.c:4244 pl_exec.c:8222 #, c-format msgid "query string argument of EXECUTE is null" msgstr "Anfrageargument von EXECUTE ist NULL" -#: pl_exec.c:3743 +#: pl_exec.c:4300 #, c-format msgid "EXECUTE of SELECT ... INTO is not implemented" msgstr "EXECUTE von SELECT ... INTO ist nicht implementiert" -#: pl_exec.c:3744 +#: pl_exec.c:4301 #, c-format msgid "You might want to use EXECUTE ... INTO or EXECUTE CREATE TABLE ... 
AS instead." msgstr "Sie könnten stattdessen EXECUTE ... INTO oder EXECUTE CREATE TABLE ... AS verwenden." -#: pl_exec.c:4056 pl_exec.c:4148 +#: pl_exec.c:4617 pl_exec.c:4705 #, c-format msgid "cursor variable \"%s\" is null" msgstr "Cursor-Variable »%s« ist NULL" -#: pl_exec.c:4063 pl_exec.c:4155 +#: pl_exec.c:4628 pl_exec.c:4716 #, c-format msgid "cursor \"%s\" does not exist" msgstr "Cursor »%s« existiert nicht" -#: pl_exec.c:4077 +#: pl_exec.c:4641 #, c-format msgid "relative or absolute cursor position is null" msgstr "relative oder absolute Cursorposition ist NULL" -#: pl_exec.c:4257 +#: pl_exec.c:4891 pl_exec.c:4986 #, c-format msgid "null value cannot be assigned to variable \"%s\" declared NOT NULL" msgstr "NULL-Wert kann der Variable »%s« nicht zugewiesen werden, weil sie als NOT NULL deklariert ist" -#: pl_exec.c:4326 +#: pl_exec.c:4967 #, c-format msgid "cannot assign non-composite value to a row variable" msgstr "nicht zusammengesetzter Wert kann nicht einer Zeilenvariable zugewiesen werden" -#: pl_exec.c:4350 +#: pl_exec.c:4999 #, c-format msgid "cannot assign non-composite value to a record variable" msgstr "nicht zusammengesetzter Wert kann nicht einer Record-Variable zugewiesen werden" -#: pl_exec.c:4493 +#: pl_exec.c:5050 +#, c-format +msgid "cannot assign to system column \"%s\"" +msgstr "kann Systemspalte »%s« keinen Wert zuweisen" + +#: pl_exec.c:5114 #, c-format msgid "number of array dimensions (%d) exceeds the maximum allowed (%d)" msgstr "Anzahl der Arraydimensionen (%d) überschreitet erlaubtes Maximum (%d)" -#: pl_exec.c:4525 +#: pl_exec.c:5146 #, c-format msgid "subscripted object is not an array" msgstr "Objekt mit Arrayindex ist kein Array" -#: pl_exec.c:4562 +#: pl_exec.c:5184 #, c-format msgid "array subscript in assignment must not be null" msgstr "Arrayindex in Zuweisung darf nicht NULL sein" -#: pl_exec.c:5029 +#: pl_exec.c:5691 #, c-format msgid "query \"%s\" did not return data" msgstr "Anfrage »%s« hat keine Daten zurückgegeben" -#: pl_exec.c:5037 +#: pl_exec.c:5699 #, c-format msgid "query \"%s\" returned %d column" msgid_plural "query \"%s\" returned %d columns" msgstr[0] "Anfrage »%s« hat %d Spalte zurückgegeben" msgstr[1] "Anfrage »%s« hat %d Spalten zurückgegeben" -#: pl_exec.c:5064 +#: pl_exec.c:5727 #, c-format msgid "query \"%s\" returned more than one row" msgstr "Anfrage »%s« hat mehr als eine Zeile zurückgegeben" -#: pl_exec.c:5128 +#: pl_exec.c:5790 #, c-format msgid "query \"%s\" is not a SELECT" msgstr "Anfrage »%s« ist kein SELECT" -#: pl_funcs.c:237 +#: pl_exec.c:6515 pl_exec.c:6555 pl_exec.c:6595 +#, c-format +msgid "type of parameter %d (%s) does not match that when preparing the plan (%s)" +msgstr "Typ von Parameter %d (%s) stimmt nicht mit dem überein, als der Plan vorbereitet worden ist (%s)" + +#: pl_exec.c:7366 +#, c-format +msgid "record \"%s\" is not assigned yet" +msgstr "Record »%s« hat noch keinen Wert" + +#: pl_exec.c:7367 +#, c-format +msgid "The tuple structure of a not-yet-assigned record is indeterminate." +msgstr "Die Tupelstruktur eines Records ohne Wert ist unbestimmt." 
+ +#: pl_funcs.c:239 msgid "statement block" msgstr "Anweisungsblock" -#: pl_funcs.c:239 +#: pl_funcs.c:241 msgid "assignment" msgstr "Zuweisung" -#: pl_funcs.c:249 +#: pl_funcs.c:251 msgid "FOR with integer loop variable" msgstr "FOR mit ganzzahliger Schleifenvariable" -#: pl_funcs.c:251 +#: pl_funcs.c:253 msgid "FOR over SELECT rows" msgstr "FOR über SELECT-Zeilen" -#: pl_funcs.c:253 +#: pl_funcs.c:255 msgid "FOR over cursor" msgstr "FOR über Cursor" -#: pl_funcs.c:255 +#: pl_funcs.c:257 msgid "FOREACH over array" msgstr "FOREACH über Array" -#: pl_funcs.c:269 +#: pl_funcs.c:271 msgid "SQL statement" msgstr "SQL-Anweisung" -#: pl_funcs.c:273 +#: pl_funcs.c:275 msgid "FOR over EXECUTE statement" msgstr "FOR-über-EXECUTE-Anweisung" -#: pl_gram.y:473 +#: pl_gram.y:485 #, c-format msgid "block label must be placed before DECLARE, not after" msgstr "Blocklabel muss vor DECLARE stehen, nicht danach" -#: pl_gram.y:493 +#: pl_gram.y:505 #, c-format msgid "collations are not supported by type %s" msgstr "Sortierfolgen werden von Typ %s nicht unterstützt" -#: pl_gram.y:508 -#, c-format -msgid "row or record variable cannot be CONSTANT" -msgstr "Zeilen- oder Record-Variable kann nicht CONSTANT sein" - -#: pl_gram.y:518 -#, c-format -msgid "row or record variable cannot be NOT NULL" -msgstr "Zeilen- oder Record-Variable kann nicht NOT NULL sein" - -#: pl_gram.y:529 +#: pl_gram.y:524 #, c-format -msgid "default value for row or record variable is not supported" -msgstr "Vorgabewerte werden für Zeilen- oder Record-Variablen nicht unterstützt" +msgid "variable \"%s\" must have a default value, since it's declared NOT NULL" +msgstr "Variable »%s« muss einen Vorgabewert haben, da sie als NOT NULL deklariert ist" -#: pl_gram.y:674 pl_gram.y:689 pl_gram.y:715 +#: pl_gram.y:669 pl_gram.y:684 pl_gram.y:710 #, c-format msgid "variable \"%s\" does not exist" msgstr "Variable »%s« existiert nicht" -#: pl_gram.y:733 pl_gram.y:761 +#: pl_gram.y:728 pl_gram.y:756 msgid "duplicate declaration" msgstr "doppelte Deklaration" -#: pl_gram.y:744 pl_gram.y:772 +#: pl_gram.y:739 pl_gram.y:767 #, c-format msgid "variable \"%s\" shadows a previously defined variable" msgstr "Variable »%s« verdeckt eine zuvor definierte Variable" -#: pl_gram.y:951 +#: pl_gram.y:983 #, c-format msgid "diagnostics item %s is not allowed in GET STACKED DIAGNOSTICS" msgstr "Diagnostikelement %s ist in GET STACKED DIAGNOSTICS nicht erlaubt" -#: pl_gram.y:969 +#: pl_gram.y:1001 #, c-format msgid "diagnostics item %s is not allowed in GET CURRENT DIAGNOSTICS" msgstr "Diagnostikelement %s ist in GET CURRENT DIAGNOSTICS nicht erlaubt" -#: pl_gram.y:1067 +#: pl_gram.y:1099 msgid "unrecognized GET DIAGNOSTICS item" msgstr "unbekanntes Element in GET DIAGNOSTICS" -#: pl_gram.y:1078 pl_gram.y:3410 +#: pl_gram.y:1109 pl_gram.y:3508 #, c-format msgid "\"%s\" is not a scalar variable" msgstr "»%s« ist keine skalare Variable" -#: pl_gram.y:1330 pl_gram.y:1524 +#: pl_gram.y:1357 pl_gram.y:1550 #, c-format -msgid "loop variable of loop over rows must be a record or row variable or list of scalar variables" -msgstr "Schleifenvariable einer Schleife über Zeilen muss eine Record-Variable oder Zeilenvariable oder eine Liste von skalaren Variablen sein" +msgid "loop variable of loop over rows must be a record variable or list of scalar variables" +msgstr "Schleifenvariable einer Schleife über Zeilen muss eine Record-Variable oder eine Liste von skalaren Variablen sein" -#: pl_gram.y:1364 +#: pl_gram.y:1391 #, c-format msgid "cursor FOR loop must have only one 
target variable" msgstr "Cursor-FOR-Schleife darf nur eine Zielvariable haben" -#: pl_gram.y:1371 +#: pl_gram.y:1398 #, c-format msgid "cursor FOR loop must use a bound cursor variable" msgstr "Cursor-FOR-Schleife muss eine gebundene Cursor-Variable verwenden" -#: pl_gram.y:1455 +#: pl_gram.y:1485 #, c-format msgid "integer FOR loop must have only one target variable" msgstr "ganzzahlige FOR-Schleife darf nur eine Zielvariable haben" -#: pl_gram.y:1491 +#: pl_gram.y:1521 #, c-format msgid "cannot specify REVERSE in query FOR loop" msgstr "REVERSE kann nicht in einer Anfrage-FOR-Schleife verwendet werden" -#: pl_gram.y:1638 +#: pl_gram.y:1652 #, c-format msgid "loop variable of FOREACH must be a known variable or list of variables" msgstr "Schleifenvariable von FOREACH muss eine bekannte Variable oder Liste von Variablen sein" -#: pl_gram.y:1679 +#: pl_gram.y:1693 #, c-format msgid "there is no label \"%s\" attached to any block or loop enclosing this statement" msgstr "diese Anweisung umschließt kein Block und keine Schleife mit Label »%s«" -#: pl_gram.y:1687 +#: pl_gram.y:1701 #, c-format msgid "block label \"%s\" cannot be used in CONTINUE" msgstr "Blocklabel »%s« kann nicht in CONTINUE verwendet werden" -#: pl_gram.y:1702 +#: pl_gram.y:1716 #, c-format msgid "EXIT cannot be used outside a loop, unless it has a label" msgstr "EXIT kann nicht außerhalb einer Schleife verwendet werden, außer wenn es ein Label hat" -#: pl_gram.y:1703 +#: pl_gram.y:1717 #, c-format msgid "CONTINUE cannot be used outside a loop" msgstr "CONTINUE kann nicht außerhalb einer Schleife verwendet werden" -#: pl_gram.y:1727 pl_gram.y:1764 pl_gram.y:1812 pl_gram.y:2863 pl_gram.y:2945 -#: pl_gram.y:3056 pl_gram.y:3812 +#: pl_gram.y:1741 pl_gram.y:1778 pl_gram.y:1826 pl_gram.y:2958 pl_gram.y:3041 +#: pl_gram.y:3152 pl_gram.y:3907 msgid "unexpected end of function definition" msgstr "unerwartetes Ende der Funktionsdefinition" -#: pl_gram.y:1832 pl_gram.y:1856 pl_gram.y:1872 pl_gram.y:1878 pl_gram.y:1992 -#: pl_gram.y:2000 pl_gram.y:2014 pl_gram.y:2109 pl_gram.y:2290 pl_gram.y:2384 -#: pl_gram.y:2535 pl_gram.y:3653 pl_gram.y:3714 pl_gram.y:3793 +#: pl_gram.y:1846 pl_gram.y:1870 pl_gram.y:1886 pl_gram.y:1892 pl_gram.y:2009 +#: pl_gram.y:2017 pl_gram.y:2031 pl_gram.y:2125 pl_gram.y:2360 pl_gram.y:2454 +#: pl_gram.y:2612 pl_gram.y:3749 pl_gram.y:3810 pl_gram.y:3888 msgid "syntax error" msgstr "Syntaxfehler" -#: pl_gram.y:1860 pl_gram.y:1862 pl_gram.y:2294 pl_gram.y:2296 +#: pl_gram.y:1874 pl_gram.y:1876 pl_gram.y:2364 pl_gram.y:2366 msgid "invalid SQLSTATE code" msgstr "ungültiger SQLSTATE-Code" -#: pl_gram.y:2056 +#: pl_gram.y:2073 msgid "syntax error, expected \"FOR\"" msgstr "Syntaxfehler, »FOR« erwartet" -#: pl_gram.y:2118 +#: pl_gram.y:2134 #, c-format msgid "FETCH statement cannot return multiple rows" msgstr "FETCH-Anweisung kann nicht mehrere Zeilen zurückgeben" -#: pl_gram.y:2174 +#: pl_gram.y:2244 #, c-format msgid "cursor variable must be a simple variable" msgstr "Cursor-Variable muss eine einfache Variable sein" -#: pl_gram.y:2180 +#: pl_gram.y:2250 #, c-format msgid "variable \"%s\" must be of type cursor or refcursor" msgstr "Variable »%s« muss Typ cursor oder refcursor haben" -#: pl_gram.y:2506 pl_gram.y:2517 +#: pl_gram.y:2583 pl_gram.y:2594 #, c-format msgid "\"%s\" is not a known variable" msgstr "»%s« ist keine bekannte Variable" -#: pl_gram.y:2621 pl_gram.y:2631 pl_gram.y:2787 +#: pl_gram.y:2698 pl_gram.y:2708 pl_gram.y:2863 msgid "mismatched parentheses" msgstr "Klammern passen nicht" -#: 
pl_gram.y:2635 +#: pl_gram.y:2712 #, c-format msgid "missing \"%s\" at end of SQL expression" msgstr "»%s« fehlt am Ende des SQL-Ausdrucks" -#: pl_gram.y:2641 +#: pl_gram.y:2718 #, c-format msgid "missing \"%s\" at end of SQL statement" msgstr "»%s« fehlt am Ende der SQL-Anweisung" -#: pl_gram.y:2658 +#: pl_gram.y:2735 msgid "missing expression" msgstr "Ausdruck fehlt" -#: pl_gram.y:2660 +#: pl_gram.y:2737 msgid "missing SQL statement" msgstr "SQL-Anweisung fehlt" -#: pl_gram.y:2789 +#: pl_gram.y:2865 msgid "incomplete data type declaration" msgstr "unvollständige Datentypdeklaration" -#: pl_gram.y:2812 +#: pl_gram.y:2888 msgid "missing data type declaration" msgstr "fehlende Datentypdeklaration" -#: pl_gram.y:2868 +#: pl_gram.y:2966 msgid "INTO specified more than once" msgstr "INTO mehr als einmal angegeben" -#: pl_gram.y:3037 +#: pl_gram.y:3133 msgid "expected FROM or IN" msgstr "FROM oder IN erwartet" -#: pl_gram.y:3097 +#: pl_gram.y:3193 #, c-format msgid "RETURN cannot have a parameter in function returning set" msgstr "RETURN kann keinen Parameter haben in einer Funktion mit Mengenergebnis" -#: pl_gram.y:3098 +#: pl_gram.y:3194 #, c-format msgid "Use RETURN NEXT or RETURN QUERY." msgstr "Verwenden Sie RETURN NEXT oder RETURN QUERY." -#: pl_gram.y:3106 +#: pl_gram.y:3204 #, c-format -msgid "RETURN cannot have a parameter in function with OUT parameters" -msgstr "RETURN kann keinen Parameter haben in einer Funktion mit OUT-Parametern" +msgid "RETURN cannot have a parameter in a procedure" +msgstr "RETURN kann keinen Parameter haben in einer Prozedur" -#: pl_gram.y:3115 +#: pl_gram.y:3209 #, c-format msgid "RETURN cannot have a parameter in function returning void" msgstr "RETURN kann keinen Parameter haben in einer Funktion, die »void« zurückgibt" -#: pl_gram.y:3175 +#: pl_gram.y:3218 +#, c-format +msgid "RETURN cannot have a parameter in function with OUT parameters" +msgstr "RETURN kann keinen Parameter haben in einer Funktion mit OUT-Parametern" + +#: pl_gram.y:3280 #, c-format msgid "RETURN NEXT cannot have a parameter in function with OUT parameters" msgstr "RETURN NEXT kann keinen Parameter haben in einer Funktion mit OUT-Parametern" -#: pl_gram.y:3279 +#: pl_gram.y:3387 #, c-format -msgid "\"%s\" is declared CONSTANT" -msgstr "»%s« wurde als CONSTANT deklariert" +msgid "variable \"%s\" is declared CONSTANT" +msgstr "Variable »%s« ist als CONSTANT deklariert" -#: pl_gram.y:3341 pl_gram.y:3353 +#: pl_gram.y:3450 #, c-format -msgid "record or row variable cannot be part of multiple-item INTO list" -msgstr "Record- oder Zeilenvariable kann nicht Teil einer INTO-Liste mit mehreren Elementen sein" +msgid "record variable cannot be part of multiple-item INTO list" +msgstr "Record-Variable kann nicht Teil einer INTO-Liste mit mehreren Elementen sein" -#: pl_gram.y:3398 +#: pl_gram.y:3496 #, c-format msgid "too many INTO variables specified" msgstr "zu viele INTO-Variablen angegeben" -#: pl_gram.y:3606 +#: pl_gram.y:3702 #, c-format msgid "end label \"%s\" specified for unlabelled block" msgstr "Endlabel »%s« für ungelabelten Block angegeben" -#: pl_gram.y:3613 +#: pl_gram.y:3709 #, c-format msgid "end label \"%s\" differs from block's label \"%s\"" msgstr "Endlabel »%s« unterscheidet sich vom Label des Blocks »%s«" -#: pl_gram.y:3648 +#: pl_gram.y:3744 #, c-format msgid "cursor \"%s\" has no arguments" msgstr "Cursor »%s« hat keine Argumente" -#: pl_gram.y:3662 +#: pl_gram.y:3758 #, c-format msgid "cursor \"%s\" has arguments" msgstr "Cursor »%s« hat Argumente" -#: pl_gram.y:3704 +#: 
pl_gram.y:3800 #, c-format msgid "cursor \"%s\" has no argument named \"%s\"" msgstr "Cursor »%s« hat kein Argument namens »%s«" -#: pl_gram.y:3724 +#: pl_gram.y:3820 #, c-format msgid "value for parameter \"%s\" of cursor \"%s\" specified more than once" msgstr "Wert für Parameter »%s« von Cursor »%s« mehrmals angegeben" -#: pl_gram.y:3749 +#: pl_gram.y:3845 #, c-format msgid "not enough arguments for cursor \"%s\"" msgstr "nicht genügend Argumente für Cursor »%s«" -#: pl_gram.y:3756 +#: pl_gram.y:3852 #, c-format msgid "too many arguments for cursor \"%s\"" msgstr "zu viele Argumente für Cursor »%s«" -#: pl_gram.y:3844 +#: pl_gram.y:3939 msgid "unrecognized RAISE statement option" msgstr "unbekannte Option für RAISE-Anweisung" -#: pl_gram.y:3848 +#: pl_gram.y:3943 msgid "syntax error, expected \"=\"" msgstr "Syntaxfehler, »=« erwartet" -#: pl_gram.y:3889 +#: pl_gram.y:3984 #, c-format msgid "too many parameters specified for RAISE" msgstr "zu viele Parameter für RAISE angegeben" -#: pl_gram.y:3893 +#: pl_gram.y:3988 #, c-format msgid "too few parameters specified for RAISE" msgstr "zu wenige Parameter für RAISE angegeben" -#: pl_handler.c:149 +#: pl_handler.c:154 msgid "Sets handling of conflicts between PL/pgSQL variable names and table column names." msgstr "Bestimmt die Verarbeitung von Konflikten zwischen PL/pgSQL-Variablennamen und Tabellenspaltennamen." -#: pl_handler.c:158 +#: pl_handler.c:163 msgid "Print information about parameters in the DETAIL part of the error messages generated on INTO ... STRICT failures." msgstr "Informationen über Parameter im DETAIL-Teil von Fehlermeldungen ausgeben, die durch Fehler in INTO ... STRICT erzeugt wurden." -#: pl_handler.c:166 +#: pl_handler.c:171 msgid "Perform checks given in ASSERT statements." msgstr "Prüfungen in ASSERT-Anweisungen ausführen." -#: pl_handler.c:174 +#: pl_handler.c:179 msgid "List of programming constructs that should produce a warning." msgstr "Zählt Programmierkonstrukte auf, die eine Warnung erzeugen sollen." -#: pl_handler.c:184 +#: pl_handler.c:189 msgid "List of programming constructs that should produce an error." msgstr "Zählt Programmierkonstrukte auf, die einen Fehler zeugen sollen." #. translator: %s is typically the translation of "syntax error" -#: pl_scanner.c:621 +#: pl_scanner.c:630 #, c-format msgid "%s at end of input" msgstr "%s am Ende der Eingabe" #. translator: first %s is typically the translation of "syntax error" -#: pl_scanner.c:637 +#: pl_scanner.c:646 #, c-format msgid "%s at or near \"%s\"" msgstr "%s bei »%s«" diff --git a/src/pl/plpgsql/src/po/it.po b/src/pl/plpgsql/src/po/it.po index 9d47ac9cfe..c94db2b000 100644 --- a/src/pl/plpgsql/src/po/it.po +++ b/src/pl/plpgsql/src/po/it.po @@ -1,28 +1,26 @@ # -# Translation of plpgsql to Italian -# PostgreSQL Project +# plpgsql.po +# Italian message translation file for plpgsql # -# Associazione Culturale ITPUG - Italian PostgreSQL Users Group -# http://www.itpug.org/ - info@itpug.org +# For development and bug report please use: +# https://github.com/dvarrazzo/postgresql-it # -# Traduttori: -# * Diego Cinelli -# * Daniele Varrazzo +# Daniele Varrazzo , 2012-2017. +# Diego Cinelli # -# Copyright (c) 2010, Associazione Culturale ITPUG -# Distributed under the same license of the PostgreSQL project +# This file is distributed under the same license as the PostgreSQL package. 
# msgid "" msgstr "" -"Project-Id-Version: plpgsql (PostgreSQL) 9.6\n" +"Project-Id-Version: plpgsql (PostgreSQL) 10\n" "Report-Msgid-Bugs-To: pgsql-bugs@postgresql.org\n" "POT-Creation-Date: 2016-04-17 00:07+0000\n" "PO-Revision-Date: 2016-04-17 20:58+0100\n" "Last-Translator: Daniele Varrazzo \n" -"Language-Team: Gruppo traduzioni ITPUG \n" +"Language-Team: https://github.com/dvarrazzo/postgresql-it\n" "Language: it\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" diff --git a/src/pl/plpgsql/src/po/ja.po b/src/pl/plpgsql/src/po/ja.po index efb8aa9a53..938565d405 100644 --- a/src/pl/plpgsql/src/po/ja.po +++ b/src/pl/plpgsql/src/po/ja.po @@ -1,798 +1,857 @@ +# LANGUAGE message translation file for plpgsql +# Copyright (C) 2018 PostgreSQL Global Development Group +# This file is distributed under the same license as the plpgsql (PostgreSQL) package. +# HOTTA Michihde , 2013. +# msgid "" msgstr "" -"Project-Id-Version: PostgreSQL 9.0 beta 3\n" +"Project-Id-Version: plpgsql (PostgreSQL) 10\n" "Report-Msgid-Bugs-To: pgsql-bugs@postgresql.org\n" -"POT-Creation-Date: 2013-08-18 13:02+0900\n" -"PO-Revision-Date: 2013-08-18 13:04+0900\n" -"Last-Translator: HOTTA Michihde \n" +"POT-Creation-Date: 2018-01-26 13:56+0900\n" +"PO-Revision-Date: 2018-02-13 09:35+0900\n" +"Last-Translator: Michihide Hotta \n" "Language-Team: Japan PostgreSQL Users Group \n" "Language: ja\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" -"X-Poedit-Language: Japanese\n" -"X-Poedit-Country: JAPAN\n" -"Plural-Forms: nplurals=2; plural=n != 1;\n" +"Plural-Forms: nplurals=1; plural=0;\n" +"X-Generator: Poedit 2.0.6\n" -#: pl_comp.c:432 pl_handler.c:276 +#: pl_comp.c:433 pl_handler.c:451 #, c-format msgid "PL/pgSQL functions cannot accept type %s" msgstr "PL/pgSQL 関数では %s 型は指定できません" -#: pl_comp.c:513 +#: pl_comp.c:514 #, c-format msgid "could not determine actual return type for polymorphic function \"%s\"" -msgstr "関数 \"%s\" が多様な形を持つため、実際の戻り値の型を特定できませんでした" +msgstr "" +"関数 \"%s\" が多様な形を持つため、実際の戻り値の型を特定できませんでした" -#: pl_comp.c:543 +#: pl_comp.c:544 #, c-format msgid "trigger functions can only be called as triggers" msgstr "トリガー関数はトリガーとしてのみコールできます" -#: pl_comp.c:547 pl_handler.c:261 +#: pl_comp.c:548 pl_handler.c:436 #, c-format msgid "PL/pgSQL functions cannot return type %s" msgstr "PL/pgSQL 関数は %s 型を返せません" -#: pl_comp.c:590 +#: pl_comp.c:589 #, c-format msgid "trigger functions cannot have declared arguments" msgstr "トリガー関数には引数を宣言できません" -#: pl_comp.c:591 +#: pl_comp.c:590 #, c-format -msgid "The arguments of the trigger can be accessed through TG_NARGS and TG_ARGV instead." -msgstr "その代わり、トリガーの引数には TG_NARGS と TG_ARGV を通してのみアクセスできます" +msgid "" +"The arguments of the trigger can be accessed through TG_NARGS and TG_ARGV " +"instead." 
+msgstr "" +"その代わり、トリガーの引数には TG_NARGS と TG_ARGV を通してのみアクセスでき" +"ます" -#: pl_comp.c:693 +#: pl_comp.c:692 #, c-format -#| msgid "trigger functions cannot have declared arguments" msgid "event trigger functions cannot have declared arguments" -msgstr "イベントトリガー関数には引数を宣言できません" +msgstr "イベントトリガー関数では引数を宣言できません" -#: pl_comp.c:950 +#: pl_comp.c:943 #, c-format msgid "compilation of PL/pgSQL function \"%s\" near line %d" msgstr "PL/pgSQL 関数 \"%s\" の %d 行目付近でのコンパイル" -#: pl_comp.c:973 +#: pl_comp.c:966 #, c-format msgid "parameter name \"%s\" used more than once" msgstr "パラメータ \"%s\" が複数指定されました" -#: pl_comp.c:1083 +#: pl_comp.c:1076 #, c-format msgid "column reference \"%s\" is ambiguous" -msgstr "列参照\"%s\"は曖昧です" +msgstr "列参照 \"%s\" が一意に特定できません" -#: pl_comp.c:1085 +#: pl_comp.c:1078 #, c-format msgid "It could refer to either a PL/pgSQL variable or a table column." -msgstr "PL/pgSQL変数もしくはテーブルのカラム名いずれかを参照していた可能性があります" +msgstr "" +"PL/pgSQL 変数もしくはテーブルのカラム名のどちらかを参照していた可能性があり" +"ます。" -#: pl_comp.c:1265 pl_comp.c:1293 pl_exec.c:4107 pl_exec.c:4462 pl_exec.c:4547 -#: pl_exec.c:4638 +#: pl_comp.c:1258 pl_comp.c:1286 pl_exec.c:4584 pl_exec.c:4913 pl_exec.c:4998 +#: pl_exec.c:5089 #, c-format msgid "record \"%s\" has no field \"%s\"" -msgstr "レコード \"%s\" には列 \"%s\" はありません" +msgstr "レコード \"%s\" には項目 \"%s\" はありません" -#: pl_comp.c:1824 +#: pl_comp.c:1818 #, c-format msgid "relation \"%s\" does not exist" msgstr "リレーション \"%s\" がありません" -#: pl_comp.c:1933 +#: pl_comp.c:1927 #, c-format msgid "variable \"%s\" has pseudo-type %s" msgstr "変数 \"%s\" の型は擬似タイプ %s です" -#: pl_comp.c:1999 +#: pl_comp.c:1995 #, c-format msgid "relation \"%s\" is not a table" msgstr "リレーション \"%s\" はテーブルではありません" -#: pl_comp.c:2159 +#: pl_comp.c:2155 #, c-format msgid "type \"%s\" is only a shell" -msgstr "型 \"%s\" はシェルでのみ使えます" +msgstr "型 \"%s\" はシェルでのみ使えます" -#: pl_comp.c:2233 pl_comp.c:2286 +#: pl_comp.c:2249 pl_comp.c:2302 #, c-format msgid "unrecognized exception condition \"%s\"" msgstr "例外条件 \"%s\" が認識できません" -#: pl_comp.c:2444 +#: pl_comp.c:2510 #, c-format -msgid "could not determine actual argument type for polymorphic function \"%s\"" -msgstr "関数 \"%s\" が多様な形を持つため、実際の引数の型を特定できませんでした" +msgid "" +"could not determine actual argument type for polymorphic function \"%s\"" +msgstr "" +"関数 \"%s\" が多様な形を持つため、実際の引数の型を特定できませんでした" -#: pl_exec.c:254 pl_exec.c:514 pl_exec.c:793 +#: pl_exec.c:355 pl_exec.c:644 pl_exec.c:914 msgid "during initialization of execution state" -msgstr "実行状態の初期化中に" +msgstr "実行状態の初期化の際" -#: pl_exec.c:261 +#: pl_exec.c:362 msgid "while storing call arguments into local variables" -msgstr "引数をローカル変数に格納する際に" +msgstr "引数をローカル変数に格納する際" -#: pl_exec.c:303 pl_exec.c:671 +#: pl_exec.c:447 pl_exec.c:796 msgid "during function entry" -msgstr "関数登録の際に" +msgstr "関数に入る際" -#: pl_exec.c:334 pl_exec.c:702 pl_exec.c:834 -#, c-format -msgid "CONTINUE cannot be used outside a loop" -msgstr "CONTINUE はループの外では使えません" - -#: pl_exec.c:338 +#: pl_exec.c:472 #, c-format msgid "control reached end of function without RETURN" msgstr "RETURN が現れる前に、制御が関数の終わりに達しました" -#: pl_exec.c:345 +#: pl_exec.c:479 msgid "while casting return value to function's return type" msgstr "戻り値を関数の戻り値の型へキャストする際に" -#: pl_exec.c:358 pl_exec.c:2820 +#: pl_exec.c:492 pl_exec.c:3101 #, c-format msgid "set-valued function called in context that cannot accept a set" -msgstr "値のセットを受け付けないような文脈で、セット値を返す関数が呼ばれました" +msgstr "" +"値の集合を受け付けないようなコンテキストで、集合値を返す関数が呼ばれました" -#: pl_exec.c:396 pl_exec.c:2663 +#: pl_exec.c:530 pl_exec.c:2948 msgid "returned record type does not match expected 
record type" -msgstr "戻りレコードの型が期待するレコードの型と一致しません" +msgstr "返されたレコードの型が期待するレコードの型と一致しません" -#: pl_exec.c:456 pl_exec.c:710 pl_exec.c:842 +#: pl_exec.c:585 pl_exec.c:825 pl_exec.c:949 msgid "during function exit" -msgstr "関数を抜ける際に" +msgstr "関数を抜ける際" -#: pl_exec.c:706 pl_exec.c:838 +#: pl_exec.c:821 pl_exec.c:945 #, c-format msgid "control reached end of trigger procedure without RETURN" -msgstr "RETURN が現れる前に、制御がトリガー手続きの終わりに達しました" +msgstr "RETURN が現れる前に、制御がトリガープロシージャの終わりに達しました" -#: pl_exec.c:715 +#: pl_exec.c:830 #, c-format msgid "trigger procedure cannot return a set" -msgstr "トリガー手続きはセットを返すことができません" +msgstr "トリガー手続きは集合値を返すことができません" -#: pl_exec.c:737 -msgid "returned row structure does not match the structure of the triggering table" +#: pl_exec.c:852 +msgid "" +"returned row structure does not match the structure of the triggering table" msgstr "返された行の構造が、トリガーしているテーブルの構造とマッチしません" -#: pl_exec.c:893 +#. translator: last %s is a phrase such as "during statement block +#. local variable initialization" +#. +#: pl_exec.c:997 #, c-format msgid "PL/pgSQL function %s line %d %s" -msgstr "PL/pgSQL関数%sの%d行目で%s" +msgstr "PL/pgSQL 関数 %s の%d行目 %s" -#: pl_exec.c:904 +#. translator: last %s is a phrase such as "while storing call +#. arguments into local variables" +#. +#: pl_exec.c:1008 #, c-format msgid "PL/pgSQL function %s %s" -msgstr "PL/pgSQL関数%sで%s" +msgstr "PL/pgSQL 関数 %s - %s" #. translator: last %s is a plpgsql statement type name -#: pl_exec.c:912 +#: pl_exec.c:1016 #, c-format msgid "PL/pgSQL function %s line %d at %s" -msgstr "PL/pgSQL関数%sの%d行目の型%s" +msgstr "PL/pgSQL 関数 %s の%d行目 - %s" -#: pl_exec.c:918 +#: pl_exec.c:1022 #, c-format msgid "PL/pgSQL function %s" -msgstr "PL/pgSQL関数%s" +msgstr "PL/pgSQL 関数 %s" -#: pl_exec.c:1027 +#: pl_exec.c:1187 msgid "during statement block local variable initialization" -msgstr "ステートメントブロックでローカル変数を初期化する際に" +msgstr "ステートメントブロックでローカル変数を初期化中" -#: pl_exec.c:1069 +#: pl_exec.c:1226 #, c-format msgid "variable \"%s\" declared NOT NULL cannot default to NULL" -msgstr "変数 \"%s\" は NOT NULL として宣言されているため、初期値を NULL にすることはできません" +msgstr "" +"変数 \"%s\" は NOT NULL として宣言されているため、デフォルト値を NULL にする" +"ことはできません" -#: pl_exec.c:1119 +#: pl_exec.c:1277 msgid "during statement block entry" -msgstr "ステートメントブロックを登録する際に" +msgstr "ステートメントブロックに入る際に" -#: pl_exec.c:1140 +#: pl_exec.c:1309 msgid "during statement block exit" msgstr "ステートメントブロックを抜ける際に" -#: pl_exec.c:1183 +#: pl_exec.c:1351 msgid "during exception cleanup" msgstr "例外をクリーンアップする際に" -#: pl_exec.c:1536 +#: pl_exec.c:1717 #, c-format msgid "GET STACKED DIAGNOSTICS cannot be used outside an exception handler" -msgstr "GET STACKED DIAGNOSTICSは例外ハンドラの外では使えません" +msgstr "GET STACKED DIAGNOSTICS は例外ハンドラの外では使えません" -#: pl_exec.c:1737 +#: pl_exec.c:1922 #, c-format msgid "case not found" msgstr "case が見つかりません" -#: pl_exec.c:1738 +#: pl_exec.c:1923 #, c-format msgid "CASE statement is missing ELSE part." 
msgstr "CASE ステートメントに ELSE 部分がありません" -#: pl_exec.c:1890 +#: pl_exec.c:2077 #, c-format msgid "lower bound of FOR loop cannot be null" msgstr "FOR ループの下限を NULL にすることはできません" -#: pl_exec.c:1905 +#: pl_exec.c:2093 #, c-format msgid "upper bound of FOR loop cannot be null" msgstr "FOR ループの上限を NULL にすることはできません" -#: pl_exec.c:1922 +#: pl_exec.c:2111 #, c-format msgid "BY value of FOR loop cannot be null" msgstr "FOR ループにおける BY の値を NULL にすることはできません" -#: pl_exec.c:1928 +#: pl_exec.c:2117 #, c-format msgid "BY value of FOR loop must be greater than zero" msgstr "FOR ループにおける BY の値はゼロより大きくなければなりません" -#: pl_exec.c:2098 pl_exec.c:3658 +#: pl_exec.c:2294 pl_exec.c:4085 #, c-format msgid "cursor \"%s\" already in use" msgstr "カーソル \"%s\" はすでに使われています" -#: pl_exec.c:2121 pl_exec.c:3720 +#: pl_exec.c:2317 pl_exec.c:4150 #, c-format msgid "arguments given for cursor without arguments" msgstr "引数なしのカーソルに引数が与えられました" -#: pl_exec.c:2140 pl_exec.c:3739 +#: pl_exec.c:2336 pl_exec.c:4169 #, c-format msgid "arguments required for cursor" msgstr "カーソルには引数が必要です" -#: pl_exec.c:2227 +#: pl_exec.c:2423 #, c-format msgid "FOREACH expression must not be null" -msgstr "FOREACH式はNULLではいけません" +msgstr "FOREACH 式は NULL であってはなりません" -#: pl_exec.c:2233 +#: pl_exec.c:2438 #, c-format msgid "FOREACH expression must yield an array, not type %s" -msgstr "FOREACH式は%s型ではなく配列を生成しなければなりません" +msgstr "FOREACH 式は %s 型ではなく配列を生成しなければなりません" -#: pl_exec.c:2250 +#: pl_exec.c:2455 #, c-format msgid "slice dimension (%d) is out of the valid range 0..%d" -msgstr "範囲次元%dは有効範囲0から%dまでの間にありません" +msgstr "配列の要素数 (%d) が有効範囲0から%dまでの間にありません" -#: pl_exec.c:2277 +#: pl_exec.c:2482 #, c-format msgid "FOREACH ... SLICE loop variable must be of an array type" -msgstr "FOREACH ... SLICEループ変数は配列型でなければなりません" +msgstr "FOREACH ... SLICE ループ変数は配列型でなければなりません" -#: pl_exec.c:2281 +#: pl_exec.c:2486 #, c-format msgid "FOREACH loop variable must not be of an array type" -msgstr "FOREACHループ変数は配列型ではいけません" +msgstr "FOREACH ループ変数は配列型であってはなりません" -#: pl_exec.c:2502 pl_exec.c:2655 +#: pl_exec.c:2689 pl_exec.c:2771 pl_exec.c:2941 #, c-format -#| msgid "while casting return value to function's return type" -msgid "cannot return non-composite value from function returning composite type" +msgid "" +"cannot return non-composite value from function returning composite type" msgstr "複合型を返す関数から複合型以外の値を返すことはできません" -#: pl_exec.c:2546 pl_gram.y:3020 +#: pl_exec.c:2815 pl_gram.y:3199 #, c-format msgid "cannot use RETURN NEXT in a non-SETOF function" msgstr "SETOF でない関数では RETURN NEXT は使えません" -#: pl_exec.c:2574 pl_exec.c:2697 +#: pl_exec.c:2849 pl_exec.c:2976 #, c-format msgid "wrong result type supplied in RETURN NEXT" -msgstr "RETURN NEXT において誤った戻り値の型が指定されています" +msgstr "RETURN NEXT で指定されている結果の型が誤っています" -#: pl_exec.c:2597 pl_exec.c:4094 pl_exec.c:4420 pl_exec.c:4455 pl_exec.c:4521 -#: pl_exec.c:4540 pl_exec.c:4608 pl_exec.c:4631 +#: pl_exec.c:2878 pl_exec.c:4572 pl_exec.c:4880 pl_exec.c:4906 pl_exec.c:4972 +#: pl_exec.c:4991 pl_exec.c:5059 pl_exec.c:5082 #, c-format msgid "record \"%s\" is not assigned yet" -msgstr "レコード \"%s\" には、まだ値が代入されていません" +msgstr "レコード \"%s\" にはまだ値が代入されていません" -#: pl_exec.c:2599 pl_exec.c:4096 pl_exec.c:4422 pl_exec.c:4457 pl_exec.c:4523 -#: pl_exec.c:4542 pl_exec.c:4610 pl_exec.c:4633 +#: pl_exec.c:2880 pl_exec.c:4574 pl_exec.c:4882 pl_exec.c:4908 pl_exec.c:4974 +#: pl_exec.c:4993 pl_exec.c:5061 pl_exec.c:5084 #, c-format msgid "The tuple structure of a not-yet-assigned record is indeterminate." 
msgstr "まだ代入されていないレコードのタプル構造は不定です" -#: pl_exec.c:2603 pl_exec.c:2623 +#: pl_exec.c:2887 pl_exec.c:2906 #, c-format msgid "wrong record type supplied in RETURN NEXT" -msgstr "RETURN NEXT において、誤ったレコード型が指定されています" +msgstr "RETURN NEXT で指定されているレコードの型が誤っています" -#: pl_exec.c:2715 +#: pl_exec.c:2995 #, c-format msgid "RETURN NEXT must have a parameter" msgstr "RETURN NEXT にはパラメーターが必要です" -#: pl_exec.c:2748 pl_gram.y:3078 +#: pl_exec.c:3021 pl_gram.y:3261 #, c-format msgid "cannot use RETURN QUERY in a non-SETOF function" msgstr "SETOF でない関数では RETURN QUERY は使えません" -#: pl_exec.c:2768 +#: pl_exec.c:3045 msgid "structure of query does not match function result type" -msgstr "クエリーの構造が関数の戻り値の型と一致しません" +msgstr "問い合わせの構造が関数の結果の型と一致しません" -#: pl_exec.c:2848 pl_exec.c:2980 +#: pl_exec.c:3129 pl_exec.c:3267 #, c-format msgid "RAISE option already specified: %s" msgstr "RAISE オプションは既に指定されています: %s" -#: pl_exec.c:2881 +#: pl_exec.c:3163 #, c-format msgid "RAISE without parameters cannot be used outside an exception handler" msgstr "引数の無い RAISE は、例外ハンドラの外では使えません" -#: pl_exec.c:2922 -#, c-format -msgid "too few parameters specified for RAISE" -msgstr "RAISE に指定されたパラメーターの数が足りません" - -#: pl_exec.c:2950 -#, c-format -msgid "too many parameters specified for RAISE" -msgstr "RAISE に指定されたパラメーターの数が多すぎます" - -#: pl_exec.c:2970 +#: pl_exec.c:3257 #, c-format msgid "RAISE statement option cannot be null" msgstr "RAISE ステートメントのオプションには NULL は指定できません" -#: pl_exec.c:3041 +#: pl_exec.c:3327 #, c-format msgid "%s" msgstr "%s" -#: pl_exec.c:3211 pl_exec.c:3348 pl_exec.c:3521 +#: pl_exec.c:3382 +#, c-format +msgid "assertion failed" +msgstr "アサーションに失敗" + +#: pl_exec.c:3583 pl_exec.c:3729 pl_exec.c:3919 #, c-format msgid "cannot COPY to/from client in PL/pgSQL" -msgstr "PL/pgSQL 内では COPY to/from は使えません" +msgstr "PL/pgSQL 内では COPY to/from クライアントは使えません" -#: pl_exec.c:3215 pl_exec.c:3352 pl_exec.c:3525 +#: pl_exec.c:3587 pl_exec.c:3733 pl_exec.c:3923 #, c-format msgid "cannot begin/end transactions in PL/pgSQL" -msgstr "PL/pgSQL 内ではトランザクションを開始/終了できません" +msgstr "PL/pgSQL 内ではトランザクションの開始/終了はできません" -#: pl_exec.c:3216 pl_exec.c:3353 pl_exec.c:3526 +#: pl_exec.c:3588 pl_exec.c:3734 pl_exec.c:3924 #, c-format msgid "Use a BEGIN block with an EXCEPTION clause instead." msgstr "代わりに EXCEPTION 句を伴う BEGIN ブロックを使用してください" -#: pl_exec.c:3376 pl_exec.c:3550 +#: pl_exec.c:3757 pl_exec.c:3948 #, c-format msgid "INTO used with a command that cannot return data" -msgstr "データを返せない命令で INTO が使われました" +msgstr "データを返せないコマンドで INTO が使われました" -#: pl_exec.c:3396 pl_exec.c:3570 +#: pl_exec.c:3785 pl_exec.c:3976 #, c-format msgid "query returned no rows" -msgstr "クエリーは行を返しませんでした" +msgstr "問い合わせは行を返しませんでした" -#: pl_exec.c:3405 pl_exec.c:3579 +#: pl_exec.c:3804 pl_exec.c:3995 #, c-format msgid "query returned more than one row" -msgstr "クエリーが複数の行を返しました" +msgstr "問い合わせが複数の行を返しました" -#: pl_exec.c:3420 +#: pl_exec.c:3821 #, c-format msgid "query has no destination for result data" -msgstr "クエリーに結果データの返却先が指定されていません" +msgstr "問い合わせに結果データの返却先が指定されていません" -#: pl_exec.c:3421 +#: pl_exec.c:3822 #, c-format msgid "If you want to discard the results of a SELECT, use PERFORM instead." -msgstr "SELECT の結果を破棄したい場合は、代わりに PERFORM を使ってください" +msgstr "SELECT の結果を破棄したい場合、代わりに PERFORM を使ってください" -#: pl_exec.c:3454 pl_exec.c:6414 +#: pl_exec.c:3855 pl_exec.c:7317 #, c-format msgid "query string argument of EXECUTE is null" -msgstr "EXECUTE のクエリー文字列の引数が NULL です" +msgstr "EXECUTE の問い合わせ文字列の引数が NULL です" -#: pl_exec.c:3513 +#: pl_exec.c:3911 #, c-format msgid "EXECUTE of SELECT ... 
INTO is not implemented" msgstr "SELECT ... INTO の EXECUTE は実装されていません" -#: pl_exec.c:3514 +#: pl_exec.c:3912 #, c-format -msgid "You might want to use EXECUTE ... INTO or EXECUTE CREATE TABLE ... AS instead." -msgstr "代わりにEXECUTE ... INTOまたはEXECUTE CREATE TABLE ... ASを使用する方がよいかもしれません。" +msgid "" +"You might want to use EXECUTE ... INTO or EXECUTE CREATE TABLE ... AS " +"instead." +msgstr "" +"代わりに EXECUTE ... INTO または EXECUTE CREATE TABLE ... AS が使えます。" -#: pl_exec.c:3802 pl_exec.c:3894 +#: pl_exec.c:4233 pl_exec.c:4329 #, c-format msgid "cursor variable \"%s\" is null" msgstr "カーソル変数 \"%s\" が NULL です" -#: pl_exec.c:3809 pl_exec.c:3901 +#: pl_exec.c:4244 pl_exec.c:4340 #, c-format msgid "cursor \"%s\" does not exist" msgstr "カーソル \"%s\" は存在しません" -#: pl_exec.c:3823 +#: pl_exec.c:4257 #, c-format msgid "relative or absolute cursor position is null" msgstr "相対もしくは絶対カーソル位置が NULL です" -#: pl_exec.c:3990 +#: pl_exec.c:4448 #, c-format msgid "null value cannot be assigned to variable \"%s\" declared NOT NULL" msgstr "NOT NULL として宣言された変数 \"%s\" には NULL を代入できません" -#: pl_exec.c:4037 +#: pl_exec.c:4517 #, c-format msgid "cannot assign non-composite value to a row variable" -msgstr "複合値でない値を行変数に代入できません" +msgstr "複合型でない値を行変数に代入できません" -#: pl_exec.c:4061 +#: pl_exec.c:4541 #, c-format msgid "cannot assign non-composite value to a record variable" -msgstr "複合値でない値をレコード変数に代入できません" +msgstr "複合型でない値をレコード変数に代入できません" -#: pl_exec.c:4206 +#: pl_exec.c:4661 #, c-format msgid "number of array dimensions (%d) exceeds the maximum allowed (%d)" -msgstr "配列の次元数(%d)が指定可能な最大値(%d)を超えています" +msgstr "配列の次元数(%d)が制限値(%d)を超えています" -#: pl_exec.c:4238 +#: pl_exec.c:4693 #, c-format msgid "subscripted object is not an array" msgstr "添字つきオブジェクトは配列ではありません" -#: pl_exec.c:4275 +#: pl_exec.c:4731 #, c-format msgid "array subscript in assignment must not be null" msgstr "代入における配列の添字が NULL であってはなりません" -#: pl_exec.c:4744 +#: pl_exec.c:5198 #, c-format msgid "query \"%s\" did not return data" -msgstr "クエリー \"%s\" がデータを返しませんでした" +msgstr "問い合わせ \"%s\" がデータを返しませんでした" -#: pl_exec.c:4752 +#: pl_exec.c:5206 #, c-format msgid "query \"%s\" returned %d column" msgid_plural "query \"%s\" returned %d columns" -msgstr[0] "クエリー \"%s\" が %d 個の列を返しました" -msgstr[1] "クエリー \"%s\" が %d 個の列を返しました" +msgstr[0] "問い合わせ \"%s\" が %d 個の列を返しました" -#: pl_exec.c:4778 +#: pl_exec.c:5233 #, c-format msgid "query \"%s\" returned more than one row" -msgstr "クエリー \"%s\" が複数の行を返しました" +msgstr "問い合わせ \"%s\" が複数の行を返しました" -#: pl_exec.c:4835 +#: pl_exec.c:5301 #, c-format msgid "query \"%s\" is not a SELECT" -msgstr "クエリー \"%s\" が SELECT ではありません" +msgstr "問い合わせ \"%s\" が SELECT ではありません" -#: pl_funcs.c:218 +#: pl_funcs.c:239 msgid "statement block" msgstr "ステートメントブロック" -#: pl_funcs.c:220 +#: pl_funcs.c:241 msgid "assignment" msgstr "代入" -#: pl_funcs.c:230 +#: pl_funcs.c:251 msgid "FOR with integer loop variable" -msgstr "整数のループ変数を伴う FOR" +msgstr "整数のループ変数を使った FOR" -#: pl_funcs.c:232 +#: pl_funcs.c:253 msgid "FOR over SELECT rows" -msgstr "SELECT 行を制御する FOR" +msgstr "SELECT 行を使った FOR" -#: pl_funcs.c:234 +#: pl_funcs.c:255 msgid "FOR over cursor" -msgstr "カーソルを制御する FOR" +msgstr "カーソルを使った FOR" -#: pl_funcs.c:236 +#: pl_funcs.c:257 msgid "FOREACH over array" -msgstr "配列全体に対するFOREACH" +msgstr "配列を巡回する FOREACH" -#: pl_funcs.c:248 +#: pl_funcs.c:271 msgid "SQL statement" msgstr "SQL ステートメント" -#: pl_funcs.c:250 -msgid "EXECUTE statement" -msgstr "EXECUTE ステートメント" - -#: pl_funcs.c:252 +#: pl_funcs.c:275 msgid "FOR over EXECUTE statement" -msgstr "EXECUTE ステートメントを制御する FOR" +msgstr "EXECUTE 
ステートメントを使った FOR" -#: pl_gram.y:450 +#: pl_gram.y:478 #, c-format msgid "block label must be placed before DECLARE, not after" msgstr "ブロックラベルは DECLARE の後ではなく前に置かなければなりません" -#: pl_gram.y:470 +#: pl_gram.y:498 #, c-format msgid "collations are not supported by type %s" -msgstr "%s型では照合順序はサポートされません" +msgstr "%s 型では照合順序はサポートされていません" -#: pl_gram.y:485 +#: pl_gram.y:513 #, c-format msgid "row or record variable cannot be CONSTANT" -msgstr "行またはレコード変数を CONSTANT にはできません" +msgstr "行またはレコード変数は CONSTANT にはできません" -#: pl_gram.y:495 +#: pl_gram.y:523 #, c-format msgid "row or record variable cannot be NOT NULL" msgstr "行またはレコード変数を NOT NULL にはできません" -#: pl_gram.y:506 +#: pl_gram.y:534 #, c-format msgid "default value for row or record variable is not supported" msgstr "行またはレコード変数のデフォルト値指定はサポートされていません" -#: pl_gram.y:651 pl_gram.y:666 pl_gram.y:692 +#: pl_gram.y:679 pl_gram.y:694 pl_gram.y:720 #, c-format msgid "variable \"%s\" does not exist" -msgstr "変数\"%s\"は存在しません" +msgstr "変数 \"%s\" は存在しません" -#: pl_gram.y:710 pl_gram.y:723 +#: pl_gram.y:738 pl_gram.y:766 msgid "duplicate declaration" msgstr "重複した宣言です。" -#: pl_gram.y:901 +#: pl_gram.y:749 pl_gram.y:777 +#, c-format +msgid "variable \"%s\" shadows a previously defined variable" +msgstr "変数 \"%s\" が事前に定義された変数を不可視にしています" + +#: pl_gram.y:956 #, c-format msgid "diagnostics item %s is not allowed in GET STACKED DIAGNOSTICS" -msgstr "GET STACKED DIAGNOSTICSでは診断項目%sは許されません" +msgstr "GET STACKED DIAGNOSTICS では診断項目 %s は許可されていません" -#: pl_gram.y:919 +#: pl_gram.y:974 #, c-format msgid "diagnostics item %s is not allowed in GET CURRENT DIAGNOSTICS" -msgstr "GET CURRENT DIAGNOSTICSでは診断項目%sは許されません" +msgstr "GET CURRENT DIAGNOSTICS では診断項目 %s は許可されていません" -#: pl_gram.y:1017 +#: pl_gram.y:1072 msgid "unrecognized GET DIAGNOSTICS item" msgstr "GET DIAGNOSTICS 項目が認識できません" -#: pl_gram.y:1028 pl_gram.y:3265 +#: pl_gram.y:1082 pl_gram.y:3448 #, c-format msgid "\"%s\" is not a scalar variable" msgstr "\"%s\" はスカラー変数ではありません" -#: pl_gram.y:1280 pl_gram.y:1474 +#: pl_gram.y:1334 pl_gram.y:1528 #, c-format -msgid "loop variable of loop over rows must be a record or row variable or list of scalar variables" -msgstr "行をまたがるループにおけるループ変数は、レコード、行変数、スカラー変数並びのいずれかでなければなりません" +msgid "" +"loop variable of loop over rows must be a record or row variable or list of " +"scalar variables" +msgstr "" +"行をまたがるループのループ変数は、レコード、行変数、スカラー変数並びのいずれ" +"かでなければなりません" -#: pl_gram.y:1314 +#: pl_gram.y:1368 #, c-format msgid "cursor FOR loop must have only one target variable" msgstr "カーソルを使った FOR ループには、ターゲット変数が1個だけ必要です" -#: pl_gram.y:1321 +#: pl_gram.y:1375 #, c-format msgid "cursor FOR loop must use a bound cursor variable" -msgstr "カーソルを使った FOR ループでは、それに関連付けられたカーソル変数を使用しなければなりません" +msgstr "" +"カーソルを使った FOR ループでは、それに関連付けられたカーソル変数を使用しな" +"ければなりません" -#: pl_gram.y:1405 +#: pl_gram.y:1459 #, c-format msgid "integer FOR loop must have only one target variable" msgstr "整数を使った FOR ループには、ターゲット変数が1個だけ必要です" -#: pl_gram.y:1441 +#: pl_gram.y:1495 #, c-format msgid "cannot specify REVERSE in query FOR loop" -msgstr "クエリーを使った FOR ループの中では REVERSE は指定できません" +msgstr "問い合わせを使った FOR ループの中では REVERSE は指定できません" -#: pl_gram.y:1588 +#: pl_gram.y:1642 #, c-format msgid "loop variable of FOREACH must be a known variable or list of variables" -msgstr "FOREACHのループ変数は既知の変数または変数のリストでなければなりません" +msgstr "" +"FOREACH のループ変数は、既知の変数または変数のリストでなければなりません" -#: pl_gram.y:1640 pl_gram.y:1677 pl_gram.y:1725 pl_gram.y:2721 pl_gram.y:2802 -#: pl_gram.y:2913 pl_gram.y:3666 +#: pl_gram.y:1683 +#, c-format +msgid "" +"there is no label \"%s\" attached 
to any block or loop enclosing this " +"statement" +msgstr "" +"このステートメントを囲むブロックやループに割り当てられた \"%s\" というラベル" +"はありません。" + +#: pl_gram.y:1691 +#, c-format +msgid "block label \"%s\" cannot be used in CONTINUE" +msgstr "ブロックラベル \"%s\" は CONTINUE の中では使えません。" + +#: pl_gram.y:1706 +#, c-format +msgid "EXIT cannot be used outside a loop, unless it has a label" +msgstr "ラベルのない EXIT は、ループの外では使えません" + +#: pl_gram.y:1707 +#, c-format +msgid "CONTINUE cannot be used outside a loop" +msgstr "CONTINUE はループの外では使えません" + +#: pl_gram.y:1731 pl_gram.y:1768 pl_gram.y:1816 pl_gram.y:2898 pl_gram.y:2983 +#: pl_gram.y:3094 pl_gram.y:3850 msgid "unexpected end of function definition" msgstr "予期しない関数定義の終端に達しました" -#: pl_gram.y:1745 pl_gram.y:1769 pl_gram.y:1785 pl_gram.y:1791 pl_gram.y:1880 -#: pl_gram.y:1888 pl_gram.y:1902 pl_gram.y:1997 pl_gram.y:2178 pl_gram.y:2261 -#: pl_gram.y:2394 pl_gram.y:3508 pl_gram.y:3569 pl_gram.y:3647 +#: pl_gram.y:1836 pl_gram.y:1860 pl_gram.y:1876 pl_gram.y:1882 pl_gram.y:2000 +#: pl_gram.y:2008 pl_gram.y:2022 pl_gram.y:2117 pl_gram.y:2304 pl_gram.y:2398 +#: pl_gram.y:2550 pl_gram.y:3691 pl_gram.y:3752 pl_gram.y:3831 msgid "syntax error" msgstr "構文エラー" -#: pl_gram.y:1773 pl_gram.y:1775 pl_gram.y:2182 pl_gram.y:2184 +#: pl_gram.y:1864 pl_gram.y:1866 pl_gram.y:2308 pl_gram.y:2310 msgid "invalid SQLSTATE code" msgstr "無効な SQLSTATE コードです" -#: pl_gram.y:1944 +#: pl_gram.y:2064 msgid "syntax error, expected \"FOR\"" -msgstr "構文エラー。\"FOR\" を期待していました" +msgstr "構文エラー。\"FOR\" が現れるべきでした。" -#: pl_gram.y:2006 +#: pl_gram.y:2126 #, c-format msgid "FETCH statement cannot return multiple rows" msgstr "FETCH ステートメントは複数行を返せません" -#: pl_gram.y:2062 +#: pl_gram.y:2188 #, c-format msgid "cursor variable must be a simple variable" msgstr "カーソル変数は単純変数でなければなりません" -#: pl_gram.y:2068 +#: pl_gram.y:2194 #, c-format msgid "variable \"%s\" must be of type cursor or refcursor" msgstr "変数 \"%s\" は cursor 型または refcursor 型でなければなりません" -#: pl_gram.y:2236 -msgid "label does not exist" -msgstr "ラベルが存在しません" - -#: pl_gram.y:2365 pl_gram.y:2376 +#: pl_gram.y:2521 pl_gram.y:2532 #, c-format msgid "\"%s\" is not a known variable" -msgstr "\"%s\"は既知の変数ではありません" +msgstr "\"%s\" は既知の変数ではありません" -#: pl_gram.y:2480 pl_gram.y:2490 pl_gram.y:2645 +#: pl_gram.y:2636 pl_gram.y:2646 pl_gram.y:2802 msgid "mismatched parentheses" -msgstr "カッコが対応していません" +msgstr "括弧が対応していません" -#: pl_gram.y:2494 +#: pl_gram.y:2650 #, c-format msgid "missing \"%s\" at end of SQL expression" -msgstr "SQL 表現式の終端に \"%s\" がありません" +msgstr "SQL 表現式の終わりに \"%s\" がありません" -#: pl_gram.y:2500 +#: pl_gram.y:2656 #, c-format msgid "missing \"%s\" at end of SQL statement" -msgstr "SQL ステートメントの終端に \"%s\" がありません" +msgstr "SQL ステートメントの終わりに \"%s\" がありません" -#: pl_gram.y:2517 +#: pl_gram.y:2673 msgid "missing expression" msgstr "表現式がありません" -#: pl_gram.y:2519 +#: pl_gram.y:2675 msgid "missing SQL statement" -msgstr "SQLステートメントがありません" +msgstr "SQL ステートメントがありません" -#: pl_gram.y:2647 +#: pl_gram.y:2804 msgid "incomplete data type declaration" msgstr "データ型の定義が不完全です" -#: pl_gram.y:2670 +#: pl_gram.y:2827 msgid "missing data type declaration" msgstr "データ型の定義がありません" -#: pl_gram.y:2726 +#: pl_gram.y:2906 msgid "INTO specified more than once" msgstr "INTO が複数回指定されています" -#: pl_gram.y:2894 +#: pl_gram.y:3075 msgid "expected FROM or IN" -msgstr "FROM もしくは IN を期待していました" +msgstr "FROM もしくは IN が来るべきでした" -#: pl_gram.y:2954 +#: pl_gram.y:3135 #, c-format msgid "RETURN cannot have a parameter in function returning set" -msgstr "値のセットを返す関数では、RETURNにパラメータを指定できません" +msgstr "集合を返す関数では、RETURN 
にパラメータを指定できません" -#: pl_gram.y:2955 +#: pl_gram.y:3136 #, c-format msgid "Use RETURN NEXT or RETURN QUERY." msgstr "RETURN NEXT もしくは RETURN QUERY を使用してください" -#: pl_gram.y:2963 +#: pl_gram.y:3144 #, c-format msgid "RETURN cannot have a parameter in function with OUT parameters" -msgstr "OUT パラメータのない関数では、RETURN にはパラメータを指定できません" +msgstr "OUT パラメータのない関数では、RETURN にパラメータを指定できません" -#: pl_gram.y:2972 +#: pl_gram.y:3153 #, c-format msgid "RETURN cannot have a parameter in function returning void" -msgstr "void を返す関数では、RETURN にはパラメータを指定できません" +msgstr "void を返す関数では、RETURN にパラメータを指定できません" -#: pl_gram.y:3034 +#: pl_gram.y:3213 #, c-format msgid "RETURN NEXT cannot have a parameter in function with OUT parameters" -msgstr "OUT パラメータのない関数では、RETURN NEXT にはパラメータを指定できません" +msgstr "" +"OUT パラメータ付きの関数では、RETURN NEXT にパラメータを指定できません" -#: pl_gram.y:3134 +#: pl_gram.y:3317 #, c-format msgid "\"%s\" is declared CONSTANT" -msgstr "\"%s\" は CONSTANT として宣言されています" +msgstr "\"%s\" は定義済み CONSTANT です" -#: pl_gram.y:3196 pl_gram.y:3208 +#: pl_gram.y:3379 pl_gram.y:3391 #, c-format msgid "record or row variable cannot be part of multiple-item INTO list" -msgstr "レコードもしくは行変数は、複数項目を持つ INTO リストの一部分としては指定できません" +msgstr "" +"レコードもしくは行変数は、複数項目を持つ INTO リストの一部としては指定できま" +"せん" -#: pl_gram.y:3253 +#: pl_gram.y:3436 #, c-format msgid "too many INTO variables specified" msgstr "INTO 変数の指定が多すぎます" -#: pl_gram.y:3461 +#: pl_gram.y:3644 #, c-format msgid "end label \"%s\" specified for unlabelled block" msgstr "ラベル無しブロックで終端ラベル \"%s\" が指定されました" -#: pl_gram.y:3468 +#: pl_gram.y:3651 #, c-format msgid "end label \"%s\" differs from block's label \"%s\"" msgstr "終端ラベル \"%s\" がブロックのラベル \"%s\" と異なります" -#: pl_gram.y:3503 +#: pl_gram.y:3686 #, c-format msgid "cursor \"%s\" has no arguments" msgstr "カーソル \"%s\" に引数がありません" -#: pl_gram.y:3517 +#: pl_gram.y:3700 #, c-format msgid "cursor \"%s\" has arguments" msgstr "カーソル \"%s\" に引数がついています" -#: pl_gram.y:3559 +#: pl_gram.y:3742 #, c-format msgid "cursor \"%s\" has no argument named \"%s\"" -msgstr "カーソル\"%s\"に\"%s\"という名前の引数がありません" +msgstr "カーソル \"%s\" に \"%s\" という名前の引数がありません" -#: pl_gram.y:3579 +#: pl_gram.y:3762 #, c-format msgid "value for parameter \"%s\" of cursor \"%s\" specified more than once" -msgstr "カーソル\"%2$s\"のパラメータ\"%1$s\"の値が複数指定されました" +msgstr "カーソル \"%2$s\" のパラメータ \"%1$s\" の値が複数個指定されました" -#: pl_gram.y:3604 +#: pl_gram.y:3787 #, c-format msgid "not enough arguments for cursor \"%s\"" -msgstr "カーソル\"%s\"の引数が不足しています" +msgstr "カーソル \"%s\" の引数が不足しています" -#: pl_gram.y:3611 +#: pl_gram.y:3794 #, c-format msgid "too many arguments for cursor \"%s\"" -msgstr "カーソル\"%s\"に対する引数が多すぎます" +msgstr "カーソル \"%s\" に対する引数が多すぎます" -#: pl_gram.y:3698 +#: pl_gram.y:3882 msgid "unrecognized RAISE statement option" msgstr "RAISE ステートメントのオプションを認識できません" -#: pl_gram.y:3702 +#: pl_gram.y:3886 msgid "syntax error, expected \"=\"" msgstr "構文エラー。\"=\" を期待していました" -#: pl_handler.c:61 -msgid "Sets handling of conflicts between PL/pgSQL variable names and table column names." -msgstr "PL/pgSQL変数名とテーブルのカラム名の間の衝突処理を設定してください" +#: pl_gram.y:3927 +#, c-format +msgid "too many parameters specified for RAISE" +msgstr "RAISE に指定されたパラメーターの数が多すぎます" + +#: pl_gram.y:3931 +#, c-format +msgid "too few parameters specified for RAISE" +msgstr "RAISE に指定されたパラメーターの数が足りません" + +#: pl_handler.c:154 +msgid "" +"Sets handling of conflicts between PL/pgSQL variable names and table column " +"names." 
+msgstr "PL/pgSQL 変数名とテーブルのカラム名の間の衝突時処理を設定します。" + +#: pl_handler.c:163 +msgid "" +"Print information about parameters in the DETAIL part of the error messages " +"generated on INTO ... STRICT failures." +msgstr "" +"INTO ... STRICT 失敗時に生成されたエラーメッセージの DETAIL 部分のパラメー" +"ター情報を表示します。" + +#: pl_handler.c:171 +msgid "Perform checks given in ASSERT statements." +msgstr "ASSERT ステートメントで指定されたチェックを実行します。" + +#: pl_handler.c:179 +msgid "List of programming constructs that should produce a warning." +msgstr "生成されたプログラムの中で、警告を発生すべき部分の一覧です。" + +#: pl_handler.c:189 +msgid "List of programming constructs that should produce an error." +msgstr "生成されたプログラムの中で、エラーを発生すべき部分の一覧です。" #. translator: %s is typically the translation of "syntax error" -#: pl_scanner.c:553 +#: pl_scanner.c:624 #, c-format msgid "%s at end of input" msgstr "入力の最後で %s" #. translator: first %s is typically the translation of "syntax error" -#: pl_scanner.c:569 +#: pl_scanner.c:640 #, c-format msgid "%s at or near \"%s\"" msgstr "\"%2$s\" もしくはその近辺で %1$s" - -#~ msgid "RETURN must specify a record or row variable in function returning row" -#~ msgstr "行を返す関数では、RETURN にレコードまたは行変数を指定しなければなりません" - -#~ msgid "syntax error; also virtual memory exhausted" -#~ msgstr "構文エラー: 仮想メモリも枯渇しました" - -#~ msgid "parser stack overflow" -#~ msgstr "パーサのスタックがオーバーフローしました" - -#~ msgid "relation \"%s.%s\" does not exist" -#~ msgstr "リレーション \"%s.%s\" がありません" - -#~ msgid "RETURN NEXT must specify a record or row variable in function returning row" -#~ msgstr "行を返す関数では、RETURN NEXT にレコードまたは行変数を指定しなければなりません" - -#~ msgid "syntax error: cannot back up" -#~ msgstr "構文エラー: バックアップできません" diff --git a/src/pl/plpgsql/src/po/ko.po b/src/pl/plpgsql/src/po/ko.po index 95d9dc9d24..b4bb7724bf 100644 --- a/src/pl/plpgsql/src/po/ko.po +++ b/src/pl/plpgsql/src/po/ko.po @@ -2,12 +2,13 @@ # Copyright (C) 2010 PostgreSQL Global Development Group # This file is distributed under the same license as the PostgreSQL package. # Ioseph Kim , 2010 + msgid "" msgstr "" -"Project-Id-Version: plpgsql (PostgreSQL 9.6)\n" +"Project-Id-Version: plpgsql (PostgreSQL) 10\n" "Report-Msgid-Bugs-To: pgsql-bugs@postgresql.org\n" -"POT-Creation-Date: 2016-09-26 14:02+0900\n" -"PO-Revision-Date: 2016-09-26 19:23+0900\n" +"POT-Creation-Date: 2017-08-16 10:59+0900\n" +"PO-Revision-Date: 2017-08-16 17:49+0900\n" "Last-Translator: Ioseph Kim \n" "Language-Team: Korean \n" "Language: ko\n" @@ -16,556 +17,555 @@ msgstr "" "Content-Transfer-Encoding: 8bit\n" "Plural-Forms: nplurals=1; plural=0;\n" -#: pl_comp.c:430 pl_handler.c:450 +#: pl_comp.c:433 pl_handler.c:451 #, c-format msgid "PL/pgSQL functions cannot accept type %s" msgstr "PL/pgSQL 함수에 %s 형식을 사용할 수 없음" -#: pl_comp.c:511 +#: pl_comp.c:514 #, c-format msgid "could not determine actual return type for polymorphic function \"%s\"" msgstr "다형적 함수 \"%s\"의 실제 반환 형식을 확인할 수 없음" -#: pl_comp.c:541 +#: pl_comp.c:544 #, c-format msgid "trigger functions can only be called as triggers" msgstr "트리거 함수는 트리거로만 호출될 수 있음" -#: pl_comp.c:545 pl_handler.c:435 +#: pl_comp.c:548 pl_handler.c:436 #, c-format msgid "PL/pgSQL functions cannot return type %s" msgstr "PL/pgSQL 함수는 %s 형식을 반환할 수 없음" -#: pl_comp.c:586 +#: pl_comp.c:589 #, c-format msgid "trigger functions cannot have declared arguments" msgstr "트리거 함수는 선언된 인수를 포함할 수 없음" -#: pl_comp.c:587 +#: pl_comp.c:590 #, c-format msgid "" "The arguments of the trigger can be accessed through TG_NARGS and TG_ARGV " "instead." msgstr "대신 TG_NARGS 및 TG_ARGV를 통해 트리거의 인수에 액세스할 수 있습니다." 
-#: pl_comp.c:689 +#: pl_comp.c:692 #, c-format msgid "event trigger functions cannot have declared arguments" msgstr "이벤트 트리거 함수는 선언된 인자(declare argument)를 사용할 수 없음" -#: pl_comp.c:940 +#: pl_comp.c:943 #, c-format msgid "compilation of PL/pgSQL function \"%s\" near line %d" msgstr "PL/pgSQL 함수 \"%s\" 컴파일(%d번째 줄 근처)" -#: pl_comp.c:963 +#: pl_comp.c:966 #, c-format msgid "parameter name \"%s\" used more than once" msgstr "\"%s\" 매개 변수가 여러 번 사용 됨" -#: pl_comp.c:1073 +#: pl_comp.c:1076 #, c-format msgid "column reference \"%s\" is ambiguous" msgstr "열 참조 \"%s\" 가 명확하지 않습니다." -#: pl_comp.c:1075 +#: pl_comp.c:1078 #, c-format msgid "It could refer to either a PL/pgSQL variable or a table column." msgstr "PL/pgSQL 변수명도, 테이블 칼럼 이름도 아니여야 함" -#: pl_comp.c:1255 pl_comp.c:1283 pl_exec.c:4393 pl_exec.c:4742 pl_exec.c:4827 -#: pl_exec.c:4918 +#: pl_comp.c:1258 pl_comp.c:1286 pl_exec.c:4584 pl_exec.c:4913 pl_exec.c:4998 +#: pl_exec.c:5089 #, c-format msgid "record \"%s\" has no field \"%s\"" msgstr "\"%s\" 레코드에 \"%s\" 필드가 없음" -#: pl_comp.c:1814 +#: pl_comp.c:1818 #, c-format msgid "relation \"%s\" does not exist" msgstr "\"%s\" 이름의 릴레이션(relation)이 없습니다" -#: pl_comp.c:1923 +#: pl_comp.c:1927 #, c-format msgid "variable \"%s\" has pseudo-type %s" msgstr "\"%s\" 변수에 의사 형식 %s이(가) 있음" -#: pl_comp.c:1990 +#: pl_comp.c:1995 #, c-format msgid "relation \"%s\" is not a table" msgstr "\"%s\" 관계가 테이블이 아님" -#: pl_comp.c:2150 +#: pl_comp.c:2155 #, c-format msgid "type \"%s\" is only a shell" msgstr "자료형 \"%s\" 는 오로지 shell 에만 있습니다. " -#: pl_comp.c:2244 pl_comp.c:2297 +#: pl_comp.c:2249 pl_comp.c:2302 #, c-format msgid "unrecognized exception condition \"%s\"" msgstr "인식할 수 없는 예외 조건 \"%s\"" -#: pl_comp.c:2504 +#: pl_comp.c:2510 #, c-format msgid "" "could not determine actual argument type for polymorphic function \"%s\"" msgstr "다형적 함수 \"%s\"의 실제 인수 형식을 확인할 수 없음" -#: pl_exec.c:324 pl_exec.c:612 pl_exec.c:872 +#: pl_exec.c:355 pl_exec.c:644 pl_exec.c:914 msgid "during initialization of execution state" msgstr "실행 상태를 초기화하는 동안" -#: pl_exec.c:331 +#: pl_exec.c:362 msgid "while storing call arguments into local variables" msgstr "호출 인수를 로컬 변수에 저장하는 동안" -#: pl_exec.c:416 pl_exec.c:760 +#: pl_exec.c:447 pl_exec.c:796 msgid "during function entry" msgstr "함수를 시작하는 동안" -#: pl_exec.c:441 +#: pl_exec.c:472 #, c-format msgid "control reached end of function without RETURN" msgstr "컨트롤이 RETURN 없이 함수 끝에 도달함" -#: pl_exec.c:448 +#: pl_exec.c:479 msgid "while casting return value to function's return type" msgstr "함수의 반환 형식으로 반환 값을 형변환하는 동안" -#: pl_exec.c:461 pl_exec.c:2938 +#: pl_exec.c:492 pl_exec.c:3101 #, c-format msgid "set-valued function called in context that cannot accept a set" msgstr "" "set-values 함수(테이블 리턴 함수)가 set 정의 없이 사용되었습니다 (테이블과 해" "당 열 alias 지정하세요)" -#: pl_exec.c:499 pl_exec.c:2779 +#: pl_exec.c:530 pl_exec.c:2948 msgid "returned record type does not match expected record type" msgstr "반환된 레코드 형식이 필요한 레코드 형식과 일치하지 않음" -#: pl_exec.c:554 pl_exec.c:789 pl_exec.c:907 +#: pl_exec.c:585 pl_exec.c:825 pl_exec.c:949 msgid "during function exit" msgstr "함수를 종료하는 동안" -#: pl_exec.c:785 pl_exec.c:903 +#: pl_exec.c:821 pl_exec.c:945 #, c-format msgid "control reached end of trigger procedure without RETURN" msgstr "컨트롤이 RETURN 없이 트리거 프로시저 끝에 도달함" -#: pl_exec.c:794 +#: pl_exec.c:830 #, c-format msgid "trigger procedure cannot return a set" msgstr "트리거 프로시저는 집합을 반환할 수 없음" -#: pl_exec.c:816 +#: pl_exec.c:852 msgid "" "returned row structure does not match the structure of the triggering table" msgstr "반환된 행 구조가 트리거하는 테이블의 구조와 
일치하지 않음" -#: pl_exec.c:954 +#: pl_exec.c:997 #, c-format msgid "PL/pgSQL function %s line %d %s" msgstr "PL/pgSQL 함수 \"%s\" 의 %d번째 줄 %s" -#: pl_exec.c:965 +#: pl_exec.c:1008 #, c-format msgid "PL/pgSQL function %s %s" msgstr "PL/pgSQL 함수 %s %s" #. translator: last %s is a plpgsql statement type name -#: pl_exec.c:973 +#: pl_exec.c:1016 #, c-format msgid "PL/pgSQL function %s line %d at %s" msgstr "PL/pgSQL 함수 \"%s\" 의 %d번째 %s" -#: pl_exec.c:979 +#: pl_exec.c:1022 #, c-format msgid "PL/pgSQL function %s" msgstr "PL/pgSQL 함수 %s" -#: pl_exec.c:1089 +#: pl_exec.c:1187 msgid "during statement block local variable initialization" msgstr "문 블록 로컬 변수를 초기화하는 동안" -#: pl_exec.c:1128 +#: pl_exec.c:1226 #, c-format msgid "variable \"%s\" declared NOT NULL cannot default to NULL" msgstr "NOT NULL이 선언된 \"%s\" 변수의 기본 값이 NULL로 설정될 수 없음" -#: pl_exec.c:1178 +#: pl_exec.c:1277 msgid "during statement block entry" msgstr "문 블록을 시작하는 동안" -#: pl_exec.c:1199 +#: pl_exec.c:1309 msgid "during statement block exit" msgstr "문 블록을 종료하는 동안" -#: pl_exec.c:1242 +#: pl_exec.c:1351 msgid "during exception cleanup" msgstr "예외를 정리하는 동안" -#: pl_exec.c:1593 +#: pl_exec.c:1717 #, c-format msgid "GET STACKED DIAGNOSTICS cannot be used outside an exception handler" msgstr "GET STACKED DIAGNOSTICS 구문은 예외처리 헨들러 밖에서 사용할 수 없음" -#: pl_exec.c:1789 +#: pl_exec.c:1922 #, c-format msgid "case not found" msgstr "사례를 찾지 못함" -#: pl_exec.c:1790 +#: pl_exec.c:1923 #, c-format msgid "CASE statement is missing ELSE part." msgstr "CASE 문에 ELSE 부분이 누락되었습니다." -#: pl_exec.c:1944 +#: pl_exec.c:2077 #, c-format msgid "lower bound of FOR loop cannot be null" msgstr "FOR 루프의 하한은 null일 수 없음" -#: pl_exec.c:1960 +#: pl_exec.c:2093 #, c-format msgid "upper bound of FOR loop cannot be null" msgstr "FOR 루프의 상한은 null일 수 없음" -#: pl_exec.c:1978 +#: pl_exec.c:2111 #, c-format msgid "BY value of FOR loop cannot be null" msgstr "FOR 루프의 BY 값은 null일 수 없음" -#: pl_exec.c:1984 +#: pl_exec.c:2117 #, c-format msgid "BY value of FOR loop must be greater than zero" msgstr "FOR 루프의 BY 값은 0보다 커야 함" -#: pl_exec.c:2153 pl_exec.c:3910 +#: pl_exec.c:2294 pl_exec.c:4085 #, c-format msgid "cursor \"%s\" already in use" msgstr "\"%s\" 커서가 이미 사용 중임" -#: pl_exec.c:2176 pl_exec.c:3972 +#: pl_exec.c:2317 pl_exec.c:4150 #, c-format msgid "arguments given for cursor without arguments" msgstr "인수가 없는 커서에 인수가 제공됨" -#: pl_exec.c:2195 pl_exec.c:3991 +#: pl_exec.c:2336 pl_exec.c:4169 #, c-format msgid "arguments required for cursor" msgstr "커서에 인수 필요" -#: pl_exec.c:2280 +#: pl_exec.c:2423 #, c-format msgid "FOREACH expression must not be null" msgstr "FOREACH 구문은 null 이 아니여야 함" -#: pl_exec.c:2286 +#: pl_exec.c:2438 #, c-format msgid "FOREACH expression must yield an array, not type %s" msgstr "FOREACH 구문에서는 배열이 사용됩니다. 사용된 자료형 %s" -#: pl_exec.c:2303 +#: pl_exec.c:2455 #, c-format msgid "slice dimension (%d) is out of the valid range 0..%d" msgstr "slice dimension (%d) 값이 범위를 벗어남, 0..%d" -#: pl_exec.c:2330 +#: pl_exec.c:2482 #, c-format msgid "FOREACH ... SLICE loop variable must be of an array type" msgstr "FOREACH ... 
SLICE 루프 변수는 배열 자료형이어야 함" -#: pl_exec.c:2334 +#: pl_exec.c:2486 #, c-format msgid "FOREACH loop variable must not be of an array type" msgstr "FOREACH 반복 변수는 배열형이 아니여야 함" -#: pl_exec.c:2522 pl_exec.c:2604 pl_exec.c:2771 +#: pl_exec.c:2689 pl_exec.c:2771 pl_exec.c:2941 #, c-format msgid "" "cannot return non-composite value from function returning composite type" msgstr "" "함수의 반환값이 복합 자료형인데, 복합 자료형아닌 자료형을 반환하려고 함" -#: pl_exec.c:2648 pl_gram.y:3190 +#: pl_exec.c:2815 pl_gram.y:3199 #, c-format msgid "cannot use RETURN NEXT in a non-SETOF function" msgstr "SETOF 함수가 아닌 함수에서 RETURN NEXT를 사용할 수 없음" -#: pl_exec.c:2682 pl_exec.c:2813 +#: pl_exec.c:2849 pl_exec.c:2976 #, c-format msgid "wrong result type supplied in RETURN NEXT" msgstr "RETURN NEXT에 잘못된 결과 형식이 제공됨" -#: pl_exec.c:2711 pl_exec.c:4380 pl_exec.c:4709 pl_exec.c:4735 pl_exec.c:4801 -#: pl_exec.c:4820 pl_exec.c:4888 pl_exec.c:4911 +#: pl_exec.c:2878 pl_exec.c:4572 pl_exec.c:4880 pl_exec.c:4906 pl_exec.c:4972 +#: pl_exec.c:4991 pl_exec.c:5059 pl_exec.c:5082 #, c-format msgid "record \"%s\" is not assigned yet" msgstr "\"%s\" 레코드가 아직 할당되지 않음" -#: pl_exec.c:2713 pl_exec.c:4382 pl_exec.c:4711 pl_exec.c:4737 pl_exec.c:4803 -#: pl_exec.c:4822 pl_exec.c:4890 pl_exec.c:4913 +#: pl_exec.c:2880 pl_exec.c:4574 pl_exec.c:4882 pl_exec.c:4908 pl_exec.c:4974 +#: pl_exec.c:4993 pl_exec.c:5061 pl_exec.c:5084 #, c-format msgid "The tuple structure of a not-yet-assigned record is indeterminate." msgstr "아직 할당되지 않은 레코드의 튜플 구조는 미정입니다." -#: pl_exec.c:2717 pl_exec.c:2737 +#: pl_exec.c:2887 pl_exec.c:2906 #, c-format msgid "wrong record type supplied in RETURN NEXT" msgstr "RETURN NEXT에 잘못된 레코드 형식이 제공됨" -#: pl_exec.c:2832 +#: pl_exec.c:2995 #, c-format msgid "RETURN NEXT must have a parameter" msgstr "RETURN NEXT에 매개 변수 필요" -#: pl_exec.c:2865 pl_gram.y:3252 +#: pl_exec.c:3021 pl_gram.y:3261 #, c-format msgid "cannot use RETURN QUERY in a non-SETOF function" msgstr "SETOF 함수가 아닌 함수에서 RETURN QUERY를 사용할 수 없음" -#: pl_exec.c:2886 +#: pl_exec.c:3045 msgid "structure of query does not match function result type" msgstr "쿼리 구조가 함수 결과 형식과 일치하지 않음" -#: pl_exec.c:2966 pl_exec.c:3096 +#: pl_exec.c:3129 pl_exec.c:3267 #, c-format msgid "RAISE option already specified: %s" msgstr "RAISE 옵션이 이미 지정됨: %s" -#: pl_exec.c:2999 +#: pl_exec.c:3163 #, c-format msgid "RAISE without parameters cannot be used outside an exception handler" msgstr "매개 변수 없는 RAISE를 예외 처리기 외부에 사용할 수 없음" -#: pl_exec.c:3086 +#: pl_exec.c:3257 #, c-format msgid "RAISE statement option cannot be null" msgstr "RAISE 문 옵션이 null일 수 없음" -#: pl_exec.c:3155 +#: pl_exec.c:3327 #, c-format msgid "%s" msgstr "%s" -#: pl_exec.c:3226 +#: pl_exec.c:3382 #, c-format msgid "assertion failed" msgstr "assertion 실패" -#: pl_exec.c:3416 pl_exec.c:3560 pl_exec.c:3749 +#: pl_exec.c:3583 pl_exec.c:3729 pl_exec.c:3919 #, c-format msgid "cannot COPY to/from client in PL/pgSQL" msgstr "PL/pgSQL의 클라이언트와 상호 복사할 수 없음" -#: pl_exec.c:3420 pl_exec.c:3564 pl_exec.c:3753 +#: pl_exec.c:3587 pl_exec.c:3733 pl_exec.c:3923 #, c-format msgid "cannot begin/end transactions in PL/pgSQL" msgstr "PL/pgSQL의 트랜잭션을 시작/종료할 수 없음" -#: pl_exec.c:3421 pl_exec.c:3565 pl_exec.c:3754 +#: pl_exec.c:3588 pl_exec.c:3734 pl_exec.c:3924 #, c-format msgid "Use a BEGIN block with an EXCEPTION clause instead." msgstr "대신 BEGIN 블록을 EXCEPTION 절과 함께 사용하십시오." 
-#: pl_exec.c:3588 pl_exec.c:3778 +#: pl_exec.c:3757 pl_exec.c:3948 #, c-format msgid "INTO used with a command that cannot return data" msgstr "데이터를 반환할 수 없는 명령과 함께 INTO가 사용됨" -#: pl_exec.c:3616 pl_exec.c:3806 +#: pl_exec.c:3785 pl_exec.c:3976 #, c-format msgid "query returned no rows" msgstr "쿼리에서 행을 반환하지 않음" -#: pl_exec.c:3635 pl_exec.c:3825 +#: pl_exec.c:3804 pl_exec.c:3995 #, c-format msgid "query returned more than one row" msgstr "쿼리에서 두 개 이상의 행을 반환" -#: pl_exec.c:3652 +#: pl_exec.c:3821 #, c-format msgid "query has no destination for result data" msgstr "쿼리에 결과 데이터의 대상이 없음" -#: pl_exec.c:3653 +#: pl_exec.c:3822 #, c-format msgid "If you want to discard the results of a SELECT, use PERFORM instead." msgstr "SELECT의 결과를 취소하려면 대신 PERFORM을 사용하십시오." -#: pl_exec.c:3685 pl_exec.c:7126 +#: pl_exec.c:3855 pl_exec.c:7292 #, c-format msgid "query string argument of EXECUTE is null" msgstr "EXECUTE의 쿼리 문자열 인수가 null임" -#: pl_exec.c:3741 +#: pl_exec.c:3911 #, c-format msgid "EXECUTE of SELECT ... INTO is not implemented" msgstr "SELECT의 EXECUTE... INTO가 구현되지 않음" -#: pl_exec.c:3742 +#: pl_exec.c:3912 #, c-format msgid "" "You might want to use EXECUTE ... INTO or EXECUTE CREATE TABLE ... AS " "instead." -msgstr "" -"EXECUTE ... INTO 또는 EXECUTE CREATE TABLE ... AS 구문을 사용하세요." +msgstr "EXECUTE ... INTO 또는 EXECUTE CREATE TABLE ... AS 구문을 사용하세요." -#: pl_exec.c:4054 pl_exec.c:4146 +#: pl_exec.c:4233 pl_exec.c:4329 #, c-format msgid "cursor variable \"%s\" is null" msgstr "커서 변수 \"%s\"이(가) null임" -#: pl_exec.c:4061 pl_exec.c:4153 +#: pl_exec.c:4244 pl_exec.c:4340 #, c-format msgid "cursor \"%s\" does not exist" msgstr "\"%s\" 이름의 커서가 없음" -#: pl_exec.c:4075 +#: pl_exec.c:4257 #, c-format msgid "relative or absolute cursor position is null" msgstr "상대 또는 절대 커서 위치가 null임" -#: pl_exec.c:4255 +#: pl_exec.c:4448 #, c-format msgid "null value cannot be assigned to variable \"%s\" declared NOT NULL" msgstr "NOT NULL이 선언된 \"%s\" 변수에 null 값을 할당할 수 없음" -#: pl_exec.c:4324 +#: pl_exec.c:4517 #, c-format msgid "cannot assign non-composite value to a row variable" msgstr "행 변수에 비복합 값을 할당할 수 없음" -#: pl_exec.c:4348 +#: pl_exec.c:4541 #, c-format msgid "cannot assign non-composite value to a record variable" msgstr "레코드 변수에 비복합 값을 할당할 수 없음" -#: pl_exec.c:4491 +#: pl_exec.c:4661 #, c-format msgid "number of array dimensions (%d) exceeds the maximum allowed (%d)" msgstr "지정한 배열 크기(%d)가 최대치(%d)를 초과했습니다" -#: pl_exec.c:4523 +#: pl_exec.c:4693 #, c-format msgid "subscripted object is not an array" msgstr "하위 스크립트 개체는 배열이 아님" -#: pl_exec.c:4560 +#: pl_exec.c:4731 #, c-format msgid "array subscript in assignment must not be null" msgstr "배열 하위 스크립트로 지정하는 값으로 null 값을 사용할 수 없습니다" -#: pl_exec.c:5027 +#: pl_exec.c:5198 #, c-format msgid "query \"%s\" did not return data" msgstr "\"%s\" 쿼리에서 데이터를 반환하지 않음" -#: pl_exec.c:5035 +#: pl_exec.c:5206 #, c-format msgid "query \"%s\" returned %d column" msgid_plural "query \"%s\" returned %d columns" msgstr[0] "\"%s\" 쿼리가 %d 개의 칼럼을 반환함" -#: pl_exec.c:5062 +#: pl_exec.c:5233 #, c-format msgid "query \"%s\" returned more than one row" msgstr "\"%s\" 쿼리에서 두 개 이상의 행을 반환함" -#: pl_exec.c:5126 +#: pl_exec.c:5301 #, c-format msgid "query \"%s\" is not a SELECT" msgstr "\"%s\" 쿼리가 SELECT가 아님" -#: pl_funcs.c:237 +#: pl_funcs.c:239 msgid "statement block" msgstr "문 블록" -#: pl_funcs.c:239 +#: pl_funcs.c:241 msgid "assignment" msgstr "할당" -#: pl_funcs.c:249 +#: pl_funcs.c:251 msgid "FOR with integer loop variable" msgstr "정수 루프 변수를 포함하는 FOR" -#: pl_funcs.c:251 +#: pl_funcs.c:253 msgid "FOR over SELECT 
rows" msgstr "SELECT 행을 제어하는 FOR" -#: pl_funcs.c:253 +#: pl_funcs.c:255 msgid "FOR over cursor" msgstr "커서를 제어하는 FOR" -#: pl_funcs.c:255 +#: pl_funcs.c:257 msgid "FOREACH over array" msgstr "배열 초과된 FOREACH" -#: pl_funcs.c:269 +#: pl_funcs.c:271 msgid "SQL statement" msgstr "SQL 문" -#: pl_funcs.c:273 +#: pl_funcs.c:275 msgid "FOR over EXECUTE statement" msgstr "EXECUTE 문을 제어하는 FOR" -#: pl_gram.y:474 +#: pl_gram.y:478 #, c-format msgid "block label must be placed before DECLARE, not after" msgstr "블록 라벨은 DECLARE 영역 앞에 있어야 함" -#: pl_gram.y:494 +#: pl_gram.y:498 #, c-format msgid "collations are not supported by type %s" msgstr "%s 자료형은 collation 지원 안함" -#: pl_gram.y:509 +#: pl_gram.y:513 #, c-format msgid "row or record variable cannot be CONSTANT" msgstr "행 또는 레코드 변수는 CONSTANT일 수 없음" -#: pl_gram.y:519 +#: pl_gram.y:523 #, c-format msgid "row or record variable cannot be NOT NULL" msgstr "행 또는 레코드 변수는 NOT NULL일 수 없음" -#: pl_gram.y:530 +#: pl_gram.y:534 #, c-format msgid "default value for row or record variable is not supported" msgstr "행 또는 레코드 변수의 기본 값이 지원되지 않음" -#: pl_gram.y:675 pl_gram.y:690 pl_gram.y:716 +#: pl_gram.y:679 pl_gram.y:694 pl_gram.y:720 #, c-format msgid "variable \"%s\" does not exist" msgstr "\"%s\" 변수가 없음" -#: pl_gram.y:734 pl_gram.y:762 +#: pl_gram.y:738 pl_gram.y:766 msgid "duplicate declaration" msgstr "중복 선언" -#: pl_gram.y:745 pl_gram.y:773 +#: pl_gram.y:749 pl_gram.y:777 #, c-format msgid "variable \"%s\" shadows a previously defined variable" msgstr "variable \"%s\" shadows a previously defined variable" -#: pl_gram.y:952 +#: pl_gram.y:956 #, c-format msgid "diagnostics item %s is not allowed in GET STACKED DIAGNOSTICS" msgstr "GET STACKED DIAGNOSTICS 에서 %s 항목을 사용할 수 없음" -#: pl_gram.y:970 +#: pl_gram.y:974 #, c-format msgid "diagnostics item %s is not allowed in GET CURRENT DIAGNOSTICS" msgstr "GET CURRENT DIAGNOSTICS 에서 %s 항목을 사용할 수 없음" -#: pl_gram.y:1068 +#: pl_gram.y:1072 msgid "unrecognized GET DIAGNOSTICS item" msgstr "알 수 없는 GET DIAGNOSTICS 항목" -#: pl_gram.y:1079 pl_gram.y:3439 +#: pl_gram.y:1082 pl_gram.y:3448 #, c-format msgid "\"%s\" is not a scalar variable" msgstr "\"%s\"은(는) 스칼라 변수가 아님" -#: pl_gram.y:1331 pl_gram.y:1525 +#: pl_gram.y:1334 pl_gram.y:1528 #, c-format msgid "" "loop variable of loop over rows must be a record or row variable or list of " @@ -574,263 +574,263 @@ msgstr "" "행에 있는 루프의 루프 변수는 레코드 또는 행 변수이거나 스칼라 변수의 목록이어" "야 함" -#: pl_gram.y:1365 +#: pl_gram.y:1368 #, c-format msgid "cursor FOR loop must have only one target variable" msgstr "커서 FOR 루프에 대상 변수가 한 개만 있어야 함" -#: pl_gram.y:1372 +#: pl_gram.y:1375 #, c-format msgid "cursor FOR loop must use a bound cursor variable" msgstr "커서 FOR 루프는 바인딩된 커서 변수를 한 개만 사용해야 함" -#: pl_gram.y:1456 +#: pl_gram.y:1459 #, c-format msgid "integer FOR loop must have only one target variable" msgstr "정수 FOR 루프에 대상 변수가 한 개만 있어야 함" -#: pl_gram.y:1492 +#: pl_gram.y:1495 #, c-format msgid "cannot specify REVERSE in query FOR loop" msgstr "쿼리 FOR 루프에 REVERSE를 지정할 수 없음" -#: pl_gram.y:1639 +#: pl_gram.y:1642 #, c-format msgid "loop variable of FOREACH must be a known variable or list of variables" msgstr "FOREACH의 반복 변수는 알려진 변수이거나 변수의 목록이어야 함" -#: pl_gram.y:1680 +#: pl_gram.y:1683 #, c-format msgid "" "there is no label \"%s\" attached to any block or loop enclosing this " "statement" -msgstr "" -"임의 블록이나 루프 구문에 할당된 \"%s\" 라벨이 없음" +msgstr "임의 블록이나 루프 구문에 할당된 \"%s\" 라벨이 없음" -#: pl_gram.y:1688 +#: pl_gram.y:1691 #, c-format msgid "block label \"%s\" cannot be used in CONTINUE" msgstr "CONTINUE 안에서 \"%s\" 블록 라벨을 사용할 수 
없음" -#: pl_gram.y:1703 +#: pl_gram.y:1706 #, c-format msgid "EXIT cannot be used outside a loop, unless it has a label" msgstr "루프 외부에 라벨 지정 없이 EXIT 사용할 수 없음" -#: pl_gram.y:1704 +#: pl_gram.y:1707 #, c-format msgid "CONTINUE cannot be used outside a loop" msgstr "CONTINUE를 루프 외부에 사용할 수 없음" -#: pl_gram.y:1728 pl_gram.y:1765 pl_gram.y:1813 pl_gram.y:2889 pl_gram.y:2974 -#: pl_gram.y:3085 pl_gram.y:3841 +#: pl_gram.y:1731 pl_gram.y:1768 pl_gram.y:1816 pl_gram.y:2898 pl_gram.y:2983 +#: pl_gram.y:3094 pl_gram.y:3850 msgid "unexpected end of function definition" msgstr "예기치 않은 함수 정의의 끝" -#: pl_gram.y:1833 pl_gram.y:1857 pl_gram.y:1873 pl_gram.y:1879 pl_gram.y:1997 -#: pl_gram.y:2005 pl_gram.y:2019 pl_gram.y:2114 pl_gram.y:2295 pl_gram.y:2389 -#: pl_gram.y:2541 pl_gram.y:3682 pl_gram.y:3743 pl_gram.y:3822 +#: pl_gram.y:1836 pl_gram.y:1860 pl_gram.y:1876 pl_gram.y:1882 pl_gram.y:2000 +#: pl_gram.y:2008 pl_gram.y:2022 pl_gram.y:2117 pl_gram.y:2304 pl_gram.y:2398 +#: pl_gram.y:2550 pl_gram.y:3691 pl_gram.y:3752 pl_gram.y:3831 msgid "syntax error" msgstr "구문 오류" -#: pl_gram.y:1861 pl_gram.y:1863 pl_gram.y:2299 pl_gram.y:2301 +#: pl_gram.y:1864 pl_gram.y:1866 pl_gram.y:2308 pl_gram.y:2310 msgid "invalid SQLSTATE code" msgstr "잘못된 SQLSTATE 코드" -#: pl_gram.y:2061 +#: pl_gram.y:2064 msgid "syntax error, expected \"FOR\"" msgstr "구문 오류, \"FOR\" 필요" -#: pl_gram.y:2123 +#: pl_gram.y:2126 #, c-format msgid "FETCH statement cannot return multiple rows" msgstr "FETCH 구문은 다중 로우를 반환할 수 없음" -#: pl_gram.y:2179 +#: pl_gram.y:2188 #, c-format msgid "cursor variable must be a simple variable" msgstr "커서 변수는 단순 변수여야 함" -#: pl_gram.y:2185 +#: pl_gram.y:2194 #, c-format msgid "variable \"%s\" must be of type cursor or refcursor" msgstr "\"%s\" 변수는 커서 또는 ref 커서 형식이어야 함" -#: pl_gram.y:2512 pl_gram.y:2523 +#: pl_gram.y:2521 pl_gram.y:2532 #, c-format msgid "\"%s\" is not a known variable" msgstr "\"%s\" (은)는 알려진 변수가 아님" -#: pl_gram.y:2627 pl_gram.y:2637 pl_gram.y:2793 +#: pl_gram.y:2636 pl_gram.y:2646 pl_gram.y:2802 msgid "mismatched parentheses" msgstr "괄호의 짝이 맞지 않음" -#: pl_gram.y:2641 +#: pl_gram.y:2650 #, c-format msgid "missing \"%s\" at end of SQL expression" msgstr "SQL 식 끝에 \"%s\" 누락" -#: pl_gram.y:2647 +#: pl_gram.y:2656 #, c-format msgid "missing \"%s\" at end of SQL statement" msgstr "SQL 문 끝에 \"%s\" 누락" -#: pl_gram.y:2664 +#: pl_gram.y:2673 msgid "missing expression" msgstr "표현식 빠졌음" -#: pl_gram.y:2666 +#: pl_gram.y:2675 msgid "missing SQL statement" msgstr "SQL 문이 빠졌음" -#: pl_gram.y:2795 +#: pl_gram.y:2804 msgid "incomplete data type declaration" msgstr "불완전한 데이터 형식 선언" -#: pl_gram.y:2818 +#: pl_gram.y:2827 msgid "missing data type declaration" msgstr "데이터 형식 선언 누락" -#: pl_gram.y:2897 +#: pl_gram.y:2906 msgid "INTO specified more than once" msgstr "INTO가 여러 번 지정됨" -#: pl_gram.y:3066 +#: pl_gram.y:3075 msgid "expected FROM or IN" msgstr "FROM 또는 IN 필요" -#: pl_gram.y:3126 +#: pl_gram.y:3135 #, c-format msgid "RETURN cannot have a parameter in function returning set" msgstr "집합을 반환하는 함수에서 RETURN 구문에는 인자가 없음" -#: pl_gram.y:3127 +#: pl_gram.y:3136 #, c-format msgid "Use RETURN NEXT or RETURN QUERY." msgstr "RETURN NEXT 나 RETURN QUERY 구문을 사용하세요." 
-#: pl_gram.y:3135 +#: pl_gram.y:3144 #, c-format msgid "RETURN cannot have a parameter in function with OUT parameters" msgstr "RETURN은 OUT 매개 변수가 있는 함수에 매개 변수를 포함할 수 없음" -#: pl_gram.y:3144 +#: pl_gram.y:3153 #, c-format msgid "RETURN cannot have a parameter in function returning void" msgstr "RETURN은 void를 반환하는 함수에 매개 변수를 포함할 수 없음" -#: pl_gram.y:3204 +#: pl_gram.y:3213 #, c-format msgid "RETURN NEXT cannot have a parameter in function with OUT parameters" msgstr "RETURN NEXT는 OUT 매개 변수가 있는 함수에 매개 변수를 포함할 수 없음" -#: pl_gram.y:3308 +#: pl_gram.y:3317 #, c-format msgid "\"%s\" is declared CONSTANT" msgstr "\"%s\"이(가) CONSTANT로 선언됨" -#: pl_gram.y:3370 pl_gram.y:3382 +#: pl_gram.y:3379 pl_gram.y:3391 #, c-format msgid "record or row variable cannot be part of multiple-item INTO list" msgstr "다중 아이템 INTO 목록의 부분으로 record나 row 변수가 사용될 수 없음" -#: pl_gram.y:3427 +#: pl_gram.y:3436 #, c-format msgid "too many INTO variables specified" msgstr "너무 많은 INTO 변수가 지정됨" -#: pl_gram.y:3635 +#: pl_gram.y:3644 #, c-format msgid "end label \"%s\" specified for unlabelled block" msgstr "레이블이 없는 블록에 끝 레이블 \"%s\"이(가) 지정됨" -#: pl_gram.y:3642 +#: pl_gram.y:3651 #, c-format msgid "end label \"%s\" differs from block's label \"%s\"" msgstr "끝 레이블 \"%s\"이(가) 블록의 \"%s\" 레이블과 다름" -#: pl_gram.y:3677 +#: pl_gram.y:3686 #, c-format msgid "cursor \"%s\" has no arguments" msgstr "\"%s\" 커서에 인수가 없음" -#: pl_gram.y:3691 +#: pl_gram.y:3700 #, c-format msgid "cursor \"%s\" has arguments" msgstr "\"%s\" 커서에 인수가 있음" -#: pl_gram.y:3733 +#: pl_gram.y:3742 #, c-format msgid "cursor \"%s\" has no argument named \"%s\"" msgstr "\"%s\" 커서는 \"%s\" 이름의 인자가 없음" -#: pl_gram.y:3753 +#: pl_gram.y:3762 #, c-format msgid "value for parameter \"%s\" of cursor \"%s\" specified more than once" msgstr "\"%s\" 이름의 인자가 \"%s\" 커서에서 중복됨" -#: pl_gram.y:3778 +#: pl_gram.y:3787 #, c-format msgid "not enough arguments for cursor \"%s\"" msgstr "\"%s\" 커서를 위한 충분하지 않은 인자" -#: pl_gram.y:3785 +#: pl_gram.y:3794 #, c-format msgid "too many arguments for cursor \"%s\"" msgstr "\"%s\" 커서를 위한 인자가 너무 많음" -#: pl_gram.y:3873 +#: pl_gram.y:3882 msgid "unrecognized RAISE statement option" msgstr "인식할 수 없는 RAISE 문 옵션" -#: pl_gram.y:3877 +#: pl_gram.y:3886 msgid "syntax error, expected \"=\"" msgstr "구문 오류, \"=\" 필요" -#: pl_gram.y:3918 +#: pl_gram.y:3927 #, c-format msgid "too many parameters specified for RAISE" msgstr "RAISE에 지정된 매개 변수가 너무 많음" -#: pl_gram.y:3922 +#: pl_gram.y:3931 #, c-format msgid "too few parameters specified for RAISE" msgstr "RAISE에 지정된 매개 변수가 너무 적음" -#: pl_handler.c:151 +#: pl_handler.c:154 msgid "" "Sets handling of conflicts between PL/pgSQL variable names and table column " "names." -msgstr "PL/pgSQL 변수명과 테이블 칼럼명 사이 충돌이 일어날 경우에 대한 처리를 하세요." +msgstr "" +"PL/pgSQL 변수명과 테이블 칼럼명 사이 충돌이 일어날 경우에 대한 처리를 하세요." -#: pl_handler.c:160 +#: pl_handler.c:163 msgid "" "Print information about parameters in the DETAIL part of the error messages " "generated on INTO ... STRICT failures." msgstr "" -"INTO ... STRICT 실패에서 오류 메시지를 만들 때 그 DETAIL 부분에 들어갈 내용을 " -"출력 하세요" +"INTO ... STRICT 실패에서 오류 메시지를 만들 때 그 DETAIL 부분에 들어갈 내용" +"을 출력 하세요" -#: pl_handler.c:168 +#: pl_handler.c:171 msgid "Perform checks given in ASSERT statements." msgstr "ASSERT 구문에서 주어진 검사를 수행하세요." -#: pl_handler.c:176 +#: pl_handler.c:179 msgid "List of programming constructs that should produce a warning." msgstr "경고로 처리할 프로그래밍 컨스트럭트 목록" -#: pl_handler.c:186 +#: pl_handler.c:189 msgid "List of programming constructs that should produce an error." msgstr "오류로 처리할 프로그래밍 컨스트럭트 목록" #. 
translator: %s is typically the translation of "syntax error" -#: pl_scanner.c:622 +#: pl_scanner.c:624 #, c-format msgid "%s at end of input" msgstr "%s, 입력 끝부분" #. translator: first %s is typically the translation of "syntax error" -#: pl_scanner.c:638 +#: pl_scanner.c:640 #, c-format msgid "%s at or near \"%s\"" msgstr "%s, \"%s\" 부근" diff --git a/src/pl/plpgsql/src/po/ru.po b/src/pl/plpgsql/src/po/ru.po index 759f368b13..1ddfac3706 100644 --- a/src/pl/plpgsql/src/po/ru.po +++ b/src/pl/plpgsql/src/po/ru.po @@ -2,13 +2,12 @@ # Copyright (C) 2012-2016 PostgreSQL Global Development Group # This file is distributed under the same license as the PostgreSQL package. # Alexander Lakhin , 2012-2017. -# msgid "" msgstr "" "Project-Id-Version: plpgsql (PostgreSQL current)\n" "Report-Msgid-Bugs-To: pgsql-bugs@postgresql.org\n" -"POT-Creation-Date: 2017-04-02 23:37+0000\n" -"PO-Revision-Date: 2016-11-24 11:24+0300\n" +"POT-Creation-Date: 2017-10-30 19:30+0300\n" +"PO-Revision-Date: 2017-10-11 10:10+0300\n" "Last-Translator: Alexander Lakhin \n" "Language-Team: Russian \n" "Language: ru\n" @@ -79,38 +78,38 @@ msgstr "неоднозначная ссылка на столбец \"%s\"" msgid "It could refer to either a PL/pgSQL variable or a table column." msgstr "Подразумевается ссылка на переменную PL/pgSQL или столбец таблицы." -#: pl_comp.c:1258 pl_comp.c:1286 pl_exec.c:4624 pl_exec.c:4953 pl_exec.c:5038 -#: pl_exec.c:5129 +#: pl_comp.c:1258 pl_comp.c:1286 pl_exec.c:4584 pl_exec.c:4913 pl_exec.c:4998 +#: pl_exec.c:5089 #, c-format msgid "record \"%s\" has no field \"%s\"" msgstr "в записи \"%s\" нет поля \"%s\"" -#: pl_comp.c:1817 +#: pl_comp.c:1818 #, c-format msgid "relation \"%s\" does not exist" msgstr "отношение \"%s\" не существует" -#: pl_comp.c:1926 +#: pl_comp.c:1927 #, c-format msgid "variable \"%s\" has pseudo-type %s" msgstr "переменная \"%s\" имеет псевдотип %s" -#: pl_comp.c:1993 +#: pl_comp.c:1995 #, c-format msgid "relation \"%s\" is not a table" msgstr "отношение \"%s\" не является таблицей" -#: pl_comp.c:2153 +#: pl_comp.c:2155 #, c-format msgid "type \"%s\" is only a shell" msgstr "тип \"%s\" - лишь пустышка" -#: pl_comp.c:2247 pl_comp.c:2300 +#: pl_comp.c:2249 pl_comp.c:2302 #, c-format msgid "unrecognized exception condition \"%s\"" msgstr "нераспознанное условие исключения \"%s\"" -#: pl_comp.c:2508 +#: pl_comp.c:2510 #, c-format msgid "" "could not determine actual argument type for polymorphic function \"%s\"" @@ -118,7 +117,7 @@ msgstr "" "не удалось определить фактический тип аргумента для полиморфной функции \"%s" "\"" -#: pl_exec.c:355 pl_exec.c:644 pl_exec.c:951 +#: pl_exec.c:355 pl_exec.c:644 pl_exec.c:914 msgid "during initialization of execution state" msgstr "в процессе инициализации состояния выполнения" @@ -126,7 +125,7 @@ msgstr "в процессе инициализации состояния вып msgid "while storing call arguments into local variables" msgstr "при сохранении аргументов вызова в локальных переменных" -#: pl_exec.c:447 pl_exec.c:833 +#: pl_exec.c:447 pl_exec.c:796 msgid "during function entry" msgstr "при входе в функцию" @@ -139,31 +138,31 @@ msgstr "конец функции достигнут без RETURN" msgid "while casting return value to function's return type" msgstr "при приведении возвращаемого значения к типу результата функции" -#: pl_exec.c:492 pl_exec.c:3138 +#: pl_exec.c:492 pl_exec.c:3101 #, c-format msgid "set-valued function called in context that cannot accept a set" msgstr "" "функция, возвращающая множество, вызвана в контексте, где ему нет места" -#: pl_exec.c:530 pl_exec.c:2985 +#: pl_exec.c:530 
pl_exec.c:2948 msgid "returned record type does not match expected record type" msgstr "возвращаемый тип записи не соответствует ожидаемому" -#: pl_exec.c:585 pl_exec.c:862 pl_exec.c:986 +#: pl_exec.c:585 pl_exec.c:825 pl_exec.c:949 msgid "during function exit" msgstr "при выходе из функции" -#: pl_exec.c:858 pl_exec.c:982 +#: pl_exec.c:821 pl_exec.c:945 #, c-format msgid "control reached end of trigger procedure without RETURN" msgstr "конец триггерной процедуры достигнут без RETURN" -#: pl_exec.c:867 +#: pl_exec.c:830 #, c-format msgid "trigger procedure cannot return a set" msgstr "триггерная процедура не может возвращать множество" -#: pl_exec.c:889 +#: pl_exec.c:852 msgid "" "returned row structure does not match the structure of the triggering table" msgstr "" @@ -173,7 +172,7 @@ msgstr "" #. translator: last %s is a phrase such as "during statement block #. local variable initialization" #. -#: pl_exec.c:1034 +#: pl_exec.c:997 #, c-format msgid "PL/pgSQL function %s line %d %s" msgstr "функция PL/pgSQL %s, строка %d, %s" @@ -181,250 +180,250 @@ msgstr "функция PL/pgSQL %s, строка %d, %s" #. translator: last %s is a phrase such as "while storing call #. arguments into local variables" #. -#: pl_exec.c:1045 +#: pl_exec.c:1008 #, c-format msgid "PL/pgSQL function %s %s" msgstr "функция PL/pgSQL %s, %s" #. translator: last %s is a plpgsql statement type name -#: pl_exec.c:1053 +#: pl_exec.c:1016 #, c-format msgid "PL/pgSQL function %s line %d at %s" msgstr "функция PL/pgSQL %s, строка %d, оператор %s" -#: pl_exec.c:1059 +#: pl_exec.c:1022 #, c-format msgid "PL/pgSQL function %s" msgstr "функция PL/pgSQL %s" -#: pl_exec.c:1224 +#: pl_exec.c:1187 msgid "during statement block local variable initialization" msgstr "при инициализации локальной переменной в блоке операторов" -#: pl_exec.c:1263 +#: pl_exec.c:1226 #, c-format msgid "variable \"%s\" declared NOT NULL cannot default to NULL" msgstr "" "переменная \"%s\", объявленная NOT NULL, не может иметь значение по " "умолчанию NULL" -#: pl_exec.c:1314 +#: pl_exec.c:1277 msgid "during statement block entry" msgstr "при входе в блок операторов" -#: pl_exec.c:1346 +#: pl_exec.c:1309 msgid "during statement block exit" msgstr "при выходе из блока операторов" -#: pl_exec.c:1388 +#: pl_exec.c:1351 msgid "during exception cleanup" msgstr "при очистке после исключения" -#: pl_exec.c:1754 +#: pl_exec.c:1717 #, c-format msgid "GET STACKED DIAGNOSTICS cannot be used outside an exception handler" msgstr "" "GET STACKED DIAGNOSTICS нельзя использовать вне блока обработчика исключения" -#: pl_exec.c:1959 +#: pl_exec.c:1922 #, c-format msgid "case not found" msgstr "неправильный CASE" -#: pl_exec.c:1960 +#: pl_exec.c:1923 #, c-format msgid "CASE statement is missing ELSE part." msgstr "В операторе CASE не хватает части ELSE." 
-#: pl_exec.c:2114 +#: pl_exec.c:2077 #, c-format msgid "lower bound of FOR loop cannot be null" msgstr "нижняя граница цикла FOR не может быть равна NULL" -#: pl_exec.c:2130 +#: pl_exec.c:2093 #, c-format msgid "upper bound of FOR loop cannot be null" msgstr "верхняя граница цикла FOR не может быть равна NULL" -#: pl_exec.c:2148 +#: pl_exec.c:2111 #, c-format msgid "BY value of FOR loop cannot be null" msgstr "значение BY в цикле FOR не может быть равно NULL" -#: pl_exec.c:2154 +#: pl_exec.c:2117 #, c-format msgid "BY value of FOR loop must be greater than zero" msgstr "значение BY в цикле FOR должно быть больше нуля" -#: pl_exec.c:2331 pl_exec.c:4125 +#: pl_exec.c:2294 pl_exec.c:4085 #, c-format msgid "cursor \"%s\" already in use" msgstr "курсор \"%s\" уже используется" -#: pl_exec.c:2354 pl_exec.c:4190 +#: pl_exec.c:2317 pl_exec.c:4150 #, c-format msgid "arguments given for cursor without arguments" msgstr "курсору без аргументов были переданы аргументы" -#: pl_exec.c:2373 pl_exec.c:4209 +#: pl_exec.c:2336 pl_exec.c:4169 #, c-format msgid "arguments required for cursor" msgstr "курсору требуются аргументы" -#: pl_exec.c:2460 +#: pl_exec.c:2423 #, c-format msgid "FOREACH expression must not be null" msgstr "выражение FOREACH не может быть равно NULL" -#: pl_exec.c:2475 +#: pl_exec.c:2438 #, c-format msgid "FOREACH expression must yield an array, not type %s" msgstr "выражение в FOREACH должно быть массивом, но не типом %s" -#: pl_exec.c:2492 +#: pl_exec.c:2455 #, c-format msgid "slice dimension (%d) is out of the valid range 0..%d" msgstr "размерность среза (%d) вне допустимого диапазона 0..%d" -#: pl_exec.c:2519 +#: pl_exec.c:2482 #, c-format msgid "FOREACH ... SLICE loop variable must be of an array type" msgstr "переменная цикла FOREACH ... SLICE должна быть массивом" -#: pl_exec.c:2523 +#: pl_exec.c:2486 #, c-format msgid "FOREACH loop variable must not be of an array type" msgstr "переменная цикла FOREACH не должна быть массивом" -#: pl_exec.c:2726 pl_exec.c:2808 pl_exec.c:2978 +#: pl_exec.c:2689 pl_exec.c:2771 pl_exec.c:2941 #, c-format msgid "" "cannot return non-composite value from function returning composite type" msgstr "" "функция, возвращающая составной тип, не может вернуть несоставное значение" -#: pl_exec.c:2852 pl_gram.y:3199 +#: pl_exec.c:2815 pl_gram.y:3199 #, c-format msgid "cannot use RETURN NEXT in a non-SETOF function" msgstr "" "RETURN NEXT можно использовать только в функциях, возвращающих множества" -#: pl_exec.c:2886 pl_exec.c:3013 +#: pl_exec.c:2849 pl_exec.c:2976 #, c-format msgid "wrong result type supplied in RETURN NEXT" msgstr "в RETURN NEXT передан неправильный тип результата" -#: pl_exec.c:2915 pl_exec.c:4612 pl_exec.c:4920 pl_exec.c:4946 pl_exec.c:5012 -#: pl_exec.c:5031 pl_exec.c:5099 pl_exec.c:5122 +#: pl_exec.c:2878 pl_exec.c:4572 pl_exec.c:4880 pl_exec.c:4906 pl_exec.c:4972 +#: pl_exec.c:4991 pl_exec.c:5059 pl_exec.c:5082 #, c-format msgid "record \"%s\" is not assigned yet" msgstr "записи \"%s\" не присвоено значение" -#: pl_exec.c:2917 pl_exec.c:4614 pl_exec.c:4922 pl_exec.c:4948 pl_exec.c:5014 -#: pl_exec.c:5033 pl_exec.c:5101 pl_exec.c:5124 +#: pl_exec.c:2880 pl_exec.c:4574 pl_exec.c:4882 pl_exec.c:4908 pl_exec.c:4974 +#: pl_exec.c:4993 pl_exec.c:5061 pl_exec.c:5084 #, c-format msgid "The tuple structure of a not-yet-assigned record is indeterminate." msgstr "" "Для записи, которой не присвоено значение, структура кортежа не определена." 
-#: pl_exec.c:2924 pl_exec.c:2943 +#: pl_exec.c:2887 pl_exec.c:2906 #, c-format msgid "wrong record type supplied in RETURN NEXT" msgstr "в RETURN NEXT передан неправильный тип записи" -#: pl_exec.c:3032 +#: pl_exec.c:2995 #, c-format msgid "RETURN NEXT must have a parameter" msgstr "у оператора RETURN NEXT должен быть параметр" -#: pl_exec.c:3058 pl_gram.y:3261 +#: pl_exec.c:3021 pl_gram.y:3261 #, c-format msgid "cannot use RETURN QUERY in a non-SETOF function" msgstr "" "RETURN QUERY можно использовать только в функциях, возвращающих множества" -#: pl_exec.c:3082 +#: pl_exec.c:3045 msgid "structure of query does not match function result type" msgstr "структура запроса не соответствует типу результата функции" -#: pl_exec.c:3166 pl_exec.c:3304 +#: pl_exec.c:3129 pl_exec.c:3267 #, c-format msgid "RAISE option already specified: %s" msgstr "этот параметр RAISE уже указан: %s" -#: pl_exec.c:3200 +#: pl_exec.c:3163 #, c-format msgid "RAISE without parameters cannot be used outside an exception handler" msgstr "" "RAISE без параметров нельзя использовать вне блока обработчика исключения" -#: pl_exec.c:3294 +#: pl_exec.c:3257 #, c-format msgid "RAISE statement option cannot be null" msgstr "параметром оператора RAISE не может быть NULL" -#: pl_exec.c:3364 +#: pl_exec.c:3327 #, c-format msgid "%s" msgstr "%s" -#: pl_exec.c:3419 +#: pl_exec.c:3382 #, c-format msgid "assertion failed" msgstr "нарушение истинности" -#: pl_exec.c:3623 pl_exec.c:3769 pl_exec.c:3959 +#: pl_exec.c:3583 pl_exec.c:3729 pl_exec.c:3919 #, c-format msgid "cannot COPY to/from client in PL/pgSQL" msgstr "в PL/pgSQL нельзя выполнить COPY с участием клиента" -#: pl_exec.c:3627 pl_exec.c:3773 pl_exec.c:3963 +#: pl_exec.c:3587 pl_exec.c:3733 pl_exec.c:3923 #, c-format msgid "cannot begin/end transactions in PL/pgSQL" msgstr "в PL/pgSQL нельзя начинать/заканчивать транзакции" -#: pl_exec.c:3628 pl_exec.c:3774 pl_exec.c:3964 +#: pl_exec.c:3588 pl_exec.c:3734 pl_exec.c:3924 #, c-format msgid "Use a BEGIN block with an EXCEPTION clause instead." msgstr "Используйте блок BEGIN с предложением EXCEPTION." -#: pl_exec.c:3797 pl_exec.c:3988 +#: pl_exec.c:3757 pl_exec.c:3948 #, c-format msgid "INTO used with a command that cannot return data" msgstr "INTO с командой не может возвращать данные" -#: pl_exec.c:3825 pl_exec.c:4016 +#: pl_exec.c:3785 pl_exec.c:3976 #, c-format msgid "query returned no rows" msgstr "запрос не вернул строк" -#: pl_exec.c:3844 pl_exec.c:4035 +#: pl_exec.c:3804 pl_exec.c:3995 #, c-format msgid "query returned more than one row" msgstr "запрос вернул несколько строк" -#: pl_exec.c:3861 +#: pl_exec.c:3821 #, c-format msgid "query has no destination for result data" msgstr "в запросе нет назначения для данных результата" -#: pl_exec.c:3862 +#: pl_exec.c:3822 #, c-format msgid "If you want to discard the results of a SELECT, use PERFORM instead." msgstr "Если вам нужно отбросить результаты SELECT, используйте PERFORM." -#: pl_exec.c:3895 pl_exec.c:7332 +#: pl_exec.c:3855 pl_exec.c:7317 #, c-format msgid "query string argument of EXECUTE is null" msgstr "в качестве текста запроса в EXECUTE передан NULL" -#: pl_exec.c:3951 +#: pl_exec.c:3911 #, c-format msgid "EXECUTE of SELECT ... INTO is not implemented" msgstr "возможность выполнения SELECT ... INTO в EXECUTE не реализована" # skip-rule: space-before-ellipsis -#: pl_exec.c:3952 +#: pl_exec.c:3912 #, c-format msgid "" "You might want to use EXECUTE ... INTO or EXECUTE CREATE TABLE ... AS " @@ -433,70 +432,70 @@ msgstr "" "Альтернативой может стать EXECUTE ... 
INTO или EXECUTE CREATE TABLE ... " "AS ..." -#: pl_exec.c:4273 pl_exec.c:4369 +#: pl_exec.c:4233 pl_exec.c:4329 #, c-format msgid "cursor variable \"%s\" is null" msgstr "переменная курсора \"%s\" равна NULL" -#: pl_exec.c:4284 pl_exec.c:4380 +#: pl_exec.c:4244 pl_exec.c:4340 #, c-format msgid "cursor \"%s\" does not exist" msgstr "курсор \"%s\" не существует" -#: pl_exec.c:4297 +#: pl_exec.c:4257 #, c-format msgid "relative or absolute cursor position is null" msgstr "относительная или абсолютная позиция курсора равна NULL" -#: pl_exec.c:4488 +#: pl_exec.c:4448 #, c-format msgid "null value cannot be assigned to variable \"%s\" declared NOT NULL" msgstr "значение NULL нельзя присвоить переменной \"%s\", объявленной NOT NULL" -#: pl_exec.c:4557 +#: pl_exec.c:4517 #, c-format msgid "cannot assign non-composite value to a row variable" msgstr "переменной типа кортеж можно присвоить только составное значение" -#: pl_exec.c:4581 +#: pl_exec.c:4541 #, c-format msgid "cannot assign non-composite value to a record variable" msgstr "переменной типа запись можно присвоить только составное значение" -#: pl_exec.c:4701 +#: pl_exec.c:4661 #, c-format msgid "number of array dimensions (%d) exceeds the maximum allowed (%d)" msgstr "число размерностей массива (%d) превышает предел (%d)" -#: pl_exec.c:4733 +#: pl_exec.c:4693 #, c-format msgid "subscripted object is not an array" msgstr "для объекта указан индекс, но этот объект - не массив" -#: pl_exec.c:4771 +#: pl_exec.c:4731 #, c-format msgid "array subscript in assignment must not be null" msgstr "индекс элемента массива в присваивании не может быть NULL" -#: pl_exec.c:5238 +#: pl_exec.c:5198 #, c-format msgid "query \"%s\" did not return data" msgstr "запрос \"%s\" не вернул данные" -#: pl_exec.c:5246 +#: pl_exec.c:5206 #, c-format msgid "query \"%s\" returned %d column" msgid_plural "query \"%s\" returned %d columns" -msgstr[0] "запрос \"%s\" вернул %d строку" -msgstr[1] "запрос \"%s\" вернул %d строки" -msgstr[2] "запрос \"%s\" вернул %d строк" +msgstr[0] "запрос \"%s\" вернул %d столбец" +msgstr[1] "запрос \"%s\" вернул %d столбца" +msgstr[2] "запрос \"%s\" вернул %d столбцов" -#: pl_exec.c:5273 +#: pl_exec.c:5233 #, c-format msgid "query \"%s\" returned more than one row" msgstr "запрос \"%s\" вернул несколько строк" -#: pl_exec.c:5341 +#: pl_exec.c:5301 #, c-format msgid "query \"%s\" is not a SELECT" msgstr "запрос \"%s\" - не SELECT" diff --git a/src/pl/plpgsql/src/po/sv.po b/src/pl/plpgsql/src/po/sv.po index 780d220aa3..bac48491d0 100644 --- a/src/pl/plpgsql/src/po/sv.po +++ b/src/pl/plpgsql/src/po/sv.po @@ -1,14 +1,14 @@ # Swedish message translation file for plpgsql # Copyright (C) 2017 PostgreSQL Global Development Group # This file is distributed under the same license as the PostgreSQL package. -# Dennis Björklund , 2017. +# Dennis Björklund , 2017, 2018. 
# msgid "" msgstr "" "Project-Id-Version: plpgsql (PostgreSQL) 10\n" "Report-Msgid-Bugs-To: pgsql-bugs@postgresql.org\n" -"POT-Creation-Date: 2017-08-02 18:08+0000\n" -"PO-Revision-Date: 2017-08-05 15:50+0200\n" +"POT-Creation-Date: 2018-04-29 20:08+0000\n" +"PO-Revision-Date: 2018-04-29 23:48+0200\n" "Last-Translator: Dennis Björklund \n" "Language-Team: Swedish \n" "Language: sv\n" @@ -17,149 +17,144 @@ msgstr "" "Content-Transfer-Encoding: 8bit\n" "Plural-Forms: nplurals=2; plural=n != 1;\n" -#: pl_comp.c:433 pl_handler.c:451 +#: pl_comp.c:434 pl_handler.c:457 #, c-format msgid "PL/pgSQL functions cannot accept type %s" msgstr "PL/pgSQL-funktioner kan inte acceptera typ %s" -#: pl_comp.c:514 +#: pl_comp.c:522 #, c-format msgid "could not determine actual return type for polymorphic function \"%s\"" msgstr "kunde inte bestämma aktuell returtyp för polymorfisk funktion \"%s\"" -#: pl_comp.c:544 +#: pl_comp.c:552 #, c-format msgid "trigger functions can only be called as triggers" msgstr "utlösarfunktioner kan bara anropas som utlösare" -#: pl_comp.c:548 pl_handler.c:436 +#: pl_comp.c:556 pl_handler.c:441 #, c-format msgid "PL/pgSQL functions cannot return type %s" msgstr "PL/pgSQL-funktioner kan inte returnera typ %s" -#: pl_comp.c:589 +#: pl_comp.c:595 #, c-format msgid "trigger functions cannot have declared arguments" msgstr "utlösarfunktioner kan inte ha deklarerade argument" -#: pl_comp.c:590 +#: pl_comp.c:596 #, c-format msgid "The arguments of the trigger can be accessed through TG_NARGS and TG_ARGV instead." msgstr "Argumenten till utlösaren kan accessas via TG_NARGS och TG_ARGV istället." -#: pl_comp.c:692 +#: pl_comp.c:719 #, c-format msgid "event trigger functions cannot have declared arguments" msgstr "händelseutlösarfunktioner kan inte ha deklarerade argument" -#: pl_comp.c:943 +#: pl_comp.c:976 #, c-format msgid "compilation of PL/pgSQL function \"%s\" near line %d" msgstr "kompilering av PL/pgSQL-funktion \"%s\" nära rad %d" -#: pl_comp.c:966 +#: pl_comp.c:999 #, c-format msgid "parameter name \"%s\" used more than once" msgstr "parameternamn \"%s\" angivet mer än en gång" -#: pl_comp.c:1076 +#: pl_comp.c:1109 #, c-format msgid "column reference \"%s\" is ambiguous" msgstr "kolumnreferens \"%s\" är tvetydig" -#: pl_comp.c:1078 +#: pl_comp.c:1111 #, c-format msgid "It could refer to either a PL/pgSQL variable or a table column." msgstr "Det kan referera till antingen en PL/pgSQL-variabel eller en tabellkolumn." 
-#: pl_comp.c:1258 pl_comp.c:1286 pl_exec.c:4584 pl_exec.c:4913 pl_exec.c:4998 -#: pl_exec.c:5089 +#: pl_comp.c:1294 pl_exec.c:5031 pl_exec.c:5396 pl_exec.c:5483 pl_exec.c:5574 +#: pl_exec.c:6491 #, c-format msgid "record \"%s\" has no field \"%s\"" msgstr "post \"%s\" saknar fält \"%s\"" -#: pl_comp.c:1818 +#: pl_comp.c:1756 #, c-format msgid "relation \"%s\" does not exist" msgstr "relationen \"%s\" existerar inte" -#: pl_comp.c:1927 +#: pl_comp.c:1848 #, c-format msgid "variable \"%s\" has pseudo-type %s" msgstr "variabel \"%s\" har pseudotyp %s" -#: pl_comp.c:1995 -#, c-format -msgid "relation \"%s\" is not a table" -msgstr "relation \"%s\" är inte en tabell" - -#: pl_comp.c:2155 +#: pl_comp.c:2026 #, c-format msgid "type \"%s\" is only a shell" msgstr "typ \"%s\" är bara ett skal" -#: pl_comp.c:2249 pl_comp.c:2302 +#: pl_comp.c:2123 pl_comp.c:2176 #, c-format msgid "unrecognized exception condition \"%s\"" msgstr "okänt avbrottsvillkor \"%s\"" -#: pl_comp.c:2510 +#: pl_comp.c:2390 #, c-format msgid "could not determine actual argument type for polymorphic function \"%s\"" msgstr "kunde inte bestämma argumenttyp för polymorfisk funktion function \"%s\"" -#: pl_exec.c:355 pl_exec.c:644 pl_exec.c:914 +#: pl_exec.c:473 pl_exec.c:885 pl_exec.c:1098 msgid "during initialization of execution state" msgstr "unde initiering av körtillstånd" -#: pl_exec.c:362 +#: pl_exec.c:479 msgid "while storing call arguments into local variables" msgstr "under sparande av anropsargument till lokala variabler" -#: pl_exec.c:447 pl_exec.c:796 +#: pl_exec.c:567 pl_exec.c:933 msgid "during function entry" msgstr "under funktionsingången" -#: pl_exec.c:472 +#: pl_exec.c:592 #, c-format msgid "control reached end of function without RETURN" msgstr "kontrollen nådde slutet av funktionen utan RETURN" -#: pl_exec.c:479 +#: pl_exec.c:599 msgid "while casting return value to function's return type" msgstr "under typomvandling av returvärde till funktionens returtyp" -#: pl_exec.c:492 pl_exec.c:3101 +#: pl_exec.c:612 pl_exec.c:3484 #, c-format msgid "set-valued function called in context that cannot accept a set" msgstr "en funktion som returnerar en mängd anropades i kontext som inte godtar en mängd" -#: pl_exec.c:530 pl_exec.c:2948 -msgid "returned record type does not match expected record type" -msgstr "returnerad posttyp matchar inte förväntad posttyp" - -#: pl_exec.c:585 pl_exec.c:825 pl_exec.c:949 +#: pl_exec.c:738 pl_exec.c:962 pl_exec.c:1123 msgid "during function exit" msgstr "under funktionsavslutning" -#: pl_exec.c:821 pl_exec.c:945 +#: pl_exec.c:793 pl_exec.c:832 pl_exec.c:3329 +msgid "returned record type does not match expected record type" +msgstr "returnerad posttyp matchar inte förväntad posttyp" + +#: pl_exec.c:958 pl_exec.c:1119 #, c-format msgid "control reached end of trigger procedure without RETURN" msgstr "kontroll nådde slutet på utlösarprocedur utan RETURN" -#: pl_exec.c:830 +#: pl_exec.c:967 #, c-format msgid "trigger procedure cannot return a set" msgstr "utlösarprocedur kan inte returnera en mängd" -#: pl_exec.c:852 +#: pl_exec.c:1006 pl_exec.c:1034 msgid "returned row structure does not match the structure of the triggering table" msgstr "returnerad radstruktur matchar inte strukturen på utlösande tabell" #. translator: last %s is a phrase such as "during statement block #. local variable initialization" #. 
-#: pl_exec.c:997 +#: pl_exec.c:1171 #, c-format msgid "PL/pgSQL function %s line %d %s" msgstr "PL/pgSQL-funktion %s rad %d %s" @@ -167,311 +162,319 @@ msgstr "PL/pgSQL-funktion %s rad %d %s" #. translator: last %s is a phrase such as "while storing call #. arguments into local variables" #. -#: pl_exec.c:1008 +#: pl_exec.c:1182 #, c-format msgid "PL/pgSQL function %s %s" msgstr "PL/pgSQL-funktion %s %s" #. translator: last %s is a plpgsql statement type name -#: pl_exec.c:1016 +#: pl_exec.c:1190 #, c-format msgid "PL/pgSQL function %s line %d at %s" msgstr "PL/pgSQL-funktion %s rad %d vid %s" -#: pl_exec.c:1022 +#: pl_exec.c:1196 #, c-format msgid "PL/pgSQL function %s" msgstr "PL/pgSQL-funktion %s" -#: pl_exec.c:1187 +#: pl_exec.c:1534 msgid "during statement block local variable initialization" msgstr "under initiering av lokala variabler i satsblock" -#: pl_exec.c:1226 -#, c-format -msgid "variable \"%s\" declared NOT NULL cannot default to NULL" -msgstr "variabel \"%s\" deklarerad NOT NULL kan inte default:a till NULL" - -#: pl_exec.c:1277 +#: pl_exec.c:1632 msgid "during statement block entry" msgstr "under ingång till satsblock" -#: pl_exec.c:1309 +#: pl_exec.c:1664 msgid "during statement block exit" msgstr "under satsblockavslutning" -#: pl_exec.c:1351 +#: pl_exec.c:1702 msgid "during exception cleanup" msgstr "under avbrottsuppstädning" -#: pl_exec.c:1717 +#: pl_exec.c:2207 pl_exec.c:2221 +#, c-format +msgid "argument %d is an output argument but is not writable" +msgstr "argument %d är ett utdataargument men är ej skrivbar" + +#: pl_exec.c:2263 #, c-format msgid "GET STACKED DIAGNOSTICS cannot be used outside an exception handler" msgstr "GET STACKED DIAGNOSTICS kan inte användas utanför en avbrottshanterare" -#: pl_exec.c:1922 +#: pl_exec.c:2468 #, c-format msgid "case not found" msgstr "hittade inte alternativ" -#: pl_exec.c:1923 +#: pl_exec.c:2469 #, c-format msgid "CASE statement is missing ELSE part." msgstr "CASE-sats saknar ELSE-del." 
-#: pl_exec.c:2077 +#: pl_exec.c:2562 #, c-format msgid "lower bound of FOR loop cannot be null" msgstr "lägre gräns i FOR-loop kan inte vara null" -#: pl_exec.c:2093 +#: pl_exec.c:2578 #, c-format msgid "upper bound of FOR loop cannot be null" msgstr "övre gräns i FOR-loop kan inte vara null" -#: pl_exec.c:2111 +#: pl_exec.c:2596 #, c-format msgid "BY value of FOR loop cannot be null" msgstr "BY-värde i FOR-loop kan inte vara null" -#: pl_exec.c:2117 +#: pl_exec.c:2602 #, c-format msgid "BY value of FOR loop must be greater than zero" msgstr "BY-värde i FOR-loop måste vara större än noll" -#: pl_exec.c:2294 pl_exec.c:4085 +#: pl_exec.c:2736 pl_exec.c:4461 #, c-format msgid "cursor \"%s\" already in use" msgstr "markören \"%s\" används redan" -#: pl_exec.c:2317 pl_exec.c:4150 +#: pl_exec.c:2759 pl_exec.c:4526 #, c-format msgid "arguments given for cursor without arguments" msgstr "argument angivna till markör utan argumnet" -#: pl_exec.c:2336 pl_exec.c:4169 +#: pl_exec.c:2778 pl_exec.c:4545 #, c-format msgid "arguments required for cursor" msgstr "argument krävs för markör" -#: pl_exec.c:2423 +#: pl_exec.c:2865 #, c-format msgid "FOREACH expression must not be null" msgstr "FOREACH-uttryck får inte vara null" -#: pl_exec.c:2438 +#: pl_exec.c:2880 #, c-format msgid "FOREACH expression must yield an array, not type %s" msgstr "FOREACH-uttryck måste ge en array, inte typ %s" -#: pl_exec.c:2455 +#: pl_exec.c:2897 #, c-format msgid "slice dimension (%d) is out of the valid range 0..%d" msgstr "slice-storlek (%d) är utanför giltigt intervall 0..%d" -#: pl_exec.c:2482 +#: pl_exec.c:2924 #, c-format msgid "FOREACH ... SLICE loop variable must be of an array type" msgstr "FOREACH ... SLICE-loop-variabel måste ha typen array" -#: pl_exec.c:2486 +#: pl_exec.c:2928 #, c-format msgid "FOREACH loop variable must not be of an array type" msgstr "FOREACH-loop-variable får inte ha typen array" -#: pl_exec.c:2689 pl_exec.c:2771 pl_exec.c:2941 +#: pl_exec.c:3090 pl_exec.c:3147 pl_exec.c:3322 #, c-format msgid "cannot return non-composite value from function returning composite type" msgstr "kan inte returnera icke-composit-värde från funktion med returtyp composit" -#: pl_exec.c:2815 pl_gram.y:3199 +#: pl_exec.c:3186 pl_gram.y:3266 #, c-format msgid "cannot use RETURN NEXT in a non-SETOF function" msgstr "kan inte använda RETURN NEXT i en icke-SETOF-funktion" -#: pl_exec.c:2849 pl_exec.c:2976 +#: pl_exec.c:3227 pl_exec.c:3359 #, c-format msgid "wrong result type supplied in RETURN NEXT" msgstr "fel resultattyp given i RETURN NEXT" -#: pl_exec.c:2878 pl_exec.c:4572 pl_exec.c:4880 pl_exec.c:4906 pl_exec.c:4972 -#: pl_exec.c:4991 pl_exec.c:5059 pl_exec.c:5082 -#, c-format -msgid "record \"%s\" is not assigned yet" -msgstr "posten \"%s\" är inte tilldelad än" - -#: pl_exec.c:2880 pl_exec.c:4574 pl_exec.c:4882 pl_exec.c:4908 pl_exec.c:4974 -#: pl_exec.c:4993 pl_exec.c:5061 pl_exec.c:5084 -#, c-format -msgid "The tuple structure of a not-yet-assigned record is indeterminate." -msgstr "Tuple-strukturen av en ej-ännu-tilldelad post är obestämd." 
- -#: pl_exec.c:2887 pl_exec.c:2906 +#: pl_exec.c:3265 pl_exec.c:3286 #, c-format msgid "wrong record type supplied in RETURN NEXT" msgstr "fel posttyp given i RETURN NEXT" -#: pl_exec.c:2995 +#: pl_exec.c:3378 #, c-format msgid "RETURN NEXT must have a parameter" msgstr "RETURN NEXT måste ha en parameter" -#: pl_exec.c:3021 pl_gram.y:3261 +#: pl_exec.c:3404 pl_gram.y:3329 #, c-format msgid "cannot use RETURN QUERY in a non-SETOF function" msgstr "kan inte använda RETURN QUERY i en icke-SETOF-funktion" -#: pl_exec.c:3045 +#: pl_exec.c:3428 msgid "structure of query does not match function result type" msgstr "strukturen på frågan matchar inte funktionens resultattyp" -#: pl_exec.c:3129 pl_exec.c:3267 +#: pl_exec.c:3512 pl_exec.c:3650 #, c-format msgid "RAISE option already specified: %s" msgstr "RAISE-flagga redan angiven: %s" -#: pl_exec.c:3163 +#: pl_exec.c:3546 #, c-format msgid "RAISE without parameters cannot be used outside an exception handler" msgstr "RAISE utan parametrar kan inte användas utanför en avbrottshanterare" -#: pl_exec.c:3257 +#: pl_exec.c:3640 #, c-format msgid "RAISE statement option cannot be null" msgstr "RAISE-satsens flagga får inte vare null" -#: pl_exec.c:3327 +#: pl_exec.c:3710 #, c-format msgid "%s" msgstr "%s" -#: pl_exec.c:3382 +#: pl_exec.c:3765 #, c-format msgid "assertion failed" msgstr "assert misslyckades" -#: pl_exec.c:3583 pl_exec.c:3729 pl_exec.c:3919 +#: pl_exec.c:3970 pl_exec.c:4117 pl_exec.c:4301 #, c-format msgid "cannot COPY to/from client in PL/pgSQL" msgstr "kan inte COPY till/från klient i PL/pgSQL" -#: pl_exec.c:3587 pl_exec.c:3733 pl_exec.c:3923 +#: pl_exec.c:3974 pl_exec.c:4121 pl_exec.c:4305 #, c-format msgid "cannot begin/end transactions in PL/pgSQL" msgstr "kan inte starta/avsluta transaktioner i PL/pgSQL" -#: pl_exec.c:3588 pl_exec.c:3734 pl_exec.c:3924 +#: pl_exec.c:3975 pl_exec.c:4122 pl_exec.c:4306 #, c-format msgid "Use a BEGIN block with an EXCEPTION clause instead." msgstr "Använd ett BEGIN-block men en EXCEPTION-klausul istället." -#: pl_exec.c:3757 pl_exec.c:3948 +#: pl_exec.c:4144 pl_exec.c:4329 #, c-format msgid "INTO used with a command that cannot return data" msgstr "INTO använd med ett kommando som inte returnerar data" -#: pl_exec.c:3785 pl_exec.c:3976 +#: pl_exec.c:4167 pl_exec.c:4352 #, c-format msgid "query returned no rows" msgstr "frågan returnerade inga rader" -#: pl_exec.c:3804 pl_exec.c:3995 +#: pl_exec.c:4186 pl_exec.c:4371 #, c-format msgid "query returned more than one row" msgstr "frågan returnerade mer än en rad" -#: pl_exec.c:3821 +#: pl_exec.c:4203 #, c-format msgid "query has no destination for result data" msgstr "frågan har ingen destination för resultatdatan" -#: pl_exec.c:3822 +#: pl_exec.c:4204 #, c-format msgid "If you want to discard the results of a SELECT, use PERFORM instead." msgstr "Om du vill slänga resultatet av en SELECT, använd PERFORM istället." -#: pl_exec.c:3855 pl_exec.c:7292 +#: pl_exec.c:4237 pl_exec.c:8212 #, c-format msgid "query string argument of EXECUTE is null" msgstr "frågesträngargumentet till EXECUTE är null" -#: pl_exec.c:3911 +#: pl_exec.c:4293 #, c-format msgid "EXECUTE of SELECT ... INTO is not implemented" msgstr "EXECUTE för SELECT ... INTO är inte implementerad" -#: pl_exec.c:3912 +#: pl_exec.c:4294 #, c-format msgid "You might want to use EXECUTE ... INTO or EXECUTE CREATE TABLE ... AS instead." msgstr "Du vill nog använda EXECUTE ... INTO eller EXECUTE CREATE TABLE ... AS istället." 
-#: pl_exec.c:4233 pl_exec.c:4329 +#: pl_exec.c:4607 pl_exec.c:4695 #, c-format msgid "cursor variable \"%s\" is null" msgstr "markörvariabel \"%s\" är null" -#: pl_exec.c:4244 pl_exec.c:4340 +#: pl_exec.c:4618 pl_exec.c:4706 #, c-format msgid "cursor \"%s\" does not exist" msgstr "markör \"%s\" existerar inte" -#: pl_exec.c:4257 +#: pl_exec.c:4631 #, c-format msgid "relative or absolute cursor position is null" msgstr "relativ eller absolut markörposition är null" -#: pl_exec.c:4448 +#: pl_exec.c:4881 pl_exec.c:4976 #, c-format msgid "null value cannot be assigned to variable \"%s\" declared NOT NULL" msgstr "null-value kan inte tilldelas till variabel \"%s\" som deklarerats NOT NULL" -#: pl_exec.c:4517 +#: pl_exec.c:4957 #, c-format msgid "cannot assign non-composite value to a row variable" msgstr "kan inte tilldela icke-composite-värde till radvariabel" -#: pl_exec.c:4541 +#: pl_exec.c:4989 #, c-format msgid "cannot assign non-composite value to a record variable" msgstr "kan inte tilldela icke-composite-värde till en post-variabel" -#: pl_exec.c:4661 +#: pl_exec.c:5040 +#, c-format +msgid "cannot assign to system column \"%s\"" +msgstr "kan inte skriva till systemkolumn \"%s\"" + +#: pl_exec.c:5104 #, c-format msgid "number of array dimensions (%d) exceeds the maximum allowed (%d)" msgstr "antalet array-dimensioner (%d) överskrider det maximalt tillåtna (%d)" -#: pl_exec.c:4693 +#: pl_exec.c:5136 #, c-format msgid "subscripted object is not an array" msgstr "arrayindexobjekt är inte en array" -#: pl_exec.c:4731 +#: pl_exec.c:5174 #, c-format msgid "array subscript in assignment must not be null" msgstr "arrayindex i tilldelning kan inte vara null" -#: pl_exec.c:5198 +#: pl_exec.c:5681 #, c-format msgid "query \"%s\" did not return data" msgstr "frågan \"%s\" returnerade ingen data" -#: pl_exec.c:5206 +#: pl_exec.c:5689 #, c-format msgid "query \"%s\" returned %d column" msgid_plural "query \"%s\" returned %d columns" msgstr[0] "frågan \"%s\" returnerade %d kolumn" msgstr[1] "frågan \"%s\" returnerade %d kolumner" -#: pl_exec.c:5233 +#: pl_exec.c:5717 #, c-format msgid "query \"%s\" returned more than one row" msgstr "frågan \"%s\" returnerade mer än en rad" -#: pl_exec.c:5301 +#: pl_exec.c:5780 #, c-format msgid "query \"%s\" is not a SELECT" msgstr "frågan \"%s\" är inte en SELECT" +#: pl_exec.c:6505 pl_exec.c:6545 pl_exec.c:6585 +#, c-format +msgid "type of parameter %d (%s) does not match that when preparing the plan (%s)" +msgstr "typen av parameter %d (%s) matchar inte det som var vid preparerande av plan (%s)" + +#: pl_exec.c:7356 +#, c-format +msgid "record \"%s\" is not assigned yet" +msgstr "posten \"%s\" är inte tilldelad än" + +#: pl_exec.c:7357 +#, c-format +msgid "The tuple structure of a not-yet-assigned record is indeterminate." +msgstr "Tuple-strukturen av en ej-ännu-tilldelad post är obestämd." 
+ #: pl_funcs.c:239 msgid "statement block" msgstr "satsblock" @@ -504,285 +507,280 @@ msgstr "SQL-sats" msgid "FOR over EXECUTE statement" msgstr "FOR över EXECUTE-sats" -#: pl_gram.y:478 +#: pl_gram.y:485 #, c-format msgid "block label must be placed before DECLARE, not after" msgstr "blocketikett måste anges före DECLARE, inte efter" -#: pl_gram.y:498 +#: pl_gram.y:505 #, c-format msgid "collations are not supported by type %s" -msgstr "sorteringar stöds inte för typ %s" - -#: pl_gram.y:513 -#, c-format -msgid "row or record variable cannot be CONSTANT" -msgstr "rad- eller post-variabel får inte vara CONSTANT" - -#: pl_gram.y:523 -#, c-format -msgid "row or record variable cannot be NOT NULL" -msgstr "rad- eller post-variabel får inte vara NOT NULL" +msgstr "jämförelser stöds inte för typ %s" -#: pl_gram.y:534 +#: pl_gram.y:524 #, c-format -msgid "default value for row or record variable is not supported" -msgstr "standardvärde för rad- eller post-variabel stöds inte" +msgid "variable \"%s\" must have a default value, since it's declared NOT NULL" +msgstr "variabel \"%s\" måste ha ett default-värde då det inte deklarerats som NOT NULL" -#: pl_gram.y:679 pl_gram.y:694 pl_gram.y:720 +#: pl_gram.y:669 pl_gram.y:684 pl_gram.y:710 #, c-format msgid "variable \"%s\" does not exist" msgstr "variabel \"%s\" finns inte" -#: pl_gram.y:738 pl_gram.y:766 +#: pl_gram.y:728 pl_gram.y:756 msgid "duplicate declaration" msgstr "duplicerad deklaration" -#: pl_gram.y:749 pl_gram.y:777 +#: pl_gram.y:739 pl_gram.y:767 #, c-format msgid "variable \"%s\" shadows a previously defined variable" msgstr "variabeln \"%s\" döljer en tidigare definierad variabel" -#: pl_gram.y:956 +#: pl_gram.y:983 #, c-format msgid "diagnostics item %s is not allowed in GET STACKED DIAGNOSTICS" msgstr "diagnostikdel %s tillåts inte i GET STACKED DIAGNOSTICS" -#: pl_gram.y:974 +#: pl_gram.y:1001 #, c-format msgid "diagnostics item %s is not allowed in GET CURRENT DIAGNOSTICS" msgstr "diagnostikdel %s tillåts inte i GET CURRENT DIAGNOSTICS" -#: pl_gram.y:1072 +#: pl_gram.y:1099 msgid "unrecognized GET DIAGNOSTICS item" msgstr "okänd GET DIAGNOSTICS-del" -#: pl_gram.y:1082 pl_gram.y:3448 +#: pl_gram.y:1109 pl_gram.y:3508 #, c-format msgid "\"%s\" is not a scalar variable" msgstr "\"%s\" är inte ett skalärt värde" -#: pl_gram.y:1334 pl_gram.y:1528 +#: pl_gram.y:1357 pl_gram.y:1550 #, c-format -msgid "loop variable of loop over rows must be a record or row variable or list of scalar variables" -msgstr "loop-variabeln för loop över rader måste vara en post- eller rad-variabel alternativt en lista av skalärvariabler" +msgid "loop variable of loop over rows must be a record variable or list of scalar variables" +msgstr "loop-variabeln för loop över rader måste vara en postvariabel eller en lista av skalärvariabler" -#: pl_gram.y:1368 +#: pl_gram.y:1391 #, c-format msgid "cursor FOR loop must have only one target variable" msgstr "markör-FOR-loop måste ha exakt en målvariabel" -#: pl_gram.y:1375 +#: pl_gram.y:1398 #, c-format msgid "cursor FOR loop must use a bound cursor variable" msgstr "markör-FOR-loop måste använda en bunden markörvariabel" -#: pl_gram.y:1459 +#: pl_gram.y:1485 #, c-format msgid "integer FOR loop must have only one target variable" msgstr "heltals-FOR-loop måste ha exakt en målvariabel" -#: pl_gram.y:1495 +#: pl_gram.y:1521 #, c-format msgid "cannot specify REVERSE in query FOR loop" msgstr "kan inte ange REVERSE i fråge-FOR-loop" -#: pl_gram.y:1642 +#: pl_gram.y:1652 #, c-format msgid "loop variable of FOREACH must be 
a known variable or list of variables" msgstr "loop-variabel för FOREACH måste vara en känd variabel eller lista av variabler" -#: pl_gram.y:1683 +#: pl_gram.y:1693 #, c-format msgid "there is no label \"%s\" attached to any block or loop enclosing this statement" msgstr "det finns ingen etikett \"%s\" kopplad till något block eller loop-omslutning i denna sats" -#: pl_gram.y:1691 +#: pl_gram.y:1701 #, c-format msgid "block label \"%s\" cannot be used in CONTINUE" msgstr "blocketikett \"%s\" kan inte användas i CONTINUE" -#: pl_gram.y:1706 +#: pl_gram.y:1716 #, c-format msgid "EXIT cannot be used outside a loop, unless it has a label" msgstr "EXIT kan inte användas utanför en loop, om den inte har en etikett" -#: pl_gram.y:1707 +#: pl_gram.y:1717 #, c-format msgid "CONTINUE cannot be used outside a loop" msgstr "CONTINUE kan inte användas utanför en loop" -#: pl_gram.y:1731 pl_gram.y:1768 pl_gram.y:1816 pl_gram.y:2898 pl_gram.y:2983 -#: pl_gram.y:3094 pl_gram.y:3850 +#: pl_gram.y:1741 pl_gram.y:1778 pl_gram.y:1826 pl_gram.y:2958 pl_gram.y:3041 +#: pl_gram.y:3152 pl_gram.y:3907 msgid "unexpected end of function definition" msgstr "oväntat slut på funktionsdefinitionen" -#: pl_gram.y:1836 pl_gram.y:1860 pl_gram.y:1876 pl_gram.y:1882 pl_gram.y:2000 -#: pl_gram.y:2008 pl_gram.y:2022 pl_gram.y:2117 pl_gram.y:2304 pl_gram.y:2398 -#: pl_gram.y:2550 pl_gram.y:3691 pl_gram.y:3752 pl_gram.y:3831 +#: pl_gram.y:1846 pl_gram.y:1870 pl_gram.y:1886 pl_gram.y:1892 pl_gram.y:2009 +#: pl_gram.y:2017 pl_gram.y:2031 pl_gram.y:2125 pl_gram.y:2360 pl_gram.y:2454 +#: pl_gram.y:2612 pl_gram.y:3749 pl_gram.y:3810 pl_gram.y:3888 msgid "syntax error" msgstr "syntaxfel" -#: pl_gram.y:1864 pl_gram.y:1866 pl_gram.y:2308 pl_gram.y:2310 +#: pl_gram.y:1874 pl_gram.y:1876 pl_gram.y:2364 pl_gram.y:2366 msgid "invalid SQLSTATE code" msgstr "ogiltig SQLSTATE-kod" -#: pl_gram.y:2064 +#: pl_gram.y:2073 msgid "syntax error, expected \"FOR\"" msgstr "syntaxfel, förväntade \"FOR\"" -#: pl_gram.y:2126 +#: pl_gram.y:2134 #, c-format msgid "FETCH statement cannot return multiple rows" msgstr "FETCH-sats kan inte returnera multipla rader" -#: pl_gram.y:2188 +#: pl_gram.y:2244 #, c-format msgid "cursor variable must be a simple variable" msgstr "markörvariabel måste vara en enkel variabel" -#: pl_gram.y:2194 +#: pl_gram.y:2250 #, c-format msgid "variable \"%s\" must be of type cursor or refcursor" msgstr "variabel \"%s\" måste ha typen cursor eller refcursor" -#: pl_gram.y:2521 pl_gram.y:2532 +#: pl_gram.y:2583 pl_gram.y:2594 #, c-format msgid "\"%s\" is not a known variable" msgstr "\"%s\" är inte en känd variabel" -#: pl_gram.y:2636 pl_gram.y:2646 pl_gram.y:2802 +#: pl_gram.y:2698 pl_gram.y:2708 pl_gram.y:2863 msgid "mismatched parentheses" msgstr "missmatchade parenteser" -#: pl_gram.y:2650 +#: pl_gram.y:2712 #, c-format msgid "missing \"%s\" at end of SQL expression" msgstr "saknar \"%s\" vid slutet av SQL-uttryck" -#: pl_gram.y:2656 +#: pl_gram.y:2718 #, c-format msgid "missing \"%s\" at end of SQL statement" msgstr "saknar \"%s\" vid slutet av SQL-sats" -#: pl_gram.y:2673 +#: pl_gram.y:2735 msgid "missing expression" msgstr "saknar uttryck" -#: pl_gram.y:2675 +#: pl_gram.y:2737 msgid "missing SQL statement" msgstr "saknars SQL-sats" -#: pl_gram.y:2804 +#: pl_gram.y:2865 msgid "incomplete data type declaration" msgstr "inkomplett datatypdeklaration" -#: pl_gram.y:2827 +#: pl_gram.y:2888 msgid "missing data type declaration" msgstr "saknar datatypdeklaration" -#: pl_gram.y:2906 +#: pl_gram.y:2966 msgid "INTO specified more than 
once" msgstr "INTO angiven mer än en gång" -#: pl_gram.y:3075 +#: pl_gram.y:3133 msgid "expected FROM or IN" msgstr "förväntade FROM eller IN" -#: pl_gram.y:3135 +#: pl_gram.y:3193 #, c-format msgid "RETURN cannot have a parameter in function returning set" msgstr "RETURN kan inte ha en parameter i funktion som returnerar en mängd" -#: pl_gram.y:3136 +#: pl_gram.y:3194 #, c-format msgid "Use RETURN NEXT or RETURN QUERY." msgstr "Använd RETURN NEXT eller RETURN QUERY." -#: pl_gram.y:3144 +#: pl_gram.y:3204 #, c-format -msgid "RETURN cannot have a parameter in function with OUT parameters" -msgstr "RETURN kan inte ha en parameter i en funktion med OUT-parameterar" +msgid "RETURN cannot have a parameter in a procedure" +msgstr "RETURN kan inte ha en parameter i en procedur" -#: pl_gram.y:3153 +#: pl_gram.y:3209 #, c-format msgid "RETURN cannot have a parameter in function returning void" msgstr "RETURN kan inte ha en parameter i funktion som returnerar void" -#: pl_gram.y:3213 +#: pl_gram.y:3218 +#, c-format +msgid "RETURN cannot have a parameter in function with OUT parameters" +msgstr "RETURN kan inte ha en parameter i en funktion med OUT-parameterar" + +#: pl_gram.y:3280 #, c-format msgid "RETURN NEXT cannot have a parameter in function with OUT parameters" msgstr "RETURN NEXT kan inte ha en parameter i funktion med OUT-parametrar" -#: pl_gram.y:3317 +#: pl_gram.y:3387 #, c-format -msgid "\"%s\" is declared CONSTANT" -msgstr "\"%s\" är deklarerad CONSTANT" +msgid "variable \"%s\" is declared CONSTANT" +msgstr "variabel \"%s\" är deklarerad CONSTANT" -#: pl_gram.y:3379 pl_gram.y:3391 +#: pl_gram.y:3450 #, c-format -msgid "record or row variable cannot be part of multiple-item INTO list" -msgstr "post- eller rad-variabel kan inte vara del av en multipel-INTO-lista" +msgid "record variable cannot be part of multiple-item INTO list" +msgstr "postvariabel kan inte vara del av en multipel-INTO-lista" -#: pl_gram.y:3436 +#: pl_gram.y:3496 #, c-format msgid "too many INTO variables specified" msgstr "för många INTO-variabler angivna" -#: pl_gram.y:3644 +#: pl_gram.y:3702 #, c-format msgid "end label \"%s\" specified for unlabelled block" msgstr "slutetikett \"%s\" angiven för block utan etikett" -#: pl_gram.y:3651 +#: pl_gram.y:3709 #, c-format msgid "end label \"%s\" differs from block's label \"%s\"" msgstr "slutetikett \"%s\" stämmer inte med blockets etikett \"%s\"" -#: pl_gram.y:3686 +#: pl_gram.y:3744 #, c-format msgid "cursor \"%s\" has no arguments" msgstr "markör \"%s\" har inga argument" -#: pl_gram.y:3700 +#: pl_gram.y:3758 #, c-format msgid "cursor \"%s\" has arguments" msgstr "markör \"%s\" har argument" -#: pl_gram.y:3742 +#: pl_gram.y:3800 #, c-format msgid "cursor \"%s\" has no argument named \"%s\"" msgstr "markör \"%s\" har inga argument med namn \"%s\"" -#: pl_gram.y:3762 +#: pl_gram.y:3820 #, c-format msgid "value for parameter \"%s\" of cursor \"%s\" specified more than once" msgstr "värdet för parameter \"%s\" i markör \"%s\" är angivet mer än en gång" -#: pl_gram.y:3787 +#: pl_gram.y:3845 #, c-format msgid "not enough arguments for cursor \"%s\"" msgstr "ej tillräckligt med argument för markör \"%s\"" -#: pl_gram.y:3794 +#: pl_gram.y:3852 #, c-format msgid "too many arguments for cursor \"%s\"" msgstr "fär många argument för markör \"%s\"" -#: pl_gram.y:3882 +#: pl_gram.y:3939 msgid "unrecognized RAISE statement option" msgstr "okänd RAISE-sats-flagga" -#: pl_gram.y:3886 +#: pl_gram.y:3943 msgid "syntax error, expected \"=\"" msgstr "syntaxfel, förväntade \"=\"" -#: 
pl_gram.y:3927 +#: pl_gram.y:3984 #, c-format msgid "too many parameters specified for RAISE" msgstr "för många parametrar angivna för RAISE" -#: pl_gram.y:3931 +#: pl_gram.y:3988 #, c-format msgid "too few parameters specified for RAISE" msgstr "för få parametrar angivna för RAISE" @@ -808,13 +806,28 @@ msgid "List of programming constructs that should produce an error." msgstr "Lista av programmeringskonstruktioner som skall ge ett fel" #. translator: %s is typically the translation of "syntax error" -#: pl_scanner.c:624 +#: pl_scanner.c:630 #, c-format msgid "%s at end of input" msgstr "%s vid slutet av indatan" #. translator: first %s is typically the translation of "syntax error" -#: pl_scanner.c:640 +#: pl_scanner.c:646 #, c-format msgid "%s at or near \"%s\"" msgstr "%s vid eller nära \"%s\"" + +#~ msgid "default value for row or record variable is not supported" +#~ msgstr "standardvärde för rad- eller post-variabel stöds inte" + +#~ msgid "row or record variable cannot be NOT NULL" +#~ msgstr "rad- eller post-variabel får inte vara NOT NULL" + +#~ msgid "row or record variable cannot be CONSTANT" +#~ msgstr "rad- eller post-variabel får inte vara CONSTANT" + +#~ msgid "variable \"%s\" declared NOT NULL cannot default to NULL" +#~ msgstr "variabel \"%s\" deklarerad NOT NULL kan inte default:a till NULL" + +#~ msgid "relation \"%s\" is not a table" +#~ msgstr "relation \"%s\" är inte en tabell" diff --git a/src/pl/plpgsql/src/po/tr.po b/src/pl/plpgsql/src/po/tr.po new file mode 100644 index 0000000000..9ff44731e1 --- /dev/null +++ b/src/pl/plpgsql/src/po/tr.po @@ -0,0 +1,887 @@ +# LANGUAGE message translation file for plpgsql +# Copyright (C) 2009 PostgreSQL Global Development Group +# This file is distributed under the same license as the PostgreSQL package. +# FIRST AUTHOR , 2009. +# +msgid "" +msgstr "" +"Project-Id-Version: PostgreSQL 8.4\n" +"Report-Msgid-Bugs-To: pgsql-bugs@postgresql.org\n" +"POT-Creation-Date: 2018-02-22 00:08+0000\n" +"PO-Revision-Date: 2018-02-22 14:36+0300\n" +"Last-Translator: Devrim GÜNDÜZ \n" +"Language-Team: TR \n" +"Language: tr\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=UTF-8\n" +"Content-Transfer-Encoding: 8bit\n" +"Plural-Forms: nplurals=1; plural=0;\n" +"X-Generator: Poedit 1.8.7.1\n" + +#: pl_comp.c:433 pl_handler.c:451 +#, c-format +msgid "PL/pgSQL functions cannot accept type %s" +msgstr "PL/pgSQL fonksiyonları %s veri tipini kabul etmezler" + +#: pl_comp.c:514 +#, c-format +msgid "could not determine actual return type for polymorphic function \"%s\"" +msgstr "\"%s\" polimorfik fonksiyonunun asıl dönüşdeğeri belirlenemedi" + +#: pl_comp.c:544 +#, c-format +msgid "trigger functions can only be called as triggers" +msgstr "trigger fonksiyonları sadece trigger olarak çağırılabilirler" + +#: pl_comp.c:548 pl_handler.c:436 +#, c-format +msgid "PL/pgSQL functions cannot return type %s" +msgstr "PL/pgSQL fonksiyonları %s tipini döndüremezler" + +#: pl_comp.c:589 +#, c-format +msgid "trigger functions cannot have declared arguments" +msgstr "trigger fonksiyonları belirtilmiş (declared) argümanlara sahip olamaz" + +#: pl_comp.c:590 +#, c-format +msgid "The arguments of the trigger can be accessed through TG_NARGS and TG_ARGV instead." +msgstr "Tetikleyici bağımsız değişkenlerine TG_NARGS ve TG_ARGV üzerinden erişilebilir." 
+ +#: pl_comp.c:692 +#, c-format +msgid "event trigger functions cannot have declared arguments" +msgstr "olay tetikleyici (trigger) fonksiyonları belirtilmiş (declared) argümanlara sahip olamaz" + +#: pl_comp.c:943 +#, c-format +msgid "compilation of PL/pgSQL function \"%s\" near line %d" +msgstr "\"%s\" fonkiyonununun %d numaralı satırının civarlarında derlenmesi" + +#: pl_comp.c:966 +#, c-format +msgid "parameter name \"%s\" used more than once" +msgstr "\"%s\" parametresi birden fazla kez kullanılmıştır" + +#: pl_comp.c:1076 +#, c-format +msgid "column reference \"%s\" is ambiguous" +msgstr "\"%s\" sütun referansı iki anlamlı" + +#: pl_comp.c:1078 +#, c-format +msgid "It could refer to either a PL/pgSQL variable or a table column." +msgstr "Ya bir PL/pgSQL değişkenine ya da bir tablo sütununa atıfta bulunuyor olabilir." + +#: pl_comp.c:1258 pl_comp.c:1286 pl_exec.c:4584 pl_exec.c:4913 pl_exec.c:4998 +#: pl_exec.c:5089 +#, c-format +msgid "record \"%s\" has no field \"%s\"" +msgstr "\"%s\" kaydı \"%s\" alanını içermiyor" + +#: pl_comp.c:1818 +#, c-format +msgid "relation \"%s\" does not exist" +msgstr "\"%s\" nesnesi mevcut değil" + +#: pl_comp.c:1927 +#, c-format +msgid "variable \"%s\" has pseudo-type %s" +msgstr "\"%s\" değişkeni %s pseudo tipine sahip" + +#: pl_comp.c:1995 +#, c-format +msgid "relation \"%s\" is not a table" +msgstr "\"%s\" bir tablo değil" + +#: pl_comp.c:2155 +#, c-format +msgid "type \"%s\" is only a shell" +msgstr "\"%s\" tipi bir shelldir" + +#: pl_comp.c:2249 pl_comp.c:2302 +#, c-format +msgid "unrecognized exception condition \"%s\"" +msgstr "tanımlanamayan exception durumu \"%s\"" + +#: pl_comp.c:2510 +#, c-format +msgid "could not determine actual argument type for polymorphic function \"%s\"" +msgstr "\"%s\" polimorfik fonksiyonu için gerçek argüman tipi belirlenemedi" + +#: pl_exec.c:355 pl_exec.c:644 pl_exec.c:914 +msgid "during initialization of execution state" +msgstr "çalıştırma durumu ilklendirmesi sırasında" + +#: pl_exec.c:362 +msgid "while storing call arguments into local variables" +msgstr "çağrı argümanlarını yerel değişkenlerde saklarken" + +#: pl_exec.c:447 pl_exec.c:796 +msgid "during function entry" +msgstr "fonksiyon girişi sırasında" + +#: pl_exec.c:472 +#, c-format +msgid "control reached end of function without RETURN" +msgstr "control fonksiyonun sonuna RETURNsüz ulaştı" + +#: pl_exec.c:479 +msgid "while casting return value to function's return type" +msgstr "dönüş değerini fonksiyonun dönüş tipine dönüştürürken" + +#: pl_exec.c:492 pl_exec.c:3101 +#, c-format +msgid "set-valued function called in context that cannot accept a set" +msgstr "set değerini kabul etmediği ortamda set değeri alan fonksiyon çağırılmış" + +#: pl_exec.c:530 pl_exec.c:2948 +msgid "returned record type does not match expected record type" +msgstr "dönen kayıt tipi beklenen kayıt tipine uymuyor" + +#: pl_exec.c:585 pl_exec.c:825 pl_exec.c:949 +msgid "during function exit" +msgstr "fonksiyon çıkışı sırasında" + +#: pl_exec.c:821 pl_exec.c:945 +#, c-format +msgid "control reached end of trigger procedure without RETURN" +msgstr "trigger yordamı RETURN olmadan bitti" + +#: pl_exec.c:830 +#, c-format +msgid "trigger procedure cannot return a set" +msgstr "trigger yordamı bir küme döndüremez" + +#: pl_exec.c:852 +msgid "returned row structure does not match the structure of the triggering table" +msgstr "dönen satır yapısı tetikleyen tablonun yapısına uymuyor" + +#. translator: last %s is a phrase such as "during statement block +#. local variable initialization" +#. 
+#: pl_exec.c:997 +#, c-format +msgid "PL/pgSQL function %s line %d %s" +msgstr "PL/pgSQL fonksiyonu %s satır %d %s" + +#. translator: last %s is a phrase such as "while storing call +#. arguments into local variables" +#. +#: pl_exec.c:1008 +#, c-format +msgid "PL/pgSQL function %s %s" +msgstr "PL/pgSQL fonksiyonu %s %s" + +#. translator: last %s is a plpgsql statement type name +#: pl_exec.c:1016 +#, c-format +msgid "PL/pgSQL function %s line %d at %s" +msgstr "%s PL/pgSQL fonksiyonu, %d. satır, %s içinde" + +#: pl_exec.c:1022 +#, c-format +msgid "PL/pgSQL function %s" +msgstr "PL/pgSQL fonksiyonu %s" + +#: pl_exec.c:1187 +msgid "during statement block local variable initialization" +msgstr "ifade (statement) bloğu yerel değişken ilklendirmesi sırasında" + +#: pl_exec.c:1226 +#, c-format +msgid "variable \"%s\" declared NOT NULL cannot default to NULL" +msgstr "NOT NULL olarak belirtilen \"%s\" değişkeni öntanımlı olarak NULL olamaz" + +#: pl_exec.c:1277 +msgid "during statement block entry" +msgstr "ifade bloğu girişi sırasında" + +#: pl_exec.c:1309 +msgid "during statement block exit" +msgstr "ifade bloğu çıkışı sırasında" + +#: pl_exec.c:1351 +msgid "during exception cleanup" +msgstr "exception temizlemesi sırasında" + +#: pl_exec.c:1717 +#, c-format +msgid "GET STACKED DIAGNOSTICS cannot be used outside an exception handler" +msgstr "GET STACKED DIAGNOSTICS özel durum işleyici (exception handler) dışında kullanılamaz" + +#: pl_exec.c:1922 +#, c-format +msgid "case not found" +msgstr "case bulunamadı" + +#: pl_exec.c:1923 +#, c-format +msgid "CASE statement is missing ELSE part." +msgstr "CASE ifadesindeki ELSE eksik." + +#: pl_exec.c:2077 +#, c-format +msgid "lower bound of FOR loop cannot be null" +msgstr "FOR döngüsünün alt sınırı null olamaz" + +#: pl_exec.c:2093 +#, c-format +msgid "upper bound of FOR loop cannot be null" +msgstr "For döngüsünün üst sınırı null olamaz" + +#: pl_exec.c:2111 +#, c-format +msgid "BY value of FOR loop cannot be null" +msgstr "FOR döngüsünün BY değeri null olamaz" + +#: pl_exec.c:2117 +#, c-format +msgid "BY value of FOR loop must be greater than zero" +msgstr "FOR döngüsünn BY değeri sıfırdan büyük olmalıdır" + +#: pl_exec.c:2294 pl_exec.c:4085 +#, c-format +msgid "cursor \"%s\" already in use" +msgstr "\"%s\" imleci kullanımda" + +#: pl_exec.c:2317 pl_exec.c:4150 +#, c-format +msgid "arguments given for cursor without arguments" +msgstr "argümansız imleç (cursor) için verilen argümanlar" + +#: pl_exec.c:2336 pl_exec.c:4169 +#, c-format +msgid "arguments required for cursor" +msgstr "imleç için gereken argümanlar" + +#: pl_exec.c:2423 +#, c-format +msgid "FOREACH expression must not be null" +msgstr "FOREACH ifadesi NULL olamaz" + +#: pl_exec.c:2438 +#, c-format +msgid "FOREACH expression must yield an array, not type %s" +msgstr "FOREACH ifadesi %s değil bir dizi (array) sağlamalı" + +#: pl_exec.c:2455 +#, c-format +msgid "slice dimension (%d) is out of the valid range 0..%d" +msgstr "slice boyutu (%d) geçerli kapsamın dışındadır: 0..%d" + +#: pl_exec.c:2482 +#, c-format +msgid "FOREACH ... SLICE loop variable must be of an array type" +msgstr "FOREACH ... 
SLICE döngü değişkeni bir dizi (array) tipinde olmalı" + +#: pl_exec.c:2486 +#, c-format +msgid "FOREACH loop variable must not be of an array type" +msgstr "FOREACH döngü değişkeni dizgi tipinde olamaz" + +#: pl_exec.c:2689 pl_exec.c:2771 pl_exec.c:2941 +#, c-format +msgid "cannot return non-composite value from function returning composite type" +msgstr "bileşik tip dönen fonksiyondan bileşik olmayan değer döndürülemez" + +#: pl_exec.c:2815 pl_gram.y:3199 +#, c-format +msgid "cannot use RETURN NEXT in a non-SETOF function" +msgstr "SETOF olmayan fonksiyonda RETURN NEXT kullanılamaz" + +#: pl_exec.c:2849 pl_exec.c:2976 +#, c-format +msgid "wrong result type supplied in RETURN NEXT" +msgstr "RETURN NEXT içinde yanlış dönüş tipi verildi" + +#: pl_exec.c:2878 pl_exec.c:4572 pl_exec.c:4880 pl_exec.c:4906 pl_exec.c:4972 +#: pl_exec.c:4991 pl_exec.c:5059 pl_exec.c:5082 +#, c-format +msgid "record \"%s\" is not assigned yet" +msgstr "\"%s\" kaydı henüz atanmamış" + +#: pl_exec.c:2880 pl_exec.c:4574 pl_exec.c:4882 pl_exec.c:4908 pl_exec.c:4974 +#: pl_exec.c:4993 pl_exec.c:5061 pl_exec.c:5084 +#, c-format +msgid "The tuple structure of a not-yet-assigned record is indeterminate." +msgstr "Henüz atanmamış kaydın satır yapısı belirsizdir." + +#: pl_exec.c:2887 pl_exec.c:2906 +#, c-format +msgid "wrong record type supplied in RETURN NEXT" +msgstr "RETURN NEXT içinde yanlış kayıt tipi verildi" + +#: pl_exec.c:2995 +#, c-format +msgid "RETURN NEXT must have a parameter" +msgstr "RETURN NEXT bir parameter içermeli" + +#: pl_exec.c:3021 pl_gram.y:3261 +#, c-format +msgid "cannot use RETURN QUERY in a non-SETOF function" +msgstr "RETURN QUERY, SETOF olmayan bir fonksiyon içinde bulunamaz" + +#: pl_exec.c:3045 +msgid "structure of query does not match function result type" +msgstr "sorgunun yapısı fonksiyonun sonuç tipine uymuyor" + +#: pl_exec.c:3129 pl_exec.c:3267 +#, c-format +msgid "RAISE option already specified: %s" +msgstr "RAISE seçeneği zaten belirtilmiş: %s" + +#: pl_exec.c:3163 +#, c-format +msgid "RAISE without parameters cannot be used outside an exception handler" +msgstr "parametresi olmayan RAISE bir özel durum işleyici (exception handler) dışında kullanılamaz" + +#: pl_exec.c:3257 +#, c-format +msgid "RAISE statement option cannot be null" +msgstr "RAISE ifadesi seçeneği null olamaz" + +#: pl_exec.c:3327 +#, c-format +msgid "%s" +msgstr "%s" + +#: pl_exec.c:3382 +#, c-format +msgid "assertion failed" +msgstr "ısrar hatası" + +#: pl_exec.c:3583 pl_exec.c:3729 pl_exec.c:3919 +#, c-format +msgid "cannot COPY to/from client in PL/pgSQL" +msgstr "PL/pgSQL'de istemcide ya da istemciden COPY çalıştırılamaz" + +#: pl_exec.c:3587 pl_exec.c:3733 pl_exec.c:3923 +#, c-format +msgid "cannot begin/end transactions in PL/pgSQL" +msgstr "PL/pgSQL'de transactionlar başlatılıp durdurulamazlar" + +#: pl_exec.c:3588 pl_exec.c:3734 pl_exec.c:3924 +#, c-format +msgid "Use a BEGIN block with an EXCEPTION clause instead." +msgstr "Bunun yerine BEGIN bloğunu EXCEPTION yantümcesi ile kullanın." 
+ +#: pl_exec.c:3757 pl_exec.c:3948 +#, c-format +msgid "INTO used with a command that cannot return data" +msgstr "Veri döndüremeyen bir komutta INTO kullanıldı" + +#: pl_exec.c:3785 pl_exec.c:3976 +#, c-format +msgid "query returned no rows" +msgstr "sorgu satır döndürmedi" + +#: pl_exec.c:3804 pl_exec.c:3995 +#, c-format +msgid "query returned more than one row" +msgstr "sorgu birden fazla satır döndürdü" + +#: pl_exec.c:3821 +#, c-format +msgid "query has no destination for result data" +msgstr "Sorgu sonuç verisi için bir hedef içermiyor" + +#: pl_exec.c:3822 +#, c-format +msgid "If you want to discard the results of a SELECT, use PERFORM instead." +msgstr "SELECT'den gelen sonuçları gözardı etmek istiyorsanız SELECT yerine PERFORM kullanın." + +#: pl_exec.c:3855 pl_exec.c:7317 +#, c-format +msgid "query string argument of EXECUTE is null" +msgstr "EXECUTE' un sorgu dizesi argümanı boştur (null)" + +#: pl_exec.c:3911 +#, c-format +msgid "EXECUTE of SELECT ... INTO is not implemented" +msgstr "EXECUTE of SELECT ... INTO kodlanmadı" + +#: pl_exec.c:3912 +#, c-format +msgid "You might want to use EXECUTE ... INTO or EXECUTE CREATE TABLE ... AS instead." +msgstr "Bunun yerine, EXECUTE ... INTO ya da EXECUTE CREATE TABLE ... AS kullanmak isteyebilirsiniz." + +#: pl_exec.c:4233 pl_exec.c:4329 +#, c-format +msgid "cursor variable \"%s\" is null" +msgstr "\"%s\" imleç değişkeni null'dır" + +#: pl_exec.c:4244 pl_exec.c:4340 +#, c-format +msgid "cursor \"%s\" does not exist" +msgstr "\"%s\" imleci mevcut değil" + +#: pl_exec.c:4257 +#, c-format +msgid "relative or absolute cursor position is null" +msgstr "bağıl ya da mutlak imleç pozisyonu null" + +#: pl_exec.c:4448 +#, c-format +msgid "null value cannot be assigned to variable \"%s\" declared NOT NULL" +msgstr "NOT NULL olarak belirtilen \"%s\" değişkenine null değer atanamaz" + +#: pl_exec.c:4517 +#, c-format +msgid "cannot assign non-composite value to a row variable" +msgstr "bir satır değişkenine bileşik olmayan bir değer atanamaz" + +#: pl_exec.c:4541 +#, c-format +msgid "cannot assign non-composite value to a record variable" +msgstr "bir kayıt değişkenine bileşik olmayan bir değer atanamaz" + +#: pl_exec.c:4661 +#, c-format +msgid "number of array dimensions (%d) exceeds the maximum allowed (%d)" +msgstr "dizin boyut sayısı (%d), izin verilern en yüksek değerini (%d) aşmaktadır" + +#: pl_exec.c:4693 +#, c-format +msgid "subscripted object is not an array" +msgstr "subscript edilen nesne bir dizi (array) değil" + +#: pl_exec.c:4731 +#, c-format +msgid "array subscript in assignment must not be null" +msgstr "atamada array subscript null olamaz" + +#: pl_exec.c:5198 +#, c-format +msgid "query \"%s\" did not return data" +msgstr "\"%s\" sorgusu veri döndürmedi" + +#: pl_exec.c:5206 +#, c-format +msgid "query \"%s\" returned %d column" +msgid_plural "query \"%s\" returned %d columns" +msgstr[0] "\"%s\" sorgusu %d kolon döndürdü" + +#: pl_exec.c:5233 +#, c-format +msgid "query \"%s\" returned more than one row" +msgstr "\"%s\" sorgusu birden fazla satır döndürdü" + +#: pl_exec.c:5301 +#, c-format +msgid "query \"%s\" is not a SELECT" +msgstr "\"%s\" sorgusu bir SELECT değil" + +#: pl_funcs.c:239 +msgid "statement block" +msgstr "ifade bloğu" + +#: pl_funcs.c:241 +msgid "assignment" +msgstr "atama" + +#: pl_funcs.c:251 +msgid "FOR with integer loop variable" +msgstr "tamsayı döngüsünde FOR" + +#: pl_funcs.c:253 +msgid "FOR over SELECT rows" +msgstr "FOR over SELECT rows" + +#: pl_funcs.c:255 +msgid "FOR over cursor" +msgstr "FOR over cursor" 
+ +#: pl_funcs.c:257 +msgid "FOREACH over array" +msgstr "FOREACH dizgi üstünde" + +#: pl_funcs.c:271 +msgid "SQL statement" +msgstr "SQL ifadesi" + +#: pl_funcs.c:275 +msgid "FOR over EXECUTE statement" +msgstr "EXECUTE ifadesinde FOR" + +#: pl_gram.y:478 +#, c-format +msgid "block label must be placed before DECLARE, not after" +msgstr "blok etiketi DECLARE'den önce yerleştirilmelidir, sonra değil." + +#: pl_gram.y:498 +#, c-format +msgid "collations are not supported by type %s" +msgstr "%s veri tipinde collation desteklenmemektedir" + +#: pl_gram.y:513 +#, c-format +msgid "row or record variable cannot be CONSTANT" +msgstr "Satır ya da kayıt değişkeni CONSTANT olamaz" + +#: pl_gram.y:523 +#, c-format +msgid "row or record variable cannot be NOT NULL" +msgstr "satır ya da kayıt değişkeni NOT NULL olamaz" + +#: pl_gram.y:534 +#, c-format +msgid "default value for row or record variable is not supported" +msgstr "satır ya da kayıt değişkenlerine öntanımlı değer atanması desteklenmiyor" + +#: pl_gram.y:679 pl_gram.y:694 pl_gram.y:720 +#, c-format +msgid "variable \"%s\" does not exist" +msgstr "\"%s\" değişkeni mevcut değil" + +#: pl_gram.y:738 pl_gram.y:766 +msgid "duplicate declaration" +msgstr "tekrarlanmış veri tipi deklarasyonu" + +#: pl_gram.y:749 pl_gram.y:777 +#, c-format +msgid "variable \"%s\" shadows a previously defined variable" +msgstr "\"%s\" değişkeni daha önce tanımlanan bir değişkeni gölgeliyor" + +#: pl_gram.y:956 +#, c-format +msgid "diagnostics item %s is not allowed in GET STACKED DIAGNOSTICS" +msgstr "%s tanılayıcı elemanı GET STACKED DIAGNOSTICS içinde kullanılamaz" + +#: pl_gram.y:974 +#, c-format +msgid "diagnostics item %s is not allowed in GET CURRENT DIAGNOSTICS" +msgstr "%s tanılayıcı elemanı GET CURRENT DIAGNOSTICS içinde kullanılamaz" + +#: pl_gram.y:1072 +msgid "unrecognized GET DIAGNOSTICS item" +msgstr "tanımlanamayan GET DIAGNOSTICS öğesi" + +#: pl_gram.y:1082 pl_gram.y:3448 +#, c-format +msgid "\"%s\" is not a scalar variable" +msgstr "\"%s\" scalar bir değişken değil" + +#: pl_gram.y:1334 pl_gram.y:1528 +#, c-format +msgid "loop variable of loop over rows must be a record or row variable or list of scalar variables" +msgstr "" + +#: pl_gram.y:1368 +#, c-format +msgid "cursor FOR loop must have only one target variable" +msgstr "" + +#: pl_gram.y:1375 +#, c-format +msgid "cursor FOR loop must use a bound cursor variable" +msgstr "" + +#: pl_gram.y:1459 +#, c-format +msgid "integer FOR loop must have only one target variable" +msgstr "Tamsayı FOR döngüsünde sadece bir tane hedef değişken olabilir" + +#: pl_gram.y:1495 +#, c-format +msgid "cannot specify REVERSE in query FOR loop" +msgstr "FOR döngü sorgusu içinde REVERSE belirtilemez" + +#: pl_gram.y:1642 +#, c-format +msgid "loop variable of FOREACH must be a known variable or list of variables" +msgstr "" + +#: pl_gram.y:1683 +#, c-format +msgid "there is no label \"%s\" attached to any block or loop enclosing this statement" +msgstr "" + +#: pl_gram.y:1691 +#, c-format +msgid "block label \"%s\" cannot be used in CONTINUE" +msgstr "\"%s\" blok etiketi CONTINUE içinde kullanılamaz" + +#: pl_gram.y:1706 +#, c-format +msgid "EXIT cannot be used outside a loop, unless it has a label" +msgstr "EXIT, bir etiketi olmadıkça bir döngü dışında kullanılamaz" + +#: pl_gram.y:1707 +#, c-format +msgid "CONTINUE cannot be used outside a loop" +msgstr "CONTINUE bir döngü dışında kullanılamaz" + +#: pl_gram.y:1731 pl_gram.y:1768 pl_gram.y:1816 pl_gram.y:2898 pl_gram.y:2983 +#: pl_gram.y:3094 pl_gram.y:3850 +msgid 
"unexpected end of function definition" +msgstr "fonksiyon tanımında beklenmeyen sonlanma" + +#: pl_gram.y:1836 pl_gram.y:1860 pl_gram.y:1876 pl_gram.y:1882 pl_gram.y:2000 +#: pl_gram.y:2008 pl_gram.y:2022 pl_gram.y:2117 pl_gram.y:2304 pl_gram.y:2398 +#: pl_gram.y:2550 pl_gram.y:3691 pl_gram.y:3752 pl_gram.y:3831 +msgid "syntax error" +msgstr "söz dizim hatası " + +#: pl_gram.y:1864 pl_gram.y:1866 pl_gram.y:2308 pl_gram.y:2310 +msgid "invalid SQLSTATE code" +msgstr "geçersiz SQLSTATE kodu" + +#: pl_gram.y:2064 +msgid "syntax error, expected \"FOR\"" +msgstr "sözdizimi hatası, \"FOR\" bekleniyordu" + +#: pl_gram.y:2126 +#, c-format +msgid "FETCH statement cannot return multiple rows" +msgstr "RAISE ifadesi çoklu satır döndüremez" + +#: pl_gram.y:2188 +#, c-format +msgid "cursor variable must be a simple variable" +msgstr "imleç değişkeni basit bir değişken olmalıdır" + +#: pl_gram.y:2194 +#, c-format +msgid "variable \"%s\" must be of type cursor or refcursor" +msgstr "\"%s\" değişkeni cursor ya da refcursor tiplerinden birisi olmalıdır" + +#: pl_gram.y:2521 pl_gram.y:2532 +#, c-format +msgid "\"%s\" is not a known variable" +msgstr "\"%s\" bilinen bir değişken değil" + +#: pl_gram.y:2636 pl_gram.y:2646 pl_gram.y:2802 +msgid "mismatched parentheses" +msgstr "eşlenmemiş parantezler" + +#: pl_gram.y:2650 +#, c-format +msgid "missing \"%s\" at end of SQL expression" +msgstr "SQL ifadesinin sonunda eksik \"%s\" " + +#: pl_gram.y:2656 +#, c-format +msgid "missing \"%s\" at end of SQL statement" +msgstr "SQL ifadesinin sonunda \"%s\" eksik" + +#: pl_gram.y:2673 +msgid "missing expression" +msgstr "eksik ifade" + +#: pl_gram.y:2675 +msgid "missing SQL statement" +msgstr "eksik SQL ifadesi" + +#: pl_gram.y:2804 +msgid "incomplete data type declaration" +msgstr "eksik veri tipi deklarasyonu" + +#: pl_gram.y:2827 +msgid "missing data type declaration" +msgstr "eksik veri tipi deklarasyonu" + +#: pl_gram.y:2906 +msgid "INTO specified more than once" +msgstr "INTO birden fazla belirtildi" + +#: pl_gram.y:3075 +msgid "expected FROM or IN" +msgstr "FROM ya da IN bekleniyordu" + +#: pl_gram.y:3135 +#, c-format +msgid "RETURN cannot have a parameter in function returning set" +msgstr "RETURN, fonksiyon return set içinde parametre alamaz" + +#: pl_gram.y:3136 +#, c-format +msgid "Use RETURN NEXT or RETURN QUERY." +msgstr "RETURN NEXT ya da RETURN QUERY kullanın." 
+ +#: pl_gram.y:3144 +#, c-format +msgid "RETURN cannot have a parameter in function with OUT parameters" +msgstr "RETURN, OUT parametreleri olan fonksiyonda parametre içeremez" + +#: pl_gram.y:3153 +#, c-format +msgid "RETURN cannot have a parameter in function returning void" +msgstr "RETURN, void dönen bir fonksiyonda parametre alamaz" + +#: pl_gram.y:3213 +#, c-format +msgid "RETURN NEXT cannot have a parameter in function with OUT parameters" +msgstr "RETURN NEXT OUT parametreleri olan fonksiyonda parametre içeremez" + +#: pl_gram.y:3317 +#, c-format +msgid "\"%s\" is declared CONSTANT" +msgstr "\"%s\" CONSTANT olarak deklare edilmiş" + +#: pl_gram.y:3379 pl_gram.y:3391 +#, c-format +msgid "record or row variable cannot be part of multiple-item INTO list" +msgstr "" + +#: pl_gram.y:3436 +#, c-format +msgid "too many INTO variables specified" +msgstr "çok fazla INTO değişkeni belirtilmiş" + +#: pl_gram.y:3644 +#, c-format +msgid "end label \"%s\" specified for unlabelled block" +msgstr "etiketlenmemiş blok için \"%s\" bitiş etiketi tanımlanmış" + +#: pl_gram.y:3651 +#, c-format +msgid "end label \"%s\" differs from block's label \"%s\"" +msgstr "\"%s\" bitiş etiketi bloğun etiketi \"%s\"den farklı" + +#: pl_gram.y:3686 +#, c-format +msgid "cursor \"%s\" has no arguments" +msgstr "\"%s\" imlecinin argümanı yok" + +#: pl_gram.y:3700 +#, c-format +msgid "cursor \"%s\" has arguments" +msgstr "\"%s\" imlecinin argümanları var" + +#: pl_gram.y:3742 +#, c-format +msgid "cursor \"%s\" has no argument named \"%s\"" +msgstr "\"%s\" imlecinin \"%s\" adlı bir argümanı yok" + +#: pl_gram.y:3762 +#, fuzzy, c-format +#| msgid "parameter \"%s\" specified more than once" +msgid "value for parameter \"%s\" of cursor \"%s\" specified more than once" +msgstr "\"%s\" parametresi birden fazla kez belirtilmiştir" + +#: pl_gram.y:3787 +#, c-format +msgid "not enough arguments for cursor \"%s\"" +msgstr "\"%s\" imleci (cursor) için yetersiz sayıda argüman " + +#: pl_gram.y:3794 +#, c-format +msgid "too many arguments for cursor \"%s\"" +msgstr "\"%s\" imleci (cursor) için çok fazla argüman" + +#: pl_gram.y:3882 +msgid "unrecognized RAISE statement option" +msgstr "tanımsız RAISE ifadesi seçeneği" + +#: pl_gram.y:3886 +msgid "syntax error, expected \"=\"" +msgstr "sözdizimi hatası, \"=\" bekleniyordu" + +#: pl_gram.y:3927 +#, c-format +msgid "too many parameters specified for RAISE" +msgstr "RAISE için çok fazla parametre var" + +#: pl_gram.y:3931 +#, c-format +msgid "too few parameters specified for RAISE" +msgstr "RAISE için çok az parametre var" + +#: pl_handler.c:154 +msgid "Sets handling of conflicts between PL/pgSQL variable names and table column names." +msgstr "" + +#: pl_handler.c:163 +msgid "Print information about parameters in the DETAIL part of the error messages generated on INTO ... STRICT failures." +msgstr "" + +#: pl_handler.c:171 +msgid "Perform checks given in ASSERT statements." +msgstr "" + +#: pl_handler.c:179 +msgid "List of programming constructs that should produce a warning." +msgstr "Uyarı üretmesi gereken programlama construct'larının yapısı" + +#: pl_handler.c:189 +msgid "List of programming constructs that should produce an error." +msgstr "Hata üretmesi gereken programlama construct'larının yapısı" + +#. translator: %s is typically the translation of "syntax error" +#: pl_scanner.c:624 +#, c-format +msgid "%s at end of input" +msgstr "giriş sonuna %s" + +#. 
translator: first %s is typically the translation of "syntax error" +#: pl_scanner.c:640 +#, c-format +msgid "%s at or near \"%s\"" +msgstr "\"%2$s\" yerinde %1$s" + +#~ msgid "unterminated dollar-quoted string" +#~ msgstr "sonuçlandırılmamış dolar işeretiyle sınırlandırılmış satır" + +#~ msgid "unterminated quoted string" +#~ msgstr "sonuçlandırılmamış tırnakla sınırlandırılmış satır" + +#~ msgid "unterminated /* comment" +#~ msgstr "/* açıklama sonlandırılmamış" + +#~ msgid "unterminated quoted identifier" +#~ msgstr "sonuçlandırılmamış tırnakla sınırlandırılmış tanım" + +#~ msgid "unterminated \" in identifier: %s" +#~ msgstr "belirteçte sonlandırılmamış *\" : %s" + +#~ msgid "variable \"%s\" does not exist in the current block" +#~ msgstr "\"%s\" değişkeni mevcut bloğun içinde yok" + +#~ msgid "expected \")\"" +#~ msgstr "\")\" bekleniyordu" + +#~ msgid "cannot assign to tg_argv" +#~ msgstr "tg_argv'ye atama yapılamadı" + +#~ msgid "too many variables specified in SQL statement" +#~ msgstr "SQL ifadesinde çok fazla değişken belirtilmiş" + +#~ msgid "expected a cursor or refcursor variable" +#~ msgstr "cursor ya da refcursonr değişkeni beklendi" + +#~ msgid "syntax error at \"%s\"" +#~ msgstr "\"%s\" içinde sözdizimi hatası" + +#~ msgid "expected an integer variable" +#~ msgstr "tamsayı değişken bekleniyordu" + +#~ msgid "function has no parameter \"%s\"" +#~ msgstr "fonksiyonun \"%s\" parametresi var" + +#~ msgid "Number of returned columns (%d) does not match expected column count (%d)." +#~ msgstr "Dönen kolonların sayısı (%d) beklenen kolon sayısı (%d) ile eşleşmiyor." + +#~ msgid "N/A (dropped column)" +#~ msgstr "N/A (silinmiş kolon)" + +#~ msgid "row \"%s.%s\" has no field \"%s\"" +#~ msgstr "\"%s.%s\" satırında \"%s\" alanı yok." + +#~ msgid "row \"%s\" has no field \"%s\"" +#~ msgstr "\"%s\" satırının bir alanı yok \"%s\"" + +#~ msgid "expected \"[\"" +#~ msgstr " \"[\" bekleniyordu" + +#~ msgid "relation \"%s.%s\" does not exist" +#~ msgstr "\"%s.%s\" nesnesi mevcut değil" + +#~ msgid "EXECUTE statement" +#~ msgstr "EXECUTE ifadesi" + +#~ msgid "RETURN NEXT must specify a record or row variable in function returning row" +#~ msgstr "RETURN NEXT satır döndüren fonksiyonda kayıt ya da satır değişkeni belirtmelidir" + +#~ msgid "label does not exist" +#~ msgstr "etiket bulunamadı" diff --git a/src/pl/plpgsql/src/po/vi.po b/src/pl/plpgsql/src/po/vi.po new file mode 100644 index 0000000000..6620dea644 --- /dev/null +++ b/src/pl/plpgsql/src/po/vi.po @@ -0,0 +1,850 @@ +# LANGUAGE message translation file for plpgsql +# Copyright (C) 2018 PostgreSQL Global Development Group +# This file is distributed under the same license as the plpgsql (PostgreSQL) package. +# FIRST AUTHOR , 2018. 
+# +msgid "" +msgstr "" +"Project-Id-Version: plpgsql (PostgreSQL) 11\n" +"Report-Msgid-Bugs-To: pgsql-bugs@postgresql.org\n" +"POT-Creation-Date: 2018-04-22 12:08+0000\n" +"PO-Revision-Date: 2018-05-13 15:28+0900\n" +"Language-Team: \n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=UTF-8\n" +"Content-Transfer-Encoding: 8bit\n" +"Plural-Forms: nplurals=1; plural=0;\n" +"X-Generator: Poedit 2.0.6\n" +"Last-Translator: \n" +"Language: vi_VN\n" + +#: pl_comp.c:434 pl_handler.c:457 +#, c-format +msgid "PL/pgSQL functions cannot accept type %s" +msgstr "Các hàm PL/pgSQL không thể chấp nhận kiểu %s" + +#: pl_comp.c:522 +#, c-format +msgid "could not determine actual return type for polymorphic function \"%s\"" +msgstr "không thể xác định kiểu thực tế trả về cho hàm đa hình \"%s\"" + +#: pl_comp.c:552 +#, c-format +msgid "trigger functions can only be called as triggers" +msgstr "hàm trigger chỉ có thể được gọi như một trigger" + +#: pl_comp.c:556 pl_handler.c:441 +#, c-format +msgid "PL/pgSQL functions cannot return type %s" +msgstr "Các hàm PL/pgSQL không thể trả về kiểu %s" + +#: pl_comp.c:595 +#, c-format +msgid "trigger functions cannot have declared arguments" +msgstr "không thể khai báo đối số cho hàm trigger" + +#: pl_comp.c:596 +#, c-format +msgid "" +"The arguments of the trigger can be accessed through TG_NARGS and TG_ARGV " +"instead." +msgstr "" +"Thay vào đó các đối số của trigger có thể được truy cập thông qua TG_NARGS " +"và TG_ARGV." + +#: pl_comp.c:719 +#, c-format +msgid "event trigger functions cannot have declared arguments" +msgstr "không thể khai báo đối số cho hàm sự kiện trigger" + +#: pl_comp.c:976 +#, c-format +msgid "compilation of PL/pgSQL function \"%s\" near line %d" +msgstr "biên dịch hàm PL/pgSQL \"%s\" gần dòng %d" + +#: pl_comp.c:999 +#, c-format +msgid "parameter name \"%s\" used more than once" +msgstr "tên thông số \"%s\" được sử dụng nhiều lần" + +#: pl_comp.c:1109 +#, c-format +msgid "column reference \"%s\" is ambiguous" +msgstr "tham chiếu cột \"%s\" không rõ ràng" + +#: pl_comp.c:1111 +#, c-format +msgid "It could refer to either a PL/pgSQL variable or a table column." +msgstr "Nó có thể tham chiếu đến một biến PL/pgSQL hoặc một cột bảng." 
+ +#: pl_comp.c:1294 pl_exec.c:5031 pl_exec.c:5396 pl_exec.c:5483 pl_exec.c:5574 +#: pl_exec.c:6491 +#, c-format +msgid "record \"%s\" has no field \"%s\"" +msgstr "bản ghi \"%s\" không có trường \"%s\"" + +#: pl_comp.c:1756 +#, c-format +msgid "relation \"%s\" does not exist" +msgstr "relation \"%s\" không tồn tại" + +#: pl_comp.c:1848 +#, c-format +msgid "variable \"%s\" has pseudo-type %s" +msgstr "biến \"%s\" có pseudo-type %s" + +#: pl_comp.c:2026 +#, c-format +msgid "type \"%s\" is only a shell" +msgstr "kiểu \"%s\" chỉ là một shell(chưa được định nghĩa nội dung)" + +#: pl_comp.c:2123 pl_comp.c:2176 +#, c-format +msgid "unrecognized exception condition \"%s\"" +msgstr "không thể nhận ra điều kiện của ngoại lệ \"%s\"" + +#: pl_comp.c:2390 +#, c-format +msgid "" +"could not determine actual argument type for polymorphic function \"%s\"" +msgstr "không thể xác định loại đối số thực tế cho hàm đa hình \"%s\"" + +#: pl_exec.c:473 pl_exec.c:885 pl_exec.c:1098 +msgid "during initialization of execution state" +msgstr "trong khi khởi tạo trạng thái thực thi" + +#: pl_exec.c:479 +msgid "while storing call arguments into local variables" +msgstr "trong khi lưu trữ các đối số gọi vào trong các biến cục bộ" + +#: pl_exec.c:567 pl_exec.c:933 +msgid "during function entry" +msgstr "trong lúc đi vào hàm" + +#: pl_exec.c:592 +#, c-format +msgid "control reached end of function without RETURN" +msgstr "Hàm đã kết thúc trước khi trả về RETURN" + +#: pl_exec.c:599 +msgid "while casting return value to function's return type" +msgstr "trong khi ép kiểu giá trị trả về cho kiểu trả về của hàm" + +#: pl_exec.c:612 pl_exec.c:3484 +#, c-format +msgid "set-valued function called in context that cannot accept a set" +msgstr "" +"hàm thiết lập giá trị được gọi trong ngữ cảnh không thể chấp nhận một tập hợp" + +#: pl_exec.c:738 pl_exec.c:962 pl_exec.c:1123 +msgid "during function exit" +msgstr "trong lúc kết thúc hàm" + +#: pl_exec.c:793 pl_exec.c:832 pl_exec.c:3329 +msgid "returned record type does not match expected record type" +msgstr "loại bản ghi đã trả về không khớp với kiểu được ghi kỳ vọng" + +#: pl_exec.c:958 pl_exec.c:1119 +#, c-format +msgid "control reached end of trigger procedure without RETURN" +msgstr "thủ tục trigger kết thúc trước khi tra về RETURN" + +#: pl_exec.c:967 +#, c-format +msgid "trigger procedure cannot return a set" +msgstr "thủ tục trigger không thể trả về một tập hợp" + +#: pl_exec.c:1006 pl_exec.c:1034 +msgid "" +"returned row structure does not match the structure of the triggering table" +msgstr "cấu trúc hàng trả về không khớp với cấu trúc của trigger bảng" + +#. translator: last %s is a phrase such as "during statement block +#. local variable initialization" +#. +#: pl_exec.c:1171 +#, c-format +msgid "PL/pgSQL function %s line %d %s" +msgstr "Hàm PL/pgSQL %s dòng %d %s" + +#. translator: last %s is a phrase such as "while storing call +#. arguments into local variables" +#. +#: pl_exec.c:1182 +#, c-format +msgid "PL/pgSQL function %s %s" +msgstr "Hàm PL/pgSQL %s %s" + +#. 
translator: last %s is a plpgsql statement type name +#: pl_exec.c:1190 +#, c-format +msgid "PL/pgSQL function %s line %d at %s" +msgstr "Hàm PL/pgSQL %s dòng %d tại %s" + +#: pl_exec.c:1196 +#, c-format +msgid "PL/pgSQL function %s" +msgstr "Hàm PL/pgSQL %s" + +#: pl_exec.c:1534 +msgid "during statement block local variable initialization" +msgstr "trong khi khởi tạo biến cục bộ trong khối lệnh" + +#: pl_exec.c:1632 +msgid "during statement block entry" +msgstr "trong khi vào khối lệnh" + +#: pl_exec.c:1664 +msgid "during statement block exit" +msgstr "trong khi kết thúc khối lệnh" + +#: pl_exec.c:1702 +msgid "during exception cleanup" +msgstr "trong khi dọn dẹp ngoại lệ" + +#: pl_exec.c:2207 pl_exec.c:2221 +#, c-format +msgid "argument %d is an output argument but is not writable" +msgstr "đối số %d là đối số đầu ra nhưng không thể ghi" + +#: pl_exec.c:2263 +#, c-format +msgid "GET STACKED DIAGNOSTICS cannot be used outside an exception handler" +msgstr "" +"GET STACKED DIAGNOSTICS không thể được sử dụng bên ngoài bên ngoài một trình " +"xử lý ngoại lệ" + +#: pl_exec.c:2468 +#, c-format +msgid "case not found" +msgstr "không tìm thấy trường hợp này" + +#: pl_exec.c:2469 +#, c-format +msgid "CASE statement is missing ELSE part." +msgstr "Câu lệnh CASE thiếu phần ELSE." + +#: pl_exec.c:2562 +#, c-format +msgid "lower bound of FOR loop cannot be null" +msgstr "giới hạn dưới của vòng lặp FOR không thể là null" + +#: pl_exec.c:2578 +#, c-format +msgid "upper bound of FOR loop cannot be null" +msgstr "giới hạn trên của vòng lặp FOR không thể là null" + +#: pl_exec.c:2596 +#, c-format +msgid "BY value of FOR loop cannot be null" +msgstr "Giá trị BY của vòng lặp FOR không thể là null" + +#: pl_exec.c:2602 +#, c-format +msgid "BY value of FOR loop must be greater than zero" +msgstr "Giá trị BY của vòng lặp FOR phải lớn hơn 0" + +#: pl_exec.c:2736 pl_exec.c:4461 +#, c-format +msgid "cursor \"%s\" already in use" +msgstr "con trỏ \"%s\" đã đang được sử dụng" + +#: pl_exec.c:2759 pl_exec.c:4526 +#, c-format +msgid "arguments given for cursor without arguments" +msgstr "đối số được đưa ra cho con trỏ không có đối số" + +#: pl_exec.c:2778 pl_exec.c:4545 +#, c-format +msgid "arguments required for cursor" +msgstr "đối số cần thiết cho con trỏ" + +#: pl_exec.c:2865 +#, c-format +msgid "FOREACH expression must not be null" +msgstr "Biểu thức FOREACH không được là null" + +#: pl_exec.c:2880 +#, c-format +msgid "FOREACH expression must yield an array, not type %s" +msgstr "Biểu thức FOREACH phải tạo ra một mảng, không phải kiểu %s" + +#: pl_exec.c:2897 +#, c-format +msgid "slice dimension (%d) is out of the valid range 0..%d" +msgstr "kích thước slice (%d) nằm ngoài phạm vi hợp lệ 0..%d" + +#: pl_exec.c:2924 +#, c-format +msgid "FOREACH ... SLICE loop variable must be of an array type" +msgstr "Biến cho vòng lặp FOREACH ... 
SLICE phải là một kiểu mảng" + +#: pl_exec.c:2928 +#, c-format +msgid "FOREACH loop variable must not be of an array type" +msgstr "Biến vòng lặp FOREACH không được thuộc loại mảng" + +#: pl_exec.c:3090 pl_exec.c:3147 pl_exec.c:3322 +#, c-format +msgid "" +"cannot return non-composite value from function returning composite type" +msgstr "không thể trả về giá trị không-phức hợp từ hàm trả về kiểu phức hợp" + +#: pl_exec.c:3186 pl_gram.y:3266 +#, c-format +msgid "cannot use RETURN NEXT in a non-SETOF function" +msgstr "không thể sử dụng RETURN NEXT trong một hàm không phải-SETOF" + +#: pl_exec.c:3227 pl_exec.c:3359 +#, c-format +msgid "wrong result type supplied in RETURN NEXT" +msgstr "kiểu kết quả trả về không đúng trong RETURN NEXT" + +#: pl_exec.c:3265 pl_exec.c:3286 +#, c-format +msgid "wrong record type supplied in RETURN NEXT" +msgstr "kiểu bản ghi trả về không đúng trong RETURN NEXT" + +#: pl_exec.c:3378 +#, c-format +msgid "RETURN NEXT must have a parameter" +msgstr "RETURN NEXT phải có một tham số" + +#: pl_exec.c:3404 pl_gram.y:3329 +#, c-format +msgid "cannot use RETURN QUERY in a non-SETOF function" +msgstr "không thể sử dụng RETURN QUERY trong một hàm không phải-SETOF" + +#: pl_exec.c:3428 +msgid "structure of query does not match function result type" +msgstr "cấu trúc của truy vấn không khớp với kiểu kết quả hàm" + +#: pl_exec.c:3512 pl_exec.c:3650 +#, c-format +msgid "RAISE option already specified: %s" +msgstr "Tùy chọn RAISE đã được chỉ định: %s" + +#: pl_exec.c:3546 +#, c-format +msgid "RAISE without parameters cannot be used outside an exception handler" +msgstr "" +"RAISE không có tham số không thể được sử dụng bên ngoài một trình xử lý " +"ngoại lệ" + +#: pl_exec.c:3640 +#, c-format +msgid "RAISE statement option cannot be null" +msgstr "Tùy chọn lệnh RAISE không thể là null" + +#: pl_exec.c:3710 +#, c-format +msgid "%s" +msgstr "%s" + +#: pl_exec.c:3765 +#, c-format +msgid "assertion failed" +msgstr "lỗi assertion" + +#: pl_exec.c:3970 pl_exec.c:4117 pl_exec.c:4301 +#, c-format +msgid "cannot COPY to/from client in PL/pgSQL" +msgstr "không thể COPY tới/từ client trong PL/pgSQL" + +#: pl_exec.c:3974 pl_exec.c:4121 pl_exec.c:4305 +#, c-format +msgid "cannot begin/end transactions in PL/pgSQL" +msgstr "không thể begin/end transactions trong PL/pgSQL" + +#: pl_exec.c:3975 pl_exec.c:4122 pl_exec.c:4306 +#, c-format +msgid "Use a BEGIN block with an EXCEPTION clause instead." +msgstr "Sử dụng một khối BEGIN với một cấu trúc EXCEPTION để thay thế." + +#: pl_exec.c:4144 pl_exec.c:4329 +#, c-format +msgid "INTO used with a command that cannot return data" +msgstr "INTO được sử dụng với lệnh không thể trả về dữ liệu" + +#: pl_exec.c:4167 pl_exec.c:4352 +#, c-format +msgid "query returned no rows" +msgstr "truy vấn không trả lại dòng nào" + +#: pl_exec.c:4186 pl_exec.c:4371 +#, c-format +msgid "query returned more than one row" +msgstr "truy vấn trả về nhiều dòng" + +#: pl_exec.c:4203 +#, c-format +msgid "query has no destination for result data" +msgstr "truy vấn không có đích cho dữ liệu kết quả" + +#: pl_exec.c:4204 +#, c-format +msgid "If you want to discard the results of a SELECT, use PERFORM instead." +msgstr "Nếu bạn không muốn sử dụng kết quả của SELECT, hãy sử dụng PERFORM." + +#: pl_exec.c:4237 pl_exec.c:8212 +#, c-format +msgid "query string argument of EXECUTE is null" +msgstr "đối số chuỗi truy vấn của EXECUTE là null" + +#: pl_exec.c:4293 +#, c-format +msgid "EXECUTE of SELECT ... INTO is not implemented" +msgstr "EXECUTE của SELECT ... 
INTO chưa được thực thi" + +#: pl_exec.c:4294 +#, c-format +msgid "" +"You might want to use EXECUTE ... INTO or EXECUTE CREATE TABLE ... AS " +"instead." +msgstr "" +"Thay vào đó có thể bạn muốn sử dụng EXECUTE ... INTO hoặc EXECUTE CREATE " +"TABLE ... AS." + +#: pl_exec.c:4607 pl_exec.c:4695 +#, c-format +msgid "cursor variable \"%s\" is null" +msgstr "biến con trỏ \"%s\" là null" + +#: pl_exec.c:4618 pl_exec.c:4706 +#, c-format +msgid "cursor \"%s\" does not exist" +msgstr "con trỏ \"%s\" không tồn tại" + +#: pl_exec.c:4631 +#, c-format +msgid "relative or absolute cursor position is null" +msgstr "vị trí con trỏ tương đối hoặc tuyệt đối là null" + +#: pl_exec.c:4881 pl_exec.c:4976 +#, c-format +msgid "null value cannot be assigned to variable \"%s\" declared NOT NULL" +msgstr "giá trị null không thể được gán cho biến \"%s\" được khai báo NOT NULL" + +#: pl_exec.c:4957 +#, c-format +msgid "cannot assign non-composite value to a row variable" +msgstr "không thể gán giá trị không-phức hợp cho biến dòng" + +#: pl_exec.c:4989 +#, c-format +msgid "cannot assign non-composite value to a record variable" +msgstr "không thể gán giá trị không phải-phức hợp cho biến bản ghi" + +#: pl_exec.c:5040 +#, c-format +msgid "cannot assign to system column \"%s\"" +msgstr "không thể gán cho cột hệ thống \"%s\"" + +#: pl_exec.c:5104 +#, c-format +msgid "number of array dimensions (%d) exceeds the maximum allowed (%d)" +msgstr "số lượng chiều của mảng (%d) vượt quá số lượng tối đa cho phép (%d)" + +#: pl_exec.c:5136 +#, c-format +msgid "subscripted object is not an array" +msgstr "đối tượng chỉ số không phải là một mảng" + +#: pl_exec.c:5174 +#, c-format +msgid "array subscript in assignment must not be null" +msgstr "chỉ số mảng sử dụng trong gán không thể là null" + +#: pl_exec.c:5681 +#, c-format +msgid "query \"%s\" did not return data" +msgstr "truy vấn \"%s\" không trả về dữ liệu" + +#: pl_exec.c:5689 +#, c-format +msgid "query \"%s\" returned %d column" +msgid_plural "query \"%s\" returned %d columns" +msgstr[0] "truy vấn \"%s\" trả lại %d cột" + +#: pl_exec.c:5717 +#, c-format +msgid "query \"%s\" returned more than one row" +msgstr "truy vấn \"%s\" đã trả lại nhiều hàng" + +#: pl_exec.c:5780 +#, c-format +msgid "query \"%s\" is not a SELECT" +msgstr "truy vấn \"%s\" không phải là một SELECT" + +#: pl_exec.c:6505 pl_exec.c:6545 pl_exec.c:6585 +#, c-format +msgid "" +"type of parameter %d (%s) does not match that when preparing the plan (%s)" +msgstr "kiểu tham số %d (%s) không khớp với thông số khi chuẩn bị plan (%s)" + +#: pl_exec.c:7356 +#, c-format +msgid "record \"%s\" is not assigned yet" +msgstr "bản ghi \"%s\" chưa được gán" + +#: pl_exec.c:7357 +#, c-format +msgid "The tuple structure of a not-yet-assigned record is indeterminate." +msgstr "Cấu trúc tuple của một bản ghi chưa được gán là không xác định." 
+ +#: pl_funcs.c:239 +msgid "statement block" +msgstr "khối câu lệnh" + +#: pl_funcs.c:241 +msgid "assignment" +msgstr "gán" + +#: pl_funcs.c:251 +msgid "FOR with integer loop variable" +msgstr "FOR với biến số nguyên vòng lặp" + +#: pl_funcs.c:253 +msgid "FOR over SELECT rows" +msgstr "FOR trên các dòng SELECT" + +#: pl_funcs.c:255 +msgid "FOR over cursor" +msgstr "FOR trên con trỏ" + +#: pl_funcs.c:257 +msgid "FOREACH over array" +msgstr "FOREACH trên mảng" + +#: pl_funcs.c:271 +msgid "SQL statement" +msgstr "Câu lệnh SQL" + +#: pl_funcs.c:275 +msgid "FOR over EXECUTE statement" +msgstr "FOR trên câu lệnh EXECUTE" + +#: pl_gram.y:485 +#, c-format +msgid "block label must be placed before DECLARE, not after" +msgstr "nhãn khối phải được đặt trước DECLARE, không phải sau" + +#: pl_gram.y:505 +#, c-format +msgid "collations are not supported by type %s" +msgstr "collation không được hỗ trợ bởi kiểu %s" + +#: pl_gram.y:524 +#, c-format +msgid "variable \"%s\" must have a default value, since it's declared NOT NULL" +msgstr "biến \"%s\" phải có giá trị mặc định, vì nó được khai báo là NOT NULL" + +#: pl_gram.y:669 pl_gram.y:684 pl_gram.y:710 +#, c-format +msgid "variable \"%s\" does not exist" +msgstr "biến \"%s\" không tồn tại" + +#: pl_gram.y:728 pl_gram.y:756 +msgid "duplicate declaration" +msgstr "khai báo trùng lặp" + +#: pl_gram.y:739 pl_gram.y:767 +#, c-format +msgid "variable \"%s\" shadows a previously defined variable" +msgstr "biến \"%s\" làm cho một biến được định nghĩa trước đó không khả thị" + +#: pl_gram.y:983 +#, c-format +msgid "diagnostics item %s is not allowed in GET STACKED DIAGNOSTICS" +msgstr "diagnostics mục %s không được phép trong GET STACKED DIAGNOSTICS" + +#: pl_gram.y:1001 +#, c-format +msgid "diagnostics item %s is not allowed in GET CURRENT DIAGNOSTICS" +msgstr "diagnostics mục %s không được cho phép trong GET CURRENT DIAGNOSTICS" + +#: pl_gram.y:1099 +msgid "unrecognized GET DIAGNOSTICS item" +msgstr "không nhận ra mục GET DIAGNOSTICS" + +#: pl_gram.y:1109 pl_gram.y:3508 +#, c-format +msgid "\"%s\" is not a scalar variable" +msgstr "\"%s\" không phải là biến vô hướng" + +#: pl_gram.y:1357 pl_gram.y:1550 +#, c-format +msgid "" +"loop variable of loop over rows must be a record variable or list of scalar " +"variables" +msgstr "" +"vòng lặp của vòng lặp trên các dòng phải là một biến bản ghi hoặc danh sách " +"các biến vô hướng" + +#: pl_gram.y:1391 +#, c-format +msgid "cursor FOR loop must have only one target variable" +msgstr "vòng lặp FOR sử dụng con trỏ chỉ có một biến đích" + +#: pl_gram.y:1398 +#, c-format +msgid "cursor FOR loop must use a bound cursor variable" +msgstr "vòng lặp FOR sử dụng con trỏ phải sử dụng một biến con trỏ" + +#: pl_gram.y:1485 +#, c-format +msgid "integer FOR loop must have only one target variable" +msgstr "vòng lặp FOR sử dụng số nguyên chỉ được phép có một biến đích" + +#: pl_gram.y:1521 +#, c-format +msgid "cannot specify REVERSE in query FOR loop" +msgstr "không thể chỉ định REVERSE trong vòng lặp truy vấn FOR" + +#: pl_gram.y:1652 +#, c-format +msgid "loop variable of FOREACH must be a known variable or list of variables" +msgstr "biến vòng lặp của FOREACH phải là biến được biết hoặc danh sách biến" + +#: pl_gram.y:1693 +#, c-format +msgid "" +"there is no label \"%s\" attached to any block or loop enclosing this " +"statement" +msgstr "" +"không có nhãn \"%s\" được đính kèm với bất kỳ khối hoặc vòng lặp nào kèm " +"theo câu lệnh này" + +#: pl_gram.y:1701 +#, c-format +msgid "block label \"%s\" cannot be used in CONTINUE" 
+msgstr "không thể sử dụng nhãn khối \"%s\" trong CONTINUE" + +#: pl_gram.y:1716 +#, c-format +msgid "EXIT cannot be used outside a loop, unless it has a label" +msgstr "" +"EXIT không thể được sử dụng bên ngoài một vòng lặp, trừ khi nó có một nhãn" + +#: pl_gram.y:1717 +#, c-format +msgid "CONTINUE cannot be used outside a loop" +msgstr "Không thể sử dụng CONTINUE bên ngoài vòng lặp" + +#: pl_gram.y:1741 pl_gram.y:1778 pl_gram.y:1826 pl_gram.y:2958 pl_gram.y:3041 +#: pl_gram.y:3152 pl_gram.y:3907 +msgid "unexpected end of function definition" +msgstr "định nghĩa kết thúc hàm không mong đợi" + +#: pl_gram.y:1846 pl_gram.y:1870 pl_gram.y:1886 pl_gram.y:1892 pl_gram.y:2009 +#: pl_gram.y:2017 pl_gram.y:2031 pl_gram.y:2125 pl_gram.y:2360 pl_gram.y:2454 +#: pl_gram.y:2612 pl_gram.y:3749 pl_gram.y:3810 pl_gram.y:3888 +msgid "syntax error" +msgstr "lỗi cú pháp" + +#: pl_gram.y:1874 pl_gram.y:1876 pl_gram.y:2364 pl_gram.y:2366 +msgid "invalid SQLSTATE code" +msgstr "mã SQLSTATE không hợp lệ" + +#: pl_gram.y:2073 +msgid "syntax error, expected \"FOR\"" +msgstr "lỗi cú pháp, kỳ vọng \"FOR\"" + +#: pl_gram.y:2134 +#, c-format +msgid "FETCH statement cannot return multiple rows" +msgstr "Câu lệnh FETCH không thể trả về nhiều hàng" + +#: pl_gram.y:2244 +#, c-format +msgid "cursor variable must be a simple variable" +msgstr "biến con trỏ phải là một biến đơn giản" + +#: pl_gram.y:2250 +#, c-format +msgid "variable \"%s\" must be of type cursor or refcursor" +msgstr "biến \"%s\" phải là kiểu con trỏ hoặc refcursor" + +#: pl_gram.y:2583 pl_gram.y:2594 +#, c-format +msgid "\"%s\" is not a known variable" +msgstr "\"%s\" không phải là một biến" + +#: pl_gram.y:2698 pl_gram.y:2708 pl_gram.y:2863 +msgid "mismatched parentheses" +msgstr "dấu ngoặc đơn không khớp" + +#: pl_gram.y:2712 +#, c-format +msgid "missing \"%s\" at end of SQL expression" +msgstr "thiếu \"%s\" ở cuối biểu thức SQL" + +#: pl_gram.y:2718 +#, c-format +msgid "missing \"%s\" at end of SQL statement" +msgstr "thiếu \"%s\" ở cuối câu lệnh SQL" + +#: pl_gram.y:2735 +msgid "missing expression" +msgstr "thiếu biểu thức" + +#: pl_gram.y:2737 +msgid "missing SQL statement" +msgstr "thiếu câu lệnh SQL" + +#: pl_gram.y:2865 +msgid "incomplete data type declaration" +msgstr "khai báo kiểu dữ liệu không đầy đủ" + +#: pl_gram.y:2888 +msgid "missing data type declaration" +msgstr "thiếu khai báo kiểu dữ liệu" + +#: pl_gram.y:2966 +msgid "INTO specified more than once" +msgstr "INTO được chỉ định nhiều lần" + +#: pl_gram.y:3133 +msgid "expected FROM or IN" +msgstr "kỳ vọng FROM hoặc IN" + +#: pl_gram.y:3193 +#, c-format +msgid "RETURN cannot have a parameter in function returning set" +msgstr "RETURN không thể có tham số trong tập hợp giá trị trả về của hàm" + +#: pl_gram.y:3194 +#, c-format +msgid "Use RETURN NEXT or RETURN QUERY." +msgstr "sử dụng RETURN NEXT hay RETURN QUERY." 
+ +#: pl_gram.y:3204 +#, c-format +msgid "RETURN cannot have a parameter in a procedure" +msgstr "RETURN không thể có tham số trong một thủ tục" + +#: pl_gram.y:3209 +#, c-format +msgid "RETURN cannot have a parameter in function returning void" +msgstr "RETURN không thể có tham số trong hàm trả về void" + +#: pl_gram.y:3218 +#, c-format +msgid "RETURN cannot have a parameter in function with OUT parameters" +msgstr "RETURN không thể có tham số trong hàm với tham số OUT" + +#: pl_gram.y:3280 +#, c-format +msgid "RETURN NEXT cannot have a parameter in function with OUT parameters" +msgstr "RETURN NEXT không thể có tham số trong hàm với tham số OUT" + +#: pl_gram.y:3387 +#, c-format +msgid "variable \"%s\" is declared CONSTANT" +msgstr "biến \"%s\" được khai báo CONSTANT" + +#: pl_gram.y:3450 +#, c-format +msgid "record variable cannot be part of multiple-item INTO list" +msgstr "biến bản ghi không thể là một phần của danh sách INTO nhiều mục" + +#: pl_gram.y:3496 +#, c-format +msgid "too many INTO variables specified" +msgstr "quá nhiều biến INTO được chỉ định" + +#: pl_gram.y:3702 +#, c-format +msgid "end label \"%s\" specified for unlabelled block" +msgstr "nhãn kết thúc \"%s\" được chỉ định cho khối không được gắn nhãn" + +#: pl_gram.y:3709 +#, c-format +msgid "end label \"%s\" differs from block's label \"%s\"" +msgstr "nhãn kết thúc \"%s\" khác với nhãn của block \"%s\"" + +#: pl_gram.y:3744 +#, c-format +msgid "cursor \"%s\" has no arguments" +msgstr "con trỏ \"%s\" không có đối số" + +#: pl_gram.y:3758 +#, c-format +msgid "cursor \"%s\" has arguments" +msgstr "con trỏ \"%s\" có đối số" + +#: pl_gram.y:3800 +#, c-format +msgid "cursor \"%s\" has no argument named \"%s\"" +msgstr "con trỏ \"%s\" không có đối số tên là \"%s\"" + +#: pl_gram.y:3820 +#, c-format +msgid "value for parameter \"%s\" of cursor \"%s\" specified more than once" +msgstr "giá trị cho tham số \"%s\" của con trỏ \"%s\" được chỉ định nhiều lần" + +#: pl_gram.y:3845 +#, c-format +msgid "not enough arguments for cursor \"%s\"" +msgstr "không đủ đối số cho con trỏ \"%s\"" + +#: pl_gram.y:3852 +#, c-format +msgid "too many arguments for cursor \"%s\"" +msgstr "quá nhiều đối số cho con trỏ \"%s\"" + +#: pl_gram.y:3939 +msgid "unrecognized RAISE statement option" +msgstr "không thể xác định tùy chọn cho lệnh RAISE" + +#: pl_gram.y:3943 +msgid "syntax error, expected \"=\"" +msgstr "lỗi cú pháp, kỳ vọng \"=\"" + +#: pl_gram.y:3984 +#, c-format +msgid "too many parameters specified for RAISE" +msgstr "quá nhiều thông số được chỉ định cho RAISE" + +#: pl_gram.y:3988 +#, c-format +msgid "too few parameters specified for RAISE" +msgstr "quá ít thông số được chỉ định cho RAISE" + +#: pl_handler.c:154 +msgid "" +"Sets handling of conflicts between PL/pgSQL variable names and table column " +"names." +msgstr "Đặt xử lý xung đột giữa tên biến PL/pgSQL và tên cột bảng." + +#: pl_handler.c:163 +msgid "" +"Print information about parameters in the DETAIL part of the error messages " +"generated on INTO ... STRICT failures." +msgstr "" +"Hiển thị thông tin về các tham số trong phần DETAIL của các thông báo lỗi " +"được tạo ra khi INTO ... STRICT lỗi." + +#: pl_handler.c:171 +msgid "Perform checks given in ASSERT statements." +msgstr "Thực hiện các kiểm tra được đưa ra trong các câu lệnh ASSERT." + +#: pl_handler.c:179 +msgid "List of programming constructs that should produce a warning." +msgstr "Danh sách các cấu trúc lập trình sẽ tạo ra một cảnh báo." 
+ +#: pl_handler.c:189 +msgid "List of programming constructs that should produce an error." +msgstr "Danh sách các cấu trúc lập trình sẽ tạo ra lỗi." + +#. translator: %s is typically the translation of "syntax error" +#: pl_scanner.c:630 +#, c-format +msgid "%s at end of input" +msgstr "%s tại nơi kết thúc đầu vào" + +#. translator: first %s is typically the translation of "syntax error" +#: pl_scanner.c:646 +#, c-format +msgid "%s at or near \"%s\"" +msgstr "%s tại hoặc gần\"%s\"" diff --git a/src/pl/plpgsql/src/sql/plpgsql_cache.sql b/src/pl/plpgsql/src/sql/plpgsql_cache.sql new file mode 100644 index 0000000000..f3b64d9209 --- /dev/null +++ b/src/pl/plpgsql/src/sql/plpgsql_cache.sql @@ -0,0 +1,50 @@ +-- +-- Cache-behavior-dependent test cases +-- +-- These tests logically belong in plpgsql_record.sql, and perhaps someday +-- can be merged back into it. For now, however, their results are different +-- between regular and CLOBBER_CACHE_ALWAYS builds, so we must have two +-- expected-output files to cover both cases. To minimize the maintenance +-- effort resulting from that, this file should contain only tests that +-- do have different results under CLOBBER_CACHE_ALWAYS. +-- + +-- check behavior with changes of a named rowtype +create table c_mutable(f1 int, f2 text); + +create function c_sillyaddone(int) returns int language plpgsql as +$$ declare r c_mutable; begin r.f1 := $1; return r.f1 + 1; end $$; +select c_sillyaddone(42); + +alter table c_mutable drop column f1; +alter table c_mutable add column f1 float8; + +-- currently, this fails due to cached plan for "r.f1 + 1" expression +-- (but a CLOBBER_CACHE_ALWAYS build will succeed) +select c_sillyaddone(42); + +-- but it's OK if we force plan rebuilding +discard plans; +select c_sillyaddone(42); + +-- check behavior with changes in a record rowtype +create function show_result_type(text) returns text language plpgsql as +$$ + declare + r record; + t text; + begin + execute $1 into r; + select pg_typeof(r.a) into t; + return format('type %s value %s', t, r.a::text); + end; +$$; + +select show_result_type('select 1 as a'); +-- currently this fails due to cached plan for pg_typeof expression +-- (but a CLOBBER_CACHE_ALWAYS build will succeed) +select show_result_type('select 2.0 as a'); + +-- but it's OK if we force plan rebuilding +discard plans; +select show_result_type('select 2.0 as a'); diff --git a/src/pl/plpgsql/src/sql/plpgsql_call.sql b/src/pl/plpgsql/src/sql/plpgsql_call.sql new file mode 100644 index 0000000000..4702bd14d1 --- /dev/null +++ b/src/pl/plpgsql/src/sql/plpgsql_call.sql @@ -0,0 +1,317 @@ +-- +-- Tests for procedures / CALL syntax +-- + +CREATE PROCEDURE test_proc1() +LANGUAGE plpgsql +AS $$ +BEGIN + NULL; +END; +$$; + +CALL test_proc1(); + + +-- error: can't return non-NULL +CREATE PROCEDURE test_proc2() +LANGUAGE plpgsql +AS $$ +BEGIN + RETURN 5; +END; +$$; + + +CREATE TABLE test1 (a int); + +CREATE PROCEDURE test_proc3(x int) +LANGUAGE plpgsql +AS $$ +BEGIN + INSERT INTO test1 VALUES (x); +END; +$$; + +CALL test_proc3(55); + +SELECT * FROM test1; + + +-- nested CALL +TRUNCATE TABLE test1; + +CREATE PROCEDURE test_proc4(y int) +LANGUAGE plpgsql +AS $$ +BEGIN + CALL test_proc3(y); + CALL test_proc3($1); +END; +$$; + +CALL test_proc4(66); + +SELECT * FROM test1; + +CALL test_proc4(66); + +SELECT * FROM test1; + + +-- output arguments + +CREATE PROCEDURE test_proc5(INOUT a text) +LANGUAGE plpgsql +AS $$ +BEGIN + a := a || '+' || a; +END; +$$; + +CALL test_proc5('abc'); + + +CREATE PROCEDURE test_proc6(a int, 
INOUT b int, INOUT c int) +LANGUAGE plpgsql +AS $$ +BEGIN + b := b * a; + c := c * a; +END; +$$; + +CALL test_proc6(2, 3, 4); + + +DO +LANGUAGE plpgsql +$$ +DECLARE + x int := 3; + y int := 4; +BEGIN + CALL test_proc6(2, x, y); + RAISE INFO 'x = %, y = %', x, y; +END; +$$; + + +DO +LANGUAGE plpgsql +$$ +DECLARE + x int := 3; + y int := 4; +BEGIN + CALL test_proc6(2, x + 1, y); -- error + RAISE INFO 'x = %, y = %', x, y; +END; +$$; + + +DO +LANGUAGE plpgsql +$$ +DECLARE + x int := 3; + y int := 4; +BEGIN + FOR i IN 1..5 LOOP + CALL test_proc6(i, x, y); + RAISE INFO 'x = %, y = %', x, y; + END LOOP; +END; +$$; + + +-- recursive with output arguments + +CREATE PROCEDURE test_proc7(x int, INOUT a int, INOUT b numeric) +LANGUAGE plpgsql +AS $$ +BEGIN +IF x > 1 THEN + a := x / 10; + b := x / 2; + CALL test_proc7(b::int, a, b); +END IF; +END; +$$; + +CALL test_proc7(100, -1, -1); + + +-- named parameters and defaults + +CREATE PROCEDURE test_proc8a(INOUT a int, INOUT b int) +LANGUAGE plpgsql +AS $$ +BEGIN + RAISE NOTICE 'a: %, b: %', a, b; + a := a * 10; + b := b + 10; +END; +$$; + +CALL test_proc8a(10, 20); +CALL test_proc8a(b => 20, a => 10); + +DO $$ +DECLARE _a int; _b int; +BEGIN + _a := 10; _b := 30; + CALL test_proc8a(_a, _b); + RAISE NOTICE '_a: %, _b: %', _a, _b; + CALL test_proc8a(b => _b, a => _a); + RAISE NOTICE '_a: %, _b: %', _a, _b; +END +$$; + + +CREATE PROCEDURE test_proc8b(INOUT a int, INOUT b int, INOUT c int) +LANGUAGE plpgsql +AS $$ +BEGIN + RAISE NOTICE 'a: %, b: %, c: %', a, b, c; + a := a * 10; + b := b + 10; + c := c * -10; +END; +$$; + +DO $$ +DECLARE _a int; _b int; _c int; +BEGIN + _a := 10; _b := 30; _c := 50; + CALL test_proc8b(_a, _b, _c); + RAISE NOTICE '_a: %, _b: %, _c: %', _a, _b, _c; + CALL test_proc8b(_a, c => _c, b => _b); + RAISE NOTICE '_a: %, _b: %, _c: %', _a, _b, _c; +END +$$; + + +CREATE PROCEDURE test_proc8c(INOUT a int, INOUT b int, INOUT c int DEFAULT 11) +LANGUAGE plpgsql +AS $$ +BEGIN + RAISE NOTICE 'a: %, b: %, c: %', a, b, c; + a := a * 10; + b := b + 10; + c := c * -10; +END; +$$; + +DO $$ +DECLARE _a int; _b int; _c int; +BEGIN + _a := 10; _b := 30; _c := 50; + CALL test_proc8c(_a, _b, _c); + RAISE NOTICE '_a: %, _b: %, _c: %', _a, _b, _c; + _a := 10; _b := 30; _c := 50; + CALL test_proc8c(_a, c => _c, b => _b); + RAISE NOTICE '_a: %, _b: %, _c: %', _a, _b, _c; + _a := 10; _b := 30; _c := 50; + CALL test_proc8c(c => _c, b => _b, a => _a); + RAISE NOTICE '_a: %, _b: %, _c: %', _a, _b, _c; +END +$$; + +DO $$ +DECLARE _a int; _b int; _c int; +BEGIN + _a := 10; _b := 30; _c := 50; + CALL test_proc8c(_a, _b); -- fail, no output argument for c + RAISE NOTICE '_a: %, _b: %, _c: %', _a, _b, _c; +END +$$; + +DO $$ +DECLARE _a int; _b int; _c int; +BEGIN + _a := 10; _b := 30; _c := 50; + CALL test_proc8c(_a, b => _b); -- fail, no output argument for c + RAISE NOTICE '_a: %, _b: %, _c: %', _a, _b, _c; +END +$$; + + +-- transition variable assignment + +TRUNCATE test1; + +CREATE FUNCTION triggerfunc1() RETURNS trigger +LANGUAGE plpgsql +AS $$ +DECLARE + z int := 0; +BEGIN + CALL test_proc6(2, NEW.a, NEW.a); + RETURN NEW; +END; +$$; + +CREATE TRIGGER t1 BEFORE INSERT ON test1 EXECUTE PROCEDURE triggerfunc1(); + +INSERT INTO test1 VALUES (1), (2), (3); + +UPDATE test1 SET a = 22 WHERE a = 2; + +SELECT * FROM test1 ORDER BY a; + + +DROP PROCEDURE test_proc1; +DROP PROCEDURE test_proc3; +DROP PROCEDURE test_proc4; + +DROP TABLE test1; + + +-- more checks for named-parameter handling + +CREATE PROCEDURE p1(v_cnt int, v_Text inout text = NULL) +AS $$ +BEGIN + 
v_Text := 'v_cnt = ' || v_cnt; +END +$$ LANGUAGE plpgsql; + +DO $$ +DECLARE + v_Text text; + v_cnt integer := 42; +BEGIN + CALL p1(v_cnt := v_cnt); -- error, must supply something for v_Text + RAISE NOTICE '%', v_Text; +END; +$$; + +DO $$ +DECLARE + v_Text text; + v_cnt integer := 42; +BEGIN + CALL p1(v_cnt := v_cnt, v_Text := v_Text); + RAISE NOTICE '%', v_Text; +END; +$$; + +DO $$ +DECLARE + v_Text text; +BEGIN + CALL p1(10, v_Text := v_Text); + RAISE NOTICE '%', v_Text; +END; +$$; + +DO $$ +DECLARE + v_Text text; + v_cnt integer; +BEGIN + CALL p1(v_Text := v_Text, v_cnt := v_cnt); + RAISE NOTICE '%', v_Text; +END; +$$; diff --git a/src/pl/plpgsql/src/sql/plpgsql_control.sql b/src/pl/plpgsql/src/sql/plpgsql_control.sql new file mode 100644 index 0000000000..61d6ca6451 --- /dev/null +++ b/src/pl/plpgsql/src/sql/plpgsql_control.sql @@ -0,0 +1,476 @@ +-- +-- Tests for PL/pgSQL control structures +-- + +-- integer FOR loop + +do $$ +begin + -- basic case + for i in 1..3 loop + raise notice '1..3: i = %', i; + end loop; + -- with BY, end matches exactly + for i in 1..10 by 3 loop + raise notice '1..10 by 3: i = %', i; + end loop; + -- with BY, end does not match + for i in 1..11 by 3 loop + raise notice '1..11 by 3: i = %', i; + end loop; + -- zero iterations + for i in 1..0 by 3 loop + raise notice '1..0 by 3: i = %', i; + end loop; + -- REVERSE + for i in reverse 10..0 by 3 loop + raise notice 'reverse 10..0 by 3: i = %', i; + end loop; + -- potential overflow + for i in 2147483620..2147483647 by 10 loop + raise notice '2147483620..2147483647 by 10: i = %', i; + end loop; + -- potential overflow, reverse direction + for i in reverse -2147483620..-2147483647 by 10 loop + raise notice 'reverse -2147483620..-2147483647 by 10: i = %', i; + end loop; +end$$; + +-- BY can't be zero or negative +do $$ +begin + for i in 1..3 by 0 loop + raise notice '1..3 by 0: i = %', i; + end loop; +end$$; + +do $$ +begin + for i in 1..3 by -1 loop + raise notice '1..3 by -1: i = %', i; + end loop; +end$$; + +do $$ +begin + for i in reverse 1..3 by -1 loop + raise notice 'reverse 1..3 by -1: i = %', i; + end loop; +end$$; + + +-- CONTINUE statement + +create table conttesttbl(idx serial, v integer); +insert into conttesttbl(v) values(10); +insert into conttesttbl(v) values(20); +insert into conttesttbl(v) values(30); +insert into conttesttbl(v) values(40); + +create function continue_test1() returns void as $$ +declare _i integer = 0; _r record; +begin + raise notice '---1---'; + loop + _i := _i + 1; + raise notice '%', _i; + continue when _i < 10; + exit; + end loop; + + raise notice '---2---'; + <> + loop + _i := _i - 1; + loop + raise notice '%', _i; + continue lbl when _i > 0; + exit lbl; + end loop; + end loop; + + raise notice '---3---'; + <> + while _i < 10 loop + _i := _i + 1; + continue the_loop when _i % 2 = 0; + raise notice '%', _i; + end loop; + + raise notice '---4---'; + for _i in 1..10 loop + begin + -- applies to outer loop, not the nested begin block + continue when _i < 5; + raise notice '%', _i; + end; + end loop; + + raise notice '---5---'; + for _r in select * from conttesttbl loop + continue when _r.v <= 20; + raise notice '%', _r.v; + end loop; + + raise notice '---6---'; + for _r in execute 'select * from conttesttbl' loop + continue when _r.v <= 20; + raise notice '%', _r.v; + end loop; + + raise notice '---7---'; + <> + for _i in 1..3 loop + continue looplabel when _i = 2; + raise notice '%', _i; + end loop; + + raise notice '---8---'; + _i := 1; + while _i <= 3 loop + raise notice '%', 
_i; + _i := _i + 1; + continue when _i = 3; + end loop; + + raise notice '---9---'; + for _r in select * from conttesttbl order by v limit 1 loop + raise notice '%', _r.v; + continue; + end loop; + + raise notice '---10---'; + for _r in execute 'select * from conttesttbl order by v limit 1' loop + raise notice '%', _r.v; + continue; + end loop; + + raise notice '---11---'; + <> + for _i in 1..2 loop + raise notice 'outer %', _i; + <> + for _j in 1..3 loop + continue outerlooplabel when _j = 2; + raise notice 'inner %', _j; + end loop; + end loop; +end; $$ language plpgsql; + +select continue_test1(); + +-- should fail: CONTINUE is only legal inside a loop +create function continue_error1() returns void as $$ +begin + begin + continue; + end; +end; +$$ language plpgsql; + +-- should fail: unlabeled EXIT is only legal inside a loop +create function exit_error1() returns void as $$ +begin + begin + exit; + end; +end; +$$ language plpgsql; + +-- should fail: no such label +create function continue_error2() returns void as $$ +begin + begin + loop + continue no_such_label; + end loop; + end; +end; +$$ language plpgsql; + +-- should fail: no such label +create function exit_error2() returns void as $$ +begin + begin + loop + exit no_such_label; + end loop; + end; +end; +$$ language plpgsql; + +-- should fail: CONTINUE can't reference the label of a named block +create function continue_error3() returns void as $$ +begin + <> + begin + loop + continue begin_block1; + end loop; + end; +end; +$$ language plpgsql; + +-- On the other hand, EXIT *can* reference the label of a named block +create function exit_block1() returns void as $$ +begin + <> + begin + loop + exit begin_block1; + raise exception 'should not get here'; + end loop; + end; +end; +$$ language plpgsql; + +select exit_block1(); + +-- verbose end block and end loop +create function end_label1() returns void as $$ +<> +begin + <> + for i in 1 .. 10 loop + raise notice 'i = %', i; + exit flbl1; + end loop flbl1; + <> + for j in 1 .. 10 loop + raise notice 'j = %', j; + exit flbl2; + end loop; +end blbl; +$$ language plpgsql; + +select end_label1(); + +-- should fail: undefined end label +create function end_label2() returns void as $$ +begin + for _i in 1 .. 10 loop + exit; + end loop flbl1; +end; +$$ language plpgsql; + +-- should fail: end label does not match start label +create function end_label3() returns void as $$ +<> +begin + <> + for _i in 1 .. 10 loop + exit; + end loop outer_label; +end; +$$ language plpgsql; + +-- should fail: end label on a block without a start label +create function end_label4() returns void as $$ +<> +begin + for _i in 1 .. 
10 loop + exit; + end loop outer_label; +end; +$$ language plpgsql; + +-- unlabeled exit matches no blocks +do $$ +begin +for i in 1..10 loop + <> + begin + begin -- unlabeled block + exit; + raise notice 'should not get here'; + end; + raise notice 'should not get here, either'; + end; + raise notice 'nor here'; +end loop; +raise notice 'should get here'; +end$$; + +-- check exit out of an unlabeled block to a labeled one +do $$ +<> +begin + <> + begin + <> + begin + begin -- unlabeled block + exit innerblock; + raise notice 'should not get here'; + end; + raise notice 'should not get here, either'; + end; + raise notice 'nor here'; + end; + raise notice 'should get here'; +end$$; + +-- unlabeled exit does match a while loop +do $$ +begin + <> + while 1 > 0 loop + <> + while 1 > 0 loop + <> + while 1 > 0 loop + exit; + raise notice 'should not get here'; + end loop; + raise notice 'should get here'; + exit outermostwhile; + raise notice 'should not get here, either'; + end loop; + raise notice 'nor here'; + end loop; + raise notice 'should get here, too'; +end$$; + +-- check exit out of an unlabeled while to a labeled one +do $$ +begin + <> + while 1 > 0 loop + while 1 > 0 loop + exit outerwhile; + raise notice 'should not get here'; + end loop; + raise notice 'should not get here, either'; + end loop; + raise notice 'should get here'; +end$$; + +-- continue to an outer while +do $$ +declare i int := 0; +begin + <> + while i < 2 loop + raise notice 'outermostwhile, i = %', i; + i := i + 1; + <> + while 1 > 0 loop + <> + while 1 > 0 loop + continue outermostwhile; + raise notice 'should not get here'; + end loop; + raise notice 'should not get here, either'; + end loop; + raise notice 'nor here'; + end loop; + raise notice 'out of outermostwhile, i = %', i; +end$$; + +-- return out of a while +create function return_from_while() returns int language plpgsql as $$ +declare i int := 0; +begin + while i < 10 loop + if i > 2 then + return i; + end if; + i := i + 1; + end loop; + return null; +end$$; + +select return_from_while(); + +-- using list of scalars in fori and fore stmts +create function for_vect() returns void as $proc$ +<>declare a integer; b varchar; c varchar; r record; +begin + -- fori + for i in 1 .. 
3 loop + raise notice '%', i; + end loop; + -- fore with record var + for r in select gs as aa, 'BB' as bb, 'CC' as cc from generate_series(1,4) gs loop + raise notice '% % %', r.aa, r.bb, r.cc; + end loop; + -- fore with single scalar + for a in select gs from generate_series(1,4) gs loop + raise notice '%', a; + end loop; + -- fore with multiple scalars + for a,b,c in select gs, 'BB','CC' from generate_series(1,4) gs loop + raise notice '% % %', a, b, c; + end loop; + -- using qualified names in fors, fore is enabled, disabled only for fori + for lbl.a, lbl.b, lbl.c in execute $$select gs, 'bb','cc' from generate_series(1,4) gs$$ loop + raise notice '% % %', a, b, c; + end loop; +end; +$proc$ language plpgsql; + +select for_vect(); + +-- CASE statement + +create or replace function case_test(bigint) returns text as $$ +declare a int = 10; + b int = 1; +begin + case $1 + when 1 then + return 'one'; + when 2 then + return 'two'; + when 3,4,3+5 then + return 'three, four or eight'; + when a then + return 'ten'; + when a+b, a+b+1 then + return 'eleven, twelve'; + end case; +end; +$$ language plpgsql immutable; + +select case_test(1); +select case_test(2); +select case_test(3); +select case_test(4); +select case_test(5); -- fails +select case_test(8); +select case_test(10); +select case_test(11); +select case_test(12); +select case_test(13); -- fails + +create or replace function catch() returns void as $$ +begin + raise notice '%', case_test(6); +exception + when case_not_found then + raise notice 'caught case_not_found % %', SQLSTATE, SQLERRM; +end +$$ language plpgsql; + +select catch(); + +-- test the searched variant too, as well as ELSE +create or replace function case_test(bigint) returns text as $$ +declare a int = 10; +begin + case + when $1 = 1 then + return 'one'; + when $1 = a + 2 then + return 'twelve'; + else + return 'other'; + end case; +end; +$$ language plpgsql immutable; + +select case_test(1); +select case_test(2); +select case_test(12); +select case_test(13); diff --git a/src/pl/plpgsql/src/sql/plpgsql_domain.sql b/src/pl/plpgsql/src/sql/plpgsql_domain.sql new file mode 100644 index 0000000000..8f99aae5a9 --- /dev/null +++ b/src/pl/plpgsql/src/sql/plpgsql_domain.sql @@ -0,0 +1,279 @@ +-- +-- Tests for PL/pgSQL's behavior with domain types +-- + +CREATE DOMAIN booltrue AS bool CHECK (VALUE IS TRUE OR VALUE IS NULL); + +CREATE FUNCTION test_argresult_booltrue(x booltrue, y bool) RETURNS booltrue AS $$ +begin +return y; +end +$$ LANGUAGE plpgsql; + +SELECT * FROM test_argresult_booltrue(true, true); +SELECT * FROM test_argresult_booltrue(false, true); +SELECT * FROM test_argresult_booltrue(true, false); + +CREATE FUNCTION test_assign_booltrue(x bool, y bool) RETURNS booltrue AS $$ +declare v booltrue := x; +begin +v := y; +return v; +end +$$ LANGUAGE plpgsql; + +SELECT * FROM test_assign_booltrue(true, true); +SELECT * FROM test_assign_booltrue(false, true); +SELECT * FROM test_assign_booltrue(true, false); + + +CREATE DOMAIN uint2 AS int2 CHECK (VALUE >= 0); + +CREATE FUNCTION test_argresult_uint2(x uint2, y int) RETURNS uint2 AS $$ +begin +return y; +end +$$ LANGUAGE plpgsql; + +SELECT * FROM test_argresult_uint2(100::uint2, 50); +SELECT * FROM test_argresult_uint2(100::uint2, -50); +SELECT * FROM test_argresult_uint2(null, 1); + +CREATE FUNCTION test_assign_uint2(x int, y int) RETURNS uint2 AS $$ +declare v uint2 := x; +begin +v := y; +return v; +end +$$ LANGUAGE plpgsql; + +SELECT * FROM test_assign_uint2(100, 50); +SELECT * FROM test_assign_uint2(100, -50); +SELECT * 
FROM test_assign_uint2(-100, 50); +SELECT * FROM test_assign_uint2(null, 1); + + +CREATE DOMAIN nnint AS int NOT NULL; + +CREATE FUNCTION test_argresult_nnint(x nnint, y int) RETURNS nnint AS $$ +begin +return y; +end +$$ LANGUAGE plpgsql; + +SELECT * FROM test_argresult_nnint(10, 20); +SELECT * FROM test_argresult_nnint(null, 20); +SELECT * FROM test_argresult_nnint(10, null); + +CREATE FUNCTION test_assign_nnint(x int, y int) RETURNS nnint AS $$ +declare v nnint := x; +begin +v := y; +return v; +end +$$ LANGUAGE plpgsql; + +SELECT * FROM test_assign_nnint(10, 20); +SELECT * FROM test_assign_nnint(null, 20); +SELECT * FROM test_assign_nnint(10, null); + + +-- +-- Domains over arrays +-- + +CREATE DOMAIN ordered_pair_domain AS integer[] CHECK (array_length(VALUE,1)=2 AND VALUE[1] < VALUE[2]); + +CREATE FUNCTION test_argresult_array_domain(x ordered_pair_domain) + RETURNS ordered_pair_domain AS $$ +begin +return x; +end +$$ LANGUAGE plpgsql; + +SELECT * FROM test_argresult_array_domain(ARRAY[0, 100]::ordered_pair_domain); +SELECT * FROM test_argresult_array_domain(NULL::ordered_pair_domain); + +CREATE FUNCTION test_argresult_array_domain_check_violation() + RETURNS ordered_pair_domain AS $$ +begin +return array[2,1]; +end +$$ LANGUAGE plpgsql; + +SELECT * FROM test_argresult_array_domain_check_violation(); + +CREATE FUNCTION test_assign_ordered_pair_domain(x int, y int, z int) RETURNS ordered_pair_domain AS $$ +declare v ordered_pair_domain := array[x, y]; +begin +v[2] := z; +return v; +end +$$ LANGUAGE plpgsql; + +SELECT * FROM test_assign_ordered_pair_domain(1,2,3); +SELECT * FROM test_assign_ordered_pair_domain(1,2,0); +SELECT * FROM test_assign_ordered_pair_domain(2,1,3); + + +-- +-- Arrays of domains +-- + +CREATE FUNCTION test_read_uint2_array(x uint2[]) RETURNS uint2 AS $$ +begin +return x[1]; +end +$$ LANGUAGE plpgsql; + +select test_read_uint2_array(array[1::uint2]); + +CREATE FUNCTION test_build_uint2_array(x int2) RETURNS uint2[] AS $$ +begin +return array[x, x]; +end +$$ LANGUAGE plpgsql; + +select test_build_uint2_array(1::int2); +select test_build_uint2_array(-1::int2); -- fail + +CREATE FUNCTION test_argresult_domain_array(x integer[]) + RETURNS ordered_pair_domain[] AS $$ +begin +return array[x::ordered_pair_domain, x::ordered_pair_domain]; +end +$$ LANGUAGE plpgsql; + +select test_argresult_domain_array(array[2,4]); +select test_argresult_domain_array(array[4,2]); -- fail + +CREATE FUNCTION test_argresult_domain_array2(x ordered_pair_domain) + RETURNS integer AS $$ +begin +return x[1]; +end +$$ LANGUAGE plpgsql; + +select test_argresult_domain_array2(array[2,4]); +select test_argresult_domain_array2(array[4,2]); -- fail + +CREATE FUNCTION test_argresult_array_domain_array(x ordered_pair_domain[]) + RETURNS ordered_pair_domain AS $$ +begin +return x[1]; +end +$$ LANGUAGE plpgsql; + +select test_argresult_array_domain_array(array[array[2,4]::ordered_pair_domain]); + + +-- +-- Domains within composite +-- + +CREATE TYPE nnint_container AS (f1 int, f2 nnint); + +CREATE FUNCTION test_result_nnint_container(x int, y int) + RETURNS nnint_container AS $$ +begin +return row(x, y)::nnint_container; +end +$$ LANGUAGE plpgsql; + +SELECT test_result_nnint_container(null, 3); +SELECT test_result_nnint_container(3, null); -- fail + +CREATE FUNCTION test_assign_nnint_container(x int, y int, z int) + RETURNS nnint_container AS $$ +declare v nnint_container := row(x, y); +begin +v.f2 := z; +return v; +end +$$ LANGUAGE plpgsql; + +SELECT * FROM test_assign_nnint_container(1,2,3); +SELECT * 
FROM test_assign_nnint_container(1,2,null); +SELECT * FROM test_assign_nnint_container(1,null,3); + +-- Since core system allows this: +SELECT null::nnint_container; +-- so should PL/PgSQL + +CREATE FUNCTION test_assign_nnint_container2(x int, y int, z int) + RETURNS nnint_container AS $$ +declare v nnint_container; +begin +v.f2 := z; +return v; +end +$$ LANGUAGE plpgsql; + +SELECT * FROM test_assign_nnint_container2(1,2,3); +SELECT * FROM test_assign_nnint_container2(1,2,null); + + +-- +-- Domains of composite +-- + +CREATE TYPE named_pair AS ( + i integer, + j integer +); + +CREATE DOMAIN ordered_named_pair AS named_pair CHECK((VALUE).i <= (VALUE).j); + +CREATE FUNCTION read_ordered_named_pair(p ordered_named_pair) RETURNS integer AS $$ +begin +return p.i + p.j; +end +$$ LANGUAGE plpgsql; + +SELECT read_ordered_named_pair(row(1, 2)); +SELECT read_ordered_named_pair(row(2, 1)); -- fail + +CREATE FUNCTION build_ordered_named_pair(i int, j int) RETURNS ordered_named_pair AS $$ +begin +return row(i, j); +end +$$ LANGUAGE plpgsql; + +SELECT build_ordered_named_pair(1,2); +SELECT build_ordered_named_pair(2,1); -- fail + +CREATE FUNCTION test_assign_ordered_named_pair(x int, y int, z int) + RETURNS ordered_named_pair AS $$ +declare v ordered_named_pair := row(x, y); +begin +v.j := z; +return v; +end +$$ LANGUAGE plpgsql; + +SELECT * FROM test_assign_ordered_named_pair(1,2,3); +SELECT * FROM test_assign_ordered_named_pair(1,2,0); +SELECT * FROM test_assign_ordered_named_pair(2,1,3); + +CREATE FUNCTION build_ordered_named_pairs(i int, j int) RETURNS ordered_named_pair[] AS $$ +begin +return array[row(i, j), row(i, j+1)]; +end +$$ LANGUAGE plpgsql; + +SELECT build_ordered_named_pairs(1,2); +SELECT build_ordered_named_pairs(2,1); -- fail + +CREATE FUNCTION test_assign_ordered_named_pairs(x int, y int, z int) + RETURNS ordered_named_pair[] AS $$ +declare v ordered_named_pair[] := array[row(x, y)]; +begin +-- ideally this would work, but it doesn't yet: +-- v[1].j := z; +return v; +end +$$ LANGUAGE plpgsql; + +SELECT * FROM test_assign_ordered_named_pairs(1,2,3); +SELECT * FROM test_assign_ordered_named_pairs(2,1,3); +SELECT * FROM test_assign_ordered_named_pairs(1,2,0); -- should fail someday diff --git a/src/pl/plpgsql/src/sql/plpgsql_record.sql b/src/pl/plpgsql/src/sql/plpgsql_record.sql new file mode 100644 index 0000000000..88333d45e1 --- /dev/null +++ b/src/pl/plpgsql/src/sql/plpgsql_record.sql @@ -0,0 +1,443 @@ +-- +-- Tests for PL/pgSQL handling of composite (record) variables +-- + +create type two_int4s as (f1 int4, f2 int4); +create type two_int8s as (q1 int8, q2 int8); + +-- base-case return of a composite type +create function retc(int) returns two_int8s language plpgsql as +$$ begin return row($1,1)::two_int8s; end $$; +select retc(42); + +-- ok to return a matching record type +create or replace function retc(int) returns two_int8s language plpgsql as +$$ begin return row($1::int8, 1::int8); end $$; +select retc(42); + +-- we don't currently support implicit casting +create or replace function retc(int) returns two_int8s language plpgsql as +$$ begin return row($1,1); end $$; +select retc(42); + +-- nor extra columns +create or replace function retc(int) returns two_int8s language plpgsql as +$$ begin return row($1::int8, 1::int8, 42); end $$; +select retc(42); + +-- same cases with an intermediate "record" variable +create or replace function retc(int) returns two_int8s language plpgsql as +$$ declare r record; begin r := row($1::int8, 1::int8); return r; end $$; +select retc(42); + 
+create or replace function retc(int) returns two_int8s language plpgsql as +$$ declare r record; begin r := row($1,1); return r; end $$; +select retc(42); + +create or replace function retc(int) returns two_int8s language plpgsql as +$$ declare r record; begin r := row($1::int8, 1::int8, 42); return r; end $$; +select retc(42); + +-- but, for mostly historical reasons, we do convert when assigning +-- to a named-composite-type variable +create or replace function retc(int) returns two_int8s language plpgsql as +$$ declare r two_int8s; begin r := row($1::int8, 1::int8, 42); return r; end $$; +select retc(42); + +do $$ declare c two_int8s; +begin c := row(1,2); raise notice 'c = %', c; end$$; + +do $$ declare c two_int8s; +begin for c in select 1,2 loop raise notice 'c = %', c; end loop; end$$; + +do $$ declare c4 two_int4s; c8 two_int8s; +begin + c8 := row(1,2); + c4 := c8; + c8 := c4; + raise notice 'c4 = %', c4; + raise notice 'c8 = %', c8; +end$$; + +-- check passing composite result to another function +create function getq1(two_int8s) returns int8 language plpgsql as $$ +declare r two_int8s; begin r := $1; return r.q1; end $$; + +select getq1(retc(344)); +select getq1(row(1,2)); + +do $$ +declare r1 two_int8s; r2 record; x int8; +begin + r1 := retc(345); + perform getq1(r1); + x := getq1(r1); + raise notice 'x = %', x; + r2 := retc(346); + perform getq1(r2); + x := getq1(r2); + raise notice 'x = %', x; +end$$; + +-- check assignments of composites +do $$ +declare r1 two_int8s; r2 two_int8s; r3 record; r4 record; +begin + r1 := row(1,2); + raise notice 'r1 = %', r1; + r1 := r1; -- shouldn't do anything + raise notice 'r1 = %', r1; + r2 := r1; + raise notice 'r1 = %', r1; + raise notice 'r2 = %', r2; + r2.q2 = r1.q1 + 3; -- check that r2 has distinct storage + raise notice 'r1 = %', r1; + raise notice 'r2 = %', r2; + r1 := null; + raise notice 'r1 = %', r1; + raise notice 'r2 = %', r2; + r1 := row(7,11)::two_int8s; + r2 := r1; + raise notice 'r1 = %', r1; + raise notice 'r2 = %', r2; + r3 := row(1,2); + r4 := r3; + raise notice 'r3 = %', r3; + raise notice 'r4 = %', r4; + r4.f1 := r4.f1 + 3; -- check that r4 has distinct storage + raise notice 'r3 = %', r3; + raise notice 'r4 = %', r4; + r1 := r3; + raise notice 'r1 = %', r1; + r4 := r1; + raise notice 'r4 = %', r4; + r4.q2 := r4.q2 + 1; -- r4's field names have changed + raise notice 'r4 = %', r4; +end$$; + +-- fields of named-type vars read as null if uninitialized +do $$ +declare r1 two_int8s; +begin + raise notice 'r1 = %', r1; + raise notice 'r1.q1 = %', r1.q1; + raise notice 'r1.q2 = %', r1.q2; + raise notice 'r1 = %', r1; +end$$; + +do $$ +declare r1 two_int8s; +begin + raise notice 'r1.q1 = %', r1.q1; + raise notice 'r1.q2 = %', r1.q2; + raise notice 'r1 = %', r1; + raise notice 'r1.nosuchfield = %', r1.nosuchfield; +end$$; + +-- records, not so much +do $$ +declare r1 record; +begin + raise notice 'r1 = %', r1; + raise notice 'r1.f1 = %', r1.f1; + raise notice 'r1.f2 = %', r1.f2; + raise notice 'r1 = %', r1; +end$$; + +-- but OK if you assign first +do $$ +declare r1 record; +begin + raise notice 'r1 = %', r1; + r1 := row(1,2); + raise notice 'r1.f1 = %', r1.f1; + raise notice 'r1.f2 = %', r1.f2; + raise notice 'r1 = %', r1; + raise notice 'r1.nosuchfield = %', r1.nosuchfield; +end$$; + +-- check repeated assignments to composite fields +create table some_table (id int, data text); + +do $$ +declare r some_table; +begin + r := (23, 'skidoo'); + for i in 1 .. 
10 loop + r.id := r.id + i; + r.data := r.data || ' ' || i; + end loop; + raise notice 'r = %', r; +end$$; + +-- check behavior of function declared to return "record" + +create function returnsrecord(int) returns record language plpgsql as +$$ begin return row($1,$1+1); end $$; + +select returnsrecord(42); +select * from returnsrecord(42) as r(x int, y int); +select * from returnsrecord(42) as r(x int, y int, z int); -- fail +select * from returnsrecord(42) as r(x int, y bigint); -- fail + +-- same with an intermediate record variable +create or replace function returnsrecord(int) returns record language plpgsql as +$$ declare r record; begin r := row($1,$1+1); return r; end $$; + +select returnsrecord(42); +select * from returnsrecord(42) as r(x int, y int); +select * from returnsrecord(42) as r(x int, y int, z int); -- fail +select * from returnsrecord(42) as r(x int, y bigint); -- fail + +-- should work the same with a missing column in the actual result value +create table has_hole(f1 int, f2 int, f3 int); +alter table has_hole drop column f2; + +create or replace function returnsrecord(int) returns record language plpgsql as +$$ begin return row($1,$1+1)::has_hole; end $$; + +select returnsrecord(42); +select * from returnsrecord(42) as r(x int, y int); +select * from returnsrecord(42) as r(x int, y int, z int); -- fail +select * from returnsrecord(42) as r(x int, y bigint); -- fail + +-- same with an intermediate record variable +create or replace function returnsrecord(int) returns record language plpgsql as +$$ declare r record; begin r := row($1,$1+1)::has_hole; return r; end $$; + +select returnsrecord(42); +select * from returnsrecord(42) as r(x int, y int); +select * from returnsrecord(42) as r(x int, y int, z int); -- fail +select * from returnsrecord(42) as r(x int, y bigint); -- fail + +-- check access to a field of an argument declared "record" +create function getf1(x record) returns int language plpgsql as +$$ begin return x.f1; end $$; +select getf1(1); +select getf1(row(1,2)); +-- a CLOBBER_CACHE_ALWAYS build will report this error with a different +-- context stack than other builds, so suppress context output +\set SHOW_CONTEXT never +select getf1(row(1,2)::two_int8s); +\set SHOW_CONTEXT errors +select getf1(row(1,2)); + +-- check behavior when assignment to FOR-loop variable requires coercion +do $$ +declare r two_int8s; +begin + for r in select i, i+1 from generate_series(1,4) i + loop + raise notice 'r = %', r; + end loop; +end$$; + +-- check behavior when returning setof composite +create function returnssetofholes() returns setof has_hole language plpgsql as +$$ +declare r record; + h has_hole; +begin + return next h; + r := (1,2); + h := (3,4); + return next r; + return next h; + return next row(5,6); + return next row(7,8)::has_hole; +end$$; +select returnssetofholes(); + +create or replace function returnssetofholes() returns setof has_hole language plpgsql as +$$ +declare r record; +begin + return next r; -- fails, not assigned yet +end$$; +select returnssetofholes(); + +create or replace function returnssetofholes() returns setof has_hole language plpgsql as +$$ +begin + return next row(1,2,3); -- fails +end$$; +select returnssetofholes(); + +-- check behavior with changes of a named rowtype +create table mutable(f1 int, f2 text); + +create function sillyaddone(int) returns int language plpgsql as +$$ declare r mutable; begin r.f1 := $1; return r.f1 + 1; end $$; +select sillyaddone(42); + +-- test for change of type of column f1 should be here someday; +-- 
for now see plpgsql_cache test + +alter table mutable drop column f1; +select sillyaddone(42); -- fail + +create function getf3(x mutable) returns int language plpgsql as +$$ begin return x.f3; end $$; +select getf3(null::mutable); -- doesn't work yet +alter table mutable add column f3 int; +select getf3(null::mutable); -- now it works +alter table mutable drop column f3; +-- a CLOBBER_CACHE_ALWAYS build will report this error with a different +-- context stack than other builds, so suppress context output +\set SHOW_CONTEXT never +select getf3(null::mutable); -- fails again +\set SHOW_CONTEXT errors + +-- check access to system columns in a record variable + +create function sillytrig() returns trigger language plpgsql as +$$begin + raise notice 'old.ctid = %', old.ctid; + raise notice 'old.tableoid = %', old.tableoid::regclass; + return new; +end$$; + +create trigger mutable_trig before update on mutable for each row +execute procedure sillytrig(); + +insert into mutable values ('foo'), ('bar'); +update mutable set f2 = f2 || ' baz'; +table mutable; + +-- check returning a composite datum from a trigger + +create or replace function sillytrig() returns trigger language plpgsql as +$$begin + return row(new.*); +end$$; + +update mutable set f2 = f2 || ' baz'; +table mutable; + +create or replace function sillytrig() returns trigger language plpgsql as +$$declare r record; +begin + r := row(new.*); + return r; +end$$; + +update mutable set f2 = f2 || ' baz'; +table mutable; + +-- +-- Domains of composite +-- + +create domain ordered_int8s as two_int8s check((value).q1 <= (value).q2); + +create function read_ordered_int8s(p ordered_int8s) returns int8 as $$ +begin return p.q1 + p.q2; end +$$ language plpgsql; + +select read_ordered_int8s(row(1, 2)); +select read_ordered_int8s(row(2, 1)); -- fail + +create function build_ordered_int8s(i int8, j int8) returns ordered_int8s as $$ +begin return row(i,j); end +$$ language plpgsql; + +select build_ordered_int8s(1,2); +select build_ordered_int8s(2,1); -- fail + +create function build_ordered_int8s_2(i int8, j int8) returns ordered_int8s as $$ +declare r record; begin r := row(i,j); return r; end +$$ language plpgsql; + +select build_ordered_int8s_2(1,2); +select build_ordered_int8s_2(2,1); -- fail + +create function build_ordered_int8s_3(i int8, j int8) returns ordered_int8s as $$ +declare r two_int8s; begin r := row(i,j); return r; end +$$ language plpgsql; + +select build_ordered_int8s_3(1,2); +select build_ordered_int8s_3(2,1); -- fail + +create function build_ordered_int8s_4(i int8, j int8) returns ordered_int8s as $$ +declare r ordered_int8s; begin r := row(i,j); return r; end +$$ language plpgsql; + +select build_ordered_int8s_4(1,2); +select build_ordered_int8s_4(2,1); -- fail + +create function build_ordered_int8s_a(i int8, j int8) returns ordered_int8s[] as $$ +begin return array[row(i,j), row(i,j+1)]; end +$$ language plpgsql; + +select build_ordered_int8s_a(1,2); +select build_ordered_int8s_a(2,1); -- fail + +-- check field assignment +do $$ +declare r ordered_int8s; +begin + r.q1 := null; + r.q2 := 43; + r.q1 := 42; + r.q2 := 41; -- fail +end$$; + +-- check whole-row assignment +do $$ +declare r ordered_int8s; +begin + r := null; + r := row(null,null); + r := row(1,2); + r := row(2,1); -- fail +end$$; + +-- check assignment in for-loop +do $$ +declare r ordered_int8s; +begin + for r in values (1,2),(3,4),(6,5) loop + raise notice 'r = %', r; + end loop; +end$$; + +-- check behavior with toastable fields, too + +create type two_texts as 
(f1 text, f2 text); +create domain ordered_texts as two_texts check((value).f1 <= (value).f2); + +create table sometable (id int, a text, b text); +-- b should be compressed, but in-line +insert into sometable values (1, 'a', repeat('ffoob',1000)); +-- this b should be out-of-line +insert into sometable values (2, 'a', repeat('ffoob',100000)); +-- this pair should fail the domain check +insert into sometable values (3, 'z', repeat('ffoob',100000)); + +do $$ +declare d ordered_texts; +begin + for d in select a, b from sometable loop + raise notice 'succeeded at "%"', d.f1; + end loop; +end$$; + +do $$ +declare r record; d ordered_texts; +begin + for r in select * from sometable loop + raise notice 'processing row %', r.id; + d := row(r.a, r.b); + end loop; +end$$; + +do $$ +declare r record; d ordered_texts; +begin + for r in select * from sometable loop + raise notice 'processing row %', r.id; + d := null; + d.f1 := r.a; + d.f2 := r.b; + end loop; +end$$; diff --git a/src/pl/plpgsql/src/sql/plpgsql_transaction.sql b/src/pl/plpgsql/src/sql/plpgsql_transaction.sql new file mode 100644 index 0000000000..ac1361a8ce --- /dev/null +++ b/src/pl/plpgsql/src/sql/plpgsql_transaction.sql @@ -0,0 +1,450 @@ +CREATE TABLE test1 (a int, b text); + + +CREATE PROCEDURE transaction_test1(x int, y text) +LANGUAGE plpgsql +AS $$ +BEGIN + FOR i IN 0..x LOOP + INSERT INTO test1 (a, b) VALUES (i, y); + IF i % 2 = 0 THEN + COMMIT; + ELSE + ROLLBACK; + END IF; + END LOOP; +END +$$; + +CALL transaction_test1(9, 'foo'); + +SELECT * FROM test1; + + +TRUNCATE test1; + +DO +LANGUAGE plpgsql +$$ +BEGIN + FOR i IN 0..9 LOOP + INSERT INTO test1 (a) VALUES (i); + IF i % 2 = 0 THEN + COMMIT; + ELSE + ROLLBACK; + END IF; + END LOOP; +END +$$; + +SELECT * FROM test1; + + +-- transaction commands not allowed when called in transaction block +START TRANSACTION; +CALL transaction_test1(9, 'error'); +COMMIT; + +START TRANSACTION; +DO LANGUAGE plpgsql $$ BEGIN COMMIT; END $$; +COMMIT; + + +TRUNCATE test1; + +-- not allowed in a function +CREATE FUNCTION transaction_test2() RETURNS int +LANGUAGE plpgsql +AS $$ +BEGIN + FOR i IN 0..9 LOOP + INSERT INTO test1 (a) VALUES (i); + IF i % 2 = 0 THEN + COMMIT; + ELSE + ROLLBACK; + END IF; + END LOOP; + RETURN 1; +END +$$; + +SELECT transaction_test2(); + +SELECT * FROM test1; + + +-- also not allowed if procedure is called from a function +CREATE FUNCTION transaction_test3() RETURNS int +LANGUAGE plpgsql +AS $$ +BEGIN + CALL transaction_test1(9, 'error'); + RETURN 1; +END; +$$; + +SELECT transaction_test3(); + +SELECT * FROM test1; + + +-- DO block inside function +CREATE FUNCTION transaction_test4() RETURNS int +LANGUAGE plpgsql +AS $$ +BEGIN + EXECUTE 'DO LANGUAGE plpgsql $x$ BEGIN COMMIT; END $x$'; + RETURN 1; +END; +$$; + +SELECT transaction_test4(); + + +-- proconfig settings currently disallow transaction statements +CREATE PROCEDURE transaction_test5() +LANGUAGE plpgsql +SET work_mem = 555 +AS $$ +BEGIN + COMMIT; +END; +$$; + +CALL transaction_test5(); + + +-- SECURITY DEFINER currently disallow transaction statements +CREATE PROCEDURE transaction_test5b() +LANGUAGE plpgsql +SECURITY DEFINER +AS $$ +BEGIN + COMMIT; +END; +$$; + +CALL transaction_test5b(); + + +TRUNCATE test1; + +-- nested procedure calls +CREATE PROCEDURE transaction_test6(c text) +LANGUAGE plpgsql +AS $$ +BEGIN + CALL transaction_test1(9, c); +END; +$$; + +CALL transaction_test6('bar'); + +SELECT * FROM test1; + +TRUNCATE test1; + +CREATE PROCEDURE transaction_test7() +LANGUAGE plpgsql +AS $$ +BEGIN + DO 'BEGIN 
CALL transaction_test1(9, $x$baz$x$); END;'; +END; +$$; + +CALL transaction_test7(); + +SELECT * FROM test1; + +CREATE PROCEDURE transaction_test8() +LANGUAGE plpgsql +AS $$ +BEGIN + EXECUTE 'CALL transaction_test1(10, $x$baz$x$)'; +END; +$$; + +CALL transaction_test8(); + + +-- commit inside cursor loop +CREATE TABLE test2 (x int); +INSERT INTO test2 VALUES (0), (1), (2), (3), (4); + +TRUNCATE test1; + +DO LANGUAGE plpgsql $$ +DECLARE + r RECORD; +BEGIN + FOR r IN SELECT * FROM test2 ORDER BY x LOOP + INSERT INTO test1 (a) VALUES (r.x); + COMMIT; + END LOOP; +END; +$$; + +SELECT * FROM test1; + +-- check that this doesn't leak a holdable portal +SELECT * FROM pg_cursors; + + +-- error in cursor loop with commit +TRUNCATE test1; + +DO LANGUAGE plpgsql $$ +DECLARE + r RECORD; +BEGIN + FOR r IN SELECT * FROM test2 ORDER BY x LOOP + INSERT INTO test1 (a) VALUES (12/(r.x-2)); + COMMIT; + END LOOP; +END; +$$; + +SELECT * FROM test1; + +SELECT * FROM pg_cursors; + + +-- rollback inside cursor loop +TRUNCATE test1; + +DO LANGUAGE plpgsql $$ +DECLARE + r RECORD; +BEGIN + FOR r IN SELECT * FROM test2 ORDER BY x LOOP + INSERT INTO test1 (a) VALUES (r.x); + ROLLBACK; + END LOOP; +END; +$$; + +SELECT * FROM test1; + +SELECT * FROM pg_cursors; + + +-- first commit then rollback inside cursor loop +TRUNCATE test1; + +DO LANGUAGE plpgsql $$ +DECLARE + r RECORD; +BEGIN + FOR r IN SELECT * FROM test2 ORDER BY x LOOP + INSERT INTO test1 (a) VALUES (r.x); + IF r.x % 2 = 0 THEN + COMMIT; + ELSE + ROLLBACK; + END IF; + END LOOP; +END; +$$; + +SELECT * FROM test1; + +SELECT * FROM pg_cursors; + + +-- rollback inside cursor loop +TRUNCATE test1; + +DO LANGUAGE plpgsql $$ +DECLARE + r RECORD; +BEGIN + FOR r IN UPDATE test2 SET x = x * 2 RETURNING x LOOP + INSERT INTO test1 (a) VALUES (r.x); + ROLLBACK; + END LOOP; +END; +$$; + +SELECT * FROM test1; +SELECT * FROM test2; + +SELECT * FROM pg_cursors; + + +-- commit inside block with exception handler +TRUNCATE test1; + +DO LANGUAGE plpgsql $$ +BEGIN + BEGIN + INSERT INTO test1 (a) VALUES (1); + COMMIT; + INSERT INTO test1 (a) VALUES (1/0); + COMMIT; + EXCEPTION + WHEN division_by_zero THEN + RAISE NOTICE 'caught division_by_zero'; + END; +END; +$$; + +SELECT * FROM test1; + + +-- rollback inside block with exception handler +TRUNCATE test1; + +DO LANGUAGE plpgsql $$ +BEGIN + BEGIN + INSERT INTO test1 (a) VALUES (1); + ROLLBACK; + INSERT INTO test1 (a) VALUES (1/0); + ROLLBACK; + EXCEPTION + WHEN division_by_zero THEN + RAISE NOTICE 'caught division_by_zero'; + END; +END; +$$; + +SELECT * FROM test1; + + +-- COMMIT failures +DO LANGUAGE plpgsql $$ +BEGIN + CREATE TABLE test3 (y int UNIQUE DEFERRABLE INITIALLY DEFERRED); + COMMIT; + INSERT INTO test3 (y) VALUES (1); + COMMIT; + INSERT INTO test3 (y) VALUES (1); + INSERT INTO test3 (y) VALUES (2); + COMMIT; + INSERT INTO test3 (y) VALUES (3); -- won't get here +END; +$$; + +SELECT * FROM test3; + + +-- SET TRANSACTION +DO LANGUAGE plpgsql $$ +BEGIN + PERFORM 1; + RAISE INFO '%', current_setting('transaction_isolation'); + COMMIT; + SET TRANSACTION ISOLATION LEVEL REPEATABLE READ; + PERFORM 1; + RAISE INFO '%', current_setting('transaction_isolation'); + COMMIT; + SET TRANSACTION ISOLATION LEVEL REPEATABLE READ; + RESET TRANSACTION ISOLATION LEVEL; + PERFORM 1; + RAISE INFO '%', current_setting('transaction_isolation'); + COMMIT; +END; +$$; + +-- error cases +DO LANGUAGE plpgsql $$ +BEGIN + SET TRANSACTION ISOLATION LEVEL REPEATABLE READ; +END; +$$; + +DO LANGUAGE plpgsql $$ +BEGIN + SAVEPOINT foo; +END; +$$; + +DO 
LANGUAGE plpgsql $$ +BEGIN + EXECUTE 'COMMIT'; +END; +$$; + + +-- snapshot handling test +TRUNCATE test2; + +CREATE PROCEDURE transaction_test9() +LANGUAGE SQL +AS $$ +INSERT INTO test2 VALUES (42); +$$; + +DO LANGUAGE plpgsql $$ +BEGIN + ROLLBACK; + CALL transaction_test9(); +END +$$; + +SELECT * FROM test2; + + +-- Test transaction in procedure with output parameters. This uses a +-- different portal strategy and different code paths in pquery.c. +CREATE PROCEDURE transaction_test10a(INOUT x int) +LANGUAGE plpgsql +AS $$ +BEGIN + x := x + 1; + COMMIT; +END; +$$; + +CALL transaction_test10a(10); + +CREATE PROCEDURE transaction_test10b(INOUT x int) +LANGUAGE plpgsql +AS $$ +BEGIN + x := x - 1; + ROLLBACK; +END; +$$; + +CALL transaction_test10b(10); + + +-- transaction timestamp vs. statement timestamp +CREATE PROCEDURE transaction_test11() +LANGUAGE plpgsql +AS $$ +DECLARE + s1 timestamp with time zone; + s2 timestamp with time zone; + s3 timestamp with time zone; + t1 timestamp with time zone; + t2 timestamp with time zone; + t3 timestamp with time zone; +BEGIN + s1 := statement_timestamp(); + t1 := transaction_timestamp(); + ASSERT s1 = t1; + PERFORM pg_sleep(0.001); + COMMIT; + s2 := statement_timestamp(); + t2 := transaction_timestamp(); + ASSERT s2 = s1; + ASSERT t2 > t1; + PERFORM pg_sleep(0.001); + ROLLBACK; + s3 := statement_timestamp(); + t3 := transaction_timestamp(); + ASSERT s3 = s1; + ASSERT t3 > t2; +END; +$$; + +CALL transaction_test11(); + + +DROP TABLE test1; +DROP TABLE test2; +DROP TABLE test3; diff --git a/src/pl/plpgsql/src/sql/plpgsql_varprops.sql b/src/pl/plpgsql/src/sql/plpgsql_varprops.sql new file mode 100644 index 0000000000..778119d223 --- /dev/null +++ b/src/pl/plpgsql/src/sql/plpgsql_varprops.sql @@ -0,0 +1,247 @@ +-- +-- Tests for PL/pgSQL variable properties: CONSTANT, NOT NULL, initializers +-- + +create type var_record as (f1 int4, f2 int4); +create domain int_nn as int not null; +create domain var_record_nn as var_record not null; +create domain var_record_colnn as var_record check((value).f2 is not null); + +-- CONSTANT + +do $$ +declare x constant int := 42; +begin + raise notice 'x = %', x; +end$$; + +do $$ +declare x constant int; +begin + x := 42; -- fail +end$$; + +do $$ +declare x constant int; y int; +begin + for x, y in select 1, 2 loop -- fail + end loop; +end$$; + +do $$ +declare x constant int[]; +begin + x[1] := 42; -- fail +end$$; + +do $$ +declare x constant int[]; y int; +begin + for x[1], y in select 1, 2 loop -- fail (currently, unsupported syntax) + end loop; +end$$; + +do $$ +declare x constant var_record; +begin + x.f1 := 42; -- fail +end$$; + +do $$ +declare x constant var_record; y int; +begin + for x.f1, y in select 1, 2 loop -- fail + end loop; +end$$; + +-- initializer expressions + +do $$ +declare x int := sin(0); +begin + raise notice 'x = %', x; +end$$; + +do $$ +declare x int := 1/0; -- fail +begin + raise notice 'x = %', x; +end$$; + +do $$ +declare x bigint[] := array[1,3,5]; +begin + raise notice 'x = %', x; +end$$; + +do $$ +declare x record := row(1,2,3); +begin + raise notice 'x = %', x; +end$$; + +do $$ +declare x var_record := row(1,2); +begin + raise notice 'x = %', x; +end$$; + +-- NOT NULL + +do $$ +declare x int not null; -- fail +begin + raise notice 'x = %', x; +end$$; + +do $$ +declare x int not null := 42; +begin + raise notice 'x = %', x; + x := null; -- fail +end$$; + +do $$ +declare x int not null := null; -- fail +begin + raise notice 'x = %', x; +end$$; + +do $$ +declare x record not null; -- fail +begin 
+ raise notice 'x = %', x; +end$$; + +do $$ +declare x record not null := row(42); +begin + raise notice 'x = %', x; + x := row(null); -- ok + raise notice 'x = %', x; + x := null; -- fail +end$$; + +do $$ +declare x record not null := null; -- fail +begin + raise notice 'x = %', x; +end$$; + +do $$ +declare x var_record not null; -- fail +begin + raise notice 'x = %', x; +end$$; + +do $$ +declare x var_record not null := row(41,42); +begin + raise notice 'x = %', x; + x := row(null,null); -- ok + raise notice 'x = %', x; + x := null; -- fail +end$$; + +do $$ +declare x var_record not null := null; -- fail +begin + raise notice 'x = %', x; +end$$; + +-- Check that variables are reinitialized on block re-entry. + +do $$ +begin + for i in 1..3 loop + declare + x int; + y int := i; + r record; + c var_record; + begin + if i = 1 then + x := 42; + r := row(i, i+1); + c := row(i, i+1); + end if; + raise notice 'x = %', x; + raise notice 'y = %', y; + raise notice 'r = %', r; + raise notice 'c = %', c; + end; + end loop; +end$$; + +-- Check enforcement of domain constraints during initialization + +do $$ +declare x int_nn; -- fail +begin + raise notice 'x = %', x; +end$$; + +do $$ +declare x int_nn := null; -- fail +begin + raise notice 'x = %', x; +end$$; + +do $$ +declare x int_nn := 42; +begin + raise notice 'x = %', x; + x := null; -- fail +end$$; + +do $$ +declare x var_record_nn; -- fail +begin + raise notice 'x = %', x; +end$$; + +do $$ +declare x var_record_nn := null; -- fail +begin + raise notice 'x = %', x; +end$$; + +do $$ +declare x var_record_nn := row(1,2); +begin + raise notice 'x = %', x; + x := row(null,null); -- ok + x := null; -- fail +end$$; + +do $$ +declare x var_record_colnn; -- fail +begin + raise notice 'x = %', x; +end$$; + +do $$ +declare x var_record_colnn := null; -- fail +begin + raise notice 'x = %', x; +end$$; + +do $$ +declare x var_record_colnn := row(1,null); -- fail +begin + raise notice 'x = %', x; +end$$; + +do $$ +declare x var_record_colnn := row(1,2); +begin + raise notice 'x = %', x; + x := null; -- fail +end$$; + +do $$ +declare x var_record_colnn := row(1,2); +begin + raise notice 'x = %', x; + x := row(null,null); -- fail +end$$; diff --git a/src/pl/plpython/Makefile b/src/pl/plpython/Makefile index 7680d49cb6..667a74469e 100644 --- a/src/pl/plpython/Makefile +++ b/src/pl/plpython/Makefile @@ -39,6 +39,21 @@ ifeq ($(python_majorversion),2) DATA += plpythonu.control plpythonu--1.0.sql plpythonu--unpackaged--1.0.sql endif +# header files to install - it's not clear which of these might be needed +# so install them all. +INCS = plpython.h \ + plpy_cursorobject.h \ + plpy_elog.h \ + plpy_exec.h \ + plpy_main.h \ + plpy_planobject.h \ + plpy_plpymodule.h \ + plpy_procedure.h \ + plpy_resultobject.h \ + plpy_spi.h \ + plpy_subxactobject.h \ + plpy_typeio.h \ + plpy_util.h # Python on win32 ships with import libraries only for Microsoft Visual C++, # which are not compatible with mingw gcc. 
Therefore we need to build a @@ -78,6 +93,7 @@ REGRESS = \ plpython_spi \ plpython_newline \ plpython_void \ + plpython_call \ plpython_params \ plpython_setof \ plpython_record \ @@ -89,6 +105,7 @@ REGRESS = \ plpython_quote \ plpython_composite \ plpython_subtransaction \ + plpython_transaction \ plpython_drop REGRESS_PLPYTHON3_MANGLE := $(REGRESS) @@ -97,19 +114,20 @@ include $(top_srcdir)/src/Makefile.shlib all: all-lib +# Ensure parallel safety if a build is started in this directory $(OBJS): | submake-generated-headers - install: all install-lib install-data installdirs: installdirs-lib - $(MKDIR_P) '$(DESTDIR)$(datadir)/extension' '$(DESTDIR)$(includedir_server)' + $(MKDIR_P) '$(DESTDIR)$(datadir)/extension' '$(DESTDIR)$(includedir_server)' '$(DESTDIR)$(pgxsdir)/src/pl/plpython' uninstall: uninstall-lib uninstall-data install-data: installdirs $(INSTALL_DATA) $(addprefix $(srcdir)/, $(DATA)) '$(DESTDIR)$(datadir)/extension/' - $(INSTALL_DATA) $(srcdir)/plpython.h $(srcdir)/plpy_util.h '$(DESTDIR)$(includedir_server)' + $(INSTALL_DATA) $(addprefix $(srcdir)/, $(INCS)) '$(DESTDIR)$(includedir_server)' + $(INSTALL_DATA) $(srcdir)/regress-python3-mangle.mk '$(DESTDIR)$(pgxsdir)/src/pl/plpython' uninstall-data: rm -f $(addprefix '$(DESTDIR)$(datadir)/extension'/, $(notdir $(DATA))) @@ -129,7 +147,7 @@ installcheck: submake-pg-regress .PHONY: submake-pg-regress -submake-pg-regress: +submake-pg-regress: | submake-generated-headers $(MAKE) -C $(top_builddir)/src/test/regress pg_regress$(X) clean distclean: clean-lib diff --git a/src/pl/plpython/expected/plpython_call.out b/src/pl/plpython/expected/plpython_call.out new file mode 100644 index 0000000000..07ae04e98b --- /dev/null +++ b/src/pl/plpython/expected/plpython_call.out @@ -0,0 +1,58 @@ +-- +-- Tests for procedures / CALL syntax +-- +CREATE PROCEDURE test_proc1() +LANGUAGE plpythonu +AS $$ +pass +$$; +CALL test_proc1(); +-- error: can't return non-None +CREATE PROCEDURE test_proc2() +LANGUAGE plpythonu +AS $$ +return 5 +$$; +CALL test_proc2(); +ERROR: PL/Python procedure did not return None +CONTEXT: PL/Python procedure "test_proc2" +CREATE TABLE test1 (a int); +CREATE PROCEDURE test_proc3(x int) +LANGUAGE plpythonu +AS $$ +plpy.execute("INSERT INTO test1 VALUES (%s)" % x) +$$; +CALL test_proc3(55); +SELECT * FROM test1; + a +---- + 55 +(1 row) + +-- output arguments +CREATE PROCEDURE test_proc5(INOUT a text) +LANGUAGE plpythonu +AS $$ +return [a + '+' + a] +$$; +CALL test_proc5('abc'); + a +--------- + abc+abc +(1 row) + +CREATE PROCEDURE test_proc6(a int, INOUT b int, INOUT c int) +LANGUAGE plpythonu +AS $$ +return (b * a, c * a) +$$; +CALL test_proc6(2, 3, 4); + b | c +---+--- + 6 | 8 +(1 row) + +DROP PROCEDURE test_proc1; +DROP PROCEDURE test_proc2; +DROP PROCEDURE test_proc3; +DROP TABLE test1; diff --git a/src/pl/plpython/expected/plpython_error.out b/src/pl/plpython/expected/plpython_error.out index 1f52af7fe0..4d615b41cc 100644 --- a/src/pl/plpython/expected/plpython_error.out +++ b/src/pl/plpython/expected/plpython_error.out @@ -422,3 +422,26 @@ EXCEPTION WHEN SQLSTATE 'SILLY' THEN -- NOOP END $$ LANGUAGE plpgsql; +/* test the context stack trace for nested execution levels + */ +CREATE FUNCTION notice_innerfunc() RETURNS int AS $$ +plpy.execute("DO LANGUAGE plpythonu $x$ plpy.notice('inside DO') $x$") +return 1 +$$ LANGUAGE plpythonu; +CREATE FUNCTION notice_outerfunc() RETURNS int AS $$ +plpy.execute("SELECT notice_innerfunc()") +return 1 +$$ LANGUAGE plpythonu; +\set SHOW_CONTEXT always +SELECT notice_outerfunc(); +NOTICE: 
inside DO +CONTEXT: PL/Python anonymous code block +SQL statement "DO LANGUAGE plpythonu $x$ plpy.notice('inside DO') $x$" +PL/Python function "notice_innerfunc" +SQL statement "SELECT notice_innerfunc()" +PL/Python function "notice_outerfunc" + notice_outerfunc +------------------ + 1 +(1 row) + diff --git a/src/pl/plpython/expected/plpython_error_0.out b/src/pl/plpython/expected/plpython_error_0.out index 5323906122..290902b182 100644 --- a/src/pl/plpython/expected/plpython_error_0.out +++ b/src/pl/plpython/expected/plpython_error_0.out @@ -422,3 +422,26 @@ EXCEPTION WHEN SQLSTATE 'SILLY' THEN -- NOOP END $$ LANGUAGE plpgsql; +/* test the context stack trace for nested execution levels + */ +CREATE FUNCTION notice_innerfunc() RETURNS int AS $$ +plpy.execute("DO LANGUAGE plpythonu $x$ plpy.notice('inside DO') $x$") +return 1 +$$ LANGUAGE plpythonu; +CREATE FUNCTION notice_outerfunc() RETURNS int AS $$ +plpy.execute("SELECT notice_innerfunc()") +return 1 +$$ LANGUAGE plpythonu; +\set SHOW_CONTEXT always +SELECT notice_outerfunc(); +NOTICE: inside DO +CONTEXT: PL/Python anonymous code block +SQL statement "DO LANGUAGE plpythonu $x$ plpy.notice('inside DO') $x$" +PL/Python function "notice_innerfunc" +SQL statement "SELECT notice_innerfunc()" +PL/Python function "notice_outerfunc" + notice_outerfunc +------------------ + 1 +(1 row) + diff --git a/src/pl/plpython/expected/plpython_error_5.out b/src/pl/plpython/expected/plpython_error_5.out index 5ff46ca50a..bc66ab5534 100644 --- a/src/pl/plpython/expected/plpython_error_5.out +++ b/src/pl/plpython/expected/plpython_error_5.out @@ -422,3 +422,26 @@ EXCEPTION WHEN SQLSTATE 'SILLY' THEN -- NOOP END $$ LANGUAGE plpgsql; +/* test the context stack trace for nested execution levels + */ +CREATE FUNCTION notice_innerfunc() RETURNS int AS $$ +plpy.execute("DO LANGUAGE plpythonu $x$ plpy.notice('inside DO') $x$") +return 1 +$$ LANGUAGE plpythonu; +CREATE FUNCTION notice_outerfunc() RETURNS int AS $$ +plpy.execute("SELECT notice_innerfunc()") +return 1 +$$ LANGUAGE plpythonu; +\set SHOW_CONTEXT always +SELECT notice_outerfunc(); +NOTICE: inside DO +CONTEXT: PL/Python anonymous code block +SQL statement "DO LANGUAGE plpythonu $x$ plpy.notice('inside DO') $x$" +PL/Python function "notice_innerfunc" +SQL statement "SELECT notice_innerfunc()" +PL/Python function "notice_outerfunc" + notice_outerfunc +------------------ + 1 +(1 row) + diff --git a/src/pl/plpython/expected/plpython_subtransaction.out b/src/pl/plpython/expected/plpython_subtransaction.out index da3b312a06..069f0992ab 100644 --- a/src/pl/plpython/expected/plpython_subtransaction.out +++ b/src/pl/plpython/expected/plpython_subtransaction.out @@ -43,7 +43,7 @@ SELECT * FROM subtransaction_tbl; TRUNCATE subtransaction_tbl; SELECT subtransaction_test('SPI'); -ERROR: spiexceptions.InvalidTextRepresentation: invalid input syntax for integer: "oops" +ERROR: spiexceptions.InvalidTextRepresentation: invalid input syntax for type integer: "oops" LINE 1: INSERT INTO subtransaction_tbl VALUES ('oops') ^ QUERY: INSERT INTO subtransaction_tbl VALUES ('oops') @@ -95,7 +95,7 @@ SELECT * FROM subtransaction_tbl; TRUNCATE subtransaction_tbl; SELECT subtransaction_ctx_test('SPI'); -ERROR: spiexceptions.InvalidTextRepresentation: invalid input syntax for integer: "oops" +ERROR: spiexceptions.InvalidTextRepresentation: invalid input syntax for type integer: "oops" LINE 1: INSERT INTO subtransaction_tbl VALUES ('oops') ^ QUERY: INSERT INTO subtransaction_tbl VALUES ('oops') @@ -134,7 +134,7 @@ with 
plpy.subtransaction(): except plpy.SPIError, e: if not swallow: raise - plpy.notice("Swallowed %r" % e) + plpy.notice("Swallowed %s(%r)" % (e.__class__.__name__, e.args[0])) return "ok" $$ LANGUAGE plpythonu; SELECT subtransaction_nested_test(); @@ -153,7 +153,7 @@ SELECT * FROM subtransaction_tbl; TRUNCATE subtransaction_tbl; SELECT subtransaction_nested_test('t'); -NOTICE: Swallowed SyntaxError('syntax error at or near "error"',) +NOTICE: Swallowed SyntaxError('syntax error at or near "error"') subtransaction_nested_test ---------------------------- ok @@ -178,7 +178,7 @@ with plpy.subtransaction(): return "ok" $$ LANGUAGE plpythonu; SELECT subtransaction_deeply_nested_test(); -NOTICE: Swallowed SyntaxError('syntax error at or near "error"',) +NOTICE: Swallowed SyntaxError('syntax error at or near "error"') subtransaction_deeply_nested_test ----------------------------------- ok diff --git a/src/pl/plpython/expected/plpython_subtransaction_0.out b/src/pl/plpython/expected/plpython_subtransaction_0.out index e6cc38a033..97ee42b5a9 100644 --- a/src/pl/plpython/expected/plpython_subtransaction_0.out +++ b/src/pl/plpython/expected/plpython_subtransaction_0.out @@ -43,7 +43,7 @@ SELECT * FROM subtransaction_tbl; TRUNCATE subtransaction_tbl; SELECT subtransaction_test('SPI'); -ERROR: spiexceptions.InvalidTextRepresentation: invalid input syntax for integer: "oops" +ERROR: spiexceptions.InvalidTextRepresentation: invalid input syntax for type integer: "oops" LINE 1: INSERT INTO subtransaction_tbl VALUES ('oops') ^ QUERY: INSERT INTO subtransaction_tbl VALUES ('oops') @@ -128,7 +128,7 @@ with plpy.subtransaction(): except plpy.SPIError, e: if not swallow: raise - plpy.notice("Swallowed %r" % e) + plpy.notice("Swallowed %s(%r)" % (e.__class__.__name__, e.args[0])) return "ok" $$ LANGUAGE plpythonu; ERROR: could not compile PL/Python function "subtransaction_nested_test" diff --git a/src/pl/plpython/expected/plpython_subtransaction_5.out b/src/pl/plpython/expected/plpython_subtransaction_5.out index 6fbafa3166..e172e98f86 100644 --- a/src/pl/plpython/expected/plpython_subtransaction_5.out +++ b/src/pl/plpython/expected/plpython_subtransaction_5.out @@ -43,7 +43,7 @@ SELECT * FROM subtransaction_tbl; TRUNCATE subtransaction_tbl; SELECT subtransaction_test('SPI'); -ERROR: spiexceptions.InvalidTextRepresentation: invalid input syntax for integer: "oops" +ERROR: spiexceptions.InvalidTextRepresentation: invalid input syntax for type integer: "oops" LINE 1: INSERT INTO subtransaction_tbl VALUES ('oops') ^ QUERY: INSERT INTO subtransaction_tbl VALUES ('oops') @@ -128,7 +128,7 @@ with plpy.subtransaction(): except plpy.SPIError, e: if not swallow: raise - plpy.notice("Swallowed %r" % e) + plpy.notice("Swallowed %s(%r)" % (e.__class__.__name__, e.args[0])) return "ok" $$ LANGUAGE plpythonu; ERROR: could not compile PL/Python function "subtransaction_nested_test" diff --git a/src/pl/plpython/expected/plpython_test.out b/src/pl/plpython/expected/plpython_test.out index 847e4cc412..39b994f446 100644 --- a/src/pl/plpython/expected/plpython_test.out +++ b/src/pl/plpython/expected/plpython_test.out @@ -48,6 +48,7 @@ select module_contents(); Error Fatal SPIError + commit cursor debug error @@ -60,10 +61,11 @@ select module_contents(); quote_ident quote_literal quote_nullable + rollback spiexceptions subtransaction warning -(18 rows) +(20 rows) CREATE FUNCTION elog_test_basic() RETURNS void AS $$ diff --git a/src/pl/plpython/expected/plpython_transaction.out b/src/pl/plpython/expected/plpython_transaction.out new 
file mode 100644 index 0000000000..14152993c7 --- /dev/null +++ b/src/pl/plpython/expected/plpython_transaction.out @@ -0,0 +1,195 @@ +CREATE TABLE test1 (a int, b text); +CREATE PROCEDURE transaction_test1() +LANGUAGE plpythonu +AS $$ +for i in range(0, 10): + plpy.execute("INSERT INTO test1 (a) VALUES (%d)" % i) + if i % 2 == 0: + plpy.commit() + else: + plpy.rollback() +$$; +CALL transaction_test1(); +SELECT * FROM test1; + a | b +---+--- + 0 | + 2 | + 4 | + 6 | + 8 | +(5 rows) + +TRUNCATE test1; +DO +LANGUAGE plpythonu +$$ +for i in range(0, 10): + plpy.execute("INSERT INTO test1 (a) VALUES (%d)" % i) + if i % 2 == 0: + plpy.commit() + else: + plpy.rollback() +$$; +SELECT * FROM test1; + a | b +---+--- + 0 | + 2 | + 4 | + 6 | + 8 | +(5 rows) + +TRUNCATE test1; +-- not allowed in a function +CREATE FUNCTION transaction_test2() RETURNS int +LANGUAGE plpythonu +AS $$ +for i in range(0, 10): + plpy.execute("INSERT INTO test1 (a) VALUES (%d)" % i) + if i % 2 == 0: + plpy.commit() + else: + plpy.rollback() +return 1 +$$; +SELECT transaction_test2(); +ERROR: invalid transaction termination +CONTEXT: PL/Python function "transaction_test2" +SELECT * FROM test1; + a | b +---+--- +(0 rows) + +-- also not allowed if procedure is called from a function +CREATE FUNCTION transaction_test3() RETURNS int +LANGUAGE plpythonu +AS $$ +plpy.execute("CALL transaction_test1()") +return 1 +$$; +SELECT transaction_test3(); +ERROR: spiexceptions.InvalidTransactionTermination: invalid transaction termination +CONTEXT: Traceback (most recent call last): + PL/Python function "transaction_test3", line 2, in + plpy.execute("CALL transaction_test1()") +PL/Python function "transaction_test3" +SELECT * FROM test1; + a | b +---+--- +(0 rows) + +-- DO block inside function +CREATE FUNCTION transaction_test4() RETURNS int +LANGUAGE plpythonu +AS $$ +plpy.execute("DO LANGUAGE plpythonu $x$ plpy.commit() $x$") +return 1 +$$; +SELECT transaction_test4(); +ERROR: spiexceptions.InvalidTransactionTermination: invalid transaction termination +CONTEXT: Traceback (most recent call last): + PL/Python function "transaction_test4", line 2, in + plpy.execute("DO LANGUAGE plpythonu $x$ plpy.commit() $x$") +PL/Python function "transaction_test4" +-- commit inside subtransaction (prohibited) +DO LANGUAGE plpythonu $$ +s = plpy.subtransaction() +s.enter() +plpy.commit() +$$; +WARNING: forcibly aborting a subtransaction that has not been exited +ERROR: cannot commit while a subtransaction is active +CONTEXT: PL/Python anonymous code block +-- commit inside cursor loop +CREATE TABLE test2 (x int); +INSERT INTO test2 VALUES (0), (1), (2), (3), (4); +TRUNCATE test1; +DO LANGUAGE plpythonu $$ +for row in plpy.cursor("SELECT * FROM test2 ORDER BY x"): + plpy.execute("INSERT INTO test1 (a) VALUES (%s)" % row['x']) + plpy.commit() +$$; +SELECT * FROM test1; + a | b +---+--- + 0 | + 1 | + 2 | + 3 | + 4 | +(5 rows) + +-- check that this doesn't leak a holdable portal +SELECT * FROM pg_cursors; + name | statement | is_holdable | is_binary | is_scrollable | creation_time +------+-----------+-------------+-----------+---------------+--------------- +(0 rows) + +-- error in cursor loop with commit +TRUNCATE test1; +DO LANGUAGE plpythonu $$ +for row in plpy.cursor("SELECT * FROM test2 ORDER BY x"): + plpy.execute("INSERT INTO test1 (a) VALUES (12/(%s-2))" % row['x']) + plpy.commit() +$$; +ERROR: spiexceptions.DivisionByZero: division by zero +CONTEXT: Traceback (most recent call last): + PL/Python anonymous code block, line 3, in + plpy.execute("INSERT 
INTO test1 (a) VALUES (12/(%s-2))" % row['x']) +PL/Python anonymous code block +SELECT * FROM test1; + a | b +-----+--- + -6 | + -12 | +(2 rows) + +SELECT * FROM pg_cursors; + name | statement | is_holdable | is_binary | is_scrollable | creation_time +------+-----------+-------------+-----------+---------------+--------------- +(0 rows) + +-- rollback inside cursor loop +TRUNCATE test1; +DO LANGUAGE plpythonu $$ +for row in plpy.cursor("SELECT * FROM test2 ORDER BY x"): + plpy.execute("INSERT INTO test1 (a) VALUES (%s)" % row['x']) + plpy.rollback() +$$; +SELECT * FROM test1; + a | b +---+--- +(0 rows) + +SELECT * FROM pg_cursors; + name | statement | is_holdable | is_binary | is_scrollable | creation_time +------+-----------+-------------+-----------+---------------+--------------- +(0 rows) + +-- first commit then rollback inside cursor loop +TRUNCATE test1; +DO LANGUAGE plpythonu $$ +for row in plpy.cursor("SELECT * FROM test2 ORDER BY x"): + plpy.execute("INSERT INTO test1 (a) VALUES (%s)" % row['x']) + if row['x'] % 2 == 0: + plpy.commit() + else: + plpy.rollback() +$$; +SELECT * FROM test1; + a | b +---+--- + 0 | + 2 | + 4 | +(3 rows) + +SELECT * FROM pg_cursors; + name | statement | is_holdable | is_binary | is_scrollable | creation_time +------+-----------+-------------+-----------+---------------+--------------- +(0 rows) + +DROP TABLE test1; +DROP TABLE test2; diff --git a/src/pl/plpython/expected/plpython_types.out b/src/pl/plpython/expected/plpython_types.out index 893de301dd..98b89b7d5c 100644 --- a/src/pl/plpython/expected/plpython_types.out +++ b/src/pl/plpython/expected/plpython_types.out @@ -684,7 +684,7 @@ CREATE FUNCTION test_type_conversion_array_mixed2() RETURNS int[] AS $$ return [123, 'abc'] $$ LANGUAGE plpythonu; SELECT * FROM test_type_conversion_array_mixed2(); -ERROR: invalid input syntax for integer: "abc" +ERROR: invalid input syntax for type integer: "abc" CONTEXT: while creating return value PL/Python function "test_type_conversion_array_mixed2" CREATE FUNCTION test_type_conversion_mdarray_malformed() RETURNS int[] AS $$ @@ -765,6 +765,76 @@ SELECT * FROM test_type_conversion_array_domain_check_violation(); ERROR: value for domain ordered_pair_domain violates check constraint "ordered_pair_domain_check" CONTEXT: while creating return value PL/Python function "test_type_conversion_array_domain_check_violation" +-- +-- Arrays of domains +-- +CREATE FUNCTION test_read_uint2_array(x uint2[]) RETURNS uint2 AS $$ +plpy.info(x, type(x)) +return x[0] +$$ LANGUAGE plpythonu; +select test_read_uint2_array(array[1::uint2]); +INFO: ([1], ) + test_read_uint2_array +----------------------- + 1 +(1 row) + +CREATE FUNCTION test_build_uint2_array(x int2) RETURNS uint2[] AS $$ +return [x, x] +$$ LANGUAGE plpythonu; +select test_build_uint2_array(1::int2); + test_build_uint2_array +------------------------ + {1,1} +(1 row) + +select test_build_uint2_array(-1::int2); -- fail +ERROR: value for domain uint2 violates check constraint "uint2_check" +CONTEXT: while creating return value +PL/Python function "test_build_uint2_array" +-- +-- ideally this would work, but for now it doesn't, because the return value +-- is [[2,4], [2,4]] which our conversion code thinks should become a 2-D +-- integer array, not an array of arrays. 
+-- +CREATE FUNCTION test_type_conversion_domain_array(x integer[]) + RETURNS ordered_pair_domain[] AS $$ +return [x, x] +$$ LANGUAGE plpythonu; +select test_type_conversion_domain_array(array[2,4]); +ERROR: return value of function with array return type is not a Python sequence +CONTEXT: while creating return value +PL/Python function "test_type_conversion_domain_array" +select test_type_conversion_domain_array(array[4,2]); -- fail +ERROR: return value of function with array return type is not a Python sequence +CONTEXT: while creating return value +PL/Python function "test_type_conversion_domain_array" +CREATE FUNCTION test_type_conversion_domain_array2(x ordered_pair_domain) + RETURNS integer AS $$ +plpy.info(x, type(x)) +return x[1] +$$ LANGUAGE plpythonu; +select test_type_conversion_domain_array2(array[2,4]); +INFO: ([2, 4], ) + test_type_conversion_domain_array2 +------------------------------------ + 4 +(1 row) + +select test_type_conversion_domain_array2(array[4,2]); -- fail +ERROR: value for domain ordered_pair_domain violates check constraint "ordered_pair_domain_check" +CREATE FUNCTION test_type_conversion_array_domain_array(x ordered_pair_domain[]) + RETURNS ordered_pair_domain AS $$ +plpy.info(x, type(x)) +return x[0] +$$ LANGUAGE plpythonu; +select test_type_conversion_array_domain_array(array[array[2,4]::ordered_pair_domain]); +INFO: ([[2, 4]], ) + test_type_conversion_array_domain_array +----------------------------------------- + {2,4} +(1 row) + --- --- Composite types --- @@ -820,6 +890,64 @@ SELECT test_composite_type_input(row(1, 2)); 3 (1 row) +-- +-- Domains within composite +-- +CREATE TYPE nnint_container AS (f1 int, f2 nnint); +CREATE FUNCTION nnint_test(x int, y int) RETURNS nnint_container AS $$ +return {'f1': x, 'f2': y} +$$ LANGUAGE plpythonu; +SELECT nnint_test(null, 3); + nnint_test +------------ + (,3) +(1 row) + +SELECT nnint_test(3, null); -- fail +ERROR: value for domain nnint violates check constraint "nnint_check" +CONTEXT: while creating return value +PL/Python function "nnint_test" +-- +-- Domains of composite +-- +CREATE DOMAIN ordered_named_pair AS named_pair_2 CHECK((VALUE).i <= (VALUE).j); +CREATE FUNCTION read_ordered_named_pair(p ordered_named_pair) RETURNS integer AS $$ +return p['i'] + p['j'] +$$ LANGUAGE plpythonu; +SELECT read_ordered_named_pair(row(1, 2)); + read_ordered_named_pair +------------------------- + 3 +(1 row) + +SELECT read_ordered_named_pair(row(2, 1)); -- fail +ERROR: value for domain ordered_named_pair violates check constraint "ordered_named_pair_check" +CREATE FUNCTION build_ordered_named_pair(i int, j int) RETURNS ordered_named_pair AS $$ +return {'i': i, 'j': j} +$$ LANGUAGE plpythonu; +SELECT build_ordered_named_pair(1,2); + build_ordered_named_pair +-------------------------- + (1,2) +(1 row) + +SELECT build_ordered_named_pair(2,1); -- fail +ERROR: value for domain ordered_named_pair violates check constraint "ordered_named_pair_check" +CONTEXT: while creating return value +PL/Python function "build_ordered_named_pair" +CREATE FUNCTION build_ordered_named_pairs(i int, j int) RETURNS ordered_named_pair[] AS $$ +return [{'i': i, 'j': j}, {'i': i, 'j': j+1}] +$$ LANGUAGE plpythonu; +SELECT build_ordered_named_pairs(1,2); + build_ordered_named_pairs +--------------------------- + {"(1,2)","(1,3)"} +(1 row) + +SELECT build_ordered_named_pairs(2,1); -- fail +ERROR: value for domain ordered_named_pair violates check constraint "ordered_named_pair_check" +CONTEXT: while creating return value +PL/Python function 
"build_ordered_named_pairs" -- -- Prepared statements -- diff --git a/src/pl/plpython/expected/plpython_types_3.out b/src/pl/plpython/expected/plpython_types_3.out index 2d853bd573..a6ec10d5e1 100644 --- a/src/pl/plpython/expected/plpython_types_3.out +++ b/src/pl/plpython/expected/plpython_types_3.out @@ -684,7 +684,7 @@ CREATE FUNCTION test_type_conversion_array_mixed2() RETURNS int[] AS $$ return [123, 'abc'] $$ LANGUAGE plpython3u; SELECT * FROM test_type_conversion_array_mixed2(); -ERROR: invalid input syntax for integer: "abc" +ERROR: invalid input syntax for type integer: "abc" CONTEXT: while creating return value PL/Python function "test_type_conversion_array_mixed2" CREATE FUNCTION test_type_conversion_mdarray_malformed() RETURNS int[] AS $$ @@ -765,6 +765,76 @@ SELECT * FROM test_type_conversion_array_domain_check_violation(); ERROR: value for domain ordered_pair_domain violates check constraint "ordered_pair_domain_check" CONTEXT: while creating return value PL/Python function "test_type_conversion_array_domain_check_violation" +-- +-- Arrays of domains +-- +CREATE FUNCTION test_read_uint2_array(x uint2[]) RETURNS uint2 AS $$ +plpy.info(x, type(x)) +return x[0] +$$ LANGUAGE plpythonu; +select test_read_uint2_array(array[1::uint2]); +INFO: ([1], ) + test_read_uint2_array +----------------------- + 1 +(1 row) + +CREATE FUNCTION test_build_uint2_array(x int2) RETURNS uint2[] AS $$ +return [x, x] +$$ LANGUAGE plpythonu; +select test_build_uint2_array(1::int2); + test_build_uint2_array +------------------------ + {1,1} +(1 row) + +select test_build_uint2_array(-1::int2); -- fail +ERROR: value for domain uint2 violates check constraint "uint2_check" +CONTEXT: while creating return value +PL/Python function "test_build_uint2_array" +-- +-- ideally this would work, but for now it doesn't, because the return value +-- is [[2,4], [2,4]] which our conversion code thinks should become a 2-D +-- integer array, not an array of arrays. 
+-- +CREATE FUNCTION test_type_conversion_domain_array(x integer[]) + RETURNS ordered_pair_domain[] AS $$ +return [x, x] +$$ LANGUAGE plpythonu; +select test_type_conversion_domain_array(array[2,4]); +ERROR: return value of function with array return type is not a Python sequence +CONTEXT: while creating return value +PL/Python function "test_type_conversion_domain_array" +select test_type_conversion_domain_array(array[4,2]); -- fail +ERROR: return value of function with array return type is not a Python sequence +CONTEXT: while creating return value +PL/Python function "test_type_conversion_domain_array" +CREATE FUNCTION test_type_conversion_domain_array2(x ordered_pair_domain) + RETURNS integer AS $$ +plpy.info(x, type(x)) +return x[1] +$$ LANGUAGE plpythonu; +select test_type_conversion_domain_array2(array[2,4]); +INFO: ([2, 4], ) + test_type_conversion_domain_array2 +------------------------------------ + 4 +(1 row) + +select test_type_conversion_domain_array2(array[4,2]); -- fail +ERROR: value for domain ordered_pair_domain violates check constraint "ordered_pair_domain_check" +CREATE FUNCTION test_type_conversion_array_domain_array(x ordered_pair_domain[]) + RETURNS ordered_pair_domain AS $$ +plpy.info(x, type(x)) +return x[0] +$$ LANGUAGE plpythonu; +select test_type_conversion_array_domain_array(array[array[2,4]::ordered_pair_domain]); +INFO: ([[2, 4]], ) + test_type_conversion_array_domain_array +----------------------------------------- + {2,4} +(1 row) + --- --- Composite types --- @@ -820,6 +890,64 @@ SELECT test_composite_type_input(row(1, 2)); 3 (1 row) +-- +-- Domains within composite +-- +CREATE TYPE nnint_container AS (f1 int, f2 nnint); +CREATE FUNCTION nnint_test(x int, y int) RETURNS nnint_container AS $$ +return {'f1': x, 'f2': y} +$$ LANGUAGE plpythonu; +SELECT nnint_test(null, 3); + nnint_test +------------ + (,3) +(1 row) + +SELECT nnint_test(3, null); -- fail +ERROR: value for domain nnint violates check constraint "nnint_check" +CONTEXT: while creating return value +PL/Python function "nnint_test" +-- +-- Domains of composite +-- +CREATE DOMAIN ordered_named_pair AS named_pair_2 CHECK((VALUE).i <= (VALUE).j); +CREATE FUNCTION read_ordered_named_pair(p ordered_named_pair) RETURNS integer AS $$ +return p['i'] + p['j'] +$$ LANGUAGE plpythonu; +SELECT read_ordered_named_pair(row(1, 2)); + read_ordered_named_pair +------------------------- + 3 +(1 row) + +SELECT read_ordered_named_pair(row(2, 1)); -- fail +ERROR: value for domain ordered_named_pair violates check constraint "ordered_named_pair_check" +CREATE FUNCTION build_ordered_named_pair(i int, j int) RETURNS ordered_named_pair AS $$ +return {'i': i, 'j': j} +$$ LANGUAGE plpythonu; +SELECT build_ordered_named_pair(1,2); + build_ordered_named_pair +-------------------------- + (1,2) +(1 row) + +SELECT build_ordered_named_pair(2,1); -- fail +ERROR: value for domain ordered_named_pair violates check constraint "ordered_named_pair_check" +CONTEXT: while creating return value +PL/Python function "build_ordered_named_pair" +CREATE FUNCTION build_ordered_named_pairs(i int, j int) RETURNS ordered_named_pair[] AS $$ +return [{'i': i, 'j': j}, {'i': i, 'j': j+1}] +$$ LANGUAGE plpythonu; +SELECT build_ordered_named_pairs(1,2); + build_ordered_named_pairs +--------------------------- + {"(1,2)","(1,3)"} +(1 row) + +SELECT build_ordered_named_pairs(2,1); -- fail +ERROR: value for domain ordered_named_pair violates check constraint "ordered_named_pair_check" +CONTEXT: while creating return value +PL/Python function 
"build_ordered_named_pairs" -- -- Prepared statements -- diff --git a/src/pl/plpython/generate-spiexceptions.pl b/src/pl/plpython/generate-spiexceptions.pl index a9ee9601b3..73ca50e875 100644 --- a/src/pl/plpython/generate-spiexceptions.pl +++ b/src/pl/plpython/generate-spiexceptions.pl @@ -1,7 +1,7 @@ #!/usr/bin/perl # # Generate the spiexceptions.h header from errcodes.txt -# Copyright (c) 2000-2017, PostgreSQL Global Development Group +# Copyright (c) 2000-2018, PostgreSQL Global Development Group use warnings; use strict; diff --git a/src/pl/plpython/nls.mk b/src/pl/plpython/nls.mk index 6c7359c1a2..b3e10d035c 100644 --- a/src/pl/plpython/nls.mk +++ b/src/pl/plpython/nls.mk @@ -1,6 +1,6 @@ # src/pl/plpython/nls.mk CATALOG_NAME = plpython -AVAIL_LANGUAGES = cs de es fr it ja ko pl pt_BR ru sv zh_CN +AVAIL_LANGUAGES = cs de es fr it ja ko pl pt_BR ru sv tr vi zh_CN GETTEXT_FILES = plpy_cursorobject.c plpy_elog.c plpy_exec.c plpy_main.c plpy_planobject.c plpy_plpymodule.c \ plpy_procedure.c plpy_resultobject.c plpy_spi.c plpy_subxactobject.c plpy_typeio.c plpy_util.c GETTEXT_TRIGGERS = $(BACKEND_COMMON_GETTEXT_TRIGGERS) PLy_elog:2 PLy_exception_set:2 PLy_exception_set_plural:2,3 diff --git a/src/pl/plpython/plpy_cursorobject.c b/src/pl/plpython/plpy_cursorobject.c index 2ad663cf66..45ac25b2ae 100644 --- a/src/pl/plpython/plpy_cursorobject.c +++ b/src/pl/plpython/plpy_cursorobject.c @@ -9,6 +9,7 @@ #include #include "access/xact.h" +#include "catalog/pg_type.h" #include "mb/pg_wchar.h" #include "utils/memutils.h" @@ -42,37 +43,14 @@ static PyMethodDef PLy_cursor_methods[] = { static PyTypeObject PLy_CursorType = { PyVarObject_HEAD_INIT(NULL, 0) - "PLyCursor", /* tp_name */ - sizeof(PLyCursorObject), /* tp_size */ - 0, /* tp_itemsize */ - - /* - * methods - */ - PLy_cursor_dealloc, /* tp_dealloc */ - 0, /* tp_print */ - 0, /* tp_getattr */ - 0, /* tp_setattr */ - 0, /* tp_compare */ - 0, /* tp_repr */ - 0, /* tp_as_number */ - 0, /* tp_as_sequence */ - 0, /* tp_as_mapping */ - 0, /* tp_hash */ - 0, /* tp_call */ - 0, /* tp_str */ - 0, /* tp_getattro */ - 0, /* tp_setattro */ - 0, /* tp_as_buffer */ - Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE | Py_TPFLAGS_HAVE_ITER, /* tp_flags */ - PLy_cursor_doc, /* tp_doc */ - 0, /* tp_traverse */ - 0, /* tp_clear */ - 0, /* tp_richcompare */ - 0, /* tp_weaklistoffset */ - PyObject_SelfIter, /* tp_iter */ - PLy_cursor_iternext, /* tp_iternext */ - PLy_cursor_methods, /* tp_tpmethods */ + .tp_name = "PLyCursor", + .tp_basicsize = sizeof(PLyCursorObject), + .tp_dealloc = PLy_cursor_dealloc, + .tp_flags = Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE | Py_TPFLAGS_HAVE_ITER, + .tp_doc = PLy_cursor_doc, + .tp_iter = PyObject_SelfIter, + .tp_iternext = PLy_cursor_iternext, + .tp_methods = PLy_cursor_methods, }; void @@ -106,6 +84,7 @@ static PyObject * PLy_cursor_query(const char *query) { PLyCursorObject *cursor; + PLyExecutionContext *exec_ctx = PLy_current_execution_context(); volatile MemoryContext oldcontext; volatile ResourceOwner oldowner; @@ -116,7 +95,11 @@ PLy_cursor_query(const char *query) cursor->mcxt = AllocSetContextCreate(TopMemoryContext, "PL/Python cursor context", ALLOCSET_DEFAULT_SIZES); - PLy_typeinfo_init(&cursor->result, cursor->mcxt); + + /* Initialize for converting result tuples to Python */ + PLy_input_setup_func(&cursor->result, cursor->mcxt, + RECORDOID, -1, + exec_ctx->curr_proc); oldcontext = CurrentMemoryContext; oldowner = CurrentResourceOwner; @@ -125,7 +108,6 @@ PLy_cursor_query(const char *query) PG_TRY(); { - PLyExecutionContext 
*exec_ctx = PLy_current_execution_context(); SPIPlanPtr plan; Portal portal; @@ -146,6 +128,8 @@ PLy_cursor_query(const char *query) cursor->portalname = MemoryContextStrdup(cursor->mcxt, portal->name); + PinPortal(portal); + PLy_spi_subtransaction_commit(oldcontext, oldowner); } PG_CATCH(); @@ -166,6 +150,7 @@ PLy_cursor_plan(PyObject *ob, PyObject *args) volatile int nargs; int i; PLyPlanObject *plan; + PLyExecutionContext *exec_ctx = PLy_current_execution_context(); volatile MemoryContext oldcontext; volatile ResourceOwner oldowner; @@ -208,7 +193,11 @@ PLy_cursor_plan(PyObject *ob, PyObject *args) cursor->mcxt = AllocSetContextCreate(TopMemoryContext, "PL/Python cursor context", ALLOCSET_DEFAULT_SIZES); - PLy_typeinfo_init(&cursor->result, cursor->mcxt); + + /* Initialize for converting result tuples to Python */ + PLy_input_setup_func(&cursor->result, cursor->mcxt, + RECORDOID, -1, + exec_ctx->curr_proc); oldcontext = CurrentMemoryContext; oldowner = CurrentResourceOwner; @@ -217,7 +206,6 @@ PLy_cursor_plan(PyObject *ob, PyObject *args) PG_TRY(); { - PLyExecutionContext *exec_ctx = PLy_current_execution_context(); Portal portal; char *volatile nulls; volatile int j; @@ -229,39 +217,24 @@ PLy_cursor_plan(PyObject *ob, PyObject *args) for (j = 0; j < nargs; j++) { + PLyObToDatum *arg = &plan->args[j]; PyObject *elem; elem = PySequence_GetItem(args, j); - if (elem != Py_None) + PG_TRY(); { - PG_TRY(); - { - plan->values[j] = - plan->args[j].out.d.func(&(plan->args[j].out.d), - -1, - elem, - false); - } - PG_CATCH(); - { - Py_DECREF(elem); - PG_RE_THROW(); - } - PG_END_TRY(); + bool isnull; - Py_DECREF(elem); - nulls[j] = ' '; + plan->values[j] = PLy_output_convert(arg, elem, &isnull); + nulls[j] = isnull ? 'n' : ' '; } - else + PG_CATCH(); { Py_DECREF(elem); - plan->values[j] = - InputFunctionCall(&(plan->args[j].out.d.typfunc), - NULL, - plan->args[j].out.d.typioparam, - -1); - nulls[j] = 'n'; + PG_RE_THROW(); } + PG_END_TRY(); + Py_DECREF(elem); } portal = SPI_cursor_open(NULL, plan->plan, plan->values, nulls, @@ -272,6 +245,8 @@ PLy_cursor_plan(PyObject *ob, PyObject *args) cursor->portalname = MemoryContextStrdup(cursor->mcxt, portal->name); + PinPortal(portal); + PLy_spi_subtransaction_commit(oldcontext, oldowner); } PG_CATCH(); @@ -281,7 +256,7 @@ PLy_cursor_plan(PyObject *ob, PyObject *args) /* cleanup plan->values array */ for (k = 0; k < nargs; k++) { - if (!plan->args[k].out.d.typbyval && + if (!plan->args[k].typbyval && (plan->values[k] != PointerGetDatum(NULL))) { pfree(DatumGetPointer(plan->values[k])); @@ -298,7 +273,7 @@ PLy_cursor_plan(PyObject *ob, PyObject *args) for (i = 0; i < nargs; i++) { - if (!plan->args[i].out.d.typbyval && + if (!plan->args[i].typbyval && (plan->values[i] != PointerGetDatum(NULL))) { pfree(DatumGetPointer(plan->values[i])); @@ -323,7 +298,10 @@ PLy_cursor_dealloc(PyObject *arg) portal = GetPortalByName(cursor->portalname); if (PortalIsValid(portal)) + { + UnpinPortal(portal); SPI_cursor_close(portal); + } cursor->closed = true; } if (cursor->mcxt) @@ -339,6 +317,7 @@ PLy_cursor_iternext(PyObject *self) { PLyCursorObject *cursor; PyObject *ret; + PLyExecutionContext *exec_ctx = PLy_current_execution_context(); volatile MemoryContext oldcontext; volatile ResourceOwner oldowner; Portal portal; @@ -374,11 +353,11 @@ PLy_cursor_iternext(PyObject *self) } else { - if (cursor->result.is_rowtype != 1) - PLy_input_tuple_funcs(&cursor->result, SPI_tuptable->tupdesc); + PLy_input_setup_tuple(&cursor->result, SPI_tuptable->tupdesc, + exec_ctx->curr_proc); - 
ret = PLyDict_FromTuple(&cursor->result, SPI_tuptable->vals[0], - SPI_tuptable->tupdesc); + ret = PLy_input_from_tuple(&cursor->result, SPI_tuptable->vals[0], + SPI_tuptable->tupdesc); } SPI_freetuptable(SPI_tuptable); @@ -401,6 +380,7 @@ PLy_cursor_fetch(PyObject *self, PyObject *args) PLyCursorObject *cursor; int count; PLyResultObject *ret; + PLyExecutionContext *exec_ctx = PLy_current_execution_context(); volatile MemoryContext oldcontext; volatile ResourceOwner oldowner; Portal portal; @@ -437,16 +417,11 @@ PLy_cursor_fetch(PyObject *self, PyObject *args) { SPI_cursor_fetch(portal, true, count); - if (cursor->result.is_rowtype != 1) - PLy_input_tuple_funcs(&cursor->result, SPI_tuptable->tupdesc); - Py_DECREF(ret->status); ret->status = PyInt_FromLong(SPI_OK_FETCH); Py_DECREF(ret->nrows); - ret->nrows = (SPI_processed > (uint64) LONG_MAX) ? - PyFloat_FromDouble((double) SPI_processed) : - PyInt_FromLong((long) SPI_processed); + ret->nrows = PyLong_FromUnsignedLongLong(SPI_processed); if (SPI_processed != 0) { @@ -464,14 +439,24 @@ PLy_cursor_fetch(PyObject *self, PyObject *args) Py_DECREF(ret->rows); ret->rows = PyList_New(SPI_processed); - - for (i = 0; i < SPI_processed; i++) + if (!ret->rows) + { + Py_DECREF(ret); + ret = NULL; + } + else { - PyObject *row = PLyDict_FromTuple(&cursor->result, - SPI_tuptable->vals[i], - SPI_tuptable->tupdesc); + PLy_input_setup_tuple(&cursor->result, SPI_tuptable->tupdesc, + exec_ctx->curr_proc); - PyList_SetItem(ret->rows, i, row); + for (i = 0; i < SPI_processed; i++) + { + PyObject *row = PLy_input_from_tuple(&cursor->result, + SPI_tuptable->vals[i], + SPI_tuptable->tupdesc); + + PyList_SetItem(ret->rows, i, row); + } } } @@ -505,10 +490,10 @@ PLy_cursor_close(PyObject *self, PyObject *unused) return NULL; } + UnpinPortal(portal); SPI_cursor_close(portal); cursor->closed = true; } - Py_INCREF(Py_None); - return Py_None; + Py_RETURN_NONE; } diff --git a/src/pl/plpython/plpy_cursorobject.h b/src/pl/plpython/plpy_cursorobject.h index 018b169cbf..e4d2c0ed25 100644 --- a/src/pl/plpython/plpy_cursorobject.h +++ b/src/pl/plpython/plpy_cursorobject.h @@ -12,7 +12,7 @@ typedef struct PLyCursorObject { PyObject_HEAD char *portalname; - PLyTypeInfo result; + PLyDatumToOb result; bool closed; MemoryContext mcxt; } PLyCursorObject; diff --git a/src/pl/plpython/plpy_elog.c b/src/pl/plpython/plpy_elog.c index bb864899f6..3814a6c32d 100644 --- a/src/pl/plpython/plpy_elog.c +++ b/src/pl/plpython/plpy_elog.c @@ -44,8 +44,9 @@ static bool set_string_attr(PyObject *obj, char *attrname, char *str); * in the context. */ void -PLy_elog(int elevel, const char *fmt,...) +PLy_elog_impl(int elevel, const char *fmt,...) { + int save_errno = errno; char *xmsg; char *tbmsg; int tb_depth; @@ -96,6 +97,7 @@ PLy_elog(int elevel, const char *fmt,...) va_list ap; int needed; + errno = save_errno; va_start(ap, fmt); needed = appendStringInfoVA(&emsg, dgettext(TEXTDOMAIN, fmt), ap); va_end(ap); diff --git a/src/pl/plpython/plpy_elog.h b/src/pl/plpython/plpy_elog.h index e73177d130..b56ac41247 100644 --- a/src/pl/plpython/plpy_elog.h +++ b/src/pl/plpython/plpy_elog.h @@ -10,7 +10,29 @@ extern PyObject *PLy_exc_error; extern PyObject *PLy_exc_fatal; extern PyObject *PLy_exc_spi_error; -extern void PLy_elog(int elevel, const char *fmt,...) pg_attribute_printf(2, 3); +/* + * PLy_elog() + * + * See comments at elog() about the compiler hinting. + */ +#ifdef HAVE__BUILTIN_CONSTANT_P +#define PLy_elog(elevel, ...) 
\ + do { \ + PLy_elog_impl(elevel, __VA_ARGS__); \ + if (__builtin_constant_p(elevel) && (elevel) >= ERROR) \ + pg_unreachable(); \ + } while(0) +#else /* !HAVE__BUILTIN_CONSTANT_P */ +#define PLy_elog(elevel, ...) \ + do { \ + const int elevel_ = (elevel); \ + PLy_elog_impl(elevel_, __VA_ARGS__); \ + if (elevel_ >= ERROR) \ + pg_unreachable(); \ + } while(0) +#endif /* HAVE__BUILTIN_CONSTANT_P */ + +extern void PLy_elog_impl(int elevel, const char *fmt,...) pg_attribute_printf(2, 3); extern void PLy_exception_set(PyObject *exc, const char *fmt,...) pg_attribute_printf(2, 3); diff --git a/src/pl/plpython/plpy_exec.c b/src/pl/plpython/plpy_exec.c index c6938d00aa..47ed95dcc6 100644 --- a/src/pl/plpython/plpy_exec.c +++ b/src/pl/plpython/plpy_exec.c @@ -57,6 +57,7 @@ static void PLy_abort_open_subtransactions(int save_subxact_level); Datum PLy_exec_function(FunctionCallInfo fcinfo, PLyProcedure *proc) { + bool is_setof = proc->is_setof; Datum rv; PyObject *volatile plargs = NULL; PyObject *volatile plrv = NULL; @@ -73,7 +74,7 @@ PLy_exec_function(FunctionCallInfo fcinfo, PLyProcedure *proc) PG_TRY(); { - if (proc->is_setof) + if (is_setof) { /* First Call setup */ if (SRF_IS_FIRSTCALL()) @@ -93,6 +94,7 @@ PLy_exec_function(FunctionCallInfo fcinfo, PLyProcedure *proc) funcctx = SRF_PERCALL_SETUP(); Assert(funcctx != NULL); srfstate = (PLySRFState *) funcctx->user_fctx; + Assert(srfstate != NULL); } if (srfstate == NULL || srfstate->iter == NULL) @@ -125,7 +127,7 @@ PLy_exec_function(FunctionCallInfo fcinfo, PLyProcedure *proc) * We stay in the SPI context while doing this, because PyIter_Next() * calls back into Python code which might contain SPI calls. */ - if (proc->is_setof) + if (is_setof) { if (srfstate->iter == NULL) { @@ -197,63 +199,44 @@ PLy_exec_function(FunctionCallInfo fcinfo, PLyProcedure *proc) error_context_stack = &plerrcontext; /* - * If the function is declared to return void, the Python return value - * must be None. For void-returning functions, we also treat a None - * return value as a special "void datum" rather than NULL (as is the - * case for non-void-returning functions). + * For a procedure or function declared to return void, the Python + * return value must be None. For void-returning functions, we also + * treat a None return value as a special "void datum" rather than + * NULL (as is the case for non-void-returning functions). */ - if (proc->result.out.d.typoid == VOIDOID) + if (proc->result.typoid == VOIDOID) { if (plrv != Py_None) - ereport(ERROR, - (errcode(ERRCODE_DATATYPE_MISMATCH), - errmsg("PL/Python function with return type \"void\" did not return None"))); + { + if (proc->is_procedure) + ereport(ERROR, + (errcode(ERRCODE_DATATYPE_MISMATCH), + errmsg("PL/Python procedure did not return None"))); + else + ereport(ERROR, + (errcode(ERRCODE_DATATYPE_MISMATCH), + errmsg("PL/Python function with return type \"void\" did not return None"))); + } fcinfo->isnull = false; rv = (Datum) 0; } - else if (plrv == Py_None) + else if (plrv == Py_None && + srfstate && srfstate->iter == NULL) { - fcinfo->isnull = true; - /* * In a SETOF function, the iteration-ending null isn't a real * value; don't pass it through the input function, which might * complain. 
*/ - if (srfstate && srfstate->iter == NULL) - rv = (Datum) 0; - else if (proc->result.is_rowtype < 1) - rv = InputFunctionCall(&proc->result.out.d.typfunc, - NULL, - proc->result.out.d.typioparam, - -1); - else - /* Tuple as None */ - rv = (Datum) NULL; - } - else if (proc->result.is_rowtype >= 1) - { - TupleDesc desc; - - /* make sure it's not an unnamed record */ - Assert((proc->result.out.d.typoid == RECORDOID && - proc->result.out.d.typmod != -1) || - (proc->result.out.d.typoid != RECORDOID && - proc->result.out.d.typmod == -1)); - - desc = lookup_rowtype_tupdesc(proc->result.out.d.typoid, - proc->result.out.d.typmod); - - rv = PLyObject_ToCompositeDatum(&proc->result, desc, plrv, false); - fcinfo->isnull = (rv == (Datum) NULL); - - ReleaseTupleDesc(desc); + fcinfo->isnull = true; + rv = (Datum) 0; } else { - fcinfo->isnull = false; - rv = (proc->result.out.d.func) (&proc->result.out.d, -1, plrv, false); + /* Normal conversion of result */ + rv = PLy_output_convert(&proc->result, plrv, + &fcinfo->isnull); } } PG_CATCH(); @@ -328,20 +311,32 @@ PLy_exec_trigger(FunctionCallInfo fcinfo, PLyProcedure *proc) PyObject *volatile plargs = NULL; PyObject *volatile plrv = NULL; TriggerData *tdata; + TupleDesc rel_descr; Assert(CALLED_AS_TRIGGER(fcinfo)); + tdata = (TriggerData *) fcinfo->context; /* - * Input/output conversion for trigger tuples. Use the result TypeInfo - * variable to store the tuple conversion info. We do this over again on - * each call to cover the possibility that the relation's tupdesc changed - * since the trigger was last called. PLy_input_tuple_funcs and - * PLy_output_tuple_funcs are responsible for not doing repetitive work. + * Input/output conversion for trigger tuples. We use the result and + * result_in fields to store the tuple conversion info. We do this over + * again on each call to cover the possibility that the relation's tupdesc + * changed since the trigger was last called. The PLy_xxx_setup_func + * calls should only happen once, but PLy_input_setup_tuple and + * PLy_output_setup_tuple are responsible for not doing repetitive work. 
*/ - tdata = (TriggerData *) fcinfo->context; - - PLy_input_tuple_funcs(&(proc->result), tdata->tg_relation->rd_att); - PLy_output_tuple_funcs(&(proc->result), tdata->tg_relation->rd_att); + rel_descr = RelationGetDescr(tdata->tg_relation); + if (proc->result.typoid != rel_descr->tdtypeid) + PLy_output_setup_func(&proc->result, proc->mcxt, + rel_descr->tdtypeid, + rel_descr->tdtypmod, + proc); + if (proc->result_in.typoid != rel_descr->tdtypeid) + PLy_input_setup_func(&proc->result_in, proc->mcxt, + rel_descr->tdtypeid, + rel_descr->tdtypmod, + proc); + PLy_output_setup_tuple(&proc->result, rel_descr, proc); + PLy_input_setup_tuple(&proc->result_in, rel_descr, proc); PG_TRY(); { @@ -434,48 +429,17 @@ PLy_function_build_args(FunctionCallInfo fcinfo, PLyProcedure *proc) PG_TRY(); { args = PyList_New(proc->nargs); + if (!args) + return NULL; + for (i = 0; i < proc->nargs; i++) { - if (proc->args[i].is_rowtype > 0) - { - if (fcinfo->argnull[i]) - arg = NULL; - else - { - HeapTupleHeader td; - Oid tupType; - int32 tupTypmod; - TupleDesc tupdesc; - HeapTupleData tmptup; - - td = DatumGetHeapTupleHeader(fcinfo->arg[i]); - /* Extract rowtype info and find a tupdesc */ - tupType = HeapTupleHeaderGetTypeId(td); - tupTypmod = HeapTupleHeaderGetTypMod(td); - tupdesc = lookup_rowtype_tupdesc(tupType, tupTypmod); - - /* Set up I/O funcs if not done yet */ - if (proc->args[i].is_rowtype != 1) - PLy_input_tuple_funcs(&(proc->args[i]), tupdesc); - - /* Build a temporary HeapTuple control structure */ - tmptup.t_len = HeapTupleHeaderGetDatumLength(td); - tmptup.t_data = td; - - arg = PLyDict_FromTuple(&(proc->args[i]), &tmptup, tupdesc); - ReleaseTupleDesc(tupdesc); - } - } + PLyDatumToOb *arginfo = &proc->args[i]; + + if (fcinfo->argnull[i]) + arg = NULL; else - { - if (fcinfo->argnull[i]) - arg = NULL; - else - { - arg = (proc->args[i].in.d.func) (&(proc->args[i].in.d), - fcinfo->arg[i]); - } - } + arg = PLy_input_convert(arginfo, fcinfo->arg[i]); if (arg == NULL) { @@ -493,7 +457,7 @@ PLy_function_build_args(FunctionCallInfo fcinfo, PLyProcedure *proc) } /* Set up output conversion for functions returning RECORD */ - if (proc->result.out.d.typoid == RECORDOID) + if (proc->result.typoid == RECORDOID) { TupleDesc desc; @@ -504,7 +468,7 @@ PLy_function_build_args(FunctionCallInfo fcinfo, PLyProcedure *proc) "that cannot accept type record"))); /* cache the output conversion functions */ - PLy_output_record_funcs(&(proc->result), desc); + PLy_output_setup_record(&proc->result, desc, proc); } } PG_CATCH(); @@ -715,7 +679,8 @@ plpython_return_error_callback(void *arg) { PLyExecutionContext *exec_ctx = PLy_current_execution_context(); - if (exec_ctx->curr_proc) + if (exec_ctx->curr_proc && + !exec_ctx->curr_proc->is_procedure) errcontext("while creating return value"); } @@ -723,6 +688,7 @@ static PyObject * PLy_trigger_build_args(FunctionCallInfo fcinfo, PLyProcedure *proc, HeapTuple *rv) { TriggerData *tdata = (TriggerData *) fcinfo->context; + TupleDesc rel_descr = RelationGetDescr(tdata->tg_relation); PyObject *pltname, *pltevent, *pltwhen, @@ -740,7 +706,7 @@ PLy_trigger_build_args(FunctionCallInfo fcinfo, PLyProcedure *proc, HeapTuple *r { pltdata = PyDict_New(); if (!pltdata) - PLy_elog(ERROR, "could not create new dictionary while building trigger arguments"); + return NULL; pltname = PyString_FromString(tdata->tg_trigger->tgname); PyDict_SetItemString(pltdata, "name", pltname); @@ -790,8 +756,9 @@ PLy_trigger_build_args(FunctionCallInfo fcinfo, PLyProcedure *proc, HeapTuple *r pltevent = 
PyString_FromString("INSERT"); PyDict_SetItemString(pltdata, "old", Py_None); - pytnew = PLyDict_FromTuple(&(proc->result), tdata->tg_trigtuple, - tdata->tg_relation->rd_att); + pytnew = PLy_input_from_tuple(&proc->result_in, + tdata->tg_trigtuple, + rel_descr); PyDict_SetItemString(pltdata, "new", pytnew); Py_DECREF(pytnew); *rv = tdata->tg_trigtuple; @@ -801,8 +768,9 @@ PLy_trigger_build_args(FunctionCallInfo fcinfo, PLyProcedure *proc, HeapTuple *r pltevent = PyString_FromString("DELETE"); PyDict_SetItemString(pltdata, "new", Py_None); - pytold = PLyDict_FromTuple(&(proc->result), tdata->tg_trigtuple, - tdata->tg_relation->rd_att); + pytold = PLy_input_from_tuple(&proc->result_in, + tdata->tg_trigtuple, + rel_descr); PyDict_SetItemString(pltdata, "old", pytold); Py_DECREF(pytold); *rv = tdata->tg_trigtuple; @@ -811,12 +779,14 @@ PLy_trigger_build_args(FunctionCallInfo fcinfo, PLyProcedure *proc, HeapTuple *r { pltevent = PyString_FromString("UPDATE"); - pytnew = PLyDict_FromTuple(&(proc->result), tdata->tg_newtuple, - tdata->tg_relation->rd_att); + pytnew = PLy_input_from_tuple(&proc->result_in, + tdata->tg_newtuple, + rel_descr); PyDict_SetItemString(pltdata, "new", pytnew); Py_DECREF(pytnew); - pytold = PLyDict_FromTuple(&(proc->result), tdata->tg_trigtuple, - tdata->tg_relation->rd_att); + pytold = PLy_input_from_tuple(&proc->result_in, + tdata->tg_trigtuple, + rel_descr); PyDict_SetItemString(pltdata, "old", pytold); Py_DECREF(pytold); *rv = tdata->tg_newtuple; @@ -869,6 +839,11 @@ PLy_trigger_build_args(FunctionCallInfo fcinfo, PLyProcedure *proc, HeapTuple *r PyObject *pltarg; pltargs = PyList_New(tdata->tg_trigger->tgnargs); + if (!pltargs) + { + Py_DECREF(pltdata); + return NULL; + } for (i = 0; i < tdata->tg_trigger->tgnargs; i++) { pltarg = PyString_FromString(tdata->tg_trigger->tgargs[i]); @@ -897,6 +872,9 @@ PLy_trigger_build_args(FunctionCallInfo fcinfo, PLyProcedure *proc, HeapTuple *r return pltdata; } +/* + * Apply changes requested by a MODIFY return from a trigger function. 
+ */ static HeapTuple PLy_modify_tuple(PLyProcedure *proc, PyObject *pltd, TriggerData *tdata, HeapTuple otup) @@ -938,7 +916,7 @@ PLy_modify_tuple(PLyProcedure *proc, PyObject *pltd, TriggerData *tdata, plkeys = PyDict_Keys(plntup); nkeys = PyList_Size(plkeys); - tupdesc = tdata->tg_relation->rd_att; + tupdesc = RelationGetDescr(tdata->tg_relation); modvalues = (Datum *) palloc0(tupdesc->natts * sizeof(Datum)); modnulls = (bool *) palloc0(tupdesc->natts * sizeof(bool)); @@ -974,7 +952,6 @@ PLy_modify_tuple(PLyProcedure *proc, PyObject *pltd, TriggerData *tdata, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), errmsg("cannot set system attribute \"%s\"", plattstr))); - att = &proc->result.out.r.atts[attn - 1]; plval = PyDict_GetItem(plntup, platt); if (plval == NULL) @@ -982,24 +959,12 @@ PLy_modify_tuple(PLyProcedure *proc, PyObject *pltd, TriggerData *tdata, Py_INCREF(plval); - if (plval != Py_None) - { - modvalues[attn - 1] = - (att->func) (att, - tupdesc->attrs[attn - 1]->atttypmod, - plval, - false); - modnulls[attn - 1] = false; - } - else - { - modvalues[attn - 1] = - InputFunctionCall(&att->typfunc, - NULL, - att->typioparam, - tupdesc->attrs[attn - 1]->atttypmod); - modnulls[attn - 1] = true; - } + /* We assume proc->result is set up to convert tuples properly */ + att = &proc->result.u.tuple.atts[attn - 1]; + + modvalues[attn - 1] = PLy_output_convert(att, + plval, + &modnulls[attn - 1]); modrepls[attn - 1] = true; Py_DECREF(plval); diff --git a/src/pl/plpython/plpy_main.c b/src/pl/plpython/plpy_main.c index 7df50c09c8..6a66eba176 100644 --- a/src/pl/plpython/plpy_main.c +++ b/src/pl/plpython/plpy_main.c @@ -60,7 +60,7 @@ static void plpython_error_callback(void *arg); static void plpython_inline_error_callback(void *arg); static void PLy_init_interp(void); -static PLyExecutionContext *PLy_push_execution_context(void); +static PLyExecutionContext *PLy_push_execution_context(bool atomic_context); static void PLy_pop_execution_context(void); /* static state for Python library conflict detection */ @@ -167,7 +167,7 @@ PLy_init_interp(void) PLy_interp_globals = PyModule_GetDict(mainmod); PLy_interp_safe_globals = PyDict_New(); if (PLy_interp_safe_globals == NULL) - PLy_elog(ERROR, "could not create globals"); + PLy_elog(ERROR, NULL); PyDict_SetItemString(PLy_interp_globals, "GD", PLy_interp_safe_globals); Py_DECREF(mainmod); if (PLy_interp_globals == NULL || PyErr_Occurred()) @@ -219,36 +219,44 @@ plpython2_validator(PG_FUNCTION_ARGS) Datum plpython_call_handler(PG_FUNCTION_ARGS) { + bool nonatomic; Datum retval; PLyExecutionContext *exec_ctx; ErrorContextCallback plerrcontext; PLy_initialize(); + nonatomic = fcinfo->context && + IsA(fcinfo->context, CallContext) && + !castNode(CallContext, fcinfo->context)->atomic; + /* Note: SPI_finish() happens in plpy_exec.c, which is dubious design */ - if (SPI_connect() != SPI_OK_CONNECT) + if (SPI_connect_ext(nonatomic ? SPI_OPT_NONATOMIC : 0) != SPI_OK_CONNECT) elog(ERROR, "SPI_connect failed"); /* * Push execution context onto stack. It is important that this get * popped again, so avoid putting anything that could throw error between - * here and the PG_TRY. (plpython_error_callback expects the stack entry - * to be there, so we have to make the context first.) - */ - exec_ctx = PLy_push_execution_context(); - - /* - * Setup error traceback support for ereport() + * here and the PG_TRY. 
*/ - plerrcontext.callback = plpython_error_callback; - plerrcontext.previous = error_context_stack; - error_context_stack = &plerrcontext; + exec_ctx = PLy_push_execution_context(!nonatomic); PG_TRY(); { Oid funcoid = fcinfo->flinfo->fn_oid; PLyProcedure *proc; + /* + * Setup error traceback support for ereport(). Note that the PG_TRY + * structure pops this for us again at exit, so we needn't do that + * explicitly, nor do we risk the callback getting called after we've + * destroyed the exec_ctx. + */ + plerrcontext.callback = plpython_error_callback; + plerrcontext.arg = exec_ctx; + plerrcontext.previous = error_context_stack; + error_context_stack = &plerrcontext; + if (CALLED_AS_TRIGGER(fcinfo)) { Relation tgrel = ((TriggerData *) fcinfo->context)->tg_relation; @@ -274,9 +282,7 @@ plpython_call_handler(PG_FUNCTION_ARGS) } PG_END_TRY(); - /* Pop the error context stack */ - error_context_stack = plerrcontext.previous; - /* ... and then the execution context */ + /* Destroy the execution context */ PLy_pop_execution_context(); return retval; @@ -303,7 +309,7 @@ plpython_inline_handler(PG_FUNCTION_ARGS) PLy_initialize(); /* Note: SPI_finish() happens in plpy_exec.c, which is dubious design */ - if (SPI_connect() != SPI_OK_CONNECT) + if (SPI_connect_ext(codeblock->atomic ? 0 : SPI_OPT_NONATOMIC) != SPI_OK_CONNECT) elog(ERROR, "SPI_connect failed"); MemSet(&fake_fcinfo, 0, sizeof(fake_fcinfo)); @@ -318,26 +324,32 @@ plpython_inline_handler(PG_FUNCTION_ARGS) ALLOCSET_DEFAULT_SIZES); proc.pyname = MemoryContextStrdup(proc.mcxt, "__plpython_inline_block"); proc.langid = codeblock->langOid; - proc.result.out.d.typoid = VOIDOID; /* - * Push execution context onto stack. It is important that this get - * popped again, so avoid putting anything that could throw error between - * here and the PG_TRY. (plpython_inline_error_callback doesn't currently - * need the stack entry, but for consistency with plpython_call_handler we - * do it in this order.) + * This is currently sufficient to get PLy_exec_function to work, but + * someday we might need to be honest and use PLy_output_setup_func. */ - exec_ctx = PLy_push_execution_context(); + proc.result.typoid = VOIDOID; /* - * Setup error traceback support for ereport() + * Push execution context onto stack. It is important that this get + * popped again, so avoid putting anything that could throw error between + * here and the PG_TRY. */ - plerrcontext.callback = plpython_inline_error_callback; - plerrcontext.previous = error_context_stack; - error_context_stack = &plerrcontext; + exec_ctx = PLy_push_execution_context(codeblock->atomic); PG_TRY(); { + /* + * Setup error traceback support for ereport(). + * plpython_inline_error_callback doesn't currently need exec_ctx, but + * for consistency with plpython_call_handler we do it the same way. + */ + plerrcontext.callback = plpython_inline_error_callback; + plerrcontext.arg = exec_ctx; + plerrcontext.previous = error_context_stack; + error_context_stack = &plerrcontext; + PLy_procedure_compile(&proc, codeblock->source_text); exec_ctx->curr_proc = &proc; PLy_exec_function(&fake_fcinfo, &proc); @@ -351,9 +363,7 @@ plpython_inline_handler(PG_FUNCTION_ARGS) } PG_END_TRY(); - /* Pop the error context stack */ - error_context_stack = plerrcontext.previous; - /* ... 
and then the execution context */ + /* Destroy the execution context */ PLy_pop_execution_context(); /* Now clean up the transient procedure we made */ @@ -381,11 +391,17 @@ PLy_procedure_is_trigger(Form_pg_proc procStruct) static void plpython_error_callback(void *arg) { - PLyExecutionContext *exec_ctx = PLy_current_execution_context(); + PLyExecutionContext *exec_ctx = (PLyExecutionContext *) arg; if (exec_ctx->curr_proc) - errcontext("PL/Python function \"%s\"", - PLy_procedure_name(exec_ctx->curr_proc)); + { + if (exec_ctx->curr_proc->is_procedure) + errcontext("PL/Python procedure \"%s\"", + PLy_procedure_name(exec_ctx->curr_proc)); + else + errcontext("PL/Python function \"%s\"", + PLy_procedure_name(exec_ctx->curr_proc)); + } } static void @@ -419,12 +435,14 @@ PLy_get_scratch_context(PLyExecutionContext *context) } static PLyExecutionContext * -PLy_push_execution_context(void) +PLy_push_execution_context(bool atomic_context) { PLyExecutionContext *context; + /* Pick a memory context similar to what SPI uses. */ context = (PLyExecutionContext *) - MemoryContextAlloc(TopTransactionContext, sizeof(PLyExecutionContext)); + MemoryContextAlloc(atomic_context ? TopTransactionContext : PortalContext, + sizeof(PLyExecutionContext)); context->curr_proc = NULL; context->scratch_ctx = NULL; context->next = PLy_execution_contexts; diff --git a/src/pl/plpython/plpy_planobject.c b/src/pl/plpython/plpy_planobject.c index 390b4e90d4..96ea24cbcf 100644 --- a/src/pl/plpython/plpy_planobject.c +++ b/src/pl/plpython/plpy_planobject.c @@ -34,37 +34,12 @@ static PyMethodDef PLy_plan_methods[] = { static PyTypeObject PLy_PlanType = { PyVarObject_HEAD_INIT(NULL, 0) - "PLyPlan", /* tp_name */ - sizeof(PLyPlanObject), /* tp_size */ - 0, /* tp_itemsize */ - - /* - * methods - */ - PLy_plan_dealloc, /* tp_dealloc */ - 0, /* tp_print */ - 0, /* tp_getattr */ - 0, /* tp_setattr */ - 0, /* tp_compare */ - 0, /* tp_repr */ - 0, /* tp_as_number */ - 0, /* tp_as_sequence */ - 0, /* tp_as_mapping */ - 0, /* tp_hash */ - 0, /* tp_call */ - 0, /* tp_str */ - 0, /* tp_getattro */ - 0, /* tp_setattro */ - 0, /* tp_as_buffer */ - Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE, /* tp_flags */ - PLy_plan_doc, /* tp_doc */ - 0, /* tp_traverse */ - 0, /* tp_clear */ - 0, /* tp_richcompare */ - 0, /* tp_weaklistoffset */ - 0, /* tp_iter */ - 0, /* tp_iternext */ - PLy_plan_methods, /* tp_tpmethods */ + .tp_name = "PLyPlan", + .tp_basicsize = sizeof(PLyPlanObject), + .tp_dealloc = PLy_plan_dealloc, + .tp_flags = Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE, + .tp_doc = PLy_plan_doc, + .tp_methods = PLy_plan_methods, }; void diff --git a/src/pl/plpython/plpy_planobject.h b/src/pl/plpython/plpy_planobject.h index 5adc957053..729effb163 100644 --- a/src/pl/plpython/plpy_planobject.h +++ b/src/pl/plpython/plpy_planobject.h @@ -16,7 +16,7 @@ typedef struct PLyPlanObject int nargs; Oid *types; Datum *values; - PLyTypeInfo *args; + PLyObToDatum *args; MemoryContext mcxt; } PLyPlanObject; diff --git a/src/pl/plpython/plpy_plpymodule.c b/src/pl/plpython/plpy_plpymodule.c index feaf203256..23e49e4b75 100644 --- a/src/pl/plpython/plpy_plpymodule.c +++ b/src/pl/plpython/plpy_plpymodule.c @@ -6,8 +6,10 @@ #include "postgres.h" +#include "access/xact.h" #include "mb/pg_wchar.h" #include "utils/builtins.h" +#include "utils/snapmgr.h" #include "plpython.h" @@ -15,6 +17,7 @@ #include "plpy_cursorobject.h" #include "plpy_elog.h" +#include "plpy_main.h" #include "plpy_planobject.h" #include "plpy_resultobject.h" #include "plpy_spi.h" @@ -41,6 +44,8 @@ 
static PyObject *PLy_fatal(PyObject *self, PyObject *args, PyObject *kw); static PyObject *PLy_quote_literal(PyObject *self, PyObject *args); static PyObject *PLy_quote_nullable(PyObject *self, PyObject *args); static PyObject *PLy_quote_ident(PyObject *self, PyObject *args); +static PyObject *PLy_commit(PyObject *self, PyObject *args); +static PyObject *PLy_rollback(PyObject *self, PyObject *args); /* A list of all known exceptions, generated from backend/utils/errcodes.txt */ @@ -95,6 +100,12 @@ static PyMethodDef PLy_methods[] = { */ {"cursor", PLy_cursor, METH_VARARGS, NULL}, + /* + * transaction control + */ + {"commit", PLy_commit, METH_NOARGS, NULL}, + {"rollback", PLy_rollback, METH_NOARGS, NULL}, + {NULL, NULL, 0, NULL} }; @@ -104,23 +115,17 @@ static PyMethodDef PLy_exc_methods[] = { #if PY_MAJOR_VERSION >= 3 static PyModuleDef PLy_module = { - PyModuleDef_HEAD_INIT, /* m_base */ - "plpy", /* m_name */ - NULL, /* m_doc */ - -1, /* m_size */ - PLy_methods, /* m_methods */ + PyModuleDef_HEAD_INIT, + .m_name = "plpy", + .m_size = -1, + .m_methods = PLy_methods, }; static PyModuleDef PLy_exc_module = { - PyModuleDef_HEAD_INIT, /* m_base */ - "spiexceptions", /* m_name */ - NULL, /* m_doc */ - -1, /* m_size */ - PLy_exc_methods, /* m_methods */ - NULL, /* m_reload */ - NULL, /* m_traverse */ - NULL, /* m_clear */ - NULL /* m_free */ + PyModuleDef_HEAD_INIT, + .m_name = "spiexceptions", + .m_size = -1, + .m_methods = PLy_exc_methods, }; /* @@ -233,7 +238,7 @@ PLy_create_exception(char *name, PyObject *base, PyObject *dict, exc = PyErr_NewException(name, base, dict); if (exc == NULL) - PLy_elog(ERROR, "could not create exception \"%s\"", name); + PLy_elog(ERROR, NULL); /* * PyModule_AddObject does not add a refcount to the object, for some odd @@ -268,7 +273,7 @@ PLy_generate_spi_exceptions(PyObject *mod, PyObject *base) PyObject *dict = PyDict_New(); if (dict == NULL) - PLy_elog(ERROR, "could not generate SPI exceptions"); + PLy_elog(ERROR, NULL); sqlstate = PyString_FromString(unpack_sql_state(exception_map[i].sqlstate)); if (sqlstate == NULL) @@ -575,6 +580,37 @@ PLy_output(volatile int level, PyObject *self, PyObject *args, PyObject *kw) /* * return a legal object so the interpreter will continue on its merry way */ - Py_INCREF(Py_None); - return Py_None; + Py_RETURN_NONE; +} + +static PyObject * +PLy_commit(PyObject *self, PyObject *args) +{ + PLyExecutionContext *exec_ctx = PLy_current_execution_context(); + + HoldPinnedPortals(); + + SPI_commit(); + SPI_start_transaction(); + + /* was cleared at transaction end, reset pointer */ + exec_ctx->scratch_ctx = NULL; + + Py_RETURN_NONE; +} + +static PyObject * +PLy_rollback(PyObject *self, PyObject *args) +{ + PLyExecutionContext *exec_ctx = PLy_current_execution_context(); + + HoldPinnedPortals(); + + SPI_rollback(); + SPI_start_transaction(); + + /* was cleared at transaction end, reset pointer */ + exec_ctx->scratch_ctx = NULL; + + Py_RETURN_NONE; } diff --git a/src/pl/plpython/plpy_procedure.c b/src/pl/plpython/plpy_procedure.c index 26acc88b27..50b07cad82 100644 --- a/src/pl/plpython/plpy_procedure.c +++ b/src/pl/plpython/plpy_procedure.c @@ -10,11 +10,11 @@ #include "access/transam.h" #include "funcapi.h" #include "catalog/pg_proc.h" -#include "catalog/pg_proc_fn.h" #include "catalog/pg_type.h" #include "utils/builtins.h" #include "utils/hsearch.h" #include "utils/inval.h" +#include "utils/lsyscache.h" #include "utils/memutils.h" #include "utils/syscache.h" @@ -29,7 +29,6 @@ static HTAB *PLy_procedure_cache = NULL; static 
PLyProcedure *PLy_procedure_create(HeapTuple procTup, Oid fn_oid, bool is_trigger); -static bool PLy_procedure_argument_valid(PLyTypeInfo *arg); static bool PLy_procedure_valid(PLyProcedure *proc, HeapTuple procTup); static char *PLy_procedure_munge_source(const char *name, const char *src); @@ -47,9 +46,7 @@ init_procedure_caches(void) } /* - * Get the name of the last procedure called by the backend (the - * innermost, if a plpython procedure call calls the backend and the - * backend calls another plpython procedure). + * PLy_procedure_name: get the name of the specified procedure. * * NB: this returns the SQL name, not the internal Python procedure name */ @@ -165,8 +162,9 @@ PLy_procedure_create(HeapTuple procTup, Oid fn_oid, bool is_trigger) *ptr = '_'; } + /* Create long-lived context that all procedure info will live in */ cxt = AllocSetContextCreate(TopMemoryContext, - procName, + "PL/Python function", ALLOCSET_DEFAULT_SIZES); oldcxt = MemoryContextSwitchTo(cxt); @@ -183,16 +181,16 @@ PLy_procedure_create(HeapTuple procTup, Oid fn_oid, bool is_trigger) int i; proc->proname = pstrdup(NameStr(procStruct->proname)); + MemoryContextSetIdentifier(cxt, proc->proname); proc->pyname = pstrdup(procName); proc->fn_xmin = HeapTupleHeaderGetRawXmin(procTup->t_data); proc->fn_tid = procTup->t_self; proc->fn_readonly = (procStruct->provolatile != PROVOLATILE_VOLATILE); proc->is_setof = procStruct->proretset; - PLy_typeinfo_init(&proc->result, proc->mcxt); + proc->is_procedure = (procStruct->prokind == PROKIND_PROCEDURE); proc->src = NULL; proc->argnames = NULL; - for (i = 0; i < FUNC_MAX_ARGS; i++) - PLy_typeinfo_init(&proc->args[i], proc->mcxt); + proc->args = NULL; proc->nargs = 0; proc->langid = procStruct->prolang; protrftypes_datum = SysCacheGetAttr(PROCOID, procTup, @@ -211,50 +209,48 @@ PLy_procedure_create(HeapTuple procTup, Oid fn_oid, bool is_trigger) */ if (!is_trigger) { + Oid rettype = procStruct->prorettype; HeapTuple rvTypeTup; Form_pg_type rvTypeStruct; - rvTypeTup = SearchSysCache1(TYPEOID, - ObjectIdGetDatum(procStruct->prorettype)); + rvTypeTup = SearchSysCache1(TYPEOID, ObjectIdGetDatum(rettype)); if (!HeapTupleIsValid(rvTypeTup)) - elog(ERROR, "cache lookup failed for type %u", - procStruct->prorettype); + elog(ERROR, "cache lookup failed for type %u", rettype); rvTypeStruct = (Form_pg_type) GETSTRUCT(rvTypeTup); /* Disallow pseudotype result, except for void or record */ if (rvTypeStruct->typtype == TYPTYPE_PSEUDO) { - if (procStruct->prorettype == TRIGGEROID) + if (rettype == VOIDOID || + rettype == RECORDOID) + /* okay */ ; + else if (rettype == TRIGGEROID || rettype == EVTTRIGGEROID) ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), errmsg("trigger functions can only be called as triggers"))); - else if (procStruct->prorettype != VOIDOID && - procStruct->prorettype != RECORDOID) + else ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), errmsg("PL/Python functions cannot return type %s", - format_type_be(procStruct->prorettype)))); + format_type_be(rettype)))); } - if (rvTypeStruct->typtype == TYPTYPE_COMPOSITE || - procStruct->prorettype == RECORDOID) - { - /* - * Tuple: set up later, during first call to - * PLy_function_handler - */ - proc->result.out.d.typoid = procStruct->prorettype; - proc->result.out.d.typmod = -1; - proc->result.is_rowtype = 2; - } - else - { - /* do the real work */ - PLy_output_datum_func(&proc->result, rvTypeTup, proc->langid, proc->trftypes); - } + /* set up output function for procedure result */ + 
PLy_output_setup_func(&proc->result, proc->mcxt, + rettype, -1, proc); ReleaseSysCache(rvTypeTup); } + else + { + /* + * In a trigger function, we use proc->result and proc->result_in + * for converting tuples, but we don't yet have enough info to set + * them up. PLy_exec_trigger will deal with it. + */ + proc->result.typoid = InvalidOid; + proc->result_in.typoid = InvalidOid; + } /* * Now get information required for input conversion of the @@ -287,7 +283,10 @@ PLy_procedure_create(HeapTuple procTup, Oid fn_oid, bool is_trigger) } } + /* Allocate arrays for per-input-argument data */ proc->argnames = (char **) palloc0(sizeof(char *) * proc->nargs); + proc->args = (PLyDatumToOb *) palloc0(sizeof(PLyDatumToOb) * proc->nargs); + for (i = pos = 0; i < total; i++) { HeapTuple argTypeTup; @@ -306,28 +305,17 @@ PLy_procedure_create(HeapTuple procTup, Oid fn_oid, bool is_trigger) elog(ERROR, "cache lookup failed for type %u", types[i]); argTypeStruct = (Form_pg_type) GETSTRUCT(argTypeTup); - /* check argument type is OK, set up I/O function info */ - switch (argTypeStruct->typtype) - { - case TYPTYPE_PSEUDO: - /* Disallow pseudotype argument */ - ereport(ERROR, - (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("PL/Python functions cannot accept type %s", - format_type_be(types[i])))); - break; - case TYPTYPE_COMPOSITE: - /* we'll set IO funcs at first call */ - proc->args[pos].is_rowtype = 2; - break; - default: - PLy_input_datum_func(&(proc->args[pos]), - types[i], - argTypeTup, - proc->langid, - proc->trftypes); - break; - } + /* disallow pseudotype arguments */ + if (argTypeStruct->typtype == TYPTYPE_PSEUDO) + ereport(ERROR, + (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), + errmsg("PL/Python functions cannot accept type %s", + format_type_be(types[i])))); + + /* set up I/O function info */ + PLy_input_setup_func(&proc->args[pos], proc->mcxt, + types[i], -1, /* typmod not known */ + proc); /* get argument name */ proc->argnames[pos] = names ? pstrdup(names[i]) : NULL; @@ -379,6 +367,8 @@ PLy_procedure_compile(PLyProcedure *proc, const char *src) * all functions */ proc->statics = PyDict_New(); + if (!proc->statics) + PLy_elog(ERROR, NULL); PyDict_SetItemString(proc->globals, "SD", proc->statics); /* @@ -424,54 +414,12 @@ PLy_procedure_delete(PLyProcedure *proc) MemoryContextDelete(proc->mcxt); } -/* - * Check if our cached information about a datatype is still valid - */ -static bool -PLy_procedure_argument_valid(PLyTypeInfo *arg) -{ - HeapTuple relTup; - bool valid; - - /* Nothing to cache unless type is composite */ - if (arg->is_rowtype != 1) - return true; - - /* - * Zero typ_relid means that we got called on an output argument of a - * function returning an unnamed record type; the info for it can't - * change. 
- */ - if (!OidIsValid(arg->typ_relid)) - return true; - - /* Else we should have some cached data */ - Assert(TransactionIdIsValid(arg->typrel_xmin)); - Assert(ItemPointerIsValid(&arg->typrel_tid)); - - /* Get the pg_class tuple for the data type */ - relTup = SearchSysCache1(RELOID, ObjectIdGetDatum(arg->typ_relid)); - if (!HeapTupleIsValid(relTup)) - elog(ERROR, "cache lookup failed for relation %u", arg->typ_relid); - - /* If it has changed, the cached data is not valid */ - valid = (arg->typrel_xmin == HeapTupleHeaderGetRawXmin(relTup->t_data) && - ItemPointerEquals(&arg->typrel_tid, &relTup->t_self)); - - ReleaseSysCache(relTup); - - return valid; -} - /* * Decide whether a cached PLyProcedure struct is still valid */ static bool PLy_procedure_valid(PLyProcedure *proc, HeapTuple procTup) { - int i; - bool valid; - if (proc == NULL) return false; @@ -480,22 +428,7 @@ PLy_procedure_valid(PLyProcedure *proc, HeapTuple procTup) ItemPointerEquals(&proc->fn_tid, &procTup->t_self))) return false; - /* Else check the input argument datatypes */ - valid = true; - for (i = 0; i < proc->nargs; i++) - { - valid = PLy_procedure_argument_valid(&proc->args[i]); - - /* Short-circuit on first changed argument */ - if (!valid) - break; - } - - /* if the output type is composite, it might have changed */ - if (valid) - valid = PLy_procedure_argument_valid(&proc->result); - - return valid; + return true; } static char * diff --git a/src/pl/plpython/plpy_procedure.h b/src/pl/plpython/plpy_procedure.h index d05944fc39..8968b5c92e 100644 --- a/src/pl/plpython/plpy_procedure.h +++ b/src/pl/plpython/plpy_procedure.h @@ -30,13 +30,14 @@ typedef struct PLyProcedure TransactionId fn_xmin; ItemPointerData fn_tid; bool fn_readonly; - bool is_setof; /* true, if procedure returns result set */ - PLyTypeInfo result; /* also used to store info for trigger tuple - * type */ + bool is_setof; /* true, if function returns result set */ + bool is_procedure; + PLyObToDatum result; /* Function result output conversion info */ + PLyDatumToOb result_in; /* For converting input tuples in a trigger */ char *src; /* textual procedure code, after mangling */ char **argnames; /* Argument names */ - PLyTypeInfo args[FUNC_MAX_ARGS]; - int nargs; + PLyDatumToOb *args; /* Argument input conversion info */ + int nargs; /* Number of elements in above arrays */ Oid langid; /* OID of plpython pg_language entry */ List *trftypes; /* OID list of transform types */ PyObject *code; /* compiled procedure code */ diff --git a/src/pl/plpython/plpy_resultobject.c b/src/pl/plpython/plpy_resultobject.c index 077bde6dc3..20cf3edea8 100644 --- a/src/pl/plpython/plpy_resultobject.c +++ b/src/pl/plpython/plpy_resultobject.c @@ -20,8 +20,6 @@ static PyObject *PLy_result_nrows(PyObject *self, PyObject *args); static PyObject *PLy_result_status(PyObject *self, PyObject *args); static Py_ssize_t PLy_result_length(PyObject *arg); static PyObject *PLy_result_item(PyObject *arg, Py_ssize_t idx); -static PyObject *PLy_result_slice(PyObject *arg, Py_ssize_t lidx, Py_ssize_t hidx); -static int PLy_result_ass_slice(PyObject *arg, Py_ssize_t lidx, Py_ssize_t hidx, PyObject *slice); static PyObject *PLy_result_str(PyObject *arg); static PyObject *PLy_result_subscript(PyObject *arg, PyObject *item); static int PLy_result_ass_subscript(PyObject *self, PyObject *item, PyObject *value); @@ -31,19 +29,14 @@ static char PLy_result_doc[] = { }; static PySequenceMethods PLy_result_as_sequence = { - PLy_result_length, /* sq_length */ - NULL, /* sq_concat */ - NULL, /* 
sq_repeat */ - PLy_result_item, /* sq_item */ - PLy_result_slice, /* sq_slice */ - NULL, /* sq_ass_item */ - PLy_result_ass_slice, /* sq_ass_slice */ + .sq_length = PLy_result_length, + .sq_item = PLy_result_item, }; static PyMappingMethods PLy_result_as_mapping = { - PLy_result_length, /* mp_length */ - PLy_result_subscript, /* mp_subscript */ - PLy_result_ass_subscript, /* mp_ass_subscript */ + .mp_length = PLy_result_length, + .mp_subscript = PLy_result_subscript, + .mp_ass_subscript = PLy_result_ass_subscript, }; static PyMethodDef PLy_result_methods[] = { @@ -57,37 +50,15 @@ static PyMethodDef PLy_result_methods[] = { static PyTypeObject PLy_ResultType = { PyVarObject_HEAD_INIT(NULL, 0) - "PLyResult", /* tp_name */ - sizeof(PLyResultObject), /* tp_size */ - 0, /* tp_itemsize */ - - /* - * methods - */ - PLy_result_dealloc, /* tp_dealloc */ - 0, /* tp_print */ - 0, /* tp_getattr */ - 0, /* tp_setattr */ - 0, /* tp_compare */ - 0, /* tp_repr */ - 0, /* tp_as_number */ - &PLy_result_as_sequence, /* tp_as_sequence */ - &PLy_result_as_mapping, /* tp_as_mapping */ - 0, /* tp_hash */ - 0, /* tp_call */ - &PLy_result_str, /* tp_str */ - 0, /* tp_getattro */ - 0, /* tp_setattro */ - 0, /* tp_as_buffer */ - Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE, /* tp_flags */ - PLy_result_doc, /* tp_doc */ - 0, /* tp_traverse */ - 0, /* tp_clear */ - 0, /* tp_richcompare */ - 0, /* tp_weaklistoffset */ - 0, /* tp_iter */ - 0, /* tp_iternext */ - PLy_result_methods, /* tp_tpmethods */ + .tp_name = "PLyResult", + .tp_basicsize = sizeof(PLyResultObject), + .tp_dealloc = PLy_result_dealloc, + .tp_as_sequence = &PLy_result_as_sequence, + .tp_as_mapping = &PLy_result_as_mapping, + .tp_str = &PLy_result_str, + .tp_flags = Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE, + .tp_doc = PLy_result_doc, + .tp_methods = PLy_result_methods, }; void @@ -112,6 +83,11 @@ PLy_result_new(void) ob->nrows = PyInt_FromLong(-1); ob->rows = PyList_New(0); ob->tupdesc = NULL; + if (!ob->rows) + { + Py_DECREF(ob); + return NULL; + } return (PyObject *) ob; } @@ -147,8 +123,14 @@ PLy_result_colnames(PyObject *self, PyObject *unused) } list = PyList_New(ob->tupdesc->natts); + if (!list) + return NULL; for (i = 0; i < ob->tupdesc->natts; i++) - PyList_SET_ITEM(list, i, PyString_FromString(NameStr(ob->tupdesc->attrs[i]->attname))); + { + Form_pg_attribute attr = TupleDescAttr(ob->tupdesc, i); + + PyList_SET_ITEM(list, i, PyString_FromString(NameStr(attr->attname))); + } return list; } @@ -167,8 +149,14 @@ PLy_result_coltypes(PyObject *self, PyObject *unused) } list = PyList_New(ob->tupdesc->natts); + if (!list) + return NULL; for (i = 0; i < ob->tupdesc->natts; i++) - PyList_SET_ITEM(list, i, PyInt_FromLong(ob->tupdesc->attrs[i]->atttypid)); + { + Form_pg_attribute attr = TupleDescAttr(ob->tupdesc, i); + + PyList_SET_ITEM(list, i, PyInt_FromLong(attr->atttypid)); + } return list; } @@ -187,8 +175,14 @@ PLy_result_coltypmods(PyObject *self, PyObject *unused) } list = PyList_New(ob->tupdesc->natts); + if (!list) + return NULL; for (i = 0; i < ob->tupdesc->natts; i++) - PyList_SET_ITEM(list, i, PyInt_FromLong(ob->tupdesc->attrs[i]->atttypmod)); + { + Form_pg_attribute attr = TupleDescAttr(ob->tupdesc, i); + + PyList_SET_ITEM(list, i, PyInt_FromLong(attr->atttypmod)); + } return list; } @@ -231,24 +225,6 @@ PLy_result_item(PyObject *arg, Py_ssize_t idx) return rv; } -static PyObject * -PLy_result_slice(PyObject *arg, Py_ssize_t lidx, Py_ssize_t hidx) -{ - PLyResultObject *ob = (PLyResultObject *) arg; - - return PyList_GetSlice(ob->rows, lidx, 
hidx); -} - -static int -PLy_result_ass_slice(PyObject *arg, Py_ssize_t lidx, Py_ssize_t hidx, PyObject *slice) -{ - int rv; - PLyResultObject *ob = (PLyResultObject *) arg; - - rv = PyList_SetSlice(ob->rows, lidx, hidx, slice); - return rv; -} - static PyObject * PLy_result_str(PyObject *arg) { diff --git a/src/pl/plpython/plpy_spi.c b/src/pl/plpython/plpy_spi.c index 955769c5e3..41155fc81e 100644 --- a/src/pl/plpython/plpy_spi.c +++ b/src/pl/plpython/plpy_spi.c @@ -46,6 +46,7 @@ PLy_spi_prepare(PyObject *self, PyObject *args) PyObject *list = NULL; PyObject *volatile optr = NULL; char *query; + PLyExecutionContext *exec_ctx = PLy_current_execution_context(); volatile MemoryContext oldcontext; volatile ResourceOwner oldowner; volatile int nargs; @@ -71,9 +72,9 @@ PLy_spi_prepare(PyObject *self, PyObject *args) nargs = list ? PySequence_Length(list) : 0; plan->nargs = nargs; - plan->types = nargs ? palloc(sizeof(Oid) * nargs) : NULL; - plan->values = nargs ? palloc(sizeof(Datum) * nargs) : NULL; - plan->args = nargs ? palloc(sizeof(PLyTypeInfo) * nargs) : NULL; + plan->types = nargs ? palloc0(sizeof(Oid) * nargs) : NULL; + plan->values = nargs ? palloc0(sizeof(Datum) * nargs) : NULL; + plan->args = nargs ? palloc0(sizeof(PLyObToDatum) * nargs) : NULL; MemoryContextSwitchTo(oldcontext); @@ -85,22 +86,10 @@ PLy_spi_prepare(PyObject *self, PyObject *args) PG_TRY(); { int i; - PLyExecutionContext *exec_ctx = PLy_current_execution_context(); - - /* - * the other loop might throw an exception, if PLyTypeInfo member - * isn't properly initialized the Py_DECREF(plan) will go boom - */ - for (i = 0; i < nargs; i++) - { - PLy_typeinfo_init(&plan->args[i], plan->mcxt); - plan->values[i] = PointerGetDatum(NULL); - } for (i = 0; i < nargs; i++) { char *sptr; - HeapTuple typeTup; Oid typeId; int32 typmod; @@ -124,11 +113,6 @@ PLy_spi_prepare(PyObject *self, PyObject *args) parseTypeString(sptr, &typeId, &typmod, false); - typeTup = SearchSysCache1(TYPEOID, - ObjectIdGetDatum(typeId)); - if (!HeapTupleIsValid(typeTup)) - elog(ERROR, "cache lookup failed for type %u", typeId); - Py_DECREF(optr); /* @@ -138,8 +122,9 @@ PLy_spi_prepare(PyObject *self, PyObject *args) optr = NULL; plan->types[i] = typeId; - PLy_output_datum_func(&plan->args[i], typeTup, exec_ctx->curr_proc->langid, exec_ctx->curr_proc->trftypes); - ReleaseSysCache(typeTup); + PLy_output_setup_func(&plan->args[i], plan->mcxt, + typeId, typmod, + exec_ctx->curr_proc); } pg_verifymbstr(query, strlen(query), false); @@ -253,39 +238,24 @@ PLy_spi_execute_plan(PyObject *ob, PyObject *list, long limit) for (j = 0; j < nargs; j++) { + PLyObToDatum *arg = &plan->args[j]; PyObject *elem; elem = PySequence_GetItem(list, j); - if (elem != Py_None) + PG_TRY(); { - PG_TRY(); - { - plan->values[j] = - plan->args[j].out.d.func(&(plan->args[j].out.d), - -1, - elem, - false); - } - PG_CATCH(); - { - Py_DECREF(elem); - PG_RE_THROW(); - } - PG_END_TRY(); + bool isnull; - Py_DECREF(elem); - nulls[j] = ' '; + plan->values[j] = PLy_output_convert(arg, elem, &isnull); + nulls[j] = isnull ? 
'n' : ' '; } - else + PG_CATCH(); { Py_DECREF(elem); - plan->values[j] = - InputFunctionCall(&(plan->args[j].out.d.typfunc), - NULL, - plan->args[j].out.d.typioparam, - -1); - nulls[j] = 'n'; + PG_RE_THROW(); } + PG_END_TRY(); + Py_DECREF(elem); } rv = SPI_execute_plan(plan->plan, plan->values, nulls, @@ -306,7 +276,7 @@ PLy_spi_execute_plan(PyObject *ob, PyObject *list, long limit) */ for (k = 0; k < nargs; k++) { - if (!plan->args[k].out.d.typbyval && + if (!plan->args[k].typbyval && (plan->values[k] != PointerGetDatum(NULL))) { pfree(DatumGetPointer(plan->values[k])); @@ -321,7 +291,7 @@ PLy_spi_execute_plan(PyObject *ob, PyObject *list, long limit) for (i = 0; i < nargs; i++) { - if (!plan->args[i].out.d.typbyval && + if (!plan->args[i].typbyval && (plan->values[i] != PointerGetDatum(NULL))) { pfree(DatumGetPointer(plan->values[i])); @@ -386,33 +356,38 @@ static PyObject * PLy_spi_execute_fetch_result(SPITupleTable *tuptable, uint64 rows, int status) { PLyResultObject *result; + PLyExecutionContext *exec_ctx = PLy_current_execution_context(); volatile MemoryContext oldcontext; result = (PLyResultObject *) PLy_result_new(); + if (!result) + { + SPI_freetuptable(tuptable); + return NULL; + } Py_DECREF(result->status); result->status = PyInt_FromLong(status); if (status > 0 && tuptable == NULL) { Py_DECREF(result->nrows); - result->nrows = (rows > (uint64) LONG_MAX) ? - PyFloat_FromDouble((double) rows) : - PyInt_FromLong((long) rows); + result->nrows = PyLong_FromUnsignedLongLong(rows); } else if (status > 0 && tuptable != NULL) { - PLyTypeInfo args; + PLyDatumToOb ininfo; MemoryContext cxt; Py_DECREF(result->nrows); - result->nrows = (rows > (uint64) LONG_MAX) ? - PyFloat_FromDouble((double) rows) : - PyInt_FromLong((long) rows); + result->nrows = PyLong_FromUnsignedLongLong(rows); cxt = AllocSetContextCreate(CurrentMemoryContext, "PL/Python temp context", ALLOCSET_DEFAULT_SIZES); - PLy_typeinfo_init(&args, cxt); + + /* Initialize for converting result tuples to Python */ + PLy_input_setup_func(&ininfo, cxt, RECORDOID, -1, + exec_ctx->curr_proc); oldcontext = CurrentMemoryContext; PG_TRY(); @@ -435,15 +410,19 @@ PLy_spi_execute_fetch_result(SPITupleTable *tuptable, uint64 rows, int status) Py_DECREF(result->rows); result->rows = PyList_New(rows); - - PLy_input_tuple_funcs(&args, tuptable->tupdesc); - for (i = 0; i < rows; i++) + if (result->rows) { - PyObject *row = PLyDict_FromTuple(&args, - tuptable->vals[i], - tuptable->tupdesc); + PLy_input_setup_tuple(&ininfo, tuptable->tupdesc, + exec_ctx->curr_proc); + + for (i = 0; i < rows; i++) + { + PyObject *row = PLy_input_from_tuple(&ininfo, + tuptable->vals[i], + tuptable->tupdesc); - PyList_SetItem(result->rows, i, row); + PyList_SetItem(result->rows, i, row); + } } } @@ -470,6 +449,13 @@ PLy_spi_execute_fetch_result(SPITupleTable *tuptable, uint64 rows, int status) MemoryContextDelete(cxt); SPI_freetuptable(tuptable); + + /* in case PyList_New() failed above */ + if (!result->rows) + { + Py_DECREF(result); + result = NULL; + } } return (PyObject *) result; diff --git a/src/pl/plpython/plpy_spi.h b/src/pl/plpython/plpy_spi.h index d6b0a4707b..5a0eef78dc 100644 --- a/src/pl/plpython/plpy_spi.h +++ b/src/pl/plpython/plpy_spi.h @@ -5,7 +5,6 @@ #ifndef PLPY_SPI_H #define PLPY_SPI_H -#include "utils/palloc.h" #include "utils/resowner.h" extern PyObject *PLy_spi_prepare(PyObject *self, PyObject *args); diff --git a/src/pl/plpython/plpy_subxactobject.c b/src/pl/plpython/plpy_subxactobject.c index 9f1caa87d9..53fd36edba 100644 --- 
a/src/pl/plpython/plpy_subxactobject.c +++ b/src/pl/plpython/plpy_subxactobject.c @@ -38,37 +38,12 @@ static PyMethodDef PLy_subtransaction_methods[] = { static PyTypeObject PLy_SubtransactionType = { PyVarObject_HEAD_INIT(NULL, 0) - "PLySubtransaction", /* tp_name */ - sizeof(PLySubtransactionObject), /* tp_size */ - 0, /* tp_itemsize */ - - /* - * methods - */ - PLy_subtransaction_dealloc, /* tp_dealloc */ - 0, /* tp_print */ - 0, /* tp_getattr */ - 0, /* tp_setattr */ - 0, /* tp_compare */ - 0, /* tp_repr */ - 0, /* tp_as_number */ - 0, /* tp_as_sequence */ - 0, /* tp_as_mapping */ - 0, /* tp_hash */ - 0, /* tp_call */ - 0, /* tp_str */ - 0, /* tp_getattro */ - 0, /* tp_setattro */ - 0, /* tp_as_buffer */ - Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE, /* tp_flags */ - PLy_subtransaction_doc, /* tp_doc */ - 0, /* tp_traverse */ - 0, /* tp_clear */ - 0, /* tp_richcompare */ - 0, /* tp_weaklistoffset */ - 0, /* tp_iter */ - 0, /* tp_iternext */ - PLy_subtransaction_methods, /* tp_tpmethods */ + .tp_name = "PLySubtransaction", + .tp_basicsize = sizeof(PLySubtransactionObject), + .tp_dealloc = PLy_subtransaction_dealloc, + .tp_flags = Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE, + .tp_doc = PLy_subtransaction_doc, + .tp_methods = PLy_subtransaction_methods, }; @@ -212,6 +187,5 @@ PLy_subtransaction_exit(PyObject *self, PyObject *args) CurrentResourceOwner = subxactdata->oldowner; pfree(subxactdata); - Py_INCREF(Py_None); - return Py_None; + Py_RETURN_NONE; } diff --git a/src/pl/plpython/plpy_typeio.c b/src/pl/plpython/plpy_typeio.c index 91ddcaa7b9..d6a6a849c3 100644 --- a/src/pl/plpython/plpy_typeio.c +++ b/src/pl/plpython/plpy_typeio.c @@ -7,19 +7,15 @@ #include "postgres.h" #include "access/htup_details.h" -#include "access/transam.h" #include "catalog/pg_type.h" #include "funcapi.h" #include "mb/pg_wchar.h" -#include "parser/parse_type.h" +#include "miscadmin.h" #include "utils/array.h" #include "utils/builtins.h" #include "utils/fmgroids.h" #include "utils/lsyscache.h" #include "utils/memutils.h" -#include "utils/numeric.h" -#include "utils/syscache.h" -#include "utils/typcache.h" #include "plpython.h" @@ -29,10 +25,6 @@ #include "plpy_main.h" -/* I/O function caching */ -static void PLy_input_datum_func2(PLyDatumToOb *arg, MemoryContext arg_mcxt, Oid typeOid, HeapTuple typeTup, Oid langid, List *trftypes); -static void PLy_output_datum_func2(PLyObToDatum *arg, MemoryContext arg_mcxt, HeapTuple typeTup, Oid langid, List *trftypes); - /* conversion from Datums to Python objects */ static PyObject *PLyBool_FromBool(PLyDatumToOb *arg, Datum d); static PyObject *PLyFloat_FromFloat4(PLyDatumToOb *arg, Datum d); @@ -43,360 +35,365 @@ static PyObject *PLyInt_FromInt32(PLyDatumToOb *arg, Datum d); static PyObject *PLyLong_FromInt64(PLyDatumToOb *arg, Datum d); static PyObject *PLyLong_FromOid(PLyDatumToOb *arg, Datum d); static PyObject *PLyBytes_FromBytea(PLyDatumToOb *arg, Datum d); -static PyObject *PLyString_FromDatum(PLyDatumToOb *arg, Datum d); +static PyObject *PLyString_FromScalar(PLyDatumToOb *arg, Datum d); static PyObject *PLyObject_FromTransform(PLyDatumToOb *arg, Datum d); static PyObject *PLyList_FromArray(PLyDatumToOb *arg, Datum d); static PyObject *PLyList_FromArray_recurse(PLyDatumToOb *elm, int *dims, int ndim, int dim, char **dataptr_p, bits8 **bitmap_p, int *bitmask_p); +static PyObject *PLyDict_FromComposite(PLyDatumToOb *arg, Datum d); +static PyObject *PLyDict_FromTuple(PLyDatumToOb *arg, HeapTuple tuple, TupleDesc desc); /* conversion from Python objects to Datums */ -static 
Datum PLyObject_ToBool(PLyObToDatum *arg, int32 typmod, PyObject *plrv, bool inarray); -static Datum PLyObject_ToBytea(PLyObToDatum *arg, int32 typmod, PyObject *plrv, bool inarray); -static Datum PLyObject_ToComposite(PLyObToDatum *arg, int32 typmod, PyObject *plrv, bool inarray); -static Datum PLyObject_ToDatum(PLyObToDatum *arg, int32 typmod, PyObject *plrv, bool inarray); -static Datum PLyObject_ToTransform(PLyObToDatum *arg, int32 typmod, PyObject *plrv, bool inarray); -static Datum PLySequence_ToArray(PLyObToDatum *arg, int32 typmod, PyObject *plrv, bool inarray); +static Datum PLyObject_ToBool(PLyObToDatum *arg, PyObject *plrv, + bool *isnull, bool inarray); +static Datum PLyObject_ToBytea(PLyObToDatum *arg, PyObject *plrv, + bool *isnull, bool inarray); +static Datum PLyObject_ToComposite(PLyObToDatum *arg, PyObject *plrv, + bool *isnull, bool inarray); +static Datum PLyObject_ToScalar(PLyObToDatum *arg, PyObject *plrv, + bool *isnull, bool inarray); +static Datum PLyObject_ToDomain(PLyObToDatum *arg, PyObject *plrv, + bool *isnull, bool inarray); +static Datum PLyObject_ToTransform(PLyObToDatum *arg, PyObject *plrv, + bool *isnull, bool inarray); +static Datum PLySequence_ToArray(PLyObToDatum *arg, PyObject *plrv, + bool *isnull, bool inarray); static void PLySequence_ToArray_recurse(PLyObToDatum *elm, PyObject *list, int *dims, int ndim, int dim, Datum *elems, bool *nulls, int *currelem); -/* conversion from Python objects to composite Datums (used by triggers and SRFs) */ -static Datum PLyString_ToComposite(PLyTypeInfo *info, TupleDesc desc, PyObject *string, bool inarray); -static Datum PLyMapping_ToComposite(PLyTypeInfo *info, TupleDesc desc, PyObject *mapping); -static Datum PLySequence_ToComposite(PLyTypeInfo *info, TupleDesc desc, PyObject *sequence); -static Datum PLyGenericObject_ToComposite(PLyTypeInfo *info, TupleDesc desc, PyObject *object, bool inarray); +/* conversion from Python objects to composite Datums */ +static Datum PLyString_ToComposite(PLyObToDatum *arg, PyObject *string, bool inarray); +static Datum PLyMapping_ToComposite(PLyObToDatum *arg, TupleDesc desc, PyObject *mapping); +static Datum PLySequence_ToComposite(PLyObToDatum *arg, TupleDesc desc, PyObject *sequence); +static Datum PLyGenericObject_ToComposite(PLyObToDatum *arg, TupleDesc desc, PyObject *object, bool inarray); -void -PLy_typeinfo_init(PLyTypeInfo *arg, MemoryContext mcxt) -{ - arg->is_rowtype = -1; - arg->in.r.natts = arg->out.r.natts = 0; - arg->in.r.atts = NULL; - arg->out.r.atts = NULL; - arg->typ_relid = InvalidOid; - arg->typrel_xmin = InvalidTransactionId; - ItemPointerSetInvalid(&arg->typrel_tid); - arg->mcxt = mcxt; -} /* * Conversion functions. Remember output from Python is input to * PostgreSQL, and vice versa. */ -void -PLy_input_datum_func(PLyTypeInfo *arg, Oid typeOid, HeapTuple typeTup, Oid langid, List *trftypes) + +/* + * Perform input conversion, given correctly-set-up state information. + * + * This is the outer-level entry point for any input conversion. Internally, + * the conversion functions recurse directly to each other. 
+ */ +PyObject * +PLy_input_convert(PLyDatumToOb *arg, Datum val) { - if (arg->is_rowtype > 0) - elog(ERROR, "PLyTypeInfo struct is initialized for Tuple"); - arg->is_rowtype = 0; - PLy_input_datum_func2(&(arg->in.d), arg->mcxt, typeOid, typeTup, langid, trftypes); + PyObject *result; + PLyExecutionContext *exec_ctx = PLy_current_execution_context(); + MemoryContext scratch_context = PLy_get_scratch_context(exec_ctx); + MemoryContext oldcontext; + + /* + * Do the work in the scratch context to avoid leaking memory from the + * datatype output function calls. (The individual PLyDatumToObFunc + * functions can't reset the scratch context, because they recurse and an + * inner one might clobber data an outer one still needs. So we do it + * once at the outermost recursion level.) + * + * We reset the scratch context before, not after, each conversion cycle. + * This way we aren't on the hook to release a Python refcount on the + * result object in case MemoryContextReset throws an error. + */ + MemoryContextReset(scratch_context); + + oldcontext = MemoryContextSwitchTo(scratch_context); + + result = arg->func(arg, val); + + MemoryContextSwitchTo(oldcontext); + + return result; } -void -PLy_output_datum_func(PLyTypeInfo *arg, HeapTuple typeTup, Oid langid, List *trftypes) +/* + * Perform output conversion, given correctly-set-up state information. + * + * This is the outer-level entry point for any output conversion. Internally, + * the conversion functions recurse directly to each other. + * + * The result, as well as any cruft generated along the way, are in the + * current memory context. Caller is responsible for cleanup. + */ +Datum +PLy_output_convert(PLyObToDatum *arg, PyObject *val, bool *isnull) { - if (arg->is_rowtype > 0) - elog(ERROR, "PLyTypeInfo struct is initialized for a Tuple"); - arg->is_rowtype = 0; - PLy_output_datum_func2(&(arg->out.d), arg->mcxt, typeTup, langid, trftypes); + /* at outer level, we are not considering an array element */ + return arg->func(arg, val, isnull, false); } -void -PLy_input_tuple_funcs(PLyTypeInfo *arg, TupleDesc desc) +/* + * Transform a tuple into a Python dict object. + * + * Note: the tupdesc must match the one used to set up *arg. We could + * insist that this function lookup the tupdesc from what is in *arg, + * but in practice all callers have the right tupdesc available. + */ +PyObject * +PLy_input_from_tuple(PLyDatumToOb *arg, HeapTuple tuple, TupleDesc desc) { - int i; + PyObject *dict; PLyExecutionContext *exec_ctx = PLy_current_execution_context(); - MemoryContext oldcxt; + MemoryContext scratch_context = PLy_get_scratch_context(exec_ctx); + MemoryContext oldcontext; - oldcxt = MemoryContextSwitchTo(arg->mcxt); + /* + * As in PLy_input_convert, do the work in the scratch context. + */ + MemoryContextReset(scratch_context); - if (arg->is_rowtype == 0) - elog(ERROR, "PLyTypeInfo struct is initialized for a Datum"); - arg->is_rowtype = 1; + oldcontext = MemoryContextSwitchTo(scratch_context); - if (arg->in.r.natts != desc->natts) - { - if (arg->in.r.atts) - pfree(arg->in.r.atts); - arg->in.r.natts = desc->natts; - arg->in.r.atts = palloc0(desc->natts * sizeof(PLyDatumToOb)); - } + dict = PLyDict_FromTuple(arg, tuple, desc); - /* Can this be an unnamed tuple? 
If not, then an Assert would be enough */ - if (desc->tdtypmod != -1) - elog(ERROR, "received unnamed record type as input"); + MemoryContextSwitchTo(oldcontext); - Assert(OidIsValid(desc->tdtypeid)); + return dict; +} - /* - * RECORDOID means we got called to create input functions for a tuple - * fetched by plpy.execute or for an anonymous record type - */ - if (desc->tdtypeid != RECORDOID) - { - HeapTuple relTup; +/* + * Initialize, or re-initialize, per-column input info for a composite type. + * + * This is separate from PLy_input_setup_func() because in cases involving + * anonymous record types, we need to be passed the tupdesc explicitly. + * It's caller's responsibility that the tupdesc has adequate lifespan + * in such cases. If the tupdesc is for a named composite or registered + * record type, it does not need to be long-lived. + */ +void +PLy_input_setup_tuple(PLyDatumToOb *arg, TupleDesc desc, PLyProcedure *proc) +{ + int i; - /* Get the pg_class tuple corresponding to the type of the input */ - arg->typ_relid = typeidTypeRelid(desc->tdtypeid); - relTup = SearchSysCache1(RELOID, ObjectIdGetDatum(arg->typ_relid)); - if (!HeapTupleIsValid(relTup)) - elog(ERROR, "cache lookup failed for relation %u", arg->typ_relid); + /* We should be working on a previously-set-up struct */ + Assert(arg->func == PLyDict_FromComposite); - /* Remember XMIN and TID for later validation if cache is still OK */ - arg->typrel_xmin = HeapTupleHeaderGetRawXmin(relTup->t_data); - arg->typrel_tid = relTup->t_self; + /* Save pointer to tupdesc, but only if this is an anonymous record type */ + if (arg->typoid == RECORDOID && arg->typmod < 0) + arg->u.tuple.recdesc = desc; - ReleaseSysCache(relTup); + /* (Re)allocate atts array as needed */ + if (arg->u.tuple.natts != desc->natts) + { + if (arg->u.tuple.atts) + pfree(arg->u.tuple.atts); + arg->u.tuple.natts = desc->natts; + arg->u.tuple.atts = (PLyDatumToOb *) + MemoryContextAllocZero(arg->mcxt, + desc->natts * sizeof(PLyDatumToOb)); } + /* Fill the atts entries, except for dropped columns */ for (i = 0; i < desc->natts; i++) { - HeapTuple typeTup; + Form_pg_attribute attr = TupleDescAttr(desc, i); + PLyDatumToOb *att = &arg->u.tuple.atts[i]; - if (desc->attrs[i]->attisdropped) + if (attr->attisdropped) continue; - if (arg->in.r.atts[i].typoid == desc->attrs[i]->atttypid) + if (att->typoid == attr->atttypid && att->typmod == attr->atttypmod) continue; /* already set up this entry */ - typeTup = SearchSysCache1(TYPEOID, - ObjectIdGetDatum(desc->attrs[i]->atttypid)); - if (!HeapTupleIsValid(typeTup)) - elog(ERROR, "cache lookup failed for type %u", - desc->attrs[i]->atttypid); - - PLy_input_datum_func2(&(arg->in.r.atts[i]), arg->mcxt, - desc->attrs[i]->atttypid, - typeTup, - exec_ctx->curr_proc->langid, - exec_ctx->curr_proc->trftypes); - - ReleaseSysCache(typeTup); + PLy_input_setup_func(att, arg->mcxt, + attr->atttypid, attr->atttypmod, + proc); } - - MemoryContextSwitchTo(oldcxt); } +/* + * Initialize, or re-initialize, per-column output info for a composite type. + * + * This is separate from PLy_output_setup_func() because in cases involving + * anonymous record types, we need to be passed the tupdesc explicitly. + * It's caller's responsibility that the tupdesc has adequate lifespan + * in such cases. If the tupdesc is for a named composite or registered + * record type, it does not need to be long-lived. 
+ */ void -PLy_output_tuple_funcs(PLyTypeInfo *arg, TupleDesc desc) +PLy_output_setup_tuple(PLyObToDatum *arg, TupleDesc desc, PLyProcedure *proc) { int i; - PLyExecutionContext *exec_ctx = PLy_current_execution_context(); - MemoryContext oldcxt; - oldcxt = MemoryContextSwitchTo(arg->mcxt); + /* We should be working on a previously-set-up struct */ + Assert(arg->func == PLyObject_ToComposite); - if (arg->is_rowtype == 0) - elog(ERROR, "PLyTypeInfo struct is initialized for a Datum"); - arg->is_rowtype = 1; + /* Save pointer to tupdesc, but only if this is an anonymous record type */ + if (arg->typoid == RECORDOID && arg->typmod < 0) + arg->u.tuple.recdesc = desc; - if (arg->out.r.natts != desc->natts) + /* (Re)allocate atts array as needed */ + if (arg->u.tuple.natts != desc->natts) { - if (arg->out.r.atts) - pfree(arg->out.r.atts); - arg->out.r.natts = desc->natts; - arg->out.r.atts = palloc0(desc->natts * sizeof(PLyObToDatum)); - } - - Assert(OidIsValid(desc->tdtypeid)); - - /* - * RECORDOID means we got called to create output functions for an - * anonymous record type - */ - if (desc->tdtypeid != RECORDOID) - { - HeapTuple relTup; - - /* Get the pg_class tuple corresponding to the type of the output */ - arg->typ_relid = typeidTypeRelid(desc->tdtypeid); - relTup = SearchSysCache1(RELOID, ObjectIdGetDatum(arg->typ_relid)); - if (!HeapTupleIsValid(relTup)) - elog(ERROR, "cache lookup failed for relation %u", arg->typ_relid); - - /* Remember XMIN and TID for later validation if cache is still OK */ - arg->typrel_xmin = HeapTupleHeaderGetRawXmin(relTup->t_data); - arg->typrel_tid = relTup->t_self; - - ReleaseSysCache(relTup); + if (arg->u.tuple.atts) + pfree(arg->u.tuple.atts); + arg->u.tuple.natts = desc->natts; + arg->u.tuple.atts = (PLyObToDatum *) + MemoryContextAllocZero(arg->mcxt, + desc->natts * sizeof(PLyObToDatum)); } + /* Fill the atts entries, except for dropped columns */ for (i = 0; i < desc->natts; i++) { - HeapTuple typeTup; + Form_pg_attribute attr = TupleDescAttr(desc, i); + PLyObToDatum *att = &arg->u.tuple.atts[i]; - if (desc->attrs[i]->attisdropped) + if (attr->attisdropped) continue; - if (arg->out.r.atts[i].typoid == desc->attrs[i]->atttypid) + if (att->typoid == attr->atttypid && att->typmod == attr->atttypmod) continue; /* already set up this entry */ - typeTup = SearchSysCache1(TYPEOID, - ObjectIdGetDatum(desc->attrs[i]->atttypid)); - if (!HeapTupleIsValid(typeTup)) - elog(ERROR, "cache lookup failed for type %u", - desc->attrs[i]->atttypid); - - PLy_output_datum_func2(&(arg->out.r.atts[i]), arg->mcxt, typeTup, - exec_ctx->curr_proc->langid, - exec_ctx->curr_proc->trftypes); - - ReleaseSysCache(typeTup); + PLy_output_setup_func(att, arg->mcxt, + attr->atttypid, attr->atttypmod, + proc); } - - MemoryContextSwitchTo(oldcxt); } +/* + * Set up output info for a PL/Python function returning record. + * + * Note: the given tupdesc is not necessarily long-lived. + */ void -PLy_output_record_funcs(PLyTypeInfo *arg, TupleDesc desc) +PLy_output_setup_record(PLyObToDatum *arg, TupleDesc desc, PLyProcedure *proc) { + /* Makes no sense unless RECORD */ + Assert(arg->typoid == RECORDOID); + Assert(desc->tdtypeid == RECORDOID); + /* - * If the output record functions are already set, we just have to check - * if the record descriptor has not changed + * Bless the record type if not already done. We'd have to do this anyway + * to return a tuple, so we might as well force the issue so we can use + * the known-record-type code path. 
*/ - if ((arg->is_rowtype == 1) && - (arg->out.d.typmod != -1) && - (arg->out.d.typmod == desc->tdtypmod)) - return; - - /* bless the record to make it known to the typcache lookup code */ BlessTupleDesc(desc); - /* save the freshly generated typmod */ - arg->out.d.typmod = desc->tdtypmod; - /* proceed with normal I/O function caching */ - PLy_output_tuple_funcs(arg, desc); /* - * it should change is_rowtype to 1, so we won't go through this again - * unless the output record description changes + * Update arg->typmod, and clear the recdesc link if it's changed. The + * next call of PLyObject_ToComposite will look up a long-lived tupdesc + * for the record type. */ - Assert(arg->is_rowtype == 1); + arg->typmod = desc->tdtypmod; + if (arg->u.tuple.recdesc && + arg->u.tuple.recdesc->tdtypmod != arg->typmod) + arg->u.tuple.recdesc = NULL; + + /* Update derived data if necessary */ + PLy_output_setup_tuple(arg, desc, proc); } /* - * Transform a tuple into a Python dict object. + * Recursively initialize the PLyObToDatum structure(s) needed to construct + * a SQL value of the specified typeOid/typmod from a Python value. + * (But note that at this point we may have RECORDOID/-1, ie, an indeterminate + * record type.) + * proc is used to look up transform functions. */ -PyObject * -PLyDict_FromTuple(PLyTypeInfo *info, HeapTuple tuple, TupleDesc desc) +void +PLy_output_setup_func(PLyObToDatum *arg, MemoryContext arg_mcxt, + Oid typeOid, int32 typmod, + PLyProcedure *proc) { - PyObject *volatile dict; - PLyExecutionContext *exec_ctx = PLy_current_execution_context(); - MemoryContext scratch_context = PLy_get_scratch_context(exec_ctx); - MemoryContext oldcontext = CurrentMemoryContext; + TypeCacheEntry *typentry; + char typtype; + Oid trfuncid; + Oid typinput; - if (info->is_rowtype != 1) - elog(ERROR, "PLyTypeInfo structure describes a datum"); + /* Since this is recursive, it could theoretically be driven to overflow */ + check_stack_depth(); - dict = PyDict_New(); - if (dict == NULL) - PLy_elog(ERROR, "could not create new dictionary"); + arg->typoid = typeOid; + arg->typmod = typmod; + arg->mcxt = arg_mcxt; - PG_TRY(); + /* + * Fetch typcache entry for the target type, asking for whatever info + * we'll need later. RECORD is a special case: just treat it as composite + * without bothering with the typcache entry. + */ + if (typeOid != RECORDOID) { - int i; - - /* - * Do the work in the scratch context to avoid leaking memory from the - * datatype output function calls. 
- */ - MemoryContextSwitchTo(scratch_context); - for (i = 0; i < info->in.r.natts; i++) - { - char *key; - Datum vattr; - bool is_null; - PyObject *value; - - if (desc->attrs[i]->attisdropped) - continue; - - key = NameStr(desc->attrs[i]->attname); - vattr = heap_getattr(tuple, (i + 1), desc, &is_null); - - if (is_null || info->in.r.atts[i].func == NULL) - PyDict_SetItemString(dict, key, Py_None); - else - { - value = (info->in.r.atts[i].func) (&info->in.r.atts[i], vattr); - PyDict_SetItemString(dict, key, value); - Py_DECREF(value); - } - } - MemoryContextSwitchTo(oldcontext); - MemoryContextReset(scratch_context); + typentry = lookup_type_cache(typeOid, TYPECACHE_DOMAIN_BASE_INFO); + typtype = typentry->typtype; + arg->typbyval = typentry->typbyval; + arg->typlen = typentry->typlen; + arg->typalign = typentry->typalign; } - PG_CATCH(); + else { - MemoryContextSwitchTo(oldcontext); - Py_DECREF(dict); - PG_RE_THROW(); + typentry = NULL; + typtype = TYPTYPE_COMPOSITE; + /* hard-wired knowledge about type RECORD: */ + arg->typbyval = false; + arg->typlen = -1; + arg->typalign = 'd'; } - PG_END_TRY(); - - return dict; -} - -/* - * Convert a Python object to a composite Datum, using all supported - * conversion methods: composite as a string, as a sequence, as a mapping or - * as an object that has __getattr__ support. - */ -Datum -PLyObject_ToCompositeDatum(PLyTypeInfo *info, TupleDesc desc, PyObject *plrv, bool inarray) -{ - Datum datum; - - if (PyString_Check(plrv) || PyUnicode_Check(plrv)) - datum = PLyString_ToComposite(info, desc, plrv, inarray); - else if (PySequence_Check(plrv)) - /* composite type as sequence (tuple, list etc) */ - datum = PLySequence_ToComposite(info, desc, plrv); - else if (PyMapping_Check(plrv)) - /* composite type as mapping (currently only dict) */ - datum = PLyMapping_ToComposite(info, desc, plrv); - else - /* returned as smth, must provide method __getattr__(name) */ - datum = PLyGenericObject_ToComposite(info, desc, plrv, inarray); - - return datum; -} - -static void -PLy_output_datum_func2(PLyObToDatum *arg, MemoryContext arg_mcxt, HeapTuple typeTup, Oid langid, List *trftypes) -{ - Form_pg_type typeStruct = (Form_pg_type) GETSTRUCT(typeTup); - Oid element_type; - Oid base_type; - Oid funcid; - MemoryContext oldcxt; - - oldcxt = MemoryContextSwitchTo(arg_mcxt); - - fmgr_info_cxt(typeStruct->typinput, &arg->typfunc, arg_mcxt); - arg->typoid = HeapTupleGetOid(typeTup); - arg->typmod = -1; - arg->typioparam = getTypeIOParam(typeTup); - arg->typbyval = typeStruct->typbyval; - - element_type = get_base_element_type(arg->typoid); - base_type = getBaseType(element_type ? element_type : arg->typoid); /* - * Select a conversion function to convert Python objects to PostgreSQL - * datums. + * Choose conversion method. Note that transform functions are checked + * for composite and scalar types, but not for arrays or domains. This is + * somewhat historical, but we'd have a problem allowing them on domains, + * since we drill down through all levels of a domain nest without looking + * at the intermediate levels at all. 
*/ - - if ((funcid = get_transform_tosql(base_type, langid, trftypes))) + if (typtype == TYPTYPE_DOMAIN) + { + /* Domain */ + arg->func = PLyObject_ToDomain; + arg->u.domain.domain_info = NULL; + /* Recursively set up conversion info for the element type */ + arg->u.domain.base = (PLyObToDatum *) + MemoryContextAllocZero(arg_mcxt, sizeof(PLyObToDatum)); + PLy_output_setup_func(arg->u.domain.base, arg_mcxt, + typentry->domainBaseType, + typentry->domainBaseTypmod, + proc); + } + else if (typentry && + OidIsValid(typentry->typelem) && typentry->typlen == -1) + { + /* Standard varlena array (cf. get_element_type) */ + arg->func = PLySequence_ToArray; + /* Get base type OID to insert into constructed array */ + /* (note this might not be the same as the immediate child type) */ + arg->u.array.elmbasetype = getBaseType(typentry->typelem); + /* Recursively set up conversion info for the element type */ + arg->u.array.elm = (PLyObToDatum *) + MemoryContextAllocZero(arg_mcxt, sizeof(PLyObToDatum)); + PLy_output_setup_func(arg->u.array.elm, arg_mcxt, + typentry->typelem, typmod, + proc); + } + else if ((trfuncid = get_transform_tosql(typeOid, + proc->langid, + proc->trftypes))) { arg->func = PLyObject_ToTransform; - fmgr_info_cxt(funcid, &arg->typtransform, arg_mcxt); + fmgr_info_cxt(trfuncid, &arg->u.transform.typtransform, arg_mcxt); } - else if (typeStruct->typtype == TYPTYPE_COMPOSITE) + else if (typtype == TYPTYPE_COMPOSITE) { + /* Named composite type, or RECORD */ arg->func = PLyObject_ToComposite; + /* We'll set up the per-field data later */ + arg->u.tuple.recdesc = NULL; + arg->u.tuple.typentry = typentry; + arg->u.tuple.tupdescid = INVALID_TUPLEDESC_IDENTIFIER; + arg->u.tuple.atts = NULL; + arg->u.tuple.natts = 0; + /* Mark this invalid till needed, too */ + arg->u.tuple.recinfunc.fn_oid = InvalidOid; } else - switch (base_type) + { + /* Scalar type, but we have a couple of special cases */ + switch (typeOid) { case BOOLOID: arg->func = PLyObject_ToBool; @@ -405,66 +402,111 @@ PLy_output_datum_func2(PLyObToDatum *arg, MemoryContext arg_mcxt, HeapTuple type arg->func = PLyObject_ToBytea; break; default: - arg->func = PLyObject_ToDatum; + arg->func = PLyObject_ToScalar; + getTypeInputInfo(typeOid, &typinput, &arg->u.scalar.typioparam); + fmgr_info_cxt(typinput, &arg->u.scalar.typfunc, arg_mcxt); break; } - - if (element_type) - { - char dummy_delim; - Oid funcid; - - if (type_is_rowtype(element_type)) - arg->func = PLyObject_ToComposite; - - arg->elm = palloc0(sizeof(*arg->elm)); - arg->elm->func = arg->func; - arg->elm->typtransform = arg->typtransform; - arg->func = PLySequence_ToArray; - - arg->elm->typoid = element_type; - arg->elm->typmod = -1; - get_type_io_data(element_type, IOFunc_input, - &arg->elm->typlen, &arg->elm->typbyval, &arg->elm->typalign, &dummy_delim, - &arg->elm->typioparam, &funcid); - fmgr_info_cxt(funcid, &arg->elm->typfunc, arg_mcxt); } - - MemoryContextSwitchTo(oldcxt); } -static void -PLy_input_datum_func2(PLyDatumToOb *arg, MemoryContext arg_mcxt, Oid typeOid, HeapTuple typeTup, Oid langid, List *trftypes) +/* + * Recursively initialize the PLyDatumToOb structure(s) needed to construct + * a Python value from a SQL value of the specified typeOid/typmod. + * (But note that at this point we may have RECORDOID/-1, ie, an indeterminate + * record type.) + * proc is used to look up transform functions. 
+ */ +void +PLy_input_setup_func(PLyDatumToOb *arg, MemoryContext arg_mcxt, + Oid typeOid, int32 typmod, + PLyProcedure *proc) { - Form_pg_type typeStruct = (Form_pg_type) GETSTRUCT(typeTup); - Oid element_type; - Oid base_type; - Oid funcid; - MemoryContext oldcxt; - - oldcxt = MemoryContextSwitchTo(arg_mcxt); + TypeCacheEntry *typentry; + char typtype; + Oid trfuncid; + Oid typoutput; + bool typisvarlena; - /* Get the type's conversion information */ - fmgr_info_cxt(typeStruct->typoutput, &arg->typfunc, arg_mcxt); - arg->typoid = HeapTupleGetOid(typeTup); - arg->typmod = -1; - arg->typioparam = getTypeIOParam(typeTup); - arg->typbyval = typeStruct->typbyval; - arg->typlen = typeStruct->typlen; - arg->typalign = typeStruct->typalign; + /* Since this is recursive, it could theoretically be driven to overflow */ + check_stack_depth(); - /* Determine which kind of Python object we will convert to */ + arg->typoid = typeOid; + arg->typmod = typmod; + arg->mcxt = arg_mcxt; - element_type = get_base_element_type(typeOid); - base_type = getBaseType(element_type ? element_type : typeOid); + /* + * Fetch typcache entry for the target type, asking for whatever info + * we'll need later. RECORD is a special case: just treat it as composite + * without bothering with the typcache entry. + */ + if (typeOid != RECORDOID) + { + typentry = lookup_type_cache(typeOid, TYPECACHE_DOMAIN_BASE_INFO); + typtype = typentry->typtype; + arg->typbyval = typentry->typbyval; + arg->typlen = typentry->typlen; + arg->typalign = typentry->typalign; + } + else + { + typentry = NULL; + typtype = TYPTYPE_COMPOSITE; + /* hard-wired knowledge about type RECORD: */ + arg->typbyval = false; + arg->typlen = -1; + arg->typalign = 'd'; + } - if ((funcid = get_transform_fromsql(base_type, langid, trftypes))) + /* + * Choose conversion method. Note that transform functions are checked + * for composite and scalar types, but not for arrays or domains. This is + * somewhat historical, but we'd have a problem allowing them on domains, + * since we drill down through all levels of a domain nest without looking + * at the intermediate levels at all. + */ + if (typtype == TYPTYPE_DOMAIN) + { + /* Domain --- we don't care, just recurse down to the base type */ + PLy_input_setup_func(arg, arg_mcxt, + typentry->domainBaseType, + typentry->domainBaseTypmod, + proc); + } + else if (typentry && + OidIsValid(typentry->typelem) && typentry->typlen == -1) + { + /* Standard varlena array (cf. 
get_element_type) */ + arg->func = PLyList_FromArray; + /* Recursively set up conversion info for the element type */ + arg->u.array.elm = (PLyDatumToOb *) + MemoryContextAllocZero(arg_mcxt, sizeof(PLyDatumToOb)); + PLy_input_setup_func(arg->u.array.elm, arg_mcxt, + typentry->typelem, typmod, + proc); + } + else if ((trfuncid = get_transform_fromsql(typeOid, + proc->langid, + proc->trftypes))) { arg->func = PLyObject_FromTransform; - fmgr_info_cxt(funcid, &arg->typtransform, arg_mcxt); + fmgr_info_cxt(trfuncid, &arg->u.transform.typtransform, arg_mcxt); + } + else if (typtype == TYPTYPE_COMPOSITE) + { + /* Named composite type, or RECORD */ + arg->func = PLyDict_FromComposite; + /* We'll set up the per-field data later */ + arg->u.tuple.recdesc = NULL; + arg->u.tuple.typentry = typentry; + arg->u.tuple.tupdescid = INVALID_TUPLEDESC_IDENTIFIER; + arg->u.tuple.atts = NULL; + arg->u.tuple.natts = 0; } else - switch (base_type) + { + /* Scalar type, but we have a couple of special cases */ + switch (typeOid) { case BOOLOID: arg->func = PLyBool_FromBool; @@ -494,30 +536,19 @@ PLy_input_datum_func2(PLyDatumToOb *arg, MemoryContext arg_mcxt, Oid typeOid, He arg->func = PLyBytes_FromBytea; break; default: - arg->func = PLyString_FromDatum; + arg->func = PLyString_FromScalar; + getTypeOutputInfo(typeOid, &typoutput, &typisvarlena); + fmgr_info_cxt(typoutput, &arg->u.scalar.typfunc, arg_mcxt); break; } - - if (element_type) - { - char dummy_delim; - Oid funcid; - - arg->elm = palloc0(sizeof(*arg->elm)); - arg->elm->func = arg->func; - arg->elm->typtransform = arg->typtransform; - arg->func = PLyList_FromArray; - arg->elm->typoid = element_type; - arg->elm->typmod = -1; - get_type_io_data(element_type, IOFunc_output, - &arg->elm->typlen, &arg->elm->typbyval, &arg->elm->typalign, &dummy_delim, - &arg->elm->typioparam, &funcid); - fmgr_info_cxt(funcid, &arg->elm->typfunc, arg_mcxt); } - - MemoryContextSwitchTo(oldcxt); } + +/* + * Special-purpose input converters. + */ + static PyObject * PLyBool_FromBool(PLyDatumToOb *arg, Datum d) { @@ -587,11 +618,7 @@ PLyInt_FromInt32(PLyDatumToOb *arg, Datum d) static PyObject * PLyLong_FromInt64(PLyDatumToOb *arg, Datum d) { - /* on 32 bit platforms "long" may be too small */ - if (sizeof(int64) > sizeof(long)) - return PyLong_FromLongLong(DatumGetInt64(d)); - else - return PyLong_FromLong(DatumGetInt64(d)); + return PyLong_FromLongLong(DatumGetInt64(d)); } static PyObject * @@ -610,27 +637,40 @@ PLyBytes_FromBytea(PLyDatumToOb *arg, Datum d) return PyBytes_FromStringAndSize(str, size); } + +/* + * Generic input conversion using a SQL type's output function. + */ static PyObject * -PLyString_FromDatum(PLyDatumToOb *arg, Datum d) +PLyString_FromScalar(PLyDatumToOb *arg, Datum d) { - char *x = OutputFunctionCall(&arg->typfunc, d); + char *x = OutputFunctionCall(&arg->u.scalar.typfunc, d); PyObject *r = PyString_FromString(x); pfree(x); return r; } +/* + * Convert using a from-SQL transform function. + */ static PyObject * PLyObject_FromTransform(PLyDatumToOb *arg, Datum d) { - return (PyObject *) DatumGetPointer(FunctionCall1(&arg->typtransform, d)); + Datum t; + + t = FunctionCall1(&arg->u.transform.typtransform, d); + return (PyObject *) DatumGetPointer(t); } +/* + * Convert a SQL array to a Python list. 
+ */ static PyObject * PLyList_FromArray(PLyDatumToOb *arg, Datum d) { ArrayType *array = DatumGetArrayTypeP(d); - PLyDatumToOb *elm = arg->elm; + PLyDatumToOb *elm = arg->u.array.elm; int ndim; int *dims; char *dataptr; @@ -674,6 +714,8 @@ PLyList_FromArray_recurse(PLyDatumToOb *elm, int *dims, int ndim, int dim, PyObject *list; list = PyList_New(dims[dim]); + if (!list) + return NULL; if (dim < ndim - 1) { @@ -735,6 +777,94 @@ PLyList_FromArray_recurse(PLyDatumToOb *elm, int *dims, int ndim, int dim, return list; } +/* + * Convert a composite SQL value to a Python dict. + */ +static PyObject * +PLyDict_FromComposite(PLyDatumToOb *arg, Datum d) +{ + PyObject *dict; + HeapTupleHeader td; + Oid tupType; + int32 tupTypmod; + TupleDesc tupdesc; + HeapTupleData tmptup; + + td = DatumGetHeapTupleHeader(d); + /* Extract rowtype info and find a tupdesc */ + tupType = HeapTupleHeaderGetTypeId(td); + tupTypmod = HeapTupleHeaderGetTypMod(td); + tupdesc = lookup_rowtype_tupdesc(tupType, tupTypmod); + + /* Set up I/O funcs if not done yet */ + PLy_input_setup_tuple(arg, tupdesc, + PLy_current_execution_context()->curr_proc); + + /* Build a temporary HeapTuple control structure */ + tmptup.t_len = HeapTupleHeaderGetDatumLength(td); + tmptup.t_data = td; + + dict = PLyDict_FromTuple(arg, &tmptup, tupdesc); + + ReleaseTupleDesc(tupdesc); + + return dict; +} + +/* + * Transform a tuple into a Python dict object. + */ +static PyObject * +PLyDict_FromTuple(PLyDatumToOb *arg, HeapTuple tuple, TupleDesc desc) +{ + PyObject *volatile dict; + + /* Simple sanity check that desc matches */ + Assert(desc->natts == arg->u.tuple.natts); + + dict = PyDict_New(); + if (dict == NULL) + return NULL; + + PG_TRY(); + { + int i; + + for (i = 0; i < arg->u.tuple.natts; i++) + { + PLyDatumToOb *att = &arg->u.tuple.atts[i]; + Form_pg_attribute attr = TupleDescAttr(desc, i); + char *key; + Datum vattr; + bool is_null; + PyObject *value; + + if (attr->attisdropped) + continue; + + key = NameStr(attr->attname); + vattr = heap_getattr(tuple, (i + 1), desc, &is_null); + + if (is_null) + PyDict_SetItemString(dict, key, Py_None); + else + { + value = att->func(att, vattr); + PyDict_SetItemString(dict, key, value); + Py_DECREF(value); + } + } + } + PG_CATCH(); + { + Py_DECREF(dict); + PG_RE_THROW(); + } + PG_END_TRY(); + + return dict; +} + /* * Convert a Python object to a PostgreSQL bool datum. This can't go * through the generic conversion function, because Python attaches a @@ -742,17 +872,16 @@ PLyList_FromArray_recurse(PLyDatumToOb *elm, int *dims, int ndim, int dim, * type can parse. */ static Datum -PLyObject_ToBool(PLyObToDatum *arg, int32 typmod, PyObject *plrv, bool inarray) +PLyObject_ToBool(PLyObToDatum *arg, PyObject *plrv, + bool *isnull, bool inarray) { - Datum rv; - - Assert(plrv != Py_None); - rv = BoolGetDatum(PyObject_IsTrue(plrv)); - - if (get_typtype(arg->typoid) == TYPTYPE_DOMAIN) - domain_check(rv, false, arg->typoid, &arg->typfunc.fn_extra, arg->typfunc.fn_mcxt); - - return rv; + if (plrv == Py_None) + { + *isnull = true; + return (Datum) 0; + } + *isnull = false; + return BoolGetDatum(PyObject_IsTrue(plrv)); } /* @@ -761,12 +890,18 @@ PLyObject_ToBool(PLyObToDatum *arg, int32 typmod, PyObject *plrv, bool inarray) * with embedded nulls. And it's faster this way. 
*/ static Datum -PLyObject_ToBytea(PLyObToDatum *arg, int32 typmod, PyObject *plrv, bool inarray) +PLyObject_ToBytea(PLyObToDatum *arg, PyObject *plrv, + bool *isnull, bool inarray) { PyObject *volatile plrv_so = NULL; Datum rv; - Assert(plrv != Py_None); + if (plrv == Py_None) + { + *isnull = true; + return (Datum) 0; + } + *isnull = false; plrv_so = PyObject_Bytes(plrv); if (!plrv_so) @@ -792,9 +927,6 @@ PLyObject_ToBytea(PLyObToDatum *arg, int32 typmod, PyObject *plrv, bool inarray) Py_XDECREF(plrv_so); - if (get_typtype(arg->typoid) == TYPTYPE_DOMAIN) - domain_check(rv, false, arg->typoid, &arg->typfunc.fn_extra, arg->typfunc.fn_mcxt); - return rv; } @@ -805,45 +937,87 @@ PLyObject_ToBytea(PLyObToDatum *arg, int32 typmod, PyObject *plrv, bool inarray) * for obtaining PostgreSQL tuples. */ static Datum -PLyObject_ToComposite(PLyObToDatum *arg, int32 typmod, PyObject *plrv, bool inarray) +PLyObject_ToComposite(PLyObToDatum *arg, PyObject *plrv, + bool *isnull, bool inarray) { Datum rv; - PLyTypeInfo info; TupleDesc desc; - MemoryContext cxt; - if (typmod != -1) - elog(ERROR, "received unnamed record type as input"); + if (plrv == Py_None) + { + *isnull = true; + return (Datum) 0; + } + *isnull = false; + + /* + * The string conversion case doesn't require a tupdesc, nor per-field + * conversion data, so just go for it if that's the case to use. + */ + if (PyString_Check(plrv) || PyUnicode_Check(plrv)) + return PLyString_ToComposite(arg, plrv, inarray); - /* Create a dummy PLyTypeInfo */ - cxt = AllocSetContextCreate(CurrentMemoryContext, - "PL/Python temp context", - ALLOCSET_DEFAULT_SIZES); - MemSet(&info, 0, sizeof(PLyTypeInfo)); - PLy_typeinfo_init(&info, cxt); - /* Mark it as needing output routines lookup */ - info.is_rowtype = 2; + /* + * If we're dealing with a named composite type, we must look up the + * tupdesc every time, to protect against possible changes to the type. + * RECORD types can't change between calls; but we must still be willing + * to set up the info the first time, if nobody did yet. + */ + if (arg->typoid != RECORDOID) + { + desc = lookup_rowtype_tupdesc(arg->typoid, arg->typmod); + /* We should have the descriptor of the type's typcache entry */ + Assert(desc == arg->u.tuple.typentry->tupDesc); + /* Detect change of descriptor, update cache if needed */ + if (arg->u.tuple.tupdescid != arg->u.tuple.typentry->tupDesc_identifier) + { + PLy_output_setup_tuple(arg, desc, + PLy_current_execution_context()->curr_proc); + arg->u.tuple.tupdescid = arg->u.tuple.typentry->tupDesc_identifier; + } + } + else + { + desc = arg->u.tuple.recdesc; + if (desc == NULL) + { + desc = lookup_rowtype_tupdesc(arg->typoid, arg->typmod); + arg->u.tuple.recdesc = desc; + } + else + { + /* Pin descriptor to match unpin below */ + PinTupleDesc(desc); + } + } - desc = lookup_rowtype_tupdesc(arg->typoid, arg->typmod); + /* Simple sanity check on our caching */ + Assert(desc->natts == arg->u.tuple.natts); /* - * This will set up the dummy PLyTypeInfo's output conversion routines, - * since we left is_rowtype as 2. A future optimization could be caching - * that info instead of looking it up every time a tuple is returned from - * the function. + * Convert, using the appropriate method depending on the type of the + * supplied Python object. 
*/ - rv = PLyObject_ToCompositeDatum(&info, desc, plrv, inarray); + if (PySequence_Check(plrv)) + /* composite type as sequence (tuple, list etc) */ + rv = PLySequence_ToComposite(arg, desc, plrv); + else if (PyMapping_Check(plrv)) + /* composite type as mapping (currently only dict) */ + rv = PLyMapping_ToComposite(arg, desc, plrv); + else + /* returned as smth, must provide method __getattr__(name) */ + rv = PLyGenericObject_ToComposite(arg, desc, plrv, inarray); ReleaseTupleDesc(desc); - MemoryContextDelete(cxt); - return rv; } /* * Convert Python object to C string in server encoding. + * + * Note: this is exported for use by add-on transform modules. */ char * PLyObject_AsString(PyObject *plrv) @@ -900,74 +1074,71 @@ PLyObject_AsString(PyObject *plrv) /* - * Generic conversion function: Convert PyObject to cstring and + * Generic output conversion function: convert PyObject to cstring and * cstring into PostgreSQL type. */ static Datum -PLyObject_ToDatum(PLyObToDatum *arg, int32 typmod, PyObject *plrv, bool inarray) +PLyObject_ToScalar(PLyObToDatum *arg, PyObject *plrv, + bool *isnull, bool inarray) { char *str; - Assert(plrv != Py_None); + if (plrv == Py_None) + { + *isnull = true; + return (Datum) 0; + } + *isnull = false; str = PLyObject_AsString(plrv); - /* - * If we are parsing a composite type within an array, and the string - * isn't a valid record literal, there's a high chance that the function - * did something like: - * - * CREATE FUNCTION .. RETURNS comptype[] AS $$ return [['foo', 'bar']] $$ - * LANGUAGE plpython; - * - * Before PostgreSQL 10, that was interpreted as a single-dimensional - * array, containing record ('foo', 'bar'). PostgreSQL 10 added support - * for multi-dimensional arrays, and it is now interpreted as a - * two-dimensional array, containing two records, 'foo', and 'bar'. - * record_in() will throw an error, because "foo" is not a valid record - * literal. - * - * To make that less confusing to users who are upgrading from older - * versions, try to give a hint in the typical instances of that. If we - * are parsing an array of composite types, and we see a string literal - * that is not a valid record literal, give a hint. We only want to give - * the hint in the narrow case of a malformed string literal, not any - * error from record_in(), so check for that case here specifically. - * - * This check better match the one in record_in(), so that we don't forbid - * literals that are actually valid! - */ - if (inarray && arg->typfunc.fn_oid == F_RECORD_IN) - { - char *ptr = str; + return InputFunctionCall(&arg->u.scalar.typfunc, + str, + arg->u.scalar.typioparam, + arg->typmod); +} - /* Allow leading whitespace */ - while (*ptr && isspace((unsigned char) *ptr)) - ptr++; - if (*ptr++ != '(') - ereport(ERROR, - (errcode(ERRCODE_INVALID_TEXT_REPRESENTATION), - errmsg("malformed record literal: \"%s\"", str), - errdetail("Missing left parenthesis."), - errhint("To return a composite type in an array, return the composite type as a Python tuple, e.g., \"[('foo',)]\"."))); - } - return InputFunctionCall(&arg->typfunc, - str, - arg->typioparam, - typmod); +/* + * Convert to a domain type. + */ +static Datum +PLyObject_ToDomain(PLyObToDatum *arg, PyObject *plrv, + bool *isnull, bool inarray) +{ + Datum result; + PLyObToDatum *base = arg->u.domain.base; + + result = base->func(base, plrv, isnull, inarray); + domain_check(result, *isnull, arg->typoid, + &arg->u.domain.domain_info, arg->mcxt); + return result; } +/* + * Convert using a to-SQL transform function. 
+ */ static Datum -PLyObject_ToTransform(PLyObToDatum *arg, int32 typmod, PyObject *plrv, bool inarray) +PLyObject_ToTransform(PLyObToDatum *arg, PyObject *plrv, + bool *isnull, bool inarray) { - return FunctionCall1(&arg->typtransform, PointerGetDatum(plrv)); + if (plrv == Py_None) + { + *isnull = true; + return (Datum) 0; + } + *isnull = false; + return FunctionCall1(&arg->u.transform.typtransform, PointerGetDatum(plrv)); } +/* + * Convert Python sequence to SQL array. + */ static Datum -PLySequence_ToArray(PLyObToDatum *arg, int32 typmod, PyObject *plrv, bool inarray) +PLySequence_ToArray(PLyObToDatum *arg, PyObject *plrv, + bool *isnull, bool inarray) { ArrayType *array; int i; @@ -978,11 +1149,15 @@ PLySequence_ToArray(PLyObToDatum *arg, int32 typmod, PyObject *plrv, bool inarra int dims[MAXDIM]; int lbs[MAXDIM]; int currelem; - Datum rv; PyObject *pyptr = plrv; PyObject *next; - Assert(plrv != Py_None); + if (plrv == Py_None) + { + *isnull = true; + return (Datum) 0; + } + *isnull = false; /* * Determine the number of dimensions, and their sizes. @@ -1048,7 +1223,7 @@ PLySequence_ToArray(PLyObToDatum *arg, int32 typmod, PyObject *plrv, bool inarra elems = palloc(sizeof(Datum) * len); nulls = palloc(sizeof(bool) * len); currelem = 0; - PLySequence_ToArray_recurse(arg->elm, plrv, + PLySequence_ToArray_recurse(arg->u.array.elm, plrv, dims, ndim, 0, elems, nulls, &currelem); @@ -1060,19 +1235,12 @@ PLySequence_ToArray(PLyObToDatum *arg, int32 typmod, PyObject *plrv, bool inarra ndim, dims, lbs, - get_base_element_type(arg->typoid), - arg->elm->typlen, - arg->elm->typbyval, - arg->elm->typalign); + arg->u.array.elmbasetype, + arg->u.array.elm->typlen, + arg->u.array.elm->typbyval, + arg->u.array.elm->typalign); - /* - * If the result type is a domain of array, the resulting array must be - * checked. - */ - rv = PointerGetDatum(array); - if (get_typtype(arg->typoid) == TYPTYPE_DOMAIN) - domain_check(rv, false, arg->typoid, &arg->typfunc.fn_extra, arg->typfunc.fn_mcxt); - return rv; + return PointerGetDatum(array); } /* @@ -1109,16 +1277,7 @@ PLySequence_ToArray_recurse(PLyObToDatum *elm, PyObject *list, { PyObject *obj = PySequence_GetItem(list, i); - if (obj == Py_None) - { - nulls[*currelem] = true; - elems[*currelem] = (Datum) 0; - } - else - { - nulls[*currelem] = false; - elems[*currelem] = elm->func(elm, -1, obj, true); - } + elems[*currelem] = elm->func(elm, obj, &nulls[*currelem], true); Py_XDECREF(obj); (*currelem)++; } @@ -1126,42 +1285,72 @@ PLySequence_ToArray_recurse(PLyObToDatum *elm, PyObject *list, } +/* + * Convert a Python string to composite, using record_in. 
+ */ static Datum -PLyString_ToComposite(PLyTypeInfo *info, TupleDesc desc, PyObject *string, bool inarray) +PLyString_ToComposite(PLyObToDatum *arg, PyObject *string, bool inarray) { - Datum result; - HeapTuple typeTup; - PLyTypeInfo locinfo; - PLyExecutionContext *exec_ctx = PLy_current_execution_context(); - MemoryContext cxt; - - /* Create a dummy PLyTypeInfo */ - cxt = AllocSetContextCreate(CurrentMemoryContext, - "PL/Python temp context", - ALLOCSET_DEFAULT_SIZES); - MemSet(&locinfo, 0, sizeof(PLyTypeInfo)); - PLy_typeinfo_init(&locinfo, cxt); - - typeTup = SearchSysCache1(TYPEOID, ObjectIdGetDatum(desc->tdtypeid)); - if (!HeapTupleIsValid(typeTup)) - elog(ERROR, "cache lookup failed for type %u", desc->tdtypeid); + char *str; - PLy_output_datum_func2(&locinfo.out.d, locinfo.mcxt, typeTup, - exec_ctx->curr_proc->langid, - exec_ctx->curr_proc->trftypes); + /* + * Set up call data for record_in, if we didn't already. (We can't just + * use DirectFunctionCall, because record_in needs a fn_extra field.) + */ + if (!OidIsValid(arg->u.tuple.recinfunc.fn_oid)) + fmgr_info_cxt(F_RECORD_IN, &arg->u.tuple.recinfunc, arg->mcxt); - ReleaseSysCache(typeTup); + str = PLyObject_AsString(string); - result = PLyObject_ToDatum(&locinfo.out.d, desc->tdtypmod, string, inarray); + /* + * If we are parsing a composite type within an array, and the string + * isn't a valid record literal, there's a high chance that the function + * did something like: + * + * CREATE FUNCTION .. RETURNS comptype[] AS $$ return [['foo', 'bar']] $$ + * LANGUAGE plpython; + * + * Before PostgreSQL 10, that was interpreted as a single-dimensional + * array, containing record ('foo', 'bar'). PostgreSQL 10 added support + * for multi-dimensional arrays, and it is now interpreted as a + * two-dimensional array, containing two records, 'foo', and 'bar'. + * record_in() will throw an error, because "foo" is not a valid record + * literal. + * + * To make that less confusing to users who are upgrading from older + * versions, try to give a hint in the typical instances of that. If we + * are parsing an array of composite types, and we see a string literal + * that is not a valid record literal, give a hint. We only want to give + * the hint in the narrow case of a malformed string literal, not any + * error from record_in(), so check for that case here specifically. + * + * This check better match the one in record_in(), so that we don't forbid + * literals that are actually valid! 
+ */ + if (inarray) + { + char *ptr = str; - MemoryContextDelete(cxt); + /* Allow leading whitespace */ + while (*ptr && isspace((unsigned char) *ptr)) + ptr++; + if (*ptr++ != '(') + ereport(ERROR, + (errcode(ERRCODE_INVALID_TEXT_REPRESENTATION), + errmsg("malformed record literal: \"%s\"", str), + errdetail("Missing left parenthesis."), + errhint("To return a composite type in an array, return the composite type as a Python tuple, e.g., \"[('foo',)]\"."))); + } - return result; + return InputFunctionCall(&arg->u.tuple.recinfunc, + str, + arg->typoid, + arg->typmod); } static Datum -PLyMapping_ToComposite(PLyTypeInfo *info, TupleDesc desc, PyObject *mapping) +PLyMapping_ToComposite(PLyObToDatum *arg, TupleDesc desc, PyObject *mapping) { Datum result; HeapTuple tuple; @@ -1171,10 +1360,6 @@ PLyMapping_ToComposite(PLyTypeInfo *info, TupleDesc desc, PyObject *mapping) Assert(PyMapping_Check(mapping)); - if (info->is_rowtype == 2) - PLy_output_tuple_funcs(info, desc); - Assert(info->is_rowtype == 1); - /* Build tuple */ values = palloc(sizeof(Datum) * desc->natts); nulls = palloc(sizeof(bool) * desc->natts); @@ -1183,37 +1368,30 @@ PLyMapping_ToComposite(PLyTypeInfo *info, TupleDesc desc, PyObject *mapping) char *key; PyObject *volatile value; PLyObToDatum *att; + Form_pg_attribute attr = TupleDescAttr(desc, i); - if (desc->attrs[i]->attisdropped) + if (attr->attisdropped) { values[i] = (Datum) 0; nulls[i] = true; continue; } - key = NameStr(desc->attrs[i]->attname); + key = NameStr(attr->attname); value = NULL; - att = &info->out.r.atts[i]; + att = &arg->u.tuple.atts[i]; PG_TRY(); { value = PyMapping_GetItemString(mapping, key); - if (value == Py_None) - { - values[i] = (Datum) NULL; - nulls[i] = true; - } - else if (value) - { - values[i] = (att->func) (att, -1, value, false); - nulls[i] = false; - } - else + if (!value) ereport(ERROR, (errcode(ERRCODE_UNDEFINED_COLUMN), errmsg("key \"%s\" not found in mapping", key), errhint("To return null in a column, " "add the value None to the mapping with the key named after the column."))); + values[i] = att->func(att, value, &nulls[i], false); + Py_XDECREF(value); value = NULL; } @@ -1237,7 +1415,7 @@ PLyMapping_ToComposite(PLyTypeInfo *info, TupleDesc desc, PyObject *mapping) static Datum -PLySequence_ToComposite(PLyTypeInfo *info, TupleDesc desc, PyObject *sequence) +PLySequence_ToComposite(PLyObToDatum *arg, TupleDesc desc, PyObject *sequence) { Datum result; HeapTuple tuple; @@ -1256,7 +1434,7 @@ PLySequence_ToComposite(PLyTypeInfo *info, TupleDesc desc, PyObject *sequence) idx = 0; for (i = 0; i < desc->natts; i++) { - if (!desc->attrs[i]->attisdropped) + if (!TupleDescAttr(desc, i)->attisdropped) idx++; } if (PySequence_Length(sequence) != idx) @@ -1264,10 +1442,6 @@ PLySequence_ToComposite(PLyTypeInfo *info, TupleDesc desc, PyObject *sequence) (errcode(ERRCODE_DATATYPE_MISMATCH), errmsg("length of returned sequence did not match number of columns in row"))); - if (info->is_rowtype == 2) - PLy_output_tuple_funcs(info, desc); - Assert(info->is_rowtype == 1); - /* Build tuple */ values = palloc(sizeof(Datum) * desc->natts); nulls = palloc(sizeof(bool) * desc->natts); @@ -1277,7 +1451,7 @@ PLySequence_ToComposite(PLyTypeInfo *info, TupleDesc desc, PyObject *sequence) PyObject *volatile value; PLyObToDatum *att; - if (desc->attrs[i]->attisdropped) + if (TupleDescAttr(desc, i)->attisdropped) { values[i] = (Datum) 0; nulls[i] = true; @@ -1285,21 +1459,13 @@ PLySequence_ToComposite(PLyTypeInfo *info, TupleDesc desc, PyObject *sequence) } value = 
NULL; - att = &info->out.r.atts[i]; + att = &arg->u.tuple.atts[i]; PG_TRY(); { value = PySequence_GetItem(sequence, idx); Assert(value); - if (value == Py_None) - { - values[i] = (Datum) NULL; - nulls[i] = true; - } - else if (value) - { - values[i] = (att->func) (att, -1, value, false); - nulls[i] = false; - } + + values[i] = att->func(att, value, &nulls[i], false); Py_XDECREF(value); value = NULL; @@ -1326,7 +1492,7 @@ PLySequence_ToComposite(PLyTypeInfo *info, TupleDesc desc, PyObject *sequence) static Datum -PLyGenericObject_ToComposite(PLyTypeInfo *info, TupleDesc desc, PyObject *object, bool inarray) +PLyGenericObject_ToComposite(PLyObToDatum *arg, TupleDesc desc, PyObject *object, bool inarray) { Datum result; HeapTuple tuple; @@ -1334,10 +1500,6 @@ PLyGenericObject_ToComposite(PLyTypeInfo *info, TupleDesc desc, PyObject *object bool *nulls; volatile int i; - if (info->is_rowtype == 2) - PLy_output_tuple_funcs(info, desc); - Assert(info->is_rowtype == 1); - /* Build tuple */ values = palloc(sizeof(Datum) * desc->natts); nulls = palloc(sizeof(bool) * desc->natts); @@ -1346,31 +1508,22 @@ PLyGenericObject_ToComposite(PLyTypeInfo *info, TupleDesc desc, PyObject *object char *key; PyObject *volatile value; PLyObToDatum *att; + Form_pg_attribute attr = TupleDescAttr(desc, i); - if (desc->attrs[i]->attisdropped) + if (attr->attisdropped) { values[i] = (Datum) 0; nulls[i] = true; continue; } - key = NameStr(desc->attrs[i]->attname); + key = NameStr(attr->attname); value = NULL; - att = &info->out.r.atts[i]; + att = &arg->u.tuple.atts[i]; PG_TRY(); { value = PyObject_GetAttrString(object, key); - if (value == Py_None) - { - values[i] = (Datum) NULL; - nulls[i] = true; - } - else if (value) - { - values[i] = (att->func) (att, -1, value, false); - nulls[i] = false; - } - else + if (!value) { /* * No attribute for this column in the object. @@ -1381,7 +1534,7 @@ PLyGenericObject_ToComposite(PLyTypeInfo *info, TupleDesc desc, PyObject *object * array, with a composite type (123, 'foo') in it. But now * it's interpreted as a two-dimensional array, and we try to * interpret "123" as the composite type. See also similar - * heuristic in PLyObject_ToDatum(). + * heuristic in PLyObject_ToScalar(). */ ereport(ERROR, (errcode(ERRCODE_UNDEFINED_COLUMN), @@ -1391,6 +1544,8 @@ PLyGenericObject_ToComposite(PLyTypeInfo *info, TupleDesc desc, PyObject *object errhint("To return null in a column, let the returned object have an attribute named after column with value None."))); } + values[i] = att->func(att, value, &nulls[i], false); + Py_XDECREF(value); value = NULL; } diff --git a/src/pl/plpython/plpy_typeio.h b/src/pl/plpython/plpy_typeio.h index 95f84d8341..82bdfae548 100644 --- a/src/pl/plpython/plpy_typeio.h +++ b/src/pl/plpython/plpy_typeio.h @@ -6,117 +6,169 @@ #define PLPY_TYPEIO_H #include "access/htup.h" -#include "access/tupdesc.h" #include "fmgr.h" -#include "storage/itemptr.h" +#include "utils/typcache.h" + +struct PLyProcedure; /* avoid requiring plpy_procedure.h here */ + /* - * Conversion from PostgreSQL Datum to a Python object. + * "Input" conversion from PostgreSQL Datum to a Python object. + * + * arg is the previously-set-up conversion data, val is the value to convert. + * val mustn't be NULL. + * + * Note: the conversion data structs should be regarded as private to + * plpy_typeio.c. We declare them here only so that other modules can + * define structs containing them. 
*/ -struct PLyDatumToOb; -typedef PyObject *(*PLyDatumToObFunc) (struct PLyDatumToOb *arg, Datum val); +typedef struct PLyDatumToOb PLyDatumToOb; /* forward reference */ -typedef struct PLyDatumToOb +typedef PyObject *(*PLyDatumToObFunc) (PLyDatumToOb *arg, Datum val); + +typedef struct PLyScalarToOb { - PLyDatumToObFunc func; - FmgrInfo typfunc; /* The type's output function */ - FmgrInfo typtransform; /* from-SQL transform */ - Oid typoid; /* The OID of the type */ - int32 typmod; /* The typmod of the type */ - Oid typioparam; - bool typbyval; - int16 typlen; - char typalign; - struct PLyDatumToOb *elm; -} PLyDatumToOb; + FmgrInfo typfunc; /* lookup info for type's output function */ +} PLyScalarToOb; + +typedef struct PLyArrayToOb +{ + PLyDatumToOb *elm; /* conversion info for array's element type */ +} PLyArrayToOb; typedef struct PLyTupleToOb { - PLyDatumToOb *atts; - int natts; + /* If we're dealing with a RECORD type, actual descriptor is here: */ + TupleDesc recdesc; + /* If we're dealing with a named composite type, these fields are set: */ + TypeCacheEntry *typentry; /* typcache entry for type */ + uint64 tupdescid; /* last tupdesc identifier seen in typcache */ + /* These fields are NULL/0 if not yet set: */ + PLyDatumToOb *atts; /* array of per-column conversion info */ + int natts; /* length of array */ } PLyTupleToOb; -typedef union PLyTypeInput +typedef struct PLyTransformToOb +{ + FmgrInfo typtransform; /* lookup info for from-SQL transform func */ +} PLyTransformToOb; + +struct PLyDatumToOb { - PLyDatumToOb d; - PLyTupleToOb r; -} PLyTypeInput; + PLyDatumToObFunc func; /* conversion control function */ + Oid typoid; /* OID of the source type */ + int32 typmod; /* typmod of the source type */ + bool typbyval; /* its physical representation details */ + int16 typlen; + char typalign; + MemoryContext mcxt; /* context this info is stored in */ + union /* conversion-type-specific data */ + { + PLyScalarToOb scalar; + PLyArrayToOb array; + PLyTupleToOb tuple; + PLyTransformToOb transform; + } u; +}; /* - * Conversion from Python object to a PostgreSQL Datum. + * "Output" conversion from Python object to a PostgreSQL Datum. + * + * arg is the previously-set-up conversion data, val is the value to convert. * - * The 'inarray' argument to the conversion function is true, if the - * converted value was in an array (Python list). It is used to give a - * better error message in some cases. + * *isnull is set to true if val is Py_None, false otherwise. + * (The conversion function *must* be called even for Py_None, + * so that domain constraints can be checked.) + * + * inarray is true if the converted value was in an array (Python list). + * It is used to give a better error message in some cases. 
*/ -struct PLyObToDatum; -typedef Datum (*PLyObToDatumFunc) (struct PLyObToDatum *arg, int32 typmod, PyObject *val, bool inarray); +typedef struct PLyObToDatum PLyObToDatum; /* forward reference */ + +typedef Datum (*PLyObToDatumFunc) (PLyObToDatum *arg, PyObject *val, + bool *isnull, + bool inarray); -typedef struct PLyObToDatum +typedef struct PLyObToScalar { - PLyObToDatumFunc func; - FmgrInfo typfunc; /* The type's input function */ - FmgrInfo typtransform; /* to-SQL transform */ - Oid typoid; /* The OID of the type */ - int32 typmod; /* The typmod of the type */ - Oid typioparam; - bool typbyval; - int16 typlen; - char typalign; - struct PLyObToDatum *elm; -} PLyObToDatum; + FmgrInfo typfunc; /* lookup info for type's input function */ + Oid typioparam; /* argument to pass to it */ +} PLyObToScalar; + +typedef struct PLyObToArray +{ + PLyObToDatum *elm; /* conversion info for array's element type */ + Oid elmbasetype; /* element base type */ +} PLyObToArray; typedef struct PLyObToTuple { - PLyObToDatum *atts; - int natts; + /* If we're dealing with a RECORD type, actual descriptor is here: */ + TupleDesc recdesc; + /* If we're dealing with a named composite type, these fields are set: */ + TypeCacheEntry *typentry; /* typcache entry for type */ + uint64 tupdescid; /* last tupdesc identifier seen in typcache */ + /* These fields are NULL/0 if not yet set: */ + PLyObToDatum *atts; /* array of per-column conversion info */ + int natts; /* length of array */ + /* We might need to convert using record_in(); if so, cache info here */ + FmgrInfo recinfunc; /* lookup info for record_in */ } PLyObToTuple; -typedef union PLyTypeOutput +typedef struct PLyObToDomain { - PLyObToDatum d; - PLyObToTuple r; -} PLyTypeOutput; + PLyObToDatum *base; /* conversion info for domain's base type */ + void *domain_info; /* cache space for domain_check() */ +} PLyObToDomain; -/* all we need to move PostgreSQL data to Python objects, - * and vice versa - */ -typedef struct PLyTypeInfo +typedef struct PLyObToTransform { - PLyTypeInput in; - PLyTypeOutput out; - - /* - * is_rowtype can be: -1 = not known yet (initial state); 0 = scalar - * datatype; 1 = rowtype; 2 = rowtype, but I/O functions not set up yet - */ - int is_rowtype; - /* used to check if the type has been modified */ - Oid typ_relid; - TransactionId typrel_xmin; - ItemPointerData typrel_tid; - - /* context for subsidiary data (doesn't belong to this struct though) */ - MemoryContext mcxt; -} PLyTypeInfo; - -extern void PLy_typeinfo_init(PLyTypeInfo *arg, MemoryContext mcxt); + FmgrInfo typtransform; /* lookup info for to-SQL transform function */ +} PLyObToTransform; -extern void PLy_input_datum_func(PLyTypeInfo *arg, Oid typeOid, HeapTuple typeTup, Oid langid, List *trftypes); -extern void PLy_output_datum_func(PLyTypeInfo *arg, HeapTuple typeTup, Oid langid, List *trftypes); - -extern void PLy_input_tuple_funcs(PLyTypeInfo *arg, TupleDesc desc); -extern void PLy_output_tuple_funcs(PLyTypeInfo *arg, TupleDesc desc); - -extern void PLy_output_record_funcs(PLyTypeInfo *arg, TupleDesc desc); - -/* conversion from Python objects to composite Datums */ -extern Datum PLyObject_ToCompositeDatum(PLyTypeInfo *info, TupleDesc desc, PyObject *plrv, bool isarray); - -/* conversion from heap tuples to Python dictionaries */ -extern PyObject *PLyDict_FromTuple(PLyTypeInfo *info, HeapTuple tuple, TupleDesc desc); - -/* conversion from Python objects to C strings */ +struct PLyObToDatum +{ + PLyObToDatumFunc func; /* conversion control function */ + Oid typoid; /* 
OID of the target type */ + int32 typmod; /* typmod of the target type */ + bool typbyval; /* its physical representation details */ + int16 typlen; + char typalign; + MemoryContext mcxt; /* context this info is stored in */ + union /* conversion-type-specific data */ + { + PLyObToScalar scalar; + PLyObToArray array; + PLyObToTuple tuple; + PLyObToDomain domain; + PLyObToTransform transform; + } u; +}; + + +extern PyObject *PLy_input_convert(PLyDatumToOb *arg, Datum val); +extern Datum PLy_output_convert(PLyObToDatum *arg, PyObject *val, + bool *isnull); + +extern PyObject *PLy_input_from_tuple(PLyDatumToOb *arg, HeapTuple tuple, + TupleDesc desc); + +extern void PLy_input_setup_func(PLyDatumToOb *arg, MemoryContext arg_mcxt, + Oid typeOid, int32 typmod, + struct PLyProcedure *proc); +extern void PLy_output_setup_func(PLyObToDatum *arg, MemoryContext arg_mcxt, + Oid typeOid, int32 typmod, + struct PLyProcedure *proc); + +extern void PLy_input_setup_tuple(PLyDatumToOb *arg, TupleDesc desc, + struct PLyProcedure *proc); +extern void PLy_output_setup_tuple(PLyObToDatum *arg, TupleDesc desc, + struct PLyProcedure *proc); + +extern void PLy_output_setup_record(PLyObToDatum *arg, TupleDesc desc, + struct PLyProcedure *proc); + +/* conversion from Python objects to C strings --- exported for transforms */ extern char *PLyObject_AsString(PyObject *plrv); #endif /* PLPY_TYPEIO_H */ diff --git a/src/pl/plpython/plpy_util.c b/src/pl/plpython/plpy_util.c index 35d57a9e80..2128acbd91 100644 --- a/src/pl/plpython/plpy_util.c +++ b/src/pl/plpython/plpy_util.c @@ -8,7 +8,6 @@ #include "mb/pg_wchar.h" #include "utils/memutils.h" -#include "utils/palloc.h" #include "plpython.h" @@ -85,7 +84,7 @@ PLyUnicode_Bytes(PyObject *unicode) * function. The result is palloc'ed. * * Note that this function is disguised as PyString_AsString() when - * using Python 3. That function retuns a pointer into the internal + * using Python 3. That function returns a pointer into the internal * memory of the argument, which isn't exactly the interface of this * function. But in either case you get a rather short-lived * reference that you ought to better leave alone. diff --git a/src/pl/plpython/plpython.h b/src/pl/plpython/plpython.h index 9a8e8f246d..8cb922de72 100644 --- a/src/pl/plpython/plpython.h +++ b/src/pl/plpython/plpython.h @@ -2,7 +2,7 @@ * * plpython.h - Python as a procedural language for PostgreSQL * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/pl/plpython/plpython.h @@ -27,18 +27,20 @@ */ #undef _POSIX_C_SOURCE #undef _XOPEN_SOURCE -#undef HAVE_STRERROR #undef HAVE_TZNAME /* * Sometimes python carefully scribbles on our *printf macros. * So we undefine them here and redefine them after it's done its dirty deed. */ - -#ifdef USE_REPL_SNPRINTF -#undef snprintf #undef vsnprintf -#endif +#undef snprintf +#undef vsprintf +#undef sprintf +#undef vfprintf +#undef fprintf +#undef vprintf +#undef printf #if defined(_MSC_VER) && defined(_DEBUG) /* Python uses #pragma to bring in a non-default libpython on VC++ if @@ -124,22 +126,40 @@ typedef int Py_ssize_t; #include #include -/* put back our snprintf and vsnprintf */ -#ifdef USE_REPL_SNPRINTF +/* put back our *printf macros ... 
this must match src/include/port.h */ +#ifdef vsnprintf +#undef vsnprintf +#endif #ifdef snprintf #undef snprintf #endif -#ifdef vsnprintf -#undef vsnprintf +#ifdef vsprintf +#undef vsprintf #endif -#ifdef __GNUC__ -#define vsnprintf(...) pg_vsnprintf(__VA_ARGS__) -#define snprintf(...) pg_snprintf(__VA_ARGS__) -#else -#define vsnprintf pg_vsnprintf -#define snprintf pg_snprintf -#endif /* __GNUC__ */ -#endif /* USE_REPL_SNPRINTF */ +#ifdef sprintf +#undef sprintf +#endif +#ifdef vfprintf +#undef vfprintf +#endif +#ifdef fprintf +#undef fprintf +#endif +#ifdef vprintf +#undef vprintf +#endif +#ifdef printf +#undef printf +#endif + +#define vsnprintf pg_vsnprintf +#define snprintf pg_snprintf +#define vsprintf pg_vsprintf +#define sprintf pg_sprintf +#define vfprintf pg_vfprintf +#define fprintf pg_fprintf +#define vprintf pg_vprintf +#define printf(...) pg_printf(__VA_ARGS__) /* * Used throughout, and also by the Python 2/3 porting layer, so it's easier to diff --git a/src/pl/plpython/po/de.po b/src/pl/plpython/po/de.po index 4dae50327c..cfd35fec12 100644 --- a/src/pl/plpython/po/de.po +++ b/src/pl/plpython/po/de.po @@ -1,16 +1,16 @@ # German message translation file for plpython -# Copyright (C) 2009 - 2017 PostgreSQL Global Development Group +# Copyright (C) 2009 - 2018 PostgreSQL Global Development Group # This file is distributed under the same license as the PostgreSQL package. -# Peter Eisentraut , 2009 - 2017. +# Peter Eisentraut , 2009 - 2018. # # Use these quotes: »%s« # msgid "" msgstr "" -"Project-Id-Version: PostgreSQL 10\n" +"Project-Id-Version: PostgreSQL 11\n" "Report-Msgid-Bugs-To: pgsql-bugs@postgresql.org\n" -"POT-Creation-Date: 2017-08-04 15:37+0000\n" -"PO-Revision-Date: 2017-08-04 13:01-0400\n" +"POT-Creation-Date: 2018-05-07 00:38+0000\n" +"PO-Revision-Date: 2018-05-06 21:21-0400\n" "Last-Translator: Peter Eisentraut \n" "Language-Team: German \n" "Language: de\n" @@ -19,164 +19,164 @@ msgstr "" "Content-Transfer-Encoding: 8bit\n" "Plural-Forms: nplurals=2; plural=n != 1;\n" -#: plpy_cursorobject.c:100 +#: plpy_cursorobject.c:101 #, c-format msgid "plpy.cursor expected a query or a plan" msgstr "plpy.cursor hat eine Anfrage oder einen Plan erwartet" -#: plpy_cursorobject.c:176 +#: plpy_cursorobject.c:184 #, c-format msgid "plpy.cursor takes a sequence as its second argument" msgstr "plpy.cursor nimmt eine Sequenz als zweites Argument" -#: plpy_cursorobject.c:192 plpy_spi.c:226 +#: plpy_cursorobject.c:200 plpy_spi.c:211 #, c-format msgid "could not execute plan" msgstr "konnte Plan nicht ausführen" -#: plpy_cursorobject.c:195 plpy_spi.c:229 +#: plpy_cursorobject.c:203 plpy_spi.c:214 #, c-format msgid "Expected sequence of %d argument, got %d: %s" msgid_plural "Expected sequence of %d arguments, got %d: %s" msgstr[0] "Sequenz aus %d Argument erwartet, aber %d erhalten: %s" msgstr[1] "Sequenz aus %d Argumenten erwartet, aber %d erhalten: %s" -#: plpy_cursorobject.c:350 +#: plpy_cursorobject.c:352 #, c-format msgid "iterating a closed cursor" msgstr "Iteration mit einem geschlossenen Cursor" -#: plpy_cursorobject.c:358 plpy_cursorobject.c:423 +#: plpy_cursorobject.c:360 plpy_cursorobject.c:426 #, c-format msgid "iterating a cursor in an aborted subtransaction" msgstr "Iteration mit einem Cursor in einer abgebrochenen Transaktionen" -#: plpy_cursorobject.c:415 +#: plpy_cursorobject.c:418 #, c-format msgid "fetch from a closed cursor" msgstr "Lesen aus einem geschlossenen Cursor" -#: plpy_cursorobject.c:463 plpy_spi.c:434 +#: plpy_cursorobject.c:461 plpy_spi.c:409 #, 
c-format msgid "query result has too many rows to fit in a Python list" msgstr "Anfrageergebnis hat zu viele Zeilen, um in eine Python-Liste zu passen" -#: plpy_cursorobject.c:504 +#: plpy_cursorobject.c:512 #, c-format msgid "closing a cursor in an aborted subtransaction" msgstr "Schließen eines Cursors in einer abgebrochenen Subtransaktion" -#: plpy_elog.c:127 plpy_elog.c:128 plpy_plpymodule.c:548 +#: plpy_elog.c:127 plpy_elog.c:128 plpy_plpymodule.c:559 #, c-format msgid "%s" msgstr "%s" -#: plpy_exec.c:140 +#: plpy_exec.c:142 #, c-format msgid "unsupported set function return mode" msgstr "nicht unterstützter Rückgabemodus für Funktion mit Mengenergebnis" -#: plpy_exec.c:141 +#: plpy_exec.c:143 #, c-format msgid "PL/Python set-returning functions only support returning one value per call." msgstr "PL/Python unterstützt für Funktionen mit Mengenergebnis nur das Zurückgeben von einem Wert pro Aufruf." -#: plpy_exec.c:154 +#: plpy_exec.c:156 #, c-format msgid "returned object cannot be iterated" msgstr "zurückgegebenes Objekt kann nicht iteriert werden" -#: plpy_exec.c:155 +#: plpy_exec.c:157 #, c-format msgid "PL/Python set-returning functions must return an iterable object." msgstr "PL/Python-Funktionen mit Mengenergebnis müssen ein iterierbares Objekt zurückgeben." -#: plpy_exec.c:169 +#: plpy_exec.c:171 #, c-format msgid "error fetching next item from iterator" msgstr "Fehler beim Auslesen des nächsten Elements vom Iterator" -#: plpy_exec.c:210 +#: plpy_exec.c:214 +#, c-format +msgid "PL/Python procedure did not return None" +msgstr "PL/Python-Prozedur hat nicht None zurückgegeben" + +#: plpy_exec.c:218 #, c-format msgid "PL/Python function with return type \"void\" did not return None" msgstr "PL/Python-Funktion mit Rückgabetyp »void« hat nicht None zurückgegeben" -#: plpy_exec.c:379 plpy_exec.c:405 +#: plpy_exec.c:374 plpy_exec.c:400 #, c-format msgid "unexpected return value from trigger procedure" msgstr "unerwarteter Rückgabewert von Triggerprozedur" -#: plpy_exec.c:380 +#: plpy_exec.c:375 #, c-format msgid "Expected None or a string." msgstr "Erwartete None oder eine Zeichenkette." -#: plpy_exec.c:395 +#: plpy_exec.c:390 #, c-format msgid "PL/Python trigger function returned \"MODIFY\" in a DELETE trigger -- ignored" msgstr "PL/Python-Funktion gab in einem DELETE-Trigger \"MODIFY\" zurück -- ignoriert" -#: plpy_exec.c:406 +#: plpy_exec.c:401 #, c-format msgid "Expected None, \"OK\", \"SKIP\", or \"MODIFY\"." msgstr "Erwartete None, \"OK\", \"SKIP\" oder \"MODIFY\"." 
-#: plpy_exec.c:487 +#: plpy_exec.c:451 #, c-format msgid "PyList_SetItem() failed, while setting up arguments" msgstr "PyList_SetItem() fehlgeschlagen, beim Einrichten der Argumente" -#: plpy_exec.c:491 +#: plpy_exec.c:455 #, c-format msgid "PyDict_SetItemString() failed, while setting up arguments" msgstr "PyDict_SetItemString() fehlgeschlagen, beim Einrichten der Argumente" -#: plpy_exec.c:503 +#: plpy_exec.c:467 #, c-format msgid "function returning record called in context that cannot accept type record" msgstr "Funktion, die einen Record zurückgibt, in einem Zusammenhang aufgerufen, der Typ record nicht verarbeiten kann" -#: plpy_exec.c:719 +#: plpy_exec.c:684 #, c-format msgid "while creating return value" msgstr "beim Erzeugen des Rückgabewerts" -#: plpy_exec.c:743 -#, c-format -msgid "could not create new dictionary while building trigger arguments" -msgstr "konnte neues Dictionary nicht erzeugen, beim Aufbauen der Triggerargumente" - -#: plpy_exec.c:931 +#: plpy_exec.c:909 #, c-format msgid "TD[\"new\"] deleted, cannot modify row" msgstr "TD[\"new\"] wurde gelöscht, kann Zeile nicht ändern" -#: plpy_exec.c:936 +#: plpy_exec.c:914 #, c-format msgid "TD[\"new\"] is not a dictionary" msgstr "TD[\"new\"] ist kein Dictionary" -#: plpy_exec.c:963 +#: plpy_exec.c:941 #, c-format msgid "TD[\"new\"] dictionary key at ordinal position %d is not a string" msgstr "Dictionary-Schlüssel auf Position %d in TD[\"new\"] ist keine Zeichenkette" -#: plpy_exec.c:970 +#: plpy_exec.c:948 #, c-format msgid "key \"%s\" found in TD[\"new\"] does not exist as a column in the triggering row" msgstr "in TD[\"new\"] gefundener Schlüssel »%s« existiert nicht als Spalte in der den Trigger auslösenden Zeile" -#: plpy_exec.c:975 +#: plpy_exec.c:953 #, c-format msgid "cannot set system attribute \"%s\"" msgstr "Systemattribut »%s« kann nicht gesetzt werden" -#: plpy_exec.c:1046 +#: plpy_exec.c:1011 #, c-format msgid "while modifying trigger row" msgstr "beim Ändern der Triggerzeile" -#: plpy_exec.c:1107 +#: plpy_exec.c:1072 #, c-format msgid "forcibly aborting a subtransaction that has not been exited" msgstr "Abbruch einer Subtransaktion, die nicht beendet wurde, wird erzwungen" @@ -201,71 +201,66 @@ msgstr "nicht abgefangener Fehler bei der Initialisierung" msgid "could not import \"__main__\" module" msgstr "konnte Modul »__main__« nicht importieren" -#: plpy_main.c:170 -#, c-format -msgid "could not create globals" -msgstr "konnte globale Objekte nicht erzeugen" - #: plpy_main.c:174 #, c-format msgid "could not initialize globals" msgstr "konnte globale Objekte nicht initialisieren" -#: plpy_main.c:387 +#: plpy_main.c:399 +#, c-format +msgid "PL/Python procedure \"%s\"" +msgstr "PL/Python-Prozedur »%s«" + +#: plpy_main.c:402 #, c-format msgid "PL/Python function \"%s\"" msgstr "PL/Python-Funktion »%s«" -#: plpy_main.c:394 +#: plpy_main.c:410 #, c-format msgid "PL/Python anonymous code block" msgstr "anonymer PL/Python-Codeblock" -#: plpy_plpymodule.c:181 plpy_plpymodule.c:184 +#: plpy_plpymodule.c:192 plpy_plpymodule.c:195 #, c-format msgid "could not import \"plpy\" module" msgstr "konnte Modul »plpy« nicht importieren" -#: plpy_plpymodule.c:199 +#: plpy_plpymodule.c:210 #, c-format msgid "could not create the spiexceptions module" msgstr "konnte das Modul »spiexceptions« nicht erzeugen" -#: plpy_plpymodule.c:207 +#: plpy_plpymodule.c:218 #, c-format msgid "could not add the spiexceptions module" msgstr "konnte das Modul »spiexceptions« nicht hinzufügen" -#: plpy_plpymodule.c:236 -#, c-format -msgid "could 
not create exception \"%s\"" -msgstr "konnte Ausnahme »%s« nicht erzeugen" - -#: plpy_plpymodule.c:271 plpy_plpymodule.c:275 +#: plpy_plpymodule.c:286 #, c-format msgid "could not generate SPI exceptions" msgstr "konnte SPI-Ausnahmen nicht erzeugen" -#: plpy_plpymodule.c:443 +#: plpy_plpymodule.c:454 #, c-format msgid "could not unpack arguments in plpy.elog" msgstr "konnte Argumente in plpy.elog nicht entpacken" -#: plpy_plpymodule.c:452 +#: plpy_plpymodule.c:463 msgid "could not parse error message in plpy.elog" msgstr "konnte Fehlermeldung in plpy.elog nicht parsen" -#: plpy_plpymodule.c:469 +#: plpy_plpymodule.c:480 #, c-format msgid "argument 'message' given by name and position" msgstr "Argument »message« wurde durch Namen und Position angegeben" -#: plpy_plpymodule.c:496 +#: plpy_plpymodule.c:507 #, c-format msgid "'%s' is an invalid keyword argument for this function" msgstr "»%s« ist ein ungültiges Schlüsselwortargument für diese Funktion" -#: plpy_plpymodule.c:507 plpy_plpymodule.c:513 +#: plpy_plpymodule.c:518 plpy_plpymodule.c:524 #, c-format msgid "invalid SQLSTATE code" msgstr "ungültiger SQLSTATE-Code" @@ -275,57 +270,57 @@ msgstr "ungültiger SQLSTATE-Code" msgid "trigger functions can only be called as triggers" msgstr "Triggerfunktionen können nur als Trigger aufgerufen werden" -#: plpy_procedure.c:235 +#: plpy_procedure.c:234 #, c-format msgid "PL/Python functions cannot return type %s" msgstr "PL/Python-Funktionen können keinen Rückgabetyp %s haben" -#: plpy_procedure.c:316 +#: plpy_procedure.c:312 #, c-format msgid "PL/Python functions cannot accept type %s" msgstr "PL/Python-Funktionen können Typ %s nicht annehmen" -#: plpy_procedure.c:412 +#: plpy_procedure.c:402 #, c-format msgid "could not compile PL/Python function \"%s\"" msgstr "konnte PL/Python-Funktion »%s« nicht kompilieren" -#: plpy_procedure.c:415 +#: plpy_procedure.c:405 #, c-format msgid "could not compile anonymous PL/Python code block" msgstr "konnte anonymen PL/Python-Codeblock nicht kompilieren" -#: plpy_resultobject.c:145 plpy_resultobject.c:165 plpy_resultobject.c:185 +#: plpy_resultobject.c:150 plpy_resultobject.c:176 plpy_resultobject.c:202 #, c-format msgid "command did not produce a result set" msgstr "Befehl hat keine Ergebnismenge erzeugt" -#: plpy_spi.c:59 +#: plpy_spi.c:60 #, c-format msgid "second argument of plpy.prepare must be a sequence" msgstr "zweites Argument von plpy.prepare muss eine Sequenz sein" -#: plpy_spi.c:115 +#: plpy_spi.c:104 #, c-format msgid "plpy.prepare: type name at ordinal position %d is not a string" msgstr "plpy.prepare: Typname auf Position %d ist keine Zeichenkette" -#: plpy_spi.c:191 +#: plpy_spi.c:176 #, c-format msgid "plpy.execute expected a query or a plan" msgstr "plpy.execute hat eine Anfrage oder einen Plan erwartet" -#: plpy_spi.c:210 +#: plpy_spi.c:195 #, c-format msgid "plpy.execute takes a sequence as its second argument" msgstr "plpy.execute nimmt eine Sequenz als zweites Argument" -#: plpy_spi.c:335 +#: plpy_spi.c:305 #, c-format msgid "SPI_execute_plan failed: %s" msgstr "SPI_execute_plan fehlgeschlagen: %s" -#: plpy_spi.c:377 +#: plpy_spi.c:347 #, c-format msgid "SPI_execute failed: %s" msgstr "SPI_execute fehlgeschlagen: %s" @@ -350,113 +345,112 @@ msgstr "diese Subtransaktion wurde nicht begonnen" msgid "there is no subtransaction to exit from" msgstr "es gibt keine Subtransaktion zu beenden" -#: plpy_typeio.c:292 -#, c-format -msgid "could not create new dictionary" -msgstr "konnte neues Dictionary nicht erzeugen" - -#: plpy_typeio.c:560 +#: 
plpy_typeio.c:591 #, c-format msgid "could not import a module for Decimal constructor" msgstr "konnte kein Modul für den »Decimal«-Konstruktor importieren" -#: plpy_typeio.c:564 +#: plpy_typeio.c:595 #, c-format msgid "no Decimal attribute in module" msgstr "kein Attribut »Decimal« im Modul" -#: plpy_typeio.c:570 +#: plpy_typeio.c:601 #, c-format msgid "conversion from numeric to Decimal failed" msgstr "Umwandlung von numeric in Decimal fehlgeschlagen" -#: plpy_typeio.c:773 +#: plpy_typeio.c:908 #, c-format msgid "could not create bytes representation of Python object" msgstr "konnte Bytes-Darstellung eines Python-Objektes nicht erzeugen" -#: plpy_typeio.c:882 +#: plpy_typeio.c:1056 #, c-format msgid "could not create string representation of Python object" msgstr "konnte Zeichenkettendarstellung eines Python-Objektes nicht erzeugen" -#: plpy_typeio.c:893 +#: plpy_typeio.c:1067 #, c-format msgid "could not convert Python object into cstring: Python string representation appears to contain null bytes" msgstr "konnte Python-Objekt nicht in cstring umwandeln: Python-Zeichenkettendarstellung enthält anscheinend Null-Bytes" -#: plpy_typeio.c:950 -#, c-format -msgid "malformed record literal: \"%s\"" -msgstr "fehlerhafte Record-Konstante: »%s«" - -#: plpy_typeio.c:951 -#, c-format -msgid "Missing left parenthesis." -msgstr "Linke Klammer fehlt." - -#: plpy_typeio.c:952 plpy_typeio.c:1390 -#, c-format -msgid "To return a composite type in an array, return the composite type as a Python tuple, e.g., \"[('foo',)]\"." -msgstr "Um einen zusammengesetzten Typ in einem Array zurückzugeben, geben Sie den zusammengesetzten Typ als ein Python-Tupel zurück, z.B. »[('foo',)]«." - -#: plpy_typeio.c:1001 +#: plpy_typeio.c:1176 #, c-format msgid "number of array dimensions exceeds the maximum allowed (%d)" msgstr "Anzahl der Arraydimensionen überschreitet erlaubtes Maximum (%d)" -#: plpy_typeio.c:1005 +#: plpy_typeio.c:1180 #, c-format -msgid "cannot determine sequence length for function return value" -msgstr "kann Sequenzlänge für Funktionsrückgabewert nicht ermitteln" +msgid "could not determine sequence length for function return value" +msgstr "konnte Sequenzlänge für Funktionsrückgabewert nicht ermitteln" -#: plpy_typeio.c:1008 plpy_typeio.c:1012 +#: plpy_typeio.c:1183 plpy_typeio.c:1187 #, c-format msgid "array size exceeds the maximum allowed" msgstr "Arraygröße überschreitet erlaubtes Maximum" -#: plpy_typeio.c:1038 +#: plpy_typeio.c:1213 #, c-format msgid "return value of function with array return type is not a Python sequence" msgstr "Rückgabewert von Funktion mit Array-Rückgabetyp ist keine Python-Sequenz" -#: plpy_typeio.c:1091 -#, fuzzy, c-format -#| msgid "multidimensional arrays must have array expressions with matching dimensions" -msgid "multidimensional arrays must have array expressions with matching dimensions. PL/Python function return value has sequence length %d while expected %d" -msgstr "mehrdimensionale Arrays müssen Arraysausdrücke mit gleicher Anzahl Dimensionen haben" +#: plpy_typeio.c:1259 +#, c-format +msgid "wrong length of inner sequence: has length %d, but %d was expected" +msgstr "falsche Länge der inneren Sequenz: hat Länge %d, aber %d wurde erwartet" + +#: plpy_typeio.c:1261 +#, c-format +msgid "To construct a multidimensional array, the inner sequences must all have the same length." +msgstr "Um ein mehrdimensionales Array zu konstruieren, müssen die inneren Sequenzen alle die gleiche Länge haben." 
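The array- and record-related hints above describe behavior that is easy to reproduce from SQL. A minimal sketch, assuming the plpythonu language is installed; the type and function names are illustrative:

    CREATE FUNCTION grid() RETURNS integer[] AS $$
        # The inner sequences must all have the same length, otherwise the
        # "wrong length of inner sequence" error above is raised.
        return [[1, 2, 3], [4, 5, 6]]
    $$ LANGUAGE plpythonu;

    CREATE TYPE named_value AS (name text, value integer);

    CREATE FUNCTION named_values() RETURNS named_value[] AS $$
        # Since PostgreSQL 10 a nested list is treated as a multidimensional
        # array, so composite values inside an array are returned as Python
        # tuples, as the "[('foo',)]" hint above suggests.
        return [('foo', 1), ('bar', 2)]
    $$ LANGUAGE plpythonu;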
-#: plpy_typeio.c:1213 +#: plpy_typeio.c:1340 +#, c-format +msgid "malformed record literal: \"%s\"" +msgstr "fehlerhafte Record-Konstante: »%s«" + +#: plpy_typeio.c:1341 +#, c-format +msgid "Missing left parenthesis." +msgstr "Linke Klammer fehlt." + +#: plpy_typeio.c:1342 plpy_typeio.c:1543 +#, c-format +msgid "To return a composite type in an array, return the composite type as a Python tuple, e.g., \"[('foo',)]\"." +msgstr "Um einen zusammengesetzten Typ in einem Array zurückzugeben, geben Sie den zusammengesetzten Typ als ein Python-Tupel zurück, z.B. »[('foo',)]«." + +#: plpy_typeio.c:1389 #, c-format msgid "key \"%s\" not found in mapping" msgstr "Schlüssel »%s« nicht in Mapping gefunden" -#: plpy_typeio.c:1214 +#: plpy_typeio.c:1390 #, c-format msgid "To return null in a column, add the value None to the mapping with the key named after the column." msgstr "Um einen NULL-Wert in einer Spalte zurückzugeben, muss der Wert None mit einem nach der Spalte benannten Schlüssel in das Mapping eingefügt werden." -#: plpy_typeio.c:1265 +#: plpy_typeio.c:1443 #, c-format msgid "length of returned sequence did not match number of columns in row" msgstr "Länge der zurückgegebenen Sequenz hat nicht mit der Anzahl der Spalten in der Zeile übereingestimmt" -#: plpy_typeio.c:1388 +#: plpy_typeio.c:1541 #, c-format msgid "attribute \"%s\" does not exist in Python object" msgstr "Attribut »%s« existiert nicht in Python-Objekt" -#: plpy_typeio.c:1391 +#: plpy_typeio.c:1544 #, c-format msgid "To return null in a column, let the returned object have an attribute named after column with value None." msgstr "Um einen NULL-Wert in einer Spalte zurückzugeben, muss das zurückzugebende Objekt ein nach der Spalte benanntes Attribut mit dem Wert None haben." -#: plpy_util.c:36 +#: plpy_util.c:35 #, c-format msgid "could not convert Python Unicode object to bytes" msgstr "konnte Python-Unicode-Objekt nicht in Bytes umwandeln" -#: plpy_util.c:42 +#: plpy_util.c:41 #, c-format msgid "could not extract bytes from encoded string" msgstr "konnte kodierte Zeichenkette nicht in Bytes umwandeln" diff --git a/src/pl/plpython/po/fr.po b/src/pl/plpython/po/fr.po index 43988c0869..05539cb8f4 100644 --- a/src/pl/plpython/po/fr.po +++ b/src/pl/plpython/po/fr.po @@ -8,8 +8,8 @@ msgid "" msgstr "" "Project-Id-Version: PostgreSQL 9.6\n" "Report-Msgid-Bugs-To: pgsql-bugs@postgresql.org\n" -"POT-Creation-Date: 2017-07-06 14:37+0000\n" -"PO-Revision-Date: 2017-07-06 18:07+0200\n" +"POT-Creation-Date: 2017-08-07 14:07+0000\n" +"PO-Revision-Date: 2017-08-07 18:15+0200\n" "Last-Translator: Guillaume Lelarge \n" "Language-Team: French \n" "Language: fr\n" @@ -17,7 +17,7 @@ msgstr "" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" "Plural-Forms: nplurals=2; plural=(n > 1);\n" -"X-Generator: Poedit 1.8.12\n" +"X-Generator: Poedit 2.0.2\n" #: plpy_cursorobject.c:100 #, c-format @@ -419,8 +419,8 @@ msgstr "le nombre de dimensions du tableau dépasse le maximum autorisé (%d)" #: plpy_typeio.c:1005 #, c-format -msgid "cannot determine sequence length for function return value" -msgstr "ne peut pas déterminer la longueur de la séquence pour la valeur de retour de la fonction" +msgid "could not determine sequence length for function return value" +msgstr "n'a pas pu déterminer la longueur de la séquence pour la valeur de retour de la fonction" #: plpy_typeio.c:1008 plpy_typeio.c:1012 #, c-format @@ -434,11 +434,13 @@ msgstr "la valeur de retour de la fonction de type tableau n'est pas une séquen #: 
plpy_typeio.c:1091 #, c-format -msgid "multidimensional arrays must have array expressions with matching dimensions. PL/Python function return value has sequence length %d while expected %d" -msgstr "" -"les tableaux multidimensionnels doivent avoir des expressions de tableaux\n" -"avec des dimensions correspondantes. La valeur de retour de la fonction\n" -"PL/Python a une longueur de séquence %d alors que %d est attendue" +msgid "wrong length of inner sequence: has length %d, but %d was expected" +msgstr "mauvaise longueur de la séquence interne : a une longueur %d, mais %d était attendu" + +#: plpy_typeio.c:1093 +#, c-format +msgid "To construct a multidimensional array, the inner sequences must all have the same length." +msgstr "Pour construire un tableau multidimensionnel, les séquences internes doivent toutes avoir la même longueur." #: plpy_typeio.c:1213 #, c-format @@ -481,89 +483,95 @@ msgstr "n'a pas pu convertir l'objet Unicode Python en octets" msgid "could not extract bytes from encoded string" msgstr "n'a pas pu extraire les octets de la chaîne encodée" -#~ msgid "could not create the base SPI exceptions" -#~ msgstr "n'a pas pu créer les exceptions SPI de base" +#~ msgid "plan.status takes no arguments" +#~ msgstr "plan.status ne prends pas d'arguments" -#~ msgid "Python major version mismatch in session" -#~ msgstr "Différence de version majeure de Python dans la session" +#~ msgid "cannot convert multidimensional array to Python list" +#~ msgstr "ne peut pas convertir un tableau multidimensionnel en liste Python" -#~ msgid "This session has previously used Python major version %d, and it is now attempting to use Python major version %d." -#~ msgstr "" -#~ "Cette session a auparavant utilisé la version majeure %d de Python et elle\n" -#~ "essaie maintenant d'utiliser la version majeure %d." +#~ msgid "PL/Python only supports one-dimensional arrays." +#~ msgstr "PL/Python supporte seulement les tableaux uni-dimensionnels." -#~ msgid "Start a new session to use a different Python major version." -#~ msgstr "" -#~ "Lancez une nouvelle session pour utiliser une version majeure différente de\n" -#~ "Python." +#~ msgid "could not create new Python list" +#~ msgstr "n'a pas pu créer la nouvelle liste Python" -#~ msgid "PL/Python function \"%s\" could not execute plan" -#~ msgstr "la fonction PL/python « %s » n'a pas pu exécuter un plan" +#~ msgid "the message is already specified" +#~ msgstr "le message est déjà spécifié" -#~ msgid "could not create string representation of Python object in PL/Python function \"%s\" while creating return value" -#~ msgstr "" -#~ "n'a pas pu créer la représentation en chaîne de caractère de l'objet\n" -#~ "Python dans la fonction PL/python « %s » lors de la création de la valeur\n" -#~ "de retour" +#~ msgid "plpy.prepare does not support composite types" +#~ msgstr "plpy.prepare ne supporte pas les types composites" -#~ msgid "could not compute string representation of Python object in PL/Python function \"%s\" while modifying trigger row" -#~ msgstr "" -#~ "n'a pas pu traiter la représentation de la chaîne d'un objet Python dans\n" -#~ "la fonction PL/Python « %s » lors de la modification de la ligne du trigger" +#~ msgid "PL/Python does not support conversion to arrays of row types." +#~ msgstr "PL/Python ne supporte pas les conversions vers des tableaux de types row." 
-#~ msgid "PL/Python function \"%s\" failed" -#~ msgstr "échec de la fonction PL/python « %s »" +#~ msgid "unrecognized error in PLy_spi_execute_fetch_result" +#~ msgstr "erreur inconnue dans PLy_spi_execute_fetch_result" -#~ msgid "out of memory" -#~ msgstr "mémoire épuisée" +#~ msgid "PyCObject_AsVoidPtr() failed" +#~ msgstr "échec de PyCObject_AsVoidPtr()" -#~ msgid "PL/Python: %s" -#~ msgstr "PL/python : %s" +#~ msgid "PyCObject_FromVoidPtr() failed" +#~ msgstr "échec de PyCObject_FromVoidPtr()" -#~ msgid "could not create procedure cache" -#~ msgstr "n'a pas pu créer le cache de procédure" +#~ msgid "transaction aborted" +#~ msgstr "transaction annulée" -#~ msgid "unrecognized error in PLy_spi_execute_query" -#~ msgstr "erreur inconnue dans PLy_spi_execute_query" +#~ msgid "invalid arguments for plpy.prepare" +#~ msgstr "arguments invalides pour plpy.prepare" + +#~ msgid "unrecognized error in PLy_spi_prepare" +#~ msgstr "erreur inconnue dans PLy_spi_prepare" #~ msgid "unrecognized error in PLy_spi_execute_plan" #~ msgstr "erreur inconnue dans PLy_spi_execute_plan" -#~ msgid "unrecognized error in PLy_spi_prepare" -#~ msgstr "erreur inconnue dans PLy_spi_prepare" +#~ msgid "unrecognized error in PLy_spi_execute_query" +#~ msgstr "erreur inconnue dans PLy_spi_execute_query" -#~ msgid "invalid arguments for plpy.prepare" -#~ msgstr "arguments invalides pour plpy.prepare" +#~ msgid "could not create procedure cache" +#~ msgstr "n'a pas pu créer le cache de procédure" -#~ msgid "transaction aborted" -#~ msgstr "transaction annulée" +#~ msgid "PL/Python: %s" +#~ msgstr "PL/python : %s" -#~ msgid "PyCObject_FromVoidPtr() failed" -#~ msgstr "échec de PyCObject_FromVoidPtr()" +#~ msgid "out of memory" +#~ msgstr "mémoire épuisée" -#~ msgid "PyCObject_AsVoidPtr() failed" -#~ msgstr "échec de PyCObject_AsVoidPtr()" +#~ msgid "PL/Python function \"%s\" failed" +#~ msgstr "échec de la fonction PL/python « %s »" -#~ msgid "unrecognized error in PLy_spi_execute_fetch_result" -#~ msgstr "erreur inconnue dans PLy_spi_execute_fetch_result" +#~ msgid "could not compute string representation of Python object in PL/Python function \"%s\" while modifying trigger row" +#~ msgstr "" +#~ "n'a pas pu traiter la représentation de la chaîne d'un objet Python dans\n" +#~ "la fonction PL/Python « %s » lors de la modification de la ligne du trigger" -#~ msgid "PL/Python does not support conversion to arrays of row types." -#~ msgstr "PL/Python ne supporte pas les conversions vers des tableaux de types row." +#~ msgid "could not create string representation of Python object in PL/Python function \"%s\" while creating return value" +#~ msgstr "" +#~ "n'a pas pu créer la représentation en chaîne de caractère de l'objet\n" +#~ "Python dans la fonction PL/python « %s » lors de la création de la valeur\n" +#~ "de retour" -#~ msgid "plpy.prepare does not support composite types" -#~ msgstr "plpy.prepare ne supporte pas les types composites" +#~ msgid "PL/Python function \"%s\" could not execute plan" +#~ msgstr "la fonction PL/python « %s » n'a pas pu exécuter un plan" -#~ msgid "the message is already specified" -#~ msgstr "le message est déjà spécifié" +#~ msgid "Start a new session to use a different Python major version." +#~ msgstr "" +#~ "Lancez une nouvelle session pour utiliser une version majeure différente de\n" +#~ "Python." 
-#~ msgid "could not create new Python list" -#~ msgstr "n'a pas pu créer la nouvelle liste Python" +#~ msgid "This session has previously used Python major version %d, and it is now attempting to use Python major version %d." +#~ msgstr "" +#~ "Cette session a auparavant utilisé la version majeure %d de Python et elle\n" +#~ "essaie maintenant d'utiliser la version majeure %d." -#~ msgid "PL/Python only supports one-dimensional arrays." -#~ msgstr "PL/Python supporte seulement les tableaux uni-dimensionnels." +#~ msgid "Python major version mismatch in session" +#~ msgstr "Différence de version majeure de Python dans la session" -#~ msgid "cannot convert multidimensional array to Python list" -#~ msgstr "ne peut pas convertir un tableau multidimensionnel en liste Python" +#~ msgid "could not create the base SPI exceptions" +#~ msgstr "n'a pas pu créer les exceptions SPI de base" -#~ msgid "plan.status takes no arguments" -#~ msgstr "plan.status ne prends pas d'arguments" +#~ msgid "multidimensional arrays must have array expressions with matching dimensions. PL/Python function return value has sequence length %d while expected %d" +#~ msgstr "" +#~ "les tableaux multidimensionnels doivent avoir des expressions de tableaux\n" +#~ "avec des dimensions correspondantes. La valeur de retour de la fonction\n" +#~ "PL/Python a une longueur de séquence %d alors que %d est attendue" diff --git a/src/pl/plpython/po/it.po b/src/pl/plpython/po/it.po index bf8ccb7ddc..5686ca9555 100644 --- a/src/pl/plpython/po/it.po +++ b/src/pl/plpython/po/it.po @@ -1,34 +1,32 @@ # -# Translation of plpython to Italian -# PostgreSQL Project +# plpython.po +# Italian message translation file for plpython # -# Associazione Culturale ITPUG - Italian PostgreSQL Users Group -# http://www.itpug.org/ - info@itpug.org +# For development and bug report please use: +# https://github.com/dvarrazzo/postgresql-it # -# Traduttori: -# * Flavio Spada -# * Daniele Varrazzo +# Daniele Varrazzo , 2012-2017. +# Flavio Spada # -# Copyright (c) 2010, Associazione Culturale ITPUG -# Distributed under the same license of the PostgreSQL project +# This file is distributed under the same license as the PostgreSQL package. 
# msgid "" msgstr "" "Project-Id-Version: plpython (PostgreSQL) 10\n" "Report-Msgid-Bugs-To: pgsql-bugs@postgresql.org\n" -"POT-Creation-Date: 2017-04-22 22:37+0000\n" -"PO-Revision-Date: 2017-05-29 17:46+0100\n" +"POT-Creation-Date: 2017-08-30 21:37+0000\n" +"PO-Revision-Date: 2017-08-31 01:44+0100\n" "Last-Translator: Daniele Varrazzo \n" -"Language-Team: Gruppo traduzioni ITPUG \n" +"Language-Team: https://github.com/dvarrazzo/postgresql-it\n" "Language: it\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" "Plural-Forms: nplurals=2; plural=n != 1;\n" -"X-Generator: Poedit 1.8.7.1\n" +"X-Generator: Poedit 1.5.4\n" #: plpy_cursorobject.c:100 #, c-format @@ -268,8 +266,8 @@ msgstr "non è stato possibile interpretare il messaggio di errore in plpy.elog" #: plpy_plpymodule.c:469 #, c-format -msgid "Argument 'message' given by name and position" -msgstr "Parametro 'message' dato con nome e posizione" +msgid "argument 'message' given by name and position" +msgstr "parametro 'message' dato con nome e posizione" #: plpy_plpymodule.c:496 #, c-format @@ -381,82 +379,87 @@ msgstr "attributo Decimal non trovato nel modulo" msgid "conversion from numeric to Decimal failed" msgstr "conversione da numeric a Decimal fallita" -#: plpy_typeio.c:772 +#: plpy_typeio.c:773 #, c-format msgid "could not create bytes representation of Python object" msgstr "creazione della rappresentazione in byte dell'oggetto Python fallita" -#: plpy_typeio.c:881 +#: plpy_typeio.c:882 #, c-format msgid "could not create string representation of Python object" msgstr "creazione della rappresentazione stringa dell'oggetto Python fallita" -#: plpy_typeio.c:892 +#: plpy_typeio.c:893 #, c-format msgid "could not convert Python object into cstring: Python string representation appears to contain null bytes" msgstr "conversione dell'oggetto Python in cstring fallita: la rappresentazione stringa Python sembra contenere byte null" -#: plpy_typeio.c:949 +#: plpy_typeio.c:950 #, c-format msgid "malformed record literal: \"%s\"" msgstr "letterale di record non corretto: \"%s\"" -#: plpy_typeio.c:950 +#: plpy_typeio.c:951 #, c-format msgid "Missing left parenthesis." msgstr "Parentesi aperta mancante." -#: plpy_typeio.c:951 plpy_typeio.c:1389 +#: plpy_typeio.c:952 plpy_typeio.c:1390 #, c-format -msgid "To return a composite type in an array, return the composite type as a Python tuple, e.g. \"[('foo')]\"" -msgstr "Per restutuire un tipo composito in un array, restituisci il tipo composito come tupla Python, per esempio \"[('foo')]\"" +msgid "To return a composite type in an array, return the composite type as a Python tuple, e.g., \"[('foo',)]\"." 
+msgstr "Per restutuire un tipo composito in un array, restituisci il tipo composito come tupla Python, per esempio \"[('foo',)]\" " -#: plpy_typeio.c:1000 +#: plpy_typeio.c:1001 #, c-format msgid "number of array dimensions exceeds the maximum allowed (%d)" msgstr "il numero di dimensioni dell'array supera il massimo consentito (%d)" -#: plpy_typeio.c:1004 +#: plpy_typeio.c:1005 #, c-format -msgid "cannot determine sequence length for function return value" +msgid "could not determine sequence length for function return value" msgstr "errore nel determinare la lunghezza della sequenza per il valore di ritorno della funzione" -#: plpy_typeio.c:1007 plpy_typeio.c:1011 +#: plpy_typeio.c:1008 plpy_typeio.c:1012 #, c-format msgid "array size exceeds the maximum allowed" msgstr "la dimensione dell'array supera il massimo consentito" -#: plpy_typeio.c:1037 +#: plpy_typeio.c:1038 #, c-format msgid "return value of function with array return type is not a Python sequence" msgstr "il valore restituito dalla funzione con tipo restituito array non è una sequenza Python" -#: plpy_typeio.c:1090 +#: plpy_typeio.c:1091 +#, c-format +msgid "wrong length of inner sequence: has length %d, but %d was expected" +msgstr "lunghezza errata della sequenza interna: la lunghezza è %d ma era atteso %d" + +#: plpy_typeio.c:1093 #, c-format -msgid "multidimensional arrays must have array expressions with matching dimensions. PL/Python function return value has sequence length %d while expected %d" -msgstr "gli array multidimensionali devono avere espressioni array di dimensioni corrispondenti. il valore di ritorno della funzione PL/Python ha una sequenza di lungezza %d, mentre era atteso %d" +msgid "To construct a multidimensional array, the inner sequences must all have the same length." +msgstr "Per costruire un array multidimensionale le sequenze interne devono avere tutte la stessa lunghezza." -#: plpy_typeio.c:1212 +#: plpy_typeio.c:1213 #, c-format msgid "key \"%s\" not found in mapping" msgstr "la chiave \"%s\" non è stata trovata nel dizionario" -#: plpy_typeio.c:1213 +#: plpy_typeio.c:1214 #, c-format msgid "To return null in a column, add the value None to the mapping with the key named after the column." msgstr "Per restituire null in una colonna, inserire nella mappa il valore None con una chiave chiamata come la colonna." -#: plpy_typeio.c:1264 +#: plpy_typeio.c:1265 #, c-format msgid "length of returned sequence did not match number of columns in row" msgstr "la lunghezza della sequenza ritornata non rispetta il numero di colonne presenti nella riga" -#: plpy_typeio.c:1387 +#: plpy_typeio.c:1388 #, c-format msgid "attribute \"%s\" does not exist in Python object" msgstr "l'attributo \"%s\" non esiste nell'oggetto Python" -#: plpy_typeio.c:1390 +#: plpy_typeio.c:1391 #, c-format msgid "To return null in a column, let the returned object have an attribute named after column with value None." msgstr "Per restituire null in una colonna, l'oggetto restituito deve avere un attributo chiamato come la colonna con valore None." diff --git a/src/pl/plpython/po/ja.po b/src/pl/plpython/po/ja.po index 5a5040ee0b..1c0055c20b 100644 --- a/src/pl/plpython/po/ja.po +++ b/src/pl/plpython/po/ja.po @@ -1,447 +1,504 @@ # LANGUAGE message translation file for plpython -# Copyright (C) 2009 PostgreSQL Global Development Group +# Copyright (C) 2018 PostgreSQL Global Development Group # This file is distributed under the same license as the PostgreSQL package. +# Honda Shigehiro , 2012. 
# msgid "" msgstr "" -"Project-Id-Version: PostgreSQL 9.1 beta2\n" +"Project-Id-Version: plpython (PostgreSQL) 10\n" "Report-Msgid-Bugs-To: pgsql-bugs@postgresql.org\n" -"POT-Creation-Date: 2012-08-11 16:28+0900\n" -"PO-Revision-Date: 2012-08-11 16:37+0900\n" -"Last-Translator: Honda Shigehiro \n" +"POT-Creation-Date: 2018-01-26 10:57+0900\n" +"PO-Revision-Date: 2018-02-13 10:25+0900\n" +"Last-Translator: Michihide Hotta \n" "Language-Team: Japan PostgreSQL Users Group \n" "Language: ja\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" "Plural-Forms: nplurals=1; plural=0;\n" +"X-Generator: Poedit 2.0.6\n" -#: plpy_cursorobject.c:98 +#: plpy_cursorobject.c:100 #, c-format msgid "plpy.cursor expected a query or a plan" -msgstr "plpy.cursorはクエリーもしくは実行計画を期待していました" +msgstr "plpy.cursor は問い合わせもしくは実行計画を期待していました" -#: plpy_cursorobject.c:171 +#: plpy_cursorobject.c:176 #, c-format msgid "plpy.cursor takes a sequence as its second argument" -msgstr "plpy.cursorは第二引数としてシーケンスを取ります" +msgstr "plpy.cursor は第二引数としてシーケンスを取ります" -#: plpy_cursorobject.c:187 plpy_spi.c:222 +#: plpy_cursorobject.c:192 plpy_spi.c:226 #, c-format msgid "could not execute plan" -msgstr "プランを実行できませんでした" +msgstr "実行計画を実行できませんでした" -#: plpy_cursorobject.c:190 plpy_spi.c:225 +#: plpy_cursorobject.c:195 plpy_spi.c:229 #, c-format msgid "Expected sequence of %d argument, got %d: %s" msgid_plural "Expected sequence of %d arguments, got %d: %s" -msgstr[0] "%d 番目の引数はシーケンスを期待していましたが、%d が現れました:%s" +msgstr[0] "%d 個の引数のシーケンスを期待していましたが、個数は %d でした:%s" -#: plpy_cursorobject.c:340 +#: plpy_cursorobject.c:350 #, c-format msgid "iterating a closed cursor" -msgstr "クローズされたカーsるの反復" +msgstr "反復利用しようとしているカーソルは、すでにクローズされています" -#: plpy_cursorobject.c:348 plpy_cursorobject.c:415 +#: plpy_cursorobject.c:358 plpy_cursorobject.c:423 #, c-format msgid "iterating a cursor in an aborted subtransaction" -msgstr "アボートされたサブトランザクション内のカーソルの反復" +msgstr "" +"中断されたサブトランザクションの中でカーソルを反復利用しようとしています" -#: plpy_cursorobject.c:407 +#: plpy_cursorobject.c:415 #, c-format msgid "fetch from a closed cursor" msgstr "クローズされたカーソルからのフェッチ" -#: plpy_cursorobject.c:486 +#: plpy_cursorobject.c:463 plpy_spi.c:434 +#, c-format +msgid "query result has too many rows to fit in a Python list" +msgstr "問い合わせの結果に含まれる行数が、Python のリストに対して多すぎます" + +#: plpy_cursorobject.c:504 #, c-format msgid "closing a cursor in an aborted subtransaction" -msgstr "アボートされたサブトランザクションにおけるカーソルクローズ" +msgstr "" +"中断されたサブトランザクションの中でカーソルをクローズしようとしています" -#: plpy_elog.c:103 plpy_elog.c:104 plpy_plpymodule.c:420 +#: plpy_elog.c:127 plpy_elog.c:128 plpy_plpymodule.c:548 #, c-format msgid "%s" msgstr "%s" -#: plpy_exec.c:90 +#: plpy_exec.c:140 #, c-format msgid "unsupported set function return mode" -msgstr "未サポートの集合関数リターンモードです" +msgstr "未サポートの集合関数リターンモードです。" -#: plpy_exec.c:91 +#: plpy_exec.c:141 #, c-format -msgid "PL/Python set-returning functions only support returning only value per call." -msgstr "PL/Python の集合を返す関数では、1回のコールのたびに値だけを返すことがサポートされています" +msgid "" +"PL/Python set-returning functions only support returning one value per call." +msgstr "" +"PL/Python の集合を返す関数では、1回の呼び出しに対して1つの値を返すことのみ" +"がサポートされています。" -#: plpy_exec.c:103 +#: plpy_exec.c:154 #, c-format msgid "returned object cannot be iterated" -msgstr "返されたオブジェクトは反復適用できません" +msgstr "返されたオブジェクトは反復利用できません" -#: plpy_exec.c:104 +#: plpy_exec.c:155 #, c-format msgid "PL/Python set-returning functions must return an iterable object." 
-msgstr "PL/Python の集合を返す関数では、イテレータ(反復子)オブジェクトを返さなければなりません" +msgstr "" +"PL/Python の集合を返す関数は、イテレータ(反復利用可能)オブジェクトを返さな" +"ければなりません。" -#: plpy_exec.c:129 +#: plpy_exec.c:169 #, c-format msgid "error fetching next item from iterator" msgstr "イテレータ(反復子)から次の項目をフェッチ(取り出し)できません" -#: plpy_exec.c:164 +#: plpy_exec.c:210 #, c-format msgid "PL/Python function with return type \"void\" did not return None" -msgstr "\"void\" 型を返す PL/Python 関数は None 型を返しません" +msgstr "戻り値が \"void\" 型である PL/Python 関数が None 型を返しませんでした" -#: plpy_exec.c:288 plpy_exec.c:314 +#: plpy_exec.c:379 plpy_exec.c:405 #, c-format msgid "unexpected return value from trigger procedure" -msgstr "トリガー手続きから期待しない戻り値が返されました" +msgstr "トリガプロシージャから期待しない戻り値が返されました" -#: plpy_exec.c:289 +#: plpy_exec.c:380 #, c-format msgid "Expected None or a string." msgstr "None もしくは文字列を期待していました。" -#: plpy_exec.c:304 +#: plpy_exec.c:395 #, c-format -msgid "PL/Python trigger function returned \"MODIFY\" in a DELETE trigger -- ignored" -msgstr "PL/Python トリガー関数が、DELETE トリガーで \"MODIFY\" を返しました-- 無視しました" +msgid "" +"PL/Python trigger function returned \"MODIFY\" in a DELETE trigger -- ignored" +msgstr "" +"PL/Python トリガ関数が、DELETE トリガで \"MODIFY\" を返しました-- 無視さ" +"れました" -#: plpy_exec.c:315 +#: plpy_exec.c:406 #, c-format msgid "Expected None, \"OK\", \"SKIP\", or \"MODIFY\"." msgstr "None, \"OK\", \"SKIP\", \"MODIFY\" のいずれかを期待していました。" -#: plpy_exec.c:396 +#: plpy_exec.c:487 #, c-format msgid "PyList_SetItem() failed, while setting up arguments" msgstr "引数を設定する際に、PyList_SetItem() に失敗しました" -#: plpy_exec.c:400 +#: plpy_exec.c:491 #, c-format msgid "PyDict_SetItemString() failed, while setting up arguments" msgstr "引数を設定する際に、PyDict_SetItemString() に失敗しました" -#: plpy_exec.c:412 +#: plpy_exec.c:503 #, c-format -msgid "function returning record called in context that cannot accept type record" -msgstr "レコード型を受け付けられないコンテキストでレコードを返す関数が呼び出されました" +msgid "" +"function returning record called in context that cannot accept type record" +msgstr "" +"レコード型を受け付けられないコンテキストでレコードを返す関数が呼び出されまし" +"た" -#: plpy_exec.c:450 +#: plpy_exec.c:719 #, c-format msgid "while creating return value" msgstr "戻り値を生成する際に" -#: plpy_exec.c:474 +#: plpy_exec.c:743 #, c-format msgid "could not create new dictionary while building trigger arguments" -msgstr "トリガーの引数を構成中に、新しい辞書を生成できませんでした" +msgstr "トリガの引数を構成中、新しい辞書を生成できませんでした" -#: plpy_exec.c:664 +#: plpy_exec.c:931 #, c-format msgid "TD[\"new\"] deleted, cannot modify row" -msgstr "TD[\"new\"] は削除されました。もはや変更できません" +msgstr "TD[\"new\"] は削除されました。行を変更できません。" -#: plpy_exec.c:667 +#: plpy_exec.c:936 #, c-format msgid "TD[\"new\"] is not a dictionary" msgstr "TD[\"new\"] は辞書ではありません" -#: plpy_exec.c:691 +#: plpy_exec.c:963 #, c-format msgid "TD[\"new\"] dictionary key at ordinal position %d is not a string" msgstr "TD[\"new\"] 辞書の%d番目のキーが文字列ではありません" -#: plpy_exec.c:697 +#: plpy_exec.c:970 +#, c-format +msgid "" +"key \"%s\" found in TD[\"new\"] does not exist as a column in the triggering " +"row" +msgstr "" +"TD[\"new\"] で見つかったキー \"%s\" は、行レベルトリガにおけるカラムとして" +"は存在しません" + +#: plpy_exec.c:975 #, c-format -msgid "key \"%s\" found in TD[\"new\"] does not exist as a column in the triggering row" -msgstr "TD[\"new\"] で見つかったキー \"%s\" は、行レベルトリガーにおけるカラムとしては存在しません" +msgid "cannot set system attribute \"%s\"" +msgstr "システム属性 \"%s\" を設定できません" -#: plpy_exec.c:778 +#: plpy_exec.c:1046 #, c-format msgid "while modifying trigger row" -msgstr "トリガー行を変更する際に" +msgstr "トリガ行を変更する際に" -#: plpy_exec.c:839 +#: plpy_exec.c:1107 #, c-format msgid "forcibly aborting a subtransaction that has not 
been exited" msgstr "終了していないサブトランザクションを強制的にアボートしています" -#: plpy_main.c:100 -#, c-format -msgid "Python major version mismatch in session" -msgstr "セッションにおいて Python のメジャーバージョンが合致しません" - -#: plpy_main.c:101 +#: plpy_main.c:125 #, c-format -msgid "This session has previously used Python major version %d, and it is now attempting to use Python major version %d." -msgstr "このセッションではすでに Python のメジャーバージョン %d が使われていましたが、ここで Python のメジャーバージョン %d を使おうとしています。" +msgid "multiple Python libraries are present in session" +msgstr "セッションに複数の Python ライブラリが存在します" -#: plpy_main.c:103 +#: plpy_main.c:126 #, c-format -msgid "Start a new session to use a different Python major version." -msgstr "Python の異なったメジャーバージョンを使う場合は、新しいセッションを開始してください" +msgid "Only one Python major version can be used in one session." +msgstr "" +"1個のセッション中で使える Python のメジャーバージョンは1種類だけです。" -#: plpy_main.c:118 +#: plpy_main.c:142 #, c-format msgid "untrapped error in initialization" -msgstr "初期化中に捕獲できないエラーがありました" +msgstr "初期化中に捕捉できないエラーがありました" -#: plpy_main.c:141 +#: plpy_main.c:165 #, c-format msgid "could not import \"__main__\" module" msgstr "\"__main__\" モジュールをインポートできませんでした" -#: plpy_main.c:146 +#: plpy_main.c:170 #, c-format msgid "could not create globals" -msgstr "globalsを作成できませんでした" +msgstr "グローバル変数(globals)を作成できませんでした" -#: plpy_main.c:150 +#: plpy_main.c:174 #, c-format msgid "could not initialize globals" msgstr "グローバル変数(globals)を初期化できませんでした" -#: plpy_main.c:347 +#: plpy_main.c:387 #, c-format msgid "PL/Python function \"%s\"" msgstr "PL/Python 関数 \"%s\"" -#: plpy_main.c:354 +#: plpy_main.c:394 #, c-format msgid "PL/Python anonymous code block" msgstr "PL/Python の無名コードブロック" -#: plpy_planobject.c:126 +#: plpy_plpymodule.c:181 plpy_plpymodule.c:184 #, c-format -msgid "plan.status takes no arguments" -msgstr "plan.status は引数を取りません" +msgid "could not import \"plpy\" module" +msgstr "\"plpy\" モジュールをインポートできませんでした" -#: plpy_plpymodule.c:178 plpy_plpymodule.c:181 +#: plpy_plpymodule.c:199 #, c-format -msgid "could not import \"plpy\" module" -msgstr "\"plpy\"モジュールをインポートできませんでした" +msgid "could not create the spiexceptions module" +msgstr "spiexceptions モジュールを生成できませんでした" -#: plpy_plpymodule.c:196 +#: plpy_plpymodule.c:207 #, c-format msgid "could not add the spiexceptions module" -msgstr "spiexceptionsモジュールを追加できませんでした" +msgstr "spiexceptions モジュールを追加できませんでした" -#: plpy_plpymodule.c:217 +#: plpy_plpymodule.c:236 #, c-format -msgid "could not create the base SPI exceptions" -msgstr "基本SPI例外を作成できませんでした" +msgid "could not create exception \"%s\"" +msgstr "例外 \"%s \"を作成できませんでした" -#: plpy_plpymodule.c:253 plpy_plpymodule.c:257 +#: plpy_plpymodule.c:271 plpy_plpymodule.c:275 #, c-format msgid "could not generate SPI exceptions" -msgstr "SPI例外を生成できませんでした" +msgstr "SPI 例外を生成できませんでした" -#: plpy_plpymodule.c:388 +#: plpy_plpymodule.c:443 #, c-format msgid "could not unpack arguments in plpy.elog" -msgstr "plpy.elogで引数を展開することができませんでした" +msgstr "plpy.elog で引数を展開できませんでした" -#: plpy_plpymodule.c:396 +#: plpy_plpymodule.c:452 msgid "could not parse error message in plpy.elog" msgstr "plpy.elog でエラーメッセージをパースできませんでした" -#: plpy_procedure.c:194 +#: plpy_plpymodule.c:469 +#, c-format +msgid "argument 'message' given by name and position" +msgstr "名前と位置で 'message' 引数が渡されました" + +#: plpy_plpymodule.c:496 +#, c-format +msgid "'%s' is an invalid keyword argument for this function" +msgstr "この関数に対して '%s' は無効なキーワード引数です" + +#: plpy_plpymodule.c:507 plpy_plpymodule.c:513 +#, c-format +msgid "invalid SQLSTATE code" +msgstr "無効な SQLSTATE コードです" + +#: plpy_procedure.c:230 #, 
c-format msgid "trigger functions can only be called as triggers" -msgstr "トリガー関数はトリガーとしてのみコールできます" +msgstr "トリガ関数はトリガとしてのみコールできます" -#: plpy_procedure.c:199 plpy_typeio.c:406 +#: plpy_procedure.c:235 #, c-format msgid "PL/Python functions cannot return type %s" msgstr "PL/Python 関数は %s 型を返せません" -#: plpy_procedure.c:281 +#: plpy_procedure.c:316 #, c-format msgid "PL/Python functions cannot accept type %s" -msgstr "PL/Python 関数は %s 型を受け付けません" +msgstr "PL/Python 関数は %s 型を受け付けられません" -#: plpy_procedure.c:377 +#: plpy_procedure.c:412 #, c-format msgid "could not compile PL/Python function \"%s\"" -msgstr "PL/Python 関数 \"%s\" をコンパイルできません" +msgstr "PL/Python 関数 \"%s\" をコンパイルできませんでした" -#: plpy_procedure.c:380 +#: plpy_procedure.c:415 #, c-format msgid "could not compile anonymous PL/Python code block" -msgstr "PL/Python匿名コードブロックをコンパイルできません" +msgstr "匿名の PL/Python コードブロックをコンパイルできませんでした" #: plpy_resultobject.c:145 plpy_resultobject.c:165 plpy_resultobject.c:185 #, c-format msgid "command did not produce a result set" msgstr "コマンドは結果セットを生成しませんでした" -#: plpy_spi.c:56 +#: plpy_spi.c:59 #, c-format msgid "second argument of plpy.prepare must be a sequence" msgstr "plpy.prepare の第二引数はシーケンスでなければなりません" -#: plpy_spi.c:105 +#: plpy_spi.c:115 #, c-format msgid "plpy.prepare: type name at ordinal position %d is not a string" msgstr "plpy.prepare: %d 番目の型名が文字列ではありません" -#: plpy_spi.c:137 -#, c-format -msgid "plpy.prepare does not support composite types" -msgstr "plpy.prepare は複合型をサポートしていません" - -#: plpy_spi.c:187 +#: plpy_spi.c:191 #, c-format msgid "plpy.execute expected a query or a plan" -msgstr "plpy.execute はクエリーもしくは実行計画を期待していました" +msgstr "plpy.execute は問い合わせもしくは実行計画を期待していました" -#: plpy_spi.c:206 +#: plpy_spi.c:210 #, c-format msgid "plpy.execute takes a sequence as its second argument" msgstr "plpy.execute は第二引数としてシーケンスを取ります" -#: plpy_spi.c:330 +#: plpy_spi.c:335 #, c-format msgid "SPI_execute_plan failed: %s" msgstr "SPI_execute_plan が失敗しました:%s" -#: plpy_spi.c:372 +#: plpy_spi.c:377 #, c-format msgid "SPI_execute failed: %s" msgstr "SPI_execute が失敗しました:%s" -#: plpy_spi.c:439 -#, c-format -msgid "unrecognized error in PLy_spi_execute_fetch_result" -msgstr "PLy_spi_execute_fetch_result で認識できないエラーを検出しました" - #: plpy_subxactobject.c:122 #, c-format msgid "this subtransaction has already been entered" -msgstr "このサブトランザクションは到達済みです" +msgstr "すでにこのサブトランザクションの中に入っています" -#: plpy_subxactobject.c:128 plpy_subxactobject.c:180 +#: plpy_subxactobject.c:128 plpy_subxactobject.c:186 #, c-format msgid "this subtransaction has already been exited" -msgstr "このサブトランザクションは終了済みです" +msgstr "このサブトランザクションからすでに抜けています" -#: plpy_subxactobject.c:174 +#: plpy_subxactobject.c:180 #, c-format msgid "this subtransaction has not been entered" -msgstr "このサブトランザクションには到達しません" +msgstr "このサブトランザクションには入っていません" -#: plpy_subxactobject.c:186 +#: plpy_subxactobject.c:192 #, c-format msgid "there is no subtransaction to exit from" -msgstr "終了するためのサブトランザクションがありません" +msgstr "抜けるべきサブトランザクションがありません" -#: plpy_typeio.c:291 +#: plpy_typeio.c:292 #, c-format msgid "could not create new dictionary" -msgstr "新しいディレクトリを作れません" +msgstr "新しい辞書を作れませんでした" -#: plpy_typeio.c:408 +#: plpy_typeio.c:560 #, c-format -msgid "PL/Python does not support conversion to arrays of row types." 
-msgstr "PL/Python は行タイプ配列への変換をサポートしていません" +msgid "could not import a module for Decimal constructor" +msgstr "Decimal コンストラクタのためのモジュールをインポートできませんでした" -#: plpy_typeio.c:584 +#: plpy_typeio.c:564 #, c-format -msgid "cannot convert multidimensional array to Python list" -msgstr "多次元配列を Python の list に変換できません" +msgid "no Decimal attribute in module" +msgstr "モジュールの中に Decimal 属性が含まれていません" -#: plpy_typeio.c:585 +#: plpy_typeio.c:570 #, c-format -msgid "PL/Python only supports one-dimensional arrays." -msgstr "PL/Python でサポートしているのは一次元配列のみです。" +msgid "conversion from numeric to Decimal failed" +msgstr "numeric から Decimal への変換に失敗しました" -#: plpy_typeio.c:591 -#, c-format -msgid "could not create new Python list" -msgstr "新しいPythonリストを作成できませんでした" - -#: plpy_typeio.c:650 +#: plpy_typeio.c:773 #, c-format msgid "could not create bytes representation of Python object" msgstr "バイト表現の Python オブジェクトを生成できませんでした" -#: plpy_typeio.c:742 +#: plpy_typeio.c:882 #, c-format msgid "could not create string representation of Python object" msgstr "文字列表現の Python オブジェクトを生成できませんでした" -#: plpy_typeio.c:753 +#: plpy_typeio.c:893 #, c-format -msgid "could not convert Python object into cstring: Python string representation appears to contain null bytes" -msgstr "Python オブジェクトを cstring に変換できませんでした:Python の文字列表現が null バイトを持つことになってしまいます" +msgid "" +"could not convert Python object into cstring: Python string representation " +"appears to contain null bytes" +msgstr "" +"Python オブジェクトを cstring に変換できませんでした:Python の文字列表現に " +"null バイトが含まれているようです" -#: plpy_typeio.c:787 +#: plpy_typeio.c:950 #, c-format -msgid "return value of function with array return type is not a Python sequence" -msgstr "戻り値が配列である関数の戻り値が Python シーケンスではありません" +msgid "malformed record literal: \"%s\"" +msgstr "不正な形式のレコードリテラルです: \"%s\"" -#: plpy_typeio.c:886 +#: plpy_typeio.c:951 #, c-format -msgid "key \"%s\" not found in mapping" -msgstr "マッピング上にキー \"%s\" が見つかりません" +msgid "Missing left parenthesis." +msgstr "左括弧がありません。" -#: plpy_typeio.c:887 +#: plpy_typeio.c:952 plpy_typeio.c:1390 #, c-format -msgid "To return null in a column, add the value None to the mapping with the key named after the column." -msgstr "カラムに null を入れて返すには、カラムの後につけた名前をキーとして、マッピングに None 値を追加してください" +msgid "" +"To return a composite type in an array, return the composite type as a " +"Python tuple, e.g., \"[('foo',)]\"." +msgstr "" +"複合型を配列に入れて返したい場合、 \"[('foo',)]\" のように複合型を Pythonのタ" +"プルとして返すようにしてください。" -#: plpy_typeio.c:935 +#: plpy_typeio.c:1001 #, c-format -msgid "length of returned sequence did not match number of columns in row" -msgstr "返されたシーケンスの長さが、その行のカラム数と異なります" +msgid "number of array dimensions exceeds the maximum allowed (%d)" +msgstr "配列の次元数が制限値(%d)を超えています" -#: plpy_typeio.c:1043 +#: plpy_typeio.c:1005 #, c-format -msgid "attribute \"%s\" does not exist in Python object" -msgstr "属性 \"%s\" が Python オブジェクト中に存在しません" +msgid "could not determine sequence length for function return value" +msgstr "関数の戻り値について、シーケンスの長さを決定できませんでした" -#: plpy_typeio.c:1044 +#: plpy_typeio.c:1008 plpy_typeio.c:1012 #, c-format -msgid "To return null in a column, let the returned object have an attribute named after column with value None." 
-msgstr "カラムに null を入れて返す場合は、そのカラムの後につけた名前で表される属性が値として None を持ち、返されるオブジェクトがその属性を含むようにしてください" +msgid "array size exceeds the maximum allowed" +msgstr "配列のサイズが制限値を超えています" -#: plpy_util.c:70 +#: plpy_typeio.c:1038 #, c-format -msgid "could not convert Python Unicode object to bytes" -msgstr "PythonのUnicodeオブジェクトをバイトに変換できませんでした" +msgid "" +"return value of function with array return type is not a Python sequence" +msgstr "配列型を返す関数の戻り値が Python のシーケンスではありません" -#: plpy_util.c:75 +#: plpy_typeio.c:1091 #, c-format -msgid "could not extract bytes from encoded string" -msgstr "符号化された文字列からバイトを抽出できませんでした" - -#~ msgid "PyCObject_AsVoidPtr() failed" -#~ msgstr "PyCObject_AsVoidPtr() に失敗しました" - -#~ msgid "invalid arguments for plpy.prepare" -#~ msgstr "plpy.prepare の引数が不正です" +msgid "wrong length of inner sequence: has length %d, but %d was expected" +msgstr "" +"内部シーケンスで長さが異常です:長さは %d ですが、期待する値は %d でした" -#~ msgid "PyCObject_FromVoidPtr() failed" -#~ msgstr "PyCObject_FromVoidPtr() に失敗しました" +#: plpy_typeio.c:1093 +#, c-format +msgid "" +"To construct a multidimensional array, the inner sequences must all have the " +"same length." +msgstr "" +"多次元配列を生成する場合、内部シーケンスはすべて同じ長さでなければなりませ" +"ん。" -#~ msgid "unrecognized error in PLy_spi_prepare" -#~ msgstr "PLy_spi_prepare で認識できないエラーを検出しました" +#: plpy_typeio.c:1213 +#, c-format +msgid "key \"%s\" not found in mapping" +msgstr "マッピング上にキー \"%s\" が見つかりません" -#~ msgid "transaction aborted" -#~ msgstr "トランザクションがアボートしました" +#: plpy_typeio.c:1214 +#, c-format +msgid "" +"To return null in a column, add the value None to the mapping with the key " +"named after the column." +msgstr "" +"カラムに null を入れて返す場合、カラム名をキーとして値が None のエントリを" +"マッピングに追加してください" -#~ msgid "could not create procedure cache" -#~ msgstr "手続き用キャッシュ(procedure cache)を生成できませんでした" +#: plpy_typeio.c:1265 +#, c-format +msgid "length of returned sequence did not match number of columns in row" +msgstr "返されたシーケンスの長さが行のカラム数とマッチしませんでした" -#~ msgid "PL/Python: %s" -#~ msgstr "PL/Python: %s" +#: plpy_typeio.c:1388 +#, c-format +msgid "attribute \"%s\" does not exist in Python object" +msgstr "属性 \"%s\" が Python オブジェクト中に存在しません" -#~ msgid "out of memory" -#~ msgstr "メモリ不足です" +#: plpy_typeio.c:1391 +#, c-format +msgid "" +"To return null in a column, let the returned object have an attribute named " +"after column with value None." +msgstr "" +"カラムに null を入れて返す場合、カラム名をキーとして値が None である属性を持" +"つオブジェクトを返すようにしてください。" -#~ msgid "unrecognized error in PLy_spi_execute_plan" -#~ msgstr "PLy_spi_execute_plan で認識できないエラーを検出しました" +#: plpy_util.c:36 +#, c-format +msgid "could not convert Python Unicode object to bytes" +msgstr "Python の Unicode オブジェクトをバイト列に変換できませんでした" -#~ msgid "unrecognized error in PLy_spi_execute_query" -#~ msgstr "PLy_spi_execute_query で認識できないエラーを検出しました" +#: plpy_util.c:42 +#, c-format +msgid "could not extract bytes from encoded string" +msgstr "エンコードされた文字列からバイト列を抽出できませんでした" diff --git a/src/pl/plpython/po/ko.po b/src/pl/plpython/po/ko.po index 8608d1ba94..d39d81d0df 100644 --- a/src/pl/plpython/po/ko.po +++ b/src/pl/plpython/po/ko.po @@ -1,14 +1,14 @@ # LANGUAGE message translation file for plpython # Copyright (C) 2015 PostgreSQL Global Development Group # This file is distributed under the same license as the PostgreSQL package. -# FIRST AUTHOR , 2015. +# Ioseph Kim , 2015. 
# msgid "" msgstr "" -"Project-Id-Version: plpython (PostgreSQL) 9.6\n" +"Project-Id-Version: plpython (PostgreSQL) 10\n" "Report-Msgid-Bugs-To: pgsql-bugs@postgresql.org\n" -"POT-Creation-Date: 2016-09-26 14:02+0900\n" -"PO-Revision-Date: 2016-09-26 19:31+0900\n" +"POT-Creation-Date: 2017-08-16 10:59+0900\n" +"PO-Revision-Date: 2017-08-17 13:22+0900\n" "Last-Translator: Ioseph Kim \n" "Language-Team: Korean \n" "Language: ko\n" @@ -17,24 +17,24 @@ msgstr "" "Content-Transfer-Encoding: 8bit\n" "Plural-Forms: nplurals=1; plural=0;\n" -#: plpy_cursorobject.c:101 +#: plpy_cursorobject.c:100 #, c-format msgid "plpy.cursor expected a query or a plan" msgstr "plpy.cursor 객체는 쿼리나 plpy.prepare 객체를 인자로 사용합니다" -#: plpy_cursorobject.c:177 +#: plpy_cursorobject.c:176 #, c-format msgid "plpy.cursor takes a sequence as its second argument" msgstr "" "plpy.cursor 객체의 인자로 plpy.prepare 객체를 사용한 경우 두번째 인자는 " "prepare 객체의 매개변수가 있어야 합니다." -#: plpy_cursorobject.c:193 plpy_spi.c:227 +#: plpy_cursorobject.c:192 plpy_spi.c:226 #, c-format msgid "could not execute plan" msgstr "plpy.prepare 객체를 실행할 수 없음" -#: plpy_cursorobject.c:196 plpy_spi.c:230 +#: plpy_cursorobject.c:195 plpy_spi.c:229 #, c-format msgid "Expected sequence of %d argument, got %d: %s" msgid_plural "Expected sequence of %d arguments, got %d: %s" @@ -65,7 +65,7 @@ msgstr "쿼리 결과가 Python 리스트로 담기에는 너무 많습니다" msgid "closing a cursor in an aborted subtransaction" msgstr "중지된 서브트랜잭션에서 커서를 닫고 있음" -#: plpy_elog.c:127 plpy_elog.c:128 plpy_plpymodule.c:527 +#: plpy_elog.c:127 plpy_elog.c:128 plpy_plpymodule.c:548 #, c-format msgid "%s" msgstr "%s" @@ -104,70 +104,70 @@ msgid "PL/Python function with return type \"void\" did not return None" msgstr "" "반환 자료형이 \"void\"인 PL/Python 함수가 return None으로 끝나지 않았음" -#: plpy_exec.c:374 plpy_exec.c:400 +#: plpy_exec.c:379 plpy_exec.c:405 #, c-format msgid "unexpected return value from trigger procedure" msgstr "트리거 프로시져가 예상치 못한 값을 반환했습니다" -#: plpy_exec.c:375 +#: plpy_exec.c:380 #, c-format msgid "Expected None or a string." msgstr "None 이나 문자열이 있어야합니다." -#: plpy_exec.c:390 +#: plpy_exec.c:395 #, c-format msgid "" "PL/Python trigger function returned \"MODIFY\" in a DELETE trigger -- ignored" msgstr "" "PL/Python 트리거 함수가 DELETE 트리거에서 \"MODIFY\"를 반환했음 -- 무시함" -#: plpy_exec.c:401 +#: plpy_exec.c:406 #, c-format msgid "Expected None, \"OK\", \"SKIP\", or \"MODIFY\"." msgstr "None, \"OK\", \"SKIP\", 또는 \"MODIFY\"를 사용해야 함." 
-#: plpy_exec.c:482 +#: plpy_exec.c:487 #, c-format msgid "PyList_SetItem() failed, while setting up arguments" msgstr "PyList_SetItem() 함수가 인자 설정하는 중 실패" -#: plpy_exec.c:486 +#: plpy_exec.c:491 #, c-format msgid "PyDict_SetItemString() failed, while setting up arguments" msgstr "PyDict_SetItemString() 함수가 인자 설정하는 중 실패" -#: plpy_exec.c:498 +#: plpy_exec.c:503 #, c-format msgid "" "function returning record called in context that cannot accept type record" msgstr "반환 자료형이 record인데 함수가 그 자료형으로 반환하지 않음" -#: plpy_exec.c:714 +#: plpy_exec.c:719 #, c-format msgid "while creating return value" msgstr "반환값을 만들고 있은 중" -#: plpy_exec.c:738 +#: plpy_exec.c:743 #, c-format msgid "could not create new dictionary while building trigger arguments" msgstr "트리거 인자를 구성하는 중 새 딕션너리를 만들 수 없음" -#: plpy_exec.c:927 +#: plpy_exec.c:931 #, c-format msgid "TD[\"new\"] deleted, cannot modify row" msgstr "TD[\"new\"] 변수가 삭제되었음, 로우를 수정할 수 없음" -#: plpy_exec.c:932 +#: plpy_exec.c:936 #, c-format msgid "TD[\"new\"] is not a dictionary" msgstr "TD[\"new\"] 변수가 딕션너리 형태가 아님" -#: plpy_exec.c:957 +#: plpy_exec.c:963 #, c-format msgid "TD[\"new\"] dictionary key at ordinal position %d is not a string" msgstr "%d 번째 TD[\"new\"] 딕션너리 키가 문자열이 아님" -#: plpy_exec.c:964 +#: plpy_exec.c:970 #, c-format msgid "" "key \"%s\" found in TD[\"new\"] does not exist as a column in the triggering " @@ -175,12 +175,17 @@ msgid "" msgstr "" "로우 트리거 작업에서 칼럼으로 사용되는 \"%s\" 키가 TD[\"new\"] 변수에 없음." -#: plpy_exec.c:1044 +#: plpy_exec.c:975 +#, c-format +msgid "cannot set system attribute \"%s\"" +msgstr "\"%s\" 시스템 속성을 지정할 수 없음" + +#: plpy_exec.c:1046 #, c-format msgid "while modifying trigger row" msgstr "로우 변경 트리거 작업 도중" -#: plpy_exec.c:1105 +#: plpy_exec.c:1107 #, c-format msgid "forcibly aborting a subtransaction that has not been exited" msgstr "서브트랜잭션이 중지됨으로 강제로 중지됨" @@ -225,51 +230,51 @@ msgstr "\"%s\" PL/Python 함수" msgid "PL/Python anonymous code block" msgstr "PL/Python 익명 코드 블럭" -#: plpy_planobject.c:123 -#, c-format -msgid "plan.status takes no arguments" -msgstr "plan.status의 인자가 없습니다." 
- -#: plpy_plpymodule.c:178 plpy_plpymodule.c:181 +#: plpy_plpymodule.c:181 plpy_plpymodule.c:184 #, c-format msgid "could not import \"plpy\" module" msgstr "\"plpy\" 모듈을 임포트 할 수 없음" -#: plpy_plpymodule.c:196 +#: plpy_plpymodule.c:199 +#, c-format +msgid "could not create the spiexceptions module" +msgstr "spiexceptions 모듈을 만들 수 없음" + +#: plpy_plpymodule.c:207 #, c-format msgid "could not add the spiexceptions module" msgstr "spiexceptions 모듈을 추가할 수 없음" -#: plpy_plpymodule.c:217 +#: plpy_plpymodule.c:236 #, c-format -msgid "could not create the base SPI exceptions" -msgstr "기본 SPI 예외처리를 만들 수 없음" +msgid "could not create exception \"%s\"" +msgstr "\"%s\" 예외처리를 생성할 수 없음" -#: plpy_plpymodule.c:252 plpy_plpymodule.c:256 +#: plpy_plpymodule.c:271 plpy_plpymodule.c:275 #, c-format msgid "could not generate SPI exceptions" msgstr "SPI 예외처리를 생성할 수 없음" -#: plpy_plpymodule.c:422 +#: plpy_plpymodule.c:443 #, c-format msgid "could not unpack arguments in plpy.elog" msgstr "잘못된 인자로 구성된 plpy.elog" -#: plpy_plpymodule.c:431 +#: plpy_plpymodule.c:452 msgid "could not parse error message in plpy.elog" msgstr "plpy.elog 에서 오류 메시지를 분석할 수 없음" -#: plpy_plpymodule.c:448 +#: plpy_plpymodule.c:469 #, c-format -msgid "Argument 'message' given by name and position" +msgid "argument 'message' given by name and position" msgstr "'message' 인자는 이름과 위치가 있어야 함" -#: plpy_plpymodule.c:475 +#: plpy_plpymodule.c:496 #, c-format msgid "'%s' is an invalid keyword argument for this function" msgstr "'%s' 값은 이 함수에서 잘못된 예약어 인자입니다" -#: plpy_plpymodule.c:486 plpy_plpymodule.c:492 +#: plpy_plpymodule.c:507 plpy_plpymodule.c:513 #, c-format msgid "invalid SQLSTATE code" msgstr "잘못된 SQLSTATE 코드" @@ -304,22 +309,22 @@ msgstr "anonymous PL/Python 코드 블록을 컴파일 할 수 없음" msgid "command did not produce a result set" msgstr "명령의 결과값이 없음" -#: plpy_spi.c:60 +#: plpy_spi.c:59 #, c-format msgid "second argument of plpy.prepare must be a sequence" msgstr "plpy.prepare 함수의 두번째 인자는 Python 시퀀스형이어야 함" -#: plpy_spi.c:116 +#: plpy_spi.c:115 #, c-format msgid "plpy.prepare: type name at ordinal position %d is not a string" msgstr "plpy.prepare: %d 번째 인자의 자료형이 문자열이 아님" -#: plpy_spi.c:192 +#: plpy_spi.c:191 #, c-format msgid "plpy.execute expected a query or a plan" msgstr "plpy.execute 함수의 인자는 쿼리문이나 plpy.prepare 객체여야 함" -#: plpy_spi.c:211 +#: plpy_spi.c:210 #, c-format msgid "plpy.execute takes a sequence as its second argument" msgstr "plpy.execut 함수의 두번째 인자는 python 시퀀스형이 와야함" @@ -334,27 +339,27 @@ msgstr "SPI_execute_plan 실패: %s" msgid "SPI_execute failed: %s" msgstr "SPI_execute 실패: %s" -#: plpy_subxactobject.c:123 +#: plpy_subxactobject.c:122 #, c-format msgid "this subtransaction has already been entered" msgstr "이 서브트랜잭션은 이미 시작되었음" -#: plpy_subxactobject.c:129 plpy_subxactobject.c:187 +#: plpy_subxactobject.c:128 plpy_subxactobject.c:186 #, c-format msgid "this subtransaction has already been exited" msgstr "이 서브트랜잭션은 이미 끝났음" -#: plpy_subxactobject.c:181 +#: plpy_subxactobject.c:180 #, c-format msgid "this subtransaction has not been entered" msgstr "이 서브트랜잭션이 시작되지 않았음" -#: plpy_subxactobject.c:193 +#: plpy_subxactobject.c:192 #, c-format msgid "there is no subtransaction to exit from" msgstr "종료할 서브트랜잭션이 없음, 위치:" -#: plpy_typeio.c:286 +#: plpy_typeio.c:292 #, c-format msgid "could not create new dictionary" msgstr "새 디렉터리를 만들 수 없음" @@ -374,32 +379,17 @@ msgstr "모듈안에 Decimal 속성이 없음" msgid "conversion from numeric to Decimal failed" msgstr "numeric 형을 Decimal 형으로 변환할 수 없음" -#: plpy_typeio.c:645 -#, c-format -msgid "cannot convert multidimensional 
array to Python list" -msgstr "다중 차원 배열은 Python 리스트로 변환할 수 없음" - -#: plpy_typeio.c:646 -#, c-format -msgid "PL/Python only supports one-dimensional arrays." -msgstr "PL/Python에서는 1차원 배열만 지원함" - -#: plpy_typeio.c:652 -#, c-format -msgid "could not create new Python list" -msgstr "새 Python 리스트를 만들 수 없음" - -#: plpy_typeio.c:711 +#: plpy_typeio.c:773 #, c-format msgid "could not create bytes representation of Python object" msgstr "Python 객체를 bytea 자료형으로 변환할 수 없음" -#: plpy_typeio.c:820 +#: plpy_typeio.c:882 #, c-format msgid "could not create string representation of Python object" msgstr "Python 객체를 문자열 자료형으로 변환할 수 없음" -#: plpy_typeio.c:831 +#: plpy_typeio.c:893 #, c-format msgid "" "could not convert Python object into cstring: Python string representation " @@ -408,43 +398,91 @@ msgstr "" "Python 객체를 cstring 형으로 변환할 수 없음: Python string 변수에 null문자열" "이 포함되어 있음" -#: plpy_typeio.c:877 +#: plpy_typeio.c:950 +#, c-format +msgid "malformed record literal: \"%s\"" +msgstr "잘못된 레코드 표현: \"%s\"" + +#: plpy_typeio.c:951 +#, c-format +msgid "Missing left parenthesis." +msgstr "왼쪽 괄호가 없음." + +#: plpy_typeio.c:952 plpy_typeio.c:1390 +#, c-format +msgid "" +"To return a composite type in an array, return the composite type as a " +"Python tuple, e.g., \"[('foo',)]\"." +msgstr "" +"배열에서 복합 자료형을 반환하려면, Python 튜플 형을 사용하세요. 예: " +"\"[('foo',)]\"." + +#: plpy_typeio.c:1001 +#, c-format +msgid "number of array dimensions exceeds the maximum allowed (%d)" +msgstr "배열 차원이 최대치 (%d)를 초과 했습니다." + +#: plpy_typeio.c:1005 +#, c-format +msgid "could not determine sequence length for function return value" +msgstr "함수 반환 값으로 시퀀스 길이를 결정할 수 없음" + +#: plpy_typeio.c:1008 plpy_typeio.c:1012 +#, c-format +msgid "array size exceeds the maximum allowed" +msgstr "배열 최대 크기를 초과함" + +#: plpy_typeio.c:1038 #, c-format msgid "" "return value of function with array return type is not a Python sequence" msgstr "배열형으로 넘길 자료형이 Python 시퀀스형이 아님" -#: plpy_typeio.c:996 +#: plpy_typeio.c:1091 +#, c-format +msgid "wrong length of inner sequence: has length %d, but %d was expected" +msgstr "잘못된 내부 시퀀스 길이, 길이 %d, %d 초과했음" + +#: plpy_typeio.c:1093 +#, c-format +msgid "" +"To construct a multidimensional array, the inner sequences must all have the " +"same length." +msgstr "" +"다차원 배열을 사용하려면, 그 하위 배열의 차원이 모두 같아야합니다." + +#: plpy_typeio.c:1213 #, c-format msgid "key \"%s\" not found in mapping" msgstr "맵 안에 \"%s\" 키가 없음" -#: plpy_typeio.c:997 +#: plpy_typeio.c:1214 #, c-format msgid "" "To return null in a column, add the value None to the mapping with the key " "named after the column." msgstr "" -"칼럼값으로 null을 반환하려면, 칼럼 다음에 해당 키 이름과 맵핑 되는 None값을 지정하세요" +"칼럼값으로 null을 반환하려면, 칼럼 다음에 해당 키 이름과 맵핑 되는 None값을 " +"지정하세요" -#: plpy_typeio.c:1048 +#: plpy_typeio.c:1265 #, c-format msgid "length of returned sequence did not match number of columns in row" msgstr "반환되는 시퀀스형 변수의 길이가 로우의 칼럼수와 일치하지 않음" -#: plpy_typeio.c:1159 +#: plpy_typeio.c:1388 #, c-format msgid "attribute \"%s\" does not exist in Python object" msgstr "Python 객체 가운데 \"%s\" 속성이 없음" -#: plpy_typeio.c:1160 +#: plpy_typeio.c:1391 #, c-format msgid "" "To return null in a column, let the returned object have an attribute named " "after column with value None." 
msgstr "" -"칼럼 값으로 null 을 반환하려면, 값으로 None 값을 가지는 칼럼 뒤에, " -"속성 이름이 있는 객체를 반환하세요" +"칼럼 값으로 null 을 반환하려면, 값으로 None 값을 가지는 칼럼 뒤에, 속성 이름" +"이 있는 객체를 반환하세요" #: plpy_util.c:36 #, c-format @@ -455,12 +493,3 @@ msgstr "Python 유니코드 객체를 UTF-8 문자열로 변환할 수 없음" #, c-format msgid "could not extract bytes from encoded string" msgstr "해당 인코드 문자열을 Python에서 사용할 수 없음" - -#~ msgid "Start a new session to use a different Python major version." -#~ msgstr "Python 메이져 버전을 달리 사용하려면 새 세션으로 시작하세요." - -#~ msgid "" -#~ "This session has previously used Python major version %d, and it is now " -#~ "attempting to use Python major version %d." -#~ msgstr "" -#~ "이 세션은 이전에 %d 버전을 사용했는데, 지금은 %d 버전을 사용하려고 합니다." diff --git a/src/pl/plpython/po/ru.po b/src/pl/plpython/po/ru.po index cb3a2ed3cb..e31b04e7ee 100644 --- a/src/pl/plpython/po/ru.po +++ b/src/pl/plpython/po/ru.po @@ -2,13 +2,13 @@ # Copyright (C) 2012-2016 PostgreSQL Global Development Group # This file is distributed under the same license as the PostgreSQL package. # Alexander Lakhin , 2012-2017. -# msgid "" msgstr "" "Project-Id-Version: plpython (PostgreSQL current)\n" "Report-Msgid-Bugs-To: pgsql-bugs@postgresql.org\n" -"POT-Creation-Date: 2017-04-02 23:37+0000\n" -"PO-Revision-Date: 2017-03-29 13:53+0300\n" +"POT-Creation-Date: 2017-08-23 14:37+0000\n" +"PO-Revision-Date: 2017-08-21 08:50+0300\n" +"Last-Translator: Alexander Lakhin \n" "Language-Team: Russian \n" "Language: ru\n" "MIME-Version: 1.0\n" @@ -16,7 +16,6 @@ msgstr "" "Content-Transfer-Encoding: 8bit\n" "Plural-Forms: nplurals=3; plural=n%10==1 && n%100!=11 ? 0 : n%10>=2 && n" "%10<=4 && (n%100<10 || n%100>=20) ? 1 : 2;\n" -"Last-Translator: Alexander Lakhin \n" #: plpy_cursorobject.c:100 #, c-format @@ -107,17 +106,17 @@ msgstr "ошибка получения следующего элемента и msgid "PL/Python function with return type \"void\" did not return None" msgstr "функция PL/Python с типом результата \"void\" вернула не None" -#: plpy_exec.c:374 plpy_exec.c:400 +#: plpy_exec.c:379 plpy_exec.c:405 #, c-format msgid "unexpected return value from trigger procedure" msgstr "триггерная процедура вернула недопустимое значение" -#: plpy_exec.c:375 +#: plpy_exec.c:380 #, c-format msgid "Expected None or a string." msgstr "Ожидалось None или строка." -#: plpy_exec.c:390 +#: plpy_exec.c:395 #, c-format msgid "" "PL/Python trigger function returned \"MODIFY\" in a DELETE trigger -- ignored" @@ -125,54 +124,54 @@ msgstr "" "триггерная функция PL/Python вернула \"MODIFY\" в триггере DELETE -- " "игнорируется" -#: plpy_exec.c:401 +#: plpy_exec.c:406 #, c-format msgid "Expected None, \"OK\", \"SKIP\", or \"MODIFY\"." msgstr "Ожидалось None, \"OK\", \"SKIP\" или \"MODIFY\"." 
-#: plpy_exec.c:482 +#: plpy_exec.c:487 #, c-format msgid "PyList_SetItem() failed, while setting up arguments" msgstr "ошибка в PyList_SetItem() при настройке аргументов" -#: plpy_exec.c:486 +#: plpy_exec.c:491 #, c-format msgid "PyDict_SetItemString() failed, while setting up arguments" msgstr "ошибка в PyDict_SetItemString() при настройке аргументов" -#: plpy_exec.c:498 +#: plpy_exec.c:503 #, c-format msgid "" "function returning record called in context that cannot accept type record" msgstr "" "функция, возвращающая запись, вызвана в контексте, не допускающем этот тип" -#: plpy_exec.c:714 +#: plpy_exec.c:719 #, c-format msgid "while creating return value" msgstr "при создании возвращаемого значения" -#: plpy_exec.c:738 +#: plpy_exec.c:743 #, c-format msgid "could not create new dictionary while building trigger arguments" msgstr "не удалось создать словарь для передачи аргументов триггера" -#: plpy_exec.c:926 +#: plpy_exec.c:931 #, c-format msgid "TD[\"new\"] deleted, cannot modify row" msgstr "элемент TD[\"new\"] удалён -- изменить строку нельзя" -#: plpy_exec.c:931 +#: plpy_exec.c:936 #, c-format msgid "TD[\"new\"] is not a dictionary" msgstr "TD[\"new\"] - не словарь" -#: plpy_exec.c:958 +#: plpy_exec.c:963 #, c-format msgid "TD[\"new\"] dictionary key at ordinal position %d is not a string" msgstr "ключ словаря TD[\"new\"] с порядковым номером %d не является строкой" -#: plpy_exec.c:965 +#: plpy_exec.c:970 #, c-format msgid "" "key \"%s\" found in TD[\"new\"] does not exist as a column in the triggering " @@ -181,17 +180,17 @@ msgstr "" "ключу \"%s\", найденному в TD[\"new\"], не соответствует столбец в строке, " "обрабатываемой триггером" -#: plpy_exec.c:970 +#: plpy_exec.c:975 #, c-format msgid "cannot set system attribute \"%s\"" msgstr "установить системный атрибут \"%s\" нельзя" -#: plpy_exec.c:1041 +#: plpy_exec.c:1046 #, c-format msgid "while modifying trigger row" msgstr "при изменении строки в триггере" -#: plpy_exec.c:1102 +#: plpy_exec.c:1107 #, c-format msgid "forcibly aborting a subtransaction that has not been exited" msgstr "принудительное прерывание незавершённой подтранзакции" @@ -272,8 +271,8 @@ msgstr "не удалось разобрать сообщение об ошиб #: plpy_plpymodule.c:469 #, c-format -msgid "Argument 'message' given by name and position" -msgstr "Аргумент 'message' задан и по имени, и по позиции" +msgid "argument 'message' given by name and position" +msgstr "аргумент 'message' задан и по имени, и по позиции" #: plpy_plpymodule.c:496 #, c-format @@ -385,17 +384,17 @@ msgstr "в модуле нет атрибута Decimal" msgid "conversion from numeric to Decimal failed" msgstr "не удалось преобразовать numeric в Decimal" -#: plpy_typeio.c:772 +#: plpy_typeio.c:773 #, c-format msgid "could not create bytes representation of Python object" msgstr "не удалось создать байтовое представление объекта Python" -#: plpy_typeio.c:881 +#: plpy_typeio.c:882 #, c-format msgid "could not create string representation of Python object" msgstr "не удалось создать строковое представление объекта Python" -#: plpy_typeio.c:892 +#: plpy_typeio.c:893 #, c-format msgid "" "could not convert Python object into cstring: Python string representation " @@ -404,43 +403,43 @@ msgstr "" "не удалось преобразовать объект Python в cstring: похоже, представление " "строки Python содержит нулевые байты" -#: plpy_typeio.c:949 +#: plpy_typeio.c:950 #, c-format msgid "malformed record literal: \"%s\"" msgstr "ошибка в литерале записи: \"%s\"" -#: plpy_typeio.c:950 +#: plpy_typeio.c:951 #, c-format msgid "Missing left 
parenthesis." msgstr "Отсутствует левая скобка." -#: plpy_typeio.c:951 plpy_typeio.c:1389 +#: plpy_typeio.c:952 plpy_typeio.c:1390 #, c-format msgid "" "To return a composite type in an array, return the composite type as a " -"Python tuple, e.g. \"[('foo')]\"" +"Python tuple, e.g., \"[('foo',)]\"." msgstr "" "Чтобы возвратить составной тип в массиве, нужно возвратить составное " -"значение в виде кортежа Python, например: \"[('foo')]\"" +"значение в виде кортежа Python, например: \"[('foo',)]\"." -#: plpy_typeio.c:1000 +#: plpy_typeio.c:1001 #, c-format msgid "number of array dimensions exceeds the maximum allowed (%d)" msgstr "число размерностей массива превышает предел (%d)" -#: plpy_typeio.c:1004 +#: plpy_typeio.c:1005 #, c-format -msgid "cannot determine sequence length for function return value" +msgid "could not determine sequence length for function return value" msgstr "" "не удалось определить длину последовательности в возвращаемом функцией " "значении" -#: plpy_typeio.c:1007 plpy_typeio.c:1011 +#: plpy_typeio.c:1008 plpy_typeio.c:1012 #, c-format msgid "array size exceeds the maximum allowed" msgstr "размер массива превышает предел" -#: plpy_typeio.c:1037 +#: plpy_typeio.c:1038 #, c-format msgid "" "return value of function with array return type is not a Python sequence" @@ -448,23 +447,26 @@ msgstr "" "возвращаемое значение функции с результатом-массивом не является " "последовательностью" -#: plpy_typeio.c:1090 +#: plpy_typeio.c:1091 +#, c-format +msgid "wrong length of inner sequence: has length %d, but %d was expected" +msgstr "неверная длина внутренней последовательности: %d (ожидалось: %d)" + +#: plpy_typeio.c:1093 #, c-format msgid "" -"multidimensional arrays must have array expressions with matching " -"dimensions. PL/Python function return value has sequence length %d while " -"expected %d" +"To construct a multidimensional array, the inner sequences must all have the " +"same length." msgstr "" -"для многомерных массивов должны задаваться выражения с соответствующими " -"размерностями. В возвращаемом функцией на PL/Python значении " -"последовательность имеет длину %d (а ожидалось %d)" +"Для образования многомерного массива внутренние последовательности должны " +"иметь одинаковую длину." -#: plpy_typeio.c:1212 +#: plpy_typeio.c:1213 #, c-format msgid "key \"%s\" not found in mapping" msgstr "ключ \"%s\" не найден в сопоставлении" -#: plpy_typeio.c:1213 +#: plpy_typeio.c:1214 #, c-format msgid "" "To return null in a column, add the value None to the mapping with the key " @@ -473,17 +475,17 @@ msgstr "" "Чтобы присвоить столбцу NULL, добавьте в сопоставление значение None с " "ключом-именем столбца." -#: plpy_typeio.c:1264 +#: plpy_typeio.c:1265 #, c-format msgid "length of returned sequence did not match number of columns in row" msgstr "длина возвращённой последовательности не равна числу столбцов в строке" -#: plpy_typeio.c:1387 +#: plpy_typeio.c:1388 #, c-format msgid "attribute \"%s\" does not exist in Python object" msgstr "в объекте Python не существует атрибут \"%s\"" -#: plpy_typeio.c:1390 +#: plpy_typeio.c:1391 #, c-format msgid "" "To return null in a column, let the returned object have an attribute named " @@ -502,6 +504,15 @@ msgstr "не удалось преобразовать объект Python Unico msgid "could not extract bytes from encoded string" msgstr "не удалось извлечь байты из кодированной строки" +#~ msgid "" +#~ "multidimensional arrays must have array expressions with matching " +#~ "dimensions. 
PL/Python function return value has sequence length %d while " +#~ "expected %d" +#~ msgstr "" +#~ "для многомерных массивов должны задаваться выражения с соответствующими " +#~ "размерностями. В возвращаемом функцией на PL/Python значении " +#~ "последовательность имеет длину %d (а ожидалось %d)" + #~ msgid "plan.status takes no arguments" #~ msgstr "plan.status не принимает аргументы" diff --git a/src/pl/plpython/po/sv.po b/src/pl/plpython/po/sv.po index d178b46503..527a8129ff 100644 --- a/src/pl/plpython/po/sv.po +++ b/src/pl/plpython/po/sv.po @@ -1,14 +1,14 @@ # Swedish message translation file for plpython # Copyright (C) 2017 PostgreSQL Global Development Group # This file is distributed under the same license as the PostgreSQL package. -# Dennis Björklund , 2017. +# Dennis Björklund , 2017, 2018. # msgid "" msgstr "" "Project-Id-Version: plpython (PostgreSQL) 10\n" "Report-Msgid-Bugs-To: pgsql-bugs@postgresql.org\n" -"POT-Creation-Date: 2017-08-05 14:07+0000\n" -"PO-Revision-Date: 2017-08-06 08:32+0200\n" +"POT-Creation-Date: 2018-04-29 20:08+0000\n" +"PO-Revision-Date: 2018-04-29 23:49+0200\n" "Last-Translator: Dennis Björklund \n" "Language-Team: Swedish \n" "Language: sv\n" @@ -17,164 +17,164 @@ msgstr "" "Content-Transfer-Encoding: 8bit\n" "Plural-Forms: nplurals=2; plural=n != 1;\n" -#: plpy_cursorobject.c:100 +#: plpy_cursorobject.c:101 #, c-format msgid "plpy.cursor expected a query or a plan" msgstr "plpy.cursor förväntade sig en fråga eller en plan" -#: plpy_cursorobject.c:176 +#: plpy_cursorobject.c:184 #, c-format msgid "plpy.cursor takes a sequence as its second argument" msgstr "plpy.cursor tar en sekvens som sitt andra argument" -#: plpy_cursorobject.c:192 plpy_spi.c:226 +#: plpy_cursorobject.c:200 plpy_spi.c:211 #, c-format msgid "could not execute plan" msgstr "kunde inte exekvera plan" -#: plpy_cursorobject.c:195 plpy_spi.c:229 +#: plpy_cursorobject.c:203 plpy_spi.c:214 #, c-format msgid "Expected sequence of %d argument, got %d: %s" msgid_plural "Expected sequence of %d arguments, got %d: %s" msgstr[0] "Förväntade sekvens med %d argument, fick %d: %s" msgstr[1] "Förväntade sekvens med %d argument, fick %d: %s" -#: plpy_cursorobject.c:350 +#: plpy_cursorobject.c:352 #, c-format msgid "iterating a closed cursor" msgstr "itererar med en stängd markör" -#: plpy_cursorobject.c:358 plpy_cursorobject.c:423 +#: plpy_cursorobject.c:360 plpy_cursorobject.c:426 #, c-format msgid "iterating a cursor in an aborted subtransaction" msgstr "itererar med en markör i en avbruten subtransaktion" -#: plpy_cursorobject.c:415 +#: plpy_cursorobject.c:418 #, c-format msgid "fetch from a closed cursor" msgstr "hämta från en stängd markör" -#: plpy_cursorobject.c:463 plpy_spi.c:434 +#: plpy_cursorobject.c:461 plpy_spi.c:409 #, c-format msgid "query result has too many rows to fit in a Python list" msgstr "frågeresultet har för många rader för att få plats i en Python-lista" -#: plpy_cursorobject.c:504 +#: plpy_cursorobject.c:512 #, c-format msgid "closing a cursor in an aborted subtransaction" msgstr "stänger en markör i en avbruten subtransaktion" -#: plpy_elog.c:127 plpy_elog.c:128 plpy_plpymodule.c:548 +#: plpy_elog.c:127 plpy_elog.c:128 plpy_plpymodule.c:559 #, c-format msgid "%s" msgstr "%s" -#: plpy_exec.c:140 +#: plpy_exec.c:142 #, c-format msgid "unsupported set function return mode" msgstr "ej supportat returläge för mängdfunktion" -#: plpy_exec.c:141 +#: plpy_exec.c:143 #, c-format msgid "PL/Python set-returning functions only support returning one value per call." 
msgstr "PL/Python mängdreturnerande funktioner stöder bara ett värde per anrop." -#: plpy_exec.c:154 +#: plpy_exec.c:156 #, c-format msgid "returned object cannot be iterated" msgstr "returnerat objekt kan inte itereras" -#: plpy_exec.c:155 +#: plpy_exec.c:157 #, c-format msgid "PL/Python set-returning functions must return an iterable object." msgstr "PL/Python mängdreturnerande funktioner måste returnera ett itererbart objekt." -#: plpy_exec.c:169 +#: plpy_exec.c:171 #, c-format msgid "error fetching next item from iterator" msgstr "fel vid hämtning av nästa del från iteratorn" -#: plpy_exec.c:210 +#: plpy_exec.c:214 +#, c-format +msgid "PL/Python procedure did not return None" +msgstr "PL/Python-procedur returnerade inte None" + +#: plpy_exec.c:218 #, c-format msgid "PL/Python function with return type \"void\" did not return None" msgstr "PL/Python-funktion med returtyp \"void\" returnerade inte None" -#: plpy_exec.c:379 plpy_exec.c:405 +#: plpy_exec.c:374 plpy_exec.c:400 #, c-format msgid "unexpected return value from trigger procedure" msgstr "oväntat returvärde från utlösarprocedur" -#: plpy_exec.c:380 +#: plpy_exec.c:375 #, c-format msgid "Expected None or a string." msgstr "Förväntade None eller en sträng." -#: plpy_exec.c:395 +#: plpy_exec.c:390 #, c-format msgid "PL/Python trigger function returned \"MODIFY\" in a DELETE trigger -- ignored" msgstr "PL/Python-utlösarfunktion returnerade \"MODIFY\" i en DELETE-utlösare -- ignorerad" -#: plpy_exec.c:406 +#: plpy_exec.c:401 #, c-format msgid "Expected None, \"OK\", \"SKIP\", or \"MODIFY\"." msgstr "Förväntade None, \"OK\", \"SKIP\" eller \"MODIFY\"." -#: plpy_exec.c:487 +#: plpy_exec.c:451 #, c-format msgid "PyList_SetItem() failed, while setting up arguments" msgstr "PyList_SetItem() misslyckades vid uppsättning av argument" -#: plpy_exec.c:491 +#: plpy_exec.c:455 #, c-format msgid "PyDict_SetItemString() failed, while setting up arguments" msgstr "PyDict_SetItemString() misslyckades vid uppsättning av argument" -#: plpy_exec.c:503 +#: plpy_exec.c:467 #, c-format msgid "function returning record called in context that cannot accept type record" msgstr "en funktion med post som värde anropades i sammanhang där poster inte kan godtagas." 
-#: plpy_exec.c:719 +#: plpy_exec.c:684 #, c-format msgid "while creating return value" msgstr "vid skapande av returvärde" -#: plpy_exec.c:743 -#, c-format -msgid "could not create new dictionary while building trigger arguments" -msgstr "kunde inte skapa ny katalog vid byggande av utlösarargument" - -#: plpy_exec.c:931 +#: plpy_exec.c:909 #, c-format msgid "TD[\"new\"] deleted, cannot modify row" msgstr "TD[\"new\"] raderad, kan inte modifiera rad" -#: plpy_exec.c:936 +#: plpy_exec.c:914 #, c-format msgid "TD[\"new\"] is not a dictionary" msgstr "TD[\"new\"] är inte en dictionary" -#: plpy_exec.c:963 +#: plpy_exec.c:941 #, c-format msgid "TD[\"new\"] dictionary key at ordinal position %d is not a string" msgstr "TD[\"new\"] dictionary-nyckel vid numerisk position %d är inte en sträng" -#: plpy_exec.c:970 +#: plpy_exec.c:948 #, c-format msgid "key \"%s\" found in TD[\"new\"] does not exist as a column in the triggering row" msgstr "nyckel \"%s\" hittad i TD[\"new\"] finns inte som en kolumn i den utlösande raden" -#: plpy_exec.c:975 +#: plpy_exec.c:953 #, c-format msgid "cannot set system attribute \"%s\"" msgstr "kan inte sätta systemattribut \"%s\"" -#: plpy_exec.c:1046 +#: plpy_exec.c:1011 #, c-format msgid "while modifying trigger row" msgstr "vid modifiering av utlösande rad" -#: plpy_exec.c:1107 +#: plpy_exec.c:1072 #, c-format msgid "forcibly aborting a subtransaction that has not been exited" msgstr "tvingar avbrytande av subtransaktion som inte har avslutats" @@ -199,71 +199,66 @@ msgstr "ej fångar fel i initiering" msgid "could not import \"__main__\" module" msgstr "kunde inte importera \"__main__\"-modul" -#: plpy_main.c:170 -#, c-format -msgid "could not create globals" -msgstr "kundew inte skapa globaler" - #: plpy_main.c:174 #, c-format msgid "could not initialize globals" msgstr "kunde inte initierar globaler" -#: plpy_main.c:387 +#: plpy_main.c:399 +#, c-format +msgid "PL/Python procedure \"%s\"" +msgstr "PL/Python-procedur \"%s\"" + +#: plpy_main.c:402 #, c-format msgid "PL/Python function \"%s\"" msgstr "PL/Python-funktion \"%s\"" -#: plpy_main.c:394 +#: plpy_main.c:410 #, c-format msgid "PL/Python anonymous code block" msgstr "PL/Python anonymt kodblock" -#: plpy_plpymodule.c:181 plpy_plpymodule.c:184 +#: plpy_plpymodule.c:192 plpy_plpymodule.c:195 #, c-format msgid "could not import \"plpy\" module" msgstr "kunde inte importera \"plpy\"-modul" -#: plpy_plpymodule.c:199 +#: plpy_plpymodule.c:210 #, c-format msgid "could not create the spiexceptions module" msgstr "kunde inte skapa modulen spiexceptions" -#: plpy_plpymodule.c:207 +#: plpy_plpymodule.c:218 #, c-format msgid "could not add the spiexceptions module" msgstr "kunde inte lägga till modulen spiexceptions" -#: plpy_plpymodule.c:236 -#, c-format -msgid "could not create exception \"%s\"" -msgstr "kunde inte skapa undantag \"%s\"" - -#: plpy_plpymodule.c:271 plpy_plpymodule.c:275 +#: plpy_plpymodule.c:286 #, c-format msgid "could not generate SPI exceptions" msgstr "kunde inte skapa SPI-undantag" -#: plpy_plpymodule.c:443 +#: plpy_plpymodule.c:454 #, c-format msgid "could not unpack arguments in plpy.elog" msgstr "kunde inte packa upp argument i plpy.elog" -#: plpy_plpymodule.c:452 +#: plpy_plpymodule.c:463 msgid "could not parse error message in plpy.elog" msgstr "kunde inte parsa felmeddelande i plpy.elog" -#: plpy_plpymodule.c:469 +#: plpy_plpymodule.c:480 #, c-format msgid "argument 'message' given by name and position" msgstr "argumentet 'message' angivet med namn och position" -#: plpy_plpymodule.c:496 +#: 
plpy_plpymodule.c:507 #, c-format msgid "'%s' is an invalid keyword argument for this function" msgstr "'%s' är ett ogiltigt nyckelordsargument för denna funktion" -#: plpy_plpymodule.c:507 plpy_plpymodule.c:513 +#: plpy_plpymodule.c:518 plpy_plpymodule.c:524 #, c-format msgid "invalid SQLSTATE code" msgstr "ogiltig SQLSTATE-kod" @@ -273,57 +268,57 @@ msgstr "ogiltig SQLSTATE-kod" msgid "trigger functions can only be called as triggers" msgstr "Triggningsfunktioner kan bara anropas vid triggning." -#: plpy_procedure.c:235 +#: plpy_procedure.c:234 #, c-format msgid "PL/Python functions cannot return type %s" msgstr "PL/Python-funktioner kan inte returnera typ %s" -#: plpy_procedure.c:316 +#: plpy_procedure.c:312 #, c-format msgid "PL/Python functions cannot accept type %s" msgstr "PL/Python-funktioner kan inte ta emot typ %s" -#: plpy_procedure.c:412 +#: plpy_procedure.c:402 #, c-format msgid "could not compile PL/Python function \"%s\"" msgstr "kunde inte kompilera PL/Python-funktion \"%s\"" -#: plpy_procedure.c:415 +#: plpy_procedure.c:405 #, c-format msgid "could not compile anonymous PL/Python code block" msgstr "kunde inte kompilera anonymt PL/Python-kodblock" -#: plpy_resultobject.c:145 plpy_resultobject.c:165 plpy_resultobject.c:185 +#: plpy_resultobject.c:150 plpy_resultobject.c:176 plpy_resultobject.c:202 #, c-format msgid "command did not produce a result set" msgstr "kommandot producerade inte en resultatmängd" -#: plpy_spi.c:59 +#: plpy_spi.c:60 #, c-format msgid "second argument of plpy.prepare must be a sequence" msgstr "andra argumentet till plpy.prepare måste vara en sekvens" -#: plpy_spi.c:115 +#: plpy_spi.c:104 #, c-format msgid "plpy.prepare: type name at ordinal position %d is not a string" msgstr "plpy.prepare: typnamn vid numerisk position %d är inte en sträng" -#: plpy_spi.c:191 +#: plpy_spi.c:176 #, c-format msgid "plpy.execute expected a query or a plan" msgstr "plpy.execute förväntade en fråga eller en plan" -#: plpy_spi.c:210 +#: plpy_spi.c:195 #, c-format msgid "plpy.execute takes a sequence as its second argument" msgstr "plpy.execute tar en sekvens som sitt andra argument" -#: plpy_spi.c:335 +#: plpy_spi.c:305 #, c-format msgid "SPI_execute_plan failed: %s" msgstr "SPI_execute_plan misslyckades: %s" -#: plpy_spi.c:377 +#: plpy_spi.c:347 #, c-format msgid "SPI_execute failed: %s" msgstr "SPI_execute misslyckades: %s" @@ -348,117 +343,124 @@ msgstr "denna subtransaktion har inte gåtts in i" msgid "there is no subtransaction to exit from" msgstr "det finns ingen subtransaktion att avsluta från" -#: plpy_typeio.c:292 -#, c-format -msgid "could not create new dictionary" -msgstr "kunde inte skapa ny katalog" - -#: plpy_typeio.c:560 +#: plpy_typeio.c:591 #, c-format msgid "could not import a module for Decimal constructor" msgstr "kunde inte importera en modul för Decimal-konstruktorn" -#: plpy_typeio.c:564 +#: plpy_typeio.c:595 #, c-format msgid "no Decimal attribute in module" msgstr "inga Decimal-attribut i modulen" -#: plpy_typeio.c:570 +#: plpy_typeio.c:601 #, c-format msgid "conversion from numeric to Decimal failed" msgstr "konvertering från numeric till Decimal misslyckades" -#: plpy_typeio.c:773 +#: plpy_typeio.c:908 #, c-format msgid "could not create bytes representation of Python object" msgstr "kunde inte skapa byte-representation av Python-objekt" -#: plpy_typeio.c:882 +#: plpy_typeio.c:1056 #, c-format msgid "could not create string representation of Python object" msgstr "kunde inte skapa strängrepresentation av Python-objekt" -#: plpy_typeio.c:893 
+#: plpy_typeio.c:1067 #, c-format msgid "could not convert Python object into cstring: Python string representation appears to contain null bytes" msgstr "kunde inte konvertera Python-objekt till cstring: Python-strängrepresentationen verkar innehålla noll-bytes" -#: plpy_typeio.c:950 -#, c-format -msgid "malformed record literal: \"%s\"" -msgstr "felaktig postliteral: \"%s\"" - -#: plpy_typeio.c:951 -#, c-format -msgid "Missing left parenthesis." -msgstr "Saknar vänster parentes" - -#: plpy_typeio.c:952 plpy_typeio.c:1390 -#, c-format -msgid "To return a composite type in an array, return the composite type as a Python tuple, e.g., \"[('foo',)]\"." -msgstr "För att returnera en composite-typ i en array, returnera composite-typen som en Python-tupel, t.ex. \"[('foo',)]\"." - -#: plpy_typeio.c:1001 +#: plpy_typeio.c:1176 #, c-format msgid "number of array dimensions exceeds the maximum allowed (%d)" msgstr "antal array-dimensioner överskriver maximalt tillåtna (%d)" -#: plpy_typeio.c:1005 +#: plpy_typeio.c:1180 #, c-format msgid "could not determine sequence length for function return value" msgstr "kunde inte bestämma sekvenslängd för funktionens returvärde" -#: plpy_typeio.c:1008 plpy_typeio.c:1012 +#: plpy_typeio.c:1183 plpy_typeio.c:1187 #, c-format msgid "array size exceeds the maximum allowed" msgstr "array-storlek överskrider maximalt tillåtna" -#: plpy_typeio.c:1038 +#: plpy_typeio.c:1213 #, c-format msgid "return value of function with array return type is not a Python sequence" msgstr "returvärde för funktion med array-returtyp är inte en Python-sekvens" -#: plpy_typeio.c:1091 +#: plpy_typeio.c:1259 #, c-format msgid "wrong length of inner sequence: has length %d, but %d was expected" msgstr "fel längd på inre sekvens: har längd %d, men %d förväntades" -#: plpy_typeio.c:1093 +#: plpy_typeio.c:1261 #, c-format msgid "To construct a multidimensional array, the inner sequences must all have the same length." msgstr "För att skapa en multidimensionell array så skall alla de inre sekvenserna ha samma längd." -#: plpy_typeio.c:1213 +#: plpy_typeio.c:1340 +#, c-format +msgid "malformed record literal: \"%s\"" +msgstr "felaktig postliteral: \"%s\"" + +#: plpy_typeio.c:1341 +#, c-format +msgid "Missing left parenthesis." +msgstr "Saknar vänster parentes" + +#: plpy_typeio.c:1342 plpy_typeio.c:1543 +#, c-format +msgid "To return a composite type in an array, return the composite type as a Python tuple, e.g., \"[('foo',)]\"." +msgstr "För att returnera en composite-typ i en array, returnera composite-typen som en Python-tupel, t.ex. \"[('foo',)]\"." + +#: plpy_typeio.c:1389 #, c-format msgid "key \"%s\" not found in mapping" msgstr "nyckeln \"%s\" hittades inte i mapping" -#: plpy_typeio.c:1214 +#: plpy_typeio.c:1390 #, c-format msgid "To return null in a column, add the value None to the mapping with the key named after the column." msgstr "För att returnera null i en kolumn så lägg till värdet None till mappningen med nyckelnamn taget från kolumnen." 
-#: plpy_typeio.c:1265 +#: plpy_typeio.c:1443 #, c-format msgid "length of returned sequence did not match number of columns in row" msgstr "längden på den returnerade sekvensen matchade inte antal kolumner i raden" -#: plpy_typeio.c:1388 +#: plpy_typeio.c:1541 #, c-format msgid "attribute \"%s\" does not exist in Python object" msgstr "attributet \"%s\" finns inte i Python-objektet" -#: plpy_typeio.c:1391 +#: plpy_typeio.c:1544 #, c-format msgid "To return null in a column, let the returned object have an attribute named after column with value None." msgstr "För att returnera null i en kolumn så låt det returnerade objektet ha ett attribut med namn efter kolumnen och med värdet None." -#: plpy_util.c:36 +#: plpy_util.c:35 #, c-format msgid "could not convert Python Unicode object to bytes" msgstr "kunde inte konvertera Python-unicode-objekt till bytes" -#: plpy_util.c:42 +#: plpy_util.c:41 #, c-format msgid "could not extract bytes from encoded string" msgstr "kunde inte extrahera bytes från kodad sträng" + +#~ msgid "could not create new dictionary" +#~ msgstr "kunde inte skapa ny katalog" + +#~ msgid "could not create exception \"%s\"" +#~ msgstr "kunde inte skapa undantag \"%s\"" + +#~ msgid "could not create globals" +#~ msgstr "kundew inte skapa globaler" + +#~ msgid "could not create new dictionary while building trigger arguments" +#~ msgstr "kunde inte skapa ny katalog vid byggande av utlösarargument" diff --git a/src/pl/plpython/po/tr.po b/src/pl/plpython/po/tr.po new file mode 100644 index 0000000000..41c207884c --- /dev/null +++ b/src/pl/plpython/po/tr.po @@ -0,0 +1,534 @@ +# LANGUAGE message translation file for plpython +# Copyright (C) 2009 PostgreSQL Global Development Group +# This file is distributed under the same license as the PostgreSQL package. +# FIRST AUTHOR , 2009. 
+# +msgid "" +msgstr "" +"Project-Id-Version: PostgreSQL 8.4\n" +"Report-Msgid-Bugs-To: pgsql-bugs@postgresql.org\n" +"POT-Creation-Date: 2018-02-22 00:08+0000\n" +"PO-Revision-Date: 2018-02-22 16:03+0300\n" +"Last-Translator: Devrim GÜNDÜZ \n" +"Language-Team: Turkish \n" +"Language: tr\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=UTF-8\n" +"Content-Transfer-Encoding: 8bit\n" +"Plural-Forms: nplurals=1; plural=0;\n" +"X-Generator: Poedit 1.8.7.1\n" + +#: plpy_cursorobject.c:100 +#, c-format +msgid "plpy.cursor expected a query or a plan" +msgstr "plpy.cursor bir sorgu ya da bir plan bekledi" + +#: plpy_cursorobject.c:176 +#, c-format +msgid "plpy.cursor takes a sequence as its second argument" +msgstr "plpy.cursor bir sequence'ı ikinci argüman olarak alır" + +#: plpy_cursorobject.c:192 plpy_spi.c:226 +#, c-format +msgid "could not execute plan" +msgstr "plan çalıştırılamadı" + +#: plpy_cursorobject.c:195 plpy_spi.c:229 +#, c-format +msgid "Expected sequence of %d argument, got %d: %s" +msgid_plural "Expected sequence of %d arguments, got %d: %s" +msgstr[0] "%d argümanının sequence'ı beklendi; %d alındı: %s" + +#: plpy_cursorobject.c:350 +#, fuzzy, c-format +#| msgid "position a cursor" +msgid "iterating a closed cursor" +msgstr "cursor'u yereştir" + +#: plpy_cursorobject.c:358 plpy_cursorobject.c:423 +#, fuzzy, c-format +#| msgid "aborting any active transactions" +msgid "iterating a cursor in an aborted subtransaction" +msgstr "aktif transactionlar iptal ediliyor" + +#: plpy_cursorobject.c:415 +#, c-format +msgid "fetch from a closed cursor" +msgstr "kapalı bir cursor'dan getir" + +#: plpy_cursorobject.c:463 plpy_spi.c:434 +#, c-format +msgid "query result has too many rows to fit in a Python list" +msgstr "sorgu sonucundaki satır sayısı bir Python listesine sığabilecekten çok fazla " + +#: plpy_cursorobject.c:504 +#, c-format +msgid "closing a cursor in an aborted subtransaction" +msgstr "iptal edilen bir alt-işlemdeki (subtransaction) bir cursor kapatılıyor" + +#: plpy_elog.c:127 plpy_elog.c:128 plpy_plpymodule.c:548 +#, c-format +msgid "%s" +msgstr "%s" + +#: plpy_exec.c:140 +#, c-format +msgid "unsupported set function return mode" +msgstr "desteklenmeyen küme fonksiyonu dönüş modu" + +#: plpy_exec.c:141 +#, c-format +msgid "PL/Python set-returning functions only support returning one value per call." +msgstr "PL/Python küme dönen fonksiyonları sadece her çağrı içinde bir değer döndürmeyi desteklerler" + +#: plpy_exec.c:154 +#, c-format +msgid "returned object cannot be iterated" +msgstr "dönen nesne yinelenemez" + +#: plpy_exec.c:155 +#, c-format +msgid "PL/Python set-returning functions must return an iterable object." +msgstr "PL/Python küme dönen fonksiyonları yinelenebilir bir nesne dönmelidir." + +#: plpy_exec.c:169 +#, c-format +msgid "error fetching next item from iterator" +msgstr "yineleticiden sonraki öğeyi alırken hata" + +#: plpy_exec.c:210 +#, c-format +msgid "PL/Python function with return type \"void\" did not return None" +msgstr "dönüş tipi \"void\" olan PL/Python fonksiyonu None döndürmedi" + +#: plpy_exec.c:379 plpy_exec.c:405 +#, c-format +msgid "unexpected return value from trigger procedure" +msgstr "trigger yordamından beklenmeyen dönüş değeri" + +#: plpy_exec.c:380 +#, c-format +msgid "Expected None or a string." +msgstr "None ya da string bekleniyordu." 
+ +#: plpy_exec.c:395 +#, c-format +msgid "PL/Python trigger function returned \"MODIFY\" in a DELETE trigger -- ignored" +msgstr "PL/Python trigger fonksiyonu DELETE triggerında \"MODIFY\" döndürdü -- gözardı edildi" + +#: plpy_exec.c:406 +#, c-format +msgid "Expected None, \"OK\", \"SKIP\", or \"MODIFY\"." +msgstr "None, \"OK\", \"SKIP\", ya da \"MODIFY\" bekleniyordu" + +#: plpy_exec.c:487 +#, c-format +msgid "PyList_SetItem() failed, while setting up arguments" +msgstr "PyList_SetItem() bağımsız değişkenler ayarlanırken başarısız oldu" + +#: plpy_exec.c:491 +#, c-format +msgid "PyDict_SetItemString() failed, while setting up arguments" +msgstr "PyDict_SetItemString() bağımsız değişkenler ayarlanırken başarısız oldu" + +#: plpy_exec.c:503 +#, c-format +msgid "function returning record called in context that cannot accept type record" +msgstr "tip kaydı içermeyen alanda çağırılan ve kayıt döndüren fonksiyon" + +#: plpy_exec.c:719 +#, c-format +msgid "while creating return value" +msgstr "dönüş değeri yaratılırken" + +#: plpy_exec.c:743 +#, c-format +msgid "could not create new dictionary while building trigger arguments" +msgstr "trigger argümanlarını oluştururken yeni sözlük yaratılamadı" + +#: plpy_exec.c:931 +#, c-format +msgid "TD[\"new\"] deleted, cannot modify row" +msgstr "TD[\"new\"] silindi, satır düzenlenemiyor" + +#: plpy_exec.c:936 +#, c-format +msgid "TD[\"new\"] is not a dictionary" +msgstr "TD[\"new\"] bir sözlük değil" + +#: plpy_exec.c:963 +#, c-format +msgid "TD[\"new\"] dictionary key at ordinal position %d is not a string" +msgstr "%d sıra pozisyonundaki TD[\"new\"] sözlük anahtarı dizi değil" + +#: plpy_exec.c:970 +#, c-format +msgid "key \"%s\" found in TD[\"new\"] does not exist as a column in the triggering row" +msgstr "TD[\"new\"] içinde bulunan \"%s\" anahtarı tetikleyen satırda bir kolon olarak bulunmuyor" + +#: plpy_exec.c:975 +#, c-format +msgid "cannot set system attribute \"%s\"" +msgstr "\"%s\" sistem niteliği ayarlanamıyor" + +#: plpy_exec.c:1046 +#, c-format +msgid "while modifying trigger row" +msgstr "tetikleyici satırını düzenlerken" + +#: plpy_exec.c:1107 +#, c-format +msgid "forcibly aborting a subtransaction that has not been exited" +msgstr "çıkış yapılmamış bir alt-işlem (subtransaction) zorla iptal ediliyor" + +#: plpy_main.c:125 +#, c-format +msgid "multiple Python libraries are present in session" +msgstr "oturumda birden çok Python kütüphanesi mevcut" + +#: plpy_main.c:126 +#, c-format +msgid "Only one Python major version can be used in one session." +msgstr "Bir oturumda sadece bir Python ana sürümü kullanılabilir." 
+ +#: plpy_main.c:142 +#, c-format +msgid "untrapped error in initialization" +msgstr "ilklendirme aşamasında yakalanamayan hata" + +#: plpy_main.c:165 +#, c-format +msgid "could not import \"__main__\" module" +msgstr "\"__main__\" modülü alınamadı" + +#: plpy_main.c:170 +#, c-format +msgid "could not create globals" +msgstr "evrensel değerler (globals) oluşturulamadı" + +#: plpy_main.c:174 +#, c-format +msgid "could not initialize globals" +msgstr "global değerler ilklendirilemedi" + +#: plpy_main.c:387 +#, c-format +msgid "PL/Python function \"%s\"" +msgstr "\"%s\" PL/Python fonksiyonu" + +#: plpy_main.c:394 +#, c-format +msgid "PL/Python anonymous code block" +msgstr "PL/Python anonim kod bloğu" + +#: plpy_plpymodule.c:181 plpy_plpymodule.c:184 +#, c-format +msgid "could not import \"plpy\" module" +msgstr "\"plpy\" modülü alınamadı" + +#: plpy_plpymodule.c:199 +#, c-format +msgid "could not create the spiexceptions module" +msgstr "spiexceptions modülü oluşturulamadı" + +#: plpy_plpymodule.c:207 +#, c-format +msgid "could not add the spiexceptions module" +msgstr "spiexceptions modülü eklenemedi" + +#: plpy_plpymodule.c:236 +#, c-format +msgid "could not create exception \"%s\"" +msgstr "\"%s\" istisnası (exception) oluşturulamadı" + +#: plpy_plpymodule.c:271 plpy_plpymodule.c:275 +#, c-format +msgid "could not generate SPI exceptions" +msgstr "SPI istisnaları (exception) üretilemedi" + +#: plpy_plpymodule.c:443 +#, c-format +msgid "could not unpack arguments in plpy.elog" +msgstr "plpy.elog dosyasındaki argümanlar unpack edilemedi" + +#: plpy_plpymodule.c:452 +msgid "could not parse error message in plpy.elog" +msgstr "plpy.elog dosyasındaki hata mesajı ayrıştırılamadı" + +#: plpy_plpymodule.c:469 +#, c-format +msgid "argument 'message' given by name and position" +msgstr "ad ve konum tarafından verilen argüman 'mesajı'" + +#: plpy_plpymodule.c:496 +#, c-format +msgid "'%s' is an invalid keyword argument for this function" +msgstr "'%s' bu fonksiyon için geçersiz bir anahtar kelime argümanıdır" + +#: plpy_plpymodule.c:507 plpy_plpymodule.c:513 +#, c-format +msgid "invalid SQLSTATE code" +msgstr "geçersiz SQLSTATE kodu" + +#: plpy_procedure.c:228 +#, c-format +msgid "trigger functions can only be called as triggers" +msgstr "trigger fonksiyonları sadece trigger olarak çağırılabilirler." 
+ +#: plpy_procedure.c:233 +#, c-format +msgid "PL/Python functions cannot return type %s" +msgstr "PL/Python fonksiyonları %s tipini döndüremezler" + +#: plpy_procedure.c:314 +#, c-format +msgid "PL/Python functions cannot accept type %s" +msgstr "PL/Python fonksiyonlar %s tipini kabul etmezler" + +#: plpy_procedure.c:410 +#, c-format +msgid "could not compile PL/Python function \"%s\"" +msgstr "\"%s\" PL/Python fonksiyonu derlenemedi" + +#: plpy_procedure.c:413 +#, c-format +msgid "could not compile anonymous PL/Python code block" +msgstr "anonim PL/Python kod bloğu derlenemedi" + +#: plpy_resultobject.c:145 plpy_resultobject.c:165 plpy_resultobject.c:185 +#, c-format +msgid "command did not produce a result set" +msgstr "komut bir sonuç kümesi üretmedi" + +#: plpy_spi.c:59 +#, c-format +msgid "second argument of plpy.prepare must be a sequence" +msgstr "plpy.prepare'in ikinci argümanı sequence olmalıdır" + +#: plpy_spi.c:115 +#, c-format +msgid "plpy.prepare: type name at ordinal position %d is not a string" +msgstr "plpy.prepare: %d sıra posizyonundaki veri tipi dizi değil" + +#: plpy_spi.c:191 +#, c-format +msgid "plpy.execute expected a query or a plan" +msgstr "plpy.execute bir sorgu ya da bir plan bekledi" + +#: plpy_spi.c:210 +#, c-format +msgid "plpy.execute takes a sequence as its second argument" +msgstr "plpy.execute bir sequence'ı ikinci argüman olarak alır" + +#: plpy_spi.c:335 +#, c-format +msgid "SPI_execute_plan failed: %s" +msgstr "SPI_execute_plan başarısız oldu: %s" + +#: plpy_spi.c:377 +#, c-format +msgid "SPI_execute failed: %s" +msgstr "SPI_execute başarısız oldu: %s" + +#: plpy_subxactobject.c:122 +#, c-format +msgid "this subtransaction has already been entered" +msgstr "bu alt-işleme (subtransaction) zaten girilmiş" + +#: plpy_subxactobject.c:128 plpy_subxactobject.c:186 +#, c-format +msgid "this subtransaction has already been exited" +msgstr "bu alt-işlemden (subtransaction) zaten çıkılmış" + +#: plpy_subxactobject.c:180 +#, c-format +msgid "this subtransaction has not been entered" +msgstr "bu alt-işleme (subtransaction) girilmemiş" + +#: plpy_subxactobject.c:192 +#, c-format +msgid "there is no subtransaction to exit from" +msgstr "çıkılacak bir alt-işlem (subtransaction) yok" + +#: plpy_typeio.c:292 +#, c-format +msgid "could not create new dictionary" +msgstr "Yeni sözlük yaratılamadı" + +#: plpy_typeio.c:560 +#, c-format +msgid "could not import a module for Decimal constructor" +msgstr "Decimal constructor için bir modül alınamadı" + +#: plpy_typeio.c:564 +#, c-format +msgid "no Decimal attribute in module" +msgstr "modülde Decimal niteliği yok" + +#: plpy_typeio.c:570 +#, c-format +msgid "conversion from numeric to Decimal failed" +msgstr "numeric'ten Decimal'e dönüşüm başarısız oldu" + +#: plpy_typeio.c:773 +#, c-format +msgid "could not create bytes representation of Python object" +msgstr "Python nesnesinin bytes gösterimi yaratılamadı" + +#: plpy_typeio.c:882 +#, c-format +msgid "could not create string representation of Python object" +msgstr "Python nesnesinin dizgi gösterimi yaratılamadı" + +#: plpy_typeio.c:893 +#, c-format +msgid "could not convert Python object into cstring: Python string representation appears to contain null bytes" +msgstr "Python nesnesi cstring'e dönüştürülemedi: Python dizgi gösterimi null bayt içeriyor olabilir." + +#: plpy_typeio.c:950 +#, c-format +msgid "malformed record literal: \"%s\"" +msgstr "hatalı değer: \"%s\"" + +#: plpy_typeio.c:951 +#, c-format +msgid "Missing left parenthesis." +msgstr "Sol parantez eksik." 
+ +#: plpy_typeio.c:952 plpy_typeio.c:1390 +#, c-format +msgid "To return a composite type in an array, return the composite type as a Python tuple, e.g., \"[('foo',)]\"." +msgstr "Bir bileşik türü dizi (array) içinde döndürmek için, bileşik türü bir Python tuple, e.g., \"[('foo',)]\"." + +#: plpy_typeio.c:1001 +#, c-format +msgid "number of array dimensions exceeds the maximum allowed (%d)" +msgstr "dizi (array) boyut sayısı izin verilen en yüksek değeri (%d) aşmaktadır" + +#: plpy_typeio.c:1005 +#, c-format +msgid "could not determine sequence length for function return value" +msgstr "fonksiyon dönüş değeri için sequence uzunluğu belirlenemedi" + +#: plpy_typeio.c:1008 plpy_typeio.c:1012 +#, c-format +msgid "array size exceeds the maximum allowed" +msgstr "dizi (array) boyutu izin verilen en yüksek değeri aşmaktadır" + +#: plpy_typeio.c:1038 +#, c-format +msgid "return value of function with array return type is not a Python sequence" +msgstr "dizi dönüp tipli dönüş değeri olan fonksiyon Python sequence'ı değildir" + +#: plpy_typeio.c:1091 +#, c-format +msgid "wrong length of inner sequence: has length %d, but %d was expected" +msgstr "iç sequence'in uzunluğu yanlış: %d uzunlukta, fakat %d bekleniyordu" + +#: plpy_typeio.c:1093 +#, c-format +msgid "To construct a multidimensional array, the inner sequences must all have the same length." +msgstr "Çok boyutlu bir dizi oluşturmak için, iç sequence'lerin tamamı aynı uzunlukta olmalı." + +#: plpy_typeio.c:1213 +#, c-format +msgid "key \"%s\" not found in mapping" +msgstr "\"%s\" anahtarı planlamada bulunnamadı" + +#: plpy_typeio.c:1214 +#, c-format +msgid "To return null in a column, add the value None to the mapping with the key named after the column." +msgstr "Bir kolondan Null döndürmek için, kolonun ismindeki eşleşmenin anahtarına, NONE değerini ekleyin" + +#: plpy_typeio.c:1265 +#, c-format +msgid "length of returned sequence did not match number of columns in row" +msgstr "Dönen sequence'in uzunluğu satırdaki kolonların sayısı ile eşleşmiyor." + +#: plpy_typeio.c:1388 +#, c-format +msgid "attribute \"%s\" does not exist in Python object" +msgstr "\"%s\" niteliği Python nesnesinde bulunmaz" + +#: plpy_typeio.c:1391 +#, c-format +msgid "To return null in a column, let the returned object have an attribute named after column with value None." +msgstr " Bir kolondan null döndürmek için, döndürdüğünüz nesnenin, kolonun adına sahip bir özelliğinin olmasını ve bu özelliğin değerinin NONE olmasını sağlamanız gerekir" + +#: plpy_util.c:36 +#, fuzzy, c-format +#| msgid "could not convert Python Unicode object to PostgreSQL server encoding" +msgid "could not convert Python Unicode object to bytes" +msgstr "Python unicode nesnesi PostgreSQL sunucu dil kodlamasına dönüştürülemedi." 
+ +#: plpy_util.c:42 +#, fuzzy, c-format +#| msgid "could not compare Unicode strings: %m" +msgid "could not extract bytes from encoded string" +msgstr "Unicode satırları karşılaştırılamadı: %m" + +#~ msgid "PL/Python function \"%s\" could not execute plan" +#~ msgstr "\"%s\" PL/Python fonksiyonu planı çalıştıramadı" + +#~ msgid "PL/Python function \"%s\" failed" +#~ msgstr "\"%s\" PL/Python fonksiyonu başarısız oldu" + +#~ msgid "could not create string representation of Python object in PL/Python function \"%s\" while creating return value" +#~ msgstr "dönüş değeri yaratılırken \"%s\" Pl/Python fonksiyonunun içindeki Python ensnesinin dizi gösterimi yaratılamadı" + +#~ msgid "could not compute string representation of Python object in PL/Python function \"%s\" while modifying trigger row" +#~ msgstr "tetikleyici satırı düzenlerken \"%s\" PL/Python fonksiyonunun içindeki Python nesnesinin dizi gösterimi hesaplanamadı" + +#~ msgid "out of memory" +#~ msgstr "yetersiz bellek" + +#~ msgid "PL/Python: %s" +#~ msgstr "PL/Python: %s" + +#~ msgid "could not create procedure cache" +#~ msgstr "yordam önbelleği yaratılamadı" + +#~ msgid "Start a new session to use a different Python major version." +#~ msgstr "Farklı bir Python ana sürümü kullanmak için yeni bir oturum açın." + +#~ msgid "This session has previously used Python major version %d, and it is now attempting to use Python major version %d." +#~ msgstr "Bu oturum daha önceden %d Python ana sürümünü kullandı, ve şimdi %d ana sürümünü kullanmayı deniyor." + +#~ msgid "unrecognized error in PLy_spi_execute_fetch_result" +#~ msgstr "PLy_spi_execute_fetch_result içinde tanımlanamayan hata" + +#~ msgid "unrecognized error in PLy_spi_execute_query" +#~ msgstr "PLy_spi_execute_query içinde tanımlanamayan hata" + +#~ msgid "unrecognized error in PLy_spi_execute_plan" +#~ msgstr "PLy_spi_execute_plan içinde beklenmeyen hata" + +#~ msgid "unrecognized error in PLy_spi_prepare" +#~ msgstr "PLy_spi_prepare içinde tanımlanamayan hata" + +#~ msgid "plpy.prepare does not support composite types" +#~ msgstr "plpy.prepare kompozit tipleri desteklemez" + +#~ msgid "invalid arguments for plpy.prepare" +#~ msgstr "plpy.prepare için geçersiz argümanlar" + +#~ msgid "transaction aborted" +#~ msgstr "transaction iptal edildi" + +#~ msgid "plan.status takes no arguments" +#~ msgstr "plan.status bir argüman almaz" + +#~ msgid "PL/Python only supports one-dimensional arrays." +#~ msgstr "PL/Python sadece bir boyutlu dizileri destekler." + +#~ msgid "cannot convert multidimensional array to Python list" +#~ msgstr "çok boyutlu dizi, Python listesine dönüştürülemedi" + +#~ msgid "PL/Python does not support conversion to arrays of row types." +#~ msgstr "PL/Python satır tiplerinin dizilere dönüşümünü desteklemez." + +#~ msgid "PyCObject_FromVoidPtr() failed" +#~ msgstr "PyCObject_FromVoidPtr() başarısız oldu" + +#~ msgid "PyCObject_AsVoidPtr() failed" +#~ msgstr "PyCObject_AsVoidPtr() başarısız oldu" diff --git a/src/pl/plpython/po/vi.po b/src/pl/plpython/po/vi.po new file mode 100644 index 0000000000..f0404ad80f --- /dev/null +++ b/src/pl/plpython/po/vi.po @@ -0,0 +1,485 @@ +# LANGUAGE message translation file for plpython +# Copyright (C) 2018 PostgreSQL Global Development Group +# This file is distributed under the same license as the plpython (PostgreSQL) package. +# FIRST AUTHOR , 2018. 
+# +msgid "" +msgstr "" +"Project-Id-Version: plpython (PostgreSQL) 11\n" +"Report-Msgid-Bugs-To: pgsql-bugs@postgresql.org\n" +"POT-Creation-Date: 2018-04-22 12:08+0000\n" +"PO-Revision-Date: 2018-05-06 22:57+0900\n" +"Language-Team: \n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=UTF-8\n" +"Content-Transfer-Encoding: 8bit\n" +"Plural-Forms: nplurals=1; plural=0;\n" +"X-Generator: Poedit 2.0.6\n" +"Last-Translator: Dang Minh Huong \n" +"Language: vi_VN\n" + +#: plpy_cursorobject.c:101 +#, c-format +msgid "plpy.cursor expected a query or a plan" +msgstr "plpy.cursor kỳ vọng một câu truy vấn hoặc một plan" + +#: plpy_cursorobject.c:184 +#, c-format +msgid "plpy.cursor takes a sequence as its second argument" +msgstr "plpy.cursor lấy một chuỗi làm đối số thứ hai" + +#: plpy_cursorobject.c:200 plpy_spi.c:211 +#, c-format +msgid "could not execute plan" +msgstr "không thể chạy plan" + +#: plpy_cursorobject.c:203 plpy_spi.c:214 +#, c-format +msgid "Expected sequence of %d argument, got %d: %s" +msgid_plural "Expected sequence of %d arguments, got %d: %s" +msgstr[0] "Kỳ vọng chuỗi của đối số %d, đã nhận %d: %s" + +#: plpy_cursorobject.c:352 +#, c-format +msgid "iterating a closed cursor" +msgstr "lặp lại con trỏ đã đóng" + +#: plpy_cursorobject.c:360 plpy_cursorobject.c:426 +#, c-format +msgid "iterating a cursor in an aborted subtransaction" +msgstr "lặp lại một con trỏ trong một subtransaction đã bị hủy bỏ" + +#: plpy_cursorobject.c:418 +#, c-format +msgid "fetch from a closed cursor" +msgstr "fetch từ một con trỏ đã bị đóng" + +#: plpy_cursorobject.c:461 plpy_spi.c:409 +#, c-format +msgid "query result has too many rows to fit in a Python list" +msgstr "kết quả câu truy vấn có quá nhiều hàng để vừa với một danh sách Python" + +#: plpy_cursorobject.c:512 +#, c-format +msgid "closing a cursor in an aborted subtransaction" +msgstr "đóng một con trỏ trong một subtransaction bị hủy bỏ" + +#: plpy_elog.c:127 plpy_elog.c:128 plpy_plpymodule.c:559 +#, c-format +msgid "%s" +msgstr "%s" + +#: plpy_exec.c:142 +#, c-format +msgid "unsupported set function return mode" +msgstr "không hỗ trợ thiết lập hàm trả về mode" + +#: plpy_exec.c:143 +#, c-format +msgid "" +"PL/Python set-returning functions only support returning one value per call." +msgstr "" +"PL/Python hàm thiết lập-trả về chỉ hỗ trợ trả về một giá trị cho một lần gọi." + +#: plpy_exec.c:156 +#, c-format +msgid "returned object cannot be iterated" +msgstr "đối tượng trả về không thể được lặp lại" + +#: plpy_exec.c:157 +#, c-format +msgid "PL/Python set-returning functions must return an iterable object." +msgstr "PL/Python hàm thiết lập-trả về phải trả về một iterable object." + +#: plpy_exec.c:171 +#, c-format +msgid "error fetching next item from iterator" +msgstr "lỗi khi fetch item tiếp theo từ iterator" + +#: plpy_exec.c:214 +#, c-format +msgid "PL/Python procedure did not return None" +msgstr "Thủ tục PL/Python đã không trả về None" + +#: plpy_exec.c:218 +#, c-format +msgid "PL/Python function with return type \"void\" did not return None" +msgstr "Hàm PL/Python với kiểu trả về là \"void\" đã không trả về None" + +#: plpy_exec.c:374 plpy_exec.c:400 +#, c-format +msgid "unexpected return value from trigger procedure" +msgstr "không mong đợi giá trị trả về từ thủ tục trigger" + +#: plpy_exec.c:375 +#, c-format +msgid "Expected None or a string." +msgstr "Kỳ vọng None hoặc một chuỗi." 
+ +#: plpy_exec.c:390 +#, c-format +msgid "" +"PL/Python trigger function returned \"MODIFY\" in a DELETE trigger -- ignored" +msgstr "" +"Hàm trigger PL/Python đã trả về \"MODIFY\" trong một DELETE trigger -- bỏ qua" + +#: plpy_exec.c:401 +#, c-format +msgid "Expected None, \"OK\", \"SKIP\", or \"MODIFY\"." +msgstr "Kỳ vọng None, \"OK\", \"SKIP\", hoặc \"MODIFY\"." + +#: plpy_exec.c:451 +#, c-format +msgid "PyList_SetItem() failed, while setting up arguments" +msgstr "Lỗi PyList_SetItem(), trong khi thiết lập đối số" + +#: plpy_exec.c:455 +#, c-format +msgid "PyDict_SetItemString() failed, while setting up arguments" +msgstr "Lỗi PyDict_SetItemString(), trong khi thiết lập đối số" + +#: plpy_exec.c:467 +#, c-format +msgid "" +"function returning record called in context that cannot accept type record" +msgstr "" +"hàm trả về bản ghi được gọi trong ngữ cảnh không thể chấp nhận kiểu bản ghi" + +#: plpy_exec.c:684 +#, c-format +msgid "while creating return value" +msgstr "trong khi tạo ra giá trị trả về" + +#: plpy_exec.c:909 +#, c-format +msgid "TD[\"new\"] deleted, cannot modify row" +msgstr "TD[\"new\"] đã bị xóa, không thể sửa đổi hàng" + +#: plpy_exec.c:914 +#, c-format +msgid "TD[\"new\"] is not a dictionary" +msgstr "TD[\"new\"] không phải là từ điển" + +#: plpy_exec.c:941 +#, c-format +msgid "TD[\"new\"] dictionary key at ordinal position %d is not a string" +msgstr "Khóa từ điển TD[\"new\"] ở vị trí thứ tự %d không phải là chuỗi" + +#: plpy_exec.c:948 +#, c-format +msgid "" +"key \"%s\" found in TD[\"new\"] does not exist as a column in the triggering " +"row" +msgstr "" +"khóa \"%s\" được tìm thấy trong TD[\"new\"] không tồn tại như là trigger mức " +"độ hàng" + +#: plpy_exec.c:953 +#, c-format +msgid "cannot set system attribute \"%s\"" +msgstr "không thể thiết lập thuộc tính hệ thống \"%s\"" + +#: plpy_exec.c:1011 +#, c-format +msgid "while modifying trigger row" +msgstr "trong khi sửa đổi trigger mức độ hàng" + +#: plpy_exec.c:1072 +#, c-format +msgid "forcibly aborting a subtransaction that has not been exited" +msgstr "buộc phải hủy bỏ một subtransaction chưa được thoát" + +#: plpy_main.c:125 +#, c-format +msgid "multiple Python libraries are present in session" +msgstr "có nhiều thư viện Python trong một phiên" + +#: plpy_main.c:126 +#, c-format +msgid "Only one Python major version can be used in one session." +msgstr "Chỉ có thể sử dụng một phiên bản chính của Python trong một phiên." 
+ +#: plpy_main.c:142 +#, c-format +msgid "untrapped error in initialization" +msgstr "lỗi chưa được bẫy trong lúc khởi tạo" + +#: plpy_main.c:165 +#, c-format +msgid "could not import \"__main__\" module" +msgstr "không thể nhập mô-đun \"__main__\"" + +#: plpy_main.c:174 +#, c-format +msgid "could not initialize globals" +msgstr "không thể khởi tạo biến global" + +#: plpy_main.c:399 +#, c-format +msgid "PL/Python procedure \"%s\"" +msgstr "Thủ tục PL/Python \"%s\"" + +#: plpy_main.c:402 +#, c-format +msgid "PL/Python function \"%s\"" +msgstr "Hàm PL/Python \"%s\"" + +#: plpy_main.c:410 +#, c-format +msgid "PL/Python anonymous code block" +msgstr "Khối mã ẩn danh PL/Python" + +#: plpy_plpymodule.c:192 plpy_plpymodule.c:195 +#, c-format +msgid "could not import \"plpy\" module" +msgstr "không thể nhập mô-đun \"plpy\"" + +#: plpy_plpymodule.c:210 +#, c-format +msgid "could not create the spiexceptions module" +msgstr "không thể tạo mô-đun spiexceptions" + +#: plpy_plpymodule.c:218 +#, c-format +msgid "could not add the spiexceptions module" +msgstr "không thể thêm mô-đun spiexceptions" + +#: plpy_plpymodule.c:286 +#, c-format +msgid "could not generate SPI exceptions" +msgstr "không thể tạo exception SPI" + +#: plpy_plpymodule.c:454 +#, c-format +msgid "could not unpack arguments in plpy.elog" +msgstr "không thể giải nén đối số trong plpy.elog" + +#: plpy_plpymodule.c:463 +msgid "could not parse error message in plpy.elog" +msgstr "không thể phân tích cú pháp thông điệp lỗi trong plpy.elog" + +#: plpy_plpymodule.c:480 +#, c-format +msgid "argument 'message' given by name and position" +msgstr "đối số 'message' được chỉ định theo tên và vị trí" + +#: plpy_plpymodule.c:507 +#, c-format +msgid "'%s' is an invalid keyword argument for this function" +msgstr "'%s' là đối số từ khóa không hợp lệ cho hàm này" + +#: plpy_plpymodule.c:518 plpy_plpymodule.c:524 +#, c-format +msgid "invalid SQLSTATE code" +msgstr "mã SQLSTATE không hợp lệ" + +#: plpy_procedure.c:230 +#, c-format +msgid "trigger functions can only be called as triggers" +msgstr "hàm trigger chỉ có thể được gọi như trigger" + +#: plpy_procedure.c:234 +#, c-format +msgid "PL/Python functions cannot return type %s" +msgstr "Hàm PL/Python không thể trả về kiểu %s" + +#: plpy_procedure.c:312 +#, c-format +msgid "PL/Python functions cannot accept type %s" +msgstr "Các hàm PL/Python không thể chấp nhận kiểu %s" + +#: plpy_procedure.c:402 +#, c-format +msgid "could not compile PL/Python function \"%s\"" +msgstr "không thể biên dịch hàm PL/Python \"%s\"" + +#: plpy_procedure.c:405 +#, c-format +msgid "could not compile anonymous PL/Python code block" +msgstr "không thể biên dịch khối mã ẩn danh PL/Python" + +#: plpy_resultobject.c:150 plpy_resultobject.c:176 plpy_resultobject.c:202 +#, c-format +msgid "command did not produce a result set" +msgstr "lệnh không tạo ra một tập hợp kết quả" + +#: plpy_spi.c:60 +#, c-format +msgid "second argument of plpy.prepare must be a sequence" +msgstr "đối số thứ hai của plpy.prepare phải là một chuỗi" + +#: plpy_spi.c:104 +#, c-format +msgid "plpy.prepare: type name at ordinal position %d is not a string" +msgstr "plpy.prepare: gõ tên tại vị trí thứ tự %d không phải là một chuỗi" + +#: plpy_spi.c:176 +#, c-format +msgid "plpy.execute expected a query or a plan" +msgstr "plpy.execute kỳ vọng một truy vấn hoặc một plan" + +#: plpy_spi.c:195 +#, c-format +msgid "plpy.execute takes a sequence as its second argument" +msgstr "plpy.execute lấy một chuỗi làm đối số thứ hai" + +#: plpy_spi.c:305 +#, c-format +msgid 
"SPI_execute_plan failed: %s" +msgstr "SPI_execute_plan lỗi: %s" + +#: plpy_spi.c:347 +#, c-format +msgid "SPI_execute failed: %s" +msgstr "SPI_execute lỗi: %s" + +#: plpy_subxactobject.c:122 +#, c-format +msgid "this subtransaction has already been entered" +msgstr "subtransaction này đã được nhập" + +#: plpy_subxactobject.c:128 plpy_subxactobject.c:186 +#, c-format +msgid "this subtransaction has already been exited" +msgstr "subtransaction này đã được thoát" + +#: plpy_subxactobject.c:180 +#, c-format +msgid "this subtransaction has not been entered" +msgstr "subtransaction này chưa được nhập" + +#: plpy_subxactobject.c:192 +#, c-format +msgid "there is no subtransaction to exit from" +msgstr "không có subtransaction để thoát khỏi" + +#: plpy_typeio.c:591 +#, c-format +msgid "could not import a module for Decimal constructor" +msgstr "không thể nhập mô-đun cho Decimal constructor" + +#: plpy_typeio.c:595 +#, c-format +msgid "no Decimal attribute in module" +msgstr "không có thuộc tính thập phân trong mô-đun" + +#: plpy_typeio.c:601 +#, c-format +msgid "conversion from numeric to Decimal failed" +msgstr "chuyển đổi từ numeric sang thập phân không thành công" + +#: plpy_typeio.c:908 +#, c-format +msgid "could not create bytes representation of Python object" +msgstr "không thể tạo đại diện cho của đối tượng Python" + +#: plpy_typeio.c:1056 +#, c-format +msgid "could not create string representation of Python object" +msgstr "không thể tạo ra chuỗi đại diện cho đối tượng Python" + +#: plpy_typeio.c:1067 +#, c-format +msgid "" +"could not convert Python object into cstring: Python string representation " +"appears to contain null bytes" +msgstr "" +"không thể chuyển đổi đối tượng Python thành cstring: đại diện chuỗi Python " +"chứa byte null" + +#: plpy_typeio.c:1176 +#, c-format +msgid "number of array dimensions exceeds the maximum allowed (%d)" +msgstr "số lượng hướng của mảng vượt quá số lượng tối đa cho phép (%d)" + +#: plpy_typeio.c:1180 +#, c-format +msgid "could not determine sequence length for function return value" +msgstr "không thể xác định độ dài chuỗi cho giá trị trả về hàm" + +#: plpy_typeio.c:1183 plpy_typeio.c:1187 +#, c-format +msgid "array size exceeds the maximum allowed" +msgstr "kích thước mảng vượt quá mức tối đa cho phép" + +#: plpy_typeio.c:1213 +#, c-format +msgid "" +"return value of function with array return type is not a Python sequence" +msgstr "" +"giá trị trả về của hàm với kiểu trả về là mảng không phải là một chuỗi Python" + +#: plpy_typeio.c:1259 +#, c-format +msgid "wrong length of inner sequence: has length %d, but %d was expected" +msgstr "sai độ dài của chuỗi bên trong: có độ dài %d, nhưng %d được mong đợi" + +#: plpy_typeio.c:1261 +#, c-format +msgid "" +"To construct a multidimensional array, the inner sequences must all have the " +"same length." +msgstr "" +"Để xây dựng một mảng đa chiều, các chuỗi bên trong phải có cùng độ dài." + +#: plpy_typeio.c:1340 +#, c-format +msgid "malformed record literal: \"%s\"" +msgstr "bản ghi literal không đúng định dạng: \"%s\"" + +#: plpy_typeio.c:1341 +#, c-format +msgid "Missing left parenthesis." +msgstr "Thiếu dấu ngoặc đơn trái." + +#: plpy_typeio.c:1342 plpy_typeio.c:1543 +#, c-format +msgid "" +"To return a composite type in an array, return the composite type as a " +"Python tuple, e.g., \"[('foo',)]\"." +msgstr "" +"Để trả về kiểu phức hợp trong một mảng, hãy trả về kiểu phức hợp dưới dạng " +"một hàng Python, ví dụ: \"[('foo',)]\"." 
+ +#: plpy_typeio.c:1389 +#, c-format +msgid "key \"%s\" not found in mapping" +msgstr "không tìm thấy khóa \"%s\" trong ánh xạ" + +#: plpy_typeio.c:1390 +#, c-format +msgid "" +"To return null in a column, add the value None to the mapping with the key " +"named after the column." +msgstr "" +"Để trả về null trong một cột, thêm giá trị None vào ánh xạ với khóa được đặt " +"tên sau cột." + +#: plpy_typeio.c:1443 +#, c-format +msgid "length of returned sequence did not match number of columns in row" +msgstr "độ dài của chuỗi được trả về không khớp với số cột trong hàng" + +#: plpy_typeio.c:1541 +#, c-format +msgid "attribute \"%s\" does not exist in Python object" +msgstr "thuộc tính \"% s\" không tồn tại trong đối tượng Python" + +#: plpy_typeio.c:1544 +#, c-format +msgid "" +"To return null in a column, let the returned object have an attribute named " +"after column with value None." +msgstr "" +"Để trả về null trong một cột, hãy để đối tượng trả về có một thuộc tính được " +"đặt tên sau cột với giá trị None." + +#: plpy_util.c:35 +#, c-format +msgid "could not convert Python Unicode object to bytes" +msgstr "không thể chuyển đổi đối tượng Python Unicode thành byte" + +#: plpy_util.c:41 +#, c-format +msgid "could not extract bytes from encoded string" +msgstr "không thể trích xuất byte từ chuỗi đã được mã hóa" diff --git a/src/pl/plpython/sql/plpython_call.sql b/src/pl/plpython/sql/plpython_call.sql new file mode 100644 index 0000000000..2f792f92bd --- /dev/null +++ b/src/pl/plpython/sql/plpython_call.sql @@ -0,0 +1,61 @@ +-- +-- Tests for procedures / CALL syntax +-- + +CREATE PROCEDURE test_proc1() +LANGUAGE plpythonu +AS $$ +pass +$$; + +CALL test_proc1(); + + +-- error: can't return non-None +CREATE PROCEDURE test_proc2() +LANGUAGE plpythonu +AS $$ +return 5 +$$; + +CALL test_proc2(); + + +CREATE TABLE test1 (a int); + +CREATE PROCEDURE test_proc3(x int) +LANGUAGE plpythonu +AS $$ +plpy.execute("INSERT INTO test1 VALUES (%s)" % x) +$$; + +CALL test_proc3(55); + +SELECT * FROM test1; + + +-- output arguments + +CREATE PROCEDURE test_proc5(INOUT a text) +LANGUAGE plpythonu +AS $$ +return [a + '+' + a] +$$; + +CALL test_proc5('abc'); + + +CREATE PROCEDURE test_proc6(a int, INOUT b int, INOUT c int) +LANGUAGE plpythonu +AS $$ +return (b * a, c * a) +$$; + +CALL test_proc6(2, 3, 4); + + +DROP PROCEDURE test_proc1; +DROP PROCEDURE test_proc2; +DROP PROCEDURE test_proc3; + +DROP TABLE test1; diff --git a/src/pl/plpython/sql/plpython_error.sql b/src/pl/plpython/sql/plpython_error.sql index d0df7e607d..d712eb1078 100644 --- a/src/pl/plpython/sql/plpython_error.sql +++ b/src/pl/plpython/sql/plpython_error.sql @@ -328,3 +328,19 @@ EXCEPTION WHEN SQLSTATE 'SILLY' THEN -- NOOP END $$ LANGUAGE plpgsql; + +/* test the context stack trace for nested execution levels + */ +CREATE FUNCTION notice_innerfunc() RETURNS int AS $$ +plpy.execute("DO LANGUAGE plpythonu $x$ plpy.notice('inside DO') $x$") +return 1 +$$ LANGUAGE plpythonu; + +CREATE FUNCTION notice_outerfunc() RETURNS int AS $$ +plpy.execute("SELECT notice_innerfunc()") +return 1 +$$ LANGUAGE plpythonu; + +\set SHOW_CONTEXT always + +SELECT notice_outerfunc(); diff --git a/src/pl/plpython/sql/plpython_subtransaction.sql b/src/pl/plpython/sql/plpython_subtransaction.sql index 3c188e3dd2..398c65720c 100644 --- a/src/pl/plpython/sql/plpython_subtransaction.sql +++ b/src/pl/plpython/sql/plpython_subtransaction.sql @@ -80,7 +80,7 @@ with plpy.subtransaction(): except plpy.SPIError, e: if not swallow: raise - plpy.notice("Swallowed %r" % e) + 
plpy.notice("Swallowed %s(%r)" % (e.__class__.__name__, e.args[0])) return "ok" $$ LANGUAGE plpythonu; diff --git a/src/pl/plpython/sql/plpython_transaction.sql b/src/pl/plpython/sql/plpython_transaction.sql new file mode 100644 index 0000000000..33b37e5b7f --- /dev/null +++ b/src/pl/plpython/sql/plpython_transaction.sql @@ -0,0 +1,152 @@ +CREATE TABLE test1 (a int, b text); + + +CREATE PROCEDURE transaction_test1() +LANGUAGE plpythonu +AS $$ +for i in range(0, 10): + plpy.execute("INSERT INTO test1 (a) VALUES (%d)" % i) + if i % 2 == 0: + plpy.commit() + else: + plpy.rollback() +$$; + +CALL transaction_test1(); + +SELECT * FROM test1; + + +TRUNCATE test1; + +DO +LANGUAGE plpythonu +$$ +for i in range(0, 10): + plpy.execute("INSERT INTO test1 (a) VALUES (%d)" % i) + if i % 2 == 0: + plpy.commit() + else: + plpy.rollback() +$$; + +SELECT * FROM test1; + + +TRUNCATE test1; + +-- not allowed in a function +CREATE FUNCTION transaction_test2() RETURNS int +LANGUAGE plpythonu +AS $$ +for i in range(0, 10): + plpy.execute("INSERT INTO test1 (a) VALUES (%d)" % i) + if i % 2 == 0: + plpy.commit() + else: + plpy.rollback() +return 1 +$$; + +SELECT transaction_test2(); + +SELECT * FROM test1; + + +-- also not allowed if procedure is called from a function +CREATE FUNCTION transaction_test3() RETURNS int +LANGUAGE plpythonu +AS $$ +plpy.execute("CALL transaction_test1()") +return 1 +$$; + +SELECT transaction_test3(); + +SELECT * FROM test1; + + +-- DO block inside function +CREATE FUNCTION transaction_test4() RETURNS int +LANGUAGE plpythonu +AS $$ +plpy.execute("DO LANGUAGE plpythonu $x$ plpy.commit() $x$") +return 1 +$$; + +SELECT transaction_test4(); + + +-- commit inside subtransaction (prohibited) +DO LANGUAGE plpythonu $$ +s = plpy.subtransaction() +s.enter() +plpy.commit() +$$; + + +-- commit inside cursor loop +CREATE TABLE test2 (x int); +INSERT INTO test2 VALUES (0), (1), (2), (3), (4); + +TRUNCATE test1; + +DO LANGUAGE plpythonu $$ +for row in plpy.cursor("SELECT * FROM test2 ORDER BY x"): + plpy.execute("INSERT INTO test1 (a) VALUES (%s)" % row['x']) + plpy.commit() +$$; + +SELECT * FROM test1; + +-- check that this doesn't leak a holdable portal +SELECT * FROM pg_cursors; + + +-- error in cursor loop with commit +TRUNCATE test1; + +DO LANGUAGE plpythonu $$ +for row in plpy.cursor("SELECT * FROM test2 ORDER BY x"): + plpy.execute("INSERT INTO test1 (a) VALUES (12/(%s-2))" % row['x']) + plpy.commit() +$$; + +SELECT * FROM test1; + +SELECT * FROM pg_cursors; + + +-- rollback inside cursor loop +TRUNCATE test1; + +DO LANGUAGE plpythonu $$ +for row in plpy.cursor("SELECT * FROM test2 ORDER BY x"): + plpy.execute("INSERT INTO test1 (a) VALUES (%s)" % row['x']) + plpy.rollback() +$$; + +SELECT * FROM test1; + +SELECT * FROM pg_cursors; + + +-- first commit then rollback inside cursor loop +TRUNCATE test1; + +DO LANGUAGE plpythonu $$ +for row in plpy.cursor("SELECT * FROM test2 ORDER BY x"): + plpy.execute("INSERT INTO test1 (a) VALUES (%s)" % row['x']) + if row['x'] % 2 == 0: + plpy.commit() + else: + plpy.rollback() +$$; + +SELECT * FROM test1; + +SELECT * FROM pg_cursors; + + +DROP TABLE test1; +DROP TABLE test2; diff --git a/src/pl/plpython/sql/plpython_types.sql b/src/pl/plpython/sql/plpython_types.sql index 8c57297c24..cc0524ee80 100644 --- a/src/pl/plpython/sql/plpython_types.sql +++ b/src/pl/plpython/sql/plpython_types.sql @@ -387,6 +387,55 @@ $$ LANGUAGE plpythonu; SELECT * FROM test_type_conversion_array_domain_check_violation(); +-- +-- Arrays of domains +-- + +CREATE FUNCTION 
test_read_uint2_array(x uint2[]) RETURNS uint2 AS $$ +plpy.info(x, type(x)) +return x[0] +$$ LANGUAGE plpythonu; + +select test_read_uint2_array(array[1::uint2]); + +CREATE FUNCTION test_build_uint2_array(x int2) RETURNS uint2[] AS $$ +return [x, x] +$$ LANGUAGE plpythonu; + +select test_build_uint2_array(1::int2); +select test_build_uint2_array(-1::int2); -- fail + +-- +-- ideally this would work, but for now it doesn't, because the return value +-- is [[2,4], [2,4]] which our conversion code thinks should become a 2-D +-- integer array, not an array of arrays. +-- +CREATE FUNCTION test_type_conversion_domain_array(x integer[]) + RETURNS ordered_pair_domain[] AS $$ +return [x, x] +$$ LANGUAGE plpythonu; + +select test_type_conversion_domain_array(array[2,4]); +select test_type_conversion_domain_array(array[4,2]); -- fail + +CREATE FUNCTION test_type_conversion_domain_array2(x ordered_pair_domain) + RETURNS integer AS $$ +plpy.info(x, type(x)) +return x[1] +$$ LANGUAGE plpythonu; + +select test_type_conversion_domain_array2(array[2,4]); +select test_type_conversion_domain_array2(array[4,2]); -- fail + +CREATE FUNCTION test_type_conversion_array_domain_array(x ordered_pair_domain[]) + RETURNS ordered_pair_domain AS $$ +plpy.info(x, type(x)) +return x[0] +$$ LANGUAGE plpythonu; + +select test_type_conversion_array_domain_array(array[array[2,4]::ordered_pair_domain]); + + --- --- Composite types --- @@ -430,6 +479,48 @@ ALTER TYPE named_pair RENAME TO named_pair_2; SELECT test_composite_type_input(row(1, 2)); +-- +-- Domains within composite +-- + +CREATE TYPE nnint_container AS (f1 int, f2 nnint); + +CREATE FUNCTION nnint_test(x int, y int) RETURNS nnint_container AS $$ +return {'f1': x, 'f2': y} +$$ LANGUAGE plpythonu; + +SELECT nnint_test(null, 3); +SELECT nnint_test(3, null); -- fail + + +-- +-- Domains of composite +-- + +CREATE DOMAIN ordered_named_pair AS named_pair_2 CHECK((VALUE).i <= (VALUE).j); + +CREATE FUNCTION read_ordered_named_pair(p ordered_named_pair) RETURNS integer AS $$ +return p['i'] + p['j'] +$$ LANGUAGE plpythonu; + +SELECT read_ordered_named_pair(row(1, 2)); +SELECT read_ordered_named_pair(row(2, 1)); -- fail + +CREATE FUNCTION build_ordered_named_pair(i int, j int) RETURNS ordered_named_pair AS $$ +return {'i': i, 'j': j} +$$ LANGUAGE plpythonu; + +SELECT build_ordered_named_pair(1,2); +SELECT build_ordered_named_pair(2,1); -- fail + +CREATE FUNCTION build_ordered_named_pairs(i int, j int) RETURNS ordered_named_pair[] AS $$ +return [{'i': i, 'j': j}, {'i': i, 'j': j+1}] +$$ LANGUAGE plpythonu; + +SELECT build_ordered_named_pairs(1,2); +SELECT build_ordered_named_pairs(2,1); -- fail + + -- -- Prepared statements -- diff --git a/src/pl/tcl/Makefile b/src/pl/tcl/Makefile index b8971d3cc8..ef61ee596e 100644 --- a/src/pl/tcl/Makefile +++ b/src/pl/tcl/Makefile @@ -28,7 +28,7 @@ DATA = pltcl.control pltcl--1.0.sql pltcl--unpackaged--1.0.sql \ pltclu.control pltclu--1.0.sql pltclu--unpackaged--1.0.sql REGRESS_OPTS = --dbname=$(PL_TESTDB) --load-extension=pltcl -REGRESS = pltcl_setup pltcl_queries pltcl_start_proc pltcl_subxact pltcl_unicode +REGRESS = pltcl_setup pltcl_queries pltcl_call pltcl_start_proc pltcl_subxact pltcl_unicode pltcl_transaction # Tcl on win32 ships with import libraries only for Microsoft Visual C++, # which are not compatible with mingw gcc. 
Therefore we need to build a diff --git a/src/pl/tcl/expected/pltcl_call.out b/src/pl/tcl/expected/pltcl_call.out new file mode 100644 index 0000000000..d290c8fbd0 --- /dev/null +++ b/src/pl/tcl/expected/pltcl_call.out @@ -0,0 +1,55 @@ +CREATE PROCEDURE test_proc1() +LANGUAGE pltcl +AS $$ +unset +$$; +CALL test_proc1(); +CREATE PROCEDURE test_proc2() +LANGUAGE pltcl +AS $$ +return 5 +$$; +CALL test_proc2(); +CREATE TABLE test1 (a int); +CREATE PROCEDURE test_proc3(x int) +LANGUAGE pltcl +AS $$ +spi_exec "INSERT INTO test1 VALUES ($1)" +$$; +CALL test_proc3(55); +SELECT * FROM test1; + a +---- + 55 +(1 row) + +-- output arguments +CREATE PROCEDURE test_proc5(INOUT a text) +LANGUAGE pltcl +AS $$ +set aa [concat $1 "+" $1] +return [list a $aa] +$$; +CALL test_proc5('abc'); + a +----------- + abc + abc +(1 row) + +CREATE PROCEDURE test_proc6(a int, INOUT b int, INOUT c int) +LANGUAGE pltcl +AS $$ +set bb [expr $2 * $1] +set cc [expr $3 * $1] +return [list b $bb c $cc] +$$; +CALL test_proc6(2, 3, 4); + b | c +---+--- + 6 | 8 +(1 row) + +DROP PROCEDURE test_proc1; +DROP PROCEDURE test_proc2; +DROP PROCEDURE test_proc3; +DROP TABLE test1; diff --git a/src/pl/tcl/expected/pltcl_queries.out b/src/pl/tcl/expected/pltcl_queries.out index 5f50f46887..736671cc1b 100644 --- a/src/pl/tcl/expected/pltcl_queries.out +++ b/src/pl/tcl/expected/pltcl_queries.out @@ -327,6 +327,46 @@ select tcl_composite_arg_ref2(row('tkey', 42, 'ref2')); ref2 (1 row) +-- More tests for composite argument/result types +create domain d_dta1 as T_dta1 check ((value).ref1 > 0); +create function tcl_record_arg(record, fldname text) returns int as ' + return $1($2) +' language pltcl; +select tcl_record_arg(row('tkey', 42, 'ref2')::T_dta1, 'ref1'); + tcl_record_arg +---------------- + 42 +(1 row) + +select tcl_record_arg(row('tkey', 42, 'ref2')::d_dta1, 'ref1'); + tcl_record_arg +---------------- + 42 +(1 row) + +select tcl_record_arg(row(2,4), 'f2'); + tcl_record_arg +---------------- + 4 +(1 row) + +create function tcl_cdomain_arg(d_dta1) returns int as ' + return $1(ref1) +' language pltcl; +select tcl_cdomain_arg(row('tkey', 42, 'ref2')); + tcl_cdomain_arg +----------------- + 42 +(1 row) + +select tcl_cdomain_arg(row('tkey', 42, 'ref2')::T_dta1); + tcl_cdomain_arg +----------------- + 42 +(1 row) + +select tcl_cdomain_arg(row('tkey', -1, 'ref2')); -- fail +ERROR: value for domain d_dta1 violates check constraint "d_dta1_check" -- Test argisnull primitive select tcl_argisnull('foo'); tcl_argisnull @@ -438,6 +478,60 @@ return_next [list a 1 b 2 cow 3] $$ language pltcl; select bad_field_srf(); ERROR: column name/value list contains nonexistent column name "cow" +-- test composite and domain-over-composite results +create function tcl_composite_result(int) returns T_dta1 as $$ +return [list tkey tkey1 ref1 $1 ref2 ref22] +$$ language pltcl; +select tcl_composite_result(1001); + tcl_composite_result +-------------------------------------------- + ("tkey1 ",1001,"ref22 ") +(1 row) + +select * from tcl_composite_result(1002); + tkey | ref1 | ref2 +------------+------+---------------------- + tkey1 | 1002 | ref22 +(1 row) + +create function tcl_dcomposite_result(int) returns d_dta1 as $$ +return [list tkey tkey2 ref1 $1 ref2 ref42] +$$ language pltcl; +select tcl_dcomposite_result(1001); + tcl_dcomposite_result +-------------------------------------------- + ("tkey2 ",1001,"ref42 ") +(1 row) + +select * from tcl_dcomposite_result(1002); + tkey | ref1 | ref2 +------------+------+---------------------- + tkey2 | 1002 | ref42 +(1 row) + 
+select * from tcl_dcomposite_result(-1); -- fail +ERROR: value for domain d_dta1 violates check constraint "d_dta1_check" +create function tcl_record_result(int) returns record as $$ +return [list q1 sometext q2 $1 q3 moretext] +$$ language pltcl; +select tcl_record_result(42); -- fail +ERROR: function returning record called in context that cannot accept type record +select * from tcl_record_result(42); -- fail +ERROR: a column definition list is required for functions returning "record" at character 15 +select * from tcl_record_result(42) as (q1 text, q2 int, q3 text); + q1 | q2 | q3 +----------+----+---------- + sometext | 42 | moretext +(1 row) + +select * from tcl_record_result(42) as (q1 text, q2 int, q3 text, q4 int); + q1 | q2 | q3 | q4 +----------+----+----------+---- + sometext | 42 | moretext | +(1 row) + +select * from tcl_record_result(42) as (q1 text, q2 int, q4 int); -- fail +ERROR: column name/value list contains nonexistent column name "q3" -- test quote select tcl_eval('quote foo bar'); ERROR: wrong # args: should be "quote string" diff --git a/src/pl/tcl/expected/pltcl_subxact.out b/src/pl/tcl/expected/pltcl_subxact.out index 4393f4acf6..5e19bbbc63 100644 --- a/src/pl/tcl/expected/pltcl_subxact.out +++ b/src/pl/tcl/expected/pltcl_subxact.out @@ -71,9 +71,9 @@ SELECT * FROM subtransaction_tbl; TRUNCATE subtransaction_tbl; SELECT pltcl_wrapper('SELECT subtransaction_ctx_test(''SPI'')'); - pltcl_wrapper -------------------------------------------------- - ERROR: invalid input syntax for integer: "oops" + pltcl_wrapper +------------------------------------------------------ + ERROR: invalid input syntax for type integer: "oops" (1 row) SELECT * FROM subtransaction_tbl; diff --git a/src/pl/tcl/expected/pltcl_transaction.out b/src/pl/tcl/expected/pltcl_transaction.out new file mode 100644 index 0000000000..007204b99a --- /dev/null +++ b/src/pl/tcl/expected/pltcl_transaction.out @@ -0,0 +1,100 @@ +-- suppress CONTEXT so that function OIDs aren't in output +\set VERBOSITY terse +CREATE TABLE test1 (a int, b text); +CREATE PROCEDURE transaction_test1() +LANGUAGE pltcl +AS $$ +for {set i 0} {$i < 10} {incr i} { + spi_exec "INSERT INTO test1 (a) VALUES ($i)" + if {$i % 2 == 0} { + commit + } else { + rollback + } +} +$$; +CALL transaction_test1(); +SELECT * FROM test1; + a | b +---+--- + 0 | + 2 | + 4 | + 6 | + 8 | +(5 rows) + +TRUNCATE test1; +-- not allowed in a function +CREATE FUNCTION transaction_test2() RETURNS int +LANGUAGE pltcl +AS $$ +for {set i 0} {$i < 10} {incr i} { + spi_exec "INSERT INTO test1 (a) VALUES ($i)" + if {$i % 2 == 0} { + commit + } else { + rollback + } +} +return 1 +$$; +SELECT transaction_test2(); +ERROR: invalid transaction termination +SELECT * FROM test1; + a | b +---+--- +(0 rows) + +-- also not allowed if procedure is called from a function +CREATE FUNCTION transaction_test3() RETURNS int +LANGUAGE pltcl +AS $$ +spi_exec "CALL transaction_test1()" +return 1 +$$; +SELECT transaction_test3(); +ERROR: invalid transaction termination +SELECT * FROM test1; + a | b +---+--- +(0 rows) + +-- commit inside cursor loop +CREATE TABLE test2 (x int); +INSERT INTO test2 VALUES (0), (1), (2), (3), (4); +TRUNCATE test1; +CREATE PROCEDURE transaction_test4a() +LANGUAGE pltcl +AS $$ +spi_exec -array row "SELECT * FROM test2 ORDER BY x" { + spi_exec "INSERT INTO test1 (a) VALUES ($row(x))" + commit +} +$$; +CALL transaction_test4a(); +ERROR: cannot commit while a subtransaction is active +SELECT * FROM test1; + a | b +---+--- +(0 rows) + +-- rollback inside cursor 
loop +TRUNCATE test1; +CREATE PROCEDURE transaction_test4b() +LANGUAGE pltcl +AS $$ +spi_exec -array row "SELECT * FROM test2 ORDER BY x" { + spi_exec "INSERT INTO test1 (a) VALUES ($row(x))" + rollback +} +$$; +CALL transaction_test4b(); +ERROR: cannot roll back while a subtransaction is active +SELECT * FROM test1; + a | b +---+--- +(0 rows) + +DROP TABLE test1; +DROP TABLE test2; diff --git a/src/pl/tcl/generate-pltclerrcodes.pl b/src/pl/tcl/generate-pltclerrcodes.pl index b4e429a4fb..b5a595510c 100644 --- a/src/pl/tcl/generate-pltclerrcodes.pl +++ b/src/pl/tcl/generate-pltclerrcodes.pl @@ -1,7 +1,7 @@ #!/usr/bin/perl # # Generate the pltclerrcodes.h header from errcodes.txt -# Copyright (c) 2000-2017, PostgreSQL Global Development Group +# Copyright (c) 2000-2018, PostgreSQL Global Development Group use warnings; use strict; diff --git a/src/pl/tcl/nls.mk b/src/pl/tcl/nls.mk index f896dcbfa4..88c2679375 100644 --- a/src/pl/tcl/nls.mk +++ b/src/pl/tcl/nls.mk @@ -1,6 +1,6 @@ # src/pl/tcl/nls.mk CATALOG_NAME = pltcl -AVAIL_LANGUAGES = cs de es fr it ja ko pl pt_BR ro ru sv tr zh_CN zh_TW +AVAIL_LANGUAGES = cs de es fr it ja ko pl pt_BR ro ru sv tr vi zh_CN zh_TW GETTEXT_FILES = pltcl.c GETTEXT_TRIGGERS = $(BACKEND_COMMON_GETTEXT_TRIGGERS) GETTEXT_FLAGS = $(BACKEND_COMMON_GETTEXT_FLAGS) diff --git a/src/pl/tcl/pltcl.c b/src/pl/tcl/pltcl.c index ed494e1210..e2fa43b890 100644 --- a/src/pl/tcl/pltcl.c +++ b/src/pl/tcl/pltcl.c @@ -143,10 +143,13 @@ typedef struct pltcl_proc_desc bool fn_readonly; /* is function readonly? */ bool lanpltrusted; /* is it pltcl (vs. pltclu)? */ pltcl_interp_desc *interp_desc; /* interpreter to use */ + Oid result_typid; /* OID of fn's result type */ FmgrInfo result_in_func; /* input function for fn's result type */ Oid result_typioparam; /* param to pass to same */ bool fn_retisset; /* true if function returns a set */ bool fn_retistuple; /* true if function returns composite */ + bool fn_retisdomain; /* true if function returns domain */ + void *domain_info; /* opaque cache for domain checks */ int nargs; /* number of arguments */ /* these arrays have nargs entries: */ FmgrInfo *arg_out_func; /* output fns for arg types */ @@ -308,6 +311,10 @@ static int pltcl_SPI_lastoid(ClientData cdata, Tcl_Interp *interp, int objc, Tcl_Obj *const objv[]); static int pltcl_subtransaction(ClientData cdata, Tcl_Interp *interp, int objc, Tcl_Obj *const objv[]); +static int pltcl_commit(ClientData cdata, Tcl_Interp *interp, + int objc, Tcl_Obj *const objv[]); +static int pltcl_rollback(ClientData cdata, Tcl_Interp *interp, + int objc, Tcl_Obj *const objv[]); static void pltcl_subtrans_begin(MemoryContext oldcontext, ResourceOwner oldowner); @@ -520,6 +527,10 @@ pltcl_init_interp(pltcl_interp_desc *interp_desc, Oid prolang, bool pltrusted) pltcl_SPI_lastoid, NULL, NULL); Tcl_CreateObjCommand(interp, "subtransaction", pltcl_subtransaction, NULL, NULL); + Tcl_CreateObjCommand(interp, "commit", + pltcl_commit, NULL, NULL); + Tcl_CreateObjCommand(interp, "rollback", + pltcl_rollback, NULL, NULL); /************************************************************ * Call the appropriate start_proc, if there is one. 
@@ -614,7 +625,7 @@ call_pltcl_start_proc(Oid prolang, bool pltrusted) /* Current user must have permission to call function */ aclresult = pg_proc_aclcheck(procOid, GetUserId(), ACL_EXECUTE); if (aclresult != ACLCHECK_OK) - aclcheck_error(aclresult, ACL_KIND_PROC, start_proc); + aclcheck_error(aclresult, OBJECT_FUNCTION, start_proc); /* Get the function's pg_proc entry */ procTup = SearchSysCache1(PROCOID, ObjectIdGetDatum(procOid)); @@ -793,6 +804,7 @@ static Datum pltcl_func_handler(PG_FUNCTION_ARGS, pltcl_call_state *call_state, bool pltrusted) { + bool nonatomic; pltcl_proc_desc *prodesc; Tcl_Interp *volatile interp; Tcl_Obj *tcl_cmd; @@ -800,8 +812,12 @@ pltcl_func_handler(PG_FUNCTION_ARGS, pltcl_call_state *call_state, int tcl_rc; Datum retval; + nonatomic = fcinfo->context && + IsA(fcinfo->context, CallContext) && + !castNode(CallContext, fcinfo->context)->atomic; + /* Connect to SPI manager */ - if (SPI_connect() != SPI_OK_CONNECT) + if (SPI_connect_ext(nonatomic ? SPI_OPT_NONATOMIC : 0) != SPI_OK_CONNECT) elog(ERROR, "could not connect to SPI manager"); /* Find or compile the function */ @@ -988,11 +1004,26 @@ pltcl_func_handler(PG_FUNCTION_ARGS, pltcl_call_state *call_state, * result type is a named composite type, so it's not exactly trivial. * Maybe worth improving someday. */ - if (get_call_result_type(fcinfo, NULL, &td) != TYPEFUNC_COMPOSITE) - ereport(ERROR, - (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("function returning record called in context " - "that cannot accept type record"))); + switch (get_call_result_type(fcinfo, NULL, &td)) + { + case TYPEFUNC_COMPOSITE: + /* success */ + break; + case TYPEFUNC_COMPOSITE_DOMAIN: + Assert(prodesc->fn_retisdomain); + break; + case TYPEFUNC_RECORD: + /* failed to determine actual type of RECORD */ + ereport(ERROR, + (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), + errmsg("function returning record called in context " + "that cannot accept type record"))); + break; + default: + /* result type isn't composite? */ + elog(ERROR, "return type must be a row type"); + break; + } Assert(!call_state->ret_tupdesc); Assert(!call_state->attinmeta); @@ -1106,11 +1137,13 @@ pltcl_trigger_handler(PG_FUNCTION_ARGS, pltcl_call_state *call_state, Tcl_ListObjAppendElement(NULL, tcl_trigtup, Tcl_NewObj()); for (i = 0; i < tupdesc->natts; i++) { - if (tupdesc->attrs[i]->attisdropped) + Form_pg_attribute att = TupleDescAttr(tupdesc, i); + + if (att->attisdropped) Tcl_ListObjAppendElement(NULL, tcl_trigtup, Tcl_NewObj()); else Tcl_ListObjAppendElement(NULL, tcl_trigtup, - Tcl_NewStringObj(utf_e2u(NameStr(tupdesc->attrs[i]->attname)), -1)); + Tcl_NewStringObj(utf_e2u(NameStr(att->attname)), -1)); } Tcl_ListObjAppendElement(NULL, tcl_cmd, tcl_trigtup); @@ -1423,7 +1456,7 @@ compile_pltcl_function(Oid fn_oid, Oid tgreloid, Datum prosrcdatum; bool isnull; char *proc_source; - char buf[32]; + char buf[48]; Tcl_Interp *interp; int i; int tcl_rc; @@ -1446,10 +1479,9 @@ compile_pltcl_function(Oid fn_oid, Oid tgreloid, /************************************************************ * Allocate a context that will hold all PG data for the procedure. - * We use the internal proc name as the context name. 
************************************************************/ proc_cxt = AllocSetContextCreate(TopMemoryContext, - internal_proname, + "PL/Tcl function", ALLOCSET_SMALL_SIZES); /************************************************************ @@ -1459,6 +1491,7 @@ compile_pltcl_function(Oid fn_oid, Oid tgreloid, oldcontext = MemoryContextSwitchTo(proc_cxt); prodesc = (pltcl_proc_desc *) palloc0(sizeof(pltcl_proc_desc)); prodesc->user_proname = pstrdup(NameStr(procStruct->proname)); + MemoryContextSetIdentifier(proc_cxt, prodesc->user_proname); prodesc->internal_proname = pstrdup(internal_proname); prodesc->fn_cxt = proc_cxt; prodesc->fn_refcount = 0; @@ -1488,22 +1521,21 @@ compile_pltcl_function(Oid fn_oid, Oid tgreloid, ************************************************************/ if (!is_trigger && !is_event_trigger) { - typeTup = - SearchSysCache1(TYPEOID, - ObjectIdGetDatum(procStruct->prorettype)); + Oid rettype = procStruct->prorettype; + + typeTup = SearchSysCache1(TYPEOID, ObjectIdGetDatum(rettype)); if (!HeapTupleIsValid(typeTup)) - elog(ERROR, "cache lookup failed for type %u", - procStruct->prorettype); + elog(ERROR, "cache lookup failed for type %u", rettype); typeStruct = (Form_pg_type) GETSTRUCT(typeTup); /* Disallow pseudotype result, except VOID and RECORD */ if (typeStruct->typtype == TYPTYPE_PSEUDO) { - if (procStruct->prorettype == VOIDOID || - procStruct->prorettype == RECORDOID) + if (rettype == VOIDOID || + rettype == RECORDOID) /* okay */ ; - else if (procStruct->prorettype == TRIGGEROID || - procStruct->prorettype == EVTTRIGGEROID) + else if (rettype == TRIGGEROID || + rettype == EVTTRIGGEROID) ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), errmsg("trigger functions can only be called as triggers"))); @@ -1511,17 +1543,19 @@ compile_pltcl_function(Oid fn_oid, Oid tgreloid, ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), errmsg("PL/Tcl functions cannot return type %s", - format_type_be(procStruct->prorettype)))); + format_type_be(rettype)))); } + prodesc->result_typid = rettype; fmgr_info_cxt(typeStruct->typinput, &(prodesc->result_in_func), proc_cxt); prodesc->result_typioparam = getTypeIOParam(typeTup); prodesc->fn_retisset = procStruct->proretset; - prodesc->fn_retistuple = (procStruct->prorettype == RECORDOID || - typeStruct->typtype == TYPTYPE_COMPOSITE); + prodesc->fn_retistuple = type_is_rowtype(rettype); + prodesc->fn_retisdomain = (typeStruct->typtype == TYPTYPE_DOMAIN); + prodesc->domain_info = NULL; ReleaseSysCache(typeTup); } @@ -1535,21 +1569,22 @@ compile_pltcl_function(Oid fn_oid, Oid tgreloid, proc_internal_args[0] = '\0'; for (i = 0; i < prodesc->nargs; i++) { - typeTup = SearchSysCache1(TYPEOID, - ObjectIdGetDatum(procStruct->proargtypes.values[i])); + Oid argtype = procStruct->proargtypes.values[i]; + + typeTup = SearchSysCache1(TYPEOID, ObjectIdGetDatum(argtype)); if (!HeapTupleIsValid(typeTup)) - elog(ERROR, "cache lookup failed for type %u", - procStruct->proargtypes.values[i]); + elog(ERROR, "cache lookup failed for type %u", argtype); typeStruct = (Form_pg_type) GETSTRUCT(typeTup); - /* Disallow pseudotype argument */ - if (typeStruct->typtype == TYPTYPE_PSEUDO) + /* Disallow pseudotype argument, except RECORD */ + if (typeStruct->typtype == TYPTYPE_PSEUDO && + argtype != RECORDOID) ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), errmsg("PL/Tcl functions cannot accept type %s", - format_type_be(procStruct->proargtypes.values[i])))); + format_type_be(argtype)))); - if (typeStruct->typtype == TYPTYPE_COMPOSITE) + if 
(type_is_rowtype(argtype)) { prodesc->arg_is_rowtype[i] = true; snprintf(buf, sizeof(buf), "__PLTcl_Tup_%d", i + 1); @@ -2416,7 +2451,8 @@ pltcl_process_SPI_result(Tcl_Interp *interp, Tcl_SetObjResult(interp, Tcl_NewIntObj(0)); break; } - /* FALL THRU for utility returning tuples */ + /* fall through for utility returning tuples */ + /* FALLTHROUGH */ case SPI_OK_SELECT: case SPI_OK_INSERT_RETURNING: @@ -2726,7 +2762,7 @@ pltcl_SPI_execute_plan(ClientData cdata, Tcl_Interp *interp, } /************************************************************ - * If there was a argtype list on preparation, we need + * If there was an argtype list on preparation, we need * an argument value list now ************************************************************/ if (qdesc->nargs > 0) @@ -2908,6 +2944,86 @@ pltcl_subtransaction(ClientData cdata, Tcl_Interp *interp, } +/********************************************************************** + * pltcl_commit() + * + * Commit the transaction and start a new one. + **********************************************************************/ +static int +pltcl_commit(ClientData cdata, Tcl_Interp *interp, + int objc, Tcl_Obj *const objv[]) +{ + MemoryContext oldcontext = CurrentMemoryContext; + + PG_TRY(); + { + SPI_commit(); + SPI_start_transaction(); + } + PG_CATCH(); + { + ErrorData *edata; + + /* Save error info */ + MemoryContextSwitchTo(oldcontext); + edata = CopyErrorData(); + FlushErrorState(); + + /* Pass the error data to Tcl */ + pltcl_construct_errorCode(interp, edata); + UTF_BEGIN; + Tcl_SetObjResult(interp, Tcl_NewStringObj(UTF_E2U(edata->message), -1)); + UTF_END; + FreeErrorData(edata); + + return TCL_ERROR; + } + PG_END_TRY(); + + return TCL_OK; +} + + +/********************************************************************** + * pltcl_rollback() + * + * Abort the transaction and start a new one. 
+ **********************************************************************/ +static int +pltcl_rollback(ClientData cdata, Tcl_Interp *interp, + int objc, Tcl_Obj *const objv[]) +{ + MemoryContext oldcontext = CurrentMemoryContext; + + PG_TRY(); + { + SPI_rollback(); + SPI_start_transaction(); + } + PG_CATCH(); + { + ErrorData *edata; + + /* Save error info */ + MemoryContextSwitchTo(oldcontext); + edata = CopyErrorData(); + FlushErrorState(); + + /* Pass the error data to Tcl */ + pltcl_construct_errorCode(interp, edata); + UTF_BEGIN; + Tcl_SetObjResult(interp, Tcl_NewStringObj(UTF_E2U(edata->message), -1)); + UTF_END; + FreeErrorData(edata); + + return TCL_ERROR; + } + PG_END_TRY(); + + return TCL_OK; +} + + /********************************************************************** * pltcl_set_tuple_values() - Set variables for all attributes * of a given tuple @@ -2952,15 +3068,17 @@ pltcl_set_tuple_values(Tcl_Interp *interp, const char *arrayname, for (i = 0; i < tupdesc->natts; i++) { + Form_pg_attribute att = TupleDescAttr(tupdesc, i); + /* ignore dropped attributes */ - if (tupdesc->attrs[i]->attisdropped) + if (att->attisdropped) continue; /************************************************************ * Get the attribute name ************************************************************/ UTF_BEGIN; - attname = pstrdup(UTF_E2U(NameStr(tupdesc->attrs[i]->attname))); + attname = pstrdup(UTF_E2U(NameStr(att->attname))); UTF_END; /************************************************************ @@ -2978,8 +3096,7 @@ pltcl_set_tuple_values(Tcl_Interp *interp, const char *arrayname, ************************************************************/ if (!isnull) { - getTypeOutputInfo(tupdesc->attrs[i]->atttypid, - &typoutput, &typisvarlena); + getTypeOutputInfo(att->atttypid, &typoutput, &typisvarlena); outputstr = OidOutputFunctionCall(typoutput, attr); UTF_BEGIN; Tcl_SetVar2Ex(interp, *arrptr, *nameptr, @@ -3013,14 +3130,16 @@ pltcl_build_tuple_argument(HeapTuple tuple, TupleDesc tupdesc) for (i = 0; i < tupdesc->natts; i++) { + Form_pg_attribute att = TupleDescAttr(tupdesc, i); + /* ignore dropped attributes */ - if (tupdesc->attrs[i]->attisdropped) + if (att->attisdropped) continue; /************************************************************ * Get the attribute name ************************************************************/ - attname = NameStr(tupdesc->attrs[i]->attname); + attname = NameStr(att->attname); /************************************************************ * Get the attributes value @@ -3037,7 +3156,7 @@ pltcl_build_tuple_argument(HeapTuple tuple, TupleDesc tupdesc) ************************************************************/ if (!isnull) { - getTypeOutputInfo(tupdesc->attrs[i]->atttypid, + getTypeOutputInfo(att->atttypid, &typoutput, &typisvarlena); outputstr = OidOutputFunctionCall(typoutput, attr); UTF_BEGIN; @@ -3070,6 +3189,7 @@ static HeapTuple pltcl_build_tuple_result(Tcl_Interp *interp, Tcl_Obj **kvObjv, int kvObjc, pltcl_call_state *call_state) { + HeapTuple tuple; TupleDesc tupdesc; AttInMetadata *attinmeta; char **values; @@ -3128,7 +3248,16 @@ pltcl_build_tuple_result(Tcl_Interp *interp, Tcl_Obj **kvObjv, int kvObjc, values[attn - 1] = utf_u2e(Tcl_GetString(kvObjv[i + 1])); } - return BuildTupleFromCStrings(attinmeta, values); + tuple = BuildTupleFromCStrings(attinmeta, values); + + /* if result type is domain-over-composite, check domain constraints */ + if (call_state->prodesc->fn_retisdomain) + domain_check(HeapTupleGetDatum(tuple), false, + 
call_state->prodesc->result_typid, + &call_state->prodesc->domain_info, + call_state->prodesc->fn_cxt); + + return tuple; } /********************************************************************** diff --git a/src/pl/tcl/po/it.po b/src/pl/tcl/po/it.po index 4b6a7f41b4..0aa100f3a9 100644 --- a/src/pl/tcl/po/it.po +++ b/src/pl/tcl/po/it.po @@ -1,18 +1,17 @@ # -# Translation of pltcl to Italian -# PostgreSQL Project +# pltcl.po +# Italian message translation file for pltcl # -# Associazione Culturale ITPUG - Italian PostgreSQL Users Group -# http://www.itpug.org/ - info@itpug.org +# For development and bug report please use: +# https://github.com/dvarrazzo/postgresql-it # -# Traduttori: -# * Flavio Spada +# Copyright (C) 2012-2017 PostgreSQL Global Development Group +# Copyright (C) 2010, Associazione Culturale ITPUG # -# Revisori: -# * Gabriele Bartolini +# Daniele Varrazzo , 2012-2017. +# Flavio Spada # -# Copyright (c) 2010, Associazione Culturale ITPUG -# Distributed under the same license of the PostgreSQL project +# This file is distributed under the same license as the PostgreSQL package. # msgid "" msgstr "" @@ -21,7 +20,7 @@ msgstr "" "POT-Creation-Date: 2017-05-22 07:37+0000\n" "PO-Revision-Date: 2017-05-29 17:51+0100\n" "Last-Translator: Daniele Varrazzo \n" -"Language-Team: Gruppo traduzioni ITPUG \n" +"Language-Team: https://github.com/dvarrazzo/postgresql-it\n" "Language: it\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" diff --git a/src/pl/tcl/po/ja.po b/src/pl/tcl/po/ja.po index 94ea8d8211..9b34d24d2f 100644 --- a/src/pl/tcl/po/ja.po +++ b/src/pl/tcl/po/ja.po @@ -1,58 +1,71 @@ # LANGUAGE message translation file for pltcl -# Copyright (C) 2009 PostgreSQL Global Development Group +# Copyright (C) 2018 PostgreSQL Global Development Group # This file is distributed under the same license as the PostgreSQL package. -# FIRST AUTHOR , 2009. +# KOIZUMI Satoru , 2015. # msgid "" msgstr "" -"Project-Id-Version: pltcl (PostgreSQL) 9.5\n" +"Project-Id-Version: pltcl (PostgreSQL) 10\n" "Report-Msgid-Bugs-To: pgsql-bugs@postgresql.org\n" -"POT-Creation-Date: 2015-09-21 01:07+0000\n" -"PO-Revision-Date: 2015-10-04 18:15+0900\n" -"Last-Translator: KOIZUMI Satoru \n" +"POT-Creation-Date: 2018-01-26 10:21+0900\n" +"PO-Revision-Date: 2018-01-29 10:39+0900\n" +"Last-Translator: Michihide Hotta \n" "Language-Team: Japan PostgreSQL Users Group \n" "Language: ja\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" "Plural-Forms: nplurals=1; plural=0;\n" +"X-Generator: Poedit 2.0.6\n" -#: pltcl.c:555 +#: pltcl.c:459 +msgid "PL/Tcl function to call once when pltcl is first used." +msgstr "pltcl が最初に使用される際に一度だけ呼び出される PL/Tcl 関数。" + +#: pltcl.c:466 +msgid "PL/TclU function to call once when pltclu is first used." +msgstr "pltclu が最初に使用される際に一度だけ呼び出される PL/TclU 関数。" + +#: pltcl.c:629 #, c-format -msgid "module \"unknown\" not found in pltcl_modules" -msgstr "pltcl_modulesにモジュール\"unknown\"が見つかりません" +msgid "function \"%s\" is in the wrong language" +msgstr "関数 \"%s\" は言語が異なります" -#: pltcl.c:591 +#: pltcl.c:640 #, c-format -msgid "could not load module \"unknown\": %s" -msgstr "モジュール\"unknown\"をロードできませんでした: %s" +msgid "function \"%s\" must not be SECURITY DEFINER" +msgstr "関数 \"%s\" は SECURITY DEFINER であってはなりません" -#: pltcl.c:1047 +#. 
translator: %s is "pltcl.start_proc" or "pltclu.start_proc" +#: pltcl.c:674 #, c-format -msgid "could not split return value from trigger: %s" -msgstr "トリガからの戻り値を分割できませんでした: %s" +msgid "processing %s parameter" +msgstr "%s パラメーターを処理しています" -#: pltcl.c:1058 +#: pltcl.c:830 #, c-format -msgid "trigger's return list must have even number of elements" -msgstr "トリガが返すリストの要素は偶数個でなければなりません" +msgid "set-valued function called in context that cannot accept a set" +msgstr "このコンテキストでは、集合値の関数は集合を受け付けられません" -#: pltcl.c:1094 +#: pltcl.c:994 #, c-format -msgid "unrecognized attribute \"%s\"" -msgstr "未知の属性 \"%s\"" +msgid "" +"function returning record called in context that cannot accept type record" +msgstr "" +"レコード型を受け付けられないコンテキストでレコードを返す関数が呼び出されま" +"した" -#: pltcl.c:1099 +#: pltcl.c:1263 #, c-format -msgid "cannot set system attribute \"%s\"" -msgstr "システム属性\"%s\"を設定できません" +msgid "could not split return value from trigger: %s" +msgstr "トリガーからの戻り値を分割できませんでした: %s" -#: pltcl.c:1222 pltcl.c:1648 +#: pltcl.c:1343 pltcl.c:1771 #, c-format msgid "%s" msgstr "%s" -#: pltcl.c:1223 +#: pltcl.c:1344 #, c-format msgid "" "%s\n" @@ -61,32 +74,37 @@ msgstr "" "%s\n" "PL/Tcl 関数 \"%s\"" -#: pltcl.c:1331 pltcl.c:1338 -#, c-format -msgid "out of memory" -msgstr "メモリ不足です" - -#: pltcl.c:1386 +#: pltcl.c:1509 #, c-format msgid "trigger functions can only be called as triggers" -msgstr "トリガ関数はトリガとしてのみコールできます" +msgstr "トリガー関数はトリガーとしてのみコールできます" -#: pltcl.c:1395 +#: pltcl.c:1513 #, c-format msgid "PL/Tcl functions cannot return type %s" -msgstr "PL/Tcl 関数は戻り値の型 %s を返せません" +msgstr "PL/Tcl 関数は %s 型の戻り値を返せません" -#: pltcl.c:1407 -#, c-format -msgid "PL/Tcl functions cannot return composite types" -msgstr "PL/Tcl 関数は戻り値の型として複合型を返せません" - -#: pltcl.c:1446 +#: pltcl.c:1549 #, c-format msgid "PL/Tcl functions cannot accept type %s" -msgstr "PL/Tcl 関数は型 %s を受け付けません" +msgstr "PL/Tcl 関数は %s 型を受け付けません" -#: pltcl.c:1564 +#: pltcl.c:1663 #, c-format msgid "could not create internal procedure \"%s\": %s" msgstr "内部プロシージャ \"%s\" を作成できませんでした: %s" + +#: pltcl.c:3100 +#, c-format +msgid "column name/value list must have even number of elements" +msgstr "列名/値のリストの要素は偶数個でなければなりません" + +#: pltcl.c:3118 +#, c-format +msgid "column name/value list contains nonexistent column name \"%s\"" +msgstr "列名/値のリストの中に、存在しない列名 \"%s\" が含まれています" + +#: pltcl.c:3125 +#, c-format +msgid "cannot set system attribute \"%s\"" +msgstr "システム属性 \"%s\" を設定できません" diff --git a/src/pl/tcl/po/ko.po b/src/pl/tcl/po/ko.po index 650b4bccc0..5d2b09a7bd 100644 --- a/src/pl/tcl/po/ko.po +++ b/src/pl/tcl/po/ko.po @@ -5,53 +5,65 @@ # msgid "" msgstr "" -"Project-Id-Version: pltcl (PostgreSQL) 9.5\n" +"Project-Id-Version: pltcl (PostgreSQL) 10\n" "Report-Msgid-Bugs-To: pgsql-bugs@postgresql.org\n" -"POT-Creation-Date: 2016-02-01 11:24+0900\n" -"PO-Revision-Date: 2016-02-01 11:47+0900\n" +"POT-Creation-Date: 2017-09-19 09:51+0900\n" +"PO-Revision-Date: 2017-09-19 10:29+0900\n" "Last-Translator: Ioseph Kim \n" "Language-Team: Korean Team \n" +"Language: ko\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" -"Language: ko\n" +"Plural-Forms: nplurals=1; plural=0;\n" + +#: pltcl.c:459 +msgid "PL/Tcl function to call once when pltcl is first used." +msgstr "pltcl 언어가 처음 사용될 때 한번 호출 될 PL/Tcl 함수" -#: pltcl.c:555 +#: pltcl.c:466 +msgid "PL/TclU function to call once when pltclu is first used." 
+msgstr "pltclu 언어가 처음 사용될 때 한번 호출 될 PL/Tcl 함수" + +#: pltcl.c:629 #, c-format -msgid "module \"unknown\" not found in pltcl_modules" -msgstr "pltcl_modules 안에 \"unknown\" 모듈을 찾을 수 없음" +msgid "function \"%s\" is in the wrong language" +msgstr "\"%s\" 함수에 잘못된 언어가 있음" -#: pltcl.c:591 +#: pltcl.c:640 #, c-format -msgid "could not load module \"unknown\": %s" -msgstr "\"unknown\" 모듈을 불러올 수 없음: %s" +msgid "function \"%s\" must not be SECURITY DEFINER" +msgstr "\"%s\" 함수는 SECURITY DEFINER 속성이 없어야 합니다" -#: pltcl.c:1047 +#. translator: %s is "pltcl.start_proc" or "pltclu.start_proc" +#: pltcl.c:674 #, c-format -msgid "could not split return value from trigger: %s" -msgstr "트리거에서 반환값을 분리할 수 없음: %s" +msgid "processing %s parameter" +msgstr "%s 매개 변수 처리 중" -#: pltcl.c:1058 +#: pltcl.c:830 #, c-format -msgid "trigger's return list must have even number of elements" -msgstr "트리거 반환 목록은 그 요소의 개수가 짝수여야 함" +msgid "set-valued function called in context that cannot accept a set" +msgstr "집합이 값이 함수가 집합을 사용할 수 없는 구문에서 호출 되었음" -#: pltcl.c:1094 +#: pltcl.c:994 #, c-format -msgid "unrecognized attribute \"%s\"" -msgstr "\"%s\" 속성을 알 수 없음" +msgid "" +"function returning record called in context that cannot accept type record" +msgstr "" +"레코드를 반환하는 함수가 레코드 형을 사용할 수 없는 구문에서 호출 되었음" -#: pltcl.c:1099 +#: pltcl.c:1263 #, c-format -msgid "cannot set system attribute \"%s\"" -msgstr "\"%s\" 시스템 속성을 지정할 수 없음" +msgid "could not split return value from trigger: %s" +msgstr "트리거에서 반환값을 분리할 수 없음: %s" -#: pltcl.c:1222 pltcl.c:1648 +#: pltcl.c:1343 pltcl.c:1771 #, c-format msgid "%s" msgstr "%s" -#: pltcl.c:1223 +#: pltcl.c:1344 #, c-format msgid "" "%s\n" @@ -60,32 +72,37 @@ msgstr "" "%s\n" "해당 PL/Tcl 함수: \"%s\"" -#: pltcl.c:1331 pltcl.c:1338 -#, c-format -msgid "out of memory" -msgstr "메모리 부족" - -#: pltcl.c:1386 +#: pltcl.c:1509 #, c-format msgid "trigger functions can only be called as triggers" msgstr "트리거 함수는 트리거로만 호출될 수 있음" -#: pltcl.c:1395 +#: pltcl.c:1513 #, c-format msgid "PL/Tcl functions cannot return type %s" msgstr "PL/Tcl 함수는 %s 자료형을 반환할 수 없음" -#: pltcl.c:1407 -#, c-format -msgid "PL/Tcl functions cannot return composite types" -msgstr "PL/Tcl 함수는 복합 자료형을 반환할 수 없음" - -#: pltcl.c:1446 +#: pltcl.c:1549 #, c-format msgid "PL/Tcl functions cannot accept type %s" msgstr "PL/Tcl 함수는 %s 자료형을 사용할 수 없음" -#: pltcl.c:1564 +#: pltcl.c:1663 #, c-format msgid "could not create internal procedure \"%s\": %s" msgstr "\"%s\" 내부 프로시져를 만들 수 없음: %s" + +#: pltcl.c:3100 +#, c-format +msgid "column name/value list must have even number of elements" +msgstr "칼럼 이름/값 목록은 그 요소의 개수가 짝수여야 함" + +#: pltcl.c:3118 +#, c-format +msgid "column name/value list contains nonexistent column name \"%s\"" +msgstr "칼럼 이름/값 목록에 \"%s\" 칼럼에 대한 값이 없음" + +#: pltcl.c:3125 +#, c-format +msgid "cannot set system attribute \"%s\"" +msgstr "\"%s\" 시스템 속성을 지정할 수 없음" diff --git a/src/pl/tcl/po/ru.po b/src/pl/tcl/po/ru.po index d048170a98..fd31a17c18 100644 --- a/src/pl/tcl/po/ru.po +++ b/src/pl/tcl/po/ru.po @@ -2,13 +2,13 @@ # Copyright (C) 2012-2016 PostgreSQL Global Development Group # This file is distributed under the same license as the PostgreSQL package. # Alexander Lakhin , 2012-2017. 
-# msgid "" msgstr "" "Project-Id-Version: pltcl (PostgreSQL current)\n" "Report-Msgid-Bugs-To: pgsql-bugs@postgresql.org\n" -"POT-Creation-Date: 2017-03-27 12:37+0000\n" +"POT-Creation-Date: 2017-05-22 07:37+0000\n" "PO-Revision-Date: 2017-03-29 14:01+0300\n" +"Last-Translator: Alexander Lakhin \n" "Language-Team: Russian \n" "Language: ru\n" "MIME-Version: 1.0\n" @@ -16,7 +16,6 @@ msgstr "" "Content-Transfer-Encoding: 8bit\n" "Plural-Forms: nplurals=3; plural=(n%10==1 && n%100!=11 ? 0 : n%10>=2 && n" "%10<=4 && (n%100<10 || n%100>=20) ? 1 : 2);\n" -"Last-Translator: Alexander Lakhin \n" #: pltcl.c:459 msgid "PL/Tcl function to call once when pltcl is first used." @@ -55,17 +54,17 @@ msgid "" msgstr "" "функция, возвращающая запись, вызвана в контексте, не допускающем этот тип" -#: pltcl.c:1258 +#: pltcl.c:1263 #, c-format msgid "could not split return value from trigger: %s" msgstr "разложить возвращаемое из триггера значение не удалось: %s" -#: pltcl.c:1338 pltcl.c:1766 +#: pltcl.c:1343 pltcl.c:1771 #, c-format msgid "%s" msgstr "%s" -#: pltcl.c:1339 +#: pltcl.c:1344 #, c-format msgid "" "%s\n" @@ -74,38 +73,38 @@ msgstr "" "%s\n" "в функции PL/Tcl \"%s\"" -#: pltcl.c:1504 +#: pltcl.c:1509 #, c-format msgid "trigger functions can only be called as triggers" msgstr "триггерные функции могут вызываться только в триггерах" -#: pltcl.c:1508 +#: pltcl.c:1513 #, c-format msgid "PL/Tcl functions cannot return type %s" msgstr "функции PL/Tcl не могут возвращать тип %s" -#: pltcl.c:1544 +#: pltcl.c:1549 #, c-format msgid "PL/Tcl functions cannot accept type %s" msgstr "функции PL/Tcl не могут принимать тип %s" -#: pltcl.c:1658 +#: pltcl.c:1663 #, c-format msgid "could not create internal procedure \"%s\": %s" msgstr "не удалось создать внутреннюю процедуру \"%s\": %s" -#: pltcl.c:3095 +#: pltcl.c:3100 #, c-format msgid "column name/value list must have even number of elements" msgstr "в списке имён/значений столбцов должно быть чётное число элементов" -#: pltcl.c:3113 +#: pltcl.c:3118 #, c-format msgid "column name/value list contains nonexistent column name \"%s\"" msgstr "" "список имён/значений столбцов содержит имя несуществующего столбца \"%s\"" -#: pltcl.c:3120 +#: pltcl.c:3125 #, c-format msgid "cannot set system attribute \"%s\"" msgstr "установить системный атрибут \"%s\" нельзя" diff --git a/src/pl/tcl/po/tr.po b/src/pl/tcl/po/tr.po index 373a289420..9593bf8447 100644 --- a/src/pl/tcl/po/tr.po +++ b/src/pl/tcl/po/tr.po @@ -7,21 +7,61 @@ msgid "" msgstr "" "Project-Id-Version: PostgreSQL 8.4\n" "Report-Msgid-Bugs-To: pgsql-bugs@postgresql.org\n" -"POT-Creation-Date: 2009-04-29 07:08+0000\n" -"PO-Revision-Date: 2013-09-04 20:50-0400\n" +"POT-Creation-Date: 2018-02-22 00:07+0000\n" +"PO-Revision-Date: 2018-02-22 15:01+0300\n" "Last-Translator: Devrim GÜNDÜZ \n" -"Language-Team: TR >\n" +"Language-Team: TR \n" "Language: tr\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" +"X-Generator: Poedit 1.8.7.1\n" -#: pltcl.c:1027 +#: pltcl.c:459 +msgid "PL/Tcl function to call once when pltcl is first used." +msgstr "pltcl ilk sefer kullanıldığında bir kez çağrılacak PL/Tcl fonksiyonu" + +#: pltcl.c:466 +msgid "PL/TclU function to call once when pltclu is first used." 
+msgstr "pltclu ilk sefer kullanıldığında bir kez çağrılacak PL/Tclu fonksiyonu" + +#: pltcl.c:629 +#, c-format +msgid "function \"%s\" is in the wrong language" +msgstr "\"%s\" fonksiyonu yanlış dilde" + +#: pltcl.c:640 +#, c-format +msgid "function \"%s\" must not be SECURITY DEFINER" +msgstr "\"%s\" fonksiyonu SECURITY DEFINER olmamalı" + +#. translator: %s is "pltcl.start_proc" or "pltclu.start_proc" +#: pltcl.c:674 +#, c-format +msgid "processing %s parameter" +msgstr "%s parametresi işleniyor" + +#: pltcl.c:830 +#, c-format +msgid "set-valued function called in context that cannot accept a set" +msgstr "set değerini kabul etmediği ortamda set değeri alan fonksiyon çağırılmış" + +#: pltcl.c:994 +#, c-format +msgid "function returning record called in context that cannot accept type record" +msgstr "tip kaydı içermeyen alanda çağırılan ve kayıt döndüren fonksiyon" + +#: pltcl.c:1263 +#, c-format +msgid "could not split return value from trigger: %s" +msgstr "sdönüş değeri tetikleyiciden (trigger) ayrılamadı: %s" + +#: pltcl.c:1343 pltcl.c:1771 #, c-format msgid "%s" msgstr "%s" -#: pltcl.c:1028 +#: pltcl.c:1344 #, c-format msgid "" "%s\n" @@ -30,25 +70,43 @@ msgstr "" "%s\n" "Şu PL/Tcl fonksiyonunda: \"%s\"" -#: pltcl.c:1127 -msgid "out of memory" -msgstr "yetersiz bellek" - -#: pltcl.c:1192 +#: pltcl.c:1509 +#, c-format msgid "trigger functions can only be called as triggers" msgstr "trigger fonksiyonları sadece trigger olarak çağırılabilirler" -#: pltcl.c:1201 +#: pltcl.c:1513 #, c-format msgid "PL/Tcl functions cannot return type %s" msgstr "PL/Tcl fonksiyonları %s tipini döndüremezler" -#: pltcl.c:1213 -msgid "PL/Tcl functions cannot return composite types" -msgstr "PL/Tcl fonksiyonları composit tip döndüremezler" - -#: pltcl.c:1253 +#: pltcl.c:1549 #, c-format msgid "PL/Tcl functions cannot accept type %s" msgstr "PL/Tcl fonksiyonları %s veri tipini kabul etmezler" +#: pltcl.c:1663 +#, c-format +msgid "could not create internal procedure \"%s\": %s" +msgstr "\"%s\" dahili yordamı oluşturulamadı: %s" + +#: pltcl.c:3100 +#, c-format +msgid "column name/value list must have even number of elements" +msgstr "sütun adı/değer listesinin çift sayıda öğesi olmalı" + +#: pltcl.c:3118 +#, c-format +msgid "column name/value list contains nonexistent column name \"%s\"" +msgstr "sütun adı/değer listesi mevcut olmayan \"%s\" sütun adını içeriyor" + +#: pltcl.c:3125 +#, c-format +msgid "cannot set system attribute \"%s\"" +msgstr "\"%s\" sistem niteliği ayarlanamaz" + +#~ msgid "PL/Tcl functions cannot return composite types" +#~ msgstr "PL/Tcl fonksiyonları composit tip döndüremezler" + +#~ msgid "out of memory" +#~ msgstr "yetersiz bellek" diff --git a/src/pl/tcl/po/vi.po b/src/pl/tcl/po/vi.po new file mode 100644 index 0000000000..7224bf1486 --- /dev/null +++ b/src/pl/tcl/po/vi.po @@ -0,0 +1,107 @@ +# LANGUAGE message translation file for pltcl +# Copyright (C) 2018 PostgreSQL Global Development Group +# This file is distributed under the same license as the pltcl (PostgreSQL) package. +# FIRST AUTHOR , 2018. 
+# +msgid "" +msgstr "" +"Project-Id-Version: pltcl (PostgreSQL) 11\n" +"Report-Msgid-Bugs-To: pgsql-bugs@postgresql.org\n" +"POT-Creation-Date: 2018-04-22 12:08+0000\n" +"PO-Revision-Date: 2018-04-29 22:56+0900\n" +"Language-Team: \n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=UTF-8\n" +"Content-Transfer-Encoding: 8bit\n" +"X-Generator: Poedit 2.0.6\n" +"Last-Translator: Dang Minh Huong \n" +"Plural-Forms: nplurals=1; plural=0;\n" +"Language: vi_VN\n" + +#: pltcl.c:466 +msgid "PL/Tcl function to call once when pltcl is first used." +msgstr "Chỉ định hàm PL/Tcl được gọi một lần khi pltcl sử dụng lần đầu tiên." + +#: pltcl.c:473 +msgid "PL/TclU function to call once when pltclu is first used." +msgstr "Chỉ định hàm PL/TclU được gọi một lần khi pltclu sử dụng lần đầu tiên." + +#: pltcl.c:640 +#, c-format +msgid "function \"%s\" is in the wrong language" +msgstr "hàm \"%s\" không đúng ngôn ngữ" + +#: pltcl.c:651 +#, c-format +msgid "function \"%s\" must not be SECURITY DEFINER" +msgstr "hàm \"%s\" không được là SECURITY DEFINER" + +#. translator: %s is "pltcl.start_proc" or "pltclu.start_proc" +#: pltcl.c:685 +#, c-format +msgid "processing %s parameter" +msgstr "xử lý tham số %s" + +#: pltcl.c:846 +#, c-format +msgid "set-valued function called in context that cannot accept a set" +msgstr "hàm thiết lập giá trị được gọi trong ngữ cảnh không thể chấp nhận một tập hợp" + +#: pltcl.c:1019 +#, c-format +msgid "function returning record called in context that cannot accept type record" +msgstr "hàm trả về bản ghi được gọi trong ngữ cảnh không thể chấp nhận kiểu bản ghi" + +#: pltcl.c:1296 +#, c-format +msgid "could not split return value from trigger: %s" +msgstr "không thể tách giá trị trả về khỏi trigger: %s" + +#: pltcl.c:1376 pltcl.c:1806 +#, c-format +msgid "%s" +msgstr "%s" + +#: pltcl.c:1377 +#, c-format +msgid "" +"%s\n" +"in PL/Tcl function \"%s\"" +msgstr "" +"%s\n" +"trong hàm PL/Tcl \"%s\"" + +#: pltcl.c:1541 +#, c-format +msgid "trigger functions can only be called as triggers" +msgstr "hàm trigger chỉ có thể được goi như những triggers." 
+ +#: pltcl.c:1545 +#, c-format +msgid "PL/Tcl functions cannot return type %s" +msgstr "Hàm PL/Tcl không thể trả về kiểu %s" + +#: pltcl.c:1584 +#, c-format +msgid "PL/Tcl functions cannot accept type %s" +msgstr "Hàm PL/Tcl không thể chấp nhận kiểu %s" + +#: pltcl.c:1698 +#, c-format +msgid "could not create internal procedure \"%s\": %s" +msgstr "không thể tạo procedure nội bộ \"%s\": %s" + +#: pltcl.c:3219 +#, c-format +msgid "column name/value list must have even number of elements" +msgstr "danh sách cột tên/giá trị phải có giá trị chẵn cho số phần tử" + +#: pltcl.c:3237 +#, c-format +msgid "column name/value list contains nonexistent column name \"%s\"" +msgstr "danh sách cột tên/giá trị chứa tên cột không tồn tại \"%s\"" + +#: pltcl.c:3244 +#, c-format +msgid "cannot set system attribute \"%s\"" +msgstr "không thể thiết lập attribute hệ thống \"%s\"" diff --git a/src/pl/tcl/sql/pltcl_call.sql b/src/pl/tcl/sql/pltcl_call.sql new file mode 100644 index 0000000000..95791d08be --- /dev/null +++ b/src/pl/tcl/sql/pltcl_call.sql @@ -0,0 +1,59 @@ +CREATE PROCEDURE test_proc1() +LANGUAGE pltcl +AS $$ +unset +$$; + +CALL test_proc1(); + + +CREATE PROCEDURE test_proc2() +LANGUAGE pltcl +AS $$ +return 5 +$$; + +CALL test_proc2(); + + +CREATE TABLE test1 (a int); + +CREATE PROCEDURE test_proc3(x int) +LANGUAGE pltcl +AS $$ +spi_exec "INSERT INTO test1 VALUES ($1)" +$$; + +CALL test_proc3(55); + +SELECT * FROM test1; + + +-- output arguments + +CREATE PROCEDURE test_proc5(INOUT a text) +LANGUAGE pltcl +AS $$ +set aa [concat $1 "+" $1] +return [list a $aa] +$$; + +CALL test_proc5('abc'); + + +CREATE PROCEDURE test_proc6(a int, INOUT b int, INOUT c int) +LANGUAGE pltcl +AS $$ +set bb [expr $2 * $1] +set cc [expr $3 * $1] +return [list b $bb c $cc] +$$; + +CALL test_proc6(2, 3, 4); + + +DROP PROCEDURE test_proc1; +DROP PROCEDURE test_proc2; +DROP PROCEDURE test_proc3; + +DROP TABLE test1; diff --git a/src/pl/tcl/sql/pltcl_queries.sql b/src/pl/tcl/sql/pltcl_queries.sql index dabd8cd35f..71c1238bd2 100644 --- a/src/pl/tcl/sql/pltcl_queries.sql +++ b/src/pl/tcl/sql/pltcl_queries.sql @@ -89,6 +89,26 @@ truncate trigger_test; select tcl_composite_arg_ref1(row('tkey', 42, 'ref2')); select tcl_composite_arg_ref2(row('tkey', 42, 'ref2')); +-- More tests for composite argument/result types + +create domain d_dta1 as T_dta1 check ((value).ref1 > 0); + +create function tcl_record_arg(record, fldname text) returns int as ' + return $1($2) +' language pltcl; + +select tcl_record_arg(row('tkey', 42, 'ref2')::T_dta1, 'ref1'); +select tcl_record_arg(row('tkey', 42, 'ref2')::d_dta1, 'ref1'); +select tcl_record_arg(row(2,4), 'f2'); + +create function tcl_cdomain_arg(d_dta1) returns int as ' + return $1(ref1) +' language pltcl; + +select tcl_cdomain_arg(row('tkey', 42, 'ref2')); +select tcl_cdomain_arg(row('tkey', 42, 'ref2')::T_dta1); +select tcl_cdomain_arg(row('tkey', -1, 'ref2')); -- fail + -- Test argisnull primitive select tcl_argisnull('foo'); select tcl_argisnull(''); @@ -136,6 +156,29 @@ return_next [list a 1 b 2 cow 3] $$ language pltcl; select bad_field_srf(); +-- test composite and domain-over-composite results +create function tcl_composite_result(int) returns T_dta1 as $$ +return [list tkey tkey1 ref1 $1 ref2 ref22] +$$ language pltcl; +select tcl_composite_result(1001); +select * from tcl_composite_result(1002); + +create function tcl_dcomposite_result(int) returns d_dta1 as $$ +return [list tkey tkey2 ref1 $1 ref2 ref42] +$$ language pltcl; +select tcl_dcomposite_result(1001); +select * from 
tcl_dcomposite_result(1002); +select * from tcl_dcomposite_result(-1); -- fail + +create function tcl_record_result(int) returns record as $$ +return [list q1 sometext q2 $1 q3 moretext] +$$ language pltcl; +select tcl_record_result(42); -- fail +select * from tcl_record_result(42); -- fail +select * from tcl_record_result(42) as (q1 text, q2 int, q3 text); +select * from tcl_record_result(42) as (q1 text, q2 int, q3 text, q4 int); +select * from tcl_record_result(42) as (q1 text, q2 int, q4 int); -- fail + -- test quote select tcl_eval('quote foo bar'); select tcl_eval('quote [format %c 39]'); diff --git a/src/pl/tcl/sql/pltcl_transaction.sql b/src/pl/tcl/sql/pltcl_transaction.sql new file mode 100644 index 0000000000..c752faf665 --- /dev/null +++ b/src/pl/tcl/sql/pltcl_transaction.sql @@ -0,0 +1,98 @@ +-- suppress CONTEXT so that function OIDs aren't in output +\set VERBOSITY terse + +CREATE TABLE test1 (a int, b text); + + +CREATE PROCEDURE transaction_test1() +LANGUAGE pltcl +AS $$ +for {set i 0} {$i < 10} {incr i} { + spi_exec "INSERT INTO test1 (a) VALUES ($i)" + if {$i % 2 == 0} { + commit + } else { + rollback + } +} +$$; + +CALL transaction_test1(); + +SELECT * FROM test1; + + +TRUNCATE test1; + +-- not allowed in a function +CREATE FUNCTION transaction_test2() RETURNS int +LANGUAGE pltcl +AS $$ +for {set i 0} {$i < 10} {incr i} { + spi_exec "INSERT INTO test1 (a) VALUES ($i)" + if {$i % 2 == 0} { + commit + } else { + rollback + } +} +return 1 +$$; + +SELECT transaction_test2(); + +SELECT * FROM test1; + + +-- also not allowed if procedure is called from a function +CREATE FUNCTION transaction_test3() RETURNS int +LANGUAGE pltcl +AS $$ +spi_exec "CALL transaction_test1()" +return 1 +$$; + +SELECT transaction_test3(); + +SELECT * FROM test1; + + +-- commit inside cursor loop +CREATE TABLE test2 (x int); +INSERT INTO test2 VALUES (0), (1), (2), (3), (4); + +TRUNCATE test1; + +CREATE PROCEDURE transaction_test4a() +LANGUAGE pltcl +AS $$ +spi_exec -array row "SELECT * FROM test2 ORDER BY x" { + spi_exec "INSERT INTO test1 (a) VALUES ($row(x))" + commit +} +$$; + +CALL transaction_test4a(); + +SELECT * FROM test1; + + +-- rollback inside cursor loop +TRUNCATE test1; + +CREATE PROCEDURE transaction_test4b() +LANGUAGE pltcl +AS $$ +spi_exec -array row "SELECT * FROM test2 ORDER BY x" { + spi_exec "INSERT INTO test1 (a) VALUES ($row(x))" + rollback +} +$$; + +CALL transaction_test4b(); + +SELECT * FROM test1; + + +DROP TABLE test1; +DROP TABLE test2; diff --git a/src/port/.gitignore b/src/port/.gitignore index 53a4032444..2037b7d2ab 100644 --- a/src/port/.gitignore +++ b/src/port/.gitignore @@ -1,3 +1,4 @@ /libpgport.a +/libpgport_shlib.a /libpgport_srv.a /pg_config_paths.h diff --git a/src/port/Makefile b/src/port/Makefile index 81f01b25bb..585c53757b 100644 --- a/src/port/Makefile +++ b/src/port/Makefile @@ -1,18 +1,23 @@ #------------------------------------------------------------------------- # # Makefile -# Makefile for the port-specific subsystem of the backend +# Makefile for src/port # -# These files are used in other directories for portability on systems -# with broken/missing library files, and for common code sharing. +# These files are used by the Postgres backend, and also by frontend +# programs. Primarily, they are meant to provide portability on systems +# with broken/missing library files. 
# -# This makefile generates two outputs: +# This makefile generates three outputs: # # libpgport.a - contains object files with FRONTEND defined, -# for use by client application and libraries +# for use by client applications +# +# libpgport_shlib.a - contains object files with FRONTEND defined, +# built suitably for use in shared libraries; for use +# by frontend libraries # # libpgport_srv.a - contains object files without FRONTEND defined, -# for use only by the backend binaries +# for use only by the backend # # LIBOBJS is set by configure (via Makefile.global) to be the list of object # files that are conditionally needed as determined by configure's probing. @@ -33,38 +38,67 @@ LIBS += $(PTHREAD_LIBS) OBJS = $(LIBOBJS) $(PG_CRC32C_OBJS) chklocale.o erand48.o inet_net_ntop.o \ noblock.o path.o pgcheckdir.o pgmkdirp.o pgsleep.o \ pgstrcasecmp.o pqsignal.o \ - qsort.o qsort_arg.o quotes.o sprompt.o tar.o thread.o + qsort.o qsort_arg.o quotes.o snprintf.o sprompt.o strerror.o \ + tar.o thread.o ifeq ($(enable_strong_random), yes) OBJS += pg_strong_random.o endif -# foo_srv.o and foo.o are both built from foo.c, but only foo.o has -DFRONTEND +# libpgport.a, libpgport_shlib.a, and libpgport_srv.a contain the same files +# foo.o, foo_shlib.o, and foo_srv.o are all built from foo.c +OBJS_SHLIB = $(OBJS:%.o=%_shlib.o) OBJS_SRV = $(OBJS:%.o=%_srv.o) -all: libpgport.a libpgport_srv.a +all: libpgport.a libpgport_shlib.a libpgport_srv.a # libpgport is needed by some contrib install: all installdirs $(INSTALL_STLIB) libpgport.a '$(DESTDIR)$(libdir)/libpgport.a' + $(INSTALL_STLIB) libpgport_shlib.a '$(DESTDIR)$(libdir)/libpgport_shlib.a' installdirs: $(MKDIR_P) '$(DESTDIR)$(libdir)' uninstall: rm -f '$(DESTDIR)$(libdir)/libpgport.a' + rm -f '$(DESTDIR)$(libdir)/libpgport_shlib.a' libpgport.a: $(OBJS) rm -f $@ $(AR) $(AROPT) $@ $^ -# thread.o needs PTHREAD_CFLAGS (but thread_srv.o does not) +# thread.o and thread_shlib.o need PTHREAD_CFLAGS (but thread_srv.o does not) thread.o: CFLAGS+=$(PTHREAD_CFLAGS) +thread_shlib.o: CFLAGS+=$(PTHREAD_CFLAGS) -# pg_crc32c_sse42.o and its _srv.o version need CFLAGS_SSE42 +# all versions of pg_crc32c_sse42.o need CFLAGS_SSE42 pg_crc32c_sse42.o: CFLAGS+=$(CFLAGS_SSE42) +pg_crc32c_sse42_shlib.o: CFLAGS+=$(CFLAGS_SSE42) pg_crc32c_sse42_srv.o: CFLAGS+=$(CFLAGS_SSE42) +# all versions of pg_crc32c_armv8.o need CFLAGS_ARMV8_CRC32C +pg_crc32c_armv8.o: CFLAGS+=$(CFLAGS_ARMV8_CRC32C) +pg_crc32c_armv8_shlib.o: CFLAGS+=$(CFLAGS_ARMV8_CRC32C) +pg_crc32c_armv8_srv.o: CFLAGS+=$(CFLAGS_ARMV8_CRC32C) + +# +# Shared library versions of object files +# + +libpgport_shlib.a: $(OBJS_SHLIB) + rm -f $@ + $(AR) $(AROPT) $@ $^ + +# Because this uses its own compilation rule, it doesn't use the +# dependency tracking logic from Makefile.global. To make sure that +# dependency tracking works anyway for the *_shlib.o files, depend on +# their *.o siblings as well, which do have proper dependencies. It's +# a hack that might fail someday if there is a *_shlib.o without a +# corresponding *.o, but there seems little reason for that. 
+%_shlib.o: %.c %.o + $(CC) $(CFLAGS) $(CFLAGS_SL) $(CPPFLAGS) -c $< -o $@ + # # Server versions of object files # @@ -83,17 +117,12 @@ libpgport_srv.a: $(OBJS_SRV) %_srv.o: %.c %.o $(CC) $(CFLAGS) $(subst -DFRONTEND,, $(CPPFLAGS)) -c $< -o $@ -$(OBJS_SRV): | submake-errcodes - -.PHONY: submake-errcodes - -submake-errcodes: - $(MAKE) -C ../backend submake-errcodes - # Dependency is to ensure that path changes propagate path.o: path.c pg_config_paths.h +path_shlib.o: path.c pg_config_paths.h + path_srv.o: path.c pg_config_paths.h # We create a separate file rather than put these in pg_config.h @@ -114,4 +143,5 @@ pg_config_paths.h: $(top_builddir)/src/Makefile.global echo "#define MANDIR \"$(mandir)\"" >>$@ clean distclean maintainer-clean: - rm -f libpgport.a libpgport_srv.a $(OBJS) $(OBJS_SRV) pg_config_paths.h + rm -f libpgport.a libpgport_shlib.a libpgport_srv.a + rm -f $(OBJS) $(OBJS_SHLIB) $(OBJS_SRV) pg_config_paths.h diff --git a/src/port/README b/src/port/README index 4ae96da015..c446b46e26 100644 --- a/src/port/README +++ b/src/port/README @@ -18,7 +18,7 @@ and adding infrastructure to recompile the object files: OBJS= execute.o typename.o descriptor.o data.o error.o prepare.o memory.o \ connect.o misc.o path.o exec.o \ - $(filter snprintf.o, $(LIBOBJS)) + $(filter strlcat.o, $(LIBOBJS)) The problem is that there is no testing of which object files need to be added, but missing functions usually show up when linking user diff --git a/src/port/chklocale.c b/src/port/chklocale.c index c357fed6dc..dde913099f 100644 --- a/src/port/chklocale.c +++ b/src/port/chklocale.c @@ -4,7 +4,7 @@ * Functions for handling locale-related info * * - * Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Copyright (c) 1996-2018, PostgreSQL Global Development Group * * * IDENTIFICATION diff --git a/src/port/dirent.c b/src/port/dirent.c index 2bab7938a0..7d1d069647 100644 --- a/src/port/dirent.c +++ b/src/port/dirent.c @@ -3,7 +3,7 @@ * dirent.c * opendir/readdir/closedir for win32/msvc * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * diff --git a/src/port/dirmod.c b/src/port/dirmod.c index eac59bdfda..26611922db 100644 --- a/src/port/dirmod.c +++ b/src/port/dirmod.c @@ -3,7 +3,7 @@ * dirmod.c * directory handling functions * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * This includes replacement versions of functions that work on diff --git a/src/port/dlopen.c b/src/port/dlopen.c new file mode 100644 index 0000000000..4cde484d9d --- /dev/null +++ b/src/port/dlopen.c @@ -0,0 +1,145 @@ +/*------------------------------------------------------------------------- + * + * dlopen.c + * dynamic loader for platforms without dlopen() + * + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group + * Portions Copyright (c) 1994, Regents of the University of California + * + * + * IDENTIFICATION + * src/port/dlopen.c + * + *------------------------------------------------------------------------- + */ + +#include "c.h" + +#if defined(__hpux) + +/* System includes */ +#include +#include + +void * +dlopen(const char *file, int mode) +{ + int flags = 0; + + if (mode & RTLD_NOW) + flags |= BIND_IMMEDIATE; +#ifdef NOT_USED + if (mode & 
RTLD_LAZY) + flags |= BIND_DEFERRED; +#endif + + return shl_load(file, flags | BIND_VERBOSE, 0L); +} + +void * +dlsym(void *handle, const char *symbol) +{ + void *value; + + if (shl_findsym((shl_t *) & handle, symbol, TYPE_PROCEDURE, &value) == -1) + return NULL; + return value; +} + +int +dlclose(void *handle) +{ + return shl_unload((shl_t) handle); +} + +char * +dlerror(void) +{ + static char errmsg[] = "shl_load failed"; + + if (errno) + return strerror(errno); + + return errmsg; +} + +#elif defined(WIN32) + +static char last_dyn_error[512]; + +static void +set_dl_error(void) +{ + DWORD err = GetLastError(); + + if (FormatMessage(FORMAT_MESSAGE_IGNORE_INSERTS | + FORMAT_MESSAGE_FROM_SYSTEM, + NULL, + err, + MAKELANGID(LANG_ENGLISH, SUBLANG_DEFAULT), + last_dyn_error, + sizeof(last_dyn_error) - 1, + NULL) == 0) + { + snprintf(last_dyn_error, sizeof(last_dyn_error) - 1, + "unknown error %lu", err); + } +} + +char * +dlerror(void) +{ + if (last_dyn_error[0]) + return last_dyn_error; + else + return NULL; +} + +int +dlclose(void *handle) +{ + if (!FreeLibrary((HMODULE) handle)) + { + set_dl_error(); + return 1; + } + last_dyn_error[0] = 0; + return 0; +} + +void * +dlsym(void *handle, const char *symbol) +{ + void *ptr; + + ptr = GetProcAddress((HMODULE) handle, symbol); + if (!ptr) + { + set_dl_error(); + return NULL; + } + last_dyn_error[0] = 0; + return ptr; +} + +void * +dlopen(const char *file, int mode) +{ + HMODULE h; + int prevmode; + + /* Disable popup error messages when loading DLLs */ + prevmode = SetErrorMode(SEM_FAILCRITICALERRORS | SEM_NOOPENFILEERRORBOX); + h = LoadLibrary(file); + SetErrorMode(prevmode); + + if (!h) + { + set_dl_error(); + return NULL; + } + last_dyn_error[0] = 0; + return (void *) h; +} + +#endif diff --git a/src/port/fls.c b/src/port/fls.c index ddd18f17f5..46dceb59d5 100644 --- a/src/port/fls.c +++ b/src/port/fls.c @@ -3,7 +3,7 @@ * fls.c * finds the last (most significant) bit that is set * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * * * IDENTIFICATION diff --git a/src/port/fseeko.c b/src/port/fseeko.c index e9c0b07f0b..38b9cbde91 100644 --- a/src/port/fseeko.c +++ b/src/port/fseeko.c @@ -3,7 +3,7 @@ * fseeko.c * 64-bit versions of fseeko/ftello() * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * diff --git a/src/port/getaddrinfo.c b/src/port/getaddrinfo.c index e5b5702c79..1054d857b3 100644 --- a/src/port/getaddrinfo.c +++ b/src/port/getaddrinfo.c @@ -13,7 +13,7 @@ * use the Windows native routines, but if not, we use our own. 
* * - * Copyright (c) 2003-2017, PostgreSQL Global Development Group + * Copyright (c) 2003-2018, PostgreSQL Global Development Group * * IDENTIFICATION * src/port/getaddrinfo.c @@ -31,6 +31,7 @@ #include "getaddrinfo.h" #include "libpq/pqcomm.h" /* needed for struct sockaddr_storage */ +#include "port/pg_bswap.h" #ifdef WIN32 @@ -178,7 +179,7 @@ getaddrinfo(const char *node, const char *service, if (node) { if (node[0] == '\0') - sin.sin_addr.s_addr = htonl(INADDR_ANY); + sin.sin_addr.s_addr = pg_hton32(INADDR_ANY); else if (hints.ai_flags & AI_NUMERICHOST) { if (!inet_aton(node, &sin.sin_addr)) @@ -221,13 +222,13 @@ getaddrinfo(const char *node, const char *service, else { if (hints.ai_flags & AI_PASSIVE) - sin.sin_addr.s_addr = htonl(INADDR_ANY); + sin.sin_addr.s_addr = pg_hton32(INADDR_ANY); else - sin.sin_addr.s_addr = htonl(INADDR_LOOPBACK); + sin.sin_addr.s_addr = pg_hton32(INADDR_LOOPBACK); } if (service) - sin.sin_port = htons((unsigned short) atoi(service)); + sin.sin_port = pg_hton16((unsigned short) atoi(service)); #ifdef HAVE_STRUCT_SOCKADDR_STORAGE_SS_LEN sin.sin_len = sizeof(sin); @@ -402,9 +403,9 @@ getnameinfo(const struct sockaddr *sa, int salen, if (sa->sa_family == AF_INET) { ret = snprintf(service, servicelen, "%d", - ntohs(((struct sockaddr_in *) sa)->sin_port)); + pg_ntoh16(((struct sockaddr_in *) sa)->sin_port)); } - if (ret == -1 || ret >= servicelen) + if (ret < 0 || ret >= servicelen) return EAI_MEMORY; } diff --git a/src/port/getpeereid.c b/src/port/getpeereid.c index 53fa663122..fa871ae68f 100644 --- a/src/port/getpeereid.c +++ b/src/port/getpeereid.c @@ -3,7 +3,7 @@ * getpeereid.c * get peer userid for UNIX-domain socket connection * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * * * IDENTIFICATION diff --git a/src/port/getrusage.c b/src/port/getrusage.c index d029fc2c76..229b5bb7fe 100644 --- a/src/port/getrusage.c +++ b/src/port/getrusage.c @@ -3,7 +3,7 @@ * getrusage.c * get information about resource utilisation * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * diff --git a/src/port/inet_aton.c b/src/port/inet_aton.c index 68efd4723e..adaf18adb3 100644 --- a/src/port/inet_aton.c +++ b/src/port/inet_aton.c @@ -43,6 +43,8 @@ #include #include +#include "port/pg_bswap.h" + /* * Check whether "cp" is a valid ascii representation * of an Internet address and convert to a binary address. @@ -142,6 +144,6 @@ inet_aton(const char *cp, struct in_addr *addr) break; } if (addr) - addr->s_addr = htonl(val); + addr->s_addr = pg_hton32(val); return 1; } diff --git a/src/port/inet_net_ntop.c b/src/port/inet_net_ntop.c index f27fda96ca..90dfeed42d 100644 --- a/src/port/inet_net_ntop.c +++ b/src/port/inet_net_ntop.c @@ -14,7 +14,7 @@ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
* - * src/backend/utils/adt/inet_net_ntop.c + * src/port/inet_net_ntop.c */ #if defined(LIBC_SCCS) && !defined(lint) diff --git a/src/port/isinf.c b/src/port/isinf.c index 570aa40fa2..9990f9c422 100644 --- a/src/port/isinf.c +++ b/src/port/isinf.c @@ -2,7 +2,7 @@ * * isinf.c * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * diff --git a/src/port/kill.c b/src/port/kill.c index 58343c4152..fee5abfdfa 100644 --- a/src/port/kill.c +++ b/src/port/kill.c @@ -3,7 +3,7 @@ * kill.c * kill() * - * Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Copyright (c) 1996-2018, PostgreSQL Global Development Group * * This is a replacement version of kill for Win32 which sends * signals that the backend can recognize. diff --git a/src/port/mkdtemp.c b/src/port/mkdtemp.c index 54844cb2f5..e0b3ada28a 100644 --- a/src/port/mkdtemp.c +++ b/src/port/mkdtemp.c @@ -3,7 +3,7 @@ * mkdtemp.c * create a mode-0700 temporary directory * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * * * IDENTIFICATION diff --git a/src/port/noblock.c b/src/port/noblock.c index 673fa8aa3c..bd26bd5f52 100644 --- a/src/port/noblock.c +++ b/src/port/noblock.c @@ -3,7 +3,7 @@ * noblock.c * set a file descriptor as blocking or non-blocking * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * IDENTIFICATION diff --git a/src/port/open.c b/src/port/open.c index 17a7145ad9..a2f1044a20 100644 --- a/src/port/open.c +++ b/src/port/open.c @@ -4,7 +4,7 @@ * Win32 open() replacement * * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * * src/port/open.c * @@ -71,6 +71,20 @@ pgwin32_open(const char *fileName, int fileFlags,...) _O_SHORT_LIVED | O_DSYNC | O_DIRECT | (O_CREAT | O_TRUNC | O_EXCL) | (O_TEXT | O_BINARY))) == fileFlags); +#ifdef FRONTEND + + /* + * Since PostgreSQL 12, those concurrent-safe versions of open() and + * fopen() can be used by frontends, having as side-effect to switch the + * file-translation mode from O_TEXT to O_BINARY if none is specified. + * Caller may want to enforce the binary or text mode, but if nothing is + * defined make sure that the default mode maps with what versions older + * than 12 have been doing. 
+ */ + if ((fileFlags & O_BINARY) == 0) + fileFlags |= O_TEXT; +#endif + sa.nLength = sizeof(sa); sa.bInheritHandle = TRUE; sa.lpSecurityDescriptor = NULL; diff --git a/src/port/path.c b/src/port/path.c index 2578393624..1ac1dbea4f 100644 --- a/src/port/path.c +++ b/src/port/path.c @@ -3,7 +3,7 @@ * path.c * portable path handling routines * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * diff --git a/src/port/pg_crc32c_armv8.c b/src/port/pg_crc32c_armv8.c new file mode 100644 index 0000000000..820b2c24b4 --- /dev/null +++ b/src/port/pg_crc32c_armv8.c @@ -0,0 +1,75 @@ +/*------------------------------------------------------------------------- + * + * pg_crc32c_armv8.c + * Compute CRC-32C checksum using ARMv8 CRC Extension instructions + * + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group + * Portions Copyright (c) 1994, Regents of the University of California + * + * + * IDENTIFICATION + * src/port/pg_crc32c_armv8.c + * + *------------------------------------------------------------------------- + */ +#include "c.h" + +#include "port/pg_crc32c.h" + +#include + +pg_crc32c +pg_comp_crc32c_armv8(pg_crc32c crc, const void *data, size_t len) +{ + const unsigned char *p = data; + const unsigned char *pend = p + len; + + /* + * ARMv8 doesn't require alignment, but aligned memory access is + * significantly faster. Process leading bytes so that the loop below + * starts with a pointer aligned to eight bytes. + */ + if (!PointerIsAligned(p, uint16) && + p + 1 <= pend) + { + crc = __crc32cb(crc, *p); + p += 1; + } + if (!PointerIsAligned(p, uint32) && + p + 2 <= pend) + { + crc = __crc32ch(crc, *(uint16 *) p); + p += 2; + } + if (!PointerIsAligned(p, uint64) && + p + 4 <= pend) + { + crc = __crc32cw(crc, *(uint32 *) p); + p += 4; + } + + /* Process eight bytes at a time, as far as we can. */ + while (p + 8 <= pend) + { + crc = __crc32cd(crc, *(uint64 *) p); + p += 8; + } + + /* Process remaining 0-7 bytes. */ + if (p + 4 <= pend) + { + crc = __crc32cw(crc, *(uint32 *) p); + p += 4; + } + if (p + 2 <= pend) + { + crc = __crc32ch(crc, *(uint16 *) p); + p += 2; + } + if (p < pend) + { + crc = __crc32cb(crc, *p); + } + + return crc; +} diff --git a/src/port/pg_crc32c_armv8_choose.c b/src/port/pg_crc32c_armv8_choose.c new file mode 100644 index 0000000000..c339af7f16 --- /dev/null +++ b/src/port/pg_crc32c_armv8_choose.c @@ -0,0 +1,95 @@ +/*------------------------------------------------------------------------- + * + * pg_crc32c_armv8_choose.c + * Choose between ARMv8 and software CRC-32C implementation. + * + * On first call, checks if the CPU we're running on supports the ARMv8 + * CRC Extension. If it does, use the special instructions for CRC-32C + * computation. Otherwise, fall back to the pure software implementation + * (slicing-by-8). + * + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group + * Portions Copyright (c) 1994, Regents of the University of California + * + * + * IDENTIFICATION + * src/port/pg_crc32c_armv8_choose.c + * + *------------------------------------------------------------------------- + */ + +#ifndef FRONTEND +#include "postgres.h" +#else +#include "postgres_fe.h" +#endif + +#include +#include + +#include "port/pg_crc32c.h" + + +static sigjmp_buf illegal_instruction_jump; + +/* + * Probe by trying to execute pg_comp_crc32c_armv8(). 
If the instruction + * isn't available, we expect to get SIGILL, which we can trap. + */ +static void +illegal_instruction_handler(SIGNAL_ARGS) +{ + siglongjmp(illegal_instruction_jump, 1); +} + +static bool +pg_crc32c_armv8_available(void) +{ + uint64 data = 42; + int result; + + /* + * Be careful not to do anything that might throw an error while we have + * the SIGILL handler set to a nonstandard value. + */ + pqsignal(SIGILL, illegal_instruction_handler); + if (sigsetjmp(illegal_instruction_jump, 1) == 0) + { + /* Rather than hard-wiring an expected result, compare to SB8 code */ + result = (pg_comp_crc32c_armv8(0, &data, sizeof(data)) == + pg_comp_crc32c_sb8(0, &data, sizeof(data))); + } + else + { + /* We got the SIGILL trap */ + result = -1; + } + pqsignal(SIGILL, SIG_DFL); + +#ifndef FRONTEND + /* We don't expect this case, so complain loudly */ + if (result == 0) + elog(ERROR, "crc32 hardware and software results disagree"); + + elog(DEBUG1, "using armv8 crc32 hardware = %d", (result > 0)); +#endif + + return (result > 0); +} + +/* + * This gets called on the first call. It replaces the function pointer + * so that subsequent calls are routed directly to the chosen implementation. + */ +static pg_crc32c +pg_comp_crc32c_choose(pg_crc32c crc, const void *data, size_t len) +{ + if (pg_crc32c_armv8_available()) + pg_comp_crc32c = pg_comp_crc32c_armv8; + else + pg_comp_crc32c = pg_comp_crc32c_sb8; + + return pg_comp_crc32c(crc, data, len); +} + +pg_crc32c (*pg_comp_crc32c) (pg_crc32c crc, const void *data, size_t len) = pg_comp_crc32c_choose; diff --git a/src/port/pg_crc32c_sb8.c b/src/port/pg_crc32c_sb8.c index dfd6cd9f49..5205ba9cdc 100644 --- a/src/port/pg_crc32c_sb8.c +++ b/src/port/pg_crc32c_sb8.c @@ -8,7 +8,7 @@ * Generation", IEEE Transactions on Computers, vol.57, no. 11, * pp. 1550-1560, November 2008, doi:10.1109/TC.2008.85 * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * diff --git a/src/port/pg_crc32c_sse42.c b/src/port/pg_crc32c_sse42.c index d698124121..b9def7e2ea 100644 --- a/src/port/pg_crc32c_sse42.c +++ b/src/port/pg_crc32c_sse42.c @@ -3,7 +3,7 @@ * pg_crc32c_sse42.c * Compute CRC-32C checksum using Intel SSE 4.2 instructions. * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * diff --git a/src/port/pg_crc32c_choose.c b/src/port/pg_crc32c_sse42_choose.c similarity index 74% rename from src/port/pg_crc32c_choose.c rename to src/port/pg_crc32c_sse42_choose.c index e82c9c4b27..c2d1242d91 100644 --- a/src/port/pg_crc32c_choose.c +++ b/src/port/pg_crc32c_sse42_choose.c @@ -1,18 +1,19 @@ /*------------------------------------------------------------------------- * - * pg_crc32c_choose.c - * Choose which CRC-32C implementation to use, at runtime. + * pg_crc32c_sse42_choose.c + * Choose between Intel SSE 4.2 and software CRC-32C implementation. * - * Try to the special CRC instructions introduced in Intel SSE 4.2, - * if available on the platform we're running on, but fall back to the - * slicing-by-8 implementation otherwise. + * On first call, checks if the CPU we're running on supports Intel SSE + * 4.2. If it does, use the special SSE instructions for CRC-32C + * computation. 
Otherwise, fall back to the pure software implementation + * (slicing-by-8). * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * * IDENTIFICATION - * src/port/pg_crc32c_choose.c + * src/port/pg_crc32c_sse42_choose.c * *------------------------------------------------------------------------- */ diff --git a/src/port/pg_strong_random.c b/src/port/pg_strong_random.c index c6ee5ea1d4..f9a06d6606 100644 --- a/src/port/pg_strong_random.c +++ b/src/port/pg_strong_random.c @@ -6,7 +6,7 @@ * Our definition of "strong" is that it's suitable for generating random * salts and query cancellation keys, during authentication. * - * Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Copyright (c) 1996-2018, PostgreSQL Global Development Group * * IDENTIFICATION * src/port/pg_strong_random.c @@ -103,6 +103,35 @@ pg_strong_random(void *buf, size_t len) * When built with OpenSSL, use OpenSSL's RAND_bytes function. */ #if defined(USE_OPENSSL_RANDOM) + int i; + + /* + * Check that OpenSSL's CSPRNG has been sufficiently seeded, and if not + * add more seed data using RAND_poll(). With some older versions of + * OpenSSL, it may be necessary to call RAND_poll() a number of times. + */ +#define NUM_RAND_POLL_RETRIES 8 + + for (i = 0; i < NUM_RAND_POLL_RETRIES; i++) + { + if (RAND_status() == 1) + { + /* The CSPRNG is sufficiently seeded */ + break; + } + + if (RAND_poll() == 0) + { + /* + * RAND_poll() failed to generate any seed data, which means that + * RAND_bytes() will probably fail. For now, just fall through + * and let that happen. XXX: maybe we could seed it some other + * way. + */ + break; + } + } + if (RAND_bytes(buf, len) == 1) return true; return false; diff --git a/src/port/pgcheckdir.c b/src/port/pgcheckdir.c index 965249eeaa..b5b999b385 100644 --- a/src/port/pgcheckdir.c +++ b/src/port/pgcheckdir.c @@ -5,7 +5,7 @@ * A simple subroutine to check whether a directory exists and is empty or not. * Useful in both initdb and the backend. * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * *------------------------------------------------------------------------- diff --git a/src/port/pgsleep.c b/src/port/pgsleep.c index f2db68a33d..48536f4b7a 100644 --- a/src/port/pgsleep.c +++ b/src/port/pgsleep.c @@ -4,7 +4,7 @@ * Portable delay handling. * * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * * src/port/pgsleep.c * diff --git a/src/port/pgstrcasecmp.c b/src/port/pgstrcasecmp.c index d12778da8d..3aaea305c0 100644 --- a/src/port/pgstrcasecmp.c +++ b/src/port/pgstrcasecmp.c @@ -18,7 +18,7 @@ * C library thinks the locale is. * * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * * src/port/pgstrcasecmp.c * diff --git a/src/port/pqsignal.c b/src/port/pqsignal.c index f176387ca2..5d8d5042b0 100644 --- a/src/port/pqsignal.c +++ b/src/port/pqsignal.c @@ -4,7 +4,7 @@ * reliable BSD-style signal(2) routine stolen from RWW who stole it * from Stevens... 
* - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * diff --git a/src/port/pread.c b/src/port/pread.c new file mode 100644 index 0000000000..a22d949cca --- /dev/null +++ b/src/port/pread.c @@ -0,0 +1,55 @@ +/*------------------------------------------------------------------------- + * + * pread.c + * Implementation of pread(2) for platforms that lack one. + * + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group + * + * IDENTIFICATION + * src/port/pread.c + * + * Note that this implementation changes the current file position, unlike + * the POSIX function, so we use the name pg_pread(). + * + *------------------------------------------------------------------------- + */ + + +#include "postgres.h" + +#ifdef WIN32 +#include +#else +#include +#endif + +ssize_t +pg_pread(int fd, void *buf, size_t size, off_t offset) +{ +#ifdef WIN32 + OVERLAPPED overlapped = {0}; + HANDLE handle; + DWORD result; + + handle = (HANDLE) _get_osfhandle(fd); + if (handle == INVALID_HANDLE_VALUE) + { + errno = EBADF; + return -1; + } + + overlapped.Offset = offset; + if (!ReadFile(handle, buf, size, &result, &overlapped)) + { + _dosmaperr(GetLastError()); + return -1; + } + + return result; +#else + if (lseek(fd, offset, SEEK_SET) < 0) + return -1; + + return read(fd, buf, size); +#endif +} diff --git a/src/port/pwrite.c b/src/port/pwrite.c new file mode 100644 index 0000000000..f3e228cf4f --- /dev/null +++ b/src/port/pwrite.c @@ -0,0 +1,55 @@ +/*------------------------------------------------------------------------- + * + * pwrite.c + * Implementation of pwrite(2) for platforms that lack one. + * + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group + * + * IDENTIFICATION + * src/port/pwrite.c + * + * Note that this implementation changes the current file position, unlike + * the POSIX function, so we use the name pg_write(). 
+ * + *------------------------------------------------------------------------- + */ + + +#include "postgres.h" + +#ifdef WIN32 +#include +#else +#include +#endif + +ssize_t +pg_pwrite(int fd, const void *buf, size_t size, off_t offset) +{ +#ifdef WIN32 + OVERLAPPED overlapped = {0}; + HANDLE handle; + DWORD result; + + handle = (HANDLE) _get_osfhandle(fd); + if (handle == INVALID_HANDLE_VALUE) + { + errno = EBADF; + return -1; + } + + overlapped.Offset = offset; + if (!WriteFile(handle, buf, size, &result, &overlapped)) + { + _dosmaperr(GetLastError()); + return -1; + } + + return result; +#else + if (lseek(fd, offset, SEEK_SET) < 0) + return -1; + + return write(fd, buf, size); +#endif +} diff --git a/src/port/quotes.c b/src/port/quotes.c index d7ea934c8b..29770c7a00 100644 --- a/src/port/quotes.c +++ b/src/port/quotes.c @@ -3,7 +3,7 @@ * quotes.c * string quoting and escaping functions * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * diff --git a/src/port/random.c b/src/port/random.c index 5071b31b5d..3996225c92 100644 --- a/src/port/random.c +++ b/src/port/random.c @@ -3,7 +3,7 @@ * random.c * random() wrapper * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * diff --git a/src/port/rint.c b/src/port/rint.c index d27fdfa6b4..d59d9ab774 100644 --- a/src/port/rint.c +++ b/src/port/rint.c @@ -12,7 +12,6 @@ */ #include "c.h" -#include #include /* diff --git a/src/port/snprintf.c b/src/port/snprintf.c index 231e5d6bdb..c79cb88497 100644 --- a/src/port/snprintf.c +++ b/src/port/snprintf.c @@ -2,6 +2,7 @@ * Copyright (c) 1983, 1995, 1996 Eric P. Allman * Copyright (c) 1988, 1993 * The Regents of the University of California. All rights reserved. + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions @@ -32,28 +33,23 @@ #include "c.h" -#include -#ifdef _MSC_VER -#include /* for _isnan */ -#endif -#include #include -#ifndef WIN32 -#include -#endif -#include -#ifndef NL_ARGMAX -#define NL_ARGMAX 16 -#endif +/* + * We used to use the platform's NL_ARGMAX here, but that's a bad idea, + * first because the point of this module is to remove platform dependencies + * not perpetuate them, and second because some platforms use ridiculously + * large values, leading to excessive stack consumption in dopr(). + */ +#define PG_NL_ARGMAX 31 /* * SNPRINTF, VSNPRINTF and friends * * These versions have been grabbed off the net. They have been - * cleaned up to compile properly and support for most of the Single Unix - * Specification has been added. Remaining unimplemented features are: + * cleaned up to compile properly and support for most of the C99 + * specification has been added. Remaining unimplemented features are: * * 1. No locale support: the radix character is always '.' and the ' * (single quote) format flag is ignored. @@ -66,26 +62,33 @@ * * 5. Space and '#' flags are not implemented. * + * In addition, we support some extensions over C99: + * + * 1. Argument order control through "%n$" and "*n$", as required by POSIX. + * + * 2. 
"%m" expands to the value of strerror(errno), where errno is the + * value that variable had at the start of the call. This is a glibc + * extension, but a very useful one. + * * - * The result values of these functions are not the same across different - * platforms. This implementation is compatible with the Single Unix Spec: + * Historically the result values of sprintf/snprintf varied across platforms. + * This implementation now follows the C99 standard: * - * 1. -1 is returned only if processing is abandoned due to an invalid - * parameter, such as incorrect format string. (Although not required by - * the spec, this happens only when no characters have yet been transmitted - * to the destination.) + * 1. -1 is returned if an error is detected in the format string, or if + * a write to the target stream fails (as reported by fwrite). Note that + * overrunning snprintf's target buffer is *not* an error. * - * 2. For snprintf and sprintf, 0 is returned if str == NULL or count == 0; - * no data has been stored. + * 2. For successful writes to streams, the actual number of bytes written + * to the stream is returned. * - * 3. Otherwise, the number of bytes actually transmitted to the destination - * is returned (excluding the trailing '\0' for snprintf and sprintf). + * 3. For successful sprintf/snprintf, the number of bytes that would have + * been written to an infinite-size buffer (excluding the trailing '\0') + * is returned. snprintf will truncate its output to fit in the buffer + * (ensuring a trailing '\0' unless count == 0), but this is not reflected + * in the function result. * - * For snprintf with nonzero count, the result cannot be more than count-1 - * (a trailing '\0' is always stored); it is not possible to distinguish - * buffer overrun from exact fit. This is unlike some implementations that - * return the number of bytes that would have been needed for the complete - * result string. + * snprintf buffer overrun can be detected by checking for function result + * greater than or equal to the supplied count. */ /************************************************************** @@ -99,20 +102,34 @@ /* Prevent recursion */ #undef vsnprintf #undef snprintf +#undef vsprintf #undef sprintf #undef vfprintf #undef fprintf +#undef vprintf #undef printf -/* Info about where the formatted output is going */ +/* + * Info about where the formatted output is going. + * + * dopr and subroutines will not write at/past bufend, but snprintf + * reserves one byte, ensuring it may place the trailing '\0' there. + * + * In snprintf, we use nchars to count the number of bytes dropped on the + * floor due to buffer overrun. The correct result of snprintf is thus + * (bufptr - bufstart) + nchars. (This isn't as inconsistent as it might + * seem: nchars is the number of emitted bytes that are not in the buffer now, + * either because we sent them to the stream or because we couldn't fit them + * into the buffer to begin with.) 
+ */ typedef struct { char *bufptr; /* next buffer output position */ char *bufstart; /* first buffer element */ - char *bufend; /* last buffer element, or NULL */ + char *bufend; /* last+1 buffer element, or NULL */ /* bufend == NULL is for sprintf, where we assume buf is big enough */ FILE *stream; /* eventual output destination, or NULL */ - int nchars; /* # chars already sent to stream */ + int nchars; /* # chars sent to stream, or dropped */ bool failed; /* call is a failure; errno is set */ } PrintfTarget; @@ -136,7 +153,7 @@ typedef union { int i; long l; - int64 ll; + long long ll; double d; char *cptr; } PrintfArgValue; @@ -146,21 +163,39 @@ static void flushbuffer(PrintfTarget *target); static void dopr(PrintfTarget *target, const char *format, va_list args); +/* + * Externally visible entry points. + * + * All of these are just wrappers around dopr(). Note it's essential that + * they not change the value of "errno" before reaching dopr(). + */ + int pg_vsnprintf(char *str, size_t count, const char *fmt, va_list args) { PrintfTarget target; + char onebyte[1]; - if (str == NULL || count == 0) - return 0; + /* + * C99 allows the case str == NULL when count == 0. Rather than + * special-casing this situation further down, we substitute a one-byte + * local buffer. Callers cannot tell, since the function result doesn't + * depend on count. + */ + if (count == 0) + { + str = onebyte; + count = 1; + } target.bufstart = target.bufptr = str; target.bufend = str + count - 1; target.stream = NULL; - /* target.nchars is unused in this case */ + target.nchars = 0; target.failed = false; dopr(&target, fmt, args); *(target.bufptr) = '\0'; - return target.failed ? -1 : (target.bufptr - target.bufstart); + return target.failed ? -1 : (target.bufptr - target.bufstart + + target.nchars); } int @@ -175,21 +210,20 @@ pg_snprintf(char *str, size_t count, const char *fmt,...) return len; } -static int +int pg_vsprintf(char *str, const char *fmt, va_list args) { PrintfTarget target; - if (str == NULL) - return 0; target.bufstart = target.bufptr = str; target.bufend = NULL; target.stream = NULL; - /* target.nchars is unused in this case */ + target.nchars = 0; /* not really used in this case */ target.failed = false; dopr(&target, fmt, args); *(target.bufptr) = '\0'; - return target.failed ? -1 : (target.bufptr - target.bufstart); + return target.failed ? -1 : (target.bufptr - target.bufstart + + target.nchars); } int @@ -216,7 +250,7 @@ pg_vfprintf(FILE *stream, const char *fmt, va_list args) return -1; } target.bufstart = target.bufptr = buffer; - target.bufend = buffer + sizeof(buffer) - 1; + target.bufend = buffer + sizeof(buffer); /* use the whole buffer */ target.stream = stream; target.nchars = 0; target.failed = false; @@ -238,6 +272,12 @@ pg_fprintf(FILE *stream, const char *fmt,...) return len; } +int +pg_vprintf(const char *fmt, va_list args) +{ + return pg_vfprintf(stdout, fmt, args); +} + int pg_printf(const char *fmt,...) { @@ -259,6 +299,10 @@ flushbuffer(PrintfTarget *target) { size_t nc = target->bufptr - target->bufstart; + /* + * Don't write anything if we already failed; this is to ensure we + * preserve the original failure's errno. 
+ */ if (!target->failed && nc > 0) { size_t written; @@ -272,10 +316,12 @@ flushbuffer(PrintfTarget *target) } -static void fmtstr(char *value, int leftjust, int minlen, int maxwidth, +static bool find_arguments(const char *format, va_list args, + PrintfArgValue *argvalues); +static void fmtstr(const char *value, int leftjust, int minlen, int maxwidth, int pointflag, PrintfTarget *target); static void fmtptr(void *value, PrintfTarget *target); -static void fmtint(int64 value, char type, int forcesign, +static void fmtint(long long value, char type, int forcesign, int leftjust, int minlen, int zpad, int precision, int pointflag, PrintfTarget *target); static void fmtchar(int value, int leftjust, int minlen, PrintfTarget *target); @@ -284,23 +330,55 @@ static void fmtfloat(double value, char type, int forcesign, PrintfTarget *target); static void dostr(const char *str, int slen, PrintfTarget *target); static void dopr_outch(int c, PrintfTarget *target); +static void dopr_outchmulti(int c, int slen, PrintfTarget *target); static int adjust_sign(int is_negative, int forcesign, int *signvalue); -static void adjust_padlen(int minlen, int vallen, int leftjust, int *padlen); -static void leading_pad(int zpad, int *signvalue, int *padlen, +static int compute_padlen(int minlen, int vallen, int leftjust); +static void leading_pad(int zpad, int signvalue, int *padlen, PrintfTarget *target); -static void trailing_pad(int *padlen, PrintfTarget *target); +static void trailing_pad(int padlen, PrintfTarget *target); + +/* + * If strchrnul exists (it's a glibc-ism), it's a good bit faster than the + * equivalent manual loop. If it doesn't exist, provide a replacement. + * + * Note: glibc declares this as returning "char *", but that would require + * casting away const internally, so we don't follow that detail. + */ +#ifndef HAVE_STRCHRNUL + +static inline const char * +strchrnul(const char *s, int c) +{ + while (*s != '\0' && *s != c) + s++; + return s; +} + +#else + +/* + * glibc's declares strchrnul only if _GNU_SOURCE is defined. + * While we typically use that on glibc platforms, configure will set + * HAVE_STRCHRNUL whether it's used or not. Fill in the missing declaration + * so that this file will compile cleanly with or without _GNU_SOURCE. + */ +#ifndef _GNU_SOURCE +extern char *strchrnul(const char *s, int c); +#endif + +#endif /* HAVE_STRCHRNUL */ /* - * dopr(): poor man's version of doprintf + * dopr(): the guts of *printf for all cases. */ static void dopr(PrintfTarget *target, const char *format, va_list args) { - const char *format_start = format; + int save_errno = errno; + const char *first_pct = NULL; int ch; bool have_dollar; - bool have_non_dollar; bool have_star; bool afterstar; int accum; @@ -312,225 +390,62 @@ dopr(PrintfTarget *target, const char *format, va_list args) int precision; int zpad; int forcesign; - int last_dollar; int fmtpos; int cvalue; - int64 numvalue; + long long numvalue; double fvalue; char *strvalue; - int i; - PrintfArgType argtypes[NL_ARGMAX + 1]; - PrintfArgValue argvalues[NL_ARGMAX + 1]; + PrintfArgValue argvalues[PG_NL_ARGMAX + 1]; /* - * Parse the format string to determine whether there are %n$ format - * specs, and identify the types and order of the format parameters. + * Initially, we suppose the format string does not use %n$. The first + * time we come to a conversion spec that has that, we'll call + * find_arguments() to check for consistent use of %n$ and fill the + * argvalues array with the argument values in the correct order. 
*/ - have_dollar = have_non_dollar = false; - last_dollar = 0; - MemSet(argtypes, 0, sizeof(argtypes)); + have_dollar = false; - while ((ch = *format++) != '\0') + while (*format != '\0') { - if (ch != '%') - continue; - longflag = longlongflag = pointflag = 0; - fmtpos = accum = 0; - afterstar = false; -nextch1: - ch = *format++; - if (ch == '\0') - break; /* illegal, but we don't complain */ - switch (ch) + /* Locate next conversion specifier */ + if (*format != '%') { - case '-': - case '+': - goto nextch1; - case '0': - case '1': - case '2': - case '3': - case '4': - case '5': - case '6': - case '7': - case '8': - case '9': - accum = accum * 10 + (ch - '0'); - goto nextch1; - case '.': - pointflag = 1; - accum = 0; - goto nextch1; - case '*': - if (afterstar) - have_non_dollar = true; /* multiple stars */ - afterstar = true; - accum = 0; - goto nextch1; - case '$': - have_dollar = true; - if (accum <= 0 || accum > NL_ARGMAX) - goto bad_format; - if (afterstar) - { - if (argtypes[accum] && - argtypes[accum] != ATYPE_INT) - goto bad_format; - argtypes[accum] = ATYPE_INT; - last_dollar = Max(last_dollar, accum); - afterstar = false; - } - else - fmtpos = accum; - accum = 0; - goto nextch1; - case 'l': - if (longflag) - longlongflag = 1; - else - longflag = 1; - goto nextch1; - case 'z': -#if SIZEOF_SIZE_T == 8 -#ifdef HAVE_LONG_INT_64 - longflag = 1; -#elif defined(HAVE_LONG_LONG_INT_64) - longlongflag = 1; -#else -#error "Don't know how to print 64bit integers" -#endif -#else - /* assume size_t is same size as int */ -#endif - goto nextch1; - case 'h': - case '\'': - /* ignore these */ - goto nextch1; - case 'd': - case 'i': - case 'o': - case 'u': - case 'x': - case 'X': - if (fmtpos) - { - PrintfArgType atype; + /* Scan to next '%' or end of string */ + const char *next_pct = strchrnul(format + 1, '%'); - if (longlongflag) - atype = ATYPE_LONGLONG; - else if (longflag) - atype = ATYPE_LONG; - else - atype = ATYPE_INT; - if (argtypes[fmtpos] && - argtypes[fmtpos] != atype) - goto bad_format; - argtypes[fmtpos] = atype; - last_dollar = Max(last_dollar, fmtpos); - } - else - have_non_dollar = true; - break; - case 'c': - if (fmtpos) - { - if (argtypes[fmtpos] && - argtypes[fmtpos] != ATYPE_INT) - goto bad_format; - argtypes[fmtpos] = ATYPE_INT; - last_dollar = Max(last_dollar, fmtpos); - } - else - have_non_dollar = true; - break; - case 's': - case 'p': - if (fmtpos) - { - if (argtypes[fmtpos] && - argtypes[fmtpos] != ATYPE_CHARPTR) - goto bad_format; - argtypes[fmtpos] = ATYPE_CHARPTR; - last_dollar = Max(last_dollar, fmtpos); - } - else - have_non_dollar = true; - break; - case 'e': - case 'E': - case 'f': - case 'g': - case 'G': - if (fmtpos) - { - if (argtypes[fmtpos] && - argtypes[fmtpos] != ATYPE_DOUBLE) - goto bad_format; - argtypes[fmtpos] = ATYPE_DOUBLE; - last_dollar = Max(last_dollar, fmtpos); - } - else - have_non_dollar = true; + /* Dump literal data we just scanned over */ + dostr(format, next_pct - format, target); + if (target->failed) break; - case '%': + + if (*next_pct == '\0') break; + format = next_pct; } /* - * If we finish the spec with afterstar still set, there's a - * non-dollar star in there. + * Remember start of first conversion spec; if we find %n$, then it's + * sufficient for find_arguments() to start here, without rescanning + * earlier literal text. */ - if (afterstar) - have_non_dollar = true; - } + if (first_pct == NULL) + first_pct = format; - /* Per spec, you use either all dollar or all not. 
*/ - if (have_dollar && have_non_dollar) - goto bad_format; + /* Process conversion spec starting at *format */ + format++; - /* - * In dollar mode, collect the arguments in physical order. - */ - for (i = 1; i <= last_dollar; i++) - { - switch (argtypes[i]) + /* Fast path for conversion spec that is exactly %s */ + if (*format == 's') { - case ATYPE_NONE: - goto bad_format; - case ATYPE_INT: - argvalues[i].i = va_arg(args, int); - break; - case ATYPE_LONG: - argvalues[i].l = va_arg(args, long); - break; - case ATYPE_LONGLONG: - argvalues[i].ll = va_arg(args, int64); - break; - case ATYPE_DOUBLE: - argvalues[i].d = va_arg(args, double); - break; - case ATYPE_CHARPTR: - argvalues[i].cptr = va_arg(args, char *); + format++; + strvalue = va_arg(args, char *); + Assert(strvalue != NULL); + dostr(strvalue, strlen(strvalue), target); + if (target->failed) break; - } - } - - /* - * At last we can parse the format for real. - */ - format = format_start; - while ((ch = *format++) != '\0') - { - if (target->failed) - break; - - if (ch != '%') - { - dopr_outch(ch, target); continue; } + fieldwidth = precision = zpad = leftjust = forcesign = 0; longflag = longlongflag = pointflag = 0; fmtpos = accum = 0; @@ -574,7 +489,11 @@ dopr(PrintfTarget *target, const char *format, va_list args) case '*': if (have_dollar) { - /* process value after reading n$ */ + /* + * We'll process value after reading n$. Note it's OK to + * assume have_dollar is set correctly, because in a valid + * format string the initial % must have had n$ if * does. + */ afterstar = true; } else @@ -605,6 +524,14 @@ dopr(PrintfTarget *target, const char *format, va_list args) accum = 0; goto nextch2; case '$': + /* First dollar sign? */ + if (!have_dollar) + { + /* Yup, so examine all conversion specs in format */ + if (!find_arguments(first_pct, args, argvalues)) + goto bad_format; + have_dollar = true; + } if (afterstar) { /* fetch and process star value */ @@ -678,7 +605,7 @@ dopr(PrintfTarget *target, const char *format, va_list args) else { if (longlongflag) - numvalue = va_arg(args, int64); + numvalue = va_arg(args, long long); else if (longflag) numvalue = va_arg(args, long); else @@ -701,7 +628,7 @@ dopr(PrintfTarget *target, const char *format, va_list args) if (have_dollar) { if (longlongflag) - numvalue = (uint64) argvalues[fmtpos].ll; + numvalue = (unsigned long long) argvalues[fmtpos].ll; else if (longflag) numvalue = (unsigned long) argvalues[fmtpos].l; else @@ -710,7 +637,7 @@ dopr(PrintfTarget *target, const char *format, va_list args) else { if (longlongflag) - numvalue = (uint64) va_arg(args, int64); + numvalue = (unsigned long long) va_arg(args, long long); else if (longflag) numvalue = (unsigned long) va_arg(args, long); else @@ -745,6 +672,8 @@ dopr(PrintfTarget *target, const char *format, va_list args) strvalue = argvalues[fmtpos].cptr; else strvalue = va_arg(args, char *); + /* Whine if someone tries to print a NULL string */ + Assert(strvalue != NULL); fmtstr(strvalue, leftjust, fieldwidth, precision, pointflag, target); break; @@ -777,10 +706,23 @@ dopr(PrintfTarget *target, const char *format, va_list args) precision, pointflag, target); break; + case 'm': + { + char errbuf[PG_STRERROR_R_BUFLEN]; + const char *errm = strerror_r(save_errno, + errbuf, sizeof(errbuf)); + + dostr(errm, strlen(errm), target); + } + break; case '%': dopr_outch('%', target); break; } + + /* Check for failure after each conversion spec */ + if (target->failed) + break; } return; @@ -790,43 +732,261 @@ dopr(PrintfTarget *target, const 
char *format, va_list args) target->failed = true; } -static size_t -pg_strnlen(const char *str, size_t maxlen) +/* + * find_arguments(): sort out the arguments for a format spec with %n$ + * + * If format is valid, return true and fill argvalues[i] with the value + * for the conversion spec that has %i$ or *i$. Else return false. + */ +static bool +find_arguments(const char *format, va_list args, + PrintfArgValue *argvalues) { - const char *p = str; - - while (maxlen-- > 0 && *p) - p++; - return p - str; -} + int ch; + bool afterstar; + int accum; + int longlongflag; + int longflag; + int fmtpos; + int i; + int last_dollar; + PrintfArgType argtypes[PG_NL_ARGMAX + 1]; -static void -fmtstr(char *value, int leftjust, int minlen, int maxwidth, - int pointflag, PrintfTarget *target) -{ - int padlen, - vallen; /* amount to pad */ + /* Initialize to "no dollar arguments known" */ + last_dollar = 0; + MemSet(argtypes, 0, sizeof(argtypes)); /* - * If a maxwidth (precision) is specified, we must not fetch more bytes - * than that. + * This loop must accept the same format strings as the one in dopr(). + * However, we don't need to analyze them to the same level of detail. + * + * Since we're only called if there's a dollar-type spec somewhere, we can + * fail immediately if we find a non-dollar spec. Per the C99 standard, + * all argument references in the format string must be one or the other. */ - if (pointflag) - vallen = pg_strnlen(value, maxwidth); - else - vallen = strlen(value); - - adjust_padlen(minlen, vallen, leftjust, &padlen); - - while (padlen > 0) + while (*format != '\0') { - dopr_outch(' ', target); - --padlen; - } + /* Locate next conversion specifier */ + if (*format != '%') + { + /* Unlike dopr, we can just quit if there's no more specifiers */ + format = strchr(format + 1, '%'); + if (format == NULL) + break; + } - dostr(value, vallen, target); + /* Process conversion spec starting at *format */ + format++; + longflag = longlongflag = 0; + fmtpos = accum = 0; + afterstar = false; +nextch1: + ch = *format++; + if (ch == '\0') + break; /* illegal, but we don't complain */ + switch (ch) + { + case '-': + case '+': + goto nextch1; + case '0': + case '1': + case '2': + case '3': + case '4': + case '5': + case '6': + case '7': + case '8': + case '9': + accum = accum * 10 + (ch - '0'); + goto nextch1; + case '.': + accum = 0; + goto nextch1; + case '*': + if (afterstar) + return false; /* previous star missing dollar */ + afterstar = true; + accum = 0; + goto nextch1; + case '$': + if (accum <= 0 || accum > PG_NL_ARGMAX) + return false; + if (afterstar) + { + if (argtypes[accum] && + argtypes[accum] != ATYPE_INT) + return false; + argtypes[accum] = ATYPE_INT; + last_dollar = Max(last_dollar, accum); + afterstar = false; + } + else + fmtpos = accum; + accum = 0; + goto nextch1; + case 'l': + if (longflag) + longlongflag = 1; + else + longflag = 1; + goto nextch1; + case 'z': +#if SIZEOF_SIZE_T == 8 +#ifdef HAVE_LONG_INT_64 + longflag = 1; +#elif defined(HAVE_LONG_LONG_INT_64) + longlongflag = 1; +#else +#error "Don't know how to print 64bit integers" +#endif +#else + /* assume size_t is same size as int */ +#endif + goto nextch1; + case 'h': + case '\'': + /* ignore these */ + goto nextch1; + case 'd': + case 'i': + case 'o': + case 'u': + case 'x': + case 'X': + if (fmtpos) + { + PrintfArgType atype; + + if (longlongflag) + atype = ATYPE_LONGLONG; + else if (longflag) + atype = ATYPE_LONG; + else + atype = ATYPE_INT; + if (argtypes[fmtpos] && + argtypes[fmtpos] != atype) + return 
false; + argtypes[fmtpos] = atype; + last_dollar = Max(last_dollar, fmtpos); + } + else + return false; /* non-dollar conversion spec */ + break; + case 'c': + if (fmtpos) + { + if (argtypes[fmtpos] && + argtypes[fmtpos] != ATYPE_INT) + return false; + argtypes[fmtpos] = ATYPE_INT; + last_dollar = Max(last_dollar, fmtpos); + } + else + return false; /* non-dollar conversion spec */ + break; + case 's': + case 'p': + if (fmtpos) + { + if (argtypes[fmtpos] && + argtypes[fmtpos] != ATYPE_CHARPTR) + return false; + argtypes[fmtpos] = ATYPE_CHARPTR; + last_dollar = Max(last_dollar, fmtpos); + } + else + return false; /* non-dollar conversion spec */ + break; + case 'e': + case 'E': + case 'f': + case 'g': + case 'G': + if (fmtpos) + { + if (argtypes[fmtpos] && + argtypes[fmtpos] != ATYPE_DOUBLE) + return false; + argtypes[fmtpos] = ATYPE_DOUBLE; + last_dollar = Max(last_dollar, fmtpos); + } + else + return false; /* non-dollar conversion spec */ + break; + case 'm': + case '%': + break; + } + + /* + * If we finish the spec with afterstar still set, there's a + * non-dollar star in there. + */ + if (afterstar) + return false; /* non-dollar conversion spec */ + } + + /* + * Format appears valid so far, so collect the arguments in physical + * order. (Since we rejected any non-dollar specs that would have + * collected arguments, we know that dopr() hasn't collected any yet.) + */ + for (i = 1; i <= last_dollar; i++) + { + switch (argtypes[i]) + { + case ATYPE_NONE: + return false; + case ATYPE_INT: + argvalues[i].i = va_arg(args, int); + break; + case ATYPE_LONG: + argvalues[i].l = va_arg(args, long); + break; + case ATYPE_LONGLONG: + argvalues[i].ll = va_arg(args, long long); + break; + case ATYPE_DOUBLE: + argvalues[i].d = va_arg(args, double); + break; + case ATYPE_CHARPTR: + argvalues[i].cptr = va_arg(args, char *); + break; + } + } - trailing_pad(&padlen, target); + return true; +} + +static void +fmtstr(const char *value, int leftjust, int minlen, int maxwidth, + int pointflag, PrintfTarget *target) +{ + int padlen, + vallen; /* amount to pad */ + + /* + * If a maxwidth (precision) is specified, we must not fetch more bytes + * than that. 
+ */ + if (pointflag) + vallen = strnlen(value, maxwidth); + else + vallen = strlen(value); + + padlen = compute_padlen(minlen, vallen, leftjust); + + if (padlen > 0) + { + dopr_outchmulti(' ', padlen, target); + padlen = 0; + } + + dostr(value, vallen, target); + + trailing_pad(padlen, target); } static void @@ -844,17 +1004,18 @@ fmtptr(void *value, PrintfTarget *target) } static void -fmtint(int64 value, char type, int forcesign, int leftjust, +fmtint(long long value, char type, int forcesign, int leftjust, int minlen, int zpad, int precision, int pointflag, PrintfTarget *target) { - uint64 base; + unsigned long long base; + unsigned long long uvalue; int dosign; const char *cvt = "0123456789abcdef"; int signvalue = 0; char convert[64]; int vallen = 0; - int padlen = 0; /* amount to pad */ + int padlen; /* amount to pad */ int zeropad; /* extra leading zeroes */ switch (type) @@ -885,9 +1046,19 @@ fmtint(int64 value, char type, int forcesign, int leftjust, return; /* keep compiler quiet */ } + /* disable MSVC warning about applying unary minus to an unsigned value */ +#if _MSC_VER +#pragma warning(push) +#pragma warning(disable: 4146) +#endif /* Handle +/- */ if (dosign && adjust_sign((value < 0), forcesign, &signvalue)) - value = -value; + uvalue = -(unsigned long long) value; + else + uvalue = (unsigned long long) value; +#if _MSC_VER +#pragma warning(pop) +#endif /* * SUS: the result of converting 0 with an explicit precision of 0 is no @@ -898,46 +1069,43 @@ fmtint(int64 value, char type, int forcesign, int leftjust, else { /* make integer string */ - uint64 uvalue = (uint64) value; - do { - convert[vallen++] = cvt[uvalue % base]; + convert[sizeof(convert) - (++vallen)] = cvt[uvalue % base]; uvalue = uvalue / base; } while (uvalue); } zeropad = Max(0, precision - vallen); - adjust_padlen(minlen, vallen + zeropad, leftjust, &padlen); + padlen = compute_padlen(minlen, vallen + zeropad, leftjust); - leading_pad(zpad, &signvalue, &padlen, target); + leading_pad(zpad, signvalue, &padlen, target); - while (zeropad-- > 0) - dopr_outch('0', target); + if (zeropad > 0) + dopr_outchmulti('0', zeropad, target); - while (vallen > 0) - dopr_outch(convert[--vallen], target); + dostr(convert + sizeof(convert) - vallen, vallen, target); - trailing_pad(&padlen, target); + trailing_pad(padlen, target); } static void fmtchar(int value, int leftjust, int minlen, PrintfTarget *target) { - int padlen = 0; /* amount to pad */ + int padlen; /* amount to pad */ - adjust_padlen(minlen, 1, leftjust, &padlen); + padlen = compute_padlen(minlen, 1, leftjust); - while (padlen > 0) + if (padlen > 0) { - dopr_outch(' ', target); - --padlen; + dopr_outchmulti(' ', padlen, target); + padlen = 0; } dopr_outch(value, target); - trailing_pad(&padlen, target); + trailing_pad(padlen, target); } static void @@ -948,10 +1116,10 @@ fmtfloat(double value, char type, int forcesign, int leftjust, int signvalue = 0; int prec; int vallen; - char fmt[32]; + char fmt[8]; char convert[1024]; int zeropadlen = 0; /* amount to pad with zeroes */ - int padlen = 0; /* amount to pad with spaces */ + int padlen; /* amount to pad with spaces */ /* * We rely on the regular C library's sprintf to do the basic conversion, @@ -966,34 +1134,82 @@ fmtfloat(double value, char type, int forcesign, int leftjust, * bytes and limit requested precision to 350 digits; this should prevent * buffer overrun even with non-IEEE math. If the original precision * request was more than 350, separately pad with zeroes. 
+ * + * We handle infinities and NaNs specially to ensure platform-independent + * output. */ if (precision < 0) /* cover possible overflow of "accum" */ precision = 0; prec = Min(precision, 350); - if (pointflag) + if (isnan(value)) { - if (sprintf(fmt, "%%.%d%c", prec, type) < 0) - goto fail; - zeropadlen = precision - prec; + strcpy(convert, "NaN"); + vallen = 3; + /* no zero padding, regardless of precision spec */ } - else if (sprintf(fmt, "%%%c", type) < 0) - goto fail; + else + { + /* + * Handle sign (NaNs have no sign, so we don't do this in the case + * above). "value < 0.0" will not be true for IEEE minus zero, so we + * detect that by looking for the case where value equals 0.0 + * according to == but not according to memcmp. + */ + static const double dzero = 0.0; - if (!isnan(value) && adjust_sign((value < 0), forcesign, &signvalue)) - value = -value; + if (adjust_sign((value < 0.0 || + (value == 0.0 && + memcmp(&value, &dzero, sizeof(double)) != 0)), + forcesign, &signvalue)) + value = -value; - vallen = sprintf(convert, fmt, value); - if (vallen < 0) - goto fail; + if (isinf(value)) + { + strcpy(convert, "Infinity"); + vallen = 8; + /* no zero padding, regardless of precision spec */ + } + else if (pointflag) + { + zeropadlen = precision - prec; + fmt[0] = '%'; + fmt[1] = '.'; + fmt[2] = '*'; + fmt[3] = type; + fmt[4] = '\0'; + vallen = sprintf(convert, fmt, prec, value); + } + else + { + fmt[0] = '%'; + fmt[1] = type; + fmt[2] = '\0'; + vallen = sprintf(convert, fmt, value); + } + if (vallen < 0) + goto fail; - /* If it's infinity or NaN, forget about doing any zero-padding */ - if (zeropadlen > 0 && !isdigit((unsigned char) convert[vallen - 1])) - zeropadlen = 0; + /* + * Windows, alone among our supported platforms, likes to emit + * three-digit exponent fields even when two digits would do. Hack + * such results to look like the way everyone else does it. + */ +#ifdef WIN32 + if (vallen >= 6 && + convert[vallen - 5] == 'e' && + convert[vallen - 3] == '0') + { + convert[vallen - 3] = convert[vallen - 2]; + convert[vallen - 2] = convert[vallen - 1]; + vallen--; + } +#endif + } - adjust_padlen(minlen, vallen + zeropadlen, leftjust, &padlen); + padlen = compute_padlen(minlen, vallen + zeropadlen, leftjust); - leading_pad(zpad, &signvalue, &padlen, target); + leading_pad(zpad, signvalue, &padlen, target); if (zeropadlen > 0) { @@ -1004,18 +1220,18 @@ fmtfloat(double value, char type, int forcesign, int leftjust, epos = strrchr(convert, 'E'); if (epos) { - /* pad after exponent */ + /* pad before exponent */ dostr(convert, epos - convert, target); - while (zeropadlen-- > 0) - dopr_outch('0', target); + if (zeropadlen > 0) + dopr_outchmulti('0', zeropadlen, target); dostr(epos, vallen - (epos - convert), target); } else { /* no exponent, pad after the digits */ dostr(convert, vallen, target); - while (zeropadlen-- > 0) - dopr_outch('0', target); + if (zeropadlen > 0) + dopr_outchmulti('0', zeropadlen, target); } } else @@ -1024,16 +1240,124 @@ fmtfloat(double value, char type, int forcesign, int leftjust, dostr(convert, vallen, target); } - trailing_pad(&padlen, target); + trailing_pad(padlen, target); return; fail: target->failed = true; } +/* + * Nonstandard entry point to print a double value efficiently. + * + * This is approximately equivalent to strfromd(), but has an API more + * adapted to what float8out() wants. The behavior is like snprintf() + * with a format of "%.ng", where n is the specified precision. + * However, the target buffer must be nonempty (i.e. 
count > 0), and + * the precision is silently bounded to a sane range. + */ +int +pg_strfromd(char *str, size_t count, int precision, double value) +{ + PrintfTarget target; + int signvalue = 0; + int vallen; + char fmt[8]; + char convert[64]; + + /* Set up the target like pg_snprintf, but require nonempty buffer */ + Assert(count > 0); + target.bufstart = target.bufptr = str; + target.bufend = str + count - 1; + target.stream = NULL; + target.nchars = 0; + target.failed = false; + + /* + * We bound precision to a reasonable range; the combination of this and + * the knowledge that we're using "g" format without padding allows the + * convert[] buffer to be reasonably small. + */ + if (precision < 1) + precision = 1; + else if (precision > 32) + precision = 32; + + /* + * The rest is just an inlined version of the fmtfloat() logic above, + * simplified using the knowledge that no padding is wanted. + */ + if (isnan(value)) + { + strcpy(convert, "NaN"); + vallen = 3; + } + else + { + static const double dzero = 0.0; + + if (value < 0.0 || + (value == 0.0 && + memcmp(&value, &dzero, sizeof(double)) != 0)) + { + signvalue = '-'; + value = -value; + } + + if (isinf(value)) + { + strcpy(convert, "Infinity"); + vallen = 8; + } + else + { + fmt[0] = '%'; + fmt[1] = '.'; + fmt[2] = '*'; + fmt[3] = 'g'; + fmt[4] = '\0'; + vallen = sprintf(convert, fmt, precision, value); + if (vallen < 0) + { + target.failed = true; + goto fail; + } + +#ifdef WIN32 + if (vallen >= 6 && + convert[vallen - 5] == 'e' && + convert[vallen - 3] == '0') + { + convert[vallen - 3] = convert[vallen - 2]; + convert[vallen - 2] = convert[vallen - 1]; + vallen--; + } +#endif + } + } + + if (signvalue) + dopr_outch(signvalue, &target); + + dostr(convert, vallen, &target); + +fail: + *(target.bufptr) = '\0'; + return target.failed ? -1 : (target.bufptr - target.bufstart + + target.nchars); +} + + static void dostr(const char *str, int slen, PrintfTarget *target) { + /* fast path for common case of slen == 1 */ + if (slen == 1) + { + dopr_outch(*str, target); + return; + } + while (slen > 0) { int avail; @@ -1046,7 +1370,10 @@ dostr(const char *str, int slen, PrintfTarget *target) { /* buffer full, can we dump to stream? */ if (target->stream == NULL) - return; /* no, lose the data */ + { + target->nchars += slen; /* no, lose the data */ + return; + } flushbuffer(target); continue; } @@ -1065,12 +1392,51 @@ dopr_outch(int c, PrintfTarget *target) { /* buffer full, can we dump to stream? */ if (target->stream == NULL) - return; /* no, lose the data */ + { + target->nchars++; /* no, lose the data */ + return; + } flushbuffer(target); } *(target->bufptr++) = c; } +static void +dopr_outchmulti(int c, int slen, PrintfTarget *target) +{ + /* fast path for common case of slen == 1 */ + if (slen == 1) + { + dopr_outch(c, target); + return; + } + + while (slen > 0) + { + int avail; + + if (target->bufend != NULL) + avail = target->bufend - target->bufptr; + else + avail = slen; + if (avail <= 0) + { + /* buffer full, can we dump to stream? 
*/ + if (target->stream == NULL) + { + target->nchars += slen; /* no, lose the data */ + return; + } + flushbuffer(target); + continue; + } + avail = Min(avail, slen); + memset(target->bufptr, c, avail); + target->bufptr += avail; + slen -= avail; + } +} + static int adjust_sign(int is_negative, int forcesign, int *signvalue) @@ -1086,42 +1452,48 @@ adjust_sign(int is_negative, int forcesign, int *signvalue) } -static void -adjust_padlen(int minlen, int vallen, int leftjust, int *padlen) +static int +compute_padlen(int minlen, int vallen, int leftjust) { - *padlen = minlen - vallen; - if (*padlen < 0) - *padlen = 0; + int padlen; + + padlen = minlen - vallen; + if (padlen < 0) + padlen = 0; if (leftjust) - *padlen = -(*padlen); + padlen = -padlen; + return padlen; } static void -leading_pad(int zpad, int *signvalue, int *padlen, PrintfTarget *target) +leading_pad(int zpad, int signvalue, int *padlen, PrintfTarget *target) { + int maxpad; + if (*padlen > 0 && zpad) { - if (*signvalue) + if (signvalue) { - dopr_outch(*signvalue, target); + dopr_outch(signvalue, target); --(*padlen); - *signvalue = 0; + signvalue = 0; } - while (*padlen > 0) + if (*padlen > 0) { - dopr_outch(zpad, target); - --(*padlen); + dopr_outchmulti(zpad, *padlen, target); + *padlen = 0; } } - while (*padlen > (*signvalue != 0)) + maxpad = (signvalue != 0); + if (*padlen > maxpad) { - dopr_outch(' ', target); - --(*padlen); + dopr_outchmulti(' ', *padlen - maxpad, target); + *padlen = maxpad; } - if (*signvalue) + if (signvalue) { - dopr_outch(*signvalue, target); + dopr_outch(signvalue, target); if (*padlen > 0) --(*padlen); else if (*padlen < 0) @@ -1131,11 +1503,8 @@ leading_pad(int zpad, int *signvalue, int *padlen, PrintfTarget *target) static void -trailing_pad(int *padlen, PrintfTarget *target) +trailing_pad(int padlen, PrintfTarget *target) { - while (*padlen < 0) - { - dopr_outch(' ', target); - ++(*padlen); - } + if (padlen < 0) + dopr_outchmulti(' ', -padlen, target); } diff --git a/src/port/sprompt.c b/src/port/sprompt.c index 47cd9781fd..b617c931fc 100644 --- a/src/port/sprompt.c +++ b/src/port/sprompt.c @@ -3,7 +3,7 @@ * sprompt.c * simple_prompt() routine * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * @@ -40,15 +40,13 @@ simple_prompt(const char *prompt, char *destination, size_t destlen, bool echo) FILE *termin, *termout; -#ifdef HAVE_TERMIOS_H +#if defined(HAVE_TERMIOS_H) struct termios t_orig, t; -#else -#ifdef WIN32 +#elif defined(WIN32) HANDLE t = NULL; DWORD t_orig = 0; #endif -#endif #ifdef WIN32 @@ -66,8 +64,11 @@ simple_prompt(const char *prompt, char *destination, size_t destlen, bool echo) * * XXX fgets() still receives text in the console's input code page. This * makes non-ASCII credentials unportable. + * + * Unintuitively, we also open termin in mode "w+", even though we only + * read it; that's needed for SetConsoleMode() to succeed. 
*/ - termin = fopen("CONIN$", "r"); + termin = fopen("CONIN$", "w+"); termout = fopen("CONOUT$", "w+"); #else @@ -99,29 +100,25 @@ simple_prompt(const char *prompt, char *destination, size_t destlen, bool echo) termout = stderr; } -#ifdef HAVE_TERMIOS_H if (!echo) { +#if defined(HAVE_TERMIOS_H) + /* disable echo via tcgetattr/tcsetattr */ tcgetattr(fileno(termin), &t); t_orig = t; t.c_lflag &= ~ECHO; tcsetattr(fileno(termin), TCSAFLUSH, &t); - } -#else -#ifdef WIN32 - if (!echo) - { - /* get a new handle to turn echo off */ - t = GetStdHandle(STD_INPUT_HANDLE); +#elif defined(WIN32) + /* need the file's HANDLE to turn echo off */ + t = (HANDLE) _get_osfhandle(_fileno(termin)); /* save the old configuration first */ GetConsoleMode(t, &t_orig); /* set to the new mode */ SetConsoleMode(t, ENABLE_LINE_INPUT | ENABLE_PROCESSED_INPUT); - } -#endif #endif + } if (prompt) { @@ -151,24 +148,19 @@ simple_prompt(const char *prompt, char *destination, size_t destlen, bool echo) /* remove trailing newline */ destination[length - 1] = '\0'; -#ifdef HAVE_TERMIOS_H if (!echo) { + /* restore previous echo behavior, then echo \n */ +#if defined(HAVE_TERMIOS_H) tcsetattr(fileno(termin), TCSAFLUSH, &t_orig); fputs("\n", termout); fflush(termout); - } -#else -#ifdef WIN32 - if (!echo) - { - /* reset to the original console mode */ +#elif defined(WIN32) SetConsoleMode(t, t_orig); fputs("\n", termout); fflush(termout); - } -#endif #endif + } if (termin != stdin) { diff --git a/src/port/srandom.c b/src/port/srandom.c index 867c71858c..6939260d33 100644 --- a/src/port/srandom.c +++ b/src/port/srandom.c @@ -3,7 +3,7 @@ * srandom.c * srandom() wrapper * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * diff --git a/src/port/strerror.c b/src/port/strerror.c index e92ebc9f55..ba93815c50 100644 --- a/src/port/strerror.c +++ b/src/port/strerror.c @@ -1,30 +1,322 @@ -/* src/port/strerror.c */ - -/* - * strerror - map error number to descriptive string +/*------------------------------------------------------------------------- + * + * strerror.c + * Replacements for standard strerror() and strerror_r() functions + * + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group + * Portions Copyright (c) 1994, Regents of the University of California * - * This version is obviously somewhat Unix-specific. * - * based on code by Henry Spencer - * modified for ANSI by D'Arcy J.M. 
Cain + * IDENTIFICATION + * src/port/strerror.c + * + *------------------------------------------------------------------------- */ - #include "c.h" +/* + * Within this file, "strerror" means the platform's function not pg_strerror, + * and likewise for "strerror_r" + */ +#undef strerror +#undef strerror_r + +static char *gnuish_strerror_r(int errnum, char *buf, size_t buflen); +static char *get_errno_symbol(int errnum); +#ifdef WIN32 +static char *win32_socket_strerror(int errnum, char *buf, size_t buflen); +#endif -extern const char *const sys_errlist[]; -extern int sys_nerr; -const char * -strerror(int errnum) +/* + * A slightly cleaned-up version of strerror() + */ +char * +pg_strerror(int errnum) { - static char buf[24]; + static char errorstr_buf[PG_STRERROR_R_BUFLEN]; + + return pg_strerror_r(errnum, errorstr_buf, sizeof(errorstr_buf)); +} - if (errnum < 0 || errnum > sys_nerr) +/* + * A slightly cleaned-up version of strerror_r() + */ +char * +pg_strerror_r(int errnum, char *buf, size_t buflen) +{ + char *str; + + /* If it's a Windows Winsock error, that needs special handling */ +#ifdef WIN32 + /* Winsock error code range, per WinError.h */ + if (errnum >= 10000 && errnum <= 11999) + return win32_socket_strerror(errnum, buf, buflen); +#endif + + /* Try the platform's strerror_r(), or maybe just strerror() */ + str = gnuish_strerror_r(errnum, buf, buflen); + + /* + * Some strerror()s return an empty string for out-of-range errno. This + * is ANSI C spec compliant, but not exactly useful. Also, we may get + * back strings of question marks if libc cannot transcode the message to + * the codeset specified by LC_CTYPE. If we get nothing useful, first try + * get_errno_symbol(), and if that fails, print the numeric errno. + */ + if (str == NULL || *str == '\0' || *str == '?') + str = get_errno_symbol(errnum); + + if (str == NULL) { - sprintf(buf, _("unrecognized error %d"), errnum); + snprintf(buf, buflen, _("operating system error %d"), errnum); + str = buf; + } + + return str; +} + +/* + * Simple wrapper to emulate GNU strerror_r if what the platform provides is + * POSIX. Also, if platform lacks strerror_r altogether, fall back to plain + * strerror; it might not be very thread-safe, but tough luck. + */ +static char * +gnuish_strerror_r(int errnum, char *buf, size_t buflen) +{ +#ifdef HAVE_STRERROR_R +#ifdef STRERROR_R_INT + /* POSIX API */ + if (strerror_r(errnum, buf, buflen) == 0) return buf; + return NULL; /* let caller deal with failure */ +#else + /* GNU API */ + return strerror_r(errnum, buf, buflen); +#endif +#else /* !HAVE_STRERROR_R */ + char *sbuf = strerror(errnum); + + if (sbuf == NULL) /* can this still happen anywhere? */ + return NULL; + /* To minimize thread-unsafety hazard, copy into caller's buffer */ + strlcpy(buf, sbuf, buflen); + return buf; +#endif +} + +/* + * Returns a symbol (e.g. "ENOENT") for an errno code. + * Returns NULL if the code is unrecognized. 
+ */ +static char * +get_errno_symbol(int errnum) +{ + switch (errnum) + { + case E2BIG: + return "E2BIG"; + case EACCES: + return "EACCES"; +#ifdef EADDRINUSE + case EADDRINUSE: + return "EADDRINUSE"; +#endif +#ifdef EADDRNOTAVAIL + case EADDRNOTAVAIL: + return "EADDRNOTAVAIL"; +#endif + case EAFNOSUPPORT: + return "EAFNOSUPPORT"; +#ifdef EAGAIN + case EAGAIN: + return "EAGAIN"; +#endif +#ifdef EALREADY + case EALREADY: + return "EALREADY"; +#endif + case EBADF: + return "EBADF"; +#ifdef EBADMSG + case EBADMSG: + return "EBADMSG"; +#endif + case EBUSY: + return "EBUSY"; + case ECHILD: + return "ECHILD"; +#ifdef ECONNABORTED + case ECONNABORTED: + return "ECONNABORTED"; +#endif + case ECONNREFUSED: + return "ECONNREFUSED"; +#ifdef ECONNRESET + case ECONNRESET: + return "ECONNRESET"; +#endif + case EDEADLK: + return "EDEADLK"; + case EDOM: + return "EDOM"; + case EEXIST: + return "EEXIST"; + case EFAULT: + return "EFAULT"; + case EFBIG: + return "EFBIG"; +#ifdef EHOSTUNREACH + case EHOSTUNREACH: + return "EHOSTUNREACH"; +#endif + case EIDRM: + return "EIDRM"; + case EINPROGRESS: + return "EINPROGRESS"; + case EINTR: + return "EINTR"; + case EINVAL: + return "EINVAL"; + case EIO: + return "EIO"; +#ifdef EISCONN + case EISCONN: + return "EISCONN"; +#endif + case EISDIR: + return "EISDIR"; +#ifdef ELOOP + case ELOOP: + return "ELOOP"; +#endif + case EMFILE: + return "EMFILE"; + case EMLINK: + return "EMLINK"; + case EMSGSIZE: + return "EMSGSIZE"; + case ENAMETOOLONG: + return "ENAMETOOLONG"; + case ENFILE: + return "ENFILE"; + case ENOBUFS: + return "ENOBUFS"; + case ENODEV: + return "ENODEV"; + case ENOENT: + return "ENOENT"; + case ENOEXEC: + return "ENOEXEC"; + case ENOMEM: + return "ENOMEM"; + case ENOSPC: + return "ENOSPC"; + case ENOSYS: + return "ENOSYS"; +#ifdef ENOTCONN + case ENOTCONN: + return "ENOTCONN"; +#endif + case ENOTDIR: + return "ENOTDIR"; +#if defined(ENOTEMPTY) && (ENOTEMPTY != EEXIST) /* same code on AIX */ + case ENOTEMPTY: + return "ENOTEMPTY"; +#endif +#ifdef ENOTSOCK + case ENOTSOCK: + return "ENOTSOCK"; +#endif +#ifdef ENOTSUP + case ENOTSUP: + return "ENOTSUP"; +#endif + case ENOTTY: + return "ENOTTY"; + case ENXIO: + return "ENXIO"; +#if defined(EOPNOTSUPP) && (!defined(ENOTSUP) || (EOPNOTSUPP != ENOTSUP)) + case EOPNOTSUPP: + return "EOPNOTSUPP"; +#endif +#ifdef EOVERFLOW + case EOVERFLOW: + return "EOVERFLOW"; +#endif + case EPERM: + return "EPERM"; + case EPIPE: + return "EPIPE"; + case EPROTONOSUPPORT: + return "EPROTONOSUPPORT"; + case ERANGE: + return "ERANGE"; +#ifdef EROFS + case EROFS: + return "EROFS"; +#endif + case ESRCH: + return "ESRCH"; +#ifdef ETIMEDOUT + case ETIMEDOUT: + return "ETIMEDOUT"; +#endif +#ifdef ETXTBSY + case ETXTBSY: + return "ETXTBSY"; +#endif +#if defined(EWOULDBLOCK) && (!defined(EAGAIN) || (EWOULDBLOCK != EAGAIN)) + case EWOULDBLOCK: + return "EWOULDBLOCK"; +#endif + case EXDEV: + return "EXDEV"; + } + + return NULL; +} + + +#ifdef WIN32 + +/* + * Windows' strerror() doesn't know the Winsock codes, so handle them this way + */ +static char * +win32_socket_strerror(int errnum, char *buf, size_t buflen) +{ + static HANDLE handleDLL = INVALID_HANDLE_VALUE; + + if (handleDLL == INVALID_HANDLE_VALUE) + { + handleDLL = LoadLibraryEx("netmsg.dll", NULL, + DONT_RESOLVE_DLL_REFERENCES | LOAD_LIBRARY_AS_DATAFILE); + if (handleDLL == NULL) + { + snprintf(buf, buflen, + "winsock error %d (could not load netmsg.dll to translate: error code %lu)", + errnum, GetLastError()); + return buf; + } + } + + ZeroMemory(buf, buflen); + if 
(FormatMessage(FORMAT_MESSAGE_IGNORE_INSERTS | + FORMAT_MESSAGE_FROM_SYSTEM | + FORMAT_MESSAGE_FROM_HMODULE, + handleDLL, + errnum, + MAKELANGID(LANG_ENGLISH, SUBLANG_DEFAULT), + buf, + buflen - 1, + NULL) == 0) + { + /* Failed to get id */ + snprintf(buf, buflen, "unrecognized winsock error %d", errnum); } - return sys_errlist[errnum]; + return buf; } + +#endif /* WIN32 */ diff --git a/src/port/strlcpy.c b/src/port/strlcpy.c index 29c14da0b6..920d7f88f5 100644 --- a/src/port/strlcpy.c +++ b/src/port/strlcpy.c @@ -3,7 +3,7 @@ * strlcpy.c * strncpy done right * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * * * IDENTIFICATION diff --git a/src/port/strnlen.c b/src/port/strnlen.c new file mode 100644 index 0000000000..bd4b56bbb1 --- /dev/null +++ b/src/port/strnlen.c @@ -0,0 +1,33 @@ +/*------------------------------------------------------------------------- + * + * strnlen.c + * Fallback implementation of strnlen(). + * + * + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group + * Portions Copyright (c) 1994, Regents of the University of California + * + * IDENTIFICATION + * src/port/strnlen.c + * + *------------------------------------------------------------------------- + */ + +#include "c.h" + +/* + * Implementation of posix' strnlen for systems where it's not available. + * + * Returns the number of characters before a null-byte in the string pointed + * to by str, unless there's no null-byte before maxlen. In the latter case + * maxlen is returned. + */ +size_t +strnlen(const char *str, size_t maxlen) +{ + const char *p = str; + + while (maxlen-- > 0 && *p) + p++; + return p - str; +} diff --git a/src/port/system.c b/src/port/system.c index 3d99c7985f..9d5766e33c 100644 --- a/src/port/system.c +++ b/src/port/system.c @@ -29,7 +29,7 @@ * quote character on the command line, preserving any text after the last * quote character. * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * * src/port/system.c * diff --git a/src/port/thread.c b/src/port/thread.c index a3f37b1237..8e0c7df73a 100644 --- a/src/port/thread.c +++ b/src/port/thread.c @@ -5,7 +5,7 @@ * Prototypes and macros around system calls, used to help make * threaded libraries reentrant and safe to use from threaded applications. * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * * src/port/thread.c * @@ -53,33 +53,6 @@ */ -/* - * Wrapper around strerror and strerror_r to use the former if it is - * available and also return a more useful value (the error string). - */ -char * -pqStrerror(int errnum, char *strerrbuf, size_t buflen) -{ -#if defined(FRONTEND) && defined(ENABLE_THREAD_SAFETY) && defined(HAVE_STRERROR_R) - /* reentrant strerror_r is available */ -#ifdef STRERROR_R_INT - /* SUSv3 version */ - if (strerror_r(errnum, strerrbuf, buflen) == 0) - return strerrbuf; - else - return "Unknown error"; -#else - /* GNU libc */ - return strerror_r(errnum, strerrbuf, buflen); -#endif -#else - /* no strerror_r() available, just use strerror */ - strlcpy(strerrbuf, strerror(errnum), buflen); - - return strerrbuf; -#endif -} - /* * Wrapper around getpwuid() or getpwuid_r() to mimic POSIX getpwuid_r() * behaviour, if that function is not available or required. 
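Taken together, the strerror.c and snprintf.c changes mean frontend code no longer needs ad-hoc wrappers like the removed pqStrerror(): callers can use pg_strerror_r() directly, or lean on the "%m" conversion that dopr() expands from the errno value saved on entry. A minimal usage sketch (illustrative only, assuming the declarations of pg_fprintf, pg_strerror_r and PG_STRERROR_R_BUFLEN are visible through the usual c.h/port.h includes of a libpgport build):

    #include "c.h"

    static void
    report_open_failure(const char *path)
    {
        char        errbuf[PG_STRERROR_R_BUFLEN];

        /* explicit form: the caller supplies the buffer, so this is thread-safe */
        pg_fprintf(stderr, "could not open \"%s\": %s\n",
                   path, pg_strerror_r(errno, errbuf, sizeof(errbuf)));

        /* equivalent shorthand via the %m extension implemented in dopr() */
        pg_fprintf(stderr, "could not open \"%s\": %m\n", path);
    }

Both calls print the same message; the second works because dopr() captures errno at entry, before any internal calls could clobber it.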
diff --git a/src/port/unsetenv.c b/src/port/unsetenv.c index 83b04d2641..7841431482 100644 --- a/src/port/unsetenv.c +++ b/src/port/unsetenv.c @@ -3,7 +3,7 @@ * unsetenv.c * unsetenv() emulation for machines without it * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * diff --git a/src/port/win32env.c b/src/port/win32env.c index 5480525fa2..af8555a7a7 100644 --- a/src/port/win32env.c +++ b/src/port/win32env.c @@ -4,7 +4,7 @@ * putenv() and unsetenv() for win32, which update both process environment * and caches in (potentially multiple) C run-time library (CRT) versions. * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * diff --git a/src/port/win32error.c b/src/port/win32error.c index fe07f6e0a2..71f6e89ddd 100644 --- a/src/port/win32error.c +++ b/src/port/win32error.c @@ -3,7 +3,7 @@ * win32error.c * Map win32 error codes to errno values * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * * IDENTIFICATION * src/port/win32error.c diff --git a/src/port/win32security.c b/src/port/win32security.c index bb9d034a01..8d7bcd2d92 100644 --- a/src/port/win32security.c +++ b/src/port/win32security.c @@ -3,7 +3,7 @@ * win32security.c * Microsoft Windows Win32 Security Support Functions * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * * IDENTIFICATION * src/port/win32security.c diff --git a/src/port/win32setlocale.c b/src/port/win32setlocale.c index c4da4a8f92..a8cf170dd1 100644 --- a/src/port/win32setlocale.c +++ b/src/port/win32setlocale.c @@ -3,7 +3,7 @@ * win32setlocale.c * Wrapper to work around bugs in Windows setlocale() implementation * - * Copyright (c) 2011-2017, PostgreSQL Global Development Group + * Copyright (c) 2011-2018, PostgreSQL Global Development Group * * IDENTIFICATION * src/port/win32setlocale.c @@ -183,7 +183,7 @@ pgwin32_setlocale(int category, const char *locale) * forbidden to modify, so casting away the "const" is innocuous. */ if (result) - result = (char *) map_locale(locale_map_result, result); + result = unconstify(char *, map_locale(locale_map_result, result)); return result; } diff --git a/src/port/win32ver.rc b/src/port/win32ver.rc index 6cb2e99b92..7f559bad03 100644 --- a/src/port/win32ver.rc +++ b/src/port/win32ver.rc @@ -2,8 +2,8 @@ #include "pg_config.h" VS_VERSION_INFO VERSIONINFO - FILEVERSION 11,0,0,0 - PRODUCTVERSION 11,0,0,0 + FILEVERSION 12,0,0,0 + PRODUCTVERSION 12,0,0,0 FILEFLAGSMASK 0x17L FILEFLAGS 0x0L FILEOS VOS_NT_WINDOWS32 @@ -17,7 +17,7 @@ BEGIN VALUE "CompanyName", "PostgreSQL Global Development Group" VALUE "FileDescription", FILEDESC VALUE "FileVersion", PG_VERSION - VALUE "LegalCopyright", "Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group. Portions Copyright (c) 1994, Regents of the University of California." + VALUE "LegalCopyright", "Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group. Portions Copyright (c) 1994, Regents of the University of California." 
VALUE "ProductName", "PostgreSQL" VALUE "ProductVersion", PG_VERSION END diff --git a/src/template/aix b/src/template/aix index b566ff129d..ed832849da 100644 --- a/src/template/aix +++ b/src/template/aix @@ -10,6 +10,10 @@ if test "$GCC" != yes ; then CFLAGS="-O2 -qmaxmem=16384 -qsrcmsg" ;; esac + + # Due to a compiler bug, see 20171013023536.GA492146@rfd.leadboat.com for details, + # force restrict not to be used when compiling with xlc. + FORCE_DISABLE_RESTRICT=yes fi # Native memset() is faster, tested on: diff --git a/src/template/darwin b/src/template/darwin index ea6d3b0b04..c05adca0bf 100644 --- a/src/template/darwin +++ b/src/template/darwin @@ -3,6 +3,19 @@ # Note: Darwin is the original code name for macOS, also known as OS X. # We still use "darwin" as the port name, partly because config.guess does. +# Select where system include files should be sought. +if test x"$PG_SYSROOT" = x"" ; then + PG_SYSROOT=`xcodebuild -version -sdk macosx Path 2>/dev/null` +fi +# Old xcodebuild versions may produce garbage, so validate the result. +if test x"$PG_SYSROOT" != x"" ; then + if test -d "$PG_SYSROOT" ; then + CPPFLAGS="-isysroot $PG_SYSROOT $CPPFLAGS" + else + PG_SYSROOT="" + fi +fi + # Select appropriate semaphore support. Darwin 6.0 (macOS 10.2) and up # support System V semaphores; before that we have to use named POSIX # semaphores, which are less good for our purposes because they eat a diff --git a/src/template/linux b/src/template/linux index f820bf7280..e39290845a 100644 --- a/src/template/linux +++ b/src/template/linux @@ -6,6 +6,7 @@ if test x"$PREFERRED_SEMAPHORES" = x"" ; then fi # Force _GNU_SOURCE on; plperl is broken with Perl 5.8.0 otherwise +# This is also required for ppoll(2), and perhaps other things CPPFLAGS="$CPPFLAGS -D_GNU_SOURCE" # If --enable-profiling is specified, we need -DLINUX_PROFILE diff --git a/src/test/Makefile b/src/test/Makefile index dbfa799a84..efb206aa75 100644 --- a/src/test/Makefile +++ b/src/test/Makefile @@ -14,10 +14,30 @@ include $(top_builddir)/src/Makefile.global SUBDIRS = perl regress isolation modules authentication recovery subscription -# We don't build or execute examples/, locale/, or thread/ by default, -# but we do want "make clean" etc to recurse into them. Likewise for ssl/, -# because the SSL test suite is not secure to run on a multi-user system. -ALWAYS_SUBDIRS = examples locale thread ssl +# Test suites that are not safe by default but can be run if selected +# by the user via the whitespace-separated list in variable +# PG_TEST_EXTRA: +ifeq ($(with_gssapi),yes) +ifneq (,$(filter kerberos,$(PG_TEST_EXTRA))) +SUBDIRS += kerberos +endif +endif +ifeq ($(with_ldap),yes) +ifneq (,$(filter ldap,$(PG_TEST_EXTRA))) +SUBDIRS += ldap +endif +endif +ifeq ($(with_openssl),yes) +ifneq (,$(filter ssl,$(PG_TEST_EXTRA))) +SUBDIRS += ssl +endif +endif + +# We don't build or execute these by default, but we do want "make +# clean" etc to recurse into them. (We must filter out those that we +# have conditionally included into SUBDIRS above, else there will be +# make confusion.) +ALWAYS_SUBDIRS = $(filter-out $(SUBDIRS),examples kerberos ldap locale thread ssl) # We want to recurse to all subdirs for all standard targets, except that # installcheck and install should not recurse into the subdirectory "modules". 
diff --git a/src/test/authentication/Makefile b/src/test/authentication/Makefile index 21ad15bea9..218452ec76 100644 --- a/src/test/authentication/Makefile +++ b/src/test/authentication/Makefile @@ -2,7 +2,7 @@ # # Makefile for src/test/authentication # -# Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group +# Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group # Portions Copyright (c) 1994, Regents of the University of California # # src/test/authentication/Makefile @@ -16,5 +16,8 @@ include $(top_builddir)/src/Makefile.global check: $(prove_check) +installcheck: + $(prove_installcheck) + clean distclean maintainer-clean: rm -rf tmp_check diff --git a/src/test/authentication/README b/src/test/authentication/README index 5cffc7dc49..a8f27bfdaf 100644 --- a/src/test/authentication/README +++ b/src/test/authentication/README @@ -11,6 +11,16 @@ are more complicated, and are not safe to run in a multi-user system. Running the tests ================= +NOTE: You must have given the --enable-tap-tests argument to configure. + +Run make check +or + make installcheck +You can use "make installcheck" if you previously did "make install". +In that case, the code in the installation tree is tested. With +"make check", a temporary installation tree is built from the current +sources and then tested. -NOTE: This requires the --enable-tap-tests argument to configure. +Either way, this test initializes, starts, and stops a test Postgres +cluster. diff --git a/src/test/authentication/t/001_password.pl b/src/test/authentication/t/001_password.pl index 2d3f674144..3a3b0eb7e8 100644 --- a/src/test/authentication/t/001_password.pl +++ b/src/test/authentication/t/001_password.pl @@ -31,6 +31,7 @@ sub reset_pg_hba unlink($node->data_dir . '/pg_hba.conf'); $node->append_conf('pg_hba.conf', "local all all $hba_method"); $node->reload; + return; } # Test access for a single role, useful to wrap all tests into one. @@ -44,10 +45,10 @@ sub test_role $status_string = 'success' if ($expected_res eq 0); - my $res = - $node->psql('postgres', 'SELECT 1', extra_params => [ '-U', $role ]); + my $res = $node->psql('postgres', undef, extra_params => [ '-U', $role ]); is($res, $expected_res, "authentication $status_string for method $method, role $role"); + return; } # Initialize master node @@ -58,10 +59,11 @@ sub test_role # Create 3 roles with different password methods for each one. The same # password is used for all of them. $node->safe_psql('postgres', -"SET password_encryption='scram-sha-256'; CREATE ROLE scram_role LOGIN PASSWORD 'pass';" + "SET password_encryption='scram-sha-256'; CREATE ROLE scram_role LOGIN PASSWORD 'pass';" ); $node->safe_psql('postgres', -"SET password_encryption='md5'; CREATE ROLE md5_role LOGIN PASSWORD 'pass';"); + "SET password_encryption='md5'; CREATE ROLE md5_role LOGIN PASSWORD 'pass';" +); $ENV{"PGPASSWORD"} = 'pass'; # For "trust" method, all users should be able to connect. diff --git a/src/test/authentication/t/002_saslprep.pl b/src/test/authentication/t/002_saslprep.pl index df9f85d6a9..c4b335c45f 100644 --- a/src/test/authentication/t/002_saslprep.pl +++ b/src/test/authentication/t/002_saslprep.pl @@ -27,6 +27,7 @@ sub reset_pg_hba unlink($node->data_dir . '/pg_hba.conf'); $node->append_conf('pg_hba.conf', "local all all $hba_method"); $node->reload; + return; } # Test access for a single role, useful to wrap all tests into one. 
@@ -41,11 +42,11 @@ sub test_login $status_string = 'success' if ($expected_res eq 0); $ENV{"PGPASSWORD"} = $password; - my $res = - $node->psql('postgres', 'SELECT 1', extra_params => [ '-U', $role ]); + my $res = $node->psql('postgres', undef, extra_params => [ '-U', $role ]); is($res, $expected_res, "authentication $status_string for role $role with password $password" ); + return; } # Initialize master node. Force UTF-8 encoding, so that we can use non-ASCII diff --git a/src/test/examples/Makefile b/src/test/examples/Makefile index 31da210a31..a67f456904 100644 --- a/src/test/examples/Makefile +++ b/src/test/examples/Makefile @@ -7,11 +7,11 @@ top_builddir = ../../.. include $(top_builddir)/src/Makefile.global ifeq ($(PORTNAME), win32) -LDLIBS += -lws2_32 +LDFLAGS += -lws2_32 endif override CPPFLAGS := -I$(libpq_srcdir) $(CPPFLAGS) -override LDLIBS := $(libpq_pgport) $(LDLIBS) +LDFLAGS_INTERNAL += $(libpq_pgport) PROGS = testlibpq testlibpq2 testlibpq3 testlibpq4 testlo testlo64 diff --git a/src/test/examples/testlibpq.c b/src/test/examples/testlibpq.c index 4d9af82dd1..18c98083de 100644 --- a/src/test/examples/testlibpq.c +++ b/src/test/examples/testlibpq.c @@ -48,6 +48,22 @@ main(int argc, char **argv) exit_nicely(conn); } + /* Set always-secure search path, so malicious users can't take control. */ + res = PQexec(conn, + "SELECT pg_catalog.set_config('search_path', '', false)"); + if (PQresultStatus(res) != PGRES_TUPLES_OK) + { + fprintf(stderr, "SET failed: %s", PQerrorMessage(conn)); + PQclear(res); + exit_nicely(conn); + } + + /* + * Should PQclear PGresult whenever it is no longer needed to avoid memory + * leaks + */ + PQclear(res); + /* * Our test case here involves using a cursor, for which we must be inside * a transaction block. We could do the whole thing with a single @@ -63,11 +79,6 @@ main(int argc, char **argv) PQclear(res); exit_nicely(conn); } - - /* - * Should PQclear PGresult whenever it is no longer needed to avoid memory - * leaks - */ PQclear(res); /* diff --git a/src/test/examples/testlibpq2.c b/src/test/examples/testlibpq2.c index 07c6317a21..511246763a 100644 --- a/src/test/examples/testlibpq2.c +++ b/src/test/examples/testlibpq2.c @@ -13,16 +13,16 @@ * populate a database with the following commands * (provided in src/test/examples/testlibpq2.sql): * + * CREATE SCHEMA TESTLIBPQ2; + * SET search_path = TESTLIBPQ2; * CREATE TABLE TBL1 (i int4); - * * CREATE TABLE TBL2 (i int4); - * * CREATE RULE r1 AS ON INSERT TO TBL1 DO * (INSERT INTO TBL2 VALUES (new.i); NOTIFY TBL2); * - * and do this four times: + * Start this program, then from psql do this four times: * - * INSERT INTO TBL1 VALUES (10); + * INSERT INTO TESTLIBPQ2.TBL1 VALUES (10); */ #ifdef WIN32 @@ -77,6 +77,22 @@ main(int argc, char **argv) exit_nicely(conn); } + /* Set always-secure search path, so malicious users can't take control. */ + res = PQexec(conn, + "SELECT pg_catalog.set_config('search_path', '', false)"); + if (PQresultStatus(res) != PGRES_TUPLES_OK) + { + fprintf(stderr, "SET failed: %s", PQerrorMessage(conn)); + PQclear(res); + exit_nicely(conn); + } + + /* + * Should PQclear PGresult whenever it is no longer needed to avoid memory + * leaks + */ + PQclear(res); + /* * Issue LISTEN command to enable notifications from the rule's NOTIFY. */ @@ -87,11 +103,6 @@ main(int argc, char **argv) PQclear(res); exit_nicely(conn); } - - /* - * should PQclear PGresult whenever it is no longer needed to avoid memory - * leaks - */ PQclear(res); /* Quit after four notifies are received. 
*/ @@ -129,6 +140,7 @@ main(int argc, char **argv) notify->relname, notify->be_pid); PQfreemem(notify); nnotifies++; + PQconsumeInput(conn); } } diff --git a/src/test/examples/testlibpq2.sql b/src/test/examples/testlibpq2.sql index fb7d353507..e8173e4293 100644 --- a/src/test/examples/testlibpq2.sql +++ b/src/test/examples/testlibpq2.sql @@ -1,6 +1,6 @@ +CREATE SCHEMA TESTLIBPQ2; +SET search_path = TESTLIBPQ2; CREATE TABLE TBL1 (i int4); - CREATE TABLE TBL2 (i int4); - CREATE RULE r1 AS ON INSERT TO TBL1 DO -(INSERT INTO TBL2 VALUES (new.i); NOTIFY TBL2); + (INSERT INTO TBL2 VALUES (new.i); NOTIFY TBL2); diff --git a/src/test/examples/testlibpq3.c b/src/test/examples/testlibpq3.c index e11e0567ca..c3b524cfdf 100644 --- a/src/test/examples/testlibpq3.c +++ b/src/test/examples/testlibpq3.c @@ -8,8 +8,9 @@ * Before running this, populate a database with the following commands * (provided in src/test/examples/testlibpq3.sql): * + * CREATE SCHEMA testlibpq3; + * SET search_path = testlibpq3; * CREATE TABLE test1 (i int4, t text, b bytea); - * * INSERT INTO test1 values (1, 'joe''s place', '\\000\\001\\002\\003\\004'); * INSERT INTO test1 values (2, 'ho there', '\\004\\003\\002\\001\\000'); * @@ -141,6 +142,16 @@ main(int argc, char **argv) exit_nicely(conn); } + /* Set always-secure search path, so malicious users can't take control. */ + res = PQexec(conn, "SET search_path = testlibpq3"); + if (PQresultStatus(res) != PGRES_COMMAND_OK) + { + fprintf(stderr, "SET failed: %s", PQerrorMessage(conn)); + PQclear(res); + exit_nicely(conn); + } + PQclear(res); + /* * The point of this program is to illustrate use of PQexecParams() with * out-of-line parameters, as well as binary transmission of data. diff --git a/src/test/examples/testlibpq3.sql b/src/test/examples/testlibpq3.sql index 9d9e217e5d..2213306509 100644 --- a/src/test/examples/testlibpq3.sql +++ b/src/test/examples/testlibpq3.sql @@ -1,4 +1,5 @@ +CREATE SCHEMA testlibpq3; +SET search_path = testlibpq3; CREATE TABLE test1 (i int4, t text, b bytea); - INSERT INTO test1 values (1, 'joe''s place', '\\000\\001\\002\\003\\004'); INSERT INTO test1 values (2, 'ho there', '\\004\\003\\002\\001\\000'); diff --git a/src/test/examples/testlibpq4.c b/src/test/examples/testlibpq4.c index 0ec04313c0..df8e454b5d 100644 --- a/src/test/examples/testlibpq4.c +++ b/src/test/examples/testlibpq4.c @@ -22,8 +22,10 @@ exit_nicely(PGconn *conn1, PGconn *conn2) } static void -check_conn(PGconn *conn, const char *dbName) +check_prepare_conn(PGconn *conn, const char *dbName) { + PGresult *res; + /* check to see that the backend connection was successfully made */ if (PQstatus(conn) != CONNECTION_OK) { @@ -31,6 +33,17 @@ check_conn(PGconn *conn, const char *dbName) dbName, PQerrorMessage(conn)); exit(1); } + + /* Set always-secure search path, so malicious users can't take control. 
*/ + res = PQexec(conn, + "SELECT pg_catalog.set_config('search_path', '', false)"); + if (PQresultStatus(res) != PGRES_TUPLES_OK) + { + fprintf(stderr, "SET failed: %s", PQerrorMessage(conn)); + PQclear(res); + exit(1); + } + PQclear(res); } int @@ -80,10 +93,10 @@ main(int argc, char **argv) /* make a connection to the database */ conn1 = PQsetdb(pghost, pgport, pgoptions, pgtty, dbName1); - check_conn(conn1, dbName1); + check_prepare_conn(conn1, dbName1); conn2 = PQsetdb(pghost, pgport, pgoptions, pgtty, dbName2); - check_conn(conn2, dbName2); + check_prepare_conn(conn2, dbName2); /* start a transaction block */ res1 = PQexec(conn1, "BEGIN"); diff --git a/src/test/examples/testlo.c b/src/test/examples/testlo.c index b7470385eb..79170e97b8 100644 --- a/src/test/examples/testlo.c +++ b/src/test/examples/testlo.c @@ -3,7 +3,7 @@ * testlo.c * test using large objects with libpq * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * @@ -232,6 +232,17 @@ main(int argc, char **argv) exit_nicely(conn); } + /* Set always-secure search path, so malicious users can't take control. */ + res = PQexec(conn, + "SELECT pg_catalog.set_config('search_path', '', false)"); + if (PQresultStatus(res) != PGRES_TUPLES_OK) + { + fprintf(stderr, "SET failed: %s", PQerrorMessage(conn)); + PQclear(res); + exit_nicely(conn); + } + PQclear(res); + res = PQexec(conn, "begin"); PQclear(res); printf("importing file \"%s\" ...\n", in_filename); diff --git a/src/test/examples/testlo64.c b/src/test/examples/testlo64.c index 76558f4797..2b1677aefa 100644 --- a/src/test/examples/testlo64.c +++ b/src/test/examples/testlo64.c @@ -3,7 +3,7 @@ * testlo64.c * test using large objects with libpq using 64-bit APIs * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * @@ -256,6 +256,17 @@ main(int argc, char **argv) exit_nicely(conn); } + /* Set always-secure search path, so malicious users can't take control. */ + res = PQexec(conn, + "SELECT pg_catalog.set_config('search_path', '', false)"); + if (PQresultStatus(res) != PGRES_TUPLES_OK) + { + fprintf(stderr, "SET failed: %s", PQerrorMessage(conn)); + PQclear(res); + exit_nicely(conn); + } + PQclear(res); + res = PQexec(conn, "begin"); PQclear(res); printf("importing file \"%s\" ...\n", in_filename); diff --git a/src/test/isolation/Makefile b/src/test/isolation/Makefile index 8eb4969e9b..c3c8280ea2 100644 --- a/src/test/isolation/Makefile +++ b/src/test/isolation/Makefile @@ -15,6 +15,13 @@ OBJS = specparse.o isolationtester.o $(WIN32RES) all: isolationtester$(X) pg_isolation_regress$(X) +# Though we don't install these binaries, build them during installation +# (including temp-install). Otherwise, "make -j check-world" and "make -j +# installcheck-world" would spawn multiple, concurrent builds in this +# directory. Later builds would overwrite files while earlier builds are +# reading them, causing occasional failures. +install: | all + submake-regress: $(MAKE) -C $(top_builddir)/src/test/regress pg_regress.o @@ -29,15 +36,6 @@ isolationtester$(X): $(OBJS) | submake-libpq submake-libpgport distprep: specparse.c specscanner.c -# There is no correct way to write a rule that generates two files. 
-# Rules with two targets don't have that meaning, they are merely -# shorthand for two otherwise separate rules. To be safe for parallel -# make, we must chain the dependencies like this. The semicolon is -# important, otherwise make will choose the built-in rule for -# gram.y=>gram.c. - -specparse.h: specparse.c ; - # specscanner is compiled as part of specparse specparse.o: specscanner.c diff --git a/src/test/isolation/expected/alter-table-4.out b/src/test/isolation/expected/alter-table-4.out new file mode 100644 index 0000000000..d2dac0be09 --- /dev/null +++ b/src/test/isolation/expected/alter-table-4.out @@ -0,0 +1,57 @@ +Parsed test spec with 2 sessions + +starting permutation: s1b s1delc1 s2sel s1c s2sel +step s1b: BEGIN; +step s1delc1: ALTER TABLE c1 NO INHERIT p; +step s2sel: SELECT SUM(a) FROM p; +step s1c: COMMIT; +step s2sel: <... completed> +sum + +11 +step s2sel: SELECT SUM(a) FROM p; +sum + +1 + +starting permutation: s1b s1delc1 s1addc2 s2sel s1c s2sel +step s1b: BEGIN; +step s1delc1: ALTER TABLE c1 NO INHERIT p; +step s1addc2: ALTER TABLE c2 INHERIT p; +step s2sel: SELECT SUM(a) FROM p; +step s1c: COMMIT; +step s2sel: <... completed> +sum + +11 +step s2sel: SELECT SUM(a) FROM p; +sum + +101 + +starting permutation: s1b s1dropc1 s2sel s1c s2sel +step s1b: BEGIN; +step s1dropc1: DROP TABLE c1; +step s2sel: SELECT SUM(a) FROM p; +step s1c: COMMIT; +step s2sel: <... completed> +sum + +1 +step s2sel: SELECT SUM(a) FROM p; +sum + +1 + +starting permutation: s1b s1delc1 s1modc1a s2sel s1c s2sel +step s1b: BEGIN; +step s1delc1: ALTER TABLE c1 NO INHERIT p; +step s1modc1a: ALTER TABLE c1 ALTER COLUMN a TYPE float; +step s2sel: SELECT SUM(a) FROM p; +step s1c: COMMIT; +step s2sel: <... completed> +error in steps s1c s2sel: ERROR: attribute "a" of relation "c1" does not match parent's type +step s2sel: SELECT SUM(a) FROM p; +sum + +1 diff --git a/src/test/isolation/expected/eval-plan-qual.out b/src/test/isolation/expected/eval-plan-qual.out index 10c784a05f..49b3fb3446 100644 --- a/src/test/isolation/expected/eval-plan-qual.out +++ b/src/test/isolation/expected/eval-plan-qual.out @@ -145,6 +145,26 @@ accountid balance checking 1050 savings 600 +starting permutation: wx2_ext partiallock_ext c2 c1 read_ext +step wx2_ext: UPDATE accounts_ext SET balance = balance + 450; +step partiallock_ext: + SELECT * FROM accounts_ext a1, accounts_ext a2 + WHERE a1.accountid = a2.accountid + FOR UPDATE OF a1; + +step c2: COMMIT; +step partiallock_ext: <... completed> +accountid balance other newcol newcol2 accountid balance other newcol newcol2 + +checking 1050 other 42 checking 600 other 42 +savings 1150 42 savings 700 42 +step c1: COMMIT; +step read_ext: SELECT * FROM accounts_ext ORDER BY accountid; +accountid balance other newcol newcol2 + +checking 1050 other 42 +savings 1150 42 + starting permutation: updateforss readforss c1 c2 step updateforss: UPDATE table_a SET value = 'newTableAValue' WHERE id = 1; @@ -164,6 +184,37 @@ ta_id ta_value tb_row 1 newTableAValue (1,tableBValue) step c2: COMMIT; +starting permutation: updateforcip updateforcip2 c1 c2 read_a +step updateforcip: + UPDATE table_a SET value = NULL WHERE id = 1; + +step updateforcip2: + UPDATE table_a SET value = COALESCE(value, (SELECT text 'newValue')) WHERE id = 1; + +step c1: COMMIT; +step updateforcip2: <... 
completed> +step c2: COMMIT; +step read_a: SELECT * FROM table_a ORDER BY id; +id value + +1 newValue + +starting permutation: updateforcip updateforcip3 c1 c2 read_a +step updateforcip: + UPDATE table_a SET value = NULL WHERE id = 1; + +step updateforcip3: + WITH d(val) AS (SELECT text 'newValue' FROM generate_series(1,1)) + UPDATE table_a SET value = COALESCE(value, (SELECT val FROM d)) WHERE id = 1; + +step c1: COMMIT; +step updateforcip3: <... completed> +step c2: COMMIT; +step read_a: SELECT * FROM table_a ORDER BY id; +id value + +1 newValue + starting permutation: wrtwcte readwcte c1 c2 step wrtwcte: UPDATE table_a SET value = 'tableAValue2' WHERE id = 1; step readwcte: @@ -184,3 +235,51 @@ step readwcte: <... completed> id value 1 tableAValue2 + +starting permutation: wrjt selectjoinforupdate c2 c1 +step wrjt: UPDATE jointest SET data = 42 WHERE id = 7; +step selectjoinforupdate: + set enable_nestloop to 0; + set enable_hashjoin to 0; + set enable_seqscan to 0; + explain (costs off) + select * from jointest a join jointest b on a.id=b.id for update; + select * from jointest a join jointest b on a.id=b.id for update; + +step c2: COMMIT; +step selectjoinforupdate: <... completed> +QUERY PLAN + +LockRows + -> Merge Join + Merge Cond: (a.id = b.id) + -> Index Scan using jointest_id_idx on jointest a + -> Index Scan using jointest_id_idx on jointest b +id data id data + +1 0 1 0 +2 0 2 0 +3 0 3 0 +4 0 4 0 +5 0 5 0 +6 0 6 0 +7 42 7 42 +8 0 8 0 +9 0 9 0 +10 0 10 0 +step c1: COMMIT; + +starting permutation: wrtwcte multireadwcte c1 c2 +step wrtwcte: UPDATE table_a SET value = 'tableAValue2' WHERE id = 1; +step multireadwcte: + WITH updated AS ( + UPDATE table_a SET value = 'tableAValue3' WHERE id = 1 RETURNING id + ) + SELECT (SELECT id FROM updated) AS subid, * FROM updated; + +step c1: COMMIT; +step c2: COMMIT; +step multireadwcte: <... 
completed> +subid id + +1 1 diff --git a/src/test/isolation/expected/freeze-the-dead.out b/src/test/isolation/expected/freeze-the-dead.out new file mode 100644 index 0000000000..8e638f132f --- /dev/null +++ b/src/test/isolation/expected/freeze-the-dead.out @@ -0,0 +1,36 @@ +Parsed test spec with 3 sessions + +starting permutation: s1_begin s2_begin s3_begin s1_update s2_key_share s3_key_share s1_update s1_commit s2_commit s2_vacuum s1_selectone s3_commit s2_vacuum s1_selectall +step s1_begin: BEGIN; +step s2_begin: BEGIN; +step s3_begin: BEGIN; +step s1_update: UPDATE tab_freeze SET x = x + 1 WHERE id = 3; +step s2_key_share: SELECT id FROM tab_freeze WHERE id = 3 FOR KEY SHARE; +id + +3 +step s3_key_share: SELECT id FROM tab_freeze WHERE id = 3 FOR KEY SHARE; +id + +3 +step s1_update: UPDATE tab_freeze SET x = x + 1 WHERE id = 3; +step s1_commit: COMMIT; +step s2_commit: COMMIT; +step s2_vacuum: VACUUM FREEZE tab_freeze; +step s1_selectone: + BEGIN; + SET LOCAL enable_seqscan = false; + SET LOCAL enable_bitmapscan = false; + SELECT * FROM tab_freeze WHERE id = 3; + COMMIT; + +id name x + +3 333 2 +step s3_commit: COMMIT; +step s2_vacuum: VACUUM FREEZE tab_freeze; +step s1_selectall: SELECT * FROM tab_freeze ORDER BY name, id; +id name x + +1 111 0 +3 333 2 diff --git a/src/test/isolation/expected/multiple-cic.out b/src/test/isolation/expected/multiple-cic.out new file mode 100644 index 0000000000..2bf8fe365e --- /dev/null +++ b/src/test/isolation/expected/multiple-cic.out @@ -0,0 +1,19 @@ +Parsed test spec with 2 sessions + +starting permutation: s2l s1i s2i +step s2l: SELECT pg_advisory_lock(281457); +pg_advisory_lock + + +step s1i: + CREATE INDEX CONCURRENTLY mcic_one_pkey ON mcic_one (id) + WHERE lck_shr(281457); + +step s2i: + CREATE INDEX CONCURRENTLY mcic_two_pkey ON mcic_two (id) + WHERE unlck(); + +step s1i: <... completed> +unlck + +t diff --git a/src/test/isolation/expected/multiple-cic_1.out b/src/test/isolation/expected/multiple-cic_1.out new file mode 100644 index 0000000000..e41e04a480 --- /dev/null +++ b/src/test/isolation/expected/multiple-cic_1.out @@ -0,0 +1,20 @@ +Parsed test spec with 2 sessions + +starting permutation: s2l s1i s2i +step s2l: SELECT pg_advisory_lock(281457); +pg_advisory_lock + + +step s1i: + CREATE INDEX CONCURRENTLY mcic_one_pkey ON mcic_one (id) + WHERE lck_shr(281457); + +step s2i: + CREATE INDEX CONCURRENTLY mcic_two_pkey ON mcic_two (id) + WHERE unlck(); + +step s1i: <... completed> +step s2i: <... completed> +unlck + +t diff --git a/src/test/isolation/expected/partition-key-update-1.out b/src/test/isolation/expected/partition-key-update-1.out new file mode 100644 index 0000000000..37fe6a7b27 --- /dev/null +++ b/src/test/isolation/expected/partition-key-update-1.out @@ -0,0 +1,119 @@ +Parsed test spec with 2 sessions + +starting permutation: s1b s2b s1u s1c s2d s2c +step s1b: BEGIN ISOLATION LEVEL READ COMMITTED; +step s2b: BEGIN ISOLATION LEVEL READ COMMITTED; +step s1u: UPDATE foo SET a=2 WHERE a=1; +step s1c: COMMIT; +step s2d: DELETE FROM foo WHERE a=1; +step s2c: COMMIT; + +starting permutation: s1b s2b s1u s2d s1c s2c +step s1b: BEGIN ISOLATION LEVEL READ COMMITTED; +step s2b: BEGIN ISOLATION LEVEL READ COMMITTED; +step s1u: UPDATE foo SET a=2 WHERE a=1; +step s2d: DELETE FROM foo WHERE a=1; +step s1c: COMMIT; +step s2d: <... 
completed> +error in steps s1c s2d: ERROR: tuple to be deleted was already moved to another partition due to concurrent update +step s2c: COMMIT; + +starting permutation: s1b s2b s2d s1u s2c s1c +step s1b: BEGIN ISOLATION LEVEL READ COMMITTED; +step s2b: BEGIN ISOLATION LEVEL READ COMMITTED; +step s2d: DELETE FROM foo WHERE a=1; +step s1u: UPDATE foo SET a=2 WHERE a=1; +step s2c: COMMIT; +step s1u: <... completed> +step s1c: COMMIT; + +starting permutation: s1b s2b s1u2 s1c s2u2 s2c +step s1b: BEGIN ISOLATION LEVEL READ COMMITTED; +step s2b: BEGIN ISOLATION LEVEL READ COMMITTED; +step s1u2: UPDATE footrg SET b='EFG' WHERE a=1; +step s1c: COMMIT; +step s2u2: UPDATE footrg SET b='XYZ' WHERE a=1; +step s2c: COMMIT; + +starting permutation: s1b s2b s1u2 s2u2 s1c s2c +step s1b: BEGIN ISOLATION LEVEL READ COMMITTED; +step s2b: BEGIN ISOLATION LEVEL READ COMMITTED; +step s1u2: UPDATE footrg SET b='EFG' WHERE a=1; +step s2u2: UPDATE footrg SET b='XYZ' WHERE a=1; +step s1c: COMMIT; +step s2u2: <... completed> +error in steps s1c s2u2: ERROR: tuple to be locked was already moved to another partition due to concurrent update +step s2c: COMMIT; + +starting permutation: s1b s2b s2u2 s1u2 s2c s1c +step s1b: BEGIN ISOLATION LEVEL READ COMMITTED; +step s2b: BEGIN ISOLATION LEVEL READ COMMITTED; +step s2u2: UPDATE footrg SET b='XYZ' WHERE a=1; +step s1u2: UPDATE footrg SET b='EFG' WHERE a=1; +step s2c: COMMIT; +step s1u2: <... completed> +error in steps s2c s1u2: ERROR: tuple to be locked was already moved to another partition due to concurrent update +step s1c: COMMIT; + +starting permutation: s1b s2b s1u3pc s2i s1c s2c +step s1b: BEGIN ISOLATION LEVEL READ COMMITTED; +step s2b: BEGIN ISOLATION LEVEL READ COMMITTED; +step s1u3pc: UPDATE foo_range_parted SET a=11 WHERE a=7; +step s2i: INSERT INTO bar VALUES(7); +step s1c: COMMIT; +step s2i: <... completed> +error in steps s1c s2i: ERROR: tuple to be locked was already moved to another partition due to concurrent update +step s2c: COMMIT; + +starting permutation: s1b s2b s1u3pc s2i s1r s2c +step s1b: BEGIN ISOLATION LEVEL READ COMMITTED; +step s2b: BEGIN ISOLATION LEVEL READ COMMITTED; +step s1u3pc: UPDATE foo_range_parted SET a=11 WHERE a=7; +step s2i: INSERT INTO bar VALUES(7); +step s1r: ROLLBACK; +step s2i: <... completed> +step s2c: COMMIT; + +starting permutation: s1b s2b s1u3npc s1u3pc s2i s1c s2c +step s1b: BEGIN ISOLATION LEVEL READ COMMITTED; +step s2b: BEGIN ISOLATION LEVEL READ COMMITTED; +step s1u3npc: UPDATE foo_range_parted SET b='XYZ' WHERE a=7; +step s1u3pc: UPDATE foo_range_parted SET a=11 WHERE a=7; +step s2i: INSERT INTO bar VALUES(7); +step s1c: COMMIT; +step s2i: <... completed> +error in steps s1c s2i: ERROR: tuple to be locked was already moved to another partition due to concurrent update +step s2c: COMMIT; + +starting permutation: s1b s2b s1u3npc s1u3pc s2i s1r s2c +step s1b: BEGIN ISOLATION LEVEL READ COMMITTED; +step s2b: BEGIN ISOLATION LEVEL READ COMMITTED; +step s1u3npc: UPDATE foo_range_parted SET b='XYZ' WHERE a=7; +step s1u3pc: UPDATE foo_range_parted SET a=11 WHERE a=7; +step s2i: INSERT INTO bar VALUES(7); +step s1r: ROLLBACK; +step s2i: <... 
completed> +step s2c: COMMIT; + +starting permutation: s1b s2b s1u3npc s1u3pc s1u3pc s2i s1c s2c +step s1b: BEGIN ISOLATION LEVEL READ COMMITTED; +step s2b: BEGIN ISOLATION LEVEL READ COMMITTED; +step s1u3npc: UPDATE foo_range_parted SET b='XYZ' WHERE a=7; +step s1u3pc: UPDATE foo_range_parted SET a=11 WHERE a=7; +step s1u3pc: UPDATE foo_range_parted SET a=11 WHERE a=7; +step s2i: INSERT INTO bar VALUES(7); +step s1c: COMMIT; +step s2i: <... completed> +error in steps s1c s2i: ERROR: tuple to be locked was already moved to another partition due to concurrent update +step s2c: COMMIT; + +starting permutation: s1b s2b s1u3npc s1u3pc s1u3pc s2i s1r s2c +step s1b: BEGIN ISOLATION LEVEL READ COMMITTED; +step s2b: BEGIN ISOLATION LEVEL READ COMMITTED; +step s1u3npc: UPDATE foo_range_parted SET b='XYZ' WHERE a=7; +step s1u3pc: UPDATE foo_range_parted SET a=11 WHERE a=7; +step s1u3pc: UPDATE foo_range_parted SET a=11 WHERE a=7; +step s2i: INSERT INTO bar VALUES(7); +step s1r: ROLLBACK; +step s2i: <... completed> +step s2c: COMMIT; diff --git a/src/test/isolation/expected/partition-key-update-2.out b/src/test/isolation/expected/partition-key-update-2.out new file mode 100644 index 0000000000..363de0d69c --- /dev/null +++ b/src/test/isolation/expected/partition-key-update-2.out @@ -0,0 +1,29 @@ +Parsed test spec with 3 sessions + +starting permutation: s1u s2donothing s3donothing s1c s2c s3select s3c +step s1u: UPDATE foo SET a=2, b=b || ' -> moved by session-1' WHERE a=1; +step s2donothing: INSERT INTO foo VALUES(1, 'session-2 donothing') ON CONFLICT DO NOTHING; +step s3donothing: INSERT INTO foo VALUES(2, 'session-3 donothing') ON CONFLICT DO NOTHING; +step s1c: COMMIT; +step s2donothing: <... completed> +step s3donothing: <... completed> +step s2c: COMMIT; +step s3select: SELECT * FROM foo ORDER BY a; +a b + +1 session-2 donothing +2 initial tuple -> moved by session-1 +step s3c: COMMIT; + +starting permutation: s2donothing s1u s3donothing s1c s2c s3select s3c +step s2donothing: INSERT INTO foo VALUES(1, 'session-2 donothing') ON CONFLICT DO NOTHING; +step s1u: UPDATE foo SET a=2, b=b || ' -> moved by session-1' WHERE a=1; +step s3donothing: INSERT INTO foo VALUES(2, 'session-3 donothing') ON CONFLICT DO NOTHING; +step s1c: COMMIT; +step s3donothing: <... completed> +step s2c: COMMIT; +step s3select: SELECT * FROM foo ORDER BY a; +a b + +2 initial tuple -> moved by session-1 +step s3c: COMMIT; diff --git a/src/test/isolation/expected/partition-key-update-3.out b/src/test/isolation/expected/partition-key-update-3.out new file mode 100644 index 0000000000..42dfe64ad3 --- /dev/null +++ b/src/test/isolation/expected/partition-key-update-3.out @@ -0,0 +1,139 @@ +Parsed test spec with 3 sessions + +starting permutation: s2beginrr s3beginrr s1u s2donothing s1c s2c s3donothing s3c s2select +step s2beginrr: BEGIN ISOLATION LEVEL REPEATABLE READ; +step s3beginrr: BEGIN ISOLATION LEVEL REPEATABLE READ; +step s1u: UPDATE foo SET a=2, b=b || ' -> moved by session-1' WHERE a=1; +step s2donothing: INSERT INTO foo VALUES(1, 'session-2 donothing') ON CONFLICT DO NOTHING; +step s1c: COMMIT; +step s2donothing: <... 
completed> +step s2c: COMMIT; +step s3donothing: INSERT INTO foo VALUES(2, 'session-3 donothing'), (2, 'session-3 donothing2') ON CONFLICT DO NOTHING; +step s3c: COMMIT; +step s2select: SELECT * FROM foo ORDER BY a; +a b + +1 session-2 donothing +2 initial tuple -> moved by session-1 + +starting permutation: s2beginrr s3beginrr s1u s3donothing s1c s3c s2donothing s2c s2select +step s2beginrr: BEGIN ISOLATION LEVEL REPEATABLE READ; +step s3beginrr: BEGIN ISOLATION LEVEL REPEATABLE READ; +step s1u: UPDATE foo SET a=2, b=b || ' -> moved by session-1' WHERE a=1; +step s3donothing: INSERT INTO foo VALUES(2, 'session-3 donothing'), (2, 'session-3 donothing2') ON CONFLICT DO NOTHING; +step s1c: COMMIT; +step s3donothing: <... completed> +error in steps s1c s3donothing: ERROR: could not serialize access due to concurrent update +step s3c: COMMIT; +step s2donothing: INSERT INTO foo VALUES(1, 'session-2 donothing') ON CONFLICT DO NOTHING; +step s2c: COMMIT; +step s2select: SELECT * FROM foo ORDER BY a; +a b + +1 session-2 donothing +2 initial tuple -> moved by session-1 + +starting permutation: s2beginrr s3beginrr s1u s2donothing s3donothing s1c s2c s3c s2select +step s2beginrr: BEGIN ISOLATION LEVEL REPEATABLE READ; +step s3beginrr: BEGIN ISOLATION LEVEL REPEATABLE READ; +step s1u: UPDATE foo SET a=2, b=b || ' -> moved by session-1' WHERE a=1; +step s2donothing: INSERT INTO foo VALUES(1, 'session-2 donothing') ON CONFLICT DO NOTHING; +step s3donothing: INSERT INTO foo VALUES(2, 'session-3 donothing'), (2, 'session-3 donothing2') ON CONFLICT DO NOTHING; +step s1c: COMMIT; +step s2donothing: <... completed> +step s3donothing: <... completed> +error in steps s1c s2donothing s3donothing: ERROR: could not serialize access due to concurrent update +step s2c: COMMIT; +step s3c: COMMIT; +step s2select: SELECT * FROM foo ORDER BY a; +a b + +1 session-2 donothing +2 initial tuple -> moved by session-1 + +starting permutation: s2beginrr s3beginrr s1u s3donothing s2donothing s1c s3c s2c s2select +step s2beginrr: BEGIN ISOLATION LEVEL REPEATABLE READ; +step s3beginrr: BEGIN ISOLATION LEVEL REPEATABLE READ; +step s1u: UPDATE foo SET a=2, b=b || ' -> moved by session-1' WHERE a=1; +step s3donothing: INSERT INTO foo VALUES(2, 'session-3 donothing'), (2, 'session-3 donothing2') ON CONFLICT DO NOTHING; +step s2donothing: INSERT INTO foo VALUES(1, 'session-2 donothing') ON CONFLICT DO NOTHING; +step s1c: COMMIT; +step s3donothing: <... completed> +step s2donothing: <... completed> +error in steps s1c s3donothing s2donothing: ERROR: could not serialize access due to concurrent update +step s3c: COMMIT; +step s2c: COMMIT; +step s2select: SELECT * FROM foo ORDER BY a; +a b + +1 session-2 donothing +2 initial tuple -> moved by session-1 + +starting permutation: s2begins s3begins s1u s2donothing s1c s2c s3donothing s3c s2select +step s2begins: BEGIN ISOLATION LEVEL SERIALIZABLE; +step s3begins: BEGIN ISOLATION LEVEL SERIALIZABLE; +step s1u: UPDATE foo SET a=2, b=b || ' -> moved by session-1' WHERE a=1; +step s2donothing: INSERT INTO foo VALUES(1, 'session-2 donothing') ON CONFLICT DO NOTHING; +step s1c: COMMIT; +step s2donothing: <... 
completed> +step s2c: COMMIT; +step s3donothing: INSERT INTO foo VALUES(2, 'session-3 donothing'), (2, 'session-3 donothing2') ON CONFLICT DO NOTHING; +step s3c: COMMIT; +step s2select: SELECT * FROM foo ORDER BY a; +a b + +1 session-2 donothing +2 initial tuple -> moved by session-1 + +starting permutation: s2begins s3begins s1u s3donothing s1c s3c s2donothing s2c s2select +step s2begins: BEGIN ISOLATION LEVEL SERIALIZABLE; +step s3begins: BEGIN ISOLATION LEVEL SERIALIZABLE; +step s1u: UPDATE foo SET a=2, b=b || ' -> moved by session-1' WHERE a=1; +step s3donothing: INSERT INTO foo VALUES(2, 'session-3 donothing'), (2, 'session-3 donothing2') ON CONFLICT DO NOTHING; +step s1c: COMMIT; +step s3donothing: <... completed> +error in steps s1c s3donothing: ERROR: could not serialize access due to concurrent update +step s3c: COMMIT; +step s2donothing: INSERT INTO foo VALUES(1, 'session-2 donothing') ON CONFLICT DO NOTHING; +step s2c: COMMIT; +step s2select: SELECT * FROM foo ORDER BY a; +a b + +1 session-2 donothing +2 initial tuple -> moved by session-1 + +starting permutation: s2begins s3begins s1u s2donothing s3donothing s1c s2c s3c s2select +step s2begins: BEGIN ISOLATION LEVEL SERIALIZABLE; +step s3begins: BEGIN ISOLATION LEVEL SERIALIZABLE; +step s1u: UPDATE foo SET a=2, b=b || ' -> moved by session-1' WHERE a=1; +step s2donothing: INSERT INTO foo VALUES(1, 'session-2 donothing') ON CONFLICT DO NOTHING; +step s3donothing: INSERT INTO foo VALUES(2, 'session-3 donothing'), (2, 'session-3 donothing2') ON CONFLICT DO NOTHING; +step s1c: COMMIT; +step s2donothing: <... completed> +step s3donothing: <... completed> +error in steps s1c s2donothing s3donothing: ERROR: could not serialize access due to concurrent update +step s2c: COMMIT; +step s3c: COMMIT; +step s2select: SELECT * FROM foo ORDER BY a; +a b + +1 session-2 donothing +2 initial tuple -> moved by session-1 + +starting permutation: s2begins s3begins s1u s3donothing s2donothing s1c s3c s2c s2select +step s2begins: BEGIN ISOLATION LEVEL SERIALIZABLE; +step s3begins: BEGIN ISOLATION LEVEL SERIALIZABLE; +step s1u: UPDATE foo SET a=2, b=b || ' -> moved by session-1' WHERE a=1; +step s3donothing: INSERT INTO foo VALUES(2, 'session-3 donothing'), (2, 'session-3 donothing2') ON CONFLICT DO NOTHING; +step s2donothing: INSERT INTO foo VALUES(1, 'session-2 donothing') ON CONFLICT DO NOTHING; +step s1c: COMMIT; +step s3donothing: <... completed> +step s2donothing: <... completed> +error in steps s1c s3donothing s2donothing: ERROR: could not serialize access due to concurrent update +step s3c: COMMIT; +step s2c: COMMIT; +step s2select: SELECT * FROM foo ORDER BY a; +a b + +1 session-2 donothing +2 initial tuple -> moved by session-1 diff --git a/src/test/isolation/expected/partition-key-update-4.out b/src/test/isolation/expected/partition-key-update-4.out new file mode 100644 index 0000000000..774a7faf6c --- /dev/null +++ b/src/test/isolation/expected/partition-key-update-4.out @@ -0,0 +1,60 @@ +Parsed test spec with 2 sessions + +starting permutation: s1b s2b s2u1 s1u s2c s1c s1s +step s1b: BEGIN ISOLATION LEVEL READ COMMITTED; +step s2b: BEGIN ISOLATION LEVEL READ COMMITTED; +step s2u1: UPDATE foo SET b = b || ' update2' WHERE a = 1; +step s1u: UPDATE foo SET a = a + 1, b = b || ' update1' WHERE b like '%ABC%'; +step s2c: COMMIT; +step s1u: <... 
completed> +step s1c: COMMIT; +step s1s: SELECT tableoid::regclass, * FROM foo ORDER BY a; +tableoid a b + +foo2 2 ABC update2 update1 + +starting permutation: s1b s2b s2ut1 s1ut s2c s1c s1st s1stl +step s1b: BEGIN ISOLATION LEVEL READ COMMITTED; +step s2b: BEGIN ISOLATION LEVEL READ COMMITTED; +step s2ut1: UPDATE footrg SET b = b || ' update2' WHERE a = 1; +step s1ut: UPDATE footrg SET a = a + 1, b = b || ' update1' WHERE b like '%ABC%'; +step s2c: COMMIT; +step s1ut: <... completed> +step s1c: COMMIT; +step s1st: SELECT tableoid::regclass, * FROM footrg ORDER BY a; +tableoid a b + +footrg2 2 ABC update2 update1 +step s1stl: SELECT * FROM triglog ORDER BY a; +a b + +1 ABC update2 trigger + +starting permutation: s1b s2b s2u2 s1u s2c s1c s1s +step s1b: BEGIN ISOLATION LEVEL READ COMMITTED; +step s2b: BEGIN ISOLATION LEVEL READ COMMITTED; +step s2u2: UPDATE foo SET b = 'EFG' WHERE a = 1; +step s1u: UPDATE foo SET a = a + 1, b = b || ' update1' WHERE b like '%ABC%'; +step s2c: COMMIT; +step s1u: <... completed> +step s1c: COMMIT; +step s1s: SELECT tableoid::regclass, * FROM foo ORDER BY a; +tableoid a b + +foo1 1 EFG + +starting permutation: s1b s2b s2ut2 s1ut s2c s1c s1st s1stl +step s1b: BEGIN ISOLATION LEVEL READ COMMITTED; +step s2b: BEGIN ISOLATION LEVEL READ COMMITTED; +step s2ut2: UPDATE footrg SET b = 'EFG' WHERE a = 1; +step s1ut: UPDATE footrg SET a = a + 1, b = b || ' update1' WHERE b like '%ABC%'; +step s2c: COMMIT; +step s1ut: <... completed> +step s1c: COMMIT; +step s1st: SELECT tableoid::regclass, * FROM footrg ORDER BY a; +tableoid a b + +footrg1 1 EFG +step s1stl: SELECT * FROM triglog ORDER BY a; +a b + diff --git a/src/test/isolation/expected/plpgsql-toast.out b/src/test/isolation/expected/plpgsql-toast.out new file mode 100644 index 0000000000..43411533f1 --- /dev/null +++ b/src/test/isolation/expected/plpgsql-toast.out @@ -0,0 +1,189 @@ +Parsed test spec with 2 sessions + +starting permutation: lock assign1 vacuum unlock +pg_advisory_unlock_all + + +pg_advisory_unlock_all + + +step lock: + SELECT pg_advisory_lock(1); + +pg_advisory_lock + + +step assign1: +do $$ + declare + x text; + begin + select test1.b into x from test1; + delete from test1; + commit; + perform pg_advisory_lock(1); + raise notice 'x = %', x; + end; +$$; + +step vacuum: + VACUUM test1; + +step unlock: + SELECT pg_advisory_unlock(1); + +pg_advisory_unlock + +t +step assign1: <... completed> + +starting permutation: lock assign2 vacuum unlock +pg_advisory_unlock_all + + +pg_advisory_unlock_all + + +step lock: + SELECT pg_advisory_lock(1); + +pg_advisory_lock + + +step assign2: +do $$ + declare + x text; + begin + x := (select test1.b from test1); + delete from test1; + commit; + perform pg_advisory_lock(1); + raise notice 'x = %', x; + end; +$$; + +step vacuum: + VACUUM test1; + +step unlock: + SELECT pg_advisory_unlock(1); + +pg_advisory_unlock + +t +step assign2: <... completed> + +starting permutation: lock assign3 vacuum unlock +pg_advisory_unlock_all + + +pg_advisory_unlock_all + + +step lock: + SELECT pg_advisory_lock(1); + +pg_advisory_lock + + +step assign3: +do $$ + declare + r record; + begin + select * into r from test1; + r.b := (select test1.b from test1); + delete from test1; + commit; + perform pg_advisory_lock(1); + raise notice 'r = %', r; + end; +$$; + +step vacuum: + VACUUM test1; + +step unlock: + SELECT pg_advisory_unlock(1); + +pg_advisory_unlock + +t +step assign3: <... 
completed> + +starting permutation: lock assign4 vacuum unlock +pg_advisory_unlock_all + + +pg_advisory_unlock_all + + +step lock: + SELECT pg_advisory_lock(1); + +pg_advisory_lock + + +step assign4: +do $$ + declare + r test2; + begin + select * into r from test1; + delete from test1; + commit; + perform pg_advisory_lock(1); + raise notice 'r = %', r; + end; +$$; + +step vacuum: + VACUUM test1; + +step unlock: + SELECT pg_advisory_unlock(1); + +pg_advisory_unlock + +t +step assign4: <... completed> + +starting permutation: lock assign5 vacuum unlock +pg_advisory_unlock_all + + +pg_advisory_unlock_all + + +step lock: + SELECT pg_advisory_lock(1); + +pg_advisory_lock + + +step assign5: +do $$ + declare + r record; + begin + for r in select test1.b from test1 loop + null; + end loop; + delete from test1; + commit; + perform pg_advisory_lock(1); + raise notice 'r = %', r; + end; +$$; + +step vacuum: + VACUUM test1; + +step unlock: + SELECT pg_advisory_unlock(1); + +pg_advisory_unlock + +t +step assign5: <... completed> diff --git a/src/test/isolation/expected/predicate-gin-fastupdate.out b/src/test/isolation/expected/predicate-gin-fastupdate.out new file mode 100644 index 0000000000..7d4fa8e024 --- /dev/null +++ b/src/test/isolation/expected/predicate-gin-fastupdate.out @@ -0,0 +1,30 @@ +Parsed test spec with 3 sessions + +starting permutation: r1 r2 w1 c1 w2 c2 +step r1: SELECT count(*) FROM gin_tbl WHERE p @> array[1000]; +count + +2 +step r2: SELECT * FROM other_tbl; +id + +step w1: INSERT INTO other_tbl VALUES (42); +step c1: COMMIT; +step w2: INSERT INTO gin_tbl SELECT array[1000,19001]; +ERROR: could not serialize access due to read/write dependencies among transactions +step c2: COMMIT; + +starting permutation: r1 r2 w1 c1 fastupdate_on w2 c2 +step r1: SELECT count(*) FROM gin_tbl WHERE p @> array[1000]; +count + +2 +step r2: SELECT * FROM other_tbl; +id + +step w1: INSERT INTO other_tbl VALUES (42); +step c1: COMMIT; +step fastupdate_on: ALTER INDEX ginidx SET (fastupdate = on); +step w2: INSERT INTO gin_tbl SELECT array[1000,19001]; +ERROR: could not serialize access due to read/write dependencies among transactions +step c2: COMMIT; diff --git a/src/test/isolation/expected/predicate-gin-nomatch.out b/src/test/isolation/expected/predicate-gin-nomatch.out new file mode 100644 index 0000000000..5e733262a4 --- /dev/null +++ b/src/test/isolation/expected/predicate-gin-nomatch.out @@ -0,0 +1,15 @@ +Parsed test spec with 2 sessions + +starting permutation: r1 r2 w1 c1 w2 c2 +step r1: SELECT count(*) FROM gin_tbl WHERE p @> array[-1]; +count + +0 +step r2: SELECT * FROM other_tbl; +id + +step w1: INSERT INTO other_tbl VALUES (42); +step c1: COMMIT; +step w2: INSERT INTO gin_tbl SELECT array[-1]; +ERROR: could not serialize access due to read/write dependencies among transactions +step c2: COMMIT; diff --git a/src/test/isolation/expected/predicate-gin.out b/src/test/isolation/expected/predicate-gin.out new file mode 100644 index 0000000000..bdf8911923 --- /dev/null +++ b/src/test/isolation/expected/predicate-gin.out @@ -0,0 +1,756 @@ +Parsed test spec with 2 sessions + +starting permutation: rxy1 wx1 c1 rxy2 wy2 c2 +step rxy1: select count(*) from gin_tbl where p @> array[4,5]; +count + +10000 +step wx1: insert into gin_tbl select g, array[5,6] from generate_series + (20001, 20050) g; +step c1: commit; +step rxy2: select count(*) from gin_tbl where p @> array[5,6]; +count + +10050 +step wy2: insert into gin_tbl select g, array[4,5] from + generate_series(20051, 20100) g; +step c2: commit; + 
+starting permutation: rxy2 wy2 c2 rxy1 wx1 c1 +step rxy2: select count(*) from gin_tbl where p @> array[5,6]; +count + +10000 +step wy2: insert into gin_tbl select g, array[4,5] from + generate_series(20051, 20100) g; +step c2: commit; +step rxy1: select count(*) from gin_tbl where p @> array[4,5]; +count + +10050 +step wx1: insert into gin_tbl select g, array[5,6] from generate_series + (20001, 20050) g; +step c1: commit; + +starting permutation: rxy3 wx3 c1 rxy4 wy4 c2 +step rxy3: select count(*) from gin_tbl where p @> array[1,2] or + p @> array[100,200] or p @> array[500,1000] or p @> array[1000,2000]; +count + +4 +step wx3: insert into gin_tbl select g, array[g,g*2] from generate_series + (1, 50) g; +step c1: commit; +step rxy4: select count(*) from gin_tbl where p @> array[4000,8000] or + p @> array[5000,10000] or p @> array[6000,12000] or + p @> array[8000,16000]; +count + +4 +step wy4: insert into gin_tbl select g, array[g,g*2] from generate_series + (10000, 10050) g; +step c2: commit; + +starting permutation: rxy4 wy4 c2 rxy3 wx3 c1 +step rxy4: select count(*) from gin_tbl where p @> array[4000,8000] or + p @> array[5000,10000] or p @> array[6000,12000] or + p @> array[8000,16000]; +count + +4 +step wy4: insert into gin_tbl select g, array[g,g*2] from generate_series + (10000, 10050) g; +step c2: commit; +step rxy3: select count(*) from gin_tbl where p @> array[1,2] or + p @> array[100,200] or p @> array[500,1000] or p @> array[1000,2000]; +count + +4 +step wx3: insert into gin_tbl select g, array[g,g*2] from generate_series + (1, 50) g; +step c1: commit; + +starting permutation: rxy1 wx1 rxy2 c1 wy2 c2 +step rxy1: select count(*) from gin_tbl where p @> array[4,5]; +count + +10000 +step wx1: insert into gin_tbl select g, array[5,6] from generate_series + (20001, 20050) g; +step rxy2: select count(*) from gin_tbl where p @> array[5,6]; +count + +10000 +step c1: commit; +step wy2: insert into gin_tbl select g, array[4,5] from + generate_series(20051, 20100) g; +ERROR: could not serialize access due to read/write dependencies among transactions +step c2: commit; + +starting permutation: rxy1 wx1 rxy2 wy2 c1 c2 +step rxy1: select count(*) from gin_tbl where p @> array[4,5]; +count + +10000 +step wx1: insert into gin_tbl select g, array[5,6] from generate_series + (20001, 20050) g; +step rxy2: select count(*) from gin_tbl where p @> array[5,6]; +count + +10000 +step wy2: insert into gin_tbl select g, array[4,5] from + generate_series(20051, 20100) g; +step c1: commit; +step c2: commit; +ERROR: could not serialize access due to read/write dependencies among transactions + +starting permutation: rxy1 wx1 rxy2 wy2 c2 c1 +step rxy1: select count(*) from gin_tbl where p @> array[4,5]; +count + +10000 +step wx1: insert into gin_tbl select g, array[5,6] from generate_series + (20001, 20050) g; +step rxy2: select count(*) from gin_tbl where p @> array[5,6]; +count + +10000 +step wy2: insert into gin_tbl select g, array[4,5] from + generate_series(20051, 20100) g; +step c2: commit; +step c1: commit; +ERROR: could not serialize access due to read/write dependencies among transactions + +starting permutation: rxy1 rxy2 wx1 c1 wy2 c2 +step rxy1: select count(*) from gin_tbl where p @> array[4,5]; +count + +10000 +step rxy2: select count(*) from gin_tbl where p @> array[5,6]; +count + +10000 +step wx1: insert into gin_tbl select g, array[5,6] from generate_series + (20001, 20050) g; +step c1: commit; +step wy2: insert into gin_tbl select g, array[4,5] from + generate_series(20051, 20100) g; 
+ERROR: could not serialize access due to read/write dependencies among transactions +step c2: commit; + +starting permutation: rxy1 rxy2 wx1 wy2 c1 c2 +step rxy1: select count(*) from gin_tbl where p @> array[4,5]; +count + +10000 +step rxy2: select count(*) from gin_tbl where p @> array[5,6]; +count + +10000 +step wx1: insert into gin_tbl select g, array[5,6] from generate_series + (20001, 20050) g; +step wy2: insert into gin_tbl select g, array[4,5] from + generate_series(20051, 20100) g; +step c1: commit; +step c2: commit; +ERROR: could not serialize access due to read/write dependencies among transactions + +starting permutation: rxy1 rxy2 wx1 wy2 c2 c1 +step rxy1: select count(*) from gin_tbl where p @> array[4,5]; +count + +10000 +step rxy2: select count(*) from gin_tbl where p @> array[5,6]; +count + +10000 +step wx1: insert into gin_tbl select g, array[5,6] from generate_series + (20001, 20050) g; +step wy2: insert into gin_tbl select g, array[4,5] from + generate_series(20051, 20100) g; +step c2: commit; +step c1: commit; +ERROR: could not serialize access due to read/write dependencies among transactions + +starting permutation: rxy1 rxy2 wy2 wx1 c1 c2 +step rxy1: select count(*) from gin_tbl where p @> array[4,5]; +count + +10000 +step rxy2: select count(*) from gin_tbl where p @> array[5,6]; +count + +10000 +step wy2: insert into gin_tbl select g, array[4,5] from + generate_series(20051, 20100) g; +step wx1: insert into gin_tbl select g, array[5,6] from generate_series + (20001, 20050) g; +step c1: commit; +step c2: commit; +ERROR: could not serialize access due to read/write dependencies among transactions + +starting permutation: rxy1 rxy2 wy2 wx1 c2 c1 +step rxy1: select count(*) from gin_tbl where p @> array[4,5]; +count + +10000 +step rxy2: select count(*) from gin_tbl where p @> array[5,6]; +count + +10000 +step wy2: insert into gin_tbl select g, array[4,5] from + generate_series(20051, 20100) g; +step wx1: insert into gin_tbl select g, array[5,6] from generate_series + (20001, 20050) g; +step c2: commit; +step c1: commit; +ERROR: could not serialize access due to read/write dependencies among transactions + +starting permutation: rxy1 rxy2 wy2 c2 wx1 c1 +step rxy1: select count(*) from gin_tbl where p @> array[4,5]; +count + +10000 +step rxy2: select count(*) from gin_tbl where p @> array[5,6]; +count + +10000 +step wy2: insert into gin_tbl select g, array[4,5] from + generate_series(20051, 20100) g; +step c2: commit; +step wx1: insert into gin_tbl select g, array[5,6] from generate_series + (20001, 20050) g; +ERROR: could not serialize access due to read/write dependencies among transactions +step c1: commit; + +starting permutation: rxy2 rxy1 wx1 c1 wy2 c2 +step rxy2: select count(*) from gin_tbl where p @> array[5,6]; +count + +10000 +step rxy1: select count(*) from gin_tbl where p @> array[4,5]; +count + +10000 +step wx1: insert into gin_tbl select g, array[5,6] from generate_series + (20001, 20050) g; +step c1: commit; +step wy2: insert into gin_tbl select g, array[4,5] from + generate_series(20051, 20100) g; +ERROR: could not serialize access due to read/write dependencies among transactions +step c2: commit; + +starting permutation: rxy2 rxy1 wx1 wy2 c1 c2 +step rxy2: select count(*) from gin_tbl where p @> array[5,6]; +count + +10000 +step rxy1: select count(*) from gin_tbl where p @> array[4,5]; +count + +10000 +step wx1: insert into gin_tbl select g, array[5,6] from generate_series + (20001, 20050) g; +step wy2: insert into gin_tbl select g, array[4,5] from + 
generate_series(20051, 20100) g; +step c1: commit; +step c2: commit; +ERROR: could not serialize access due to read/write dependencies among transactions + +starting permutation: rxy2 rxy1 wx1 wy2 c2 c1 +step rxy2: select count(*) from gin_tbl where p @> array[5,6]; +count + +10000 +step rxy1: select count(*) from gin_tbl where p @> array[4,5]; +count + +10000 +step wx1: insert into gin_tbl select g, array[5,6] from generate_series + (20001, 20050) g; +step wy2: insert into gin_tbl select g, array[4,5] from + generate_series(20051, 20100) g; +step c2: commit; +step c1: commit; +ERROR: could not serialize access due to read/write dependencies among transactions + +starting permutation: rxy2 rxy1 wy2 wx1 c1 c2 +step rxy2: select count(*) from gin_tbl where p @> array[5,6]; +count + +10000 +step rxy1: select count(*) from gin_tbl where p @> array[4,5]; +count + +10000 +step wy2: insert into gin_tbl select g, array[4,5] from + generate_series(20051, 20100) g; +step wx1: insert into gin_tbl select g, array[5,6] from generate_series + (20001, 20050) g; +step c1: commit; +step c2: commit; +ERROR: could not serialize access due to read/write dependencies among transactions + +starting permutation: rxy2 rxy1 wy2 wx1 c2 c1 +step rxy2: select count(*) from gin_tbl where p @> array[5,6]; +count + +10000 +step rxy1: select count(*) from gin_tbl where p @> array[4,5]; +count + +10000 +step wy2: insert into gin_tbl select g, array[4,5] from + generate_series(20051, 20100) g; +step wx1: insert into gin_tbl select g, array[5,6] from generate_series + (20001, 20050) g; +step c2: commit; +step c1: commit; +ERROR: could not serialize access due to read/write dependencies among transactions + +starting permutation: rxy2 rxy1 wy2 c2 wx1 c1 +step rxy2: select count(*) from gin_tbl where p @> array[5,6]; +count + +10000 +step rxy1: select count(*) from gin_tbl where p @> array[4,5]; +count + +10000 +step wy2: insert into gin_tbl select g, array[4,5] from + generate_series(20051, 20100) g; +step c2: commit; +step wx1: insert into gin_tbl select g, array[5,6] from generate_series + (20001, 20050) g; +ERROR: could not serialize access due to read/write dependencies among transactions +step c1: commit; + +starting permutation: rxy2 wy2 rxy1 wx1 c1 c2 +step rxy2: select count(*) from gin_tbl where p @> array[5,6]; +count + +10000 +step wy2: insert into gin_tbl select g, array[4,5] from + generate_series(20051, 20100) g; +step rxy1: select count(*) from gin_tbl where p @> array[4,5]; +count + +10000 +step wx1: insert into gin_tbl select g, array[5,6] from generate_series + (20001, 20050) g; +step c1: commit; +step c2: commit; +ERROR: could not serialize access due to read/write dependencies among transactions + +starting permutation: rxy2 wy2 rxy1 wx1 c2 c1 +step rxy2: select count(*) from gin_tbl where p @> array[5,6]; +count + +10000 +step wy2: insert into gin_tbl select g, array[4,5] from + generate_series(20051, 20100) g; +step rxy1: select count(*) from gin_tbl where p @> array[4,5]; +count + +10000 +step wx1: insert into gin_tbl select g, array[5,6] from generate_series + (20001, 20050) g; +step c2: commit; +step c1: commit; +ERROR: could not serialize access due to read/write dependencies among transactions + +starting permutation: rxy2 wy2 rxy1 c2 wx1 c1 +step rxy2: select count(*) from gin_tbl where p @> array[5,6]; +count + +10000 +step wy2: insert into gin_tbl select g, array[4,5] from + generate_series(20051, 20100) g; +step rxy1: select count(*) from gin_tbl where p @> array[4,5]; +count + +10000 +step c2: 
commit; +step wx1: insert into gin_tbl select g, array[5,6] from generate_series + (20001, 20050) g; +ERROR: could not serialize access due to read/write dependencies among transactions +step c1: commit; + +starting permutation: rxy3 wx3 rxy4 c1 wy4 c2 +step rxy3: select count(*) from gin_tbl where p @> array[1,2] or + p @> array[100,200] or p @> array[500,1000] or p @> array[1000,2000]; +count + +4 +step wx3: insert into gin_tbl select g, array[g,g*2] from generate_series + (1, 50) g; +step rxy4: select count(*) from gin_tbl where p @> array[4000,8000] or + p @> array[5000,10000] or p @> array[6000,12000] or + p @> array[8000,16000]; +count + +4 +step c1: commit; +step wy4: insert into gin_tbl select g, array[g,g*2] from generate_series + (10000, 10050) g; +step c2: commit; + +starting permutation: rxy3 wx3 rxy4 wy4 c1 c2 +step rxy3: select count(*) from gin_tbl where p @> array[1,2] or + p @> array[100,200] or p @> array[500,1000] or p @> array[1000,2000]; +count + +4 +step wx3: insert into gin_tbl select g, array[g,g*2] from generate_series + (1, 50) g; +step rxy4: select count(*) from gin_tbl where p @> array[4000,8000] or + p @> array[5000,10000] or p @> array[6000,12000] or + p @> array[8000,16000]; +count + +4 +step wy4: insert into gin_tbl select g, array[g,g*2] from generate_series + (10000, 10050) g; +step c1: commit; +step c2: commit; + +starting permutation: rxy3 wx3 rxy4 wy4 c2 c1 +step rxy3: select count(*) from gin_tbl where p @> array[1,2] or + p @> array[100,200] or p @> array[500,1000] or p @> array[1000,2000]; +count + +4 +step wx3: insert into gin_tbl select g, array[g,g*2] from generate_series + (1, 50) g; +step rxy4: select count(*) from gin_tbl where p @> array[4000,8000] or + p @> array[5000,10000] or p @> array[6000,12000] or + p @> array[8000,16000]; +count + +4 +step wy4: insert into gin_tbl select g, array[g,g*2] from generate_series + (10000, 10050) g; +step c2: commit; +step c1: commit; + +starting permutation: rxy3 rxy4 wx3 c1 wy4 c2 +step rxy3: select count(*) from gin_tbl where p @> array[1,2] or + p @> array[100,200] or p @> array[500,1000] or p @> array[1000,2000]; +count + +4 +step rxy4: select count(*) from gin_tbl where p @> array[4000,8000] or + p @> array[5000,10000] or p @> array[6000,12000] or + p @> array[8000,16000]; +count + +4 +step wx3: insert into gin_tbl select g, array[g,g*2] from generate_series + (1, 50) g; +step c1: commit; +step wy4: insert into gin_tbl select g, array[g,g*2] from generate_series + (10000, 10050) g; +step c2: commit; + +starting permutation: rxy3 rxy4 wx3 wy4 c1 c2 +step rxy3: select count(*) from gin_tbl where p @> array[1,2] or + p @> array[100,200] or p @> array[500,1000] or p @> array[1000,2000]; +count + +4 +step rxy4: select count(*) from gin_tbl where p @> array[4000,8000] or + p @> array[5000,10000] or p @> array[6000,12000] or + p @> array[8000,16000]; +count + +4 +step wx3: insert into gin_tbl select g, array[g,g*2] from generate_series + (1, 50) g; +step wy4: insert into gin_tbl select g, array[g,g*2] from generate_series + (10000, 10050) g; +step c1: commit; +step c2: commit; + +starting permutation: rxy3 rxy4 wx3 wy4 c2 c1 +step rxy3: select count(*) from gin_tbl where p @> array[1,2] or + p @> array[100,200] or p @> array[500,1000] or p @> array[1000,2000]; +count + +4 +step rxy4: select count(*) from gin_tbl where p @> array[4000,8000] or + p @> array[5000,10000] or p @> array[6000,12000] or + p @> array[8000,16000]; +count + +4 +step wx3: insert into gin_tbl select g, array[g,g*2] from generate_series + 
(1, 50) g; +step wy4: insert into gin_tbl select g, array[g,g*2] from generate_series + (10000, 10050) g; +step c2: commit; +step c1: commit; + +starting permutation: rxy3 rxy4 wy4 wx3 c1 c2 +step rxy3: select count(*) from gin_tbl where p @> array[1,2] or + p @> array[100,200] or p @> array[500,1000] or p @> array[1000,2000]; +count + +4 +step rxy4: select count(*) from gin_tbl where p @> array[4000,8000] or + p @> array[5000,10000] or p @> array[6000,12000] or + p @> array[8000,16000]; +count + +4 +step wy4: insert into gin_tbl select g, array[g,g*2] from generate_series + (10000, 10050) g; +step wx3: insert into gin_tbl select g, array[g,g*2] from generate_series + (1, 50) g; +step c1: commit; +step c2: commit; + +starting permutation: rxy3 rxy4 wy4 wx3 c2 c1 +step rxy3: select count(*) from gin_tbl where p @> array[1,2] or + p @> array[100,200] or p @> array[500,1000] or p @> array[1000,2000]; +count + +4 +step rxy4: select count(*) from gin_tbl where p @> array[4000,8000] or + p @> array[5000,10000] or p @> array[6000,12000] or + p @> array[8000,16000]; +count + +4 +step wy4: insert into gin_tbl select g, array[g,g*2] from generate_series + (10000, 10050) g; +step wx3: insert into gin_tbl select g, array[g,g*2] from generate_series + (1, 50) g; +step c2: commit; +step c1: commit; + +starting permutation: rxy3 rxy4 wy4 c2 wx3 c1 +step rxy3: select count(*) from gin_tbl where p @> array[1,2] or + p @> array[100,200] or p @> array[500,1000] or p @> array[1000,2000]; +count + +4 +step rxy4: select count(*) from gin_tbl where p @> array[4000,8000] or + p @> array[5000,10000] or p @> array[6000,12000] or + p @> array[8000,16000]; +count + +4 +step wy4: insert into gin_tbl select g, array[g,g*2] from generate_series + (10000, 10050) g; +step c2: commit; +step wx3: insert into gin_tbl select g, array[g,g*2] from generate_series + (1, 50) g; +step c1: commit; + +starting permutation: rxy4 rxy3 wx3 c1 wy4 c2 +step rxy4: select count(*) from gin_tbl where p @> array[4000,8000] or + p @> array[5000,10000] or p @> array[6000,12000] or + p @> array[8000,16000]; +count + +4 +step rxy3: select count(*) from gin_tbl where p @> array[1,2] or + p @> array[100,200] or p @> array[500,1000] or p @> array[1000,2000]; +count + +4 +step wx3: insert into gin_tbl select g, array[g,g*2] from generate_series + (1, 50) g; +step c1: commit; +step wy4: insert into gin_tbl select g, array[g,g*2] from generate_series + (10000, 10050) g; +step c2: commit; + +starting permutation: rxy4 rxy3 wx3 wy4 c1 c2 +step rxy4: select count(*) from gin_tbl where p @> array[4000,8000] or + p @> array[5000,10000] or p @> array[6000,12000] or + p @> array[8000,16000]; +count + +4 +step rxy3: select count(*) from gin_tbl where p @> array[1,2] or + p @> array[100,200] or p @> array[500,1000] or p @> array[1000,2000]; +count + +4 +step wx3: insert into gin_tbl select g, array[g,g*2] from generate_series + (1, 50) g; +step wy4: insert into gin_tbl select g, array[g,g*2] from generate_series + (10000, 10050) g; +step c1: commit; +step c2: commit; + +starting permutation: rxy4 rxy3 wx3 wy4 c2 c1 +step rxy4: select count(*) from gin_tbl where p @> array[4000,8000] or + p @> array[5000,10000] or p @> array[6000,12000] or + p @> array[8000,16000]; +count + +4 +step rxy3: select count(*) from gin_tbl where p @> array[1,2] or + p @> array[100,200] or p @> array[500,1000] or p @> array[1000,2000]; +count + +4 +step wx3: insert into gin_tbl select g, array[g,g*2] from generate_series + (1, 50) g; +step wy4: insert into gin_tbl select g, 
array[g,g*2] from generate_series + (10000, 10050) g; +step c2: commit; +step c1: commit; + +starting permutation: rxy4 rxy3 wy4 wx3 c1 c2 +step rxy4: select count(*) from gin_tbl where p @> array[4000,8000] or + p @> array[5000,10000] or p @> array[6000,12000] or + p @> array[8000,16000]; +count + +4 +step rxy3: select count(*) from gin_tbl where p @> array[1,2] or + p @> array[100,200] or p @> array[500,1000] or p @> array[1000,2000]; +count + +4 +step wy4: insert into gin_tbl select g, array[g,g*2] from generate_series + (10000, 10050) g; +step wx3: insert into gin_tbl select g, array[g,g*2] from generate_series + (1, 50) g; +step c1: commit; +step c2: commit; + +starting permutation: rxy4 rxy3 wy4 wx3 c2 c1 +step rxy4: select count(*) from gin_tbl where p @> array[4000,8000] or + p @> array[5000,10000] or p @> array[6000,12000] or + p @> array[8000,16000]; +count + +4 +step rxy3: select count(*) from gin_tbl where p @> array[1,2] or + p @> array[100,200] or p @> array[500,1000] or p @> array[1000,2000]; +count + +4 +step wy4: insert into gin_tbl select g, array[g,g*2] from generate_series + (10000, 10050) g; +step wx3: insert into gin_tbl select g, array[g,g*2] from generate_series + (1, 50) g; +step c2: commit; +step c1: commit; + +starting permutation: rxy4 rxy3 wy4 c2 wx3 c1 +step rxy4: select count(*) from gin_tbl where p @> array[4000,8000] or + p @> array[5000,10000] or p @> array[6000,12000] or + p @> array[8000,16000]; +count + +4 +step rxy3: select count(*) from gin_tbl where p @> array[1,2] or + p @> array[100,200] or p @> array[500,1000] or p @> array[1000,2000]; +count + +4 +step wy4: insert into gin_tbl select g, array[g,g*2] from generate_series + (10000, 10050) g; +step c2: commit; +step wx3: insert into gin_tbl select g, array[g,g*2] from generate_series + (1, 50) g; +step c1: commit; + +starting permutation: rxy4 wy4 rxy3 wx3 c1 c2 +step rxy4: select count(*) from gin_tbl where p @> array[4000,8000] or + p @> array[5000,10000] or p @> array[6000,12000] or + p @> array[8000,16000]; +count + +4 +step wy4: insert into gin_tbl select g, array[g,g*2] from generate_series + (10000, 10050) g; +step rxy3: select count(*) from gin_tbl where p @> array[1,2] or + p @> array[100,200] or p @> array[500,1000] or p @> array[1000,2000]; +count + +4 +step wx3: insert into gin_tbl select g, array[g,g*2] from generate_series + (1, 50) g; +step c1: commit; +step c2: commit; + +starting permutation: rxy4 wy4 rxy3 wx3 c2 c1 +step rxy4: select count(*) from gin_tbl where p @> array[4000,8000] or + p @> array[5000,10000] or p @> array[6000,12000] or + p @> array[8000,16000]; +count + +4 +step wy4: insert into gin_tbl select g, array[g,g*2] from generate_series + (10000, 10050) g; +step rxy3: select count(*) from gin_tbl where p @> array[1,2] or + p @> array[100,200] or p @> array[500,1000] or p @> array[1000,2000]; +count + +4 +step wx3: insert into gin_tbl select g, array[g,g*2] from generate_series + (1, 50) g; +step c2: commit; +step c1: commit; + +starting permutation: rxy4 wy4 rxy3 c2 wx3 c1 +step rxy4: select count(*) from gin_tbl where p @> array[4000,8000] or + p @> array[5000,10000] or p @> array[6000,12000] or + p @> array[8000,16000]; +count + +4 +step wy4: insert into gin_tbl select g, array[g,g*2] from generate_series + (10000, 10050) g; +step rxy3: select count(*) from gin_tbl where p @> array[1,2] or + p @> array[100,200] or p @> array[500,1000] or p @> array[1000,2000]; +count + +4 +step c2: commit; +step wx3: insert into gin_tbl select g, array[g,g*2] from generate_series + 
(1, 50) g; +step c1: commit; + +starting permutation: rxy1 rxy2fu wx1 c1 wy2fu c2 +step rxy1: select count(*) from gin_tbl where p @> array[4,5]; +count + +10000 +step rxy2fu: select count(*) from gin_tbl where p @> array[10000,10005]; +count + +0 +step wx1: insert into gin_tbl select g, array[5,6] from generate_series + (20001, 20050) g; +step c1: commit; +step wy2fu: insert into gin_tbl select g, array[10000,10005] from + generate_series(20051, 20100) g; +step c2: commit; + +starting permutation: fu1 rxy1 rxy2fu wx1 c1 wy2fu c2 +step fu1: alter index ginidx set (fastupdate = on); + commit; + begin isolation level serializable; + set enable_seqscan=off; +step rxy1: select count(*) from gin_tbl where p @> array[4,5]; +count + +10000 +step rxy2fu: select count(*) from gin_tbl where p @> array[10000,10005]; +count + +0 +step wx1: insert into gin_tbl select g, array[5,6] from generate_series + (20001, 20050) g; +step c1: commit; +step wy2fu: insert into gin_tbl select g, array[10000,10005] from + generate_series(20051, 20100) g; +ERROR: could not serialize access due to read/write dependencies among transactions +step c2: commit; diff --git a/src/test/isolation/expected/predicate-gist.out b/src/test/isolation/expected/predicate-gist.out new file mode 100644 index 0000000000..77a27958af --- /dev/null +++ b/src/test/isolation/expected/predicate-gist.out @@ -0,0 +1,659 @@ +Parsed test spec with 2 sessions + +starting permutation: rxy1 wx1 c1 rxy2 wy2 c2 +step rxy1: select sum(p[0]) from gist_point_tbl where p << point(2500, 2500); +sum + +311250 +step wx1: insert into gist_point_tbl (id, p) + select g, point(g*500, g*500) from generate_series(15, 20) g; +step c1: commit; +step rxy2: select sum(p[0]) from gist_point_tbl where p >> point(7500,7500); +sum + +2233750 +step wy2: insert into gist_point_tbl (id, p) + select g, point(g*500, g*500) from generate_series(1, 5) g; +step c2: commit; + +starting permutation: rxy2 wy2 c2 rxy1 wx1 c1 +step rxy2: select sum(p[0]) from gist_point_tbl where p >> point(7500,7500); +sum + +2188750 +step wy2: insert into gist_point_tbl (id, p) + select g, point(g*500, g*500) from generate_series(1, 5) g; +step c2: commit; +step rxy1: select sum(p[0]) from gist_point_tbl where p << point(2500, 2500); +sum + +316250 +step wx1: insert into gist_point_tbl (id, p) + select g, point(g*500, g*500) from generate_series(15, 20) g; +step c1: commit; + +starting permutation: rxy3 wx3 c1 rxy4 wy4 c2 +step rxy3: select sum(p[0]) from gist_point_tbl where p >> point(6000,6000); +sum + +3202000 +step wx3: insert into gist_point_tbl (id, p) + select g, point(g*500, g*500) from generate_series(12, 18) g; +step c1: commit; +step rxy4: select sum(p[0]) from gist_point_tbl where p << point(1000,1000); +sum + +49500 +step wy4: insert into gist_point_tbl (id, p) + select g, point(g*50, g*50) from generate_series(1, 20) g; +step c2: commit; + +starting permutation: rxy4 wy4 c2 rxy3 wx3 c1 +step rxy4: select sum(p[0]) from gist_point_tbl where p << point(1000,1000); +sum + +49500 +step wy4: insert into gist_point_tbl (id, p) + select g, point(g*50, g*50) from generate_series(1, 20) g; +step c2: commit; +step rxy3: select sum(p[0]) from gist_point_tbl where p >> point(6000,6000); +sum + +3202000 +step wx3: insert into gist_point_tbl (id, p) + select g, point(g*500, g*500) from generate_series(12, 18) g; +step c1: commit; + +starting permutation: rxy1 wx1 rxy2 c1 wy2 c2 +step rxy1: select sum(p[0]) from gist_point_tbl where p << point(2500, 2500); +sum + +311250 +step wx1: insert into 
gist_point_tbl (id, p) + select g, point(g*500, g*500) from generate_series(15, 20) g; +step rxy2: select sum(p[0]) from gist_point_tbl where p >> point(7500,7500); +sum + +2188750 +step c1: commit; +step wy2: insert into gist_point_tbl (id, p) + select g, point(g*500, g*500) from generate_series(1, 5) g; +ERROR: could not serialize access due to read/write dependencies among transactions +step c2: commit; + +starting permutation: rxy1 wx1 rxy2 wy2 c1 c2 +step rxy1: select sum(p[0]) from gist_point_tbl where p << point(2500, 2500); +sum + +311250 +step wx1: insert into gist_point_tbl (id, p) + select g, point(g*500, g*500) from generate_series(15, 20) g; +step rxy2: select sum(p[0]) from gist_point_tbl where p >> point(7500,7500); +sum + +2188750 +step wy2: insert into gist_point_tbl (id, p) + select g, point(g*500, g*500) from generate_series(1, 5) g; +step c1: commit; +step c2: commit; +ERROR: could not serialize access due to read/write dependencies among transactions + +starting permutation: rxy1 wx1 rxy2 wy2 c2 c1 +step rxy1: select sum(p[0]) from gist_point_tbl where p << point(2500, 2500); +sum + +311250 +step wx1: insert into gist_point_tbl (id, p) + select g, point(g*500, g*500) from generate_series(15, 20) g; +step rxy2: select sum(p[0]) from gist_point_tbl where p >> point(7500,7500); +sum + +2188750 +step wy2: insert into gist_point_tbl (id, p) + select g, point(g*500, g*500) from generate_series(1, 5) g; +step c2: commit; +step c1: commit; +ERROR: could not serialize access due to read/write dependencies among transactions + +starting permutation: rxy1 rxy2 wx1 c1 wy2 c2 +step rxy1: select sum(p[0]) from gist_point_tbl where p << point(2500, 2500); +sum + +311250 +step rxy2: select sum(p[0]) from gist_point_tbl where p >> point(7500,7500); +sum + +2188750 +step wx1: insert into gist_point_tbl (id, p) + select g, point(g*500, g*500) from generate_series(15, 20) g; +step c1: commit; +step wy2: insert into gist_point_tbl (id, p) + select g, point(g*500, g*500) from generate_series(1, 5) g; +ERROR: could not serialize access due to read/write dependencies among transactions +step c2: commit; + +starting permutation: rxy1 rxy2 wx1 wy2 c1 c2 +step rxy1: select sum(p[0]) from gist_point_tbl where p << point(2500, 2500); +sum + +311250 +step rxy2: select sum(p[0]) from gist_point_tbl where p >> point(7500,7500); +sum + +2188750 +step wx1: insert into gist_point_tbl (id, p) + select g, point(g*500, g*500) from generate_series(15, 20) g; +step wy2: insert into gist_point_tbl (id, p) + select g, point(g*500, g*500) from generate_series(1, 5) g; +step c1: commit; +step c2: commit; +ERROR: could not serialize access due to read/write dependencies among transactions + +starting permutation: rxy1 rxy2 wx1 wy2 c2 c1 +step rxy1: select sum(p[0]) from gist_point_tbl where p << point(2500, 2500); +sum + +311250 +step rxy2: select sum(p[0]) from gist_point_tbl where p >> point(7500,7500); +sum + +2188750 +step wx1: insert into gist_point_tbl (id, p) + select g, point(g*500, g*500) from generate_series(15, 20) g; +step wy2: insert into gist_point_tbl (id, p) + select g, point(g*500, g*500) from generate_series(1, 5) g; +step c2: commit; +step c1: commit; +ERROR: could not serialize access due to read/write dependencies among transactions + +starting permutation: rxy1 rxy2 wy2 wx1 c1 c2 +step rxy1: select sum(p[0]) from gist_point_tbl where p << point(2500, 2500); +sum + +311250 +step rxy2: select sum(p[0]) from gist_point_tbl where p >> point(7500,7500); +sum + +2188750 +step wy2: insert into 
gist_point_tbl (id, p) + select g, point(g*500, g*500) from generate_series(1, 5) g; +step wx1: insert into gist_point_tbl (id, p) + select g, point(g*500, g*500) from generate_series(15, 20) g; +step c1: commit; +step c2: commit; +ERROR: could not serialize access due to read/write dependencies among transactions + +starting permutation: rxy1 rxy2 wy2 wx1 c2 c1 +step rxy1: select sum(p[0]) from gist_point_tbl where p << point(2500, 2500); +sum + +311250 +step rxy2: select sum(p[0]) from gist_point_tbl where p >> point(7500,7500); +sum + +2188750 +step wy2: insert into gist_point_tbl (id, p) + select g, point(g*500, g*500) from generate_series(1, 5) g; +step wx1: insert into gist_point_tbl (id, p) + select g, point(g*500, g*500) from generate_series(15, 20) g; +step c2: commit; +step c1: commit; +ERROR: could not serialize access due to read/write dependencies among transactions + +starting permutation: rxy1 rxy2 wy2 c2 wx1 c1 +step rxy1: select sum(p[0]) from gist_point_tbl where p << point(2500, 2500); +sum + +311250 +step rxy2: select sum(p[0]) from gist_point_tbl where p >> point(7500,7500); +sum + +2188750 +step wy2: insert into gist_point_tbl (id, p) + select g, point(g*500, g*500) from generate_series(1, 5) g; +step c2: commit; +step wx1: insert into gist_point_tbl (id, p) + select g, point(g*500, g*500) from generate_series(15, 20) g; +ERROR: could not serialize access due to read/write dependencies among transactions +step c1: commit; + +starting permutation: rxy2 rxy1 wx1 c1 wy2 c2 +step rxy2: select sum(p[0]) from gist_point_tbl where p >> point(7500,7500); +sum + +2188750 +step rxy1: select sum(p[0]) from gist_point_tbl where p << point(2500, 2500); +sum + +311250 +step wx1: insert into gist_point_tbl (id, p) + select g, point(g*500, g*500) from generate_series(15, 20) g; +step c1: commit; +step wy2: insert into gist_point_tbl (id, p) + select g, point(g*500, g*500) from generate_series(1, 5) g; +ERROR: could not serialize access due to read/write dependencies among transactions +step c2: commit; + +starting permutation: rxy2 rxy1 wx1 wy2 c1 c2 +step rxy2: select sum(p[0]) from gist_point_tbl where p >> point(7500,7500); +sum + +2188750 +step rxy1: select sum(p[0]) from gist_point_tbl where p << point(2500, 2500); +sum + +311250 +step wx1: insert into gist_point_tbl (id, p) + select g, point(g*500, g*500) from generate_series(15, 20) g; +step wy2: insert into gist_point_tbl (id, p) + select g, point(g*500, g*500) from generate_series(1, 5) g; +step c1: commit; +step c2: commit; +ERROR: could not serialize access due to read/write dependencies among transactions + +starting permutation: rxy2 rxy1 wx1 wy2 c2 c1 +step rxy2: select sum(p[0]) from gist_point_tbl where p >> point(7500,7500); +sum + +2188750 +step rxy1: select sum(p[0]) from gist_point_tbl where p << point(2500, 2500); +sum + +311250 +step wx1: insert into gist_point_tbl (id, p) + select g, point(g*500, g*500) from generate_series(15, 20) g; +step wy2: insert into gist_point_tbl (id, p) + select g, point(g*500, g*500) from generate_series(1, 5) g; +step c2: commit; +step c1: commit; +ERROR: could not serialize access due to read/write dependencies among transactions + +starting permutation: rxy2 rxy1 wy2 wx1 c1 c2 +step rxy2: select sum(p[0]) from gist_point_tbl where p >> point(7500,7500); +sum + +2188750 +step rxy1: select sum(p[0]) from gist_point_tbl where p << point(2500, 2500); +sum + +311250 +step wy2: insert into gist_point_tbl (id, p) + select g, point(g*500, g*500) from generate_series(1, 5) g; +step wx1: 
insert into gist_point_tbl (id, p) + select g, point(g*500, g*500) from generate_series(15, 20) g; +step c1: commit; +step c2: commit; +ERROR: could not serialize access due to read/write dependencies among transactions + +starting permutation: rxy2 rxy1 wy2 wx1 c2 c1 +step rxy2: select sum(p[0]) from gist_point_tbl where p >> point(7500,7500); +sum + +2188750 +step rxy1: select sum(p[0]) from gist_point_tbl where p << point(2500, 2500); +sum + +311250 +step wy2: insert into gist_point_tbl (id, p) + select g, point(g*500, g*500) from generate_series(1, 5) g; +step wx1: insert into gist_point_tbl (id, p) + select g, point(g*500, g*500) from generate_series(15, 20) g; +step c2: commit; +step c1: commit; +ERROR: could not serialize access due to read/write dependencies among transactions + +starting permutation: rxy2 rxy1 wy2 c2 wx1 c1 +step rxy2: select sum(p[0]) from gist_point_tbl where p >> point(7500,7500); +sum + +2188750 +step rxy1: select sum(p[0]) from gist_point_tbl where p << point(2500, 2500); +sum + +311250 +step wy2: insert into gist_point_tbl (id, p) + select g, point(g*500, g*500) from generate_series(1, 5) g; +step c2: commit; +step wx1: insert into gist_point_tbl (id, p) + select g, point(g*500, g*500) from generate_series(15, 20) g; +ERROR: could not serialize access due to read/write dependencies among transactions +step c1: commit; + +starting permutation: rxy2 wy2 rxy1 wx1 c1 c2 +step rxy2: select sum(p[0]) from gist_point_tbl where p >> point(7500,7500); +sum + +2188750 +step wy2: insert into gist_point_tbl (id, p) + select g, point(g*500, g*500) from generate_series(1, 5) g; +step rxy1: select sum(p[0]) from gist_point_tbl where p << point(2500, 2500); +sum + +311250 +step wx1: insert into gist_point_tbl (id, p) + select g, point(g*500, g*500) from generate_series(15, 20) g; +step c1: commit; +step c2: commit; +ERROR: could not serialize access due to read/write dependencies among transactions + +starting permutation: rxy2 wy2 rxy1 wx1 c2 c1 +step rxy2: select sum(p[0]) from gist_point_tbl where p >> point(7500,7500); +sum + +2188750 +step wy2: insert into gist_point_tbl (id, p) + select g, point(g*500, g*500) from generate_series(1, 5) g; +step rxy1: select sum(p[0]) from gist_point_tbl where p << point(2500, 2500); +sum + +311250 +step wx1: insert into gist_point_tbl (id, p) + select g, point(g*500, g*500) from generate_series(15, 20) g; +step c2: commit; +step c1: commit; +ERROR: could not serialize access due to read/write dependencies among transactions + +starting permutation: rxy2 wy2 rxy1 c2 wx1 c1 +step rxy2: select sum(p[0]) from gist_point_tbl where p >> point(7500,7500); +sum + +2188750 +step wy2: insert into gist_point_tbl (id, p) + select g, point(g*500, g*500) from generate_series(1, 5) g; +step rxy1: select sum(p[0]) from gist_point_tbl where p << point(2500, 2500); +sum + +311250 +step c2: commit; +step wx1: insert into gist_point_tbl (id, p) + select g, point(g*500, g*500) from generate_series(15, 20) g; +ERROR: could not serialize access due to read/write dependencies among transactions +step c1: commit; + +starting permutation: rxy3 wx3 rxy4 c1 wy4 c2 +step rxy3: select sum(p[0]) from gist_point_tbl where p >> point(6000,6000); +sum + +3202000 +step wx3: insert into gist_point_tbl (id, p) + select g, point(g*500, g*500) from generate_series(12, 18) g; +step rxy4: select sum(p[0]) from gist_point_tbl where p << point(1000,1000); +sum + +49500 +step c1: commit; +step wy4: insert into gist_point_tbl (id, p) + select g, point(g*50, g*50) from 
generate_series(1, 20) g; +step c2: commit; + +starting permutation: rxy3 wx3 rxy4 wy4 c1 c2 +step rxy3: select sum(p[0]) from gist_point_tbl where p >> point(6000,6000); +sum + +3202000 +step wx3: insert into gist_point_tbl (id, p) + select g, point(g*500, g*500) from generate_series(12, 18) g; +step rxy4: select sum(p[0]) from gist_point_tbl where p << point(1000,1000); +sum + +49500 +step wy4: insert into gist_point_tbl (id, p) + select g, point(g*50, g*50) from generate_series(1, 20) g; +step c1: commit; +step c2: commit; + +starting permutation: rxy3 wx3 rxy4 wy4 c2 c1 +step rxy3: select sum(p[0]) from gist_point_tbl where p >> point(6000,6000); +sum + +3202000 +step wx3: insert into gist_point_tbl (id, p) + select g, point(g*500, g*500) from generate_series(12, 18) g; +step rxy4: select sum(p[0]) from gist_point_tbl where p << point(1000,1000); +sum + +49500 +step wy4: insert into gist_point_tbl (id, p) + select g, point(g*50, g*50) from generate_series(1, 20) g; +step c2: commit; +step c1: commit; + +starting permutation: rxy3 rxy4 wx3 c1 wy4 c2 +step rxy3: select sum(p[0]) from gist_point_tbl where p >> point(6000,6000); +sum + +3202000 +step rxy4: select sum(p[0]) from gist_point_tbl where p << point(1000,1000); +sum + +49500 +step wx3: insert into gist_point_tbl (id, p) + select g, point(g*500, g*500) from generate_series(12, 18) g; +step c1: commit; +step wy4: insert into gist_point_tbl (id, p) + select g, point(g*50, g*50) from generate_series(1, 20) g; +step c2: commit; + +starting permutation: rxy3 rxy4 wx3 wy4 c1 c2 +step rxy3: select sum(p[0]) from gist_point_tbl where p >> point(6000,6000); +sum + +3202000 +step rxy4: select sum(p[0]) from gist_point_tbl where p << point(1000,1000); +sum + +49500 +step wx3: insert into gist_point_tbl (id, p) + select g, point(g*500, g*500) from generate_series(12, 18) g; +step wy4: insert into gist_point_tbl (id, p) + select g, point(g*50, g*50) from generate_series(1, 20) g; +step c1: commit; +step c2: commit; + +starting permutation: rxy3 rxy4 wx3 wy4 c2 c1 +step rxy3: select sum(p[0]) from gist_point_tbl where p >> point(6000,6000); +sum + +3202000 +step rxy4: select sum(p[0]) from gist_point_tbl where p << point(1000,1000); +sum + +49500 +step wx3: insert into gist_point_tbl (id, p) + select g, point(g*500, g*500) from generate_series(12, 18) g; +step wy4: insert into gist_point_tbl (id, p) + select g, point(g*50, g*50) from generate_series(1, 20) g; +step c2: commit; +step c1: commit; + +starting permutation: rxy3 rxy4 wy4 wx3 c1 c2 +step rxy3: select sum(p[0]) from gist_point_tbl where p >> point(6000,6000); +sum + +3202000 +step rxy4: select sum(p[0]) from gist_point_tbl where p << point(1000,1000); +sum + +49500 +step wy4: insert into gist_point_tbl (id, p) + select g, point(g*50, g*50) from generate_series(1, 20) g; +step wx3: insert into gist_point_tbl (id, p) + select g, point(g*500, g*500) from generate_series(12, 18) g; +step c1: commit; +step c2: commit; + +starting permutation: rxy3 rxy4 wy4 wx3 c2 c1 +step rxy3: select sum(p[0]) from gist_point_tbl where p >> point(6000,6000); +sum + +3202000 +step rxy4: select sum(p[0]) from gist_point_tbl where p << point(1000,1000); +sum + +49500 +step wy4: insert into gist_point_tbl (id, p) + select g, point(g*50, g*50) from generate_series(1, 20) g; +step wx3: insert into gist_point_tbl (id, p) + select g, point(g*500, g*500) from generate_series(12, 18) g; +step c2: commit; +step c1: commit; + +starting permutation: rxy3 rxy4 wy4 c2 wx3 c1 +step rxy3: select sum(p[0]) from 
gist_point_tbl where p >> point(6000,6000); +sum + +3202000 +step rxy4: select sum(p[0]) from gist_point_tbl where p << point(1000,1000); +sum + +49500 +step wy4: insert into gist_point_tbl (id, p) + select g, point(g*50, g*50) from generate_series(1, 20) g; +step c2: commit; +step wx3: insert into gist_point_tbl (id, p) + select g, point(g*500, g*500) from generate_series(12, 18) g; +step c1: commit; + +starting permutation: rxy4 rxy3 wx3 c1 wy4 c2 +step rxy4: select sum(p[0]) from gist_point_tbl where p << point(1000,1000); +sum + +49500 +step rxy3: select sum(p[0]) from gist_point_tbl where p >> point(6000,6000); +sum + +3202000 +step wx3: insert into gist_point_tbl (id, p) + select g, point(g*500, g*500) from generate_series(12, 18) g; +step c1: commit; +step wy4: insert into gist_point_tbl (id, p) + select g, point(g*50, g*50) from generate_series(1, 20) g; +step c2: commit; + +starting permutation: rxy4 rxy3 wx3 wy4 c1 c2 +step rxy4: select sum(p[0]) from gist_point_tbl where p << point(1000,1000); +sum + +49500 +step rxy3: select sum(p[0]) from gist_point_tbl where p >> point(6000,6000); +sum + +3202000 +step wx3: insert into gist_point_tbl (id, p) + select g, point(g*500, g*500) from generate_series(12, 18) g; +step wy4: insert into gist_point_tbl (id, p) + select g, point(g*50, g*50) from generate_series(1, 20) g; +step c1: commit; +step c2: commit; + +starting permutation: rxy4 rxy3 wx3 wy4 c2 c1 +step rxy4: select sum(p[0]) from gist_point_tbl where p << point(1000,1000); +sum + +49500 +step rxy3: select sum(p[0]) from gist_point_tbl where p >> point(6000,6000); +sum + +3202000 +step wx3: insert into gist_point_tbl (id, p) + select g, point(g*500, g*500) from generate_series(12, 18) g; +step wy4: insert into gist_point_tbl (id, p) + select g, point(g*50, g*50) from generate_series(1, 20) g; +step c2: commit; +step c1: commit; + +starting permutation: rxy4 rxy3 wy4 wx3 c1 c2 +step rxy4: select sum(p[0]) from gist_point_tbl where p << point(1000,1000); +sum + +49500 +step rxy3: select sum(p[0]) from gist_point_tbl where p >> point(6000,6000); +sum + +3202000 +step wy4: insert into gist_point_tbl (id, p) + select g, point(g*50, g*50) from generate_series(1, 20) g; +step wx3: insert into gist_point_tbl (id, p) + select g, point(g*500, g*500) from generate_series(12, 18) g; +step c1: commit; +step c2: commit; + +starting permutation: rxy4 rxy3 wy4 wx3 c2 c1 +step rxy4: select sum(p[0]) from gist_point_tbl where p << point(1000,1000); +sum + +49500 +step rxy3: select sum(p[0]) from gist_point_tbl where p >> point(6000,6000); +sum + +3202000 +step wy4: insert into gist_point_tbl (id, p) + select g, point(g*50, g*50) from generate_series(1, 20) g; +step wx3: insert into gist_point_tbl (id, p) + select g, point(g*500, g*500) from generate_series(12, 18) g; +step c2: commit; +step c1: commit; + +starting permutation: rxy4 rxy3 wy4 c2 wx3 c1 +step rxy4: select sum(p[0]) from gist_point_tbl where p << point(1000,1000); +sum + +49500 +step rxy3: select sum(p[0]) from gist_point_tbl where p >> point(6000,6000); +sum + +3202000 +step wy4: insert into gist_point_tbl (id, p) + select g, point(g*50, g*50) from generate_series(1, 20) g; +step c2: commit; +step wx3: insert into gist_point_tbl (id, p) + select g, point(g*500, g*500) from generate_series(12, 18) g; +step c1: commit; + +starting permutation: rxy4 wy4 rxy3 wx3 c1 c2 +step rxy4: select sum(p[0]) from gist_point_tbl where p << point(1000,1000); +sum + +49500 +step wy4: insert into gist_point_tbl (id, p) + select g, point(g*50, g*50) from 
generate_series(1, 20) g; +step rxy3: select sum(p[0]) from gist_point_tbl where p >> point(6000,6000); +sum + +3202000 +step wx3: insert into gist_point_tbl (id, p) + select g, point(g*500, g*500) from generate_series(12, 18) g; +step c1: commit; +step c2: commit; + +starting permutation: rxy4 wy4 rxy3 wx3 c2 c1 +step rxy4: select sum(p[0]) from gist_point_tbl where p << point(1000,1000); +sum + +49500 +step wy4: insert into gist_point_tbl (id, p) + select g, point(g*50, g*50) from generate_series(1, 20) g; +step rxy3: select sum(p[0]) from gist_point_tbl where p >> point(6000,6000); +sum + +3202000 +step wx3: insert into gist_point_tbl (id, p) + select g, point(g*500, g*500) from generate_series(12, 18) g; +step c2: commit; +step c1: commit; + +starting permutation: rxy4 wy4 rxy3 c2 wx3 c1 +step rxy4: select sum(p[0]) from gist_point_tbl where p << point(1000,1000); +sum + +49500 +step wy4: insert into gist_point_tbl (id, p) + select g, point(g*50, g*50) from generate_series(1, 20) g; +step rxy3: select sum(p[0]) from gist_point_tbl where p >> point(6000,6000); +sum + +3202000 +step c2: commit; +step wx3: insert into gist_point_tbl (id, p) + select g, point(g*500, g*500) from generate_series(12, 18) g; +step c1: commit; diff --git a/src/test/isolation/expected/predicate-hash.out b/src/test/isolation/expected/predicate-hash.out new file mode 100644 index 0000000000..53e500fd26 --- /dev/null +++ b/src/test/isolation/expected/predicate-hash.out @@ -0,0 +1,659 @@ +Parsed test spec with 2 sessions + +starting permutation: rxy1 wx1 c1 rxy2 wy2 c2 +step rxy1: select sum(p) from hash_tbl where p=20; +sum + +200 +step wx1: insert into hash_tbl (id, p) + select g, 30 from generate_series(41, 50) g; +step c1: commit; +step rxy2: select sum(p) from hash_tbl where p=30; +sum + +600 +step wy2: insert into hash_tbl (id, p) + select g, 20 from generate_series(51, 60) g; +step c2: commit; + +starting permutation: rxy2 wy2 c2 rxy1 wx1 c1 +step rxy2: select sum(p) from hash_tbl where p=30; +sum + +300 +step wy2: insert into hash_tbl (id, p) + select g, 20 from generate_series(51, 60) g; +step c2: commit; +step rxy1: select sum(p) from hash_tbl where p=20; +sum + +400 +step wx1: insert into hash_tbl (id, p) + select g, 30 from generate_series(41, 50) g; +step c1: commit; + +starting permutation: rxy3 wx3 c1 rxy4 wy4 c2 +step rxy3: select sum(p) from hash_tbl where p=20; +sum + +200 +step wx3: insert into hash_tbl (id, p) + select g, 50 from generate_series(41, 50) g; +step c1: commit; +step rxy4: select sum(p) from hash_tbl where p=30; +sum + +300 +step wy4: insert into hash_tbl (id, p) + select g, 60 from generate_series(51, 60) g; +step c2: commit; + +starting permutation: rxy4 wy4 c2 rxy3 wx3 c1 +step rxy4: select sum(p) from hash_tbl where p=30; +sum + +300 +step wy4: insert into hash_tbl (id, p) + select g, 60 from generate_series(51, 60) g; +step c2: commit; +step rxy3: select sum(p) from hash_tbl where p=20; +sum + +200 +step wx3: insert into hash_tbl (id, p) + select g, 50 from generate_series(41, 50) g; +step c1: commit; + +starting permutation: rxy1 wx1 rxy2 c1 wy2 c2 +step rxy1: select sum(p) from hash_tbl where p=20; +sum + +200 +step wx1: insert into hash_tbl (id, p) + select g, 30 from generate_series(41, 50) g; +step rxy2: select sum(p) from hash_tbl where p=30; +sum + +300 +step c1: commit; +step wy2: insert into hash_tbl (id, p) + select g, 20 from generate_series(51, 60) g; +ERROR: could not serialize access due to read/write dependencies among transactions +step c2: commit; + +starting 
permutation: rxy1 wx1 rxy2 wy2 c1 c2 +step rxy1: select sum(p) from hash_tbl where p=20; +sum + +200 +step wx1: insert into hash_tbl (id, p) + select g, 30 from generate_series(41, 50) g; +step rxy2: select sum(p) from hash_tbl where p=30; +sum + +300 +step wy2: insert into hash_tbl (id, p) + select g, 20 from generate_series(51, 60) g; +step c1: commit; +step c2: commit; +ERROR: could not serialize access due to read/write dependencies among transactions + +starting permutation: rxy1 wx1 rxy2 wy2 c2 c1 +step rxy1: select sum(p) from hash_tbl where p=20; +sum + +200 +step wx1: insert into hash_tbl (id, p) + select g, 30 from generate_series(41, 50) g; +step rxy2: select sum(p) from hash_tbl where p=30; +sum + +300 +step wy2: insert into hash_tbl (id, p) + select g, 20 from generate_series(51, 60) g; +step c2: commit; +step c1: commit; +ERROR: could not serialize access due to read/write dependencies among transactions + +starting permutation: rxy1 rxy2 wx1 c1 wy2 c2 +step rxy1: select sum(p) from hash_tbl where p=20; +sum + +200 +step rxy2: select sum(p) from hash_tbl where p=30; +sum + +300 +step wx1: insert into hash_tbl (id, p) + select g, 30 from generate_series(41, 50) g; +step c1: commit; +step wy2: insert into hash_tbl (id, p) + select g, 20 from generate_series(51, 60) g; +ERROR: could not serialize access due to read/write dependencies among transactions +step c2: commit; + +starting permutation: rxy1 rxy2 wx1 wy2 c1 c2 +step rxy1: select sum(p) from hash_tbl where p=20; +sum + +200 +step rxy2: select sum(p) from hash_tbl where p=30; +sum + +300 +step wx1: insert into hash_tbl (id, p) + select g, 30 from generate_series(41, 50) g; +step wy2: insert into hash_tbl (id, p) + select g, 20 from generate_series(51, 60) g; +step c1: commit; +step c2: commit; +ERROR: could not serialize access due to read/write dependencies among transactions + +starting permutation: rxy1 rxy2 wx1 wy2 c2 c1 +step rxy1: select sum(p) from hash_tbl where p=20; +sum + +200 +step rxy2: select sum(p) from hash_tbl where p=30; +sum + +300 +step wx1: insert into hash_tbl (id, p) + select g, 30 from generate_series(41, 50) g; +step wy2: insert into hash_tbl (id, p) + select g, 20 from generate_series(51, 60) g; +step c2: commit; +step c1: commit; +ERROR: could not serialize access due to read/write dependencies among transactions + +starting permutation: rxy1 rxy2 wy2 wx1 c1 c2 +step rxy1: select sum(p) from hash_tbl where p=20; +sum + +200 +step rxy2: select sum(p) from hash_tbl where p=30; +sum + +300 +step wy2: insert into hash_tbl (id, p) + select g, 20 from generate_series(51, 60) g; +step wx1: insert into hash_tbl (id, p) + select g, 30 from generate_series(41, 50) g; +step c1: commit; +step c2: commit; +ERROR: could not serialize access due to read/write dependencies among transactions + +starting permutation: rxy1 rxy2 wy2 wx1 c2 c1 +step rxy1: select sum(p) from hash_tbl where p=20; +sum + +200 +step rxy2: select sum(p) from hash_tbl where p=30; +sum + +300 +step wy2: insert into hash_tbl (id, p) + select g, 20 from generate_series(51, 60) g; +step wx1: insert into hash_tbl (id, p) + select g, 30 from generate_series(41, 50) g; +step c2: commit; +step c1: commit; +ERROR: could not serialize access due to read/write dependencies among transactions + +starting permutation: rxy1 rxy2 wy2 c2 wx1 c1 +step rxy1: select sum(p) from hash_tbl where p=20; +sum + +200 +step rxy2: select sum(p) from hash_tbl where p=30; +sum + +300 +step wy2: insert into hash_tbl (id, p) + select g, 20 from generate_series(51, 60) 
g; +step c2: commit; +step wx1: insert into hash_tbl (id, p) + select g, 30 from generate_series(41, 50) g; +ERROR: could not serialize access due to read/write dependencies among transactions +step c1: commit; + +starting permutation: rxy2 rxy1 wx1 c1 wy2 c2 +step rxy2: select sum(p) from hash_tbl where p=30; +sum + +300 +step rxy1: select sum(p) from hash_tbl where p=20; +sum + +200 +step wx1: insert into hash_tbl (id, p) + select g, 30 from generate_series(41, 50) g; +step c1: commit; +step wy2: insert into hash_tbl (id, p) + select g, 20 from generate_series(51, 60) g; +ERROR: could not serialize access due to read/write dependencies among transactions +step c2: commit; + +starting permutation: rxy2 rxy1 wx1 wy2 c1 c2 +step rxy2: select sum(p) from hash_tbl where p=30; +sum + +300 +step rxy1: select sum(p) from hash_tbl where p=20; +sum + +200 +step wx1: insert into hash_tbl (id, p) + select g, 30 from generate_series(41, 50) g; +step wy2: insert into hash_tbl (id, p) + select g, 20 from generate_series(51, 60) g; +step c1: commit; +step c2: commit; +ERROR: could not serialize access due to read/write dependencies among transactions + +starting permutation: rxy2 rxy1 wx1 wy2 c2 c1 +step rxy2: select sum(p) from hash_tbl where p=30; +sum + +300 +step rxy1: select sum(p) from hash_tbl where p=20; +sum + +200 +step wx1: insert into hash_tbl (id, p) + select g, 30 from generate_series(41, 50) g; +step wy2: insert into hash_tbl (id, p) + select g, 20 from generate_series(51, 60) g; +step c2: commit; +step c1: commit; +ERROR: could not serialize access due to read/write dependencies among transactions + +starting permutation: rxy2 rxy1 wy2 wx1 c1 c2 +step rxy2: select sum(p) from hash_tbl where p=30; +sum + +300 +step rxy1: select sum(p) from hash_tbl where p=20; +sum + +200 +step wy2: insert into hash_tbl (id, p) + select g, 20 from generate_series(51, 60) g; +step wx1: insert into hash_tbl (id, p) + select g, 30 from generate_series(41, 50) g; +step c1: commit; +step c2: commit; +ERROR: could not serialize access due to read/write dependencies among transactions + +starting permutation: rxy2 rxy1 wy2 wx1 c2 c1 +step rxy2: select sum(p) from hash_tbl where p=30; +sum + +300 +step rxy1: select sum(p) from hash_tbl where p=20; +sum + +200 +step wy2: insert into hash_tbl (id, p) + select g, 20 from generate_series(51, 60) g; +step wx1: insert into hash_tbl (id, p) + select g, 30 from generate_series(41, 50) g; +step c2: commit; +step c1: commit; +ERROR: could not serialize access due to read/write dependencies among transactions + +starting permutation: rxy2 rxy1 wy2 c2 wx1 c1 +step rxy2: select sum(p) from hash_tbl where p=30; +sum + +300 +step rxy1: select sum(p) from hash_tbl where p=20; +sum + +200 +step wy2: insert into hash_tbl (id, p) + select g, 20 from generate_series(51, 60) g; +step c2: commit; +step wx1: insert into hash_tbl (id, p) + select g, 30 from generate_series(41, 50) g; +ERROR: could not serialize access due to read/write dependencies among transactions +step c1: commit; + +starting permutation: rxy2 wy2 rxy1 wx1 c1 c2 +step rxy2: select sum(p) from hash_tbl where p=30; +sum + +300 +step wy2: insert into hash_tbl (id, p) + select g, 20 from generate_series(51, 60) g; +step rxy1: select sum(p) from hash_tbl where p=20; +sum + +200 +step wx1: insert into hash_tbl (id, p) + select g, 30 from generate_series(41, 50) g; +step c1: commit; +step c2: commit; +ERROR: could not serialize access due to read/write dependencies among transactions + +starting permutation: rxy2 wy2 rxy1 
wx1 c2 c1 +step rxy2: select sum(p) from hash_tbl where p=30; +sum + +300 +step wy2: insert into hash_tbl (id, p) + select g, 20 from generate_series(51, 60) g; +step rxy1: select sum(p) from hash_tbl where p=20; +sum + +200 +step wx1: insert into hash_tbl (id, p) + select g, 30 from generate_series(41, 50) g; +step c2: commit; +step c1: commit; +ERROR: could not serialize access due to read/write dependencies among transactions + +starting permutation: rxy2 wy2 rxy1 c2 wx1 c1 +step rxy2: select sum(p) from hash_tbl where p=30; +sum + +300 +step wy2: insert into hash_tbl (id, p) + select g, 20 from generate_series(51, 60) g; +step rxy1: select sum(p) from hash_tbl where p=20; +sum + +200 +step c2: commit; +step wx1: insert into hash_tbl (id, p) + select g, 30 from generate_series(41, 50) g; +ERROR: could not serialize access due to read/write dependencies among transactions +step c1: commit; + +starting permutation: rxy3 wx3 rxy4 c1 wy4 c2 +step rxy3: select sum(p) from hash_tbl where p=20; +sum + +200 +step wx3: insert into hash_tbl (id, p) + select g, 50 from generate_series(41, 50) g; +step rxy4: select sum(p) from hash_tbl where p=30; +sum + +300 +step c1: commit; +step wy4: insert into hash_tbl (id, p) + select g, 60 from generate_series(51, 60) g; +step c2: commit; + +starting permutation: rxy3 wx3 rxy4 wy4 c1 c2 +step rxy3: select sum(p) from hash_tbl where p=20; +sum + +200 +step wx3: insert into hash_tbl (id, p) + select g, 50 from generate_series(41, 50) g; +step rxy4: select sum(p) from hash_tbl where p=30; +sum + +300 +step wy4: insert into hash_tbl (id, p) + select g, 60 from generate_series(51, 60) g; +step c1: commit; +step c2: commit; + +starting permutation: rxy3 wx3 rxy4 wy4 c2 c1 +step rxy3: select sum(p) from hash_tbl where p=20; +sum + +200 +step wx3: insert into hash_tbl (id, p) + select g, 50 from generate_series(41, 50) g; +step rxy4: select sum(p) from hash_tbl where p=30; +sum + +300 +step wy4: insert into hash_tbl (id, p) + select g, 60 from generate_series(51, 60) g; +step c2: commit; +step c1: commit; + +starting permutation: rxy3 rxy4 wx3 c1 wy4 c2 +step rxy3: select sum(p) from hash_tbl where p=20; +sum + +200 +step rxy4: select sum(p) from hash_tbl where p=30; +sum + +300 +step wx3: insert into hash_tbl (id, p) + select g, 50 from generate_series(41, 50) g; +step c1: commit; +step wy4: insert into hash_tbl (id, p) + select g, 60 from generate_series(51, 60) g; +step c2: commit; + +starting permutation: rxy3 rxy4 wx3 wy4 c1 c2 +step rxy3: select sum(p) from hash_tbl where p=20; +sum + +200 +step rxy4: select sum(p) from hash_tbl where p=30; +sum + +300 +step wx3: insert into hash_tbl (id, p) + select g, 50 from generate_series(41, 50) g; +step wy4: insert into hash_tbl (id, p) + select g, 60 from generate_series(51, 60) g; +step c1: commit; +step c2: commit; + +starting permutation: rxy3 rxy4 wx3 wy4 c2 c1 +step rxy3: select sum(p) from hash_tbl where p=20; +sum + +200 +step rxy4: select sum(p) from hash_tbl where p=30; +sum + +300 +step wx3: insert into hash_tbl (id, p) + select g, 50 from generate_series(41, 50) g; +step wy4: insert into hash_tbl (id, p) + select g, 60 from generate_series(51, 60) g; +step c2: commit; +step c1: commit; + +starting permutation: rxy3 rxy4 wy4 wx3 c1 c2 +step rxy3: select sum(p) from hash_tbl where p=20; +sum + +200 +step rxy4: select sum(p) from hash_tbl where p=30; +sum + +300 +step wy4: insert into hash_tbl (id, p) + select g, 60 from generate_series(51, 60) g; +step wx3: insert into hash_tbl (id, p) + select g, 50 from 
generate_series(41, 50) g; +step c1: commit; +step c2: commit; + +starting permutation: rxy3 rxy4 wy4 wx3 c2 c1 +step rxy3: select sum(p) from hash_tbl where p=20; +sum + +200 +step rxy4: select sum(p) from hash_tbl where p=30; +sum + +300 +step wy4: insert into hash_tbl (id, p) + select g, 60 from generate_series(51, 60) g; +step wx3: insert into hash_tbl (id, p) + select g, 50 from generate_series(41, 50) g; +step c2: commit; +step c1: commit; + +starting permutation: rxy3 rxy4 wy4 c2 wx3 c1 +step rxy3: select sum(p) from hash_tbl where p=20; +sum + +200 +step rxy4: select sum(p) from hash_tbl where p=30; +sum + +300 +step wy4: insert into hash_tbl (id, p) + select g, 60 from generate_series(51, 60) g; +step c2: commit; +step wx3: insert into hash_tbl (id, p) + select g, 50 from generate_series(41, 50) g; +step c1: commit; + +starting permutation: rxy4 rxy3 wx3 c1 wy4 c2 +step rxy4: select sum(p) from hash_tbl where p=30; +sum + +300 +step rxy3: select sum(p) from hash_tbl where p=20; +sum + +200 +step wx3: insert into hash_tbl (id, p) + select g, 50 from generate_series(41, 50) g; +step c1: commit; +step wy4: insert into hash_tbl (id, p) + select g, 60 from generate_series(51, 60) g; +step c2: commit; + +starting permutation: rxy4 rxy3 wx3 wy4 c1 c2 +step rxy4: select sum(p) from hash_tbl where p=30; +sum + +300 +step rxy3: select sum(p) from hash_tbl where p=20; +sum + +200 +step wx3: insert into hash_tbl (id, p) + select g, 50 from generate_series(41, 50) g; +step wy4: insert into hash_tbl (id, p) + select g, 60 from generate_series(51, 60) g; +step c1: commit; +step c2: commit; + +starting permutation: rxy4 rxy3 wx3 wy4 c2 c1 +step rxy4: select sum(p) from hash_tbl where p=30; +sum + +300 +step rxy3: select sum(p) from hash_tbl where p=20; +sum + +200 +step wx3: insert into hash_tbl (id, p) + select g, 50 from generate_series(41, 50) g; +step wy4: insert into hash_tbl (id, p) + select g, 60 from generate_series(51, 60) g; +step c2: commit; +step c1: commit; + +starting permutation: rxy4 rxy3 wy4 wx3 c1 c2 +step rxy4: select sum(p) from hash_tbl where p=30; +sum + +300 +step rxy3: select sum(p) from hash_tbl where p=20; +sum + +200 +step wy4: insert into hash_tbl (id, p) + select g, 60 from generate_series(51, 60) g; +step wx3: insert into hash_tbl (id, p) + select g, 50 from generate_series(41, 50) g; +step c1: commit; +step c2: commit; + +starting permutation: rxy4 rxy3 wy4 wx3 c2 c1 +step rxy4: select sum(p) from hash_tbl where p=30; +sum + +300 +step rxy3: select sum(p) from hash_tbl where p=20; +sum + +200 +step wy4: insert into hash_tbl (id, p) + select g, 60 from generate_series(51, 60) g; +step wx3: insert into hash_tbl (id, p) + select g, 50 from generate_series(41, 50) g; +step c2: commit; +step c1: commit; + +starting permutation: rxy4 rxy3 wy4 c2 wx3 c1 +step rxy4: select sum(p) from hash_tbl where p=30; +sum + +300 +step rxy3: select sum(p) from hash_tbl where p=20; +sum + +200 +step wy4: insert into hash_tbl (id, p) + select g, 60 from generate_series(51, 60) g; +step c2: commit; +step wx3: insert into hash_tbl (id, p) + select g, 50 from generate_series(41, 50) g; +step c1: commit; + +starting permutation: rxy4 wy4 rxy3 wx3 c1 c2 +step rxy4: select sum(p) from hash_tbl where p=30; +sum + +300 +step wy4: insert into hash_tbl (id, p) + select g, 60 from generate_series(51, 60) g; +step rxy3: select sum(p) from hash_tbl where p=20; +sum + +200 +step wx3: insert into hash_tbl (id, p) + select g, 50 from generate_series(41, 50) g; +step c1: commit; +step c2: commit; + 
+starting permutation: rxy4 wy4 rxy3 wx3 c2 c1 +step rxy4: select sum(p) from hash_tbl where p=30; +sum + +300 +step wy4: insert into hash_tbl (id, p) + select g, 60 from generate_series(51, 60) g; +step rxy3: select sum(p) from hash_tbl where p=20; +sum + +200 +step wx3: insert into hash_tbl (id, p) + select g, 50 from generate_series(41, 50) g; +step c2: commit; +step c1: commit; + +starting permutation: rxy4 wy4 rxy3 c2 wx3 c1 +step rxy4: select sum(p) from hash_tbl where p=30; +sum + +300 +step wy4: insert into hash_tbl (id, p) + select g, 60 from generate_series(51, 60) g; +step rxy3: select sum(p) from hash_tbl where p=20; +sum + +200 +step c2: commit; +step wx3: insert into hash_tbl (id, p) + select g, 50 from generate_series(41, 50) g; +step c1: commit; diff --git a/src/test/isolation/expected/truncate-conflict.out b/src/test/isolation/expected/truncate-conflict.out new file mode 100644 index 0000000000..2c10f8d40d --- /dev/null +++ b/src/test/isolation/expected/truncate-conflict.out @@ -0,0 +1,99 @@ +Parsed test spec with 2 sessions + +starting permutation: s1_begin s1_tab_lookup s2_auth s2_truncate s1_commit s2_reset +step s1_begin: BEGIN; +step s1_tab_lookup: SELECT count(*) >= 0 FROM truncate_tab; +?column? + +t +step s2_auth: SET ROLE regress_truncate_conflict; +step s2_truncate: TRUNCATE truncate_tab; +ERROR: permission denied for table truncate_tab +step s1_commit: COMMIT; +step s2_reset: RESET ROLE; + +starting permutation: s1_begin s2_auth s2_truncate s1_tab_lookup s1_commit s2_reset +step s1_begin: BEGIN; +step s2_auth: SET ROLE regress_truncate_conflict; +step s2_truncate: TRUNCATE truncate_tab; +ERROR: permission denied for table truncate_tab +step s1_tab_lookup: SELECT count(*) >= 0 FROM truncate_tab; +?column? + +t +step s1_commit: COMMIT; +step s2_reset: RESET ROLE; + +starting permutation: s1_begin s2_auth s1_tab_lookup s2_truncate s1_commit s2_reset +step s1_begin: BEGIN; +step s2_auth: SET ROLE regress_truncate_conflict; +step s1_tab_lookup: SELECT count(*) >= 0 FROM truncate_tab; +?column? + +t +step s2_truncate: TRUNCATE truncate_tab; +ERROR: permission denied for table truncate_tab +step s1_commit: COMMIT; +step s2_reset: RESET ROLE; + +starting permutation: s2_auth s2_truncate s1_begin s1_tab_lookup s1_commit s2_reset +step s2_auth: SET ROLE regress_truncate_conflict; +step s2_truncate: TRUNCATE truncate_tab; +ERROR: permission denied for table truncate_tab +step s1_begin: BEGIN; +step s1_tab_lookup: SELECT count(*) >= 0 FROM truncate_tab; +?column? + +t +step s1_commit: COMMIT; +step s2_reset: RESET ROLE; + +starting permutation: s1_begin s1_tab_lookup s2_grant s2_auth s2_truncate s1_commit s2_reset +step s1_begin: BEGIN; +step s1_tab_lookup: SELECT count(*) >= 0 FROM truncate_tab; +?column? + +t +step s2_grant: GRANT TRUNCATE ON truncate_tab TO regress_truncate_conflict; +step s2_auth: SET ROLE regress_truncate_conflict; +step s2_truncate: TRUNCATE truncate_tab; +step s1_commit: COMMIT; +step s2_truncate: <... completed> +step s2_reset: RESET ROLE; + +starting permutation: s1_begin s2_grant s2_auth s2_truncate s1_tab_lookup s1_commit s2_reset +step s1_begin: BEGIN; +step s2_grant: GRANT TRUNCATE ON truncate_tab TO regress_truncate_conflict; +step s2_auth: SET ROLE regress_truncate_conflict; +step s2_truncate: TRUNCATE truncate_tab; +step s1_tab_lookup: SELECT count(*) >= 0 FROM truncate_tab; +?column? 
+ +t +step s1_commit: COMMIT; +step s2_reset: RESET ROLE; + +starting permutation: s1_begin s2_grant s2_auth s1_tab_lookup s2_truncate s1_commit s2_reset +step s1_begin: BEGIN; +step s2_grant: GRANT TRUNCATE ON truncate_tab TO regress_truncate_conflict; +step s2_auth: SET ROLE regress_truncate_conflict; +step s1_tab_lookup: SELECT count(*) >= 0 FROM truncate_tab; +?column? + +t +step s2_truncate: TRUNCATE truncate_tab; +step s1_commit: COMMIT; +step s2_truncate: <... completed> +step s2_reset: RESET ROLE; + +starting permutation: s2_grant s2_auth s2_truncate s1_begin s1_tab_lookup s1_commit s2_reset +step s2_grant: GRANT TRUNCATE ON truncate_tab TO regress_truncate_conflict; +step s2_auth: SET ROLE regress_truncate_conflict; +step s2_truncate: TRUNCATE truncate_tab; +step s1_begin: BEGIN; +step s1_tab_lookup: SELECT count(*) >= 0 FROM truncate_tab; +?column? + +t +step s1_commit: COMMIT; +step s2_reset: RESET ROLE; diff --git a/src/test/isolation/expected/vacuum-concurrent-drop.out b/src/test/isolation/expected/vacuum-concurrent-drop.out new file mode 100644 index 0000000000..cf348d7e5d --- /dev/null +++ b/src/test/isolation/expected/vacuum-concurrent-drop.out @@ -0,0 +1,76 @@ +Parsed test spec with 2 sessions + +starting permutation: lock vac_specified drop_and_commit +step lock: + BEGIN; + LOCK part1 IN SHARE MODE; + +step vac_specified: VACUUM part1, part2; +step drop_and_commit: + DROP TABLE part2; + COMMIT; + +s2: WARNING: skipping vacuum of "part2" --- relation no longer exists +step vac_specified: <... completed> + +starting permutation: lock vac_all_parts drop_and_commit +step lock: + BEGIN; + LOCK part1 IN SHARE MODE; + +step vac_all_parts: VACUUM parted; +step drop_and_commit: + DROP TABLE part2; + COMMIT; + +step vac_all_parts: <... completed> + +starting permutation: lock analyze_specified drop_and_commit +step lock: + BEGIN; + LOCK part1 IN SHARE MODE; + +step analyze_specified: ANALYZE part1, part2; +step drop_and_commit: + DROP TABLE part2; + COMMIT; + +s2: WARNING: skipping analyze of "part2" --- relation no longer exists +step analyze_specified: <... completed> + +starting permutation: lock analyze_all_parts drop_and_commit +step lock: + BEGIN; + LOCK part1 IN SHARE MODE; + +step analyze_all_parts: ANALYZE parted; +step drop_and_commit: + DROP TABLE part2; + COMMIT; + +step analyze_all_parts: <... completed> + +starting permutation: lock vac_analyze_specified drop_and_commit +step lock: + BEGIN; + LOCK part1 IN SHARE MODE; + +step vac_analyze_specified: VACUUM ANALYZE part1, part2; +step drop_and_commit: + DROP TABLE part2; + COMMIT; + +s2: WARNING: skipping vacuum of "part2" --- relation no longer exists +step vac_analyze_specified: <... completed> + +starting permutation: lock vac_analyze_all_parts drop_and_commit +step lock: + BEGIN; + LOCK part1 IN SHARE MODE; + +step vac_analyze_all_parts: VACUUM ANALYZE parted; +step drop_and_commit: + DROP TABLE part2; + COMMIT; + +step vac_analyze_all_parts: <... 
completed> diff --git a/src/test/isolation/expected/vacuum-conflict.out b/src/test/isolation/expected/vacuum-conflict.out new file mode 100644 index 0000000000..ffde537305 --- /dev/null +++ b/src/test/isolation/expected/vacuum-conflict.out @@ -0,0 +1,149 @@ +Parsed test spec with 2 sessions + +starting permutation: s1_begin s1_lock s2_auth s2_vacuum s1_commit s2_reset +step s1_begin: BEGIN; +step s1_lock: LOCK vacuum_tab IN SHARE UPDATE EXCLUSIVE MODE; +step s2_auth: SET ROLE regress_vacuum_conflict; +s2: WARNING: skipping "vacuum_tab" --- only table or database owner can vacuum it +step s2_vacuum: VACUUM vacuum_tab; +step s1_commit: COMMIT; +step s2_reset: RESET ROLE; + +starting permutation: s1_begin s2_auth s2_vacuum s1_lock s1_commit s2_reset +step s1_begin: BEGIN; +step s2_auth: SET ROLE regress_vacuum_conflict; +s2: WARNING: skipping "vacuum_tab" --- only table or database owner can vacuum it +step s2_vacuum: VACUUM vacuum_tab; +step s1_lock: LOCK vacuum_tab IN SHARE UPDATE EXCLUSIVE MODE; +step s1_commit: COMMIT; +step s2_reset: RESET ROLE; + +starting permutation: s1_begin s2_auth s1_lock s2_vacuum s1_commit s2_reset +step s1_begin: BEGIN; +step s2_auth: SET ROLE regress_vacuum_conflict; +step s1_lock: LOCK vacuum_tab IN SHARE UPDATE EXCLUSIVE MODE; +s2: WARNING: skipping "vacuum_tab" --- only table or database owner can vacuum it +step s2_vacuum: VACUUM vacuum_tab; +step s1_commit: COMMIT; +step s2_reset: RESET ROLE; + +starting permutation: s2_auth s2_vacuum s1_begin s1_lock s1_commit s2_reset +step s2_auth: SET ROLE regress_vacuum_conflict; +s2: WARNING: skipping "vacuum_tab" --- only table or database owner can vacuum it +step s2_vacuum: VACUUM vacuum_tab; +step s1_begin: BEGIN; +step s1_lock: LOCK vacuum_tab IN SHARE UPDATE EXCLUSIVE MODE; +step s1_commit: COMMIT; +step s2_reset: RESET ROLE; + +starting permutation: s1_begin s1_lock s2_auth s2_analyze s1_commit s2_reset +step s1_begin: BEGIN; +step s1_lock: LOCK vacuum_tab IN SHARE UPDATE EXCLUSIVE MODE; +step s2_auth: SET ROLE regress_vacuum_conflict; +s2: WARNING: skipping "vacuum_tab" --- only table or database owner can analyze it +step s2_analyze: ANALYZE vacuum_tab; +step s1_commit: COMMIT; +step s2_reset: RESET ROLE; + +starting permutation: s1_begin s2_auth s2_analyze s1_lock s1_commit s2_reset +step s1_begin: BEGIN; +step s2_auth: SET ROLE regress_vacuum_conflict; +s2: WARNING: skipping "vacuum_tab" --- only table or database owner can analyze it +step s2_analyze: ANALYZE vacuum_tab; +step s1_lock: LOCK vacuum_tab IN SHARE UPDATE EXCLUSIVE MODE; +step s1_commit: COMMIT; +step s2_reset: RESET ROLE; + +starting permutation: s1_begin s2_auth s1_lock s2_analyze s1_commit s2_reset +step s1_begin: BEGIN; +step s2_auth: SET ROLE regress_vacuum_conflict; +step s1_lock: LOCK vacuum_tab IN SHARE UPDATE EXCLUSIVE MODE; +s2: WARNING: skipping "vacuum_tab" --- only table or database owner can analyze it +step s2_analyze: ANALYZE vacuum_tab; +step s1_commit: COMMIT; +step s2_reset: RESET ROLE; + +starting permutation: s2_auth s2_analyze s1_begin s1_lock s1_commit s2_reset +step s2_auth: SET ROLE regress_vacuum_conflict; +s2: WARNING: skipping "vacuum_tab" --- only table or database owner can analyze it +step s2_analyze: ANALYZE vacuum_tab; +step s1_begin: BEGIN; +step s1_lock: LOCK vacuum_tab IN SHARE UPDATE EXCLUSIVE MODE; +step s1_commit: COMMIT; +step s2_reset: RESET ROLE; + +starting permutation: s1_begin s2_grant s1_lock s2_auth s2_vacuum s1_commit s2_reset +step s1_begin: BEGIN; +step s2_grant: ALTER TABLE vacuum_tab OWNER TO 
regress_vacuum_conflict; +step s1_lock: LOCK vacuum_tab IN SHARE UPDATE EXCLUSIVE MODE; +step s2_auth: SET ROLE regress_vacuum_conflict; +step s2_vacuum: VACUUM vacuum_tab; +step s1_commit: COMMIT; +step s2_vacuum: <... completed> +step s2_reset: RESET ROLE; + +starting permutation: s1_begin s2_grant s2_auth s2_vacuum s1_lock s1_commit s2_reset +step s1_begin: BEGIN; +step s2_grant: ALTER TABLE vacuum_tab OWNER TO regress_vacuum_conflict; +step s2_auth: SET ROLE regress_vacuum_conflict; +step s2_vacuum: VACUUM vacuum_tab; +step s1_lock: LOCK vacuum_tab IN SHARE UPDATE EXCLUSIVE MODE; +step s1_commit: COMMIT; +step s2_reset: RESET ROLE; + +starting permutation: s1_begin s2_grant s2_auth s1_lock s2_vacuum s1_commit s2_reset +step s1_begin: BEGIN; +step s2_grant: ALTER TABLE vacuum_tab OWNER TO regress_vacuum_conflict; +step s2_auth: SET ROLE regress_vacuum_conflict; +step s1_lock: LOCK vacuum_tab IN SHARE UPDATE EXCLUSIVE MODE; +step s2_vacuum: VACUUM vacuum_tab; +step s1_commit: COMMIT; +step s2_vacuum: <... completed> +step s2_reset: RESET ROLE; + +starting permutation: s2_grant s2_auth s2_vacuum s1_begin s1_lock s1_commit s2_reset +step s2_grant: ALTER TABLE vacuum_tab OWNER TO regress_vacuum_conflict; +step s2_auth: SET ROLE regress_vacuum_conflict; +step s2_vacuum: VACUUM vacuum_tab; +step s1_begin: BEGIN; +step s1_lock: LOCK vacuum_tab IN SHARE UPDATE EXCLUSIVE MODE; +step s1_commit: COMMIT; +step s2_reset: RESET ROLE; + +starting permutation: s1_begin s2_grant s1_lock s2_auth s2_analyze s1_commit s2_reset +step s1_begin: BEGIN; +step s2_grant: ALTER TABLE vacuum_tab OWNER TO regress_vacuum_conflict; +step s1_lock: LOCK vacuum_tab IN SHARE UPDATE EXCLUSIVE MODE; +step s2_auth: SET ROLE regress_vacuum_conflict; +step s2_analyze: ANALYZE vacuum_tab; +step s1_commit: COMMIT; +step s2_analyze: <... completed> +step s2_reset: RESET ROLE; + +starting permutation: s1_begin s2_grant s2_auth s2_analyze s1_lock s1_commit s2_reset +step s1_begin: BEGIN; +step s2_grant: ALTER TABLE vacuum_tab OWNER TO regress_vacuum_conflict; +step s2_auth: SET ROLE regress_vacuum_conflict; +step s2_analyze: ANALYZE vacuum_tab; +step s1_lock: LOCK vacuum_tab IN SHARE UPDATE EXCLUSIVE MODE; +step s1_commit: COMMIT; +step s2_reset: RESET ROLE; + +starting permutation: s1_begin s2_grant s2_auth s1_lock s2_analyze s1_commit s2_reset +step s1_begin: BEGIN; +step s2_grant: ALTER TABLE vacuum_tab OWNER TO regress_vacuum_conflict; +step s2_auth: SET ROLE regress_vacuum_conflict; +step s1_lock: LOCK vacuum_tab IN SHARE UPDATE EXCLUSIVE MODE; +step s2_analyze: ANALYZE vacuum_tab; +step s1_commit: COMMIT; +step s2_analyze: <... 
completed> +step s2_reset: RESET ROLE; + +starting permutation: s2_grant s2_auth s2_analyze s1_begin s1_lock s1_commit s2_reset +step s2_grant: ALTER TABLE vacuum_tab OWNER TO regress_vacuum_conflict; +step s2_auth: SET ROLE regress_vacuum_conflict; +step s2_analyze: ANALYZE vacuum_tab; +step s1_begin: BEGIN; +step s1_lock: LOCK vacuum_tab IN SHARE UPDATE EXCLUSIVE MODE; +step s1_commit: COMMIT; +step s2_reset: RESET ROLE; diff --git a/src/test/isolation/expected/vacuum-skip-locked.out b/src/test/isolation/expected/vacuum-skip-locked.out new file mode 100644 index 0000000000..99db281a15 --- /dev/null +++ b/src/test/isolation/expected/vacuum-skip-locked.out @@ -0,0 +1,171 @@ +Parsed test spec with 2 sessions + +starting permutation: lock_share vac_specified commit +step lock_share: + BEGIN; + LOCK part1 IN SHARE MODE; + +s2: WARNING: skipping vacuum of "part1" --- lock not available +step vac_specified: VACUUM (SKIP_LOCKED) part1, part2; +step commit: + COMMIT; + + +starting permutation: lock_share vac_all_parts commit +step lock_share: + BEGIN; + LOCK part1 IN SHARE MODE; + +step vac_all_parts: VACUUM (SKIP_LOCKED) parted; +step commit: + COMMIT; + + +starting permutation: lock_share analyze_specified commit +step lock_share: + BEGIN; + LOCK part1 IN SHARE MODE; + +s2: WARNING: skipping analyze of "part1" --- lock not available +step analyze_specified: ANALYZE (SKIP_LOCKED) part1, part2; +step commit: + COMMIT; + + +starting permutation: lock_share analyze_all_parts commit +step lock_share: + BEGIN; + LOCK part1 IN SHARE MODE; + +step analyze_all_parts: ANALYZE (SKIP_LOCKED) parted; +step commit: + COMMIT; + + +starting permutation: lock_share vac_analyze_specified commit +step lock_share: + BEGIN; + LOCK part1 IN SHARE MODE; + +s2: WARNING: skipping vacuum of "part1" --- lock not available +step vac_analyze_specified: VACUUM (ANALYZE, SKIP_LOCKED) part1, part2; +step commit: + COMMIT; + + +starting permutation: lock_share vac_analyze_all_parts commit +step lock_share: + BEGIN; + LOCK part1 IN SHARE MODE; + +step vac_analyze_all_parts: VACUUM (ANALYZE, SKIP_LOCKED) parted; +step commit: + COMMIT; + + +starting permutation: lock_share vac_full_specified commit +step lock_share: + BEGIN; + LOCK part1 IN SHARE MODE; + +s2: WARNING: skipping vacuum of "part1" --- lock not available +step vac_full_specified: VACUUM (SKIP_LOCKED, FULL) part1, part2; +step commit: + COMMIT; + + +starting permutation: lock_share vac_full_all_parts commit +step lock_share: + BEGIN; + LOCK part1 IN SHARE MODE; + +step vac_full_all_parts: VACUUM (SKIP_LOCKED, FULL) parted; +step commit: + COMMIT; + + +starting permutation: lock_access_exclusive vac_specified commit +step lock_access_exclusive: + BEGIN; + LOCK part1 IN ACCESS EXCLUSIVE MODE; + +s2: WARNING: skipping vacuum of "part1" --- lock not available +step vac_specified: VACUUM (SKIP_LOCKED) part1, part2; +step commit: + COMMIT; + + +starting permutation: lock_access_exclusive vac_all_parts commit +step lock_access_exclusive: + BEGIN; + LOCK part1 IN ACCESS EXCLUSIVE MODE; + +step vac_all_parts: VACUUM (SKIP_LOCKED) parted; +step commit: + COMMIT; + + +starting permutation: lock_access_exclusive analyze_specified commit +step lock_access_exclusive: + BEGIN; + LOCK part1 IN ACCESS EXCLUSIVE MODE; + +s2: WARNING: skipping analyze of "part1" --- lock not available +step analyze_specified: ANALYZE (SKIP_LOCKED) part1, part2; +step commit: + COMMIT; + + +starting permutation: lock_access_exclusive analyze_all_parts commit +step lock_access_exclusive: + BEGIN; + LOCK 
part1 IN ACCESS EXCLUSIVE MODE; + +step analyze_all_parts: ANALYZE (SKIP_LOCKED) parted; +step commit: + COMMIT; + +step analyze_all_parts: <... completed> + +starting permutation: lock_access_exclusive vac_analyze_specified commit +step lock_access_exclusive: + BEGIN; + LOCK part1 IN ACCESS EXCLUSIVE MODE; + +s2: WARNING: skipping vacuum of "part1" --- lock not available +step vac_analyze_specified: VACUUM (ANALYZE, SKIP_LOCKED) part1, part2; +step commit: + COMMIT; + + +starting permutation: lock_access_exclusive vac_analyze_all_parts commit +step lock_access_exclusive: + BEGIN; + LOCK part1 IN ACCESS EXCLUSIVE MODE; + +step vac_analyze_all_parts: VACUUM (ANALYZE, SKIP_LOCKED) parted; +step commit: + COMMIT; + +step vac_analyze_all_parts: <... completed> + +starting permutation: lock_access_exclusive vac_full_specified commit +step lock_access_exclusive: + BEGIN; + LOCK part1 IN ACCESS EXCLUSIVE MODE; + +s2: WARNING: skipping vacuum of "part1" --- lock not available +step vac_full_specified: VACUUM (SKIP_LOCKED, FULL) part1, part2; +step commit: + COMMIT; + + +starting permutation: lock_access_exclusive vac_full_all_parts commit +step lock_access_exclusive: + BEGIN; + LOCK part1 IN ACCESS EXCLUSIVE MODE; + +step vac_full_all_parts: VACUUM (SKIP_LOCKED, FULL) parted; +step commit: + COMMIT; + diff --git a/src/test/isolation/isolation_main.c b/src/test/isolation/isolation_main.c index 8a3d7f51b3..d3ada6d2bc 100644 --- a/src/test/isolation/isolation_main.c +++ b/src/test/isolation/isolation_main.c @@ -2,7 +2,7 @@ * * isolation_main --- pg_regress test launcher for isolation tests * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/test/isolation/isolation_main.c @@ -75,15 +75,27 @@ isolation_start_test(const char *testname, add_stringlist_item(expectfiles, expectfile); if (launcher) + { offset += snprintf(psql_cmd + offset, sizeof(psql_cmd) - offset, "%s ", launcher); + if (offset >= sizeof(psql_cmd)) + { + fprintf(stderr, _("command too long\n")); + exit(2); + } + } - snprintf(psql_cmd + offset, sizeof(psql_cmd) - offset, - "\"%s\" \"dbname=%s\" < \"%s\" > \"%s\" 2>&1", - isolation_exec, - dblist->str, - infile, - outfile); + offset += snprintf(psql_cmd + offset, sizeof(psql_cmd) - offset, + "\"%s\" \"dbname=%s\" < \"%s\" > \"%s\" 2>&1", + isolation_exec, + dblist->str, + infile, + outfile); + if (offset >= sizeof(psql_cmd)) + { + fprintf(stderr, _("command too long\n")); + exit(2); + } pid = spawn_process(psql_cmd); diff --git a/src/test/isolation/isolation_schedule b/src/test/isolation/isolation_schedule index 32c965b2a0..dd57a96e78 100644 --- a/src/test/isolation/isolation_schedule +++ b/src/test/isolation/isolation_schedule @@ -44,6 +44,7 @@ test: update-locked-tuple test: propagate-lock-delete test: tuplelock-conflict test: tuplelock-update +test: freeze-the-dead test: nowait test: nowait-2 test: nowait-3 @@ -54,11 +55,27 @@ test: skip-locked-2 test: skip-locked-3 test: skip-locked-4 test: drop-index-concurrently-1 +test: multiple-cic test: alter-table-1 test: alter-table-2 test: alter-table-3 +test: alter-table-4 test: create-trigger test: sequence-ddl test: async-notify test: vacuum-reltuples test: timeouts +test: vacuum-concurrent-drop +test: vacuum-conflict +test: vacuum-skip-locked +test: predicate-hash +test: predicate-gist +test: predicate-gin +test: predicate-gin-fastupdate +test: predicate-gin-nomatch 
+test: partition-key-update-1 +test: partition-key-update-2 +test: partition-key-update-3 +test: partition-key-update-4 +test: plpgsql-toast +test: truncate-conflict diff --git a/src/test/isolation/isolationtester.c b/src/test/isolation/isolationtester.c index ba8082c980..e2638553f6 100644 --- a/src/test/isolation/isolationtester.c +++ b/src/test/isolation/isolationtester.c @@ -32,6 +32,7 @@ static int nconns = 0; /* In dry run only output permutations to be run by the tester. */ static int dry_run = false; +static void exit_nicely(void) pg_attribute_noreturn(); static void run_testspec(TestSpec *testspec); static void run_all_permutations(TestSpec *testspec); static void run_all_permutations_recurse(TestSpec *testspec, int nsteps, @@ -47,6 +48,8 @@ static int step_qsort_cmp(const void *a, const void *b); static int step_bsearch_cmp(const void *a, const void *b); static void printResultSet(PGresult *res); +static void isotesterNoticeProcessor(void *arg, const char *message); +static void blackholeNoticeProcessor(void *arg, const char *message); /* close all connections and exit */ static void @@ -170,6 +173,21 @@ main(int argc, char **argv) exit_nicely(); } + /* + * Set up notice processors for the user-defined connections, so that + * messages can get printed prefixed with the session names. The + * control connection gets a "blackhole" processor instead (hides all + * messages). + */ + if (i != 0) + PQsetNoticeProcessor(conns[i], + isotesterNoticeProcessor, + (void *) (testspec->sessions[i - 1]->name)); + else + PQsetNoticeProcessor(conns[i], + blackholeNoticeProcessor, + NULL); + /* * Suppress NOTIFY messages, which otherwise pop into results at odd * places. @@ -593,7 +611,7 @@ run_permutation(TestSpec *testspec, int nsteps, Step **steps) if (!PQsendQuery(conn, step->sql)) { fprintf(stdout, "failed to send query for step %s: %s\n", - step->name, PQerrorMessage(conns[1 + step->session])); + step->name, PQerrorMessage(conn)); exit_nicely(); } @@ -742,7 +760,7 @@ try_complete_step(Step *step, int flags) PQntuples(res) != 1) { fprintf(stderr, "lock wait query failed: %s", - PQerrorMessage(conn)); + PQerrorMessage(conns[0])); exit_nicely(); } waiting = ((PQgetvalue(res, 0, 0))[0] == 't'); @@ -880,3 +898,17 @@ printResultSet(PGresult *res) printf("\n"); } } + +/* notice processor, prefixes each message with the session name */ +static void +isotesterNoticeProcessor(void *arg, const char *message) +{ + fprintf(stderr, "%s: %s", (char *) arg, message); +} + +/* notice processor, hides the message */ +static void +blackholeNoticeProcessor(void *arg, const char *message) +{ + /* do nothing */ +} diff --git a/src/test/isolation/isolationtester.h b/src/test/isolation/isolationtester.h index 1f28272d65..a4d989bd1a 100644 --- a/src/test/isolation/isolationtester.h +++ b/src/test/isolation/isolationtester.h @@ -3,7 +3,7 @@ * isolationtester.h * include file for isolation tests * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * IDENTIFICATION diff --git a/src/test/isolation/specparse.y b/src/test/isolation/specparse.y index 759b9b456c..654716194c 100644 --- a/src/test/isolation/specparse.y +++ b/src/test/isolation/specparse.y @@ -4,7 +4,7 @@ * specparse.y * bison grammar for the isolation test file format * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL 
Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * *------------------------------------------------------------------------- diff --git a/src/test/isolation/specs/alter-table-4.spec b/src/test/isolation/specs/alter-table-4.spec new file mode 100644 index 0000000000..a9c1a93723 --- /dev/null +++ b/src/test/isolation/specs/alter-table-4.spec @@ -0,0 +1,37 @@ +# ALTER TABLE - Add and remove inheritance with concurrent reads + +setup +{ + CREATE TABLE p (a integer); + INSERT INTO p VALUES(1); + CREATE TABLE c1 () INHERITS (p); + INSERT INTO c1 VALUES(10); + CREATE TABLE c2 (a integer); + INSERT INTO c2 VALUES(100); +} + +teardown +{ + DROP TABLE IF EXISTS c1, c2, p; +} + +session "s1" +step "s1b" { BEGIN; } +step "s1delc1" { ALTER TABLE c1 NO INHERIT p; } +step "s1modc1a" { ALTER TABLE c1 ALTER COLUMN a TYPE float; } +step "s1addc2" { ALTER TABLE c2 INHERIT p; } +step "s1dropc1" { DROP TABLE c1; } +step "s1c" { COMMIT; } + +session "s2" +step "s2sel" { SELECT SUM(a) FROM p; } + +# NO INHERIT will not be visible to concurrent select, +# since we identify children before locking them +permutation "s1b" "s1delc1" "s2sel" "s1c" "s2sel" +# adding inheritance likewise is not seen if s1 commits after s2 locks p +permutation "s1b" "s1delc1" "s1addc2" "s2sel" "s1c" "s2sel" +# but we do cope with DROP on a child table +permutation "s1b" "s1dropc1" "s2sel" "s1c" "s2sel" +# this case currently results in an error; doesn't seem worth preventing +permutation "s1b" "s1delc1" "s1modc1a" "s2sel" "s1c" "s2sel" diff --git a/src/test/isolation/specs/eval-plan-qual.spec b/src/test/isolation/specs/eval-plan-qual.spec index 7ff6f6b8cc..367922de75 100644 --- a/src/test/isolation/specs/eval-plan-qual.spec +++ b/src/test/isolation/specs/eval-plan-qual.spec @@ -9,6 +9,11 @@ setup CREATE TABLE accounts (accountid text PRIMARY KEY, balance numeric not null); INSERT INTO accounts VALUES ('checking', 600), ('savings', 600); + CREATE TABLE accounts_ext (accountid text PRIMARY KEY, balance numeric not null, other text); + INSERT INTO accounts_ext VALUES ('checking', 600, 'other'), ('savings', 700, null); + ALTER TABLE accounts_ext ADD COLUMN newcol int DEFAULT 42; + ALTER TABLE accounts_ext ADD COLUMN newcol2 text DEFAULT NULL; + CREATE TABLE p (a int, b int, c int); CREATE TABLE c1 () INHERITS (p); CREATE TABLE c2 () INHERITS (p); @@ -21,13 +26,17 @@ setup CREATE TABLE table_b (id integer, value text); INSERT INTO table_a VALUES (1, 'tableAValue'); INSERT INTO table_b VALUES (1, 'tableBValue'); + + CREATE TABLE jointest AS SELECT generate_series(1,10) AS id, 0 AS data; + CREATE INDEX ON jointest(id); } teardown { DROP TABLE accounts; + DROP TABLE accounts_ext; DROP TABLE p CASCADE; - DROP TABLE table_a, table_b; + DROP TABLE table_a, table_b, jointest; } session "s1" @@ -69,6 +78,11 @@ step "lockwithvalues" { WHERE a1.accountid = v.id FOR UPDATE OF a1; } +step "partiallock_ext" { + SELECT * FROM accounts_ext a1, accounts_ext a2 + WHERE a1.accountid = a2.accountid + FOR UPDATE OF a1; +} # these tests exercise EvalPlanQual with a SubLink sub-select (which should be # unaffected by any EPQ recheck behavior in the outer query); cf bug #14034 @@ -78,6 +92,24 @@ step "updateforss" { UPDATE table_b SET value = 'newTableBValue' WHERE id = 1; } +# these tests exercise EvalPlanQual with conditional InitPlans which +# have not been executed prior to the EPQ + +step "updateforcip" { + UPDATE table_a SET value = NULL WHERE id = 1; +} + +# these tests exercise mark/restore during EPQ 
recheck, cf bug #15032 + +step "selectjoinforupdate" { + set enable_nestloop to 0; + set enable_hashjoin to 0; + set enable_seqscan to 0; + explain (costs off) + select * from jointest a join jointest b on a.id=b.id for update; + select * from jointest a join jointest b on a.id=b.id for update; +} + session "s2" setup { BEGIN ISOLATION LEVEL READ COMMITTED; } @@ -91,6 +123,7 @@ step "upsert2" { INSERT INTO accounts SELECT 'savings', 1234 WHERE NOT EXISTS (SELECT 1 FROM upsert); } +step "wx2_ext" { UPDATE accounts_ext SET balance = balance + 450; } step "readp2" { SELECT tableoid::regclass, ctid, * FROM p WHERE b IN (0, 1) AND c = 0 FOR UPDATE; } step "returningp1" { WITH u AS ( UPDATE p SET b = b WHERE a > 0 RETURNING * ) @@ -103,12 +136,22 @@ step "readforss" { FROM table_a ta WHERE ta.id = 1 FOR UPDATE OF ta; } +step "updateforcip2" { + UPDATE table_a SET value = COALESCE(value, (SELECT text 'newValue')) WHERE id = 1; +} +step "updateforcip3" { + WITH d(val) AS (SELECT text 'newValue' FROM generate_series(1,1)) + UPDATE table_a SET value = COALESCE(value, (SELECT val FROM d)) WHERE id = 1; +} step "wrtwcte" { UPDATE table_a SET value = 'tableAValue2' WHERE id = 1; } +step "wrjt" { UPDATE jointest SET data = 42 WHERE id = 7; } step "c2" { COMMIT; } session "s3" setup { BEGIN ISOLATION LEVEL READ COMMITTED; } step "read" { SELECT * FROM accounts ORDER BY accountid; } +step "read_ext" { SELECT * FROM accounts_ext ORDER BY accountid; } +step "read_a" { SELECT * FROM table_a ORDER BY id; } # this test exercises EvalPlanQual with a CTE, cf bug #14328 step "readwcte" { @@ -124,6 +167,14 @@ step "readwcte" { SELECT * FROM cte2; } +# this test exercises a different CTE misbehavior, cf bug #14870 +step "multireadwcte" { + WITH updated AS ( + UPDATE table_a SET value = 'tableAValue3' WHERE id = 1 RETURNING id + ) + SELECT (SELECT id FROM updated) AS subid, * FROM updated; +} + teardown { COMMIT; } permutation "wx1" "wx2" "c1" "c2" "read" @@ -133,5 +184,10 @@ permutation "readp1" "writep1" "readp2" "c1" "c2" permutation "writep2" "returningp1" "c1" "c2" permutation "wx2" "partiallock" "c2" "c1" "read" permutation "wx2" "lockwithvalues" "c2" "c1" "read" +permutation "wx2_ext" "partiallock_ext" "c2" "c1" "read_ext" permutation "updateforss" "readforss" "c1" "c2" +permutation "updateforcip" "updateforcip2" "c1" "c2" "read_a" +permutation "updateforcip" "updateforcip3" "c1" "c2" "read_a" permutation "wrtwcte" "readwcte" "c1" "c2" +permutation "wrjt" "selectjoinforupdate" "c2" "c1" +permutation "wrtwcte" "multireadwcte" "c1" "c2" diff --git a/src/test/isolation/specs/freeze-the-dead.spec b/src/test/isolation/specs/freeze-the-dead.spec new file mode 100644 index 0000000000..e24d7d5d11 --- /dev/null +++ b/src/test/isolation/specs/freeze-the-dead.spec @@ -0,0 +1,59 @@ +# Test for interactions of tuple freezing with dead, as well as recently-dead +# tuples using multixacts via FOR KEY SHARE. 
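As background for the permutations in this spec, here is a rough hand-run sketch (session labels are illustrative; tab_freeze and its rows come from the setup that follows) of how a FOR KEY SHARE lock taken alongside an uncommitted non-key UPDATE turns the row's xmax into a multixact, which is the state the freezing code then has to handle:

    -- session A
    BEGIN;
    UPDATE tab_freeze SET x = x + 1 WHERE id = 3;   -- non-key update: the old version's xmax is A's xid

    -- session B, while A is still open
    BEGIN;
    SELECT id FROM tab_freeze WHERE id = 3 FOR KEY SHARE;
    -- FOR KEY SHARE does not conflict with a non-key update, so instead of
    -- blocking, B is added and xmax becomes a multixact containing A and B

    -- after both sessions commit, an aggressive freeze has to look inside the
    -- multixact before deciding whether to prune or freeze the old row version
    VACUUM FREEZE tab_freeze;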
+setup +{ + CREATE TABLE tab_freeze ( + id int PRIMARY KEY, + name char(3), + x int); + INSERT INTO tab_freeze VALUES (1, '111', 0); + INSERT INTO tab_freeze VALUES (3, '333', 0); +} + +teardown +{ + DROP TABLE tab_freeze; +} + +session "s1" +step "s1_begin" { BEGIN; } +step "s1_update" { UPDATE tab_freeze SET x = x + 1 WHERE id = 3; } +step "s1_commit" { COMMIT; } +step "s1_vacuum" { VACUUM FREEZE tab_freeze; } +step "s1_selectone" { + BEGIN; + SET LOCAL enable_seqscan = false; + SET LOCAL enable_bitmapscan = false; + SELECT * FROM tab_freeze WHERE id = 3; + COMMIT; +} +step "s1_selectall" { SELECT * FROM tab_freeze ORDER BY name, id; } +step "s1_reindex" { REINDEX TABLE tab_freeze; } + +session "s2" +step "s2_begin" { BEGIN; } +step "s2_key_share" { SELECT id FROM tab_freeze WHERE id = 3 FOR KEY SHARE; } +step "s2_commit" { COMMIT; } +step "s2_vacuum" { VACUUM FREEZE tab_freeze; } + +session "s3" +step "s3_begin" { BEGIN; } +step "s3_key_share" { SELECT id FROM tab_freeze WHERE id = 3 FOR KEY SHARE; } +step "s3_commit" { COMMIT; } +step "s3_vacuum" { VACUUM FREEZE tab_freeze; } + +# This permutation verfies that a previous bug +# https://postgr.es/m/E5711E62-8FDF-4DCA-A888-C200BF6B5742@amazon.com +# https://postgr.es/m/20171102112019.33wb7g5wp4zpjelu@alap3.anarazel.de +# is not reintroduced. We used to make wrong pruning / freezing +# decision for multixacts, which could lead to a) broken hot chains b) +# dead rows being revived. +permutation "s1_begin" "s2_begin" "s3_begin" # start transactions + "s1_update" "s2_key_share" "s3_key_share" # have xmax be a multi with an updater, updater being oldest xid + "s1_update" # create additional row version that has multis + "s1_commit" "s2_commit" # commit both updater and share locker + "s2_vacuum" # due to bug in freezing logic, we used to *not* prune updated row, and then froze it + "s1_selectone" # if hot chain is broken, the row can't be found via index scan + "s3_commit" # commit remaining open xact + "s2_vacuum" # pruning / freezing in broken hot chains would unset xmax, reviving rows + "s1_selectall" # show borkedness diff --git a/src/test/isolation/specs/insert-conflict-do-nothing-2.spec b/src/test/isolation/specs/insert-conflict-do-nothing-2.spec index f1e5bde357..8a8ec94447 100644 --- a/src/test/isolation/specs/insert-conflict-do-nothing-2.spec +++ b/src/test/isolation/specs/insert-conflict-do-nothing-2.spec @@ -3,7 +3,7 @@ setup { - CREATE TABLE ints (key int primary key, val text); + CREATE TABLE ints (key int, val text, PRIMARY KEY (key) INCLUDE (val)); } teardown diff --git a/src/test/isolation/specs/insert-conflict-do-update-2.spec b/src/test/isolation/specs/insert-conflict-do-update-2.spec index cd7e3f42fe..f5b4f601b5 100644 --- a/src/test/isolation/specs/insert-conflict-do-update-2.spec +++ b/src/test/isolation/specs/insert-conflict-do-update-2.spec @@ -7,7 +7,7 @@ setup { CREATE TABLE upsert (key text not null, payload text); - CREATE UNIQUE INDEX ON upsert(lower(key)); + CREATE UNIQUE INDEX ON upsert(lower(key)) INCLUDE (payload); } teardown diff --git a/src/test/isolation/specs/lock-committed-keyupdate.spec b/src/test/isolation/specs/lock-committed-keyupdate.spec index 1630282d0f..3fb424af0e 100644 --- a/src/test/isolation/specs/lock-committed-keyupdate.spec +++ b/src/test/isolation/specs/lock-committed-keyupdate.spec @@ -8,7 +8,7 @@ setup { DROP TABLE IF EXISTS lcku_table; - CREATE TABLE lcku_table (id INTEGER PRIMARY KEY, value TEXT); + CREATE TABLE lcku_table (id INTEGER, value TEXT, PRIMARY KEY (id) INCLUDE (value)); 
INSERT INTO lcku_table VALUES (1, 'one'); INSERT INTO lcku_table VALUES (3, 'two'); } diff --git a/src/test/isolation/specs/lock-update-traversal.spec b/src/test/isolation/specs/lock-update-traversal.spec index 7042b9399c..2ffe87d152 100644 --- a/src/test/isolation/specs/lock-update-traversal.spec +++ b/src/test/isolation/specs/lock-update-traversal.spec @@ -7,8 +7,9 @@ setup { CREATE TABLE foo ( - key int PRIMARY KEY, - value int + key int, + value int, + PRIMARY KEY (key) INCLUDE (value) ); INSERT INTO foo VALUES (1, 1); diff --git a/src/test/isolation/specs/multiple-cic.spec b/src/test/isolation/specs/multiple-cic.spec new file mode 100644 index 0000000000..3199667be2 --- /dev/null +++ b/src/test/isolation/specs/multiple-cic.spec @@ -0,0 +1,40 @@ +# Test multiple CREATE INDEX CONCURRENTLY working simultaneously + +setup +{ + CREATE TABLE mcic_one ( + id int + ); + CREATE TABLE mcic_two ( + id int + ); + CREATE FUNCTION lck_shr(bigint) RETURNS bool IMMUTABLE LANGUAGE plpgsql AS $$ + BEGIN PERFORM pg_advisory_lock_shared($1); RETURN true; END; + $$; + CREATE FUNCTION unlck() RETURNS bool IMMUTABLE LANGUAGE plpgsql AS $$ + BEGIN PERFORM pg_advisory_unlock_all(); RETURN true; END; + $$; +} +teardown +{ + DROP TABLE mcic_one, mcic_two; + DROP FUNCTION lck_shr(bigint); + DROP FUNCTION unlck(); +} + +session "s1" +step "s1i" { + CREATE INDEX CONCURRENTLY mcic_one_pkey ON mcic_one (id) + WHERE lck_shr(281457); + } +teardown { SELECT unlck(); } + + +session "s2" +step "s2l" { SELECT pg_advisory_lock(281457); } +step "s2i" { + CREATE INDEX CONCURRENTLY mcic_two_pkey ON mcic_two (id) + WHERE unlck(); + } + +permutation "s2l" "s1i" "s2i" diff --git a/src/test/isolation/specs/partition-key-update-1.spec b/src/test/isolation/specs/partition-key-update-1.spec new file mode 100644 index 0000000000..8393f47c59 --- /dev/null +++ b/src/test/isolation/specs/partition-key-update-1.spec @@ -0,0 +1,85 @@ +# Test that an error if thrown if the target row has been moved to a +# different partition by a concurrent session. + +setup +{ + -- + -- Setup to test an error from ExecUpdate and ExecDelete. + -- + CREATE TABLE foo (a int, b text) PARTITION BY LIST(a); + CREATE TABLE foo1 PARTITION OF foo FOR VALUES IN (1); + CREATE TABLE foo2 PARTITION OF foo FOR VALUES IN (2); + INSERT INTO foo VALUES (1, 'ABC'); + + -- + -- Setup to test an error from GetTupleForTrigger + -- + CREATE TABLE footrg (a int, b text) PARTITION BY LIST(a); + CREATE TABLE footrg1 PARTITION OF footrg FOR VALUES IN (1); + CREATE TABLE footrg2 PARTITION OF footrg FOR VALUES IN (2); + INSERT INTO footrg VALUES (1, 'ABC'); + CREATE FUNCTION func_footrg_mod_a() RETURNS TRIGGER AS $$ + BEGIN + NEW.a = 2; -- This is changing partition key column. 
+ RETURN NEW;
+ END $$ LANGUAGE PLPGSQL;
+ CREATE TRIGGER footrg_mod_a BEFORE UPDATE ON footrg1
+ FOR EACH ROW EXECUTE PROCEDURE func_footrg_mod_a();
+
+ --
+ -- Setup to test an error from ExecLockRows
+ --
+ CREATE TABLE foo_range_parted (a int, b text) PARTITION BY RANGE(a);
+ CREATE TABLE foo_range_parted1 PARTITION OF foo_range_parted FOR VALUES FROM (1) TO (10);
+ CREATE TABLE foo_range_parted2 PARTITION OF foo_range_parted FOR VALUES FROM (10) TO (20);
+ INSERT INTO foo_range_parted VALUES(7, 'ABC');
+ CREATE UNIQUE INDEX foo_range_parted1_a_unique ON foo_range_parted1 (a);
+ CREATE TABLE bar (a int REFERENCES foo_range_parted1(a));
+}
+
+teardown
+{
+ DROP TABLE foo;
+ DROP TRIGGER footrg_mod_a ON footrg1;
+ DROP FUNCTION func_footrg_mod_a();
+ DROP TABLE footrg;
+ DROP TABLE bar, foo_range_parted;
+}
+
+session "s1"
+step "s1b" { BEGIN ISOLATION LEVEL READ COMMITTED; }
+step "s1u" { UPDATE foo SET a=2 WHERE a=1; }
+step "s1u2" { UPDATE footrg SET b='EFG' WHERE a=1; }
+step "s1u3pc" { UPDATE foo_range_parted SET a=11 WHERE a=7; }
+step "s1u3npc" { UPDATE foo_range_parted SET b='XYZ' WHERE a=7; }
+step "s1c" { COMMIT; }
+step "s1r" { ROLLBACK; }
+
+session "s2"
+step "s2b" { BEGIN ISOLATION LEVEL READ COMMITTED; }
+step "s2u" { UPDATE foo SET b='EFG' WHERE a=1; }
+step "s2u2" { UPDATE footrg SET b='XYZ' WHERE a=1; }
+step "s2i" { INSERT INTO bar VALUES(7); }
+step "s2d" { DELETE FROM foo WHERE a=1; }
+step "s2c" { COMMIT; }
+
+# Concurrency error from ExecUpdate and ExecDelete.
+permutation "s1b" "s2b" "s1u" "s1c" "s2d" "s2c"
+permutation "s1b" "s2b" "s1u" "s2d" "s1c" "s2c"
+permutation "s1b" "s2b" "s2d" "s1u" "s2c" "s1c"
+
+# Concurrency error from GetTupleForTrigger
+permutation "s1b" "s2b" "s1u2" "s1c" "s2u2" "s2c"
+permutation "s1b" "s2b" "s1u2" "s2u2" "s1c" "s2c"
+permutation "s1b" "s2b" "s2u2" "s1u2" "s2c" "s1c"
+
+# Concurrency error from ExecLockRows
+# test waiting for moved row itself
+permutation "s1b" "s2b" "s1u3pc" "s2i" "s1c" "s2c"
+permutation "s1b" "s2b" "s1u3pc" "s2i" "s1r" "s2c"
+# test waiting for in-partition update, followed by cross-partition move
+permutation "s1b" "s2b" "s1u3npc" "s1u3pc" "s2i" "s1c" "s2c"
+permutation "s1b" "s2b" "s1u3npc" "s1u3pc" "s2i" "s1r" "s2c"
+# test waiting for in-partition update, followed by two cross-partition moves
+permutation "s1b" "s2b" "s1u3npc" "s1u3pc" "s1u3pc" "s2i" "s1c" "s2c"
+permutation "s1b" "s2b" "s1u3npc" "s1u3pc" "s1u3pc" "s2i" "s1r" "s2c"
diff --git a/src/test/isolation/specs/partition-key-update-2.spec b/src/test/isolation/specs/partition-key-update-2.spec
new file mode 100644
index 0000000000..699e2e727f
--- /dev/null
+++ b/src/test/isolation/specs/partition-key-update-2.spec
@@ -0,0 +1,45 @@
+# Concurrent update of a partition key and INSERT...ON CONFLICT DO NOTHING test
+#
+# This test tries to expose problems with the interaction between concurrent
+# sessions during an update of the partition key and INSERT...ON CONFLICT DO
+# NOTHING on a partitioned table.
+#
+# The convention here is that session 1 moves a row from one partition to
+# another due to an update of the partition key, session 2 always ends up
+# inserting, and session 3 always ends up doing nothing.
+#
+# Note: This test slightly resembles the insert-conflict-do-nothing test.
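A hand-run sketch of the interaction this spec targets (two psql sessions; table names match the setup that follows):

    -- session 1: move the only row from partition foo1 to foo2
    BEGIN;
    UPDATE foo SET a = 2, b = b || ' -> moved by session-1' WHERE a = 1;

    -- session 2: conflicts on the primary key of foo1, so it waits on session 1
    INSERT INTO foo VALUES (1, 'session-2 donothing') ON CONFLICT DO NOTHING;

    -- session 1
    COMMIT;

    -- session 2 resumes: the old a = 1 version in foo1 is now dead, so there is
    -- no live conflict and the new row is inserted rather than skipped
    SELECT * FROM foo ORDER BY a;   -- one row with a = 1, one with a = 2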
+ +setup +{ + CREATE TABLE foo (a int primary key, b text) PARTITION BY LIST(a); + CREATE TABLE foo1 PARTITION OF foo FOR VALUES IN (1); + CREATE TABLE foo2 PARTITION OF foo FOR VALUES IN (2); + INSERT INTO foo VALUES (1, 'initial tuple'); +} + +teardown +{ + DROP TABLE foo; +} + +session "s1" +setup { BEGIN ISOLATION LEVEL READ COMMITTED; } +step "s1u" { UPDATE foo SET a=2, b=b || ' -> moved by session-1' WHERE a=1; } +step "s1c" { COMMIT; } + +session "s2" +setup { BEGIN ISOLATION LEVEL READ COMMITTED; } +step "s2donothing" { INSERT INTO foo VALUES(1, 'session-2 donothing') ON CONFLICT DO NOTHING; } +step "s2c" { COMMIT; } + +session "s3" +setup { BEGIN ISOLATION LEVEL READ COMMITTED; } +step "s3donothing" { INSERT INTO foo VALUES(2, 'session-3 donothing') ON CONFLICT DO NOTHING; } +step "s3select" { SELECT * FROM foo ORDER BY a; } +step "s3c" { COMMIT; } + +# Regular case where one session block-waits on another to determine if it +# should proceed with an insert or do nothing. +permutation "s1u" "s2donothing" "s3donothing" "s1c" "s2c" "s3select" "s3c" +permutation "s2donothing" "s1u" "s3donothing" "s1c" "s2c" "s3select" "s3c" diff --git a/src/test/isolation/specs/partition-key-update-3.spec b/src/test/isolation/specs/partition-key-update-3.spec new file mode 100644 index 0000000000..a6efea1381 --- /dev/null +++ b/src/test/isolation/specs/partition-key-update-3.spec @@ -0,0 +1,44 @@ +# Concurrent update of a partition key and INSERT...ON CONFLICT DO NOTHING +# test on partitioned table with multiple rows in higher isolation levels. +# +# Note: This test is resemble to insert-conflict-do-nothing-2 test + +setup +{ + CREATE TABLE foo (a int primary key, b text) PARTITION BY LIST(a); + CREATE TABLE foo1 PARTITION OF foo FOR VALUES IN (1); + CREATE TABLE foo2 PARTITION OF foo FOR VALUES IN (2); + INSERT INTO foo VALUES (1, 'initial tuple'); +} + +teardown +{ + DROP TABLE foo; +} + +session "s1" +setup { BEGIN ISOLATION LEVEL READ COMMITTED; } +step "s1u" { UPDATE foo SET a=2, b=b || ' -> moved by session-1' WHERE a=1; } +step "s1c" { COMMIT; } + +session "s2" +step "s2beginrr" { BEGIN ISOLATION LEVEL REPEATABLE READ; } +step "s2begins" { BEGIN ISOLATION LEVEL SERIALIZABLE; } +step "s2donothing" { INSERT INTO foo VALUES(1, 'session-2 donothing') ON CONFLICT DO NOTHING; } +step "s2c" { COMMIT; } +step "s2select" { SELECT * FROM foo ORDER BY a; } + +session "s3" +step "s3beginrr" { BEGIN ISOLATION LEVEL REPEATABLE READ; } +step "s3begins" { BEGIN ISOLATION LEVEL SERIALIZABLE; } +step "s3donothing" { INSERT INTO foo VALUES(2, 'session-3 donothing'), (2, 'session-3 donothing2') ON CONFLICT DO NOTHING; } +step "s3c" { COMMIT; } + +permutation "s2beginrr" "s3beginrr" "s1u" "s2donothing" "s1c" "s2c" "s3donothing" "s3c" "s2select" +permutation "s2beginrr" "s3beginrr" "s1u" "s3donothing" "s1c" "s3c" "s2donothing" "s2c" "s2select" +permutation "s2beginrr" "s3beginrr" "s1u" "s2donothing" "s3donothing" "s1c" "s2c" "s3c" "s2select" +permutation "s2beginrr" "s3beginrr" "s1u" "s3donothing" "s2donothing" "s1c" "s3c" "s2c" "s2select" +permutation "s2begins" "s3begins" "s1u" "s2donothing" "s1c" "s2c" "s3donothing" "s3c" "s2select" +permutation "s2begins" "s3begins" "s1u" "s3donothing" "s1c" "s3c" "s2donothing" "s2c" "s2select" +permutation "s2begins" "s3begins" "s1u" "s2donothing" "s3donothing" "s1c" "s2c" "s3c" "s2select" +permutation "s2begins" "s3begins" "s1u" "s3donothing" "s2donothing" "s1c" "s3c" "s2c" "s2select" diff --git a/src/test/isolation/specs/partition-key-update-4.spec 
b/src/test/isolation/specs/partition-key-update-4.spec new file mode 100644 index 0000000000..1d53a7d0c6 --- /dev/null +++ b/src/test/isolation/specs/partition-key-update-4.spec @@ -0,0 +1,76 @@ +# Test that a row that ends up in a new partition contains changes made by +# a concurrent transaction. + +setup +{ + -- + -- Setup to test concurrent handling of ExecDelete(). + -- + CREATE TABLE foo (a int, b text) PARTITION BY LIST(a); + CREATE TABLE foo1 PARTITION OF foo FOR VALUES IN (1); + CREATE TABLE foo2 PARTITION OF foo FOR VALUES IN (2); + INSERT INTO foo VALUES (1, 'ABC'); + + -- + -- Setup to test concurrent handling of GetTupleForTrigger(). + -- + CREATE TABLE footrg (a int, b text) PARTITION BY LIST(a); + CREATE TABLE triglog as select * from footrg; + CREATE TABLE footrg1 PARTITION OF footrg FOR VALUES IN (1); + CREATE TABLE footrg2 PARTITION OF footrg FOR VALUES IN (2); + INSERT INTO footrg VALUES (1, 'ABC'); + CREATE FUNCTION func_footrg() RETURNS TRIGGER AS $$ + BEGIN + OLD.b = OLD.b || ' trigger'; + + -- This will verify that the trigger is not run *before* the row is + -- refetched by EvalPlanQual. The OLD row should contain the changes made + -- by the concurrent session. + INSERT INTO triglog select OLD.*; + + RETURN OLD; + END $$ LANGUAGE PLPGSQL; + CREATE TRIGGER footrg_ondel BEFORE DELETE ON footrg1 + FOR EACH ROW EXECUTE PROCEDURE func_footrg(); + +} + +teardown +{ + DROP TABLE foo; + DROP TRIGGER footrg_ondel ON footrg1; + DROP FUNCTION func_footrg(); + DROP TABLE footrg; + DROP TABLE triglog; +} + +session "s1" +step "s1b" { BEGIN ISOLATION LEVEL READ COMMITTED; } +step "s1u" { UPDATE foo SET a = a + 1, b = b || ' update1' WHERE b like '%ABC%'; } +step "s1ut" { UPDATE footrg SET a = a + 1, b = b || ' update1' WHERE b like '%ABC%'; } +step "s1s" { SELECT tableoid::regclass, * FROM foo ORDER BY a; } +step "s1st" { SELECT tableoid::regclass, * FROM footrg ORDER BY a; } +step "s1stl" { SELECT * FROM triglog ORDER BY a; } +step "s1c" { COMMIT; } + +session "s2" +step "s2b" { BEGIN ISOLATION LEVEL READ COMMITTED; } +step "s2u1" { UPDATE foo SET b = b || ' update2' WHERE a = 1; } +step "s2u2" { UPDATE foo SET b = 'EFG' WHERE a = 1; } +step "s2ut1" { UPDATE footrg SET b = b || ' update2' WHERE a = 1; } +step "s2ut2" { UPDATE footrg SET b = 'EFG' WHERE a = 1; } +step "s2c" { COMMIT; } + + +# Session s1 is moving a row into another partition, but is waiting for +# another session s2 that is updating the original row. The row that ends up +# in the new partition should contain the changes made by session s2. +permutation "s1b" "s2b" "s2u1" "s1u" "s2c" "s1c" "s1s" + +# Same as above, except, session s1 is waiting in GetTupleTrigger(). +permutation "s1b" "s2b" "s2ut1" "s1ut" "s2c" "s1c" "s1st" "s1stl" + +# Below two cases are similar to the above two; except that the session s1 +# fails EvalPlanQual() test, so partition key update does not happen. +permutation "s1b" "s2b" "s2u2" "s1u" "s2c" "s1c" "s1s" +permutation "s1b" "s2b" "s2ut2" "s1ut" "s2c" "s1c" "s1st" "s1stl" diff --git a/src/test/isolation/specs/plpgsql-toast.spec b/src/test/isolation/specs/plpgsql-toast.spec new file mode 100644 index 0000000000..e6228c9ef6 --- /dev/null +++ b/src/test/isolation/specs/plpgsql-toast.spec @@ -0,0 +1,137 @@ +# Test TOAST behavior in PL/pgSQL procedures with transaction control. 
+# +# We need to ensure that values stored in PL/pgSQL variables are free +# of external TOAST references, because those could disappear after a +# transaction is committed (leading to errors "missing chunk number +# ... for toast value ..."). The tests here do this by running VACUUM +# in a second session. Advisory locks are used to have the VACUUM +# kick in at the right time. The different "assign" steps test +# different code paths for variable assignments in PL/pgSQL. + +setup +{ + CREATE TABLE test1 (a int, b text); + ALTER TABLE test1 ALTER COLUMN b SET STORAGE EXTERNAL; + INSERT INTO test1 VALUES (1, repeat('foo', 2000)); + CREATE TYPE test2 AS (a bigint, b text); +} + +teardown +{ + DROP TABLE test1; + DROP TYPE test2; +} + +session "s1" + +setup +{ + SELECT pg_advisory_unlock_all(); +} + +# assign_simple_var() +step "assign1" +{ +do $$ + declare + x text; + begin + select test1.b into x from test1; + delete from test1; + commit; + perform pg_advisory_lock(1); + raise notice 'x = %', x; + end; +$$; +} + +# assign_simple_var() +step "assign2" +{ +do $$ + declare + x text; + begin + x := (select test1.b from test1); + delete from test1; + commit; + perform pg_advisory_lock(1); + raise notice 'x = %', x; + end; +$$; +} + +# expanded_record_set_field() +step "assign3" +{ +do $$ + declare + r record; + begin + select * into r from test1; + r.b := (select test1.b from test1); + delete from test1; + commit; + perform pg_advisory_lock(1); + raise notice 'r = %', r; + end; +$$; +} + +# expanded_record_set_fields() +step "assign4" +{ +do $$ + declare + r test2; + begin + select * into r from test1; + delete from test1; + commit; + perform pg_advisory_lock(1); + raise notice 'r = %', r; + end; +$$; +} + +# expanded_record_set_tuple() +step "assign5" +{ +do $$ + declare + r record; + begin + for r in select test1.b from test1 loop + null; + end loop; + delete from test1; + commit; + perform pg_advisory_lock(1); + raise notice 'r = %', r; + end; +$$; +} + +session "s2" +setup +{ + SELECT pg_advisory_unlock_all(); +} +step "lock" +{ + SELECT pg_advisory_lock(1); +} +step "vacuum" +{ + VACUUM test1; +} +step "unlock" +{ + SELECT pg_advisory_unlock(1); +} + +permutation "lock" "assign1" "vacuum" "unlock" +permutation "lock" "assign2" "vacuum" "unlock" +permutation "lock" "assign3" "vacuum" "unlock" +permutation "lock" "assign4" "vacuum" "unlock" +permutation "lock" "assign5" "vacuum" "unlock" diff --git a/src/test/isolation/specs/predicate-gin-fastupdate.spec b/src/test/isolation/specs/predicate-gin-fastupdate.spec new file mode 100644 index 0000000000..04b8036fc5 --- /dev/null +++ b/src/test/isolation/specs/predicate-gin-fastupdate.spec @@ -0,0 +1,49 @@ +# +# Test that predicate locking on a GIN index works correctly, even if +# fastupdate is turned on concurrently. +# +# 0. fastupdate is off +# 1. Session 's1' acquires predicate lock on page X +# 2. fastupdate is turned on +# 3. Session 's2' inserts a new tuple to the pending list +# +# This test tests that if the lock acquired in step 1 would conflict with +# the scan in step 1, we detect that conflict correctly, even if fastupdate +# was turned on in-between. 
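A rough sketch of the mechanism being toggled (the table, index and insert match this spec's setup and steps; the interpretation of the pending-list behaviour is exactly what the last permutation demonstrates):

    -- with fastupdate = off, new entries go straight into the index proper,
    -- so page-level predicate locks taken by a serializable scan cover them
    CREATE INDEX ginidx ON gin_tbl USING gin (p) WITH (fastupdate = off);

    -- turning fastupdate on redirects new tuples into the pending list first
    ALTER INDEX ginidx SET (fastupdate = on);

    -- this insert lands in the pending list, which the page-level locks taken
    -- before the ALTER do not cover, so the conflict can be missed
    INSERT INTO gin_tbl SELECT array[1000, 19001];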
+# +setup +{ + create table gin_tbl(p int4[]); + insert into gin_tbl select array[g, g*2,g*3] from generate_series(1, 10000) g; + insert into gin_tbl select array[4,5,6] from generate_series(10001, 20000) g; + create index ginidx on gin_tbl using gin(p) with (fastupdate = off); + + create table other_tbl (id int4); +} + +teardown +{ + drop table gin_tbl; + drop table other_tbl; +} + +session "s1" +setup { BEGIN ISOLATION LEVEL SERIALIZABLE; SET enable_seqscan=off; } +step "r1" { SELECT count(*) FROM gin_tbl WHERE p @> array[1000]; } +step "w1" { INSERT INTO other_tbl VALUES (42); } +step "c1" { COMMIT; } + +session "s2" +setup { BEGIN ISOLATION LEVEL SERIALIZABLE; SET enable_seqscan=off; } +step "r2" { SELECT * FROM other_tbl; } +step "w2" { INSERT INTO gin_tbl SELECT array[1000,19001]; } +step "c2" { COMMIT; } + +session "s3" +step "fastupdate_on" { ALTER INDEX ginidx SET (fastupdate = on); } + +# This correctly throws serialization failure. +permutation "r1" "r2" "w1" "c1" "w2" "c2" + +# But if fastupdate is turned on in the middle, we miss it. +permutation "r1" "r2" "w1" "c1" "fastupdate_on" "w2" "c2" diff --git a/src/test/isolation/specs/predicate-gin-nomatch.spec b/src/test/isolation/specs/predicate-gin-nomatch.spec new file mode 100644 index 0000000000..0ad456cb14 --- /dev/null +++ b/src/test/isolation/specs/predicate-gin-nomatch.spec @@ -0,0 +1,35 @@ +# +# Check that GIN index grabs an appropriate lock, even if there is no match. +# +setup +{ + create table gin_tbl(p int4[]); + insert into gin_tbl select array[g, g*2,g*3] from generate_series(1, 10000) g; + insert into gin_tbl select array[4,5,6] from generate_series(10001, 20000) g; + create index ginidx on gin_tbl using gin(p) with (fastupdate = off); + + create table other_tbl (id int4); +} + +teardown +{ + drop table gin_tbl; + drop table other_tbl; +} + +session "s1" +setup { BEGIN ISOLATION LEVEL SERIALIZABLE; SET enable_seqscan=off; } +# Scan with no match. +step "r1" { SELECT count(*) FROM gin_tbl WHERE p @> array[-1]; } +step "w1" { INSERT INTO other_tbl VALUES (42); } +step "c1" { COMMIT; } + +session "s2" +setup { BEGIN ISOLATION LEVEL SERIALIZABLE; SET enable_seqscan=off; } +step "r2" { SELECT * FROM other_tbl; } +# Insert row that would've matched in step "r1" +step "w2" { INSERT INTO gin_tbl SELECT array[-1]; } +step "c2" { COMMIT; } + +# This should throw serialization failure. +permutation "r1" "r2" "w1" "c1" "w2" "c2" diff --git a/src/test/isolation/specs/predicate-gin.spec b/src/test/isolation/specs/predicate-gin.spec new file mode 100644 index 0000000000..6a4069e995 --- /dev/null +++ b/src/test/isolation/specs/predicate-gin.spec @@ -0,0 +1,133 @@ +# Test for page level predicate locking in gin index +# +# Test to verify serialization failures and to check reduced false positives +# +# To verify serialization failures, queries and permutations are written in such +# a way that an index scan (from one transaction) and an index insert (from +# another transaction) will try to access the same part (sub-tree) of the index +# whereas to check reduced false positives, they will try to access different +# parts (sub-tree) of the index. 
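Concretely, the serialization failures these permutations expect look like the following when run by hand in two psql sessions (statements copied from the spec's steps; the error may be raised at the second INSERT or at COMMIT, whichever closes the cycle):

    -- session 1
    BEGIN ISOLATION LEVEL SERIALIZABLE;
    SET enable_seqscan = off;
    SELECT count(*) FROM gin_tbl WHERE p @> array[4,5];

    -- session 2
    BEGIN ISOLATION LEVEL SERIALIZABLE;
    SET enable_seqscan = off;
    SELECT count(*) FROM gin_tbl WHERE p @> array[5,6];

    -- each session now inserts rows the other session's scan would have matched
    -- session 1
    INSERT INTO gin_tbl SELECT g, array[5,6] FROM generate_series(20001, 20050) g;
    COMMIT;
    -- session 2
    INSERT INTO gin_tbl SELECT g, array[4,5] FROM generate_series(20051, 20100) g;
    -- ERROR:  could not serialize access due to read/write dependencies among transactions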
+ + +setup +{ + create table gin_tbl(id int4, p int4[]); + insert into gin_tbl select g, array[g, g*2,g*3] from generate_series(1, 10000) g; + insert into gin_tbl select g, array[4,5,6] from generate_series(10001, 20000) g; + create index ginidx on gin_tbl using gin(p) with (fastupdate = off); +} + +teardown +{ + drop table gin_tbl; +} + +session "s1" +setup +{ + begin isolation level serializable; + set enable_seqscan=off; +} + +# enable pending list for a small subset of tests +step "fu1" { alter index ginidx set (fastupdate = on); + commit; + begin isolation level serializable; + set enable_seqscan=off; } + +step "rxy1" { select count(*) from gin_tbl where p @> array[4,5]; } +step "wx1" { insert into gin_tbl select g, array[5,6] from generate_series + (20001, 20050) g; } +step "rxy3" { select count(*) from gin_tbl where p @> array[1,2] or + p @> array[100,200] or p @> array[500,1000] or p @> array[1000,2000]; } +step "wx3" { insert into gin_tbl select g, array[g,g*2] from generate_series + (1, 50) g; } +step "c1" { commit; } + +session "s2" +setup +{ + begin isolation level serializable; + set enable_seqscan=off; +} + +step "rxy2" { select count(*) from gin_tbl where p @> array[5,6]; } +step "rxy2fu" { select count(*) from gin_tbl where p @> array[10000,10005]; } +step "wy2" { insert into gin_tbl select g, array[4,5] from + generate_series(20051, 20100) g; } +step "wy2fu" { insert into gin_tbl select g, array[10000,10005] from + generate_series(20051, 20100) g; } +step "rxy4" { select count(*) from gin_tbl where p @> array[4000,8000] or + p @> array[5000,10000] or p @> array[6000,12000] or + p @> array[8000,16000]; } +step "wy4" { insert into gin_tbl select g, array[g,g*2] from generate_series + (10000, 10050) g; } +step "c2" { commit; } + + +# An index scan (from one transaction) and an index insert (from another transaction) +# try to access the same part of the index but one transaction commits before other +# transaction begins so no r-w conflict. + +permutation "rxy1" "wx1" "c1" "rxy2" "wy2" "c2" +permutation "rxy2" "wy2" "c2" "rxy1" "wx1" "c1" + +# An index scan (from one transaction) and an index insert (from another transaction) +# try to access different parts of the index and also one transaction commits before +# other transaction begins, so no r-w conflict. + +permutation "rxy3" "wx3" "c1" "rxy4" "wy4" "c2" +permutation "rxy4" "wy4" "c2" "rxy3" "wx3" "c1" + + +# An index scan (from one transaction) and an index insert (from another transaction) +# try to access the same part of the index and one transaction begins before other +# transaction commits so there is a r-w conflict. 
+ +permutation "rxy1" "wx1" "rxy2" "c1" "wy2" "c2" +permutation "rxy1" "wx1" "rxy2" "wy2" "c1" "c2" +permutation "rxy1" "wx1" "rxy2" "wy2" "c2" "c1" +permutation "rxy1" "rxy2" "wx1" "c1" "wy2" "c2" +permutation "rxy1" "rxy2" "wx1" "wy2" "c1" "c2" +permutation "rxy1" "rxy2" "wx1" "wy2" "c2" "c1" +permutation "rxy1" "rxy2" "wy2" "wx1" "c1" "c2" +permutation "rxy1" "rxy2" "wy2" "wx1" "c2" "c1" +permutation "rxy1" "rxy2" "wy2" "c2" "wx1" "c1" +permutation "rxy2" "rxy1" "wx1" "c1" "wy2" "c2" +permutation "rxy2" "rxy1" "wx1" "wy2" "c1" "c2" +permutation "rxy2" "rxy1" "wx1" "wy2" "c2" "c1" +permutation "rxy2" "rxy1" "wy2" "wx1" "c1" "c2" +permutation "rxy2" "rxy1" "wy2" "wx1" "c2" "c1" +permutation "rxy2" "rxy1" "wy2" "c2" "wx1" "c1" +permutation "rxy2" "wy2" "rxy1" "wx1" "c1" "c2" +permutation "rxy2" "wy2" "rxy1" "wx1" "c2" "c1" +permutation "rxy2" "wy2" "rxy1" "c2" "wx1" "c1" + +# An index scan (from one transaction) and an index insert (from another transaction) +# try to access different parts of the index so no r-w conflict. + +permutation "rxy3" "wx3" "rxy4" "c1" "wy4" "c2" +permutation "rxy3" "wx3" "rxy4" "wy4" "c1" "c2" +permutation "rxy3" "wx3" "rxy4" "wy4" "c2" "c1" +permutation "rxy3" "rxy4" "wx3" "c1" "wy4" "c2" +permutation "rxy3" "rxy4" "wx3" "wy4" "c1" "c2" +permutation "rxy3" "rxy4" "wx3" "wy4" "c2" "c1" +permutation "rxy3" "rxy4" "wy4" "wx3" "c1" "c2" +permutation "rxy3" "rxy4" "wy4" "wx3" "c2" "c1" +permutation "rxy3" "rxy4" "wy4" "c2" "wx3" "c1" +permutation "rxy4" "rxy3" "wx3" "c1" "wy4" "c2" +permutation "rxy4" "rxy3" "wx3" "wy4" "c1" "c2" +permutation "rxy4" "rxy3" "wx3" "wy4" "c2" "c1" +permutation "rxy4" "rxy3" "wy4" "wx3" "c1" "c2" +permutation "rxy4" "rxy3" "wy4" "wx3" "c2" "c1" +permutation "rxy4" "rxy3" "wy4" "c2" "wx3" "c1" +permutation "rxy4" "wy4" "rxy3" "wx3" "c1" "c2" +permutation "rxy4" "wy4" "rxy3" "wx3" "c2" "c1" +permutation "rxy4" "wy4" "rxy3" "c2" "wx3" "c1" + +# Test fastupdate = on. First test should pass because fastupdate is off and +# sessions touches different parts of index, second should fail because +# with fastupdate on, then whole index should be under predicate lock. + +permutation "rxy1" "rxy2fu" "wx1" "c1" "wy2fu" "c2" +permutation "fu1" "rxy1" "rxy2fu" "wx1" "c1" "wy2fu" "c2" diff --git a/src/test/isolation/specs/predicate-gist.spec b/src/test/isolation/specs/predicate-gist.spec new file mode 100644 index 0000000000..6d6021f5e4 --- /dev/null +++ b/src/test/isolation/specs/predicate-gist.spec @@ -0,0 +1,117 @@ +# Test for page level predicate locking in gist +# +# Test to verify serialization failures and to check reduced false positives +# +# To verify serialization failures, queries and permutations are written in such +# a way that an index scan (from one transaction) and an index insert (from +# another transaction) will try to access the same part (sub-tree) of the index +# whereas to check reduced false positives, they will try to access different +# parts (sub-tree) of the index. 
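For the GiST tests, "same part" and "different part" of the index are spatial regions; a sketch of what the operators in the steps below select (table and data as in the setup that follows):

    -- the table holds points on the diagonal: (10,10), (20,20), ..., (10000,10000)

    -- a serializable scan of everything strictly left of x = 2500 ...
    SELECT sum(p[0]) FROM gist_point_tbl WHERE p << point(2500, 2500);

    -- ... has a read/write conflict with inserts that fall inside that region
    INSERT INTO gist_point_tbl (id, p)
      SELECT g, point(g*500, g*500) FROM generate_series(1, 5) g;     -- x = 500 .. 2500

    -- but inserts far to the upper right normally touch different index pages,
    -- and therefore different predicate locks, so no false conflict is reported
    INSERT INTO gist_point_tbl (id, p)
      SELECT g, point(g*500, g*500) FROM generate_series(15, 20) g;   -- x = 7500 .. 10000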
+ +setup +{ + create table gist_point_tbl(id int4, p point); + create index gist_pointidx on gist_point_tbl using gist(p); + insert into gist_point_tbl (id, p) + select g, point(g*10, g*10) from generate_series(1, 1000) g; +} + +teardown +{ + drop table gist_point_tbl; +} + +session "s1" +setup +{ + begin isolation level serializable; + set enable_seqscan=off; + set enable_bitmapscan=off; + set enable_indexonlyscan=on; +} + +step "rxy1" { select sum(p[0]) from gist_point_tbl where p << point(2500, 2500); } +step "wx1" { insert into gist_point_tbl (id, p) + select g, point(g*500, g*500) from generate_series(15, 20) g; } +step "rxy3" { select sum(p[0]) from gist_point_tbl where p >> point(6000,6000); } +step "wx3" { insert into gist_point_tbl (id, p) + select g, point(g*500, g*500) from generate_series(12, 18) g; } +step "c1" { commit; } + + +session "s2" +setup +{ + begin isolation level serializable; + set enable_seqscan=off; + set enable_bitmapscan=off; + set enable_indexonlyscan=on; +} + +step "rxy2" { select sum(p[0]) from gist_point_tbl where p >> point(7500,7500); } +step "wy2" { insert into gist_point_tbl (id, p) + select g, point(g*500, g*500) from generate_series(1, 5) g; } +step "rxy4" { select sum(p[0]) from gist_point_tbl where p << point(1000,1000); } +step "wy4" { insert into gist_point_tbl (id, p) + select g, point(g*50, g*50) from generate_series(1, 20) g; } +step "c2" { commit; } + +# An index scan (from one transaction) and an index insert (from another +# transaction) try to access the same part of the index but one transaction +# commits before other transaction begins so no r-w conflict. + +permutation "rxy1" "wx1" "c1" "rxy2" "wy2" "c2" +permutation "rxy2" "wy2" "c2" "rxy1" "wx1" "c1" + +# An index scan (from one transaction) and an index insert (from another +# transaction) try to access different parts of the index and also one +# transaction commits before other transaction begins, so no r-w conflict. + +permutation "rxy3" "wx3" "c1" "rxy4" "wy4" "c2" +permutation "rxy4" "wy4" "c2" "rxy3" "wx3" "c1" + + +# An index scan (from one transaction) and an index insert (from another +# transaction) try to access the same part of the index and one transaction +# begins before other transaction commits so there is a r-w conflict. + +permutation "rxy1" "wx1" "rxy2" "c1" "wy2" "c2" +permutation "rxy1" "wx1" "rxy2" "wy2" "c1" "c2" +permutation "rxy1" "wx1" "rxy2" "wy2" "c2" "c1" +permutation "rxy1" "rxy2" "wx1" "c1" "wy2" "c2" +permutation "rxy1" "rxy2" "wx1" "wy2" "c1" "c2" +permutation "rxy1" "rxy2" "wx1" "wy2" "c2" "c1" +permutation "rxy1" "rxy2" "wy2" "wx1" "c1" "c2" +permutation "rxy1" "rxy2" "wy2" "wx1" "c2" "c1" +permutation "rxy1" "rxy2" "wy2" "c2" "wx1" "c1" +permutation "rxy2" "rxy1" "wx1" "c1" "wy2" "c2" +permutation "rxy2" "rxy1" "wx1" "wy2" "c1" "c2" +permutation "rxy2" "rxy1" "wx1" "wy2" "c2" "c1" +permutation "rxy2" "rxy1" "wy2" "wx1" "c1" "c2" +permutation "rxy2" "rxy1" "wy2" "wx1" "c2" "c1" +permutation "rxy2" "rxy1" "wy2" "c2" "wx1" "c1" +permutation "rxy2" "wy2" "rxy1" "wx1" "c1" "c2" +permutation "rxy2" "wy2" "rxy1" "wx1" "c2" "c1" +permutation "rxy2" "wy2" "rxy1" "c2" "wx1" "c1" + +# An index scan (from one transaction) and an index insert (from another +# transaction) try to access different parts of the index so no r-w conflict. 
+ +permutation "rxy3" "wx3" "rxy4" "c1" "wy4" "c2" +permutation "rxy3" "wx3" "rxy4" "wy4" "c1" "c2" +permutation "rxy3" "wx3" "rxy4" "wy4" "c2" "c1" +permutation "rxy3" "rxy4" "wx3" "c1" "wy4" "c2" +permutation "rxy3" "rxy4" "wx3" "wy4" "c1" "c2" +permutation "rxy3" "rxy4" "wx3" "wy4" "c2" "c1" +permutation "rxy3" "rxy4" "wy4" "wx3" "c1" "c2" +permutation "rxy3" "rxy4" "wy4" "wx3" "c2" "c1" +permutation "rxy3" "rxy4" "wy4" "c2" "wx3" "c1" +permutation "rxy4" "rxy3" "wx3" "c1" "wy4" "c2" +permutation "rxy4" "rxy3" "wx3" "wy4" "c1" "c2" +permutation "rxy4" "rxy3" "wx3" "wy4" "c2" "c1" +permutation "rxy4" "rxy3" "wy4" "wx3" "c1" "c2" +permutation "rxy4" "rxy3" "wy4" "wx3" "c2" "c1" +permutation "rxy4" "rxy3" "wy4" "c2" "wx3" "c1" +permutation "rxy4" "wy4" "rxy3" "wx3" "c1" "c2" +permutation "rxy4" "wy4" "rxy3" "wx3" "c2" "c1" +permutation "rxy4" "wy4" "rxy3" "c2" "wx3" "c1" diff --git a/src/test/isolation/specs/predicate-hash.spec b/src/test/isolation/specs/predicate-hash.spec new file mode 100644 index 0000000000..852c1ca29d --- /dev/null +++ b/src/test/isolation/specs/predicate-hash.spec @@ -0,0 +1,122 @@ +# Test for page level predicate locking in hash index +# +# Test to verify serialization failures and to check reduced false positives +# +# To verify serialization failures, queries and permutations are written in such +# a way that an index scan (from one transaction) and an index insert (from +# another transaction) will try to access the same bucket of the index +# whereas to check reduced false positives, they will try to access different +# buckets of the index. + +setup +{ + create table hash_tbl(id int4, p integer); + create index hash_idx on hash_tbl using hash(p); + insert into hash_tbl (id, p) + select g, 10 from generate_series(1, 10) g; + insert into hash_tbl (id, p) + select g, 20 from generate_series(11, 20) g; + insert into hash_tbl (id, p) + select g, 30 from generate_series(21, 30) g; + insert into hash_tbl (id, p) + select g, 40 from generate_series(31, 40) g; +} + +teardown +{ + drop table hash_tbl; +} + +session "s1" +setup +{ + begin isolation level serializable; + set enable_seqscan=off; + set enable_bitmapscan=off; + set enable_indexonlyscan=on; +} +step "rxy1" { select sum(p) from hash_tbl where p=20; } +step "wx1" { insert into hash_tbl (id, p) + select g, 30 from generate_series(41, 50) g; } +step "rxy3" { select sum(p) from hash_tbl where p=20; } +step "wx3" { insert into hash_tbl (id, p) + select g, 50 from generate_series(41, 50) g; } +step "c1" { commit; } + + +session "s2" +setup +{ + begin isolation level serializable; + set enable_seqscan=off; + set enable_bitmapscan=off; + set enable_indexonlyscan=on; +} +step "rxy2" { select sum(p) from hash_tbl where p=30; } +step "wy2" { insert into hash_tbl (id, p) + select g, 20 from generate_series(51, 60) g; } +step "rxy4" { select sum(p) from hash_tbl where p=30; } +step "wy4" { insert into hash_tbl (id, p) + select g, 60 from generate_series(51, 60) g; } +step "c2" { commit; } + + +# An index scan (from one transaction) and an index insert (from another +# transaction) try to access the same bucket of the index but one transaction +# commits before other transaction begins so no r-w conflict. + +permutation "rxy1" "wx1" "c1" "rxy2" "wy2" "c2" +permutation "rxy2" "wy2" "c2" "rxy1" "wx1" "c1" + +# An index scan (from one transaction) and an index insert (from another +# transaction) try to access different buckets of the index and also one +# transaction commits before other transaction begins, so no r-w conflict. 
+ +permutation "rxy3" "wx3" "c1" "rxy4" "wy4" "c2" +permutation "rxy4" "wy4" "c2" "rxy3" "wx3" "c1" + + +# An index scan (from one transaction) and an index insert (from another +# transaction) try to access the same bucket of the index and one transaction +# begins before other transaction commits so there is a r-w conflict. + +permutation "rxy1" "wx1" "rxy2" "c1" "wy2" "c2" +permutation "rxy1" "wx1" "rxy2" "wy2" "c1" "c2" +permutation "rxy1" "wx1" "rxy2" "wy2" "c2" "c1" +permutation "rxy1" "rxy2" "wx1" "c1" "wy2" "c2" +permutation "rxy1" "rxy2" "wx1" "wy2" "c1" "c2" +permutation "rxy1" "rxy2" "wx1" "wy2" "c2" "c1" +permutation "rxy1" "rxy2" "wy2" "wx1" "c1" "c2" +permutation "rxy1" "rxy2" "wy2" "wx1" "c2" "c1" +permutation "rxy1" "rxy2" "wy2" "c2" "wx1" "c1" +permutation "rxy2" "rxy1" "wx1" "c1" "wy2" "c2" +permutation "rxy2" "rxy1" "wx1" "wy2" "c1" "c2" +permutation "rxy2" "rxy1" "wx1" "wy2" "c2" "c1" +permutation "rxy2" "rxy1" "wy2" "wx1" "c1" "c2" +permutation "rxy2" "rxy1" "wy2" "wx1" "c2" "c1" +permutation "rxy2" "rxy1" "wy2" "c2" "wx1" "c1" +permutation "rxy2" "wy2" "rxy1" "wx1" "c1" "c2" +permutation "rxy2" "wy2" "rxy1" "wx1" "c2" "c1" +permutation "rxy2" "wy2" "rxy1" "c2" "wx1" "c1" + +# An index scan (from one transaction) and an index insert (from another +# transaction) try to access different buckets of the index so no r-w conflict. + +permutation "rxy3" "wx3" "rxy4" "c1" "wy4" "c2" +permutation "rxy3" "wx3" "rxy4" "wy4" "c1" "c2" +permutation "rxy3" "wx3" "rxy4" "wy4" "c2" "c1" +permutation "rxy3" "rxy4" "wx3" "c1" "wy4" "c2" +permutation "rxy3" "rxy4" "wx3" "wy4" "c1" "c2" +permutation "rxy3" "rxy4" "wx3" "wy4" "c2" "c1" +permutation "rxy3" "rxy4" "wy4" "wx3" "c1" "c2" +permutation "rxy3" "rxy4" "wy4" "wx3" "c2" "c1" +permutation "rxy3" "rxy4" "wy4" "c2" "wx3" "c1" +permutation "rxy4" "rxy3" "wx3" "c1" "wy4" "c2" +permutation "rxy4" "rxy3" "wx3" "wy4" "c1" "c2" +permutation "rxy4" "rxy3" "wx3" "wy4" "c2" "c1" +permutation "rxy4" "rxy3" "wy4" "wx3" "c1" "c2" +permutation "rxy4" "rxy3" "wy4" "wx3" "c2" "c1" +permutation "rxy4" "rxy3" "wy4" "c2" "wx3" "c1" +permutation "rxy4" "wy4" "rxy3" "wx3" "c1" "c2" +permutation "rxy4" "wy4" "rxy3" "wx3" "c2" "c1" +permutation "rxy4" "wy4" "rxy3" "c2" "wx3" "c1" diff --git a/src/test/isolation/specs/truncate-conflict.spec b/src/test/isolation/specs/truncate-conflict.spec new file mode 100644 index 0000000000..3c1b1d1b34 --- /dev/null +++ b/src/test/isolation/specs/truncate-conflict.spec @@ -0,0 +1,38 @@ +# Tests for locking conflicts with TRUNCATE commands. + +setup +{ + CREATE ROLE regress_truncate_conflict; + CREATE TABLE truncate_tab (a int); +} + +teardown +{ + DROP TABLE truncate_tab; + DROP ROLE regress_truncate_conflict; +} + +session "s1" +step "s1_begin" { BEGIN; } +step "s1_tab_lookup" { SELECT count(*) >= 0 FROM truncate_tab; } +step "s1_commit" { COMMIT; } + +session "s2" +step "s2_grant" { GRANT TRUNCATE ON truncate_tab TO regress_truncate_conflict; } +step "s2_auth" { SET ROLE regress_truncate_conflict; } +step "s2_truncate" { TRUNCATE truncate_tab; } +step "s2_reset" { RESET ROLE; } + +# The role doesn't have privileges to truncate the table, so TRUNCATE should +# immediately fail without waiting for a lock. 
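The permutations just below exercise exactly that; by hand (role and table as created in this spec's setup), the unprivileged case fails on the permission check instead of waiting for the lock:

    -- session 1: take an ordinary lock on the table and keep the transaction open
    BEGIN;
    SELECT count(*) >= 0 FROM truncate_tab;

    -- session 2, as a role that has not been granted TRUNCATE
    SET ROLE regress_truncate_conflict;
    TRUNCATE truncate_tab;   -- fails at once with a permission-denied error,
                             -- without queueing behind session 1's lock
    RESET ROLE;

    -- after GRANT TRUNCATE ON truncate_tab TO regress_truncate_conflict, the same
    -- TRUNCATE instead blocks until session 1 commits, and then succeeds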
+permutation "s1_begin" "s1_tab_lookup" "s2_auth" "s2_truncate" "s1_commit" "s2_reset" +permutation "s1_begin" "s2_auth" "s2_truncate" "s1_tab_lookup" "s1_commit" "s2_reset" +permutation "s1_begin" "s2_auth" "s1_tab_lookup" "s2_truncate" "s1_commit" "s2_reset" +permutation "s2_auth" "s2_truncate" "s1_begin" "s1_tab_lookup" "s1_commit" "s2_reset" + +# The role has privileges to truncate the table, TRUNCATE will block if +# another session holds a lock on the table and succeed in all cases. +permutation "s1_begin" "s1_tab_lookup" "s2_grant" "s2_auth" "s2_truncate" "s1_commit" "s2_reset" +permutation "s1_begin" "s2_grant" "s2_auth" "s2_truncate" "s1_tab_lookup" "s1_commit" "s2_reset" +permutation "s1_begin" "s2_grant" "s2_auth" "s1_tab_lookup" "s2_truncate" "s1_commit" "s2_reset" +permutation "s2_grant" "s2_auth" "s2_truncate" "s1_begin" "s1_tab_lookup" "s1_commit" "s2_reset" diff --git a/src/test/isolation/specs/vacuum-concurrent-drop.spec b/src/test/isolation/specs/vacuum-concurrent-drop.spec new file mode 100644 index 0000000000..cae4092667 --- /dev/null +++ b/src/test/isolation/specs/vacuum-concurrent-drop.spec @@ -0,0 +1,45 @@ +# Test for log messages emitted by VACUUM and ANALYZE when a specified +# relation is concurrently dropped. +# +# This also verifies that log messages are not emitted for concurrently +# dropped relations that were not specified in the VACUUM or ANALYZE +# command. + +setup +{ + CREATE TABLE parted (a INT) PARTITION BY LIST (a); + CREATE TABLE part1 PARTITION OF parted FOR VALUES IN (1); + CREATE TABLE part2 PARTITION OF parted FOR VALUES IN (2); +} + +teardown +{ + DROP TABLE IF EXISTS parted; +} + +session "s1" +step "lock" +{ + BEGIN; + LOCK part1 IN SHARE MODE; +} +step "drop_and_commit" +{ + DROP TABLE part2; + COMMIT; +} + +session "s2" +step "vac_specified" { VACUUM part1, part2; } +step "vac_all_parts" { VACUUM parted; } +step "analyze_specified" { ANALYZE part1, part2; } +step "analyze_all_parts" { ANALYZE parted; } +step "vac_analyze_specified" { VACUUM ANALYZE part1, part2; } +step "vac_analyze_all_parts" { VACUUM ANALYZE parted; } + +permutation "lock" "vac_specified" "drop_and_commit" +permutation "lock" "vac_all_parts" "drop_and_commit" +permutation "lock" "analyze_specified" "drop_and_commit" +permutation "lock" "analyze_all_parts" "drop_and_commit" +permutation "lock" "vac_analyze_specified" "drop_and_commit" +permutation "lock" "vac_analyze_all_parts" "drop_and_commit" diff --git a/src/test/isolation/specs/vacuum-conflict.spec b/src/test/isolation/specs/vacuum-conflict.spec new file mode 100644 index 0000000000..9b45d26c65 --- /dev/null +++ b/src/test/isolation/specs/vacuum-conflict.spec @@ -0,0 +1,51 @@ +# Tests for locking conflicts with VACUUM and ANALYZE commands. + +setup +{ + CREATE ROLE regress_vacuum_conflict; + CREATE TABLE vacuum_tab (a int); +} + +teardown +{ + DROP TABLE vacuum_tab; + DROP ROLE regress_vacuum_conflict; +} + +session "s1" +step "s1_begin" { BEGIN; } +step "s1_lock" { LOCK vacuum_tab IN SHARE UPDATE EXCLUSIVE MODE; } +step "s1_commit" { COMMIT; } + +session "s2" +step "s2_grant" { ALTER TABLE vacuum_tab OWNER TO regress_vacuum_conflict; } +step "s2_auth" { SET ROLE regress_vacuum_conflict; } +step "s2_vacuum" { VACUUM vacuum_tab; } +step "s2_analyze" { ANALYZE vacuum_tab; } +step "s2_reset" { RESET ROLE; } + +# The role doesn't have privileges to vacuum the table, so VACUUM should +# immediately skip the table without waiting for a lock. 
+permutation "s1_begin" "s1_lock" "s2_auth" "s2_vacuum" "s1_commit" "s2_reset" +permutation "s1_begin" "s2_auth" "s2_vacuum" "s1_lock" "s1_commit" "s2_reset" +permutation "s1_begin" "s2_auth" "s1_lock" "s2_vacuum" "s1_commit" "s2_reset" +permutation "s2_auth" "s2_vacuum" "s1_begin" "s1_lock" "s1_commit" "s2_reset" + +# Same as previously for ANALYZE +permutation "s1_begin" "s1_lock" "s2_auth" "s2_analyze" "s1_commit" "s2_reset" +permutation "s1_begin" "s2_auth" "s2_analyze" "s1_lock" "s1_commit" "s2_reset" +permutation "s1_begin" "s2_auth" "s1_lock" "s2_analyze" "s1_commit" "s2_reset" +permutation "s2_auth" "s2_analyze" "s1_begin" "s1_lock" "s1_commit" "s2_reset" + +# The role has privileges to vacuum the table, VACUUM will block if +# another session holds a lock on the table and succeed in all cases. +permutation "s1_begin" "s2_grant" "s1_lock" "s2_auth" "s2_vacuum" "s1_commit" "s2_reset" +permutation "s1_begin" "s2_grant" "s2_auth" "s2_vacuum" "s1_lock" "s1_commit" "s2_reset" +permutation "s1_begin" "s2_grant" "s2_auth" "s1_lock" "s2_vacuum" "s1_commit" "s2_reset" +permutation "s2_grant" "s2_auth" "s2_vacuum" "s1_begin" "s1_lock" "s1_commit" "s2_reset" + +# Same as previously for ANALYZE +permutation "s1_begin" "s2_grant" "s1_lock" "s2_auth" "s2_analyze" "s1_commit" "s2_reset" +permutation "s1_begin" "s2_grant" "s2_auth" "s2_analyze" "s1_lock" "s1_commit" "s2_reset" +permutation "s1_begin" "s2_grant" "s2_auth" "s1_lock" "s2_analyze" "s1_commit" "s2_reset" +permutation "s2_grant" "s2_auth" "s2_analyze" "s1_begin" "s1_lock" "s1_commit" "s2_reset" diff --git a/src/test/isolation/specs/vacuum-skip-locked.spec b/src/test/isolation/specs/vacuum-skip-locked.spec new file mode 100644 index 0000000000..4d59b294ca --- /dev/null +++ b/src/test/isolation/specs/vacuum-skip-locked.spec @@ -0,0 +1,59 @@ +# Test for SKIP_LOCKED option of VACUUM and ANALYZE commands. +# +# This also verifies that log messages are not emitted for skipped relations +# that were not specified in the VACUUM or ANALYZE command. 
+ +setup +{ + CREATE TABLE parted (a INT) PARTITION BY LIST (a); + CREATE TABLE part1 PARTITION OF parted FOR VALUES IN (1); + CREATE TABLE part2 PARTITION OF parted FOR VALUES IN (2); +} + +teardown +{ + DROP TABLE IF EXISTS parted; +} + +session "s1" +step "lock_share" +{ + BEGIN; + LOCK part1 IN SHARE MODE; +} +step "lock_access_exclusive" +{ + BEGIN; + LOCK part1 IN ACCESS EXCLUSIVE MODE; +} +step "commit" +{ + COMMIT; +} + +session "s2" +step "vac_specified" { VACUUM (SKIP_LOCKED) part1, part2; } +step "vac_all_parts" { VACUUM (SKIP_LOCKED) parted; } +step "analyze_specified" { ANALYZE (SKIP_LOCKED) part1, part2; } +step "analyze_all_parts" { ANALYZE (SKIP_LOCKED) parted; } +step "vac_analyze_specified" { VACUUM (ANALYZE, SKIP_LOCKED) part1, part2; } +step "vac_analyze_all_parts" { VACUUM (ANALYZE, SKIP_LOCKED) parted; } +step "vac_full_specified" { VACUUM (SKIP_LOCKED, FULL) part1, part2; } +step "vac_full_all_parts" { VACUUM (SKIP_LOCKED, FULL) parted; } + +permutation "lock_share" "vac_specified" "commit" +permutation "lock_share" "vac_all_parts" "commit" +permutation "lock_share" "analyze_specified" "commit" +permutation "lock_share" "analyze_all_parts" "commit" +permutation "lock_share" "vac_analyze_specified" "commit" +permutation "lock_share" "vac_analyze_all_parts" "commit" +permutation "lock_share" "vac_full_specified" "commit" +permutation "lock_share" "vac_full_all_parts" "commit" +permutation "lock_access_exclusive" "vac_specified" "commit" +permutation "lock_access_exclusive" "vac_all_parts" "commit" +permutation "lock_access_exclusive" "analyze_specified" "commit" +permutation "lock_access_exclusive" "analyze_all_parts" "commit" +permutation "lock_access_exclusive" "vac_analyze_specified" "commit" +permutation "lock_access_exclusive" "vac_analyze_all_parts" "commit" +permutation "lock_access_exclusive" "vac_full_specified" "commit" +permutation "lock_access_exclusive" "vac_full_all_parts" "commit" diff --git a/src/test/isolation/specscanner.l b/src/test/isolation/specscanner.l index aed9269c63..f97f4027f2 100644 --- a/src/test/isolation/specscanner.l +++ b/src/test/isolation/specscanner.l @@ -4,7 +4,7 @@ * specscanner.l * a lexical scanner for an isolation test specification * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * *------------------------------------------------------------------------- @@ -12,11 +12,15 @@ static int yyline = 1; /* line number for error reporting */ -static char litbuf[1024]; -static int litbufpos = 0; +#define LITBUF_INIT 1024 /* initial size of litbuf */ +static char *litbuf = NULL; +static size_t litbufsize = 0; +static size_t litbufpos = 0; static void addlitchar(char c); +/* LCOV_EXCL_START */ + %} %option 8bit @@ -39,11 +43,16 @@ comment ("#"{non_newline}*) %% -permutation { return(PERMUTATION); } -session { return(SESSION); } -setup { return(SETUP); } -step { return(STEP); } -teardown { return(TEARDOWN); } +%{ + litbuf = pg_malloc(LITBUF_INIT); + litbufsize = LITBUF_INIT; +%} + +permutation { return PERMUTATION; } +session { return SESSION; } +setup { return SETUP; } +step { return STEP; } +teardown { return TEARDOWN; } [\n] { yyline++; } {comment} { /* ignore */ } @@ -93,13 +102,17 @@ teardown { return(TEARDOWN); } } %% +/* LCOV_EXCL_STOP */ + static void addlitchar(char c) { - if (litbufpos >= sizeof(litbuf) - 1) + /* We must always leave room to add a trailing \0 
*/ + if (litbufpos >= litbufsize - 1) { - fprintf(stderr, "SQL step too long\n"); - exit(1); + /* Double the size of litbuf if it gets full */ + litbufsize += litbufsize; + litbuf = pg_realloc(litbuf, litbufsize); } litbuf[litbufpos++] = c; } diff --git a/src/test/kerberos/.gitignore b/src/test/kerberos/.gitignore new file mode 100644 index 0000000000..871e943d50 --- /dev/null +++ b/src/test/kerberos/.gitignore @@ -0,0 +1,2 @@ +# Generated by test suite +/tmp_check/ diff --git a/src/test/kerberos/Makefile b/src/test/kerberos/Makefile new file mode 100644 index 0000000000..4df4989470 --- /dev/null +++ b/src/test/kerberos/Makefile @@ -0,0 +1,25 @@ +#------------------------------------------------------------------------- +# +# Makefile for src/test/kerberos +# +# Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group +# Portions Copyright (c) 1994, Regents of the University of California +# +# src/test/kerberos/Makefile +# +#------------------------------------------------------------------------- + +subdir = src/test/kerberos +top_builddir = ../../.. +include $(top_builddir)/src/Makefile.global + +export with_gssapi with_krb_srvnam + +check: + $(prove_check) + +installcheck: + $(prove_installcheck) + +clean distclean maintainer-clean: + rm -rf tmp_check diff --git a/src/test/kerberos/README b/src/test/kerberos/README new file mode 100644 index 0000000000..93af72e163 --- /dev/null +++ b/src/test/kerberos/README @@ -0,0 +1,43 @@ +src/test/kerberos/README + +Tests for Kerberos/GSSAPI functionality +======================================= + +This directory contains a test suite for Kerberos/GSSAPI +functionality. This requires a full MIT Kerberos installation, +including server and client tools, and is therefore kept separate and +not run by default. + +Also, this test suite creates a KDC server that listens for TCP/IP +connections on localhost without any real access control, so it is not +safe to run this on a system where there might be untrusted local +users. + +Running the tests +================= + +NOTE: You must have given the --enable-tap-tests argument to configure. + +Run + make check +or + make installcheck +You can use "make installcheck" if you previously did "make install". +In that case, the code in the installation tree is tested. With +"make check", a temporary installation tree is built from the current +sources and then tested. + +Either way, this test initializes, starts, and stops a test Postgres +cluster, as well as a test KDC server. + +Requirements +============ + +MIT Kerberos server and client tools are required. Heimdal is not +supported. 
+ +Debian/Ubuntu packages: krb5-admin-server krb5-kdc krb5-user + +RHEL/CentOS/Fedora packages: krb5-server krb5-workstation + +FreeBSD port: krb5 (base system has Heimdal) diff --git a/src/test/kerberos/t/001_auth.pl b/src/test/kerberos/t/001_auth.pl new file mode 100644 index 0000000000..1be89aef4f --- /dev/null +++ b/src/test/kerberos/t/001_auth.pl @@ -0,0 +1,195 @@ +use strict; +use warnings; +use TestLib; +use PostgresNode; +use Test::More; + +if ($ENV{with_gssapi} eq 'yes') +{ + plan tests => 4; +} +else +{ + plan skip_all => 'GSSAPI/Kerberos not supported by this build'; +} + +my ($krb5_bin_dir, $krb5_sbin_dir); + +if ($^O eq 'darwin') +{ + $krb5_bin_dir = '/usr/local/opt/krb5/bin'; + $krb5_sbin_dir = '/usr/local/opt/krb5/sbin'; +} +elsif ($^O eq 'freebsd') +{ + $krb5_bin_dir = '/usr/local/bin'; + $krb5_sbin_dir = '/usr/local/sbin'; +} +elsif ($^O eq 'linux') +{ + $krb5_sbin_dir = '/usr/sbin'; +} + +my $krb5_config = 'krb5-config'; +my $kinit = 'kinit'; +my $kdb5_util = 'kdb5_util'; +my $kadmin_local = 'kadmin.local'; +my $krb5kdc = 'krb5kdc'; + +if ($krb5_bin_dir && -d $krb5_bin_dir) +{ + $krb5_config = $krb5_bin_dir . '/' . $krb5_config; + $kinit = $krb5_bin_dir . '/' . $kinit; +} +if ($krb5_sbin_dir && -d $krb5_sbin_dir) +{ + $kdb5_util = $krb5_sbin_dir . '/' . $kdb5_util; + $kadmin_local = $krb5_sbin_dir . '/' . $kadmin_local; + $krb5kdc = $krb5_sbin_dir . '/' . $krb5kdc; +} + +my $host = 'auth-test-localhost.postgresql.example.com'; +my $hostaddr = '127.0.0.1'; +my $realm = 'EXAMPLE.COM'; + +my $krb5_conf = "${TestLib::tmp_check}/krb5.conf"; +my $kdc_conf = "${TestLib::tmp_check}/kdc.conf"; +my $krb5_log = "${TestLib::tmp_check}/krb5libs.log"; +my $kdc_log = "${TestLib::tmp_check}/krb5kdc.log"; +my $kdc_port = int(rand() * 16384) + 49152; +my $kdc_datadir = "${TestLib::tmp_check}/krb5kdc"; +my $kdc_pidfile = "${TestLib::tmp_check}/krb5kdc.pid"; +my $keytab = "${TestLib::tmp_check}/krb5.keytab"; + +note "setting up Kerberos"; + +my ($stdout, $krb5_version); +run_log [ $krb5_config, '--version' ], '>', \$stdout + or BAIL_OUT("could not execute krb5-config"); +BAIL_OUT("Heimdal is not supported") if $stdout =~ m/heimdal/; +$stdout =~ m/Kerberos 5 release ([0-9]+\.[0-9]+)/ + or BAIL_OUT("could not get Kerberos version"); +$krb5_version = $1; + +append_to_file( + $krb5_conf, + qq![logging] +default = FILE:$krb5_log +kdc = FILE:$kdc_log + +[libdefaults] +default_realm = $realm + +[realms] +$realm = { + kdc = $hostaddr:$kdc_port +}!); + +append_to_file( + $kdc_conf, + qq![kdcdefaults] +!); + +# For new-enough versions of krb5, use the _listen settings rather +# than the _ports settings so that we can bind to localhost only. +if ($krb5_version >= 1.15) +{ + append_to_file( + $kdc_conf, + qq!kdc_listen = $hostaddr:$kdc_port +kdc_tcp_listen = $hostaddr:$kdc_port +!); +} +else +{ + append_to_file( + $kdc_conf, + qq!kdc_ports = $kdc_port +kdc_tcp_ports = $kdc_port +!); +} +append_to_file( + $kdc_conf, + qq! 
+[realms] +$realm = { + database_name = $kdc_datadir/principal + admin_keytab = FILE:$kdc_datadir/kadm5.keytab + acl_file = $kdc_datadir/kadm5.acl + key_stash_file = $kdc_datadir/_k5.$realm +}!); + +mkdir $kdc_datadir or die; + +$ENV{'KRB5_CONFIG'} = $krb5_conf; +$ENV{'KRB5_KDC_PROFILE'} = $kdc_conf; + +my $service_principal = "$ENV{with_krb_srvnam}/$host"; + +system_or_bail $kdb5_util, 'create', '-s', '-P', 'secret0'; + +my $test1_password = 'secret1'; +system_or_bail $kadmin_local, '-q', "addprinc -pw $test1_password test1"; + +system_or_bail $kadmin_local, '-q', "addprinc -randkey $service_principal"; +system_or_bail $kadmin_local, '-q', "ktadd -k $keytab $service_principal"; + +system_or_bail $krb5kdc, '-P', $kdc_pidfile; + +END +{ + kill 'INT', `cat $kdc_pidfile` if -f $kdc_pidfile; +} + +note "setting up PostgreSQL instance"; + +my $node = get_new_node('node'); +$node->init; +$node->append_conf('postgresql.conf', "listen_addresses = '$hostaddr'"); +$node->append_conf('postgresql.conf', "krb_server_keyfile = '$keytab'"); +$node->start; + +$node->safe_psql('postgres', 'CREATE USER test1;'); + +note "running tests"; + +sub test_access +{ + my ($node, $role, $expected_res, $test_name) = @_; + + # need to connect over TCP/IP for Kerberos + my $res = $node->psql( + 'postgres', + 'SELECT 1', + extra_params => [ + '-d', + $node->connstr('postgres') . " host=$host hostaddr=$hostaddr", + '-U', $role + ]); + is($res, $expected_res, $test_name); + return; +} + +unlink($node->data_dir . '/pg_hba.conf'); +$node->append_conf('pg_hba.conf', + qq{host all all $hostaddr/32 gss map=mymap}); +$node->restart; + +test_access($node, 'test1', 2, 'fails without ticket'); + +run_log [ $kinit, 'test1' ], \$test1_password or BAIL_OUT($?); + +test_access($node, 'test1', 2, 'fails without mapping'); + +$node->append_conf('pg_ident.conf', qq{mymap /^(.*)\@$realm\$ \\1}); +$node->restart; + +test_access($node, 'test1', 0, 'succeeds with mapping'); + +truncate($node->data_dir . '/pg_ident.conf', 0); +unlink($node->data_dir . '/pg_hba.conf'); +$node->append_conf('pg_hba.conf', + qq{host all all $hostaddr/32 gss include_realm=0}); +$node->restart; + +test_access($node, 'test1', 0, 'succeeds with include_realm=0'); diff --git a/src/test/ldap/.gitignore b/src/test/ldap/.gitignore new file mode 100644 index 0000000000..871e943d50 --- /dev/null +++ b/src/test/ldap/.gitignore @@ -0,0 +1,2 @@ +# Generated by test suite +/tmp_check/ diff --git a/src/test/ldap/Makefile b/src/test/ldap/Makefile new file mode 100644 index 0000000000..74fef48650 --- /dev/null +++ b/src/test/ldap/Makefile @@ -0,0 +1,25 @@ +#------------------------------------------------------------------------- +# +# Makefile for src/test/ldap +# +# Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group +# Portions Copyright (c) 1994, Regents of the University of California +# +# src/test/ldap/Makefile +# +#------------------------------------------------------------------------- + +subdir = src/test/ldap +top_builddir = ../../.. +include $(top_builddir)/src/Makefile.global + +export with_ldap + +check: + $(prove_check) + +installcheck: + $(prove_installcheck) + +clean distclean maintainer-clean: + rm -rf tmp_check diff --git a/src/test/ldap/README b/src/test/ldap/README new file mode 100644 index 0000000000..4d641f9284 --- /dev/null +++ b/src/test/ldap/README @@ -0,0 +1,52 @@ +src/test/ldap/README + +Tests for LDAP functionality +============================ + +This directory contains a test suite for LDAP functionality. 
This +requires a full OpenLDAP installation, including server and client +tools, and is therefore kept separate and not run by default. You +might need to adjust some paths in the test file to have it find +OpenLDAP in a place that hadn't been thought of yet. + +Also, this test suite creates an LDAP server that listens for TCP/IP +connections on localhost without any real access control, so it is not +safe to run this on a system where there might be untrusted local +users. + +Running the tests +================= + +NOTE: You must have given the --enable-tap-tests argument to configure. + +Run + make check +or + make installcheck +You can use "make installcheck" if you previously did "make install". +In that case, the code in the installation tree is tested. With +"make check", a temporary installation tree is built from the current +sources and then tested. + +Either way, this test initializes, starts, and stops a test Postgres +cluster, as well as a test LDAP server. + +Requirements +============ + +LDAP server and client tools are required. + +Debian/Ubuntu packages: slapd ldap-utils + +RHEL/CentOS/Fedora packages: openldap-clients openldap-servers +(You will already have needed openldap and openldap-devel to build.) + +FreeBSD: openldap-server +(You will already have needed openldap-client to build. If building +from the ports source tree, you want to build net/openldap24-client +and net/openldap24-server.) + +macOS: We do not recommend trying to use the Apple-provided version of +OpenLDAP; it's very old, plus Apple seem to have changed the launching +conventions for slapd. The paths in the test file are set on the +assumption that you installed OpenLDAP using Homebrew. diff --git a/src/test/ldap/authdata.ldif b/src/test/ldap/authdata.ldif new file mode 100644 index 0000000000..c0a15daffb --- /dev/null +++ b/src/test/ldap/authdata.ldif @@ -0,0 +1,32 @@ +dn: dc=example,dc=net +objectClass: top +objectClass: dcObject +objectClass: organization +dc: example +o: ExampleCo + +dn: uid=test1,dc=example,dc=net +objectClass: inetOrgPerson +objectClass: posixAccount +uid: test1 +sn: Lastname +givenName: Firstname +cn: First Test User +displayName: First Test User +uidNumber: 101 +gidNumber: 100 +homeDirectory: /home/test1 +mail: test1@example.net + +dn: uid=test2,dc=example,dc=net +objectClass: inetOrgPerson +objectClass: posixAccount +uid: test2 +sn: Lastname +givenName: Firstname +cn: Second Test User +displayName: Second Test User +uidNumber: 102 +gidNumber: 100 +homeDirectory: /home/test2 +mail: test2@example.net diff --git a/src/test/ldap/t/001_auth.pl b/src/test/ldap/t/001_auth.pl new file mode 100644 index 0000000000..67b406c981 --- /dev/null +++ b/src/test/ldap/t/001_auth.pl @@ -0,0 +1,289 @@ +use strict; +use warnings; +use TestLib; +use PostgresNode; +use Test::More; + +if ($ENV{with_ldap} eq 'yes') +{ + plan tests => 19; +} +else +{ + plan skip_all => 'LDAP not supported by this build'; +} + +my ($slapd, $ldap_bin_dir, $ldap_schema_dir); + +$ldap_bin_dir = undef; # usually in PATH + +if ($^O eq 'darwin') +{ + $slapd = '/usr/local/opt/openldap/libexec/slapd'; + $ldap_schema_dir = '/usr/local/etc/openldap/schema'; +} +elsif ($^O eq 'linux') +{ + $slapd = '/usr/sbin/slapd'; + $ldap_schema_dir = '/etc/ldap/schema' if -d '/etc/ldap/schema'; + $ldap_schema_dir = '/etc/openldap/schema' if -d '/etc/openldap/schema'; +} +elsif ($^O eq 'freebsd') +{ + $slapd = '/usr/local/libexec/slapd'; + $ldap_schema_dir = '/usr/local/etc/openldap/schema'; +} + +# make your own edits here +#$slapd = ''; 
+#$ldap_bin_dir = ''; +#$ldap_schema_dir = ''; + +$ENV{PATH} = "$ldap_bin_dir:$ENV{PATH}" if $ldap_bin_dir; + +my $ldap_datadir = "${TestLib::tmp_check}/openldap-data"; +my $slapd_certs = "${TestLib::tmp_check}/slapd-certs"; +my $slapd_conf = "${TestLib::tmp_check}/slapd.conf"; +my $slapd_pidfile = "${TestLib::tmp_check}/slapd.pid"; +my $slapd_logfile = "${TestLib::tmp_check}/slapd.log"; +my $ldap_conf = "${TestLib::tmp_check}/ldap.conf"; +my $ldap_server = 'localhost'; +my $ldap_port = int(rand() * 16384) + 49152; +my $ldaps_port = $ldap_port + 1; +my $ldap_url = "ldap://$ldap_server:$ldap_port"; +my $ldaps_url = "ldaps://$ldap_server:$ldaps_port"; +my $ldap_basedn = 'dc=example,dc=net'; +my $ldap_rootdn = 'cn=Manager,dc=example,dc=net'; +my $ldap_rootpw = 'secret'; +my $ldap_pwfile = "${TestLib::tmp_check}/ldappassword"; + +note "setting up slapd"; + +append_to_file( + $slapd_conf, + qq{include $ldap_schema_dir/core.schema +include $ldap_schema_dir/cosine.schema +include $ldap_schema_dir/nis.schema +include $ldap_schema_dir/inetorgperson.schema + +pidfile $slapd_pidfile +logfile $slapd_logfile + +access to * + by * read + by anonymous auth + +database ldif +directory $ldap_datadir + +TLSCACertificateFile $slapd_certs/ca.crt +TLSCertificateFile $slapd_certs/server.crt +TLSCertificateKeyFile $slapd_certs/server.key + +suffix "dc=example,dc=net" +rootdn "$ldap_rootdn" +rootpw $ldap_rootpw}); + +# don't bother to check the server's cert (though perhaps we should) +append_to_file( + $ldap_conf, + qq{TLS_REQCERT never +}); + +mkdir $ldap_datadir or die; +mkdir $slapd_certs or die; + +system_or_bail "openssl", "req", "-new", "-nodes", "-keyout", + "$slapd_certs/ca.key", "-x509", "-out", "$slapd_certs/ca.crt", "-subj", + "/cn=CA"; +system_or_bail "openssl", "req", "-new", "-nodes", "-keyout", + "$slapd_certs/server.key", "-out", "$slapd_certs/server.csr", "-subj", + "/cn=server"; +system_or_bail "openssl", "x509", "-req", "-in", "$slapd_certs/server.csr", + "-CA", "$slapd_certs/ca.crt", "-CAkey", "$slapd_certs/ca.key", + "-CAcreateserial", "-out", "$slapd_certs/server.crt"; + +system_or_bail $slapd, '-f', $slapd_conf, '-h', "$ldap_url $ldaps_url"; + +END +{ + kill 'INT', `cat $slapd_pidfile` if -f $slapd_pidfile; +} + +append_to_file($ldap_pwfile, $ldap_rootpw); +chmod 0600, $ldap_pwfile or die; + +$ENV{'LDAPURI'} = $ldap_url; +$ENV{'LDAPBINDDN'} = $ldap_rootdn; +$ENV{'LDAPCONF'} = $ldap_conf; + +note "loading LDAP data"; + +system_or_bail 'ldapadd', '-x', '-y', $ldap_pwfile, '-f', 'authdata.ldif'; +system_or_bail 'ldappasswd', '-x', '-y', $ldap_pwfile, '-s', 'secret1', + 'uid=test1,dc=example,dc=net'; +system_or_bail 'ldappasswd', '-x', '-y', $ldap_pwfile, '-s', 'secret2', + 'uid=test2,dc=example,dc=net'; + +note "setting up PostgreSQL instance"; + +my $node = get_new_node('node'); +$node->init; +$node->start; + +$node->safe_psql('postgres', 'CREATE USER test0;'); +$node->safe_psql('postgres', 'CREATE USER test1;'); +$node->safe_psql('postgres', 'CREATE USER "test2@example.net";'); + +note "running tests"; + +sub test_access +{ + my ($node, $role, $expected_res, $test_name) = @_; + + my $res = + $node->psql('postgres', 'SELECT 1', extra_params => [ '-U', $role ]); + is($res, $expected_res, $test_name); + return; +} + +note "simple bind"; + +unlink($node->data_dir . 
'/pg_hba.conf'); +$node->append_conf('pg_hba.conf', + qq{local all all ldap ldapserver=$ldap_server ldapport=$ldap_port ldapprefix="uid=" ldapsuffix=",dc=example,dc=net"} +); +$node->restart; + +$ENV{"PGPASSWORD"} = 'wrong'; +test_access($node, 'test0', 2, + 'simple bind authentication fails if user not found in LDAP'); +test_access($node, 'test1', 2, + 'simple bind authentication fails with wrong password'); +$ENV{"PGPASSWORD"} = 'secret1'; +test_access($node, 'test1', 0, 'simple bind authentication succeeds'); + +note "search+bind"; + +unlink($node->data_dir . '/pg_hba.conf'); +$node->append_conf('pg_hba.conf', + qq{local all all ldap ldapserver=$ldap_server ldapport=$ldap_port ldapbasedn="$ldap_basedn"} +); +$node->restart; + +$ENV{"PGPASSWORD"} = 'wrong'; +test_access($node, 'test0', 2, + 'search+bind authentication fails if user not found in LDAP'); +test_access($node, 'test1', 2, + 'search+bind authentication fails with wrong password'); +$ENV{"PGPASSWORD"} = 'secret1'; +test_access($node, 'test1', 0, 'search+bind authentication succeeds'); + +note "LDAP URLs"; + +unlink($node->data_dir . '/pg_hba.conf'); +$node->append_conf('pg_hba.conf', + qq{local all all ldap ldapurl="$ldap_url/$ldap_basedn?uid?sub"}); +$node->restart; + +$ENV{"PGPASSWORD"} = 'wrong'; +test_access($node, 'test0', 2, + 'search+bind with LDAP URL authentication fails if user not found in LDAP' +); +test_access($node, 'test1', 2, + 'search+bind with LDAP URL authentication fails with wrong password'); +$ENV{"PGPASSWORD"} = 'secret1'; +test_access($node, 'test1', 0, + 'search+bind with LDAP URL authentication succeeds'); + +note "search filters"; + +unlink($node->data_dir . '/pg_hba.conf'); +$node->append_conf('pg_hba.conf', + qq{local all all ldap ldapserver=$ldap_server ldapport=$ldap_port ldapbasedn="$ldap_basedn" ldapsearchfilter="(|(uid=\$username)(mail=\$username))"} +); +$node->restart; + +$ENV{"PGPASSWORD"} = 'secret1'; +test_access($node, 'test1', 0, 'search filter finds by uid'); +$ENV{"PGPASSWORD"} = 'secret2'; +test_access($node, 'test2@example.net', 0, 'search filter finds by mail'); + +note "search filters in LDAP URLs"; + +unlink($node->data_dir . '/pg_hba.conf'); +$node->append_conf('pg_hba.conf', + qq{local all all ldap ldapurl="$ldap_url/$ldap_basedn??sub?(|(uid=\$username)(mail=\$username))"} +); +$node->restart; + +$ENV{"PGPASSWORD"} = 'secret1'; +test_access($node, 'test1', 0, 'search filter finds by uid'); +$ENV{"PGPASSWORD"} = 'secret2'; +test_access($node, 'test2@example.net', 0, 'search filter finds by mail'); + +# This is not documented: You can combine ldapurl and other ldap* +# settings. ldapurl is always parsed first, then the other settings +# override. It might be useful in a case like this. +unlink($node->data_dir . '/pg_hba.conf'); +$node->append_conf('pg_hba.conf', + qq{local all all ldap ldapurl="$ldap_url/$ldap_basedn??sub" ldapsearchfilter="(|(uid=\$username)(mail=\$username))"} +); +$node->restart; + +$ENV{"PGPASSWORD"} = 'secret1'; +test_access($node, 'test1', 0, 'combined LDAP URL and search filter'); + +note "diagnostic message"; + +# note bad ldapprefix with a question mark that triggers a diagnostic message +unlink($node->data_dir . 
'/pg_hba.conf'); +$node->append_conf('pg_hba.conf', + qq{local all all ldap ldapserver=$ldap_server ldapport=$ldap_port ldapprefix="?uid=" ldapsuffix=""} +); +$node->restart; + +$ENV{"PGPASSWORD"} = 'secret1'; +test_access($node, 'test1', 2, 'any attempt fails due to bad search pattern'); + +note "TLS"; + +# request StartTLS with ldaptls=1 +unlink($node->data_dir . '/pg_hba.conf'); +$node->append_conf('pg_hba.conf', + qq{local all all ldap ldapserver=$ldap_server ldapport=$ldap_port ldapbasedn="$ldap_basedn" ldapsearchfilter="(uid=\$username)" ldaptls=1} +); +$node->restart; + +$ENV{"PGPASSWORD"} = 'secret1'; +test_access($node, 'test1', 0, 'StartTLS'); + +# request LDAPS with ldapscheme=ldaps +unlink($node->data_dir . '/pg_hba.conf'); +$node->append_conf('pg_hba.conf', + qq{local all all ldap ldapserver=$ldap_server ldapscheme=ldaps ldapport=$ldaps_port ldapbasedn="$ldap_basedn" ldapsearchfilter="(uid=\$username)"} +); +$node->restart; + +$ENV{"PGPASSWORD"} = 'secret1'; +test_access($node, 'test1', 0, 'LDAPS'); + +# request LDAPS with ldapurl=ldaps://... +unlink($node->data_dir . '/pg_hba.conf'); +$node->append_conf('pg_hba.conf', + qq{local all all ldap ldapurl="$ldaps_url/$ldap_basedn??sub?(uid=\$username)"} +); +$node->restart; + +$ENV{"PGPASSWORD"} = 'secret1'; +test_access($node, 'test1', 0, 'LDAPS with URL'); + +# bad combination of LDAPS and StartTLS +unlink($node->data_dir . '/pg_hba.conf'); +$node->append_conf('pg_hba.conf', + qq{local all all ldap ldapurl="$ldaps_url/$ldap_basedn??sub?(uid=\$username)" ldaptls=1} +); +$node->restart; + +$ENV{"PGPASSWORD"} = 'secret1'; +test_access($node, 'test1', 2, 'bad combination of LDAPS and StartTLS'); diff --git a/src/test/locale/Makefile b/src/test/locale/Makefile index 26ec5c9a90..22a45b65f2 100644 --- a/src/test/locale/Makefile +++ b/src/test/locale/Makefile @@ -16,5 +16,6 @@ clean distclean maintainer-clean: $(MAKE) -C $$d clean || exit; \ done +# These behave like installcheck targets. 
check-%: all @$(MAKE) -C `echo $@ | sed 's/^check-//'` test diff --git a/src/test/modules/Makefile b/src/test/modules/Makefile index 3ce99046f8..19d60a506e 100644 --- a/src/test/modules/Makefile +++ b/src/test/modules/Makefile @@ -9,14 +9,15 @@ SUBDIRS = \ commit_ts \ dummy_seclabel \ snapshot_too_old \ + test_bloomfilter \ test_ddl_deparse \ test_extensions \ test_parser \ test_pg_dump \ + test_predtest \ + test_rbtree \ test_rls_hooks \ test_shm_mq \ worker_spi -all: submake-generated-headers - $(recurse) diff --git a/src/test/modules/brin/Makefile b/src/test/modules/brin/Makefile index dda84c23c7..566655cd61 100644 --- a/src/test/modules/brin/Makefile +++ b/src/test/modules/brin/Makefile @@ -1,6 +1,9 @@ # src/test/modules/brin/Makefile -EXTRA_CLEAN = ./isolation_output +# Note: because we don't tell the Makefile there are any regression tests, +# we have to clean those result files explicitly +EXTRA_CLEAN = $(pg_regress_clean_files) ./isolation_output + EXTRA_INSTALL=contrib/pageinspect ISOLATIONCHECKS=summarization-and-inprogress-insertion @@ -16,15 +19,18 @@ include $(top_builddir)/src/Makefile.global include $(top_srcdir)/contrib/contrib-global.mk endif -check: isolation-check +check: isolation-check prove-check -isolation-check: | submake-isolation +isolation-check: | submake-isolation temp-install $(MKDIR_P) isolation_output $(pg_isolation_regress_check) \ --outputdir=./isolation_output \ $(ISOLATIONCHECKS) -.PHONY: check isolation-check +prove-check: | temp-install + $(prove_check) + +.PHONY: check isolation-check prove-check submake-isolation: $(MAKE) -C $(top_builddir)/src/test/isolation all diff --git a/src/test/modules/brin/t/01_workitems.pl b/src/test/modules/brin/t/01_workitems.pl new file mode 100644 index 0000000000..534ab63ab2 --- /dev/null +++ b/src/test/modules/brin/t/01_workitems.pl @@ -0,0 +1,41 @@ +# Verify that work items work correctly + +use strict; +use warnings; + +use TestLib; +use Test::More tests => 2; +use PostgresNode; + +my $node = get_new_node('tango'); +$node->init; +$node->append_conf('postgresql.conf', 'autovacuum_naptime=1s'); +$node->start; + +$node->safe_psql('postgres', 'create extension pageinspect'); + +# Create a table with an autosummarizing BRIN index +$node->safe_psql( + 'postgres', + 'create table brin_wi (a int) with (fillfactor = 10); + create index brin_wi_idx on brin_wi using brin (a) with (pages_per_range=1, autosummarize=on); + ' +); +my $count = $node->safe_psql('postgres', + "select count(*) from brin_page_items(get_raw_page('brin_wi_idx', 2), 'brin_wi_idx'::regclass)" +); +is($count, '1', "initial index state is correct"); + +$node->safe_psql('postgres', + 'insert into brin_wi select * from generate_series(1, 100)'); + +$node->poll_query_until( + 'postgres', + "select count(*) > 1 from brin_page_items(get_raw_page('brin_wi_idx', 2), 'brin_wi_idx'::regclass)", + 't'); + +$count = $node->safe_psql('postgres', + "select count(*) > 1 from brin_page_items(get_raw_page('brin_wi_idx', 2), 'brin_wi_idx'::regclass)" +); +is($count, 't', "index got summarized"); +$node->stop; diff --git a/src/test/modules/commit_ts/Makefile b/src/test/modules/commit_ts/Makefile index 86b93b5e76..6d4f3be358 100644 --- a/src/test/modules/commit_ts/Makefile +++ b/src/test/modules/commit_ts/Makefile @@ -16,5 +16,5 @@ endif check: prove-check -prove-check: +prove-check: | temp-install $(prove_check) diff --git a/src/test/modules/commit_ts/t/001_base.pl b/src/test/modules/commit_ts/t/001_base.pl index 9290a85d89..f8d5d84cc5 100644 --- 
a/src/test/modules/commit_ts/t/001_base.pl +++ b/src/test/modules/commit_ts/t/001_base.pl @@ -16,11 +16,11 @@ $node->safe_psql('postgres', 'create table t as select now from (select now(), pg_sleep(1)) f'); my $true = $node->safe_psql('postgres', -'select t.now - ts.* < \'1s\' from t, pg_class c, pg_xact_commit_timestamp(c.xmin) ts where relname = \'t\'' + 'select t.now - ts.* < \'1s\' from t, pg_class c, pg_xact_commit_timestamp(c.xmin) ts where relname = \'t\'' ); is($true, 't', 'commit TS is set'); my $ts = $node->safe_psql('postgres', -'select ts.* from pg_class, pg_xact_commit_timestamp(xmin) ts where relname = \'t\'' + 'select ts.* from pg_class, pg_xact_commit_timestamp(xmin) ts where relname = \'t\'' ); # Verify that we read the same TS after crash recovery @@ -28,6 +28,6 @@ $node->start; my $recovered_ts = $node->safe_psql('postgres', -'select ts.* from pg_class, pg_xact_commit_timestamp(xmin) ts where relname = \'t\'' + 'select ts.* from pg_class, pg_xact_commit_timestamp(xmin) ts where relname = \'t\'' ); is($recovered_ts, $ts, 'commit TS remains after crash recovery'); diff --git a/src/test/modules/commit_ts/t/002_standby.pl b/src/test/modules/commit_ts/t/002_standby.pl index 83e851954b..f376b59596 100644 --- a/src/test/modules/commit_ts/t/002_standby.pl +++ b/src/test/modules/commit_ts/t/002_standby.pl @@ -28,7 +28,7 @@ $master->safe_psql('postgres', "create table t$i()"); } my $master_ts = $master->safe_psql('postgres', -qq{SELECT ts.* FROM pg_class, pg_xact_commit_timestamp(xmin) AS ts WHERE relname = 't10'} + qq{SELECT ts.* FROM pg_class, pg_xact_commit_timestamp(xmin) AS ts WHERE relname = 't10'} ); my $master_lsn = $master->safe_psql('postgres', 'select pg_current_wal_lsn()'); @@ -37,7 +37,7 @@ or die "standby never caught up"; my $standby_ts = $standby->safe_psql('postgres', -qq{select ts.* from pg_class, pg_xact_commit_timestamp(xmin) ts where relname = 't10'} + qq{select ts.* from pg_class, pg_xact_commit_timestamp(xmin) ts where relname = 't10'} ); is($master_ts, $standby_ts, "standby gives same value as master"); @@ -52,7 +52,7 @@ # This one should raise an error now my ($ret, $standby_ts_stdout, $standby_ts_stderr) = $standby->psql('postgres', -'select ts.* from pg_class, pg_xact_commit_timestamp(xmin) ts where relname = \'t10\'' + 'select ts.* from pg_class, pg_xact_commit_timestamp(xmin) ts where relname = \'t10\'' ); is($ret, 3, 'standby errors when master turned feature off'); is($standby_ts_stdout, '', diff --git a/src/test/modules/commit_ts/t/003_standby_2.pl b/src/test/modules/commit_ts/t/003_standby_2.pl index 27494709e1..9165d50053 100644 --- a/src/test/modules/commit_ts/t/003_standby_2.pl +++ b/src/test/modules/commit_ts/t/003_standby_2.pl @@ -40,7 +40,7 @@ my ($psql_ret, $standby_ts_stdout, $standby_ts_stderr) = $standby->psql( 'postgres', -qq{SELECT ts.* FROM pg_class, pg_xact_commit_timestamp(xmin) AS ts WHERE relname = 't10'} + qq{SELECT ts.* FROM pg_class, pg_xact_commit_timestamp(xmin) AS ts WHERE relname = 't10'} ); is($psql_ret, 3, 'expect error when getting commit timestamp after restart'); is($standby_ts_stdout, '', "standby does not return a value after restart"); @@ -58,7 +58,7 @@ $standby->safe_psql('postgres', "create table t11()"); my $standby_ts = $standby->safe_psql('postgres', -qq{SELECT ts.* FROM pg_class, pg_xact_commit_timestamp(xmin) AS ts WHERE relname = 't11'} + qq{SELECT ts.* FROM pg_class, pg_xact_commit_timestamp(xmin) AS ts WHERE relname = 't11'} ); isnt($standby_ts, '', "standby gives valid value ($standby_ts) after 
promotion"); diff --git a/src/test/modules/commit_ts/t/004_restart.pl b/src/test/modules/commit_ts/t/004_restart.pl index daf42d3a02..241b0b08dc 100644 --- a/src/test/modules/commit_ts/t/004_restart.pl +++ b/src/test/modules/commit_ts/t/004_restart.pl @@ -1,4 +1,4 @@ -# Testing of commit timestamps preservation across clean restarts +# Testing of commit timestamps preservation across restarts use strict; use warnings; use PostgresNode; @@ -71,12 +71,36 @@ 'timestamps before and after restart are equal'); # Now disable commit timestamps - $node_master->append_conf('postgresql.conf', 'track_commit_timestamp = off'); - $node_master->stop('fast'); + +# Start the server, which generates a XLOG_PARAMETER_CHANGE record where +# the parameter change is registered. $node_master->start; +# Now restart again the server so as no XLOG_PARAMETER_CHANGE record are +# replayed with the follow-up immediate shutdown. +$node_master->restart; + +# Move commit timestamps across page boundaries. Things should still +# be able to work across restarts with those transactions committed while +# track_commit_timestamp is disabled. +$node_master->safe_psql('postgres', +qq(CREATE PROCEDURE consume_xid(cnt int) +AS \$\$ +DECLARE + i int; + BEGIN + FOR i in 1..cnt LOOP + EXECUTE 'SELECT txid_current()'; + COMMIT; + END LOOP; + END; +\$\$ +LANGUAGE plpgsql; +)); +$node_master->safe_psql('postgres', 'CALL consume_xid(2000)'); + ($ret, $stdout, $stderr) = $node_master->psql('postgres', qq[SELECT pg_xact_commit_timestamp('$xid');]); is($ret, 3, 'no commit timestamp from enable tx when cts disabled'); @@ -106,10 +130,12 @@ # Re-enable, restart and ensure we can still get the old timestamps $node_master->append_conf('postgresql.conf', 'track_commit_timestamp = on'); -$node_master->stop('fast'); +# An immediate shutdown is used here. At next startup recovery will +# replay transactions which committed when track_commit_timestamp was +# disabled, and the facility should be able to work properly. +$node_master->stop('immediate'); $node_master->start; - my $after_enable_ts = $node_master->safe_psql('postgres', qq[SELECT pg_xact_commit_timestamp('$xid');]); is($after_enable_ts, '', 'timestamp of enabled tx null after re-enable'); diff --git a/src/test/modules/dummy_seclabel/dummy_seclabel.c b/src/test/modules/dummy_seclabel/dummy_seclabel.c index 7fd78f05c7..fc1e745444 100644 --- a/src/test/modules/dummy_seclabel/dummy_seclabel.c +++ b/src/test/modules/dummy_seclabel/dummy_seclabel.c @@ -7,7 +7,7 @@ * perspective, but allows regression testing independent of platform-specific * features like SELinux. 
* - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California */ #include "postgres.h" diff --git a/src/test/modules/dummy_seclabel/expected/dummy_seclabel.out b/src/test/modules/dummy_seclabel/expected/dummy_seclabel.out index 77bdc9345d..b2d898a7d1 100644 --- a/src/test/modules/dummy_seclabel/expected/dummy_seclabel.out +++ b/src/test/modules/dummy_seclabel/expected/dummy_seclabel.out @@ -30,14 +30,14 @@ SECURITY LABEL FOR 'dummy' ON TABLE dummy_seclabel_tbl1 IS 'unclassified'; -- OK SECURITY LABEL FOR 'unknown_seclabel' ON TABLE dummy_seclabel_tbl1 IS 'classified'; -- fail ERROR: security label provider "unknown_seclabel" is not loaded SECURITY LABEL ON TABLE dummy_seclabel_tbl2 IS 'unclassified'; -- fail (not owner) -ERROR: must be owner of relation dummy_seclabel_tbl2 +ERROR: must be owner of table dummy_seclabel_tbl2 SECURITY LABEL ON TABLE dummy_seclabel_tbl1 IS 'secret'; -- fail (not superuser) ERROR: only superuser can set 'secret' label SECURITY LABEL ON TABLE dummy_seclabel_tbl3 IS 'unclassified'; -- fail (not found) ERROR: relation "dummy_seclabel_tbl3" does not exist SET SESSION AUTHORIZATION regress_dummy_seclabel_user2; SECURITY LABEL ON TABLE dummy_seclabel_tbl1 IS 'unclassified'; -- fail -ERROR: must be owner of relation dummy_seclabel_tbl1 +ERROR: must be owner of table dummy_seclabel_tbl1 SECURITY LABEL ON TABLE dummy_seclabel_tbl2 IS 'classified'; -- OK -- -- Test for shared database object diff --git a/src/test/modules/test_bloomfilter/.gitignore b/src/test/modules/test_bloomfilter/.gitignore new file mode 100644 index 0000000000..5dcb3ff972 --- /dev/null +++ b/src/test/modules/test_bloomfilter/.gitignore @@ -0,0 +1,4 @@ +# Generated subdirectories +/log/ +/results/ +/tmp_check/ diff --git a/src/test/modules/test_bloomfilter/Makefile b/src/test/modules/test_bloomfilter/Makefile new file mode 100644 index 0000000000..808c9314d4 --- /dev/null +++ b/src/test/modules/test_bloomfilter/Makefile @@ -0,0 +1,21 @@ +# src/test/modules/test_bloomfilter/Makefile + +MODULE_big = test_bloomfilter +OBJS = test_bloomfilter.o $(WIN32RES) +PGFILEDESC = "test_bloomfilter - test code for Bloom filter library" + +EXTENSION = test_bloomfilter +DATA = test_bloomfilter--1.0.sql + +REGRESS = test_bloomfilter + +ifdef USE_PGXS +PG_CONFIG = pg_config +PGXS := $(shell $(PG_CONFIG) --pgxs) +include $(PGXS) +else +subdir = src/test/modules/test_bloomfilter +top_builddir = ../../../.. +include $(top_builddir)/src/Makefile.global +include $(top_srcdir)/contrib/contrib-global.mk +endif diff --git a/src/test/modules/test_bloomfilter/README b/src/test/modules/test_bloomfilter/README new file mode 100644 index 0000000000..4c05efe5a8 --- /dev/null +++ b/src/test/modules/test_bloomfilter/README @@ -0,0 +1,68 @@ +test_bloomfilter overview +========================= + +test_bloomfilter is a test harness module for testing Bloom filter library set +membership operations. It consists of a single SQL-callable function, +test_bloomfilter(), plus a regression test that calls test_bloomfilter(). +Membership tests are performed against a dataset that the test harness module +generates. + +The test_bloomfilter() function displays instrumentation at DEBUG1 elog level +(WARNING when the false positive rate exceeds a 1% threshold). 
This can be +used to get a sense of the performance characteristics of the Postgres Bloom +filter implementation under varied conditions. + +Bitset size +----------- + +The main bloomfilter.c criteria for sizing its bitset is that the false +positive rate should not exceed 2% when sufficient bloom_work_mem is available +(and the caller-supplied estimate of the number of elements turns out to have +been accurate). A 1% - 2% rate is currently assumed to be suitable for all +Bloom filter callers. + +With an optimal K (number of hash functions), Bloom filters should only have a +1% false positive rate with just 9.6 bits of memory per element. The Postgres +implementation's 2% worst case guarantee exists because there is a need for +some slop due to implementation inflexibility in bitset sizing. Since the +bitset size is always actually kept to a power of two number of bits, callers +can have their bloom_work_mem argument truncated down by almost half. +In practice, callers that make a point of passing a bloom_work_mem that is an +exact power of two bitset size (such as test_bloomfilter.c) will actually get +the "9.6 bits per element" 1% false positive rate. + +Testing strategy +---------------- + +Our approach to regression testing is to test that a Bloom filter has only a 1% +false positive rate for a single bitset size (2 ^ 23, or 1MB). We test a +dataset with 838,861 elements, which works out at 10 bits of memory per +element. We round up from 9.6 bits to 10 bits to make sure that we reliably +get under 1% for regression testing. Note that a random seed is used in the +regression tests because the exact false positive rate is inconsistent across +platforms. Inconsistent hash function behavior is something that the +regression tests need to be tolerant of anyway. + +test_bloomfilter() SQL-callable function +======================================== + +The SQL-callable function test_bloomfilter() provides the following arguments: + +* "power" is the power of two used to size the Bloom filter's bitset. + +The minimum valid argument value is 23 (2^23 bits), or 1MB of memory. The +maximum valid argument value is 32, or 512MB of memory. + +* "nelements" is the number of elements to generate for testing purposes. + +* "seed" is a seed value for hashing. + +A value < 0 is interpreted as "use random seed". Varying the seed value (or +specifying -1) should result in small variations in the total number of false +positives. + +* "tests" is the number of tests to run. + +This may be increased when it's useful to perform many tests in an interactive +session. It only makes sense to perform multiple tests when a random seed is +used. 
diff --git a/src/test/modules/test_bloomfilter/expected/test_bloomfilter.out b/src/test/modules/test_bloomfilter/expected/test_bloomfilter.out new file mode 100644 index 0000000000..21c068867d --- /dev/null +++ b/src/test/modules/test_bloomfilter/expected/test_bloomfilter.out @@ -0,0 +1,22 @@ +CREATE EXTENSION test_bloomfilter; +-- See README for explanation of arguments: +SELECT test_bloomfilter(power => 23, + nelements => 838861, + seed => -1, + tests => 1); + test_bloomfilter +------------------ + +(1 row) + +-- Equivalent "10 bits per element" tests for all possible bitset sizes: +-- +-- SELECT test_bloomfilter(24, 1677722) +-- SELECT test_bloomfilter(25, 3355443) +-- SELECT test_bloomfilter(26, 6710886) +-- SELECT test_bloomfilter(27, 13421773) +-- SELECT test_bloomfilter(28, 26843546) +-- SELECT test_bloomfilter(29, 53687091) +-- SELECT test_bloomfilter(30, 107374182) +-- SELECT test_bloomfilter(31, 214748365) +-- SELECT test_bloomfilter(32, 429496730) diff --git a/src/test/modules/test_bloomfilter/sql/test_bloomfilter.sql b/src/test/modules/test_bloomfilter/sql/test_bloomfilter.sql new file mode 100644 index 0000000000..9ec159ce40 --- /dev/null +++ b/src/test/modules/test_bloomfilter/sql/test_bloomfilter.sql @@ -0,0 +1,19 @@ +CREATE EXTENSION test_bloomfilter; + +-- See README for explanation of arguments: +SELECT test_bloomfilter(power => 23, + nelements => 838861, + seed => -1, + tests => 1); + +-- Equivalent "10 bits per element" tests for all possible bitset sizes: +-- +-- SELECT test_bloomfilter(24, 1677722) +-- SELECT test_bloomfilter(25, 3355443) +-- SELECT test_bloomfilter(26, 6710886) +-- SELECT test_bloomfilter(27, 13421773) +-- SELECT test_bloomfilter(28, 26843546) +-- SELECT test_bloomfilter(29, 53687091) +-- SELECT test_bloomfilter(30, 107374182) +-- SELECT test_bloomfilter(31, 214748365) +-- SELECT test_bloomfilter(32, 429496730) diff --git a/src/test/modules/test_bloomfilter/test_bloomfilter--1.0.sql b/src/test/modules/test_bloomfilter/test_bloomfilter--1.0.sql new file mode 100644 index 0000000000..7682318fe3 --- /dev/null +++ b/src/test/modules/test_bloomfilter/test_bloomfilter--1.0.sql @@ -0,0 +1,11 @@ +/* src/test/modules/test_bloomfilter/test_bloomfilter--1.0.sql */ + +-- complain if script is sourced in psql, rather than via CREATE EXTENSION +\echo Use "CREATE EXTENSION test_bloomfilter" to load this file. \quit + +CREATE FUNCTION test_bloomfilter(power integer, + nelements bigint, + seed integer DEFAULT -1, + tests integer DEFAULT 1) +RETURNS pg_catalog.void STRICT +AS 'MODULE_PATHNAME' LANGUAGE C; diff --git a/src/test/modules/test_bloomfilter/test_bloomfilter.c b/src/test/modules/test_bloomfilter/test_bloomfilter.c new file mode 100644 index 0000000000..3b04c65bd0 --- /dev/null +++ b/src/test/modules/test_bloomfilter/test_bloomfilter.c @@ -0,0 +1,138 @@ +/*-------------------------------------------------------------------------- + * + * test_bloomfilter.c + * Test false positive rate of Bloom filter. 
+ * + * Copyright (c) 2018, PostgreSQL Global Development Group + * + * IDENTIFICATION + * src/test/modules/test_bloomfilter/test_bloomfilter.c + * + * ------------------------------------------------------------------------- + */ +#include "postgres.h" + +#include "fmgr.h" +#include "lib/bloomfilter.h" +#include "miscadmin.h" + +PG_MODULE_MAGIC; + +/* Fits decimal representation of PG_INT64_MIN + 2 bytes: */ +#define MAX_ELEMENT_BYTES 21 +/* False positive rate WARNING threshold (1%): */ +#define FPOSITIVE_THRESHOLD 0.01 + + +/* + * Populate an empty Bloom filter with "nelements" dummy strings. + */ +static void +populate_with_dummy_strings(bloom_filter *filter, int64 nelements) +{ + char element[MAX_ELEMENT_BYTES]; + int64 i; + + for (i = 0; i < nelements; i++) + { + CHECK_FOR_INTERRUPTS(); + + snprintf(element, sizeof(element), "i" INT64_FORMAT, i); + bloom_add_element(filter, (unsigned char *) element, strlen(element)); + } +} + +/* + * Returns number of strings that are indicated as probably appearing in Bloom + * filter that were in fact never added by populate_with_dummy_strings(). + * These are false positives. + */ +static int64 +nfalsepos_for_missing_strings(bloom_filter *filter, int64 nelements) +{ + char element[MAX_ELEMENT_BYTES]; + int64 nfalsepos = 0; + int64 i; + + for (i = 0; i < nelements; i++) + { + CHECK_FOR_INTERRUPTS(); + + snprintf(element, sizeof(element), "M" INT64_FORMAT, i); + if (!bloom_lacks_element(filter, (unsigned char *) element, + strlen(element))) + nfalsepos++; + } + + return nfalsepos; +} + +static void +create_and_test_bloom(int power, int64 nelements, int callerseed) +{ + int bloom_work_mem; + uint64 seed; + int64 nfalsepos; + bloom_filter *filter; + + bloom_work_mem = (1L << power) / 8L / 1024L; + + elog(DEBUG1, "bloom_work_mem (KB): %d", bloom_work_mem); + + /* + * Generate random seed, or use caller's. Seed should always be a + * positive value less than or equal to PG_INT32_MAX, to ensure that any + * random seed can be recreated through callerseed if the need arises. + * (Don't assume that RAND_MAX cannot exceed PG_INT32_MAX.) + */ + seed = callerseed < 0 ? random() % PG_INT32_MAX : callerseed; + + /* Create Bloom filter, populate it, and report on false positive rate */ + filter = bloom_create(nelements, bloom_work_mem, seed); + populate_with_dummy_strings(filter, nelements); + nfalsepos = nfalsepos_for_missing_strings(filter, nelements); + + ereport((nfalsepos > nelements * FPOSITIVE_THRESHOLD) ? WARNING : DEBUG1, + (errmsg_internal("seed: " UINT64_FORMAT " false positives: " INT64_FORMAT " (%.6f%%) bitset %.2f%% set", + seed, nfalsepos, (double) nfalsepos / nelements, + 100.0 * bloom_prop_bits_set(filter)))); + + bloom_free(filter); +} + +PG_FUNCTION_INFO_V1(test_bloomfilter); + +/* + * SQL-callable entry point to perform all tests. + * + * If a 1% false positive threshold is not met, emits WARNINGs. + * + * See README for details of arguments. 
+ */ +Datum +test_bloomfilter(PG_FUNCTION_ARGS) +{ + int power = PG_GETARG_INT32(0); + int64 nelements = PG_GETARG_INT64(1); + int seed = PG_GETARG_INT32(2); + int tests = PG_GETARG_INT32(3); + int i; + + if (power < 23 || power > 32) + elog(ERROR, "power argument must be between 23 and 32 inclusive"); + + if (tests <= 0) + elog(ERROR, "invalid number of tests: %d", tests); + + if (nelements < 0) + elog(ERROR, "invalid number of elements: %d", tests); + + for (i = 0; i < tests; i++) + { + elog(DEBUG1, "beginning test #%d...", i + 1); + + create_and_test_bloom(power, nelements, seed); + } + + PG_RETURN_VOID(); +} diff --git a/src/test/modules/test_bloomfilter/test_bloomfilter.control b/src/test/modules/test_bloomfilter/test_bloomfilter.control new file mode 100644 index 0000000000..99e56eebdf --- /dev/null +++ b/src/test/modules/test_bloomfilter/test_bloomfilter.control @@ -0,0 +1,4 @@ +comment = 'Test code for Bloom filter library' +default_version = '1.0' +module_pathname = '$libdir/test_bloomfilter' +relocatable = true diff --git a/src/test/modules/test_ddl_deparse/expected/alter_table.out b/src/test/modules/test_ddl_deparse/expected/alter_table.out index e304787bc5..7da847d49e 100644 --- a/src/test/modules/test_ddl_deparse/expected/alter_table.out +++ b/src/test/modules/test_ddl_deparse/expected/alter_table.out @@ -16,3 +16,15 @@ NOTICE: DDL test: type simple, tag ALTER TABLE ALTER TABLE parent ADD CONSTRAINT a_pos CHECK (a > 0); NOTICE: DDL test: type alter table, tag ALTER TABLE NOTICE: subcommand: ADD CONSTRAINT (and recurse) +CREATE TABLE part ( + a int +) PARTITION BY RANGE (a); +NOTICE: DDL test: type simple, tag CREATE TABLE +CREATE TABLE part1 PARTITION OF part FOR VALUES FROM (1) to (100); +NOTICE: DDL test: type simple, tag CREATE TABLE +ALTER TABLE part ADD PRIMARY KEY (a); +NOTICE: DDL test: type alter table, tag CREATE INDEX +NOTICE: subcommand: SET NOT NULL +NOTICE: subcommand: SET NOT NULL +NOTICE: DDL test: type alter table, tag ALTER TABLE +NOTICE: subcommand: ADD INDEX diff --git a/src/test/modules/test_ddl_deparse/expected/matviews.out b/src/test/modules/test_ddl_deparse/expected/matviews.out index b946ff06d2..69a5627a4b 100644 --- a/src/test/modules/test_ddl_deparse/expected/matviews.out +++ b/src/test/modules/test_ddl_deparse/expected/matviews.out @@ -1,8 +1,8 @@ -- -- Materialized views -- -CREATE MATERIALIZED VIEW pg_class_mv AS - SELECT * FROM pg_class LIMIT 1 WITH NO DATA; +CREATE MATERIALIZED VIEW ddl_deparse_mv AS + SELECT * FROM datatype_table LIMIT 1 WITH NO DATA; NOTICE: DDL test: type simple, tag CREATE MATERIALIZED VIEW -REFRESH MATERIALIZED VIEW pg_class_mv; +REFRESH MATERIALIZED VIEW ddl_deparse_mv; NOTICE: DDL test: type simple, tag REFRESH MATERIALIZED VIEW diff --git a/src/test/modules/test_ddl_deparse/expected/test_ddl_deparse.out b/src/test/modules/test_ddl_deparse/expected/test_ddl_deparse.out index e2e49f9d7f..4a5ea9e9ed 100644 --- a/src/test/modules/test_ddl_deparse/expected/test_ddl_deparse.out +++ b/src/test/modules/test_ddl_deparse/expected/test_ddl_deparse.out @@ -12,13 +12,13 @@ BEGIN FOR r IN SELECT * FROM pg_event_trigger_ddl_commands() LOOP -- verify that tags match - tag = get_command_tag(r.command); + tag = public.get_command_tag(r.command); IF tag <> r.command_tag THEN RAISE NOTICE 'tag % doesn''t match %', tag, r.command_tag; END IF; -- log the operation - cmdtype = get_command_type(r.command); + cmdtype = public.get_command_type(r.command); IF cmdtype <> 'grant' THEN RAISE NOTICE 'DDL test: type %, tag %', cmdtype, tag; ELSE @@ 
-28,7 +28,7 @@ BEGIN -- if alter table, log more IF cmdtype = 'alter table' THEN FOR r2 IN SELECT * - FROM unnest(get_altertable_subcmdtypes(r.command)) + FROM unnest(public.get_altertable_subcmdtypes(r.command)) LOOP RAISE NOTICE ' subcommand: %', r2.unnest; END LOOP; diff --git a/src/test/modules/test_ddl_deparse/sql/alter_table.sql b/src/test/modules/test_ddl_deparse/sql/alter_table.sql index 6e2cca754e..dec53a0640 100644 --- a/src/test/modules/test_ddl_deparse/sql/alter_table.sql +++ b/src/test/modules/test_ddl_deparse/sql/alter_table.sql @@ -11,3 +11,11 @@ ALTER TABLE parent ADD COLUMN b serial; ALTER TABLE parent RENAME COLUMN b TO c; ALTER TABLE parent ADD CONSTRAINT a_pos CHECK (a > 0); + +CREATE TABLE part ( + a int +) PARTITION BY RANGE (a); + +CREATE TABLE part1 PARTITION OF part FOR VALUES FROM (1) to (100); + +ALTER TABLE part ADD PRIMARY KEY (a); diff --git a/src/test/modules/test_ddl_deparse/sql/matviews.sql b/src/test/modules/test_ddl_deparse/sql/matviews.sql index 381c11e1f8..6e22c52a4d 100644 --- a/src/test/modules/test_ddl_deparse/sql/matviews.sql +++ b/src/test/modules/test_ddl_deparse/sql/matviews.sql @@ -2,7 +2,7 @@ -- Materialized views -- -CREATE MATERIALIZED VIEW pg_class_mv AS - SELECT * FROM pg_class LIMIT 1 WITH NO DATA; +CREATE MATERIALIZED VIEW ddl_deparse_mv AS + SELECT * FROM datatype_table LIMIT 1 WITH NO DATA; -REFRESH MATERIALIZED VIEW pg_class_mv; +REFRESH MATERIALIZED VIEW ddl_deparse_mv; diff --git a/src/test/modules/test_ddl_deparse/sql/test_ddl_deparse.sql b/src/test/modules/test_ddl_deparse/sql/test_ddl_deparse.sql index 4d08aaa1c4..e257a215e4 100644 --- a/src/test/modules/test_ddl_deparse/sql/test_ddl_deparse.sql +++ b/src/test/modules/test_ddl_deparse/sql/test_ddl_deparse.sql @@ -13,13 +13,13 @@ BEGIN FOR r IN SELECT * FROM pg_event_trigger_ddl_commands() LOOP -- verify that tags match - tag = get_command_tag(r.command); + tag = public.get_command_tag(r.command); IF tag <> r.command_tag THEN RAISE NOTICE 'tag % doesn''t match %', tag, r.command_tag; END IF; -- log the operation - cmdtype = get_command_type(r.command); + cmdtype = public.get_command_type(r.command); IF cmdtype <> 'grant' THEN RAISE NOTICE 'DDL test: type %, tag %', cmdtype, tag; ELSE @@ -29,7 +29,7 @@ BEGIN -- if alter table, log more IF cmdtype = 'alter table' THEN FOR r2 IN SELECT * - FROM unnest(get_altertable_subcmdtypes(r.command)) + FROM unnest(public.get_altertable_subcmdtypes(r.command)) LOOP RAISE NOTICE ' subcommand: %', r2.unnest; END LOOP; diff --git a/src/test/modules/test_ddl_deparse/test_ddl_deparse.c b/src/test/modules/test_ddl_deparse/test_ddl_deparse.c index 56394de92e..82a51eb303 100644 --- a/src/test/modules/test_ddl_deparse/test_ddl_deparse.c +++ b/src/test/modules/test_ddl_deparse/test_ddl_deparse.c @@ -2,7 +2,7 @@ * test_ddl_deparse.c * Support functions for the test_ddl_deparse module * - * Copyright (c) 2014-2017, PostgreSQL Global Development Group + * Copyright (c) 2014-2018, PostgreSQL Global Development Group * * IDENTIFICATION * src/test/modules/test_ddl_deparse/test_ddl_deparse.c diff --git a/src/test/modules/test_parser/test_parser.c b/src/test/modules/test_parser/test_parser.c index bb5305109e..bb700f8a3d 100644 --- a/src/test/modules/test_parser/test_parser.c +++ b/src/test/modules/test_parser/test_parser.c @@ -3,7 +3,7 @@ * test_parser.c * Simple example of a text search parser * - * Copyright (c) 2007-2017, PostgreSQL Global Development Group + * Copyright (c) 2007-2018, PostgreSQL Global Development Group * * IDENTIFICATION * 
src/test/modules/test_parser/test_parser.c diff --git a/src/test/modules/test_pg_dump/Makefile b/src/test/modules/test_pg_dump/Makefile index 5050572777..c64b353707 100644 --- a/src/test/modules/test_pg_dump/Makefile +++ b/src/test/modules/test_pg_dump/Makefile @@ -21,5 +21,5 @@ endif check: prove-check -prove-check: +prove-check: | temp-install $(prove_check) diff --git a/src/test/modules/test_pg_dump/t/001_base.pl b/src/test/modules/test_pg_dump/t/001_base.pl index de70f4716b..fb4ecf8aca 100644 --- a/src/test/modules/test_pg_dump/t/001_base.pl +++ b/src/test/modules/test_pg_dump/t/001_base.pl @@ -43,12 +43,16 @@ dump_cmd => [ 'pg_dump', '--no-sync', "--file=$tempdir/binary_upgrade.sql", '--schema-only', - '--binary-upgrade', '--dbname=postgres', ], }, + '--binary-upgrade', '--dbname=postgres', + ], + }, clean => { dump_cmd => [ 'pg_dump', "--file=$tempdir/clean.sql", '-c', '--no-sync', - '--dbname=postgres', ], }, + '--dbname=postgres', + ], + }, clean_if_exists => { dump_cmd => [ 'pg_dump', @@ -57,28 +61,29 @@ '-c', '--if-exists', '--encoding=UTF8', # no-op, just tests that option is accepted - 'postgres', ], }, - column_inserts => { - dump_cmd => [ - 'pg_dump', '--no-sync', - "--file=$tempdir/column_inserts.sql", '-a', - '--column-inserts', 'postgres', ], }, + 'postgres', + ], + }, createdb => { dump_cmd => [ 'pg_dump', '--no-sync', "--file=$tempdir/createdb.sql", '-C', - '-R', # no-op, just for testing - 'postgres', ], }, + '-R', # no-op, just for testing + 'postgres', + ], + }, data_only => { dump_cmd => [ 'pg_dump', '--no-sync', "--file=$tempdir/data_only.sql", '-a', - '-v', # no-op, just make sure it works - 'postgres', ], }, + '-v', # no-op, just make sure it works + 'postgres', + ], + }, defaults => { dump_cmd => [ 'pg_dump', '-f', "$tempdir/defaults.sql", 'postgres', ], }, @@ -86,70 +91,96 @@ test_key => 'defaults', dump_cmd => [ 'pg_dump', '--no-sync', '-Fc', '-Z6', - "--file=$tempdir/defaults_custom_format.dump", 'postgres', ], + "--file=$tempdir/defaults_custom_format.dump", 'postgres', + ], restore_cmd => [ 'pg_restore', "--file=$tempdir/defaults_custom_format.sql", - "$tempdir/defaults_custom_format.dump", ], }, + "$tempdir/defaults_custom_format.dump", + ], + }, defaults_dir_format => { test_key => 'defaults', dump_cmd => [ 'pg_dump', '--no-sync', '-Fd', - "--file=$tempdir/defaults_dir_format", 'postgres', ], + "--file=$tempdir/defaults_dir_format", 'postgres', + ], restore_cmd => [ 'pg_restore', "--file=$tempdir/defaults_dir_format.sql", - "$tempdir/defaults_dir_format", ], }, + "$tempdir/defaults_dir_format", + ], + }, defaults_parallel => { test_key => 'defaults', dump_cmd => [ 'pg_dump', '--no-sync', '-Fd', '-j2', - "--file=$tempdir/defaults_parallel", 'postgres', ], + "--file=$tempdir/defaults_parallel", 'postgres', + ], restore_cmd => [ 'pg_restore', "--file=$tempdir/defaults_parallel.sql", - "$tempdir/defaults_parallel", ], }, + "$tempdir/defaults_parallel", + ], + }, defaults_tar_format => { test_key => 'defaults', dump_cmd => [ 'pg_dump', '--no-sync', '-Ft', - "--file=$tempdir/defaults_tar_format.tar", 'postgres', ], + "--file=$tempdir/defaults_tar_format.tar", 'postgres', + ], restore_cmd => [ 'pg_restore', "--file=$tempdir/defaults_tar_format.sql", - "$tempdir/defaults_tar_format.tar", ], }, + "$tempdir/defaults_tar_format.tar", + ], + }, pg_dumpall_globals => { dump_cmd => [ 'pg_dumpall', '--no-sync', - "--file=$tempdir/pg_dumpall_globals.sql", '-g', ], }, + "--file=$tempdir/pg_dumpall_globals.sql", '-g', + ], + }, no_privs => { dump_cmd => [ 'pg_dump', 
'--no-sync', "--file=$tempdir/no_privs.sql", '-x', - 'postgres', ], }, + 'postgres', + ], + }, no_owner => { dump_cmd => [ 'pg_dump', '--no-sync', "--file=$tempdir/no_owner.sql", '-O', - 'postgres', ], }, + 'postgres', + ], + }, schema_only => { dump_cmd => [ 'pg_dump', '--no-sync', "--file=$tempdir/schema_only.sql", - '-s', 'postgres', ], }, + '-s', 'postgres', + ], + }, section_pre_data => { dump_cmd => [ 'pg_dump', '--no-sync', "--file=$tempdir/section_pre_data.sql", '--section=pre-data', - 'postgres', ], }, + 'postgres', + ], + }, section_data => { dump_cmd => [ 'pg_dump', '--no-sync', "--file=$tempdir/section_data.sql", '--section=data', - 'postgres', ], }, + 'postgres', + ], + }, section_post_data => { dump_cmd => [ 'pg_dump', '--no-sync', "--file=$tempdir/section_post_data.sql", - '--section=post-data', 'postgres', ], },); + '--section=post-data', 'postgres', + ], + },); ############################################################### # Definition of the tests to run. @@ -161,22 +192,14 @@ # file of each of the runs which the test is to be run against # and the success of the result will depend on if the regexp # result matches the expected 'like' or 'unlike' case. +# The runs listed as 'like' will be checked if they match the +# regexp and, if so, the test passes. All runs which are not +# listed as 'like' will be checked to ensure they don't match +# the regexp; if they do, the test will fail. # -# For each test, there are two sets of runs defined, one for -# the 'like' tests and one for the 'unlike' tests. 'like' -# essentially means "the regexp for this test must match the -# output file". 'unlike' is the opposite. -# -# There are a few 'catch-all' tests which can be used to have -# a single, simple, test to over a range of other tests. For -# example, there is a '^CREATE ' test, which is used for the -# 'data-only' test as there should never be any kind of CREATE -# statement in a 'data-only' run. Without the catch-all, we -# would have to list the 'data-only' run in each and every -# 'CREATE xxxx' test, which would be a lot of additional tests. -# -# Note that it makes no sense for the same run to ever be listed -# in both 'like' and 'unlike' categories. +# The below hashes provide convenience sets of runs. Individual +# runs can be excluded from a general hash by placing that run +# into the 'unlike' section. # # There can then be a 'create_sql' and 'create_order' for a # given test. The 'create_sql' commands are collected up in @@ -188,28 +211,29 @@ # included in it are compiled. This greatly improves performance # as the regexps are used for each run the test applies to. +# Tests which are considered 'full' dumps by pg_dump, but there +# are flags used to exclude specific items (ACLs, blobs, etc). 
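[Editor's note: to make the 'like'/'unlike' convention described in the comment above concrete, here is a small self-contained Perl illustration written for this review; it is not taken from 001_base.pl. The run names, the sample output strings, and the evaluation loop are all hypothetical, shaped after the %tests entries that follow; the real harness compiles each regexp once and checks it against the SQL output file produced by every run.]

use strict;
use warnings;
use Test::More tests => 3;

# Hypothetical captured output of three runs (in the real suite this is the
# contents of the SQL file each pg_dump/pg_restore run wrote).
my %output_of_run = (
	binary_upgrade => "CREATE TABLE public.regress_pg_dump_table_added (\n"
	  . "    col1 integer NOT NULL,\n    col2 integer\n);\n",
	defaults  => "CREATE EXTENSION IF NOT EXISTS test_pg_dump WITH SCHEMA public;\n",
	data_only => "-- data-only dump, no CREATE statements\n",
);

# One hypothetical test entry: a regexp plus the set of runs expected to
# match it.
my %test = (
	regexp => qr/^\QCREATE TABLE public.regress_pg_dump_table_added (\E/m,
	like   => { binary_upgrade => 1 },
	unlike => {},
);

# Runs listed in 'like' (and not in 'unlike') must match the regexp;
# every other run must not.
foreach my $run (sort keys %output_of_run)
{
	my $matched = $output_of_run{$run} =~ $test{regexp};

	if ($test{like}{$run} && !$test{unlike}{$run})
	{
		ok($matched, "$run: dumped as expected");
	}
	else
	{
		ok(!$matched, "$run: correctly not dumped");
	}
}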
+my %full_runs = ( + binary_upgrade => 1, + clean => 1, + clean_if_exists => 1, + createdb => 1, + defaults => 1, + no_privs => 1, + no_owner => 1,); + my %tests = ( 'ALTER EXTENSION test_pg_dump' => { create_order => 9, create_sql => -'ALTER EXTENSION test_pg_dump ADD TABLE regress_pg_dump_table_added;', + 'ALTER EXTENSION test_pg_dump ADD TABLE regress_pg_dump_table_added;', regexp => qr/^ - \QCREATE TABLE regress_pg_dump_table_added (\E + \QCREATE TABLE public.regress_pg_dump_table_added (\E \n\s+\Qcol1 integer NOT NULL,\E \n\s+\Qcol2 integer\E \n\);\n/xm, - like => { binary_upgrade => 1, }, - unlike => { - clean => 1, - clean_if_exists => 1, - createdb => 1, - defaults => 1, - no_privs => 1, - no_owner => 1, - pg_dumpall_globals => 1, - schema_only => 1, - section_pre_data => 1, - section_post_data => 1, }, }, + like => { binary_upgrade => 1, }, + }, 'CREATE EXTENSION test_pg_dump' => { create_order => 2, @@ -218,39 +242,23 @@ \QCREATE EXTENSION IF NOT EXISTS test_pg_dump WITH SCHEMA public;\E \n/xm, like => { - clean => 1, - clean_if_exists => 1, - createdb => 1, - defaults => 1, - no_privs => 1, - no_owner => 1, + %full_runs, schema_only => 1, - section_pre_data => 1, }, - unlike => { - binary_upgrade => 1, - pg_dumpall_globals => 1, - section_post_data => 1, }, }, + section_pre_data => 1, + }, + unlike => { binary_upgrade => 1, }, + }, 'CREATE ROLE regress_dump_test_role' => { create_order => 1, create_sql => 'CREATE ROLE regress_dump_test_role;', regexp => qr/^CREATE ROLE regress_dump_test_role;\n/m, like => { pg_dumpall_globals => 1, }, - unlike => { - binary_upgrade => 1, - clean => 1, - clean_if_exists => 1, - createdb => 1, - defaults => 1, - no_privs => 1, - no_owner => 1, - schema_only => 1, - section_pre_data => 1, - section_post_data => 1, }, }, + }, 'CREATE SEQUENCE regress_pg_dump_table_col1_seq' => { regexp => qr/^ - \QCREATE SEQUENCE regress_pg_dump_table_col1_seq\E + \QCREATE SEQUENCE public.regress_pg_dump_table_col1_seq\E \n\s+\QAS integer\E \n\s+\QSTART WITH 1\E \n\s+\QINCREMENT BY 1\E @@ -258,118 +266,61 @@ \n\s+\QNO MAXVALUE\E \n\s+\QCACHE 1;\E \n/xm, - like => { binary_upgrade => 1, }, - unlike => { - clean => 1, - clean_if_exists => 1, - createdb => 1, - defaults => 1, - no_privs => 1, - no_owner => 1, - pg_dumpall_globals => 1, - schema_only => 1, - section_pre_data => 1, - section_post_data => 1, }, }, + like => { binary_upgrade => 1, }, + }, 'CREATE TABLE regress_pg_dump_table_added' => { create_order => 7, create_sql => -'CREATE TABLE regress_pg_dump_table_added (col1 int not null, col2 int);', + 'CREATE TABLE regress_pg_dump_table_added (col1 int not null, col2 int);', regexp => qr/^ - \QCREATE TABLE regress_pg_dump_table_added (\E + \QCREATE TABLE public.regress_pg_dump_table_added (\E \n\s+\Qcol1 integer NOT NULL,\E \n\s+\Qcol2 integer\E \n\);\n/xm, - like => { binary_upgrade => 1, }, - unlike => { - clean => 1, - clean_if_exists => 1, - createdb => 1, - defaults => 1, - no_privs => 1, - no_owner => 1, - pg_dumpall_globals => 1, - schema_only => 1, - section_pre_data => 1, - section_post_data => 1, }, }, + like => { binary_upgrade => 1, }, + }, 'CREATE SEQUENCE regress_pg_dump_seq' => { regexp => qr/^ - \QCREATE SEQUENCE regress_pg_dump_seq\E + \QCREATE SEQUENCE public.regress_pg_dump_seq\E \n\s+\QSTART WITH 1\E \n\s+\QINCREMENT BY 1\E \n\s+\QNO MINVALUE\E \n\s+\QNO MAXVALUE\E \n\s+\QCACHE 1;\E \n/xm, - like => { binary_upgrade => 1, }, - unlike => { - clean => 1, - clean_if_exists => 1, - createdb => 1, - defaults => 1, - no_privs => 1, - no_owner => 1, 
- pg_dumpall_globals => 1, - schema_only => 1, - section_pre_data => 1, - section_post_data => 1, }, }, + like => { binary_upgrade => 1, }, + }, 'SETVAL SEQUENCE regress_seq_dumpable' => { create_order => 6, create_sql => qq{SELECT nextval('regress_seq_dumpable');}, regexp => qr/^ - \QSELECT pg_catalog.setval('regress_seq_dumpable', 1, true);\E + \QSELECT pg_catalog.setval('public.regress_seq_dumpable', 1, true);\E \n/xm, like => { - clean => 1, - clean_if_exists => 1, - createdb => 1, - data_only => 1, - defaults => 1, - no_owner => 1, - no_privs => 1, }, - unlike => { - pg_dumpall_globals => 1, - schema_only => 1, - section_pre_data => 1, - section_post_data => 1, }, }, + %full_runs, + data_only => 1, + section_data => 1, + }, + }, 'CREATE TABLE regress_pg_dump_table' => { regexp => qr/^ - \QCREATE TABLE regress_pg_dump_table (\E + \QCREATE TABLE public.regress_pg_dump_table (\E \n\s+\Qcol1 integer NOT NULL,\E \n\s+\Qcol2 integer\E \n\);\n/xm, - like => { binary_upgrade => 1, }, - unlike => { - clean => 1, - clean_if_exists => 1, - createdb => 1, - defaults => 1, - no_privs => 1, - no_owner => 1, - pg_dumpall_globals => 1, - schema_only => 1, - section_pre_data => 1, - section_post_data => 1, }, }, + like => { binary_upgrade => 1, }, + }, 'CREATE ACCESS METHOD regress_test_am' => { regexp => qr/^ \QCREATE ACCESS METHOD regress_test_am TYPE INDEX HANDLER bthandler;\E \n/xm, - like => { binary_upgrade => 1, }, - unlike => { - clean => 1, - clean_if_exists => 1, - createdb => 1, - defaults => 1, - no_privs => 1, - no_owner => 1, - pg_dumpall_globals => 1, - schema_only => 1, - section_pre_data => 1, - section_post_data => 1, }, }, + like => { binary_upgrade => 1, }, + }, 'COMMENT ON EXTENSION test_pg_dump' => { regexp => qr/^ @@ -377,118 +328,70 @@ \QIS 'Test pg_dump with an extension';\E \n/xm, like => { - binary_upgrade => 1, - clean => 1, - clean_if_exists => 1, - createdb => 1, - defaults => 1, - no_privs => 1, - no_owner => 1, + %full_runs, schema_only => 1, - section_pre_data => 1, }, - unlike => { - pg_dumpall_globals => 1, - section_post_data => 1, }, }, + section_pre_data => 1, + }, + }, 'GRANT SELECT regress_pg_dump_table_added pre-ALTER EXTENSION' => { create_order => 8, create_sql => -'GRANT SELECT ON regress_pg_dump_table_added TO regress_dump_test_role;', + 'GRANT SELECT ON regress_pg_dump_table_added TO regress_dump_test_role;', regexp => qr/^ - \QGRANT SELECT ON TABLE regress_pg_dump_table_added TO regress_dump_test_role;\E + \QGRANT SELECT ON TABLE public.regress_pg_dump_table_added TO regress_dump_test_role;\E \n/xm, - like => { binary_upgrade => 1, }, - unlike => { - clean => 1, - clean_if_exists => 1, - createdb => 1, - defaults => 1, - no_privs => 1, - no_owner => 1, - pg_dumpall_globals => 1, - schema_only => 1, - section_pre_data => 1, - section_post_data => 1, }, }, + like => { binary_upgrade => 1, }, + }, 'REVOKE SELECT regress_pg_dump_table_added post-ALTER EXTENSION' => { create_order => 10, create_sql => -'REVOKE SELECT ON regress_pg_dump_table_added FROM regress_dump_test_role;', + 'REVOKE SELECT ON regress_pg_dump_table_added FROM regress_dump_test_role;', regexp => qr/^ - \QREVOKE SELECT ON TABLE regress_pg_dump_table_added FROM regress_dump_test_role;\E + \QREVOKE SELECT ON TABLE public.regress_pg_dump_table_added FROM regress_dump_test_role;\E \n/xm, like => { - binary_upgrade => 1, - clean => 1, - clean_if_exists => 1, - createdb => 1, - defaults => 1, - no_owner => 1, + %full_runs, schema_only => 1, - section_pre_data => 1, }, - unlike => { - no_privs => 
1, - pg_dumpall_globals => 1, - section_post_data => 1, }, }, + section_pre_data => 1, + }, + unlike => { no_privs => 1, }, + }, 'GRANT SELECT ON TABLE regress_pg_dump_table' => { regexp => qr/^ \QSELECT pg_catalog.binary_upgrade_set_record_init_privs(true);\E\n - \QGRANT SELECT ON TABLE regress_pg_dump_table TO regress_dump_test_role;\E\n + \QGRANT SELECT ON TABLE public.regress_pg_dump_table TO regress_dump_test_role;\E\n \QSELECT pg_catalog.binary_upgrade_set_record_init_privs(false);\E \n/xms, - like => { binary_upgrade => 1, }, - unlike => { - clean => 1, - clean_if_exists => 1, - createdb => 1, - defaults => 1, - no_owner => 1, - schema_only => 1, - section_pre_data => 1, - no_privs => 1, - pg_dumpall_globals => 1, - section_post_data => 1, }, }, + like => { binary_upgrade => 1, }, + }, 'GRANT SELECT(col1) ON regress_pg_dump_table' => { regexp => qr/^ \QSELECT pg_catalog.binary_upgrade_set_record_init_privs(true);\E\n - \QGRANT SELECT(col1) ON TABLE regress_pg_dump_table TO PUBLIC;\E\n + \QGRANT SELECT(col1) ON TABLE public.regress_pg_dump_table TO PUBLIC;\E\n \QSELECT pg_catalog.binary_upgrade_set_record_init_privs(false);\E \n/xms, - like => { binary_upgrade => 1, }, - unlike => { - clean => 1, - clean_if_exists => 1, - createdb => 1, - defaults => 1, - no_owner => 1, - schema_only => 1, - section_pre_data => 1, - no_privs => 1, - pg_dumpall_globals => 1, - section_post_data => 1, }, }, - - 'GRANT SELECT(col2) ON regress_pg_dump_table TO regress_dump_test_role' => - { create_order => 4, + like => { binary_upgrade => 1, }, + }, + + 'GRANT SELECT(col2) ON regress_pg_dump_table TO regress_dump_test_role' + => { + create_order => 4, create_sql => 'GRANT SELECT(col2) ON regress_pg_dump_table TO regress_dump_test_role;', regexp => qr/^ - \QGRANT SELECT(col2) ON TABLE regress_pg_dump_table TO regress_dump_test_role;\E + \QGRANT SELECT(col2) ON TABLE public.regress_pg_dump_table TO regress_dump_test_role;\E \n/xm, like => { - binary_upgrade => 1, - clean => 1, - clean_if_exists => 1, - createdb => 1, - defaults => 1, - no_owner => 1, + %full_runs, schema_only => 1, - section_pre_data => 1, }, - unlike => { - no_privs => 1, - pg_dumpall_globals => 1, - section_post_data => 1, }, }, + section_pre_data => 1, + }, + unlike => { no_privs => 1, }, + }, 'GRANT USAGE ON regress_pg_dump_table_col1_seq TO regress_dump_test_role' => { @@ -496,251 +399,129 @@ create_sql => 'GRANT USAGE ON SEQUENCE regress_pg_dump_table_col1_seq TO regress_dump_test_role;', regexp => qr/^ - \QGRANT USAGE ON SEQUENCE regress_pg_dump_table_col1_seq TO regress_dump_test_role;\E + \QGRANT USAGE ON SEQUENCE public.regress_pg_dump_table_col1_seq TO regress_dump_test_role;\E \n/xm, like => { - binary_upgrade => 1, - clean => 1, - clean_if_exists => 1, - createdb => 1, - defaults => 1, - no_owner => 1, + %full_runs, schema_only => 1, - section_pre_data => 1, }, - unlike => { - no_privs => 1, - pg_dumpall_globals => 1, - section_post_data => 1, }, }, + section_pre_data => 1, + }, + unlike => { no_privs => 1, }, + }, 'GRANT USAGE ON regress_pg_dump_seq TO regress_dump_test_role' => { regexp => qr/^ - \QGRANT USAGE ON SEQUENCE regress_pg_dump_seq TO regress_dump_test_role;\E + \QGRANT USAGE ON SEQUENCE public.regress_pg_dump_seq TO regress_dump_test_role;\E \n/xm, - like => { binary_upgrade => 1, }, - unlike => { - clean => 1, - clean_if_exists => 1, - createdb => 1, - defaults => 1, - no_owner => 1, - no_privs => 1, - pg_dumpall_globals => 1, - schema_only => 1, - section_pre_data => 1, - section_post_data => 1, }, }, + like => 
{ binary_upgrade => 1, }, + }, 'REVOKE SELECT(col1) ON regress_pg_dump_table' => { create_order => 3, create_sql => 'REVOKE SELECT(col1) ON regress_pg_dump_table FROM PUBLIC;', regexp => qr/^ - \QREVOKE SELECT(col1) ON TABLE regress_pg_dump_table FROM PUBLIC;\E + \QREVOKE SELECT(col1) ON TABLE public.regress_pg_dump_table FROM PUBLIC;\E \n/xm, like => { - binary_upgrade => 1, - clean => 1, - clean_if_exists => 1, - createdb => 1, - defaults => 1, - no_owner => 1, + %full_runs, schema_only => 1, - section_pre_data => 1, }, - unlike => { - no_privs => 1, - pg_dumpall_globals => 1, - section_post_data => 1, }, }, + section_pre_data => 1, + }, + unlike => { no_privs => 1, }, + }, - # Objects included in extension part of a schema created by this extension */ + # Objects included in extension part of a schema created by this extension */ 'CREATE TABLE regress_pg_dump_schema.test_table' => { regexp => qr/^ - \QCREATE TABLE test_table (\E + \QCREATE TABLE regress_pg_dump_schema.test_table (\E \n\s+\Qcol1 integer,\E \n\s+\Qcol2 integer\E \n\);\n/xm, - like => { binary_upgrade => 1, }, - unlike => { - clean => 1, - clean_if_exists => 1, - createdb => 1, - defaults => 1, - no_privs => 1, - no_owner => 1, - pg_dumpall_globals => 1, - schema_only => 1, - section_pre_data => 1, - section_post_data => 1, }, }, + like => { binary_upgrade => 1, }, + }, 'GRANT SELECT ON regress_pg_dump_schema.test_table' => { regexp => qr/^ \QSELECT pg_catalog.binary_upgrade_set_record_init_privs(true);\E\n - \QGRANT SELECT ON TABLE test_table TO regress_dump_test_role;\E\n + \QGRANT SELECT ON TABLE regress_pg_dump_schema.test_table TO regress_dump_test_role;\E\n \QSELECT pg_catalog.binary_upgrade_set_record_init_privs(false);\E \n/xms, - like => { binary_upgrade => 1, }, - unlike => { - clean => 1, - clean_if_exists => 1, - createdb => 1, - defaults => 1, - no_owner => 1, - no_privs => 1, - pg_dumpall_globals => 1, - schema_only => 1, - section_pre_data => 1, - section_post_data => 1, }, }, + like => { binary_upgrade => 1, }, + }, 'CREATE SEQUENCE regress_pg_dump_schema.test_seq' => { regexp => qr/^ - \QCREATE SEQUENCE test_seq\E + \QCREATE SEQUENCE regress_pg_dump_schema.test_seq\E \n\s+\QSTART WITH 1\E \n\s+\QINCREMENT BY 1\E \n\s+\QNO MINVALUE\E \n\s+\QNO MAXVALUE\E \n\s+\QCACHE 1;\E \n/xm, - like => { binary_upgrade => 1, }, - unlike => { - clean => 1, - clean_if_exists => 1, - createdb => 1, - defaults => 1, - no_privs => 1, - no_owner => 1, - pg_dumpall_globals => 1, - schema_only => 1, - section_pre_data => 1, - section_post_data => 1, }, }, + like => { binary_upgrade => 1, }, + }, 'GRANT USAGE ON regress_pg_dump_schema.test_seq' => { regexp => qr/^ \QSELECT pg_catalog.binary_upgrade_set_record_init_privs(true);\E\n - \QGRANT USAGE ON SEQUENCE test_seq TO regress_dump_test_role;\E\n + \QGRANT USAGE ON SEQUENCE regress_pg_dump_schema.test_seq TO regress_dump_test_role;\E\n \QSELECT pg_catalog.binary_upgrade_set_record_init_privs(false);\E \n/xms, - like => { binary_upgrade => 1, }, - unlike => { - clean => 1, - clean_if_exists => 1, - createdb => 1, - defaults => 1, - no_owner => 1, - no_privs => 1, - pg_dumpall_globals => 1, - schema_only => 1, - section_pre_data => 1, - section_post_data => 1, }, }, + like => { binary_upgrade => 1, }, + }, 'CREATE TYPE regress_pg_dump_schema.test_type' => { regexp => qr/^ - \QCREATE TYPE test_type AS (\E + \QCREATE TYPE regress_pg_dump_schema.test_type AS (\E \n\s+\Qcol1 integer\E \n\);\n/xm, - like => { binary_upgrade => 1, }, - unlike => { - clean => 1, - clean_if_exists => 1, - 
createdb => 1, - defaults => 1, - no_privs => 1, - no_owner => 1, - pg_dumpall_globals => 1, - schema_only => 1, - section_pre_data => 1, - section_post_data => 1, }, }, + like => { binary_upgrade => 1, }, + }, 'GRANT USAGE ON regress_pg_dump_schema.test_type' => { regexp => qr/^ \QSELECT pg_catalog.binary_upgrade_set_record_init_privs(true);\E\n - \QGRANT ALL ON TYPE test_type TO regress_dump_test_role;\E\n + \QGRANT ALL ON TYPE regress_pg_dump_schema.test_type TO regress_dump_test_role;\E\n \QSELECT pg_catalog.binary_upgrade_set_record_init_privs(false);\E \n/xms, - like => { binary_upgrade => 1, }, - unlike => { - clean => 1, - clean_if_exists => 1, - createdb => 1, - defaults => 1, - no_owner => 1, - no_privs => 1, - pg_dumpall_globals => 1, - schema_only => 1, - section_pre_data => 1, - section_post_data => 1, }, }, + like => { binary_upgrade => 1, }, + }, 'CREATE FUNCTION regress_pg_dump_schema.test_func' => { regexp => qr/^ - \QCREATE FUNCTION test_func() RETURNS integer\E + \QCREATE FUNCTION regress_pg_dump_schema.test_func() RETURNS integer\E \n\s+\QLANGUAGE sql\E \n/xm, - like => { binary_upgrade => 1, }, - unlike => { - clean => 1, - clean_if_exists => 1, - createdb => 1, - defaults => 1, - no_privs => 1, - no_owner => 1, - pg_dumpall_globals => 1, - schema_only => 1, - section_pre_data => 1, - section_post_data => 1, }, }, + like => { binary_upgrade => 1, }, + }, 'GRANT ALL ON regress_pg_dump_schema.test_func' => { regexp => qr/^ \QSELECT pg_catalog.binary_upgrade_set_record_init_privs(true);\E\n - \QGRANT ALL ON FUNCTION test_func() TO regress_dump_test_role;\E\n + \QGRANT ALL ON FUNCTION regress_pg_dump_schema.test_func() TO regress_dump_test_role;\E\n \QSELECT pg_catalog.binary_upgrade_set_record_init_privs(false);\E \n/xms, - like => { binary_upgrade => 1, }, - unlike => { - clean => 1, - clean_if_exists => 1, - createdb => 1, - defaults => 1, - no_owner => 1, - no_privs => 1, - pg_dumpall_globals => 1, - schema_only => 1, - section_pre_data => 1, - section_post_data => 1, }, }, + like => { binary_upgrade => 1, }, + }, 'CREATE AGGREGATE regress_pg_dump_schema.test_agg' => { regexp => qr/^ - \QCREATE AGGREGATE test_agg(smallint) (\E + \QCREATE AGGREGATE regress_pg_dump_schema.test_agg(smallint) (\E \n\s+\QSFUNC = int2_sum,\E \n\s+\QSTYPE = bigint\E \n\);\n/xm, - like => { binary_upgrade => 1, }, - unlike => { - clean => 1, - clean_if_exists => 1, - createdb => 1, - defaults => 1, - no_privs => 1, - no_owner => 1, - pg_dumpall_globals => 1, - schema_only => 1, - section_pre_data => 1, - section_post_data => 1, }, }, + like => { binary_upgrade => 1, }, + }, 'GRANT ALL ON regress_pg_dump_schema.test_agg' => { regexp => qr/^ \QSELECT pg_catalog.binary_upgrade_set_record_init_privs(true);\E\n - \QGRANT ALL ON FUNCTION test_agg(smallint) TO regress_dump_test_role;\E\n + \QGRANT ALL ON FUNCTION regress_pg_dump_schema.test_agg(smallint) TO regress_dump_test_role;\E\n \QSELECT pg_catalog.binary_upgrade_set_record_init_privs(false);\E \n/xms, - like => { binary_upgrade => 1, }, - unlike => { - clean => 1, - clean_if_exists => 1, - createdb => 1, - defaults => 1, - no_owner => 1, - no_privs => 1, - pg_dumpall_globals => 1, - schema_only => 1, - section_pre_data => 1, - section_post_data => 1, }, }, + like => { binary_upgrade => 1, }, + }, # Objects not included in extension, part of schema created by extension 'CREATE TABLE regress_pg_dump_schema.external_tab' => { @@ -748,22 +529,15 @@ create_sql => 'CREATE TABLE regress_pg_dump_schema.external_tab (col1 int);', regexp => qr/^ - 
\QCREATE TABLE external_tab (\E + \QCREATE TABLE regress_pg_dump_schema.external_tab (\E \n\s+\Qcol1 integer\E \n\);\n/xm, like => { - binary_upgrade => 1, - clean => 1, - clean_if_exists => 1, - createdb => 1, - defaults => 1, - no_owner => 1, - no_privs => 1, + %full_runs, schema_only => 1, - section_pre_data => 1, }, - unlike => { - pg_dumpall_globals => 1, - section_post_data => 1, }, },); + section_pre_data => 1, + }, + },); ######################################### # Create a PG instance to test actually dumping from @@ -797,13 +571,15 @@ # Then count all the tests run against each run foreach my $test (sort keys %tests) { - if ($tests{$test}->{like}->{$test_key}) + # If there is a like entry, but no unlike entry, then we will test the like case + if ($tests{$test}->{like}->{$test_key} + && !defined($tests{$test}->{unlike}->{$test_key})) { $num_tests++; } - - if ($tests{$test}->{unlike}->{$test_key}) + else { + # We will test everything that isn't a 'like' $num_tests++; } } @@ -875,17 +651,24 @@ foreach my $test (sort keys %tests) { - if ($tests{$test}->{like}->{$test_key}) + # Run the test listed as a like, unless it is specifically noted + # as an unlike (generally due to an explicit exclusion or similar). + if ($tests{$test}->{like}->{$test_key} + && !defined($tests{$test}->{unlike}->{$test_key})) { - like($output_file, $tests{$test}->{regexp}, "$run: dumps $test"); + if (!ok($output_file =~ $tests{$test}->{regexp}, + "$run: should dump $test")) + { + diag("Review $run results in $tempdir"); + } } - - if ($tests{$test}->{unlike}->{$test_key}) + else { - unlike( - $output_file, - $tests{$test}->{regexp}, - "$run: does not dump $test"); + if (!ok($output_file !~ $tests{$test}->{regexp}, + "$run: should not dump $test")) + { + diag("Review $run results in $tempdir"); + } } } } diff --git a/src/test/modules/test_predtest/.gitignore b/src/test/modules/test_predtest/.gitignore new file mode 100644 index 0000000000..5dcb3ff972 --- /dev/null +++ b/src/test/modules/test_predtest/.gitignore @@ -0,0 +1,4 @@ +# Generated subdirectories +/log/ +/results/ +/tmp_check/ diff --git a/src/test/modules/test_predtest/Makefile b/src/test/modules/test_predtest/Makefile new file mode 100644 index 0000000000..1b50fa31a4 --- /dev/null +++ b/src/test/modules/test_predtest/Makefile @@ -0,0 +1,21 @@ +# src/test/modules/test_predtest/Makefile + +MODULE_big = test_predtest +OBJS = test_predtest.o $(WIN32RES) +PGFILEDESC = "test_predtest - test code for optimizer/util/predtest.c" + +EXTENSION = test_predtest +DATA = test_predtest--1.0.sql + +REGRESS = test_predtest + +ifdef USE_PGXS +PG_CONFIG = pg_config +PGXS := $(shell $(PG_CONFIG) --pgxs) +include $(PGXS) +else +subdir = src/test/modules/test_predtest +top_builddir = ../../../.. +include $(top_builddir)/src/Makefile.global +include $(top_srcdir)/contrib/contrib-global.mk +endif diff --git a/src/test/modules/test_predtest/README b/src/test/modules/test_predtest/README new file mode 100644 index 0000000000..2c9bec0f14 --- /dev/null +++ b/src/test/modules/test_predtest/README @@ -0,0 +1,28 @@ +test_predtest is a module for checking the correctness of the optimizer's +predicate-proof logic, in src/backend/optimizer/util/predtest.c. + +The module provides a function that allows direct application of +predtest.c's exposed functions, predicate_implied_by() and +predicate_refuted_by(), to arbitrary boolean expressions, with direct +inspection of the results. 
This could be done indirectly by checking +planner results, but it can be difficult to construct end-to-end test +cases that prove that the expected results were obtained. + +In general, the use of this function is like + select * from test_predtest('query string') +where the query string must be a SELECT returning two boolean +columns, for example + + select * from test_predtest($$ + select x, not x + from (values (false), (true), (null)) as v(x) + $$); + +The function parses and plans the given query, and then applies the +predtest.c code to the two boolean expressions in the SELECT list, to see +if the first expression can be proven or refuted by the second. It also +executes the query, and checks the resulting rows to see whether any +claimed implication or refutation relationship actually holds. If the +query is designed to exercise the expressions on a full set of possible +input values, as in the example above, then this provides a mechanical +cross-check as to whether the proof code has given a correct answer. diff --git a/src/test/modules/test_predtest/expected/test_predtest.out b/src/test/modules/test_predtest/expected/test_predtest.out new file mode 100644 index 0000000000..5574e03204 --- /dev/null +++ b/src/test/modules/test_predtest/expected/test_predtest.out @@ -0,0 +1,839 @@ +CREATE EXTENSION test_predtest; +-- Make output more legible +\pset expanded on +-- Test data +-- all combinations of four boolean values +create table booleans as +select + case i%3 when 0 then true when 1 then false else null end as x, + case (i/3)%3 when 0 then true when 1 then false else null end as y, + case (i/9)%3 when 0 then true when 1 then false else null end as z, + case (i/27)%3 when 0 then true when 1 then false else null end as w +from generate_series(0, 3*3*3*3-1) i; +-- all combinations of two integers 0..9, plus null +create table integers as +select + case i%11 when 10 then null else i%11 end as x, + case (i/11)%11 when 10 then null else (i/11)%11 end as y +from generate_series(0, 11*11-1) i; +-- and a simple strict function that's opaque to the optimizer +create function strictf(bool, bool) returns bool +language plpgsql as $$begin return $1 and not $2; end$$ strict; +-- Basic proof rules for single boolean variables +select * from test_predtest($$ +select x, x +from booleans +$$); +-[ RECORD 1 ]-----+-- +strong_implied_by | t +weak_implied_by | t +strong_refuted_by | f +weak_refuted_by | f +s_i_holds | t +w_i_holds | t +s_r_holds | f +w_r_holds | f + +select * from test_predtest($$ +select x, not x +from booleans +$$); +-[ RECORD 1 ]-----+-- +strong_implied_by | f +weak_implied_by | f +strong_refuted_by | t +weak_refuted_by | t +s_i_holds | f +w_i_holds | f +s_r_holds | t +w_r_holds | t + +select * from test_predtest($$ +select not x, x +from booleans +$$); +-[ RECORD 1 ]-----+-- +strong_implied_by | f +weak_implied_by | f +strong_refuted_by | t +weak_refuted_by | t +s_i_holds | f +w_i_holds | f +s_r_holds | t +w_r_holds | t + +select * from test_predtest($$ +select not x, not x +from booleans +$$); +-[ RECORD 1 ]-----+-- +strong_implied_by | t +weak_implied_by | t +strong_refuted_by | f +weak_refuted_by | f +s_i_holds | t +w_i_holds | t +s_r_holds | f +w_r_holds | f + +select * from test_predtest($$ +select x is not null, x +from booleans +$$); +-[ RECORD 1 ]-----+-- +strong_implied_by | t +weak_implied_by | f +strong_refuted_by | f +weak_refuted_by | f +s_i_holds | t +w_i_holds | f +s_r_holds | f +w_r_holds | f + +select * from test_predtest($$ +select x is not null, x is null 
+from integers +$$); +-[ RECORD 1 ]-----+-- +strong_implied_by | f +weak_implied_by | f +strong_refuted_by | t +weak_refuted_by | t +s_i_holds | f +w_i_holds | f +s_r_holds | t +w_r_holds | t + +select * from test_predtest($$ +select x is null, x is not null +from integers +$$); +-[ RECORD 1 ]-----+-- +strong_implied_by | f +weak_implied_by | f +strong_refuted_by | t +weak_refuted_by | t +s_i_holds | f +w_i_holds | f +s_r_holds | t +w_r_holds | t + +select * from test_predtest($$ +select x is not true, x +from booleans +$$); +-[ RECORD 1 ]-----+-- +strong_implied_by | f +weak_implied_by | f +strong_refuted_by | t +weak_refuted_by | t +s_i_holds | f +w_i_holds | f +s_r_holds | t +w_r_holds | t + +select * from test_predtest($$ +select x, x is not true +from booleans +$$); +-[ RECORD 1 ]-----+-- +strong_implied_by | f +weak_implied_by | f +strong_refuted_by | f +weak_refuted_by | f +s_i_holds | f +w_i_holds | f +s_r_holds | f +w_r_holds | t + +select * from test_predtest($$ +select x is false, x +from booleans +$$); +-[ RECORD 1 ]-----+-- +strong_implied_by | f +weak_implied_by | f +strong_refuted_by | t +weak_refuted_by | t +s_i_holds | f +w_i_holds | f +s_r_holds | t +w_r_holds | t + +select * from test_predtest($$ +select x, x is false +from booleans +$$); +-[ RECORD 1 ]-----+-- +strong_implied_by | f +weak_implied_by | f +strong_refuted_by | t +weak_refuted_by | t +s_i_holds | f +w_i_holds | f +s_r_holds | t +w_r_holds | t + +select * from test_predtest($$ +select x is unknown, x +from booleans +$$); +-[ RECORD 1 ]-----+-- +strong_implied_by | f +weak_implied_by | f +strong_refuted_by | t +weak_refuted_by | t +s_i_holds | f +w_i_holds | f +s_r_holds | t +w_r_holds | t + +select * from test_predtest($$ +select x, x is unknown +from booleans +$$); +-[ RECORD 1 ]-----+-- +strong_implied_by | f +weak_implied_by | f +strong_refuted_by | f +weak_refuted_by | f +s_i_holds | f +w_i_holds | t +s_r_holds | f +w_r_holds | t + +-- Assorted not-so-trivial refutation rules +select * from test_predtest($$ +select x is null, x +from booleans +$$); +-[ RECORD 1 ]-----+-- +strong_implied_by | f +weak_implied_by | f +strong_refuted_by | t +weak_refuted_by | t +s_i_holds | f +w_i_holds | f +s_r_holds | t +w_r_holds | t + +select * from test_predtest($$ +select x, x is null +from booleans +$$); +-[ RECORD 1 ]-----+-- +strong_implied_by | f +weak_implied_by | f +strong_refuted_by | f +weak_refuted_by | t +s_i_holds | f +w_i_holds | t +s_r_holds | f +w_r_holds | t + +select * from test_predtest($$ +select strictf(x,y), x is null +from booleans +$$); +-[ RECORD 1 ]-----+-- +strong_implied_by | f +weak_implied_by | f +strong_refuted_by | f +weak_refuted_by | t +s_i_holds | f +w_i_holds | t +s_r_holds | f +w_r_holds | t + +select * from test_predtest($$ +select (x is not null) is not true, x +from booleans +$$); +-[ RECORD 1 ]-----+-- +strong_implied_by | f +weak_implied_by | f +strong_refuted_by | t +weak_refuted_by | t +s_i_holds | f +w_i_holds | f +s_r_holds | t +w_r_holds | t + +select * from test_predtest($$ +select strictf(x,y), (x is not null) is false +from booleans +$$); +-[ RECORD 1 ]-----+-- +strong_implied_by | f +weak_implied_by | f +strong_refuted_by | f +weak_refuted_by | t +s_i_holds | f +w_i_holds | t +s_r_holds | f +w_r_holds | t + +select * from test_predtest($$ +select x > y, (y < x) is false +from integers +$$); +-[ RECORD 1 ]-----+-- +strong_implied_by | f +weak_implied_by | f +strong_refuted_by | t +weak_refuted_by | t +s_i_holds | f +w_i_holds | f +s_r_holds | t +w_r_holds | t + +-- Tests 
involving AND/OR constructs +select * from test_predtest($$ +select x, x and y +from booleans +$$); +-[ RECORD 1 ]-----+-- +strong_implied_by | t +weak_implied_by | t +strong_refuted_by | f +weak_refuted_by | f +s_i_holds | t +w_i_holds | t +s_r_holds | f +w_r_holds | f + +select * from test_predtest($$ +select not x, x and y +from booleans +$$); +-[ RECORD 1 ]-----+-- +strong_implied_by | f +weak_implied_by | f +strong_refuted_by | t +weak_refuted_by | t +s_i_holds | f +w_i_holds | f +s_r_holds | t +w_r_holds | t + +select * from test_predtest($$ +select x, not x and y +from booleans +$$); +-[ RECORD 1 ]-----+-- +strong_implied_by | f +weak_implied_by | f +strong_refuted_by | t +weak_refuted_by | t +s_i_holds | f +w_i_holds | f +s_r_holds | t +w_r_holds | t + +select * from test_predtest($$ +select x or y, x +from booleans +$$); +-[ RECORD 1 ]-----+-- +strong_implied_by | t +weak_implied_by | t +strong_refuted_by | f +weak_refuted_by | f +s_i_holds | t +w_i_holds | t +s_r_holds | f +w_r_holds | f + +select * from test_predtest($$ +select x and y, x +from booleans +$$); +-[ RECORD 1 ]-----+-- +strong_implied_by | f +weak_implied_by | f +strong_refuted_by | f +weak_refuted_by | f +s_i_holds | f +w_i_holds | f +s_r_holds | f +w_r_holds | f + +select * from test_predtest($$ +select x and y, not x +from booleans +$$); +-[ RECORD 1 ]-----+-- +strong_implied_by | f +weak_implied_by | f +strong_refuted_by | t +weak_refuted_by | t +s_i_holds | f +w_i_holds | f +s_r_holds | t +w_r_holds | t + +select * from test_predtest($$ +select x and y, y and x +from booleans +$$); +-[ RECORD 1 ]-----+-- +strong_implied_by | t +weak_implied_by | t +strong_refuted_by | f +weak_refuted_by | f +s_i_holds | t +w_i_holds | t +s_r_holds | f +w_r_holds | f + +select * from test_predtest($$ +select not y, y and x +from booleans +$$); +-[ RECORD 1 ]-----+-- +strong_implied_by | f +weak_implied_by | f +strong_refuted_by | t +weak_refuted_by | t +s_i_holds | f +w_i_holds | f +s_r_holds | t +w_r_holds | t + +select * from test_predtest($$ +select x or y, y or x +from booleans +$$); +-[ RECORD 1 ]-----+-- +strong_implied_by | t +weak_implied_by | t +strong_refuted_by | f +weak_refuted_by | f +s_i_holds | t +w_i_holds | t +s_r_holds | f +w_r_holds | f + +select * from test_predtest($$ +select x or y or z, x or z +from booleans +$$); +-[ RECORD 1 ]-----+-- +strong_implied_by | t +weak_implied_by | t +strong_refuted_by | f +weak_refuted_by | f +s_i_holds | t +w_i_holds | t +s_r_holds | f +w_r_holds | f + +select * from test_predtest($$ +select x and z, x and y and z +from booleans +$$); +-[ RECORD 1 ]-----+-- +strong_implied_by | t +weak_implied_by | t +strong_refuted_by | f +weak_refuted_by | f +s_i_holds | t +w_i_holds | t +s_r_holds | f +w_r_holds | f + +select * from test_predtest($$ +select z or w, x or y +from booleans +$$); +-[ RECORD 1 ]-----+-- +strong_implied_by | f +weak_implied_by | f +strong_refuted_by | f +weak_refuted_by | f +s_i_holds | f +w_i_holds | f +s_r_holds | f +w_r_holds | f + +select * from test_predtest($$ +select z and w, x or y +from booleans +$$); +-[ RECORD 1 ]-----+-- +strong_implied_by | f +weak_implied_by | f +strong_refuted_by | f +weak_refuted_by | f +s_i_holds | f +w_i_holds | f +s_r_holds | f +w_r_holds | f + +select * from test_predtest($$ +select x, (x and y) or (x and z) +from booleans +$$); +-[ RECORD 1 ]-----+-- +strong_implied_by | t +weak_implied_by | t +strong_refuted_by | f +weak_refuted_by | f +s_i_holds | t +w_i_holds | t +s_r_holds | f +w_r_holds | f + +select * from 
test_predtest($$ +select (x and y) or z, y and x +from booleans +$$); +-[ RECORD 1 ]-----+-- +strong_implied_by | t +weak_implied_by | t +strong_refuted_by | f +weak_refuted_by | f +s_i_holds | t +w_i_holds | t +s_r_holds | f +w_r_holds | f + +select * from test_predtest($$ +select (not x or not y) and z, y and x +from booleans +$$); +-[ RECORD 1 ]-----+-- +strong_implied_by | f +weak_implied_by | f +strong_refuted_by | t +weak_refuted_by | t +s_i_holds | f +w_i_holds | f +s_r_holds | t +w_r_holds | t + +select * from test_predtest($$ +select y or x, (x or y) and z +from booleans +$$); +-[ RECORD 1 ]-----+-- +strong_implied_by | t +weak_implied_by | t +strong_refuted_by | f +weak_refuted_by | f +s_i_holds | t +w_i_holds | t +s_r_holds | f +w_r_holds | f + +select * from test_predtest($$ +select not x and not y, (x or y) and z +from booleans +$$); +-[ RECORD 1 ]-----+-- +strong_implied_by | f +weak_implied_by | f +strong_refuted_by | t +weak_refuted_by | t +s_i_holds | f +w_i_holds | f +s_r_holds | t +w_r_holds | t + +-- Tests using btree operator knowledge +select * from test_predtest($$ +select x <= y, x < y +from integers +$$); +-[ RECORD 1 ]-----+-- +strong_implied_by | t +weak_implied_by | t +strong_refuted_by | f +weak_refuted_by | f +s_i_holds | t +w_i_holds | t +s_r_holds | f +w_r_holds | f + +select * from test_predtest($$ +select x <= y, x > y +from integers +$$); +-[ RECORD 1 ]-----+-- +strong_implied_by | f +weak_implied_by | f +strong_refuted_by | t +weak_refuted_by | t +s_i_holds | f +w_i_holds | f +s_r_holds | t +w_r_holds | t + +select * from test_predtest($$ +select x <= y, y >= x +from integers +$$); +-[ RECORD 1 ]-----+-- +strong_implied_by | t +weak_implied_by | t +strong_refuted_by | f +weak_refuted_by | f +s_i_holds | t +w_i_holds | t +s_r_holds | f +w_r_holds | f + +select * from test_predtest($$ +select x <= y, y > x and y < x+2 +from integers +$$); +-[ RECORD 1 ]-----+-- +strong_implied_by | t +weak_implied_by | t +strong_refuted_by | f +weak_refuted_by | f +s_i_holds | t +w_i_holds | t +s_r_holds | f +w_r_holds | f + +select * from test_predtest($$ +select x <= 5, x <= 7 +from integers +$$); +-[ RECORD 1 ]-----+-- +strong_implied_by | f +weak_implied_by | f +strong_refuted_by | f +weak_refuted_by | f +s_i_holds | f +w_i_holds | f +s_r_holds | f +w_r_holds | f + +select * from test_predtest($$ +select x <= 5, x > 7 +from integers +$$); +-[ RECORD 1 ]-----+-- +strong_implied_by | f +weak_implied_by | f +strong_refuted_by | t +weak_refuted_by | t +s_i_holds | f +w_i_holds | f +s_r_holds | t +w_r_holds | t + +select * from test_predtest($$ +select x <= 5, 7 > x +from integers +$$); +-[ RECORD 1 ]-----+-- +strong_implied_by | f +weak_implied_by | f +strong_refuted_by | f +weak_refuted_by | f +s_i_holds | f +w_i_holds | f +s_r_holds | f +w_r_holds | f + +select * from test_predtest($$ +select 5 >= x, 7 > x +from integers +$$); +-[ RECORD 1 ]-----+-- +strong_implied_by | f +weak_implied_by | f +strong_refuted_by | f +weak_refuted_by | f +s_i_holds | f +w_i_holds | f +s_r_holds | f +w_r_holds | f + +select * from test_predtest($$ +select 5 >= x, x > 7 +from integers +$$); +-[ RECORD 1 ]-----+-- +strong_implied_by | f +weak_implied_by | f +strong_refuted_by | t +weak_refuted_by | t +s_i_holds | f +w_i_holds | f +s_r_holds | t +w_r_holds | t + +select * from test_predtest($$ +select 5 = x, x = 7 +from integers +$$); +-[ RECORD 1 ]-----+-- +strong_implied_by | f +weak_implied_by | f +strong_refuted_by | t +weak_refuted_by | t +s_i_holds | f +w_i_holds | f +s_r_holds | t 
+w_r_holds | t + +select * from test_predtest($$ +select x is not null, x > 7 +from integers +$$); +-[ RECORD 1 ]-----+-- +strong_implied_by | t +weak_implied_by | f +strong_refuted_by | f +weak_refuted_by | f +s_i_holds | t +w_i_holds | f +s_r_holds | f +w_r_holds | f + +select * from test_predtest($$ +select x is not null, int4lt(x,8) +from integers +$$); +-[ RECORD 1 ]-----+-- +strong_implied_by | t +weak_implied_by | f +strong_refuted_by | f +weak_refuted_by | f +s_i_holds | t +w_i_holds | f +s_r_holds | f +w_r_holds | f + +select * from test_predtest($$ +select x is null, x > 7 +from integers +$$); +-[ RECORD 1 ]-----+-- +strong_implied_by | f +weak_implied_by | f +strong_refuted_by | t +weak_refuted_by | t +s_i_holds | f +w_i_holds | f +s_r_holds | t +w_r_holds | t + +select * from test_predtest($$ +select x is null, int4lt(x,8) +from integers +$$); +-[ RECORD 1 ]-----+-- +strong_implied_by | f +weak_implied_by | f +strong_refuted_by | t +weak_refuted_by | t +s_i_holds | f +w_i_holds | f +s_r_holds | t +w_r_holds | t + +select * from test_predtest($$ +select x is not null, x < 'foo' +from (values + ('aaa'::varchar), ('zzz'::varchar), (null)) as v(x) +$$); +-[ RECORD 1 ]-----+-- +strong_implied_by | t +weak_implied_by | f +strong_refuted_by | f +weak_refuted_by | f +s_i_holds | t +w_i_holds | f +s_r_holds | f +w_r_holds | f + +-- Cases using ScalarArrayOpExpr +select * from test_predtest($$ +select x <= 5, x in (1,3,5) +from integers +$$); +-[ RECORD 1 ]-----+-- +strong_implied_by | t +weak_implied_by | t +strong_refuted_by | f +weak_refuted_by | f +s_i_holds | t +w_i_holds | t +s_r_holds | f +w_r_holds | f + +select * from test_predtest($$ +select x <= 5, x in (1,3,5,7) +from integers +$$); +-[ RECORD 1 ]-----+-- +strong_implied_by | f +weak_implied_by | f +strong_refuted_by | f +weak_refuted_by | f +s_i_holds | f +w_i_holds | f +s_r_holds | f +w_r_holds | f + +select * from test_predtest($$ +select x <= 5, x in (1,3,5,null) +from integers +$$); +-[ RECORD 1 ]-----+-- +strong_implied_by | t +weak_implied_by | f +strong_refuted_by | f +weak_refuted_by | f +s_i_holds | t +w_i_holds | f +s_r_holds | f +w_r_holds | f + +select * from test_predtest($$ +select x in (null,1,3,5,7), x in (1,3,5) +from integers +$$); +-[ RECORD 1 ]-----+-- +strong_implied_by | t +weak_implied_by | t +strong_refuted_by | f +weak_refuted_by | f +s_i_holds | t +w_i_holds | t +s_r_holds | f +w_r_holds | f + +select * from test_predtest($$ +select x <= 5, x < all(array[1,3,5]) +from integers +$$); +-[ RECORD 1 ]-----+-- +strong_implied_by | t +weak_implied_by | t +strong_refuted_by | f +weak_refuted_by | f +s_i_holds | t +w_i_holds | t +s_r_holds | f +w_r_holds | f + +select * from test_predtest($$ +select x <= y, x = any(array[1,3,y]) +from integers +$$); +-[ RECORD 1 ]-----+-- +strong_implied_by | f +weak_implied_by | f +strong_refuted_by | f +weak_refuted_by | f +s_i_holds | f +w_i_holds | f +s_r_holds | f +w_r_holds | f + diff --git a/src/test/modules/test_predtest/sql/test_predtest.sql b/src/test/modules/test_predtest/sql/test_predtest.sql new file mode 100644 index 0000000000..2734735843 --- /dev/null +++ b/src/test/modules/test_predtest/sql/test_predtest.sql @@ -0,0 +1,327 @@ +CREATE EXTENSION test_predtest; + +-- Make output more legible +\pset expanded on + +-- Test data + +-- all combinations of four boolean values +create table booleans as +select + case i%3 when 0 then true when 1 then false else null end as x, + case (i/3)%3 when 0 then true when 1 then false else null end as y, + case (i/9)%3 when 0 
then true when 1 then false else null end as z, + case (i/27)%3 when 0 then true when 1 then false else null end as w +from generate_series(0, 3*3*3*3-1) i; + +-- all combinations of two integers 0..9, plus null +create table integers as +select + case i%11 when 10 then null else i%11 end as x, + case (i/11)%11 when 10 then null else (i/11)%11 end as y +from generate_series(0, 11*11-1) i; + +-- and a simple strict function that's opaque to the optimizer +create function strictf(bool, bool) returns bool +language plpgsql as $$begin return $1 and not $2; end$$ strict; + +-- Basic proof rules for single boolean variables + +select * from test_predtest($$ +select x, x +from booleans +$$); + +select * from test_predtest($$ +select x, not x +from booleans +$$); + +select * from test_predtest($$ +select not x, x +from booleans +$$); + +select * from test_predtest($$ +select not x, not x +from booleans +$$); + +select * from test_predtest($$ +select x is not null, x +from booleans +$$); + +select * from test_predtest($$ +select x is not null, x is null +from integers +$$); + +select * from test_predtest($$ +select x is null, x is not null +from integers +$$); + +select * from test_predtest($$ +select x is not true, x +from booleans +$$); + +select * from test_predtest($$ +select x, x is not true +from booleans +$$); + +select * from test_predtest($$ +select x is false, x +from booleans +$$); + +select * from test_predtest($$ +select x, x is false +from booleans +$$); + +select * from test_predtest($$ +select x is unknown, x +from booleans +$$); + +select * from test_predtest($$ +select x, x is unknown +from booleans +$$); + +-- Assorted not-so-trivial refutation rules + +select * from test_predtest($$ +select x is null, x +from booleans +$$); + +select * from test_predtest($$ +select x, x is null +from booleans +$$); + +select * from test_predtest($$ +select strictf(x,y), x is null +from booleans +$$); + +select * from test_predtest($$ +select (x is not null) is not true, x +from booleans +$$); + +select * from test_predtest($$ +select strictf(x,y), (x is not null) is false +from booleans +$$); + +select * from test_predtest($$ +select x > y, (y < x) is false +from integers +$$); + +-- Tests involving AND/OR constructs + +select * from test_predtest($$ +select x, x and y +from booleans +$$); + +select * from test_predtest($$ +select not x, x and y +from booleans +$$); + +select * from test_predtest($$ +select x, not x and y +from booleans +$$); + +select * from test_predtest($$ +select x or y, x +from booleans +$$); + +select * from test_predtest($$ +select x and y, x +from booleans +$$); + +select * from test_predtest($$ +select x and y, not x +from booleans +$$); + +select * from test_predtest($$ +select x and y, y and x +from booleans +$$); + +select * from test_predtest($$ +select not y, y and x +from booleans +$$); + +select * from test_predtest($$ +select x or y, y or x +from booleans +$$); + +select * from test_predtest($$ +select x or y or z, x or z +from booleans +$$); + +select * from test_predtest($$ +select x and z, x and y and z +from booleans +$$); + +select * from test_predtest($$ +select z or w, x or y +from booleans +$$); + +select * from test_predtest($$ +select z and w, x or y +from booleans +$$); + +select * from test_predtest($$ +select x, (x and y) or (x and z) +from booleans +$$); + +select * from test_predtest($$ +select (x and y) or z, y and x +from booleans +$$); + +select * from test_predtest($$ +select (not x or not y) and z, y and x +from booleans +$$); + +select * 
from test_predtest($$ +select y or x, (x or y) and z +from booleans +$$); + +select * from test_predtest($$ +select not x and not y, (x or y) and z +from booleans +$$); + +-- Tests using btree operator knowledge + +select * from test_predtest($$ +select x <= y, x < y +from integers +$$); + +select * from test_predtest($$ +select x <= y, x > y +from integers +$$); + +select * from test_predtest($$ +select x <= y, y >= x +from integers +$$); + +select * from test_predtest($$ +select x <= y, y > x and y < x+2 +from integers +$$); + +select * from test_predtest($$ +select x <= 5, x <= 7 +from integers +$$); + +select * from test_predtest($$ +select x <= 5, x > 7 +from integers +$$); + +select * from test_predtest($$ +select x <= 5, 7 > x +from integers +$$); + +select * from test_predtest($$ +select 5 >= x, 7 > x +from integers +$$); + +select * from test_predtest($$ +select 5 >= x, x > 7 +from integers +$$); + +select * from test_predtest($$ +select 5 = x, x = 7 +from integers +$$); + +select * from test_predtest($$ +select x is not null, x > 7 +from integers +$$); + +select * from test_predtest($$ +select x is not null, int4lt(x,8) +from integers +$$); + +select * from test_predtest($$ +select x is null, x > 7 +from integers +$$); + +select * from test_predtest($$ +select x is null, int4lt(x,8) +from integers +$$); + +select * from test_predtest($$ +select x is not null, x < 'foo' +from (values + ('aaa'::varchar), ('zzz'::varchar), (null)) as v(x) +$$); + +-- Cases using ScalarArrayOpExpr + +select * from test_predtest($$ +select x <= 5, x in (1,3,5) +from integers +$$); + +select * from test_predtest($$ +select x <= 5, x in (1,3,5,7) +from integers +$$); + +select * from test_predtest($$ +select x <= 5, x in (1,3,5,null) +from integers +$$); + +select * from test_predtest($$ +select x in (null,1,3,5,7), x in (1,3,5) +from integers +$$); + +select * from test_predtest($$ +select x <= 5, x < all(array[1,3,5]) +from integers +$$); + +select * from test_predtest($$ +select x <= y, x = any(array[1,3,y]) +from integers +$$); diff --git a/src/test/modules/test_predtest/test_predtest--1.0.sql b/src/test/modules/test_predtest/test_predtest--1.0.sql new file mode 100644 index 0000000000..11e144493d --- /dev/null +++ b/src/test/modules/test_predtest/test_predtest--1.0.sql @@ -0,0 +1,16 @@ +/* src/test/modules/test_predtest/test_predtest--1.0.sql */ + +-- complain if script is sourced in psql, rather than via CREATE EXTENSION +\echo Use "CREATE EXTENSION test_predtest" to load this file. \quit + +CREATE FUNCTION test_predtest(query text, + OUT strong_implied_by bool, + OUT weak_implied_by bool, + OUT strong_refuted_by bool, + OUT weak_refuted_by bool, + OUT s_i_holds bool, + OUT w_i_holds bool, + OUT s_r_holds bool, + OUT w_r_holds bool) +STRICT +AS 'MODULE_PATHNAME' LANGUAGE C; diff --git a/src/test/modules/test_predtest/test_predtest.c b/src/test/modules/test_predtest/test_predtest.c new file mode 100644 index 0000000000..51320ade2e --- /dev/null +++ b/src/test/modules/test_predtest/test_predtest.c @@ -0,0 +1,218 @@ +/*-------------------------------------------------------------------------- + * + * test_predtest.c + * Test correctness of optimizer's predicate proof logic. 
+ * + * Copyright (c) 2018, PostgreSQL Global Development Group + * + * IDENTIFICATION + * src/test/modules/test_predtest/test_predtest.c + * + * ------------------------------------------------------------------------- + */ + +#include "postgres.h" + +#include "access/htup_details.h" +#include "catalog/pg_type.h" +#include "executor/spi.h" +#include "funcapi.h" +#include "optimizer/clauses.h" +#include "optimizer/predtest.h" +#include "utils/builtins.h" + +PG_MODULE_MAGIC; + +/* + * test_predtest(query text) returns record + */ +PG_FUNCTION_INFO_V1(test_predtest); + +Datum +test_predtest(PG_FUNCTION_ARGS) +{ + text *txt = PG_GETARG_TEXT_PP(0); + char *query_string = text_to_cstring(txt); + SPIPlanPtr spiplan; + int spirc; + TupleDesc tupdesc; + bool s_i_holds, + w_i_holds, + s_r_holds, + w_r_holds; + CachedPlan *cplan; + PlannedStmt *stmt; + Plan *plan; + Expr *clause1; + Expr *clause2; + bool strong_implied_by, + weak_implied_by, + strong_refuted_by, + weak_refuted_by; + Datum values[8]; + bool nulls[8]; + int i; + + /* We use SPI to parse, plan, and execute the test query */ + if (SPI_connect() != SPI_OK_CONNECT) + elog(ERROR, "SPI_connect failed"); + + /* + * First, plan and execute the query, and inspect the results. To the + * extent that the query fully exercises the two expressions, this + * provides an experimental indication of whether implication or + * refutation holds. + */ + spiplan = SPI_prepare(query_string, 0, NULL); + if (spiplan == NULL) + elog(ERROR, "SPI_prepare failed for \"%s\"", query_string); + + spirc = SPI_execute_plan(spiplan, NULL, NULL, true, 0); + if (spirc != SPI_OK_SELECT) + elog(ERROR, "failed to execute \"%s\"", query_string); + tupdesc = SPI_tuptable->tupdesc; + if (tupdesc->natts != 2 || + TupleDescAttr(tupdesc, 0)->atttypid != BOOLOID || + TupleDescAttr(tupdesc, 1)->atttypid != BOOLOID) + elog(ERROR, "query must yield two boolean columns"); + + s_i_holds = w_i_holds = s_r_holds = w_r_holds = true; + for (i = 0; i < SPI_processed; i++) + { + HeapTuple tup = SPI_tuptable->vals[i]; + Datum dat; + bool isnull; + char c1, + c2; + + /* Extract column values in a 3-way representation */ + dat = SPI_getbinval(tup, tupdesc, 1, &isnull); + if (isnull) + c1 = 'n'; + else if (DatumGetBool(dat)) + c1 = 't'; + else + c1 = 'f'; + + dat = SPI_getbinval(tup, tupdesc, 2, &isnull); + if (isnull) + c2 = 'n'; + else if (DatumGetBool(dat)) + c2 = 't'; + else + c2 = 'f'; + + /* Check for violations of various proof conditions */ + + /* strong implication: truth of c2 implies truth of c1 */ + if (c2 == 't' && c1 != 't') + s_i_holds = false; + /* weak implication: non-falsity of c2 implies non-falsity of c1 */ + if (c2 != 'f' && c1 == 'f') + w_i_holds = false; + /* strong refutation: truth of c2 implies falsity of c1 */ + if (c2 == 't' && c1 != 'f') + s_r_holds = false; + /* weak refutation: truth of c2 implies non-truth of c1 */ + if (c2 == 't' && c1 == 't') + w_r_holds = false; + } + + /* + * Now, dig the clause querytrees out of the plan, and see what predtest.c + * does with them. 
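 *
 * (Editorial illustration, not part of the original patch: for the test
 * query "select x, x is null from booleans", the executed rows reduce to
 * three distinct (c1, c2) combinations: ('t','f'), ('f','f') and
 * ('n','t').  The ('n','t') row has c2 = 't' while c1 is neither 't' nor
 * 'f', so the loop above clears both s_i_holds and s_r_holds; no row has
 * c2 = 't' together with c1 = 't', so w_r_holds stays true, and no row
 * pairs a non-'f' c2 with an 'f' c1, so w_i_holds stays true.  That is
 * exactly the f/t/f/t pattern of the *_holds columns in this module's
 * expected output for that query.)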
+ */ + cplan = SPI_plan_get_cached_plan(spiplan); + + if (list_length(cplan->stmt_list) != 1) + elog(ERROR, "failed to decipher query plan"); + stmt = linitial_node(PlannedStmt, cplan->stmt_list); + if (stmt->commandType != CMD_SELECT) + elog(ERROR, "failed to decipher query plan"); + plan = stmt->planTree; + Assert(list_length(plan->targetlist) >= 2); + clause1 = castNode(TargetEntry, linitial(plan->targetlist))->expr; + clause2 = castNode(TargetEntry, lsecond(plan->targetlist))->expr; + + /* + * Because the clauses are in the SELECT list, preprocess_expression did + * not pass them through canonicalize_qual nor make_ands_implicit. + * + * We can't do canonicalize_qual here, since it's unclear whether the + * expressions ought to be treated as WHERE or CHECK clauses. Fortunately, + * useful test expressions wouldn't be affected by those transformations + * anyway. We should do make_ands_implicit, though. + * + * Another way in which this does not exactly duplicate the normal usage + * of the proof functions is that they are often given qual clauses + * containing RestrictInfo nodes. But since predtest.c just looks through + * those anyway, it seems OK to not worry about that point. + */ + clause1 = (Expr *) make_ands_implicit(clause1); + clause2 = (Expr *) make_ands_implicit(clause2); + + strong_implied_by = predicate_implied_by((List *) clause1, + (List *) clause2, + false); + + weak_implied_by = predicate_implied_by((List *) clause1, + (List *) clause2, + true); + + strong_refuted_by = predicate_refuted_by((List *) clause1, + (List *) clause2, + false); + + weak_refuted_by = predicate_refuted_by((List *) clause1, + (List *) clause2, + true); + + /* + * Issue warning if any proof is demonstrably incorrect. + */ + if (strong_implied_by && !s_i_holds) + elog(WARNING, "strong_implied_by result is incorrect"); + if (weak_implied_by && !w_i_holds) + elog(WARNING, "weak_implied_by result is incorrect"); + if (strong_refuted_by && !s_r_holds) + elog(WARNING, "strong_refuted_by result is incorrect"); + if (weak_refuted_by && !w_r_holds) + elog(WARNING, "weak_refuted_by result is incorrect"); + + /* + * Clean up and return a record of the results. 
+ */ + if (SPI_finish() != SPI_OK_FINISH) + elog(ERROR, "SPI_finish failed"); + + tupdesc = CreateTemplateTupleDesc(8, false); + TupleDescInitEntry(tupdesc, (AttrNumber) 1, + "strong_implied_by", BOOLOID, -1, 0); + TupleDescInitEntry(tupdesc, (AttrNumber) 2, + "weak_implied_by", BOOLOID, -1, 0); + TupleDescInitEntry(tupdesc, (AttrNumber) 3, + "strong_refuted_by", BOOLOID, -1, 0); + TupleDescInitEntry(tupdesc, (AttrNumber) 4, + "weak_refuted_by", BOOLOID, -1, 0); + TupleDescInitEntry(tupdesc, (AttrNumber) 5, + "s_i_holds", BOOLOID, -1, 0); + TupleDescInitEntry(tupdesc, (AttrNumber) 6, + "w_i_holds", BOOLOID, -1, 0); + TupleDescInitEntry(tupdesc, (AttrNumber) 7, + "s_r_holds", BOOLOID, -1, 0); + TupleDescInitEntry(tupdesc, (AttrNumber) 8, + "w_r_holds", BOOLOID, -1, 0); + tupdesc = BlessTupleDesc(tupdesc); + + MemSet(nulls, 0, sizeof(nulls)); + values[0] = BoolGetDatum(strong_implied_by); + values[1] = BoolGetDatum(weak_implied_by); + values[2] = BoolGetDatum(strong_refuted_by); + values[3] = BoolGetDatum(weak_refuted_by); + values[4] = BoolGetDatum(s_i_holds); + values[5] = BoolGetDatum(w_i_holds); + values[6] = BoolGetDatum(s_r_holds); + values[7] = BoolGetDatum(w_r_holds); + + PG_RETURN_DATUM(HeapTupleGetDatum(heap_form_tuple(tupdesc, values, nulls))); +} diff --git a/src/test/modules/test_predtest/test_predtest.control b/src/test/modules/test_predtest/test_predtest.control new file mode 100644 index 0000000000..a899a9dbe5 --- /dev/null +++ b/src/test/modules/test_predtest/test_predtest.control @@ -0,0 +1,4 @@ +comment = 'Test code for optimizer/util/predtest.c' +default_version = '1.0' +module_pathname = '$libdir/test_predtest' +relocatable = true diff --git a/src/test/modules/test_rbtree/.gitignore b/src/test/modules/test_rbtree/.gitignore new file mode 100644 index 0000000000..5dcb3ff972 --- /dev/null +++ b/src/test/modules/test_rbtree/.gitignore @@ -0,0 +1,4 @@ +# Generated subdirectories +/log/ +/results/ +/tmp_check/ diff --git a/src/test/modules/test_rbtree/Makefile b/src/test/modules/test_rbtree/Makefile new file mode 100644 index 0000000000..a4184b4d2e --- /dev/null +++ b/src/test/modules/test_rbtree/Makefile @@ -0,0 +1,21 @@ +# src/test/modules/test_rbtree/Makefile + +MODULE_big = test_rbtree +OBJS = test_rbtree.o $(WIN32RES) +PGFILEDESC = "test_rbtree - test code for red-black tree library" + +EXTENSION = test_rbtree +DATA = test_rbtree--1.0.sql + +REGRESS = test_rbtree + +ifdef USE_PGXS +PG_CONFIG = pg_config +PGXS := $(shell $(PG_CONFIG) --pgxs) +include $(PGXS) +else +subdir = src/test/modules/test_rbtree +top_builddir = ../../../.. +include $(top_builddir)/src/Makefile.global +include $(top_srcdir)/contrib/contrib-global.mk +endif diff --git a/src/test/modules/test_rbtree/README b/src/test/modules/test_rbtree/README new file mode 100644 index 0000000000..d69eb8d3e3 --- /dev/null +++ b/src/test/modules/test_rbtree/README @@ -0,0 +1,13 @@ +test_rbtree is a test module for checking the correctness of red-black +tree operations. + +These tests are performed on red-black trees that store integers. +Since the rbtree logic treats the comparison function as a black +box, it shouldn't be important exactly what the key type is. + +Checking the correctness of traversals is based on the fact that a red-black +tree is a binary search tree, so the elements should be visited in increasing +(for Left-Current-Right) or decreasing (for Right-Current-Left) order. + +Also, this module does some checks of the correctness of the find, delete +and leftmost operations. 
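To make the traversal check concrete, here is a condensed, illustrative sketch of the increasing-order test the README describes. It is not additional test code, just a boiled-down form of the testleftright() function that appears in test_rbtree.c below, reusing that file's create_int_rbtree() and rbt_populate() helpers; the function name here is invented for illustration.

    static void
    check_left_right_order(int size)
    {
        RBTree         *tree = create_int_rbtree();
        RBTreeIterator  iter;
        IntRBTreeNode  *node;
        int             lastKey = -1;

        /* insert the keys 0 .. size-1 in random order */
        rbt_populate(tree, size, 1);

        /* a left-right walk of a binary search tree must yield ascending keys */
        rbt_begin_iterate(tree, LeftRightWalk, &iter);
        while ((node = (IntRBTreeNode *) rbt_iterate(&iter)) != NULL)
        {
            if (node->key <= lastKey)
                elog(ERROR, "left-right walk gives elements not in sorted order");
            lastKey = node->key;
        }
    }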
diff --git a/src/test/modules/test_rbtree/expected/test_rbtree.out b/src/test/modules/test_rbtree/expected/test_rbtree.out new file mode 100644 index 0000000000..3e3295696e --- /dev/null +++ b/src/test/modules/test_rbtree/expected/test_rbtree.out @@ -0,0 +1,12 @@ +CREATE EXTENSION test_rbtree; +-- +-- These tests don't produce any interesting output. We're checking that +-- the operations complete without crashing or hanging and that none of their +-- internal sanity tests fail. +-- +SELECT test_rb_tree(10000); + test_rb_tree +-------------- + +(1 row) + diff --git a/src/test/modules/test_rbtree/sql/test_rbtree.sql b/src/test/modules/test_rbtree/sql/test_rbtree.sql new file mode 100644 index 0000000000..d8dc88e057 --- /dev/null +++ b/src/test/modules/test_rbtree/sql/test_rbtree.sql @@ -0,0 +1,8 @@ +CREATE EXTENSION test_rbtree; + +-- +-- These tests don't produce any interesting output. We're checking that +-- the operations complete without crashing or hanging and that none of their +-- internal sanity tests fail. +-- +SELECT test_rb_tree(10000); diff --git a/src/test/modules/test_rbtree/test_rbtree--1.0.sql b/src/test/modules/test_rbtree/test_rbtree--1.0.sql new file mode 100644 index 0000000000..04f2a3ada6 --- /dev/null +++ b/src/test/modules/test_rbtree/test_rbtree--1.0.sql @@ -0,0 +1,8 @@ +/* src/test/modules/test_rbtree/test_rbtree--1.0.sql */ + +-- complain if script is sourced in psql, rather than via CREATE EXTENSION +\echo Use "CREATE EXTENSION test_rbtree" to load this file. \quit + +CREATE FUNCTION test_rb_tree(size INTEGER) + RETURNS pg_catalog.void STRICT + AS 'MODULE_PATHNAME' LANGUAGE C; diff --git a/src/test/modules/test_rbtree/test_rbtree.c b/src/test/modules/test_rbtree/test_rbtree.c new file mode 100644 index 0000000000..e12284cd8c --- /dev/null +++ b/src/test/modules/test_rbtree/test_rbtree.c @@ -0,0 +1,413 @@ +/*-------------------------------------------------------------------------- + * + * test_rbtree.c + * Test correctness of red-black tree operations. + * + * Copyright (c) 2009-2018, PostgreSQL Global Development Group + * + * IDENTIFICATION + * src/test/modules/test_rbtree/test_rbtree.c + * + * ------------------------------------------------------------------------- + */ + +#include "postgres.h" + +#include "fmgr.h" +#include "lib/rbtree.h" +#include "utils/memutils.h" + +PG_MODULE_MAGIC; + + +/* + * Our test trees store an integer key, and nothing else. + */ +typedef struct IntRBTreeNode +{ + RBTNode rbtnode; + int key; +} IntRBTreeNode; + + +/* + * Node comparator. We don't worry about overflow in the subtraction, + * since none of our test keys are negative. + */ +static int +irbt_cmp(const RBTNode *a, const RBTNode *b, void *arg) +{ + const IntRBTreeNode *ea = (const IntRBTreeNode *) a; + const IntRBTreeNode *eb = (const IntRBTreeNode *) b; + + return ea->key - eb->key; +} + +/* + * Node combiner. For testing purposes, just check that library doesn't + * try to combine unequal keys. 
+ */ +static void +irbt_combine(RBTNode *existing, const RBTNode *newdata, void *arg) +{ + const IntRBTreeNode *eexist = (const IntRBTreeNode *) existing; + const IntRBTreeNode *enew = (const IntRBTreeNode *) newdata; + + if (eexist->key != enew->key) + elog(ERROR, "red-black tree combines %d into %d", + enew->key, eexist->key); +} + +/* Node allocator */ +static RBTNode * +irbt_alloc(void *arg) +{ + return (RBTNode *) palloc(sizeof(IntRBTreeNode)); +} + +/* Node freer */ +static void +irbt_free(RBTNode *node, void *arg) +{ + pfree(node); +} + +/* + * Create a red-black tree using our support functions + */ +static RBTree * +create_int_rbtree(void) +{ + return rbt_create(sizeof(IntRBTreeNode), + irbt_cmp, + irbt_combine, + irbt_alloc, + irbt_free, + NULL); +} + +/* + * Generate a random permutation of the integers 0..size-1 + */ +static int * +GetPermutation(int size) +{ + int *permutation; + int i; + + permutation = (int *) palloc(size * sizeof(int)); + + permutation[0] = 0; + + /* + * This is the "inside-out" variant of the Fisher-Yates shuffle algorithm. + * Notionally, we append each new value to the array and then swap it with + * a randomly-chosen array element (possibly including itself, else we + * fail to generate permutations with the last integer last). The swap + * step can be optimized by combining it with the insertion. + */ + for (i = 1; i < size; i++) + { + int j = random() % (i + 1); + + if (j < i) /* avoid fetching undefined data if j=i */ + permutation[i] = permutation[j]; + permutation[j] = i; + } + + return permutation; +} + +/* + * Populate an empty RBTree with "size" integers having the values + * 0, step, 2*step, 3*step, ..., inserting them in random order + */ +static void +rbt_populate(RBTree *tree, int size, int step) +{ + int *permutation = GetPermutation(size); + IntRBTreeNode node; + bool isNew; + int i; + + /* Insert values. We don't expect any collisions. */ + for (i = 0; i < size; i++) + { + node.key = step * permutation[i]; + rbt_insert(tree, (RBTNode *) &node, &isNew); + if (!isNew) + elog(ERROR, "unexpected !isNew result from rbt_insert"); + } + + /* + * Re-insert the first value to make sure collisions work right. It's + * probably not useful to test that case over again for all the values. + */ + if (size > 0) + { + node.key = step * permutation[0]; + rbt_insert(tree, (RBTNode *) &node, &isNew); + if (isNew) + elog(ERROR, "unexpected isNew result from rbt_insert"); + } + + pfree(permutation); +} + +/* + * Check the correctness of left-right traversal. + * Left-right traversal is correct if all elements are + * visited in increasing order. 
+ */ +static void +testleftright(int size) +{ + RBTree *tree = create_int_rbtree(); + IntRBTreeNode *node; + RBTreeIterator iter; + int lastKey = -1; + int count = 0; + + /* check iteration over empty tree */ + rbt_begin_iterate(tree, LeftRightWalk, &iter); + if (rbt_iterate(&iter) != NULL) + elog(ERROR, "left-right walk over empty tree produced an element"); + + /* fill tree with consecutive natural numbers */ + rbt_populate(tree, size, 1); + + /* iterate over the tree */ + rbt_begin_iterate(tree, LeftRightWalk, &iter); + + while ((node = (IntRBTreeNode *) rbt_iterate(&iter)) != NULL) + { + /* check that order is increasing */ + if (node->key <= lastKey) + elog(ERROR, "left-right walk gives elements not in sorted order"); + lastKey = node->key; + count++; + } + + if (lastKey != size - 1) + elog(ERROR, "left-right walk did not reach end"); + if (count != size) + elog(ERROR, "left-right walk missed some elements"); +} + +/* + * Check the correctness of right-left traversal. + * Right-left traversal is correct if all elements are + * visited in decreasing order. + */ +static void +testrightleft(int size) +{ + RBTree *tree = create_int_rbtree(); + IntRBTreeNode *node; + RBTreeIterator iter; + int lastKey = size; + int count = 0; + + /* check iteration over empty tree */ + rbt_begin_iterate(tree, RightLeftWalk, &iter); + if (rbt_iterate(&iter) != NULL) + elog(ERROR, "right-left walk over empty tree produced an element"); + + /* fill tree with consecutive natural numbers */ + rbt_populate(tree, size, 1); + + /* iterate over the tree */ + rbt_begin_iterate(tree, RightLeftWalk, &iter); + + while ((node = (IntRBTreeNode *) rbt_iterate(&iter)) != NULL) + { + /* check that order is decreasing */ + if (node->key >= lastKey) + elog(ERROR, "right-left walk gives elements not in sorted order"); + lastKey = node->key; + count++; + } + + if (lastKey != 0) + elog(ERROR, "right-left walk did not reach end"); + if (count != size) + elog(ERROR, "right-left walk missed some elements"); +} + +/* + * Check the correctness of the rbt_find operation by searching for + * both elements we inserted and elements we didn't. + */ +static void +testfind(int size) +{ + RBTree *tree = create_int_rbtree(); + int i; + + /* Insert even integers from 0 to 2 * (size-1) */ + rbt_populate(tree, size, 2); + + /* Check that all inserted elements can be found */ + for (i = 0; i < size; i++) + { + IntRBTreeNode node; + IntRBTreeNode *resultNode; + + node.key = 2 * i; + resultNode = (IntRBTreeNode *) rbt_find(tree, (RBTNode *) &node); + if (resultNode == NULL) + elog(ERROR, "inserted element was not found"); + if (node.key != resultNode->key) + elog(ERROR, "find operation in rbtree gave wrong result"); + } + + /* + * Check that not-inserted elements can not be found, being sure to try + * values before the first and after the last element. + */ + for (i = -1; i <= 2 * size; i += 2) + { + IntRBTreeNode node; + IntRBTreeNode *resultNode; + + node.key = i; + resultNode = (IntRBTreeNode *) rbt_find(tree, (RBTNode *) &node); + if (resultNode != NULL) + elog(ERROR, "not-inserted element was found"); + } +} + +/* + * Check the correctness of the rbt_leftmost operation. + * This operation should always return the smallest element of the tree. 
+ */ +static void +testleftmost(int size) +{ + RBTree *tree = create_int_rbtree(); + IntRBTreeNode *result; + + /* Check that empty tree has no leftmost element */ + if (rbt_leftmost(tree) != NULL) + elog(ERROR, "leftmost node of empty tree is not NULL"); + + /* fill tree with consecutive natural numbers */ + rbt_populate(tree, size, 1); + + /* Check that leftmost element is the smallest one */ + result = (IntRBTreeNode *) rbt_leftmost(tree); + if (result == NULL || result->key != 0) + elog(ERROR, "rbt_leftmost gave wrong result"); +} + +/* + * Check the correctness of the rbt_delete operation. + */ +static void +testdelete(int size, int delsize) +{ + RBTree *tree = create_int_rbtree(); + int *deleteIds; + bool *chosen; + int i; + + /* fill tree with consecutive natural numbers */ + rbt_populate(tree, size, 1); + + /* Choose unique ids to delete */ + deleteIds = (int *) palloc(delsize * sizeof(int)); + chosen = (bool *) palloc0(size * sizeof(bool)); + + for (i = 0; i < delsize; i++) + { + int k = random() % size; + + while (chosen[k]) + k = (k + 1) % size; + deleteIds[i] = k; + chosen[k] = true; + } + + /* Delete elements */ + for (i = 0; i < delsize; i++) + { + IntRBTreeNode find; + IntRBTreeNode *node; + + find.key = deleteIds[i]; + /* Locate the node to be deleted */ + node = (IntRBTreeNode *) rbt_find(tree, (RBTNode *) &find); + if (node == NULL || node->key != deleteIds[i]) + elog(ERROR, "expected element was not found during deleting"); + /* Delete it */ + rbt_delete(tree, (RBTNode *) node); + } + + /* Check that deleted elements are deleted */ + for (i = 0; i < size; i++) + { + IntRBTreeNode node; + IntRBTreeNode *result; + + node.key = i; + result = (IntRBTreeNode *) rbt_find(tree, (RBTNode *) &node); + if (chosen[i]) + { + /* Deleted element should be absent */ + if (result != NULL) + elog(ERROR, "deleted element still present in the rbtree"); + } + else + { + /* Else it should be present */ + if (result == NULL || result->key != i) + elog(ERROR, "delete operation removed wrong rbtree value"); + } + } + + /* Delete remaining elements, so as to exercise reducing tree to empty */ + for (i = 0; i < size; i++) + { + IntRBTreeNode find; + IntRBTreeNode *node; + + if (chosen[i]) + continue; + find.key = i; + /* Locate the node to be deleted */ + node = (IntRBTreeNode *) rbt_find(tree, (RBTNode *) &find); + if (node == NULL || node->key != i) + elog(ERROR, "expected element was not found during deleting"); + /* Delete it */ + rbt_delete(tree, (RBTNode *) node); + } + + /* Tree should now be empty */ + if (rbt_leftmost(tree) != NULL) + elog(ERROR, "deleting all elements failed"); + + pfree(deleteIds); + pfree(chosen); +} + +/* + * SQL-callable entry point to perform all tests + * + * Argument is the number of entries to put in the trees + */ +PG_FUNCTION_INFO_V1(test_rb_tree); + +Datum +test_rb_tree(PG_FUNCTION_ARGS) +{ + int size = PG_GETARG_INT32(0); + + if (size <= 0 || size > MaxAllocSize / sizeof(int)) + elog(ERROR, "invalid size for test_rb_tree: %d", size); + testleftright(size); + testrightleft(size); + testfind(size); + testleftmost(size); + testdelete(size, Max(size / 10, 1)); + PG_RETURN_VOID(); +} diff --git a/src/test/modules/test_rbtree/test_rbtree.control b/src/test/modules/test_rbtree/test_rbtree.control new file mode 100644 index 0000000000..17966a5d3f --- /dev/null +++ b/src/test/modules/test_rbtree/test_rbtree.control @@ -0,0 +1,4 @@ +comment = 'Test code for red-black tree library' +default_version = '1.0' +module_pathname = '$libdir/test_rbtree' +relocatable = true 
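The permutation generator in test_rbtree.c above deserves a second look. The following is an illustrative, standalone restatement of its "inside-out" Fisher-Yates step (the function name is invented for this sketch), showing why the swap index j must be allowed to equal i:

    /*
     * Inside-out Fisher-Yates: conceptually append i to the array, then swap
     * it with a randomly chosen slot in 0..i.  Allowing j == i lets the newest
     * element stay in place, so permutations ending in the last value can
     * still be produced.
     */
    static void
    inside_out_shuffle(int *a, int n)
    {
        int     i;

        if (n > 0)
            a[0] = 0;
        for (i = 1; i < n; i++)
        {
            int     j = random() % (i + 1);     /* note: j may equal i */

            if (j < i)          /* skip the self-swap; a[i] is not yet set */
                a[i] = a[j];
            a[j] = i;
        }
    }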
diff --git a/src/test/modules/test_rls_hooks/test_rls_hooks.c b/src/test/modules/test_rls_hooks/test_rls_hooks.c index 65bf3e33c9..d492697e88 100644 --- a/src/test/modules/test_rls_hooks/test_rls_hooks.c +++ b/src/test/modules/test_rls_hooks/test_rls_hooks.c @@ -3,7 +3,7 @@ * test_rls_hooks.c * Code for testing RLS hooks. * - * Copyright (c) 2015-2017, PostgreSQL Global Development Group + * Copyright (c) 2015-2018, PostgreSQL Global Development Group * * IDENTIFICATION * src/test/modules/test_rls_hooks/test_rls_hooks.c @@ -18,16 +18,16 @@ #include "test_rls_hooks.h" -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include +#include "catalog/pg_type.h" +#include "nodes/makefuncs.h" +#include "nodes/makefuncs.h" +#include "parser/parse_clause.h" +#include "parser/parse_node.h" +#include "parser/parse_relation.h" +#include "rewrite/rowsecurity.h" +#include "utils/acl.h" +#include "utils/rel.h" +#include "utils/relcache.h" PG_MODULE_MAGIC; @@ -80,8 +80,8 @@ test_rls_hooks_permissive(CmdType cmdtype, Relation relation) qual_pstate = make_parsestate(NULL); - rte = addRangeTableEntryForRelation(qual_pstate, relation, NULL, false, - false); + rte = addRangeTableEntryForRelation(qual_pstate, relation, AccessShareLock, + NULL, false, false); addRTEtoQuery(qual_pstate, rte, false, true, true); role = ObjectIdGetDatum(ACL_ID_PUBLIC); @@ -143,8 +143,8 @@ test_rls_hooks_restrictive(CmdType cmdtype, Relation relation) qual_pstate = make_parsestate(NULL); - rte = addRangeTableEntryForRelation(qual_pstate, relation, NULL, false, - false); + rte = addRangeTableEntryForRelation(qual_pstate, relation, AccessShareLock, + NULL, false, false); addRTEtoQuery(qual_pstate, rte, false, true, true); role = ObjectIdGetDatum(ACL_ID_PUBLIC); diff --git a/src/test/modules/test_rls_hooks/test_rls_hooks.h b/src/test/modules/test_rls_hooks/test_rls_hooks.h index 81f7b18090..774c64ff43 100644 --- a/src/test/modules/test_rls_hooks/test_rls_hooks.h +++ b/src/test/modules/test_rls_hooks/test_rls_hooks.h @@ -3,7 +3,7 @@ * test_rls_hooks.h * Definitions for RLS hooks * - * Copyright (c) 2015-2017, PostgreSQL Global Development Group + * Copyright (c) 2015-2018, PostgreSQL Global Development Group * * IDENTIFICATION * src/test/modules/test_rls_hooks/test_rls_hooks.h diff --git a/src/test/modules/test_shm_mq/setup.c b/src/test/modules/test_shm_mq/setup.c index 3ae9018360..97e8617b3e 100644 --- a/src/test/modules/test_shm_mq/setup.c +++ b/src/test/modules/test_shm_mq/setup.c @@ -5,7 +5,7 @@ * number of background workers for shared memory message queue * testing. 
* - * Copyright (c) 2013-2017, PostgreSQL Global Development Group + * Copyright (c) 2013-2018, PostgreSQL Global Development Group * * IDENTIFICATION * src/test/modules/test_shm_mq/setup.c @@ -219,7 +219,7 @@ setup_background_workers(int nworkers, dsm_segment *seg) worker.bgw_restart_time = BGW_NEVER_RESTART; sprintf(worker.bgw_library_name, "test_shm_mq"); sprintf(worker.bgw_function_name, "test_shm_mq_main"); - snprintf(worker.bgw_name, BGW_MAXLEN, "test_shm_mq"); + snprintf(worker.bgw_type, BGW_MAXLEN, "test_shm_mq"); worker.bgw_main_arg = UInt32GetDatum(dsm_segment_handle(seg)); /* set bgw_notify_pid, so we can detect if the worker stops */ worker.bgw_notify_pid = MyProcPid; diff --git a/src/test/modules/test_shm_mq/test.c b/src/test/modules/test_shm_mq/test.c index 7a6ad23f75..ebab986601 100644 --- a/src/test/modules/test_shm_mq/test.c +++ b/src/test/modules/test_shm_mq/test.c @@ -3,7 +3,7 @@ * test.c * Test harness code for shared memory message queues. * - * Copyright (c) 2013-2017, PostgreSQL Global Development Group + * Copyright (c) 2013-2018, PostgreSQL Global Development Group * * IDENTIFICATION * src/test/modules/test_shm_mq/test.c diff --git a/src/test/modules/test_shm_mq/test_shm_mq.h b/src/test/modules/test_shm_mq/test_shm_mq.h index e76ecab891..2134b1fdf1 100644 --- a/src/test/modules/test_shm_mq/test_shm_mq.h +++ b/src/test/modules/test_shm_mq/test_shm_mq.h @@ -3,7 +3,7 @@ * test_shm_mq.h * Definitions for shared memory message queues * - * Copyright (c) 2013-2017, PostgreSQL Global Development Group + * Copyright (c) 2013-2018, PostgreSQL Global Development Group * * IDENTIFICATION * src/test/modules/test_shm_mq/test_shm_mq.h diff --git a/src/test/modules/test_shm_mq/worker.c b/src/test/modules/test_shm_mq/worker.c index e7e29f89c2..4e23419c52 100644 --- a/src/test/modules/test_shm_mq/worker.c +++ b/src/test/modules/test_shm_mq/worker.c @@ -9,7 +9,7 @@ * but it should be possible to use much of the control logic just * as presented here. * - * Copyright (c) 2013-2017, PostgreSQL Global Development Group + * Copyright (c) 2013-2018, PostgreSQL Global Development Group * * IDENTIFICATION * src/test/modules/test_shm_mq/worker.c @@ -24,7 +24,6 @@ #include "storage/procarray.h" #include "storage/shm_mq.h" #include "storage/shm_toc.h" -#include "utils/resowner.h" #include "test_shm_mq.h" @@ -69,13 +68,16 @@ test_shm_mq_main(Datum main_arg) * Connect to the dynamic shared memory segment. * * The backend that registered this worker passed us the ID of a shared - * memory segment to which we must attach for further instructions. In - * order to attach to dynamic shared memory, we need a resource owner. - * Once we've mapped the segment in our address space, attach to the table - * of contents so we can locate the various data structures we'll need to + * memory segment to which we must attach for further instructions. Once + * we've mapped the segment in our address space, attach to the table of + * contents so we can locate the various data structures we'll need to * find within the segment. + * + * Note: at this point, we have not created any ResourceOwner in this + * process. This will result in our DSM mapping surviving until process + * exit, which is fine. If there were a ResourceOwner, it would acquire + * ownership of the mapping, but we have no need for that. 
*/ - CurrentResourceOwner = ResourceOwnerCreate(NULL, "test_shm_mq worker"); seg = dsm_attach(DatumGetInt32(main_arg)); if (seg == NULL) ereport(ERROR, @@ -133,10 +135,8 @@ test_shm_mq_main(Datum main_arg) copy_messages(inqh, outqh); /* - * We're done. Explicitly detach the shared memory segment so that we - * don't get a resource leak warning at commit time. This will fire any - * on_dsm_detach callbacks we've registered, as well. Once that's done, - * we can go ahead and exit. + * We're done. For cleanliness, explicitly detach from the shared memory + * segment (that would happen anyway during process exit, though). */ dsm_detach(seg); proc_exit(1); diff --git a/src/test/modules/worker_spi/worker_spi.c b/src/test/modules/worker_spi/worker_spi.c index 12c8cd5774..0d705a3f2e 100644 --- a/src/test/modules/worker_spi/worker_spi.c +++ b/src/test/modules/worker_spi/worker_spi.c @@ -13,7 +13,7 @@ * "delta" type. Delta rows will be deleted by this worker and their values * aggregated into the total. * - * Copyright (c) 2013-2017, PostgreSQL Global Development Group + * Copyright (c) 2013-2018, PostgreSQL Global Development Group * * IDENTIFICATION * src/test/modules/worker_spi/worker_spi.c @@ -111,7 +111,7 @@ initialize_worker_spi(worktable *table) StartTransactionCommand(); SPI_connect(); PushActiveSnapshot(GetTransactionSnapshot()); - pgstat_report_activity(STATE_RUNNING, "initializing spi_worker schema"); + pgstat_report_activity(STATE_RUNNING, "initializing worker_spi schema"); /* XXX could we use CREATE SCHEMA IF NOT EXISTS? */ initStringInfo(&buf); @@ -179,7 +179,7 @@ worker_spi_main(Datum main_arg) BackgroundWorkerUnblockSignals(); /* Connect to our database */ - BackgroundWorkerInitializeConnection("postgres", NULL); + BackgroundWorkerInitializeConnection("postgres", NULL, 0); elog(LOG, "%s initialized with %s.%s", MyBgworkerEntry->bgw_name, table->schema, table->name); @@ -359,7 +359,8 @@ _PG_init(void) */ for (i = 1; i <= worker_spi_total_workers; i++) { - snprintf(worker.bgw_name, BGW_MAXLEN, "worker %d", i); + snprintf(worker.bgw_name, BGW_MAXLEN, "worker_spi worker %d", i); + snprintf(worker.bgw_type, BGW_MAXLEN, "worker_spi"); worker.bgw_main_arg = Int32GetDatum(i); RegisterBackgroundWorker(&worker); @@ -385,7 +386,8 @@ worker_spi_launch(PG_FUNCTION_ARGS) worker.bgw_restart_time = BGW_NEVER_RESTART; sprintf(worker.bgw_library_name, "worker_spi"); sprintf(worker.bgw_function_name, "worker_spi_main"); - snprintf(worker.bgw_name, BGW_MAXLEN, "worker %d", i); + snprintf(worker.bgw_name, BGW_MAXLEN, "worker_spi worker %d", i); + snprintf(worker.bgw_type, BGW_MAXLEN, "worker_spi"); worker.bgw_main_arg = Int32GetDatum(i); /* set bgw_notify_pid so that we can use WaitForBackgroundWorkerStartup */ worker.bgw_notify_pid = MyProcPid; diff --git a/src/test/perl/Makefile b/src/test/perl/Makefile index a974f358fd..8e7012d943 100644 --- a/src/test/perl/Makefile +++ b/src/test/perl/Makefile @@ -2,7 +2,7 @@ # # Makefile for src/test/perl # -# Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group +# Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group # Portions Copyright (c) 1994, Regents of the University of California # # src/test/perl/Makefile diff --git a/src/test/perl/PostgresNode.pm b/src/test/perl/PostgresNode.pm index d9aeb277d9..efdebc3877 100644 --- a/src/test/perl/PostgresNode.pm +++ b/src/test/perl/PostgresNode.pm @@ -82,11 +82,15 @@ package PostgresNode; use strict; use warnings; +use Carp; use Config; use Cwd; use Exporter 'import'; +use Fcntl 
qw(:mode); use File::Basename; +use File::Path qw(rmtree); use File::Spec; +use File::stat qw(stat); use File::Temp (); use IPC::Run; use RecursiveCopy; @@ -100,7 +104,7 @@ our @EXPORT = qw( get_new_node ); -our ($test_localhost, $test_pghost, $last_port_assigned, @all_nodes); +our ($test_localhost, $test_pghost, $last_port_assigned, @all_nodes, $died); # Windows path to virtual file system root @@ -149,11 +153,15 @@ sub new my $self = { _port => $pgport, _host => $pghost, - _basedir => TestLib::tempdir("data_" . $name), + _basedir => "$TestLib::tmp_check/t_${testname}_${name}_data", _name => $name, - _logfile => "$TestLib::log_path/${testname}_${name}.log" }; + _logfile => "$TestLib::log_path/${testname}_${name}.log" + }; bless $self, $class; + mkdir $self->{_basedir} + or + BAIL_OUT("could not create data directory \"$self->{_basedir}\": $!"); $self->dump_info; return $self; @@ -264,6 +272,26 @@ sub connstr =pod +=item $node->group_access() + +Does the data dir allow group access? + +=cut + +sub group_access +{ + my ($self) = @_; + + my $dir_stat = stat($self->data_dir); + + defined($dir_stat) + or die('unable to stat ' . $self->data_dir); + + return (S_IMODE($dir_stat->mode) == 0750); +} + +=pod + =item $node->data_dir() Returns the path to the data directory. postgresql.conf and pg_hba.conf are @@ -344,6 +372,7 @@ sub dump_info { my ($self) = @_; print $self->info; + return; } @@ -355,16 +384,17 @@ sub set_replication_conf my $pgdata = $self->data_dir; $self->host eq $test_pghost - or die "set_replication_conf only works with the default host"; + or croak "set_replication_conf only works with the default host"; open my $hba, '>>', "$pgdata/pg_hba.conf"; print $hba "\n# Allow replication (set up by PostgresNode.pm)\n"; if ($TestLib::windows_os) { print $hba -"host replication all $test_localhost/32 sspi include_realm=1 map=regress\n"; + "host replication all $test_localhost/32 sspi include_realm=1 map=regress\n"; } close $hba; + return; } =pod @@ -415,6 +445,7 @@ sub init print $conf "restart_after_crash = off\n"; print $conf "log_line_prefix = '%m [%p] %q%a '\n"; print $conf "log_statement = all\n"; + print $conf "log_replication_commands = on\n"; print $conf "wal_retrieve_retry_interval = '500ms'\n"; print $conf "port = $port\n"; @@ -430,7 +461,6 @@ sub init } print $conf "max_wal_senders = 5\n"; print $conf "max_replication_slots = 5\n"; - print $conf "wal_keep_segments = 20\n"; print $conf "max_wal_size = 128MB\n"; print $conf "shared_buffers = 1MB\n"; print $conf "wal_log_hints = on\n"; @@ -454,8 +484,12 @@ sub init } close $conf; + chmod($self->group_access ? 0640 : 0600, "$pgdata/postgresql.conf") + or die("unable to set permissions for $pgdata/postgresql.conf"); + $self->set_replication_conf if $params{allows_streaming}; $self->enable_archiving if $params{has_archiving}; + return; } =pod @@ -478,6 +512,11 @@ sub append_conf my $conffile = $self->data_dir . '/' . $filename; TestLib::append_to_file($conffile, $str . "\n"); + + chmod($self->group_access() ? 
0640 : 0600, $conffile) + or die("unable to set permissions for $conffile"); + + return; } =pod @@ -504,6 +543,7 @@ sub backup TestLib::system_or_bail('pg_basebackup', '-D', $backup_path, '-p', $port, '--no-sync'); print "# Backup finished\n"; + return; } =item $node->backup_fs_hot(backup_name) @@ -522,6 +562,7 @@ sub backup_fs_hot { my ($self, $backup_name) = @_; $self->_backup_fs($backup_name, 1); + return; } =item $node->backup_fs_cold(backup_name) @@ -538,6 +579,7 @@ sub backup_fs_cold { my ($self, $backup_name) = @_; $self->_backup_fs($backup_name, 0); + return; } @@ -578,6 +620,7 @@ sub _backup_fs } print "# Backup finished\n"; + return; } @@ -618,8 +661,8 @@ sub init_from_backup $params{has_restoring} = 0 unless defined $params{has_restoring}; print -"# Initializing node \"$node_name\" from backup \"$backup_name\" of node \"$root_name\"\n"; - die "Backup \"$backup_name\" does not exist at $backup_path" + "# Initializing node \"$node_name\" from backup \"$backup_name\" of node \"$root_name\"\n"; + croak "Backup \"$backup_name\" does not exist at $backup_path" unless -d $backup_path; mkdir $self->backup_dir; @@ -638,6 +681,7 @@ port = $port )); $self->enable_streaming($root_node) if $params{has_streaming}; $self->enable_restoring($root_node) if $params{has_restoring}; + return; } =pod @@ -669,6 +713,7 @@ sub start } $self->_update_pid(1); + return; } =pod @@ -694,6 +739,7 @@ sub stop print "### Stopping node \"$name\" using mode $mode\n"; TestLib::system_or_bail('pg_ctl', '-D', $pgdata, '-m', $mode, 'stop'); $self->_update_pid(0); + return; } =pod @@ -712,6 +758,7 @@ sub reload my $name = $self->name; print "### Reloading node \"$name\"\n"; TestLib::system_or_bail('pg_ctl', '-D', $pgdata, 'reload'); + return; } =pod @@ -733,6 +780,7 @@ sub restart TestLib::system_or_bail('pg_ctl', '-D', $pgdata, '-l', $logfile, 'restart'); $self->_update_pid(1); + return; } =pod @@ -753,6 +801,28 @@ sub promote print "### Promoting node \"$name\"\n"; TestLib::system_or_bail('pg_ctl', '-D', $pgdata, '-l', $logfile, 'promote'); + return; +} + +=pod + +=item $node->logrotate() + +Wrapper for pg_ctl logrotate + +=cut + +sub logrotate +{ + my ($self) = @_; + my $port = $self->port; + my $pgdata = $self->data_dir; + my $logfile = $self->logfile; + my $name = $self->name; + print "### Rotating log in node \"$name\"\n"; + TestLib::system_or_bail('pg_ctl', '-D', $pgdata, '-l', $logfile, + 'logrotate'); + return; } # Internal routine to enable streaming replication on a standby node. @@ -768,6 +838,7 @@ sub enable_streaming primary_conninfo='$root_connstr application_name=$name' standby_mode=on )); + return; } # Internal routine to enable archive recovery command on a standby node @@ -796,6 +867,7 @@ sub enable_restoring restore_command = '$copy_command' standby_mode = on )); + return; } # Internal routine to enable archiving @@ -825,6 +897,7 @@ sub enable_archiving archive_mode = on archive_command = '$copy_command' )); + return; } # Internal method @@ -851,6 +924,7 @@ sub _update_pid # Complain if we expected to find a pidfile. BAIL_OUT("postmaster.pid unexpectedly not present") if $is_running; + return; } =pod @@ -928,9 +1002,23 @@ sub get_new_node return $node; } +# Retain the errno on die() if set, else assume a generic errno of 1. +# This will instruct the END handler on how to handle artifacts left +# behind from tests. +$SIG{__DIE__} = sub { + if ($!) + { + $died = $!; + } + else + { + $died = 1; + } +}; + # Automatically shut down any still-running nodes when the test script exits. 
# Note that this just stops the postmasters (in the same order the nodes were -# created in). Temporary PGDATA directories are deleted, in an unspecified +# created in). Any temporary directories are deleted, in an unspecified # order, later when the File::Temp objects are destroyed. END { @@ -941,6 +1029,13 @@ END foreach my $node (@all_nodes) { $node->teardown_node; + + # skip clean if we are requested to retain the basedir + next if defined $ENV{'PG_TEST_NOCLEAN'}; + + # clean basedir on clean test invocation + $node->clean_node + if TestLib::all_tests_passing() && !defined $died && !$exit_code; } $? = $exit_code; @@ -959,6 +1054,23 @@ sub teardown_node my $self = shift; $self->stop('immediate'); + return; +} + +=pod + +=item $node->clean_node() + +Remove the base directory of the node if the node has been stopped. + +=cut + +sub clean_node +{ + my $self = shift; + + rmtree $self->{_basedir} unless defined $self->{_pid}; + return; } =pod @@ -1126,12 +1238,12 @@ sub psql my $ret; - # Run psql and capture any possible exceptions. If the exception is - # because of a timeout and the caller requested to handle that, just return - # and set the flag. Otherwise, and for any other exception, rethrow. - # - # For background, see - # http://search.cpan.org/~ether/Try-Tiny-0.24/lib/Try/Tiny.pm + # Run psql and capture any possible exceptions. If the exception is + # because of a timeout and the caller requested to handle that, just return + # and set the flag. Otherwise, and for any other exception, rethrow. + # + # For background, see + # https://metacpan.org/pod/release/ETHER/Try-Tiny-0.24/lib/Try/Tiny.pm do { local $@; @@ -1201,7 +1313,7 @@ sub psql die "connection error: '$$stderr'\nwhile running '@psql_params'" if $ret == 2; die -"error running SQL: '$$stderr'\nwhile running '@psql_params' with sql '$sql'" + "error running SQL: '$$stderr'\nwhile running '@psql_params' with sql '$sql'" if $ret == 3; die "psql returns $ret: '$$stderr'\nwhile running '@psql_params'"; } @@ -1257,9 +1369,18 @@ sub poll_query_until $attempts++; } - # The query result didn't change in 180 seconds. Give up. Print the stderr - # from the last attempt, hopefully that's useful for debugging. - diag $stderr; + # The query result didn't change in 180 seconds. Give up. Print the + # output from the last attempt, hopefully that's useful for debugging. + chomp($stderr); + $stderr =~ s/\r//g if $TestLib::windows_os; + diag qq(poll_query_until timed out executing this query: +$query +expecting this output: +$expected +last actual query output: +$stdout +with stderr: +$stderr); return 0; } @@ -1275,28 +1396,34 @@ PostgresNode. sub command_ok { + local $Test::Builder::Level = $Test::Builder::Level + 1; + my $self = shift; local $ENV{PGPORT} = $self->port; TestLib::command_ok(@_); + return; } =pod -=item $node->command_fails(...) - TestLib::command_fails with our PGPORT +=item $node->command_fails(...) -See command_ok(...) +TestLib::command_fails with our PGPORT. See command_ok(...) =cut sub command_fails { + local $Test::Builder::Level = $Test::Builder::Level + 1; + my $self = shift; local $ENV{PGPORT} = $self->port; TestLib::command_fails(@_); + return; } =pod @@ -1309,11 +1436,34 @@ TestLib::command_like with our PGPORT. See command_ok(...) sub command_like { + local $Test::Builder::Level = $Test::Builder::Level + 1; + my $self = shift; local $ENV{PGPORT} = $self->port; TestLib::command_like(@_); + return; +} + +=pod + +=item $node->command_checks_all(...) + +TestLib::command_checks_all with our PGPORT. See command_ok(...) 
+ +=cut + +sub command_checks_all +{ + local $Test::Builder::Level = $Test::Builder::Level + 1; + + my $self = shift; + + local $ENV{PGPORT} = $self->port; + + TestLib::command_checks_all(@_); + return; } =pod @@ -1330,6 +1480,8 @@ The log file is truncated prior to running the command, however. sub issues_sql_like { + local $Test::Builder::Level = $Test::Builder::Level + 1; + my ($self, $cmd, $expected_sql, $test_name) = @_; local $ENV{PGPORT} = $self->port; @@ -1339,6 +1491,7 @@ sub issues_sql_like ok($result, "@$cmd exit code 0"); my $log = TestLib::slurp_file($self->logfile); like($log, $expected_sql, "$test_name: SQL found in server log"); + return; } =pod @@ -1357,6 +1510,7 @@ sub run_log local $ENV{PGPORT} = $self->port; TestLib::run_log(@_); + return; } =pod @@ -1386,7 +1540,7 @@ sub lsn 'replay' => 'pg_last_wal_replay_lsn()'); $mode = '' if !defined($mode); - die "unknown mode for 'lsn': '$mode', valid modes are " + croak "unknown mode for 'lsn': '$mode', valid modes are " . join(', ', keys %modes) if !defined($modes{$mode}); @@ -1406,11 +1560,13 @@ sub lsn =item $node->wait_for_catchup(standby_name, mode, target_lsn) -Wait for the node with application_name standby_name (usually from node->name) +Wait for the node with application_name standby_name (usually from node->name, +also works for logical subscriptions) until its replication location in pg_stat_replication equals or passes the upstream's WAL insert point at the time this function is called. By default the replay_lsn is waited for, but 'mode' may be specified to wait for any of -sent|write|flush|replay. +sent|write|flush|replay. The connection catching up must be in a streaming +state. If there is no active replication connection from this peer, waits until poll_query_until timeout. @@ -1418,6 +1574,7 @@ poll_query_until timeout. Requires that the 'postgres' db exists and is accessible. target_lsn may be any arbitrary lsn, but is typically $master_node->lsn('insert'). +If omitted, pg_current_wal_lsn() is used. This is not a test. It die()s on failure. @@ -1429,7 +1586,7 @@ sub wait_for_catchup $mode = defined($mode) ? $mode : 'replay'; my %valid_modes = ('sent' => 1, 'write' => 1, 'flush' => 1, 'replay' => 1); - die "unknown mode $mode for 'wait_for_catchup', valid modes are " + croak "unknown mode $mode for 'wait_for_catchup', valid modes are " . join(', ', keys(%valid_modes)) unless exists($valid_modes{$mode}); @@ -1438,19 +1595,27 @@ sub wait_for_catchup { $standby_name = $standby_name->name; } - die 'target_lsn must be specified' unless defined($target_lsn); + my $lsn_expr; + if (defined($target_lsn)) + { + $lsn_expr = "'$target_lsn'"; + } + else + { + $lsn_expr = 'pg_current_wal_lsn()'; + } print "Waiting for replication conn " . $standby_name . "'s " . $mode . "_lsn to pass " - . $target_lsn . " on " + . $lsn_expr . " on " . $self->name . "\n"; my $query = -qq[SELECT '$target_lsn' <= ${mode}_lsn FROM pg_catalog.pg_stat_replication WHERE application_name = '$standby_name';]; + qq[SELECT $lsn_expr <= ${mode}_lsn AND state = 'streaming' FROM pg_catalog.pg_stat_replication WHERE application_name = '$standby_name';]; $self->poll_query_until('postgres', $query) - or die "timed out waiting for catchup, current location is " - . ($self->safe_psql('postgres', $query) || '(unknown)'); + or croak "timed out waiting for catchup"; print "done\n"; + return; } =pod @@ -1479,9 +1644,9 @@ sub wait_for_slot_catchup $mode = defined($mode) ? 
$mode : 'restart'; if (!($mode eq 'restart' || $mode eq 'confirmed_flush')) { - die "valid modes are restart, confirmed_flush"; + croak "valid modes are restart, confirmed_flush"; } - die 'target lsn must be specified' unless defined($target_lsn); + croak 'target lsn must be specified' unless defined($target_lsn); print "Waiting for replication slot " . $slot_name . "'s " . $mode @@ -1489,11 +1654,11 @@ sub wait_for_slot_catchup . $target_lsn . " on " . $self->name . "\n"; my $query = -qq[SELECT '$target_lsn' <= ${mode}_lsn FROM pg_catalog.pg_replication_slots WHERE slot_name = '$slot_name';]; + qq[SELECT '$target_lsn' <= ${mode}_lsn FROM pg_catalog.pg_replication_slots WHERE slot_name = '$slot_name';]; $self->poll_query_until('postgres', $query) - or die "timed out waiting for catchup, current location is " - . ($self->safe_psql('postgres', $query) || '(unknown)'); + or croak "timed out waiting for catchup"; print "done\n"; + return; } =pod @@ -1521,7 +1686,7 @@ null columns. sub query_hash { my ($self, $dbname, $query, @columns) = @_; - die 'calls in array context for multi-row results not supported yet' + croak 'calls in array context for multi-row results not supported yet' if (wantarray); # Replace __COLUMNS__ if found @@ -1565,7 +1730,7 @@ sub slot 'restart_lsn'); return $self->query_hash( 'postgres', -"SELECT __COLUMNS__ FROM pg_catalog.pg_replication_slots WHERE slot_name = '$slot_name'", + "SELECT __COLUMNS__ FROM pg_catalog.pg_replication_slots WHERE slot_name = '$slot_name'", @columns); } @@ -1590,14 +1755,14 @@ to check for timeout. retval is undef on timeout. sub pg_recvlogical_upto { - my ($self, $dbname, $slot_name, $endpos, $timeout_secs, %plugin_options) = - @_; + my ($self, $dbname, $slot_name, $endpos, $timeout_secs, %plugin_options) + = @_; my ($stdout, $stderr); my $timeout_exception = 'pg_recvlogical timed out'; - die 'slot name must be specified' unless defined($slot_name); - die 'endpos must be specified' unless defined($endpos); + croak 'slot name must be specified' unless defined($slot_name); + croak 'endpos must be specified' unless defined($endpos); my @cmd = ( 'pg_recvlogical', '-S', $slot_name, '--dbname', @@ -1607,7 +1772,7 @@ sub pg_recvlogical_upto while (my ($k, $v) = each %plugin_options) { - die "= is not permitted to appear in replication option name" + croak "= is not permitted to appear in replication option name" if ($k =~ qr/=/); push @cmd, "-o", "$k=$v"; } @@ -1640,7 +1805,7 @@ sub pg_recvlogical_upto unless $timeout->is_expired; die -"$exc_save waiting for endpos $endpos with stdout '$stdout', stderr '$stderr'" + "$exc_save waiting for endpos $endpos with stdout '$stdout', stderr '$stderr'" unless wantarray; } }; @@ -1655,7 +1820,7 @@ sub pg_recvlogical_upto else { die -"pg_recvlogical exited with code '$ret', stdout '$stdout' and stderr '$stderr'" + "pg_recvlogical exited with code '$ret', stdout '$stdout' and stderr '$stderr'" if $ret; return $stdout; } diff --git a/src/test/perl/RecursiveCopy.pm b/src/test/perl/RecursiveCopy.pm index 28ecaf6db2..baf5d0ac63 100644 --- a/src/test/perl/RecursiveCopy.pm +++ b/src/test/perl/RecursiveCopy.pm @@ -19,6 +19,7 @@ package RecursiveCopy; use strict; use warnings; +use Carp; use File::Basename; use File::Copy; @@ -29,12 +30,17 @@ use File::Copy; =head2 copypath($from, $to, %params) Recursively copy all files and directories from $from to $to. +Does not preserve file metadata (e.g., permissions). Only regular files and subdirectories are copied. 
Trying to copy other types of directory entries raises an exception. Raises an exception if a file would be overwritten, the source directory can't -be read, or any I/O operation fails. Always returns true. +be read, or any I/O operation fails. However, we silently ignore ENOENT on +open, because when copying from a live database it's possible for a file/dir +to be deleted after we see its directory entry but before we can open it. + +Always returns true. If the B parameter is given, it must be a subroutine reference. This subroutine will be called for each entry in the source directory with its @@ -63,9 +69,9 @@ sub copypath if (defined $params{filterfn}) { - die "if specified, filterfn must be a subroutine reference" + croak "if specified, filterfn must be a subroutine reference" unless defined(ref $params{filterfn}) - and (ref $params{filterfn} eq 'CODE'); + and (ref $params{filterfn} eq 'CODE'); $filterfn = $params{filterfn}; } @@ -74,6 +80,9 @@ sub copypath $filterfn = sub { return 1; }; } + # Complain if original path is bogus, because _copypath_recurse won't. + croak "\"$base_src_dir\" does not exist" if !-e $base_src_dir; + # Start recursive copy from current directory return _copypath_recurse($base_src_dir, $base_dest_dir, "", $filterfn); } @@ -89,40 +98,58 @@ sub _copypath_recurse return 1 unless &$filterfn($curr_path); # Check for symlink -- needed only on source dir - die "Cannot operate on symlinks" if -l $srcpath; - - # Can't handle symlinks or other weird things - die "Source path \"$srcpath\" is not a regular file or directory" - unless -f $srcpath - or -d $srcpath; + # (note: this will fall through quietly if file is already gone) + croak "Cannot operate on symlink \"$srcpath\"" if -l $srcpath; # Abort if destination path already exists. Should we allow directories # to exist already? - die "Destination path \"$destpath\" already exists" if -e $destpath; + croak "Destination path \"$destpath\" already exists" if -e $destpath; # If this source path is a file, simply copy it to destination with the # same name and we're done. if (-f $srcpath) { - copy($srcpath, $destpath) + my $fh; + unless (open($fh, '<', $srcpath)) + { + return 1 if ($!{ENOENT}); + die "open($srcpath) failed: $!"; + } + copy($fh, $destpath) or die "copy $srcpath -> $destpath failed: $!"; + close $fh; return 1; } - # Otherwise this is directory: create it on dest and recurse onto it. - mkdir($destpath) or die "mkdir($destpath) failed: $!"; - - opendir(my $directory, $srcpath) or die "could not opendir($srcpath): $!"; - while (my $entry = readdir($directory)) + # If it's a directory, create it on dest and recurse into it. + if (-d $srcpath) { - next if ($entry eq '.' or $entry eq '..'); - _copypath_recurse($base_src_dir, $base_dest_dir, - $curr_path eq '' ? $entry : "$curr_path/$entry", $filterfn) - or die "copypath $srcpath/$entry -> $destpath/$entry failed"; + my $directory; + unless (opendir($directory, $srcpath)) + { + return 1 if ($!{ENOENT}); + die "opendir($srcpath) failed: $!"; + } + + mkdir($destpath) or die "mkdir($destpath) failed: $!"; + + while (my $entry = readdir($directory)) + { + next if ($entry eq '.' or $entry eq '..'); + _copypath_recurse($base_src_dir, $base_dest_dir, + $curr_path eq '' ? $entry : "$curr_path/$entry", $filterfn) + or die "copypath $srcpath/$entry -> $destpath/$entry failed"; + } + + closedir($directory); + return 1; } - closedir($directory); - return 1; + # If it disappeared from sight, that's OK. 
+ return 1 if !-e $srcpath; + + # Else it's some weird file type; complain. + croak "Source path \"$srcpath\" is not a regular file or directory"; } 1; diff --git a/src/test/perl/SimpleTee.pm b/src/test/perl/SimpleTee.pm index ea2f2ee828..9de7b1ac32 100644 --- a/src/test/perl/SimpleTee.pm +++ b/src/test/perl/SimpleTee.pm @@ -13,7 +13,7 @@ use strict; sub TIEHANDLE { my $self = shift; - bless \@_, $self; + return bless \@_, $self; } sub PRINT diff --git a/src/test/perl/TestLib.pm b/src/test/perl/TestLib.pm index 6dba21c073..b9cb51b9d3 100644 --- a/src/test/perl/TestLib.pm +++ b/src/test/perl/TestLib.pm @@ -11,21 +11,28 @@ use strict; use warnings; use Config; +use Cwd; use Exporter 'import'; +use Fcntl qw(:mode); use File::Basename; +use File::Find; use File::Spec; +use File::stat qw(stat); use File::Temp (); use IPC::Run; use SimpleTee; -# specify a recent enough version of Test::More to support the note() function -use Test::More 0.82; +# specify a recent enough version of Test::More to support the done_testing() function +use Test::More 0.87; our @EXPORT = qw( generate_ascii_string slurp_dir slurp_file append_to_file + check_mode_recursive + chmod_recursive + check_pg_config system_or_bail system_log run_log @@ -39,6 +46,7 @@ our @EXPORT = qw( command_like command_like_safe command_fails_like + command_checks_all $windows_os ); @@ -65,7 +73,7 @@ BEGIN delete $ENV{PGPORT}; delete $ENV{PGHOST}; - $ENV{PGAPPNAME} = $0; + $ENV{PGAPPNAME} = basename($0); # Must be set early $windows_os = $Config{osname} eq 'MSWin32' || $Config{osname} eq 'msys'; @@ -74,6 +82,10 @@ BEGIN INIT { + # Return EPIPE instead of killing the process with SIGPIPE. An affected + # test may still fail, but it's more likely to report useful facts. + $SIG{PIPE} = 'IGNORE'; + # Determine output directories, and create them. The base path is the # TESTDIR environment variable, which is normally set by the invoking # Makefile. @@ -152,6 +164,24 @@ sub tempdir_short return File::Temp::tempdir(CLEANUP => 1); } +# Return the real directory for a virtual path directory under msys. +# The directory must exist. If it's not an existing directory or we're +# not under msys, return the input argument unchanged. +sub real_dir +{ + my $dir = "$_[0]"; + return $dir unless -d $dir; + return $dir unless $Config{osname} eq 'msys'; + my $here = cwd; + chdir $dir; + + # this odd way of calling 'pwd -W' is the only way that seems to work. + $dir = qx{sh -c "pwd -W"}; + chomp $dir; + chdir $here; + return $dir; +} + sub system_log { print("# Running: " . join(" ", @_) . "\n"); @@ -164,6 +194,7 @@ sub system_or_bail { BAIL_OUT("system $_[0] failed"); } + return; } sub run_log @@ -214,6 +245,133 @@ sub append_to_file or die "could not write \"$filename\": $!"; print $fh $str; close $fh; + return; +} + +# Check that all file/dir modes in a directory match the expected values, +# ignoring the mode of any specified files. +sub check_mode_recursive +{ + my ($dir, $expected_dir_mode, $expected_file_mode, $ignore_list) = @_; + + # Result defaults to true + my $result = 1; + + find( + { + follow_fast => 1, + wanted => sub { + # Is file in the ignore list? + foreach my $ignore ($ignore_list ? @{$ignore_list} : []) + { + if ("$dir/$ignore" eq $File::Find::name) + { + return; + } + } + + # Allow ENOENT. A running server can delete files, such as + # those in pg_stat. Other stat() failures are fatal. 
+ my $file_stat = stat($File::Find::name); + unless (defined($file_stat)) + { + my $is_ENOENT = $!{ENOENT}; + my $msg = "unable to stat $File::Find::name: $!"; + if ($is_ENOENT) + { + warn $msg; + return; + } + else + { + die $msg; + } + } + + my $file_mode = S_IMODE($file_stat->mode); + + # Is this a file? + if (S_ISREG($file_stat->mode)) + { + if ($file_mode != $expected_file_mode) + { + print( + *STDERR, + sprintf("$File::Find::name mode must be %04o\n", + $expected_file_mode)); + + $result = 0; + return; + } + } + + # Else a directory? + elsif (S_ISDIR($file_stat->mode)) + { + if ($file_mode != $expected_dir_mode) + { + print( + *STDERR, + sprintf("$File::Find::name mode must be %04o\n", + $expected_dir_mode)); + + $result = 0; + return; + } + } + + # Else something we can't handle + else + { + die "unknown file type for $File::Find::name"; + } + } + }, + $dir); + + return $result; +} + +# Change mode recursively on a directory +sub chmod_recursive +{ + my ($dir, $dir_mode, $file_mode) = @_; + + find( + { + follow_fast => 1, + wanted => sub { + my $file_stat = stat($File::Find::name); + + if (defined($file_stat)) + { + chmod( + S_ISDIR($file_stat->mode) ? $dir_mode : $file_mode, + $File::Find::name + ) or die "unable to chmod $File::Find::name"; + } + } + }, + $dir); + return; +} + +# Check presence of a given regexp within pg_config.h for the installation +# where tests are running, returning a match status result depending on +# that. +sub check_pg_config +{ + my ($regexp) = @_; + my ($stdout, $stderr); + my $result = IPC::Run::run [ 'pg_config', '--includedir' ], '>', + \$stdout, '2>', \$stderr + or die "could not execute pg_config"; + chomp($stdout); + + open my $pg_config_h, '<', "$stdout/pg_config.h" or die "$!"; + my $match = (grep { /^$regexp/ } <$pg_config_h>); + close $pg_config_h; + return $match; } # @@ -221,20 +379,25 @@ sub append_to_file # sub command_ok { + local $Test::Builder::Level = $Test::Builder::Level + 1; my ($cmd, $test_name) = @_; my $result = run_log($cmd); ok($result, $test_name); + return; } sub command_fails { + local $Test::Builder::Level = $Test::Builder::Level + 1; my ($cmd, $test_name) = @_; my $result = run_log($cmd); ok(!$result, $test_name); + return; } sub command_exit_is { + local $Test::Builder::Level = $Test::Builder::Level + 1; my ($cmd, $expected, $test_name) = @_; print("# Running: " . join(" ", @{$cmd}) . "\n"); my $h = IPC::Run::start $cmd; @@ -252,10 +415,12 @@ sub command_exit_is ? 
($h->full_results)[0] : $h->result(0); is($result, $expected, $test_name); + return; } sub program_help_ok { + local $Test::Builder::Level = $Test::Builder::Level + 1; my ($cmd) = @_; my ($stdout, $stderr); print("# Running: $cmd --help\n"); @@ -264,10 +429,12 @@ sub program_help_ok ok($result, "$cmd --help exit code 0"); isnt($stdout, '', "$cmd --help goes to stdout"); is($stderr, '', "$cmd --help nothing to stderr"); + return; } sub program_version_ok { + local $Test::Builder::Level = $Test::Builder::Level + 1; my ($cmd) = @_; my ($stdout, $stderr); print("# Running: $cmd --version\n"); @@ -276,10 +443,12 @@ sub program_version_ok ok($result, "$cmd --version exit code 0"); isnt($stdout, '', "$cmd --version goes to stdout"); is($stderr, '', "$cmd --version nothing to stderr"); + return; } sub program_options_handling_ok { + local $Test::Builder::Level = $Test::Builder::Level + 1; my ($cmd) = @_; my ($stdout, $stderr); print("# Running: $cmd --not-a-valid-option\n"); @@ -288,10 +457,12 @@ sub program_options_handling_ok '2>', \$stderr; ok(!$result, "$cmd with invalid option nonzero exit code"); isnt($stderr, '', "$cmd with invalid option prints error message"); + return; } sub command_like { + local $Test::Builder::Level = $Test::Builder::Level + 1; my ($cmd, $expected_stdout, $test_name) = @_; my ($stdout, $stderr); print("# Running: " . join(" ", @{$cmd}) . "\n"); @@ -299,10 +470,12 @@ sub command_like ok($result, "$test_name: exit code 0"); is($stderr, '', "$test_name: no stderr"); like($stdout, $expected_stdout, "$test_name: matches"); + return; } sub command_like_safe { + local $Test::Builder::Level = $Test::Builder::Level + 1; # Doesn't rely on detecting end of file on the file descriptors, # which can fail, causing the process to hang, notably on Msys @@ -318,16 +491,62 @@ sub command_like_safe ok($result, "$test_name: exit code 0"); is($stderr, '', "$test_name: no stderr"); like($stdout, $expected_stdout, "$test_name: matches"); + return; } sub command_fails_like { + local $Test::Builder::Level = $Test::Builder::Level + 1; my ($cmd, $expected_stderr, $test_name) = @_; my ($stdout, $stderr); print("# Running: " . join(" ", @{$cmd}) . "\n"); my $result = IPC::Run::run $cmd, '>', \$stdout, '2>', \$stderr; ok(!$result, "$test_name: exit code not 0"); like($stderr, $expected_stderr, "$test_name: matches"); + return; +} + +# Run a command and check its status and outputs. +# The 5 arguments are: +# - cmd: ref to list for command, options and arguments to run +# - ret: expected exit status +# - out: ref to list of re to be checked against stdout (all must match) +# - err: ref to list of re to be checked against stderr (all must match) +# - test_name: name of test +sub command_checks_all +{ + local $Test::Builder::Level = $Test::Builder::Level + 1; + + my ($cmd, $expected_ret, $out, $err, $test_name) = @_; + + # run command + my ($stdout, $stderr); + print("# Running: " . join(" ", @{$cmd}) . "\n"); + IPC::Run::run($cmd, '>', \$stdout, '2>', \$stderr); + + # See http://perldoc.perl.org/perlvar.html#%24CHILD_ERROR + my $ret = $?; + die "command exited with signal " . 
($ret & 127) + if $ret & 127; + $ret = $ret >> 8; + + # check status + ok($ret == $expected_ret, + "$test_name status (got $ret vs expected $expected_ret)"); + + # check stdout + for my $re (@$out) + { + like($stdout, $re, "$test_name stdout /$re/"); + } + + # check stderr + for my $re (@$err) + { + like($stderr, $re, "$test_name stderr /$re/"); + } + + return; } 1; diff --git a/src/test/recovery/Makefile b/src/test/recovery/Makefile index 142a1b8de2..daf79a0b1f 100644 --- a/src/test/recovery/Makefile +++ b/src/test/recovery/Makefile @@ -2,7 +2,7 @@ # # Makefile for src/test/recovery # -# Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group +# Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group # Portions Copyright (c) 1994, Regents of the University of California # # src/test/recovery/Makefile @@ -18,7 +18,8 @@ include $(top_builddir)/src/Makefile.global check: $(prove_check) +installcheck: + $(prove_installcheck) + clean distclean maintainer-clean: rm -rf tmp_check - -EXTRA_INSTALL = contrib/test_decoding diff --git a/src/test/recovery/README b/src/test/recovery/README index 3cafb9ddfe..632e720ebe 100644 --- a/src/test/recovery/README +++ b/src/test/recovery/README @@ -8,10 +8,18 @@ This directory contains a test suite for recovery and replication. Running the tests ================= - make check +NOTE: You must have given the --enable-tap-tests argument to configure. +Also, to use "make installcheck", you must have built and installed +contrib/test_decoding in addition to the core code. -NOTE: This creates a temporary installation, and some tests may -create one or multiple nodes, be they master or standby(s) for the -purpose of the tests. +Run + make check +or + make installcheck +You can use "make installcheck" if you previously did "make install". +In that case, the code in the installation tree is tested. With +"make check", a temporary installation tree is built from the current +sources and then tested. -NOTE: This requires the --enable-tap-tests argument to configure. +Either way, this test initializes, starts, and stops several test Postgres +clusters. diff --git a/src/test/recovery/t/001_stream_rep.pl b/src/test/recovery/t/001_stream_rep.pl index fb27925069..8dff5fc720 100644 --- a/src/test/recovery/t/001_stream_rep.pl +++ b/src/test/recovery/t/001_stream_rep.pl @@ -3,7 +3,7 @@ use warnings; use PostgresNode; use TestLib; -use Test::More tests => 28; +use Test::More tests => 26; # Initialize master node my $node_master = get_new_node('master'); @@ -95,8 +95,10 @@ sub test_target_session_attrs extra_params => [ '-d', $connstr ]); is( $status == $ret && $stdout eq $target_node->port, 1, -"connect to node $target_name if mode \"$mode\" and $node1_name,$node2_name listed" + "connect to node $target_name if mode \"$mode\" and $node1_name,$node2_name listed" ); + + return; } # Connect to master in "read-write" mode with master,standby1 list. 
@@ -183,7 +185,7 @@ sub get_slot_xmins sub replay_check { my $newval = $node_master->safe_psql('postgres', -'INSERT INTO replayed(val) SELECT coalesce(max(val),0) + 1 AS newval FROM replayed RETURNING val' + 'INSERT INTO replayed(val) SELECT coalesce(max(val),0) + 1 AS newval FROM replayed RETURNING val' ); $node_master->wait_for_catchup($node_standby_1, 'replay', $node_master->lsn('insert')); @@ -195,6 +197,7 @@ sub replay_check $node_standby_2->safe_psql('postgres', qq[SELECT 1 FROM replayed WHERE val = $newval]) or die "standby_2 didn't replay standby_1 value $newval"; + return; } replay_check(); @@ -279,27 +282,3 @@ sub replay_check is($xmin, '', 'xmin of cascaded slot null with hs feedback reset'); is($catalog_xmin, '', 'catalog xmin of cascaded slot still null with hs_feedback reset'); - -note "re-enabling hot_standby_feedback and disabling while stopped"; -$node_standby_2->safe_psql('postgres', - 'ALTER SYSTEM SET hot_standby_feedback = on;'); -$node_standby_2->reload; - -$node_master->safe_psql('postgres', qq[INSERT INTO tab_int VALUES (11000);]); -replay_check(); - -$node_standby_2->safe_psql('postgres', - 'ALTER SYSTEM SET hot_standby_feedback = off;'); -$node_standby_2->stop; - -($xmin, $catalog_xmin) = - get_slot_xmins($node_standby_1, $slotname_2, "xmin IS NOT NULL"); -isnt($xmin, '', 'xmin of cascaded slot non-null with postgres shut down'); - -# Xmin from a previous run should be cleared on startup. -$node_standby_2->start; - -($xmin, $catalog_xmin) = - get_slot_xmins($node_standby_1, $slotname_2, "xmin IS NULL"); -is($xmin, '', - 'xmin of cascaded slot reset after startup with hs feedback reset'); diff --git a/src/test/recovery/t/003_recovery_targets.pl b/src/test/recovery/t/003_recovery_targets.pl index cc7c04b6cb..e867479f20 100644 --- a/src/test/recovery/t/003_recovery_targets.pl +++ b/src/test/recovery/t/003_recovery_targets.pl @@ -5,8 +5,9 @@ use TestLib; use Test::More tests => 9; -# Create and test a standby from given backup, with a certain -# recovery target. +# Create and test a standby from given backup, with a certain recovery target. +# Choose $until_lsn later than the transaction commit that causes the row +# count to reach $num_rows, yet not later than the recovery target. 
sub test_recovery_standby { my $test_name = shift; @@ -40,6 +41,8 @@ sub test_recovery_standby # Stop standby node $node_standby->teardown_node; + + return; } # Initialize master node @@ -70,9 +73,9 @@ sub test_recovery_standby # More data, with recovery target timestamp $node_master->safe_psql('postgres', "INSERT INTO tab_int VALUES (generate_series(2001,3000))"); -$ret = - $node_master->safe_psql('postgres', "SELECT pg_current_wal_lsn(), now();"); -my ($lsn3, $recovery_time) = split /\|/, $ret; +my $lsn3 = + $node_master->safe_psql('postgres', "SELECT pg_current_wal_lsn();"); +my $recovery_time = $node_master->safe_psql('postgres', "SELECT now()"); # Even more data, this time with a recovery target name $node_master->safe_psql('postgres', @@ -86,10 +89,8 @@ sub test_recovery_standby # And now for a recovery target LSN $node_master->safe_psql('postgres', "INSERT INTO tab_int VALUES (generate_series(4001,5000))"); -my $recovery_lsn = +my $lsn5 = my $recovery_lsn = $node_master->safe_psql('postgres', "SELECT pg_current_wal_lsn()"); -my $lsn5 = - $node_master->safe_psql('postgres', "SELECT pg_current_wal_lsn();"); $node_master->safe_psql('postgres', "INSERT INTO tab_int VALUES (generate_series(5001,6000))"); diff --git a/src/test/recovery/t/004_timeline_switch.pl b/src/test/recovery/t/004_timeline_switch.pl index 34ee335129..a7ccb7b4a3 100644 --- a/src/test/recovery/t/004_timeline_switch.pl +++ b/src/test/recovery/t/004_timeline_switch.pl @@ -6,7 +6,7 @@ use File::Path qw(rmtree); use PostgresNode; use TestLib; -use Test::More tests => 1; +use Test::More tests => 2; $ENV{PGDATABASE} = 'postgres'; @@ -37,9 +37,14 @@ $node_master->wait_for_catchup($node_standby_1, 'replay', $node_master->lsn('write')); -# Stop and remove master, and promote standby 1, switching it to a new timeline +# Stop and remove master $node_master->teardown_node; -$node_standby_1->promote; + +# promote standby 1 using "pg_promote", switching it to a new timeline +my $psql_out = ''; +$node_standby_1->psql('postgres', "SELECT pg_promote(wait_seconds => 300)", + stdout => \$psql_out); +is($psql_out, 't', "promotion of standby with pg_promote"); # Switch standby 2 to replay from standby 1 rmtree($node_standby_2->data_dir . '/recovery.conf'); diff --git a/src/test/recovery/t/006_logical_decoding.pl b/src/test/recovery/t/006_logical_decoding.pl index 4a90e9ac7e..884b0aedd1 100644 --- a/src/test/recovery/t/006_logical_decoding.pl +++ b/src/test/recovery/t/006_logical_decoding.pl @@ -7,7 +7,7 @@ use warnings; use PostgresNode; use TestLib; -use Test::More tests => 16; +use Test::More tests => 10; use Config; # Initialize master node @@ -24,10 +24,11 @@ qq[CREATE TABLE decoding_test(x integer, y text);]); $node_master->safe_psql('postgres', -qq[SELECT pg_create_logical_replication_slot('test_slot', 'test_decoding');]); + qq[SELECT pg_create_logical_replication_slot('test_slot', 'test_decoding');] +); $node_master->safe_psql('postgres', -qq[INSERT INTO decoding_test(x,y) SELECT s, s::text FROM generate_series(1,10) s;] + qq[INSERT INTO decoding_test(x,y) SELECT s, s::text FROM generate_series(1,10) s;] ); # Basic decoding works @@ -50,7 +51,7 @@ # Insert some rows and verify that we get the same results from pg_recvlogical # and the SQL interface. 
$node_master->safe_psql('postgres', -qq[INSERT INTO decoding_test(x,y) SELECT s, s::text FROM generate_series(1,4) s;] + qq[INSERT INTO decoding_test(x,y) SELECT s, s::text FROM generate_series(1,4) s;] ); my $expected = q{BEGIN @@ -61,12 +62,12 @@ COMMIT}; my $stdout_sql = $node_master->safe_psql('postgres', -qq[SELECT data FROM pg_logical_slot_peek_changes('test_slot', NULL, NULL, 'include-xids', '0', 'skip-empty-xacts', '1');] + qq[SELECT data FROM pg_logical_slot_peek_changes('test_slot', NULL, NULL, 'include-xids', '0', 'skip-empty-xacts', '1');] ); is($stdout_sql, $expected, 'got expected output from SQL decoding session'); my $endpos = $node_master->safe_psql('postgres', -"SELECT lsn FROM pg_logical_slot_peek_changes('test_slot', NULL, NULL) ORDER BY lsn DESC LIMIT 1;" + "SELECT lsn FROM pg_logical_slot_peek_changes('test_slot', NULL, NULL) ORDER BY lsn DESC LIMIT 1;" ); print "waiting to replay $endpos\n"; @@ -78,6 +79,10 @@ is($stdout_recv, $expected, 'got same expected output from pg_recvlogical decoding session'); +$node_master->poll_query_until('postgres', + "SELECT EXISTS (SELECT 1 FROM pg_replication_slots WHERE slot_name = 'test_slot' AND active_pid IS NULL)" +) or die "slot never became inactive"; + $stdout_recv = $node_master->pg_recvlogical_upto( 'postgres', 'test_slot', $endpos, 10, 'include-xids' => '0', @@ -90,27 +95,29 @@ is( $node_master->psql( 'otherdb', -"SELECT lsn FROM pg_logical_slot_peek_changes('test_slot', NULL, NULL) ORDER BY lsn DESC LIMIT 1;" + "SELECT lsn FROM pg_logical_slot_peek_changes('test_slot', NULL, NULL) ORDER BY lsn DESC LIMIT 1;" ), 3, 'replaying logical slot from another database fails'); $node_master->safe_psql('otherdb', -qq[SELECT pg_create_logical_replication_slot('otherdb_slot', 'test_decoding');] + qq[SELECT pg_create_logical_replication_slot('otherdb_slot', 'test_decoding');] ); # make sure you can't drop a slot while active SKIP: { - # some Windows Perls at least don't like IPC::Run's start/kill_kill regime. + # some Windows Perls at least don't like IPC::Run's start/kill_kill regime. skip "Test fails on Windows perl", 2 if $Config{osname} eq 'MSWin32'; my $pg_recvlogical = IPC::Run::start( - [ 'pg_recvlogical', '-d', $node_master->connstr('otherdb'), - '-S', 'otherdb_slot', '-f', '-', '--start' ]); + [ + 'pg_recvlogical', '-d', $node_master->connstr('otherdb'), + '-S', 'otherdb_slot', '-f', '-', '--start' + ]); $node_master->poll_query_until('otherdb', -"SELECT EXISTS (SELECT 1 FROM pg_replication_slots WHERE slot_name = 'otherdb_slot' AND active_pid IS NOT NULL)" + "SELECT EXISTS (SELECT 1 FROM pg_replication_slots WHERE slot_name = 'otherdb_slot' AND active_pid IS NOT NULL)" ) or die "slot never became active"; is($node_master->psql('postgres', 'DROP DATABASE otherdb'), 3, 'dropping a DB with active logical slots fails'); @@ -120,7 +127,7 @@ } $node_master->poll_query_until('otherdb', -"SELECT EXISTS (SELECT 1 FROM pg_replication_slots WHERE slot_name = 'otherdb_slot' AND active_pid IS NULL)" + "SELECT EXISTS (SELECT 1 FROM pg_replication_slots WHERE slot_name = 'otherdb_slot' AND active_pid IS NULL)" ) or die "slot never became inactive"; is($node_master->psql('postgres', 'DROP DATABASE otherdb'), @@ -128,26 +135,5 @@ is($node_master->slot('otherdb_slot')->{'slot_name'}, undef, 'logical slot was actually dropped with DB'); -# Restarting a node with wal_level = logical that has existing -# slots must succeed, but decoding from those slots must fail. 
-$node_master->safe_psql('postgres', 'ALTER SYSTEM SET wal_level = replica'); -is($node_master->safe_psql('postgres', 'SHOW wal_level'), - 'logical', 'wal_level is still logical before restart'); -$node_master->restart; -is($node_master->safe_psql('postgres', 'SHOW wal_level'), - 'replica', 'wal_level is replica'); -isnt($node_master->slot('test_slot')->{'catalog_xmin'}, - '0', 'restored slot catalog_xmin is nonzero'); -is( $node_master->psql( - 'postgres', - qq[SELECT pg_logical_slot_get_changes('test_slot', NULL, NULL);]), - 3, - 'reading from slot with wal_level < logical fails'); -is( $node_master->psql( - 'postgres', q[SELECT pg_drop_replication_slot('test_slot')]), - 0, - 'can drop logical slot while wal_level = replica'); -is($node_master->slot('test_slot')->{'catalog_xmin'}, '', 'slot was dropped'); - # done with the node $node_master->stop; diff --git a/src/test/recovery/t/007_sync_rep.pl b/src/test/recovery/t/007_sync_rep.pl index e21d1a5274..bba47da17a 100644 --- a/src/test/recovery/t/007_sync_rep.pl +++ b/src/test/recovery/t/007_sync_rep.pl @@ -7,7 +7,7 @@ # Query checking sync_priority and sync_state of each standby my $check_sql = -"SELECT application_name, sync_priority, sync_state FROM pg_stat_replication ORDER BY application_name;"; + "SELECT application_name, sync_priority, sync_state FROM pg_stat_replication ORDER BY application_name;"; # Check that sync_state of each standby is expected (waiting till it is). # If $setting is given, synchronous_standby_names is set to it and @@ -24,6 +24,7 @@ sub test_sync_state } ok($self->poll_query_until('postgres', $check_sql, $expected), $msg); + return; } # Initialize master node diff --git a/src/test/recovery/t/009_twophase.pl b/src/test/recovery/t/009_twophase.pl index 6c50139572..9ea3bd65fc 100644 --- a/src/test/recovery/t/009_twophase.pl +++ b/src/test/recovery/t/009_twophase.pl @@ -20,6 +20,7 @@ sub configure_and_reload )); $node->psql('postgres', "SELECT pg_reload_conf()", stdout => \$psql_out); is($psql_out, 't', "reload node $name with $parameter"); + return; } # Set up two nodes, which will alternately be master and replication standby. @@ -331,6 +332,14 @@ sub configure_and_reload CHECKPOINT; COMMIT PREPARED 'xact_009_13';"); +# Ensure that last transaction is replayed on standby. 
+my $cur_master_lsn = + $cur_master->safe_psql('postgres', "SELECT pg_current_wal_lsn()"); +my $caughtup_query = + "SELECT '$cur_master_lsn'::pg_lsn <= pg_last_wal_replay_lsn()"; +$cur_standby->poll_query_until('postgres', $caughtup_query) + or die "Timed out while waiting for standby to catch up"; + $cur_standby->psql( 'postgres', "SELECT count(*) FROM t_009_tbl2", diff --git a/src/test/recovery/t/010_logical_decoding_timelines.pl b/src/test/recovery/t/010_logical_decoding_timelines.pl index edc0219c9c..a76eea86a5 100644 --- a/src/test/recovery/t/010_logical_decoding_timelines.pl +++ b/src/test/recovery/t/010_logical_decoding_timelines.pl @@ -24,7 +24,6 @@ use PostgresNode; use TestLib; use Test::More tests => 13; -use RecursiveCopy; use File::Copy; use IPC::Run (); use Scalar::Util qw(blessed); @@ -49,7 +48,7 @@ note "testing logical timeline following with a filesystem-level copy"; $node_master->safe_psql('postgres', -"SELECT pg_create_logical_replication_slot('before_basebackup', 'test_decoding');" + "SELECT pg_create_logical_replication_slot('before_basebackup', 'test_decoding');" ); $node_master->safe_psql('postgres', "CREATE TABLE decoding(blah text);"); $node_master->safe_psql('postgres', @@ -61,7 +60,8 @@ # the same physical copy trick, so: $node_master->safe_psql('postgres', 'CREATE DATABASE dropme;'); $node_master->safe_psql('dropme', -"SELECT pg_create_logical_replication_slot('dropme_slot', 'test_decoding');"); + "SELECT pg_create_logical_replication_slot('dropme_slot', 'test_decoding');" +); $node_master->safe_psql('postgres', 'CHECKPOINT;'); @@ -96,7 +96,7 @@ # Back to testing failover... $node_master->safe_psql('postgres', -"SELECT pg_create_logical_replication_slot('after_basebackup', 'test_decoding');" + "SELECT pg_create_logical_replication_slot('after_basebackup', 'test_decoding');" ); $node_master->safe_psql('postgres', "INSERT INTO decoding(blah) VALUES ('afterbb');"); @@ -142,7 +142,7 @@ # Shouldn't be able to read from slot created after base backup ($ret, $stdout, $stderr) = $node_replica->psql('postgres', -"SELECT data FROM pg_logical_slot_peek_changes('after_basebackup', NULL, NULL, 'include-xids', '0', 'skip-empty-xacts', '1');" + "SELECT data FROM pg_logical_slot_peek_changes('after_basebackup', NULL, NULL, 'include-xids', '0', 'skip-empty-xacts', '1');" ); is($ret, 3, 'replaying from after_basebackup slot fails'); like( @@ -153,7 +153,7 @@ # Should be able to read from slot created before base backup ($ret, $stdout, $stderr) = $node_replica->psql( 'postgres', -"SELECT data FROM pg_logical_slot_peek_changes('before_basebackup', NULL, NULL, 'include-xids', '0', 'skip-empty-xacts', '1');", + "SELECT data FROM pg_logical_slot_peek_changes('before_basebackup', NULL, NULL, 'include-xids', '0', 'skip-empty-xacts', '1');", timeout => 30); is($ret, 0, 'replay from slot before_basebackup succeeds'); @@ -175,7 +175,7 @@ BEGIN # of the last transaction. 
There's no max(pg_lsn), so: my $endpos = $node_replica->safe_psql('postgres', -"SELECT lsn FROM pg_logical_slot_peek_changes('before_basebackup', NULL, NULL) ORDER BY lsn DESC LIMIT 1;" + "SELECT lsn FROM pg_logical_slot_peek_changes('before_basebackup', NULL, NULL) ORDER BY lsn DESC LIMIT 1;" ); # now use the walsender protocol to peek the slot changes and make sure we see diff --git a/src/test/recovery/t/011_crash_recovery.pl b/src/test/recovery/t/011_crash_recovery.pl index 7afa94a827..5dc52412ca 100644 --- a/src/test/recovery/t/011_crash_recovery.pl +++ b/src/test/recovery/t/011_crash_recovery.pl @@ -10,7 +10,7 @@ if ($Config{osname} eq 'MSWin32') { - # some Windows Perls at least don't like IPC::Run's start/kill_kill regime. + # some Windows Perls at least don't like IPC::Run's start/kill_kill regime. plan skip_all => "Test fails on Windows perl"; } else @@ -29,8 +29,10 @@ # an xact to be in-progress when we crash and we need to know # its xid. my $tx = IPC::Run::start( - [ 'psql', '-X', '-qAt', '-v', 'ON_ERROR_STOP=1', '-f', '-', '-d', - $node->connstr('postgres') ], + [ + 'psql', '-X', '-qAt', '-v', 'ON_ERROR_STOP=1', '-f', '-', '-d', + $node->connstr('postgres') + ], '<', \$stdin, '>', diff --git a/src/test/recovery/t/012_subtransactions.pl b/src/test/recovery/t/012_subtransactions.pl index 216c3331d6..efc23d0559 100644 --- a/src/test/recovery/t/012_subtransactions.pl +++ b/src/test/recovery/t/012_subtransactions.pl @@ -177,7 +177,7 @@ $node_standby->start; $psql_rc = $node_master->psql('postgres', "COMMIT PREPARED 'xact_012_1'"); is($psql_rc, '0', -"Restore of PGPROC_MAX_CACHED_SUBXIDS+ prepared transaction on promoted standby" + "Restore of PGPROC_MAX_CACHED_SUBXIDS+ prepared transaction on promoted standby" ); $node_master->psql( @@ -218,7 +218,7 @@ $node_standby->start; $psql_rc = $node_master->psql('postgres', "ROLLBACK PREPARED 'xact_012_1'"); is($psql_rc, '0', -"Rollback of PGPROC_MAX_CACHED_SUBXIDS+ prepared transaction on promoted standby" + "Rollback of PGPROC_MAX_CACHED_SUBXIDS+ prepared transaction on promoted standby" ); $node_master->psql( diff --git a/src/test/recovery/t/013_crash_restart.pl b/src/test/recovery/t/013_crash_restart.pl new file mode 100644 index 0000000000..c928e9201e --- /dev/null +++ b/src/test/recovery/t/013_crash_restart.pl @@ -0,0 +1,275 @@ +# +# Tests restarts of postgres due to crashes of a subprocess. +# +# Two longer-running psql subprocesses are used: One to kill a +# backend, triggering a crash-restart cycle, one to detect when +# postmaster noticed the backend died. The second backend is +# necessary because it's otherwise hard to determine if postmaster is +# still accepting new sessions (because it hasn't noticed that the +# backend died), or because it's already restarted. +# +use strict; +use warnings; +use PostgresNode; +use TestLib; +use Test::More; +use Config; +use Time::HiRes qw(usleep); + +plan tests => 18; + + +# To avoid hanging while expecting some specific input from a psql +# instance being driven by us, add a timeout high enough that it +# should never trigger even on very slow machines, unless something +# is really wrong. 
+my $psql_timeout = IPC::Run::timer(60); + +my $node = get_new_node('master'); +$node->init(allows_streaming => 1); +$node->start(); + +# by default PostgresNode doesn't doesn't restart after a crash +$node->safe_psql( + 'postgres', + q[ALTER SYSTEM SET restart_after_crash = 1; + ALTER SYSTEM SET log_connections = 1; + SELECT pg_reload_conf();]); + +# Run psql, keeping session alive, so we have an alive backend to kill. +my ($killme_stdin, $killme_stdout, $killme_stderr) = ('', '', ''); +my $killme = IPC::Run::start( + [ + 'psql', '-X', '-qAt', '-v', 'ON_ERROR_STOP=1', '-f', '-', '-d', + $node->connstr('postgres') + ], + '<', + \$killme_stdin, + '>', + \$killme_stdout, + '2>', + \$killme_stderr, + $psql_timeout); + +# Need a second psql to check if crash-restart happened. +my ($monitor_stdin, $monitor_stdout, $monitor_stderr) = ('', '', ''); +my $monitor = IPC::Run::start( + [ + 'psql', '-X', '-qAt', '-v', 'ON_ERROR_STOP=1', '-f', '-', '-d', + $node->connstr('postgres') + ], + '<', + \$monitor_stdin, + '>', + \$monitor_stdout, + '2>', + \$monitor_stderr, + $psql_timeout); + +#create table, insert row that should survive +$killme_stdin .= q[ +CREATE TABLE alive(status text); +INSERT INTO alive VALUES($$committed-before-sigquit$$); +SELECT pg_backend_pid(); +]; +ok(pump_until($killme, \$killme_stdout, qr/[[:digit:]]+[\r\n]$/m), + 'acquired pid for SIGQUIT'); +my $pid = $killme_stdout; +chomp($pid); +$killme_stdout = ''; +$killme_stderr = ''; + +#insert a row that should *not* survive, due to in-progress xact +$killme_stdin .= q[ +BEGIN; +INSERT INTO alive VALUES($$in-progress-before-sigquit$$) RETURNING status; +]; +ok(pump_until($killme, \$killme_stdout, qr/in-progress-before-sigquit/m), + 'inserted in-progress-before-sigquit'); +$killme_stdout = ''; +$killme_stderr = ''; + + +# Start longrunning query in second session, it's failure will signal +# that crash-restart has occurred. The initial wait for the trivial +# select is to be sure that psql successfully connected to backend. +$monitor_stdin .= q[ +SELECT $$psql-connected$$; +SELECT pg_sleep(3600); +]; +ok(pump_until($monitor, \$monitor_stdout, qr/psql-connected/m), + 'monitor connected'); +$monitor_stdout = ''; +$monitor_stderr = ''; + +# kill once with QUIT - we expect psql to exit, while emitting error message first +my $ret = TestLib::system_log('pg_ctl', 'kill', 'QUIT', $pid); + +# Exactly process should have been alive to be killed +is($ret, 0, "killed process with SIGQUIT"); + +# Check that psql sees the killed backend as having been terminated +$killme_stdin .= q[ +SELECT 1; +]; +ok( pump_until( + $killme, + \$killme_stderr, + qr/WARNING: terminating connection because of crash of another server process|server closed the connection unexpectedly/m + ), + "psql query died successfully after SIGQUIT"); +$killme_stderr = ''; +$killme_stdout = ''; +$killme->finish; + +# Wait till server restarts - we should get the WARNING here, but +# sometimes the server is unable to send that, if interrupted while +# sending. 
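The signalling step above relies on pg_ctl's kill mode, which lets a test send signals portably; the pid is the one the victim session reported via pg_backend_pid(). A minimal sketch, assuming $pid holds such a backend pid:

	# SIGQUIT one backend; the postmaster treats its abnormal exit as a
	# crash and restarts the whole cluster.
	my $ret = TestLib::system_log('pg_ctl', 'kill', 'QUIT', $pid);
	is($ret, 0, 'SIGQUIT delivered to backend');

The WARNING / "server closed the connection unexpectedly" alternation in the checks that follow is deliberate: as the comment above notes, the terminating-connection WARNING can be lost if the server is interrupted while sending it.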
+ok( pump_until( + $monitor, + \$monitor_stderr, + qr/WARNING: terminating connection because of crash of another server process|server closed the connection unexpectedly/m + ), + "psql monitor died successfully after SIGQUIT"); +$monitor->finish; + +# Wait till server restarts +is( $node->poll_query_until( + 'postgres', + 'SELECT $$restarted after sigquit$$;', + 'restarted after sigquit'), + "1", + "reconnected after SIGQUIT"); + + +# restart psql processes, now that the crash cycle finished +($killme_stdin, $killme_stdout, $killme_stderr) = ('', '', ''); +$killme->run(); +($monitor_stdin, $monitor_stdout, $monitor_stderr) = ('', '', ''); +$monitor->run(); + + +# Acquire pid of new backend +$killme_stdin .= q[ +SELECT pg_backend_pid(); +]; +ok(pump_until($killme, \$killme_stdout, qr/[[:digit:]]+[\r\n]$/m), + "acquired pid for SIGKILL"); +$pid = $killme_stdout; +chomp($pid); +$killme_stdout = ''; +$killme_stderr = ''; + +# Insert test rows +$killme_stdin .= q[ +INSERT INTO alive VALUES($$committed-before-sigkill$$) RETURNING status; +BEGIN; +INSERT INTO alive VALUES($$in-progress-before-sigkill$$) RETURNING status; +]; +ok(pump_until($killme, \$killme_stdout, qr/in-progress-before-sigkill/m), + 'inserted in-progress-before-sigkill'); +$killme_stdout = ''; +$killme_stderr = ''; + +# Re-start longrunning query in second session, it's failure will +# signal that crash-restart has occurred. The initial wait for the +# trivial select is to be sure that psql successfully connected to +# backend. +$monitor_stdin .= q[ +SELECT $$psql-connected$$; +SELECT pg_sleep(3600); +]; +ok(pump_until($monitor, \$monitor_stdout, qr/psql-connected/m), + 'monitor connected'); +$monitor_stdout = ''; +$monitor_stderr = ''; + + +# kill with SIGKILL this time - we expect the backend to exit, without +# being able to emit an error error message +$ret = TestLib::system_log('pg_ctl', 'kill', 'KILL', $pid); +is($ret, 0, "killed process with KILL"); + +# Check that psql sees the server as being terminated. No WARNING, +# because signal handlers aren't being run on SIGKILL. +$killme_stdin .= q[ +SELECT 1; +]; +ok( pump_until( + $killme, \$killme_stderr, + qr/server closed the connection unexpectedly/m), + "psql query died successfully after SIGKILL"); +$killme->finish; + +# Wait till server restarts - we should get the WARNING here, but +# sometimes the server is unable to send that, if interrupted while +# sending. 
+ok( pump_until( + $monitor, + \$monitor_stderr, + qr/WARNING: terminating connection because of crash of another server process|server closed the connection unexpectedly/m + ), + "psql monitor died successfully after SIGKILL"); +$monitor->finish; + +# Wait till server restarts +is($node->poll_query_until('postgres', 'SELECT 1', '1'), + "1", "reconnected after SIGKILL"); + +# Make sure the committed rows survived, in-progress ones not +is( $node->safe_psql('postgres', 'SELECT * FROM alive'), + "committed-before-sigquit\ncommitted-before-sigkill", + 'data survived'); + +is( $node->safe_psql( + 'postgres', + 'INSERT INTO alive VALUES($$before-orderly-restart$$) RETURNING status' + ), + 'before-orderly-restart', + 'can still write after crash restart'); + +# Just to be sure, check that an orderly restart now still works +$node->restart(); + +is( $node->safe_psql('postgres', 'SELECT * FROM alive'), + "committed-before-sigquit\ncommitted-before-sigkill\nbefore-orderly-restart", + 'data survived'); + +is( $node->safe_psql( + 'postgres', + 'INSERT INTO alive VALUES($$after-orderly-restart$$) RETURNING status' + ), + 'after-orderly-restart', + 'can still write after orderly restart'); + +$node->stop(); + +# Pump until string is matched, or timeout occurs +sub pump_until +{ + my ($proc, $stream, $untl) = @_; + $proc->pump_nb(); + while (1) + { + last if $$stream =~ /$untl/; + if ($psql_timeout->is_expired) + { + diag("aborting wait: program timed out"); + diag("stream contents: >>", $$stream, "<<"); + diag("pattern searched for: ", $untl); + + return 0; + } + if (not $proc->pumpable()) + { + diag("aborting wait: program died"); + diag("stream contents: >>", $$stream, "<<"); + diag("pattern searched for: ", $untl); + + return 0; + } + $proc->pump(); + } + return 1; + +} diff --git a/src/test/recovery/t/014_unlogged_reinit.pl b/src/test/recovery/t/014_unlogged_reinit.pl new file mode 100644 index 0000000000..103c0a2b91 --- /dev/null +++ b/src/test/recovery/t/014_unlogged_reinit.pl @@ -0,0 +1,81 @@ +# Tests that unlogged tables are properly reinitialized after a crash. +# +# The behavior should be the same when restoring from a backup, but +# that is not tested here. + +use strict; +use warnings; +use PostgresNode; +use TestLib; +use Test::More tests => 12; + +my $node = get_new_node('main'); + +$node->init; +$node->start; +my $pgdata = $node->data_dir; + +# Create an unlogged table to test that forks other than init are not +# copied. +$node->safe_psql('postgres', 'CREATE UNLOGGED TABLE base_unlogged (id int)'); + +my $baseUnloggedPath = $node->safe_psql('postgres', + q{select pg_relation_filepath('base_unlogged')}); + +# Test that main and init forks exist. +ok(-f "$pgdata/${baseUnloggedPath}_init", 'init fork in base exists'); +ok(-f "$pgdata/$baseUnloggedPath", 'main fork in base exists'); + +# Create an unlogged table in a tablespace. + +my $tablespaceDir = TestLib::tempdir; + +my $realTSDir = TestLib::real_dir($tablespaceDir); + +$node->safe_psql('postgres', "CREATE TABLESPACE ts1 LOCATION '$realTSDir'"); +$node->safe_psql('postgres', + 'CREATE UNLOGGED TABLE ts1_unlogged (id int) TABLESPACE ts1'); + +my $ts1UnloggedPath = $node->safe_psql('postgres', + q{select pg_relation_filepath('ts1_unlogged')}); + +# Test that main and init forks exist. +ok(-f "$pgdata/${ts1UnloggedPath}_init", 'init fork in tablespace exists'); +ok(-f "$pgdata/$ts1UnloggedPath", 'main fork in tablespace exists'); + +# Crash the postmaster. 
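014_unlogged_reinit.pl locates a relation's storage with pg_relation_filepath(); the init fork sits next to the main fork with an _init suffix, and the vm/fsm forks follow the same naming. A minimal sketch of that fork-inspection idiom, assuming $node is a running PostgresNode:

	$node->safe_psql('postgres', 'CREATE UNLOGGED TABLE u (id int)');
	my $rel    = $node->safe_psql('postgres',
		q{SELECT pg_relation_filepath('u')});
	my $pgdata = $node->data_dir;
	ok(-f "$pgdata/$rel",        'main fork exists');
	ok(-f "$pgdata/${rel}_init", 'init fork exists');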
+$node->stop('immediate'); + +# Write fake forks to test that they are removed during recovery. +append_to_file("$pgdata/${baseUnloggedPath}_vm", 'TEST_VM'); +append_to_file("$pgdata/${baseUnloggedPath}_fsm", 'TEST_FSM'); + +# Remove main fork to test that it is recopied from init. +unlink("$pgdata/${baseUnloggedPath}") + or BAIL_OUT("could not remove \"${baseUnloggedPath}\": $!"); + +# the same for the tablespace +append_to_file("$pgdata/${ts1UnloggedPath}_vm", 'TEST_VM'); +append_to_file("$pgdata/${ts1UnloggedPath}_fsm", 'TEST_FSM'); +unlink("$pgdata/${ts1UnloggedPath}") + or BAIL_OUT("could not remove \"${ts1UnloggedPath}\": $!"); + +$node->start; + +# check unlogged table in base +ok(-f "$pgdata/${baseUnloggedPath}_init", 'init fork in base still exists'); +ok(-f "$pgdata/$baseUnloggedPath", 'main fork in base recreated at startup'); +ok(!-f "$pgdata/${baseUnloggedPath}_vm", + 'vm fork in base removed at startup'); +ok( !-f "$pgdata/${baseUnloggedPath}_fsm", + 'fsm fork in base removed at startup'); + +# check unlogged table in tablespace +ok( -f "$pgdata/${ts1UnloggedPath}_init", + 'init fork still exists in tablespace'); +ok(-f "$pgdata/$ts1UnloggedPath", + 'main fork in tablespace recreated at startup'); +ok( !-f "$pgdata/${ts1UnloggedPath}_vm", + 'vm fork in tablespace removed at startup'); +ok( !-f "$pgdata/${ts1UnloggedPath}_fsm", + 'fsm fork in tablespace removed at startup'); diff --git a/src/test/recovery/t/015_promotion_pages.pl b/src/test/recovery/t/015_promotion_pages.pl new file mode 100644 index 0000000000..48f941b963 --- /dev/null +++ b/src/test/recovery/t/015_promotion_pages.pl @@ -0,0 +1,87 @@ +# Test for promotion handling with WAL records generated post-promotion +# before the first checkpoint is generated. This test case checks for +# invalid page references at replay based on the minimum consistent +# recovery point defined. +use strict; +use warnings; +use PostgresNode; +use TestLib; +use Test::More tests => 1; + +# Initialize primary node +my $alpha = get_new_node('alpha'); +$alpha->init(allows_streaming => 1); +# Setting wal_log_hints to off is important to get invalid page +# references. +$alpha->append_conf("postgresql.conf", <start; + +# setup/start a standby +$alpha->backup('bkp'); +my $bravo = get_new_node('bravo'); +$bravo->init_from_backup($alpha, 'bkp', has_streaming => 1); +$bravo->append_conf('postgresql.conf', <start; + +# Dummy table for the upcoming tests. +$alpha->safe_psql('postgres', 'create table test1 (a int)'); +$alpha->safe_psql('postgres', 'insert into test1 select generate_series(1, 10000)'); + +# take a checkpoint +$alpha->safe_psql('postgres', 'checkpoint'); + +# The following vacuum will set visibility map bits and create +# problematic WAL records. +$alpha->safe_psql('postgres', 'vacuum verbose test1'); +# Wait for last record to have been replayed on the standby. +$alpha->wait_for_catchup($bravo, 'replay', + $alpha->lsn('insert')); + +# Now force a checkpoint on the standby. This seems unnecessary but for "some" +# reason, the previous checkpoint on the primary does not reflect on the standby +# and without an explicit checkpoint, it may start redo recovery from a much +# older point, which includes even create table and initial page additions. +$bravo->safe_psql('postgres', 'checkpoint'); + +# Now just use a dummy table and run some operations to move minRecoveryPoint +# beyond the previous vacuum. 
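The replay waits in 015_promotion_pages.pl use PostgresNode::wait_for_catchup, which polls replication progress on the upstream node until the requested LSN column reaches the target. A minimal sketch, assuming $primary and $standby are nodes already connected by streaming replication:

	# Block until the standby has replayed everything written to WAL on
	# the primary so far.
	$primary->wait_for_catchup($standby, 'replay', $primary->lsn('insert'));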
+$alpha->safe_psql('postgres', 'create table test2 (a int, b text)'); +$alpha->safe_psql('postgres', 'insert into test2 select generate_series(1,10000), md5(random()::text)'); +$alpha->safe_psql('postgres', 'truncate test2'); + +# Wait again for all records to be replayed. +$alpha->wait_for_catchup($bravo, 'replay', + $alpha->lsn('insert')); + +# Do the promotion, which reinitializes minRecoveryPoint in the control +# file so as WAL is replayed up to the end. +$bravo->promote; + +# Truncate the table on the promoted standby, vacuum and extend it +# again to create new page references. The first post-recovery checkpoint +# has not happened yet. +$bravo->safe_psql('postgres', 'truncate test1'); +$bravo->safe_psql('postgres', 'vacuum verbose test1'); +$bravo->safe_psql('postgres', 'insert into test1 select generate_series(1,1000)'); + +# Now crash-stop the promoted standby and restart. This makes sure that +# replay does not see invalid page references because of an invalid +# minimum consistent recovery point. +$bravo->stop('immediate'); +$bravo->start; + +# Check state of the table after full crash recovery. All its data should +# be here. +my $psql_out; +$bravo->psql( + 'postgres', + "SELECT count(*) FROM test1", + stdout => \$psql_out); +is($psql_out, '1000', "Check that table state is correct"); diff --git a/src/test/regress/GNUmakefile b/src/test/regress/GNUmakefile index b923ea1420..378bd01753 100644 --- a/src/test/regress/GNUmakefile +++ b/src/test/regress/GNUmakefile @@ -3,7 +3,7 @@ # GNUmakefile-- # Makefile for src/test/regress (the regression tests) # -# Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group +# Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group # Portions Copyright (c) 1994, Regents of the University of California # # src/test/regress/GNUmakefile @@ -43,7 +43,8 @@ pg_regress$(X): pg_regress.o pg_regress_main.o $(WIN32RES) | submake-libpgport pg_regress.o: pg_regress.c $(top_builddir)/src/port/pg_config_paths.h pg_regress.o: override CPPFLAGS += -I$(top_builddir)/src/port $(EXTRADEFS) -$(top_builddir)/src/port/pg_config_paths.h: $(top_builddir)/src/Makefile.global +# note: because of the submake dependency, this rule's action is really a no-op +$(top_builddir)/src/port/pg_config_paths.h: | submake-libpgport $(MAKE) -C $(top_builddir)/src/port pg_config_paths.h install: all installdirs @@ -65,7 +66,8 @@ include $(top_srcdir)/src/Makefile.shlib all: all-lib -$(OBJS): | submake-generated-headers +# Ensure parallel safety if a build is started in this directory +$(OBJS): | submake-libpgport submake-generated-headers # Test input and expected files. These are created by pg_regress itself, so we # don't have a rule to create them. We do need rules to clean them however. @@ -124,12 +126,12 @@ tablespace-setup: ## Run tests ## -REGRESS_OPTS = --dlpath=. $(EXTRA_REGRESS_OPTS) +REGRESS_OPTS = --dlpath=. 
--max-concurrent-tests=20 $(EXTRA_REGRESS_OPTS) check: all tablespace-setup $(pg_regress_check) $(REGRESS_OPTS) --schedule=$(srcdir)/parallel_schedule $(MAXCONNOPT) $(EXTRA_TESTS) -check-tests: all tablespace-setup +check-tests: all tablespace-setup | temp-install $(pg_regress_check) $(REGRESS_OPTS) $(MAXCONNOPT) $(TESTS) $(EXTRA_TESTS) installcheck: all tablespace-setup @@ -153,7 +155,7 @@ runtest-parallel: installcheck-parallel bigtest: all tablespace-setup $(pg_regress_installcheck) $(REGRESS_OPTS) --schedule=$(srcdir)/serial_schedule numeric_big -bigcheck: all tablespace-setup +bigcheck: all tablespace-setup | temp-install $(pg_regress_check) $(REGRESS_OPTS) --schedule=$(srcdir)/parallel_schedule $(MAXCONNOPT) numeric_big diff --git a/src/test/regress/Makefile b/src/test/regress/Makefile index 6409a485e8..7c665ff892 100644 --- a/src/test/regress/Makefile +++ b/src/test/regress/Makefile @@ -7,6 +7,11 @@ # GNU make uses a make file named "GNUmakefile" in preference to "Makefile" # if it exists. Postgres is shipped with a "GNUmakefile". + +# AIX make defaults to building *every* target of the first rule. Start with +# a single-target, empty rule to make the other targets non-default. +all: + all install clean check installcheck: @echo "You must use GNU make to use Postgres. It may be installed" @echo "on your system with the name 'gmake'." diff --git a/src/test/regress/expected/abstime.out b/src/test/regress/expected/abstime.out deleted file mode 100644 index ed48f642ab..0000000000 --- a/src/test/regress/expected/abstime.out +++ /dev/null @@ -1,136 +0,0 @@ --- --- ABSTIME --- testing built-in time type abstime --- uses reltime and tinterval --- --- --- timezones may vary based not only on location but the operating --- system. the main correctness issue is that the OS may not get --- daylight savings time right for times prior to Unix epoch (jan 1 1970). --- -CREATE TABLE ABSTIME_TBL (f1 abstime); -BEGIN; -INSERT INTO ABSTIME_TBL (f1) VALUES (abstime 'now'); -INSERT INTO ABSTIME_TBL (f1) VALUES (abstime 'now'); -SELECT count(*) AS two FROM ABSTIME_TBL WHERE f1 = 'now' ; - two ------ - 2 -(1 row) - -END; -DELETE FROM ABSTIME_TBL; -INSERT INTO ABSTIME_TBL (f1) VALUES ('Jan 14, 1973 03:14:21'); -INSERT INTO ABSTIME_TBL (f1) VALUES (abstime 'Mon May 1 00:30:30 1995'); -INSERT INTO ABSTIME_TBL (f1) VALUES (abstime 'epoch'); -INSERT INTO ABSTIME_TBL (f1) VALUES (abstime 'infinity'); -INSERT INTO ABSTIME_TBL (f1) VALUES (abstime '-infinity'); -INSERT INTO ABSTIME_TBL (f1) VALUES (abstime 'May 10, 1947 23:59:12'); --- what happens if we specify slightly misformatted abstime? -INSERT INTO ABSTIME_TBL (f1) VALUES ('Feb 35, 1946 10:00:00'); -ERROR: date/time field value out of range: "Feb 35, 1946 10:00:00" -LINE 1: INSERT INTO ABSTIME_TBL (f1) VALUES ('Feb 35, 1946 10:00:00'... - ^ -HINT: Perhaps you need a different "datestyle" setting. -INSERT INTO ABSTIME_TBL (f1) VALUES ('Feb 28, 1984 25:08:10'); -ERROR: date/time field value out of range: "Feb 28, 1984 25:08:10" -LINE 1: INSERT INTO ABSTIME_TBL (f1) VALUES ('Feb 28, 1984 25:08:10'... 
- ^ --- badly formatted abstimes: these should result in invalid abstimes -INSERT INTO ABSTIME_TBL (f1) VALUES ('bad date format'); -ERROR: invalid input syntax for type abstime: "bad date format" -LINE 1: INSERT INTO ABSTIME_TBL (f1) VALUES ('bad date format'); - ^ -INSERT INTO ABSTIME_TBL (f1) VALUES ('Jun 10, 1843'); --- test abstime operators -SELECT '' AS eight, * FROM ABSTIME_TBL; - eight | f1 --------+------------------------------ - | Sun Jan 14 03:14:21 1973 PST - | Mon May 01 00:30:30 1995 PDT - | Wed Dec 31 16:00:00 1969 PST - | infinity - | -infinity - | Sat May 10 23:59:12 1947 PST - | invalid -(7 rows) - -SELECT '' AS six, * FROM ABSTIME_TBL - WHERE ABSTIME_TBL.f1 < abstime 'Jun 30, 2001'; - six | f1 ------+------------------------------ - | Sun Jan 14 03:14:21 1973 PST - | Mon May 01 00:30:30 1995 PDT - | Wed Dec 31 16:00:00 1969 PST - | -infinity - | Sat May 10 23:59:12 1947 PST -(5 rows) - -SELECT '' AS six, * FROM ABSTIME_TBL - WHERE ABSTIME_TBL.f1 > abstime '-infinity'; - six | f1 ------+------------------------------ - | Sun Jan 14 03:14:21 1973 PST - | Mon May 01 00:30:30 1995 PDT - | Wed Dec 31 16:00:00 1969 PST - | infinity - | Sat May 10 23:59:12 1947 PST - | invalid -(6 rows) - -SELECT '' AS six, * FROM ABSTIME_TBL - WHERE abstime 'May 10, 1947 23:59:12' <> ABSTIME_TBL.f1; - six | f1 ------+------------------------------ - | Sun Jan 14 03:14:21 1973 PST - | Mon May 01 00:30:30 1995 PDT - | Wed Dec 31 16:00:00 1969 PST - | infinity - | -infinity - | invalid -(6 rows) - -SELECT '' AS three, * FROM ABSTIME_TBL - WHERE abstime 'epoch' >= ABSTIME_TBL.f1; - three | f1 --------+------------------------------ - | Wed Dec 31 16:00:00 1969 PST - | -infinity - | Sat May 10 23:59:12 1947 PST -(3 rows) - -SELECT '' AS four, * FROM ABSTIME_TBL - WHERE ABSTIME_TBL.f1 <= abstime 'Jan 14, 1973 03:14:21'; - four | f1 -------+------------------------------ - | Sun Jan 14 03:14:21 1973 PST - | Wed Dec 31 16:00:00 1969 PST - | -infinity - | Sat May 10 23:59:12 1947 PST -(4 rows) - -SELECT '' AS four, * FROM ABSTIME_TBL - WHERE ABSTIME_TBL.f1 - tinterval '["Apr 1 1950 00:00:00" "Dec 30 1999 23:00:00"]'; - four | f1 -------+------------------------------ - | Sun Jan 14 03:14:21 1973 PST - | Mon May 01 00:30:30 1995 PDT - | Wed Dec 31 16:00:00 1969 PST -(3 rows) - -SELECT '' AS four, f1 AS abstime, - date_part('year', f1) AS year, date_part('month', f1) AS month, - date_part('day',f1) AS day, date_part('hour', f1) AS hour, - date_part('minute', f1) AS minute, date_part('second', f1) AS second - FROM ABSTIME_TBL - WHERE isfinite(f1) - ORDER BY abstime; - four | abstime | year | month | day | hour | minute | second -------+------------------------------+------+-------+-----+------+--------+-------- - | Sat May 10 23:59:12 1947 PST | 1947 | 5 | 10 | 23 | 59 | 12 - | Wed Dec 31 16:00:00 1969 PST | 1969 | 12 | 31 | 16 | 0 | 0 - | Sun Jan 14 03:14:21 1973 PST | 1973 | 1 | 14 | 3 | 14 | 21 - | Mon May 01 00:30:30 1995 PDT | 1995 | 5 | 1 | 0 | 30 | 30 -(4 rows) - diff --git a/src/test/regress/expected/aggregates.out b/src/test/regress/expected/aggregates.out index ce6b841a33..20dacfe4b0 100644 --- a/src/test/regress/expected/aggregates.out +++ b/src/test/regress/expected/aggregates.out @@ -198,6 +198,50 @@ select avg('NaN'::numeric) from generate_series(1,3); NaN (1 row) +-- verify correct results for infinite inputs +SELECT avg(x::float8), var_pop(x::float8) +FROM (VALUES ('1'), ('infinity')) v(x); + avg | var_pop +----------+--------- + Infinity | NaN +(1 row) + +SELECT avg(x::float8), 
var_pop(x::float8) +FROM (VALUES ('infinity'), ('1')) v(x); + avg | var_pop +----------+--------- + Infinity | NaN +(1 row) + +SELECT avg(x::float8), var_pop(x::float8) +FROM (VALUES ('infinity'), ('infinity')) v(x); + avg | var_pop +----------+--------- + Infinity | NaN +(1 row) + +SELECT avg(x::float8), var_pop(x::float8) +FROM (VALUES ('-infinity'), ('infinity')) v(x); + avg | var_pop +-----+--------- + NaN | NaN +(1 row) + +-- test accuracy with a large input offset +SELECT avg(x::float8), var_pop(x::float8) +FROM (VALUES (100000003), (100000004), (100000006), (100000007)) v(x); + avg | var_pop +-----------+--------- + 100000005 | 2.5 +(1 row) + +SELECT avg(x::float8), var_pop(x::float8) +FROM (VALUES (7000000000005), (7000000000007)) v(x); + avg | var_pop +---------------+--------- + 7000000000006 | 1 +(1 row) + -- SQL2003 binary aggregates SELECT regr_count(b, a) FROM aggtest; regr_count @@ -253,6 +297,90 @@ SELECT corr(b, a) FROM aggtest; 0.139634516517873 (1 row) +-- test accum and combine functions directly +CREATE TABLE regr_test (x float8, y float8); +INSERT INTO regr_test VALUES (10,150),(20,250),(30,350),(80,540),(100,200); +SELECT count(*), sum(x), regr_sxx(y,x), sum(y),regr_syy(y,x), regr_sxy(y,x) +FROM regr_test WHERE x IN (10,20,30,80); + count | sum | regr_sxx | sum | regr_syy | regr_sxy +-------+-----+----------+------+----------+---------- + 4 | 140 | 2900 | 1290 | 83075 | 15050 +(1 row) + +SELECT count(*), sum(x), regr_sxx(y,x), sum(y),regr_syy(y,x), regr_sxy(y,x) +FROM regr_test; + count | sum | regr_sxx | sum | regr_syy | regr_sxy +-------+-----+----------+------+----------+---------- + 5 | 240 | 6280 | 1490 | 95080 | 8680 +(1 row) + +SELECT float8_accum('{4,140,2900}'::float8[], 100); + float8_accum +-------------- + {5,240,6280} +(1 row) + +SELECT float8_regr_accum('{4,140,2900,1290,83075,15050}'::float8[], 200, 100); + float8_regr_accum +------------------------------ + {5,240,6280,1490,95080,8680} +(1 row) + +SELECT count(*), sum(x), regr_sxx(y,x), sum(y),regr_syy(y,x), regr_sxy(y,x) +FROM regr_test WHERE x IN (10,20,30); + count | sum | regr_sxx | sum | regr_syy | regr_sxy +-------+-----+----------+-----+----------+---------- + 3 | 60 | 200 | 750 | 20000 | 2000 +(1 row) + +SELECT count(*), sum(x), regr_sxx(y,x), sum(y),regr_syy(y,x), regr_sxy(y,x) +FROM regr_test WHERE x IN (80,100); + count | sum | regr_sxx | sum | regr_syy | regr_sxy +-------+-----+----------+-----+----------+---------- + 2 | 180 | 200 | 740 | 57800 | -3400 +(1 row) + +SELECT float8_combine('{3,60,200}'::float8[], '{0,0,0}'::float8[]); + float8_combine +---------------- + {3,60,200} +(1 row) + +SELECT float8_combine('{0,0,0}'::float8[], '{2,180,200}'::float8[]); + float8_combine +---------------- + {2,180,200} +(1 row) + +SELECT float8_combine('{3,60,200}'::float8[], '{2,180,200}'::float8[]); + float8_combine +---------------- + {5,240,6280} +(1 row) + +SELECT float8_regr_combine('{3,60,200,750,20000,2000}'::float8[], + '{0,0,0,0,0,0}'::float8[]); + float8_regr_combine +--------------------------- + {3,60,200,750,20000,2000} +(1 row) + +SELECT float8_regr_combine('{0,0,0,0,0,0}'::float8[], + '{2,180,200,740,57800,-3400}'::float8[]); + float8_regr_combine +----------------------------- + {2,180,200,740,57800,-3400} +(1 row) + +SELECT float8_regr_combine('{3,60,200,750,20000,2000}'::float8[], + '{2,180,200,740,57800,-3400}'::float8[]); + float8_regr_combine +------------------------------ + {5,240,6280,1490,95080,8680} +(1 row) + +DROP TABLE regr_test; +-- test count, distinct SELECT count(four) 
AS cnt_1000 FROM onek; cnt_1000 ---------- @@ -1388,6 +1516,12 @@ select min(unique1) filter (where unique1 > 100) from tenk1; 101 (1 row) +select sum(1/ten) filter (where ten > 0) from tenk1; + sum +------ + 1000 +(1 row) + select ten, sum(distinct four) filter (where four::text ~ '123') from onek a group by ten; ten | sum @@ -1668,7 +1802,7 @@ LINE 1: select rank(3) within group (order by stringu1,stringu2) fro... ^ HINT: To use the hypothetical-set aggregate rank, the number of hypothetical direct arguments (here 1) must match the number of ordering columns (here 2). select rank('fred') within group (order by x) from generate_series(1,5) x; -ERROR: invalid input syntax for integer: "fred" +ERROR: invalid input syntax for type integer: "fred" LINE 1: select rank('fred') within group (order by x) from generate_... ^ select rank('adam'::text collate "C") within group (order by x collate "POSIX") @@ -1860,6 +1994,35 @@ NOTICE: avg_transfn called with 3 2 | 6 (1 row) +-- exercise cases where OSAs share state +select + percentile_cont(0.5) within group (order by a), + percentile_disc(0.5) within group (order by a) +from (values(1::float8),(3),(5),(7)) t(a); + percentile_cont | percentile_disc +-----------------+----------------- + 4 | 3 +(1 row) + +select + percentile_cont(0.25) within group (order by a), + percentile_disc(0.5) within group (order by a) +from (values(1::float8),(3),(5),(7)) t(a); + percentile_cont | percentile_disc +-----------------+----------------- + 2.5 | 3 +(1 row) + +-- these can't share state currently +select + rank(4) within group (order by a), + dense_rank(4) within group (order by a) +from (values(1),(3),(5),(7)) t(a); + rank | dense_rank +------+------------ + 3 | 3 +(1 row) + -- test that aggs with the same sfunc and initcond share the same agg state create aggregate my_sum_init(int4) ( @@ -1958,3 +2121,126 @@ NOTICE: sum_transfn called with 4 (1 row) rollback; +-- test that the aggregate transition logic correctly handles +-- transition / combine functions returning NULL +-- First test the case of a normal transition function returning NULL +BEGIN; +CREATE FUNCTION balkifnull(int8, int4) +RETURNS int8 +STRICT +LANGUAGE plpgsql AS $$ +BEGIN + IF $1 IS NULL THEN + RAISE 'erroneously called with NULL argument'; + END IF; + RETURN NULL; +END$$; +CREATE AGGREGATE balk(int4) +( + SFUNC = balkifnull(int8, int4), + STYPE = int8, + PARALLEL = SAFE, + INITCOND = '0' +); +SELECT balk(hundred) FROM tenk1; + balk +------ + +(1 row) + +ROLLBACK; +-- Secondly test the case of a parallel aggregate combiner function +-- returning NULL. For that use normal transition function, but a +-- combiner function returning NULL. 
+BEGIN ISOLATION LEVEL REPEATABLE READ; +CREATE FUNCTION balkifnull(int8, int8) +RETURNS int8 +PARALLEL SAFE +STRICT +LANGUAGE plpgsql AS $$ +BEGIN + IF $1 IS NULL THEN + RAISE 'erroneously called with NULL argument'; + END IF; + RETURN NULL; +END$$; +CREATE AGGREGATE balk(int4) +( + SFUNC = int4_sum(int8, int4), + STYPE = int8, + COMBINEFUNC = balkifnull(int8, int8), + PARALLEL = SAFE, + INITCOND = '0' +); +-- force use of parallelism +ALTER TABLE tenk1 set (parallel_workers = 4); +SET LOCAL parallel_setup_cost=0; +SET LOCAL max_parallel_workers_per_gather=4; +EXPLAIN (COSTS OFF) SELECT balk(hundred) FROM tenk1; + QUERY PLAN +------------------------------------------------------------------------- + Finalize Aggregate + -> Gather + Workers Planned: 4 + -> Partial Aggregate + -> Parallel Index Only Scan using tenk1_hundred on tenk1 +(5 rows) + +SELECT balk(hundred) FROM tenk1; + balk +------ + +(1 row) + +ROLLBACK; +-- test coverage for aggregate combine/serial/deserial functions +BEGIN ISOLATION LEVEL REPEATABLE READ; +SET parallel_setup_cost = 0; +SET parallel_tuple_cost = 0; +SET min_parallel_table_scan_size = 0; +SET max_parallel_workers_per_gather = 4; +SET enable_indexonlyscan = off; +-- variance(int4) covers numeric_poly_combine +-- sum(int8) covers int8_avg_combine +EXPLAIN (COSTS OFF) + SELECT variance(unique1::int4), sum(unique1::int8) FROM tenk1; + QUERY PLAN +---------------------------------------------- + Finalize Aggregate + -> Gather + Workers Planned: 4 + -> Partial Aggregate + -> Parallel Seq Scan on tenk1 +(5 rows) + +SELECT variance(unique1::int4), sum(unique1::int8) FROM tenk1; + variance | sum +----------------------+---------- + 8334166.666666666667 | 49995000 +(1 row) + +ROLLBACK; +-- test coverage for dense_rank +SELECT dense_rank(x) WITHIN GROUP (ORDER BY x) FROM (VALUES (1),(1),(2),(2),(3),(3)) v(x) GROUP BY (x) ORDER BY 1; + dense_rank +------------ + 1 + 1 + 1 +(3 rows) + +-- Ensure that the STRICT checks for aggregates does not take NULLness +-- of ORDER BY columns into account. 
See bug report around +-- 2a505161-2727-2473-7c46-591ed108ac52@email.cz +SELECT min(x ORDER BY y) FROM (VALUES(1, NULL)) AS d(x,y); + min +----- + 1 +(1 row) + +SELECT min(x ORDER BY y) FROM (VALUES(1, 2)) AS d(x,y); + min +----- + 1 +(1 row) + diff --git a/src/test/regress/expected/alter_generic.out b/src/test/regress/expected/alter_generic.out index 9f6ad4de33..6faa9d739d 100644 --- a/src/test/regress/expected/alter_generic.out +++ b/src/test/regress/expected/alter_generic.out @@ -3,13 +3,13 @@ -- -- Clean up in case a prior regression run failed SET client_min_messages TO 'warning'; -DROP ROLE IF EXISTS regress_alter_user1; -DROP ROLE IF EXISTS regress_alter_user2; -DROP ROLE IF EXISTS regress_alter_user3; +DROP ROLE IF EXISTS regress_alter_generic_user1; +DROP ROLE IF EXISTS regress_alter_generic_user2; +DROP ROLE IF EXISTS regress_alter_generic_user3; RESET client_min_messages; -CREATE USER regress_alter_user3; -CREATE USER regress_alter_user2; -CREATE USER regress_alter_user1 IN ROLE regress_alter_user3; +CREATE USER regress_alter_generic_user3; +CREATE USER regress_alter_generic_user2; +CREATE USER regress_alter_generic_user1 IN ROLE regress_alter_generic_user3; CREATE SCHEMA alt_nsp1; CREATE SCHEMA alt_nsp2; GRANT ALL ON SCHEMA alt_nsp1, alt_nsp2 TO public; @@ -17,7 +17,7 @@ SET search_path = alt_nsp1, public; -- -- Function and Aggregate -- -SET SESSION AUTHORIZATION regress_alter_user1; +SET SESSION AUTHORIZATION regress_alter_generic_user1; CREATE FUNCTION alt_func1(int) RETURNS int LANGUAGE sql AS 'SELECT $1 + 1'; CREATE FUNCTION alt_func2(int) RETURNS int LANGUAGE sql @@ -30,26 +30,26 @@ CREATE AGGREGATE alt_agg2 ( ); ALTER AGGREGATE alt_func1(int) RENAME TO alt_func3; -- failed (not aggregate) ERROR: function alt_func1(integer) is not an aggregate -ALTER AGGREGATE alt_func1(int) OWNER TO regress_alter_user3; -- failed (not aggregate) +ALTER AGGREGATE alt_func1(int) OWNER TO regress_alter_generic_user3; -- failed (not aggregate) ERROR: function alt_func1(integer) is not an aggregate ALTER AGGREGATE alt_func1(int) SET SCHEMA alt_nsp2; -- failed (not aggregate) ERROR: function alt_func1(integer) is not an aggregate ALTER FUNCTION alt_func1(int) RENAME TO alt_func2; -- failed (name conflict) ERROR: function alt_func2(integer) already exists in schema "alt_nsp1" ALTER FUNCTION alt_func1(int) RENAME TO alt_func3; -- OK -ALTER FUNCTION alt_func2(int) OWNER TO regress_alter_user2; -- failed (no role membership) -ERROR: must be member of role "regress_alter_user2" -ALTER FUNCTION alt_func2(int) OWNER TO regress_alter_user3; -- OK +ALTER FUNCTION alt_func2(int) OWNER TO regress_alter_generic_user2; -- failed (no role membership) +ERROR: must be member of role "regress_alter_generic_user2" +ALTER FUNCTION alt_func2(int) OWNER TO regress_alter_generic_user3; -- OK ALTER FUNCTION alt_func2(int) SET SCHEMA alt_nsp1; -- OK, already there ALTER FUNCTION alt_func2(int) SET SCHEMA alt_nsp2; -- OK ALTER AGGREGATE alt_agg1(int) RENAME TO alt_agg2; -- failed (name conflict) ERROR: function alt_agg2(integer) already exists in schema "alt_nsp1" ALTER AGGREGATE alt_agg1(int) RENAME TO alt_agg3; -- OK -ALTER AGGREGATE alt_agg2(int) OWNER TO regress_alter_user2; -- failed (no role membership) -ERROR: must be member of role "regress_alter_user2" -ALTER AGGREGATE alt_agg2(int) OWNER TO regress_alter_user3; -- OK +ALTER AGGREGATE alt_agg2(int) OWNER TO regress_alter_generic_user2; -- failed (no role membership) +ERROR: must be member of role "regress_alter_generic_user2" +ALTER AGGREGATE alt_agg2(int) 
OWNER TO regress_alter_generic_user3; -- OK ALTER AGGREGATE alt_agg2(int) SET SCHEMA alt_nsp2; -- OK -SET SESSION AUTHORIZATION regress_alter_user2; +SET SESSION AUTHORIZATION regress_alter_generic_user2; CREATE FUNCTION alt_func1(int) RETURNS int LANGUAGE sql AS 'SELECT $1 + 2'; CREATE FUNCTION alt_func2(int) RETURNS int LANGUAGE sql @@ -63,10 +63,10 @@ CREATE AGGREGATE alt_agg2 ( ALTER FUNCTION alt_func3(int) RENAME TO alt_func4; -- failed (not owner) ERROR: must be owner of function alt_func3 ALTER FUNCTION alt_func1(int) RENAME TO alt_func4; -- OK -ALTER FUNCTION alt_func3(int) OWNER TO regress_alter_user2; -- failed (not owner) +ALTER FUNCTION alt_func3(int) OWNER TO regress_alter_generic_user2; -- failed (not owner) ERROR: must be owner of function alt_func3 -ALTER FUNCTION alt_func2(int) OWNER TO regress_alter_user3; -- failed (no role membership) -ERROR: must be member of role "regress_alter_user3" +ALTER FUNCTION alt_func2(int) OWNER TO regress_alter_generic_user3; -- failed (no role membership) +ERROR: must be member of role "regress_alter_generic_user3" ALTER FUNCTION alt_func3(int) SET SCHEMA alt_nsp2; -- failed (not owner) ERROR: must be owner of function alt_func3 ALTER FUNCTION alt_func2(int) SET SCHEMA alt_nsp2; -- failed (name conflicts) @@ -74,30 +74,30 @@ ERROR: function alt_func2(integer) already exists in schema "alt_nsp2" ALTER AGGREGATE alt_agg3(int) RENAME TO alt_agg4; -- failed (not owner) ERROR: must be owner of function alt_agg3 ALTER AGGREGATE alt_agg1(int) RENAME TO alt_agg4; -- OK -ALTER AGGREGATE alt_agg3(int) OWNER TO regress_alter_user2; -- failed (not owner) +ALTER AGGREGATE alt_agg3(int) OWNER TO regress_alter_generic_user2; -- failed (not owner) ERROR: must be owner of function alt_agg3 -ALTER AGGREGATE alt_agg2(int) OWNER TO regress_alter_user3; -- failed (no role membership) -ERROR: must be member of role "regress_alter_user3" +ALTER AGGREGATE alt_agg2(int) OWNER TO regress_alter_generic_user3; -- failed (no role membership) +ERROR: must be member of role "regress_alter_generic_user3" ALTER AGGREGATE alt_agg3(int) SET SCHEMA alt_nsp2; -- failed (not owner) ERROR: must be owner of function alt_agg3 ALTER AGGREGATE alt_agg2(int) SET SCHEMA alt_nsp2; -- failed (name conflict) ERROR: function alt_agg2(integer) already exists in schema "alt_nsp2" RESET SESSION AUTHORIZATION; -SELECT n.nspname, proname, prorettype::regtype, proisagg, a.rolname +SELECT n.nspname, proname, prorettype::regtype, prokind, a.rolname FROM pg_proc p, pg_namespace n, pg_authid a WHERE p.pronamespace = n.oid AND p.proowner = a.oid AND n.nspname IN ('alt_nsp1', 'alt_nsp2') ORDER BY nspname, proname; - nspname | proname | prorettype | proisagg | rolname -----------+-----------+------------+----------+--------------------- - alt_nsp1 | alt_agg2 | integer | t | regress_alter_user2 - alt_nsp1 | alt_agg3 | integer | t | regress_alter_user1 - alt_nsp1 | alt_agg4 | integer | t | regress_alter_user2 - alt_nsp1 | alt_func2 | integer | f | regress_alter_user2 - alt_nsp1 | alt_func3 | integer | f | regress_alter_user1 - alt_nsp1 | alt_func4 | integer | f | regress_alter_user2 - alt_nsp2 | alt_agg2 | integer | t | regress_alter_user3 - alt_nsp2 | alt_func2 | integer | f | regress_alter_user3 + nspname | proname | prorettype | prokind | rolname +----------+-----------+------------+---------+----------------------------- + alt_nsp1 | alt_agg2 | integer | a | regress_alter_generic_user2 + alt_nsp1 | alt_agg3 | integer | a | regress_alter_generic_user1 + alt_nsp1 | alt_agg4 | integer | a | 
regress_alter_generic_user2 + alt_nsp1 | alt_func2 | integer | f | regress_alter_generic_user2 + alt_nsp1 | alt_func3 | integer | f | regress_alter_generic_user1 + alt_nsp1 | alt_func4 | integer | f | regress_alter_generic_user2 + alt_nsp2 | alt_agg2 | integer | a | regress_alter_generic_user3 + alt_nsp2 | alt_func2 | integer | f | regress_alter_generic_user3 (8 rows) -- @@ -107,26 +107,26 @@ SELECT n.nspname, proname, prorettype::regtype, proisagg, a.rolname -- -- Conversion -- -SET SESSION AUTHORIZATION regress_alter_user1; +SET SESSION AUTHORIZATION regress_alter_generic_user1; CREATE CONVERSION alt_conv1 FOR 'LATIN1' TO 'UTF8' FROM iso8859_1_to_utf8; CREATE CONVERSION alt_conv2 FOR 'LATIN1' TO 'UTF8' FROM iso8859_1_to_utf8; ALTER CONVERSION alt_conv1 RENAME TO alt_conv2; -- failed (name conflict) ERROR: conversion "alt_conv2" already exists in schema "alt_nsp1" ALTER CONVERSION alt_conv1 RENAME TO alt_conv3; -- OK -ALTER CONVERSION alt_conv2 OWNER TO regress_alter_user2; -- failed (no role membership) -ERROR: must be member of role "regress_alter_user2" -ALTER CONVERSION alt_conv2 OWNER TO regress_alter_user3; -- OK +ALTER CONVERSION alt_conv2 OWNER TO regress_alter_generic_user2; -- failed (no role membership) +ERROR: must be member of role "regress_alter_generic_user2" +ALTER CONVERSION alt_conv2 OWNER TO regress_alter_generic_user3; -- OK ALTER CONVERSION alt_conv2 SET SCHEMA alt_nsp2; -- OK -SET SESSION AUTHORIZATION regress_alter_user2; +SET SESSION AUTHORIZATION regress_alter_generic_user2; CREATE CONVERSION alt_conv1 FOR 'LATIN1' TO 'UTF8' FROM iso8859_1_to_utf8; CREATE CONVERSION alt_conv2 FOR 'LATIN1' TO 'UTF8' FROM iso8859_1_to_utf8; ALTER CONVERSION alt_conv3 RENAME TO alt_conv4; -- failed (not owner) ERROR: must be owner of conversion alt_conv3 ALTER CONVERSION alt_conv1 RENAME TO alt_conv4; -- OK -ALTER CONVERSION alt_conv3 OWNER TO regress_alter_user2; -- failed (not owner) +ALTER CONVERSION alt_conv3 OWNER TO regress_alter_generic_user2; -- failed (not owner) ERROR: must be owner of conversion alt_conv3 -ALTER CONVERSION alt_conv2 OWNER TO regress_alter_user3; -- failed (no role membership) -ERROR: must be member of role "regress_alter_user3" +ALTER CONVERSION alt_conv2 OWNER TO regress_alter_generic_user3; -- failed (no role membership) +ERROR: must be member of role "regress_alter_generic_user3" ALTER CONVERSION alt_conv3 SET SCHEMA alt_nsp2; -- failed (not owner) ERROR: must be owner of conversion alt_conv3 ALTER CONVERSION alt_conv2 SET SCHEMA alt_nsp2; -- failed (name conflict) @@ -137,12 +137,12 @@ SELECT n.nspname, c.conname, a.rolname WHERE c.connamespace = n.oid AND c.conowner = a.oid AND n.nspname IN ('alt_nsp1', 'alt_nsp2') ORDER BY nspname, conname; - nspname | conname | rolname -----------+-----------+--------------------- - alt_nsp1 | alt_conv2 | regress_alter_user2 - alt_nsp1 | alt_conv3 | regress_alter_user1 - alt_nsp1 | alt_conv4 | regress_alter_user2 - alt_nsp2 | alt_conv2 | regress_alter_user3 + nspname | conname | rolname +----------+-----------+----------------------------- + alt_nsp1 | alt_conv2 | regress_alter_generic_user2 + alt_nsp1 | alt_conv3 | regress_alter_generic_user1 + alt_nsp1 | alt_conv4 | regress_alter_generic_user2 + alt_nsp2 | alt_conv2 | regress_alter_generic_user3 (4 rows) -- @@ -177,46 +177,46 @@ SELECT srvname FROM pg_foreign_server WHERE srvname like 'alt_fserv%'; -- CREATE LANGUAGE alt_lang1 HANDLER plpgsql_call_handler; CREATE LANGUAGE alt_lang2 HANDLER plpgsql_call_handler; -ALTER LANGUAGE alt_lang1 OWNER TO 
regress_alter_user1; -- OK -ALTER LANGUAGE alt_lang2 OWNER TO regress_alter_user2; -- OK -SET SESSION AUTHORIZATION regress_alter_user1; +ALTER LANGUAGE alt_lang1 OWNER TO regress_alter_generic_user1; -- OK +ALTER LANGUAGE alt_lang2 OWNER TO regress_alter_generic_user2; -- OK +SET SESSION AUTHORIZATION regress_alter_generic_user1; ALTER LANGUAGE alt_lang1 RENAME TO alt_lang2; -- failed (name conflict) ERROR: language "alt_lang2" already exists ALTER LANGUAGE alt_lang2 RENAME TO alt_lang3; -- failed (not owner) ERROR: must be owner of language alt_lang2 ALTER LANGUAGE alt_lang1 RENAME TO alt_lang3; -- OK -ALTER LANGUAGE alt_lang2 OWNER TO regress_alter_user3; -- failed (not owner) +ALTER LANGUAGE alt_lang2 OWNER TO regress_alter_generic_user3; -- failed (not owner) ERROR: must be owner of language alt_lang2 -ALTER LANGUAGE alt_lang3 OWNER TO regress_alter_user2; -- failed (no role membership) -ERROR: must be member of role "regress_alter_user2" -ALTER LANGUAGE alt_lang3 OWNER TO regress_alter_user3; -- OK +ALTER LANGUAGE alt_lang3 OWNER TO regress_alter_generic_user2; -- failed (no role membership) +ERROR: must be member of role "regress_alter_generic_user2" +ALTER LANGUAGE alt_lang3 OWNER TO regress_alter_generic_user3; -- OK RESET SESSION AUTHORIZATION; SELECT lanname, a.rolname FROM pg_language l, pg_authid a WHERE l.lanowner = a.oid AND l.lanname like 'alt_lang%' ORDER BY lanname; - lanname | rolname ------------+--------------------- - alt_lang2 | regress_alter_user2 - alt_lang3 | regress_alter_user3 + lanname | rolname +-----------+----------------------------- + alt_lang2 | regress_alter_generic_user2 + alt_lang3 | regress_alter_generic_user3 (2 rows) -- -- Operator -- -SET SESSION AUTHORIZATION regress_alter_user1; +SET SESSION AUTHORIZATION regress_alter_generic_user1; CREATE OPERATOR @-@ ( leftarg = int4, rightarg = int4, procedure = int4mi ); CREATE OPERATOR @+@ ( leftarg = int4, rightarg = int4, procedure = int4pl ); -ALTER OPERATOR @+@(int4, int4) OWNER TO regress_alter_user2; -- failed (no role membership) -ERROR: must be member of role "regress_alter_user2" -ALTER OPERATOR @+@(int4, int4) OWNER TO regress_alter_user3; -- OK +ALTER OPERATOR @+@(int4, int4) OWNER TO regress_alter_generic_user2; -- failed (no role membership) +ERROR: must be member of role "regress_alter_generic_user2" +ALTER OPERATOR @+@(int4, int4) OWNER TO regress_alter_generic_user3; -- OK ALTER OPERATOR @-@(int4, int4) SET SCHEMA alt_nsp2; -- OK -SET SESSION AUTHORIZATION regress_alter_user2; +SET SESSION AUTHORIZATION regress_alter_generic_user2; CREATE OPERATOR @-@ ( leftarg = int4, rightarg = int4, procedure = int4mi ); -ALTER OPERATOR @+@(int4, int4) OWNER TO regress_alter_user2; -- failed (not owner) +ALTER OPERATOR @+@(int4, int4) OWNER TO regress_alter_generic_user2; -- failed (not owner) ERROR: must be owner of operator @+@ -ALTER OPERATOR @-@(int4, int4) OWNER TO regress_alter_user3; -- failed (no role membership) -ERROR: must be member of role "regress_alter_user3" +ALTER OPERATOR @-@(int4, int4) OWNER TO regress_alter_generic_user3; -- failed (no role membership) +ERROR: must be member of role "regress_alter_generic_user3" ALTER OPERATOR @+@(int4, int4) SET SCHEMA alt_nsp2; -- failed (not owner) ERROR: must be owner of operator @+@ -- can't test this: the error message includes the raw oid of namespace @@ -228,11 +228,11 @@ SELECT n.nspname, oprname, a.rolname, WHERE o.oprnamespace = n.oid AND o.oprowner = a.oid AND n.nspname IN ('alt_nsp1', 'alt_nsp2') ORDER BY nspname, oprname; - nspname | 
oprname | rolname | oprleft | oprright | oprcode -----------+---------+---------------------+---------+----------+--------- - alt_nsp1 | @+@ | regress_alter_user3 | integer | integer | int4pl - alt_nsp1 | @-@ | regress_alter_user2 | integer | integer | int4mi - alt_nsp2 | @-@ | regress_alter_user1 | integer | integer | int4mi + nspname | oprname | rolname | oprleft | oprright | oprcode +----------+---------+-----------------------------+---------+----------+--------- + alt_nsp1 | @+@ | regress_alter_generic_user3 | integer | integer | int4pl + alt_nsp1 | @-@ | regress_alter_generic_user2 | integer | integer | int4mi + alt_nsp2 | @-@ | regress_alter_generic_user1 | integer | integer | int4mi (3 rows) -- @@ -240,44 +240,44 @@ SELECT n.nspname, oprname, a.rolname, -- CREATE OPERATOR FAMILY alt_opf1 USING hash; CREATE OPERATOR FAMILY alt_opf2 USING hash; -ALTER OPERATOR FAMILY alt_opf1 USING hash OWNER TO regress_alter_user1; -ALTER OPERATOR FAMILY alt_opf2 USING hash OWNER TO regress_alter_user1; +ALTER OPERATOR FAMILY alt_opf1 USING hash OWNER TO regress_alter_generic_user1; +ALTER OPERATOR FAMILY alt_opf2 USING hash OWNER TO regress_alter_generic_user1; CREATE OPERATOR CLASS alt_opc1 FOR TYPE uuid USING hash AS STORAGE uuid; CREATE OPERATOR CLASS alt_opc2 FOR TYPE uuid USING hash AS STORAGE uuid; -ALTER OPERATOR CLASS alt_opc1 USING hash OWNER TO regress_alter_user1; -ALTER OPERATOR CLASS alt_opc2 USING hash OWNER TO regress_alter_user1; -SET SESSION AUTHORIZATION regress_alter_user1; +ALTER OPERATOR CLASS alt_opc1 USING hash OWNER TO regress_alter_generic_user1; +ALTER OPERATOR CLASS alt_opc2 USING hash OWNER TO regress_alter_generic_user1; +SET SESSION AUTHORIZATION regress_alter_generic_user1; ALTER OPERATOR FAMILY alt_opf1 USING hash RENAME TO alt_opf2; -- failed (name conflict) ERROR: operator family "alt_opf2" for access method "hash" already exists in schema "alt_nsp1" ALTER OPERATOR FAMILY alt_opf1 USING hash RENAME TO alt_opf3; -- OK -ALTER OPERATOR FAMILY alt_opf2 USING hash OWNER TO regress_alter_user2; -- failed (no role membership) -ERROR: must be member of role "regress_alter_user2" -ALTER OPERATOR FAMILY alt_opf2 USING hash OWNER TO regress_alter_user3; -- OK +ALTER OPERATOR FAMILY alt_opf2 USING hash OWNER TO regress_alter_generic_user2; -- failed (no role membership) +ERROR: must be member of role "regress_alter_generic_user2" +ALTER OPERATOR FAMILY alt_opf2 USING hash OWNER TO regress_alter_generic_user3; -- OK ALTER OPERATOR FAMILY alt_opf2 USING hash SET SCHEMA alt_nsp2; -- OK ALTER OPERATOR CLASS alt_opc1 USING hash RENAME TO alt_opc2; -- failed (name conflict) ERROR: operator class "alt_opc2" for access method "hash" already exists in schema "alt_nsp1" ALTER OPERATOR CLASS alt_opc1 USING hash RENAME TO alt_opc3; -- OK -ALTER OPERATOR CLASS alt_opc2 USING hash OWNER TO regress_alter_user2; -- failed (no role membership) -ERROR: must be member of role "regress_alter_user2" -ALTER OPERATOR CLASS alt_opc2 USING hash OWNER TO regress_alter_user3; -- OK +ALTER OPERATOR CLASS alt_opc2 USING hash OWNER TO regress_alter_generic_user2; -- failed (no role membership) +ERROR: must be member of role "regress_alter_generic_user2" +ALTER OPERATOR CLASS alt_opc2 USING hash OWNER TO regress_alter_generic_user3; -- OK ALTER OPERATOR CLASS alt_opc2 USING hash SET SCHEMA alt_nsp2; -- OK RESET SESSION AUTHORIZATION; CREATE OPERATOR FAMILY alt_opf1 USING hash; CREATE OPERATOR FAMILY alt_opf2 USING hash; -ALTER OPERATOR FAMILY alt_opf1 USING hash OWNER TO regress_alter_user2; -ALTER OPERATOR 
FAMILY alt_opf2 USING hash OWNER TO regress_alter_user2; +ALTER OPERATOR FAMILY alt_opf1 USING hash OWNER TO regress_alter_generic_user2; +ALTER OPERATOR FAMILY alt_opf2 USING hash OWNER TO regress_alter_generic_user2; CREATE OPERATOR CLASS alt_opc1 FOR TYPE macaddr USING hash AS STORAGE macaddr; CREATE OPERATOR CLASS alt_opc2 FOR TYPE macaddr USING hash AS STORAGE macaddr; -ALTER OPERATOR CLASS alt_opc1 USING hash OWNER TO regress_alter_user2; -ALTER OPERATOR CLASS alt_opc2 USING hash OWNER TO regress_alter_user2; -SET SESSION AUTHORIZATION regress_alter_user2; +ALTER OPERATOR CLASS alt_opc1 USING hash OWNER TO regress_alter_generic_user2; +ALTER OPERATOR CLASS alt_opc2 USING hash OWNER TO regress_alter_generic_user2; +SET SESSION AUTHORIZATION regress_alter_generic_user2; ALTER OPERATOR FAMILY alt_opf3 USING hash RENAME TO alt_opf4; -- failed (not owner) ERROR: must be owner of operator family alt_opf3 ALTER OPERATOR FAMILY alt_opf1 USING hash RENAME TO alt_opf4; -- OK -ALTER OPERATOR FAMILY alt_opf3 USING hash OWNER TO regress_alter_user2; -- failed (not owner) +ALTER OPERATOR FAMILY alt_opf3 USING hash OWNER TO regress_alter_generic_user2; -- failed (not owner) ERROR: must be owner of operator family alt_opf3 -ALTER OPERATOR FAMILY alt_opf2 USING hash OWNER TO regress_alter_user3; -- failed (no role membership) -ERROR: must be member of role "regress_alter_user3" +ALTER OPERATOR FAMILY alt_opf2 USING hash OWNER TO regress_alter_generic_user3; -- failed (no role membership) +ERROR: must be member of role "regress_alter_generic_user3" ALTER OPERATOR FAMILY alt_opf3 USING hash SET SCHEMA alt_nsp2; -- failed (not owner) ERROR: must be owner of operator family alt_opf3 ALTER OPERATOR FAMILY alt_opf2 USING hash SET SCHEMA alt_nsp2; -- failed (name conflict) @@ -285,10 +285,10 @@ ERROR: operator family "alt_opf2" for access method "hash" already exists in sc ALTER OPERATOR CLASS alt_opc3 USING hash RENAME TO alt_opc4; -- failed (not owner) ERROR: must be owner of operator class alt_opc3 ALTER OPERATOR CLASS alt_opc1 USING hash RENAME TO alt_opc4; -- OK -ALTER OPERATOR CLASS alt_opc3 USING hash OWNER TO regress_alter_user2; -- failed (not owner) +ALTER OPERATOR CLASS alt_opc3 USING hash OWNER TO regress_alter_generic_user2; -- failed (not owner) ERROR: must be owner of operator class alt_opc3 -ALTER OPERATOR CLASS alt_opc2 USING hash OWNER TO regress_alter_user3; -- failed (no role membership) -ERROR: must be member of role "regress_alter_user3" +ALTER OPERATOR CLASS alt_opc2 USING hash OWNER TO regress_alter_generic_user3; -- failed (no role membership) +ERROR: must be member of role "regress_alter_generic_user3" ALTER OPERATOR CLASS alt_opc3 USING hash SET SCHEMA alt_nsp2; -- failed (not owner) ERROR: must be owner of operator class alt_opc3 ALTER OPERATOR CLASS alt_opc2 USING hash SET SCHEMA alt_nsp2; -- failed (name conflict) @@ -300,12 +300,12 @@ SELECT nspname, opfname, amname, rolname AND n.nspname IN ('alt_nsp1', 'alt_nsp2') AND NOT opfname LIKE 'alt_opc%' ORDER BY nspname, opfname; - nspname | opfname | amname | rolname -----------+----------+--------+--------------------- - alt_nsp1 | alt_opf2 | hash | regress_alter_user2 - alt_nsp1 | alt_opf3 | hash | regress_alter_user1 - alt_nsp1 | alt_opf4 | hash | regress_alter_user2 - alt_nsp2 | alt_opf2 | hash | regress_alter_user3 + nspname | opfname | amname | rolname +----------+----------+--------+----------------------------- + alt_nsp1 | alt_opf2 | hash | regress_alter_generic_user2 + alt_nsp1 | alt_opf3 | hash | 
regress_alter_generic_user1 + alt_nsp1 | alt_opf4 | hash | regress_alter_generic_user2 + alt_nsp2 | alt_opf2 | hash | regress_alter_generic_user3 (4 rows) SELECT nspname, opcname, amname, rolname @@ -313,12 +313,12 @@ SELECT nspname, opcname, amname, rolname WHERE o.opcmethod = m.oid AND o.opcnamespace = n.oid AND o.opcowner = a.oid AND n.nspname IN ('alt_nsp1', 'alt_nsp2') ORDER BY nspname, opcname; - nspname | opcname | amname | rolname -----------+----------+--------+--------------------- - alt_nsp1 | alt_opc2 | hash | regress_alter_user2 - alt_nsp1 | alt_opc3 | hash | regress_alter_user1 - alt_nsp1 | alt_opc4 | hash | regress_alter_user2 - alt_nsp2 | alt_opc2 | hash | regress_alter_user3 + nspname | opcname | amname | rolname +----------+----------+--------+----------------------------- + alt_nsp1 | alt_opc2 | hash | regress_alter_generic_user2 + alt_nsp1 | alt_opc3 | hash | regress_alter_generic_user1 + alt_nsp1 | alt_opc4 | hash | regress_alter_generic_user2 + alt_nsp2 | alt_opc2 | hash | regress_alter_generic_user3 (4 rows) -- ALTER OPERATOR FAMILY ... ADD/DROP @@ -354,17 +354,17 @@ ERROR: invalid operator number 0, must be between 1 and 5 ALTER OPERATOR FAMILY alt_opf4 USING btree ADD OPERATOR 1 < ; -- operator without argument types ERROR: operator argument types must be specified in ALTER OPERATOR FAMILY ALTER OPERATOR FAMILY alt_opf4 USING btree ADD FUNCTION 0 btint42cmp(int4, int2); -- function number should be between 1 and 5 -ERROR: invalid procedure number 0, must be between 1 and 2 +ERROR: invalid function number 0, must be between 1 and 3 ALTER OPERATOR FAMILY alt_opf4 USING btree ADD FUNCTION 6 btint42cmp(int4, int2); -- function number should be between 1 and 5 -ERROR: invalid procedure number 6, must be between 1 and 2 +ERROR: invalid function number 6, must be between 1 and 3 ALTER OPERATOR FAMILY alt_opf4 USING btree ADD STORAGE invalid_storage; -- Ensure STORAGE is not a part of ALTER OPERATOR FAMILY ERROR: STORAGE cannot be specified in ALTER OPERATOR FAMILY DROP OPERATOR FAMILY alt_opf4 USING btree; -- Should fail. Need to be SUPERUSER to do ALTER OPERATOR FAMILY .. ADD / DROP BEGIN TRANSACTION; -CREATE ROLE regress_alter_user5 NOSUPERUSER; +CREATE ROLE regress_alter_generic_user5 NOSUPERUSER; CREATE OPERATOR FAMILY alt_opf5 USING btree; -SET ROLE regress_alter_user5; +SET ROLE regress_alter_generic_user5; ALTER OPERATOR FAMILY alt_opf5 USING btree ADD OPERATOR 1 < (int4, int2), FUNCTION 1 btint42cmp(int4, int2); ERROR: must be superuser to alter an operator family RESET ROLE; @@ -374,11 +374,11 @@ ERROR: current transaction is aborted, commands ignored until end of transactio ROLLBACK; -- Should fail. Need rights to namespace for ALTER OPERATOR FAMILY .. 
ADD / DROP BEGIN TRANSACTION; -CREATE ROLE regress_alter_user6; +CREATE ROLE regress_alter_generic_user6; CREATE SCHEMA alt_nsp6; -REVOKE ALL ON SCHEMA alt_nsp6 FROM regress_alter_user6; +REVOKE ALL ON SCHEMA alt_nsp6 FROM regress_alter_generic_user6; CREATE OPERATOR FAMILY alt_nsp6.alt_opf6 USING btree; -SET ROLE regress_alter_user6; +SET ROLE regress_alter_generic_user6; ALTER OPERATOR FAMILY alt_nsp6.alt_opf6 USING btree ADD OPERATOR 1 < (int4, int2); ERROR: permission denied for schema alt_nsp6 ROLLBACK; @@ -412,7 +412,7 @@ BEGIN TRANSACTION; CREATE OPERATOR FAMILY alt_opf12 USING btree; CREATE FUNCTION fn_opf12 (int4, int2) RETURNS BIGINT AS 'SELECT NULL::BIGINT;' LANGUAGE SQL; ALTER OPERATOR FAMILY alt_opf12 USING btree ADD FUNCTION 1 fn_opf12(int4, int2); -ERROR: btree comparison procedures must return integer +ERROR: btree comparison functions must return integer DROP OPERATOR FAMILY alt_opf12 USING btree; ERROR: current transaction is aborted, commands ignored until end of transaction block ROLLBACK; @@ -421,7 +421,7 @@ BEGIN TRANSACTION; CREATE OPERATOR FAMILY alt_opf13 USING hash; CREATE FUNCTION fn_opf13 (int4) RETURNS BIGINT AS 'SELECT NULL::BIGINT;' LANGUAGE SQL; ALTER OPERATOR FAMILY alt_opf13 USING hash ADD FUNCTION 1 fn_opf13(int4); -ERROR: hash procedures must return integer +ERROR: hash function 1 must return integer DROP OPERATOR FAMILY alt_opf13 USING hash; ERROR: current transaction is aborted, commands ignored until end of transaction block ROLLBACK; @@ -430,7 +430,7 @@ BEGIN TRANSACTION; CREATE OPERATOR FAMILY alt_opf14 USING btree; CREATE FUNCTION fn_opf14 (int4) RETURNS BIGINT AS 'SELECT NULL::BIGINT;' LANGUAGE SQL; ALTER OPERATOR FAMILY alt_opf14 USING btree ADD FUNCTION 1 fn_opf14(int4); -ERROR: btree comparison procedures must have two arguments +ERROR: btree comparison functions must have two arguments DROP OPERATOR FAMILY alt_opf14 USING btree; ERROR: current transaction is aborted, commands ignored until end of transaction block ROLLBACK; @@ -439,7 +439,7 @@ BEGIN TRANSACTION; CREATE OPERATOR FAMILY alt_opf15 USING hash; CREATE FUNCTION fn_opf15 (int4, int2) RETURNS BIGINT AS 'SELECT NULL::BIGINT;' LANGUAGE SQL; ALTER OPERATOR FAMILY alt_opf15 USING hash ADD FUNCTION 1 fn_opf15(int4, int2); -ERROR: hash procedures must have one argument +ERROR: hash function 1 must have one argument DROP OPERATOR FAMILY alt_opf15 USING hash; ERROR: current transaction is aborted, commands ignored until end of transaction block ROLLBACK; @@ -447,7 +447,7 @@ ROLLBACK; -- without defining left / right type in ALTER OPERATOR FAMILY ... ADD FUNCTION CREATE OPERATOR FAMILY alt_opf16 USING gist; ALTER OPERATOR FAMILY alt_opf16 USING gist ADD FUNCTION 1 btint42cmp(int4, int2); -ERROR: associated data types must be specified for index support procedure +ERROR: associated data types must be specified for index support function DROP OPERATOR FAMILY alt_opf16 USING gist; -- Should fail. duplicate operator number / function number in ALTER OPERATOR FAMILY ... 
ADD FUNCTION CREATE OPERATOR FAMILY alt_opf17 USING btree; @@ -464,7 +464,7 @@ ALTER OPERATOR FAMILY alt_opf17 USING btree ADD OPERATOR 5 > (int4, int2) , FUNCTION 1 btint42cmp(int4, int2) , FUNCTION 1 btint42cmp(int4, int2); -- procedure 1 appears twice in same statement -ERROR: procedure number 1 for (integer,smallint) appears more than once +ERROR: function number 1 for (integer,smallint) appears more than once ALTER OPERATOR FAMILY alt_opf17 USING btree ADD OPERATOR 1 < (int4, int2) , OPERATOR 2 <= (int4, int2) , @@ -499,28 +499,28 @@ DROP OPERATOR FAMILY alt_opf18 USING btree; -- -- Statistics -- -SET SESSION AUTHORIZATION regress_alter_user1; +SET SESSION AUTHORIZATION regress_alter_generic_user1; CREATE TABLE alt_regress_1 (a INTEGER, b INTEGER); CREATE STATISTICS alt_stat1 ON a, b FROM alt_regress_1; CREATE STATISTICS alt_stat2 ON a, b FROM alt_regress_1; ALTER STATISTICS alt_stat1 RENAME TO alt_stat2; -- failed (name conflict) ERROR: statistics object "alt_stat2" already exists in schema "alt_nsp1" ALTER STATISTICS alt_stat1 RENAME TO alt_stat3; -- failed (name conflict) -ALTER STATISTICS alt_stat2 OWNER TO regress_alter_user2; -- failed (no role membership) -ERROR: must be member of role "regress_alter_user2" -ALTER STATISTICS alt_stat2 OWNER TO regress_alter_user3; -- OK +ALTER STATISTICS alt_stat2 OWNER TO regress_alter_generic_user2; -- failed (no role membership) +ERROR: must be member of role "regress_alter_generic_user2" +ALTER STATISTICS alt_stat2 OWNER TO regress_alter_generic_user3; -- OK ALTER STATISTICS alt_stat2 SET SCHEMA alt_nsp2; -- OK -SET SESSION AUTHORIZATION regress_alter_user2; +SET SESSION AUTHORIZATION regress_alter_generic_user2; CREATE TABLE alt_regress_2 (a INTEGER, b INTEGER); CREATE STATISTICS alt_stat1 ON a, b FROM alt_regress_2; CREATE STATISTICS alt_stat2 ON a, b FROM alt_regress_2; ALTER STATISTICS alt_stat3 RENAME TO alt_stat4; -- failed (not owner) ERROR: must be owner of statistics object alt_stat3 ALTER STATISTICS alt_stat1 RENAME TO alt_stat4; -- OK -ALTER STATISTICS alt_stat3 OWNER TO regress_alter_user2; -- failed (not owner) +ALTER STATISTICS alt_stat3 OWNER TO regress_alter_generic_user2; -- failed (not owner) ERROR: must be owner of statistics object alt_stat3 -ALTER STATISTICS alt_stat2 OWNER TO regress_alter_user3; -- failed (no role membership) -ERROR: must be member of role "regress_alter_user3" +ALTER STATISTICS alt_stat2 OWNER TO regress_alter_generic_user3; -- failed (no role membership) +ERROR: must be member of role "regress_alter_generic_user3" ALTER STATISTICS alt_stat3 SET SCHEMA alt_nsp2; -- failed (not owner) ERROR: must be owner of statistics object alt_stat3 ALTER STATISTICS alt_stat2 SET SCHEMA alt_nsp2; -- failed (name conflict) @@ -531,37 +531,37 @@ SELECT nspname, stxname, rolname WHERE s.stxnamespace = n.oid AND s.stxowner = a.oid AND n.nspname in ('alt_nsp1', 'alt_nsp2') ORDER BY nspname, stxname; - nspname | stxname | rolname -----------+-----------+--------------------- - alt_nsp1 | alt_stat2 | regress_alter_user2 - alt_nsp1 | alt_stat3 | regress_alter_user1 - alt_nsp1 | alt_stat4 | regress_alter_user2 - alt_nsp2 | alt_stat2 | regress_alter_user3 + nspname | stxname | rolname +----------+-----------+----------------------------- + alt_nsp1 | alt_stat2 | regress_alter_generic_user2 + alt_nsp1 | alt_stat3 | regress_alter_generic_user1 + alt_nsp1 | alt_stat4 | regress_alter_generic_user2 + alt_nsp2 | alt_stat2 | regress_alter_generic_user3 (4 rows) -- -- Text Search Dictionary -- -SET SESSION AUTHORIZATION 
regress_alter_user1; +SET SESSION AUTHORIZATION regress_alter_generic_user1; CREATE TEXT SEARCH DICTIONARY alt_ts_dict1 (template=simple); CREATE TEXT SEARCH DICTIONARY alt_ts_dict2 (template=simple); ALTER TEXT SEARCH DICTIONARY alt_ts_dict1 RENAME TO alt_ts_dict2; -- failed (name conflict) ERROR: text search dictionary "alt_ts_dict2" already exists in schema "alt_nsp1" ALTER TEXT SEARCH DICTIONARY alt_ts_dict1 RENAME TO alt_ts_dict3; -- OK -ALTER TEXT SEARCH DICTIONARY alt_ts_dict2 OWNER TO regress_alter_user2; -- failed (no role membership) -ERROR: must be member of role "regress_alter_user2" -ALTER TEXT SEARCH DICTIONARY alt_ts_dict2 OWNER TO regress_alter_user3; -- OK +ALTER TEXT SEARCH DICTIONARY alt_ts_dict2 OWNER TO regress_alter_generic_user2; -- failed (no role membership) +ERROR: must be member of role "regress_alter_generic_user2" +ALTER TEXT SEARCH DICTIONARY alt_ts_dict2 OWNER TO regress_alter_generic_user3; -- OK ALTER TEXT SEARCH DICTIONARY alt_ts_dict2 SET SCHEMA alt_nsp2; -- OK -SET SESSION AUTHORIZATION regress_alter_user2; +SET SESSION AUTHORIZATION regress_alter_generic_user2; CREATE TEXT SEARCH DICTIONARY alt_ts_dict1 (template=simple); CREATE TEXT SEARCH DICTIONARY alt_ts_dict2 (template=simple); ALTER TEXT SEARCH DICTIONARY alt_ts_dict3 RENAME TO alt_ts_dict4; -- failed (not owner) ERROR: must be owner of text search dictionary alt_ts_dict3 ALTER TEXT SEARCH DICTIONARY alt_ts_dict1 RENAME TO alt_ts_dict4; -- OK -ALTER TEXT SEARCH DICTIONARY alt_ts_dict3 OWNER TO regress_alter_user2; -- failed (not owner) +ALTER TEXT SEARCH DICTIONARY alt_ts_dict3 OWNER TO regress_alter_generic_user2; -- failed (not owner) ERROR: must be owner of text search dictionary alt_ts_dict3 -ALTER TEXT SEARCH DICTIONARY alt_ts_dict2 OWNER TO regress_alter_user3; -- failed (no role membership) -ERROR: must be member of role "regress_alter_user3" +ALTER TEXT SEARCH DICTIONARY alt_ts_dict2 OWNER TO regress_alter_generic_user3; -- failed (no role membership) +ERROR: must be member of role "regress_alter_generic_user3" ALTER TEXT SEARCH DICTIONARY alt_ts_dict3 SET SCHEMA alt_nsp2; -- failed (not owner) ERROR: must be owner of text search dictionary alt_ts_dict3 ALTER TEXT SEARCH DICTIONARY alt_ts_dict2 SET SCHEMA alt_nsp2; -- failed (name conflict) @@ -572,37 +572,37 @@ SELECT nspname, dictname, rolname WHERE t.dictnamespace = n.oid AND t.dictowner = a.oid AND n.nspname in ('alt_nsp1', 'alt_nsp2') ORDER BY nspname, dictname; - nspname | dictname | rolname -----------+--------------+--------------------- - alt_nsp1 | alt_ts_dict2 | regress_alter_user2 - alt_nsp1 | alt_ts_dict3 | regress_alter_user1 - alt_nsp1 | alt_ts_dict4 | regress_alter_user2 - alt_nsp2 | alt_ts_dict2 | regress_alter_user3 + nspname | dictname | rolname +----------+--------------+----------------------------- + alt_nsp1 | alt_ts_dict2 | regress_alter_generic_user2 + alt_nsp1 | alt_ts_dict3 | regress_alter_generic_user1 + alt_nsp1 | alt_ts_dict4 | regress_alter_generic_user2 + alt_nsp2 | alt_ts_dict2 | regress_alter_generic_user3 (4 rows) -- -- Text Search Configuration -- -SET SESSION AUTHORIZATION regress_alter_user1; +SET SESSION AUTHORIZATION regress_alter_generic_user1; CREATE TEXT SEARCH CONFIGURATION alt_ts_conf1 (copy=english); CREATE TEXT SEARCH CONFIGURATION alt_ts_conf2 (copy=english); ALTER TEXT SEARCH CONFIGURATION alt_ts_conf1 RENAME TO alt_ts_conf2; -- failed (name conflict) ERROR: text search configuration "alt_ts_conf2" already exists in schema "alt_nsp1" ALTER TEXT SEARCH CONFIGURATION alt_ts_conf1 RENAME TO 
alt_ts_conf3; -- OK -ALTER TEXT SEARCH CONFIGURATION alt_ts_conf2 OWNER TO regress_alter_user2; -- failed (no role membership) -ERROR: must be member of role "regress_alter_user2" -ALTER TEXT SEARCH CONFIGURATION alt_ts_conf2 OWNER TO regress_alter_user3; -- OK +ALTER TEXT SEARCH CONFIGURATION alt_ts_conf2 OWNER TO regress_alter_generic_user2; -- failed (no role membership) +ERROR: must be member of role "regress_alter_generic_user2" +ALTER TEXT SEARCH CONFIGURATION alt_ts_conf2 OWNER TO regress_alter_generic_user3; -- OK ALTER TEXT SEARCH CONFIGURATION alt_ts_conf2 SET SCHEMA alt_nsp2; -- OK -SET SESSION AUTHORIZATION regress_alter_user2; +SET SESSION AUTHORIZATION regress_alter_generic_user2; CREATE TEXT SEARCH CONFIGURATION alt_ts_conf1 (copy=english); CREATE TEXT SEARCH CONFIGURATION alt_ts_conf2 (copy=english); ALTER TEXT SEARCH CONFIGURATION alt_ts_conf3 RENAME TO alt_ts_conf4; -- failed (not owner) ERROR: must be owner of text search configuration alt_ts_conf3 ALTER TEXT SEARCH CONFIGURATION alt_ts_conf1 RENAME TO alt_ts_conf4; -- OK -ALTER TEXT SEARCH CONFIGURATION alt_ts_conf3 OWNER TO regress_alter_user2; -- failed (not owner) +ALTER TEXT SEARCH CONFIGURATION alt_ts_conf3 OWNER TO regress_alter_generic_user2; -- failed (not owner) ERROR: must be owner of text search configuration alt_ts_conf3 -ALTER TEXT SEARCH CONFIGURATION alt_ts_conf2 OWNER TO regress_alter_user3; -- failed (no role membership) -ERROR: must be member of role "regress_alter_user3" +ALTER TEXT SEARCH CONFIGURATION alt_ts_conf2 OWNER TO regress_alter_generic_user3; -- failed (no role membership) +ERROR: must be member of role "regress_alter_generic_user3" ALTER TEXT SEARCH CONFIGURATION alt_ts_conf3 SET SCHEMA alt_nsp2; -- failed (not owner) ERROR: must be owner of text search configuration alt_ts_conf3 ALTER TEXT SEARCH CONFIGURATION alt_ts_conf2 SET SCHEMA alt_nsp2; -- failed (name conflict) @@ -613,12 +613,12 @@ SELECT nspname, cfgname, rolname WHERE t.cfgnamespace = n.oid AND t.cfgowner = a.oid AND n.nspname in ('alt_nsp1', 'alt_nsp2') ORDER BY nspname, cfgname; - nspname | cfgname | rolname -----------+--------------+--------------------- - alt_nsp1 | alt_ts_conf2 | regress_alter_user2 - alt_nsp1 | alt_ts_conf3 | regress_alter_user1 - alt_nsp1 | alt_ts_conf4 | regress_alter_user2 - alt_nsp2 | alt_ts_conf2 | regress_alter_user3 + nspname | cfgname | rolname +----------+--------------+----------------------------- + alt_nsp1 | alt_ts_conf2 | regress_alter_generic_user2 + alt_nsp1 | alt_ts_conf3 | regress_alter_generic_user1 + alt_nsp1 | alt_ts_conf4 | regress_alter_generic_user2 + alt_nsp2 | alt_ts_conf2 | regress_alter_generic_user3 (4 rows) -- @@ -633,6 +633,9 @@ ALTER TEXT SEARCH TEMPLATE alt_ts_temp2 SET SCHEMA alt_nsp2; -- OK CREATE TEXT SEARCH TEMPLATE alt_ts_temp2 (lexize=dsimple_lexize); ALTER TEXT SEARCH TEMPLATE alt_ts_temp2 SET SCHEMA alt_nsp2; -- failed (name conflict) ERROR: text search template "alt_ts_temp2" already exists in schema "alt_nsp2" +-- invalid: non-lowercase quoted identifiers +CREATE TEXT SEARCH TEMPLATE tstemp_case ("Init" = init_function); +ERROR: text search template parameter "Init" not recognized SELECT nspname, tmplname FROM pg_ts_template t, pg_namespace n WHERE t.tmplnamespace = n.oid AND nspname like 'alt_nsp%' @@ -659,6 +662,9 @@ CREATE TEXT SEARCH PARSER alt_ts_prs2 (start = prsd_start, gettoken = prsd_nexttoken, end = prsd_end, lextypes = prsd_lextype); ALTER TEXT SEARCH PARSER alt_ts_prs2 SET SCHEMA alt_nsp2; -- failed (name conflict) ERROR: text search parser 
"alt_ts_prs2" already exists in schema "alt_nsp2" +-- invalid: non-lowercase quoted identifiers +CREATE TEXT SEARCH PARSER tspars_case ("Start" = start_function); +ERROR: text search parser parameter "Start" not recognized SELECT nspname, prsname FROM pg_ts_parser t, pg_namespace n WHERE t.prsnamespace = n.oid AND nspname like 'alt_nsp%' @@ -684,6 +690,6 @@ DROP SCHEMA alt_nsp1 CASCADE; NOTICE: drop cascades to 28 other objects DROP SCHEMA alt_nsp2 CASCADE; NOTICE: drop cascades to 9 other objects -DROP USER regress_alter_user1; -DROP USER regress_alter_user2; -DROP USER regress_alter_user3; +DROP USER regress_alter_generic_user1; +DROP USER regress_alter_generic_user2; +DROP USER regress_alter_generic_user3; diff --git a/src/test/regress/expected/alter_operator.out b/src/test/regress/expected/alter_operator.out index ef47affd7b..71bd484282 100644 --- a/src/test/regress/expected/alter_operator.out +++ b/src/test/regress/expected/alter_operator.out @@ -121,6 +121,9 @@ ALTER OPERATOR === (boolean, boolean) SET (COMMUTATOR = !==); ERROR: operator attribute "commutator" cannot be changed ALTER OPERATOR === (boolean, boolean) SET (NEGATOR = !==); ERROR: operator attribute "negator" cannot be changed +-- invalid: non-lowercase quoted identifiers +ALTER OPERATOR & (bit, bit) SET ("Restrict" = _int_contsel, "Join" = _int_contjoinsel); +ERROR: operator attribute "Restrict" not recognized -- -- Test permission check. Must be owner to ALTER OPERATOR. -- diff --git a/src/test/regress/expected/alter_table.out b/src/test/regress/expected/alter_table.out index 58192d2c6a..0aa13b3cec 100644 --- a/src/test/regress/expected/alter_table.out +++ b/src/test/regress/expected/alter_table.out @@ -1,193 +1,244 @@ -- -- ALTER_TABLE +-- +-- Clean up in case a prior regression run failed +SET client_min_messages TO 'warning'; +DROP ROLE IF EXISTS regress_alter_table_user1; +RESET client_min_messages; +CREATE USER regress_alter_table_user1; +-- -- add attribute -- -CREATE TABLE tmp (initial int4); -COMMENT ON TABLE tmp_wrong IS 'table comment'; -ERROR: relation "tmp_wrong" does not exist -COMMENT ON TABLE tmp IS 'table comment'; -COMMENT ON TABLE tmp IS NULL; -ALTER TABLE tmp ADD COLUMN xmin integer; -- fails +CREATE TABLE attmp (initial int4); +COMMENT ON TABLE attmp_wrong IS 'table comment'; +ERROR: relation "attmp_wrong" does not exist +COMMENT ON TABLE attmp IS 'table comment'; +COMMENT ON TABLE attmp IS NULL; +ALTER TABLE attmp ADD COLUMN xmin integer; -- fails ERROR: column name "xmin" conflicts with a system column name -ALTER TABLE tmp ADD COLUMN a int4 default 3; -ALTER TABLE tmp ADD COLUMN b name; -ALTER TABLE tmp ADD COLUMN c text; -ALTER TABLE tmp ADD COLUMN d float8; -ALTER TABLE tmp ADD COLUMN e float4; -ALTER TABLE tmp ADD COLUMN f int2; -ALTER TABLE tmp ADD COLUMN g polygon; -ALTER TABLE tmp ADD COLUMN h abstime; -ALTER TABLE tmp ADD COLUMN i char; -ALTER TABLE tmp ADD COLUMN j abstime[]; -ALTER TABLE tmp ADD COLUMN k int4; -ALTER TABLE tmp ADD COLUMN l tid; -ALTER TABLE tmp ADD COLUMN m xid; -ALTER TABLE tmp ADD COLUMN n oidvector; ---ALTER TABLE tmp ADD COLUMN o lock; -ALTER TABLE tmp ADD COLUMN p smgr; -ALTER TABLE tmp ADD COLUMN q point; -ALTER TABLE tmp ADD COLUMN r lseg; -ALTER TABLE tmp ADD COLUMN s path; -ALTER TABLE tmp ADD COLUMN t box; -ALTER TABLE tmp ADD COLUMN u tinterval; -ALTER TABLE tmp ADD COLUMN v timestamp; -ALTER TABLE tmp ADD COLUMN w interval; -ALTER TABLE tmp ADD COLUMN x float8[]; -ALTER TABLE tmp ADD COLUMN y float4[]; -ALTER TABLE tmp ADD COLUMN z int2[]; -INSERT INTO tmp (a, 
b, c, d, e, f, g, h, i, j, k, l, m, n, p, q, r, s, t, u, +ALTER TABLE attmp ADD COLUMN a int4 default 3; +ALTER TABLE attmp ADD COLUMN b name; +ALTER TABLE attmp ADD COLUMN c text; +ALTER TABLE attmp ADD COLUMN d float8; +ALTER TABLE attmp ADD COLUMN e float4; +ALTER TABLE attmp ADD COLUMN f int2; +ALTER TABLE attmp ADD COLUMN g polygon; +ALTER TABLE attmp ADD COLUMN i char; +ALTER TABLE attmp ADD COLUMN k int4; +ALTER TABLE attmp ADD COLUMN l tid; +ALTER TABLE attmp ADD COLUMN m xid; +ALTER TABLE attmp ADD COLUMN n oidvector; +--ALTER TABLE attmp ADD COLUMN o lock; +ALTER TABLE attmp ADD COLUMN p smgr; +ALTER TABLE attmp ADD COLUMN q point; +ALTER TABLE attmp ADD COLUMN r lseg; +ALTER TABLE attmp ADD COLUMN s path; +ALTER TABLE attmp ADD COLUMN t box; +ALTER TABLE attmp ADD COLUMN v timestamp; +ALTER TABLE attmp ADD COLUMN w interval; +ALTER TABLE attmp ADD COLUMN x float8[]; +ALTER TABLE attmp ADD COLUMN y float4[]; +ALTER TABLE attmp ADD COLUMN z int2[]; +INSERT INTO attmp (a, b, c, d, e, f, g, i, k, l, m, n, p, q, r, s, t, v, w, x, y, z) VALUES (4, 'name', 'text', 4.1, 4.1, 2, '(4.1,4.1,3.1,3.1)', - 'Mon May 1 00:30:30 1995', 'c', '{Mon May 1 00:30:30 1995, Monday Aug 24 14:43:07 1992, epoch}', + 'c', 314159, '(1,1)', '512', '1 2 3 4 5 6 7 8', 'magnetic disk', '(1.1,1.1)', '(4.1,4.1,3.1,3.1)', - '(0,2,4.1,4.1,3.1,3.1)', '(4.1,4.1,3.1,3.1)', '["epoch" "infinity"]', + '(0,2,4.1,4.1,3.1,3.1)', '(4.1,4.1,3.1,3.1)', 'epoch', '01:00:10', '{1.0,2.0,3.0,4.0}', '{1.0,2.0,3.0,4.0}', '{1,2,3,4}'); -SELECT * FROM tmp; - initial | a | b | c | d | e | f | g | h | i | j | k | l | m | n | p | q | r | s | t | u | v | w | x | y | z ----------+---+------+------+-----+-----+---+-----------------------+------------------------------+---+------------------------------------------------------------------------------------------------+--------+-------+-----+-----------------+---------------+-----------+-----------------------+-----------------------------+---------------------+---------------------------------------------+--------------------------+------------------+-----------+-----------+----------- - | 4 | name | text | 4.1 | 4.1 | 2 | ((4.1,4.1),(3.1,3.1)) | Mon May 01 00:30:30 1995 PDT | c | {"Mon May 01 00:30:30 1995 PDT","Mon Aug 24 14:43:07 1992 PDT","Wed Dec 31 16:00:00 1969 PST"} | 314159 | (1,1) | 512 | 1 2 3 4 5 6 7 8 | magnetic disk | (1.1,1.1) | [(4.1,4.1),(3.1,3.1)] | ((0,2),(4.1,4.1),(3.1,3.1)) | (4.1,4.1),(3.1,3.1) | ["Wed Dec 31 16:00:00 1969 PST" "infinity"] | Thu Jan 01 00:00:00 1970 | @ 1 hour 10 secs | {1,2,3,4} | {1,2,3,4} | {1,2,3,4} +SELECT * FROM attmp; + initial | a | b | c | d | e | f | g | i | k | l | m | n | p | q | r | s | t | v | w | x | y | z +---------+---+------+------+-----+-----+---+-----------------------+---+--------+-------+-----+-----------------+---------------+-----------+-----------------------+-----------------------------+---------------------+--------------------------+------------------+-----------+-----------+----------- + | 4 | name | text | 4.1 | 4.1 | 2 | ((4.1,4.1),(3.1,3.1)) | c | 314159 | (1,1) | 512 | 1 2 3 4 5 6 7 8 | magnetic disk | (1.1,1.1) | [(4.1,4.1),(3.1,3.1)] | ((0,2),(4.1,4.1),(3.1,3.1)) | (4.1,4.1),(3.1,3.1) | Thu Jan 01 00:00:00 1970 | @ 1 hour 10 secs | {1,2,3,4} | {1,2,3,4} | {1,2,3,4} (1 row) -DROP TABLE tmp; +DROP TABLE attmp; -- the wolf bug - schema mods caused inconsistent row descriptors -CREATE TABLE tmp ( +CREATE TABLE attmp ( initial int4 ); -ALTER TABLE tmp ADD COLUMN a int4; -ALTER TABLE tmp ADD COLUMN b name; -ALTER TABLE tmp 
ADD COLUMN c text; -ALTER TABLE tmp ADD COLUMN d float8; -ALTER TABLE tmp ADD COLUMN e float4; -ALTER TABLE tmp ADD COLUMN f int2; -ALTER TABLE tmp ADD COLUMN g polygon; -ALTER TABLE tmp ADD COLUMN h abstime; -ALTER TABLE tmp ADD COLUMN i char; -ALTER TABLE tmp ADD COLUMN j abstime[]; -ALTER TABLE tmp ADD COLUMN k int4; -ALTER TABLE tmp ADD COLUMN l tid; -ALTER TABLE tmp ADD COLUMN m xid; -ALTER TABLE tmp ADD COLUMN n oidvector; ---ALTER TABLE tmp ADD COLUMN o lock; -ALTER TABLE tmp ADD COLUMN p smgr; -ALTER TABLE tmp ADD COLUMN q point; -ALTER TABLE tmp ADD COLUMN r lseg; -ALTER TABLE tmp ADD COLUMN s path; -ALTER TABLE tmp ADD COLUMN t box; -ALTER TABLE tmp ADD COLUMN u tinterval; -ALTER TABLE tmp ADD COLUMN v timestamp; -ALTER TABLE tmp ADD COLUMN w interval; -ALTER TABLE tmp ADD COLUMN x float8[]; -ALTER TABLE tmp ADD COLUMN y float4[]; -ALTER TABLE tmp ADD COLUMN z int2[]; -INSERT INTO tmp (a, b, c, d, e, f, g, h, i, j, k, l, m, n, p, q, r, s, t, u, +ALTER TABLE attmp ADD COLUMN a int4; +ALTER TABLE attmp ADD COLUMN b name; +ALTER TABLE attmp ADD COLUMN c text; +ALTER TABLE attmp ADD COLUMN d float8; +ALTER TABLE attmp ADD COLUMN e float4; +ALTER TABLE attmp ADD COLUMN f int2; +ALTER TABLE attmp ADD COLUMN g polygon; +ALTER TABLE attmp ADD COLUMN i char; +ALTER TABLE attmp ADD COLUMN k int4; +ALTER TABLE attmp ADD COLUMN l tid; +ALTER TABLE attmp ADD COLUMN m xid; +ALTER TABLE attmp ADD COLUMN n oidvector; +--ALTER TABLE attmp ADD COLUMN o lock; +ALTER TABLE attmp ADD COLUMN p smgr; +ALTER TABLE attmp ADD COLUMN q point; +ALTER TABLE attmp ADD COLUMN r lseg; +ALTER TABLE attmp ADD COLUMN s path; +ALTER TABLE attmp ADD COLUMN t box; +ALTER TABLE attmp ADD COLUMN v timestamp; +ALTER TABLE attmp ADD COLUMN w interval; +ALTER TABLE attmp ADD COLUMN x float8[]; +ALTER TABLE attmp ADD COLUMN y float4[]; +ALTER TABLE attmp ADD COLUMN z int2[]; +INSERT INTO attmp (a, b, c, d, e, f, g, i, k, l, m, n, p, q, r, s, t, v, w, x, y, z) VALUES (4, 'name', 'text', 4.1, 4.1, 2, '(4.1,4.1,3.1,3.1)', - 'Mon May 1 00:30:30 1995', 'c', '{Mon May 1 00:30:30 1995, Monday Aug 24 14:43:07 1992, epoch}', + 'c', 314159, '(1,1)', '512', '1 2 3 4 5 6 7 8', 'magnetic disk', '(1.1,1.1)', '(4.1,4.1,3.1,3.1)', - '(0,2,4.1,4.1,3.1,3.1)', '(4.1,4.1,3.1,3.1)', '["epoch" "infinity"]', + '(0,2,4.1,4.1,3.1,3.1)', '(4.1,4.1,3.1,3.1)', 'epoch', '01:00:10', '{1.0,2.0,3.0,4.0}', '{1.0,2.0,3.0,4.0}', '{1,2,3,4}'); -SELECT * FROM tmp; - initial | a | b | c | d | e | f | g | h | i | j | k | l | m | n | p | q | r | s | t | u | v | w | x | y | z ----------+---+------+------+-----+-----+---+-----------------------+------------------------------+---+------------------------------------------------------------------------------------------------+--------+-------+-----+-----------------+---------------+-----------+-----------------------+-----------------------------+---------------------+---------------------------------------------+--------------------------+------------------+-----------+-----------+----------- - | 4 | name | text | 4.1 | 4.1 | 2 | ((4.1,4.1),(3.1,3.1)) | Mon May 01 00:30:30 1995 PDT | c | {"Mon May 01 00:30:30 1995 PDT","Mon Aug 24 14:43:07 1992 PDT","Wed Dec 31 16:00:00 1969 PST"} | 314159 | (1,1) | 512 | 1 2 3 4 5 6 7 8 | magnetic disk | (1.1,1.1) | [(4.1,4.1),(3.1,3.1)] | ((0,2),(4.1,4.1),(3.1,3.1)) | (4.1,4.1),(3.1,3.1) | ["Wed Dec 31 16:00:00 1969 PST" "infinity"] | Thu Jan 01 00:00:00 1970 | @ 1 hour 10 secs | {1,2,3,4} | {1,2,3,4} | {1,2,3,4} +SELECT * FROM attmp; + initial | a | b | c | d | e | f | g | i | k 
| l | m | n | p | q | r | s | t | v | w | x | y | z +---------+---+------+------+-----+-----+---+-----------------------+---+--------+-------+-----+-----------------+---------------+-----------+-----------------------+-----------------------------+---------------------+--------------------------+------------------+-----------+-----------+----------- + | 4 | name | text | 4.1 | 4.1 | 2 | ((4.1,4.1),(3.1,3.1)) | c | 314159 | (1,1) | 512 | 1 2 3 4 5 6 7 8 | magnetic disk | (1.1,1.1) | [(4.1,4.1),(3.1,3.1)] | ((0,2),(4.1,4.1),(3.1,3.1)) | (4.1,4.1),(3.1,3.1) | Thu Jan 01 00:00:00 1970 | @ 1 hour 10 secs | {1,2,3,4} | {1,2,3,4} | {1,2,3,4} (1 row) -DROP TABLE tmp; +CREATE INDEX attmp_idx ON attmp (a, (d + e), b); +ALTER INDEX attmp_idx ALTER COLUMN 0 SET STATISTICS 1000; +ERROR: column number must be in range from 1 to 32767 +LINE 1: ALTER INDEX attmp_idx ALTER COLUMN 0 SET STATISTICS 1000; + ^ +ALTER INDEX attmp_idx ALTER COLUMN 1 SET STATISTICS 1000; +ERROR: cannot alter statistics on non-expression column "a" of index "attmp_idx" +HINT: Alter statistics on table column instead. +ALTER INDEX attmp_idx ALTER COLUMN 2 SET STATISTICS 1000; +\d+ attmp_idx + Index "public.attmp_idx" + Column | Type | Key? | Definition | Storage | Stats target +--------+------------------+------+------------+---------+-------------- + a | integer | yes | a | plain | + expr | double precision | yes | (d + e) | plain | 1000 + b | cstring | yes | b | plain | +btree, for table "public.attmp" + +ALTER INDEX attmp_idx ALTER COLUMN 3 SET STATISTICS 1000; +ERROR: cannot alter statistics on non-expression column "b" of index "attmp_idx" +HINT: Alter statistics on table column instead. +ALTER INDEX attmp_idx ALTER COLUMN 4 SET STATISTICS 1000; +ERROR: column number 4 of relation "attmp_idx" does not exist +ALTER INDEX attmp_idx ALTER COLUMN 2 SET STATISTICS -1; +DROP TABLE attmp; -- -- rename - check on both non-temp and temp tables -- -CREATE TABLE tmp (regtable int); -CREATE TEMP TABLE tmp (tmptable int); -ALTER TABLE tmp RENAME TO tmp_new; -SELECT * FROM tmp; +CREATE TABLE attmp (regtable int); +CREATE TEMP TABLE attmp (attmptable int); +ALTER TABLE attmp RENAME TO attmp_new; +SELECT * FROM attmp; regtable ---------- (0 rows) -SELECT * FROM tmp_new; - tmptable ----------- +SELECT * FROM attmp_new; + attmptable +------------ (0 rows) -ALTER TABLE tmp RENAME TO tmp_new2; -SELECT * FROM tmp; -- should fail -ERROR: relation "tmp" does not exist -LINE 1: SELECT * FROM tmp; +ALTER TABLE attmp RENAME TO attmp_new2; +SELECT * FROM attmp; -- should fail +ERROR: relation "attmp" does not exist +LINE 1: SELECT * FROM attmp; ^ -SELECT * FROM tmp_new; - tmptable ----------- +SELECT * FROM attmp_new; + attmptable +------------ (0 rows) -SELECT * FROM tmp_new2; +SELECT * FROM attmp_new2; regtable ---------- (0 rows) -DROP TABLE tmp_new; -DROP TABLE tmp_new2; +DROP TABLE attmp_new; +DROP TABLE attmp_new2; +-- check rename of partitioned tables and indexes also +CREATE TABLE part_attmp (a int primary key) partition by range (a); +CREATE TABLE part_attmp1 PARTITION OF part_attmp FOR VALUES FROM (0) TO (100); +ALTER INDEX part_attmp_pkey RENAME TO part_attmp_index; +ALTER INDEX part_attmp1_pkey RENAME TO part_attmp1_index; +ALTER TABLE part_attmp RENAME TO part_at2tmp; +ALTER TABLE part_attmp1 RENAME TO part_at2tmp1; +SET ROLE regress_alter_table_user1; +ALTER INDEX part_attmp_index RENAME TO fail; +ERROR: must be owner of index part_attmp_index +ALTER INDEX part_attmp1_index RENAME TO fail; +ERROR: must be owner of index part_attmp1_index 
+ALTER TABLE part_at2tmp RENAME TO fail; +ERROR: must be owner of table part_at2tmp +ALTER TABLE part_at2tmp1 RENAME TO fail; +ERROR: must be owner of table part_at2tmp1 +RESET ROLE; +DROP TABLE part_at2tmp; -- -- check renaming to a table's array type's autogenerated name -- (the array type's name should get out of the way) -- -CREATE TABLE tmp_array (id int); -CREATE TABLE tmp_array2 (id int); -SELECT typname FROM pg_type WHERE oid = 'tmp_array[]'::regtype; - typname ------------- - _tmp_array +CREATE TABLE attmp_array (id int); +CREATE TABLE attmp_array2 (id int); +SELECT typname FROM pg_type WHERE oid = 'attmp_array[]'::regtype; + typname +-------------- + _attmp_array (1 row) -SELECT typname FROM pg_type WHERE oid = 'tmp_array2[]'::regtype; - typname -------------- - _tmp_array2 +SELECT typname FROM pg_type WHERE oid = 'attmp_array2[]'::regtype; + typname +--------------- + _attmp_array2 (1 row) -ALTER TABLE tmp_array2 RENAME TO _tmp_array; -SELECT typname FROM pg_type WHERE oid = 'tmp_array[]'::regtype; - typname -------------- - __tmp_array +ALTER TABLE attmp_array2 RENAME TO _attmp_array; +SELECT typname FROM pg_type WHERE oid = 'attmp_array[]'::regtype; + typname +--------------- + __attmp_array (1 row) -SELECT typname FROM pg_type WHERE oid = '_tmp_array[]'::regtype; - typname --------------- - ___tmp_array +SELECT typname FROM pg_type WHERE oid = '_attmp_array[]'::regtype; + typname +---------------- + ___attmp_array (1 row) -DROP TABLE _tmp_array; -DROP TABLE tmp_array; +DROP TABLE _attmp_array; +DROP TABLE attmp_array; -- renaming to table's own array type's name is an interesting corner case -CREATE TABLE tmp_array (id int); -SELECT typname FROM pg_type WHERE oid = 'tmp_array[]'::regtype; - typname ------------- - _tmp_array +CREATE TABLE attmp_array (id int); +SELECT typname FROM pg_type WHERE oid = 'attmp_array[]'::regtype; + typname +-------------- + _attmp_array (1 row) -ALTER TABLE tmp_array RENAME TO _tmp_array; -SELECT typname FROM pg_type WHERE oid = '_tmp_array[]'::regtype; - typname -------------- - __tmp_array +ALTER TABLE attmp_array RENAME TO _attmp_array; +SELECT typname FROM pg_type WHERE oid = '_attmp_array[]'::regtype; + typname +--------------- + __attmp_array (1 row) -DROP TABLE _tmp_array; +DROP TABLE _attmp_array; -- ALTER TABLE ... 
RENAME on non-table relations -- renaming indexes (FIXME: this should probably test the index's functionality) -ALTER INDEX IF EXISTS __onek_unique1 RENAME TO tmp_onek_unique1; +ALTER INDEX IF EXISTS __onek_unique1 RENAME TO attmp_onek_unique1; NOTICE: relation "__onek_unique1" does not exist, skipping -ALTER INDEX IF EXISTS __tmp_onek_unique1 RENAME TO onek_unique1; -NOTICE: relation "__tmp_onek_unique1" does not exist, skipping -ALTER INDEX onek_unique1 RENAME TO tmp_onek_unique1; -ALTER INDEX tmp_onek_unique1 RENAME TO onek_unique1; +ALTER INDEX IF EXISTS __attmp_onek_unique1 RENAME TO onek_unique1; +NOTICE: relation "__attmp_onek_unique1" does not exist, skipping +ALTER INDEX onek_unique1 RENAME TO attmp_onek_unique1; +ALTER INDEX attmp_onek_unique1 RENAME TO onek_unique1; +SET ROLE regress_alter_table_user1; +ALTER INDEX onek_unique1 RENAME TO fail; -- permission denied +ERROR: must be owner of index onek_unique1 +RESET ROLE; -- renaming views -CREATE VIEW tmp_view (unique1) AS SELECT unique1 FROM tenk1; -ALTER TABLE tmp_view RENAME TO tmp_view_new; +CREATE VIEW attmp_view (unique1) AS SELECT unique1 FROM tenk1; +ALTER TABLE attmp_view RENAME TO attmp_view_new; +SET ROLE regress_alter_table_user1; +ALTER VIEW attmp_view_new RENAME TO fail; -- permission denied +ERROR: must be owner of view attmp_view_new +RESET ROLE; -- hack to ensure we get an indexscan here set enable_seqscan to off; set enable_bitmapscan to off; @@ -204,7 +255,7 @@ SELECT unique1 FROM tenk1 WHERE unique1 < 5; reset enable_seqscan; reset enable_bitmapscan; -DROP VIEW tmp_view_new; +DROP VIEW attmp_view_new; -- toast-like relation name alter table stud_emp rename to pg_toast_stud_emp; alter table pg_toast_stud_emp rename to stud_emp; @@ -343,77 +394,77 @@ NOTICE: relation "constraint_not_exist" does not exist, skipping ALTER TABLE IF EXISTS constraint_rename_test ADD CONSTRAINT con4 UNIQUE (a); NOTICE: relation "constraint_rename_test" does not exist, skipping -- FOREIGN KEY CONSTRAINT adding TEST -CREATE TABLE tmp2 (a int primary key); -CREATE TABLE tmp3 (a int, b int); -CREATE TABLE tmp4 (a int, b int, unique(a,b)); -CREATE TABLE tmp5 (a int, b int); --- Insert rows into tmp2 (pktable) -INSERT INTO tmp2 values (1); -INSERT INTO tmp2 values (2); -INSERT INTO tmp2 values (3); -INSERT INTO tmp2 values (4); --- Insert rows into tmp3 -INSERT INTO tmp3 values (1,10); -INSERT INTO tmp3 values (1,20); -INSERT INTO tmp3 values (5,50); +CREATE TABLE attmp2 (a int primary key); +CREATE TABLE attmp3 (a int, b int); +CREATE TABLE attmp4 (a int, b int, unique(a,b)); +CREATE TABLE attmp5 (a int, b int); +-- Insert rows into attmp2 (pktable) +INSERT INTO attmp2 values (1); +INSERT INTO attmp2 values (2); +INSERT INTO attmp2 values (3); +INSERT INTO attmp2 values (4); +-- Insert rows into attmp3 +INSERT INTO attmp3 values (1,10); +INSERT INTO attmp3 values (1,20); +INSERT INTO attmp3 values (5,50); -- Try (and fail) to add constraint due to invalid source columns -ALTER TABLE tmp3 add constraint tmpconstr foreign key(c) references tmp2 match full; +ALTER TABLE attmp3 add constraint attmpconstr foreign key(c) references attmp2 match full; ERROR: column "c" referenced in foreign key constraint does not exist -- Try (and fail) to add constraint due to invalid destination columns explicitly given -ALTER TABLE tmp3 add constraint tmpconstr foreign key(a) references tmp2(b) match full; +ALTER TABLE attmp3 add constraint attmpconstr foreign key(a) references attmp2(b) match full; ERROR: column "b" referenced in foreign key constraint does 
not exist -- Try (and fail) to add constraint due to invalid data -ALTER TABLE tmp3 add constraint tmpconstr foreign key (a) references tmp2 match full; -ERROR: insert or update on table "tmp3" violates foreign key constraint "tmpconstr" -DETAIL: Key (a)=(5) is not present in table "tmp2". +ALTER TABLE attmp3 add constraint attmpconstr foreign key (a) references attmp2 match full; +ERROR: insert or update on table "attmp3" violates foreign key constraint "attmpconstr" +DETAIL: Key (a)=(5) is not present in table "attmp2". -- Delete failing row -DELETE FROM tmp3 where a=5; +DELETE FROM attmp3 where a=5; -- Try (and succeed) -ALTER TABLE tmp3 add constraint tmpconstr foreign key (a) references tmp2 match full; -ALTER TABLE tmp3 drop constraint tmpconstr; -INSERT INTO tmp3 values (5,50); +ALTER TABLE attmp3 add constraint attmpconstr foreign key (a) references attmp2 match full; +ALTER TABLE attmp3 drop constraint attmpconstr; +INSERT INTO attmp3 values (5,50); -- Try NOT VALID and then VALIDATE CONSTRAINT, but fails. Delete failure then re-validate -ALTER TABLE tmp3 add constraint tmpconstr foreign key (a) references tmp2 match full NOT VALID; -ALTER TABLE tmp3 validate constraint tmpconstr; -ERROR: insert or update on table "tmp3" violates foreign key constraint "tmpconstr" -DETAIL: Key (a)=(5) is not present in table "tmp2". +ALTER TABLE attmp3 add constraint attmpconstr foreign key (a) references attmp2 match full NOT VALID; +ALTER TABLE attmp3 validate constraint attmpconstr; +ERROR: insert or update on table "attmp3" violates foreign key constraint "attmpconstr" +DETAIL: Key (a)=(5) is not present in table "attmp2". -- Delete failing row -DELETE FROM tmp3 where a=5; +DELETE FROM attmp3 where a=5; -- Try (and succeed) and repeat to show it works on already valid constraint -ALTER TABLE tmp3 validate constraint tmpconstr; -ALTER TABLE tmp3 validate constraint tmpconstr; +ALTER TABLE attmp3 validate constraint attmpconstr; +ALTER TABLE attmp3 validate constraint attmpconstr; -- Try a non-verified CHECK constraint -ALTER TABLE tmp3 ADD CONSTRAINT b_greater_than_ten CHECK (b > 10); -- fail +ALTER TABLE attmp3 ADD CONSTRAINT b_greater_than_ten CHECK (b > 10); -- fail ERROR: check constraint "b_greater_than_ten" is violated by some row -ALTER TABLE tmp3 ADD CONSTRAINT b_greater_than_ten CHECK (b > 10) NOT VALID; -- succeeds -ALTER TABLE tmp3 VALIDATE CONSTRAINT b_greater_than_ten; -- fails +ALTER TABLE attmp3 ADD CONSTRAINT b_greater_than_ten CHECK (b > 10) NOT VALID; -- succeeds +ALTER TABLE attmp3 VALIDATE CONSTRAINT b_greater_than_ten; -- fails ERROR: check constraint "b_greater_than_ten" is violated by some row -DELETE FROM tmp3 WHERE NOT b > 10; -ALTER TABLE tmp3 VALIDATE CONSTRAINT b_greater_than_ten; -- succeeds -ALTER TABLE tmp3 VALIDATE CONSTRAINT b_greater_than_ten; -- succeeds +DELETE FROM attmp3 WHERE NOT b > 10; +ALTER TABLE attmp3 VALIDATE CONSTRAINT b_greater_than_ten; -- succeeds +ALTER TABLE attmp3 VALIDATE CONSTRAINT b_greater_than_ten; -- succeeds -- Test inherited NOT VALID CHECK constraints -select * from tmp3; +select * from attmp3; a | b ---+---- 1 | 20 (1 row) -CREATE TABLE tmp6 () INHERITS (tmp3); -CREATE TABLE tmp7 () INHERITS (tmp3); -INSERT INTO tmp6 VALUES (6, 30), (7, 16); -ALTER TABLE tmp3 ADD CONSTRAINT b_le_20 CHECK (b <= 20) NOT VALID; -ALTER TABLE tmp3 VALIDATE CONSTRAINT b_le_20; -- fails +CREATE TABLE attmp6 () INHERITS (attmp3); +CREATE TABLE attmp7 () INHERITS (attmp3); +INSERT INTO attmp6 VALUES (6, 30), (7, 16); +ALTER TABLE attmp3 ADD CONSTRAINT 
b_le_20 CHECK (b <= 20) NOT VALID; +ALTER TABLE attmp3 VALIDATE CONSTRAINT b_le_20; -- fails ERROR: check constraint "b_le_20" is violated by some row -DELETE FROM tmp6 WHERE b > 20; -ALTER TABLE tmp3 VALIDATE CONSTRAINT b_le_20; -- succeeds +DELETE FROM attmp6 WHERE b > 20; +ALTER TABLE attmp3 VALIDATE CONSTRAINT b_le_20; -- succeeds -- An already validated constraint must not be revalidated CREATE FUNCTION boo(int) RETURNS int IMMUTABLE STRICT LANGUAGE plpgsql AS $$ BEGIN RAISE NOTICE 'boo: %', $1; RETURN $1; END; $$; -INSERT INTO tmp7 VALUES (8, 18); -ALTER TABLE tmp7 ADD CONSTRAINT identity CHECK (b = boo(b)); +INSERT INTO attmp7 VALUES (8, 18); +ALTER TABLE attmp7 ADD CONSTRAINT identity CHECK (b = boo(b)); NOTICE: boo: 18 -ALTER TABLE tmp3 ADD CONSTRAINT IDENTITY check (b = boo(b)) NOT VALID; +ALTER TABLE attmp3 ADD CONSTRAINT IDENTITY check (b = boo(b)) NOT VALID; NOTICE: merging constraint "identity" with inherited definition -ALTER TABLE tmp3 VALIDATE CONSTRAINT identity; +ALTER TABLE attmp3 VALIDATE CONSTRAINT identity; NOTICE: boo: 16 NOTICE: boo: 20 -- A NO INHERIT constraint should not be looked for in children during VALIDATE CONSTRAINT @@ -436,16 +487,16 @@ select convalidated from pg_constraint where conrelid = 'parent_noinh_convalid': -- cleanup drop table parent_noinh_convalid, child_noinh_convalid; --- Try (and fail) to create constraint from tmp5(a) to tmp4(a) - unique constraint on --- tmp4 is a,b -ALTER TABLE tmp5 add constraint tmpconstr foreign key(a) references tmp4(a) match full; -ERROR: there is no unique constraint matching given keys for referenced table "tmp4" -DROP TABLE tmp7; -DROP TABLE tmp6; -DROP TABLE tmp5; -DROP TABLE tmp4; -DROP TABLE tmp3; -DROP TABLE tmp2; +-- Try (and fail) to create constraint from attmp5(a) to attmp4(a) - unique constraint on +-- attmp4 is a,b +ALTER TABLE attmp5 add constraint attmpconstr foreign key(a) references attmp4(a) match full; +ERROR: there is no unique constraint matching given keys for referenced table "attmp4" +DROP TABLE attmp7; +DROP TABLE attmp6; +DROP TABLE attmp5; +DROP TABLE attmp4; +DROP TABLE attmp3; +DROP TABLE attmp2; -- NOT VALID with plan invalidation -- ensure we don't use a constraint for -- exclusion until validated set constraint_exclusion TO 'partition'; @@ -1056,7 +1107,7 @@ select * from def_test; -- set defaults to an incorrect type: this should fail alter table def_test alter column c1 set default 'wrong_datatype'; -ERROR: invalid input syntax for integer: "wrong_datatype" +ERROR: invalid input syntax for type integer: "wrong_datatype" alter table def_test alter column c2 set default 20; -- set defaults on a non-existent column: this should fail alter table def_test alter column c3 set default 30; @@ -1325,22 +1376,22 @@ create index "testing_idx" on atacc1("........pg.dropped.1........"); ERROR: column "........pg.dropped.1........" 
does not exist -- test create as and select into insert into atacc1 values (21, 22, 23); -create table test1 as select * from atacc1; -select * from test1; +create table attest1 as select * from atacc1; +select * from attest1; b | c | d ----+----+---- 21 | 22 | 23 (1 row) -drop table test1; -select * into test2 from atacc1; -select * from test2; +drop table attest1; +select * into attest2 from atacc1; +select * from attest2; b | c | d ----+----+---- 21 | 22 | 23 (1 row) -drop table test2; +drop table attest2; -- try dropping all columns alter table atacc1 drop c; alter table atacc1 drop d; @@ -1421,38 +1472,38 @@ NOTICE: merging column "b" with inherited definition drop table child; drop table parent; -- test copy in/out -create table test (a int4, b int4, c int4); -insert into test values (1,2,3); -alter table test drop a; -copy test to stdout; +create table attest (a int4, b int4, c int4); +insert into attest values (1,2,3); +alter table attest drop a; +copy attest to stdout; 2 3 -copy test(a) to stdout; -ERROR: column "a" of relation "test" does not exist -copy test("........pg.dropped.1........") to stdout; -ERROR: column "........pg.dropped.1........" of relation "test" does not exist -copy test from stdin; +copy attest(a) to stdout; +ERROR: column "a" of relation "attest" does not exist +copy attest("........pg.dropped.1........") to stdout; +ERROR: column "........pg.dropped.1........" of relation "attest" does not exist +copy attest from stdin; ERROR: extra data after last expected column -CONTEXT: COPY test, line 1: "10 11 12" -select * from test; +CONTEXT: COPY attest, line 1: "10 11 12" +select * from attest; b | c ---+--- 2 | 3 (1 row) -copy test from stdin; -select * from test; +copy attest from stdin; +select * from attest; b | c ----+---- 2 | 3 21 | 22 (2 rows) -copy test(a) from stdin; -ERROR: column "a" of relation "test" does not exist -copy test("........pg.dropped.1........") from stdin; -ERROR: column "........pg.dropped.1........" of relation "test" does not exist -copy test(b,c) from stdin; -select * from test; +copy attest(a) from stdin; +ERROR: column "a" of relation "attest" does not exist +copy attest("........pg.dropped.1........") from stdin; +ERROR: column "........pg.dropped.1........" of relation "attest" does not exist +copy attest(b,c) from stdin; +select * from attest; b | c ----+---- 2 | 3 @@ -1460,7 +1511,7 @@ select * from test; 31 | 32 (3 rows) -drop table test; +drop table attest; -- test inheritance create table dropColumn (a int, b int, e int); create table dropColumnChild (c int) inherits (dropColumn); @@ -1805,7 +1856,7 @@ select * from foo; (1 row) drop domain mytype cascade; -NOTICE: drop cascades to table foo column f2 +NOTICE: drop cascades to column f2 of table foo select * from foo; f1 | f3 ----+---- @@ -1895,7 +1946,7 @@ alter table anothertab alter column atcol1 drop default; alter table anothertab alter column atcol1 type boolean using case when atcol1 % 2 = 0 then true else false end; -- fails ERROR: operator does not exist: boolean <= integer -HINT: No operator matches the given name and argument type(s). You might need to add explicit type casts. +HINT: No operator matches the given name and argument types. You might need to add explicit type casts. 
alter table anothertab drop constraint anothertab_chk; alter table anothertab drop constraint anothertab_chk; -- fails ERROR: constraint "anothertab_chk" of relation "anothertab" does not exist @@ -1941,6 +1992,67 @@ create table tab1 (a int, b text); create table tab2 (x int, y tab1); alter table tab1 alter column b type varchar; -- fails ERROR: cannot alter table "tab1" because column "tab2.y" uses its row type +-- Alter column type that's part of a partitioned index +create table at_partitioned (a int, b text) partition by range (a); +create table at_part_1 partition of at_partitioned for values from (0) to (1000); +insert into at_partitioned values (512, '0.123'); +create table at_part_2 (b text, a int); +insert into at_part_2 values ('1.234', 1024); +create index on at_partitioned (b); +create index on at_partitioned (a); +\d at_part_1 + Table "public.at_part_1" + Column | Type | Collation | Nullable | Default +--------+---------+-----------+----------+--------- + a | integer | | | + b | text | | | +Partition of: at_partitioned FOR VALUES FROM (0) TO (1000) +Indexes: + "at_part_1_a_idx" btree (a) + "at_part_1_b_idx" btree (b) + +\d at_part_2 + Table "public.at_part_2" + Column | Type | Collation | Nullable | Default +--------+---------+-----------+----------+--------- + b | text | | | + a | integer | | | + +alter table at_partitioned attach partition at_part_2 for values from (1000) to (2000); +\d at_part_2 + Table "public.at_part_2" + Column | Type | Collation | Nullable | Default +--------+---------+-----------+----------+--------- + b | text | | | + a | integer | | | +Partition of: at_partitioned FOR VALUES FROM (1000) TO (2000) +Indexes: + "at_part_2_a_idx" btree (a) + "at_part_2_b_idx" btree (b) + +alter table at_partitioned alter column b type numeric using b::numeric; +\d at_part_1 + Table "public.at_part_1" + Column | Type | Collation | Nullable | Default +--------+---------+-----------+----------+--------- + a | integer | | | + b | numeric | | | +Partition of: at_partitioned FOR VALUES FROM (0) TO (1000) +Indexes: + "at_part_1_a_idx" btree (a) + "at_part_1_b_idx" btree (b) + +\d at_part_2 + Table "public.at_part_2" + Column | Type | Collation | Nullable | Default +--------+---------+-----------+----------+--------- + b | numeric | | | + a | integer | | | +Partition of: at_partitioned FOR VALUES FROM (1000) TO (2000) +Indexes: + "at_part_2_a_idx" btree (a) + "at_part_2_b_idx" btree (b) + -- disallow recursive containment of row types create temp table recur1 (f1 int); alter table recur1 add column f2 recur1; -- fails @@ -2156,6 +2268,91 @@ Foreign-key constraints: "check_fk_presence_2_id_fkey" FOREIGN KEY (id) REFERENCES check_fk_presence_1(id) DROP TABLE check_fk_presence_1, check_fk_presence_2; +-- check column addition within a view (bug #14876) +create table at_base_table(id int, stuff text); +insert into at_base_table values (23, 'skidoo'); +create view at_view_1 as select * from at_base_table bt; +create view at_view_2 as select *, to_json(v1) as j from at_view_1 v1; +\d+ at_view_1 + View "public.at_view_1" + Column | Type | Collation | Nullable | Default | Storage | Description +--------+---------+-----------+----------+---------+----------+------------- + id | integer | | | | plain | + stuff | text | | | | extended | +View definition: + SELECT bt.id, + bt.stuff + FROM at_base_table bt; + +\d+ at_view_2 + View "public.at_view_2" + Column | Type | Collation | Nullable | Default | Storage | Description 
+--------+---------+-----------+----------+---------+----------+------------- + id | integer | | | | plain | + stuff | text | | | | extended | + j | json | | | | extended | +View definition: + SELECT v1.id, + v1.stuff, + to_json(v1.*) AS j + FROM at_view_1 v1; + +explain (verbose, costs off) select * from at_view_2; + QUERY PLAN +---------------------------------------------------------- + Seq Scan on public.at_base_table bt + Output: bt.id, bt.stuff, to_json(ROW(bt.id, bt.stuff)) +(2 rows) + +select * from at_view_2; + id | stuff | j +----+--------+---------------------------- + 23 | skidoo | {"id":23,"stuff":"skidoo"} +(1 row) + +create or replace view at_view_1 as select *, 2+2 as more from at_base_table bt; +\d+ at_view_1 + View "public.at_view_1" + Column | Type | Collation | Nullable | Default | Storage | Description +--------+---------+-----------+----------+---------+----------+------------- + id | integer | | | | plain | + stuff | text | | | | extended | + more | integer | | | | plain | +View definition: + SELECT bt.id, + bt.stuff, + 2 + 2 AS more + FROM at_base_table bt; + +\d+ at_view_2 + View "public.at_view_2" + Column | Type | Collation | Nullable | Default | Storage | Description +--------+---------+-----------+----------+---------+----------+------------- + id | integer | | | | plain | + stuff | text | | | | extended | + j | json | | | | extended | +View definition: + SELECT v1.id, + v1.stuff, + to_json(v1.*) AS j + FROM at_view_1 v1; + +explain (verbose, costs off) select * from at_view_2; + QUERY PLAN +---------------------------------------------------------------- + Seq Scan on public.at_base_table bt + Output: bt.id, bt.stuff, to_json(ROW(bt.id, bt.stuff, NULL)) +(2 rows) + +select * from at_view_2; + id | stuff | j +----+--------+---------------------------------------- + 23 | skidoo | {"id":23,"stuff":"skidoo","more":null} +(1 row) + +drop view at_view_2; +drop view at_view_1; +drop table at_base_table; -- -- lock levels -- @@ -2495,11 +2692,11 @@ drop cascades to operator family alter2.ctype_hash_ops for access method hash drop cascades to type alter2.ctype drop cascades to function alter2.same(alter2.ctype,alter2.ctype) drop cascades to operator alter2.=(alter2.ctype,alter2.ctype) -drop cascades to conversion ascii_to_utf8 -drop cascades to text search parser prs -drop cascades to text search configuration cfg -drop cascades to text search template tmpl -drop cascades to text search dictionary dict +drop cascades to conversion alter2.ascii_to_utf8 +drop cascades to text search parser alter2.prs +drop cascades to text search configuration alter2.cfg +drop cascades to text search template alter2.tmpl +drop cascades to text search dictionary alter2.dict -- -- composite types -- @@ -2682,6 +2879,23 @@ Typed table of type: test_type2 Inherits: test_tbl2 DROP TABLE test_tbl2_subclass; +CREATE TYPE test_typex AS (a int, b text); +CREATE TABLE test_tblx (x int, y test_typex check ((y).a > 0)); +ALTER TYPE test_typex DROP ATTRIBUTE a; -- fails +ERROR: cannot drop column a of composite type test_typex because other objects depend on it +DETAIL: constraint test_tblx_y_check on table test_tblx depends on column a of composite type test_typex +HINT: Use DROP ... CASCADE to drop the dependent objects too. 
+ALTER TYPE test_typex DROP ATTRIBUTE a CASCADE; +NOTICE: drop cascades to constraint test_tblx_y_check on table test_tblx +\d test_tblx + Table "public.test_tblx" + Column | Type | Collation | Nullable | Default +--------+------------+-----------+----------+--------- + x | integer | | | + y | test_typex | | | + +DROP TABLE test_tblx; +DROP TYPE test_typex; -- This test isn't that interesting on its own, but the purpose is to leave -- behind a table to test pg_upgrade with. The table has a composite type -- column in it, and the composite type has a dropped attribute. @@ -2774,6 +2988,41 @@ Check constraints: DROP TABLE alter2.tt8; DROP SCHEMA alter2; +-- +-- Check conflicts between index and CHECK constraint names +-- +CREATE TABLE tt9(c integer); +ALTER TABLE tt9 ADD CHECK(c > 1); +ALTER TABLE tt9 ADD CHECK(c > 2); -- picks nonconflicting name +ALTER TABLE tt9 ADD CONSTRAINT foo CHECK(c > 3); +ALTER TABLE tt9 ADD CONSTRAINT foo CHECK(c > 4); -- fail, dup name +ERROR: constraint "foo" for relation "tt9" already exists +ALTER TABLE tt9 ADD UNIQUE(c); +ALTER TABLE tt9 ADD UNIQUE(c); -- picks nonconflicting name +ALTER TABLE tt9 ADD CONSTRAINT tt9_c_key UNIQUE(c); -- fail, dup name +ERROR: relation "tt9_c_key" already exists +ALTER TABLE tt9 ADD CONSTRAINT foo UNIQUE(c); -- fail, dup name +ERROR: constraint "foo" for relation "tt9" already exists +ALTER TABLE tt9 ADD CONSTRAINT tt9_c_key CHECK(c > 5); -- fail, dup name +ERROR: constraint "tt9_c_key" for relation "tt9" already exists +ALTER TABLE tt9 ADD CONSTRAINT tt9_c_key2 CHECK(c > 6); +ALTER TABLE tt9 ADD UNIQUE(c); -- picks nonconflicting name +\d tt9 + Table "public.tt9" + Column | Type | Collation | Nullable | Default +--------+---------+-----------+----------+--------- + c | integer | | | +Indexes: + "tt9_c_key" UNIQUE CONSTRAINT, btree (c) + "tt9_c_key1" UNIQUE CONSTRAINT, btree (c) + "tt9_c_key3" UNIQUE CONSTRAINT, btree (c) +Check constraints: + "foo" CHECK (c > 3) + "tt9_c_check" CHECK (c > 1) + "tt9_c_check1" CHECK (c > 2) + "tt9_c_key2" CHECK (c > 6) + +DROP TABLE tt9; -- Check that comments on constraints and indexes are not lost at ALTER TABLE. 
CREATE TABLE comment_test ( id int, @@ -3103,18 +3352,6 @@ CREATE TABLE partitioned ( a int, b int ) PARTITION BY RANGE (a, (a+b+1)); -ALTER TABLE partitioned ADD UNIQUE (a); -ERROR: unique constraints are not supported on partitioned tables -LINE 1: ALTER TABLE partitioned ADD UNIQUE (a); - ^ -ALTER TABLE partitioned ADD PRIMARY KEY (a); -ERROR: primary key constraints are not supported on partitioned tables -LINE 1: ALTER TABLE partitioned ADD PRIMARY KEY (a); - ^ -ALTER TABLE partitioned ADD FOREIGN KEY (a) REFERENCES blah; -ERROR: foreign key constraints are not supported on partitioned tables -LINE 1: ALTER TABLE partitioned ADD FOREIGN KEY (a) REFERENCES blah; - ^ ALTER TABLE partitioned ADD EXCLUDE USING gist (a WITH &&); ERROR: exclusion constraints are not supported on partitioned tables LINE 1: ALTER TABLE partitioned ADD EXCLUDE USING gist (a WITH &&); @@ -3150,7 +3387,7 @@ CREATE TABLE unparted ( ); CREATE TABLE fail_part (like unparted); ALTER TABLE unparted ATTACH PARTITION fail_part FOR VALUES IN ('a'); -ERROR: "unparted" is not partitioned +ERROR: table "unparted" is not partitioned DROP TABLE unparted, fail_part; -- check that partition bound is compatible CREATE TABLE list_parted ( @@ -3177,7 +3414,7 @@ CREATE TABLE owned_by_me ( a int ) PARTITION BY LIST (a); ALTER TABLE owned_by_me ATTACH PARTITION not_owned_by_me FOR VALUES IN (1); -ERROR: must be owner of relation not_owned_by_me +ERROR: must be owner of table not_owned_by_me RESET SESSION AUTHORIZATION; DROP TABLE owned_by_me, not_owned_by_me; DROP ROLE regress_test_not_me; @@ -3218,7 +3455,7 @@ DROP TABLE fail_part; CREATE TABLE fail_part (like list_parted, c int); ALTER TABLE list_parted ATTACH PARTITION fail_part FOR VALUES IN (1); ERROR: table "fail_part" contains column "c" not found in parent "list_parted" -DETAIL: New partition should contain only the columns present in parent. +DETAIL: The new partition may contain only the columns present in parent. 
DROP TABLE fail_part; -- check that the table being attached has every column of the parent CREATE TABLE fail_part (a int NOT NULL); @@ -3273,6 +3510,15 @@ SELECT conislocal, coninhcount FROM pg_constraint WHERE conrelid = 'part_1'::reg CREATE TABLE fail_part (LIKE part_1 INCLUDING CONSTRAINTS); ALTER TABLE list_parted ATTACH PARTITION fail_part FOR VALUES IN (1); ERROR: partition "fail_part" would overlap partition "part_1" +DROP TABLE fail_part; +-- check that an existing table can be attached as a default partition +CREATE TABLE def_part (LIKE list_parted INCLUDING CONSTRAINTS); +ALTER TABLE list_parted ATTACH PARTITION def_part DEFAULT; +-- check attaching default partition fails if a default partition already +-- exists +CREATE TABLE fail_def_part (LIKE part_1 INCLUDING CONSTRAINTS); +ALTER TABLE list_parted ATTACH PARTITION fail_def_part DEFAULT; +ERROR: partition "fail_def_part" conflicts with existing default partition "def_part" -- check validation when attaching list partitions CREATE TABLE list_parted2 ( a int, @@ -3286,6 +3532,15 @@ ERROR: partition constraint is violated by some row -- should be ok after deleting the bad row DELETE FROM part_2; ALTER TABLE list_parted2 ATTACH PARTITION part_2 FOR VALUES IN (2); +-- check partition cannot be attached if default has some row for its values +CREATE TABLE list_parted2_def PARTITION OF list_parted2 DEFAULT; +INSERT INTO list_parted2_def VALUES (11, 'z'); +CREATE TABLE part_3 (LIKE list_parted2); +ALTER TABLE list_parted2 ATTACH PARTITION part_3 FOR VALUES IN (11); +ERROR: updated partition constraint for default partition would be violated by some row +-- should be ok after deleting the bad row +DELETE FROM list_parted2_def WHERE a = 11; +ALTER TABLE list_parted2 ATTACH PARTITION part_3 FOR VALUES IN (11); -- adding constraints that describe the desired partition constraint -- (or more restrictive) will help skip the validation scan CREATE TABLE part_3_4 ( @@ -3301,6 +3556,10 @@ ALTER TABLE list_parted2 DETACH PARTITION part_3_4; ALTER TABLE part_3_4 ALTER a SET NOT NULL; ALTER TABLE list_parted2 ATTACH PARTITION part_3_4 FOR VALUES IN (3, 4); INFO: partition constraint for table "part_3_4" is implied by existing constraints +-- check if default partition scan skipped +ALTER TABLE list_parted2_def ADD CONSTRAINT check_a CHECK (a IN (5, 6)); +CREATE TABLE part_55_66 PARTITION OF list_parted2 FOR VALUES IN (55, 66); +INFO: updated partition constraint for default partition "list_parted2_def" is implied by existing constraints -- check validation when attaching range partitions CREATE TABLE range_parted ( a int, @@ -3326,6 +3585,19 @@ CREATE TABLE part2 ( ); ALTER TABLE range_parted ATTACH PARTITION part2 FOR VALUES FROM (1, 10) TO (1, 20); INFO: partition constraint for table "part2" is implied by existing constraints +-- Create default partition +CREATE TABLE partr_def1 PARTITION OF range_parted DEFAULT; +-- Only one default partition is allowed, hence, following should give error +CREATE TABLE partr_def2 (LIKE part1 INCLUDING CONSTRAINTS); +ALTER TABLE range_parted ATTACH PARTITION partr_def2 DEFAULT; +ERROR: partition "partr_def2" conflicts with existing default partition "partr_def1" +-- Overlapping partitions cannot be attached, hence, following should give error +INSERT INTO partr_def1 VALUES (2, 10); +CREATE TABLE part3 (LIKE range_parted); +ALTER TABLE range_parted ATTACH partition part3 FOR VALUES FROM (2, 10) TO (2, 20); +ERROR: updated partition constraint for default partition would be violated by some row +-- Attaching 
partitions should be successful when there are no overlapping rows +ALTER TABLE range_parted ATTACH partition part3 FOR VALUES FROM (3, 10) TO (3, 20); -- check that leaf partitions are scanned when attaching a partitioned -- table CREATE TABLE part_5 ( @@ -3378,6 +3650,7 @@ ALTER TABLE part_7 ATTACH PARTITION part_7_a_null FOR VALUES IN ('a', null); INFO: partition constraint for table "part_7_a_null" is implied by existing constraints ALTER TABLE list_parted2 ATTACH PARTITION part_7 FOR VALUES IN (7); INFO: partition constraint for table "part_7" is implied by existing constraints +INFO: updated partition constraint for default partition "list_parted2_def" is implied by existing constraints -- Same example, but check this time that the constraint correctly detects -- violating rows ALTER TABLE list_parted2 DETACH PARTITION part_7; @@ -3391,7 +3664,20 @@ SELECT tableoid::regclass, a, b FROM part_7 order by a; (2 rows) ALTER TABLE list_parted2 ATTACH PARTITION part_7 FOR VALUES IN (7); +INFO: updated partition constraint for default partition "list_parted2_def" is implied by existing constraints ERROR: partition constraint is violated by some row +-- check that leaf partitions of default partition are scanned when +-- attaching a partitioned table. +ALTER TABLE part_5 DROP CONSTRAINT check_a; +CREATE TABLE part5_def PARTITION OF part_5 DEFAULT PARTITION BY LIST(a); +CREATE TABLE part5_def_p1 PARTITION OF part5_def FOR VALUES IN (5); +INSERT INTO part5_def_p1 VALUES (5, 'y'); +CREATE TABLE part5_p1 (LIKE part_5); +ALTER TABLE part_5 ATTACH PARTITION part5_p1 FOR VALUES IN ('y'); +ERROR: updated partition constraint for default partition would be violated by some row +-- should be ok after deleting the bad row +DELETE FROM part5_def_p1 WHERE b = 'y'; +ALTER TABLE part_5 ATTACH PARTITION part5_p1 FOR VALUES IN ('y'); -- check that the table being attached is not already a partition ALTER TABLE list_parted2 ATTACH PARTITION part_2 FOR VALUES IN (2); ERROR: "part_2" is already a partition @@ -3402,23 +3688,94 @@ DETAIL: "part_5" is already a child of "list_parted2". ALTER TABLE list_parted2 ATTACH PARTITION list_parted2 FOR VALUES IN (0); ERROR: circular inheritance not allowed DETAIL: "list_parted2" is already a child of "list_parted2". +-- If a partitioned table being created or an existing table being attached +-- as a partition does not have a constraint that would allow validation scan +-- to be skipped, but an individual partition does, then the partition's +-- validation scan is skipped. +CREATE TABLE quuux (a int, b text) PARTITION BY LIST (a); +CREATE TABLE quuux_default PARTITION OF quuux DEFAULT PARTITION BY LIST (b); +CREATE TABLE quuux_default1 PARTITION OF quuux_default ( + CONSTRAINT check_1 CHECK (a IS NOT NULL AND a = 1) +) FOR VALUES IN ('b'); +CREATE TABLE quuux1 (a int, b text); +ALTER TABLE quuux ATTACH PARTITION quuux1 FOR VALUES IN (1); -- validate! 
+CREATE TABLE quuux2 (a int, b text); +ALTER TABLE quuux ATTACH PARTITION quuux2 FOR VALUES IN (2); -- skip validation +INFO: updated partition constraint for default partition "quuux_default1" is implied by existing constraints +DROP TABLE quuux1, quuux2; +-- should validate for quuux1, but not for quuux2 +CREATE TABLE quuux1 PARTITION OF quuux FOR VALUES IN (1); +CREATE TABLE quuux2 PARTITION OF quuux FOR VALUES IN (2); +INFO: updated partition constraint for default partition "quuux_default1" is implied by existing constraints +DROP TABLE quuux; +-- check validation when attaching hash partitions +-- Use hand-rolled hash functions and operator class to get predictable result +-- on different matchines. part_test_int4_ops is defined in insert.sql. +-- check that the new partition won't overlap with an existing partition +CREATE TABLE hash_parted ( + a int, + b int +) PARTITION BY HASH (a part_test_int4_ops); +CREATE TABLE hpart_1 PARTITION OF hash_parted FOR VALUES WITH (MODULUS 4, REMAINDER 0); +CREATE TABLE fail_part (LIKE hpart_1); +ALTER TABLE hash_parted ATTACH PARTITION fail_part FOR VALUES WITH (MODULUS 8, REMAINDER 4); +ERROR: partition "fail_part" would overlap partition "hpart_1" +ALTER TABLE hash_parted ATTACH PARTITION fail_part FOR VALUES WITH (MODULUS 8, REMAINDER 0); +ERROR: partition "fail_part" would overlap partition "hpart_1" +DROP TABLE fail_part; +-- check validation when attaching hash partitions +-- check that violating rows are correctly reported +CREATE TABLE hpart_2 (LIKE hash_parted); +INSERT INTO hpart_2 VALUES (3, 0); +ALTER TABLE hash_parted ATTACH PARTITION hpart_2 FOR VALUES WITH (MODULUS 4, REMAINDER 1); +ERROR: partition constraint is violated by some row +-- should be ok after deleting the bad row +DELETE FROM hpart_2; +ALTER TABLE hash_parted ATTACH PARTITION hpart_2 FOR VALUES WITH (MODULUS 4, REMAINDER 1); +-- check that leaf partitions are scanned when attaching a partitioned +-- table +CREATE TABLE hpart_5 ( + LIKE hash_parted +) PARTITION BY LIST (b); +-- check that violating rows are correctly reported +CREATE TABLE hpart_5_a PARTITION OF hpart_5 FOR VALUES IN ('1', '2', '3'); +INSERT INTO hpart_5_a (a, b) VALUES (7, 1); +ALTER TABLE hash_parted ATTACH PARTITION hpart_5 FOR VALUES WITH (MODULUS 4, REMAINDER 2); +ERROR: partition constraint is violated by some row +-- should be ok after deleting the bad row +DELETE FROM hpart_5_a; +ALTER TABLE hash_parted ATTACH PARTITION hpart_5 FOR VALUES WITH (MODULUS 4, REMAINDER 2); +-- check that the table being attach is with valid modulus and remainder value +CREATE TABLE fail_part(LIKE hash_parted); +ALTER TABLE hash_parted ATTACH PARTITION fail_part FOR VALUES WITH (MODULUS 0, REMAINDER 1); +ERROR: modulus for hash partition must be a positive integer +ALTER TABLE hash_parted ATTACH PARTITION fail_part FOR VALUES WITH (MODULUS 8, REMAINDER 8); +ERROR: remainder for hash partition must be less than modulus +ALTER TABLE hash_parted ATTACH PARTITION fail_part FOR VALUES WITH (MODULUS 3, REMAINDER 2); +ERROR: every hash partition modulus must be a factor of the next larger modulus +DROP TABLE fail_part; -- -- DETACH PARTITION -- -- check that the table is partitioned at all CREATE TABLE regular_table (a int); ALTER TABLE regular_table DETACH PARTITION any_name; -ERROR: "regular_table" is not partitioned +ERROR: table "regular_table" is not partitioned DROP TABLE regular_table; -- check that the partition being detached exists at all ALTER TABLE list_parted2 DETACH PARTITION part_4; ERROR: relation "part_4" 
does not exist +ALTER TABLE hash_parted DETACH PARTITION hpart_4; +ERROR: relation "hpart_4" does not exist -- check that the partition being detached is actually a partition of the parent CREATE TABLE not_a_part (a int); ALTER TABLE list_parted2 DETACH PARTITION not_a_part; ERROR: relation "not_a_part" is not a partition of relation "list_parted2" ALTER TABLE list_parted2 DETACH PARTITION part_1; ERROR: relation "part_1" is not a partition of relation "list_parted2" +ALTER TABLE hash_parted DETACH PARTITION not_a_part; +ERROR: relation "not_a_part" is not a partition of relation "hash_parted" +DROP TABLE not_a_part; -- check that, after being detached, attinhcount/coninhcount is dropped to 0 and -- attislocal/conislocal is set to true ALTER TABLE list_parted2 DETACH PARTITION part_3_4; @@ -3512,8 +3869,17 @@ ALTER TABLE list_parted2 DROP COLUMN b; ERROR: cannot drop column named in partition key ALTER TABLE list_parted2 ALTER COLUMN b TYPE text; ERROR: cannot alter type of column named in partition key +-- dropping non-partition key columns should be allowed on the parent table. +ALTER TABLE list_parted DROP COLUMN b; +SELECT * FROM list_parted; + a +--- +(0 rows) + -- cleanup DROP TABLE list_parted, list_parted2, range_parted; +DROP TABLE fail_def_part; +DROP TABLE hash_parted; -- more tests for certain multi-level partitioning scenarios create table p (a int, b int) partition by range (a, b); create table p1 (b int, a int not null) partition by range (b); @@ -3551,3 +3917,73 @@ create table parted_validate_test_1 partition of parted_validate_test for values alter table parted_validate_test add constraint parted_validate_test_chka check (a > 0) not valid; alter table parted_validate_test validate constraint parted_validate_test_chka; drop table parted_validate_test; +-- test alter column options +CREATE TABLE attmp(i integer); +INSERT INTO attmp VALUES (1); +ALTER TABLE attmp ALTER COLUMN i SET (n_distinct = 1, n_distinct_inherited = 2); +ALTER TABLE attmp ALTER COLUMN i RESET (n_distinct_inherited); +ANALYZE attmp; +DROP TABLE attmp; +DROP USER regress_alter_table_user1; +-- check that violating rows are correctly reported when attaching as the +-- default partition +create table defpart_attach_test (a int) partition by list (a); +create table defpart_attach_test1 partition of defpart_attach_test for values in (1); +create table defpart_attach_test_d (like defpart_attach_test); +insert into defpart_attach_test_d values (1), (2); +-- error because its constraint as the default partition would be violated +-- by the row containing 1 +alter table defpart_attach_test attach partition defpart_attach_test_d default; +ERROR: partition constraint is violated by some row +delete from defpart_attach_test_d where a = 1; +alter table defpart_attach_test_d add check (a > 1); +-- should be attached successfully and without needing to be scanned +alter table defpart_attach_test attach partition defpart_attach_test_d default; +INFO: partition constraint for table "defpart_attach_test_d" is implied by existing constraints +drop table defpart_attach_test; +-- check combinations of temporary and permanent relations when attaching +-- partitions. 
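-- A hedged standalone sketch of the rule the next test block demonstrates
-- (relation names here are illustrative, not taken from the patch): temporary
-- and permanent relations cannot be mixed within one partition tree, in
-- either direction.
create temp table tmp_parent_demo (a int) partition by list (a);
create table perm_child_demo (a int);
alter table tmp_parent_demo attach partition perm_child_demo default;  -- rejected
create temp table tmp_child_demo (a int);
alter table tmp_parent_demo attach partition tmp_child_demo default;   -- allowed
drop table tmp_parent_demo cascade;
drop table perm_child_demo;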
+create table perm_part_parent (a int) partition by list (a); +create temp table temp_part_parent (a int) partition by list (a); +create table perm_part_child (a int); +create temp table temp_part_child (a int); +alter table temp_part_parent attach partition perm_part_child default; -- error +ERROR: cannot attach a permanent relation as partition of temporary relation "temp_part_parent" +alter table perm_part_parent attach partition temp_part_child default; -- error +ERROR: cannot attach a temporary relation as partition of permanent relation "perm_part_parent" +alter table temp_part_parent attach partition temp_part_child default; -- ok +drop table perm_part_parent cascade; +drop table temp_part_parent cascade; +-- check that attaching partitions to a table while it is being used is +-- prevented +create table tab_part_attach (a int) partition by list (a); +create or replace function func_part_attach() returns trigger + language plpgsql as $$ + begin + execute 'create table tab_part_attach_1 (a int)'; + execute 'alter table tab_part_attach attach partition tab_part_attach_1 for values in (1)'; + return null; + end $$; +create trigger trig_part_attach before insert on tab_part_attach + for each statement execute procedure func_part_attach(); +insert into tab_part_attach values (1); +ERROR: cannot ALTER TABLE "tab_part_attach" because it is being used by active queries in this session +CONTEXT: SQL statement "alter table tab_part_attach attach partition tab_part_attach_1 for values in (1)" +PL/pgSQL function func_part_attach() line 4 at EXECUTE +drop table tab_part_attach; +drop function func_part_attach(); +-- test case where the partitioning operator is a SQL function whose +-- evaluation results in the table's relcache being rebuilt partway through +-- the execution of an ATTACH PARTITION command +create function at_test_sql_partop (int4, int4) returns int language sql +as $$ select case when $1 = $2 then 0 when $1 > $2 then 1 else -1 end; $$; +create operator class at_test_sql_partop for type int4 using btree as + operator 1 < (int4, int4), operator 2 <= (int4, int4), + operator 3 = (int4, int4), operator 4 >= (int4, int4), + operator 5 > (int4, int4), function 1 at_test_sql_partop(int4, int4); +create table at_test_sql_partop (a int) partition by range (a at_test_sql_partop); +create table at_test_sql_partop_1 (a int); +alter table at_test_sql_partop attach partition at_test_sql_partop_1 for values from (0) to (10); +drop table at_test_sql_partop; +drop operator class at_test_sql_partop using btree; +drop function at_test_sql_partop; diff --git a/src/test/regress/expected/amutils.out b/src/test/regress/expected/amutils.out index 74f7c9f1fd..4570a39b05 100644 --- a/src/test/regress/expected/amutils.out +++ b/src/test/regress/expected/amutils.out @@ -12,7 +12,7 @@ select prop, 'clusterable', 'index_scan', 'bitmap_scan', 'backward_scan', 'can_order', 'can_unique', 'can_multi_col', - 'can_exclude', + 'can_exclude', 'can_include', 'bogus']::text[]) with ordinality as u(prop,ord) where a.amname = 'btree' @@ -36,8 +36,9 @@ select prop, can_unique | t | | can_multi_col | t | | can_exclude | t | | + can_include | t | | bogus | | | -(18 rows) +(19 rows) select prop, pg_indexam_has_property(a.oid, prop) as "AM", @@ -50,7 +51,7 @@ select prop, 'clusterable', 'index_scan', 'bitmap_scan', 'backward_scan', 'can_order', 'can_unique', 'can_multi_col', - 'can_exclude', + 'can_exclude', 'can_include', 'bogus']::text[]) with ordinality as u(prop,ord) where a.amname = 'gist' @@ -74,14 +75,16 @@ select 
prop, can_unique | f | | can_multi_col | t | | can_exclude | t | | + can_include | f | | bogus | | | -(18 rows) +(19 rows) select prop, pg_index_column_has_property('onek_hundred'::regclass, 1, prop) as btree, pg_index_column_has_property('hash_i4_index'::regclass, 1, prop) as hash, pg_index_column_has_property('gcircleind'::regclass, 1, prop) as gist, - pg_index_column_has_property('sp_radix_ind'::regclass, 1, prop) as spgist, + pg_index_column_has_property('sp_radix_ind'::regclass, 1, prop) as spgist_radix, + pg_index_column_has_property('sp_quad_ind'::regclass, 1, prop) as spgist_quad, pg_index_column_has_property('botharrayidx'::regclass, 1, prop) as gin, pg_index_column_has_property('brinidx'::regclass, 1, prop) as brin from unnest(array['asc', 'desc', 'nulls_first', 'nulls_last', @@ -90,18 +93,18 @@ select prop, 'bogus']::text[]) with ordinality as u(prop,ord) order by ord; - prop | btree | hash | gist | spgist | gin | brin ---------------------+-------+------+------+--------+-----+------ - asc | t | f | f | f | f | f - desc | f | f | f | f | f | f - nulls_first | f | f | f | f | f | f - nulls_last | t | f | f | f | f | f - orderable | t | f | f | f | f | f - distance_orderable | f | f | t | f | f | f - returnable | t | f | f | t | f | f - search_array | t | f | f | f | f | f - search_nulls | t | f | t | t | f | t - bogus | | | | | | + prop | btree | hash | gist | spgist_radix | spgist_quad | gin | brin +--------------------+-------+------+------+--------------+-------------+-----+------ + asc | t | f | f | f | f | f | f + desc | f | f | f | f | f | f | f + nulls_first | f | f | f | f | f | f | f + nulls_last | t | f | f | f | f | f | f + orderable | t | f | f | f | f | f | f + distance_orderable | f | f | t | f | t | f | f + returnable | t | f | f | t | t | f | f + search_array | t | f | f | f | f | f | f + search_nulls | t | f | t | t | t | f | t + bogus | | | | | | | (10 rows) select prop, @@ -128,7 +131,7 @@ select prop, select amname, prop, pg_indexam_has_property(a.oid, prop) as p from pg_am a, unnest(array['can_order', 'can_unique', 'can_multi_col', - 'can_exclude', 'bogus']::text[]) + 'can_exclude', 'can_include', 'bogus']::text[]) with ordinality as u(prop,ord) where amtype = 'i' order by amname, ord; @@ -138,33 +141,39 @@ select amname, prop, pg_indexam_has_property(a.oid, prop) as p brin | can_unique | f brin | can_multi_col | t brin | can_exclude | f + brin | can_include | f brin | bogus | btree | can_order | t btree | can_unique | t btree | can_multi_col | t btree | can_exclude | t + btree | can_include | t btree | bogus | gin | can_order | f gin | can_unique | f gin | can_multi_col | t gin | can_exclude | f + gin | can_include | f gin | bogus | gist | can_order | f gist | can_unique | f gist | can_multi_col | t gist | can_exclude | t + gist | can_include | f gist | bogus | hash | can_order | f hash | can_unique | f hash | can_multi_col | f hash | can_exclude | t + hash | can_include | f hash | bogus | spgist | can_order | f spgist | can_unique | f spgist | can_multi_col | f spgist | can_exclude | t + spgist | can_include | f spgist | bogus | -(30 rows) +(36 rows) -- -- additional checks for pg_index_column_has_property @@ -206,3 +215,40 @@ select col, prop, pg_index_column_has_property(o, col, prop) 4 | bogus | (24 rows) +CREATE INDEX foocover ON foo (f1) INCLUDE (f2,f3); +select col, prop, pg_index_column_has_property(o, col, prop) + from (values ('foocover'::regclass)) v1(o), + (values (1,'orderable'),(2,'asc'),(3,'desc'), + (4,'nulls_first'),(5,'nulls_last'), + 
(6,'distance_orderable'),(7,'returnable'), + (8, 'bogus')) v2(idx,prop), + generate_series(1,3) col + order by col, idx; + col | prop | pg_index_column_has_property +-----+--------------------+------------------------------ + 1 | orderable | t + 1 | asc | t + 1 | desc | f + 1 | nulls_first | f + 1 | nulls_last | t + 1 | distance_orderable | f + 1 | returnable | t + 1 | bogus | + 2 | orderable | f + 2 | asc | + 2 | desc | + 2 | nulls_first | + 2 | nulls_last | + 2 | distance_orderable | f + 2 | returnable | t + 2 | bogus | + 3 | orderable | f + 3 | asc | + 3 | desc | + 3 | nulls_first | + 3 | nulls_last | + 3 | distance_orderable | f + 3 | returnable | t + 3 | bogus | +(24 rows) + diff --git a/src/test/regress/expected/bit.out b/src/test/regress/expected/bit.out index 9c7d202149..ef8aaea3ef 100644 --- a/src/test/regress/expected/bit.out +++ b/src/test/regress/expected/bit.out @@ -549,3 +549,26 @@ SELECT overlay(B'0101011100' placing '001' from 20); 0101011100001 (1 row) +-- This table is intentionally left around to exercise pg_dump/pg_upgrade +CREATE TABLE bit_defaults( + b1 bit(4) DEFAULT '1001', + b2 bit(4) DEFAULT B'0101', + b3 bit varying(5) DEFAULT '1001', + b4 bit varying(5) DEFAULT B'0101' +); +\d bit_defaults + Table "public.bit_defaults" + Column | Type | Collation | Nullable | Default +--------+----------------+-----------+----------+--------------------- + b1 | bit(4) | | | '1001'::"bit" + b2 | bit(4) | | | '0101'::"bit" + b3 | bit varying(5) | | | '1001'::bit varying + b4 | bit varying(5) | | | '0101'::"bit" + +INSERT INTO bit_defaults DEFAULT VALUES; +TABLE bit_defaults; + b1 | b2 | b3 | b4 +------+------+------+------ + 1001 | 0101 | 1001 | 0101 +(1 row) + diff --git a/src/test/regress/expected/boolean.out b/src/test/regress/expected/boolean.out index a6e6000c66..a812aee00c 100644 --- a/src/test/regress/expected/boolean.out +++ b/src/test/regress/expected/boolean.out @@ -465,6 +465,88 @@ FROM booltbl3 ORDER BY o; null | f | t | f | t | t | f (3 rows) +-- Test to make sure short-circuiting and NULL handling is +-- correct. Use a table as source to prevent constant simplification +-- to interfer. +CREATE TABLE booltbl4(isfalse bool, istrue bool, isnul bool); +INSERT INTO booltbl4 VALUES (false, true, null); +\pset null '(null)' +-- AND expression need to return null if there's any nulls and not all +-- of the value are true +SELECT istrue AND isnul AND istrue FROM booltbl4; + ?column? +---------- + (null) +(1 row) + +SELECT istrue AND istrue AND isnul FROM booltbl4; + ?column? +---------- + (null) +(1 row) + +SELECT isnul AND istrue AND istrue FROM booltbl4; + ?column? +---------- + (null) +(1 row) + +SELECT isfalse AND isnul AND istrue FROM booltbl4; + ?column? +---------- + f +(1 row) + +SELECT istrue AND isfalse AND isnul FROM booltbl4; + ?column? +---------- + f +(1 row) + +SELECT isnul AND istrue AND isfalse FROM booltbl4; + ?column? +---------- + f +(1 row) + +-- OR expression need to return null if there's any nulls and none +-- of the value is true +SELECT isfalse OR isnul OR isfalse FROM booltbl4; + ?column? +---------- + (null) +(1 row) + +SELECT isfalse OR isfalse OR isnul FROM booltbl4; + ?column? +---------- + (null) +(1 row) + +SELECT isnul OR isfalse OR isfalse FROM booltbl4; + ?column? +---------- + (null) +(1 row) + +SELECT isfalse OR isnul OR istrue FROM booltbl4; + ?column? +---------- + t +(1 row) + +SELECT istrue OR isfalse OR isnul FROM booltbl4; + ?column? +---------- + t +(1 row) + +SELECT isnul OR istrue OR isfalse FROM booltbl4; + ?column? 
+---------- + t +(1 row) + -- -- Clean up -- Many tables are retained by the regression test, but these do not seem @@ -474,3 +556,4 @@ FROM booltbl3 ORDER BY o; DROP TABLE BOOLTBL1; DROP TABLE BOOLTBL2; DROP TABLE BOOLTBL3; +DROP TABLE BOOLTBL4; diff --git a/src/test/regress/expected/box.out b/src/test/regress/expected/box.out index 49af242c8c..998b52223c 100644 --- a/src/test/regress/expected/box.out +++ b/src/test/regress/expected/box.out @@ -18,6 +18,7 @@ CREATE TABLE BOX_TBL (f1 box); INSERT INTO BOX_TBL (f1) VALUES ('(2.0,2.0,0.0,0.0)'); INSERT INTO BOX_TBL (f1) VALUES ('(1.0,1.0,3.0,3.0)'); +INSERT INTO BOX_TBL (f1) VALUES ('((-8, 2), (-2, -10))'); -- degenerate cases where the box is a line or a point -- note that lines and points boxes all have zero area INSERT INTO BOX_TBL (f1) VALUES ('(2.5, 2.5, 2.5,3.5)'); @@ -27,6 +28,18 @@ INSERT INTO BOX_TBL (f1) VALUES ('(2.3, 4.5)'); ERROR: invalid input syntax for type box: "(2.3, 4.5)" LINE 1: INSERT INTO BOX_TBL (f1) VALUES ('(2.3, 4.5)'); ^ +INSERT INTO BOX_TBL (f1) VALUES ('[1, 2, 3, 4)'); +ERROR: invalid input syntax for type box: "[1, 2, 3, 4)" +LINE 1: INSERT INTO BOX_TBL (f1) VALUES ('[1, 2, 3, 4)'); + ^ +INSERT INTO BOX_TBL (f1) VALUES ('(1, 2, 3, 4]'); +ERROR: invalid input syntax for type box: "(1, 2, 3, 4]" +LINE 1: INSERT INTO BOX_TBL (f1) VALUES ('(1, 2, 3, 4]'); + ^ +INSERT INTO BOX_TBL (f1) VALUES ('(1, 2, 3, 4) x'); +ERROR: invalid input syntax for type box: "(1, 2, 3, 4) x" +LINE 1: INSERT INTO BOX_TBL (f1) VALUES ('(1, 2, 3, 4) x'); + ^ INSERT INTO BOX_TBL (f1) VALUES ('asdfasdf(ad'); ERROR: invalid input syntax for type box: "asdfasdf(ad" LINE 1: INSERT INTO BOX_TBL (f1) VALUES ('asdfasdf(ad'); @@ -36,9 +49,10 @@ SELECT '' AS four, * FROM BOX_TBL; ------+--------------------- | (2,2),(0,0) | (3,3),(1,1) + | (-2,2),(-8,-10) | (2.5,3.5),(2.5,2.5) | (3,3),(3,3) -(4 rows) +(5 rows) SELECT '' AS four, b.*, area(b.f1) as barea FROM BOX_TBL b; @@ -46,9 +60,10 @@ SELECT '' AS four, b.*, area(b.f1) as barea ------+---------------------+------- | (2,2),(0,0) | 4 | (3,3),(1,1) | 4 + | (-2,2),(-8,-10) | 72 | (2.5,3.5),(2.5,2.5) | 0 | (3,3),(3,3) | 0 -(4 rows) +(5 rows) -- overlap SELECT '' AS three, b.f1 @@ -68,8 +83,9 @@ SELECT '' AS two, b1.* two | f1 -----+--------------------- | (2,2),(0,0) + | (-2,2),(-8,-10) | (2.5,3.5),(2.5,2.5) -(2 rows) +(3 rows) -- right-or-overlap (x only) SELECT '' AS two, b1.* @@ -88,8 +104,9 @@ SELECT '' AS two, b.f1 two | f1 -----+--------------------- | (2,2),(0,0) + | (-2,2),(-8,-10) | (2.5,3.5),(2.5,2.5) -(2 rows) +(3 rows) -- area <= SELECT '' AS four, b.f1 @@ -127,11 +144,12 @@ SELECT '' AS two, b.f1 SELECT '' AS two, b.f1 FROM BOX_TBL b -- zero area WHERE b.f1 > box '(3.5,3.0,4.5,3.0)'; - two | f1 ------+------------- + two | f1 +-----+----------------- | (2,2),(0,0) | (3,3),(1,1) -(2 rows) + | (-2,2),(-8,-10) +(3 rows) -- area >= SELECT '' AS four, b.f1 @@ -141,9 +159,10 @@ SELECT '' AS four, b.f1 ------+--------------------- | (2,2),(0,0) | (3,3),(1,1) + | (-2,2),(-8,-10) | (2.5,3.5),(2.5,2.5) | (3,3),(3,3) -(4 rows) +(5 rows) -- right of SELECT '' AS two, b.f1 @@ -152,8 +171,9 @@ SELECT '' AS two, b.f1 two | f1 -----+--------------------- | (2,2),(0,0) + | (-2,2),(-8,-10) | (2.5,3.5),(2.5,2.5) -(2 rows) +(3 rows) -- contained in SELECT '' AS three, b.f1 @@ -193,9 +213,10 @@ SELECT '' AS four, @@(b1.f1) AS p ------+--------- | (1,1) | (2,2) + | (-5,-4) | (2.5,3) | (3,3) -(4 rows) +(5 rows) -- wholly-contained SELECT '' AS one, b1.*, b2.* @@ -211,9 +232,10 @@ SELECT '' AS four, height(f1), 
width(f1) FROM BOX_TBL; ------+--------+------- | 2 | 2 | 2 | 2 + | 12 | 6 | 1 | 0 | 0 | 0 -(4 rows) +(5 rows) -- -- Test the SP-GiST index diff --git a/src/test/regress/expected/btree_index.out b/src/test/regress/expected/btree_index.out index 755cd17792..0bd48dc5a0 100644 --- a/src/test/regress/expected/btree_index.out +++ b/src/test/regress/expected/btree_index.out @@ -150,3 +150,32 @@ vacuum btree_tall_tbl; -- need to insert some rows to cause the fast root page to split. insert into btree_tall_tbl (id, t) select g, repeat('x', 100) from generate_series(1, 500) g; +-- +-- Test vacuum_cleanup_index_scale_factor +-- +-- Simple create +create table btree_test(a int); +create index btree_idx1 on btree_test(a) with (vacuum_cleanup_index_scale_factor = 40.0); +select reloptions from pg_class WHERE oid = 'btree_idx1'::regclass; + reloptions +------------------------------------------ + {vacuum_cleanup_index_scale_factor=40.0} +(1 row) + +-- Fail while setting improper values +create index btree_idx_err on btree_test(a) with (vacuum_cleanup_index_scale_factor = -10.0); +ERROR: value -10.0 out of bounds for option "vacuum_cleanup_index_scale_factor" +DETAIL: Valid values are between "0.000000" and "10000000000.000000". +create index btree_idx_err on btree_test(a) with (vacuum_cleanup_index_scale_factor = 100.0); +create index btree_idx_err on btree_test(a) with (vacuum_cleanup_index_scale_factor = 'string'); +ERROR: invalid value for floating point option "vacuum_cleanup_index_scale_factor": string +create index btree_idx_err on btree_test(a) with (vacuum_cleanup_index_scale_factor = true); +ERROR: invalid value for floating point option "vacuum_cleanup_index_scale_factor": true +-- Simple ALTER INDEX +alter index btree_idx1 set (vacuum_cleanup_index_scale_factor = 70.0); +select reloptions from pg_class WHERE oid = 'btree_idx1'::regclass; + reloptions +------------------------------------------ + {vacuum_cleanup_index_scale_factor=70.0} +(1 row) + diff --git a/src/test/regress/expected/case.out b/src/test/regress/expected/case.out index 36bf15c4ac..c0c8acf035 100644 --- a/src/test/regress/expected/case.out +++ b/src/test/regress/expected/case.out @@ -372,6 +372,20 @@ SELECT CASE make_ad(1,2) right (1 row) +ROLLBACK; +-- Test interaction of CASE with ArrayCoerceExpr (bug #15471) +BEGIN; +CREATE TYPE casetestenum AS ENUM ('e', 'f', 'g'); +SELECT + CASE 'foo'::text + WHEN 'foo' THEN ARRAY['a', 'b', 'c', 'd'] || enum_range(NULL::casetestenum)::text[] + ELSE ARRAY['x', 'y'] + END; + array +----------------- + {a,b,c,d,e,f,g} +(1 row) + ROLLBACK; -- -- Clean up diff --git a/src/test/regress/expected/circle.out b/src/test/regress/expected/circle.out index 9ba4a0495d..2ed74cc6aa 100644 --- a/src/test/regress/expected/circle.out +++ b/src/test/regress/expected/circle.out @@ -7,12 +7,22 @@ INSERT INTO CIRCLE_TBL VALUES ('<(1,2),100>'); INSERT INTO CIRCLE_TBL VALUES ('1,3,5'); INSERT INTO CIRCLE_TBL VALUES ('((1,2),3)'); INSERT INTO CIRCLE_TBL VALUES ('<(100,200),10>'); -INSERT INTO CIRCLE_TBL VALUES ('<(100,1),115>'); +INSERT INTO CIRCLE_TBL VALUES (' < ( 100 , 1 ) , 115 > '); +INSERT INTO CIRCLE_TBL VALUES ('<(3,5),0>'); -- Zero radius +INSERT INTO CIRCLE_TBL VALUES ('<(3,5),NaN>'); -- NaN radius -- bad values INSERT INTO CIRCLE_TBL VALUES ('<(-100,0),-100>'); ERROR: invalid input syntax for type circle: "<(-100,0),-100>" LINE 1: INSERT INTO CIRCLE_TBL VALUES ('<(-100,0),-100>'); ^ +INSERT INTO CIRCLE_TBL VALUES ('<(100,200),10'); +ERROR: invalid input syntax for type circle: "<(100,200),10" +LINE 1: 
INSERT INTO CIRCLE_TBL VALUES ('<(100,200),10'); + ^ +INSERT INTO CIRCLE_TBL VALUES ('<(100,200),10> x'); +ERROR: invalid input syntax for type circle: "<(100,200),10> x" +LINE 1: INSERT INTO CIRCLE_TBL VALUES ('<(100,200),10> x'); + ^ INSERT INTO CIRCLE_TBL VALUES ('1abc,3,5'); ERROR: invalid input syntax for type circle: "1abc,3,5" LINE 1: INSERT INTO CIRCLE_TBL VALUES ('1abc,3,5'); @@ -30,7 +40,9 @@ SELECT * FROM CIRCLE_TBL; <(1,2),3> <(100,200),10> <(100,1),115> -(6 rows) + <(3,5),0> + <(3,5),NaN> +(8 rows) SELECT '' AS six, center(f1) AS center FROM CIRCLE_TBL; @@ -42,7 +54,9 @@ SELECT '' AS six, center(f1) AS center | (1,2) | (100,200) | (100,1) -(6 rows) + | (3,5) + | (3,5) +(8 rows) SELECT '' AS six, radius(f1) AS radius FROM CIRCLE_TBL; @@ -54,7 +68,9 @@ SELECT '' AS six, radius(f1) AS radius | 3 | 10 | 115 -(6 rows) + | 0 + | NaN +(8 rows) SELECT '' AS six, diameter(f1) AS diameter FROM CIRCLE_TBL; @@ -66,14 +82,17 @@ SELECT '' AS six, diameter(f1) AS diameter | 6 | 20 | 230 -(6 rows) + | 0 + | NaN +(8 rows) SELECT '' AS two, f1 FROM CIRCLE_TBL WHERE radius(f1) < 5; two | f1 -----+----------- | <(5,1),3> | <(1,2),3> -(2 rows) + | <(3,5),0> +(3 rows) SELECT '' AS four, f1 FROM CIRCLE_TBL WHERE diameter(f1) >= 10; four | f1 @@ -82,7 +101,8 @@ SELECT '' AS four, f1 FROM CIRCLE_TBL WHERE diameter(f1) >= 10; | <(1,3),5> | <(100,200),10> | <(100,1),115> -(4 rows) + | <(3,5),NaN> +(5 rows) SELECT '' as five, c1.f1 AS one, c2.f1 AS two, (c1.f1 <-> c2.f1) AS distance FROM CIRCLE_TBL c1, CIRCLE_TBL c2 @@ -90,10 +110,13 @@ SELECT '' as five, c1.f1 AS one, c2.f1 AS two, (c1.f1 <-> c2.f1) AS distance ORDER BY distance, area(c1.f1), area(c2.f1); five | one | two | distance ------+----------------+----------------+------------------ + | <(3,5),0> | <(1,2),3> | 0.60555127546399 + | <(3,5),0> | <(5,1),3> | 1.47213595499958 | <(100,200),10> | <(100,1),115> | 74 | <(100,200),10> | <(1,2),100> | 111.370729772479 | <(1,3),5> | <(100,200),10> | 205.476756144497 | <(5,1),3> | <(100,200),10> | 207.51303816328 + | <(3,5),0> | <(100,200),10> | 207.793480159531 | <(1,2),3> | <(100,200),10> | 208.370729772479 -(5 rows) +(8 rows) diff --git a/src/test/regress/expected/cluster.out b/src/test/regress/expected/cluster.out index 097ac2b006..2bb62212ea 100644 --- a/src/test/regress/expected/cluster.out +++ b/src/test/regress/expected/cluster.out @@ -439,27 +439,21 @@ select * from clstr_temp; drop table clstr_temp; RESET SESSION AUTHORIZATION; +-- Check that partitioned tables cannot be clustered +CREATE TABLE clstrpart (a int) PARTITION BY RANGE (a); +CREATE INDEX clstrpart_idx ON clstrpart (a); +ALTER TABLE clstrpart CLUSTER ON clstrpart_idx; +ERROR: cannot mark index clustered in partitioned table +CLUSTER clstrpart USING clstrpart_idx; +ERROR: cannot cluster a partitioned table +DROP TABLE clstrpart; -- Test CLUSTER with external tuplesorting create table clstr_4 as select * from tenk1; create index cluster_sort on clstr_4 (hundred, thousand, tenthous); -- ensure we don't use the index in CLUSTER nor the checking SELECTs set enable_indexscan = off; --- Use external sort that only ever uses quicksort to sort runs: +-- Use external sort: set maintenance_work_mem = '1MB'; -set replacement_sort_tuples = 0; -cluster clstr_4 using cluster_sort; -select * from -(select hundred, lag(hundred) over () as lhundred, - thousand, lag(thousand) over () as lthousand, - tenthous, lag(tenthous) over () as ltenthous from clstr_4) ss -where row(hundred, thousand, tenthous) <= row(lhundred, lthousand, ltenthous); - hundred | 
lhundred | thousand | lthousand | tenthous | ltenthous ----------+----------+----------+-----------+----------+----------- -(0 rows) - --- Replacement selection will now be forced. It should only produce a single --- run, due to the fact that input is found to be presorted: -set replacement_sort_tuples = 150000; cluster clstr_4 using cluster_sort; select * from (select hundred, lag(hundred) over () as lhundred, @@ -472,7 +466,6 @@ where row(hundred, thousand, tenthous) <= row(lhundred, lthousand, ltenthous); reset enable_indexscan; reset maintenance_work_mem; -reset replacement_sort_tuples; -- clean up DROP TABLE clustertest; DROP TABLE clstr_1; diff --git a/src/test/regress/expected/collate.icu.utf8.out b/src/test/regress/expected/collate.icu.utf8.out index e1fc9984f2..f485b5c330 100644 --- a/src/test/regress/expected/collate.icu.utf8.out +++ b/src/test/regress/expected/collate.icu.utf8.out @@ -968,12 +968,12 @@ ERROR: collations are not supported by type integer LINE 1: ...ATE INDEX collate_test1_idx6 ON collate_test1 ((a COLLATE "C... ^ SELECT relname, pg_get_indexdef(oid) FROM pg_class WHERE relname LIKE 'collate_test%_idx%' ORDER BY 1; - relname | pg_get_indexdef ---------------------+----------------------------------------------------------------------------------------------------- - collate_test1_idx1 | CREATE INDEX collate_test1_idx1 ON collate_test1 USING btree (b) - collate_test1_idx2 | CREATE INDEX collate_test1_idx2 ON collate_test1 USING btree (b COLLATE "C") - collate_test1_idx3 | CREATE INDEX collate_test1_idx3 ON collate_test1 USING btree (b COLLATE "C") - collate_test1_idx4 | CREATE INDEX collate_test1_idx4 ON collate_test1 USING btree (((b || 'foo'::text)) COLLATE "POSIX") + relname | pg_get_indexdef +--------------------+------------------------------------------------------------------------------------------------------------------- + collate_test1_idx1 | CREATE INDEX collate_test1_idx1 ON collate_tests.collate_test1 USING btree (b) + collate_test1_idx2 | CREATE INDEX collate_test1_idx2 ON collate_tests.collate_test1 USING btree (b COLLATE "C") + collate_test1_idx3 | CREATE INDEX collate_test1_idx3 ON collate_tests.collate_test1 USING btree (b COLLATE "C") + collate_test1_idx4 | CREATE INDEX collate_test1_idx4 ON collate_tests.collate_test1 USING btree (((b || 'foo'::text)) COLLATE "POSIX") (4 rows) -- schema manipulation commands @@ -1056,17 +1056,17 @@ CREATE TABLE collate_dep_test4t (a int, b text); CREATE INDEX collate_dep_test4i ON collate_dep_test4t (b COLLATE test0); DROP COLLATION test0 RESTRICT; -- fail ERROR: cannot drop collation test0 because other objects depend on it -DETAIL: table collate_dep_test1 column b depends on collation test0 +DETAIL: column b of table collate_dep_test1 depends on collation test0 type collate_dep_dom1 depends on collation test0 -composite type collate_dep_test2 column y depends on collation test0 +column y of composite type collate_dep_test2 depends on collation test0 view collate_dep_test3 depends on collation test0 index collate_dep_test4i depends on collation test0 HINT: Use DROP ... CASCADE to drop the dependent objects too. 
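-- Standalone sketch (illustrative names, not part of the patch) of the
-- dependency behaviour whose message wording changed above: a column that
-- uses a collation blocks a plain (RESTRICT) drop of that collation, while
-- CASCADE drops the dependent column along with it.
CREATE COLLATION drop_demo_coll FROM "C";
CREATE TABLE drop_demo_tbl (t text COLLATE drop_demo_coll);
DROP COLLATION drop_demo_coll;           -- fails: column t depends on it
DROP COLLATION drop_demo_coll CASCADE;   -- succeeds; column t is dropped too
DROP TABLE drop_demo_tbl;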
DROP COLLATION test0 CASCADE; NOTICE: drop cascades to 5 other objects -DETAIL: drop cascades to table collate_dep_test1 column b +DETAIL: drop cascades to column b of table collate_dep_test1 drop cascades to type collate_dep_dom1 -drop cascades to composite type collate_dep_test2 column y +drop cascades to column y of composite type collate_dep_test2 drop cascades to view collate_dep_test3 drop cascades to index collate_dep_test4i \d collate_dep_test1 diff --git a/src/test/regress/expected/collate.linux.utf8.out b/src/test/regress/expected/collate.linux.utf8.out index 6b7318613a..400a747cdc 100644 --- a/src/test/regress/expected/collate.linux.utf8.out +++ b/src/test/regress/expected/collate.linux.utf8.out @@ -977,12 +977,12 @@ ERROR: collations are not supported by type integer LINE 1: ...ATE INDEX collate_test1_idx6 ON collate_test1 ((a COLLATE "C... ^ SELECT relname, pg_get_indexdef(oid) FROM pg_class WHERE relname LIKE 'collate_test%_idx%' ORDER BY 1; - relname | pg_get_indexdef ---------------------+----------------------------------------------------------------------------------------------------- - collate_test1_idx1 | CREATE INDEX collate_test1_idx1 ON collate_test1 USING btree (b) - collate_test1_idx2 | CREATE INDEX collate_test1_idx2 ON collate_test1 USING btree (b COLLATE "C") - collate_test1_idx3 | CREATE INDEX collate_test1_idx3 ON collate_test1 USING btree (b COLLATE "C") - collate_test1_idx4 | CREATE INDEX collate_test1_idx4 ON collate_test1 USING btree (((b || 'foo'::text)) COLLATE "POSIX") + relname | pg_get_indexdef +--------------------+------------------------------------------------------------------------------------------------------------------- + collate_test1_idx1 | CREATE INDEX collate_test1_idx1 ON collate_tests.collate_test1 USING btree (b) + collate_test1_idx2 | CREATE INDEX collate_test1_idx2 ON collate_tests.collate_test1 USING btree (b COLLATE "C") + collate_test1_idx3 | CREATE INDEX collate_test1_idx3 ON collate_tests.collate_test1 USING btree (b COLLATE "C") + collate_test1_idx4 | CREATE INDEX collate_test1_idx4 ON collate_tests.collate_test1 USING btree (((b || 'foo'::text)) COLLATE "POSIX") (4 rows) -- schema manipulation commands @@ -1073,17 +1073,17 @@ CREATE TABLE collate_dep_test4t (a int, b text); CREATE INDEX collate_dep_test4i ON collate_dep_test4t (b COLLATE test0); DROP COLLATION test0 RESTRICT; -- fail ERROR: cannot drop collation test0 because other objects depend on it -DETAIL: table collate_dep_test1 column b depends on collation test0 +DETAIL: column b of table collate_dep_test1 depends on collation test0 type collate_dep_dom1 depends on collation test0 -composite type collate_dep_test2 column y depends on collation test0 +column y of composite type collate_dep_test2 depends on collation test0 view collate_dep_test3 depends on collation test0 index collate_dep_test4i depends on collation test0 HINT: Use DROP ... CASCADE to drop the dependent objects too. 
DROP COLLATION test0 CASCADE; NOTICE: drop cascades to 5 other objects -DETAIL: drop cascades to table collate_dep_test1 column b +DETAIL: drop cascades to column b of table collate_dep_test1 drop cascades to type collate_dep_dom1 -drop cascades to composite type collate_dep_test2 column y +drop cascades to column y of composite type collate_dep_test2 drop cascades to view collate_dep_test3 drop cascades to index collate_dep_test4i \d collate_dep_test1 diff --git a/src/test/regress/expected/collate.out b/src/test/regress/expected/collate.out index b0025c0a87..fcbe3a5cc8 100644 --- a/src/test/regress/expected/collate.out +++ b/src/test/regress/expected/collate.out @@ -572,12 +572,12 @@ ERROR: collations are not supported by type integer LINE 1: ...ATE INDEX collate_test1_idx6 ON collate_test1 ((a COLLATE "P... ^ SELECT relname, pg_get_indexdef(oid) FROM pg_class WHERE relname LIKE 'collate_test%_idx%' ORDER BY 1; - relname | pg_get_indexdef ---------------------+----------------------------------------------------------------------------------------------------- - collate_test1_idx1 | CREATE INDEX collate_test1_idx1 ON collate_test1 USING btree (b) - collate_test1_idx2 | CREATE INDEX collate_test1_idx2 ON collate_test1 USING btree (b COLLATE "POSIX") - collate_test1_idx3 | CREATE INDEX collate_test1_idx3 ON collate_test1 USING btree (b COLLATE "POSIX") - collate_test1_idx4 | CREATE INDEX collate_test1_idx4 ON collate_test1 USING btree (((b || 'foo'::text)) COLLATE "POSIX") + relname | pg_get_indexdef +--------------------+------------------------------------------------------------------------------------------------------------------- + collate_test1_idx1 | CREATE INDEX collate_test1_idx1 ON collate_tests.collate_test1 USING btree (b) + collate_test1_idx2 | CREATE INDEX collate_test1_idx2 ON collate_tests.collate_test1 USING btree (b COLLATE "POSIX") + collate_test1_idx3 | CREATE INDEX collate_test1_idx3 ON collate_tests.collate_test1 USING btree (b COLLATE "POSIX") + collate_test1_idx4 | CREATE INDEX collate_test1_idx4 ON collate_tests.collate_test1 USING btree (((b || 'foo'::text)) COLLATE "POSIX") (4 rows) -- foreign keys @@ -631,8 +631,13 @@ DROP COLLATION mycoll1; CREATE TABLE collate_test23 (f1 text collate mycoll2); DROP COLLATION mycoll2; -- fail ERROR: cannot drop collation mycoll2 because other objects depend on it -DETAIL: table collate_test23 column f1 depends on collation mycoll2 +DETAIL: column f1 of table collate_test23 depends on collation mycoll2 HINT: Use DROP ... CASCADE to drop the dependent objects too. +-- invalid: non-lowercase quoted identifiers +CREATE COLLATION case_coll ("Lc_Collate" = "POSIX", "Lc_Ctype" = "POSIX"); +ERROR: collation attribute "Lc_Collate" not recognized +LINE 1: CREATE COLLATION case_coll ("Lc_Collate" = "POSIX", "Lc_Ctyp... 
+ ^ -- 9.1 bug with useless COLLATE in an expression subject to length coercion CREATE TEMP TABLE vctable (f1 varchar(25)); INSERT INTO vctable VALUES ('foo' COLLATE "C"); diff --git a/src/test/regress/expected/copy2.out b/src/test/regress/expected/copy2.out index 65e9c626b3..eb9e4b9774 100644 --- a/src/test/regress/expected/copy2.out +++ b/src/test/regress/expected/copy2.out @@ -33,7 +33,7 @@ COPY x (a, b, c, d, e, d, c) from stdin; ERROR: column "d" specified more than once -- missing data: should fail COPY x from stdin; -ERROR: invalid input syntax for integer: "" +ERROR: invalid input syntax for type integer: "" CONTEXT: COPY x, line 1, column a: "" COPY x from stdin; ERROR: missing data for column "e" @@ -521,12 +521,12 @@ RESET SESSION AUTHORIZATION; SET SESSION AUTHORIZATION regress_rls_copy_user_colperms; -- attempt all columns (should fail) COPY rls_t1 TO stdout; -ERROR: permission denied for relation rls_t1 +ERROR: permission denied for table rls_t1 COPY rls_t1 (a, b, c) TO stdout; -ERROR: permission denied for relation rls_t1 +ERROR: permission denied for table rls_t1 -- try to copy column with no privileges (should fail) COPY rls_t1 (c) TO stdout; -ERROR: permission denied for relation rls_t1 +ERROR: permission denied for table rls_t1 -- subset of columns (should succeed) COPY rls_t1 (a) TO stdout; 2 diff --git a/src/test/regress/expected/create_aggregate.out b/src/test/regress/expected/create_aggregate.out index 341ba52b8d..3d92084e13 100644 --- a/src/test/regress/expected/create_aggregate.out +++ b/src/test/regress/expected/create_aggregate.out @@ -71,7 +71,8 @@ create aggregate my_percentile_disc(float8 ORDER BY anyelement) ( stype = internal, sfunc = ordered_set_transition, finalfunc = percentile_disc_final, - finalfunc_extra = true + finalfunc_extra = true, + finalfunc_modify = read_write ); create aggregate my_rank(VARIADIC "any" ORDER BY VARIADIC "any") ( stype = internal, @@ -146,15 +147,17 @@ CREATE AGGREGATE myavg (numeric) finalfunc = numeric_avg, serialfunc = numeric_avg_serialize, deserialfunc = numeric_avg_deserialize, - combinefunc = numeric_avg_combine + combinefunc = numeric_avg_combine, + finalfunc_modify = shareable -- just to test a non-default setting ); -- Ensure all these functions made it into the catalog -SELECT aggfnoid,aggtransfn,aggcombinefn,aggtranstype,aggserialfn,aggdeserialfn +SELECT aggfnoid, aggtransfn, aggcombinefn, aggtranstype::regtype, + aggserialfn, aggdeserialfn, aggfinalmodify FROM pg_aggregate WHERE aggfnoid = 'myavg'::REGPROC; - aggfnoid | aggtransfn | aggcombinefn | aggtranstype | aggserialfn | aggdeserialfn -----------+-------------------+---------------------+--------------+-----------------------+------------------------- - myavg | numeric_avg_accum | numeric_avg_combine | 2281 | numeric_avg_serialize | numeric_avg_deserialize + aggfnoid | aggtransfn | aggcombinefn | aggtranstype | aggserialfn | aggdeserialfn | aggfinalmodify +----------+-------------------+---------------------+--------------+-----------------------+-------------------------+---------------- + myavg | numeric_avg_accum | numeric_avg_combine | internal | numeric_avg_serialize | numeric_avg_deserialize | s (1 row) DROP AGGREGATE myavg (numeric); @@ -192,3 +195,33 @@ CREATE AGGREGATE wrongreturntype (float8) minvfunc = float8mi_int ); ERROR: return type of inverse transition function float8mi_int is not double precision +-- invalid: non-lowercase quoted identifiers +CREATE AGGREGATE case_agg ( -- old syntax + "Sfunc1" = int4pl, + "Basetype" = int4, + "Stype1" = int4, 
+ "Initcond1" = '0', + "Parallel" = safe +); +WARNING: aggregate attribute "Sfunc1" not recognized +WARNING: aggregate attribute "Basetype" not recognized +WARNING: aggregate attribute "Stype1" not recognized +WARNING: aggregate attribute "Initcond1" not recognized +WARNING: aggregate attribute "Parallel" not recognized +ERROR: aggregate stype must be specified +CREATE AGGREGATE case_agg(float8) +( + "Stype" = internal, + "Sfunc" = ordered_set_transition, + "Finalfunc" = percentile_disc_final, + "Finalfunc_extra" = true, + "Finalfunc_modify" = read_write, + "Parallel" = safe +); +WARNING: aggregate attribute "Stype" not recognized +WARNING: aggregate attribute "Sfunc" not recognized +WARNING: aggregate attribute "Finalfunc" not recognized +WARNING: aggregate attribute "Finalfunc_extra" not recognized +WARNING: aggregate attribute "Finalfunc_modify" not recognized +WARNING: aggregate attribute "Parallel" not recognized +ERROR: aggregate stype must be specified diff --git a/src/test/regress/expected/create_am.out b/src/test/regress/expected/create_am.out index 1b464aae2d..47dd885c4e 100644 --- a/src/test/regress/expected/create_am.out +++ b/src/test/regress/expected/create_am.out @@ -26,12 +26,10 @@ CREATE OPERATOR CLASS box_ops DEFAULT OPERATOR 14 @, FUNCTION 1 gist_box_consistent(internal, box, smallint, oid, internal), FUNCTION 2 gist_box_union(internal, internal), - FUNCTION 3 gist_box_compress(internal), - FUNCTION 4 gist_box_decompress(internal), + -- don't need compress, decompress, or fetch functions FUNCTION 5 gist_box_penalty(internal, internal, internal), FUNCTION 6 gist_box_picksplit(internal, internal), - FUNCTION 7 gist_box_same(box, box, internal), - FUNCTION 9 gist_box_fetch(internal); + FUNCTION 7 gist_box_same(box, box, internal); -- Create gist2 index on fast_emp4000 CREATE INDEX grect2ind2 ON fast_emp4000 USING gist2 (home_base); -- Now check the results from plain indexscan; temporarily drop existing diff --git a/src/test/regress/expected/create_function_3.out b/src/test/regress/expected/create_function_3.out index b5e19485e5..3301885fc8 100644 --- a/src/test/regress/expected/create_function_3.out +++ b/src/test/regress/expected/create_function_3.out @@ -1,13 +1,17 @@ -- -- CREATE FUNCTION -- --- sanity check of pg_proc catalog to the given parameters +-- Assorted tests using SQL-language functions -- +-- All objects made in this test are in temp_func_test schema CREATE USER regress_unpriv_user; CREATE SCHEMA temp_func_test; GRANT ALL ON SCHEMA temp_func_test TO public; SET search_path TO temp_func_test, public; -- +-- Make sanity checks on the pg_proc entries created by CREATE FUNCTION +-- +-- -- ARGUMENT and RETURN TYPES -- CREATE FUNCTION functest_A_1(text, date) RETURNS bool LANGUAGE 'sql' @@ -69,126 +73,170 @@ SELECT proname, provolatile FROM pg_proc -- -- SECURITY DEFINER | INVOKER -- -CREATE FUNCTION functext_C_1(int) RETURNS bool LANGUAGE 'sql' +CREATE FUNCTION functest_C_1(int) RETURNS bool LANGUAGE 'sql' AS 'SELECT $1 > 0'; -CREATE FUNCTION functext_C_2(int) RETURNS bool LANGUAGE 'sql' +CREATE FUNCTION functest_C_2(int) RETURNS bool LANGUAGE 'sql' SECURITY DEFINER AS 'SELECT $1 = 0'; -CREATE FUNCTION functext_C_3(int) RETURNS bool LANGUAGE 'sql' +CREATE FUNCTION functest_C_3(int) RETURNS bool LANGUAGE 'sql' SECURITY INVOKER AS 'SELECT $1 < 0'; SELECT proname, prosecdef FROM pg_proc - WHERE oid in ('functext_C_1'::regproc, - 'functext_C_2'::regproc, - 'functext_C_3'::regproc) ORDER BY proname; + WHERE oid in ('functest_C_1'::regproc, + 'functest_C_2'::regproc, + 
'functest_C_3'::regproc) ORDER BY proname; proname | prosecdef --------------+----------- - functext_c_1 | f - functext_c_2 | t - functext_c_3 | f + functest_c_1 | f + functest_c_2 | t + functest_c_3 | f (3 rows) -ALTER FUNCTION functext_C_1(int) IMMUTABLE; -- unrelated change, no effect -ALTER FUNCTION functext_C_2(int) SECURITY INVOKER; -ALTER FUNCTION functext_C_3(int) SECURITY DEFINER; +ALTER FUNCTION functest_C_1(int) IMMUTABLE; -- unrelated change, no effect +ALTER FUNCTION functest_C_2(int) SECURITY INVOKER; +ALTER FUNCTION functest_C_3(int) SECURITY DEFINER; SELECT proname, prosecdef FROM pg_proc - WHERE oid in ('functext_C_1'::regproc, - 'functext_C_2'::regproc, - 'functext_C_3'::regproc) ORDER BY proname; + WHERE oid in ('functest_C_1'::regproc, + 'functest_C_2'::regproc, + 'functest_C_3'::regproc) ORDER BY proname; proname | prosecdef --------------+----------- - functext_c_1 | f - functext_c_2 | f - functext_c_3 | t + functest_c_1 | f + functest_c_2 | f + functest_c_3 | t (3 rows) -- -- LEAKPROOF -- -CREATE FUNCTION functext_E_1(int) RETURNS bool LANGUAGE 'sql' +CREATE FUNCTION functest_E_1(int) RETURNS bool LANGUAGE 'sql' AS 'SELECT $1 > 100'; -CREATE FUNCTION functext_E_2(int) RETURNS bool LANGUAGE 'sql' +CREATE FUNCTION functest_E_2(int) RETURNS bool LANGUAGE 'sql' LEAKPROOF AS 'SELECT $1 > 100'; SELECT proname, proleakproof FROM pg_proc - WHERE oid in ('functext_E_1'::regproc, - 'functext_E_2'::regproc) ORDER BY proname; + WHERE oid in ('functest_E_1'::regproc, + 'functest_E_2'::regproc) ORDER BY proname; proname | proleakproof --------------+-------------- - functext_e_1 | f - functext_e_2 | t + functest_e_1 | f + functest_e_2 | t (2 rows) -ALTER FUNCTION functext_E_1(int) LEAKPROOF; -ALTER FUNCTION functext_E_2(int) STABLE; -- unrelated change, no effect +ALTER FUNCTION functest_E_1(int) LEAKPROOF; +ALTER FUNCTION functest_E_2(int) STABLE; -- unrelated change, no effect SELECT proname, proleakproof FROM pg_proc - WHERE oid in ('functext_E_1'::regproc, - 'functext_E_2'::regproc) ORDER BY proname; + WHERE oid in ('functest_E_1'::regproc, + 'functest_E_2'::regproc) ORDER BY proname; proname | proleakproof --------------+-------------- - functext_e_1 | t - functext_e_2 | t + functest_e_1 | t + functest_e_2 | t (2 rows) -ALTER FUNCTION functext_E_2(int) NOT LEAKPROOF; -- remove leakproog attribute +ALTER FUNCTION functest_E_2(int) NOT LEAKPROOF; -- remove leakproof attribute SELECT proname, proleakproof FROM pg_proc - WHERE oid in ('functext_E_1'::regproc, - 'functext_E_2'::regproc) ORDER BY proname; + WHERE oid in ('functest_E_1'::regproc, + 'functest_E_2'::regproc) ORDER BY proname; proname | proleakproof --------------+-------------- - functext_e_1 | t - functext_e_2 | f + functest_e_1 | t + functest_e_2 | f (2 rows) --- it takes superuser privilege to turn on leakproof, but not for turn off -ALTER FUNCTION functext_E_1(int) OWNER TO regress_unpriv_user; -ALTER FUNCTION functext_E_2(int) OWNER TO regress_unpriv_user; +-- it takes superuser privilege to turn on leakproof, but not to turn off +ALTER FUNCTION functest_E_1(int) OWNER TO regress_unpriv_user; +ALTER FUNCTION functest_E_2(int) OWNER TO regress_unpriv_user; SET SESSION AUTHORIZATION regress_unpriv_user; SET search_path TO temp_func_test, public; -ALTER FUNCTION functext_E_1(int) NOT LEAKPROOF; -ALTER FUNCTION functext_E_2(int) LEAKPROOF; +ALTER FUNCTION functest_E_1(int) NOT LEAKPROOF; +ALTER FUNCTION functest_E_2(int) LEAKPROOF; ERROR: only superuser can define a leakproof function -CREATE FUNCTION 
functext_E_3(int) RETURNS bool LANGUAGE 'sql' - LEAKPROOF AS 'SELECT $1 < 200'; -- failed +CREATE FUNCTION functest_E_3(int) RETURNS bool LANGUAGE 'sql' + LEAKPROOF AS 'SELECT $1 < 200'; -- fail ERROR: only superuser can define a leakproof function RESET SESSION AUTHORIZATION; -- -- CALLED ON NULL INPUT | RETURNS NULL ON NULL INPUT | STRICT -- -CREATE FUNCTION functext_F_1(int) RETURNS bool LANGUAGE 'sql' +CREATE FUNCTION functest_F_1(int) RETURNS bool LANGUAGE 'sql' AS 'SELECT $1 > 50'; -CREATE FUNCTION functext_F_2(int) RETURNS bool LANGUAGE 'sql' +CREATE FUNCTION functest_F_2(int) RETURNS bool LANGUAGE 'sql' CALLED ON NULL INPUT AS 'SELECT $1 = 50'; -CREATE FUNCTION functext_F_3(int) RETURNS bool LANGUAGE 'sql' +CREATE FUNCTION functest_F_3(int) RETURNS bool LANGUAGE 'sql' RETURNS NULL ON NULL INPUT AS 'SELECT $1 < 50'; -CREATE FUNCTION functext_F_4(int) RETURNS bool LANGUAGE 'sql' +CREATE FUNCTION functest_F_4(int) RETURNS bool LANGUAGE 'sql' STRICT AS 'SELECT $1 = 50'; SELECT proname, proisstrict FROM pg_proc - WHERE oid in ('functext_F_1'::regproc, - 'functext_F_2'::regproc, - 'functext_F_3'::regproc, - 'functext_F_4'::regproc) ORDER BY proname; + WHERE oid in ('functest_F_1'::regproc, + 'functest_F_2'::regproc, + 'functest_F_3'::regproc, + 'functest_F_4'::regproc) ORDER BY proname; proname | proisstrict --------------+------------- - functext_f_1 | f - functext_f_2 | f - functext_f_3 | t - functext_f_4 | t + functest_f_1 | f + functest_f_2 | f + functest_f_3 | t + functest_f_4 | t (4 rows) -ALTER FUNCTION functext_F_1(int) IMMUTABLE; -- unrelated change, no effect -ALTER FUNCTION functext_F_2(int) STRICT; -ALTER FUNCTION functext_F_3(int) CALLED ON NULL INPUT; +ALTER FUNCTION functest_F_1(int) IMMUTABLE; -- unrelated change, no effect +ALTER FUNCTION functest_F_2(int) STRICT; +ALTER FUNCTION functest_F_3(int) CALLED ON NULL INPUT; SELECT proname, proisstrict FROM pg_proc - WHERE oid in ('functext_F_1'::regproc, - 'functext_F_2'::regproc, - 'functext_F_3'::regproc, - 'functext_F_4'::regproc) ORDER BY proname; + WHERE oid in ('functest_F_1'::regproc, + 'functest_F_2'::regproc, + 'functest_F_3'::regproc, + 'functest_F_4'::regproc) ORDER BY proname; proname | proisstrict --------------+------------- - functext_f_1 | f - functext_f_2 | t - functext_f_3 | f - functext_f_4 | t + functest_f_1 | f + functest_f_2 | t + functest_f_3 | f + functest_f_4 | t (4 rows) +-- pg_get_functiondef tests +SELECT pg_get_functiondef('functest_A_1'::regproc); + pg_get_functiondef +-------------------------------------------------------------------- + CREATE OR REPLACE FUNCTION temp_func_test.functest_a_1(text, date)+ + RETURNS boolean + + LANGUAGE sql + + AS $function$SELECT $1 = 'abcd' AND $2 > '2001-01-01'$function$ + + +(1 row) + +SELECT pg_get_functiondef('functest_B_3'::regproc); + pg_get_functiondef +----------------------------------------------------------------- + CREATE OR REPLACE FUNCTION temp_func_test.functest_b_3(integer)+ + RETURNS boolean + + LANGUAGE sql + + STABLE + + AS $function$SELECT $1 = 0$function$ + + +(1 row) + +SELECT pg_get_functiondef('functest_C_3'::regproc); + pg_get_functiondef +----------------------------------------------------------------- + CREATE OR REPLACE FUNCTION temp_func_test.functest_c_3(integer)+ + RETURNS boolean + + LANGUAGE sql + + SECURITY DEFINER + + AS $function$SELECT $1 < 0$function$ + + +(1 row) + +SELECT pg_get_functiondef('functest_F_2'::regproc); + pg_get_functiondef +----------------------------------------------------------------- + CREATE OR REPLACE 
FUNCTION temp_func_test.functest_f_2(integer)+ + RETURNS boolean + + LANGUAGE sql + + STRICT + + AS $function$SELECT $1 = 50$function$ + + +(1 row) + -- information_schema tests CREATE FUNCTION functest_IS_1(a int, b int default 1, c text default 'foo') RETURNS int @@ -227,24 +275,75 @@ ERROR: could not find a function named "functest_b_1" DROP FUNCTION functest_b_2; -- error, ambiguous ERROR: function name "functest_b_2" is not unique HINT: Specify the argument list to select the function unambiguously. --- Cleanups +-- CREATE OR REPLACE tests +CREATE FUNCTION functest1(a int) RETURNS int LANGUAGE SQL AS 'SELECT $1'; +CREATE OR REPLACE FUNCTION functest1(a int) RETURNS int LANGUAGE SQL WINDOW AS 'SELECT $1'; +ERROR: cannot change routine kind +DETAIL: "functest1" is a function. +CREATE OR REPLACE PROCEDURE functest1(a int) LANGUAGE SQL AS 'SELECT $1'; +ERROR: cannot change routine kind +DETAIL: "functest1" is a function. +DROP FUNCTION functest1(a int); +-- Check behavior of VOID-returning SQL functions +CREATE FUNCTION voidtest1(a int) RETURNS VOID LANGUAGE SQL AS +$$ SELECT a + 1 $$; +SELECT voidtest1(42); + voidtest1 +----------- + +(1 row) + +CREATE FUNCTION voidtest2(a int, b int) RETURNS VOID LANGUAGE SQL AS +$$ SELECT voidtest1(a + b) $$; +SELECT voidtest2(11,22); + voidtest2 +----------- + +(1 row) + +-- currently, we can inline voidtest2 but not voidtest1 +EXPLAIN (verbose, costs off) SELECT voidtest2(11,22); + QUERY PLAN +------------------------- + Result + Output: voidtest1(33) +(2 rows) + +CREATE TEMP TABLE sometable(f1 int); +CREATE FUNCTION voidtest3(a int) RETURNS VOID LANGUAGE SQL AS +$$ INSERT INTO sometable VALUES(a + 1) $$; +SELECT voidtest3(17); + voidtest3 +----------- + +(1 row) + +CREATE FUNCTION voidtest4(a int) RETURNS VOID LANGUAGE SQL AS +$$ INSERT INTO sometable VALUES(a - 1) RETURNING f1 $$; +SELECT voidtest4(39); + voidtest4 +----------- + +(1 row) + +TABLE sometable; + f1 +---- + 18 + 38 +(2 rows) + +CREATE FUNCTION voidtest5(a int) RETURNS SETOF VOID LANGUAGE SQL AS +$$ SELECT generate_series(1, a) $$ STABLE; +SELECT * FROM voidtest5(3); + voidtest5 +----------- +(0 rows) + +-- Cleanup +\set VERBOSITY terse \\ -- suppress cascade details DROP SCHEMA temp_func_test CASCADE; -NOTICE: drop cascades to 16 other objects -DETAIL: drop cascades to function functest_a_1(text,date) -drop cascades to function functest_a_2(text[]) -drop cascades to function functest_a_3() -drop cascades to function functest_b_2(integer) -drop cascades to function functest_b_3(integer) -drop cascades to function functest_b_4(integer) -drop cascades to function functext_c_1(integer) -drop cascades to function functext_c_2(integer) -drop cascades to function functext_c_3(integer) -drop cascades to function functext_e_1(integer) -drop cascades to function functext_e_2(integer) -drop cascades to function functext_f_1(integer) -drop cascades to function functext_f_2(integer) -drop cascades to function functext_f_3(integer) -drop cascades to function functext_f_4(integer) -drop cascades to function functest_b_2(bigint) +NOTICE: drop cascades to 21 other objects +\set VERBOSITY default DROP USER regress_unpriv_user; RESET search_path; diff --git a/src/test/regress/expected/create_index.out b/src/test/regress/expected/create_index.out index 064adb4640..297535bb8f 100644 --- a/src/test/regress/expected/create_index.out +++ b/src/test/regress/expected/create_index.out @@ -157,7 +157,7 @@ SELECT count(*) FROM point_tbl WHERE box '(0,0,100,100)' @> f1; SELECT count(*) FROM point_tbl WHERE f1 <@ 
polygon '(0,0),(0,100),(100,100),(50,50),(100,0),(0,0)'; count ------- - 3 + 5 (1 row) SELECT count(*) FROM point_tbl WHERE f1 <@ circle '<(50,50),50>'; @@ -175,7 +175,7 @@ SELECT count(*) FROM point_tbl p WHERE p.f1 << '(0.0, 0.0)'; SELECT count(*) FROM point_tbl p WHERE p.f1 >> '(0.0, 0.0)'; count ------- - 2 + 3 (1 row) SELECT count(*) FROM point_tbl p WHERE p.f1 <^ '(0.0, 0.0)'; @@ -187,7 +187,7 @@ SELECT count(*) FROM point_tbl p WHERE p.f1 <^ '(0.0, 0.0)'; SELECT count(*) FROM point_tbl p WHERE p.f1 >^ '(0.0, 0.0)'; count ------- - 3 + 4 (1 row) SELECT count(*) FROM point_tbl p WHERE p.f1 ~= '(-5, -12)'; @@ -197,16 +197,19 @@ SELECT count(*) FROM point_tbl p WHERE p.f1 ~= '(-5, -12)'; (1 row) SELECT * FROM point_tbl ORDER BY f1 <-> '0,1'; - f1 ------------- + f1 +------------------- (0,0) + (1e-300,-1e-300) (-3,4) (-10,0) (10,10) (-5,-12) (5.1,34.5) + (1e+300,Infinity) + (NaN,NaN) -(7 rows) +(10 rows) SELECT * FROM point_tbl WHERE f1 IS NULL; f1 @@ -215,24 +218,28 @@ SELECT * FROM point_tbl WHERE f1 IS NULL; (1 row) SELECT * FROM point_tbl WHERE f1 IS NOT NULL ORDER BY f1 <-> '0,1'; - f1 ------------- + f1 +------------------- + (1e-300,-1e-300) (0,0) (-3,4) (-10,0) (10,10) (-5,-12) (5.1,34.5) -(6 rows) + (1e+300,Infinity) + (NaN,NaN) +(9 rows) SELECT * FROM point_tbl WHERE f1 <@ '(-10,-10),(10,10)':: box ORDER BY f1 <-> '0,1'; - f1 ---------- + f1 +------------------ (0,0) + (1e-300,-1e-300) (-3,4) (-10,0) (10,10) -(4 rows) +(5 rows) SELECT count(*) FROM quad_point_tbl WHERE p IS NULL; count @@ -294,6 +301,15 @@ SELECT count(*) FROM quad_point_tbl WHERE p ~= '(4585, 365)'; 1 (1 row) +CREATE TEMP TABLE quad_point_tbl_ord_seq1 AS +SELECT rank() OVER (ORDER BY p <-> '0,0') n, p <-> '0,0' dist, p +FROM quad_point_tbl; +CREATE TEMP TABLE quad_point_tbl_ord_seq2 AS +SELECT rank() OVER (ORDER BY p <-> '0,0') n, p <-> '0,0' dist, p +FROM quad_point_tbl WHERE p <@ box '(200,200,1000,1000)'; +CREATE TEMP TABLE quad_point_tbl_ord_seq3 AS +SELECT rank() OVER (ORDER BY p <-> '333,400') n, p <-> '333,400' dist, p +FROM quad_point_tbl WHERE p IS NOT NULL; SELECT count(*) FROM radix_text_tbl WHERE t = 'P0123456789abcdef'; count ------- @@ -372,6 +388,12 @@ SELECT count(*) FROM radix_text_tbl WHERE t ~>~ 'Worth 48 (1 row) +SELECT count(*) FROM radix_text_tbl WHERE t ^@ 'Worth'; + count +------- + 2 +(1 row) + SELECT * FROM gpolygon_tbl ORDER BY f1 <-> '(0,0)'::point LIMIT 10; f1 ------------------------------------------------- @@ -568,7 +590,7 @@ SELECT count(*) FROM point_tbl WHERE f1 <@ polygon '(0,0),(0,100),(100,100),(50, SELECT count(*) FROM point_tbl WHERE f1 <@ polygon '(0,0),(0,100),(100,100),(50,50),(100,0),(0,0)'; count ------- - 3 + 4 (1 row) EXPLAIN (COSTS OFF) @@ -613,7 +635,7 @@ SELECT count(*) FROM point_tbl p WHERE p.f1 >> '(0.0, 0.0)'; SELECT count(*) FROM point_tbl p WHERE p.f1 >> '(0.0, 0.0)'; count ------- - 2 + 3 (1 row) EXPLAIN (COSTS OFF) @@ -643,7 +665,7 @@ SELECT count(*) FROM point_tbl p WHERE p.f1 >^ '(0.0, 0.0)'; SELECT count(*) FROM point_tbl p WHERE p.f1 >^ '(0.0, 0.0)'; count ------- - 3 + 4 (1 row) EXPLAIN (COSTS OFF) @@ -670,16 +692,19 @@ SELECT * FROM point_tbl ORDER BY f1 <-> '0,1'; (2 rows) SELECT * FROM point_tbl ORDER BY f1 <-> '0,1'; - f1 ------------- + f1 +------------------- + (10,10) + (NaN,NaN) (0,0) + (1e-300,-1e-300) (-3,4) (-10,0) - (10,10) (-5,-12) (5.1,34.5) -(7 rows) + (1e+300,Infinity) +(10 rows) EXPLAIN (COSTS OFF) SELECT * FROM point_tbl WHERE f1 IS NULL; @@ -705,15 +730,18 @@ SELECT * FROM point_tbl WHERE f1 IS NOT NULL ORDER BY f1 <-> '0,1'; (3 
rows) SELECT * FROM point_tbl WHERE f1 IS NOT NULL ORDER BY f1 <-> '0,1'; - f1 ------------- + f1 +------------------- + (10,10) + (NaN,NaN) (0,0) + (1e-300,-1e-300) (-3,4) (-10,0) - (10,10) (-5,-12) (5.1,34.5) -(6 rows) + (1e+300,Infinity) +(9 rows) EXPLAIN (COSTS OFF) SELECT * FROM point_tbl WHERE f1 <@ '(-10,-10),(10,10)':: box ORDER BY f1 <-> '0,1'; @@ -725,13 +753,14 @@ SELECT * FROM point_tbl WHERE f1 <@ '(-10,-10),(10,10)':: box ORDER BY f1 <-> '0 (3 rows) SELECT * FROM point_tbl WHERE f1 <@ '(-10,-10),(10,10)':: box ORDER BY f1 <-> '0,1'; - f1 ---------- + f1 +------------------ (0,0) + (1e-300,-1e-300) (-3,4) (-10,0) (10,10) -(4 rows) +(5 rows) EXPLAIN (COSTS OFF) SELECT count(*) FROM quad_point_tbl WHERE p IS NULL; @@ -882,6 +911,71 @@ SELECT count(*) FROM quad_point_tbl WHERE p ~= '(4585, 365)'; 1 (1 row) +EXPLAIN (COSTS OFF) +SELECT rank() OVER (ORDER BY p <-> '0,0') n, p <-> '0,0' dist, p +FROM quad_point_tbl; + QUERY PLAN +----------------------------------------------------------- + WindowAgg + -> Index Only Scan using sp_quad_ind on quad_point_tbl + Order By: (p <-> '(0,0)'::point) +(3 rows) + +CREATE TEMP TABLE quad_point_tbl_ord_idx1 AS +SELECT rank() OVER (ORDER BY p <-> '0,0') n, p <-> '0,0' dist, p +FROM quad_point_tbl; +SELECT * FROM quad_point_tbl_ord_seq1 seq FULL JOIN quad_point_tbl_ord_idx1 idx +ON seq.n = idx.n +AND (seq.dist = idx.dist AND seq.p ~= idx.p OR seq.p IS NULL AND idx.p IS NULL) +WHERE seq.n IS NULL OR idx.n IS NULL; + n | dist | p | n | dist | p +---+------+---+---+------+--- +(0 rows) + +EXPLAIN (COSTS OFF) +SELECT rank() OVER (ORDER BY p <-> '0,0') n, p <-> '0,0' dist, p +FROM quad_point_tbl WHERE p <@ box '(200,200,1000,1000)'; + QUERY PLAN +----------------------------------------------------------- + WindowAgg + -> Index Only Scan using sp_quad_ind on quad_point_tbl + Index Cond: (p <@ '(1000,1000),(200,200)'::box) + Order By: (p <-> '(0,0)'::point) +(4 rows) + +CREATE TEMP TABLE quad_point_tbl_ord_idx2 AS +SELECT rank() OVER (ORDER BY p <-> '0,0') n, p <-> '0,0' dist, p +FROM quad_point_tbl WHERE p <@ box '(200,200,1000,1000)'; +SELECT * FROM quad_point_tbl_ord_seq2 seq FULL JOIN quad_point_tbl_ord_idx2 idx +ON seq.n = idx.n +AND (seq.dist = idx.dist AND seq.p ~= idx.p OR seq.p IS NULL AND idx.p IS NULL) +WHERE seq.n IS NULL OR idx.n IS NULL; + n | dist | p | n | dist | p +---+------+---+---+------+--- +(0 rows) + +EXPLAIN (COSTS OFF) +SELECT rank() OVER (ORDER BY p <-> '333,400') n, p <-> '333,400' dist, p +FROM quad_point_tbl WHERE p IS NOT NULL; + QUERY PLAN +----------------------------------------------------------- + WindowAgg + -> Index Only Scan using sp_quad_ind on quad_point_tbl + Index Cond: (p IS NOT NULL) + Order By: (p <-> '(333,400)'::point) +(4 rows) + +CREATE TEMP TABLE quad_point_tbl_ord_idx3 AS +SELECT rank() OVER (ORDER BY p <-> '333,400') n, p <-> '333,400' dist, p +FROM quad_point_tbl WHERE p IS NOT NULL; +SELECT * FROM quad_point_tbl_ord_seq3 seq FULL JOIN quad_point_tbl_ord_idx3 idx +ON seq.n = idx.n +AND (seq.dist = idx.dist AND seq.p ~= idx.p OR seq.p IS NULL AND idx.p IS NULL) +WHERE seq.n IS NULL OR idx.n IS NULL; + n | dist | p | n | dist | p +---+------+---+---+------+--- +(0 rows) + EXPLAIN (COSTS OFF) SELECT count(*) FROM kd_point_tbl WHERE p <@ box '(200,200,1000,1000)'; QUERY PLAN @@ -987,6 +1081,71 @@ SELECT count(*) FROM kd_point_tbl WHERE p ~= '(4585, 365)'; 1 (1 row) +EXPLAIN (COSTS OFF) +SELECT rank() OVER (ORDER BY p <-> '0,0') n, p <-> '0,0' dist, p +FROM kd_point_tbl; + QUERY PLAN 
+------------------------------------------------------- + WindowAgg + -> Index Only Scan using sp_kd_ind on kd_point_tbl + Order By: (p <-> '(0,0)'::point) +(3 rows) + +CREATE TEMP TABLE kd_point_tbl_ord_idx1 AS +SELECT rank() OVER (ORDER BY p <-> '0,0') n, p <-> '0,0' dist, p +FROM kd_point_tbl; +SELECT * FROM quad_point_tbl_ord_seq1 seq FULL JOIN kd_point_tbl_ord_idx1 idx +ON seq.n = idx.n AND +(seq.dist = idx.dist AND seq.p ~= idx.p OR seq.p IS NULL AND idx.p IS NULL) +WHERE seq.n IS NULL OR idx.n IS NULL; + n | dist | p | n | dist | p +---+------+---+---+------+--- +(0 rows) + +EXPLAIN (COSTS OFF) +SELECT rank() OVER (ORDER BY p <-> '0,0') n, p <-> '0,0' dist, p +FROM kd_point_tbl WHERE p <@ box '(200,200,1000,1000)'; + QUERY PLAN +--------------------------------------------------------- + WindowAgg + -> Index Only Scan using sp_kd_ind on kd_point_tbl + Index Cond: (p <@ '(1000,1000),(200,200)'::box) + Order By: (p <-> '(0,0)'::point) +(4 rows) + +CREATE TEMP TABLE kd_point_tbl_ord_idx2 AS +SELECT rank() OVER (ORDER BY p <-> '0,0') n, p <-> '0,0' dist, p +FROM kd_point_tbl WHERE p <@ box '(200,200,1000,1000)'; +SELECT * FROM quad_point_tbl_ord_seq2 seq FULL JOIN kd_point_tbl_ord_idx2 idx +ON seq.n = idx.n AND +(seq.dist = idx.dist AND seq.p ~= idx.p OR seq.p IS NULL AND idx.p IS NULL) +WHERE seq.n IS NULL OR idx.n IS NULL; + n | dist | p | n | dist | p +---+------+---+---+------+--- +(0 rows) + +EXPLAIN (COSTS OFF) +SELECT rank() OVER (ORDER BY p <-> '333,400') n, p <-> '333,400' dist, p +FROM kd_point_tbl WHERE p IS NOT NULL; + QUERY PLAN +------------------------------------------------------- + WindowAgg + -> Index Only Scan using sp_kd_ind on kd_point_tbl + Index Cond: (p IS NOT NULL) + Order By: (p <-> '(333,400)'::point) +(4 rows) + +CREATE TEMP TABLE kd_point_tbl_ord_idx3 AS +SELECT rank() OVER (ORDER BY p <-> '333,400') n, p <-> '333,400' dist, p +FROM kd_point_tbl WHERE p IS NOT NULL; +SELECT * FROM quad_point_tbl_ord_seq3 seq FULL JOIN kd_point_tbl_ord_idx3 idx +ON seq.n = idx.n AND +(seq.dist = idx.dist AND seq.p ~= idx.p OR seq.p IS NULL AND idx.p IS NULL) +WHERE seq.n IS NULL OR idx.n IS NULL; + n | dist | p | n | dist | p +---+------+---+---+------+--- +(0 rows) + EXPLAIN (COSTS OFF) SELECT count(*) FROM radix_text_tbl WHERE t = 'P0123456789abcdef'; QUERY PLAN @@ -1182,6 +1341,21 @@ SELECT count(*) FROM radix_text_tbl WHERE t ~>~ 'Worth 48 (1 row) +EXPLAIN (COSTS OFF) +SELECT count(*) FROM radix_text_tbl WHERE t ^@ 'Worth'; + QUERY PLAN +------------------------------------------------------------ + Aggregate + -> Index Only Scan using sp_radix_ind on radix_text_tbl + Index Cond: (t ^@ 'Worth'::text) +(3 rows) + +SELECT count(*) FROM radix_text_tbl WHERE t ^@ 'Worth'; + count +------- + 2 +(1 row) + EXPLAIN (COSTS OFF) SELECT * FROM gpolygon_tbl ORDER BY f1 <-> '(0,0)'::point LIMIT 10; QUERY PLAN @@ -1247,13 +1421,14 @@ SELECT * FROM point_tbl WHERE f1 <@ '(-10,-10),(10,10)':: box ORDER BY f1 <-> '0 (6 rows) SELECT * FROM point_tbl WHERE f1 <@ '(-10,-10),(10,10)':: box ORDER BY f1 <-> '0,1'; - f1 ---------- + f1 +------------------ (0,0) + (1e-300,-1e-300) (-3,4) (-10,0) (10,10) -(4 rows) +(5 rows) EXPLAIN (COSTS OFF) SELECT count(*) FROM quad_point_tbl WHERE p IS NULL; @@ -1763,6 +1938,23 @@ SELECT count(*) FROM radix_text_tbl WHERE t ~>~ 'Worth 48 (1 row) +EXPLAIN (COSTS OFF) +SELECT count(*) FROM radix_text_tbl WHERE t ^@ 'Worth'; + QUERY PLAN +------------------------------------------------ + Aggregate + -> Bitmap Heap Scan on radix_text_tbl + Recheck Cond: (t ^@ 
'Worth'::text) + -> Bitmap Index Scan on sp_radix_ind + Index Cond: (t ^@ 'Worth'::text) +(5 rows) + +SELECT count(*) FROM radix_text_tbl WHERE t ^@ 'Worth'; + count +------- + 2 +(1 row) + RESET enable_seqscan; RESET enable_indexscan; RESET enable_bitmapscan; @@ -2324,10 +2516,10 @@ DROP TABLE array_gin_test; CREATE INDEX gin_relopts_test ON array_index_op_test USING gin (i) WITH (FASTUPDATE=on, GIN_PENDING_LIST_LIMIT=128); \d+ gin_relopts_test - Index "public.gin_relopts_test" - Column | Type | Definition | Storage ---------+---------+------------+--------- - i | integer | i | plain + Index "public.gin_relopts_test" + Column | Type | Key? | Definition | Storage | Stats target +--------+---------+------+------------+---------+-------------- + i | integer | yes | i | plain | gin, for table "public.array_index_op_test" Options: fastupdate=on, gin_pending_list_limit=128 @@ -2337,7 +2529,7 @@ Options: fastupdate=on, gin_pending_list_limit=128 CREATE INDEX hash_i4_index ON hash_i4_heap USING hash (random int4_ops); CREATE INDEX hash_name_index ON hash_name_heap USING hash (random name_ops); CREATE INDEX hash_txt_index ON hash_txt_heap USING hash (random text_ops); -CREATE INDEX hash_f8_index ON hash_f8_heap USING hash (random float8_ops); +CREATE INDEX hash_f8_index ON hash_f8_heap USING hash (random float8_ops) WITH (fillfactor=60); CREATE UNLOGGED TABLE unlogged_hash_table (id int4); CREATE INDEX unlogged_hash_index ON unlogged_hash_table USING hash (id int4_ops); DROP TABLE unlogged_hash_table; @@ -2395,6 +2587,25 @@ DETAIL: Key ((f1 || f2))=(ABCDEF) already exists. -- but this shouldn't: INSERT INTO func_index_heap VALUES('QWERTY'); -- +-- Test unique index with included columns +-- +CREATE TABLE covering_index_heap (f1 int, f2 int, f3 text); +CREATE UNIQUE INDEX covering_index_index on covering_index_heap (f1,f2) INCLUDE(f3); +INSERT INTO covering_index_heap VALUES(1,1,'AAA'); +INSERT INTO covering_index_heap VALUES(1,2,'AAA'); +-- this should fail because of unique index on f1,f2: +INSERT INTO covering_index_heap VALUES(1,2,'BBB'); +ERROR: duplicate key value violates unique constraint "covering_index_index" +DETAIL: Key (f1, f2)=(1, 2) already exists. +-- and this shouldn't: +INSERT INTO covering_index_heap VALUES(1,4,'AAA'); +-- Try to build index on table that already contains data +CREATE UNIQUE INDEX covering_pkey on covering_index_heap (f1,f2) INCLUDE(f3); +-- Try to use existing covering index as primary key +ALTER TABLE covering_index_heap ADD CONSTRAINT covering_pkey PRIMARY KEY USING INDEX +covering_pkey; +DROP TABLE covering_index_heap; +-- -- Also try building functional, expressional, and partial indexes on -- tables that already contain data. -- @@ -2525,11 +2736,11 @@ Indexes: "cwi_uniq_idx" PRIMARY KEY, btree (a, b) \d cwi_uniq_idx - Index "public.cwi_uniq_idx" - Column | Type | Definition ---------+-----------------------+------------ - a | integer | a - b | character varying(10) | b + Index "public.cwi_uniq_idx" + Column | Type | Key? 
| Definition +--------+-----------------------+------+------------ + a | integer | yes | a + b | character varying(10) | yes | b primary key, btree, for table "public.cwi_test" CREATE UNIQUE INDEX cwi_uniq2_idx ON cwi_test(b , a); @@ -2548,17 +2759,23 @@ Indexes: "cwi_replaced_pkey" PRIMARY KEY, btree (b, a) \d cwi_replaced_pkey - Index "public.cwi_replaced_pkey" - Column | Type | Definition ---------+-----------------------+------------ - b | character varying(10) | b - a | integer | a + Index "public.cwi_replaced_pkey" + Column | Type | Key? | Definition +--------+-----------------------+------+------------ + b | character varying(10) | yes | b + a | integer | yes | a primary key, btree, for table "public.cwi_test" DROP INDEX cwi_replaced_pkey; -- Should fail; a constraint depends on it ERROR: cannot drop index cwi_replaced_pkey because constraint cwi_replaced_pkey on table cwi_test requires it HINT: You can drop constraint cwi_replaced_pkey on table cwi_test instead. DROP TABLE cwi_test; +-- ADD CONSTRAINT USING INDEX is forbidden on partitioned tables +CREATE TABLE cwi_test(a int) PARTITION BY hash (a); +create unique index on cwi_test (a); +alter table cwi_test add primary key using index cwi_test_a_idx ; +ERROR: ALTER TABLE / ADD CONSTRAINT USING INDEX is not supported on partitioned tables +DROP TABLE cwi_test; -- -- Check handling of indexes on system columns -- @@ -2989,6 +3206,16 @@ explain (costs off) Filter: (NOT b) (4 rows) +-- +-- Test for multilevel page deletion +-- +CREATE TABLE delete_test_table (a bigint, b bigint, c bigint, d bigint); +INSERT INTO delete_test_table SELECT i, 1, 2, 3 FROM generate_series(1,80000) i; +ALTER TABLE delete_test_table ADD PRIMARY KEY (a,b,c,d); +DELETE FROM delete_test_table WHERE a > 40000; +VACUUM delete_test_table; +DELETE FROM delete_test_table WHERE a > 10; +VACUUM delete_test_table; -- -- REINDEX (VERBOSE) -- diff --git a/src/test/regress/expected/create_misc.out b/src/test/regress/expected/create_misc.out index 45125fedfd..8366841ff0 100644 --- a/src/test/regress/expected/create_misc.out +++ b/src/test/regress/expected/create_misc.out @@ -24,6 +24,16 @@ INSERT INTO equipment_r (name, hobby) VALUES ('advil', 'posthacking'); INSERT INTO equipment_r (name, hobby) VALUES ('peet''s coffee', 'posthacking'); INSERT INTO equipment_r (name, hobby) VALUES ('hightops', 'basketball'); INSERT INTO equipment_r (name, hobby) VALUES ('guts', 'skywalking'); +INSERT INTO city VALUES +('Podunk', '(1,2),(3,4)', '100,127,1000'), +('Gotham', '(1000,34),(1100,334)', '123456,127,-1000,6789'); +TABLE city; + name | location | budget +--------+----------------------+----------------------- + Podunk | (3,4),(1,2) | 100,127,1000,0 + Gotham | (1100,334),(1000,34) | 123456,127,-1000,6789 +(2 rows) + SELECT * INTO TABLE ramp FROM road diff --git a/src/test/regress/expected/create_operator.out b/src/test/regress/expected/create_operator.out index 3a216c2ca8..54e8b79159 100644 --- a/src/test/regress/expected/create_operator.out +++ b/src/test/regress/expected/create_operator.out @@ -4,7 +4,7 @@ CREATE OPERATOR ## ( leftarg = path, rightarg = path, - procedure = path_inter, + function = path_inter, commutator = ## ); CREATE OPERATOR <% ( @@ -26,6 +26,14 @@ CREATE OPERATOR #%# ( leftarg = int8, -- right unary procedure = numeric_fac ); +-- Test operator created above +SELECT point '(1,2)' <% widget '(0,0,3)' AS t, + point '(1,2)' <% widget '(0,0,1)' AS f; + t | f +---+--- + t | f +(1 row) + -- Test comments COMMENT ON OPERATOR ###### (int4, NONE) IS 'bad right unary'; 
ERROR: operator does not exist: integer ###### @@ -37,6 +45,80 @@ CREATE OPERATOR => ( ERROR: syntax error at or near "=>" LINE 1: CREATE OPERATOR => ( ^ +-- lexing of <=, >=, <>, != has a number of edge cases +-- (=> is tested elsewhere) +-- this is legal because ! is not allowed in sql ops +CREATE OPERATOR !=- ( + leftarg = int8, -- right unary + procedure = numeric_fac +); +SELECT 2 !=-; + ?column? +---------- + 2 +(1 row) + +-- make sure lexer returns != as <> even in edge cases +SELECT 2 !=/**/ 1, 2 !=/**/ 2; + ?column? | ?column? +----------+---------- + t | f +(1 row) + +SELECT 2 !=-- comment to be removed by psql + 1; + ?column? +---------- + t +(1 row) + +DO $$ -- use DO to protect -- from psql + declare r boolean; + begin + execute $e$ select 2 !=-- comment + 1 $e$ into r; + raise info 'r = %', r; + end; +$$; +INFO: r = t +-- check that <= etc. followed by more operator characters are returned +-- as the correct token with correct precedence +SELECT true<>-1 BETWEEN 1 AND 1; -- BETWEEN has prec. above <> but below Op + ?column? +---------- + t +(1 row) + +SELECT false<>/**/1 BETWEEN 1 AND 1; + ?column? +---------- + t +(1 row) + +SELECT false<=-1 BETWEEN 1 AND 1; + ?column? +---------- + t +(1 row) + +SELECT false>=-1 BETWEEN 1 AND 1; + ?column? +---------- + t +(1 row) + +SELECT 2<=/**/3, 3>=/**/2, 2<>/**/3; + ?column? | ?column? | ?column? +----------+----------+---------- + t | t | t +(1 row) + +SELECT 3<=/**/2, 2>=/**/3, 2<>/**/2; + ?column? | ?column? | ?column? +----------+----------+---------- + f | f | f +(1 row) + -- Should fail. CREATE OPERATOR requires USAGE on SCHEMA BEGIN TRANSACTION; CREATE ROLE regress_rol_op1; @@ -99,7 +181,7 @@ ERROR: at least one of leftarg or rightarg must be specified CREATE OPERATOR #@%# ( leftarg = int8 ); -ERROR: operator procedure must be specified +ERROR: operator function must be specified -- Should fail. CREATE OPERATOR requires USAGE on TYPE BEGIN TRANSACTION; CREATE ROLE regress_rol_op3; @@ -172,3 +254,26 @@ CREATE OPERATOR #*# ( ); ERROR: permission denied for type type_op6 ROLLBACK; +-- invalid: non-lowercase quoted identifiers +CREATE OPERATOR === +( + "Leftarg" = box, + "Rightarg" = box, + "Procedure" = area_equal_function, + "Commutator" = ===, + "Negator" = !==, + "Restrict" = area_restriction_function, + "Join" = area_join_function, + "Hashes", + "Merges" +); +WARNING: operator attribute "Leftarg" not recognized +WARNING: operator attribute "Rightarg" not recognized +WARNING: operator attribute "Procedure" not recognized +WARNING: operator attribute "Commutator" not recognized +WARNING: operator attribute "Negator" not recognized +WARNING: operator attribute "Restrict" not recognized +WARNING: operator attribute "Join" not recognized +WARNING: operator attribute "Hashes" not recognized +WARNING: operator attribute "Merges" not recognized +ERROR: operator function must be specified diff --git a/src/test/regress/expected/create_procedure.out b/src/test/regress/expected/create_procedure.out new file mode 100644 index 0000000000..5b9b83839c --- /dev/null +++ b/src/test/regress/expected/create_procedure.out @@ -0,0 +1,202 @@ +CALL nonexistent(); -- error +ERROR: procedure nonexistent() does not exist +LINE 1: CALL nonexistent(); + ^ +HINT: No procedure matches the given name and argument types. You might need to add explicit type casts. +CALL random(); -- error +ERROR: random() is not a procedure +LINE 1: CALL random(); + ^ +HINT: To call a function, use SELECT. 
+CREATE FUNCTION cp_testfunc1(a int) RETURNS int LANGUAGE SQL AS $$ SELECT a $$; +CREATE TABLE cp_test (a int, b text); +CREATE PROCEDURE ptest1(x text) +LANGUAGE SQL +AS $$ +INSERT INTO cp_test VALUES (1, x); +$$; +\df ptest1 + List of functions + Schema | Name | Result data type | Argument data types | Type +--------+--------+------------------+---------------------+------ + public | ptest1 | | x text | proc +(1 row) + +SELECT pg_get_functiondef('ptest1'::regproc); + pg_get_functiondef +--------------------------------------------------- + CREATE OR REPLACE PROCEDURE public.ptest1(x text)+ + LANGUAGE sql + + AS $procedure$ + + INSERT INTO cp_test VALUES (1, x); + + $procedure$ + + +(1 row) + +-- show only normal functions +\dfn public.*test*1 + List of functions + Schema | Name | Result data type | Argument data types | Type +--------+--------------+------------------+---------------------+------ + public | cp_testfunc1 | integer | a integer | func +(1 row) + +-- show only procedures +\dfp public.*test*1 + List of functions + Schema | Name | Result data type | Argument data types | Type +--------+--------+------------------+---------------------+------ + public | ptest1 | | x text | proc +(1 row) + +SELECT ptest1('x'); -- error +ERROR: ptest1(unknown) is a procedure +LINE 1: SELECT ptest1('x'); + ^ +HINT: To call a procedure, use CALL. +CALL ptest1('a'); -- ok +CALL ptest1('xy' || 'zzy'); -- ok, constant-folded arg +CALL ptest1(substring(random()::numeric(20,15)::text, 1, 1)); -- ok, volatile arg +SELECT * FROM cp_test ORDER BY b COLLATE "C"; + a | b +---+------- + 1 | 0 + 1 | a + 1 | xyzzy +(3 rows) + +CREATE PROCEDURE ptest2() +LANGUAGE SQL +AS $$ +SELECT 5; +$$; +CALL ptest2(); +-- nested CALL +TRUNCATE cp_test; +CREATE PROCEDURE ptest3(y text) +LANGUAGE SQL +AS $$ +CALL ptest1(y); +CALL ptest1($1); +$$; +CALL ptest3('b'); +SELECT * FROM cp_test; + a | b +---+--- + 1 | b + 1 | b +(2 rows) + +-- output arguments +CREATE PROCEDURE ptest4a(INOUT a int, INOUT b int) +LANGUAGE SQL +AS $$ +SELECT 1, 2; +$$; +CALL ptest4a(NULL, NULL); + a | b +---+--- + 1 | 2 +(1 row) + +CREATE PROCEDURE ptest4b(INOUT b int, INOUT a int) +LANGUAGE SQL +AS $$ +CALL ptest4a(a, b); -- error, not supported +$$; +ERROR: calling procedures with output arguments is not supported in SQL functions +CONTEXT: SQL function "ptest4b" +DROP PROCEDURE ptest4a; +-- named and default parameters +CREATE OR REPLACE PROCEDURE ptest5(a int, b text, c int default 100) +LANGUAGE SQL +AS $$ +INSERT INTO cp_test VALUES(a, b); +INSERT INTO cp_test VALUES(c, b); +$$; +TRUNCATE cp_test; +CALL ptest5(10, 'Hello', 20); +CALL ptest5(10, 'Hello'); +CALL ptest5(10, b => 'Hello'); +CALL ptest5(b => 'Hello', a => 10); +SELECT * FROM cp_test; + a | b +-----+------- + 10 | Hello + 20 | Hello + 10 | Hello + 100 | Hello + 10 | Hello + 100 | Hello + 10 | Hello + 100 | Hello +(8 rows) + +-- polymorphic types +CREATE PROCEDURE ptest6(a int, b anyelement) +LANGUAGE SQL +AS $$ +SELECT NULL::int; +$$; +CALL ptest6(1, 2); +-- various error cases +CALL version(); -- error: not a procedure +ERROR: version() is not a procedure +LINE 1: CALL version(); + ^ +HINT: To call a function, use SELECT. +CALL sum(1); -- error: not a procedure +ERROR: sum(integer) is not a procedure +LINE 1: CALL sum(1); + ^ +HINT: To call a function, use SELECT. +CREATE PROCEDURE ptestx() LANGUAGE SQL WINDOW AS $$ INSERT INTO cp_test VALUES (1, 'a') $$; +ERROR: invalid attribute in procedure definition +LINE 1: CREATE PROCEDURE ptestx() LANGUAGE SQL WINDOW AS $$ INSERT I... 
+ ^ +CREATE PROCEDURE ptestx() LANGUAGE SQL STRICT AS $$ INSERT INTO cp_test VALUES (1, 'a') $$; +ERROR: invalid attribute in procedure definition +LINE 1: CREATE PROCEDURE ptestx() LANGUAGE SQL STRICT AS $$ INSERT I... + ^ +CREATE PROCEDURE ptestx(OUT a int) LANGUAGE SQL AS $$ INSERT INTO cp_test VALUES (1, 'a') $$; +ERROR: procedures cannot have OUT arguments +HINT: INOUT arguments are permitted. +ALTER PROCEDURE ptest1(text) STRICT; +ERROR: invalid attribute in procedure definition +LINE 1: ALTER PROCEDURE ptest1(text) STRICT; + ^ +ALTER FUNCTION ptest1(text) VOLATILE; -- error: not a function +ERROR: ptest1(text) is not a function +ALTER PROCEDURE cp_testfunc1(int) VOLATILE; -- error: not a procedure +ERROR: cp_testfunc1(integer) is not a procedure +ALTER PROCEDURE nonexistent() VOLATILE; +ERROR: procedure nonexistent() does not exist +DROP FUNCTION ptest1(text); -- error: not a function +ERROR: ptest1(text) is not a function +DROP PROCEDURE cp_testfunc1(int); -- error: not a procedure +ERROR: cp_testfunc1(integer) is not a procedure +DROP PROCEDURE nonexistent(); +ERROR: procedure nonexistent() does not exist +-- privileges +CREATE USER regress_cp_user1; +GRANT INSERT ON cp_test TO regress_cp_user1; +REVOKE EXECUTE ON PROCEDURE ptest1(text) FROM PUBLIC; +SET ROLE regress_cp_user1; +CALL ptest1('a'); -- error +ERROR: permission denied for procedure ptest1 +RESET ROLE; +GRANT EXECUTE ON PROCEDURE ptest1(text) TO regress_cp_user1; +SET ROLE regress_cp_user1; +CALL ptest1('a'); -- ok +RESET ROLE; +-- ROUTINE syntax +ALTER ROUTINE cp_testfunc1(int) RENAME TO cp_testfunc1a; +ALTER ROUTINE cp_testfunc1a RENAME TO cp_testfunc1; +ALTER ROUTINE ptest1(text) RENAME TO ptest1a; +ALTER ROUTINE ptest1a RENAME TO ptest1; +DROP ROUTINE cp_testfunc1(int); +-- cleanup +DROP PROCEDURE ptest1; +DROP PROCEDURE ptest2; +DROP TABLE cp_test; +DROP USER regress_cp_user1; diff --git a/src/test/regress/expected/create_table.out b/src/test/regress/expected/create_table.out index babda8978c..7b184330ed 100644 --- a/src/test/regress/expected/create_table.out +++ b/src/test/regress/expected/create_table.out @@ -215,6 +215,11 @@ CREATE TABLE IF NOT EXISTS test_tsvector( t text ); NOTICE: relation "test_tsvector" already exists, skipping +-- invalid: non-lowercase quoted reloptions identifiers +CREATE TABLE tas_case WITH ("Fillfactor" = 10) AS SELECT 1 a; +ERROR: unrecognized parameter "Fillfactor" +CREATE TABLE tas_case (a text) WITH ("Oids" = true); +ERROR: unrecognized parameter "Oids" CREATE UNLOGGED TABLE unlogged1 (a int primary key); -- OK CREATE TEMPORARY TABLE unlogged2 (a int primary key); -- OK SELECT relname, relkind, relpersistence FROM pg_class WHERE relname ~ '^unlogged\d' ORDER BY relname; @@ -276,28 +281,6 @@ CREATE TABLE partitioned ( ) PARTITION BY LIST (a1, a2); -- fail ERROR: cannot use "list" partition strategy with more than one column -- unsupported constraint type for partitioned tables -CREATE TABLE partitioned ( - a int PRIMARY KEY -) PARTITION BY RANGE (a); -ERROR: primary key constraints are not supported on partitioned tables -LINE 2: a int PRIMARY KEY - ^ -CREATE TABLE pkrel ( - a int PRIMARY KEY -); -CREATE TABLE partitioned ( - a int REFERENCES pkrel(a) -) PARTITION BY RANGE (a); -ERROR: foreign key constraints are not supported on partitioned tables -LINE 2: a int REFERENCES pkrel(a) - ^ -DROP TABLE pkrel; -CREATE TABLE partitioned ( - a int UNIQUE -) PARTITION BY RANGE (a); -ERROR: unique constraints are not supported on partitioned tables -LINE 2: a int UNIQUE - ^ CREATE TABLE 
partitioned ( a int, EXCLUDE USING gist (a WITH &&) @@ -305,11 +288,6 @@ CREATE TABLE partitioned ( ERROR: exclusion constraints are not supported on partitioned tables LINE 3: EXCLUDE USING gist (a WITH &&) ^ --- prevent column from being used twice in the partition key -CREATE TABLE partitioned ( - a int -) PARTITION BY RANGE (a, a); -ERROR: column "a" appears more than once in partition key -- prevent using prohibited expressions in the key CREATE FUNCTION retset (a int) RETURNS SETOF int AS $$ SELECT 1; $$ LANGUAGE SQL IMMUTABLE; CREATE TABLE partitioned ( @@ -320,12 +298,12 @@ DROP FUNCTION retset(int); CREATE TABLE partitioned ( a int ) PARTITION BY RANGE ((avg(a))); -ERROR: aggregate functions are not allowed in partition key expression +ERROR: aggregate functions are not allowed in partition key expressions CREATE TABLE partitioned ( a int, b int ) PARTITION BY RANGE ((avg(a) OVER (PARTITION BY b))); -ERROR: window functions are not allowed in partition key expression +ERROR: window functions are not allowed in partition key expressions CREATE TABLE partitioned ( a int ) PARTITION BY LIST ((a LIKE (SELECT 1))); @@ -340,21 +318,25 @@ CREATE TABLE partitioned ( ) PARTITION BY RANGE (const_func()); ERROR: cannot use constant expression as partition key DROP FUNCTION const_func(); --- only accept "list" and "range" as partitioning strategy +-- only accept valid partitioning strategy CREATE TABLE partitioned ( - a int -) PARTITION BY HASH (a); -ERROR: unrecognized partitioning strategy "hash" + a int +) PARTITION BY MAGIC (a); +ERROR: unrecognized partitioning strategy "magic" -- specified column must be present in the table CREATE TABLE partitioned ( a int ) PARTITION BY RANGE (b); ERROR: column "b" named in partition key does not exist +LINE 3: ) PARTITION BY RANGE (b); + ^ -- cannot use system columns in partition key CREATE TABLE partitioned ( a int ) PARTITION BY RANGE (xmin); ERROR: cannot use system column "xmin" in partition key +LINE 3: ) PARTITION BY RANGE (xmin); + ^ -- functions in key must be immutable CREATE FUNCTION immut_func (a int) RETURNS int AS $$ SELECT a + random()::int; $$ LANGUAGE SQL; CREATE TABLE partitioned ( @@ -414,8 +396,9 @@ DETAIL: table partitioned depends on function plusone(integer) HINT: Use DROP ... CASCADE to drop the dependent objects too. 
-- partitioned table cannot participate in regular inheritance CREATE TABLE partitioned2 ( - a int -) PARTITION BY LIST ((a+1)); + a int, + b text +) PARTITION BY RANGE ((a+1), substr(b, 1, 5)); CREATE TABLE fail () INHERITS (partitioned2); ERROR: cannot inherit from partitioned table "partitioned2" -- Partition key in describe output @@ -428,13 +411,29 @@ ERROR: cannot inherit from partitioned table "partitioned2" c | text | | | d | text | | | Partition key: RANGE (a oid_ops, plusone(b), c, d COLLATE "C") +Number of partitions: 0 -\d partitioned2 - Table "public.partitioned2" - Column | Type | Collation | Nullable | Default ---------+---------+-----------+----------+--------- - a | integer | | | -Partition key: LIST (((a + 1))) +\d+ partitioned2 + Table "public.partitioned2" + Column | Type | Collation | Nullable | Default | Storage | Stats target | Description +--------+---------+-----------+----------+---------+----------+--------------+------------- + a | integer | | | | plain | | + b | text | | | | extended | | +Partition key: RANGE (((a + 1)), substr(b, 1, 5)) +Number of partitions: 0 + +INSERT INTO partitioned2 VALUES (1, 'hello'); +ERROR: no partition of relation "partitioned2" found for row +DETAIL: Partition key of the failing row contains ((a + 1), substr(b, 1, 5)) = (2, hello). +CREATE TABLE part2_1 PARTITION OF partitioned2 FOR VALUES FROM (-1, 'aaaaa') TO (100, 'ccccc'); +\d+ part2_1 + Table "public.part2_1" + Column | Type | Collation | Nullable | Default | Storage | Stats target | Description +--------+---------+-----------+----------+---------+----------+--------------+------------- + a | integer | | | | plain | | + b | text | | | | extended | | +Partition of: partitioned2 FOR VALUES FROM ('-1', 'aaaaa') TO (100, 'ccccc') +Partition constraint: (((a + 1) IS NOT NULL) AND (substr(b, 1, 5) IS NOT NULL) AND (((a + 1) > '-1'::integer) OR (((a + 1) = '-1'::integer) AND (substr(b, 1, 5) >= 'aaaaa'::text))) AND (((a + 1) < 100) OR (((a + 1) = 100) AND (substr(b, 1, 5) < 'ccccc'::text)))) DROP TABLE partitioned, partitioned2; -- @@ -467,6 +466,15 @@ CREATE TABLE fail_part PARTITION OF list_parted FOR VALUES FROM (1) TO (2); ERROR: invalid bound specification for a list partition LINE 1: ...BLE fail_part PARTITION OF list_parted FOR VALUES FROM (1) T... ^ +-- trying to specify modulus and remainder for list partitioned table +CREATE TABLE fail_part PARTITION OF list_parted FOR VALUES WITH (MODULUS 10, REMAINDER 1); +ERROR: invalid bound specification for a list partition +LINE 1: ...BLE fail_part PARTITION OF list_parted FOR VALUES WITH (MODU... + ^ +-- check default partition cannot be created more than once +CREATE TABLE part_default PARTITION OF list_parted DEFAULT; +CREATE TABLE fail_default_part PARTITION OF list_parted DEFAULT; +ERROR: partition "fail_default_part" conflicts with existing default partition "part_default" -- specified literal can't be cast to the partition column data type CREATE TABLE bools ( a bool @@ -505,6 +513,11 @@ CREATE TABLE fail_part PARTITION OF range_parted FOR VALUES IN ('a'); ERROR: invalid bound specification for a range partition LINE 1: ...BLE fail_part PARTITION OF range_parted FOR VALUES IN ('a'); ^ +-- trying to specify modulus and remainder for range partitioned table +CREATE TABLE fail_part PARTITION OF range_parted FOR VALUES WITH (MODULUS 10, REMAINDER 1); +ERROR: invalid bound specification for a range partition +LINE 1: ...LE fail_part PARTITION OF range_parted FOR VALUES WITH (MODU... 
+ ^ -- each of start and end bounds must have same number of values as the -- length of the partition key CREATE TABLE fail_part PARTITION OF range_parted FOR VALUES FROM ('a', 1) TO ('z'); @@ -514,6 +527,37 @@ ERROR: TO must specify exactly one value per partitioning column -- cannot specify null values in range bounds CREATE TABLE fail_part PARTITION OF range_parted FOR VALUES FROM (null) TO (maxvalue); ERROR: cannot specify NULL in range bound +-- trying to specify modulus and remainder for range partitioned table +CREATE TABLE fail_part PARTITION OF range_parted FOR VALUES WITH (MODULUS 10, REMAINDER 1); +ERROR: invalid bound specification for a range partition +LINE 1: ...LE fail_part PARTITION OF range_parted FOR VALUES WITH (MODU... + ^ +-- check partition bound syntax for the hash partition +CREATE TABLE hash_parted ( + a int +) PARTITION BY HASH (a); +CREATE TABLE hpart_1 PARTITION OF hash_parted FOR VALUES WITH (MODULUS 10, REMAINDER 0); +CREATE TABLE hpart_2 PARTITION OF hash_parted FOR VALUES WITH (MODULUS 50, REMAINDER 1); +CREATE TABLE hpart_3 PARTITION OF hash_parted FOR VALUES WITH (MODULUS 200, REMAINDER 2); +-- modulus 25 is factor of modulus of 50 but 10 is not factor of 25. +CREATE TABLE fail_part PARTITION OF hash_parted FOR VALUES WITH (MODULUS 25, REMAINDER 3); +ERROR: every hash partition modulus must be a factor of the next larger modulus +-- previous modulus 50 is factor of 150 but this modulus is not factor of next modulus 200. +CREATE TABLE fail_part PARTITION OF hash_parted FOR VALUES WITH (MODULUS 150, REMAINDER 3); +ERROR: every hash partition modulus must be a factor of the next larger modulus +-- trying to specify range for the hash partitioned table +CREATE TABLE fail_part PARTITION OF hash_parted FOR VALUES FROM ('a', 1) TO ('z'); +ERROR: invalid bound specification for a hash partition +LINE 1: ...BLE fail_part PARTITION OF hash_parted FOR VALUES FROM ('a',... 
+ ^ +-- trying to specify list value for the hash partitioned table +CREATE TABLE fail_part PARTITION OF hash_parted FOR VALUES IN (1000); +ERROR: invalid bound specification for a hash partition +LINE 1: ...BLE fail_part PARTITION OF hash_parted FOR VALUES IN (1000); + ^ +-- trying to create default partition for the hash partitioned table +CREATE TABLE fail_default_part PARTITION OF hash_parted DEFAULT; +ERROR: a hash-partitioned table may not have a default partition -- check if compatible with the specified parent -- cannot create as partition of a non-partitioned table CREATE TABLE unparted ( @@ -521,6 +565,8 @@ CREATE TABLE unparted ( ); CREATE TABLE fail_part PARTITION OF unparted FOR VALUES IN ('a'); ERROR: "unparted" is not partitioned +CREATE TABLE fail_part PARTITION OF unparted FOR VALUES WITH (MODULUS 2, REMAINDER 1); +ERROR: "unparted" is not partitioned DROP TABLE unparted; -- cannot create a permanent rel as partition of a temp rel CREATE TEMP TABLE temp_parted ( @@ -558,10 +604,15 @@ CREATE TABLE list_parted2 ( ) PARTITION BY LIST (a); CREATE TABLE part_null_z PARTITION OF list_parted2 FOR VALUES IN (null, 'z'); CREATE TABLE part_ab PARTITION OF list_parted2 FOR VALUES IN ('a', 'b'); +CREATE TABLE list_parted2_def PARTITION OF list_parted2 DEFAULT; CREATE TABLE fail_part PARTITION OF list_parted2 FOR VALUES IN (null); ERROR: partition "fail_part" would overlap partition "part_null_z" CREATE TABLE fail_part PARTITION OF list_parted2 FOR VALUES IN ('b', 'c'); ERROR: partition "fail_part" would overlap partition "part_ab" +-- check default partition overlap +INSERT INTO list_parted2 VALUES('X'); +CREATE TABLE fail_part PARTITION OF list_parted2 FOR VALUES IN ('W', 'X', 'Y'); +ERROR: updated partition constraint for default partition "list_parted2_def" would be violated by some row CREATE TABLE range_parted2 ( a int ) PARTITION BY RANGE (a); @@ -585,6 +636,16 @@ CREATE TABLE fail_part PARTITION OF range_parted2 FOR VALUES FROM (10) TO (30); ERROR: partition "fail_part" would overlap partition "part2" CREATE TABLE fail_part PARTITION OF range_parted2 FOR VALUES FROM (10) TO (50); ERROR: partition "fail_part" would overlap partition "part2" +-- Create a default partition for range partitioned table +CREATE TABLE range2_default PARTITION OF range_parted2 DEFAULT; +-- More than one default partition is not allowed, so this should give error +CREATE TABLE fail_default_part PARTITION OF range_parted2 DEFAULT; +ERROR: partition "fail_default_part" conflicts with existing default partition "range2_default" +-- Check if the range for default partitions overlap +INSERT INTO range_parted2 VALUES (85); +CREATE TABLE fail_part PARTITION OF range_parted2 FOR VALUES FROM (80) TO (90); +ERROR: updated partition constraint for default partition "range2_default" would be violated by some row +CREATE TABLE part4 PARTITION OF range_parted2 FOR VALUES FROM (90) TO (100); -- now check for multi-column range partition key CREATE TABLE range_parted3 ( a int, @@ -598,11 +659,29 @@ CREATE TABLE part11 PARTITION OF range_parted3 FOR VALUES FROM (1, 1) TO (1, 10) CREATE TABLE part12 PARTITION OF range_parted3 FOR VALUES FROM (1, 10) TO (1, maxvalue); CREATE TABLE fail_part PARTITION OF range_parted3 FOR VALUES FROM (1, 10) TO (1, 20); ERROR: partition "fail_part" would overlap partition "part12" +CREATE TABLE range3_default PARTITION OF range_parted3 DEFAULT; -- cannot create a partition that says column b is allowed to range -- from -infinity to +infinity, while there exist partitions that have -- more 
specific ranges CREATE TABLE fail_part PARTITION OF range_parted3 FOR VALUES FROM (1, minvalue) TO (1, maxvalue); ERROR: partition "fail_part" would overlap partition "part10" +-- check for partition bound overlap and other invalid specifications for the hash partition +CREATE TABLE hash_parted2 ( + a varchar +) PARTITION BY HASH (a); +CREATE TABLE h2part_1 PARTITION OF hash_parted2 FOR VALUES WITH (MODULUS 4, REMAINDER 2); +CREATE TABLE h2part_2 PARTITION OF hash_parted2 FOR VALUES WITH (MODULUS 8, REMAINDER 0); +CREATE TABLE h2part_3 PARTITION OF hash_parted2 FOR VALUES WITH (MODULUS 8, REMAINDER 4); +CREATE TABLE h2part_4 PARTITION OF hash_parted2 FOR VALUES WITH (MODULUS 8, REMAINDER 5); +-- overlap with part_4 +CREATE TABLE fail_part PARTITION OF hash_parted2 FOR VALUES WITH (MODULUS 2, REMAINDER 1); +ERROR: partition "fail_part" would overlap partition "h2part_4" +-- modulus must be greater than zero +CREATE TABLE fail_part PARTITION OF hash_parted2 FOR VALUES WITH (MODULUS 0, REMAINDER 1); +ERROR: modulus for hash partition must be a positive integer +-- remainder must be greater than or equal to zero and less than modulus +CREATE TABLE fail_part PARTITION OF hash_parted2 FOR VALUES WITH (MODULUS 8, REMAINDER 8); +ERROR: remainder for hash partition must be less than modulus -- check schema propagation from parent CREATE TABLE parted ( a text, @@ -644,9 +723,37 @@ SELECT conislocal, coninhcount FROM pg_constraint WHERE conrelid = 'part_b'::reg -- specify PARTITION BY for a partition CREATE TABLE fail_part_col_not_found PARTITION OF parted FOR VALUES IN ('c') PARTITION BY RANGE (c); ERROR: column "c" named in partition key does not exist +LINE 1: ...TITION OF parted FOR VALUES IN ('c') PARTITION BY RANGE (c); + ^ CREATE TABLE part_c PARTITION OF parted (b WITH OPTIONS NOT NULL DEFAULT 0) FOR VALUES IN ('c') PARTITION BY RANGE ((b)); -- create a level-2 partition CREATE TABLE part_c_1_10 PARTITION OF part_c FOR VALUES FROM (1) TO (10); +-- check that NOT NULL and default value are inherited correctly +create table parted_notnull_inh_test (a int default 1, b int not null default 0) partition by list (a); +create table parted_notnull_inh_test1 partition of parted_notnull_inh_test (a not null, b default 1) for values in (1); +insert into parted_notnull_inh_test (b) values (null); +ERROR: null value in column "b" violates not-null constraint +DETAIL: Failing row contains (1, null). 
+-- note that while b's default is overriden, a's default is preserved +\d parted_notnull_inh_test1 + Table "public.parted_notnull_inh_test1" + Column | Type | Collation | Nullable | Default +--------+---------+-----------+----------+--------- + a | integer | | not null | 1 + b | integer | | not null | 1 +Partition of: parted_notnull_inh_test FOR VALUES IN (1) + +drop table parted_notnull_inh_test; +-- check for a conflicting COLLATE clause +create table parted_collate_must_match (a text collate "C", b text collate "C") + partition by range (a); +-- on the partition key +create table parted_collate_must_match1 partition of parted_collate_must_match + (a collate "POSIX") for values from ('a') to ('m'); +-- on another column +create table parted_collate_must_match2 partition of parted_collate_must_match + (b collate "POSIX") for values from ('m') to ('z'); +drop table parted_collate_must_match; -- Partition bound in describe output \d+ part_b Table "public.part_b" @@ -655,7 +762,7 @@ CREATE TABLE part_c_1_10 PARTITION OF part_c FOR VALUES FROM (1) TO (10); a | text | | | | extended | | b | integer | | not null | 1 | plain | | Partition of: parted FOR VALUES IN ('b') -Partition constraint: ((a IS NOT NULL) AND (a = ANY (ARRAY['b'::text]))) +Partition constraint: ((a IS NOT NULL) AND (a = 'b'::text)) Check constraints: "check_a" CHECK (length(a) > 0) "part_b_b_check" CHECK (b >= 0) @@ -668,7 +775,7 @@ Check constraints: a | text | | | | extended | | b | integer | | not null | 0 | plain | | Partition of: parted FOR VALUES IN ('c') -Partition constraint: ((a IS NOT NULL) AND (a = ANY (ARRAY['c'::text]))) +Partition constraint: ((a IS NOT NULL) AND (a = 'c'::text)) Partition key: RANGE (b) Check constraints: "check_a" CHECK (length(a) > 0) @@ -682,7 +789,7 @@ Partitions: part_c_1_10 FOR VALUES FROM (1) TO (10) a | text | | | | extended | | b | integer | | not null | 0 | plain | | Partition of: part_c FOR VALUES FROM (1) TO (10) -Partition constraint: ((a IS NOT NULL) AND (a = ANY (ARRAY['c'::text])) AND (b IS NOT NULL) AND (b >= 1) AND (b < 10)) +Partition constraint: ((a IS NOT NULL) AND (a = 'c'::text) AND (b IS NOT NULL) AND (b >= 1) AND (b < 10)) Check constraints: "check_a" CHECK (length(a) > 0) @@ -701,9 +808,17 @@ Check constraints: "check_a" CHECK (length(a) > 0) Number of partitions: 3 (Use \d+ to list them.) +\d hash_parted + Table "public.hash_parted" + Column | Type | Collation | Nullable | Default +--------+---------+-----------+----------+--------- + a | integer | | | +Partition key: HASH (a) +Number of partitions: 3 (Use \d+ to list them.) 
+ -- check that we get the expected partition constraints CREATE TABLE range_parted4 (a int, b int, c int) PARTITION BY RANGE (abs(a), abs(b), c); -CREATE TABLE unbounded_range_part PARTITION OF range_parted4 FOR VALUES FROM (MINVALUE, 0, 0) TO (MAXVALUE, 0, 0); +CREATE TABLE unbounded_range_part PARTITION OF range_parted4 FOR VALUES FROM (MINVALUE, MINVALUE, MINVALUE) TO (MAXVALUE, MAXVALUE, MAXVALUE); \d+ unbounded_range_part Table "public.unbounded_range_part" Column | Type | Collation | Nullable | Default | Storage | Stats target | Description @@ -711,11 +826,11 @@ CREATE TABLE unbounded_range_part PARTITION OF range_parted4 FOR VALUES FROM (MI a | integer | | | | plain | | b | integer | | | | plain | | c | integer | | | | plain | | -Partition of: range_parted4 FOR VALUES FROM (MINVALUE, 0, 0) TO (MAXVALUE, 0, 0) +Partition of: range_parted4 FOR VALUES FROM (MINVALUE, MINVALUE, MINVALUE) TO (MAXVALUE, MAXVALUE, MAXVALUE) Partition constraint: ((abs(a) IS NOT NULL) AND (abs(b) IS NOT NULL) AND (c IS NOT NULL)) DROP TABLE unbounded_range_part; -CREATE TABLE range_parted4_1 PARTITION OF range_parted4 FOR VALUES FROM (MINVALUE, 0, 0) TO (1, MAXVALUE, 0); +CREATE TABLE range_parted4_1 PARTITION OF range_parted4 FOR VALUES FROM (MINVALUE, MINVALUE, MINVALUE) TO (1, MAXVALUE, MAXVALUE); \d+ range_parted4_1 Table "public.range_parted4_1" Column | Type | Collation | Nullable | Default | Storage | Stats target | Description @@ -723,7 +838,7 @@ CREATE TABLE range_parted4_1 PARTITION OF range_parted4 FOR VALUES FROM (MINVALU a | integer | | | | plain | | b | integer | | | | plain | | c | integer | | | | plain | | -Partition of: range_parted4 FOR VALUES FROM (MINVALUE, 0, 0) TO (1, MAXVALUE, 0) +Partition of: range_parted4 FOR VALUES FROM (MINVALUE, MINVALUE, MINVALUE) TO (1, MAXVALUE, MAXVALUE) Partition constraint: ((abs(a) IS NOT NULL) AND (abs(b) IS NOT NULL) AND (c IS NOT NULL) AND (abs(a) <= 1)) CREATE TABLE range_parted4_2 PARTITION OF range_parted4 FOR VALUES FROM (3, 4, 5) TO (6, 7, MAXVALUE); @@ -737,7 +852,7 @@ CREATE TABLE range_parted4_2 PARTITION OF range_parted4 FOR VALUES FROM (3, 4, 5 Partition of: range_parted4 FOR VALUES FROM (3, 4, 5) TO (6, 7, MAXVALUE) Partition constraint: ((abs(a) IS NOT NULL) AND (abs(b) IS NOT NULL) AND (c IS NOT NULL) AND ((abs(a) > 3) OR ((abs(a) = 3) AND (abs(b) > 4)) OR ((abs(a) = 3) AND (abs(b) = 4) AND (c >= 5))) AND ((abs(a) < 6) OR ((abs(a) = 6) AND (abs(b) <= 7)))) -CREATE TABLE range_parted4_3 PARTITION OF range_parted4 FOR VALUES FROM (6, 8, MINVALUE) TO (9, MAXVALUE, 0); +CREATE TABLE range_parted4_3 PARTITION OF range_parted4 FOR VALUES FROM (6, 8, MINVALUE) TO (9, MAXVALUE, MAXVALUE); \d+ range_parted4_3 Table "public.range_parted4_3" Column | Type | Collation | Nullable | Default | Storage | Stats target | Description @@ -745,12 +860,26 @@ CREATE TABLE range_parted4_3 PARTITION OF range_parted4 FOR VALUES FROM (6, 8, M a | integer | | | | plain | | b | integer | | | | plain | | c | integer | | | | plain | | -Partition of: range_parted4 FOR VALUES FROM (6, 8, MINVALUE) TO (9, MAXVALUE, 0) +Partition of: range_parted4 FOR VALUES FROM (6, 8, MINVALUE) TO (9, MAXVALUE, MAXVALUE) Partition constraint: ((abs(a) IS NOT NULL) AND (abs(b) IS NOT NULL) AND (c IS NOT NULL) AND ((abs(a) > 6) OR ((abs(a) = 6) AND (abs(b) >= 8))) AND (abs(a) <= 9)) DROP TABLE range_parted4; +-- user-defined operator class in partition key +CREATE FUNCTION my_int4_sort(int4,int4) RETURNS int LANGUAGE sql + AS $$ SELECT CASE WHEN $1 = $2 THEN 0 WHEN $1 > $2 THEN 1 ELSE -1 
END; $$; +CREATE OPERATOR CLASS test_int4_ops FOR TYPE int4 USING btree AS + OPERATOR 1 < (int4,int4), OPERATOR 2 <= (int4,int4), + OPERATOR 3 = (int4,int4), OPERATOR 4 >= (int4,int4), + OPERATOR 5 > (int4,int4), FUNCTION 1 my_int4_sort(int4,int4); +CREATE TABLE partkey_t (a int4) PARTITION BY RANGE (a test_int4_ops); +CREATE TABLE partkey_t_1 PARTITION OF partkey_t FOR VALUES FROM (0) TO (1000); +INSERT INTO partkey_t VALUES (100); +INSERT INTO partkey_t VALUES (200); -- cleanup DROP TABLE parted, list_parted, range_parted, list_parted2, range_parted2, range_parted3; +DROP TABLE partkey_t, hash_parted, hash_parted2; +DROP OPERATOR CLASS test_int4_ops USING btree; +DROP FUNCTION my_int4_sort(int4,int4); -- comments on partitioned tables columns CREATE TABLE parted_col_comment (a int, b text) PARTITION BY LIST (a); COMMENT ON TABLE parted_col_comment IS 'Am partitioned table'; @@ -768,5 +897,58 @@ SELECT obj_description('parted_col_comment'::regclass); a | integer | | | | plain | | Partition key b | text | | | | extended | | Partition key: LIST (a) +Number of partitions: 0 DROP TABLE parted_col_comment; +-- list partitioning on array type column +CREATE TABLE arrlp (a int[]) PARTITION BY LIST (a); +CREATE TABLE arrlp12 PARTITION OF arrlp FOR VALUES IN ('{1}', '{2}'); +\d+ arrlp12 + Table "public.arrlp12" + Column | Type | Collation | Nullable | Default | Storage | Stats target | Description +--------+-----------+-----------+----------+---------+----------+--------------+------------- + a | integer[] | | | | extended | | +Partition of: arrlp FOR VALUES IN ('{1}', '{2}') +Partition constraint: ((a IS NOT NULL) AND ((a = '{1}'::integer[]) OR (a = '{2}'::integer[]))) + +DROP TABLE arrlp; +-- partition on boolean column +create table boolspart (a bool) partition by list (a); +create table boolspart_t partition of boolspart for values in (true); +create table boolspart_f partition of boolspart for values in (false); +\d+ boolspart + Table "public.boolspart" + Column | Type | Collation | Nullable | Default | Storage | Stats target | Description +--------+---------+-----------+----------+---------+---------+--------------+------------- + a | boolean | | | | plain | | +Partition key: LIST (a) +Partitions: boolspart_f FOR VALUES IN (false), + boolspart_t FOR VALUES IN (true) + +drop table boolspart; +-- partitions mixing temporary and permanent relations +create table perm_parted (a int) partition by list (a); +create temporary table temp_parted (a int) partition by list (a); +create table perm_part partition of temp_parted default; -- error +ERROR: cannot create a permanent relation as partition of temporary relation "temp_parted" +create temp table temp_part partition of perm_parted default; -- error +ERROR: cannot create a temporary relation as partition of permanent relation "perm_parted" +create temp table temp_part partition of temp_parted default; -- ok +drop table perm_parted cascade; +drop table temp_parted cascade; +-- check that adding partitions to a table while it is being used is prevented +create table tab_part_create (a int) partition by list (a); +create or replace function func_part_create() returns trigger + language plpgsql as $$ + begin + execute 'create table tab_part_create_1 partition of tab_part_create for values in (1)'; + return null; + end $$; +create trigger trig_part_create before insert on tab_part_create + for each statement execute procedure func_part_create(); +insert into tab_part_create values (1); +ERROR: cannot CREATE TABLE .. 
PARTITION OF "tab_part_create" because it is being used by active queries in this session +CONTEXT: SQL statement "create table tab_part_create_1 partition of tab_part_create for values in (1)" +PL/pgSQL function func_part_create() line 3 at EXECUTE +drop table tab_part_create; +drop function func_part_create(); diff --git a/src/test/regress/expected/create_table_like.out b/src/test/regress/expected/create_table_like.out index 3f405c94ce..8d4543bfe8 100644 --- a/src/test/regress/expected/create_table_like.out +++ b/src/test/regress/expected/create_table_like.out @@ -66,13 +66,13 @@ SELECT * FROM inhg; /* Two records with three columns in order x=x, xx=text, y=y (2 rows) DROP TABLE inhg; -CREATE TABLE test_like_id_1 (a int GENERATED ALWAYS AS IDENTITY, b text); +CREATE TABLE test_like_id_1 (a bigint GENERATED ALWAYS AS IDENTITY, b text); \d test_like_id_1 Table "public.test_like_id_1" - Column | Type | Collation | Nullable | Default ---------+---------+-----------+----------+------------------------------ - a | integer | | not null | generated always as identity - b | text | | | + Column | Type | Collation | Nullable | Default +--------+--------+-----------+----------+------------------------------ + a | bigint | | not null | generated always as identity + b | text | | | INSERT INTO test_like_id_1 (b) VALUES ('b1'); SELECT * FROM test_like_id_1; @@ -83,11 +83,11 @@ SELECT * FROM test_like_id_1; CREATE TABLE test_like_id_2 (LIKE test_like_id_1); \d test_like_id_2 - Table "public.test_like_id_2" - Column | Type | Collation | Nullable | Default ---------+---------+-----------+----------+--------- - a | integer | | not null | - b | text | | | + Table "public.test_like_id_2" + Column | Type | Collation | Nullable | Default +--------+--------+-----------+----------+--------- + a | bigint | | not null | + b | text | | | INSERT INTO test_like_id_2 (b) VALUES ('b2'); ERROR: null value in column "a" violates not-null constraint @@ -100,10 +100,10 @@ SELECT * FROM test_like_id_2; -- identity was not copied CREATE TABLE test_like_id_3 (LIKE test_like_id_1 INCLUDING IDENTITY); \d test_like_id_3 Table "public.test_like_id_3" - Column | Type | Collation | Nullable | Default ---------+---------+-----------+----------+------------------------------ - a | integer | | not null | generated always as identity - b | text | | | + Column | Type | Collation | Nullable | Default +--------+--------+-----------+----------+------------------------------ + a | bigint | | not null | generated always as identity + b | text | | | INSERT INTO test_like_id_3 (b) VALUES ('b3'); SELECT * FROM test_like_id_3; -- identity was copied and applied @@ -137,6 +137,8 @@ DROP TABLE inhz; CREATE TABLE ctlt1 (a text CHECK (length(a) > 2) PRIMARY KEY, b text); CREATE INDEX ctlt1_b_key ON ctlt1 (b); CREATE INDEX ctlt1_fnidx ON ctlt1 ((a || b)); +CREATE STATISTICS ctlt1_a_b_stat ON a,b FROM ctlt1; +COMMENT ON STATISTICS ctlt1_a_b_stat IS 'ab stats'; COMMENT ON COLUMN ctlt1.a IS 'A'; COMMENT ON COLUMN ctlt1.b IS 'B'; COMMENT ON CONSTRAINT ctlt1_a_check ON ctlt1 IS 't1_a_check'; @@ -240,6 +242,8 @@ Indexes: "ctlt_all_expr_idx" btree ((a || b)) Check constraints: "ctlt1_a_check" CHECK (length(a) > 2) +Statistics objects: + "public"."ctlt_all_a_b_stat" (ndistinct, dependencies) ON a, b FROM ctlt_all SELECT c.relname, objsubid, description FROM pg_description, pg_index i, pg_class c WHERE classoid = 'pg_class'::regclass AND objoid = i.indexrelid AND c.oid = i.indexrelid AND i.indrelid = 'ctlt_all'::regclass ORDER BY c.relname, objsubid; relname 
| objsubid | description @@ -248,6 +252,12 @@ SELECT c.relname, objsubid, description FROM pg_description, pg_index i, pg_clas ctlt_all_pkey | 0 | index pkey (2 rows) +SELECT s.stxname, objsubid, description FROM pg_description, pg_statistic_ext s WHERE classoid = 'pg_statistic_ext'::regclass AND objoid = s.oid AND s.stxrelid = 'ctlt_all'::regclass ORDER BY s.stxname, objsubid; + stxname | objsubid | description +-------------------+----------+------------- + ctlt_all_a_b_stat | 0 | ab stats +(1 row) + CREATE TABLE inh_error1 () INHERITS (ctlt1, ctlt4); NOTICE: merging multiple inherited definitions of column "a" ERROR: inherited column "a" has a storage parameter conflict diff --git a/src/test/regress/expected/create_type.out b/src/test/regress/expected/create_type.out index 5886a1f37f..2f7d5f94d7 100644 --- a/src/test/regress/expected/create_type.out +++ b/src/test/regress/expected/create_type.out @@ -83,6 +83,34 @@ SELECT * FROM default_test; zippo | 42 (1 row) +-- invalid: non-lowercase quoted identifiers +CREATE TYPE case_int42 ( + "Internallength" = 4, + "Input" = int42_in, + "Output" = int42_out, + "Alignment" = int4, + "Default" = 42, + "Passedbyvalue" +); +WARNING: type attribute "Internallength" not recognized +LINE 2: "Internallength" = 4, + ^ +WARNING: type attribute "Input" not recognized +LINE 3: "Input" = int42_in, + ^ +WARNING: type attribute "Output" not recognized +LINE 4: "Output" = int42_out, + ^ +WARNING: type attribute "Alignment" not recognized +LINE 5: "Alignment" = int4, + ^ +WARNING: type attribute "Default" not recognized +LINE 6: "Default" = 42, + ^ +WARNING: type attribute "Passedbyvalue" not recognized +LINE 7: "Passedbyvalue" + ^ +ERROR: type input function must be specified -- Test stand-alone composite type CREATE TYPE default_test_row AS (f1 text_w_default, f2 int42); CREATE FUNCTION get_default_test() RETURNS SETOF default_test_row AS ' @@ -154,3 +182,32 @@ WHERE attrelid = 'mytab'::regclass AND attnum > 0; widget(42,13) (1 row) +-- might as well exercise the widget type while we're here +INSERT INTO mytab VALUES ('(1,2,3)'), ('(-44,5.5,12)'); +TABLE mytab; + foo +-------------- + (1,2,3) + (-44,5.5,12) +(2 rows) + +-- and test format_type() a bit more, too +select format_type('varchar'::regtype, 42); + format_type +----------------------- + character varying(38) +(1 row) + +select format_type('bpchar'::regtype, null); + format_type +------------- + character +(1 row) + +-- this behavior difference is intentional +select format_type('bpchar'::regtype, -1); + format_type +------------- + bpchar +(1 row) + diff --git a/src/test/regress/expected/create_view.out b/src/test/regress/expected/create_view.out index f909a3cefe..141fc6da62 100644 --- a/src/test/regress/expected/create_view.out +++ b/src/test/regress/expected/create_view.out @@ -83,7 +83,7 @@ CREATE VIEW temp_view_test.v3_temp AS SELECT * FROM temp_table; NOTICE: view "v3_temp" will be a temporary view ERROR: cannot create temporary relation in non-temporary schema -- should fail -CREATE SCHEMA test_schema +CREATE SCHEMA test_view_schema CREATE TEMP VIEW testview AS SELECT 1; ERROR: cannot create temporary relation in non-temporary schema -- joins: if any of the join relations are temporary, the view @@ -1605,7 +1605,7 @@ select 'foo'::text = any((select array['abc','def','foo']::text[])); -- fail ERROR: operator does not exist: text = text[] LINE 1: select 'foo'::text = any((select array['abc','def','foo']::t... ^ -HINT: No operator matches the given name and argument type(s). 
You might need to add explicit type casts. +HINT: No operator matches the given name and argument types. You might need to add explicit type casts. select 'foo'::text = any((select array['abc','def','foo']::text[])::text[]); ?column? ---------- diff --git a/src/test/regress/expected/domain.out b/src/test/regress/expected/domain.out index 3acc696863..0b5a9041b0 100644 --- a/src/test/regress/expected/domain.out +++ b/src/test/regress/expected/domain.out @@ -198,6 +198,119 @@ select pg_typeof('{1,2,3}'::dia || 42); -- should be int[] not dia (1 row) drop domain dia; +-- Test domains over composites +create type comptype as (r float8, i float8); +create domain dcomptype as comptype; +create table dcomptable (d1 dcomptype unique); +insert into dcomptable values (row(1,2)::dcomptype); +insert into dcomptable values (row(3,4)::comptype); +insert into dcomptable values (row(1,2)::dcomptype); -- fail on uniqueness +ERROR: duplicate key value violates unique constraint "dcomptable_d1_key" +DETAIL: Key (d1)=((1,2)) already exists. +insert into dcomptable (d1.r) values(11); +select * from dcomptable; + d1 +------- + (1,2) + (3,4) + (11,) +(3 rows) + +select (d1).r, (d1).i, (d1).* from dcomptable; + r | i | r | i +----+---+----+--- + 1 | 2 | 1 | 2 + 3 | 4 | 3 | 4 + 11 | | 11 | +(3 rows) + +update dcomptable set d1.r = (d1).r + 1 where (d1).i > 0; +select * from dcomptable; + d1 +------- + (11,) + (2,2) + (4,4) +(3 rows) + +alter domain dcomptype add constraint c1 check ((value).r <= (value).i); +alter domain dcomptype add constraint c2 check ((value).r > (value).i); -- fail +ERROR: column "d1" of table "dcomptable" contains values that violate the new constraint +select row(2,1)::dcomptype; -- fail +ERROR: value for domain dcomptype violates check constraint "c1" +insert into dcomptable values (row(1,2)::comptype); +insert into dcomptable values (row(2,1)::comptype); -- fail +ERROR: value for domain dcomptype violates check constraint "c1" +insert into dcomptable (d1.r) values(99); +insert into dcomptable (d1.r, d1.i) values(99, 100); +insert into dcomptable (d1.r, d1.i) values(100, 99); -- fail +ERROR: value for domain dcomptype violates check constraint "c1" +update dcomptable set d1.r = (d1).r + 1 where (d1).i > 0; -- fail +ERROR: value for domain dcomptype violates check constraint "c1" +update dcomptable set d1.r = (d1).r - 1, d1.i = (d1).i + 1 where (d1).i > 0; +select * from dcomptable; + d1 +---------- + (11,) + (99,) + (1,3) + (3,5) + (0,3) + (98,101) +(6 rows) + +explain (verbose, costs off) + update dcomptable set d1.r = (d1).r - 1, d1.i = (d1).i + 1 where (d1).i > 0; + QUERY PLAN +----------------------------------------------------------------------------------------------- + Update on public.dcomptable + -> Seq Scan on public.dcomptable + Output: ROW(((d1).r - '1'::double precision), ((d1).i + '1'::double precision)), ctid + Filter: ((dcomptable.d1).i > '0'::double precision) +(4 rows) + +create rule silly as on delete to dcomptable do instead + update dcomptable set d1.r = (d1).r - 1, d1.i = (d1).i + 1 where (d1).i > 0; +\d+ dcomptable + Table "public.dcomptable" + Column | Type | Collation | Nullable | Default | Storage | Stats target | Description +--------+-----------+-----------+----------+---------+----------+--------------+------------- + d1 | dcomptype | | | | extended | | +Indexes: + "dcomptable_d1_key" UNIQUE CONSTRAINT, btree (d1) +Rules: + silly AS + ON DELETE TO dcomptable DO INSTEAD UPDATE dcomptable SET d1.r = (dcomptable.d1).r - 1::double precision, d1.i = 
(dcomptable.d1).i + 1::double precision + WHERE (dcomptable.d1).i > 0::double precision + +drop table dcomptable; +drop type comptype cascade; +NOTICE: drop cascades to type dcomptype +-- check altering and dropping columns used by domain constraints +create type comptype as (r float8, i float8); +create domain dcomptype as comptype; +alter domain dcomptype add constraint c1 check ((value).r > 0); +comment on constraint c1 on domain dcomptype is 'random commentary'; +select row(0,1)::dcomptype; -- fail +ERROR: value for domain dcomptype violates check constraint "c1" +alter type comptype alter attribute r type varchar; -- fail +ERROR: operator does not exist: character varying > double precision +HINT: No operator matches the given name and argument types. You might need to add explicit type casts. +alter type comptype alter attribute r type bigint; +alter type comptype drop attribute r; -- fail +ERROR: cannot drop column r of composite type comptype because other objects depend on it +DETAIL: constraint c1 depends on column r of composite type comptype +HINT: Use DROP ... CASCADE to drop the dependent objects too. +alter type comptype drop attribute i; +select conname, obj_description(oid, 'pg_constraint') from pg_constraint + where contypid = 'dcomptype'::regtype; -- check comment is still there + conname | obj_description +---------+------------------- + c1 | random commentary +(1 row) + +drop type comptype cascade; +NOTICE: drop cascades to type dcomptype -- Test domains over arrays of composite create type comptype as (r float8, i float8); create domain dcomptypea as comptype[]; @@ -310,6 +423,101 @@ Rules: drop table dcomptable; drop type comptype cascade; NOTICE: drop cascades to type dcomptypea +-- Test arrays over domains +create domain posint as int check (value > 0); +create table pitable (f1 posint[]); +insert into pitable values(array[42]); +insert into pitable values(array[-1]); -- fail +ERROR: value for domain posint violates check constraint "posint_check" +insert into pitable values('{0}'); -- fail +ERROR: value for domain posint violates check constraint "posint_check" +LINE 1: insert into pitable values('{0}'); + ^ +update pitable set f1[1] = f1[1] + 1; +update pitable set f1[1] = 0; -- fail +ERROR: value for domain posint violates check constraint "posint_check" +select * from pitable; + f1 +------ + {43} +(1 row) + +drop table pitable; +create domain vc4 as varchar(4); +create table vc4table (f1 vc4[]); +insert into vc4table values(array['too long']); -- fail +ERROR: value too long for type character varying(4) +insert into vc4table values(array['too long']::vc4[]); -- cast truncates +select * from vc4table; + f1 +---------- + {"too "} +(1 row) + +drop table vc4table; +drop type vc4; +-- You can sort of fake arrays-of-arrays by putting a domain in between +create domain dposinta as posint[]; +create table dposintatable (f1 dposinta[]); +insert into dposintatable values(array[array[42]]); -- fail +ERROR: column "f1" is of type dposinta[] but expression is of type integer[] +LINE 1: insert into dposintatable values(array[array[42]]); + ^ +HINT: You will need to rewrite or cast the expression. +insert into dposintatable values(array[array[42]::posint[]]); -- still fail +ERROR: column "f1" is of type dposinta[] but expression is of type posint[] +LINE 1: insert into dposintatable values(array[array[42]::posint[]])... + ^ +HINT: You will need to rewrite or cast the expression. 
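-- Reviewer sketch (not part of the diff): the two failures above are type-resolution
-- errors, not domain-constraint errors.  ARRAY[ARRAY[42]] resolves to integer[] (or to
-- posint[] with the inner cast), never to dposinta[], so each element has to be cast to
-- the domain, as the working insert just below does, before the outer ARRAY constructor
-- can build a dposinta[].  A minimal standalone illustration; it assumes a fresh session
-- and re-creates the same posint/dposinta definitions the test uses.
create domain posint as int check (value > 0);
create domain dposinta as posint[];
select pg_typeof(array[array[42]::dposinta, array[7,8]::dposinta]);  -- dposinta[]
select ((array[array[42]::dposinta])[1])[1];                         -- 42, as posint
drop domain dposinta;
drop domain posint;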
+insert into dposintatable values(array[array[42]::dposinta]); -- but this works +select f1, f1[1], (f1[1])[1] from dposintatable; + f1 | f1 | f1 +----------+------+---- + {"{42}"} | {42} | 42 +(1 row) + +select pg_typeof(f1) from dposintatable; + pg_typeof +------------ + dposinta[] +(1 row) + +select pg_typeof(f1[1]) from dposintatable; + pg_typeof +----------- + dposinta +(1 row) + +select pg_typeof(f1[1][1]) from dposintatable; + pg_typeof +----------- + dposinta +(1 row) + +select pg_typeof((f1[1])[1]) from dposintatable; + pg_typeof +----------- + posint +(1 row) + +update dposintatable set f1[2] = array[99]; +select f1, f1[1], (f1[2])[1] from dposintatable; + f1 | f1 | f1 +-----------------+------+---- + {"{42}","{99}"} | {42} | 99 +(1 row) + +-- it'd be nice if you could do something like this, but for now you can't: +update dposintatable set f1[2][1] = array[97]; +ERROR: wrong number of array subscripts +-- maybe someday we can make this syntax work: +update dposintatable set (f1[2])[1] = array[98]; +ERROR: syntax error at or near "[" +LINE 1: update dposintatable set (f1[2])[1] = array[98]; + ^ +drop table dposintatable; +drop domain posint cascade; +NOTICE: drop cascades to type dposinta -- Test not-null restrictions create domain dnotnull varchar(15) NOT NULL; create domain dnull varchar(15); @@ -437,8 +645,8 @@ alter domain dnotnulltest drop not null; update domnotnull set col1 = null; drop domain dnotnulltest cascade; NOTICE: drop cascades to 2 other objects -DETAIL: drop cascades to table domnotnull column col1 -drop cascades to table domnotnull column col2 +DETAIL: drop cascades to column col1 of table domnotnull +drop cascades to column col2 of table domnotnull -- Test ALTER DOMAIN .. DEFAULT .. create table domdeftest (col1 ddef1); insert into domdeftest default values; @@ -667,6 +875,14 @@ insert into ddtest2 values('{(-1)}'); alter domain posint add constraint c1 check(value >= 0); ERROR: cannot alter type "posint" because column "ddtest2.f1" uses it drop table ddtest2; +-- Likewise for domains within domains over composite +create domain ddtest1d as ddtest1; +create table ddtest2(f1 ddtest1d); +insert into ddtest2 values('(-1)'); +alter domain posint add constraint c1 check(value >= 0); +ERROR: cannot alter type "posint" because column "ddtest2.f1" uses it +drop table ddtest2; +drop domain ddtest1d; -- Likewise for domains within domains over array of composite create domain ddtest1d as ddtest1[]; create table ddtest2(f1 ddtest1d); diff --git a/src/test/regress/expected/enum.out b/src/test/regress/expected/enum.out index 0e6030443f..4f839ce027 100644 --- a/src/test/regress/expected/enum.out +++ b/src/test/regress/expected/enum.out @@ -633,20 +633,39 @@ ERROR: unsafe use of new value "bad" of enum type bogon LINE 1: SELECT 'bad'::bogon; ^ HINT: New enum values must be committed before they can be used. 
+ROLLBACK; +-- but a renamed value is safe to use later in same transaction +BEGIN; +ALTER TYPE bogus RENAME VALUE 'good' to 'bad'; +SELECT 'bad'::bogus; + bogus +------- + bad +(1 row) + ROLLBACK; DROP TYPE bogus; --- check that we can add new values to existing enums in a transaction --- and use them, if the type is new as well +-- check that values created during CREATE TYPE can be used in any case BEGIN; -CREATE TYPE bogus AS ENUM('good'); -ALTER TYPE bogus ADD VALUE 'bad'; -ALTER TYPE bogus ADD VALUE 'ugly'; -SELECT enum_range(null::bogus); +CREATE TYPE bogus AS ENUM('good','bad','ugly'); +ALTER TYPE bogus RENAME TO bogon; +select enum_range(null::bogon); enum_range ----------------- {good,bad,ugly} (1 row) +ROLLBACK; +-- ideally, we'd allow this usage; but it requires keeping track of whether +-- the enum type was created in the current transaction, which is expensive +BEGIN; +CREATE TYPE bogus AS ENUM('good'); +ALTER TYPE bogus RENAME TO bogon; +ALTER TYPE bogon ADD VALUE 'bad'; +ALTER TYPE bogon ADD VALUE 'ugly'; +select enum_range(null::bogon); -- fails +ERROR: unsafe use of new value "bad" of enum type bogon +HINT: New enum values must be committed before they can be used. ROLLBACK; -- -- Cleanup diff --git a/src/test/regress/expected/equivclass.out b/src/test/regress/expected/equivclass.out index a96b2a1b07..c448d85dec 100644 --- a/src/test/regress/expected/equivclass.out +++ b/src/test/regress/expected/equivclass.out @@ -421,3 +421,21 @@ reset session authorization; revoke select on ec0 from regress_user_ectest; revoke select on ec1 from regress_user_ectest; drop user regress_user_ectest; +-- check that X=X is converted to X IS NOT NULL when appropriate +explain (costs off) + select * from tenk1 where unique1 = unique1 and unique2 = unique2; + QUERY PLAN +------------------------------------------------------------- + Seq Scan on tenk1 + Filter: ((unique1 IS NOT NULL) AND (unique2 IS NOT NULL)) +(2 rows) + +-- this could be converted, but isn't at present +explain (costs off) + select * from tenk1 where unique1 = unique1 or unique2 = unique2; + QUERY PLAN +-------------------------------------------------------- + Seq Scan on tenk1 + Filter: ((unique1 = unique1) OR (unique2 = unique2)) +(2 rows) + diff --git a/src/test/regress/expected/event_trigger.out b/src/test/regress/expected/event_trigger.out index 906dcb8b31..0e32d5c427 100644 --- a/src/test/regress/expected/event_trigger.out +++ b/src/test/regress/expected/event_trigger.out @@ -27,7 +27,7 @@ create event trigger regress_event_trigger on ddl_command_start execute procedure test_event_trigger(); -- OK create event trigger regress_event_trigger_end on ddl_command_end - execute procedure test_event_trigger(); + execute function test_event_trigger(); -- should fail, food is not a valid filter variable create event trigger regress_event_trigger2 on ddl_command_start when food in ('sandwich') @@ -88,16 +88,69 @@ create event trigger regress_event_trigger_noperms on ddl_command_start ERROR: permission denied to create event trigger "regress_event_trigger_noperms" HINT: Must be superuser to create an event trigger. 
reset role; --- all OK -alter event trigger regress_event_trigger enable replica; -alter event trigger regress_event_trigger enable always; -alter event trigger regress_event_trigger enable; +-- test enabling and disabling alter event trigger regress_event_trigger disable; --- regress_event_trigger2 and regress_event_trigger_end should fire, but not --- regress_event_trigger +-- fires _trigger2 and _trigger_end should fire, but not _trigger create table event_trigger_fire1 (a int); NOTICE: test_event_trigger: ddl_command_start CREATE TABLE NOTICE: test_event_trigger: ddl_command_end CREATE TABLE +alter event trigger regress_event_trigger enable; +set session_replication_role = replica; +-- fires nothing +create table event_trigger_fire2 (a int); +alter event trigger regress_event_trigger enable replica; +-- fires only _trigger +create table event_trigger_fire3 (a int); +NOTICE: test_event_trigger: ddl_command_start CREATE TABLE +alter event trigger regress_event_trigger enable always; +-- fires only _trigger +create table event_trigger_fire4 (a int); +NOTICE: test_event_trigger: ddl_command_start CREATE TABLE +reset session_replication_role; +-- fires all three +create table event_trigger_fire5 (a int); +NOTICE: test_event_trigger: ddl_command_start CREATE TABLE +NOTICE: test_event_trigger: ddl_command_start CREATE TABLE +NOTICE: test_event_trigger: ddl_command_end CREATE TABLE +-- non-top-level command +create function f1() returns int +language plpgsql +as $$ +begin + create table event_trigger_fire6 (a int); + return 0; +end $$; +NOTICE: test_event_trigger: ddl_command_start CREATE FUNCTION +NOTICE: test_event_trigger: ddl_command_start CREATE FUNCTION +NOTICE: test_event_trigger: ddl_command_end CREATE FUNCTION +select f1(); +NOTICE: test_event_trigger: ddl_command_start CREATE TABLE +NOTICE: test_event_trigger: ddl_command_start CREATE TABLE +NOTICE: test_event_trigger: ddl_command_end CREATE TABLE + f1 +---- + 0 +(1 row) + +-- non-top-level command +create procedure p1() +language plpgsql +as $$ +begin + create table event_trigger_fire7 (a int); +end $$; +NOTICE: test_event_trigger: ddl_command_start CREATE PROCEDURE +NOTICE: test_event_trigger: ddl_command_end CREATE PROCEDURE +call p1(); +NOTICE: test_event_trigger: ddl_command_start CREATE TABLE +NOTICE: test_event_trigger: ddl_command_start CREATE TABLE +NOTICE: test_event_trigger: ddl_command_end CREATE TABLE +-- clean up +alter event trigger regress_event_trigger disable; +drop table event_trigger_fire2, event_trigger_fire3, event_trigger_fire4, event_trigger_fire5, event_trigger_fire6, event_trigger_fire7; +NOTICE: test_event_trigger: ddl_command_end DROP TABLE +drop routine f1(), p1(); +NOTICE: test_event_trigger: ddl_command_end DROP ROUTINE -- regress_event_trigger_end should fire on these commands grant all on table event_trigger_fire1 to public; NOTICE: test_event_trigger: ddl_command_end GRANT @@ -331,6 +384,18 @@ CREATE SCHEMA evttrig CREATE TABLE one (col_a SERIAL PRIMARY KEY, col_b text DEFAULT 'forty two') CREATE INDEX one_idx ON one (col_b) CREATE TABLE two (col_c INTEGER CHECK (col_c > 0) REFERENCES one DEFAULT 42); +-- Partitioned tables with a partitioned index +CREATE TABLE evttrig.parted ( + id int PRIMARY KEY) + PARTITION BY RANGE (id); +CREATE TABLE evttrig.part_1_10 PARTITION OF evttrig.parted (id) + FOR VALUES FROM (1) TO (10); +CREATE TABLE evttrig.part_10_20 PARTITION OF evttrig.parted (id) + FOR VALUES FROM (10) TO (20) PARTITION BY RANGE (id); +CREATE TABLE evttrig.part_10_15 PARTITION OF 
evttrig.part_10_20 (id) + FOR VALUES FROM (10) TO (15); +CREATE TABLE evttrig.part_15_20 PARTITION OF evttrig.part_10_20 (id) + FOR VALUES FROM (15) TO (20); ALTER TABLE evttrig.two DROP COLUMN col_c; NOTICE: NORMAL: orig=t normal=f istemp=f type=table column identity=evttrig.two.col_c name={evttrig,two,col_c} args={} NOTICE: NORMAL: orig=f normal=t istemp=f type=table constraint identity=two_col_c_check on evttrig.two name={evttrig,two,two_col_c_check} args={} @@ -341,14 +406,20 @@ NOTICE: NORMAL: orig=t normal=f istemp=f type=table constraint identity=one_pke DROP INDEX evttrig.one_idx; NOTICE: NORMAL: orig=t normal=f istemp=f type=index identity=evttrig.one_idx name={evttrig,one_idx} args={} DROP SCHEMA evttrig CASCADE; -NOTICE: drop cascades to 2 other objects +NOTICE: drop cascades to 3 other objects DETAIL: drop cascades to table evttrig.one drop cascades to table evttrig.two +drop cascades to table evttrig.parted NOTICE: NORMAL: orig=t normal=f istemp=f type=schema identity=evttrig name={evttrig} args={} NOTICE: NORMAL: orig=f normal=t istemp=f type=table identity=evttrig.one name={evttrig,one} args={} NOTICE: NORMAL: orig=f normal=t istemp=f type=sequence identity=evttrig.one_col_a_seq name={evttrig,one_col_a_seq} args={} NOTICE: NORMAL: orig=f normal=t istemp=f type=default value identity=for evttrig.one.col_a name={evttrig,one,col_a} args={} NOTICE: NORMAL: orig=f normal=t istemp=f type=table identity=evttrig.two name={evttrig,two} args={} +NOTICE: NORMAL: orig=f normal=t istemp=f type=table identity=evttrig.parted name={evttrig,parted} args={} +NOTICE: NORMAL: orig=f normal=t istemp=f type=table identity=evttrig.part_1_10 name={evttrig,part_1_10} args={} +NOTICE: NORMAL: orig=f normal=t istemp=f type=table identity=evttrig.part_10_20 name={evttrig,part_10_20} args={} +NOTICE: NORMAL: orig=f normal=t istemp=f type=table identity=evttrig.part_10_15 name={evttrig,part_10_15} args={} +NOTICE: NORMAL: orig=f normal=t istemp=f type=table identity=evttrig.part_15_20 name={evttrig,part_15_20} args={} DROP TABLE a_temp_tbl; NOTICE: NORMAL: orig=t normal=f istemp=t type=table identity=pg_temp.a_temp_tbl name={pg_temp,a_temp_tbl} args={} DROP EVENT TRIGGER regress_event_trigger_report_dropped; @@ -371,8 +442,6 @@ alter table rewriteme alter column foo type numeric; ERROR: rewrites not allowed CONTEXT: PL/pgSQL function test_evtrig_no_rewrite() line 3 at RAISE alter table rewriteme add column baz int default 0; -ERROR: rewrites not allowed -CONTEXT: PL/pgSQL function test_evtrig_no_rewrite() line 3 at RAISE -- test with more than one reason to rewrite a single table CREATE OR REPLACE FUNCTION test_evtrig_no_rewrite() RETURNS event_trigger LANGUAGE plpgsql AS $$ @@ -386,7 +455,7 @@ alter table rewriteme add column onemore int default 0, add column another int default -1, alter column foo type numeric(10,4); -NOTICE: Table 'rewriteme' is being rewritten (reason = 6) +NOTICE: Table 'rewriteme' is being rewritten (reason = 4) -- shouldn't trigger a table_rewrite event alter table rewriteme alter column foo type numeric(12,4); -- typed tables are rewritten when their type changes. 
Don't emit table diff --git a/src/test/regress/expected/fast_default.out b/src/test/regress/expected/fast_default.out new file mode 100644 index 0000000000..1c1924cd5c --- /dev/null +++ b/src/test/regress/expected/fast_default.out @@ -0,0 +1,760 @@ +-- +-- ALTER TABLE ADD COLUMN DEFAULT test +-- +SET search_path = fast_default; +CREATE SCHEMA fast_default; +CREATE TABLE m(id OID); +INSERT INTO m VALUES (NULL::OID); +CREATE FUNCTION set(tabname name) RETURNS VOID +AS $$ +BEGIN + UPDATE m + SET id = (SELECT c.relfilenode + FROM pg_class AS c, pg_namespace AS s + WHERE c.relname = tabname + AND c.relnamespace = s.oid + AND s.nspname = 'fast_default'); +END; +$$ LANGUAGE 'plpgsql'; +CREATE FUNCTION comp() RETURNS TEXT +AS $$ +BEGIN + RETURN (SELECT CASE + WHEN m.id = c.relfilenode THEN 'Unchanged' + ELSE 'Rewritten' + END + FROM m, pg_class AS c, pg_namespace AS s + WHERE c.relname = 't' + AND c.relnamespace = s.oid + AND s.nspname = 'fast_default'); +END; +$$ LANGUAGE 'plpgsql'; +CREATE FUNCTION log_rewrite() RETURNS event_trigger +LANGUAGE plpgsql as +$func$ + +declare + this_schema text; +begin + select into this_schema relnamespace::regnamespace::text + from pg_class + where oid = pg_event_trigger_table_rewrite_oid(); + if this_schema = 'fast_default' + then + RAISE NOTICE 'rewriting table % for reason %', + pg_event_trigger_table_rewrite_oid()::regclass, + pg_event_trigger_table_rewrite_reason(); + end if; +end; +$func$; +CREATE TABLE has_volatile AS +SELECT * FROM generate_series(1,10) id; +CREATE EVENT TRIGGER has_volatile_rewrite + ON table_rewrite + EXECUTE PROCEDURE log_rewrite(); +-- only the last of these should trigger a rewrite +ALTER TABLE has_volatile ADD col1 int; +ALTER TABLE has_volatile ADD col2 int DEFAULT 1; +ALTER TABLE has_volatile ADD col3 timestamptz DEFAULT current_timestamp; +ALTER TABLE has_volatile ADD col4 int DEFAULT (random() * 10000)::int; +NOTICE: rewriting table has_volatile for reason 2 +-- Test a large sample of different datatypes +CREATE TABLE T(pk INT NOT NULL PRIMARY KEY, c_int INT DEFAULT 1); +SELECT set('t'); + set +----- + +(1 row) + +INSERT INTO T VALUES (1), (2); +ALTER TABLE T ADD COLUMN c_bpchar BPCHAR(5) DEFAULT 'hello', + ALTER COLUMN c_int SET DEFAULT 2; +INSERT INTO T VALUES (3), (4); +ALTER TABLE T ADD COLUMN c_text TEXT DEFAULT 'world', + ALTER COLUMN c_bpchar SET DEFAULT 'dog'; +INSERT INTO T VALUES (5), (6); +ALTER TABLE T ADD COLUMN c_date DATE DEFAULT '2016-06-02', + ALTER COLUMN c_text SET DEFAULT 'cat'; +INSERT INTO T VALUES (7), (8); +ALTER TABLE T ADD COLUMN c_timestamp TIMESTAMP DEFAULT '2016-09-01 12:00:00', + ADD COLUMN c_timestamp_null TIMESTAMP, + ALTER COLUMN c_date SET DEFAULT '2010-01-01'; +INSERT INTO T VALUES (9), (10); +ALTER TABLE T ADD COLUMN c_array TEXT[] + DEFAULT '{"This", "is", "the", "real", "world"}', + ALTER COLUMN c_timestamp SET DEFAULT '1970-12-31 11:12:13', + ALTER COLUMN c_timestamp_null SET DEFAULT '2016-09-29 12:00:00'; +INSERT INTO T VALUES (11), (12); +ALTER TABLE T ADD COLUMN c_small SMALLINT DEFAULT -5, + ADD COLUMN c_small_null SMALLINT, + ALTER COLUMN c_array + SET DEFAULT '{"This", "is", "no", "fantasy"}'; +INSERT INTO T VALUES (13), (14); +ALTER TABLE T ADD COLUMN c_big BIGINT DEFAULT 180000000000018, + ALTER COLUMN c_small SET DEFAULT 9, + ALTER COLUMN c_small_null SET DEFAULT 13; +INSERT INTO T VALUES (15), (16); +ALTER TABLE T ADD COLUMN c_num NUMERIC DEFAULT 1.00000000001, + ALTER COLUMN c_big SET DEFAULT -9999999999999999; +INSERT INTO T VALUES (17), (18); +ALTER TABLE T ADD COLUMN c_time 
TIME DEFAULT '12:00:00', + ALTER COLUMN c_num SET DEFAULT 2.000000000000002; +INSERT INTO T VALUES (19), (20); +ALTER TABLE T ADD COLUMN c_interval INTERVAL DEFAULT '1 day', + ALTER COLUMN c_time SET DEFAULT '23:59:59'; +INSERT INTO T VALUES (21), (22); +ALTER TABLE T ADD COLUMN c_hugetext TEXT DEFAULT repeat('abcdefg',1000), + ALTER COLUMN c_interval SET DEFAULT '3 hours'; +INSERT INTO T VALUES (23), (24); +ALTER TABLE T ALTER COLUMN c_interval DROP DEFAULT, + ALTER COLUMN c_hugetext SET DEFAULT repeat('poiuyt', 1000); +INSERT INTO T VALUES (25), (26); +ALTER TABLE T ALTER COLUMN c_bpchar DROP DEFAULT, + ALTER COLUMN c_date DROP DEFAULT, + ALTER COLUMN c_text DROP DEFAULT, + ALTER COLUMN c_timestamp DROP DEFAULT, + ALTER COLUMN c_array DROP DEFAULT, + ALTER COLUMN c_small DROP DEFAULT, + ALTER COLUMN c_big DROP DEFAULT, + ALTER COLUMN c_num DROP DEFAULT, + ALTER COLUMN c_time DROP DEFAULT, + ALTER COLUMN c_hugetext DROP DEFAULT; +INSERT INTO T VALUES (27), (28); +SELECT pk, c_int, c_bpchar, c_text, c_date, c_timestamp, + c_timestamp_null, c_array, c_small, c_small_null, + c_big, c_num, c_time, c_interval, + c_hugetext = repeat('abcdefg',1000) as c_hugetext_origdef, + c_hugetext = repeat('poiuyt', 1000) as c_hugetext_newdef +FROM T ORDER BY pk; + pk | c_int | c_bpchar | c_text | c_date | c_timestamp | c_timestamp_null | c_array | c_small | c_small_null | c_big | c_num | c_time | c_interval | c_hugetext_origdef | c_hugetext_newdef +----+-------+----------+--------+------------+--------------------------+--------------------------+--------------------------+---------+--------------+-------------------+-------------------+----------+------------+--------------------+------------------- + 1 | 1 | hello | world | 06-02-2016 | Thu Sep 01 12:00:00 2016 | | {This,is,the,real,world} | -5 | | 180000000000018 | 1.00000000001 | 12:00:00 | @ 1 day | t | f + 2 | 1 | hello | world | 06-02-2016 | Thu Sep 01 12:00:00 2016 | | {This,is,the,real,world} | -5 | | 180000000000018 | 1.00000000001 | 12:00:00 | @ 1 day | t | f + 3 | 2 | hello | world | 06-02-2016 | Thu Sep 01 12:00:00 2016 | | {This,is,the,real,world} | -5 | | 180000000000018 | 1.00000000001 | 12:00:00 | @ 1 day | t | f + 4 | 2 | hello | world | 06-02-2016 | Thu Sep 01 12:00:00 2016 | | {This,is,the,real,world} | -5 | | 180000000000018 | 1.00000000001 | 12:00:00 | @ 1 day | t | f + 5 | 2 | dog | world | 06-02-2016 | Thu Sep 01 12:00:00 2016 | | {This,is,the,real,world} | -5 | | 180000000000018 | 1.00000000001 | 12:00:00 | @ 1 day | t | f + 6 | 2 | dog | world | 06-02-2016 | Thu Sep 01 12:00:00 2016 | | {This,is,the,real,world} | -5 | | 180000000000018 | 1.00000000001 | 12:00:00 | @ 1 day | t | f + 7 | 2 | dog | cat | 06-02-2016 | Thu Sep 01 12:00:00 2016 | | {This,is,the,real,world} | -5 | | 180000000000018 | 1.00000000001 | 12:00:00 | @ 1 day | t | f + 8 | 2 | dog | cat | 06-02-2016 | Thu Sep 01 12:00:00 2016 | | {This,is,the,real,world} | -5 | | 180000000000018 | 1.00000000001 | 12:00:00 | @ 1 day | t | f + 9 | 2 | dog | cat | 01-01-2010 | Thu Sep 01 12:00:00 2016 | | {This,is,the,real,world} | -5 | | 180000000000018 | 1.00000000001 | 12:00:00 | @ 1 day | t | f + 10 | 2 | dog | cat | 01-01-2010 | Thu Sep 01 12:00:00 2016 | | {This,is,the,real,world} | -5 | | 180000000000018 | 1.00000000001 | 12:00:00 | @ 1 day | t | f + 11 | 2 | dog | cat | 01-01-2010 | Thu Dec 31 11:12:13 1970 | Thu Sep 29 12:00:00 2016 | {This,is,the,real,world} | -5 | | 180000000000018 | 1.00000000001 | 12:00:00 | @ 1 day | t | f + 12 | 2 | dog | cat | 01-01-2010 | Thu Dec 31 
11:12:13 1970 | Thu Sep 29 12:00:00 2016 | {This,is,the,real,world} | -5 | | 180000000000018 | 1.00000000001 | 12:00:00 | @ 1 day | t | f + 13 | 2 | dog | cat | 01-01-2010 | Thu Dec 31 11:12:13 1970 | Thu Sep 29 12:00:00 2016 | {This,is,no,fantasy} | -5 | | 180000000000018 | 1.00000000001 | 12:00:00 | @ 1 day | t | f + 14 | 2 | dog | cat | 01-01-2010 | Thu Dec 31 11:12:13 1970 | Thu Sep 29 12:00:00 2016 | {This,is,no,fantasy} | -5 | | 180000000000018 | 1.00000000001 | 12:00:00 | @ 1 day | t | f + 15 | 2 | dog | cat | 01-01-2010 | Thu Dec 31 11:12:13 1970 | Thu Sep 29 12:00:00 2016 | {This,is,no,fantasy} | 9 | 13 | 180000000000018 | 1.00000000001 | 12:00:00 | @ 1 day | t | f + 16 | 2 | dog | cat | 01-01-2010 | Thu Dec 31 11:12:13 1970 | Thu Sep 29 12:00:00 2016 | {This,is,no,fantasy} | 9 | 13 | 180000000000018 | 1.00000000001 | 12:00:00 | @ 1 day | t | f + 17 | 2 | dog | cat | 01-01-2010 | Thu Dec 31 11:12:13 1970 | Thu Sep 29 12:00:00 2016 | {This,is,no,fantasy} | 9 | 13 | -9999999999999999 | 1.00000000001 | 12:00:00 | @ 1 day | t | f + 18 | 2 | dog | cat | 01-01-2010 | Thu Dec 31 11:12:13 1970 | Thu Sep 29 12:00:00 2016 | {This,is,no,fantasy} | 9 | 13 | -9999999999999999 | 1.00000000001 | 12:00:00 | @ 1 day | t | f + 19 | 2 | dog | cat | 01-01-2010 | Thu Dec 31 11:12:13 1970 | Thu Sep 29 12:00:00 2016 | {This,is,no,fantasy} | 9 | 13 | -9999999999999999 | 2.000000000000002 | 12:00:00 | @ 1 day | t | f + 20 | 2 | dog | cat | 01-01-2010 | Thu Dec 31 11:12:13 1970 | Thu Sep 29 12:00:00 2016 | {This,is,no,fantasy} | 9 | 13 | -9999999999999999 | 2.000000000000002 | 12:00:00 | @ 1 day | t | f + 21 | 2 | dog | cat | 01-01-2010 | Thu Dec 31 11:12:13 1970 | Thu Sep 29 12:00:00 2016 | {This,is,no,fantasy} | 9 | 13 | -9999999999999999 | 2.000000000000002 | 23:59:59 | @ 1 day | t | f + 22 | 2 | dog | cat | 01-01-2010 | Thu Dec 31 11:12:13 1970 | Thu Sep 29 12:00:00 2016 | {This,is,no,fantasy} | 9 | 13 | -9999999999999999 | 2.000000000000002 | 23:59:59 | @ 1 day | t | f + 23 | 2 | dog | cat | 01-01-2010 | Thu Dec 31 11:12:13 1970 | Thu Sep 29 12:00:00 2016 | {This,is,no,fantasy} | 9 | 13 | -9999999999999999 | 2.000000000000002 | 23:59:59 | @ 3 hours | t | f + 24 | 2 | dog | cat | 01-01-2010 | Thu Dec 31 11:12:13 1970 | Thu Sep 29 12:00:00 2016 | {This,is,no,fantasy} | 9 | 13 | -9999999999999999 | 2.000000000000002 | 23:59:59 | @ 3 hours | t | f + 25 | 2 | dog | cat | 01-01-2010 | Thu Dec 31 11:12:13 1970 | Thu Sep 29 12:00:00 2016 | {This,is,no,fantasy} | 9 | 13 | -9999999999999999 | 2.000000000000002 | 23:59:59 | | f | t + 26 | 2 | dog | cat | 01-01-2010 | Thu Dec 31 11:12:13 1970 | Thu Sep 29 12:00:00 2016 | {This,is,no,fantasy} | 9 | 13 | -9999999999999999 | 2.000000000000002 | 23:59:59 | | f | t + 27 | 2 | | | | | Thu Sep 29 12:00:00 2016 | | | 13 | | | | | | + 28 | 2 | | | | | Thu Sep 29 12:00:00 2016 | | | 13 | | | | | | +(28 rows) + +SELECT comp(); + comp +----------- + Unchanged +(1 row) + +DROP TABLE T; +-- Test expressions in the defaults +CREATE OR REPLACE FUNCTION foo(a INT) RETURNS TEXT AS $$ +DECLARE res TEXT := ''; + i INT; +BEGIN + i := 0; + WHILE (i < a) LOOP + res := res || chr(ascii('a') + i); + i := i + 1; + END LOOP; + RETURN res; +END; $$ LANGUAGE PLPGSQL STABLE; +CREATE TABLE T(pk INT NOT NULL PRIMARY KEY, c_int INT DEFAULT LENGTH(foo(6))); +SELECT set('t'); + set +----- + +(1 row) + +INSERT INTO T VALUES (1), (2); +ALTER TABLE T ADD COLUMN c_bpchar BPCHAR(5) DEFAULT foo(4), + ALTER COLUMN c_int SET DEFAULT LENGTH(foo(8)); +INSERT INTO T VALUES (3), (4); +ALTER TABLE T ADD COLUMN 
c_text TEXT DEFAULT foo(6), + ALTER COLUMN c_bpchar SET DEFAULT foo(3); +INSERT INTO T VALUES (5), (6); +ALTER TABLE T ADD COLUMN c_date DATE + DEFAULT '2016-06-02'::DATE + LENGTH(foo(10)), + ALTER COLUMN c_text SET DEFAULT foo(12); +INSERT INTO T VALUES (7), (8); +ALTER TABLE T ADD COLUMN c_timestamp TIMESTAMP + DEFAULT '2016-09-01'::DATE + LENGTH(foo(10)), + ALTER COLUMN c_date + SET DEFAULT '2010-01-01'::DATE - LENGTH(foo(4)); +INSERT INTO T VALUES (9), (10); +ALTER TABLE T ADD COLUMN c_array TEXT[] + DEFAULT ('{"This", "is", "' || foo(4) || + '","the", "real", "world"}')::TEXT[], + ALTER COLUMN c_timestamp + SET DEFAULT '1970-12-31'::DATE + LENGTH(foo(30)); +INSERT INTO T VALUES (11), (12); +ALTER TABLE T ALTER COLUMN c_int DROP DEFAULT, + ALTER COLUMN c_array + SET DEFAULT ('{"This", "is", "' || foo(1) || + '", "fantasy"}')::text[]; +INSERT INTO T VALUES (13), (14); +ALTER TABLE T ALTER COLUMN c_bpchar DROP DEFAULT, + ALTER COLUMN c_date DROP DEFAULT, + ALTER COLUMN c_text DROP DEFAULT, + ALTER COLUMN c_timestamp DROP DEFAULT, + ALTER COLUMN c_array DROP DEFAULT; +INSERT INTO T VALUES (15), (16); +SELECT * FROM T; + pk | c_int | c_bpchar | c_text | c_date | c_timestamp | c_array +----+-------+----------+--------------+------------+--------------------------+------------------------------- + 1 | 6 | abcd | abcdef | 06-12-2016 | Sun Sep 11 00:00:00 2016 | {This,is,abcd,the,real,world} + 2 | 6 | abcd | abcdef | 06-12-2016 | Sun Sep 11 00:00:00 2016 | {This,is,abcd,the,real,world} + 3 | 8 | abcd | abcdef | 06-12-2016 | Sun Sep 11 00:00:00 2016 | {This,is,abcd,the,real,world} + 4 | 8 | abcd | abcdef | 06-12-2016 | Sun Sep 11 00:00:00 2016 | {This,is,abcd,the,real,world} + 5 | 8 | abc | abcdef | 06-12-2016 | Sun Sep 11 00:00:00 2016 | {This,is,abcd,the,real,world} + 6 | 8 | abc | abcdef | 06-12-2016 | Sun Sep 11 00:00:00 2016 | {This,is,abcd,the,real,world} + 7 | 8 | abc | abcdefghijkl | 06-12-2016 | Sun Sep 11 00:00:00 2016 | {This,is,abcd,the,real,world} + 8 | 8 | abc | abcdefghijkl | 06-12-2016 | Sun Sep 11 00:00:00 2016 | {This,is,abcd,the,real,world} + 9 | 8 | abc | abcdefghijkl | 12-28-2009 | Sun Sep 11 00:00:00 2016 | {This,is,abcd,the,real,world} + 10 | 8 | abc | abcdefghijkl | 12-28-2009 | Sun Sep 11 00:00:00 2016 | {This,is,abcd,the,real,world} + 11 | 8 | abc | abcdefghijkl | 12-28-2009 | Sat Jan 30 00:00:00 1971 | {This,is,abcd,the,real,world} + 12 | 8 | abc | abcdefghijkl | 12-28-2009 | Sat Jan 30 00:00:00 1971 | {This,is,abcd,the,real,world} + 13 | | abc | abcdefghijkl | 12-28-2009 | Sat Jan 30 00:00:00 1971 | {This,is,a,fantasy} + 14 | | abc | abcdefghijkl | 12-28-2009 | Sat Jan 30 00:00:00 1971 | {This,is,a,fantasy} + 15 | | | | | | + 16 | | | | | | +(16 rows) + +SELECT comp(); + comp +----------- + Unchanged +(1 row) + +DROP TABLE T; +DROP FUNCTION foo(INT); +-- Fall back to full rewrite for volatile expressions +CREATE TABLE T(pk INT NOT NULL PRIMARY KEY); +INSERT INTO T VALUES (1); +SELECT set('t'); + set +----- + +(1 row) + +-- now() is stable, because it returns the transaction timestamp +ALTER TABLE T ADD COLUMN c1 TIMESTAMP DEFAULT now(); +SELECT comp(); + comp +----------- + Unchanged +(1 row) + +-- clock_timestamp() is volatile +ALTER TABLE T ADD COLUMN c2 TIMESTAMP DEFAULT clock_timestamp(); +NOTICE: rewriting table t for reason 2 +SELECT comp(); + comp +----------- + Rewritten +(1 row) + +DROP TABLE T; +-- Simple querie +CREATE TABLE T (pk INT NOT NULL PRIMARY KEY); +SELECT set('t'); + set +----- + +(1 row) + +INSERT INTO T SELECT * FROM generate_series(1, 10) a; 
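-- Reviewer sketch (not part of the diff): the set()/comp() helpers above only compare
-- relfilenodes to prove that ADD COLUMN ... DEFAULT, as in the statements just below,
-- does not rewrite the table.  The per-column bookkeeping behind a non-rewriting
-- ("fast") default can also be inspected directly; this assumes the new pg_attribute
-- columns atthasmissing/attmissingval and a table t in the current search_path that has
-- had a column added with a non-volatile default.
SELECT attname, atthasmissing, attmissingval
FROM pg_attribute
WHERE attrelid = 't'::regclass
  AND attnum > 0
ORDER BY attnum;
-- atthasmissing = true with attmissingval holding the default means rows written before
-- the ALTER are expanded on read instead of being rewritten on disk.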
+ALTER TABLE T ADD COLUMN c_bigint BIGINT NOT NULL DEFAULT -1; +INSERT INTO T SELECT b, b - 10 FROM generate_series(11, 20) a(b); +ALTER TABLE T ADD COLUMN c_text TEXT DEFAULT 'hello'; +INSERT INTO T SELECT b, b - 10, (b + 10)::text FROM generate_series(21, 30) a(b); +-- WHERE clause +SELECT c_bigint, c_text FROM T WHERE c_bigint = -1 LIMIT 1; + c_bigint | c_text +----------+-------- + -1 | hello +(1 row) + +EXPLAIN (VERBOSE TRUE, COSTS FALSE) +SELECT c_bigint, c_text FROM T WHERE c_bigint = -1 LIMIT 1; + QUERY PLAN +---------------------------------------------- + Limit + Output: c_bigint, c_text + -> Seq Scan on fast_default.t + Output: c_bigint, c_text + Filter: (t.c_bigint = '-1'::integer) +(5 rows) + +SELECT c_bigint, c_text FROM T WHERE c_text = 'hello' LIMIT 1; + c_bigint | c_text +----------+-------- + -1 | hello +(1 row) + +EXPLAIN (VERBOSE TRUE, COSTS FALSE) SELECT c_bigint, c_text FROM T WHERE c_text = 'hello' LIMIT 1; + QUERY PLAN +-------------------------------------------- + Limit + Output: c_bigint, c_text + -> Seq Scan on fast_default.t + Output: c_bigint, c_text + Filter: (t.c_text = 'hello'::text) +(5 rows) + +-- COALESCE +SELECT COALESCE(c_bigint, pk), COALESCE(c_text, pk::text) +FROM T +ORDER BY pk LIMIT 10; + coalesce | coalesce +----------+---------- + -1 | hello + -1 | hello + -1 | hello + -1 | hello + -1 | hello + -1 | hello + -1 | hello + -1 | hello + -1 | hello + -1 | hello +(10 rows) + +-- Aggregate function +SELECT SUM(c_bigint), MAX(c_text COLLATE "C" ), MIN(c_text COLLATE "C") FROM T; + sum | max | min +-----+-------+----- + 200 | hello | 31 +(1 row) + +-- ORDER BY +SELECT * FROM T ORDER BY c_bigint, c_text, pk LIMIT 10; + pk | c_bigint | c_text +----+----------+-------- + 1 | -1 | hello + 2 | -1 | hello + 3 | -1 | hello + 4 | -1 | hello + 5 | -1 | hello + 6 | -1 | hello + 7 | -1 | hello + 8 | -1 | hello + 9 | -1 | hello + 10 | -1 | hello +(10 rows) + +EXPLAIN (VERBOSE TRUE, COSTS FALSE) +SELECT * FROM T ORDER BY c_bigint, c_text, pk LIMIT 10; + QUERY PLAN +---------------------------------------------- + Limit + Output: pk, c_bigint, c_text + -> Sort + Output: pk, c_bigint, c_text + Sort Key: t.c_bigint, t.c_text, t.pk + -> Seq Scan on fast_default.t + Output: pk, c_bigint, c_text +(7 rows) + +-- LIMIT +SELECT * FROM T WHERE c_bigint > -1 ORDER BY c_bigint, c_text, pk LIMIT 10; + pk | c_bigint | c_text +----+----------+-------- + 11 | 1 | hello + 12 | 2 | hello + 13 | 3 | hello + 14 | 4 | hello + 15 | 5 | hello + 16 | 6 | hello + 17 | 7 | hello + 18 | 8 | hello + 19 | 9 | hello + 20 | 10 | hello +(10 rows) + +EXPLAIN (VERBOSE TRUE, COSTS FALSE) +SELECT * FROM T WHERE c_bigint > -1 ORDER BY c_bigint, c_text, pk LIMIT 10; + QUERY PLAN +---------------------------------------------------- + Limit + Output: pk, c_bigint, c_text + -> Sort + Output: pk, c_bigint, c_text + Sort Key: t.c_bigint, t.c_text, t.pk + -> Seq Scan on fast_default.t + Output: pk, c_bigint, c_text + Filter: (t.c_bigint > '-1'::integer) +(8 rows) + +-- DELETE with RETURNING +DELETE FROM T WHERE pk BETWEEN 10 AND 20 RETURNING *; + pk | c_bigint | c_text +----+----------+-------- + 10 | -1 | hello + 11 | 1 | hello + 12 | 2 | hello + 13 | 3 | hello + 14 | 4 | hello + 15 | 5 | hello + 16 | 6 | hello + 17 | 7 | hello + 18 | 8 | hello + 19 | 9 | hello + 20 | 10 | hello +(11 rows) + +EXPLAIN (VERBOSE TRUE, COSTS FALSE) +DELETE FROM T WHERE pk BETWEEN 10 AND 20 RETURNING *; + QUERY PLAN +----------------------------------------------------------- + Delete on fast_default.t + Output: pk, c_bigint, 
c_text + -> Bitmap Heap Scan on fast_default.t + Output: ctid + Recheck Cond: ((t.pk >= 10) AND (t.pk <= 20)) + -> Bitmap Index Scan on t_pkey + Index Cond: ((t.pk >= 10) AND (t.pk <= 20)) +(7 rows) + +-- UPDATE +UPDATE T SET c_text = '"' || c_text || '"' WHERE pk < 10; +SELECT * FROM T WHERE c_text LIKE '"%"' ORDER BY PK; + pk | c_bigint | c_text +----+----------+--------- + 1 | -1 | "hello" + 2 | -1 | "hello" + 3 | -1 | "hello" + 4 | -1 | "hello" + 5 | -1 | "hello" + 6 | -1 | "hello" + 7 | -1 | "hello" + 8 | -1 | "hello" + 9 | -1 | "hello" +(9 rows) + +SELECT comp(); + comp +----------- + Unchanged +(1 row) + +DROP TABLE T; +-- Combine with other DDL +CREATE TABLE T(pk INT NOT NULL PRIMARY KEY); +SELECT set('t'); + set +----- + +(1 row) + +INSERT INTO T VALUES (1), (2); +ALTER TABLE T ADD COLUMN c_int INT NOT NULL DEFAULT -1; +INSERT INTO T VALUES (3), (4); +ALTER TABLE T ADD COLUMN c_text TEXT DEFAULT 'Hello'; +INSERT INTO T VALUES (5), (6); +ALTER TABLE T ALTER COLUMN c_text SET DEFAULT 'world', + ALTER COLUMN c_int SET DEFAULT 1; +INSERT INTO T VALUES (7), (8); +SELECT * FROM T ORDER BY pk; + pk | c_int | c_text +----+-------+-------- + 1 | -1 | Hello + 2 | -1 | Hello + 3 | -1 | Hello + 4 | -1 | Hello + 5 | -1 | Hello + 6 | -1 | Hello + 7 | 1 | world + 8 | 1 | world +(8 rows) + +-- Add an index +CREATE INDEX i ON T(c_int, c_text); +SELECT c_text FROM T WHERE c_int = -1; + c_text +-------- + Hello + Hello + Hello + Hello + Hello + Hello +(6 rows) + +SELECT comp(); + comp +----------- + Unchanged +(1 row) + +-- query to exercise expand_tuple function +CREATE TABLE t1 AS +SELECT 1::int AS a , 2::int AS b +FROM generate_series(1,20) q; +ALTER TABLE t1 ADD COLUMN c text; +SELECT a, + stddev(cast((SELECT sum(1) FROM generate_series(1,20) x) AS float4)) + OVER (PARTITION BY a,b,c ORDER BY b) + AS z +FROM t1; + a | z +---+--- + 1 | 0 + 1 | 0 + 1 | 0 + 1 | 0 + 1 | 0 + 1 | 0 + 1 | 0 + 1 | 0 + 1 | 0 + 1 | 0 + 1 | 0 + 1 | 0 + 1 | 0 + 1 | 0 + 1 | 0 + 1 | 0 + 1 | 0 + 1 | 0 + 1 | 0 + 1 | 0 +(20 rows) + +DROP TABLE T; +-- test that we account for missing columns without defaults correctly +-- in expand_tuple, and that rows are correctly expanded for triggers +CREATE FUNCTION test_trigger() +RETURNS trigger +LANGUAGE plpgsql +AS $$ + +begin + raise notice 'old tuple: %', to_json(OLD)::text; + if TG_OP = 'DELETE' + then + return OLD; + else + return NEW; + end if; +end; + +$$; +-- 2 new columns, both have defaults +CREATE TABLE t (id serial PRIMARY KEY, a int, b int, c int); +INSERT INTO t (a,b,c) VALUES (1,2,3); +ALTER TABLE t ADD COLUMN x int NOT NULL DEFAULT 4; +ALTER TABLE t ADD COLUMN y int NOT NULL DEFAULT 5; +CREATE TRIGGER a BEFORE UPDATE ON t FOR EACH ROW EXECUTE PROCEDURE test_trigger(); +SELECT * FROM t; + id | a | b | c | x | y +----+---+---+---+---+--- + 1 | 1 | 2 | 3 | 4 | 5 +(1 row) + +UPDATE t SET y = 2; +NOTICE: old tuple: {"id":1,"a":1,"b":2,"c":3,"x":4,"y":5} +SELECT * FROM t; + id | a | b | c | x | y +----+---+---+---+---+--- + 1 | 1 | 2 | 3 | 4 | 2 +(1 row) + +DROP TABLE t; +-- 2 new columns, first has default +CREATE TABLE t (id serial PRIMARY KEY, a int, b int, c int); +INSERT INTO t (a,b,c) VALUES (1,2,3); +ALTER TABLE t ADD COLUMN x int NOT NULL DEFAULT 4; +ALTER TABLE t ADD COLUMN y int; +CREATE TRIGGER a BEFORE UPDATE ON t FOR EACH ROW EXECUTE PROCEDURE test_trigger(); +SELECT * FROM t; + id | a | b | c | x | y +----+---+---+---+---+--- + 1 | 1 | 2 | 3 | 4 | +(1 row) + +UPDATE t SET y = 2; +NOTICE: old tuple: {"id":1,"a":1,"b":2,"c":3,"x":4,"y":null} +SELECT * FROM t; + id | 
a | b | c | x | y +----+---+---+---+---+--- + 1 | 1 | 2 | 3 | 4 | 2 +(1 row) + +DROP TABLE t; +-- 2 new columns, second has default +CREATE TABLE t (id serial PRIMARY KEY, a int, b int, c int); +INSERT INTO t (a,b,c) VALUES (1,2,3); +ALTER TABLE t ADD COLUMN x int; +ALTER TABLE t ADD COLUMN y int NOT NULL DEFAULT 5; +CREATE TRIGGER a BEFORE UPDATE ON t FOR EACH ROW EXECUTE PROCEDURE test_trigger(); +SELECT * FROM t; + id | a | b | c | x | y +----+---+---+---+---+--- + 1 | 1 | 2 | 3 | | 5 +(1 row) + +UPDATE t SET y = 2; +NOTICE: old tuple: {"id":1,"a":1,"b":2,"c":3,"x":null,"y":5} +SELECT * FROM t; + id | a | b | c | x | y +----+---+---+---+---+--- + 1 | 1 | 2 | 3 | | 2 +(1 row) + +DROP TABLE t; +-- 2 new columns, neither has default +CREATE TABLE t (id serial PRIMARY KEY, a int, b int, c int); +INSERT INTO t (a,b,c) VALUES (1,2,3); +ALTER TABLE t ADD COLUMN x int; +ALTER TABLE t ADD COLUMN y int; +CREATE TRIGGER a BEFORE UPDATE ON t FOR EACH ROW EXECUTE PROCEDURE test_trigger(); +SELECT * FROM t; + id | a | b | c | x | y +----+---+---+---+---+--- + 1 | 1 | 2 | 3 | | +(1 row) + +UPDATE t SET y = 2; +NOTICE: old tuple: {"id":1,"a":1,"b":2,"c":3,"x":null,"y":null} +SELECT * FROM t; + id | a | b | c | x | y +----+---+---+---+---+--- + 1 | 1 | 2 | 3 | | 2 +(1 row) + +DROP TABLE t; +-- same as last 4 tests but here the last original column has a NULL value +-- 2 new columns, both have defaults +CREATE TABLE t (id serial PRIMARY KEY, a int, b int, c int); +INSERT INTO t (a,b,c) VALUES (1,2,NULL); +ALTER TABLE t ADD COLUMN x int NOT NULL DEFAULT 4; +ALTER TABLE t ADD COLUMN y int NOT NULL DEFAULT 5; +CREATE TRIGGER a BEFORE UPDATE ON t FOR EACH ROW EXECUTE PROCEDURE test_trigger(); +SELECT * FROM t; + id | a | b | c | x | y +----+---+---+---+---+--- + 1 | 1 | 2 | | 4 | 5 +(1 row) + +UPDATE t SET y = 2; +NOTICE: old tuple: {"id":1,"a":1,"b":2,"c":null,"x":4,"y":5} +SELECT * FROM t; + id | a | b | c | x | y +----+---+---+---+---+--- + 1 | 1 | 2 | | 4 | 2 +(1 row) + +DROP TABLE t; +-- 2 new columns, first has default +CREATE TABLE t (id serial PRIMARY KEY, a int, b int, c int); +INSERT INTO t (a,b,c) VALUES (1,2,NULL); +ALTER TABLE t ADD COLUMN x int NOT NULL DEFAULT 4; +ALTER TABLE t ADD COLUMN y int; +CREATE TRIGGER a BEFORE UPDATE ON t FOR EACH ROW EXECUTE PROCEDURE test_trigger(); +SELECT * FROM t; + id | a | b | c | x | y +----+---+---+---+---+--- + 1 | 1 | 2 | | 4 | +(1 row) + +UPDATE t SET y = 2; +NOTICE: old tuple: {"id":1,"a":1,"b":2,"c":null,"x":4,"y":null} +SELECT * FROM t; + id | a | b | c | x | y +----+---+---+---+---+--- + 1 | 1 | 2 | | 4 | 2 +(1 row) + +DROP TABLE t; +-- 2 new columns, second has default +CREATE TABLE t (id serial PRIMARY KEY, a int, b int, c int); +INSERT INTO t (a,b,c) VALUES (1,2,NULL); +ALTER TABLE t ADD COLUMN x int; +ALTER TABLE t ADD COLUMN y int NOT NULL DEFAULT 5; +CREATE TRIGGER a BEFORE UPDATE ON t FOR EACH ROW EXECUTE PROCEDURE test_trigger(); +SELECT * FROM t; + id | a | b | c | x | y +----+---+---+---+---+--- + 1 | 1 | 2 | | | 5 +(1 row) + +UPDATE t SET y = 2; +NOTICE: old tuple: {"id":1,"a":1,"b":2,"c":null,"x":null,"y":5} +SELECT * FROM t; + id | a | b | c | x | y +----+---+---+---+---+--- + 1 | 1 | 2 | | | 2 +(1 row) + +DROP TABLE t; +-- 2 new columns, neither has default +CREATE TABLE t (id serial PRIMARY KEY, a int, b int, c int); +INSERT INTO t (a,b,c) VALUES (1,2,NULL); +ALTER TABLE t ADD COLUMN x int; +ALTER TABLE t ADD COLUMN y int; +CREATE TRIGGER a BEFORE UPDATE ON t FOR EACH ROW EXECUTE PROCEDURE test_trigger(); +SELECT * FROM t; + id | a | b 
| c | x | y +----+---+---+---+---+--- + 1 | 1 | 2 | | | +(1 row) + +UPDATE t SET y = 2; +NOTICE: old tuple: {"id":1,"a":1,"b":2,"c":null,"x":null,"y":null} +SELECT * FROM t; + id | a | b | c | x | y +----+---+---+---+---+--- + 1 | 1 | 2 | | | 2 +(1 row) + +DROP TABLE t; +-- make sure expanded tuple has correct self pointer +-- it will be required by the RI trigger doing the cascading delete +CREATE TABLE leader (a int PRIMARY KEY, b int); +CREATE TABLE follower (a int REFERENCES leader ON DELETE CASCADE, b int); +INSERT INTO leader VALUES (1, 1), (2, 2); +ALTER TABLE leader ADD c int; +ALTER TABLE leader DROP c; +DELETE FROM leader; +-- cleanup +DROP TABLE follower; +DROP TABLE leader; +DROP FUNCTION test_trigger(); +DROP TABLE t1; +DROP FUNCTION set(name); +DROP FUNCTION comp(); +DROP TABLE m; +DROP TABLE has_volatile; +DROP EVENT TRIGGER has_volatile_rewrite; +DROP FUNCTION log_rewrite; +DROP SCHEMA fast_default; +-- Leave a table with an active fast default in place, for pg_upgrade testing +set search_path = public; +create table has_fast_default(f1 int); +insert into has_fast_default values(1); +alter table has_fast_default add column f2 int default 42; +table has_fast_default; + f1 | f2 +----+---- + 1 | 42 +(1 row) + diff --git a/src/test/regress/expected/float4-exp-three-digits.out b/src/test/regress/expected/float4-exp-three-digits.out deleted file mode 100644 index f17f95697a..0000000000 --- a/src/test/regress/expected/float4-exp-three-digits.out +++ /dev/null @@ -1,259 +0,0 @@ --- --- FLOAT4 --- -CREATE TABLE FLOAT4_TBL (f1 float4); -INSERT INTO FLOAT4_TBL(f1) VALUES (' 0.0'); -INSERT INTO FLOAT4_TBL(f1) VALUES ('1004.30 '); -INSERT INTO FLOAT4_TBL(f1) VALUES (' -34.84 '); -INSERT INTO FLOAT4_TBL(f1) VALUES ('1.2345678901234e+20'); -INSERT INTO FLOAT4_TBL(f1) VALUES ('1.2345678901234e-20'); --- test for over and under flow -INSERT INTO FLOAT4_TBL(f1) VALUES ('10e70'); -ERROR: value out of range: overflow -LINE 1: INSERT INTO FLOAT4_TBL(f1) VALUES ('10e70'); - ^ -INSERT INTO FLOAT4_TBL(f1) VALUES ('-10e70'); -ERROR: value out of range: overflow -LINE 1: INSERT INTO FLOAT4_TBL(f1) VALUES ('-10e70'); - ^ -INSERT INTO FLOAT4_TBL(f1) VALUES ('10e-70'); -ERROR: value out of range: underflow -LINE 1: INSERT INTO FLOAT4_TBL(f1) VALUES ('10e-70'); - ^ -INSERT INTO FLOAT4_TBL(f1) VALUES ('-10e-70'); -ERROR: value out of range: underflow -LINE 1: INSERT INTO FLOAT4_TBL(f1) VALUES ('-10e-70'); - ^ --- bad input -INSERT INTO FLOAT4_TBL(f1) VALUES (''); -ERROR: invalid input syntax for type real: "" -LINE 1: INSERT INTO FLOAT4_TBL(f1) VALUES (''); - ^ -INSERT INTO FLOAT4_TBL(f1) VALUES (' '); -ERROR: invalid input syntax for type real: " " -LINE 1: INSERT INTO FLOAT4_TBL(f1) VALUES (' '); - ^ -INSERT INTO FLOAT4_TBL(f1) VALUES ('xyz'); -ERROR: invalid input syntax for type real: "xyz" -LINE 1: INSERT INTO FLOAT4_TBL(f1) VALUES ('xyz'); - ^ -INSERT INTO FLOAT4_TBL(f1) VALUES ('5.0.0'); -ERROR: invalid input syntax for type real: "5.0.0" -LINE 1: INSERT INTO FLOAT4_TBL(f1) VALUES ('5.0.0'); - ^ -INSERT INTO FLOAT4_TBL(f1) VALUES ('5 . 0'); -ERROR: invalid input syntax for type real: "5 . 0" -LINE 1: INSERT INTO FLOAT4_TBL(f1) VALUES ('5 . 0'); - ^ -INSERT INTO FLOAT4_TBL(f1) VALUES ('5. 0'); -ERROR: invalid input syntax for type real: "5. 0" -LINE 1: INSERT INTO FLOAT4_TBL(f1) VALUES ('5. 
0'); - ^ -INSERT INTO FLOAT4_TBL(f1) VALUES (' - 3.0'); -ERROR: invalid input syntax for type real: " - 3.0" -LINE 1: INSERT INTO FLOAT4_TBL(f1) VALUES (' - 3.0'); - ^ -INSERT INTO FLOAT4_TBL(f1) VALUES ('123 5'); -ERROR: invalid input syntax for type real: "123 5" -LINE 1: INSERT INTO FLOAT4_TBL(f1) VALUES ('123 5'); - ^ --- special inputs -SELECT 'NaN'::float4; - float4 --------- - NaN -(1 row) - -SELECT 'nan'::float4; - float4 --------- - NaN -(1 row) - -SELECT ' NAN '::float4; - float4 --------- - NaN -(1 row) - -SELECT 'infinity'::float4; - float4 ----------- - Infinity -(1 row) - -SELECT ' -INFINiTY '::float4; - float4 ------------ - -Infinity -(1 row) - --- bad special inputs -SELECT 'N A N'::float4; -ERROR: invalid input syntax for type real: "N A N" -LINE 1: SELECT 'N A N'::float4; - ^ -SELECT 'NaN x'::float4; -ERROR: invalid input syntax for type real: "NaN x" -LINE 1: SELECT 'NaN x'::float4; - ^ -SELECT ' INFINITY x'::float4; -ERROR: invalid input syntax for type real: " INFINITY x" -LINE 1: SELECT ' INFINITY x'::float4; - ^ -SELECT 'Infinity'::float4 + 100.0; - ?column? ----------- - Infinity -(1 row) - -SELECT 'Infinity'::float4 / 'Infinity'::float4; - ?column? ----------- - NaN -(1 row) - -SELECT 'nan'::float4 / 'nan'::float4; - ?column? ----------- - NaN -(1 row) - -SELECT 'nan'::numeric::float4; - float4 --------- - NaN -(1 row) - -SELECT '' AS five, * FROM FLOAT4_TBL; - five | f1 -------+-------------- - | 0 - | 1004.3 - | -34.84 - | 1.23457e+020 - | 1.23457e-020 -(5 rows) - -SELECT '' AS four, f.* FROM FLOAT4_TBL f WHERE f.f1 <> '1004.3'; - four | f1 -------+-------------- - | 0 - | -34.84 - | 1.23457e+020 - | 1.23457e-020 -(4 rows) - -SELECT '' AS one, f.* FROM FLOAT4_TBL f WHERE f.f1 = '1004.3'; - one | f1 ------+-------- - | 1004.3 -(1 row) - -SELECT '' AS three, f.* FROM FLOAT4_TBL f WHERE '1004.3' > f.f1; - three | f1 --------+-------------- - | 0 - | -34.84 - | 1.23457e-020 -(3 rows) - -SELECT '' AS three, f.* FROM FLOAT4_TBL f WHERE f.f1 < '1004.3'; - three | f1 --------+-------------- - | 0 - | -34.84 - | 1.23457e-020 -(3 rows) - -SELECT '' AS four, f.* FROM FLOAT4_TBL f WHERE '1004.3' >= f.f1; - four | f1 -------+-------------- - | 0 - | 1004.3 - | -34.84 - | 1.23457e-020 -(4 rows) - -SELECT '' AS four, f.* FROM FLOAT4_TBL f WHERE f.f1 <= '1004.3'; - four | f1 -------+-------------- - | 0 - | 1004.3 - | -34.84 - | 1.23457e-020 -(4 rows) - -SELECT '' AS three, f.f1, f.f1 * '-10' AS x FROM FLOAT4_TBL f - WHERE f.f1 > '0.0'; - three | f1 | x --------+--------------+--------------- - | 1004.3 | -10043 - | 1.23457e+020 | -1.23457e+021 - | 1.23457e-020 | -1.23457e-019 -(3 rows) - -SELECT '' AS three, f.f1, f.f1 + '-10' AS x FROM FLOAT4_TBL f - WHERE f.f1 > '0.0'; - three | f1 | x --------+--------------+-------------- - | 1004.3 | 994.3 - | 1.23457e+020 | 1.23457e+020 - | 1.23457e-020 | -10 -(3 rows) - -SELECT '' AS three, f.f1, f.f1 / '-10' AS x FROM FLOAT4_TBL f - WHERE f.f1 > '0.0'; - three | f1 | x --------+--------------+--------------- - | 1004.3 | -100.43 - | 1.23457e+020 | -1.23457e+019 - | 1.23457e-020 | -1.23457e-021 -(3 rows) - -SELECT '' AS three, f.f1, f.f1 - '-10' AS x FROM FLOAT4_TBL f - WHERE f.f1 > '0.0'; - three | f1 | x --------+--------------+-------------- - | 1004.3 | 1014.3 - | 1.23457e+020 | 1.23457e+020 - | 1.23457e-020 | 10 -(3 rows) - --- test divide by zero -SELECT '' AS bad, f.f1 / '0.0' from FLOAT4_TBL f; -ERROR: division by zero -SELECT '' AS five, * FROM FLOAT4_TBL; - five | f1 -------+-------------- - | 0 - | 1004.3 - | -34.84 - | 
1.23457e+020 - | 1.23457e-020 -(5 rows) - --- test the unary float4abs operator -SELECT '' AS five, f.f1, @f.f1 AS abs_f1 FROM FLOAT4_TBL f; - five | f1 | abs_f1 -------+--------------+-------------- - | 0 | 0 - | 1004.3 | 1004.3 - | -34.84 | 34.84 - | 1.23457e+020 | 1.23457e+020 - | 1.23457e-020 | 1.23457e-020 -(5 rows) - -UPDATE FLOAT4_TBL - SET f1 = FLOAT4_TBL.f1 * '-1' - WHERE FLOAT4_TBL.f1 > '0.0'; -SELECT '' AS five, * FROM FLOAT4_TBL; - five | f1 -------+--------------- - | 0 - | -34.84 - | -1004.3 - | -1.23457e+020 - | -1.23457e-020 -(5 rows) - diff --git a/src/test/regress/expected/float8-exp-three-digits-win32.out b/src/test/regress/expected/float8-exp-three-digits-win32.out deleted file mode 100644 index 7e1153308f..0000000000 --- a/src/test/regress/expected/float8-exp-three-digits-win32.out +++ /dev/null @@ -1,550 +0,0 @@ --- --- FLOAT8 --- -CREATE TABLE FLOAT8_TBL(f1 float8); -INSERT INTO FLOAT8_TBL(f1) VALUES (' 0.0 '); -INSERT INTO FLOAT8_TBL(f1) VALUES ('1004.30 '); -INSERT INTO FLOAT8_TBL(f1) VALUES (' -34.84'); -INSERT INTO FLOAT8_TBL(f1) VALUES ('1.2345678901234e+200'); -INSERT INTO FLOAT8_TBL(f1) VALUES ('1.2345678901234e-200'); --- test for underflow and overflow handling -SELECT '10e400'::float8; -ERROR: "10e400" is out of range for type double precision -LINE 1: SELECT '10e400'::float8; - ^ -SELECT '-10e400'::float8; -ERROR: "-10e400" is out of range for type double precision -LINE 1: SELECT '-10e400'::float8; - ^ -SELECT '10e-400'::float8; -ERROR: "10e-400" is out of range for type double precision -LINE 1: SELECT '10e-400'::float8; - ^ -SELECT '-10e-400'::float8; -ERROR: "-10e-400" is out of range for type double precision -LINE 1: SELECT '-10e-400'::float8; - ^ --- bad input -INSERT INTO FLOAT8_TBL(f1) VALUES (''); -ERROR: invalid input syntax for type double precision: "" -LINE 1: INSERT INTO FLOAT8_TBL(f1) VALUES (''); - ^ -INSERT INTO FLOAT8_TBL(f1) VALUES (' '); -ERROR: invalid input syntax for type double precision: " " -LINE 1: INSERT INTO FLOAT8_TBL(f1) VALUES (' '); - ^ -INSERT INTO FLOAT8_TBL(f1) VALUES ('xyz'); -ERROR: invalid input syntax for type double precision: "xyz" -LINE 1: INSERT INTO FLOAT8_TBL(f1) VALUES ('xyz'); - ^ -INSERT INTO FLOAT8_TBL(f1) VALUES ('5.0.0'); -ERROR: invalid input syntax for type double precision: "5.0.0" -LINE 1: INSERT INTO FLOAT8_TBL(f1) VALUES ('5.0.0'); - ^ -INSERT INTO FLOAT8_TBL(f1) VALUES ('5 . 0'); -ERROR: invalid input syntax for type double precision: "5 . 0" -LINE 1: INSERT INTO FLOAT8_TBL(f1) VALUES ('5 . 0'); - ^ -INSERT INTO FLOAT8_TBL(f1) VALUES ('5. 0'); -ERROR: invalid input syntax for type double precision: "5. 0" -LINE 1: INSERT INTO FLOAT8_TBL(f1) VALUES ('5. 
0'); - ^ -INSERT INTO FLOAT8_TBL(f1) VALUES (' - 3'); -ERROR: invalid input syntax for type double precision: " - 3" -LINE 1: INSERT INTO FLOAT8_TBL(f1) VALUES (' - 3'); - ^ -INSERT INTO FLOAT8_TBL(f1) VALUES ('123 5'); -ERROR: invalid input syntax for type double precision: "123 5" -LINE 1: INSERT INTO FLOAT8_TBL(f1) VALUES ('123 5'); - ^ --- special inputs -SELECT 'NaN'::float8; - float8 --------- - NaN -(1 row) - -SELECT 'nan'::float8; - float8 --------- - NaN -(1 row) - -SELECT ' NAN '::float8; - float8 --------- - NaN -(1 row) - -SELECT 'infinity'::float8; - float8 ----------- - Infinity -(1 row) - -SELECT ' -INFINiTY '::float8; - float8 ------------ - -Infinity -(1 row) - --- bad special inputs -SELECT 'N A N'::float8; -ERROR: invalid input syntax for type double precision: "N A N" -LINE 1: SELECT 'N A N'::float8; - ^ -SELECT 'NaN x'::float8; -ERROR: invalid input syntax for type double precision: "NaN x" -LINE 1: SELECT 'NaN x'::float8; - ^ -SELECT ' INFINITY x'::float8; -ERROR: invalid input syntax for type double precision: " INFINITY x" -LINE 1: SELECT ' INFINITY x'::float8; - ^ -SELECT 'Infinity'::float8 + 100.0; - ?column? ----------- - Infinity -(1 row) - -SELECT 'Infinity'::float8 / 'Infinity'::float8; - ?column? ----------- - NaN -(1 row) - -SELECT 'nan'::float8 / 'nan'::float8; - ?column? ----------- - NaN -(1 row) - -SELECT 'nan'::numeric::float8; - float8 --------- - NaN -(1 row) - -SELECT '' AS five, * FROM FLOAT8_TBL; - five | f1 -------+---------------------- - | 0 - | 1004.3 - | -34.84 - | 1.2345678901234e+200 - | 1.2345678901234e-200 -(5 rows) - -SELECT '' AS four, f.* FROM FLOAT8_TBL f WHERE f.f1 <> '1004.3'; - four | f1 -------+---------------------- - | 0 - | -34.84 - | 1.2345678901234e+200 - | 1.2345678901234e-200 -(4 rows) - -SELECT '' AS one, f.* FROM FLOAT8_TBL f WHERE f.f1 = '1004.3'; - one | f1 ------+-------- - | 1004.3 -(1 row) - -SELECT '' AS three, f.* FROM FLOAT8_TBL f WHERE '1004.3' > f.f1; - three | f1 --------+---------------------- - | 0 - | -34.84 - | 1.2345678901234e-200 -(3 rows) - -SELECT '' AS three, f.* FROM FLOAT8_TBL f WHERE f.f1 < '1004.3'; - three | f1 --------+---------------------- - | 0 - | -34.84 - | 1.2345678901234e-200 -(3 rows) - -SELECT '' AS four, f.* FROM FLOAT8_TBL f WHERE '1004.3' >= f.f1; - four | f1 -------+---------------------- - | 0 - | 1004.3 - | -34.84 - | 1.2345678901234e-200 -(4 rows) - -SELECT '' AS four, f.* FROM FLOAT8_TBL f WHERE f.f1 <= '1004.3'; - four | f1 -------+---------------------- - | 0 - | 1004.3 - | -34.84 - | 1.2345678901234e-200 -(4 rows) - -SELECT '' AS three, f.f1, f.f1 * '-10' AS x - FROM FLOAT8_TBL f - WHERE f.f1 > '0.0'; - three | f1 | x --------+----------------------+----------------------- - | 1004.3 | -10043 - | 1.2345678901234e+200 | -1.2345678901234e+201 - | 1.2345678901234e-200 | -1.2345678901234e-199 -(3 rows) - -SELECT '' AS three, f.f1, f.f1 + '-10' AS x - FROM FLOAT8_TBL f - WHERE f.f1 > '0.0'; - three | f1 | x --------+----------------------+---------------------- - | 1004.3 | 994.3 - | 1.2345678901234e+200 | 1.2345678901234e+200 - | 1.2345678901234e-200 | -10 -(3 rows) - -SELECT '' AS three, f.f1, f.f1 / '-10' AS x - FROM FLOAT8_TBL f - WHERE f.f1 > '0.0'; - three | f1 | x --------+----------------------+----------------------- - | 1004.3 | -100.43 - | 1.2345678901234e+200 | -1.2345678901234e+199 - | 1.2345678901234e-200 | -1.2345678901234e-201 -(3 rows) - -SELECT '' AS three, f.f1, f.f1 - '-10' AS x - FROM FLOAT8_TBL f - WHERE f.f1 > '0.0'; - three | f1 | x 
--------+----------------------+---------------------- - | 1004.3 | 1014.3 - | 1.2345678901234e+200 | 1.2345678901234e+200 - | 1.2345678901234e-200 | 10 -(3 rows) - -SELECT '' AS one, f.f1 ^ '2.0' AS square_f1 - FROM FLOAT8_TBL f where f.f1 = '1004.3'; - one | square_f1 ------+------------ - | 1008618.49 -(1 row) - --- absolute value -SELECT '' AS five, f.f1, @f.f1 AS abs_f1 - FROM FLOAT8_TBL f; - five | f1 | abs_f1 -------+----------------------+---------------------- - | 0 | 0 - | 1004.3 | 1004.3 - | -34.84 | 34.84 - | 1.2345678901234e+200 | 1.2345678901234e+200 - | 1.2345678901234e-200 | 1.2345678901234e-200 -(5 rows) - --- truncate -SELECT '' AS five, f.f1, trunc(f.f1) AS trunc_f1 - FROM FLOAT8_TBL f; - five | f1 | trunc_f1 -------+----------------------+---------------------- - | 0 | 0 - | 1004.3 | 1004 - | -34.84 | -34 - | 1.2345678901234e+200 | 1.2345678901234e+200 - | 1.2345678901234e-200 | 0 -(5 rows) - --- round -SELECT '' AS five, f.f1, round(f.f1) AS round_f1 - FROM FLOAT8_TBL f; - five | f1 | round_f1 -------+----------------------+---------------------- - | 0 | 0 - | 1004.3 | 1004 - | -34.84 | -35 - | 1.2345678901234e+200 | 1.2345678901234e+200 - | 1.2345678901234e-200 | 0 -(5 rows) - --- ceil / ceiling -select ceil(f1) as ceil_f1 from float8_tbl f; - ceil_f1 ----------------------- - 0 - 1005 - -34 - 1.2345678901234e+200 - 1 -(5 rows) - -select ceiling(f1) as ceiling_f1 from float8_tbl f; - ceiling_f1 ----------------------- - 0 - 1005 - -34 - 1.2345678901234e+200 - 1 -(5 rows) - --- floor -select floor(f1) as floor_f1 from float8_tbl f; - floor_f1 ----------------------- - 0 - 1004 - -35 - 1.2345678901234e+200 - 0 -(5 rows) - --- sign -select sign(f1) as sign_f1 from float8_tbl f; - sign_f1 ---------- - 0 - 1 - -1 - 1 - 1 -(5 rows) - --- square root -SELECT sqrt(float8 '64') AS eight; - eight -------- - 8 -(1 row) - -SELECT |/ float8 '64' AS eight; - eight -------- - 8 -(1 row) - -SELECT '' AS three, f.f1, |/f.f1 AS sqrt_f1 - FROM FLOAT8_TBL f - WHERE f.f1 > '0.0'; - three | f1 | sqrt_f1 --------+----------------------+----------------------- - | 1004.3 | 31.6906926399535 - | 1.2345678901234e+200 | 1.11111110611109e+100 - | 1.2345678901234e-200 | 1.11111110611109e-100 -(3 rows) - --- power -SELECT power(float8 '144', float8 '0.5'); - power -------- - 12 -(1 row) - --- take exp of ln(f.f1) -SELECT '' AS three, f.f1, exp(ln(f.f1)) AS exp_ln_f1 - FROM FLOAT8_TBL f - WHERE f.f1 > '0.0'; - three | f1 | exp_ln_f1 --------+----------------------+----------------------- - | 1004.3 | 1004.3 - | 1.2345678901234e+200 | 1.23456789012338e+200 - | 1.2345678901234e-200 | 1.23456789012339e-200 -(3 rows) - --- cube root -SELECT ||/ float8 '27' AS three; - three -------- - 3 -(1 row) - -SELECT '' AS five, f.f1, ||/f.f1 AS cbrt_f1 FROM FLOAT8_TBL f; - five | f1 | cbrt_f1 -------+----------------------+----------------------- - | 0 | 0 - | 1004.3 | 10.014312837827 - | -34.84 | -3.26607421344208 - | 1.2345678901234e+200 | 4.97933859234765e+066 - | 1.2345678901234e-200 | 2.3112042409018e-067 -(5 rows) - -SELECT '' AS five, * FROM FLOAT8_TBL; - five | f1 -------+---------------------- - | 0 - | 1004.3 - | -34.84 - | 1.2345678901234e+200 - | 1.2345678901234e-200 -(5 rows) - -UPDATE FLOAT8_TBL - SET f1 = FLOAT8_TBL.f1 * '-1' - WHERE FLOAT8_TBL.f1 > '0.0'; -SELECT '' AS bad, f.f1 * '1e200' from FLOAT8_TBL f; -ERROR: value out of range: overflow -SELECT '' AS bad, f.f1 ^ '1e200' from FLOAT8_TBL f; -ERROR: value out of range: overflow -SELECT 0 ^ 0 + 0 ^ 1 + 0 ^ 0.0 + 0 ^ 0.5; - ?column? 
----------- - 2 -(1 row) - -SELECT '' AS bad, ln(f.f1) from FLOAT8_TBL f where f.f1 = '0.0' ; -ERROR: cannot take logarithm of zero -SELECT '' AS bad, ln(f.f1) from FLOAT8_TBL f where f.f1 < '0.0' ; -ERROR: cannot take logarithm of a negative number -SELECT '' AS bad, exp(f.f1) from FLOAT8_TBL f; -ERROR: value out of range: underflow -SELECT '' AS bad, f.f1 / '0.0' from FLOAT8_TBL f; -ERROR: division by zero -SELECT '' AS five, * FROM FLOAT8_TBL; - five | f1 -------+----------------------- - | 0 - | -34.84 - | -1004.3 - | -1.2345678901234e+200 - | -1.2345678901234e-200 -(5 rows) - --- test for over- and underflow -INSERT INTO FLOAT8_TBL(f1) VALUES ('10e400'); -ERROR: "10e400" is out of range for type double precision -LINE 1: INSERT INTO FLOAT8_TBL(f1) VALUES ('10e400'); - ^ -INSERT INTO FLOAT8_TBL(f1) VALUES ('-10e400'); -ERROR: "-10e400" is out of range for type double precision -LINE 1: INSERT INTO FLOAT8_TBL(f1) VALUES ('-10e400'); - ^ -INSERT INTO FLOAT8_TBL(f1) VALUES ('10e-400'); -ERROR: "10e-400" is out of range for type double precision -LINE 1: INSERT INTO FLOAT8_TBL(f1) VALUES ('10e-400'); - ^ -INSERT INTO FLOAT8_TBL(f1) VALUES ('-10e-400'); -ERROR: "-10e-400" is out of range for type double precision -LINE 1: INSERT INTO FLOAT8_TBL(f1) VALUES ('-10e-400'); - ^ --- maintain external table consistency across platforms --- delete all values and reinsert well-behaved ones -DELETE FROM FLOAT8_TBL; -INSERT INTO FLOAT8_TBL(f1) VALUES ('0.0'); -INSERT INTO FLOAT8_TBL(f1) VALUES ('-34.84'); -INSERT INTO FLOAT8_TBL(f1) VALUES ('-1004.30'); -INSERT INTO FLOAT8_TBL(f1) VALUES ('-1.2345678901234e+200'); -INSERT INTO FLOAT8_TBL(f1) VALUES ('-1.2345678901234e-200'); -SELECT '' AS five, * FROM FLOAT8_TBL; - five | f1 -------+----------------------- - | 0 - | -34.84 - | -1004.3 - | -1.2345678901234e+200 - | -1.2345678901234e-200 -(5 rows) - --- test exact cases for trigonometric functions in degrees -SET extra_float_digits = 3; -SELECT x, - sind(x), - sind(x) IN (-1,-0.5,0,0.5,1) AS sind_exact -FROM (VALUES (0), (30), (90), (150), (180), - (210), (270), (330), (360)) AS t(x); - x | sind | sind_exact ------+------+------------ - 0 | 0 | t - 30 | 0.5 | t - 90 | 1 | t - 150 | 0.5 | t - 180 | 0 | t - 210 | -0.5 | t - 270 | -1 | t - 330 | -0.5 | t - 360 | 0 | t -(9 rows) - -SELECT x, - cosd(x), - cosd(x) IN (-1,-0.5,0,0.5,1) AS cosd_exact -FROM (VALUES (0), (60), (90), (120), (180), - (240), (270), (300), (360)) AS t(x); - x | cosd | cosd_exact ------+------+------------ - 0 | 1 | t - 60 | 0.5 | t - 90 | 0 | t - 120 | -0.5 | t - 180 | -1 | t - 240 | -0.5 | t - 270 | 0 | t - 300 | 0.5 | t - 360 | 1 | t -(9 rows) - -SELECT x, - tand(x), - tand(x) IN ('-Infinity'::float8,-1,0, - 1,'Infinity'::float8) AS tand_exact, - cotd(x), - cotd(x) IN ('-Infinity'::float8,-1,0, - 1,'Infinity'::float8) AS cotd_exact -FROM (VALUES (0), (45), (90), (135), (180), - (225), (270), (315), (360)) AS t(x); - x | tand | tand_exact | cotd | cotd_exact ------+-----------+------------+-----------+------------ - 0 | 0 | t | Infinity | t - 45 | 1 | t | 1 | t - 90 | Infinity | t | 0 | t - 135 | -1 | t | -1 | t - 180 | 0 | t | -Infinity | t - 225 | 1 | t | 1 | t - 270 | -Infinity | t | 0 | t - 315 | -1 | t | -1 | t - 360 | 0 | t | Infinity | t -(9 rows) - -SELECT x, - asind(x), - asind(x) IN (-90,-30,0,30,90) AS asind_exact, - acosd(x), - acosd(x) IN (0,60,90,120,180) AS acosd_exact -FROM (VALUES (-1), (-0.5), (0), (0.5), (1)) AS t(x); - x | asind | asind_exact | acosd | acosd_exact 
-------+-------+-------------+-------+------------- - -1 | -90 | t | 180 | t - -0.5 | -30 | t | 120 | t - 0 | 0 | t | 90 | t - 0.5 | 30 | t | 60 | t - 1 | 90 | t | 0 | t -(5 rows) - -SELECT x, - atand(x), - atand(x) IN (-90,-45,0,45,90) AS atand_exact -FROM (VALUES ('-Infinity'::float8), (-1), (0), (1), - ('Infinity'::float8)) AS t(x); - x | atand | atand_exact ------------+-------+------------- - -Infinity | -90 | t - -1 | -45 | t - 0 | 0 | t - 1 | 45 | t - Infinity | 90 | t -(5 rows) - -SELECT x, y, - atan2d(y, x), - atan2d(y, x) IN (-90,0,90,180) AS atan2d_exact -FROM (SELECT 10*cosd(a), 10*sind(a) - FROM generate_series(0, 360, 90) AS t(a)) AS t(x,y); - x | y | atan2d | atan2d_exact ------+-----+--------+-------------- - 10 | 0 | 0 | t - 0 | 10 | 90 | t - -10 | 0 | 180 | t - 0 | -10 | -90 | t - 10 | 0 | 0 | t -(5 rows) - -RESET extra_float_digits; diff --git a/src/test/regress/expected/float8-small-is-zero.out b/src/test/regress/expected/float8-small-is-zero.out index 26b8378150..f8e09390f5 100644 --- a/src/test/regress/expected/float8-small-is-zero.out +++ b/src/test/regress/expected/float8-small-is-zero.out @@ -344,6 +344,42 @@ SELECT power(float8 '144', float8 '0.5'); 12 (1 row) +SELECT power(float8 'NaN', float8 '0.5'); + power +------- + NaN +(1 row) + +SELECT power(float8 '144', float8 'NaN'); + power +------- + NaN +(1 row) + +SELECT power(float8 'NaN', float8 'NaN'); + power +------- + NaN +(1 row) + +SELECT power(float8 '-1', float8 'NaN'); + power +------- + NaN +(1 row) + +SELECT power(float8 '1', float8 'NaN'); + power +------- + 1 +(1 row) + +SELECT power(float8 'NaN', float8 '0'); + power +------- + 1 +(1 row) + -- take exp of ln(f.f1) SELECT '' AS three, f.f1, exp(ln(f.f1)) AS exp_ln_f1 FROM FLOAT8_TBL f diff --git a/src/test/regress/expected/float8-small-is-zero_1.out b/src/test/regress/expected/float8-small-is-zero_1.out deleted file mode 100644 index cea27908eb..0000000000 --- a/src/test/regress/expected/float8-small-is-zero_1.out +++ /dev/null @@ -1,548 +0,0 @@ --- --- FLOAT8 --- -CREATE TABLE FLOAT8_TBL(f1 float8); -INSERT INTO FLOAT8_TBL(f1) VALUES (' 0.0 '); -INSERT INTO FLOAT8_TBL(f1) VALUES ('1004.30 '); -INSERT INTO FLOAT8_TBL(f1) VALUES (' -34.84'); -INSERT INTO FLOAT8_TBL(f1) VALUES ('1.2345678901234e+200'); -INSERT INTO FLOAT8_TBL(f1) VALUES ('1.2345678901234e-200'); --- test for underflow and overflow handling -SELECT '10e400'::float8; -ERROR: "10e400" is out of range for type double precision -LINE 1: SELECT '10e400'::float8; - ^ -SELECT '-10e400'::float8; -ERROR: "-10e400" is out of range for type double precision -LINE 1: SELECT '-10e400'::float8; - ^ -SELECT '10e-400'::float8; - float8 --------- - 0 -(1 row) - -SELECT '-10e-400'::float8; - float8 --------- - 0 -(1 row) - --- bad input -INSERT INTO FLOAT8_TBL(f1) VALUES (''); -ERROR: invalid input syntax for type double precision: "" -LINE 1: INSERT INTO FLOAT8_TBL(f1) VALUES (''); - ^ -INSERT INTO FLOAT8_TBL(f1) VALUES (' '); -ERROR: invalid input syntax for type double precision: " " -LINE 1: INSERT INTO FLOAT8_TBL(f1) VALUES (' '); - ^ -INSERT INTO FLOAT8_TBL(f1) VALUES ('xyz'); -ERROR: invalid input syntax for type double precision: "xyz" -LINE 1: INSERT INTO FLOAT8_TBL(f1) VALUES ('xyz'); - ^ -INSERT INTO FLOAT8_TBL(f1) VALUES ('5.0.0'); -ERROR: invalid input syntax for type double precision: "5.0.0" -LINE 1: INSERT INTO FLOAT8_TBL(f1) VALUES ('5.0.0'); - ^ -INSERT INTO FLOAT8_TBL(f1) VALUES ('5 . 0'); -ERROR: invalid input syntax for type double precision: "5 . 
0" -LINE 1: INSERT INTO FLOAT8_TBL(f1) VALUES ('5 . 0'); - ^ -INSERT INTO FLOAT8_TBL(f1) VALUES ('5. 0'); -ERROR: invalid input syntax for type double precision: "5. 0" -LINE 1: INSERT INTO FLOAT8_TBL(f1) VALUES ('5. 0'); - ^ -INSERT INTO FLOAT8_TBL(f1) VALUES (' - 3'); -ERROR: invalid input syntax for type double precision: " - 3" -LINE 1: INSERT INTO FLOAT8_TBL(f1) VALUES (' - 3'); - ^ -INSERT INTO FLOAT8_TBL(f1) VALUES ('123 5'); -ERROR: invalid input syntax for type double precision: "123 5" -LINE 1: INSERT INTO FLOAT8_TBL(f1) VALUES ('123 5'); - ^ --- special inputs -SELECT 'NaN'::float8; - float8 --------- - NaN -(1 row) - -SELECT 'nan'::float8; - float8 --------- - NaN -(1 row) - -SELECT ' NAN '::float8; - float8 --------- - NaN -(1 row) - -SELECT 'infinity'::float8; - float8 ----------- - Infinity -(1 row) - -SELECT ' -INFINiTY '::float8; - float8 ------------ - -Infinity -(1 row) - --- bad special inputs -SELECT 'N A N'::float8; -ERROR: invalid input syntax for type double precision: "N A N" -LINE 1: SELECT 'N A N'::float8; - ^ -SELECT 'NaN x'::float8; -ERROR: invalid input syntax for type double precision: "NaN x" -LINE 1: SELECT 'NaN x'::float8; - ^ -SELECT ' INFINITY x'::float8; -ERROR: invalid input syntax for type double precision: " INFINITY x" -LINE 1: SELECT ' INFINITY x'::float8; - ^ -SELECT 'Infinity'::float8 + 100.0; - ?column? ----------- - Infinity -(1 row) - -SELECT 'Infinity'::float8 / 'Infinity'::float8; - ?column? ----------- - NaN -(1 row) - -SELECT 'nan'::float8 / 'nan'::float8; - ?column? ----------- - NaN -(1 row) - -SELECT 'nan'::numeric::float8; - float8 --------- - NaN -(1 row) - -SELECT '' AS five, * FROM FLOAT8_TBL; - five | f1 -------+---------------------- - | 0 - | 1004.3 - | -34.84 - | 1.2345678901234e+200 - | 1.2345678901234e-200 -(5 rows) - -SELECT '' AS four, f.* FROM FLOAT8_TBL f WHERE f.f1 <> '1004.3'; - four | f1 -------+---------------------- - | 0 - | -34.84 - | 1.2345678901234e+200 - | 1.2345678901234e-200 -(4 rows) - -SELECT '' AS one, f.* FROM FLOAT8_TBL f WHERE f.f1 = '1004.3'; - one | f1 ------+-------- - | 1004.3 -(1 row) - -SELECT '' AS three, f.* FROM FLOAT8_TBL f WHERE '1004.3' > f.f1; - three | f1 --------+---------------------- - | 0 - | -34.84 - | 1.2345678901234e-200 -(3 rows) - -SELECT '' AS three, f.* FROM FLOAT8_TBL f WHERE f.f1 < '1004.3'; - three | f1 --------+---------------------- - | 0 - | -34.84 - | 1.2345678901234e-200 -(3 rows) - -SELECT '' AS four, f.* FROM FLOAT8_TBL f WHERE '1004.3' >= f.f1; - four | f1 -------+---------------------- - | 0 - | 1004.3 - | -34.84 - | 1.2345678901234e-200 -(4 rows) - -SELECT '' AS four, f.* FROM FLOAT8_TBL f WHERE f.f1 <= '1004.3'; - four | f1 -------+---------------------- - | 0 - | 1004.3 - | -34.84 - | 1.2345678901234e-200 -(4 rows) - -SELECT '' AS three, f.f1, f.f1 * '-10' AS x - FROM FLOAT8_TBL f - WHERE f.f1 > '0.0'; - three | f1 | x --------+----------------------+----------------------- - | 1004.3 | -10043 - | 1.2345678901234e+200 | -1.2345678901234e+201 - | 1.2345678901234e-200 | -1.2345678901234e-199 -(3 rows) - -SELECT '' AS three, f.f1, f.f1 + '-10' AS x - FROM FLOAT8_TBL f - WHERE f.f1 > '0.0'; - three | f1 | x --------+----------------------+---------------------- - | 1004.3 | 994.3 - | 1.2345678901234e+200 | 1.2345678901234e+200 - | 1.2345678901234e-200 | -10 -(3 rows) - -SELECT '' AS three, f.f1, f.f1 / '-10' AS x - FROM FLOAT8_TBL f - WHERE f.f1 > '0.0'; - three | f1 | x --------+----------------------+----------------------- - | 1004.3 | -100.43 - | 
1.2345678901234e+200 | -1.2345678901234e+199 - | 1.2345678901234e-200 | -1.2345678901234e-201 -(3 rows) - -SELECT '' AS three, f.f1, f.f1 - '-10' AS x - FROM FLOAT8_TBL f - WHERE f.f1 > '0.0'; - three | f1 | x --------+----------------------+---------------------- - | 1004.3 | 1014.3 - | 1.2345678901234e+200 | 1.2345678901234e+200 - | 1.2345678901234e-200 | 10 -(3 rows) - -SELECT '' AS one, f.f1 ^ '2.0' AS square_f1 - FROM FLOAT8_TBL f where f.f1 = '1004.3'; - one | square_f1 ------+------------ - | 1008618.49 -(1 row) - --- absolute value -SELECT '' AS five, f.f1, @f.f1 AS abs_f1 - FROM FLOAT8_TBL f; - five | f1 | abs_f1 -------+----------------------+---------------------- - | 0 | 0 - | 1004.3 | 1004.3 - | -34.84 | 34.84 - | 1.2345678901234e+200 | 1.2345678901234e+200 - | 1.2345678901234e-200 | 1.2345678901234e-200 -(5 rows) - --- truncate -SELECT '' AS five, f.f1, trunc(f.f1) AS trunc_f1 - FROM FLOAT8_TBL f; - five | f1 | trunc_f1 -------+----------------------+---------------------- - | 0 | 0 - | 1004.3 | 1004 - | -34.84 | -34 - | 1.2345678901234e+200 | 1.2345678901234e+200 - | 1.2345678901234e-200 | 0 -(5 rows) - --- round -SELECT '' AS five, f.f1, round(f.f1) AS round_f1 - FROM FLOAT8_TBL f; - five | f1 | round_f1 -------+----------------------+---------------------- - | 0 | 0 - | 1004.3 | 1004 - | -34.84 | -35 - | 1.2345678901234e+200 | 1.2345678901234e+200 - | 1.2345678901234e-200 | 0 -(5 rows) - --- ceil / ceiling -select ceil(f1) as ceil_f1 from float8_tbl f; - ceil_f1 ----------------------- - 0 - 1005 - -34 - 1.2345678901234e+200 - 1 -(5 rows) - -select ceiling(f1) as ceiling_f1 from float8_tbl f; - ceiling_f1 ----------------------- - 0 - 1005 - -34 - 1.2345678901234e+200 - 1 -(5 rows) - --- floor -select floor(f1) as floor_f1 from float8_tbl f; - floor_f1 ----------------------- - 0 - 1004 - -35 - 1.2345678901234e+200 - 0 -(5 rows) - --- sign -select sign(f1) as sign_f1 from float8_tbl f; - sign_f1 ---------- - 0 - 1 - -1 - 1 - 1 -(5 rows) - --- square root -SELECT sqrt(float8 '64') AS eight; - eight -------- - 8 -(1 row) - -SELECT |/ float8 '64' AS eight; - eight -------- - 8 -(1 row) - -SELECT '' AS three, f.f1, |/f.f1 AS sqrt_f1 - FROM FLOAT8_TBL f - WHERE f.f1 > '0.0'; - three | f1 | sqrt_f1 --------+----------------------+----------------------- - | 1004.3 | 31.6906926399535 - | 1.2345678901234e+200 | 1.11111110611109e+100 - | 1.2345678901234e-200 | 1.11111110611109e-100 -(3 rows) - --- power -SELECT power(float8 '144', float8 '0.5'); - power -------- - 12 -(1 row) - --- take exp of ln(f.f1) -SELECT '' AS three, f.f1, exp(ln(f.f1)) AS exp_ln_f1 - FROM FLOAT8_TBL f - WHERE f.f1 > '0.0'; - three | f1 | exp_ln_f1 --------+----------------------+----------------------- - | 1004.3 | 1004.3 - | 1.2345678901234e+200 | 1.23456789012338e+200 - | 1.2345678901234e-200 | 1.23456789012339e-200 -(3 rows) - --- cube root -SELECT ||/ float8 '27' AS three; - three -------- - 3 -(1 row) - -SELECT '' AS five, f.f1, ||/f.f1 AS cbrt_f1 FROM FLOAT8_TBL f; - five | f1 | cbrt_f1 -------+----------------------+---------------------- - | 0 | 0 - | 1004.3 | 10.014312837827 - | -34.84 | -3.26607421344208 - | 1.2345678901234e+200 | 4.97933859234765e+66 - | 1.2345678901234e-200 | 2.3112042409018e-67 -(5 rows) - -SELECT '' AS five, * FROM FLOAT8_TBL; - five | f1 -------+---------------------- - | 0 - | 1004.3 - | -34.84 - | 1.2345678901234e+200 - | 1.2345678901234e-200 -(5 rows) - -UPDATE FLOAT8_TBL - SET f1 = FLOAT8_TBL.f1 * '-1' - WHERE FLOAT8_TBL.f1 > '0.0'; -SELECT '' AS bad, f.f1 * '1e200' from 
FLOAT8_TBL f; -ERROR: value out of range: overflow -SELECT '' AS bad, f.f1 ^ '1e200' from FLOAT8_TBL f; -ERROR: value out of range: overflow -SELECT 0 ^ 0 + 0 ^ 1 + 0 ^ 0.0 + 0 ^ 0.5; - ?column? ----------- - 2 -(1 row) - -SELECT '' AS bad, ln(f.f1) from FLOAT8_TBL f where f.f1 = '0.0' ; -ERROR: cannot take logarithm of zero -SELECT '' AS bad, ln(f.f1) from FLOAT8_TBL f where f.f1 < '0.0' ; -ERROR: cannot take logarithm of a negative number -SELECT '' AS bad, exp(f.f1) from FLOAT8_TBL f; -ERROR: value out of range: underflow -SELECT '' AS bad, f.f1 / '0.0' from FLOAT8_TBL f; -ERROR: division by zero -SELECT '' AS five, * FROM FLOAT8_TBL; - five | f1 -------+----------------------- - | 0 - | -34.84 - | -1004.3 - | -1.2345678901234e+200 - | -1.2345678901234e-200 -(5 rows) - --- test for over- and underflow -INSERT INTO FLOAT8_TBL(f1) VALUES ('10e400'); -ERROR: "10e400" is out of range for type double precision -LINE 1: INSERT INTO FLOAT8_TBL(f1) VALUES ('10e400'); - ^ -INSERT INTO FLOAT8_TBL(f1) VALUES ('-10e400'); -ERROR: "-10e400" is out of range for type double precision -LINE 1: INSERT INTO FLOAT8_TBL(f1) VALUES ('-10e400'); - ^ -INSERT INTO FLOAT8_TBL(f1) VALUES ('10e-400'); -INSERT INTO FLOAT8_TBL(f1) VALUES ('-10e-400'); --- maintain external table consistency across platforms --- delete all values and reinsert well-behaved ones -DELETE FROM FLOAT8_TBL; -INSERT INTO FLOAT8_TBL(f1) VALUES ('0.0'); -INSERT INTO FLOAT8_TBL(f1) VALUES ('-34.84'); -INSERT INTO FLOAT8_TBL(f1) VALUES ('-1004.30'); -INSERT INTO FLOAT8_TBL(f1) VALUES ('-1.2345678901234e+200'); -INSERT INTO FLOAT8_TBL(f1) VALUES ('-1.2345678901234e-200'); -SELECT '' AS five, * FROM FLOAT8_TBL; - five | f1 -------+----------------------- - | 0 - | -34.84 - | -1004.3 - | -1.2345678901234e+200 - | -1.2345678901234e-200 -(5 rows) - --- test exact cases for trigonometric functions in degrees -SET extra_float_digits = 3; -SELECT x, - sind(x), - sind(x) IN (-1,-0.5,0,0.5,1) AS sind_exact -FROM (VALUES (0), (30), (90), (150), (180), - (210), (270), (330), (360)) AS t(x); - x | sind | sind_exact ------+------+------------ - 0 | 0 | t - 30 | 0.5 | t - 90 | 1 | t - 150 | 0.5 | t - 180 | 0 | t - 210 | -0.5 | t - 270 | -1 | t - 330 | -0.5 | t - 360 | 0 | t -(9 rows) - -SELECT x, - cosd(x), - cosd(x) IN (-1,-0.5,0,0.5,1) AS cosd_exact -FROM (VALUES (0), (60), (90), (120), (180), - (240), (270), (300), (360)) AS t(x); - x | cosd | cosd_exact ------+------+------------ - 0 | 1 | t - 60 | 0.5 | t - 90 | 0 | t - 120 | -0.5 | t - 180 | -1 | t - 240 | -0.5 | t - 270 | 0 | t - 300 | 0.5 | t - 360 | 1 | t -(9 rows) - -SELECT x, - tand(x), - tand(x) IN ('-Infinity'::float8,-1,0, - 1,'Infinity'::float8) AS tand_exact, - cotd(x), - cotd(x) IN ('-Infinity'::float8,-1,0, - 1,'Infinity'::float8) AS cotd_exact -FROM (VALUES (0), (45), (90), (135), (180), - (225), (270), (315), (360)) AS t(x); - x | tand | tand_exact | cotd | cotd_exact ------+-----------+------------+-----------+------------ - 0 | 0 | t | Infinity | t - 45 | 1 | t | 1 | t - 90 | Infinity | t | 0 | t - 135 | -1 | t | -1 | t - 180 | 0 | t | -Infinity | t - 225 | 1 | t | 1 | t - 270 | -Infinity | t | 0 | t - 315 | -1 | t | -1 | t - 360 | 0 | t | Infinity | t -(9 rows) - -SELECT x, - asind(x), - asind(x) IN (-90,-30,0,30,90) AS asind_exact, - acosd(x), - acosd(x) IN (0,60,90,120,180) AS acosd_exact -FROM (VALUES (-1), (-0.5), (0), (0.5), (1)) AS t(x); - x | asind | asind_exact | acosd | acosd_exact -------+-------+-------------+-------+------------- - -1 | -90 | t | 180 | t - -0.5 | -30 | t | 
120 | t - 0 | 0 | t | 90 | t - 0.5 | 30 | t | 60 | t - 1 | 90 | t | 0 | t -(5 rows) - -SELECT x, - atand(x), - atand(x) IN (-90,-45,0,45,90) AS atand_exact -FROM (VALUES ('-Infinity'::float8), (-1), (0), (1), - ('Infinity'::float8)) AS t(x); - x | atand | atand_exact ------------+-------+------------- - -Infinity | -90 | t - -1 | -45 | t - 0 | 0 | t - 1 | 45 | t - Infinity | 90 | t -(5 rows) - -SELECT x, y, - atan2d(y, x), - atan2d(y, x) IN (-90,0,90,180) AS atan2d_exact -FROM (SELECT 10*cosd(a), 10*sind(a) - FROM generate_series(0, 360, 90) AS t(a)) AS t(x,y); - x | y | atan2d | atan2d_exact ------+-----+--------+-------------- - 10 | 0 | 0 | t - 0 | 10 | 90 | t - -10 | 0 | 180 | t - 0 | -10 | -90 | t - 10 | 0 | 0 | t -(5 rows) - -RESET extra_float_digits; diff --git a/src/test/regress/expected/float8.out b/src/test/regress/expected/float8.out index 20c985e5df..b05831d45c 100644 --- a/src/test/regress/expected/float8.out +++ b/src/test/regress/expected/float8.out @@ -340,6 +340,42 @@ SELECT power(float8 '144', float8 '0.5'); 12 (1 row) +SELECT power(float8 'NaN', float8 '0.5'); + power +------- + NaN +(1 row) + +SELECT power(float8 '144', float8 'NaN'); + power +------- + NaN +(1 row) + +SELECT power(float8 'NaN', float8 'NaN'); + power +------- + NaN +(1 row) + +SELECT power(float8 '-1', float8 'NaN'); + power +------- + NaN +(1 row) + +SELECT power(float8 '1', float8 'NaN'); + power +------- + 1 +(1 row) + +SELECT power(float8 'NaN', float8 '0'); + power +------- + 1 +(1 row) + -- take exp of ln(f.f1) SELECT '' AS three, f.f1, exp(ln(f.f1)) AS exp_ln_f1 FROM FLOAT8_TBL f diff --git a/src/test/regress/expected/foreign_data.out b/src/test/regress/expected/foreign_data.out index 927d0189a0..75365501d4 100644 --- a/src/test/regress/expected/foreign_data.out +++ b/src/test/regress/expected/foreign_data.out @@ -89,6 +89,14 @@ CREATE FOREIGN DATA WRAPPER foo VALIDATOR postgresql_fdw_validator; postgresql | regress_foreign_data_user | - | postgresql_fdw_validator | | | (3 rows) +-- HANDLER related checks +CREATE FUNCTION invalid_fdw_handler() RETURNS int LANGUAGE SQL AS 'SELECT 1;'; +CREATE FOREIGN DATA WRAPPER test_fdw HANDLER invalid_fdw_handler; -- ERROR +ERROR: function invalid_fdw_handler must return type fdw_handler +CREATE FOREIGN DATA WRAPPER test_fdw HANDLER test_fdw_handler HANDLER invalid_fdw_handler; -- ERROR +ERROR: conflicting or redundant options +CREATE FOREIGN DATA WRAPPER test_fdw HANDLER test_fdw_handler; +DROP FOREIGN DATA WRAPPER test_fdw; -- ALTER FOREIGN DATA WRAPPER ALTER FOREIGN DATA WRAPPER foo; -- ERROR ERROR: syntax error at or near ";" @@ -188,18 +196,26 @@ ALTER FOREIGN DATA WRAPPER foo RENAME TO foo1; (3 rows) ALTER FOREIGN DATA WRAPPER foo1 RENAME TO foo; +-- HANDLER related checks +ALTER FOREIGN DATA WRAPPER foo HANDLER invalid_fdw_handler; -- ERROR +ERROR: function invalid_fdw_handler must return type fdw_handler +ALTER FOREIGN DATA WRAPPER foo HANDLER test_fdw_handler HANDLER anything; -- ERROR +ERROR: conflicting or redundant options +ALTER FOREIGN DATA WRAPPER foo HANDLER test_fdw_handler; +WARNING: changing the foreign-data wrapper handler can change behavior of existing foreign tables +DROP FUNCTION invalid_fdw_handler(); -- DROP FOREIGN DATA WRAPPER DROP FOREIGN DATA WRAPPER nonexistent; -- ERROR ERROR: foreign-data wrapper "nonexistent" does not exist DROP FOREIGN DATA WRAPPER IF EXISTS nonexistent; NOTICE: foreign-data wrapper "nonexistent" does not exist, skipping \dew+ - List of foreign-data wrappers - Name | Owner | Handler | Validator | Access 
privileges | FDW options | Description -------------+---------------------------+---------+--------------------------+-------------------+------------------------------+------------- - dummy | regress_foreign_data_user | - | - | | | useless - foo | regress_test_role_super | - | - | | (b '3', c '4', a '2', d '5') | - postgresql | regress_foreign_data_user | - | postgresql_fdw_validator | | | + List of foreign-data wrappers + Name | Owner | Handler | Validator | Access privileges | FDW options | Description +------------+---------------------------+------------------+--------------------------+-------------------+------------------------------+------------- + dummy | regress_foreign_data_user | - | - | | | useless + foo | regress_test_role_super | test_fdw_handler | - | | (b '3', c '4', a '2', d '5') | + postgresql | regress_foreign_data_user | - | postgresql_fdw_validator | | | (3 rows) DROP ROLE regress_test_role_super; -- ERROR @@ -733,6 +749,13 @@ SELECT * FROM ft1; -- ERROR ERROR: foreign-data wrapper "dummy" has no handler EXPLAIN SELECT * FROM ft1; -- ERROR ERROR: foreign-data wrapper "dummy" has no handler +CREATE TABLE lt1 (a INT) PARTITION BY RANGE (a); +CREATE FOREIGN TABLE ft_part1 + PARTITION OF lt1 FOR VALUES FROM (0) TO (1000) SERVER s0; +CREATE INDEX ON lt1 (a); -- ERROR +ERROR: cannot create index on partitioned table "lt1" +DETAIL: Table "lt1" contains partitions that are foreign tables. +DROP TABLE lt1; -- ALTER FOREIGN TABLE COMMENT ON FOREIGN TABLE ft1 IS 'foreign table'; COMMENT ON FOREIGN TABLE ft1 IS NULL; @@ -1306,15 +1329,15 @@ DROP TRIGGER trigtest_after_stmt ON foreign_schema.foreign_table_1; DROP TRIGGER trigtest_after_row ON foreign_schema.foreign_table_1; DROP FUNCTION dummy_trigger(); -- Table inheritance -CREATE TABLE pt1 ( +CREATE TABLE fd_pt1 ( c1 integer NOT NULL, c2 text, c3 date ); -CREATE FOREIGN TABLE ft2 () INHERITS (pt1) +CREATE FOREIGN TABLE ft2 () INHERITS (fd_pt1) SERVER s0 OPTIONS (delimiter ',', quote '"', "be quoted" 'value'); -\d+ pt1 - Table "public.pt1" +\d+ fd_pt1 + Table "public.fd_pt1" Column | Type | Collation | Nullable | Default | Storage | Stats target | Description --------+---------+-----------+----------+---------+----------+--------------+------------- c1 | integer | | not null | | plain | | @@ -1331,11 +1354,11 @@ Child tables: ft2 c3 | date | | | | | plain | | Server: s0 FDW options: (delimiter ',', quote '"', "be quoted" 'value') -Inherits: pt1 +Inherits: fd_pt1 DROP FOREIGN TABLE ft2; -\d+ pt1 - Table "public.pt1" +\d+ fd_pt1 + Table "public.fd_pt1" Column | Type | Collation | Nullable | Default | Storage | Stats target | Description --------+---------+-----------+----------+---------+----------+--------------+------------- c1 | integer | | not null | | plain | | @@ -1357,9 +1380,9 @@ CREATE FOREIGN TABLE ft2 ( Server: s0 FDW options: (delimiter ',', quote '"', "be quoted" 'value') -ALTER FOREIGN TABLE ft2 INHERIT pt1; -\d+ pt1 - Table "public.pt1" +ALTER FOREIGN TABLE ft2 INHERIT fd_pt1; +\d+ fd_pt1 + Table "public.fd_pt1" Column | Type | Collation | Nullable | Default | Storage | Stats target | Description --------+---------+-----------+----------+---------+----------+--------------+------------- c1 | integer | | not null | | plain | | @@ -1376,7 +1399,7 @@ Child tables: ft2 c3 | date | | | | | plain | | Server: s0 FDW options: (delimiter ',', quote '"', "be quoted" 'value') -Inherits: pt1 +Inherits: fd_pt1 CREATE TABLE ct3() INHERITS(ft2); CREATE FOREIGN TABLE ft3 ( @@ -1397,7 +1420,7 @@ NOTICE: merging column "c3" with 
inherited definition c3 | date | | | | | plain | | Server: s0 FDW options: (delimiter ',', quote '"', "be quoted" 'value') -Inherits: pt1 +Inherits: fd_pt1 Child tables: ct3, ft3 @@ -1421,13 +1444,13 @@ Server: s0 Inherits: ft2 -- add attributes recursively -ALTER TABLE pt1 ADD COLUMN c4 integer; -ALTER TABLE pt1 ADD COLUMN c5 integer DEFAULT 0; -ALTER TABLE pt1 ADD COLUMN c6 integer; -ALTER TABLE pt1 ADD COLUMN c7 integer NOT NULL; -ALTER TABLE pt1 ADD COLUMN c8 integer; -\d+ pt1 - Table "public.pt1" +ALTER TABLE fd_pt1 ADD COLUMN c4 integer; +ALTER TABLE fd_pt1 ADD COLUMN c5 integer DEFAULT 0; +ALTER TABLE fd_pt1 ADD COLUMN c6 integer; +ALTER TABLE fd_pt1 ADD COLUMN c7 integer NOT NULL; +ALTER TABLE fd_pt1 ADD COLUMN c8 integer; +\d+ fd_pt1 + Table "public.fd_pt1" Column | Type | Collation | Nullable | Default | Storage | Stats target | Description --------+---------+-----------+----------+---------+----------+--------------+------------- c1 | integer | | not null | | plain | | @@ -1454,7 +1477,7 @@ Child tables: ft2 c8 | integer | | | | | plain | | Server: s0 FDW options: (delimiter ',', quote '"', "be quoted" 'value') -Inherits: pt1 +Inherits: fd_pt1 Child tables: ct3, ft3 @@ -1488,20 +1511,20 @@ Server: s0 Inherits: ft2 -- alter attributes recursively -ALTER TABLE pt1 ALTER COLUMN c4 SET DEFAULT 0; -ALTER TABLE pt1 ALTER COLUMN c5 DROP DEFAULT; -ALTER TABLE pt1 ALTER COLUMN c6 SET NOT NULL; -ALTER TABLE pt1 ALTER COLUMN c7 DROP NOT NULL; -ALTER TABLE pt1 ALTER COLUMN c8 TYPE char(10) USING '0'; -- ERROR +ALTER TABLE fd_pt1 ALTER COLUMN c4 SET DEFAULT 0; +ALTER TABLE fd_pt1 ALTER COLUMN c5 DROP DEFAULT; +ALTER TABLE fd_pt1 ALTER COLUMN c6 SET NOT NULL; +ALTER TABLE fd_pt1 ALTER COLUMN c7 DROP NOT NULL; +ALTER TABLE fd_pt1 ALTER COLUMN c8 TYPE char(10) USING '0'; -- ERROR ERROR: "ft2" is not a table -ALTER TABLE pt1 ALTER COLUMN c8 TYPE char(10); -ALTER TABLE pt1 ALTER COLUMN c8 SET DATA TYPE text; -ALTER TABLE pt1 ALTER COLUMN c1 SET STATISTICS 10000; -ALTER TABLE pt1 ALTER COLUMN c1 SET (n_distinct = 100); -ALTER TABLE pt1 ALTER COLUMN c8 SET STATISTICS -1; -ALTER TABLE pt1 ALTER COLUMN c8 SET STORAGE EXTERNAL; -\d+ pt1 - Table "public.pt1" +ALTER TABLE fd_pt1 ALTER COLUMN c8 TYPE char(10); +ALTER TABLE fd_pt1 ALTER COLUMN c8 SET DATA TYPE text; +ALTER TABLE fd_pt1 ALTER COLUMN c1 SET STATISTICS 10000; +ALTER TABLE fd_pt1 ALTER COLUMN c1 SET (n_distinct = 100); +ALTER TABLE fd_pt1 ALTER COLUMN c8 SET STATISTICS -1; +ALTER TABLE fd_pt1 ALTER COLUMN c8 SET STORAGE EXTERNAL; +\d+ fd_pt1 + Table "public.fd_pt1" Column | Type | Collation | Nullable | Default | Storage | Stats target | Description --------+---------+-----------+----------+---------+----------+--------------+------------- c1 | integer | | not null | | plain | 10000 | @@ -1528,18 +1551,18 @@ Child tables: ft2 c8 | text | | | | | external | | Server: s0 FDW options: (delimiter ',', quote '"', "be quoted" 'value') -Inherits: pt1 +Inherits: fd_pt1 Child tables: ct3, ft3 -- drop attributes recursively -ALTER TABLE pt1 DROP COLUMN c4; -ALTER TABLE pt1 DROP COLUMN c5; -ALTER TABLE pt1 DROP COLUMN c6; -ALTER TABLE pt1 DROP COLUMN c7; -ALTER TABLE pt1 DROP COLUMN c8; -\d+ pt1 - Table "public.pt1" +ALTER TABLE fd_pt1 DROP COLUMN c4; +ALTER TABLE fd_pt1 DROP COLUMN c5; +ALTER TABLE fd_pt1 DROP COLUMN c6; +ALTER TABLE fd_pt1 DROP COLUMN c7; +ALTER TABLE fd_pt1 DROP COLUMN c8; +\d+ fd_pt1 + Table "public.fd_pt1" Column | Type | Collation | Nullable | Default | Storage | Stats target | Description 
--------+---------+-----------+----------+---------+----------+--------------+------------- c1 | integer | | not null | | plain | 10000 | @@ -1556,35 +1579,35 @@ Child tables: ft2 c3 | date | | | | | plain | | Server: s0 FDW options: (delimiter ',', quote '"', "be quoted" 'value') -Inherits: pt1 +Inherits: fd_pt1 Child tables: ct3, ft3 -- add constraints recursively -ALTER TABLE pt1 ADD CONSTRAINT pt1chk1 CHECK (c1 > 0) NO INHERIT; -ALTER TABLE pt1 ADD CONSTRAINT pt1chk2 CHECK (c2 <> ''); +ALTER TABLE fd_pt1 ADD CONSTRAINT fd_pt1chk1 CHECK (c1 > 0) NO INHERIT; +ALTER TABLE fd_pt1 ADD CONSTRAINT fd_pt1chk2 CHECK (c2 <> ''); -- connoinherit should be true for NO INHERIT constraint SELECT relname, conname, contype, conislocal, coninhcount, connoinherit FROM pg_class AS pc JOIN pg_constraint AS pgc ON (conrelid = pc.oid) - WHERE pc.relname = 'pt1' + WHERE pc.relname = 'fd_pt1' ORDER BY 1,2; - relname | conname | contype | conislocal | coninhcount | connoinherit ----------+---------+---------+------------+-------------+-------------- - pt1 | pt1chk1 | c | t | 0 | t - pt1 | pt1chk2 | c | t | 0 | f + relname | conname | contype | conislocal | coninhcount | connoinherit +---------+------------+---------+------------+-------------+-------------- + fd_pt1 | fd_pt1chk1 | c | t | 0 | t + fd_pt1 | fd_pt1chk2 | c | t | 0 | f (2 rows) -- child does not inherit NO INHERIT constraints -\d+ pt1 - Table "public.pt1" +\d+ fd_pt1 + Table "public.fd_pt1" Column | Type | Collation | Nullable | Default | Storage | Stats target | Description --------+---------+-----------+----------+---------+----------+--------------+------------- c1 | integer | | not null | | plain | 10000 | c2 | text | | | | extended | | c3 | date | | | | plain | | Check constraints: - "pt1chk1" CHECK (c1 > 0) NO INHERIT - "pt1chk2" CHECK (c2 <> ''::text) + "fd_pt1chk1" CHECK (c1 > 0) NO INHERIT + "fd_pt1chk2" CHECK (c2 <> ''::text) Child tables: ft2 \d+ ft2 @@ -1595,10 +1618,10 @@ Child tables: ft2 c2 | text | | | | | extended | | c3 | date | | | | | plain | | Check constraints: - "pt1chk2" CHECK (c2 <> ''::text) + "fd_pt1chk2" CHECK (c2 <> ''::text) Server: s0 FDW options: (delimiter ',', quote '"', "be quoted" 'value') -Inherits: pt1 +Inherits: fd_pt1 Child tables: ct3, ft3 @@ -1614,21 +1637,21 @@ CREATE FOREIGN TABLE ft2 ( c3 date ) SERVER s0 OPTIONS (delimiter ',', quote '"', "be quoted" 'value'); -- child must have parent's INHERIT constraints -ALTER FOREIGN TABLE ft2 INHERIT pt1; -- ERROR -ERROR: child table is missing constraint "pt1chk2" -ALTER FOREIGN TABLE ft2 ADD CONSTRAINT pt1chk2 CHECK (c2 <> ''); -ALTER FOREIGN TABLE ft2 INHERIT pt1; +ALTER FOREIGN TABLE ft2 INHERIT fd_pt1; -- ERROR +ERROR: child table is missing constraint "fd_pt1chk2" +ALTER FOREIGN TABLE ft2 ADD CONSTRAINT fd_pt1chk2 CHECK (c2 <> ''); +ALTER FOREIGN TABLE ft2 INHERIT fd_pt1; -- child does not inherit NO INHERIT constraints -\d+ pt1 - Table "public.pt1" +\d+ fd_pt1 + Table "public.fd_pt1" Column | Type | Collation | Nullable | Default | Storage | Stats target | Description --------+---------+-----------+----------+---------+----------+--------------+------------- c1 | integer | | not null | | plain | 10000 | c2 | text | | | | extended | | c3 | date | | | | plain | | Check constraints: - "pt1chk1" CHECK (c1 > 0) NO INHERIT - "pt1chk2" CHECK (c2 <> ''::text) + "fd_pt1chk1" CHECK (c1 > 0) NO INHERIT + "fd_pt1chk2" CHECK (c2 <> ''::text) Child tables: ft2 \d+ ft2 @@ -1639,26 +1662,26 @@ Child tables: ft2 c2 | text | | | | | extended | | c3 | date | | | | | plain | 
| Check constraints: - "pt1chk2" CHECK (c2 <> ''::text) + "fd_pt1chk2" CHECK (c2 <> ''::text) Server: s0 FDW options: (delimiter ',', quote '"', "be quoted" 'value') -Inherits: pt1 +Inherits: fd_pt1 -- drop constraints recursively -ALTER TABLE pt1 DROP CONSTRAINT pt1chk1 CASCADE; -ALTER TABLE pt1 DROP CONSTRAINT pt1chk2 CASCADE; +ALTER TABLE fd_pt1 DROP CONSTRAINT fd_pt1chk1 CASCADE; +ALTER TABLE fd_pt1 DROP CONSTRAINT fd_pt1chk2 CASCADE; -- NOT VALID case -INSERT INTO pt1 VALUES (1, 'pt1'::text, '1994-01-01'::date); -ALTER TABLE pt1 ADD CONSTRAINT pt1chk3 CHECK (c2 <> '') NOT VALID; -\d+ pt1 - Table "public.pt1" +INSERT INTO fd_pt1 VALUES (1, 'fd_pt1'::text, '1994-01-01'::date); +ALTER TABLE fd_pt1 ADD CONSTRAINT fd_pt1chk3 CHECK (c2 <> '') NOT VALID; +\d+ fd_pt1 + Table "public.fd_pt1" Column | Type | Collation | Nullable | Default | Storage | Stats target | Description --------+---------+-----------+----------+---------+----------+--------------+------------- c1 | integer | | not null | | plain | 10000 | c2 | text | | | | extended | | c3 | date | | | | plain | | Check constraints: - "pt1chk3" CHECK (c2 <> ''::text) NOT VALID + "fd_pt1chk3" CHECK (c2 <> ''::text) NOT VALID Child tables: ft2 \d+ ft2 @@ -1669,23 +1692,23 @@ Child tables: ft2 c2 | text | | | | | extended | | c3 | date | | | | | plain | | Check constraints: - "pt1chk2" CHECK (c2 <> ''::text) - "pt1chk3" CHECK (c2 <> ''::text) NOT VALID + "fd_pt1chk2" CHECK (c2 <> ''::text) + "fd_pt1chk3" CHECK (c2 <> ''::text) NOT VALID Server: s0 FDW options: (delimiter ',', quote '"', "be quoted" 'value') -Inherits: pt1 +Inherits: fd_pt1 -- VALIDATE CONSTRAINT need do nothing on foreign tables -ALTER TABLE pt1 VALIDATE CONSTRAINT pt1chk3; -\d+ pt1 - Table "public.pt1" +ALTER TABLE fd_pt1 VALIDATE CONSTRAINT fd_pt1chk3; +\d+ fd_pt1 + Table "public.fd_pt1" Column | Type | Collation | Nullable | Default | Storage | Stats target | Description --------+---------+-----------+----------+---------+----------+--------------+------------- c1 | integer | | not null | | plain | 10000 | c2 | text | | | | extended | | c3 | date | | | | plain | | Check constraints: - "pt1chk3" CHECK (c2 <> ''::text) + "fd_pt1chk3" CHECK (c2 <> ''::text) Child tables: ft2 \d+ ft2 @@ -1696,23 +1719,23 @@ Child tables: ft2 c2 | text | | | | | extended | | c3 | date | | | | | plain | | Check constraints: - "pt1chk2" CHECK (c2 <> ''::text) - "pt1chk3" CHECK (c2 <> ''::text) + "fd_pt1chk2" CHECK (c2 <> ''::text) + "fd_pt1chk3" CHECK (c2 <> ''::text) Server: s0 FDW options: (delimiter ',', quote '"', "be quoted" 'value') -Inherits: pt1 +Inherits: fd_pt1 -- OID system column -ALTER TABLE pt1 SET WITH OIDS; -\d+ pt1 - Table "public.pt1" +ALTER TABLE fd_pt1 SET WITH OIDS; +\d+ fd_pt1 + Table "public.fd_pt1" Column | Type | Collation | Nullable | Default | Storage | Stats target | Description --------+---------+-----------+----------+---------+----------+--------------+------------- c1 | integer | | not null | | plain | 10000 | c2 | text | | | | extended | | c3 | date | | | | plain | | Check constraints: - "pt1chk3" CHECK (c2 <> ''::text) + "fd_pt1chk3" CHECK (c2 <> ''::text) Child tables: ft2 Has OIDs: yes @@ -1724,25 +1747,25 @@ Has OIDs: yes c2 | text | | | | | extended | | c3 | date | | | | | plain | | Check constraints: - "pt1chk2" CHECK (c2 <> ''::text) - "pt1chk3" CHECK (c2 <> ''::text) + "fd_pt1chk2" CHECK (c2 <> ''::text) + "fd_pt1chk3" CHECK (c2 <> ''::text) Server: s0 FDW options: (delimiter ',', quote '"', "be quoted" 'value') -Inherits: pt1 +Inherits: fd_pt1 Has OIDs: 
yes ALTER TABLE ft2 SET WITHOUT OIDS; -- ERROR ERROR: cannot drop inherited column "oid" -ALTER TABLE pt1 SET WITHOUT OIDS; -\d+ pt1 - Table "public.pt1" +ALTER TABLE fd_pt1 SET WITHOUT OIDS; +\d+ fd_pt1 + Table "public.fd_pt1" Column | Type | Collation | Nullable | Default | Storage | Stats target | Description --------+---------+-----------+----------+---------+----------+--------------+------------- c1 | integer | | not null | | plain | 10000 | c2 | text | | | | extended | | c3 | date | | | | plain | | Check constraints: - "pt1chk3" CHECK (c2 <> ''::text) + "fd_pt1chk3" CHECK (c2 <> ''::text) Child tables: ft2 \d+ ft2 @@ -1753,20 +1776,20 @@ Child tables: ft2 c2 | text | | | | | extended | | c3 | date | | | | | plain | | Check constraints: - "pt1chk2" CHECK (c2 <> ''::text) - "pt1chk3" CHECK (c2 <> ''::text) + "fd_pt1chk2" CHECK (c2 <> ''::text) + "fd_pt1chk3" CHECK (c2 <> ''::text) Server: s0 FDW options: (delimiter ',', quote '"', "be quoted" 'value') -Inherits: pt1 +Inherits: fd_pt1 -- changes name of an attribute recursively -ALTER TABLE pt1 RENAME COLUMN c1 TO f1; -ALTER TABLE pt1 RENAME COLUMN c2 TO f2; -ALTER TABLE pt1 RENAME COLUMN c3 TO f3; +ALTER TABLE fd_pt1 RENAME COLUMN c1 TO f1; +ALTER TABLE fd_pt1 RENAME COLUMN c2 TO f2; +ALTER TABLE fd_pt1 RENAME COLUMN c3 TO f3; -- changes name of a constraint recursively -ALTER TABLE pt1 RENAME CONSTRAINT pt1chk3 TO f2_check; -\d+ pt1 - Table "public.pt1" +ALTER TABLE fd_pt1 RENAME CONSTRAINT fd_pt1chk3 TO f2_check; +\d+ fd_pt1 + Table "public.fd_pt1" Column | Type | Collation | Nullable | Default | Storage | Stats target | Description --------+---------+-----------+----------+---------+----------+--------------+------------- f1 | integer | | not null | | plain | 10000 | @@ -1785,17 +1808,17 @@ Child tables: ft2 f3 | date | | | | | plain | | Check constraints: "f2_check" CHECK (f2 <> ''::text) - "pt1chk2" CHECK (f2 <> ''::text) + "fd_pt1chk2" CHECK (f2 <> ''::text) Server: s0 FDW options: (delimiter ',', quote '"', "be quoted" 'value') -Inherits: pt1 +Inherits: fd_pt1 -- TRUNCATE doesn't work on foreign tables, either directly or recursively TRUNCATE ft2; -- ERROR ERROR: "ft2" is not a table -TRUNCATE pt1; -- ERROR +TRUNCATE fd_pt1; -- ERROR ERROR: "ft2" is not a table -DROP TABLE pt1 CASCADE; +DROP TABLE fd_pt1 CASCADE; NOTICE: drop cascades to foreign table ft2 -- IMPORT FOREIGN SCHEMA IMPORT FOREIGN SCHEMA s1 FROM SERVER s9 INTO public; -- ERROR @@ -1822,45 +1845,45 @@ HINT: Use DROP ... CASCADE to drop the dependent objects too. 
DROP OWNED BY regress_test_role2 CASCADE; NOTICE: drop cascades to user mapping for regress_test_role on server s5 -- Foreign partition DDL stuff -CREATE TABLE pt2 ( +CREATE TABLE fd_pt2 ( c1 integer NOT NULL, c2 text, c3 date ) PARTITION BY LIST (c1); -CREATE FOREIGN TABLE pt2_1 PARTITION OF pt2 FOR VALUES IN (1) +CREATE FOREIGN TABLE fd_pt2_1 PARTITION OF fd_pt2 FOR VALUES IN (1) SERVER s0 OPTIONS (delimiter ',', quote '"', "be quoted" 'value'); -\d+ pt2 - Table "public.pt2" +\d+ fd_pt2 + Table "public.fd_pt2" Column | Type | Collation | Nullable | Default | Storage | Stats target | Description --------+---------+-----------+----------+---------+----------+--------------+------------- c1 | integer | | not null | | plain | | c2 | text | | | | extended | | c3 | date | | | | plain | | Partition key: LIST (c1) -Partitions: pt2_1 FOR VALUES IN (1) +Partitions: fd_pt2_1 FOR VALUES IN (1) -\d+ pt2_1 - Foreign table "public.pt2_1" +\d+ fd_pt2_1 + Foreign table "public.fd_pt2_1" Column | Type | Collation | Nullable | Default | FDW options | Storage | Stats target | Description --------+---------+-----------+----------+---------+-------------+----------+--------------+------------- c1 | integer | | not null | | | plain | | c2 | text | | | | | extended | | c3 | date | | | | | plain | | -Partition of: pt2 FOR VALUES IN (1) -Partition constraint: ((c1 IS NOT NULL) AND (c1 = ANY (ARRAY[1]))) +Partition of: fd_pt2 FOR VALUES IN (1) +Partition constraint: ((c1 IS NOT NULL) AND (c1 = 1)) Server: s0 FDW options: (delimiter ',', quote '"', "be quoted" 'value') -- partition cannot have additional columns -DROP FOREIGN TABLE pt2_1; -CREATE FOREIGN TABLE pt2_1 ( +DROP FOREIGN TABLE fd_pt2_1; +CREATE FOREIGN TABLE fd_pt2_1 ( c1 integer NOT NULL, c2 text, c3 date, c4 char ) SERVER s0 OPTIONS (delimiter ',', quote '"', "be quoted" 'value'); -\d+ pt2_1 - Foreign table "public.pt2_1" +\d+ fd_pt2_1 + Foreign table "public.fd_pt2_1" Column | Type | Collation | Nullable | Default | FDW options | Storage | Stats target | Description --------+--------------+-----------+----------+---------+-------------+----------+--------------+------------- c1 | integer | | not null | | | plain | | @@ -1870,26 +1893,27 @@ CREATE FOREIGN TABLE pt2_1 ( Server: s0 FDW options: (delimiter ',', quote '"', "be quoted" 'value') -ALTER TABLE pt2 ATTACH PARTITION pt2_1 FOR VALUES IN (1); -- ERROR -ERROR: table "pt2_1" contains column "c4" not found in parent "pt2" -DETAIL: New partition should contain only the columns present in parent. -DROP FOREIGN TABLE pt2_1; -\d+ pt2 - Table "public.pt2" +ALTER TABLE fd_pt2 ATTACH PARTITION fd_pt2_1 FOR VALUES IN (1); -- ERROR +ERROR: table "fd_pt2_1" contains column "c4" not found in parent "fd_pt2" +DETAIL: The new partition may contain only the columns present in parent. 
+DROP FOREIGN TABLE fd_pt2_1; +\d+ fd_pt2 + Table "public.fd_pt2" Column | Type | Collation | Nullable | Default | Storage | Stats target | Description --------+---------+-----------+----------+---------+----------+--------------+------------- c1 | integer | | not null | | plain | | c2 | text | | | | extended | | c3 | date | | | | plain | | Partition key: LIST (c1) +Number of partitions: 0 -CREATE FOREIGN TABLE pt2_1 ( +CREATE FOREIGN TABLE fd_pt2_1 ( c1 integer NOT NULL, c2 text, c3 date ) SERVER s0 OPTIONS (delimiter ',', quote '"', "be quoted" 'value'); -\d+ pt2_1 - Foreign table "public.pt2_1" +\d+ fd_pt2_1 + Foreign table "public.fd_pt2_1" Column | Type | Collation | Nullable | Default | FDW options | Storage | Stats target | Description --------+---------+-----------+----------+---------+-------------+----------+--------------+------------- c1 | integer | | not null | | | plain | | @@ -1899,76 +1923,77 @@ Server: s0 FDW options: (delimiter ',', quote '"', "be quoted" 'value') -- no attach partition validation occurs for foreign tables -ALTER TABLE pt2 ATTACH PARTITION pt2_1 FOR VALUES IN (1); -\d+ pt2 - Table "public.pt2" +ALTER TABLE fd_pt2 ATTACH PARTITION fd_pt2_1 FOR VALUES IN (1); +\d+ fd_pt2 + Table "public.fd_pt2" Column | Type | Collation | Nullable | Default | Storage | Stats target | Description --------+---------+-----------+----------+---------+----------+--------------+------------- c1 | integer | | not null | | plain | | c2 | text | | | | extended | | c3 | date | | | | plain | | Partition key: LIST (c1) -Partitions: pt2_1 FOR VALUES IN (1) +Partitions: fd_pt2_1 FOR VALUES IN (1) -\d+ pt2_1 - Foreign table "public.pt2_1" +\d+ fd_pt2_1 + Foreign table "public.fd_pt2_1" Column | Type | Collation | Nullable | Default | FDW options | Storage | Stats target | Description --------+---------+-----------+----------+---------+-------------+----------+--------------+------------- c1 | integer | | not null | | | plain | | c2 | text | | | | | extended | | c3 | date | | | | | plain | | -Partition of: pt2 FOR VALUES IN (1) -Partition constraint: ((c1 IS NOT NULL) AND (c1 = ANY (ARRAY[1]))) +Partition of: fd_pt2 FOR VALUES IN (1) +Partition constraint: ((c1 IS NOT NULL) AND (c1 = 1)) Server: s0 FDW options: (delimiter ',', quote '"', "be quoted" 'value') -- cannot add column to a partition -ALTER TABLE pt2_1 ADD c4 char; +ALTER TABLE fd_pt2_1 ADD c4 char; ERROR: cannot add column to a partition -- ok to have a partition's own constraints though -ALTER TABLE pt2_1 ALTER c3 SET NOT NULL; -ALTER TABLE pt2_1 ADD CONSTRAINT p21chk CHECK (c2 <> ''); -\d+ pt2 - Table "public.pt2" +ALTER TABLE fd_pt2_1 ALTER c3 SET NOT NULL; +ALTER TABLE fd_pt2_1 ADD CONSTRAINT p21chk CHECK (c2 <> ''); +\d+ fd_pt2 + Table "public.fd_pt2" Column | Type | Collation | Nullable | Default | Storage | Stats target | Description --------+---------+-----------+----------+---------+----------+--------------+------------- c1 | integer | | not null | | plain | | c2 | text | | | | extended | | c3 | date | | | | plain | | Partition key: LIST (c1) -Partitions: pt2_1 FOR VALUES IN (1) +Partitions: fd_pt2_1 FOR VALUES IN (1) -\d+ pt2_1 - Foreign table "public.pt2_1" +\d+ fd_pt2_1 + Foreign table "public.fd_pt2_1" Column | Type | Collation | Nullable | Default | FDW options | Storage | Stats target | Description --------+---------+-----------+----------+---------+-------------+----------+--------------+------------- c1 | integer | | not null | | | plain | | c2 | text | | | | | extended | | c3 | date | | not null | | | plain | 
| -Partition of: pt2 FOR VALUES IN (1) -Partition constraint: ((c1 IS NOT NULL) AND (c1 = ANY (ARRAY[1]))) +Partition of: fd_pt2 FOR VALUES IN (1) +Partition constraint: ((c1 IS NOT NULL) AND (c1 = 1)) Check constraints: "p21chk" CHECK (c2 <> ''::text) Server: s0 FDW options: (delimiter ',', quote '"', "be quoted" 'value') -- cannot drop inherited NOT NULL constraint from a partition -ALTER TABLE pt2_1 ALTER c1 DROP NOT NULL; +ALTER TABLE fd_pt2_1 ALTER c1 DROP NOT NULL; ERROR: column "c1" is marked NOT NULL in parent table -- partition must have parent's constraints -ALTER TABLE pt2 DETACH PARTITION pt2_1; -ALTER TABLE pt2 ALTER c2 SET NOT NULL; -\d+ pt2 - Table "public.pt2" +ALTER TABLE fd_pt2 DETACH PARTITION fd_pt2_1; +ALTER TABLE fd_pt2 ALTER c2 SET NOT NULL; +\d+ fd_pt2 + Table "public.fd_pt2" Column | Type | Collation | Nullable | Default | Storage | Stats target | Description --------+---------+-----------+----------+---------+----------+--------------+------------- c1 | integer | | not null | | plain | | c2 | text | | not null | | extended | | c3 | date | | | | plain | | Partition key: LIST (c1) +Number of partitions: 0 -\d+ pt2_1 - Foreign table "public.pt2_1" +\d+ fd_pt2_1 + Foreign table "public.fd_pt2_1" Column | Type | Collation | Nullable | Default | FDW options | Storage | Stats target | Description --------+---------+-----------+----------+---------+-------------+----------+--------------+------------- c1 | integer | | not null | | | plain | | @@ -1979,14 +2004,14 @@ Check constraints: Server: s0 FDW options: (delimiter ',', quote '"', "be quoted" 'value') -ALTER TABLE pt2 ATTACH PARTITION pt2_1 FOR VALUES IN (1); -- ERROR +ALTER TABLE fd_pt2 ATTACH PARTITION fd_pt2_1 FOR VALUES IN (1); -- ERROR ERROR: column "c2" in child table must be marked NOT NULL -ALTER FOREIGN TABLE pt2_1 ALTER c2 SET NOT NULL; -ALTER TABLE pt2 ATTACH PARTITION pt2_1 FOR VALUES IN (1); -ALTER TABLE pt2 DETACH PARTITION pt2_1; -ALTER TABLE pt2 ADD CONSTRAINT pt2chk1 CHECK (c1 > 0); -\d+ pt2 - Table "public.pt2" +ALTER FOREIGN TABLE fd_pt2_1 ALTER c2 SET NOT NULL; +ALTER TABLE fd_pt2 ATTACH PARTITION fd_pt2_1 FOR VALUES IN (1); +ALTER TABLE fd_pt2 DETACH PARTITION fd_pt2_1; +ALTER TABLE fd_pt2 ADD CONSTRAINT fd_pt2chk1 CHECK (c1 > 0); +\d+ fd_pt2 + Table "public.fd_pt2" Column | Type | Collation | Nullable | Default | Storage | Stats target | Description --------+---------+-----------+----------+---------+----------+--------------+------------- c1 | integer | | not null | | plain | | @@ -1994,10 +2019,11 @@ ALTER TABLE pt2 ADD CONSTRAINT pt2chk1 CHECK (c1 > 0); c3 | date | | | | plain | | Partition key: LIST (c1) Check constraints: - "pt2chk1" CHECK (c1 > 0) + "fd_pt2chk1" CHECK (c1 > 0) +Number of partitions: 0 -\d+ pt2_1 - Foreign table "public.pt2_1" +\d+ fd_pt2_1 + Foreign table "public.fd_pt2_1" Column | Type | Collation | Nullable | Default | FDW options | Storage | Stats target | Description --------+---------+-----------+----------+---------+-------------+----------+--------------+------------- c1 | integer | | not null | | | plain | | @@ -2008,17 +2034,28 @@ Check constraints: Server: s0 FDW options: (delimiter ',', quote '"', "be quoted" 'value') -ALTER TABLE pt2 ATTACH PARTITION pt2_1 FOR VALUES IN (1); -- ERROR -ERROR: child table is missing constraint "pt2chk1" -ALTER FOREIGN TABLE pt2_1 ADD CONSTRAINT pt2chk1 CHECK (c1 > 0); -ALTER TABLE pt2 ATTACH PARTITION pt2_1 FOR VALUES IN (1); +ALTER TABLE fd_pt2 ATTACH PARTITION fd_pt2_1 FOR VALUES IN (1); -- ERROR +ERROR: child table is missing 
constraint "fd_pt2chk1" +ALTER FOREIGN TABLE fd_pt2_1 ADD CONSTRAINT fd_pt2chk1 CHECK (c1 > 0); +ALTER TABLE fd_pt2 ATTACH PARTITION fd_pt2_1 FOR VALUES IN (1); -- TRUNCATE doesn't work on foreign tables, either directly or recursively -TRUNCATE pt2_1; -- ERROR -ERROR: "pt2_1" is not a table -TRUNCATE pt2; -- ERROR -ERROR: "pt2_1" is not a table -DROP FOREIGN TABLE pt2_1; -DROP TABLE pt2; +TRUNCATE fd_pt2_1; -- ERROR +ERROR: "fd_pt2_1" is not a table +TRUNCATE fd_pt2; -- ERROR +ERROR: "fd_pt2_1" is not a table +DROP FOREIGN TABLE fd_pt2_1; +DROP TABLE fd_pt2; +-- foreign table cannot be part of partition tree made of temporary +-- relations. +CREATE TEMP TABLE temp_parted (a int) PARTITION BY LIST (a); +CREATE FOREIGN TABLE foreign_part PARTITION OF temp_parted DEFAULT + SERVER s0; -- ERROR +ERROR: cannot create a permanent relation as partition of temporary relation "temp_parted" +CREATE FOREIGN TABLE foreign_part (a int) SERVER s0; +ALTER TABLE temp_parted ATTACH PARTITION foreign_part DEFAULT; -- ERROR +ERROR: cannot attach a permanent relation as partition of temporary relation "temp_parted" +DROP FOREIGN TABLE foreign_part; +DROP TABLE temp_parted; -- Cleanup DROP SCHEMA foreign_schema CASCADE; DROP ROLE regress_test_role; -- ERROR diff --git a/src/test/regress/expected/foreign_key.out b/src/test/regress/expected/foreign_key.out index fef072eddf..fee594531d 100644 --- a/src/test/regress/expected/foreign_key.out +++ b/src/test/regress/expected/foreign_key.out @@ -1381,6 +1381,19 @@ explain (costs off) delete from t1 where a = 1; (10 rows) delete from t1 where a = 1; +-- Test a primary key with attributes located in later attnum positions +-- compared to the fk attributes. +create table pktable2 (a int, b int, c int, d int, e int, primary key (d, e)); +create table fktable2 (d int, e int, foreign key (d, e) references pktable2); +insert into pktable2 values (1, 2, 3, 4, 5); +insert into fktable2 values (4, 5); +delete from pktable2; +ERROR: update or delete on table "pktable2" violates foreign key constraint "fktable2_d_fkey" on table "fktable2" +DETAIL: Key (d, e)=(4, 5) is still referenced from table "fktable2". +update pktable2 set d = 5; +ERROR: update or delete on table "pktable2" violates foreign key constraint "fktable2_d_fkey" on table "fktable2" +DETAIL: Key (d, e)=(4, 5) is still referenced from table "fktable2". +drop table pktable2, fktable2; -- -- Test deferred FK check on a tuple deleted by a rolled-back subtransaction -- @@ -1415,3 +1428,356 @@ alter table fktable2 drop constraint fktable2_f1_fkey; ERROR: cannot ALTER TABLE "pktable2" because it has pending trigger events commit; drop table pktable2, fktable2; +-- +-- Foreign keys and partitioned tables +-- +-- partitioned table in the referenced side are not allowed +CREATE TABLE fk_partitioned_pk (a int, b int, primary key (a, b)) + PARTITION BY RANGE (a, b); +-- verify with create table first ... +CREATE TABLE fk_notpartitioned_fk (a int, b int, + FOREIGN KEY (a, b) REFERENCES fk_partitioned_pk); +ERROR: cannot reference partitioned table "fk_partitioned_pk" +-- and then with alter table. 
+CREATE TABLE fk_notpartitioned_fk_2 (a int, b int); +ALTER TABLE fk_notpartitioned_fk_2 ADD FOREIGN KEY (a, b) + REFERENCES fk_partitioned_pk; +ERROR: cannot reference partitioned table "fk_partitioned_pk" +DROP TABLE fk_partitioned_pk, fk_notpartitioned_fk_2; +-- Creation of a partitioned hierarchy with irregular definitions +CREATE TABLE fk_notpartitioned_pk (fdrop1 int, a int, fdrop2 int, b int, + PRIMARY KEY (a, b)); +ALTER TABLE fk_notpartitioned_pk DROP COLUMN fdrop1, DROP COLUMN fdrop2; +CREATE TABLE fk_partitioned_fk (b int, fdrop1 int, a int) PARTITION BY RANGE (a, b); +ALTER TABLE fk_partitioned_fk DROP COLUMN fdrop1; +CREATE TABLE fk_partitioned_fk_1 (fdrop1 int, fdrop2 int, a int, fdrop3 int, b int); +ALTER TABLE fk_partitioned_fk_1 DROP COLUMN fdrop1, DROP COLUMN fdrop2, DROP COLUMN fdrop3; +ALTER TABLE fk_partitioned_fk ATTACH PARTITION fk_partitioned_fk_1 FOR VALUES FROM (0,0) TO (1000,1000); +ALTER TABLE fk_partitioned_fk ADD FOREIGN KEY (a, b) REFERENCES fk_notpartitioned_pk; +CREATE TABLE fk_partitioned_fk_2 (b int, fdrop1 int, fdrop2 int, a int); +ALTER TABLE fk_partitioned_fk_2 DROP COLUMN fdrop1, DROP COLUMN fdrop2; +ALTER TABLE fk_partitioned_fk ATTACH PARTITION fk_partitioned_fk_2 FOR VALUES FROM (1000,1000) TO (2000,2000); +CREATE TABLE fk_partitioned_fk_3 (fdrop1 int, fdrop2 int, fdrop3 int, fdrop4 int, b int, a int) + PARTITION BY HASH (a); +ALTER TABLE fk_partitioned_fk_3 DROP COLUMN fdrop1, DROP COLUMN fdrop2, + DROP COLUMN fdrop3, DROP COLUMN fdrop4; +CREATE TABLE fk_partitioned_fk_3_0 PARTITION OF fk_partitioned_fk_3 FOR VALUES WITH (MODULUS 5, REMAINDER 0); +CREATE TABLE fk_partitioned_fk_3_1 PARTITION OF fk_partitioned_fk_3 FOR VALUES WITH (MODULUS 5, REMAINDER 1); +ALTER TABLE fk_partitioned_fk ATTACH PARTITION fk_partitioned_fk_3 + FOR VALUES FROM (2000,2000) TO (3000,3000); +-- Creating a foreign key with ONLY on a partitioned table referencing +-- a non-partitioned table fails. +ALTER TABLE ONLY fk_partitioned_fk ADD FOREIGN KEY (a, b) + REFERENCES fk_notpartitioned_pk; +ERROR: cannot use ONLY for foreign key on partitioned table "fk_partitioned_fk" referencing relation "fk_notpartitioned_pk" +-- Adding a NOT VALID foreign key on a partitioned table referencing +-- a non-partitioned table fails. +ALTER TABLE fk_partitioned_fk ADD FOREIGN KEY (a, b) + REFERENCES fk_notpartitioned_pk NOT VALID; +ERROR: cannot add NOT VALID foreign key on partitioned table "fk_partitioned_fk" referencing relation "fk_notpartitioned_pk" +DETAIL: This feature is not yet supported on partitioned tables. +-- these inserts, targeting both the partition directly as well as the +-- partitioned table, should all fail +INSERT INTO fk_partitioned_fk (a,b) VALUES (500, 501); +ERROR: insert or update on table "fk_partitioned_fk_1" violates foreign key constraint "fk_partitioned_fk_a_fkey" +DETAIL: Key (a, b)=(500, 501) is not present in table "fk_notpartitioned_pk". +INSERT INTO fk_partitioned_fk_1 (a,b) VALUES (500, 501); +ERROR: insert or update on table "fk_partitioned_fk_1" violates foreign key constraint "fk_partitioned_fk_a_fkey" +DETAIL: Key (a, b)=(500, 501) is not present in table "fk_notpartitioned_pk". +INSERT INTO fk_partitioned_fk (a,b) VALUES (1500, 1501); +ERROR: insert or update on table "fk_partitioned_fk_2" violates foreign key constraint "fk_partitioned_fk_a_fkey" +DETAIL: Key (a, b)=(1500, 1501) is not present in table "fk_notpartitioned_pk". 
+INSERT INTO fk_partitioned_fk_2 (a,b) VALUES (1500, 1501); +ERROR: insert or update on table "fk_partitioned_fk_2" violates foreign key constraint "fk_partitioned_fk_a_fkey" +DETAIL: Key (a, b)=(1500, 1501) is not present in table "fk_notpartitioned_pk". +INSERT INTO fk_partitioned_fk (a,b) VALUES (2500, 2502); +ERROR: insert or update on table "fk_partitioned_fk_3_1" violates foreign key constraint "fk_partitioned_fk_a_fkey" +DETAIL: Key (a, b)=(2500, 2502) is not present in table "fk_notpartitioned_pk". +INSERT INTO fk_partitioned_fk_3 (a,b) VALUES (2500, 2502); +ERROR: insert or update on table "fk_partitioned_fk_3_1" violates foreign key constraint "fk_partitioned_fk_a_fkey" +DETAIL: Key (a, b)=(2500, 2502) is not present in table "fk_notpartitioned_pk". +INSERT INTO fk_partitioned_fk (a,b) VALUES (2501, 2503); +ERROR: insert or update on table "fk_partitioned_fk_3_0" violates foreign key constraint "fk_partitioned_fk_a_fkey" +DETAIL: Key (a, b)=(2501, 2503) is not present in table "fk_notpartitioned_pk". +INSERT INTO fk_partitioned_fk_3 (a,b) VALUES (2501, 2503); +ERROR: insert or update on table "fk_partitioned_fk_3_0" violates foreign key constraint "fk_partitioned_fk_a_fkey" +DETAIL: Key (a, b)=(2501, 2503) is not present in table "fk_notpartitioned_pk". +-- but if we insert the values that make them valid, then they work +INSERT INTO fk_notpartitioned_pk VALUES (500, 501), (1500, 1501), + (2500, 2502), (2501, 2503); +INSERT INTO fk_partitioned_fk (a,b) VALUES (500, 501); +INSERT INTO fk_partitioned_fk (a,b) VALUES (1500, 1501); +INSERT INTO fk_partitioned_fk (a,b) VALUES (2500, 2502); +INSERT INTO fk_partitioned_fk (a,b) VALUES (2501, 2503); +-- this update fails because there is no referenced row +UPDATE fk_partitioned_fk SET a = a + 1 WHERE a = 2501; +ERROR: insert or update on table "fk_partitioned_fk_3_1" violates foreign key constraint "fk_partitioned_fk_a_fkey" +DETAIL: Key (a, b)=(2502, 2503) is not present in table "fk_notpartitioned_pk". +-- but we can fix it thusly: +INSERT INTO fk_notpartitioned_pk (a,b) VALUES (2502, 2503); +UPDATE fk_partitioned_fk SET a = a + 1 WHERE a = 2501; +-- these updates would leave lingering rows in the referencing table; disallow +UPDATE fk_notpartitioned_pk SET b = 502 WHERE a = 500; +ERROR: update or delete on table "fk_notpartitioned_pk" violates foreign key constraint "fk_partitioned_fk_a_fkey" on table "fk_partitioned_fk" +DETAIL: Key (a, b)=(500, 501) is still referenced from table "fk_partitioned_fk". +UPDATE fk_notpartitioned_pk SET b = 1502 WHERE a = 1500; +ERROR: update or delete on table "fk_notpartitioned_pk" violates foreign key constraint "fk_partitioned_fk_a_fkey" on table "fk_partitioned_fk" +DETAIL: Key (a, b)=(1500, 1501) is still referenced from table "fk_partitioned_fk". +UPDATE fk_notpartitioned_pk SET b = 2504 WHERE a = 2500; +ERROR: update or delete on table "fk_notpartitioned_pk" violates foreign key constraint "fk_partitioned_fk_a_fkey" on table "fk_partitioned_fk" +DETAIL: Key (a, b)=(2500, 2502) is still referenced from table "fk_partitioned_fk". +ALTER TABLE fk_partitioned_fk DROP CONSTRAINT fk_partitioned_fk_a_fkey; +-- done. +DROP TABLE fk_notpartitioned_pk, fk_partitioned_fk; +-- Altering a type referenced by a foreign key needs to drop/recreate the FK. +-- Ensure that works. 
+CREATE TABLE fk_notpartitioned_pk (a INT, PRIMARY KEY(a), CHECK (a > 0)); +CREATE TABLE fk_partitioned_fk (a INT REFERENCES fk_notpartitioned_pk(a) PRIMARY KEY) PARTITION BY RANGE(a); +CREATE TABLE fk_partitioned_fk_1 PARTITION OF fk_partitioned_fk FOR VALUES FROM (MINVALUE) TO (MAXVALUE); +INSERT INTO fk_notpartitioned_pk VALUES (1); +INSERT INTO fk_partitioned_fk VALUES (1); +ALTER TABLE fk_notpartitioned_pk ALTER COLUMN a TYPE bigint; +DELETE FROM fk_notpartitioned_pk WHERE a = 1; +ERROR: update or delete on table "fk_notpartitioned_pk" violates foreign key constraint "fk_partitioned_fk_a_fkey" on table "fk_partitioned_fk" +DETAIL: Key (a)=(1) is still referenced from table "fk_partitioned_fk". +DROP TABLE fk_notpartitioned_pk, fk_partitioned_fk; +-- Test some other exotic foreign key features: MATCH SIMPLE, ON UPDATE/DELETE +-- actions +CREATE TABLE fk_notpartitioned_pk (a int, b int, primary key (a, b)); +CREATE TABLE fk_partitioned_fk (a int default 2501, b int default 142857) PARTITION BY LIST (a); +CREATE TABLE fk_partitioned_fk_1 PARTITION OF fk_partitioned_fk FOR VALUES IN (NULL,500,501,502); +ALTER TABLE fk_partitioned_fk ADD FOREIGN KEY (a, b) + REFERENCES fk_notpartitioned_pk MATCH SIMPLE + ON DELETE SET NULL ON UPDATE SET NULL; +CREATE TABLE fk_partitioned_fk_2 PARTITION OF fk_partitioned_fk FOR VALUES IN (1500,1502); +CREATE TABLE fk_partitioned_fk_3 (a int, b int); +ALTER TABLE fk_partitioned_fk ATTACH PARTITION fk_partitioned_fk_3 FOR VALUES IN (2500,2501,2502,2503); +-- this insert fails +INSERT INTO fk_partitioned_fk (a, b) VALUES (2502, 2503); +ERROR: insert or update on table "fk_partitioned_fk_3" violates foreign key constraint "fk_partitioned_fk_a_fkey" +DETAIL: Key (a, b)=(2502, 2503) is not present in table "fk_notpartitioned_pk". +INSERT INTO fk_partitioned_fk_3 (a, b) VALUES (2502, 2503); +ERROR: insert or update on table "fk_partitioned_fk_3" violates foreign key constraint "fk_partitioned_fk_a_fkey" +DETAIL: Key (a, b)=(2502, 2503) is not present in table "fk_notpartitioned_pk". +-- but since the FK is MATCH SIMPLE, this one doesn't +INSERT INTO fk_partitioned_fk_3 (a, b) VALUES (2502, NULL); +-- now create the referenced row ... 
+INSERT INTO fk_notpartitioned_pk VALUES (2502, 2503);
+-- and now the same insert works
+INSERT INTO fk_partitioned_fk_3 (a, b) VALUES (2502, 2503);
+-- this always works
+INSERT INTO fk_partitioned_fk (a,b) VALUES (NULL, NULL);
+-- ON UPDATE SET NULL
+SELECT tableoid::regclass, a, b FROM fk_partitioned_fk WHERE b IS NULL ORDER BY a;
+ tableoid | a | b
+---------------------+------+---
+ fk_partitioned_fk_3 | 2502 |
+ fk_partitioned_fk_1 | |
+(2 rows)
+
+UPDATE fk_notpartitioned_pk SET a = a + 1 WHERE a = 2502;
+SELECT tableoid::regclass, a, b FROM fk_partitioned_fk WHERE b IS NULL ORDER BY a;
+ tableoid | a | b
+---------------------+------+---
+ fk_partitioned_fk_3 | 2502 |
+ fk_partitioned_fk_1 | |
+ fk_partitioned_fk_1 | |
+(3 rows)
+
+-- ON DELETE SET NULL
+INSERT INTO fk_partitioned_fk VALUES (2503, 2503);
+SELECT count(*) FROM fk_partitioned_fk WHERE a IS NULL;
+ count
+-------
+ 2
+(1 row)
+
+DELETE FROM fk_notpartitioned_pk;
+SELECT count(*) FROM fk_partitioned_fk WHERE a IS NULL;
+ count
+-------
+ 3
+(1 row)
+
+-- ON UPDATE/DELETE SET DEFAULT
+ALTER TABLE fk_partitioned_fk DROP CONSTRAINT fk_partitioned_fk_a_fkey;
+ALTER TABLE fk_partitioned_fk ADD FOREIGN KEY (a, b)
+ REFERENCES fk_notpartitioned_pk
+ ON DELETE SET DEFAULT ON UPDATE SET DEFAULT;
+INSERT INTO fk_notpartitioned_pk VALUES (2502, 2503);
+INSERT INTO fk_partitioned_fk_3 (a, b) VALUES (2502, 2503);
+-- this fails, because the defaults for the referencing table are not present
+-- in the referenced table:
+UPDATE fk_notpartitioned_pk SET a = 1500 WHERE a = 2502;
+ERROR: insert or update on table "fk_partitioned_fk_3" violates foreign key constraint "fk_partitioned_fk_a_fkey"
+DETAIL: Key (a, b)=(2501, 142857) is not present in table "fk_notpartitioned_pk".
+-- but by inserting the row we can make it work:
+INSERT INTO fk_notpartitioned_pk VALUES (2501, 142857);
+UPDATE fk_notpartitioned_pk SET a = 1500 WHERE a = 2502;
+SELECT * FROM fk_partitioned_fk WHERE b = 142857;
+ a | b
+------+--------
+ 2501 | 142857
+(1 row)
+
+-- ON UPDATE/DELETE CASCADE
+ALTER TABLE fk_partitioned_fk DROP CONSTRAINT fk_partitioned_fk_a_fkey;
+ALTER TABLE fk_partitioned_fk ADD FOREIGN KEY (a, b)
+ REFERENCES fk_notpartitioned_pk
+ ON DELETE CASCADE ON UPDATE CASCADE;
+UPDATE fk_notpartitioned_pk SET a = 2502 WHERE a = 2501;
+SELECT * FROM fk_partitioned_fk WHERE b = 142857;
+ a | b
+------+--------
+ 2502 | 142857
+(1 row)
+
+-- Now you see it ...
+SELECT * FROM fk_partitioned_fk WHERE b = 142857;
+ a | b
+------+--------
+ 2502 | 142857
+(1 row)
+
+DELETE FROM fk_notpartitioned_pk WHERE b = 142857;
+-- now you don't.
+SELECT * FROM fk_partitioned_fk WHERE a = 142857;
+ a | b
+---+---
+(0 rows)
+
+-- verify that DROP works
+DROP TABLE fk_partitioned_fk_2;
+-- Test behavior of the constraint together with attaching and detaching
+-- partitions.
+CREATE TABLE fk_partitioned_fk_2 PARTITION OF fk_partitioned_fk FOR VALUES IN (1500,1502); +ALTER TABLE fk_partitioned_fk DETACH PARTITION fk_partitioned_fk_2; +BEGIN; +DROP TABLE fk_partitioned_fk; +-- constraint should still be there +\d fk_partitioned_fk_2; + Table "public.fk_partitioned_fk_2" + Column | Type | Collation | Nullable | Default +--------+---------+-----------+----------+--------- + a | integer | | | 2501 + b | integer | | | 142857 +Foreign-key constraints: + "fk_partitioned_fk_a_fkey" FOREIGN KEY (a, b) REFERENCES fk_notpartitioned_pk(a, b) ON UPDATE CASCADE ON DELETE CASCADE + +ROLLBACK; +ALTER TABLE fk_partitioned_fk ATTACH PARTITION fk_partitioned_fk_2 FOR VALUES IN (1500,1502); +DROP TABLE fk_partitioned_fk_2; +CREATE TABLE fk_partitioned_fk_2 (b int, c text, a int, + FOREIGN KEY (a, b) REFERENCES fk_notpartitioned_pk ON UPDATE CASCADE ON DELETE CASCADE); +ALTER TABLE fk_partitioned_fk_2 DROP COLUMN c; +ALTER TABLE fk_partitioned_fk ATTACH PARTITION fk_partitioned_fk_2 FOR VALUES IN (1500,1502); +-- should have only one constraint +\d fk_partitioned_fk_2 + Table "public.fk_partitioned_fk_2" + Column | Type | Collation | Nullable | Default +--------+---------+-----------+----------+--------- + b | integer | | | + a | integer | | | +Partition of: fk_partitioned_fk FOR VALUES IN (1500, 1502) +Foreign-key constraints: + "fk_partitioned_fk_2_a_fkey" FOREIGN KEY (a, b) REFERENCES fk_notpartitioned_pk(a, b) ON UPDATE CASCADE ON DELETE CASCADE + +DROP TABLE fk_partitioned_fk_2; +CREATE TABLE fk_partitioned_fk_4 (a int, b int, FOREIGN KEY (a, b) REFERENCES fk_notpartitioned_pk(a, b) ON UPDATE CASCADE ON DELETE CASCADE) PARTITION BY RANGE (b, a); +CREATE TABLE fk_partitioned_fk_4_1 PARTITION OF fk_partitioned_fk_4 FOR VALUES FROM (1,1) TO (100,100); +CREATE TABLE fk_partitioned_fk_4_2 (a int, b int, FOREIGN KEY (a, b) REFERENCES fk_notpartitioned_pk(a, b) ON UPDATE SET NULL); +ALTER TABLE fk_partitioned_fk_4 ATTACH PARTITION fk_partitioned_fk_4_2 FOR VALUES FROM (100,100) TO (1000,1000); +ALTER TABLE fk_partitioned_fk ATTACH PARTITION fk_partitioned_fk_4 FOR VALUES IN (3500,3502); +ALTER TABLE fk_partitioned_fk DETACH PARTITION fk_partitioned_fk_4; +ALTER TABLE fk_partitioned_fk ATTACH PARTITION fk_partitioned_fk_4 FOR VALUES IN (3500,3502); +-- should only have one constraint +\d fk_partitioned_fk_4 + Table "public.fk_partitioned_fk_4" + Column | Type | Collation | Nullable | Default +--------+---------+-----------+----------+--------- + a | integer | | | + b | integer | | | +Partition of: fk_partitioned_fk FOR VALUES IN (3500, 3502) +Partition key: RANGE (b, a) +Foreign-key constraints: + "fk_partitioned_fk_4_a_fkey" FOREIGN KEY (a, b) REFERENCES fk_notpartitioned_pk(a, b) ON UPDATE CASCADE ON DELETE CASCADE +Number of partitions: 2 (Use \d+ to list them.) 
+ +\d fk_partitioned_fk_4_1 + Table "public.fk_partitioned_fk_4_1" + Column | Type | Collation | Nullable | Default +--------+---------+-----------+----------+--------- + a | integer | | | + b | integer | | | +Partition of: fk_partitioned_fk_4 FOR VALUES FROM (1, 1) TO (100, 100) +Foreign-key constraints: + "fk_partitioned_fk_4_a_fkey" FOREIGN KEY (a, b) REFERENCES fk_notpartitioned_pk(a, b) ON UPDATE CASCADE ON DELETE CASCADE + +-- this one has an FK with mismatched properties +\d fk_partitioned_fk_4_2 + Table "public.fk_partitioned_fk_4_2" + Column | Type | Collation | Nullable | Default +--------+---------+-----------+----------+--------- + a | integer | | | + b | integer | | | +Partition of: fk_partitioned_fk_4 FOR VALUES FROM (100, 100) TO (1000, 1000) +Foreign-key constraints: + "fk_partitioned_fk_4_2_a_fkey" FOREIGN KEY (a, b) REFERENCES fk_notpartitioned_pk(a, b) ON UPDATE SET NULL + "fk_partitioned_fk_4_a_fkey" FOREIGN KEY (a, b) REFERENCES fk_notpartitioned_pk(a, b) ON UPDATE CASCADE ON DELETE CASCADE + +CREATE TABLE fk_partitioned_fk_5 (a int, b int, + FOREIGN KEY (a, b) REFERENCES fk_notpartitioned_pk(a, b) ON UPDATE CASCADE ON DELETE CASCADE DEFERRABLE, + FOREIGN KEY (a, b) REFERENCES fk_notpartitioned_pk(a, b) MATCH FULL ON UPDATE CASCADE ON DELETE CASCADE) + PARTITION BY RANGE (a); +CREATE TABLE fk_partitioned_fk_5_1 (a int, b int, FOREIGN KEY (a, b) REFERENCES fk_notpartitioned_pk); +ALTER TABLE fk_partitioned_fk ATTACH PARTITION fk_partitioned_fk_5 FOR VALUES IN (4500); +ALTER TABLE fk_partitioned_fk_5 ATTACH PARTITION fk_partitioned_fk_5_1 FOR VALUES FROM (0) TO (10); +ALTER TABLE fk_partitioned_fk DETACH PARTITION fk_partitioned_fk_5; +ALTER TABLE fk_partitioned_fk ATTACH PARTITION fk_partitioned_fk_5 FOR VALUES IN (4500); +-- this one has two constraints, similar but not quite the one in the parent, +-- so it gets a new one +\d fk_partitioned_fk_5 + Table "public.fk_partitioned_fk_5" + Column | Type | Collation | Nullable | Default +--------+---------+-----------+----------+--------- + a | integer | | | + b | integer | | | +Partition of: fk_partitioned_fk FOR VALUES IN (4500) +Partition key: RANGE (a) +Foreign-key constraints: + "fk_partitioned_fk_5_a_fkey" FOREIGN KEY (a, b) REFERENCES fk_notpartitioned_pk(a, b) ON UPDATE CASCADE ON DELETE CASCADE DEFERRABLE + "fk_partitioned_fk_5_a_fkey1" FOREIGN KEY (a, b) REFERENCES fk_notpartitioned_pk(a, b) MATCH FULL ON UPDATE CASCADE ON DELETE CASCADE + "fk_partitioned_fk_a_fkey" FOREIGN KEY (a, b) REFERENCES fk_notpartitioned_pk(a, b) ON UPDATE CASCADE ON DELETE CASCADE +Number of partitions: 1 (Use \d+ to list them.) 
+ +-- verify that it works to reattaching a child with multiple candidate +-- constraints +ALTER TABLE fk_partitioned_fk_5 DETACH PARTITION fk_partitioned_fk_5_1; +ALTER TABLE fk_partitioned_fk_5 ATTACH PARTITION fk_partitioned_fk_5_1 FOR VALUES FROM (0) TO (10); +\d fk_partitioned_fk_5_1 + Table "public.fk_partitioned_fk_5_1" + Column | Type | Collation | Nullable | Default +--------+---------+-----------+----------+--------- + a | integer | | | + b | integer | | | +Partition of: fk_partitioned_fk_5 FOR VALUES FROM (0) TO (10) +Foreign-key constraints: + "fk_partitioned_fk_5_1_a_fkey" FOREIGN KEY (a, b) REFERENCES fk_notpartitioned_pk(a, b) + "fk_partitioned_fk_5_a_fkey" FOREIGN KEY (a, b) REFERENCES fk_notpartitioned_pk(a, b) ON UPDATE CASCADE ON DELETE CASCADE DEFERRABLE + "fk_partitioned_fk_5_a_fkey1" FOREIGN KEY (a, b) REFERENCES fk_notpartitioned_pk(a, b) MATCH FULL ON UPDATE CASCADE ON DELETE CASCADE + "fk_partitioned_fk_a_fkey" FOREIGN KEY (a, b) REFERENCES fk_notpartitioned_pk(a, b) ON UPDATE CASCADE ON DELETE CASCADE + +-- verify that attaching a table checks that the existing data satisfies the +-- constraint +CREATE TABLE fk_partitioned_fk_2 (a int, b int) PARTITION BY RANGE (b); +CREATE TABLE fk_partitioned_fk_2_1 PARTITION OF fk_partitioned_fk_2 FOR VALUES FROM (0) TO (1000); +CREATE TABLE fk_partitioned_fk_2_2 PARTITION OF fk_partitioned_fk_2 FOR VALUES FROM (1000) TO (2000); +INSERT INTO fk_partitioned_fk_2 VALUES (1600, 601), (1600, 1601); +ALTER TABLE fk_partitioned_fk ATTACH PARTITION fk_partitioned_fk_2 + FOR VALUES IN (1600); +ERROR: insert or update on table "fk_partitioned_fk_2" violates foreign key constraint "fk_partitioned_fk_a_fkey" +DETAIL: Key (a, b)=(1600, 601) is not present in table "fk_notpartitioned_pk". +INSERT INTO fk_notpartitioned_pk VALUES (1600, 601), (1600, 1601); +ALTER TABLE fk_partitioned_fk ATTACH PARTITION fk_partitioned_fk_2 + FOR VALUES IN (1600); +-- leave these tables around intentionally diff --git a/src/test/regress/expected/geometry.out b/src/test/regress/expected/geometry.out index 1271395d4e..055d32c0e2 100644 --- a/src/test/regress/expected/geometry.out +++ b/src/test/regress/expected/geometry.out @@ -13,9 +13,10 @@ SELECT '' AS four, center(f1) AS center ------+--------- | (1,1) | (2,2) + | (-5,-4) | (2.5,3) | (3,3) -(4 rows) +(5 rows) SELECT '' AS four, (@@ f1) AS center FROM BOX_TBL; @@ -23,9 +24,10 @@ SELECT '' AS four, (@@ f1) AS center ------+--------- | (1,1) | (2,2) + | (-5,-4) | (2.5,3) | (3,3) -(4 rows) +(5 rows) SELECT '' AS six, point(f1) AS center FROM CIRCLE_TBL; @@ -37,7 +39,9 @@ SELECT '' AS six, point(f1) AS center | (1,2) | (100,200) | (100,1) -(6 rows) + | (3,5) + | (3,5) +(8 rows) SELECT '' AS six, (@@ f1) AS center FROM CIRCLE_TBL; @@ -49,7 +53,9 @@ SELECT '' AS six, (@@ f1) AS center | (1,2) | (100,200) | (100,1) -(6 rows) + | (3,5) + | (3,5) +(8 rows) SELECT '' AS two, (@@ f1) AS center FROM POLYGON_TBL @@ -58,27 +64,32 @@ SELECT '' AS two, (@@ f1) AS center -----+------------------------------- | (1.33333333333,1.33333333333) | (2.33333333333,1.33333333333) -(2 rows) + | (4,5) + | (4,5) + | (4,3) +(5 rows) -- "is horizontal" function SELECT '' AS two, p1.f1 FROM POINT_TBL p1 WHERE ishorizontal(p1.f1, point '(0,0)'); - two | f1 ------+--------- + two | f1 +-----+------------------ | (0,0) | (-10,0) -(2 rows) + | (1e-300,-1e-300) +(3 rows) -- "is horizontal" operator SELECT '' AS two, p1.f1 FROM POINT_TBL p1 WHERE p1.f1 ?- point '(0,0)'; - two | f1 ------+--------- + two | f1 +-----+------------------ | (0,0) | 
(-10,0) -(2 rows) + | (1e-300,-1e-300) +(3 rows) -- "is vertical" function SELECT '' AS one, p1.f1 @@ -98,6 +109,1453 @@ SELECT '' AS one, p1.f1 | (5.1,34.5) (1 row) +-- Slope +SELECT p1.f1, p2.f1, slope(p1.f1, p2.f1) FROM POINT_TBL p1, POINT_TBL p2; + f1 | f1 | slope +-------------------+-------------------+-------------------- + (0,0) | (0,0) | 1.79769313486e+308 + (0,0) | (-10,0) | 0 + (0,0) | (-3,4) | -1.33333333333 + (0,0) | (5.1,34.5) | 6.76470588235 + (0,0) | (-5,-12) | 2.4 + (0,0) | (1e-300,-1e-300) | 1.79769313486e+308 + (0,0) | (1e+300,Infinity) | Infinity + (0,0) | (NaN,NaN) | NaN + (0,0) | (10,10) | 1 + (-10,0) | (0,0) | 0 + (-10,0) | (-10,0) | 1.79769313486e+308 + (-10,0) | (-3,4) | 0.571428571429 + (-10,0) | (5.1,34.5) | 2.28476821192 + (-10,0) | (-5,-12) | -2.4 + (-10,0) | (1e-300,-1e-300) | 0 + (-10,0) | (1e+300,Infinity) | Infinity + (-10,0) | (NaN,NaN) | NaN + (-10,0) | (10,10) | 0.5 + (-3,4) | (0,0) | -1.33333333333 + (-3,4) | (-10,0) | 0.571428571429 + (-3,4) | (-3,4) | 1.79769313486e+308 + (-3,4) | (5.1,34.5) | 3.76543209877 + (-3,4) | (-5,-12) | 8 + (-3,4) | (1e-300,-1e-300) | -1.33333333333 + (-3,4) | (1e+300,Infinity) | Infinity + (-3,4) | (NaN,NaN) | NaN + (-3,4) | (10,10) | 0.461538461538 + (5.1,34.5) | (0,0) | 6.76470588235 + (5.1,34.5) | (-10,0) | 2.28476821192 + (5.1,34.5) | (-3,4) | 3.76543209877 + (5.1,34.5) | (5.1,34.5) | 1.79769313486e+308 + (5.1,34.5) | (-5,-12) | 4.60396039604 + (5.1,34.5) | (1e-300,-1e-300) | 6.76470588235 + (5.1,34.5) | (1e+300,Infinity) | Infinity + (5.1,34.5) | (NaN,NaN) | NaN + (5.1,34.5) | (10,10) | -5 + (-5,-12) | (0,0) | 2.4 + (-5,-12) | (-10,0) | -2.4 + (-5,-12) | (-3,4) | 8 + (-5,-12) | (5.1,34.5) | 4.60396039604 + (-5,-12) | (-5,-12) | 1.79769313486e+308 + (-5,-12) | (1e-300,-1e-300) | 2.4 + (-5,-12) | (1e+300,Infinity) | Infinity + (-5,-12) | (NaN,NaN) | NaN + (-5,-12) | (10,10) | 1.46666666667 + (1e-300,-1e-300) | (0,0) | 1.79769313486e+308 + (1e-300,-1e-300) | (-10,0) | 0 + (1e-300,-1e-300) | (-3,4) | -1.33333333333 + (1e-300,-1e-300) | (5.1,34.5) | 6.76470588235 + (1e-300,-1e-300) | (-5,-12) | 2.4 + (1e-300,-1e-300) | (1e-300,-1e-300) | 1.79769313486e+308 + (1e-300,-1e-300) | (1e+300,Infinity) | Infinity + (1e-300,-1e-300) | (NaN,NaN) | NaN + (1e-300,-1e-300) | (10,10) | 1 + (1e+300,Infinity) | (0,0) | Infinity + (1e+300,Infinity) | (-10,0) | Infinity + (1e+300,Infinity) | (-3,4) | Infinity + (1e+300,Infinity) | (5.1,34.5) | Infinity + (1e+300,Infinity) | (-5,-12) | Infinity + (1e+300,Infinity) | (1e-300,-1e-300) | Infinity + (1e+300,Infinity) | (1e+300,Infinity) | 1.79769313486e+308 + (1e+300,Infinity) | (NaN,NaN) | NaN + (1e+300,Infinity) | (10,10) | Infinity + (NaN,NaN) | (0,0) | NaN + (NaN,NaN) | (-10,0) | NaN + (NaN,NaN) | (-3,4) | NaN + (NaN,NaN) | (5.1,34.5) | NaN + (NaN,NaN) | (-5,-12) | NaN + (NaN,NaN) | (1e-300,-1e-300) | NaN + (NaN,NaN) | (1e+300,Infinity) | NaN + (NaN,NaN) | (NaN,NaN) | NaN + (NaN,NaN) | (10,10) | NaN + (10,10) | (0,0) | 1 + (10,10) | (-10,0) | 0.5 + (10,10) | (-3,4) | 0.461538461538 + (10,10) | (5.1,34.5) | -5 + (10,10) | (-5,-12) | 1.46666666667 + (10,10) | (1e-300,-1e-300) | 1 + (10,10) | (1e+300,Infinity) | Infinity + (10,10) | (NaN,NaN) | NaN + (10,10) | (10,10) | 1.79769313486e+308 +(81 rows) + +-- Add point +SELECT p1.f1, p2.f1, p1.f1 + p2.f1 FROM POINT_TBL p1, POINT_TBL p2; + f1 | f1 | ?column? 
+-------------------+-------------------+------------------- + (0,0) | (0,0) | (0,0) + (0,0) | (-10,0) | (-10,0) + (0,0) | (-3,4) | (-3,4) + (0,0) | (5.1,34.5) | (5.1,34.5) + (0,0) | (-5,-12) | (-5,-12) + (0,0) | (1e-300,-1e-300) | (1e-300,-1e-300) + (0,0) | (1e+300,Infinity) | (1e+300,Infinity) + (0,0) | (NaN,NaN) | (NaN,NaN) + (0,0) | (10,10) | (10,10) + (-10,0) | (0,0) | (-10,0) + (-10,0) | (-10,0) | (-20,0) + (-10,0) | (-3,4) | (-13,4) + (-10,0) | (5.1,34.5) | (-4.9,34.5) + (-10,0) | (-5,-12) | (-15,-12) + (-10,0) | (1e-300,-1e-300) | (-10,-1e-300) + (-10,0) | (1e+300,Infinity) | (1e+300,Infinity) + (-10,0) | (NaN,NaN) | (NaN,NaN) + (-10,0) | (10,10) | (0,10) + (-3,4) | (0,0) | (-3,4) + (-3,4) | (-10,0) | (-13,4) + (-3,4) | (-3,4) | (-6,8) + (-3,4) | (5.1,34.5) | (2.1,38.5) + (-3,4) | (-5,-12) | (-8,-8) + (-3,4) | (1e-300,-1e-300) | (-3,4) + (-3,4) | (1e+300,Infinity) | (1e+300,Infinity) + (-3,4) | (NaN,NaN) | (NaN,NaN) + (-3,4) | (10,10) | (7,14) + (5.1,34.5) | (0,0) | (5.1,34.5) + (5.1,34.5) | (-10,0) | (-4.9,34.5) + (5.1,34.5) | (-3,4) | (2.1,38.5) + (5.1,34.5) | (5.1,34.5) | (10.2,69) + (5.1,34.5) | (-5,-12) | (0.1,22.5) + (5.1,34.5) | (1e-300,-1e-300) | (5.1,34.5) + (5.1,34.5) | (1e+300,Infinity) | (1e+300,Infinity) + (5.1,34.5) | (NaN,NaN) | (NaN,NaN) + (5.1,34.5) | (10,10) | (15.1,44.5) + (-5,-12) | (0,0) | (-5,-12) + (-5,-12) | (-10,0) | (-15,-12) + (-5,-12) | (-3,4) | (-8,-8) + (-5,-12) | (5.1,34.5) | (0.1,22.5) + (-5,-12) | (-5,-12) | (-10,-24) + (-5,-12) | (1e-300,-1e-300) | (-5,-12) + (-5,-12) | (1e+300,Infinity) | (1e+300,Infinity) + (-5,-12) | (NaN,NaN) | (NaN,NaN) + (-5,-12) | (10,10) | (5,-2) + (1e-300,-1e-300) | (0,0) | (1e-300,-1e-300) + (1e-300,-1e-300) | (-10,0) | (-10,-1e-300) + (1e-300,-1e-300) | (-3,4) | (-3,4) + (1e-300,-1e-300) | (5.1,34.5) | (5.1,34.5) + (1e-300,-1e-300) | (-5,-12) | (-5,-12) + (1e-300,-1e-300) | (1e-300,-1e-300) | (2e-300,-2e-300) + (1e-300,-1e-300) | (1e+300,Infinity) | (1e+300,Infinity) + (1e-300,-1e-300) | (NaN,NaN) | (NaN,NaN) + (1e-300,-1e-300) | (10,10) | (10,10) + (1e+300,Infinity) | (0,0) | (1e+300,Infinity) + (1e+300,Infinity) | (-10,0) | (1e+300,Infinity) + (1e+300,Infinity) | (-3,4) | (1e+300,Infinity) + (1e+300,Infinity) | (5.1,34.5) | (1e+300,Infinity) + (1e+300,Infinity) | (-5,-12) | (1e+300,Infinity) + (1e+300,Infinity) | (1e-300,-1e-300) | (1e+300,Infinity) + (1e+300,Infinity) | (1e+300,Infinity) | (2e+300,Infinity) + (1e+300,Infinity) | (NaN,NaN) | (NaN,NaN) + (1e+300,Infinity) | (10,10) | (1e+300,Infinity) + (NaN,NaN) | (0,0) | (NaN,NaN) + (NaN,NaN) | (-10,0) | (NaN,NaN) + (NaN,NaN) | (-3,4) | (NaN,NaN) + (NaN,NaN) | (5.1,34.5) | (NaN,NaN) + (NaN,NaN) | (-5,-12) | (NaN,NaN) + (NaN,NaN) | (1e-300,-1e-300) | (NaN,NaN) + (NaN,NaN) | (1e+300,Infinity) | (NaN,NaN) + (NaN,NaN) | (NaN,NaN) | (NaN,NaN) + (NaN,NaN) | (10,10) | (NaN,NaN) + (10,10) | (0,0) | (10,10) + (10,10) | (-10,0) | (0,10) + (10,10) | (-3,4) | (7,14) + (10,10) | (5.1,34.5) | (15.1,44.5) + (10,10) | (-5,-12) | (5,-2) + (10,10) | (1e-300,-1e-300) | (10,10) + (10,10) | (1e+300,Infinity) | (1e+300,Infinity) + (10,10) | (NaN,NaN) | (NaN,NaN) + (10,10) | (10,10) | (20,20) +(81 rows) + +-- Subtract point +SELECT p1.f1, p2.f1, p1.f1 - p2.f1 FROM POINT_TBL p1, POINT_TBL p2; + f1 | f1 | ?column? 
+-------------------+-------------------+--------------------- + (0,0) | (0,0) | (0,0) + (0,0) | (-10,0) | (10,0) + (0,0) | (-3,4) | (3,-4) + (0,0) | (5.1,34.5) | (-5.1,-34.5) + (0,0) | (-5,-12) | (5,12) + (0,0) | (1e-300,-1e-300) | (-1e-300,1e-300) + (0,0) | (1e+300,Infinity) | (-1e+300,-Infinity) + (0,0) | (NaN,NaN) | (NaN,NaN) + (0,0) | (10,10) | (-10,-10) + (-10,0) | (0,0) | (-10,0) + (-10,0) | (-10,0) | (0,0) + (-10,0) | (-3,4) | (-7,-4) + (-10,0) | (5.1,34.5) | (-15.1,-34.5) + (-10,0) | (-5,-12) | (-5,12) + (-10,0) | (1e-300,-1e-300) | (-10,1e-300) + (-10,0) | (1e+300,Infinity) | (-1e+300,-Infinity) + (-10,0) | (NaN,NaN) | (NaN,NaN) + (-10,0) | (10,10) | (-20,-10) + (-3,4) | (0,0) | (-3,4) + (-3,4) | (-10,0) | (7,4) + (-3,4) | (-3,4) | (0,0) + (-3,4) | (5.1,34.5) | (-8.1,-30.5) + (-3,4) | (-5,-12) | (2,16) + (-3,4) | (1e-300,-1e-300) | (-3,4) + (-3,4) | (1e+300,Infinity) | (-1e+300,-Infinity) + (-3,4) | (NaN,NaN) | (NaN,NaN) + (-3,4) | (10,10) | (-13,-6) + (5.1,34.5) | (0,0) | (5.1,34.5) + (5.1,34.5) | (-10,0) | (15.1,34.5) + (5.1,34.5) | (-3,4) | (8.1,30.5) + (5.1,34.5) | (5.1,34.5) | (0,0) + (5.1,34.5) | (-5,-12) | (10.1,46.5) + (5.1,34.5) | (1e-300,-1e-300) | (5.1,34.5) + (5.1,34.5) | (1e+300,Infinity) | (-1e+300,-Infinity) + (5.1,34.5) | (NaN,NaN) | (NaN,NaN) + (5.1,34.5) | (10,10) | (-4.9,24.5) + (-5,-12) | (0,0) | (-5,-12) + (-5,-12) | (-10,0) | (5,-12) + (-5,-12) | (-3,4) | (-2,-16) + (-5,-12) | (5.1,34.5) | (-10.1,-46.5) + (-5,-12) | (-5,-12) | (0,0) + (-5,-12) | (1e-300,-1e-300) | (-5,-12) + (-5,-12) | (1e+300,Infinity) | (-1e+300,-Infinity) + (-5,-12) | (NaN,NaN) | (NaN,NaN) + (-5,-12) | (10,10) | (-15,-22) + (1e-300,-1e-300) | (0,0) | (1e-300,-1e-300) + (1e-300,-1e-300) | (-10,0) | (10,-1e-300) + (1e-300,-1e-300) | (-3,4) | (3,-4) + (1e-300,-1e-300) | (5.1,34.5) | (-5.1,-34.5) + (1e-300,-1e-300) | (-5,-12) | (5,12) + (1e-300,-1e-300) | (1e-300,-1e-300) | (0,0) + (1e-300,-1e-300) | (1e+300,Infinity) | (-1e+300,-Infinity) + (1e-300,-1e-300) | (NaN,NaN) | (NaN,NaN) + (1e-300,-1e-300) | (10,10) | (-10,-10) + (1e+300,Infinity) | (0,0) | (1e+300,Infinity) + (1e+300,Infinity) | (-10,0) | (1e+300,Infinity) + (1e+300,Infinity) | (-3,4) | (1e+300,Infinity) + (1e+300,Infinity) | (5.1,34.5) | (1e+300,Infinity) + (1e+300,Infinity) | (-5,-12) | (1e+300,Infinity) + (1e+300,Infinity) | (1e-300,-1e-300) | (1e+300,Infinity) + (1e+300,Infinity) | (1e+300,Infinity) | (0,NaN) + (1e+300,Infinity) | (NaN,NaN) | (NaN,NaN) + (1e+300,Infinity) | (10,10) | (1e+300,Infinity) + (NaN,NaN) | (0,0) | (NaN,NaN) + (NaN,NaN) | (-10,0) | (NaN,NaN) + (NaN,NaN) | (-3,4) | (NaN,NaN) + (NaN,NaN) | (5.1,34.5) | (NaN,NaN) + (NaN,NaN) | (-5,-12) | (NaN,NaN) + (NaN,NaN) | (1e-300,-1e-300) | (NaN,NaN) + (NaN,NaN) | (1e+300,Infinity) | (NaN,NaN) + (NaN,NaN) | (NaN,NaN) | (NaN,NaN) + (NaN,NaN) | (10,10) | (NaN,NaN) + (10,10) | (0,0) | (10,10) + (10,10) | (-10,0) | (20,10) + (10,10) | (-3,4) | (13,6) + (10,10) | (5.1,34.5) | (4.9,-24.5) + (10,10) | (-5,-12) | (15,22) + (10,10) | (1e-300,-1e-300) | (10,10) + (10,10) | (1e+300,Infinity) | (-1e+300,-Infinity) + (10,10) | (NaN,NaN) | (NaN,NaN) + (10,10) | (10,10) | (0,0) +(81 rows) + +-- Multiply with point +SELECT p1.f1, p2.f1, p1.f1 * p2.f1 FROM POINT_TBL p1, POINT_TBL p2 WHERE p1.f1[0] BETWEEN 1 AND 1000; + f1 | f1 | ?column? 
+------------+-------------------+----------------------- + (5.1,34.5) | (0,0) | (0,0) + (10,10) | (0,0) | (0,0) + (5.1,34.5) | (-10,0) | (-51,-345) + (10,10) | (-10,0) | (-100,-100) + (5.1,34.5) | (-3,4) | (-153.3,-83.1) + (10,10) | (-3,4) | (-70,10) + (5.1,34.5) | (5.1,34.5) | (-1164.24,351.9) + (10,10) | (5.1,34.5) | (-294,396) + (5.1,34.5) | (-5,-12) | (388.5,-233.7) + (10,10) | (-5,-12) | (70,-170) + (5.1,34.5) | (1e-300,-1e-300) | (3.96e-299,2.94e-299) + (10,10) | (1e-300,-1e-300) | (2e-299,0) + (5.1,34.5) | (1e+300,Infinity) | (-Infinity,Infinity) + (10,10) | (1e+300,Infinity) | (-Infinity,Infinity) + (5.1,34.5) | (NaN,NaN) | (NaN,NaN) + (10,10) | (NaN,NaN) | (NaN,NaN) + (5.1,34.5) | (10,10) | (-294,396) + (10,10) | (10,10) | (0,200) +(18 rows) + +-- Underflow error +SELECT p1.f1, p2.f1, p1.f1 * p2.f1 FROM POINT_TBL p1, POINT_TBL p2 WHERE p1.f1[0] < 1; +ERROR: value out of range: underflow +-- Divide by point +SELECT p1.f1, p2.f1, p1.f1 / p2.f1 FROM POINT_TBL p1, POINT_TBL p2 WHERE p2.f1[0] BETWEEN 1 AND 1000; + f1 | f1 | ?column? +-------------------+------------+------------------------------------------- + (0,0) | (5.1,34.5) | (0,0) + (0,0) | (10,10) | (0,0) + (-10,0) | (5.1,34.5) | (-0.0419318237877,0.283656455034) + (-10,0) | (10,10) | (-0.5,0.5) + (-3,4) | (5.1,34.5) | (0.100883034877,0.101869666025) + (-3,4) | (10,10) | (0.05,0.35) + (5.1,34.5) | (5.1,34.5) | (1,0) + (5.1,34.5) | (10,10) | (1.98,1.47) + (-5,-12) | (5.1,34.5) | (-0.361353657935,0.0915100389719) + (-5,-12) | (10,10) | (-0.85,-0.35) + (1e-300,-1e-300) | (5.1,34.5) | (-2.41724631247e-302,-3.25588278822e-302) + (1e-300,-1e-300) | (10,10) | (0,-1e-301) + (1e+300,Infinity) | (5.1,34.5) | (Infinity,Infinity) + (1e+300,Infinity) | (10,10) | (Infinity,Infinity) + (NaN,NaN) | (5.1,34.5) | (NaN,NaN) + (NaN,NaN) | (10,10) | (NaN,NaN) + (10,10) | (5.1,34.5) | (0.325588278822,-0.241724631247) + (10,10) | (10,10) | (1,0) +(18 rows) + +-- Overflow error +SELECT p1.f1, p2.f1, p1.f1 / p2.f1 FROM POINT_TBL p1, POINT_TBL p2 WHERE p2.f1[0] > 1000; +ERROR: value out of range: overflow +-- Division by 0 error +SELECT p1.f1, p2.f1, p1.f1 / p2.f1 FROM POINT_TBL p1, POINT_TBL p2 WHERE p2.f1 ~= '(0,0)'::point; +ERROR: division by zero +-- Distance to line +SELECT p.f1, l.s, p.f1 <-> l.s FROM POINT_TBL p, LINE_TBL l; + f1 | s | ?column? 
+-------------------+---------------------------------------+-------------------- + (0,0) | {0,-1,5} | 5 + (0,0) | {1,0,5} | 5 + (0,0) | {0,3,0} | 0 + (0,0) | {1,-1,0} | 0 + (0,0) | {-0.4,-1,-6} | 5.57086014531 + (0,0) | {-0.000184615384615,-1,15.3846153846} | 15.3846151224 + (0,0) | {3,NaN,5} | NaN + (0,0) | {NaN,NaN,NaN} | NaN + (0,0) | {0,-1,3} | 3 + (0,0) | {-1,0,3} | 3 + (-10,0) | {0,-1,5} | 5 + (-10,0) | {1,0,5} | 5 + (-10,0) | {0,3,0} | 0 + (-10,0) | {1,-1,0} | 7.07106781187 + (-10,0) | {-0.4,-1,-6} | 1.85695338177 + (-10,0) | {-0.000184615384615,-1,15.3846153846} | 15.3864612763 + (-10,0) | {3,NaN,5} | NaN + (-10,0) | {NaN,NaN,NaN} | NaN + (-10,0) | {0,-1,3} | 3 + (-10,0) | {-1,0,3} | 13 + (-3,4) | {0,-1,5} | 1 + (-3,4) | {1,0,5} | 2 + (-3,4) | {0,3,0} | 4 + (-3,4) | {1,-1,0} | 4.94974746831 + (-3,4) | {-0.4,-1,-6} | 8.17059487979 + (-3,4) | {-0.000184615384615,-1,15.3846153846} | 11.3851690368 + (-3,4) | {3,NaN,5} | NaN + (-3,4) | {NaN,NaN,NaN} | NaN + (-3,4) | {0,-1,3} | 1 + (-3,4) | {-1,0,3} | 6 + (5.1,34.5) | {0,-1,5} | 29.5 + (5.1,34.5) | {1,0,5} | 10.1 + (5.1,34.5) | {0,3,0} | 34.5 + (5.1,34.5) | {1,-1,0} | 20.7889393669 + (5.1,34.5) | {-0.4,-1,-6} | 39.4973984303 + (5.1,34.5) | {-0.000184615384615,-1,15.3846153846} | 19.1163258281 + (5.1,34.5) | {3,NaN,5} | NaN + (5.1,34.5) | {NaN,NaN,NaN} | NaN + (5.1,34.5) | {0,-1,3} | 31.5 + (5.1,34.5) | {-1,0,3} | 2.1 + (-5,-12) | {0,-1,5} | 17 + (-5,-12) | {1,0,5} | 0 + (-5,-12) | {0,3,0} | 12 + (-5,-12) | {1,-1,0} | 4.94974746831 + (-5,-12) | {-0.4,-1,-6} | 7.42781352708 + (-5,-12) | {-0.000184615384615,-1,15.3846153846} | 27.3855379948 + (-5,-12) | {3,NaN,5} | NaN + (-5,-12) | {NaN,NaN,NaN} | NaN + (-5,-12) | {0,-1,3} | 15 + (-5,-12) | {-1,0,3} | 8 + (1e-300,-1e-300) | {0,-1,5} | 5 + (1e-300,-1e-300) | {1,0,5} | 5 + (1e-300,-1e-300) | {0,3,0} | 1e-300 + (1e-300,-1e-300) | {1,-1,0} | 1.41421356237e-300 + (1e-300,-1e-300) | {-0.4,-1,-6} | 5.57086014531 + (1e-300,-1e-300) | {-0.000184615384615,-1,15.3846153846} | 15.3846151224 + (1e-300,-1e-300) | {3,NaN,5} | NaN + (1e-300,-1e-300) | {NaN,NaN,NaN} | NaN + (1e-300,-1e-300) | {0,-1,3} | 3 + (1e-300,-1e-300) | {-1,0,3} | 3 + (1e+300,Infinity) | {0,-1,5} | Infinity + (1e+300,Infinity) | {1,0,5} | NaN + (1e+300,Infinity) | {0,3,0} | Infinity + (1e+300,Infinity) | {1,-1,0} | Infinity + (1e+300,Infinity) | {-0.4,-1,-6} | Infinity + (1e+300,Infinity) | {-0.000184615384615,-1,15.3846153846} | Infinity + (1e+300,Infinity) | {3,NaN,5} | NaN + (1e+300,Infinity) | {NaN,NaN,NaN} | NaN + (1e+300,Infinity) | {0,-1,3} | Infinity + (1e+300,Infinity) | {-1,0,3} | NaN + (NaN,NaN) | {0,-1,5} | NaN + (NaN,NaN) | {1,0,5} | NaN + (NaN,NaN) | {0,3,0} | NaN + (NaN,NaN) | {1,-1,0} | NaN + (NaN,NaN) | {-0.4,-1,-6} | NaN + (NaN,NaN) | {-0.000184615384615,-1,15.3846153846} | NaN + (NaN,NaN) | {3,NaN,5} | NaN + (NaN,NaN) | {NaN,NaN,NaN} | NaN + (NaN,NaN) | {0,-1,3} | NaN + (NaN,NaN) | {-1,0,3} | NaN + (10,10) | {0,-1,5} | 5 + (10,10) | {1,0,5} | 15 + (10,10) | {0,3,0} | 10 + (10,10) | {1,-1,0} | 0 + (10,10) | {-0.4,-1,-6} | 18.5695338177 + (10,10) | {-0.000184615384615,-1,15.3846153846} | 5.38276913903 + (10,10) | {3,NaN,5} | NaN + (10,10) | {NaN,NaN,NaN} | NaN + (10,10) | {0,-1,3} | 7 + (10,10) | {-1,0,3} | 7 +(90 rows) + +-- Distance to line segment +SELECT p.f1, l.s, p.f1 <-> l.s FROM POINT_TBL p, LSEG_TBL l; + f1 | s | ?column? 
+-------------------+-------------------------------+-------------------- + (0,0) | [(1,2),(3,4)] | 2.2360679775 + (0,0) | [(0,0),(6,6)] | 0 + (0,0) | [(10,-10),(-3,-4)] | 4.88901207039 + (0,0) | [(-1000000,200),(300000,-40)] | 15.3846151224 + (0,0) | [(11,22),(33,44)] | 24.5967477525 + (0,0) | [(-10,2),(-10,3)] | 10.1980390272 + (0,0) | [(0,-20),(30,-20)] | 20 + (0,0) | [(NaN,1),(NaN,90)] | NaN + (-10,0) | [(1,2),(3,4)] | 11.1803398875 + (-10,0) | [(0,0),(6,6)] | 10 + (-10,0) | [(10,-10),(-3,-4)] | 8.0622577483 + (-10,0) | [(-1000000,200),(300000,-40)] | 15.3864612763 + (-10,0) | [(11,22),(33,44)] | 30.4138126515 + (-10,0) | [(-10,2),(-10,3)] | 2 + (-10,0) | [(0,-20),(30,-20)] | 22.360679775 + (-10,0) | [(NaN,1),(NaN,90)] | NaN + (-3,4) | [(1,2),(3,4)] | 4.472135955 + (-3,4) | [(0,0),(6,6)] | 4.94974746831 + (-3,4) | [(10,-10),(-3,-4)] | 8 + (-3,4) | [(-1000000,200),(300000,-40)] | 11.3851690367 + (-3,4) | [(11,22),(33,44)] | 22.803508502 + (-3,4) | [(-10,2),(-10,3)] | 7.07106781187 + (-3,4) | [(0,-20),(30,-20)] | 24.1867732449 + (-3,4) | [(NaN,1),(NaN,90)] | NaN + (5.1,34.5) | [(1,2),(3,4)] | 30.5722096028 + (5.1,34.5) | [(0,0),(6,6)] | 28.5142069853 + (5.1,34.5) | [(10,-10),(-3,-4)] | 39.3428519556 + (5.1,34.5) | [(-1000000,200),(300000,-40)] | 19.1163258281 + (5.1,34.5) | [(11,22),(33,44)] | 13.0107647738 + (5.1,34.5) | [(-10,2),(-10,3)] | 34.932220084 + (5.1,34.5) | [(0,-20),(30,-20)] | 54.5 + (5.1,34.5) | [(NaN,1),(NaN,90)] | NaN + (-5,-12) | [(1,2),(3,4)] | 15.2315462117 + (-5,-12) | [(0,0),(6,6)] | 13 + (-5,-12) | [(10,-10),(-3,-4)] | 8.10179143093 + (-5,-12) | [(-1000000,200),(300000,-40)] | 27.3855379949 + (-5,-12) | [(11,22),(33,44)] | 37.5765884561 + (-5,-12) | [(-10,2),(-10,3)] | 14.8660687473 + (-5,-12) | [(0,-20),(30,-20)] | 9.43398113206 + (-5,-12) | [(NaN,1),(NaN,90)] | NaN + (1e-300,-1e-300) | [(1,2),(3,4)] | 2.2360679775 + (1e-300,-1e-300) | [(0,0),(6,6)] | 1.41421356237e-300 + (1e-300,-1e-300) | [(10,-10),(-3,-4)] | 4.88901207039 + (1e-300,-1e-300) | [(-1000000,200),(300000,-40)] | 15.3846151224 + (1e-300,-1e-300) | [(11,22),(33,44)] | 24.5967477525 + (1e-300,-1e-300) | [(-10,2),(-10,3)] | 10.1980390272 + (1e-300,-1e-300) | [(0,-20),(30,-20)] | 20 + (1e-300,-1e-300) | [(NaN,1),(NaN,90)] | NaN + (1e+300,Infinity) | [(1,2),(3,4)] | Infinity + (1e+300,Infinity) | [(0,0),(6,6)] | Infinity + (1e+300,Infinity) | [(10,-10),(-3,-4)] | Infinity + (1e+300,Infinity) | [(-1000000,200),(300000,-40)] | Infinity + (1e+300,Infinity) | [(11,22),(33,44)] | Infinity + (1e+300,Infinity) | [(-10,2),(-10,3)] | Infinity + (1e+300,Infinity) | [(0,-20),(30,-20)] | Infinity + (1e+300,Infinity) | [(NaN,1),(NaN,90)] | Infinity + (NaN,NaN) | [(1,2),(3,4)] | NaN + (NaN,NaN) | [(0,0),(6,6)] | NaN + (NaN,NaN) | [(10,-10),(-3,-4)] | NaN + (NaN,NaN) | [(-1000000,200),(300000,-40)] | NaN + (NaN,NaN) | [(11,22),(33,44)] | NaN + (NaN,NaN) | [(-10,2),(-10,3)] | NaN + (NaN,NaN) | [(0,-20),(30,-20)] | NaN + (NaN,NaN) | [(NaN,1),(NaN,90)] | NaN + (10,10) | [(1,2),(3,4)] | 9.21954445729 + (10,10) | [(0,0),(6,6)] | 5.65685424949 + (10,10) | [(10,-10),(-3,-4)] | 18.15918769 + (10,10) | [(-1000000,200),(300000,-40)] | 5.38276913904 + (10,10) | [(11,22),(33,44)] | 12.0415945788 + (10,10) | [(-10,2),(-10,3)] | 21.1896201004 + (10,10) | [(0,-20),(30,-20)] | 30 + (10,10) | [(NaN,1),(NaN,90)] | NaN +(72 rows) + +-- Distance to box +SELECT p.f1, b.f1, p.f1 <-> b.f1 FROM POINT_TBL p, BOX_TBL b; + f1 | f1 | ?column? 
+-------------------+---------------------+-------------------- + (0,0) | (2,2),(0,0) | 0 + (0,0) | (3,3),(1,1) | 1.41421356237 + (0,0) | (-2,2),(-8,-10) | 2 + (0,0) | (2.5,3.5),(2.5,2.5) | 3.53553390593 + (0,0) | (3,3),(3,3) | 4.24264068712 + (-10,0) | (2,2),(0,0) | 10 + (-10,0) | (3,3),(1,1) | 11.0453610172 + (-10,0) | (-2,2),(-8,-10) | 2 + (-10,0) | (2.5,3.5),(2.5,2.5) | 12.747548784 + (-10,0) | (3,3),(3,3) | 13.3416640641 + (-3,4) | (2,2),(0,0) | 3.60555127546 + (-3,4) | (3,3),(1,1) | 4.12310562562 + (-3,4) | (-2,2),(-8,-10) | 2 + (-3,4) | (2.5,3.5),(2.5,2.5) | 5.52268050859 + (-3,4) | (3,3),(3,3) | 6.0827625303 + (5.1,34.5) | (2,2),(0,0) | 32.6475113906 + (5.1,34.5) | (3,3),(1,1) | 31.5699223946 + (5.1,34.5) | (-2,2),(-8,-10) | 33.2664996656 + (5.1,34.5) | (2.5,3.5),(2.5,2.5) | 31.108841187 + (5.1,34.5) | (3,3),(3,3) | 31.5699223946 + (-5,-12) | (2,2),(0,0) | 13 + (-5,-12) | (3,3),(1,1) | 14.3178210633 + (-5,-12) | (-2,2),(-8,-10) | 2 + (-5,-12) | (2.5,3.5),(2.5,2.5) | 16.3248277173 + (-5,-12) | (3,3),(3,3) | 17 + (1e-300,-1e-300) | (2,2),(0,0) | 1.41421356237e-300 + (1e-300,-1e-300) | (3,3),(1,1) | 1.41421356237 + (1e-300,-1e-300) | (-2,2),(-8,-10) | 2 + (1e-300,-1e-300) | (2.5,3.5),(2.5,2.5) | 3.53553390593 + (1e-300,-1e-300) | (3,3),(3,3) | 4.24264068712 + (1e+300,Infinity) | (2,2),(0,0) | Infinity + (1e+300,Infinity) | (3,3),(1,1) | Infinity + (1e+300,Infinity) | (-2,2),(-8,-10) | Infinity + (1e+300,Infinity) | (2.5,3.5),(2.5,2.5) | Infinity + (1e+300,Infinity) | (3,3),(3,3) | Infinity + (NaN,NaN) | (2,2),(0,0) | NaN + (NaN,NaN) | (3,3),(1,1) | NaN + (NaN,NaN) | (-2,2),(-8,-10) | NaN + (NaN,NaN) | (2.5,3.5),(2.5,2.5) | NaN + (NaN,NaN) | (3,3),(3,3) | NaN + (10,10) | (2,2),(0,0) | 11.313708499 + (10,10) | (3,3),(1,1) | 9.89949493661 + (10,10) | (-2,2),(-8,-10) | 14.4222051019 + (10,10) | (2.5,3.5),(2.5,2.5) | 9.92471662064 + (10,10) | (3,3),(3,3) | 9.89949493661 +(45 rows) + +-- Distance to path +SELECT p.f1, p1.f1, p.f1 <-> p1.f1 FROM POINT_TBL p, PATH_TBL p1; + f1 | f1 | ?column? 
+-------------------+---------------------------+-------------------- + (0,0) | [(1,2),(3,4)] | 2.2360679775 + (0,0) | ((1,2),(3,4)) | 2.2360679775 + (0,0) | [(0,0),(3,0),(4,5),(1,6)] | 0 + (0,0) | ((1,2),(3,4)) | 2.2360679775 + (0,0) | ((1,2),(3,4)) | 2.2360679775 + (0,0) | [(1,2),(3,4)] | 2.2360679775 + (0,0) | ((10,20)) | 22.360679775 + (0,0) | [(11,12),(13,14)] | 16.2788205961 + (0,0) | ((11,12),(13,14)) | 16.2788205961 + (-10,0) | [(1,2),(3,4)] | 11.1803398875 + (-10,0) | ((1,2),(3,4)) | 11.1803398875 + (-10,0) | [(0,0),(3,0),(4,5),(1,6)] | 10 + (-10,0) | ((1,2),(3,4)) | 11.1803398875 + (-10,0) | ((1,2),(3,4)) | 11.1803398875 + (-10,0) | [(1,2),(3,4)] | 11.1803398875 + (-10,0) | ((10,20)) | 28.2842712475 + (-10,0) | [(11,12),(13,14)] | 24.1867732449 + (-10,0) | ((11,12),(13,14)) | 24.1867732449 + (-3,4) | [(1,2),(3,4)] | 4.472135955 + (-3,4) | ((1,2),(3,4)) | 4.472135955 + (-3,4) | [(0,0),(3,0),(4,5),(1,6)] | 4.472135955 + (-3,4) | ((1,2),(3,4)) | 4.472135955 + (-3,4) | ((1,2),(3,4)) | 4.472135955 + (-3,4) | [(1,2),(3,4)] | 4.472135955 + (-3,4) | ((10,20)) | 20.6155281281 + (-3,4) | [(11,12),(13,14)] | 16.1245154966 + (-3,4) | ((11,12),(13,14)) | 16.1245154966 + (5.1,34.5) | [(1,2),(3,4)] | 30.5722096028 + (5.1,34.5) | ((1,2),(3,4)) | 30.5722096028 + (5.1,34.5) | [(0,0),(3,0),(4,5),(1,6)] | 28.793402022 + (5.1,34.5) | ((1,2),(3,4)) | 30.5722096028 + (5.1,34.5) | ((1,2),(3,4)) | 30.5722096028 + (5.1,34.5) | [(1,2),(3,4)] | 30.5722096028 + (5.1,34.5) | ((10,20)) | 15.3055545473 + (5.1,34.5) | [(11,12),(13,14)] | 21.9695243462 + (5.1,34.5) | ((11,12),(13,14)) | 21.9695243462 + (-5,-12) | [(1,2),(3,4)] | 15.2315462117 + (-5,-12) | ((1,2),(3,4)) | 15.2315462117 + (-5,-12) | [(0,0),(3,0),(4,5),(1,6)] | 13 + (-5,-12) | ((1,2),(3,4)) | 15.2315462117 + (-5,-12) | ((1,2),(3,4)) | 15.2315462117 + (-5,-12) | [(1,2),(3,4)] | 15.2315462117 + (-5,-12) | ((10,20)) | 35.3411940941 + (-5,-12) | [(11,12),(13,14)] | 28.8444102037 + (-5,-12) | ((11,12),(13,14)) | 28.8444102037 + (1e-300,-1e-300) | [(1,2),(3,4)] | 2.2360679775 + (1e-300,-1e-300) | ((1,2),(3,4)) | 2.2360679775 + (1e-300,-1e-300) | [(0,0),(3,0),(4,5),(1,6)] | 1.41421356237e-300 + (1e-300,-1e-300) | ((1,2),(3,4)) | 2.2360679775 + (1e-300,-1e-300) | ((1,2),(3,4)) | 2.2360679775 + (1e-300,-1e-300) | [(1,2),(3,4)] | 2.2360679775 + (1e-300,-1e-300) | ((10,20)) | 22.360679775 + (1e-300,-1e-300) | [(11,12),(13,14)] | 16.2788205961 + (1e-300,-1e-300) | ((11,12),(13,14)) | 16.2788205961 + (1e+300,Infinity) | [(1,2),(3,4)] | Infinity + (1e+300,Infinity) | ((1,2),(3,4)) | Infinity + (1e+300,Infinity) | [(0,0),(3,0),(4,5),(1,6)] | Infinity + (1e+300,Infinity) | ((1,2),(3,4)) | Infinity + (1e+300,Infinity) | ((1,2),(3,4)) | Infinity + (1e+300,Infinity) | [(1,2),(3,4)] | Infinity + (1e+300,Infinity) | ((10,20)) | Infinity + (1e+300,Infinity) | [(11,12),(13,14)] | Infinity + (1e+300,Infinity) | ((11,12),(13,14)) | Infinity + (NaN,NaN) | [(1,2),(3,4)] | NaN + (NaN,NaN) | ((1,2),(3,4)) | NaN + (NaN,NaN) | [(0,0),(3,0),(4,5),(1,6)] | NaN + (NaN,NaN) | ((1,2),(3,4)) | NaN + (NaN,NaN) | ((1,2),(3,4)) | NaN + (NaN,NaN) | [(1,2),(3,4)] | NaN + (NaN,NaN) | ((10,20)) | NaN + (NaN,NaN) | [(11,12),(13,14)] | NaN + (NaN,NaN) | ((11,12),(13,14)) | NaN + (10,10) | [(1,2),(3,4)] | 9.21954445729 + (10,10) | ((1,2),(3,4)) | 9.21954445729 + (10,10) | [(0,0),(3,0),(4,5),(1,6)] | 7.81024967591 + (10,10) | ((1,2),(3,4)) | 9.21954445729 + (10,10) | ((1,2),(3,4)) | 9.21954445729 + (10,10) | [(1,2),(3,4)] | 9.21954445729 + (10,10) | ((10,20)) | 10 + (10,10) | [(11,12),(13,14)] | 
2.2360679775 + (10,10) | ((11,12),(13,14)) | 2.2360679775 +(81 rows) + +-- Distance to polygon +SELECT p.f1, p1.f1, p.f1 <-> p1.f1 FROM POINT_TBL p, POLYGON_TBL p1; + f1 | f1 | ?column? +-------------------+----------------------------+--------------- + (0,0) | ((2,0),(2,4),(0,0)) | 0 + (0,0) | ((3,1),(3,3),(1,0)) | 1 + (0,0) | ((1,2),(3,4),(5,6),(7,8)) | 2.2360679775 + (0,0) | ((7,8),(5,6),(3,4),(1,2)) | 2.2360679775 + (0,0) | ((1,2),(7,8),(5,6),(3,-4)) | 1.58113883008 + (0,0) | ((0,0)) | 0 + (0,0) | ((0,1),(0,1)) | 1 + (-10,0) | ((2,0),(2,4),(0,0)) | 10 + (-10,0) | ((3,1),(3,3),(1,0)) | 11 + (-10,0) | ((1,2),(3,4),(5,6),(7,8)) | 11.1803398875 + (-10,0) | ((7,8),(5,6),(3,4),(1,2)) | 11.1803398875 + (-10,0) | ((1,2),(7,8),(5,6),(3,-4)) | 11.1803398875 + (-10,0) | ((0,0)) | 10 + (-10,0) | ((0,1),(0,1)) | 10.0498756211 + (-3,4) | ((2,0),(2,4),(0,0)) | 4.472135955 + (-3,4) | ((3,1),(3,3),(1,0)) | 5.54700196225 + (-3,4) | ((1,2),(3,4),(5,6),(7,8)) | 4.472135955 + (-3,4) | ((7,8),(5,6),(3,4),(1,2)) | 4.472135955 + (-3,4) | ((1,2),(7,8),(5,6),(3,-4)) | 4.472135955 + (-3,4) | ((0,0)) | 5 + (-3,4) | ((0,1),(0,1)) | 4.24264068712 + (5.1,34.5) | ((2,0),(2,4),(0,0)) | 30.6571362002 + (5.1,34.5) | ((3,1),(3,3),(1,0)) | 31.5699223946 + (5.1,34.5) | ((1,2),(3,4),(5,6),(7,8)) | 26.5680258958 + (5.1,34.5) | ((7,8),(5,6),(3,4),(1,2)) | 26.5680258958 + (5.1,34.5) | ((1,2),(7,8),(5,6),(3,-4)) | 26.5680258958 + (5.1,34.5) | ((0,0)) | 34.8749193547 + (5.1,34.5) | ((0,1),(0,1)) | 33.8859853037 + (-5,-12) | ((2,0),(2,4),(0,0)) | 13 + (-5,-12) | ((3,1),(3,3),(1,0)) | 13.416407865 + (-5,-12) | ((1,2),(3,4),(5,6),(7,8)) | 15.2315462117 + (-5,-12) | ((7,8),(5,6),(3,4),(1,2)) | 15.2315462117 + (-5,-12) | ((1,2),(7,8),(5,6),(3,-4)) | 11.313708499 + (-5,-12) | ((0,0)) | 13 + (-5,-12) | ((0,1),(0,1)) | 13.9283882772 + (1e-300,-1e-300) | ((2,0),(2,4),(0,0)) | 0 + (1e-300,-1e-300) | ((3,1),(3,3),(1,0)) | 1 + (1e-300,-1e-300) | ((1,2),(3,4),(5,6),(7,8)) | 2.2360679775 + (1e-300,-1e-300) | ((7,8),(5,6),(3,4),(1,2)) | 2.2360679775 + (1e-300,-1e-300) | ((1,2),(7,8),(5,6),(3,-4)) | 1.58113883008 + (1e-300,-1e-300) | ((0,0)) | 0 + (1e-300,-1e-300) | ((0,1),(0,1)) | 1 + (1e+300,Infinity) | ((2,0),(2,4),(0,0)) | Infinity + (1e+300,Infinity) | ((3,1),(3,3),(1,0)) | Infinity + (1e+300,Infinity) | ((1,2),(3,4),(5,6),(7,8)) | Infinity + (1e+300,Infinity) | ((7,8),(5,6),(3,4),(1,2)) | Infinity + (1e+300,Infinity) | ((1,2),(7,8),(5,6),(3,-4)) | Infinity + (1e+300,Infinity) | ((0,0)) | Infinity + (1e+300,Infinity) | ((0,1),(0,1)) | Infinity + (NaN,NaN) | ((2,0),(2,4),(0,0)) | 0 + (NaN,NaN) | ((3,1),(3,3),(1,0)) | 0 + (NaN,NaN) | ((1,2),(3,4),(5,6),(7,8)) | 0 + (NaN,NaN) | ((7,8),(5,6),(3,4),(1,2)) | 0 + (NaN,NaN) | ((1,2),(7,8),(5,6),(3,-4)) | 0 + (NaN,NaN) | ((0,0)) | 0 + (NaN,NaN) | ((0,1),(0,1)) | 0 + (10,10) | ((2,0),(2,4),(0,0)) | 10 + (10,10) | ((3,1),(3,3),(1,0)) | 9.89949493661 + (10,10) | ((1,2),(3,4),(5,6),(7,8)) | 3.60555127546 + (10,10) | ((7,8),(5,6),(3,4),(1,2)) | 3.60555127546 + (10,10) | ((1,2),(7,8),(5,6),(3,-4)) | 3.60555127546 + (10,10) | ((0,0)) | 14.1421356237 + (10,10) | ((0,1),(0,1)) | 13.4536240471 +(63 rows) + +-- Closest point to line +SELECT p.f1, l.s, p.f1 ## l.s FROM POINT_TBL p, LINE_TBL l; + f1 | s | ?column? 
+-------------------+---------------------------------------+---------------------------------- + (0,0) | {0,-1,5} | (0,5) + (0,0) | {1,0,5} | (-5,0) + (0,0) | {0,3,0} | (0,0) + (0,0) | {1,-1,0} | (0,0) + (0,0) | {-0.4,-1,-6} | (-2.06896551724,-5.1724137931) + (0,0) | {-0.000184615384615,-1,15.3846153846} | (0.00284023658959,15.3846148603) + (0,0) | {3,NaN,5} | + (0,0) | {NaN,NaN,NaN} | + (0,0) | {0,-1,3} | (0,3) + (0,0) | {-1,0,3} | (3,0) + (-10,0) | {0,-1,5} | (-10,5) + (-10,0) | {1,0,5} | (-5,0) + (-10,0) | {0,3,0} | (-10,0) + (-10,0) | {1,-1,0} | (-5,-5) + (-10,0) | {-0.4,-1,-6} | (-10.6896551724,-1.72413793103) + (-10,0) | {-0.000184615384615,-1,15.3846153846} | (-9.99715942258,15.386461014) + (-10,0) | {3,NaN,5} | + (-10,0) | {NaN,NaN,NaN} | + (-10,0) | {0,-1,3} | (-10,3) + (-10,0) | {-1,0,3} | (3,0) + (-3,4) | {0,-1,5} | (-3,5) + (-3,4) | {1,0,5} | (-5,4) + (-3,4) | {0,3,0} | (-3,0) + (-3,4) | {1,-1,0} | (0.5,0.5) + (-3,4) | {-0.4,-1,-6} | (-6.03448275862,-3.58620689655) + (-3,4) | {-0.000184615384615,-1,15.3846153846} | (-2.99789812268,15.3851688427) + (-3,4) | {3,NaN,5} | + (-3,4) | {NaN,NaN,NaN} | + (-3,4) | {0,-1,3} | (-3,3) + (-3,4) | {-1,0,3} | (3,4) + (5.1,34.5) | {0,-1,5} | (5.1,5) + (5.1,34.5) | {1,0,5} | (-5,34.5) + (5.1,34.5) | {0,3,0} | (5.1,0) + (5.1,34.5) | {1,-1,0} | (19.8,19.8) + (5.1,34.5) | {-0.4,-1,-6} | (-9.56896551724,-2.1724137931) + (5.1,34.5) | {-0.000184615384615,-1,15.3846153846} | (5.09647083221,15.3836744977) + (5.1,34.5) | {3,NaN,5} | + (5.1,34.5) | {NaN,NaN,NaN} | + (5.1,34.5) | {0,-1,3} | (5.1,3) + (5.1,34.5) | {-1,0,3} | (3,34.5) + (-5,-12) | {0,-1,5} | (-5,5) + (-5,-12) | {1,0,5} | (-5,-12) + (-5,-12) | {0,3,0} | (-5,0) + (-5,-12) | {1,-1,0} | (-8.5,-8.5) + (-5,-12) | {-0.4,-1,-6} | (-2.24137931034,-5.10344827586) + (-5,-12) | {-0.000184615384615,-1,15.3846153846} | (-4.99494420846,15.3855375282) + (-5,-12) | {3,NaN,5} | + (-5,-12) | {NaN,NaN,NaN} | + (-5,-12) | {0,-1,3} | (-5,3) + (-5,-12) | {-1,0,3} | (3,-12) + (1e-300,-1e-300) | {0,-1,5} | (1e-300,5) + (1e-300,-1e-300) | {1,0,5} | (-5,-1e-300) + (1e-300,-1e-300) | {0,3,0} | (1e-300,0) + (1e-300,-1e-300) | {1,-1,0} | (0,0) + (1e-300,-1e-300) | {-0.4,-1,-6} | (-2.06896551724,-5.1724137931) + (1e-300,-1e-300) | {-0.000184615384615,-1,15.3846153846} | (0.00284023658959,15.3846148603) + (1e-300,-1e-300) | {3,NaN,5} | + (1e-300,-1e-300) | {NaN,NaN,NaN} | + (1e-300,-1e-300) | {0,-1,3} | (1e-300,3) + (1e-300,-1e-300) | {-1,0,3} | (3,-1e-300) + (1e+300,Infinity) | {0,-1,5} | (1e+300,5) + (1e+300,Infinity) | {1,0,5} | + (1e+300,Infinity) | {0,3,0} | (1e+300,0) + (1e+300,Infinity) | {1,-1,0} | (Infinity,NaN) + (1e+300,Infinity) | {-0.4,-1,-6} | (-Infinity,NaN) + (1e+300,Infinity) | {-0.000184615384615,-1,15.3846153846} | (-Infinity,NaN) + (1e+300,Infinity) | {3,NaN,5} | + (1e+300,Infinity) | {NaN,NaN,NaN} | + (1e+300,Infinity) | {0,-1,3} | (1e+300,3) + (1e+300,Infinity) | {-1,0,3} | + (NaN,NaN) | {0,-1,5} | + (NaN,NaN) | {1,0,5} | + (NaN,NaN) | {0,3,0} | + (NaN,NaN) | {1,-1,0} | + (NaN,NaN) | {-0.4,-1,-6} | + (NaN,NaN) | {-0.000184615384615,-1,15.3846153846} | + (NaN,NaN) | {3,NaN,5} | + (NaN,NaN) | {NaN,NaN,NaN} | + (NaN,NaN) | {0,-1,3} | + (NaN,NaN) | {-1,0,3} | + (10,10) | {0,-1,5} | (10,5) + (10,10) | {1,0,5} | (-5,10) + (10,10) | {0,3,0} | (10,0) + (10,10) | {1,-1,0} | (10,10) + (10,10) | {-0.4,-1,-6} | (3.10344827586,-7.24137931034) + (10,10) | {-0.000184615384615,-1,15.3846153846} | (10.000993742,15.3827690473) + (10,10) | {3,NaN,5} | + (10,10) | {NaN,NaN,NaN} | + (10,10) | {0,-1,3} | (10,3) + (10,10) 
| {-1,0,3} | (3,10) +(90 rows) + +-- Closest point to line segment +SELECT p.f1, l.s, p.f1 ## l.s FROM POINT_TBL p, LSEG_TBL l; + f1 | s | ?column? +-------------------+-------------------------------+---------------------------------- + (0,0) | [(1,2),(3,4)] | (1,2) + (0,0) | [(0,0),(6,6)] | (0,0) + (0,0) | [(10,-10),(-3,-4)] | (-2.0487804878,-4.43902439024) + (0,0) | [(-1000000,200),(300000,-40)] | (0.00284023658959,15.3846148603) + (0,0) | [(11,22),(33,44)] | (11,22) + (0,0) | [(-10,2),(-10,3)] | (-10,2) + (0,0) | [(0,-20),(30,-20)] | (0,-20) + (0,0) | [(NaN,1),(NaN,90)] | + (-10,0) | [(1,2),(3,4)] | (1,2) + (-10,0) | [(0,0),(6,6)] | (0,0) + (-10,0) | [(10,-10),(-3,-4)] | (-3,-4) + (-10,0) | [(-1000000,200),(300000,-40)] | (-9.99715942258,15.386461014) + (-10,0) | [(11,22),(33,44)] | (11,22) + (-10,0) | [(-10,2),(-10,3)] | (-10,2) + (-10,0) | [(0,-20),(30,-20)] | (0,-20) + (-10,0) | [(NaN,1),(NaN,90)] | + (-3,4) | [(1,2),(3,4)] | (1,2) + (-3,4) | [(0,0),(6,6)] | (0.5,0.5) + (-3,4) | [(10,-10),(-3,-4)] | (-3,-4) + (-3,4) | [(-1000000,200),(300000,-40)] | (-2.99789812268,15.3851688427) + (-3,4) | [(11,22),(33,44)] | (11,22) + (-3,4) | [(-10,2),(-10,3)] | (-10,3) + (-3,4) | [(0,-20),(30,-20)] | (0,-20) + (-3,4) | [(NaN,1),(NaN,90)] | + (5.1,34.5) | [(1,2),(3,4)] | (3,4) + (5.1,34.5) | [(0,0),(6,6)] | (6,6) + (5.1,34.5) | [(10,-10),(-3,-4)] | (-3,-4) + (5.1,34.5) | [(-1000000,200),(300000,-40)] | (5.09647083221,15.3836744977) + (5.1,34.5) | [(11,22),(33,44)] | (14.3,25.3) + (5.1,34.5) | [(-10,2),(-10,3)] | (-10,3) + (5.1,34.5) | [(0,-20),(30,-20)] | (5.1,-20) + (5.1,34.5) | [(NaN,1),(NaN,90)] | + (-5,-12) | [(1,2),(3,4)] | (1,2) + (-5,-12) | [(0,0),(6,6)] | (0,0) + (-5,-12) | [(10,-10),(-3,-4)] | (-1.60487804878,-4.64390243902) + (-5,-12) | [(-1000000,200),(300000,-40)] | (-4.99494420846,15.3855375282) + (-5,-12) | [(11,22),(33,44)] | (11,22) + (-5,-12) | [(-10,2),(-10,3)] | (-10,2) + (-5,-12) | [(0,-20),(30,-20)] | (0,-20) + (-5,-12) | [(NaN,1),(NaN,90)] | + (1e-300,-1e-300) | [(1,2),(3,4)] | (1,2) + (1e-300,-1e-300) | [(0,0),(6,6)] | (0,0) + (1e-300,-1e-300) | [(10,-10),(-3,-4)] | (-2.0487804878,-4.43902439024) + (1e-300,-1e-300) | [(-1000000,200),(300000,-40)] | (0.00284023658959,15.3846148603) + (1e-300,-1e-300) | [(11,22),(33,44)] | (11,22) + (1e-300,-1e-300) | [(-10,2),(-10,3)] | (-10,2) + (1e-300,-1e-300) | [(0,-20),(30,-20)] | (0,-20) + (1e-300,-1e-300) | [(NaN,1),(NaN,90)] | + (1e+300,Infinity) | [(1,2),(3,4)] | (3,4) + (1e+300,Infinity) | [(0,0),(6,6)] | (6,6) + (1e+300,Infinity) | [(10,-10),(-3,-4)] | (-3,-4) + (1e+300,Infinity) | [(-1000000,200),(300000,-40)] | (300000,-40) + (1e+300,Infinity) | [(11,22),(33,44)] | (33,44) + (1e+300,Infinity) | [(-10,2),(-10,3)] | (-10,3) + (1e+300,Infinity) | [(0,-20),(30,-20)] | (30,-20) + (1e+300,Infinity) | [(NaN,1),(NaN,90)] | (NaN,90) + (NaN,NaN) | [(1,2),(3,4)] | + (NaN,NaN) | [(0,0),(6,6)] | + (NaN,NaN) | [(10,-10),(-3,-4)] | + (NaN,NaN) | [(-1000000,200),(300000,-40)] | + (NaN,NaN) | [(11,22),(33,44)] | + (NaN,NaN) | [(-10,2),(-10,3)] | + (NaN,NaN) | [(0,-20),(30,-20)] | + (NaN,NaN) | [(NaN,1),(NaN,90)] | + (10,10) | [(1,2),(3,4)] | (3,4) + (10,10) | [(0,0),(6,6)] | (6,6) + (10,10) | [(10,-10),(-3,-4)] | (2.39024390244,-6.48780487805) + (10,10) | [(-1000000,200),(300000,-40)] | (10.000993742,15.3827690473) + (10,10) | [(11,22),(33,44)] | (11,22) + (10,10) | [(-10,2),(-10,3)] | (-10,3) + (10,10) | [(0,-20),(30,-20)] | (10,-20) + (10,10) | [(NaN,1),(NaN,90)] | +(72 rows) + +-- Closest point to box +SELECT p.f1, b.f1, p.f1 ## b.f1 FROM 
POINT_TBL p, BOX_TBL b; + f1 | f1 | ?column? +-------------------+---------------------+-------------- + (0,0) | (2,2),(0,0) | (0,0) + (0,0) | (3,3),(1,1) | (1,1) + (0,0) | (-2,2),(-8,-10) | (-2,0) + (0,0) | (2.5,3.5),(2.5,2.5) | (2.5,2.5) + (0,0) | (3,3),(3,3) | (3,3) + (-10,0) | (2,2),(0,0) | (0,0) + (-10,0) | (3,3),(1,1) | (1,1) + (-10,0) | (-2,2),(-8,-10) | (-8,0) + (-10,0) | (2.5,3.5),(2.5,2.5) | (2.5,2.5) + (-10,0) | (3,3),(3,3) | (3,3) + (-3,4) | (2,2),(0,0) | (0,2) + (-3,4) | (3,3),(1,1) | (1,3) + (-3,4) | (-2,2),(-8,-10) | (-3,2) + (-3,4) | (2.5,3.5),(2.5,2.5) | (2.5,3.5) + (-3,4) | (3,3),(3,3) | (3,3) + (5.1,34.5) | (2,2),(0,0) | (2,2) + (5.1,34.5) | (3,3),(1,1) | (3,3) + (5.1,34.5) | (-2,2),(-8,-10) | (-2,2) + (5.1,34.5) | (2.5,3.5),(2.5,2.5) | (2.5,3.5) + (5.1,34.5) | (3,3),(3,3) | (3,3) + (-5,-12) | (2,2),(0,0) | (0,0) + (-5,-12) | (3,3),(1,1) | (1,1) + (-5,-12) | (-2,2),(-8,-10) | (-5,-10) + (-5,-12) | (2.5,3.5),(2.5,2.5) | (2.5,2.5) + (-5,-12) | (3,3),(3,3) | (3,3) + (1e-300,-1e-300) | (2,2),(0,0) | (0,0) + (1e-300,-1e-300) | (3,3),(1,1) | (1,1) + (1e-300,-1e-300) | (-2,2),(-8,-10) | (-2,-1e-300) + (1e-300,-1e-300) | (2.5,3.5),(2.5,2.5) | (2.5,2.5) + (1e-300,-1e-300) | (3,3),(3,3) | (3,3) + (1e+300,Infinity) | (2,2),(0,0) | (0,2) + (1e+300,Infinity) | (3,3),(1,1) | (1,3) + (1e+300,Infinity) | (-2,2),(-8,-10) | (-8,2) + (1e+300,Infinity) | (2.5,3.5),(2.5,2.5) | (2.5,3.5) + (1e+300,Infinity) | (3,3),(3,3) | (3,3) + (NaN,NaN) | (2,2),(0,0) | + (NaN,NaN) | (3,3),(1,1) | + (NaN,NaN) | (-2,2),(-8,-10) | + (NaN,NaN) | (2.5,3.5),(2.5,2.5) | + (NaN,NaN) | (3,3),(3,3) | + (10,10) | (2,2),(0,0) | (2,2) + (10,10) | (3,3),(1,1) | (3,3) + (10,10) | (-2,2),(-8,-10) | (-2,2) + (10,10) | (2.5,3.5),(2.5,2.5) | (2.5,3.5) + (10,10) | (3,3),(3,3) | (3,3) +(45 rows) + +-- On line +SELECT p.f1, l.s FROM POINT_TBL p, LINE_TBL l WHERE p.f1 <@ l.s; + f1 | s +------------------+---------- + (0,0) | {0,3,0} + (0,0) | {1,-1,0} + (-10,0) | {0,3,0} + (-5,-12) | {1,0,5} + (1e-300,-1e-300) | {0,3,0} + (1e-300,-1e-300) | {1,-1,0} + (10,10) | {1,-1,0} +(7 rows) + +-- On line segment +SELECT p.f1, l.s FROM POINT_TBL p, LSEG_TBL l WHERE p.f1 <@ l.s; + f1 | s +------------------+--------------- + (0,0) | [(0,0),(6,6)] + (1e-300,-1e-300) | [(0,0),(6,6)] +(2 rows) + +-- On path +SELECT p.f1, p1.f1 FROM POINT_TBL p, PATH_TBL p1 WHERE p.f1 <@ p1.f1; + f1 | f1 +------------------+--------------------------- + (0,0) | [(0,0),(3,0),(4,5),(1,6)] + (1e-300,-1e-300) | [(0,0),(3,0),(4,5),(1,6)] + (NaN,NaN) | ((1,2),(3,4)) + (NaN,NaN) | ((1,2),(3,4)) + (NaN,NaN) | ((1,2),(3,4)) + (NaN,NaN) | ((10,20)) + (NaN,NaN) | ((11,12),(13,14)) +(7 rows) + +-- +-- Lines +-- +-- Vertical +SELECT s FROM LINE_TBL WHERE ?| s; + s +---------- + {1,0,5} + {-1,0,3} +(2 rows) + +-- Horizontal +SELECT s FROM LINE_TBL WHERE ?- s; + s +---------- + {0,-1,5} + {0,3,0} + {0,-1,3} +(3 rows) + +-- Same as line +SELECT l1.s, l2.s FROM LINE_TBL l1, LINE_TBL l2 WHERE l1.s = l2.s; + s | s +---------------------------------------+--------------------------------------- + {0,-1,5} | {0,-1,5} + {1,0,5} | {1,0,5} + {0,3,0} | {0,3,0} + {1,-1,0} | {1,-1,0} + {-0.4,-1,-6} | {-0.4,-1,-6} + {-0.000184615384615,-1,15.3846153846} | {-0.000184615384615,-1,15.3846153846} + {3,NaN,5} | {3,NaN,5} + {NaN,NaN,NaN} | {NaN,NaN,NaN} + {0,-1,3} | {0,-1,3} + {-1,0,3} | {-1,0,3} +(10 rows) + +-- Parallel to line +SELECT l1.s, l2.s FROM LINE_TBL l1, LINE_TBL l2 WHERE l1.s ?|| l2.s; + s | s +---------------------------------------+--------------------------------------- + 
{0,-1,5} | {0,-1,5} + {0,-1,5} | {0,3,0} + {0,-1,5} | {0,-1,3} + {1,0,5} | {1,0,5} + {1,0,5} | {-1,0,3} + {0,3,0} | {0,-1,5} + {0,3,0} | {0,3,0} + {0,3,0} | {0,-1,3} + {1,-1,0} | {1,-1,0} + {-0.4,-1,-6} | {-0.4,-1,-6} + {-0.000184615384615,-1,15.3846153846} | {-0.000184615384615,-1,15.3846153846} + {0,-1,3} | {0,-1,5} + {0,-1,3} | {0,3,0} + {0,-1,3} | {0,-1,3} + {-1,0,3} | {1,0,5} + {-1,0,3} | {-1,0,3} +(16 rows) + +-- Perpendicular to line +SELECT l1.s, l2.s FROM LINE_TBL l1, LINE_TBL l2 WHERE l1.s ?-| l2.s; + s | s +----------+---------- + {0,-1,5} | {1,0,5} + {0,-1,5} | {-1,0,3} + {1,0,5} | {0,-1,5} + {1,0,5} | {0,3,0} + {1,0,5} | {0,-1,3} + {0,3,0} | {1,0,5} + {0,3,0} | {-1,0,3} + {0,-1,3} | {1,0,5} + {0,-1,3} | {-1,0,3} + {-1,0,3} | {0,-1,5} + {-1,0,3} | {0,3,0} + {-1,0,3} | {0,-1,3} +(12 rows) + +-- Distance to line +SELECT l1.s, l2.s, l1.s <-> l2.s FROM LINE_TBL l1, LINE_TBL l2; + s | s | ?column? +---------------------------------------+---------------------------------------+---------- + {0,-1,5} | {0,-1,5} | 0 + {0,-1,5} | {1,0,5} | 0 + {0,-1,5} | {0,3,0} | 5 + {0,-1,5} | {1,-1,0} | 0 + {0,-1,5} | {-0.4,-1,-6} | 0 + {0,-1,5} | {-0.000184615384615,-1,15.3846153846} | 0 + {0,-1,5} | {3,NaN,5} | 0 + {0,-1,5} | {NaN,NaN,NaN} | 0 + {0,-1,5} | {0,-1,3} | 2 + {0,-1,5} | {-1,0,3} | 0 + {1,0,5} | {0,-1,5} | 0 + {1,0,5} | {1,0,5} | 0 + {1,0,5} | {0,3,0} | 0 + {1,0,5} | {1,-1,0} | 0 + {1,0,5} | {-0.4,-1,-6} | 0 + {1,0,5} | {-0.000184615384615,-1,15.3846153846} | 0 + {1,0,5} | {3,NaN,5} | 0 + {1,0,5} | {NaN,NaN,NaN} | 0 + {1,0,5} | {0,-1,3} | 0 + {1,0,5} | {-1,0,3} | 8 + {0,3,0} | {0,-1,5} | 5 + {0,3,0} | {1,0,5} | 0 + {0,3,0} | {0,3,0} | 0 + {0,3,0} | {1,-1,0} | 0 + {0,3,0} | {-0.4,-1,-6} | 0 + {0,3,0} | {-0.000184615384615,-1,15.3846153846} | 0 + {0,3,0} | {3,NaN,5} | 0 + {0,3,0} | {NaN,NaN,NaN} | 0 + {0,3,0} | {0,-1,3} | 3 + {0,3,0} | {-1,0,3} | 0 + {1,-1,0} | {0,-1,5} | 0 + {1,-1,0} | {1,0,5} | 0 + {1,-1,0} | {0,3,0} | 0 + {1,-1,0} | {1,-1,0} | 0 + {1,-1,0} | {-0.4,-1,-6} | 0 + {1,-1,0} | {-0.000184615384615,-1,15.3846153846} | 0 + {1,-1,0} | {3,NaN,5} | 0 + {1,-1,0} | {NaN,NaN,NaN} | 0 + {1,-1,0} | {0,-1,3} | 0 + {1,-1,0} | {-1,0,3} | 0 + {-0.4,-1,-6} | {0,-1,5} | 0 + {-0.4,-1,-6} | {1,0,5} | 0 + {-0.4,-1,-6} | {0,3,0} | 0 + {-0.4,-1,-6} | {1,-1,0} | 0 + {-0.4,-1,-6} | {-0.4,-1,-6} | 0 + {-0.4,-1,-6} | {-0.000184615384615,-1,15.3846153846} | 0 + {-0.4,-1,-6} | {3,NaN,5} | 0 + {-0.4,-1,-6} | {NaN,NaN,NaN} | 0 + {-0.4,-1,-6} | {0,-1,3} | 0 + {-0.4,-1,-6} | {-1,0,3} | 0 + {-0.000184615384615,-1,15.3846153846} | {0,-1,5} | 0 + {-0.000184615384615,-1,15.3846153846} | {1,0,5} | 0 + {-0.000184615384615,-1,15.3846153846} | {0,3,0} | 0 + {-0.000184615384615,-1,15.3846153846} | {1,-1,0} | 0 + {-0.000184615384615,-1,15.3846153846} | {-0.4,-1,-6} | 0 + {-0.000184615384615,-1,15.3846153846} | {-0.000184615384615,-1,15.3846153846} | 0 + {-0.000184615384615,-1,15.3846153846} | {3,NaN,5} | 0 + {-0.000184615384615,-1,15.3846153846} | {NaN,NaN,NaN} | 0 + {-0.000184615384615,-1,15.3846153846} | {0,-1,3} | 0 + {-0.000184615384615,-1,15.3846153846} | {-1,0,3} | 0 + {3,NaN,5} | {0,-1,5} | 0 + {3,NaN,5} | {1,0,5} | 0 + {3,NaN,5} | {0,3,0} | 0 + {3,NaN,5} | {1,-1,0} | 0 + {3,NaN,5} | {-0.4,-1,-6} | 0 + {3,NaN,5} | {-0.000184615384615,-1,15.3846153846} | 0 + {3,NaN,5} | {3,NaN,5} | 0 + {3,NaN,5} | {NaN,NaN,NaN} | 0 + {3,NaN,5} | {0,-1,3} | 0 + {3,NaN,5} | {-1,0,3} | 0 + {NaN,NaN,NaN} | {0,-1,5} | 0 + {NaN,NaN,NaN} | {1,0,5} | 0 + {NaN,NaN,NaN} | {0,3,0} | 0 + {NaN,NaN,NaN} | {1,-1,0} | 0 + {NaN,NaN,NaN} | 
{-0.4,-1,-6} | 0 + {NaN,NaN,NaN} | {-0.000184615384615,-1,15.3846153846} | 0 + {NaN,NaN,NaN} | {3,NaN,5} | 0 + {NaN,NaN,NaN} | {NaN,NaN,NaN} | 0 + {NaN,NaN,NaN} | {0,-1,3} | 0 + {NaN,NaN,NaN} | {-1,0,3} | 0 + {0,-1,3} | {0,-1,5} | 2 + {0,-1,3} | {1,0,5} | 0 + {0,-1,3} | {0,3,0} | 3 + {0,-1,3} | {1,-1,0} | 0 + {0,-1,3} | {-0.4,-1,-6} | 0 + {0,-1,3} | {-0.000184615384615,-1,15.3846153846} | 0 + {0,-1,3} | {3,NaN,5} | 0 + {0,-1,3} | {NaN,NaN,NaN} | 0 + {0,-1,3} | {0,-1,3} | 0 + {0,-1,3} | {-1,0,3} | 0 + {-1,0,3} | {0,-1,5} | 0 + {-1,0,3} | {1,0,5} | 8 + {-1,0,3} | {0,3,0} | 0 + {-1,0,3} | {1,-1,0} | 0 + {-1,0,3} | {-0.4,-1,-6} | 0 + {-1,0,3} | {-0.000184615384615,-1,15.3846153846} | 0 + {-1,0,3} | {3,NaN,5} | 0 + {-1,0,3} | {NaN,NaN,NaN} | 0 + {-1,0,3} | {0,-1,3} | 0 + {-1,0,3} | {-1,0,3} | 0 +(100 rows) + +-- Distance to box +SELECT l.s, b.f1, l.s <-> b.f1 FROM LINE_TBL l, BOX_TBL b; +ERROR: function "dist_lb" not implemented +-- Intersect with line +SELECT l1.s, l2.s FROM LINE_TBL l1, LINE_TBL l2 WHERE l1.s ?# l2.s; + s | s +---------------------------------------+--------------------------------------- + {0,-1,5} | {1,0,5} + {0,-1,5} | {1,-1,0} + {0,-1,5} | {-0.4,-1,-6} + {0,-1,5} | {-0.000184615384615,-1,15.3846153846} + {0,-1,5} | {3,NaN,5} + {0,-1,5} | {NaN,NaN,NaN} + {0,-1,5} | {-1,0,3} + {1,0,5} | {0,-1,5} + {1,0,5} | {0,3,0} + {1,0,5} | {1,-1,0} + {1,0,5} | {-0.4,-1,-6} + {1,0,5} | {-0.000184615384615,-1,15.3846153846} + {1,0,5} | {3,NaN,5} + {1,0,5} | {NaN,NaN,NaN} + {1,0,5} | {0,-1,3} + {0,3,0} | {1,0,5} + {0,3,0} | {1,-1,0} + {0,3,0} | {-0.4,-1,-6} + {0,3,0} | {-0.000184615384615,-1,15.3846153846} + {0,3,0} | {3,NaN,5} + {0,3,0} | {NaN,NaN,NaN} + {0,3,0} | {-1,0,3} + {1,-1,0} | {0,-1,5} + {1,-1,0} | {1,0,5} + {1,-1,0} | {0,3,0} + {1,-1,0} | {-0.4,-1,-6} + {1,-1,0} | {-0.000184615384615,-1,15.3846153846} + {1,-1,0} | {3,NaN,5} + {1,-1,0} | {NaN,NaN,NaN} + {1,-1,0} | {0,-1,3} + {1,-1,0} | {-1,0,3} + {-0.4,-1,-6} | {0,-1,5} + {-0.4,-1,-6} | {1,0,5} + {-0.4,-1,-6} | {0,3,0} + {-0.4,-1,-6} | {1,-1,0} + {-0.4,-1,-6} | {-0.000184615384615,-1,15.3846153846} + {-0.4,-1,-6} | {3,NaN,5} + {-0.4,-1,-6} | {NaN,NaN,NaN} + {-0.4,-1,-6} | {0,-1,3} + {-0.4,-1,-6} | {-1,0,3} + {-0.000184615384615,-1,15.3846153846} | {0,-1,5} + {-0.000184615384615,-1,15.3846153846} | {1,0,5} + {-0.000184615384615,-1,15.3846153846} | {0,3,0} + {-0.000184615384615,-1,15.3846153846} | {1,-1,0} + {-0.000184615384615,-1,15.3846153846} | {-0.4,-1,-6} + {-0.000184615384615,-1,15.3846153846} | {3,NaN,5} + {-0.000184615384615,-1,15.3846153846} | {NaN,NaN,NaN} + {-0.000184615384615,-1,15.3846153846} | {0,-1,3} + {-0.000184615384615,-1,15.3846153846} | {-1,0,3} + {3,NaN,5} | {0,-1,5} + {3,NaN,5} | {1,0,5} + {3,NaN,5} | {0,3,0} + {3,NaN,5} | {1,-1,0} + {3,NaN,5} | {-0.4,-1,-6} + {3,NaN,5} | {-0.000184615384615,-1,15.3846153846} + {3,NaN,5} | {3,NaN,5} + {3,NaN,5} | {NaN,NaN,NaN} + {3,NaN,5} | {0,-1,3} + {3,NaN,5} | {-1,0,3} + {NaN,NaN,NaN} | {0,-1,5} + {NaN,NaN,NaN} | {1,0,5} + {NaN,NaN,NaN} | {0,3,0} + {NaN,NaN,NaN} | {1,-1,0} + {NaN,NaN,NaN} | {-0.4,-1,-6} + {NaN,NaN,NaN} | {-0.000184615384615,-1,15.3846153846} + {NaN,NaN,NaN} | {3,NaN,5} + {NaN,NaN,NaN} | {NaN,NaN,NaN} + {NaN,NaN,NaN} | {0,-1,3} + {NaN,NaN,NaN} | {-1,0,3} + {0,-1,3} | {1,0,5} + {0,-1,3} | {1,-1,0} + {0,-1,3} | {-0.4,-1,-6} + {0,-1,3} | {-0.000184615384615,-1,15.3846153846} + {0,-1,3} | {3,NaN,5} + {0,-1,3} | {NaN,NaN,NaN} + {0,-1,3} | {-1,0,3} + {-1,0,3} | {0,-1,5} + {-1,0,3} | {0,3,0} + {-1,0,3} | {1,-1,0} + {-1,0,3} | {-0.4,-1,-6} + {-1,0,3} | 
{-0.000184615384615,-1,15.3846153846} + {-1,0,3} | {3,NaN,5} + {-1,0,3} | {NaN,NaN,NaN} + {-1,0,3} | {0,-1,3} +(84 rows) + +-- Intersect with box +SELECT l.s, b.f1 FROM LINE_TBL l, BOX_TBL b WHERE l.s ?# b.f1; + s | f1 +--------------+--------------------- + {1,0,5} | (-2,2),(-8,-10) + {0,3,0} | (2,2),(0,0) + {0,3,0} | (-2,2),(-8,-10) + {1,-1,0} | (2,2),(0,0) + {1,-1,0} | (3,3),(1,1) + {1,-1,0} | (-2,2),(-8,-10) + {1,-1,0} | (2.5,3.5),(2.5,2.5) + {1,-1,0} | (3,3),(3,3) + {-0.4,-1,-6} | (-2,2),(-8,-10) + {0,-1,3} | (3,3),(1,1) + {0,-1,3} | (2.5,3.5),(2.5,2.5) + {0,-1,3} | (3,3),(3,3) + {-1,0,3} | (3,3),(1,1) +(13 rows) + +-- Intersection point with line +SELECT l1.s, l2.s, l1.s # l2.s FROM LINE_TBL l1, LINE_TBL l2; + s | s | ?column? +---------------------------------------+---------------------------------------+----------------------------------- + {0,-1,5} | {0,-1,5} | + {0,-1,5} | {1,0,5} | (-5,5) + {0,-1,5} | {0,3,0} | + {0,-1,5} | {1,-1,0} | (5,5) + {0,-1,5} | {-0.4,-1,-6} | (-27.5,5) + {0,-1,5} | {-0.000184615384615,-1,15.3846153846} | (56250,5) + {0,-1,5} | {3,NaN,5} | (NaN,NaN) + {0,-1,5} | {NaN,NaN,NaN} | (NaN,NaN) + {0,-1,5} | {0,-1,3} | + {0,-1,5} | {-1,0,3} | (3,5) + {1,0,5} | {0,-1,5} | (-5,5) + {1,0,5} | {1,0,5} | + {1,0,5} | {0,3,0} | (-5,0) + {1,0,5} | {1,-1,0} | (-5,-5) + {1,0,5} | {-0.4,-1,-6} | (-5,-4) + {1,0,5} | {-0.000184615384615,-1,15.3846153846} | (-5,15.3855384615) + {1,0,5} | {3,NaN,5} | (NaN,NaN) + {1,0,5} | {NaN,NaN,NaN} | (NaN,NaN) + {1,0,5} | {0,-1,3} | (-5,3) + {1,0,5} | {-1,0,3} | + {0,3,0} | {0,-1,5} | + {0,3,0} | {1,0,5} | (-5,0) + {0,3,0} | {0,3,0} | + {0,3,0} | {1,-1,0} | (0,0) + {0,3,0} | {-0.4,-1,-6} | (-15,0) + {0,3,0} | {-0.000184615384615,-1,15.3846153846} | (83333.3333333,0) + {0,3,0} | {3,NaN,5} | (NaN,NaN) + {0,3,0} | {NaN,NaN,NaN} | (NaN,NaN) + {0,3,0} | {0,-1,3} | + {0,3,0} | {-1,0,3} | (3,0) + {1,-1,0} | {0,-1,5} | (5,5) + {1,-1,0} | {1,0,5} | (-5,-5) + {1,-1,0} | {0,3,0} | (0,0) + {1,-1,0} | {1,-1,0} | + {1,-1,0} | {-0.4,-1,-6} | (-4.28571428571,-4.28571428571) + {1,-1,0} | {-0.000184615384615,-1,15.3846153846} | (15.3817756722,15.3817756722) + {1,-1,0} | {3,NaN,5} | (NaN,NaN) + {1,-1,0} | {NaN,NaN,NaN} | (NaN,NaN) + {1,-1,0} | {0,-1,3} | (3,3) + {1,-1,0} | {-1,0,3} | (3,3) + {-0.4,-1,-6} | {0,-1,5} | (-27.5,5) + {-0.4,-1,-6} | {1,0,5} | (-5,-4) + {-0.4,-1,-6} | {0,3,0} | (-15,0) + {-0.4,-1,-6} | {1,-1,0} | (-4.28571428571,-4.28571428571) + {-0.4,-1,-6} | {-0.4,-1,-6} | + {-0.4,-1,-6} | {-0.000184615384615,-1,15.3846153846} | (-53.4862244113,15.3944897645) + {-0.4,-1,-6} | {3,NaN,5} | (NaN,NaN) + {-0.4,-1,-6} | {NaN,NaN,NaN} | (NaN,NaN) + {-0.4,-1,-6} | {0,-1,3} | (-22.5,3) + {-0.4,-1,-6} | {-1,0,3} | (3,-7.2) + {-0.000184615384615,-1,15.3846153846} | {0,-1,5} | (56250,5) + {-0.000184615384615,-1,15.3846153846} | {1,0,5} | (-5,15.3855384615) + {-0.000184615384615,-1,15.3846153846} | {0,3,0} | (83333.3333333,-1.7763568394e-15) + {-0.000184615384615,-1,15.3846153846} | {1,-1,0} | (15.3817756722,15.3817756722) + {-0.000184615384615,-1,15.3846153846} | {-0.4,-1,-6} | (-53.4862244113,15.3944897645) + {-0.000184615384615,-1,15.3846153846} | {-0.000184615384615,-1,15.3846153846} | + {-0.000184615384615,-1,15.3846153846} | {3,NaN,5} | (NaN,NaN) + {-0.000184615384615,-1,15.3846153846} | {NaN,NaN,NaN} | (NaN,NaN) + {-0.000184615384615,-1,15.3846153846} | {0,-1,3} | (67083.3333333,3) + {-0.000184615384615,-1,15.3846153846} | {-1,0,3} | (3,15.3840615385) + {3,NaN,5} | {0,-1,5} | (NaN,NaN) + {3,NaN,5} | {1,0,5} | (NaN,NaN) + {3,NaN,5} | {0,3,0} | 
(NaN,NaN) + {3,NaN,5} | {1,-1,0} | (NaN,NaN) + {3,NaN,5} | {-0.4,-1,-6} | (NaN,NaN) + {3,NaN,5} | {-0.000184615384615,-1,15.3846153846} | (NaN,NaN) + {3,NaN,5} | {3,NaN,5} | (NaN,NaN) + {3,NaN,5} | {NaN,NaN,NaN} | (NaN,NaN) + {3,NaN,5} | {0,-1,3} | (NaN,NaN) + {3,NaN,5} | {-1,0,3} | (NaN,NaN) + {NaN,NaN,NaN} | {0,-1,5} | (NaN,NaN) + {NaN,NaN,NaN} | {1,0,5} | (NaN,NaN) + {NaN,NaN,NaN} | {0,3,0} | (NaN,NaN) + {NaN,NaN,NaN} | {1,-1,0} | (NaN,NaN) + {NaN,NaN,NaN} | {-0.4,-1,-6} | (NaN,NaN) + {NaN,NaN,NaN} | {-0.000184615384615,-1,15.3846153846} | (NaN,NaN) + {NaN,NaN,NaN} | {3,NaN,5} | (NaN,NaN) + {NaN,NaN,NaN} | {NaN,NaN,NaN} | (NaN,NaN) + {NaN,NaN,NaN} | {0,-1,3} | (NaN,NaN) + {NaN,NaN,NaN} | {-1,0,3} | (NaN,NaN) + {0,-1,3} | {0,-1,5} | + {0,-1,3} | {1,0,5} | (-5,3) + {0,-1,3} | {0,3,0} | + {0,-1,3} | {1,-1,0} | (3,3) + {0,-1,3} | {-0.4,-1,-6} | (-22.5,3) + {0,-1,3} | {-0.000184615384615,-1,15.3846153846} | (67083.3333333,3) + {0,-1,3} | {3,NaN,5} | (NaN,NaN) + {0,-1,3} | {NaN,NaN,NaN} | (NaN,NaN) + {0,-1,3} | {0,-1,3} | + {0,-1,3} | {-1,0,3} | (3,3) + {-1,0,3} | {0,-1,5} | (3,5) + {-1,0,3} | {1,0,5} | + {-1,0,3} | {0,3,0} | (3,0) + {-1,0,3} | {1,-1,0} | (3,3) + {-1,0,3} | {-0.4,-1,-6} | (3,-7.2) + {-1,0,3} | {-0.000184615384615,-1,15.3846153846} | (3,15.3840615385) + {-1,0,3} | {3,NaN,5} | (NaN,NaN) + {-1,0,3} | {NaN,NaN,NaN} | (NaN,NaN) + {-1,0,3} | {0,-1,3} | (3,3) + {-1,0,3} | {-1,0,3} | +(100 rows) + +-- Closest point to line segment +SELECT l.s, l1.s, l.s ## l1.s FROM LINE_TBL l, LSEG_TBL l1; + s | s | ?column? +---------------------------------------+-------------------------------+----------------------------------- + {0,-1,5} | [(1,2),(3,4)] | (3,4) + {0,-1,5} | [(0,0),(6,6)] | (5,5) + {0,-1,5} | [(10,-10),(-3,-4)] | (-3,-4) + {0,-1,5} | [(-1000000,200),(300000,-40)] | (56250,5) + {0,-1,5} | [(11,22),(33,44)] | (11,22) + {0,-1,5} | [(-10,2),(-10,3)] | (-10,3) + {0,-1,5} | [(0,-20),(30,-20)] | + {0,-1,5} | [(NaN,1),(NaN,90)] | + {1,0,5} | [(1,2),(3,4)] | (1,2) + {1,0,5} | [(0,0),(6,6)] | (0,0) + {1,0,5} | [(10,-10),(-3,-4)] | (-3,-4) + {1,0,5} | [(-1000000,200),(300000,-40)] | (-5,15.3855384615) + {1,0,5} | [(11,22),(33,44)] | (11,22) + {1,0,5} | [(-10,2),(-10,3)] | + {1,0,5} | [(0,-20),(30,-20)] | (0,-20) + {1,0,5} | [(NaN,1),(NaN,90)] | + {0,3,0} | [(1,2),(3,4)] | (1,2) + {0,3,0} | [(0,0),(6,6)] | (0,0) + {0,3,0} | [(10,-10),(-3,-4)] | (-3,-4) + {0,3,0} | [(-1000000,200),(300000,-40)] | (83333.3333333,-1.7763568394e-15) + {0,3,0} | [(11,22),(33,44)] | (11,22) + {0,3,0} | [(-10,2),(-10,3)] | (-10,2) + {0,3,0} | [(0,-20),(30,-20)] | + {0,3,0} | [(NaN,1),(NaN,90)] | + {1,-1,0} | [(1,2),(3,4)] | + {1,-1,0} | [(0,0),(6,6)] | + {1,-1,0} | [(10,-10),(-3,-4)] | (-3,-4) + {1,-1,0} | [(-1000000,200),(300000,-40)] | (15.3817756722,15.3817756722) + {1,-1,0} | [(11,22),(33,44)] | + {1,-1,0} | [(-10,2),(-10,3)] | (-10,2) + {1,-1,0} | [(0,-20),(30,-20)] | (0,-20) + {1,-1,0} | [(NaN,1),(NaN,90)] | + {-0.4,-1,-6} | [(1,2),(3,4)] | (1,2) + {-0.4,-1,-6} | [(0,0),(6,6)] | (0,0) + {-0.4,-1,-6} | [(10,-10),(-3,-4)] | (10,-10) + {-0.4,-1,-6} | [(-1000000,200),(300000,-40)] | (-53.4862244113,15.3944897645) + {-0.4,-1,-6} | [(11,22),(33,44)] | (11,22) + {-0.4,-1,-6} | [(-10,2),(-10,3)] | (-10,2) + {-0.4,-1,-6} | [(0,-20),(30,-20)] | (30,-20) + {-0.4,-1,-6} | [(NaN,1),(NaN,90)] | + {-0.000184615384615,-1,15.3846153846} | [(1,2),(3,4)] | (3,4) + {-0.000184615384615,-1,15.3846153846} | [(0,0),(6,6)] | (6,6) + {-0.000184615384615,-1,15.3846153846} | [(10,-10),(-3,-4)] | (-3,-4) + 
{-0.000184615384615,-1,15.3846153846} | [(-1000000,200),(300000,-40)] | + {-0.000184615384615,-1,15.3846153846} | [(11,22),(33,44)] | (11,22) + {-0.000184615384615,-1,15.3846153846} | [(-10,2),(-10,3)] | (-10,3) + {-0.000184615384615,-1,15.3846153846} | [(0,-20),(30,-20)] | (30,-20) + {-0.000184615384615,-1,15.3846153846} | [(NaN,1),(NaN,90)] | + {3,NaN,5} | [(1,2),(3,4)] | + {3,NaN,5} | [(0,0),(6,6)] | + {3,NaN,5} | [(10,-10),(-3,-4)] | + {3,NaN,5} | [(-1000000,200),(300000,-40)] | + {3,NaN,5} | [(11,22),(33,44)] | + {3,NaN,5} | [(-10,2),(-10,3)] | + {3,NaN,5} | [(0,-20),(30,-20)] | + {3,NaN,5} | [(NaN,1),(NaN,90)] | + {NaN,NaN,NaN} | [(1,2),(3,4)] | + {NaN,NaN,NaN} | [(0,0),(6,6)] | + {NaN,NaN,NaN} | [(10,-10),(-3,-4)] | + {NaN,NaN,NaN} | [(-1000000,200),(300000,-40)] | + {NaN,NaN,NaN} | [(11,22),(33,44)] | + {NaN,NaN,NaN} | [(-10,2),(-10,3)] | + {NaN,NaN,NaN} | [(0,-20),(30,-20)] | + {NaN,NaN,NaN} | [(NaN,1),(NaN,90)] | + {0,-1,3} | [(1,2),(3,4)] | (2,3) + {0,-1,3} | [(0,0),(6,6)] | (3,3) + {0,-1,3} | [(10,-10),(-3,-4)] | (-3,-4) + {0,-1,3} | [(-1000000,200),(300000,-40)] | (67083.3333333,3) + {0,-1,3} | [(11,22),(33,44)] | (11,22) + {0,-1,3} | [(-10,2),(-10,3)] | (-10,3) + {0,-1,3} | [(0,-20),(30,-20)] | + {0,-1,3} | [(NaN,1),(NaN,90)] | + {-1,0,3} | [(1,2),(3,4)] | (3,4) + {-1,0,3} | [(0,0),(6,6)] | (3,3) + {-1,0,3} | [(10,-10),(-3,-4)] | (3,-6.76923076923) + {-1,0,3} | [(-1000000,200),(300000,-40)] | (3,15.3840615385) + {-1,0,3} | [(11,22),(33,44)] | (11,22) + {-1,0,3} | [(-10,2),(-10,3)] | + {-1,0,3} | [(0,-20),(30,-20)] | (3,-20) + {-1,0,3} | [(NaN,1),(NaN,90)] | +(80 rows) + +-- Closest point to box +SELECT l.s, b.f1, l.s ## b.f1 FROM LINE_TBL l, BOX_TBL b; +ERROR: function "close_lb" not implemented -- -- Line segments -- @@ -107,43 +1565,729 @@ SELECT '' AS count, p.f1, l.s, l.s # p.f1 AS intersection ERROR: operator does not exist: lseg # point LINE 1: SELECT '' AS count, p.f1, l.s, l.s # p.f1 AS intersection ^ -HINT: No operator matches the given name and argument type(s). You might need to add explicit type casts. 
--- closest point -SELECT '' AS thirty, p.f1, l.s, p.f1 ## l.s AS closest - FROM LSEG_TBL l, POINT_TBL p; - thirty | f1 | s | closest ---------+------------+-------------------------------+---------------------------------- - | (0,0) | [(1,2),(3,4)] | (1,2) - | (0,0) | [(0,0),(6,6)] | (-0,0) - | (0,0) | [(10,-10),(-3,-4)] | (-2.0487804878,-4.43902439024) - | (0,0) | [(-1000000,200),(300000,-40)] | (0.00284023658959,15.3846148603) - | (0,0) | [(11,22),(33,44)] | (11,22) - | (-10,0) | [(1,2),(3,4)] | (1,2) - | (-10,0) | [(0,0),(6,6)] | (0,0) - | (-10,0) | [(10,-10),(-3,-4)] | (-3,-4) - | (-10,0) | [(-1000000,200),(300000,-40)] | (-9.99715942258,15.386461014) - | (-10,0) | [(11,22),(33,44)] | (11,22) - | (-3,4) | [(1,2),(3,4)] | (1,2) - | (-3,4) | [(0,0),(6,6)] | (0.5,0.5) - | (-3,4) | [(10,-10),(-3,-4)] | (-3,-4) - | (-3,4) | [(-1000000,200),(300000,-40)] | (-2.99789812268,15.3851688427) - | (-3,4) | [(11,22),(33,44)] | (11,22) - | (5.1,34.5) | [(1,2),(3,4)] | (3,4) - | (5.1,34.5) | [(0,0),(6,6)] | (6,6) - | (5.1,34.5) | [(10,-10),(-3,-4)] | (-3,-4) - | (5.1,34.5) | [(-1000000,200),(300000,-40)] | (5.09647083221,15.3836744977) - | (5.1,34.5) | [(11,22),(33,44)] | (14.3,25.3) - | (-5,-12) | [(1,2),(3,4)] | (1,2) - | (-5,-12) | [(0,0),(6,6)] | (0,0) - | (-5,-12) | [(10,-10),(-3,-4)] | (-1.60487804878,-4.64390243902) - | (-5,-12) | [(-1000000,200),(300000,-40)] | (-4.99494420846,15.3855375282) - | (-5,-12) | [(11,22),(33,44)] | (11,22) - | (10,10) | [(1,2),(3,4)] | (3,4) - | (10,10) | [(0,0),(6,6)] | (6,6) - | (10,10) | [(10,-10),(-3,-4)] | (2.39024390244,-6.48780487805) - | (10,10) | [(-1000000,200),(300000,-40)] | (10.000993742,15.3827690473) - | (10,10) | [(11,22),(33,44)] | (11,22) -(30 rows) +HINT: No operator matches the given name and argument types. You might need to add explicit type casts. +-- Length +SELECT s, @-@ s FROM LSEG_TBL; + s | ?column? +-------------------------------+--------------- + [(1,2),(3,4)] | 2.82842712475 + [(0,0),(6,6)] | 8.48528137424 + [(10,-10),(-3,-4)] | 14.3178210633 + [(-1000000,200),(300000,-40)] | 1300000.02215 + [(11,22),(33,44)] | 31.1126983722 + [(-10,2),(-10,3)] | 1 + [(0,-20),(30,-20)] | 30 + [(NaN,1),(NaN,90)] | NaN +(8 rows) + +-- Vertical +SELECT s FROM LSEG_TBL WHERE ?| s; + s +------------------- + [(-10,2),(-10,3)] +(1 row) + +-- Horizontal +SELECT s FROM LSEG_TBL WHERE ?- s; + s +-------------------- + [(0,-20),(30,-20)] +(1 row) + +-- Center +SELECT s, @@ s FROM LSEG_TBL; + s | ?column? 
+-------------------------------+-------------- + [(1,2),(3,4)] | (2,3) + [(0,0),(6,6)] | (3,3) + [(10,-10),(-3,-4)] | (3.5,-7) + [(-1000000,200),(300000,-40)] | (-350000,80) + [(11,22),(33,44)] | (22,33) + [(-10,2),(-10,3)] | (-10,2.5) + [(0,-20),(30,-20)] | (15,-20) + [(NaN,1),(NaN,90)] | (NaN,45.5) +(8 rows) + +-- To point +SELECT s, s::point FROM LSEG_TBL; + s | s +-------------------------------+-------------- + [(1,2),(3,4)] | (2,3) + [(0,0),(6,6)] | (3,3) + [(10,-10),(-3,-4)] | (3.5,-7) + [(-1000000,200),(300000,-40)] | (-350000,80) + [(11,22),(33,44)] | (22,33) + [(-10,2),(-10,3)] | (-10,2.5) + [(0,-20),(30,-20)] | (15,-20) + [(NaN,1),(NaN,90)] | (NaN,45.5) +(8 rows) + +-- Has points less than line segment +SELECT l1.s, l2.s FROM LSEG_TBL l1, LSEG_TBL l2 WHERE l1.s < l2.s; + s | s +--------------------+------------------------------- + [(1,2),(3,4)] | [(0,0),(6,6)] + [(1,2),(3,4)] | [(10,-10),(-3,-4)] + [(1,2),(3,4)] | [(-1000000,200),(300000,-40)] + [(1,2),(3,4)] | [(11,22),(33,44)] + [(1,2),(3,4)] | [(0,-20),(30,-20)] + [(0,0),(6,6)] | [(10,-10),(-3,-4)] + [(0,0),(6,6)] | [(-1000000,200),(300000,-40)] + [(0,0),(6,6)] | [(11,22),(33,44)] + [(0,0),(6,6)] | [(0,-20),(30,-20)] + [(10,-10),(-3,-4)] | [(-1000000,200),(300000,-40)] + [(10,-10),(-3,-4)] | [(11,22),(33,44)] + [(10,-10),(-3,-4)] | [(0,-20),(30,-20)] + [(11,22),(33,44)] | [(-1000000,200),(300000,-40)] + [(-10,2),(-10,3)] | [(1,2),(3,4)] + [(-10,2),(-10,3)] | [(0,0),(6,6)] + [(-10,2),(-10,3)] | [(10,-10),(-3,-4)] + [(-10,2),(-10,3)] | [(-1000000,200),(300000,-40)] + [(-10,2),(-10,3)] | [(11,22),(33,44)] + [(-10,2),(-10,3)] | [(0,-20),(30,-20)] + [(0,-20),(30,-20)] | [(-1000000,200),(300000,-40)] + [(0,-20),(30,-20)] | [(11,22),(33,44)] +(21 rows) + +-- Has points less than or equal to line segment +SELECT l1.s, l2.s FROM LSEG_TBL l1, LSEG_TBL l2 WHERE l1.s <= l2.s; + s | s +-------------------------------+------------------------------- + [(1,2),(3,4)] | [(1,2),(3,4)] + [(1,2),(3,4)] | [(0,0),(6,6)] + [(1,2),(3,4)] | [(10,-10),(-3,-4)] + [(1,2),(3,4)] | [(-1000000,200),(300000,-40)] + [(1,2),(3,4)] | [(11,22),(33,44)] + [(1,2),(3,4)] | [(0,-20),(30,-20)] + [(0,0),(6,6)] | [(0,0),(6,6)] + [(0,0),(6,6)] | [(10,-10),(-3,-4)] + [(0,0),(6,6)] | [(-1000000,200),(300000,-40)] + [(0,0),(6,6)] | [(11,22),(33,44)] + [(0,0),(6,6)] | [(0,-20),(30,-20)] + [(10,-10),(-3,-4)] | [(10,-10),(-3,-4)] + [(10,-10),(-3,-4)] | [(-1000000,200),(300000,-40)] + [(10,-10),(-3,-4)] | [(11,22),(33,44)] + [(10,-10),(-3,-4)] | [(0,-20),(30,-20)] + [(-1000000,200),(300000,-40)] | [(-1000000,200),(300000,-40)] + [(11,22),(33,44)] | [(-1000000,200),(300000,-40)] + [(11,22),(33,44)] | [(11,22),(33,44)] + [(-10,2),(-10,3)] | [(1,2),(3,4)] + [(-10,2),(-10,3)] | [(0,0),(6,6)] + [(-10,2),(-10,3)] | [(10,-10),(-3,-4)] + [(-10,2),(-10,3)] | [(-1000000,200),(300000,-40)] + [(-10,2),(-10,3)] | [(11,22),(33,44)] + [(-10,2),(-10,3)] | [(-10,2),(-10,3)] + [(-10,2),(-10,3)] | [(0,-20),(30,-20)] + [(0,-20),(30,-20)] | [(-1000000,200),(300000,-40)] + [(0,-20),(30,-20)] | [(11,22),(33,44)] + [(0,-20),(30,-20)] | [(0,-20),(30,-20)] +(28 rows) + +-- Has points equal to line segment +SELECT l1.s, l2.s FROM LSEG_TBL l1, LSEG_TBL l2 WHERE l1.s = l2.s; + s | s +-------------------------------+------------------------------- + [(1,2),(3,4)] | [(1,2),(3,4)] + [(0,0),(6,6)] | [(0,0),(6,6)] + [(10,-10),(-3,-4)] | [(10,-10),(-3,-4)] + [(-1000000,200),(300000,-40)] | [(-1000000,200),(300000,-40)] + [(11,22),(33,44)] | [(11,22),(33,44)] + [(-10,2),(-10,3)] | [(-10,2),(-10,3)] + 
[(0,-20),(30,-20)] | [(0,-20),(30,-20)] + [(NaN,1),(NaN,90)] | [(NaN,1),(NaN,90)] +(8 rows) + +-- Has points greater than or equal to line segment +SELECT l1.s, l2.s FROM LSEG_TBL l1, LSEG_TBL l2 WHERE l1.s >= l2.s; + s | s +-------------------------------+------------------------------- + [(1,2),(3,4)] | [(1,2),(3,4)] + [(1,2),(3,4)] | [(-10,2),(-10,3)] + [(0,0),(6,6)] | [(1,2),(3,4)] + [(0,0),(6,6)] | [(0,0),(6,6)] + [(0,0),(6,6)] | [(-10,2),(-10,3)] + [(10,-10),(-3,-4)] | [(1,2),(3,4)] + [(10,-10),(-3,-4)] | [(0,0),(6,6)] + [(10,-10),(-3,-4)] | [(10,-10),(-3,-4)] + [(10,-10),(-3,-4)] | [(-10,2),(-10,3)] + [(-1000000,200),(300000,-40)] | [(1,2),(3,4)] + [(-1000000,200),(300000,-40)] | [(0,0),(6,6)] + [(-1000000,200),(300000,-40)] | [(10,-10),(-3,-4)] + [(-1000000,200),(300000,-40)] | [(-1000000,200),(300000,-40)] + [(-1000000,200),(300000,-40)] | [(11,22),(33,44)] + [(-1000000,200),(300000,-40)] | [(-10,2),(-10,3)] + [(-1000000,200),(300000,-40)] | [(0,-20),(30,-20)] + [(11,22),(33,44)] | [(1,2),(3,4)] + [(11,22),(33,44)] | [(0,0),(6,6)] + [(11,22),(33,44)] | [(10,-10),(-3,-4)] + [(11,22),(33,44)] | [(11,22),(33,44)] + [(11,22),(33,44)] | [(-10,2),(-10,3)] + [(11,22),(33,44)] | [(0,-20),(30,-20)] + [(-10,2),(-10,3)] | [(-10,2),(-10,3)] + [(0,-20),(30,-20)] | [(1,2),(3,4)] + [(0,-20),(30,-20)] | [(0,0),(6,6)] + [(0,-20),(30,-20)] | [(10,-10),(-3,-4)] + [(0,-20),(30,-20)] | [(-10,2),(-10,3)] + [(0,-20),(30,-20)] | [(0,-20),(30,-20)] +(28 rows) + +-- Has points greater than line segment +SELECT l1.s, l2.s FROM LSEG_TBL l1, LSEG_TBL l2 WHERE l1.s > l2.s; + s | s +-------------------------------+-------------------- + [(1,2),(3,4)] | [(-10,2),(-10,3)] + [(0,0),(6,6)] | [(1,2),(3,4)] + [(0,0),(6,6)] | [(-10,2),(-10,3)] + [(10,-10),(-3,-4)] | [(1,2),(3,4)] + [(10,-10),(-3,-4)] | [(0,0),(6,6)] + [(10,-10),(-3,-4)] | [(-10,2),(-10,3)] + [(-1000000,200),(300000,-40)] | [(1,2),(3,4)] + [(-1000000,200),(300000,-40)] | [(0,0),(6,6)] + [(-1000000,200),(300000,-40)] | [(10,-10),(-3,-4)] + [(-1000000,200),(300000,-40)] | [(11,22),(33,44)] + [(-1000000,200),(300000,-40)] | [(-10,2),(-10,3)] + [(-1000000,200),(300000,-40)] | [(0,-20),(30,-20)] + [(11,22),(33,44)] | [(1,2),(3,4)] + [(11,22),(33,44)] | [(0,0),(6,6)] + [(11,22),(33,44)] | [(10,-10),(-3,-4)] + [(11,22),(33,44)] | [(-10,2),(-10,3)] + [(11,22),(33,44)] | [(0,-20),(30,-20)] + [(0,-20),(30,-20)] | [(1,2),(3,4)] + [(0,-20),(30,-20)] | [(0,0),(6,6)] + [(0,-20),(30,-20)] | [(10,-10),(-3,-4)] + [(0,-20),(30,-20)] | [(-10,2),(-10,3)] +(21 rows) + +-- Has points not equal to line segment +SELECT l1.s, l2.s FROM LSEG_TBL l1, LSEG_TBL l2 WHERE l1.s != l2.s; + s | s +-------------------------------+------------------------------- + [(1,2),(3,4)] | [(0,0),(6,6)] + [(1,2),(3,4)] | [(10,-10),(-3,-4)] + [(1,2),(3,4)] | [(-1000000,200),(300000,-40)] + [(1,2),(3,4)] | [(11,22),(33,44)] + [(1,2),(3,4)] | [(-10,2),(-10,3)] + [(1,2),(3,4)] | [(0,-20),(30,-20)] + [(1,2),(3,4)] | [(NaN,1),(NaN,90)] + [(0,0),(6,6)] | [(1,2),(3,4)] + [(0,0),(6,6)] | [(10,-10),(-3,-4)] + [(0,0),(6,6)] | [(-1000000,200),(300000,-40)] + [(0,0),(6,6)] | [(11,22),(33,44)] + [(0,0),(6,6)] | [(-10,2),(-10,3)] + [(0,0),(6,6)] | [(0,-20),(30,-20)] + [(0,0),(6,6)] | [(NaN,1),(NaN,90)] + [(10,-10),(-3,-4)] | [(1,2),(3,4)] + [(10,-10),(-3,-4)] | [(0,0),(6,6)] + [(10,-10),(-3,-4)] | [(-1000000,200),(300000,-40)] + [(10,-10),(-3,-4)] | [(11,22),(33,44)] + [(10,-10),(-3,-4)] | [(-10,2),(-10,3)] + [(10,-10),(-3,-4)] | [(0,-20),(30,-20)] + [(10,-10),(-3,-4)] | [(NaN,1),(NaN,90)] + 
[(-1000000,200),(300000,-40)] | [(1,2),(3,4)] + [(-1000000,200),(300000,-40)] | [(0,0),(6,6)] + [(-1000000,200),(300000,-40)] | [(10,-10),(-3,-4)] + [(-1000000,200),(300000,-40)] | [(11,22),(33,44)] + [(-1000000,200),(300000,-40)] | [(-10,2),(-10,3)] + [(-1000000,200),(300000,-40)] | [(0,-20),(30,-20)] + [(-1000000,200),(300000,-40)] | [(NaN,1),(NaN,90)] + [(11,22),(33,44)] | [(1,2),(3,4)] + [(11,22),(33,44)] | [(0,0),(6,6)] + [(11,22),(33,44)] | [(10,-10),(-3,-4)] + [(11,22),(33,44)] | [(-1000000,200),(300000,-40)] + [(11,22),(33,44)] | [(-10,2),(-10,3)] + [(11,22),(33,44)] | [(0,-20),(30,-20)] + [(11,22),(33,44)] | [(NaN,1),(NaN,90)] + [(-10,2),(-10,3)] | [(1,2),(3,4)] + [(-10,2),(-10,3)] | [(0,0),(6,6)] + [(-10,2),(-10,3)] | [(10,-10),(-3,-4)] + [(-10,2),(-10,3)] | [(-1000000,200),(300000,-40)] + [(-10,2),(-10,3)] | [(11,22),(33,44)] + [(-10,2),(-10,3)] | [(0,-20),(30,-20)] + [(-10,2),(-10,3)] | [(NaN,1),(NaN,90)] + [(0,-20),(30,-20)] | [(1,2),(3,4)] + [(0,-20),(30,-20)] | [(0,0),(6,6)] + [(0,-20),(30,-20)] | [(10,-10),(-3,-4)] + [(0,-20),(30,-20)] | [(-1000000,200),(300000,-40)] + [(0,-20),(30,-20)] | [(11,22),(33,44)] + [(0,-20),(30,-20)] | [(-10,2),(-10,3)] + [(0,-20),(30,-20)] | [(NaN,1),(NaN,90)] + [(NaN,1),(NaN,90)] | [(1,2),(3,4)] + [(NaN,1),(NaN,90)] | [(0,0),(6,6)] + [(NaN,1),(NaN,90)] | [(10,-10),(-3,-4)] + [(NaN,1),(NaN,90)] | [(-1000000,200),(300000,-40)] + [(NaN,1),(NaN,90)] | [(11,22),(33,44)] + [(NaN,1),(NaN,90)] | [(-10,2),(-10,3)] + [(NaN,1),(NaN,90)] | [(0,-20),(30,-20)] +(56 rows) + +-- Parallel with line segment +SELECT l1.s, l2.s FROM LSEG_TBL l1, LSEG_TBL l2 WHERE l1.s ?|| l2.s; + s | s +-------------------------------+------------------------------- + [(1,2),(3,4)] | [(1,2),(3,4)] + [(1,2),(3,4)] | [(0,0),(6,6)] + [(1,2),(3,4)] | [(11,22),(33,44)] + [(0,0),(6,6)] | [(1,2),(3,4)] + [(0,0),(6,6)] | [(0,0),(6,6)] + [(0,0),(6,6)] | [(11,22),(33,44)] + [(10,-10),(-3,-4)] | [(10,-10),(-3,-4)] + [(-1000000,200),(300000,-40)] | [(-1000000,200),(300000,-40)] + [(11,22),(33,44)] | [(1,2),(3,4)] + [(11,22),(33,44)] | [(0,0),(6,6)] + [(11,22),(33,44)] | [(11,22),(33,44)] + [(-10,2),(-10,3)] | [(-10,2),(-10,3)] + [(0,-20),(30,-20)] | [(0,-20),(30,-20)] +(13 rows) + +-- Perpendicular with line segment +SELECT l1.s, l2.s FROM LSEG_TBL l1, LSEG_TBL l2 WHERE l1.s ?-| l2.s; + s | s +--------------------+-------------------- + [(-10,2),(-10,3)] | [(0,-20),(30,-20)] + [(0,-20),(30,-20)] | [(-10,2),(-10,3)] +(2 rows) + +-- Distance to line +SELECT l.s, l1.s, l.s <-> l1.s FROM LSEG_TBL l, LINE_TBL l1; + s | s | ?column? 
+-------------------------------+---------------------------------------+---------------- + [(1,2),(3,4)] | {0,-1,5} | 1 + [(0,0),(6,6)] | {0,-1,5} | 0 + [(10,-10),(-3,-4)] | {0,-1,5} | 9 + [(-1000000,200),(300000,-40)] | {0,-1,5} | 0 + [(11,22),(33,44)] | {0,-1,5} | 17 + [(-10,2),(-10,3)] | {0,-1,5} | 2 + [(0,-20),(30,-20)] | {0,-1,5} | 25 + [(NaN,1),(NaN,90)] | {0,-1,5} | NaN + [(1,2),(3,4)] | {1,0,5} | 6 + [(0,0),(6,6)] | {1,0,5} | 5 + [(10,-10),(-3,-4)] | {1,0,5} | 2 + [(-1000000,200),(300000,-40)] | {1,0,5} | 0 + [(11,22),(33,44)] | {1,0,5} | 16 + [(-10,2),(-10,3)] | {1,0,5} | 5 + [(0,-20),(30,-20)] | {1,0,5} | 5 + [(NaN,1),(NaN,90)] | {1,0,5} | NaN + [(1,2),(3,4)] | {0,3,0} | 2 + [(0,0),(6,6)] | {0,3,0} | 0 + [(10,-10),(-3,-4)] | {0,3,0} | 4 + [(-1000000,200),(300000,-40)] | {0,3,0} | 0 + [(11,22),(33,44)] | {0,3,0} | 22 + [(-10,2),(-10,3)] | {0,3,0} | 2 + [(0,-20),(30,-20)] | {0,3,0} | 20 + [(NaN,1),(NaN,90)] | {0,3,0} | NaN + [(1,2),(3,4)] | {1,-1,0} | 0.707106781187 + [(0,0),(6,6)] | {1,-1,0} | 0 + [(10,-10),(-3,-4)] | {1,-1,0} | 0.707106781187 + [(-1000000,200),(300000,-40)] | {1,-1,0} | 0 + [(11,22),(33,44)] | {1,-1,0} | 7.77817459305 + [(-10,2),(-10,3)] | {1,-1,0} | 8.48528137424 + [(0,-20),(30,-20)] | {1,-1,0} | 14.1421356237 + [(NaN,1),(NaN,90)] | {1,-1,0} | NaN + [(1,2),(3,4)] | {-0.4,-1,-6} | 7.79920420344 + [(0,0),(6,6)] | {-0.4,-1,-6} | 5.57086014531 + [(10,-10),(-3,-4)] | {-0.4,-1,-6} | 0 + [(-1000000,200),(300000,-40)] | {-0.4,-1,-6} | 0 + [(11,22),(33,44)] | {-0.4,-1,-6} | 30.0826447847 + [(-10,2),(-10,3)] | {-0.4,-1,-6} | 3.71390676354 + [(0,-20),(30,-20)] | {-0.4,-1,-6} | 1.85695338177 + [(NaN,1),(NaN,90)] | {-0.4,-1,-6} | NaN + [(1,2),(3,4)] | {-0.000184615384615,-1,15.3846153846} | 11.3840613445 + [(0,0),(6,6)] | {-0.000184615384615,-1,15.3846153846} | 9.3835075324 + [(10,-10),(-3,-4)] | {-0.000184615384615,-1,15.3846153846} | 19.3851689004 + [(-1000000,200),(300000,-40)] | {-0.000184615384615,-1,15.3846153846} | 0 + [(11,22),(33,44)] | {-0.000184615384615,-1,15.3846153846} | 6.61741527185 + [(-10,2),(-10,3)] | {-0.000184615384615,-1,15.3846153846} | 12.3864613274 + [(0,-20),(30,-20)] | {-0.000184615384615,-1,15.3846153846} | 35.3790763202 + [(NaN,1),(NaN,90)] | {-0.000184615384615,-1,15.3846153846} | NaN + [(1,2),(3,4)] | {3,NaN,5} | NaN + [(0,0),(6,6)] | {3,NaN,5} | NaN + [(10,-10),(-3,-4)] | {3,NaN,5} | NaN + [(-1000000,200),(300000,-40)] | {3,NaN,5} | NaN + [(11,22),(33,44)] | {3,NaN,5} | NaN + [(-10,2),(-10,3)] | {3,NaN,5} | NaN + [(0,-20),(30,-20)] | {3,NaN,5} | NaN + [(NaN,1),(NaN,90)] | {3,NaN,5} | NaN + [(1,2),(3,4)] | {NaN,NaN,NaN} | NaN + [(0,0),(6,6)] | {NaN,NaN,NaN} | NaN + [(10,-10),(-3,-4)] | {NaN,NaN,NaN} | NaN + [(-1000000,200),(300000,-40)] | {NaN,NaN,NaN} | NaN + [(11,22),(33,44)] | {NaN,NaN,NaN} | NaN + [(-10,2),(-10,3)] | {NaN,NaN,NaN} | NaN + [(0,-20),(30,-20)] | {NaN,NaN,NaN} | NaN + [(NaN,1),(NaN,90)] | {NaN,NaN,NaN} | NaN + [(1,2),(3,4)] | {0,-1,3} | 0 + [(0,0),(6,6)] | {0,-1,3} | 0 + [(10,-10),(-3,-4)] | {0,-1,3} | 7 + [(-1000000,200),(300000,-40)] | {0,-1,3} | 0 + [(11,22),(33,44)] | {0,-1,3} | 19 + [(-10,2),(-10,3)] | {0,-1,3} | 0 + [(0,-20),(30,-20)] | {0,-1,3} | 23 + [(NaN,1),(NaN,90)] | {0,-1,3} | NaN + [(1,2),(3,4)] | {-1,0,3} | 0 + [(0,0),(6,6)] | {-1,0,3} | 0 + [(10,-10),(-3,-4)] | {-1,0,3} | 0 + [(-1000000,200),(300000,-40)] | {-1,0,3} | 0 + [(11,22),(33,44)] | {-1,0,3} | 8 + [(-10,2),(-10,3)] | {-1,0,3} | 13 + [(0,-20),(30,-20)] | {-1,0,3} | 0 + [(NaN,1),(NaN,90)] | {-1,0,3} | NaN +(80 rows) + +-- Distance to line segment +SELECT 
l1.s, l2.s, l1.s <-> l2.s FROM LSEG_TBL l1, LSEG_TBL l2; + s | s | ?column? +-------------------------------+-------------------------------+---------------- + [(1,2),(3,4)] | [(1,2),(3,4)] | 0 + [(1,2),(3,4)] | [(0,0),(6,6)] | 0.707106781187 + [(1,2),(3,4)] | [(10,-10),(-3,-4)] | 7.12398901685 + [(1,2),(3,4)] | [(-1000000,200),(300000,-40)] | 11.3840613445 + [(1,2),(3,4)] | [(11,22),(33,44)] | 19.6977156036 + [(1,2),(3,4)] | [(-10,2),(-10,3)] | 11 + [(1,2),(3,4)] | [(0,-20),(30,-20)] | 22 + [(1,2),(3,4)] | [(NaN,1),(NaN,90)] | NaN + [(0,0),(6,6)] | [(1,2),(3,4)] | 0.707106781187 + [(0,0),(6,6)] | [(0,0),(6,6)] | 0 + [(0,0),(6,6)] | [(10,-10),(-3,-4)] | 4.88901207039 + [(0,0),(6,6)] | [(-1000000,200),(300000,-40)] | 9.3835075324 + [(0,0),(6,6)] | [(11,22),(33,44)] | 16.7630546142 + [(0,0),(6,6)] | [(-10,2),(-10,3)] | 10.1980390272 + [(0,0),(6,6)] | [(0,-20),(30,-20)] | 20 + [(0,0),(6,6)] | [(NaN,1),(NaN,90)] | NaN + [(10,-10),(-3,-4)] | [(1,2),(3,4)] | 7.12398901685 + [(10,-10),(-3,-4)] | [(0,0),(6,6)] | 4.88901207039 + [(10,-10),(-3,-4)] | [(10,-10),(-3,-4)] | 0 + [(10,-10),(-3,-4)] | [(-1000000,200),(300000,-40)] | 19.3851689004 + [(10,-10),(-3,-4)] | [(11,22),(33,44)] | 29.4737584815 + [(10,-10),(-3,-4)] | [(-10,2),(-10,3)] | 9.21954445729 + [(10,-10),(-3,-4)] | [(0,-20),(30,-20)] | 10 + [(10,-10),(-3,-4)] | [(NaN,1),(NaN,90)] | NaN + [(-1000000,200),(300000,-40)] | [(1,2),(3,4)] | 11.3840613445 + [(-1000000,200),(300000,-40)] | [(0,0),(6,6)] | 9.3835075324 + [(-1000000,200),(300000,-40)] | [(10,-10),(-3,-4)] | 19.3851689004 + [(-1000000,200),(300000,-40)] | [(-1000000,200),(300000,-40)] | 0 + [(-1000000,200),(300000,-40)] | [(11,22),(33,44)] | 6.61741527185 + [(-1000000,200),(300000,-40)] | [(-10,2),(-10,3)] | 12.3864613274 + [(-1000000,200),(300000,-40)] | [(0,-20),(30,-20)] | 35.3790763202 + [(-1000000,200),(300000,-40)] | [(NaN,1),(NaN,90)] | NaN + [(11,22),(33,44)] | [(1,2),(3,4)] | 19.6977156036 + [(11,22),(33,44)] | [(0,0),(6,6)] | 16.7630546142 + [(11,22),(33,44)] | [(10,-10),(-3,-4)] | 29.4737584815 + [(11,22),(33,44)] | [(-1000000,200),(300000,-40)] | 6.61741527185 + [(11,22),(33,44)] | [(11,22),(33,44)] | 0 + [(11,22),(33,44)] | [(-10,2),(-10,3)] | 28.319604517 + [(11,22),(33,44)] | [(0,-20),(30,-20)] | 42 + [(11,22),(33,44)] | [(NaN,1),(NaN,90)] | NaN + [(-10,2),(-10,3)] | [(1,2),(3,4)] | 11 + [(-10,2),(-10,3)] | [(0,0),(6,6)] | 10.1980390272 + [(-10,2),(-10,3)] | [(10,-10),(-3,-4)] | 9.21954445729 + [(-10,2),(-10,3)] | [(-1000000,200),(300000,-40)] | 12.3864613274 + [(-10,2),(-10,3)] | [(11,22),(33,44)] | 28.319604517 + [(-10,2),(-10,3)] | [(-10,2),(-10,3)] | 0 + [(-10,2),(-10,3)] | [(0,-20),(30,-20)] | 24.1660919472 + [(-10,2),(-10,3)] | [(NaN,1),(NaN,90)] | NaN + [(0,-20),(30,-20)] | [(1,2),(3,4)] | 22 + [(0,-20),(30,-20)] | [(0,0),(6,6)] | 20 + [(0,-20),(30,-20)] | [(10,-10),(-3,-4)] | 10 + [(0,-20),(30,-20)] | [(-1000000,200),(300000,-40)] | 35.3790763202 + [(0,-20),(30,-20)] | [(11,22),(33,44)] | 42 + [(0,-20),(30,-20)] | [(-10,2),(-10,3)] | 24.1660919472 + [(0,-20),(30,-20)] | [(0,-20),(30,-20)] | 0 + [(0,-20),(30,-20)] | [(NaN,1),(NaN,90)] | NaN + [(NaN,1),(NaN,90)] | [(1,2),(3,4)] | NaN + [(NaN,1),(NaN,90)] | [(0,0),(6,6)] | NaN + [(NaN,1),(NaN,90)] | [(10,-10),(-3,-4)] | NaN + [(NaN,1),(NaN,90)] | [(-1000000,200),(300000,-40)] | NaN + [(NaN,1),(NaN,90)] | [(11,22),(33,44)] | NaN + [(NaN,1),(NaN,90)] | [(-10,2),(-10,3)] | NaN + [(NaN,1),(NaN,90)] | [(0,-20),(30,-20)] | NaN + [(NaN,1),(NaN,90)] | [(NaN,1),(NaN,90)] | NaN +(64 rows) + +-- Distance to box +SELECT l.s, 
b.f1, l.s <-> b.f1 FROM LSEG_TBL l, BOX_TBL b; + s | f1 | ?column? +-------------------------------+---------------------+---------------- + [(1,2),(3,4)] | (2,2),(0,0) | 0 + [(1,2),(3,4)] | (3,3),(1,1) | 0 + [(1,2),(3,4)] | (-2,2),(-8,-10) | 3 + [(1,2),(3,4)] | (2.5,3.5),(2.5,2.5) | 0 + [(1,2),(3,4)] | (3,3),(3,3) | 0.707106781187 + [(0,0),(6,6)] | (2,2),(0,0) | 0 + [(0,0),(6,6)] | (3,3),(1,1) | 0 + [(0,0),(6,6)] | (-2,2),(-8,-10) | 2 + [(0,0),(6,6)] | (2.5,3.5),(2.5,2.5) | 0 + [(0,0),(6,6)] | (3,3),(3,3) | 0 + [(10,-10),(-3,-4)] | (2,2),(0,0) | 4.88901207039 + [(10,-10),(-3,-4)] | (3,3),(1,1) | 6.21602963235 + [(10,-10),(-3,-4)] | (-2,2),(-8,-10) | 0 + [(10,-10),(-3,-4)] | (2.5,3.5),(2.5,2.5) | 8.20655597529 + [(10,-10),(-3,-4)] | (3,3),(3,3) | 8.87006475627 + [(-1000000,200),(300000,-40)] | (2,2),(0,0) | 13.3842459258 + [(-1000000,200),(300000,-40)] | (3,3),(1,1) | 12.3840613274 + [(-1000000,200),(300000,-40)] | (-2,2),(-8,-10) | 13.3849843873 + [(-1000000,200),(300000,-40)] | (2.5,3.5),(2.5,2.5) | 11.8841536436 + [(-1000000,200),(300000,-40)] | (3,3),(3,3) | 12.3840613274 + [(11,22),(33,44)] | (2,2),(0,0) | 21.9317121995 + [(11,22),(33,44)] | (3,3),(1,1) | 20.6155281281 + [(11,22),(33,44)] | (-2,2),(-8,-10) | 23.8537208838 + [(11,22),(33,44)] | (2.5,3.5),(2.5,2.5) | 20.3592730715 + [(11,22),(33,44)] | (3,3),(3,3) | 20.6155281281 + [(-10,2),(-10,3)] | (2,2),(0,0) | 10 + [(-10,2),(-10,3)] | (3,3),(1,1) | 11 + [(-10,2),(-10,3)] | (-2,2),(-8,-10) | 2 + [(-10,2),(-10,3)] | (2.5,3.5),(2.5,2.5) | 12.5 + [(-10,2),(-10,3)] | (3,3),(3,3) | 13 + [(0,-20),(30,-20)] | (2,2),(0,0) | 20 + [(0,-20),(30,-20)] | (3,3),(1,1) | 21 + [(0,-20),(30,-20)] | (-2,2),(-8,-10) | 10.1980390272 + [(0,-20),(30,-20)] | (2.5,3.5),(2.5,2.5) | 22.5 + [(0,-20),(30,-20)] | (3,3),(3,3) | 23 + [(NaN,1),(NaN,90)] | (2,2),(0,0) | NaN + [(NaN,1),(NaN,90)] | (3,3),(1,1) | NaN + [(NaN,1),(NaN,90)] | (-2,2),(-8,-10) | NaN + [(NaN,1),(NaN,90)] | (2.5,3.5),(2.5,2.5) | NaN + [(NaN,1),(NaN,90)] | (3,3),(3,3) | NaN +(40 rows) + +-- Intersect with line segment +SELECT l.s, l1.s FROM LSEG_TBL l, LINE_TBL l1 WHERE l.s ?# l1.s; + s | s +-------------------------------+-------------- + [(0,0),(6,6)] | {0,-1,5} + [(-1000000,200),(300000,-40)] | {0,-1,5} + [(-1000000,200),(300000,-40)] | {1,0,5} + [(0,0),(6,6)] | {0,3,0} + [(-1000000,200),(300000,-40)] | {0,3,0} + [(-1000000,200),(300000,-40)] | {1,-1,0} + [(10,-10),(-3,-4)] | {-0.4,-1,-6} + [(-1000000,200),(300000,-40)] | {-0.4,-1,-6} + [(1,2),(3,4)] | {0,-1,3} + [(0,0),(6,6)] | {0,-1,3} + [(-1000000,200),(300000,-40)] | {0,-1,3} + [(-10,2),(-10,3)] | {0,-1,3} + [(1,2),(3,4)] | {-1,0,3} + [(0,0),(6,6)] | {-1,0,3} + [(10,-10),(-3,-4)] | {-1,0,3} + [(-1000000,200),(300000,-40)] | {-1,0,3} + [(0,-20),(30,-20)] | {-1,0,3} +(17 rows) + +-- Intersect with box +SELECT l.s, b.f1 FROM LSEG_TBL l, BOX_TBL b WHERE l.s ?# b.f1; + s | f1 +--------------------+--------------------- + [(1,2),(3,4)] | (2,2),(0,0) + [(1,2),(3,4)] | (3,3),(1,1) + [(1,2),(3,4)] | (2.5,3.5),(2.5,2.5) + [(0,0),(6,6)] | (2,2),(0,0) + [(0,0),(6,6)] | (3,3),(1,1) + [(0,0),(6,6)] | (2.5,3.5),(2.5,2.5) + [(0,0),(6,6)] | (3,3),(3,3) + [(10,-10),(-3,-4)] | (-2,2),(-8,-10) +(8 rows) + +-- Intersection point with line segment +SELECT l1.s, l2.s, l1.s # l2.s FROM LSEG_TBL l1, LSEG_TBL l2; + s | s | ?column? 
+-------------------------------+-------------------------------+---------- + [(1,2),(3,4)] | [(1,2),(3,4)] | + [(1,2),(3,4)] | [(0,0),(6,6)] | + [(1,2),(3,4)] | [(10,-10),(-3,-4)] | + [(1,2),(3,4)] | [(-1000000,200),(300000,-40)] | + [(1,2),(3,4)] | [(11,22),(33,44)] | + [(1,2),(3,4)] | [(-10,2),(-10,3)] | + [(1,2),(3,4)] | [(0,-20),(30,-20)] | + [(1,2),(3,4)] | [(NaN,1),(NaN,90)] | + [(0,0),(6,6)] | [(1,2),(3,4)] | + [(0,0),(6,6)] | [(0,0),(6,6)] | + [(0,0),(6,6)] | [(10,-10),(-3,-4)] | + [(0,0),(6,6)] | [(-1000000,200),(300000,-40)] | + [(0,0),(6,6)] | [(11,22),(33,44)] | + [(0,0),(6,6)] | [(-10,2),(-10,3)] | + [(0,0),(6,6)] | [(0,-20),(30,-20)] | + [(0,0),(6,6)] | [(NaN,1),(NaN,90)] | + [(10,-10),(-3,-4)] | [(1,2),(3,4)] | + [(10,-10),(-3,-4)] | [(0,0),(6,6)] | + [(10,-10),(-3,-4)] | [(10,-10),(-3,-4)] | + [(10,-10),(-3,-4)] | [(-1000000,200),(300000,-40)] | + [(10,-10),(-3,-4)] | [(11,22),(33,44)] | + [(10,-10),(-3,-4)] | [(-10,2),(-10,3)] | + [(10,-10),(-3,-4)] | [(0,-20),(30,-20)] | + [(10,-10),(-3,-4)] | [(NaN,1),(NaN,90)] | + [(-1000000,200),(300000,-40)] | [(1,2),(3,4)] | + [(-1000000,200),(300000,-40)] | [(0,0),(6,6)] | + [(-1000000,200),(300000,-40)] | [(10,-10),(-3,-4)] | + [(-1000000,200),(300000,-40)] | [(-1000000,200),(300000,-40)] | + [(-1000000,200),(300000,-40)] | [(11,22),(33,44)] | + [(-1000000,200),(300000,-40)] | [(-10,2),(-10,3)] | + [(-1000000,200),(300000,-40)] | [(0,-20),(30,-20)] | + [(-1000000,200),(300000,-40)] | [(NaN,1),(NaN,90)] | + [(11,22),(33,44)] | [(1,2),(3,4)] | + [(11,22),(33,44)] | [(0,0),(6,6)] | + [(11,22),(33,44)] | [(10,-10),(-3,-4)] | + [(11,22),(33,44)] | [(-1000000,200),(300000,-40)] | + [(11,22),(33,44)] | [(11,22),(33,44)] | + [(11,22),(33,44)] | [(-10,2),(-10,3)] | + [(11,22),(33,44)] | [(0,-20),(30,-20)] | + [(11,22),(33,44)] | [(NaN,1),(NaN,90)] | + [(-10,2),(-10,3)] | [(1,2),(3,4)] | + [(-10,2),(-10,3)] | [(0,0),(6,6)] | + [(-10,2),(-10,3)] | [(10,-10),(-3,-4)] | + [(-10,2),(-10,3)] | [(-1000000,200),(300000,-40)] | + [(-10,2),(-10,3)] | [(11,22),(33,44)] | + [(-10,2),(-10,3)] | [(-10,2),(-10,3)] | + [(-10,2),(-10,3)] | [(0,-20),(30,-20)] | + [(-10,2),(-10,3)] | [(NaN,1),(NaN,90)] | + [(0,-20),(30,-20)] | [(1,2),(3,4)] | + [(0,-20),(30,-20)] | [(0,0),(6,6)] | + [(0,-20),(30,-20)] | [(10,-10),(-3,-4)] | + [(0,-20),(30,-20)] | [(-1000000,200),(300000,-40)] | + [(0,-20),(30,-20)] | [(11,22),(33,44)] | + [(0,-20),(30,-20)] | [(-10,2),(-10,3)] | + [(0,-20),(30,-20)] | [(0,-20),(30,-20)] | + [(0,-20),(30,-20)] | [(NaN,1),(NaN,90)] | + [(NaN,1),(NaN,90)] | [(1,2),(3,4)] | + [(NaN,1),(NaN,90)] | [(0,0),(6,6)] | + [(NaN,1),(NaN,90)] | [(10,-10),(-3,-4)] | + [(NaN,1),(NaN,90)] | [(-1000000,200),(300000,-40)] | + [(NaN,1),(NaN,90)] | [(11,22),(33,44)] | + [(NaN,1),(NaN,90)] | [(-10,2),(-10,3)] | + [(NaN,1),(NaN,90)] | [(0,-20),(30,-20)] | + [(NaN,1),(NaN,90)] | [(NaN,1),(NaN,90)] | +(64 rows) + +-- Closest point to line +SELECT l.s, l1.s, l.s ## l1.s FROM LSEG_TBL l, LINE_TBL l1; +ERROR: function "close_sl" not implemented +-- Closest point to line segment +SELECT l1.s, l2.s, l1.s ## l2.s FROM LSEG_TBL l1, LSEG_TBL l2; + s | s | ?column? 
+-------------------------------+-------------------------------+--------------------------------- + [(1,2),(3,4)] | [(1,2),(3,4)] | + [(1,2),(3,4)] | [(0,0),(6,6)] | + [(1,2),(3,4)] | [(10,-10),(-3,-4)] | (-1.98536585366,-4.46829268293) + [(1,2),(3,4)] | [(-1000000,200),(300000,-40)] | (3.00210167283,15.3840611505) + [(1,2),(3,4)] | [(11,22),(33,44)] | + [(1,2),(3,4)] | [(-10,2),(-10,3)] | (-10,2) + [(1,2),(3,4)] | [(0,-20),(30,-20)] | (1,-20) + [(1,2),(3,4)] | [(NaN,1),(NaN,90)] | + [(0,0),(6,6)] | [(1,2),(3,4)] | + [(0,0),(6,6)] | [(0,0),(6,6)] | + [(0,0),(6,6)] | [(10,-10),(-3,-4)] | (-2.0487804878,-4.43902439024) + [(0,0),(6,6)] | [(-1000000,200),(300000,-40)] | (6.00173233982,15.3835073725) + [(0,0),(6,6)] | [(11,22),(33,44)] | + [(0,0),(6,6)] | [(-10,2),(-10,3)] | (-10,2) + [(0,0),(6,6)] | [(0,-20),(30,-20)] | (0,-20) + [(0,0),(6,6)] | [(NaN,1),(NaN,90)] | + [(10,-10),(-3,-4)] | [(1,2),(3,4)] | (1,2) + [(10,-10),(-3,-4)] | [(0,0),(6,6)] | (0,0) + [(10,-10),(-3,-4)] | [(10,-10),(-3,-4)] | + [(10,-10),(-3,-4)] | [(-1000000,200),(300000,-40)] | (-2.99642119965,15.3851685701) + [(10,-10),(-3,-4)] | [(11,22),(33,44)] | (11,22) + [(10,-10),(-3,-4)] | [(-10,2),(-10,3)] | (-10,2) + [(10,-10),(-3,-4)] | [(0,-20),(30,-20)] | (10,-20) + [(10,-10),(-3,-4)] | [(NaN,1),(NaN,90)] | + [(-1000000,200),(300000,-40)] | [(1,2),(3,4)] | (3,4) + [(-1000000,200),(300000,-40)] | [(0,0),(6,6)] | (6,6) + [(-1000000,200),(300000,-40)] | [(10,-10),(-3,-4)] | (-3,-4) + [(-1000000,200),(300000,-40)] | [(-1000000,200),(300000,-40)] | + [(-1000000,200),(300000,-40)] | [(11,22),(33,44)] | (11,22) + [(-1000000,200),(300000,-40)] | [(-10,2),(-10,3)] | (-10,3) + [(-1000000,200),(300000,-40)] | [(0,-20),(30,-20)] | (30,-20) + [(-1000000,200),(300000,-40)] | [(NaN,1),(NaN,90)] | + [(11,22),(33,44)] | [(1,2),(3,4)] | + [(11,22),(33,44)] | [(0,0),(6,6)] | + [(11,22),(33,44)] | [(10,-10),(-3,-4)] | (-1.3512195122,-4.76097560976) + [(11,22),(33,44)] | [(-1000000,200),(300000,-40)] | (10.9987783234,15.3825848409) + [(11,22),(33,44)] | [(11,22),(33,44)] | + [(11,22),(33,44)] | [(-10,2),(-10,3)] | (-10,3) + [(11,22),(33,44)] | [(0,-20),(30,-20)] | (11,-20) + [(11,22),(33,44)] | [(NaN,1),(NaN,90)] | + [(-10,2),(-10,3)] | [(1,2),(3,4)] | (1,2) + [(-10,2),(-10,3)] | [(0,0),(6,6)] | (0,0) + [(-10,2),(-10,3)] | [(10,-10),(-3,-4)] | (-3,-4) + [(-10,2),(-10,3)] | [(-1000000,200),(300000,-40)] | (-9.99771326872,15.3864611163) + [(-10,2),(-10,3)] | [(11,22),(33,44)] | (11,22) + [(-10,2),(-10,3)] | [(-10,2),(-10,3)] | + [(-10,2),(-10,3)] | [(0,-20),(30,-20)] | (0,-20) + [(-10,2),(-10,3)] | [(NaN,1),(NaN,90)] | + [(0,-20),(30,-20)] | [(1,2),(3,4)] | (1,2) + [(0,-20),(30,-20)] | [(0,0),(6,6)] | (0,0) + [(0,-20),(30,-20)] | [(10,-10),(-3,-4)] | (10,-10) + [(0,-20),(30,-20)] | [(-1000000,200),(300000,-40)] | (30.0065315217,15.3790757173) + [(0,-20),(30,-20)] | [(11,22),(33,44)] | (11,22) + [(0,-20),(30,-20)] | [(-10,2),(-10,3)] | (-10,2) + [(0,-20),(30,-20)] | [(0,-20),(30,-20)] | + [(0,-20),(30,-20)] | [(NaN,1),(NaN,90)] | + [(NaN,1),(NaN,90)] | [(1,2),(3,4)] | + [(NaN,1),(NaN,90)] | [(0,0),(6,6)] | + [(NaN,1),(NaN,90)] | [(10,-10),(-3,-4)] | + [(NaN,1),(NaN,90)] | [(-1000000,200),(300000,-40)] | + [(NaN,1),(NaN,90)] | [(11,22),(33,44)] | + [(NaN,1),(NaN,90)] | [(-10,2),(-10,3)] | + [(NaN,1),(NaN,90)] | [(0,-20),(30,-20)] | + [(NaN,1),(NaN,90)] | [(NaN,1),(NaN,90)] | +(64 rows) + +-- Closest point to box +SELECT l.s, b.f1, l.s ## b.f1 FROM LSEG_TBL l, BOX_TBL b; + s | f1 | ?column? 
+-------------------------------+---------------------+------------- + [(1,2),(3,4)] | (2,2),(0,0) | (1,2) + [(1,2),(3,4)] | (3,3),(1,1) | (1.5,2.5) + [(1,2),(3,4)] | (-2,2),(-8,-10) | (-2,2) + [(1,2),(3,4)] | (2.5,3.5),(2.5,2.5) | (2.25,3.25) + [(1,2),(3,4)] | (3,3),(3,3) | (3,3) + [(0,0),(6,6)] | (2,2),(0,0) | (1,1) + [(0,0),(6,6)] | (3,3),(1,1) | (2,2) + [(0,0),(6,6)] | (-2,2),(-8,-10) | (-2,0) + [(0,0),(6,6)] | (2.5,3.5),(2.5,2.5) | (2.75,2.75) + [(0,0),(6,6)] | (3,3),(3,3) | (3,3) + [(10,-10),(-3,-4)] | (2,2),(0,0) | (0,0) + [(10,-10),(-3,-4)] | (3,3),(1,1) | (1,1) + [(10,-10),(-3,-4)] | (-2,2),(-8,-10) | (-3,-4) + [(10,-10),(-3,-4)] | (2.5,3.5),(2.5,2.5) | (2.5,2.5) + [(10,-10),(-3,-4)] | (3,3),(3,3) | (3,3) + [(-1000000,200),(300000,-40)] | (2,2),(0,0) | (2,2) + [(-1000000,200),(300000,-40)] | (3,3),(1,1) | (3,3) + [(-1000000,200),(300000,-40)] | (-2,2),(-8,-10) | (-2,2) + [(-1000000,200),(300000,-40)] | (2.5,3.5),(2.5,2.5) | (2.5,3.5) + [(-1000000,200),(300000,-40)] | (3,3),(3,3) | (3,3) + [(11,22),(33,44)] | (2,2),(0,0) | (2,2) + [(11,22),(33,44)] | (3,3),(1,1) | (3,3) + [(11,22),(33,44)] | (-2,2),(-8,-10) | (-2,2) + [(11,22),(33,44)] | (2.5,3.5),(2.5,2.5) | (2.5,3.5) + [(11,22),(33,44)] | (3,3),(3,3) | (3,3) + [(-10,2),(-10,3)] | (2,2),(0,0) | (0,2) + [(-10,2),(-10,3)] | (3,3),(1,1) | (1,2) + [(-10,2),(-10,3)] | (-2,2),(-8,-10) | (-8,2) + [(-10,2),(-10,3)] | (2.5,3.5),(2.5,2.5) | (2.5,3) + [(-10,2),(-10,3)] | (3,3),(3,3) | (3,3) + [(0,-20),(30,-20)] | (2,2),(0,0) | (0,0) + [(0,-20),(30,-20)] | (3,3),(1,1) | (1,1) + [(0,-20),(30,-20)] | (-2,2),(-8,-10) | (-2,-10) + [(0,-20),(30,-20)] | (2.5,3.5),(2.5,2.5) | (2.5,2.5) + [(0,-20),(30,-20)] | (3,3),(3,3) | (3,3) + [(NaN,1),(NaN,90)] | (2,2),(0,0) | + [(NaN,1),(NaN,90)] | (3,3),(1,1) | + [(NaN,1),(NaN,90)] | (-2,2),(-8,-10) | + [(NaN,1),(NaN,90)] | (2.5,3.5),(2.5,2.5) | + [(NaN,1),(NaN,90)] | (3,3),(3,3) | +(40 rows) + +-- On line +SELECT l.s, l1.s FROM LSEG_TBL l, LINE_TBL l1 WHERE l.s <@ l1.s; + s | s +-------------------------------+--------------------------------------- + [(0,0),(6,6)] | {1,-1,0} + [(-1000000,200),(300000,-40)] | {-0.000184615384615,-1,15.3846153846} +(2 rows) + +-- On box +SELECT l.s, b.f1 FROM LSEG_TBL l, BOX_TBL b WHERE l.s <@ b.f1; + s | f1 +---+---- +(0 rows) -- -- Boxes @@ -157,138 +2301,176 @@ SELECT '' as six, box(f1) AS box FROM CIRCLE_TBL; | (3.12132034356,4.12132034356),(-1.12132034356,-0.12132034356) | (107.071067812,207.071067812),(92.9289321881,192.928932188) | (181.317279836,82.3172798365),(18.6827201635,-80.3172798365) -(6 rows) + | (3,5),(3,5) + | (NaN,NaN),(NaN,NaN) +(8 rows) -- translation SELECT '' AS twentyfour, b.f1 + p.f1 AS translation FROM BOX_TBL b, POINT_TBL p; - twentyfour | translation -------------+------------------------- + twentyfour | translation +------------+------------------------------------- | (2,2),(0,0) | (3,3),(1,1) + | (-2,2),(-8,-10) | (2.5,3.5),(2.5,2.5) | (3,3),(3,3) | (-8,2),(-10,0) | (-7,3),(-9,1) + | (-12,2),(-18,-10) | (-7.5,3.5),(-7.5,2.5) | (-7,3),(-7,3) | (-1,6),(-3,4) | (0,7),(-2,5) + | (-5,6),(-11,-6) | (-0.5,7.5),(-0.5,6.5) | (0,7),(0,7) | (7.1,36.5),(5.1,34.5) | (8.1,37.5),(6.1,35.5) + | (3.1,36.5),(-2.9,24.5) | (7.6,38),(7.6,37) | (8.1,37.5),(8.1,37.5) | (-3,-10),(-5,-12) | (-2,-9),(-4,-11) + | (-7,-10),(-13,-22) | (-2.5,-8.5),(-2.5,-9.5) | (-2,-9),(-2,-9) + | (2,2),(1e-300,-1e-300) + | (3,3),(1,1) + | (-2,2),(-8,-10) + | (2.5,3.5),(2.5,2.5) + | (3,3),(3,3) + | (1e+300,Infinity),(1e+300,Infinity) + | (1e+300,Infinity),(1e+300,Infinity) + | 
(1e+300,Infinity),(1e+300,Infinity) + | (1e+300,Infinity),(1e+300,Infinity) + | (1e+300,Infinity),(1e+300,Infinity) + | (NaN,NaN),(NaN,NaN) + | (NaN,NaN),(NaN,NaN) + | (NaN,NaN),(NaN,NaN) + | (NaN,NaN),(NaN,NaN) + | (NaN,NaN),(NaN,NaN) | (12,12),(10,10) | (13,13),(11,11) + | (8,12),(2,0) | (12.5,13.5),(12.5,12.5) | (13,13),(13,13) -(24 rows) +(45 rows) SELECT '' AS twentyfour, b.f1 - p.f1 AS translation FROM BOX_TBL b, POINT_TBL p; - twentyfour | translation -------------+--------------------------- + twentyfour | translation +------------+----------------------------------------- | (2,2),(0,0) | (3,3),(1,1) + | (-2,2),(-8,-10) | (2.5,3.5),(2.5,2.5) | (3,3),(3,3) | (12,2),(10,0) | (13,3),(11,1) + | (8,2),(2,-10) | (12.5,3.5),(12.5,2.5) | (13,3),(13,3) | (5,-2),(3,-4) | (6,-1),(4,-3) + | (1,-2),(-5,-14) | (5.5,-0.5),(5.5,-1.5) | (6,-1),(6,-1) | (-3.1,-32.5),(-5.1,-34.5) | (-2.1,-31.5),(-4.1,-33.5) + | (-7.1,-32.5),(-13.1,-44.5) | (-2.6,-31),(-2.6,-32) | (-2.1,-31.5),(-2.1,-31.5) | (7,14),(5,12) | (8,15),(6,13) + | (3,14),(-3,2) | (7.5,15.5),(7.5,14.5) | (8,15),(8,15) + | (2,2),(-1e-300,1e-300) + | (3,3),(1,1) + | (-2,2),(-8,-10) + | (2.5,3.5),(2.5,2.5) + | (3,3),(3,3) + | (-1e+300,-Infinity),(-1e+300,-Infinity) + | (-1e+300,-Infinity),(-1e+300,-Infinity) + | (-1e+300,-Infinity),(-1e+300,-Infinity) + | (-1e+300,-Infinity),(-1e+300,-Infinity) + | (-1e+300,-Infinity),(-1e+300,-Infinity) + | (NaN,NaN),(NaN,NaN) + | (NaN,NaN),(NaN,NaN) + | (NaN,NaN),(NaN,NaN) + | (NaN,NaN),(NaN,NaN) + | (NaN,NaN),(NaN,NaN) | (-8,-8),(-10,-10) | (-7,-7),(-9,-9) + | (-12,-8),(-18,-20) | (-7.5,-6.5),(-7.5,-7.5) | (-7,-7),(-7,-7) -(24 rows) +(45 rows) --- scaling and rotation -SELECT '' AS twentyfour, b.f1 * p.f1 AS rotation - FROM BOX_TBL b, POINT_TBL p; - twentyfour | rotation -------------+----------------------------- - | (0,0),(0,0) - | (0,0),(0,0) - | (0,0),(0,0) - | (0,0),(0,0) - | (-0,0),(-20,-20) - | (-10,-10),(-30,-30) - | (-25,-25),(-25,-35) - | (-30,-30),(-30,-30) - | (-0,2),(-14,0) - | (-7,3),(-21,1) - | (-17.5,2.5),(-21.5,-0.5) - | (-21,3),(-21,3) - | (0,79.2),(-58.8,0) - | (-29.4,118.8),(-88.2,39.6) - | (-73.5,104.1),(-108,99) - | (-88.2,118.8),(-88.2,118.8) - | (14,-0),(0,-34) - | (21,-17),(7,-51) - | (29.5,-42.5),(17.5,-47.5) - | (21,-51),(21,-51) - | (0,40),(0,0) - | (0,60),(0,20) - | (0,60),(-10,50) - | (0,60),(0,60) -(24 rows) - -SELECT '' AS twenty, b.f1 / p.f1 AS rotation - FROM BOX_TBL b, POINT_TBL p - WHERE (p.f1 <-> point '(0,0)') >= 1; - twenty | rotation ---------+---------------------------------------------------------------------- - | (0,-0),(-0.2,-0.2) - | (0.08,-0),(0,-0.56) - | (0.0651176557644,0),(0,-0.0483449262493) - | (-0,0.0828402366864),(-0.201183431953,0) - | (0.2,0),(0,0) - | (-0.1,-0.1),(-0.3,-0.3) - | (0.12,-0.28),(0.04,-0.84) - | (0.0976764836466,-0.0241724631247),(0.0325588278822,-0.072517389374) - | (-0.100591715976,0.12426035503),(-0.301775147929,0.0414201183432) - | (0.3,0),(0.1,0) - | (-0.25,-0.25),(-0.25,-0.35) - | (0.26,-0.7),(0.1,-0.82) - | (0.109762715209,-0.0562379754329),(0.0813970697055,-0.0604311578117) - | (-0.251479289941,0.103550295858),(-0.322485207101,0.0739644970414) - | (0.3,0.05),(0.25,0) - | (-0.3,-0.3),(-0.3,-0.3) - | (0.12,-0.84),(0.12,-0.84) - | (0.0976764836466,-0.072517389374),(0.0976764836466,-0.072517389374) - | (-0.301775147929,0.12426035503),(-0.301775147929,0.12426035503) - | (0.3,0),(0.3,0) -(20 rows) +-- Multiply with point +SELECT b.f1, p.f1, b.f1 * p.f1 FROM BOX_TBL b, POINT_TBL p WHERE p.f1[0] BETWEEN 1 AND 1000; + f1 | f1 | ?column? 
+---------------------+------------+----------------------------- + (2,2),(0,0) | (5.1,34.5) | (0,79.2),(-58.8,0) + (2,2),(0,0) | (10,10) | (0,40),(0,0) + (3,3),(1,1) | (5.1,34.5) | (-29.4,118.8),(-88.2,39.6) + (3,3),(1,1) | (10,10) | (0,60),(0,20) + (-2,2),(-8,-10) | (5.1,34.5) | (304.2,-58.8),(-79.2,-327) + (-2,2),(-8,-10) | (10,10) | (20,0),(-40,-180) + (2.5,3.5),(2.5,2.5) | (5.1,34.5) | (-73.5,104.1),(-108,99) + (2.5,3.5),(2.5,2.5) | (10,10) | (0,60),(-10,50) + (3,3),(3,3) | (5.1,34.5) | (-88.2,118.8),(-88.2,118.8) + (3,3),(3,3) | (10,10) | (0,60),(0,60) +(10 rows) + +-- Overflow error +SELECT b.f1, p.f1, b.f1 * p.f1 FROM BOX_TBL b, POINT_TBL p WHERE p.f1[0] > 1000; + f1 | f1 | ?column? +---------------------+-------------------+-------------------------------------------- + (2,2),(0,0) | (1e+300,Infinity) | (NaN,NaN),(-Infinity,Infinity) + (2,2),(0,0) | (NaN,NaN) | (NaN,NaN),(NaN,NaN) + (3,3),(1,1) | (1e+300,Infinity) | (-Infinity,Infinity),(-Infinity,Infinity) + (3,3),(1,1) | (NaN,NaN) | (NaN,NaN),(NaN,NaN) + (-2,2),(-8,-10) | (1e+300,Infinity) | (Infinity,-Infinity),(-Infinity,-Infinity) + (-2,2),(-8,-10) | (NaN,NaN) | (NaN,NaN),(NaN,NaN) + (2.5,3.5),(2.5,2.5) | (1e+300,Infinity) | (-Infinity,Infinity),(-Infinity,Infinity) + (2.5,3.5),(2.5,2.5) | (NaN,NaN) | (NaN,NaN),(NaN,NaN) + (3,3),(3,3) | (1e+300,Infinity) | (-Infinity,Infinity),(-Infinity,Infinity) + (3,3),(3,3) | (NaN,NaN) | (NaN,NaN),(NaN,NaN) +(10 rows) + +-- Divide by point +SELECT b.f1, p.f1, b.f1 / p.f1 FROM BOX_TBL b, POINT_TBL p WHERE p.f1[0] BETWEEN 1 AND 1000; + f1 | f1 | ?column? +---------------------+------------+---------------------------------------------------------------------- + (2,2),(0,0) | (5.1,34.5) | (0.0651176557644,0),(0,-0.0483449262493) + (2,2),(0,0) | (10,10) | (0.2,0),(0,0) + (3,3),(1,1) | (5.1,34.5) | (0.0976764836466,-0.0241724631247),(0.0325588278822,-0.072517389374) + (3,3),(1,1) | (10,10) | (0.3,0),(0.1,0) + (-2,2),(-8,-10) | (5.1,34.5) | (0.0483449262493,0.18499334024),(-0.317201914064,0.0651176557644) + (-2,2),(-8,-10) | (10,10) | (0,0.2),(-0.9,-0.1) + (2.5,3.5),(2.5,2.5) | (5.1,34.5) | (0.109762715209,-0.0562379754329),(0.0813970697055,-0.0604311578117) + (2.5,3.5),(2.5,2.5) | (10,10) | (0.3,0.05),(0.25,0) + (3,3),(3,3) | (5.1,34.5) | (0.0976764836466,-0.072517389374),(0.0976764836466,-0.072517389374) + (3,3),(3,3) | (10,10) | (0.3,0),(0.3,0) +(10 rows) +-- To box SELECT f1::box FROM POINT_TBL; - f1 ------------------------ + f1 +------------------------------------- (0,0),(0,0) (-10,0),(-10,0) (-3,4),(-3,4) (5.1,34.5),(5.1,34.5) (-5,-12),(-5,-12) + (1e-300,-1e-300),(1e-300,-1e-300) + (1e+300,Infinity),(1e+300,Infinity) + (NaN,NaN),(NaN,NaN) (10,10),(10,10) -(6 rows) +(9 rows) SELECT bound_box(a.f1, b.f1) FROM BOX_TBL a, BOX_TBL b; @@ -296,76 +2478,935 @@ SELECT bound_box(a.f1, b.f1) --------------------- (2,2),(0,0) (3,3),(0,0) + (2,2),(-8,-10) (2.5,3.5),(0,0) (3,3),(0,0) (3,3),(0,0) (3,3),(1,1) + (3,3),(-8,-10) (3,3.5),(1,1) (3,3),(1,1) + (2,2),(-8,-10) + (3,3),(-8,-10) + (-2,2),(-8,-10) + (2.5,3.5),(-8,-10) + (3,3),(-8,-10) (2.5,3.5),(0,0) (3,3.5),(1,1) + (2.5,3.5),(-8,-10) (2.5,3.5),(2.5,2.5) (3,3.5),(2.5,2.5) (3,3),(0,0) (3,3),(1,1) + (3,3),(-8,-10) (3,3.5),(2.5,2.5) (3,3),(3,3) -(16 rows) +(25 rows) + +-- Below box +SELECT b1.f1, b2.f1, b1.f1 <^ b2.f1 FROM BOX_TBL b1, BOX_TBL b2; + f1 | f1 | ?column? 
+---------------------+---------------------+---------- + (2,2),(0,0) | (2,2),(0,0) | f + (2,2),(0,0) | (3,3),(1,1) | f + (2,2),(0,0) | (-2,2),(-8,-10) | f + (2,2),(0,0) | (2.5,3.5),(2.5,2.5) | t + (2,2),(0,0) | (3,3),(3,3) | t + (3,3),(1,1) | (2,2),(0,0) | f + (3,3),(1,1) | (3,3),(1,1) | f + (3,3),(1,1) | (-2,2),(-8,-10) | f + (3,3),(1,1) | (2.5,3.5),(2.5,2.5) | f + (3,3),(1,1) | (3,3),(3,3) | t + (-2,2),(-8,-10) | (2,2),(0,0) | f + (-2,2),(-8,-10) | (3,3),(1,1) | f + (-2,2),(-8,-10) | (-2,2),(-8,-10) | f + (-2,2),(-8,-10) | (2.5,3.5),(2.5,2.5) | t + (-2,2),(-8,-10) | (3,3),(3,3) | t + (2.5,3.5),(2.5,2.5) | (2,2),(0,0) | f + (2.5,3.5),(2.5,2.5) | (3,3),(1,1) | f + (2.5,3.5),(2.5,2.5) | (-2,2),(-8,-10) | f + (2.5,3.5),(2.5,2.5) | (2.5,3.5),(2.5,2.5) | f + (2.5,3.5),(2.5,2.5) | (3,3),(3,3) | f + (3,3),(3,3) | (2,2),(0,0) | f + (3,3),(3,3) | (3,3),(1,1) | f + (3,3),(3,3) | (-2,2),(-8,-10) | f + (3,3),(3,3) | (2.5,3.5),(2.5,2.5) | f + (3,3),(3,3) | (3,3),(3,3) | t +(25 rows) + +-- Above box +SELECT b1.f1, b2.f1, b1.f1 >^ b2.f1 FROM BOX_TBL b1, BOX_TBL b2; + f1 | f1 | ?column? +---------------------+---------------------+---------- + (2,2),(0,0) | (2,2),(0,0) | f + (2,2),(0,0) | (3,3),(1,1) | f + (2,2),(0,0) | (-2,2),(-8,-10) | f + (2,2),(0,0) | (2.5,3.5),(2.5,2.5) | f + (2,2),(0,0) | (3,3),(3,3) | f + (3,3),(1,1) | (2,2),(0,0) | f + (3,3),(1,1) | (3,3),(1,1) | f + (3,3),(1,1) | (-2,2),(-8,-10) | f + (3,3),(1,1) | (2.5,3.5),(2.5,2.5) | f + (3,3),(1,1) | (3,3),(3,3) | f + (-2,2),(-8,-10) | (2,2),(0,0) | f + (-2,2),(-8,-10) | (3,3),(1,1) | f + (-2,2),(-8,-10) | (-2,2),(-8,-10) | f + (-2,2),(-8,-10) | (2.5,3.5),(2.5,2.5) | f + (-2,2),(-8,-10) | (3,3),(3,3) | f + (2.5,3.5),(2.5,2.5) | (2,2),(0,0) | t + (2.5,3.5),(2.5,2.5) | (3,3),(1,1) | f + (2.5,3.5),(2.5,2.5) | (-2,2),(-8,-10) | t + (2.5,3.5),(2.5,2.5) | (2.5,3.5),(2.5,2.5) | f + (2.5,3.5),(2.5,2.5) | (3,3),(3,3) | f + (3,3),(3,3) | (2,2),(0,0) | t + (3,3),(3,3) | (3,3),(1,1) | t + (3,3),(3,3) | (-2,2),(-8,-10) | t + (3,3),(3,3) | (2.5,3.5),(2.5,2.5) | f + (3,3),(3,3) | (3,3),(3,3) | t +(25 rows) + +-- Intersection point with box +SELECT b1.f1, b2.f1, b1.f1 # b2.f1 FROM BOX_TBL b1, BOX_TBL b2; + f1 | f1 | ?column? 
+---------------------+---------------------+--------------------- + (2,2),(0,0) | (2,2),(0,0) | (2,2),(0,0) + (2,2),(0,0) | (3,3),(1,1) | (2,2),(1,1) + (2,2),(0,0) | (-2,2),(-8,-10) | + (2,2),(0,0) | (2.5,3.5),(2.5,2.5) | + (2,2),(0,0) | (3,3),(3,3) | + (3,3),(1,1) | (2,2),(0,0) | (2,2),(1,1) + (3,3),(1,1) | (3,3),(1,1) | (3,3),(1,1) + (3,3),(1,1) | (-2,2),(-8,-10) | + (3,3),(1,1) | (2.5,3.5),(2.5,2.5) | (2.5,3),(2.5,2.5) + (3,3),(1,1) | (3,3),(3,3) | (3,3),(3,3) + (-2,2),(-8,-10) | (2,2),(0,0) | + (-2,2),(-8,-10) | (3,3),(1,1) | + (-2,2),(-8,-10) | (-2,2),(-8,-10) | (-2,2),(-8,-10) + (-2,2),(-8,-10) | (2.5,3.5),(2.5,2.5) | + (-2,2),(-8,-10) | (3,3),(3,3) | + (2.5,3.5),(2.5,2.5) | (2,2),(0,0) | + (2.5,3.5),(2.5,2.5) | (3,3),(1,1) | (2.5,3),(2.5,2.5) + (2.5,3.5),(2.5,2.5) | (-2,2),(-8,-10) | + (2.5,3.5),(2.5,2.5) | (2.5,3.5),(2.5,2.5) | (2.5,3.5),(2.5,2.5) + (2.5,3.5),(2.5,2.5) | (3,3),(3,3) | + (3,3),(3,3) | (2,2),(0,0) | + (3,3),(3,3) | (3,3),(1,1) | (3,3),(3,3) + (3,3),(3,3) | (-2,2),(-8,-10) | + (3,3),(3,3) | (2.5,3.5),(2.5,2.5) | + (3,3),(3,3) | (3,3),(3,3) | (3,3),(3,3) +(25 rows) + +-- Diagonal +SELECT f1, diagonal(f1) FROM BOX_TBL; + f1 | diagonal +---------------------+----------------------- + (2,2),(0,0) | [(2,2),(0,0)] + (3,3),(1,1) | [(3,3),(1,1)] + (-2,2),(-8,-10) | [(-2,2),(-8,-10)] + (2.5,3.5),(2.5,2.5) | [(2.5,3.5),(2.5,2.5)] + (3,3),(3,3) | [(3,3),(3,3)] +(5 rows) + +-- Distance to box +SELECT b1.f1, b2.f1, b1.f1 <-> b2.f1 FROM BOX_TBL b1, BOX_TBL b2; + f1 | f1 | ?column? +---------------------+---------------------+--------------- + (2,2),(0,0) | (2,2),(0,0) | 0 + (2,2),(0,0) | (3,3),(1,1) | 1.41421356237 + (2,2),(0,0) | (-2,2),(-8,-10) | 7.81024967591 + (2,2),(0,0) | (2.5,3.5),(2.5,2.5) | 2.5 + (2,2),(0,0) | (3,3),(3,3) | 2.82842712475 + (3,3),(1,1) | (2,2),(0,0) | 1.41421356237 + (3,3),(1,1) | (3,3),(1,1) | 0 + (3,3),(1,1) | (-2,2),(-8,-10) | 9.21954445729 + (3,3),(1,1) | (2.5,3.5),(2.5,2.5) | 1.11803398875 + (3,3),(1,1) | (3,3),(3,3) | 1.41421356237 + (-2,2),(-8,-10) | (2,2),(0,0) | 7.81024967591 + (-2,2),(-8,-10) | (3,3),(1,1) | 9.21954445729 + (-2,2),(-8,-10) | (-2,2),(-8,-10) | 0 + (-2,2),(-8,-10) | (2.5,3.5),(2.5,2.5) | 10.2591422643 + (-2,2),(-8,-10) | (3,3),(3,3) | 10.6301458127 + (2.5,3.5),(2.5,2.5) | (2,2),(0,0) | 2.5 + (2.5,3.5),(2.5,2.5) | (3,3),(1,1) | 1.11803398875 + (2.5,3.5),(2.5,2.5) | (-2,2),(-8,-10) | 10.2591422643 + (2.5,3.5),(2.5,2.5) | (2.5,3.5),(2.5,2.5) | 0 + (2.5,3.5),(2.5,2.5) | (3,3),(3,3) | 0.5 + (3,3),(3,3) | (2,2),(0,0) | 2.82842712475 + (3,3),(3,3) | (3,3),(1,1) | 1.41421356237 + (3,3),(3,3) | (-2,2),(-8,-10) | 10.6301458127 + (3,3),(3,3) | (2.5,3.5),(2.5,2.5) | 0.5 + (3,3),(3,3) | (3,3),(3,3) | 0 +(25 rows) -- -- Paths -- -SELECT '' AS eight, npoints(f1) AS npoints, f1 AS path FROM PATH_TBL; - eight | npoints | path --------+---------+--------------------------- - | 2 | [(1,2),(3,4)] - | 2 | ((1,2),(3,4)) - | 4 | [(0,0),(3,0),(4,5),(1,6)] - | 2 | ((1,2),(3,4)) - | 2 | ((1,2),(3,4)) - | 2 | [(1,2),(3,4)] - | 2 | [(11,12),(13,14)] - | 2 | ((11,12),(13,14)) -(8 rows) +-- Points +SELECT f1, npoints(f1) FROM PATH_TBL; + f1 | npoints +---------------------------+--------- + [(1,2),(3,4)] | 2 + ((1,2),(3,4)) | 2 + [(0,0),(3,0),(4,5),(1,6)] | 4 + ((1,2),(3,4)) | 2 + ((1,2),(3,4)) | 2 + [(1,2),(3,4)] | 2 + ((10,20)) | 1 + [(11,12),(13,14)] | 2 + ((11,12),(13,14)) | 2 +(9 rows) -SELECT '' AS four, path(f1) FROM POLYGON_TBL; - four | path -------+--------------------- - | ((2,0),(2,4),(0,0)) - | ((3,1),(3,3),(1,0)) - | ((0,0)) - | ((0,1),(0,1)) -(4 
rows) +-- Area +SELECT f1, area(f1) FROM PATH_TBL; + f1 | area +---------------------------+------ + [(1,2),(3,4)] | + ((1,2),(3,4)) | 0 + [(0,0),(3,0),(4,5),(1,6)] | + ((1,2),(3,4)) | 0 + ((1,2),(3,4)) | 0 + [(1,2),(3,4)] | + ((10,20)) | 0 + [(11,12),(13,14)] | + ((11,12),(13,14)) | 0 +(9 rows) --- translation -SELECT '' AS eight, p1.f1 + point '(10,10)' AS dist_add - FROM PATH_TBL p1; - eight | dist_add --------+----------------------------------- - | [(11,12),(13,14)] - | ((11,12),(13,14)) - | [(10,10),(13,10),(14,15),(11,16)] - | ((11,12),(13,14)) - | ((11,12),(13,14)) - | [(11,12),(13,14)] - | [(21,22),(23,24)] - | ((21,22),(23,24)) -(8 rows) +-- Length +SELECT f1, @-@ f1 FROM PATH_TBL; + f1 | ?column? +---------------------------+--------------- + [(1,2),(3,4)] | 2.82842712475 + ((1,2),(3,4)) | 5.65685424949 + [(0,0),(3,0),(4,5),(1,6)] | 11.2612971738 + ((1,2),(3,4)) | 5.65685424949 + ((1,2),(3,4)) | 5.65685424949 + [(1,2),(3,4)] | 2.82842712475 + ((10,20)) | 0 + [(11,12),(13,14)] | 2.82842712475 + ((11,12),(13,14)) | 5.65685424949 +(9 rows) --- scaling and rotation -SELECT '' AS eight, p1.f1 * point '(2,-1)' AS dist_mul - FROM PATH_TBL p1; - eight | dist_mul --------+------------------------------ - | [(4,3),(10,5)] - | ((4,3),(10,5)) - | [(0,0),(6,-3),(13,6),(8,11)] - | ((4,3),(10,5)) - | ((4,3),(10,5)) - | [(4,3),(10,5)] - | [(34,13),(40,15)] - | ((34,13),(40,15)) -(8 rows) +-- Center +SELECT f1, @@ f1 FROM PATH_TBL; +ERROR: function "path_center" not implemented +-- To polygon +SELECT f1, f1::polygon FROM PATH_TBL WHERE isclosed(f1); + f1 | f1 +-------------------+------------------- + ((1,2),(3,4)) | ((1,2),(3,4)) + ((1,2),(3,4)) | ((1,2),(3,4)) + ((1,2),(3,4)) | ((1,2),(3,4)) + ((10,20)) | ((10,20)) + ((11,12),(13,14)) | ((11,12),(13,14)) +(5 rows) + +-- Open path cannot be converted to polygon error +SELECT f1, f1::polygon FROM PATH_TBL WHERE isopen(f1); +ERROR: open path cannot be converted to polygon +-- Has points less than path +SELECT p1.f1, p2.f1 FROM PATH_TBL p1, PATH_TBL p2 WHERE p1.f1 < p2.f1; + f1 | f1 +-------------------+--------------------------- + [(1,2),(3,4)] | [(0,0),(3,0),(4,5),(1,6)] + ((1,2),(3,4)) | [(0,0),(3,0),(4,5),(1,6)] + ((1,2),(3,4)) | [(0,0),(3,0),(4,5),(1,6)] + ((1,2),(3,4)) | [(0,0),(3,0),(4,5),(1,6)] + [(1,2),(3,4)] | [(0,0),(3,0),(4,5),(1,6)] + ((10,20)) | [(1,2),(3,4)] + ((10,20)) | ((1,2),(3,4)) + ((10,20)) | [(0,0),(3,0),(4,5),(1,6)] + ((10,20)) | ((1,2),(3,4)) + ((10,20)) | ((1,2),(3,4)) + ((10,20)) | [(1,2),(3,4)] + ((10,20)) | [(11,12),(13,14)] + ((10,20)) | ((11,12),(13,14)) + [(11,12),(13,14)] | [(0,0),(3,0),(4,5),(1,6)] + ((11,12),(13,14)) | [(0,0),(3,0),(4,5),(1,6)] +(15 rows) + +-- Has points less than or equal to path +SELECT p1.f1, p2.f1 FROM PATH_TBL p1, PATH_TBL p2 WHERE p1.f1 <= p2.f1; + f1 | f1 +---------------------------+--------------------------- + [(1,2),(3,4)] | [(1,2),(3,4)] + [(1,2),(3,4)] | ((1,2),(3,4)) + [(1,2),(3,4)] | [(0,0),(3,0),(4,5),(1,6)] + [(1,2),(3,4)] | ((1,2),(3,4)) + [(1,2),(3,4)] | ((1,2),(3,4)) + [(1,2),(3,4)] | [(1,2),(3,4)] + [(1,2),(3,4)] | [(11,12),(13,14)] + [(1,2),(3,4)] | ((11,12),(13,14)) + ((1,2),(3,4)) | [(1,2),(3,4)] + ((1,2),(3,4)) | ((1,2),(3,4)) + ((1,2),(3,4)) | [(0,0),(3,0),(4,5),(1,6)] + ((1,2),(3,4)) | ((1,2),(3,4)) + ((1,2),(3,4)) | ((1,2),(3,4)) + ((1,2),(3,4)) | [(1,2),(3,4)] + ((1,2),(3,4)) | [(11,12),(13,14)] + ((1,2),(3,4)) | ((11,12),(13,14)) + [(0,0),(3,0),(4,5),(1,6)] | [(0,0),(3,0),(4,5),(1,6)] + ((1,2),(3,4)) | [(1,2),(3,4)] + ((1,2),(3,4)) | ((1,2),(3,4)) + ((1,2),(3,4)) | 
[(0,0),(3,0),(4,5),(1,6)] + ((1,2),(3,4)) | ((1,2),(3,4)) + ((1,2),(3,4)) | ((1,2),(3,4)) + ((1,2),(3,4)) | [(1,2),(3,4)] + ((1,2),(3,4)) | [(11,12),(13,14)] + ((1,2),(3,4)) | ((11,12),(13,14)) + ((1,2),(3,4)) | [(1,2),(3,4)] + ((1,2),(3,4)) | ((1,2),(3,4)) + ((1,2),(3,4)) | [(0,0),(3,0),(4,5),(1,6)] + ((1,2),(3,4)) | ((1,2),(3,4)) + ((1,2),(3,4)) | ((1,2),(3,4)) + ((1,2),(3,4)) | [(1,2),(3,4)] + ((1,2),(3,4)) | [(11,12),(13,14)] + ((1,2),(3,4)) | ((11,12),(13,14)) + [(1,2),(3,4)] | [(1,2),(3,4)] + [(1,2),(3,4)] | ((1,2),(3,4)) + [(1,2),(3,4)] | [(0,0),(3,0),(4,5),(1,6)] + [(1,2),(3,4)] | ((1,2),(3,4)) + [(1,2),(3,4)] | ((1,2),(3,4)) + [(1,2),(3,4)] | [(1,2),(3,4)] + [(1,2),(3,4)] | [(11,12),(13,14)] + [(1,2),(3,4)] | ((11,12),(13,14)) + ((10,20)) | [(1,2),(3,4)] + ((10,20)) | ((1,2),(3,4)) + ((10,20)) | [(0,0),(3,0),(4,5),(1,6)] + ((10,20)) | ((1,2),(3,4)) + ((10,20)) | ((1,2),(3,4)) + ((10,20)) | [(1,2),(3,4)] + ((10,20)) | ((10,20)) + ((10,20)) | [(11,12),(13,14)] + ((10,20)) | ((11,12),(13,14)) + [(11,12),(13,14)] | [(1,2),(3,4)] + [(11,12),(13,14)] | ((1,2),(3,4)) + [(11,12),(13,14)] | [(0,0),(3,0),(4,5),(1,6)] + [(11,12),(13,14)] | ((1,2),(3,4)) + [(11,12),(13,14)] | ((1,2),(3,4)) + [(11,12),(13,14)] | [(1,2),(3,4)] + [(11,12),(13,14)] | [(11,12),(13,14)] + [(11,12),(13,14)] | ((11,12),(13,14)) + ((11,12),(13,14)) | [(1,2),(3,4)] + ((11,12),(13,14)) | ((1,2),(3,4)) + ((11,12),(13,14)) | [(0,0),(3,0),(4,5),(1,6)] + ((11,12),(13,14)) | ((1,2),(3,4)) + ((11,12),(13,14)) | ((1,2),(3,4)) + ((11,12),(13,14)) | [(1,2),(3,4)] + ((11,12),(13,14)) | [(11,12),(13,14)] + ((11,12),(13,14)) | ((11,12),(13,14)) +(66 rows) + +-- Has points equal to path +SELECT p1.f1, p2.f1 FROM PATH_TBL p1, PATH_TBL p2 WHERE p1.f1 = p2.f1; + f1 | f1 +---------------------------+--------------------------- + [(1,2),(3,4)] | [(1,2),(3,4)] + [(1,2),(3,4)] | ((1,2),(3,4)) + [(1,2),(3,4)] | ((1,2),(3,4)) + [(1,2),(3,4)] | ((1,2),(3,4)) + [(1,2),(3,4)] | [(1,2),(3,4)] + [(1,2),(3,4)] | [(11,12),(13,14)] + [(1,2),(3,4)] | ((11,12),(13,14)) + ((1,2),(3,4)) | [(1,2),(3,4)] + ((1,2),(3,4)) | ((1,2),(3,4)) + ((1,2),(3,4)) | ((1,2),(3,4)) + ((1,2),(3,4)) | ((1,2),(3,4)) + ((1,2),(3,4)) | [(1,2),(3,4)] + ((1,2),(3,4)) | [(11,12),(13,14)] + ((1,2),(3,4)) | ((11,12),(13,14)) + [(0,0),(3,0),(4,5),(1,6)] | [(0,0),(3,0),(4,5),(1,6)] + ((1,2),(3,4)) | [(1,2),(3,4)] + ((1,2),(3,4)) | ((1,2),(3,4)) + ((1,2),(3,4)) | ((1,2),(3,4)) + ((1,2),(3,4)) | ((1,2),(3,4)) + ((1,2),(3,4)) | [(1,2),(3,4)] + ((1,2),(3,4)) | [(11,12),(13,14)] + ((1,2),(3,4)) | ((11,12),(13,14)) + ((1,2),(3,4)) | [(1,2),(3,4)] + ((1,2),(3,4)) | ((1,2),(3,4)) + ((1,2),(3,4)) | ((1,2),(3,4)) + ((1,2),(3,4)) | ((1,2),(3,4)) + ((1,2),(3,4)) | [(1,2),(3,4)] + ((1,2),(3,4)) | [(11,12),(13,14)] + ((1,2),(3,4)) | ((11,12),(13,14)) + [(1,2),(3,4)] | [(1,2),(3,4)] + [(1,2),(3,4)] | ((1,2),(3,4)) + [(1,2),(3,4)] | ((1,2),(3,4)) + [(1,2),(3,4)] | ((1,2),(3,4)) + [(1,2),(3,4)] | [(1,2),(3,4)] + [(1,2),(3,4)] | [(11,12),(13,14)] + [(1,2),(3,4)] | ((11,12),(13,14)) + ((10,20)) | ((10,20)) + [(11,12),(13,14)] | [(1,2),(3,4)] + [(11,12),(13,14)] | ((1,2),(3,4)) + [(11,12),(13,14)] | ((1,2),(3,4)) + [(11,12),(13,14)] | ((1,2),(3,4)) + [(11,12),(13,14)] | [(1,2),(3,4)] + [(11,12),(13,14)] | [(11,12),(13,14)] + [(11,12),(13,14)] | ((11,12),(13,14)) + ((11,12),(13,14)) | [(1,2),(3,4)] + ((11,12),(13,14)) | ((1,2),(3,4)) + ((11,12),(13,14)) | ((1,2),(3,4)) + ((11,12),(13,14)) | ((1,2),(3,4)) + ((11,12),(13,14)) | [(1,2),(3,4)] + ((11,12),(13,14)) | [(11,12),(13,14)] + ((11,12),(13,14)) | 
((11,12),(13,14)) +(51 rows) + +-- Has points greater than or equal to path +SELECT p1.f1, p2.f1 FROM PATH_TBL p1, PATH_TBL p2 WHERE p1.f1 >= p2.f1; + f1 | f1 +---------------------------+--------------------------- + [(1,2),(3,4)] | [(1,2),(3,4)] + [(1,2),(3,4)] | ((1,2),(3,4)) + [(1,2),(3,4)] | ((1,2),(3,4)) + [(1,2),(3,4)] | ((1,2),(3,4)) + [(1,2),(3,4)] | [(1,2),(3,4)] + [(1,2),(3,4)] | ((10,20)) + [(1,2),(3,4)] | [(11,12),(13,14)] + [(1,2),(3,4)] | ((11,12),(13,14)) + ((1,2),(3,4)) | [(1,2),(3,4)] + ((1,2),(3,4)) | ((1,2),(3,4)) + ((1,2),(3,4)) | ((1,2),(3,4)) + ((1,2),(3,4)) | ((1,2),(3,4)) + ((1,2),(3,4)) | [(1,2),(3,4)] + ((1,2),(3,4)) | ((10,20)) + ((1,2),(3,4)) | [(11,12),(13,14)] + ((1,2),(3,4)) | ((11,12),(13,14)) + [(0,0),(3,0),(4,5),(1,6)] | [(1,2),(3,4)] + [(0,0),(3,0),(4,5),(1,6)] | ((1,2),(3,4)) + [(0,0),(3,0),(4,5),(1,6)] | [(0,0),(3,0),(4,5),(1,6)] + [(0,0),(3,0),(4,5),(1,6)] | ((1,2),(3,4)) + [(0,0),(3,0),(4,5),(1,6)] | ((1,2),(3,4)) + [(0,0),(3,0),(4,5),(1,6)] | [(1,2),(3,4)] + [(0,0),(3,0),(4,5),(1,6)] | ((10,20)) + [(0,0),(3,0),(4,5),(1,6)] | [(11,12),(13,14)] + [(0,0),(3,0),(4,5),(1,6)] | ((11,12),(13,14)) + ((1,2),(3,4)) | [(1,2),(3,4)] + ((1,2),(3,4)) | ((1,2),(3,4)) + ((1,2),(3,4)) | ((1,2),(3,4)) + ((1,2),(3,4)) | ((1,2),(3,4)) + ((1,2),(3,4)) | [(1,2),(3,4)] + ((1,2),(3,4)) | ((10,20)) + ((1,2),(3,4)) | [(11,12),(13,14)] + ((1,2),(3,4)) | ((11,12),(13,14)) + ((1,2),(3,4)) | [(1,2),(3,4)] + ((1,2),(3,4)) | ((1,2),(3,4)) + ((1,2),(3,4)) | ((1,2),(3,4)) + ((1,2),(3,4)) | ((1,2),(3,4)) + ((1,2),(3,4)) | [(1,2),(3,4)] + ((1,2),(3,4)) | ((10,20)) + ((1,2),(3,4)) | [(11,12),(13,14)] + ((1,2),(3,4)) | ((11,12),(13,14)) + [(1,2),(3,4)] | [(1,2),(3,4)] + [(1,2),(3,4)] | ((1,2),(3,4)) + [(1,2),(3,4)] | ((1,2),(3,4)) + [(1,2),(3,4)] | ((1,2),(3,4)) + [(1,2),(3,4)] | [(1,2),(3,4)] + [(1,2),(3,4)] | ((10,20)) + [(1,2),(3,4)] | [(11,12),(13,14)] + [(1,2),(3,4)] | ((11,12),(13,14)) + ((10,20)) | ((10,20)) + [(11,12),(13,14)] | [(1,2),(3,4)] + [(11,12),(13,14)] | ((1,2),(3,4)) + [(11,12),(13,14)] | ((1,2),(3,4)) + [(11,12),(13,14)] | ((1,2),(3,4)) + [(11,12),(13,14)] | [(1,2),(3,4)] + [(11,12),(13,14)] | ((10,20)) + [(11,12),(13,14)] | [(11,12),(13,14)] + [(11,12),(13,14)] | ((11,12),(13,14)) + ((11,12),(13,14)) | [(1,2),(3,4)] + ((11,12),(13,14)) | ((1,2),(3,4)) + ((11,12),(13,14)) | ((1,2),(3,4)) + ((11,12),(13,14)) | ((1,2),(3,4)) + ((11,12),(13,14)) | [(1,2),(3,4)] + ((11,12),(13,14)) | ((10,20)) + ((11,12),(13,14)) | [(11,12),(13,14)] + ((11,12),(13,14)) | ((11,12),(13,14)) +(66 rows) + +-- Has points greater than path +SELECT p1.f1, p2.f1 FROM PATH_TBL p1, PATH_TBL p2 WHERE p1.f1 > p2.f1; + f1 | f1 +---------------------------+------------------- + [(1,2),(3,4)] | ((10,20)) + ((1,2),(3,4)) | ((10,20)) + [(0,0),(3,0),(4,5),(1,6)] | [(1,2),(3,4)] + [(0,0),(3,0),(4,5),(1,6)] | ((1,2),(3,4)) + [(0,0),(3,0),(4,5),(1,6)] | ((1,2),(3,4)) + [(0,0),(3,0),(4,5),(1,6)] | ((1,2),(3,4)) + [(0,0),(3,0),(4,5),(1,6)] | [(1,2),(3,4)] + [(0,0),(3,0),(4,5),(1,6)] | ((10,20)) + [(0,0),(3,0),(4,5),(1,6)] | [(11,12),(13,14)] + [(0,0),(3,0),(4,5),(1,6)] | ((11,12),(13,14)) + ((1,2),(3,4)) | ((10,20)) + ((1,2),(3,4)) | ((10,20)) + [(1,2),(3,4)] | ((10,20)) + [(11,12),(13,14)] | ((10,20)) + ((11,12),(13,14)) | ((10,20)) +(15 rows) + +-- Add path +SELECT p1.f1, p2.f1, p1.f1 + p2.f1 FROM PATH_TBL p1, PATH_TBL p2; + f1 | f1 | ?column? 
+---------------------------+---------------------------+--------------------------------------------------- + [(1,2),(3,4)] | [(1,2),(3,4)] | [(1,2),(3,4),(1,2),(3,4)] + [(1,2),(3,4)] | ((1,2),(3,4)) | + [(1,2),(3,4)] | [(0,0),(3,0),(4,5),(1,6)] | [(1,2),(3,4),(0,0),(3,0),(4,5),(1,6)] + [(1,2),(3,4)] | ((1,2),(3,4)) | + [(1,2),(3,4)] | ((1,2),(3,4)) | + [(1,2),(3,4)] | [(1,2),(3,4)] | [(1,2),(3,4),(1,2),(3,4)] + [(1,2),(3,4)] | ((10,20)) | + [(1,2),(3,4)] | [(11,12),(13,14)] | [(1,2),(3,4),(11,12),(13,14)] + [(1,2),(3,4)] | ((11,12),(13,14)) | + ((1,2),(3,4)) | [(1,2),(3,4)] | + ((1,2),(3,4)) | ((1,2),(3,4)) | + ((1,2),(3,4)) | [(0,0),(3,0),(4,5),(1,6)] | + ((1,2),(3,4)) | ((1,2),(3,4)) | + ((1,2),(3,4)) | ((1,2),(3,4)) | + ((1,2),(3,4)) | [(1,2),(3,4)] | + ((1,2),(3,4)) | ((10,20)) | + ((1,2),(3,4)) | [(11,12),(13,14)] | + ((1,2),(3,4)) | ((11,12),(13,14)) | + [(0,0),(3,0),(4,5),(1,6)] | [(1,2),(3,4)] | [(0,0),(3,0),(4,5),(1,6),(1,2),(3,4)] + [(0,0),(3,0),(4,5),(1,6)] | ((1,2),(3,4)) | + [(0,0),(3,0),(4,5),(1,6)] | [(0,0),(3,0),(4,5),(1,6)] | [(0,0),(3,0),(4,5),(1,6),(0,0),(3,0),(4,5),(1,6)] + [(0,0),(3,0),(4,5),(1,6)] | ((1,2),(3,4)) | + [(0,0),(3,0),(4,5),(1,6)] | ((1,2),(3,4)) | + [(0,0),(3,0),(4,5),(1,6)] | [(1,2),(3,4)] | [(0,0),(3,0),(4,5),(1,6),(1,2),(3,4)] + [(0,0),(3,0),(4,5),(1,6)] | ((10,20)) | + [(0,0),(3,0),(4,5),(1,6)] | [(11,12),(13,14)] | [(0,0),(3,0),(4,5),(1,6),(11,12),(13,14)] + [(0,0),(3,0),(4,5),(1,6)] | ((11,12),(13,14)) | + ((1,2),(3,4)) | [(1,2),(3,4)] | + ((1,2),(3,4)) | ((1,2),(3,4)) | + ((1,2),(3,4)) | [(0,0),(3,0),(4,5),(1,6)] | + ((1,2),(3,4)) | ((1,2),(3,4)) | + ((1,2),(3,4)) | ((1,2),(3,4)) | + ((1,2),(3,4)) | [(1,2),(3,4)] | + ((1,2),(3,4)) | ((10,20)) | + ((1,2),(3,4)) | [(11,12),(13,14)] | + ((1,2),(3,4)) | ((11,12),(13,14)) | + ((1,2),(3,4)) | [(1,2),(3,4)] | + ((1,2),(3,4)) | ((1,2),(3,4)) | + ((1,2),(3,4)) | [(0,0),(3,0),(4,5),(1,6)] | + ((1,2),(3,4)) | ((1,2),(3,4)) | + ((1,2),(3,4)) | ((1,2),(3,4)) | + ((1,2),(3,4)) | [(1,2),(3,4)] | + ((1,2),(3,4)) | ((10,20)) | + ((1,2),(3,4)) | [(11,12),(13,14)] | + ((1,2),(3,4)) | ((11,12),(13,14)) | + [(1,2),(3,4)] | [(1,2),(3,4)] | [(1,2),(3,4),(1,2),(3,4)] + [(1,2),(3,4)] | ((1,2),(3,4)) | + [(1,2),(3,4)] | [(0,0),(3,0),(4,5),(1,6)] | [(1,2),(3,4),(0,0),(3,0),(4,5),(1,6)] + [(1,2),(3,4)] | ((1,2),(3,4)) | + [(1,2),(3,4)] | ((1,2),(3,4)) | + [(1,2),(3,4)] | [(1,2),(3,4)] | [(1,2),(3,4),(1,2),(3,4)] + [(1,2),(3,4)] | ((10,20)) | + [(1,2),(3,4)] | [(11,12),(13,14)] | [(1,2),(3,4),(11,12),(13,14)] + [(1,2),(3,4)] | ((11,12),(13,14)) | + ((10,20)) | [(1,2),(3,4)] | + ((10,20)) | ((1,2),(3,4)) | + ((10,20)) | [(0,0),(3,0),(4,5),(1,6)] | + ((10,20)) | ((1,2),(3,4)) | + ((10,20)) | ((1,2),(3,4)) | + ((10,20)) | [(1,2),(3,4)] | + ((10,20)) | ((10,20)) | + ((10,20)) | [(11,12),(13,14)] | + ((10,20)) | ((11,12),(13,14)) | + [(11,12),(13,14)] | [(1,2),(3,4)] | [(11,12),(13,14),(1,2),(3,4)] + [(11,12),(13,14)] | ((1,2),(3,4)) | + [(11,12),(13,14)] | [(0,0),(3,0),(4,5),(1,6)] | [(11,12),(13,14),(0,0),(3,0),(4,5),(1,6)] + [(11,12),(13,14)] | ((1,2),(3,4)) | + [(11,12),(13,14)] | ((1,2),(3,4)) | + [(11,12),(13,14)] | [(1,2),(3,4)] | [(11,12),(13,14),(1,2),(3,4)] + [(11,12),(13,14)] | ((10,20)) | + [(11,12),(13,14)] | [(11,12),(13,14)] | [(11,12),(13,14),(11,12),(13,14)] + [(11,12),(13,14)] | ((11,12),(13,14)) | + ((11,12),(13,14)) | [(1,2),(3,4)] | + ((11,12),(13,14)) | ((1,2),(3,4)) | + ((11,12),(13,14)) | [(0,0),(3,0),(4,5),(1,6)] | + ((11,12),(13,14)) | ((1,2),(3,4)) | + ((11,12),(13,14)) | ((1,2),(3,4)) | + 
((11,12),(13,14)) | [(1,2),(3,4)] | + ((11,12),(13,14)) | ((10,20)) | + ((11,12),(13,14)) | [(11,12),(13,14)] | + ((11,12),(13,14)) | ((11,12),(13,14)) | +(81 rows) + +-- Add point +SELECT p.f1, p1.f1, p.f1 + p1.f1 FROM PATH_TBL p, POINT_TBL p1; + f1 | f1 | ?column? +---------------------------+-------------------+--------------------------------------------------------------------------- + [(1,2),(3,4)] | (0,0) | [(1,2),(3,4)] + ((1,2),(3,4)) | (0,0) | ((1,2),(3,4)) + [(0,0),(3,0),(4,5),(1,6)] | (0,0) | [(0,0),(3,0),(4,5),(1,6)] + ((1,2),(3,4)) | (0,0) | ((1,2),(3,4)) + ((1,2),(3,4)) | (0,0) | ((1,2),(3,4)) + [(1,2),(3,4)] | (0,0) | [(1,2),(3,4)] + ((10,20)) | (0,0) | ((10,20)) + [(11,12),(13,14)] | (0,0) | [(11,12),(13,14)] + ((11,12),(13,14)) | (0,0) | ((11,12),(13,14)) + [(1,2),(3,4)] | (-10,0) | [(-9,2),(-7,4)] + ((1,2),(3,4)) | (-10,0) | ((-9,2),(-7,4)) + [(0,0),(3,0),(4,5),(1,6)] | (-10,0) | [(-10,0),(-7,0),(-6,5),(-9,6)] + ((1,2),(3,4)) | (-10,0) | ((-9,2),(-7,4)) + ((1,2),(3,4)) | (-10,0) | ((-9,2),(-7,4)) + [(1,2),(3,4)] | (-10,0) | [(-9,2),(-7,4)] + ((10,20)) | (-10,0) | ((0,20)) + [(11,12),(13,14)] | (-10,0) | [(1,12),(3,14)] + ((11,12),(13,14)) | (-10,0) | ((1,12),(3,14)) + [(1,2),(3,4)] | (-3,4) | [(-2,6),(0,8)] + ((1,2),(3,4)) | (-3,4) | ((-2,6),(0,8)) + [(0,0),(3,0),(4,5),(1,6)] | (-3,4) | [(-3,4),(0,4),(1,9),(-2,10)] + ((1,2),(3,4)) | (-3,4) | ((-2,6),(0,8)) + ((1,2),(3,4)) | (-3,4) | ((-2,6),(0,8)) + [(1,2),(3,4)] | (-3,4) | [(-2,6),(0,8)] + ((10,20)) | (-3,4) | ((7,24)) + [(11,12),(13,14)] | (-3,4) | [(8,16),(10,18)] + ((11,12),(13,14)) | (-3,4) | ((8,16),(10,18)) + [(1,2),(3,4)] | (5.1,34.5) | [(6.1,36.5),(8.1,38.5)] + ((1,2),(3,4)) | (5.1,34.5) | ((6.1,36.5),(8.1,38.5)) + [(0,0),(3,0),(4,5),(1,6)] | (5.1,34.5) | [(5.1,34.5),(8.1,34.5),(9.1,39.5),(6.1,40.5)] + ((1,2),(3,4)) | (5.1,34.5) | ((6.1,36.5),(8.1,38.5)) + ((1,2),(3,4)) | (5.1,34.5) | ((6.1,36.5),(8.1,38.5)) + [(1,2),(3,4)] | (5.1,34.5) | [(6.1,36.5),(8.1,38.5)] + ((10,20)) | (5.1,34.5) | ((15.1,54.5)) + [(11,12),(13,14)] | (5.1,34.5) | [(16.1,46.5),(18.1,48.5)] + ((11,12),(13,14)) | (5.1,34.5) | ((16.1,46.5),(18.1,48.5)) + [(1,2),(3,4)] | (-5,-12) | [(-4,-10),(-2,-8)] + ((1,2),(3,4)) | (-5,-12) | ((-4,-10),(-2,-8)) + [(0,0),(3,0),(4,5),(1,6)] | (-5,-12) | [(-5,-12),(-2,-12),(-1,-7),(-4,-6)] + ((1,2),(3,4)) | (-5,-12) | ((-4,-10),(-2,-8)) + ((1,2),(3,4)) | (-5,-12) | ((-4,-10),(-2,-8)) + [(1,2),(3,4)] | (-5,-12) | [(-4,-10),(-2,-8)] + ((10,20)) | (-5,-12) | ((5,8)) + [(11,12),(13,14)] | (-5,-12) | [(6,0),(8,2)] + ((11,12),(13,14)) | (-5,-12) | ((6,0),(8,2)) + [(1,2),(3,4)] | (1e-300,-1e-300) | [(1,2),(3,4)] + ((1,2),(3,4)) | (1e-300,-1e-300) | ((1,2),(3,4)) + [(0,0),(3,0),(4,5),(1,6)] | (1e-300,-1e-300) | [(1e-300,-1e-300),(3,-1e-300),(4,5),(1,6)] + ((1,2),(3,4)) | (1e-300,-1e-300) | ((1,2),(3,4)) + ((1,2),(3,4)) | (1e-300,-1e-300) | ((1,2),(3,4)) + [(1,2),(3,4)] | (1e-300,-1e-300) | [(1,2),(3,4)] + ((10,20)) | (1e-300,-1e-300) | ((10,20)) + [(11,12),(13,14)] | (1e-300,-1e-300) | [(11,12),(13,14)] + ((11,12),(13,14)) | (1e-300,-1e-300) | ((11,12),(13,14)) + [(1,2),(3,4)] | (1e+300,Infinity) | [(1e+300,Infinity),(1e+300,Infinity)] + ((1,2),(3,4)) | (1e+300,Infinity) | ((1e+300,Infinity),(1e+300,Infinity)) + [(0,0),(3,0),(4,5),(1,6)] | (1e+300,Infinity) | [(1e+300,Infinity),(1e+300,Infinity),(1e+300,Infinity),(1e+300,Infinity)] + ((1,2),(3,4)) | (1e+300,Infinity) | ((1e+300,Infinity),(1e+300,Infinity)) + ((1,2),(3,4)) | (1e+300,Infinity) | ((1e+300,Infinity),(1e+300,Infinity)) + [(1,2),(3,4)] | (1e+300,Infinity) | 
[(1e+300,Infinity),(1e+300,Infinity)] + ((10,20)) | (1e+300,Infinity) | ((1e+300,Infinity)) + [(11,12),(13,14)] | (1e+300,Infinity) | [(1e+300,Infinity),(1e+300,Infinity)] + ((11,12),(13,14)) | (1e+300,Infinity) | ((1e+300,Infinity),(1e+300,Infinity)) + [(1,2),(3,4)] | (NaN,NaN) | [(NaN,NaN),(NaN,NaN)] + ((1,2),(3,4)) | (NaN,NaN) | ((NaN,NaN),(NaN,NaN)) + [(0,0),(3,0),(4,5),(1,6)] | (NaN,NaN) | [(NaN,NaN),(NaN,NaN),(NaN,NaN),(NaN,NaN)] + ((1,2),(3,4)) | (NaN,NaN) | ((NaN,NaN),(NaN,NaN)) + ((1,2),(3,4)) | (NaN,NaN) | ((NaN,NaN),(NaN,NaN)) + [(1,2),(3,4)] | (NaN,NaN) | [(NaN,NaN),(NaN,NaN)] + ((10,20)) | (NaN,NaN) | ((NaN,NaN)) + [(11,12),(13,14)] | (NaN,NaN) | [(NaN,NaN),(NaN,NaN)] + ((11,12),(13,14)) | (NaN,NaN) | ((NaN,NaN),(NaN,NaN)) + [(1,2),(3,4)] | (10,10) | [(11,12),(13,14)] + ((1,2),(3,4)) | (10,10) | ((11,12),(13,14)) + [(0,0),(3,0),(4,5),(1,6)] | (10,10) | [(10,10),(13,10),(14,15),(11,16)] + ((1,2),(3,4)) | (10,10) | ((11,12),(13,14)) + ((1,2),(3,4)) | (10,10) | ((11,12),(13,14)) + [(1,2),(3,4)] | (10,10) | [(11,12),(13,14)] + ((10,20)) | (10,10) | ((20,30)) + [(11,12),(13,14)] | (10,10) | [(21,22),(23,24)] + ((11,12),(13,14)) | (10,10) | ((21,22),(23,24)) +(81 rows) + +-- Subtract point +SELECT p.f1, p1.f1, p.f1 - p1.f1 FROM PATH_TBL p, POINT_TBL p1; + f1 | f1 | ?column? +---------------------------+-------------------+----------------------------------------------------------------------------------- + [(1,2),(3,4)] | (0,0) | [(1,2),(3,4)] + ((1,2),(3,4)) | (0,0) | ((1,2),(3,4)) + [(0,0),(3,0),(4,5),(1,6)] | (0,0) | [(0,0),(3,0),(4,5),(1,6)] + ((1,2),(3,4)) | (0,0) | ((1,2),(3,4)) + ((1,2),(3,4)) | (0,0) | ((1,2),(3,4)) + [(1,2),(3,4)] | (0,0) | [(1,2),(3,4)] + ((10,20)) | (0,0) | ((10,20)) + [(11,12),(13,14)] | (0,0) | [(11,12),(13,14)] + ((11,12),(13,14)) | (0,0) | ((11,12),(13,14)) + [(1,2),(3,4)] | (-10,0) | [(11,2),(13,4)] + ((1,2),(3,4)) | (-10,0) | ((11,2),(13,4)) + [(0,0),(3,0),(4,5),(1,6)] | (-10,0) | [(10,0),(13,0),(14,5),(11,6)] + ((1,2),(3,4)) | (-10,0) | ((11,2),(13,4)) + ((1,2),(3,4)) | (-10,0) | ((11,2),(13,4)) + [(1,2),(3,4)] | (-10,0) | [(11,2),(13,4)] + ((10,20)) | (-10,0) | ((20,20)) + [(11,12),(13,14)] | (-10,0) | [(21,12),(23,14)] + ((11,12),(13,14)) | (-10,0) | ((21,12),(23,14)) + [(1,2),(3,4)] | (-3,4) | [(4,-2),(6,0)] + ((1,2),(3,4)) | (-3,4) | ((4,-2),(6,0)) + [(0,0),(3,0),(4,5),(1,6)] | (-3,4) | [(3,-4),(6,-4),(7,1),(4,2)] + ((1,2),(3,4)) | (-3,4) | ((4,-2),(6,0)) + ((1,2),(3,4)) | (-3,4) | ((4,-2),(6,0)) + [(1,2),(3,4)] | (-3,4) | [(4,-2),(6,0)] + ((10,20)) | (-3,4) | ((13,16)) + [(11,12),(13,14)] | (-3,4) | [(14,8),(16,10)] + ((11,12),(13,14)) | (-3,4) | ((14,8),(16,10)) + [(1,2),(3,4)] | (5.1,34.5) | [(-4.1,-32.5),(-2.1,-30.5)] + ((1,2),(3,4)) | (5.1,34.5) | ((-4.1,-32.5),(-2.1,-30.5)) + [(0,0),(3,0),(4,5),(1,6)] | (5.1,34.5) | [(-5.1,-34.5),(-2.1,-34.5),(-1.1,-29.5),(-4.1,-28.5)] + ((1,2),(3,4)) | (5.1,34.5) | ((-4.1,-32.5),(-2.1,-30.5)) + ((1,2),(3,4)) | (5.1,34.5) | ((-4.1,-32.5),(-2.1,-30.5)) + [(1,2),(3,4)] | (5.1,34.5) | [(-4.1,-32.5),(-2.1,-30.5)] + ((10,20)) | (5.1,34.5) | ((4.9,-14.5)) + [(11,12),(13,14)] | (5.1,34.5) | [(5.9,-22.5),(7.9,-20.5)] + ((11,12),(13,14)) | (5.1,34.5) | ((5.9,-22.5),(7.9,-20.5)) + [(1,2),(3,4)] | (-5,-12) | [(6,14),(8,16)] + ((1,2),(3,4)) | (-5,-12) | ((6,14),(8,16)) + [(0,0),(3,0),(4,5),(1,6)] | (-5,-12) | [(5,12),(8,12),(9,17),(6,18)] + ((1,2),(3,4)) | (-5,-12) | ((6,14),(8,16)) + ((1,2),(3,4)) | (-5,-12) | ((6,14),(8,16)) + [(1,2),(3,4)] | (-5,-12) | [(6,14),(8,16)] + ((10,20)) | (-5,-12) | ((15,32)) + 
[(11,12),(13,14)] | (-5,-12) | [(16,24),(18,26)] + ((11,12),(13,14)) | (-5,-12) | ((16,24),(18,26)) + [(1,2),(3,4)] | (1e-300,-1e-300) | [(1,2),(3,4)] + ((1,2),(3,4)) | (1e-300,-1e-300) | ((1,2),(3,4)) + [(0,0),(3,0),(4,5),(1,6)] | (1e-300,-1e-300) | [(-1e-300,1e-300),(3,1e-300),(4,5),(1,6)] + ((1,2),(3,4)) | (1e-300,-1e-300) | ((1,2),(3,4)) + ((1,2),(3,4)) | (1e-300,-1e-300) | ((1,2),(3,4)) + [(1,2),(3,4)] | (1e-300,-1e-300) | [(1,2),(3,4)] + ((10,20)) | (1e-300,-1e-300) | ((10,20)) + [(11,12),(13,14)] | (1e-300,-1e-300) | [(11,12),(13,14)] + ((11,12),(13,14)) | (1e-300,-1e-300) | ((11,12),(13,14)) + [(1,2),(3,4)] | (1e+300,Infinity) | [(-1e+300,-Infinity),(-1e+300,-Infinity)] + ((1,2),(3,4)) | (1e+300,Infinity) | ((-1e+300,-Infinity),(-1e+300,-Infinity)) + [(0,0),(3,0),(4,5),(1,6)] | (1e+300,Infinity) | [(-1e+300,-Infinity),(-1e+300,-Infinity),(-1e+300,-Infinity),(-1e+300,-Infinity)] + ((1,2),(3,4)) | (1e+300,Infinity) | ((-1e+300,-Infinity),(-1e+300,-Infinity)) + ((1,2),(3,4)) | (1e+300,Infinity) | ((-1e+300,-Infinity),(-1e+300,-Infinity)) + [(1,2),(3,4)] | (1e+300,Infinity) | [(-1e+300,-Infinity),(-1e+300,-Infinity)] + ((10,20)) | (1e+300,Infinity) | ((-1e+300,-Infinity)) + [(11,12),(13,14)] | (1e+300,Infinity) | [(-1e+300,-Infinity),(-1e+300,-Infinity)] + ((11,12),(13,14)) | (1e+300,Infinity) | ((-1e+300,-Infinity),(-1e+300,-Infinity)) + [(1,2),(3,4)] | (NaN,NaN) | [(NaN,NaN),(NaN,NaN)] + ((1,2),(3,4)) | (NaN,NaN) | ((NaN,NaN),(NaN,NaN)) + [(0,0),(3,0),(4,5),(1,6)] | (NaN,NaN) | [(NaN,NaN),(NaN,NaN),(NaN,NaN),(NaN,NaN)] + ((1,2),(3,4)) | (NaN,NaN) | ((NaN,NaN),(NaN,NaN)) + ((1,2),(3,4)) | (NaN,NaN) | ((NaN,NaN),(NaN,NaN)) + [(1,2),(3,4)] | (NaN,NaN) | [(NaN,NaN),(NaN,NaN)] + ((10,20)) | (NaN,NaN) | ((NaN,NaN)) + [(11,12),(13,14)] | (NaN,NaN) | [(NaN,NaN),(NaN,NaN)] + ((11,12),(13,14)) | (NaN,NaN) | ((NaN,NaN),(NaN,NaN)) + [(1,2),(3,4)] | (10,10) | [(-9,-8),(-7,-6)] + ((1,2),(3,4)) | (10,10) | ((-9,-8),(-7,-6)) + [(0,0),(3,0),(4,5),(1,6)] | (10,10) | [(-10,-10),(-7,-10),(-6,-5),(-9,-4)] + ((1,2),(3,4)) | (10,10) | ((-9,-8),(-7,-6)) + ((1,2),(3,4)) | (10,10) | ((-9,-8),(-7,-6)) + [(1,2),(3,4)] | (10,10) | [(-9,-8),(-7,-6)] + ((10,20)) | (10,10) | ((0,10)) + [(11,12),(13,14)] | (10,10) | [(1,2),(3,4)] + ((11,12),(13,14)) | (10,10) | ((1,2),(3,4)) +(81 rows) + +-- Multiply with point +SELECT p.f1, p1.f1, p.f1 * p1.f1 FROM PATH_TBL p, POINT_TBL p1; + f1 | f1 | ?column? 
+---------------------------+-------------------+---------------------------------------------------------------------- + [(1,2),(3,4)] | (0,0) | [(0,0),(0,0)] + ((1,2),(3,4)) | (0,0) | ((0,0),(0,0)) + [(0,0),(3,0),(4,5),(1,6)] | (0,0) | [(0,0),(0,0),(0,0),(0,0)] + ((1,2),(3,4)) | (0,0) | ((0,0),(0,0)) + ((1,2),(3,4)) | (0,0) | ((0,0),(0,0)) + [(1,2),(3,4)] | (0,0) | [(0,0),(0,0)] + ((10,20)) | (0,0) | ((0,0)) + [(11,12),(13,14)] | (0,0) | [(0,0),(0,0)] + ((11,12),(13,14)) | (0,0) | ((0,0),(0,0)) + [(1,2),(3,4)] | (-10,0) | [(-10,-20),(-30,-40)] + ((1,2),(3,4)) | (-10,0) | ((-10,-20),(-30,-40)) + [(0,0),(3,0),(4,5),(1,6)] | (-10,0) | [(-0,0),(-30,0),(-40,-50),(-10,-60)] + ((1,2),(3,4)) | (-10,0) | ((-10,-20),(-30,-40)) + ((1,2),(3,4)) | (-10,0) | ((-10,-20),(-30,-40)) + [(1,2),(3,4)] | (-10,0) | [(-10,-20),(-30,-40)] + ((10,20)) | (-10,0) | ((-100,-200)) + [(11,12),(13,14)] | (-10,0) | [(-110,-120),(-130,-140)] + ((11,12),(13,14)) | (-10,0) | ((-110,-120),(-130,-140)) + [(1,2),(3,4)] | (-3,4) | [(-11,-2),(-25,0)] + ((1,2),(3,4)) | (-3,4) | ((-11,-2),(-25,0)) + [(0,0),(3,0),(4,5),(1,6)] | (-3,4) | [(-0,0),(-9,12),(-32,1),(-27,-14)] + ((1,2),(3,4)) | (-3,4) | ((-11,-2),(-25,0)) + ((1,2),(3,4)) | (-3,4) | ((-11,-2),(-25,0)) + [(1,2),(3,4)] | (-3,4) | [(-11,-2),(-25,0)] + ((10,20)) | (-3,4) | ((-110,-20)) + [(11,12),(13,14)] | (-3,4) | [(-81,8),(-95,10)] + ((11,12),(13,14)) | (-3,4) | ((-81,8),(-95,10)) + [(1,2),(3,4)] | (5.1,34.5) | [(-63.9,44.7),(-122.7,123.9)] + ((1,2),(3,4)) | (5.1,34.5) | ((-63.9,44.7),(-122.7,123.9)) + [(0,0),(3,0),(4,5),(1,6)] | (5.1,34.5) | [(0,0),(15.3,103.5),(-152.1,163.5),(-201.9,65.1)] + ((1,2),(3,4)) | (5.1,34.5) | ((-63.9,44.7),(-122.7,123.9)) + ((1,2),(3,4)) | (5.1,34.5) | ((-63.9,44.7),(-122.7,123.9)) + [(1,2),(3,4)] | (5.1,34.5) | [(-63.9,44.7),(-122.7,123.9)] + ((10,20)) | (5.1,34.5) | ((-639,447)) + [(11,12),(13,14)] | (5.1,34.5) | [(-357.9,440.7),(-416.7,519.9)] + ((11,12),(13,14)) | (5.1,34.5) | ((-357.9,440.7),(-416.7,519.9)) + [(1,2),(3,4)] | (-5,-12) | [(19,-22),(33,-56)] + ((1,2),(3,4)) | (-5,-12) | ((19,-22),(33,-56)) + [(0,0),(3,0),(4,5),(1,6)] | (-5,-12) | [(0,-0),(-15,-36),(40,-73),(67,-42)] + ((1,2),(3,4)) | (-5,-12) | ((19,-22),(33,-56)) + ((1,2),(3,4)) | (-5,-12) | ((19,-22),(33,-56)) + [(1,2),(3,4)] | (-5,-12) | [(19,-22),(33,-56)] + ((10,20)) | (-5,-12) | ((190,-220)) + [(11,12),(13,14)] | (-5,-12) | [(89,-192),(103,-226)] + ((11,12),(13,14)) | (-5,-12) | ((89,-192),(103,-226)) + [(1,2),(3,4)] | (1e-300,-1e-300) | [(3e-300,1e-300),(7e-300,1e-300)] + ((1,2),(3,4)) | (1e-300,-1e-300) | ((3e-300,1e-300),(7e-300,1e-300)) + [(0,0),(3,0),(4,5),(1,6)] | (1e-300,-1e-300) | [(0,0),(3e-300,-3e-300),(9e-300,1e-300),(7e-300,5e-300)] + ((1,2),(3,4)) | (1e-300,-1e-300) | ((3e-300,1e-300),(7e-300,1e-300)) + ((1,2),(3,4)) | (1e-300,-1e-300) | ((3e-300,1e-300),(7e-300,1e-300)) + [(1,2),(3,4)] | (1e-300,-1e-300) | [(3e-300,1e-300),(7e-300,1e-300)] + ((10,20)) | (1e-300,-1e-300) | ((3e-299,1e-299)) + [(11,12),(13,14)] | (1e-300,-1e-300) | [(2.3e-299,1e-300),(2.7e-299,1e-300)] + ((11,12),(13,14)) | (1e-300,-1e-300) | ((2.3e-299,1e-300),(2.7e-299,1e-300)) + [(1,2),(3,4)] | (1e+300,Infinity) | [(-Infinity,Infinity),(-Infinity,Infinity)] + ((1,2),(3,4)) | (1e+300,Infinity) | ((-Infinity,Infinity),(-Infinity,Infinity)) + [(0,0),(3,0),(4,5),(1,6)] | (1e+300,Infinity) | [(NaN,NaN),(NaN,Infinity),(-Infinity,Infinity),(-Infinity,Infinity)] + ((1,2),(3,4)) | (1e+300,Infinity) | ((-Infinity,Infinity),(-Infinity,Infinity)) + ((1,2),(3,4)) | (1e+300,Infinity) | 
((-Infinity,Infinity),(-Infinity,Infinity)) + [(1,2),(3,4)] | (1e+300,Infinity) | [(-Infinity,Infinity),(-Infinity,Infinity)] + ((10,20)) | (1e+300,Infinity) | ((-Infinity,Infinity)) + [(11,12),(13,14)] | (1e+300,Infinity) | [(-Infinity,Infinity),(-Infinity,Infinity)] + ((11,12),(13,14)) | (1e+300,Infinity) | ((-Infinity,Infinity),(-Infinity,Infinity)) + [(1,2),(3,4)] | (NaN,NaN) | [(NaN,NaN),(NaN,NaN)] + ((1,2),(3,4)) | (NaN,NaN) | ((NaN,NaN),(NaN,NaN)) + [(0,0),(3,0),(4,5),(1,6)] | (NaN,NaN) | [(NaN,NaN),(NaN,NaN),(NaN,NaN),(NaN,NaN)] + ((1,2),(3,4)) | (NaN,NaN) | ((NaN,NaN),(NaN,NaN)) + ((1,2),(3,4)) | (NaN,NaN) | ((NaN,NaN),(NaN,NaN)) + [(1,2),(3,4)] | (NaN,NaN) | [(NaN,NaN),(NaN,NaN)] + ((10,20)) | (NaN,NaN) | ((NaN,NaN)) + [(11,12),(13,14)] | (NaN,NaN) | [(NaN,NaN),(NaN,NaN)] + ((11,12),(13,14)) | (NaN,NaN) | ((NaN,NaN),(NaN,NaN)) + [(1,2),(3,4)] | (10,10) | [(-10,30),(-10,70)] + ((1,2),(3,4)) | (10,10) | ((-10,30),(-10,70)) + [(0,0),(3,0),(4,5),(1,6)] | (10,10) | [(0,0),(30,30),(-10,90),(-50,70)] + ((1,2),(3,4)) | (10,10) | ((-10,30),(-10,70)) + ((1,2),(3,4)) | (10,10) | ((-10,30),(-10,70)) + [(1,2),(3,4)] | (10,10) | [(-10,30),(-10,70)] + ((10,20)) | (10,10) | ((-100,300)) + [(11,12),(13,14)] | (10,10) | [(-10,230),(-10,270)] + ((11,12),(13,14)) | (10,10) | ((-10,230),(-10,270)) +(81 rows) + +-- Divide by point +SELECT p.f1, p1.f1, p.f1 / p1.f1 FROM PATH_TBL p, POINT_TBL p1 WHERE p1.f1[0] BETWEEN 1 AND 1000; + f1 | f1 | ?column? +---------------------------+------------+----------------------------------------------------------------------------------------------------------------- + [(1,2),(3,4)] | (5.1,34.5) | [(0.0609244733856,-0.0199792807459),(0.12604212915,-0.0683242069952)] + [(1,2),(3,4)] | (10,10) | [(0.15,0.05),(0.35,0.05)] + ((1,2),(3,4)) | (5.1,34.5) | ((0.0609244733856,-0.0199792807459),(0.12604212915,-0.0683242069952)) + ((1,2),(3,4)) | (10,10) | ((0.15,0.05),(0.35,0.05)) + [(0,0),(3,0),(4,5),(1,6)] | (5.1,34.5) | [(0,0),(0.0125795471363,-0.0850969365103),(0.158600957032,-0.0924966701199),(0.174387055399,-0.00320655123082)] + [(0,0),(3,0),(4,5),(1,6)] | (10,10) | [(0,0),(0.15,-0.15),(0.45,0.05),(0.35,0.25)] + ((1,2),(3,4)) | (5.1,34.5) | ((0.0609244733856,-0.0199792807459),(0.12604212915,-0.0683242069952)) + ((1,2),(3,4)) | (10,10) | ((0.15,0.05),(0.35,0.05)) + ((1,2),(3,4)) | (5.1,34.5) | ((0.0609244733856,-0.0199792807459),(0.12604212915,-0.0683242069952)) + ((1,2),(3,4)) | (10,10) | ((0.15,0.05),(0.35,0.05)) + [(1,2),(3,4)] | (5.1,34.5) | [(0.0609244733856,-0.0199792807459),(0.12604212915,-0.0683242069952)] + [(1,2),(3,4)] | (10,10) | [(0.15,0.05),(0.35,0.05)] + ((10,20)) | (5.1,34.5) | ((0.609244733856,-0.199792807459)) + ((10,20)) | (10,10) | ((1.5,0.5)) + [(11,12),(13,14)] | (5.1,34.5) | [(0.386512752208,-0.261703911993),(0.451630407972,-0.310048838242)] + [(11,12),(13,14)] | (10,10) | [(1.15,0.05),(1.35,0.05)] + ((11,12),(13,14)) | (5.1,34.5) | ((0.386512752208,-0.261703911993),(0.451630407972,-0.310048838242)) + ((11,12),(13,14)) | (10,10) | ((1.15,0.05),(1.35,0.05)) +(18 rows) + +-- Division by 0 error +SELECT p.f1, p1.f1, p.f1 / p1.f1 FROM PATH_TBL p, POINT_TBL p1 WHERE p1.f1 ~= '(0,0)'::point; +ERROR: division by zero +-- Distance to path +SELECT p1.f1, p2.f1, p1.f1 <-> p2.f1 FROM PATH_TBL p1, PATH_TBL p2; + f1 | f1 | ?column? 
+---------------------------+---------------------------+---------------- + [(1,2),(3,4)] | [(1,2),(3,4)] | 0 + [(1,2),(3,4)] | ((1,2),(3,4)) | 0 + [(1,2),(3,4)] | [(0,0),(3,0),(4,5),(1,6)] | 0.784464540553 + [(1,2),(3,4)] | ((1,2),(3,4)) | 0 + [(1,2),(3,4)] | ((1,2),(3,4)) | 0 + [(1,2),(3,4)] | [(1,2),(3,4)] | 0 + [(1,2),(3,4)] | ((10,20)) | 17.4642491966 + [(1,2),(3,4)] | [(11,12),(13,14)] | 11.313708499 + [(1,2),(3,4)] | ((11,12),(13,14)) | 11.313708499 + ((1,2),(3,4)) | [(1,2),(3,4)] | 0 + ((1,2),(3,4)) | ((1,2),(3,4)) | 0 + ((1,2),(3,4)) | [(0,0),(3,0),(4,5),(1,6)] | 0.784464540553 + ((1,2),(3,4)) | ((1,2),(3,4)) | 0 + ((1,2),(3,4)) | ((1,2),(3,4)) | 0 + ((1,2),(3,4)) | [(1,2),(3,4)] | 0 + ((1,2),(3,4)) | ((10,20)) | 17.4642491966 + ((1,2),(3,4)) | [(11,12),(13,14)] | 11.313708499 + ((1,2),(3,4)) | ((11,12),(13,14)) | 11.313708499 + [(0,0),(3,0),(4,5),(1,6)] | [(1,2),(3,4)] | 0.784464540553 + [(0,0),(3,0),(4,5),(1,6)] | ((1,2),(3,4)) | 0.784464540553 + [(0,0),(3,0),(4,5),(1,6)] | [(0,0),(3,0),(4,5),(1,6)] | 0 + [(0,0),(3,0),(4,5),(1,6)] | ((1,2),(3,4)) | 0.784464540553 + [(0,0),(3,0),(4,5),(1,6)] | ((1,2),(3,4)) | 0.784464540553 + [(0,0),(3,0),(4,5),(1,6)] | [(1,2),(3,4)] | 0.784464540553 + [(0,0),(3,0),(4,5),(1,6)] | ((10,20)) | 16.1554944214 + [(0,0),(3,0),(4,5),(1,6)] | [(11,12),(13,14)] | 9.89949493661 + [(0,0),(3,0),(4,5),(1,6)] | ((11,12),(13,14)) | 9.89949493661 + ((1,2),(3,4)) | [(1,2),(3,4)] | 0 + ((1,2),(3,4)) | ((1,2),(3,4)) | 0 + ((1,2),(3,4)) | [(0,0),(3,0),(4,5),(1,6)] | 0.784464540553 + ((1,2),(3,4)) | ((1,2),(3,4)) | 0 + ((1,2),(3,4)) | ((1,2),(3,4)) | 0 + ((1,2),(3,4)) | [(1,2),(3,4)] | 0 + ((1,2),(3,4)) | ((10,20)) | 17.4642491966 + ((1,2),(3,4)) | [(11,12),(13,14)] | 11.313708499 + ((1,2),(3,4)) | ((11,12),(13,14)) | 11.313708499 + ((1,2),(3,4)) | [(1,2),(3,4)] | 0 + ((1,2),(3,4)) | ((1,2),(3,4)) | 0 + ((1,2),(3,4)) | [(0,0),(3,0),(4,5),(1,6)] | 0.784464540553 + ((1,2),(3,4)) | ((1,2),(3,4)) | 0 + ((1,2),(3,4)) | ((1,2),(3,4)) | 0 + ((1,2),(3,4)) | [(1,2),(3,4)] | 0 + ((1,2),(3,4)) | ((10,20)) | 17.4642491966 + ((1,2),(3,4)) | [(11,12),(13,14)] | 11.313708499 + ((1,2),(3,4)) | ((11,12),(13,14)) | 11.313708499 + [(1,2),(3,4)] | [(1,2),(3,4)] | 0 + [(1,2),(3,4)] | ((1,2),(3,4)) | 0 + [(1,2),(3,4)] | [(0,0),(3,0),(4,5),(1,6)] | 0.784464540553 + [(1,2),(3,4)] | ((1,2),(3,4)) | 0 + [(1,2),(3,4)] | ((1,2),(3,4)) | 0 + [(1,2),(3,4)] | [(1,2),(3,4)] | 0 + [(1,2),(3,4)] | ((10,20)) | 17.4642491966 + [(1,2),(3,4)] | [(11,12),(13,14)] | 11.313708499 + [(1,2),(3,4)] | ((11,12),(13,14)) | 11.313708499 + ((10,20)) | [(1,2),(3,4)] | 17.4642491966 + ((10,20)) | ((1,2),(3,4)) | 17.4642491966 + ((10,20)) | [(0,0),(3,0),(4,5),(1,6)] | 16.1554944214 + ((10,20)) | ((1,2),(3,4)) | 17.4642491966 + ((10,20)) | ((1,2),(3,4)) | 17.4642491966 + ((10,20)) | [(1,2),(3,4)] | 17.4642491966 + ((10,20)) | ((10,20)) | 0 + ((10,20)) | [(11,12),(13,14)] | 6.7082039325 + ((10,20)) | ((11,12),(13,14)) | 6.7082039325 + [(11,12),(13,14)] | [(1,2),(3,4)] | 11.313708499 + [(11,12),(13,14)] | ((1,2),(3,4)) | 11.313708499 + [(11,12),(13,14)] | [(0,0),(3,0),(4,5),(1,6)] | 9.89949493661 + [(11,12),(13,14)] | ((1,2),(3,4)) | 11.313708499 + [(11,12),(13,14)] | ((1,2),(3,4)) | 11.313708499 + [(11,12),(13,14)] | [(1,2),(3,4)] | 11.313708499 + [(11,12),(13,14)] | ((10,20)) | 6.7082039325 + [(11,12),(13,14)] | [(11,12),(13,14)] | 0 + [(11,12),(13,14)] | ((11,12),(13,14)) | 0 + ((11,12),(13,14)) | [(1,2),(3,4)] | 11.313708499 + ((11,12),(13,14)) | ((1,2),(3,4)) | 11.313708499 + ((11,12),(13,14)) | 
[(0,0),(3,0),(4,5),(1,6)] | 9.89949493661 + ((11,12),(13,14)) | ((1,2),(3,4)) | 11.313708499 + ((11,12),(13,14)) | ((1,2),(3,4)) | 11.313708499 + ((11,12),(13,14)) | [(1,2),(3,4)] | 11.313708499 + ((11,12),(13,14)) | ((10,20)) | 6.7082039325 + ((11,12),(13,14)) | [(11,12),(13,14)] | 0 + ((11,12),(13,14)) | ((11,12),(13,14)) | 0 +(81 rows) -- -- Polygons @@ -373,73 +3414,154 @@ SELECT '' AS eight, p1.f1 * point '(2,-1)' AS dist_mul -- containment SELECT '' AS twentyfour, p.f1, poly.f1, poly.f1 @> p.f1 AS contains FROM POLYGON_TBL poly, POINT_TBL p; - twentyfour | f1 | f1 | contains -------------+------------+---------------------+---------- - | (0,0) | ((2,0),(2,4),(0,0)) | t - | (0,0) | ((3,1),(3,3),(1,0)) | f - | (0,0) | ((0,0)) | t - | (0,0) | ((0,1),(0,1)) | f - | (-10,0) | ((2,0),(2,4),(0,0)) | f - | (-10,0) | ((3,1),(3,3),(1,0)) | f - | (-10,0) | ((0,0)) | f - | (-10,0) | ((0,1),(0,1)) | f - | (-3,4) | ((2,0),(2,4),(0,0)) | f - | (-3,4) | ((3,1),(3,3),(1,0)) | f - | (-3,4) | ((0,0)) | f - | (-3,4) | ((0,1),(0,1)) | f - | (5.1,34.5) | ((2,0),(2,4),(0,0)) | f - | (5.1,34.5) | ((3,1),(3,3),(1,0)) | f - | (5.1,34.5) | ((0,0)) | f - | (5.1,34.5) | ((0,1),(0,1)) | f - | (-5,-12) | ((2,0),(2,4),(0,0)) | f - | (-5,-12) | ((3,1),(3,3),(1,0)) | f - | (-5,-12) | ((0,0)) | f - | (-5,-12) | ((0,1),(0,1)) | f - | (10,10) | ((2,0),(2,4),(0,0)) | f - | (10,10) | ((3,1),(3,3),(1,0)) | f - | (10,10) | ((0,0)) | f - | (10,10) | ((0,1),(0,1)) | f -(24 rows) + twentyfour | f1 | f1 | contains +------------+-------------------+----------------------------+---------- + | (0,0) | ((2,0),(2,4),(0,0)) | t + | (0,0) | ((3,1),(3,3),(1,0)) | f + | (0,0) | ((1,2),(3,4),(5,6),(7,8)) | f + | (0,0) | ((7,8),(5,6),(3,4),(1,2)) | f + | (0,0) | ((1,2),(7,8),(5,6),(3,-4)) | f + | (0,0) | ((0,0)) | t + | (0,0) | ((0,1),(0,1)) | f + | (-10,0) | ((2,0),(2,4),(0,0)) | f + | (-10,0) | ((3,1),(3,3),(1,0)) | f + | (-10,0) | ((1,2),(3,4),(5,6),(7,8)) | f + | (-10,0) | ((7,8),(5,6),(3,4),(1,2)) | f + | (-10,0) | ((1,2),(7,8),(5,6),(3,-4)) | f + | (-10,0) | ((0,0)) | f + | (-10,0) | ((0,1),(0,1)) | f + | (-3,4) | ((2,0),(2,4),(0,0)) | f + | (-3,4) | ((3,1),(3,3),(1,0)) | f + | (-3,4) | ((1,2),(3,4),(5,6),(7,8)) | f + | (-3,4) | ((7,8),(5,6),(3,4),(1,2)) | f + | (-3,4) | ((1,2),(7,8),(5,6),(3,-4)) | f + | (-3,4) | ((0,0)) | f + | (-3,4) | ((0,1),(0,1)) | f + | (5.1,34.5) | ((2,0),(2,4),(0,0)) | f + | (5.1,34.5) | ((3,1),(3,3),(1,0)) | f + | (5.1,34.5) | ((1,2),(3,4),(5,6),(7,8)) | f + | (5.1,34.5) | ((7,8),(5,6),(3,4),(1,2)) | f + | (5.1,34.5) | ((1,2),(7,8),(5,6),(3,-4)) | f + | (5.1,34.5) | ((0,0)) | f + | (5.1,34.5) | ((0,1),(0,1)) | f + | (-5,-12) | ((2,0),(2,4),(0,0)) | f + | (-5,-12) | ((3,1),(3,3),(1,0)) | f + | (-5,-12) | ((1,2),(3,4),(5,6),(7,8)) | f + | (-5,-12) | ((7,8),(5,6),(3,4),(1,2)) | f + | (-5,-12) | ((1,2),(7,8),(5,6),(3,-4)) | f + | (-5,-12) | ((0,0)) | f + | (-5,-12) | ((0,1),(0,1)) | f + | (1e-300,-1e-300) | ((2,0),(2,4),(0,0)) | t + | (1e-300,-1e-300) | ((3,1),(3,3),(1,0)) | f + | (1e-300,-1e-300) | ((1,2),(3,4),(5,6),(7,8)) | f + | (1e-300,-1e-300) | ((7,8),(5,6),(3,4),(1,2)) | f + | (1e-300,-1e-300) | ((1,2),(7,8),(5,6),(3,-4)) | f + | (1e-300,-1e-300) | ((0,0)) | t + | (1e-300,-1e-300) | ((0,1),(0,1)) | f + | (1e+300,Infinity) | ((2,0),(2,4),(0,0)) | f + | (1e+300,Infinity) | ((3,1),(3,3),(1,0)) | f + | (1e+300,Infinity) | ((1,2),(3,4),(5,6),(7,8)) | f + | (1e+300,Infinity) | ((7,8),(5,6),(3,4),(1,2)) | f + | (1e+300,Infinity) | ((1,2),(7,8),(5,6),(3,-4)) | f + | (1e+300,Infinity) | ((0,0)) | f + | 
(1e+300,Infinity) | ((0,1),(0,1)) | f + | (NaN,NaN) | ((2,0),(2,4),(0,0)) | t + | (NaN,NaN) | ((3,1),(3,3),(1,0)) | t + | (NaN,NaN) | ((1,2),(3,4),(5,6),(7,8)) | t + | (NaN,NaN) | ((7,8),(5,6),(3,4),(1,2)) | t + | (NaN,NaN) | ((1,2),(7,8),(5,6),(3,-4)) | t + | (NaN,NaN) | ((0,0)) | t + | (NaN,NaN) | ((0,1),(0,1)) | t + | (10,10) | ((2,0),(2,4),(0,0)) | f + | (10,10) | ((3,1),(3,3),(1,0)) | f + | (10,10) | ((1,2),(3,4),(5,6),(7,8)) | f + | (10,10) | ((7,8),(5,6),(3,4),(1,2)) | f + | (10,10) | ((1,2),(7,8),(5,6),(3,-4)) | f + | (10,10) | ((0,0)) | f + | (10,10) | ((0,1),(0,1)) | f +(63 rows) SELECT '' AS twentyfour, p.f1, poly.f1, p.f1 <@ poly.f1 AS contained FROM POLYGON_TBL poly, POINT_TBL p; - twentyfour | f1 | f1 | contained -------------+------------+---------------------+----------- - | (0,0) | ((2,0),(2,4),(0,0)) | t - | (0,0) | ((3,1),(3,3),(1,0)) | f - | (0,0) | ((0,0)) | t - | (0,0) | ((0,1),(0,1)) | f - | (-10,0) | ((2,0),(2,4),(0,0)) | f - | (-10,0) | ((3,1),(3,3),(1,0)) | f - | (-10,0) | ((0,0)) | f - | (-10,0) | ((0,1),(0,1)) | f - | (-3,4) | ((2,0),(2,4),(0,0)) | f - | (-3,4) | ((3,1),(3,3),(1,0)) | f - | (-3,4) | ((0,0)) | f - | (-3,4) | ((0,1),(0,1)) | f - | (5.1,34.5) | ((2,0),(2,4),(0,0)) | f - | (5.1,34.5) | ((3,1),(3,3),(1,0)) | f - | (5.1,34.5) | ((0,0)) | f - | (5.1,34.5) | ((0,1),(0,1)) | f - | (-5,-12) | ((2,0),(2,4),(0,0)) | f - | (-5,-12) | ((3,1),(3,3),(1,0)) | f - | (-5,-12) | ((0,0)) | f - | (-5,-12) | ((0,1),(0,1)) | f - | (10,10) | ((2,0),(2,4),(0,0)) | f - | (10,10) | ((3,1),(3,3),(1,0)) | f - | (10,10) | ((0,0)) | f - | (10,10) | ((0,1),(0,1)) | f -(24 rows) + twentyfour | f1 | f1 | contained +------------+-------------------+----------------------------+----------- + | (0,0) | ((2,0),(2,4),(0,0)) | t + | (0,0) | ((3,1),(3,3),(1,0)) | f + | (0,0) | ((1,2),(3,4),(5,6),(7,8)) | f + | (0,0) | ((7,8),(5,6),(3,4),(1,2)) | f + | (0,0) | ((1,2),(7,8),(5,6),(3,-4)) | f + | (0,0) | ((0,0)) | t + | (0,0) | ((0,1),(0,1)) | f + | (-10,0) | ((2,0),(2,4),(0,0)) | f + | (-10,0) | ((3,1),(3,3),(1,0)) | f + | (-10,0) | ((1,2),(3,4),(5,6),(7,8)) | f + | (-10,0) | ((7,8),(5,6),(3,4),(1,2)) | f + | (-10,0) | ((1,2),(7,8),(5,6),(3,-4)) | f + | (-10,0) | ((0,0)) | f + | (-10,0) | ((0,1),(0,1)) | f + | (-3,4) | ((2,0),(2,4),(0,0)) | f + | (-3,4) | ((3,1),(3,3),(1,0)) | f + | (-3,4) | ((1,2),(3,4),(5,6),(7,8)) | f + | (-3,4) | ((7,8),(5,6),(3,4),(1,2)) | f + | (-3,4) | ((1,2),(7,8),(5,6),(3,-4)) | f + | (-3,4) | ((0,0)) | f + | (-3,4) | ((0,1),(0,1)) | f + | (5.1,34.5) | ((2,0),(2,4),(0,0)) | f + | (5.1,34.5) | ((3,1),(3,3),(1,0)) | f + | (5.1,34.5) | ((1,2),(3,4),(5,6),(7,8)) | f + | (5.1,34.5) | ((7,8),(5,6),(3,4),(1,2)) | f + | (5.1,34.5) | ((1,2),(7,8),(5,6),(3,-4)) | f + | (5.1,34.5) | ((0,0)) | f + | (5.1,34.5) | ((0,1),(0,1)) | f + | (-5,-12) | ((2,0),(2,4),(0,0)) | f + | (-5,-12) | ((3,1),(3,3),(1,0)) | f + | (-5,-12) | ((1,2),(3,4),(5,6),(7,8)) | f + | (-5,-12) | ((7,8),(5,6),(3,4),(1,2)) | f + | (-5,-12) | ((1,2),(7,8),(5,6),(3,-4)) | f + | (-5,-12) | ((0,0)) | f + | (-5,-12) | ((0,1),(0,1)) | f + | (1e-300,-1e-300) | ((2,0),(2,4),(0,0)) | t + | (1e-300,-1e-300) | ((3,1),(3,3),(1,0)) | f + | (1e-300,-1e-300) | ((1,2),(3,4),(5,6),(7,8)) | f + | (1e-300,-1e-300) | ((7,8),(5,6),(3,4),(1,2)) | f + | (1e-300,-1e-300) | ((1,2),(7,8),(5,6),(3,-4)) | f + | (1e-300,-1e-300) | ((0,0)) | t + | (1e-300,-1e-300) | ((0,1),(0,1)) | f + | (1e+300,Infinity) | ((2,0),(2,4),(0,0)) | f + | (1e+300,Infinity) | ((3,1),(3,3),(1,0)) | f + | (1e+300,Infinity) | ((1,2),(3,4),(5,6),(7,8)) | f + | 
(1e+300,Infinity) | ((7,8),(5,6),(3,4),(1,2)) | f + | (1e+300,Infinity) | ((1,2),(7,8),(5,6),(3,-4)) | f + | (1e+300,Infinity) | ((0,0)) | f + | (1e+300,Infinity) | ((0,1),(0,1)) | f + | (NaN,NaN) | ((2,0),(2,4),(0,0)) | t + | (NaN,NaN) | ((3,1),(3,3),(1,0)) | t + | (NaN,NaN) | ((1,2),(3,4),(5,6),(7,8)) | t + | (NaN,NaN) | ((7,8),(5,6),(3,4),(1,2)) | t + | (NaN,NaN) | ((1,2),(7,8),(5,6),(3,-4)) | t + | (NaN,NaN) | ((0,0)) | t + | (NaN,NaN) | ((0,1),(0,1)) | t + | (10,10) | ((2,0),(2,4),(0,0)) | f + | (10,10) | ((3,1),(3,3),(1,0)) | f + | (10,10) | ((1,2),(3,4),(5,6),(7,8)) | f + | (10,10) | ((7,8),(5,6),(3,4),(1,2)) | f + | (10,10) | ((1,2),(7,8),(5,6),(3,-4)) | f + | (10,10) | ((0,0)) | f + | (10,10) | ((0,1),(0,1)) | f +(63 rows) SELECT '' AS four, npoints(f1) AS npoints, f1 AS polygon FROM POLYGON_TBL; - four | npoints | polygon -------+---------+--------------------- + four | npoints | polygon +------+---------+---------------------------- | 3 | ((2,0),(2,4),(0,0)) | 3 | ((3,1),(3,3),(1,0)) + | 4 | ((1,2),(3,4),(5,6),(7,8)) + | 4 | ((7,8),(5,6),(3,4),(1,2)) + | 4 | ((1,2),(7,8),(5,6),(3,-4)) | 1 | ((0,0)) | 2 | ((0,1),(0,1)) -(4 rows) +(7 rows) SELECT '' AS four, polygon(f1) FROM BOX_TBL; @@ -447,9 +3569,10 @@ SELECT '' AS four, polygon(f1) ------+------------------------------------------- | ((0,0),(0,2),(2,2),(2,0)) | ((1,1),(1,3),(3,3),(3,1)) + | ((-8,-10),(-8,2),(-2,2),(-2,-10)) | ((2.5,2.5),(2.5,3.5),(2.5,3.5),(2.5,2.5)) | ((3,3),(3,3),(3,3),(3,3)) -(4 rows) +(5 rows) SELECT '' AS four, polygon(f1) FROM PATH_TBL WHERE isclosed(f1); @@ -458,8 +3581,9 @@ SELECT '' AS four, polygon(f1) | ((1,2),(3,4)) | ((1,2),(3,4)) | ((1,2),(3,4)) + | ((10,20)) | ((11,12),(13,14)) -(4 rows) +(5 rows) SELECT '' AS four, f1 AS open_path, polygon( pclose(f1)) AS polygon FROM PATH_TBL @@ -472,56 +3596,351 @@ SELECT '' AS four, f1 AS open_path, polygon( pclose(f1)) AS polygon | [(11,12),(13,14)] | ((11,12),(13,14)) (4 rows) --- convert circles to polygons using the default number of points -SELECT '' AS six, polygon(f1) - FROM CIRCLE_TBL; - six | polygon ------+---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- - | ((2,1),(2.40192378865,2.5),(3.5,3.59807621135),(5,4),(6.5,3.59807621135),(7.59807621135,2.5),(8,1),(7.59807621135,-0.5),(6.5,-1.59807621135),(5,-2),(3.5,-1.59807621135),(2.40192378865,-0.5)) - | ((-99,2),(-85.6025403784,52),(-49,88.6025403784),(1,102),(51,88.6025403784),(87.6025403784,52),(101,2),(87.6025403784,-48),(51,-84.6025403784),(1,-98),(-49,-84.6025403784),(-85.6025403784,-48)) - | ((-4,3),(-3.33012701892,5.5),(-1.5,7.33012701892),(1,8),(3.5,7.33012701892),(5.33012701892,5.5),(6,3),(5.33012701892,0.5),(3.5,-1.33012701892),(1,-2),(-1.5,-1.33012701892),(-3.33012701892,0.5)) - | ((-2,2),(-1.59807621135,3.5),(-0.5,4.59807621135),(1,5),(2.5,4.59807621135),(3.59807621135,3.5),(4,2),(3.59807621135,0.5),(2.5,-0.598076211353),(1,-1),(-0.5,-0.598076211353),(-1.59807621135,0.5)) - | ((90,200),(91.3397459622,205),(95,208.660254038),(100,210),(105,208.660254038),(108.660254038,205),(110,200),(108.660254038,195),(105,191.339745962),(100,190),(95,191.339745962),(91.3397459622,195)) - | ((-15,1),(0.40707856479,58.5),(42.5,100.592921435),(100,116),(157.5,100.592921435),(199.592921435,58.5),(215,1),(199.592921435,-56.5),(157.5,-98.5929214352),(100,-114),(42.5,-98.5929214352),(0.40707856479,-56.5)) -(6 rows) +-- To box +SELECT f1, f1::box 
FROM POLYGON_TBL; + f1 | f1 +----------------------------+-------------- + ((2,0),(2,4),(0,0)) | (2,4),(0,0) + ((3,1),(3,3),(1,0)) | (3,3),(1,0) + ((1,2),(3,4),(5,6),(7,8)) | (7,8),(1,2) + ((7,8),(5,6),(3,4),(1,2)) | (7,8),(1,2) + ((1,2),(7,8),(5,6),(3,-4)) | (7,8),(1,-4) + ((0,0)) | (0,0),(0,0) + ((0,1),(0,1)) | (0,1),(0,1) +(7 rows) --- convert the circle to an 8-point polygon -SELECT '' AS six, polygon(8, f1) - FROM CIRCLE_TBL; - six | polygon ------+------------------------------------------------------------------------------------------------------------------------------------------------------------------ - | ((2,1),(2.87867965644,3.12132034356),(5,4),(7.12132034356,3.12132034356),(8,1),(7.12132034356,-1.12132034356),(5,-2),(2.87867965644,-1.12132034356)) - | ((-99,2),(-69.7106781187,72.7106781187),(1,102),(71.7106781187,72.7106781187),(101,2),(71.7106781187,-68.7106781187),(1,-98),(-69.7106781187,-68.7106781187)) - | ((-4,3),(-2.53553390593,6.53553390593),(1,8),(4.53553390593,6.53553390593),(6,3),(4.53553390593,-0.535533905933),(1,-2),(-2.53553390593,-0.535533905933)) - | ((-2,2),(-1.12132034356,4.12132034356),(1,5),(3.12132034356,4.12132034356),(4,2),(3.12132034356,-0.12132034356),(1,-1),(-1.12132034356,-0.12132034356)) - | ((90,200),(92.9289321881,207.071067812),(100,210),(107.071067812,207.071067812),(110,200),(107.071067812,192.928932188),(100,190),(92.9289321881,192.928932188)) - | ((-15,1),(18.6827201635,82.3172798365),(100,116),(181.317279836,82.3172798365),(215,1),(181.317279836,-80.3172798365),(100,-114),(18.6827201635,-80.3172798365)) -(6 rows) +-- To path +SELECT f1, f1::path FROM POLYGON_TBL; + f1 | f1 +----------------------------+---------------------------- + ((2,0),(2,4),(0,0)) | ((2,0),(2,4),(0,0)) + ((3,1),(3,3),(1,0)) | ((3,1),(3,3),(1,0)) + ((1,2),(3,4),(5,6),(7,8)) | ((1,2),(3,4),(5,6),(7,8)) + ((7,8),(5,6),(3,4),(1,2)) | ((7,8),(5,6),(3,4),(1,2)) + ((1,2),(7,8),(5,6),(3,-4)) | ((1,2),(7,8),(5,6),(3,-4)) + ((0,0)) | ((0,0)) + ((0,1),(0,1)) | ((0,1),(0,1)) +(7 rows) + +-- Same as polygon +SELECT p1.f1, p2.f1 FROM POLYGON_TBL p1, POLYGON_TBL p2 WHERE p1.f1 ~= p2.f1; + f1 | f1 +----------------------------+---------------------------- + ((2,0),(2,4),(0,0)) | ((2,0),(2,4),(0,0)) + ((3,1),(3,3),(1,0)) | ((3,1),(3,3),(1,0)) + ((1,2),(3,4),(5,6),(7,8)) | ((1,2),(3,4),(5,6),(7,8)) + ((1,2),(3,4),(5,6),(7,8)) | ((7,8),(5,6),(3,4),(1,2)) + ((7,8),(5,6),(3,4),(1,2)) | ((1,2),(3,4),(5,6),(7,8)) + ((7,8),(5,6),(3,4),(1,2)) | ((7,8),(5,6),(3,4),(1,2)) + ((1,2),(7,8),(5,6),(3,-4)) | ((1,2),(7,8),(5,6),(3,-4)) + ((0,0)) | ((0,0)) + ((0,1),(0,1)) | ((0,1),(0,1)) +(9 rows) + +-- Contained by polygon +SELECT p1.f1, p2.f1 FROM POLYGON_TBL p1, POLYGON_TBL p2 WHERE p1.f1 <@ p2.f1; + f1 | f1 +----------------------------+---------------------------- + ((2,0),(2,4),(0,0)) | ((2,0),(2,4),(0,0)) + ((3,1),(3,3),(1,0)) | ((3,1),(3,3),(1,0)) + ((1,2),(3,4),(5,6),(7,8)) | ((1,2),(3,4),(5,6),(7,8)) + ((1,2),(3,4),(5,6),(7,8)) | ((7,8),(5,6),(3,4),(1,2)) + ((1,2),(3,4),(5,6),(7,8)) | ((1,2),(7,8),(5,6),(3,-4)) + ((7,8),(5,6),(3,4),(1,2)) | ((1,2),(3,4),(5,6),(7,8)) + ((7,8),(5,6),(3,4),(1,2)) | ((7,8),(5,6),(3,4),(1,2)) + ((7,8),(5,6),(3,4),(1,2)) | ((1,2),(7,8),(5,6),(3,-4)) + ((1,2),(7,8),(5,6),(3,-4)) | ((1,2),(7,8),(5,6),(3,-4)) + ((0,0)) | ((2,0),(2,4),(0,0)) + ((0,0)) | ((0,0)) + ((0,1),(0,1)) | ((0,1),(0,1)) +(12 rows) + +-- Contains polygon +SELECT p1.f1, p2.f1 FROM POLYGON_TBL p1, POLYGON_TBL p2 WHERE p1.f1 @> p2.f1; + f1 | f1 
+----------------------------+---------------------------- + ((2,0),(2,4),(0,0)) | ((2,0),(2,4),(0,0)) + ((2,0),(2,4),(0,0)) | ((0,0)) + ((3,1),(3,3),(1,0)) | ((3,1),(3,3),(1,0)) + ((1,2),(3,4),(5,6),(7,8)) | ((1,2),(3,4),(5,6),(7,8)) + ((1,2),(3,4),(5,6),(7,8)) | ((7,8),(5,6),(3,4),(1,2)) + ((7,8),(5,6),(3,4),(1,2)) | ((1,2),(3,4),(5,6),(7,8)) + ((7,8),(5,6),(3,4),(1,2)) | ((7,8),(5,6),(3,4),(1,2)) + ((1,2),(7,8),(5,6),(3,-4)) | ((1,2),(3,4),(5,6),(7,8)) + ((1,2),(7,8),(5,6),(3,-4)) | ((7,8),(5,6),(3,4),(1,2)) + ((1,2),(7,8),(5,6),(3,-4)) | ((1,2),(7,8),(5,6),(3,-4)) + ((0,0)) | ((0,0)) + ((0,1),(0,1)) | ((0,1),(0,1)) +(12 rows) + +-- Overlap with polygon +SELECT p1.f1, p2.f1 FROM POLYGON_TBL p1, POLYGON_TBL p2 WHERE p1.f1 && p2.f1; + f1 | f1 +----------------------------+---------------------------- + ((2,0),(2,4),(0,0)) | ((2,0),(2,4),(0,0)) + ((2,0),(2,4),(0,0)) | ((3,1),(3,3),(1,0)) + ((2,0),(2,4),(0,0)) | ((1,2),(3,4),(5,6),(7,8)) + ((2,0),(2,4),(0,0)) | ((7,8),(5,6),(3,4),(1,2)) + ((2,0),(2,4),(0,0)) | ((1,2),(7,8),(5,6),(3,-4)) + ((2,0),(2,4),(0,0)) | ((0,0)) + ((3,1),(3,3),(1,0)) | ((2,0),(2,4),(0,0)) + ((3,1),(3,3),(1,0)) | ((3,1),(3,3),(1,0)) + ((3,1),(3,3),(1,0)) | ((1,2),(7,8),(5,6),(3,-4)) + ((1,2),(3,4),(5,6),(7,8)) | ((2,0),(2,4),(0,0)) + ((1,2),(3,4),(5,6),(7,8)) | ((1,2),(3,4),(5,6),(7,8)) + ((1,2),(3,4),(5,6),(7,8)) | ((7,8),(5,6),(3,4),(1,2)) + ((1,2),(3,4),(5,6),(7,8)) | ((1,2),(7,8),(5,6),(3,-4)) + ((7,8),(5,6),(3,4),(1,2)) | ((2,0),(2,4),(0,0)) + ((7,8),(5,6),(3,4),(1,2)) | ((1,2),(3,4),(5,6),(7,8)) + ((7,8),(5,6),(3,4),(1,2)) | ((7,8),(5,6),(3,4),(1,2)) + ((7,8),(5,6),(3,4),(1,2)) | ((1,2),(7,8),(5,6),(3,-4)) + ((1,2),(7,8),(5,6),(3,-4)) | ((2,0),(2,4),(0,0)) + ((1,2),(7,8),(5,6),(3,-4)) | ((3,1),(3,3),(1,0)) + ((1,2),(7,8),(5,6),(3,-4)) | ((1,2),(3,4),(5,6),(7,8)) + ((1,2),(7,8),(5,6),(3,-4)) | ((7,8),(5,6),(3,4),(1,2)) + ((1,2),(7,8),(5,6),(3,-4)) | ((1,2),(7,8),(5,6),(3,-4)) + ((0,0)) | ((2,0),(2,4),(0,0)) + ((0,0)) | ((0,0)) + ((0,1),(0,1)) | ((0,1),(0,1)) +(25 rows) + +-- Left of polygon +SELECT p1.f1, p2.f1 FROM POLYGON_TBL p1, POLYGON_TBL p2 WHERE p1.f1 << p2.f1; + f1 | f1 +---------------+---------------------------- + ((0,0)) | ((3,1),(3,3),(1,0)) + ((0,0)) | ((1,2),(3,4),(5,6),(7,8)) + ((0,0)) | ((7,8),(5,6),(3,4),(1,2)) + ((0,0)) | ((1,2),(7,8),(5,6),(3,-4)) + ((0,1),(0,1)) | ((3,1),(3,3),(1,0)) + ((0,1),(0,1)) | ((1,2),(3,4),(5,6),(7,8)) + ((0,1),(0,1)) | ((7,8),(5,6),(3,4),(1,2)) + ((0,1),(0,1)) | ((1,2),(7,8),(5,6),(3,-4)) +(8 rows) + +-- Overlap of left of polygon +SELECT p1.f1, p2.f1 FROM POLYGON_TBL p1, POLYGON_TBL p2 WHERE p1.f1 &< p2.f1; + f1 | f1 +----------------------------+---------------------------- + ((2,0),(2,4),(0,0)) | ((2,0),(2,4),(0,0)) + ((2,0),(2,4),(0,0)) | ((3,1),(3,3),(1,0)) + ((2,0),(2,4),(0,0)) | ((1,2),(3,4),(5,6),(7,8)) + ((2,0),(2,4),(0,0)) | ((7,8),(5,6),(3,4),(1,2)) + ((2,0),(2,4),(0,0)) | ((1,2),(7,8),(5,6),(3,-4)) + ((3,1),(3,3),(1,0)) | ((3,1),(3,3),(1,0)) + ((3,1),(3,3),(1,0)) | ((1,2),(3,4),(5,6),(7,8)) + ((3,1),(3,3),(1,0)) | ((7,8),(5,6),(3,4),(1,2)) + ((3,1),(3,3),(1,0)) | ((1,2),(7,8),(5,6),(3,-4)) + ((1,2),(3,4),(5,6),(7,8)) | ((1,2),(3,4),(5,6),(7,8)) + ((1,2),(3,4),(5,6),(7,8)) | ((7,8),(5,6),(3,4),(1,2)) + ((1,2),(3,4),(5,6),(7,8)) | ((1,2),(7,8),(5,6),(3,-4)) + ((7,8),(5,6),(3,4),(1,2)) | ((1,2),(3,4),(5,6),(7,8)) + ((7,8),(5,6),(3,4),(1,2)) | ((7,8),(5,6),(3,4),(1,2)) + ((7,8),(5,6),(3,4),(1,2)) | ((1,2),(7,8),(5,6),(3,-4)) + ((1,2),(7,8),(5,6),(3,-4)) | ((1,2),(3,4),(5,6),(7,8)) + ((1,2),(7,8),(5,6),(3,-4)) 
| ((7,8),(5,6),(3,4),(1,2)) + ((1,2),(7,8),(5,6),(3,-4)) | ((1,2),(7,8),(5,6),(3,-4)) + ((0,0)) | ((2,0),(2,4),(0,0)) + ((0,0)) | ((3,1),(3,3),(1,0)) + ((0,0)) | ((1,2),(3,4),(5,6),(7,8)) + ((0,0)) | ((7,8),(5,6),(3,4),(1,2)) + ((0,0)) | ((1,2),(7,8),(5,6),(3,-4)) + ((0,0)) | ((0,0)) + ((0,0)) | ((0,1),(0,1)) + ((0,1),(0,1)) | ((2,0),(2,4),(0,0)) + ((0,1),(0,1)) | ((3,1),(3,3),(1,0)) + ((0,1),(0,1)) | ((1,2),(3,4),(5,6),(7,8)) + ((0,1),(0,1)) | ((7,8),(5,6),(3,4),(1,2)) + ((0,1),(0,1)) | ((1,2),(7,8),(5,6),(3,-4)) + ((0,1),(0,1)) | ((0,0)) + ((0,1),(0,1)) | ((0,1),(0,1)) +(32 rows) + +-- Right of polygon +SELECT p1.f1, p2.f1 FROM POLYGON_TBL p1, POLYGON_TBL p2 WHERE p1.f1 >> p2.f1; + f1 | f1 +----------------------------+--------------- + ((3,1),(3,3),(1,0)) | ((0,0)) + ((3,1),(3,3),(1,0)) | ((0,1),(0,1)) + ((1,2),(3,4),(5,6),(7,8)) | ((0,0)) + ((1,2),(3,4),(5,6),(7,8)) | ((0,1),(0,1)) + ((7,8),(5,6),(3,4),(1,2)) | ((0,0)) + ((7,8),(5,6),(3,4),(1,2)) | ((0,1),(0,1)) + ((1,2),(7,8),(5,6),(3,-4)) | ((0,0)) + ((1,2),(7,8),(5,6),(3,-4)) | ((0,1),(0,1)) +(8 rows) + +-- Overlap of right of polygon +SELECT p1.f1, p2.f1 FROM POLYGON_TBL p1, POLYGON_TBL p2 WHERE p1.f1 &> p2.f1; + f1 | f1 +----------------------------+---------------------------- + ((2,0),(2,4),(0,0)) | ((2,0),(2,4),(0,0)) + ((2,0),(2,4),(0,0)) | ((0,0)) + ((2,0),(2,4),(0,0)) | ((0,1),(0,1)) + ((3,1),(3,3),(1,0)) | ((2,0),(2,4),(0,0)) + ((3,1),(3,3),(1,0)) | ((3,1),(3,3),(1,0)) + ((3,1),(3,3),(1,0)) | ((1,2),(3,4),(5,6),(7,8)) + ((3,1),(3,3),(1,0)) | ((7,8),(5,6),(3,4),(1,2)) + ((3,1),(3,3),(1,0)) | ((1,2),(7,8),(5,6),(3,-4)) + ((3,1),(3,3),(1,0)) | ((0,0)) + ((3,1),(3,3),(1,0)) | ((0,1),(0,1)) + ((1,2),(3,4),(5,6),(7,8)) | ((2,0),(2,4),(0,0)) + ((1,2),(3,4),(5,6),(7,8)) | ((3,1),(3,3),(1,0)) + ((1,2),(3,4),(5,6),(7,8)) | ((1,2),(3,4),(5,6),(7,8)) + ((1,2),(3,4),(5,6),(7,8)) | ((7,8),(5,6),(3,4),(1,2)) + ((1,2),(3,4),(5,6),(7,8)) | ((1,2),(7,8),(5,6),(3,-4)) + ((1,2),(3,4),(5,6),(7,8)) | ((0,0)) + ((1,2),(3,4),(5,6),(7,8)) | ((0,1),(0,1)) + ((7,8),(5,6),(3,4),(1,2)) | ((2,0),(2,4),(0,0)) + ((7,8),(5,6),(3,4),(1,2)) | ((3,1),(3,3),(1,0)) + ((7,8),(5,6),(3,4),(1,2)) | ((1,2),(3,4),(5,6),(7,8)) + ((7,8),(5,6),(3,4),(1,2)) | ((7,8),(5,6),(3,4),(1,2)) + ((7,8),(5,6),(3,4),(1,2)) | ((1,2),(7,8),(5,6),(3,-4)) + ((7,8),(5,6),(3,4),(1,2)) | ((0,0)) + ((7,8),(5,6),(3,4),(1,2)) | ((0,1),(0,1)) + ((1,2),(7,8),(5,6),(3,-4)) | ((2,0),(2,4),(0,0)) + ((1,2),(7,8),(5,6),(3,-4)) | ((3,1),(3,3),(1,0)) + ((1,2),(7,8),(5,6),(3,-4)) | ((1,2),(3,4),(5,6),(7,8)) + ((1,2),(7,8),(5,6),(3,-4)) | ((7,8),(5,6),(3,4),(1,2)) + ((1,2),(7,8),(5,6),(3,-4)) | ((1,2),(7,8),(5,6),(3,-4)) + ((1,2),(7,8),(5,6),(3,-4)) | ((0,0)) + ((1,2),(7,8),(5,6),(3,-4)) | ((0,1),(0,1)) + ((0,0)) | ((2,0),(2,4),(0,0)) + ((0,0)) | ((0,0)) + ((0,0)) | ((0,1),(0,1)) + ((0,1),(0,1)) | ((2,0),(2,4),(0,0)) + ((0,1),(0,1)) | ((0,0)) + ((0,1),(0,1)) | ((0,1),(0,1)) +(37 rows) + +-- Below polygon +SELECT p1.f1, p2.f1 FROM POLYGON_TBL p1, POLYGON_TBL p2 WHERE p1.f1 <<| p2.f1; + f1 | f1 +---------------+--------------------------- + ((0,0)) | ((1,2),(3,4),(5,6),(7,8)) + ((0,0)) | ((7,8),(5,6),(3,4),(1,2)) + ((0,0)) | ((0,1),(0,1)) + ((0,1),(0,1)) | ((1,2),(3,4),(5,6),(7,8)) + ((0,1),(0,1)) | ((7,8),(5,6),(3,4),(1,2)) +(5 rows) + +-- Overlap or below polygon +SELECT p1.f1, p2.f1 FROM POLYGON_TBL p1, POLYGON_TBL p2 WHERE p1.f1 &<| p2.f1; + f1 | f1 +----------------------------+---------------------------- + ((2,0),(2,4),(0,0)) | ((2,0),(2,4),(0,0)) + ((2,0),(2,4),(0,0)) | ((1,2),(3,4),(5,6),(7,8)) 
+ ((2,0),(2,4),(0,0)) | ((7,8),(5,6),(3,4),(1,2)) + ((2,0),(2,4),(0,0)) | ((1,2),(7,8),(5,6),(3,-4)) + ((3,1),(3,3),(1,0)) | ((2,0),(2,4),(0,0)) + ((3,1),(3,3),(1,0)) | ((3,1),(3,3),(1,0)) + ((3,1),(3,3),(1,0)) | ((1,2),(3,4),(5,6),(7,8)) + ((3,1),(3,3),(1,0)) | ((7,8),(5,6),(3,4),(1,2)) + ((3,1),(3,3),(1,0)) | ((1,2),(7,8),(5,6),(3,-4)) + ((1,2),(3,4),(5,6),(7,8)) | ((1,2),(3,4),(5,6),(7,8)) + ((1,2),(3,4),(5,6),(7,8)) | ((7,8),(5,6),(3,4),(1,2)) + ((1,2),(3,4),(5,6),(7,8)) | ((1,2),(7,8),(5,6),(3,-4)) + ((7,8),(5,6),(3,4),(1,2)) | ((1,2),(3,4),(5,6),(7,8)) + ((7,8),(5,6),(3,4),(1,2)) | ((7,8),(5,6),(3,4),(1,2)) + ((7,8),(5,6),(3,4),(1,2)) | ((1,2),(7,8),(5,6),(3,-4)) + ((1,2),(7,8),(5,6),(3,-4)) | ((1,2),(3,4),(5,6),(7,8)) + ((1,2),(7,8),(5,6),(3,-4)) | ((7,8),(5,6),(3,4),(1,2)) + ((1,2),(7,8),(5,6),(3,-4)) | ((1,2),(7,8),(5,6),(3,-4)) + ((0,0)) | ((2,0),(2,4),(0,0)) + ((0,0)) | ((3,1),(3,3),(1,0)) + ((0,0)) | ((1,2),(3,4),(5,6),(7,8)) + ((0,0)) | ((7,8),(5,6),(3,4),(1,2)) + ((0,0)) | ((1,2),(7,8),(5,6),(3,-4)) + ((0,0)) | ((0,0)) + ((0,0)) | ((0,1),(0,1)) + ((0,1),(0,1)) | ((2,0),(2,4),(0,0)) + ((0,1),(0,1)) | ((3,1),(3,3),(1,0)) + ((0,1),(0,1)) | ((1,2),(3,4),(5,6),(7,8)) + ((0,1),(0,1)) | ((7,8),(5,6),(3,4),(1,2)) + ((0,1),(0,1)) | ((1,2),(7,8),(5,6),(3,-4)) + ((0,1),(0,1)) | ((0,1),(0,1)) +(31 rows) +-- Above polygon +SELECT p1.f1, p2.f1 FROM POLYGON_TBL p1, POLYGON_TBL p2 WHERE p1.f1 |>> p2.f1; + f1 | f1 +---------------------------+--------------- + ((1,2),(3,4),(5,6),(7,8)) | ((0,0)) + ((1,2),(3,4),(5,6),(7,8)) | ((0,1),(0,1)) + ((7,8),(5,6),(3,4),(1,2)) | ((0,0)) + ((7,8),(5,6),(3,4),(1,2)) | ((0,1),(0,1)) + ((0,1),(0,1)) | ((0,0)) +(5 rows) + +-- Overlap or above polygon +SELECT p1.f1, p2.f1 FROM POLYGON_TBL p1, POLYGON_TBL p2 WHERE p1.f1 |&> p2.f1; + f1 | f1 +----------------------------+---------------------------- + ((2,0),(2,4),(0,0)) | ((2,0),(2,4),(0,0)) + ((2,0),(2,4),(0,0)) | ((3,1),(3,3),(1,0)) + ((2,0),(2,4),(0,0)) | ((1,2),(7,8),(5,6),(3,-4)) + ((2,0),(2,4),(0,0)) | ((0,0)) + ((3,1),(3,3),(1,0)) | ((2,0),(2,4),(0,0)) + ((3,1),(3,3),(1,0)) | ((3,1),(3,3),(1,0)) + ((3,1),(3,3),(1,0)) | ((1,2),(7,8),(5,6),(3,-4)) + ((3,1),(3,3),(1,0)) | ((0,0)) + ((1,2),(3,4),(5,6),(7,8)) | ((2,0),(2,4),(0,0)) + ((1,2),(3,4),(5,6),(7,8)) | ((3,1),(3,3),(1,0)) + ((1,2),(3,4),(5,6),(7,8)) | ((1,2),(3,4),(5,6),(7,8)) + ((1,2),(3,4),(5,6),(7,8)) | ((7,8),(5,6),(3,4),(1,2)) + ((1,2),(3,4),(5,6),(7,8)) | ((1,2),(7,8),(5,6),(3,-4)) + ((1,2),(3,4),(5,6),(7,8)) | ((0,0)) + ((1,2),(3,4),(5,6),(7,8)) | ((0,1),(0,1)) + ((7,8),(5,6),(3,4),(1,2)) | ((2,0),(2,4),(0,0)) + ((7,8),(5,6),(3,4),(1,2)) | ((3,1),(3,3),(1,0)) + ((7,8),(5,6),(3,4),(1,2)) | ((1,2),(3,4),(5,6),(7,8)) + ((7,8),(5,6),(3,4),(1,2)) | ((7,8),(5,6),(3,4),(1,2)) + ((7,8),(5,6),(3,4),(1,2)) | ((1,2),(7,8),(5,6),(3,-4)) + ((7,8),(5,6),(3,4),(1,2)) | ((0,0)) + ((7,8),(5,6),(3,4),(1,2)) | ((0,1),(0,1)) + ((1,2),(7,8),(5,6),(3,-4)) | ((1,2),(7,8),(5,6),(3,-4)) + ((0,0)) | ((2,0),(2,4),(0,0)) + ((0,0)) | ((3,1),(3,3),(1,0)) + ((0,0)) | ((1,2),(7,8),(5,6),(3,-4)) + ((0,0)) | ((0,0)) + ((0,1),(0,1)) | ((2,0),(2,4),(0,0)) + ((0,1),(0,1)) | ((3,1),(3,3),(1,0)) + ((0,1),(0,1)) | ((1,2),(7,8),(5,6),(3,-4)) + ((0,1),(0,1)) | ((0,0)) + ((0,1),(0,1)) | ((0,1),(0,1)) +(32 rows) + +-- Distance to polygon +SELECT p1.f1, p2.f1, p1.f1 <-> p2.f1 FROM POLYGON_TBL p1, POLYGON_TBL p2; +ERROR: function "poly_distance" not implemented -- -- Circles -- SELECT '' AS six, circle(f1, 50.0) FROM POINT_TBL; - six | circle ------+----------------- + six | circle 
+-----+------------------------ | <(0,0),50> | <(-10,0),50> | <(-3,4),50> | <(5.1,34.5),50> | <(-5,-12),50> + | <(1e-300,-1e-300),50> + | <(1e+300,Infinity),50> + | <(NaN,NaN),50> | <(10,10),50> -(6 rows) +(9 rows) SELECT '' AS four, circle(f1) FROM BOX_TBL; - four | circle -------+----------------------- + four | circle +------+------------------------ | <(1,1),1.41421356237> | <(2,2),1.41421356237> + | <(-5,-4),6.7082039325> | <(2.5,3),0.5> | <(3,3),0> -(4 rows) +(5 rows) SELECT '' AS two, circle(f1) FROM POLYGON_TBL @@ -530,34 +3949,942 @@ SELECT '' AS two, circle(f1) -----+----------------------------------------------- | <(1.33333333333,1.33333333333),2.04168905064> | <(2.33333333333,1.33333333333),1.47534300379> -(2 rows) + | <(4,5),2.82842712475> + | <(4,5),2.82842712475> + | <(4,3),4.80664375676> +(5 rows) SELECT '' AS twentyfour, c1.f1 AS circle, p1.f1 AS point, (p1.f1 <-> c1.f1) AS distance FROM CIRCLE_TBL c1, POINT_TBL p1 WHERE (p1.f1 <-> c1.f1) > 0 ORDER BY distance, area(c1.f1), p1.f1[0]; - twentyfour | circle | point | distance -------------+----------------+------------+--------------- - | <(1,2),3> | (-3,4) | 1.472135955 - | <(5,1),3> | (0,0) | 2.09901951359 - | <(5,1),3> | (-3,4) | 5.54400374532 - | <(1,3),5> | (-10,0) | 6.40175425099 - | <(1,3),5> | (10,10) | 6.40175425099 - | <(5,1),3> | (10,10) | 7.29563014099 - | <(1,2),3> | (-10,0) | 8.1803398875 - | <(1,2),3> | (10,10) | 9.04159457879 - | <(1,3),5> | (-5,-12) | 11.1554944214 - | <(5,1),3> | (-10,0) | 12.0332963784 - | <(1,2),3> | (-5,-12) | 12.2315462117 - | <(5,1),3> | (-5,-12) | 13.4012194669 - | <(1,3),5> | (5.1,34.5) | 26.7657047773 - | <(1,2),3> | (5.1,34.5) | 29.7575945393 - | <(5,1),3> | (5.1,34.5) | 30.5001492534 - | <(100,200),10> | (5.1,34.5) | 180.778038568 - | <(100,200),10> | (10,10) | 200.237960416 - | <(100,200),10> | (-3,4) | 211.415898255 - | <(100,200),10> | (0,0) | 213.60679775 - | <(100,200),10> | (-10,0) | 218.25424421 - | <(100,200),10> | (-5,-12) | 226.577682802 -(21 rows) + twentyfour | circle | point | distance +------------+----------------+-------------------+--------------- + | <(1,2),3> | (-3,4) | 1.472135955 + | <(5,1),3> | (0,0) | 2.09901951359 + | <(5,1),3> | (1e-300,-1e-300) | 2.09901951359 + | <(5,1),3> | (-3,4) | 5.54400374532 + | <(3,5),0> | (0,0) | 5.83095189485 + | <(3,5),0> | (1e-300,-1e-300) | 5.83095189485 + | <(3,5),0> | (-3,4) | 6.0827625303 + | <(1,3),5> | (-10,0) | 6.40175425099 + | <(1,3),5> | (10,10) | 6.40175425099 + | <(5,1),3> | (10,10) | 7.29563014099 + | <(1,2),3> | (-10,0) | 8.1803398875 + | <(3,5),0> | (10,10) | 8.60232526704 + | <(1,2),3> | (10,10) | 9.04159457879 + | <(1,3),5> | (-5,-12) | 11.1554944214 + | <(5,1),3> | (-10,0) | 12.0332963784 + | <(1,2),3> | (-5,-12) | 12.2315462117 + | <(5,1),3> | (-5,-12) | 13.4012194669 + | <(3,5),0> | (-10,0) | 13.9283882772 + | <(3,5),0> | (-5,-12) | 18.7882942281 + | <(1,3),5> | (5.1,34.5) | 26.7657047773 + | <(3,5),0> | (5.1,34.5) | 29.5746513082 + | <(1,2),3> | (5.1,34.5) | 29.7575945393 + | <(5,1),3> | (5.1,34.5) | 30.5001492534 + | <(100,200),10> | (5.1,34.5) | 180.778038568 + | <(100,200),10> | (10,10) | 200.237960416 + | <(100,200),10> | (-3,4) | 211.415898255 + | <(100,200),10> | (0,0) | 213.60679775 + | <(100,200),10> | (1e-300,-1e-300) | 213.60679775 + | <(100,200),10> | (-10,0) | 218.25424421 + | <(100,200),10> | (-5,-12) | 226.577682802 + | <(3,5),0> | (1e+300,Infinity) | Infinity + | <(1,2),3> | (1e+300,Infinity) | Infinity + | <(5,1),3> | (1e+300,Infinity) | Infinity + | <(1,3),5> | (1e+300,Infinity) | 
Infinity + | <(100,200),10> | (1e+300,Infinity) | Infinity + | <(1,2),100> | (1e+300,Infinity) | Infinity + | <(100,1),115> | (1e+300,Infinity) | Infinity + | <(3,5),0> | (NaN,NaN) | NaN + | <(1,2),3> | (NaN,NaN) | NaN + | <(5,1),3> | (NaN,NaN) | NaN + | <(1,3),5> | (NaN,NaN) | NaN + | <(100,200),10> | (NaN,NaN) | NaN + | <(1,2),100> | (NaN,NaN) | NaN + | <(100,1),115> | (NaN,NaN) | NaN + | <(3,5),NaN> | (-10,0) | NaN + | <(3,5),NaN> | (-5,-12) | NaN + | <(3,5),NaN> | (-3,4) | NaN + | <(3,5),NaN> | (0,0) | NaN + | <(3,5),NaN> | (1e-300,-1e-300) | NaN + | <(3,5),NaN> | (5.1,34.5) | NaN + | <(3,5),NaN> | (10,10) | NaN + | <(3,5),NaN> | (1e+300,Infinity) | NaN + | <(3,5),NaN> | (NaN,NaN) | NaN +(53 rows) + +-- To polygon +SELECT f1, f1::polygon FROM CIRCLE_TBL WHERE f1 >= '<(0,0),1>'; + f1 | f1 +----------------+---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + <(5,1),3> | ((2,1),(2.40192378865,2.5),(3.5,3.59807621135),(5,4),(6.5,3.59807621135),(7.59807621135,2.5),(8,1),(7.59807621135,-0.5),(6.5,-1.59807621135),(5,-2),(3.5,-1.59807621135),(2.40192378865,-0.5)) + <(1,2),100> | ((-99,2),(-85.6025403784,52),(-49,88.6025403784),(1,102),(51,88.6025403784),(87.6025403784,52),(101,2),(87.6025403784,-48),(51,-84.6025403784),(1,-98),(-49,-84.6025403784),(-85.6025403784,-48)) + <(1,3),5> | ((-4,3),(-3.33012701892,5.5),(-1.5,7.33012701892),(1,8),(3.5,7.33012701892),(5.33012701892,5.5),(6,3),(5.33012701892,0.5),(3.5,-1.33012701892),(1,-2),(-1.5,-1.33012701892),(-3.33012701892,0.5)) + <(1,2),3> | ((-2,2),(-1.59807621135,3.5),(-0.5,4.59807621135),(1,5),(2.5,4.59807621135),(3.59807621135,3.5),(4,2),(3.59807621135,0.5),(2.5,-0.598076211353),(1,-1),(-0.5,-0.598076211353),(-1.59807621135,0.5)) + <(100,200),10> | ((90,200),(91.3397459622,205),(95,208.660254038),(100,210),(105,208.660254038),(108.660254038,205),(110,200),(108.660254038,195),(105,191.339745962),(100,190),(95,191.339745962),(91.3397459622,195)) + <(100,1),115> | ((-15,1),(0.40707856479,58.5),(42.5,100.592921435),(100,116),(157.5,100.592921435),(199.592921435,58.5),(215,1),(199.592921435,-56.5),(157.5,-98.5929214352),(100,-114),(42.5,-98.5929214352),(0.40707856479,-56.5)) +(6 rows) + +-- To polygon with less points +SELECT f1, polygon(8, f1) FROM CIRCLE_TBL WHERE f1 >= '<(0,0),1>'; + f1 | polygon +----------------+------------------------------------------------------------------------------------------------------------------------------------------------------------------ + <(5,1),3> | ((2,1),(2.87867965644,3.12132034356),(5,4),(7.12132034356,3.12132034356),(8,1),(7.12132034356,-1.12132034356),(5,-2),(2.87867965644,-1.12132034356)) + <(1,2),100> | ((-99,2),(-69.7106781187,72.7106781187),(1,102),(71.7106781187,72.7106781187),(101,2),(71.7106781187,-68.7106781187),(1,-98),(-69.7106781187,-68.7106781187)) + <(1,3),5> | ((-4,3),(-2.53553390593,6.53553390593),(1,8),(4.53553390593,6.53553390593),(6,3),(4.53553390593,-0.535533905933),(1,-2),(-2.53553390593,-0.535533905933)) + <(1,2),3> | ((-2,2),(-1.12132034356,4.12132034356),(1,5),(3.12132034356,4.12132034356),(4,2),(3.12132034356,-0.12132034356),(1,-1),(-1.12132034356,-0.12132034356)) + <(100,200),10> | ((90,200),(92.9289321881,207.071067812),(100,210),(107.071067812,207.071067812),(110,200),(107.071067812,192.928932188),(100,190),(92.9289321881,192.928932188)) + <(100,1),115> | 
((-15,1),(18.6827201635,82.3172798365),(100,116),(181.317279836,82.3172798365),(215,1),(181.317279836,-80.3172798365),(100,-114),(18.6827201635,-80.3172798365)) +(6 rows) + +-- Too less points error +SELECT f1, polygon(1, f1) FROM CIRCLE_TBL WHERE f1 >= '<(0,0),1>'; +ERROR: must request at least 2 points +-- Zero radius error +SELECT f1, polygon(10, f1) FROM CIRCLE_TBL WHERE f1 < '<(0,0),1>'; +ERROR: cannot convert circle with radius zero to polygon +-- Same as circle +SELECT c1.f1, c2.f1 FROM CIRCLE_TBL c1, CIRCLE_TBL c2 WHERE c1.f1 ~= c2.f1; + f1 | f1 +----------------+---------------- + <(5,1),3> | <(5,1),3> + <(1,2),100> | <(1,2),100> + <(1,3),5> | <(1,3),5> + <(1,2),3> | <(1,2),3> + <(100,200),10> | <(100,200),10> + <(100,1),115> | <(100,1),115> + <(3,5),0> | <(3,5),0> + <(3,5),NaN> | <(3,5),0> + <(3,5),NaN> | <(3,5),NaN> +(9 rows) + +-- Overlap with circle +SELECT c1.f1, c2.f1 FROM CIRCLE_TBL c1, CIRCLE_TBL c2 WHERE c1.f1 && c2.f1; + f1 | f1 +----------------+---------------- + <(5,1),3> | <(5,1),3> + <(5,1),3> | <(1,2),100> + <(5,1),3> | <(1,3),5> + <(5,1),3> | <(1,2),3> + <(5,1),3> | <(100,1),115> + <(1,2),100> | <(5,1),3> + <(1,2),100> | <(1,2),100> + <(1,2),100> | <(1,3),5> + <(1,2),100> | <(1,2),3> + <(1,2),100> | <(100,1),115> + <(1,2),100> | <(3,5),0> + <(1,3),5> | <(5,1),3> + <(1,3),5> | <(1,2),100> + <(1,3),5> | <(1,3),5> + <(1,3),5> | <(1,2),3> + <(1,3),5> | <(100,1),115> + <(1,3),5> | <(3,5),0> + <(1,2),3> | <(5,1),3> + <(1,2),3> | <(1,2),100> + <(1,2),3> | <(1,3),5> + <(1,2),3> | <(1,2),3> + <(1,2),3> | <(100,1),115> + <(100,200),10> | <(100,200),10> + <(100,1),115> | <(5,1),3> + <(100,1),115> | <(1,2),100> + <(100,1),115> | <(1,3),5> + <(100,1),115> | <(1,2),3> + <(100,1),115> | <(100,1),115> + <(100,1),115> | <(3,5),0> + <(3,5),0> | <(1,2),100> + <(3,5),0> | <(1,3),5> + <(3,5),0> | <(100,1),115> + <(3,5),0> | <(3,5),0> +(33 rows) + +-- Overlap or left of circle +SELECT c1.f1, c2.f1 FROM CIRCLE_TBL c1, CIRCLE_TBL c2 WHERE c1.f1 &< c2.f1; + f1 | f1 +----------------+---------------- + <(5,1),3> | <(5,1),3> + <(5,1),3> | <(1,2),100> + <(5,1),3> | <(100,200),10> + <(5,1),3> | <(100,1),115> + <(1,2),100> | <(1,2),100> + <(1,2),100> | <(100,200),10> + <(1,2),100> | <(100,1),115> + <(1,3),5> | <(5,1),3> + <(1,3),5> | <(1,2),100> + <(1,3),5> | <(1,3),5> + <(1,3),5> | <(100,200),10> + <(1,3),5> | <(100,1),115> + <(1,2),3> | <(5,1),3> + <(1,2),3> | <(1,2),100> + <(1,2),3> | <(1,3),5> + <(1,2),3> | <(1,2),3> + <(1,2),3> | <(100,200),10> + <(1,2),3> | <(100,1),115> + <(100,200),10> | <(100,200),10> + <(100,200),10> | <(100,1),115> + <(100,1),115> | <(100,1),115> + <(3,5),0> | <(5,1),3> + <(3,5),0> | <(1,2),100> + <(3,5),0> | <(1,3),5> + <(3,5),0> | <(1,2),3> + <(3,5),0> | <(100,200),10> + <(3,5),0> | <(100,1),115> + <(3,5),0> | <(3,5),0> +(28 rows) + +-- Left of circle +SELECT c1.f1, c2.f1 FROM CIRCLE_TBL c1, CIRCLE_TBL c2 WHERE c1.f1 << c2.f1; + f1 | f1 +-----------+---------------- + <(5,1),3> | <(100,200),10> + <(1,3),5> | <(100,200),10> + <(1,2),3> | <(100,200),10> + <(3,5),0> | <(100,200),10> +(4 rows) + +-- Right of circle +SELECT c1.f1, c2.f1 FROM CIRCLE_TBL c1, CIRCLE_TBL c2 WHERE c1.f1 >> c2.f1; + f1 | f1 +----------------+----------- + <(100,200),10> | <(5,1),3> + <(100,200),10> | <(1,3),5> + <(100,200),10> | <(1,2),3> + <(100,200),10> | <(3,5),0> +(4 rows) + +-- Overlap or right of circle +SELECT c1.f1, c2.f1 FROM CIRCLE_TBL c1, CIRCLE_TBL c2 WHERE c1.f1 &> c2.f1; + f1 | f1 +----------------+---------------- + <(5,1),3> | <(5,1),3> + <(5,1),3> | <(1,2),100> + <(5,1),3> | 
<(1,3),5> + <(5,1),3> | <(1,2),3> + <(5,1),3> | <(100,1),115> + <(1,2),100> | <(1,2),100> + <(1,3),5> | <(1,2),100> + <(1,3),5> | <(1,3),5> + <(1,3),5> | <(100,1),115> + <(1,2),3> | <(1,2),100> + <(1,2),3> | <(1,3),5> + <(1,2),3> | <(1,2),3> + <(1,2),3> | <(100,1),115> + <(100,200),10> | <(5,1),3> + <(100,200),10> | <(1,2),100> + <(100,200),10> | <(1,3),5> + <(100,200),10> | <(1,2),3> + <(100,200),10> | <(100,200),10> + <(100,200),10> | <(100,1),115> + <(100,200),10> | <(3,5),0> + <(100,1),115> | <(1,2),100> + <(100,1),115> | <(100,1),115> + <(3,5),0> | <(5,1),3> + <(3,5),0> | <(1,2),100> + <(3,5),0> | <(1,3),5> + <(3,5),0> | <(1,2),3> + <(3,5),0> | <(100,1),115> + <(3,5),0> | <(3,5),0> +(28 rows) + +-- Contained by circle +SELECT c1.f1, c2.f1 FROM CIRCLE_TBL c1, CIRCLE_TBL c2 WHERE c1.f1 <@ c2.f1; + f1 | f1 +----------------+---------------- + <(5,1),3> | <(5,1),3> + <(5,1),3> | <(1,2),100> + <(5,1),3> | <(100,1),115> + <(1,2),100> | <(1,2),100> + <(1,3),5> | <(1,2),100> + <(1,3),5> | <(1,3),5> + <(1,3),5> | <(100,1),115> + <(1,2),3> | <(1,2),100> + <(1,2),3> | <(1,3),5> + <(1,2),3> | <(1,2),3> + <(1,2),3> | <(100,1),115> + <(100,200),10> | <(100,200),10> + <(100,1),115> | <(100,1),115> + <(3,5),0> | <(1,2),100> + <(3,5),0> | <(1,3),5> + <(3,5),0> | <(100,1),115> + <(3,5),0> | <(3,5),0> +(17 rows) + +-- Contain by circle +SELECT c1.f1, c2.f1 FROM CIRCLE_TBL c1, CIRCLE_TBL c2 WHERE c1.f1 @> c2.f1; + f1 | f1 +----------------+---------------- + <(5,1),3> | <(5,1),3> + <(1,2),100> | <(5,1),3> + <(1,2),100> | <(1,2),100> + <(1,2),100> | <(1,3),5> + <(1,2),100> | <(1,2),3> + <(1,2),100> | <(3,5),0> + <(1,3),5> | <(1,3),5> + <(1,3),5> | <(1,2),3> + <(1,3),5> | <(3,5),0> + <(1,2),3> | <(1,2),3> + <(100,200),10> | <(100,200),10> + <(100,1),115> | <(5,1),3> + <(100,1),115> | <(1,3),5> + <(100,1),115> | <(1,2),3> + <(100,1),115> | <(100,1),115> + <(100,1),115> | <(3,5),0> + <(3,5),0> | <(3,5),0> +(17 rows) + +-- Below circle +SELECT c1.f1, c2.f1 FROM CIRCLE_TBL c1, CIRCLE_TBL c2 WHERE c1.f1 <<| c2.f1; + f1 | f1 +---------------+---------------- + <(5,1),3> | <(100,200),10> + <(5,1),3> | <(3,5),0> + <(1,2),100> | <(100,200),10> + <(1,3),5> | <(100,200),10> + <(1,2),3> | <(100,200),10> + <(100,1),115> | <(100,200),10> + <(3,5),0> | <(100,200),10> +(7 rows) + +-- Above circle +SELECT c1.f1, c2.f1 FROM CIRCLE_TBL c1, CIRCLE_TBL c2 WHERE c1.f1 |>> c2.f1; + f1 | f1 +----------------+--------------- + <(100,200),10> | <(5,1),3> + <(100,200),10> | <(1,2),100> + <(100,200),10> | <(1,3),5> + <(100,200),10> | <(1,2),3> + <(100,200),10> | <(100,1),115> + <(100,200),10> | <(3,5),0> + <(3,5),0> | <(5,1),3> +(7 rows) + +-- Overlap or below circle +SELECT c1.f1, c2.f1 FROM CIRCLE_TBL c1, CIRCLE_TBL c2 WHERE c1.f1 &<| c2.f1; + f1 | f1 +----------------+---------------- + <(5,1),3> | <(5,1),3> + <(5,1),3> | <(1,2),100> + <(5,1),3> | <(1,3),5> + <(5,1),3> | <(1,2),3> + <(5,1),3> | <(100,200),10> + <(5,1),3> | <(100,1),115> + <(5,1),3> | <(3,5),0> + <(1,2),100> | <(1,2),100> + <(1,2),100> | <(100,200),10> + <(1,2),100> | <(100,1),115> + <(1,3),5> | <(1,2),100> + <(1,3),5> | <(1,3),5> + <(1,3),5> | <(100,200),10> + <(1,3),5> | <(100,1),115> + <(1,2),3> | <(1,2),100> + <(1,2),3> | <(1,3),5> + <(1,2),3> | <(1,2),3> + <(1,2),3> | <(100,200),10> + <(1,2),3> | <(100,1),115> + <(1,2),3> | <(3,5),0> + <(100,200),10> | <(100,200),10> + <(100,1),115> | <(100,200),10> + <(100,1),115> | <(100,1),115> + <(3,5),0> | <(1,2),100> + <(3,5),0> | <(1,3),5> + <(3,5),0> | <(1,2),3> + <(3,5),0> | <(100,200),10> + <(3,5),0> | <(100,1),115> 
+ <(3,5),0> | <(3,5),0> +(29 rows) + +-- Overlap or above circle +SELECT c1.f1, c2.f1 FROM CIRCLE_TBL c1, CIRCLE_TBL c2 WHERE c1.f1 |&> c2.f1; + f1 | f1 +----------------+---------------- + <(5,1),3> | <(5,1),3> + <(5,1),3> | <(1,2),100> + <(5,1),3> | <(1,3),5> + <(5,1),3> | <(100,1),115> + <(1,2),100> | <(1,2),100> + <(1,2),100> | <(100,1),115> + <(1,3),5> | <(5,1),3> + <(1,3),5> | <(1,2),100> + <(1,3),5> | <(1,3),5> + <(1,3),5> | <(100,1),115> + <(1,2),3> | <(5,1),3> + <(1,2),3> | <(1,2),100> + <(1,2),3> | <(1,3),5> + <(1,2),3> | <(1,2),3> + <(1,2),3> | <(100,1),115> + <(100,200),10> | <(5,1),3> + <(100,200),10> | <(1,2),100> + <(100,200),10> | <(1,3),5> + <(100,200),10> | <(1,2),3> + <(100,200),10> | <(100,200),10> + <(100,200),10> | <(100,1),115> + <(100,200),10> | <(3,5),0> + <(100,1),115> | <(100,1),115> + <(3,5),0> | <(5,1),3> + <(3,5),0> | <(1,2),100> + <(3,5),0> | <(1,3),5> + <(3,5),0> | <(1,2),3> + <(3,5),0> | <(100,1),115> + <(3,5),0> | <(3,5),0> +(29 rows) + +-- Area equal with circle +SELECT c1.f1, c2.f1 FROM CIRCLE_TBL c1, CIRCLE_TBL c2 WHERE c1.f1 = c2.f1; + f1 | f1 +----------------+---------------- + <(5,1),3> | <(5,1),3> + <(5,1),3> | <(1,2),3> + <(1,2),100> | <(1,2),100> + <(1,3),5> | <(1,3),5> + <(1,2),3> | <(5,1),3> + <(1,2),3> | <(1,2),3> + <(100,200),10> | <(100,200),10> + <(100,1),115> | <(100,1),115> + <(3,5),0> | <(3,5),0> +(9 rows) + +-- Area not equal with circle +SELECT c1.f1, c2.f1 FROM CIRCLE_TBL c1, CIRCLE_TBL c2 WHERE c1.f1 != c2.f1; + f1 | f1 +----------------+---------------- + <(5,1),3> | <(1,2),100> + <(5,1),3> | <(1,3),5> + <(5,1),3> | <(100,200),10> + <(5,1),3> | <(100,1),115> + <(5,1),3> | <(3,5),0> + <(1,2),100> | <(5,1),3> + <(1,2),100> | <(1,3),5> + <(1,2),100> | <(1,2),3> + <(1,2),100> | <(100,200),10> + <(1,2),100> | <(100,1),115> + <(1,2),100> | <(3,5),0> + <(1,3),5> | <(5,1),3> + <(1,3),5> | <(1,2),100> + <(1,3),5> | <(1,2),3> + <(1,3),5> | <(100,200),10> + <(1,3),5> | <(100,1),115> + <(1,3),5> | <(3,5),0> + <(1,2),3> | <(1,2),100> + <(1,2),3> | <(1,3),5> + <(1,2),3> | <(100,200),10> + <(1,2),3> | <(100,1),115> + <(1,2),3> | <(3,5),0> + <(100,200),10> | <(5,1),3> + <(100,200),10> | <(1,2),100> + <(100,200),10> | <(1,3),5> + <(100,200),10> | <(1,2),3> + <(100,200),10> | <(100,1),115> + <(100,200),10> | <(3,5),0> + <(100,1),115> | <(5,1),3> + <(100,1),115> | <(1,2),100> + <(100,1),115> | <(1,3),5> + <(100,1),115> | <(1,2),3> + <(100,1),115> | <(100,200),10> + <(100,1),115> | <(3,5),0> + <(3,5),0> | <(5,1),3> + <(3,5),0> | <(1,2),100> + <(3,5),0> | <(1,3),5> + <(3,5),0> | <(1,2),3> + <(3,5),0> | <(100,200),10> + <(3,5),0> | <(100,1),115> +(40 rows) + +-- Area less than circle +SELECT c1.f1, c2.f1 FROM CIRCLE_TBL c1, CIRCLE_TBL c2 WHERE c1.f1 < c2.f1; + f1 | f1 +----------------+---------------- + <(5,1),3> | <(1,2),100> + <(5,1),3> | <(1,3),5> + <(5,1),3> | <(100,200),10> + <(5,1),3> | <(100,1),115> + <(1,2),100> | <(100,1),115> + <(1,3),5> | <(1,2),100> + <(1,3),5> | <(100,200),10> + <(1,3),5> | <(100,1),115> + <(1,2),3> | <(1,2),100> + <(1,2),3> | <(1,3),5> + <(1,2),3> | <(100,200),10> + <(1,2),3> | <(100,1),115> + <(100,200),10> | <(1,2),100> + <(100,200),10> | <(100,1),115> + <(3,5),0> | <(5,1),3> + <(3,5),0> | <(1,2),100> + <(3,5),0> | <(1,3),5> + <(3,5),0> | <(1,2),3> + <(3,5),0> | <(100,200),10> + <(3,5),0> | <(100,1),115> +(20 rows) + +-- Area greater than circle +SELECT c1.f1, c2.f1 FROM CIRCLE_TBL c1, CIRCLE_TBL c2 WHERE c1.f1 > c2.f1; + f1 | f1 +----------------+---------------- + <(5,1),3> | <(3,5),0> + <(1,2),100> | <(5,1),3> + 
<(1,2),100> | <(1,3),5> + <(1,2),100> | <(1,2),3> + <(1,2),100> | <(100,200),10> + <(1,2),100> | <(3,5),0> + <(1,3),5> | <(5,1),3> + <(1,3),5> | <(1,2),3> + <(1,3),5> | <(3,5),0> + <(1,2),3> | <(3,5),0> + <(100,200),10> | <(5,1),3> + <(100,200),10> | <(1,3),5> + <(100,200),10> | <(1,2),3> + <(100,200),10> | <(3,5),0> + <(100,1),115> | <(5,1),3> + <(100,1),115> | <(1,2),100> + <(100,1),115> | <(1,3),5> + <(100,1),115> | <(1,2),3> + <(100,1),115> | <(100,200),10> + <(100,1),115> | <(3,5),0> +(20 rows) + +-- Area less than or equal circle +SELECT c1.f1, c2.f1 FROM CIRCLE_TBL c1, CIRCLE_TBL c2 WHERE c1.f1 <= c2.f1; + f1 | f1 +----------------+---------------- + <(5,1),3> | <(5,1),3> + <(5,1),3> | <(1,2),100> + <(5,1),3> | <(1,3),5> + <(5,1),3> | <(1,2),3> + <(5,1),3> | <(100,200),10> + <(5,1),3> | <(100,1),115> + <(1,2),100> | <(1,2),100> + <(1,2),100> | <(100,1),115> + <(1,3),5> | <(1,2),100> + <(1,3),5> | <(1,3),5> + <(1,3),5> | <(100,200),10> + <(1,3),5> | <(100,1),115> + <(1,2),3> | <(5,1),3> + <(1,2),3> | <(1,2),100> + <(1,2),3> | <(1,3),5> + <(1,2),3> | <(1,2),3> + <(1,2),3> | <(100,200),10> + <(1,2),3> | <(100,1),115> + <(100,200),10> | <(1,2),100> + <(100,200),10> | <(100,200),10> + <(100,200),10> | <(100,1),115> + <(100,1),115> | <(100,1),115> + <(3,5),0> | <(5,1),3> + <(3,5),0> | <(1,2),100> + <(3,5),0> | <(1,3),5> + <(3,5),0> | <(1,2),3> + <(3,5),0> | <(100,200),10> + <(3,5),0> | <(100,1),115> + <(3,5),0> | <(3,5),0> +(29 rows) + +-- Area greater than or equal circle +SELECT c1.f1, c2.f1 FROM CIRCLE_TBL c1, CIRCLE_TBL c2 WHERE c1.f1 >= c2.f1; + f1 | f1 +----------------+---------------- + <(5,1),3> | <(5,1),3> + <(5,1),3> | <(1,2),3> + <(5,1),3> | <(3,5),0> + <(1,2),100> | <(5,1),3> + <(1,2),100> | <(1,2),100> + <(1,2),100> | <(1,3),5> + <(1,2),100> | <(1,2),3> + <(1,2),100> | <(100,200),10> + <(1,2),100> | <(3,5),0> + <(1,3),5> | <(5,1),3> + <(1,3),5> | <(1,3),5> + <(1,3),5> | <(1,2),3> + <(1,3),5> | <(3,5),0> + <(1,2),3> | <(5,1),3> + <(1,2),3> | <(1,2),3> + <(1,2),3> | <(3,5),0> + <(100,200),10> | <(5,1),3> + <(100,200),10> | <(1,3),5> + <(100,200),10> | <(1,2),3> + <(100,200),10> | <(100,200),10> + <(100,200),10> | <(3,5),0> + <(100,1),115> | <(5,1),3> + <(100,1),115> | <(1,2),100> + <(100,1),115> | <(1,3),5> + <(100,1),115> | <(1,2),3> + <(100,1),115> | <(100,200),10> + <(100,1),115> | <(100,1),115> + <(100,1),115> | <(3,5),0> + <(3,5),0> | <(3,5),0> +(29 rows) + +-- Area less than circle +SELECT c1.f1, c2.f1 FROM CIRCLE_TBL c1, CIRCLE_TBL c2 WHERE c1.f1 < c2.f1; + f1 | f1 +----------------+---------------- + <(5,1),3> | <(1,2),100> + <(5,1),3> | <(1,3),5> + <(5,1),3> | <(100,200),10> + <(5,1),3> | <(100,1),115> + <(1,2),100> | <(100,1),115> + <(1,3),5> | <(1,2),100> + <(1,3),5> | <(100,200),10> + <(1,3),5> | <(100,1),115> + <(1,2),3> | <(1,2),100> + <(1,2),3> | <(1,3),5> + <(1,2),3> | <(100,200),10> + <(1,2),3> | <(100,1),115> + <(100,200),10> | <(1,2),100> + <(100,200),10> | <(100,1),115> + <(3,5),0> | <(5,1),3> + <(3,5),0> | <(1,2),100> + <(3,5),0> | <(1,3),5> + <(3,5),0> | <(1,2),3> + <(3,5),0> | <(100,200),10> + <(3,5),0> | <(100,1),115> +(20 rows) + +-- Area greater than circle +SELECT c1.f1, c2.f1 FROM CIRCLE_TBL c1, CIRCLE_TBL c2 WHERE c1.f1 < c2.f1; + f1 | f1 +----------------+---------------- + <(5,1),3> | <(1,2),100> + <(5,1),3> | <(1,3),5> + <(5,1),3> | <(100,200),10> + <(5,1),3> | <(100,1),115> + <(1,2),100> | <(100,1),115> + <(1,3),5> | <(1,2),100> + <(1,3),5> | <(100,200),10> + <(1,3),5> | <(100,1),115> + <(1,2),3> | <(1,2),100> + <(1,2),3> | <(1,3),5> + 
<(1,2),3> | <(100,200),10> + <(1,2),3> | <(100,1),115> + <(100,200),10> | <(1,2),100> + <(100,200),10> | <(100,1),115> + <(3,5),0> | <(5,1),3> + <(3,5),0> | <(1,2),100> + <(3,5),0> | <(1,3),5> + <(3,5),0> | <(1,2),3> + <(3,5),0> | <(100,200),10> + <(3,5),0> | <(100,1),115> +(20 rows) + +-- Add point +SELECT c.f1, p.f1, c.f1 + p.f1 FROM CIRCLE_TBL c, POINT_TBL p; + f1 | f1 | ?column? +----------------+-------------------+------------------------- + <(5,1),3> | (0,0) | <(5,1),3> + <(1,2),100> | (0,0) | <(1,2),100> + <(1,3),5> | (0,0) | <(1,3),5> + <(1,2),3> | (0,0) | <(1,2),3> + <(100,200),10> | (0,0) | <(100,200),10> + <(100,1),115> | (0,0) | <(100,1),115> + <(3,5),0> | (0,0) | <(3,5),0> + <(3,5),NaN> | (0,0) | <(3,5),NaN> + <(5,1),3> | (-10,0) | <(-5,1),3> + <(1,2),100> | (-10,0) | <(-9,2),100> + <(1,3),5> | (-10,0) | <(-9,3),5> + <(1,2),3> | (-10,0) | <(-9,2),3> + <(100,200),10> | (-10,0) | <(90,200),10> + <(100,1),115> | (-10,0) | <(90,1),115> + <(3,5),0> | (-10,0) | <(-7,5),0> + <(3,5),NaN> | (-10,0) | <(-7,5),NaN> + <(5,1),3> | (-3,4) | <(2,5),3> + <(1,2),100> | (-3,4) | <(-2,6),100> + <(1,3),5> | (-3,4) | <(-2,7),5> + <(1,2),3> | (-3,4) | <(-2,6),3> + <(100,200),10> | (-3,4) | <(97,204),10> + <(100,1),115> | (-3,4) | <(97,5),115> + <(3,5),0> | (-3,4) | <(0,9),0> + <(3,5),NaN> | (-3,4) | <(0,9),NaN> + <(5,1),3> | (5.1,34.5) | <(10.1,35.5),3> + <(1,2),100> | (5.1,34.5) | <(6.1,36.5),100> + <(1,3),5> | (5.1,34.5) | <(6.1,37.5),5> + <(1,2),3> | (5.1,34.5) | <(6.1,36.5),3> + <(100,200),10> | (5.1,34.5) | <(105.1,234.5),10> + <(100,1),115> | (5.1,34.5) | <(105.1,35.5),115> + <(3,5),0> | (5.1,34.5) | <(8.1,39.5),0> + <(3,5),NaN> | (5.1,34.5) | <(8.1,39.5),NaN> + <(5,1),3> | (-5,-12) | <(0,-11),3> + <(1,2),100> | (-5,-12) | <(-4,-10),100> + <(1,3),5> | (-5,-12) | <(-4,-9),5> + <(1,2),3> | (-5,-12) | <(-4,-10),3> + <(100,200),10> | (-5,-12) | <(95,188),10> + <(100,1),115> | (-5,-12) | <(95,-11),115> + <(3,5),0> | (-5,-12) | <(-2,-7),0> + <(3,5),NaN> | (-5,-12) | <(-2,-7),NaN> + <(5,1),3> | (1e-300,-1e-300) | <(5,1),3> + <(1,2),100> | (1e-300,-1e-300) | <(1,2),100> + <(1,3),5> | (1e-300,-1e-300) | <(1,3),5> + <(1,2),3> | (1e-300,-1e-300) | <(1,2),3> + <(100,200),10> | (1e-300,-1e-300) | <(100,200),10> + <(100,1),115> | (1e-300,-1e-300) | <(100,1),115> + <(3,5),0> | (1e-300,-1e-300) | <(3,5),0> + <(3,5),NaN> | (1e-300,-1e-300) | <(3,5),NaN> + <(5,1),3> | (1e+300,Infinity) | <(1e+300,Infinity),3> + <(1,2),100> | (1e+300,Infinity) | <(1e+300,Infinity),100> + <(1,3),5> | (1e+300,Infinity) | <(1e+300,Infinity),5> + <(1,2),3> | (1e+300,Infinity) | <(1e+300,Infinity),3> + <(100,200),10> | (1e+300,Infinity) | <(1e+300,Infinity),10> + <(100,1),115> | (1e+300,Infinity) | <(1e+300,Infinity),115> + <(3,5),0> | (1e+300,Infinity) | <(1e+300,Infinity),0> + <(3,5),NaN> | (1e+300,Infinity) | <(1e+300,Infinity),NaN> + <(5,1),3> | (NaN,NaN) | <(NaN,NaN),3> + <(1,2),100> | (NaN,NaN) | <(NaN,NaN),100> + <(1,3),5> | (NaN,NaN) | <(NaN,NaN),5> + <(1,2),3> | (NaN,NaN) | <(NaN,NaN),3> + <(100,200),10> | (NaN,NaN) | <(NaN,NaN),10> + <(100,1),115> | (NaN,NaN) | <(NaN,NaN),115> + <(3,5),0> | (NaN,NaN) | <(NaN,NaN),0> + <(3,5),NaN> | (NaN,NaN) | <(NaN,NaN),NaN> + <(5,1),3> | (10,10) | <(15,11),3> + <(1,2),100> | (10,10) | <(11,12),100> + <(1,3),5> | (10,10) | <(11,13),5> + <(1,2),3> | (10,10) | <(11,12),3> + <(100,200),10> | (10,10) | <(110,210),10> + <(100,1),115> | (10,10) | <(110,11),115> + <(3,5),0> | (10,10) | <(13,15),0> + <(3,5),NaN> | (10,10) | <(13,15),NaN> +(72 rows) + +-- Subtract point +SELECT c.f1, p.f1, c.f1 - 
p.f1 FROM CIRCLE_TBL c, POINT_TBL p; + f1 | f1 | ?column? +----------------+-------------------+--------------------------- + <(5,1),3> | (0,0) | <(5,1),3> + <(1,2),100> | (0,0) | <(1,2),100> + <(1,3),5> | (0,0) | <(1,3),5> + <(1,2),3> | (0,0) | <(1,2),3> + <(100,200),10> | (0,0) | <(100,200),10> + <(100,1),115> | (0,0) | <(100,1),115> + <(3,5),0> | (0,0) | <(3,5),0> + <(3,5),NaN> | (0,0) | <(3,5),NaN> + <(5,1),3> | (-10,0) | <(15,1),3> + <(1,2),100> | (-10,0) | <(11,2),100> + <(1,3),5> | (-10,0) | <(11,3),5> + <(1,2),3> | (-10,0) | <(11,2),3> + <(100,200),10> | (-10,0) | <(110,200),10> + <(100,1),115> | (-10,0) | <(110,1),115> + <(3,5),0> | (-10,0) | <(13,5),0> + <(3,5),NaN> | (-10,0) | <(13,5),NaN> + <(5,1),3> | (-3,4) | <(8,-3),3> + <(1,2),100> | (-3,4) | <(4,-2),100> + <(1,3),5> | (-3,4) | <(4,-1),5> + <(1,2),3> | (-3,4) | <(4,-2),3> + <(100,200),10> | (-3,4) | <(103,196),10> + <(100,1),115> | (-3,4) | <(103,-3),115> + <(3,5),0> | (-3,4) | <(6,1),0> + <(3,5),NaN> | (-3,4) | <(6,1),NaN> + <(5,1),3> | (5.1,34.5) | <(-0.1,-33.5),3> + <(1,2),100> | (5.1,34.5) | <(-4.1,-32.5),100> + <(1,3),5> | (5.1,34.5) | <(-4.1,-31.5),5> + <(1,2),3> | (5.1,34.5) | <(-4.1,-32.5),3> + <(100,200),10> | (5.1,34.5) | <(94.9,165.5),10> + <(100,1),115> | (5.1,34.5) | <(94.9,-33.5),115> + <(3,5),0> | (5.1,34.5) | <(-2.1,-29.5),0> + <(3,5),NaN> | (5.1,34.5) | <(-2.1,-29.5),NaN> + <(5,1),3> | (-5,-12) | <(10,13),3> + <(1,2),100> | (-5,-12) | <(6,14),100> + <(1,3),5> | (-5,-12) | <(6,15),5> + <(1,2),3> | (-5,-12) | <(6,14),3> + <(100,200),10> | (-5,-12) | <(105,212),10> + <(100,1),115> | (-5,-12) | <(105,13),115> + <(3,5),0> | (-5,-12) | <(8,17),0> + <(3,5),NaN> | (-5,-12) | <(8,17),NaN> + <(5,1),3> | (1e-300,-1e-300) | <(5,1),3> + <(1,2),100> | (1e-300,-1e-300) | <(1,2),100> + <(1,3),5> | (1e-300,-1e-300) | <(1,3),5> + <(1,2),3> | (1e-300,-1e-300) | <(1,2),3> + <(100,200),10> | (1e-300,-1e-300) | <(100,200),10> + <(100,1),115> | (1e-300,-1e-300) | <(100,1),115> + <(3,5),0> | (1e-300,-1e-300) | <(3,5),0> + <(3,5),NaN> | (1e-300,-1e-300) | <(3,5),NaN> + <(5,1),3> | (1e+300,Infinity) | <(-1e+300,-Infinity),3> + <(1,2),100> | (1e+300,Infinity) | <(-1e+300,-Infinity),100> + <(1,3),5> | (1e+300,Infinity) | <(-1e+300,-Infinity),5> + <(1,2),3> | (1e+300,Infinity) | <(-1e+300,-Infinity),3> + <(100,200),10> | (1e+300,Infinity) | <(-1e+300,-Infinity),10> + <(100,1),115> | (1e+300,Infinity) | <(-1e+300,-Infinity),115> + <(3,5),0> | (1e+300,Infinity) | <(-1e+300,-Infinity),0> + <(3,5),NaN> | (1e+300,Infinity) | <(-1e+300,-Infinity),NaN> + <(5,1),3> | (NaN,NaN) | <(NaN,NaN),3> + <(1,2),100> | (NaN,NaN) | <(NaN,NaN),100> + <(1,3),5> | (NaN,NaN) | <(NaN,NaN),5> + <(1,2),3> | (NaN,NaN) | <(NaN,NaN),3> + <(100,200),10> | (NaN,NaN) | <(NaN,NaN),10> + <(100,1),115> | (NaN,NaN) | <(NaN,NaN),115> + <(3,5),0> | (NaN,NaN) | <(NaN,NaN),0> + <(3,5),NaN> | (NaN,NaN) | <(NaN,NaN),NaN> + <(5,1),3> | (10,10) | <(-5,-9),3> + <(1,2),100> | (10,10) | <(-9,-8),100> + <(1,3),5> | (10,10) | <(-9,-7),5> + <(1,2),3> | (10,10) | <(-9,-8),3> + <(100,200),10> | (10,10) | <(90,190),10> + <(100,1),115> | (10,10) | <(90,-9),115> + <(3,5),0> | (10,10) | <(-7,-5),0> + <(3,5),NaN> | (10,10) | <(-7,-5),NaN> +(72 rows) + +-- Multiply with point +SELECT c.f1, p.f1, c.f1 * p.f1 FROM CIRCLE_TBL c, POINT_TBL p; + f1 | f1 | ?column? 
+----------------+-------------------+-------------------------------------------- + <(5,1),3> | (0,0) | <(0,0),0> + <(1,2),100> | (0,0) | <(0,0),0> + <(1,3),5> | (0,0) | <(0,0),0> + <(1,2),3> | (0,0) | <(0,0),0> + <(100,200),10> | (0,0) | <(0,0),0> + <(100,1),115> | (0,0) | <(0,0),0> + <(3,5),0> | (0,0) | <(0,0),0> + <(3,5),NaN> | (0,0) | <(0,0),NaN> + <(5,1),3> | (-10,0) | <(-50,-10),30> + <(1,2),100> | (-10,0) | <(-10,-20),1000> + <(1,3),5> | (-10,0) | <(-10,-30),50> + <(1,2),3> | (-10,0) | <(-10,-20),30> + <(100,200),10> | (-10,0) | <(-1000,-2000),100> + <(100,1),115> | (-10,0) | <(-1000,-10),1150> + <(3,5),0> | (-10,0) | <(-30,-50),0> + <(3,5),NaN> | (-10,0) | <(-30,-50),NaN> + <(5,1),3> | (-3,4) | <(-19,17),15> + <(1,2),100> | (-3,4) | <(-11,-2),500> + <(1,3),5> | (-3,4) | <(-15,-5),25> + <(1,2),3> | (-3,4) | <(-11,-2),15> + <(100,200),10> | (-3,4) | <(-1100,-200),50> + <(100,1),115> | (-3,4) | <(-304,397),575> + <(3,5),0> | (-3,4) | <(-29,-3),0> + <(3,5),NaN> | (-3,4) | <(-29,-3),NaN> + <(5,1),3> | (5.1,34.5) | <(-9,177.6),104.624758064> + <(1,2),100> | (5.1,34.5) | <(-63.9,44.7),3487.49193547> + <(1,3),5> | (5.1,34.5) | <(-98.4,49.8),174.374596774> + <(1,2),3> | (5.1,34.5) | <(-63.9,44.7),104.624758064> + <(100,200),10> | (5.1,34.5) | <(-6390,4470),348.749193547> + <(100,1),115> | (5.1,34.5) | <(475.5,3455.1),4010.6157258> + <(3,5),0> | (5.1,34.5) | <(-157.2,129),0> + <(3,5),NaN> | (5.1,34.5) | <(-157.2,129),NaN> + <(5,1),3> | (-5,-12) | <(-13,-65),39> + <(1,2),100> | (-5,-12) | <(19,-22),1300> + <(1,3),5> | (-5,-12) | <(31,-27),65> + <(1,2),3> | (-5,-12) | <(19,-22),39> + <(100,200),10> | (-5,-12) | <(1900,-2200),130> + <(100,1),115> | (-5,-12) | <(-488,-1205),1495> + <(3,5),0> | (-5,-12) | <(45,-61),0> + <(3,5),NaN> | (-5,-12) | <(45,-61),NaN> + <(5,1),3> | (1e-300,-1e-300) | <(6e-300,-4e-300),4.24264068712e-300> + <(1,2),100> | (1e-300,-1e-300) | <(3e-300,1e-300),1.41421356237e-298> + <(1,3),5> | (1e-300,-1e-300) | <(4e-300,2e-300),7.07106781187e-300> + <(1,2),3> | (1e-300,-1e-300) | <(3e-300,1e-300),4.24264068712e-300> + <(100,200),10> | (1e-300,-1e-300) | <(3e-298,1e-298),1.41421356237e-299> + <(100,1),115> | (1e-300,-1e-300) | <(1.01e-298,-9.9e-299),1.62634559673e-298> + <(3,5),0> | (1e-300,-1e-300) | <(8e-300,2e-300),0> + <(3,5),NaN> | (1e-300,-1e-300) | <(8e-300,2e-300),NaN> + <(5,1),3> | (1e+300,Infinity) | <(-Infinity,Infinity),Infinity> + <(1,2),100> | (1e+300,Infinity) | <(-Infinity,Infinity),Infinity> + <(1,3),5> | (1e+300,Infinity) | <(-Infinity,Infinity),Infinity> + <(1,2),3> | (1e+300,Infinity) | <(-Infinity,Infinity),Infinity> + <(100,200),10> | (1e+300,Infinity) | <(-Infinity,Infinity),Infinity> + <(100,1),115> | (1e+300,Infinity) | <(-Infinity,Infinity),Infinity> + <(3,5),0> | (1e+300,Infinity) | <(-Infinity,Infinity),NaN> + <(3,5),NaN> | (1e+300,Infinity) | <(-Infinity,Infinity),NaN> + <(5,1),3> | (NaN,NaN) | <(NaN,NaN),NaN> + <(1,2),100> | (NaN,NaN) | <(NaN,NaN),NaN> + <(1,3),5> | (NaN,NaN) | <(NaN,NaN),NaN> + <(1,2),3> | (NaN,NaN) | <(NaN,NaN),NaN> + <(100,200),10> | (NaN,NaN) | <(NaN,NaN),NaN> + <(100,1),115> | (NaN,NaN) | <(NaN,NaN),NaN> + <(3,5),0> | (NaN,NaN) | <(NaN,NaN),NaN> + <(3,5),NaN> | (NaN,NaN) | <(NaN,NaN),NaN> + <(5,1),3> | (10,10) | <(40,60),42.4264068712> + <(1,2),100> | (10,10) | <(-10,30),1414.21356237> + <(1,3),5> | (10,10) | <(-20,40),70.7106781187> + <(1,2),3> | (10,10) | <(-10,30),42.4264068712> + <(100,200),10> | (10,10) | <(-1000,3000),141.421356237> + <(100,1),115> | (10,10) | <(990,1010),1626.34559673> + <(3,5),0> | (10,10) | 
<(-20,80),0> + <(3,5),NaN> | (10,10) | <(-20,80),NaN> +(72 rows) + +-- Divide by point +SELECT c.f1, p.f1, c.f1 / p.f1 FROM CIRCLE_TBL c, POINT_TBL p WHERE p.f1[0] BETWEEN 1 AND 1000; + f1 | f1 | ?column? +----------------+------------+------------------------------------------------------ + <(5,1),3> | (5.1,34.5) | <(0.0493315573973,-0.137635045138),0.0860217042937> + <(5,1),3> | (10,10) | <(0.3,-0.2),0.212132034356> + <(1,2),100> | (5.1,34.5) | <(0.0609244733856,-0.0199792807459),2.86739014312> + <(1,2),100> | (10,10) | <(0.15,0.05),7.07106781187> + <(1,3),5> | (5.1,34.5) | <(0.0892901188891,-0.0157860983671),0.143369507156> + <(1,3),5> | (10,10) | <(0.2,0.1),0.353553390593> + <(1,2),3> | (5.1,34.5) | <(0.0609244733856,-0.0199792807459),0.0860217042937> + <(1,2),3> | (10,10) | <(0.15,0.05),0.212132034356> + <(100,200),10> | (5.1,34.5) | <(6.09244733856,-1.99792807459),0.286739014312> + <(100,200),10> | (10,10) | <(15,5),0.707106781187> + <(100,1),115> | (5.1,34.5) | <(0.44768388338,-2.83237136796),3.29749866459> + <(100,1),115> | (10,10) | <(5.05,-4.95),8.13172798365> + <(3,5),0> | (5.1,34.5) | <(0.154407774653,-0.0641310246164),0> + <(3,5),0> | (10,10) | <(0.4,0.1),0> + <(3,5),NaN> | (5.1,34.5) | <(0.154407774653,-0.0641310246164),NaN> + <(3,5),NaN> | (10,10) | <(0.4,0.1),NaN> +(16 rows) + +-- Overflow error +SELECT c.f1, p.f1, c.f1 / p.f1 FROM CIRCLE_TBL c, POINT_TBL p WHERE p.f1[0] > 1000; +ERROR: value out of range: overflow +-- Division by 0 error +SELECT c.f1, p.f1, c.f1 / p.f1 FROM CIRCLE_TBL c, POINT_TBL p WHERE p.f1 ~= '(0,0)'::point; +ERROR: division by zero +-- Distance to polygon +SELECT c.f1, p.f1, c.f1 <-> p.f1 FROM CIRCLE_TBL c, POLYGON_TBL p; + f1 | f1 | ?column? +----------------+----------------------------+---------------- + <(5,1),3> | ((2,0),(2,4),(0,0)) | 0 + <(5,1),3> | ((3,1),(3,3),(1,0)) | 0 + <(5,1),3> | ((1,2),(3,4),(5,6),(7,8)) | 0.535533905933 + <(5,1),3> | ((7,8),(5,6),(3,4),(1,2)) | 0.535533905933 + <(5,1),3> | ((1,2),(7,8),(5,6),(3,-4)) | 0 + <(5,1),3> | ((0,0)) | 2.09901951359 + <(5,1),3> | ((0,1),(0,1)) | 2 + <(1,2),100> | ((2,0),(2,4),(0,0)) | 0 + <(1,2),100> | ((3,1),(3,3),(1,0)) | 0 + <(1,2),100> | ((1,2),(3,4),(5,6),(7,8)) | 0 + <(1,2),100> | ((7,8),(5,6),(3,4),(1,2)) | 0 + <(1,2),100> | ((1,2),(7,8),(5,6),(3,-4)) | 0 + <(1,2),100> | ((0,0)) | 0 + <(1,2),100> | ((0,1),(0,1)) | 0 + <(1,3),5> | ((2,0),(2,4),(0,0)) | 0 + <(1,3),5> | ((3,1),(3,3),(1,0)) | 0 + <(1,3),5> | ((1,2),(3,4),(5,6),(7,8)) | 0 + <(1,3),5> | ((7,8),(5,6),(3,4),(1,2)) | 0 + <(1,3),5> | ((1,2),(7,8),(5,6),(3,-4)) | 0 + <(1,3),5> | ((0,0)) | 0 + <(1,3),5> | ((0,1),(0,1)) | 0 + <(1,2),3> | ((2,0),(2,4),(0,0)) | 0 + <(1,2),3> | ((3,1),(3,3),(1,0)) | 0 + <(1,2),3> | ((1,2),(3,4),(5,6),(7,8)) | 0 + <(1,2),3> | ((7,8),(5,6),(3,4),(1,2)) | 0 + <(1,2),3> | ((1,2),(7,8),(5,6),(3,-4)) | 0 + <(1,2),3> | ((0,0)) | 0 + <(1,2),3> | ((0,1),(0,1)) | 0 + <(100,200),10> | ((2,0),(2,4),(0,0)) | 209.134661795 + <(100,200),10> | ((3,1),(3,3),(1,0)) | 209.585974051 + <(100,200),10> | ((1,2),(3,4),(5,6),(7,8)) | 203.337760371 + <(100,200),10> | ((7,8),(5,6),(3,4),(1,2)) | 203.337760371 + <(100,200),10> | ((1,2),(7,8),(5,6),(3,-4)) | 203.337760371 + <(100,200),10> | ((0,0)) | 213.60679775 + <(100,200),10> | ((0,1),(0,1)) | 212.712819568 + <(100,1),115> | ((2,0),(2,4),(0,0)) | 0 + <(100,1),115> | ((3,1),(3,3),(1,0)) | 0 + <(100,1),115> | ((1,2),(3,4),(5,6),(7,8)) | 0 + <(100,1),115> | ((7,8),(5,6),(3,4),(1,2)) | 0 + <(100,1),115> | ((1,2),(7,8),(5,6),(3,-4)) | 0 + <(100,1),115> | ((0,0)) | 0 + <(100,1),115> 
| ((0,1),(0,1)) | 0 + <(3,5),0> | ((2,0),(2,4),(0,0)) | 1.41421356237 + <(3,5),0> | ((3,1),(3,3),(1,0)) | 2 + <(3,5),0> | ((1,2),(3,4),(5,6),(7,8)) | 0.707106781187 + <(3,5),0> | ((7,8),(5,6),(3,4),(1,2)) | 0.707106781187 + <(3,5),0> | ((1,2),(7,8),(5,6),(3,-4)) | 0.707106781187 + <(3,5),0> | ((0,0)) | 5.83095189485 + <(3,5),0> | ((0,1),(0,1)) | 5 + <(3,5),NaN> | ((2,0),(2,4),(0,0)) | NaN + <(3,5),NaN> | ((3,1),(3,3),(1,0)) | NaN + <(3,5),NaN> | ((1,2),(3,4),(5,6),(7,8)) | NaN + <(3,5),NaN> | ((7,8),(5,6),(3,4),(1,2)) | NaN + <(3,5),NaN> | ((1,2),(7,8),(5,6),(3,-4)) | NaN + <(3,5),NaN> | ((0,0)) | NaN + <(3,5),NaN> | ((0,1),(0,1)) | NaN +(56 rows) diff --git a/src/test/regress/expected/geometry_1.out b/src/test/regress/expected/geometry_1.out deleted file mode 100644 index fad246c2b9..0000000000 --- a/src/test/regress/expected/geometry_1.out +++ /dev/null @@ -1,563 +0,0 @@ --- --- GEOMETRY --- --- Back off displayed precision a little bit to reduce platform-to-platform --- variation in results. -SET extra_float_digits TO -3; --- --- Points --- -SELECT '' AS four, center(f1) AS center - FROM BOX_TBL; - four | center -------+--------- - | (1,1) - | (2,2) - | (2.5,3) - | (3,3) -(4 rows) - -SELECT '' AS four, (@@ f1) AS center - FROM BOX_TBL; - four | center -------+--------- - | (1,1) - | (2,2) - | (2.5,3) - | (3,3) -(4 rows) - -SELECT '' AS six, point(f1) AS center - FROM CIRCLE_TBL; - six | center ------+----------- - | (5,1) - | (1,2) - | (1,3) - | (1,2) - | (100,200) - | (100,1) -(6 rows) - -SELECT '' AS six, (@@ f1) AS center - FROM CIRCLE_TBL; - six | center ------+----------- - | (5,1) - | (1,2) - | (1,3) - | (1,2) - | (100,200) - | (100,1) -(6 rows) - -SELECT '' AS two, (@@ f1) AS center - FROM POLYGON_TBL - WHERE (# f1) > 2; - two | center ------+------------------------------- - | (1.33333333333,1.33333333333) - | (2.33333333333,1.33333333333) -(2 rows) - --- "is horizontal" function -SELECT '' AS two, p1.f1 - FROM POINT_TBL p1 - WHERE ishorizontal(p1.f1, point '(0,0)'); - two | f1 ------+--------- - | (0,0) - | (-10,0) -(2 rows) - --- "is horizontal" operator -SELECT '' AS two, p1.f1 - FROM POINT_TBL p1 - WHERE p1.f1 ?- point '(0,0)'; - two | f1 ------+--------- - | (0,0) - | (-10,0) -(2 rows) - --- "is vertical" function -SELECT '' AS one, p1.f1 - FROM POINT_TBL p1 - WHERE isvertical(p1.f1, point '(5.1,34.5)'); - one | f1 ------+------------ - | (5.1,34.5) -(1 row) - --- "is vertical" operator -SELECT '' AS one, p1.f1 - FROM POINT_TBL p1 - WHERE p1.f1 ?| point '(5.1,34.5)'; - one | f1 ------+------------ - | (5.1,34.5) -(1 row) - --- --- Line segments --- --- intersection -SELECT '' AS count, p.f1, l.s, l.s # p.f1 AS intersection - FROM LSEG_TBL l, POINT_TBL p; -ERROR: operator does not exist: lseg # point -LINE 1: SELECT '' AS count, p.f1, l.s, l.s # p.f1 AS intersection - ^ -HINT: No operator matches the given name and argument type(s). You might need to add explicit type casts. 
--- closest point -SELECT '' AS thirty, p.f1, l.s, p.f1 ## l.s AS closest - FROM LSEG_TBL l, POINT_TBL p; - thirty | f1 | s | closest ---------+------------+-------------------------------+---------------------------------- - | (0,0) | [(1,2),(3,4)] | (1,2) - | (0,0) | [(0,0),(6,6)] | (0,0) - | (0,0) | [(10,-10),(-3,-4)] | (-2.0487804878,-4.43902439024) - | (0,0) | [(-1000000,200),(300000,-40)] | (0.00284023658959,15.3846148603) - | (0,0) | [(11,22),(33,44)] | (11,22) - | (-10,0) | [(1,2),(3,4)] | (1,2) - | (-10,0) | [(0,0),(6,6)] | (0,0) - | (-10,0) | [(10,-10),(-3,-4)] | (-3,-4) - | (-10,0) | [(-1000000,200),(300000,-40)] | (-9.99715942258,15.386461014) - | (-10,0) | [(11,22),(33,44)] | (11,22) - | (-3,4) | [(1,2),(3,4)] | (1,2) - | (-3,4) | [(0,0),(6,6)] | (0.5,0.5) - | (-3,4) | [(10,-10),(-3,-4)] | (-3,-4) - | (-3,4) | [(-1000000,200),(300000,-40)] | (-2.99789812268,15.3851688427) - | (-3,4) | [(11,22),(33,44)] | (11,22) - | (5.1,34.5) | [(1,2),(3,4)] | (3,4) - | (5.1,34.5) | [(0,0),(6,6)] | (6,6) - | (5.1,34.5) | [(10,-10),(-3,-4)] | (-3,-4) - | (5.1,34.5) | [(-1000000,200),(300000,-40)] | (5.09647083221,15.3836744977) - | (5.1,34.5) | [(11,22),(33,44)] | (14.3,25.3) - | (-5,-12) | [(1,2),(3,4)] | (1,2) - | (-5,-12) | [(0,0),(6,6)] | (0,0) - | (-5,-12) | [(10,-10),(-3,-4)] | (-1.60487804878,-4.64390243902) - | (-5,-12) | [(-1000000,200),(300000,-40)] | (-4.99494420846,15.3855375282) - | (-5,-12) | [(11,22),(33,44)] | (11,22) - | (10,10) | [(1,2),(3,4)] | (3,4) - | (10,10) | [(0,0),(6,6)] | (6,6) - | (10,10) | [(10,-10),(-3,-4)] | (2.39024390244,-6.48780487805) - | (10,10) | [(-1000000,200),(300000,-40)] | (10.000993742,15.3827690473) - | (10,10) | [(11,22),(33,44)] | (11,22) -(30 rows) - --- --- Boxes --- -SELECT '' as six, box(f1) AS box FROM CIRCLE_TBL; - six | box ------+---------------------------------------------------------------- - | (7.12132034356,3.12132034356),(2.87867965644,-1.12132034356) - | (71.7106781187,72.7106781187),(-69.7106781187,-68.7106781187) - | (4.53553390593,6.53553390593),(-2.53553390593,-0.535533905933) - | (3.12132034356,4.12132034356),(-1.12132034356,-0.12132034356) - | (107.071067812,207.071067812),(92.9289321881,192.928932188) - | (181.317279836,82.3172798365),(18.6827201635,-80.3172798365) -(6 rows) - --- translation -SELECT '' AS twentyfour, b.f1 + p.f1 AS translation - FROM BOX_TBL b, POINT_TBL p; - twentyfour | translation -------------+------------------------- - | (2,2),(0,0) - | (3,3),(1,1) - | (2.5,3.5),(2.5,2.5) - | (3,3),(3,3) - | (-8,2),(-10,0) - | (-7,3),(-9,1) - | (-7.5,3.5),(-7.5,2.5) - | (-7,3),(-7,3) - | (-1,6),(-3,4) - | (0,7),(-2,5) - | (-0.5,7.5),(-0.5,6.5) - | (0,7),(0,7) - | (7.1,36.5),(5.1,34.5) - | (8.1,37.5),(6.1,35.5) - | (7.6,38),(7.6,37) - | (8.1,37.5),(8.1,37.5) - | (-3,-10),(-5,-12) - | (-2,-9),(-4,-11) - | (-2.5,-8.5),(-2.5,-9.5) - | (-2,-9),(-2,-9) - | (12,12),(10,10) - | (13,13),(11,11) - | (12.5,13.5),(12.5,12.5) - | (13,13),(13,13) -(24 rows) - -SELECT '' AS twentyfour, b.f1 - p.f1 AS translation - FROM BOX_TBL b, POINT_TBL p; - twentyfour | translation -------------+--------------------------- - | (2,2),(0,0) - | (3,3),(1,1) - | (2.5,3.5),(2.5,2.5) - | (3,3),(3,3) - | (12,2),(10,0) - | (13,3),(11,1) - | (12.5,3.5),(12.5,2.5) - | (13,3),(13,3) - | (5,-2),(3,-4) - | (6,-1),(4,-3) - | (5.5,-0.5),(5.5,-1.5) - | (6,-1),(6,-1) - | (-3.1,-32.5),(-5.1,-34.5) - | (-2.1,-31.5),(-4.1,-33.5) - | (-2.6,-31),(-2.6,-32) - | (-2.1,-31.5),(-2.1,-31.5) - | (7,14),(5,12) - | (8,15),(6,13) - | (7.5,15.5),(7.5,14.5) - | (8,15),(8,15) - | 
(-8,-8),(-10,-10) - | (-7,-7),(-9,-9) - | (-7.5,-6.5),(-7.5,-7.5) - | (-7,-7),(-7,-7) -(24 rows) - --- scaling and rotation -SELECT '' AS twentyfour, b.f1 * p.f1 AS rotation - FROM BOX_TBL b, POINT_TBL p; - twentyfour | rotation -------------+----------------------------- - | (0,0),(0,0) - | (0,0),(0,0) - | (0,0),(0,0) - | (0,0),(0,0) - | (0,0),(-20,-20) - | (-10,-10),(-30,-30) - | (-25,-25),(-25,-35) - | (-30,-30),(-30,-30) - | (0,2),(-14,0) - | (-7,3),(-21,1) - | (-17.5,2.5),(-21.5,-0.5) - | (-21,3),(-21,3) - | (0,79.2),(-58.8,0) - | (-29.4,118.8),(-88.2,39.6) - | (-73.5,104.1),(-108,99) - | (-88.2,118.8),(-88.2,118.8) - | (14,0),(0,-34) - | (21,-17),(7,-51) - | (29.5,-42.5),(17.5,-47.5) - | (21,-51),(21,-51) - | (0,40),(0,0) - | (0,60),(0,20) - | (0,60),(-10,50) - | (0,60),(0,60) -(24 rows) - -SELECT '' AS twenty, b.f1 / p.f1 AS rotation - FROM BOX_TBL b, POINT_TBL p - WHERE (p.f1 <-> point '(0,0)') >= 1; - twenty | rotation ---------+---------------------------------------------------------------------- - | (0,0),(-0.2,-0.2) - | (0.08,0),(0,-0.56) - | (0.0651176557644,0),(0,-0.0483449262493) - | (0,0.0828402366864),(-0.201183431953,0) - | (0.2,0),(0,0) - | (-0.1,-0.1),(-0.3,-0.3) - | (0.12,-0.28),(0.04,-0.84) - | (0.0976764836466,-0.0241724631247),(0.0325588278822,-0.072517389374) - | (-0.100591715976,0.12426035503),(-0.301775147929,0.0414201183432) - | (0.3,0),(0.1,0) - | (-0.25,-0.25),(-0.25,-0.35) - | (0.26,-0.7),(0.1,-0.82) - | (0.109762715209,-0.0562379754329),(0.0813970697055,-0.0604311578117) - | (-0.251479289941,0.103550295858),(-0.322485207101,0.0739644970414) - | (0.3,0.05),(0.25,0) - | (-0.3,-0.3),(-0.3,-0.3) - | (0.12,-0.84),(0.12,-0.84) - | (0.0976764836466,-0.072517389374),(0.0976764836466,-0.072517389374) - | (-0.301775147929,0.12426035503),(-0.301775147929,0.12426035503) - | (0.3,0),(0.3,0) -(20 rows) - -SELECT f1::box - FROM POINT_TBL; - f1 ------------------------ - (0,0),(0,0) - (-10,0),(-10,0) - (-3,4),(-3,4) - (5.1,34.5),(5.1,34.5) - (-5,-12),(-5,-12) - (10,10),(10,10) -(6 rows) - -SELECT bound_box(a.f1, b.f1) - FROM BOX_TBL a, BOX_TBL b; - bound_box ---------------------- - (2,2),(0,0) - (3,3),(0,0) - (2.5,3.5),(0,0) - (3,3),(0,0) - (3,3),(0,0) - (3,3),(1,1) - (3,3.5),(1,1) - (3,3),(1,1) - (2.5,3.5),(0,0) - (3,3.5),(1,1) - (2.5,3.5),(2.5,2.5) - (3,3.5),(2.5,2.5) - (3,3),(0,0) - (3,3),(1,1) - (3,3.5),(2.5,2.5) - (3,3),(3,3) -(16 rows) - --- --- Paths --- -SELECT '' AS eight, npoints(f1) AS npoints, f1 AS path FROM PATH_TBL; - eight | npoints | path --------+---------+--------------------------- - | 2 | [(1,2),(3,4)] - | 2 | ((1,2),(3,4)) - | 4 | [(0,0),(3,0),(4,5),(1,6)] - | 2 | ((1,2),(3,4)) - | 2 | ((1,2),(3,4)) - | 2 | [(1,2),(3,4)] - | 2 | [(11,12),(13,14)] - | 2 | ((11,12),(13,14)) -(8 rows) - -SELECT '' AS four, path(f1) FROM POLYGON_TBL; - four | path -------+--------------------- - | ((2,0),(2,4),(0,0)) - | ((3,1),(3,3),(1,0)) - | ((0,0)) - | ((0,1),(0,1)) -(4 rows) - --- translation -SELECT '' AS eight, p1.f1 + point '(10,10)' AS dist_add - FROM PATH_TBL p1; - eight | dist_add --------+----------------------------------- - | [(11,12),(13,14)] - | ((11,12),(13,14)) - | [(10,10),(13,10),(14,15),(11,16)] - | ((11,12),(13,14)) - | ((11,12),(13,14)) - | [(11,12),(13,14)] - | [(21,22),(23,24)] - | ((21,22),(23,24)) -(8 rows) - --- scaling and rotation -SELECT '' AS eight, p1.f1 * point '(2,-1)' AS dist_mul - FROM PATH_TBL p1; - eight | dist_mul --------+------------------------------ - | [(4,3),(10,5)] - | ((4,3),(10,5)) - | [(0,0),(6,-3),(13,6),(8,11)] - | 
((4,3),(10,5)) - | ((4,3),(10,5)) - | [(4,3),(10,5)] - | [(34,13),(40,15)] - | ((34,13),(40,15)) -(8 rows) - --- --- Polygons --- --- containment -SELECT '' AS twentyfour, p.f1, poly.f1, poly.f1 @> p.f1 AS contains - FROM POLYGON_TBL poly, POINT_TBL p; - twentyfour | f1 | f1 | contains -------------+------------+---------------------+---------- - | (0,0) | ((2,0),(2,4),(0,0)) | t - | (0,0) | ((3,1),(3,3),(1,0)) | f - | (0,0) | ((0,0)) | t - | (0,0) | ((0,1),(0,1)) | f - | (-10,0) | ((2,0),(2,4),(0,0)) | f - | (-10,0) | ((3,1),(3,3),(1,0)) | f - | (-10,0) | ((0,0)) | f - | (-10,0) | ((0,1),(0,1)) | f - | (-3,4) | ((2,0),(2,4),(0,0)) | f - | (-3,4) | ((3,1),(3,3),(1,0)) | f - | (-3,4) | ((0,0)) | f - | (-3,4) | ((0,1),(0,1)) | f - | (5.1,34.5) | ((2,0),(2,4),(0,0)) | f - | (5.1,34.5) | ((3,1),(3,3),(1,0)) | f - | (5.1,34.5) | ((0,0)) | f - | (5.1,34.5) | ((0,1),(0,1)) | f - | (-5,-12) | ((2,0),(2,4),(0,0)) | f - | (-5,-12) | ((3,1),(3,3),(1,0)) | f - | (-5,-12) | ((0,0)) | f - | (-5,-12) | ((0,1),(0,1)) | f - | (10,10) | ((2,0),(2,4),(0,0)) | f - | (10,10) | ((3,1),(3,3),(1,0)) | f - | (10,10) | ((0,0)) | f - | (10,10) | ((0,1),(0,1)) | f -(24 rows) - -SELECT '' AS twentyfour, p.f1, poly.f1, p.f1 <@ poly.f1 AS contained - FROM POLYGON_TBL poly, POINT_TBL p; - twentyfour | f1 | f1 | contained -------------+------------+---------------------+----------- - | (0,0) | ((2,0),(2,4),(0,0)) | t - | (0,0) | ((3,1),(3,3),(1,0)) | f - | (0,0) | ((0,0)) | t - | (0,0) | ((0,1),(0,1)) | f - | (-10,0) | ((2,0),(2,4),(0,0)) | f - | (-10,0) | ((3,1),(3,3),(1,0)) | f - | (-10,0) | ((0,0)) | f - | (-10,0) | ((0,1),(0,1)) | f - | (-3,4) | ((2,0),(2,4),(0,0)) | f - | (-3,4) | ((3,1),(3,3),(1,0)) | f - | (-3,4) | ((0,0)) | f - | (-3,4) | ((0,1),(0,1)) | f - | (5.1,34.5) | ((2,0),(2,4),(0,0)) | f - | (5.1,34.5) | ((3,1),(3,3),(1,0)) | f - | (5.1,34.5) | ((0,0)) | f - | (5.1,34.5) | ((0,1),(0,1)) | f - | (-5,-12) | ((2,0),(2,4),(0,0)) | f - | (-5,-12) | ((3,1),(3,3),(1,0)) | f - | (-5,-12) | ((0,0)) | f - | (-5,-12) | ((0,1),(0,1)) | f - | (10,10) | ((2,0),(2,4),(0,0)) | f - | (10,10) | ((3,1),(3,3),(1,0)) | f - | (10,10) | ((0,0)) | f - | (10,10) | ((0,1),(0,1)) | f -(24 rows) - -SELECT '' AS four, npoints(f1) AS npoints, f1 AS polygon - FROM POLYGON_TBL; - four | npoints | polygon -------+---------+--------------------- - | 3 | ((2,0),(2,4),(0,0)) - | 3 | ((3,1),(3,3),(1,0)) - | 1 | ((0,0)) - | 2 | ((0,1),(0,1)) -(4 rows) - -SELECT '' AS four, polygon(f1) - FROM BOX_TBL; - four | polygon -------+------------------------------------------- - | ((0,0),(0,2),(2,2),(2,0)) - | ((1,1),(1,3),(3,3),(3,1)) - | ((2.5,2.5),(2.5,3.5),(2.5,3.5),(2.5,2.5)) - | ((3,3),(3,3),(3,3),(3,3)) -(4 rows) - -SELECT '' AS four, polygon(f1) - FROM PATH_TBL WHERE isclosed(f1); - four | polygon -------+------------------- - | ((1,2),(3,4)) - | ((1,2),(3,4)) - | ((1,2),(3,4)) - | ((11,12),(13,14)) -(4 rows) - -SELECT '' AS four, f1 AS open_path, polygon( pclose(f1)) AS polygon - FROM PATH_TBL - WHERE isopen(f1); - four | open_path | polygon -------+---------------------------+--------------------------- - | [(1,2),(3,4)] | ((1,2),(3,4)) - | [(0,0),(3,0),(4,5),(1,6)] | ((0,0),(3,0),(4,5),(1,6)) - | [(1,2),(3,4)] | ((1,2),(3,4)) - | [(11,12),(13,14)] | ((11,12),(13,14)) -(4 rows) - --- convert circles to polygons using the default number of points -SELECT '' AS six, polygon(f1) - FROM CIRCLE_TBL; - six | polygon 
------+---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- - | ((2,1),(2.40192378865,2.5),(3.5,3.59807621135),(5,4),(6.5,3.59807621135),(7.59807621135,2.5),(8,1),(7.59807621135,-0.5),(6.5,-1.59807621135),(5,-2),(3.5,-1.59807621135),(2.40192378865,-0.5)) - | ((-99,2),(-85.6025403784,52),(-49,88.6025403784),(1,102),(51,88.6025403784),(87.6025403784,52),(101,2),(87.6025403784,-48),(51,-84.6025403784),(1,-98),(-49,-84.6025403784),(-85.6025403784,-48)) - | ((-4,3),(-3.33012701892,5.5),(-1.5,7.33012701892),(1,8),(3.5,7.33012701892),(5.33012701892,5.5),(6,3),(5.33012701892,0.5),(3.5,-1.33012701892),(1,-2),(-1.5,-1.33012701892),(-3.33012701892,0.5)) - | ((-2,2),(-1.59807621135,3.5),(-0.5,4.59807621135),(1,5),(2.5,4.59807621135),(3.59807621135,3.5),(4,2),(3.59807621135,0.5),(2.5,-0.598076211353),(1,-1),(-0.5,-0.598076211353),(-1.59807621135,0.5)) - | ((90,200),(91.3397459622,205),(95,208.660254038),(100,210),(105,208.660254038),(108.660254038,205),(110,200),(108.660254038,195),(105,191.339745962),(100,190),(95,191.339745962),(91.3397459622,195)) - | ((-15,1),(0.40707856479,58.5),(42.5,100.592921435),(100,116),(157.5,100.592921435),(199.592921435,58.5),(215,1),(199.592921435,-56.5),(157.5,-98.5929214352),(100,-114),(42.5,-98.5929214352),(0.40707856479,-56.5)) -(6 rows) - --- convert the circle to an 8-point polygon -SELECT '' AS six, polygon(8, f1) - FROM CIRCLE_TBL; - six | polygon ------+------------------------------------------------------------------------------------------------------------------------------------------------------------------ - | ((2,1),(2.87867965644,3.12132034356),(5,4),(7.12132034356,3.12132034356),(8,1),(7.12132034356,-1.12132034356),(5,-2),(2.87867965644,-1.12132034356)) - | ((-99,2),(-69.7106781187,72.7106781187),(1,102),(71.7106781187,72.7106781187),(101,2),(71.7106781187,-68.7106781187),(1,-98),(-69.7106781187,-68.7106781187)) - | ((-4,3),(-2.53553390593,6.53553390593),(1,8),(4.53553390593,6.53553390593),(6,3),(4.53553390593,-0.535533905933),(1,-2),(-2.53553390593,-0.535533905933)) - | ((-2,2),(-1.12132034356,4.12132034356),(1,5),(3.12132034356,4.12132034356),(4,2),(3.12132034356,-0.12132034356),(1,-1),(-1.12132034356,-0.12132034356)) - | ((90,200),(92.9289321881,207.071067812),(100,210),(107.071067812,207.071067812),(110,200),(107.071067812,192.928932188),(100,190),(92.9289321881,192.928932188)) - | ((-15,1),(18.6827201635,82.3172798365),(100,116),(181.317279836,82.3172798365),(215,1),(181.317279836,-80.3172798365),(100,-114),(18.6827201635,-80.3172798365)) -(6 rows) - --- --- Circles --- -SELECT '' AS six, circle(f1, 50.0) - FROM POINT_TBL; - six | circle ------+----------------- - | <(0,0),50> - | <(-10,0),50> - | <(-3,4),50> - | <(5.1,34.5),50> - | <(-5,-12),50> - | <(10,10),50> -(6 rows) - -SELECT '' AS four, circle(f1) - FROM BOX_TBL; - four | circle -------+----------------------- - | <(1,1),1.41421356237> - | <(2,2),1.41421356237> - | <(2.5,3),0.5> - | <(3,3),0> -(4 rows) - -SELECT '' AS two, circle(f1) - FROM POLYGON_TBL - WHERE (# f1) >= 3; - two | circle ------+----------------------------------------------- - | <(1.33333333333,1.33333333333),2.04168905064> - | <(2.33333333333,1.33333333333),1.47534300379> -(2 rows) - -SELECT '' AS twentyfour, c1.f1 AS circle, p1.f1 AS point, (p1.f1 <-> c1.f1) AS distance - FROM CIRCLE_TBL c1, POINT_TBL p1 - WHERE (p1.f1 <-> c1.f1) > 0 - ORDER BY distance, 
area(c1.f1), p1.f1[0]; - twentyfour | circle | point | distance -------------+----------------+------------+--------------- - | <(1,2),3> | (-3,4) | 1.472135955 - | <(5,1),3> | (0,0) | 2.09901951359 - | <(5,1),3> | (-3,4) | 5.54400374532 - | <(1,3),5> | (-10,0) | 6.40175425099 - | <(1,3),5> | (10,10) | 6.40175425099 - | <(5,1),3> | (10,10) | 7.29563014099 - | <(1,2),3> | (-10,0) | 8.1803398875 - | <(1,2),3> | (10,10) | 9.04159457879 - | <(1,3),5> | (-5,-12) | 11.1554944214 - | <(5,1),3> | (-10,0) | 12.0332963784 - | <(1,2),3> | (-5,-12) | 12.2315462117 - | <(5,1),3> | (-5,-12) | 13.4012194669 - | <(1,3),5> | (5.1,34.5) | 26.7657047773 - | <(1,2),3> | (5.1,34.5) | 29.7575945393 - | <(5,1),3> | (5.1,34.5) | 30.5001492534 - | <(100,200),10> | (5.1,34.5) | 180.778038568 - | <(100,200),10> | (10,10) | 200.237960416 - | <(100,200),10> | (-3,4) | 211.415898255 - | <(100,200),10> | (0,0) | 213.60679775 - | <(100,200),10> | (-10,0) | 218.25424421 - | <(100,200),10> | (-5,-12) | 226.577682802 -(21 rows) - diff --git a/src/test/regress/expected/geometry_2.out b/src/test/regress/expected/geometry_2.out deleted file mode 100644 index c938e66418..0000000000 --- a/src/test/regress/expected/geometry_2.out +++ /dev/null @@ -1,563 +0,0 @@ --- --- GEOMETRY --- --- Back off displayed precision a little bit to reduce platform-to-platform --- variation in results. -SET extra_float_digits TO -3; --- --- Points --- -SELECT '' AS four, center(f1) AS center - FROM BOX_TBL; - four | center -------+--------- - | (1,1) - | (2,2) - | (2.5,3) - | (3,3) -(4 rows) - -SELECT '' AS four, (@@ f1) AS center - FROM BOX_TBL; - four | center -------+--------- - | (1,1) - | (2,2) - | (2.5,3) - | (3,3) -(4 rows) - -SELECT '' AS six, point(f1) AS center - FROM CIRCLE_TBL; - six | center ------+----------- - | (5,1) - | (1,2) - | (1,3) - | (1,2) - | (100,200) - | (100,1) -(6 rows) - -SELECT '' AS six, (@@ f1) AS center - FROM CIRCLE_TBL; - six | center ------+----------- - | (5,1) - | (1,2) - | (1,3) - | (1,2) - | (100,200) - | (100,1) -(6 rows) - -SELECT '' AS two, (@@ f1) AS center - FROM POLYGON_TBL - WHERE (# f1) > 2; - two | center ------+------------------------------- - | (1.33333333333,1.33333333333) - | (2.33333333333,1.33333333333) -(2 rows) - --- "is horizontal" function -SELECT '' AS two, p1.f1 - FROM POINT_TBL p1 - WHERE ishorizontal(p1.f1, point '(0,0)'); - two | f1 ------+--------- - | (0,0) - | (-10,0) -(2 rows) - --- "is horizontal" operator -SELECT '' AS two, p1.f1 - FROM POINT_TBL p1 - WHERE p1.f1 ?- point '(0,0)'; - two | f1 ------+--------- - | (0,0) - | (-10,0) -(2 rows) - --- "is vertical" function -SELECT '' AS one, p1.f1 - FROM POINT_TBL p1 - WHERE isvertical(p1.f1, point '(5.1,34.5)'); - one | f1 ------+------------ - | (5.1,34.5) -(1 row) - --- "is vertical" operator -SELECT '' AS one, p1.f1 - FROM POINT_TBL p1 - WHERE p1.f1 ?| point '(5.1,34.5)'; - one | f1 ------+------------ - | (5.1,34.5) -(1 row) - --- --- Line segments --- --- intersection -SELECT '' AS count, p.f1, l.s, l.s # p.f1 AS intersection - FROM LSEG_TBL l, POINT_TBL p; -ERROR: operator does not exist: lseg # point -LINE 1: SELECT '' AS count, p.f1, l.s, l.s # p.f1 AS intersection - ^ -HINT: No operator matches the given name and argument type(s). You might need to add explicit type casts. 
--- closest point -SELECT '' AS thirty, p.f1, l.s, p.f1 ## l.s AS closest - FROM LSEG_TBL l, POINT_TBL p; - thirty | f1 | s | closest ---------+------------+-------------------------------+---------------------------------- - | (0,0) | [(1,2),(3,4)] | (1,2) - | (0,0) | [(0,0),(6,6)] | (0,0) - | (0,0) | [(10,-10),(-3,-4)] | (-2.0487804878,-4.43902439024) - | (0,0) | [(-1000000,200),(300000,-40)] | (0.00284023658959,15.3846148603) - | (0,0) | [(11,22),(33,44)] | (11,22) - | (-10,0) | [(1,2),(3,4)] | (1,2) - | (-10,0) | [(0,0),(6,6)] | (0,0) - | (-10,0) | [(10,-10),(-3,-4)] | (-3,-4) - | (-10,0) | [(-1000000,200),(300000,-40)] | (-9.99715942258,15.386461014) - | (-10,0) | [(11,22),(33,44)] | (11,22) - | (-3,4) | [(1,2),(3,4)] | (1,2) - | (-3,4) | [(0,0),(6,6)] | (0.5,0.5) - | (-3,4) | [(10,-10),(-3,-4)] | (-3,-4) - | (-3,4) | [(-1000000,200),(300000,-40)] | (-2.99789812268,15.3851688427) - | (-3,4) | [(11,22),(33,44)] | (11,22) - | (5.1,34.5) | [(1,2),(3,4)] | (3,4) - | (5.1,34.5) | [(0,0),(6,6)] | (6,6) - | (5.1,34.5) | [(10,-10),(-3,-4)] | (-3,-4) - | (5.1,34.5) | [(-1000000,200),(300000,-40)] | (5.09647083221,15.3836744977) - | (5.1,34.5) | [(11,22),(33,44)] | (14.3,25.3) - | (-5,-12) | [(1,2),(3,4)] | (1,2) - | (-5,-12) | [(0,0),(6,6)] | (0,0) - | (-5,-12) | [(10,-10),(-3,-4)] | (-1.60487804878,-4.64390243902) - | (-5,-12) | [(-1000000,200),(300000,-40)] | (-4.99494420846,15.3855375282) - | (-5,-12) | [(11,22),(33,44)] | (11,22) - | (10,10) | [(1,2),(3,4)] | (3,4) - | (10,10) | [(0,0),(6,6)] | (6,6) - | (10,10) | [(10,-10),(-3,-4)] | (2.39024390244,-6.48780487805) - | (10,10) | [(-1000000,200),(300000,-40)] | (10.000993742,15.3827690473) - | (10,10) | [(11,22),(33,44)] | (11,22) -(30 rows) - --- --- Boxes --- -SELECT '' as six, box(f1) AS box FROM CIRCLE_TBL; - six | box ------+---------------------------------------------------------------- - | (7.12132034356,3.12132034356),(2.87867965644,-1.12132034356) - | (71.7106781187,72.7106781187),(-69.7106781187,-68.7106781187) - | (4.53553390593,6.53553390593),(-2.53553390593,-0.535533905933) - | (3.12132034356,4.12132034356),(-1.12132034356,-0.12132034356) - | (107.071067812,207.071067812),(92.9289321881,192.928932188) - | (181.317279836,82.3172798365),(18.6827201635,-80.3172798365) -(6 rows) - --- translation -SELECT '' AS twentyfour, b.f1 + p.f1 AS translation - FROM BOX_TBL b, POINT_TBL p; - twentyfour | translation -------------+------------------------- - | (2,2),(0,0) - | (3,3),(1,1) - | (2.5,3.5),(2.5,2.5) - | (3,3),(3,3) - | (-8,2),(-10,0) - | (-7,3),(-9,1) - | (-7.5,3.5),(-7.5,2.5) - | (-7,3),(-7,3) - | (-1,6),(-3,4) - | (0,7),(-2,5) - | (-0.5,7.5),(-0.5,6.5) - | (0,7),(0,7) - | (7.1,36.5),(5.1,34.5) - | (8.1,37.5),(6.1,35.5) - | (7.6,38),(7.6,37) - | (8.1,37.5),(8.1,37.5) - | (-3,-10),(-5,-12) - | (-2,-9),(-4,-11) - | (-2.5,-8.5),(-2.5,-9.5) - | (-2,-9),(-2,-9) - | (12,12),(10,10) - | (13,13),(11,11) - | (12.5,13.5),(12.5,12.5) - | (13,13),(13,13) -(24 rows) - -SELECT '' AS twentyfour, b.f1 - p.f1 AS translation - FROM BOX_TBL b, POINT_TBL p; - twentyfour | translation -------------+--------------------------- - | (2,2),(0,0) - | (3,3),(1,1) - | (2.5,3.5),(2.5,2.5) - | (3,3),(3,3) - | (12,2),(10,0) - | (13,3),(11,1) - | (12.5,3.5),(12.5,2.5) - | (13,3),(13,3) - | (5,-2),(3,-4) - | (6,-1),(4,-3) - | (5.5,-0.5),(5.5,-1.5) - | (6,-1),(6,-1) - | (-3.1,-32.5),(-5.1,-34.5) - | (-2.1,-31.5),(-4.1,-33.5) - | (-2.6,-31),(-2.6,-32) - | (-2.1,-31.5),(-2.1,-31.5) - | (7,14),(5,12) - | (8,15),(6,13) - | (7.5,15.5),(7.5,14.5) - | (8,15),(8,15) - | 
(-8,-8),(-10,-10) - | (-7,-7),(-9,-9) - | (-7.5,-6.5),(-7.5,-7.5) - | (-7,-7),(-7,-7) -(24 rows) - --- scaling and rotation -SELECT '' AS twentyfour, b.f1 * p.f1 AS rotation - FROM BOX_TBL b, POINT_TBL p; - twentyfour | rotation -------------+----------------------------- - | (0,0),(0,0) - | (0,0),(0,0) - | (0,0),(0,0) - | (0,0),(0,0) - | (-0,0),(-20,-20) - | (-10,-10),(-30,-30) - | (-25,-25),(-25,-35) - | (-30,-30),(-30,-30) - | (-0,2),(-14,0) - | (-7,3),(-21,1) - | (-17.5,2.5),(-21.5,-0.5) - | (-21,3),(-21,3) - | (0,79.2),(-58.8,0) - | (-29.4,118.8),(-88.2,39.6) - | (-73.5,104.1),(-108,99) - | (-88.2,118.8),(-88.2,118.8) - | (14,-0),(0,-34) - | (21,-17),(7,-51) - | (29.5,-42.5),(17.5,-47.5) - | (21,-51),(21,-51) - | (0,40),(0,0) - | (0,60),(0,20) - | (0,60),(-10,50) - | (0,60),(0,60) -(24 rows) - -SELECT '' AS twenty, b.f1 / p.f1 AS rotation - FROM BOX_TBL b, POINT_TBL p - WHERE (p.f1 <-> point '(0,0)') >= 1; - twenty | rotation ---------+---------------------------------------------------------------------- - | (0,-0),(-0.2,-0.2) - | (0.08,-0),(0,-0.56) - | (0.0651176557644,0),(0,-0.0483449262493) - | (-0,0.0828402366864),(-0.201183431953,0) - | (0.2,0),(0,0) - | (-0.1,-0.1),(-0.3,-0.3) - | (0.12,-0.28),(0.04,-0.84) - | (0.0976764836466,-0.0241724631247),(0.0325588278822,-0.072517389374) - | (-0.100591715976,0.12426035503),(-0.301775147929,0.0414201183432) - | (0.3,0),(0.1,0) - | (-0.25,-0.25),(-0.25,-0.35) - | (0.26,-0.7),(0.1,-0.82) - | (0.109762715209,-0.0562379754329),(0.0813970697055,-0.0604311578117) - | (-0.251479289941,0.103550295858),(-0.322485207101,0.0739644970414) - | (0.3,0.05),(0.25,0) - | (-0.3,-0.3),(-0.3,-0.3) - | (0.12,-0.84),(0.12,-0.84) - | (0.0976764836466,-0.072517389374),(0.0976764836466,-0.072517389374) - | (-0.301775147929,0.12426035503),(-0.301775147929,0.12426035503) - | (0.3,0),(0.3,0) -(20 rows) - -SELECT f1::box - FROM POINT_TBL; - f1 ------------------------ - (0,0),(0,0) - (-10,0),(-10,0) - (-3,4),(-3,4) - (5.1,34.5),(5.1,34.5) - (-5,-12),(-5,-12) - (10,10),(10,10) -(6 rows) - -SELECT bound_box(a.f1, b.f1) - FROM BOX_TBL a, BOX_TBL b; - bound_box ---------------------- - (2,2),(0,0) - (3,3),(0,0) - (2.5,3.5),(0,0) - (3,3),(0,0) - (3,3),(0,0) - (3,3),(1,1) - (3,3.5),(1,1) - (3,3),(1,1) - (2.5,3.5),(0,0) - (3,3.5),(1,1) - (2.5,3.5),(2.5,2.5) - (3,3.5),(2.5,2.5) - (3,3),(0,0) - (3,3),(1,1) - (3,3.5),(2.5,2.5) - (3,3),(3,3) -(16 rows) - --- --- Paths --- -SELECT '' AS eight, npoints(f1) AS npoints, f1 AS path FROM PATH_TBL; - eight | npoints | path --------+---------+--------------------------- - | 2 | [(1,2),(3,4)] - | 2 | ((1,2),(3,4)) - | 4 | [(0,0),(3,0),(4,5),(1,6)] - | 2 | ((1,2),(3,4)) - | 2 | ((1,2),(3,4)) - | 2 | [(1,2),(3,4)] - | 2 | [(11,12),(13,14)] - | 2 | ((11,12),(13,14)) -(8 rows) - -SELECT '' AS four, path(f1) FROM POLYGON_TBL; - four | path -------+--------------------- - | ((2,0),(2,4),(0,0)) - | ((3,1),(3,3),(1,0)) - | ((0,0)) - | ((0,1),(0,1)) -(4 rows) - --- translation -SELECT '' AS eight, p1.f1 + point '(10,10)' AS dist_add - FROM PATH_TBL p1; - eight | dist_add --------+----------------------------------- - | [(11,12),(13,14)] - | ((11,12),(13,14)) - | [(10,10),(13,10),(14,15),(11,16)] - | ((11,12),(13,14)) - | ((11,12),(13,14)) - | [(11,12),(13,14)] - | [(21,22),(23,24)] - | ((21,22),(23,24)) -(8 rows) - --- scaling and rotation -SELECT '' AS eight, p1.f1 * point '(2,-1)' AS dist_mul - FROM PATH_TBL p1; - eight | dist_mul --------+------------------------------ - | [(4,3),(10,5)] - | ((4,3),(10,5)) - | [(0,0),(6,-3),(13,6),(8,11)] - 
| ((4,3),(10,5)) - | ((4,3),(10,5)) - | [(4,3),(10,5)] - | [(34,13),(40,15)] - | ((34,13),(40,15)) -(8 rows) - --- --- Polygons --- --- containment -SELECT '' AS twentyfour, p.f1, poly.f1, poly.f1 @> p.f1 AS contains - FROM POLYGON_TBL poly, POINT_TBL p; - twentyfour | f1 | f1 | contains -------------+------------+---------------------+---------- - | (0,0) | ((2,0),(2,4),(0,0)) | t - | (0,0) | ((3,1),(3,3),(1,0)) | f - | (0,0) | ((0,0)) | t - | (0,0) | ((0,1),(0,1)) | f - | (-10,0) | ((2,0),(2,4),(0,0)) | f - | (-10,0) | ((3,1),(3,3),(1,0)) | f - | (-10,0) | ((0,0)) | f - | (-10,0) | ((0,1),(0,1)) | f - | (-3,4) | ((2,0),(2,4),(0,0)) | f - | (-3,4) | ((3,1),(3,3),(1,0)) | f - | (-3,4) | ((0,0)) | f - | (-3,4) | ((0,1),(0,1)) | f - | (5.1,34.5) | ((2,0),(2,4),(0,0)) | f - | (5.1,34.5) | ((3,1),(3,3),(1,0)) | f - | (5.1,34.5) | ((0,0)) | f - | (5.1,34.5) | ((0,1),(0,1)) | f - | (-5,-12) | ((2,0),(2,4),(0,0)) | f - | (-5,-12) | ((3,1),(3,3),(1,0)) | f - | (-5,-12) | ((0,0)) | f - | (-5,-12) | ((0,1),(0,1)) | f - | (10,10) | ((2,0),(2,4),(0,0)) | f - | (10,10) | ((3,1),(3,3),(1,0)) | f - | (10,10) | ((0,0)) | f - | (10,10) | ((0,1),(0,1)) | f -(24 rows) - -SELECT '' AS twentyfour, p.f1, poly.f1, p.f1 <@ poly.f1 AS contained - FROM POLYGON_TBL poly, POINT_TBL p; - twentyfour | f1 | f1 | contained -------------+------------+---------------------+----------- - | (0,0) | ((2,0),(2,4),(0,0)) | t - | (0,0) | ((3,1),(3,3),(1,0)) | f - | (0,0) | ((0,0)) | t - | (0,0) | ((0,1),(0,1)) | f - | (-10,0) | ((2,0),(2,4),(0,0)) | f - | (-10,0) | ((3,1),(3,3),(1,0)) | f - | (-10,0) | ((0,0)) | f - | (-10,0) | ((0,1),(0,1)) | f - | (-3,4) | ((2,0),(2,4),(0,0)) | f - | (-3,4) | ((3,1),(3,3),(1,0)) | f - | (-3,4) | ((0,0)) | f - | (-3,4) | ((0,1),(0,1)) | f - | (5.1,34.5) | ((2,0),(2,4),(0,0)) | f - | (5.1,34.5) | ((3,1),(3,3),(1,0)) | f - | (5.1,34.5) | ((0,0)) | f - | (5.1,34.5) | ((0,1),(0,1)) | f - | (-5,-12) | ((2,0),(2,4),(0,0)) | f - | (-5,-12) | ((3,1),(3,3),(1,0)) | f - | (-5,-12) | ((0,0)) | f - | (-5,-12) | ((0,1),(0,1)) | f - | (10,10) | ((2,0),(2,4),(0,0)) | f - | (10,10) | ((3,1),(3,3),(1,0)) | f - | (10,10) | ((0,0)) | f - | (10,10) | ((0,1),(0,1)) | f -(24 rows) - -SELECT '' AS four, npoints(f1) AS npoints, f1 AS polygon - FROM POLYGON_TBL; - four | npoints | polygon -------+---------+--------------------- - | 3 | ((2,0),(2,4),(0,0)) - | 3 | ((3,1),(3,3),(1,0)) - | 1 | ((0,0)) - | 2 | ((0,1),(0,1)) -(4 rows) - -SELECT '' AS four, polygon(f1) - FROM BOX_TBL; - four | polygon -------+------------------------------------------- - | ((0,0),(0,2),(2,2),(2,0)) - | ((1,1),(1,3),(3,3),(3,1)) - | ((2.5,2.5),(2.5,3.5),(2.5,3.5),(2.5,2.5)) - | ((3,3),(3,3),(3,3),(3,3)) -(4 rows) - -SELECT '' AS four, polygon(f1) - FROM PATH_TBL WHERE isclosed(f1); - four | polygon -------+------------------- - | ((1,2),(3,4)) - | ((1,2),(3,4)) - | ((1,2),(3,4)) - | ((11,12),(13,14)) -(4 rows) - -SELECT '' AS four, f1 AS open_path, polygon( pclose(f1)) AS polygon - FROM PATH_TBL - WHERE isopen(f1); - four | open_path | polygon -------+---------------------------+--------------------------- - | [(1,2),(3,4)] | ((1,2),(3,4)) - | [(0,0),(3,0),(4,5),(1,6)] | ((0,0),(3,0),(4,5),(1,6)) - | [(1,2),(3,4)] | ((1,2),(3,4)) - | [(11,12),(13,14)] | ((11,12),(13,14)) -(4 rows) - --- convert circles to polygons using the default number of points -SELECT '' AS six, polygon(f1) - FROM CIRCLE_TBL; - six | polygon 
------+---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- - | ((2,1),(2.40192378865,2.5),(3.5,3.59807621135),(5,4),(6.5,3.59807621135),(7.59807621135,2.5),(8,1),(7.59807621135,-0.5),(6.5,-1.59807621135),(5,-2),(3.5,-1.59807621135),(2.40192378865,-0.5)) - | ((-99,2),(-85.6025403784,52),(-49,88.6025403784),(1,102),(51,88.6025403784),(87.6025403784,52),(101,2),(87.6025403784,-48),(51,-84.6025403784),(1,-98),(-49,-84.6025403784),(-85.6025403784,-48)) - | ((-4,3),(-3.33012701892,5.5),(-1.5,7.33012701892),(1,8),(3.5,7.33012701892),(5.33012701892,5.5),(6,3),(5.33012701892,0.5),(3.5,-1.33012701892),(1,-2),(-1.5,-1.33012701892),(-3.33012701892,0.5)) - | ((-2,2),(-1.59807621135,3.5),(-0.5,4.59807621135),(1,5),(2.5,4.59807621135),(3.59807621135,3.5),(4,2),(3.59807621135,0.5),(2.5,-0.598076211353),(1,-1),(-0.5,-0.598076211353),(-1.59807621135,0.5)) - | ((90,200),(91.3397459622,205),(95,208.660254038),(100,210),(105,208.660254038),(108.660254038,205),(110,200),(108.660254038,195),(105,191.339745962),(100,190),(95,191.339745962),(91.3397459622,195)) - | ((-15,1),(0.40707856479,58.5),(42.5,100.592921435),(100,116),(157.5,100.592921435),(199.592921435,58.5),(215,1),(199.592921435,-56.5),(157.5,-98.5929214352),(100,-114),(42.5,-98.5929214352),(0.40707856479,-56.5)) -(6 rows) - --- convert the circle to an 8-point polygon -SELECT '' AS six, polygon(8, f1) - FROM CIRCLE_TBL; - six | polygon ------+------------------------------------------------------------------------------------------------------------------------------------------------------------------ - | ((2,1),(2.87867965644,3.12132034356),(5,4),(7.12132034356,3.12132034356),(8,1),(7.12132034356,-1.12132034356),(5,-2),(2.87867965644,-1.12132034356)) - | ((-99,2),(-69.7106781187,72.7106781187),(1,102),(71.7106781187,72.7106781187),(101,2),(71.7106781187,-68.7106781187),(1,-98),(-69.7106781187,-68.7106781187)) - | ((-4,3),(-2.53553390593,6.53553390593),(1,8),(4.53553390593,6.53553390593),(6,3),(4.53553390593,-0.535533905933),(1,-2),(-2.53553390593,-0.535533905933)) - | ((-2,2),(-1.12132034356,4.12132034356),(1,5),(3.12132034356,4.12132034356),(4,2),(3.12132034356,-0.12132034356),(1,-1),(-1.12132034356,-0.12132034356)) - | ((90,200),(92.9289321881,207.071067812),(100,210),(107.071067812,207.071067812),(110,200),(107.071067812,192.928932188),(100,190),(92.9289321881,192.928932188)) - | ((-15,1),(18.6827201635,82.3172798365),(100,116),(181.317279836,82.3172798365),(215,1),(181.317279836,-80.3172798365),(100,-114),(18.6827201635,-80.3172798365)) -(6 rows) - --- --- Circles --- -SELECT '' AS six, circle(f1, 50.0) - FROM POINT_TBL; - six | circle ------+----------------- - | <(0,0),50> - | <(-10,0),50> - | <(-3,4),50> - | <(5.1,34.5),50> - | <(-5,-12),50> - | <(10,10),50> -(6 rows) - -SELECT '' AS four, circle(f1) - FROM BOX_TBL; - four | circle -------+----------------------- - | <(1,1),1.41421356237> - | <(2,2),1.41421356237> - | <(2.5,3),0.5> - | <(3,3),0> -(4 rows) - -SELECT '' AS two, circle(f1) - FROM POLYGON_TBL - WHERE (# f1) >= 3; - two | circle ------+----------------------------------------------- - | <(1.33333333333,1.33333333333),2.04168905064> - | <(2.33333333333,1.33333333333),1.47534300379> -(2 rows) - -SELECT '' AS twentyfour, c1.f1 AS circle, p1.f1 AS point, (p1.f1 <-> c1.f1) AS distance - FROM CIRCLE_TBL c1, POINT_TBL p1 - WHERE (p1.f1 <-> c1.f1) > 0 - ORDER BY distance, 
area(c1.f1), p1.f1[0]; - twentyfour | circle | point | distance -------------+----------------+------------+--------------- - | <(1,2),3> | (-3,4) | 1.472135955 - | <(5,1),3> | (0,0) | 2.09901951359 - | <(5,1),3> | (-3,4) | 5.54400374532 - | <(1,3),5> | (-10,0) | 6.40175425099 - | <(1,3),5> | (10,10) | 6.40175425099 - | <(5,1),3> | (10,10) | 7.29563014099 - | <(1,2),3> | (-10,0) | 8.1803398875 - | <(1,2),3> | (10,10) | 9.04159457879 - | <(1,3),5> | (-5,-12) | 11.1554944214 - | <(5,1),3> | (-10,0) | 12.0332963784 - | <(1,2),3> | (-5,-12) | 12.2315462117 - | <(5,1),3> | (-5,-12) | 13.4012194669 - | <(1,3),5> | (5.1,34.5) | 26.7657047773 - | <(1,2),3> | (5.1,34.5) | 29.7575945393 - | <(5,1),3> | (5.1,34.5) | 30.5001492534 - | <(100,200),10> | (5.1,34.5) | 180.778038568 - | <(100,200),10> | (10,10) | 200.237960416 - | <(100,200),10> | (-3,4) | 211.415898255 - | <(100,200),10> | (0,0) | 213.60679775 - | <(100,200),10> | (-10,0) | 218.25424421 - | <(100,200),10> | (-5,-12) | 226.577682802 -(21 rows) - diff --git a/src/test/regress/expected/gist.out b/src/test/regress/expected/gist.out index 91f9998140..f5a2993aaf 100644 --- a/src/test/regress/expected/gist.out +++ b/src/test/regress/expected/gist.out @@ -5,6 +5,21 @@ -- testing GiST code itself. Vacuuming in particular. create table gist_point_tbl(id int4, p point); create index gist_pointidx on gist_point_tbl using gist(p); +-- Verify the fillfactor and buffering options +create index gist_pointidx2 on gist_point_tbl using gist(p) with (buffering = on, fillfactor=50); +create index gist_pointidx3 on gist_point_tbl using gist(p) with (buffering = off); +create index gist_pointidx4 on gist_point_tbl using gist(p) with (buffering = auto); +drop index gist_pointidx2, gist_pointidx3, gist_pointidx4; +-- Make sure bad values are refused +create index gist_pointidx5 on gist_point_tbl using gist(p) with (buffering = invalid_value); +ERROR: invalid value for "buffering" option +DETAIL: Valid values are "on", "off", and "auto". +create index gist_pointidx5 on gist_point_tbl using gist(p) with (fillfactor=9); +ERROR: value 9 out of bounds for option "fillfactor" +DETAIL: Valid values are between "10" and "100". +create index gist_pointidx5 on gist_point_tbl using gist(p) with (fillfactor=101); +ERROR: value 101 out of bounds for option "fillfactor" +DETAIL: Valid values are between "10" and "100". -- Insert enough data to create a tree that's a couple of levels deep. 
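As a side note on the GiST reloptions exercised in the gist.out hunk above, here is a minimal standalone sketch of how they are used. The table and index names (gist_demo) are illustrative only and not part of the regression suite; this assumes a server carrying this patch.

    CREATE TABLE gist_demo (id int4, p point);
    -- buffering accepts on/off/auto; fillfactor must lie between 10 and 100
    CREATE INDEX gist_demo_idx ON gist_demo USING gist (p)
        WITH (buffering = auto, fillfactor = 50);
    -- ALTER INDEX only records the new fillfactor; a REINDEX rebuilds with it
    ALTER INDEX gist_demo_idx SET (fillfactor = 40);
    REINDEX INDEX gist_demo_idx;
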
insert into gist_point_tbl (id, p) select g, point(g*10, g*10) from generate_series(1, 10000) g; @@ -17,6 +32,9 @@ delete from gist_point_tbl where id % 2 = 1; -- would exercise it) delete from gist_point_tbl where id < 10000; vacuum analyze gist_point_tbl; +-- rebuild the index with a different fillfactor +alter index gist_pointidx SET (fillfactor = 40); +reindex index gist_pointidx; -- -- Test Index-only plans on GiST indexes -- diff --git a/src/test/regress/expected/groupingsets.out b/src/test/regress/expected/groupingsets.out index fd618afe60..c7deec2ff4 100644 --- a/src/test/regress/expected/groupingsets.out +++ b/src/test/regress/expected/groupingsets.out @@ -360,6 +360,80 @@ select a, d, grouping(a,b,c) 2 | 2 | 2 (4 rows) +-- check that distinct grouping columns are kept separate +-- even if they are equal() +explain (costs off) +select g as alias1, g as alias2 + from generate_series(1,3) g + group by alias1, rollup(alias2); + QUERY PLAN +------------------------------------------------ + GroupAggregate + Group Key: g, g + Group Key: g + -> Sort + Sort Key: g + -> Function Scan on generate_series g +(6 rows) + +select g as alias1, g as alias2 + from generate_series(1,3) g + group by alias1, rollup(alias2); + alias1 | alias2 +--------+-------- + 1 | 1 + 1 | + 2 | 2 + 2 | + 3 | 3 + 3 | +(6 rows) + +-- check that pulled-up subquery outputs still go to null when appropriate +select four, x + from (select four, ten, 'foo'::text as x from tenk1) as t + group by grouping sets (four, x) + having x = 'foo'; + four | x +------+----- + | foo +(1 row) + +select four, x || 'x' + from (select four, ten, 'foo'::text as x from tenk1) as t + group by grouping sets (four, x) + order by four; + four | ?column? +------+---------- + 0 | + 1 | + 2 | + 3 | + | foox +(5 rows) + +select (x+y)*1, sum(z) + from (select 1 as x, 2 as y, 3 as z) s + group by grouping sets (x+y, x); + ?column? 
| sum +----------+----- + 3 | 3 + | 3 +(2 rows) + +select x, not x as not_x, q2 from + (select *, q1 = 1 as x from int8_tbl i1) as t + group by grouping sets(x, q2) + order by x, q2; + x | not_x | q2 +---+-------+------------------- + f | t | + | | -4567890123456789 + | | 123 + | | 456 + | | 4567890123456789 +(5 rows) + -- simple rescan tests select a, b, sum(v.x) from (values (1),(2)) v(x), gstest_data(v.x) @@ -944,6 +1018,18 @@ explain (costs off) -> Values Scan on "*VALUES*" (9 rows) +-- unsortable cases +select unsortable_col, count(*) + from gstest4 group by grouping sets ((unsortable_col),(unsortable_col)) + order by unsortable_col::text; + unsortable_col | count +----------------+------- + 1 | 4 + 1 | 4 + 2 | 4 + 2 | 4 +(4 rows) + -- mixed hashable/sortable cases select unhashable_col, unsortable_col, grouping(unhashable_col, unsortable_col), @@ -1109,29 +1195,33 @@ explain (costs off) -- simple rescan tests select a, b, sum(v.x) from (values (1),(2)) v(x), gstest_data(v.x) - group by grouping sets (a,b); + group by grouping sets (a,b) + order by 1, 2, 3; a | b | sum ---+---+----- - 2 | | 6 1 | | 3 + 2 | | 6 + | 1 | 3 | 2 | 3 | 3 | 3 - | 1 | 3 (5 rows) explain (costs off) select a, b, sum(v.x) from (values (1),(2)) v(x), gstest_data(v.x) - group by grouping sets (a,b); - QUERY PLAN ------------------------------------------- - HashAggregate - Hash Key: gstest_data.a - Hash Key: gstest_data.b - -> Nested Loop - -> Values Scan on "*VALUES*" - -> Function Scan on gstest_data -(6 rows) + group by grouping sets (a,b) + order by 3, 1, 2; + QUERY PLAN +--------------------------------------------------------------------- + Sort + Sort Key: (sum("*VALUES*".column1)), gstest_data.a, gstest_data.b + -> HashAggregate + Hash Key: gstest_data.a + Hash Key: gstest_data.b + -> Nested Loop + -> Values Scan on "*VALUES*" + -> Function Scan on gstest_data +(8 rows) select * from (values (1),(2)) v(x), diff --git a/src/test/regress/expected/hash_func.out b/src/test/regress/expected/hash_func.out new file mode 100644 index 0000000000..da0948e95a --- /dev/null +++ b/src/test/regress/expected/hash_func.out @@ -0,0 +1,300 @@ +-- +-- Test hash functions +-- +-- When the salt is 0, the extended hash function should produce a result +-- whose low 32 bits match the standard hash function. When the salt is +-- not 0, we should get a different result. 
+-- +SELECT v as value, hashint2(v)::bit(32) as standard, + hashint2extended(v, 0)::bit(32) as extended0, + hashint2extended(v, 1)::bit(32) as extended1 +FROM (VALUES (0::int2), (1::int2), (17::int2), (42::int2)) x(v) +WHERE hashint2(v)::bit(32) != hashint2extended(v, 0)::bit(32) + OR hashint2(v)::bit(32) = hashint2extended(v, 1)::bit(32); + value | standard | extended0 | extended1 +-------+----------+-----------+----------- +(0 rows) + +SELECT v as value, hashint4(v)::bit(32) as standard, + hashint4extended(v, 0)::bit(32) as extended0, + hashint4extended(v, 1)::bit(32) as extended1 +FROM (VALUES (0), (1), (17), (42), (550273), (207112489)) x(v) +WHERE hashint4(v)::bit(32) != hashint4extended(v, 0)::bit(32) + OR hashint4(v)::bit(32) = hashint4extended(v, 1)::bit(32); + value | standard | extended0 | extended1 +-------+----------+-----------+----------- +(0 rows) + +SELECT v as value, hashint8(v)::bit(32) as standard, + hashint8extended(v, 0)::bit(32) as extended0, + hashint8extended(v, 1)::bit(32) as extended1 +FROM (VALUES (0), (1), (17), (42), (550273), (207112489)) x(v) +WHERE hashint8(v)::bit(32) != hashint8extended(v, 0)::bit(32) + OR hashint8(v)::bit(32) = hashint8extended(v, 1)::bit(32); + value | standard | extended0 | extended1 +-------+----------+-----------+----------- +(0 rows) + +SELECT v as value, hashfloat4(v)::bit(32) as standard, + hashfloat4extended(v, 0)::bit(32) as extended0, + hashfloat4extended(v, 1)::bit(32) as extended1 +FROM (VALUES (0), (1), (17), (42), (550273), (207112489)) x(v) +WHERE hashfloat4(v)::bit(32) != hashfloat4extended(v, 0)::bit(32) + OR hashfloat4(v)::bit(32) = hashfloat4extended(v, 1)::bit(32); + value | standard | extended0 | extended1 +-------+----------+-----------+----------- +(0 rows) + +SELECT v as value, hashfloat8(v)::bit(32) as standard, + hashfloat8extended(v, 0)::bit(32) as extended0, + hashfloat8extended(v, 1)::bit(32) as extended1 +FROM (VALUES (0), (1), (17), (42), (550273), (207112489)) x(v) +WHERE hashfloat8(v)::bit(32) != hashfloat8extended(v, 0)::bit(32) + OR hashfloat8(v)::bit(32) = hashfloat8extended(v, 1)::bit(32); + value | standard | extended0 | extended1 +-------+----------+-----------+----------- +(0 rows) + +SELECT v as value, hashoid(v)::bit(32) as standard, + hashoidextended(v, 0)::bit(32) as extended0, + hashoidextended(v, 1)::bit(32) as extended1 +FROM (VALUES (0), (1), (17), (42), (550273), (207112489)) x(v) +WHERE hashoid(v)::bit(32) != hashoidextended(v, 0)::bit(32) + OR hashoid(v)::bit(32) = hashoidextended(v, 1)::bit(32); + value | standard | extended0 | extended1 +-------+----------+-----------+----------- +(0 rows) + +SELECT v as value, hashchar(v)::bit(32) as standard, + hashcharextended(v, 0)::bit(32) as extended0, + hashcharextended(v, 1)::bit(32) as extended1 +FROM (VALUES (NULL::"char"), ('1'), ('x'), ('X'), ('p'), ('N')) x(v) +WHERE hashchar(v)::bit(32) != hashcharextended(v, 0)::bit(32) + OR hashchar(v)::bit(32) = hashcharextended(v, 1)::bit(32); + value | standard | extended0 | extended1 +-------+----------+-----------+----------- +(0 rows) + +SELECT v as value, hashname(v)::bit(32) as standard, + hashnameextended(v, 0)::bit(32) as extended0, + hashnameextended(v, 1)::bit(32) as extended1 +FROM (VALUES (NULL), ('PostgreSQL'), ('eIpUEtqmY89'), ('AXKEJBTK'), + ('muop28x03'), ('yi3nm0d73')) x(v) +WHERE hashname(v)::bit(32) != hashnameextended(v, 0)::bit(32) + OR hashname(v)::bit(32) = hashnameextended(v, 1)::bit(32); + value | standard | extended0 | extended1 +-------+----------+-----------+----------- +(0 
rows) + +SELECT v as value, hashtext(v)::bit(32) as standard, + hashtextextended(v, 0)::bit(32) as extended0, + hashtextextended(v, 1)::bit(32) as extended1 +FROM (VALUES (NULL), ('PostgreSQL'), ('eIpUEtqmY89'), ('AXKEJBTK'), + ('muop28x03'), ('yi3nm0d73')) x(v) +WHERE hashtext(v)::bit(32) != hashtextextended(v, 0)::bit(32) + OR hashtext(v)::bit(32) = hashtextextended(v, 1)::bit(32); + value | standard | extended0 | extended1 +-------+----------+-----------+----------- +(0 rows) + +SELECT v as value, hashoidvector(v)::bit(32) as standard, + hashoidvectorextended(v, 0)::bit(32) as extended0, + hashoidvectorextended(v, 1)::bit(32) as extended1 +FROM (VALUES (NULL::oidvector), ('0 1 2 3 4'), ('17 18 19 20'), + ('42 43 42 45'), ('550273 550273 570274'), + ('207112489 207112499 21512 2155 372325 1363252')) x(v) +WHERE hashoidvector(v)::bit(32) != hashoidvectorextended(v, 0)::bit(32) + OR hashoidvector(v)::bit(32) = hashoidvectorextended(v, 1)::bit(32); + value | standard | extended0 | extended1 +-------+----------+-----------+----------- +(0 rows) + +SELECT v as value, hash_aclitem(v)::bit(32) as standard, + hash_aclitem_extended(v, 0)::bit(32) as extended0, + hash_aclitem_extended(v, 1)::bit(32) as extended1 +FROM (SELECT DISTINCT(relacl[1]) FROM pg_class LIMIT 10) x(v) +WHERE hash_aclitem(v)::bit(32) != hash_aclitem_extended(v, 0)::bit(32) + OR hash_aclitem(v)::bit(32) = hash_aclitem_extended(v, 1)::bit(32); + value | standard | extended0 | extended1 +-------+----------+-----------+----------- +(0 rows) + +SELECT v as value, hashmacaddr(v)::bit(32) as standard, + hashmacaddrextended(v, 0)::bit(32) as extended0, + hashmacaddrextended(v, 1)::bit(32) as extended1 +FROM (VALUES (NULL::macaddr), ('08:00:2b:01:02:04'), ('08:00:2b:01:02:04'), + ('e2:7f:51:3e:70:49'), ('d6:a9:4a:78:1c:d5'), + ('ea:29:b1:5e:1f:a5')) x(v) +WHERE hashmacaddr(v)::bit(32) != hashmacaddrextended(v, 0)::bit(32) + OR hashmacaddr(v)::bit(32) = hashmacaddrextended(v, 1)::bit(32); + value | standard | extended0 | extended1 +-------+----------+-----------+----------- +(0 rows) + +SELECT v as value, hashinet(v)::bit(32) as standard, + hashinetextended(v, 0)::bit(32) as extended0, + hashinetextended(v, 1)::bit(32) as extended1 +FROM (VALUES (NULL::inet), ('192.168.100.128/25'), ('192.168.100.0/8'), + ('172.168.10.126/16'), ('172.18.103.126/24'), ('192.188.13.16/32')) x(v) +WHERE hashinet(v)::bit(32) != hashinetextended(v, 0)::bit(32) + OR hashinet(v)::bit(32) = hashinetextended(v, 1)::bit(32); + value | standard | extended0 | extended1 +-------+----------+-----------+----------- +(0 rows) + +SELECT v as value, hash_numeric(v)::bit(32) as standard, + hash_numeric_extended(v, 0)::bit(32) as extended0, + hash_numeric_extended(v, 1)::bit(32) as extended1 +FROM (VALUES (0), (1.149484958), (17.149484958), (42.149484958), + (149484958.550273), (2071124898672)) x(v) +WHERE hash_numeric(v)::bit(32) != hash_numeric_extended(v, 0)::bit(32) + OR hash_numeric(v)::bit(32) = hash_numeric_extended(v, 1)::bit(32); + value | standard | extended0 | extended1 +-------+----------+-----------+----------- +(0 rows) + +SELECT v as value, hashmacaddr8(v)::bit(32) as standard, + hashmacaddr8extended(v, 0)::bit(32) as extended0, + hashmacaddr8extended(v, 1)::bit(32) as extended1 +FROM (VALUES (NULL::macaddr8), ('08:00:2b:01:02:04:36:49'), + ('08:00:2b:01:02:04:f0:e8'), ('e2:7f:51:3e:70:49:16:29'), + ('d6:a9:4a:78:1c:d5:47:32'), ('ea:29:b1:5e:1f:a5')) x(v) +WHERE hashmacaddr8(v)::bit(32) != hashmacaddr8extended(v, 0)::bit(32) + OR hashmacaddr8(v)::bit(32) = 
hashmacaddr8extended(v, 1)::bit(32); + value | standard | extended0 | extended1 +-------+----------+-----------+----------- +(0 rows) + +SELECT v as value, hash_array(v)::bit(32) as standard, + hash_array_extended(v, 0)::bit(32) as extended0, + hash_array_extended(v, 1)::bit(32) as extended1 +FROM (VALUES ('{0}'::int4[]), ('{0,1,2,3,4}'), ('{17,18,19,20}'), + ('{42,34,65,98}'), ('{550273,590027, 870273}'), + ('{207112489, 807112489}')) x(v) +WHERE hash_array(v)::bit(32) != hash_array_extended(v, 0)::bit(32) + OR hash_array(v)::bit(32) = hash_array_extended(v, 1)::bit(32); + value | standard | extended0 | extended1 +-------+----------+-----------+----------- +(0 rows) + +SELECT v as value, hashbpchar(v)::bit(32) as standard, + hashbpcharextended(v, 0)::bit(32) as extended0, + hashbpcharextended(v, 1)::bit(32) as extended1 +FROM (VALUES (NULL), ('PostgreSQL'), ('eIpUEtqmY89'), ('AXKEJBTK'), + ('muop28x03'), ('yi3nm0d73')) x(v) +WHERE hashbpchar(v)::bit(32) != hashbpcharextended(v, 0)::bit(32) + OR hashbpchar(v)::bit(32) = hashbpcharextended(v, 1)::bit(32); + value | standard | extended0 | extended1 +-------+----------+-----------+----------- +(0 rows) + +SELECT v as value, time_hash(v)::bit(32) as standard, + time_hash_extended(v, 0)::bit(32) as extended0, + time_hash_extended(v, 1)::bit(32) as extended1 +FROM (VALUES (NULL::time), ('11:09:59'), ('1:09:59'), ('11:59:59'), + ('7:9:59'), ('5:15:59')) x(v) +WHERE time_hash(v)::bit(32) != time_hash_extended(v, 0)::bit(32) + OR time_hash(v)::bit(32) = time_hash_extended(v, 1)::bit(32); + value | standard | extended0 | extended1 +-------+----------+-----------+----------- +(0 rows) + +SELECT v as value, timetz_hash(v)::bit(32) as standard, + timetz_hash_extended(v, 0)::bit(32) as extended0, + timetz_hash_extended(v, 1)::bit(32) as extended1 +FROM (VALUES (NULL::timetz), ('00:11:52.518762-07'), ('00:11:52.51762-08'), + ('00:11:52.62-01'), ('00:11:52.62+01'), ('11:59:59+04')) x(v) +WHERE timetz_hash(v)::bit(32) != timetz_hash_extended(v, 0)::bit(32) + OR timetz_hash(v)::bit(32) = timetz_hash_extended(v, 1)::bit(32); + value | standard | extended0 | extended1 +-------+----------+-----------+----------- +(0 rows) + +SELECT v as value, interval_hash(v)::bit(32) as standard, + interval_hash_extended(v, 0)::bit(32) as extended0, + interval_hash_extended(v, 1)::bit(32) as extended1 +FROM (VALUES (NULL::interval), + ('5 month 7 day 46 minutes'), ('1 year 7 day 46 minutes'), + ('1 year 7 month 20 day 46 minutes'), ('5 month'), + ('17 year 11 month 7 day 9 hours 46 minutes 5 seconds')) x(v) +WHERE interval_hash(v)::bit(32) != interval_hash_extended(v, 0)::bit(32) + OR interval_hash(v)::bit(32) = interval_hash_extended(v, 1)::bit(32); + value | standard | extended0 | extended1 +-------+----------+-----------+----------- +(0 rows) + +SELECT v as value, timestamp_hash(v)::bit(32) as standard, + timestamp_hash_extended(v, 0)::bit(32) as extended0, + timestamp_hash_extended(v, 1)::bit(32) as extended1 +FROM (VALUES (NULL::timestamp), ('2017-08-22 00:09:59.518762'), + ('2015-08-20 00:11:52.51762-08'), + ('2017-05-22 00:11:52.62-01'), + ('2013-08-22 00:11:52.62+01'), ('2013-08-22 11:59:59+04')) x(v) +WHERE timestamp_hash(v)::bit(32) != timestamp_hash_extended(v, 0)::bit(32) + OR timestamp_hash(v)::bit(32) = timestamp_hash_extended(v, 1)::bit(32); + value | standard | extended0 | extended1 +-------+----------+-----------+----------- +(0 rows) + +SELECT v as value, uuid_hash(v)::bit(32) as standard, + uuid_hash_extended(v, 0)::bit(32) as extended0, + 
uuid_hash_extended(v, 1)::bit(32) as extended1 +FROM (VALUES (NULL::uuid), ('a0eebc99-9c0b-4ef8-bb6d-6bb9bd380a11'), + ('5a9ba4ac-8d6f-11e7-bb31-be2e44b06b34'), + ('99c6705c-d939-461c-a3c9-1690ad64ed7b'), + ('7deed3ca-8d6f-11e7-bb31-be2e44b06b34'), + ('9ad46d4f-6f2a-4edd-aadb-745993928e1e')) x(v) +WHERE uuid_hash(v)::bit(32) != uuid_hash_extended(v, 0)::bit(32) + OR uuid_hash(v)::bit(32) = uuid_hash_extended(v, 1)::bit(32); + value | standard | extended0 | extended1 +-------+----------+-----------+----------- +(0 rows) + +SELECT v as value, pg_lsn_hash(v)::bit(32) as standard, + pg_lsn_hash_extended(v, 0)::bit(32) as extended0, + pg_lsn_hash_extended(v, 1)::bit(32) as extended1 +FROM (VALUES (NULL::pg_lsn), ('16/B374D84'), ('30/B374D84'), + ('255/B374D84'), ('25/B379D90'), ('900/F37FD90')) x(v) +WHERE pg_lsn_hash(v)::bit(32) != pg_lsn_hash_extended(v, 0)::bit(32) + OR pg_lsn_hash(v)::bit(32) = pg_lsn_hash_extended(v, 1)::bit(32); + value | standard | extended0 | extended1 +-------+----------+-----------+----------- +(0 rows) + +CREATE TYPE mood AS ENUM ('sad', 'ok', 'happy'); +SELECT v as value, hashenum(v)::bit(32) as standard, + hashenumextended(v, 0)::bit(32) as extended0, + hashenumextended(v, 1)::bit(32) as extended1 +FROM (VALUES ('sad'::mood), ('ok'), ('happy')) x(v) +WHERE hashenum(v)::bit(32) != hashenumextended(v, 0)::bit(32) + OR hashenum(v)::bit(32) = hashenumextended(v, 1)::bit(32); + value | standard | extended0 | extended1 +-------+----------+-----------+----------- +(0 rows) + +DROP TYPE mood; +SELECT v as value, jsonb_hash(v)::bit(32) as standard, + jsonb_hash_extended(v, 0)::bit(32) as extended0, + jsonb_hash_extended(v, 1)::bit(32) as extended1 +FROM (VALUES (NULL::jsonb), + ('{"a": "aaa bbb ddd ccc", "b": ["eee fff ggg"], "c": {"d": "hhh iii"}}'), + ('{"foo": [true, "bar"], "tags": {"e": 1, "f": null}}'), + ('{"g": {"h": "value"}}')) x(v) +WHERE jsonb_hash(v)::bit(32) != jsonb_hash_extended(v, 0)::bit(32) + OR jsonb_hash(v)::bit(32) = jsonb_hash_extended(v, 1)::bit(32); + value | standard | extended0 | extended1 +-------+----------+-----------+----------- +(0 rows) + +SELECT v as value, hash_range(v)::bit(32) as standard, + hash_range_extended(v, 0)::bit(32) as extended0, + hash_range_extended(v, 1)::bit(32) as extended1 +FROM (VALUES (int4range(10, 20)), (int4range(23, 43)), + (int4range(5675, 550273)), + (int4range(550274, 1550274)), (int4range(1550275, 208112489))) x(v) +WHERE hash_range(v)::bit(32) != hash_range_extended(v, 0)::bit(32) + OR hash_range(v)::bit(32) = hash_range_extended(v, 1)::bit(32); + value | standard | extended0 | extended1 +-------+----------+-----------+----------- +(0 rows) + diff --git a/src/test/regress/expected/hash_index.out b/src/test/regress/expected/hash_index.out index 0bbaa2a768..e23de21b41 100644 --- a/src/test/regress/expected/hash_index.out +++ b/src/test/regress/expected/hash_index.out @@ -217,6 +217,9 @@ END; DELETE FROM hash_split_heap WHERE keycol = 1; INSERT INTO hash_split_heap SELECT a/2 FROM generate_series(1, 25000) a; VACUUM hash_split_heap; +-- Rebuild the index using a different fillfactor +ALTER INDEX hash_split_index SET (fillfactor = 10); +REINDEX INDEX hash_split_index; -- Clean up. DROP TABLE hash_split_heap; -- Index on temp table. 
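All of the hash_func.out checks above follow one pattern: with salt 0 the extended hash must agree with the 32-bit hash in its low 32 bits, and a non-zero salt must change the result. A minimal sketch of that pattern outside the regression harness (the constant and aliases are illustrative):

    SELECT hashint4(42)::bit(32)            AS standard,
           hashint4extended(42, 0)::bit(32) AS extended0,  -- should equal standard
           hashint4extended(42, 1)::bit(32) AS extended1;  -- should differ
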
@@ -229,3 +232,12 @@ CREATE TABLE hash_heap_float4 (x float4, y int); INSERT INTO hash_heap_float4 VALUES (1.1,1); CREATE INDEX hash_idx ON hash_heap_float4 USING hash (x); DROP TABLE hash_heap_float4 CASCADE; +-- Test out-of-range fillfactor values +CREATE INDEX hash_f8_index2 ON hash_f8_heap USING hash (random float8_ops) + WITH (fillfactor=9); +ERROR: value 9 out of bounds for option "fillfactor" +DETAIL: Valid values are between "10" and "100". +CREATE INDEX hash_f8_index2 ON hash_f8_heap USING hash (random float8_ops) + WITH (fillfactor=101); +ERROR: value 101 out of bounds for option "fillfactor" +DETAIL: Valid values are between "10" and "100". diff --git a/src/test/regress/expected/hash_part.out b/src/test/regress/expected/hash_part.out new file mode 100644 index 0000000000..731d26fc3d --- /dev/null +++ b/src/test/regress/expected/hash_part.out @@ -0,0 +1,104 @@ +-- +-- Hash partitioning. +-- +-- Use hand-rolled hash functions and operator classes to get predictable +-- result on different matchines. See the definitions of +-- part_part_test_int4_ops and part_test_text_ops in insert.sql. +CREATE TABLE mchash (a int, b text, c jsonb) + PARTITION BY HASH (a part_test_int4_ops, b part_test_text_ops); +CREATE TABLE mchash1 + PARTITION OF mchash FOR VALUES WITH (MODULUS 4, REMAINDER 0); +-- invalid OID, no such table +SELECT satisfies_hash_partition(0, 4, 0, NULL); + satisfies_hash_partition +-------------------------- + +(1 row) + +-- not partitioned +SELECT satisfies_hash_partition('tenk1'::regclass, 4, 0, NULL); +ERROR: "tenk1" is not a hash partitioned table +-- partition rather than the parent +SELECT satisfies_hash_partition('mchash1'::regclass, 4, 0, NULL); +ERROR: "mchash1" is not a hash partitioned table +-- invalid modulus +SELECT satisfies_hash_partition('mchash'::regclass, 0, 0, NULL); +ERROR: modulus for hash partition must be a positive integer +-- remainder too small +SELECT satisfies_hash_partition('mchash'::regclass, 1, -1, NULL); +ERROR: remainder for hash partition must be a non-negative integer +-- remainder too large +SELECT satisfies_hash_partition('mchash'::regclass, 1, 1, NULL); +ERROR: remainder for hash partition must be less than modulus +-- modulus is null +SELECT satisfies_hash_partition('mchash'::regclass, NULL, 0, NULL); + satisfies_hash_partition +-------------------------- + +(1 row) + +-- remainder is null +SELECT satisfies_hash_partition('mchash'::regclass, 4, NULL, NULL); + satisfies_hash_partition +-------------------------- + +(1 row) + +-- too many arguments +SELECT satisfies_hash_partition('mchash'::regclass, 4, 0, NULL::int, NULL::text, NULL::json); +ERROR: number of partitioning columns (2) does not match number of partition keys provided (3) +-- too few arguments +SELECT satisfies_hash_partition('mchash'::regclass, 3, 1, NULL::int); +ERROR: number of partitioning columns (2) does not match number of partition keys provided (1) +-- wrong argument type +SELECT satisfies_hash_partition('mchash'::regclass, 2, 1, NULL::int, NULL::int); +ERROR: column 2 of the partition key has type "text", but supplied value is of type "integer" +-- ok, should be false +SELECT satisfies_hash_partition('mchash'::regclass, 4, 0, 0, ''::text); + satisfies_hash_partition +-------------------------- + f +(1 row) + +-- ok, should be true +SELECT satisfies_hash_partition('mchash'::regclass, 4, 0, 2, ''::text); + satisfies_hash_partition +-------------------------- + t +(1 row) + +-- argument via variadic syntax, should fail because not all partitioning +-- columns are of 
the correct type +SELECT satisfies_hash_partition('mchash'::regclass, 2, 1, + variadic array[1,2]::int[]); +ERROR: column 2 of the partition key has type "text", but supplied value is of type "integer" +-- multiple partitioning columns of the same type +CREATE TABLE mcinthash (a int, b int, c jsonb) + PARTITION BY HASH (a part_test_int4_ops, b part_test_int4_ops); +-- now variadic should work, should be false +SELECT satisfies_hash_partition('mcinthash'::regclass, 4, 0, + variadic array[0, 0]); + satisfies_hash_partition +-------------------------- + f +(1 row) + +-- should be true +SELECT satisfies_hash_partition('mcinthash'::regclass, 4, 0, + variadic array[0, 1]); + satisfies_hash_partition +-------------------------- + t +(1 row) + +-- wrong length +SELECT satisfies_hash_partition('mcinthash'::regclass, 4, 0, + variadic array[]::int[]); +ERROR: number of partitioning columns (2) does not match number of partition keys provided (0) +-- wrong type +SELECT satisfies_hash_partition('mcinthash'::regclass, 4, 0, + variadic array[now(), now()]); +ERROR: column 1 of the partition key has type "integer", but supplied value is of type "timestamp with time zone" +-- cleanup +DROP TABLE mchash; +DROP TABLE mcinthash; diff --git a/src/test/regress/expected/horology.out b/src/test/regress/expected/horology.out index f9d12e0f8a..b2b4577333 100644 --- a/src/test/regress/expected/horology.out +++ b/src/test/regress/expected/horology.out @@ -321,7 +321,7 @@ SELECT date '1991-02-03' - time with time zone '04:05:06 UTC' AS "Subtract Time ERROR: operator does not exist: date - time with time zone LINE 1: SELECT date '1991-02-03' - time with time zone '04:05:06 UTC... ^ -HINT: No operator matches the given name and argument type(s). You might need to add explicit type casts. +HINT: No operator matches the given name and argument types. You might need to add explicit type casts. -- -- timestamp, interval arithmetic -- @@ -2046,70 +2046,6 @@ SELECT '' AS "226", d1.f1 AS timestamp1, d2.f1 AS timestamp2, d1.f1 - d2.f1 AS d | Sat Sep 22 18:19:20 2001 PDT | Sat Sep 22 18:19:20 2001 PDT | @ 0 (256 rows) --- --- abstime, reltime arithmetic --- -SELECT '' AS ten, ABSTIME_TBL.f1 AS abstime, RELTIME_TBL.f1 AS reltime - FROM ABSTIME_TBL, RELTIME_TBL - WHERE (ABSTIME_TBL.f1 + RELTIME_TBL.f1) < abstime 'Jan 14 14:00:00 1971' - ORDER BY abstime, reltime; - ten | abstime | reltime ------+------------------------------+--------------- - | Sat May 10 23:59:12 1947 PST | @ 14 secs ago - | Sat May 10 23:59:12 1947 PST | @ 1 min - | Sat May 10 23:59:12 1947 PST | @ 5 hours - | Sat May 10 23:59:12 1947 PST | @ 10 days - | Sat May 10 23:59:12 1947 PST | @ 3 mons - | Wed Dec 31 16:00:00 1969 PST | @ 14 secs ago - | Wed Dec 31 16:00:00 1969 PST | @ 1 min - | Wed Dec 31 16:00:00 1969 PST | @ 5 hours - | Wed Dec 31 16:00:00 1969 PST | @ 10 days - | Wed Dec 31 16:00:00 1969 PST | @ 3 mons -(10 rows) - --- these four queries should return the same answer --- the "infinity" and "-infinity" tuples in ABSTIME_TBL cannot be added and --- therefore, should not show up in the results. 
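Stepping back to the hash_part.out tests above, a minimal hash-partitioning sketch with illustrative names follows. It relies on the default integer hash opclass rather than the hand-rolled part_test_* ones, so which rows land in which partition is not guaranteed to be portable across builds.

    CREATE TABLE hp_demo (id int) PARTITION BY HASH (id);
    CREATE TABLE hp_demo_p0 PARTITION OF hp_demo FOR VALUES WITH (MODULUS 2, REMAINDER 0);
    CREATE TABLE hp_demo_p1 PARTITION OF hp_demo FOR VALUES WITH (MODULUS 2, REMAINDER 1);
    INSERT INTO hp_demo SELECT g FROM generate_series(1, 10) g;
    -- reports whether key value 1 would be routed to the partition with
    -- modulus 2, remainder 0
    SELECT satisfies_hash_partition('hp_demo'::regclass, 2, 0, 1);
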
-SELECT '' AS three, * FROM ABSTIME_TBL - WHERE (ABSTIME_TBL.f1 + reltime '@ 3 year') -- +3 years - < abstime 'Jan 14 14:00:00 1977'; - three | f1 --------+------------------------------ - | Sun Jan 14 03:14:21 1973 PST - | Wed Dec 31 16:00:00 1969 PST - | Sat May 10 23:59:12 1947 PST -(3 rows) - -SELECT '' AS three, * FROM ABSTIME_TBL - WHERE (ABSTIME_TBL.f1 + reltime '@ 3 year ago') -- -3 years - < abstime 'Jan 14 14:00:00 1971'; - three | f1 --------+------------------------------ - | Sun Jan 14 03:14:21 1973 PST - | Wed Dec 31 16:00:00 1969 PST - | Sat May 10 23:59:12 1947 PST -(3 rows) - -SELECT '' AS three, * FROM ABSTIME_TBL - WHERE (ABSTIME_TBL.f1 - reltime '@ 3 year') -- -(+3) years - < abstime 'Jan 14 14:00:00 1971'; - three | f1 --------+------------------------------ - | Sun Jan 14 03:14:21 1973 PST - | Wed Dec 31 16:00:00 1969 PST - | Sat May 10 23:59:12 1947 PST -(3 rows) - -SELECT '' AS three, * FROM ABSTIME_TBL - WHERE (ABSTIME_TBL.f1 - reltime '@ 3 year ago') -- -(-3) years - < abstime 'Jan 14 14:00:00 1977'; - three | f1 --------+------------------------------ - | Sun Jan 14 03:14:21 1973 PST - | Wed Dec 31 16:00:00 1969 PST - | Sat May 10 23:59:12 1947 PST -(3 rows) - -- -- Conversions -- @@ -2137,80 +2073,6 @@ SELECT '' AS "16", f1 AS "timestamp", date(f1) AS date | Sat Sep 22 18:19:20 2001 PDT | 09-22-2001 (16 rows) -SELECT '' AS "16", f1 AS "timestamp", abstime(f1) AS abstime - FROM TEMP_TIMESTAMP - ORDER BY abstime; - 16 | timestamp | abstime -----+------------------------------+------------------------------ - | Thu Jan 01 00:00:00 1970 PST | Thu Jan 01 00:00:00 1970 PST - | Wed Feb 28 17:32:01 1996 PST | Wed Feb 28 17:32:01 1996 PST - | Thu Feb 29 17:32:01 1996 PST | Thu Feb 29 17:32:01 1996 PST - | Fri Mar 01 17:32:01 1996 PST | Fri Mar 01 17:32:01 1996 PST - | Mon Dec 30 17:32:01 1996 PST | Mon Dec 30 17:32:01 1996 PST - | Tue Dec 31 17:32:01 1996 PST | Tue Dec 31 17:32:01 1996 PST - | Fri Dec 31 17:32:01 1999 PST | Fri Dec 31 17:32:01 1999 PST - | Sat Jan 01 17:32:01 2000 PST | Sat Jan 01 17:32:01 2000 PST - | Wed Mar 15 02:14:05 2000 PST | Wed Mar 15 02:14:05 2000 PST - | Wed Mar 15 03:14:04 2000 PST | Wed Mar 15 03:14:04 2000 PST - | Wed Mar 15 08:14:01 2000 PST | Wed Mar 15 08:14:01 2000 PST - | Wed Mar 15 12:14:03 2000 PST | Wed Mar 15 12:14:03 2000 PST - | Wed Mar 15 13:14:02 2000 PST | Wed Mar 15 13:14:02 2000 PST - | Sun Dec 31 17:32:01 2000 PST | Sun Dec 31 17:32:01 2000 PST - | Mon Jan 01 17:32:01 2001 PST | Mon Jan 01 17:32:01 2001 PST - | Sat Sep 22 18:19:20 2001 PDT | Sat Sep 22 18:19:20 2001 PDT -(16 rows) - -SELECT '' AS four, f1 AS abstime, date(f1) AS date - FROM ABSTIME_TBL - WHERE isfinite(f1) AND f1 <> abstime 'now' - ORDER BY date, abstime; - four | abstime | date -------+------------------------------+------------ - | Sat May 10 23:59:12 1947 PST | 05-10-1947 - | Wed Dec 31 16:00:00 1969 PST | 12-31-1969 - | Sun Jan 14 03:14:21 1973 PST | 01-14-1973 - | Mon May 01 00:30:30 1995 PDT | 05-01-1995 -(4 rows) - -SELECT '' AS two, d1 AS "timestamp", abstime(d1) AS abstime - FROM TIMESTAMP_TBL WHERE NOT isfinite(d1); - two | timestamp | abstime ------+-----------+----------- - | -infinity | -infinity - | infinity | infinity -(2 rows) - -SELECT '' AS three, f1 as abstime, cast(f1 as timestamp) AS "timestamp" - FROM ABSTIME_TBL WHERE NOT isfinite(f1); -ERROR: cannot convert abstime "invalid" to timestamp -SELECT '' AS ten, f1 AS interval, reltime(f1) AS reltime - FROM INTERVAL_TBL; - ten | interval | reltime 
------+-------------------------------+------------------------------- - | @ 1 min | @ 1 min - | @ 5 hours | @ 5 hours - | @ 10 days | @ 10 days - | @ 34 years | @ 34 years - | @ 3 mons | @ 3 mons - | @ 14 secs ago | @ 14 secs ago - | @ 1 day 2 hours 3 mins 4 secs | @ 1 day 2 hours 3 mins 4 secs - | @ 6 years | @ 6 years - | @ 5 mons | @ 5 mons - | @ 5 mons 12 hours | @ 5 mons 12 hours -(10 rows) - -SELECT '' AS six, f1 as reltime, CAST(f1 AS interval) AS interval - FROM RELTIME_TBL; - six | reltime | interval ------+---------------+--------------- - | @ 1 min | @ 1 min - | @ 5 hours | @ 5 hours - | @ 10 days | @ 10 days - | @ 34 years | @ 34 years - | @ 3 mons | @ 3 mons - | @ 14 secs ago | @ 14 secs ago -(6 rows) - DROP TABLE TEMP_TIMESTAMP; -- -- Formats @@ -2292,18 +2154,6 @@ SELECT '' AS "64", d1 AS us_postgres FROM TIMESTAMP_TBL; | Mon Jan 01 17:32:01 2001 (65 rows) -SELECT '' AS seven, f1 AS us_postgres FROM ABSTIME_TBL; - seven | us_postgres --------+------------------------------ - | Sun Jan 14 03:14:21 1973 PST - | Mon May 01 00:30:30 1995 PDT - | Wed Dec 31 16:00:00 1969 PST - | infinity - | -infinity - | Sat May 10 23:59:12 1947 PST - | invalid -(7 rows) - SET DateStyle TO 'US,ISO'; SELECT '' AS "64", d1 AS us_iso FROM TIMESTAMP_TBL; 64 | us_iso @@ -2375,18 +2225,6 @@ SELECT '' AS "64", d1 AS us_iso FROM TIMESTAMP_TBL; | 2001-01-01 17:32:01 (65 rows) -SELECT '' AS seven, f1 AS us_iso FROM ABSTIME_TBL; - seven | us_iso --------+------------------------ - | 1973-01-14 03:14:21-08 - | 1995-05-01 00:30:30-07 - | 1969-12-31 16:00:00-08 - | infinity - | -infinity - | 1947-05-10 23:59:12-08 - | invalid -(7 rows) - SET DateStyle TO 'US,SQL'; SHOW DateStyle; DateStyle @@ -2464,18 +2302,6 @@ SELECT '' AS "64", d1 AS us_sql FROM TIMESTAMP_TBL; | 01/01/2001 17:32:01 (65 rows) -SELECT '' AS seven, f1 AS us_sql FROM ABSTIME_TBL; - seven | us_sql --------+------------------------- - | 01/14/1973 03:14:21 PST - | 05/01/1995 00:30:30 PDT - | 12/31/1969 16:00:00 PST - | infinity - | -infinity - | 05/10/1947 23:59:12 PST - | invalid -(7 rows) - SET DateStyle TO 'European,Postgres'; SHOW DateStyle; DateStyle @@ -2561,18 +2387,6 @@ SELECT '' AS "65", d1 AS european_postgres FROM TIMESTAMP_TBL; | Thu 13 Jun 00:00:00 1957 (66 rows) -SELECT '' AS seven, f1 AS european_postgres FROM ABSTIME_TBL; - seven | european_postgres --------+------------------------------ - | Sun 14 Jan 03:14:21 1973 PST - | Mon 01 May 00:30:30 1995 PDT - | Wed 31 Dec 16:00:00 1969 PST - | infinity - | -infinity - | Sat 10 May 23:59:12 1947 PST - | invalid -(7 rows) - SET DateStyle TO 'European,ISO'; SHOW DateStyle; DateStyle @@ -2651,18 +2465,6 @@ SELECT '' AS "65", d1 AS european_iso FROM TIMESTAMP_TBL; | 1957-06-13 00:00:00 (66 rows) -SELECT '' AS seven, f1 AS european_iso FROM ABSTIME_TBL; - seven | european_iso --------+------------------------ - | 1973-01-14 03:14:21-08 - | 1995-05-01 00:30:30-07 - | 1969-12-31 16:00:00-08 - | infinity - | -infinity - | 1947-05-10 23:59:12-08 - | invalid -(7 rows) - SET DateStyle TO 'European,SQL'; SHOW DateStyle; DateStyle @@ -2741,18 +2543,6 @@ SELECT '' AS "65", d1 AS european_sql FROM TIMESTAMP_TBL; | 13/06/1957 00:00:00 (66 rows) -SELECT '' AS seven, f1 AS european_sql FROM ABSTIME_TBL; - seven | european_sql --------+------------------------- - | 14/01/1973 03:14:21 PST - | 01/05/1995 00:30:30 PDT - | 31/12/1969 16:00:00 PST - | infinity - | -infinity - | 10/05/1947 23:59:12 PST - | invalid -(7 rows) - RESET DateStyle; -- -- to_timestamp() @@ -2769,14 +2559,32 @@ SELECT 
to_timestamp('97/2/16 8:14:30', 'FMYYYY/FMMM/FMDD FMHH:FMMI:FMSS'); Sat Feb 16 08:14:30 0097 PST (1 row) +SELECT to_timestamp('2011$03!18 23_38_15', 'YYYY-MM-DD HH24:MI:SS'); + to_timestamp +------------------------------ + Fri Mar 18 23:38:15 2011 PDT +(1 row) + SELECT to_timestamp('1985 January 12', 'YYYY FMMonth DD'); to_timestamp ------------------------------ Sat Jan 12 00:00:00 1985 PST (1 row) +SELECT to_timestamp('1985 FMMonth 12', 'YYYY "FMMonth" DD'); + to_timestamp +------------------------------ + Sat Jan 12 00:00:00 1985 PST +(1 row) + +SELECT to_timestamp('1985 \ 12', 'YYYY \\ DD'); + to_timestamp +------------------------------ + Sat Jan 12 00:00:00 1985 PST +(1 row) + SELECT to_timestamp('My birthday-> Year: 1976, Month: May, Day: 16', - '"My birthday-> Year" YYYY, "Month:" FMMonth, "Day:" DD'); + '"My birthday-> Year:" YYYY, "Month:" FMMonth, "Day:" DD'); to_timestamp ------------------------------ Sun May 16 00:00:00 1976 PDT @@ -2789,7 +2597,7 @@ SELECT to_timestamp('1,582nd VIII 21', 'Y,YYYth FMRM DD'); (1 row) SELECT to_timestamp('15 "text between quote marks" 98 54 45', - E'HH24 "\\text between quote marks\\"" YY MI SS'); + E'HH24 "\\"text between quote marks\\"" YY MI SS'); to_timestamp ------------------------------ Thu Jan 01 15:54:45 1998 PST @@ -2810,6 +2618,24 @@ SELECT to_timestamp('2000January09Sunday', 'YYYYFMMonthDDFMDay'); SELECT to_timestamp('97/Feb/16', 'YYMonDD'); ERROR: invalid value "/Fe" for "Mon" DETAIL: The given value did not match any of the allowed values for this field. +SELECT to_timestamp('97/Feb/16', 'YY:Mon:DD'); + to_timestamp +------------------------------ + Sun Feb 16 00:00:00 1997 PST +(1 row) + +SELECT to_timestamp('97/Feb/16', 'FXYY:Mon:DD'); + to_timestamp +------------------------------ + Sun Feb 16 00:00:00 1997 PST +(1 row) + +SELECT to_timestamp('97/Feb/16', 'FXYY/Mon/DD'); + to_timestamp +------------------------------ + Sun Feb 16 00:00:00 1997 PST +(1 row) + SELECT to_timestamp('19971116', 'YYYYMMDD'); to_timestamp ------------------------------ @@ -2930,13 +2756,43 @@ SELECT to_timestamp('2011-12-18 11:38 PM', 'YYYY-MM-DD HH12:MI PM'); Sun Dec 18 23:38:00 2011 PST (1 row) +SELECT to_timestamp('2011-12-18 11:38 +05', 'YYYY-MM-DD HH12:MI TZH'); + to_timestamp +------------------------------ + Sat Dec 17 22:38:00 2011 PST +(1 row) + +SELECT to_timestamp('2011-12-18 11:38 -05', 'YYYY-MM-DD HH12:MI TZH'); + to_timestamp +------------------------------ + Sun Dec 18 08:38:00 2011 PST +(1 row) + +SELECT to_timestamp('2011-12-18 11:38 +05:20', 'YYYY-MM-DD HH12:MI TZH:TZM'); + to_timestamp +------------------------------ + Sat Dec 17 22:18:00 2011 PST +(1 row) + +SELECT to_timestamp('2011-12-18 11:38 -05:20', 'YYYY-MM-DD HH12:MI TZH:TZM'); + to_timestamp +------------------------------ + Sun Dec 18 08:58:00 2011 PST +(1 row) + +SELECT to_timestamp('2011-12-18 11:38 20', 'YYYY-MM-DD HH12:MI TZM'); + to_timestamp +------------------------------ + Sun Dec 18 03:18:00 2011 PST +(1 row) + -- -- Check handling of multiple spaces in format and/or input -- SELECT to_timestamp('2011-12-18 23:38:15', 'YYYY-MM-DD HH24:MI:SS'); to_timestamp ------------------------------ - Sun Dec 18 03:38:15 2011 PST + Sun Dec 18 23:38:15 2011 PST (1 row) SELECT to_timestamp('2011-12-18 23:38:15', 'YYYY-MM-DD HH24:MI:SS'); @@ -2966,7 +2822,64 @@ SELECT to_timestamp('2011-12-18 23:38:15', 'YYYY-MM-DD HH24:MI:SS'); SELECT to_timestamp('2011-12-18 23:38:15', 'YYYY-MM-DD HH24:MI:SS'); to_timestamp ------------------------------ - Sun Dec 18 03:38:15 2011 PST + Sun 
Dec 18 23:38:15 2011 PST +(1 row) + +SELECT to_timestamp('2000+ JUN', 'YYYY/MON'); + to_timestamp +------------------------------ + Thu Jun 01 00:00:00 2000 PDT +(1 row) + +SELECT to_timestamp(' 2000 +JUN', 'YYYY/MON'); + to_timestamp +------------------------------ + Thu Jun 01 00:00:00 2000 PDT +(1 row) + +SELECT to_timestamp(' 2000 +JUN', 'YYYY//MON'); + to_timestamp +------------------------------ + Thu Jun 01 00:00:00 2000 PDT +(1 row) + +SELECT to_timestamp('2000 +JUN', 'YYYY//MON'); + to_timestamp +------------------------------ + Thu Jun 01 00:00:00 2000 PDT +(1 row) + +SELECT to_timestamp('2000 + JUN', 'YYYY MON'); + to_timestamp +------------------------------ + Thu Jun 01 00:00:00 2000 PDT +(1 row) + +SELECT to_timestamp('2000 ++ JUN', 'YYYY MON'); + to_timestamp +------------------------------ + Thu Jun 01 00:00:00 2000 PDT +(1 row) + +SELECT to_timestamp('2000 + + JUN', 'YYYY MON'); +ERROR: invalid value "+ J" for "MON" +DETAIL: The given value did not match any of the allowed values for this field. +SELECT to_timestamp('2000 + + JUN', 'YYYY MON'); + to_timestamp +------------------------------ + Thu Jun 01 00:00:00 2000 PDT +(1 row) + +SELECT to_timestamp('2000 -10', 'YYYY TZH'); + to_timestamp +------------------------------ + Sat Jan 01 02:00:00 2000 PST +(1 row) + +SELECT to_timestamp('2000 -10', 'YYYY TZH'); + to_timestamp +------------------------------ + Fri Dec 31 06:00:00 1999 PST (1 row) SELECT to_date('2011 12 18', 'YYYY MM DD'); @@ -2984,13 +2897,13 @@ SELECT to_date('2011 12 18', 'YYYY MM DD'); SELECT to_date('2011 12 18', 'YYYY MM DD'); to_date ------------ - 12-08-2011 + 12-18-2011 (1 row) SELECT to_date('2011 12 18', 'YYYY MM DD'); to_date ------------ - 02-18-2011 + 12-18-2011 (1 row) SELECT to_date('2011 12 18', 'YYYY MM DD'); @@ -3005,6 +2918,21 @@ SELECT to_date('2011 12 18', 'YYYY MM DD'); 12-18-2011 (1 row) +SELECT to_date('2011 12 18', 'YYYYxMMxDD'); + to_date +------------ + 12-18-2011 +(1 row) + +SELECT to_date('2011x 12x 18', 'YYYYxMMxDD'); + to_date +------------ + 12-18-2011 +(1 row) + +SELECT to_date('2011 x12 x18', 'YYYYxMMxDD'); +ERROR: invalid value "x1" for "MM" +DETAIL: Value must be an integer. -- -- Check errors for some incorrect usages of to_timestamp() and to_date() -- diff --git a/src/test/regress/expected/identity.out b/src/test/regress/expected/identity.out index 88b56dad93..d7d5178f5d 100644 --- a/src/test/regress/expected/identity.out +++ b/src/test/regress/expected/identity.out @@ -26,6 +26,19 @@ SELECT sequence_name FROM information_schema.sequences WHERE sequence_name LIKE --------------- (0 rows) +SELECT pg_get_serial_sequence('itest1', 'a'); + pg_get_serial_sequence +------------------------ + public.itest1_a_seq +(1 row) + +\d itest1_a_seq + Sequence "public.itest1_a_seq" + Type | Start | Minimum | Maximum | Increment | Cycles? 
| Cache +---------+-------+---------+------------+-----------+---------+------- + integer | 1 | 1 | 2147483647 | 1 | no | 1 +Sequence for identity column: public.itest1.a + CREATE TABLE itest4 (a int, b text); ALTER TABLE itest4 ALTER COLUMN a ADD GENERATED ALWAYS AS IDENTITY; -- error, requires NOT NULL ERROR: column "a" of relation "itest4" must be declared NOT NULL before identity can be added @@ -91,6 +104,19 @@ SELECT * FROM itest4; 2 | (2 rows) +-- VALUES RTEs +INSERT INTO itest3 VALUES (DEFAULT, 'a'); +INSERT INTO itest3 VALUES (DEFAULT, 'b'), (DEFAULT, 'c'); +SELECT * FROM itest3; + a | b +----+--- + 7 | + 12 | + 17 | a + 22 | b + 27 | c +(5 rows) + -- OVERRIDING tests INSERT INTO itest1 VALUES (10, 'xyz'); INSERT INTO itest1 OVERRIDING USER VALUE VALUES (10, 'xyz'); @@ -140,6 +166,19 @@ SELECT * FROM itest2; 3 | (3 rows) +-- COPY tests +CREATE TABLE itest9 (a int GENERATED ALWAYS AS IDENTITY, b text, c bigint); +COPY itest9 FROM stdin; +COPY itest9 (b, c) FROM stdin; +SELECT * FROM itest9 ORDER BY c; + a | b | c +-----+------+----- + 100 | foo | 200 + 101 | bar | 201 + 1 | foo2 | 202 + 2 | bar2 | 203 +(4 rows) + -- DROP IDENTITY tests ALTER TABLE itest4 ALTER COLUMN a DROP IDENTITY; ALTER TABLE itest4 ALTER COLUMN a DROP IDENTITY; -- error @@ -211,6 +250,21 @@ SELECT * FROM itestv11; 11 | xyz (3 rows) +-- ADD COLUMN +CREATE TABLE itest13 (a int); +-- add column to empty table +ALTER TABLE itest13 ADD COLUMN b int GENERATED BY DEFAULT AS IDENTITY; +INSERT INTO itest13 VALUES (1), (2), (3); +-- add column to populated table +ALTER TABLE itest13 ADD COLUMN c int GENERATED BY DEFAULT AS IDENTITY; +SELECT * FROM itest13; + a | b | c +---+---+--- + 1 | 1 | 1 + 2 | 2 | 2 + 3 | 3 | 3 +(3 rows) + -- various ALTER COLUMN tests -- fail, not allowed for identity columns ALTER TABLE itest1 ALTER COLUMN a SET DEFAULT 1; @@ -306,10 +360,10 @@ ALTER TABLE itest7 ALTER COLUMN a SET GENERATED BY DEFAULT; ALTER TABLE itest7 ALTER COLUMN a RESTART; ALTER TABLE itest7 ALTER COLUMN a DROP IDENTITY; -- privileges -CREATE USER regress_user1; +CREATE USER regress_identity_user1; CREATE TABLE itest8 (a int GENERATED ALWAYS AS IDENTITY, b text); -GRANT SELECT, INSERT ON itest8 TO regress_user1; -SET ROLE regress_user1; +GRANT SELECT, INSERT ON itest8 TO regress_identity_user1; +SET ROLE regress_identity_user1; INSERT INTO itest8 DEFAULT VALUES; SELECT * FROM itest8; a | b @@ -319,4 +373,16 @@ SELECT * FROM itest8; RESET ROLE; DROP TABLE itest8; -DROP USER regress_user1; +DROP USER regress_identity_user1; +-- typed tables (currently not supported) +CREATE TYPE itest_type AS (f1 integer, f2 text, f3 bigint); +CREATE TABLE itest12 OF itest_type (f1 WITH OPTIONS GENERATED ALWAYS AS IDENTITY); -- error +ERROR: identity columns are not supported on typed tables +DROP TYPE itest_type CASCADE; +-- table partitions (currently not supported) +CREATE TABLE itest_parent (f1 date NOT NULL, f2 text, f3 bigint) PARTITION BY RANGE (f1); +CREATE TABLE itest_child PARTITION OF itest_parent ( + f3 WITH OPTIONS GENERATED ALWAYS AS IDENTITY +) FOR VALUES FROM ('2016-07-01') TO ('2016-08-01'); -- error +ERROR: identity columns are not supported on partitions +DROP TABLE itest_parent; diff --git a/src/test/regress/expected/index_including.out b/src/test/regress/expected/index_including.out new file mode 100644 index 0000000000..16b4be34de --- /dev/null +++ b/src/test/regress/expected/index_including.out @@ -0,0 +1,367 @@ +/* + * 1.test CREATE INDEX + * + * Deliberately avoid dropping objects in this section, to get some pg_dump + 
* coverage. + */ +-- Regular index with included columns +CREATE TABLE tbl_include_reg (c1 int, c2 int, c3 int, c4 box); +INSERT INTO tbl_include_reg SELECT x, 2*x, 3*x, box('4,4,4,4') FROM generate_series(1,10) AS x; +CREATE INDEX tbl_include_reg_idx ON tbl_include_reg (c1, c2) INCLUDE (c3, c4); +-- duplicate column is pretty pointless, but we allow it anyway +CREATE INDEX ON tbl_include_reg (c1, c2) INCLUDE (c1, c3); +SELECT pg_get_indexdef(i.indexrelid) +FROM pg_index i JOIN pg_class c ON i.indexrelid = c.oid +WHERE i.indrelid = 'tbl_include_reg'::regclass ORDER BY c.relname; + pg_get_indexdef +--------------------------------------------------------------------------------------------------------------- + CREATE INDEX tbl_include_reg_c1_c2_c11_c3_idx ON public.tbl_include_reg USING btree (c1, c2) INCLUDE (c1, c3) + CREATE INDEX tbl_include_reg_idx ON public.tbl_include_reg USING btree (c1, c2) INCLUDE (c3, c4) +(2 rows) + +\d tbl_include_reg_idx + Index "public.tbl_include_reg_idx" + Column | Type | Key? | Definition +--------+---------+------+------------ + c1 | integer | yes | c1 + c2 | integer | yes | c2 + c3 | integer | no | c3 + c4 | box | no | c4 +btree, for table "public.tbl_include_reg" + +-- Unique index and unique constraint +CREATE TABLE tbl_include_unique1 (c1 int, c2 int, c3 int, c4 box); +INSERT INTO tbl_include_unique1 SELECT x, 2*x, 3*x, box('4,4,4,4') FROM generate_series(1,10) AS x; +CREATE UNIQUE INDEX tbl_include_unique1_idx_unique ON tbl_include_unique1 using btree (c1, c2) INCLUDE (c3, c4); +ALTER TABLE tbl_include_unique1 add UNIQUE USING INDEX tbl_include_unique1_idx_unique; +ALTER TABLE tbl_include_unique1 add UNIQUE (c1, c2) INCLUDE (c3, c4); +SELECT pg_get_indexdef(i.indexrelid) +FROM pg_index i JOIN pg_class c ON i.indexrelid = c.oid +WHERE i.indrelid = 'tbl_include_unique1'::regclass ORDER BY c.relname; + pg_get_indexdef +----------------------------------------------------------------------------------------------------------------------------- + CREATE UNIQUE INDEX tbl_include_unique1_c1_c2_c3_c4_key ON public.tbl_include_unique1 USING btree (c1, c2) INCLUDE (c3, c4) + CREATE UNIQUE INDEX tbl_include_unique1_idx_unique ON public.tbl_include_unique1 USING btree (c1, c2) INCLUDE (c3, c4) +(2 rows) + +-- Unique index and unique constraint. Both must fail. +CREATE TABLE tbl_include_unique2 (c1 int, c2 int, c3 int, c4 box); +INSERT INTO tbl_include_unique2 SELECT 1, 2, 3*x, box('4,4,4,4') FROM generate_series(1,10) AS x; +CREATE UNIQUE INDEX tbl_include_unique2_idx_unique ON tbl_include_unique2 using btree (c1, c2) INCLUDE (c3, c4); +ERROR: could not create unique index "tbl_include_unique2_idx_unique" +DETAIL: Key (c1, c2)=(1, 2) is duplicated. +ALTER TABLE tbl_include_unique2 add UNIQUE (c1, c2) INCLUDE (c3, c4); +ERROR: could not create unique index "tbl_include_unique2_c1_c2_c3_c4_key" +DETAIL: Key (c1, c2)=(1, 2) is duplicated. 
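The covering-index checks above all reduce to the following pattern; a minimal sketch with illustrative names:

    CREATE TABLE include_demo (id int, payload text);
    -- only id participates in uniqueness; payload is merely stored in the leaf
    -- tuples so index-only scans can return it without visiting the heap
    CREATE UNIQUE INDEX include_demo_idx ON include_demo
        USING btree (id) INCLUDE (payload);
    INSERT INTO include_demo VALUES (1, 'a');
    INSERT INTO include_demo VALUES (1, 'b');  -- fails: duplicate key (id)=(1)
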
+-- PK constraint +CREATE TABLE tbl_include_pk (c1 int, c2 int, c3 int, c4 box); +INSERT INTO tbl_include_pk SELECT 1, 2*x, 3*x, box('4,4,4,4') FROM generate_series(1,10) AS x; +ALTER TABLE tbl_include_pk add PRIMARY KEY (c1, c2) INCLUDE (c3, c4); +SELECT pg_get_indexdef(i.indexrelid) +FROM pg_index i JOIN pg_class c ON i.indexrelid = c.oid +WHERE i.indrelid = 'tbl_include_pk'::regclass ORDER BY c.relname; + pg_get_indexdef +-------------------------------------------------------------------------------------------------------- + CREATE UNIQUE INDEX tbl_include_pk_pkey ON public.tbl_include_pk USING btree (c1, c2) INCLUDE (c3, c4) +(1 row) + +CREATE TABLE tbl_include_box (c1 int, c2 int, c3 int, c4 box); +INSERT INTO tbl_include_box SELECT 1, 2*x, 3*x, box('4,4,4,4') FROM generate_series(1,10) AS x; +CREATE UNIQUE INDEX tbl_include_box_idx_unique ON tbl_include_box using btree (c1, c2) INCLUDE (c3, c4); +ALTER TABLE tbl_include_box add PRIMARY KEY USING INDEX tbl_include_box_idx_unique; +SELECT pg_get_indexdef(i.indexrelid) +FROM pg_index i JOIN pg_class c ON i.indexrelid = c.oid +WHERE i.indrelid = 'tbl_include_box'::regclass ORDER BY c.relname; + pg_get_indexdef +---------------------------------------------------------------------------------------------------------------- + CREATE UNIQUE INDEX tbl_include_box_idx_unique ON public.tbl_include_box USING btree (c1, c2) INCLUDE (c3, c4) +(1 row) + +-- PK constraint. Must fail. +CREATE TABLE tbl_include_box_pk (c1 int, c2 int, c3 int, c4 box); +INSERT INTO tbl_include_box_pk SELECT 1, 2, 3*x, box('4,4,4,4') FROM generate_series(1,10) AS x; +ALTER TABLE tbl_include_box_pk add PRIMARY KEY (c1, c2) INCLUDE (c3, c4); +ERROR: could not create unique index "tbl_include_box_pk_pkey" +DETAIL: Key (c1, c2)=(1, 2) is duplicated. +/* + * 2. Test CREATE TABLE with constraint + */ +CREATE TABLE tbl (c1 int,c2 int, c3 int, c4 box, + CONSTRAINT covering UNIQUE(c1,c2) INCLUDE(c3,c4)); +SELECT indexrelid::regclass, indnatts, indnkeyatts, indisunique, indisprimary, indkey, indclass FROM pg_index WHERE indrelid = 'tbl'::regclass::oid; + indexrelid | indnatts | indnkeyatts | indisunique | indisprimary | indkey | indclass +------------+----------+-------------+-------------+--------------+---------+----------- + covering | 4 | 2 | t | f | 1 2 3 4 | 1978 1978 +(1 row) + +SELECT pg_get_constraintdef(oid), conname, conkey FROM pg_constraint WHERE conrelid = 'tbl'::regclass::oid; + pg_get_constraintdef | conname | conkey +----------------------------------+----------+-------- + UNIQUE (c1, c2) INCLUDE (c3, c4) | covering | {1,2} +(1 row) + +-- ensure that constraint works +INSERT INTO tbl SELECT 1, 2, 3*x, box('4,4,4,4') FROM generate_series(1,10) AS x; +ERROR: duplicate key value violates unique constraint "covering" +DETAIL: Key (c1, c2)=(1, 2) already exists. 
+DROP TABLE tbl; +CREATE TABLE tbl (c1 int,c2 int, c3 int, c4 box, + CONSTRAINT covering PRIMARY KEY(c1,c2) INCLUDE(c3,c4)); +SELECT indexrelid::regclass, indnatts, indnkeyatts, indisunique, indisprimary, indkey, indclass FROM pg_index WHERE indrelid = 'tbl'::regclass::oid; + indexrelid | indnatts | indnkeyatts | indisunique | indisprimary | indkey | indclass +------------+----------+-------------+-------------+--------------+---------+----------- + covering | 4 | 2 | t | t | 1 2 3 4 | 1978 1978 +(1 row) + +SELECT pg_get_constraintdef(oid), conname, conkey FROM pg_constraint WHERE conrelid = 'tbl'::regclass::oid; + pg_get_constraintdef | conname | conkey +---------------------------------------+----------+-------- + PRIMARY KEY (c1, c2) INCLUDE (c3, c4) | covering | {1,2} +(1 row) + +-- ensure that constraint works +INSERT INTO tbl SELECT 1, 2, 3*x, box('4,4,4,4') FROM generate_series(1,10) AS x; +ERROR: duplicate key value violates unique constraint "covering" +DETAIL: Key (c1, c2)=(1, 2) already exists. +INSERT INTO tbl SELECT 1, NULL, 3*x, box('4,4,4,4') FROM generate_series(1,10) AS x; +ERROR: null value in column "c2" violates not-null constraint +DETAIL: Failing row contains (1, null, 3, (4,4),(4,4)). +INSERT INTO tbl SELECT x, 2*x, NULL, NULL FROM generate_series(1,10) AS x; +DROP TABLE tbl; +CREATE TABLE tbl (c1 int,c2 int, c3 int, c4 box, + UNIQUE(c1,c2) INCLUDE(c3,c4)); +SELECT indexrelid::regclass, indnatts, indnkeyatts, indisunique, indisprimary, indkey, indclass FROM pg_index WHERE indrelid = 'tbl'::regclass::oid; + indexrelid | indnatts | indnkeyatts | indisunique | indisprimary | indkey | indclass +---------------------+----------+-------------+-------------+--------------+---------+----------- + tbl_c1_c2_c3_c4_key | 4 | 2 | t | f | 1 2 3 4 | 1978 1978 +(1 row) + +SELECT pg_get_constraintdef(oid), conname, conkey FROM pg_constraint WHERE conrelid = 'tbl'::regclass::oid; + pg_get_constraintdef | conname | conkey +----------------------------------+---------------------+-------- + UNIQUE (c1, c2) INCLUDE (c3, c4) | tbl_c1_c2_c3_c4_key | {1,2} +(1 row) + +-- ensure that constraint works +INSERT INTO tbl SELECT 1, 2, 3*x, box('4,4,4,4') FROM generate_series(1,10) AS x; +ERROR: duplicate key value violates unique constraint "tbl_c1_c2_c3_c4_key" +DETAIL: Key (c1, c2)=(1, 2) already exists. +DROP TABLE tbl; +CREATE TABLE tbl (c1 int,c2 int, c3 int, c4 box, + PRIMARY KEY(c1,c2) INCLUDE(c3,c4)); +SELECT indexrelid::regclass, indnatts, indnkeyatts, indisunique, indisprimary, indkey, indclass FROM pg_index WHERE indrelid = 'tbl'::regclass::oid; + indexrelid | indnatts | indnkeyatts | indisunique | indisprimary | indkey | indclass +------------+----------+-------------+-------------+--------------+---------+----------- + tbl_pkey | 4 | 2 | t | t | 1 2 3 4 | 1978 1978 +(1 row) + +SELECT pg_get_constraintdef(oid), conname, conkey FROM pg_constraint WHERE conrelid = 'tbl'::regclass::oid; + pg_get_constraintdef | conname | conkey +---------------------------------------+----------+-------- + PRIMARY KEY (c1, c2) INCLUDE (c3, c4) | tbl_pkey | {1,2} +(1 row) + +-- ensure that constraint works +INSERT INTO tbl SELECT 1, 2, 3*x, box('4,4,4,4') FROM generate_series(1,10) AS x; +ERROR: duplicate key value violates unique constraint "tbl_pkey" +DETAIL: Key (c1, c2)=(1, 2) already exists. +INSERT INTO tbl SELECT 1, NULL, 3*x, box('4,4,4,4') FROM generate_series(1,10) AS x; +ERROR: null value in column "c2" violates not-null constraint +DETAIL: Failing row contains (1, null, 3, (4,4),(4,4)). 
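The pair of INSERTs here (a NULL key column rejected above, NULL included columns accepted just below) shows that the implicit NOT NULL of a primary key applies only to its key columns. A self-contained sketch of the same point, with illustrative names:

CREATE TABLE t_pk (k1 int, k2 int, extra text,
                   PRIMARY KEY (k1, k2) INCLUDE (extra));
INSERT INTO t_pk VALUES (1, 1, NULL);   -- ok: an included column may be NULL
INSERT INTO t_pk VALUES (1, NULL, 'x'); -- fails: key columns are NOT NULL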
+INSERT INTO tbl SELECT x, 2*x, NULL, NULL FROM generate_series(1,10) AS x; +DROP TABLE tbl; +CREATE TABLE tbl (c1 int,c2 int, c3 int, c4 box, + EXCLUDE USING btree (c1 WITH =) INCLUDE(c3,c4)); +SELECT indexrelid::regclass, indnatts, indnkeyatts, indisunique, indisprimary, indkey, indclass FROM pg_index WHERE indrelid = 'tbl'::regclass::oid; + indexrelid | indnatts | indnkeyatts | indisunique | indisprimary | indkey | indclass +-------------------+----------+-------------+-------------+--------------+--------+---------- + tbl_c1_c3_c4_excl | 3 | 1 | f | f | 1 3 4 | 1978 +(1 row) + +SELECT pg_get_constraintdef(oid), conname, conkey FROM pg_constraint WHERE conrelid = 'tbl'::regclass::oid; + pg_get_constraintdef | conname | conkey +--------------------------------------------------+-------------------+-------- + EXCLUDE USING btree (c1 WITH =) INCLUDE (c3, c4) | tbl_c1_c3_c4_excl | {1} +(1 row) + +-- ensure that constraint works +INSERT INTO tbl SELECT 1, 2, 3*x, box('4,4,4,4') FROM generate_series(1,10) AS x; +ERROR: conflicting key value violates exclusion constraint "tbl_c1_c3_c4_excl" +DETAIL: Key (c1)=(1) conflicts with existing key (c1)=(1). +INSERT INTO tbl SELECT x, 2*x, NULL, NULL FROM generate_series(1,10) AS x; +DROP TABLE tbl; +/* + * 3.0 Test ALTER TABLE DROP COLUMN. + * Any column deletion leads to index deletion. + */ +CREATE TABLE tbl (c1 int,c2 int, c3 int, c4 int); +CREATE UNIQUE INDEX tbl_idx ON tbl using btree(c1, c2, c3, c4); +SELECT indexdef FROM pg_indexes WHERE tablename = 'tbl' ORDER BY indexname; + indexdef +------------------------------------------------------------------------ + CREATE UNIQUE INDEX tbl_idx ON public.tbl USING btree (c1, c2, c3, c4) +(1 row) + +ALTER TABLE tbl DROP COLUMN c3; +SELECT indexdef FROM pg_indexes WHERE tablename = 'tbl' ORDER BY indexname; + indexdef +---------- +(0 rows) + +DROP TABLE tbl; +/* + * 3.1 Test ALTER TABLE DROP COLUMN. + * Included column deletion leads to the index deletion, + * AS well AS key columns deletion. It's explained in documentation. + */ +CREATE TABLE tbl (c1 int,c2 int, c3 int, c4 box); +CREATE UNIQUE INDEX tbl_idx ON tbl using btree(c1, c2) INCLUDE(c3,c4); +SELECT indexdef FROM pg_indexes WHERE tablename = 'tbl' ORDER BY indexname; + indexdef +--------------------------------------------------------------------------------- + CREATE UNIQUE INDEX tbl_idx ON public.tbl USING btree (c1, c2) INCLUDE (c3, c4) +(1 row) + +ALTER TABLE tbl DROP COLUMN c3; +SELECT indexdef FROM pg_indexes WHERE tablename = 'tbl' ORDER BY indexname; + indexdef +---------- +(0 rows) + +DROP TABLE tbl; +/* + * 3.2 Test ALTER TABLE DROP COLUMN. + * Included column deletion leads to the index deletion. + * AS well AS key columns deletion. It's explained in documentation. 
+ */ +CREATE TABLE tbl (c1 int,c2 int, c3 int, c4 box, UNIQUE(c1, c2) INCLUDE(c3,c4)); +SELECT indexdef FROM pg_indexes WHERE tablename = 'tbl' ORDER BY indexname; + indexdef +--------------------------------------------------------------------------------------------- + CREATE UNIQUE INDEX tbl_c1_c2_c3_c4_key ON public.tbl USING btree (c1, c2) INCLUDE (c3, c4) +(1 row) + +ALTER TABLE tbl DROP COLUMN c3; +SELECT indexdef FROM pg_indexes WHERE tablename = 'tbl' ORDER BY indexname; + indexdef +---------- +(0 rows) + +ALTER TABLE tbl DROP COLUMN c1; +SELECT indexdef FROM pg_indexes WHERE tablename = 'tbl' ORDER BY indexname; + indexdef +---------- +(0 rows) + +DROP TABLE tbl; +/* + * 3.3 Test ALTER TABLE SET STATISTICS + */ +CREATE TABLE tbl (c1 int, c2 int); +CREATE INDEX tbl_idx ON tbl (c1, (c1+0)) INCLUDE (c2); +ALTER INDEX tbl_idx ALTER COLUMN 1 SET STATISTICS 1000; +ERROR: cannot alter statistics on non-expression column "c1" of index "tbl_idx" +HINT: Alter statistics on table column instead. +ALTER INDEX tbl_idx ALTER COLUMN 2 SET STATISTICS 1000; +ALTER INDEX tbl_idx ALTER COLUMN 3 SET STATISTICS 1000; +ERROR: cannot alter statistics on included column "c2" of index "tbl_idx" +ALTER INDEX tbl_idx ALTER COLUMN 4 SET STATISTICS 1000; +ERROR: column number 4 of relation "tbl_idx" does not exist +DROP TABLE tbl; +/* + * 4. CREATE INDEX CONCURRENTLY + */ +CREATE TABLE tbl (c1 int,c2 int, c3 int, c4 box, UNIQUE(c1, c2) INCLUDE(c3,c4)); +INSERT INTO tbl SELECT x, 2*x, 3*x, box('4,4,4,4') FROM generate_series(1,1000) AS x; +CREATE UNIQUE INDEX CONCURRENTLY on tbl (c1, c2) INCLUDE (c3, c4); +SELECT indexdef FROM pg_indexes WHERE tablename = 'tbl' ORDER BY indexname; + indexdef +--------------------------------------------------------------------------------------------- + CREATE UNIQUE INDEX tbl_c1_c2_c3_c4_idx ON public.tbl USING btree (c1, c2) INCLUDE (c3, c4) + CREATE UNIQUE INDEX tbl_c1_c2_c3_c4_key ON public.tbl USING btree (c1, c2) INCLUDE (c3, c4) +(2 rows) + +DROP TABLE tbl; +/* + * 5. REINDEX + */ +CREATE TABLE tbl (c1 int,c2 int, c3 int, c4 box, UNIQUE(c1, c2) INCLUDE(c3,c4)); +SELECT indexdef FROM pg_indexes WHERE tablename = 'tbl' ORDER BY indexname; + indexdef +--------------------------------------------------------------------------------------------- + CREATE UNIQUE INDEX tbl_c1_c2_c3_c4_key ON public.tbl USING btree (c1, c2) INCLUDE (c3, c4) +(1 row) + +ALTER TABLE tbl DROP COLUMN c3; +SELECT indexdef FROM pg_indexes WHERE tablename = 'tbl' ORDER BY indexname; + indexdef +---------- +(0 rows) + +REINDEX INDEX tbl_c1_c2_c3_c4_key; +ERROR: relation "tbl_c1_c2_c3_c4_key" does not exist +SELECT indexdef FROM pg_indexes WHERE tablename = 'tbl' ORDER BY indexname; + indexdef +---------- +(0 rows) + +ALTER TABLE tbl DROP COLUMN c1; +SELECT indexdef FROM pg_indexes WHERE tablename = 'tbl' ORDER BY indexname; + indexdef +---------- +(0 rows) + +DROP TABLE tbl; +/* + * 7. Check various AMs. All but btree must fail. 
+ */ +CREATE TABLE tbl (c1 int,c2 int, c3 box, c4 box); +CREATE INDEX on tbl USING brin(c1, c2) INCLUDE (c3, c4); +ERROR: access method "brin" does not support included columns +CREATE INDEX on tbl USING gist(c3) INCLUDE (c4); +ERROR: access method "gist" does not support included columns +CREATE INDEX on tbl USING spgist(c3) INCLUDE (c4); +ERROR: access method "spgist" does not support included columns +CREATE INDEX on tbl USING gin(c1, c2) INCLUDE (c3, c4); +ERROR: access method "gin" does not support included columns +CREATE INDEX on tbl USING hash(c1, c2) INCLUDE (c3, c4); +ERROR: access method "hash" does not support included columns +CREATE INDEX on tbl USING rtree(c1, c2) INCLUDE (c3, c4); +NOTICE: substituting access method "gist" for obsolete method "rtree" +ERROR: access method "gist" does not support included columns +CREATE INDEX on tbl USING btree(c1, c2) INCLUDE (c3, c4); +DROP TABLE tbl; +/* + * 8. Update, delete values in indexed table. + */ +CREATE TABLE tbl (c1 int, c2 int, c3 int, c4 box); +INSERT INTO tbl SELECT x, 2*x, 3*x, box('4,4,4,4') FROM generate_series(1,10) AS x; +CREATE UNIQUE INDEX tbl_idx_unique ON tbl using btree(c1, c2) INCLUDE (c3,c4); +UPDATE tbl SET c1 = 100 WHERE c1 = 2; +UPDATE tbl SET c1 = 1 WHERE c1 = 3; +-- should fail +UPDATE tbl SET c2 = 2 WHERE c1 = 1; +ERROR: duplicate key value violates unique constraint "tbl_idx_unique" +DETAIL: Key (c1, c2)=(1, 2) already exists. +UPDATE tbl SET c3 = 1; +DELETE FROM tbl WHERE c1 = 5 OR c3 = 12; +DROP TABLE tbl; +/* + * 9. Alter column type. + */ +CREATE TABLE tbl (c1 int,c2 int, c3 int, c4 box, UNIQUE(c1, c2) INCLUDE(c3,c4)); +INSERT INTO tbl SELECT x, 2*x, 3*x, box('4,4,4,4') FROM generate_series(1,10) AS x; +ALTER TABLE tbl ALTER c1 TYPE bigint; +ALTER TABLE tbl ALTER c3 TYPE bigint; +\d tbl + Table "public.tbl" + Column | Type | Collation | Nullable | Default +--------+---------+-----------+----------+--------- + c1 | bigint | | | + c2 | integer | | | + c3 | bigint | | | + c4 | box | | | +Indexes: + "tbl_c1_c2_c3_c4_key" UNIQUE CONSTRAINT, btree (c1, c2) INCLUDE (c3, c4) + +DROP TABLE tbl; diff --git a/src/test/regress/expected/indexing.out b/src/test/regress/expected/indexing.out new file mode 100644 index 0000000000..ca27346f18 --- /dev/null +++ b/src/test/regress/expected/indexing.out @@ -0,0 +1,1406 @@ +-- Creating an index on a partitioned table makes the partitions +-- automatically get the index +create table idxpart (a int, b int, c text) partition by range (a); +-- relhassubclass of a partitioned index is false before creating any partition. +-- It will be set after the first partition is created. +create index idxpart_idx on idxpart (a); +select relhassubclass from pg_class where relname = 'idxpart_idx'; + relhassubclass +---------------- + f +(1 row) + +drop index idxpart_idx; +create table idxpart1 partition of idxpart for values from (0) to (10); +create table idxpart2 partition of idxpart for values from (10) to (100) + partition by range (b); +create table idxpart21 partition of idxpart2 for values from (0) to (100); +-- Even with partitions, relhassubclass should not be set if a partitioned +-- index is created only on the parent. 
+create index idxpart_idx on only idxpart(a); +select relhassubclass from pg_class where relname = 'idxpart_idx'; + relhassubclass +---------------- + f +(1 row) + +drop index idxpart_idx; +create index on idxpart (a); +select relname, relkind, relhassubclass, inhparent::regclass + from pg_class left join pg_index ix on (indexrelid = oid) + left join pg_inherits on (ix.indexrelid = inhrelid) + where relname like 'idxpart%' order by relname; + relname | relkind | relhassubclass | inhparent +-----------------+---------+----------------+---------------- + idxpart | p | t | + idxpart1 | r | f | + idxpart1_a_idx | i | f | idxpart_a_idx + idxpart2 | p | t | + idxpart21 | r | f | + idxpart21_a_idx | i | f | idxpart2_a_idx + idxpart2_a_idx | I | t | idxpart_a_idx + idxpart_a_idx | I | t | +(8 rows) + +drop table idxpart; +-- Some unsupported features +create table idxpart (a int, b int, c text) partition by range (a); +create table idxpart1 partition of idxpart for values from (0) to (10); +create index concurrently on idxpart (a); +ERROR: cannot create index on partitioned table "idxpart" concurrently +drop table idxpart; +-- Verify bugfix with query on indexed partitioned table with no partitions +-- https://postgr.es/m/20180124162006.pmapfiznhgngwtjf@alvherre.pgsql +CREATE TABLE idxpart (col1 INT) PARTITION BY RANGE (col1); +CREATE INDEX ON idxpart (col1); +CREATE TABLE idxpart_two (col2 INT); +SELECT col2 FROM idxpart_two fk LEFT OUTER JOIN idxpart pk ON (col1 = col2); + col2 +------ +(0 rows) + +DROP table idxpart, idxpart_two; +-- Verify bugfix with index rewrite on ALTER TABLE / SET DATA TYPE +-- https://postgr.es/m/CAKcux6mxNCGsgATwf5CGMF8g4WSupCXicCVMeKUTuWbyxHOMsQ@mail.gmail.com +CREATE TABLE idxpart (a INT, b TEXT, c INT) PARTITION BY RANGE(a); +CREATE TABLE idxpart1 PARTITION OF idxpart FOR VALUES FROM (MINVALUE) TO (MAXVALUE); +CREATE INDEX partidx_abc_idx ON idxpart (a, b, c); +INSERT INTO idxpart (a, b, c) SELECT i, i, i FROM generate_series(1, 50) i; +ALTER TABLE idxpart ALTER COLUMN c TYPE numeric; +DROP TABLE idxpart; +-- If a table without index is attached as partition to a table with +-- an index, the index is automatically created +create table idxpart (a int, b int, c text) partition by range (a); +create index idxparti on idxpart (a); +create index idxparti2 on idxpart (b, c); +create table idxpart1 (like idxpart); +\d idxpart1 + Table "public.idxpart1" + Column | Type | Collation | Nullable | Default +--------+---------+-----------+----------+--------- + a | integer | | | + b | integer | | | + c | text | | | + +alter table idxpart attach partition idxpart1 for values from (0) to (10); +\d idxpart1 + Table "public.idxpart1" + Column | Type | Collation | Nullable | Default +--------+---------+-----------+----------+--------- + a | integer | | | + b | integer | | | + c | text | | | +Partition of: idxpart FOR VALUES FROM (0) TO (10) +Indexes: + "idxpart1_a_idx" btree (a) + "idxpart1_b_c_idx" btree (b, c) + +\d+ idxpart1_a_idx + Index "public.idxpart1_a_idx" + Column | Type | Key? | Definition | Storage | Stats target +--------+---------+------+------------+---------+-------------- + a | integer | yes | a | plain | +Partition of: idxparti +No partition constraint +btree, for table "public.idxpart1" + +\d+ idxpart1_b_c_idx + Index "public.idxpart1_b_c_idx" + Column | Type | Key? 
| Definition | Storage | Stats target +--------+---------+------+------------+----------+-------------- + b | integer | yes | b | plain | + c | text | yes | c | extended | +Partition of: idxparti2 +No partition constraint +btree, for table "public.idxpart1" + +drop table idxpart; +-- If a partition already has an index, don't create a duplicative one +create table idxpart (a int, b int) partition by range (a, b); +create table idxpart1 partition of idxpart for values from (0, 0) to (10, 10); +create index on idxpart1 (a, b); +create index on idxpart (a, b); +\d idxpart1 + Table "public.idxpart1" + Column | Type | Collation | Nullable | Default +--------+---------+-----------+----------+--------- + a | integer | | | + b | integer | | | +Partition of: idxpart FOR VALUES FROM (0, 0) TO (10, 10) +Indexes: + "idxpart1_a_b_idx" btree (a, b) + +select relname, relkind, relhassubclass, inhparent::regclass + from pg_class left join pg_index ix on (indexrelid = oid) + left join pg_inherits on (ix.indexrelid = inhrelid) + where relname like 'idxpart%' order by relname; + relname | relkind | relhassubclass | inhparent +------------------+---------+----------------+----------------- + idxpart | p | t | + idxpart1 | r | f | + idxpart1_a_b_idx | i | f | idxpart_a_b_idx + idxpart_a_b_idx | I | t | +(4 rows) + +drop table idxpart; +-- DROP behavior for partitioned indexes +create table idxpart (a int) partition by range (a); +create index on idxpart (a); +create table idxpart1 partition of idxpart for values from (0) to (10); +drop index idxpart1_a_idx; -- no way +ERROR: cannot drop index idxpart1_a_idx because index idxpart_a_idx requires it +HINT: You can drop index idxpart_a_idx instead. +drop index idxpart_a_idx; -- both indexes go away +select relname, relkind from pg_class + where relname like 'idxpart%' order by relname; + relname | relkind +----------+--------- + idxpart | p + idxpart1 | r +(2 rows) + +create index on idxpart (a); +drop table idxpart1; -- the index on partition goes away too +select relname, relkind from pg_class + where relname like 'idxpart%' order by relname; + relname | relkind +---------------+--------- + idxpart | p + idxpart_a_idx | I +(2 rows) + +drop table idxpart; +-- ALTER INDEX .. ATTACH, error cases +create table idxpart (a int, b int) partition by range (a, b); +create table idxpart1 partition of idxpart for values from (0, 0) to (10, 10); +create index idxpart_a_b_idx on only idxpart (a, b); +create index idxpart1_a_b_idx on idxpart1 (a, b); +create index idxpart1_tst1 on idxpart1 (b, a); +create index idxpart1_tst2 on idxpart1 using hash (a); +create index idxpart1_tst3 on idxpart1 (a, b) where a > 10; +alter index idxpart attach partition idxpart1; +ERROR: "idxpart" is not an index +alter index idxpart_a_b_idx attach partition idxpart1; +ERROR: "idxpart1" is not an index +alter index idxpart_a_b_idx attach partition idxpart_a_b_idx; +ERROR: cannot attach index "idxpart_a_b_idx" as a partition of index "idxpart_a_b_idx" +DETAIL: Index "idxpart_a_b_idx" is not an index on any partition of table "idxpart". +alter index idxpart_a_b_idx attach partition idxpart1_b_idx; +ERROR: relation "idxpart1_b_idx" does not exist +alter index idxpart_a_b_idx attach partition idxpart1_tst1; +ERROR: cannot attach index "idxpart1_tst1" as a partition of index "idxpart_a_b_idx" +DETAIL: The index definitions do not match. 
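ALTER INDEX ... ATTACH PARTITION only succeeds when the candidate index matches the parent index definition (same columns in the same order, same access method, same expressions and predicate), which is what the rejected idxpart1_tst* cases around here exercise. A minimal sketch of the manual workflow these checks guard, with illustrative names:

CREATE TABLE parted (a int) PARTITION BY RANGE (a);
CREATE TABLE parted_p1 PARTITION OF parted FOR VALUES FROM (0) TO (10);
CREATE INDEX parted_a_idx ON ONLY parted (a);        -- parent index, still invalid
CREATE INDEX parted_p1_a_idx ON parted_p1 (a);       -- matching index on the partition
ALTER INDEX parted_a_idx ATTACH PARTITION parted_p1_a_idx;  -- parent becomes valid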
+alter index idxpart_a_b_idx attach partition idxpart1_tst2; +ERROR: cannot attach index "idxpart1_tst2" as a partition of index "idxpart_a_b_idx" +DETAIL: The index definitions do not match. +alter index idxpart_a_b_idx attach partition idxpart1_tst3; +ERROR: cannot attach index "idxpart1_tst3" as a partition of index "idxpart_a_b_idx" +DETAIL: The index definitions do not match. +-- OK +alter index idxpart_a_b_idx attach partition idxpart1_a_b_idx; +alter index idxpart_a_b_idx attach partition idxpart1_a_b_idx; -- quiet +-- reject dupe +create index idxpart1_2_a_b on idxpart1 (a, b); +alter index idxpart_a_b_idx attach partition idxpart1_2_a_b; +ERROR: cannot attach index "idxpart1_2_a_b" as a partition of index "idxpart_a_b_idx" +DETAIL: Another index is already attached for partition "idxpart1". +drop table idxpart; +-- make sure everything's gone +select indexrelid::regclass, indrelid::regclass + from pg_index where indexrelid::regclass::text like 'idxpart%'; + indexrelid | indrelid +------------+---------- +(0 rows) + +-- Don't auto-attach incompatible indexes +create table idxpart (a int, b int) partition by range (a); +create table idxpart1 (a int, b int); +create index on idxpart1 using hash (a); +create index on idxpart1 (a) where b > 1; +create index on idxpart1 ((a + 0)); +create index on idxpart1 (a, a); +create index on idxpart (a); +alter table idxpart attach partition idxpart1 for values from (0) to (1000); +\d idxpart1 + Table "public.idxpart1" + Column | Type | Collation | Nullable | Default +--------+---------+-----------+----------+--------- + a | integer | | | + b | integer | | | +Partition of: idxpart FOR VALUES FROM (0) TO (1000) +Indexes: + "idxpart1_a_a1_idx" btree (a, a) + "idxpart1_a_idx" hash (a) + "idxpart1_a_idx1" btree (a) WHERE b > 1 + "idxpart1_a_idx2" btree (a) + "idxpart1_expr_idx" btree ((a + 0)) + +drop table idxpart; +-- If CREATE INDEX ONLY, don't create indexes on partitions; and existing +-- indexes on partitions don't change parent. ALTER INDEX ATTACH can change +-- the parent after the fact. +create table idxpart (a int) partition by range (a); +create table idxpart1 partition of idxpart for values from (0) to (100); +create table idxpart2 partition of idxpart for values from (100) to (1000) + partition by range (a); +create table idxpart21 partition of idxpart2 for values from (100) to (200); +create table idxpart22 partition of idxpart2 for values from (200) to (300); +create index on idxpart22 (a); +create index on only idxpart2 (a); +create index on idxpart (a); +-- Here we expect that idxpart1 and idxpart2 have a new index, but idxpart21 +-- does not; also, idxpart22 is not attached. +\d idxpart1 + Table "public.idxpart1" + Column | Type | Collation | Nullable | Default +--------+---------+-----------+----------+--------- + a | integer | | | +Partition of: idxpart FOR VALUES FROM (0) TO (100) +Indexes: + "idxpart1_a_idx" btree (a) + +\d idxpart2 + Table "public.idxpart2" + Column | Type | Collation | Nullable | Default +--------+---------+-----------+----------+--------- + a | integer | | | +Partition of: idxpart FOR VALUES FROM (100) TO (1000) +Partition key: RANGE (a) +Indexes: + "idxpart2_a_idx" btree (a) INVALID +Number of partitions: 2 (Use \d+ to list them.) 
+ +\d idxpart21 + Table "public.idxpart21" + Column | Type | Collation | Nullable | Default +--------+---------+-----------+----------+--------- + a | integer | | | +Partition of: idxpart2 FOR VALUES FROM (100) TO (200) + +select indexrelid::regclass, indrelid::regclass, inhparent::regclass + from pg_index idx left join pg_inherits inh on (idx.indexrelid = inh.inhrelid) +where indexrelid::regclass::text like 'idxpart%' + order by indexrelid::regclass::text collate "C"; + indexrelid | indrelid | inhparent +-----------------+-----------+--------------- + idxpart1_a_idx | idxpart1 | idxpart_a_idx + idxpart22_a_idx | idxpart22 | + idxpart2_a_idx | idxpart2 | idxpart_a_idx + idxpart_a_idx | idxpart | +(4 rows) + +alter index idxpart2_a_idx attach partition idxpart22_a_idx; +select indexrelid::regclass, indrelid::regclass, inhparent::regclass + from pg_index idx left join pg_inherits inh on (idx.indexrelid = inh.inhrelid) +where indexrelid::regclass::text like 'idxpart%' + order by indexrelid::regclass::text collate "C"; + indexrelid | indrelid | inhparent +-----------------+-----------+---------------- + idxpart1_a_idx | idxpart1 | idxpart_a_idx + idxpart22_a_idx | idxpart22 | idxpart2_a_idx + idxpart2_a_idx | idxpart2 | idxpart_a_idx + idxpart_a_idx | idxpart | +(4 rows) + +-- attaching idxpart22 is not enough to set idxpart22_a_idx valid ... +alter index idxpart2_a_idx attach partition idxpart22_a_idx; +\d idxpart2 + Table "public.idxpart2" + Column | Type | Collation | Nullable | Default +--------+---------+-----------+----------+--------- + a | integer | | | +Partition of: idxpart FOR VALUES FROM (100) TO (1000) +Partition key: RANGE (a) +Indexes: + "idxpart2_a_idx" btree (a) INVALID +Number of partitions: 2 (Use \d+ to list them.) + +-- ... but this one is. +create index on idxpart21 (a); +alter index idxpart2_a_idx attach partition idxpart21_a_idx; +\d idxpart2 + Table "public.idxpart2" + Column | Type | Collation | Nullable | Default +--------+---------+-----------+----------+--------- + a | integer | | | +Partition of: idxpart FOR VALUES FROM (100) TO (1000) +Partition key: RANGE (a) +Indexes: + "idxpart2_a_idx" btree (a) +Number of partitions: 2 (Use \d+ to list them.) + +drop table idxpart; +-- When a table is attached a partition and it already has an index, a +-- duplicate index should not get created, but rather the index becomes +-- attached to the parent's index. 
+create table idxpart (a int, b int, c text) partition by range (a); +create index idxparti on idxpart (a); +create index idxparti2 on idxpart (b, c); +create table idxpart1 (like idxpart including indexes); +\d idxpart1 + Table "public.idxpart1" + Column | Type | Collation | Nullable | Default +--------+---------+-----------+----------+--------- + a | integer | | | + b | integer | | | + c | text | | | +Indexes: + "idxpart1_a_idx" btree (a) + "idxpart1_b_c_idx" btree (b, c) + +select relname, relkind, inhparent::regclass + from pg_class left join pg_index ix on (indexrelid = oid) + left join pg_inherits on (ix.indexrelid = inhrelid) + where relname like 'idxpart%' order by relname; + relname | relkind | inhparent +------------------+---------+----------- + idxpart | p | + idxpart1 | r | + idxpart1_a_idx | i | + idxpart1_b_c_idx | i | + idxparti | I | + idxparti2 | I | +(6 rows) + +alter table idxpart attach partition idxpart1 for values from (0) to (10); +\d idxpart1 + Table "public.idxpart1" + Column | Type | Collation | Nullable | Default +--------+---------+-----------+----------+--------- + a | integer | | | + b | integer | | | + c | text | | | +Partition of: idxpart FOR VALUES FROM (0) TO (10) +Indexes: + "idxpart1_a_idx" btree (a) + "idxpart1_b_c_idx" btree (b, c) + +select relname, relkind, inhparent::regclass + from pg_class left join pg_index ix on (indexrelid = oid) + left join pg_inherits on (ix.indexrelid = inhrelid) + where relname like 'idxpart%' order by relname; + relname | relkind | inhparent +------------------+---------+----------- + idxpart | p | + idxpart1 | r | + idxpart1_a_idx | i | idxparti + idxpart1_b_c_idx | i | idxparti2 + idxparti | I | + idxparti2 | I | +(6 rows) + +drop table idxpart; +-- Verify that attaching an invalid index does not mark the parent index valid. 
+-- On the other hand, attaching a valid index marks not only its direct +-- ancestor valid, but also any indirect ancestor that was only missing the one +-- that was just made valid +create table idxpart (a int, b int) partition by range (a); +create table idxpart1 partition of idxpart for values from (1) to (1000) partition by range (a); +create table idxpart11 partition of idxpart1 for values from (1) to (100); +create index on only idxpart1 (a); +create index on only idxpart (a); +-- this results in two invalid indexes: +select relname, indisvalid from pg_class join pg_index on indexrelid = oid + where relname like 'idxpart%' order by relname; + relname | indisvalid +----------------+------------ + idxpart1_a_idx | f + idxpart_a_idx | f +(2 rows) + +-- idxpart1_a_idx is not valid, so idxpart_a_idx should not become valid: +alter index idxpart_a_idx attach partition idxpart1_a_idx; +select relname, indisvalid from pg_class join pg_index on indexrelid = oid + where relname like 'idxpart%' order by relname; + relname | indisvalid +----------------+------------ + idxpart1_a_idx | f + idxpart_a_idx | f +(2 rows) + +-- after creating and attaching this, both idxpart1_a_idx and idxpart_a_idx +-- should become valid +create index on idxpart11 (a); +alter index idxpart1_a_idx attach partition idxpart11_a_idx; +select relname, indisvalid from pg_class join pg_index on indexrelid = oid + where relname like 'idxpart%' order by relname; + relname | indisvalid +-----------------+------------ + idxpart11_a_idx | t + idxpart1_a_idx | t + idxpart_a_idx | t +(3 rows) + +drop table idxpart; +-- verify dependency handling during ALTER TABLE DETACH PARTITION +create table idxpart (a int) partition by range (a); +create table idxpart1 (like idxpart); +create index on idxpart1 (a); +create index on idxpart (a); +create table idxpart2 (like idxpart); +alter table idxpart attach partition idxpart1 for values from (0000) to (1000); +alter table idxpart attach partition idxpart2 for values from (1000) to (2000); +create table idxpart3 partition of idxpart for values from (2000) to (3000); +select relname, relkind from pg_class where relname like 'idxpart%' order by relname; + relname | relkind +----------------+--------- + idxpart | p + idxpart1 | r + idxpart1_a_idx | i + idxpart2 | r + idxpart2_a_idx | i + idxpart3 | r + idxpart3_a_idx | i + idxpart_a_idx | I +(8 rows) + +-- a) after detaching partitions, the indexes can be dropped independently +alter table idxpart detach partition idxpart1; +alter table idxpart detach partition idxpart2; +alter table idxpart detach partition idxpart3; +drop index idxpart1_a_idx; +drop index idxpart2_a_idx; +drop index idxpart3_a_idx; +select relname, relkind from pg_class where relname like 'idxpart%' order by relname; + relname | relkind +---------------+--------- + idxpart | p + idxpart1 | r + idxpart2 | r + idxpart3 | r + idxpart_a_idx | I +(5 rows) + +drop table idxpart, idxpart1, idxpart2, idxpart3; +select relname, relkind from pg_class where relname like 'idxpart%' order by relname; + relname | relkind +---------+--------- +(0 rows) + +create table idxpart (a int) partition by range (a); +create table idxpart1 (like idxpart); +create index on idxpart1 (a); +create index on idxpart (a); +create table idxpart2 (like idxpart); +alter table idxpart attach partition idxpart1 for values from (0000) to (1000); +alter table idxpart attach partition idxpart2 for values from (1000) to (2000); +create table idxpart3 partition of idxpart for values from (2000) to (3000); +-- b) 
after detaching, dropping the index on parent does not remove the others +select relname, relkind from pg_class where relname like 'idxpart%' order by relname; + relname | relkind +----------------+--------- + idxpart | p + idxpart1 | r + idxpart1_a_idx | i + idxpart2 | r + idxpart2_a_idx | i + idxpart3 | r + idxpart3_a_idx | i + idxpart_a_idx | I +(8 rows) + +alter table idxpart detach partition idxpart1; +alter table idxpart detach partition idxpart2; +alter table idxpart detach partition idxpart3; +drop index idxpart_a_idx; +select relname, relkind from pg_class where relname like 'idxpart%' order by relname; + relname | relkind +----------------+--------- + idxpart | p + idxpart1 | r + idxpart1_a_idx | i + idxpart2 | r + idxpart2_a_idx | i + idxpart3 | r + idxpart3_a_idx | i +(7 rows) + +drop table idxpart, idxpart1, idxpart2, idxpart3; +select relname, relkind from pg_class where relname like 'idxpart%' order by relname; + relname | relkind +---------+--------- +(0 rows) + +-- Verify that expression indexes inherit correctly +create table idxpart (a int, b int) partition by range (a); +create table idxpart1 (like idxpart); +create index on idxpart1 ((a + b)); +create index on idxpart ((a + b)); +create table idxpart2 (like idxpart); +alter table idxpart attach partition idxpart1 for values from (0000) to (1000); +alter table idxpart attach partition idxpart2 for values from (1000) to (2000); +create table idxpart3 partition of idxpart for values from (2000) to (3000); +select relname as child, inhparent::regclass as parent, pg_get_indexdef as childdef + from pg_class join pg_inherits on inhrelid = oid, + lateral pg_get_indexdef(pg_class.oid) + where relkind in ('i', 'I') and relname like 'idxpart%' order by relname; + child | parent | childdef +-------------------+------------------+--------------------------------------------------------------------------- + idxpart1_expr_idx | idxpart_expr_idx | CREATE INDEX idxpart1_expr_idx ON public.idxpart1 USING btree (((a + b))) + idxpart2_expr_idx | idxpart_expr_idx | CREATE INDEX idxpart2_expr_idx ON public.idxpart2 USING btree (((a + b))) + idxpart3_expr_idx | idxpart_expr_idx | CREATE INDEX idxpart3_expr_idx ON public.idxpart3 USING btree (((a + b))) +(3 rows) + +drop table idxpart; +-- Verify behavior for collation (mis)matches +create table idxpart (a text) partition by range (a); +create table idxpart1 (like idxpart); +create table idxpart2 (like idxpart); +create index on idxpart2 (a collate "POSIX"); +create index on idxpart2 (a); +create index on idxpart2 (a collate "C"); +alter table idxpart attach partition idxpart1 for values from ('aaa') to ('bbb'); +alter table idxpart attach partition idxpart2 for values from ('bbb') to ('ccc'); +create table idxpart3 partition of idxpart for values from ('ccc') to ('ddd'); +create index on idxpart (a collate "C"); +create table idxpart4 partition of idxpart for values from ('ddd') to ('eee'); +select relname as child, inhparent::regclass as parent, pg_get_indexdef as childdef + from pg_class left join pg_inherits on inhrelid = oid, + lateral pg_get_indexdef(pg_class.oid) + where relkind in ('i', 'I') and relname like 'idxpart%' order by relname; + child | parent | childdef +-----------------+---------------+-------------------------------------------------------------------------------- + idxpart1_a_idx | idxpart_a_idx | CREATE INDEX idxpart1_a_idx ON public.idxpart1 USING btree (a COLLATE "C") + idxpart2_a_idx | | CREATE INDEX idxpart2_a_idx ON public.idxpart2 USING btree (a COLLATE "POSIX") 
+ idxpart2_a_idx1 | | CREATE INDEX idxpart2_a_idx1 ON public.idxpart2 USING btree (a) + idxpart2_a_idx2 | idxpart_a_idx | CREATE INDEX idxpart2_a_idx2 ON public.idxpart2 USING btree (a COLLATE "C") + idxpart3_a_idx | idxpart_a_idx | CREATE INDEX idxpart3_a_idx ON public.idxpart3 USING btree (a COLLATE "C") + idxpart4_a_idx | idxpart_a_idx | CREATE INDEX idxpart4_a_idx ON public.idxpart4 USING btree (a COLLATE "C") + idxpart_a_idx | | CREATE INDEX idxpart_a_idx ON ONLY public.idxpart USING btree (a COLLATE "C") +(7 rows) + +drop table idxpart; +-- Verify behavior for opclass (mis)matches +create table idxpart (a text) partition by range (a); +create table idxpart1 (like idxpart); +create table idxpart2 (like idxpart); +create index on idxpart2 (a); +alter table idxpart attach partition idxpart1 for values from ('aaa') to ('bbb'); +alter table idxpart attach partition idxpart2 for values from ('bbb') to ('ccc'); +create table idxpart3 partition of idxpart for values from ('ccc') to ('ddd'); +create index on idxpart (a text_pattern_ops); +create table idxpart4 partition of idxpart for values from ('ddd') to ('eee'); +-- must *not* have attached the index we created on idxpart2 +select relname as child, inhparent::regclass as parent, pg_get_indexdef as childdef + from pg_class left join pg_inherits on inhrelid = oid, + lateral pg_get_indexdef(pg_class.oid) + where relkind in ('i', 'I') and relname like 'idxpart%' order by relname; + child | parent | childdef +-----------------+---------------+------------------------------------------------------------------------------------ + idxpart1_a_idx | idxpart_a_idx | CREATE INDEX idxpart1_a_idx ON public.idxpart1 USING btree (a text_pattern_ops) + idxpart2_a_idx | | CREATE INDEX idxpart2_a_idx ON public.idxpart2 USING btree (a) + idxpart2_a_idx1 | idxpart_a_idx | CREATE INDEX idxpart2_a_idx1 ON public.idxpart2 USING btree (a text_pattern_ops) + idxpart3_a_idx | idxpart_a_idx | CREATE INDEX idxpart3_a_idx ON public.idxpart3 USING btree (a text_pattern_ops) + idxpart4_a_idx | idxpart_a_idx | CREATE INDEX idxpart4_a_idx ON public.idxpart4 USING btree (a text_pattern_ops) + idxpart_a_idx | | CREATE INDEX idxpart_a_idx ON ONLY public.idxpart USING btree (a text_pattern_ops) +(6 rows) + +drop index idxpart_a_idx; +create index on only idxpart (a text_pattern_ops); +-- must reject +alter index idxpart_a_idx attach partition idxpart2_a_idx; +ERROR: cannot attach index "idxpart2_a_idx" as a partition of index "idxpart_a_idx" +DETAIL: The index definitions do not match. 
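The mismatch rejected here is purely the operator class: the parent index uses text_pattern_ops while idxpart2's pre-existing index uses the default opclass, and those produce semantically different btrees. A short sketch (table and index names are examples) of why the two are not interchangeable:

CREATE TABLE words (w text);
CREATE INDEX words_pattern_idx ON words (w text_pattern_ops); -- serves prefix searches such as w LIKE 'abc%'
CREATE INDEX words_plain_idx   ON words (w);                  -- serves ORDER BY w and range comparisons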
+drop table idxpart; +-- Verify that attaching indexes maps attribute numbers correctly +create table idxpart (col1 int, a int, col2 int, b int) partition by range (a); +create table idxpart1 (b int, col1 int, col2 int, col3 int, a int); +alter table idxpart drop column col1, drop column col2; +alter table idxpart1 drop column col1, drop column col2, drop column col3; +alter table idxpart attach partition idxpart1 for values from (0) to (1000); +create index idxpart_1_idx on only idxpart (b, a); +create index idxpart1_1_idx on idxpart1 (b, a); +create index idxpart1_1b_idx on idxpart1 (b); +-- test expressions and partial-index predicate, too +create index idxpart_2_idx on only idxpart ((b + a)) where a > 1; +create index idxpart1_2_idx on idxpart1 ((b + a)) where a > 1; +create index idxpart1_2b_idx on idxpart1 ((a + b)) where a > 1; +create index idxpart1_2c_idx on idxpart1 ((b + a)) where b > 1; +alter index idxpart_1_idx attach partition idxpart1_1b_idx; -- fail +ERROR: cannot attach index "idxpart1_1b_idx" as a partition of index "idxpart_1_idx" +DETAIL: The index definitions do not match. +alter index idxpart_1_idx attach partition idxpart1_1_idx; +alter index idxpart_2_idx attach partition idxpart1_2b_idx; -- fail +ERROR: cannot attach index "idxpart1_2b_idx" as a partition of index "idxpart_2_idx" +DETAIL: The index definitions do not match. +alter index idxpart_2_idx attach partition idxpart1_2c_idx; -- fail +ERROR: cannot attach index "idxpart1_2c_idx" as a partition of index "idxpart_2_idx" +DETAIL: The index definitions do not match. +alter index idxpart_2_idx attach partition idxpart1_2_idx; -- ok +select relname as child, inhparent::regclass as parent, pg_get_indexdef as childdef + from pg_class left join pg_inherits on inhrelid = oid, + lateral pg_get_indexdef(pg_class.oid) + where relkind in ('i', 'I') and relname like 'idxpart%' order by relname; + child | parent | childdef +-----------------+---------------+----------------------------------------------------------------------------------------- + idxpart1_1_idx | idxpart_1_idx | CREATE INDEX idxpart1_1_idx ON public.idxpart1 USING btree (b, a) + idxpart1_1b_idx | | CREATE INDEX idxpart1_1b_idx ON public.idxpart1 USING btree (b) + idxpart1_2_idx | idxpart_2_idx | CREATE INDEX idxpart1_2_idx ON public.idxpart1 USING btree (((b + a))) WHERE (a > 1) + idxpart1_2b_idx | | CREATE INDEX idxpart1_2b_idx ON public.idxpart1 USING btree (((a + b))) WHERE (a > 1) + idxpart1_2c_idx | | CREATE INDEX idxpart1_2c_idx ON public.idxpart1 USING btree (((b + a))) WHERE (b > 1) + idxpart_1_idx | | CREATE INDEX idxpart_1_idx ON ONLY public.idxpart USING btree (b, a) + idxpart_2_idx | | CREATE INDEX idxpart_2_idx ON ONLY public.idxpart USING btree (((b + a))) WHERE (a > 1) +(7 rows) + +drop table idxpart; +-- Make sure the partition columns are mapped correctly +create table idxpart (a int, b int, c text) partition by range (a); +create index idxparti on idxpart (a); +create index idxparti2 on idxpart (c, b); +create table idxpart1 (c text, a int, b int); +alter table idxpart attach partition idxpart1 for values from (0) to (10); +create table idxpart2 (c text, a int, b int); +create index on idxpart2 (a); +create index on idxpart2 (c, b); +alter table idxpart attach partition idxpart2 for values from (10) to (20); +select c.relname, pg_get_indexdef(indexrelid) + from pg_class c join pg_index i on c.oid = i.indexrelid + where indrelid::regclass::text like 'idxpart%' + order by indexrelid::regclass::text collate "C"; + relname | pg_get_indexdef 
+------------------+--------------------------------------------------------------------- + idxpart1_a_idx | CREATE INDEX idxpart1_a_idx ON public.idxpart1 USING btree (a) + idxpart1_c_b_idx | CREATE INDEX idxpart1_c_b_idx ON public.idxpart1 USING btree (c, b) + idxpart2_a_idx | CREATE INDEX idxpart2_a_idx ON public.idxpart2 USING btree (a) + idxpart2_c_b_idx | CREATE INDEX idxpart2_c_b_idx ON public.idxpart2 USING btree (c, b) + idxparti | CREATE INDEX idxparti ON ONLY public.idxpart USING btree (a) + idxparti2 | CREATE INDEX idxparti2 ON ONLY public.idxpart USING btree (c, b) +(6 rows) + +drop table idxpart; +-- Verify that columns are mapped correctly in expression indexes +create table idxpart (col1 int, col2 int, a int, b int) partition by range (a); +create table idxpart1 (col2 int, b int, col1 int, a int); +create table idxpart2 (col1 int, col2 int, b int, a int); +alter table idxpart drop column col1, drop column col2; +alter table idxpart1 drop column col1, drop column col2; +alter table idxpart2 drop column col1, drop column col2; +create index on idxpart2 (abs(b)); +alter table idxpart attach partition idxpart2 for values from (0) to (1); +create index on idxpart (abs(b)); +create index on idxpart ((b + 1)); +alter table idxpart attach partition idxpart1 for values from (1) to (2); +select c.relname, pg_get_indexdef(indexrelid) + from pg_class c join pg_index i on c.oid = i.indexrelid + where indrelid::regclass::text like 'idxpart%' + order by indexrelid::regclass::text collate "C"; + relname | pg_get_indexdef +-------------------+------------------------------------------------------------------------------ + idxpart1_abs_idx | CREATE INDEX idxpart1_abs_idx ON public.idxpart1 USING btree (abs(b)) + idxpart1_expr_idx | CREATE INDEX idxpart1_expr_idx ON public.idxpart1 USING btree (((b + 1))) + idxpart2_abs_idx | CREATE INDEX idxpart2_abs_idx ON public.idxpart2 USING btree (abs(b)) + idxpart2_expr_idx | CREATE INDEX idxpart2_expr_idx ON public.idxpart2 USING btree (((b + 1))) + idxpart_abs_idx | CREATE INDEX idxpart_abs_idx ON ONLY public.idxpart USING btree (abs(b)) + idxpart_expr_idx | CREATE INDEX idxpart_expr_idx ON ONLY public.idxpart USING btree (((b + 1))) +(6 rows) + +drop table idxpart; +-- Verify that columns are mapped correctly for WHERE in a partial index +create table idxpart (col1 int, a int, col3 int, b int) partition by range (a); +alter table idxpart drop column col1, drop column col3; +create table idxpart1 (col1 int, col2 int, col3 int, col4 int, b int, a int); +alter table idxpart1 drop column col1, drop column col2, drop column col3, drop column col4; +alter table idxpart attach partition idxpart1 for values from (0) to (1000); +create table idxpart2 (col1 int, col2 int, b int, a int); +create index on idxpart2 (a) where b > 1000; +alter table idxpart2 drop column col1, drop column col2; +alter table idxpart attach partition idxpart2 for values from (1000) to (2000); +create index on idxpart (a) where b > 1000; +select c.relname, pg_get_indexdef(indexrelid) + from pg_class c join pg_index i on c.oid = i.indexrelid + where indrelid::regclass::text like 'idxpart%' + order by indexrelid::regclass::text collate "C"; + relname | pg_get_indexdef +----------------+------------------------------------------------------------------------------------ + idxpart1_a_idx | CREATE INDEX idxpart1_a_idx ON public.idxpart1 USING btree (a) WHERE (b > 1000) + idxpart2_a_idx | CREATE INDEX idxpart2_a_idx ON public.idxpart2 USING btree (a) WHERE (b > 1000) + idxpart_a_idx | 
CREATE INDEX idxpart_a_idx ON ONLY public.idxpart USING btree (a) WHERE (b > 1000) +(3 rows) + +drop table idxpart; +-- Column number mapping: dropped columns in the partition +create table idxpart1 (drop_1 int, drop_2 int, col_keep int, drop_3 int); +alter table idxpart1 drop column drop_1; +alter table idxpart1 drop column drop_2; +alter table idxpart1 drop column drop_3; +create index on idxpart1 (col_keep); +create table idxpart (col_keep int) partition by range (col_keep); +create index on idxpart (col_keep); +alter table idxpart attach partition idxpart1 for values from (0) to (1000); +\d idxpart + Table "public.idxpart" + Column | Type | Collation | Nullable | Default +----------+---------+-----------+----------+--------- + col_keep | integer | | | +Partition key: RANGE (col_keep) +Indexes: + "idxpart_col_keep_idx" btree (col_keep) +Number of partitions: 1 (Use \d+ to list them.) + +\d idxpart1 + Table "public.idxpart1" + Column | Type | Collation | Nullable | Default +----------+---------+-----------+----------+--------- + col_keep | integer | | | +Partition of: idxpart FOR VALUES FROM (0) TO (1000) +Indexes: + "idxpart1_col_keep_idx" btree (col_keep) + +select attrelid::regclass, attname, attnum from pg_attribute + where attrelid::regclass::text like 'idxpart%' and attnum > 0 + order by attrelid::regclass, attnum; + attrelid | attname | attnum +-----------------------+------------------------------+-------- + idxpart1 | ........pg.dropped.1........ | 1 + idxpart1 | ........pg.dropped.2........ | 2 + idxpart1 | col_keep | 3 + idxpart1 | ........pg.dropped.4........ | 4 + idxpart1_col_keep_idx | col_keep | 1 + idxpart | col_keep | 1 + idxpart_col_keep_idx | col_keep | 1 +(7 rows) + +drop table idxpart; +-- Column number mapping: dropped columns in the parent table +create table idxpart(drop_1 int, drop_2 int, col_keep int, drop_3 int) partition by range (col_keep); +alter table idxpart drop column drop_1; +alter table idxpart drop column drop_2; +alter table idxpart drop column drop_3; +create table idxpart1 (col_keep int); +create index on idxpart1 (col_keep); +create index on idxpart (col_keep); +alter table idxpart attach partition idxpart1 for values from (0) to (1000); +\d idxpart + Table "public.idxpart" + Column | Type | Collation | Nullable | Default +----------+---------+-----------+----------+--------- + col_keep | integer | | | +Partition key: RANGE (col_keep) +Indexes: + "idxpart_col_keep_idx" btree (col_keep) +Number of partitions: 1 (Use \d+ to list them.) + +\d idxpart1 + Table "public.idxpart1" + Column | Type | Collation | Nullable | Default +----------+---------+-----------+----------+--------- + col_keep | integer | | | +Partition of: idxpart FOR VALUES FROM (0) TO (1000) +Indexes: + "idxpart1_col_keep_idx" btree (col_keep) + +select attrelid::regclass, attname, attnum from pg_attribute + where attrelid::regclass::text like 'idxpart%' and attnum > 0 + order by attrelid::regclass, attnum; + attrelid | attname | attnum +-----------------------+------------------------------+-------- + idxpart | ........pg.dropped.1........ | 1 + idxpart | ........pg.dropped.2........ | 2 + idxpart | col_keep | 3 + idxpart | ........pg.dropped.4........ 
| 4 + idxpart1 | col_keep | 1 + idxpart1_col_keep_idx | col_keep | 1 + idxpart_col_keep_idx | col_keep | 1 +(7 rows) + +drop table idxpart; +-- +-- Constraint-related indexes +-- +-- Verify that it works to add primary key / unique to partitioned tables +create table idxpart (a int primary key, b int) partition by range (a); +\d idxpart + Table "public.idxpart" + Column | Type | Collation | Nullable | Default +--------+---------+-----------+----------+--------- + a | integer | | not null | + b | integer | | | +Partition key: RANGE (a) +Indexes: + "idxpart_pkey" PRIMARY KEY, btree (a) +Number of partitions: 0 + +-- multiple primary key on child should fail +create table failpart partition of idxpart (b primary key) for values from (0) to (100); +ERROR: multiple primary keys for table "failpart" are not allowed +drop table idxpart; +-- primary key on child is okay if there's no PK in the parent, though +create table idxpart (a int) partition by range (a); +create table idxpart1pk partition of idxpart (a primary key) for values from (0) to (100); +\d idxpart1pk + Table "public.idxpart1pk" + Column | Type | Collation | Nullable | Default +--------+---------+-----------+----------+--------- + a | integer | | not null | +Partition of: idxpart FOR VALUES FROM (0) TO (100) +Indexes: + "idxpart1pk_pkey" PRIMARY KEY, btree (a) + +drop table idxpart; +-- Failing to use the full partition key is not allowed +create table idxpart (a int unique, b int) partition by range (a, b); +ERROR: insufficient columns in UNIQUE constraint definition +DETAIL: UNIQUE constraint on table "idxpart" lacks column "b" which is part of the partition key. +create table idxpart (a int, b int unique) partition by range (a, b); +ERROR: insufficient columns in UNIQUE constraint definition +DETAIL: UNIQUE constraint on table "idxpart" lacks column "a" which is part of the partition key. +create table idxpart (a int primary key, b int) partition by range (b, a); +ERROR: insufficient columns in PRIMARY KEY constraint definition +DETAIL: PRIMARY KEY constraint on table "idxpart" lacks column "b" which is part of the partition key. +create table idxpart (a int, b int primary key) partition by range (b, a); +ERROR: insufficient columns in PRIMARY KEY constraint definition +DETAIL: PRIMARY KEY constraint on table "idxpart" lacks column "a" which is part of the partition key. +-- OK if you use them in some other order +create table idxpart (a int, b int, c text, primary key (a, b, c)) partition by range (b, c, a); +drop table idxpart; +-- not other types of index-based constraints +create table idxpart (a int, exclude (a with = )) partition by range (a); +ERROR: exclusion constraints are not supported on partitioned tables +LINE 1: create table idxpart (a int, exclude (a with = )) partition ... + ^ +-- no expressions in partition key for PK/UNIQUE +create table idxpart (a int primary key, b int) partition by range ((b + a)); +ERROR: unsupported PRIMARY KEY constraint with partition key definition +DETAIL: PRIMARY KEY constraints cannot be used when partition keys include expressions. +create table idxpart (a int unique, b int) partition by range ((b + a)); +ERROR: unsupported UNIQUE constraint with partition key definition +DETAIL: UNIQUE constraints cannot be used when partition keys include expressions. 
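The rule exercised above is that a unique or primary key constraint on a partitioned table must contain every partition key column (in any order), and that expression partition keys rule such constraints out entirely. A minimal sketch of the accepted shape, with illustrative names:

CREATE TABLE measurements (city int, logdate date, reading int)
    PARTITION BY RANGE (logdate, city);
ALTER TABLE measurements ADD UNIQUE (city, logdate);  -- ok: covers both partition key columns
-- ADD UNIQUE (city) alone would fail, since it lacks logdate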
+-- use ALTER TABLE to add a primary key +create table idxpart (a int, b int, c text) partition by range (a, b); +alter table idxpart add primary key (a); -- not an incomplete one though +ERROR: insufficient columns in PRIMARY KEY constraint definition +DETAIL: PRIMARY KEY constraint on table "idxpart" lacks column "b" which is part of the partition key. +alter table idxpart add primary key (a, b); -- this works +\d idxpart + Table "public.idxpart" + Column | Type | Collation | Nullable | Default +--------+---------+-----------+----------+--------- + a | integer | | not null | + b | integer | | not null | + c | text | | | +Partition key: RANGE (a, b) +Indexes: + "idxpart_pkey" PRIMARY KEY, btree (a, b) +Number of partitions: 0 + +create table idxpart1 partition of idxpart for values from (0, 0) to (1000, 1000); +\d idxpart1 + Table "public.idxpart1" + Column | Type | Collation | Nullable | Default +--------+---------+-----------+----------+--------- + a | integer | | not null | + b | integer | | not null | + c | text | | | +Partition of: idxpart FOR VALUES FROM (0, 0) TO (1000, 1000) +Indexes: + "idxpart1_pkey" PRIMARY KEY, btree (a, b) + +drop table idxpart; +-- use ALTER TABLE to add a unique constraint +create table idxpart (a int, b int) partition by range (a, b); +alter table idxpart add unique (a); -- not an incomplete one though +ERROR: insufficient columns in UNIQUE constraint definition +DETAIL: UNIQUE constraint on table "idxpart" lacks column "b" which is part of the partition key. +alter table idxpart add unique (b, a); -- this works +\d idxpart + Table "public.idxpart" + Column | Type | Collation | Nullable | Default +--------+---------+-----------+----------+--------- + a | integer | | | + b | integer | | | +Partition key: RANGE (a, b) +Indexes: + "idxpart_b_a_key" UNIQUE CONSTRAINT, btree (b, a) +Number of partitions: 0 + +drop table idxpart; +-- Exclusion constraints cannot be added +create table idxpart (a int, b int) partition by range (a); +alter table idxpart add exclude (a with =); +ERROR: exclusion constraints are not supported on partitioned tables +LINE 1: alter table idxpart add exclude (a with =); + ^ +drop table idxpart; +-- When (sub)partitions are created, they also contain the constraint +create table idxpart (a int, b int, primary key (a, b)) partition by range (a, b); +create table idxpart1 partition of idxpart for values from (1, 1) to (10, 10); +create table idxpart2 partition of idxpart for values from (10, 10) to (20, 20) + partition by range (b); +create table idxpart21 partition of idxpart2 for values from (10) to (15); +create table idxpart22 partition of idxpart2 for values from (15) to (20); +create table idxpart3 (b int not null, a int not null); +alter table idxpart attach partition idxpart3 for values from (20, 20) to (30, 30); +select conname, contype, conrelid::regclass, conindid::regclass, conkey + from pg_constraint where conrelid::regclass::text like 'idxpart%' + order by conname; + conname | contype | conrelid | conindid | conkey +----------------+---------+-----------+----------------+-------- + idxpart1_pkey | p | idxpart1 | idxpart1_pkey | {1,2} + idxpart21_pkey | p | idxpart21 | idxpart21_pkey | {1,2} + idxpart22_pkey | p | idxpart22 | idxpart22_pkey | {1,2} + idxpart2_pkey | p | idxpart2 | idxpart2_pkey | {1,2} + idxpart3_pkey | p | idxpart3 | idxpart3_pkey | {2,1} + idxpart_pkey | p | idxpart | idxpart_pkey | {1,2} +(6 rows) + +drop table idxpart; +-- Verify that multi-layer partitioning honors the requirement that all +-- columns in 
the partition key must appear in primary/unique key +create table idxpart (a int, b int, primary key (a)) partition by range (a); +create table idxpart2 partition of idxpart +for values from (0) to (1000) partition by range (b); -- fail +ERROR: insufficient columns in PRIMARY KEY constraint definition +DETAIL: PRIMARY KEY constraint on table "idxpart2" lacks column "b" which is part of the partition key. +drop table idxpart; +-- Ditto for the ATTACH PARTITION case +create table idxpart (a int unique, b int) partition by range (a); +create table idxpart1 (a int not null, b int, unique (a, b)) + partition by range (a, b); +alter table idxpart attach partition idxpart1 for values from (1) to (1000); +ERROR: insufficient columns in UNIQUE constraint definition +DETAIL: UNIQUE constraint on table "idxpart1" lacks column "b" which is part of the partition key. +DROP TABLE idxpart, idxpart1; +-- Multi-layer partitioning works correctly in this case: +create table idxpart (a int, b int, primary key (a, b)) partition by range (a); +create table idxpart2 partition of idxpart for values from (0) to (1000) partition by range (b); +create table idxpart21 partition of idxpart2 for values from (0) to (1000); +select conname, contype, conrelid::regclass, conindid::regclass, conkey + from pg_constraint where conrelid::regclass::text like 'idxpart%' + order by conname; + conname | contype | conrelid | conindid | conkey +----------------+---------+-----------+----------------+-------- + idxpart21_pkey | p | idxpart21 | idxpart21_pkey | {1,2} + idxpart2_pkey | p | idxpart2 | idxpart2_pkey | {1,2} + idxpart_pkey | p | idxpart | idxpart_pkey | {1,2} +(3 rows) + +drop table idxpart; +-- If a partitioned table has a unique/PK constraint, then it's not possible +-- to drop the corresponding constraint in the children; nor it's possible +-- to drop the indexes individually. Dropping the constraint in the parent +-- gets rid of the lot. +create table idxpart (i int) partition by hash (i); +create table idxpart0 partition of idxpart (i) for values with (modulus 2, remainder 0); +create table idxpart1 partition of idxpart (i) for values with (modulus 2, remainder 1); +alter table idxpart0 add primary key(i); +alter table idxpart add primary key(i); +select indrelid::regclass, indexrelid::regclass, inhparent::regclass, indisvalid, + conname, conislocal, coninhcount, connoinherit, convalidated + from pg_index idx left join pg_inherits inh on (idx.indexrelid = inh.inhrelid) + left join pg_constraint con on (idx.indexrelid = con.conindid) + where indrelid::regclass::text like 'idxpart%' + order by indexrelid::regclass::text collate "C"; + indrelid | indexrelid | inhparent | indisvalid | conname | conislocal | coninhcount | connoinherit | convalidated +----------+---------------+--------------+------------+---------------+------------+-------------+--------------+-------------- + idxpart0 | idxpart0_pkey | idxpart_pkey | t | idxpart0_pkey | f | 1 | t | t + idxpart1 | idxpart1_pkey | idxpart_pkey | t | idxpart1_pkey | f | 1 | f | t + idxpart | idxpart_pkey | | t | idxpart_pkey | t | 0 | t | t +(3 rows) + +drop index idxpart0_pkey; -- fail +ERROR: cannot drop index idxpart0_pkey because index idxpart_pkey requires it +HINT: You can drop index idxpart_pkey instead. +drop index idxpart1_pkey; -- fail +ERROR: cannot drop index idxpart1_pkey because index idxpart_pkey requires it +HINT: You can drop index idxpart_pkey instead. 
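The parent/child relationship behind that HINT can be seen in pg_inherits, the same catalog the earlier queries in this file join against: each partition's pkey index is attached to the parent's constraint index. An illustrative lookup, reusing the test's names purely as an example:

SELECT inhrelid::regclass AS child_index, inhparent::regclass AS parent_index
FROM pg_inherits
WHERE inhrelid = 'idxpart0_pkey'::regclass;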
+alter table idxpart0 drop constraint idxpart0_pkey; -- fail +ERROR: cannot drop inherited constraint "idxpart0_pkey" of relation "idxpart0" +alter table idxpart1 drop constraint idxpart1_pkey; -- fail +ERROR: cannot drop inherited constraint "idxpart1_pkey" of relation "idxpart1" +alter table idxpart drop constraint idxpart_pkey; -- ok +select indrelid::regclass, indexrelid::regclass, inhparent::regclass, indisvalid, + conname, conislocal, coninhcount, connoinherit, convalidated + from pg_index idx left join pg_inherits inh on (idx.indexrelid = inh.inhrelid) + left join pg_constraint con on (idx.indexrelid = con.conindid) + where indrelid::regclass::text like 'idxpart%' + order by indexrelid::regclass::text collate "C"; + indrelid | indexrelid | inhparent | indisvalid | conname | conislocal | coninhcount | connoinherit | convalidated +----------+------------+-----------+------------+---------+------------+-------------+--------------+-------------- +(0 rows) + +drop table idxpart; +-- If the partition to be attached already has a primary key, fail if +-- it doesn't match the parent's PK. +CREATE TABLE idxpart (c1 INT PRIMARY KEY, c2 INT, c3 VARCHAR(10)) PARTITION BY RANGE(c1); +CREATE TABLE idxpart1 (LIKE idxpart); +ALTER TABLE idxpart1 ADD PRIMARY KEY (c1, c2); +ALTER TABLE idxpart ATTACH PARTITION idxpart1 FOR VALUES FROM (100) TO (200); +ERROR: multiple primary keys for table "idxpart1" are not allowed +DROP TABLE idxpart, idxpart1; +-- Ditto if there is some distance between the PKs (subpartitioning) +create table idxpart (a int, b int, primary key (a)) partition by range (a); +create table idxpart1 (a int not null, b int) partition by range (a); +create table idxpart11 (a int not null, b int primary key); +alter table idxpart1 attach partition idxpart11 for values from (0) to (1000); +alter table idxpart attach partition idxpart1 for values from (0) to (10000); +ERROR: multiple primary keys for table "idxpart11" are not allowed +drop table idxpart, idxpart1, idxpart11; +-- If a partitioned table has a constraint whose index is not valid, +-- attaching a missing partition makes it valid. 
+create table idxpart (a int) partition by range (a); +create table idxpart0 (like idxpart); +alter table idxpart0 add primary key (a); +alter table idxpart attach partition idxpart0 for values from (0) to (1000); +alter table only idxpart add primary key (a); +select indrelid::regclass, indexrelid::regclass, inhparent::regclass, indisvalid, + conname, conislocal, coninhcount, connoinherit, convalidated + from pg_index idx left join pg_inherits inh on (idx.indexrelid = inh.inhrelid) + left join pg_constraint con on (idx.indexrelid = con.conindid) + where indrelid::regclass::text like 'idxpart%' + order by indexrelid::regclass::text collate "C"; + indrelid | indexrelid | inhparent | indisvalid | conname | conislocal | coninhcount | connoinherit | convalidated +----------+---------------+-----------+------------+---------------+------------+-------------+--------------+-------------- + idxpart0 | idxpart0_pkey | | t | idxpart0_pkey | t | 0 | t | t + idxpart | idxpart_pkey | | f | idxpart_pkey | t | 0 | t | t +(2 rows) + +alter index idxpart_pkey attach partition idxpart0_pkey; +select indrelid::regclass, indexrelid::regclass, inhparent::regclass, indisvalid, + conname, conislocal, coninhcount, connoinherit, convalidated + from pg_index idx left join pg_inherits inh on (idx.indexrelid = inh.inhrelid) + left join pg_constraint con on (idx.indexrelid = con.conindid) + where indrelid::regclass::text like 'idxpart%' + order by indexrelid::regclass::text collate "C"; + indrelid | indexrelid | inhparent | indisvalid | conname | conislocal | coninhcount | connoinherit | convalidated +----------+---------------+--------------+------------+---------------+------------+-------------+--------------+-------------- + idxpart0 | idxpart0_pkey | idxpart_pkey | t | idxpart0_pkey | f | 1 | t | t + idxpart | idxpart_pkey | | t | idxpart_pkey | t | 0 | t | t +(2 rows) + +drop table idxpart; +-- if a partition has a unique index without a constraint, does not attach +-- automatically; creates a new index instead. 
+create table idxpart (a int, b int) partition by range (a); +create table idxpart1 (a int not null, b int); +create unique index on idxpart1 (a); +alter table idxpart add primary key (a); +alter table idxpart attach partition idxpart1 for values from (1) to (1000); +select indrelid::regclass, indexrelid::regclass, inhparent::regclass, indisvalid, + conname, conislocal, coninhcount, connoinherit, convalidated + from pg_index idx left join pg_inherits inh on (idx.indexrelid = inh.inhrelid) + left join pg_constraint con on (idx.indexrelid = con.conindid) + where indrelid::regclass::text like 'idxpart%' + order by indexrelid::regclass::text collate "C"; + indrelid | indexrelid | inhparent | indisvalid | conname | conislocal | coninhcount | connoinherit | convalidated +----------+----------------+--------------+------------+---------------+------------+-------------+--------------+-------------- + idxpart1 | idxpart1_a_idx | | t | | | | | + idxpart1 | idxpart1_pkey | idxpart_pkey | t | idxpart1_pkey | f | 1 | f | t + idxpart | idxpart_pkey | | t | idxpart_pkey | t | 0 | t | t +(3 rows) + +drop table idxpart; +-- Can't attach an index without a corresponding constraint +create table idxpart (a int, b int) partition by range (a); +create table idxpart1 (a int not null, b int); +create unique index on idxpart1 (a); +alter table idxpart attach partition idxpart1 for values from (1) to (1000); +alter table only idxpart add primary key (a); +alter index idxpart_pkey attach partition idxpart1_a_idx; -- fail +ERROR: cannot attach index "idxpart1_a_idx" as a partition of index "idxpart_pkey" +DETAIL: The index "idxpart_pkey" belongs to a constraint in table "idxpart" but no constraint exists for index "idxpart1_a_idx". +drop table idxpart; +-- Test that unique constraints are working +create table idxpart (a int, b text, primary key (a, b)) partition by range (a); +create table idxpart1 partition of idxpart for values from (0) to (100000); +create table idxpart2 (c int, like idxpart); +insert into idxpart2 (c, a, b) values (42, 572814, 'inserted first'); +alter table idxpart2 drop column c; +create unique index on idxpart (a); +alter table idxpart attach partition idxpart2 for values from (100000) to (1000000); +insert into idxpart values (0, 'zero'), (42, 'life'), (2^16, 'sixteen'); +insert into idxpart select 2^g, format('two to power of %s', g) from generate_series(15, 17) g; +ERROR: duplicate key value violates unique constraint "idxpart1_a_idx" +DETAIL: Key (a)=(65536) already exists. +insert into idxpart values (16, 'sixteen'); +insert into idxpart (b, a) values ('one', 142857), ('two', 285714); +insert into idxpart select a * 2, b || b from idxpart where a between 2^16 and 2^19; +ERROR: duplicate key value violates unique constraint "idxpart2_a_idx" +DETAIL: Key (a)=(285714) already exists. +insert into idxpart values (572814, 'five'); +ERROR: duplicate key value violates unique constraint "idxpart2_a_idx" +DETAIL: Key (a)=(572814) already exists. 
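The duplicate-key errors above are raised by the index of whichever partition the row routes to; cross-partition duplicates cannot slip through because, as the earlier failures showed, a unique index or constraint on a partitioned table must cover the partition key, so equal key values always land in the same partition. A minimal sketch of that rule follows; the table name uniq_demo is illustrative only and not part of the regression suite.

-- uniq_demo is a hypothetical name used only for this sketch
create table uniq_demo (a int, b text, unique (a)) partition by range (a);
create table uniq_demo_p1 partition of uniq_demo for values from (0) to (100);
create table uniq_demo_p2 partition of uniq_demo for values from (100) to (200);
insert into uniq_demo values (1, 'first');
insert into uniq_demo values (1, 'second');  -- rejected by uniq_demo_p1's own unique index
drop table uniq_demo;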
+insert into idxpart values (857142, 'six'); +select tableoid::regclass, * from idxpart order by a; + tableoid | a | b +----------+--------+---------------- + idxpart1 | 0 | zero + idxpart1 | 16 | sixteen + idxpart1 | 42 | life + idxpart1 | 65536 | sixteen + idxpart2 | 142857 | one + idxpart2 | 285714 | two + idxpart2 | 572814 | inserted first + idxpart2 | 857142 | six +(8 rows) + +drop table idxpart; +-- test fastpath mechanism for index insertion +create table fastpath (a int, b text, c numeric); +create unique index fpindex1 on fastpath(a); +insert into fastpath values (1, 'b1', 100.00); +insert into fastpath values (1, 'b1', 100.00); -- unique key check +ERROR: duplicate key value violates unique constraint "fpindex1" +DETAIL: Key (a)=(1) already exists. +truncate fastpath; +insert into fastpath select generate_series(1,10000), 'b', 100; +-- vacuum the table so as to improve chances of index-only scans. we can't +-- guarantee if index-only scans will be picked up in all cases or not, but +-- that fuzziness actually helps the test. +vacuum fastpath; +set enable_seqscan to false; +set enable_bitmapscan to false; +select sum(a) from fastpath where a = 6456; + sum +------ + 6456 +(1 row) + +select sum(a) from fastpath where a >= 5000 and a < 5700; + sum +--------- + 3744650 +(1 row) + +-- drop the only index on the table and compute hashes for +-- a few queries which orders the results in various different ways. +drop index fpindex1; +truncate fastpath; +insert into fastpath select y.x, 'b' || (y.x/10)::text, 100 from (select generate_series(1,10000) as x) y; +select md5(string_agg(a::text, b order by a, b asc)) from fastpath + where a >= 1000 and a < 2000 and b > 'b1' and b < 'b3'; + md5 +---------------------------------- + 2ca216010a558a52d7df12f76dfc77ab +(1 row) + +select md5(string_agg(a::text, b order by a desc, b desc)) from fastpath + where a >= 1000 and a < 2000 and b > 'b1' and b < 'b3'; + md5 +---------------------------------- + 6167a852b3e0679886b84a5405b5b53d +(1 row) + +select md5(string_agg(a::text, b order by b, a desc)) from fastpath + where a >= 1000 and a < 2000 and b > 'b1' and b < 'b3'; + md5 +---------------------------------- + dfcf2bd5e5fea8397d47b2fd14618d31 +(1 row) + +select md5(string_agg(a::text, b order by b, a asc)) from fastpath + where a >= 1000 and a < 2000 and b > 'b1' and b < 'b3'; + md5 +---------------------------------- + 2ca216010a558a52d7df12f76dfc77ab +(1 row) + +-- now create a multi-column index with both column asc +create index fpindex2 on fastpath(a, b); +truncate fastpath; +insert into fastpath select y.x, 'b' || (y.x/10)::text, 100 from (select generate_series(1,10000) as x) y; +-- again, vacuum here either forces index-only scans or creates fuzziness +vacuum fastpath; +select md5(string_agg(a::text, b order by a, b asc)) from fastpath + where a >= 1000 and a < 2000 and b > 'b1' and b < 'b3'; + md5 +---------------------------------- + 2ca216010a558a52d7df12f76dfc77ab +(1 row) + +select md5(string_agg(a::text, b order by a desc, b desc)) from fastpath + where a >= 1000 and a < 2000 and b > 'b1' and b < 'b3'; + md5 +---------------------------------- + 6167a852b3e0679886b84a5405b5b53d +(1 row) + +select md5(string_agg(a::text, b order by b, a desc)) from fastpath + where a >= 1000 and a < 2000 and b > 'b1' and b < 'b3'; + md5 +---------------------------------- + dfcf2bd5e5fea8397d47b2fd14618d31 +(1 row) + +select md5(string_agg(a::text, b order by b, a asc)) from fastpath + where a >= 1000 and a < 2000 and b > 'b1' and b < 'b3'; + md5 
+---------------------------------- + 2ca216010a558a52d7df12f76dfc77ab +(1 row) + +-- same queries with a different kind of index now. the final result must not +-- change irrespective of what kind of index we have. +drop index fpindex2; +create index fpindex3 on fastpath(a desc, b asc); +truncate fastpath; +insert into fastpath select y.x, 'b' || (y.x/10)::text, 100 from (select generate_series(1,10000) as x) y; +vacuum fastpath; +select md5(string_agg(a::text, b order by a, b asc)) from fastpath + where a >= 1000 and a < 2000 and b > 'b1' and b < 'b3'; + md5 +---------------------------------- + 2ca216010a558a52d7df12f76dfc77ab +(1 row) + +select md5(string_agg(a::text, b order by a desc, b desc)) from fastpath + where a >= 1000 and a < 2000 and b > 'b1' and b < 'b3'; + md5 +---------------------------------- + 6167a852b3e0679886b84a5405b5b53d +(1 row) + +select md5(string_agg(a::text, b order by b, a desc)) from fastpath + where a >= 1000 and a < 2000 and b > 'b1' and b < 'b3'; + md5 +---------------------------------- + dfcf2bd5e5fea8397d47b2fd14618d31 +(1 row) + +select md5(string_agg(a::text, b order by b, a asc)) from fastpath + where a >= 1000 and a < 2000 and b > 'b1' and b < 'b3'; + md5 +---------------------------------- + 2ca216010a558a52d7df12f76dfc77ab +(1 row) + +-- repeat again +drop index fpindex3; +create index fpindex4 on fastpath(a asc, b desc); +truncate fastpath; +insert into fastpath select y.x, 'b' || (y.x/10)::text, 100 from (select generate_series(1,10000) as x) y; +vacuum fastpath; +select md5(string_agg(a::text, b order by a, b asc)) from fastpath + where a >= 1000 and a < 2000 and b > 'b1' and b < 'b3'; + md5 +---------------------------------- + 2ca216010a558a52d7df12f76dfc77ab +(1 row) + +select md5(string_agg(a::text, b order by a desc, b desc)) from fastpath + where a >= 1000 and a < 2000 and b > 'b1' and b < 'b3'; + md5 +---------------------------------- + 6167a852b3e0679886b84a5405b5b53d +(1 row) + +select md5(string_agg(a::text, b order by b, a desc)) from fastpath + where a >= 1000 and a < 2000 and b > 'b1' and b < 'b3'; + md5 +---------------------------------- + dfcf2bd5e5fea8397d47b2fd14618d31 +(1 row) + +select md5(string_agg(a::text, b order by b, a asc)) from fastpath + where a >= 1000 and a < 2000 and b > 'b1' and b < 'b3'; + md5 +---------------------------------- + 2ca216010a558a52d7df12f76dfc77ab +(1 row) + +-- and again, this time indexing by (b, a). Note that column "b" has non-unique +-- values. 
+drop index fpindex4; +create index fpindex5 on fastpath(b asc, a desc); +truncate fastpath; +insert into fastpath select y.x, 'b' || (y.x/10)::text, 100 from (select generate_series(1,10000) as x) y; +vacuum fastpath; +select md5(string_agg(a::text, b order by a, b asc)) from fastpath + where a >= 1000 and a < 2000 and b > 'b1' and b < 'b3'; + md5 +---------------------------------- + 2ca216010a558a52d7df12f76dfc77ab +(1 row) + +select md5(string_agg(a::text, b order by a desc, b desc)) from fastpath + where a >= 1000 and a < 2000 and b > 'b1' and b < 'b3'; + md5 +---------------------------------- + 6167a852b3e0679886b84a5405b5b53d +(1 row) + +select md5(string_agg(a::text, b order by b, a desc)) from fastpath + where a >= 1000 and a < 2000 and b > 'b1' and b < 'b3'; + md5 +---------------------------------- + dfcf2bd5e5fea8397d47b2fd14618d31 +(1 row) + +select md5(string_agg(a::text, b order by b, a asc)) from fastpath + where a >= 1000 and a < 2000 and b > 'b1' and b < 'b3'; + md5 +---------------------------------- + 2ca216010a558a52d7df12f76dfc77ab +(1 row) + +-- one last time +drop index fpindex5; +create index fpindex6 on fastpath(b desc, a desc); +truncate fastpath; +insert into fastpath select y.x, 'b' || (y.x/10)::text, 100 from (select generate_series(1,10000) as x) y; +vacuum fastpath; +select md5(string_agg(a::text, b order by a, b asc)) from fastpath + where a >= 1000 and a < 2000 and b > 'b1' and b < 'b3'; + md5 +---------------------------------- + 2ca216010a558a52d7df12f76dfc77ab +(1 row) + +select md5(string_agg(a::text, b order by a desc, b desc)) from fastpath + where a >= 1000 and a < 2000 and b > 'b1' and b < 'b3'; + md5 +---------------------------------- + 6167a852b3e0679886b84a5405b5b53d +(1 row) + +select md5(string_agg(a::text, b order by b, a desc)) from fastpath + where a >= 1000 and a < 2000 and b > 'b1' and b < 'b3'; + md5 +---------------------------------- + dfcf2bd5e5fea8397d47b2fd14618d31 +(1 row) + +select md5(string_agg(a::text, b order by b, a asc)) from fastpath + where a >= 1000 and a < 2000 and b > 'b1' and b < 'b3'; + md5 +---------------------------------- + 2ca216010a558a52d7df12f76dfc77ab +(1 row) + +drop table fastpath; +-- intentionally leave some objects around +create table idxpart (a int) partition by range (a); +create table idxpart1 partition of idxpart for values from (0) to (100); +create table idxpart2 partition of idxpart for values from (100) to (1000) + partition by range (a); +create table idxpart21 partition of idxpart2 for values from (100) to (200); +create table idxpart22 partition of idxpart2 for values from (200) to (300); +create index on idxpart22 (a); +create index on only idxpart2 (a); +alter index idxpart2_a_idx attach partition idxpart22_a_idx; +create index on idxpart (a); +create table idxpart_another (a int, b int, primary key (a, b)) partition by range (a); +create table idxpart_another_1 partition of idxpart_another for values from (0) to (100); +-- Test that covering partitioned indexes work in various cases +create table covidxpart (a int, b int) partition by list (a); +create unique index on covidxpart (a) include (b); +create table covidxpart1 partition of covidxpart for values in (1); +create table covidxpart2 partition of covidxpart for values in (2); +insert into covidxpart values (1, 1); +insert into covidxpart values (1, 1); +ERROR: duplicate key value violates unique constraint "covidxpart1_a_b_idx" +DETAIL: Key (a)=(1) already exists. 
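Note that the error above reports only (a)=(1): with a covering unique index, only the key column participates in the uniqueness check, while the INCLUDEd column is carried as payload in the index tuples. A compact sketch of that behaviour, using the hypothetical name cov_demo rather than the suite's covidxpart tables:

-- cov_demo is a hypothetical name used only for this sketch
create table cov_demo (a int, b int) partition by list (a);
create unique index on cov_demo (a) include (b);
create table cov_demo1 partition of cov_demo for values in (1);
insert into cov_demo values (1, 10);
insert into cov_demo values (1, 20);  -- fails on (a)=(1); the INCLUDEd column b is not compared
drop table cov_demo;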
+create table covidxpart3 (b int, c int, a int); +alter table covidxpart3 drop c; +alter table covidxpart attach partition covidxpart3 for values in (3); +insert into covidxpart values (3, 1); +insert into covidxpart values (3, 1); +ERROR: duplicate key value violates unique constraint "covidxpart3_a_b_idx" +DETAIL: Key (a)=(3) already exists. +create table covidxpart4 (b int, a int); +create unique index on covidxpart4 (a) include (b); +create unique index on covidxpart4 (a); +alter table covidxpart attach partition covidxpart4 for values in (4); +insert into covidxpart values (4, 1); +insert into covidxpart values (4, 1); +ERROR: duplicate key value violates unique constraint "covidxpart4_a_b_idx" +DETAIL: Key (a)=(4) already exists. diff --git a/src/test/regress/expected/indirect_toast.out b/src/test/regress/expected/indirect_toast.out index 3e255fbded..b05173c43b 100644 --- a/src/test/regress/expected/indirect_toast.out +++ b/src/test/regress/expected/indirect_toast.out @@ -1,10 +1,10 @@ -CREATE TABLE toasttest(descr text, cnt int DEFAULT 0, f1 text, f2 text); -INSERT INTO toasttest(descr, f1, f2) VALUES('two-compressed', repeat('1234567890',1000), repeat('1234567890',1000)); -INSERT INTO toasttest(descr, f1, f2) VALUES('two-toasted', repeat('1234567890',30000), repeat('1234567890',50000)); -INSERT INTO toasttest(descr, f1, f2) VALUES('one-compressed,one-null', NULL, repeat('1234567890',1000)); -INSERT INTO toasttest(descr, f1, f2) VALUES('one-toasted,one-null', NULL, repeat('1234567890',50000)); +CREATE TABLE indtoasttest(descr text, cnt int DEFAULT 0, f1 text, f2 text); +INSERT INTO indtoasttest(descr, f1, f2) VALUES('two-compressed', repeat('1234567890',1000), repeat('1234567890',1000)); +INSERT INTO indtoasttest(descr, f1, f2) VALUES('two-toasted', repeat('1234567890',30000), repeat('1234567890',50000)); +INSERT INTO indtoasttest(descr, f1, f2) VALUES('one-compressed,one-null', NULL, repeat('1234567890',1000)); +INSERT INTO indtoasttest(descr, f1, f2) VALUES('one-toasted,one-null', NULL, repeat('1234567890',50000)); -- check whether indirect tuples works on the most basic level -SELECT descr, substring(make_tuple_indirect(toasttest)::text, 1, 200) FROM toasttest; +SELECT descr, substring(make_tuple_indirect(indtoasttest)::text, 1, 200) FROM indtoasttest; descr | substring -------------------------+---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- two-compressed | (two-compressed,0,12345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012 @@ -14,7 +14,7 @@ SELECT descr, substring(make_tuple_indirect(toasttest)::text, 1, 200) FROM toast (4 rows) -- modification without changing varlenas -UPDATE toasttest SET cnt = cnt +1 RETURNING substring(toasttest::text, 1, 200); +UPDATE indtoasttest SET cnt = cnt +1 RETURNING substring(indtoasttest::text, 1, 200); substring ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- (two-compressed,1,12345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012 @@ -24,7 +24,7 @@ UPDATE toasttest SET cnt = cnt +1 
RETURNING substring(toasttest::text, 1, 200); (4 rows) -- modification without modifying assigned value -UPDATE toasttest SET cnt = cnt +1, f1 = f1 RETURNING substring(toasttest::text, 1, 200); +UPDATE indtoasttest SET cnt = cnt +1, f1 = f1 RETURNING substring(indtoasttest::text, 1, 200); substring ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- (two-compressed,2,12345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012 @@ -34,7 +34,7 @@ UPDATE toasttest SET cnt = cnt +1, f1 = f1 RETURNING substring(toasttest::text, (4 rows) -- modification modifying, but effectively not changing -UPDATE toasttest SET cnt = cnt +1, f1 = f1||'' RETURNING substring(toasttest::text, 1, 200); +UPDATE indtoasttest SET cnt = cnt +1, f1 = f1||'' RETURNING substring(indtoasttest::text, 1, 200); substring ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- (two-compressed,3,12345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012 @@ -43,7 +43,7 @@ UPDATE toasttest SET cnt = cnt +1, f1 = f1||'' RETURNING substring(toasttest::te ("one-toasted,one-null",3,,12345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123 (4 rows) -UPDATE toasttest SET cnt = cnt +1, f1 = '-'||f1||'-' RETURNING substring(toasttest::text, 1, 200); +UPDATE indtoasttest SET cnt = cnt +1, f1 = '-'||f1||'-' RETURNING substring(indtoasttest::text, 1, 200); substring ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- (two-compressed,4,-1234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901 @@ -52,7 +52,7 @@ UPDATE toasttest SET cnt = cnt +1, f1 = '-'||f1||'-' RETURNING substring(toastte ("one-toasted,one-null",4,,12345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123 (4 rows) -SELECT substring(toasttest::text, 1, 200) FROM toasttest; +SELECT substring(indtoasttest::text, 1, 200) FROM indtoasttest; substring ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- (two-compressed,4,-1234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901 @@ -62,8 +62,8 @@ SELECT substring(toasttest::text, 1, 200) FROM toasttest; (4 rows) -- check we didn't screw with main/toast tuple visibility -VACUUM FREEZE toasttest; -SELECT substring(toasttest::text, 1, 200) FROM toasttest; +VACUUM FREEZE indtoasttest; +SELECT substring(indtoasttest::text, 1, 200) 
FROM indtoasttest; substring ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- (two-compressed,4,-1234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901 @@ -80,13 +80,13 @@ BEGIN NEW := make_tuple_indirect(NEW); RETURN NEW; END$$; -CREATE TRIGGER toasttest_update_indirect +CREATE TRIGGER indtoasttest_update_indirect BEFORE INSERT OR UPDATE - ON toasttest + ON indtoasttest FOR EACH ROW EXECUTE PROCEDURE update_using_indirect(); -- modification without changing varlenas -UPDATE toasttest SET cnt = cnt +1 RETURNING substring(toasttest::text, 1, 200); +UPDATE indtoasttest SET cnt = cnt +1 RETURNING substring(indtoasttest::text, 1, 200); substring ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- (two-compressed,5,-1234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901 @@ -96,7 +96,7 @@ UPDATE toasttest SET cnt = cnt +1 RETURNING substring(toasttest::text, 1, 200); (4 rows) -- modification without modifying assigned value -UPDATE toasttest SET cnt = cnt +1, f1 = f1 RETURNING substring(toasttest::text, 1, 200); +UPDATE indtoasttest SET cnt = cnt +1, f1 = f1 RETURNING substring(indtoasttest::text, 1, 200); substring ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- (two-compressed,6,-1234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901 @@ -106,7 +106,7 @@ UPDATE toasttest SET cnt = cnt +1, f1 = f1 RETURNING substring(toasttest::text, (4 rows) -- modification modifying, but effectively not changing -UPDATE toasttest SET cnt = cnt +1, f1 = f1||'' RETURNING substring(toasttest::text, 1, 200); +UPDATE indtoasttest SET cnt = cnt +1, f1 = f1||'' RETURNING substring(indtoasttest::text, 1, 200); substring ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- (two-compressed,7,-1234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901 @@ -115,7 +115,7 @@ UPDATE toasttest SET cnt = cnt +1, f1 = f1||'' RETURNING substring(toasttest::te ("one-toasted,one-null",7,,12345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123 (4 rows) -UPDATE toasttest SET cnt = cnt +1, f1 = '-'||f1||'-' RETURNING substring(toasttest::text, 1, 200); +UPDATE indtoasttest SET cnt = cnt +1, f1 = '-'||f1||'-' RETURNING substring(indtoasttest::text, 1, 200); substring 
---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- (two-compressed,8,--123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890 @@ -124,8 +124,8 @@ UPDATE toasttest SET cnt = cnt +1, f1 = '-'||f1||'-' RETURNING substring(toastte ("one-toasted,one-null",8,,12345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123 (4 rows) -INSERT INTO toasttest(descr, f1, f2) VALUES('one-toasted,one-null, via indirect', repeat('1234567890',30000), NULL); -SELECT substring(toasttest::text, 1, 200) FROM toasttest; +INSERT INTO indtoasttest(descr, f1, f2) VALUES('one-toasted,one-null, via indirect', repeat('1234567890',30000), NULL); +SELECT substring(indtoasttest::text, 1, 200) FROM indtoasttest; substring ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- (two-compressed,8,--123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890 @@ -136,8 +136,8 @@ SELECT substring(toasttest::text, 1, 200) FROM toasttest; (5 rows) -- check we didn't screw with main/toast tuple visibility -VACUUM FREEZE toasttest; -SELECT substring(toasttest::text, 1, 200) FROM toasttest; +VACUUM FREEZE indtoasttest; +SELECT substring(indtoasttest::text, 1, 200) FROM indtoasttest; substring ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- (two-compressed,8,--123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890 @@ -147,5 +147,5 @@ SELECT substring(toasttest::text, 1, 200) FROM toasttest; ("one-toasted,one-null, via indirect",0,1234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890 (5 rows) -DROP TABLE toasttest; +DROP TABLE indtoasttest; DROP FUNCTION update_using_indirect(); diff --git a/src/test/regress/expected/inherit.out b/src/test/regress/expected/inherit.out index 1fa9650ec9..1e00c849f3 100644 --- a/src/test/regress/expected/inherit.out +++ b/src/test/regress/expected/inherit.out @@ -625,6 +625,28 @@ select tableoid::regclass::text as relname, parted_tab.* from parted_tab order b (3 rows) drop table parted_tab; +-- Check UPDATE with multi-level partitioned inherited target +create table mlparted_tab (a int, b char, c text) partition by list (a); +create table mlparted_tab_part1 partition of mlparted_tab for values in (1); +create table mlparted_tab_part2 partition of mlparted_tab for values in (2) partition by list (b); +create table mlparted_tab_part3 partition of mlparted_tab for values in (3); +create table mlparted_tab_part2a partition of mlparted_tab_part2 for values in ('a'); +create table mlparted_tab_part2b partition of mlparted_tab_part2 for values in ('b'); +insert into mlparted_tab values 
(1, 'a'), (2, 'a'), (2, 'b'), (3, 'a'); +update mlparted_tab mlp set c = 'xxx' +from + (select a from some_tab union all select a+1 from some_tab) ss (a) +where (mlp.a = ss.a and mlp.b = 'b') or mlp.a = 3; +select tableoid::regclass::text as relname, mlparted_tab.* from mlparted_tab order by 1,2; + relname | a | b | c +---------------------+---+---+----- + mlparted_tab_part1 | 1 | a | + mlparted_tab_part2a | 2 | a | + mlparted_tab_part2b | 2 | b | xxx + mlparted_tab_part3 | 3 | a | xxx +(4 rows) + +drop table mlparted_tab; drop table some_tab cascade; NOTICE: drop cascades to table some_tab_child /* Test multiple inheritance of column defaults */ @@ -742,6 +764,8 @@ NOTICE: drop cascades to table c1 -- tables. See the pgsql-hackers thread beginning Dec. 4/04 create table base (i integer); create table derived () inherits (base); +create table more_derived (like derived, b int) inherits (derived); +NOTICE: merging column "i" with inherited definition insert into derived (i) values (0); select derived::base from derived; derived @@ -755,6 +779,22 @@ select NULL::derived::base; (1 row) +-- remove redundant conversions. +explain (verbose on, costs off) select row(i, b)::more_derived::derived::base from more_derived; + QUERY PLAN +------------------------------------------- + Seq Scan on public.more_derived + Output: (ROW(i, b)::more_derived)::base +(2 rows) + +explain (verbose on, costs off) select (1, 2)::more_derived::derived::base; + QUERY PLAN +----------------------- + Result + Output: '(1)'::base +(2 rows) + +drop table more_derived; drop table derived; drop table base; create table p1(ff1 int); @@ -775,7 +815,7 @@ drop table p1; CREATE TABLE ac (aa TEXT); alter table ac add constraint ac_check check (aa is not null); CREATE TABLE bc (bb TEXT) INHERITS (ac); -select pc.relname, pgc.conname, pgc.contype, pgc.conislocal, pgc.coninhcount, pgc.consrc from pg_class as pc inner join pg_constraint as pgc on (pgc.conrelid = pc.oid) where pc.relname in ('ac', 'bc') order by 1,2; +select pc.relname, pgc.conname, pgc.contype, pgc.conislocal, pgc.coninhcount, pg_get_expr(pgc.conbin, pc.oid) as consrc from pg_class as pc inner join pg_constraint as pgc on (pgc.conrelid = pc.oid) where pc.relname in ('ac', 'bc') order by 1,2; relname | conname | contype | conislocal | coninhcount | consrc ---------+----------+---------+------------+-------------+------------------ ac | ac_check | c | t | 0 | (aa IS NOT NULL) @@ -791,14 +831,14 @@ DETAIL: Failing row contains (null, null). 
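The repeated -select/+select changes in this file all make the same substitution: instead of reading the stored pg_constraint.consrc text, the queries decompile the conbin expression tree with pg_get_expr(), which yields the same consrc output column. A sketch of the pattern; 'ac' is simply the table used by the surrounding tests, and any constrained table would do.

-- same pattern as the updated regression queries in this hunk
select conname,
       contype,
       pg_get_expr(conbin, conrelid) as consrc   -- decompile the stored expression tree
  from pg_constraint
 where conrelid = 'ac'::regclass
 order by conname;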
alter table bc drop constraint ac_check; -- fail, disallowed ERROR: cannot drop inherited constraint "ac_check" of relation "bc" alter table ac drop constraint ac_check; -select pc.relname, pgc.conname, pgc.contype, pgc.conislocal, pgc.coninhcount, pgc.consrc from pg_class as pc inner join pg_constraint as pgc on (pgc.conrelid = pc.oid) where pc.relname in ('ac', 'bc') order by 1,2; +select pc.relname, pgc.conname, pgc.contype, pgc.conislocal, pgc.coninhcount, pg_get_expr(pgc.conbin, pc.oid) as consrc from pg_class as pc inner join pg_constraint as pgc on (pgc.conrelid = pc.oid) where pc.relname in ('ac', 'bc') order by 1,2; relname | conname | contype | conislocal | coninhcount | consrc ---------+---------+---------+------------+-------------+-------- (0 rows) -- try the unnamed-constraint case alter table ac add check (aa is not null); -select pc.relname, pgc.conname, pgc.contype, pgc.conislocal, pgc.coninhcount, pgc.consrc from pg_class as pc inner join pg_constraint as pgc on (pgc.conrelid = pc.oid) where pc.relname in ('ac', 'bc') order by 1,2; +select pc.relname, pgc.conname, pgc.contype, pgc.conislocal, pgc.coninhcount, pg_get_expr(pgc.conbin, pc.oid) as consrc from pg_class as pc inner join pg_constraint as pgc on (pgc.conrelid = pc.oid) where pc.relname in ('ac', 'bc') order by 1,2; relname | conname | contype | conislocal | coninhcount | consrc ---------+-------------+---------+------------+-------------+------------------ ac | ac_aa_check | c | t | 0 | (aa IS NOT NULL) @@ -814,14 +854,14 @@ DETAIL: Failing row contains (null, null). alter table bc drop constraint ac_aa_check; -- fail, disallowed ERROR: cannot drop inherited constraint "ac_aa_check" of relation "bc" alter table ac drop constraint ac_aa_check; -select pc.relname, pgc.conname, pgc.contype, pgc.conislocal, pgc.coninhcount, pgc.consrc from pg_class as pc inner join pg_constraint as pgc on (pgc.conrelid = pc.oid) where pc.relname in ('ac', 'bc') order by 1,2; +select pc.relname, pgc.conname, pgc.contype, pgc.conislocal, pgc.coninhcount, pg_get_expr(pgc.conbin, pc.oid) as consrc from pg_class as pc inner join pg_constraint as pgc on (pgc.conrelid = pc.oid) where pc.relname in ('ac', 'bc') order by 1,2; relname | conname | contype | conislocal | coninhcount | consrc ---------+---------+---------+------------+-------------+-------- (0 rows) alter table ac add constraint ac_check check (aa is not null); alter table bc no inherit ac; -select pc.relname, pgc.conname, pgc.contype, pgc.conislocal, pgc.coninhcount, pgc.consrc from pg_class as pc inner join pg_constraint as pgc on (pgc.conrelid = pc.oid) where pc.relname in ('ac', 'bc') order by 1,2; +select pc.relname, pgc.conname, pgc.contype, pgc.conislocal, pgc.coninhcount, pg_get_expr(pgc.conbin, pc.oid) as consrc from pg_class as pc inner join pg_constraint as pgc on (pgc.conrelid = pc.oid) where pc.relname in ('ac', 'bc') order by 1,2; relname | conname | contype | conislocal | coninhcount | consrc ---------+----------+---------+------------+-------------+------------------ ac | ac_check | c | t | 0 | (aa IS NOT NULL) @@ -829,14 +869,14 @@ select pc.relname, pgc.conname, pgc.contype, pgc.conislocal, pgc.coninhcount, pg (2 rows) alter table bc drop constraint ac_check; -select pc.relname, pgc.conname, pgc.contype, pgc.conislocal, pgc.coninhcount, pgc.consrc from pg_class as pc inner join pg_constraint as pgc on (pgc.conrelid = pc.oid) where pc.relname in ('ac', 'bc') order by 1,2; +select pc.relname, pgc.conname, pgc.contype, pgc.conislocal, pgc.coninhcount, 
pg_get_expr(pgc.conbin, pc.oid) as consrc from pg_class as pc inner join pg_constraint as pgc on (pgc.conrelid = pc.oid) where pc.relname in ('ac', 'bc') order by 1,2; relname | conname | contype | conislocal | coninhcount | consrc ---------+----------+---------+------------+-------------+------------------ ac | ac_check | c | t | 0 | (aa IS NOT NULL) (1 row) alter table ac drop constraint ac_check; -select pc.relname, pgc.conname, pgc.contype, pgc.conislocal, pgc.coninhcount, pgc.consrc from pg_class as pc inner join pg_constraint as pgc on (pgc.conrelid = pc.oid) where pc.relname in ('ac', 'bc') order by 1,2; +select pc.relname, pgc.conname, pgc.contype, pgc.conislocal, pgc.coninhcount, pg_get_expr(pgc.conbin, pc.oid) as consrc from pg_class as pc inner join pg_constraint as pgc on (pgc.conrelid = pc.oid) where pc.relname in ('ac', 'bc') order by 1,2; relname | conname | contype | conislocal | coninhcount | consrc ---------+---------+---------+------------+-------------+-------- (0 rows) @@ -847,7 +887,7 @@ create table ac (a int constraint check_a check (a <> 0)); create table bc (a int constraint check_a check (a <> 0), b int constraint check_b check (b <> 0)) inherits (ac); NOTICE: merging column "a" with inherited definition NOTICE: merging constraint "check_a" with inherited definition -select pc.relname, pgc.conname, pgc.contype, pgc.conislocal, pgc.coninhcount, pgc.consrc from pg_class as pc inner join pg_constraint as pgc on (pgc.conrelid = pc.oid) where pc.relname in ('ac', 'bc') order by 1,2; +select pc.relname, pgc.conname, pgc.contype, pgc.conislocal, pgc.coninhcount, pg_get_expr(pgc.conbin, pc.oid) as consrc from pg_class as pc inner join pg_constraint as pgc on (pgc.conrelid = pc.oid) where pc.relname in ('ac', 'bc') order by 1,2; relname | conname | contype | conislocal | coninhcount | consrc ---------+---------+---------+------------+-------------+---------- ac | check_a | c | t | 0 | (a <> 0) @@ -860,7 +900,7 @@ drop table ac; create table ac (a int constraint check_a check (a <> 0)); create table bc (b int constraint check_b check (b <> 0)); create table cc (c int constraint check_c check (c <> 0)) inherits (ac, bc); -select pc.relname, pgc.conname, pgc.contype, pgc.conislocal, pgc.coninhcount, pgc.consrc from pg_class as pc inner join pg_constraint as pgc on (pgc.conrelid = pc.oid) where pc.relname in ('ac', 'bc', 'cc') order by 1,2; +select pc.relname, pgc.conname, pgc.contype, pgc.conislocal, pgc.coninhcount, pg_get_expr(pgc.conbin, pc.oid) as consrc from pg_class as pc inner join pg_constraint as pgc on (pgc.conrelid = pc.oid) where pc.relname in ('ac', 'bc', 'cc') order by 1,2; relname | conname | contype | conislocal | coninhcount | consrc ---------+---------+---------+------------+-------------+---------- ac | check_a | c | t | 0 | (a <> 0) @@ -871,7 +911,7 @@ select pc.relname, pgc.conname, pgc.contype, pgc.conislocal, pgc.coninhcount, pg (5 rows) alter table cc no inherit bc; -select pc.relname, pgc.conname, pgc.contype, pgc.conislocal, pgc.coninhcount, pgc.consrc from pg_class as pc inner join pg_constraint as pgc on (pgc.conrelid = pc.oid) where pc.relname in ('ac', 'bc', 'cc') order by 1,2; +select pc.relname, pgc.conname, pgc.contype, pgc.conislocal, pgc.coninhcount, pg_get_expr(pgc.conbin, pc.oid) as consrc from pg_class as pc inner join pg_constraint as pgc on (pgc.conrelid = pc.oid) where pc.relname in ('ac', 'bc', 'cc') order by 1,2; relname | conname | contype | conislocal | coninhcount | consrc 
---------+---------+---------+------------+-------------+---------- ac | check_a | c | t | 0 | (a <> 0) @@ -1187,6 +1227,31 @@ Inherits: test_foreign_constraints DROP TABLE test_foreign_constraints_inh; DROP TABLE test_foreign_constraints; DROP TABLE test_primary_constraints; +-- Test foreign key behavior +create table inh_fk_1 (a int primary key); +insert into inh_fk_1 values (1), (2), (3); +create table inh_fk_2 (x int primary key, y int references inh_fk_1 on delete cascade); +insert into inh_fk_2 values (11, 1), (22, 2), (33, 3); +create table inh_fk_2_child () inherits (inh_fk_2); +insert into inh_fk_2_child values (111, 1), (222, 2); +delete from inh_fk_1 where a = 1; +select * from inh_fk_1 order by 1; + a +--- + 2 + 3 +(2 rows) + +select * from inh_fk_2 order by 1, 2; + x | y +-----+--- + 22 | 2 + 33 | 3 + 111 | 1 + 222 | 2 +(4 rows) + +drop table inh_fk_1, inh_fk_2, inh_fk_2_child; -- Test that parent and child CHECK constraints can be created in either order create table p1(f1 int); create table p1_c1() inherits(p1); @@ -1382,6 +1447,7 @@ select min(1-id) from matest0; reset enable_indexscan; set enable_seqscan = off; -- plan with fewest seqscans should be merge +set enable_parallel_append = off; -- Don't let parallel-append interfere explain (verbose, costs off) select * from matest0 order by 1-id; QUERY PLAN ------------------------------------------------------------------ @@ -1448,6 +1514,7 @@ select min(1-id) from matest0; (1 row) reset enable_seqscan; +reset enable_parallel_append; drop table matest0 cascade; NOTICE: drop cascades to 3 other objects DETAIL: drop cascades to table matest1 @@ -1637,6 +1704,61 @@ reset enable_seqscan; reset enable_indexscan; reset enable_bitmapscan; -- +-- Check handling of a constant-null CHECK constraint +-- +create table cnullparent (f1 int); +create table cnullchild (check (f1 = 1 or f1 = null)) inherits(cnullparent); +insert into cnullchild values(1); +insert into cnullchild values(2); +insert into cnullchild values(null); +select * from cnullparent; + f1 +---- + 1 + 2 + +(3 rows) + +select * from cnullparent where f1 = 2; + f1 +---- + 2 +(1 row) + +drop table cnullparent cascade; +NOTICE: drop cascades to table cnullchild +-- +-- Check use of temporary tables with inheritance trees +-- +create table inh_perm_parent (a1 int); +create temp table inh_temp_parent (a1 int); +create temp table inh_temp_child () inherits (inh_perm_parent); -- ok +create table inh_perm_child () inherits (inh_temp_parent); -- error +ERROR: cannot inherit from temporary relation "inh_temp_parent" +create temp table inh_temp_child_2 () inherits (inh_temp_parent); -- ok +insert into inh_perm_parent values (1); +insert into inh_temp_parent values (2); +insert into inh_temp_child values (3); +insert into inh_temp_child_2 values (4); +select tableoid::regclass, a1 from inh_perm_parent; + tableoid | a1 +-----------------+---- + inh_perm_parent | 1 + inh_temp_child | 3 +(2 rows) + +select tableoid::regclass, a1 from inh_temp_parent; + tableoid | a1 +------------------+---- + inh_temp_parent | 2 + inh_temp_child_2 | 4 +(2 rows) + +drop table inh_perm_parent cascade; +NOTICE: drop cascades to table inh_temp_child +drop table inh_temp_parent cascade; +NOTICE: drop cascades to table inh_temp_child_2 +-- -- Check that constraint exclusion works correctly with partitions using -- implicit constraints generated from the partition bound information. 
-- @@ -1691,11 +1813,7 @@ explain (costs off) select * from list_parted where a = 'ab' or a in (null, 'cd' Append -> Seq Scan on part_ab_cd Filter: (((a)::text = 'ab'::text) OR ((a)::text = ANY ('{NULL,cd}'::text[]))) - -> Seq Scan on part_ef_gh - Filter: (((a)::text = 'ab'::text) OR ((a)::text = ANY ('{NULL,cd}'::text[]))) - -> Seq Scan on part_null_xy - Filter: (((a)::text = 'ab'::text) OR ((a)::text = ANY ('{NULL,cd}'::text[]))) -(7 rows) +(3 rows) explain (costs off) select * from list_parted where a = 'ab'; QUERY PLAN @@ -1831,29 +1949,34 @@ drop table range_list_parted; -- check that constraint exclusion is able to cope with the partition -- constraint emitted for multi-column range partitioned tables create table mcrparted (a int, b int, c int) partition by range (a, abs(b), c); -create table mcrparted0 partition of mcrparted for values from (minvalue, 0, 0) to (1, 1, 1); +create table mcrparted_def partition of mcrparted default; +create table mcrparted0 partition of mcrparted for values from (minvalue, minvalue, minvalue) to (1, 1, 1); create table mcrparted1 partition of mcrparted for values from (1, 1, 1) to (10, 5, 10); create table mcrparted2 partition of mcrparted for values from (10, 5, 10) to (10, 10, 10); create table mcrparted3 partition of mcrparted for values from (11, 1, 1) to (20, 10, 10); create table mcrparted4 partition of mcrparted for values from (20, 10, 10) to (20, 20, 20); -create table mcrparted5 partition of mcrparted for values from (20, 20, 20) to (maxvalue, 0, 0); -explain (costs off) select * from mcrparted where a = 0; -- scans mcrparted0 - QUERY PLAN ------------------------------- +create table mcrparted5 partition of mcrparted for values from (20, 20, 20) to (maxvalue, maxvalue, maxvalue); +explain (costs off) select * from mcrparted where a = 0; -- scans mcrparted0, mcrparted_def + QUERY PLAN +--------------------------------- Append -> Seq Scan on mcrparted0 Filter: (a = 0) -(3 rows) + -> Seq Scan on mcrparted_def + Filter: (a = 0) +(5 rows) -explain (costs off) select * from mcrparted where a = 10 and abs(b) < 5; -- scans mcrparted1 +explain (costs off) select * from mcrparted where a = 10 and abs(b) < 5; -- scans mcrparted1, mcrparted_def QUERY PLAN --------------------------------------------- Append -> Seq Scan on mcrparted1 Filter: ((a = 10) AND (abs(b) < 5)) -(3 rows) + -> Seq Scan on mcrparted_def + Filter: ((a = 10) AND (abs(b) < 5)) +(5 rows) -explain (costs off) select * from mcrparted where a = 10 and abs(b) = 5; -- scans mcrparted1, mcrparted2 +explain (costs off) select * from mcrparted where a = 10 and abs(b) = 5; -- scans mcrparted1, mcrparted2, mcrparted_def QUERY PLAN --------------------------------------------- Append @@ -1861,11 +1984,13 @@ explain (costs off) select * from mcrparted where a = 10 and abs(b) = 5; -- scan Filter: ((a = 10) AND (abs(b) = 5)) -> Seq Scan on mcrparted2 Filter: ((a = 10) AND (abs(b) = 5)) -(5 rows) + -> Seq Scan on mcrparted_def + Filter: ((a = 10) AND (abs(b) = 5)) +(7 rows) explain (costs off) select * from mcrparted where abs(b) = 5; -- scans all partitions - QUERY PLAN ------------------------------- + QUERY PLAN +--------------------------------- Append -> Seq Scan on mcrparted0 Filter: (abs(b) = 5) @@ -1875,9 +2000,13 @@ explain (costs off) select * from mcrparted where abs(b) = 5; -- scans all parti Filter: (abs(b) = 5) -> Seq Scan on mcrparted3 Filter: (abs(b) = 5) + -> Seq Scan on mcrparted4 + Filter: (abs(b) = 5) -> Seq Scan on mcrparted5 Filter: (abs(b) = 5) -(11 rows) + -> Seq Scan on 
mcrparted_def + Filter: (abs(b) = 5) +(15 rows) explain (costs off) select * from mcrparted where a > -1; -- scans all partitions QUERY PLAN @@ -1895,7 +2024,9 @@ explain (costs off) select * from mcrparted where a > -1; -- scans all partition Filter: (a > '-1'::integer) -> Seq Scan on mcrparted5 Filter: (a > '-1'::integer) -(13 rows) + -> Seq Scan on mcrparted_def + Filter: (a > '-1'::integer) +(15 rows) explain (costs off) select * from mcrparted where a = 20 and abs(b) = 10 and c > 10; -- scans mcrparted4 QUERY PLAN @@ -1905,7 +2036,7 @@ explain (costs off) select * from mcrparted where a = 20 and abs(b) = 10 and c > Filter: ((c > 10) AND (a = 20) AND (abs(b) = 10)) (3 rows) -explain (costs off) select * from mcrparted where a = 20 and c > 20; -- scans mcrparted3, mcrparte4, mcrparte5 +explain (costs off) select * from mcrparted where a = 20 and c > 20; -- scans mcrparted3, mcrparte4, mcrparte5, mcrparted_def QUERY PLAN ----------------------------------------- Append @@ -1915,7 +2046,9 @@ explain (costs off) select * from mcrparted where a = 20 and c > 20; -- scans mc Filter: ((c > 20) AND (a = 20)) -> Seq Scan on mcrparted5 Filter: ((c > 20) AND (a = 20)) -(7 rows) + -> Seq Scan on mcrparted_def + Filter: ((c > 20) AND (a = 20)) +(9 rows) drop table mcrparted; -- check that partitioned table Appends cope with being referenced in diff --git a/src/test/regress/expected/insert.out b/src/test/regress/expected/insert.out index a2d9469592..5edf269367 100644 --- a/src/test/regress/expected/insert.out +++ b/src/test/regress/expected/insert.out @@ -165,6 +165,10 @@ create table range_parted ( a text, b int ) partition by range (a, (b+0)); +-- no partitions, so fail +insert into range_parted values ('a', 11); +ERROR: no partition of relation "range_parted" found for row +DETAIL: Partition key of the failing row contains (a, (b + 0)) = (a, 11). create table part1 partition of range_parted for values from ('a', 1) to ('a', 10); create table part2 partition of range_parted for values from ('a', 10) to ('a', 20); create table part3 partition of range_parted for values from ('b', 1) to ('b', 10); @@ -219,17 +223,63 @@ insert into part_null values (null, 0); create table part_ee_ff partition of list_parted for values in ('ee', 'ff') partition by range (b); create table part_ee_ff1 partition of part_ee_ff for values from (1) to (10); create table part_ee_ff2 partition of part_ee_ff for values from (10) to (20); +-- test default partition +create table part_default partition of list_parted default; +-- Negative test: a row, which would fit in other partition, does not fit +-- default partition, even when inserted directly +insert into part_default values ('aa', 2); +ERROR: new row for relation "part_default" violates partition constraint +DETAIL: Failing row contains (aa, 2). +insert into part_default values (null, 2); +ERROR: new row for relation "part_default" violates partition constraint +DETAIL: Failing row contains (null, 2). 
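The two errors just above come from the default partition's implicit constraint, which amounts to "this row fits no other partition of the parent" and is enforced even when rows are inserted into the default partition directly. A small sketch of the same behaviour, with the illustrative name def_demo standing in for the suite's list_parted/part_default tables:

-- def_demo is a hypothetical name used only for this sketch
create table def_demo (a text) partition by list (a);
create table def_demo_ab partition of def_demo for values in ('ab');
create table def_demo_def partition of def_demo default;
insert into def_demo_def values ('zz');  -- ok: 'zz' matches no other partition
insert into def_demo_def values ('ab');  -- fails: 'ab' belongs to def_demo_ab
drop table def_demo;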
+-- ok +insert into part_default values ('Zz', 2); +-- test if default partition works as expected for multi-level partitioned +-- table as well as when default partition itself is further partitioned +drop table part_default; +create table part_xx_yy partition of list_parted for values in ('xx', 'yy') partition by list (a); +create table part_xx_yy_p1 partition of part_xx_yy for values in ('xx'); +create table part_xx_yy_defpart partition of part_xx_yy default; +create table part_default partition of list_parted default partition by range(b); +create table part_default_p1 partition of part_default for values from (20) to (30); +create table part_default_p2 partition of part_default for values from (30) to (40); -- fail insert into part_ee_ff1 values ('EE', 11); ERROR: new row for relation "part_ee_ff1" violates partition constraint DETAIL: Failing row contains (EE, 11). +insert into part_default_p2 values ('gg', 43); +ERROR: new row for relation "part_default_p2" violates partition constraint +DETAIL: Failing row contains (gg, 43). -- fail (even the parent's, ie, part_ee_ff's partition constraint applies) insert into part_ee_ff1 values ('cc', 1); ERROR: new row for relation "part_ee_ff1" violates partition constraint DETAIL: Failing row contains (cc, 1). +insert into part_default values ('gg', 43); +ERROR: no partition of relation "part_default" found for row +DETAIL: Partition key of the failing row contains (b) = (43). -- ok insert into part_ee_ff1 values ('ff', 1); insert into part_ee_ff2 values ('ff', 11); +insert into part_default_p1 values ('cd', 25); +insert into part_default_p2 values ('de', 35); +insert into list_parted values ('ab', 21); +insert into list_parted values ('xx', 1); +insert into list_parted values ('yy', 2); +select tableoid::regclass, * from list_parted; + tableoid | a | b +--------------------+----+---- + part_cc_dd | cC | 1 + part_ee_ff1 | ff | 1 + part_ee_ff2 | ff | 11 + part_xx_yy_p1 | xx | 1 + part_xx_yy_defpart | yy | 2 + part_null | | 0 + part_default_p1 | cd | 25 + part_default_p1 | ab | 21 + part_default_p2 | de | 35 +(9 rows) + -- Check tuple routing for partitioned tables -- fail insert into range_parted values ('a', 0); @@ -249,6 +299,18 @@ insert into range_parted values ('b', 10); insert into range_parted values ('a'); ERROR: no partition of relation "range_parted" found for row DETAIL: Partition key of the failing row contains (a, (b + 0)) = (a, null). +-- Check default partition +create table part_def partition of range_parted default; +-- fail +insert into part_def values ('b', 10); +ERROR: new row for relation "part_def" violates partition constraint +DETAIL: Failing row contains (b, 10). +-- ok +insert into part_def values ('c', 10); +insert into range_parted values (null, null); +insert into range_parted values ('a', null); +insert into range_parted values (null, 19); +insert into range_parted values ('b', 20); select tableoid::regclass, * from range_parted; tableoid | a | b ----------+---+---- @@ -258,7 +320,12 @@ select tableoid::regclass, * from range_parted; part3 | b | 1 part4 | b | 10 part4 | b | 10 -(6 rows) + part_def | c | 10 + part_def | | + part_def | a | + part_def | | 19 + part_def | b | 20 +(11 rows) -- ok insert into list_parted values (null, 1); @@ -274,17 +341,22 @@ DETAIL: Partition key of the failing row contains (b) = (0). 
insert into list_parted values ('EE', 1); insert into part_ee_ff values ('EE', 10); select tableoid::regclass, * from list_parted; - tableoid | a | b --------------+----+---- - part_aa_bb | aA | - part_cc_dd | cC | 1 - part_null | | 0 - part_null | | 1 - part_ee_ff1 | ff | 1 - part_ee_ff1 | EE | 1 - part_ee_ff2 | ff | 11 - part_ee_ff2 | EE | 10 -(8 rows) + tableoid | a | b +--------------------+----+---- + part_aa_bb | aA | + part_cc_dd | cC | 1 + part_ee_ff1 | ff | 1 + part_ee_ff1 | EE | 1 + part_ee_ff2 | ff | 11 + part_ee_ff2 | EE | 10 + part_xx_yy_p1 | xx | 1 + part_xx_yy_defpart | yy | 2 + part_null | | 0 + part_null | | 1 + part_default_p1 | cd | 25 + part_default_p1 | ab | 21 + part_default_p2 | de | 35 +(13 rows) -- some more tests to exercise tuple-routing with multi-level partitioning create table part_gg partition of list_parted for values in ('gg') partition by range (b); @@ -314,8 +386,110 @@ select tableoid::regclass::text, a, min(b) as min_b, max(b) as max_b from list_p part_null | | 1 | 1 (9 rows) +-- direct partition inserts should check hash partition bound constraint +-- Use hand-rolled hash functions and operator classes to get predictable +-- result on different matchines. The hash function for int4 simply returns +-- the sum of the values passed to it and the one for text returns the length +-- of the non-empty string value passed to it or 0. +create or replace function part_hashint4_noop(value int4, seed int8) +returns int8 as $$ +select value + seed; +$$ language sql immutable; +create operator class part_test_int4_ops +for type int4 +using hash as +operator 1 =, +function 2 part_hashint4_noop(int4, int8); +create or replace function part_hashtext_length(value text, seed int8) +RETURNS int8 AS $$ +select length(coalesce(value, ''))::int8 +$$ language sql immutable; +create operator class part_test_text_ops +for type text +using hash as +operator 1 =, +function 2 part_hashtext_length(text, int8); +create table hash_parted ( + a int +) partition by hash (a part_test_int4_ops); +create table hpart0 partition of hash_parted for values with (modulus 4, remainder 0); +create table hpart1 partition of hash_parted for values with (modulus 4, remainder 1); +create table hpart2 partition of hash_parted for values with (modulus 4, remainder 2); +create table hpart3 partition of hash_parted for values with (modulus 4, remainder 3); +insert into hash_parted values(generate_series(1,10)); +-- direct insert of values divisible by 4 - ok; +insert into hpart0 values(12),(16); +-- fail; +insert into hpart0 values(11); +ERROR: new row for relation "hpart0" violates partition constraint +DETAIL: Failing row contains (11). +-- 11 % 4 -> 3 remainder i.e. 
valid data for hpart3 partition +insert into hpart3 values(11); +-- view data +select tableoid::regclass as part, a, a%4 as "remainder = a % 4" +from hash_parted order by part; + part | a | remainder = a % 4 +--------+----+------------------- + hpart0 | 4 | 0 + hpart0 | 8 | 0 + hpart0 | 12 | 0 + hpart0 | 16 | 0 + hpart1 | 1 | 1 + hpart1 | 5 | 1 + hpart1 | 9 | 1 + hpart2 | 2 | 2 + hpart2 | 6 | 2 + hpart2 | 10 | 2 + hpart3 | 3 | 3 + hpart3 | 7 | 3 + hpart3 | 11 | 3 +(13 rows) + +-- test \d+ output on a table which has both partitioned and unpartitioned +-- partitions +\d+ list_parted + Table "public.list_parted" + Column | Type | Collation | Nullable | Default | Storage | Stats target | Description +--------+---------+-----------+----------+---------+----------+--------------+------------- + a | text | | | | extended | | + b | integer | | | | plain | | +Partition key: LIST (lower(a)) +Partitions: part_aa_bb FOR VALUES IN ('aa', 'bb'), + part_cc_dd FOR VALUES IN ('cc', 'dd'), + part_ee_ff FOR VALUES IN ('ee', 'ff'), PARTITIONED, + part_gg FOR VALUES IN ('gg'), PARTITIONED, + part_null FOR VALUES IN (NULL), + part_xx_yy FOR VALUES IN ('xx', 'yy'), PARTITIONED, + part_default DEFAULT, PARTITIONED + -- cleanup drop table range_parted, list_parted; +drop table hash_parted; +-- test that a default partition added as the first partition accepts any value +-- including null +create table list_parted (a int) partition by list (a); +create table part_default partition of list_parted default; +\d+ part_default + Table "public.part_default" + Column | Type | Collation | Nullable | Default | Storage | Stats target | Description +--------+---------+-----------+----------+---------+---------+--------------+------------- + a | integer | | | | plain | | +Partition of: list_parted DEFAULT +No partition constraint + +insert into part_default values (null); +insert into part_default values (1); +insert into part_default values (-1); +select tableoid::regclass, a from list_parted; + tableoid | a +--------------+---- + part_default | + part_default | 1 + part_default | -1 +(3 rows) + +-- cleanup +drop table list_parted; -- more tests for certain multi-level partitioning scenarios create table mlparted (a int, b int) partition by range (a, b); create table mlparted1 (b int not null, a int not null) partition by range ((b+0)); @@ -425,20 +599,50 @@ insert into mlparted5 (a, b, c) values (1, 40, 'a'); ERROR: new row for relation "mlparted5a" violates partition constraint DETAIL: Failing row contains (b, 1, 40). drop table mlparted5; +alter table mlparted drop constraint check_b; +-- Check multi-level default partition +create table mlparted_def partition of mlparted default partition by range(a); +create table mlparted_def1 partition of mlparted_def for values from (40) to (50); +create table mlparted_def2 partition of mlparted_def for values from (50) to (60); +insert into mlparted values (40, 100); +insert into mlparted_def1 values (42, 100); +insert into mlparted_def2 values (54, 50); +-- fail +insert into mlparted values (70, 100); +ERROR: no partition of relation "mlparted_def" found for row +DETAIL: Partition key of the failing row contains (a) = (70). +insert into mlparted_def1 values (52, 50); +ERROR: new row for relation "mlparted_def1" violates partition constraint +DETAIL: Failing row contains (52, 50, null). +insert into mlparted_def2 values (34, 50); +ERROR: new row for relation "mlparted_def2" violates partition constraint +DETAIL: Failing row contains (34, 50, null). 
+-- ok +create table mlparted_defd partition of mlparted_def default; +insert into mlparted values (70, 100); +select tableoid::regclass, * from mlparted_def; + tableoid | a | b | c +---------------+----+-----+--- + mlparted_def1 | 40 | 100 | + mlparted_def1 | 42 | 100 | + mlparted_def2 | 54 | 50 | + mlparted_defd | 70 | 100 | +(4 rows) + -- check that message shown after failure to find a partition shows the -- appropriate key description (or none) in various situations create table key_desc (a int, b int) partition by list ((a+0)); create table key_desc_1 partition of key_desc for values in (1) partition by range (b); -create user someone_else; -grant select (a) on key_desc_1 to someone_else; -grant insert on key_desc to someone_else; -set role someone_else; +create user regress_insert_other_user; +grant select (a) on key_desc_1 to regress_insert_other_user; +grant insert on key_desc to regress_insert_other_user; +set role regress_insert_other_user; -- no key description is shown insert into key_desc values (1, 1); ERROR: no partition of relation "key_desc_1" found for row reset role; -grant select (b) on key_desc_1 to someone_else; -set role someone_else; +grant select (b) on key_desc_1 to regress_insert_other_user; +set role regress_insert_other_user; -- key description (b)=(1) is now shown insert into key_desc values (1, 1); ERROR: no partition of relation "key_desc_1" found for row @@ -447,19 +651,36 @@ DETAIL: Partition key of the failing row contains (b) = (1). insert into key_desc values (2, 1); ERROR: no partition of relation "key_desc" found for row reset role; -revoke all on key_desc from someone_else; -revoke all on key_desc_1 from someone_else; -drop role someone_else; +revoke all on key_desc from regress_insert_other_user; +revoke all on key_desc_1 from regress_insert_other_user; +drop role regress_insert_other_user; drop table key_desc, key_desc_1; +-- test minvalue/maxvalue restrictions +create table mcrparted (a int, b int, c int) partition by range (a, abs(b), c); +create table mcrparted0 partition of mcrparted for values from (minvalue, 0, 0) to (1, maxvalue, maxvalue); +ERROR: every bound following MINVALUE must also be MINVALUE +LINE 1: ...partition of mcrparted for values from (minvalue, 0, 0) to (... + ^ +create table mcrparted2 partition of mcrparted for values from (10, 6, minvalue) to (10, maxvalue, minvalue); +ERROR: every bound following MAXVALUE must also be MAXVALUE +LINE 1: ...r values from (10, 6, minvalue) to (10, maxvalue, minvalue); + ^ +create table mcrparted4 partition of mcrparted for values from (21, minvalue, 0) to (30, 20, minvalue); +ERROR: every bound following MINVALUE must also be MINVALUE +LINE 1: ...ition of mcrparted for values from (21, minvalue, 0) to (30,... 
+ ^ -- check multi-column range partitioning expression enforces the same -- constraint as what tuple-routing would determine it to be -create table mcrparted (a int, b int, c int) partition by range (a, abs(b), c); -create table mcrparted0 partition of mcrparted for values from (minvalue, 0, 0) to (1, maxvalue, 0); +create table mcrparted0 partition of mcrparted for values from (minvalue, minvalue, minvalue) to (1, maxvalue, maxvalue); create table mcrparted1 partition of mcrparted for values from (2, 1, minvalue) to (10, 5, 10); -create table mcrparted2 partition of mcrparted for values from (10, 6, minvalue) to (10, maxvalue, 0); +create table mcrparted2 partition of mcrparted for values from (10, 6, minvalue) to (10, maxvalue, maxvalue); create table mcrparted3 partition of mcrparted for values from (11, 1, 1) to (20, 10, 10); -create table mcrparted4 partition of mcrparted for values from (21, minvalue, 0) to (30, 20, maxvalue); -create table mcrparted5 partition of mcrparted for values from (30, 21, 20) to (maxvalue, 0, 0); +create table mcrparted4 partition of mcrparted for values from (21, minvalue, minvalue) to (30, 20, maxvalue); +create table mcrparted5 partition of mcrparted for values from (30, 21, 20) to (maxvalue, maxvalue, maxvalue); +-- null not allowed in range partition +insert into mcrparted values (null, null, null); +ERROR: no partition of relation "mcrparted" found for row +DETAIL: Partition key of the failing row contains (a, abs(b), c) = (null, null, null). -- routed to mcrparted0 insert into mcrparted values (0, 1, 1); insert into mcrparted0 values (0, 1, 1); @@ -541,16 +762,42 @@ drop role regress_coldesc_role; drop table inserttest3; drop table brtrigpartcon; drop function brtrigpartcon1trigf(); +-- check that "do nothing" BR triggers work with tuple-routing (this checks +-- that estate->es_result_relation_info is appropriately set/reset for each +-- routed tuple) +create table donothingbrtrig_test (a int, b text) partition by list (a); +create table donothingbrtrig_test1 (b text, a int); +create table donothingbrtrig_test2 (c text, b text, a int); +alter table donothingbrtrig_test2 drop column c; +create or replace function donothingbrtrig_func() returns trigger as $$begin raise notice 'b: %', new.b; return NULL; end$$ language plpgsql; +create trigger donothingbrtrig1 before insert on donothingbrtrig_test1 for each row execute procedure donothingbrtrig_func(); +create trigger donothingbrtrig2 before insert on donothingbrtrig_test2 for each row execute procedure donothingbrtrig_func(); +alter table donothingbrtrig_test attach partition donothingbrtrig_test1 for values in (1); +alter table donothingbrtrig_test attach partition donothingbrtrig_test2 for values in (2); +insert into donothingbrtrig_test values (1, 'foo'), (2, 'bar'); +NOTICE: b: foo +NOTICE: b: bar +copy donothingbrtrig_test from stdout; +NOTICE: b: baz +NOTICE: b: qux +select tableoid::regclass, * from donothingbrtrig_test; + tableoid | a | b +----------+---+--- +(0 rows) + +-- cleanup +drop table donothingbrtrig_test; +drop function donothingbrtrig_func(); -- check multi-column range partitioning with minvalue/maxvalue constraints create table mcrparted (a text, b int) partition by range(a, b); -create table mcrparted1_lt_b partition of mcrparted for values from (minvalue, 0) to ('b', minvalue); +create table mcrparted1_lt_b partition of mcrparted for values from (minvalue, minvalue) to ('b', minvalue); create table mcrparted2_b partition of mcrparted for values from ('b', minvalue) to ('c', 
minvalue); create table mcrparted3_c_to_common partition of mcrparted for values from ('c', minvalue) to ('common', minvalue); create table mcrparted4_common_lt_0 partition of mcrparted for values from ('common', minvalue) to ('common', 0); create table mcrparted5_common_0_to_10 partition of mcrparted for values from ('common', 0) to ('common', 10); create table mcrparted6_common_ge_10 partition of mcrparted for values from ('common', 10) to ('common', maxvalue); create table mcrparted7_gt_common_lt_d partition of mcrparted for values from ('common', maxvalue) to ('d', minvalue); -create table mcrparted8_ge_d partition of mcrparted for values from ('d', minvalue) to (maxvalue, 0); +create table mcrparted8_ge_d partition of mcrparted for values from ('d', minvalue) to (maxvalue, maxvalue); \d+ mcrparted Table "public.mcrparted" Column | Type | Collation | Nullable | Default | Storage | Stats target | Description @@ -558,14 +805,14 @@ create table mcrparted8_ge_d partition of mcrparted for values from ('d', minval a | text | | | | extended | | b | integer | | | | plain | | Partition key: RANGE (a, b) -Partitions: mcrparted1_lt_b FOR VALUES FROM (MINVALUE, 0) TO ('b', MINVALUE), +Partitions: mcrparted1_lt_b FOR VALUES FROM (MINVALUE, MINVALUE) TO ('b', MINVALUE), mcrparted2_b FOR VALUES FROM ('b', MINVALUE) TO ('c', MINVALUE), mcrparted3_c_to_common FOR VALUES FROM ('c', MINVALUE) TO ('common', MINVALUE), mcrparted4_common_lt_0 FOR VALUES FROM ('common', MINVALUE) TO ('common', 0), mcrparted5_common_0_to_10 FOR VALUES FROM ('common', 0) TO ('common', 10), mcrparted6_common_ge_10 FOR VALUES FROM ('common', 10) TO ('common', MAXVALUE), mcrparted7_gt_common_lt_d FOR VALUES FROM ('common', MAXVALUE) TO ('d', MINVALUE), - mcrparted8_ge_d FOR VALUES FROM ('d', MINVALUE) TO (MAXVALUE, 0) + mcrparted8_ge_d FOR VALUES FROM ('d', MINVALUE) TO (MAXVALUE, MAXVALUE) \d+ mcrparted1_lt_b Table "public.mcrparted1_lt_b" @@ -573,7 +820,7 @@ Partitions: mcrparted1_lt_b FOR VALUES FROM (MINVALUE, 0) TO ('b', MINVALUE), --------+---------+-----------+----------+---------+----------+--------------+------------- a | text | | | | extended | | b | integer | | | | plain | | -Partition of: mcrparted FOR VALUES FROM (MINVALUE, 0) TO ('b', MINVALUE) +Partition of: mcrparted FOR VALUES FROM (MINVALUE, MINVALUE) TO ('b', MINVALUE) Partition constraint: ((a IS NOT NULL) AND (b IS NOT NULL) AND (a < 'b'::text)) \d+ mcrparted2_b @@ -636,7 +883,7 @@ Partition constraint: ((a IS NOT NULL) AND (b IS NOT NULL) AND (a > 'common'::te --------+---------+-----------+----------+---------+----------+--------------+------------- a | text | | | | extended | | b | integer | | | | plain | | -Partition of: mcrparted FOR VALUES FROM ('d', MINVALUE) TO (MAXVALUE, 0) +Partition of: mcrparted FOR VALUES FROM ('d', MINVALUE) TO (MAXVALUE, MAXVALUE) Partition constraint: ((a IS NOT NULL) AND (b IS NOT NULL) AND (a >= 'd'::text)) insert into mcrparted values ('aaa', 0), ('b', 0), ('bz', 10), ('c', -10), diff --git a/src/test/regress/expected/insert_conflict.out b/src/test/regress/expected/insert_conflict.out index 8d005fddd4..27cf5a01b3 100644 --- a/src/test/regress/expected/insert_conflict.out +++ b/src/test/regress/expected/insert_conflict.out @@ -786,3 +786,122 @@ select * from selfconflict; (3 rows) drop table selfconflict; +-- check ON CONFLICT handling with partitioned tables +create table parted_conflict_test (a int unique, b char) partition by list (a); +create table parted_conflict_test_1 partition of parted_conflict_test (b unique) for 
values in (1, 2); +-- no indexes required here +insert into parted_conflict_test values (1, 'a') on conflict do nothing; +-- index on a required, which does exist in parent +insert into parted_conflict_test values (1, 'a') on conflict (a) do nothing; +insert into parted_conflict_test values (1, 'a') on conflict (a) do update set b = excluded.b; +-- targeting partition directly will work +insert into parted_conflict_test_1 values (1, 'a') on conflict (a) do nothing; +insert into parted_conflict_test_1 values (1, 'b') on conflict (a) do update set b = excluded.b; +-- index on b required, which doesn't exist in parent +insert into parted_conflict_test values (2, 'b') on conflict (b) do update set a = excluded.a; +ERROR: there is no unique or exclusion constraint matching the ON CONFLICT specification +-- targeting partition directly will work +insert into parted_conflict_test_1 values (2, 'b') on conflict (b) do update set a = excluded.a; +-- should see (2, 'b') +select * from parted_conflict_test order by a; + a | b +---+--- + 2 | b +(1 row) + +-- now check that DO UPDATE works correctly for target partition with +-- different attribute numbers +create table parted_conflict_test_2 (b char, a int unique); +alter table parted_conflict_test attach partition parted_conflict_test_2 for values in (3); +truncate parted_conflict_test; +insert into parted_conflict_test values (3, 'a') on conflict (a) do update set b = excluded.b; +insert into parted_conflict_test values (3, 'b') on conflict (a) do update set b = excluded.b; +-- should see (3, 'b') +select * from parted_conflict_test order by a; + a | b +---+--- + 3 | b +(1 row) + +-- case where parent will have a dropped column, but the partition won't +alter table parted_conflict_test drop b, add b char; +create table parted_conflict_test_3 partition of parted_conflict_test for values in (4); +truncate parted_conflict_test; +insert into parted_conflict_test (a, b) values (4, 'a') on conflict (a) do update set b = excluded.b; +insert into parted_conflict_test (a, b) values (4, 'b') on conflict (a) do update set b = excluded.b where parted_conflict_test.b = 'a'; +-- should see (4, 'b') +select * from parted_conflict_test order by a; + a | b +---+--- + 4 | b +(1 row) + +-- case with multi-level partitioning +create table parted_conflict_test_4 partition of parted_conflict_test for values in (5) partition by list (a); +create table parted_conflict_test_4_1 partition of parted_conflict_test_4 for values in (5); +truncate parted_conflict_test; +insert into parted_conflict_test (a, b) values (5, 'a') on conflict (a) do update set b = excluded.b; +insert into parted_conflict_test (a, b) values (5, 'b') on conflict (a) do update set b = excluded.b where parted_conflict_test.b = 'a'; +-- should see (5, 'b') +select * from parted_conflict_test order by a; + a | b +---+--- + 5 | b +(1 row) + +-- test with multiple rows +truncate parted_conflict_test; +insert into parted_conflict_test (a, b) values (1, 'a'), (2, 'a'), (4, 'a') on conflict (a) do update set b = excluded.b where excluded.b = 'b'; +insert into parted_conflict_test (a, b) values (1, 'b'), (2, 'c'), (4, 'b') on conflict (a) do update set b = excluded.b where excluded.b = 'b'; +-- should see (1, 'b'), (2, 'a'), (4, 'b') +select * from parted_conflict_test order by a; + a | b +---+--- + 1 | b + 2 | a + 4 | b +(3 rows) + +drop table parted_conflict_test; +-- test behavior of inserting a conflicting tuple into an intermediate +-- partitioning level +create table parted_conflict (a int primary key, b 
text) partition by range (a); +create table parted_conflict_1 partition of parted_conflict for values from (0) to (1000) partition by range (a); +create table parted_conflict_1_1 partition of parted_conflict_1 for values from (0) to (500); +insert into parted_conflict values (40, 'forty'); +insert into parted_conflict_1 values (40, 'cuarenta') + on conflict (a) do update set b = excluded.b; +drop table parted_conflict; +-- same thing, but this time try to use an index that's created not in the +-- partition +create table parted_conflict (a int, b text) partition by range (a); +create table parted_conflict_1 partition of parted_conflict for values from (0) to (1000) partition by range (a); +create unique index on only parted_conflict_1 (a); +create unique index on only parted_conflict (a); +alter index parted_conflict_a_idx attach partition parted_conflict_1_a_idx; +create table parted_conflict_1_1 partition of parted_conflict_1 for values from (0) to (500); +insert into parted_conflict values (40, 'forty'); +insert into parted_conflict_1 values (40, 'cuarenta') + on conflict (a) do update set b = excluded.b; +ERROR: there is no unique or exclusion constraint matching the ON CONFLICT specification +drop table parted_conflict; +-- test whole-row Vars in ON CONFLICT expressions +create table parted_conflict (a int, b text, c int) partition by range (a); +create table parted_conflict_1 (drp text, c int, a int, b text); +alter table parted_conflict_1 drop column drp; +create unique index on parted_conflict (a, b); +alter table parted_conflict attach partition parted_conflict_1 for values from (0) to (1000); +truncate parted_conflict; +insert into parted_conflict values (50, 'cincuenta', 1); +insert into parted_conflict values (50, 'cincuenta', 2) + on conflict (a, b) do update set (a, b, c) = row(excluded.*) + where parted_conflict = (50, text 'cincuenta', 1) and + excluded = (50, text 'cincuenta', 2); +-- should see (50, 'cincuenta', 2) +select * from parted_conflict order by a; + a | b | c +----+-----------+--- + 50 | cincuenta | 2 +(1 row) + +drop table parted_conflict; diff --git a/src/test/regress/expected/int2.out b/src/test/regress/expected/int2.out index 3ea4ed93a0..8c255b9e4d 100644 --- a/src/test/regress/expected/int2.out +++ b/src/test/regress/expected/int2.out @@ -6,7 +6,7 @@ INSERT INTO INT2_TBL(f1) VALUES ('0 '); INSERT INTO INT2_TBL(f1) VALUES (' 1234 '); INSERT INTO INT2_TBL(f1) VALUES (' -1234'); INSERT INTO INT2_TBL(f1) VALUES ('34.5'); -ERROR: invalid input syntax for integer: "34.5" +ERROR: invalid input syntax for type smallint: "34.5" LINE 1: INSERT INTO INT2_TBL(f1) VALUES ('34.5'); ^ -- largest and smallest values @@ -18,27 +18,27 @@ ERROR: value "100000" is out of range for type smallint LINE 1: INSERT INTO INT2_TBL(f1) VALUES ('100000'); ^ INSERT INTO INT2_TBL(f1) VALUES ('asdf'); -ERROR: invalid input syntax for integer: "asdf" +ERROR: invalid input syntax for type smallint: "asdf" LINE 1: INSERT INTO INT2_TBL(f1) VALUES ('asdf'); ^ INSERT INTO INT2_TBL(f1) VALUES (' '); -ERROR: invalid input syntax for integer: " " +ERROR: invalid input syntax for type smallint: " " LINE 1: INSERT INTO INT2_TBL(f1) VALUES (' '); ^ INSERT INTO INT2_TBL(f1) VALUES ('- 1234'); -ERROR: invalid input syntax for integer: "- 1234" +ERROR: invalid input syntax for type smallint: "- 1234" LINE 1: INSERT INTO INT2_TBL(f1) VALUES ('- 1234'); ^ INSERT INTO INT2_TBL(f1) VALUES ('4 444'); -ERROR: invalid input syntax for integer: "4 444" +ERROR: invalid input syntax for type smallint: "4 444" LINE 
1: INSERT INTO INT2_TBL(f1) VALUES ('4 444'); ^ INSERT INTO INT2_TBL(f1) VALUES ('123 dt'); -ERROR: invalid input syntax for integer: "123 dt" +ERROR: invalid input syntax for type smallint: "123 dt" LINE 1: INSERT INTO INT2_TBL(f1) VALUES ('123 dt'); ^ INSERT INTO INT2_TBL(f1) VALUES (''); -ERROR: invalid input syntax for integer: "" +ERROR: invalid input syntax for type smallint: "" LINE 1: INSERT INTO INT2_TBL(f1) VALUES (''); ^ SELECT '' AS five, * FROM INT2_TBL; diff --git a/src/test/regress/expected/int4.out b/src/test/regress/expected/int4.out index 372fd4d94c..bda7a8daef 100644 --- a/src/test/regress/expected/int4.out +++ b/src/test/regress/expected/int4.out @@ -6,7 +6,7 @@ INSERT INTO INT4_TBL(f1) VALUES (' 0 '); INSERT INTO INT4_TBL(f1) VALUES ('123456 '); INSERT INTO INT4_TBL(f1) VALUES (' -123456'); INSERT INTO INT4_TBL(f1) VALUES ('34.5'); -ERROR: invalid input syntax for integer: "34.5" +ERROR: invalid input syntax for type integer: "34.5" LINE 1: INSERT INTO INT4_TBL(f1) VALUES ('34.5'); ^ -- largest and smallest values @@ -18,27 +18,27 @@ ERROR: value "1000000000000" is out of range for type integer LINE 1: INSERT INTO INT4_TBL(f1) VALUES ('1000000000000'); ^ INSERT INTO INT4_TBL(f1) VALUES ('asdf'); -ERROR: invalid input syntax for integer: "asdf" +ERROR: invalid input syntax for type integer: "asdf" LINE 1: INSERT INTO INT4_TBL(f1) VALUES ('asdf'); ^ INSERT INTO INT4_TBL(f1) VALUES (' '); -ERROR: invalid input syntax for integer: " " +ERROR: invalid input syntax for type integer: " " LINE 1: INSERT INTO INT4_TBL(f1) VALUES (' '); ^ INSERT INTO INT4_TBL(f1) VALUES (' asdf '); -ERROR: invalid input syntax for integer: " asdf " +ERROR: invalid input syntax for type integer: " asdf " LINE 1: INSERT INTO INT4_TBL(f1) VALUES (' asdf '); ^ INSERT INTO INT4_TBL(f1) VALUES ('- 1234'); -ERROR: invalid input syntax for integer: "- 1234" +ERROR: invalid input syntax for type integer: "- 1234" LINE 1: INSERT INTO INT4_TBL(f1) VALUES ('- 1234'); ^ INSERT INTO INT4_TBL(f1) VALUES ('123 5'); -ERROR: invalid input syntax for integer: "123 5" +ERROR: invalid input syntax for type integer: "123 5" LINE 1: INSERT INTO INT4_TBL(f1) VALUES ('123 5'); ^ INSERT INTO INT4_TBL(f1) VALUES (''); -ERROR: invalid input syntax for integer: "" +ERROR: invalid input syntax for type integer: "" LINE 1: INSERT INTO INT4_TBL(f1) VALUES (''); ^ SELECT '' AS five, * FROM INT4_TBL; diff --git a/src/test/regress/expected/int8-exp-three-digits.out b/src/test/regress/expected/int8-exp-three-digits.out deleted file mode 100644 index 7ad4dcea0f..0000000000 --- a/src/test/regress/expected/int8-exp-three-digits.out +++ /dev/null @@ -1,888 +0,0 @@ --- --- INT8 --- Test int8 64-bit integers. 
--- -CREATE TABLE INT8_TBL(q1 int8, q2 int8); -INSERT INTO INT8_TBL VALUES(' 123 ',' 456'); -INSERT INTO INT8_TBL VALUES('123 ','4567890123456789'); -INSERT INTO INT8_TBL VALUES('4567890123456789','123'); -INSERT INTO INT8_TBL VALUES(+4567890123456789,'4567890123456789'); -INSERT INTO INT8_TBL VALUES('+4567890123456789','-4567890123456789'); --- bad inputs -INSERT INTO INT8_TBL(q1) VALUES (' '); -ERROR: invalid input syntax for integer: " " -LINE 1: INSERT INTO INT8_TBL(q1) VALUES (' '); - ^ -INSERT INTO INT8_TBL(q1) VALUES ('xxx'); -ERROR: invalid input syntax for integer: "xxx" -LINE 1: INSERT INTO INT8_TBL(q1) VALUES ('xxx'); - ^ -INSERT INTO INT8_TBL(q1) VALUES ('3908203590239580293850293850329485'); -ERROR: value "3908203590239580293850293850329485" is out of range for type bigint -LINE 1: INSERT INTO INT8_TBL(q1) VALUES ('39082035902395802938502938... - ^ -INSERT INTO INT8_TBL(q1) VALUES ('-1204982019841029840928340329840934'); -ERROR: value "-1204982019841029840928340329840934" is out of range for type bigint -LINE 1: INSERT INTO INT8_TBL(q1) VALUES ('-1204982019841029840928340... - ^ -INSERT INTO INT8_TBL(q1) VALUES ('- 123'); -ERROR: invalid input syntax for integer: "- 123" -LINE 1: INSERT INTO INT8_TBL(q1) VALUES ('- 123'); - ^ -INSERT INTO INT8_TBL(q1) VALUES (' 345 5'); -ERROR: invalid input syntax for integer: " 345 5" -LINE 1: INSERT INTO INT8_TBL(q1) VALUES (' 345 5'); - ^ -INSERT INTO INT8_TBL(q1) VALUES (''); -ERROR: invalid input syntax for integer: "" -LINE 1: INSERT INTO INT8_TBL(q1) VALUES (''); - ^ -SELECT * FROM INT8_TBL; - q1 | q2 -------------------+------------------- - 123 | 456 - 123 | 4567890123456789 - 4567890123456789 | 123 - 4567890123456789 | 4567890123456789 - 4567890123456789 | -4567890123456789 -(5 rows) - --- int8/int8 cmp -SELECT * FROM INT8_TBL WHERE q2 = 4567890123456789; - q1 | q2 -------------------+------------------ - 123 | 4567890123456789 - 4567890123456789 | 4567890123456789 -(2 rows) - -SELECT * FROM INT8_TBL WHERE q2 <> 4567890123456789; - q1 | q2 -------------------+------------------- - 123 | 456 - 4567890123456789 | 123 - 4567890123456789 | -4567890123456789 -(3 rows) - -SELECT * FROM INT8_TBL WHERE q2 < 4567890123456789; - q1 | q2 -------------------+------------------- - 123 | 456 - 4567890123456789 | 123 - 4567890123456789 | -4567890123456789 -(3 rows) - -SELECT * FROM INT8_TBL WHERE q2 > 4567890123456789; - q1 | q2 -----+---- -(0 rows) - -SELECT * FROM INT8_TBL WHERE q2 <= 4567890123456789; - q1 | q2 -------------------+------------------- - 123 | 456 - 123 | 4567890123456789 - 4567890123456789 | 123 - 4567890123456789 | 4567890123456789 - 4567890123456789 | -4567890123456789 -(5 rows) - -SELECT * FROM INT8_TBL WHERE q2 >= 4567890123456789; - q1 | q2 -------------------+------------------ - 123 | 4567890123456789 - 4567890123456789 | 4567890123456789 -(2 rows) - --- int8/int4 cmp -SELECT * FROM INT8_TBL WHERE q2 = 456; - q1 | q2 ------+----- - 123 | 456 -(1 row) - -SELECT * FROM INT8_TBL WHERE q2 <> 456; - q1 | q2 -------------------+------------------- - 123 | 4567890123456789 - 4567890123456789 | 123 - 4567890123456789 | 4567890123456789 - 4567890123456789 | -4567890123456789 -(4 rows) - -SELECT * FROM INT8_TBL WHERE q2 < 456; - q1 | q2 -------------------+------------------- - 4567890123456789 | 123 - 4567890123456789 | -4567890123456789 -(2 rows) - -SELECT * FROM INT8_TBL WHERE q2 > 456; - q1 | q2 -------------------+------------------ - 123 | 4567890123456789 - 4567890123456789 | 4567890123456789 -(2 rows) - -SELECT * FROM 
INT8_TBL WHERE q2 <= 456; - q1 | q2 -------------------+------------------- - 123 | 456 - 4567890123456789 | 123 - 4567890123456789 | -4567890123456789 -(3 rows) - -SELECT * FROM INT8_TBL WHERE q2 >= 456; - q1 | q2 -------------------+------------------ - 123 | 456 - 123 | 4567890123456789 - 4567890123456789 | 4567890123456789 -(3 rows) - --- int4/int8 cmp -SELECT * FROM INT8_TBL WHERE 123 = q1; - q1 | q2 ------+------------------ - 123 | 456 - 123 | 4567890123456789 -(2 rows) - -SELECT * FROM INT8_TBL WHERE 123 <> q1; - q1 | q2 -------------------+------------------- - 4567890123456789 | 123 - 4567890123456789 | 4567890123456789 - 4567890123456789 | -4567890123456789 -(3 rows) - -SELECT * FROM INT8_TBL WHERE 123 < q1; - q1 | q2 -------------------+------------------- - 4567890123456789 | 123 - 4567890123456789 | 4567890123456789 - 4567890123456789 | -4567890123456789 -(3 rows) - -SELECT * FROM INT8_TBL WHERE 123 > q1; - q1 | q2 -----+---- -(0 rows) - -SELECT * FROM INT8_TBL WHERE 123 <= q1; - q1 | q2 -------------------+------------------- - 123 | 456 - 123 | 4567890123456789 - 4567890123456789 | 123 - 4567890123456789 | 4567890123456789 - 4567890123456789 | -4567890123456789 -(5 rows) - -SELECT * FROM INT8_TBL WHERE 123 >= q1; - q1 | q2 ------+------------------ - 123 | 456 - 123 | 4567890123456789 -(2 rows) - --- int8/int2 cmp -SELECT * FROM INT8_TBL WHERE q2 = '456'::int2; - q1 | q2 ------+----- - 123 | 456 -(1 row) - -SELECT * FROM INT8_TBL WHERE q2 <> '456'::int2; - q1 | q2 -------------------+------------------- - 123 | 4567890123456789 - 4567890123456789 | 123 - 4567890123456789 | 4567890123456789 - 4567890123456789 | -4567890123456789 -(4 rows) - -SELECT * FROM INT8_TBL WHERE q2 < '456'::int2; - q1 | q2 -------------------+------------------- - 4567890123456789 | 123 - 4567890123456789 | -4567890123456789 -(2 rows) - -SELECT * FROM INT8_TBL WHERE q2 > '456'::int2; - q1 | q2 -------------------+------------------ - 123 | 4567890123456789 - 4567890123456789 | 4567890123456789 -(2 rows) - -SELECT * FROM INT8_TBL WHERE q2 <= '456'::int2; - q1 | q2 -------------------+------------------- - 123 | 456 - 4567890123456789 | 123 - 4567890123456789 | -4567890123456789 -(3 rows) - -SELECT * FROM INT8_TBL WHERE q2 >= '456'::int2; - q1 | q2 -------------------+------------------ - 123 | 456 - 123 | 4567890123456789 - 4567890123456789 | 4567890123456789 -(3 rows) - --- int2/int8 cmp -SELECT * FROM INT8_TBL WHERE '123'::int2 = q1; - q1 | q2 ------+------------------ - 123 | 456 - 123 | 4567890123456789 -(2 rows) - -SELECT * FROM INT8_TBL WHERE '123'::int2 <> q1; - q1 | q2 -------------------+------------------- - 4567890123456789 | 123 - 4567890123456789 | 4567890123456789 - 4567890123456789 | -4567890123456789 -(3 rows) - -SELECT * FROM INT8_TBL WHERE '123'::int2 < q1; - q1 | q2 -------------------+------------------- - 4567890123456789 | 123 - 4567890123456789 | 4567890123456789 - 4567890123456789 | -4567890123456789 -(3 rows) - -SELECT * FROM INT8_TBL WHERE '123'::int2 > q1; - q1 | q2 -----+---- -(0 rows) - -SELECT * FROM INT8_TBL WHERE '123'::int2 <= q1; - q1 | q2 -------------------+------------------- - 123 | 456 - 123 | 4567890123456789 - 4567890123456789 | 123 - 4567890123456789 | 4567890123456789 - 4567890123456789 | -4567890123456789 -(5 rows) - -SELECT * FROM INT8_TBL WHERE '123'::int2 >= q1; - q1 | q2 ------+------------------ - 123 | 456 - 123 | 4567890123456789 -(2 rows) - -SELECT '' AS five, q1 AS plus, -q1 AS minus FROM INT8_TBL; - five | plus | minus 
-------+------------------+------------------- - | 123 | -123 - | 123 | -123 - | 4567890123456789 | -4567890123456789 - | 4567890123456789 | -4567890123456789 - | 4567890123456789 | -4567890123456789 -(5 rows) - -SELECT '' AS five, q1, q2, q1 + q2 AS plus FROM INT8_TBL; - five | q1 | q2 | plus -------+------------------+-------------------+------------------ - | 123 | 456 | 579 - | 123 | 4567890123456789 | 4567890123456912 - | 4567890123456789 | 123 | 4567890123456912 - | 4567890123456789 | 4567890123456789 | 9135780246913578 - | 4567890123456789 | -4567890123456789 | 0 -(5 rows) - -SELECT '' AS five, q1, q2, q1 - q2 AS minus FROM INT8_TBL; - five | q1 | q2 | minus -------+------------------+-------------------+------------------- - | 123 | 456 | -333 - | 123 | 4567890123456789 | -4567890123456666 - | 4567890123456789 | 123 | 4567890123456666 - | 4567890123456789 | 4567890123456789 | 0 - | 4567890123456789 | -4567890123456789 | 9135780246913578 -(5 rows) - -SELECT '' AS three, q1, q2, q1 * q2 AS multiply FROM INT8_TBL; -ERROR: bigint out of range -SELECT '' AS three, q1, q2, q1 * q2 AS multiply FROM INT8_TBL - WHERE q1 < 1000 or (q2 > 0 and q2 < 1000); - three | q1 | q2 | multiply --------+------------------+------------------+-------------------- - | 123 | 456 | 56088 - | 123 | 4567890123456789 | 561850485185185047 - | 4567890123456789 | 123 | 561850485185185047 -(3 rows) - -SELECT '' AS five, q1, q2, q1 / q2 AS divide, q1 % q2 AS mod FROM INT8_TBL; - five | q1 | q2 | divide | mod -------+------------------+-------------------+----------------+----- - | 123 | 456 | 0 | 123 - | 123 | 4567890123456789 | 0 | 123 - | 4567890123456789 | 123 | 37137318076884 | 57 - | 4567890123456789 | 4567890123456789 | 1 | 0 - | 4567890123456789 | -4567890123456789 | -1 | 0 -(5 rows) - -SELECT '' AS five, q1, float8(q1) FROM INT8_TBL; - five | q1 | float8 -------+------------------+----------------------- - | 123 | 123 - | 123 | 123 - | 4567890123456789 | 4.56789012345679e+015 - | 4567890123456789 | 4.56789012345679e+015 - | 4567890123456789 | 4.56789012345679e+015 -(5 rows) - -SELECT '' AS five, q2, float8(q2) FROM INT8_TBL; - five | q2 | float8 -------+-------------------+------------------------ - | 456 | 456 - | 4567890123456789 | 4.56789012345679e+015 - | 123 | 123 - | 4567890123456789 | 4.56789012345679e+015 - | -4567890123456789 | -4.56789012345679e+015 -(5 rows) - -SELECT 37 + q1 AS plus4 FROM INT8_TBL; - plus4 ------------------- - 160 - 160 - 4567890123456826 - 4567890123456826 - 4567890123456826 -(5 rows) - -SELECT 37 - q1 AS minus4 FROM INT8_TBL; - minus4 -------------------- - -86 - -86 - -4567890123456752 - -4567890123456752 - -4567890123456752 -(5 rows) - -SELECT '' AS five, 2 * q1 AS "twice int4" FROM INT8_TBL; - five | twice int4 -------+------------------ - | 246 - | 246 - | 9135780246913578 - | 9135780246913578 - | 9135780246913578 -(5 rows) - -SELECT '' AS five, q1 * 2 AS "twice int4" FROM INT8_TBL; - five | twice int4 -------+------------------ - | 246 - | 246 - | 9135780246913578 - | 9135780246913578 - | 9135780246913578 -(5 rows) - --- int8 op int4 -SELECT q1 + 42::int4 AS "8plus4", q1 - 42::int4 AS "8minus4", q1 * 42::int4 AS "8mul4", q1 / 42::int4 AS "8div4" FROM INT8_TBL; - 8plus4 | 8minus4 | 8mul4 | 8div4 -------------------+------------------+--------------------+----------------- - 165 | 81 | 5166 | 2 - 165 | 81 | 5166 | 2 - 4567890123456831 | 4567890123456747 | 191851385185185138 | 108759288653733 - 4567890123456831 | 4567890123456747 | 191851385185185138 | 108759288653733 - 
4567890123456831 | 4567890123456747 | 191851385185185138 | 108759288653733 -(5 rows) - --- int4 op int8 -SELECT 246::int4 + q1 AS "4plus8", 246::int4 - q1 AS "4minus8", 246::int4 * q1 AS "4mul8", 246::int4 / q1 AS "4div8" FROM INT8_TBL; - 4plus8 | 4minus8 | 4mul8 | 4div8 -------------------+-------------------+---------------------+------- - 369 | 123 | 30258 | 2 - 369 | 123 | 30258 | 2 - 4567890123457035 | -4567890123456543 | 1123700970370370094 | 0 - 4567890123457035 | -4567890123456543 | 1123700970370370094 | 0 - 4567890123457035 | -4567890123456543 | 1123700970370370094 | 0 -(5 rows) - --- int8 op int2 -SELECT q1 + 42::int2 AS "8plus2", q1 - 42::int2 AS "8minus2", q1 * 42::int2 AS "8mul2", q1 / 42::int2 AS "8div2" FROM INT8_TBL; - 8plus2 | 8minus2 | 8mul2 | 8div2 -------------------+------------------+--------------------+----------------- - 165 | 81 | 5166 | 2 - 165 | 81 | 5166 | 2 - 4567890123456831 | 4567890123456747 | 191851385185185138 | 108759288653733 - 4567890123456831 | 4567890123456747 | 191851385185185138 | 108759288653733 - 4567890123456831 | 4567890123456747 | 191851385185185138 | 108759288653733 -(5 rows) - --- int2 op int8 -SELECT 246::int2 + q1 AS "2plus8", 246::int2 - q1 AS "2minus8", 246::int2 * q1 AS "2mul8", 246::int2 / q1 AS "2div8" FROM INT8_TBL; - 2plus8 | 2minus8 | 2mul8 | 2div8 -------------------+-------------------+---------------------+------- - 369 | 123 | 30258 | 2 - 369 | 123 | 30258 | 2 - 4567890123457035 | -4567890123456543 | 1123700970370370094 | 0 - 4567890123457035 | -4567890123456543 | 1123700970370370094 | 0 - 4567890123457035 | -4567890123456543 | 1123700970370370094 | 0 -(5 rows) - -SELECT q2, abs(q2) FROM INT8_TBL; - q2 | abs --------------------+------------------ - 456 | 456 - 4567890123456789 | 4567890123456789 - 123 | 123 - 4567890123456789 | 4567890123456789 - -4567890123456789 | 4567890123456789 -(5 rows) - -SELECT min(q1), min(q2) FROM INT8_TBL; - min | min ------+------------------- - 123 | -4567890123456789 -(1 row) - -SELECT max(q1), max(q2) FROM INT8_TBL; - max | max -------------------+------------------ - 4567890123456789 | 4567890123456789 -(1 row) - --- TO_CHAR() --- -SELECT '' AS to_char_1, to_char(q1, '9G999G999G999G999G999'), to_char(q2, '9,999,999,999,999,999') - FROM INT8_TBL; - to_char_1 | to_char | to_char ------------+------------------------+------------------------ - | 123 | 456 - | 123 | 4,567,890,123,456,789 - | 4,567,890,123,456,789 | 123 - | 4,567,890,123,456,789 | 4,567,890,123,456,789 - | 4,567,890,123,456,789 | -4,567,890,123,456,789 -(5 rows) - -SELECT '' AS to_char_2, to_char(q1, '9G999G999G999G999G999D999G999'), to_char(q2, '9,999,999,999,999,999.999,999') - FROM INT8_TBL; - to_char_2 | to_char | to_char ------------+--------------------------------+-------------------------------- - | 123.000,000 | 456.000,000 - | 123.000,000 | 4,567,890,123,456,789.000,000 - | 4,567,890,123,456,789.000,000 | 123.000,000 - | 4,567,890,123,456,789.000,000 | 4,567,890,123,456,789.000,000 - | 4,567,890,123,456,789.000,000 | -4,567,890,123,456,789.000,000 -(5 rows) - -SELECT '' AS to_char_3, to_char( (q1 * -1), '9999999999999999PR'), to_char( (q2 * -1), '9999999999999999.999PR') - FROM INT8_TBL; - to_char_3 | to_char | to_char ------------+--------------------+------------------------ - | <123> | <456.000> - | <123> | <4567890123456789.000> - | <4567890123456789> | <123.000> - | <4567890123456789> | <4567890123456789.000> - | <4567890123456789> | 4567890123456789.000 -(5 rows) - -SELECT '' AS to_char_4, to_char( (q1 * -1), 
'9999999999999999S'), to_char( (q2 * -1), 'S9999999999999999') - FROM INT8_TBL; - to_char_4 | to_char | to_char ------------+-------------------+------------------- - | 123- | -456 - | 123- | -4567890123456789 - | 4567890123456789- | -123 - | 4567890123456789- | -4567890123456789 - | 4567890123456789- | +4567890123456789 -(5 rows) - -SELECT '' AS to_char_5, to_char(q2, 'MI9999999999999999') FROM INT8_TBL; - to_char_5 | to_char ------------+------------------- - | 456 - | 4567890123456789 - | 123 - | 4567890123456789 - | -4567890123456789 -(5 rows) - -SELECT '' AS to_char_6, to_char(q2, 'FMS9999999999999999') FROM INT8_TBL; - to_char_6 | to_char ------------+------------------- - | +456 - | +4567890123456789 - | +123 - | +4567890123456789 - | -4567890123456789 -(5 rows) - -SELECT '' AS to_char_7, to_char(q2, 'FM9999999999999999THPR') FROM INT8_TBL; - to_char_7 | to_char ------------+-------------------- - | 456TH - | 4567890123456789TH - | 123RD - | 4567890123456789TH - | <4567890123456789> -(5 rows) - -SELECT '' AS to_char_8, to_char(q2, 'SG9999999999999999th') FROM INT8_TBL; - to_char_8 | to_char ------------+--------------------- - | + 456th - | +4567890123456789th - | + 123rd - | +4567890123456789th - | -4567890123456789 -(5 rows) - -SELECT '' AS to_char_9, to_char(q2, '0999999999999999') FROM INT8_TBL; - to_char_9 | to_char ------------+------------------- - | 0000000000000456 - | 4567890123456789 - | 0000000000000123 - | 4567890123456789 - | -4567890123456789 -(5 rows) - -SELECT '' AS to_char_10, to_char(q2, 'S0999999999999999') FROM INT8_TBL; - to_char_10 | to_char -------------+------------------- - | +0000000000000456 - | +4567890123456789 - | +0000000000000123 - | +4567890123456789 - | -4567890123456789 -(5 rows) - -SELECT '' AS to_char_11, to_char(q2, 'FM0999999999999999') FROM INT8_TBL; - to_char_11 | to_char -------------+------------------- - | 0000000000000456 - | 4567890123456789 - | 0000000000000123 - | 4567890123456789 - | -4567890123456789 -(5 rows) - -SELECT '' AS to_char_12, to_char(q2, 'FM9999999999999999.000') FROM INT8_TBL; - to_char_12 | to_char -------------+----------------------- - | 456.000 - | 4567890123456789.000 - | 123.000 - | 4567890123456789.000 - | -4567890123456789.000 -(5 rows) - -SELECT '' AS to_char_13, to_char(q2, 'L9999999999999999.000') FROM INT8_TBL; - to_char_13 | to_char -------------+------------------------ - | 456.000 - | 4567890123456789.000 - | 123.000 - | 4567890123456789.000 - | -4567890123456789.000 -(5 rows) - -SELECT '' AS to_char_14, to_char(q2, 'FM9999999999999999.999') FROM INT8_TBL; - to_char_14 | to_char -------------+-------------------- - | 456. - | 4567890123456789. - | 123. - | 4567890123456789. - | -4567890123456789. -(5 rows) - -SELECT '' AS to_char_15, to_char(q2, 'S 9 9 9 9 9 9 9 9 9 9 9 9 9 9 9 9 . 9 9 9') FROM INT8_TBL; - to_char_15 | to_char -------------+------------------------------------------- - | +4 5 6 . 0 0 0 - | +4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 . 0 0 0 - | +1 2 3 . 0 0 0 - | +4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 . 0 0 0 - | -4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 . 
0 0 0 -(5 rows) - -SELECT '' AS to_char_16, to_char(q2, E'99999 "text" 9999 "9999" 999 "\\"text between quote marks\\"" 9999') FROM INT8_TBL; - to_char_16 | to_char -------------+----------------------------------------------------------- - | text 9999 "text between quote marks" 456 - | 45678 text 9012 9999 345 "text between quote marks" 6789 - | text 9999 "text between quote marks" 123 - | 45678 text 9012 9999 345 "text between quote marks" 6789 - | -45678 text 9012 9999 345 "text between quote marks" 6789 -(5 rows) - -SELECT '' AS to_char_17, to_char(q2, '999999SG9999999999') FROM INT8_TBL; - to_char_17 | to_char -------------+------------------- - | + 456 - | 456789+0123456789 - | + 123 - | 456789+0123456789 - | 456789-0123456789 -(5 rows) - --- check min/max values and overflow behavior -select '-9223372036854775808'::int8; - int8 ----------------------- - -9223372036854775808 -(1 row) - -select '-9223372036854775809'::int8; -ERROR: value "-9223372036854775809" is out of range for type bigint -LINE 1: select '-9223372036854775809'::int8; - ^ -select '9223372036854775807'::int8; - int8 ---------------------- - 9223372036854775807 -(1 row) - -select '9223372036854775808'::int8; -ERROR: value "9223372036854775808" is out of range for type bigint -LINE 1: select '9223372036854775808'::int8; - ^ -select -('-9223372036854775807'::int8); - ?column? ---------------------- - 9223372036854775807 -(1 row) - -select -('-9223372036854775808'::int8); -ERROR: bigint out of range -select '9223372036854775800'::int8 + '9223372036854775800'::int8; -ERROR: bigint out of range -select '-9223372036854775800'::int8 + '-9223372036854775800'::int8; -ERROR: bigint out of range -select '9223372036854775800'::int8 - '-9223372036854775800'::int8; -ERROR: bigint out of range -select '-9223372036854775800'::int8 - '9223372036854775800'::int8; -ERROR: bigint out of range -select '9223372036854775800'::int8 * '9223372036854775800'::int8; -ERROR: bigint out of range -select '9223372036854775800'::int8 / '0'::int8; -ERROR: division by zero -select '9223372036854775800'::int8 % '0'::int8; -ERROR: division by zero -select abs('-9223372036854775808'::int8); -ERROR: bigint out of range -select '9223372036854775800'::int8 + '100'::int4; -ERROR: bigint out of range -select '-9223372036854775800'::int8 - '100'::int4; -ERROR: bigint out of range -select '9223372036854775800'::int8 * '100'::int4; -ERROR: bigint out of range -select '100'::int4 + '9223372036854775800'::int8; -ERROR: bigint out of range -select '-100'::int4 - '9223372036854775800'::int8; -ERROR: bigint out of range -select '100'::int4 * '9223372036854775800'::int8; -ERROR: bigint out of range -select '9223372036854775800'::int8 + '100'::int2; -ERROR: bigint out of range -select '-9223372036854775800'::int8 - '100'::int2; -ERROR: bigint out of range -select '9223372036854775800'::int8 * '100'::int2; -ERROR: bigint out of range -select '-9223372036854775808'::int8 / '0'::int2; -ERROR: division by zero -select '100'::int2 + '9223372036854775800'::int8; -ERROR: bigint out of range -select '-100'::int2 - '9223372036854775800'::int8; -ERROR: bigint out of range -select '100'::int2 * '9223372036854775800'::int8; -ERROR: bigint out of range -select '100'::int2 / '0'::int8; -ERROR: division by zero -SELECT CAST(q1 AS int4) FROM int8_tbl WHERE q2 = 456; - q1 ------ - 123 -(1 row) - -SELECT CAST(q1 AS int4) FROM int8_tbl WHERE q2 <> 456; -ERROR: integer out of range -SELECT CAST(q1 AS int2) FROM int8_tbl WHERE q2 = 456; - q1 ------ - 123 -(1 row) - -SELECT CAST(q1 AS int2) 
FROM int8_tbl WHERE q2 <> 456; -ERROR: smallint out of range -SELECT CAST('42'::int2 AS int8), CAST('-37'::int2 AS int8); - int8 | int8 -------+------ - 42 | -37 -(1 row) - -SELECT CAST(q1 AS float4), CAST(q2 AS float8) FROM INT8_TBL; - q1 | q2 ---------------+------------------------ - 123 | 456 - 123 | 4.56789012345679e+015 - 4.56789e+015 | 123 - 4.56789e+015 | 4.56789012345679e+015 - 4.56789e+015 | -4.56789012345679e+015 -(5 rows) - -SELECT CAST('36854775807.0'::float4 AS int8); - int8 -------------- - 36854775808 -(1 row) - -SELECT CAST('922337203685477580700.0'::float8 AS int8); -ERROR: bigint out of range -SELECT CAST(q1 AS oid) FROM INT8_TBL; -ERROR: OID out of range -SELECT oid::int8 FROM pg_class WHERE relname = 'pg_class'; - oid ------- - 1259 -(1 row) - --- bit operations -SELECT q1, q2, q1 & q2 AS "and", q1 | q2 AS "or", q1 # q2 AS "xor", ~q1 AS "not" FROM INT8_TBL; - q1 | q2 | and | or | xor | not -------------------+-------------------+------------------+------------------+------------------+------------------- - 123 | 456 | 72 | 507 | 435 | -124 - 123 | 4567890123456789 | 17 | 4567890123456895 | 4567890123456878 | -124 - 4567890123456789 | 123 | 17 | 4567890123456895 | 4567890123456878 | -4567890123456790 - 4567890123456789 | 4567890123456789 | 4567890123456789 | 4567890123456789 | 0 | -4567890123456790 - 4567890123456789 | -4567890123456789 | 1 | -1 | -2 | -4567890123456790 -(5 rows) - -SELECT q1, q1 << 2 AS "shl", q1 >> 3 AS "shr" FROM INT8_TBL; - q1 | shl | shr -------------------+-------------------+----------------- - 123 | 492 | 15 - 123 | 492 | 15 - 4567890123456789 | 18271560493827156 | 570986265432098 - 4567890123456789 | 18271560493827156 | 570986265432098 - 4567890123456789 | 18271560493827156 | 570986265432098 -(5 rows) - --- generate_series -SELECT * FROM generate_series('+4567890123456789'::int8, '+4567890123456799'::int8); - generate_series ------------------- - 4567890123456789 - 4567890123456790 - 4567890123456791 - 4567890123456792 - 4567890123456793 - 4567890123456794 - 4567890123456795 - 4567890123456796 - 4567890123456797 - 4567890123456798 - 4567890123456799 -(11 rows) - -SELECT * FROM generate_series('+4567890123456789'::int8, '+4567890123456799'::int8, 0); -ERROR: step size cannot equal zero -SELECT * FROM generate_series('+4567890123456789'::int8, '+4567890123456799'::int8, 2); - generate_series ------------------- - 4567890123456789 - 4567890123456791 - 4567890123456793 - 4567890123456795 - 4567890123456797 - 4567890123456799 -(6 rows) - --- corner case -SELECT (-1::int8<<63)::text; - text ----------------------- - -9223372036854775808 -(1 row) - -SELECT ((-1::int8<<63)+1)::text; - text ----------------------- - -9223372036854775807 -(1 row) - --- check sane handling of INT64_MIN overflow cases -SELECT (-9223372036854775808)::int8 * (-1)::int8; -ERROR: bigint out of range -SELECT (-9223372036854775808)::int8 / (-1)::int8; -ERROR: bigint out of range -SELECT (-9223372036854775808)::int8 % (-1)::int8; - ?column? ----------- - 0 -(1 row) - -SELECT (-9223372036854775808)::int8 * (-1)::int4; -ERROR: bigint out of range -SELECT (-9223372036854775808)::int8 / (-1)::int4; -ERROR: bigint out of range -SELECT (-9223372036854775808)::int8 % (-1)::int4; - ?column? ----------- - 0 -(1 row) - -SELECT (-9223372036854775808)::int8 * (-1)::int2; -ERROR: bigint out of range -SELECT (-9223372036854775808)::int8 / (-1)::int2; -ERROR: bigint out of range -SELECT (-9223372036854775808)::int8 % (-1)::int2; - ?column? 
----------- - 0 -(1 row) - --- check rounding when casting from float -SELECT x, x::int8 AS int8_value -FROM (VALUES (-2.5::float8), - (-1.5::float8), - (-0.5::float8), - (0.0::float8), - (0.5::float8), - (1.5::float8), - (2.5::float8)) t(x); - x | int8_value -------+------------ - -2.5 | -2 - -1.5 | -2 - -0.5 | 0 - 0 | 0 - 0.5 | 0 - 1.5 | 2 - 2.5 | 2 -(7 rows) - --- check rounding when casting from numeric -SELECT x, x::int8 AS int8_value -FROM (VALUES (-2.5::numeric), - (-1.5::numeric), - (-0.5::numeric), - (0.0::numeric), - (0.5::numeric), - (1.5::numeric), - (2.5::numeric)) t(x); - x | int8_value -------+------------ - -2.5 | -3 - -1.5 | -2 - -0.5 | -1 - 0.0 | 0 - 0.5 | 1 - 1.5 | 2 - 2.5 | 3 -(7 rows) - diff --git a/src/test/regress/expected/int8.out b/src/test/regress/expected/int8.out index ed0bd34221..35e3b3ff81 100644 --- a/src/test/regress/expected/int8.out +++ b/src/test/regress/expected/int8.out @@ -10,11 +10,11 @@ INSERT INTO INT8_TBL VALUES(+4567890123456789,'4567890123456789'); INSERT INTO INT8_TBL VALUES('+4567890123456789','-4567890123456789'); -- bad inputs INSERT INTO INT8_TBL(q1) VALUES (' '); -ERROR: invalid input syntax for integer: " " +ERROR: invalid input syntax for type bigint: " " LINE 1: INSERT INTO INT8_TBL(q1) VALUES (' '); ^ INSERT INTO INT8_TBL(q1) VALUES ('xxx'); -ERROR: invalid input syntax for integer: "xxx" +ERROR: invalid input syntax for type bigint: "xxx" LINE 1: INSERT INTO INT8_TBL(q1) VALUES ('xxx'); ^ INSERT INTO INT8_TBL(q1) VALUES ('3908203590239580293850293850329485'); @@ -26,15 +26,15 @@ ERROR: value "-1204982019841029840928340329840934" is out of range for type big LINE 1: INSERT INTO INT8_TBL(q1) VALUES ('-1204982019841029840928340... ^ INSERT INTO INT8_TBL(q1) VALUES ('- 123'); -ERROR: invalid input syntax for integer: "- 123" +ERROR: invalid input syntax for type bigint: "- 123" LINE 1: INSERT INTO INT8_TBL(q1) VALUES ('- 123'); ^ INSERT INTO INT8_TBL(q1) VALUES (' 345 5'); -ERROR: invalid input syntax for integer: " 345 5" +ERROR: invalid input syntax for type bigint: " 345 5" LINE 1: INSERT INTO INT8_TBL(q1) VALUES (' 345 5'); ^ INSERT INTO INT8_TBL(q1) VALUES (''); -ERROR: invalid input syntax for integer: "" +ERROR: invalid input syntax for type bigint: "" LINE 1: INSERT INTO INT8_TBL(q1) VALUES (''); ^ SELECT * FROM INT8_TBL; diff --git a/src/test/regress/expected/join.out b/src/test/regress/expected/join.out index 9f4c88dab4..1f5378080d 100644 --- a/src/test/regress/expected/join.out +++ b/src/test/regress/expected/join.out @@ -1845,6 +1845,28 @@ SELECT '' AS "xxx", * | 1 | 4 | one | -1 (1 row) +-- +-- semijoin selectivity for <> +-- +explain (costs off) +select * from int4_tbl i4, tenk1 a +where exists(select * from tenk1 b + where a.twothousand = b.twothousand and a.fivethous <> b.fivethous) + and i4.f1 = a.tenthous; + QUERY PLAN +---------------------------------------------- + Hash Semi Join + Hash Cond: (a.twothousand = b.twothousand) + Join Filter: (a.fivethous <> b.fivethous) + -> Hash Join + Hash Cond: (a.tenthous = i4.f1) + -> Seq Scan on tenk1 a + -> Hash + -> Seq Scan on int4_tbl i4 + -> Hash + -> Seq Scan on tenk1 b +(10 rows) + -- -- More complicated constructs -- @@ -2002,6 +2024,20 @@ NATURAL FULL JOIN ee | | 42 | 2 | (4 rows) +-- Constants as join keys can also be problematic +SELECT * FROM + (SELECT name, n as s1_n FROM t1) as s1 +FULL JOIN + (SELECT name, 2 as s2_n FROM t2) as s2 +ON (s1_n = s2_n); + name | s1_n | name | s2_n +------+------+------+------ + | | bb | 2 + | | cc | 2 + | | ee | 2 + bb | 11 | | +(4 rows) 
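The FULL JOIN just above is the case the comment calls problematic: one join key is the constant 2, no s1_n value equals it, and so every input row must come back exactly once, null-extended from the other side. A self-contained sketch of the same shape (hypothetical t1_demo/t2_demo stand-ins, since the real t1/t2 are created earlier in the join test):

create temp table t1_demo (name text, n integer);
create temp table t2_demo (name text, n integer);
insert into t1_demo values ('bb', 11);
insert into t2_demo values ('bb', 12), ('cc', 22), ('ee', 42);
-- s2_n is the constant 2 for every t2_demo row and nothing in t1_demo matches it,
-- so the full join should return four rows, each null-extended on one side,
-- mirroring the 4-row result shown above.
select * from
  (select name, n as s1_n from t1_demo) as s1
  full join
  (select name, 2 as s2_n from t2_demo) as s2
  on (s1_n = s2_n);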
+ -- Test for propagation of nullability constraints into sub-joins create temp table x (x1 int, x2 int); insert into x values (1,11); @@ -2268,6 +2304,86 @@ where b.f1 = t.thousand and a.f1 = b.f1 and (a.f1+b.f1+999) = t.tenthous; ----+----+----------+---------- (0 rows) +-- +-- check a case where we formerly got confused by conflicting sort orders +-- in redundant merge join path keys +-- +explain (costs off) +select * from + j1_tbl full join + (select * from j2_tbl order by j2_tbl.i desc, j2_tbl.k asc) j2_tbl + on j1_tbl.i = j2_tbl.i and j1_tbl.i = j2_tbl.k; + QUERY PLAN +----------------------------------------------------------------- + Merge Full Join + Merge Cond: ((j2_tbl.i = j1_tbl.i) AND (j2_tbl.k = j1_tbl.i)) + -> Sort + Sort Key: j2_tbl.i DESC, j2_tbl.k + -> Seq Scan on j2_tbl + -> Sort + Sort Key: j1_tbl.i DESC + -> Seq Scan on j1_tbl +(8 rows) + +select * from + j1_tbl full join + (select * from j2_tbl order by j2_tbl.i desc, j2_tbl.k asc) j2_tbl + on j1_tbl.i = j2_tbl.i and j1_tbl.i = j2_tbl.k; + i | j | t | i | k +---+---+-------+---+---- + | | | | 0 + | | | | + | 0 | zero | | + | | null | | + 8 | 8 | eight | | + 7 | 7 | seven | | + 6 | 6 | six | | + | | | 5 | -5 + | | | 5 | -5 + 5 | 0 | five | | + 4 | 1 | four | | + | | | 3 | -3 + 3 | 2 | three | | + 2 | 3 | two | 2 | 2 + | | | 2 | 4 + | | | 1 | -1 + | | | 0 | + 1 | 4 | one | | + 0 | | zero | | +(19 rows) + +-- +-- a different check for handling of redundant sort keys in merge joins +-- +explain (costs off) +select count(*) from + (select * from tenk1 x order by x.thousand, x.twothousand, x.fivethous) x + left join + (select * from tenk1 y order by y.unique2) y + on x.thousand = y.unique2 and x.twothousand = y.hundred and x.fivethous = y.unique2; + QUERY PLAN +---------------------------------------------------------------------------------- + Aggregate + -> Merge Left Join + Merge Cond: (x.thousand = y.unique2) + Join Filter: ((x.twothousand = y.hundred) AND (x.fivethous = y.unique2)) + -> Sort + Sort Key: x.thousand, x.twothousand, x.fivethous + -> Seq Scan on tenk1 x + -> Materialize + -> Index Scan using tenk1_unique2 on tenk1 y +(9 rows) + +select count(*) from + (select * from tenk1 x order by x.thousand, x.twothousand, x.fivethous) x + left join + (select * from tenk1 y order by y.unique2) y + on x.thousand = y.unique2 and x.twothousand = y.hundred and x.fivethous = y.unique2; + count +------- + 10000 +(1 row) + -- -- Clean up -- @@ -2570,6 +2686,36 @@ select * from a left join b on i = x and i = y and x = i; ---+---+--- (0 rows) +rollback; +-- +-- test handling of merge clauses using record_ops +-- +begin; +create type mycomptype as (id int, v bigint); +create temp table tidv (idv mycomptype); +create index on tidv (idv); +explain (costs off) +select a.idv, b.idv from tidv a, tidv b where a.idv = b.idv; + QUERY PLAN +---------------------------------------------------------- + Merge Join + Merge Cond: (a.idv = b.idv) + -> Index Only Scan using tidv_idv_idx on tidv a + -> Materialize + -> Index Only Scan using tidv_idv_idx on tidv b +(5 rows) + +set enable_mergejoin = 0; +explain (costs off) +select a.idv, b.idv from tidv a, tidv b where a.idv = b.idv; + QUERY PLAN +---------------------------------------------------- + Nested Loop + -> Seq Scan on tidv a + -> Index Only Scan using tidv_idv_idx on tidv b + Index Cond: (idv = a.idv) +(4 rows) + rollback; -- -- test NULL behavior of whole-row Vars, per bug #5025 @@ -2722,7 +2868,7 @@ SELECT qq, unique1 
--------------------------------------------------------------------------------------------------------- Nested Loop -> Hash Full Join - Hash Cond: (COALESCE(a.q1, '0'::bigint) = COALESCE(b.q2, '-1'::bigint)) + Hash Cond: ((COALESCE(a.q1, '0'::bigint)) = (COALESCE(b.q2, '-1'::bigint))) -> Seq Scan on int8_tbl a -> Hash -> Seq Scan on int8_tbl b @@ -3269,6 +3415,33 @@ order by fault; | 123 | 122 (1 row) +explain (costs off) +select * from +(values (1, array[10,20]), (2, array[20,30])) as v1(v1x,v1ys) +left join (values (1, 10), (2, 20)) as v2(v2x,v2y) on v2x = v1x +left join unnest(v1ys) as u1(u1y) on u1y = v2y; + QUERY PLAN +------------------------------------------------------------- + Nested Loop Left Join + -> Values Scan on "*VALUES*" + -> Hash Right Join + Hash Cond: (u1.u1y = "*VALUES*_1".column2) + Filter: ("*VALUES*_1".column1 = "*VALUES*".column1) + -> Function Scan on unnest u1 + -> Hash + -> Values Scan on "*VALUES*_1" +(8 rows) + +select * from +(values (1, array[10,20]), (2, array[20,30])) as v1(v1x,v1ys) +left join (values (1, 10), (2, 20)) as v2(v2x,v2y) on v2x = v1x +left join unnest(v1ys) as u1(u1y) on u1y = v2y; + v1x | v1ys | v2x | v2y | u1y +-----+---------+-----+-----+----- + 1 | {10,20} | 1 | 10 | 10 + 2 | {20,30} | 2 | 20 | 20 +(2 rows) + -- -- test handling of potential equivalence clauses above outer joins -- @@ -4060,6 +4233,18 @@ select i8.* from int8_tbl i8 left join (select f1 from int4_tbl group by f1) i4 Seq Scan on int8_tbl i8 (1 row) +-- check join removal with lateral references +explain (costs off) +select 1 from (select a.id FROM a left join b on a.b_id = b.id) q, + lateral generate_series(1, q.id) gs(i) where q.id = gs.i; + QUERY PLAN +------------------------------------------- + Nested Loop + -> Seq Scan on a + -> Function Scan on generate_series gs + Filter: (a.id = i) +(4 rows) + rollback; create temp table parent (k int primary key, pd int); create temp table child (k int unique, cd int); @@ -5154,6 +5339,25 @@ select * from Output: 3 (11 rows) +-- check handling of nested appendrels inside LATERAL +select * from + ((select 2 as v) union all (select 3 as v)) as q1 + cross join lateral + ((select * from + ((select 4 as v) union all (select 5 as v)) as q3) + union all + (select q1.v) + ) as q2; + v | v +---+--- + 2 | 4 + 2 | 5 + 2 | 2 + 3 | 4 + 3 | 5 + 3 | 3 +(6 rows) + -- check we don't try to do a unique-ified semijoin with LATERAL explain (verbose, costs off) select * from @@ -5328,6 +5532,59 @@ LINE 1: ...xx1 using lateral (select * from int4_tbl where f1 = x1) ss; ^ HINT: There is an entry for table "xx1", but it cannot be referenced from this part of the query. -- +-- test LATERAL reference propagation down a multi-level inheritance hierarchy +-- produced for a multi-level partitioned table hierarchy. 
+-- +create table join_pt1 (a int, b int, c varchar) partition by range(a); +create table join_pt1p1 partition of join_pt1 for values from (0) to (100) partition by range(b); +create table join_pt1p2 partition of join_pt1 for values from (100) to (200); +create table join_pt1p1p1 partition of join_pt1p1 for values from (0) to (100); +insert into join_pt1 values (1, 1, 'x'), (101, 101, 'y'); +create table join_ut1 (a int, b int, c varchar); +insert into join_ut1 values (101, 101, 'y'), (2, 2, 'z'); +explain (verbose, costs off) +select t1.b, ss.phv from join_ut1 t1 left join lateral + (select t2.a as t2a, t3.a t3a, least(t1.a, t2.a, t3.a) phv + from join_pt1 t2 join join_ut1 t3 on t2.a = t3.b) ss + on t1.a = ss.t2a order by t1.a; + QUERY PLAN +------------------------------------------------------------------ + Sort + Output: t1.b, (LEAST(t1.a, t2.a, t3.a)), t1.a + Sort Key: t1.a + -> Nested Loop Left Join + Output: t1.b, (LEAST(t1.a, t2.a, t3.a)), t1.a + -> Seq Scan on public.join_ut1 t1 + Output: t1.a, t1.b, t1.c + -> Hash Join + Output: t2.a, LEAST(t1.a, t2.a, t3.a) + Hash Cond: (t3.b = t2.a) + -> Seq Scan on public.join_ut1 t3 + Output: t3.a, t3.b, t3.c + -> Hash + Output: t2.a + -> Append + -> Seq Scan on public.join_pt1p1p1 t2 + Output: t2.a + Filter: (t1.a = t2.a) + -> Seq Scan on public.join_pt1p2 t2_1 + Output: t2_1.a + Filter: (t1.a = t2_1.a) +(21 rows) + +select t1.b, ss.phv from join_ut1 t1 left join lateral + (select t2.a as t2a, t3.a t3a, least(t1.a, t2.a, t3.a) phv + from join_pt1 t2 join join_ut1 t3 on t2.a = t3.b) ss + on t1.a = ss.t2a order by t1.a; + b | phv +-----+----- + 2 | + 101 | 101 +(2 rows) + +drop table join_pt1; +drop table join_ut1; +-- -- test that foreign key join estimation performs sanely for outer joins -- begin; @@ -5720,3 +5977,883 @@ where exists (select 1 from j3 (13 rows) drop table j3; +-- +-- exercises for the hash join code +-- +begin; +set local min_parallel_table_scan_size = 0; +set local parallel_setup_cost = 0; +-- Extract bucket and batch counts from an explain analyze plan. In +-- general we can't make assertions about how many batches (or +-- buckets) will be required because it can vary, but we can in some +-- special cases and we can check for growth. +create or replace function find_hash(node json) +returns json language plpgsql +as +$$ +declare + x json; + child json; +begin + if node->>'Node Type' = 'Hash' then + return node; + else + for child in select json_array_elements(node->'Plans') + loop + x := find_hash(child); + if x is not null then + return x; + end if; + end loop; + return null; + end if; +end; +$$; +create or replace function hash_join_batches(query text) +returns table (original int, final int) language plpgsql +as +$$ +declare + whole_plan json; + hash_node json; +begin + for whole_plan in + execute 'explain (analyze, format ''json'') ' || query + loop + hash_node := find_hash(json_extract_path(whole_plan, '0', 'Plan')); + original := hash_node->>'Original Hash Batches'; + final := hash_node->>'Hash Batches'; + return next; + end loop; +end; +$$; +-- Make a simple relation with well distributed keys and correctly +-- estimated size. +create table simple as + select generate_series(1, 20000) AS id, 'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa'; +alter table simple set (parallel_workers = 2); +analyze simple; +-- Make a relation whose size we will under-estimate. We want stats +-- to say 1000 rows, but actually there are 20,000 rows. 
+create table bigger_than_it_looks as + select generate_series(1, 20000) as id, 'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa'; +alter table bigger_than_it_looks set (autovacuum_enabled = 'false'); +alter table bigger_than_it_looks set (parallel_workers = 2); +analyze bigger_than_it_looks; +update pg_class set reltuples = 1000 where relname = 'bigger_than_it_looks'; +-- Make a relation whose size we underestimate and that also has a +-- kind of skew that breaks our batching scheme. We want stats to say +-- 2 rows, but actually there are 20,000 rows with the same key. +create table extremely_skewed (id int, t text); +alter table extremely_skewed set (autovacuum_enabled = 'false'); +alter table extremely_skewed set (parallel_workers = 2); +analyze extremely_skewed; +insert into extremely_skewed + select 42 as id, 'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa' + from generate_series(1, 20000); +update pg_class + set reltuples = 2, relpages = pg_relation_size('extremely_skewed') / 8192 + where relname = 'extremely_skewed'; +-- Make a relation with a couple of enormous tuples. +create table wide as select generate_series(1, 2) as id, rpad('', 320000, 'x') as t; +alter table wide set (parallel_workers = 2); +-- The "optimal" case: the hash table fits in memory; we plan for 1 +-- batch, we stick to that number, and peak memory usage stays within +-- our work_mem budget +-- non-parallel +savepoint settings; +set local max_parallel_workers_per_gather = 0; +set local work_mem = '4MB'; +explain (costs off) + select count(*) from simple r join simple s using (id); + QUERY PLAN +---------------------------------------- + Aggregate + -> Hash Join + Hash Cond: (r.id = s.id) + -> Seq Scan on simple r + -> Hash + -> Seq Scan on simple s +(6 rows) + +select count(*) from simple r join simple s using (id); + count +------- + 20000 +(1 row) + +select original > 1 as initially_multibatch, final > original as increased_batches + from hash_join_batches( +$$ + select count(*) from simple r join simple s using (id); +$$); + initially_multibatch | increased_batches +----------------------+------------------- + f | f +(1 row) + +rollback to settings; +-- parallel with parallel-oblivious hash join +savepoint settings; +set local max_parallel_workers_per_gather = 2; +set local work_mem = '4MB'; +set local enable_parallel_hash = off; +explain (costs off) + select count(*) from simple r join simple s using (id); + QUERY PLAN +------------------------------------------------------- + Finalize Aggregate + -> Gather + Workers Planned: 2 + -> Partial Aggregate + -> Hash Join + Hash Cond: (r.id = s.id) + -> Parallel Seq Scan on simple r + -> Hash + -> Seq Scan on simple s +(9 rows) + +select count(*) from simple r join simple s using (id); + count +------- + 20000 +(1 row) + +select original > 1 as initially_multibatch, final > original as increased_batches + from hash_join_batches( +$$ + select count(*) from simple r join simple s using (id); +$$); + initially_multibatch | increased_batches +----------------------+------------------- + f | f +(1 row) + +rollback to settings; +-- parallel with parallel-aware hash join +savepoint settings; +set local max_parallel_workers_per_gather = 2; +set local work_mem = '4MB'; +set local enable_parallel_hash = on; +explain (costs off) + select count(*) from simple r join simple s using (id); + QUERY PLAN +------------------------------------------------------------- + Finalize Aggregate + -> Gather + Workers Planned: 2 + -> Partial Aggregate + -> Parallel Hash Join + Hash Cond: (r.id = s.id) + -> Parallel 
Seq Scan on simple r + -> Parallel Hash + -> Parallel Seq Scan on simple s +(9 rows) + +select count(*) from simple r join simple s using (id); + count +------- + 20000 +(1 row) + +select original > 1 as initially_multibatch, final > original as increased_batches + from hash_join_batches( +$$ + select count(*) from simple r join simple s using (id); +$$); + initially_multibatch | increased_batches +----------------------+------------------- + f | f +(1 row) + +rollback to settings; +-- The "good" case: batches required, but we plan the right number; we +-- plan for some number of batches, and we stick to that number, and +-- peak memory usage says within our work_mem budget +-- non-parallel +savepoint settings; +set local max_parallel_workers_per_gather = 0; +set local work_mem = '128kB'; +explain (costs off) + select count(*) from simple r join simple s using (id); + QUERY PLAN +---------------------------------------- + Aggregate + -> Hash Join + Hash Cond: (r.id = s.id) + -> Seq Scan on simple r + -> Hash + -> Seq Scan on simple s +(6 rows) + +select count(*) from simple r join simple s using (id); + count +------- + 20000 +(1 row) + +select original > 1 as initially_multibatch, final > original as increased_batches + from hash_join_batches( +$$ + select count(*) from simple r join simple s using (id); +$$); + initially_multibatch | increased_batches +----------------------+------------------- + t | f +(1 row) + +rollback to settings; +-- parallel with parallel-oblivious hash join +savepoint settings; +set local max_parallel_workers_per_gather = 2; +set local work_mem = '128kB'; +set local enable_parallel_hash = off; +explain (costs off) + select count(*) from simple r join simple s using (id); + QUERY PLAN +------------------------------------------------------- + Finalize Aggregate + -> Gather + Workers Planned: 2 + -> Partial Aggregate + -> Hash Join + Hash Cond: (r.id = s.id) + -> Parallel Seq Scan on simple r + -> Hash + -> Seq Scan on simple s +(9 rows) + +select count(*) from simple r join simple s using (id); + count +------- + 20000 +(1 row) + +select original > 1 as initially_multibatch, final > original as increased_batches + from hash_join_batches( +$$ + select count(*) from simple r join simple s using (id); +$$); + initially_multibatch | increased_batches +----------------------+------------------- + t | f +(1 row) + +rollback to settings; +-- parallel with parallel-aware hash join +savepoint settings; +set local max_parallel_workers_per_gather = 2; +set local work_mem = '192kB'; +set local enable_parallel_hash = on; +explain (costs off) + select count(*) from simple r join simple s using (id); + QUERY PLAN +------------------------------------------------------------- + Finalize Aggregate + -> Gather + Workers Planned: 2 + -> Partial Aggregate + -> Parallel Hash Join + Hash Cond: (r.id = s.id) + -> Parallel Seq Scan on simple r + -> Parallel Hash + -> Parallel Seq Scan on simple s +(9 rows) + +select count(*) from simple r join simple s using (id); + count +------- + 20000 +(1 row) + +select original > 1 as initially_multibatch, final > original as increased_batches + from hash_join_batches( +$$ + select count(*) from simple r join simple s using (id); +$$); + initially_multibatch | increased_batches +----------------------+------------------- + t | f +(1 row) + +rollback to settings; +-- The "bad" case: during execution we need to increase number of +-- batches; in this case we plan for 1 batch, and increase at least a +-- couple of times, and peak memory usage stays 
within our work_mem +-- budget +-- non-parallel +savepoint settings; +set local max_parallel_workers_per_gather = 0; +set local work_mem = '128kB'; +explain (costs off) + select count(*) FROM simple r JOIN bigger_than_it_looks s USING (id); + QUERY PLAN +------------------------------------------------------ + Aggregate + -> Hash Join + Hash Cond: (r.id = s.id) + -> Seq Scan on simple r + -> Hash + -> Seq Scan on bigger_than_it_looks s +(6 rows) + +select count(*) FROM simple r JOIN bigger_than_it_looks s USING (id); + count +------- + 20000 +(1 row) + +select original > 1 as initially_multibatch, final > original as increased_batches + from hash_join_batches( +$$ + select count(*) FROM simple r JOIN bigger_than_it_looks s USING (id); +$$); + initially_multibatch | increased_batches +----------------------+------------------- + f | t +(1 row) + +rollback to settings; +-- parallel with parallel-oblivious hash join +savepoint settings; +set local max_parallel_workers_per_gather = 2; +set local work_mem = '128kB'; +set local enable_parallel_hash = off; +explain (costs off) + select count(*) from simple r join bigger_than_it_looks s using (id); + QUERY PLAN +------------------------------------------------------------------ + Finalize Aggregate + -> Gather + Workers Planned: 2 + -> Partial Aggregate + -> Hash Join + Hash Cond: (r.id = s.id) + -> Parallel Seq Scan on simple r + -> Hash + -> Seq Scan on bigger_than_it_looks s +(9 rows) + +select count(*) from simple r join bigger_than_it_looks s using (id); + count +------- + 20000 +(1 row) + +select original > 1 as initially_multibatch, final > original as increased_batches + from hash_join_batches( +$$ + select count(*) from simple r join bigger_than_it_looks s using (id); +$$); + initially_multibatch | increased_batches +----------------------+------------------- + f | t +(1 row) + +rollback to settings; +-- parallel with parallel-aware hash join +savepoint settings; +set local max_parallel_workers_per_gather = 1; +set local work_mem = '192kB'; +set local enable_parallel_hash = on; +explain (costs off) + select count(*) from simple r join bigger_than_it_looks s using (id); + QUERY PLAN +--------------------------------------------------------------------------- + Finalize Aggregate + -> Gather + Workers Planned: 1 + -> Partial Aggregate + -> Parallel Hash Join + Hash Cond: (r.id = s.id) + -> Parallel Seq Scan on simple r + -> Parallel Hash + -> Parallel Seq Scan on bigger_than_it_looks s +(9 rows) + +select count(*) from simple r join bigger_than_it_looks s using (id); + count +------- + 20000 +(1 row) + +select original > 1 as initially_multibatch, final > original as increased_batches + from hash_join_batches( +$$ + select count(*) from simple r join bigger_than_it_looks s using (id); +$$); + initially_multibatch | increased_batches +----------------------+------------------- + f | t +(1 row) + +rollback to settings; +-- The "ugly" case: increasing the number of batches during execution +-- doesn't help, so stop trying to fit in work_mem and hope for the +-- best; in this case we plan for 1 batch, increases just once and +-- then stop increasing because that didn't help at all, so we blow +-- right through the work_mem budget and hope for the best... 
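The batch counts asserted in the cases below come from the hash_join_batches() helper defined above, which pulls the Hash node out of EXPLAIN (ANALYZE, FORMAT JSON) output. A minimal standalone sketch of the same check, not part of the regression test and assuming only some populated table "t" with an "id" column:

-- Illustrative only: compare planned vs. run-time hash batches.
set work_mem = '128kB';
explain (analyze, format json)
  select count(*) from t r join t s using (id);
-- In the JSON plan, the Hash node carries "Original Hash Batches"
-- (planned) and "Hash Batches" (final); final > original means the
-- executor had to split batches while running.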
+-- non-parallel +savepoint settings; +set local max_parallel_workers_per_gather = 0; +set local work_mem = '128kB'; +explain (costs off) + select count(*) from simple r join extremely_skewed s using (id); + QUERY PLAN +-------------------------------------------------- + Aggregate + -> Hash Join + Hash Cond: (r.id = s.id) + -> Seq Scan on simple r + -> Hash + -> Seq Scan on extremely_skewed s +(6 rows) + +select count(*) from simple r join extremely_skewed s using (id); + count +------- + 20000 +(1 row) + +select * from hash_join_batches( +$$ + select count(*) from simple r join extremely_skewed s using (id); +$$); + original | final +----------+------- + 1 | 2 +(1 row) + +rollback to settings; +-- parallel with parallel-oblivious hash join +savepoint settings; +set local max_parallel_workers_per_gather = 2; +set local work_mem = '128kB'; +set local enable_parallel_hash = off; +explain (costs off) + select count(*) from simple r join extremely_skewed s using (id); + QUERY PLAN +-------------------------------------------------------- + Aggregate + -> Gather + Workers Planned: 2 + -> Hash Join + Hash Cond: (r.id = s.id) + -> Parallel Seq Scan on simple r + -> Hash + -> Seq Scan on extremely_skewed s +(8 rows) + +select count(*) from simple r join extremely_skewed s using (id); + count +------- + 20000 +(1 row) + +select * from hash_join_batches( +$$ + select count(*) from simple r join extremely_skewed s using (id); +$$); + original | final +----------+------- + 1 | 2 +(1 row) + +rollback to settings; +-- parallel with parallel-aware hash join +savepoint settings; +set local max_parallel_workers_per_gather = 1; +set local work_mem = '128kB'; +set local enable_parallel_hash = on; +explain (costs off) + select count(*) from simple r join extremely_skewed s using (id); + QUERY PLAN +----------------------------------------------------------------------- + Finalize Aggregate + -> Gather + Workers Planned: 1 + -> Partial Aggregate + -> Parallel Hash Join + Hash Cond: (r.id = s.id) + -> Parallel Seq Scan on simple r + -> Parallel Hash + -> Parallel Seq Scan on extremely_skewed s +(9 rows) + +select count(*) from simple r join extremely_skewed s using (id); + count +------- + 20000 +(1 row) + +select * from hash_join_batches( +$$ + select count(*) from simple r join extremely_skewed s using (id); +$$); + original | final +----------+------- + 1 | 4 +(1 row) + +rollback to settings; +-- A couple of other hash join tests unrelated to work_mem management. +-- Check that EXPLAIN ANALYZE has data even if the leader doesn't participate +savepoint settings; +set local max_parallel_workers_per_gather = 2; +set local work_mem = '4MB'; +set local parallel_leader_participation = off; +select * from hash_join_batches( +$$ + select count(*) from simple r join simple s using (id); +$$); + original | final +----------+------- + 1 | 1 +(1 row) + +rollback to settings; +-- Exercise rescans. We'll turn off parallel_leader_participation so +-- that we can check that instrumentation comes back correctly. 
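As a rough standalone illustration of that instrumentation check (outside the regression schema; the join_demo table name is made up, and a parallel plan is only chosen if the table is large enough):

-- Illustrative sketch: with the leader sitting out, the actual row
-- counts shown by EXPLAIN ANALYZE must have been gathered from workers.
set parallel_leader_participation = off;
set max_parallel_workers_per_gather = 2;
explain (analyze, costs off)
  select count(*) from join_demo a join join_demo b using (id);
-- The Gather node's "Workers Launched" line and the per-node actual
-- rows come entirely from background-worker instrumentation in this
-- configuration.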
+create table join_foo as select generate_series(1, 3) as id, 'xxxxx'::text as t; +alter table join_foo set (parallel_workers = 0); +create table join_bar as select generate_series(1, 10000) as id, 'xxxxx'::text as t; +alter table join_bar set (parallel_workers = 2); +-- multi-batch with rescan, parallel-oblivious +savepoint settings; +set enable_parallel_hash = off; +set parallel_leader_participation = off; +set min_parallel_table_scan_size = 0; +set parallel_setup_cost = 0; +set parallel_tuple_cost = 0; +set max_parallel_workers_per_gather = 2; +set enable_material = off; +set enable_mergejoin = off; +set work_mem = '64kB'; +explain (costs off) + select count(*) from join_foo + left join (select b1.id, b1.t from join_bar b1 join join_bar b2 using (id)) ss + on join_foo.id < ss.id + 1 and join_foo.id > ss.id - 1; + QUERY PLAN +------------------------------------------------------------------------------------ + Aggregate + -> Nested Loop Left Join + Join Filter: ((join_foo.id < (b1.id + 1)) AND (join_foo.id > (b1.id - 1))) + -> Seq Scan on join_foo + -> Gather + Workers Planned: 2 + -> Hash Join + Hash Cond: (b1.id = b2.id) + -> Parallel Seq Scan on join_bar b1 + -> Hash + -> Seq Scan on join_bar b2 +(11 rows) + +select count(*) from join_foo + left join (select b1.id, b1.t from join_bar b1 join join_bar b2 using (id)) ss + on join_foo.id < ss.id + 1 and join_foo.id > ss.id - 1; + count +------- + 3 +(1 row) + +select final > 1 as multibatch + from hash_join_batches( +$$ + select count(*) from join_foo + left join (select b1.id, b1.t from join_bar b1 join join_bar b2 using (id)) ss + on join_foo.id < ss.id + 1 and join_foo.id > ss.id - 1; +$$); + multibatch +------------ + t +(1 row) + +rollback to settings; +-- single-batch with rescan, parallel-oblivious +savepoint settings; +set enable_parallel_hash = off; +set parallel_leader_participation = off; +set min_parallel_table_scan_size = 0; +set parallel_setup_cost = 0; +set parallel_tuple_cost = 0; +set max_parallel_workers_per_gather = 2; +set enable_material = off; +set enable_mergejoin = off; +set work_mem = '4MB'; +explain (costs off) + select count(*) from join_foo + left join (select b1.id, b1.t from join_bar b1 join join_bar b2 using (id)) ss + on join_foo.id < ss.id + 1 and join_foo.id > ss.id - 1; + QUERY PLAN +------------------------------------------------------------------------------------ + Aggregate + -> Nested Loop Left Join + Join Filter: ((join_foo.id < (b1.id + 1)) AND (join_foo.id > (b1.id - 1))) + -> Seq Scan on join_foo + -> Gather + Workers Planned: 2 + -> Hash Join + Hash Cond: (b1.id = b2.id) + -> Parallel Seq Scan on join_bar b1 + -> Hash + -> Seq Scan on join_bar b2 +(11 rows) + +select count(*) from join_foo + left join (select b1.id, b1.t from join_bar b1 join join_bar b2 using (id)) ss + on join_foo.id < ss.id + 1 and join_foo.id > ss.id - 1; + count +------- + 3 +(1 row) + +select final > 1 as multibatch + from hash_join_batches( +$$ + select count(*) from join_foo + left join (select b1.id, b1.t from join_bar b1 join join_bar b2 using (id)) ss + on join_foo.id < ss.id + 1 and join_foo.id > ss.id - 1; +$$); + multibatch +------------ + f +(1 row) + +rollback to settings; +-- multi-batch with rescan, parallel-aware +savepoint settings; +set enable_parallel_hash = on; +set parallel_leader_participation = off; +set min_parallel_table_scan_size = 0; +set parallel_setup_cost = 0; +set parallel_tuple_cost = 0; +set max_parallel_workers_per_gather = 2; +set enable_material = off; +set enable_mergejoin = off; +set 
work_mem = '64kB'; +explain (costs off) + select count(*) from join_foo + left join (select b1.id, b1.t from join_bar b1 join join_bar b2 using (id)) ss + on join_foo.id < ss.id + 1 and join_foo.id > ss.id - 1; + QUERY PLAN +------------------------------------------------------------------------------------ + Aggregate + -> Nested Loop Left Join + Join Filter: ((join_foo.id < (b1.id + 1)) AND (join_foo.id > (b1.id - 1))) + -> Seq Scan on join_foo + -> Gather + Workers Planned: 2 + -> Parallel Hash Join + Hash Cond: (b1.id = b2.id) + -> Parallel Seq Scan on join_bar b1 + -> Parallel Hash + -> Parallel Seq Scan on join_bar b2 +(11 rows) + +select count(*) from join_foo + left join (select b1.id, b1.t from join_bar b1 join join_bar b2 using (id)) ss + on join_foo.id < ss.id + 1 and join_foo.id > ss.id - 1; + count +------- + 3 +(1 row) + +select final > 1 as multibatch + from hash_join_batches( +$$ + select count(*) from join_foo + left join (select b1.id, b1.t from join_bar b1 join join_bar b2 using (id)) ss + on join_foo.id < ss.id + 1 and join_foo.id > ss.id - 1; +$$); + multibatch +------------ + t +(1 row) + +rollback to settings; +-- single-batch with rescan, parallel-aware +savepoint settings; +set enable_parallel_hash = on; +set parallel_leader_participation = off; +set min_parallel_table_scan_size = 0; +set parallel_setup_cost = 0; +set parallel_tuple_cost = 0; +set max_parallel_workers_per_gather = 2; +set enable_material = off; +set enable_mergejoin = off; +set work_mem = '4MB'; +explain (costs off) + select count(*) from join_foo + left join (select b1.id, b1.t from join_bar b1 join join_bar b2 using (id)) ss + on join_foo.id < ss.id + 1 and join_foo.id > ss.id - 1; + QUERY PLAN +------------------------------------------------------------------------------------ + Aggregate + -> Nested Loop Left Join + Join Filter: ((join_foo.id < (b1.id + 1)) AND (join_foo.id > (b1.id - 1))) + -> Seq Scan on join_foo + -> Gather + Workers Planned: 2 + -> Parallel Hash Join + Hash Cond: (b1.id = b2.id) + -> Parallel Seq Scan on join_bar b1 + -> Parallel Hash + -> Parallel Seq Scan on join_bar b2 +(11 rows) + +select count(*) from join_foo + left join (select b1.id, b1.t from join_bar b1 join join_bar b2 using (id)) ss + on join_foo.id < ss.id + 1 and join_foo.id > ss.id - 1; + count +------- + 3 +(1 row) + +select final > 1 as multibatch + from hash_join_batches( +$$ + select count(*) from join_foo + left join (select b1.id, b1.t from join_bar b1 join join_bar b2 using (id)) ss + on join_foo.id < ss.id + 1 and join_foo.id > ss.id - 1; +$$); + multibatch +------------ + f +(1 row) + +rollback to settings; +-- A full outer join where every record is matched. 
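For orientation, a toy version of the two full-join scenarios exercised below, using ad-hoc VALUES lists rather than the test's "simple" table:

-- Illustrative only: when every row matches, a full join returns one
-- row per matched pair ...
select count(*) from (values (1), (2)) a(x)
  full outer join (values (1), (2)) b(y) on a.x = b.y;   -- 2
-- ... and when nothing matches, both inputs are emitted NULL-padded,
-- so the count is the sum of the two sides.
select count(*) from (values (1), (2)) a(x)
  full outer join (values (3), (4)) b(y) on a.x = b.y;   -- 4

This mirrors the 20000 vs. 40000 counts asserted in the tests that follow.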
+-- non-parallel +savepoint settings; +set local max_parallel_workers_per_gather = 0; +explain (costs off) + select count(*) from simple r full outer join simple s using (id); + QUERY PLAN +---------------------------------------- + Aggregate + -> Hash Full Join + Hash Cond: (r.id = s.id) + -> Seq Scan on simple r + -> Hash + -> Seq Scan on simple s +(6 rows) + +select count(*) from simple r full outer join simple s using (id); + count +------- + 20000 +(1 row) + +rollback to settings; +-- parallelism not possible with parallel-oblivious outer hash join +savepoint settings; +set local max_parallel_workers_per_gather = 2; +explain (costs off) + select count(*) from simple r full outer join simple s using (id); + QUERY PLAN +---------------------------------------- + Aggregate + -> Hash Full Join + Hash Cond: (r.id = s.id) + -> Seq Scan on simple r + -> Hash + -> Seq Scan on simple s +(6 rows) + +select count(*) from simple r full outer join simple s using (id); + count +------- + 20000 +(1 row) + +rollback to settings; +-- An full outer join where every record is not matched. +-- non-parallel +savepoint settings; +set local max_parallel_workers_per_gather = 0; +explain (costs off) + select count(*) from simple r full outer join simple s on (r.id = 0 - s.id); + QUERY PLAN +---------------------------------------- + Aggregate + -> Hash Full Join + Hash Cond: ((0 - s.id) = r.id) + -> Seq Scan on simple s + -> Hash + -> Seq Scan on simple r +(6 rows) + +select count(*) from simple r full outer join simple s on (r.id = 0 - s.id); + count +------- + 40000 +(1 row) + +rollback to settings; +-- parallelism not possible with parallel-oblivious outer hash join +savepoint settings; +set local max_parallel_workers_per_gather = 2; +explain (costs off) + select count(*) from simple r full outer join simple s on (r.id = 0 - s.id); + QUERY PLAN +---------------------------------------- + Aggregate + -> Hash Full Join + Hash Cond: ((0 - s.id) = r.id) + -> Seq Scan on simple s + -> Hash + -> Seq Scan on simple r +(6 rows) + +select count(*) from simple r full outer join simple s on (r.id = 0 - s.id); + count +------- + 40000 +(1 row) + +rollback to settings; +-- exercise special code paths for huge tuples (note use of non-strict +-- expression and left join required to get the detoasted tuple into +-- the hash table) +-- parallel with parallel-aware hash join (hits ExecParallelHashLoadTuple and +-- sts_puttuple oversized tuple cases because it's multi-batch) +savepoint settings; +set max_parallel_workers_per_gather = 2; +set enable_parallel_hash = on; +set work_mem = '128kB'; +explain (costs off) + select length(max(s.t)) + from wide left join (select id, coalesce(t, '') || '' as t from wide) s using (id); + QUERY PLAN +---------------------------------------------------------------- + Finalize Aggregate + -> Gather + Workers Planned: 2 + -> Partial Aggregate + -> Parallel Hash Left Join + Hash Cond: (wide.id = wide_1.id) + -> Parallel Seq Scan on wide + -> Parallel Hash + -> Parallel Seq Scan on wide wide_1 +(9 rows) + +select length(max(s.t)) +from wide left join (select id, coalesce(t, '') || '' as t from wide) s using (id); + length +-------- + 320000 +(1 row) + +select final > 1 as multibatch + from hash_join_batches( +$$ + select length(max(s.t)) + from wide left join (select id, coalesce(t, '') || '' as t from wide) s using (id); +$$); + multibatch +------------ + t +(1 row) + +rollback to settings; +rollback; diff --git a/src/test/regress/expected/json.out b/src/test/regress/expected/json.out index 
b25e20ca20..6020feeea4 100644 --- a/src/test/regress/expected/json.out +++ b/src/test/regress/expected/json.out @@ -1316,6 +1316,8 @@ create type jpop as (a text, b int, c timestamp); CREATE DOMAIN js_int_not_null AS int NOT NULL; CREATE DOMAIN js_int_array_1d AS int[] CHECK(array_length(VALUE, 1) = 3); CREATE DOMAIN js_int_array_2d AS int[][] CHECK(array_length(VALUE, 2) = 3); +create type j_unordered_pair as (x int, y int); +create domain j_ordered_pair as j_unordered_pair check((value).x <= (value).y); CREATE TYPE jsrec AS ( i int, ia _int4, @@ -1399,8 +1401,8 @@ SELECT ia FROM json_populate_record(NULL::jsrec, '{"ia": null}') q; (1 row) SELECT ia FROM json_populate_record(NULL::jsrec, '{"ia": 123}') q; -ERROR: expected json array -HINT: see the value of key "ia" +ERROR: expected JSON array +HINT: See the value of key "ia". SELECT ia FROM json_populate_record(NULL::jsrec, '{"ia": [1, "2", null, 4]}') q; ia -------------- @@ -1414,10 +1416,10 @@ SELECT ia FROM json_populate_record(NULL::jsrec, '{"ia": [[1, 2], [3, 4]]}') q; (1 row) SELECT ia FROM json_populate_record(NULL::jsrec, '{"ia": [[1], 2]}') q; -ERROR: expected json array -HINT: see the array element [1] of key "ia" +ERROR: expected JSON array +HINT: See the array element [1] of key "ia". SELECT ia FROM json_populate_record(NULL::jsrec, '{"ia": [[1], [2, 3]]}') q; -ERROR: malformed json array +ERROR: malformed JSON array DETAIL: Multidimensional arrays must have sub-arrays with matching dimensions. SELECT ia FROM json_populate_record(NULL::jsrec, '{"ia": "{1,2,3}"}') q; ia @@ -1432,8 +1434,8 @@ SELECT ia1 FROM json_populate_record(NULL::jsrec, '{"ia1": null}') q; (1 row) SELECT ia1 FROM json_populate_record(NULL::jsrec, '{"ia1": 123}') q; -ERROR: expected json array -HINT: see the value of key "ia1" +ERROR: expected JSON array +HINT: See the value of key "ia1". SELECT ia1 FROM json_populate_record(NULL::jsrec, '{"ia1": [1, "2", null, 4]}') q; ia1 -------------- @@ -1453,8 +1455,8 @@ SELECT ia1d FROM json_populate_record(NULL::jsrec, '{"ia1d": null}') q; (1 row) SELECT ia1d FROM json_populate_record(NULL::jsrec, '{"ia1d": 123}') q; -ERROR: expected json array -HINT: see the value of key "ia1d" +ERROR: expected JSON array +HINT: See the value of key "ia1d". SELECT ia1d FROM json_populate_record(NULL::jsrec, '{"ia1d": [1, "2", null, 4]}') q; ERROR: value for domain js_int_array_1d violates check constraint "js_int_array_1d_check" SELECT ia1d FROM json_populate_record(NULL::jsrec, '{"ia1d": [1, "2", null]}') q; @@ -1482,11 +1484,11 @@ SELECT ia2 FROM json_populate_record(NULL::jsrec, '{"ia2": [[], []]}') q; (1 row) SELECT ia2 FROM json_populate_record(NULL::jsrec, '{"ia2": [[1, 2], [3]]}') q; -ERROR: malformed json array +ERROR: malformed JSON array DETAIL: Multidimensional arrays must have sub-arrays with matching dimensions. SELECT ia2 FROM json_populate_record(NULL::jsrec, '{"ia2": [[1, 2], 3, 4]}') q; -ERROR: expected json array -HINT: see the array element [1] of key "ia2" +ERROR: expected JSON array +HINT: See the array element [1] of key "ia2". 
SELECT ia2d FROM json_populate_record(NULL::jsrec, '{"ia2d": [[1, "2"], [null, 4]]}') q; ERROR: value for domain js_int_array_2d violates check constraint "js_int_array_2d_check" SELECT ia2d FROM json_populate_record(NULL::jsrec, '{"ia2d": [[1, "2", 3], [null, 5, 6]]}') q; @@ -1526,7 +1528,7 @@ SELECT ia3 FROM json_populate_record(NULL::jsrec, '{"ia3": [ [[1, 2], [3, 4]], [ (1 row) SELECT ia3 FROM json_populate_record(NULL::jsrec, '{"ia3": [ [[1, 2], [3, 4]], [[5, 6], [7, 8], [9, 10]] ]}') q; -ERROR: malformed json array +ERROR: malformed JSON array DETAIL: Multidimensional arrays must have sub-arrays with matching dimensions. SELECT ta FROM json_populate_record(NULL::jsrec, '{"ta": null}') q; ta @@ -1535,8 +1537,8 @@ SELECT ta FROM json_populate_record(NULL::jsrec, '{"ta": null}') q; (1 row) SELECT ta FROM json_populate_record(NULL::jsrec, '{"ta": 123}') q; -ERROR: expected json array -HINT: see the value of key "ta" +ERROR: expected JSON array +HINT: See the value of key "ta". SELECT ta FROM json_populate_record(NULL::jsrec, '{"ta": [1, "2", null, 4]}') q; ta -------------- @@ -1544,8 +1546,8 @@ SELECT ta FROM json_populate_record(NULL::jsrec, '{"ta": [1, "2", null, 4]}') q; (1 row) SELECT ta FROM json_populate_record(NULL::jsrec, '{"ta": [[1, 2, 3], {"k": "v"}]}') q; -ERROR: expected json array -HINT: see the array element [1] of key "ta" +ERROR: expected JSON array +HINT: See the array element [1] of key "ta". SELECT c FROM json_populate_record(NULL::jsrec, '{"c": null}') q; c --- @@ -1573,8 +1575,8 @@ SELECT ca FROM json_populate_record(NULL::jsrec, '{"ca": null}') q; (1 row) SELECT ca FROM json_populate_record(NULL::jsrec, '{"ca": 123}') q; -ERROR: expected json array -HINT: see the value of key "ca" +ERROR: expected JSON array +HINT: See the value of key "ca". SELECT ca FROM json_populate_record(NULL::jsrec, '{"ca": [1, "2", null, 4]}') q; ca ----------------------------------------------- @@ -1584,8 +1586,8 @@ SELECT ca FROM json_populate_record(NULL::jsrec, '{"ca": [1, "2", null, 4]}') q; SELECT ca FROM json_populate_record(NULL::jsrec, '{"ca": ["aaaaaaaaaaaaaaaa"]}') q; ERROR: value too long for type character(10) SELECT ca FROM json_populate_record(NULL::jsrec, '{"ca": [[1, 2, 3], {"k": "v"}]}') q; -ERROR: expected json array -HINT: see the array element [1] of key "ca" +ERROR: expected JSON array +HINT: See the array element [1] of key "ca". SELECT js FROM json_populate_record(NULL::jsrec, '{"js": null}') q; js ---- @@ -1677,8 +1679,8 @@ SELECT jsa FROM json_populate_record(NULL::jsrec, '{"jsa": null}') q; (1 row) SELECT jsa FROM json_populate_record(NULL::jsrec, '{"jsa": 123}') q; -ERROR: expected json array -HINT: see the value of key "jsa" +ERROR: expected JSON array +HINT: See the value of key "jsa". SELECT jsa FROM json_populate_record(NULL::jsrec, '{"jsa": [1, "2", null, 4]}') q; jsa -------------------- @@ -1708,8 +1710,8 @@ SELECT rec FROM json_populate_record(NULL::jsrec, '{"rec": "(abc,42,01.02.2003)" (1 row) SELECT reca FROM json_populate_record(NULL::jsrec, '{"reca": 123}') q; -ERROR: expected json array -HINT: see the value of key "reca" +ERROR: expected JSON array +HINT: See the value of key "reca". 
SELECT reca FROM json_populate_record(NULL::jsrec, '{"reca": [1, 2]}') q; ERROR: cannot call populate_composite on a scalar SELECT reca FROM json_populate_record(NULL::jsrec, '{"reca": [{"a": "abc", "b": 456}, null, {"c": "01.02.2003", "x": 43.2}]}') q; @@ -1740,6 +1742,30 @@ SELECT rec FROM json_populate_record( (abc,3,"Thu Jan 02 00:00:00 2003") (1 row) +-- anonymous record type +SELECT json_populate_record(null::record, '{"x": 0, "y": 1}'); +ERROR: record type has not been registered +SELECT json_populate_record(row(1,2), '{"f1": 0, "f2": 1}'); + json_populate_record +---------------------- + (0,1) +(1 row) + +-- composite domain +SELECT json_populate_record(null::j_ordered_pair, '{"x": 0, "y": 1}'); + json_populate_record +---------------------- + (0,1) +(1 row) + +SELECT json_populate_record(row(1,2)::j_ordered_pair, '{"x": 0}'); + json_populate_record +---------------------- + (0,2) +(1 row) + +SELECT json_populate_record(row(1,2)::j_ordered_pair, '{"x": 1, "y": 0}'); +ERROR: value for domain j_ordered_pair violates check constraint "j_ordered_pair_check" -- populate_recordset select * from json_populate_recordset(null::jpop,'[{"a":"blurfl","x":43.2},{"b":3,"c":"2012-01-20 10:42:53"}]') q; a | b | c @@ -1806,6 +1832,54 @@ select * from json_populate_recordset(row('def',99,null)::jpop,'[{"a":[100,200,3 {"z":true} | 3 | Fri Jan 20 10:42:53 2012 (2 rows) +-- anonymous record type +SELECT json_populate_recordset(null::record, '[{"x": 0, "y": 1}]'); +ERROR: record type has not been registered +SELECT json_populate_recordset(row(1,2), '[{"f1": 0, "f2": 1}]'); + json_populate_recordset +------------------------- + (0,1) +(1 row) + +SELECT i, json_populate_recordset(row(i,50), '[{"f1":"42"},{"f2":"43"}]') +FROM (VALUES (1),(2)) v(i); + i | json_populate_recordset +---+------------------------- + 1 | (42,50) + 1 | (1,43) + 2 | (42,50) + 2 | (2,43) +(4 rows) + +-- composite domain +SELECT json_populate_recordset(null::j_ordered_pair, '[{"x": 0, "y": 1}]'); + json_populate_recordset +------------------------- + (0,1) +(1 row) + +SELECT json_populate_recordset(row(1,2)::j_ordered_pair, '[{"x": 0}, {"y": 3}]'); + json_populate_recordset +------------------------- + (0,2) + (1,3) +(2 rows) + +SELECT json_populate_recordset(row(1,2)::j_ordered_pair, '[{"x": 1, "y": 0}]'); +ERROR: value for domain j_ordered_pair violates check constraint "j_ordered_pair_check" +-- negative cases where the wrong record type is supplied +select * from json_populate_recordset(row(0::int),'[{"a":"1","b":"2"},{"a":"3"}]') q (a text, b text); +ERROR: function return row and query-specified return row do not match +DETAIL: Returned row contains 1 attribute, but query expects 2. +select * from json_populate_recordset(row(0::int,0::int),'[{"a":"1","b":"2"},{"a":"3"}]') q (a text, b text); +ERROR: function return row and query-specified return row do not match +DETAIL: Returned type integer at ordinal position 1, but query expects text. +select * from json_populate_recordset(row(0::int,0::int,0::int),'[{"a":"1","b":"2"},{"a":"3"}]') q (a text, b text); +ERROR: function return row and query-specified return row do not match +DETAIL: Returned row contains 3 attributes, but query expects 2. +select * from json_populate_recordset(row(1000000000::int,50::int),'[{"b":"2"},{"a":"3"}]') q (a text, b text); +ERROR: function return row and query-specified return row do not match +DETAIL: Returned type integer at ordinal position 1, but query expects text. 
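The composite-domain cases above rely on json_populate_record() filling any keys missing from the JSON out of the supplied base row and then re-checking the domain constraint on the assembled value. A compact sketch of that behaviour, reusing the j_ordered_pair domain created earlier in this file (the results shown as comments are what the cases above imply, not separately captured test output):

-- The "y" field comes from the base row; the check (x <= y) is
-- re-validated on the result.
SELECT json_populate_record(row(5, 10)::j_ordered_pair, '{"x": 7}');
-- expected: (7,10)
SELECT json_populate_record(row(5, 10)::j_ordered_pair, '{"x": 11}');
-- expected: ERROR, value for domain j_ordered_pair violates its check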
-- test type info caching in json_populate_record() CREATE TEMP TABLE jspoptest (js json); INSERT INTO jspoptest @@ -1828,6 +1902,8 @@ DROP TYPE jsrec_i_not_null; DROP DOMAIN js_int_not_null; DROP DOMAIN js_int_array_1d; DROP DOMAIN js_int_array_2d; +DROP DOMAIN j_ordered_pair; +DROP TYPE j_unordered_pair; --json_typeof() function select value, json_typeof(value) from (values (json '123.4'), @@ -1864,6 +1940,54 @@ SELECT json_build_array('a',1,'b',1.2,'c',true,'d',null,'e',json '{"x": 3, "y": ["a", 1, "b", 1.2, "c", true, "d", null, "e", {"x": 3, "y": [1,2,3]}] (1 row) +SELECT json_build_array('a', NULL); -- ok + json_build_array +------------------ + ["a", null] +(1 row) + +SELECT json_build_array(VARIADIC NULL::text[]); -- ok + json_build_array +------------------ + +(1 row) + +SELECT json_build_array(VARIADIC '{}'::text[]); -- ok + json_build_array +------------------ + [] +(1 row) + +SELECT json_build_array(VARIADIC '{a,b,c}'::text[]); -- ok + json_build_array +------------------ + ["a", "b", "c"] +(1 row) + +SELECT json_build_array(VARIADIC ARRAY['a', NULL]::text[]); -- ok + json_build_array +------------------ + ["a", null] +(1 row) + +SELECT json_build_array(VARIADIC '{1,2,3,4}'::text[]); -- ok + json_build_array +---------------------- + ["1", "2", "3", "4"] +(1 row) + +SELECT json_build_array(VARIADIC '{1,2,3,4}'::int[]); -- ok + json_build_array +------------------ + [1, 2, 3, 4] +(1 row) + +SELECT json_build_array(VARIADIC '{{1,4},{2,5},{3,6}}'::int[][]); -- ok + json_build_array +-------------------- + [1, 4, 2, 5, 3, 6] +(1 row) + SELECT json_build_object('a',1,'b',1.2,'c',true,'d',null,'e',json '{"x": 3, "y": [1,2,3]}'); json_build_object ---------------------------------------------------------------------------- @@ -1879,6 +2003,65 @@ SELECT json_build_object( {"a" : {"b" : false, "c" : 99}, "d" : {"e" : [9,8,7], "f" : {"relkind":"r","name":"pg_class"}}} (1 row) +SELECT json_build_object('{a,b,c}'::text[]); -- error +ERROR: argument list must have even number of elements +HINT: The arguments of json_build_object() must consist of alternating keys and values. +SELECT json_build_object('{a,b,c}'::text[], '{d,e,f}'::text[]); -- error, key cannot be array +ERROR: key value must be scalar, not array, composite, or json +SELECT json_build_object('a', 'b', 'c'); -- error +ERROR: argument list must have even number of elements +HINT: The arguments of json_build_object() must consist of alternating keys and values. +SELECT json_build_object(NULL, 'a'); -- error, key cannot be NULL +ERROR: argument 1 cannot be null +HINT: Object keys should be text. +SELECT json_build_object('a', NULL); -- ok + json_build_object +------------------- + {"a" : null} +(1 row) + +SELECT json_build_object(VARIADIC NULL::text[]); -- ok + json_build_object +------------------- + +(1 row) + +SELECT json_build_object(VARIADIC '{}'::text[]); -- ok + json_build_object +------------------- + {} +(1 row) + +SELECT json_build_object(VARIADIC '{a,b,c}'::text[]); -- error +ERROR: argument list must have even number of elements +HINT: The arguments of json_build_object() must consist of alternating keys and values. +SELECT json_build_object(VARIADIC ARRAY['a', NULL]::text[]); -- ok + json_build_object +------------------- + {"a" : null} +(1 row) + +SELECT json_build_object(VARIADIC ARRAY[NULL, 'a']::text[]); -- error, key cannot be NULL +ERROR: argument 1 cannot be null +HINT: Object keys should be text. 
+SELECT json_build_object(VARIADIC '{1,2,3,4}'::text[]); -- ok + json_build_object +------------------------ + {"1" : "2", "3" : "4"} +(1 row) + +SELECT json_build_object(VARIADIC '{1,2,3,4}'::int[]); -- ok + json_build_object +-------------------- + {"1" : 2, "3" : 4} +(1 row) + +SELECT json_build_object(VARIADIC '{{1,4},{2,5},{3,6}}'::int[][]); -- ok + json_build_object +----------------------------- + {"1" : 4, "2" : 5, "3" : 6} +(1 row) + -- empty objects/arrays SELECT json_build_array(); json_build_array @@ -2042,8 +2225,8 @@ select * from json_to_record('{"ia": null}') as x(ia _int4); (1 row) select * from json_to_record('{"ia": 123}') as x(ia _int4); -ERROR: expected json array -HINT: see the value of key "ia" +ERROR: expected JSON array +HINT: See the value of key "ia". select * from json_to_record('{"ia": [1, "2", null, 4]}') as x(ia _int4); ia -------------- @@ -2057,10 +2240,10 @@ select * from json_to_record('{"ia": [[1, 2], [3, 4]]}') as x(ia _int4); (1 row) select * from json_to_record('{"ia": [[1], 2]}') as x(ia _int4); -ERROR: expected json array -HINT: see the array element [1] of key "ia" +ERROR: expected JSON array +HINT: See the array element [1] of key "ia". select * from json_to_record('{"ia": [[1], [2, 3]]}') as x(ia _int4); -ERROR: malformed json array +ERROR: malformed JSON array DETAIL: Multidimensional arrays must have sub-arrays with matching dimensions. select * from json_to_record('{"ia2": [1, 2, 3]}') as x(ia2 int[][]); ia2 @@ -2151,6 +2334,86 @@ select to_tsvector('english', '{"a": "aaa in bbb ddd ccc", "b": ["the eee fff gg 'aaa':1 'bbb':3 'ccc':5 'ddd':4 'eee':8 'fff':9 'ggg':10 'hhh':12 'iii':13 (1 row) +-- json to tsvector with numeric values +select to_tsvector('english', '{"a": "aaa in bbb ddd ccc", "b": 123, "c": 456}'::json); + to_tsvector +--------------------------------- + 'aaa':1 'bbb':3 'ccc':5 'ddd':4 +(1 row) + +-- json_to_tsvector +select json_to_tsvector('english', '{"a": "aaa in bbb", "b": 123, "c": 456, "d": true, "f": false, "g": null}'::json, '"all"'); + json_to_tsvector +---------------------------------------------------------------------------------------- + '123':8 '456':12 'aaa':2 'b':6 'bbb':4 'c':10 'd':14 'f':18 'fals':20 'g':22 'true':16 +(1 row) + +select json_to_tsvector('english', '{"a": "aaa in bbb", "b": 123, "c": 456, "d": true, "f": false, "g": null}'::json, '"key"'); + json_to_tsvector +-------------------------------- + 'b':2 'c':4 'd':6 'f':8 'g':10 +(1 row) + +select json_to_tsvector('english', '{"a": "aaa in bbb", "b": 123, "c": 456, "d": true, "f": false, "g": null}'::json, '"string"'); + json_to_tsvector +------------------ + 'aaa':1 'bbb':3 +(1 row) + +select json_to_tsvector('english', '{"a": "aaa in bbb", "b": 123, "c": 456, "d": true, "f": false, "g": null}'::json, '"numeric"'); + json_to_tsvector +------------------ + '123':1 '456':3 +(1 row) + +select json_to_tsvector('english', '{"a": "aaa in bbb", "b": 123, "c": 456, "d": true, "f": false, "g": null}'::json, '"boolean"'); + json_to_tsvector +------------------- + 'fals':3 'true':1 +(1 row) + +select json_to_tsvector('english', '{"a": "aaa in bbb", "b": 123, "c": 456, "d": true, "f": false, "g": null}'::json, '["string", "numeric"]'); + json_to_tsvector +--------------------------------- + '123':5 '456':7 'aaa':1 'bbb':3 +(1 row) + +select json_to_tsvector('english', '{"a": "aaa in bbb", "b": 123, "c": 456, "d": true, "f": false, "g": null}'::json, '"all"'); + json_to_tsvector 
+---------------------------------------------------------------------------------------- + '123':8 '456':12 'aaa':2 'b':6 'bbb':4 'c':10 'd':14 'f':18 'fals':20 'g':22 'true':16 +(1 row) + +select json_to_tsvector('english', '{"a": "aaa in bbb", "b": 123, "c": 456, "d": true, "f": false, "g": null}'::json, '"key"'); + json_to_tsvector +-------------------------------- + 'b':2 'c':4 'd':6 'f':8 'g':10 +(1 row) + +select json_to_tsvector('english', '{"a": "aaa in bbb", "b": 123, "c": 456, "d": true, "f": false, "g": null}'::json, '"string"'); + json_to_tsvector +------------------ + 'aaa':1 'bbb':3 +(1 row) + +select json_to_tsvector('english', '{"a": "aaa in bbb", "b": 123, "c": 456, "d": true, "f": false, "g": null}'::json, '"numeric"'); + json_to_tsvector +------------------ + '123':1 '456':3 +(1 row) + +select json_to_tsvector('english', '{"a": "aaa in bbb", "b": 123, "c": 456, "d": true, "f": false, "g": null}'::json, '"boolean"'); + json_to_tsvector +------------------- + 'fals':3 'true':1 +(1 row) + +select json_to_tsvector('english', '{"a": "aaa in bbb", "b": 123, "c": 456, "d": true, "f": false, "g": null}'::json, '["string", "numeric"]'); + json_to_tsvector +--------------------------------- + '123':5 '456':7 'aaa':1 'bbb':3 +(1 row) + -- ts_vector corner cases select to_tsvector('""'::json); to_tsvector @@ -2176,6 +2439,48 @@ select to_tsvector('null'::json); (1 row) +-- json_to_tsvector corner cases +select json_to_tsvector('""'::json, '"all"'); + json_to_tsvector +------------------ + +(1 row) + +select json_to_tsvector('{}'::json, '"all"'); + json_to_tsvector +------------------ + +(1 row) + +select json_to_tsvector('[]'::json, '"all"'); + json_to_tsvector +------------------ + +(1 row) + +select json_to_tsvector('null'::json, '"all"'); + json_to_tsvector +------------------ + +(1 row) + +select json_to_tsvector('english', '{"a": "aaa in bbb", "b": 123, "c": 456, "d": true, "f": false, "g": null}'::json, '""'); +ERROR: wrong flag in flag array: "" +HINT: Possible values are: "string", "numeric", "boolean", "key", and "all" +select json_to_tsvector('english', '{"a": "aaa in bbb", "b": 123, "c": 456, "d": true, "f": false, "g": null}'::json, '{}'); +ERROR: wrong flag type, only arrays and scalars are allowed +select json_to_tsvector('english', '{"a": "aaa in bbb", "b": 123, "c": 456, "d": true, "f": false, "g": null}'::json, '[]'); + json_to_tsvector +------------------ + +(1 row) + +select json_to_tsvector('english', '{"a": "aaa in bbb", "b": 123, "c": 456, "d": true, "f": false, "g": null}'::json, 'null'); +ERROR: flag array element is not a string +HINT: Possible values are: "string", "numeric", "boolean", "key", and "all" +select json_to_tsvector('english', '{"a": "aaa in bbb", "b": 123, "c": 456, "d": true, "f": false, "g": null}'::json, '["all", null]'); +ERROR: flag array element is not a string +HINT: Possible values are: "string", "numeric", "boolean", "key", and "all" -- ts_headline for json select ts_headline('{"a": "aaa bbb", "b": {"c": "ccc ddd fff", "c1": "ccc1 ddd1"}, "d": ["ggg hhh", "iii jjj"]}'::json, tsquery('bbb & ddd & hhh')); ts_headline diff --git a/src/test/regress/expected/jsonb.out b/src/test/regress/expected/jsonb.out index 79547035bd..f045e08538 100644 --- a/src/test/regress/expected/jsonb.out +++ b/src/test/regress/expected/jsonb.out @@ -1345,6 +1345,54 @@ SELECT jsonb_build_array('a',1,'b',1.2,'c',true,'d',null,'e',json '{"x": 3, "y": ["a", 1, "b", 1.2, "c", true, "d", null, "e", {"x": 3, "y": [1, 2, 3]}] (1 row) +SELECT jsonb_build_array('a', 
NULL); -- ok + jsonb_build_array +------------------- + ["a", null] +(1 row) + +SELECT jsonb_build_array(VARIADIC NULL::text[]); -- ok + jsonb_build_array +------------------- + +(1 row) + +SELECT jsonb_build_array(VARIADIC '{}'::text[]); -- ok + jsonb_build_array +------------------- + [] +(1 row) + +SELECT jsonb_build_array(VARIADIC '{a,b,c}'::text[]); -- ok + jsonb_build_array +------------------- + ["a", "b", "c"] +(1 row) + +SELECT jsonb_build_array(VARIADIC ARRAY['a', NULL]::text[]); -- ok + jsonb_build_array +------------------- + ["a", null] +(1 row) + +SELECT jsonb_build_array(VARIADIC '{1,2,3,4}'::text[]); -- ok + jsonb_build_array +---------------------- + ["1", "2", "3", "4"] +(1 row) + +SELECT jsonb_build_array(VARIADIC '{1,2,3,4}'::int[]); -- ok + jsonb_build_array +------------------- + [1, 2, 3, 4] +(1 row) + +SELECT jsonb_build_array(VARIADIC '{{1,4},{2,5},{3,6}}'::int[][]); -- ok + jsonb_build_array +-------------------- + [1, 4, 2, 5, 3, 6] +(1 row) + SELECT jsonb_build_object('a',1,'b',1.2,'c',true,'d',null,'e',json '{"x": 3, "y": [1,2,3]}'); jsonb_build_object ------------------------------------------------------------------------- @@ -1360,6 +1408,63 @@ SELECT jsonb_build_object( {"a": {"b": false, "c": 99}, "d": {"e": [9, 8, 7], "f": {"name": "pg_class", "relkind": "r"}}} (1 row) +SELECT jsonb_build_object('{a,b,c}'::text[]); -- error +ERROR: argument list must have even number of elements +HINT: The arguments of jsonb_build_object() must consist of alternating keys and values. +SELECT jsonb_build_object('{a,b,c}'::text[], '{d,e,f}'::text[]); -- error, key cannot be array +ERROR: key value must be scalar, not array, composite, or json +SELECT jsonb_build_object('a', 'b', 'c'); -- error +ERROR: argument list must have even number of elements +HINT: The arguments of jsonb_build_object() must consist of alternating keys and values. +SELECT jsonb_build_object(NULL, 'a'); -- error, key cannot be NULL +ERROR: argument 1: key must not be null +SELECT jsonb_build_object('a', NULL); -- ok + jsonb_build_object +-------------------- + {"a": null} +(1 row) + +SELECT jsonb_build_object(VARIADIC NULL::text[]); -- ok + jsonb_build_object +-------------------- + +(1 row) + +SELECT jsonb_build_object(VARIADIC '{}'::text[]); -- ok + jsonb_build_object +-------------------- + {} +(1 row) + +SELECT jsonb_build_object(VARIADIC '{a,b,c}'::text[]); -- error +ERROR: argument list must have even number of elements +HINT: The arguments of jsonb_build_object() must consist of alternating keys and values. 
+SELECT jsonb_build_object(VARIADIC ARRAY['a', NULL]::text[]); -- ok + jsonb_build_object +-------------------- + {"a": null} +(1 row) + +SELECT jsonb_build_object(VARIADIC ARRAY[NULL, 'a']::text[]); -- error, key cannot be NULL +ERROR: argument 1: key must not be null +SELECT jsonb_build_object(VARIADIC '{1,2,3,4}'::text[]); -- ok + jsonb_build_object +---------------------- + {"1": "2", "3": "4"} +(1 row) + +SELECT jsonb_build_object(VARIADIC '{1,2,3,4}'::int[]); -- ok + jsonb_build_object +-------------------- + {"1": 2, "3": 4} +(1 row) + +SELECT jsonb_build_object(VARIADIC '{{1,4},{2,5},{3,6}}'::int[][]); -- ok + jsonb_build_object +-------------------------- + {"1": 4, "2": 5, "3": 6} +(1 row) + -- empty objects/arrays SELECT jsonb_build_array(); jsonb_build_array @@ -1900,6 +2005,8 @@ CREATE TYPE jbpop AS (a text, b int, c timestamp); CREATE DOMAIN jsb_int_not_null AS int NOT NULL; CREATE DOMAIN jsb_int_array_1d AS int[] CHECK(array_length(VALUE, 1) = 3); CREATE DOMAIN jsb_int_array_2d AS int[][] CHECK(array_length(VALUE, 2) = 3); +create type jb_unordered_pair as (x int, y int); +create domain jb_ordered_pair as jb_unordered_pair check((value).x <= (value).y); CREATE TYPE jsbrec AS ( i int, ia _int4, @@ -1983,8 +2090,8 @@ SELECT ia FROM jsonb_populate_record(NULL::jsbrec, '{"ia": null}') q; (1 row) SELECT ia FROM jsonb_populate_record(NULL::jsbrec, '{"ia": 123}') q; -ERROR: expected json array -HINT: see the value of key "ia" +ERROR: expected JSON array +HINT: See the value of key "ia". SELECT ia FROM jsonb_populate_record(NULL::jsbrec, '{"ia": [1, "2", null, 4]}') q; ia -------------- @@ -1998,10 +2105,10 @@ SELECT ia FROM jsonb_populate_record(NULL::jsbrec, '{"ia": [[1, 2], [3, 4]]}') q (1 row) SELECT ia FROM jsonb_populate_record(NULL::jsbrec, '{"ia": [[1], 2]}') q; -ERROR: expected json array -HINT: see the array element [1] of key "ia" +ERROR: expected JSON array +HINT: See the array element [1] of key "ia". SELECT ia FROM jsonb_populate_record(NULL::jsbrec, '{"ia": [[1], [2, 3]]}') q; -ERROR: malformed json array +ERROR: malformed JSON array DETAIL: Multidimensional arrays must have sub-arrays with matching dimensions. SELECT ia FROM jsonb_populate_record(NULL::jsbrec, '{"ia": "{1,2,3}"}') q; ia @@ -2016,8 +2123,8 @@ SELECT ia1 FROM jsonb_populate_record(NULL::jsbrec, '{"ia1": null}') q; (1 row) SELECT ia1 FROM jsonb_populate_record(NULL::jsbrec, '{"ia1": 123}') q; -ERROR: expected json array -HINT: see the value of key "ia1" +ERROR: expected JSON array +HINT: See the value of key "ia1". SELECT ia1 FROM jsonb_populate_record(NULL::jsbrec, '{"ia1": [1, "2", null, 4]}') q; ia1 -------------- @@ -2037,8 +2144,8 @@ SELECT ia1d FROM jsonb_populate_record(NULL::jsbrec, '{"ia1d": null}') q; (1 row) SELECT ia1d FROM jsonb_populate_record(NULL::jsbrec, '{"ia1d": 123}') q; -ERROR: expected json array -HINT: see the value of key "ia1d" +ERROR: expected JSON array +HINT: See the value of key "ia1d". SELECT ia1d FROM jsonb_populate_record(NULL::jsbrec, '{"ia1d": [1, "2", null, 4]}') q; ERROR: value for domain jsb_int_array_1d violates check constraint "jsb_int_array_1d_check" SELECT ia1d FROM jsonb_populate_record(NULL::jsbrec, '{"ia1d": [1, "2", null]}') q; @@ -2066,11 +2173,11 @@ SELECT ia2 FROM jsonb_populate_record(NULL::jsbrec, '{"ia2": [[], []]}') q; (1 row) SELECT ia2 FROM jsonb_populate_record(NULL::jsbrec, '{"ia2": [[1, 2], [3]]}') q; -ERROR: malformed json array +ERROR: malformed JSON array DETAIL: Multidimensional arrays must have sub-arrays with matching dimensions. 
SELECT ia2 FROM jsonb_populate_record(NULL::jsbrec, '{"ia2": [[1, 2], 3, 4]}') q; -ERROR: expected json array -HINT: see the array element [1] of key "ia2" +ERROR: expected JSON array +HINT: See the array element [1] of key "ia2". SELECT ia2d FROM jsonb_populate_record(NULL::jsbrec, '{"ia2d": [[1, "2"], [null, 4]]}') q; ERROR: value for domain jsb_int_array_2d violates check constraint "jsb_int_array_2d_check" SELECT ia2d FROM jsonb_populate_record(NULL::jsbrec, '{"ia2d": [[1, "2", 3], [null, 5, 6]]}') q; @@ -2110,7 +2217,7 @@ SELECT ia3 FROM jsonb_populate_record(NULL::jsbrec, '{"ia3": [ [[1, 2], [3, 4]], (1 row) SELECT ia3 FROM jsonb_populate_record(NULL::jsbrec, '{"ia3": [ [[1, 2], [3, 4]], [[5, 6], [7, 8], [9, 10]] ]}') q; -ERROR: malformed json array +ERROR: malformed JSON array DETAIL: Multidimensional arrays must have sub-arrays with matching dimensions. SELECT ta FROM jsonb_populate_record(NULL::jsbrec, '{"ta": null}') q; ta @@ -2119,8 +2226,8 @@ SELECT ta FROM jsonb_populate_record(NULL::jsbrec, '{"ta": null}') q; (1 row) SELECT ta FROM jsonb_populate_record(NULL::jsbrec, '{"ta": 123}') q; -ERROR: expected json array -HINT: see the value of key "ta" +ERROR: expected JSON array +HINT: See the value of key "ta". SELECT ta FROM jsonb_populate_record(NULL::jsbrec, '{"ta": [1, "2", null, 4]}') q; ta -------------- @@ -2128,8 +2235,8 @@ SELECT ta FROM jsonb_populate_record(NULL::jsbrec, '{"ta": [1, "2", null, 4]}') (1 row) SELECT ta FROM jsonb_populate_record(NULL::jsbrec, '{"ta": [[1, 2, 3], {"k": "v"}]}') q; -ERROR: expected json array -HINT: see the array element [1] of key "ta" +ERROR: expected JSON array +HINT: See the array element [1] of key "ta". SELECT c FROM jsonb_populate_record(NULL::jsbrec, '{"c": null}') q; c --- @@ -2157,8 +2264,8 @@ SELECT ca FROM jsonb_populate_record(NULL::jsbrec, '{"ca": null}') q; (1 row) SELECT ca FROM jsonb_populate_record(NULL::jsbrec, '{"ca": 123}') q; -ERROR: expected json array -HINT: see the value of key "ca" +ERROR: expected JSON array +HINT: See the value of key "ca". SELECT ca FROM jsonb_populate_record(NULL::jsbrec, '{"ca": [1, "2", null, 4]}') q; ca ----------------------------------------------- @@ -2168,8 +2275,8 @@ SELECT ca FROM jsonb_populate_record(NULL::jsbrec, '{"ca": [1, "2", null, 4]}') SELECT ca FROM jsonb_populate_record(NULL::jsbrec, '{"ca": ["aaaaaaaaaaaaaaaa"]}') q; ERROR: value too long for type character(10) SELECT ca FROM jsonb_populate_record(NULL::jsbrec, '{"ca": [[1, 2, 3], {"k": "v"}]}') q; -ERROR: expected json array -HINT: see the array element [1] of key "ca" +ERROR: expected JSON array +HINT: See the array element [1] of key "ca". SELECT js FROM jsonb_populate_record(NULL::jsbrec, '{"js": null}') q; js ---- @@ -2261,8 +2368,8 @@ SELECT jsa FROM jsonb_populate_record(NULL::jsbrec, '{"jsa": null}') q; (1 row) SELECT jsa FROM jsonb_populate_record(NULL::jsbrec, '{"jsa": 123}') q; -ERROR: expected json array -HINT: see the value of key "jsa" +ERROR: expected JSON array +HINT: See the value of key "jsa". SELECT jsa FROM jsonb_populate_record(NULL::jsbrec, '{"jsa": [1, "2", null, 4]}') q; jsa -------------------- @@ -2292,8 +2399,8 @@ SELECT rec FROM jsonb_populate_record(NULL::jsbrec, '{"rec": "(abc,42,01.02.2003 (1 row) SELECT reca FROM jsonb_populate_record(NULL::jsbrec, '{"reca": 123}') q; -ERROR: expected json array -HINT: see the value of key "reca" +ERROR: expected JSON array +HINT: See the value of key "reca". 
SELECT reca FROM jsonb_populate_record(NULL::jsbrec, '{"reca": [1, 2]}') q; ERROR: cannot call populate_composite on a scalar SELECT reca FROM jsonb_populate_record(NULL::jsbrec, '{"reca": [{"a": "abc", "b": 456}, null, {"c": "01.02.2003", "x": 43.2}]}') q; @@ -2324,6 +2431,30 @@ SELECT rec FROM jsonb_populate_record( (abc,3,"Thu Jan 02 00:00:00 2003") (1 row) +-- anonymous record type +SELECT jsonb_populate_record(null::record, '{"x": 0, "y": 1}'); +ERROR: record type has not been registered +SELECT jsonb_populate_record(row(1,2), '{"f1": 0, "f2": 1}'); + jsonb_populate_record +----------------------- + (0,1) +(1 row) + +-- composite domain +SELECT jsonb_populate_record(null::jb_ordered_pair, '{"x": 0, "y": 1}'); + jsonb_populate_record +----------------------- + (0,1) +(1 row) + +SELECT jsonb_populate_record(row(1,2)::jb_ordered_pair, '{"x": 0}'); + jsonb_populate_record +----------------------- + (0,2) +(1 row) + +SELECT jsonb_populate_record(row(1,2)::jb_ordered_pair, '{"x": 1, "y": 0}'); +ERROR: value for domain jb_ordered_pair violates check constraint "jb_ordered_pair_check" -- populate_recordset SELECT * FROM jsonb_populate_recordset(NULL::jbpop,'[{"a":"blurfl","x":43.2},{"b":3,"c":"2012-01-20 10:42:53"}]') q; a | b | c @@ -2383,6 +2514,54 @@ SELECT * FROM jsonb_populate_recordset(row('def',99,NULL)::jbpop,'[{"a":[100,200 {"z": true} | 3 | Fri Jan 20 10:42:53 2012 (2 rows) +-- anonymous record type +SELECT jsonb_populate_recordset(null::record, '[{"x": 0, "y": 1}]'); +ERROR: record type has not been registered +SELECT jsonb_populate_recordset(row(1,2), '[{"f1": 0, "f2": 1}]'); + jsonb_populate_recordset +-------------------------- + (0,1) +(1 row) + +SELECT i, jsonb_populate_recordset(row(i,50), '[{"f1":"42"},{"f2":"43"}]') +FROM (VALUES (1),(2)) v(i); + i | jsonb_populate_recordset +---+-------------------------- + 1 | (42,50) + 1 | (1,43) + 2 | (42,50) + 2 | (2,43) +(4 rows) + +-- composite domain +SELECT jsonb_populate_recordset(null::jb_ordered_pair, '[{"x": 0, "y": 1}]'); + jsonb_populate_recordset +-------------------------- + (0,1) +(1 row) + +SELECT jsonb_populate_recordset(row(1,2)::jb_ordered_pair, '[{"x": 0}, {"y": 3}]'); + jsonb_populate_recordset +-------------------------- + (0,2) + (1,3) +(2 rows) + +SELECT jsonb_populate_recordset(row(1,2)::jb_ordered_pair, '[{"x": 1, "y": 0}]'); +ERROR: value for domain jb_ordered_pair violates check constraint "jb_ordered_pair_check" +-- negative cases where the wrong record type is supplied +select * from jsonb_populate_recordset(row(0::int),'[{"a":"1","b":"2"},{"a":"3"}]') q (a text, b text); +ERROR: function return row and query-specified return row do not match +DETAIL: Returned row contains 1 attribute, but query expects 2. +select * from jsonb_populate_recordset(row(0::int,0::int),'[{"a":"1","b":"2"},{"a":"3"}]') q (a text, b text); +ERROR: function return row and query-specified return row do not match +DETAIL: Returned type integer at ordinal position 1, but query expects text. +select * from jsonb_populate_recordset(row(0::int,0::int,0::int),'[{"a":"1","b":"2"},{"a":"3"}]') q (a text, b text); +ERROR: function return row and query-specified return row do not match +DETAIL: Returned row contains 3 attributes, but query expects 2. +select * from jsonb_populate_recordset(row(1000000000::int,50::int),'[{"b":"2"},{"a":"3"}]') q (a text, b text); +ERROR: function return row and query-specified return row do not match +DETAIL: Returned type integer at ordinal position 1, but query expects text. 
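The same overlay rule drives jsonb_populate_recordset(): each element of the JSON array is laid over the base row, so keys absent from an element fall back to the base values. A small sketch (anonymous record base with fields f1/f2; expected results are inferred from the cases above rather than captured output):

-- Illustrative only: missing keys per element default to the base row.
SELECT jsonb_populate_recordset(row(0, 0),
                                '[{"f1": 1}, {"f2": 2}, {}]');
-- expected, one row per array element: (1,0), (0,2), (0,0)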
-- jsonb_to_record and jsonb_to_recordset select * from jsonb_to_record('{"a":1,"b":"foo","c":"bar"}') as x(a int, b text, d text); @@ -2422,8 +2601,8 @@ select * from jsonb_to_record('{"ia": null}') as x(ia _int4); (1 row) select * from jsonb_to_record('{"ia": 123}') as x(ia _int4); -ERROR: expected json array -HINT: see the value of key "ia" +ERROR: expected JSON array +HINT: See the value of key "ia". select * from jsonb_to_record('{"ia": [1, "2", null, 4]}') as x(ia _int4); ia -------------- @@ -2437,10 +2616,10 @@ select * from jsonb_to_record('{"ia": [[1, 2], [3, 4]]}') as x(ia _int4); (1 row) select * from jsonb_to_record('{"ia": [[1], 2]}') as x(ia _int4); -ERROR: expected json array -HINT: see the array element [1] of key "ia" +ERROR: expected JSON array +HINT: See the array element [1] of key "ia". select * from jsonb_to_record('{"ia": [[1], [2, 3]]}') as x(ia _int4); -ERROR: malformed json array +ERROR: malformed JSON array DETAIL: Multidimensional arrays must have sub-arrays with matching dimensions. select * from jsonb_to_record('{"ia2": [1, 2, 3]}') as x(ia2 int[][]); ia2 @@ -2482,6 +2661,8 @@ DROP TYPE jsbrec_i_not_null; DROP DOMAIN jsb_int_not_null; DROP DOMAIN jsb_int_array_1d; DROP DOMAIN jsb_int_array_2d; +DROP DOMAIN jb_ordered_pair; +DROP TYPE jb_unordered_pair; -- indexing SELECT count(*) FROM testjsonb WHERE j @> '{"wait":null}'; count @@ -3951,6 +4132,86 @@ select to_tsvector('english', '{"a": "aaa in bbb ddd ccc", "b": ["the eee fff gg 'aaa':1 'bbb':3 'ccc':5 'ddd':4 'eee':8 'fff':9 'ggg':10 'hhh':12 'iii':13 (1 row) +-- jsonb to tsvector with numeric values +select to_tsvector('english', '{"a": "aaa in bbb ddd ccc", "b": 123, "c": 456}'::jsonb); + to_tsvector +--------------------------------- + 'aaa':1 'bbb':3 'ccc':5 'ddd':4 +(1 row) + +-- jsonb_to_tsvector +select jsonb_to_tsvector('english', '{"a": "aaa in bbb", "b": 123, "c": 456, "d": true, "f": false, "g": null}'::jsonb, '"all"'); + jsonb_to_tsvector +---------------------------------------------------------------------------------------- + '123':8 '456':12 'aaa':2 'b':6 'bbb':4 'c':10 'd':14 'f':18 'fals':20 'g':22 'true':16 +(1 row) + +select jsonb_to_tsvector('english', '{"a": "aaa in bbb", "b": 123, "c": 456, "d": true, "f": false, "g": null}'::jsonb, '"key"'); + jsonb_to_tsvector +-------------------------------- + 'b':2 'c':4 'd':6 'f':8 'g':10 +(1 row) + +select jsonb_to_tsvector('english', '{"a": "aaa in bbb", "b": 123, "c": 456, "d": true, "f": false, "g": null}'::jsonb, '"string"'); + jsonb_to_tsvector +------------------- + 'aaa':1 'bbb':3 +(1 row) + +select jsonb_to_tsvector('english', '{"a": "aaa in bbb", "b": 123, "c": 456, "d": true, "f": false, "g": null}'::jsonb, '"numeric"'); + jsonb_to_tsvector +------------------- + '123':1 '456':3 +(1 row) + +select jsonb_to_tsvector('english', '{"a": "aaa in bbb", "b": 123, "c": 456, "d": true, "f": false, "g": null}'::jsonb, '"boolean"'); + jsonb_to_tsvector +------------------- + 'fals':3 'true':1 +(1 row) + +select jsonb_to_tsvector('english', '{"a": "aaa in bbb", "b": 123, "c": 456, "d": true, "f": false, "g": null}'::jsonb, '["string", "numeric"]'); + jsonb_to_tsvector +--------------------------------- + '123':5 '456':7 'aaa':1 'bbb':3 +(1 row) + +select jsonb_to_tsvector('english', '{"a": "aaa in bbb", "b": 123, "c": 456, "d": true, "f": false, "g": null}'::jsonb, '"all"'); + jsonb_to_tsvector +---------------------------------------------------------------------------------------- + '123':8 '456':12 'aaa':2 'b':6 'bbb':4 'c':10 'd':14 'f':18 
'fals':20 'g':22 'true':16 +(1 row) + +select jsonb_to_tsvector('english', '{"a": "aaa in bbb", "b": 123, "c": 456, "d": true, "f": false, "g": null}'::jsonb, '"key"'); + jsonb_to_tsvector +-------------------------------- + 'b':2 'c':4 'd':6 'f':8 'g':10 +(1 row) + +select jsonb_to_tsvector('english', '{"a": "aaa in bbb", "b": 123, "c": 456, "d": true, "f": false, "g": null}'::jsonb, '"string"'); + jsonb_to_tsvector +------------------- + 'aaa':1 'bbb':3 +(1 row) + +select jsonb_to_tsvector('english', '{"a": "aaa in bbb", "b": 123, "c": 456, "d": true, "f": false, "g": null}'::jsonb, '"numeric"'); + jsonb_to_tsvector +------------------- + '123':1 '456':3 +(1 row) + +select jsonb_to_tsvector('english', '{"a": "aaa in bbb", "b": 123, "c": 456, "d": true, "f": false, "g": null}'::jsonb, '"boolean"'); + jsonb_to_tsvector +------------------- + 'fals':3 'true':1 +(1 row) + +select jsonb_to_tsvector('english', '{"a": "aaa in bbb", "b": 123, "c": 456, "d": true, "f": false, "g": null}'::jsonb, '["string", "numeric"]'); + jsonb_to_tsvector +--------------------------------- + '123':5 '456':7 'aaa':1 'bbb':3 +(1 row) + -- ts_vector corner cases select to_tsvector('""'::jsonb); to_tsvector @@ -3976,6 +4237,48 @@ select to_tsvector('null'::jsonb); (1 row) +-- jsonb_to_tsvector corner cases +select jsonb_to_tsvector('""'::jsonb, '"all"'); + jsonb_to_tsvector +------------------- + +(1 row) + +select jsonb_to_tsvector('{}'::jsonb, '"all"'); + jsonb_to_tsvector +------------------- + +(1 row) + +select jsonb_to_tsvector('[]'::jsonb, '"all"'); + jsonb_to_tsvector +------------------- + +(1 row) + +select jsonb_to_tsvector('null'::jsonb, '"all"'); + jsonb_to_tsvector +------------------- + +(1 row) + +select jsonb_to_tsvector('english', '{"a": "aaa in bbb", "b": 123, "c": 456, "d": true, "f": false, "g": null}'::jsonb, '""'); +ERROR: wrong flag in flag array: "" +HINT: Possible values are: "string", "numeric", "boolean", "key", and "all" +select jsonb_to_tsvector('english', '{"a": "aaa in bbb", "b": 123, "c": 456, "d": true, "f": false, "g": null}'::jsonb, '{}'); +ERROR: wrong flag type, only arrays and scalars are allowed +select jsonb_to_tsvector('english', '{"a": "aaa in bbb", "b": 123, "c": 456, "d": true, "f": false, "g": null}'::jsonb, '[]'); + jsonb_to_tsvector +------------------- + +(1 row) + +select jsonb_to_tsvector('english', '{"a": "aaa in bbb", "b": 123, "c": 456, "d": true, "f": false, "g": null}'::jsonb, 'null'); +ERROR: flag array element is not a string +HINT: Possible values are: "string", "numeric", "boolean", "key", and "all" +select jsonb_to_tsvector('english', '{"a": "aaa in bbb", "b": 123, "c": 456, "d": true, "f": false, "g": null}'::jsonb, '["all", null]'); +ERROR: flag array element is not a string +HINT: Possible values are: "string", "numeric", "boolean", "key", and "all" -- ts_headline for jsonb select ts_headline('{"a": "aaa bbb", "b": {"c": "ccc ddd fff", "c1": "ccc1 ddd1"}, "d": ["ggg hhh", "iii jjj"]}'::jsonb, tsquery('bbb & ddd & hhh')); ts_headline @@ -4020,3 +4323,108 @@ select ts_headline('[]'::jsonb, tsquery('aaa & bbb')); [] (1 row) +-- casts +select 'true'::jsonb::bool; + bool +------ + t +(1 row) + +select '[]'::jsonb::bool; +ERROR: cannot cast jsonb array to type boolean +select '1.0'::jsonb::float; + float8 +-------- + 1 +(1 row) + +select '[1.0]'::jsonb::float; +ERROR: cannot cast jsonb array to type double precision +select '12345'::jsonb::int4; + int4 +------- + 12345 +(1 row) + +select '"hello"'::jsonb::int4; +ERROR: cannot cast jsonb string to type 
integer +select '12345'::jsonb::numeric; + numeric +--------- + 12345 +(1 row) + +select '{}'::jsonb::numeric; +ERROR: cannot cast jsonb object to type numeric +select '12345.05'::jsonb::numeric; + numeric +---------- + 12345.05 +(1 row) + +select '12345.05'::jsonb::float4; + float4 +-------- + 12345 +(1 row) + +select '12345.05'::jsonb::float8; + float8 +---------- + 12345.05 +(1 row) + +select '12345.05'::jsonb::int2; + int2 +------- + 12345 +(1 row) + +select '12345.05'::jsonb::int4; + int4 +------- + 12345 +(1 row) + +select '12345.05'::jsonb::int8; + int8 +------- + 12345 +(1 row) + +select '12345.0000000000000000000000000000000000000000000005'::jsonb::numeric; + numeric +------------------------------------------------------ + 12345.0000000000000000000000000000000000000000000005 +(1 row) + +select '12345.0000000000000000000000000000000000000000000005'::jsonb::float4; + float4 +-------- + 12345 +(1 row) + +select '12345.0000000000000000000000000000000000000000000005'::jsonb::float8; + float8 +-------- + 12345 +(1 row) + +select '12345.0000000000000000000000000000000000000000000005'::jsonb::int2; + int2 +------- + 12345 +(1 row) + +select '12345.0000000000000000000000000000000000000000000005'::jsonb::int4; + int4 +------- + 12345 +(1 row) + +select '12345.0000000000000000000000000000000000000000000005'::jsonb::int8; + int8 +------- + 12345 +(1 row) + diff --git a/src/test/regress/expected/line.out b/src/test/regress/expected/line.out index f20abdc430..bf780daa2c 100644 --- a/src/test/regress/expected/line.out +++ b/src/test/regress/expected/line.out @@ -4,24 +4,43 @@ -- --DROP TABLE LINE_TBL; CREATE TABLE LINE_TBL (s line); -INSERT INTO LINE_TBL VALUES ('{1,-1,1}'); -INSERT INTO LINE_TBL VALUES ('(0,0),(6,6)'); +INSERT INTO LINE_TBL VALUES ('{0,-1,5}'); -- A == 0 +INSERT INTO LINE_TBL VALUES ('{1,0,5}'); -- B == 0 +INSERT INTO LINE_TBL VALUES ('{0,3,0}'); -- A == C == 0 +INSERT INTO LINE_TBL VALUES (' (0,0), (6,6)'); INSERT INTO LINE_TBL VALUES ('10,-10 ,-5,-4'); INSERT INTO LINE_TBL VALUES ('[-1e6,2e2,3e5, -4e1]'); -INSERT INTO LINE_TBL VALUES ('(11,22,33,44)'); -INSERT INTO LINE_TBL VALUES ('[(1,0),(1,0)]'); -ERROR: invalid line specification: must be two distinct points -LINE 1: INSERT INTO LINE_TBL VALUES ('[(1,0),(1,0)]'); - ^ +INSERT INTO LINE_TBL VALUES ('{3,NaN,5}'); +INSERT INTO LINE_TBL VALUES ('{NaN,NaN,NaN}'); -- horizontal INSERT INTO LINE_TBL VALUES ('[(1,3),(2,3)]'); -- vertical -INSERT INTO LINE_TBL VALUES ('[(3,1),(3,2)]'); +INSERT INTO LINE_TBL VALUES (line(point '(3,1)', point '(3,2)')); -- bad values for parser testing +INSERT INTO LINE_TBL VALUES ('{}'); +ERROR: invalid input syntax for type line: "{}" +LINE 1: INSERT INTO LINE_TBL VALUES ('{}'); + ^ +INSERT INTO LINE_TBL VALUES ('{0'); +ERROR: invalid input syntax for type line: "{0" +LINE 1: INSERT INTO LINE_TBL VALUES ('{0'); + ^ +INSERT INTO LINE_TBL VALUES ('{0,0}'); +ERROR: invalid input syntax for type line: "{0,0}" +LINE 1: INSERT INTO LINE_TBL VALUES ('{0,0}'); + ^ +INSERT INTO LINE_TBL VALUES ('{0,0,1'); +ERROR: invalid input syntax for type line: "{0,0,1" +LINE 1: INSERT INTO LINE_TBL VALUES ('{0,0,1'); + ^ INSERT INTO LINE_TBL VALUES ('{0,0,1}'); ERROR: invalid line specification: A and B cannot both be zero LINE 1: INSERT INTO LINE_TBL VALUES ('{0,0,1}'); ^ +INSERT INTO LINE_TBL VALUES ('{0,0,1} x'); +ERROR: invalid input syntax for type line: "{0,0,1} x" +LINE 1: INSERT INTO LINE_TBL VALUES ('{0,0,1} x'); + ^ INSERT INTO LINE_TBL VALUES ('(3asdf,2 ,3,4r2)'); ERROR: invalid input syntax for type line: 
"(3asdf,2 ,3,4r2)" LINE 1: INSERT INTO LINE_TBL VALUES ('(3asdf,2 ,3,4r2)'); @@ -38,234 +57,31 @@ INSERT INTO LINE_TBL VALUES ('[(1,2),(3,4)'); ERROR: invalid input syntax for type line: "[(1,2),(3,4)" LINE 1: INSERT INTO LINE_TBL VALUES ('[(1,2),(3,4)'); ^ +INSERT INTO LINE_TBL VALUES ('[(1,2),(1,2)]'); +ERROR: invalid line specification: must be two distinct points +LINE 1: INSERT INTO LINE_TBL VALUES ('[(1,2),(1,2)]'); + ^ +INSERT INTO LINE_TBL VALUES (line(point '(1,0)', point '(1,0)')); +ERROR: invalid line specification: must be two distinct points select * from LINE_TBL; s --------------------------------------------- - {1,-1,1} + {0,-1,5} + {1,0,5} + {0,3,0} {1,-1,0} {-0.4,-1,-6} {-0.000184615384615385,-1,15.3846153846154} - {1,-1,11} + {3,NaN,5} + {NaN,NaN,NaN} {0,-1,3} {-1,0,3} -(7 rows) - --- functions and operators -SELECT * FROM LINE_TBL WHERE (s <-> line '[(1,2),(3,4)]') < 10; - s ---------------------------------------------- - {1,-1,1} - {1,-1,0} - {-0.4,-1,-6} - {-0.000184615384615385,-1,15.3846153846154} - {1,-1,11} - {0,-1,3} - {-1,0,3} -(7 rows) - -SELECT * FROM LINE_TBL WHERE (point '(0.1,0.1)' <-> s) < 1; - s ----------- - {1,-1,1} - {1,-1,0} -(2 rows) - -SELECT * FROM LINE_TBL WHERE (lseg '[(0.1,0.1),(0.2,0.2)]' <-> s) < 1; - s ----------- - {1,-1,1} - {1,-1,0} -(2 rows) - -SELECT line '[(1,1),(2,1)]' <-> line '[(-1,-1),(-2,-1)]'; - ?column? ----------- - 2 -(1 row) - -SELECT lseg '[(1,1),(2,1)]' <-> line '[(-1,-1),(-2,-1)]'; - ?column? ----------- - 2 -(1 row) - -SELECT point '(-1,1)' <-> line '[(-3,0),(-4,0)]'; - ?column? ----------- - 1 -(1 row) - -SELECT lseg '[(1,1),(5,5)]' ?# line '[(2,0),(0,2)]'; -- true - ?column? ----------- - t -(1 row) - -SELECT lseg '[(1,1),(5,5)]' ?# line '[(0,0),(1,0)]'; -- false - ?column? ----------- - f -(1 row) - -SELECT line '[(0,0),(1,1)]' ?# box '(0,0,2,2)'; -- true - ?column? ----------- - t -(1 row) - -SELECT line '[(3,0),(4,1)]' ?# box '(0,0,2,2)'; -- false - ?column? ----------- - f -(1 row) - -SELECT point '(1,1)' <@ line '[(0,0),(2,2)]'; -- true - ?column? ----------- - t -(1 row) - -SELECT point '(1,1)' <@ line '[(0,0),(1,0)]'; -- false - ?column? ----------- - f -(1 row) - -SELECT point '(1,1)' @ line '[(0,0),(2,2)]'; -- true - ?column? ----------- - t -(1 row) - -SELECT point '(1,1)' @ line '[(0,0),(1,0)]'; -- false - ?column? ----------- - f -(1 row) - -SELECT lseg '[(1,1),(2,2)]' <@ line '[(0,0),(2,2)]'; -- true - ?column? ----------- - t -(1 row) - -SELECT lseg '[(1,1),(2,1)]' <@ line '[(0,0),(1,0)]'; -- false - ?column? ----------- - f -(1 row) - -SELECT lseg '[(1,1),(2,2)]' @ line '[(0,0),(2,2)]'; -- true - ?column? ----------- - t -(1 row) - -SELECT lseg '[(1,1),(2,1)]' @ line '[(0,0),(1,0)]'; -- false - ?column? ----------- - f -(1 row) - -SELECT point '(0,1)' ## line '[(0,0),(1,1)]'; - ?column? ------------ - (0.5,0.5) -(1 row) - -SELECT line '[(0,0),(1,1)]' ## lseg '[(1,0),(2,0)]'; - ?column? ----------- - (1,0) -(1 row) - -SELECT line '[(0,0),(1,1)]' ?# line '[(1,0),(2,1)]'; -- false - ?column? ----------- - f -(1 row) - -SELECT line '[(0,0),(1,1)]' ?# line '[(1,0),(1,1)]'; -- true - ?column? ----------- - t -(1 row) - -SELECT line '[(0,0),(1,1)]' # line '[(1,0),(2,1)]'; - ?column? ----------- - -(1 row) - -SELECT line '[(0,0),(1,1)]' # line '[(1,0),(1,1)]'; - ?column? ----------- - (1,1) -(1 row) - -SELECT line '[(0,0),(1,1)]' ?|| line '[(1,0),(2,1)]'; -- true - ?column? ----------- - t -(1 row) - -SELECT line '[(0,0),(1,1)]' ?|| line '[(1,0),(1,1)]'; -- false - ?column? 
----------- - f -(1 row) - -SELECT line '[(0,0),(1,0)]' ?-| line '[(0,0),(0,1)]'; -- true - ?column? ----------- - t -(1 row) - -SELECT line '[(0,0),(1,1)]' ?-| line '[(1,0),(1,1)]'; -- false - ?column? ----------- - f -(1 row) - -SELECT ?- line '[(0,0),(1,0)]'; -- true - ?column? ----------- - t -(1 row) - -SELECT ?- line '[(0,0),(1,1)]'; -- false - ?column? ----------- - f -(1 row) - -SELECT ?| line '[(0,0),(0,1)]'; -- true - ?column? ----------- - t -(1 row) - -SELECT ?| line '[(0,0),(1,1)]'; -- false - ?column? ----------- - f -(1 row) - -SELECT line(point '(1,2)', point '(3,4)'); - line ----------- - {1,-1,1} -(1 row) - -SELECT line '[(1,2),(3,4)]' = line '[(3,4),(4,5)]'; -- true - ?column? ----------- - t -(1 row) +(10 rows) -SELECT line '[(1,2),(3,4)]' = line '[(3,4),(4,4)]'; -- false - ?column? ----------- - f +select '{nan, 1, nan}'::line = '{nan, 1, nan}'::line as true, + '{nan, 1, nan}'::line = '{nan, 2, nan}'::line as false; + true | false +------+------- + t | f (1 row) diff --git a/src/test/regress/expected/lock.out b/src/test/regress/expected/lock.out index fd27344503..185fd2f879 100644 --- a/src/test/regress/expected/lock.out +++ b/src/test/regress/expected/lock.out @@ -5,7 +5,13 @@ CREATE SCHEMA lock_schema1; SET search_path = lock_schema1; CREATE TABLE lock_tbl1 (a BIGINT); -CREATE VIEW lock_view1 AS SELECT 1; +CREATE TABLE lock_tbl1a (a BIGINT); +CREATE VIEW lock_view1 AS SELECT * FROM lock_tbl1; +CREATE VIEW lock_view2(a,b) AS SELECT * FROM lock_tbl1, lock_tbl1a; +CREATE VIEW lock_view3 AS SELECT * from lock_view2; +CREATE VIEW lock_view4 AS SELECT (select a from lock_tbl1a limit 1) from lock_tbl1; +CREATE VIEW lock_view5 AS SELECT * from lock_tbl1 where a in (select * from lock_tbl1a); +CREATE VIEW lock_view6 AS SELECT * from (select * from lock_tbl1) sub; CREATE ROLE regress_rol_lock1; ALTER ROLE regress_rol_lock1 SET search_path = lock_schema1; GRANT USAGE ON SCHEMA lock_schema1 TO regress_rol_lock1; @@ -30,8 +36,101 @@ LOCK TABLE lock_tbl1 IN SHARE MODE NOWAIT; LOCK TABLE lock_tbl1 IN SHARE ROW EXCLUSIVE MODE NOWAIT; LOCK TABLE lock_tbl1 IN EXCLUSIVE MODE NOWAIT; LOCK TABLE lock_tbl1 IN ACCESS EXCLUSIVE MODE NOWAIT; -LOCK TABLE lock_view1 IN EXCLUSIVE MODE; -- Will fail; can't lock a non-table -ERROR: "lock_view1" is not a table +ROLLBACK; +-- Verify that we can lock views. +BEGIN TRANSACTION; +LOCK TABLE lock_view1 IN EXCLUSIVE MODE; +-- lock_view1 and lock_tbl1 are locked. +select relname from pg_locks l, pg_class c + where l.relation = c.oid and relname like '%lock_%' and mode = 'ExclusiveLock' + order by relname; + relname +------------ + lock_tbl1 + lock_view1 +(2 rows) + +ROLLBACK; +BEGIN TRANSACTION; +LOCK TABLE lock_view2 IN EXCLUSIVE MODE; +-- lock_view1, lock_tbl1, and lock_tbl1a are locked. +select relname from pg_locks l, pg_class c + where l.relation = c.oid and relname like '%lock_%' and mode = 'ExclusiveLock' + order by relname; + relname +------------ + lock_tbl1 + lock_tbl1a + lock_view2 +(3 rows) + +ROLLBACK; +BEGIN TRANSACTION; +LOCK TABLE lock_view3 IN EXCLUSIVE MODE; +-- lock_view3, lock_view2, lock_tbl1, and lock_tbl1a are locked recursively. +select relname from pg_locks l, pg_class c + where l.relation = c.oid and relname like '%lock_%' and mode = 'ExclusiveLock' + order by relname; + relname +------------ + lock_tbl1 + lock_tbl1a + lock_view2 + lock_view3 +(4 rows) + +ROLLBACK; +BEGIN TRANSACTION; +LOCK TABLE lock_view4 IN EXCLUSIVE MODE; +-- lock_view4, lock_tbl1, and lock_tbl1a are locked. 
+select relname from pg_locks l, pg_class c + where l.relation = c.oid and relname like '%lock_%' and mode = 'ExclusiveLock' + order by relname; + relname +------------ + lock_tbl1 + lock_tbl1a + lock_view4 +(3 rows) + +ROLLBACK; +BEGIN TRANSACTION; +LOCK TABLE lock_view5 IN EXCLUSIVE MODE; +-- lock_view5, lock_tbl1, and lock_tbl1a are locked. +select relname from pg_locks l, pg_class c + where l.relation = c.oid and relname like '%lock_%' and mode = 'ExclusiveLock' + order by relname; + relname +------------ + lock_tbl1 + lock_tbl1a + lock_view5 +(3 rows) + +ROLLBACK; +BEGIN TRANSACTION; +LOCK TABLE lock_view6 IN EXCLUSIVE MODE; +-- lock_view6 an lock_tbl1 are locked. +select relname from pg_locks l, pg_class c + where l.relation = c.oid and relname like '%lock_%' and mode = 'ExclusiveLock' + order by relname; + relname +------------ + lock_tbl1 + lock_view6 +(2 rows) + +ROLLBACK; +-- detecting infinite recursions in view definitions +CREATE OR REPLACE VIEW lock_view2 AS SELECT * from lock_view3; +BEGIN TRANSACTION; +LOCK TABLE lock_view2 IN EXCLUSIVE MODE; +ERROR: infinite recursion detected in rules for relation "lock_view2" +ROLLBACK; +CREATE VIEW lock_view7 AS SELECT * from lock_view2; +BEGIN TRANSACTION; +LOCK TABLE lock_view7 IN EXCLUSIVE MODE; +ERROR: infinite recursion detected in rules for relation "lock_view2" ROLLBACK; -- Verify that we can lock a table with inheritance children. CREATE TABLE lock_tbl2 (b BIGINT) INHERITS (lock_tbl1); @@ -45,7 +144,7 @@ GRANT UPDATE ON TABLE lock_tbl1 TO regress_rol_lock1; SET ROLE regress_rol_lock1; BEGIN; LOCK TABLE lock_tbl1 * IN ACCESS EXCLUSIVE MODE; -ERROR: permission denied for relation lock_tbl2 +ERROR: permission denied for table lock_tbl2 ROLLBACK; BEGIN; LOCK TABLE ONLY lock_tbl1; @@ -54,10 +153,17 @@ RESET ROLE; -- -- Clean up -- +DROP VIEW lock_view7; +DROP VIEW lock_view6; +DROP VIEW lock_view5; +DROP VIEW lock_view4; +DROP VIEW lock_view3 CASCADE; +NOTICE: drop cascades to view lock_view2 DROP VIEW lock_view1; DROP TABLE lock_tbl3; DROP TABLE lock_tbl2; DROP TABLE lock_tbl1; +DROP TABLE lock_tbl1a; DROP SCHEMA lock_schema1 CASCADE; DROP ROLE regress_rol_lock1; -- atomic ops tests diff --git a/src/test/regress/expected/lseg.out b/src/test/regress/expected/lseg.out index bba1f3ee80..7e878b5577 100644 --- a/src/test/regress/expected/lseg.out +++ b/src/test/regress/expected/lseg.out @@ -8,7 +8,10 @@ INSERT INTO LSEG_TBL VALUES ('[(1,2),(3,4)]'); INSERT INTO LSEG_TBL VALUES ('(0,0),(6,6)'); INSERT INTO LSEG_TBL VALUES ('10,-10 ,-3,-4'); INSERT INTO LSEG_TBL VALUES ('[-1e6,2e2,3e5, -4e1]'); -INSERT INTO LSEG_TBL VALUES ('(11,22,33,44)'); +INSERT INTO LSEG_TBL VALUES (lseg(point(11, 22), point(33,44))); +INSERT INTO LSEG_TBL VALUES ('[(-10,2),(-10,3)]'); -- vertical +INSERT INTO LSEG_TBL VALUES ('[(0,-20),(30,-20)]'); -- horizontal +INSERT INTO LSEG_TBL VALUES ('[(NaN,1),(NaN,90)]'); -- NaN -- bad values for parser testing INSERT INTO LSEG_TBL VALUES ('(3asdf,2 ,3,4r2)'); ERROR: invalid input syntax for type lseg: "(3asdf,2 ,3,4r2)" @@ -34,19 +37,8 @@ select * from LSEG_TBL; [(10,-10),(-3,-4)] [(-1000000,200),(300000,-40)] [(11,22),(33,44)] -(5 rows) - -SELECT * FROM LSEG_TBL WHERE s <= lseg '[(1,2),(3,4)]'; - s ---------------- - [(1,2),(3,4)] -(1 row) - -SELECT * FROM LSEG_TBL WHERE (s <-> lseg '[(1,2),(3,4)]') < 10; - s --------------------- - [(1,2),(3,4)] - [(0,0),(6,6)] - [(10,-10),(-3,-4)] -(3 rows) + [(-10,2),(-10,3)] + [(0,-20),(30,-20)] + [(NaN,1),(NaN,90)] +(8 rows) diff --git a/src/test/regress/expected/misc_sanity.out 
b/src/test/regress/expected/misc_sanity.out index f02689660b..2d3522b500 100644 --- a/src/test/regress/expected/misc_sanity.out +++ b/src/test/regress/expected/misc_sanity.out @@ -29,7 +29,7 @@ SELECT * FROM pg_shdepend as d1 WHERE refclassid = 0 OR refobjid = 0 OR deptype NOT IN ('a', 'o', 'p', 'r') OR - (deptype != 'p' AND (dbid = 0 OR classid = 0 OR objid = 0)) OR + (deptype != 'p' AND (classid = 0 OR objid = 0)) OR (deptype = 'p' AND (dbid != 0 OR classid != 0 OR objid != 0 OR objsubid != 0)); dbid | classid | objid | objsubid | refclassid | refobjid | deptype ------+---------+-------+----------+------------+----------+--------- @@ -76,3 +76,34 @@ NOTICE: pg_database contains unpinned initdb-created object(s) NOTICE: pg_extension contains unpinned initdb-created object(s) NOTICE: pg_rewrite contains unpinned initdb-created object(s) NOTICE: pg_tablespace contains unpinned initdb-created object(s) +-- **************** pg_class **************** +-- Look for system tables with varlena columns but no toast table. All +-- system tables with toastable columns should have toast tables, with +-- the following exceptions: +-- 1. pg_class, pg_attribute, and pg_index, due to fear of recursive +-- dependencies as toast tables depend on them. +-- 2. pg_largeobject and pg_largeobject_metadata. Large object catalogs +-- and toast tables are mutually exclusive and large object data is handled +-- as user data by pg_upgrade, which would cause failures. +SELECT relname, attname, atttypid::regtype +FROM pg_class c JOIN pg_attribute a ON c.oid = attrelid +WHERE c.oid < 16384 AND + reltoastrelid = 0 AND + relkind = 'r' AND + attstorage != 'p' +ORDER BY 1, 2; + relname | attname | atttypid +-------------------------+---------------+-------------- + pg_attribute | attacl | aclitem[] + pg_attribute | attfdwoptions | text[] + pg_attribute | attmissingval | anyarray + pg_attribute | attoptions | text[] + pg_class | relacl | aclitem[] + pg_class | reloptions | text[] + pg_class | relpartbound | pg_node_tree + pg_index | indexprs | pg_node_tree + pg_index | indpred | pg_node_tree + pg_largeobject | data | bytea + pg_largeobject_metadata | lomacl | aclitem[] +(11 rows) + diff --git a/src/test/regress/expected/namespace.out b/src/test/regress/expected/namespace.out index b0cdd65af3..2564d1b080 100644 --- a/src/test/regress/expected/namespace.out +++ b/src/test/regress/expected/namespace.out @@ -1,7 +1,7 @@ -- -- Regression tests for schemas (namespaces) -- -CREATE SCHEMA test_schema_1 +CREATE SCHEMA test_ns_schema_1 CREATE UNIQUE INDEX abc_a_idx ON abc (a) CREATE VIEW abc_view AS SELECT a+1 AS a, b+1 AS b FROM abc @@ -11,16 +11,16 @@ CREATE SCHEMA test_schema_1 ); -- verify that the objects were created SELECT COUNT(*) FROM pg_class WHERE relnamespace = - (SELECT oid FROM pg_namespace WHERE nspname = 'test_schema_1'); + (SELECT oid FROM pg_namespace WHERE nspname = 'test_ns_schema_1'); count ------- 5 (1 row) -INSERT INTO test_schema_1.abc DEFAULT VALUES; -INSERT INTO test_schema_1.abc DEFAULT VALUES; -INSERT INTO test_schema_1.abc DEFAULT VALUES; -SELECT * FROM test_schema_1.abc; +INSERT INTO test_ns_schema_1.abc DEFAULT VALUES; +INSERT INTO test_ns_schema_1.abc DEFAULT VALUES; +INSERT INTO test_ns_schema_1.abc DEFAULT VALUES; +SELECT * FROM test_ns_schema_1.abc; a | b ---+--- 1 | @@ -28,7 +28,7 @@ SELECT * FROM test_schema_1.abc; 3 | (3 rows) -SELECT * FROM test_schema_1.abc_view; +SELECT * FROM test_ns_schema_1.abc_view; a | b ---+--- 2 | @@ -36,20 +36,20 @@ SELECT * FROM test_schema_1.abc_view; 4 | (3 rows) 
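The namespace changes above only rename test_schema_1 to test_ns_schema_1, but the statement they touch uses the element-list form of CREATE SCHEMA, in which the index and view elements precede the table they reference. A minimal standalone sketch of the same pattern (the demo_* names are placeholders, not taken from the patch):

    CREATE SCHEMA demo_schema
        CREATE VIEW demo_view AS SELECT a + 1 AS a FROM demo_tbl
        CREATE TABLE demo_tbl (a int);
    DROP SCHEMA demo_schema CASCADE;

PostgreSQL reorders schema elements of these kinds internally, so the forward reference from the view to the table is accepted here, as the test itself demonstrates; the documentation does caution that not every forward reference among CREATE SCHEMA subcommands is handled.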
-ALTER SCHEMA test_schema_1 RENAME TO test_schema_renamed; +ALTER SCHEMA test_ns_schema_1 RENAME TO test_ns_schema_renamed; SELECT COUNT(*) FROM pg_class WHERE relnamespace = - (SELECT oid FROM pg_namespace WHERE nspname = 'test_schema_1'); + (SELECT oid FROM pg_namespace WHERE nspname = 'test_ns_schema_1'); count ------- 0 (1 row) -- test IF NOT EXISTS cases -CREATE SCHEMA test_schema_renamed; -- fail, already exists -ERROR: schema "test_schema_renamed" already exists -CREATE SCHEMA IF NOT EXISTS test_schema_renamed; -- ok with notice -NOTICE: schema "test_schema_renamed" already exists, skipping -CREATE SCHEMA IF NOT EXISTS test_schema_renamed -- fail, disallowed +CREATE SCHEMA test_ns_schema_renamed; -- fail, already exists +ERROR: schema "test_ns_schema_renamed" already exists +CREATE SCHEMA IF NOT EXISTS test_ns_schema_renamed; -- ok with notice +NOTICE: schema "test_ns_schema_renamed" already exists, skipping +CREATE SCHEMA IF NOT EXISTS test_ns_schema_renamed -- fail, disallowed CREATE TABLE abc ( a serial, b int UNIQUE @@ -57,13 +57,13 @@ CREATE SCHEMA IF NOT EXISTS test_schema_renamed -- fail, disallowed ERROR: CREATE SCHEMA IF NOT EXISTS cannot include schema elements LINE 2: CREATE TABLE abc ( ^ -DROP SCHEMA test_schema_renamed CASCADE; +DROP SCHEMA test_ns_schema_renamed CASCADE; NOTICE: drop cascades to 2 other objects -DETAIL: drop cascades to table test_schema_renamed.abc -drop cascades to view test_schema_renamed.abc_view +DETAIL: drop cascades to table test_ns_schema_renamed.abc +drop cascades to view test_ns_schema_renamed.abc_view -- verify that the objects were dropped SELECT COUNT(*) FROM pg_class WHERE relnamespace = - (SELECT oid FROM pg_namespace WHERE nspname = 'test_schema_renamed'); + (SELECT oid FROM pg_namespace WHERE nspname = 'test_ns_schema_renamed'); count ------- 0 diff --git a/src/test/regress/expected/numeric.out b/src/test/regress/expected/numeric.out index ae0beb9b68..1cb3c3bfab 100644 --- a/src/test/regress/expected/numeric.out +++ b/src/test/regress/expected/numeric.out @@ -708,6 +708,27 @@ SELECT * FROM fract_only; (6 rows) DROP TABLE fract_only; +-- Check inf/nan conversion behavior +SELECT 'NaN'::float8::numeric; + numeric +--------- + NaN +(1 row) + +SELECT 'Infinity'::float8::numeric; +ERROR: cannot convert infinity to numeric +SELECT '-Infinity'::float8::numeric; +ERROR: cannot convert infinity to numeric +SELECT 'NaN'::float4::numeric; + numeric +--------- + NaN +(1 row) + +SELECT 'Infinity'::float4::numeric; +ERROR: cannot convert infinity to numeric +SELECT '-Infinity'::float4::numeric; +ERROR: cannot convert infinity to numeric -- Simple check that ceil(), floor(), and round() work correctly CREATE TABLE ceil_floor_round (a numeric); INSERT INTO ceil_floor_round VALUES ('-5.5'); @@ -1196,8 +1217,70 @@ SELECT '' AS to_char_26, to_char('100'::numeric, 'FM999'); | 100 (1 row) +-- Check parsing of literal text in a format string +SELECT '' AS to_char_27, to_char('100'::numeric, 'foo999'); + to_char_27 | to_char +------------+--------- + | foo 100 +(1 row) + +SELECT '' AS to_char_28, to_char('100'::numeric, 'f\oo999'); + to_char_28 | to_char +------------+---------- + | f\oo 100 +(1 row) + +SELECT '' AS to_char_29, to_char('100'::numeric, 'f\\oo999'); + to_char_29 | to_char +------------+----------- + | f\\oo 100 +(1 row) + +SELECT '' AS to_char_30, to_char('100'::numeric, 'f\"oo999'); + to_char_30 | to_char +------------+---------- + | f"oo 100 +(1 row) + +SELECT '' AS to_char_31, to_char('100'::numeric, 'f\\"oo999'); + to_char_31 | to_char 
+------------+----------- + | f\"oo 100 +(1 row) + +SELECT '' AS to_char_32, to_char('100'::numeric, 'f"ool"999'); + to_char_32 | to_char +------------+---------- + | fool 100 +(1 row) + +SELECT '' AS to_char_33, to_char('100'::numeric, 'f"\ool"999'); + to_char_33 | to_char +------------+---------- + | fool 100 +(1 row) + +SELECT '' AS to_char_34, to_char('100'::numeric, 'f"\\ool"999'); + to_char_34 | to_char +------------+----------- + | f\ool 100 +(1 row) + +SELECT '' AS to_char_35, to_char('100'::numeric, 'f"ool\"999'); + to_char_35 | to_char +------------+---------- + | fool"999 +(1 row) + +SELECT '' AS to_char_36, to_char('100'::numeric, 'f"ool\\"999'); + to_char_36 | to_char +------------+----------- + | fool\ 100 +(1 row) + -- TO_NUMBER() -- +SET lc_numeric = 'C'; SELECT '' AS to_number_1, to_number('-34,338,492', '99G999G999'); to_number_1 | to_number -------------+----------- @@ -1276,6 +1359,61 @@ SELECT '' AS to_number_13, to_number(' . 0 1-', ' 9 9 . 9 9 S'); | -0.01 (1 row) +SELECT '' AS to_number_14, to_number('34,50','999,99'); + to_number_14 | to_number +--------------+----------- + | 3450 +(1 row) + +SELECT '' AS to_number_15, to_number('123,000','999G'); + to_number_15 | to_number +--------------+----------- + | 123 +(1 row) + +SELECT '' AS to_number_16, to_number('123456','999G999'); + to_number_16 | to_number +--------------+----------- + | 123456 +(1 row) + +SELECT '' AS to_number_17, to_number('$1234.56','L9,999.99'); + to_number_17 | to_number +--------------+----------- + | 1234.56 +(1 row) + +SELECT '' AS to_number_18, to_number('$1234.56','L99,999.99'); + to_number_18 | to_number +--------------+----------- + | 1234.56 +(1 row) + +SELECT '' AS to_number_19, to_number('$1,234.56','L99,999.99'); + to_number_19 | to_number +--------------+----------- + | 1234.56 +(1 row) + +SELECT '' AS to_number_20, to_number('1234.56','L99,999.99'); + to_number_20 | to_number +--------------+----------- + | 1234.56 +(1 row) + +SELECT '' AS to_number_21, to_number('1,234.56','L99,999.99'); + to_number_21 | to_number +--------------+----------- + | 1234.56 +(1 row) + +SELECT '' AS to_number_22, to_number('42nd', '99th'); + to_number_22 | to_number +--------------+----------- + | 42 +(1 row) + +RESET lc_numeric; -- -- Input syntax -- @@ -1526,6 +1664,37 @@ select 0.0 ^ 12.34; 0.0000000000000000 (1 row) +-- NaNs +select 'NaN'::numeric ^ 'NaN'::numeric; + ?column? +---------- + NaN +(1 row) + +select 'NaN'::numeric ^ 0; + ?column? +---------- + 1 +(1 row) + +select 'NaN'::numeric ^ 1; + ?column? +---------- + NaN +(1 row) + +select 0 ^ 'NaN'::numeric; + ?column? +---------- + NaN +(1 row) + +select 1 ^ 'NaN'::numeric; + ?column? +---------- + 1 +(1 row) + -- invalid inputs select 0.0 ^ (-12.34); ERROR: zero raised to a negative power is undefined diff --git a/src/test/regress/expected/numerology_1.out b/src/test/regress/expected/numerology_1.out deleted file mode 100644 index d404d9db68..0000000000 --- a/src/test/regress/expected/numerology_1.out +++ /dev/null @@ -1,136 +0,0 @@ --- --- NUMEROLOGY --- Test various combinations of numeric types and functions. --- --- --- Test implicit type conversions --- This fails for Postgres v6.1 (and earlier?) 
--- so let's try explicit conversions for now - tgl 97/05/07 --- -CREATE TABLE TEMP_FLOAT (f1 FLOAT8); -INSERT INTO TEMP_FLOAT (f1) - SELECT float8(f1) FROM INT4_TBL; -INSERT INTO TEMP_FLOAT (f1) - SELECT float8(f1) FROM INT2_TBL; -SELECT '' AS ten, f1 FROM TEMP_FLOAT - ORDER BY f1; - ten | f1 ------+------------- - | -2147483647 - | -123456 - | -32767 - | -1234 - | 0 - | 0 - | 1234 - | 32767 - | 123456 - | 2147483647 -(10 rows) - --- int4 -CREATE TABLE TEMP_INT4 (f1 INT4); -INSERT INTO TEMP_INT4 (f1) - SELECT int4(f1) FROM FLOAT8_TBL - WHERE (f1 > -2147483647) AND (f1 < 2147483647); -INSERT INTO TEMP_INT4 (f1) - SELECT int4(f1) FROM INT2_TBL; -SELECT '' AS nine, f1 FROM TEMP_INT4 - ORDER BY f1; - nine | f1 -------+-------- - | -32767 - | -1234 - | -1004 - | -35 - | 0 - | 0 - | 0 - | 1234 - | 32767 -(9 rows) - --- int2 -CREATE TABLE TEMP_INT2 (f1 INT2); -INSERT INTO TEMP_INT2 (f1) - SELECT int2(f1) FROM FLOAT8_TBL - WHERE (f1 >= -32767) AND (f1 <= 32767); -INSERT INTO TEMP_INT2 (f1) - SELECT int2(f1) FROM INT4_TBL - WHERE (f1 >= -32767) AND (f1 <= 32767); -SELECT '' AS five, f1 FROM TEMP_INT2 - ORDER BY f1; - five | f1 -------+------- - | -1004 - | -35 - | 0 - | 0 - | 0 -(5 rows) - --- --- Group-by combinations --- -CREATE TABLE TEMP_GROUP (f1 INT4, f2 INT4, f3 FLOAT8); -INSERT INTO TEMP_GROUP - SELECT 1, (- i.f1), (- f.f1) - FROM INT4_TBL i, FLOAT8_TBL f; -INSERT INTO TEMP_GROUP - SELECT 2, i.f1, f.f1 - FROM INT4_TBL i, FLOAT8_TBL f; -SELECT DISTINCT f1 AS two FROM TEMP_GROUP ORDER BY 1; - two ------ - 1 - 2 -(2 rows) - -SELECT f1 AS two, max(f3) AS max_float, min(f3) as min_float - FROM TEMP_GROUP - GROUP BY f1 - ORDER BY two, max_float, min_float; - two | max_float | min_float ------+----------------------+----------------------- - 1 | 1.2345678901234e+200 | 0 - 2 | 0 | -1.2345678901234e+200 -(2 rows) - --- GROUP BY a result column name is not legal per SQL92, but we accept it --- anyway (if the name is not the name of any column exposed by FROM). 
-SELECT f1 AS two, max(f3) AS max_float, min(f3) AS min_float - FROM TEMP_GROUP - GROUP BY two - ORDER BY two, max_float, min_float; - two | max_float | min_float ------+----------------------+----------------------- - 1 | 1.2345678901234e+200 | 0 - 2 | 0 | -1.2345678901234e+200 -(2 rows) - -SELECT f1 AS two, (max(f3) + 1) AS max_plus_1, (min(f3) - 1) AS min_minus_1 - FROM TEMP_GROUP - GROUP BY f1 - ORDER BY two, min_minus_1; - two | max_plus_1 | min_minus_1 ------+----------------------+----------------------- - 1 | 1.2345678901234e+200 | -1 - 2 | 1 | -1.2345678901234e+200 -(2 rows) - -SELECT f1 AS two, - max(f2) + min(f2) AS max_plus_min, - min(f3) - 1 AS min_minus_1 - FROM TEMP_GROUP - GROUP BY f1 - ORDER BY two, min_minus_1; - two | max_plus_min | min_minus_1 ------+--------------+----------------------- - 1 | 0 | -1 - 2 | 0 | -1.2345678901234e+200 -(2 rows) - -DROP TABLE TEMP_INT2; -DROP TABLE TEMP_INT4; -DROP TABLE TEMP_FLOAT; -DROP TABLE TEMP_GROUP; diff --git a/src/test/regress/expected/object_address.out b/src/test/regress/expected/object_address.out index 1fdadbc9ef..4085e451e4 100644 --- a/src/test/regress/expected/object_address.out +++ b/src/test/regress/expected/object_address.out @@ -19,6 +19,9 @@ CREATE TEXT SEARCH PARSER addr_ts_prs CREATE TABLE addr_nsp.gentable ( a serial primary key CONSTRAINT a_chk CHECK (a > 0), b text DEFAULT 'hello'); +CREATE TABLE addr_nsp.parttable ( + a int PRIMARY KEY +) PARTITION BY RANGE (a); CREATE VIEW addr_nsp.genview AS SELECT * from addr_nsp.gentable; CREATE MATERIALIZED VIEW addr_nsp.genmatview AS SELECT * FROM addr_nsp.gentable; CREATE TYPE addr_nsp.gencomptype AS (a int); @@ -29,6 +32,7 @@ CREATE DOMAIN addr_nsp.gendomain AS int4 CONSTRAINT domconstr CHECK (value > 0); CREATE FUNCTION addr_nsp.trig() RETURNS TRIGGER LANGUAGE plpgsql AS $$ BEGIN END; $$; CREATE TRIGGER t BEFORE INSERT ON addr_nsp.gentable FOR EACH ROW EXECUTE PROCEDURE addr_nsp.trig(); CREATE POLICY genpol ON addr_nsp.gentable; +CREATE PROCEDURE addr_nsp.proc(int4) LANGUAGE SQL AS $$ $$; CREATE SERVER "integer" FOREIGN DATA WRAPPER addr_fdw; CREATE USER MAPPING FOR regress_addr_user SERVER "integer"; ALTER DEFAULT PRIVILEGES FOR ROLE regress_addr_user IN SCHEMA public GRANT ALL ON TABLES TO regress_addr_user; @@ -88,7 +92,7 @@ BEGIN ('table'), ('index'), ('sequence'), ('view'), ('materialized view'), ('foreign table'), ('table column'), ('foreign table column'), - ('aggregate'), ('function'), ('type'), ('cast'), + ('aggregate'), ('function'), ('procedure'), ('type'), ('cast'), ('table constraint'), ('domain constraint'), ('conversion'), ('default value'), ('operator'), ('operator class'), ('operator family'), ('rule'), ('trigger'), ('text search parser'), ('text search dictionary'), @@ -171,6 +175,12 @@ WARNING: error for function,{addr_nsp,zwei},{}: function addr_nsp.zwei() does n WARNING: error for function,{addr_nsp,zwei},{integer}: function addr_nsp.zwei(integer) does not exist WARNING: error for function,{eins,zwei,drei},{}: cross-database references are not implemented: eins.zwei.drei WARNING: error for function,{eins,zwei,drei},{integer}: cross-database references are not implemented: eins.zwei.drei +WARNING: error for procedure,{eins},{}: procedure eins() does not exist +WARNING: error for procedure,{eins},{integer}: procedure eins(integer) does not exist +WARNING: error for procedure,{addr_nsp,zwei},{}: procedure addr_nsp.zwei() does not exist +WARNING: error for procedure,{addr_nsp,zwei},{integer}: procedure addr_nsp.zwei(integer) does not exist +WARNING: 
error for procedure,{eins,zwei,drei},{}: cross-database references are not implemented: eins.zwei.drei +WARNING: error for procedure,{eins,zwei,drei},{integer}: cross-database references are not implemented: eins.zwei.drei WARNING: error for type,{eins},{}: type "eins" does not exist WARNING: error for type,{eins},{integer}: type "eins" does not exist WARNING: error for type,{addr_nsp,zwei},{}: name list length must be exactly 1 @@ -361,7 +371,9 @@ ERROR: name list length must be exactly 1 -- test successful cases WITH objects (type, name, args) AS (VALUES ('table', '{addr_nsp, gentable}'::text[], '{}'::text[]), + ('table', '{addr_nsp, parttable}'::text[], '{}'::text[]), ('index', '{addr_nsp, gentable_pkey}', '{}'), + ('index', '{addr_nsp, parttable_pkey}', '{}'), ('sequence', '{addr_nsp, gentable_a_seq}', '{}'), -- toast table ('view', '{addr_nsp, genview}', '{}'), @@ -371,6 +383,7 @@ WITH objects (type, name, args) AS (VALUES ('foreign table column', '{addr_nsp, genftable, a}', '{}'), ('aggregate', '{addr_nsp, genaggr}', '{int4}'), ('function', '{pg_catalog, pg_identify_object}', '{pg_catalog.oid, pg_catalog.oid, int4}'), + ('procedure', '{addr_nsp, proc}', '{int4}'), ('type', '{pg_catalog._int4}', '{}'), ('type', '{addr_nsp.gendomain}', '{}'), ('type', '{addr_nsp.gencomptype}', '{}'), @@ -431,10 +444,13 @@ SELECT (pg_identify_object(addr1.classid, addr1.objid, addr1.objsubid)).*, type | addr_nsp | gendomain | addr_nsp.gendomain | t function | pg_catalog | | pg_catalog.pg_identify_object(pg_catalog.oid,pg_catalog.oid,integer) | t aggregate | addr_nsp | | addr_nsp.genaggr(integer) | t + procedure | addr_nsp | | addr_nsp.proc(integer) | t sequence | addr_nsp | gentable_a_seq | addr_nsp.gentable_a_seq | t table | addr_nsp | gentable | addr_nsp.gentable | t table column | addr_nsp | gentable | addr_nsp.gentable.b | t index | addr_nsp | gentable_pkey | addr_nsp.gentable_pkey | t + table | addr_nsp | parttable | addr_nsp.parttable | t + index | addr_nsp | parttable_pkey | addr_nsp.parttable_pkey | t view | addr_nsp | genview | addr_nsp.genview | t materialized view | addr_nsp | genmatview | addr_nsp.genmatview | t foreign table | addr_nsp | genftable | addr_nsp.genftable | t @@ -468,8 +484,8 @@ SELECT (pg_identify_object(addr1.classid, addr1.objid, addr1.objsubid)).*, text search template | addr_nsp | addr_ts_temp | addr_nsp.addr_ts_temp | t subscription | | addr_sub | addr_sub | t publication | | addr_pub | addr_pub | t - publication relation | | | gentable in publication addr_pub | t -(46 rows) + publication relation | | | addr_nsp.gentable in publication addr_pub | t +(49 rows) --- --- Cleanup resources @@ -480,6 +496,6 @@ NOTICE: drop cascades to 4 other objects DROP PUBLICATION addr_pub; DROP SUBSCRIPTION addr_sub; DROP SCHEMA addr_nsp CASCADE; -NOTICE: drop cascades to 12 other objects +NOTICE: drop cascades to 14 other objects DROP OWNED BY regress_addr_user; DROP USER regress_addr_user; diff --git a/src/test/regress/expected/oidjoins.out b/src/test/regress/expected/oidjoins.out index 234b44fdf2..ef268d348e 100644 --- a/src/test/regress/expected/oidjoins.out +++ b/src/test/regress/expected/oidjoins.out @@ -369,6 +369,14 @@ WHERE conindid != 0 AND ------+---------- (0 rows) +SELECT ctid, conparentid +FROM pg_catalog.pg_constraint fk +WHERE conparentid != 0 AND + NOT EXISTS(SELECT 1 FROM pg_catalog.pg_constraint pk WHERE pk.oid = fk.conparentid); + ctid | conparentid +------+------------- +(0 rows) + SELECT ctid, confrelid FROM pg_catalog.pg_constraint fk WHERE confrelid != 0 AND @@ 
-753,6 +761,14 @@ WHERE partrelid != 0 AND ------+----------- (0 rows) +SELECT ctid, partdefid +FROM pg_catalog.pg_partitioned_table fk +WHERE partdefid != 0 AND + NOT EXISTS(SELECT 1 FROM pg_catalog.pg_class pk WHERE pk.oid = fk.partdefid); + ctid | partdefid +------+----------- +(0 rows) + SELECT ctid, polrelid FROM pg_catalog.pg_policy fk WHERE polrelid != 0 AND diff --git a/src/test/regress/expected/opr_sanity.out b/src/test/regress/expected/opr_sanity.out index fcf8bd7565..c073a5ac3f 100644 --- a/src/test/regress/expected/opr_sanity.out +++ b/src/test/regress/expected/opr_sanity.out @@ -19,6 +19,8 @@ -- Helper functions to deal with cases where binary-coercible matches are -- allowed. -- This should match IsBinaryCoercible() in parse_coerce.c. +-- It doesn't currently know about some cases, notably domains, anyelement, +-- anynonarray, anyenum, or record, but it doesn't need to (yet). create function binary_coercible(oid, oid) returns bool as $$ begin if $1 = $2 then return true; end if; @@ -39,9 +41,11 @@ begin return false; end $$ language plpgsql strict stable; --- This one ignores castcontext, so it considers only physical equivalence --- and not whether the coercion can be invoked implicitly. -create function physically_coercible(oid, oid) returns bool as $$ +-- This one ignores castcontext, so it will allow cases where an explicit +-- (but still binary) cast would be required to convert the input type. +-- We don't currently use this for any tests in this file, but it is a +-- reasonable alternative definition for some scenarios. +create function explicitly_binary_coercible(oid, oid) returns bool as $$ begin if $1 = $2 then return true; end if; if EXISTS(select 1 from pg_catalog.pg_cast where @@ -74,6 +78,7 @@ WHERE p1.prolang = 0 OR p1.prorettype = 0 OR 0::oid = ANY (p1.proargtypes) OR procost <= 0 OR CASE WHEN proretset THEN prorows <= 0 ELSE prorows != 0 END OR + prokind NOT IN ('f', 'a', 'w', 'p') OR provolatile NOT IN ('i', 's', 'v') OR proparallel NOT IN ('s', 'r', 'u'); oid | proname @@ -88,10 +93,10 @@ WHERE prosrc IS NULL OR prosrc = '' OR prosrc = '-'; -----+--------- (0 rows) --- proiswindow shouldn't be set together with proisagg or proretset +-- proretset should only be set for normal functions SELECT p1.oid, p1.proname FROM pg_proc AS p1 -WHERE proiswindow AND (proisagg OR proretset); +WHERE proretset AND prokind != 'f'; oid | proname -----+--------- (0 rows) @@ -154,9 +159,9 @@ FROM pg_proc AS p1, pg_proc AS p2 WHERE p1.oid < p2.oid AND p1.prosrc = p2.prosrc AND p1.prolang = 12 AND p2.prolang = 12 AND - (p1.proisagg = false OR p2.proisagg = false) AND + (p1.prokind != 'a' OR p2.prokind != 'a') AND (p1.prolang != p2.prolang OR - p1.proisagg != p2.proisagg OR + p1.prokind != p2.prokind OR p1.prosecdef != p2.prosecdef OR p1.proleakproof != p2.proleakproof OR p1.proisstrict != p2.proisstrict OR @@ -182,7 +187,7 @@ FROM pg_proc AS p1, pg_proc AS p2 WHERE p1.oid != p2.oid AND p1.prosrc = p2.prosrc AND p1.prolang = 12 AND p2.prolang = 12 AND - NOT p1.proisagg AND NOT p2.proisagg AND + p1.prokind != 'a' AND p2.prokind != 'a' AND p1.prosrc NOT LIKE E'range\\_constructor_' AND p2.prosrc NOT LIKE E'range\\_constructor_' AND (p1.prorettype < p2.prorettype) @@ -198,7 +203,7 @@ FROM pg_proc AS p1, pg_proc AS p2 WHERE p1.oid != p2.oid AND p1.prosrc = p2.prosrc AND p1.prolang = 12 AND p2.prolang = 12 AND - NOT p1.proisagg AND NOT p2.proisagg AND + p1.prokind != 'a' AND p2.prokind != 'a' AND p1.prosrc NOT LIKE E'range\\_constructor_' AND p2.prosrc NOT LIKE E'range\\_constructor_' 
AND (p1.proargtypes[0] < p2.proargtypes[0]) @@ -216,7 +221,7 @@ FROM pg_proc AS p1, pg_proc AS p2 WHERE p1.oid != p2.oid AND p1.prosrc = p2.prosrc AND p1.prolang = 12 AND p2.prolang = 12 AND - NOT p1.proisagg AND NOT p2.proisagg AND + p1.prokind != 'a' AND p2.prokind != 'a' AND p1.prosrc NOT LIKE E'range\\_constructor_' AND p2.prosrc NOT LIKE E'range\\_constructor_' AND (p1.proargtypes[1] < p2.proargtypes[1]) @@ -233,7 +238,7 @@ FROM pg_proc AS p1, pg_proc AS p2 WHERE p1.oid != p2.oid AND p1.prosrc = p2.prosrc AND p1.prolang = 12 AND p2.prolang = 12 AND - NOT p1.proisagg AND NOT p2.proisagg AND + p1.prokind != 'a' AND p2.prokind != 'a' AND (p1.proargtypes[2] < p2.proargtypes[2]) ORDER BY 1, 2; proargtypes | proargtypes @@ -246,7 +251,7 @@ FROM pg_proc AS p1, pg_proc AS p2 WHERE p1.oid != p2.oid AND p1.prosrc = p2.prosrc AND p1.prolang = 12 AND p2.prolang = 12 AND - NOT p1.proisagg AND NOT p2.proisagg AND + p1.prokind != 'a' AND p2.prokind != 'a' AND (p1.proargtypes[3] < p2.proargtypes[3]) ORDER BY 1, 2; proargtypes | proargtypes @@ -259,7 +264,7 @@ FROM pg_proc AS p1, pg_proc AS p2 WHERE p1.oid != p2.oid AND p1.prosrc = p2.prosrc AND p1.prolang = 12 AND p2.prolang = 12 AND - NOT p1.proisagg AND NOT p2.proisagg AND + p1.prokind != 'a' AND p2.prokind != 'a' AND (p1.proargtypes[4] < p2.proargtypes[4]) ORDER BY 1, 2; proargtypes | proargtypes @@ -271,7 +276,7 @@ FROM pg_proc AS p1, pg_proc AS p2 WHERE p1.oid != p2.oid AND p1.prosrc = p2.prosrc AND p1.prolang = 12 AND p2.prolang = 12 AND - NOT p1.proisagg AND NOT p2.proisagg AND + p1.prokind != 'a' AND p2.prokind != 'a' AND (p1.proargtypes[5] < p2.proargtypes[5]) ORDER BY 1, 2; proargtypes | proargtypes @@ -283,7 +288,7 @@ FROM pg_proc AS p1, pg_proc AS p2 WHERE p1.oid != p2.oid AND p1.prosrc = p2.prosrc AND p1.prolang = 12 AND p2.prolang = 12 AND - NOT p1.proisagg AND NOT p2.proisagg AND + p1.prokind != 'a' AND p2.prokind != 'a' AND (p1.proargtypes[6] < p2.proargtypes[6]) ORDER BY 1, 2; proargtypes | proargtypes @@ -295,7 +300,7 @@ FROM pg_proc AS p1, pg_proc AS p2 WHERE p1.oid != p2.oid AND p1.prosrc = p2.prosrc AND p1.prolang = 12 AND p2.prolang = 12 AND - NOT p1.proisagg AND NOT p2.proisagg AND + p1.prokind != 'a' AND p2.prokind != 'a' AND (p1.proargtypes[7] < p2.proargtypes[7]) ORDER BY 1, 2; proargtypes | proargtypes @@ -520,24 +525,6 @@ int24ge(smallint,integer) int42ge(integer,smallint) oideq(oid,oid) oidne(oid,oid) -abstimeeq(abstime,abstime) -abstimene(abstime,abstime) -abstimelt(abstime,abstime) -abstimegt(abstime,abstime) -abstimele(abstime,abstime) -abstimege(abstime,abstime) -reltimeeq(reltime,reltime) -reltimene(reltime,reltime) -reltimelt(reltime,reltime) -reltimegt(reltime,reltime) -reltimele(reltime,reltime) -reltimege(reltime,reltime) -tintervalleneq(tinterval,reltime) -tintervallenne(tinterval,reltime) -tintervallenlt(tinterval,reltime) -tintervallengt(tinterval,reltime) -tintervallenle(tinterval,reltime) -tintervallenge(tinterval,reltime) float4eq(real,real) float4ne(real,real) float4lt(real,real) @@ -562,6 +549,14 @@ float84lt(double precision,real) float84le(double precision,real) float84gt(double precision,real) float84ge(double precision,real) +btint2cmp(smallint,smallint) +btint4cmp(integer,integer) +btfloat4cmp(real,real) +btfloat8cmp(double precision,double precision) +btoidcmp(oid,oid) +btcharcmp("char","char") +btnamecmp(name,name) +cash_cmp(money,money) int8eq(bigint,bigint) int8ne(bigint,bigint) int8lt(bigint,bigint) @@ -581,18 +576,14 @@ namege(name,name) namene(name,name) oidlt(oid,oid) oidle(oid,oid) 
-tintervaleq(tinterval,tinterval) -tintervalne(tinterval,tinterval) -tintervallt(tinterval,tinterval) -tintervalgt(tinterval,tinterval) -tintervalle(tinterval,tinterval) -tintervalge(tinterval,tinterval) macaddr_eq(macaddr,macaddr) macaddr_lt(macaddr,macaddr) macaddr_le(macaddr,macaddr) macaddr_gt(macaddr,macaddr) macaddr_ge(macaddr,macaddr) macaddr_ne(macaddr,macaddr) +macaddr_cmp(macaddr,macaddr) +btint8cmp(bigint,bigint) int48eq(integer,bigint) int48ne(integer,bigint) int48lt(integer,bigint) @@ -611,6 +602,7 @@ network_le(inet,inet) network_gt(inet,inet) network_ge(inet,inet) network_ne(inet,inet) +network_cmp(inet,inet) lseg_eq(lseg,lseg) bpchareq(character,character) bpcharne(character,character) @@ -620,11 +612,13 @@ date_le(date,date) date_gt(date,date) date_ge(date,date) date_ne(date,date) +date_cmp(date,date) time_lt(time without time zone,time without time zone) time_le(time without time zone,time without time zone) time_gt(time without time zone,time without time zone) time_ge(time without time zone,time without time zone) time_ne(time without time zone,time without time zone) +time_cmp(time without time zone,time without time zone) time_eq(time without time zone,time without time zone) timestamptz_eq(timestamp with time zone,timestamp with time zone) timestamptz_ne(timestamp with time zone,timestamp with time zone) @@ -641,6 +635,8 @@ interval_gt(interval,interval) charlt("char","char") tidne(tid,tid) tideq(tid,tid) +timestamptz_cmp(timestamp with time zone,timestamp with time zone) +interval_cmp(interval,interval) xideqint4(xid,integer) timetz_eq(time with time zone,time with time zone) timetz_ne(time with time zone,time with time zone) @@ -648,6 +644,7 @@ timetz_lt(time with time zone,time with time zone) timetz_le(time with time zone,time with time zone) timetz_ge(time with time zone,time with time zone) timetz_gt(time with time zone,time with time zone) +timetz_cmp(time with time zone,time with time zone) circle_eq(circle,circle) circle_ne(circle,circle) circle_lt(circle,circle) @@ -665,6 +662,7 @@ bitge(bit,bit) bitgt(bit,bit) bitle(bit,bit) bitlt(bit,bit) +bitcmp(bit,bit) oidgt(oid,oid) oidge(oid,oid) varbiteq(bit varying,bit varying) @@ -673,8 +671,10 @@ varbitge(bit varying,bit varying) varbitgt(bit varying,bit varying) varbitle(bit varying,bit varying) varbitlt(bit varying,bit varying) +varbitcmp(bit varying,bit varying) boolle(boolean,boolean) boolge(boolean,boolean) +btboolcmp(boolean,boolean) int28eq(smallint,bigint) int28ne(smallint,bigint) int28lt(smallint,bigint) @@ -693,30 +693,50 @@ byteale(bytea,bytea) byteagt(bytea,bytea) byteage(bytea,bytea) byteane(bytea,bytea) +byteacmp(bytea,bytea) +timestamp_cmp(timestamp without time zone,timestamp without time zone) timestamp_eq(timestamp without time zone,timestamp without time zone) timestamp_ne(timestamp without time zone,timestamp without time zone) timestamp_lt(timestamp without time zone,timestamp without time zone) timestamp_le(timestamp without time zone,timestamp without time zone) timestamp_ge(timestamp without time zone,timestamp without time zone) timestamp_gt(timestamp without time zone,timestamp without time zone) +btint48cmp(integer,bigint) +btint84cmp(bigint,integer) +btint24cmp(smallint,integer) +btint42cmp(integer,smallint) +btint28cmp(smallint,bigint) +btint82cmp(bigint,smallint) +btfloat48cmp(real,double precision) +btfloat84cmp(double precision,real) +md5(text) +md5(bytea) tidgt(tid,tid) tidlt(tid,tid) tidge(tid,tid) tidle(tid,tid) +bttidcmp(tid,tid) uuid_lt(uuid,uuid) uuid_le(uuid,uuid) 
uuid_eq(uuid,uuid) uuid_ge(uuid,uuid) uuid_gt(uuid,uuid) uuid_ne(uuid,uuid) +uuid_cmp(uuid,uuid) xidneq(xid,xid) xidneqint4(xid,integer) +sha224(bytea) +sha256(bytea) +sha384(bytea) +sha512(bytea) +starts_with(text,text) macaddr8_eq(macaddr8,macaddr8) macaddr8_lt(macaddr8,macaddr8) macaddr8_le(macaddr8,macaddr8) macaddr8_gt(macaddr8,macaddr8) macaddr8_ge(macaddr8,macaddr8) macaddr8_ne(macaddr8,macaddr8) +macaddr8_cmp(macaddr8,macaddr8) -- restore normal output mode \a\t -- List of functions used by libpq's fe-lobj.c @@ -1031,9 +1051,6 @@ ORDER BY 1, 2; !~* | ~* !~~ | ~~ !~~* | ~~* - #< | #>= - #<= | #> - #<> | #= *< | *>= *<= | *> *<> | *= @@ -1043,7 +1060,7 @@ ORDER BY 1, 2; <> | ~= ~<=~ | ~>~ ~<~ | ~>=~ -(16 rows) +(13 rows) -- A mergejoinable or hashjoinable operator must be binary, must return -- boolean, and must have a commutator (itself, unless it's a cross-type @@ -1208,7 +1225,7 @@ WHERE d.classoid IS NULL AND p1.oid <= 9999; -- Check that operators' underlying functions have suitable comments, -- namely 'implementation of XXX operator'. (Note: it's not necessary to --- put such comments into pg_proc.h; initdb will generate them as needed.) +-- put such comments into pg_proc.dat; initdb will generate them as needed.) -- In some cases involving legacy names for operators, there are multiple -- operators referencing the same pg_proc entry, so ignore operators whose -- comments say they are deprecated. @@ -1275,6 +1292,8 @@ WHERE aggfnoid = 0 OR aggtransfn = 0 OR aggkind NOT IN ('n', 'o', 'h') OR aggnumdirectargs < 0 OR (aggkind = 'n' AND aggnumdirectargs > 0) OR + aggfinalmodify NOT IN ('r', 's', 'w') OR + aggmfinalmodify NOT IN ('r', 's', 'w') OR aggtranstype = 0 OR aggtransspace < 0 OR aggmtransspace < 0; ctid | aggfnoid ------+---------- @@ -1284,15 +1303,15 @@ WHERE aggfnoid = 0 OR aggtransfn = 0 OR SELECT a.aggfnoid::oid, p.proname FROM pg_aggregate as a, pg_proc as p WHERE a.aggfnoid = p.oid AND - (NOT p.proisagg OR p.proretset OR p.pronargs < a.aggnumdirectargs); + (p.prokind != 'a' OR p.proretset OR p.pronargs < a.aggnumdirectargs); aggfnoid | proname ----------+--------- (0 rows) --- Make sure there are no proisagg pg_proc entries without matches. +-- Make sure there are no prokind = PROKIND_AGGREGATE pg_proc entries without matches. SELECT oid, proname FROM pg_proc as p -WHERE p.proisagg AND +WHERE p.prokind = 'a' AND NOT EXISTS (SELECT 1 FROM pg_aggregate a WHERE a.aggfnoid = p.oid); oid | proname -----+--------- @@ -1308,8 +1327,6 @@ WHERE a.aggfnoid = p.oid AND (0 rows) -- Cross-check transfn against its entry in pg_proc. --- NOTE: use physically_coercible here, not binary_coercible, because --- max and min on abstime are implemented using int4larger/int4smaller. 
SELECT a.aggfnoid::oid, p.proname, ptr.oid, ptr.proname FROM pg_aggregate AS a, pg_proc AS p, pg_proc AS ptr WHERE a.aggfnoid = p.oid AND @@ -1318,15 +1335,16 @@ WHERE a.aggfnoid = p.oid AND OR NOT (ptr.pronargs = CASE WHEN a.aggkind = 'n' THEN p.pronargs + 1 ELSE greatest(p.pronargs - a.aggnumdirectargs, 1) + 1 END) - OR NOT physically_coercible(ptr.prorettype, a.aggtranstype) - OR NOT physically_coercible(a.aggtranstype, ptr.proargtypes[0]) + OR NOT binary_coercible(ptr.prorettype, a.aggtranstype) + OR NOT binary_coercible(a.aggtranstype, ptr.proargtypes[0]) OR (p.pronargs > 0 AND - NOT physically_coercible(p.proargtypes[0], ptr.proargtypes[1])) + NOT binary_coercible(p.proargtypes[0], ptr.proargtypes[1])) OR (p.pronargs > 1 AND - NOT physically_coercible(p.proargtypes[1], ptr.proargtypes[2])) + NOT binary_coercible(p.proargtypes[1], ptr.proargtypes[2])) OR (p.pronargs > 2 AND - NOT physically_coercible(p.proargtypes[2], ptr.proargtypes[3])) + NOT binary_coercible(p.proargtypes[2], ptr.proargtypes[3])) -- we could carry the check further, but 3 args is enough for now + OR (p.pronargs > 3) ); aggfnoid | proname | oid | proname ----------+---------+-----+--------- @@ -1348,7 +1366,8 @@ WHERE a.aggfnoid = p.oid AND NOT binary_coercible(p.proargtypes[1], pfn.proargtypes[2])) OR (pfn.pronargs > 3 AND NOT binary_coercible(p.proargtypes[2], pfn.proargtypes[3])) - -- we could carry the check further, but 3 args is enough for now + -- we could carry the check further, but 4 args is enough for now + OR (pfn.pronargs > 4) ); aggfnoid | proname | oid | proname ----------+---------+-----+--------- @@ -1404,15 +1423,16 @@ WHERE a.aggfnoid = p.oid AND OR NOT (ptr.pronargs = CASE WHEN a.aggkind = 'n' THEN p.pronargs + 1 ELSE greatest(p.pronargs - a.aggnumdirectargs, 1) + 1 END) - OR NOT physically_coercible(ptr.prorettype, a.aggmtranstype) - OR NOT physically_coercible(a.aggmtranstype, ptr.proargtypes[0]) + OR NOT binary_coercible(ptr.prorettype, a.aggmtranstype) + OR NOT binary_coercible(a.aggmtranstype, ptr.proargtypes[0]) OR (p.pronargs > 0 AND - NOT physically_coercible(p.proargtypes[0], ptr.proargtypes[1])) + NOT binary_coercible(p.proargtypes[0], ptr.proargtypes[1])) OR (p.pronargs > 1 AND - NOT physically_coercible(p.proargtypes[1], ptr.proargtypes[2])) + NOT binary_coercible(p.proargtypes[1], ptr.proargtypes[2])) OR (p.pronargs > 2 AND - NOT physically_coercible(p.proargtypes[2], ptr.proargtypes[3])) + NOT binary_coercible(p.proargtypes[2], ptr.proargtypes[3])) -- we could carry the check further, but 3 args is enough for now + OR (p.pronargs > 3) ); aggfnoid | proname | oid | proname ----------+---------+-----+--------- @@ -1427,15 +1447,16 @@ WHERE a.aggfnoid = p.oid AND OR NOT (ptr.pronargs = CASE WHEN a.aggkind = 'n' THEN p.pronargs + 1 ELSE greatest(p.pronargs - a.aggnumdirectargs, 1) + 1 END) - OR NOT physically_coercible(ptr.prorettype, a.aggmtranstype) - OR NOT physically_coercible(a.aggmtranstype, ptr.proargtypes[0]) + OR NOT binary_coercible(ptr.prorettype, a.aggmtranstype) + OR NOT binary_coercible(a.aggmtranstype, ptr.proargtypes[0]) OR (p.pronargs > 0 AND - NOT physically_coercible(p.proargtypes[0], ptr.proargtypes[1])) + NOT binary_coercible(p.proargtypes[0], ptr.proargtypes[1])) OR (p.pronargs > 1 AND - NOT physically_coercible(p.proargtypes[1], ptr.proargtypes[2])) + NOT binary_coercible(p.proargtypes[1], ptr.proargtypes[2])) OR (p.pronargs > 2 AND - NOT physically_coercible(p.proargtypes[2], ptr.proargtypes[3])) + NOT binary_coercible(p.proargtypes[2], ptr.proargtypes[3])) -- we 
could carry the check further, but 3 args is enough for now + OR (p.pronargs > 3) ); aggfnoid | proname | oid | proname ----------+---------+-----+--------- @@ -1457,7 +1478,8 @@ WHERE a.aggfnoid = p.oid AND NOT binary_coercible(p.proargtypes[1], pfn.proargtypes[2])) OR (pfn.pronargs > 3 AND NOT binary_coercible(p.proargtypes[2], pfn.proargtypes[3])) - -- we could carry the check further, but 3 args is enough for now + -- we could carry the check further, but 4 args is enough for now + OR (pfn.pronargs > 4) ); aggfnoid | proname | oid | proname ----------+---------+-----+--------- @@ -1489,15 +1511,13 @@ WHERE a.aggfnoid = p.oid AND -- Check that all combine functions have signature -- combine(transtype, transtype) returns transtype --- NOTE: use physically_coercible here, not binary_coercible, because --- max and min on abstime are implemented using int4larger/int4smaller. SELECT a.aggfnoid, p.proname FROM pg_aggregate as a, pg_proc as p WHERE a.aggcombinefn = p.oid AND (p.pronargs != 2 OR p.prorettype != p.proargtypes[0] OR p.prorettype != p.proargtypes[1] OR - NOT physically_coercible(a.aggtranstype, p.proargtypes[0])); + NOT binary_coercible(a.aggtranstype, p.proargtypes[0])); aggfnoid | proname ----------+--------- (0 rows) @@ -1631,7 +1651,7 @@ ORDER BY 1, 2; SELECT p1.oid::regprocedure, p2.oid::regprocedure FROM pg_proc AS p1, pg_proc AS p2 WHERE p1.oid < p2.oid AND p1.proname = p2.proname AND - p1.proisagg AND p2.proisagg AND + p1.prokind = 'a' AND p2.prokind = 'a' AND array_dims(p1.proargtypes) != array_dims(p2.proargtypes) ORDER BY 1; oid | oid @@ -1642,7 +1662,7 @@ ORDER BY 1; -- For the same reason, built-in aggregates with default arguments are no good. SELECT oid, proname FROM pg_proc AS p -WHERE proisagg AND proargdefaults IS NOT NULL; +WHERE prokind = 'a' AND proargdefaults IS NOT NULL; oid | proname -----+--------- (0 rows) @@ -1652,7 +1672,7 @@ WHERE proisagg AND proargdefaults IS NOT NULL; -- that is not subject to the misplaced ORDER BY issue). SELECT p.oid, proname FROM pg_proc AS p JOIN pg_aggregate AS a ON a.aggfnoid = p.oid -WHERE proisagg AND provariadic != 0 AND a.aggkind = 'n'; +WHERE prokind = 'a' AND provariadic != 0 AND a.aggkind = 'n'; oid | proname -----+--------- (0 rows) @@ -1666,6 +1686,16 @@ WHERE p1.opfmethod = 0 OR p1.opfnamespace = 0; ----- (0 rows) +-- Look for opfamilies having no opclasses. While most validation of +-- opfamilies is now handled by AM-specific amvalidate functions, that's +-- driven from pg_opclass entries below, so an empty opfamily would not +-- get noticed. +SELECT oid, opfname FROM pg_opfamily f +WHERE NOT EXISTS (SELECT 1 FROM pg_opclass WHERE opcfamily = f.oid); + oid | opfname +-----+--------- +(0 rows) + -- **************** pg_opclass **************** -- Look for illegal values in pg_opclass fields SELECT p1.oid @@ -1866,6 +1896,7 @@ ORDER BY 1, 2, 3; 4000 | 12 | <= 4000 | 12 | |&> 4000 | 14 | >= + 4000 | 15 | <-> 4000 | 15 | > 4000 | 16 | @> 4000 | 18 | = @@ -1878,7 +1909,8 @@ ORDER BY 1, 2, 3; 4000 | 25 | <<= 4000 | 26 | >> 4000 | 27 | >>= -(121 rows) + 4000 | 28 | ^@ +(123 rows) -- Check that all opclass search operators have selectivity estimators. 
-- This is not absolutely required, but it seems a reasonable thing diff --git a/src/test/regress/expected/partition_aggregate.out b/src/test/regress/expected/partition_aggregate.out new file mode 100644 index 0000000000..6bc106831e --- /dev/null +++ b/src/test/regress/expected/partition_aggregate.out @@ -0,0 +1,1533 @@ +-- +-- PARTITION_AGGREGATE +-- Test partitionwise aggregation on partitioned tables +-- +-- Enable partitionwise aggregate, which by default is disabled. +SET enable_partitionwise_aggregate TO true; +-- Enable partitionwise join, which by default is disabled. +SET enable_partitionwise_join TO true; +-- Disable parallel plans. +SET max_parallel_workers_per_gather TO 0; +-- +-- Tests for list partitioned tables. +-- +CREATE TABLE pagg_tab (a int, b int, c text, d int) PARTITION BY LIST(c); +CREATE TABLE pagg_tab_p1 PARTITION OF pagg_tab FOR VALUES IN ('0000', '0001', '0002', '0003'); +CREATE TABLE pagg_tab_p2 PARTITION OF pagg_tab FOR VALUES IN ('0004', '0005', '0006', '0007'); +CREATE TABLE pagg_tab_p3 PARTITION OF pagg_tab FOR VALUES IN ('0008', '0009', '0010', '0011'); +INSERT INTO pagg_tab SELECT i % 20, i % 30, to_char(i % 12, 'FM0000'), i % 30 FROM generate_series(0, 2999) i; +ANALYZE pagg_tab; +-- When GROUP BY clause matches; full aggregation is performed for each partition. +EXPLAIN (COSTS OFF) +SELECT c, sum(a), avg(b), count(*), min(a), max(b) FROM pagg_tab GROUP BY c HAVING avg(d) < 15 ORDER BY 1, 2, 3; + QUERY PLAN +----------------------------------------------------------------------- + Sort + Sort Key: pagg_tab_p1.c, (sum(pagg_tab_p1.a)), (avg(pagg_tab_p1.b)) + -> Append + -> HashAggregate + Group Key: pagg_tab_p1.c + Filter: (avg(pagg_tab_p1.d) < '15'::numeric) + -> Seq Scan on pagg_tab_p1 + -> HashAggregate + Group Key: pagg_tab_p2.c + Filter: (avg(pagg_tab_p2.d) < '15'::numeric) + -> Seq Scan on pagg_tab_p2 + -> HashAggregate + Group Key: pagg_tab_p3.c + Filter: (avg(pagg_tab_p3.d) < '15'::numeric) + -> Seq Scan on pagg_tab_p3 +(15 rows) + +SELECT c, sum(a), avg(b), count(*), min(a), max(b) FROM pagg_tab GROUP BY c HAVING avg(d) < 15 ORDER BY 1, 2, 3; + c | sum | avg | count | min | max +------+------+---------------------+-------+-----+----- + 0000 | 2000 | 12.0000000000000000 | 250 | 0 | 24 + 0001 | 2250 | 13.0000000000000000 | 250 | 1 | 25 + 0002 | 2500 | 14.0000000000000000 | 250 | 2 | 26 + 0006 | 2500 | 12.0000000000000000 | 250 | 2 | 24 + 0007 | 2750 | 13.0000000000000000 | 250 | 3 | 25 + 0008 | 2000 | 14.0000000000000000 | 250 | 0 | 26 +(6 rows) + +-- When GROUP BY clause does not match; partial aggregation is performed for each partition. 
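Before reading the plan below, it helps to spell out what "partial aggregation" means in these tests: when the GROUP BY key is the partition key, every group lives entirely inside one partition, so each partition can be aggregated to completion and the results simply appended; when it is not, each partition only produces partial transition states, and a Finalize aggregation step above the Append combines them. The following sketch is not part of the patch; it is a minimal way to see both shapes in a scratch session, assuming the pagg_tab setup created at the top of this test file:

    EXPLAIN (COSTS OFF)
    SELECT c, count(*) FROM pagg_tab GROUP BY c;  -- c is the partition key: Append of full HashAggregates
    EXPLAIN (COSTS OFF)
    SELECT a, count(*) FROM pagg_tab GROUP BY a;  -- a is not: Partial HashAggregates under one Finalize step

The expected output that follows shows the second, Finalize/Partial shape for the test's own query.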
+EXPLAIN (COSTS OFF) +SELECT a, sum(b), avg(b), count(*), min(a), max(b) FROM pagg_tab GROUP BY a HAVING avg(d) < 15 ORDER BY 1, 2, 3; + QUERY PLAN +----------------------------------------------------------------------- + Sort + Sort Key: pagg_tab_p1.a, (sum(pagg_tab_p1.b)), (avg(pagg_tab_p1.b)) + -> Finalize HashAggregate + Group Key: pagg_tab_p1.a + Filter: (avg(pagg_tab_p1.d) < '15'::numeric) + -> Append + -> Partial HashAggregate + Group Key: pagg_tab_p1.a + -> Seq Scan on pagg_tab_p1 + -> Partial HashAggregate + Group Key: pagg_tab_p2.a + -> Seq Scan on pagg_tab_p2 + -> Partial HashAggregate + Group Key: pagg_tab_p3.a + -> Seq Scan on pagg_tab_p3 +(15 rows) + +SELECT a, sum(b), avg(b), count(*), min(a), max(b) FROM pagg_tab GROUP BY a HAVING avg(d) < 15 ORDER BY 1, 2, 3; + a | sum | avg | count | min | max +----+------+---------------------+-------+-----+----- + 0 | 1500 | 10.0000000000000000 | 150 | 0 | 20 + 1 | 1650 | 11.0000000000000000 | 150 | 1 | 21 + 2 | 1800 | 12.0000000000000000 | 150 | 2 | 22 + 3 | 1950 | 13.0000000000000000 | 150 | 3 | 23 + 4 | 2100 | 14.0000000000000000 | 150 | 4 | 24 + 10 | 1500 | 10.0000000000000000 | 150 | 10 | 20 + 11 | 1650 | 11.0000000000000000 | 150 | 11 | 21 + 12 | 1800 | 12.0000000000000000 | 150 | 12 | 22 + 13 | 1950 | 13.0000000000000000 | 150 | 13 | 23 + 14 | 2100 | 14.0000000000000000 | 150 | 14 | 24 +(10 rows) + +-- Check with multiple columns in GROUP BY +EXPLAIN (COSTS OFF) +SELECT a, c, count(*) FROM pagg_tab GROUP BY a, c; + QUERY PLAN +------------------------------------------------- + Append + -> HashAggregate + Group Key: pagg_tab_p1.a, pagg_tab_p1.c + -> Seq Scan on pagg_tab_p1 + -> HashAggregate + Group Key: pagg_tab_p2.a, pagg_tab_p2.c + -> Seq Scan on pagg_tab_p2 + -> HashAggregate + Group Key: pagg_tab_p3.a, pagg_tab_p3.c + -> Seq Scan on pagg_tab_p3 +(10 rows) + +-- Check with multiple columns in GROUP BY, order in GROUP BY is reversed +EXPLAIN (COSTS OFF) +SELECT a, c, count(*) FROM pagg_tab GROUP BY c, a; + QUERY PLAN +------------------------------------------------- + Append + -> HashAggregate + Group Key: pagg_tab_p1.c, pagg_tab_p1.a + -> Seq Scan on pagg_tab_p1 + -> HashAggregate + Group Key: pagg_tab_p2.c, pagg_tab_p2.a + -> Seq Scan on pagg_tab_p2 + -> HashAggregate + Group Key: pagg_tab_p3.c, pagg_tab_p3.a + -> Seq Scan on pagg_tab_p3 +(10 rows) + +-- Check with multiple columns in GROUP BY, order in target-list is reversed +EXPLAIN (COSTS OFF) +SELECT c, a, count(*) FROM pagg_tab GROUP BY a, c; + QUERY PLAN +------------------------------------------------- + Append + -> HashAggregate + Group Key: pagg_tab_p1.a, pagg_tab_p1.c + -> Seq Scan on pagg_tab_p1 + -> HashAggregate + Group Key: pagg_tab_p2.a, pagg_tab_p2.c + -> Seq Scan on pagg_tab_p2 + -> HashAggregate + Group Key: pagg_tab_p3.a, pagg_tab_p3.c + -> Seq Scan on pagg_tab_p3 +(10 rows) + +-- Test when input relation for grouping is dummy +EXPLAIN (COSTS OFF) +SELECT c, sum(a) FROM pagg_tab WHERE 1 = 2 GROUP BY c; + QUERY PLAN +-------------------------------- + HashAggregate + Group Key: pagg_tab.c + -> Result + One-Time Filter: false +(4 rows) + +SELECT c, sum(a) FROM pagg_tab WHERE 1 = 2 GROUP BY c; + c | sum +---+----- +(0 rows) + +EXPLAIN (COSTS OFF) +SELECT c, sum(a) FROM pagg_tab WHERE c = 'x' GROUP BY c; + QUERY PLAN +-------------------------------- + GroupAggregate + Group Key: pagg_tab.c + -> Result + One-Time Filter: false +(4 rows) + +SELECT c, sum(a) FROM pagg_tab WHERE c = 'x' GROUP BY c; + c | sum +---+----- +(0 rows) + +-- Test GroupAggregate 
paths by disabling hash aggregates. +SET enable_hashagg TO false; +-- When GROUP BY clause matches full aggregation is performed for each partition. +EXPLAIN (COSTS OFF) +SELECT c, sum(a), avg(b), count(*) FROM pagg_tab GROUP BY 1 HAVING avg(d) < 15 ORDER BY 1, 2, 3; + QUERY PLAN +----------------------------------------------------------------------- + Sort + Sort Key: pagg_tab_p1.c, (sum(pagg_tab_p1.a)), (avg(pagg_tab_p1.b)) + -> Append + -> GroupAggregate + Group Key: pagg_tab_p1.c + Filter: (avg(pagg_tab_p1.d) < '15'::numeric) + -> Sort + Sort Key: pagg_tab_p1.c + -> Seq Scan on pagg_tab_p1 + -> GroupAggregate + Group Key: pagg_tab_p2.c + Filter: (avg(pagg_tab_p2.d) < '15'::numeric) + -> Sort + Sort Key: pagg_tab_p2.c + -> Seq Scan on pagg_tab_p2 + -> GroupAggregate + Group Key: pagg_tab_p3.c + Filter: (avg(pagg_tab_p3.d) < '15'::numeric) + -> Sort + Sort Key: pagg_tab_p3.c + -> Seq Scan on pagg_tab_p3 +(21 rows) + +SELECT c, sum(a), avg(b), count(*) FROM pagg_tab GROUP BY 1 HAVING avg(d) < 15 ORDER BY 1, 2, 3; + c | sum | avg | count +------+------+---------------------+------- + 0000 | 2000 | 12.0000000000000000 | 250 + 0001 | 2250 | 13.0000000000000000 | 250 + 0002 | 2500 | 14.0000000000000000 | 250 + 0006 | 2500 | 12.0000000000000000 | 250 + 0007 | 2750 | 13.0000000000000000 | 250 + 0008 | 2000 | 14.0000000000000000 | 250 +(6 rows) + +-- When GROUP BY clause does not match; partial aggregation is performed for each partition. +EXPLAIN (COSTS OFF) +SELECT a, sum(b), avg(b), count(*) FROM pagg_tab GROUP BY 1 HAVING avg(d) < 15 ORDER BY 1, 2, 3; + QUERY PLAN +----------------------------------------------------------------------- + Sort + Sort Key: pagg_tab_p1.a, (sum(pagg_tab_p1.b)), (avg(pagg_tab_p1.b)) + -> Finalize GroupAggregate + Group Key: pagg_tab_p1.a + Filter: (avg(pagg_tab_p1.d) < '15'::numeric) + -> Merge Append + Sort Key: pagg_tab_p1.a + -> Partial GroupAggregate + Group Key: pagg_tab_p1.a + -> Sort + Sort Key: pagg_tab_p1.a + -> Seq Scan on pagg_tab_p1 + -> Partial GroupAggregate + Group Key: pagg_tab_p2.a + -> Sort + Sort Key: pagg_tab_p2.a + -> Seq Scan on pagg_tab_p2 + -> Partial GroupAggregate + Group Key: pagg_tab_p3.a + -> Sort + Sort Key: pagg_tab_p3.a + -> Seq Scan on pagg_tab_p3 +(22 rows) + +SELECT a, sum(b), avg(b), count(*) FROM pagg_tab GROUP BY 1 HAVING avg(d) < 15 ORDER BY 1, 2, 3; + a | sum | avg | count +----+------+---------------------+------- + 0 | 1500 | 10.0000000000000000 | 150 + 1 | 1650 | 11.0000000000000000 | 150 + 2 | 1800 | 12.0000000000000000 | 150 + 3 | 1950 | 13.0000000000000000 | 150 + 4 | 2100 | 14.0000000000000000 | 150 + 10 | 1500 | 10.0000000000000000 | 150 + 11 | 1650 | 11.0000000000000000 | 150 + 12 | 1800 | 12.0000000000000000 | 150 + 13 | 1950 | 13.0000000000000000 | 150 + 14 | 2100 | 14.0000000000000000 | 150 +(10 rows) + +-- Test partitionwise grouping without any aggregates +EXPLAIN (COSTS OFF) +SELECT c FROM pagg_tab GROUP BY c ORDER BY 1; + QUERY PLAN +------------------------------------------- + Merge Append + Sort Key: pagg_tab_p1.c + -> Group + Group Key: pagg_tab_p1.c + -> Sort + Sort Key: pagg_tab_p1.c + -> Seq Scan on pagg_tab_p1 + -> Group + Group Key: pagg_tab_p2.c + -> Sort + Sort Key: pagg_tab_p2.c + -> Seq Scan on pagg_tab_p2 + -> Group + Group Key: pagg_tab_p3.c + -> Sort + Sort Key: pagg_tab_p3.c + -> Seq Scan on pagg_tab_p3 +(17 rows) + +SELECT c FROM pagg_tab GROUP BY c ORDER BY 1; + c +------ + 0000 + 0001 + 0002 + 0003 + 0004 + 0005 + 0006 + 0007 + 0008 + 0009 + 0010 + 0011 +(12 rows) + +EXPLAIN (COSTS OFF) 
+SELECT a FROM pagg_tab WHERE a < 3 GROUP BY a ORDER BY 1; + QUERY PLAN +------------------------------------------------- + Group + Group Key: pagg_tab_p1.a + -> Merge Append + Sort Key: pagg_tab_p1.a + -> Group + Group Key: pagg_tab_p1.a + -> Sort + Sort Key: pagg_tab_p1.a + -> Seq Scan on pagg_tab_p1 + Filter: (a < 3) + -> Group + Group Key: pagg_tab_p2.a + -> Sort + Sort Key: pagg_tab_p2.a + -> Seq Scan on pagg_tab_p2 + Filter: (a < 3) + -> Group + Group Key: pagg_tab_p3.a + -> Sort + Sort Key: pagg_tab_p3.a + -> Seq Scan on pagg_tab_p3 + Filter: (a < 3) +(22 rows) + +SELECT a FROM pagg_tab WHERE a < 3 GROUP BY a ORDER BY 1; + a +--- + 0 + 1 + 2 +(3 rows) + +RESET enable_hashagg; +-- ROLLUP, partitionwise aggregation does not apply +EXPLAIN (COSTS OFF) +SELECT c, sum(a) FROM pagg_tab GROUP BY rollup(c) ORDER BY 1, 2; + QUERY PLAN +------------------------------------------------- + Sort + Sort Key: pagg_tab_p1.c, (sum(pagg_tab_p1.a)) + -> MixedAggregate + Hash Key: pagg_tab_p1.c + Group Key: () + -> Append + -> Seq Scan on pagg_tab_p1 + -> Seq Scan on pagg_tab_p2 + -> Seq Scan on pagg_tab_p3 +(9 rows) + +-- ORDERED SET within the aggregate. +-- Full aggregation; since all the rows that belong to the same group come +-- from the same partition, having an ORDER BY within the aggregate doesn't +-- make any difference. +EXPLAIN (COSTS OFF) +SELECT c, sum(b order by a) FROM pagg_tab GROUP BY c ORDER BY 1, 2; + QUERY PLAN +------------------------------------------------------------------------ + Sort + Sort Key: pagg_tab_p1.c, (sum(pagg_tab_p1.b ORDER BY pagg_tab_p1.a)) + -> Append + -> GroupAggregate + Group Key: pagg_tab_p1.c + -> Sort + Sort Key: pagg_tab_p1.c + -> Seq Scan on pagg_tab_p1 + -> GroupAggregate + Group Key: pagg_tab_p2.c + -> Sort + Sort Key: pagg_tab_p2.c + -> Seq Scan on pagg_tab_p2 + -> GroupAggregate + Group Key: pagg_tab_p3.c + -> Sort + Sort Key: pagg_tab_p3.c + -> Seq Scan on pagg_tab_p3 +(18 rows) + +-- Since GROUP BY clause does not match with PARTITION KEY; we need to do +-- partial aggregation. However, ORDERED SET are not partial safe and thus +-- partitionwise aggregation plan is not generated. +EXPLAIN (COSTS OFF) +SELECT a, sum(b order by a) FROM pagg_tab GROUP BY a ORDER BY 1, 2; + QUERY PLAN +------------------------------------------------------------------------ + Sort + Sort Key: pagg_tab_p1.a, (sum(pagg_tab_p1.b ORDER BY pagg_tab_p1.a)) + -> GroupAggregate + Group Key: pagg_tab_p1.a + -> Sort + Sort Key: pagg_tab_p1.a + -> Append + -> Seq Scan on pagg_tab_p1 + -> Seq Scan on pagg_tab_p2 + -> Seq Scan on pagg_tab_p3 +(10 rows) + +-- JOIN query +CREATE TABLE pagg_tab1(x int, y int) PARTITION BY RANGE(x); +CREATE TABLE pagg_tab1_p1 PARTITION OF pagg_tab1 FOR VALUES FROM (0) TO (10); +CREATE TABLE pagg_tab1_p2 PARTITION OF pagg_tab1 FOR VALUES FROM (10) TO (20); +CREATE TABLE pagg_tab1_p3 PARTITION OF pagg_tab1 FOR VALUES FROM (20) TO (30); +CREATE TABLE pagg_tab2(x int, y int) PARTITION BY RANGE(y); +CREATE TABLE pagg_tab2_p1 PARTITION OF pagg_tab2 FOR VALUES FROM (0) TO (10); +CREATE TABLE pagg_tab2_p2 PARTITION OF pagg_tab2 FOR VALUES FROM (10) TO (20); +CREATE TABLE pagg_tab2_p3 PARTITION OF pagg_tab2 FOR VALUES FROM (20) TO (30); +INSERT INTO pagg_tab1 SELECT i % 30, i % 20 FROM generate_series(0, 299, 2) i; +INSERT INTO pagg_tab2 SELECT i % 20, i % 30 FROM generate_series(0, 299, 3) i; +ANALYZE pagg_tab1; +ANALYZE pagg_tab2; +-- When GROUP BY clause matches; full aggregation is performed for each partition. 
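The join tests that follow depend on two things working together: pagg_tab1 and pagg_tab2 are range-partitioned on the join columns with identical bounds, so enable_partitionwise_join lets the planner join matching partition pairs one by one, and enable_partitionwise_aggregate then lets each joined pair be aggregated on its own whenever the GROUP BY key is the partition key. A small sketch of that dependency, not part of the patch and assuming the pagg_tab1/pagg_tab2 setup just above:

    SET enable_partitionwise_join TO false;
    EXPLAIN (COSTS OFF)
    SELECT t1.x, count(*) FROM pagg_tab1 t1 JOIN pagg_tab2 t2 ON t1.x = t2.y GROUP BY t1.x;
    -- with the join no longer partitionwise, its output is not partitioned,
    -- so only a plain aggregate over the whole join result is possible
    SET enable_partitionwise_join TO true;

With both settings enabled, the expected plan below is an Append of per-partition-pair joins, each topped by its own full HashAggregate.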
+EXPLAIN (COSTS OFF) +SELECT t1.x, sum(t1.y), count(*) FROM pagg_tab1 t1, pagg_tab2 t2 WHERE t1.x = t2.y GROUP BY t1.x ORDER BY 1, 2, 3; + QUERY PLAN +------------------------------------------------------------- + Sort + Sort Key: t1.x, (sum(t1.y)), (count(*)) + -> Append + -> HashAggregate + Group Key: t1.x + -> Hash Join + Hash Cond: (t1.x = t2.y) + -> Seq Scan on pagg_tab1_p1 t1 + -> Hash + -> Seq Scan on pagg_tab2_p1 t2 + -> HashAggregate + Group Key: t1_1.x + -> Hash Join + Hash Cond: (t1_1.x = t2_1.y) + -> Seq Scan on pagg_tab1_p2 t1_1 + -> Hash + -> Seq Scan on pagg_tab2_p2 t2_1 + -> HashAggregate + Group Key: t1_2.x + -> Hash Join + Hash Cond: (t2_2.y = t1_2.x) + -> Seq Scan on pagg_tab2_p3 t2_2 + -> Hash + -> Seq Scan on pagg_tab1_p3 t1_2 +(24 rows) + +SELECT t1.x, sum(t1.y), count(*) FROM pagg_tab1 t1, pagg_tab2 t2 WHERE t1.x = t2.y GROUP BY t1.x ORDER BY 1, 2, 3; + x | sum | count +----+------+------- + 0 | 500 | 100 + 6 | 1100 | 100 + 12 | 700 | 100 + 18 | 1300 | 100 + 24 | 900 | 100 +(5 rows) + +-- Check with whole-row reference; partitionwise aggregation does not apply +EXPLAIN (COSTS OFF) +SELECT t1.x, sum(t1.y), count(t1) FROM pagg_tab1 t1, pagg_tab2 t2 WHERE t1.x = t2.y GROUP BY t1.x ORDER BY 1, 2, 3; + QUERY PLAN +------------------------------------------------------------- + Sort + Sort Key: t1.x, (sum(t1.y)), (count(((t1.*)::pagg_tab1))) + -> HashAggregate + Group Key: t1.x + -> Hash Join + Hash Cond: (t1.x = t2.y) + -> Append + -> Seq Scan on pagg_tab1_p1 t1 + -> Seq Scan on pagg_tab1_p2 t1_1 + -> Seq Scan on pagg_tab1_p3 t1_2 + -> Hash + -> Append + -> Seq Scan on pagg_tab2_p1 t2 + -> Seq Scan on pagg_tab2_p2 t2_1 + -> Seq Scan on pagg_tab2_p3 t2_2 +(15 rows) + +SELECT t1.x, sum(t1.y), count(t1) FROM pagg_tab1 t1, pagg_tab2 t2 WHERE t1.x = t2.y GROUP BY t1.x ORDER BY 1, 2, 3; + x | sum | count +----+------+------- + 0 | 500 | 100 + 6 | 1100 | 100 + 12 | 700 | 100 + 18 | 1300 | 100 + 24 | 900 | 100 +(5 rows) + +-- GROUP BY having other matching key +EXPLAIN (COSTS OFF) +SELECT t2.y, sum(t1.y), count(*) FROM pagg_tab1 t1, pagg_tab2 t2 WHERE t1.x = t2.y GROUP BY t2.y ORDER BY 1, 2, 3; + QUERY PLAN +------------------------------------------------------------- + Sort + Sort Key: t2.y, (sum(t1.y)), (count(*)) + -> Append + -> HashAggregate + Group Key: t2.y + -> Hash Join + Hash Cond: (t1.x = t2.y) + -> Seq Scan on pagg_tab1_p1 t1 + -> Hash + -> Seq Scan on pagg_tab2_p1 t2 + -> HashAggregate + Group Key: t2_1.y + -> Hash Join + Hash Cond: (t1_1.x = t2_1.y) + -> Seq Scan on pagg_tab1_p2 t1_1 + -> Hash + -> Seq Scan on pagg_tab2_p2 t2_1 + -> HashAggregate + Group Key: t2_2.y + -> Hash Join + Hash Cond: (t2_2.y = t1_2.x) + -> Seq Scan on pagg_tab2_p3 t2_2 + -> Hash + -> Seq Scan on pagg_tab1_p3 t1_2 +(24 rows) + +-- When GROUP BY clause does not match; partial aggregation is performed for each partition. +-- Also test GroupAggregate paths by disabling hash aggregates. 
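Disabling enable_hashagg simply steers the planner toward sort-based grouping, so the same partitionwise shapes should reappear as GroupAggregate (fed by explicit Sorts, or by a Merge Append in the partial case) rather than HashAggregate. A minimal sketch, not part of the patch and assuming the pagg_tab2 table defined above:

    SET enable_hashagg TO false;
    EXPLAIN (COSTS OFF)
    SELECT y, count(*) FROM pagg_tab2 GROUP BY y;  -- y is the partition key; expect an Append of
                                                   -- GroupAggregate-over-Sort plans, one per partition
    RESET enable_hashagg;

The test below applies the same switch to the partial, join-feeding case.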
+SET enable_hashagg TO false; +EXPLAIN (COSTS OFF) +SELECT t1.y, sum(t1.x), count(*) FROM pagg_tab1 t1, pagg_tab2 t2 WHERE t1.x = t2.y GROUP BY t1.y HAVING avg(t1.x) > 10 ORDER BY 1, 2, 3; + QUERY PLAN +------------------------------------------------------------------------- + Sort + Sort Key: t1.y, (sum(t1.x)), (count(*)) + -> Finalize GroupAggregate + Group Key: t1.y + Filter: (avg(t1.x) > '10'::numeric) + -> Merge Append + Sort Key: t1.y + -> Partial GroupAggregate + Group Key: t1.y + -> Sort + Sort Key: t1.y + -> Hash Join + Hash Cond: (t1.x = t2.y) + -> Seq Scan on pagg_tab1_p1 t1 + -> Hash + -> Seq Scan on pagg_tab2_p1 t2 + -> Partial GroupAggregate + Group Key: t1_1.y + -> Sort + Sort Key: t1_1.y + -> Hash Join + Hash Cond: (t1_1.x = t2_1.y) + -> Seq Scan on pagg_tab1_p2 t1_1 + -> Hash + -> Seq Scan on pagg_tab2_p2 t2_1 + -> Partial GroupAggregate + Group Key: t1_2.y + -> Sort + Sort Key: t1_2.y + -> Hash Join + Hash Cond: (t2_2.y = t1_2.x) + -> Seq Scan on pagg_tab2_p3 t2_2 + -> Hash + -> Seq Scan on pagg_tab1_p3 t1_2 +(34 rows) + +SELECT t1.y, sum(t1.x), count(*) FROM pagg_tab1 t1, pagg_tab2 t2 WHERE t1.x = t2.y GROUP BY t1.y HAVING avg(t1.x) > 10 ORDER BY 1, 2, 3; + y | sum | count +----+------+------- + 2 | 600 | 50 + 4 | 1200 | 50 + 8 | 900 | 50 + 12 | 600 | 50 + 14 | 1200 | 50 + 18 | 900 | 50 +(6 rows) + +RESET enable_hashagg; +-- Check with LEFT/RIGHT/FULL OUTER JOINs which produces NULL values for +-- aggregation +-- LEFT JOIN, should produce partial partitionwise aggregation plan as +-- GROUP BY is on nullable column +EXPLAIN (COSTS OFF) +SELECT b.y, sum(a.y) FROM pagg_tab1 a LEFT JOIN pagg_tab2 b ON a.x = b.y GROUP BY b.y ORDER BY 1 NULLS LAST; + QUERY PLAN +------------------------------------------------------------------ + Finalize GroupAggregate + Group Key: b.y + -> Sort + Sort Key: b.y + -> Append + -> Partial HashAggregate + Group Key: b.y + -> Hash Left Join + Hash Cond: (a.x = b.y) + -> Seq Scan on pagg_tab1_p1 a + -> Hash + -> Seq Scan on pagg_tab2_p1 b + -> Partial HashAggregate + Group Key: b_1.y + -> Hash Left Join + Hash Cond: (a_1.x = b_1.y) + -> Seq Scan on pagg_tab1_p2 a_1 + -> Hash + -> Seq Scan on pagg_tab2_p2 b_1 + -> Partial HashAggregate + Group Key: b_2.y + -> Hash Right Join + Hash Cond: (b_2.y = a_2.x) + -> Seq Scan on pagg_tab2_p3 b_2 + -> Hash + -> Seq Scan on pagg_tab1_p3 a_2 +(26 rows) + +SELECT b.y, sum(a.y) FROM pagg_tab1 a LEFT JOIN pagg_tab2 b ON a.x = b.y GROUP BY b.y ORDER BY 1 NULLS LAST; + y | sum +----+------ + 0 | 500 + 6 | 1100 + 12 | 700 + 18 | 1300 + 24 | 900 + | 900 +(6 rows) + +-- RIGHT JOIN, should produce full partitionwise aggregation plan as +-- GROUP BY is on non-nullable column +EXPLAIN (COSTS OFF) +SELECT b.y, sum(a.y) FROM pagg_tab1 a RIGHT JOIN pagg_tab2 b ON a.x = b.y GROUP BY b.y ORDER BY 1 NULLS LAST; + QUERY PLAN +------------------------------------------------------------ + Sort + Sort Key: b.y + -> Append + -> HashAggregate + Group Key: b.y + -> Hash Right Join + Hash Cond: (a.x = b.y) + -> Seq Scan on pagg_tab1_p1 a + -> Hash + -> Seq Scan on pagg_tab2_p1 b + -> HashAggregate + Group Key: b_1.y + -> Hash Right Join + Hash Cond: (a_1.x = b_1.y) + -> Seq Scan on pagg_tab1_p2 a_1 + -> Hash + -> Seq Scan on pagg_tab2_p2 b_1 + -> HashAggregate + Group Key: b_2.y + -> Hash Left Join + Hash Cond: (b_2.y = a_2.x) + -> Seq Scan on pagg_tab2_p3 b_2 + -> Hash + -> Seq Scan on pagg_tab1_p3 a_2 +(24 rows) + +SELECT b.y, sum(a.y) FROM pagg_tab1 a RIGHT JOIN pagg_tab2 b ON a.x = b.y GROUP BY b.y ORDER BY 1 NULLS LAST; + y | 
sum +----+------ + 0 | 500 + 3 | + 6 | 1100 + 9 | + 12 | 700 + 15 | + 18 | 1300 + 21 | + 24 | 900 + 27 | +(10 rows) + +-- FULL JOIN, should produce partial partitionwise aggregation plan as +-- GROUP BY is on nullable column +EXPLAIN (COSTS OFF) +SELECT a.x, sum(b.x) FROM pagg_tab1 a FULL OUTER JOIN pagg_tab2 b ON a.x = b.y GROUP BY a.x ORDER BY 1 NULLS LAST; + QUERY PLAN +------------------------------------------------------------------ + Finalize GroupAggregate + Group Key: a.x + -> Sort + Sort Key: a.x + -> Append + -> Partial HashAggregate + Group Key: a.x + -> Hash Full Join + Hash Cond: (a.x = b.y) + -> Seq Scan on pagg_tab1_p1 a + -> Hash + -> Seq Scan on pagg_tab2_p1 b + -> Partial HashAggregate + Group Key: a_1.x + -> Hash Full Join + Hash Cond: (a_1.x = b_1.y) + -> Seq Scan on pagg_tab1_p2 a_1 + -> Hash + -> Seq Scan on pagg_tab2_p2 b_1 + -> Partial HashAggregate + Group Key: a_2.x + -> Hash Full Join + Hash Cond: (b_2.y = a_2.x) + -> Seq Scan on pagg_tab2_p3 b_2 + -> Hash + -> Seq Scan on pagg_tab1_p3 a_2 +(26 rows) + +SELECT a.x, sum(b.x) FROM pagg_tab1 a FULL OUTER JOIN pagg_tab2 b ON a.x = b.y GROUP BY a.x ORDER BY 1 NULLS LAST; + x | sum +----+------ + 0 | 500 + 2 | + 4 | + 6 | 1100 + 8 | + 10 | + 12 | 700 + 14 | + 16 | + 18 | 1300 + 20 | + 22 | + 24 | 900 + 26 | + 28 | + | 500 +(16 rows) + +-- LEFT JOIN, with dummy relation on right side, +-- should produce full partitionwise aggregation plan as GROUP BY is on +-- non-nullable columns +EXPLAIN (COSTS OFF) +SELECT a.x, b.y, count(*) FROM (SELECT * FROM pagg_tab1 WHERE x < 20) a LEFT JOIN (SELECT * FROM pagg_tab2 WHERE y > 10) b ON a.x = b.y WHERE a.x > 5 or b.y < 20 GROUP BY a.x, b.y ORDER BY 1, 2; + QUERY PLAN +----------------------------------------------------------------------------- + Sort + Sort Key: pagg_tab1_p1.x, y + -> Append + -> HashAggregate + Group Key: pagg_tab1_p1.x, y + -> Hash Left Join + Hash Cond: (pagg_tab1_p1.x = y) + Filter: ((pagg_tab1_p1.x > 5) OR (y < 20)) + -> Seq Scan on pagg_tab1_p1 + Filter: (x < 20) + -> Hash + -> Result + One-Time Filter: false + -> HashAggregate + Group Key: pagg_tab1_p2.x, pagg_tab2_p2.y + -> Hash Left Join + Hash Cond: (pagg_tab1_p2.x = pagg_tab2_p2.y) + Filter: ((pagg_tab1_p2.x > 5) OR (pagg_tab2_p2.y < 20)) + -> Seq Scan on pagg_tab1_p2 + Filter: (x < 20) + -> Hash + -> Seq Scan on pagg_tab2_p2 + Filter: (y > 10) +(23 rows) + +SELECT a.x, b.y, count(*) FROM (SELECT * FROM pagg_tab1 WHERE x < 20) a LEFT JOIN (SELECT * FROM pagg_tab2 WHERE y > 10) b ON a.x = b.y WHERE a.x > 5 or b.y < 20 GROUP BY a.x, b.y ORDER BY 1, 2; + x | y | count +----+----+------- + 6 | | 10 + 8 | | 10 + 10 | | 10 + 12 | 12 | 100 + 14 | | 10 + 16 | | 10 + 18 | 18 | 100 +(7 rows) + +-- FULL JOIN, with dummy relations on both sides, +-- should produce partial partitionwise aggregation plan as GROUP BY is on +-- nullable columns +EXPLAIN (COSTS OFF) +SELECT a.x, b.y, count(*) FROM (SELECT * FROM pagg_tab1 WHERE x < 20) a FULL JOIN (SELECT * FROM pagg_tab2 WHERE y > 10) b ON a.x = b.y WHERE a.x > 5 or b.y < 20 GROUP BY a.x, b.y ORDER BY 1, 2; + QUERY PLAN +----------------------------------------------------------------------------------- + Finalize GroupAggregate + Group Key: pagg_tab1_p1.x, y + -> Sort + Sort Key: pagg_tab1_p1.x, y + -> Append + -> Partial HashAggregate + Group Key: pagg_tab1_p1.x, y + -> Hash Full Join + Hash Cond: (pagg_tab1_p1.x = y) + Filter: ((pagg_tab1_p1.x > 5) OR (y < 20)) + -> Seq Scan on pagg_tab1_p1 + Filter: (x < 20) + -> Hash + -> Result + One-Time Filter: false + -> 
Partial HashAggregate + Group Key: pagg_tab1_p2.x, pagg_tab2_p2.y + -> Hash Full Join + Hash Cond: (pagg_tab1_p2.x = pagg_tab2_p2.y) + Filter: ((pagg_tab1_p2.x > 5) OR (pagg_tab2_p2.y < 20)) + -> Seq Scan on pagg_tab1_p2 + Filter: (x < 20) + -> Hash + -> Seq Scan on pagg_tab2_p2 + Filter: (y > 10) + -> Partial HashAggregate + Group Key: x, pagg_tab2_p3.y + -> Hash Full Join + Hash Cond: (pagg_tab2_p3.y = x) + Filter: ((x > 5) OR (pagg_tab2_p3.y < 20)) + -> Seq Scan on pagg_tab2_p3 + Filter: (y > 10) + -> Hash + -> Result + One-Time Filter: false +(35 rows) + +SELECT a.x, b.y, count(*) FROM (SELECT * FROM pagg_tab1 WHERE x < 20) a FULL JOIN (SELECT * FROM pagg_tab2 WHERE y > 10) b ON a.x = b.y WHERE a.x > 5 or b.y < 20 GROUP BY a.x, b.y ORDER BY 1, 2; + x | y | count +----+----+------- + 6 | | 10 + 8 | | 10 + 10 | | 10 + 12 | 12 | 100 + 14 | | 10 + 16 | | 10 + 18 | 18 | 100 + | 15 | 10 +(8 rows) + +-- Empty join relation because of empty outer side, no partitionwise agg plan +EXPLAIN (COSTS OFF) +SELECT a.x, a.y, count(*) FROM (SELECT * FROM pagg_tab1 WHERE x = 1 AND x = 2) a LEFT JOIN pagg_tab2 b ON a.x = b.y GROUP BY a.x, a.y ORDER BY 1, 2; + QUERY PLAN +--------------------------------------- + GroupAggregate + Group Key: pagg_tab1.x, pagg_tab1.y + -> Sort + Sort Key: pagg_tab1.y + -> Result + One-Time Filter: false +(6 rows) + +SELECT a.x, a.y, count(*) FROM (SELECT * FROM pagg_tab1 WHERE x = 1 AND x = 2) a LEFT JOIN pagg_tab2 b ON a.x = b.y GROUP BY a.x, a.y ORDER BY 1, 2; + x | y | count +---+---+------- +(0 rows) + +-- Partition by multiple columns +CREATE TABLE pagg_tab_m (a int, b int, c int) PARTITION BY RANGE(a, ((a+b)/2)); +CREATE TABLE pagg_tab_m_p1 PARTITION OF pagg_tab_m FOR VALUES FROM (0, 0) TO (10, 10); +CREATE TABLE pagg_tab_m_p2 PARTITION OF pagg_tab_m FOR VALUES FROM (10, 10) TO (20, 20); +CREATE TABLE pagg_tab_m_p3 PARTITION OF pagg_tab_m FOR VALUES FROM (20, 20) TO (30, 30); +INSERT INTO pagg_tab_m SELECT i % 30, i % 40, i % 50 FROM generate_series(0, 2999) i; +ANALYZE pagg_tab_m; +-- Partial aggregation as GROUP BY clause does not match with PARTITION KEY +EXPLAIN (COSTS OFF) +SELECT a, sum(b), avg(c), count(*) FROM pagg_tab_m GROUP BY a HAVING avg(c) < 22 ORDER BY 1, 2, 3; + QUERY PLAN +----------------------------------------------------------------------------- + Sort + Sort Key: pagg_tab_m_p1.a, (sum(pagg_tab_m_p1.b)), (avg(pagg_tab_m_p1.c)) + -> Finalize HashAggregate + Group Key: pagg_tab_m_p1.a + Filter: (avg(pagg_tab_m_p1.c) < '22'::numeric) + -> Append + -> Partial HashAggregate + Group Key: pagg_tab_m_p1.a + -> Seq Scan on pagg_tab_m_p1 + -> Partial HashAggregate + Group Key: pagg_tab_m_p2.a + -> Seq Scan on pagg_tab_m_p2 + -> Partial HashAggregate + Group Key: pagg_tab_m_p3.a + -> Seq Scan on pagg_tab_m_p3 +(15 rows) + +SELECT a, sum(b), avg(c), count(*) FROM pagg_tab_m GROUP BY a HAVING avg(c) < 22 ORDER BY 1, 2, 3; + a | sum | avg | count +----+------+---------------------+------- + 0 | 1500 | 20.0000000000000000 | 100 + 1 | 1600 | 21.0000000000000000 | 100 + 10 | 1500 | 20.0000000000000000 | 100 + 11 | 1600 | 21.0000000000000000 | 100 + 20 | 1500 | 20.0000000000000000 | 100 + 21 | 1600 | 21.0000000000000000 | 100 +(6 rows) + +-- Full aggregation as GROUP BY clause matches with PARTITION KEY +EXPLAIN (COSTS OFF) +SELECT a, sum(b), avg(c), count(*) FROM pagg_tab_m GROUP BY a, (a+b)/2 HAVING sum(b) < 50 ORDER BY 1, 2, 3; + QUERY PLAN +------------------------------------------------------------------------------------- + Sort + Sort Key: pagg_tab_m_p1.a, 
(sum(pagg_tab_m_p1.b)), (avg(pagg_tab_m_p1.c)) + -> Append + -> HashAggregate + Group Key: pagg_tab_m_p1.a, ((pagg_tab_m_p1.a + pagg_tab_m_p1.b) / 2) + Filter: (sum(pagg_tab_m_p1.b) < 50) + -> Seq Scan on pagg_tab_m_p1 + -> HashAggregate + Group Key: pagg_tab_m_p2.a, ((pagg_tab_m_p2.a + pagg_tab_m_p2.b) / 2) + Filter: (sum(pagg_tab_m_p2.b) < 50) + -> Seq Scan on pagg_tab_m_p2 + -> HashAggregate + Group Key: pagg_tab_m_p3.a, ((pagg_tab_m_p3.a + pagg_tab_m_p3.b) / 2) + Filter: (sum(pagg_tab_m_p3.b) < 50) + -> Seq Scan on pagg_tab_m_p3 +(15 rows) + +SELECT a, sum(b), avg(c), count(*) FROM pagg_tab_m GROUP BY a, (a+b)/2 HAVING sum(b) < 50 ORDER BY 1, 2, 3; + a | sum | avg | count +----+-----+---------------------+------- + 0 | 0 | 20.0000000000000000 | 25 + 1 | 25 | 21.0000000000000000 | 25 + 10 | 0 | 20.0000000000000000 | 25 + 11 | 25 | 21.0000000000000000 | 25 + 20 | 0 | 20.0000000000000000 | 25 + 21 | 25 | 21.0000000000000000 | 25 +(6 rows) + +-- Full aggregation as PARTITION KEY is part of GROUP BY clause +EXPLAIN (COSTS OFF) +SELECT a, c, sum(b), avg(c), count(*) FROM pagg_tab_m GROUP BY (a+b)/2, 2, 1 HAVING sum(b) = 50 AND avg(c) > 25 ORDER BY 1, 2, 3; + QUERY PLAN +------------------------------------------------------------------------------------------------------ + Sort + Sort Key: pagg_tab_m_p1.a, pagg_tab_m_p1.c, (sum(pagg_tab_m_p1.b)) + -> Append + -> HashAggregate + Group Key: ((pagg_tab_m_p1.a + pagg_tab_m_p1.b) / 2), pagg_tab_m_p1.c, pagg_tab_m_p1.a + Filter: ((sum(pagg_tab_m_p1.b) = 50) AND (avg(pagg_tab_m_p1.c) > '25'::numeric)) + -> Seq Scan on pagg_tab_m_p1 + -> HashAggregate + Group Key: ((pagg_tab_m_p2.a + pagg_tab_m_p2.b) / 2), pagg_tab_m_p2.c, pagg_tab_m_p2.a + Filter: ((sum(pagg_tab_m_p2.b) = 50) AND (avg(pagg_tab_m_p2.c) > '25'::numeric)) + -> Seq Scan on pagg_tab_m_p2 + -> HashAggregate + Group Key: ((pagg_tab_m_p3.a + pagg_tab_m_p3.b) / 2), pagg_tab_m_p3.c, pagg_tab_m_p3.a + Filter: ((sum(pagg_tab_m_p3.b) = 50) AND (avg(pagg_tab_m_p3.c) > '25'::numeric)) + -> Seq Scan on pagg_tab_m_p3 +(15 rows) + +SELECT a, c, sum(b), avg(c), count(*) FROM pagg_tab_m GROUP BY (a+b)/2, 2, 1 HAVING sum(b) = 50 AND avg(c) > 25 ORDER BY 1, 2, 3; + a | c | sum | avg | count +----+----+-----+---------------------+------- + 0 | 30 | 50 | 30.0000000000000000 | 5 + 0 | 40 | 50 | 40.0000000000000000 | 5 + 10 | 30 | 50 | 30.0000000000000000 | 5 + 10 | 40 | 50 | 40.0000000000000000 | 5 + 20 | 30 | 50 | 30.0000000000000000 | 5 + 20 | 40 | 50 | 40.0000000000000000 | 5 +(6 rows) + +-- Test with multi-level partitioning scheme +CREATE TABLE pagg_tab_ml (a int, b int, c text) PARTITION BY RANGE(a); +CREATE TABLE pagg_tab_ml_p1 PARTITION OF pagg_tab_ml FOR VALUES FROM (0) TO (10); +CREATE TABLE pagg_tab_ml_p2 PARTITION OF pagg_tab_ml FOR VALUES FROM (10) TO (20) PARTITION BY LIST (c); +CREATE TABLE pagg_tab_ml_p2_s1 PARTITION OF pagg_tab_ml_p2 FOR VALUES IN ('0000', '0001'); +CREATE TABLE pagg_tab_ml_p2_s2 PARTITION OF pagg_tab_ml_p2 FOR VALUES IN ('0002', '0003'); +-- This level of partitioning has different column positions than the parent +CREATE TABLE pagg_tab_ml_p3(b int, c text, a int) PARTITION BY RANGE (b); +CREATE TABLE pagg_tab_ml_p3_s1(c text, a int, b int); +CREATE TABLE pagg_tab_ml_p3_s2 PARTITION OF pagg_tab_ml_p3 FOR VALUES FROM (5) TO (10); +ALTER TABLE pagg_tab_ml_p3 ATTACH PARTITION pagg_tab_ml_p3_s1 FOR VALUES FROM (0) TO (5); +ALTER TABLE pagg_tab_ml ATTACH PARTITION pagg_tab_ml_p3 FOR VALUES FROM (20) TO (30); +INSERT INTO pagg_tab_ml SELECT i % 30, i % 10, to_char(i % 4, 
'FM0000') FROM generate_series(0, 29999) i; +ANALYZE pagg_tab_ml; +-- For Parallel Append +SET max_parallel_workers_per_gather TO 2; +-- Full aggregation at level 1 as GROUP BY clause matches with PARTITION KEY +-- for level 1 only. For subpartitions, GROUP BY clause does not match with +-- PARTITION KEY, but still we do not see a partial aggregation as array_agg() +-- is not partial agg safe. +EXPLAIN (COSTS OFF) +SELECT a, sum(b), array_agg(distinct c), count(*) FROM pagg_tab_ml GROUP BY a HAVING avg(b) < 3 ORDER BY 1, 2, 3; + QUERY PLAN +-------------------------------------------------------------------------------------------------------- + Sort + Sort Key: pagg_tab_ml_p2_s1.a, (sum(pagg_tab_ml_p2_s1.b)), (array_agg(DISTINCT pagg_tab_ml_p2_s1.c)) + -> Gather + Workers Planned: 2 + -> Parallel Append + -> GroupAggregate + Group Key: pagg_tab_ml_p2_s1.a + Filter: (avg(pagg_tab_ml_p2_s1.b) < '3'::numeric) + -> Sort + Sort Key: pagg_tab_ml_p2_s1.a + -> Append + -> Seq Scan on pagg_tab_ml_p2_s1 + -> Seq Scan on pagg_tab_ml_p2_s2 + -> GroupAggregate + Group Key: pagg_tab_ml_p3_s1.a + Filter: (avg(pagg_tab_ml_p3_s1.b) < '3'::numeric) + -> Sort + Sort Key: pagg_tab_ml_p3_s1.a + -> Append + -> Seq Scan on pagg_tab_ml_p3_s1 + -> Seq Scan on pagg_tab_ml_p3_s2 + -> GroupAggregate + Group Key: pagg_tab_ml_p1.a + Filter: (avg(pagg_tab_ml_p1.b) < '3'::numeric) + -> Sort + Sort Key: pagg_tab_ml_p1.a + -> Seq Scan on pagg_tab_ml_p1 +(27 rows) + +SELECT a, sum(b), array_agg(distinct c), count(*) FROM pagg_tab_ml GROUP BY a HAVING avg(b) < 3 ORDER BY 1, 2, 3; + a | sum | array_agg | count +----+------+-------------+------- + 0 | 0 | {0000,0002} | 1000 + 1 | 1000 | {0001,0003} | 1000 + 2 | 2000 | {0000,0002} | 1000 + 10 | 0 | {0000,0002} | 1000 + 11 | 1000 | {0001,0003} | 1000 + 12 | 2000 | {0000,0002} | 1000 + 20 | 0 | {0000,0002} | 1000 + 21 | 1000 | {0001,0003} | 1000 + 22 | 2000 | {0000,0002} | 1000 +(9 rows) + +-- Without ORDER BY clause, to test Gather at top-most path +EXPLAIN (COSTS OFF) +SELECT a, sum(b), array_agg(distinct c), count(*) FROM pagg_tab_ml GROUP BY a HAVING avg(b) < 3; + QUERY PLAN +----------------------------------------------------------------- + Gather + Workers Planned: 2 + -> Parallel Append + -> GroupAggregate + Group Key: pagg_tab_ml_p2_s1.a + Filter: (avg(pagg_tab_ml_p2_s1.b) < '3'::numeric) + -> Sort + Sort Key: pagg_tab_ml_p2_s1.a + -> Append + -> Seq Scan on pagg_tab_ml_p2_s1 + -> Seq Scan on pagg_tab_ml_p2_s2 + -> GroupAggregate + Group Key: pagg_tab_ml_p3_s1.a + Filter: (avg(pagg_tab_ml_p3_s1.b) < '3'::numeric) + -> Sort + Sort Key: pagg_tab_ml_p3_s1.a + -> Append + -> Seq Scan on pagg_tab_ml_p3_s1 + -> Seq Scan on pagg_tab_ml_p3_s2 + -> GroupAggregate + Group Key: pagg_tab_ml_p1.a + Filter: (avg(pagg_tab_ml_p1.b) < '3'::numeric) + -> Sort + Sort Key: pagg_tab_ml_p1.a + -> Seq Scan on pagg_tab_ml_p1 +(25 rows) + +-- Full aggregation at level 1 as GROUP BY clause matches with PARTITION KEY +-- for level 1 only. For subpartitions, GROUP BY clause does not match with +-- PARTITION KEY, thus we will have a partial aggregation for them. 
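Two properties of the aggregate itself decide whether the partial/Finalize split is even available: the aggregate must have a combine function recorded in pg_aggregate (plus serialization functions when its transition state is of type internal), and an aggregate call that uses DISTINCT or an ORDER BY inside the parentheses cannot be split at all, which is why the array_agg(DISTINCT c) and sum(b ORDER BY a) tests above fall back to full per-partition aggregation only. A sketch of how to inspect this in the catalogs, not part of the patch (the proname list is only an example):

    SELECT p.oid::regprocedure AS aggregate,
           a.aggcombinefn, a.aggserialfn, a.aggdeserialfn
    FROM pg_aggregate a
    JOIN pg_proc p ON p.oid = a.aggfnoid
    WHERE p.proname IN ('sum', 'array_agg')
    ORDER BY 1;

For the plain sum(b)/count(*) query below the split is available, so the Finalize step appears only above the subpartitioned children whose own partition keys do not match the GROUP BY.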
+EXPLAIN (COSTS OFF) +SELECT a, sum(b), count(*) FROM pagg_tab_ml GROUP BY a HAVING avg(b) < 3 ORDER BY 1, 2, 3; + QUERY PLAN +------------------------------------------------------------------- + Sort + Sort Key: pagg_tab_ml_p1.a, (sum(pagg_tab_ml_p1.b)), (count(*)) + -> Append + -> HashAggregate + Group Key: pagg_tab_ml_p1.a + Filter: (avg(pagg_tab_ml_p1.b) < '3'::numeric) + -> Seq Scan on pagg_tab_ml_p1 + -> Finalize GroupAggregate + Group Key: pagg_tab_ml_p2_s1.a + Filter: (avg(pagg_tab_ml_p2_s1.b) < '3'::numeric) + -> Sort + Sort Key: pagg_tab_ml_p2_s1.a + -> Append + -> Partial HashAggregate + Group Key: pagg_tab_ml_p2_s1.a + -> Seq Scan on pagg_tab_ml_p2_s1 + -> Partial HashAggregate + Group Key: pagg_tab_ml_p2_s2.a + -> Seq Scan on pagg_tab_ml_p2_s2 + -> Finalize GroupAggregate + Group Key: pagg_tab_ml_p3_s1.a + Filter: (avg(pagg_tab_ml_p3_s1.b) < '3'::numeric) + -> Sort + Sort Key: pagg_tab_ml_p3_s1.a + -> Append + -> Partial HashAggregate + Group Key: pagg_tab_ml_p3_s1.a + -> Seq Scan on pagg_tab_ml_p3_s1 + -> Partial HashAggregate + Group Key: pagg_tab_ml_p3_s2.a + -> Seq Scan on pagg_tab_ml_p3_s2 +(31 rows) + +SELECT a, sum(b), count(*) FROM pagg_tab_ml GROUP BY a HAVING avg(b) < 3 ORDER BY 1, 2, 3; + a | sum | count +----+------+------- + 0 | 0 | 1000 + 1 | 1000 | 1000 + 2 | 2000 | 1000 + 10 | 0 | 1000 + 11 | 1000 | 1000 + 12 | 2000 | 1000 + 20 | 0 | 1000 + 21 | 1000 | 1000 + 22 | 2000 | 1000 +(9 rows) + +-- Partial aggregation at all levels as GROUP BY clause does not match with +-- PARTITION KEY +EXPLAIN (COSTS OFF) +SELECT b, sum(a), count(*) FROM pagg_tab_ml GROUP BY b ORDER BY 1, 2, 3; + QUERY PLAN +------------------------------------------------------------------- + Sort + Sort Key: pagg_tab_ml_p1.b, (sum(pagg_tab_ml_p1.a)), (count(*)) + -> Finalize GroupAggregate + Group Key: pagg_tab_ml_p1.b + -> Sort + Sort Key: pagg_tab_ml_p1.b + -> Append + -> Partial HashAggregate + Group Key: pagg_tab_ml_p1.b + -> Seq Scan on pagg_tab_ml_p1 + -> Partial HashAggregate + Group Key: pagg_tab_ml_p2_s1.b + -> Seq Scan on pagg_tab_ml_p2_s1 + -> Partial HashAggregate + Group Key: pagg_tab_ml_p2_s2.b + -> Seq Scan on pagg_tab_ml_p2_s2 + -> Partial HashAggregate + Group Key: pagg_tab_ml_p3_s1.b + -> Seq Scan on pagg_tab_ml_p3_s1 + -> Partial HashAggregate + Group Key: pagg_tab_ml_p3_s2.b + -> Seq Scan on pagg_tab_ml_p3_s2 +(22 rows) + +SELECT b, sum(a), count(*) FROM pagg_tab_ml GROUP BY b HAVING avg(a) < 15 ORDER BY 1, 2, 3; + b | sum | count +---+-------+------- + 0 | 30000 | 3000 + 1 | 33000 | 3000 + 2 | 36000 | 3000 + 3 | 39000 | 3000 + 4 | 42000 | 3000 +(5 rows) + +-- Full aggregation at all levels as GROUP BY clause matches with PARTITION KEY +EXPLAIN (COSTS OFF) +SELECT a, sum(b), count(*) FROM pagg_tab_ml GROUP BY a, b, c HAVING avg(b) > 7 ORDER BY 1, 2, 3; + QUERY PLAN +---------------------------------------------------------------------------------------- + Sort + Sort Key: pagg_tab_ml_p1.a, (sum(pagg_tab_ml_p1.b)), (count(*)) + -> Append + -> HashAggregate + Group Key: pagg_tab_ml_p1.a, pagg_tab_ml_p1.b, pagg_tab_ml_p1.c + Filter: (avg(pagg_tab_ml_p1.b) > '7'::numeric) + -> Seq Scan on pagg_tab_ml_p1 + -> HashAggregate + Group Key: pagg_tab_ml_p2_s1.a, pagg_tab_ml_p2_s1.b, pagg_tab_ml_p2_s1.c + Filter: (avg(pagg_tab_ml_p2_s1.b) > '7'::numeric) + -> Seq Scan on pagg_tab_ml_p2_s1 + -> HashAggregate + Group Key: pagg_tab_ml_p2_s2.a, pagg_tab_ml_p2_s2.b, pagg_tab_ml_p2_s2.c + Filter: (avg(pagg_tab_ml_p2_s2.b) > '7'::numeric) + -> Seq Scan on pagg_tab_ml_p2_s2 + -> HashAggregate + 
Group Key: pagg_tab_ml_p3_s1.a, pagg_tab_ml_p3_s1.b, pagg_tab_ml_p3_s1.c + Filter: (avg(pagg_tab_ml_p3_s1.b) > '7'::numeric) + -> Seq Scan on pagg_tab_ml_p3_s1 + -> HashAggregate + Group Key: pagg_tab_ml_p3_s2.a, pagg_tab_ml_p3_s2.b, pagg_tab_ml_p3_s2.c + Filter: (avg(pagg_tab_ml_p3_s2.b) > '7'::numeric) + -> Seq Scan on pagg_tab_ml_p3_s2 +(23 rows) + +SELECT a, sum(b), count(*) FROM pagg_tab_ml GROUP BY a, b, c HAVING avg(b) > 7 ORDER BY 1, 2, 3; + a | sum | count +----+------+------- + 8 | 4000 | 500 + 8 | 4000 | 500 + 9 | 4500 | 500 + 9 | 4500 | 500 + 18 | 4000 | 500 + 18 | 4000 | 500 + 19 | 4500 | 500 + 19 | 4500 | 500 + 28 | 4000 | 500 + 28 | 4000 | 500 + 29 | 4500 | 500 + 29 | 4500 | 500 +(12 rows) + +-- Parallelism within partitionwise aggregates +SET min_parallel_table_scan_size TO '8kB'; +SET parallel_setup_cost TO 0; +-- Full aggregation at level 1 as GROUP BY clause matches with PARTITION KEY +-- for level 1 only. For subpartitions, GROUP BY clause does not match with +-- PARTITION KEY, thus we will have a partial aggregation for them. +EXPLAIN (COSTS OFF) +SELECT a, sum(b), count(*) FROM pagg_tab_ml GROUP BY a HAVING avg(b) < 3 ORDER BY 1, 2, 3; + QUERY PLAN +---------------------------------------------------------------------------------- + Sort + Sort Key: pagg_tab_ml_p1.a, (sum(pagg_tab_ml_p1.b)), (count(*)) + -> Append + -> Finalize GroupAggregate + Group Key: pagg_tab_ml_p1.a + Filter: (avg(pagg_tab_ml_p1.b) < '3'::numeric) + -> Gather Merge + Workers Planned: 2 + -> Sort + Sort Key: pagg_tab_ml_p1.a + -> Partial HashAggregate + Group Key: pagg_tab_ml_p1.a + -> Parallel Seq Scan on pagg_tab_ml_p1 + -> Finalize GroupAggregate + Group Key: pagg_tab_ml_p2_s1.a + Filter: (avg(pagg_tab_ml_p2_s1.b) < '3'::numeric) + -> Gather Merge + Workers Planned: 2 + -> Sort + Sort Key: pagg_tab_ml_p2_s1.a + -> Parallel Append + -> Partial HashAggregate + Group Key: pagg_tab_ml_p2_s1.a + -> Parallel Seq Scan on pagg_tab_ml_p2_s1 + -> Partial HashAggregate + Group Key: pagg_tab_ml_p2_s2.a + -> Parallel Seq Scan on pagg_tab_ml_p2_s2 + -> Finalize GroupAggregate + Group Key: pagg_tab_ml_p3_s1.a + Filter: (avg(pagg_tab_ml_p3_s1.b) < '3'::numeric) + -> Gather Merge + Workers Planned: 2 + -> Sort + Sort Key: pagg_tab_ml_p3_s1.a + -> Parallel Append + -> Partial HashAggregate + Group Key: pagg_tab_ml_p3_s1.a + -> Parallel Seq Scan on pagg_tab_ml_p3_s1 + -> Partial HashAggregate + Group Key: pagg_tab_ml_p3_s2.a + -> Parallel Seq Scan on pagg_tab_ml_p3_s2 +(41 rows) + +SELECT a, sum(b), count(*) FROM pagg_tab_ml GROUP BY a HAVING avg(b) < 3 ORDER BY 1, 2, 3; + a | sum | count +----+------+------- + 0 | 0 | 1000 + 1 | 1000 | 1000 + 2 | 2000 | 1000 + 10 | 0 | 1000 + 11 | 1000 | 1000 + 12 | 2000 | 1000 + 20 | 0 | 1000 + 21 | 1000 | 1000 + 22 | 2000 | 1000 +(9 rows) + +-- Partial aggregation at all levels as GROUP BY clause does not match with +-- PARTITION KEY +EXPLAIN (COSTS OFF) +SELECT b, sum(a), count(*) FROM pagg_tab_ml GROUP BY b ORDER BY 1, 2, 3; + QUERY PLAN +---------------------------------------------------------------------------- + Sort + Sort Key: pagg_tab_ml_p1.b, (sum(pagg_tab_ml_p1.a)), (count(*)) + -> Finalize GroupAggregate + Group Key: pagg_tab_ml_p1.b + -> Gather Merge + Workers Planned: 2 + -> Sort + Sort Key: pagg_tab_ml_p1.b + -> Parallel Append + -> Partial HashAggregate + Group Key: pagg_tab_ml_p1.b + -> Parallel Seq Scan on pagg_tab_ml_p1 + -> Partial HashAggregate + Group Key: pagg_tab_ml_p2_s1.b + -> Parallel Seq Scan on pagg_tab_ml_p2_s1 + -> Partial HashAggregate + Group 
Key: pagg_tab_ml_p2_s2.b + -> Parallel Seq Scan on pagg_tab_ml_p2_s2 + -> Partial HashAggregate + Group Key: pagg_tab_ml_p3_s1.b + -> Parallel Seq Scan on pagg_tab_ml_p3_s1 + -> Partial HashAggregate + Group Key: pagg_tab_ml_p3_s2.b + -> Parallel Seq Scan on pagg_tab_ml_p3_s2 +(24 rows) + +SELECT b, sum(a), count(*) FROM pagg_tab_ml GROUP BY b HAVING avg(a) < 15 ORDER BY 1, 2, 3; + b | sum | count +---+-------+------- + 0 | 30000 | 3000 + 1 | 33000 | 3000 + 2 | 36000 | 3000 + 3 | 39000 | 3000 + 4 | 42000 | 3000 +(5 rows) + +-- Full aggregation at all levels as GROUP BY clause matches with PARTITION KEY +EXPLAIN (COSTS OFF) +SELECT a, sum(b), count(*) FROM pagg_tab_ml GROUP BY a, b, c HAVING avg(b) > 7 ORDER BY 1, 2, 3; + QUERY PLAN +---------------------------------------------------------------------------------------------- + Gather Merge + Workers Planned: 2 + -> Sort + Sort Key: pagg_tab_ml_p1.a, (sum(pagg_tab_ml_p1.b)), (count(*)) + -> Parallel Append + -> HashAggregate + Group Key: pagg_tab_ml_p1.a, pagg_tab_ml_p1.b, pagg_tab_ml_p1.c + Filter: (avg(pagg_tab_ml_p1.b) > '7'::numeric) + -> Seq Scan on pagg_tab_ml_p1 + -> HashAggregate + Group Key: pagg_tab_ml_p2_s1.a, pagg_tab_ml_p2_s1.b, pagg_tab_ml_p2_s1.c + Filter: (avg(pagg_tab_ml_p2_s1.b) > '7'::numeric) + -> Seq Scan on pagg_tab_ml_p2_s1 + -> HashAggregate + Group Key: pagg_tab_ml_p2_s2.a, pagg_tab_ml_p2_s2.b, pagg_tab_ml_p2_s2.c + Filter: (avg(pagg_tab_ml_p2_s2.b) > '7'::numeric) + -> Seq Scan on pagg_tab_ml_p2_s2 + -> HashAggregate + Group Key: pagg_tab_ml_p3_s1.a, pagg_tab_ml_p3_s1.b, pagg_tab_ml_p3_s1.c + Filter: (avg(pagg_tab_ml_p3_s1.b) > '7'::numeric) + -> Seq Scan on pagg_tab_ml_p3_s1 + -> HashAggregate + Group Key: pagg_tab_ml_p3_s2.a, pagg_tab_ml_p3_s2.b, pagg_tab_ml_p3_s2.c + Filter: (avg(pagg_tab_ml_p3_s2.b) > '7'::numeric) + -> Seq Scan on pagg_tab_ml_p3_s2 +(25 rows) + +SELECT a, sum(b), count(*) FROM pagg_tab_ml GROUP BY a, b, c HAVING avg(b) > 7 ORDER BY 1, 2, 3; + a | sum | count +----+------+------- + 8 | 4000 | 500 + 8 | 4000 | 500 + 9 | 4500 | 500 + 9 | 4500 | 500 + 18 | 4000 | 500 + 18 | 4000 | 500 + 19 | 4500 | 500 + 19 | 4500 | 500 + 28 | 4000 | 500 + 28 | 4000 | 500 + 29 | 4500 | 500 + 29 | 4500 | 500 +(12 rows) + +-- Parallelism within partitionwise aggregates (single level) +-- Add few parallel setup cost, so that we will see a plan which gathers +-- partially created paths even for full aggregation and sticks a single Gather +-- followed by finalization step. +-- Without this, the cost of doing partial aggregation + Gather + finalization +-- for each partition and then Append over it turns out to be same and this +-- wins as we add it first. This parallel_setup_cost plays a vital role in +-- costing such plans. +SET parallel_setup_cost TO 10; +CREATE TABLE pagg_tab_para(x int, y int) PARTITION BY RANGE(x); +CREATE TABLE pagg_tab_para_p1 PARTITION OF pagg_tab_para FOR VALUES FROM (0) TO (10); +CREATE TABLE pagg_tab_para_p2 PARTITION OF pagg_tab_para FOR VALUES FROM (10) TO (20); +CREATE TABLE pagg_tab_para_p3 PARTITION OF pagg_tab_para FOR VALUES FROM (20) TO (30); +INSERT INTO pagg_tab_para SELECT i % 30, i % 20 FROM generate_series(0, 29999) i; +ANALYZE pagg_tab_para; +-- When GROUP BY clause matches; full aggregation is performed for each partition. 
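The comment above about parallel_setup_cost is worth restating: with the setup cost at zero, a plan that puts a separate Gather above each partition's partial aggregate costs the same as one that funnels every partition through a single Gather, and the tie simply goes to whichever path was added first; charging even a small setup cost per Gather makes the single-Gather, finalize-once plan win on its own merits. A minimal sketch of the knobs these tests lean on, not part of the patch and assuming the pagg_tab_para setup just above:

    SET max_parallel_workers_per_gather TO 2;
    SET min_parallel_table_scan_size TO '8kB';  -- let even these small partitions use parallel scans
    SET parallel_setup_cost TO 10;              -- make per-partition Gathers comparatively expensive
    EXPLAIN (COSTS OFF)
    SELECT x, count(*) FROM pagg_tab_para GROUP BY x;
    -- expect one Gather (Merge) above a Parallel Append of partial aggregates,
    -- finalized once at the top

The expected plans below exercise exactly that shape, and then show what happens when individual partitions have parallel_workers set to 0.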
+EXPLAIN (COSTS OFF) +SELECT x, sum(y), avg(y), count(*) FROM pagg_tab_para GROUP BY x HAVING avg(y) < 7 ORDER BY 1, 2, 3; + QUERY PLAN +-------------------------------------------------------------------------------------- + Sort + Sort Key: pagg_tab_para_p1.x, (sum(pagg_tab_para_p1.y)), (avg(pagg_tab_para_p1.y)) + -> Finalize GroupAggregate + Group Key: pagg_tab_para_p1.x + Filter: (avg(pagg_tab_para_p1.y) < '7'::numeric) + -> Gather Merge + Workers Planned: 2 + -> Sort + Sort Key: pagg_tab_para_p1.x + -> Parallel Append + -> Partial HashAggregate + Group Key: pagg_tab_para_p1.x + -> Parallel Seq Scan on pagg_tab_para_p1 + -> Partial HashAggregate + Group Key: pagg_tab_para_p2.x + -> Parallel Seq Scan on pagg_tab_para_p2 + -> Partial HashAggregate + Group Key: pagg_tab_para_p3.x + -> Parallel Seq Scan on pagg_tab_para_p3 +(19 rows) + +SELECT x, sum(y), avg(y), count(*) FROM pagg_tab_para GROUP BY x HAVING avg(y) < 7 ORDER BY 1, 2, 3; + x | sum | avg | count +----+------+--------------------+------- + 0 | 5000 | 5.0000000000000000 | 1000 + 1 | 6000 | 6.0000000000000000 | 1000 + 10 | 5000 | 5.0000000000000000 | 1000 + 11 | 6000 | 6.0000000000000000 | 1000 + 20 | 5000 | 5.0000000000000000 | 1000 + 21 | 6000 | 6.0000000000000000 | 1000 +(6 rows) + +-- When GROUP BY clause does not match; partial aggregation is performed for each partition. +EXPLAIN (COSTS OFF) +SELECT y, sum(x), avg(x), count(*) FROM pagg_tab_para GROUP BY y HAVING avg(x) < 12 ORDER BY 1, 2, 3; + QUERY PLAN +-------------------------------------------------------------------------------------- + Sort + Sort Key: pagg_tab_para_p1.y, (sum(pagg_tab_para_p1.x)), (avg(pagg_tab_para_p1.x)) + -> Finalize GroupAggregate + Group Key: pagg_tab_para_p1.y + Filter: (avg(pagg_tab_para_p1.x) < '12'::numeric) + -> Gather Merge + Workers Planned: 2 + -> Sort + Sort Key: pagg_tab_para_p1.y + -> Parallel Append + -> Partial HashAggregate + Group Key: pagg_tab_para_p1.y + -> Parallel Seq Scan on pagg_tab_para_p1 + -> Partial HashAggregate + Group Key: pagg_tab_para_p2.y + -> Parallel Seq Scan on pagg_tab_para_p2 + -> Partial HashAggregate + Group Key: pagg_tab_para_p3.y + -> Parallel Seq Scan on pagg_tab_para_p3 +(19 rows) + +SELECT y, sum(x), avg(x), count(*) FROM pagg_tab_para GROUP BY y HAVING avg(x) < 12 ORDER BY 1, 2, 3; + y | sum | avg | count +----+-------+---------------------+------- + 0 | 15000 | 10.0000000000000000 | 1500 + 1 | 16500 | 11.0000000000000000 | 1500 + 10 | 15000 | 10.0000000000000000 | 1500 + 11 | 16500 | 11.0000000000000000 | 1500 +(4 rows) + +-- Test when parent can produce parallel paths but not any (or some) of its children +ALTER TABLE pagg_tab_para_p1 SET (parallel_workers = 0); +ALTER TABLE pagg_tab_para_p3 SET (parallel_workers = 0); +ANALYZE pagg_tab_para; +EXPLAIN (COSTS OFF) +SELECT x, sum(y), avg(y), count(*) FROM pagg_tab_para GROUP BY x HAVING avg(y) < 7 ORDER BY 1, 2, 3; + QUERY PLAN +-------------------------------------------------------------------------------------- + Sort + Sort Key: pagg_tab_para_p1.x, (sum(pagg_tab_para_p1.y)), (avg(pagg_tab_para_p1.y)) + -> Finalize GroupAggregate + Group Key: pagg_tab_para_p1.x + Filter: (avg(pagg_tab_para_p1.y) < '7'::numeric) + -> Gather Merge + Workers Planned: 2 + -> Sort + Sort Key: pagg_tab_para_p1.x + -> Partial HashAggregate + Group Key: pagg_tab_para_p1.x + -> Parallel Append + -> Seq Scan on pagg_tab_para_p1 + -> Seq Scan on pagg_tab_para_p3 + -> Parallel Seq Scan on pagg_tab_para_p2 +(15 rows) + +SELECT x, sum(y), avg(y), count(*) FROM pagg_tab_para 
GROUP BY x HAVING avg(y) < 7 ORDER BY 1, 2, 3; + x | sum | avg | count +----+------+--------------------+------- + 0 | 5000 | 5.0000000000000000 | 1000 + 1 | 6000 | 6.0000000000000000 | 1000 + 10 | 5000 | 5.0000000000000000 | 1000 + 11 | 6000 | 6.0000000000000000 | 1000 + 20 | 5000 | 5.0000000000000000 | 1000 + 21 | 6000 | 6.0000000000000000 | 1000 +(6 rows) + +ALTER TABLE pagg_tab_para_p2 SET (parallel_workers = 0); +ANALYZE pagg_tab_para; +EXPLAIN (COSTS OFF) +SELECT x, sum(y), avg(y), count(*) FROM pagg_tab_para GROUP BY x HAVING avg(y) < 7 ORDER BY 1, 2, 3; + QUERY PLAN +-------------------------------------------------------------------------------------- + Sort + Sort Key: pagg_tab_para_p1.x, (sum(pagg_tab_para_p1.y)), (avg(pagg_tab_para_p1.y)) + -> Finalize GroupAggregate + Group Key: pagg_tab_para_p1.x + Filter: (avg(pagg_tab_para_p1.y) < '7'::numeric) + -> Gather Merge + Workers Planned: 2 + -> Sort + Sort Key: pagg_tab_para_p1.x + -> Partial HashAggregate + Group Key: pagg_tab_para_p1.x + -> Parallel Append + -> Seq Scan on pagg_tab_para_p1 + -> Seq Scan on pagg_tab_para_p2 + -> Seq Scan on pagg_tab_para_p3 +(15 rows) + +SELECT x, sum(y), avg(y), count(*) FROM pagg_tab_para GROUP BY x HAVING avg(y) < 7 ORDER BY 1, 2, 3; + x | sum | avg | count +----+------+--------------------+------- + 0 | 5000 | 5.0000000000000000 | 1000 + 1 | 6000 | 6.0000000000000000 | 1000 + 10 | 5000 | 5.0000000000000000 | 1000 + 11 | 6000 | 6.0000000000000000 | 1000 + 20 | 5000 | 5.0000000000000000 | 1000 + 21 | 6000 | 6.0000000000000000 | 1000 +(6 rows) + +-- Reset parallelism parameters to get partitionwise aggregation plan. +RESET min_parallel_table_scan_size; +RESET parallel_setup_cost; +EXPLAIN (COSTS OFF) +SELECT x, sum(y), avg(y), count(*) FROM pagg_tab_para GROUP BY x HAVING avg(y) < 7 ORDER BY 1, 2, 3; + QUERY PLAN +-------------------------------------------------------------------------------------- + Sort + Sort Key: pagg_tab_para_p1.x, (sum(pagg_tab_para_p1.y)), (avg(pagg_tab_para_p1.y)) + -> Append + -> HashAggregate + Group Key: pagg_tab_para_p1.x + Filter: (avg(pagg_tab_para_p1.y) < '7'::numeric) + -> Seq Scan on pagg_tab_para_p1 + -> HashAggregate + Group Key: pagg_tab_para_p2.x + Filter: (avg(pagg_tab_para_p2.y) < '7'::numeric) + -> Seq Scan on pagg_tab_para_p2 + -> HashAggregate + Group Key: pagg_tab_para_p3.x + Filter: (avg(pagg_tab_para_p3.y) < '7'::numeric) + -> Seq Scan on pagg_tab_para_p3 +(15 rows) + +SELECT x, sum(y), avg(y), count(*) FROM pagg_tab_para GROUP BY x HAVING avg(y) < 7 ORDER BY 1, 2, 3; + x | sum | avg | count +----+------+--------------------+------- + 0 | 5000 | 5.0000000000000000 | 1000 + 1 | 6000 | 6.0000000000000000 | 1000 + 10 | 5000 | 5.0000000000000000 | 1000 + 11 | 6000 | 6.0000000000000000 | 1000 + 20 | 5000 | 5.0000000000000000 | 1000 + 21 | 6000 | 6.0000000000000000 | 1000 +(6 rows) + diff --git a/src/test/regress/expected/partition_info.out b/src/test/regress/expected/partition_info.out new file mode 100644 index 0000000000..6b116125e6 --- /dev/null +++ b/src/test/regress/expected/partition_info.out @@ -0,0 +1,114 @@ +-- +-- Tests for pg_partition_tree +-- +SELECT * FROM pg_partition_tree(NULL); + relid | parentrelid | isleaf | level +-------+-------------+--------+------- +(0 rows) + +-- Test table partition trees +CREATE TABLE ptif_test (a int, b int) PARTITION BY range (a); +CREATE TABLE ptif_test0 PARTITION OF ptif_test + FOR VALUES FROM (minvalue) TO (0) PARTITION BY list (b); +CREATE TABLE ptif_test01 PARTITION OF ptif_test0 FOR VALUES IN (1); 
+CREATE TABLE ptif_test1 PARTITION OF ptif_test + FOR VALUES FROM (0) TO (100) PARTITION BY list (b); +CREATE TABLE ptif_test11 PARTITION OF ptif_test1 FOR VALUES IN (1); +CREATE TABLE ptif_test2 PARTITION OF ptif_test + FOR VALUES FROM (100) TO (maxvalue); +-- Test index partition tree +CREATE INDEX ptif_test_index ON ONLY ptif_test (a); +CREATE INDEX ptif_test0_index ON ONLY ptif_test0 (a); +ALTER INDEX ptif_test_index ATTACH PARTITION ptif_test0_index; +CREATE INDEX ptif_test01_index ON ptif_test01 (a); +ALTER INDEX ptif_test0_index ATTACH PARTITION ptif_test01_index; +CREATE INDEX ptif_test1_index ON ONLY ptif_test1 (a); +ALTER INDEX ptif_test_index ATTACH PARTITION ptif_test1_index; +CREATE INDEX ptif_test11_index ON ptif_test11 (a); +ALTER INDEX ptif_test1_index ATTACH PARTITION ptif_test11_index; +CREATE INDEX ptif_test2_index ON ptif_test2 (a); +ALTER INDEX ptif_test_index ATTACH PARTITION ptif_test2_index; +-- List all tables members of the tree +SELECT relid, parentrelid, level, isleaf + FROM pg_partition_tree('ptif_test'); + relid | parentrelid | level | isleaf +-------------+-------------+-------+-------- + ptif_test | | 0 | f + ptif_test0 | ptif_test | 1 | f + ptif_test1 | ptif_test | 1 | f + ptif_test2 | ptif_test | 1 | t + ptif_test01 | ptif_test0 | 2 | t + ptif_test11 | ptif_test1 | 2 | t +(6 rows) + +-- List tables from an intermediate level +SELECT relid, parentrelid, level, isleaf + FROM pg_partition_tree('ptif_test0') p + JOIN pg_class c ON (p.relid = c.oid); + relid | parentrelid | level | isleaf +-------------+-------------+-------+-------- + ptif_test0 | ptif_test | 0 | f + ptif_test01 | ptif_test0 | 1 | t +(2 rows) + +-- List from leaf table +SELECT relid, parentrelid, level, isleaf + FROM pg_partition_tree('ptif_test01') p + JOIN pg_class c ON (p.relid = c.oid); + relid | parentrelid | level | isleaf +-------------+-------------+-------+-------- + ptif_test01 | ptif_test0 | 0 | t +(1 row) + +-- List all indexes members of the tree +SELECT relid, parentrelid, level, isleaf + FROM pg_partition_tree('ptif_test_index'); + relid | parentrelid | level | isleaf +-------------------+------------------+-------+-------- + ptif_test_index | | 0 | f + ptif_test0_index | ptif_test_index | 1 | f + ptif_test1_index | ptif_test_index | 1 | f + ptif_test2_index | ptif_test_index | 1 | t + ptif_test01_index | ptif_test0_index | 2 | t + ptif_test11_index | ptif_test1_index | 2 | t +(6 rows) + +-- List indexes from an intermediate level +SELECT relid, parentrelid, level, isleaf + FROM pg_partition_tree('ptif_test0_index') p + JOIN pg_class c ON (p.relid = c.oid); + relid | parentrelid | level | isleaf +-------------------+------------------+-------+-------- + ptif_test0_index | ptif_test_index | 0 | f + ptif_test01_index | ptif_test0_index | 1 | t +(2 rows) + +-- List from leaf index +SELECT relid, parentrelid, level, isleaf + FROM pg_partition_tree('ptif_test01_index') p + JOIN pg_class c ON (p.relid = c.oid); + relid | parentrelid | level | isleaf +-------------------+------------------+-------+-------- + ptif_test01_index | ptif_test0_index | 0 | t +(1 row) + +DROP TABLE ptif_test; +-- A table not part of a partition tree works is the only member listed. 
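For a regular table that belongs to no partition tree, pg_partition_tree() simply reports the table itself as a single leaf at level 0, which is what the statements just below verify. Beyond sanity tests, flattening a whole hierarchy this way is handy for quick reporting; a sketch, not part of the patch (my_parted is a placeholder name, since the ptif_test tree is dropped just above):

    SELECT count(*) FILTER (WHERE isleaf) AS leaf_partitions,
           max(level)                     AS depth,
           pg_size_pretty(sum(pg_relation_size(relid))) AS total_size
    FROM pg_partition_tree('my_parted');

The function also accepts a partitioned index, as the index listings above show.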
+CREATE TABLE ptif_normal_table(a int); +SELECT relid, parentrelid, level, isleaf + FROM pg_partition_tree('ptif_normal_table'); + relid | parentrelid | level | isleaf +-------------------+-------------+-------+-------- + ptif_normal_table | | 0 | t +(1 row) + +DROP TABLE ptif_normal_table; +-- Views and materialized viewS cannot be part of a partition tree. +CREATE VIEW ptif_test_view AS SELECT 1; +CREATE MATERIALIZED VIEW ptif_test_matview AS SELECT 1; +SELECT * FROM pg_partition_tree('ptif_test_view'); +ERROR: "ptif_test_view" is not a table, a foreign table, or an index +SELECT * FROM pg_partition_tree('ptif_test_matview'); +ERROR: "ptif_test_matview" is not a table, a foreign table, or an index +DROP VIEW ptif_test_view; +DROP MATERIALIZED VIEW ptif_test_matview; diff --git a/src/test/regress/expected/partition_join.out b/src/test/regress/expected/partition_join.out new file mode 100644 index 0000000000..3ba3aaf2d8 --- /dev/null +++ b/src/test/regress/expected/partition_join.out @@ -0,0 +1,2028 @@ +-- +-- PARTITION_JOIN +-- Test partitionwise join between partitioned tables +-- +-- Enable partitionwise join, which by default is disabled. +SET enable_partitionwise_join to true; +-- +-- partitioned by a single column +-- +CREATE TABLE prt1 (a int, b int, c varchar) PARTITION BY RANGE(a); +CREATE TABLE prt1_p1 PARTITION OF prt1 FOR VALUES FROM (0) TO (250); +CREATE TABLE prt1_p3 PARTITION OF prt1 FOR VALUES FROM (500) TO (600); +CREATE TABLE prt1_p2 PARTITION OF prt1 FOR VALUES FROM (250) TO (500); +INSERT INTO prt1 SELECT i, i % 25, to_char(i, 'FM0000') FROM generate_series(0, 599) i WHERE i % 2 = 0; +CREATE INDEX iprt1_p1_a on prt1_p1(a); +CREATE INDEX iprt1_p2_a on prt1_p2(a); +CREATE INDEX iprt1_p3_a on prt1_p3(a); +ANALYZE prt1; +CREATE TABLE prt2 (a int, b int, c varchar) PARTITION BY RANGE(b); +CREATE TABLE prt2_p1 PARTITION OF prt2 FOR VALUES FROM (0) TO (250); +CREATE TABLE prt2_p2 PARTITION OF prt2 FOR VALUES FROM (250) TO (500); +CREATE TABLE prt2_p3 PARTITION OF prt2 FOR VALUES FROM (500) TO (600); +INSERT INTO prt2 SELECT i % 25, i, to_char(i, 'FM0000') FROM generate_series(0, 599) i WHERE i % 3 = 0; +CREATE INDEX iprt2_p1_b on prt2_p1(b); +CREATE INDEX iprt2_p2_b on prt2_p2(b); +CREATE INDEX iprt2_p3_b on prt2_p3(b); +ANALYZE prt2; +-- inner join +EXPLAIN (COSTS OFF) +SELECT t1.a, t1.c, t2.b, t2.c FROM prt1 t1, prt2 t2 WHERE t1.a = t2.b AND t1.b = 0 ORDER BY t1.a, t2.b; + QUERY PLAN +-------------------------------------------------- + Sort + Sort Key: t1.a + -> Append + -> Hash Join + Hash Cond: (t2.b = t1.a) + -> Seq Scan on prt2_p1 t2 + -> Hash + -> Seq Scan on prt1_p1 t1 + Filter: (b = 0) + -> Hash Join + Hash Cond: (t2_1.b = t1_1.a) + -> Seq Scan on prt2_p2 t2_1 + -> Hash + -> Seq Scan on prt1_p2 t1_1 + Filter: (b = 0) + -> Hash Join + Hash Cond: (t2_2.b = t1_2.a) + -> Seq Scan on prt2_p3 t2_2 + -> Hash + -> Seq Scan on prt1_p3 t1_2 + Filter: (b = 0) +(21 rows) + +SELECT t1.a, t1.c, t2.b, t2.c FROM prt1 t1, prt2 t2 WHERE t1.a = t2.b AND t1.b = 0 ORDER BY t1.a, t2.b; + a | c | b | c +-----+------+-----+------ + 0 | 0000 | 0 | 0000 + 150 | 0150 | 150 | 0150 + 300 | 0300 | 300 | 0300 + 450 | 0450 | 450 | 0450 +(4 rows) + +-- left outer join, with whole-row reference; partitionwise join does not apply +EXPLAIN (COSTS OFF) +SELECT t1, t2 FROM prt1 t1 LEFT JOIN prt2 t2 ON t1.a = t2.b WHERE t1.b = 0 ORDER BY t1.a, t2.b; + QUERY PLAN +-------------------------------------------------- + Sort + Sort Key: t1.a, t2.b + -> Hash Right Join + Hash Cond: (t2.b = t1.a) + -> Append + 
-> Seq Scan on prt2_p1 t2 + -> Seq Scan on prt2_p2 t2_1 + -> Seq Scan on prt2_p3 t2_2 + -> Hash + -> Append + -> Seq Scan on prt1_p1 t1 + Filter: (b = 0) + -> Seq Scan on prt1_p2 t1_1 + Filter: (b = 0) + -> Seq Scan on prt1_p3 t1_2 + Filter: (b = 0) +(16 rows) + +SELECT t1, t2 FROM prt1 t1 LEFT JOIN prt2 t2 ON t1.a = t2.b WHERE t1.b = 0 ORDER BY t1.a, t2.b; + t1 | t2 +--------------+-------------- + (0,0,0000) | (0,0,0000) + (50,0,0050) | + (100,0,0100) | + (150,0,0150) | (0,150,0150) + (200,0,0200) | + (250,0,0250) | + (300,0,0300) | (0,300,0300) + (350,0,0350) | + (400,0,0400) | + (450,0,0450) | (0,450,0450) + (500,0,0500) | + (550,0,0550) | +(12 rows) + +-- right outer join +EXPLAIN (COSTS OFF) +SELECT t1.a, t1.c, t2.b, t2.c FROM prt1 t1 RIGHT JOIN prt2 t2 ON t1.a = t2.b WHERE t2.a = 0 ORDER BY t1.a, t2.b; + QUERY PLAN +--------------------------------------------------------------- + Sort + Sort Key: t1.a, t2.b + -> Append + -> Hash Right Join + Hash Cond: (t1.a = t2.b) + -> Seq Scan on prt1_p1 t1 + -> Hash + -> Seq Scan on prt2_p1 t2 + Filter: (a = 0) + -> Hash Right Join + Hash Cond: (t1_1.a = t2_1.b) + -> Seq Scan on prt1_p2 t1_1 + -> Hash + -> Seq Scan on prt2_p2 t2_1 + Filter: (a = 0) + -> Nested Loop Left Join + -> Seq Scan on prt2_p3 t2_2 + Filter: (a = 0) + -> Index Scan using iprt1_p3_a on prt1_p3 t1_2 + Index Cond: (a = t2_2.b) +(20 rows) + +SELECT t1.a, t1.c, t2.b, t2.c FROM prt1 t1 RIGHT JOIN prt2 t2 ON t1.a = t2.b WHERE t2.a = 0 ORDER BY t1.a, t2.b; + a | c | b | c +-----+------+-----+------ + 0 | 0000 | 0 | 0000 + 150 | 0150 | 150 | 0150 + 300 | 0300 | 300 | 0300 + 450 | 0450 | 450 | 0450 + | | 75 | 0075 + | | 225 | 0225 + | | 375 | 0375 + | | 525 | 0525 +(8 rows) + +-- full outer join, with placeholder vars +EXPLAIN (COSTS OFF) +SELECT t1.a, t1.c, t2.b, t2.c FROM (SELECT 50 phv, * FROM prt1 WHERE prt1.b = 0) t1 FULL JOIN (SELECT 75 phv, * FROM prt2 WHERE prt2.a = 0) t2 ON (t1.a = t2.b) WHERE t1.phv = t1.a OR t2.phv = t2.b ORDER BY t1.a, t2.b; + QUERY PLAN +------------------------------------------------------------------ + Sort + Sort Key: prt1_p1.a, prt2_p1.b + -> Append + -> Hash Full Join + Hash Cond: (prt1_p1.a = prt2_p1.b) + Filter: (((50) = prt1_p1.a) OR ((75) = prt2_p1.b)) + -> Seq Scan on prt1_p1 + Filter: (b = 0) + -> Hash + -> Seq Scan on prt2_p1 + Filter: (a = 0) + -> Hash Full Join + Hash Cond: (prt1_p2.a = prt2_p2.b) + Filter: (((50) = prt1_p2.a) OR ((75) = prt2_p2.b)) + -> Seq Scan on prt1_p2 + Filter: (b = 0) + -> Hash + -> Seq Scan on prt2_p2 + Filter: (a = 0) + -> Hash Full Join + Hash Cond: (prt1_p3.a = prt2_p3.b) + Filter: (((50) = prt1_p3.a) OR ((75) = prt2_p3.b)) + -> Seq Scan on prt1_p3 + Filter: (b = 0) + -> Hash + -> Seq Scan on prt2_p3 + Filter: (a = 0) +(27 rows) + +SELECT t1.a, t1.c, t2.b, t2.c FROM (SELECT 50 phv, * FROM prt1 WHERE prt1.b = 0) t1 FULL JOIN (SELECT 75 phv, * FROM prt2 WHERE prt2.a = 0) t2 ON (t1.a = t2.b) WHERE t1.phv = t1.a OR t2.phv = t2.b ORDER BY t1.a, t2.b; + a | c | b | c +----+------+----+------ + 50 | 0050 | | + | | 75 | 0075 +(2 rows) + +-- Join with pruned partitions from joining relations +EXPLAIN (COSTS OFF) +SELECT t1.a, t1.c, t2.b, t2.c FROM prt1 t1, prt2 t2 WHERE t1.a = t2.b AND t1.a < 450 AND t2.b > 250 AND t1.b = 0 ORDER BY t1.a, t2.b; + QUERY PLAN +----------------------------------------------------------- + Sort + Sort Key: t1.a + -> Append + -> Hash Join + Hash Cond: (t2.b = t1.a) + -> Seq Scan on prt2_p2 t2 + Filter: (b > 250) + -> Hash + -> Seq Scan on prt1_p2 t1 + Filter: ((a < 450) AND (b = 0)) +(10 
rows) + +SELECT t1.a, t1.c, t2.b, t2.c FROM prt1 t1, prt2 t2 WHERE t1.a = t2.b AND t1.a < 450 AND t2.b > 250 AND t1.b = 0 ORDER BY t1.a, t2.b; + a | c | b | c +-----+------+-----+------ + 300 | 0300 | 300 | 0300 +(1 row) + +EXPLAIN (COSTS OFF) +SELECT t1.a, t1.c, t2.b, t2.c FROM (SELECT * FROM prt1 WHERE a < 450) t1 LEFT JOIN (SELECT * FROM prt2 WHERE b > 250) t2 ON t1.a = t2.b WHERE t1.b = 0 ORDER BY t1.a, t2.b; + QUERY PLAN +----------------------------------------------------------- + Sort + Sort Key: prt1_p1.a, b + -> Append + -> Hash Left Join + Hash Cond: (prt1_p1.a = b) + -> Seq Scan on prt1_p1 + Filter: ((a < 450) AND (b = 0)) + -> Hash + -> Result + One-Time Filter: false + -> Hash Right Join + Hash Cond: (prt2_p2.b = prt1_p2.a) + -> Seq Scan on prt2_p2 + Filter: (b > 250) + -> Hash + -> Seq Scan on prt1_p2 + Filter: ((a < 450) AND (b = 0)) +(17 rows) + +SELECT t1.a, t1.c, t2.b, t2.c FROM (SELECT * FROM prt1 WHERE a < 450) t1 LEFT JOIN (SELECT * FROM prt2 WHERE b > 250) t2 ON t1.a = t2.b WHERE t1.b = 0 ORDER BY t1.a, t2.b; + a | c | b | c +-----+------+-----+------ + 0 | 0000 | | + 50 | 0050 | | + 100 | 0100 | | + 150 | 0150 | | + 200 | 0200 | | + 250 | 0250 | | + 300 | 0300 | 300 | 0300 + 350 | 0350 | | + 400 | 0400 | | +(9 rows) + +EXPLAIN (COSTS OFF) +SELECT t1.a, t1.c, t2.b, t2.c FROM (SELECT * FROM prt1 WHERE a < 450) t1 FULL JOIN (SELECT * FROM prt2 WHERE b > 250) t2 ON t1.a = t2.b WHERE t1.b = 0 OR t2.a = 0 ORDER BY t1.a, t2.b; + QUERY PLAN +------------------------------------------------------------ + Sort + Sort Key: prt1_p1.a, b + -> Append + -> Hash Full Join + Hash Cond: (prt1_p1.a = b) + Filter: ((prt1_p1.b = 0) OR (a = 0)) + -> Seq Scan on prt1_p1 + Filter: (a < 450) + -> Hash + -> Result + One-Time Filter: false + -> Hash Full Join + Hash Cond: (prt1_p2.a = prt2_p2.b) + Filter: ((prt1_p2.b = 0) OR (prt2_p2.a = 0)) + -> Seq Scan on prt1_p2 + Filter: (a < 450) + -> Hash + -> Seq Scan on prt2_p2 + Filter: (b > 250) + -> Hash Full Join + Hash Cond: (prt2_p3.b = a) + Filter: ((b = 0) OR (prt2_p3.a = 0)) + -> Seq Scan on prt2_p3 + Filter: (b > 250) + -> Hash + -> Result + One-Time Filter: false +(27 rows) + +SELECT t1.a, t1.c, t2.b, t2.c FROM (SELECT * FROM prt1 WHERE a < 450) t1 FULL JOIN (SELECT * FROM prt2 WHERE b > 250) t2 ON t1.a = t2.b WHERE t1.b = 0 OR t2.a = 0 ORDER BY t1.a, t2.b; + a | c | b | c +-----+------+-----+------ + 0 | 0000 | | + 50 | 0050 | | + 100 | 0100 | | + 150 | 0150 | | + 200 | 0200 | | + 250 | 0250 | | + 300 | 0300 | 300 | 0300 + 350 | 0350 | | + 400 | 0400 | | + | | 375 | 0375 + | | 450 | 0450 + | | 525 | 0525 +(12 rows) + +-- Semi-join +EXPLAIN (COSTS OFF) +SELECT t1.* FROM prt1 t1 WHERE t1.a IN (SELECT t2.b FROM prt2 t2 WHERE t2.a = 0) AND t1.b = 0 ORDER BY t1.a; + QUERY PLAN +-------------------------------------------------- + Sort + Sort Key: t1.a + -> Append + -> Hash Semi Join + Hash Cond: (t1.a = t2.b) + -> Seq Scan on prt1_p1 t1 + Filter: (b = 0) + -> Hash + -> Seq Scan on prt2_p1 t2 + Filter: (a = 0) + -> Hash Semi Join + Hash Cond: (t1_1.a = t2_1.b) + -> Seq Scan on prt1_p2 t1_1 + Filter: (b = 0) + -> Hash + -> Seq Scan on prt2_p2 t2_1 + Filter: (a = 0) + -> Nested Loop Semi Join + Join Filter: (t1_2.a = t2_2.b) + -> Seq Scan on prt1_p3 t1_2 + Filter: (b = 0) + -> Materialize + -> Seq Scan on prt2_p3 t2_2 + Filter: (a = 0) +(24 rows) + +SELECT t1.* FROM prt1 t1 WHERE t1.a IN (SELECT t2.b FROM prt2 t2 WHERE t2.a = 0) AND t1.b = 0 ORDER BY t1.a; + a | b | c +-----+---+------ + 0 | 0 | 0000 + 150 | 0 | 0150 + 300 | 0 | 0300 + 450 | 0 | 
0450 +(4 rows) + +-- Anti-join with aggregates +EXPLAIN (COSTS OFF) +SELECT sum(t1.a), avg(t1.a), sum(t1.b), avg(t1.b) FROM prt1 t1 WHERE NOT EXISTS (SELECT 1 FROM prt2 t2 WHERE t1.a = t2.b); + QUERY PLAN +-------------------------------------------------- + Aggregate + -> Append + -> Hash Anti Join + Hash Cond: (t1.a = t2.b) + -> Seq Scan on prt1_p1 t1 + -> Hash + -> Seq Scan on prt2_p1 t2 + -> Hash Anti Join + Hash Cond: (t1_1.a = t2_1.b) + -> Seq Scan on prt1_p2 t1_1 + -> Hash + -> Seq Scan on prt2_p2 t2_1 + -> Hash Anti Join + Hash Cond: (t1_2.a = t2_2.b) + -> Seq Scan on prt1_p3 t1_2 + -> Hash + -> Seq Scan on prt2_p3 t2_2 +(17 rows) + +SELECT sum(t1.a), avg(t1.a), sum(t1.b), avg(t1.b) FROM prt1 t1 WHERE NOT EXISTS (SELECT 1 FROM prt2 t2 WHERE t1.a = t2.b); + sum | avg | sum | avg +-------+----------------------+------+--------------------- + 60000 | 300.0000000000000000 | 2400 | 12.0000000000000000 +(1 row) + +-- lateral reference +EXPLAIN (COSTS OFF) +SELECT * FROM prt1 t1 LEFT JOIN LATERAL + (SELECT t2.a AS t2a, t3.a AS t3a, least(t1.a,t2.a,t3.b) FROM prt1 t2 JOIN prt2 t3 ON (t2.a = t3.b)) ss + ON t1.a = ss.t2a WHERE t1.b = 0 ORDER BY t1.a; + QUERY PLAN +-------------------------------------------------------------------------- + Sort + Sort Key: t1.a + -> Append + -> Nested Loop Left Join + -> Seq Scan on prt1_p1 t1 + Filter: (b = 0) + -> Nested Loop + -> Index Only Scan using iprt1_p1_a on prt1_p1 t2 + Index Cond: (a = t1.a) + -> Index Scan using iprt2_p1_b on prt2_p1 t3 + Index Cond: (b = t2.a) + -> Nested Loop Left Join + -> Seq Scan on prt1_p2 t1_1 + Filter: (b = 0) + -> Nested Loop + -> Index Only Scan using iprt1_p2_a on prt1_p2 t2_1 + Index Cond: (a = t1_1.a) + -> Index Scan using iprt2_p2_b on prt2_p2 t3_1 + Index Cond: (b = t2_1.a) + -> Nested Loop Left Join + -> Seq Scan on prt1_p3 t1_2 + Filter: (b = 0) + -> Nested Loop + -> Index Only Scan using iprt1_p3_a on prt1_p3 t2_2 + Index Cond: (a = t1_2.a) + -> Index Scan using iprt2_p3_b on prt2_p3 t3_2 + Index Cond: (b = t2_2.a) +(27 rows) + +SELECT * FROM prt1 t1 LEFT JOIN LATERAL + (SELECT t2.a AS t2a, t3.a AS t3a, least(t1.a,t2.a,t3.b) FROM prt1 t2 JOIN prt2 t3 ON (t2.a = t3.b)) ss + ON t1.a = ss.t2a WHERE t1.b = 0 ORDER BY t1.a; + a | b | c | t2a | t3a | least +-----+---+------+-----+-----+------- + 0 | 0 | 0000 | 0 | 0 | 0 + 50 | 0 | 0050 | | | + 100 | 0 | 0100 | | | + 150 | 0 | 0150 | 150 | 0 | 150 + 200 | 0 | 0200 | | | + 250 | 0 | 0250 | | | + 300 | 0 | 0300 | 300 | 0 | 300 + 350 | 0 | 0350 | | | + 400 | 0 | 0400 | | | + 450 | 0 | 0450 | 450 | 0 | 450 + 500 | 0 | 0500 | | | + 550 | 0 | 0550 | | | +(12 rows) + +EXPLAIN (COSTS OFF) +SELECT t1.a, ss.t2a, ss.t2c FROM prt1 t1 LEFT JOIN LATERAL + (SELECT t2.a AS t2a, t3.a AS t3a, t2.b t2b, t2.c t2c, least(t1.a,t2.a,t3.b) FROM prt1 t2 JOIN prt2 t3 ON (t2.a = t3.b)) ss + ON t1.c = ss.t2c WHERE (t1.b + coalesce(ss.t2b, 0)) = 0 ORDER BY t1.a; + QUERY PLAN +-------------------------------------------------------------- + Sort + Sort Key: t1.a + -> Hash Left Join + Hash Cond: ((t1.c)::text = (t2.c)::text) + Filter: ((t1.b + COALESCE(t2.b, 0)) = 0) + -> Append + -> Seq Scan on prt1_p1 t1 + -> Seq Scan on prt1_p2 t1_1 + -> Seq Scan on prt1_p3 t1_2 + -> Hash + -> Append + -> Hash Join + Hash Cond: (t2.a = t3.b) + -> Seq Scan on prt1_p1 t2 + -> Hash + -> Seq Scan on prt2_p1 t3 + -> Hash Join + Hash Cond: (t2_1.a = t3_1.b) + -> Seq Scan on prt1_p2 t2_1 + -> Hash + -> Seq Scan on prt2_p2 t3_1 + -> Hash Join + Hash Cond: (t2_2.a = t3_2.b) + -> Seq Scan on prt1_p3 t2_2 + -> Hash + -> 
Seq Scan on prt2_p3 t3_2 +(26 rows) + +SELECT t1.a, ss.t2a, ss.t2c FROM prt1 t1 LEFT JOIN LATERAL + (SELECT t2.a AS t2a, t3.a AS t3a, t2.b t2b, t2.c t2c, least(t1.a,t2.a,t3.a) FROM prt1 t2 JOIN prt2 t3 ON (t2.a = t3.b)) ss + ON t1.c = ss.t2c WHERE (t1.b + coalesce(ss.t2b, 0)) = 0 ORDER BY t1.a; + a | t2a | t2c +-----+-----+------ + 0 | 0 | 0000 + 50 | | + 100 | | + 150 | 150 | 0150 + 200 | | + 250 | | + 300 | 300 | 0300 + 350 | | + 400 | | + 450 | 450 | 0450 + 500 | | + 550 | | +(12 rows) + +-- +-- partitioned by expression +-- +CREATE TABLE prt1_e (a int, b int, c int) PARTITION BY RANGE(((a + b)/2)); +CREATE TABLE prt1_e_p1 PARTITION OF prt1_e FOR VALUES FROM (0) TO (250); +CREATE TABLE prt1_e_p2 PARTITION OF prt1_e FOR VALUES FROM (250) TO (500); +CREATE TABLE prt1_e_p3 PARTITION OF prt1_e FOR VALUES FROM (500) TO (600); +INSERT INTO prt1_e SELECT i, i, i % 25 FROM generate_series(0, 599, 2) i; +CREATE INDEX iprt1_e_p1_ab2 on prt1_e_p1(((a+b)/2)); +CREATE INDEX iprt1_e_p2_ab2 on prt1_e_p2(((a+b)/2)); +CREATE INDEX iprt1_e_p3_ab2 on prt1_e_p3(((a+b)/2)); +ANALYZE prt1_e; +CREATE TABLE prt2_e (a int, b int, c int) PARTITION BY RANGE(((b + a)/2)); +CREATE TABLE prt2_e_p1 PARTITION OF prt2_e FOR VALUES FROM (0) TO (250); +CREATE TABLE prt2_e_p2 PARTITION OF prt2_e FOR VALUES FROM (250) TO (500); +CREATE TABLE prt2_e_p3 PARTITION OF prt2_e FOR VALUES FROM (500) TO (600); +INSERT INTO prt2_e SELECT i, i, i % 25 FROM generate_series(0, 599, 3) i; +ANALYZE prt2_e; +EXPLAIN (COSTS OFF) +SELECT t1.a, t1.c, t2.b, t2.c FROM prt1_e t1, prt2_e t2 WHERE (t1.a + t1.b)/2 = (t2.b + t2.a)/2 AND t1.c = 0 ORDER BY t1.a, t2.b; + QUERY PLAN +------------------------------------------------------------------------------ + Sort + Sort Key: t1.a, t2.b + -> Append + -> Hash Join + Hash Cond: (((t2.b + t2.a) / 2) = ((t1.a + t1.b) / 2)) + -> Seq Scan on prt2_e_p1 t2 + -> Hash + -> Seq Scan on prt1_e_p1 t1 + Filter: (c = 0) + -> Hash Join + Hash Cond: (((t2_1.b + t2_1.a) / 2) = ((t1_1.a + t1_1.b) / 2)) + -> Seq Scan on prt2_e_p2 t2_1 + -> Hash + -> Seq Scan on prt1_e_p2 t1_1 + Filter: (c = 0) + -> Hash Join + Hash Cond: (((t2_2.b + t2_2.a) / 2) = ((t1_2.a + t1_2.b) / 2)) + -> Seq Scan on prt2_e_p3 t2_2 + -> Hash + -> Seq Scan on prt1_e_p3 t1_2 + Filter: (c = 0) +(21 rows) + +SELECT t1.a, t1.c, t2.b, t2.c FROM prt1_e t1, prt2_e t2 WHERE (t1.a + t1.b)/2 = (t2.b + t2.a)/2 AND t1.c = 0 ORDER BY t1.a, t2.b; + a | c | b | c +-----+---+-----+--- + 0 | 0 | 0 | 0 + 150 | 0 | 150 | 0 + 300 | 0 | 300 | 0 + 450 | 0 | 450 | 0 +(4 rows) + +-- +-- N-way join +-- +EXPLAIN (COSTS OFF) +SELECT t1.a, t1.c, t2.b, t2.c, t3.a + t3.b, t3.c FROM prt1 t1, prt2 t2, prt1_e t3 WHERE t1.a = t2.b AND t1.a = (t3.a + t3.b)/2 AND t1.b = 0 ORDER BY t1.a, t2.b; + QUERY PLAN +--------------------------------------------------------------------- + Sort + Sort Key: t1.a + -> Append + -> Nested Loop + Join Filter: (t1.a = ((t3.a + t3.b) / 2)) + -> Hash Join + Hash Cond: (t2.b = t1.a) + -> Seq Scan on prt2_p1 t2 + -> Hash + -> Seq Scan on prt1_p1 t1 + Filter: (b = 0) + -> Index Scan using iprt1_e_p1_ab2 on prt1_e_p1 t3 + Index Cond: (((a + b) / 2) = t2.b) + -> Nested Loop + Join Filter: (t1_1.a = ((t3_1.a + t3_1.b) / 2)) + -> Hash Join + Hash Cond: (t2_1.b = t1_1.a) + -> Seq Scan on prt2_p2 t2_1 + -> Hash + -> Seq Scan on prt1_p2 t1_1 + Filter: (b = 0) + -> Index Scan using iprt1_e_p2_ab2 on prt1_e_p2 t3_1 + Index Cond: (((a + b) / 2) = t2_1.b) + -> Nested Loop + Join Filter: (t1_2.a = ((t3_2.a + t3_2.b) / 2)) + -> Hash Join + Hash Cond: (t2_2.b = t1_2.a) 
+ -> Seq Scan on prt2_p3 t2_2 + -> Hash + -> Seq Scan on prt1_p3 t1_2 + Filter: (b = 0) + -> Index Scan using iprt1_e_p3_ab2 on prt1_e_p3 t3_2 + Index Cond: (((a + b) / 2) = t2_2.b) +(33 rows) + +SELECT t1.a, t1.c, t2.b, t2.c, t3.a + t3.b, t3.c FROM prt1 t1, prt2 t2, prt1_e t3 WHERE t1.a = t2.b AND t1.a = (t3.a + t3.b)/2 AND t1.b = 0 ORDER BY t1.a, t2.b; + a | c | b | c | ?column? | c +-----+------+-----+------+----------+--- + 0 | 0000 | 0 | 0000 | 0 | 0 + 150 | 0150 | 150 | 0150 | 300 | 0 + 300 | 0300 | 300 | 0300 | 600 | 0 + 450 | 0450 | 450 | 0450 | 900 | 0 +(4 rows) + +EXPLAIN (COSTS OFF) +SELECT t1.a, t1.c, t2.b, t2.c, t3.a + t3.b, t3.c FROM (prt1 t1 LEFT JOIN prt2 t2 ON t1.a = t2.b) LEFT JOIN prt1_e t3 ON (t1.a = (t3.a + t3.b)/2) WHERE t1.b = 0 ORDER BY t1.a, t2.b, t3.a + t3.b; + QUERY PLAN +-------------------------------------------------------------- + Sort + Sort Key: t1.a, t2.b, ((t3.a + t3.b)) + -> Append + -> Hash Right Join + Hash Cond: (((t3.a + t3.b) / 2) = t1.a) + -> Seq Scan on prt1_e_p1 t3 + -> Hash + -> Hash Right Join + Hash Cond: (t2.b = t1.a) + -> Seq Scan on prt2_p1 t2 + -> Hash + -> Seq Scan on prt1_p1 t1 + Filter: (b = 0) + -> Hash Right Join + Hash Cond: (((t3_1.a + t3_1.b) / 2) = t1_1.a) + -> Seq Scan on prt1_e_p2 t3_1 + -> Hash + -> Hash Right Join + Hash Cond: (t2_1.b = t1_1.a) + -> Seq Scan on prt2_p2 t2_1 + -> Hash + -> Seq Scan on prt1_p2 t1_1 + Filter: (b = 0) + -> Hash Right Join + Hash Cond: (((t3_2.a + t3_2.b) / 2) = t1_2.a) + -> Seq Scan on prt1_e_p3 t3_2 + -> Hash + -> Hash Right Join + Hash Cond: (t2_2.b = t1_2.a) + -> Seq Scan on prt2_p3 t2_2 + -> Hash + -> Seq Scan on prt1_p3 t1_2 + Filter: (b = 0) +(33 rows) + +SELECT t1.a, t1.c, t2.b, t2.c, t3.a + t3.b, t3.c FROM (prt1 t1 LEFT JOIN prt2 t2 ON t1.a = t2.b) LEFT JOIN prt1_e t3 ON (t1.a = (t3.a + t3.b)/2) WHERE t1.b = 0 ORDER BY t1.a, t2.b, t3.a + t3.b; + a | c | b | c | ?column? 
| c +-----+------+-----+------+----------+--- + 0 | 0000 | 0 | 0000 | 0 | 0 + 50 | 0050 | | | 100 | 0 + 100 | 0100 | | | 200 | 0 + 150 | 0150 | 150 | 0150 | 300 | 0 + 200 | 0200 | | | 400 | 0 + 250 | 0250 | | | 500 | 0 + 300 | 0300 | 300 | 0300 | 600 | 0 + 350 | 0350 | | | 700 | 0 + 400 | 0400 | | | 800 | 0 + 450 | 0450 | 450 | 0450 | 900 | 0 + 500 | 0500 | | | 1000 | 0 + 550 | 0550 | | | 1100 | 0 +(12 rows) + +EXPLAIN (COSTS OFF) +SELECT t1.a, t1.c, t2.b, t2.c, t3.a + t3.b, t3.c FROM (prt1 t1 LEFT JOIN prt2 t2 ON t1.a = t2.b) RIGHT JOIN prt1_e t3 ON (t1.a = (t3.a + t3.b)/2) WHERE t3.c = 0 ORDER BY t1.a, t2.b, t3.a + t3.b; + QUERY PLAN +------------------------------------------------------------------- + Sort + Sort Key: t1.a, t2.b, ((t3.a + t3.b)) + -> Append + -> Nested Loop Left Join + -> Hash Right Join + Hash Cond: (t1.a = ((t3.a + t3.b) / 2)) + -> Seq Scan on prt1_p1 t1 + -> Hash + -> Seq Scan on prt1_e_p1 t3 + Filter: (c = 0) + -> Index Scan using iprt2_p1_b on prt2_p1 t2 + Index Cond: (t1.a = b) + -> Nested Loop Left Join + -> Hash Right Join + Hash Cond: (t1_1.a = ((t3_1.a + t3_1.b) / 2)) + -> Seq Scan on prt1_p2 t1_1 + -> Hash + -> Seq Scan on prt1_e_p2 t3_1 + Filter: (c = 0) + -> Index Scan using iprt2_p2_b on prt2_p2 t2_1 + Index Cond: (t1_1.a = b) + -> Nested Loop Left Join + -> Hash Right Join + Hash Cond: (t1_2.a = ((t3_2.a + t3_2.b) / 2)) + -> Seq Scan on prt1_p3 t1_2 + -> Hash + -> Seq Scan on prt1_e_p3 t3_2 + Filter: (c = 0) + -> Index Scan using iprt2_p3_b on prt2_p3 t2_2 + Index Cond: (t1_2.a = b) +(30 rows) + +SELECT t1.a, t1.c, t2.b, t2.c, t3.a + t3.b, t3.c FROM (prt1 t1 LEFT JOIN prt2 t2 ON t1.a = t2.b) RIGHT JOIN prt1_e t3 ON (t1.a = (t3.a + t3.b)/2) WHERE t3.c = 0 ORDER BY t1.a, t2.b, t3.a + t3.b; + a | c | b | c | ?column? 
| c +-----+------+-----+------+----------+--- + 0 | 0000 | 0 | 0000 | 0 | 0 + 50 | 0050 | | | 100 | 0 + 100 | 0100 | | | 200 | 0 + 150 | 0150 | 150 | 0150 | 300 | 0 + 200 | 0200 | | | 400 | 0 + 250 | 0250 | | | 500 | 0 + 300 | 0300 | 300 | 0300 | 600 | 0 + 350 | 0350 | | | 700 | 0 + 400 | 0400 | | | 800 | 0 + 450 | 0450 | 450 | 0450 | 900 | 0 + 500 | 0500 | | | 1000 | 0 + 550 | 0550 | | | 1100 | 0 +(12 rows) + +-- Cases with non-nullable expressions in subquery results; +-- make sure these go to null as expected +EXPLAIN (COSTS OFF) +SELECT t1.a, t1.phv, t2.b, t2.phv, t3.a + t3.b, t3.phv FROM ((SELECT 50 phv, * FROM prt1 WHERE prt1.b = 0) t1 FULL JOIN (SELECT 75 phv, * FROM prt2 WHERE prt2.a = 0) t2 ON (t1.a = t2.b)) FULL JOIN (SELECT 50 phv, * FROM prt1_e WHERE prt1_e.c = 0) t3 ON (t1.a = (t3.a + t3.b)/2) WHERE t1.a = t1.phv OR t2.b = t2.phv OR (t3.a + t3.b)/2 = t3.phv ORDER BY t1.a, t2.b, t3.a + t3.b; + QUERY PLAN +---------------------------------------------------------------------------------------------------------------- + Sort + Sort Key: prt1_p1.a, prt2_p1.b, ((prt1_e_p1.a + prt1_e_p1.b)) + -> Append + -> Hash Full Join + Hash Cond: (prt1_p1.a = ((prt1_e_p1.a + prt1_e_p1.b) / 2)) + Filter: ((prt1_p1.a = (50)) OR (prt2_p1.b = (75)) OR (((prt1_e_p1.a + prt1_e_p1.b) / 2) = (50))) + -> Hash Full Join + Hash Cond: (prt1_p1.a = prt2_p1.b) + -> Seq Scan on prt1_p1 + Filter: (b = 0) + -> Hash + -> Seq Scan on prt2_p1 + Filter: (a = 0) + -> Hash + -> Seq Scan on prt1_e_p1 + Filter: (c = 0) + -> Hash Full Join + Hash Cond: (prt1_p2.a = ((prt1_e_p2.a + prt1_e_p2.b) / 2)) + Filter: ((prt1_p2.a = (50)) OR (prt2_p2.b = (75)) OR (((prt1_e_p2.a + prt1_e_p2.b) / 2) = (50))) + -> Hash Full Join + Hash Cond: (prt1_p2.a = prt2_p2.b) + -> Seq Scan on prt1_p2 + Filter: (b = 0) + -> Hash + -> Seq Scan on prt2_p2 + Filter: (a = 0) + -> Hash + -> Seq Scan on prt1_e_p2 + Filter: (c = 0) + -> Hash Full Join + Hash Cond: (prt1_p3.a = ((prt1_e_p3.a + prt1_e_p3.b) / 2)) + Filter: ((prt1_p3.a = (50)) OR (prt2_p3.b = (75)) OR (((prt1_e_p3.a + prt1_e_p3.b) / 2) = (50))) + -> Hash Full Join + Hash Cond: (prt1_p3.a = prt2_p3.b) + -> Seq Scan on prt1_p3 + Filter: (b = 0) + -> Hash + -> Seq Scan on prt2_p3 + Filter: (a = 0) + -> Hash + -> Seq Scan on prt1_e_p3 + Filter: (c = 0) +(42 rows) + +SELECT t1.a, t1.phv, t2.b, t2.phv, t3.a + t3.b, t3.phv FROM ((SELECT 50 phv, * FROM prt1 WHERE prt1.b = 0) t1 FULL JOIN (SELECT 75 phv, * FROM prt2 WHERE prt2.a = 0) t2 ON (t1.a = t2.b)) FULL JOIN (SELECT 50 phv, * FROM prt1_e WHERE prt1_e.c = 0) t3 ON (t1.a = (t3.a + t3.b)/2) WHERE t1.a = t1.phv OR t2.b = t2.phv OR (t3.a + t3.b)/2 = t3.phv ORDER BY t1.a, t2.b, t3.a + t3.b; + a | phv | b | phv | ?column? 
| phv +----+-----+----+-----+----------+----- + 50 | 50 | | | 100 | 50 + | | 75 | 75 | | +(2 rows) + +-- Semi-join +EXPLAIN (COSTS OFF) +SELECT t1.* FROM prt1 t1 WHERE t1.a IN (SELECT t1.b FROM prt2 t1, prt1_e t2 WHERE t1.a = 0 AND t1.b = (t2.a + t2.b)/2) AND t1.b = 0 ORDER BY t1.a; + QUERY PLAN +--------------------------------------------------------------------------------- + Sort + Sort Key: t1.a + -> Append + -> Nested Loop + Join Filter: (t1.a = t1_3.b) + -> HashAggregate + Group Key: t1_3.b + -> Hash Join + Hash Cond: (((t2.a + t2.b) / 2) = t1_3.b) + -> Seq Scan on prt1_e_p1 t2 + -> Hash + -> Seq Scan on prt2_p1 t1_3 + Filter: (a = 0) + -> Index Scan using iprt1_p1_a on prt1_p1 t1 + Index Cond: (a = ((t2.a + t2.b) / 2)) + Filter: (b = 0) + -> Nested Loop + Join Filter: (t1_1.a = t1_4.b) + -> HashAggregate + Group Key: t1_4.b + -> Hash Join + Hash Cond: (((t2_1.a + t2_1.b) / 2) = t1_4.b) + -> Seq Scan on prt1_e_p2 t2_1 + -> Hash + -> Seq Scan on prt2_p2 t1_4 + Filter: (a = 0) + -> Index Scan using iprt1_p2_a on prt1_p2 t1_1 + Index Cond: (a = ((t2_1.a + t2_1.b) / 2)) + Filter: (b = 0) + -> Nested Loop + Join Filter: (t1_2.a = t1_5.b) + -> HashAggregate + Group Key: t1_5.b + -> Nested Loop + -> Seq Scan on prt2_p3 t1_5 + Filter: (a = 0) + -> Index Scan using iprt1_e_p3_ab2 on prt1_e_p3 t2_2 + Index Cond: (((a + b) / 2) = t1_5.b) + -> Index Scan using iprt1_p3_a on prt1_p3 t1_2 + Index Cond: (a = ((t2_2.a + t2_2.b) / 2)) + Filter: (b = 0) +(41 rows) + +SELECT t1.* FROM prt1 t1 WHERE t1.a IN (SELECT t1.b FROM prt2 t1, prt1_e t2 WHERE t1.a = 0 AND t1.b = (t2.a + t2.b)/2) AND t1.b = 0 ORDER BY t1.a; + a | b | c +-----+---+------ + 0 | 0 | 0000 + 150 | 0 | 0150 + 300 | 0 | 0300 + 450 | 0 | 0450 +(4 rows) + +EXPLAIN (COSTS OFF) +SELECT t1.* FROM prt1 t1 WHERE t1.a IN (SELECT t1.b FROM prt2 t1 WHERE t1.b IN (SELECT (t1.a + t1.b)/2 FROM prt1_e t1 WHERE t1.c = 0)) AND t1.b = 0 ORDER BY t1.a; + QUERY PLAN +------------------------------------------------------------------------------- + Sort + Sort Key: t1.a + -> Append + -> Nested Loop + -> HashAggregate + Group Key: t1_3.b + -> Hash Semi Join + Hash Cond: (t1_3.b = ((t1_6.a + t1_6.b) / 2)) + -> Seq Scan on prt2_p1 t1_3 + -> Hash + -> Seq Scan on prt1_e_p1 t1_6 + Filter: (c = 0) + -> Index Scan using iprt1_p1_a on prt1_p1 t1 + Index Cond: (a = t1_3.b) + Filter: (b = 0) + -> Nested Loop + -> HashAggregate + Group Key: t1_4.b + -> Hash Semi Join + Hash Cond: (t1_4.b = ((t1_7.a + t1_7.b) / 2)) + -> Seq Scan on prt2_p2 t1_4 + -> Hash + -> Seq Scan on prt1_e_p2 t1_7 + Filter: (c = 0) + -> Index Scan using iprt1_p2_a on prt1_p2 t1_1 + Index Cond: (a = t1_4.b) + Filter: (b = 0) + -> Nested Loop + -> Unique + -> Sort + Sort Key: t1_5.b + -> Hash Semi Join + Hash Cond: (t1_5.b = ((t1_8.a + t1_8.b) / 2)) + -> Seq Scan on prt2_p3 t1_5 + -> Hash + -> Seq Scan on prt1_e_p3 t1_8 + Filter: (c = 0) + -> Index Scan using iprt1_p3_a on prt1_p3 t1_2 + Index Cond: (a = t1_5.b) + Filter: (b = 0) +(40 rows) + +SELECT t1.* FROM prt1 t1 WHERE t1.a IN (SELECT t1.b FROM prt2 t1 WHERE t1.b IN (SELECT (t1.a + t1.b)/2 FROM prt1_e t1 WHERE t1.c = 0)) AND t1.b = 0 ORDER BY t1.a; + a | b | c +-----+---+------ + 0 | 0 | 0000 + 150 | 0 | 0150 + 300 | 0 | 0300 + 450 | 0 | 0450 +(4 rows) + +-- test merge joins +SET enable_hashjoin TO off; +SET enable_nestloop TO off; +EXPLAIN (COSTS OFF) +SELECT t1.* FROM prt1 t1 WHERE t1.a IN (SELECT t1.b FROM prt2 t1 WHERE t1.b IN (SELECT (t1.a + t1.b)/2 FROM prt1_e t1 WHERE t1.c = 0)) AND t1.b = 0 ORDER BY t1.a; + QUERY PLAN 
+---------------------------------------------------------------- + Merge Append + Sort Key: t1.a + -> Merge Semi Join + Merge Cond: (t1.a = t1_3.b) + -> Sort + Sort Key: t1.a + -> Seq Scan on prt1_p1 t1 + Filter: (b = 0) + -> Merge Semi Join + Merge Cond: (t1_3.b = (((t1_6.a + t1_6.b) / 2))) + -> Sort + Sort Key: t1_3.b + -> Seq Scan on prt2_p1 t1_3 + -> Sort + Sort Key: (((t1_6.a + t1_6.b) / 2)) + -> Seq Scan on prt1_e_p1 t1_6 + Filter: (c = 0) + -> Merge Semi Join + Merge Cond: (t1_1.a = t1_4.b) + -> Sort + Sort Key: t1_1.a + -> Seq Scan on prt1_p2 t1_1 + Filter: (b = 0) + -> Merge Semi Join + Merge Cond: (t1_4.b = (((t1_7.a + t1_7.b) / 2))) + -> Sort + Sort Key: t1_4.b + -> Seq Scan on prt2_p2 t1_4 + -> Sort + Sort Key: (((t1_7.a + t1_7.b) / 2)) + -> Seq Scan on prt1_e_p2 t1_7 + Filter: (c = 0) + -> Merge Semi Join + Merge Cond: (t1_2.a = t1_5.b) + -> Sort + Sort Key: t1_2.a + -> Seq Scan on prt1_p3 t1_2 + Filter: (b = 0) + -> Merge Semi Join + Merge Cond: (t1_5.b = (((t1_8.a + t1_8.b) / 2))) + -> Sort + Sort Key: t1_5.b + -> Seq Scan on prt2_p3 t1_5 + -> Sort + Sort Key: (((t1_8.a + t1_8.b) / 2)) + -> Seq Scan on prt1_e_p3 t1_8 + Filter: (c = 0) +(47 rows) + +SELECT t1.* FROM prt1 t1 WHERE t1.a IN (SELECT t1.b FROM prt2 t1 WHERE t1.b IN (SELECT (t1.a + t1.b)/2 FROM prt1_e t1 WHERE t1.c = 0)) AND t1.b = 0 ORDER BY t1.a; + a | b | c +-----+---+------ + 0 | 0 | 0000 + 150 | 0 | 0150 + 300 | 0 | 0300 + 450 | 0 | 0450 +(4 rows) + +EXPLAIN (COSTS OFF) +SELECT t1.a, t1.c, t2.b, t2.c, t3.a + t3.b, t3.c FROM (prt1 t1 LEFT JOIN prt2 t2 ON t1.a = t2.b) RIGHT JOIN prt1_e t3 ON (t1.a = (t3.a + t3.b)/2) WHERE t3.c = 0 ORDER BY t1.a, t2.b, t3.a + t3.b; + QUERY PLAN +---------------------------------------------------------------------------- + Sort + Sort Key: t1.a, t2.b, ((t3.a + t3.b)) + -> Append + -> Merge Left Join + Merge Cond: (t1.a = t2.b) + -> Sort + Sort Key: t1.a + -> Merge Left Join + Merge Cond: ((((t3.a + t3.b) / 2)) = t1.a) + -> Sort + Sort Key: (((t3.a + t3.b) / 2)) + -> Seq Scan on prt1_e_p1 t3 + Filter: (c = 0) + -> Sort + Sort Key: t1.a + -> Seq Scan on prt1_p1 t1 + -> Sort + Sort Key: t2.b + -> Seq Scan on prt2_p1 t2 + -> Merge Left Join + Merge Cond: (t1_1.a = t2_1.b) + -> Sort + Sort Key: t1_1.a + -> Merge Left Join + Merge Cond: ((((t3_1.a + t3_1.b) / 2)) = t1_1.a) + -> Sort + Sort Key: (((t3_1.a + t3_1.b) / 2)) + -> Seq Scan on prt1_e_p2 t3_1 + Filter: (c = 0) + -> Sort + Sort Key: t1_1.a + -> Seq Scan on prt1_p2 t1_1 + -> Sort + Sort Key: t2_1.b + -> Seq Scan on prt2_p2 t2_1 + -> Merge Left Join + Merge Cond: (t1_2.a = t2_2.b) + -> Sort + Sort Key: t1_2.a + -> Merge Left Join + Merge Cond: ((((t3_2.a + t3_2.b) / 2)) = t1_2.a) + -> Sort + Sort Key: (((t3_2.a + t3_2.b) / 2)) + -> Seq Scan on prt1_e_p3 t3_2 + Filter: (c = 0) + -> Sort + Sort Key: t1_2.a + -> Seq Scan on prt1_p3 t1_2 + -> Sort + Sort Key: t2_2.b + -> Seq Scan on prt2_p3 t2_2 +(51 rows) + +SELECT t1.a, t1.c, t2.b, t2.c, t3.a + t3.b, t3.c FROM (prt1 t1 LEFT JOIN prt2 t2 ON t1.a = t2.b) RIGHT JOIN prt1_e t3 ON (t1.a = (t3.a + t3.b)/2) WHERE t3.c = 0 ORDER BY t1.a, t2.b, t3.a + t3.b; + a | c | b | c | ?column? 
| c +-----+------+-----+------+----------+--- + 0 | 0000 | 0 | 0000 | 0 | 0 + 50 | 0050 | | | 100 | 0 + 100 | 0100 | | | 200 | 0 + 150 | 0150 | 150 | 0150 | 300 | 0 + 200 | 0200 | | | 400 | 0 + 250 | 0250 | | | 500 | 0 + 300 | 0300 | 300 | 0300 | 600 | 0 + 350 | 0350 | | | 700 | 0 + 400 | 0400 | | | 800 | 0 + 450 | 0450 | 450 | 0450 | 900 | 0 + 500 | 0500 | | | 1000 | 0 + 550 | 0550 | | | 1100 | 0 +(12 rows) + +-- MergeAppend on nullable column +EXPLAIN (COSTS OFF) +SELECT t1.a, t2.b FROM (SELECT * FROM prt1 WHERE a < 450) t1 LEFT JOIN (SELECT * FROM prt2 WHERE b > 250) t2 ON t1.a = t2.b WHERE t1.b = 0 ORDER BY t1.a, t2.b; + QUERY PLAN +----------------------------------------------------------- + Sort + Sort Key: prt1_p1.a, b + -> Append + -> Merge Left Join + Merge Cond: (prt1_p1.a = b) + -> Sort + Sort Key: prt1_p1.a + -> Seq Scan on prt1_p1 + Filter: ((a < 450) AND (b = 0)) + -> Sort + Sort Key: b + -> Result + One-Time Filter: false + -> Merge Left Join + Merge Cond: (prt1_p2.a = prt2_p2.b) + -> Sort + Sort Key: prt1_p2.a + -> Seq Scan on prt1_p2 + Filter: ((a < 450) AND (b = 0)) + -> Sort + Sort Key: prt2_p2.b + -> Seq Scan on prt2_p2 + Filter: (b > 250) +(23 rows) + +SELECT t1.a, t2.b FROM (SELECT * FROM prt1 WHERE a < 450) t1 LEFT JOIN (SELECT * FROM prt2 WHERE b > 250) t2 ON t1.a = t2.b WHERE t1.b = 0 ORDER BY t1.a, t2.b; + a | b +-----+----- + 0 | + 50 | + 100 | + 150 | + 200 | + 250 | + 300 | 300 + 350 | + 400 | +(9 rows) + +-- merge join when expression with whole-row reference needs to be sorted; +-- partitionwise join does not apply +EXPLAIN (COSTS OFF) +SELECT t1.a, t2.b FROM prt1 t1, prt2 t2 WHERE t1::text = t2::text AND t1.a = t2.b ORDER BY t1.a; + QUERY PLAN +----------------------------------------------------------------------------------------- + Merge Join + Merge Cond: ((t1.a = t2.b) AND (((((t1.*)::prt1))::text) = ((((t2.*)::prt2))::text))) + -> Sort + Sort Key: t1.a, ((((t1.*)::prt1))::text) + -> Result + -> Append + -> Seq Scan on prt1_p1 t1 + -> Seq Scan on prt1_p2 t1_1 + -> Seq Scan on prt1_p3 t1_2 + -> Sort + Sort Key: t2.b, ((((t2.*)::prt2))::text) + -> Result + -> Append + -> Seq Scan on prt2_p1 t2 + -> Seq Scan on prt2_p2 t2_1 + -> Seq Scan on prt2_p3 t2_2 +(16 rows) + +SELECT t1.a, t2.b FROM prt1 t1, prt2 t2 WHERE t1::text = t2::text AND t1.a = t2.b ORDER BY t1.a; + a | b +----+---- + 0 | 0 + 6 | 6 + 12 | 12 + 18 | 18 + 24 | 24 +(5 rows) + +RESET enable_hashjoin; +RESET enable_nestloop; +-- +-- partitioned by multiple columns +-- +CREATE TABLE prt1_m (a int, b int, c int) PARTITION BY RANGE(a, ((a + b)/2)); +CREATE TABLE prt1_m_p1 PARTITION OF prt1_m FOR VALUES FROM (0, 0) TO (250, 250); +CREATE TABLE prt1_m_p2 PARTITION OF prt1_m FOR VALUES FROM (250, 250) TO (500, 500); +CREATE TABLE prt1_m_p3 PARTITION OF prt1_m FOR VALUES FROM (500, 500) TO (600, 600); +INSERT INTO prt1_m SELECT i, i, i % 25 FROM generate_series(0, 599, 2) i; +ANALYZE prt1_m; +CREATE TABLE prt2_m (a int, b int, c int) PARTITION BY RANGE(((b + a)/2), b); +CREATE TABLE prt2_m_p1 PARTITION OF prt2_m FOR VALUES FROM (0, 0) TO (250, 250); +CREATE TABLE prt2_m_p2 PARTITION OF prt2_m FOR VALUES FROM (250, 250) TO (500, 500); +CREATE TABLE prt2_m_p3 PARTITION OF prt2_m FOR VALUES FROM (500, 500) TO (600, 600); +INSERT INTO prt2_m SELECT i, i, i % 25 FROM generate_series(0, 599, 3) i; +ANALYZE prt2_m; +EXPLAIN (COSTS OFF) +SELECT t1.a, t1.c, t2.b, t2.c FROM (SELECT * FROM prt1_m WHERE prt1_m.c = 0) t1 FULL JOIN (SELECT * FROM prt2_m WHERE prt2_m.c = 0) t2 ON (t1.a = (t2.b + t2.a)/2 AND t2.b = 
(t1.a + t1.b)/2) ORDER BY t1.a, t2.b; + QUERY PLAN +------------------------------------------------------------------------------------------------------------------------------------ + Sort + Sort Key: prt1_m_p1.a, prt2_m_p1.b + -> Append + -> Hash Full Join + Hash Cond: ((prt1_m_p1.a = ((prt2_m_p1.b + prt2_m_p1.a) / 2)) AND (((prt1_m_p1.a + prt1_m_p1.b) / 2) = prt2_m_p1.b)) + -> Seq Scan on prt1_m_p1 + Filter: (c = 0) + -> Hash + -> Seq Scan on prt2_m_p1 + Filter: (c = 0) + -> Hash Full Join + Hash Cond: ((prt1_m_p2.a = ((prt2_m_p2.b + prt2_m_p2.a) / 2)) AND (((prt1_m_p2.a + prt1_m_p2.b) / 2) = prt2_m_p2.b)) + -> Seq Scan on prt1_m_p2 + Filter: (c = 0) + -> Hash + -> Seq Scan on prt2_m_p2 + Filter: (c = 0) + -> Hash Full Join + Hash Cond: ((prt1_m_p3.a = ((prt2_m_p3.b + prt2_m_p3.a) / 2)) AND (((prt1_m_p3.a + prt1_m_p3.b) / 2) = prt2_m_p3.b)) + -> Seq Scan on prt1_m_p3 + Filter: (c = 0) + -> Hash + -> Seq Scan on prt2_m_p3 + Filter: (c = 0) +(24 rows) + +SELECT t1.a, t1.c, t2.b, t2.c FROM (SELECT * FROM prt1_m WHERE prt1_m.c = 0) t1 FULL JOIN (SELECT * FROM prt2_m WHERE prt2_m.c = 0) t2 ON (t1.a = (t2.b + t2.a)/2 AND t2.b = (t1.a + t1.b)/2) ORDER BY t1.a, t2.b; + a | c | b | c +-----+---+-----+--- + 0 | 0 | 0 | 0 + 50 | 0 | | + 100 | 0 | | + 150 | 0 | 150 | 0 + 200 | 0 | | + 250 | 0 | | + 300 | 0 | 300 | 0 + 350 | 0 | | + 400 | 0 | | + 450 | 0 | 450 | 0 + 500 | 0 | | + 550 | 0 | | + | | 75 | 0 + | | 225 | 0 + | | 375 | 0 + | | 525 | 0 +(16 rows) + +-- +-- tests for list partitioned tables. +-- +CREATE TABLE plt1 (a int, b int, c text) PARTITION BY LIST(c); +CREATE TABLE plt1_p1 PARTITION OF plt1 FOR VALUES IN ('0000', '0003', '0004', '0010'); +CREATE TABLE plt1_p2 PARTITION OF plt1 FOR VALUES IN ('0001', '0005', '0002', '0009'); +CREATE TABLE plt1_p3 PARTITION OF plt1 FOR VALUES IN ('0006', '0007', '0008', '0011'); +INSERT INTO plt1 SELECT i, i, to_char(i/50, 'FM0000') FROM generate_series(0, 599, 2) i; +ANALYZE plt1; +CREATE TABLE plt2 (a int, b int, c text) PARTITION BY LIST(c); +CREATE TABLE plt2_p1 PARTITION OF plt2 FOR VALUES IN ('0000', '0003', '0004', '0010'); +CREATE TABLE plt2_p2 PARTITION OF plt2 FOR VALUES IN ('0001', '0005', '0002', '0009'); +CREATE TABLE plt2_p3 PARTITION OF plt2 FOR VALUES IN ('0006', '0007', '0008', '0011'); +INSERT INTO plt2 SELECT i, i, to_char(i/50, 'FM0000') FROM generate_series(0, 599, 3) i; +ANALYZE plt2; +-- +-- list partitioned by expression +-- +CREATE TABLE plt1_e (a int, b int, c text) PARTITION BY LIST(ltrim(c, 'A')); +CREATE TABLE plt1_e_p1 PARTITION OF plt1_e FOR VALUES IN ('0000', '0003', '0004', '0010'); +CREATE TABLE plt1_e_p2 PARTITION OF plt1_e FOR VALUES IN ('0001', '0005', '0002', '0009'); +CREATE TABLE plt1_e_p3 PARTITION OF plt1_e FOR VALUES IN ('0006', '0007', '0008', '0011'); +INSERT INTO plt1_e SELECT i, i, 'A' || to_char(i/50, 'FM0000') FROM generate_series(0, 599, 2) i; +ANALYZE plt1_e; +-- test partition matching with N-way join +EXPLAIN (COSTS OFF) +SELECT avg(t1.a), avg(t2.b), avg(t3.a + t3.b), t1.c, t2.c, t3.c FROM plt1 t1, plt2 t2, plt1_e t3 WHERE t1.b = t2.b AND t1.c = t2.c AND ltrim(t3.c, 'A') = t1.c GROUP BY t1.c, t2.c, t3.c ORDER BY t1.c, t2.c, t3.c; + QUERY PLAN +-------------------------------------------------------------------------------- + GroupAggregate + Group Key: t1.c, t2.c, t3.c + -> Sort + Sort Key: t1.c, t3.c + -> Append + -> Hash Join + Hash Cond: (t1.c = ltrim(t3.c, 'A'::text)) + -> Hash Join + Hash Cond: ((t1.b = t2.b) AND (t1.c = t2.c)) + -> Seq Scan on plt1_p1 t1 + -> Hash + -> Seq Scan on plt2_p1 t2 
+ -> Hash + -> Seq Scan on plt1_e_p1 t3 + -> Hash Join + Hash Cond: (t1_1.c = ltrim(t3_1.c, 'A'::text)) + -> Hash Join + Hash Cond: ((t1_1.b = t2_1.b) AND (t1_1.c = t2_1.c)) + -> Seq Scan on plt1_p2 t1_1 + -> Hash + -> Seq Scan on plt2_p2 t2_1 + -> Hash + -> Seq Scan on plt1_e_p2 t3_1 + -> Hash Join + Hash Cond: (t1_2.c = ltrim(t3_2.c, 'A'::text)) + -> Hash Join + Hash Cond: ((t1_2.b = t2_2.b) AND (t1_2.c = t2_2.c)) + -> Seq Scan on plt1_p3 t1_2 + -> Hash + -> Seq Scan on plt2_p3 t2_2 + -> Hash + -> Seq Scan on plt1_e_p3 t3_2 +(32 rows) + +SELECT avg(t1.a), avg(t2.b), avg(t3.a + t3.b), t1.c, t2.c, t3.c FROM plt1 t1, plt2 t2, plt1_e t3 WHERE t1.b = t2.b AND t1.c = t2.c AND ltrim(t3.c, 'A') = t1.c GROUP BY t1.c, t2.c, t3.c ORDER BY t1.c, t2.c, t3.c; + avg | avg | avg | c | c | c +----------------------+----------------------+-----------------------+------+------+------- + 24.0000000000000000 | 24.0000000000000000 | 48.0000000000000000 | 0000 | 0000 | A0000 + 75.0000000000000000 | 75.0000000000000000 | 148.0000000000000000 | 0001 | 0001 | A0001 + 123.0000000000000000 | 123.0000000000000000 | 248.0000000000000000 | 0002 | 0002 | A0002 + 174.0000000000000000 | 174.0000000000000000 | 348.0000000000000000 | 0003 | 0003 | A0003 + 225.0000000000000000 | 225.0000000000000000 | 448.0000000000000000 | 0004 | 0004 | A0004 + 273.0000000000000000 | 273.0000000000000000 | 548.0000000000000000 | 0005 | 0005 | A0005 + 324.0000000000000000 | 324.0000000000000000 | 648.0000000000000000 | 0006 | 0006 | A0006 + 375.0000000000000000 | 375.0000000000000000 | 748.0000000000000000 | 0007 | 0007 | A0007 + 423.0000000000000000 | 423.0000000000000000 | 848.0000000000000000 | 0008 | 0008 | A0008 + 474.0000000000000000 | 474.0000000000000000 | 948.0000000000000000 | 0009 | 0009 | A0009 + 525.0000000000000000 | 525.0000000000000000 | 1048.0000000000000000 | 0010 | 0010 | A0010 + 573.0000000000000000 | 573.0000000000000000 | 1148.0000000000000000 | 0011 | 0011 | A0011 +(12 rows) + +-- joins where one of the relations is proven empty +EXPLAIN (COSTS OFF) +SELECT t1.a, t1.c, t2.b, t2.c FROM prt1 t1, prt2 t2 WHERE t1.a = t2.b AND t1.a = 1 AND t1.a = 2; + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) +SELECT t1.a, t1.c, t2.b, t2.c FROM (SELECT * FROM prt1 WHERE a = 1 AND a = 2) t1 LEFT JOIN prt2 t2 ON t1.a = t2.b; + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) +SELECT t1.a, t1.c, t2.b, t2.c FROM (SELECT * FROM prt1 WHERE a = 1 AND a = 2) t1 RIGHT JOIN prt2 t2 ON t1.a = t2.b, prt1 t3 WHERE t2.b = t3.a; + QUERY PLAN +-------------------------------------------------- + Hash Left Join + Hash Cond: (t2.b = a) + -> Append + -> Hash Join + Hash Cond: (t3.a = t2.b) + -> Seq Scan on prt1_p1 t3 + -> Hash + -> Seq Scan on prt2_p1 t2 + -> Hash Join + Hash Cond: (t3_1.a = t2_1.b) + -> Seq Scan on prt1_p2 t3_1 + -> Hash + -> Seq Scan on prt2_p2 t2_1 + -> Hash Join + Hash Cond: (t3_2.a = t2_2.b) + -> Seq Scan on prt1_p3 t3_2 + -> Hash + -> Seq Scan on prt2_p3 t2_2 + -> Hash + -> Result + One-Time Filter: false +(21 rows) + +EXPLAIN (COSTS OFF) +SELECT t1.a, t1.c, t2.b, t2.c FROM (SELECT * FROM prt1 WHERE a = 1 AND a = 2) t1 FULL JOIN prt2 t2 ON t1.a = t2.b WHERE t2.a = 0 ORDER BY t1.a, t2.b; + QUERY PLAN +-------------------------------------------- + Sort + Sort Key: a, t2.b + -> Hash Left Join + Hash Cond: (t2.b = a) + -> Append + -> Seq Scan on prt2_p1 t2 + Filter: (a = 0) + -> Seq Scan on prt2_p2 t2_1 + Filter: (a = 
0) + -> Seq Scan on prt2_p3 t2_2 + Filter: (a = 0) + -> Hash + -> Result + One-Time Filter: false +(14 rows) + +-- +-- tests for hash partitioned tables. +-- +CREATE TABLE pht1 (a int, b int, c text) PARTITION BY HASH(c); +CREATE TABLE pht1_p1 PARTITION OF pht1 FOR VALUES WITH (MODULUS 3, REMAINDER 0); +CREATE TABLE pht1_p2 PARTITION OF pht1 FOR VALUES WITH (MODULUS 3, REMAINDER 1); +CREATE TABLE pht1_p3 PARTITION OF pht1 FOR VALUES WITH (MODULUS 3, REMAINDER 2); +INSERT INTO pht1 SELECT i, i, to_char(i/50, 'FM0000') FROM generate_series(0, 599, 2) i; +ANALYZE pht1; +CREATE TABLE pht2 (a int, b int, c text) PARTITION BY HASH(c); +CREATE TABLE pht2_p1 PARTITION OF pht2 FOR VALUES WITH (MODULUS 3, REMAINDER 0); +CREATE TABLE pht2_p2 PARTITION OF pht2 FOR VALUES WITH (MODULUS 3, REMAINDER 1); +CREATE TABLE pht2_p3 PARTITION OF pht2 FOR VALUES WITH (MODULUS 3, REMAINDER 2); +INSERT INTO pht2 SELECT i, i, to_char(i/50, 'FM0000') FROM generate_series(0, 599, 3) i; +ANALYZE pht2; +-- +-- hash partitioned by expression +-- +CREATE TABLE pht1_e (a int, b int, c text) PARTITION BY HASH(ltrim(c, 'A')); +CREATE TABLE pht1_e_p1 PARTITION OF pht1_e FOR VALUES WITH (MODULUS 3, REMAINDER 0); +CREATE TABLE pht1_e_p2 PARTITION OF pht1_e FOR VALUES WITH (MODULUS 3, REMAINDER 1); +CREATE TABLE pht1_e_p3 PARTITION OF pht1_e FOR VALUES WITH (MODULUS 3, REMAINDER 2); +INSERT INTO pht1_e SELECT i, i, 'A' || to_char(i/50, 'FM0000') FROM generate_series(0, 299, 2) i; +ANALYZE pht1_e; +-- test partition matching with N-way join +EXPLAIN (COSTS OFF) +SELECT avg(t1.a), avg(t2.b), avg(t3.a + t3.b), t1.c, t2.c, t3.c FROM pht1 t1, pht2 t2, pht1_e t3 WHERE t1.b = t2.b AND t1.c = t2.c AND ltrim(t3.c, 'A') = t1.c GROUP BY t1.c, t2.c, t3.c ORDER BY t1.c, t2.c, t3.c; + QUERY PLAN +-------------------------------------------------------------------------------- + GroupAggregate + Group Key: t1.c, t2.c, t3.c + -> Sort + Sort Key: t1.c, t3.c + -> Append + -> Hash Join + Hash Cond: (t1.c = ltrim(t3.c, 'A'::text)) + -> Hash Join + Hash Cond: ((t1.b = t2.b) AND (t1.c = t2.c)) + -> Seq Scan on pht1_p1 t1 + -> Hash + -> Seq Scan on pht2_p1 t2 + -> Hash + -> Seq Scan on pht1_e_p1 t3 + -> Hash Join + Hash Cond: (t1_1.c = ltrim(t3_1.c, 'A'::text)) + -> Hash Join + Hash Cond: ((t1_1.b = t2_1.b) AND (t1_1.c = t2_1.c)) + -> Seq Scan on pht1_p2 t1_1 + -> Hash + -> Seq Scan on pht2_p2 t2_1 + -> Hash + -> Seq Scan on pht1_e_p2 t3_1 + -> Hash Join + Hash Cond: (t1_2.c = ltrim(t3_2.c, 'A'::text)) + -> Hash Join + Hash Cond: ((t1_2.b = t2_2.b) AND (t1_2.c = t2_2.c)) + -> Seq Scan on pht1_p3 t1_2 + -> Hash + -> Seq Scan on pht2_p3 t2_2 + -> Hash + -> Seq Scan on pht1_e_p3 t3_2 +(32 rows) + +SELECT avg(t1.a), avg(t2.b), avg(t3.a + t3.b), t1.c, t2.c, t3.c FROM pht1 t1, pht2 t2, pht1_e t3 WHERE t1.b = t2.b AND t1.c = t2.c AND ltrim(t3.c, 'A') = t1.c GROUP BY t1.c, t2.c, t3.c ORDER BY t1.c, t2.c, t3.c; + avg | avg | avg | c | c | c +----------------------+----------------------+----------------------+------+------+------- + 24.0000000000000000 | 24.0000000000000000 | 48.0000000000000000 | 0000 | 0000 | A0000 + 75.0000000000000000 | 75.0000000000000000 | 148.0000000000000000 | 0001 | 0001 | A0001 + 123.0000000000000000 | 123.0000000000000000 | 248.0000000000000000 | 0002 | 0002 | A0002 + 174.0000000000000000 | 174.0000000000000000 | 348.0000000000000000 | 0003 | 0003 | A0003 + 225.0000000000000000 | 225.0000000000000000 | 448.0000000000000000 | 0004 | 0004 | A0004 + 273.0000000000000000 | 273.0000000000000000 | 548.0000000000000000 | 0005 | 0005 | 
A0005 +(6 rows) + +-- test default partition behavior for range +ALTER TABLE prt1 DETACH PARTITION prt1_p3; +ALTER TABLE prt1 ATTACH PARTITION prt1_p3 DEFAULT; +ANALYZE prt1; +ALTER TABLE prt2 DETACH PARTITION prt2_p3; +ALTER TABLE prt2 ATTACH PARTITION prt2_p3 DEFAULT; +ANALYZE prt2; +EXPLAIN (COSTS OFF) +SELECT t1.a, t1.c, t2.b, t2.c FROM prt1 t1, prt2 t2 WHERE t1.a = t2.b AND t1.b = 0 ORDER BY t1.a, t2.b; + QUERY PLAN +-------------------------------------------------- + Sort + Sort Key: t1.a + -> Append + -> Hash Join + Hash Cond: (t2.b = t1.a) + -> Seq Scan on prt2_p1 t2 + -> Hash + -> Seq Scan on prt1_p1 t1 + Filter: (b = 0) + -> Hash Join + Hash Cond: (t2_1.b = t1_1.a) + -> Seq Scan on prt2_p2 t2_1 + -> Hash + -> Seq Scan on prt1_p2 t1_1 + Filter: (b = 0) + -> Hash Join + Hash Cond: (t2_2.b = t1_2.a) + -> Seq Scan on prt2_p3 t2_2 + -> Hash + -> Seq Scan on prt1_p3 t1_2 + Filter: (b = 0) +(21 rows) + +-- test default partition behavior for list +ALTER TABLE plt1 DETACH PARTITION plt1_p3; +ALTER TABLE plt1 ATTACH PARTITION plt1_p3 DEFAULT; +ANALYZE plt1; +ALTER TABLE plt2 DETACH PARTITION plt2_p3; +ALTER TABLE plt2 ATTACH PARTITION plt2_p3 DEFAULT; +ANALYZE plt2; +EXPLAIN (COSTS OFF) +SELECT avg(t1.a), avg(t2.b), t1.c, t2.c FROM plt1 t1 RIGHT JOIN plt2 t2 ON t1.c = t2.c WHERE t1.a % 25 = 0 GROUP BY t1.c, t2.c ORDER BY t1.c, t2.c; + QUERY PLAN +-------------------------------------------------------- + Sort + Sort Key: t1.c + -> HashAggregate + Group Key: t1.c, t2.c + -> Append + -> Hash Join + Hash Cond: (t2.c = t1.c) + -> Seq Scan on plt2_p1 t2 + -> Hash + -> Seq Scan on plt1_p1 t1 + Filter: ((a % 25) = 0) + -> Hash Join + Hash Cond: (t2_1.c = t1_1.c) + -> Seq Scan on plt2_p2 t2_1 + -> Hash + -> Seq Scan on plt1_p2 t1_1 + Filter: ((a % 25) = 0) + -> Hash Join + Hash Cond: (t2_2.c = t1_2.c) + -> Seq Scan on plt2_p3 t2_2 + -> Hash + -> Seq Scan on plt1_p3 t1_2 + Filter: ((a % 25) = 0) +(23 rows) + +-- +-- multiple levels of partitioning +-- +CREATE TABLE prt1_l (a int, b int, c varchar) PARTITION BY RANGE(a); +CREATE TABLE prt1_l_p1 PARTITION OF prt1_l FOR VALUES FROM (0) TO (250); +CREATE TABLE prt1_l_p2 PARTITION OF prt1_l FOR VALUES FROM (250) TO (500) PARTITION BY LIST (c); +CREATE TABLE prt1_l_p2_p1 PARTITION OF prt1_l_p2 FOR VALUES IN ('0000', '0001'); +CREATE TABLE prt1_l_p2_p2 PARTITION OF prt1_l_p2 FOR VALUES IN ('0002', '0003'); +CREATE TABLE prt1_l_p3 PARTITION OF prt1_l FOR VALUES FROM (500) TO (600) PARTITION BY RANGE (b); +CREATE TABLE prt1_l_p3_p1 PARTITION OF prt1_l_p3 FOR VALUES FROM (0) TO (13); +CREATE TABLE prt1_l_p3_p2 PARTITION OF prt1_l_p3 FOR VALUES FROM (13) TO (25); +INSERT INTO prt1_l SELECT i, i % 25, to_char(i % 4, 'FM0000') FROM generate_series(0, 599, 2) i; +ANALYZE prt1_l; +CREATE TABLE prt2_l (a int, b int, c varchar) PARTITION BY RANGE(b); +CREATE TABLE prt2_l_p1 PARTITION OF prt2_l FOR VALUES FROM (0) TO (250); +CREATE TABLE prt2_l_p2 PARTITION OF prt2_l FOR VALUES FROM (250) TO (500) PARTITION BY LIST (c); +CREATE TABLE prt2_l_p2_p1 PARTITION OF prt2_l_p2 FOR VALUES IN ('0000', '0001'); +CREATE TABLE prt2_l_p2_p2 PARTITION OF prt2_l_p2 FOR VALUES IN ('0002', '0003'); +CREATE TABLE prt2_l_p3 PARTITION OF prt2_l FOR VALUES FROM (500) TO (600) PARTITION BY RANGE (a); +CREATE TABLE prt2_l_p3_p1 PARTITION OF prt2_l_p3 FOR VALUES FROM (0) TO (13); +CREATE TABLE prt2_l_p3_p2 PARTITION OF prt2_l_p3 FOR VALUES FROM (13) TO (25); +INSERT INTO prt2_l SELECT i % 25, i, to_char(i % 4, 'FM0000') FROM generate_series(0, 599, 3) i; +ANALYZE prt2_l; +-- inner join, 
qual covering only top-level partitions +EXPLAIN (COSTS OFF) +SELECT t1.a, t1.c, t2.b, t2.c FROM prt1_l t1, prt2_l t2 WHERE t1.a = t2.b AND t1.b = 0 ORDER BY t1.a, t2.b; + QUERY PLAN +------------------------------------------------------------- + Sort + Sort Key: t1.a + -> Append + -> Hash Join + Hash Cond: (t2.b = t1.a) + -> Seq Scan on prt2_l_p1 t2 + -> Hash + -> Seq Scan on prt1_l_p1 t1 + Filter: (b = 0) + -> Hash Join + Hash Cond: (t2_1.b = t1_1.a) + -> Append + -> Seq Scan on prt2_l_p2_p1 t2_1 + -> Seq Scan on prt2_l_p2_p2 t2_2 + -> Hash + -> Append + -> Seq Scan on prt1_l_p2_p1 t1_1 + Filter: (b = 0) + -> Seq Scan on prt1_l_p2_p2 t1_2 + Filter: (b = 0) + -> Hash Join + Hash Cond: (t2_3.b = t1_3.a) + -> Append + -> Seq Scan on prt2_l_p3_p1 t2_3 + -> Seq Scan on prt2_l_p3_p2 t2_4 + -> Hash + -> Append + -> Seq Scan on prt1_l_p3_p1 t1_3 + Filter: (b = 0) +(29 rows) + +SELECT t1.a, t1.c, t2.b, t2.c FROM prt1_l t1, prt2_l t2 WHERE t1.a = t2.b AND t1.b = 0 ORDER BY t1.a, t2.b; + a | c | b | c +-----+------+-----+------ + 0 | 0000 | 0 | 0000 + 150 | 0002 | 150 | 0002 + 300 | 0000 | 300 | 0000 + 450 | 0002 | 450 | 0002 +(4 rows) + +-- left join +EXPLAIN (COSTS OFF) +SELECT t1.a, t1.c, t2.b, t2.c FROM prt1_l t1 LEFT JOIN prt2_l t2 ON t1.a = t2.b AND t1.c = t2.c WHERE t1.b = 0 ORDER BY t1.a, t2.b; + QUERY PLAN +------------------------------------------------------------------------------------ + Sort + Sort Key: t1.a, t2.b + -> Append + -> Hash Right Join + Hash Cond: ((t2.b = t1.a) AND ((t2.c)::text = (t1.c)::text)) + -> Seq Scan on prt2_l_p1 t2 + -> Hash + -> Seq Scan on prt1_l_p1 t1 + Filter: (b = 0) + -> Hash Right Join + Hash Cond: ((t2_1.b = t1_1.a) AND ((t2_1.c)::text = (t1_1.c)::text)) + -> Seq Scan on prt2_l_p2_p1 t2_1 + -> Hash + -> Seq Scan on prt1_l_p2_p1 t1_1 + Filter: (b = 0) + -> Hash Right Join + Hash Cond: ((t2_2.b = t1_2.a) AND ((t2_2.c)::text = (t1_2.c)::text)) + -> Seq Scan on prt2_l_p2_p2 t2_2 + -> Hash + -> Seq Scan on prt1_l_p2_p2 t1_2 + Filter: (b = 0) + -> Hash Right Join + Hash Cond: ((t2_3.b = t1_3.a) AND ((t2_3.c)::text = (t1_3.c)::text)) + -> Append + -> Seq Scan on prt2_l_p3_p1 t2_3 + -> Seq Scan on prt2_l_p3_p2 t2_4 + -> Hash + -> Append + -> Seq Scan on prt1_l_p3_p1 t1_3 + Filter: (b = 0) +(30 rows) + +SELECT t1.a, t1.c, t2.b, t2.c FROM prt1_l t1 LEFT JOIN prt2_l t2 ON t1.a = t2.b AND t1.c = t2.c WHERE t1.b = 0 ORDER BY t1.a, t2.b; + a | c | b | c +-----+------+-----+------ + 0 | 0000 | 0 | 0000 + 50 | 0002 | | + 100 | 0000 | | + 150 | 0002 | 150 | 0002 + 200 | 0000 | | + 250 | 0002 | | + 300 | 0000 | 300 | 0000 + 350 | 0002 | | + 400 | 0000 | | + 450 | 0002 | 450 | 0002 + 500 | 0000 | | + 550 | 0002 | | +(12 rows) + +-- right join +EXPLAIN (COSTS OFF) +SELECT t1.a, t1.c, t2.b, t2.c FROM prt1_l t1 RIGHT JOIN prt2_l t2 ON t1.a = t2.b AND t1.c = t2.c WHERE t2.a = 0 ORDER BY t1.a, t2.b; + QUERY PLAN +------------------------------------------------------------------------------------ + Sort + Sort Key: t1.a, t2.b + -> Append + -> Hash Right Join + Hash Cond: ((t1.a = t2.b) AND ((t1.c)::text = (t2.c)::text)) + -> Seq Scan on prt1_l_p1 t1 + -> Hash + -> Seq Scan on prt2_l_p1 t2 + Filter: (a = 0) + -> Hash Right Join + Hash Cond: ((t1_1.a = t2_1.b) AND ((t1_1.c)::text = (t2_1.c)::text)) + -> Seq Scan on prt1_l_p2_p1 t1_1 + -> Hash + -> Seq Scan on prt2_l_p2_p1 t2_1 + Filter: (a = 0) + -> Hash Right Join + Hash Cond: ((t1_2.a = t2_2.b) AND ((t1_2.c)::text = (t2_2.c)::text)) + -> Seq Scan on prt1_l_p2_p2 t1_2 + -> Hash + -> Seq Scan on prt2_l_p2_p2 t2_2 + Filter: (a = 
0) + -> Hash Right Join + Hash Cond: ((t1_3.a = t2_3.b) AND ((t1_3.c)::text = (t2_3.c)::text)) + -> Append + -> Seq Scan on prt1_l_p3_p1 t1_3 + -> Seq Scan on prt1_l_p3_p2 t1_4 + -> Hash + -> Append + -> Seq Scan on prt2_l_p3_p1 t2_3 + Filter: (a = 0) +(30 rows) + +SELECT t1.a, t1.c, t2.b, t2.c FROM prt1_l t1 RIGHT JOIN prt2_l t2 ON t1.a = t2.b AND t1.c = t2.c WHERE t2.a = 0 ORDER BY t1.a, t2.b; + a | c | b | c +-----+------+-----+------ + 0 | 0000 | 0 | 0000 + 150 | 0002 | 150 | 0002 + 300 | 0000 | 300 | 0000 + 450 | 0002 | 450 | 0002 + | | 75 | 0003 + | | 225 | 0001 + | | 375 | 0003 + | | 525 | 0001 +(8 rows) + +-- full join +EXPLAIN (COSTS OFF) +SELECT t1.a, t1.c, t2.b, t2.c FROM (SELECT * FROM prt1_l WHERE prt1_l.b = 0) t1 FULL JOIN (SELECT * FROM prt2_l WHERE prt2_l.a = 0) t2 ON (t1.a = t2.b AND t1.c = t2.c) ORDER BY t1.a, t2.b; + QUERY PLAN +-------------------------------------------------------------------------------------------------------------------- + Sort + Sort Key: prt1_l_p1.a, prt2_l_p1.b + -> Append + -> Hash Full Join + Hash Cond: ((prt1_l_p1.a = prt2_l_p1.b) AND ((prt1_l_p1.c)::text = (prt2_l_p1.c)::text)) + -> Seq Scan on prt1_l_p1 + Filter: (b = 0) + -> Hash + -> Seq Scan on prt2_l_p1 + Filter: (a = 0) + -> Hash Full Join + Hash Cond: ((prt1_l_p2_p1.a = prt2_l_p2_p1.b) AND ((prt1_l_p2_p1.c)::text = (prt2_l_p2_p1.c)::text)) + -> Seq Scan on prt1_l_p2_p1 + Filter: (b = 0) + -> Hash + -> Seq Scan on prt2_l_p2_p1 + Filter: (a = 0) + -> Hash Full Join + Hash Cond: ((prt1_l_p2_p2.a = prt2_l_p2_p2.b) AND ((prt1_l_p2_p2.c)::text = (prt2_l_p2_p2.c)::text)) + -> Seq Scan on prt1_l_p2_p2 + Filter: (b = 0) + -> Hash + -> Seq Scan on prt2_l_p2_p2 + Filter: (a = 0) + -> Hash Full Join + Hash Cond: ((prt1_l_p3_p1.a = prt2_l_p3_p1.b) AND ((prt1_l_p3_p1.c)::text = (prt2_l_p3_p1.c)::text)) + -> Append + -> Seq Scan on prt1_l_p3_p1 + Filter: (b = 0) + -> Hash + -> Append + -> Seq Scan on prt2_l_p3_p1 + Filter: (a = 0) +(33 rows) + +SELECT t1.a, t1.c, t2.b, t2.c FROM (SELECT * FROM prt1_l WHERE prt1_l.b = 0) t1 FULL JOIN (SELECT * FROM prt2_l WHERE prt2_l.a = 0) t2 ON (t1.a = t2.b AND t1.c = t2.c) ORDER BY t1.a, t2.b; + a | c | b | c +-----+------+-----+------ + 0 | 0000 | 0 | 0000 + 50 | 0002 | | + 100 | 0000 | | + 150 | 0002 | 150 | 0002 + 200 | 0000 | | + 250 | 0002 | | + 300 | 0000 | 300 | 0000 + 350 | 0002 | | + 400 | 0000 | | + 450 | 0002 | 450 | 0002 + 500 | 0000 | | + 550 | 0002 | | + | | 75 | 0003 + | | 225 | 0001 + | | 375 | 0003 + | | 525 | 0001 +(16 rows) + +-- lateral partitionwise join +EXPLAIN (COSTS OFF) +SELECT * FROM prt1_l t1 LEFT JOIN LATERAL + (SELECT t2.a AS t2a, t2.c AS t2c, t2.b AS t2b, t3.b AS t3b, least(t1.a,t2.a,t3.b) FROM prt1_l t2 JOIN prt2_l t3 ON (t2.a = t3.b AND t2.c = t3.c)) ss + ON t1.a = ss.t2a AND t1.c = ss.t2c WHERE t1.b = 0 ORDER BY t1.a; + QUERY PLAN +----------------------------------------------------------------------------------------------- + Sort + Sort Key: t1.a + -> Append + -> Nested Loop Left Join + -> Seq Scan on prt1_l_p1 t1 + Filter: (b = 0) + -> Hash Join + Hash Cond: ((t3.b = t2.a) AND ((t3.c)::text = (t2.c)::text)) + -> Seq Scan on prt2_l_p1 t3 + -> Hash + -> Seq Scan on prt1_l_p1 t2 + Filter: ((t1.a = a) AND ((t1.c)::text = (c)::text)) + -> Nested Loop Left Join + -> Seq Scan on prt1_l_p2_p1 t1_1 + Filter: (b = 0) + -> Hash Join + Hash Cond: ((t3_1.b = t2_1.a) AND ((t3_1.c)::text = (t2_1.c)::text)) + -> Seq Scan on prt2_l_p2_p1 t3_1 + -> Hash + -> Seq Scan on prt1_l_p2_p1 t2_1 + Filter: ((t1_1.a = a) AND ((t1_1.c)::text = 
(c)::text)) + -> Nested Loop Left Join + -> Seq Scan on prt1_l_p2_p2 t1_2 + Filter: (b = 0) + -> Hash Join + Hash Cond: ((t3_2.b = t2_2.a) AND ((t3_2.c)::text = (t2_2.c)::text)) + -> Seq Scan on prt2_l_p2_p2 t3_2 + -> Hash + -> Seq Scan on prt1_l_p2_p2 t2_2 + Filter: ((t1_2.a = a) AND ((t1_2.c)::text = (c)::text)) + -> Nested Loop Left Join + -> Append + -> Seq Scan on prt1_l_p3_p1 t1_3 + Filter: (b = 0) + -> Hash Join + Hash Cond: ((t3_3.b = t2_3.a) AND ((t3_3.c)::text = (t2_3.c)::text)) + -> Append + -> Seq Scan on prt2_l_p3_p1 t3_3 + -> Seq Scan on prt2_l_p3_p2 t3_4 + -> Hash + -> Append + -> Seq Scan on prt1_l_p3_p1 t2_3 + Filter: ((t1_3.a = a) AND ((t1_3.c)::text = (c)::text)) + -> Seq Scan on prt1_l_p3_p2 t2_4 + Filter: ((t1_3.a = a) AND ((t1_3.c)::text = (c)::text)) +(45 rows) + +SELECT * FROM prt1_l t1 LEFT JOIN LATERAL + (SELECT t2.a AS t2a, t2.c AS t2c, t2.b AS t2b, t3.b AS t3b, least(t1.a,t2.a,t3.b) FROM prt1_l t2 JOIN prt2_l t3 ON (t2.a = t3.b AND t2.c = t3.c)) ss + ON t1.a = ss.t2a AND t1.c = ss.t2c WHERE t1.b = 0 ORDER BY t1.a; + a | b | c | t2a | t2c | t2b | t3b | least +-----+---+------+-----+------+-----+-----+------- + 0 | 0 | 0000 | 0 | 0000 | 0 | 0 | 0 + 50 | 0 | 0002 | | | | | + 100 | 0 | 0000 | | | | | + 150 | 0 | 0002 | 150 | 0002 | 0 | 150 | 150 + 200 | 0 | 0000 | | | | | + 250 | 0 | 0002 | | | | | + 300 | 0 | 0000 | 300 | 0000 | 0 | 300 | 300 + 350 | 0 | 0002 | | | | | + 400 | 0 | 0000 | | | | | + 450 | 0 | 0002 | 450 | 0002 | 0 | 450 | 450 + 500 | 0 | 0000 | | | | | + 550 | 0 | 0002 | | | | | +(12 rows) + +-- join with one side empty +EXPLAIN (COSTS OFF) +SELECT t1.a, t1.c, t2.b, t2.c FROM (SELECT * FROM prt1_l WHERE a = 1 AND a = 2) t1 RIGHT JOIN prt2_l t2 ON t1.a = t2.b AND t1.b = t2.a AND t1.c = t2.c; + QUERY PLAN +------------------------------------------------------------------------- + Hash Left Join + Hash Cond: ((t2.b = a) AND (t2.a = b) AND ((t2.c)::text = (c)::text)) + -> Append + -> Seq Scan on prt2_l_p1 t2 + -> Seq Scan on prt2_l_p2_p1 t2_1 + -> Seq Scan on prt2_l_p2_p2 t2_2 + -> Seq Scan on prt2_l_p3_p1 t2_3 + -> Seq Scan on prt2_l_p3_p2 t2_4 + -> Hash + -> Result + One-Time Filter: false +(11 rows) + +-- Test case to verify proper handling of subqueries in a partitioned delete. +-- The weird-looking lateral join is just there to force creation of a +-- nestloop parameter within the subquery, which exposes the problem if the +-- planner fails to make multiple copies of the subquery as appropriate. 
+EXPLAIN (COSTS OFF) +DELETE FROM prt1_l +WHERE EXISTS ( + SELECT 1 + FROM int4_tbl, + LATERAL (SELECT int4_tbl.f1 FROM int8_tbl LIMIT 2) ss + WHERE prt1_l.c IS NULL); + QUERY PLAN +--------------------------------------------------------------- + Delete on prt1_l + Delete on prt1_l_p1 + Delete on prt1_l_p3_p1 + Delete on prt1_l_p3_p2 + -> Nested Loop Semi Join + -> Seq Scan on prt1_l_p1 + Filter: (c IS NULL) + -> Nested Loop + -> Seq Scan on int4_tbl + -> Subquery Scan on ss + -> Limit + -> Seq Scan on int8_tbl + -> Nested Loop Semi Join + -> Seq Scan on prt1_l_p3_p1 + Filter: (c IS NULL) + -> Nested Loop + -> Seq Scan on int4_tbl + -> Subquery Scan on ss_1 + -> Limit + -> Seq Scan on int8_tbl int8_tbl_1 + -> Nested Loop Semi Join + -> Seq Scan on prt1_l_p3_p2 + Filter: (c IS NULL) + -> Nested Loop + -> Seq Scan on int4_tbl + -> Subquery Scan on ss_2 + -> Limit + -> Seq Scan on int8_tbl int8_tbl_2 +(28 rows) + +-- +-- negative testcases +-- +CREATE TABLE prt1_n (a int, b int, c varchar) PARTITION BY RANGE(c); +CREATE TABLE prt1_n_p1 PARTITION OF prt1_n FOR VALUES FROM ('0000') TO ('0250'); +CREATE TABLE prt1_n_p2 PARTITION OF prt1_n FOR VALUES FROM ('0250') TO ('0500'); +INSERT INTO prt1_n SELECT i, i, to_char(i, 'FM0000') FROM generate_series(0, 499, 2) i; +ANALYZE prt1_n; +CREATE TABLE prt2_n (a int, b int, c text) PARTITION BY LIST(c); +CREATE TABLE prt2_n_p1 PARTITION OF prt2_n FOR VALUES IN ('0000', '0003', '0004', '0010', '0006', '0007'); +CREATE TABLE prt2_n_p2 PARTITION OF prt2_n FOR VALUES IN ('0001', '0005', '0002', '0009', '0008', '0011'); +INSERT INTO prt2_n SELECT i, i, to_char(i/50, 'FM0000') FROM generate_series(0, 599, 2) i; +ANALYZE prt2_n; +CREATE TABLE prt3_n (a int, b int, c text) PARTITION BY LIST(c); +CREATE TABLE prt3_n_p1 PARTITION OF prt3_n FOR VALUES IN ('0000', '0004', '0006', '0007'); +CREATE TABLE prt3_n_p2 PARTITION OF prt3_n FOR VALUES IN ('0001', '0002', '0008', '0010'); +CREATE TABLE prt3_n_p3 PARTITION OF prt3_n FOR VALUES IN ('0003', '0005', '0009', '0011'); +INSERT INTO prt2_n SELECT i, i, to_char(i/50, 'FM0000') FROM generate_series(0, 599, 2) i; +ANALYZE prt3_n; +CREATE TABLE prt4_n (a int, b int, c text) PARTITION BY RANGE(a); +CREATE TABLE prt4_n_p1 PARTITION OF prt4_n FOR VALUES FROM (0) TO (300); +CREATE TABLE prt4_n_p2 PARTITION OF prt4_n FOR VALUES FROM (300) TO (500); +CREATE TABLE prt4_n_p3 PARTITION OF prt4_n FOR VALUES FROM (500) TO (600); +INSERT INTO prt4_n SELECT i, i, to_char(i, 'FM0000') FROM generate_series(0, 599, 2) i; +ANALYZE prt4_n; +-- partitionwise join can not be applied if the partition ranges differ +EXPLAIN (COSTS OFF) +SELECT t1.a, t1.c, t2.b, t2.c FROM prt1 t1, prt4_n t2 WHERE t1.a = t2.a; + QUERY PLAN +---------------------------------------------- + Hash Join + Hash Cond: (t1.a = t2.a) + -> Append + -> Seq Scan on prt1_p1 t1 + -> Seq Scan on prt1_p2 t1_1 + -> Seq Scan on prt1_p3 t1_2 + -> Hash + -> Append + -> Seq Scan on prt4_n_p1 t2 + -> Seq Scan on prt4_n_p2 t2_1 + -> Seq Scan on prt4_n_p3 t2_2 +(11 rows) + +EXPLAIN (COSTS OFF) +SELECT t1.a, t1.c, t2.b, t2.c FROM prt1 t1, prt4_n t2, prt2 t3 WHERE t1.a = t2.a and t1.a = t3.b; + QUERY PLAN +-------------------------------------------------------- + Hash Join + Hash Cond: (t2.a = t1.a) + -> Append + -> Seq Scan on prt4_n_p1 t2 + -> Seq Scan on prt4_n_p2 t2_1 + -> Seq Scan on prt4_n_p3 t2_2 + -> Hash + -> Append + -> Hash Join + Hash Cond: (t1.a = t3.b) + -> Seq Scan on prt1_p1 t1 + -> Hash + -> Seq Scan on prt2_p1 t3 + -> Hash Join + Hash Cond: (t1_1.a = t3_1.b) + -> Seq 
Scan on prt1_p2 t1_1 + -> Hash + -> Seq Scan on prt2_p2 t3_1 + -> Hash Join + Hash Cond: (t1_2.a = t3_2.b) + -> Seq Scan on prt1_p3 t1_2 + -> Hash + -> Seq Scan on prt2_p3 t3_2 +(23 rows) + +-- partitionwise join can not be applied if there are no equi-join conditions +-- between partition keys +EXPLAIN (COSTS OFF) +SELECT t1.a, t1.c, t2.b, t2.c FROM prt1 t1 LEFT JOIN prt2 t2 ON (t1.a < t2.b); + QUERY PLAN +--------------------------------------------------------- + Nested Loop Left Join + -> Append + -> Seq Scan on prt1_p1 t1 + -> Seq Scan on prt1_p2 t1_1 + -> Seq Scan on prt1_p3 t1_2 + -> Append + -> Index Scan using iprt2_p1_b on prt2_p1 t2 + Index Cond: (t1.a < b) + -> Index Scan using iprt2_p2_b on prt2_p2 t2_1 + Index Cond: (t1.a < b) + -> Index Scan using iprt2_p3_b on prt2_p3 t2_2 + Index Cond: (t1.a < b) +(12 rows) + +-- equi-join with join condition on partial keys does not qualify for +-- partitionwise join +EXPLAIN (COSTS OFF) +SELECT t1.a, t1.c, t2.b, t2.c FROM prt1_m t1, prt2_m t2 WHERE t1.a = (t2.b + t2.a)/2; + QUERY PLAN +---------------------------------------------- + Hash Join + Hash Cond: (((t2.b + t2.a) / 2) = t1.a) + -> Append + -> Seq Scan on prt2_m_p1 t2 + -> Seq Scan on prt2_m_p2 t2_1 + -> Seq Scan on prt2_m_p3 t2_2 + -> Hash + -> Append + -> Seq Scan on prt1_m_p1 t1 + -> Seq Scan on prt1_m_p2 t1_1 + -> Seq Scan on prt1_m_p3 t1_2 +(11 rows) + +-- equi-join between out-of-order partition key columns does not qualify for +-- partitionwise join +EXPLAIN (COSTS OFF) +SELECT t1.a, t1.c, t2.b, t2.c FROM prt1_m t1 LEFT JOIN prt2_m t2 ON t1.a = t2.b; + QUERY PLAN +---------------------------------------------- + Hash Left Join + Hash Cond: (t1.a = t2.b) + -> Append + -> Seq Scan on prt1_m_p1 t1 + -> Seq Scan on prt1_m_p2 t1_1 + -> Seq Scan on prt1_m_p3 t1_2 + -> Hash + -> Append + -> Seq Scan on prt2_m_p1 t2 + -> Seq Scan on prt2_m_p2 t2_1 + -> Seq Scan on prt2_m_p3 t2_2 +(11 rows) + +-- equi-join between non-key columns does not qualify for partitionwise join +EXPLAIN (COSTS OFF) +SELECT t1.a, t1.c, t2.b, t2.c FROM prt1_m t1 LEFT JOIN prt2_m t2 ON t1.c = t2.c; + QUERY PLAN +---------------------------------------------- + Hash Left Join + Hash Cond: (t1.c = t2.c) + -> Append + -> Seq Scan on prt1_m_p1 t1 + -> Seq Scan on prt1_m_p2 t1_1 + -> Seq Scan on prt1_m_p3 t1_2 + -> Hash + -> Append + -> Seq Scan on prt2_m_p1 t2 + -> Seq Scan on prt2_m_p2 t2_1 + -> Seq Scan on prt2_m_p3 t2_2 +(11 rows) + +-- partitionwise join can not be applied between tables with different +-- partition lists +EXPLAIN (COSTS OFF) +SELECT t1.a, t1.c, t2.b, t2.c FROM prt1_n t1 LEFT JOIN prt2_n t2 ON (t1.c = t2.c); + QUERY PLAN +---------------------------------------------- + Hash Right Join + Hash Cond: (t2.c = (t1.c)::text) + -> Append + -> Seq Scan on prt2_n_p1 t2 + -> Seq Scan on prt2_n_p2 t2_1 + -> Hash + -> Append + -> Seq Scan on prt1_n_p1 t1 + -> Seq Scan on prt1_n_p2 t1_1 +(9 rows) + +EXPLAIN (COSTS OFF) +SELECT t1.a, t1.c, t2.b, t2.c FROM prt1_n t1 JOIN prt2_n t2 ON (t1.c = t2.c) JOIN plt1 t3 ON (t1.c = t3.c); + QUERY PLAN +---------------------------------------------------------- + Hash Join + Hash Cond: (t2.c = (t1.c)::text) + -> Append + -> Seq Scan on prt2_n_p1 t2 + -> Seq Scan on prt2_n_p2 t2_1 + -> Hash + -> Hash Join + Hash Cond: (t3.c = (t1.c)::text) + -> Append + -> Seq Scan on plt1_p1 t3 + -> Seq Scan on plt1_p2 t3_1 + -> Seq Scan on plt1_p3 t3_2 + -> Hash + -> Append + -> Seq Scan on prt1_n_p1 t1 + -> Seq Scan on prt1_n_p2 t1_1 +(16 rows) + +-- partitionwise join can not be 
applied for a join between list and range +-- partitioned table +EXPLAIN (COSTS OFF) +SELECT t1.a, t1.c, t2.b, t2.c FROM prt1_n t1 FULL JOIN prt1 t2 ON (t1.c = t2.c); + QUERY PLAN +---------------------------------------------- + Hash Full Join + Hash Cond: ((t2.c)::text = (t1.c)::text) + -> Append + -> Seq Scan on prt1_p1 t2 + -> Seq Scan on prt1_p2 t2_1 + -> Seq Scan on prt1_p3 t2_2 + -> Hash + -> Append + -> Seq Scan on prt1_n_p1 t1 + -> Seq Scan on prt1_n_p2 t1_1 +(10 rows) + +-- partitionwise join can not be applied if only one of joining table has +-- default partition +ALTER TABLE prt2 DETACH PARTITION prt2_p3; +ALTER TABLE prt2 ATTACH PARTITION prt2_p3 FOR VALUES FROM (500) TO (600); +ANALYZE prt2; +EXPLAIN (COSTS OFF) +SELECT t1.a, t1.c, t2.b, t2.c FROM prt1 t1, prt2 t2 WHERE t1.a = t2.b AND t1.b = 0 ORDER BY t1.a, t2.b; + QUERY PLAN +-------------------------------------------------- + Sort + Sort Key: t1.a + -> Hash Join + Hash Cond: (t2.b = t1.a) + -> Append + -> Seq Scan on prt2_p1 t2 + -> Seq Scan on prt2_p2 t2_1 + -> Seq Scan on prt2_p3 t2_2 + -> Hash + -> Append + -> Seq Scan on prt1_p1 t1 + Filter: (b = 0) + -> Seq Scan on prt1_p2 t1_1 + Filter: (b = 0) + -> Seq Scan on prt1_p3 t1_2 + Filter: (b = 0) +(16 rows) + diff --git a/src/test/regress/expected/partition_prune.out b/src/test/regress/expected/partition_prune.out new file mode 100644 index 0000000000..24313e8c78 --- /dev/null +++ b/src/test/regress/expected/partition_prune.out @@ -0,0 +1,3590 @@ +-- +-- Test partitioning planner code +-- +create table lp (a char) partition by list (a); +create table lp_default partition of lp default; +create table lp_ef partition of lp for values in ('e', 'f'); +create table lp_ad partition of lp for values in ('a', 'd'); +create table lp_bc partition of lp for values in ('b', 'c'); +create table lp_g partition of lp for values in ('g'); +create table lp_null partition of lp for values in (null); +explain (costs off) select * from lp; + QUERY PLAN +------------------------------ + Append + -> Seq Scan on lp_ad + -> Seq Scan on lp_bc + -> Seq Scan on lp_ef + -> Seq Scan on lp_g + -> Seq Scan on lp_null + -> Seq Scan on lp_default +(7 rows) + +explain (costs off) select * from lp where a > 'a' and a < 'd'; + QUERY PLAN +----------------------------------------------------------- + Append + -> Seq Scan on lp_bc + Filter: ((a > 'a'::bpchar) AND (a < 'd'::bpchar)) + -> Seq Scan on lp_default + Filter: ((a > 'a'::bpchar) AND (a < 'd'::bpchar)) +(5 rows) + +explain (costs off) select * from lp where a > 'a' and a <= 'd'; + QUERY PLAN +------------------------------------------------------------ + Append + -> Seq Scan on lp_ad + Filter: ((a > 'a'::bpchar) AND (a <= 'd'::bpchar)) + -> Seq Scan on lp_bc + Filter: ((a > 'a'::bpchar) AND (a <= 'd'::bpchar)) + -> Seq Scan on lp_default + Filter: ((a > 'a'::bpchar) AND (a <= 'd'::bpchar)) +(7 rows) + +explain (costs off) select * from lp where a = 'a'; + QUERY PLAN +----------------------------------- + Append + -> Seq Scan on lp_ad + Filter: (a = 'a'::bpchar) +(3 rows) + +explain (costs off) select * from lp where 'a' = a; /* commuted */ + QUERY PLAN +----------------------------------- + Append + -> Seq Scan on lp_ad + Filter: ('a'::bpchar = a) +(3 rows) + +explain (costs off) select * from lp where a is not null; + QUERY PLAN +--------------------------------- + Append + -> Seq Scan on lp_ad + Filter: (a IS NOT NULL) + -> Seq Scan on lp_bc + Filter: (a IS NOT NULL) + -> Seq Scan on lp_ef + Filter: (a IS NOT NULL) + -> Seq Scan on lp_g + Filter: 
(a IS NOT NULL) + -> Seq Scan on lp_default + Filter: (a IS NOT NULL) +(11 rows) + +explain (costs off) select * from lp where a is null; + QUERY PLAN +----------------------------- + Append + -> Seq Scan on lp_null + Filter: (a IS NULL) +(3 rows) + +explain (costs off) select * from lp where a = 'a' or a = 'c'; + QUERY PLAN +---------------------------------------------------------- + Append + -> Seq Scan on lp_ad + Filter: ((a = 'a'::bpchar) OR (a = 'c'::bpchar)) + -> Seq Scan on lp_bc + Filter: ((a = 'a'::bpchar) OR (a = 'c'::bpchar)) +(5 rows) + +explain (costs off) select * from lp where a is not null and (a = 'a' or a = 'c'); + QUERY PLAN +-------------------------------------------------------------------------------- + Append + -> Seq Scan on lp_ad + Filter: ((a IS NOT NULL) AND ((a = 'a'::bpchar) OR (a = 'c'::bpchar))) + -> Seq Scan on lp_bc + Filter: ((a IS NOT NULL) AND ((a = 'a'::bpchar) OR (a = 'c'::bpchar))) +(5 rows) + +explain (costs off) select * from lp where a <> 'g'; + QUERY PLAN +------------------------------------ + Append + -> Seq Scan on lp_ad + Filter: (a <> 'g'::bpchar) + -> Seq Scan on lp_bc + Filter: (a <> 'g'::bpchar) + -> Seq Scan on lp_ef + Filter: (a <> 'g'::bpchar) + -> Seq Scan on lp_default + Filter: (a <> 'g'::bpchar) +(9 rows) + +explain (costs off) select * from lp where a <> 'a' and a <> 'd'; + QUERY PLAN +------------------------------------------------------------- + Append + -> Seq Scan on lp_bc + Filter: ((a <> 'a'::bpchar) AND (a <> 'd'::bpchar)) + -> Seq Scan on lp_ef + Filter: ((a <> 'a'::bpchar) AND (a <> 'd'::bpchar)) + -> Seq Scan on lp_g + Filter: ((a <> 'a'::bpchar) AND (a <> 'd'::bpchar)) + -> Seq Scan on lp_default + Filter: ((a <> 'a'::bpchar) AND (a <> 'd'::bpchar)) +(9 rows) + +explain (costs off) select * from lp where a not in ('a', 'd'); + QUERY PLAN +------------------------------------------------ + Append + -> Seq Scan on lp_bc + Filter: (a <> ALL ('{a,d}'::bpchar[])) + -> Seq Scan on lp_ef + Filter: (a <> ALL ('{a,d}'::bpchar[])) + -> Seq Scan on lp_g + Filter: (a <> ALL ('{a,d}'::bpchar[])) + -> Seq Scan on lp_default + Filter: (a <> ALL ('{a,d}'::bpchar[])) +(9 rows) + +-- collation matches the partitioning collation, pruning works +create table coll_pruning (a text collate "C") partition by list (a); +create table coll_pruning_a partition of coll_pruning for values in ('a'); +create table coll_pruning_b partition of coll_pruning for values in ('b'); +create table coll_pruning_def partition of coll_pruning default; +explain (costs off) select * from coll_pruning where a collate "C" = 'a' collate "C"; + QUERY PLAN +--------------------------------------------- + Append + -> Seq Scan on coll_pruning_a + Filter: (a = 'a'::text COLLATE "C") +(3 rows) + +-- collation doesn't match the partitioning collation, no pruning occurs +explain (costs off) select * from coll_pruning where a collate "POSIX" = 'a' collate "POSIX"; + QUERY PLAN +--------------------------------------------------------- + Append + -> Seq Scan on coll_pruning_a + Filter: ((a)::text = 'a'::text COLLATE "POSIX") + -> Seq Scan on coll_pruning_b + Filter: ((a)::text = 'a'::text COLLATE "POSIX") + -> Seq Scan on coll_pruning_def + Filter: ((a)::text = 'a'::text COLLATE "POSIX") +(7 rows) + +create table rlp (a int, b varchar) partition by range (a); +create table rlp_default partition of rlp default partition by list (a); +create table rlp_default_default partition of rlp_default default; +create table rlp_default_10 partition of rlp_default for values in (10); 
+create table rlp_default_30 partition of rlp_default for values in (30); +create table rlp_default_null partition of rlp_default for values in (null); +create table rlp1 partition of rlp for values from (minvalue) to (1); +create table rlp2 partition of rlp for values from (1) to (10); +create table rlp3 (b varchar, a int) partition by list (b varchar_ops); +create table rlp3_default partition of rlp3 default; +create table rlp3abcd partition of rlp3 for values in ('ab', 'cd'); +create table rlp3efgh partition of rlp3 for values in ('ef', 'gh'); +create table rlp3nullxy partition of rlp3 for values in (null, 'xy'); +alter table rlp attach partition rlp3 for values from (15) to (20); +create table rlp4 partition of rlp for values from (20) to (30) partition by range (a); +create table rlp4_default partition of rlp4 default; +create table rlp4_1 partition of rlp4 for values from (20) to (25); +create table rlp4_2 partition of rlp4 for values from (25) to (29); +create table rlp5 partition of rlp for values from (31) to (maxvalue) partition by range (a); +create table rlp5_default partition of rlp5 default; +create table rlp5_1 partition of rlp5 for values from (31) to (40); +explain (costs off) select * from rlp where a < 1; + QUERY PLAN +------------------------- + Append + -> Seq Scan on rlp1 + Filter: (a < 1) +(3 rows) + +explain (costs off) select * from rlp where 1 > a; /* commuted */ + QUERY PLAN +------------------------- + Append + -> Seq Scan on rlp1 + Filter: (1 > a) +(3 rows) + +explain (costs off) select * from rlp where a <= 1; + QUERY PLAN +-------------------------- + Append + -> Seq Scan on rlp1 + Filter: (a <= 1) + -> Seq Scan on rlp2 + Filter: (a <= 1) +(5 rows) + +explain (costs off) select * from rlp where a = 1; + QUERY PLAN +------------------------- + Append + -> Seq Scan on rlp2 + Filter: (a = 1) +(3 rows) + +explain (costs off) select * from rlp where a = 1::bigint; /* same as above */ + QUERY PLAN +----------------------------------- + Append + -> Seq Scan on rlp2 + Filter: (a = '1'::bigint) +(3 rows) + +explain (costs off) select * from rlp where a = 1::numeric; /* no pruning */ + QUERY PLAN +----------------------------------------------- + Append + -> Seq Scan on rlp1 + Filter: ((a)::numeric = '1'::numeric) + -> Seq Scan on rlp2 + Filter: ((a)::numeric = '1'::numeric) + -> Seq Scan on rlp3abcd + Filter: ((a)::numeric = '1'::numeric) + -> Seq Scan on rlp3efgh + Filter: ((a)::numeric = '1'::numeric) + -> Seq Scan on rlp3nullxy + Filter: ((a)::numeric = '1'::numeric) + -> Seq Scan on rlp3_default + Filter: ((a)::numeric = '1'::numeric) + -> Seq Scan on rlp4_1 + Filter: ((a)::numeric = '1'::numeric) + -> Seq Scan on rlp4_2 + Filter: ((a)::numeric = '1'::numeric) + -> Seq Scan on rlp4_default + Filter: ((a)::numeric = '1'::numeric) + -> Seq Scan on rlp5_1 + Filter: ((a)::numeric = '1'::numeric) + -> Seq Scan on rlp5_default + Filter: ((a)::numeric = '1'::numeric) + -> Seq Scan on rlp_default_10 + Filter: ((a)::numeric = '1'::numeric) + -> Seq Scan on rlp_default_30 + Filter: ((a)::numeric = '1'::numeric) + -> Seq Scan on rlp_default_null + Filter: ((a)::numeric = '1'::numeric) + -> Seq Scan on rlp_default_default + Filter: ((a)::numeric = '1'::numeric) +(31 rows) + +explain (costs off) select * from rlp where a <= 10; + QUERY PLAN +--------------------------------------- + Append + -> Seq Scan on rlp1 + Filter: (a <= 10) + -> Seq Scan on rlp2 + Filter: (a <= 10) + -> Seq Scan on rlp_default_10 + Filter: (a <= 10) + -> Seq Scan on rlp_default_default + Filter: (a <= 
10) +(9 rows) + +explain (costs off) select * from rlp where a > 10; + QUERY PLAN +--------------------------------------- + Append + -> Seq Scan on rlp3abcd + Filter: (a > 10) + -> Seq Scan on rlp3efgh + Filter: (a > 10) + -> Seq Scan on rlp3nullxy + Filter: (a > 10) + -> Seq Scan on rlp3_default + Filter: (a > 10) + -> Seq Scan on rlp4_1 + Filter: (a > 10) + -> Seq Scan on rlp4_2 + Filter: (a > 10) + -> Seq Scan on rlp4_default + Filter: (a > 10) + -> Seq Scan on rlp5_1 + Filter: (a > 10) + -> Seq Scan on rlp5_default + Filter: (a > 10) + -> Seq Scan on rlp_default_30 + Filter: (a > 10) + -> Seq Scan on rlp_default_default + Filter: (a > 10) +(23 rows) + +explain (costs off) select * from rlp where a < 15; + QUERY PLAN +--------------------------------------- + Append + -> Seq Scan on rlp1 + Filter: (a < 15) + -> Seq Scan on rlp2 + Filter: (a < 15) + -> Seq Scan on rlp_default_10 + Filter: (a < 15) + -> Seq Scan on rlp_default_default + Filter: (a < 15) +(9 rows) + +explain (costs off) select * from rlp where a <= 15; + QUERY PLAN +--------------------------------------- + Append + -> Seq Scan on rlp1 + Filter: (a <= 15) + -> Seq Scan on rlp2 + Filter: (a <= 15) + -> Seq Scan on rlp3abcd + Filter: (a <= 15) + -> Seq Scan on rlp3efgh + Filter: (a <= 15) + -> Seq Scan on rlp3nullxy + Filter: (a <= 15) + -> Seq Scan on rlp3_default + Filter: (a <= 15) + -> Seq Scan on rlp_default_10 + Filter: (a <= 15) + -> Seq Scan on rlp_default_default + Filter: (a <= 15) +(17 rows) + +explain (costs off) select * from rlp where a > 15 and b = 'ab'; + QUERY PLAN +--------------------------------------------------------- + Append + -> Seq Scan on rlp3abcd + Filter: ((a > 15) AND ((b)::text = 'ab'::text)) + -> Seq Scan on rlp4_1 + Filter: ((a > 15) AND ((b)::text = 'ab'::text)) + -> Seq Scan on rlp4_2 + Filter: ((a > 15) AND ((b)::text = 'ab'::text)) + -> Seq Scan on rlp4_default + Filter: ((a > 15) AND ((b)::text = 'ab'::text)) + -> Seq Scan on rlp5_1 + Filter: ((a > 15) AND ((b)::text = 'ab'::text)) + -> Seq Scan on rlp5_default + Filter: ((a > 15) AND ((b)::text = 'ab'::text)) + -> Seq Scan on rlp_default_30 + Filter: ((a > 15) AND ((b)::text = 'ab'::text)) + -> Seq Scan on rlp_default_default + Filter: ((a > 15) AND ((b)::text = 'ab'::text)) +(17 rows) + +explain (costs off) select * from rlp where a = 16; + QUERY PLAN +-------------------------------- + Append + -> Seq Scan on rlp3abcd + Filter: (a = 16) + -> Seq Scan on rlp3efgh + Filter: (a = 16) + -> Seq Scan on rlp3nullxy + Filter: (a = 16) + -> Seq Scan on rlp3_default + Filter: (a = 16) +(9 rows) + +explain (costs off) select * from rlp where a = 16 and b in ('not', 'in', 'here'); + QUERY PLAN +---------------------------------------------------------------------------- + Append + -> Seq Scan on rlp3_default + Filter: ((a = 16) AND ((b)::text = ANY ('{not,in,here}'::text[]))) +(3 rows) + +explain (costs off) select * from rlp where a = 16 and b < 'ab'; + QUERY PLAN +--------------------------------------------------------- + Append + -> Seq Scan on rlp3_default + Filter: (((b)::text < 'ab'::text) AND (a = 16)) +(3 rows) + +explain (costs off) select * from rlp where a = 16 and b <= 'ab'; + QUERY PLAN +---------------------------------------------------------- + Append + -> Seq Scan on rlp3abcd + Filter: (((b)::text <= 'ab'::text) AND (a = 16)) + -> Seq Scan on rlp3_default + Filter: (((b)::text <= 'ab'::text) AND (a = 16)) +(5 rows) + +explain (costs off) select * from rlp where a = 16 and b is null; + QUERY PLAN 
+-------------------------------------------- + Append + -> Seq Scan on rlp3nullxy + Filter: ((b IS NULL) AND (a = 16)) +(3 rows) + +explain (costs off) select * from rlp where a = 16 and b is not null; + QUERY PLAN +------------------------------------------------ + Append + -> Seq Scan on rlp3abcd + Filter: ((b IS NOT NULL) AND (a = 16)) + -> Seq Scan on rlp3efgh + Filter: ((b IS NOT NULL) AND (a = 16)) + -> Seq Scan on rlp3nullxy + Filter: ((b IS NOT NULL) AND (a = 16)) + -> Seq Scan on rlp3_default + Filter: ((b IS NOT NULL) AND (a = 16)) +(9 rows) + +explain (costs off) select * from rlp where a is null; + QUERY PLAN +------------------------------------ + Append + -> Seq Scan on rlp_default_null + Filter: (a IS NULL) +(3 rows) + +explain (costs off) select * from rlp where a is not null; + QUERY PLAN +--------------------------------------- + Append + -> Seq Scan on rlp1 + Filter: (a IS NOT NULL) + -> Seq Scan on rlp2 + Filter: (a IS NOT NULL) + -> Seq Scan on rlp3abcd + Filter: (a IS NOT NULL) + -> Seq Scan on rlp3efgh + Filter: (a IS NOT NULL) + -> Seq Scan on rlp3nullxy + Filter: (a IS NOT NULL) + -> Seq Scan on rlp3_default + Filter: (a IS NOT NULL) + -> Seq Scan on rlp4_1 + Filter: (a IS NOT NULL) + -> Seq Scan on rlp4_2 + Filter: (a IS NOT NULL) + -> Seq Scan on rlp4_default + Filter: (a IS NOT NULL) + -> Seq Scan on rlp5_1 + Filter: (a IS NOT NULL) + -> Seq Scan on rlp5_default + Filter: (a IS NOT NULL) + -> Seq Scan on rlp_default_10 + Filter: (a IS NOT NULL) + -> Seq Scan on rlp_default_30 + Filter: (a IS NOT NULL) + -> Seq Scan on rlp_default_default + Filter: (a IS NOT NULL) +(29 rows) + +explain (costs off) select * from rlp where a > 30; + QUERY PLAN +--------------------------------------- + Append + -> Seq Scan on rlp5_1 + Filter: (a > 30) + -> Seq Scan on rlp5_default + Filter: (a > 30) + -> Seq Scan on rlp_default_default + Filter: (a > 30) +(7 rows) + +explain (costs off) select * from rlp where a = 30; /* only default is scanned */ + QUERY PLAN +---------------------------------- + Append + -> Seq Scan on rlp_default_30 + Filter: (a = 30) +(3 rows) + +explain (costs off) select * from rlp where a <= 31; + QUERY PLAN +--------------------------------------- + Append + -> Seq Scan on rlp1 + Filter: (a <= 31) + -> Seq Scan on rlp2 + Filter: (a <= 31) + -> Seq Scan on rlp3abcd + Filter: (a <= 31) + -> Seq Scan on rlp3efgh + Filter: (a <= 31) + -> Seq Scan on rlp3nullxy + Filter: (a <= 31) + -> Seq Scan on rlp3_default + Filter: (a <= 31) + -> Seq Scan on rlp4_1 + Filter: (a <= 31) + -> Seq Scan on rlp4_2 + Filter: (a <= 31) + -> Seq Scan on rlp4_default + Filter: (a <= 31) + -> Seq Scan on rlp5_1 + Filter: (a <= 31) + -> Seq Scan on rlp5_default + Filter: (a <= 31) + -> Seq Scan on rlp_default_10 + Filter: (a <= 31) + -> Seq Scan on rlp_default_30 + Filter: (a <= 31) + -> Seq Scan on rlp_default_default + Filter: (a <= 31) +(29 rows) + +explain (costs off) select * from rlp where a = 1 or a = 7; + QUERY PLAN +-------------------------------------- + Append + -> Seq Scan on rlp2 + Filter: ((a = 1) OR (a = 7)) +(3 rows) + +explain (costs off) select * from rlp where a = 1 or b = 'ab'; + QUERY PLAN +------------------------------------------------------- + Append + -> Seq Scan on rlp1 + Filter: ((a = 1) OR ((b)::text = 'ab'::text)) + -> Seq Scan on rlp2 + Filter: ((a = 1) OR ((b)::text = 'ab'::text)) + -> Seq Scan on rlp3abcd + Filter: ((a = 1) OR ((b)::text = 'ab'::text)) + -> Seq Scan on rlp4_1 + Filter: ((a = 1) OR ((b)::text = 'ab'::text)) + -> Seq Scan on rlp4_2 + 
Filter: ((a = 1) OR ((b)::text = 'ab'::text)) + -> Seq Scan on rlp4_default + Filter: ((a = 1) OR ((b)::text = 'ab'::text)) + -> Seq Scan on rlp5_1 + Filter: ((a = 1) OR ((b)::text = 'ab'::text)) + -> Seq Scan on rlp5_default + Filter: ((a = 1) OR ((b)::text = 'ab'::text)) + -> Seq Scan on rlp_default_10 + Filter: ((a = 1) OR ((b)::text = 'ab'::text)) + -> Seq Scan on rlp_default_30 + Filter: ((a = 1) OR ((b)::text = 'ab'::text)) + -> Seq Scan on rlp_default_null + Filter: ((a = 1) OR ((b)::text = 'ab'::text)) + -> Seq Scan on rlp_default_default + Filter: ((a = 1) OR ((b)::text = 'ab'::text)) +(25 rows) + +explain (costs off) select * from rlp where a > 20 and a < 27; + QUERY PLAN +----------------------------------------- + Append + -> Seq Scan on rlp4_1 + Filter: ((a > 20) AND (a < 27)) + -> Seq Scan on rlp4_2 + Filter: ((a > 20) AND (a < 27)) + -> Seq Scan on rlp4_default + Filter: ((a > 20) AND (a < 27)) + -> Seq Scan on rlp_default_default + Filter: ((a > 20) AND (a < 27)) +(9 rows) + +explain (costs off) select * from rlp where a = 29; + QUERY PLAN +-------------------------------- + Append + -> Seq Scan on rlp4_default + Filter: (a = 29) +(3 rows) + +explain (costs off) select * from rlp where a >= 29; + QUERY PLAN +--------------------------------------- + Append + -> Seq Scan on rlp4_default + Filter: (a >= 29) + -> Seq Scan on rlp5_1 + Filter: (a >= 29) + -> Seq Scan on rlp5_default + Filter: (a >= 29) + -> Seq Scan on rlp_default_30 + Filter: (a >= 29) + -> Seq Scan on rlp_default_default + Filter: (a >= 29) +(11 rows) + +-- redundant clauses are eliminated +explain (costs off) select * from rlp where a > 1 and a = 10; /* only default */ + QUERY PLAN +---------------------------------------- + Append + -> Seq Scan on rlp_default_10 + Filter: ((a > 1) AND (a = 10)) +(3 rows) + +explain (costs off) select * from rlp where a > 1 and a >=15; /* rlp3 onwards, including default */ + QUERY PLAN +----------------------------------------- + Append + -> Seq Scan on rlp3abcd + Filter: ((a > 1) AND (a >= 15)) + -> Seq Scan on rlp3efgh + Filter: ((a > 1) AND (a >= 15)) + -> Seq Scan on rlp3nullxy + Filter: ((a > 1) AND (a >= 15)) + -> Seq Scan on rlp3_default + Filter: ((a > 1) AND (a >= 15)) + -> Seq Scan on rlp4_1 + Filter: ((a > 1) AND (a >= 15)) + -> Seq Scan on rlp4_2 + Filter: ((a > 1) AND (a >= 15)) + -> Seq Scan on rlp4_default + Filter: ((a > 1) AND (a >= 15)) + -> Seq Scan on rlp5_1 + Filter: ((a > 1) AND (a >= 15)) + -> Seq Scan on rlp5_default + Filter: ((a > 1) AND (a >= 15)) + -> Seq Scan on rlp_default_30 + Filter: ((a > 1) AND (a >= 15)) + -> Seq Scan on rlp_default_default + Filter: ((a > 1) AND (a >= 15)) +(23 rows) + +explain (costs off) select * from rlp where a = 1 and a = 3; /* empty */ + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +explain (costs off) select * from rlp where (a = 1 and a = 3) or (a > 1 and a = 15); + QUERY PLAN +------------------------------------------------------------------- + Append + -> Seq Scan on rlp2 + Filter: (((a = 1) AND (a = 3)) OR ((a > 1) AND (a = 15))) + -> Seq Scan on rlp3abcd + Filter: (((a = 1) AND (a = 3)) OR ((a > 1) AND (a = 15))) + -> Seq Scan on rlp3efgh + Filter: (((a = 1) AND (a = 3)) OR ((a > 1) AND (a = 15))) + -> Seq Scan on rlp3nullxy + Filter: (((a = 1) AND (a = 3)) OR ((a > 1) AND (a = 15))) + -> Seq Scan on rlp3_default + Filter: (((a = 1) AND (a = 3)) OR ((a > 1) AND (a = 15))) +(11 rows) + +-- multi-column keys +create table mc3p (a int, b int, c int) partition by range (a, 
abs(b), c); +create table mc3p_default partition of mc3p default; +create table mc3p0 partition of mc3p for values from (minvalue, minvalue, minvalue) to (1, 1, 1); +create table mc3p1 partition of mc3p for values from (1, 1, 1) to (10, 5, 10); +create table mc3p2 partition of mc3p for values from (10, 5, 10) to (10, 10, 10); +create table mc3p3 partition of mc3p for values from (10, 10, 10) to (10, 10, 20); +create table mc3p4 partition of mc3p for values from (10, 10, 20) to (10, maxvalue, maxvalue); +create table mc3p5 partition of mc3p for values from (11, 1, 1) to (20, 10, 10); +create table mc3p6 partition of mc3p for values from (20, 10, 10) to (20, 20, 20); +create table mc3p7 partition of mc3p for values from (20, 20, 20) to (maxvalue, maxvalue, maxvalue); +explain (costs off) select * from mc3p where a = 1; + QUERY PLAN +-------------------------------- + Append + -> Seq Scan on mc3p0 + Filter: (a = 1) + -> Seq Scan on mc3p1 + Filter: (a = 1) + -> Seq Scan on mc3p_default + Filter: (a = 1) +(7 rows) + +explain (costs off) select * from mc3p where a = 1 and abs(b) < 1; + QUERY PLAN +-------------------------------------------- + Append + -> Seq Scan on mc3p0 + Filter: ((a = 1) AND (abs(b) < 1)) + -> Seq Scan on mc3p_default + Filter: ((a = 1) AND (abs(b) < 1)) +(5 rows) + +explain (costs off) select * from mc3p where a = 1 and abs(b) = 1; + QUERY PLAN +-------------------------------------------- + Append + -> Seq Scan on mc3p0 + Filter: ((a = 1) AND (abs(b) = 1)) + -> Seq Scan on mc3p1 + Filter: ((a = 1) AND (abs(b) = 1)) + -> Seq Scan on mc3p_default + Filter: ((a = 1) AND (abs(b) = 1)) +(7 rows) + +explain (costs off) select * from mc3p where a = 1 and abs(b) = 1 and c < 8; + QUERY PLAN +-------------------------------------------------------- + Append + -> Seq Scan on mc3p0 + Filter: ((c < 8) AND (a = 1) AND (abs(b) = 1)) + -> Seq Scan on mc3p1 + Filter: ((c < 8) AND (a = 1) AND (abs(b) = 1)) +(5 rows) + +explain (costs off) select * from mc3p where a = 10 and abs(b) between 5 and 35; + QUERY PLAN +----------------------------------------------------------------- + Append + -> Seq Scan on mc3p1 + Filter: ((a = 10) AND (abs(b) >= 5) AND (abs(b) <= 35)) + -> Seq Scan on mc3p2 + Filter: ((a = 10) AND (abs(b) >= 5) AND (abs(b) <= 35)) + -> Seq Scan on mc3p3 + Filter: ((a = 10) AND (abs(b) >= 5) AND (abs(b) <= 35)) + -> Seq Scan on mc3p4 + Filter: ((a = 10) AND (abs(b) >= 5) AND (abs(b) <= 35)) + -> Seq Scan on mc3p_default + Filter: ((a = 10) AND (abs(b) >= 5) AND (abs(b) <= 35)) +(11 rows) + +explain (costs off) select * from mc3p where a > 10; + QUERY PLAN +-------------------------------- + Append + -> Seq Scan on mc3p5 + Filter: (a > 10) + -> Seq Scan on mc3p6 + Filter: (a > 10) + -> Seq Scan on mc3p7 + Filter: (a > 10) + -> Seq Scan on mc3p_default + Filter: (a > 10) +(9 rows) + +explain (costs off) select * from mc3p where a >= 10; + QUERY PLAN +-------------------------------- + Append + -> Seq Scan on mc3p1 + Filter: (a >= 10) + -> Seq Scan on mc3p2 + Filter: (a >= 10) + -> Seq Scan on mc3p3 + Filter: (a >= 10) + -> Seq Scan on mc3p4 + Filter: (a >= 10) + -> Seq Scan on mc3p5 + Filter: (a >= 10) + -> Seq Scan on mc3p6 + Filter: (a >= 10) + -> Seq Scan on mc3p7 + Filter: (a >= 10) + -> Seq Scan on mc3p_default + Filter: (a >= 10) +(17 rows) + +explain (costs off) select * from mc3p where a < 10; + QUERY PLAN +-------------------------------- + Append + -> Seq Scan on mc3p0 + Filter: (a < 10) + -> Seq Scan on mc3p1 + Filter: (a < 10) + -> Seq Scan on mc3p_default + Filter: 
(a < 10) +(7 rows) + +explain (costs off) select * from mc3p where a <= 10 and abs(b) < 10; + QUERY PLAN +----------------------------------------------- + Append + -> Seq Scan on mc3p0 + Filter: ((a <= 10) AND (abs(b) < 10)) + -> Seq Scan on mc3p1 + Filter: ((a <= 10) AND (abs(b) < 10)) + -> Seq Scan on mc3p2 + Filter: ((a <= 10) AND (abs(b) < 10)) + -> Seq Scan on mc3p_default + Filter: ((a <= 10) AND (abs(b) < 10)) +(9 rows) + +explain (costs off) select * from mc3p where a = 11 and abs(b) = 0; + QUERY PLAN +--------------------------------------------- + Append + -> Seq Scan on mc3p_default + Filter: ((a = 11) AND (abs(b) = 0)) +(3 rows) + +explain (costs off) select * from mc3p where a = 20 and abs(b) = 10 and c = 100; + QUERY PLAN +------------------------------------------------------------ + Append + -> Seq Scan on mc3p6 + Filter: ((a = 20) AND (c = 100) AND (abs(b) = 10)) +(3 rows) + +explain (costs off) select * from mc3p where a > 20; + QUERY PLAN +-------------------------------- + Append + -> Seq Scan on mc3p7 + Filter: (a > 20) + -> Seq Scan on mc3p_default + Filter: (a > 20) +(5 rows) + +explain (costs off) select * from mc3p where a >= 20; + QUERY PLAN +-------------------------------- + Append + -> Seq Scan on mc3p5 + Filter: (a >= 20) + -> Seq Scan on mc3p6 + Filter: (a >= 20) + -> Seq Scan on mc3p7 + Filter: (a >= 20) + -> Seq Scan on mc3p_default + Filter: (a >= 20) +(9 rows) + +explain (costs off) select * from mc3p where (a = 1 and abs(b) = 1 and c = 1) or (a = 10 and abs(b) = 5 and c = 10) or (a > 11 and a < 20); + QUERY PLAN +--------------------------------------------------------------------------------------------------------------------------------- + Append + -> Seq Scan on mc3p1 + Filter: (((a = 1) AND (abs(b) = 1) AND (c = 1)) OR ((a = 10) AND (abs(b) = 5) AND (c = 10)) OR ((a > 11) AND (a < 20))) + -> Seq Scan on mc3p2 + Filter: (((a = 1) AND (abs(b) = 1) AND (c = 1)) OR ((a = 10) AND (abs(b) = 5) AND (c = 10)) OR ((a > 11) AND (a < 20))) + -> Seq Scan on mc3p5 + Filter: (((a = 1) AND (abs(b) = 1) AND (c = 1)) OR ((a = 10) AND (abs(b) = 5) AND (c = 10)) OR ((a > 11) AND (a < 20))) + -> Seq Scan on mc3p_default + Filter: (((a = 1) AND (abs(b) = 1) AND (c = 1)) OR ((a = 10) AND (abs(b) = 5) AND (c = 10)) OR ((a > 11) AND (a < 20))) +(9 rows) + +explain (costs off) select * from mc3p where (a = 1 and abs(b) = 1 and c = 1) or (a = 10 and abs(b) = 5 and c = 10) or (a > 11 and a < 20) or a < 1; + QUERY PLAN +-------------------------------------------------------------------------------------------------------------------------------------------- + Append + -> Seq Scan on mc3p0 + Filter: (((a = 1) AND (abs(b) = 1) AND (c = 1)) OR ((a = 10) AND (abs(b) = 5) AND (c = 10)) OR ((a > 11) AND (a < 20)) OR (a < 1)) + -> Seq Scan on mc3p1 + Filter: (((a = 1) AND (abs(b) = 1) AND (c = 1)) OR ((a = 10) AND (abs(b) = 5) AND (c = 10)) OR ((a > 11) AND (a < 20)) OR (a < 1)) + -> Seq Scan on mc3p2 + Filter: (((a = 1) AND (abs(b) = 1) AND (c = 1)) OR ((a = 10) AND (abs(b) = 5) AND (c = 10)) OR ((a > 11) AND (a < 20)) OR (a < 1)) + -> Seq Scan on mc3p5 + Filter: (((a = 1) AND (abs(b) = 1) AND (c = 1)) OR ((a = 10) AND (abs(b) = 5) AND (c = 10)) OR ((a > 11) AND (a < 20)) OR (a < 1)) + -> Seq Scan on mc3p_default + Filter: (((a = 1) AND (abs(b) = 1) AND (c = 1)) OR ((a = 10) AND (abs(b) = 5) AND (c = 10)) OR ((a > 11) AND (a < 20)) OR (a < 1)) +(11 rows) + +explain (costs off) select * from mc3p where (a = 1 and abs(b) = 1 and c = 1) or (a = 10 and abs(b) = 5 and c = 10) or (a > 
11 and a < 20) or a < 1 or a = 1; + QUERY PLAN +------------------------------------------------------------------------------------------------------------------------------------------------------- + Append + -> Seq Scan on mc3p0 + Filter: (((a = 1) AND (abs(b) = 1) AND (c = 1)) OR ((a = 10) AND (abs(b) = 5) AND (c = 10)) OR ((a > 11) AND (a < 20)) OR (a < 1) OR (a = 1)) + -> Seq Scan on mc3p1 + Filter: (((a = 1) AND (abs(b) = 1) AND (c = 1)) OR ((a = 10) AND (abs(b) = 5) AND (c = 10)) OR ((a > 11) AND (a < 20)) OR (a < 1) OR (a = 1)) + -> Seq Scan on mc3p2 + Filter: (((a = 1) AND (abs(b) = 1) AND (c = 1)) OR ((a = 10) AND (abs(b) = 5) AND (c = 10)) OR ((a > 11) AND (a < 20)) OR (a < 1) OR (a = 1)) + -> Seq Scan on mc3p5 + Filter: (((a = 1) AND (abs(b) = 1) AND (c = 1)) OR ((a = 10) AND (abs(b) = 5) AND (c = 10)) OR ((a > 11) AND (a < 20)) OR (a < 1) OR (a = 1)) + -> Seq Scan on mc3p_default + Filter: (((a = 1) AND (abs(b) = 1) AND (c = 1)) OR ((a = 10) AND (abs(b) = 5) AND (c = 10)) OR ((a > 11) AND (a < 20)) OR (a < 1) OR (a = 1)) +(11 rows) + +explain (costs off) select * from mc3p where a = 1 or abs(b) = 1 or c = 1; + QUERY PLAN +------------------------------------------------------ + Append + -> Seq Scan on mc3p0 + Filter: ((a = 1) OR (abs(b) = 1) OR (c = 1)) + -> Seq Scan on mc3p1 + Filter: ((a = 1) OR (abs(b) = 1) OR (c = 1)) + -> Seq Scan on mc3p2 + Filter: ((a = 1) OR (abs(b) = 1) OR (c = 1)) + -> Seq Scan on mc3p3 + Filter: ((a = 1) OR (abs(b) = 1) OR (c = 1)) + -> Seq Scan on mc3p4 + Filter: ((a = 1) OR (abs(b) = 1) OR (c = 1)) + -> Seq Scan on mc3p5 + Filter: ((a = 1) OR (abs(b) = 1) OR (c = 1)) + -> Seq Scan on mc3p6 + Filter: ((a = 1) OR (abs(b) = 1) OR (c = 1)) + -> Seq Scan on mc3p7 + Filter: ((a = 1) OR (abs(b) = 1) OR (c = 1)) + -> Seq Scan on mc3p_default + Filter: ((a = 1) OR (abs(b) = 1) OR (c = 1)) +(19 rows) + +explain (costs off) select * from mc3p where (a = 1 and abs(b) = 1) or (a = 10 and abs(b) = 10); + QUERY PLAN +------------------------------------------------------------------------------ + Append + -> Seq Scan on mc3p0 + Filter: (((a = 1) AND (abs(b) = 1)) OR ((a = 10) AND (abs(b) = 10))) + -> Seq Scan on mc3p1 + Filter: (((a = 1) AND (abs(b) = 1)) OR ((a = 10) AND (abs(b) = 10))) + -> Seq Scan on mc3p2 + Filter: (((a = 1) AND (abs(b) = 1)) OR ((a = 10) AND (abs(b) = 10))) + -> Seq Scan on mc3p3 + Filter: (((a = 1) AND (abs(b) = 1)) OR ((a = 10) AND (abs(b) = 10))) + -> Seq Scan on mc3p4 + Filter: (((a = 1) AND (abs(b) = 1)) OR ((a = 10) AND (abs(b) = 10))) + -> Seq Scan on mc3p_default + Filter: (((a = 1) AND (abs(b) = 1)) OR ((a = 10) AND (abs(b) = 10))) +(13 rows) + +explain (costs off) select * from mc3p where (a = 1 and abs(b) = 1) or (a = 10 and abs(b) = 9); + QUERY PLAN +----------------------------------------------------------------------------- + Append + -> Seq Scan on mc3p0 + Filter: (((a = 1) AND (abs(b) = 1)) OR ((a = 10) AND (abs(b) = 9))) + -> Seq Scan on mc3p1 + Filter: (((a = 1) AND (abs(b) = 1)) OR ((a = 10) AND (abs(b) = 9))) + -> Seq Scan on mc3p2 + Filter: (((a = 1) AND (abs(b) = 1)) OR ((a = 10) AND (abs(b) = 9))) + -> Seq Scan on mc3p_default + Filter: (((a = 1) AND (abs(b) = 1)) OR ((a = 10) AND (abs(b) = 9))) +(9 rows) + +-- a simpler multi-column keys case +create table mc2p (a int, b int) partition by range (a, b); +create table mc2p_default partition of mc2p default; +create table mc2p0 partition of mc2p for values from (minvalue, minvalue) to (1, minvalue); +create table mc2p1 partition of mc2p for values from (1, minvalue) 
to (1, 1); +create table mc2p2 partition of mc2p for values from (1, 1) to (2, minvalue); +create table mc2p3 partition of mc2p for values from (2, minvalue) to (2, 1); +create table mc2p4 partition of mc2p for values from (2, 1) to (2, maxvalue); +create table mc2p5 partition of mc2p for values from (2, maxvalue) to (maxvalue, maxvalue); +explain (costs off) select * from mc2p where a < 2; + QUERY PLAN +-------------------------------- + Append + -> Seq Scan on mc2p0 + Filter: (a < 2) + -> Seq Scan on mc2p1 + Filter: (a < 2) + -> Seq Scan on mc2p2 + Filter: (a < 2) + -> Seq Scan on mc2p_default + Filter: (a < 2) +(9 rows) + +explain (costs off) select * from mc2p where a = 2 and b < 1; + QUERY PLAN +--------------------------------------- + Append + -> Seq Scan on mc2p3 + Filter: ((b < 1) AND (a = 2)) +(3 rows) + +explain (costs off) select * from mc2p where a > 1; + QUERY PLAN +-------------------------------- + Append + -> Seq Scan on mc2p2 + Filter: (a > 1) + -> Seq Scan on mc2p3 + Filter: (a > 1) + -> Seq Scan on mc2p4 + Filter: (a > 1) + -> Seq Scan on mc2p5 + Filter: (a > 1) + -> Seq Scan on mc2p_default + Filter: (a > 1) +(11 rows) + +explain (costs off) select * from mc2p where a = 1 and b > 1; + QUERY PLAN +--------------------------------------- + Append + -> Seq Scan on mc2p2 + Filter: ((b > 1) AND (a = 1)) +(3 rows) + +-- all partitions but the default one should be pruned +explain (costs off) select * from mc2p where a = 1 and b is null; + QUERY PLAN +------------------------------------------- + Append + -> Seq Scan on mc2p_default + Filter: ((b IS NULL) AND (a = 1)) +(3 rows) + +explain (costs off) select * from mc2p where a is null and b is null; + QUERY PLAN +----------------------------------------------- + Append + -> Seq Scan on mc2p_default + Filter: ((a IS NULL) AND (b IS NULL)) +(3 rows) + +explain (costs off) select * from mc2p where a is null and b = 1; + QUERY PLAN +------------------------------------------- + Append + -> Seq Scan on mc2p_default + Filter: ((a IS NULL) AND (b = 1)) +(3 rows) + +explain (costs off) select * from mc2p where a is null; + QUERY PLAN +-------------------------------- + Append + -> Seq Scan on mc2p_default + Filter: (a IS NULL) +(3 rows) + +explain (costs off) select * from mc2p where b is null; + QUERY PLAN +-------------------------------- + Append + -> Seq Scan on mc2p_default + Filter: (b IS NULL) +(3 rows) + +-- boolean partitioning +create table boolpart (a bool) partition by list (a); +create table boolpart_default partition of boolpart default; +create table boolpart_t partition of boolpart for values in ('true'); +create table boolpart_f partition of boolpart for values in ('false'); +explain (costs off) select * from boolpart where a in (true, false); + QUERY PLAN +------------------------------------------------ + Append + -> Seq Scan on boolpart_f + Filter: (a = ANY ('{t,f}'::boolean[])) + -> Seq Scan on boolpart_t + Filter: (a = ANY ('{t,f}'::boolean[])) +(5 rows) + +explain (costs off) select * from boolpart where a = false; + QUERY PLAN +------------------------------ + Append + -> Seq Scan on boolpart_f + Filter: (NOT a) +(3 rows) + +explain (costs off) select * from boolpart where not a = false; + QUERY PLAN +------------------------------ + Append + -> Seq Scan on boolpart_t + Filter: a +(3 rows) + +explain (costs off) select * from boolpart where a is true or a is not true; + QUERY PLAN +-------------------------------------------------- + Append + -> Seq Scan on boolpart_f + Filter: ((a IS TRUE) OR (a IS NOT TRUE)) 
+ -> Seq Scan on boolpart_t + Filter: ((a IS TRUE) OR (a IS NOT TRUE)) +(5 rows) + +explain (costs off) select * from boolpart where a is not true; + QUERY PLAN +--------------------------------- + Append + -> Seq Scan on boolpart_f + Filter: (a IS NOT TRUE) +(3 rows) + +explain (costs off) select * from boolpart where a is not true and a is not false; + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +explain (costs off) select * from boolpart where a is unknown; + QUERY PLAN +------------------------------------ + Append + -> Seq Scan on boolpart_f + Filter: (a IS UNKNOWN) + -> Seq Scan on boolpart_t + Filter: (a IS UNKNOWN) + -> Seq Scan on boolpart_default + Filter: (a IS UNKNOWN) +(7 rows) + +explain (costs off) select * from boolpart where a is not unknown; + QUERY PLAN +------------------------------------ + Append + -> Seq Scan on boolpart_f + Filter: (a IS NOT UNKNOWN) + -> Seq Scan on boolpart_t + Filter: (a IS NOT UNKNOWN) + -> Seq Scan on boolpart_default + Filter: (a IS NOT UNKNOWN) +(7 rows) + +-- test scalar-to-array operators +create table coercepart (a varchar) partition by list (a); +create table coercepart_ab partition of coercepart for values in ('ab'); +create table coercepart_bc partition of coercepart for values in ('bc'); +create table coercepart_cd partition of coercepart for values in ('cd'); +explain (costs off) select * from coercepart where a in ('ab', to_char(125, '999')); + QUERY PLAN +------------------------------------------------------------------------------------------------------------------------------ + Append + -> Seq Scan on coercepart_ab + Filter: ((a)::text = ANY ((ARRAY['ab'::character varying, (to_char(125, '999'::text))::character varying])::text[])) + -> Seq Scan on coercepart_bc + Filter: ((a)::text = ANY ((ARRAY['ab'::character varying, (to_char(125, '999'::text))::character varying])::text[])) + -> Seq Scan on coercepart_cd + Filter: ((a)::text = ANY ((ARRAY['ab'::character varying, (to_char(125, '999'::text))::character varying])::text[])) +(7 rows) + +explain (costs off) select * from coercepart where a ~ any ('{ab}'); + QUERY PLAN +---------------------------------------------------- + Append + -> Seq Scan on coercepart_ab + Filter: ((a)::text ~ ANY ('{ab}'::text[])) + -> Seq Scan on coercepart_bc + Filter: ((a)::text ~ ANY ('{ab}'::text[])) + -> Seq Scan on coercepart_cd + Filter: ((a)::text ~ ANY ('{ab}'::text[])) +(7 rows) + +explain (costs off) select * from coercepart where a !~ all ('{ab}'); + QUERY PLAN +----------------------------------------------------- + Append + -> Seq Scan on coercepart_ab + Filter: ((a)::text !~ ALL ('{ab}'::text[])) + -> Seq Scan on coercepart_bc + Filter: ((a)::text !~ ALL ('{ab}'::text[])) + -> Seq Scan on coercepart_cd + Filter: ((a)::text !~ ALL ('{ab}'::text[])) +(7 rows) + +explain (costs off) select * from coercepart where a ~ any ('{ab,bc}'); + QUERY PLAN +------------------------------------------------------- + Append + -> Seq Scan on coercepart_ab + Filter: ((a)::text ~ ANY ('{ab,bc}'::text[])) + -> Seq Scan on coercepart_bc + Filter: ((a)::text ~ ANY ('{ab,bc}'::text[])) + -> Seq Scan on coercepart_cd + Filter: ((a)::text ~ ANY ('{ab,bc}'::text[])) +(7 rows) + +explain (costs off) select * from coercepart where a !~ all ('{ab,bc}'); + QUERY PLAN +-------------------------------------------------------- + Append + -> Seq Scan on coercepart_ab + Filter: ((a)::text !~ ALL ('{ab,bc}'::text[])) + -> Seq Scan on coercepart_bc + Filter: ((a)::text !~ ALL 
('{ab,bc}'::text[])) + -> Seq Scan on coercepart_cd + Filter: ((a)::text !~ ALL ('{ab,bc}'::text[])) +(7 rows) + +drop table coercepart; +CREATE TABLE part (a INT, b INT) PARTITION BY LIST (a); +CREATE TABLE part_p1 PARTITION OF part FOR VALUES IN (-2,-1,0,1,2); +CREATE TABLE part_p2 PARTITION OF part DEFAULT PARTITION BY RANGE(a); +CREATE TABLE part_p2_p1 PARTITION OF part_p2 DEFAULT; +INSERT INTO part VALUES (-1,-1), (1,1), (2,NULL), (NULL,-2),(NULL,NULL); +EXPLAIN (COSTS OFF) SELECT tableoid::regclass as part, a, b FROM part WHERE a IS NULL ORDER BY 1, 2, 3; + QUERY PLAN +--------------------------------------------------------------------------- + Sort + Sort Key: ((part_p2_p1.tableoid)::regclass), part_p2_p1.a, part_p2_p1.b + -> Append + -> Seq Scan on part_p2_p1 + Filter: (a IS NULL) +(5 rows) + +-- +-- some more cases +-- +-- +-- pruning for partitioned table appearing inside a sub-query +-- +-- pruning won't work for mc3p, because some keys are Params +explain (costs off) select * from mc2p t1, lateral (select count(*) from mc3p t2 where t2.a = t1.b and abs(t2.b) = 1 and t2.c = 1) s where t1.a = 1; + QUERY PLAN +----------------------------------------------------------------------- + Nested Loop + -> Append + -> Seq Scan on mc2p1 t1 + Filter: (a = 1) + -> Seq Scan on mc2p2 t1_1 + Filter: (a = 1) + -> Seq Scan on mc2p_default t1_2 + Filter: (a = 1) + -> Aggregate + -> Append + -> Seq Scan on mc3p0 t2 + Filter: ((a = t1.b) AND (c = 1) AND (abs(b) = 1)) + -> Seq Scan on mc3p1 t2_1 + Filter: ((a = t1.b) AND (c = 1) AND (abs(b) = 1)) + -> Seq Scan on mc3p2 t2_2 + Filter: ((a = t1.b) AND (c = 1) AND (abs(b) = 1)) + -> Seq Scan on mc3p3 t2_3 + Filter: ((a = t1.b) AND (c = 1) AND (abs(b) = 1)) + -> Seq Scan on mc3p4 t2_4 + Filter: ((a = t1.b) AND (c = 1) AND (abs(b) = 1)) + -> Seq Scan on mc3p5 t2_5 + Filter: ((a = t1.b) AND (c = 1) AND (abs(b) = 1)) + -> Seq Scan on mc3p6 t2_6 + Filter: ((a = t1.b) AND (c = 1) AND (abs(b) = 1)) + -> Seq Scan on mc3p7 t2_7 + Filter: ((a = t1.b) AND (c = 1) AND (abs(b) = 1)) + -> Seq Scan on mc3p_default t2_8 + Filter: ((a = t1.b) AND (c = 1) AND (abs(b) = 1)) +(28 rows) + +-- pruning should work fine, because values for a prefix of keys (a, b) are +-- available +explain (costs off) select * from mc2p t1, lateral (select count(*) from mc3p t2 where t2.c = t1.b and abs(t2.b) = 1 and t2.a = 1) s where t1.a = 1; + QUERY PLAN +----------------------------------------------------------------------- + Nested Loop + -> Append + -> Seq Scan on mc2p1 t1 + Filter: (a = 1) + -> Seq Scan on mc2p2 t1_1 + Filter: (a = 1) + -> Seq Scan on mc2p_default t1_2 + Filter: (a = 1) + -> Aggregate + -> Append + -> Seq Scan on mc3p0 t2 + Filter: ((c = t1.b) AND (a = 1) AND (abs(b) = 1)) + -> Seq Scan on mc3p1 t2_1 + Filter: ((c = t1.b) AND (a = 1) AND (abs(b) = 1)) + -> Seq Scan on mc3p_default t2_2 + Filter: ((c = t1.b) AND (a = 1) AND (abs(b) = 1)) +(16 rows) + +-- also here, because values for all keys are provided +explain (costs off) select * from mc2p t1, lateral (select count(*) from mc3p t2 where t2.a = 1 and abs(t2.b) = 1 and t2.c = 1) s where t1.a = 1; + QUERY PLAN +-------------------------------------------------------------------- + Nested Loop + -> Aggregate + -> Append + -> Seq Scan on mc3p1 t2 + Filter: ((a = 1) AND (c = 1) AND (abs(b) = 1)) + -> Append + -> Seq Scan on mc2p1 t1 + Filter: (a = 1) + -> Seq Scan on mc2p2 t1_1 + Filter: (a = 1) + -> Seq Scan on mc2p_default t1_2 + Filter: (a = 1) +(12 rows) + +-- +-- pruning with clauses containing <> operator +-- +-- 
doesn't prune range partitions +create table rp (a int) partition by range (a); +create table rp0 partition of rp for values from (minvalue) to (1); +create table rp1 partition of rp for values from (1) to (2); +create table rp2 partition of rp for values from (2) to (maxvalue); +explain (costs off) select * from rp where a <> 1; + QUERY PLAN +-------------------------- + Append + -> Seq Scan on rp0 + Filter: (a <> 1) + -> Seq Scan on rp1 + Filter: (a <> 1) + -> Seq Scan on rp2 + Filter: (a <> 1) +(7 rows) + +explain (costs off) select * from rp where a <> 1 and a <> 2; + QUERY PLAN +----------------------------------------- + Append + -> Seq Scan on rp0 + Filter: ((a <> 1) AND (a <> 2)) + -> Seq Scan on rp1 + Filter: ((a <> 1) AND (a <> 2)) + -> Seq Scan on rp2 + Filter: ((a <> 1) AND (a <> 2)) +(7 rows) + +-- null partition should be eliminated due to strict <> clause. +explain (costs off) select * from lp where a <> 'a'; + QUERY PLAN +------------------------------------ + Append + -> Seq Scan on lp_ad + Filter: (a <> 'a'::bpchar) + -> Seq Scan on lp_bc + Filter: (a <> 'a'::bpchar) + -> Seq Scan on lp_ef + Filter: (a <> 'a'::bpchar) + -> Seq Scan on lp_g + Filter: (a <> 'a'::bpchar) + -> Seq Scan on lp_default + Filter: (a <> 'a'::bpchar) +(11 rows) + +-- ensure we detect contradictions in clauses; a can't be NULL and NOT NULL. +explain (costs off) select * from lp where a <> 'a' and a is null; + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +explain (costs off) select * from lp where (a <> 'a' and a <> 'd') or a is null; + QUERY PLAN +------------------------------------------------------------------------------ + Append + -> Seq Scan on lp_bc + Filter: (((a <> 'a'::bpchar) AND (a <> 'd'::bpchar)) OR (a IS NULL)) + -> Seq Scan on lp_ef + Filter: (((a <> 'a'::bpchar) AND (a <> 'd'::bpchar)) OR (a IS NULL)) + -> Seq Scan on lp_g + Filter: (((a <> 'a'::bpchar) AND (a <> 'd'::bpchar)) OR (a IS NULL)) + -> Seq Scan on lp_null + Filter: (((a <> 'a'::bpchar) AND (a <> 'd'::bpchar)) OR (a IS NULL)) + -> Seq Scan on lp_default + Filter: (((a <> 'a'::bpchar) AND (a <> 'd'::bpchar)) OR (a IS NULL)) +(11 rows) + +-- check that it also works for a partitioned table that's not root, +-- which in this case are partitions of rlp that are themselves +-- list-partitioned on b +explain (costs off) select * from rlp where a = 15 and b <> 'ab' and b <> 'cd' and b <> 'xy' and b is not null; + QUERY PLAN +------------------------------------------------------------------------------------------------------------------------------------------ + Append + -> Seq Scan on rlp3efgh + Filter: ((b IS NOT NULL) AND ((b)::text <> 'ab'::text) AND ((b)::text <> 'cd'::text) AND ((b)::text <> 'xy'::text) AND (a = 15)) + -> Seq Scan on rlp3_default + Filter: ((b IS NOT NULL) AND ((b)::text <> 'ab'::text) AND ((b)::text <> 'cd'::text) AND ((b)::text <> 'xy'::text) AND (a = 15)) +(5 rows) + +-- +-- different collations for different keys with same expression +-- +create table coll_pruning_multi (a text) partition by range (substr(a, 1) collate "POSIX", substr(a, 1) collate "C"); +create table coll_pruning_multi1 partition of coll_pruning_multi for values from ('a', 'a') to ('a', 'e'); +create table coll_pruning_multi2 partition of coll_pruning_multi for values from ('a', 'e') to ('a', 'z'); +create table coll_pruning_multi3 partition of coll_pruning_multi for values from ('b', 'a') to ('b', 'e'); +-- no pruning, because no value for the leading key +explain (costs off) select * from 
coll_pruning_multi where substr(a, 1) = 'e' collate "C"; + QUERY PLAN +-------------------------------------------------------- + Append + -> Seq Scan on coll_pruning_multi1 + Filter: (substr(a, 1) = 'e'::text COLLATE "C") + -> Seq Scan on coll_pruning_multi2 + Filter: (substr(a, 1) = 'e'::text COLLATE "C") + -> Seq Scan on coll_pruning_multi3 + Filter: (substr(a, 1) = 'e'::text COLLATE "C") +(7 rows) + +-- pruning, with a value provided for the leading key +explain (costs off) select * from coll_pruning_multi where substr(a, 1) = 'a' collate "POSIX"; + QUERY PLAN +------------------------------------------------------------ + Append + -> Seq Scan on coll_pruning_multi1 + Filter: (substr(a, 1) = 'a'::text COLLATE "POSIX") + -> Seq Scan on coll_pruning_multi2 + Filter: (substr(a, 1) = 'a'::text COLLATE "POSIX") +(5 rows) + +-- pruning, with values provided for both keys +explain (costs off) select * from coll_pruning_multi where substr(a, 1) = 'e' collate "C" and substr(a, 1) = 'a' collate "POSIX"; + QUERY PLAN +--------------------------------------------------------------------------------------------------------- + Append + -> Seq Scan on coll_pruning_multi2 + Filter: ((substr(a, 1) = 'e'::text COLLATE "C") AND (substr(a, 1) = 'a'::text COLLATE "POSIX")) +(3 rows) + +-- +-- LIKE operators don't prune +-- +create table like_op_noprune (a text) partition by list (a); +create table like_op_noprune1 partition of like_op_noprune for values in ('ABC'); +create table like_op_noprune2 partition of like_op_noprune for values in ('BCD'); +explain (costs off) select * from like_op_noprune where a like '%BC'; + QUERY PLAN +------------------------------------ + Append + -> Seq Scan on like_op_noprune1 + Filter: (a ~~ '%BC'::text) + -> Seq Scan on like_op_noprune2 + Filter: (a ~~ '%BC'::text) +(5 rows) + +-- +-- tests wherein clause value requires a cross-type comparison function +-- +create table lparted_by_int2 (a smallint) partition by list (a); +create table lparted_by_int2_1 partition of lparted_by_int2 for values in (1); +create table lparted_by_int2_16384 partition of lparted_by_int2 for values in (16384); +explain (costs off) select * from lparted_by_int2 where a = 100000000000000; + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +create table rparted_by_int2 (a smallint) partition by range (a); +create table rparted_by_int2_1 partition of rparted_by_int2 for values from (1) to (10); +create table rparted_by_int2_16384 partition of rparted_by_int2 for values from (10) to (16384); +-- all partitions pruned +explain (costs off) select * from rparted_by_int2 where a > 100000000000000; + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +create table rparted_by_int2_maxvalue partition of rparted_by_int2 for values from (16384) to (maxvalue); +-- all partitions but rparted_by_int2_maxvalue pruned +explain (costs off) select * from rparted_by_int2 where a > 100000000000000; + QUERY PLAN +------------------------------------------------- + Append + -> Seq Scan on rparted_by_int2_maxvalue + Filter: (a > '100000000000000'::bigint) +(3 rows) + +drop table lp, coll_pruning, rlp, mc3p, mc2p, boolpart, rp, coll_pruning_multi, like_op_noprune, lparted_by_int2, rparted_by_int2; +-- +-- Test Partition pruning for HASH partitioning +-- +-- Use hand-rolled hash functions and operator classes to get predictable +-- result on different matchines. See the definitions of +-- part_part_test_int4_ops and part_test_text_ops in insert.sql. 
+-- +create table hp (a int, b text) partition by hash (a part_test_int4_ops, b part_test_text_ops); +create table hp0 partition of hp for values with (modulus 4, remainder 0); +create table hp3 partition of hp for values with (modulus 4, remainder 3); +create table hp1 partition of hp for values with (modulus 4, remainder 1); +create table hp2 partition of hp for values with (modulus 4, remainder 2); +insert into hp values (null, null); +insert into hp values (1, null); +insert into hp values (1, 'xxx'); +insert into hp values (null, 'xxx'); +insert into hp values (2, 'xxx'); +insert into hp values (1, 'abcde'); +select tableoid::regclass, * from hp order by 1; + tableoid | a | b +----------+---+------- + hp0 | | + hp0 | 1 | xxx + hp3 | 2 | xxx + hp1 | 1 | + hp2 | | xxx + hp2 | 1 | abcde +(6 rows) + +-- partial keys won't prune, nor would non-equality conditions +explain (costs off) select * from hp where a = 1; + QUERY PLAN +------------------------- + Append + -> Seq Scan on hp0 + Filter: (a = 1) + -> Seq Scan on hp1 + Filter: (a = 1) + -> Seq Scan on hp2 + Filter: (a = 1) + -> Seq Scan on hp3 + Filter: (a = 1) +(9 rows) + +explain (costs off) select * from hp where b = 'xxx'; + QUERY PLAN +----------------------------------- + Append + -> Seq Scan on hp0 + Filter: (b = 'xxx'::text) + -> Seq Scan on hp1 + Filter: (b = 'xxx'::text) + -> Seq Scan on hp2 + Filter: (b = 'xxx'::text) + -> Seq Scan on hp3 + Filter: (b = 'xxx'::text) +(9 rows) + +explain (costs off) select * from hp where a is null; + QUERY PLAN +----------------------------- + Append + -> Seq Scan on hp0 + Filter: (a IS NULL) + -> Seq Scan on hp1 + Filter: (a IS NULL) + -> Seq Scan on hp2 + Filter: (a IS NULL) + -> Seq Scan on hp3 + Filter: (a IS NULL) +(9 rows) + +explain (costs off) select * from hp where b is null; + QUERY PLAN +----------------------------- + Append + -> Seq Scan on hp0 + Filter: (b IS NULL) + -> Seq Scan on hp1 + Filter: (b IS NULL) + -> Seq Scan on hp2 + Filter: (b IS NULL) + -> Seq Scan on hp3 + Filter: (b IS NULL) +(9 rows) + +explain (costs off) select * from hp where a < 1 and b = 'xxx'; + QUERY PLAN +------------------------------------------------- + Append + -> Seq Scan on hp0 + Filter: ((a < 1) AND (b = 'xxx'::text)) + -> Seq Scan on hp1 + Filter: ((a < 1) AND (b = 'xxx'::text)) + -> Seq Scan on hp2 + Filter: ((a < 1) AND (b = 'xxx'::text)) + -> Seq Scan on hp3 + Filter: ((a < 1) AND (b = 'xxx'::text)) +(9 rows) + +explain (costs off) select * from hp where a <> 1 and b = 'yyy'; + QUERY PLAN +-------------------------------------------------- + Append + -> Seq Scan on hp0 + Filter: ((a <> 1) AND (b = 'yyy'::text)) + -> Seq Scan on hp1 + Filter: ((a <> 1) AND (b = 'yyy'::text)) + -> Seq Scan on hp2 + Filter: ((a <> 1) AND (b = 'yyy'::text)) + -> Seq Scan on hp3 + Filter: ((a <> 1) AND (b = 'yyy'::text)) +(9 rows) + +explain (costs off) select * from hp where a <> 1 and b <> 'xxx'; + QUERY PLAN +--------------------------------------------------- + Append + -> Seq Scan on hp0 + Filter: ((a <> 1) AND (b <> 'xxx'::text)) + -> Seq Scan on hp1 + Filter: ((a <> 1) AND (b <> 'xxx'::text)) + -> Seq Scan on hp2 + Filter: ((a <> 1) AND (b <> 'xxx'::text)) + -> Seq Scan on hp3 + Filter: ((a <> 1) AND (b <> 'xxx'::text)) +(9 rows) + +-- pruning should work if either a value or a IS NULL clause is provided for +-- each of the keys +explain (costs off) select * from hp where a is null and b is null; + QUERY PLAN +----------------------------------------------- + Append + -> Seq Scan on hp0 + Filter: ((a IS 
NULL) AND (b IS NULL)) +(3 rows) + +explain (costs off) select * from hp where a = 1 and b is null; + QUERY PLAN +------------------------------------------- + Append + -> Seq Scan on hp1 + Filter: ((b IS NULL) AND (a = 1)) +(3 rows) + +explain (costs off) select * from hp where a = 1 and b = 'xxx'; + QUERY PLAN +------------------------------------------------- + Append + -> Seq Scan on hp0 + Filter: ((a = 1) AND (b = 'xxx'::text)) +(3 rows) + +explain (costs off) select * from hp where a is null and b = 'xxx'; + QUERY PLAN +----------------------------------------------------- + Append + -> Seq Scan on hp2 + Filter: ((a IS NULL) AND (b = 'xxx'::text)) +(3 rows) + +explain (costs off) select * from hp where a = 2 and b = 'xxx'; + QUERY PLAN +------------------------------------------------- + Append + -> Seq Scan on hp3 + Filter: ((a = 2) AND (b = 'xxx'::text)) +(3 rows) + +explain (costs off) select * from hp where a = 1 and b = 'abcde'; + QUERY PLAN +--------------------------------------------------- + Append + -> Seq Scan on hp2 + Filter: ((a = 1) AND (b = 'abcde'::text)) +(3 rows) + +explain (costs off) select * from hp where (a = 1 and b = 'abcde') or (a = 2 and b = 'xxx') or (a is null and b is null); + QUERY PLAN +------------------------------------------------------------------------------------------------------------------------- + Append + -> Seq Scan on hp0 + Filter: (((a = 1) AND (b = 'abcde'::text)) OR ((a = 2) AND (b = 'xxx'::text)) OR ((a IS NULL) AND (b IS NULL))) + -> Seq Scan on hp2 + Filter: (((a = 1) AND (b = 'abcde'::text)) OR ((a = 2) AND (b = 'xxx'::text)) OR ((a IS NULL) AND (b IS NULL))) + -> Seq Scan on hp3 + Filter: (((a = 1) AND (b = 'abcde'::text)) OR ((a = 2) AND (b = 'xxx'::text)) OR ((a IS NULL) AND (b IS NULL))) +(7 rows) + +drop table hp; +-- +-- Test runtime partition pruning +-- +create table ab (a int not null, b int not null) partition by list (a); +create table ab_a2 partition of ab for values in(2) partition by list (b); +create table ab_a2_b1 partition of ab_a2 for values in (1); +create table ab_a2_b2 partition of ab_a2 for values in (2); +create table ab_a2_b3 partition of ab_a2 for values in (3); +create table ab_a1 partition of ab for values in(1) partition by list (b); +create table ab_a1_b1 partition of ab_a1 for values in (1); +create table ab_a1_b2 partition of ab_a1 for values in (2); +create table ab_a1_b3 partition of ab_a1 for values in (3); +create table ab_a3 partition of ab for values in(3) partition by list (b); +create table ab_a3_b1 partition of ab_a3 for values in (1); +create table ab_a3_b2 partition of ab_a3 for values in (2); +create table ab_a3_b3 partition of ab_a3 for values in (3); +-- Disallow index only scans as concurrent transactions may stop visibility +-- bits being set causing "Heap Fetches" to be unstable in the EXPLAIN ANALYZE +-- output. +set enable_indexonlyscan = off; +prepare ab_q1 (int, int, int) as +select * from ab where a between $1 and $2 and b <= $3; +-- Execute query 5 times to allow choose_custom_plan +-- to start considering a generic plan. 
+execute ab_q1 (1, 8, 3); + a | b +---+--- +(0 rows) + +execute ab_q1 (1, 8, 3); + a | b +---+--- +(0 rows) + +execute ab_q1 (1, 8, 3); + a | b +---+--- +(0 rows) + +execute ab_q1 (1, 8, 3); + a | b +---+--- +(0 rows) + +execute ab_q1 (1, 8, 3); + a | b +---+--- +(0 rows) + +explain (analyze, costs off, summary off, timing off) execute ab_q1 (2, 2, 3); + QUERY PLAN +--------------------------------------------------------- + Append (actual rows=0 loops=1) + Subplans Removed: 6 + -> Seq Scan on ab_a2_b1 (actual rows=0 loops=1) + Filter: ((a >= $1) AND (a <= $2) AND (b <= $3)) + -> Seq Scan on ab_a2_b2 (actual rows=0 loops=1) + Filter: ((a >= $1) AND (a <= $2) AND (b <= $3)) + -> Seq Scan on ab_a2_b3 (actual rows=0 loops=1) + Filter: ((a >= $1) AND (a <= $2) AND (b <= $3)) +(8 rows) + +explain (analyze, costs off, summary off, timing off) execute ab_q1 (1, 2, 3); + QUERY PLAN +--------------------------------------------------------- + Append (actual rows=0 loops=1) + Subplans Removed: 3 + -> Seq Scan on ab_a1_b1 (actual rows=0 loops=1) + Filter: ((a >= $1) AND (a <= $2) AND (b <= $3)) + -> Seq Scan on ab_a1_b2 (actual rows=0 loops=1) + Filter: ((a >= $1) AND (a <= $2) AND (b <= $3)) + -> Seq Scan on ab_a1_b3 (actual rows=0 loops=1) + Filter: ((a >= $1) AND (a <= $2) AND (b <= $3)) + -> Seq Scan on ab_a2_b1 (actual rows=0 loops=1) + Filter: ((a >= $1) AND (a <= $2) AND (b <= $3)) + -> Seq Scan on ab_a2_b2 (actual rows=0 loops=1) + Filter: ((a >= $1) AND (a <= $2) AND (b <= $3)) + -> Seq Scan on ab_a2_b3 (actual rows=0 loops=1) + Filter: ((a >= $1) AND (a <= $2) AND (b <= $3)) +(14 rows) + +deallocate ab_q1; +-- Runtime pruning after optimizer pruning +prepare ab_q1 (int, int) as +select a from ab where a between $1 and $2 and b < 3; +-- Execute query 5 times to allow choose_custom_plan +-- to start considering a generic plan. +execute ab_q1 (1, 8); + a +--- +(0 rows) + +execute ab_q1 (1, 8); + a +--- +(0 rows) + +execute ab_q1 (1, 8); + a +--- +(0 rows) + +execute ab_q1 (1, 8); + a +--- +(0 rows) + +execute ab_q1 (1, 8); + a +--- +(0 rows) + +explain (analyze, costs off, summary off, timing off) execute ab_q1 (2, 2); + QUERY PLAN +------------------------------------------------------- + Append (actual rows=0 loops=1) + Subplans Removed: 4 + -> Seq Scan on ab_a2_b1 (actual rows=0 loops=1) + Filter: ((a >= $1) AND (a <= $2) AND (b < 3)) + -> Seq Scan on ab_a2_b2 (actual rows=0 loops=1) + Filter: ((a >= $1) AND (a <= $2) AND (b < 3)) +(6 rows) + +explain (analyze, costs off, summary off, timing off) execute ab_q1 (2, 4); + QUERY PLAN +------------------------------------------------------- + Append (actual rows=0 loops=1) + Subplans Removed: 2 + -> Seq Scan on ab_a2_b1 (actual rows=0 loops=1) + Filter: ((a >= $1) AND (a <= $2) AND (b < 3)) + -> Seq Scan on ab_a2_b2 (actual rows=0 loops=1) + Filter: ((a >= $1) AND (a <= $2) AND (b < 3)) + -> Seq Scan on ab_a3_b1 (actual rows=0 loops=1) + Filter: ((a >= $1) AND (a <= $2) AND (b < 3)) + -> Seq Scan on ab_a3_b2 (actual rows=0 loops=1) + Filter: ((a >= $1) AND (a <= $2) AND (b < 3)) +(10 rows) + +-- Ensure a mix of PARAM_EXTERN and PARAM_EXEC Params work together at +-- different levels of partitioning. 
+prepare ab_q2 (int, int) as +select a from ab where a between $1 and $2 and b < (select 3); +execute ab_q2 (1, 8); + a +--- +(0 rows) + +execute ab_q2 (1, 8); + a +--- +(0 rows) + +execute ab_q2 (1, 8); + a +--- +(0 rows) + +execute ab_q2 (1, 8); + a +--- +(0 rows) + +execute ab_q2 (1, 8); + a +--- +(0 rows) + +explain (analyze, costs off, summary off, timing off) execute ab_q2 (2, 2); + QUERY PLAN +-------------------------------------------------------- + Append (actual rows=0 loops=1) + InitPlan 1 (returns $0) + -> Result (actual rows=1 loops=1) + Subplans Removed: 6 + -> Seq Scan on ab_a2_b1 (actual rows=0 loops=1) + Filter: ((a >= $1) AND (a <= $2) AND (b < $0)) + -> Seq Scan on ab_a2_b2 (actual rows=0 loops=1) + Filter: ((a >= $1) AND (a <= $2) AND (b < $0)) + -> Seq Scan on ab_a2_b3 (never executed) + Filter: ((a >= $1) AND (a <= $2) AND (b < $0)) +(10 rows) + +-- As above, but swap the PARAM_EXEC Param to the first partition level +prepare ab_q3 (int, int) as +select a from ab where b between $1 and $2 and a < (select 3); +execute ab_q3 (1, 8); + a +--- +(0 rows) + +execute ab_q3 (1, 8); + a +--- +(0 rows) + +execute ab_q3 (1, 8); + a +--- +(0 rows) + +execute ab_q3 (1, 8); + a +--- +(0 rows) + +execute ab_q3 (1, 8); + a +--- +(0 rows) + +explain (analyze, costs off, summary off, timing off) execute ab_q3 (2, 2); + QUERY PLAN +-------------------------------------------------------- + Append (actual rows=0 loops=1) + InitPlan 1 (returns $0) + -> Result (actual rows=1 loops=1) + Subplans Removed: 6 + -> Seq Scan on ab_a1_b2 (actual rows=0 loops=1) + Filter: ((b >= $1) AND (b <= $2) AND (a < $0)) + -> Seq Scan on ab_a2_b2 (actual rows=0 loops=1) + Filter: ((b >= $1) AND (b <= $2) AND (a < $0)) + -> Seq Scan on ab_a3_b2 (never executed) + Filter: ((b >= $1) AND (b <= $2) AND (a < $0)) +(10 rows) + +-- Test a backwards Append scan +create table list_part (a int) partition by list (a); +create table list_part1 partition of list_part for values in (1); +create table list_part2 partition of list_part for values in (2); +create table list_part3 partition of list_part for values in (3); +create table list_part4 partition of list_part for values in (4); +insert into list_part select generate_series(1,4); +begin; +-- Don't select an actual value out of the table as the order of the Append's +-- subnodes may not be stable. +declare cur SCROLL CURSOR for select 1 from list_part where a > (select 1) and a < (select 4); +-- move beyond the final row +move 3 from cur; +-- Ensure we get two rows. +fetch backward all from cur; + ?column? 
+---------- + 1 + 1 +(2 rows) + +commit; +begin; +-- Test run-time pruning using stable functions +create function list_part_fn(int) returns int as $$ begin return $1; end;$$ language plpgsql stable; +-- Ensure pruning works using a stable function containing no Vars +explain (analyze, costs off, summary off, timing off) select * from list_part where a = list_part_fn(1); + QUERY PLAN +------------------------------------------------------ + Append (actual rows=1 loops=1) + Subplans Removed: 3 + -> Seq Scan on list_part1 (actual rows=1 loops=1) + Filter: (a = list_part_fn(1)) +(4 rows) + +-- Ensure pruning does not take place when the function has a Var parameter +explain (analyze, costs off, summary off, timing off) select * from list_part where a = list_part_fn(a); + QUERY PLAN +------------------------------------------------------ + Append (actual rows=4 loops=1) + -> Seq Scan on list_part1 (actual rows=1 loops=1) + Filter: (a = list_part_fn(a)) + -> Seq Scan on list_part2 (actual rows=1 loops=1) + Filter: (a = list_part_fn(a)) + -> Seq Scan on list_part3 (actual rows=1 loops=1) + Filter: (a = list_part_fn(a)) + -> Seq Scan on list_part4 (actual rows=1 loops=1) + Filter: (a = list_part_fn(a)) +(9 rows) + +-- Ensure pruning does not take place when the expression contains a Var. +explain (analyze, costs off, summary off, timing off) select * from list_part where a = list_part_fn(1) + a; + QUERY PLAN +------------------------------------------------------ + Append (actual rows=0 loops=1) + -> Seq Scan on list_part1 (actual rows=0 loops=1) + Filter: (a = (list_part_fn(1) + a)) + Rows Removed by Filter: 1 + -> Seq Scan on list_part2 (actual rows=0 loops=1) + Filter: (a = (list_part_fn(1) + a)) + Rows Removed by Filter: 1 + -> Seq Scan on list_part3 (actual rows=0 loops=1) + Filter: (a = (list_part_fn(1) + a)) + Rows Removed by Filter: 1 + -> Seq Scan on list_part4 (actual rows=0 loops=1) + Filter: (a = (list_part_fn(1) + a)) + Rows Removed by Filter: 1 +(13 rows) + +rollback; +drop table list_part; +-- Parallel append +-- Suppress the number of loops each parallel node runs for. This is because +-- more than one worker may run the same parallel node if timing conditions +-- are just right, which destabilizes the test. +create function explain_parallel_append(text) returns setof text +language plpgsql as +$$ +declare + ln text; +begin + for ln in + execute format('explain (analyze, costs off, summary off, timing off) %s', + $1) + loop + if ln like '%Parallel%' then + ln := regexp_replace(ln, 'loops=\d*', 'loops=N'); + end if; + return next ln; + end loop; +end; +$$; +prepare ab_q4 (int, int) as +select avg(a) from ab where a between $1 and $2 and b < 4; +-- Encourage use of parallel plans +set parallel_setup_cost = 0; +set parallel_tuple_cost = 0; +set min_parallel_table_scan_size = 0; +set max_parallel_workers_per_gather = 2; +-- Execute query 5 times to allow choose_custom_plan +-- to start considering a generic plan. 
+execute ab_q4 (1, 8); + avg +----- + +(1 row) + +execute ab_q4 (1, 8); + avg +----- + +(1 row) + +execute ab_q4 (1, 8); + avg +----- + +(1 row) + +execute ab_q4 (1, 8); + avg +----- + +(1 row) + +execute ab_q4 (1, 8); + avg +----- + +(1 row) + +select explain_parallel_append('execute ab_q4 (2, 2)'); + explain_parallel_append +------------------------------------------------------------------------------- + Finalize Aggregate (actual rows=1 loops=1) + -> Gather (actual rows=3 loops=1) + Workers Planned: 2 + Workers Launched: 2 + -> Partial Aggregate (actual rows=1 loops=3) + -> Parallel Append (actual rows=0 loops=N) + Subplans Removed: 6 + -> Parallel Seq Scan on ab_a2_b1 (actual rows=0 loops=N) + Filter: ((a >= $1) AND (a <= $2) AND (b < 4)) + -> Parallel Seq Scan on ab_a2_b2 (actual rows=0 loops=N) + Filter: ((a >= $1) AND (a <= $2) AND (b < 4)) + -> Parallel Seq Scan on ab_a2_b3 (actual rows=0 loops=N) + Filter: ((a >= $1) AND (a <= $2) AND (b < 4)) +(13 rows) + +-- Test run-time pruning with IN lists. +prepare ab_q5 (int, int, int) as +select avg(a) from ab where a in($1,$2,$3) and b < 4; +-- Execute query 5 times to allow choose_custom_plan +-- to start considering a generic plan. +execute ab_q5 (1, 2, 3); + avg +----- + +(1 row) + +execute ab_q5 (1, 2, 3); + avg +----- + +(1 row) + +execute ab_q5 (1, 2, 3); + avg +----- + +(1 row) + +execute ab_q5 (1, 2, 3); + avg +----- + +(1 row) + +execute ab_q5 (1, 2, 3); + avg +----- + +(1 row) + +select explain_parallel_append('execute ab_q5 (1, 1, 1)'); + explain_parallel_append +------------------------------------------------------------------------------- + Finalize Aggregate (actual rows=1 loops=1) + -> Gather (actual rows=3 loops=1) + Workers Planned: 2 + Workers Launched: 2 + -> Partial Aggregate (actual rows=1 loops=3) + -> Parallel Append (actual rows=0 loops=N) + Subplans Removed: 6 + -> Parallel Seq Scan on ab_a1_b1 (actual rows=0 loops=N) + Filter: ((b < 4) AND (a = ANY (ARRAY[$1, $2, $3]))) + -> Parallel Seq Scan on ab_a1_b2 (actual rows=0 loops=N) + Filter: ((b < 4) AND (a = ANY (ARRAY[$1, $2, $3]))) + -> Parallel Seq Scan on ab_a1_b3 (actual rows=0 loops=N) + Filter: ((b < 4) AND (a = ANY (ARRAY[$1, $2, $3]))) +(13 rows) + +select explain_parallel_append('execute ab_q5 (2, 3, 3)'); + explain_parallel_append +------------------------------------------------------------------------------- + Finalize Aggregate (actual rows=1 loops=1) + -> Gather (actual rows=3 loops=1) + Workers Planned: 2 + Workers Launched: 2 + -> Partial Aggregate (actual rows=1 loops=3) + -> Parallel Append (actual rows=0 loops=N) + Subplans Removed: 3 + -> Parallel Seq Scan on ab_a2_b1 (actual rows=0 loops=N) + Filter: ((b < 4) AND (a = ANY (ARRAY[$1, $2, $3]))) + -> Parallel Seq Scan on ab_a2_b2 (actual rows=0 loops=N) + Filter: ((b < 4) AND (a = ANY (ARRAY[$1, $2, $3]))) + -> Parallel Seq Scan on ab_a2_b3 (actual rows=0 loops=N) + Filter: ((b < 4) AND (a = ANY (ARRAY[$1, $2, $3]))) + -> Parallel Seq Scan on ab_a3_b1 (actual rows=0 loops=N) + Filter: ((b < 4) AND (a = ANY (ARRAY[$1, $2, $3]))) + -> Parallel Seq Scan on ab_a3_b2 (actual rows=0 loops=N) + Filter: ((b < 4) AND (a = ANY (ARRAY[$1, $2, $3]))) + -> Parallel Seq Scan on ab_a3_b3 (actual rows=0 loops=N) + Filter: ((b < 4) AND (a = ANY (ARRAY[$1, $2, $3]))) +(19 rows) + +-- Try some params whose values do not belong to any partition. +-- We'll still get a single subplan in this case, but it should not be scanned. 
+select explain_parallel_append('execute ab_q5 (33, 44, 55)'); + explain_parallel_append +------------------------------------------------------------------------------- + Finalize Aggregate (actual rows=1 loops=1) + -> Gather (actual rows=3 loops=1) + Workers Planned: 2 + Workers Launched: 2 + -> Partial Aggregate (actual rows=1 loops=3) + -> Parallel Append (actual rows=0 loops=N) + Subplans Removed: 8 + -> Parallel Seq Scan on ab_a1_b1 (never executed) + Filter: ((b < 4) AND (a = ANY (ARRAY[$1, $2, $3]))) +(9 rows) + +-- Test Parallel Append with PARAM_EXEC Params +select explain_parallel_append('select count(*) from ab where (a = (select 1) or a = (select 3)) and b = 2'); + explain_parallel_append +------------------------------------------------------------------------- + Aggregate (actual rows=1 loops=1) + InitPlan 1 (returns $0) + -> Result (actual rows=1 loops=1) + InitPlan 2 (returns $1) + -> Result (actual rows=1 loops=1) + -> Gather (actual rows=0 loops=1) + Workers Planned: 2 + Params Evaluated: $0, $1 + Workers Launched: 2 + -> Parallel Append (actual rows=0 loops=N) + -> Parallel Seq Scan on ab_a1_b2 (actual rows=0 loops=N) + Filter: ((b = 2) AND ((a = $0) OR (a = $1))) + -> Parallel Seq Scan on ab_a2_b2 (never executed) + Filter: ((b = 2) AND ((a = $0) OR (a = $1))) + -> Parallel Seq Scan on ab_a3_b2 (actual rows=0 loops=N) + Filter: ((b = 2) AND ((a = $0) OR (a = $1))) +(16 rows) + +-- Test pruning during parallel nested loop query +create table lprt_a (a int not null); +-- Insert some values we won't find in ab +insert into lprt_a select 0 from generate_series(1,100); +-- and insert some values that we should find. +insert into lprt_a values(1),(1); +analyze lprt_a; +create index ab_a2_b1_a_idx on ab_a2_b1 (a); +create index ab_a2_b2_a_idx on ab_a2_b2 (a); +create index ab_a2_b3_a_idx on ab_a2_b3 (a); +create index ab_a1_b1_a_idx on ab_a1_b1 (a); +create index ab_a1_b2_a_idx on ab_a1_b2 (a); +create index ab_a1_b3_a_idx on ab_a1_b3 (a); +create index ab_a3_b1_a_idx on ab_a3_b1 (a); +create index ab_a3_b2_a_idx on ab_a3_b2 (a); +create index ab_a3_b3_a_idx on ab_a3_b3 (a); +set enable_hashjoin = 0; +set enable_mergejoin = 0; +select explain_parallel_append('select avg(ab.a) from ab inner join lprt_a a on ab.a = a.a where a.a in(0, 0, 1)'); + explain_parallel_append +--------------------------------------------------------------------------------------------------- + Finalize Aggregate (actual rows=1 loops=1) + -> Gather (actual rows=2 loops=1) + Workers Planned: 1 + Workers Launched: 1 + -> Partial Aggregate (actual rows=1 loops=2) + -> Nested Loop (actual rows=0 loops=2) + -> Parallel Seq Scan on lprt_a a (actual rows=51 loops=N) + Filter: (a = ANY ('{0,0,1}'::integer[])) + -> Append (actual rows=0 loops=102) + -> Index Scan using ab_a1_b1_a_idx on ab_a1_b1 (actual rows=0 loops=2) + Index Cond: (a = a.a) + -> Index Scan using ab_a1_b2_a_idx on ab_a1_b2 (actual rows=0 loops=2) + Index Cond: (a = a.a) + -> Index Scan using ab_a1_b3_a_idx on ab_a1_b3 (actual rows=0 loops=2) + Index Cond: (a = a.a) + -> Index Scan using ab_a2_b1_a_idx on ab_a2_b1 (never executed) + Index Cond: (a = a.a) + -> Index Scan using ab_a2_b2_a_idx on ab_a2_b2 (never executed) + Index Cond: (a = a.a) + -> Index Scan using ab_a2_b3_a_idx on ab_a2_b3 (never executed) + Index Cond: (a = a.a) + -> Index Scan using ab_a3_b1_a_idx on ab_a3_b1 (never executed) + Index Cond: (a = a.a) + -> Index Scan using ab_a3_b2_a_idx on ab_a3_b2 (never executed) + Index Cond: (a = a.a) + -> Index Scan using ab_a3_b3_a_idx 
on ab_a3_b3 (never executed) + Index Cond: (a = a.a) +(27 rows) + +-- Ensure the same partitions are pruned when we make the nested loop +-- parameter an Expr rather than a plain Param. +select explain_parallel_append('select avg(ab.a) from ab inner join lprt_a a on ab.a = a.a + 0 where a.a in(0, 0, 1)'); + explain_parallel_append +--------------------------------------------------------------------------------------------------- + Finalize Aggregate (actual rows=1 loops=1) + -> Gather (actual rows=2 loops=1) + Workers Planned: 1 + Workers Launched: 1 + -> Partial Aggregate (actual rows=1 loops=2) + -> Nested Loop (actual rows=0 loops=2) + -> Parallel Seq Scan on lprt_a a (actual rows=51 loops=N) + Filter: (a = ANY ('{0,0,1}'::integer[])) + -> Append (actual rows=0 loops=102) + -> Index Scan using ab_a1_b1_a_idx on ab_a1_b1 (actual rows=0 loops=2) + Index Cond: (a = (a.a + 0)) + -> Index Scan using ab_a1_b2_a_idx on ab_a1_b2 (actual rows=0 loops=2) + Index Cond: (a = (a.a + 0)) + -> Index Scan using ab_a1_b3_a_idx on ab_a1_b3 (actual rows=0 loops=2) + Index Cond: (a = (a.a + 0)) + -> Index Scan using ab_a2_b1_a_idx on ab_a2_b1 (never executed) + Index Cond: (a = (a.a + 0)) + -> Index Scan using ab_a2_b2_a_idx on ab_a2_b2 (never executed) + Index Cond: (a = (a.a + 0)) + -> Index Scan using ab_a2_b3_a_idx on ab_a2_b3 (never executed) + Index Cond: (a = (a.a + 0)) + -> Index Scan using ab_a3_b1_a_idx on ab_a3_b1 (never executed) + Index Cond: (a = (a.a + 0)) + -> Index Scan using ab_a3_b2_a_idx on ab_a3_b2 (never executed) + Index Cond: (a = (a.a + 0)) + -> Index Scan using ab_a3_b3_a_idx on ab_a3_b3 (never executed) + Index Cond: (a = (a.a + 0)) +(27 rows) + +insert into lprt_a values(3),(3); +select explain_parallel_append('select avg(ab.a) from ab inner join lprt_a a on ab.a = a.a where a.a in(1, 0, 3)'); + explain_parallel_append +--------------------------------------------------------------------------------------------------- + Finalize Aggregate (actual rows=1 loops=1) + -> Gather (actual rows=2 loops=1) + Workers Planned: 1 + Workers Launched: 1 + -> Partial Aggregate (actual rows=1 loops=2) + -> Nested Loop (actual rows=0 loops=2) + -> Parallel Seq Scan on lprt_a a (actual rows=52 loops=N) + Filter: (a = ANY ('{1,0,3}'::integer[])) + -> Append (actual rows=0 loops=104) + -> Index Scan using ab_a1_b1_a_idx on ab_a1_b1 (actual rows=0 loops=2) + Index Cond: (a = a.a) + -> Index Scan using ab_a1_b2_a_idx on ab_a1_b2 (actual rows=0 loops=2) + Index Cond: (a = a.a) + -> Index Scan using ab_a1_b3_a_idx on ab_a1_b3 (actual rows=0 loops=2) + Index Cond: (a = a.a) + -> Index Scan using ab_a2_b1_a_idx on ab_a2_b1 (never executed) + Index Cond: (a = a.a) + -> Index Scan using ab_a2_b2_a_idx on ab_a2_b2 (never executed) + Index Cond: (a = a.a) + -> Index Scan using ab_a2_b3_a_idx on ab_a2_b3 (never executed) + Index Cond: (a = a.a) + -> Index Scan using ab_a3_b1_a_idx on ab_a3_b1 (actual rows=0 loops=2) + Index Cond: (a = a.a) + -> Index Scan using ab_a3_b2_a_idx on ab_a3_b2 (actual rows=0 loops=2) + Index Cond: (a = a.a) + -> Index Scan using ab_a3_b3_a_idx on ab_a3_b3 (actual rows=0 loops=2) + Index Cond: (a = a.a) +(27 rows) + +select explain_parallel_append('select avg(ab.a) from ab inner join lprt_a a on ab.a = a.a where a.a in(1, 0, 0)'); + explain_parallel_append +--------------------------------------------------------------------------------------------------- + Finalize Aggregate (actual rows=1 loops=1) + -> Gather (actual rows=2 loops=1) + Workers Planned: 1 + Workers Launched: 1 + -> 
Partial Aggregate (actual rows=1 loops=2) + -> Nested Loop (actual rows=0 loops=2) + -> Parallel Seq Scan on lprt_a a (actual rows=51 loops=N) + Filter: (a = ANY ('{1,0,0}'::integer[])) + Rows Removed by Filter: 1 + -> Append (actual rows=0 loops=102) + -> Index Scan using ab_a1_b1_a_idx on ab_a1_b1 (actual rows=0 loops=2) + Index Cond: (a = a.a) + -> Index Scan using ab_a1_b2_a_idx on ab_a1_b2 (actual rows=0 loops=2) + Index Cond: (a = a.a) + -> Index Scan using ab_a1_b3_a_idx on ab_a1_b3 (actual rows=0 loops=2) + Index Cond: (a = a.a) + -> Index Scan using ab_a2_b1_a_idx on ab_a2_b1 (never executed) + Index Cond: (a = a.a) + -> Index Scan using ab_a2_b2_a_idx on ab_a2_b2 (never executed) + Index Cond: (a = a.a) + -> Index Scan using ab_a2_b3_a_idx on ab_a2_b3 (never executed) + Index Cond: (a = a.a) + -> Index Scan using ab_a3_b1_a_idx on ab_a3_b1 (never executed) + Index Cond: (a = a.a) + -> Index Scan using ab_a3_b2_a_idx on ab_a3_b2 (never executed) + Index Cond: (a = a.a) + -> Index Scan using ab_a3_b3_a_idx on ab_a3_b3 (never executed) + Index Cond: (a = a.a) +(28 rows) + +delete from lprt_a where a = 1; +select explain_parallel_append('select avg(ab.a) from ab inner join lprt_a a on ab.a = a.a where a.a in(1, 0, 0)'); + explain_parallel_append +-------------------------------------------------------------------------------------------- + Finalize Aggregate (actual rows=1 loops=1) + -> Gather (actual rows=2 loops=1) + Workers Planned: 1 + Workers Launched: 1 + -> Partial Aggregate (actual rows=1 loops=2) + -> Nested Loop (actual rows=0 loops=2) + -> Parallel Seq Scan on lprt_a a (actual rows=50 loops=N) + Filter: (a = ANY ('{1,0,0}'::integer[])) + Rows Removed by Filter: 1 + -> Append (actual rows=0 loops=100) + -> Index Scan using ab_a1_b1_a_idx on ab_a1_b1 (never executed) + Index Cond: (a = a.a) + -> Index Scan using ab_a1_b2_a_idx on ab_a1_b2 (never executed) + Index Cond: (a = a.a) + -> Index Scan using ab_a1_b3_a_idx on ab_a1_b3 (never executed) + Index Cond: (a = a.a) + -> Index Scan using ab_a2_b1_a_idx on ab_a2_b1 (never executed) + Index Cond: (a = a.a) + -> Index Scan using ab_a2_b2_a_idx on ab_a2_b2 (never executed) + Index Cond: (a = a.a) + -> Index Scan using ab_a2_b3_a_idx on ab_a2_b3 (never executed) + Index Cond: (a = a.a) + -> Index Scan using ab_a3_b1_a_idx on ab_a3_b1 (never executed) + Index Cond: (a = a.a) + -> Index Scan using ab_a3_b2_a_idx on ab_a3_b2 (never executed) + Index Cond: (a = a.a) + -> Index Scan using ab_a3_b3_a_idx on ab_a3_b3 (never executed) + Index Cond: (a = a.a) +(28 rows) + +reset enable_hashjoin; +reset enable_mergejoin; +reset parallel_setup_cost; +reset parallel_tuple_cost; +reset min_parallel_table_scan_size; +reset max_parallel_workers_per_gather; +-- Test run-time partition pruning with an initplan +explain (analyze, costs off, summary off, timing off) +select * from ab where a = (select max(a) from lprt_a) and b = (select max(a)-1 from lprt_a); + QUERY PLAN +------------------------------------------------------------------------- + Append (actual rows=0 loops=1) + InitPlan 1 (returns $0) + -> Aggregate (actual rows=1 loops=1) + -> Seq Scan on lprt_a (actual rows=102 loops=1) + InitPlan 2 (returns $1) + -> Aggregate (actual rows=1 loops=1) + -> Seq Scan on lprt_a lprt_a_1 (actual rows=102 loops=1) + -> Bitmap Heap Scan on ab_a1_b1 (never executed) + Recheck Cond: (a = $0) + Filter: (b = $1) + -> Bitmap Index Scan on ab_a1_b1_a_idx (never executed) + Index Cond: (a = $0) + -> Bitmap Heap Scan on ab_a1_b2 (never executed) + Recheck 
Cond: (a = $0) + Filter: (b = $1) + -> Bitmap Index Scan on ab_a1_b2_a_idx (never executed) + Index Cond: (a = $0) + -> Bitmap Heap Scan on ab_a1_b3 (never executed) + Recheck Cond: (a = $0) + Filter: (b = $1) + -> Bitmap Index Scan on ab_a1_b3_a_idx (never executed) + Index Cond: (a = $0) + -> Bitmap Heap Scan on ab_a2_b1 (never executed) + Recheck Cond: (a = $0) + Filter: (b = $1) + -> Bitmap Index Scan on ab_a2_b1_a_idx (never executed) + Index Cond: (a = $0) + -> Bitmap Heap Scan on ab_a2_b2 (never executed) + Recheck Cond: (a = $0) + Filter: (b = $1) + -> Bitmap Index Scan on ab_a2_b2_a_idx (never executed) + Index Cond: (a = $0) + -> Bitmap Heap Scan on ab_a2_b3 (never executed) + Recheck Cond: (a = $0) + Filter: (b = $1) + -> Bitmap Index Scan on ab_a2_b3_a_idx (never executed) + Index Cond: (a = $0) + -> Bitmap Heap Scan on ab_a3_b1 (never executed) + Recheck Cond: (a = $0) + Filter: (b = $1) + -> Bitmap Index Scan on ab_a3_b1_a_idx (never executed) + Index Cond: (a = $0) + -> Bitmap Heap Scan on ab_a3_b2 (actual rows=0 loops=1) + Recheck Cond: (a = $0) + Filter: (b = $1) + -> Bitmap Index Scan on ab_a3_b2_a_idx (actual rows=0 loops=1) + Index Cond: (a = $0) + -> Bitmap Heap Scan on ab_a3_b3 (never executed) + Recheck Cond: (a = $0) + Filter: (b = $1) + -> Bitmap Index Scan on ab_a3_b3_a_idx (never executed) + Index Cond: (a = $0) +(52 rows) + +-- Test run-time partition pruning with UNION ALL parents +explain (analyze, costs off, summary off, timing off) +select * from (select * from ab where a = 1 union all select * from ab) ab where b = (select 1); + QUERY PLAN +------------------------------------------------------------------------------- + Append (actual rows=0 loops=1) + InitPlan 1 (returns $0) + -> Result (actual rows=1 loops=1) + -> Append (actual rows=0 loops=1) + -> Bitmap Heap Scan on ab_a1_b1 ab_a1_b1_1 (actual rows=0 loops=1) + Recheck Cond: (a = 1) + Filter: (b = $0) + -> Bitmap Index Scan on ab_a1_b1_a_idx (actual rows=0 loops=1) + Index Cond: (a = 1) + -> Bitmap Heap Scan on ab_a1_b2 ab_a1_b2_1 (never executed) + Recheck Cond: (a = 1) + Filter: (b = $0) + -> Bitmap Index Scan on ab_a1_b2_a_idx (never executed) + Index Cond: (a = 1) + -> Bitmap Heap Scan on ab_a1_b3 ab_a1_b3_1 (never executed) + Recheck Cond: (a = 1) + Filter: (b = $0) + -> Bitmap Index Scan on ab_a1_b3_a_idx (never executed) + Index Cond: (a = 1) + -> Seq Scan on ab_a1_b1 (actual rows=0 loops=1) + Filter: (b = $0) + -> Seq Scan on ab_a1_b2 (never executed) + Filter: (b = $0) + -> Seq Scan on ab_a1_b3 (never executed) + Filter: (b = $0) + -> Seq Scan on ab_a2_b1 (actual rows=0 loops=1) + Filter: (b = $0) + -> Seq Scan on ab_a2_b2 (never executed) + Filter: (b = $0) + -> Seq Scan on ab_a2_b3 (never executed) + Filter: (b = $0) + -> Seq Scan on ab_a3_b1 (actual rows=0 loops=1) + Filter: (b = $0) + -> Seq Scan on ab_a3_b2 (never executed) + Filter: (b = $0) + -> Seq Scan on ab_a3_b3 (never executed) + Filter: (b = $0) +(37 rows) + +-- A case containing a UNION ALL with a non-partitioned child. 
+explain (analyze, costs off, summary off, timing off) +select * from (select * from ab where a = 1 union all (values(10,5)) union all select * from ab) ab where b = (select 1); + QUERY PLAN +------------------------------------------------------------------------------- + Append (actual rows=0 loops=1) + InitPlan 1 (returns $0) + -> Result (actual rows=1 loops=1) + -> Append (actual rows=0 loops=1) + -> Bitmap Heap Scan on ab_a1_b1 ab_a1_b1_1 (actual rows=0 loops=1) + Recheck Cond: (a = 1) + Filter: (b = $0) + -> Bitmap Index Scan on ab_a1_b1_a_idx (actual rows=0 loops=1) + Index Cond: (a = 1) + -> Bitmap Heap Scan on ab_a1_b2 ab_a1_b2_1 (never executed) + Recheck Cond: (a = 1) + Filter: (b = $0) + -> Bitmap Index Scan on ab_a1_b2_a_idx (never executed) + Index Cond: (a = 1) + -> Bitmap Heap Scan on ab_a1_b3 ab_a1_b3_1 (never executed) + Recheck Cond: (a = 1) + Filter: (b = $0) + -> Bitmap Index Scan on ab_a1_b3_a_idx (never executed) + Index Cond: (a = 1) + -> Result (actual rows=0 loops=1) + One-Time Filter: (5 = $0) + -> Seq Scan on ab_a1_b1 (actual rows=0 loops=1) + Filter: (b = $0) + -> Seq Scan on ab_a1_b2 (never executed) + Filter: (b = $0) + -> Seq Scan on ab_a1_b3 (never executed) + Filter: (b = $0) + -> Seq Scan on ab_a2_b1 (actual rows=0 loops=1) + Filter: (b = $0) + -> Seq Scan on ab_a2_b2 (never executed) + Filter: (b = $0) + -> Seq Scan on ab_a2_b3 (never executed) + Filter: (b = $0) + -> Seq Scan on ab_a3_b1 (actual rows=0 loops=1) + Filter: (b = $0) + -> Seq Scan on ab_a3_b2 (never executed) + Filter: (b = $0) + -> Seq Scan on ab_a3_b3 (never executed) + Filter: (b = $0) +(39 rows) + +deallocate ab_q1; +deallocate ab_q2; +deallocate ab_q3; +deallocate ab_q4; +deallocate ab_q5; +-- UPDATE on a partition subtree has been seen to have problems. 
+insert into ab values (1,2); +explain (analyze, costs off, summary off, timing off) +update ab_a1 set b = 3 from ab where ab.a = 1 and ab.a = ab_a1.a; + QUERY PLAN +------------------------------------------------------------------------------------- + Update on ab_a1 (actual rows=0 loops=1) + Update on ab_a1_b1 + Update on ab_a1_b2 + Update on ab_a1_b3 + -> Nested Loop (actual rows=0 loops=1) + -> Append (actual rows=1 loops=1) + -> Bitmap Heap Scan on ab_a1_b1 ab_a1_b1_1 (actual rows=0 loops=1) + Recheck Cond: (a = 1) + -> Bitmap Index Scan on ab_a1_b1_a_idx (actual rows=0 loops=1) + Index Cond: (a = 1) + -> Bitmap Heap Scan on ab_a1_b2 ab_a1_b2_1 (actual rows=1 loops=1) + Recheck Cond: (a = 1) + Heap Blocks: exact=1 + -> Bitmap Index Scan on ab_a1_b2_a_idx (actual rows=1 loops=1) + Index Cond: (a = 1) + -> Bitmap Heap Scan on ab_a1_b3 ab_a1_b3_1 (actual rows=0 loops=1) + Recheck Cond: (a = 1) + -> Bitmap Index Scan on ab_a1_b3_a_idx (actual rows=0 loops=1) + Index Cond: (a = 1) + -> Materialize (actual rows=0 loops=1) + -> Bitmap Heap Scan on ab_a1_b1 (actual rows=0 loops=1) + Recheck Cond: (a = 1) + -> Bitmap Index Scan on ab_a1_b1_a_idx (actual rows=0 loops=1) + Index Cond: (a = 1) + -> Nested Loop (actual rows=1 loops=1) + -> Append (actual rows=1 loops=1) + -> Bitmap Heap Scan on ab_a1_b1 ab_a1_b1_1 (actual rows=0 loops=1) + Recheck Cond: (a = 1) + -> Bitmap Index Scan on ab_a1_b1_a_idx (actual rows=0 loops=1) + Index Cond: (a = 1) + -> Bitmap Heap Scan on ab_a1_b2 ab_a1_b2_1 (actual rows=1 loops=1) + Recheck Cond: (a = 1) + Heap Blocks: exact=1 + -> Bitmap Index Scan on ab_a1_b2_a_idx (actual rows=1 loops=1) + Index Cond: (a = 1) + -> Bitmap Heap Scan on ab_a1_b3 ab_a1_b3_1 (actual rows=0 loops=1) + Recheck Cond: (a = 1) + -> Bitmap Index Scan on ab_a1_b3_a_idx (actual rows=1 loops=1) + Index Cond: (a = 1) + -> Materialize (actual rows=1 loops=1) + -> Bitmap Heap Scan on ab_a1_b2 (actual rows=1 loops=1) + Recheck Cond: (a = 1) + Heap Blocks: exact=1 + -> Bitmap Index Scan on ab_a1_b2_a_idx (actual rows=1 loops=1) + Index Cond: (a = 1) + -> Nested Loop (actual rows=0 loops=1) + -> Append (actual rows=1 loops=1) + -> Bitmap Heap Scan on ab_a1_b1 ab_a1_b1_1 (actual rows=0 loops=1) + Recheck Cond: (a = 1) + -> Bitmap Index Scan on ab_a1_b1_a_idx (actual rows=0 loops=1) + Index Cond: (a = 1) + -> Bitmap Heap Scan on ab_a1_b2 ab_a1_b2_1 (actual rows=1 loops=1) + Recheck Cond: (a = 1) + Heap Blocks: exact=1 + -> Bitmap Index Scan on ab_a1_b2_a_idx (actual rows=1 loops=1) + Index Cond: (a = 1) + -> Bitmap Heap Scan on ab_a1_b3 ab_a1_b3_1 (actual rows=0 loops=1) + Recheck Cond: (a = 1) + -> Bitmap Index Scan on ab_a1_b3_a_idx (actual rows=1 loops=1) + Index Cond: (a = 1) + -> Materialize (actual rows=0 loops=1) + -> Bitmap Heap Scan on ab_a1_b3 (actual rows=0 loops=1) + Recheck Cond: (a = 1) + -> Bitmap Index Scan on ab_a1_b3_a_idx (actual rows=1 loops=1) + Index Cond: (a = 1) +(65 rows) + +table ab; + a | b +---+--- + 1 | 3 +(1 row) + +drop table ab, lprt_a; +-- Join +create table tbl1(col1 int); +insert into tbl1 values (501), (505); +-- Basic table +create table tprt (col1 int) partition by range (col1); +create table tprt_1 partition of tprt for values from (1) to (501); +create table tprt_2 partition of tprt for values from (501) to (1001); +create table tprt_3 partition of tprt for values from (1001) to (2001); +create table tprt_4 partition of tprt for values from (2001) to (3001); +create table tprt_5 partition of tprt for values from (3001) to (4001); +create table tprt_6 
partition of tprt for values from (4001) to (5001); +create index tprt1_idx on tprt_1 (col1); +create index tprt2_idx on tprt_2 (col1); +create index tprt3_idx on tprt_3 (col1); +create index tprt4_idx on tprt_4 (col1); +create index tprt5_idx on tprt_5 (col1); +create index tprt6_idx on tprt_6 (col1); +insert into tprt values (10), (20), (501), (502), (505), (1001), (4500); +set enable_hashjoin = off; +set enable_mergejoin = off; +explain (analyze, costs off, summary off, timing off) +select * from tbl1 join tprt on tbl1.col1 > tprt.col1; + QUERY PLAN +-------------------------------------------------------------------------- + Nested Loop (actual rows=6 loops=1) + -> Seq Scan on tbl1 (actual rows=2 loops=1) + -> Append (actual rows=3 loops=2) + -> Index Scan using tprt1_idx on tprt_1 (actual rows=2 loops=2) + Index Cond: (tbl1.col1 > col1) + -> Index Scan using tprt2_idx on tprt_2 (actual rows=2 loops=1) + Index Cond: (tbl1.col1 > col1) + -> Index Scan using tprt3_idx on tprt_3 (never executed) + Index Cond: (tbl1.col1 > col1) + -> Index Scan using tprt4_idx on tprt_4 (never executed) + Index Cond: (tbl1.col1 > col1) + -> Index Scan using tprt5_idx on tprt_5 (never executed) + Index Cond: (tbl1.col1 > col1) + -> Index Scan using tprt6_idx on tprt_6 (never executed) + Index Cond: (tbl1.col1 > col1) +(15 rows) + +explain (analyze, costs off, summary off, timing off) +select * from tbl1 join tprt on tbl1.col1 = tprt.col1; + QUERY PLAN +-------------------------------------------------------------------------- + Nested Loop (actual rows=2 loops=1) + -> Seq Scan on tbl1 (actual rows=2 loops=1) + -> Append (actual rows=1 loops=2) + -> Index Scan using tprt1_idx on tprt_1 (never executed) + Index Cond: (col1 = tbl1.col1) + -> Index Scan using tprt2_idx on tprt_2 (actual rows=1 loops=2) + Index Cond: (col1 = tbl1.col1) + -> Index Scan using tprt3_idx on tprt_3 (never executed) + Index Cond: (col1 = tbl1.col1) + -> Index Scan using tprt4_idx on tprt_4 (never executed) + Index Cond: (col1 = tbl1.col1) + -> Index Scan using tprt5_idx on tprt_5 (never executed) + Index Cond: (col1 = tbl1.col1) + -> Index Scan using tprt6_idx on tprt_6 (never executed) + Index Cond: (col1 = tbl1.col1) +(15 rows) + +select tbl1.col1, tprt.col1 from tbl1 +inner join tprt on tbl1.col1 > tprt.col1 +order by tbl1.col1, tprt.col1; + col1 | col1 +------+------ + 501 | 10 + 501 | 20 + 505 | 10 + 505 | 20 + 505 | 501 + 505 | 502 +(6 rows) + +select tbl1.col1, tprt.col1 from tbl1 +inner join tprt on tbl1.col1 = tprt.col1 +order by tbl1.col1, tprt.col1; + col1 | col1 +------+------ + 501 | 501 + 505 | 505 +(2 rows) + +-- Multiple partitions +insert into tbl1 values (1001), (1010), (1011); +explain (analyze, costs off, summary off, timing off) +select * from tbl1 inner join tprt on tbl1.col1 > tprt.col1; + QUERY PLAN +-------------------------------------------------------------------------- + Nested Loop (actual rows=23 loops=1) + -> Seq Scan on tbl1 (actual rows=5 loops=1) + -> Append (actual rows=5 loops=5) + -> Index Scan using tprt1_idx on tprt_1 (actual rows=2 loops=5) + Index Cond: (tbl1.col1 > col1) + -> Index Scan using tprt2_idx on tprt_2 (actual rows=3 loops=4) + Index Cond: (tbl1.col1 > col1) + -> Index Scan using tprt3_idx on tprt_3 (actual rows=1 loops=2) + Index Cond: (tbl1.col1 > col1) + -> Index Scan using tprt4_idx on tprt_4 (never executed) + Index Cond: (tbl1.col1 > col1) + -> Index Scan using tprt5_idx on tprt_5 (never executed) + Index Cond: (tbl1.col1 > col1) + -> Index Scan using tprt6_idx on tprt_6 
(never executed) + Index Cond: (tbl1.col1 > col1) +(15 rows) + +explain (analyze, costs off, summary off, timing off) +select * from tbl1 inner join tprt on tbl1.col1 = tprt.col1; + QUERY PLAN +-------------------------------------------------------------------------- + Nested Loop (actual rows=3 loops=1) + -> Seq Scan on tbl1 (actual rows=5 loops=1) + -> Append (actual rows=1 loops=5) + -> Index Scan using tprt1_idx on tprt_1 (never executed) + Index Cond: (col1 = tbl1.col1) + -> Index Scan using tprt2_idx on tprt_2 (actual rows=1 loops=2) + Index Cond: (col1 = tbl1.col1) + -> Index Scan using tprt3_idx on tprt_3 (actual rows=0 loops=3) + Index Cond: (col1 = tbl1.col1) + -> Index Scan using tprt4_idx on tprt_4 (never executed) + Index Cond: (col1 = tbl1.col1) + -> Index Scan using tprt5_idx on tprt_5 (never executed) + Index Cond: (col1 = tbl1.col1) + -> Index Scan using tprt6_idx on tprt_6 (never executed) + Index Cond: (col1 = tbl1.col1) +(15 rows) + +select tbl1.col1, tprt.col1 from tbl1 +inner join tprt on tbl1.col1 > tprt.col1 +order by tbl1.col1, tprt.col1; + col1 | col1 +------+------ + 501 | 10 + 501 | 20 + 505 | 10 + 505 | 20 + 505 | 501 + 505 | 502 + 1001 | 10 + 1001 | 20 + 1001 | 501 + 1001 | 502 + 1001 | 505 + 1010 | 10 + 1010 | 20 + 1010 | 501 + 1010 | 502 + 1010 | 505 + 1010 | 1001 + 1011 | 10 + 1011 | 20 + 1011 | 501 + 1011 | 502 + 1011 | 505 + 1011 | 1001 +(23 rows) + +select tbl1.col1, tprt.col1 from tbl1 +inner join tprt on tbl1.col1 = tprt.col1 +order by tbl1.col1, tprt.col1; + col1 | col1 +------+------ + 501 | 501 + 505 | 505 + 1001 | 1001 +(3 rows) + +-- Last partition +delete from tbl1; +insert into tbl1 values (4400); +explain (analyze, costs off, summary off, timing off) +select * from tbl1 join tprt on tbl1.col1 < tprt.col1; + QUERY PLAN +-------------------------------------------------------------------------- + Nested Loop (actual rows=1 loops=1) + -> Seq Scan on tbl1 (actual rows=1 loops=1) + -> Append (actual rows=1 loops=1) + -> Index Scan using tprt1_idx on tprt_1 (never executed) + Index Cond: (tbl1.col1 < col1) + -> Index Scan using tprt2_idx on tprt_2 (never executed) + Index Cond: (tbl1.col1 < col1) + -> Index Scan using tprt3_idx on tprt_3 (never executed) + Index Cond: (tbl1.col1 < col1) + -> Index Scan using tprt4_idx on tprt_4 (never executed) + Index Cond: (tbl1.col1 < col1) + -> Index Scan using tprt5_idx on tprt_5 (never executed) + Index Cond: (tbl1.col1 < col1) + -> Index Scan using tprt6_idx on tprt_6 (actual rows=1 loops=1) + Index Cond: (tbl1.col1 < col1) +(15 rows) + +select tbl1.col1, tprt.col1 from tbl1 +inner join tprt on tbl1.col1 < tprt.col1 +order by tbl1.col1, tprt.col1; + col1 | col1 +------+------ + 4400 | 4500 +(1 row) + +-- No matching partition +delete from tbl1; +insert into tbl1 values (10000); +explain (analyze, costs off, summary off, timing off) +select * from tbl1 join tprt on tbl1.col1 = tprt.col1; + QUERY PLAN +------------------------------------------------------------------- + Nested Loop (actual rows=0 loops=1) + -> Seq Scan on tbl1 (actual rows=1 loops=1) + -> Append (actual rows=0 loops=1) + -> Index Scan using tprt1_idx on tprt_1 (never executed) + Index Cond: (col1 = tbl1.col1) + -> Index Scan using tprt2_idx on tprt_2 (never executed) + Index Cond: (col1 = tbl1.col1) + -> Index Scan using tprt3_idx on tprt_3 (never executed) + Index Cond: (col1 = tbl1.col1) + -> Index Scan using tprt4_idx on tprt_4 (never executed) + Index Cond: (col1 = tbl1.col1) + -> Index Scan using tprt5_idx on tprt_5 (never executed) + 
Index Cond: (col1 = tbl1.col1) + -> Index Scan using tprt6_idx on tprt_6 (never executed) + Index Cond: (col1 = tbl1.col1) +(15 rows) + +select tbl1.col1, tprt.col1 from tbl1 +inner join tprt on tbl1.col1 = tprt.col1 +order by tbl1.col1, tprt.col1; + col1 | col1 +------+------ +(0 rows) + +drop table tbl1, tprt; +-- Test with columns defined in varying orders between each level +create table part_abc (a int not null, b int not null, c int not null) partition by list (a); +create table part_bac (b int not null, a int not null, c int not null) partition by list (b); +create table part_cab (c int not null, a int not null, b int not null) partition by list (c); +create table part_abc_p1 (a int not null, b int not null, c int not null); +alter table part_abc attach partition part_bac for values in(1); +alter table part_bac attach partition part_cab for values in(2); +alter table part_cab attach partition part_abc_p1 for values in(3); +prepare part_abc_q1 (int, int, int) as +select * from part_abc where a = $1 and b = $2 and c = $3; +-- Execute query 5 times to allow choose_custom_plan +-- to start considering a generic plan. +execute part_abc_q1 (1, 2, 3); + a | b | c +---+---+--- +(0 rows) + +execute part_abc_q1 (1, 2, 3); + a | b | c +---+---+--- +(0 rows) + +execute part_abc_q1 (1, 2, 3); + a | b | c +---+---+--- +(0 rows) + +execute part_abc_q1 (1, 2, 3); + a | b | c +---+---+--- +(0 rows) + +execute part_abc_q1 (1, 2, 3); + a | b | c +---+---+--- +(0 rows) + +-- Single partition should be scanned. +explain (analyze, costs off, summary off, timing off) execute part_abc_q1 (1, 2, 3); + QUERY PLAN +------------------------------------------------------- + Append (actual rows=0 loops=1) + -> Seq Scan on part_abc_p1 (actual rows=0 loops=1) + Filter: ((a = $1) AND (b = $2) AND (c = $3)) +(3 rows) + +deallocate part_abc_q1; +drop table part_abc; +-- Ensure that an Append node properly handles a sub-partitioned table +-- matching without any of its leaf partitions matching the clause. +create table listp (a int, b int) partition by list (a); +create table listp_1 partition of listp for values in(1) partition by list (b); +create table listp_1_1 partition of listp_1 for values in(1); +create table listp_2 partition of listp for values in(2) partition by list (b); +create table listp_2_1 partition of listp_2 for values in(2); +select * from listp where b = 1; + a | b +---+--- +(0 rows) + +-- Ensure that an Append node properly can handle selection of all first level +-- partitions before finally detecting the correct set of 2nd level partitions +-- which match the given parameter. +prepare q1 (int,int) as select * from listp where b in ($1,$2); +execute q1 (1,2); + a | b +---+--- +(0 rows) + +execute q1 (1,2); + a | b +---+--- +(0 rows) + +execute q1 (1,2); + a | b +---+--- +(0 rows) + +execute q1 (1,2); + a | b +---+--- +(0 rows) + +execute q1 (1,2); + a | b +---+--- +(0 rows) + +explain (analyze, costs off, summary off, timing off) execute q1 (1,1); + QUERY PLAN +----------------------------------------------------- + Append (actual rows=0 loops=1) + Subplans Removed: 1 + -> Seq Scan on listp_1_1 (actual rows=0 loops=1) + Filter: (b = ANY (ARRAY[$1, $2])) +(4 rows) + +explain (analyze, costs off, summary off, timing off) execute q1 (2,2); + QUERY PLAN +----------------------------------------------------- + Append (actual rows=0 loops=1) + Subplans Removed: 1 + -> Seq Scan on listp_2_1 (actual rows=0 loops=1) + Filter: (b = ANY (ARRAY[$1, $2])) +(4 rows) + +-- Try with no matching partitions. 
One subplan should remain in this case, +-- but it shouldn't be executed. +explain (analyze, costs off, summary off, timing off) execute q1 (0,0); + QUERY PLAN +---------------------------------------------- + Append (actual rows=0 loops=1) + Subplans Removed: 1 + -> Seq Scan on listp_1_1 (never executed) + Filter: (b = ANY (ARRAY[$1, $2])) +(4 rows) + +deallocate q1; +-- Test more complex cases where a not-equal condition further eliminates partitions. +prepare q1 (int,int,int,int) as select * from listp where b in($1,$2) and $3 <> b and $4 <> b; +execute q1 (1,2,3,4); + a | b +---+--- +(0 rows) + +execute q1 (1,2,3,4); + a | b +---+--- +(0 rows) + +execute q1 (1,2,3,4); + a | b +---+--- +(0 rows) + +execute q1 (1,2,3,4); + a | b +---+--- +(0 rows) + +execute q1 (1,2,3,4); + a | b +---+--- +(0 rows) + +-- Both partitions allowed by IN clause, but one disallowed by <> clause +explain (analyze, costs off, summary off, timing off) execute q1 (1,2,2,0); + QUERY PLAN +------------------------------------------------------------------------- + Append (actual rows=0 loops=1) + Subplans Removed: 1 + -> Seq Scan on listp_1_1 (actual rows=0 loops=1) + Filter: ((b = ANY (ARRAY[$1, $2])) AND ($3 <> b) AND ($4 <> b)) +(4 rows) + +-- Both partitions allowed by IN clause, then both excluded again by <> clauses. +-- One subplan will remain in this case, but it should not be executed. +explain (analyze, costs off, summary off, timing off) execute q1 (1,2,2,1); + QUERY PLAN +------------------------------------------------------------------------- + Append (actual rows=0 loops=1) + Subplans Removed: 1 + -> Seq Scan on listp_1_1 (never executed) + Filter: ((b = ANY (ARRAY[$1, $2])) AND ($3 <> b) AND ($4 <> b)) +(4 rows) + +-- Ensure Params that evaluate to NULL properly prune away all partitions +explain (analyze, costs off, summary off, timing off) +select * from listp where a = (select null::int); + QUERY PLAN +---------------------------------------------- + Append (actual rows=0 loops=1) + InitPlan 1 (returns $0) + -> Result (actual rows=1 loops=1) + -> Seq Scan on listp_1_1 (never executed) + Filter: (a = $0) + -> Seq Scan on listp_2_1 (never executed) + Filter: (a = $0) +(7 rows) + +drop table listp; +-- Ensure runtime pruning works with initplans params with boolean types +create table boolvalues (value bool not null); +insert into boolvalues values('t'),('f'); +create table boolp (a bool) partition by list (a); +create table boolp_t partition of boolp for values in('t'); +create table boolp_f partition of boolp for values in('f'); +explain (analyze, costs off, summary off, timing off) +select * from boolp where a = (select value from boolvalues where value); + QUERY PLAN +-------------------------------------------------------- + Append (actual rows=0 loops=1) + InitPlan 1 (returns $0) + -> Seq Scan on boolvalues (actual rows=1 loops=1) + Filter: value + Rows Removed by Filter: 1 + -> Seq Scan on boolp_f (never executed) + Filter: (a = $0) + -> Seq Scan on boolp_t (actual rows=0 loops=1) + Filter: (a = $0) +(9 rows) + +explain (analyze, costs off, summary off, timing off) +select * from boolp where a = (select value from boolvalues where not value); + QUERY PLAN +-------------------------------------------------------- + Append (actual rows=0 loops=1) + InitPlan 1 (returns $0) + -> Seq Scan on boolvalues (actual rows=1 loops=1) + Filter: (NOT value) + Rows Removed by Filter: 1 + -> Seq Scan on boolp_f (actual rows=0 loops=1) + Filter: (a = $0) + -> Seq Scan on boolp_t (never executed) + Filter: (a = 
$0) +(9 rows) + +drop table boolp; +-- +-- Test run-time pruning of MergeAppend subnodes +-- +set enable_seqscan = off; +set enable_sort = off; +create table ma_test (a int) partition by range (a); +create table ma_test_p1 partition of ma_test for values from (0) to (10); +create table ma_test_p2 partition of ma_test for values from (10) to (20); +create table ma_test_p3 partition of ma_test for values from (20) to (30); +insert into ma_test select x from generate_series(0,29) t(x); +create index on ma_test (a); +analyze ma_test; +prepare mt_q1 (int) as select * from ma_test where a >= $1 and a % 10 = 5 order by a; +-- Execute query 5 times to allow choose_custom_plan +-- to start considering a generic plan. +execute mt_q1(0); + a +---- + 5 + 15 + 25 +(3 rows) + +execute mt_q1(0); + a +---- + 5 + 15 + 25 +(3 rows) + +execute mt_q1(0); + a +---- + 5 + 15 + 25 +(3 rows) + +execute mt_q1(0); + a +---- + 5 + 15 + 25 +(3 rows) + +execute mt_q1(0); + a +---- + 5 + 15 + 25 +(3 rows) + +explain (analyze, costs off, summary off, timing off) execute mt_q1(15); + QUERY PLAN +------------------------------------------------------------------------------- + Merge Append (actual rows=2 loops=1) + Sort Key: ma_test_p2.a + Subplans Removed: 1 + -> Index Scan using ma_test_p2_a_idx on ma_test_p2 (actual rows=1 loops=1) + Index Cond: (a >= $1) + Filter: ((a % 10) = 5) + Rows Removed by Filter: 4 + -> Index Scan using ma_test_p3_a_idx on ma_test_p3 (actual rows=1 loops=1) + Index Cond: (a >= $1) + Filter: ((a % 10) = 5) + Rows Removed by Filter: 9 +(11 rows) + +execute mt_q1(15); + a +---- + 15 + 25 +(2 rows) + +explain (analyze, costs off, summary off, timing off) execute mt_q1(25); + QUERY PLAN +------------------------------------------------------------------------------- + Merge Append (actual rows=1 loops=1) + Sort Key: ma_test_p3.a + Subplans Removed: 2 + -> Index Scan using ma_test_p3_a_idx on ma_test_p3 (actual rows=1 loops=1) + Index Cond: (a >= $1) + Filter: ((a % 10) = 5) + Rows Removed by Filter: 4 +(7 rows) + +execute mt_q1(25); + a +---- + 25 +(1 row) + +-- Ensure MergeAppend behaves correctly when no subplans match +explain (analyze, costs off, summary off, timing off) execute mt_q1(35); + QUERY PLAN +------------------------------------------------------------------------ + Merge Append (actual rows=0 loops=1) + Sort Key: ma_test_p1.a + Subplans Removed: 2 + -> Index Scan using ma_test_p1_a_idx on ma_test_p1 (never executed) + Index Cond: (a >= $1) + Filter: ((a % 10) = 5) +(6 rows) + +execute mt_q1(35); + a +--- +(0 rows) + +deallocate mt_q1; +-- ensure initplan params properly prune partitions +explain (analyze, costs off, summary off, timing off) select * from ma_test where a >= (select min(a) from ma_test_p2) order by a; + QUERY PLAN +------------------------------------------------------------------------------------------------------------ + Merge Append (actual rows=20 loops=1) + Sort Key: ma_test_p1.a + InitPlan 2 (returns $1) + -> Result (actual rows=1 loops=1) + InitPlan 1 (returns $0) + -> Limit (actual rows=1 loops=1) + -> Index Scan using ma_test_p2_a_idx on ma_test_p2 ma_test_p2_1 (actual rows=1 loops=1) + Index Cond: (a IS NOT NULL) + -> Index Scan using ma_test_p1_a_idx on ma_test_p1 (never executed) + Index Cond: (a >= $1) + -> Index Scan using ma_test_p2_a_idx on ma_test_p2 (actual rows=10 loops=1) + Index Cond: (a >= $1) + -> Index Scan using ma_test_p3_a_idx on ma_test_p3 (actual rows=10 loops=1) + Index Cond: (a >= $1) +(14 rows) + +reset enable_seqscan; +reset 
enable_sort; +drop table ma_test; +reset enable_indexonlyscan; +-- +-- check that pruning works properly when the partition key is of a +-- pseudotype +-- +-- array type list partition key +create table pp_arrpart (a int[]) partition by list (a); +create table pp_arrpart1 partition of pp_arrpart for values in ('{1}'); +create table pp_arrpart2 partition of pp_arrpart for values in ('{2, 3}', '{4, 5}'); +explain (costs off) select * from pp_arrpart where a = '{1}'; + QUERY PLAN +---------------------------------------- + Append + -> Seq Scan on pp_arrpart1 + Filter: (a = '{1}'::integer[]) +(3 rows) + +explain (costs off) select * from pp_arrpart where a = '{1, 2}'; + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +explain (costs off) select * from pp_arrpart where a in ('{4, 5}', '{1}'); + QUERY PLAN +---------------------------------------------------------------------- + Append + -> Seq Scan on pp_arrpart1 + Filter: ((a = '{4,5}'::integer[]) OR (a = '{1}'::integer[])) + -> Seq Scan on pp_arrpart2 + Filter: ((a = '{4,5}'::integer[]) OR (a = '{1}'::integer[])) +(5 rows) + +explain (costs off) update pp_arrpart set a = a where a = '{1}'; + QUERY PLAN +---------------------------------------- + Update on pp_arrpart + Update on pp_arrpart1 + -> Seq Scan on pp_arrpart1 + Filter: (a = '{1}'::integer[]) +(4 rows) + +explain (costs off) delete from pp_arrpart where a = '{1}'; + QUERY PLAN +---------------------------------------- + Delete on pp_arrpart + Delete on pp_arrpart1 + -> Seq Scan on pp_arrpart1 + Filter: (a = '{1}'::integer[]) +(4 rows) + +drop table pp_arrpart; +-- array type hash partition key +create table pph_arrpart (a int[]) partition by hash (a); +create table pph_arrpart1 partition of pph_arrpart for values with (modulus 2, remainder 0); +create table pph_arrpart2 partition of pph_arrpart for values with (modulus 2, remainder 1); +insert into pph_arrpart values ('{1}'), ('{1, 2}'), ('{4, 5}'); +select tableoid::regclass, * from pph_arrpart order by 1; + tableoid | a +--------------+------- + pph_arrpart1 | {1,2} + pph_arrpart1 | {4,5} + pph_arrpart2 | {1} +(3 rows) + +explain (costs off) select * from pph_arrpart where a = '{1}'; + QUERY PLAN +---------------------------------------- + Append + -> Seq Scan on pph_arrpart2 + Filter: (a = '{1}'::integer[]) +(3 rows) + +explain (costs off) select * from pph_arrpart where a = '{1, 2}'; + QUERY PLAN +------------------------------------------ + Append + -> Seq Scan on pph_arrpart1 + Filter: (a = '{1,2}'::integer[]) +(3 rows) + +explain (costs off) select * from pph_arrpart where a in ('{4, 5}', '{1}'); + QUERY PLAN +---------------------------------------------------------------------- + Append + -> Seq Scan on pph_arrpart1 + Filter: ((a = '{4,5}'::integer[]) OR (a = '{1}'::integer[])) + -> Seq Scan on pph_arrpart2 + Filter: ((a = '{4,5}'::integer[]) OR (a = '{1}'::integer[])) +(5 rows) + +drop table pph_arrpart; +-- enum type list partition key +create type pp_colors as enum ('green', 'blue', 'black'); +create table pp_enumpart (a pp_colors) partition by list (a); +create table pp_enumpart_green partition of pp_enumpart for values in ('green'); +create table pp_enumpart_blue partition of pp_enumpart for values in ('blue'); +explain (costs off) select * from pp_enumpart where a = 'blue'; + QUERY PLAN +----------------------------------------- + Append + -> Seq Scan on pp_enumpart_blue + Filter: (a = 'blue'::pp_colors) +(3 rows) + +explain (costs off) select * from pp_enumpart where a = 'black'; + 
QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +drop table pp_enumpart; +drop type pp_colors; +-- record type as partition key +create type pp_rectype as (a int, b int); +create table pp_recpart (a pp_rectype) partition by list (a); +create table pp_recpart_11 partition of pp_recpart for values in ('(1,1)'); +create table pp_recpart_23 partition of pp_recpart for values in ('(2,3)'); +explain (costs off) select * from pp_recpart where a = '(1,1)'::pp_rectype; + QUERY PLAN +------------------------------------------- + Append + -> Seq Scan on pp_recpart_11 + Filter: (a = '(1,1)'::pp_rectype) +(3 rows) + +explain (costs off) select * from pp_recpart where a = '(1,2)'::pp_rectype; + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +drop table pp_recpart; +drop type pp_rectype; +-- range type partition key +create table pp_intrangepart (a int4range) partition by list (a); +create table pp_intrangepart12 partition of pp_intrangepart for values in ('[1,2]'); +create table pp_intrangepart2inf partition of pp_intrangepart for values in ('[2,)'); +explain (costs off) select * from pp_intrangepart where a = '[1,2]'::int4range; + QUERY PLAN +------------------------------------------ + Append + -> Seq Scan on pp_intrangepart12 + Filter: (a = '[1,3)'::int4range) +(3 rows) + +explain (costs off) select * from pp_intrangepart where a = '(1,2)'::int4range; + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +drop table pp_intrangepart; +-- +-- Ensure the enable_partition_prune GUC properly disables partition pruning. +-- +create table pp_lp (a int, value int) partition by list (a); +create table pp_lp1 partition of pp_lp for values in(1); +create table pp_lp2 partition of pp_lp for values in(2); +explain (costs off) select * from pp_lp where a = 1; + QUERY PLAN +-------------------------- + Append + -> Seq Scan on pp_lp1 + Filter: (a = 1) +(3 rows) + +explain (costs off) update pp_lp set value = 10 where a = 1; + QUERY PLAN +-------------------------- + Update on pp_lp + Update on pp_lp1 + -> Seq Scan on pp_lp1 + Filter: (a = 1) +(4 rows) + +explain (costs off) delete from pp_lp where a = 1; + QUERY PLAN +-------------------------- + Delete on pp_lp + Delete on pp_lp1 + -> Seq Scan on pp_lp1 + Filter: (a = 1) +(4 rows) + +set enable_partition_pruning = off; +set constraint_exclusion = 'partition'; -- this should not affect the result. +explain (costs off) select * from pp_lp where a = 1; + QUERY PLAN +-------------------------- + Append + -> Seq Scan on pp_lp1 + Filter: (a = 1) + -> Seq Scan on pp_lp2 + Filter: (a = 1) +(5 rows) + +explain (costs off) update pp_lp set value = 10 where a = 1; + QUERY PLAN +-------------------------- + Update on pp_lp + Update on pp_lp1 + Update on pp_lp2 + -> Seq Scan on pp_lp1 + Filter: (a = 1) + -> Seq Scan on pp_lp2 + Filter: (a = 1) +(7 rows) + +explain (costs off) delete from pp_lp where a = 1; + QUERY PLAN +-------------------------- + Delete on pp_lp + Delete on pp_lp1 + Delete on pp_lp2 + -> Seq Scan on pp_lp1 + Filter: (a = 1) + -> Seq Scan on pp_lp2 + Filter: (a = 1) +(7 rows) + +set constraint_exclusion = 'off'; -- this should not affect the result. 
+explain (costs off) select * from pp_lp where a = 1; + QUERY PLAN +-------------------------- + Append + -> Seq Scan on pp_lp1 + Filter: (a = 1) + -> Seq Scan on pp_lp2 + Filter: (a = 1) +(5 rows) + +explain (costs off) update pp_lp set value = 10 where a = 1; + QUERY PLAN +-------------------------- + Update on pp_lp + Update on pp_lp1 + Update on pp_lp2 + -> Seq Scan on pp_lp1 + Filter: (a = 1) + -> Seq Scan on pp_lp2 + Filter: (a = 1) +(7 rows) + +explain (costs off) delete from pp_lp where a = 1; + QUERY PLAN +-------------------------- + Delete on pp_lp + Delete on pp_lp1 + Delete on pp_lp2 + -> Seq Scan on pp_lp1 + Filter: (a = 1) + -> Seq Scan on pp_lp2 + Filter: (a = 1) +(7 rows) + +drop table pp_lp; +-- Ensure enable_partition_prune does not affect non-partitioned tables. +create table inh_lp (a int, value int); +create table inh_lp1 (a int, value int, check(a = 1)) inherits (inh_lp); +NOTICE: merging column "a" with inherited definition +NOTICE: merging column "value" with inherited definition +create table inh_lp2 (a int, value int, check(a = 2)) inherits (inh_lp); +NOTICE: merging column "a" with inherited definition +NOTICE: merging column "value" with inherited definition +set constraint_exclusion = 'partition'; +-- inh_lp2 should be removed in the following 3 cases. +explain (costs off) select * from inh_lp where a = 1; + QUERY PLAN +--------------------------- + Append + -> Seq Scan on inh_lp + Filter: (a = 1) + -> Seq Scan on inh_lp1 + Filter: (a = 1) +(5 rows) + +explain (costs off) update inh_lp set value = 10 where a = 1; + QUERY PLAN +--------------------------- + Update on inh_lp + Update on inh_lp + Update on inh_lp1 + -> Seq Scan on inh_lp + Filter: (a = 1) + -> Seq Scan on inh_lp1 + Filter: (a = 1) +(7 rows) + +explain (costs off) delete from inh_lp where a = 1; + QUERY PLAN +--------------------------- + Delete on inh_lp + Delete on inh_lp + Delete on inh_lp1 + -> Seq Scan on inh_lp + Filter: (a = 1) + -> Seq Scan on inh_lp1 + Filter: (a = 1) +(7 rows) + +-- Ensure we don't exclude normal relations when we only expect to exclude +-- inheritance children +explain (costs off) update inh_lp1 set value = 10 where a = 2; + QUERY PLAN +--------------------------- + Update on inh_lp1 + -> Seq Scan on inh_lp1 + Filter: (a = 2) +(3 rows) + +\set VERBOSITY terse \\ -- suppress cascade details +drop table inh_lp cascade; +NOTICE: drop cascades to 2 other objects +\set VERBOSITY default +reset enable_partition_pruning; +reset constraint_exclusion; +-- Check pruning for a partition tree containing only temporary relations +create temp table pp_temp_parent (a int) partition by list (a); +create temp table pp_temp_part_1 partition of pp_temp_parent for values in (1); +create temp table pp_temp_part_def partition of pp_temp_parent default; +explain (costs off) select * from pp_temp_parent where true; + QUERY PLAN +------------------------------------ + Append + -> Seq Scan on pp_temp_part_1 + -> Seq Scan on pp_temp_part_def +(3 rows) + +explain (costs off) select * from pp_temp_parent where a = 2; + QUERY PLAN +------------------------------------ + Append + -> Seq Scan on pp_temp_part_def + Filter: (a = 2) +(3 rows) + +drop table pp_temp_parent; +-- Stress run-time partition pruning a bit more, per bug reports +create temp table p (a int, b int, c int) partition by list (a); +create temp table p1 partition of p for values in (1); +create temp table p2 partition of p for values in (2); +create temp table q (a int, b int, c int) partition by list (a); +create temp table q1 
partition of q for values in (1) partition by list (b); +create temp table q11 partition of q1 for values in (1) partition by list (c); +create temp table q111 partition of q11 for values in (1); +create temp table q2 partition of q for values in (2) partition by list (b); +create temp table q21 partition of q2 for values in (1); +create temp table q22 partition of q2 for values in (2); +insert into q22 values (2, 2, 3); +explain (costs off) +select * +from ( + select * from p + union all + select * from q1 + union all + select 1, 1, 1 + ) s(a, b, c) +where s.a = 1 and s.b = 1 and s.c = (select 1); + QUERY PLAN +---------------------------------------------------- + Append + InitPlan 1 (returns $0) + -> Result + -> Seq Scan on p1 + Filter: ((a = 1) AND (b = 1) AND (c = $0)) + -> Seq Scan on q111 + Filter: ((a = 1) AND (b = 1) AND (c = $0)) + -> Result + One-Time Filter: (1 = $0) +(9 rows) + +select * +from ( + select * from p + union all + select * from q1 + union all + select 1, 1, 1 + ) s(a, b, c) +where s.a = 1 and s.b = 1 and s.c = (select 1); + a | b | c +---+---+--- + 1 | 1 | 1 +(1 row) + +prepare q (int, int) as +select * +from ( + select * from p + union all + select * from q1 + union all + select 1, 1, 1 + ) s(a, b, c) +where s.a = $1 and s.b = $2 and s.c = (select 1); +set plan_cache_mode to force_generic_plan; +explain (costs off) execute q (1, 1); + QUERY PLAN +--------------------------------------------------------------- + Append + InitPlan 1 (returns $0) + -> Result + Subplans Removed: 1 + -> Seq Scan on p1 + Filter: ((a = $1) AND (b = $2) AND (c = $0)) + -> Seq Scan on q111 + Filter: ((a = $1) AND (b = $2) AND (c = $0)) + -> Result + One-Time Filter: ((1 = $1) AND (1 = $2) AND (1 = $0)) +(10 rows) + +execute q (1, 1); + a | b | c +---+---+--- + 1 | 1 | 1 +(1 row) + +reset plan_cache_mode; +drop table p, q; +-- Ensure run-time pruning works correctly when we match a partitioned table +-- on the first level but find no matching partitions on the second level. 
+create table listp (a int, b int) partition by list (a); +create table listp1 partition of listp for values in(1); +create table listp2 partition of listp for values in(2) partition by list(b); +create table listp2_10 partition of listp2 for values in (10); +explain (analyze, costs off, summary off, timing off) +select * from listp where a = (select 2) and b <> 10; + QUERY PLAN +------------------------------------------- + Append (actual rows=0 loops=1) + InitPlan 1 (returns $0) + -> Result (actual rows=1 loops=1) + -> Seq Scan on listp1 (never executed) + Filter: ((b <> 10) AND (a = $0)) +(5 rows) + +drop table listp; diff --git a/src/test/regress/expected/path.out b/src/test/regress/expected/path.out index 08d6d61dda..bd6e467752 100644 --- a/src/test/regress/expected/path.out +++ b/src/test/regress/expected/path.out @@ -4,14 +4,19 @@ --DROP TABLE PATH_TBL; CREATE TABLE PATH_TBL (f1 path); INSERT INTO PATH_TBL VALUES ('[(1,2),(3,4)]'); -INSERT INTO PATH_TBL VALUES ('((1,2),(3,4))'); -INSERT INTO PATH_TBL VALUES ('[(0,0),(3,0),(4,5),(1,6)]'); -INSERT INTO PATH_TBL VALUES ('((1,2),(3,4))'); -INSERT INTO PATH_TBL VALUES ('1,2 ,3,4'); -INSERT INTO PATH_TBL VALUES ('[1,2,3, 4]'); -INSERT INTO PATH_TBL VALUES ('[11,12,13,14]'); -INSERT INTO PATH_TBL VALUES ('(11,12,13,14)'); +INSERT INTO PATH_TBL VALUES (' ( ( 1 , 2 ) , ( 3 , 4 ) ) '); +INSERT INTO PATH_TBL VALUES ('[ (0,0),(3,0),(4,5),(1,6) ]'); +INSERT INTO PATH_TBL VALUES ('((1,2) ,(3,4 ))'); +INSERT INTO PATH_TBL VALUES ('1,2 ,3,4 '); +INSERT INTO PATH_TBL VALUES (' [1,2,3, 4] '); +INSERT INTO PATH_TBL VALUES ('((10,20))'); -- Only one point +INSERT INTO PATH_TBL VALUES ('[ 11,12,13,14 ]'); +INSERT INTO PATH_TBL VALUES ('( 11,12,13,14) '); -- bad values for parser testing +INSERT INTO PATH_TBL VALUES ('[]'); +ERROR: invalid input syntax for type path: "[]" +LINE 1: INSERT INTO PATH_TBL VALUES ('[]'); + ^ INSERT INTO PATH_TBL VALUES ('[(,2),(3,4)]'); ERROR: invalid input syntax for type path: "[(,2),(3,4)]" LINE 1: INSERT INTO PATH_TBL VALUES ('[(,2),(3,4)]'); @@ -20,19 +25,14 @@ INSERT INTO PATH_TBL VALUES ('[(1,2),(3,4)'); ERROR: invalid input syntax for type path: "[(1,2),(3,4)" LINE 1: INSERT INTO PATH_TBL VALUES ('[(1,2),(3,4)'); ^ -SELECT f1 FROM PATH_TBL; - f1 ---------------------------- - [(1,2),(3,4)] - ((1,2),(3,4)) - [(0,0),(3,0),(4,5),(1,6)] - ((1,2),(3,4)) - ((1,2),(3,4)) - [(1,2),(3,4)] - [(11,12),(13,14)] - ((11,12),(13,14)) -(8 rows) - +INSERT INTO PATH_TBL VALUES ('(1,2,3,4'); +ERROR: invalid input syntax for type path: "(1,2,3,4" +LINE 1: INSERT INTO PATH_TBL VALUES ('(1,2,3,4'); + ^ +INSERT INTO PATH_TBL VALUES ('(1,2),(3,4)]'); +ERROR: invalid input syntax for type path: "(1,2),(3,4)]" +LINE 1: INSERT INTO PATH_TBL VALUES ('(1,2),(3,4)]'); + ^ SELECT '' AS count, f1 AS open_path FROM PATH_TBL WHERE isopen(f1); count | open_path -------+--------------------------- @@ -48,8 +48,9 @@ SELECT '' AS count, f1 AS closed_path FROM PATH_TBL WHERE isclosed(f1); | ((1,2),(3,4)) | ((1,2),(3,4)) | ((1,2),(3,4)) + | ((10,20)) | ((11,12),(13,14)) -(4 rows) +(5 rows) SELECT '' AS count, pclose(f1) AS closed_path FROM PATH_TBL; count | closed_path @@ -60,9 +61,10 @@ SELECT '' AS count, pclose(f1) AS closed_path FROM PATH_TBL; | ((1,2),(3,4)) | ((1,2),(3,4)) | ((1,2),(3,4)) + | ((10,20)) | ((11,12),(13,14)) | ((11,12),(13,14)) -(8 rows) +(9 rows) SELECT '' AS count, popen(f1) AS open_path FROM PATH_TBL; count | open_path @@ -73,7 +75,8 @@ SELECT '' AS count, popen(f1) AS open_path FROM PATH_TBL; | [(1,2),(3,4)] | [(1,2),(3,4)] | 
[(1,2),(3,4)] + | [(10,20)] | [(11,12),(13,14)] | [(11,12),(13,14)] -(8 rows) +(9 rows) diff --git a/src/test/regress/expected/plancache.out b/src/test/regress/expected/plancache.out index 3f3db337c5..7d289b8c5e 100644 --- a/src/test/regress/expected/plancache.out +++ b/src/test/regress/expected/plancache.out @@ -252,3 +252,106 @@ NOTICE: 3 (1 row) +-- Check that addition or removal of any partition is correctly dealt with by +-- default partition table when it is being used in prepared statement. +create table pc_list_parted (a int) partition by list(a); +create table pc_list_part_null partition of pc_list_parted for values in (null); +create table pc_list_part_1 partition of pc_list_parted for values in (1); +create table pc_list_part_def partition of pc_list_parted default; +prepare pstmt_def_insert (int) as insert into pc_list_part_def values($1); +-- should fail +execute pstmt_def_insert(null); +ERROR: new row for relation "pc_list_part_def" violates partition constraint +DETAIL: Failing row contains (null). +execute pstmt_def_insert(1); +ERROR: new row for relation "pc_list_part_def" violates partition constraint +DETAIL: Failing row contains (1). +create table pc_list_part_2 partition of pc_list_parted for values in (2); +execute pstmt_def_insert(2); +ERROR: new row for relation "pc_list_part_def" violates partition constraint +DETAIL: Failing row contains (2). +alter table pc_list_parted detach partition pc_list_part_null; +-- should be ok +execute pstmt_def_insert(null); +drop table pc_list_part_1; +-- should be ok +execute pstmt_def_insert(1); +drop table pc_list_parted, pc_list_part_null; +deallocate pstmt_def_insert; +-- Test plan_cache_mode +create table test_mode (a int); +insert into test_mode select 1 from generate_series(1,1000) union all select 2; +create index on test_mode (a); +analyze test_mode; +prepare test_mode_pp (int) as select count(*) from test_mode where a = $1; +-- up to 5 executions, custom plan is used +explain (costs off) execute test_mode_pp(2); + QUERY PLAN +---------------------------------------------------------- + Aggregate + -> Index Only Scan using test_mode_a_idx on test_mode + Index Cond: (a = 2) +(3 rows) + +-- force generic plan +set plan_cache_mode to force_generic_plan; +explain (costs off) execute test_mode_pp(2); + QUERY PLAN +----------------------------- + Aggregate + -> Seq Scan on test_mode + Filter: (a = $1) +(3 rows) + +-- get to generic plan by 5 executions +set plan_cache_mode to auto; +execute test_mode_pp(1); -- 1x + count +------- + 1000 +(1 row) + +execute test_mode_pp(1); -- 2x + count +------- + 1000 +(1 row) + +execute test_mode_pp(1); -- 3x + count +------- + 1000 +(1 row) + +execute test_mode_pp(1); -- 4x + count +------- + 1000 +(1 row) + +execute test_mode_pp(1); -- 5x + count +------- + 1000 +(1 row) + +-- we should now get a really bad plan +explain (costs off) execute test_mode_pp(2); + QUERY PLAN +----------------------------- + Aggregate + -> Seq Scan on test_mode + Filter: (a = $1) +(3 rows) + +-- but we can force a custom plan +set plan_cache_mode to force_custom_plan; +explain (costs off) execute test_mode_pp(2); + QUERY PLAN +---------------------------------------------------------- + Aggregate + -> Index Only Scan using test_mode_a_idx on test_mode + Index Cond: (a = 2) +(3 rows) + +drop table test_mode; diff --git a/src/test/regress/expected/plpgsql.out b/src/test/regress/expected/plpgsql.out index 71099969a4..f78db4aae5 100644 --- a/src/test/regress/expected/plpgsql.out +++ 
b/src/test/regress/expected/plpgsql.out @@ -1870,7 +1870,7 @@ create table perform_test ( a INT, b INT ); -create function simple_func(int) returns boolean as ' +create function perform_simple_func(int) returns boolean as ' BEGIN IF $1 < 20 THEN INSERT INTO perform_test VALUES ($1, $1 + 10); @@ -1885,13 +1885,13 @@ BEGIN INSERT INTO perform_test VALUES (100, 100); END IF; - PERFORM simple_func(5); + PERFORM perform_simple_func(5); IF FOUND then INSERT INTO perform_test VALUES (100, 100); END IF; - PERFORM simple_func(50); + PERFORM perform_simple_func(50); IF FOUND then INSERT INTO perform_test VALUES (100, 100); @@ -2242,6 +2242,30 @@ drop function sp_id_user(text); -- create table rc_test (a int, b int); copy rc_test from stdin; +create function return_unnamed_refcursor() returns refcursor as $$ +declare + rc refcursor; +begin + open rc for select a from rc_test; + return rc; +end +$$ language plpgsql; +create function use_refcursor(rc refcursor) returns int as $$ +declare + rc refcursor; + x record; +begin + rc := return_unnamed_refcursor(); + fetch next from rc into x; + return x.a; +end +$$ language plpgsql; +select use_refcursor(return_unnamed_refcursor()); + use_refcursor +--------------- + 5 +(1 row) + create function return_refcursor(rc refcursor) returns refcursor as $$ begin open rc for select a from rc_test; @@ -2706,339 +2730,6 @@ NOTICE: {10,20,30}; 20; xyz; xyzabc; (10,aaa,,30); (1 row) drop function raise_exprs(); --- continue statement -create table conttesttbl(idx serial, v integer); -insert into conttesttbl(v) values(10); -insert into conttesttbl(v) values(20); -insert into conttesttbl(v) values(30); -insert into conttesttbl(v) values(40); -create function continue_test1() returns void as $$ -declare _i integer = 0; _r record; -begin - raise notice '---1---'; - loop - _i := _i + 1; - raise notice '%', _i; - continue when _i < 10; - exit; - end loop; - - raise notice '---2---'; - <<lbl>> - loop - _i := _i - 1; - loop - raise notice '%', _i; - continue lbl when _i > 0; - exit lbl; - end loop; - end loop; - - raise notice '---3---'; - <<the_loop>> - while _i < 10 loop - _i := _i + 1; - continue the_loop when _i % 2 = 0; - raise notice '%', _i; - end loop; - - raise notice '---4---'; - for _i in 1..10 loop - begin - -- applies to outer loop, not the nested begin block - continue when _i < 5; - raise notice '%', _i; - end; - end loop; - - raise notice '---5---'; - for _r in select * from conttesttbl loop - continue when _r.v <= 20; - raise notice '%', _r.v; - end loop; - - raise notice '---6---'; - for _r in execute 'select * from conttesttbl' loop - continue when _r.v <= 20; - raise notice '%', _r.v; - end loop; - - raise notice '---7---'; - for _i in 1..3 loop - raise notice '%', _i; - continue when _i = 3; - end loop; - - raise notice '---8---'; - _i := 1; - while _i <= 3 loop - raise notice '%', _i; - _i := _i + 1; - continue when _i = 3; - end loop; - - raise notice '---9---'; - for _r in select * from conttesttbl order by v limit 1 loop - raise notice '%', _r.v; - continue; - end loop; - - raise notice '---10---'; - for _r in execute 'select * from conttesttbl order by v limit 1' loop - raise notice '%', _r.v; - continue; - end loop; -end; $$ language plpgsql; -select continue_test1(); -NOTICE: ---1--- -NOTICE: 1 -NOTICE: 2 -NOTICE: 3 -NOTICE: 4 -NOTICE: 5 -NOTICE: 6 -NOTICE: 7 -NOTICE: 8 -NOTICE: 9 -NOTICE: 10 -NOTICE: ---2--- -NOTICE: 9 -NOTICE: 8 -NOTICE: 7 -NOTICE: 6 -NOTICE: 5 -NOTICE: 4 -NOTICE: 3 -NOTICE: 2 -NOTICE: 1 -NOTICE: 0 -NOTICE: ---3--- -NOTICE: 1 -NOTICE: 3 -NOTICE: 5 
-NOTICE: 7 -NOTICE: 9 -NOTICE: ---4--- -NOTICE: 5 -NOTICE: 6 -NOTICE: 7 -NOTICE: 8 -NOTICE: 9 -NOTICE: 10 -NOTICE: ---5--- -NOTICE: 30 -NOTICE: 40 -NOTICE: ---6--- -NOTICE: 30 -NOTICE: 40 -NOTICE: ---7--- -NOTICE: 1 -NOTICE: 2 -NOTICE: 3 -NOTICE: ---8--- -NOTICE: 1 -NOTICE: 2 -NOTICE: 3 -NOTICE: ---9--- -NOTICE: 10 -NOTICE: ---10--- -NOTICE: 10 - continue_test1 ----------------- - -(1 row) - -drop function continue_test1(); -drop table conttesttbl; --- should fail: CONTINUE is only legal inside a loop -create function continue_error1() returns void as $$ -begin - begin - continue; - end; -end; -$$ language plpgsql; -ERROR: CONTINUE cannot be used outside a loop -LINE 4: continue; - ^ --- should fail: unlabeled EXIT is only legal inside a loop -create function exit_error1() returns void as $$ -begin - begin - exit; - end; -end; -$$ language plpgsql; -ERROR: EXIT cannot be used outside a loop, unless it has a label -LINE 4: exit; - ^ --- should fail: no such label -create function continue_error2() returns void as $$ -begin - begin - loop - continue no_such_label; - end loop; - end; -end; -$$ language plpgsql; -ERROR: there is no label "no_such_label" attached to any block or loop enclosing this statement -LINE 5: continue no_such_label; - ^ --- should fail: no such label -create function exit_error2() returns void as $$ -begin - begin - loop - exit no_such_label; - end loop; - end; -end; -$$ language plpgsql; -ERROR: there is no label "no_such_label" attached to any block or loop enclosing this statement -LINE 5: exit no_such_label; - ^ --- should fail: CONTINUE can't reference the label of a named block -create function continue_error3() returns void as $$ -begin - <<begin_block1>> - begin - loop - continue begin_block1; - end loop; - end; -end; -$$ language plpgsql; -ERROR: block label "begin_block1" cannot be used in CONTINUE -LINE 6: continue begin_block1; - ^ --- On the other hand, EXIT *can* reference the label of a named block -create function exit_block1() returns void as $$ -begin - <<begin_block1>> - begin - loop - exit begin_block1; - raise exception 'should not get here'; - end loop; - end; -end; -$$ language plpgsql; -select exit_block1(); - exit_block1 -------------- - -(1 row) - -drop function exit_block1(); --- verbose end block and end loop -create function end_label1() returns void as $$ -<<blbl>> -begin - <<flbl1>> - for _i in 1 .. 10 loop - exit flbl1; - end loop flbl1; - <<flbl2>> - for _i in 1 .. 10 loop - exit flbl2; - end loop; -end blbl; -$$ language plpgsql; -select end_label1(); - end_label1 ------------- - -(1 row) - -drop function end_label1(); --- should fail: undefined end label -create function end_label2() returns void as $$ -begin - for _i in 1 .. 10 loop - exit; - end loop flbl1; -end; -$$ language plpgsql; -ERROR: end label "flbl1" specified for unlabelled block -LINE 5: end loop flbl1; - ^ --- should fail: end label does not match start label -create function end_label3() returns void as $$ -<<outer_label>> -begin - <<inner_label>> - for _i in 1 .. 10 loop - exit; - end loop outer_label; -end; -$$ language plpgsql; -ERROR: end label "outer_label" differs from block's label "inner_label" -LINE 7: end loop outer_label; - ^ --- should fail: end label on a block without a start label -create function end_label4() returns void as $$ -<<outer_label>> -begin - for _i in 1 .. 
10 loop - exit; - end loop outer_label; -end; -$$ language plpgsql; -ERROR: end label "outer_label" specified for unlabelled block -LINE 6: end loop outer_label; - ^ --- using list of scalars in fori and fore stmts -create function for_vect() returns void as $proc$ -<<lbl>>declare a integer; b varchar; c varchar; r record; -begin - -- fori - for i in 1 .. 3 loop - raise notice '%', i; - end loop; - -- fore with record var - for r in select gs as aa, 'BB' as bb, 'CC' as cc from generate_series(1,4) gs loop - raise notice '% % %', r.aa, r.bb, r.cc; - end loop; - -- fore with single scalar - for a in select gs from generate_series(1,4) gs loop - raise notice '%', a; - end loop; - -- fore with multiple scalars - for a,b,c in select gs, 'BB','CC' from generate_series(1,4) gs loop - raise notice '% % %', a, b, c; - end loop; - -- using qualified names in fors, fore is enabled, disabled only for fori - for lbl.a, lbl.b, lbl.c in execute $$select gs, 'bb','cc' from generate_series(1,4) gs$$ loop - raise notice '% % %', a, b, c; - end loop; -end; -$proc$ language plpgsql; -select for_vect(); -NOTICE: 1 -NOTICE: 2 -NOTICE: 3 -NOTICE: 1 BB CC -NOTICE: 2 BB CC -NOTICE: 3 BB CC -NOTICE: 4 BB CC -NOTICE: 1 -NOTICE: 2 -NOTICE: 3 -NOTICE: 4 -NOTICE: 1 BB CC -NOTICE: 2 BB CC -NOTICE: 3 BB CC -NOTICE: 4 BB CC -NOTICE: 1 bb cc -NOTICE: 2 bb cc -NOTICE: 3 bb cc -NOTICE: 4 bb cc - for_vect ----------- - -(1 row) - -- regression test: verify that multiple uses of same plpgsql datum within -- a SQL command all get mapped to the same $n parameter. The return value -- of the SELECT is not important, we only care that it doesn't fail with @@ -3064,55 +2755,56 @@ select multi_datum_use(42); -- create temp table foo (f1 int, f2 int); insert into foo values (1,2), (3,4); -create or replace function footest() returns void as $$ +create or replace function stricttest() returns void as $$ declare x record; begin -- should work insert into foo values(5,6) returning * into x; raise notice 'x.f1 = %, x.f2 = %', x.f1, x.f2; end$$ language plpgsql; -select footest(); +select stricttest(); NOTICE: x.f1 = 5, x.f2 = 6 - footest ---------- + stricttest +------------ (1 row) -create or replace function footest() returns void as $$ +create or replace function stricttest() returns void as $$ declare x record; begin -- should fail due to implicit strict insert into foo values(7,8),(9,10) returning * into x; raise notice 'x.f1 = %, x.f2 = %', x.f1, x.f2; end$$ language plpgsql; -select footest(); +select stricttest(); ERROR: query returned more than one row -CONTEXT: PL/pgSQL function footest() line 5 at SQL statement -create or replace function footest() returns void as $$ +HINT: Make sure the query returns a single row, or use LIMIT 1 +CONTEXT: PL/pgSQL function stricttest() line 5 at SQL statement +create or replace function stricttest() returns void as $$ declare x record; begin -- should work execute 'insert into foo values(5,6) returning *' into x; raise notice 'x.f1 = %, x.f2 = %', x.f1, x.f2; end$$ language plpgsql; -select footest(); +select stricttest(); NOTICE: x.f1 = 5, x.f2 = 6 - footest ---------- + stricttest +------------ (1 row) -create or replace function footest() returns void as $$ +create or replace function stricttest() returns void as $$ declare x record; begin -- this should work since EXECUTE isn't as picky execute 'insert into foo values(7,8),(9,10) returning *' into x; raise notice 'x.f1 = %, x.f2 = %', x.f1, x.f2; end$$ language plpgsql; -select footest(); +select stricttest(); NOTICE: x.f1 = 7, x.f2 = 8 - 
footest ---------- + stricttest +------------ (1 row) @@ -3127,78 +2819,79 @@ select * from foo; 9 | 10 (6 rows) -create or replace function footest() returns void as $$ +create or replace function stricttest() returns void as $$ declare x record; begin -- should work select * from foo where f1 = 3 into strict x; raise notice 'x.f1 = %, x.f2 = %', x.f1, x.f2; end$$ language plpgsql; -select footest(); +select stricttest(); NOTICE: x.f1 = 3, x.f2 = 4 - footest ---------- + stricttest +------------ (1 row) -create or replace function footest() returns void as $$ +create or replace function stricttest() returns void as $$ declare x record; begin -- should fail, no rows select * from foo where f1 = 0 into strict x; raise notice 'x.f1 = %, x.f2 = %', x.f1, x.f2; end$$ language plpgsql; -select footest(); +select stricttest(); ERROR: query returned no rows -CONTEXT: PL/pgSQL function footest() line 5 at SQL statement -create or replace function footest() returns void as $$ +CONTEXT: PL/pgSQL function stricttest() line 5 at SQL statement +create or replace function stricttest() returns void as $$ declare x record; begin -- should fail, too many rows select * from foo where f1 > 3 into strict x; raise notice 'x.f1 = %, x.f2 = %', x.f1, x.f2; end$$ language plpgsql; -select footest(); +select stricttest(); ERROR: query returned more than one row -CONTEXT: PL/pgSQL function footest() line 5 at SQL statement -create or replace function footest() returns void as $$ +HINT: Make sure the query returns a single row, or use LIMIT 1 +CONTEXT: PL/pgSQL function stricttest() line 5 at SQL statement +create or replace function stricttest() returns void as $$ declare x record; begin -- should work execute 'select * from foo where f1 = 3' into strict x; raise notice 'x.f1 = %, x.f2 = %', x.f1, x.f2; end$$ language plpgsql; -select footest(); +select stricttest(); NOTICE: x.f1 = 3, x.f2 = 4 - footest ---------- + stricttest +------------ (1 row) -create or replace function footest() returns void as $$ +create or replace function stricttest() returns void as $$ declare x record; begin -- should fail, no rows execute 'select * from foo where f1 = 0' into strict x; raise notice 'x.f1 = %, x.f2 = %', x.f1, x.f2; end$$ language plpgsql; -select footest(); +select stricttest(); ERROR: query returned no rows -CONTEXT: PL/pgSQL function footest() line 5 at EXECUTE -create or replace function footest() returns void as $$ +CONTEXT: PL/pgSQL function stricttest() line 5 at EXECUTE +create or replace function stricttest() returns void as $$ declare x record; begin -- should fail, too many rows execute 'select * from foo where f1 > 3' into strict x; raise notice 'x.f1 = %, x.f2 = %', x.f1, x.f2; end$$ language plpgsql; -select footest(); +select stricttest(); ERROR: query returned more than one row -CONTEXT: PL/pgSQL function footest() line 5 at EXECUTE -drop function footest(); +CONTEXT: PL/pgSQL function stricttest() line 5 at EXECUTE +drop function stricttest(); -- test printing parameters after failure due to STRICT set plpgsql.print_strict_params to true; -create or replace function footest() returns void as $$ +create or replace function stricttest() returns void as $$ declare x record; p1 int := 2; @@ -3208,11 +2901,11 @@ begin select * from foo where f1 = p1 and f1::text = p3 into strict x; raise notice 'x.f1 = %, x.f2 = %', x.f1, x.f2; end$$ language plpgsql; -select footest(); +select stricttest(); ERROR: query returned no rows DETAIL: parameters: p1 = '2', p3 = 'foo' -CONTEXT: PL/pgSQL function footest() line 8 at 
SQL statement -create or replace function footest() returns void as $$ +CONTEXT: PL/pgSQL function stricttest() line 8 at SQL statement +create or replace function stricttest() returns void as $$ declare x record; p1 int := 2; @@ -3222,53 +2915,55 @@ begin select * from foo where f1 > p1 or f1::text = p3 into strict x; raise notice 'x.f1 = %, x.f2 = %', x.f1, x.f2; end$$ language plpgsql; -select footest(); +select stricttest(); ERROR: query returned more than one row DETAIL: parameters: p1 = '2', p3 = 'foo' -CONTEXT: PL/pgSQL function footest() line 8 at SQL statement -create or replace function footest() returns void as $$ +HINT: Make sure the query returns a single row, or use LIMIT 1 +CONTEXT: PL/pgSQL function stricttest() line 8 at SQL statement +create or replace function stricttest() returns void as $$ declare x record; begin -- too many rows, no params select * from foo where f1 > 3 into strict x; raise notice 'x.f1 = %, x.f2 = %', x.f1, x.f2; end$$ language plpgsql; -select footest(); +select stricttest(); ERROR: query returned more than one row -CONTEXT: PL/pgSQL function footest() line 5 at SQL statement -create or replace function footest() returns void as $$ +HINT: Make sure the query returns a single row, or use LIMIT 1 +CONTEXT: PL/pgSQL function stricttest() line 5 at SQL statement +create or replace function stricttest() returns void as $$ declare x record; begin -- no rows execute 'select * from foo where f1 = $1 or f1::text = $2' using 0, 'foo' into strict x; raise notice 'x.f1 = %, x.f2 = %', x.f1, x.f2; end$$ language plpgsql; -select footest(); +select stricttest(); ERROR: query returned no rows DETAIL: parameters: $1 = '0', $2 = 'foo' -CONTEXT: PL/pgSQL function footest() line 5 at EXECUTE -create or replace function footest() returns void as $$ +CONTEXT: PL/pgSQL function stricttest() line 5 at EXECUTE +create or replace function stricttest() returns void as $$ declare x record; begin -- too many rows execute 'select * from foo where f1 > $1' using 1 into strict x; raise notice 'x.f1 = %, x.f2 = %', x.f1, x.f2; end$$ language plpgsql; -select footest(); +select stricttest(); ERROR: query returned more than one row DETAIL: parameters: $1 = '1' -CONTEXT: PL/pgSQL function footest() line 5 at EXECUTE -create or replace function footest() returns void as $$ +CONTEXT: PL/pgSQL function stricttest() line 5 at EXECUTE +create or replace function stricttest() returns void as $$ declare x record; begin -- too many rows, no parameters execute 'select * from foo where f1 > 3' into strict x; raise notice 'x.f1 = %, x.f2 = %', x.f1, x.f2; end$$ language plpgsql; -select footest(); +select stricttest(); ERROR: query returned more than one row -CONTEXT: PL/pgSQL function footest() line 5 at EXECUTE -create or replace function footest() returns void as $$ +CONTEXT: PL/pgSQL function stricttest() line 5 at EXECUTE +create or replace function stricttest() returns void as $$ -- override the global #print_strict_params off declare @@ -3280,11 +2975,12 @@ begin select * from foo where f1 > p1 or f1::text = p3 into strict x; raise notice 'x.f1 = %, x.f2 = %', x.f1, x.f2; end$$ language plpgsql; -select footest(); +select stricttest(); ERROR: query returned more than one row -CONTEXT: PL/pgSQL function footest() line 10 at SQL statement +HINT: Make sure the query returns a single row, or use LIMIT 1 +CONTEXT: PL/pgSQL function stricttest() line 10 at SQL statement reset plpgsql.print_strict_params; -create or replace function footest() returns void as $$ +create or replace function 
stricttest() returns void as $$ -- override the global #print_strict_params on declare @@ -3296,10 +2992,11 @@ begin select * from foo where f1 > p1 or f1::text = p3 into strict x; raise notice 'x.f1 = %, x.f2 = %', x.f1, x.f2; end$$ language plpgsql; -select footest(); +select stricttest(); ERROR: query returned more than one row DETAIL: parameters: p1 = '2', p3 = 'foo' -CONTEXT: PL/pgSQL function footest() line 10 at SQL statement +HINT: Make sure the query returns a single row, or use LIMIT 1 +CONTEXT: PL/pgSQL function stricttest() line 10 at SQL statement -- test warnings and errors set plpgsql.extra_warnings to 'all'; set plpgsql.extra_warnings to 'none'; @@ -3422,6 +3119,107 @@ select shadowtest(1); t (1 row) +-- runtime extra checks +set plpgsql.extra_warnings to 'too_many_rows'; +do $$ +declare x int; +begin + select v from generate_series(1,2) g(v) into x; +end; +$$; +WARNING: query returned more than one row +HINT: Make sure the query returns a single row, or use LIMIT 1 +set plpgsql.extra_errors to 'too_many_rows'; +do $$ +declare x int; +begin + select v from generate_series(1,2) g(v) into x; +end; +$$; +ERROR: query returned more than one row +HINT: Make sure the query returns a single row, or use LIMIT 1 +CONTEXT: PL/pgSQL function inline_code_block line 4 at SQL statement +reset plpgsql.extra_errors; +reset plpgsql.extra_warnings; +set plpgsql.extra_warnings to 'strict_multi_assignment'; +do $$ +declare + x int; + y int; +begin + select 1 into x, y; + select 1,2 into x, y; + select 1,2,3 into x, y; +end +$$; +WARNING: number of source and target fields in assignment do not match +DETAIL: strict_multi_assignment check of extra_warnings is active. +HINT: Make sure the query returns the exact list of columns. +WARNING: number of source and target fields in assignment do not match +DETAIL: strict_multi_assignment check of extra_warnings is active. +HINT: Make sure the query returns the exact list of columns. +set plpgsql.extra_errors to 'strict_multi_assignment'; +do $$ +declare + x int; + y int; +begin + select 1 into x, y; + select 1,2 into x, y; + select 1,2,3 into x, y; +end +$$; +ERROR: number of source and target fields in assignment do not match +DETAIL: strict_multi_assignment check of extra_errors is active. +HINT: Make sure the query returns the exact list of columns. +CONTEXT: PL/pgSQL function inline_code_block line 6 at SQL statement +create table test_01(a int, b int, c int); +alter table test_01 drop column a; +-- the check is active only when source table is not empty +insert into test_01 values(10,20); +do $$ +declare + x int; + y int; +begin + select * from test_01 into x, y; -- should be ok + raise notice 'ok'; + select * from test_01 into x; -- should to fail +end; +$$; +NOTICE: ok +ERROR: number of source and target fields in assignment do not match +DETAIL: strict_multi_assignment check of extra_errors is active. +HINT: Make sure the query returns the exact list of columns. +CONTEXT: PL/pgSQL function inline_code_block line 8 at SQL statement +do $$ +declare + t test_01; +begin + select 1, 2 into t; -- should be ok + raise notice 'ok'; + select 1, 2, 3 into t; -- should fail; +end; +$$; +NOTICE: ok +ERROR: number of source and target fields in assignment do not match +DETAIL: strict_multi_assignment check of extra_errors is active. +HINT: Make sure the query returns the exact list of columns. 
+CONTEXT: PL/pgSQL function inline_code_block line 7 at SQL statement +do $$ +declare + t test_01; +begin + select 1 into t; -- should fail; +end; +$$; +ERROR: number of source and target fields in assignment do not match +DETAIL: strict_multi_assignment check of extra_errors is active. +HINT: Make sure the query returns the exact list of columns. +CONTEXT: PL/pgSQL function inline_code_block line 5 at SQL statement +drop table test_01; +reset plpgsql.extra_errors; +reset plpgsql.extra_warnings; -- test scrollable cursor support create function sc_test() returns setof integer as $$ declare @@ -4091,7 +3889,7 @@ begin end; $$ language plpgsql; select compos(); -ERROR: invalid input syntax for integer: "(1,hello)" +ERROR: invalid input syntax for type integer: "(1,hello)" CONTEXT: PL/pgSQL function compos() while casting return value to function's return type -- test: invalid use of composite expression in scalar-returning function create or replace function compos() returns int as $$ @@ -4100,7 +3898,7 @@ begin end; $$ language plpgsql; select compos(); -ERROR: invalid input syntax for integer: "(1,hello)" +ERROR: invalid input syntax for type integer: "(1,hello)" CONTEXT: PL/pgSQL function compos() while casting return value to function's return type drop function compos(); drop type compostype; @@ -4368,136 +4166,6 @@ NOTICE: column >>some column name<<, constraint >>some constraint name<<, type (1 row) drop function stacked_diagnostics_test(); --- test CASE statement -create or replace function case_test(bigint) returns text as $$ -declare a int = 10; - b int = 1; -begin - case $1 - when 1 then - return 'one'; - when 2 then - return 'two'; - when 3,4,3+5 then - return 'three, four or eight'; - when a then - return 'ten'; - when a+b, a+b+1 then - return 'eleven, twelve'; - end case; -end; -$$ language plpgsql immutable; -select case_test(1); - case_test ------------ - one -(1 row) - -select case_test(2); - case_test ------------ - two -(1 row) - -select case_test(3); - case_test ----------------------- - three, four or eight -(1 row) - -select case_test(4); - case_test ----------------------- - three, four or eight -(1 row) - -select case_test(5); -- fails -ERROR: case not found -HINT: CASE statement is missing ELSE part. -CONTEXT: PL/pgSQL function case_test(bigint) line 5 at CASE -select case_test(8); - case_test ----------------------- - three, four or eight -(1 row) - -select case_test(10); - case_test ------------ - ten -(1 row) - -select case_test(11); - case_test ----------------- - eleven, twelve -(1 row) - -select case_test(12); - case_test ----------------- - eleven, twelve -(1 row) - -select case_test(13); -- fails -ERROR: case not found -HINT: CASE statement is missing ELSE part. 
-CONTEXT: PL/pgSQL function case_test(bigint) line 5 at CASE -create or replace function catch() returns void as $$ -begin - raise notice '%', case_test(6); -exception - when case_not_found then - raise notice 'caught case_not_found % %', SQLSTATE, SQLERRM; -end -$$ language plpgsql; -select catch(); -NOTICE: caught case_not_found 20000 case not found - catch -------- - -(1 row) - --- test the searched variant too, as well as ELSE -create or replace function case_test(bigint) returns text as $$ -declare a int = 10; -begin - case - when $1 = 1 then - return 'one'; - when $1 = a + 2 then - return 'twelve'; - else - return 'other'; - end case; -end; -$$ language plpgsql immutable; -select case_test(1); - case_test ------------ - one -(1 row) - -select case_test(2); - case_test ------------ - other -(1 row) - -select case_test(12); - case_test ------------ - twelve -(1 row) - -select case_test(13); - case_test ------------ - other -(1 row) - -drop function catch(); -drop function case_test(bigint); -- test variadic functions create or replace function vari(variadic int[]) returns void as $$ @@ -5382,6 +5050,12 @@ create function consumes_rw_array(int[]) returns int language plpgsql as $$ begin return $1[1]; end; $$ stable; +select consumes_rw_array(returns_rw_array(42)); + consumes_rw_array +------------------- + 42 +(1 row) + -- bug #14174 explain (verbose, costs off) select i, a from @@ -5438,6 +5112,13 @@ select consumes_rw_array(a), a from 2 | {2,2} (2 rows) +do $$ +declare a int[] := array[1,2]; +begin + a := a || 3; + raise notice 'a = %', a; +end$$; +NOTICE: a = {1,2,3} -- -- Test access to call stack -- @@ -5879,19 +5560,19 @@ CREATE FUNCTION transition_table_level2_bad_usage_func() LANGUAGE plpgsql AS $$ BEGIN - INSERT INTO d VALUES (1000000, 1000000, 'x'); + INSERT INTO dx VALUES (1000000, 1000000, 'x'); RETURN NULL; END; $$; CREATE TRIGGER transition_table_level2_bad_usage_trigger AFTER DELETE ON transition_table_level2 - REFERENCING OLD TABLE AS d + REFERENCING OLD TABLE AS dx FOR EACH STATEMENT EXECUTE PROCEDURE transition_table_level2_bad_usage_func(); DELETE FROM transition_table_level2 WHERE level2_no BETWEEN 301 AND 305; -ERROR: relation "d" cannot be the target of a modifying statement -CONTEXT: SQL statement "INSERT INTO d VALUES (1000000, 1000000, 'x')" +ERROR: relation "dx" cannot be the target of a modifying statement +CONTEXT: SQL statement "INSERT INTO dx VALUES (1000000, 1000000, 'x')" PL/pgSQL function transition_table_level2_bad_usage_func() line 3 at SQL statement DROP TRIGGER transition_table_level2_bad_usage_trigger ON transition_table_level2; @@ -5985,6 +5666,28 @@ LINE 1: SELECT (SELECT string_agg(id || '=' || name, ',') FROM d) QUERY: SELECT (SELECT string_agg(id || '=' || name, ',') FROM d) CONTEXT: PL/pgSQL function alter_table_under_transition_tables_upd_func() line 3 at RAISE -- +-- Test multiple reference to a transition table +-- +CREATE TABLE multi_test (i int); +INSERT INTO multi_test VALUES (1); +CREATE OR REPLACE FUNCTION multi_test_trig() RETURNS trigger +LANGUAGE plpgsql AS $$ +BEGIN + RAISE NOTICE 'count = %', (SELECT COUNT(*) FROM new_test); + RAISE NOTICE 'count union = %', + (SELECT COUNT(*) + FROM (SELECT * FROM new_test UNION ALL SELECT * FROM new_test) ss); + RETURN NULL; +END$$; +CREATE TRIGGER my_trigger AFTER UPDATE ON multi_test + REFERENCING NEW TABLE AS new_test OLD TABLE as old_test + FOR EACH STATEMENT EXECUTE PROCEDURE multi_test_trig(); +UPDATE multi_test SET i = i; +NOTICE: count = 1 +NOTICE: count union = 2 +DROP TABLE multi_test; 
+DROP FUNCTION multi_test_trig(); +-- -- Check type parsing and record fetching from partitioned tables -- CREATE TABLE partitioned_table (a int, b text) PARTITION BY LIST (a); @@ -6029,3 +5732,14 @@ SELECT * FROM list_partitioned_table() AS t; 2 (2 rows) +-- +-- Check argument name is used instead of $n in error message +-- +CREATE FUNCTION fx(x WSlot) RETURNS void AS $$ +BEGIN + GET DIAGNOSTICS x = ROW_COUNT; + RETURN; +END; $$ LANGUAGE plpgsql; +ERROR: "x" is not a scalar variable +LINE 3: GET DIAGNOSTICS x = ROW_COUNT; + ^ diff --git a/src/test/regress/expected/point.out b/src/test/regress/expected/point.out index bfc0962749..c18e865370 100644 --- a/src/test/regress/expected/point.out +++ b/src/test/regress/expected/point.out @@ -7,6 +7,9 @@ INSERT INTO POINT_TBL(f1) VALUES ('(-10.0,0.0)'); INSERT INTO POINT_TBL(f1) VALUES ('(-3.0,4.0)'); INSERT INTO POINT_TBL(f1) VALUES ('(5.1, 34.5)'); INSERT INTO POINT_TBL(f1) VALUES ('(-5.0,-12.0)'); +INSERT INTO POINT_TBL(f1) VALUES ('(1e-300,-1e-300)'); -- To underflow +INSERT INTO POINT_TBL(f1) VALUES ('(1e+300,Inf)'); -- To overflow +INSERT INTO POINT_TBL(f1) VALUES (' ( Nan , NaN ) '); -- bad format points INSERT INTO POINT_TBL(f1) VALUES ('asdfasdf'); ERROR: invalid input syntax for type point: "asdfasdf" @@ -17,20 +20,31 @@ INSERT INTO POINT_TBL(f1) VALUES ('(10.0 10.0)'); ERROR: invalid input syntax for type point: "(10.0 10.0)" LINE 1: INSERT INTO POINT_TBL(f1) VALUES ('(10.0 10.0)'); ^ +INSERT INTO POINT_TBL(f1) VALUES ('(10.0, 10.0) x'); +ERROR: invalid input syntax for type point: "(10.0, 10.0) x" +LINE 1: INSERT INTO POINT_TBL(f1) VALUES ('(10.0, 10.0) x'); + ^ INSERT INTO POINT_TBL(f1) VALUES ('(10.0,10.0'); ERROR: invalid input syntax for type point: "(10.0,10.0" LINE 1: INSERT INTO POINT_TBL(f1) VALUES ('(10.0,10.0'); ^ +INSERT INTO POINT_TBL(f1) VALUES ('(10.0, 1e+500)'); -- Out of range +ERROR: "1e+500" is out of range for type double precision +LINE 1: INSERT INTO POINT_TBL(f1) VALUES ('(10.0, 1e+500)'); + ^ SELECT '' AS six, * FROM POINT_TBL; - six | f1 ------+------------ + six | f1 +-----+------------------- | (0,0) | (-10,0) | (-3,4) | (5.1,34.5) | (-5,-12) + | (1e-300,-1e-300) + | (1e+300,Infinity) + | (NaN,NaN) | (10,10) -(6 rows) +(9 rows) -- left of SELECT '' AS three, p.* FROM POINT_TBL p WHERE p.f1 << '(0.0, 0.0)'; @@ -92,158 +106,268 @@ SELECT '' AS three, p.* FROM POINT_TBL p SELECT '' AS three, p.* FROM POINT_TBL p WHERE not p.f1 <@ box '(0,0,100,100)'; - three | f1 --------+---------- + three | f1 +-------+------------------- | (-10,0) | (-3,4) | (-5,-12) -(3 rows) + | (1e-300,-1e-300) + | (1e+300,Infinity) + | (NaN,NaN) +(6 rows) SELECT '' AS two, p.* FROM POINT_TBL p WHERE p.f1 <@ path '[(0,0),(-10,0),(-10,10)]'; - two | f1 ------+--------- + two | f1 +-----+------------------ | (0,0) | (-10,0) -(2 rows) + | (1e-300,-1e-300) +(3 rows) SELECT '' AS three, p.* FROM POINT_TBL p WHERE not box '(0,0,100,100)' @> p.f1; - three | f1 --------+---------- + three | f1 +-------+------------------- | (-10,0) | (-3,4) | (-5,-12) -(3 rows) + | (1e-300,-1e-300) + | (1e+300,Infinity) + | (NaN,NaN) +(6 rows) SELECT '' AS six, p.f1, p.f1 <-> point '(0,0)' AS dist FROM POINT_TBL p ORDER BY dist; - six | f1 | dist ------+------------+------------------ - | (0,0) | 0 - | (-3,4) | 5 - | (-10,0) | 10 - | (-5,-12) | 13 - | (10,10) | 14.142135623731 - | (5.1,34.5) | 34.8749193547455 -(6 rows) + six | f1 | dist +-----+-------------------+---------------------- + | (0,0) | 0 + | (1e-300,-1e-300) | 1.4142135623731e-300 + | (-3,4) | 5 + | 
(-10,0) | 10 + | (-5,-12) | 13 + | (10,10) | 14.142135623731 + | (5.1,34.5) | 34.8749193547455 + | (1e+300,Infinity) | Infinity + | (NaN,NaN) | NaN +(9 rows) SELECT '' AS thirtysix, p1.f1 AS point1, p2.f1 AS point2, p1.f1 <-> p2.f1 AS dist FROM POINT_TBL p1, POINT_TBL p2 ORDER BY dist, p1.f1[0], p2.f1[0]; - thirtysix | point1 | point2 | dist ------------+------------+------------+------------------ - | (-10,0) | (-10,0) | 0 - | (-5,-12) | (-5,-12) | 0 - | (-3,4) | (-3,4) | 0 - | (0,0) | (0,0) | 0 - | (5.1,34.5) | (5.1,34.5) | 0 - | (10,10) | (10,10) | 0 - | (-3,4) | (0,0) | 5 - | (0,0) | (-3,4) | 5 - | (-10,0) | (-3,4) | 8.06225774829855 - | (-3,4) | (-10,0) | 8.06225774829855 - | (-10,0) | (0,0) | 10 - | (0,0) | (-10,0) | 10 - | (-10,0) | (-5,-12) | 13 - | (-5,-12) | (-10,0) | 13 - | (-5,-12) | (0,0) | 13 - | (0,0) | (-5,-12) | 13 - | (0,0) | (10,10) | 14.142135623731 - | (10,10) | (0,0) | 14.142135623731 - | (-3,4) | (10,10) | 14.3178210632764 - | (10,10) | (-3,4) | 14.3178210632764 - | (-5,-12) | (-3,4) | 16.1245154965971 - | (-3,4) | (-5,-12) | 16.1245154965971 - | (-10,0) | (10,10) | 22.3606797749979 - | (10,10) | (-10,0) | 22.3606797749979 - | (5.1,34.5) | (10,10) | 24.9851956166046 - | (10,10) | (5.1,34.5) | 24.9851956166046 - | (-5,-12) | (10,10) | 26.6270539113887 - | (10,10) | (-5,-12) | 26.6270539113887 - | (-3,4) | (5.1,34.5) | 31.5572495632937 - | (5.1,34.5) | (-3,4) | 31.5572495632937 - | (0,0) | (5.1,34.5) | 34.8749193547455 - | (5.1,34.5) | (0,0) | 34.8749193547455 - | (-10,0) | (5.1,34.5) | 37.6597928831267 - | (5.1,34.5) | (-10,0) | 37.6597928831267 - | (-5,-12) | (5.1,34.5) | 47.5842410888311 - | (5.1,34.5) | (-5,-12) | 47.5842410888311 -(36 rows) + thirtysix | point1 | point2 | dist +-----------+-------------------+-------------------+---------------------- + | (-10,0) | (-10,0) | 0 + | (-5,-12) | (-5,-12) | 0 + | (-3,4) | (-3,4) | 0 + | (0,0) | (0,0) | 0 + | (1e-300,-1e-300) | (1e-300,-1e-300) | 0 + | (5.1,34.5) | (5.1,34.5) | 0 + | (10,10) | (10,10) | 0 + | (0,0) | (1e-300,-1e-300) | 1.4142135623731e-300 + | (1e-300,-1e-300) | (0,0) | 1.4142135623731e-300 + | (-3,4) | (0,0) | 5 + | (-3,4) | (1e-300,-1e-300) | 5 + | (0,0) | (-3,4) | 5 + | (1e-300,-1e-300) | (-3,4) | 5 + | (-10,0) | (-3,4) | 8.06225774829855 + | (-3,4) | (-10,0) | 8.06225774829855 + | (-10,0) | (0,0) | 10 + | (-10,0) | (1e-300,-1e-300) | 10 + | (0,0) | (-10,0) | 10 + | (1e-300,-1e-300) | (-10,0) | 10 + | (-10,0) | (-5,-12) | 13 + | (-5,-12) | (-10,0) | 13 + | (-5,-12) | (0,0) | 13 + | (-5,-12) | (1e-300,-1e-300) | 13 + | (0,0) | (-5,-12) | 13 + | (1e-300,-1e-300) | (-5,-12) | 13 + | (0,0) | (10,10) | 14.142135623731 + | (1e-300,-1e-300) | (10,10) | 14.142135623731 + | (10,10) | (0,0) | 14.142135623731 + | (10,10) | (1e-300,-1e-300) | 14.142135623731 + | (-3,4) | (10,10) | 14.3178210632764 + | (10,10) | (-3,4) | 14.3178210632764 + | (-5,-12) | (-3,4) | 16.1245154965971 + | (-3,4) | (-5,-12) | 16.1245154965971 + | (-10,0) | (10,10) | 22.3606797749979 + | (10,10) | (-10,0) | 22.3606797749979 + | (5.1,34.5) | (10,10) | 24.9851956166046 + | (10,10) | (5.1,34.5) | 24.9851956166046 + | (-5,-12) | (10,10) | 26.6270539113887 + | (10,10) | (-5,-12) | 26.6270539113887 + | (-3,4) | (5.1,34.5) | 31.5572495632937 + | (5.1,34.5) | (-3,4) | 31.5572495632937 + | (0,0) | (5.1,34.5) | 34.8749193547455 + | (1e-300,-1e-300) | (5.1,34.5) | 34.8749193547455 + | (5.1,34.5) | (0,0) | 34.8749193547455 + | (5.1,34.5) | (1e-300,-1e-300) | 34.8749193547455 + | (-10,0) | (5.1,34.5) | 37.6597928831267 + | (5.1,34.5) | (-10,0) | 
37.6597928831267 + | (-5,-12) | (5.1,34.5) | 47.5842410888311 + | (5.1,34.5) | (-5,-12) | 47.5842410888311 + | (-10,0) | (1e+300,Infinity) | Infinity + | (-5,-12) | (1e+300,Infinity) | Infinity + | (-3,4) | (1e+300,Infinity) | Infinity + | (0,0) | (1e+300,Infinity) | Infinity + | (1e-300,-1e-300) | (1e+300,Infinity) | Infinity + | (5.1,34.5) | (1e+300,Infinity) | Infinity + | (10,10) | (1e+300,Infinity) | Infinity + | (1e+300,Infinity) | (-10,0) | Infinity + | (1e+300,Infinity) | (-5,-12) | Infinity + | (1e+300,Infinity) | (-3,4) | Infinity + | (1e+300,Infinity) | (0,0) | Infinity + | (1e+300,Infinity) | (1e-300,-1e-300) | Infinity + | (1e+300,Infinity) | (5.1,34.5) | Infinity + | (1e+300,Infinity) | (10,10) | Infinity + | (-10,0) | (NaN,NaN) | NaN + | (-5,-12) | (NaN,NaN) | NaN + | (-3,4) | (NaN,NaN) | NaN + | (0,0) | (NaN,NaN) | NaN + | (1e-300,-1e-300) | (NaN,NaN) | NaN + | (5.1,34.5) | (NaN,NaN) | NaN + | (10,10) | (NaN,NaN) | NaN + | (1e+300,Infinity) | (1e+300,Infinity) | NaN + | (1e+300,Infinity) | (NaN,NaN) | NaN + | (NaN,NaN) | (-10,0) | NaN + | (NaN,NaN) | (-5,-12) | NaN + | (NaN,NaN) | (-3,4) | NaN + | (NaN,NaN) | (0,0) | NaN + | (NaN,NaN) | (1e-300,-1e-300) | NaN + | (NaN,NaN) | (5.1,34.5) | NaN + | (NaN,NaN) | (10,10) | NaN + | (NaN,NaN) | (1e+300,Infinity) | NaN + | (NaN,NaN) | (NaN,NaN) | NaN +(81 rows) SELECT '' AS thirty, p1.f1 AS point1, p2.f1 AS point2 FROM POINT_TBL p1, POINT_TBL p2 WHERE (p1.f1 <-> p2.f1) > 3; - thirty | point1 | point2 ---------+------------+------------ - | (0,0) | (-10,0) - | (0,0) | (-3,4) - | (0,0) | (5.1,34.5) - | (0,0) | (-5,-12) - | (0,0) | (10,10) - | (-10,0) | (0,0) - | (-10,0) | (-3,4) - | (-10,0) | (5.1,34.5) - | (-10,0) | (-5,-12) - | (-10,0) | (10,10) - | (-3,4) | (0,0) - | (-3,4) | (-10,0) - | (-3,4) | (5.1,34.5) - | (-3,4) | (-5,-12) - | (-3,4) | (10,10) - | (5.1,34.5) | (0,0) - | (5.1,34.5) | (-10,0) - | (5.1,34.5) | (-3,4) - | (5.1,34.5) | (-5,-12) - | (5.1,34.5) | (10,10) - | (-5,-12) | (0,0) - | (-5,-12) | (-10,0) - | (-5,-12) | (-3,4) - | (-5,-12) | (5.1,34.5) - | (-5,-12) | (10,10) - | (10,10) | (0,0) - | (10,10) | (-10,0) - | (10,10) | (-3,4) - | (10,10) | (5.1,34.5) - | (10,10) | (-5,-12) -(30 rows) + thirty | point1 | point2 +--------+-------------------+------------------- + | (0,0) | (-10,0) + | (0,0) | (-3,4) + | (0,0) | (5.1,34.5) + | (0,0) | (-5,-12) + | (0,0) | (1e+300,Infinity) + | (0,0) | (NaN,NaN) + | (0,0) | (10,10) + | (-10,0) | (0,0) + | (-10,0) | (-3,4) + | (-10,0) | (5.1,34.5) + | (-10,0) | (-5,-12) + | (-10,0) | (1e-300,-1e-300) + | (-10,0) | (1e+300,Infinity) + | (-10,0) | (NaN,NaN) + | (-10,0) | (10,10) + | (-3,4) | (0,0) + | (-3,4) | (-10,0) + | (-3,4) | (5.1,34.5) + | (-3,4) | (-5,-12) + | (-3,4) | (1e-300,-1e-300) + | (-3,4) | (1e+300,Infinity) + | (-3,4) | (NaN,NaN) + | (-3,4) | (10,10) + | (5.1,34.5) | (0,0) + | (5.1,34.5) | (-10,0) + | (5.1,34.5) | (-3,4) + | (5.1,34.5) | (-5,-12) + | (5.1,34.5) | (1e-300,-1e-300) + | (5.1,34.5) | (1e+300,Infinity) + | (5.1,34.5) | (NaN,NaN) + | (5.1,34.5) | (10,10) + | (-5,-12) | (0,0) + | (-5,-12) | (-10,0) + | (-5,-12) | (-3,4) + | (-5,-12) | (5.1,34.5) + | (-5,-12) | (1e-300,-1e-300) + | (-5,-12) | (1e+300,Infinity) + | (-5,-12) | (NaN,NaN) + | (-5,-12) | (10,10) + | (1e-300,-1e-300) | (-10,0) + | (1e-300,-1e-300) | (-3,4) + | (1e-300,-1e-300) | (5.1,34.5) + | (1e-300,-1e-300) | (-5,-12) + | (1e-300,-1e-300) | (1e+300,Infinity) + | (1e-300,-1e-300) | (NaN,NaN) + | (1e-300,-1e-300) | (10,10) + | (1e+300,Infinity) | (0,0) + | (1e+300,Infinity) | (-10,0) + | 
(1e+300,Infinity) | (-3,4) + | (1e+300,Infinity) | (5.1,34.5) + | (1e+300,Infinity) | (-5,-12) + | (1e+300,Infinity) | (1e-300,-1e-300) + | (1e+300,Infinity) | (1e+300,Infinity) + | (1e+300,Infinity) | (NaN,NaN) + | (1e+300,Infinity) | (10,10) + | (NaN,NaN) | (0,0) + | (NaN,NaN) | (-10,0) + | (NaN,NaN) | (-3,4) + | (NaN,NaN) | (5.1,34.5) + | (NaN,NaN) | (-5,-12) + | (NaN,NaN) | (1e-300,-1e-300) + | (NaN,NaN) | (1e+300,Infinity) + | (NaN,NaN) | (NaN,NaN) + | (NaN,NaN) | (10,10) + | (10,10) | (0,0) + | (10,10) | (-10,0) + | (10,10) | (-3,4) + | (10,10) | (5.1,34.5) + | (10,10) | (-5,-12) + | (10,10) | (1e-300,-1e-300) + | (10,10) | (1e+300,Infinity) + | (10,10) | (NaN,NaN) +(72 rows) -- put distance result into output to allow sorting with GEQ optimizer - tgl 97/05/10 SELECT '' AS fifteen, p1.f1 AS point1, p2.f1 AS point2, (p1.f1 <-> p2.f1) AS distance FROM POINT_TBL p1, POINT_TBL p2 WHERE (p1.f1 <-> p2.f1) > 3 and p1.f1 << p2.f1 ORDER BY distance, p1.f1[0], p2.f1[0]; - fifteen | point1 | point2 | distance ----------+------------+------------+------------------ - | (-3,4) | (0,0) | 5 - | (-10,0) | (-3,4) | 8.06225774829855 - | (-10,0) | (0,0) | 10 - | (-10,0) | (-5,-12) | 13 - | (-5,-12) | (0,0) | 13 - | (0,0) | (10,10) | 14.142135623731 - | (-3,4) | (10,10) | 14.3178210632764 - | (-5,-12) | (-3,4) | 16.1245154965971 - | (-10,0) | (10,10) | 22.3606797749979 - | (5.1,34.5) | (10,10) | 24.9851956166046 - | (-5,-12) | (10,10) | 26.6270539113887 - | (-3,4) | (5.1,34.5) | 31.5572495632937 - | (0,0) | (5.1,34.5) | 34.8749193547455 - | (-10,0) | (5.1,34.5) | 37.6597928831267 - | (-5,-12) | (5.1,34.5) | 47.5842410888311 -(15 rows) + fifteen | point1 | point2 | distance +---------+------------------+-------------------+------------------ + | (-3,4) | (0,0) | 5 + | (-3,4) | (1e-300,-1e-300) | 5 + | (-10,0) | (-3,4) | 8.06225774829855 + | (-10,0) | (0,0) | 10 + | (-10,0) | (1e-300,-1e-300) | 10 + | (-10,0) | (-5,-12) | 13 + | (-5,-12) | (0,0) | 13 + | (-5,-12) | (1e-300,-1e-300) | 13 + | (0,0) | (10,10) | 14.142135623731 + | (1e-300,-1e-300) | (10,10) | 14.142135623731 + | (-3,4) | (10,10) | 14.3178210632764 + | (-5,-12) | (-3,4) | 16.1245154965971 + | (-10,0) | (10,10) | 22.3606797749979 + | (5.1,34.5) | (10,10) | 24.9851956166046 + | (-5,-12) | (10,10) | 26.6270539113887 + | (-3,4) | (5.1,34.5) | 31.5572495632937 + | (0,0) | (5.1,34.5) | 34.8749193547455 + | (1e-300,-1e-300) | (5.1,34.5) | 34.8749193547455 + | (-10,0) | (5.1,34.5) | 37.6597928831267 + | (-5,-12) | (5.1,34.5) | 47.5842410888311 + | (-10,0) | (1e+300,Infinity) | Infinity + | (-5,-12) | (1e+300,Infinity) | Infinity + | (-3,4) | (1e+300,Infinity) | Infinity + | (0,0) | (1e+300,Infinity) | Infinity + | (1e-300,-1e-300) | (1e+300,Infinity) | Infinity + | (5.1,34.5) | (1e+300,Infinity) | Infinity + | (10,10) | (1e+300,Infinity) | Infinity +(27 rows) -- put distance result into output to allow sorting with GEQ optimizer - tgl 97/05/10 SELECT '' AS three, p1.f1 AS point1, p2.f1 AS point2, (p1.f1 <-> p2.f1) AS distance FROM POINT_TBL p1, POINT_TBL p2 WHERE (p1.f1 <-> p2.f1) > 3 and p1.f1 << p2.f1 and p1.f1 >^ p2.f1 ORDER BY distance; - three | point1 | point2 | distance --------+------------+----------+------------------ - | (-3,4) | (0,0) | 5 - | (-10,0) | (-5,-12) | 13 - | (5.1,34.5) | (10,10) | 24.9851956166046 -(3 rows) + three | point1 | point2 | distance +-------+------------+------------------+------------------ + | (-3,4) | (0,0) | 5 + | (-3,4) | (1e-300,-1e-300) | 5 + | (-10,0) | (-5,-12) | 13 + | (5.1,34.5) | (10,10) | 
24.9851956166046 +(4 rows) -- Test that GiST indexes provide same behavior as sequential scan CREATE TEMP TABLE point_gist_tbl(f1 point); diff --git a/src/test/regress/expected/polygon.out b/src/test/regress/expected/polygon.out index 2361274f9e..91e2647c46 100644 --- a/src/test/regress/expected/polygon.out +++ b/src/test/regress/expected/polygon.out @@ -6,6 +6,9 @@ CREATE TABLE POLYGON_TBL(f1 polygon); INSERT INTO POLYGON_TBL(f1) VALUES ('(2.0,0.0),(2.0,4.0),(0.0,0.0)'); INSERT INTO POLYGON_TBL(f1) VALUES ('(3.0,1.0),(3.0,3.0),(1.0,0.0)'); +INSERT INTO POLYGON_TBL(f1) VALUES ('(1,2),(3,4),(5,6),(7,8)'); +INSERT INTO POLYGON_TBL(f1) VALUES ('(7,8),(5,6),(3,4),(1,2)'); -- Reverse +INSERT INTO POLYGON_TBL(f1) VALUES ('(1,2),(7,8),(5,6),(3,-4)'); -- degenerate polygons INSERT INTO POLYGON_TBL(f1) VALUES ('(0.0,0.0)'); INSERT INTO POLYGON_TBL(f1) VALUES ('(0.0,1.0),(0.0,1.0)'); @@ -31,199 +34,300 @@ ERROR: invalid input syntax for type polygon: "asdf" LINE 1: INSERT INTO POLYGON_TBL(f1) VALUES ('asdf'); ^ SELECT '' AS four, * FROM POLYGON_TBL; - four | f1 -------+--------------------- + four | f1 +------+---------------------------- | ((2,0),(2,4),(0,0)) | ((3,1),(3,3),(1,0)) + | ((1,2),(3,4),(5,6),(7,8)) + | ((7,8),(5,6),(3,4),(1,2)) + | ((1,2),(7,8),(5,6),(3,-4)) | ((0,0)) | ((0,1),(0,1)) -(4 rows) +(7 rows) --- overlap -SELECT '' AS three, p.* - FROM POLYGON_TBL p - WHERE p.f1 && '(3.0,1.0),(3.0,3.0),(1.0,0.0)'; - three | f1 --------+--------------------- - | ((2,0),(2,4),(0,0)) - | ((3,1),(3,3),(1,0)) -(2 rows) - --- left overlap -SELECT '' AS four, p.* - FROM POLYGON_TBL p - WHERE p.f1 &< '(3.0,1.0),(3.0,3.0),(1.0,0.0)'; - four | f1 -------+--------------------- - | ((2,0),(2,4),(0,0)) - | ((3,1),(3,3),(1,0)) - | ((0,0)) - | ((0,1),(0,1)) -(4 rows) +-- +-- Test the SP-GiST index +-- +CREATE TABLE quad_poly_tbl (id int, p polygon); +INSERT INTO quad_poly_tbl + SELECT (x - 1) * 100 + y, polygon(circle(point(x * 10, y * 10), 1 + (x + y) % 10)) + FROM generate_series(1, 100) x, + generate_series(1, 100) y; +INSERT INTO quad_poly_tbl + SELECT i, polygon '((200, 300),(210, 310),(230, 290))' + FROM generate_series(10001, 11000) AS i; +INSERT INTO quad_poly_tbl + VALUES + (11001, NULL), + (11002, NULL), + (11003, NULL); +CREATE INDEX quad_poly_tbl_idx ON quad_poly_tbl USING spgist(p); +-- get reference results for ORDER BY distance from seq scan +SET enable_seqscan = ON; +SET enable_indexscan = OFF; +SET enable_bitmapscan = OFF; +CREATE TABLE quad_poly_tbl_ord_seq1 AS +SELECT rank() OVER (ORDER BY p <-> point '123,456') n, p <-> point '123,456' dist, id +FROM quad_poly_tbl; +CREATE TABLE quad_poly_tbl_ord_seq2 AS +SELECT rank() OVER (ORDER BY p <-> point '123,456') n, p <-> point '123,456' dist, id +FROM quad_poly_tbl WHERE p <@ polygon '((300,300),(400,600),(600,500),(700,200))'; +-- check results results from index scan +SET enable_seqscan = OFF; +SET enable_indexscan = OFF; +SET enable_bitmapscan = ON; +EXPLAIN (COSTS OFF) +SELECT count(*) FROM quad_poly_tbl WHERE p << polygon '((300,300),(400,600),(600,500),(700,200))'; + QUERY PLAN +--------------------------------------------------------------------------------------- + Aggregate + -> Bitmap Heap Scan on quad_poly_tbl + Recheck Cond: (p << '((300,300),(400,600),(600,500),(700,200))'::polygon) + -> Bitmap Index Scan on quad_poly_tbl_idx + Index Cond: (p << '((300,300),(400,600),(600,500),(700,200))'::polygon) +(5 rows) --- right overlap -SELECT '' AS two, p.* - FROM POLYGON_TBL p - WHERE p.f1 &> '(3.0,1.0),(3.0,3.0),(1.0,0.0)'; - two | f1 
------+--------------------- - | ((3,1),(3,3),(1,0)) +SELECT count(*) FROM quad_poly_tbl WHERE p << polygon '((300,300),(400,600),(600,500),(700,200))'; + count +------- + 3890 (1 row) --- left of -SELECT '' AS one, p.* - FROM POLYGON_TBL p - WHERE p.f1 << '(3.0,1.0),(3.0,3.0),(1.0,0.0)'; - one | f1 ------+--------------- - | ((0,0)) - | ((0,1),(0,1)) -(2 rows) - --- right of -SELECT '' AS zero, p.* - FROM POLYGON_TBL p - WHERE p.f1 >> '(3.0,1.0),(3.0,3.0),(1.0,0.0)'; - zero | f1 -------+---- -(0 rows) - --- contained -SELECT '' AS one, p.* - FROM POLYGON_TBL p - WHERE p.f1 <@ polygon '(3.0,1.0),(3.0,3.0),(1.0,0.0)'; - one | f1 ------+--------------------- - | ((3,1),(3,3),(1,0)) -(1 row) +EXPLAIN (COSTS OFF) +SELECT count(*) FROM quad_poly_tbl WHERE p &< polygon '((300,300),(400,600),(600,500),(700,200))'; + QUERY PLAN +--------------------------------------------------------------------------------------- + Aggregate + -> Bitmap Heap Scan on quad_poly_tbl + Recheck Cond: (p &< '((300,300),(400,600),(600,500),(700,200))'::polygon) + -> Bitmap Index Scan on quad_poly_tbl_idx + Index Cond: (p &< '((300,300),(400,600),(600,500),(700,200))'::polygon) +(5 rows) --- same -SELECT '' AS one, p.* - FROM POLYGON_TBL p - WHERE p.f1 ~= polygon '(3.0,1.0),(3.0,3.0),(1.0,0.0)'; - one | f1 ------+--------------------- - | ((3,1),(3,3),(1,0)) +SELECT count(*) FROM quad_poly_tbl WHERE p &< polygon '((300,300),(400,600),(600,500),(700,200))'; + count +------- + 7900 (1 row) --- contains -SELECT '' AS one, p.* - FROM POLYGON_TBL p - WHERE p.f1 @> polygon '(3.0,1.0),(3.0,3.0),(1.0,0.0)'; - one | f1 ------+--------------------- - | ((3,1),(3,3),(1,0)) -(1 row) +EXPLAIN (COSTS OFF) +SELECT count(*) FROM quad_poly_tbl WHERE p && polygon '((300,300),(400,600),(600,500),(700,200))'; + QUERY PLAN +--------------------------------------------------------------------------------------- + Aggregate + -> Bitmap Heap Scan on quad_poly_tbl + Recheck Cond: (p && '((300,300),(400,600),(600,500),(700,200))'::polygon) + -> Bitmap Index Scan on quad_poly_tbl_idx + Index Cond: (p && '((300,300),(400,600),(600,500),(700,200))'::polygon) +(5 rows) --- --- polygon logic --- --- left of -SELECT polygon '(2.0,0.0),(2.0,4.0),(0.0,0.0)' << polygon '(3.0,1.0),(3.0,3.0),(1.0,0.0)' AS false; - false +SELECT count(*) FROM quad_poly_tbl WHERE p && polygon '((300,300),(400,600),(600,500),(700,200))'; + count ------- - f + 977 (1 row) --- left overlap -SELECT polygon '(2.0,0.0),(2.0,4.0),(0.0,0.0)' << polygon '(3.0,1.0),(3.0,3.0),(1.0,0.0)' AS true; - true ------- - f -(1 row) +EXPLAIN (COSTS OFF) +SELECT count(*) FROM quad_poly_tbl WHERE p &> polygon '((300,300),(400,600),(600,500),(700,200))'; + QUERY PLAN +--------------------------------------------------------------------------------------- + Aggregate + -> Bitmap Heap Scan on quad_poly_tbl + Recheck Cond: (p &> '((300,300),(400,600),(600,500),(700,200))'::polygon) + -> Bitmap Index Scan on quad_poly_tbl_idx + Index Cond: (p &> '((300,300),(400,600),(600,500),(700,200))'::polygon) +(5 rows) --- right overlap -SELECT polygon '(2.0,0.0),(2.0,4.0),(0.0,0.0)' &> polygon '(3.0,1.0),(3.0,3.0),(1.0,0.0)' AS false; - false +SELECT count(*) FROM quad_poly_tbl WHERE p &> polygon '((300,300),(400,600),(600,500),(700,200))'; + count ------- - f + 7000 (1 row) --- right of -SELECT polygon '(2.0,0.0),(2.0,4.0),(0.0,0.0)' >> polygon '(3.0,1.0),(3.0,3.0),(1.0,0.0)' AS false; - false +EXPLAIN (COSTS OFF) +SELECT count(*) FROM quad_poly_tbl WHERE p >> polygon 
'((300,300),(400,600),(600,500),(700,200))'; + QUERY PLAN +--------------------------------------------------------------------------------------- + Aggregate + -> Bitmap Heap Scan on quad_poly_tbl + Recheck Cond: (p >> '((300,300),(400,600),(600,500),(700,200))'::polygon) + -> Bitmap Index Scan on quad_poly_tbl_idx + Index Cond: (p >> '((300,300),(400,600),(600,500),(700,200))'::polygon) +(5 rows) + +SELECT count(*) FROM quad_poly_tbl WHERE p >> polygon '((300,300),(400,600),(600,500),(700,200))'; + count ------- - f + 2990 (1 row) --- contained in -SELECT polygon '(2.0,0.0),(2.0,4.0),(0.0,0.0)' <@ polygon '(3.0,1.0),(3.0,3.0),(1.0,0.0)' AS false; - false +EXPLAIN (COSTS OFF) +SELECT count(*) FROM quad_poly_tbl WHERE p <<| polygon '((300,300),(400,600),(600,500),(700,200))'; + QUERY PLAN +---------------------------------------------------------------------------------------- + Aggregate + -> Bitmap Heap Scan on quad_poly_tbl + Recheck Cond: (p <<| '((300,300),(400,600),(600,500),(700,200))'::polygon) + -> Bitmap Index Scan on quad_poly_tbl_idx + Index Cond: (p <<| '((300,300),(400,600),(600,500),(700,200))'::polygon) +(5 rows) + +SELECT count(*) FROM quad_poly_tbl WHERE p <<| polygon '((300,300),(400,600),(600,500),(700,200))'; + count ------- - f + 1890 (1 row) --- contains -SELECT polygon '(2.0,0.0),(2.0,4.0),(0.0,0.0)' @> polygon '(3.0,1.0),(3.0,3.0),(1.0,0.0)' AS false; - false +EXPLAIN (COSTS OFF) +SELECT count(*) FROM quad_poly_tbl WHERE p &<| polygon '((300,300),(400,600),(600,500),(700,200))'; + QUERY PLAN +---------------------------------------------------------------------------------------- + Aggregate + -> Bitmap Heap Scan on quad_poly_tbl + Recheck Cond: (p &<| '((300,300),(400,600),(600,500),(700,200))'::polygon) + -> Bitmap Index Scan on quad_poly_tbl_idx + Index Cond: (p &<| '((300,300),(400,600),(600,500),(700,200))'::polygon) +(5 rows) + +SELECT count(*) FROM quad_poly_tbl WHERE p &<| polygon '((300,300),(400,600),(600,500),(700,200))'; + count ------- - f + 6900 (1 row) -SELECT '((0,4),(6,4),(1,2),(6,0),(0,0))'::polygon @> '((2,1),(2,3),(3,3),(3,1))'::polygon AS "false"; - false +EXPLAIN (COSTS OFF) +SELECT count(*) FROM quad_poly_tbl WHERE p |&> polygon '((300,300),(400,600),(600,500),(700,200))'; + QUERY PLAN +---------------------------------------------------------------------------------------- + Aggregate + -> Bitmap Heap Scan on quad_poly_tbl + Recheck Cond: (p |&> '((300,300),(400,600),(600,500),(700,200))'::polygon) + -> Bitmap Index Scan on quad_poly_tbl_idx + Index Cond: (p |&> '((300,300),(400,600),(600,500),(700,200))'::polygon) +(5 rows) + +SELECT count(*) FROM quad_poly_tbl WHERE p |&> polygon '((300,300),(400,600),(600,500),(700,200))'; + count ------- - f + 9000 (1 row) -SELECT '((0,4),(6,4),(3,2),(6,0),(0,0))'::polygon @> '((2,1),(2,3),(3,3),(3,1))'::polygon AS "true"; - true ------- - t -(1 row) +EXPLAIN (COSTS OFF) +SELECT count(*) FROM quad_poly_tbl WHERE p |>> polygon '((300,300),(400,600),(600,500),(700,200))'; + QUERY PLAN +---------------------------------------------------------------------------------------- + Aggregate + -> Bitmap Heap Scan on quad_poly_tbl + Recheck Cond: (p |>> '((300,300),(400,600),(600,500),(700,200))'::polygon) + -> Bitmap Index Scan on quad_poly_tbl_idx + Index Cond: (p |>> '((300,300),(400,600),(600,500),(700,200))'::polygon) +(5 rows) -SELECT '((1,1),(1,4),(5,4),(5,3),(2,3),(2,2),(5,2),(5,1))'::polygon @> '((3,2),(3,3),(4,3),(4,2))'::polygon AS "false"; - false +SELECT count(*) FROM quad_poly_tbl WHERE p |>> polygon 
'((300,300),(400,600),(600,500),(700,200))'; + count ------- - f + 3990 (1 row) -SELECT '((0,0),(0,3),(3,3),(3,0))'::polygon @> '((2,1),(2,2),(3,2),(3,1))'::polygon AS "true"; - true ------- - t -(1 row) +EXPLAIN (COSTS OFF) +SELECT count(*) FROM quad_poly_tbl WHERE p <@ polygon '((300,300),(400,600),(600,500),(700,200))'; + QUERY PLAN +--------------------------------------------------------------------------------------- + Aggregate + -> Bitmap Heap Scan on quad_poly_tbl + Recheck Cond: (p <@ '((300,300),(400,600),(600,500),(700,200))'::polygon) + -> Bitmap Index Scan on quad_poly_tbl_idx + Index Cond: (p <@ '((300,300),(400,600),(600,500),(700,200))'::polygon) +(5 rows) --- same -SELECT polygon '(2.0,0.0),(2.0,4.0),(0.0,0.0)' ~= polygon '(3.0,1.0),(3.0,3.0),(1.0,0.0)' AS false; - false +SELECT count(*) FROM quad_poly_tbl WHERE p <@ polygon '((300,300),(400,600),(600,500),(700,200))'; + count ------- - f + 831 (1 row) --- overlap -SELECT polygon '(2.0,0.0),(2.0,4.0),(0.0,0.0)' && polygon '(3.0,1.0),(3.0,3.0),(1.0,0.0)' AS true; - true ------- - t -(1 row) +EXPLAIN (COSTS OFF) +SELECT count(*) FROM quad_poly_tbl WHERE p @> polygon '((340,550),(343,552),(341,553))'; + QUERY PLAN +----------------------------------------------------------------------------- + Aggregate + -> Bitmap Heap Scan on quad_poly_tbl + Recheck Cond: (p @> '((340,550),(343,552),(341,553))'::polygon) + -> Bitmap Index Scan on quad_poly_tbl_idx + Index Cond: (p @> '((340,550),(343,552),(341,553))'::polygon) +(5 rows) -SELECT '((0,4),(6,4),(1,2),(6,0),(0,0))'::polygon && '((2,1),(2,3),(3,3),(3,1))'::polygon AS "true"; - true ------- - t +SELECT count(*) FROM quad_poly_tbl WHERE p @> polygon '((340,550),(343,552),(341,553))'; + count +------- + 1 (1 row) -SELECT '((1,4),(1,1),(4,1),(4,2),(2,2),(2,4),(1,4))'::polygon && '((3,3),(4,3),(4,4),(3,4),(3,3))'::polygon AS "false"; - false +EXPLAIN (COSTS OFF) +SELECT count(*) FROM quad_poly_tbl WHERE p ~= polygon '((200, 300),(210, 310),(230, 290))'; + QUERY PLAN +----------------------------------------------------------------------------- + Aggregate + -> Bitmap Heap Scan on quad_poly_tbl + Recheck Cond: (p ~= '((200,300),(210,310),(230,290))'::polygon) + -> Bitmap Index Scan on quad_poly_tbl_idx + Index Cond: (p ~= '((200,300),(210,310),(230,290))'::polygon) +(5 rows) + +SELECT count(*) FROM quad_poly_tbl WHERE p ~= polygon '((200, 300),(210, 310),(230, 290))'; + count ------- - f + 1000 (1 row) -SELECT '((200,800),(800,800),(800,200),(200,200))' && '(1000,1000,0,0)'::polygon AS "true"; - true ------- - t -(1 row) +-- test ORDER BY distance +SET enable_indexscan = ON; +SET enable_bitmapscan = OFF; +EXPLAIN (COSTS OFF) +SELECT rank() OVER (ORDER BY p <-> point '123,456') n, p <-> point '123,456' dist, id +FROM quad_poly_tbl; + QUERY PLAN +----------------------------------------------------------- + WindowAgg + -> Index Scan using quad_poly_tbl_idx on quad_poly_tbl + Order By: (p <-> '(123,456)'::point) +(3 rows) --- distance from a point -SELECT '(0,0)'::point <-> '((0,0),(1,2),(2,1))'::polygon as on_corner, - '(1,1)'::point <-> '((0,0),(2,2),(1,3))'::polygon as on_segment, - '(2,2)'::point <-> '((0,0),(1,4),(3,1))'::polygon as inside, - '(3,3)'::point <-> '((0,2),(2,0),(2,2))'::polygon as near_corner, - '(4,4)'::point <-> '((0,0),(0,3),(4,0))'::polygon as near_segment; - on_corner | on_segment | inside | near_corner | near_segment ------------+------------+--------+-----------------+-------------- - 0 | 0 | 0 | 1.4142135623731 | 3.2 -(1 row) +CREATE TEMP TABLE 
quad_poly_tbl_ord_idx1 AS +SELECT rank() OVER (ORDER BY p <-> point '123,456') n, p <-> point '123,456' dist, id +FROM quad_poly_tbl; +SELECT * +FROM quad_poly_tbl_ord_seq1 seq FULL JOIN quad_poly_tbl_ord_idx1 idx + ON seq.n = idx.n AND seq.id = idx.id AND + (seq.dist = idx.dist OR seq.dist IS NULL AND idx.dist IS NULL) +WHERE seq.id IS NULL OR idx.id IS NULL; + n | dist | id | n | dist | id +---+------+----+---+------+---- +(0 rows) + +EXPLAIN (COSTS OFF) +SELECT rank() OVER (ORDER BY p <-> point '123,456') n, p <-> point '123,456' dist, id +FROM quad_poly_tbl WHERE p <@ polygon '((300,300),(400,600),(600,500),(700,200))'; + QUERY PLAN +--------------------------------------------------------------------------------- + WindowAgg + -> Index Scan using quad_poly_tbl_idx on quad_poly_tbl + Index Cond: (p <@ '((300,300),(400,600),(600,500),(700,200))'::polygon) + Order By: (p <-> '(123,456)'::point) +(4 rows) + +CREATE TEMP TABLE quad_poly_tbl_ord_idx2 AS +SELECT rank() OVER (ORDER BY p <-> point '123,456') n, p <-> point '123,456' dist, id +FROM quad_poly_tbl WHERE p <@ polygon '((300,300),(400,600),(600,500),(700,200))'; +SELECT * +FROM quad_poly_tbl_ord_seq2 seq FULL JOIN quad_poly_tbl_ord_idx2 idx + ON seq.n = idx.n AND seq.id = idx.id AND + (seq.dist = idx.dist OR seq.dist IS NULL AND idx.dist IS NULL) +WHERE seq.id IS NULL OR idx.id IS NULL; + n | dist | id | n | dist | id +---+------+----+---+------+---- +(0 rows) +RESET enable_seqscan; +RESET enable_indexscan; +RESET enable_bitmapscan; diff --git a/src/test/regress/expected/polymorphism.out b/src/test/regress/expected/polymorphism.out index 91cfb743b6..986417a188 100644 --- a/src/test/regress/expected/polymorphism.out +++ b/src/test/regress/expected/polymorphism.out @@ -915,10 +915,10 @@ select dfunc(); -- verify it lists properly \df dfunc - List of functions - Schema | Name | Result data type | Argument data types | Type ---------+-------+------------------+-----------------------------------------------------------+-------- - public | dfunc | integer | a integer DEFAULT 1, OUT sum integer, b integer DEFAULT 2 | normal + List of functions + Schema | Name | Result data type | Argument data types | Type +--------+-------+------------------+-----------------------------------------------------------+------ + public | dfunc | integer | a integer DEFAULT 1, OUT sum integer, b integer DEFAULT 2 | func (1 row) drop function dfunc(int, int); @@ -1083,10 +1083,10 @@ $$ select array_upper($1, 1) $$ language sql; ERROR: cannot remove parameter defaults from existing function HINT: Use DROP FUNCTION dfunc(integer[]) first. \df dfunc - List of functions - Schema | Name | Result data type | Argument data types | Type ---------+-------+------------------+-------------------------------------------------+-------- - public | dfunc | integer | VARIADIC a integer[] DEFAULT ARRAY[]::integer[] | normal + List of functions + Schema | Name | Result data type | Argument data types | Type +--------+-------+------------------+-------------------------------------------------+------ + public | dfunc | integer | VARIADIC a integer[] DEFAULT ARRAY[]::integer[] | func (1 row) drop function dfunc(a variadic int[]); @@ -1303,31 +1303,31 @@ ERROR: cannot change name of input parameter "c" HINT: Use DROP FUNCTION dfunc(character varying,numeric) first. 
drop function dfunc(varchar, numeric); --fail, named parameters are not unique -create function testfoo(a int, a int) returns int as $$ select 1;$$ language sql; +create function testpolym(a int, a int) returns int as $$ select 1;$$ language sql; ERROR: parameter name "a" used more than once -create function testfoo(int, out a int, out a int) returns int as $$ select 1;$$ language sql; +create function testpolym(int, out a int, out a int) returns int as $$ select 1;$$ language sql; ERROR: parameter name "a" used more than once -create function testfoo(out a int, inout a int) returns int as $$ select 1;$$ language sql; +create function testpolym(out a int, inout a int) returns int as $$ select 1;$$ language sql; ERROR: parameter name "a" used more than once -create function testfoo(a int, inout a int) returns int as $$ select 1;$$ language sql; +create function testpolym(a int, inout a int) returns int as $$ select 1;$$ language sql; ERROR: parameter name "a" used more than once -- valid -create function testfoo(a int, out a int) returns int as $$ select $1;$$ language sql; -select testfoo(37); - testfoo ---------- - 37 +create function testpolym(a int, out a int) returns int as $$ select $1;$$ language sql; +select testpolym(37); + testpolym +----------- + 37 (1 row) -drop function testfoo(int); -create function testfoo(a int) returns table(a int) as $$ select $1;$$ language sql; -select * from testfoo(37); +drop function testpolym(int); +create function testpolym(a int) returns table(a int) as $$ select $1;$$ language sql; +select * from testpolym(37); a ---- 37 (1 row) -drop function testfoo(int); +drop function testpolym(int); -- test polymorphic params and defaults create function dfunc(a anyelement, b anyelement = null, flag bool = true) returns anyelement as $$ @@ -1478,6 +1478,42 @@ select dfunc('a'::text, 'b', flag => true); -- mixed notation a (1 row) +-- this tests lexer edge cases around => +select dfunc(a =>-1); + dfunc +------- + -1 +(1 row) + +select dfunc(a =>+1); + dfunc +------- + 1 +(1 row) + +select dfunc(a =>/**/1); + dfunc +------- + 1 +(1 row) + +select dfunc(a =>--comment to be removed by psql + 1); + dfunc +------- + 1 +(1 row) + +-- need DO to protect the -- from psql +do $$ + declare r integer; + begin + select dfunc(a=>-- comment + 1) into r; + raise info 'r = %', r; + end; +$$; +INFO: r = 1 -- check reverse-listing of named-arg calls CREATE VIEW dfview AS SELECT q1, q2, diff --git a/src/test/regress/expected/portals.out b/src/test/regress/expected/portals.out index 1b8f7b69d1..dc0d2ef7dd 100644 --- a/src/test/regress/expected/portals.out +++ b/src/test/regress/expected/portals.out @@ -1245,6 +1245,100 @@ FETCH FROM c1; DELETE FROM ucview WHERE CURRENT OF c1; -- fail, views not supported ERROR: WHERE CURRENT OF on a view is not implemented +ROLLBACK; +-- Check WHERE CURRENT OF with an index-only scan +BEGIN; +EXPLAIN (costs off) +DECLARE c1 CURSOR FOR SELECT stringu1 FROM onek WHERE stringu1 = 'DZAAAA'; + QUERY PLAN +--------------------------------------------- + Index Only Scan using onek_stringu1 on onek + Index Cond: (stringu1 = 'DZAAAA'::name) +(2 rows) + +DECLARE c1 CURSOR FOR SELECT stringu1 FROM onek WHERE stringu1 = 'DZAAAA'; +FETCH FROM c1; + stringu1 +---------- + DZAAAA +(1 row) + +DELETE FROM onek WHERE CURRENT OF c1; +SELECT stringu1 FROM onek WHERE stringu1 = 'DZAAAA'; + stringu1 +---------- +(0 rows) + +ROLLBACK; +-- Check behavior with rewinding to a previous child scan node, +-- as per bug #15395 +BEGIN; +CREATE TABLE current_check (currentid 
int, payload text); +CREATE TABLE current_check_1 () INHERITS (current_check); +CREATE TABLE current_check_2 () INHERITS (current_check); +INSERT INTO current_check_1 SELECT i, 'p' || i FROM generate_series(1,9) i; +INSERT INTO current_check_2 SELECT i, 'P' || i FROM generate_series(10,19) i; +DECLARE c1 SCROLL CURSOR FOR SELECT * FROM current_check; +-- This tests the fetch-backwards code path +FETCH ABSOLUTE 12 FROM c1; + currentid | payload +-----------+--------- + 12 | P12 +(1 row) + +FETCH ABSOLUTE 8 FROM c1; + currentid | payload +-----------+--------- + 8 | p8 +(1 row) + +DELETE FROM current_check WHERE CURRENT OF c1 RETURNING *; + currentid | payload +-----------+--------- + 8 | p8 +(1 row) + +-- This tests the ExecutorRewind code path +FETCH ABSOLUTE 13 FROM c1; + currentid | payload +-----------+--------- + 13 | P13 +(1 row) + +FETCH ABSOLUTE 1 FROM c1; + currentid | payload +-----------+--------- + 1 | p1 +(1 row) + +DELETE FROM current_check WHERE CURRENT OF c1 RETURNING *; + currentid | payload +-----------+--------- + 1 | p1 +(1 row) + +SELECT * FROM current_check; + currentid | payload +-----------+--------- + 2 | p2 + 3 | p3 + 4 | p4 + 5 | p5 + 6 | p6 + 7 | p7 + 9 | p9 + 10 | P10 + 11 | P11 + 12 | P12 + 13 | P13 + 14 | P14 + 15 | P15 + 16 | P16 + 17 | P17 + 18 | P18 + 19 | P19 +(17 rows) + ROLLBACK; -- Make sure snapshot management works okay, per bug report in -- 235395b90909301035v7228ce63q392931f15aa74b31@mail.gmail.com diff --git a/src/test/regress/expected/privileges.out b/src/test/regress/expected/privileges.out index f37df6c709..3af92ed1a8 100644 --- a/src/test/regress/expected/privileges.out +++ b/src/test/regress/expected/privileges.out @@ -4,14 +4,14 @@ -- Clean up in case a prior regression run failed -- Suppress NOTICE messages when users/groups don't exist SET client_min_messages TO 'warning'; -DROP ROLE IF EXISTS regress_group1; -DROP ROLE IF EXISTS regress_group2; -DROP ROLE IF EXISTS regress_user1; -DROP ROLE IF EXISTS regress_user2; -DROP ROLE IF EXISTS regress_user3; -DROP ROLE IF EXISTS regress_user4; -DROP ROLE IF EXISTS regress_user5; -DROP ROLE IF EXISTS regress_user6; +DROP ROLE IF EXISTS regress_priv_group1; +DROP ROLE IF EXISTS regress_priv_group2; +DROP ROLE IF EXISTS regress_priv_user1; +DROP ROLE IF EXISTS regress_priv_user2; +DROP ROLE IF EXISTS regress_priv_user3; +DROP ROLE IF EXISTS regress_priv_user4; +DROP ROLE IF EXISTS regress_priv_user5; +DROP ROLE IF EXISTS regress_priv_user6; SELECT lo_unlink(oid) FROM pg_largeobject_metadata WHERE oid >= 1000 AND oid < 3000 ORDER BY oid; lo_unlink ----------- @@ -19,26 +19,26 @@ SELECT lo_unlink(oid) FROM pg_largeobject_metadata WHERE oid >= 1000 AND oid < 3 RESET client_min_messages; -- test proper begins here -CREATE USER regress_user1; -CREATE USER regress_user2; -CREATE USER regress_user3; -CREATE USER regress_user4; -CREATE USER regress_user5; -CREATE USER regress_user5; -- duplicate -ERROR: role "regress_user5" already exists -CREATE GROUP regress_group1; -CREATE GROUP regress_group2 WITH USER regress_user1, regress_user2; -ALTER GROUP regress_group1 ADD USER regress_user4; -ALTER GROUP regress_group2 ADD USER regress_user2; -- duplicate -NOTICE: role "regress_user2" is already a member of role "regress_group2" -ALTER GROUP regress_group2 DROP USER regress_user2; -GRANT regress_group2 TO regress_user4 WITH ADMIN OPTION; +CREATE USER regress_priv_user1; +CREATE USER regress_priv_user2; +CREATE USER regress_priv_user3; +CREATE USER regress_priv_user4; +CREATE USER regress_priv_user5; +CREATE USER 
regress_priv_user5; -- duplicate +ERROR: role "regress_priv_user5" already exists +CREATE GROUP regress_priv_group1; +CREATE GROUP regress_priv_group2 WITH USER regress_priv_user1, regress_priv_user2; +ALTER GROUP regress_priv_group1 ADD USER regress_priv_user4; +ALTER GROUP regress_priv_group2 ADD USER regress_priv_user2; -- duplicate +NOTICE: role "regress_priv_user2" is already a member of role "regress_priv_group2" +ALTER GROUP regress_priv_group2 DROP USER regress_priv_user2; +GRANT regress_priv_group2 TO regress_priv_user4 WITH ADMIN OPTION; -- test owner privileges -SET SESSION AUTHORIZATION regress_user1; +SET SESSION AUTHORIZATION regress_priv_user1; SELECT session_user, current_user; - session_user | current_user ----------------+--------------- - regress_user1 | regress_user1 + session_user | current_user +--------------------+-------------------- + regress_priv_user1 | regress_priv_user1 (1 row) CREATE TABLE atest1 ( a int, b text ); @@ -60,23 +60,23 @@ SELECT * FROM atest1; ---+--- (0 rows) -GRANT ALL ON atest1 TO regress_user2; -GRANT SELECT ON atest1 TO regress_user3, regress_user4; +GRANT ALL ON atest1 TO regress_priv_user2; +GRANT SELECT ON atest1 TO regress_priv_user3, regress_priv_user4; SELECT * FROM atest1; a | b ---+--- (0 rows) CREATE TABLE atest2 (col1 varchar(10), col2 boolean); -GRANT SELECT ON atest2 TO regress_user2; -GRANT UPDATE ON atest2 TO regress_user3; -GRANT INSERT ON atest2 TO regress_user4; -GRANT TRUNCATE ON atest2 TO regress_user5; -SET SESSION AUTHORIZATION regress_user2; +GRANT SELECT ON atest2 TO regress_priv_user2; +GRANT UPDATE ON atest2 TO regress_priv_user3; +GRANT INSERT ON atest2 TO regress_priv_user4; +GRANT TRUNCATE ON atest2 TO regress_priv_user5; +SET SESSION AUTHORIZATION regress_priv_user2; SELECT session_user, current_user; - session_user | current_user ----------------+--------------- - regress_user2 | regress_user2 + session_user | current_user +--------------------+-------------------- + regress_priv_user2 | regress_priv_user2 (1 row) -- try various combinations of queries on atest1 and atest2 @@ -92,11 +92,11 @@ SELECT * FROM atest2; -- ok INSERT INTO atest1 VALUES (2, 'two'); -- ok INSERT INTO atest2 VALUES ('foo', true); -- fail -ERROR: permission denied for relation atest2 +ERROR: permission denied for table atest2 INSERT INTO atest1 SELECT 1, b FROM atest1; -- ok UPDATE atest1 SET a = 1 WHERE a = 2; -- ok UPDATE atest2 SET col2 = NOT col2; -- fail -ERROR: permission denied for relation atest2 +ERROR: permission denied for table atest2 SELECT * FROM atest1 FOR UPDATE; -- ok a | b ---+----- @@ -105,17 +105,17 @@ SELECT * FROM atest1 FOR UPDATE; -- ok (2 rows) SELECT * FROM atest2 FOR UPDATE; -- fail -ERROR: permission denied for relation atest2 +ERROR: permission denied for table atest2 DELETE FROM atest2; -- fail -ERROR: permission denied for relation atest2 +ERROR: permission denied for table atest2 TRUNCATE atest2; -- fail -ERROR: permission denied for relation atest2 +ERROR: permission denied for table atest2 BEGIN; LOCK atest2 IN ACCESS EXCLUSIVE MODE; -- fail -ERROR: permission denied for relation atest2 +ERROR: permission denied for table atest2 COMMIT; COPY atest2 FROM stdin; -- fail -ERROR: permission denied for relation atest2 +ERROR: permission denied for table atest2 GRANT ALL ON atest1 TO PUBLIC; -- fail WARNING: no privileges were granted for "atest1" -- checks in subquery, both ok @@ -129,11 +129,11 @@ SELECT * FROM atest2 WHERE ( col1 IN ( SELECT b FROM atest1 ) ); ------+------ (0 rows) -SET SESSION AUTHORIZATION 
regress_user3; +SET SESSION AUTHORIZATION regress_priv_user3; SELECT session_user, current_user; - session_user | current_user ----------------+--------------- - regress_user3 | regress_user3 + session_user | current_user +--------------------+-------------------- + regress_priv_user3 | regress_priv_user3 (1 row) SELECT * FROM atest1; -- ok @@ -144,38 +144,38 @@ SELECT * FROM atest1; -- ok (2 rows) SELECT * FROM atest2; -- fail -ERROR: permission denied for relation atest2 +ERROR: permission denied for table atest2 INSERT INTO atest1 VALUES (2, 'two'); -- fail -ERROR: permission denied for relation atest1 +ERROR: permission denied for table atest1 INSERT INTO atest2 VALUES ('foo', true); -- fail -ERROR: permission denied for relation atest2 +ERROR: permission denied for table atest2 INSERT INTO atest1 SELECT 1, b FROM atest1; -- fail -ERROR: permission denied for relation atest1 +ERROR: permission denied for table atest1 UPDATE atest1 SET a = 1 WHERE a = 2; -- fail -ERROR: permission denied for relation atest1 +ERROR: permission denied for table atest1 UPDATE atest2 SET col2 = NULL; -- ok UPDATE atest2 SET col2 = NOT col2; -- fails; requires SELECT on atest2 -ERROR: permission denied for relation atest2 +ERROR: permission denied for table atest2 UPDATE atest2 SET col2 = true FROM atest1 WHERE atest1.a = 5; -- ok SELECT * FROM atest1 FOR UPDATE; -- fail -ERROR: permission denied for relation atest1 +ERROR: permission denied for table atest1 SELECT * FROM atest2 FOR UPDATE; -- fail -ERROR: permission denied for relation atest2 +ERROR: permission denied for table atest2 DELETE FROM atest2; -- fail -ERROR: permission denied for relation atest2 +ERROR: permission denied for table atest2 TRUNCATE atest2; -- fail -ERROR: permission denied for relation atest2 +ERROR: permission denied for table atest2 BEGIN; LOCK atest2 IN ACCESS EXCLUSIVE MODE; -- ok COMMIT; COPY atest2 FROM stdin; -- fail -ERROR: permission denied for relation atest2 +ERROR: permission denied for table atest2 -- checks in subquery, both fail SELECT * FROM atest1 WHERE ( b IN ( SELECT col1 FROM atest2 ) ); -ERROR: permission denied for relation atest2 +ERROR: permission denied for table atest2 SELECT * FROM atest2 WHERE ( col1 IN ( SELECT b FROM atest1 ) ); -ERROR: permission denied for relation atest2 -SET SESSION AUTHORIZATION regress_user4; +ERROR: permission denied for table atest2 +SET SESSION AUTHORIZATION regress_priv_user4; COPY atest2 FROM stdin; -- ok SELECT * FROM atest1; -- ok a | b @@ -185,8 +185,8 @@ SELECT * FROM atest1; -- ok (2 rows) -- test leaky-function protections in selfuncs --- regress_user1 will own a table and provide a view for it. -SET SESSION AUTHORIZATION regress_user1; +-- regress_priv_user1 will own a table and provide a view for it. +SET SESSION AUTHORIZATION regress_priv_user1; CREATE TABLE atest12 as SELECT x AS a, 10001 - x AS b FROM generate_series(1,10000) x; CREATE INDEX ON atest12 (a); @@ -225,8 +225,8 @@ EXPLAIN (COSTS OFF) SELECT * FROM atest12 x, atest12 y Index Cond: (a = y.b) (5 rows) --- Check if regress_user2 can break security. -SET SESSION AUTHORIZATION regress_user2; +-- Check if regress_priv_user2 can break security. 
+SET SESSION AUTHORIZATION regress_priv_user2; CREATE FUNCTION leak2(integer,integer) RETURNS boolean AS $$begin raise notice 'leak % %', $1, $2; return $1 > $2; end$$ LANGUAGE plpgsql immutable; @@ -234,7 +234,7 @@ CREATE OPERATOR >>> (procedure = leak2, leftarg = integer, rightarg = integer, restrict = scalargtsel); -- This should not show any "leak" notices before failing. EXPLAIN (COSTS OFF) SELECT * FROM atest12 WHERE a >>> 0; -ERROR: permission denied for relation atest12 +ERROR: permission denied for table atest12 -- This plan should use hashjoin, as it will expect many rows to be selected. EXPLAIN (COSTS OFF) SELECT * FROM atest12v x, atest12v y WHERE x.a = y.b; QUERY PLAN @@ -248,11 +248,11 @@ EXPLAIN (COSTS OFF) SELECT * FROM atest12v x, atest12v y WHERE x.a = y.b; Filter: (b <<< 5) (7 rows) --- Now regress_user1 grants sufficient access to regress_user2. -SET SESSION AUTHORIZATION regress_user1; +-- Now regress_priv_user1 grants sufficient access to regress_priv_user2. +SET SESSION AUTHORIZATION regress_priv_user1; GRANT SELECT (a, b) ON atest12 TO PUBLIC; -SET SESSION AUTHORIZATION regress_user2; --- Now regress_user2 will also get a good row estimate. +SET SESSION AUTHORIZATION regress_priv_user2; +-- Now regress_priv_user2 will also get a good row estimate. EXPLAIN (COSTS OFF) SELECT * FROM atest12v x, atest12v y WHERE x.a = y.b; QUERY PLAN ------------------------------------------------- @@ -278,19 +278,19 @@ EXPLAIN (COSTS OFF) SELECT * FROM atest12 x, atest12 y Filter: (abs(a) <<< 5) (6 rows) --- clean up (regress_user1's objects are all dropped later) +-- clean up (regress_priv_user1's objects are all dropped later) DROP FUNCTION leak2(integer, integer) CASCADE; NOTICE: drop cascades to operator >>>(integer,integer) -- groups -SET SESSION AUTHORIZATION regress_user3; +SET SESSION AUTHORIZATION regress_priv_user3; CREATE TABLE atest3 (one int, two int, three int); -GRANT DELETE ON atest3 TO GROUP regress_group2; -SET SESSION AUTHORIZATION regress_user1; +GRANT DELETE ON atest3 TO GROUP regress_priv_group2; +SET SESSION AUTHORIZATION regress_priv_user1; SELECT * FROM atest3; -- fail -ERROR: permission denied for relation atest3 +ERROR: permission denied for table atest3 DELETE FROM atest3; -- ok -- views -SET SESSION AUTHORIZATION regress_user3; +SET SESSION AUTHORIZATION regress_priv_user3; CREATE VIEW atestv1 AS SELECT * FROM atest1; -- ok /* The next *should* fail, but it's not implemented that way yet. */ CREATE VIEW atestv2 AS SELECT * FROM atest2; @@ -305,10 +305,10 @@ SELECT * FROM atestv1; -- ok (2 rows) SELECT * FROM atestv2; -- fail -ERROR: permission denied for relation atest2 -GRANT SELECT ON atestv1, atestv3 TO regress_user4; -GRANT SELECT ON atestv2 TO regress_user2; -SET SESSION AUTHORIZATION regress_user4; +ERROR: permission denied for table atest2 +GRANT SELECT ON atestv1, atestv3 TO regress_priv_user4; +GRANT SELECT ON atestv2 TO regress_priv_user2; +SET SESSION AUTHORIZATION regress_priv_user4; SELECT * FROM atestv1; -- ok a | b ---+----- @@ -317,28 +317,28 @@ SELECT * FROM atestv1; -- ok (2 rows) SELECT * FROM atestv2; -- fail -ERROR: permission denied for relation atestv2 +ERROR: permission denied for view atestv2 SELECT * FROM atestv3; -- ok one | two | three -----+-----+------- (0 rows) SELECT * FROM atestv0; -- fail -ERROR: permission denied for relation atestv0 +ERROR: permission denied for view atestv0 -- Appendrels excluded by constraints failed to check permissions in 8.4-9.2. 
select * from ((select a.q1 as x from int8_tbl a offset 0) union all (select b.q2 as x from int8_tbl b offset 0)) ss where false; -ERROR: permission denied for relation int8_tbl +ERROR: permission denied for table int8_tbl set constraint_exclusion = on; select * from ((select a.q1 as x, random() from int8_tbl a where q1 > 0) union all (select b.q2 as x, random() from int8_tbl b where q2 > 0)) ss where x < 0; -ERROR: permission denied for relation int8_tbl +ERROR: permission denied for table int8_tbl reset constraint_exclusion; CREATE VIEW atestv4 AS SELECT * FROM atestv3; -- nested view SELECT * FROM atestv4; -- ok @@ -346,12 +346,12 @@ SELECT * FROM atestv4; -- ok -----+-----+------- (0 rows) -GRANT SELECT ON atestv4 TO regress_user2; -SET SESSION AUTHORIZATION regress_user2; +GRANT SELECT ON atestv4 TO regress_priv_user2; +SET SESSION AUTHORIZATION regress_priv_user2; -- Two complex cases: SELECT * FROM atestv3; -- fail -ERROR: permission denied for relation atestv3 -SELECT * FROM atestv4; -- ok (even though regress_user2 cannot access underlying atestv3) +ERROR: permission denied for view atestv3 +SELECT * FROM atestv4; -- ok (even though regress_priv_user2 cannot access underlying atestv3) one | two | three -----+-----+------- (0 rows) @@ -362,18 +362,18 @@ SELECT * FROM atest2; -- ok bar | t (1 row) -SELECT * FROM atestv2; -- fail (even though regress_user2 can access underlying atest2) -ERROR: permission denied for relation atest2 +SELECT * FROM atestv2; -- fail (even though regress_priv_user2 can access underlying atest2) +ERROR: permission denied for table atest2 -- Test column level permissions -SET SESSION AUTHORIZATION regress_user1; +SET SESSION AUTHORIZATION regress_priv_user1; CREATE TABLE atest5 (one int, two int unique, three int, four int unique); CREATE TABLE atest6 (one int, two int, blue int); -GRANT SELECT (one), INSERT (two), UPDATE (three) ON atest5 TO regress_user4; -GRANT ALL (one) ON atest5 TO regress_user3; +GRANT SELECT (one), INSERT (two), UPDATE (three) ON atest5 TO regress_priv_user4; +GRANT ALL (one) ON atest5 TO regress_priv_user3; INSERT INTO atest5 VALUES (1,2,3); -SET SESSION AUTHORIZATION regress_user4; +SET SESSION AUTHORIZATION regress_priv_user4; SELECT * FROM atest5; -- fail -ERROR: permission denied for relation atest5 +ERROR: permission denied for table atest5 SELECT one FROM atest5; -- ok one ----- @@ -383,13 +383,13 @@ SELECT one FROM atest5; -- ok COPY atest5 (one) TO stdout; -- ok 1 SELECT two FROM atest5; -- fail -ERROR: permission denied for relation atest5 +ERROR: permission denied for table atest5 COPY atest5 (two) TO stdout; -- fail -ERROR: permission denied for relation atest5 +ERROR: permission denied for table atest5 SELECT atest5 FROM atest5; -- fail -ERROR: permission denied for relation atest5 +ERROR: permission denied for table atest5 COPY atest5 (one,two) TO stdout; -- fail -ERROR: permission denied for relation atest5 +ERROR: permission denied for table atest5 SELECT 1 FROM atest5; -- ok ?column? 
---------- @@ -403,15 +403,15 @@ SELECT 1 FROM atest5 a JOIN atest5 b USING (one); -- ok (1 row) SELECT 1 FROM atest5 a JOIN atest5 b USING (two); -- fail -ERROR: permission denied for relation atest5 +ERROR: permission denied for table atest5 SELECT 1 FROM atest5 a NATURAL JOIN atest5 b; -- fail -ERROR: permission denied for relation atest5 +ERROR: permission denied for table atest5 SELECT (j.*) IS NULL FROM (atest5 a JOIN atest5 b USING (one)) j; -- fail -ERROR: permission denied for relation atest5 +ERROR: permission denied for table atest5 SELECT 1 FROM atest5 WHERE two = 2; -- fail -ERROR: permission denied for relation atest5 +ERROR: permission denied for table atest5 SELECT * FROM atest1, atest5; -- fail -ERROR: permission denied for relation atest5 +ERROR: permission denied for table atest5 SELECT atest1.* FROM atest1, atest5; -- ok a | b ---+----- @@ -427,7 +427,7 @@ SELECT atest1.*,atest5.one FROM atest1, atest5; -- ok (2 rows) SELECT atest1.*,atest5.one FROM atest1 JOIN atest5 ON (atest1.a = atest5.two); -- fail -ERROR: permission denied for relation atest5 +ERROR: permission denied for table atest5 SELECT atest1.*,atest5.one FROM atest1 JOIN atest5 ON (atest1.a = atest5.one); -- ok a | b | one ---+-----+----- @@ -436,15 +436,15 @@ SELECT atest1.*,atest5.one FROM atest1 JOIN atest5 ON (atest1.a = atest5.one); - (2 rows) SELECT one, two FROM atest5; -- fail -ERROR: permission denied for relation atest5 -SET SESSION AUTHORIZATION regress_user1; -GRANT SELECT (one,two) ON atest6 TO regress_user4; -SET SESSION AUTHORIZATION regress_user4; +ERROR: permission denied for table atest5 +SET SESSION AUTHORIZATION regress_priv_user1; +GRANT SELECT (one,two) ON atest6 TO regress_priv_user4; +SET SESSION AUTHORIZATION regress_priv_user4; SELECT one, two FROM atest5 NATURAL JOIN atest6; -- fail still -ERROR: permission denied for relation atest5 -SET SESSION AUTHORIZATION regress_user1; -GRANT SELECT (two) ON atest5 TO regress_user4; -SET SESSION AUTHORIZATION regress_user4; +ERROR: permission denied for table atest5 +SET SESSION AUTHORIZATION regress_priv_user1; +GRANT SELECT (two) ON atest5 TO regress_priv_user4; +SET SESSION AUTHORIZATION regress_priv_user4; SELECT one, two FROM atest5 NATURAL JOIN atest6; -- ok now one | two -----+----- @@ -453,23 +453,23 @@ SELECT one, two FROM atest5 NATURAL JOIN atest6; -- ok now -- test column-level privileges for INSERT and UPDATE INSERT INTO atest5 (two) VALUES (3); -- ok COPY atest5 FROM stdin; -- fail -ERROR: permission denied for relation atest5 +ERROR: permission denied for table atest5 COPY atest5 (two) FROM stdin; -- ok INSERT INTO atest5 (three) VALUES (4); -- fail -ERROR: permission denied for relation atest5 +ERROR: permission denied for table atest5 INSERT INTO atest5 VALUES (5,5,5); -- fail -ERROR: permission denied for relation atest5 +ERROR: permission denied for table atest5 UPDATE atest5 SET three = 10; -- ok UPDATE atest5 SET one = 8; -- fail -ERROR: permission denied for relation atest5 +ERROR: permission denied for table atest5 UPDATE atest5 SET three = 5, one = 2; -- fail -ERROR: permission denied for relation atest5 +ERROR: permission denied for table atest5 -- Check that column level privs are enforced in RETURNING -- Ok. INSERT INTO atest5(two) VALUES (6) ON CONFLICT (two) DO UPDATE set three = 10; -- Error. No SELECT on column three. 
INSERT INTO atest5(two) VALUES (6) ON CONFLICT (two) DO UPDATE set three = 10 RETURNING atest5.three; -ERROR: permission denied for relation atest5 +ERROR: permission denied for table atest5 -- Ok. May SELECT on column "one": INSERT INTO atest5(two) VALUES (6) ON CONFLICT (two) DO UPDATE set three = 10 RETURNING atest5.one; one @@ -482,23 +482,35 @@ INSERT INTO atest5(two) VALUES (6) ON CONFLICT (two) DO UPDATE set three = 10 RE INSERT INTO atest5(two) VALUES (6) ON CONFLICT (two) DO UPDATE set three = EXCLUDED.one; -- Error. No select rights on three INSERT INTO atest5(two) VALUES (6) ON CONFLICT (two) DO UPDATE set three = EXCLUDED.three; -ERROR: permission denied for relation atest5 +ERROR: permission denied for table atest5 INSERT INTO atest5(two) VALUES (6) ON CONFLICT (two) DO UPDATE set one = 8; -- fails (due to UPDATE) -ERROR: permission denied for relation atest5 +ERROR: permission denied for table atest5 INSERT INTO atest5(three) VALUES (4) ON CONFLICT (two) DO UPDATE set three = 10; -- fails (due to INSERT) -ERROR: permission denied for relation atest5 +ERROR: permission denied for table atest5 -- Check that the columns in the inference require select privileges --- Error. No privs on four -INSERT INTO atest5(three) VALUES (4) ON CONFLICT (four) DO UPDATE set three = 10; -ERROR: permission denied for relation atest5 -SET SESSION AUTHORIZATION regress_user1; -REVOKE ALL (one) ON atest5 FROM regress_user4; -GRANT SELECT (one,two,blue) ON atest6 TO regress_user4; -SET SESSION AUTHORIZATION regress_user4; +INSERT INTO atest5(four) VALUES (4); -- fail +ERROR: permission denied for table atest5 +SET SESSION AUTHORIZATION regress_priv_user1; +GRANT INSERT (four) ON atest5 TO regress_priv_user4; +SET SESSION AUTHORIZATION regress_priv_user4; +INSERT INTO atest5(four) VALUES (4) ON CONFLICT (four) DO UPDATE set three = 3; -- fails (due to SELECT) +ERROR: permission denied for table atest5 +INSERT INTO atest5(four) VALUES (4) ON CONFLICT ON CONSTRAINT atest5_four_key DO UPDATE set three = 3; -- fails (due to SELECT) +ERROR: permission denied for table atest5 +INSERT INTO atest5(four) VALUES (4); -- ok +SET SESSION AUTHORIZATION regress_priv_user1; +GRANT SELECT (four) ON atest5 TO regress_priv_user4; +SET SESSION AUTHORIZATION regress_priv_user4; +INSERT INTO atest5(four) VALUES (4) ON CONFLICT (four) DO UPDATE set three = 3; -- ok +INSERT INTO atest5(four) VALUES (4) ON CONFLICT ON CONSTRAINT atest5_four_key DO UPDATE set three = 3; -- ok +SET SESSION AUTHORIZATION regress_priv_user1; +REVOKE ALL (one) ON atest5 FROM regress_priv_user4; +GRANT SELECT (one,two,blue) ON atest6 TO regress_priv_user4; +SET SESSION AUTHORIZATION regress_priv_user4; SELECT one FROM atest5; -- fail -ERROR: permission denied for relation atest5 +ERROR: permission denied for table atest5 UPDATE atest5 SET one = 1; -- fail -ERROR: permission denied for relation atest5 +ERROR: permission denied for table atest5 SELECT atest6 FROM atest6; -- ok atest6 -------- @@ -506,18 +518,18 @@ SELECT atest6 FROM atest6; -- ok COPY atest6 TO stdout; -- ok -- check error reporting with column privs -SET SESSION AUTHORIZATION regress_user1; +SET SESSION AUTHORIZATION regress_priv_user1; CREATE TABLE t1 (c1 int, c2 int, c3 int check (c3 < 5), primary key (c1, c2)); -GRANT SELECT (c1) ON t1 TO regress_user2; -GRANT INSERT (c1, c2, c3) ON t1 TO regress_user2; -GRANT UPDATE (c1, c2, c3) ON t1 TO regress_user2; +GRANT SELECT (c1) ON t1 TO regress_priv_user2; +GRANT INSERT (c1, c2, c3) ON t1 TO regress_priv_user2; +GRANT UPDATE (c1, c2, 
c3) ON t1 TO regress_priv_user2; -- seed data INSERT INTO t1 VALUES (1, 1, 1); INSERT INTO t1 VALUES (1, 2, 1); INSERT INTO t1 VALUES (2, 1, 2); INSERT INTO t1 VALUES (2, 2, 2); INSERT INTO t1 VALUES (3, 1, 3); -SET SESSION AUTHORIZATION regress_user2; +SET SESSION AUTHORIZATION regress_priv_user2; INSERT INTO t1 (c1, c2) VALUES (1, 1); -- fail, but row not shown ERROR: duplicate key value violates unique constraint "t1_pkey" UPDATE t1 SET c2 = 1; -- fail, but row not shown @@ -534,23 +546,23 @@ DETAIL: Failing row contains (c1) = (5). UPDATE t1 SET c3 = 10; -- fail, but see columns with SELECT rights, or being modified ERROR: new row for relation "t1" violates check constraint "t1_c3_check" DETAIL: Failing row contains (c1, c3) = (1, 10). -SET SESSION AUTHORIZATION regress_user1; +SET SESSION AUTHORIZATION regress_priv_user1; DROP TABLE t1; -- test column-level privileges when involved with DELETE -SET SESSION AUTHORIZATION regress_user1; +SET SESSION AUTHORIZATION regress_priv_user1; ALTER TABLE atest6 ADD COLUMN three integer; -GRANT DELETE ON atest5 TO regress_user3; -GRANT SELECT (two) ON atest5 TO regress_user3; -REVOKE ALL (one) ON atest5 FROM regress_user3; -GRANT SELECT (one) ON atest5 TO regress_user4; -SET SESSION AUTHORIZATION regress_user4; +GRANT DELETE ON atest5 TO regress_priv_user3; +GRANT SELECT (two) ON atest5 TO regress_priv_user3; +REVOKE ALL (one) ON atest5 FROM regress_priv_user3; +GRANT SELECT (one) ON atest5 TO regress_priv_user4; +SET SESSION AUTHORIZATION regress_priv_user4; SELECT atest6 FROM atest6; -- fail -ERROR: permission denied for relation atest6 +ERROR: permission denied for table atest6 SELECT one FROM atest5 NATURAL JOIN atest6; -- fail -ERROR: permission denied for relation atest5 -SET SESSION AUTHORIZATION regress_user1; +ERROR: permission denied for table atest5 +SET SESSION AUTHORIZATION regress_priv_user1; ALTER TABLE atest6 DROP COLUMN three; -SET SESSION AUTHORIZATION regress_user4; +SET SESSION AUTHORIZATION regress_priv_user4; SELECT atest6 FROM atest6; -- ok atest6 -------- @@ -561,26 +573,26 @@ SELECT one FROM atest5 NATURAL JOIN atest6; -- ok ----- (0 rows) -SET SESSION AUTHORIZATION regress_user1; +SET SESSION AUTHORIZATION regress_priv_user1; ALTER TABLE atest6 DROP COLUMN two; -REVOKE SELECT (one,blue) ON atest6 FROM regress_user4; -SET SESSION AUTHORIZATION regress_user4; +REVOKE SELECT (one,blue) ON atest6 FROM regress_priv_user4; +SET SESSION AUTHORIZATION regress_priv_user4; SELECT * FROM atest6; -- fail -ERROR: permission denied for relation atest6 +ERROR: permission denied for table atest6 SELECT 1 FROM atest6; -- fail -ERROR: permission denied for relation atest6 -SET SESSION AUTHORIZATION regress_user3; +ERROR: permission denied for table atest6 +SET SESSION AUTHORIZATION regress_priv_user3; DELETE FROM atest5 WHERE one = 1; -- fail -ERROR: permission denied for relation atest5 +ERROR: permission denied for table atest5 DELETE FROM atest5 WHERE two = 2; -- ok -- check inheritance cases -SET SESSION AUTHORIZATION regress_user1; +SET SESSION AUTHORIZATION regress_priv_user1; CREATE TABLE atestp1 (f1 int, f2 int) WITH OIDS; CREATE TABLE atestp2 (fx int, fy int) WITH OIDS; CREATE TABLE atestc (fz int) INHERITS (atestp1, atestp2); -GRANT SELECT(fx,fy,oid) ON atestp2 TO regress_user2; -GRANT SELECT(fx) ON atestc TO regress_user2; -SET SESSION AUTHORIZATION regress_user2; +GRANT SELECT(fx,fy,oid) ON atestp2 TO regress_priv_user2; +GRANT SELECT(fx) ON atestc TO regress_priv_user2; +SET SESSION AUTHORIZATION regress_priv_user2; SELECT fx 
FROM atestp2; -- ok fx ---- @@ -602,10 +614,10 @@ SELECT oid FROM atestp2; -- ok (0 rows) SELECT fy FROM atestc; -- fail -ERROR: permission denied for relation atestc -SET SESSION AUTHORIZATION regress_user1; -GRANT SELECT(fy,oid) ON atestc TO regress_user2; -SET SESSION AUTHORIZATION regress_user2; +ERROR: permission denied for table atestc +SET SESSION AUTHORIZATION regress_priv_user1; +GRANT SELECT(fy,oid) ON atestc TO regress_priv_user2; +SET SESSION AUTHORIZATION regress_priv_user2; SELECT fx FROM atestp2; -- still ok fx ---- @@ -630,57 +642,91 @@ SELECT oid FROM atestp2; -- ok -- switch to superuser \c - REVOKE ALL PRIVILEGES ON LANGUAGE sql FROM PUBLIC; -GRANT USAGE ON LANGUAGE sql TO regress_user1; -- ok +GRANT USAGE ON LANGUAGE sql TO regress_priv_user1; -- ok GRANT USAGE ON LANGUAGE c TO PUBLIC; -- fail ERROR: language "c" is not trusted DETAIL: GRANT and REVOKE are not allowed on untrusted languages, because only superusers can use untrusted languages. -SET SESSION AUTHORIZATION regress_user1; -GRANT USAGE ON LANGUAGE sql TO regress_user2; -- fail +SET SESSION AUTHORIZATION regress_priv_user1; +GRANT USAGE ON LANGUAGE sql TO regress_priv_user2; -- fail WARNING: no privileges were granted for "sql" -CREATE FUNCTION testfunc1(int) RETURNS int AS 'select 2 * $1;' LANGUAGE sql; -CREATE FUNCTION testfunc2(int) RETURNS int AS 'select 3 * $1;' LANGUAGE sql; -REVOKE ALL ON FUNCTION testfunc1(int), testfunc2(int) FROM PUBLIC; -GRANT EXECUTE ON FUNCTION testfunc1(int), testfunc2(int) TO regress_user2; -GRANT USAGE ON FUNCTION testfunc1(int) TO regress_user3; -- semantic error +CREATE FUNCTION priv_testfunc1(int) RETURNS int AS 'select 2 * $1;' LANGUAGE sql; +CREATE FUNCTION priv_testfunc2(int) RETURNS int AS 'select 3 * $1;' LANGUAGE sql; +CREATE AGGREGATE priv_testagg1(int) (sfunc = int4pl, stype = int4); +CREATE PROCEDURE priv_testproc1(int) AS 'select $1;' LANGUAGE sql; +REVOKE ALL ON FUNCTION priv_testfunc1(int), priv_testfunc2(int), priv_testagg1(int) FROM PUBLIC; +GRANT EXECUTE ON FUNCTION priv_testfunc1(int), priv_testfunc2(int), priv_testagg1(int) TO regress_priv_user2; +REVOKE ALL ON FUNCTION priv_testproc1(int) FROM PUBLIC; -- fail, not a function +ERROR: priv_testproc1(integer) is not a function +REVOKE ALL ON PROCEDURE priv_testproc1(int) FROM PUBLIC; +GRANT EXECUTE ON PROCEDURE priv_testproc1(int) TO regress_priv_user2; +GRANT USAGE ON FUNCTION priv_testfunc1(int) TO regress_priv_user3; -- semantic error ERROR: invalid privilege type USAGE for function -GRANT ALL PRIVILEGES ON FUNCTION testfunc1(int) TO regress_user4; -GRANT ALL PRIVILEGES ON FUNCTION testfunc_nosuch(int) TO regress_user4; -ERROR: function testfunc_nosuch(integer) does not exist -CREATE FUNCTION testfunc4(boolean) RETURNS text +GRANT USAGE ON FUNCTION priv_testagg1(int) TO regress_priv_user3; -- semantic error +ERROR: invalid privilege type USAGE for function +GRANT USAGE ON PROCEDURE priv_testproc1(int) TO regress_priv_user3; -- semantic error +ERROR: invalid privilege type USAGE for procedure +GRANT ALL PRIVILEGES ON FUNCTION priv_testfunc1(int) TO regress_priv_user4; +GRANT ALL PRIVILEGES ON FUNCTION priv_testfunc_nosuch(int) TO regress_priv_user4; +ERROR: function priv_testfunc_nosuch(integer) does not exist +GRANT ALL PRIVILEGES ON FUNCTION priv_testagg1(int) TO regress_priv_user4; +GRANT ALL PRIVILEGES ON PROCEDURE priv_testproc1(int) TO regress_priv_user4; +CREATE FUNCTION priv_testfunc4(boolean) RETURNS text AS 'select col1 from atest2 where col2 = $1;' LANGUAGE sql SECURITY DEFINER; -GRANT EXECUTE ON 
FUNCTION testfunc4(boolean) TO regress_user3; -SET SESSION AUTHORIZATION regress_user2; -SELECT testfunc1(5), testfunc2(5); -- ok - testfunc1 | testfunc2 ------------+----------- - 10 | 15 +GRANT EXECUTE ON FUNCTION priv_testfunc4(boolean) TO regress_priv_user3; +SET SESSION AUTHORIZATION regress_priv_user2; +SELECT priv_testfunc1(5), priv_testfunc2(5); -- ok + priv_testfunc1 | priv_testfunc2 +----------------+---------------- + 10 | 15 (1 row) -CREATE FUNCTION testfunc3(int) RETURNS int AS 'select 2 * $1;' LANGUAGE sql; -- fail +CREATE FUNCTION priv_testfunc3(int) RETURNS int AS 'select 2 * $1;' LANGUAGE sql; -- fail ERROR: permission denied for language sql -SET SESSION AUTHORIZATION regress_user3; -SELECT testfunc1(5); -- fail -ERROR: permission denied for function testfunc1 +SELECT priv_testagg1(x) FROM (VALUES (1), (2), (3)) _(x); -- ok + priv_testagg1 +--------------- + 6 +(1 row) + +CALL priv_testproc1(6); -- ok +SET SESSION AUTHORIZATION regress_priv_user3; +SELECT priv_testfunc1(5); -- fail +ERROR: permission denied for function priv_testfunc1 +SELECT priv_testagg1(x) FROM (VALUES (1), (2), (3)) _(x); -- fail +ERROR: permission denied for aggregate priv_testagg1 +CALL priv_testproc1(6); -- fail +ERROR: permission denied for procedure priv_testproc1 SELECT col1 FROM atest2 WHERE col2 = true; -- fail -ERROR: permission denied for relation atest2 -SELECT testfunc4(true); -- ok - testfunc4 ------------ +ERROR: permission denied for table atest2 +SELECT priv_testfunc4(true); -- ok + priv_testfunc4 +---------------- bar (1 row) -SET SESSION AUTHORIZATION regress_user4; -SELECT testfunc1(5); -- ok - testfunc1 ------------ - 10 +SET SESSION AUTHORIZATION regress_priv_user4; +SELECT priv_testfunc1(5); -- ok + priv_testfunc1 +---------------- + 10 (1 row) -DROP FUNCTION testfunc1(int); -- fail -ERROR: must be owner of function testfunc1 +SELECT priv_testagg1(x) FROM (VALUES (1), (2), (3)) _(x); -- ok + priv_testagg1 +--------------- + 6 +(1 row) + +CALL priv_testproc1(6); -- ok +DROP FUNCTION priv_testfunc1(int); -- fail +ERROR: must be owner of function priv_testfunc1 +DROP AGGREGATE priv_testagg1(int); -- fail +ERROR: must be owner of aggregate priv_testagg1 +DROP PROCEDURE priv_testproc1(int); -- fail +ERROR: must be owner of procedure priv_testproc1 \c - -DROP FUNCTION testfunc1(int); -- ok +DROP FUNCTION priv_testfunc1(int); -- ok -- restore to sanity GRANT ALL PRIVILEGES ON LANGUAGE sql TO PUBLIC; -- verify privilege checks on array-element coercions @@ -692,118 +738,118 @@ SELECT '{1}'::int4[]::int8[]; (1 row) REVOKE ALL ON FUNCTION int8(integer) FROM PUBLIC; -SELECT '{1}'::int4[]::int8[]; --superuser, suceed +SELECT '{1}'::int4[]::int8[]; --superuser, succeed int8 ------ {1} (1 row) -SET SESSION AUTHORIZATION regress_user4; +SET SESSION AUTHORIZATION regress_priv_user4; SELECT '{1}'::int4[]::int8[]; --other user, fail ERROR: permission denied for function int8 ROLLBACK; -- privileges on types -- switch to superuser \c - -CREATE TYPE testtype1 AS (a int, b text); -REVOKE USAGE ON TYPE testtype1 FROM PUBLIC; -GRANT USAGE ON TYPE testtype1 TO regress_user2; -GRANT USAGE ON TYPE _testtype1 TO regress_user2; -- fail +CREATE TYPE priv_testtype1 AS (a int, b text); +REVOKE USAGE ON TYPE priv_testtype1 FROM PUBLIC; +GRANT USAGE ON TYPE priv_testtype1 TO regress_priv_user2; +GRANT USAGE ON TYPE _priv_testtype1 TO regress_priv_user2; -- fail ERROR: cannot set privileges of array types HINT: Set the privileges of the element type instead. 
-GRANT USAGE ON DOMAIN testtype1 TO regress_user2; -- fail -ERROR: "testtype1" is not a domain -CREATE DOMAIN testdomain1 AS int; -REVOKE USAGE on DOMAIN testdomain1 FROM PUBLIC; -GRANT USAGE ON DOMAIN testdomain1 TO regress_user2; -GRANT USAGE ON TYPE testdomain1 TO regress_user2; -- ok -SET SESSION AUTHORIZATION regress_user1; +GRANT USAGE ON DOMAIN priv_testtype1 TO regress_priv_user2; -- fail +ERROR: "priv_testtype1" is not a domain +CREATE DOMAIN priv_testdomain1 AS int; +REVOKE USAGE on DOMAIN priv_testdomain1 FROM PUBLIC; +GRANT USAGE ON DOMAIN priv_testdomain1 TO regress_priv_user2; +GRANT USAGE ON TYPE priv_testdomain1 TO regress_priv_user2; -- ok +SET SESSION AUTHORIZATION regress_priv_user1; -- commands that should fail -CREATE AGGREGATE testagg1a(testdomain1) (sfunc = int4_sum, stype = bigint); -ERROR: permission denied for type testdomain1 -CREATE DOMAIN testdomain2a AS testdomain1; -ERROR: permission denied for type testdomain1 -CREATE DOMAIN testdomain3a AS int; -CREATE FUNCTION castfunc(int) RETURNS testdomain3a AS $$ SELECT $1::testdomain3a $$ LANGUAGE SQL; -CREATE CAST (testdomain1 AS testdomain3a) WITH FUNCTION castfunc(int); -ERROR: permission denied for type testdomain1 +CREATE AGGREGATE priv_testagg1a(priv_testdomain1) (sfunc = int4_sum, stype = bigint); +ERROR: permission denied for type priv_testdomain1 +CREATE DOMAIN priv_testdomain2a AS priv_testdomain1; +ERROR: permission denied for type priv_testdomain1 +CREATE DOMAIN priv_testdomain3a AS int; +CREATE FUNCTION castfunc(int) RETURNS priv_testdomain3a AS $$ SELECT $1::priv_testdomain3a $$ LANGUAGE SQL; +CREATE CAST (priv_testdomain1 AS priv_testdomain3a) WITH FUNCTION castfunc(int); +ERROR: permission denied for type priv_testdomain1 DROP FUNCTION castfunc(int) CASCADE; -DROP DOMAIN testdomain3a; -CREATE FUNCTION testfunc5a(a testdomain1) RETURNS int LANGUAGE SQL AS $$ SELECT $1 $$; -ERROR: permission denied for type testdomain1 -CREATE FUNCTION testfunc6a(b int) RETURNS testdomain1 LANGUAGE SQL AS $$ SELECT $1::testdomain1 $$; -ERROR: permission denied for type testdomain1 -CREATE OPERATOR !+! (PROCEDURE = int4pl, LEFTARG = testdomain1, RIGHTARG = testdomain1); -ERROR: permission denied for type testdomain1 -CREATE TABLE test5a (a int, b testdomain1); -ERROR: permission denied for type testdomain1 -CREATE TABLE test6a OF testtype1; -ERROR: permission denied for type testtype1 -CREATE TABLE test10a (a int[], b testtype1[]); -ERROR: permission denied for type testtype1 +DROP DOMAIN priv_testdomain3a; +CREATE FUNCTION priv_testfunc5a(a priv_testdomain1) RETURNS int LANGUAGE SQL AS $$ SELECT $1 $$; +ERROR: permission denied for type priv_testdomain1 +CREATE FUNCTION priv_testfunc6a(b int) RETURNS priv_testdomain1 LANGUAGE SQL AS $$ SELECT $1::priv_testdomain1 $$; +ERROR: permission denied for type priv_testdomain1 +CREATE OPERATOR !+! 
(PROCEDURE = int4pl, LEFTARG = priv_testdomain1, RIGHTARG = priv_testdomain1); +ERROR: permission denied for type priv_testdomain1 +CREATE TABLE test5a (a int, b priv_testdomain1); +ERROR: permission denied for type priv_testdomain1 +CREATE TABLE test6a OF priv_testtype1; +ERROR: permission denied for type priv_testtype1 +CREATE TABLE test10a (a int[], b priv_testtype1[]); +ERROR: permission denied for type priv_testtype1 CREATE TABLE test9a (a int, b int); -ALTER TABLE test9a ADD COLUMN c testdomain1; -ERROR: permission denied for type testdomain1 -ALTER TABLE test9a ALTER COLUMN b TYPE testdomain1; -ERROR: permission denied for type testdomain1 -CREATE TYPE test7a AS (a int, b testdomain1); -ERROR: permission denied for type testdomain1 +ALTER TABLE test9a ADD COLUMN c priv_testdomain1; +ERROR: permission denied for type priv_testdomain1 +ALTER TABLE test9a ALTER COLUMN b TYPE priv_testdomain1; +ERROR: permission denied for type priv_testdomain1 +CREATE TYPE test7a AS (a int, b priv_testdomain1); +ERROR: permission denied for type priv_testdomain1 CREATE TYPE test8a AS (a int, b int); -ALTER TYPE test8a ADD ATTRIBUTE c testdomain1; -ERROR: permission denied for type testdomain1 -ALTER TYPE test8a ALTER ATTRIBUTE b TYPE testdomain1; -ERROR: permission denied for type testdomain1 -CREATE TABLE test11a AS (SELECT 1::testdomain1 AS a); -ERROR: permission denied for type testdomain1 -REVOKE ALL ON TYPE testtype1 FROM PUBLIC; -ERROR: permission denied for type testtype1 -SET SESSION AUTHORIZATION regress_user2; +ALTER TYPE test8a ADD ATTRIBUTE c priv_testdomain1; +ERROR: permission denied for type priv_testdomain1 +ALTER TYPE test8a ALTER ATTRIBUTE b TYPE priv_testdomain1; +ERROR: permission denied for type priv_testdomain1 +CREATE TABLE test11a AS (SELECT 1::priv_testdomain1 AS a); +ERROR: permission denied for type priv_testdomain1 +REVOKE ALL ON TYPE priv_testtype1 FROM PUBLIC; +ERROR: permission denied for type priv_testtype1 +SET SESSION AUTHORIZATION regress_priv_user2; -- commands that should succeed -CREATE AGGREGATE testagg1b(testdomain1) (sfunc = int4_sum, stype = bigint); -CREATE DOMAIN testdomain2b AS testdomain1; -CREATE DOMAIN testdomain3b AS int; -CREATE FUNCTION castfunc(int) RETURNS testdomain3b AS $$ SELECT $1::testdomain3b $$ LANGUAGE SQL; -CREATE CAST (testdomain1 AS testdomain3b) WITH FUNCTION castfunc(int); +CREATE AGGREGATE priv_testagg1b(priv_testdomain1) (sfunc = int4_sum, stype = bigint); +CREATE DOMAIN priv_testdomain2b AS priv_testdomain1; +CREATE DOMAIN priv_testdomain3b AS int; +CREATE FUNCTION castfunc(int) RETURNS priv_testdomain3b AS $$ SELECT $1::priv_testdomain3b $$ LANGUAGE SQL; +CREATE CAST (priv_testdomain1 AS priv_testdomain3b) WITH FUNCTION castfunc(int); WARNING: cast will be ignored because the source data type is a domain -CREATE FUNCTION testfunc5b(a testdomain1) RETURNS int LANGUAGE SQL AS $$ SELECT $1 $$; -CREATE FUNCTION testfunc6b(b int) RETURNS testdomain1 LANGUAGE SQL AS $$ SELECT $1::testdomain1 $$; -CREATE OPERATOR !! (PROCEDURE = testfunc5b, RIGHTARG = testdomain1); -CREATE TABLE test5b (a int, b testdomain1); -CREATE TABLE test6b OF testtype1; -CREATE TABLE test10b (a int[], b testtype1[]); +CREATE FUNCTION priv_testfunc5b(a priv_testdomain1) RETURNS int LANGUAGE SQL AS $$ SELECT $1 $$; +CREATE FUNCTION priv_testfunc6b(b int) RETURNS priv_testdomain1 LANGUAGE SQL AS $$ SELECT $1::priv_testdomain1 $$; +CREATE OPERATOR !! 
(PROCEDURE = priv_testfunc5b, RIGHTARG = priv_testdomain1); +CREATE TABLE test5b (a int, b priv_testdomain1); +CREATE TABLE test6b OF priv_testtype1; +CREATE TABLE test10b (a int[], b priv_testtype1[]); CREATE TABLE test9b (a int, b int); -ALTER TABLE test9b ADD COLUMN c testdomain1; -ALTER TABLE test9b ALTER COLUMN b TYPE testdomain1; -CREATE TYPE test7b AS (a int, b testdomain1); +ALTER TABLE test9b ADD COLUMN c priv_testdomain1; +ALTER TABLE test9b ALTER COLUMN b TYPE priv_testdomain1; +CREATE TYPE test7b AS (a int, b priv_testdomain1); CREATE TYPE test8b AS (a int, b int); -ALTER TYPE test8b ADD ATTRIBUTE c testdomain1; -ALTER TYPE test8b ALTER ATTRIBUTE b TYPE testdomain1; -CREATE TABLE test11b AS (SELECT 1::testdomain1 AS a); -REVOKE ALL ON TYPE testtype1 FROM PUBLIC; -WARNING: no privileges could be revoked for "testtype1" +ALTER TYPE test8b ADD ATTRIBUTE c priv_testdomain1; +ALTER TYPE test8b ALTER ATTRIBUTE b TYPE priv_testdomain1; +CREATE TABLE test11b AS (SELECT 1::priv_testdomain1 AS a); +REVOKE ALL ON TYPE priv_testtype1 FROM PUBLIC; +WARNING: no privileges could be revoked for "priv_testtype1" \c - -DROP AGGREGATE testagg1b(testdomain1); -DROP DOMAIN testdomain2b; -DROP OPERATOR !! (NONE, testdomain1); -DROP FUNCTION testfunc5b(a testdomain1); -DROP FUNCTION testfunc6b(b int); +DROP AGGREGATE priv_testagg1b(priv_testdomain1); +DROP DOMAIN priv_testdomain2b; +DROP OPERATOR !! (NONE, priv_testdomain1); +DROP FUNCTION priv_testfunc5b(a priv_testdomain1); +DROP FUNCTION priv_testfunc6b(b int); DROP TABLE test5b; DROP TABLE test6b; DROP TABLE test9b; DROP TABLE test10b; DROP TYPE test7b; DROP TYPE test8b; -DROP CAST (testdomain1 AS testdomain3b); +DROP CAST (priv_testdomain1 AS priv_testdomain3b); DROP FUNCTION castfunc(int) CASCADE; -DROP DOMAIN testdomain3b; +DROP DOMAIN priv_testdomain3b; DROP TABLE test11b; -DROP TYPE testtype1; -- ok -DROP DOMAIN testdomain1; -- ok +DROP TYPE priv_testtype1; -- ok +DROP DOMAIN priv_testdomain1; -- ok -- truncate -SET SESSION AUTHORIZATION regress_user5; +SET SESSION AUTHORIZATION regress_priv_user5; TRUNCATE atest2; -- ok TRUNCATE atest3; -- fail -ERROR: permission denied for relation atest3 +ERROR: permission denied for table atest3 -- has_table_privilege function -- bad-input checks select has_table_privilege(NULL,'pg_authid','select'); @@ -923,7 +969,7 @@ from (select oid from pg_class where relname = 'pg_authid') as t1; (1 row) -- non-superuser -SET SESSION AUTHORIZATION regress_user3; +SET SESSION AUTHORIZATION regress_priv_user3; select has_table_privilege(current_user,'pg_class','select'); has_table_privilege --------------------- @@ -1086,94 +1132,152 @@ from (select oid from pg_class where relname = 'atest1') as t1; f (1 row) +-- has_column_privilege function +-- bad-input checks (as non-super-user) +select has_column_privilege('pg_authid',NULL,'select'); + has_column_privilege +---------------------- + +(1 row) + +select has_column_privilege('pg_authid','nosuchcol','select'); +ERROR: column "nosuchcol" of relation "pg_authid" does not exist +select has_column_privilege(9999,'nosuchcol','select'); + has_column_privilege +---------------------- + +(1 row) + +select has_column_privilege(9999,99::int2,'select'); + has_column_privilege +---------------------- + +(1 row) + +select has_column_privilege('pg_authid',99::int2,'select'); + has_column_privilege +---------------------- + +(1 row) + +select has_column_privilege(9999,99::int2,'select'); + has_column_privilege +---------------------- + +(1 row) + +create temp table mytable(f1 int, 
f2 int, f3 int); +alter table mytable drop column f2; +select has_column_privilege('mytable','f2','select'); +ERROR: column "f2" of relation "mytable" does not exist +select has_column_privilege('mytable','........pg.dropped.2........','select'); + has_column_privilege +---------------------- + +(1 row) + +select has_column_privilege('mytable',2::int2,'select'); + has_column_privilege +---------------------- + t +(1 row) + +revoke select on table mytable from regress_priv_user3; +select has_column_privilege('mytable',2::int2,'select'); + has_column_privilege +---------------------- + +(1 row) + +drop table mytable; -- Grant options -SET SESSION AUTHORIZATION regress_user1; +SET SESSION AUTHORIZATION regress_priv_user1; CREATE TABLE atest4 (a int); -GRANT SELECT ON atest4 TO regress_user2 WITH GRANT OPTION; -GRANT UPDATE ON atest4 TO regress_user2; -GRANT SELECT ON atest4 TO GROUP regress_group1 WITH GRANT OPTION; -SET SESSION AUTHORIZATION regress_user2; -GRANT SELECT ON atest4 TO regress_user3; -GRANT UPDATE ON atest4 TO regress_user3; -- fail +GRANT SELECT ON atest4 TO regress_priv_user2 WITH GRANT OPTION; +GRANT UPDATE ON atest4 TO regress_priv_user2; +GRANT SELECT ON atest4 TO GROUP regress_priv_group1 WITH GRANT OPTION; +SET SESSION AUTHORIZATION regress_priv_user2; +GRANT SELECT ON atest4 TO regress_priv_user3; +GRANT UPDATE ON atest4 TO regress_priv_user3; -- fail WARNING: no privileges were granted for "atest4" -SET SESSION AUTHORIZATION regress_user1; -REVOKE SELECT ON atest4 FROM regress_user3; -- does nothing -SELECT has_table_privilege('regress_user3', 'atest4', 'SELECT'); -- true +SET SESSION AUTHORIZATION regress_priv_user1; +REVOKE SELECT ON atest4 FROM regress_priv_user3; -- does nothing +SELECT has_table_privilege('regress_priv_user3', 'atest4', 'SELECT'); -- true has_table_privilege --------------------- t (1 row) -REVOKE SELECT ON atest4 FROM regress_user2; -- fail +REVOKE SELECT ON atest4 FROM regress_priv_user2; -- fail ERROR: dependent privileges exist HINT: Use CASCADE to revoke them too. 
-REVOKE GRANT OPTION FOR SELECT ON atest4 FROM regress_user2 CASCADE; -- ok -SELECT has_table_privilege('regress_user2', 'atest4', 'SELECT'); -- true +REVOKE GRANT OPTION FOR SELECT ON atest4 FROM regress_priv_user2 CASCADE; -- ok +SELECT has_table_privilege('regress_priv_user2', 'atest4', 'SELECT'); -- true has_table_privilege --------------------- t (1 row) -SELECT has_table_privilege('regress_user3', 'atest4', 'SELECT'); -- false +SELECT has_table_privilege('regress_priv_user3', 'atest4', 'SELECT'); -- false has_table_privilege --------------------- f (1 row) -SELECT has_table_privilege('regress_user1', 'atest4', 'SELECT WITH GRANT OPTION'); -- true +SELECT has_table_privilege('regress_priv_user1', 'atest4', 'SELECT WITH GRANT OPTION'); -- true has_table_privilege --------------------- t (1 row) -- Admin options -SET SESSION AUTHORIZATION regress_user4; +SET SESSION AUTHORIZATION regress_priv_user4; CREATE FUNCTION dogrant_ok() RETURNS void LANGUAGE sql SECURITY DEFINER AS - 'GRANT regress_group2 TO regress_user5'; -GRANT regress_group2 TO regress_user5; -- ok: had ADMIN OPTION -SET ROLE regress_group2; -GRANT regress_group2 TO regress_user5; -- fails: SET ROLE suspended privilege -ERROR: must have admin option on role "regress_group2" -SET SESSION AUTHORIZATION regress_user1; -GRANT regress_group2 TO regress_user5; -- fails: no ADMIN OPTION -ERROR: must have admin option on role "regress_group2" + 'GRANT regress_priv_group2 TO regress_priv_user5'; +GRANT regress_priv_group2 TO regress_priv_user5; -- ok: had ADMIN OPTION +SET ROLE regress_priv_group2; +GRANT regress_priv_group2 TO regress_priv_user5; -- fails: SET ROLE suspended privilege +ERROR: must have admin option on role "regress_priv_group2" +SET SESSION AUTHORIZATION regress_priv_user1; +GRANT regress_priv_group2 TO regress_priv_user5; -- fails: no ADMIN OPTION +ERROR: must have admin option on role "regress_priv_group2" SELECT dogrant_ok(); -- ok: SECURITY DEFINER conveys ADMIN -NOTICE: role "regress_user5" is already a member of role "regress_group2" +NOTICE: role "regress_priv_user5" is already a member of role "regress_priv_group2" dogrant_ok ------------ (1 row) -SET ROLE regress_group2; -GRANT regress_group2 TO regress_user5; -- fails: SET ROLE did not help -ERROR: must have admin option on role "regress_group2" -SET SESSION AUTHORIZATION regress_group2; -GRANT regress_group2 TO regress_user5; -- ok: a role can self-admin -NOTICE: role "regress_user5" is already a member of role "regress_group2" +SET ROLE regress_priv_group2; +GRANT regress_priv_group2 TO regress_priv_user5; -- fails: SET ROLE did not help +ERROR: must have admin option on role "regress_priv_group2" +SET SESSION AUTHORIZATION regress_priv_group2; +GRANT regress_priv_group2 TO regress_priv_user5; -- ok: a role can self-admin +NOTICE: role "regress_priv_user5" is already a member of role "regress_priv_group2" CREATE FUNCTION dogrant_fails() RETURNS void LANGUAGE sql SECURITY DEFINER AS - 'GRANT regress_group2 TO regress_user5'; + 'GRANT regress_priv_group2 TO regress_priv_user5'; SELECT dogrant_fails(); -- fails: no self-admin in SECURITY DEFINER -ERROR: must have admin option on role "regress_group2" +ERROR: must have admin option on role "regress_priv_group2" CONTEXT: SQL function "dogrant_fails" statement 1 DROP FUNCTION dogrant_fails(); -SET SESSION AUTHORIZATION regress_user4; +SET SESSION AUTHORIZATION regress_priv_user4; DROP FUNCTION dogrant_ok(); -REVOKE regress_group2 FROM regress_user5; +REVOKE regress_priv_group2 FROM regress_priv_user5; -- 
has_sequence_privilege tests \c - CREATE SEQUENCE x_seq; -GRANT USAGE on x_seq to regress_user2; -SELECT has_sequence_privilege('regress_user1', 'atest1', 'SELECT'); +GRANT USAGE on x_seq to regress_priv_user2; +SELECT has_sequence_privilege('regress_priv_user1', 'atest1', 'SELECT'); ERROR: "atest1" is not a sequence -SELECT has_sequence_privilege('regress_user1', 'x_seq', 'INSERT'); +SELECT has_sequence_privilege('regress_priv_user1', 'x_seq', 'INSERT'); ERROR: unrecognized privilege type: "INSERT" -SELECT has_sequence_privilege('regress_user1', 'x_seq', 'SELECT'); +SELECT has_sequence_privilege('regress_priv_user1', 'x_seq', 'SELECT'); has_sequence_privilege ------------------------ f (1 row) -SET SESSION AUTHORIZATION regress_user2; +SET SESSION AUTHORIZATION regress_priv_user2; SELECT has_sequence_privilege('x_seq', 'USAGE'); has_sequence_privilege ------------------------ @@ -1182,7 +1286,7 @@ SELECT has_sequence_privilege('x_seq', 'USAGE'); -- largeobject privilege tests \c - -SET SESSION AUTHORIZATION regress_user1; +SET SESSION AUTHORIZATION regress_priv_user1; SELECT lo_create(1001); lo_create ----------- @@ -1214,10 +1318,10 @@ SELECT lo_create(1005); (1 row) GRANT ALL ON LARGE OBJECT 1001 TO PUBLIC; -GRANT SELECT ON LARGE OBJECT 1003 TO regress_user2; -GRANT SELECT,UPDATE ON LARGE OBJECT 1004 TO regress_user2; -GRANT ALL ON LARGE OBJECT 1005 TO regress_user2; -GRANT SELECT ON LARGE OBJECT 1005 TO regress_user2 WITH GRANT OPTION; +GRANT SELECT ON LARGE OBJECT 1003 TO regress_priv_user2; +GRANT SELECT,UPDATE ON LARGE OBJECT 1004 TO regress_priv_user2; +GRANT ALL ON LARGE OBJECT 1005 TO regress_priv_user2; +GRANT SELECT ON LARGE OBJECT 1005 TO regress_priv_user2 WITH GRANT OPTION; GRANT SELECT, INSERT ON LARGE OBJECT 1001 TO PUBLIC; -- to be failed ERROR: invalid privilege type INSERT for large object GRANT SELECT, UPDATE ON LARGE OBJECT 1001 TO nosuchuser; -- to be failed @@ -1225,7 +1329,7 @@ ERROR: role "nosuchuser" does not exist GRANT SELECT, UPDATE ON LARGE OBJECT 999 TO PUBLIC; -- to be failed ERROR: large object 999 does not exist \c - -SET SESSION AUTHORIZATION regress_user2; +SET SESSION AUTHORIZATION regress_priv_user2; SELECT lo_create(2001); lo_create ----------- @@ -1282,11 +1386,11 @@ SELECT lowrite(lo_open(1004, x'20000'::int), 'abcd'); 4 (1 row) -GRANT SELECT ON LARGE OBJECT 1005 TO regress_user3; -GRANT UPDATE ON LARGE OBJECT 1006 TO regress_user3; -- to be denied +GRANT SELECT ON LARGE OBJECT 1005 TO regress_priv_user3; +GRANT UPDATE ON LARGE OBJECT 1006 TO regress_priv_user3; -- to be denied ERROR: large object 1006 does not exist REVOKE ALL ON LARGE OBJECT 2001, 2002 FROM PUBLIC; -GRANT ALL ON LARGE OBJECT 2001 TO regress_user3; +GRANT ALL ON LARGE OBJECT 2001 TO regress_priv_user3; SELECT lo_unlink(1001); -- to be denied ERROR: must be owner of large object 1001 SELECT lo_unlink(2002); @@ -1298,17 +1402,17 @@ SELECT lo_unlink(2002); \c - -- confirm ACL setting SELECT oid, pg_get_userbyid(lomowner) ownername, lomacl FROM pg_largeobject_metadata WHERE oid >= 1000 AND oid < 3000 ORDER BY oid; - oid | ownername | lomacl -------+---------------+------------------------------------------------------------------------------------------------ - 1001 | regress_user1 | {regress_user1=rw/regress_user1,=rw/regress_user1} - 1002 | regress_user1 | - 1003 | regress_user1 | {regress_user1=rw/regress_user1,regress_user2=r/regress_user1} - 1004 | regress_user1 | {regress_user1=rw/regress_user1,regress_user2=rw/regress_user1} - 1005 | regress_user1 | 
{regress_user1=rw/regress_user1,regress_user2=r*w/regress_user1,regress_user3=r/regress_user2} - 2001 | regress_user2 | {regress_user2=rw/regress_user2,regress_user3=rw/regress_user2} + oid | ownername | lomacl +------+--------------------+------------------------------------------------------------------------------------------------------------------------------ + 1001 | regress_priv_user1 | {regress_priv_user1=rw/regress_priv_user1,=rw/regress_priv_user1} + 1002 | regress_priv_user1 | + 1003 | regress_priv_user1 | {regress_priv_user1=rw/regress_priv_user1,regress_priv_user2=r/regress_priv_user1} + 1004 | regress_priv_user1 | {regress_priv_user1=rw/regress_priv_user1,regress_priv_user2=rw/regress_priv_user1} + 1005 | regress_priv_user1 | {regress_priv_user1=rw/regress_priv_user1,regress_priv_user2=r*w/regress_priv_user1,regress_priv_user3=r/regress_priv_user2} + 2001 | regress_priv_user2 | {regress_priv_user2=rw/regress_priv_user2,regress_priv_user3=rw/regress_priv_user2} (6 rows) -SET SESSION AUTHORIZATION regress_user3; +SET SESSION AUTHORIZATION regress_priv_user3; SELECT loread(lo_open(1001, x'40000'::int), 32); loread ------------ @@ -1334,7 +1438,7 @@ SELECT lo_truncate(lo_open(2001, x'20000'::int), 10); -- compatibility mode in largeobject permission \c - SET lo_compat_privileges = false; -- default setting -SET SESSION AUTHORIZATION regress_user4; +SET SESSION AUTHORIZATION regress_priv_user4; SELECT loread(lo_open(1002, x'40000'::int), 32); -- to be denied ERROR: permission denied for large object 1002 SELECT lowrite(lo_open(1002, x'20000'::int), 'abcd'); -- to be denied @@ -1346,11 +1450,14 @@ ERROR: permission denied for large object 1002 SELECT lo_unlink(1002); -- to be denied ERROR: must be owner of large object 1002 SELECT lo_export(1001, '/dev/null'); -- to be denied -ERROR: must be superuser to use server-side lo_export() -HINT: Anyone can use the client-side lo_export() provided by libpq. +ERROR: permission denied for function lo_export +SELECT lo_import('/dev/null'); -- to be denied +ERROR: permission denied for function lo_import +SELECT lo_import('/dev/null', 2003); -- to be denied +ERROR: permission denied for function lo_import \c - SET lo_compat_privileges = true; -- compatibility mode -SET SESSION AUTHORIZATION regress_user4; +SET SESSION AUTHORIZATION regress_priv_user4; SELECT loread(lo_open(1002, x'40000'::int), 32); loread -------- @@ -1376,8 +1483,7 @@ SELECT lo_unlink(1002); (1 row) SELECT lo_export(1001, '/dev/null'); -- to be denied -ERROR: must be superuser to use server-side lo_export() -HINT: Anyone can use the client-side lo_export() provided by libpq. 
+ERROR: permission denied for function lo_export -- don't allow unpriv users to access pg_largeobject contents \c - SELECT * FROM pg_largeobject LIMIT 0; @@ -1385,34 +1491,34 @@ SELECT * FROM pg_largeobject LIMIT 0; ------+--------+------ (0 rows) -SET SESSION AUTHORIZATION regress_user1; +SET SESSION AUTHORIZATION regress_priv_user1; SELECT * FROM pg_largeobject LIMIT 0; -- to be denied -ERROR: permission denied for relation pg_largeobject +ERROR: permission denied for table pg_largeobject -- test default ACLs \c - CREATE SCHEMA testns; -GRANT ALL ON SCHEMA testns TO regress_user1; +GRANT ALL ON SCHEMA testns TO regress_priv_user1; CREATE TABLE testns.acltest1 (x int); -SELECT has_table_privilege('regress_user1', 'testns.acltest1', 'SELECT'); -- no +SELECT has_table_privilege('regress_priv_user1', 'testns.acltest1', 'SELECT'); -- no has_table_privilege --------------------- f (1 row) -SELECT has_table_privilege('regress_user1', 'testns.acltest1', 'INSERT'); -- no +SELECT has_table_privilege('regress_priv_user1', 'testns.acltest1', 'INSERT'); -- no has_table_privilege --------------------- f (1 row) ALTER DEFAULT PRIVILEGES IN SCHEMA testns GRANT SELECT ON TABLES TO public; -SELECT has_table_privilege('regress_user1', 'testns.acltest1', 'SELECT'); -- no +SELECT has_table_privilege('regress_priv_user1', 'testns.acltest1', 'SELECT'); -- no has_table_privilege --------------------- f (1 row) -SELECT has_table_privilege('regress_user1', 'testns.acltest1', 'INSERT'); -- no +SELECT has_table_privilege('regress_priv_user1', 'testns.acltest1', 'INSERT'); -- no has_table_privilege --------------------- f @@ -1420,143 +1526,182 @@ SELECT has_table_privilege('regress_user1', 'testns.acltest1', 'INSERT'); -- no DROP TABLE testns.acltest1; CREATE TABLE testns.acltest1 (x int); -SELECT has_table_privilege('regress_user1', 'testns.acltest1', 'SELECT'); -- yes +SELECT has_table_privilege('regress_priv_user1', 'testns.acltest1', 'SELECT'); -- yes has_table_privilege --------------------- t (1 row) -SELECT has_table_privilege('regress_user1', 'testns.acltest1', 'INSERT'); -- no +SELECT has_table_privilege('regress_priv_user1', 'testns.acltest1', 'INSERT'); -- no has_table_privilege --------------------- f (1 row) -ALTER DEFAULT PRIVILEGES IN SCHEMA testns GRANT INSERT ON TABLES TO regress_user1; +ALTER DEFAULT PRIVILEGES IN SCHEMA testns GRANT INSERT ON TABLES TO regress_priv_user1; DROP TABLE testns.acltest1; CREATE TABLE testns.acltest1 (x int); -SELECT has_table_privilege('regress_user1', 'testns.acltest1', 'SELECT'); -- yes +SELECT has_table_privilege('regress_priv_user1', 'testns.acltest1', 'SELECT'); -- yes has_table_privilege --------------------- t (1 row) -SELECT has_table_privilege('regress_user1', 'testns.acltest1', 'INSERT'); -- yes +SELECT has_table_privilege('regress_priv_user1', 'testns.acltest1', 'INSERT'); -- yes has_table_privilege --------------------- t (1 row) -ALTER DEFAULT PRIVILEGES IN SCHEMA testns REVOKE INSERT ON TABLES FROM regress_user1; +ALTER DEFAULT PRIVILEGES IN SCHEMA testns REVOKE INSERT ON TABLES FROM regress_priv_user1; DROP TABLE testns.acltest1; CREATE TABLE testns.acltest1 (x int); -SELECT has_table_privilege('regress_user1', 'testns.acltest1', 'SELECT'); -- yes +SELECT has_table_privilege('regress_priv_user1', 'testns.acltest1', 'SELECT'); -- yes has_table_privilege --------------------- t (1 row) -SELECT has_table_privilege('regress_user1', 'testns.acltest1', 'INSERT'); -- no +SELECT has_table_privilege('regress_priv_user1', 'testns.acltest1', 'INSERT'); -- no 
has_table_privilege --------------------- f (1 row) -ALTER DEFAULT PRIVILEGES FOR ROLE regress_user1 REVOKE EXECUTE ON FUNCTIONS FROM public; -ALTER DEFAULT PRIVILEGES IN SCHEMA testns GRANT USAGE ON SCHEMAS TO regress_user2; -- error +ALTER DEFAULT PRIVILEGES FOR ROLE regress_priv_user1 REVOKE EXECUTE ON FUNCTIONS FROM public; +ALTER DEFAULT PRIVILEGES IN SCHEMA testns GRANT USAGE ON SCHEMAS TO regress_priv_user2; -- error ERROR: cannot use IN SCHEMA clause when using GRANT/REVOKE ON SCHEMAS -ALTER DEFAULT PRIVILEGES GRANT USAGE ON SCHEMAS TO regress_user2; +-- +-- Testing blanket default grants is very hazardous since it might change +-- the privileges attached to objects created by concurrent regression tests. +-- To avoid that, be sure to revoke the privileges again before committing. +-- +BEGIN; +ALTER DEFAULT PRIVILEGES GRANT USAGE ON SCHEMAS TO regress_priv_user2; CREATE SCHEMA testns2; -SELECT has_schema_privilege('regress_user2', 'testns2', 'USAGE'); -- yes +SELECT has_schema_privilege('regress_priv_user2', 'testns2', 'USAGE'); -- yes has_schema_privilege ---------------------- t (1 row) -SELECT has_schema_privilege('regress_user2', 'testns2', 'CREATE'); -- no +SELECT has_schema_privilege('regress_priv_user2', 'testns2', 'CREATE'); -- no has_schema_privilege ---------------------- f (1 row) -ALTER DEFAULT PRIVILEGES REVOKE USAGE ON SCHEMAS FROM regress_user2; +ALTER DEFAULT PRIVILEGES REVOKE USAGE ON SCHEMAS FROM regress_priv_user2; CREATE SCHEMA testns3; -SELECT has_schema_privilege('regress_user2', 'testns3', 'USAGE'); -- no +SELECT has_schema_privilege('regress_priv_user2', 'testns3', 'USAGE'); -- no has_schema_privilege ---------------------- f (1 row) -SELECT has_schema_privilege('regress_user2', 'testns3', 'CREATE'); -- no +SELECT has_schema_privilege('regress_priv_user2', 'testns3', 'CREATE'); -- no has_schema_privilege ---------------------- f (1 row) -ALTER DEFAULT PRIVILEGES GRANT ALL ON SCHEMAS TO regress_user2; +ALTER DEFAULT PRIVILEGES GRANT ALL ON SCHEMAS TO regress_priv_user2; CREATE SCHEMA testns4; -SELECT has_schema_privilege('regress_user2', 'testns4', 'USAGE'); -- yes +SELECT has_schema_privilege('regress_priv_user2', 'testns4', 'USAGE'); -- yes has_schema_privilege ---------------------- t (1 row) -SELECT has_schema_privilege('regress_user2', 'testns4', 'CREATE'); -- yes +SELECT has_schema_privilege('regress_priv_user2', 'testns4', 'CREATE'); -- yes has_schema_privilege ---------------------- t (1 row) -ALTER DEFAULT PRIVILEGES REVOKE ALL ON SCHEMAS FROM regress_user2; +ALTER DEFAULT PRIVILEGES REVOKE ALL ON SCHEMAS FROM regress_priv_user2; +COMMIT; CREATE SCHEMA testns5; -SELECT has_schema_privilege('regress_user2', 'testns5', 'USAGE'); -- no +SELECT has_schema_privilege('regress_priv_user2', 'testns5', 'USAGE'); -- no has_schema_privilege ---------------------- f (1 row) -SELECT has_schema_privilege('regress_user2', 'testns5', 'CREATE'); -- no +SELECT has_schema_privilege('regress_priv_user2', 'testns5', 'CREATE'); -- no has_schema_privilege ---------------------- f (1 row) -SET ROLE regress_user1; +SET ROLE regress_priv_user1; CREATE FUNCTION testns.foo() RETURNS int AS 'select 1' LANGUAGE sql; -SELECT has_function_privilege('regress_user2', 'testns.foo()', 'EXECUTE'); -- no +CREATE AGGREGATE testns.agg1(int) (sfunc = int4pl, stype = int4); +CREATE PROCEDURE testns.bar() AS 'select 1' LANGUAGE sql; +SELECT has_function_privilege('regress_priv_user2', 'testns.foo()', 'EXECUTE'); -- no + has_function_privilege +------------------------ + f +(1 row) + +SELECT 
has_function_privilege('regress_priv_user2', 'testns.agg1(int)', 'EXECUTE'); -- no + has_function_privilege +------------------------ + f +(1 row) + +SELECT has_function_privilege('regress_priv_user2', 'testns.bar()', 'EXECUTE'); -- no has_function_privilege ------------------------ f (1 row) -ALTER DEFAULT PRIVILEGES IN SCHEMA testns GRANT EXECUTE ON FUNCTIONS to public; +ALTER DEFAULT PRIVILEGES IN SCHEMA testns GRANT EXECUTE ON ROUTINES to public; DROP FUNCTION testns.foo(); CREATE FUNCTION testns.foo() RETURNS int AS 'select 1' LANGUAGE sql; -SELECT has_function_privilege('regress_user2', 'testns.foo()', 'EXECUTE'); -- yes +DROP AGGREGATE testns.agg1(int); +CREATE AGGREGATE testns.agg1(int) (sfunc = int4pl, stype = int4); +DROP PROCEDURE testns.bar(); +CREATE PROCEDURE testns.bar() AS 'select 1' LANGUAGE sql; +SELECT has_function_privilege('regress_priv_user2', 'testns.foo()', 'EXECUTE'); -- yes + has_function_privilege +------------------------ + t +(1 row) + +SELECT has_function_privilege('regress_priv_user2', 'testns.agg1(int)', 'EXECUTE'); -- yes + has_function_privilege +------------------------ + t +(1 row) + +SELECT has_function_privilege('regress_priv_user2', 'testns.bar()', 'EXECUTE'); -- yes (counts as function here) has_function_privilege ------------------------ t (1 row) DROP FUNCTION testns.foo(); -ALTER DEFAULT PRIVILEGES FOR ROLE regress_user1 REVOKE USAGE ON TYPES FROM public; -CREATE DOMAIN testns.testdomain1 AS int; -SELECT has_type_privilege('regress_user2', 'testns.testdomain1', 'USAGE'); -- no +DROP AGGREGATE testns.agg1(int); +DROP PROCEDURE testns.bar(); +ALTER DEFAULT PRIVILEGES FOR ROLE regress_priv_user1 REVOKE USAGE ON TYPES FROM public; +CREATE DOMAIN testns.priv_testdomain1 AS int; +SELECT has_type_privilege('regress_priv_user2', 'testns.priv_testdomain1', 'USAGE'); -- no has_type_privilege -------------------- f (1 row) ALTER DEFAULT PRIVILEGES IN SCHEMA testns GRANT USAGE ON TYPES to public; -DROP DOMAIN testns.testdomain1; -CREATE DOMAIN testns.testdomain1 AS int; -SELECT has_type_privilege('regress_user2', 'testns.testdomain1', 'USAGE'); -- yes +DROP DOMAIN testns.priv_testdomain1; +CREATE DOMAIN testns.priv_testdomain1 AS int; +SELECT has_type_privilege('regress_priv_user2', 'testns.priv_testdomain1', 'USAGE'); -- yes has_type_privilege -------------------- t (1 row) -DROP DOMAIN testns.testdomain1; +DROP DOMAIN testns.priv_testdomain1; RESET ROLE; SELECT count(*) FROM pg_default_acl d LEFT JOIN pg_namespace n ON defaclnamespace = n.oid @@ -1584,55 +1729,107 @@ SELECT d.* -- check that entries went away CREATE SCHEMA testns; CREATE TABLE testns.t1 (f1 int); CREATE TABLE testns.t2 (f1 int); -SELECT has_table_privilege('regress_user1', 'testns.t1', 'SELECT'); -- false +SELECT has_table_privilege('regress_priv_user1', 'testns.t1', 'SELECT'); -- false has_table_privilege --------------------- f (1 row) -GRANT ALL ON ALL TABLES IN SCHEMA testns TO regress_user1; -SELECT has_table_privilege('regress_user1', 'testns.t1', 'SELECT'); -- true +GRANT ALL ON ALL TABLES IN SCHEMA testns TO regress_priv_user1; +SELECT has_table_privilege('regress_priv_user1', 'testns.t1', 'SELECT'); -- true has_table_privilege --------------------- t (1 row) -SELECT has_table_privilege('regress_user1', 'testns.t2', 'SELECT'); -- true +SELECT has_table_privilege('regress_priv_user1', 'testns.t2', 'SELECT'); -- true has_table_privilege --------------------- t (1 row) -REVOKE ALL ON ALL TABLES IN SCHEMA testns FROM regress_user1; -SELECT has_table_privilege('regress_user1', 
'testns.t1', 'SELECT'); -- false +REVOKE ALL ON ALL TABLES IN SCHEMA testns FROM regress_priv_user1; +SELECT has_table_privilege('regress_priv_user1', 'testns.t1', 'SELECT'); -- false has_table_privilege --------------------- f (1 row) -SELECT has_table_privilege('regress_user1', 'testns.t2', 'SELECT'); -- false +SELECT has_table_privilege('regress_priv_user1', 'testns.t2', 'SELECT'); -- false has_table_privilege --------------------- f (1 row) -CREATE FUNCTION testns.testfunc(int) RETURNS int AS 'select 3 * $1;' LANGUAGE sql; -SELECT has_function_privilege('regress_user1', 'testns.testfunc(int)', 'EXECUTE'); -- true by default +CREATE FUNCTION testns.priv_testfunc(int) RETURNS int AS 'select 3 * $1;' LANGUAGE sql; +CREATE AGGREGATE testns.priv_testagg(int) (sfunc = int4pl, stype = int4); +CREATE PROCEDURE testns.priv_testproc(int) AS 'select 3' LANGUAGE sql; +SELECT has_function_privilege('regress_priv_user1', 'testns.priv_testfunc(int)', 'EXECUTE'); -- true by default + has_function_privilege +------------------------ + t +(1 row) + +SELECT has_function_privilege('regress_priv_user1', 'testns.priv_testagg(int)', 'EXECUTE'); -- true by default + has_function_privilege +------------------------ + t +(1 row) + +SELECT has_function_privilege('regress_priv_user1', 'testns.priv_testproc(int)', 'EXECUTE'); -- true by default has_function_privilege ------------------------ t (1 row) REVOKE ALL ON ALL FUNCTIONS IN SCHEMA testns FROM PUBLIC; -SELECT has_function_privilege('regress_user1', 'testns.testfunc(int)', 'EXECUTE'); -- false +SELECT has_function_privilege('regress_priv_user1', 'testns.priv_testfunc(int)', 'EXECUTE'); -- false + has_function_privilege +------------------------ + f +(1 row) + +SELECT has_function_privilege('regress_priv_user1', 'testns.priv_testagg(int)', 'EXECUTE'); -- false has_function_privilege ------------------------ f (1 row) +SELECT has_function_privilege('regress_priv_user1', 'testns.priv_testproc(int)', 'EXECUTE'); -- still true, not a function + has_function_privilege +------------------------ + t +(1 row) + +REVOKE ALL ON ALL PROCEDURES IN SCHEMA testns FROM PUBLIC; +SELECT has_function_privilege('regress_priv_user1', 'testns.priv_testproc(int)', 'EXECUTE'); -- now false + has_function_privilege +------------------------ + f +(1 row) + +GRANT ALL ON ALL ROUTINES IN SCHEMA testns TO PUBLIC; +SELECT has_function_privilege('regress_priv_user1', 'testns.priv_testfunc(int)', 'EXECUTE'); -- true + has_function_privilege +------------------------ + t +(1 row) + +SELECT has_function_privilege('regress_priv_user1', 'testns.priv_testagg(int)', 'EXECUTE'); -- true + has_function_privilege +------------------------ + t +(1 row) + +SELECT has_function_privilege('regress_priv_user1', 'testns.priv_testproc(int)', 'EXECUTE'); -- true + has_function_privilege +------------------------ + t +(1 row) + \set VERBOSITY terse \\ -- suppress cascade details DROP SCHEMA testns CASCADE; -NOTICE: drop cascades to 3 other objects +NOTICE: drop cascades to 5 other objects \set VERBOSITY default -- Change owner of the schema & and rename of new schema owner \c - @@ -1664,59 +1861,61 @@ DROP ROLE regress_schemauser1; DROP ROLE regress_schemauser_renamed; -- test that dependent privileges are revoked (or not) properly \c - -set session role regress_user1; +set session role regress_priv_user1; create table dep_priv_test (a int); -grant select on dep_priv_test to regress_user2 with grant option; -grant select on dep_priv_test to regress_user3 with grant option; -set session role regress_user2; 
-grant select on dep_priv_test to regress_user4 with grant option; -set session role regress_user3; -grant select on dep_priv_test to regress_user4 with grant option; -set session role regress_user4; -grant select on dep_priv_test to regress_user5; +grant select on dep_priv_test to regress_priv_user2 with grant option; +grant select on dep_priv_test to regress_priv_user3 with grant option; +set session role regress_priv_user2; +grant select on dep_priv_test to regress_priv_user4 with grant option; +set session role regress_priv_user3; +grant select on dep_priv_test to regress_priv_user4 with grant option; +set session role regress_priv_user4; +grant select on dep_priv_test to regress_priv_user5; \dp dep_priv_test - Access privileges - Schema | Name | Type | Access privileges | Column privileges | Policies ---------+---------------+-------+-------------------------------------+-------------------+---------- - public | dep_priv_test | table | regress_user1=arwdDxt/regress_user1+| | - | | | regress_user2=r*/regress_user1 +| | - | | | regress_user3=r*/regress_user1 +| | - | | | regress_user4=r*/regress_user2 +| | - | | | regress_user4=r*/regress_user3 +| | - | | | regress_user5=r/regress_user4 | | -(1 row) - -set session role regress_user2; -revoke select on dep_priv_test from regress_user4 cascade; + Access privileges + Schema | Name | Type | Access privileges | Column privileges | Policies +--------+---------------+-------+-----------------------------------------------+-------------------+---------- + public | dep_priv_test | table | regress_priv_user1=arwdDxt/regress_priv_user1+| | + | | | regress_priv_user2=r*/regress_priv_user1 +| | + | | | regress_priv_user3=r*/regress_priv_user1 +| | + | | | regress_priv_user4=r*/regress_priv_user2 +| | + | | | regress_priv_user4=r*/regress_priv_user3 +| | + | | | regress_priv_user5=r/regress_priv_user4 | | +(1 row) + +set session role regress_priv_user2; +revoke select on dep_priv_test from regress_priv_user4 cascade; \dp dep_priv_test - Access privileges - Schema | Name | Type | Access privileges | Column privileges | Policies ---------+---------------+-------+-------------------------------------+-------------------+---------- - public | dep_priv_test | table | regress_user1=arwdDxt/regress_user1+| | - | | | regress_user2=r*/regress_user1 +| | - | | | regress_user3=r*/regress_user1 +| | - | | | regress_user4=r*/regress_user3 +| | - | | | regress_user5=r/regress_user4 | | -(1 row) - -set session role regress_user3; -revoke select on dep_priv_test from regress_user4 cascade; + Access privileges + Schema | Name | Type | Access privileges | Column privileges | Policies +--------+---------------+-------+-----------------------------------------------+-------------------+---------- + public | dep_priv_test | table | regress_priv_user1=arwdDxt/regress_priv_user1+| | + | | | regress_priv_user2=r*/regress_priv_user1 +| | + | | | regress_priv_user3=r*/regress_priv_user1 +| | + | | | regress_priv_user4=r*/regress_priv_user3 +| | + | | | regress_priv_user5=r/regress_priv_user4 | | +(1 row) + +set session role regress_priv_user3; +revoke select on dep_priv_test from regress_priv_user4 cascade; \dp dep_priv_test - Access privileges - Schema | Name | Type | Access privileges | Column privileges | Policies ---------+---------------+-------+-------------------------------------+-------------------+---------- - public | dep_priv_test | table | regress_user1=arwdDxt/regress_user1+| | - | | | regress_user2=r*/regress_user1 +| | - | | | regress_user3=r*/regress_user1 | 
| + Access privileges + Schema | Name | Type | Access privileges | Column privileges | Policies +--------+---------------+-------+-----------------------------------------------+-------------------+---------- + public | dep_priv_test | table | regress_priv_user1=arwdDxt/regress_priv_user1+| | + | | | regress_priv_user2=r*/regress_priv_user1 +| | + | | | regress_priv_user3=r*/regress_priv_user1 | | (1 row) -set session role regress_user1; +set session role regress_priv_user1; drop table dep_priv_test; -- clean up \c drop sequence x_seq; -DROP FUNCTION testfunc2(int); -DROP FUNCTION testfunc4(boolean); +DROP AGGREGATE priv_testagg1(int); +DROP FUNCTION priv_testfunc2(int); +DROP FUNCTION priv_testfunc4(boolean); +DROP PROCEDURE priv_testproc1(int); DROP VIEW atestv0; DROP VIEW atestv1; DROP VIEW atestv2; @@ -1745,18 +1944,18 @@ SELECT lo_unlink(oid) FROM pg_largeobject_metadata WHERE oid >= 1000 AND oid < 3 1 (5 rows) -DROP GROUP regress_group1; -DROP GROUP regress_group2; +DROP GROUP regress_priv_group1; +DROP GROUP regress_priv_group2; -- these are needed to clean up permissions -REVOKE USAGE ON LANGUAGE sql FROM regress_user1; -DROP OWNED BY regress_user1; -DROP USER regress_user1; -DROP USER regress_user2; -DROP USER regress_user3; -DROP USER regress_user4; -DROP USER regress_user5; -DROP USER regress_user6; -ERROR: role "regress_user6" does not exist +REVOKE USAGE ON LANGUAGE sql FROM regress_priv_user1; +DROP OWNED BY regress_priv_user1; +DROP USER regress_priv_user1; +DROP USER regress_priv_user2; +DROP USER regress_priv_user3; +DROP USER regress_priv_user4; +DROP USER regress_priv_user5; +DROP USER regress_priv_user6; +ERROR: role "regress_priv_user6" does not exist -- permissions with LOCK TABLE CREATE USER regress_locktable_user; CREATE TABLE lock_table (a int); @@ -1765,14 +1964,14 @@ GRANT SELECT ON lock_table TO regress_locktable_user; SET SESSION AUTHORIZATION regress_locktable_user; BEGIN; LOCK TABLE lock_table IN ROW EXCLUSIVE MODE; -- should fail -ERROR: permission denied for relation lock_table +ERROR: permission denied for table lock_table ROLLBACK; BEGIN; LOCK TABLE lock_table IN ACCESS SHARE MODE; -- should pass COMMIT; BEGIN; LOCK TABLE lock_table IN ACCESS EXCLUSIVE MODE; -- should fail -ERROR: permission denied for relation lock_table +ERROR: permission denied for table lock_table ROLLBACK; \c REVOKE SELECT ON lock_table FROM regress_locktable_user; @@ -1784,11 +1983,11 @@ LOCK TABLE lock_table IN ROW EXCLUSIVE MODE; -- should pass COMMIT; BEGIN; LOCK TABLE lock_table IN ACCESS SHARE MODE; -- should fail -ERROR: permission denied for relation lock_table +ERROR: permission denied for table lock_table ROLLBACK; BEGIN; LOCK TABLE lock_table IN ACCESS EXCLUSIVE MODE; -- should fail -ERROR: permission denied for relation lock_table +ERROR: permission denied for table lock_table ROLLBACK; \c REVOKE INSERT ON lock_table FROM regress_locktable_user; @@ -1800,7 +1999,7 @@ LOCK TABLE lock_table IN ROW EXCLUSIVE MODE; -- should pass COMMIT; BEGIN; LOCK TABLE lock_table IN ACCESS SHARE MODE; -- should fail -ERROR: permission denied for relation lock_table +ERROR: permission denied for table lock_table ROLLBACK; BEGIN; LOCK TABLE lock_table IN ACCESS EXCLUSIVE MODE; -- should pass @@ -1815,7 +2014,7 @@ LOCK TABLE lock_table IN ROW EXCLUSIVE MODE; -- should pass COMMIT; BEGIN; LOCK TABLE lock_table IN ACCESS SHARE MODE; -- should fail -ERROR: permission denied for relation lock_table +ERROR: permission denied for table lock_table ROLLBACK; BEGIN; LOCK TABLE lock_table IN ACCESS 
EXCLUSIVE MODE; -- should pass @@ -1830,7 +2029,7 @@ LOCK TABLE lock_table IN ROW EXCLUSIVE MODE; -- should pass COMMIT; BEGIN; LOCK TABLE lock_table IN ACCESS SHARE MODE; -- should fail -ERROR: permission denied for relation lock_table +ERROR: permission denied for table lock_table ROLLBACK; BEGIN; LOCK TABLE lock_table IN ACCESS EXCLUSIVE MODE; -- should pass diff --git a/src/test/regress/expected/psql.out b/src/test/regress/expected/psql.out index d602aeef42..3818cfea7e 100644 --- a/src/test/regress/expected/psql.out +++ b/src/test/regress/expected/psql.out @@ -8,7 +8,7 @@ invalid variable name: "invalid/name" -- fail: invalid value for special variable \set AUTOCOMMIT foo -unrecognized value "foo" for "AUTOCOMMIT": boolean expected +unrecognized value "foo" for "AUTOCOMMIT": Boolean expected \set FETCH_COUNT foo invalid value "foo" for "FETCH_COUNT": integer expected -- check handling of built-in boolean variable @@ -51,6 +51,31 @@ four | 4 3 | 4 (1 row) +-- \gx should work in FETCH_COUNT mode too +\set FETCH_COUNT 1 +SELECT 1 as one, 2 as two \g + one | two +-----+----- + 1 | 2 +(1 row) + +\gx +-[ RECORD 1 ] +one | 1 +two | 2 + +SELECT 3 as three, 4 as four \gx +-[ RECORD 1 ] +three | 3 +four | 4 + +\g + three | four +-------+------ + 3 | 4 +(1 row) + +\unset FETCH_COUNT -- \gset select 10 as test01, 20 as test02, 'Hello' as test03 \gset pref01_ \echo :pref01_test01 :pref01_test02 :pref01_test03 @@ -101,6 +126,91 @@ more than one row returned for \gset select 10 as test01, 20 as test02 from generate_series(1,0) \gset no rows returned for \gset \unset FETCH_COUNT +-- \gdesc +SELECT + NULL AS zero, + 1 AS one, + 2.0 AS two, + 'three' AS three, + $1 AS four, + sin($2) as five, + 'foo'::varchar(4) as six, + CURRENT_DATE AS now +\gdesc + Column | Type +--------+---------------------- + zero | text + one | integer + two | numeric + three | text + four | text + five | double precision + six | character varying(4) + now | date +(8 rows) + +-- should work with tuple-returning utilities, such as EXECUTE +PREPARE test AS SELECT 1 AS first, 2 AS second; +EXECUTE test \gdesc + Column | Type +--------+--------- + first | integer + second | integer +(2 rows) + +EXPLAIN EXECUTE test \gdesc + Column | Type +------------+------ + QUERY PLAN | text +(1 row) + +-- should fail cleanly - syntax error +SELECT 1 + \gdesc +ERROR: syntax error at end of input +LINE 1: SELECT 1 + + ^ +-- check behavior with empty results +SELECT \gdesc +The command has no result, or the result has no columns. +CREATE TABLE bububu(a int) \gdesc +The command has no result, or the result has no columns. +-- subject command should not have executed +TABLE bububu; -- fail +ERROR: relation "bububu" does not exist +LINE 1: TABLE bububu; + ^ +-- query buffer should remain unchanged +SELECT 1 AS x, 'Hello', 2 AS y, true AS "dirty\name" +\gdesc + Column | Type +------------+--------- + x | integer + ?column? | text + y | integer + dirty\name | boolean +(4 rows) + +\g + x | ?column? | y | dirty\name +---+----------+---+------------ + 1 | Hello | 2 | t +(1 row) + +-- all on one line +SELECT 3 AS x, 'Hello', 4 AS y, true AS "dirty\name" \gdesc \g + Column | Type +------------+--------- + x | integer + ?column? | text + y | integer + dirty\name | boolean +(4 rows) + + x | ?column? 
| y | dirty\name +---+----------+---+------------ + 3 | Hello | 4 | t +(1 row) + -- \gexec create temporary table gexec_test(a int, b text, c date, d float); select format('create index on gexec_test(%I)', attname) @@ -2829,7 +2939,7 @@ second thing true \endif -- invalid boolean expressions are false \if invalid boolean expression -unrecognized value "invalid boolean expression" for "\if expression": boolean expected +unrecognized value "invalid boolean expression" for "\if expression": Boolean expected \echo 'will not print #6-1' \else \echo 'will print anyway #6-2' @@ -2904,6 +3014,32 @@ bar 'bar' "bar" \echo 'should print #8-1' should print #8-1 \endif +-- :{?...} defined variable test +\set i 1 +\if :{?i} + \echo '#9-1 ok, variable i is defined' +#9-1 ok, variable i is defined +\else + \echo 'should not print #9-2' +\endif +\if :{?no_such_variable} + \echo 'should not print #10-1' +\else + \echo '#10-2 ok, variable no_such_variable is not defined' +#10-2 ok, variable no_such_variable is not defined +\endif +SELECT :{?i} AS i_is_defined; + i_is_defined +-------------- + t +(1 row) + +SELECT NOT :{?no_such_var} AS no_such_var_is_not_defined; + no_such_var_is_not_defined +---------------------------- + t +(1 row) + -- SHOW_CONTEXT \set SHOW_CONTEXT never do $$ @@ -2964,3 +3100,146 @@ SELECT 3 UNION SELECT 4 UNION SELECT 5 ORDER BY 1; +-- tests for special result variables +-- working query, 2 rows selected +SELECT 1 AS stuff UNION SELECT 2; + stuff +------- + 1 + 2 +(2 rows) + +\echo 'error:' :ERROR +error: false +\echo 'error code:' :SQLSTATE +error code: 00000 +\echo 'number of rows:' :ROW_COUNT +number of rows: 2 +-- syntax error +SELECT 1 UNION; +ERROR: syntax error at or near ";" +LINE 1: SELECT 1 UNION; + ^ +\echo 'error:' :ERROR +error: true +\echo 'error code:' :SQLSTATE +error code: 42601 +\echo 'number of rows:' :ROW_COUNT +number of rows: 0 +\echo 'last error message:' :LAST_ERROR_MESSAGE +last error message: syntax error at or near ";" +\echo 'last error code:' :LAST_ERROR_SQLSTATE +last error code: 42601 +-- empty query +; +\echo 'error:' :ERROR +error: false +\echo 'error code:' :SQLSTATE +error code: 00000 +\echo 'number of rows:' :ROW_COUNT +number of rows: 0 +-- must have kept previous values +\echo 'last error message:' :LAST_ERROR_MESSAGE +last error message: syntax error at or near ";" +\echo 'last error code:' :LAST_ERROR_SQLSTATE +last error code: 42601 +-- other query error +DROP TABLE this_table_does_not_exist; +ERROR: table "this_table_does_not_exist" does not exist +\echo 'error:' :ERROR +error: true +\echo 'error code:' :SQLSTATE +error code: 42P01 +\echo 'number of rows:' :ROW_COUNT +number of rows: 0 +\echo 'last error message:' :LAST_ERROR_MESSAGE +last error message: table "this_table_does_not_exist" does not exist +\echo 'last error code:' :LAST_ERROR_SQLSTATE +last error code: 42P01 +-- working \gdesc +SELECT 3 AS three, 4 AS four \gdesc + Column | Type +--------+--------- + three | integer + four | integer +(2 rows) + +\echo 'error:' :ERROR +error: false +\echo 'error code:' :SQLSTATE +error code: 00000 +\echo 'number of rows:' :ROW_COUNT +number of rows: 2 +-- \gdesc with an error +SELECT 4 AS \gdesc +ERROR: syntax error at end of input +LINE 1: SELECT 4 AS + ^ +\echo 'error:' :ERROR +error: true +\echo 'error code:' :SQLSTATE +error code: 42601 +\echo 'number of rows:' :ROW_COUNT +number of rows: 0 +\echo 'last error message:' :LAST_ERROR_MESSAGE +last error message: syntax error at end of input +\echo 'last error code:' :LAST_ERROR_SQLSTATE +last 
error code: 42601 +-- check row count for a cursor-fetched query +\set FETCH_COUNT 10 +select unique2 from tenk1 order by unique2 limit 19; + unique2 +--------- + 0 + 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8 + 9 + 10 + 11 + 12 + 13 + 14 + 15 + 16 + 17 + 18 +(19 rows) + +\echo 'error:' :ERROR +error: false +\echo 'error code:' :SQLSTATE +error code: 00000 +\echo 'number of rows:' :ROW_COUNT +number of rows: 19 +-- cursor-fetched query with an error after the first group +select 1/(15-unique2) from tenk1 order by unique2 limit 19; + ?column? +---------- + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 +ERROR: division by zero +\echo 'error:' :ERROR +error: true +\echo 'error code:' :SQLSTATE +error code: 22012 +\echo 'number of rows:' :ROW_COUNT +number of rows: 0 +\echo 'last error message:' :LAST_ERROR_MESSAGE +last error message: division by zero +\echo 'last error code:' :LAST_ERROR_SQLSTATE +last error code: 22012 +\unset FETCH_COUNT diff --git a/src/test/regress/expected/publication.out b/src/test/regress/expected/publication.out index b101331d69..afbbdd543d 100644 --- a/src/test/regress/expected/publication.out +++ b/src/test/regress/expected/publication.out @@ -21,20 +21,20 @@ ERROR: unrecognized publication parameter: foo CREATE PUBLICATION testpub_xxx WITH (publish = 'cluster, vacuum'); ERROR: unrecognized "publish" value: "cluster" \dRp - List of publications - Name | Owner | All tables | Inserts | Updates | Deletes ---------------------+--------------------------+------------+---------+---------+--------- - testpib_ins_trunct | regress_publication_user | f | t | f | f - testpub_default | regress_publication_user | f | f | t | f + List of publications + Name | Owner | All tables | Inserts | Updates | Deletes | Truncates +--------------------+--------------------------+------------+---------+---------+---------+----------- + testpib_ins_trunct | regress_publication_user | f | t | f | f | f + testpub_default | regress_publication_user | f | f | t | f | f (2 rows) ALTER PUBLICATION testpub_default SET (publish = 'insert, update, delete'); \dRp - List of publications - Name | Owner | All tables | Inserts | Updates | Deletes ---------------------+--------------------------+------------+---------+---------+--------- - testpib_ins_trunct | regress_publication_user | f | t | f | f - testpub_default | regress_publication_user | f | t | t | t + List of publications + Name | Owner | All tables | Inserts | Updates | Deletes | Truncates +--------------------+--------------------------+------------+---------+---------+---------+----------- + testpib_ins_trunct | regress_publication_user | f | t | f | f | f + testpub_default | regress_publication_user | f | t | t | t | f (2 rows) --- adding tables @@ -76,10 +76,10 @@ Publications: "testpub_foralltables" \dRp+ testpub_foralltables - Publication testpub_foralltables - Owner | All tables | Inserts | Updates | Deletes ---------------------------+------------+---------+---------+--------- - regress_publication_user | t | t | t | f + Publication testpub_foralltables + Owner | All tables | Inserts | Updates | Deletes | Truncates +--------------------------+------------+---------+---------+---------+----------- + regress_publication_user | t | t | t | f | f (1 row) DROP TABLE testpub_tbl2; @@ -89,19 +89,19 @@ CREATE TABLE testpub_tbl3a (b text) INHERITS (testpub_tbl3); CREATE PUBLICATION testpub3 FOR TABLE testpub_tbl3; CREATE PUBLICATION testpub4 FOR TABLE ONLY testpub_tbl3; \dRp+ testpub3 - Publication testpub3 - Owner | All tables | Inserts | Updates | Deletes 
---------------------------+------------+---------+---------+--------- - regress_publication_user | f | t | t | t + Publication testpub3 + Owner | All tables | Inserts | Updates | Deletes | Truncates +--------------------------+------------+---------+---------+---------+----------- + regress_publication_user | f | t | t | t | t Tables: "public.testpub_tbl3" "public.testpub_tbl3a" \dRp+ testpub4 - Publication testpub4 - Owner | All tables | Inserts | Updates | Deletes ---------------------------+------------+---------+---------+--------- - regress_publication_user | f | t | t | t + Publication testpub4 + Owner | All tables | Inserts | Updates | Deletes | Truncates +--------------------------+------------+---------+---------+---------+----------- + regress_publication_user | f | t | t | t | t Tables: "public.testpub_tbl3" @@ -119,10 +119,10 @@ ERROR: relation "testpub_tbl1" is already member of publication "testpub_fortbl CREATE PUBLICATION testpub_fortbl FOR TABLE testpub_tbl1; ERROR: publication "testpub_fortbl" already exists \dRp+ testpub_fortbl - Publication testpub_fortbl - Owner | All tables | Inserts | Updates | Deletes ---------------------------+------------+---------+---------+--------- - regress_publication_user | f | t | t | t + Publication testpub_fortbl + Owner | All tables | Inserts | Updates | Deletes | Truncates +--------------------------+------------+---------+---------+---------+----------- + regress_publication_user | f | t | t | t | t Tables: "pub_test.testpub_nopk" "public.testpub_tbl1" @@ -165,10 +165,10 @@ Publications: "testpub_fortbl" \dRp+ testpub_default - Publication testpub_default - Owner | All tables | Inserts | Updates | Deletes ---------------------------+------------+---------+---------+--------- - regress_publication_user | f | t | t | t + Publication testpub_default + Owner | All tables | Inserts | Updates | Deletes | Truncates +--------------------------+------------+---------+---------+---------+----------- + regress_publication_user | f | t | t | t | f Tables: "pub_test.testpub_nopk" "public.testpub_tbl1" @@ -198,7 +198,7 @@ GRANT CREATE ON DATABASE regression TO regress_publication_user2; SET ROLE regress_publication_user2; CREATE PUBLICATION testpub2; -- ok ALTER PUBLICATION testpub2 ADD TABLE testpub_tbl1; -- fail -ERROR: must be owner of relation testpub_tbl1 +ERROR: must be owner of table testpub_tbl1 SET ROLE regress_publication_user; GRANT regress_publication_user TO regress_publication_user2; SET ROLE regress_publication_user2; @@ -210,10 +210,10 @@ DROP TABLE testpub_parted; DROP VIEW testpub_view; DROP TABLE testpub_tbl1; \dRp+ testpub_default - Publication testpub_default - Owner | All tables | Inserts | Updates | Deletes ---------------------------+------------+---------+---------+--------- - regress_publication_user | f | t | t | t + Publication testpub_default + Owner | All tables | Inserts | Updates | Deletes | Truncates +--------------------------+------------+---------+---------+---------+----------- + regress_publication_user | f | t | t | t | f (1 row) -- fail - must be owner of publication @@ -223,20 +223,20 @@ ERROR: must be owner of publication testpub_default RESET ROLE; ALTER PUBLICATION testpub_default RENAME TO testpub_foo; \dRp testpub_foo - List of publications - Name | Owner | All tables | Inserts | Updates | Deletes --------------+--------------------------+------------+---------+---------+--------- - testpub_foo | regress_publication_user | f | t | t | t + List of publications + Name | Owner | All tables | Inserts | 
Updates | Deletes | Truncates +-------------+--------------------------+------------+---------+---------+---------+----------- + testpub_foo | regress_publication_user | f | t | t | t | f (1 row) -- rename back to keep the rest simple ALTER PUBLICATION testpub_foo RENAME TO testpub_default; ALTER PUBLICATION testpub_default OWNER TO regress_publication_user2; \dRp testpub_default - List of publications - Name | Owner | All tables | Inserts | Updates | Deletes ------------------+---------------------------+------------+---------+---------+--------- - testpub_default | regress_publication_user2 | f | t | t | t + List of publications + Name | Owner | All tables | Inserts | Updates | Deletes | Truncates +-----------------+---------------------------+------------+---------+---------+---------+----------- + testpub_default | regress_publication_user2 | f | t | t | t | f (1 row) DROP PUBLICATION testpub_default; diff --git a/src/test/regress/expected/rangefuncs.out b/src/test/regress/expected/rangefuncs.out index 5c82614e05..34ca0ef890 100644 --- a/src/test/regress/expected/rangefuncs.out +++ b/src/test/regress/expected/rangefuncs.out @@ -1,24 +1,24 @@ -CREATE TABLE foo2(fooid int, f2 int); -INSERT INTO foo2 VALUES(1, 11); -INSERT INTO foo2 VALUES(2, 22); -INSERT INTO foo2 VALUES(1, 111); -CREATE FUNCTION foot(int) returns setof foo2 as 'SELECT * FROM foo2 WHERE fooid = $1 ORDER BY f2;' LANGUAGE SQL; +CREATE TABLE rngfunc2(rngfuncid int, f2 int); +INSERT INTO rngfunc2 VALUES(1, 11); +INSERT INTO rngfunc2 VALUES(2, 22); +INSERT INTO rngfunc2 VALUES(1, 111); +CREATE FUNCTION rngfunct(int) returns setof rngfunc2 as 'SELECT * FROM rngfunc2 WHERE rngfuncid = $1 ORDER BY f2;' LANGUAGE SQL; -- function with ORDINALITY -select * from foot(1) with ordinality as z(a,b,ord); +select * from rngfunct(1) with ordinality as z(a,b,ord); a | b | ord ---+-----+----- 1 | 11 | 1 1 | 111 | 2 (2 rows) -select * from foot(1) with ordinality as z(a,b,ord) where b > 100; -- ordinal 2, not 1 +select * from rngfunct(1) with ordinality as z(a,b,ord) where b > 100; -- ordinal 2, not 1 a | b | ord ---+-----+----- 1 | 111 | 2 (1 row) -- ordinality vs. column names and types -select a,b,ord from foot(1) with ordinality as z(a,b,ord); +select a,b,ord from rngfunct(1) with ordinality as z(a,b,ord); a | b | ord ---+-----+----- 1 | 11 | 1 @@ -61,7 +61,7 @@ select row_to_json(s.*) from generate_series(11,14) with ordinality s; (4 rows) -- ordinality vs. 
views -create temporary view vw_ord as select * from (values (1)) v(n) join foot(1) with ordinality as z(a,b,ord) on (n=ord); +create temporary view vw_ord as select * from (values (1)) v(n) join rngfunct(1) with ordinality as z(a,b,ord) on (n=ord); select * from vw_ord; n | a | b | ord ---+---+----+----- @@ -69,26 +69,26 @@ select * from vw_ord; (1 row) select definition from pg_views where viewname='vw_ord'; - definition ---------------------------------------------------------------------- - SELECT v.n, + - z.a, + - z.b, + - z.ord + - FROM (( VALUES (1)) v(n) + - JOIN foot(1) WITH ORDINALITY z(a, b, ord) ON ((v.n = z.ord))); + definition +------------------------------------------------------------------------- + SELECT v.n, + + z.a, + + z.b, + + z.ord + + FROM (( VALUES (1)) v(n) + + JOIN rngfunct(1) WITH ORDINALITY z(a, b, ord) ON ((v.n = z.ord))); (1 row) drop view vw_ord; -- multiple functions -select * from rows from(foot(1),foot(2)) with ordinality as z(a,b,c,d,ord); +select * from rows from(rngfunct(1),rngfunct(2)) with ordinality as z(a,b,c,d,ord); a | b | c | d | ord ---+-----+---+----+----- 1 | 11 | 2 | 22 | 1 1 | 111 | | | 2 (2 rows) -create temporary view vw_ord as select * from (values (1)) v(n) join rows from(foot(1),foot(2)) with ordinality as z(a,b,c,d,ord) on (n=ord); +create temporary view vw_ord as select * from (values (1)) v(n) join rows from(rngfunct(1),rngfunct(2)) with ordinality as z(a,b,c,d,ord) on (n=ord); select * from vw_ord; n | a | b | c | d | ord ---+---+----+---+----+----- @@ -96,16 +96,16 @@ select * from vw_ord; (1 row) select definition from pg_views where viewname='vw_ord'; - definition ------------------------------------------------------------------------------------------------ - SELECT v.n, + - z.a, + - z.b, + - z.c, + - z.d, + - z.ord + - FROM (( VALUES (1)) v(n) + - JOIN ROWS FROM(foot(1), foot(2)) WITH ORDINALITY z(a, b, c, d, ord) ON ((v.n = z.ord))); + definition +------------------------------------------------------------------------------------------------------- + SELECT v.n, + + z.a, + + z.b, + + z.c, + + z.d, + + z.ord + + FROM (( VALUES (1)) v(n) + + JOIN ROWS FROM(rngfunct(1), rngfunct(2)) WITH ORDINALITY z(a, b, c, d, ord) ON ((v.n = z.ord))); (1 row) drop view vw_ord; @@ -194,8 +194,8 @@ select definition from pg_views where viewname='vw_ord'; drop view vw_ord; -- ordinality and multiple functions vs. 
rewind and reverse scan begin; -declare foo scroll cursor for select * from rows from(generate_series(1,5),generate_series(1,2)) with ordinality as g(i,j,o); -fetch all from foo; +declare rf_cur scroll cursor for select * from rows from(generate_series(1,5),generate_series(1,2)) with ordinality as g(i,j,o); +fetch all from rf_cur; i | j | o ---+---+--- 1 | 1 | 1 @@ -205,7 +205,7 @@ fetch all from foo; 5 | | 5 (5 rows) -fetch backward all from foo; +fetch backward all from rf_cur; i | j | o ---+---+--- 5 | | 5 @@ -215,7 +215,7 @@ fetch backward all from foo; 1 | 1 | 1 (5 rows) -fetch all from foo; +fetch all from rf_cur; i | j | o ---+---+--- 1 | 1 | 1 @@ -225,59 +225,59 @@ fetch all from foo; 5 | | 5 (5 rows) -fetch next from foo; +fetch next from rf_cur; i | j | o ---+---+--- (0 rows) -fetch next from foo; +fetch next from rf_cur; i | j | o ---+---+--- (0 rows) -fetch prior from foo; +fetch prior from rf_cur; i | j | o ---+---+--- 5 | | 5 (1 row) -fetch absolute 1 from foo; +fetch absolute 1 from rf_cur; i | j | o ---+---+--- 1 | 1 | 1 (1 row) -fetch next from foo; +fetch next from rf_cur; i | j | o ---+---+--- 2 | 2 | 2 (1 row) -fetch next from foo; +fetch next from rf_cur; i | j | o ---+---+--- 3 | | 3 (1 row) -fetch next from foo; +fetch next from rf_cur; i | j | o ---+---+--- 4 | | 4 (1 row) -fetch prior from foo; +fetch prior from rf_cur; i | j | o ---+---+--- 3 | | 3 (1 row) -fetch prior from foo; +fetch prior from rf_cur; i | j | o ---+---+--- 2 | 2 | 2 (1 row) -fetch prior from foo; +fetch prior from rf_cur; i | j | o ---+---+--- 1 | 1 | 1 @@ -285,357 +285,357 @@ fetch prior from foo; commit; -- function with implicit LATERAL -select * from foo2, foot(foo2.fooid) z where foo2.f2 = z.f2; - fooid | f2 | fooid | f2 --------+-----+-------+----- - 1 | 11 | 1 | 11 - 2 | 22 | 2 | 22 - 1 | 111 | 1 | 111 +select * from rngfunc2, rngfunct(rngfunc2.rngfuncid) z where rngfunc2.f2 = z.f2; + rngfuncid | f2 | rngfuncid | f2 +-----------+-----+-----------+----- + 1 | 11 | 1 | 11 + 2 | 22 | 2 | 22 + 1 | 111 | 1 | 111 (3 rows) -- function with implicit LATERAL and explicit ORDINALITY -select * from foo2, foot(foo2.fooid) with ordinality as z(fooid,f2,ord) where foo2.f2 = z.f2; - fooid | f2 | fooid | f2 | ord --------+-----+-------+-----+----- - 1 | 11 | 1 | 11 | 1 - 2 | 22 | 2 | 22 | 1 - 1 | 111 | 1 | 111 | 2 +select * from rngfunc2, rngfunct(rngfunc2.rngfuncid) with ordinality as z(rngfuncid,f2,ord) where rngfunc2.f2 = z.f2; + rngfuncid | f2 | rngfuncid | f2 | ord +-----------+-----+-----------+-----+----- + 1 | 11 | 1 | 11 | 1 + 2 | 22 | 2 | 22 | 1 + 1 | 111 | 1 | 111 | 2 (3 rows) -- function in subselect -select * from foo2 where f2 in (select f2 from foot(foo2.fooid) z where z.fooid = foo2.fooid) ORDER BY 1,2; - fooid | f2 --------+----- - 1 | 11 - 1 | 111 - 2 | 22 +select * from rngfunc2 where f2 in (select f2 from rngfunct(rngfunc2.rngfuncid) z where z.rngfuncid = rngfunc2.rngfuncid) ORDER BY 1,2; + rngfuncid | f2 +-----------+----- + 1 | 11 + 1 | 111 + 2 | 22 (3 rows) -- function in subselect -select * from foo2 where f2 in (select f2 from foot(1) z where z.fooid = foo2.fooid) ORDER BY 1,2; - fooid | f2 --------+----- - 1 | 11 - 1 | 111 +select * from rngfunc2 where f2 in (select f2 from rngfunct(1) z where z.rngfuncid = rngfunc2.rngfuncid) ORDER BY 1,2; + rngfuncid | f2 +-----------+----- + 1 | 11 + 1 | 111 (2 rows) -- function in subselect -select * from foo2 where f2 in (select f2 from foot(foo2.fooid) z where z.fooid = 1) ORDER BY 1,2; - fooid | f2 --------+----- - 1 | 11 - 1 | 111 
+select * from rngfunc2 where f2 in (select f2 from rngfunct(rngfunc2.rngfuncid) z where z.rngfuncid = 1) ORDER BY 1,2; + rngfuncid | f2 +-----------+----- + 1 | 11 + 1 | 111 (2 rows) -- nested functions -select foot.fooid, foot.f2 from foot(sin(pi()/2)::int) ORDER BY 1,2; - fooid | f2 --------+----- - 1 | 11 - 1 | 111 +select rngfunct.rngfuncid, rngfunct.f2 from rngfunct(sin(pi()/2)::int) ORDER BY 1,2; + rngfuncid | f2 +-----------+----- + 1 | 11 + 1 | 111 (2 rows) -CREATE TABLE foo (fooid int, foosubid int, fooname text, primary key(fooid,foosubid)); -INSERT INTO foo VALUES(1,1,'Joe'); -INSERT INTO foo VALUES(1,2,'Ed'); -INSERT INTO foo VALUES(2,1,'Mary'); +CREATE TABLE rngfunc (rngfuncid int, rngfuncsubid int, rngfuncname text, primary key(rngfuncid,rngfuncsubid)); +INSERT INTO rngfunc VALUES(1,1,'Joe'); +INSERT INTO rngfunc VALUES(1,2,'Ed'); +INSERT INTO rngfunc VALUES(2,1,'Mary'); -- sql, proretset = f, prorettype = b -CREATE FUNCTION getfoo1(int) RETURNS int AS 'SELECT $1;' LANGUAGE SQL; -SELECT * FROM getfoo1(1) AS t1; +CREATE FUNCTION getrngfunc1(int) RETURNS int AS 'SELECT $1;' LANGUAGE SQL; +SELECT * FROM getrngfunc1(1) AS t1; t1 ---- 1 (1 row) -SELECT * FROM getfoo1(1) WITH ORDINALITY AS t1(v,o); +SELECT * FROM getrngfunc1(1) WITH ORDINALITY AS t1(v,o); v | o ---+--- 1 | 1 (1 row) -CREATE VIEW vw_getfoo AS SELECT * FROM getfoo1(1); -SELECT * FROM vw_getfoo; - getfoo1 ---------- - 1 +CREATE VIEW vw_getrngfunc AS SELECT * FROM getrngfunc1(1); +SELECT * FROM vw_getrngfunc; + getrngfunc1 +------------- + 1 (1 row) -DROP VIEW vw_getfoo; -CREATE VIEW vw_getfoo AS SELECT * FROM getfoo1(1) WITH ORDINALITY as t1(v,o); -SELECT * FROM vw_getfoo; +DROP VIEW vw_getrngfunc; +CREATE VIEW vw_getrngfunc AS SELECT * FROM getrngfunc1(1) WITH ORDINALITY as t1(v,o); +SELECT * FROM vw_getrngfunc; v | o ---+--- 1 | 1 (1 row) -DROP VIEW vw_getfoo; +DROP VIEW vw_getrngfunc; -- sql, proretset = t, prorettype = b -CREATE FUNCTION getfoo2(int) RETURNS setof int AS 'SELECT fooid FROM foo WHERE fooid = $1;' LANGUAGE SQL; -SELECT * FROM getfoo2(1) AS t1; +CREATE FUNCTION getrngfunc2(int) RETURNS setof int AS 'SELECT rngfuncid FROM rngfunc WHERE rngfuncid = $1;' LANGUAGE SQL; +SELECT * FROM getrngfunc2(1) AS t1; t1 ---- 1 1 (2 rows) -SELECT * FROM getfoo2(1) WITH ORDINALITY AS t1(v,o); +SELECT * FROM getrngfunc2(1) WITH ORDINALITY AS t1(v,o); v | o ---+--- 1 | 1 1 | 2 (2 rows) -CREATE VIEW vw_getfoo AS SELECT * FROM getfoo2(1); -SELECT * FROM vw_getfoo; - getfoo2 ---------- - 1 - 1 +CREATE VIEW vw_getrngfunc AS SELECT * FROM getrngfunc2(1); +SELECT * FROM vw_getrngfunc; + getrngfunc2 +------------- + 1 + 1 (2 rows) -DROP VIEW vw_getfoo; -CREATE VIEW vw_getfoo AS SELECT * FROM getfoo2(1) WITH ORDINALITY AS t1(v,o); -SELECT * FROM vw_getfoo; +DROP VIEW vw_getrngfunc; +CREATE VIEW vw_getrngfunc AS SELECT * FROM getrngfunc2(1) WITH ORDINALITY AS t1(v,o); +SELECT * FROM vw_getrngfunc; v | o ---+--- 1 | 1 1 | 2 (2 rows) -DROP VIEW vw_getfoo; +DROP VIEW vw_getrngfunc; -- sql, proretset = t, prorettype = b -CREATE FUNCTION getfoo3(int) RETURNS setof text AS 'SELECT fooname FROM foo WHERE fooid = $1;' LANGUAGE SQL; -SELECT * FROM getfoo3(1) AS t1; +CREATE FUNCTION getrngfunc3(int) RETURNS setof text AS 'SELECT rngfuncname FROM rngfunc WHERE rngfuncid = $1;' LANGUAGE SQL; +SELECT * FROM getrngfunc3(1) AS t1; t1 ----- Joe Ed (2 rows) -SELECT * FROM getfoo3(1) WITH ORDINALITY AS t1(v,o); +SELECT * FROM getrngfunc3(1) WITH ORDINALITY AS t1(v,o); v | o -----+--- Joe | 1 Ed | 2 (2 rows) -CREATE VIEW vw_getfoo AS SELECT * 
FROM getfoo3(1); -SELECT * FROM vw_getfoo; - getfoo3 ---------- +CREATE VIEW vw_getrngfunc AS SELECT * FROM getrngfunc3(1); +SELECT * FROM vw_getrngfunc; + getrngfunc3 +------------- Joe Ed (2 rows) -DROP VIEW vw_getfoo; -CREATE VIEW vw_getfoo AS SELECT * FROM getfoo3(1) WITH ORDINALITY AS t1(v,o); -SELECT * FROM vw_getfoo; +DROP VIEW vw_getrngfunc; +CREATE VIEW vw_getrngfunc AS SELECT * FROM getrngfunc3(1) WITH ORDINALITY AS t1(v,o); +SELECT * FROM vw_getrngfunc; v | o -----+--- Joe | 1 Ed | 2 (2 rows) -DROP VIEW vw_getfoo; +DROP VIEW vw_getrngfunc; -- sql, proretset = f, prorettype = c -CREATE FUNCTION getfoo4(int) RETURNS foo AS 'SELECT * FROM foo WHERE fooid = $1;' LANGUAGE SQL; -SELECT * FROM getfoo4(1) AS t1; - fooid | foosubid | fooname --------+----------+--------- - 1 | 1 | Joe +CREATE FUNCTION getrngfunc4(int) RETURNS rngfunc AS 'SELECT * FROM rngfunc WHERE rngfuncid = $1;' LANGUAGE SQL; +SELECT * FROM getrngfunc4(1) AS t1; + rngfuncid | rngfuncsubid | rngfuncname +-----------+--------------+------------- + 1 | 1 | Joe (1 row) -SELECT * FROM getfoo4(1) WITH ORDINALITY AS t1(a,b,c,o); +SELECT * FROM getrngfunc4(1) WITH ORDINALITY AS t1(a,b,c,o); a | b | c | o ---+---+-----+--- 1 | 1 | Joe | 1 (1 row) -CREATE VIEW vw_getfoo AS SELECT * FROM getfoo4(1); -SELECT * FROM vw_getfoo; - fooid | foosubid | fooname --------+----------+--------- - 1 | 1 | Joe +CREATE VIEW vw_getrngfunc AS SELECT * FROM getrngfunc4(1); +SELECT * FROM vw_getrngfunc; + rngfuncid | rngfuncsubid | rngfuncname +-----------+--------------+------------- + 1 | 1 | Joe (1 row) -DROP VIEW vw_getfoo; -CREATE VIEW vw_getfoo AS SELECT * FROM getfoo4(1) WITH ORDINALITY AS t1(a,b,c,o); -SELECT * FROM vw_getfoo; +DROP VIEW vw_getrngfunc; +CREATE VIEW vw_getrngfunc AS SELECT * FROM getrngfunc4(1) WITH ORDINALITY AS t1(a,b,c,o); +SELECT * FROM vw_getrngfunc; a | b | c | o ---+---+-----+--- 1 | 1 | Joe | 1 (1 row) -DROP VIEW vw_getfoo; +DROP VIEW vw_getrngfunc; -- sql, proretset = t, prorettype = c -CREATE FUNCTION getfoo5(int) RETURNS setof foo AS 'SELECT * FROM foo WHERE fooid = $1;' LANGUAGE SQL; -SELECT * FROM getfoo5(1) AS t1; - fooid | foosubid | fooname --------+----------+--------- - 1 | 1 | Joe - 1 | 2 | Ed +CREATE FUNCTION getrngfunc5(int) RETURNS setof rngfunc AS 'SELECT * FROM rngfunc WHERE rngfuncid = $1;' LANGUAGE SQL; +SELECT * FROM getrngfunc5(1) AS t1; + rngfuncid | rngfuncsubid | rngfuncname +-----------+--------------+------------- + 1 | 1 | Joe + 1 | 2 | Ed (2 rows) -SELECT * FROM getfoo5(1) WITH ORDINALITY AS t1(a,b,c,o); +SELECT * FROM getrngfunc5(1) WITH ORDINALITY AS t1(a,b,c,o); a | b | c | o ---+---+-----+--- 1 | 1 | Joe | 1 1 | 2 | Ed | 2 (2 rows) -CREATE VIEW vw_getfoo AS SELECT * FROM getfoo5(1); -SELECT * FROM vw_getfoo; - fooid | foosubid | fooname --------+----------+--------- - 1 | 1 | Joe - 1 | 2 | Ed +CREATE VIEW vw_getrngfunc AS SELECT * FROM getrngfunc5(1); +SELECT * FROM vw_getrngfunc; + rngfuncid | rngfuncsubid | rngfuncname +-----------+--------------+------------- + 1 | 1 | Joe + 1 | 2 | Ed (2 rows) -DROP VIEW vw_getfoo; -CREATE VIEW vw_getfoo AS SELECT * FROM getfoo5(1) WITH ORDINALITY AS t1(a,b,c,o); -SELECT * FROM vw_getfoo; +DROP VIEW vw_getrngfunc; +CREATE VIEW vw_getrngfunc AS SELECT * FROM getrngfunc5(1) WITH ORDINALITY AS t1(a,b,c,o); +SELECT * FROM vw_getrngfunc; a | b | c | o ---+---+-----+--- 1 | 1 | Joe | 1 1 | 2 | Ed | 2 (2 rows) -DROP VIEW vw_getfoo; +DROP VIEW vw_getrngfunc; -- sql, proretset = f, prorettype = record -CREATE FUNCTION getfoo6(int) RETURNS RECORD AS 'SELECT 
* FROM foo WHERE fooid = $1;' LANGUAGE SQL; -SELECT * FROM getfoo6(1) AS t1(fooid int, foosubid int, fooname text); - fooid | foosubid | fooname --------+----------+--------- - 1 | 1 | Joe +CREATE FUNCTION getrngfunc6(int) RETURNS RECORD AS 'SELECT * FROM rngfunc WHERE rngfuncid = $1;' LANGUAGE SQL; +SELECT * FROM getrngfunc6(1) AS t1(rngfuncid int, rngfuncsubid int, rngfuncname text); + rngfuncid | rngfuncsubid | rngfuncname +-----------+--------------+------------- + 1 | 1 | Joe (1 row) -SELECT * FROM ROWS FROM( getfoo6(1) AS (fooid int, foosubid int, fooname text) ) WITH ORDINALITY; - fooid | foosubid | fooname | ordinality --------+----------+---------+------------ - 1 | 1 | Joe | 1 +SELECT * FROM ROWS FROM( getrngfunc6(1) AS (rngfuncid int, rngfuncsubid int, rngfuncname text) ) WITH ORDINALITY; + rngfuncid | rngfuncsubid | rngfuncname | ordinality +-----------+--------------+-------------+------------ + 1 | 1 | Joe | 1 (1 row) -CREATE VIEW vw_getfoo AS SELECT * FROM getfoo6(1) AS -(fooid int, foosubid int, fooname text); -SELECT * FROM vw_getfoo; - fooid | foosubid | fooname --------+----------+--------- - 1 | 1 | Joe +CREATE VIEW vw_getrngfunc AS SELECT * FROM getrngfunc6(1) AS +(rngfuncid int, rngfuncsubid int, rngfuncname text); +SELECT * FROM vw_getrngfunc; + rngfuncid | rngfuncsubid | rngfuncname +-----------+--------------+------------- + 1 | 1 | Joe (1 row) -DROP VIEW vw_getfoo; -CREATE VIEW vw_getfoo AS - SELECT * FROM ROWS FROM( getfoo6(1) AS (fooid int, foosubid int, fooname text) ) +DROP VIEW vw_getrngfunc; +CREATE VIEW vw_getrngfunc AS + SELECT * FROM ROWS FROM( getrngfunc6(1) AS (rngfuncid int, rngfuncsubid int, rngfuncname text) ) WITH ORDINALITY; -SELECT * FROM vw_getfoo; - fooid | foosubid | fooname | ordinality --------+----------+---------+------------ - 1 | 1 | Joe | 1 +SELECT * FROM vw_getrngfunc; + rngfuncid | rngfuncsubid | rngfuncname | ordinality +-----------+--------------+-------------+------------ + 1 | 1 | Joe | 1 (1 row) -DROP VIEW vw_getfoo; +DROP VIEW vw_getrngfunc; -- sql, proretset = t, prorettype = record -CREATE FUNCTION getfoo7(int) RETURNS setof record AS 'SELECT * FROM foo WHERE fooid = $1;' LANGUAGE SQL; -SELECT * FROM getfoo7(1) AS t1(fooid int, foosubid int, fooname text); - fooid | foosubid | fooname --------+----------+--------- - 1 | 1 | Joe - 1 | 2 | Ed +CREATE FUNCTION getrngfunc7(int) RETURNS setof record AS 'SELECT * FROM rngfunc WHERE rngfuncid = $1;' LANGUAGE SQL; +SELECT * FROM getrngfunc7(1) AS t1(rngfuncid int, rngfuncsubid int, rngfuncname text); + rngfuncid | rngfuncsubid | rngfuncname +-----------+--------------+------------- + 1 | 1 | Joe + 1 | 2 | Ed (2 rows) -SELECT * FROM ROWS FROM( getfoo7(1) AS (fooid int, foosubid int, fooname text) ) WITH ORDINALITY; - fooid | foosubid | fooname | ordinality --------+----------+---------+------------ - 1 | 1 | Joe | 1 - 1 | 2 | Ed | 2 +SELECT * FROM ROWS FROM( getrngfunc7(1) AS (rngfuncid int, rngfuncsubid int, rngfuncname text) ) WITH ORDINALITY; + rngfuncid | rngfuncsubid | rngfuncname | ordinality +-----------+--------------+-------------+------------ + 1 | 1 | Joe | 1 + 1 | 2 | Ed | 2 (2 rows) -CREATE VIEW vw_getfoo AS SELECT * FROM getfoo7(1) AS -(fooid int, foosubid int, fooname text); -SELECT * FROM vw_getfoo; - fooid | foosubid | fooname --------+----------+--------- - 1 | 1 | Joe - 1 | 2 | Ed +CREATE VIEW vw_getrngfunc AS SELECT * FROM getrngfunc7(1) AS +(rngfuncid int, rngfuncsubid int, rngfuncname text); +SELECT * FROM vw_getrngfunc; + rngfuncid | rngfuncsubid | rngfuncname 
+-----------+--------------+------------- + 1 | 1 | Joe + 1 | 2 | Ed (2 rows) -DROP VIEW vw_getfoo; -CREATE VIEW vw_getfoo AS - SELECT * FROM ROWS FROM( getfoo7(1) AS (fooid int, foosubid int, fooname text) ) +DROP VIEW vw_getrngfunc; +CREATE VIEW vw_getrngfunc AS + SELECT * FROM ROWS FROM( getrngfunc7(1) AS (rngfuncid int, rngfuncsubid int, rngfuncname text) ) WITH ORDINALITY; -SELECT * FROM vw_getfoo; - fooid | foosubid | fooname | ordinality --------+----------+---------+------------ - 1 | 1 | Joe | 1 - 1 | 2 | Ed | 2 +SELECT * FROM vw_getrngfunc; + rngfuncid | rngfuncsubid | rngfuncname | ordinality +-----------+--------------+-------------+------------ + 1 | 1 | Joe | 1 + 1 | 2 | Ed | 2 (2 rows) -DROP VIEW vw_getfoo; +DROP VIEW vw_getrngfunc; -- plpgsql, proretset = f, prorettype = b -CREATE FUNCTION getfoo8(int) RETURNS int AS 'DECLARE fooint int; BEGIN SELECT fooid into fooint FROM foo WHERE fooid = $1; RETURN fooint; END;' LANGUAGE plpgsql; -SELECT * FROM getfoo8(1) AS t1; +CREATE FUNCTION getrngfunc8(int) RETURNS int AS 'DECLARE rngfuncint int; BEGIN SELECT rngfuncid into rngfuncint FROM rngfunc WHERE rngfuncid = $1; RETURN rngfuncint; END;' LANGUAGE plpgsql; +SELECT * FROM getrngfunc8(1) AS t1; t1 ---- 1 (1 row) -SELECT * FROM getfoo8(1) WITH ORDINALITY AS t1(v,o); +SELECT * FROM getrngfunc8(1) WITH ORDINALITY AS t1(v,o); v | o ---+--- 1 | 1 (1 row) -CREATE VIEW vw_getfoo AS SELECT * FROM getfoo8(1); -SELECT * FROM vw_getfoo; - getfoo8 ---------- - 1 +CREATE VIEW vw_getrngfunc AS SELECT * FROM getrngfunc8(1); +SELECT * FROM vw_getrngfunc; + getrngfunc8 +------------- + 1 (1 row) -DROP VIEW vw_getfoo; -CREATE VIEW vw_getfoo AS SELECT * FROM getfoo8(1) WITH ORDINALITY AS t1(v,o); -SELECT * FROM vw_getfoo; +DROP VIEW vw_getrngfunc; +CREATE VIEW vw_getrngfunc AS SELECT * FROM getrngfunc8(1) WITH ORDINALITY AS t1(v,o); +SELECT * FROM vw_getrngfunc; v | o ---+--- 1 | 1 (1 row) -DROP VIEW vw_getfoo; +DROP VIEW vw_getrngfunc; -- plpgsql, proretset = f, prorettype = c -CREATE FUNCTION getfoo9(int) RETURNS foo AS 'DECLARE footup foo%ROWTYPE; BEGIN SELECT * into footup FROM foo WHERE fooid = $1; RETURN footup; END;' LANGUAGE plpgsql; -SELECT * FROM getfoo9(1) AS t1; - fooid | foosubid | fooname --------+----------+--------- - 1 | 1 | Joe +CREATE FUNCTION getrngfunc9(int) RETURNS rngfunc AS 'DECLARE rngfunctup rngfunc%ROWTYPE; BEGIN SELECT * into rngfunctup FROM rngfunc WHERE rngfuncid = $1; RETURN rngfunctup; END;' LANGUAGE plpgsql; +SELECT * FROM getrngfunc9(1) AS t1; + rngfuncid | rngfuncsubid | rngfuncname +-----------+--------------+------------- + 1 | 1 | Joe (1 row) -SELECT * FROM getfoo9(1) WITH ORDINALITY AS t1(a,b,c,o); +SELECT * FROM getrngfunc9(1) WITH ORDINALITY AS t1(a,b,c,o); a | b | c | o ---+---+-----+--- 1 | 1 | Joe | 1 (1 row) -CREATE VIEW vw_getfoo AS SELECT * FROM getfoo9(1); -SELECT * FROM vw_getfoo; - fooid | foosubid | fooname --------+----------+--------- - 1 | 1 | Joe +CREATE VIEW vw_getrngfunc AS SELECT * FROM getrngfunc9(1); +SELECT * FROM vw_getrngfunc; + rngfuncid | rngfuncsubid | rngfuncname +-----------+--------------+------------- + 1 | 1 | Joe (1 row) -DROP VIEW vw_getfoo; -CREATE VIEW vw_getfoo AS SELECT * FROM getfoo9(1) WITH ORDINALITY AS t1(a,b,c,o); -SELECT * FROM vw_getfoo; +DROP VIEW vw_getrngfunc; +CREATE VIEW vw_getrngfunc AS SELECT * FROM getrngfunc9(1) WITH ORDINALITY AS t1(a,b,c,o); +SELECT * FROM vw_getrngfunc; a | b | c | o ---+---+-----+--- 1 | 1 | Joe | 1 (1 row) -DROP VIEW vw_getfoo; +DROP VIEW vw_getrngfunc; -- mix 'n match kinds, to 
exercise expandRTE and related logic -select * from rows from(getfoo1(1),getfoo2(1),getfoo3(1),getfoo4(1),getfoo5(1), - getfoo6(1) AS (fooid int, foosubid int, fooname text), - getfoo7(1) AS (fooid int, foosubid int, fooname text), - getfoo8(1),getfoo9(1)) +select * from rows from(getrngfunc1(1),getrngfunc2(1),getrngfunc3(1),getrngfunc4(1),getrngfunc5(1), + getrngfunc6(1) AS (rngfuncid int, rngfuncsubid int, rngfuncname text), + getrngfunc7(1) AS (rngfuncid int, rngfuncsubid int, rngfuncname text), + getrngfunc8(1),getrngfunc9(1)) with ordinality as t1(a,b,c,d,e,f,g,h,i,j,k,l,m,o,p,q,r,s,t,u); a | b | c | d | e | f | g | h | i | j | k | l | m | o | p | q | r | s | t | u ---+---+-----+---+---+-----+---+---+-----+---+---+-----+---+---+-----+---+---+---+-----+--- @@ -643,10 +643,10 @@ select * from rows from(getfoo1(1),getfoo2(1),getfoo3(1),getfoo4(1),getfoo5(1), | 1 | Ed | | | | 1 | 2 | Ed | | | | 1 | 2 | Ed | | | | | 2 (2 rows) -select * from rows from(getfoo9(1),getfoo8(1), - getfoo7(1) AS (fooid int, foosubid int, fooname text), - getfoo6(1) AS (fooid int, foosubid int, fooname text), - getfoo5(1),getfoo4(1),getfoo3(1),getfoo2(1),getfoo1(1)) +select * from rows from(getrngfunc9(1),getrngfunc8(1), + getrngfunc7(1) AS (rngfuncid int, rngfuncsubid int, rngfuncname text), + getrngfunc6(1) AS (rngfuncid int, rngfuncsubid int, rngfuncname text), + getrngfunc5(1),getrngfunc4(1),getrngfunc3(1),getrngfunc2(1),getrngfunc1(1)) with ordinality as t1(a,b,c,d,e,f,g,h,i,j,k,l,m,o,p,q,r,s,t,u); a | b | c | d | e | f | g | h | i | j | k | l | m | o | p | q | r | s | t | u ---+---+-----+---+---+---+-----+---+---+-----+---+---+-----+---+---+-----+-----+---+---+--- @@ -654,62 +654,62 @@ select * from rows from(getfoo9(1),getfoo8(1), | | | | 1 | 2 | Ed | | | | 1 | 2 | Ed | | | | Ed | 1 | | 2 (2 rows) -create temporary view vw_foo as - select * from rows from(getfoo9(1), - getfoo7(1) AS (fooid int, foosubid int, fooname text), - getfoo1(1)) +create temporary view vw_rngfunc as + select * from rows from(getrngfunc9(1), + getrngfunc7(1) AS (rngfuncid int, rngfuncsubid int, rngfuncname text), + getrngfunc1(1)) with ordinality as t1(a,b,c,d,e,f,g,n); -select * from vw_foo; +select * from vw_rngfunc; a | b | c | d | e | f | g | n ---+---+-----+---+---+-----+---+--- 1 | 1 | Joe | 1 | 1 | Joe | 1 | 1 | | | 1 | 2 | Ed | | 2 (2 rows) -select pg_get_viewdef('vw_foo'); - pg_get_viewdef ------------------------------------------------------------------------------------------------------------------------------------------------------- - SELECT t1.a, + - t1.b, + - t1.c, + - t1.d, + - t1.e, + - t1.f, + - t1.g, + - t1.n + - FROM ROWS FROM(getfoo9(1), getfoo7(1) AS (fooid integer, foosubid integer, fooname text), getfoo1(1)) WITH ORDINALITY t1(a, b, c, d, e, f, g, n); -(1 row) - -drop view vw_foo; -DROP FUNCTION getfoo1(int); -DROP FUNCTION getfoo2(int); -DROP FUNCTION getfoo3(int); -DROP FUNCTION getfoo4(int); -DROP FUNCTION getfoo5(int); -DROP FUNCTION getfoo6(int); -DROP FUNCTION getfoo7(int); -DROP FUNCTION getfoo8(int); -DROP FUNCTION getfoo9(int); -DROP FUNCTION foot(int); -DROP TABLE foo2; -DROP TABLE foo; +select pg_get_viewdef('vw_rngfunc'); + pg_get_viewdef +------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ + SELECT t1.a, + + t1.b, + + t1.c, + + t1.d, + + t1.e, + + t1.f, + + t1.g, + + t1.n + + FROM ROWS FROM(getrngfunc9(1), getrngfunc7(1) AS (rngfuncid integer, rngfuncsubid integer, 
rngfuncname text), getrngfunc1(1)) WITH ORDINALITY t1(a, b, c, d, e, f, g, n); +(1 row) + +drop view vw_rngfunc; +DROP FUNCTION getrngfunc1(int); +DROP FUNCTION getrngfunc2(int); +DROP FUNCTION getrngfunc3(int); +DROP FUNCTION getrngfunc4(int); +DROP FUNCTION getrngfunc5(int); +DROP FUNCTION getrngfunc6(int); +DROP FUNCTION getrngfunc7(int); +DROP FUNCTION getrngfunc8(int); +DROP FUNCTION getrngfunc9(int); +DROP FUNCTION rngfunct(int); +DROP TABLE rngfunc2; +DROP TABLE rngfunc; -- Rescan tests -- -CREATE TEMPORARY SEQUENCE foo_rescan_seq1; -CREATE TEMPORARY SEQUENCE foo_rescan_seq2; -CREATE TYPE foo_rescan_t AS (i integer, s bigint); -CREATE FUNCTION foo_sql(int,int) RETURNS setof foo_rescan_t AS 'SELECT i, nextval(''foo_rescan_seq1'') FROM generate_series($1,$2) i;' LANGUAGE SQL; +CREATE TEMPORARY SEQUENCE rngfunc_rescan_seq1; +CREATE TEMPORARY SEQUENCE rngfunc_rescan_seq2; +CREATE TYPE rngfunc_rescan_t AS (i integer, s bigint); +CREATE FUNCTION rngfunc_sql(int,int) RETURNS setof rngfunc_rescan_t AS 'SELECT i, nextval(''rngfunc_rescan_seq1'') FROM generate_series($1,$2) i;' LANGUAGE SQL; -- plpgsql functions use materialize mode -CREATE FUNCTION foo_mat(int,int) RETURNS setof foo_rescan_t AS 'begin for i in $1..$2 loop return next (i, nextval(''foo_rescan_seq2'')); end loop; end;' LANGUAGE plpgsql; +CREATE FUNCTION rngfunc_mat(int,int) RETURNS setof rngfunc_rescan_t AS 'begin for i in $1..$2 loop return next (i, nextval(''rngfunc_rescan_seq2'')); end loop; end;' LANGUAGE plpgsql; --invokes ExecReScanFunctionScan - all these cases should materialize the function only once -- LEFT JOIN on a condition that the planner can't prove to be true is used to ensure the function -- is on the inner path of a nestloop join -SELECT setval('foo_rescan_seq1',1,false),setval('foo_rescan_seq2',1,false); +SELECT setval('rngfunc_rescan_seq1',1,false),setval('rngfunc_rescan_seq2',1,false); setval | setval --------+-------- 1 | 1 (1 row) -SELECT * FROM (VALUES (1),(2),(3)) v(r) LEFT JOIN foo_sql(11,13) ON (r+i)<100; +SELECT * FROM (VALUES (1),(2),(3)) v(r) LEFT JOIN rngfunc_sql(11,13) ON (r+i)<100; r | i | s ---+----+--- 1 | 11 | 1 @@ -723,13 +723,13 @@ SELECT * FROM (VALUES (1),(2),(3)) v(r) LEFT JOIN foo_sql(11,13) ON (r+i)<100; 3 | 13 | 3 (9 rows) -SELECT setval('foo_rescan_seq1',1,false),setval('foo_rescan_seq2',1,false); +SELECT setval('rngfunc_rescan_seq1',1,false),setval('rngfunc_rescan_seq2',1,false); setval | setval --------+-------- 1 | 1 (1 row) -SELECT * FROM (VALUES (1),(2),(3)) v(r) LEFT JOIN foo_sql(11,13) WITH ORDINALITY AS f(i,s,o) ON (r+i)<100; +SELECT * FROM (VALUES (1),(2),(3)) v(r) LEFT JOIN rngfunc_sql(11,13) WITH ORDINALITY AS f(i,s,o) ON (r+i)<100; r | i | s | o ---+----+---+--- 1 | 11 | 1 | 1 @@ -743,13 +743,13 @@ SELECT * FROM (VALUES (1),(2),(3)) v(r) LEFT JOIN foo_sql(11,13) WITH ORDINALITY 3 | 13 | 3 | 3 (9 rows) -SELECT setval('foo_rescan_seq1',1,false),setval('foo_rescan_seq2',1,false); +SELECT setval('rngfunc_rescan_seq1',1,false),setval('rngfunc_rescan_seq2',1,false); setval | setval --------+-------- 1 | 1 (1 row) -SELECT * FROM (VALUES (1),(2),(3)) v(r) LEFT JOIN foo_mat(11,13) ON (r+i)<100; +SELECT * FROM (VALUES (1),(2),(3)) v(r) LEFT JOIN rngfunc_mat(11,13) ON (r+i)<100; r | i | s ---+----+--- 1 | 11 | 1 @@ -763,13 +763,13 @@ SELECT * FROM (VALUES (1),(2),(3)) v(r) LEFT JOIN foo_mat(11,13) ON (r+i)<100; 3 | 13 | 3 (9 rows) -SELECT setval('foo_rescan_seq1',1,false),setval('foo_rescan_seq2',1,false); +SELECT 
setval('rngfunc_rescan_seq1',1,false),setval('rngfunc_rescan_seq2',1,false); setval | setval --------+-------- 1 | 1 (1 row) -SELECT * FROM (VALUES (1),(2),(3)) v(r) LEFT JOIN foo_mat(11,13) WITH ORDINALITY AS f(i,s,o) ON (r+i)<100; +SELECT * FROM (VALUES (1),(2),(3)) v(r) LEFT JOIN rngfunc_mat(11,13) WITH ORDINALITY AS f(i,s,o) ON (r+i)<100; r | i | s | o ---+----+---+--- 1 | 11 | 1 | 1 @@ -783,13 +783,13 @@ SELECT * FROM (VALUES (1),(2),(3)) v(r) LEFT JOIN foo_mat(11,13) WITH ORDINALITY 3 | 13 | 3 | 3 (9 rows) -SELECT setval('foo_rescan_seq1',1,false),setval('foo_rescan_seq2',1,false); +SELECT setval('rngfunc_rescan_seq1',1,false),setval('rngfunc_rescan_seq2',1,false); setval | setval --------+-------- 1 | 1 (1 row) -SELECT * FROM (VALUES (1),(2),(3)) v(r) LEFT JOIN ROWS FROM( foo_sql(11,13), foo_mat(11,13) ) WITH ORDINALITY AS f(i1,s1,i2,s2,o) ON (r+i1+i2)<100; +SELECT * FROM (VALUES (1),(2),(3)) v(r) LEFT JOIN ROWS FROM( rngfunc_sql(11,13), rngfunc_mat(11,13) ) WITH ORDINALITY AS f(i1,s1,i2,s2,o) ON (r+i1+i2)<100; r | i1 | s1 | i2 | s2 | o ---+----+----+----+----+--- 1 | 11 | 1 | 11 | 1 | 1 @@ -860,13 +860,13 @@ SELECT * FROM (VALUES (1),(2),(3)) v(r) LEFT JOIN unnest(array[10,20,30]) WITH O (9 rows) --invokes ExecReScanFunctionScan with chgParam != NULL (using implied LATERAL) -SELECT setval('foo_rescan_seq1',1,false),setval('foo_rescan_seq2',1,false); +SELECT setval('rngfunc_rescan_seq1',1,false),setval('rngfunc_rescan_seq2',1,false); setval | setval --------+-------- 1 | 1 (1 row) -SELECT * FROM (VALUES (1),(2),(3)) v(r), foo_sql(10+r,13); +SELECT * FROM (VALUES (1),(2),(3)) v(r), rngfunc_sql(10+r,13); r | i | s ---+----+--- 1 | 11 | 1 @@ -877,13 +877,13 @@ SELECT * FROM (VALUES (1),(2),(3)) v(r), foo_sql(10+r,13); 3 | 13 | 6 (6 rows) -SELECT setval('foo_rescan_seq1',1,false),setval('foo_rescan_seq2',1,false); +SELECT setval('rngfunc_rescan_seq1',1,false),setval('rngfunc_rescan_seq2',1,false); setval | setval --------+-------- 1 | 1 (1 row) -SELECT * FROM (VALUES (1),(2),(3)) v(r), foo_sql(10+r,13) WITH ORDINALITY AS f(i,s,o); +SELECT * FROM (VALUES (1),(2),(3)) v(r), rngfunc_sql(10+r,13) WITH ORDINALITY AS f(i,s,o); r | i | s | o ---+----+---+--- 1 | 11 | 1 | 1 @@ -894,13 +894,13 @@ SELECT * FROM (VALUES (1),(2),(3)) v(r), foo_sql(10+r,13) WITH ORDINALITY AS f(i 3 | 13 | 6 | 1 (6 rows) -SELECT setval('foo_rescan_seq1',1,false),setval('foo_rescan_seq2',1,false); +SELECT setval('rngfunc_rescan_seq1',1,false),setval('rngfunc_rescan_seq2',1,false); setval | setval --------+-------- 1 | 1 (1 row) -SELECT * FROM (VALUES (1),(2),(3)) v(r), foo_sql(11,10+r); +SELECT * FROM (VALUES (1),(2),(3)) v(r), rngfunc_sql(11,10+r); r | i | s ---+----+--- 1 | 11 | 1 @@ -911,13 +911,13 @@ SELECT * FROM (VALUES (1),(2),(3)) v(r), foo_sql(11,10+r); 3 | 13 | 6 (6 rows) -SELECT setval('foo_rescan_seq1',1,false),setval('foo_rescan_seq2',1,false); +SELECT setval('rngfunc_rescan_seq1',1,false),setval('rngfunc_rescan_seq2',1,false); setval | setval --------+-------- 1 | 1 (1 row) -SELECT * FROM (VALUES (1),(2),(3)) v(r), foo_sql(11,10+r) WITH ORDINALITY AS f(i,s,o); +SELECT * FROM (VALUES (1),(2),(3)) v(r), rngfunc_sql(11,10+r) WITH ORDINALITY AS f(i,s,o); r | i | s | o ---+----+---+--- 1 | 11 | 1 | 1 @@ -928,13 +928,13 @@ SELECT * FROM (VALUES (1),(2),(3)) v(r), foo_sql(11,10+r) WITH ORDINALITY AS f(i 3 | 13 | 6 | 3 (6 rows) -SELECT setval('foo_rescan_seq1',1,false),setval('foo_rescan_seq2',1,false); +SELECT setval('rngfunc_rescan_seq1',1,false),setval('rngfunc_rescan_seq2',1,false); setval | setval 
--------+-------- 1 | 1 (1 row) -SELECT * FROM (VALUES (11,12),(13,15),(16,20)) v(r1,r2), foo_sql(r1,r2); +SELECT * FROM (VALUES (11,12),(13,15),(16,20)) v(r1,r2), rngfunc_sql(r1,r2); r1 | r2 | i | s ----+----+----+---- 11 | 12 | 11 | 1 @@ -949,13 +949,13 @@ SELECT * FROM (VALUES (11,12),(13,15),(16,20)) v(r1,r2), foo_sql(r1,r2); 16 | 20 | 20 | 10 (10 rows) -SELECT setval('foo_rescan_seq1',1,false),setval('foo_rescan_seq2',1,false); +SELECT setval('rngfunc_rescan_seq1',1,false),setval('rngfunc_rescan_seq2',1,false); setval | setval --------+-------- 1 | 1 (1 row) -SELECT * FROM (VALUES (11,12),(13,15),(16,20)) v(r1,r2), foo_sql(r1,r2) WITH ORDINALITY AS f(i,s,o); +SELECT * FROM (VALUES (11,12),(13,15),(16,20)) v(r1,r2), rngfunc_sql(r1,r2) WITH ORDINALITY AS f(i,s,o); r1 | r2 | i | s | o ----+----+----+----+--- 11 | 12 | 11 | 1 | 1 @@ -970,13 +970,13 @@ SELECT * FROM (VALUES (11,12),(13,15),(16,20)) v(r1,r2), foo_sql(r1,r2) WITH ORD 16 | 20 | 20 | 10 | 5 (10 rows) -SELECT setval('foo_rescan_seq1',1,false),setval('foo_rescan_seq2',1,false); +SELECT setval('rngfunc_rescan_seq1',1,false),setval('rngfunc_rescan_seq2',1,false); setval | setval --------+-------- 1 | 1 (1 row) -SELECT * FROM (VALUES (1),(2),(3)) v(r), foo_mat(10+r,13); +SELECT * FROM (VALUES (1),(2),(3)) v(r), rngfunc_mat(10+r,13); r | i | s ---+----+--- 1 | 11 | 1 @@ -987,13 +987,13 @@ SELECT * FROM (VALUES (1),(2),(3)) v(r), foo_mat(10+r,13); 3 | 13 | 6 (6 rows) -SELECT setval('foo_rescan_seq1',1,false),setval('foo_rescan_seq2',1,false); +SELECT setval('rngfunc_rescan_seq1',1,false),setval('rngfunc_rescan_seq2',1,false); setval | setval --------+-------- 1 | 1 (1 row) -SELECT * FROM (VALUES (1),(2),(3)) v(r), foo_mat(10+r,13) WITH ORDINALITY AS f(i,s,o); +SELECT * FROM (VALUES (1),(2),(3)) v(r), rngfunc_mat(10+r,13) WITH ORDINALITY AS f(i,s,o); r | i | s | o ---+----+---+--- 1 | 11 | 1 | 1 @@ -1004,13 +1004,13 @@ SELECT * FROM (VALUES (1),(2),(3)) v(r), foo_mat(10+r,13) WITH ORDINALITY AS f(i 3 | 13 | 6 | 1 (6 rows) -SELECT setval('foo_rescan_seq1',1,false),setval('foo_rescan_seq2',1,false); +SELECT setval('rngfunc_rescan_seq1',1,false),setval('rngfunc_rescan_seq2',1,false); setval | setval --------+-------- 1 | 1 (1 row) -SELECT * FROM (VALUES (1),(2),(3)) v(r), foo_mat(11,10+r); +SELECT * FROM (VALUES (1),(2),(3)) v(r), rngfunc_mat(11,10+r); r | i | s ---+----+--- 1 | 11 | 1 @@ -1021,13 +1021,13 @@ SELECT * FROM (VALUES (1),(2),(3)) v(r), foo_mat(11,10+r); 3 | 13 | 6 (6 rows) -SELECT setval('foo_rescan_seq1',1,false),setval('foo_rescan_seq2',1,false); +SELECT setval('rngfunc_rescan_seq1',1,false),setval('rngfunc_rescan_seq2',1,false); setval | setval --------+-------- 1 | 1 (1 row) -SELECT * FROM (VALUES (1),(2),(3)) v(r), foo_mat(11,10+r) WITH ORDINALITY AS f(i,s,o); +SELECT * FROM (VALUES (1),(2),(3)) v(r), rngfunc_mat(11,10+r) WITH ORDINALITY AS f(i,s,o); r | i | s | o ---+----+---+--- 1 | 11 | 1 | 1 @@ -1038,13 +1038,13 @@ SELECT * FROM (VALUES (1),(2),(3)) v(r), foo_mat(11,10+r) WITH ORDINALITY AS f(i 3 | 13 | 6 | 3 (6 rows) -SELECT setval('foo_rescan_seq1',1,false),setval('foo_rescan_seq2',1,false); +SELECT setval('rngfunc_rescan_seq1',1,false),setval('rngfunc_rescan_seq2',1,false); setval | setval --------+-------- 1 | 1 (1 row) -SELECT * FROM (VALUES (11,12),(13,15),(16,20)) v(r1,r2), foo_mat(r1,r2); +SELECT * FROM (VALUES (11,12),(13,15),(16,20)) v(r1,r2), rngfunc_mat(r1,r2); r1 | r2 | i | s ----+----+----+---- 11 | 12 | 11 | 1 @@ -1059,13 +1059,13 @@ SELECT * FROM (VALUES (11,12),(13,15),(16,20)) v(r1,r2), 
foo_mat(r1,r2); 16 | 20 | 20 | 10 (10 rows) -SELECT setval('foo_rescan_seq1',1,false),setval('foo_rescan_seq2',1,false); +SELECT setval('rngfunc_rescan_seq1',1,false),setval('rngfunc_rescan_seq2',1,false); setval | setval --------+-------- 1 | 1 (1 row) -SELECT * FROM (VALUES (11,12),(13,15),(16,20)) v(r1,r2), foo_mat(r1,r2) WITH ORDINALITY AS f(i,s,o); +SELECT * FROM (VALUES (11,12),(13,15),(16,20)) v(r1,r2), rngfunc_mat(r1,r2) WITH ORDINALITY AS f(i,s,o); r1 | r2 | i | s | o ----+----+----+----+--- 11 | 12 | 11 | 1 | 1 @@ -1081,13 +1081,13 @@ SELECT * FROM (VALUES (11,12),(13,15),(16,20)) v(r1,r2), foo_mat(r1,r2) WITH ORD (10 rows) -- selective rescan of multiple functions: -SELECT setval('foo_rescan_seq1',1,false),setval('foo_rescan_seq2',1,false); +SELECT setval('rngfunc_rescan_seq1',1,false),setval('rngfunc_rescan_seq2',1,false); setval | setval --------+-------- 1 | 1 (1 row) -SELECT * FROM (VALUES (1),(2),(3)) v(r), ROWS FROM( foo_sql(11,11), foo_mat(10+r,13) ); +SELECT * FROM (VALUES (1),(2),(3)) v(r), ROWS FROM( rngfunc_sql(11,11), rngfunc_mat(10+r,13) ); r | i | s | i | s ---+----+---+----+--- 1 | 11 | 1 | 11 | 1 @@ -1098,13 +1098,13 @@ SELECT * FROM (VALUES (1),(2),(3)) v(r), ROWS FROM( foo_sql(11,11), foo_mat(10+r 3 | 11 | 1 | 13 | 6 (6 rows) -SELECT setval('foo_rescan_seq1',1,false),setval('foo_rescan_seq2',1,false); +SELECT setval('rngfunc_rescan_seq1',1,false),setval('rngfunc_rescan_seq2',1,false); setval | setval --------+-------- 1 | 1 (1 row) -SELECT * FROM (VALUES (1),(2),(3)) v(r), ROWS FROM( foo_sql(10+r,13), foo_mat(11,11) ); +SELECT * FROM (VALUES (1),(2),(3)) v(r), ROWS FROM( rngfunc_sql(10+r,13), rngfunc_mat(11,11) ); r | i | s | i | s ---+----+---+----+--- 1 | 11 | 1 | 11 | 1 @@ -1115,13 +1115,13 @@ SELECT * FROM (VALUES (1),(2),(3)) v(r), ROWS FROM( foo_sql(10+r,13), foo_mat(11 3 | 13 | 6 | 11 | 1 (6 rows) -SELECT setval('foo_rescan_seq1',1,false),setval('foo_rescan_seq2',1,false); +SELECT setval('rngfunc_rescan_seq1',1,false),setval('rngfunc_rescan_seq2',1,false); setval | setval --------+-------- 1 | 1 (1 row) -SELECT * FROM (VALUES (1),(2),(3)) v(r), ROWS FROM( foo_sql(10+r,13), foo_mat(10+r,13) ); +SELECT * FROM (VALUES (1),(2),(3)) v(r), ROWS FROM( rngfunc_sql(10+r,13), rngfunc_mat(10+r,13) ); r | i | s | i | s ---+----+---+----+--- 1 | 11 | 1 | 11 | 1 @@ -1132,13 +1132,13 @@ SELECT * FROM (VALUES (1),(2),(3)) v(r), ROWS FROM( foo_sql(10+r,13), foo_mat(10 3 | 13 | 6 | 13 | 6 (6 rows) -SELECT setval('foo_rescan_seq1',1,false),setval('foo_rescan_seq2',1,false); +SELECT setval('rngfunc_rescan_seq1',1,false),setval('rngfunc_rescan_seq2',1,false); setval | setval --------+-------- 1 | 1 (1 row) -SELECT * FROM generate_series(1,2) r1, generate_series(r1,3) r2, ROWS FROM( foo_sql(10+r1,13), foo_mat(10+r2,13) ); +SELECT * FROM generate_series(1,2) r1, generate_series(r1,3) r2, ROWS FROM( rngfunc_sql(10+r1,13), rngfunc_mat(10+r2,13) ); r1 | r2 | i | s | i | s ----+----+----+----+----+--- 1 | 1 | 11 | 1 | 11 | 1 @@ -1391,53 +1391,53 @@ SELECT * FROM (VALUES (1),(2),(3)) v1(r1), 3 | 3 | 30 | 8 (45 rows) -DROP FUNCTION foo_sql(int,int); -DROP FUNCTION foo_mat(int,int); -DROP SEQUENCE foo_rescan_seq1; -DROP SEQUENCE foo_rescan_seq2; +DROP FUNCTION rngfunc_sql(int,int); +DROP FUNCTION rngfunc_mat(int,int); +DROP SEQUENCE rngfunc_rescan_seq1; +DROP SEQUENCE rngfunc_rescan_seq2; -- -- Test cases involving OUT parameters -- -CREATE FUNCTION foo(in f1 int, out f2 int) +CREATE FUNCTION rngfunc(in f1 int, out f2 int) AS 'select $1+1' LANGUAGE sql; -SELECT foo(42); - foo ------ 
- 43 +SELECT rngfunc(42); + rngfunc +--------- + 43 (1 row) -SELECT * FROM foo(42); +SELECT * FROM rngfunc(42); f2 ---- 43 (1 row) -SELECT * FROM foo(42) AS p(x); +SELECT * FROM rngfunc(42) AS p(x); x ---- 43 (1 row) -- explicit spec of return type is OK -CREATE OR REPLACE FUNCTION foo(in f1 int, out f2 int) RETURNS int +CREATE OR REPLACE FUNCTION rngfunc(in f1 int, out f2 int) RETURNS int AS 'select $1+1' LANGUAGE sql; -- error, wrong result type -CREATE OR REPLACE FUNCTION foo(in f1 int, out f2 int) RETURNS float +CREATE OR REPLACE FUNCTION rngfunc(in f1 int, out f2 int) RETURNS float AS 'select $1+1' LANGUAGE sql; ERROR: function result type must be integer because of OUT parameters -- with multiple OUT params you must get a RECORD result -CREATE OR REPLACE FUNCTION foo(in f1 int, out f2 int, out f3 text) RETURNS int +CREATE OR REPLACE FUNCTION rngfunc(in f1 int, out f2 int, out f3 text) RETURNS int AS 'select $1+1' LANGUAGE sql; ERROR: function result type must be record because of OUT parameters -CREATE OR REPLACE FUNCTION foo(in f1 int, out f2 int, out f3 text) +CREATE OR REPLACE FUNCTION rngfunc(in f1 int, out f2 int, out f3 text) RETURNS record AS 'select $1+1' LANGUAGE sql; ERROR: cannot change return type of existing function -HINT: Use DROP FUNCTION foo(integer) first. -CREATE OR REPLACE FUNCTION foor(in f1 int, out f2 int, out text) +HINT: Use DROP FUNCTION rngfunc(integer) first. +CREATE OR REPLACE FUNCTION rngfuncr(in f1 int, out f2 int, out text) AS $$select $1-1, $1::text || 'z'$$ LANGUAGE sql; -SELECT f1, foor(f1) FROM int4_tbl; - f1 | foor +SELECT f1, rngfuncr(f1) FROM int4_tbl; + f1 | rngfuncr -------------+---------------------------- 0 | (-1,0z) 123456 | (123455,123456z) @@ -1446,22 +1446,22 @@ SELECT f1, foor(f1) FROM int4_tbl; -2147483647 | (-2147483648,-2147483647z) (5 rows) -SELECT * FROM foor(42); +SELECT * FROM rngfuncr(42); f2 | column2 ----+--------- 41 | 42z (1 row) -SELECT * FROM foor(42) AS p(a,b); +SELECT * FROM rngfuncr(42) AS p(a,b); a | b ----+----- 41 | 42z (1 row) -CREATE OR REPLACE FUNCTION foob(in f1 int, inout f2 int, out text) +CREATE OR REPLACE FUNCTION rngfuncb(in f1 int, inout f2 int, out text) AS $$select $2-1, $1::text || 'z'$$ LANGUAGE sql; -SELECT f1, foob(f1, f1/2) FROM int4_tbl; - f1 | foob +SELECT f1, rngfuncb(f1, f1/2) FROM int4_tbl; + f1 | rngfuncb -------------+---------------------------- 0 | (-1,0z) 123456 | (61727,123456z) @@ -1470,22 +1470,22 @@ SELECT f1, foob(f1, f1/2) FROM int4_tbl; -2147483647 | (-1073741824,-2147483647z) (5 rows) -SELECT * FROM foob(42, 99); +SELECT * FROM rngfuncb(42, 99); f2 | column2 ----+--------- 98 | 42z (1 row) -SELECT * FROM foob(42, 99) AS p(a,b); +SELECT * FROM rngfuncb(42, 99) AS p(a,b); a | b ----+----- 98 | 42z (1 row) -- Can reference function with or without OUT params for DROP, etc -DROP FUNCTION foo(int); -DROP FUNCTION foor(in f2 int, out f1 int, out text); -DROP FUNCTION foob(in f1 int, inout f2 int); +DROP FUNCTION rngfunc(int); +DROP FUNCTION rngfuncr(in f2 int, out f1 int, out text); +DROP FUNCTION rngfuncb(in f1 int, inout f2 int); -- -- For my next trick, polymorphic OUT parameters -- @@ -1535,10 +1535,10 @@ DETAIL: A function returning a polymorphic type must have at least one polymorp -- -- table functions -- -CREATE OR REPLACE FUNCTION foo() +CREATE OR REPLACE FUNCTION rngfunc() RETURNS TABLE(a int) AS $$ SELECT a FROM generate_series(1,5) a(a) $$ LANGUAGE sql; -SELECT * FROM foo(); +SELECT * FROM rngfunc(); a --- 1 @@ -1548,13 +1548,13 @@ SELECT * FROM foo(); 5 (5 rows) -DROP 
FUNCTION foo(); -CREATE OR REPLACE FUNCTION foo(int) +DROP FUNCTION rngfunc(); +CREATE OR REPLACE FUNCTION rngfunc(int) RETURNS TABLE(a int, b int) AS $$ SELECT a, b FROM generate_series(1,$1) a(a), generate_series(1,$1) b(b) $$ LANGUAGE sql; -SELECT * FROM foo(3); +SELECT * FROM rngfunc(3); a | b ---+--- 1 | 1 @@ -1568,18 +1568,18 @@ SELECT * FROM foo(3); 3 | 3 (9 rows) -DROP FUNCTION foo(int); +DROP FUNCTION rngfunc(int); -- case that causes change of typmod knowledge during inlining -CREATE OR REPLACE FUNCTION foo() +CREATE OR REPLACE FUNCTION rngfunc() RETURNS TABLE(a varchar(5)) AS $$ SELECT 'hello'::varchar(5) $$ LANGUAGE sql STABLE; -SELECT * FROM foo() GROUP BY 1; +SELECT * FROM rngfunc() GROUP BY 1; a ------- hello (1 row) -DROP FUNCTION foo(); +DROP FUNCTION rngfunc(); -- -- some tests on SQL functions with RETURNING -- @@ -1752,25 +1752,25 @@ select * from tt_log; (2 rows) -- test case for a whole-row-variable bug -create function foo1(n integer, out a text, out b text) +create function rngfunc1(n integer, out a text, out b text) returns setof record language sql as $$ select 'foo ' || i, 'bar ' || i from generate_series(1,$1) i $$; set work_mem='64kB'; -select t.a, t, t.a from foo1(10000) t limit 1; +select t.a, t, t.a from rngfunc1(10000) t limit 1; a | t | a -------+-------------------+------- foo 1 | ("foo 1","bar 1") | foo 1 (1 row) reset work_mem; -select t.a, t, t.a from foo1(10000) t limit 1; +select t.a, t, t.a from rngfunc1(10000) t limit 1; a | t | a -------+-------------------+------- foo 1 | ("foo 1","bar 1") | foo 1 (1 row) -drop function foo1(n integer); +drop function rngfunc1(n integer); -- test use of SQL functions returning record -- this is supported in some cases where the query doesn't specify -- the actual record type ... 
@@ -1795,49 +1795,49 @@ select * from array_to_set(array['one', 'two']); -- fail ERROR: a column definition list is required for functions returning "record" LINE 1: select * from array_to_set(array['one', 'two']); ^ -create temp table foo(f1 int8, f2 int8); -create function testfoo() returns record as $$ - insert into foo values (1,2) returning *; +create temp table rngfunc(f1 int8, f2 int8); +create function testrngfunc() returns record as $$ + insert into rngfunc values (1,2) returning *; $$ language sql; -select testfoo(); - testfoo ---------- +select testrngfunc(); + testrngfunc +------------- (1,2) (1 row) -select * from testfoo() as t(f1 int8,f2 int8); +select * from testrngfunc() as t(f1 int8,f2 int8); f1 | f2 ----+---- 1 | 2 (1 row) -select * from testfoo(); -- fail +select * from testrngfunc(); -- fail ERROR: a column definition list is required for functions returning "record" -LINE 1: select * from testfoo(); +LINE 1: select * from testrngfunc(); ^ -drop function testfoo(); -create function testfoo() returns setof record as $$ - insert into foo values (1,2), (3,4) returning *; +drop function testrngfunc(); +create function testrngfunc() returns setof record as $$ + insert into rngfunc values (1,2), (3,4) returning *; $$ language sql; -select testfoo(); - testfoo ---------- +select testrngfunc(); + testrngfunc +------------- (1,2) (3,4) (2 rows) -select * from testfoo() as t(f1 int8,f2 int8); +select * from testrngfunc() as t(f1 int8,f2 int8); f1 | f2 ----+---- 1 | 2 3 | 4 (2 rows) -select * from testfoo(); -- fail +select * from testrngfunc(); -- fail ERROR: a column definition list is required for functions returning "record" -LINE 1: select * from testfoo(); +LINE 1: select * from testrngfunc(); ^ -drop function testfoo(); +drop function testrngfunc(); -- -- Check some cases involving added/dropped columns in a rowtype result -- @@ -1931,44 +1931,44 @@ drop function get_first_user(); drop function get_users(); drop table users; -- this won't get inlined because of type coercion, but it shouldn't fail -create or replace function foobar() returns setof text as +create or replace function rngfuncbar() returns setof text as $$ select 'foo'::varchar union all select 'bar'::varchar ; $$ language sql stable; -select foobar(); - foobar --------- +select rngfuncbar(); + rngfuncbar +------------ foo bar (2 rows) -select * from foobar(); - foobar --------- +select * from rngfuncbar(); + rngfuncbar +------------ foo bar (2 rows) -drop function foobar(); +drop function rngfuncbar(); -- check handling of a SQL function with multiple OUT params (bug #5777) -create or replace function foobar(out integer, out numeric) as +create or replace function rngfuncbar(out integer, out numeric) as $$ select (1, 2.1) $$ language sql; -select * from foobar(); +select * from rngfuncbar(); column1 | column2 ---------+--------- 1 | 2.1 (1 row) -create or replace function foobar(out integer, out numeric) as +create or replace function rngfuncbar(out integer, out numeric) as $$ select (1, 2) $$ language sql; -select * from foobar(); -- fail +select * from rngfuncbar(); -- fail ERROR: function return row and query-specified return row do not match DETAIL: Returned type integer at ordinal position 2, but query expects numeric. 
-create or replace function foobar(out integer, out numeric) as +create or replace function rngfuncbar(out integer, out numeric) as $$ select (1, 2.1, 3) $$ language sql; -select * from foobar(); -- fail +select * from rngfuncbar(); -- fail ERROR: function return row and query-specified return row do not match DETAIL: Returned row contains 3 attributes, but query expects 2. -drop function foobar(); +drop function rngfuncbar(); -- check whole-row-Var handling in nested lateral functions (bug #11703) create function extractq2(t int8_tbl) returns int8 as $$ select t.q2 @@ -2044,22 +2044,22 @@ select x from int8_tbl, extractq2_2_opt(int8_tbl) f(x); (5 rows) -- check handling of nulls in SRF results (bug #7808) -create type foo2 as (a integer, b text); -select *, row_to_json(u) from unnest(array[(1,'foo')::foo2, null::foo2]) u; +create type rngfunc2 as (a integer, b text); +select *, row_to_json(u) from unnest(array[(1,'foo')::rngfunc2, null::rngfunc2]) u; a | b | row_to_json ---+-----+--------------------- 1 | foo | {"a":1,"b":"foo"} | | {"a":null,"b":null} (2 rows) -select *, row_to_json(u) from unnest(array[null::foo2, null::foo2]) u; +select *, row_to_json(u) from unnest(array[null::rngfunc2, null::rngfunc2]) u; a | b | row_to_json ---+---+--------------------- | | {"a":null,"b":null} | | {"a":null,"b":null} (2 rows) -select *, row_to_json(u) from unnest(array[null::foo2, (1,'foo')::foo2, null::foo2]) u; +select *, row_to_json(u) from unnest(array[null::rngfunc2, (1,'foo')::rngfunc2, null::rngfunc2]) u; a | b | row_to_json ---+-----+--------------------- | | {"a":null,"b":null} @@ -2067,9 +2067,9 @@ select *, row_to_json(u) from unnest(array[null::foo2, (1,'foo')::foo2, null::fo | | {"a":null,"b":null} (3 rows) -select *, row_to_json(u) from unnest(array[]::foo2[]) u; +select *, row_to_json(u) from unnest(array[]::rngfunc2[]) u; a | b | row_to_json ---+---+------------- (0 rows) -drop type foo2; +drop type rngfunc2; diff --git a/src/test/regress/expected/rangetypes.out b/src/test/regress/expected/rangetypes.out index 4a2336cd8d..accf1e0d9e 100644 --- a/src/test/regress/expected/rangetypes.out +++ b/src/test/regress/expected/rangetypes.out @@ -1354,6 +1354,18 @@ select *, row_to_json(upper(t)) as u from drop type two_ints cascade; NOTICE: drop cascades to type two_ints_range -- +-- Check behavior when subtype lacks a hash function +-- +create type cashrange as range (subtype = money); +set enable_sort = off; -- try to make it pick a hash setop implementation +select '(2,5)'::cashrange except select '(5,6)'::cashrange; + cashrange +--------------- + ($2.00,$5.00) +(1 row) + +reset enable_sort; +-- -- OUT/INOUT/TABLE functions -- create function outparam_succeed(i anyrange, out r anyrange, out t text) diff --git a/src/test/regress/expected/reloptions.out b/src/test/regress/expected/reloptions.out new file mode 100644 index 0000000000..df3c99d1eb --- /dev/null +++ b/src/test/regress/expected/reloptions.out @@ -0,0 +1,185 @@ +-- Simple create +CREATE TABLE reloptions_test(i INT) WITH (FiLLFaCToR=30, + autovacuum_enabled = false, autovacuum_analyze_scale_factor = 0.2); +SELECT reloptions FROM pg_class WHERE oid = 'reloptions_test'::regclass; + reloptions +------------------------------------------------------------------------------ + {fillfactor=30,autovacuum_enabled=false,autovacuum_analyze_scale_factor=0.2} +(1 row) + +-- Fail min/max values check +CREATE TABLE reloptions_test2(i INT) WITH (fillfactor=2); +ERROR: value 2 out of bounds for option "fillfactor" +DETAIL: Valid values are between 
"10" and "100". +CREATE TABLE reloptions_test2(i INT) WITH (fillfactor=110); +ERROR: value 110 out of bounds for option "fillfactor" +DETAIL: Valid values are between "10" and "100". +CREATE TABLE reloptions_test2(i INT) WITH (autovacuum_analyze_scale_factor = -10.0); +ERROR: value -10.0 out of bounds for option "autovacuum_analyze_scale_factor" +DETAIL: Valid values are between "0.000000" and "100.000000". +CREATE TABLE reloptions_test2(i INT) WITH (autovacuum_analyze_scale_factor = 110.0); +ERROR: value 110.0 out of bounds for option "autovacuum_analyze_scale_factor" +DETAIL: Valid values are between "0.000000" and "100.000000". +-- Fail when option and namespace do not exist +CREATE TABLE reloptions_test2(i INT) WITH (not_existing_option=2); +ERROR: unrecognized parameter "not_existing_option" +CREATE TABLE reloptions_test2(i INT) WITH (not_existing_namespace.fillfactor=2); +ERROR: unrecognized parameter namespace "not_existing_namespace" +-- Fail while setting improper values +CREATE TABLE reloptions_test2(i INT) WITH (fillfactor=30.5); +ERROR: invalid value for integer option "fillfactor": 30.5 +CREATE TABLE reloptions_test2(i INT) WITH (fillfactor='string'); +ERROR: invalid value for integer option "fillfactor": string +CREATE TABLE reloptions_test2(i INT) WITH (fillfactor=true); +ERROR: invalid value for integer option "fillfactor": true +CREATE TABLE reloptions_test2(i INT) WITH (autovacuum_enabled=12); +ERROR: invalid value for boolean option "autovacuum_enabled": 12 +CREATE TABLE reloptions_test2(i INT) WITH (autovacuum_enabled=30.5); +ERROR: invalid value for boolean option "autovacuum_enabled": 30.5 +CREATE TABLE reloptions_test2(i INT) WITH (autovacuum_enabled='string'); +ERROR: invalid value for boolean option "autovacuum_enabled": string +CREATE TABLE reloptions_test2(i INT) WITH (autovacuum_analyze_scale_factor='string'); +ERROR: invalid value for floating point option "autovacuum_analyze_scale_factor": string +CREATE TABLE reloptions_test2(i INT) WITH (autovacuum_analyze_scale_factor=true); +ERROR: invalid value for floating point option "autovacuum_analyze_scale_factor": true +-- Fail if option is specified twice +CREATE TABLE reloptions_test2(i INT) WITH (fillfactor=30, fillfactor=40); +ERROR: parameter "fillfactor" specified more than once +-- Specifying name only for a non-Boolean option should fail +CREATE TABLE reloptions_test2(i INT) WITH (fillfactor); +ERROR: invalid value for integer option "fillfactor": true +-- Simple ALTER TABLE +ALTER TABLE reloptions_test SET (fillfactor=31, + autovacuum_analyze_scale_factor = 0.3); +SELECT reloptions FROM pg_class WHERE oid = 'reloptions_test'::regclass; + reloptions +------------------------------------------------------------------------------ + {autovacuum_enabled=false,fillfactor=31,autovacuum_analyze_scale_factor=0.3} +(1 row) + +-- Set boolean option to true without specifying value +ALTER TABLE reloptions_test SET (autovacuum_enabled, fillfactor=32); +SELECT reloptions FROM pg_class WHERE oid = 'reloptions_test'::regclass; + reloptions +----------------------------------------------------------------------------- + {autovacuum_analyze_scale_factor=0.3,autovacuum_enabled=true,fillfactor=32} +(1 row) + +-- Check that RESET works well +ALTER TABLE reloptions_test RESET (fillfactor); +SELECT reloptions FROM pg_class WHERE oid = 'reloptions_test'::regclass; + reloptions +--------------------------------------------------------------- + {autovacuum_analyze_scale_factor=0.3,autovacuum_enabled=true} +(1 row) + +-- Resetting 
all values causes the column to become null +ALTER TABLE reloptions_test RESET (autovacuum_enabled, + autovacuum_analyze_scale_factor); +SELECT reloptions FROM pg_class WHERE oid = 'reloptions_test'::regclass AND + reloptions IS NULL; + reloptions +------------ + +(1 row) + +-- RESET fails if a value is specified +ALTER TABLE reloptions_test RESET (fillfactor=12); +ERROR: RESET must not include values for parameters +-- The OIDS option is not stored as reloption +DROP TABLE reloptions_test; +CREATE TABLE reloptions_test(i INT) WITH (fillfactor=20, oids=true); +SELECT reloptions, relhasoids FROM pg_class WHERE oid = 'reloptions_test'::regclass; + reloptions | relhasoids +-----------------+------------ + {fillfactor=20} | t +(1 row) + +-- Test toast.* options +DROP TABLE reloptions_test; +CREATE TABLE reloptions_test (s VARCHAR) + WITH (toast.autovacuum_vacuum_cost_delay = 23); +SELECT reltoastrelid as toast_oid + FROM pg_class WHERE oid = 'reloptions_test'::regclass \gset +SELECT reloptions FROM pg_class WHERE oid = :toast_oid; + reloptions +----------------------------------- + {autovacuum_vacuum_cost_delay=23} +(1 row) + +ALTER TABLE reloptions_test SET (toast.autovacuum_vacuum_cost_delay = 24); +SELECT reloptions FROM pg_class WHERE oid = :toast_oid; + reloptions +----------------------------------- + {autovacuum_vacuum_cost_delay=24} +(1 row) + +ALTER TABLE reloptions_test RESET (toast.autovacuum_vacuum_cost_delay); +SELECT reloptions FROM pg_class WHERE oid = :toast_oid; + reloptions +------------ + +(1 row) + +-- Fail on non-existent options in toast namespace +CREATE TABLE reloptions_test2 (i int) WITH (toast.not_existing_option = 42); +ERROR: unrecognized parameter "not_existing_option" +-- Mix TOAST & heap +DROP TABLE reloptions_test; +CREATE TABLE reloptions_test (s VARCHAR) WITH + (toast.autovacuum_vacuum_cost_delay = 23, + autovacuum_vacuum_cost_delay = 24, fillfactor = 40); +SELECT reloptions FROM pg_class WHERE oid = 'reloptions_test'::regclass; + reloptions +------------------------------------------------- + {autovacuum_vacuum_cost_delay=24,fillfactor=40} +(1 row) + +SELECT reloptions FROM pg_class WHERE oid = ( + SELECT reltoastrelid FROM pg_class WHERE oid = 'reloptions_test'::regclass); + reloptions +----------------------------------- + {autovacuum_vacuum_cost_delay=23} +(1 row) + +-- +-- CREATE INDEX, ALTER INDEX for btrees +-- +CREATE INDEX reloptions_test_idx ON reloptions_test (s) WITH (fillfactor=30); +SELECT reloptions FROM pg_class WHERE oid = 'reloptions_test_idx'::regclass; + reloptions +----------------- + {fillfactor=30} +(1 row) + +-- Fail when option and namespace do not exist +CREATE INDEX reloptions_test_idx ON reloptions_test (s) + WITH (not_existing_option=2); +ERROR: unrecognized parameter "not_existing_option" +CREATE INDEX reloptions_test_idx ON reloptions_test (s) + WITH (not_existing_ns.fillfactor=2); +ERROR: unrecognized parameter namespace "not_existing_ns" +-- Check allowed ranges +CREATE INDEX reloptions_test_idx2 ON reloptions_test (s) WITH (fillfactor=1); +ERROR: value 1 out of bounds for option "fillfactor" +DETAIL: Valid values are between "10" and "100". +CREATE INDEX reloptions_test_idx2 ON reloptions_test (s) WITH (fillfactor=130); +ERROR: value 130 out of bounds for option "fillfactor" +DETAIL: Valid values are between "10" and "100". 
+-- Check ALTER +ALTER INDEX reloptions_test_idx SET (fillfactor=40); +SELECT reloptions FROM pg_class WHERE oid = 'reloptions_test_idx'::regclass; + reloptions +----------------- + {fillfactor=40} +(1 row) + +-- Check ALTER on empty reloption list +CREATE INDEX reloptions_test_idx3 ON reloptions_test (s); +ALTER INDEX reloptions_test_idx3 SET (fillfactor=40); +SELECT reloptions FROM pg_class WHERE oid = 'reloptions_test_idx3'::regclass; + reloptions +----------------- + {fillfactor=40} +(1 row) + diff --git a/src/test/regress/expected/reltime.out b/src/test/regress/expected/reltime.out deleted file mode 100644 index 14fdc6aeec..0000000000 --- a/src/test/regress/expected/reltime.out +++ /dev/null @@ -1,109 +0,0 @@ --- --- RELTIME --- -CREATE TABLE RELTIME_TBL (f1 reltime); -INSERT INTO RELTIME_TBL (f1) VALUES ('@ 1 minute'); -INSERT INTO RELTIME_TBL (f1) VALUES ('@ 5 hour'); -INSERT INTO RELTIME_TBL (f1) VALUES ('@ 10 day'); -INSERT INTO RELTIME_TBL (f1) VALUES ('@ 34 year'); -INSERT INTO RELTIME_TBL (f1) VALUES ('@ 3 months'); -INSERT INTO RELTIME_TBL (f1) VALUES ('@ 14 seconds ago'); --- badly formatted reltimes -INSERT INTO RELTIME_TBL (f1) VALUES ('badly formatted reltime'); -ERROR: invalid input syntax for type reltime: "badly formatted reltime" -LINE 1: INSERT INTO RELTIME_TBL (f1) VALUES ('badly formatted reltim... - ^ -INSERT INTO RELTIME_TBL (f1) VALUES ('@ 30 eons ago'); -ERROR: invalid input syntax for type reltime: "@ 30 eons ago" -LINE 1: INSERT INTO RELTIME_TBL (f1) VALUES ('@ 30 eons ago'); - ^ --- test reltime operators -SELECT '' AS six, * FROM RELTIME_TBL; - six | f1 ------+--------------- - | @ 1 min - | @ 5 hours - | @ 10 days - | @ 34 years - | @ 3 mons - | @ 14 secs ago -(6 rows) - -SELECT '' AS five, * FROM RELTIME_TBL - WHERE RELTIME_TBL.f1 <> reltime '@ 10 days'; - five | f1 -------+--------------- - | @ 1 min - | @ 5 hours - | @ 34 years - | @ 3 mons - | @ 14 secs ago -(5 rows) - -SELECT '' AS three, * FROM RELTIME_TBL - WHERE RELTIME_TBL.f1 <= reltime '@ 5 hours'; - three | f1 --------+--------------- - | @ 1 min - | @ 5 hours - | @ 14 secs ago -(3 rows) - -SELECT '' AS three, * FROM RELTIME_TBL - WHERE RELTIME_TBL.f1 < reltime '@ 1 day'; - three | f1 --------+--------------- - | @ 1 min - | @ 5 hours - | @ 14 secs ago -(3 rows) - -SELECT '' AS one, * FROM RELTIME_TBL - WHERE RELTIME_TBL.f1 = reltime '@ 34 years'; - one | f1 ------+------------ - | @ 34 years -(1 row) - -SELECT '' AS two, * FROM RELTIME_TBL - WHERE RELTIME_TBL.f1 >= reltime '@ 1 month'; - two | f1 ------+------------ - | @ 34 years - | @ 3 mons -(2 rows) - -SELECT '' AS five, * FROM RELTIME_TBL - WHERE RELTIME_TBL.f1 > reltime '@ 3 seconds ago'; - five | f1 -------+------------ - | @ 1 min - | @ 5 hours - | @ 10 days - | @ 34 years - | @ 3 mons -(5 rows) - -SELECT '' AS fifteen, r1.*, r2.* - FROM RELTIME_TBL r1, RELTIME_TBL r2 - WHERE r1.f1 > r2.f1 - ORDER BY r1.f1, r2.f1; - fifteen | f1 | f1 ----------+------------+--------------- - | @ 1 min | @ 14 secs ago - | @ 5 hours | @ 14 secs ago - | @ 5 hours | @ 1 min - | @ 10 days | @ 14 secs ago - | @ 10 days | @ 1 min - | @ 10 days | @ 5 hours - | @ 3 mons | @ 14 secs ago - | @ 3 mons | @ 1 min - | @ 3 mons | @ 5 hours - | @ 3 mons | @ 10 days - | @ 34 years | @ 14 secs ago - | @ 34 years | @ 1 min - | @ 34 years | @ 5 hours - | @ 34 years | @ 10 days - | @ 34 years | @ 3 mons -(15 rows) - diff --git a/src/test/regress/expected/rolenames.out b/src/test/regress/expected/rolenames.out index dce82f5de7..68dacb786a 100644 --- 
a/src/test/regress/expected/rolenames.out +++ b/src/test/regress/expected/rolenames.out @@ -813,7 +813,7 @@ NOTICE: role "nonexistent" does not exist, skipping GRANT regress_testrol0 TO pg_signal_backend; -- success SET ROLE pg_signal_backend; --success RESET ROLE; -CREATE SCHEMA test_schema AUTHORIZATION pg_signal_backend; --success +CREATE SCHEMA test_roles_schema AUTHORIZATION pg_signal_backend; --success SET ROLE regress_testrol2; UPDATE pg_proc SET proacl = null WHERE proname LIKE 'testagg_'; SELECT proname, proacl FROM pg_proc WHERE proname LIKE 'testagg_'; @@ -944,9 +944,56 @@ SELECT proname, proacl FROM pg_proc WHERE proname LIKE 'testagg_'; testagg9 | (9 rows) +-- DEFAULT MONITORING ROLES +CREATE ROLE regress_role_haspriv; +CREATE ROLE regress_role_nopriv; +-- pg_read_all_stats +GRANT pg_read_all_stats TO regress_role_haspriv; +SET SESSION AUTHORIZATION regress_role_haspriv; +-- returns true with role member of pg_read_all_stats +SELECT COUNT(*) = 0 AS haspriv FROM pg_stat_activity + WHERE query = ''; + haspriv +--------- + t +(1 row) + +SET SESSION AUTHORIZATION regress_role_nopriv; +-- returns false with role not member of pg_read_all_stats +SELECT COUNT(*) = 0 AS haspriv FROM pg_stat_activity + WHERE query = ''; + haspriv +--------- + f +(1 row) + +RESET SESSION AUTHORIZATION; +REVOKE pg_read_all_stats FROM regress_role_haspriv; +-- pg_read_all_settings +GRANT pg_read_all_settings TO regress_role_haspriv; +BEGIN; +-- A GUC using GUC_SUPERUSER_ONLY is useful for negative tests. +SET LOCAL session_preload_libraries TO 'path-to-preload-libraries'; +SET SESSION AUTHORIZATION regress_role_haspriv; +-- passes with role member of pg_read_all_settings +SHOW session_preload_libraries; + session_preload_libraries +----------------------------- + "path-to-preload-libraries" +(1 row) + +SET SESSION AUTHORIZATION regress_role_nopriv; +-- fails with role not member of pg_read_all_settings +SHOW session_preload_libraries; +ERROR: must be superuser or a member of pg_read_all_settings to examine "session_preload_libraries" +RESET SESSION AUTHORIZATION; +ERROR: current transaction is aborted, commands ignored until end of transaction block +ROLLBACK; +REVOKE pg_read_all_settings FROM regress_role_haspriv; -- clean up \c -DROP SCHEMA test_schema; +DROP SCHEMA test_roles_schema; DROP OWNED BY regress_testrol0, "Public", "current_user", regress_testrol1, regress_testrol2, regress_testrolx CASCADE; DROP ROLE regress_testrol0, regress_testrol1, regress_testrol2, regress_testrolx; DROP ROLE "Public", "None", "current_user", "session_user", "user"; +DROP ROLE regress_role_haspriv, regress_role_nopriv; diff --git a/src/test/regress/expected/rowsecurity.out b/src/test/regress/expected/rowsecurity.out index de2ee4d2c9..bc16ca4c43 100644 --- a/src/test/regress/expected/rowsecurity.out +++ b/src/test/regress/expected/rowsecurity.out @@ -361,7 +361,7 @@ INSERT INTO document VALUES (100, 55, 1, 'regress_rls_dave', 'testing sorting of ERROR: new row violates row-level security policy "p2r" for table "document" -- only owner can change policies ALTER POLICY p1 ON document USING (true); --fail -ERROR: must be owner of relation document +ERROR: must be owner of table document DROP POLICY p1 ON document; --fail ERROR: must be owner of relation document SET SESSION AUTHORIZATION regress_rls_alice; @@ -1192,7 +1192,7 @@ EXPLAIN (COSTS OFF) SELECT * FROM part_document WHERE f_leak(dtitle); -- only owner can change policies ALTER POLICY pp1 ON part_document USING (true); --fail -ERROR: must be owner of relation 
part_document +ERROR: must be owner of table part_document DROP POLICY pp1 ON part_document; --fail ERROR: must be owner of relation part_document SET SESSION AUTHORIZATION regress_rls_alice; @@ -2108,7 +2108,7 @@ SET SESSION AUTHORIZATION regress_rls_bob; INSERT INTO document VALUES (79, (SELECT cid from category WHERE cname = 'technology'), 1, 'regress_rls_bob', 'technology book, can only insert') ON CONFLICT (did) DO UPDATE SET dtitle = EXCLUDED.dtitle RETURNING *; ERROR: new row violates row-level security policy for table "document" --- UPDATE path is taken here. Existing tuple passes, since it's cid +-- UPDATE path is taken here. Existing tuple passes, since its cid -- corresponds to "novel", but default USING qual is enforced against -- post-UPDATE tuple too (as always when updating with a policy that lacks an -- explicit WCO), and so this fails: @@ -2446,9 +2446,9 @@ EXPLAIN (COSTS OFF) SELECT * FROM rls_view; -- Query as role that is not the owner of the table or view without permissions. SET SESSION AUTHORIZATION regress_rls_carol; SELECT * FROM rls_view; --fail - permission denied. -ERROR: permission denied for relation rls_view +ERROR: permission denied for view rls_view EXPLAIN (COSTS OFF) SELECT * FROM rls_view; --fail - permission denied. -ERROR: permission denied for relation rls_view +ERROR: permission denied for view rls_view -- Query as role that is not the owner of the table or view with permissions. SET SESSION AUTHORIZATION regress_rls_bob; GRANT SELECT ON rls_view TO regress_rls_carol; @@ -3235,7 +3235,7 @@ COPY (SELECT * FROM copy_t ORDER BY a ASC) TO STDOUT WITH DELIMITER ','; --fail ERROR: query would be affected by row-level security policy for table "copy_t" SET row_security TO ON; COPY (SELECT * FROM copy_t ORDER BY a ASC) TO STDOUT WITH DELIMITER ','; --fail - permission denied -ERROR: permission denied for relation copy_t +ERROR: permission denied for table copy_t -- Check COPY relation TO; keep it just one row to avoid reordering issues RESET SESSION AUTHORIZATION; SET row_security TO ON; @@ -3271,10 +3271,10 @@ COPY copy_rel_to TO STDOUT WITH DELIMITER ','; --ok SET SESSION AUTHORIZATION regress_rls_carol; SET row_security TO OFF; COPY copy_rel_to TO STDOUT WITH DELIMITER ','; --fail - permission denied -ERROR: permission denied for relation copy_rel_to +ERROR: permission denied for table copy_rel_to SET row_security TO ON; COPY copy_rel_to TO STDOUT WITH DELIMITER ','; --fail - permission denied -ERROR: permission denied for relation copy_rel_to +ERROR: permission denied for table copy_rel_to -- Check COPY FROM as Superuser/owner. RESET SESSION AUTHORIZATION; SET row_security TO OFF; @@ -3298,10 +3298,10 @@ COPY copy_t FROM STDIN; --ok SET SESSION AUTHORIZATION regress_rls_carol; SET row_security TO OFF; COPY copy_t FROM STDIN; --fail - permission denied. -ERROR: permission denied for relation copy_t +ERROR: permission denied for table copy_t SET row_security TO ON; COPY copy_t FROM STDIN; --fail - permission denied. 
-ERROR: permission denied for relation copy_t +ERROR: permission denied for table copy_t RESET SESSION AUTHORIZATION; DROP TABLE copy_t; DROP TABLE copy_rel_to CASCADE; @@ -3807,9 +3807,10 @@ DROP TABLE r1; -- SET SESSION AUTHORIZATION regress_rls_alice; SET row_security = on; -CREATE TABLE r1 (a int); +CREATE TABLE r1 (a int PRIMARY KEY); CREATE POLICY p1 ON r1 FOR SELECT USING (a < 20); CREATE POLICY p2 ON r1 FOR UPDATE USING (a < 20) WITH CHECK (true); +CREATE POLICY p3 ON r1 FOR INSERT WITH CHECK (true); INSERT INTO r1 VALUES (10); ALTER TABLE r1 ENABLE ROW LEVEL SECURITY; ALTER TABLE r1 FORCE ROW LEVEL SECURITY; @@ -3836,6 +3837,18 @@ ALTER TABLE r1 FORCE ROW LEVEL SECURITY; -- Error UPDATE r1 SET a = 30 RETURNING *; ERROR: new row violates row-level security policy for table "r1" +-- UPDATE path of INSERT ... ON CONFLICT DO UPDATE should also error out +INSERT INTO r1 VALUES (10) + ON CONFLICT (a) DO UPDATE SET a = 30 RETURNING *; +ERROR: new row violates row-level security policy for table "r1" +-- Should still error out without RETURNING (use of arbiter always requires +-- SELECT permissions) +INSERT INTO r1 VALUES (10) + ON CONFLICT (a) DO UPDATE SET a = 30; +ERROR: new row violates row-level security policy for table "r1" +INSERT INTO r1 VALUES (10) + ON CONFLICT ON CONSTRAINT r1_pkey DO UPDATE SET a = 30; +ERROR: new row violates row-level security policy for table "r1" DROP TABLE r1; -- Check dependency handling RESET SESSION AUTHORIZATION; diff --git a/src/test/regress/expected/rowtypes.out b/src/test/regress/expected/rowtypes.out index 43b36f6566..30053d07df 100644 --- a/src/test/regress/expected/rowtypes.out +++ b/src/test/regress/expected/rowtypes.out @@ -53,6 +53,22 @@ ERROR: malformed record literal: "(Joe,,)" LINE 1: select '(Joe,,)'::fullname; ^ DETAIL: Too many columns. +select '[]'::fullname; -- bad +ERROR: malformed record literal: "[]" +LINE 1: select '[]'::fullname; + ^ +DETAIL: Missing left parenthesis. +select ' (Joe,Blow) '::fullname; -- ok, extra whitespace + fullname +------------ + (Joe,Blow) +(1 row) + +select '(Joe,Blow) /'::fullname; -- bad +ERROR: malformed record literal: "(Joe,Blow) /" +LINE 1: select '(Joe,Blow) /'::fullname; + ^ +DETAIL: Junk after right parenthesis. create temp table quadtable(f1 int, q quad); insert into quadtable values (1, ((3.3,4.4),(5.5,6.6))); insert into quadtable values (2, ((null,4.4),(5.5,6.6))); @@ -307,10 +323,10 @@ ERROR: cannot compare dissimilar column types bigint and integer at record colu explain (costs off) select * from int8_tbl i8 where i8 in (row(123,456)::int8_tbl, '(4567890123456789,123)'); - QUERY PLAN ------------------------------------------------------------------------------------------------------------------ + QUERY PLAN +------------------------------------------------------------------------------- Seq Scan on int8_tbl i8 - Filter: (i8.* = ANY (ARRAY[ROW('123'::bigint, '456'::bigint)::int8_tbl, '(4567890123456789,123)'::int8_tbl])) + Filter: (i8.* = ANY ('{"(123,456)","(4567890123456789,123)"}'::int8_tbl[])) (2 rows) select * from int8_tbl i8 @@ -369,6 +385,290 @@ LINE 1: select * from cc order by f1; ^ HINT: Use an explicit ordering operator or modify the query. -- +-- Tests for record_{eq,cmp} +-- +create type testtype1 as (a int, b int); +-- all true +select row(1, 2)::testtype1 < row(1, 3)::testtype1; + ?column? +---------- + t +(1 row) + +select row(1, 2)::testtype1 <= row(1, 3)::testtype1; + ?column? +---------- + t +(1 row) + +select row(1, 2)::testtype1 = row(1, 2)::testtype1; + ?column? 
+---------- + t +(1 row) + +select row(1, 2)::testtype1 <> row(1, 3)::testtype1; + ?column? +---------- + t +(1 row) + +select row(1, 3)::testtype1 >= row(1, 2)::testtype1; + ?column? +---------- + t +(1 row) + +select row(1, 3)::testtype1 > row(1, 2)::testtype1; + ?column? +---------- + t +(1 row) + +-- all false +select row(1, -2)::testtype1 < row(1, -3)::testtype1; + ?column? +---------- + f +(1 row) + +select row(1, -2)::testtype1 <= row(1, -3)::testtype1; + ?column? +---------- + f +(1 row) + +select row(1, -2)::testtype1 = row(1, -3)::testtype1; + ?column? +---------- + f +(1 row) + +select row(1, -2)::testtype1 <> row(1, -2)::testtype1; + ?column? +---------- + f +(1 row) + +select row(1, -3)::testtype1 >= row(1, -2)::testtype1; + ?column? +---------- + f +(1 row) + +select row(1, -3)::testtype1 > row(1, -2)::testtype1; + ?column? +---------- + f +(1 row) + +-- true, but see *< below +select row(1, -2)::testtype1 < row(1, 3)::testtype1; + ?column? +---------- + t +(1 row) + +-- mismatches +create type testtype3 as (a int, b text); +select row(1, 2)::testtype1 < row(1, 'abc')::testtype3; +ERROR: cannot compare dissimilar column types integer and text at record column 2 +select row(1, 2)::testtype1 <> row(1, 'abc')::testtype3; +ERROR: cannot compare dissimilar column types integer and text at record column 2 +create type testtype5 as (a int); +select row(1, 2)::testtype1 < row(1)::testtype5; +ERROR: cannot compare record types with different numbers of columns +select row(1, 2)::testtype1 <> row(1)::testtype5; +ERROR: cannot compare record types with different numbers of columns +-- non-comparable types +create type testtype6 as (a int, b point); +select row(1, '(1,2)')::testtype6 < row(1, '(1,3)')::testtype6; +ERROR: could not identify a comparison function for type point +select row(1, '(1,2)')::testtype6 <> row(1, '(1,3)')::testtype6; +ERROR: could not identify an equality operator for type point +drop type testtype1, testtype3, testtype5, testtype6; +-- +-- Tests for record_image_{eq,cmp} +-- +create type testtype1 as (a int, b int); +-- all true +select row(1, 2)::testtype1 *< row(1, 3)::testtype1; + ?column? +---------- + t +(1 row) + +select row(1, 2)::testtype1 *<= row(1, 3)::testtype1; + ?column? +---------- + t +(1 row) + +select row(1, 2)::testtype1 *= row(1, 2)::testtype1; + ?column? +---------- + t +(1 row) + +select row(1, 2)::testtype1 *<> row(1, 3)::testtype1; + ?column? +---------- + t +(1 row) + +select row(1, 3)::testtype1 *>= row(1, 2)::testtype1; + ?column? +---------- + t +(1 row) + +select row(1, 3)::testtype1 *> row(1, 2)::testtype1; + ?column? +---------- + t +(1 row) + +-- all false +select row(1, -2)::testtype1 *< row(1, -3)::testtype1; + ?column? +---------- + f +(1 row) + +select row(1, -2)::testtype1 *<= row(1, -3)::testtype1; + ?column? +---------- + f +(1 row) + +select row(1, -2)::testtype1 *= row(1, -3)::testtype1; + ?column? +---------- + f +(1 row) + +select row(1, -2)::testtype1 *<> row(1, -2)::testtype1; + ?column? +---------- + f +(1 row) + +select row(1, -3)::testtype1 *>= row(1, -2)::testtype1; + ?column? +---------- + f +(1 row) + +select row(1, -3)::testtype1 *> row(1, -2)::testtype1; + ?column? +---------- + f +(1 row) + +-- This returns the "wrong" order because record_image_cmp works on +-- unsigned datums without knowing about the actual data type. +select row(1, -2)::testtype1 *< row(1, 3)::testtype1; + ?column? 
+---------- + f +(1 row) + +-- other types +create type testtype2 as (a smallint, b bool); -- byval different sizes +select row(1, true)::testtype2 *< row(2, true)::testtype2; + ?column? +---------- + t +(1 row) + +select row(-2, true)::testtype2 *< row(-1, true)::testtype2; + ?column? +---------- + t +(1 row) + +select row(0, false)::testtype2 *< row(0, true)::testtype2; + ?column? +---------- + t +(1 row) + +select row(0, false)::testtype2 *<> row(0, true)::testtype2; + ?column? +---------- + t +(1 row) + +create type testtype3 as (a int, b text); -- variable length +select row(1, 'abc')::testtype3 *< row(1, 'abd')::testtype3; + ?column? +---------- + t +(1 row) + +select row(1, 'abc')::testtype3 *< row(1, 'abcd')::testtype3; + ?column? +---------- + t +(1 row) + +select row(1, 'abc')::testtype3 *> row(1, 'abd')::testtype3; + ?column? +---------- + f +(1 row) + +select row(1, 'abc')::testtype3 *<> row(1, 'abd')::testtype3; + ?column? +---------- + t +(1 row) + +create type testtype4 as (a int, b point); -- by ref, fixed length +select row(1, '(1,2)')::testtype4 *< row(1, '(1,3)')::testtype4; + ?column? +---------- + t +(1 row) + +select row(1, '(1,2)')::testtype4 *<> row(1, '(1,3)')::testtype4; + ?column? +---------- + t +(1 row) + +-- mismatches +select row(1, 2)::testtype1 *< row(1, 'abc')::testtype3; +ERROR: cannot compare dissimilar column types integer and text at record column 2 +select row(1, 2)::testtype1 *<> row(1, 'abc')::testtype3; +ERROR: cannot compare dissimilar column types integer and text at record column 2 +create type testtype5 as (a int); +select row(1, 2)::testtype1 *< row(1)::testtype5; +ERROR: cannot compare record types with different numbers of columns +select row(1, 2)::testtype1 *<> row(1)::testtype5; +ERROR: cannot compare record types with different numbers of columns +-- non-comparable types +create type testtype6 as (a int, b point); +select row(1, '(1,2)')::testtype6 *< row(1, '(1,3)')::testtype6; + ?column? +---------- + t +(1 row) + +select row(1, '(1,2)')::testtype6 *>= row(1, '(1,3)')::testtype6; + ?column? +---------- + f +(1 row) + +select row(1, '(1,2)')::testtype6 *<> row(1, '(1,3)')::testtype6; + ?column? 
+---------- + t +(1 row) + +drop type testtype1, testtype2, testtype3, testtype4, testtype5, testtype6; +-- -- Test case derived from bug #5716: check multiple uses of a rowtype result -- BEGIN; @@ -497,6 +797,50 @@ select (row('Jim', 'Beam')).text; -- error ERROR: could not identify column "text" in record data type LINE 1: select (row('Jim', 'Beam')).text; ^ +-- +-- Check the equivalence of functional and column notation +-- +insert into fullname values ('Joe', 'Blow'); +select f.last from fullname f; + last +------ + Blow +(1 row) + +select last(f) from fullname f; + last +------ + Blow +(1 row) + +create function longname(fullname) returns text language sql +as $$select $1.first || ' ' || $1.last$$; +select f.longname from fullname f; + longname +---------- + Joe Blow +(1 row) + +select longname(f) from fullname f; + longname +---------- + Joe Blow +(1 row) + +-- Starting in v11, the notational form does matter if there's ambiguity +alter table fullname add column longname text; +select f.longname from fullname f; + longname +---------- + +(1 row) + +select longname(f) from fullname f; + longname +---------- + Joe Blow +(1 row) + -- -- Test that composite values are seen to have the correct column names -- (bug #11210 and other reports) diff --git a/src/test/regress/expected/rules.out b/src/test/regress/expected/rules.out index d582bc9ee4..735dd37acf 100644 --- a/src/test/regress/expected/rules.out +++ b/src/test/regress/expected/rules.out @@ -1175,47 +1175,47 @@ SELECT count(*) FROM shoe; -- -- Simple test of qualified ON INSERT ... this did not work in 7.0 ... -- -create table foo (f1 int); -create table foo2 (f1 int); -create rule foorule as on insert to foo where f1 < 100 +create table rules_foo (f1 int); +create table rules_foo2 (f1 int); +create rule rules_foorule as on insert to rules_foo where f1 < 100 do instead nothing; -insert into foo values(1); -insert into foo values(1001); -select * from foo; +insert into rules_foo values(1); +insert into rules_foo values(1001); +select * from rules_foo; f1 ------ 1001 (1 row) -drop rule foorule on foo; +drop rule rules_foorule on rules_foo; -- this should fail because f1 is not exposed for unqualified reference: -create rule foorule as on insert to foo where f1 < 100 -do instead insert into foo2 values (f1); +create rule rules_foorule as on insert to rules_foo where f1 < 100 +do instead insert into rules_foo2 values (f1); ERROR: column "f1" does not exist -LINE 2: do instead insert into foo2 values (f1); - ^ +LINE 2: do instead insert into rules_foo2 values (f1); + ^ HINT: There is a column named "f1" in table "old", but it cannot be referenced from this part of the query. -- this is the correct way: -create rule foorule as on insert to foo where f1 < 100 -do instead insert into foo2 values (new.f1); -insert into foo values(2); -insert into foo values(100); -select * from foo; +create rule rules_foorule as on insert to rules_foo where f1 < 100 +do instead insert into rules_foo2 values (new.f1); +insert into rules_foo values(2); +insert into rules_foo values(100); +select * from rules_foo; f1 ------ 1001 100 (2 rows) -select * from foo2; +select * from rules_foo2; f1 ---- 2 (1 row) -drop rule foorule on foo; -drop table foo; -drop table foo2; +drop rule rules_foorule on rules_foo; +drop table rules_foo; +drop table rules_foo2; -- -- Test rules containing INSERT ... SELECT, which is a very ugly special -- case as of 7.1. Example is based on bug report from Joel Burton. 
@@ -1521,9 +1521,11 @@ UNION ALL SELECT l.objoid, l.classoid, l.objsubid, - CASE - WHEN (pro.proisagg = true) THEN 'aggregate'::text - WHEN (pro.proisagg = false) THEN 'function'::text + CASE pro.prokind + WHEN 'a'::"char" THEN 'aggregate'::text + WHEN 'f'::"char" THEN 'function'::text + WHEN 'p'::"char" THEN 'procedure'::text + WHEN 'w'::"char" THEN 'window'::text ELSE NULL::text END AS objtype, pro.pronamespace AS objnamespace, @@ -1704,7 +1706,7 @@ pg_shadow| SELECT pg_authid.rolname AS usename, pg_authid.rolreplication AS userepl, pg_authid.rolbypassrls AS usebypassrls, pg_authid.rolpassword AS passwd, - (pg_authid.rolvaliduntil)::abstime AS valuntil, + pg_authid.rolvaliduntil AS valuntil, s.setconfig AS useconfig FROM (pg_authid LEFT JOIN pg_db_role_setting s ON (((pg_authid.oid = s.setrole) AND (s.setdatabase = (0)::oid)))) @@ -1970,8 +1972,10 @@ pg_stat_wal_receiver| SELECT s.pid, s.latest_end_lsn, s.latest_end_time, s.slot_name, + s.sender_host, + s.sender_port, s.conninfo - FROM pg_stat_get_wal_receiver() s(pid, status, receive_start_lsn, receive_start_tli, received_lsn, received_tli, last_msg_send_time, last_msg_receipt_time, latest_end_lsn, latest_end_time, slot_name, conninfo) + FROM pg_stat_get_wal_receiver() s(pid, status, receive_start_lsn, receive_start_tli, received_lsn, received_tli, last_msg_send_time, last_msg_receipt_time, latest_end_lsn, latest_end_time, slot_name, sender_host, sender_port, conninfo) WHERE (s.pid IS NOT NULL); pg_stat_xact_all_tables| SELECT c.oid AS relid, n.nspname AS schemaname, @@ -2342,103 +2346,103 @@ toyemp| SELECT emp.name, SELECT tablename, rulename, definition FROM pg_rules ORDER BY tablename, rulename; pg_settings|pg_settings_n|CREATE RULE pg_settings_n AS - ON UPDATE TO pg_settings DO INSTEAD NOTHING; + ON UPDATE TO pg_catalog.pg_settings DO INSTEAD NOTHING; pg_settings|pg_settings_u|CREATE RULE pg_settings_u AS - ON UPDATE TO pg_settings + ON UPDATE TO pg_catalog.pg_settings WHERE (new.name = old.name) DO SELECT set_config(old.name, new.setting, false) AS set_config; rtest_emp|rtest_emp_del|CREATE RULE rtest_emp_del AS - ON DELETE TO rtest_emp DO INSERT INTO rtest_emplog (ename, who, action, newsal, oldsal) + ON DELETE TO public.rtest_emp DO INSERT INTO rtest_emplog (ename, who, action, newsal, oldsal) VALUES (old.ename, CURRENT_USER, 'fired'::bpchar, '$0.00'::money, old.salary); rtest_emp|rtest_emp_ins|CREATE RULE rtest_emp_ins AS - ON INSERT TO rtest_emp DO INSERT INTO rtest_emplog (ename, who, action, newsal, oldsal) + ON INSERT TO public.rtest_emp DO INSERT INTO rtest_emplog (ename, who, action, newsal, oldsal) VALUES (new.ename, CURRENT_USER, 'hired'::bpchar, new.salary, '$0.00'::money); rtest_emp|rtest_emp_upd|CREATE RULE rtest_emp_upd AS - ON UPDATE TO rtest_emp + ON UPDATE TO public.rtest_emp WHERE (new.salary <> old.salary) DO INSERT INTO rtest_emplog (ename, who, action, newsal, oldsal) VALUES (new.ename, CURRENT_USER, 'honored'::bpchar, new.salary, old.salary); rtest_nothn1|rtest_nothn_r1|CREATE RULE rtest_nothn_r1 AS - ON INSERT TO rtest_nothn1 + ON INSERT TO public.rtest_nothn1 WHERE ((new.a >= 10) AND (new.a < 20)) DO INSTEAD NOTHING; rtest_nothn1|rtest_nothn_r2|CREATE RULE rtest_nothn_r2 AS - ON INSERT TO rtest_nothn1 + ON INSERT TO public.rtest_nothn1 WHERE ((new.a >= 30) AND (new.a < 40)) DO INSTEAD NOTHING; rtest_nothn2|rtest_nothn_r3|CREATE RULE rtest_nothn_r3 AS - ON INSERT TO rtest_nothn2 + ON INSERT TO public.rtest_nothn2 WHERE (new.a >= 100) DO INSTEAD INSERT INTO rtest_nothn3 (a, b) VALUES (new.a, new.b); 
rtest_nothn2|rtest_nothn_r4|CREATE RULE rtest_nothn_r4 AS - ON INSERT TO rtest_nothn2 DO INSTEAD NOTHING; + ON INSERT TO public.rtest_nothn2 DO INSTEAD NOTHING; rtest_order1|rtest_order_r1|CREATE RULE rtest_order_r1 AS - ON INSERT TO rtest_order1 DO INSTEAD INSERT INTO rtest_order2 (a, b, c) + ON INSERT TO public.rtest_order1 DO INSTEAD INSERT INTO rtest_order2 (a, b, c) VALUES (new.a, nextval('rtest_seq'::regclass), 'rule 1 - this should run 1st'::text); rtest_order1|rtest_order_r2|CREATE RULE rtest_order_r2 AS - ON INSERT TO rtest_order1 DO INSERT INTO rtest_order2 (a, b, c) + ON INSERT TO public.rtest_order1 DO INSERT INTO rtest_order2 (a, b, c) VALUES (new.a, nextval('rtest_seq'::regclass), 'rule 2 - this should run 2nd'::text); rtest_order1|rtest_order_r3|CREATE RULE rtest_order_r3 AS - ON INSERT TO rtest_order1 DO INSTEAD INSERT INTO rtest_order2 (a, b, c) + ON INSERT TO public.rtest_order1 DO INSTEAD INSERT INTO rtest_order2 (a, b, c) VALUES (new.a, nextval('rtest_seq'::regclass), 'rule 3 - this should run 3rd'::text); rtest_order1|rtest_order_r4|CREATE RULE rtest_order_r4 AS - ON INSERT TO rtest_order1 + ON INSERT TO public.rtest_order1 WHERE (new.a < 100) DO INSTEAD INSERT INTO rtest_order2 (a, b, c) VALUES (new.a, nextval('rtest_seq'::regclass), 'rule 4 - this should run 4th'::text); rtest_person|rtest_pers_del|CREATE RULE rtest_pers_del AS - ON DELETE TO rtest_person DO DELETE FROM rtest_admin + ON DELETE TO public.rtest_person DO DELETE FROM rtest_admin WHERE (rtest_admin.pname = old.pname); rtest_person|rtest_pers_upd|CREATE RULE rtest_pers_upd AS - ON UPDATE TO rtest_person DO UPDATE rtest_admin SET pname = new.pname + ON UPDATE TO public.rtest_person DO UPDATE rtest_admin SET pname = new.pname WHERE (rtest_admin.pname = old.pname); rtest_system|rtest_sys_del|CREATE RULE rtest_sys_del AS - ON DELETE TO rtest_system DO ( DELETE FROM rtest_interface + ON DELETE TO public.rtest_system DO ( DELETE FROM rtest_interface WHERE (rtest_interface.sysname = old.sysname); DELETE FROM rtest_admin WHERE (rtest_admin.sysname = old.sysname); ); rtest_system|rtest_sys_upd|CREATE RULE rtest_sys_upd AS - ON UPDATE TO rtest_system DO ( UPDATE rtest_interface SET sysname = new.sysname + ON UPDATE TO public.rtest_system DO ( UPDATE rtest_interface SET sysname = new.sysname WHERE (rtest_interface.sysname = old.sysname); UPDATE rtest_admin SET sysname = new.sysname WHERE (rtest_admin.sysname = old.sysname); ); rtest_t4|rtest_t4_ins1|CREATE RULE rtest_t4_ins1 AS - ON INSERT TO rtest_t4 + ON INSERT TO public.rtest_t4 WHERE ((new.a >= 10) AND (new.a < 20)) DO INSTEAD INSERT INTO rtest_t5 (a, b) VALUES (new.a, new.b); rtest_t4|rtest_t4_ins2|CREATE RULE rtest_t4_ins2 AS - ON INSERT TO rtest_t4 + ON INSERT TO public.rtest_t4 WHERE ((new.a >= 20) AND (new.a < 30)) DO INSERT INTO rtest_t6 (a, b) VALUES (new.a, new.b); rtest_t5|rtest_t5_ins|CREATE RULE rtest_t5_ins AS - ON INSERT TO rtest_t5 + ON INSERT TO public.rtest_t5 WHERE (new.a > 15) DO INSERT INTO rtest_t7 (a, b) VALUES (new.a, new.b); rtest_t6|rtest_t6_ins|CREATE RULE rtest_t6_ins AS - ON INSERT TO rtest_t6 + ON INSERT TO public.rtest_t6 WHERE (new.a > 25) DO INSTEAD INSERT INTO rtest_t8 (a, b) VALUES (new.a, new.b); rtest_v1|rtest_v1_del|CREATE RULE rtest_v1_del AS - ON DELETE TO rtest_v1 DO INSTEAD DELETE FROM rtest_t1 + ON DELETE TO public.rtest_v1 DO INSTEAD DELETE FROM rtest_t1 WHERE (rtest_t1.a = old.a); rtest_v1|rtest_v1_ins|CREATE RULE rtest_v1_ins AS - ON INSERT TO rtest_v1 DO INSTEAD INSERT INTO rtest_t1 (a, b) + ON INSERT TO 
public.rtest_v1 DO INSTEAD INSERT INTO rtest_t1 (a, b) VALUES (new.a, new.b); rtest_v1|rtest_v1_upd|CREATE RULE rtest_v1_upd AS - ON UPDATE TO rtest_v1 DO INSTEAD UPDATE rtest_t1 SET a = new.a, b = new.b + ON UPDATE TO public.rtest_v1 DO INSTEAD UPDATE rtest_t1 SET a = new.a, b = new.b WHERE (rtest_t1.a = old.a); shoelace|shoelace_del|CREATE RULE shoelace_del AS - ON DELETE TO shoelace DO INSTEAD DELETE FROM shoelace_data + ON DELETE TO public.shoelace DO INSTEAD DELETE FROM shoelace_data WHERE (shoelace_data.sl_name = old.sl_name); shoelace|shoelace_ins|CREATE RULE shoelace_ins AS - ON INSERT TO shoelace DO INSTEAD INSERT INTO shoelace_data (sl_name, sl_avail, sl_color, sl_len, sl_unit) + ON INSERT TO public.shoelace DO INSTEAD INSERT INTO shoelace_data (sl_name, sl_avail, sl_color, sl_len, sl_unit) VALUES (new.sl_name, new.sl_avail, new.sl_color, new.sl_len, new.sl_unit); shoelace|shoelace_upd|CREATE RULE shoelace_upd AS - ON UPDATE TO shoelace DO INSTEAD UPDATE shoelace_data SET sl_name = new.sl_name, sl_avail = new.sl_avail, sl_color = new.sl_color, sl_len = new.sl_len, sl_unit = new.sl_unit + ON UPDATE TO public.shoelace DO INSTEAD UPDATE shoelace_data SET sl_name = new.sl_name, sl_avail = new.sl_avail, sl_color = new.sl_color, sl_len = new.sl_len, sl_unit = new.sl_unit WHERE (shoelace_data.sl_name = old.sl_name); shoelace_data|log_shoelace|CREATE RULE log_shoelace AS - ON UPDATE TO shoelace_data + ON UPDATE TO public.shoelace_data WHERE (new.sl_avail <> old.sl_avail) DO INSERT INTO shoelace_log (sl_name, sl_avail, log_who, log_when) VALUES (new.sl_name, new.sl_avail, 'Al Bundy'::name, 'Thu Jan 01 00:00:00 1970'::timestamp without time zone); shoelace_ok|shoelace_ok_ins|CREATE RULE shoelace_ok_ins AS - ON INSERT TO shoelace_ok DO INSTEAD UPDATE shoelace SET sl_avail = (shoelace.sl_avail + new.ok_quant) + ON INSERT TO public.shoelace_ok DO INSTEAD UPDATE shoelace SET sl_avail = (shoelace.sl_avail + new.ok_quant) WHERE (shoelace.sl_name = new.ok_name); -- restore normal output mode \a\t @@ -2533,50 +2537,50 @@ DETAIL: Key (id3a, id3b)=(1, 13) is not present in table "rule_and_refint_t1". -- -- disallow dropping a view's rule (bug #5072) -- -create view fooview as select 'foo'::text; -drop rule "_RETURN" on fooview; -ERROR: cannot drop rule _RETURN on view fooview because view fooview requires it -HINT: You can drop view fooview instead. -drop view fooview; +create view rules_fooview as select 'rules_foo'::text; +drop rule "_RETURN" on rules_fooview; +ERROR: cannot drop rule _RETURN on view rules_fooview because view rules_fooview requires it +HINT: You can drop view rules_fooview instead. 
+drop view rules_fooview; -- -- test conversion of table to view (needed to load some pg_dump files) -- -create table fooview (x int, y text); -select xmin, * from fooview; +create table rules_fooview (x int, y text); +select xmin, * from rules_fooview; xmin | x | y ------+---+--- (0 rows) -create rule "_RETURN" as on select to fooview do instead +create rule "_RETURN" as on select to rules_fooview do instead select 1 as x, 'aaa'::text as y; -select * from fooview; +select * from rules_fooview; x | y ---+----- 1 | aaa (1 row) -select xmin, * from fooview; -- fail, views don't have such a column +select xmin, * from rules_fooview; -- fail, views don't have such a column ERROR: column "xmin" does not exist -LINE 1: select xmin, * from fooview; +LINE 1: select xmin, * from rules_fooview; ^ select reltoastrelid, relkind, relfrozenxid - from pg_class where oid = 'fooview'::regclass; + from pg_class where oid = 'rules_fooview'::regclass; reltoastrelid | relkind | relfrozenxid ---------------+---------+-------------- 0 | v | 0 (1 row) -drop view fooview; +drop view rules_fooview; -- trying to convert a partitioned table to view is not allowed -create table fooview (x int, y text) partition by list (x); -create rule "_RETURN" as on select to fooview do instead +create table rules_fooview (x int, y text) partition by list (x); +create rule "_RETURN" as on select to rules_fooview do instead select 1 as x, 'aaa'::text as y; -ERROR: could not convert partitioned table "fooview" to a view +ERROR: cannot convert partitioned table "rules_fooview" to a view -- nor can one convert a partition to view -create table fooview_part partition of fooview for values in (1); -create rule "_RETURN" as on select to fooview_part do instead +create table rules_fooview_part partition of rules_fooview for values in (1); +create rule "_RETURN" as on select to rules_fooview_part do instead select 1 as x, 'aaa'::text as y; -ERROR: could not convert partition "fooview_part" to a view +ERROR: cannot convert partition "rules_fooview_part" to a view -- -- check for planner problems with complex inherited UPDATES -- @@ -2814,7 +2818,7 @@ Rules: NOTIFY rules_src_deletion -- --- Ensure a aliased target relation for insert is correctly deparsed. +-- Ensure an aliased target relation for insert is correctly deparsed. 
-- create rule r4 as on insert to rules_src do instead insert into rules_log AS trgt SELECT NEW.* RETURNING trgt.f1, trgt.f2; create rule r5 as on update to rules_src do instead UPDATE rules_log AS trgt SET tag = 'updated' WHERE trgt.f1 = new.f1; @@ -2961,7 +2965,7 @@ SELECT definition FROM pg_rules WHERE tablename = 'hats' ORDER BY rulename; definition --------------------------------------------------------------------------------------------- CREATE RULE hat_nosert AS + - ON INSERT TO hats DO INSTEAD INSERT INTO hat_data (hat_name, hat_color) + + ON INSERT TO public.hats DO INSTEAD INSERT INTO hat_data (hat_name, hat_color) + VALUES (new.hat_name, new.hat_color) ON CONFLICT(hat_name COLLATE "C" bpchar_pattern_ops)+ WHERE (hat_color = 'green'::bpchar) DO NOTHING + RETURNING hat_data.hat_name, + @@ -2986,7 +2990,7 @@ SELECT tablename, rulename, definition FROM pg_rules tablename | rulename | definition -----------+------------+--------------------------------------------------------------------------------------------- hats | hat_nosert | CREATE RULE hat_nosert AS + - | | ON INSERT TO hats DO INSTEAD INSERT INTO hat_data (hat_name, hat_color) + + | | ON INSERT TO public.hats DO INSTEAD INSERT INTO hat_data (hat_name, hat_color) + | | VALUES (new.hat_name, new.hat_color) ON CONFLICT(hat_name COLLATE "C" bpchar_pattern_ops)+ | | WHERE (hat_color = 'green'::bpchar) DO NOTHING + | | RETURNING hat_data.hat_name, + @@ -3004,12 +3008,12 @@ CREATE RULE hat_nosert_all AS ON INSERT TO hats DO NOTHING RETURNING *; SELECT definition FROM pg_rules WHERE tablename = 'hats' ORDER BY rulename; - definition ------------------------------------------------------------------------------- - CREATE RULE hat_nosert_all AS + - ON INSERT TO hats DO INSTEAD INSERT INTO hat_data (hat_name, hat_color)+ - VALUES (new.hat_name, new.hat_color) ON CONFLICT DO NOTHING + - RETURNING hat_data.hat_name, + + definition +------------------------------------------------------------------------------------- + CREATE RULE hat_nosert_all AS + + ON INSERT TO public.hats DO INSTEAD INSERT INTO hat_data (hat_name, hat_color)+ + VALUES (new.hat_name, new.hat_color) ON CONFLICT DO NOTHING + + RETURNING hat_data.hat_name, + hat_data.hat_color; (1 row) @@ -3036,7 +3040,7 @@ SELECT definition FROM pg_rules WHERE tablename = 'hats' ORDER BY rulename; definition ----------------------------------------------------------------------------------------------------------------------------------------- CREATE RULE hat_upsert AS + - ON INSERT TO hats DO INSTEAD INSERT INTO hat_data (hat_name, hat_color) + + ON INSERT TO public.hats DO INSTEAD INSERT INTO hat_data (hat_name, hat_color) + VALUES (new.hat_name, new.hat_color) ON CONFLICT(hat_name) DO UPDATE SET hat_name = hat_data.hat_name, hat_color = excluded.hat_color+ WHERE ((excluded.hat_color <> 'forbidden'::bpchar) AND (hat_data.* <> excluded.*)) + RETURNING hat_data.hat_name, + @@ -3084,7 +3088,7 @@ SELECT tablename, rulename, definition FROM pg_rules tablename | rulename | definition -----------+------------+----------------------------------------------------------------------------------------------------------------------------------------- hats | hat_upsert | CREATE RULE hat_upsert AS + - | | ON INSERT TO hats DO INSTEAD INSERT INTO hat_data (hat_name, hat_color) + + | | ON INSERT TO public.hats DO INSTEAD INSERT INTO hat_data (hat_name, hat_color) + | | VALUES (new.hat_name, new.hat_color) ON CONFLICT(hat_name) DO UPDATE SET hat_name = hat_data.hat_name, hat_color = 
excluded.hat_color+ | | WHERE ((excluded.hat_color <> 'forbidden'::bpchar) AND (hat_data.* <> excluded.*)) + | | RETURNING hat_data.hat_name, + @@ -3147,6 +3151,33 @@ SELECT * FROM hat_data WHERE hat_name IN ('h8', 'h9', 'h7') ORDER BY hat_name; DROP RULE hat_upsert ON hats; drop table hats; drop table hat_data; +-- test for pg_get_functiondef properly regurgitating SET parameters +-- Note that the function is kept around to stress pg_dump. +CREATE FUNCTION func_with_set_params() RETURNS integer + AS 'select 1;' + LANGUAGE SQL + SET search_path TO PG_CATALOG + SET extra_float_digits TO 2 + SET work_mem TO '4MB' + SET datestyle to iso, mdy + SET local_preload_libraries TO "Mixed/Case", 'c:/''a"/path', '', '0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789' + IMMUTABLE STRICT; +SELECT pg_get_functiondef('func_with_set_params()'::regprocedure); + pg_get_functiondef +-------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + CREATE OR REPLACE FUNCTION public.func_with_set_params() + + RETURNS integer + + LANGUAGE sql + + IMMUTABLE STRICT + + SET search_path TO 'pg_catalog' + + SET extra_float_digits TO '2' + + SET work_mem TO '4MB' + + SET "DateStyle" TO 'iso, mdy' + + SET local_preload_libraries TO 'Mixed/Case', 'c:/''a"/path', '', '0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789'+ + AS $function$select 1;$function$ + + +(1 row) + -- tests for pg_get_*def with invalid objects SELECT pg_get_constraintdef(0); pg_get_constraintdef @@ -3227,9 +3258,43 @@ SELECT pg_get_partkeydef(0); (1 row) -- test rename for a rule defined on a partitioned table -CREATE TABLE parted_table (a int) PARTITION BY LIST (a); -CREATE TABLE parted_table_1 PARTITION OF parted_table FOR VALUES IN (1); -CREATE RULE parted_table_insert AS ON INSERT to parted_table - DO INSTEAD INSERT INTO parted_table_1 VALUES (NEW.*); -ALTER RULE parted_table_insert ON parted_table RENAME TO parted_table_insert_redirect; -DROP TABLE parted_table; +CREATE TABLE rules_parted_table (a int) PARTITION BY LIST (a); +CREATE TABLE rules_parted_table_1 PARTITION OF rules_parted_table FOR VALUES IN (1); +CREATE RULE rules_parted_table_insert AS ON INSERT to rules_parted_table + DO INSTEAD INSERT INTO rules_parted_table_1 VALUES (NEW.*); +ALTER RULE rules_parted_table_insert ON rules_parted_table RENAME TO rules_parted_table_insert_redirect; +DROP TABLE rules_parted_table; +-- +-- Test enabling/disabling +-- +CREATE TABLE ruletest1 (a int); +CREATE TABLE ruletest2 (b int); +CREATE RULE rule1 AS ON INSERT TO ruletest1 + DO INSTEAD INSERT INTO ruletest2 VALUES (NEW.*); +INSERT INTO ruletest1 VALUES (1); +ALTER TABLE ruletest1 DISABLE RULE rule1; +INSERT INTO ruletest1 VALUES (2); +ALTER TABLE ruletest1 ENABLE RULE rule1; +SET session_replication_role = replica; +INSERT INTO ruletest1 VALUES (3); +ALTER TABLE ruletest1 ENABLE REPLICA RULE rule1; +INSERT INTO ruletest1 VALUES (4); +RESET session_replication_role; +INSERT INTO ruletest1 VALUES (5); +SELECT * FROM ruletest1; + a +--- + 2 + 3 + 5 +(3 rows) + +SELECT * FROM ruletest2; + b +--- + 1 + 4 +(2 rows) + +DROP TABLE ruletest1; +DROP TABLE ruletest2; diff --git a/src/test/regress/expected/sanity_check.out b/src/test/regress/expected/sanity_check.out index 6750152e0f..9c7a60c092 100644 --- a/src/test/regress/expected/sanity_check.out +++ b/src/test/regress/expected/sanity_check.out @@ 
-13,12 +13,12 @@ SELECT relname, relhasindex ORDER BY relname; a|f a_star|f -abstime_tbl|f aggtest|f array_index_op_test|t array_op_test|f b|f b_star|f +bit_defaults|f box_tbl|f bprime|f bt_f8_heap|t @@ -38,6 +38,7 @@ d_star|f date_tbl|f default_tbl|f defaultexpr_tbl|f +delete_test_table|t dept|f dupindexcols|t e_star|f @@ -77,6 +78,10 @@ mlparted12|f mlparted2|f mlparted3|f mlparted4|f +mlparted_def|f +mlparted_def1|f +mlparted_def2|f +mlparted_defd|f money_data|f num_data|f num_exp_add|t @@ -162,10 +167,12 @@ point_tbl|t polygon_tbl|t quad_box_tbl|t quad_point_tbl|t +quad_poly_tbl|t +quad_poly_tbl_ord_seq1|f +quad_poly_tbl_ord_seq2|f radix_text_tbl|t ramp|f real_city|f -reltime_tbl|f road|t shighway|t slow_emp4000|f @@ -178,6 +185,12 @@ sql_sizing|f sql_sizing_profiles|f stud_emp|f student|f +tbl_include_box|t +tbl_include_box_pk|f +tbl_include_pk|t +tbl_include_reg|t +tbl_include_unique1|t +tbl_include_unique2|f tenk1|t tenk2|t test_range_excl|t @@ -190,7 +203,6 @@ time_tbl|f timestamp_tbl|f timestamptz_tbl|f timetz_tbl|f -tinterval_tbl|f varchar_tbl|f -- restore normal output mode \a\t diff --git a/src/test/regress/expected/select.out b/src/test/regress/expected/select.out index 1fab5136d2..c441049f41 100644 --- a/src/test/regress/expected/select.out +++ b/src/test/regress/expected/select.out @@ -438,19 +438,19 @@ SELECT p.name, p.age FROM person* p ORDER BY age using >, name; -- -- Test some cases involving whole-row Var referencing a subquery -- -select foo from (select 1) as foo; +select foo from (select 1 offset 0) as foo; foo ----- (1) (1 row) -select foo from (select null) as foo; +select foo from (select null offset 0) as foo; foo ----- () (1 row) -select foo from (select 'xyzzy',1,null) as foo; +select foo from (select 'xyzzy',1,null offset 0) as foo; foo ------------ (xyzzy,1,) @@ -951,3 +951,16 @@ select * from (values (2),(null),(1)) v(k) where k = k; 1 (2 rows) +-- Test partitioned tables with no partitions, which should be handled the +-- same as the non-inheritance case when expanding its RTE. +create table list_parted_tbl (a int,b int) partition by list (a); +create table list_parted_tbl1 partition of list_parted_tbl + for values in (1) partition by list(b); +explain (costs off) select * from list_parted_tbl; + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +drop table list_parted_tbl; diff --git a/src/test/regress/expected/select_into.out b/src/test/regress/expected/select_into.out index 5d54bbf3b0..942f975e95 100644 --- a/src/test/regress/expected/select_into.out +++ b/src/test/regress/expected/select_into.out @@ -2,15 +2,15 @@ -- SELECT_INTO -- SELECT * - INTO TABLE tmp1 + INTO TABLE sitmp1 FROM onek WHERE onek.unique1 < 2; -DROP TABLE tmp1; +DROP TABLE sitmp1; SELECT * - INTO TABLE tmp1 + INTO TABLE sitmp1 FROM onek2 WHERE onek2.unique1 < 2; -DROP TABLE tmp1; +DROP TABLE sitmp1; -- -- SELECT INTO and INSERT permission, if owner is not allowed to insert. 
-- @@ -22,15 +22,15 @@ GRANT ALL ON SCHEMA selinto_schema TO public; SET SESSION AUTHORIZATION regress_selinto_user; SELECT * INTO TABLE selinto_schema.tmp1 FROM pg_class WHERE relname like '%a%'; -- Error -ERROR: permission denied for relation tmp1 +ERROR: permission denied for table tmp1 SELECT oid AS clsoid, relname, relnatts + 10 AS x INTO selinto_schema.tmp2 FROM pg_class WHERE relname like '%b%'; -- Error -ERROR: permission denied for relation tmp2 +ERROR: permission denied for table tmp2 CREATE TABLE selinto_schema.tmp3 (a,b,c) AS SELECT oid,relname,relacl FROM pg_class WHERE relname like '%c%'; -- Error -ERROR: permission denied for relation tmp3 +ERROR: permission denied for table tmp3 RESET SESSION AUTHORIZATION; ALTER DEFAULT PRIVILEGES FOR ROLE regress_selinto_user GRANT INSERT ON TABLES TO regress_selinto_user; diff --git a/src/test/regress/expected/select_parallel.out b/src/test/regress/expected/select_parallel.out index 0efb211c97..0eca76cb41 100644 --- a/src/test/regress/expected/select_parallel.out +++ b/src/test/regress/expected/select_parallel.out @@ -1,7 +1,7 @@ -- -- PARALLEL -- -create or replace function parallel_restricted(int) returns int as +create function sp_parallel_restricted(int) returns int as $$begin return $1; end$$ language plpgsql parallel restricted; -- Serializable isolation would disable parallel query, so explicitly use an -- arbitrary other level. @@ -11,8 +11,94 @@ set parallel_setup_cost=0; set parallel_tuple_cost=0; set min_parallel_table_scan_size=0; set max_parallel_workers_per_gather=4; +-- Parallel Append with partial-subplans explain (costs off) - select count(*) from a_star; + select round(avg(aa)), sum(aa) from a_star; + QUERY PLAN +----------------------------------------------------- + Finalize Aggregate + -> Gather + Workers Planned: 3 + -> Partial Aggregate + -> Parallel Append + -> Parallel Seq Scan on d_star + -> Parallel Seq Scan on f_star + -> Parallel Seq Scan on e_star + -> Parallel Seq Scan on b_star + -> Parallel Seq Scan on c_star + -> Parallel Seq Scan on a_star +(11 rows) + +select round(avg(aa)), sum(aa) from a_star a1; + round | sum +-------+----- + 14 | 355 +(1 row) + +-- Parallel Append with both partial and non-partial subplans +alter table c_star set (parallel_workers = 0); +alter table d_star set (parallel_workers = 0); +explain (costs off) + select round(avg(aa)), sum(aa) from a_star; + QUERY PLAN +----------------------------------------------------- + Finalize Aggregate + -> Gather + Workers Planned: 3 + -> Partial Aggregate + -> Parallel Append + -> Seq Scan on d_star + -> Seq Scan on c_star + -> Parallel Seq Scan on f_star + -> Parallel Seq Scan on e_star + -> Parallel Seq Scan on b_star + -> Parallel Seq Scan on a_star +(11 rows) + +select round(avg(aa)), sum(aa) from a_star a2; + round | sum +-------+----- + 14 | 355 +(1 row) + +-- Parallel Append with only non-partial subplans +alter table a_star set (parallel_workers = 0); +alter table b_star set (parallel_workers = 0); +alter table e_star set (parallel_workers = 0); +alter table f_star set (parallel_workers = 0); +explain (costs off) + select round(avg(aa)), sum(aa) from a_star; + QUERY PLAN +-------------------------------------------- + Finalize Aggregate + -> Gather + Workers Planned: 3 + -> Partial Aggregate + -> Parallel Append + -> Seq Scan on d_star + -> Seq Scan on f_star + -> Seq Scan on e_star + -> Seq Scan on b_star + -> Seq Scan on c_star + -> Seq Scan on a_star +(11 rows) + +select round(avg(aa)), sum(aa) from a_star a3; + round | sum 
+-------+----- + 14 | 355 +(1 row) + +-- Disable Parallel Append +alter table a_star reset (parallel_workers); +alter table b_star reset (parallel_workers); +alter table c_star reset (parallel_workers); +alter table d_star reset (parallel_workers); +alter table e_star reset (parallel_workers); +alter table f_star reset (parallel_workers); +set enable_parallel_append to off; +explain (costs off) + select round(avg(aa)), sum(aa) from a_star; QUERY PLAN ----------------------------------------------------- Finalize Aggregate @@ -28,24 +114,105 @@ explain (costs off) -> Parallel Seq Scan on f_star (11 rows) -select count(*) from a_star; +select round(avg(aa)), sum(aa) from a_star a4; + round | sum +-------+----- + 14 | 355 +(1 row) + +reset enable_parallel_append; +-- Parallel Append that runs serially +create function sp_test_func() returns setof text as +$$ select 'foo'::varchar union all select 'bar'::varchar $$ +language sql stable; +select sp_test_func() order by 1; + sp_test_func +-------------- + bar + foo +(2 rows) + +-- Parallel Append is not to be used when the subpath depends on the outer param +create table part_pa_test(a int, b int) partition by range(a); +create table part_pa_test_p1 partition of part_pa_test for values from (minvalue) to (0); +create table part_pa_test_p2 partition of part_pa_test for values from (0) to (maxvalue); +explain (costs off) + select (select max((select pa1.b from part_pa_test pa1 where pa1.a = pa2.a))) + from part_pa_test pa2; + QUERY PLAN +-------------------------------------------------------------- + Aggregate + -> Gather + Workers Planned: 3 + -> Parallel Append + -> Parallel Seq Scan on part_pa_test_p1 pa2 + -> Parallel Seq Scan on part_pa_test_p2 pa2_1 + SubPlan 2 + -> Result + SubPlan 1 + -> Append + -> Seq Scan on part_pa_test_p1 pa1 + Filter: (a = pa2.a) + -> Seq Scan on part_pa_test_p2 pa1_1 + Filter: (a = pa2.a) +(14 rows) + +drop table part_pa_test; +-- test with leader participation disabled +set parallel_leader_participation = off; +explain (costs off) + select count(*) from tenk1 where stringu1 = 'GRAAAA'; + QUERY PLAN +--------------------------------------------------------- + Finalize Aggregate + -> Gather + Workers Planned: 4 + -> Partial Aggregate + -> Parallel Seq Scan on tenk1 + Filter: (stringu1 = 'GRAAAA'::name) +(6 rows) + +select count(*) from tenk1 where stringu1 = 'GRAAAA'; + count +------- + 15 +(1 row) + +-- test with leader participation disabled, but no workers available (so +-- the leader will have to run the plan despite the setting) +set max_parallel_workers = 0; +explain (costs off) + select count(*) from tenk1 where stringu1 = 'GRAAAA'; + QUERY PLAN +--------------------------------------------------------- + Finalize Aggregate + -> Gather + Workers Planned: 4 + -> Partial Aggregate + -> Parallel Seq Scan on tenk1 + Filter: (stringu1 = 'GRAAAA'::name) +(6 rows) + +select count(*) from tenk1 where stringu1 = 'GRAAAA'; count ------- - 50 + 15 (1 row) +reset max_parallel_workers; +reset parallel_leader_participation; -- test that parallel_restricted function doesn't run in worker alter table tenk1 set (parallel_workers = 4); explain (verbose, costs off) -select parallel_restricted(unique1) from tenk1 +select sp_parallel_restricted(unique1) from tenk1 where stringu1 = 'GRAAAA' order by 1; QUERY PLAN --------------------------------------------------------- Sort - Output: (parallel_restricted(unique1)) - Sort Key: (parallel_restricted(tenk1.unique1)) + Output: (sp_parallel_restricted(unique1)) + Sort Key: 
(sp_parallel_restricted(tenk1.unique1)) -> Gather - Output: parallel_restricted(unique1) + Output: sp_parallel_restricted(unique1) Workers Planned: 4 -> Parallel Seq Scan on public.tenk1 Output: unique1 @@ -90,17 +257,37 @@ explain (costs off) -- test that parallel plan for aggregates is not selected when -- target list contains parallel restricted clause. explain (costs off) - select sum(parallel_restricted(unique1)) from tenk1 - group by(parallel_restricted(unique1)); + select sum(sp_parallel_restricted(unique1)) from tenk1 + group by(sp_parallel_restricted(unique1)); QUERY PLAN ------------------------------------------------------------------- HashAggregate - Group Key: parallel_restricted(unique1) + Group Key: sp_parallel_restricted(unique1) -> Gather Workers Planned: 4 -> Parallel Index Only Scan using tenk1_unique1 on tenk1 (5 rows) +-- test prepared statement +prepare tenk1_count(integer) As select count((unique1)) from tenk1 where hundred > $1; +explain (costs off) execute tenk1_count(1); + QUERY PLAN +---------------------------------------------- + Finalize Aggregate + -> Gather + Workers Planned: 4 + -> Partial Aggregate + -> Parallel Seq Scan on tenk1 + Filter: (hundred > 1) +(6 rows) + +execute tenk1_count(1); + count +------- + 9800 +(1 row) + +deallocate tenk1_count; -- test parallel plans for queries containing un-correlated subplans. alter table tenk2 set (parallel_workers = 0); explain (costs off) @@ -138,6 +325,41 @@ explain (costs off) -> Seq Scan on tenk2 (4 rows) +alter table tenk2 reset (parallel_workers); +-- test parallel plan for a query containing initplan. +set enable_indexscan = off; +set enable_indexonlyscan = off; +set enable_bitmapscan = off; +alter table tenk2 set (parallel_workers = 2); +explain (costs off) + select count(*) from tenk1 + where tenk1.unique1 = (Select max(tenk2.unique1) from tenk2); + QUERY PLAN +------------------------------------------------------ + Aggregate + InitPlan 1 (returns $2) + -> Finalize Aggregate + -> Gather + Workers Planned: 2 + -> Partial Aggregate + -> Parallel Seq Scan on tenk2 + -> Gather + Workers Planned: 4 + Params Evaluated: $2 + -> Parallel Seq Scan on tenk1 + Filter: (unique1 = $2) +(12 rows) + +select count(*) from tenk1 + where tenk1.unique1 = (Select max(tenk2.unique1) from tenk2); + count +------- + 1 +(1 row) + +reset enable_indexscan; +reset enable_indexonlyscan; +reset enable_bitmapscan; alter table tenk2 reset (parallel_workers); -- test parallel index scans. 
set enable_seqscan to off; @@ -179,6 +401,61 @@ select count(*) from tenk1 where thousand > 95; 9040 (1 row) +-- test rescan cases too +set enable_material = false; +explain (costs off) +select * from + (select count(unique1) from tenk1 where hundred > 10) ss + right join (values (1),(2),(3)) v(x) on true; + QUERY PLAN +-------------------------------------------------------------------------- + Nested Loop Left Join + -> Values Scan on "*VALUES*" + -> Finalize Aggregate + -> Gather + Workers Planned: 4 + -> Partial Aggregate + -> Parallel Index Scan using tenk1_hundred on tenk1 + Index Cond: (hundred > 10) +(8 rows) + +select * from + (select count(unique1) from tenk1 where hundred > 10) ss + right join (values (1),(2),(3)) v(x) on true; + count | x +-------+--- + 8900 | 1 + 8900 | 2 + 8900 | 3 +(3 rows) + +explain (costs off) +select * from + (select count(*) from tenk1 where thousand > 99) ss + right join (values (1),(2),(3)) v(x) on true; + QUERY PLAN +-------------------------------------------------------------------------------------- + Nested Loop Left Join + -> Values Scan on "*VALUES*" + -> Finalize Aggregate + -> Gather + Workers Planned: 4 + -> Partial Aggregate + -> Parallel Index Only Scan using tenk1_thous_tenthous on tenk1 + Index Cond: (thousand > 99) +(8 rows) + +select * from + (select count(*) from tenk1 where thousand > 99) ss + right join (values (1),(2),(3)) v(x) on true; + count | x +-------+--- + 9000 | 1 + 9000 | 2 + 9000 | 3 +(3 rows) + +reset enable_material; reset enable_seqscan; reset enable_bitmapscan; -- test parallel bitmap heap scan. @@ -225,14 +502,71 @@ select count(*) from bmscantest where a>1; 99999 (1 row) +-- test accumulation of stats for parallel nodes reset enable_seqscan; +alter table tenk2 set (parallel_workers = 0); +explain (analyze, timing off, summary off, costs off) + select count(*) from tenk1, tenk2 where tenk1.hundred > 1 + and tenk2.thousand=0; + QUERY PLAN +-------------------------------------------------------------------------- + Aggregate (actual rows=1 loops=1) + -> Nested Loop (actual rows=98000 loops=1) + -> Seq Scan on tenk2 (actual rows=10 loops=1) + Filter: (thousand = 0) + Rows Removed by Filter: 9990 + -> Gather (actual rows=9800 loops=10) + Workers Planned: 4 + Workers Launched: 4 + -> Parallel Seq Scan on tenk1 (actual rows=1960 loops=50) + Filter: (hundred > 1) + Rows Removed by Filter: 40 +(11 rows) + +alter table tenk2 reset (parallel_workers); +reset work_mem; +create function explain_parallel_sort_stats() returns setof text +language plpgsql as +$$ +declare ln text; +begin + for ln in + explain (analyze, timing off, summary off, costs off) + select * from + (select ten from tenk1 where ten < 100 order by ten) ss + right join (values (1),(2),(3)) v(x) on true + loop + ln := regexp_replace(ln, 'Memory: \S*', 'Memory: xxx'); + return next ln; + end loop; +end; +$$; +select * from explain_parallel_sort_stats(); + explain_parallel_sort_stats +-------------------------------------------------------------------------- + Nested Loop Left Join (actual rows=30000 loops=1) + -> Values Scan on "*VALUES*" (actual rows=3 loops=1) + -> Gather Merge (actual rows=10000 loops=3) + Workers Planned: 4 + Workers Launched: 4 + -> Sort (actual rows=2000 loops=15) + Sort Key: tenk1.ten + Sort Method: quicksort Memory: xxx + Worker 0: Sort Method: quicksort Memory: xxx + Worker 1: Sort Method: quicksort Memory: xxx + Worker 2: Sort Method: quicksort Memory: xxx + Worker 3: Sort Method: quicksort Memory: xxx + -> Parallel Seq Scan on tenk1 
(actual rows=2000 loops=15) + Filter: (ten < 100) +(14 rows) + reset enable_indexscan; reset enable_hashjoin; reset enable_mergejoin; reset enable_material; reset effective_io_concurrency; -reset work_mem; drop table bmscantest; +drop function explain_parallel_sort_stats(); -- test parallel merge join path. set enable_hashjoin to off; set enable_nestloop to off; @@ -300,6 +634,220 @@ select count(*) from tenk1 group by twenty; 500 (20 rows) +--test expressions in targetlist are pushed down for gather merge +create function sp_simple_func(var1 integer) returns integer +as $$ +begin + return var1 + 10; +end; +$$ language plpgsql PARALLEL SAFE; +explain (costs off, verbose) + select ten, sp_simple_func(ten) from tenk1 where ten < 100 order by ten; + QUERY PLAN +----------------------------------------------------- + Gather Merge + Output: ten, (sp_simple_func(ten)) + Workers Planned: 4 + -> Result + Output: ten, sp_simple_func(ten) + -> Sort + Output: ten + Sort Key: tenk1.ten + -> Parallel Seq Scan on public.tenk1 + Output: ten + Filter: (tenk1.ten < 100) +(11 rows) + +drop function sp_simple_func(integer); +-- test handling of SRFs in targetlist (bug in 10.0) +explain (costs off) + select count(*), generate_series(1,2) from tenk1 group by twenty; + QUERY PLAN +---------------------------------------------------------- + ProjectSet + -> Finalize GroupAggregate + Group Key: twenty + -> Gather Merge + Workers Planned: 4 + -> Partial GroupAggregate + Group Key: twenty + -> Sort + Sort Key: twenty + -> Parallel Seq Scan on tenk1 +(10 rows) + +select count(*), generate_series(1,2) from tenk1 group by twenty; + count | generate_series +-------+----------------- + 500 | 1 + 500 | 2 + 500 | 1 + 500 | 2 + 500 | 1 + 500 | 2 + 500 | 1 + 500 | 2 + 500 | 1 + 500 | 2 + 500 | 1 + 500 | 2 + 500 | 1 + 500 | 2 + 500 | 1 + 500 | 2 + 500 | 1 + 500 | 2 + 500 | 1 + 500 | 2 + 500 | 1 + 500 | 2 + 500 | 1 + 500 | 2 + 500 | 1 + 500 | 2 + 500 | 1 + 500 | 2 + 500 | 1 + 500 | 2 + 500 | 1 + 500 | 2 + 500 | 1 + 500 | 2 + 500 | 1 + 500 | 2 + 500 | 1 + 500 | 2 + 500 | 1 + 500 | 2 +(40 rows) + +-- test gather merge with parallel leader participation disabled +set parallel_leader_participation = off; +explain (costs off) + select count(*) from tenk1 group by twenty; + QUERY PLAN +---------------------------------------------------- + Finalize GroupAggregate + Group Key: twenty + -> Gather Merge + Workers Planned: 4 + -> Partial GroupAggregate + Group Key: twenty + -> Sort + Sort Key: twenty + -> Parallel Seq Scan on tenk1 +(9 rows) + +select count(*) from tenk1 group by twenty; + count +------- + 500 + 500 + 500 + 500 + 500 + 500 + 500 + 500 + 500 + 500 + 500 + 500 + 500 + 500 + 500 + 500 + 500 + 500 + 500 + 500 +(20 rows) + +reset parallel_leader_participation; +--test rescan behavior of gather merge +set enable_material = false; +explain (costs off) +select * from + (select string4, count(unique2) + from tenk1 group by string4 order by string4) ss + right join (values (1),(2),(3)) v(x) on true; + QUERY PLAN +---------------------------------------------------------- + Nested Loop Left Join + -> Values Scan on "*VALUES*" + -> Finalize GroupAggregate + Group Key: tenk1.string4 + -> Gather Merge + Workers Planned: 4 + -> Partial GroupAggregate + Group Key: tenk1.string4 + -> Sort + Sort Key: tenk1.string4 + -> Parallel Seq Scan on tenk1 +(11 rows) + +select * from + (select string4, count(unique2) + from tenk1 group by string4 order by string4) ss + right join (values (1),(2),(3)) v(x) on true; + string4 | count | x 
+---------+-------+--- + AAAAxx | 2500 | 1 + HHHHxx | 2500 | 1 + OOOOxx | 2500 | 1 + VVVVxx | 2500 | 1 + AAAAxx | 2500 | 2 + HHHHxx | 2500 | 2 + OOOOxx | 2500 | 2 + VVVVxx | 2500 | 2 + AAAAxx | 2500 | 3 + HHHHxx | 2500 | 3 + OOOOxx | 2500 | 3 + VVVVxx | 2500 | 3 +(12 rows) + +reset enable_material; +reset enable_hashagg; +-- check parallelized int8 aggregate (bug #14897) +explain (costs off) +select avg(unique1::int8) from tenk1; + QUERY PLAN +------------------------------------------------------------------------- + Finalize Aggregate + -> Gather + Workers Planned: 4 + -> Partial Aggregate + -> Parallel Index Only Scan using tenk1_unique1 on tenk1 +(5 rows) + +select avg(unique1::int8) from tenk1; + avg +----------------------- + 4999.5000000000000000 +(1 row) + +-- gather merge test with a LIMIT +explain (costs off) + select fivethous from tenk1 order by fivethous limit 4; + QUERY PLAN +---------------------------------------------- + Limit + -> Gather Merge + Workers Planned: 4 + -> Sort + Sort Key: fivethous + -> Parallel Seq Scan on tenk1 +(6 rows) + +select fivethous from tenk1 order by fivethous limit 4; + fivethous +----------- + 0 + 0 + 1 + 1 +(4 rows) + -- gather merge test with 0 worker set max_parallel_workers = 0; explain (costs off) @@ -324,9 +872,36 @@ select string4 from tenk1 order by string4 limit 5; AAAAxx (5 rows) +-- gather merge test with 0 workers, with parallel leader +-- participation disabled (the leader will have to run the plan +-- despite the setting) +set parallel_leader_participation = off; +explain (costs off) + select string4 from tenk1 order by string4 limit 5; + QUERY PLAN +---------------------------------------------- + Limit + -> Gather Merge + Workers Planned: 4 + -> Sort + Sort Key: string4 + -> Parallel Seq Scan on tenk1 +(6 rows) + +select string4 from tenk1 order by string4 limit 5; + string4 +--------- + AAAAxx + AAAAxx + AAAAxx + AAAAxx + AAAAxx +(5 rows) + +reset parallel_leader_participation; reset max_parallel_workers; -reset enable_hashagg; -set force_parallel_mode=1; +SAVEPOINT settings; +SET LOCAL force_parallel_mode = 1; explain (costs off) select stringu1::int2 from tenk1 where unique1 = 1; QUERY PLAN @@ -338,7 +913,99 @@ explain (costs off) Index Cond: (unique1 = 1) (5 rows) +ROLLBACK TO SAVEPOINT settings; +-- exercise record typmod remapping between backends +CREATE FUNCTION make_record(n int) + RETURNS RECORD LANGUAGE plpgsql PARALLEL SAFE AS +$$ +BEGIN + RETURN CASE n + WHEN 1 THEN ROW(1) + WHEN 2 THEN ROW(1, 2) + WHEN 3 THEN ROW(1, 2, 3) + WHEN 4 THEN ROW(1, 2, 3, 4) + ELSE ROW(1, 2, 3, 4, 5) + END; +END; +$$; +SAVEPOINT settings; +SET LOCAL force_parallel_mode = 1; +SELECT make_record(x) FROM (SELECT generate_series(1, 5) x) ss ORDER BY x; + make_record +------------- + (1) + (1,2) + (1,2,3) + (1,2,3,4) + (1,2,3,4,5) +(5 rows) + +ROLLBACK TO SAVEPOINT settings; +DROP function make_record(n int); +-- test the sanity of parallel query after the active role is dropped. +drop role if exists regress_parallel_worker; +NOTICE: role "regress_parallel_worker" does not exist, skipping +create role regress_parallel_worker; +set role regress_parallel_worker; +reset session authorization; +drop role regress_parallel_worker; +set force_parallel_mode = 1; +select count(*) from tenk1; + count +------- + 10000 +(1 row) + +reset force_parallel_mode; +reset role; +-- Window function calculation can't be pushed to workers. 
+explain (costs off, verbose) + select count(*) from tenk1 a where (unique1, two) in + (select unique1, row_number() over() from tenk1 b); + QUERY PLAN +---------------------------------------------------------------------------------------------- + Aggregate + Output: count(*) + -> Hash Semi Join + Hash Cond: ((a.unique1 = b.unique1) AND (a.two = (row_number() OVER (?)))) + -> Gather + Output: a.unique1, a.two + Workers Planned: 4 + -> Parallel Seq Scan on public.tenk1 a + Output: a.unique1, a.two + -> Hash + Output: b.unique1, (row_number() OVER (?)) + -> WindowAgg + Output: b.unique1, row_number() OVER (?) + -> Gather + Output: b.unique1 + Workers Planned: 4 + -> Parallel Index Only Scan using tenk1_unique1 on public.tenk1 b + Output: b.unique1 +(18 rows) + +-- LIMIT/OFFSET within sub-selects can't be pushed to workers. +explain (costs off) + select * from tenk1 a where two in + (select two from tenk1 b where stringu1 like '%AAAA' limit 3); + QUERY PLAN +--------------------------------------------------------------- + Hash Semi Join + Hash Cond: (a.two = b.two) + -> Gather + Workers Planned: 4 + -> Parallel Seq Scan on tenk1 a + -> Hash + -> Limit + -> Gather + Workers Planned: 4 + -> Parallel Seq Scan on tenk1 b + Filter: (stringu1 ~~ '%AAAA'::text) +(11 rows) + -- to increase the parallel query test coverage +SAVEPOINT settings; +SET LOCAL force_parallel_mode = 1; EXPLAIN (analyze, timing off, summary off, costs off) SELECT * FROM tenk1; QUERY PLAN ------------------------------------------------------------- @@ -348,8 +1015,122 @@ EXPLAIN (analyze, timing off, summary off, costs off) SELECT * FROM tenk1; -> Parallel Seq Scan on tenk1 (actual rows=2000 loops=5) (4 rows) +ROLLBACK TO SAVEPOINT settings; -- provoke error in worker +SAVEPOINT settings; +SET LOCAL force_parallel_mode = 1; select stringu1::int2 from tenk1 where unique1 = 1; -ERROR: invalid input syntax for integer: "BAAAAA" +ERROR: invalid input syntax for type smallint: "BAAAAA" CONTEXT: parallel worker +ROLLBACK TO SAVEPOINT settings; +-- test interaction with set-returning functions +SAVEPOINT settings; +-- multiple subqueries under a single Gather node +-- must set parallel_setup_cost > 0 to discourage multiple Gather nodes +SET LOCAL parallel_setup_cost = 10; +EXPLAIN (COSTS OFF) +SELECT unique1 FROM tenk1 WHERE fivethous = tenthous + 1 +UNION ALL +SELECT unique1 FROM tenk1 WHERE fivethous = tenthous + 1; + QUERY PLAN +---------------------------------------------------- + Gather + Workers Planned: 4 + -> Parallel Append + -> Parallel Seq Scan on tenk1 + Filter: (fivethous = (tenthous + 1)) + -> Parallel Seq Scan on tenk1 tenk1_1 + Filter: (fivethous = (tenthous + 1)) +(7 rows) + +ROLLBACK TO SAVEPOINT settings; +-- can't use multiple subqueries under a single Gather node due to initPlans +EXPLAIN (COSTS OFF) +SELECT unique1 FROM tenk1 WHERE fivethous = + (SELECT unique1 FROM tenk1 WHERE fivethous = 1 LIMIT 1) +UNION ALL +SELECT unique1 FROM tenk1 WHERE fivethous = + (SELECT unique2 FROM tenk1 WHERE fivethous = 1 LIMIT 1) +ORDER BY 1; + QUERY PLAN +-------------------------------------------------------------------- + Sort + Sort Key: tenk1.unique1 + -> Append + -> Gather + Workers Planned: 4 + Params Evaluated: $1 + InitPlan 1 (returns $1) + -> Limit + -> Gather + Workers Planned: 4 + -> Parallel Seq Scan on tenk1 tenk1_2 + Filter: (fivethous = 1) + -> Parallel Seq Scan on tenk1 + Filter: (fivethous = $1) + -> Gather + Workers Planned: 4 + Params Evaluated: $3 + InitPlan 2 (returns $3) + -> Limit + -> Gather + 
Workers Planned: 4 + -> Parallel Seq Scan on tenk1 tenk1_3 + Filter: (fivethous = 1) + -> Parallel Seq Scan on tenk1 tenk1_1 + Filter: (fivethous = $3) +(25 rows) + +-- test interaction with SRFs +SELECT * FROM information_schema.foreign_data_wrapper_options +ORDER BY 1, 2, 3; + foreign_data_wrapper_catalog | foreign_data_wrapper_name | option_name | option_value +------------------------------+---------------------------+-------------+-------------- +(0 rows) + +-- test passing expanded-value representations to workers +CREATE FUNCTION make_some_array(int,int) returns int[] as +$$declare x int[]; + begin + x[1] := $1; + x[2] := $2; + return x; + end$$ language plpgsql parallel safe; +CREATE TABLE fooarr(f1 text, f2 int[], f3 text); +INSERT INTO fooarr VALUES('1', ARRAY[1,2], 'one'); +PREPARE pstmt(text, int[]) AS SELECT * FROM fooarr WHERE f1 = $1 AND f2 = $2; +EXPLAIN (COSTS OFF) EXECUTE pstmt('1', make_some_array(1,2)); + QUERY PLAN +------------------------------------------------------------------ + Gather + Workers Planned: 3 + -> Parallel Seq Scan on fooarr + Filter: ((f1 = '1'::text) AND (f2 = '{1,2}'::integer[])) +(4 rows) + +EXECUTE pstmt('1', make_some_array(1,2)); + f1 | f2 | f3 +----+-------+----- + 1 | {1,2} | one +(1 row) + +DEALLOCATE pstmt; +-- test interaction between subquery and partial_paths +CREATE VIEW tenk1_vw_sec WITH (security_barrier) AS SELECT * FROM tenk1; +EXPLAIN (COSTS OFF) +SELECT 1 FROM tenk1_vw_sec + WHERE (SELECT sum(f1) FROM int4_tbl WHERE f1 < unique1) < 100; + QUERY PLAN +------------------------------------------------------------------- + Subquery Scan on tenk1_vw_sec + Filter: ((SubPlan 1) < 100) + -> Gather + Workers Planned: 4 + -> Parallel Index Only Scan using tenk1_unique1 on tenk1 + SubPlan 1 + -> Aggregate + -> Seq Scan on int4_tbl + Filter: (f1 < tenk1_vw_sec.unique1) +(9 rows) + rollback; diff --git a/src/test/regress/expected/sequence.out b/src/test/regress/expected/sequence.out index a43b52cfc1..a0d2b22d3c 100644 --- a/src/test/regress/expected/sequence.out +++ b/src/test/regress/expected/sequence.out @@ -79,6 +79,12 @@ SELECT * FROM serialTest1; force | 100 (3 rows) +SELECT pg_get_serial_sequence('serialTest1', 'f2'); + pg_get_serial_sequence +--------------------------- + public.serialtest1_f2_seq +(1 row) + -- test smallserial / bigserial CREATE TABLE serialTest2 (f1 text, f2 serial, f3 smallserial, f4 serial2, f5 bigserial, f6 serial8); @@ -287,11 +293,11 @@ CREATE TEMP TABLE t1 ( -- Both drops should fail, but with different error messages: DROP SEQUENCE t1_f1_seq; ERROR: cannot drop sequence t1_f1_seq because other objects depend on it -DETAIL: default for table t1 column f1 depends on sequence t1_f1_seq +DETAIL: default value for column f1 of table t1 depends on sequence t1_f1_seq HINT: Use DROP ... CASCADE to drop the dependent objects too. DROP SEQUENCE myseq2; ERROR: cannot drop sequence myseq2 because other objects depend on it -DETAIL: default for table t1 column f2 depends on sequence myseq2 +DETAIL: default value for column f2 of table t1 depends on sequence myseq2 HINT: Use DROP ... CASCADE to drop the dependent objects too. -- This however will work: DROP SEQUENCE myseq3; @@ -529,6 +535,19 @@ SELECT * FROM pg_sequence_parameters('sequence_test4'::regclass); -1 | -9223372036854775808 | -1 | -1 | f | 1 | 20 (1 row) +\d sequence_test4 + Sequence "public.sequence_test4" + Type | Start | Minimum | Maximum | Increment | Cycles? 
| Cache +--------+-------+----------------------+---------+-----------+---------+------- + bigint | -1 | -9223372036854775808 | -1 | -1 | no | 1 + +\d serialtest2_f2_seq + Sequence "public.serialtest2_f2_seq" + Type | Start | Minimum | Maximum | Increment | Cycles? | Cache +---------+-------+---------+------------+-----------+---------+------- + integer | 1 | 1 | 2147483647 | 1 | no | 1 +Owned by: public.serialtest2.f2 + -- Test comments COMMENT ON SEQUENCE asdf IS 'won''t work'; ERROR: relation "asdf" does not exist @@ -766,7 +785,7 @@ ROLLBACK; BEGIN; SET LOCAL SESSION AUTHORIZATION regress_seq_user; ALTER SEQUENCE sequence_test2 START WITH 1; -ERROR: must be owner of relation sequence_test2 +ERROR: must be owner of sequence sequence_test2 ROLLBACK; -- Sequences should get wiped out as well: DROP TABLE serialTest1, serialTest2; diff --git a/src/test/regress/expected/spgist.out b/src/test/regress/expected/spgist.out index 0691e910c4..9364b88bc2 100644 --- a/src/test/regress/expected/spgist.out +++ b/src/test/regress/expected/spgist.out @@ -4,7 +4,7 @@ -- There are other tests to test different SP-GiST opclasses. This is for -- testing SP-GiST code itself. create table spgist_point_tbl(id int4, p point); -create index spgist_point_idx on spgist_point_tbl using spgist(p); +create index spgist_point_idx on spgist_point_tbl using spgist(p) with (fillfactor = 75); -- Test vacuum-root operation. It gets invoked when the root is also a leaf, -- i.e. the index is very small. insert into spgist_point_tbl (id, p) @@ -23,6 +23,24 @@ delete from spgist_point_tbl where id % 2 = 1; -- would exercise it) delete from spgist_point_tbl where id < 10000; vacuum spgist_point_tbl; +-- Test rescan paths (cf. bug #15378) +-- use box and && rather than point, so that rescan happens when the +-- traverse stack is non-empty +create table spgist_box_tbl(id serial, b box); +insert into spgist_box_tbl(b) +select box(point(i,j),point(i+s,j+s)) + from generate_series(1,100,5) i, + generate_series(1,100,5) j, + generate_series(1,10) s; +create index spgist_box_idx on spgist_box_tbl using spgist (b); +select count(*) + from (values (point(5,5)),(point(8,8)),(point(12,12))) v(p) + where exists(select * from spgist_box_tbl b where b.b && box(v.p,v.p)); + count +------- + 3 +(1 row) + -- The point opclass's choose method only uses the spgMatchNode action, -- so the other actions are not tested by the above. Create an index using -- text opclass, which uses the others actions. @@ -37,3 +55,13 @@ select g, 'baaaaaaaaaaaaaar' || g from generate_series(1, 1000) g; -- tuple to be moved to another page. insert into spgist_text_tbl (id, t) select -g, 'f' || repeat('o', 100-g) || 'surprise' from generate_series(1, 100) g; +-- Test out-of-range fillfactor values +create index spgist_point_idx2 on spgist_point_tbl using spgist(p) with (fillfactor = 9); +ERROR: value 9 out of bounds for option "fillfactor" +DETAIL: Valid values are between "10" and "100". +create index spgist_point_idx2 on spgist_point_tbl using spgist(p) with (fillfactor = 101); +ERROR: value 101 out of bounds for option "fillfactor" +DETAIL: Valid values are between "10" and "100". 
+-- Modify fillfactor in existing index +alter index spgist_point_idx set (fillfactor = 90); +reindex index spgist_point_idx; diff --git a/src/test/regress/expected/stats.out b/src/test/regress/expected/stats.out index fc91f3ce36..991c287b11 100644 --- a/src/test/regress/expected/stats.out +++ b/src/test/regress/expected/stats.out @@ -136,12 +136,15 @@ SELECT count(*) FROM tenk2; (1 row) -- do an indexscan +-- make sure it is not a bitmap scan, which might skip fetching heap tuples +SET enable_bitmapscan TO off; SELECT count(*) FROM tenk2 WHERE unique1 = 1; count ------- 1 (1 row) +RESET enable_bitmapscan; -- We can't just call wait_for_stats() at this point, because we only -- transmit stats when the session goes idle, and we probably didn't -- transmit the last couple of counts yet thanks to the rate-limiting logic diff --git a/src/test/regress/expected/stats_ext.out b/src/test/regress/expected/stats_ext.out index 441cfaa411..054a381dad 100644 --- a/src/test/regress/expected/stats_ext.out +++ b/src/test/regress/expected/stats_ext.out @@ -21,7 +21,7 @@ LINE 1: CREATE STATISTICS tst FROM sometab; CREATE STATISTICS tst ON a, b FROM nonexistant; ERROR: relation "nonexistant" does not exist CREATE STATISTICS tst ON a, b FROM pg_class; -ERROR: column "a" referenced in statistics does not exist +ERROR: column "a" does not exist CREATE STATISTICS tst ON relname, relname, relnatts FROM pg_class; ERROR: duplicate column name in statistics definition CREATE STATISTICS tst ON relnatts + relpages FROM pg_class; @@ -29,7 +29,7 @@ ERROR: only simple column references are allowed in CREATE STATISTICS CREATE STATISTICS tst ON (relpages, reltuples) FROM pg_class; ERROR: only simple column references are allowed in CREATE STATISTICS CREATE STATISTICS tst (unrecognized) ON relname, relnatts FROM pg_class; -ERROR: unrecognized statistic type "unrecognized" +ERROR: unrecognized statistics kind "unrecognized" -- Ensure stats are dropped sanely, and test IF NOT EXISTS while at it CREATE TABLE ab1 (a INTEGER, b INTEGER, c INTEGER); CREATE STATISTICS IF NOT EXISTS ab1_a_b_stats ON a, b FROM ab1; diff --git a/src/test/regress/expected/strings.out b/src/test/regress/expected/strings.out index 35cadb24aa..189bdffdca 100644 --- a/src/test/regress/expected/strings.out +++ b/src/test/regress/expected/strings.out @@ -674,6 +674,24 @@ SELECT regexp_split_to_array('123456','.'); {"","","","","","",""} (1 row) +SELECT regexp_split_to_array('123456',''); + regexp_split_to_array +----------------------- + {1,2,3,4,5,6} +(1 row) + +SELECT regexp_split_to_array('123456','(?:)'); + regexp_split_to_array +----------------------- + {1,2,3,4,5,6} +(1 row) + +SELECT regexp_split_to_array('1',''); + regexp_split_to_array +----------------------- + {1} +(1 row) + -- errors SELECT foo, length(foo) FROM regexp_split_to_table('thE QUick bROWn FOx jUMPs ovEr The lazy dOG', 'e', 'zippy') AS foo; ERROR: invalid regexp option: "z" @@ -1166,6 +1184,31 @@ SELECT substr(f1, 99995, 10) from toasttest; 567890 (4 rows) +TRUNCATE TABLE toasttest; +INSERT INTO toasttest values (repeat('1234567890',300)); +INSERT INTO toasttest values (repeat('1234567890',300)); +INSERT INTO toasttest values (repeat('1234567890',300)); +INSERT INTO toasttest values (repeat('1234567890',300)); +-- expect >0 blocks +select 0 = pg_relation_size('pg_toast.pg_toast_'||(select oid from pg_class where relname = 'toasttest'))/current_setting('block_size')::integer as blocks; + blocks +-------- + f +(1 row) + +TRUNCATE TABLE toasttest; +ALTER TABLE toasttest set 
(toast_tuple_target = 4080); +INSERT INTO toasttest values (repeat('1234567890',300)); +INSERT INTO toasttest values (repeat('1234567890',300)); +INSERT INTO toasttest values (repeat('1234567890',300)); +INSERT INTO toasttest values (repeat('1234567890',300)); +-- expect 0 blocks +select 0 = pg_relation_size('pg_toast.pg_toast_'||(select oid from pg_class where relname = 'toasttest'))/current_setting('block_size')::integer as blocks; + blocks +-------- + t +(1 row) + DROP TABLE toasttest; -- -- test substr with toasted bytea values @@ -1414,6 +1457,58 @@ select md5('12345678901234567890123456789012345678901234567890123456789012345678 t (1 row) +-- +-- SHA-2 +-- +SET bytea_output TO hex; +SELECT sha224(''); + sha224 +------------------------------------------------------------ + \xd14a028c2a3a2bc9476102bb288234c415a2b01f828ea62ac5b3e42f +(1 row) + +SELECT sha224('The quick brown fox jumps over the lazy dog.'); + sha224 +------------------------------------------------------------ + \x619cba8e8e05826e9b8c519c0a5c68f4fb653e8a3d8aa04bb2c8cd4c +(1 row) + +SELECT sha256(''); + sha256 +-------------------------------------------------------------------- + \xe3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855 +(1 row) + +SELECT sha256('The quick brown fox jumps over the lazy dog.'); + sha256 +-------------------------------------------------------------------- + \xef537f25c895bfa782526529a9b63d97aa631564d5d789c2b765448c8635fb6c +(1 row) + +SELECT sha384(''); + sha384 +---------------------------------------------------------------------------------------------------- + \x38b060a751ac96384cd9327eb1b1e36a21fdb71114be07434c0cc7bf63f6e1da274edebfe76f65fbd51ad2f14898b95b +(1 row) + +SELECT sha384('The quick brown fox jumps over the lazy dog.'); + sha384 +---------------------------------------------------------------------------------------------------- + \xed892481d8272ca6df370bf706e4d7bc1b5739fa2177aae6c50e946678718fc67a7af2819a021c2fc34e91bdb63409d7 +(1 row) + +SELECT sha512(''); + sha512 +------------------------------------------------------------------------------------------------------------------------------------ + \xcf83e1357eefb8bdf1542850d66d8007d620e4050b5715dc83f4a921d36ce9ce47d0d13c5d85f2b0ff8318d2877eec2f63b931bd47417a81a538327af927da3e +(1 row) + +SELECT sha512('The quick brown fox jumps over the lazy dog.'); + sha512 +------------------------------------------------------------------------------------------------------------------------------------ + \x91ea1245f20d46ae9a037a989f54f1f790f0a47607eeb8a14d12890cea77a1bbc6c7ed9cf205e67b7f2b8fd4c7dfd3a7a8617e45f3c463d481c7e586c39ac1ed +(1 row) + -- -- test behavior of escape_string_warning and standard_conforming_strings options -- @@ -1500,6 +1595,7 @@ select 'a\\bcd' as f1, 'a\\b\'cd' as f2, 'a\\b\'''cd' as f3, 'abcd\\' as f4, ' -- -- Additional string functions -- +SET bytea_output TO escape; SELECT initcap('hi THOMAS'); initcap ----------- diff --git a/src/test/regress/expected/subselect.out b/src/test/regress/expected/subselect.out index ed7d6d8034..588d069589 100644 --- a/src/test/regress/expected/subselect.out +++ b/src/test/regress/expected/subselect.out @@ -232,23 +232,23 @@ SELECT *, pg_typeof(f1) FROM (3 rows) -- ... 
unless there's context to suggest differently -explain verbose select '42' union all select '43'; - QUERY PLAN -------------------------------------------------- - Append (cost=0.00..0.04 rows=2 width=32) - -> Result (cost=0.00..0.01 rows=1 width=32) +explain (verbose, costs off) select '42' union all select '43'; + QUERY PLAN +---------------------------- + Append + -> Result Output: '42'::text - -> Result (cost=0.00..0.01 rows=1 width=32) + -> Result Output: '43'::text (5 rows) -explain verbose select '42' union all select 43; - QUERY PLAN ------------------------------------------------- - Append (cost=0.00..0.04 rows=2 width=4) - -> Result (cost=0.00..0.01 rows=1 width=4) +explain (verbose, costs off) select '42' union all select 43; + QUERY PLAN +-------------------- + Append + -> Result Output: 42 - -> Result (cost=0.00..0.01 rows=1 width=4) + -> Result Output: 43 (5 rows) @@ -678,6 +678,20 @@ with q as (select max(f1) from int4_tbl group by f1 order by f1) (2147483647) (5 rows) +-- +-- Test case for sublinks pulled up into joinaliasvars lists in an +-- inherited update/delete query +-- +begin; -- this shouldn't delete anything, but be safe +delete from road +where exists ( + select 1 + from + int4_tbl cross join + ( select f1, array(select q1 from int8_tbl) as arr + from text_tbl ) ss + where road.name = ss.f1 ); +rollback; -- -- Test case for sublinks pushed down into subselects via join alias expansion -- @@ -826,6 +840,36 @@ select exists(select * from nocolumns); f (1 row) +-- +-- Check behavior with a SubPlan in VALUES (bug #14924) +-- +select val.x + from generate_series(1,10) as s(i), + lateral ( + values ((select s.i + 1)), (s.i + 101) + ) as val(x) +where s.i < 10 and (select val.x) < 110; + x +----- + 2 + 102 + 3 + 103 + 4 + 104 + 5 + 105 + 6 + 106 + 7 + 107 + 8 + 108 + 9 + 109 + 10 +(17 rows) + -- -- Check sane behavior with nested IN SubLinks -- @@ -1041,3 +1085,72 @@ NOTICE: x = 9, y = 13 (3 rows) drop function tattle(x int, y int); +-- +-- Test that LIMIT can be pushed to SORT through a subquery that just projects +-- columns. We check for that having happened by looking to see if EXPLAIN +-- ANALYZE shows that a top-N sort was used. We must suppress or filter away +-- all the non-invariant parts of the EXPLAIN ANALYZE output. 
+-- +create table sq_limit (pk int primary key, c1 int, c2 int); +insert into sq_limit values + (1, 1, 1), + (2, 2, 2), + (3, 3, 3), + (4, 4, 4), + (5, 1, 1), + (6, 2, 2), + (7, 3, 3), + (8, 4, 4); +create function explain_sq_limit() returns setof text language plpgsql as +$$ +declare ln text; +begin + for ln in + explain (analyze, summary off, timing off, costs off) + select * from (select pk,c2 from sq_limit order by c1,pk) as x limit 3 + loop + ln := regexp_replace(ln, 'Memory: \S*', 'Memory: xxx'); + -- this case might occur if force_parallel_mode is on: + ln := regexp_replace(ln, 'Worker 0: Sort Method', 'Sort Method'); + return next ln; + end loop; +end; +$$; +select * from explain_sq_limit(); + explain_sq_limit +---------------------------------------------------------------- + Limit (actual rows=3 loops=1) + -> Subquery Scan on x (actual rows=3 loops=1) + -> Sort (actual rows=3 loops=1) + Sort Key: sq_limit.c1, sq_limit.pk + Sort Method: top-N heapsort Memory: xxx + -> Seq Scan on sq_limit (actual rows=8 loops=1) +(6 rows) + +select * from (select pk,c2 from sq_limit order by c1,pk) as x limit 3; + pk | c2 +----+---- + 1 | 1 + 5 | 1 + 2 | 2 +(3 rows) + +drop function explain_sq_limit(); +drop table sq_limit; +-- +-- Ensure that backward scan direction isn't propagated into +-- expression subqueries (bug #15336) +-- +begin; +declare c1 scroll cursor for + select * from generate_series(1,4) i + where i <> all (values (2),(3)); +move forward all in c1; +fetch backward all in c1; + i +--- + 4 + 1 +(2 rows) + +commit; diff --git a/src/test/regress/expected/sysviews.out b/src/test/regress/expected/sysviews.out index 568b783f5e..a1c90eb905 100644 --- a/src/test/regress/expected/sysviews.out +++ b/src/test/regress/expected/sysviews.out @@ -70,21 +70,26 @@ select count(*) >= 0 as ok from pg_prepared_xacts; -- This is to record the prevailing planner enable_foo settings during -- a regression test run. select name, setting from pg_settings where name like 'enable%'; - name | setting -----------------------+--------- - enable_bitmapscan | on - enable_gathermerge | on - enable_hashagg | on - enable_hashjoin | on - enable_indexonlyscan | on - enable_indexscan | on - enable_material | on - enable_mergejoin | on - enable_nestloop | on - enable_seqscan | on - enable_sort | on - enable_tidscan | on -(12 rows) + name | setting +--------------------------------+--------- + enable_bitmapscan | on + enable_gathermerge | on + enable_hashagg | on + enable_hashjoin | on + enable_indexonlyscan | on + enable_indexscan | on + enable_material | on + enable_mergejoin | on + enable_nestloop | on + enable_parallel_append | on + enable_parallel_hash | on + enable_partition_pruning | on + enable_partitionwise_aggregate | off + enable_partitionwise_join | off + enable_seqscan | on + enable_sort | on + enable_tidscan | on +(17 rows) -- Test that the pg_timezone_names and pg_timezone_abbrevs views are -- more-or-less working. We can't test their contents in any great detail diff --git a/src/test/regress/expected/temp.out b/src/test/regress/expected/temp.out index addf1ec444..f018f17ca0 100644 --- a/src/test/regress/expected/temp.out +++ b/src/test/regress/expected/temp.out @@ -199,3 +199,105 @@ select pg_temp.whoami(); (1 row) drop table public.whereami; +-- For partitioned temp tables, ON COMMIT actions ignore storage-less +-- partitioned tables. 
+begin; +create temp table temp_parted_oncommit (a int) + partition by list (a) on commit delete rows; +create temp table temp_parted_oncommit_1 + partition of temp_parted_oncommit + for values in (1) on commit delete rows; +insert into temp_parted_oncommit values (1); +commit; +-- partitions are emptied by the previous commit +select * from temp_parted_oncommit; + a +--- +(0 rows) + +drop table temp_parted_oncommit; +-- Check dependencies between ON COMMIT actions with a partitioned +-- table and its partitions. Using ON COMMIT DROP on a parent removes +-- the whole set. +begin; +create temp table temp_parted_oncommit_test (a int) + partition by list (a) on commit drop; +create temp table temp_parted_oncommit_test1 + partition of temp_parted_oncommit_test + for values in (1) on commit delete rows; +create temp table temp_parted_oncommit_test2 + partition of temp_parted_oncommit_test + for values in (2) on commit drop; +insert into temp_parted_oncommit_test values (1), (2); +commit; +-- no relations remain in this case. +select relname from pg_class where relname like 'temp_parted_oncommit_test%'; + relname +--------- +(0 rows) + +-- Using ON COMMIT DELETE on a partitioned table does not remove +-- all rows if partitions preserve their data. +begin; +create temp table temp_parted_oncommit_test (a int) + partition by list (a) on commit delete rows; +create temp table temp_parted_oncommit_test1 + partition of temp_parted_oncommit_test + for values in (1) on commit preserve rows; +create temp table temp_parted_oncommit_test2 + partition of temp_parted_oncommit_test + for values in (2) on commit drop; +insert into temp_parted_oncommit_test values (1), (2); +commit; +-- Data from the remaining partition is still here as its rows are +-- preserved. +select * from temp_parted_oncommit_test; + a +--- + 1 +(1 row) + +-- two relations remain in this case. +select relname from pg_class where relname like 'temp_parted_oncommit_test%'; + relname +---------------------------- + temp_parted_oncommit_test + temp_parted_oncommit_test1 +(2 rows) + +drop table temp_parted_oncommit_test; +-- Check dependencies between ON COMMIT actions with inheritance trees. +-- Using ON COMMIT DROP on a parent removes the whole set. +begin; +create temp table temp_inh_oncommit_test (a int) on commit drop; +create temp table temp_inh_oncommit_test1 () + inherits(temp_inh_oncommit_test) on commit delete rows; +insert into temp_inh_oncommit_test1 values (1); +commit; +-- no relations remain in this case +select relname from pg_class where relname like 'temp_inh_oncommit_test%'; + relname +--------- +(0 rows) + +-- Data on the parent is removed, and the child goes away. 
+begin; +create temp table temp_inh_oncommit_test (a int) on commit delete rows; +create temp table temp_inh_oncommit_test1 () + inherits(temp_inh_oncommit_test) on commit drop; +insert into temp_inh_oncommit_test1 values (1); +insert into temp_inh_oncommit_test values (1); +commit; +select * from temp_inh_oncommit_test; + a +--- +(0 rows) + +-- one relation remains +select relname from pg_class where relname like 'temp_inh_oncommit_test%'; + relname +------------------------ + temp_inh_oncommit_test +(1 row) + +drop table temp_inh_oncommit_test; diff --git a/src/test/regress/expected/text.out b/src/test/regress/expected/text.out index 829f2c224c..d28961cf88 100644 --- a/src/test/regress/expected/text.out +++ b/src/test/regress/expected/text.out @@ -50,7 +50,7 @@ select 3 || 4.0; ERROR: operator does not exist: integer || numeric LINE 1: select 3 || 4.0; ^ -HINT: No operator matches the given name and argument type(s). You might need to add explicit type casts. +HINT: No operator matches the given name and argument types. You might need to add explicit type casts. /* * various string functions */ diff --git a/src/test/regress/expected/timestamptz.out b/src/test/regress/expected/timestamptz.out index 7226670962..2340f30794 100644 --- a/src/test/regress/expected/timestamptz.out +++ b/src/test/regress/expected/timestamptz.out @@ -1699,54 +1699,68 @@ SELECT '' AS to_char_11, to_char(d1, 'FMIYYY FMIYY FMIY FMI FMIW FMIDDD FMID') | 2001 1 1 1 1 1 1 (66 rows) --- Check OF with various zone offsets, particularly fractional hours +-- Check OF, TZH, TZM with various zone offsets, particularly fractional hours SET timezone = '00:00'; -SELECT to_char(now(), 'OF'); - to_char ---------- - +00 +SELECT to_char(now(), 'OF') as "OF", to_char(now(), 'TZH:TZM') as "TZH:TZM"; + OF | TZH:TZM +-----+--------- + +00 | +00:00 (1 row) SET timezone = '+02:00'; -SELECT to_char(now(), 'OF'); - to_char ---------- - -02 +SELECT to_char(now(), 'OF') as "OF", to_char(now(), 'TZH:TZM') as "TZH:TZM"; + OF | TZH:TZM +-----+--------- + -02 | -02:00 (1 row) SET timezone = '-13:00'; -SELECT to_char(now(), 'OF'); - to_char ---------- - +13 +SELECT to_char(now(), 'OF') as "OF", to_char(now(), 'TZH:TZM') as "TZH:TZM"; + OF | TZH:TZM +-----+--------- + +13 | +13:00 (1 row) SET timezone = '-00:30'; -SELECT to_char(now(), 'OF'); - to_char ---------- - +00:30 +SELECT to_char(now(), 'OF') as "OF", to_char(now(), 'TZH:TZM') as "TZH:TZM"; + OF | TZH:TZM +--------+--------- + +00:30 | +00:30 (1 row) SET timezone = '00:30'; -SELECT to_char(now(), 'OF'); - to_char ---------- - -00:30 +SELECT to_char(now(), 'OF') as "OF", to_char(now(), 'TZH:TZM') as "TZH:TZM"; + OF | TZH:TZM +--------+--------- + -00:30 | -00:30 (1 row) SET timezone = '-04:30'; -SELECT to_char(now(), 'OF'); - to_char ---------- - +04:30 +SELECT to_char(now(), 'OF') as "OF", to_char(now(), 'TZH:TZM') as "TZH:TZM"; + OF | TZH:TZM +--------+--------- + +04:30 | +04:30 (1 row) SET timezone = '04:30'; -SELECT to_char(now(), 'OF'); - to_char ---------- - -04:30 +SELECT to_char(now(), 'OF') as "OF", to_char(now(), 'TZH:TZM') as "TZH:TZM"; + OF | TZH:TZM +--------+--------- + -04:30 | -04:30 +(1 row) + +SET timezone = '-04:15'; +SELECT to_char(now(), 'OF') as "OF", to_char(now(), 'TZH:TZM') as "TZH:TZM"; + OF | TZH:TZM +--------+--------- + +04:15 | +04:15 +(1 row) + +SET timezone = '04:15'; +SELECT to_char(now(), 'OF') as "OF", to_char(now(), 'TZH:TZM') as "TZH:TZM"; + OF | TZH:TZM +--------+--------- + -04:15 | -04:15 (1 row) RESET timezone; @@ -1820,7 +1834,7 @@ WITH tzs (tz) AS 
(VALUES -- these should fail SELECT make_timestamptz(1973, 07, 15, 08, 15, 55.33, '2'); -ERROR: invalid input syntax for numeric time zone: "2" +ERROR: invalid input syntax for type numeric time zone: "2" HINT: Numeric time zones must have "-" or "+" as first character. SELECT make_timestamptz(2014, 12, 10, 10, 10, 10, '+16'); ERROR: numeric time zone "+16" out of range diff --git a/src/test/regress/expected/timetz.out b/src/test/regress/expected/timetz.out index 43911312f9..33ff8e18c9 100644 --- a/src/test/regress/expected/timetz.out +++ b/src/test/regress/expected/timetz.out @@ -92,4 +92,4 @@ SELECT f1 + time with time zone '00:01' AS "Illegal" FROM TIMETZ_TBL; ERROR: operator does not exist: time with time zone + time with time zone LINE 1: SELECT f1 + time with time zone '00:01' AS "Illegal" FROM TI... ^ -HINT: No operator matches the given name and argument type(s). You might need to add explicit type casts. +HINT: No operator matches the given name and argument types. You might need to add explicit type casts. diff --git a/src/test/regress/expected/tinterval.out b/src/test/regress/expected/tinterval.out deleted file mode 100644 index a0189729fc..0000000000 --- a/src/test/regress/expected/tinterval.out +++ /dev/null @@ -1,172 +0,0 @@ --- --- TINTERVAL --- -CREATE TABLE TINTERVAL_TBL (f1 tinterval); --- Should accept any abstime, --- so do not bother with extensive testing of values -INSERT INTO TINTERVAL_TBL (f1) - VALUES ('["-infinity" "infinity"]'); -INSERT INTO TINTERVAL_TBL (f1) - VALUES ('["May 10, 1947 23:59:12" "Jan 14, 1973 03:14:21"]'); -INSERT INTO TINTERVAL_TBL (f1) - VALUES ('["Sep 4, 1983 23:59:12" "Oct 4, 1983 23:59:12"]'); -INSERT INTO TINTERVAL_TBL (f1) - VALUES ('["epoch" "Mon May 1 00:30:30 1995"]'); -INSERT INTO TINTERVAL_TBL (f1) - VALUES ('["Feb 15 1990 12:15:03" "2001-09-23 11:12:13"]'); --- badly formatted tintervals -INSERT INTO TINTERVAL_TBL (f1) - VALUES ('["bad time specifications" ""]'); -ERROR: invalid input syntax for type abstime: "bad time specifications" -LINE 2: VALUES ('["bad time specifications" ""]'); - ^ -INSERT INTO TINTERVAL_TBL (f1) - VALUES ('["" "infinity"]'); -ERROR: invalid input syntax for type abstime: "" -LINE 2: VALUES ('["" "infinity"]'); - ^ --- test tinterval operators -SELECT '' AS five, * FROM TINTERVAL_TBL; - five | f1 -------+----------------------------------------------------------------- - | ["-infinity" "infinity"] - | ["Sat May 10 23:59:12 1947 PST" "Sun Jan 14 03:14:21 1973 PST"] - | ["Sun Sep 04 23:59:12 1983 PDT" "Tue Oct 04 23:59:12 1983 PDT"] - | ["Wed Dec 31 16:00:00 1969 PST" "Mon May 01 00:30:30 1995 PDT"] - | ["Thu Feb 15 12:15:03 1990 PST" "Sun Sep 23 11:12:13 2001 PDT"] -(5 rows) - --- length == -SELECT '' AS one, t.* - FROM TINTERVAL_TBL t - WHERE t.f1 #= '@ 1 months'; - one | f1 ------+----------------------------------------------------------------- - | ["Sun Sep 04 23:59:12 1983 PDT" "Tue Oct 04 23:59:12 1983 PDT"] -(1 row) - --- length <> -SELECT '' AS three, t.* - FROM TINTERVAL_TBL t - WHERE t.f1 #<> '@ 1 months'; - three | f1 --------+----------------------------------------------------------------- - | ["Sat May 10 23:59:12 1947 PST" "Sun Jan 14 03:14:21 1973 PST"] - | ["Wed Dec 31 16:00:00 1969 PST" "Mon May 01 00:30:30 1995 PDT"] - | ["Thu Feb 15 12:15:03 1990 PST" "Sun Sep 23 11:12:13 2001 PDT"] -(3 rows) - --- length < -SELECT '' AS zero, t.* - FROM TINTERVAL_TBL t - WHERE t.f1 #< '@ 1 month'; - zero | f1 -------+---- -(0 rows) - --- length <= -SELECT '' AS one, t.* - FROM TINTERVAL_TBL t - WHERE t.f1 
#<= '@ 1 month'; - one | f1 ------+----------------------------------------------------------------- - | ["Sun Sep 04 23:59:12 1983 PDT" "Tue Oct 04 23:59:12 1983 PDT"] -(1 row) - --- length > -SELECT '' AS three, t.* - FROM TINTERVAL_TBL t - WHERE t.f1 #> '@ 1 year'; - three | f1 --------+----------------------------------------------------------------- - | ["Sat May 10 23:59:12 1947 PST" "Sun Jan 14 03:14:21 1973 PST"] - | ["Wed Dec 31 16:00:00 1969 PST" "Mon May 01 00:30:30 1995 PDT"] - | ["Thu Feb 15 12:15:03 1990 PST" "Sun Sep 23 11:12:13 2001 PDT"] -(3 rows) - --- length >= -SELECT '' AS three, t.* - FROM TINTERVAL_TBL t - WHERE t.f1 #>= '@ 3 years'; - three | f1 --------+----------------------------------------------------------------- - | ["Sat May 10 23:59:12 1947 PST" "Sun Jan 14 03:14:21 1973 PST"] - | ["Wed Dec 31 16:00:00 1969 PST" "Mon May 01 00:30:30 1995 PDT"] - | ["Thu Feb 15 12:15:03 1990 PST" "Sun Sep 23 11:12:13 2001 PDT"] -(3 rows) - --- overlaps -SELECT '' AS three, t1.* - FROM TINTERVAL_TBL t1 - WHERE t1.f1 && - tinterval '["Aug 15 14:23:19 1983" "Sep 16 14:23:19 1983"]'; - three | f1 --------+----------------------------------------------------------------- - | ["-infinity" "infinity"] - | ["Sun Sep 04 23:59:12 1983 PDT" "Tue Oct 04 23:59:12 1983 PDT"] - | ["Wed Dec 31 16:00:00 1969 PST" "Mon May 01 00:30:30 1995 PDT"] -(3 rows) - -SELECT '' AS five, t1.f1, t2.f1 - FROM TINTERVAL_TBL t1, TINTERVAL_TBL t2 - WHERE t1.f1 && t2.f1 and - t1.f1 = t2.f1 - ORDER BY t1.f1, t2.f1; - five | f1 | f1 -------+-----------------------------------------------------------------+----------------------------------------------------------------- - | ["-infinity" "infinity"] | ["-infinity" "infinity"] - | ["Sun Sep 04 23:59:12 1983 PDT" "Tue Oct 04 23:59:12 1983 PDT"] | ["Sun Sep 04 23:59:12 1983 PDT" "Tue Oct 04 23:59:12 1983 PDT"] - | ["Thu Feb 15 12:15:03 1990 PST" "Sun Sep 23 11:12:13 2001 PDT"] | ["Thu Feb 15 12:15:03 1990 PST" "Sun Sep 23 11:12:13 2001 PDT"] - | ["Wed Dec 31 16:00:00 1969 PST" "Mon May 01 00:30:30 1995 PDT"] | ["Wed Dec 31 16:00:00 1969 PST" "Mon May 01 00:30:30 1995 PDT"] - | ["Sat May 10 23:59:12 1947 PST" "Sun Jan 14 03:14:21 1973 PST"] | ["Sat May 10 23:59:12 1947 PST" "Sun Jan 14 03:14:21 1973 PST"] -(5 rows) - -SELECT '' AS fourteen, t1.f1 AS interval1, t2.f1 AS interval2 - FROM TINTERVAL_TBL t1, TINTERVAL_TBL t2 - WHERE t1.f1 && t2.f1 and not t1.f1 = t2.f1 - ORDER BY interval1, interval2; - fourteen | interval1 | interval2 -----------+-----------------------------------------------------------------+----------------------------------------------------------------- - | ["-infinity" "infinity"] | ["Sun Sep 04 23:59:12 1983 PDT" "Tue Oct 04 23:59:12 1983 PDT"] - | ["-infinity" "infinity"] | ["Thu Feb 15 12:15:03 1990 PST" "Sun Sep 23 11:12:13 2001 PDT"] - | ["-infinity" "infinity"] | ["Wed Dec 31 16:00:00 1969 PST" "Mon May 01 00:30:30 1995 PDT"] - | ["-infinity" "infinity"] | ["Sat May 10 23:59:12 1947 PST" "Sun Jan 14 03:14:21 1973 PST"] - | ["Sun Sep 04 23:59:12 1983 PDT" "Tue Oct 04 23:59:12 1983 PDT"] | ["-infinity" "infinity"] - | ["Sun Sep 04 23:59:12 1983 PDT" "Tue Oct 04 23:59:12 1983 PDT"] | ["Wed Dec 31 16:00:00 1969 PST" "Mon May 01 00:30:30 1995 PDT"] - | ["Thu Feb 15 12:15:03 1990 PST" "Sun Sep 23 11:12:13 2001 PDT"] | ["-infinity" "infinity"] - | ["Thu Feb 15 12:15:03 1990 PST" "Sun Sep 23 11:12:13 2001 PDT"] | ["Wed Dec 31 16:00:00 1969 PST" "Mon May 01 00:30:30 1995 PDT"] - | ["Wed Dec 31 16:00:00 1969 PST" "Mon May 01 00:30:30 1995 PDT"] | 
["-infinity" "infinity"] - | ["Wed Dec 31 16:00:00 1969 PST" "Mon May 01 00:30:30 1995 PDT"] | ["Sun Sep 04 23:59:12 1983 PDT" "Tue Oct 04 23:59:12 1983 PDT"] - | ["Wed Dec 31 16:00:00 1969 PST" "Mon May 01 00:30:30 1995 PDT"] | ["Thu Feb 15 12:15:03 1990 PST" "Sun Sep 23 11:12:13 2001 PDT"] - | ["Wed Dec 31 16:00:00 1969 PST" "Mon May 01 00:30:30 1995 PDT"] | ["Sat May 10 23:59:12 1947 PST" "Sun Jan 14 03:14:21 1973 PST"] - | ["Sat May 10 23:59:12 1947 PST" "Sun Jan 14 03:14:21 1973 PST"] | ["-infinity" "infinity"] - | ["Sat May 10 23:59:12 1947 PST" "Sun Jan 14 03:14:21 1973 PST"] | ["Wed Dec 31 16:00:00 1969 PST" "Mon May 01 00:30:30 1995 PDT"] -(14 rows) - --- contains -SELECT '' AS five, t1.f1 - FROM TINTERVAL_TBL t1 - WHERE not t1.f1 << - tinterval '["Aug 15 14:23:19 1980" "Sep 16 14:23:19 1990"]' - ORDER BY t1.f1; - five | f1 -------+----------------------------------------------------------------- - | ["Sun Sep 04 23:59:12 1983 PDT" "Tue Oct 04 23:59:12 1983 PDT"] - | ["Thu Feb 15 12:15:03 1990 PST" "Sun Sep 23 11:12:13 2001 PDT"] - | ["Sat May 10 23:59:12 1947 PST" "Sun Jan 14 03:14:21 1973 PST"] -(3 rows) - --- make time interval -SELECT '' AS three, t1.f1 - FROM TINTERVAL_TBL t1 - WHERE t1.f1 && - (abstime 'Aug 15 14:23:19 1983' <#> - abstime 'Sep 16 14:23:19 1983') - ORDER BY t1.f1; - three | f1 --------+----------------------------------------------------------------- - | ["-infinity" "infinity"] - | ["Sun Sep 04 23:59:12 1983 PDT" "Tue Oct 04 23:59:12 1983 PDT"] - | ["Wed Dec 31 16:00:00 1969 PST" "Mon May 01 00:30:30 1995 PDT"] -(3 rows) - diff --git a/src/test/regress/expected/transactions.out b/src/test/regress/expected/transactions.out index d9b702d016..69e176c525 100644 --- a/src/test/regress/expected/transactions.out +++ b/src/test/regress/expected/transactions.out @@ -146,69 +146,69 @@ COMMIT; -- Subtransactions, basic tests -- create & drop tables SET SESSION CHARACTERISTICS AS TRANSACTION READ WRITE; -CREATE TABLE foobar (a int); +CREATE TABLE trans_foobar (a int); BEGIN; - CREATE TABLE foo (a int); + CREATE TABLE trans_foo (a int); SAVEPOINT one; - DROP TABLE foo; - CREATE TABLE bar (a int); + DROP TABLE trans_foo; + CREATE TABLE trans_bar (a int); ROLLBACK TO SAVEPOINT one; RELEASE SAVEPOINT one; SAVEPOINT two; - CREATE TABLE baz (a int); + CREATE TABLE trans_baz (a int); RELEASE SAVEPOINT two; - drop TABLE foobar; - CREATE TABLE barbaz (a int); + drop TABLE trans_foobar; + CREATE TABLE trans_barbaz (a int); COMMIT; --- should exist: barbaz, baz, foo -SELECT * FROM foo; -- should be empty +-- should exist: trans_barbaz, trans_baz, trans_foo +SELECT * FROM trans_foo; -- should be empty a --- (0 rows) -SELECT * FROM bar; -- shouldn't exist -ERROR: relation "bar" does not exist -LINE 1: SELECT * FROM bar; +SELECT * FROM trans_bar; -- shouldn't exist +ERROR: relation "trans_bar" does not exist +LINE 1: SELECT * FROM trans_bar; ^ -SELECT * FROM barbaz; -- should be empty +SELECT * FROM trans_barbaz; -- should be empty a --- (0 rows) -SELECT * FROM baz; -- should be empty +SELECT * FROM trans_baz; -- should be empty a --- (0 rows) -- inserts BEGIN; - INSERT INTO foo VALUES (1); + INSERT INTO trans_foo VALUES (1); SAVEPOINT one; - INSERT into bar VALUES (1); -ERROR: relation "bar" does not exist -LINE 1: INSERT into bar VALUES (1); + INSERT into trans_bar VALUES (1); +ERROR: relation "trans_bar" does not exist +LINE 1: INSERT into trans_bar VALUES (1); ^ ROLLBACK TO one; RELEASE SAVEPOINT one; SAVEPOINT two; - INSERT into barbaz VALUES (1); + INSERT into trans_barbaz 
VALUES (1); RELEASE two; SAVEPOINT three; SAVEPOINT four; - INSERT INTO foo VALUES (2); + INSERT INTO trans_foo VALUES (2); RELEASE SAVEPOINT four; ROLLBACK TO SAVEPOINT three; RELEASE SAVEPOINT three; - INSERT INTO foo VALUES (3); + INSERT INTO trans_foo VALUES (3); COMMIT; -SELECT * FROM foo; -- should have 1 and 3 +SELECT * FROM trans_foo; -- should have 1 and 3 a --- 1 3 (2 rows) -SELECT * FROM barbaz; -- should have 1 +SELECT * FROM trans_barbaz; -- should have 1 a --- 1 @@ -217,9 +217,9 @@ SELECT * FROM barbaz; -- should have 1 -- test whole-tree commit BEGIN; SAVEPOINT one; - SELECT foo; -ERROR: column "foo" does not exist -LINE 1: SELECT foo; + SELECT trans_foo; +ERROR: column "trans_foo" does not exist +LINE 1: SELECT trans_foo; ^ ROLLBACK TO SAVEPOINT one; RELEASE SAVEPOINT one; @@ -266,9 +266,9 @@ BEGIN; INSERT INTO savepoints VALUES (4); SAVEPOINT one; INSERT INTO savepoints VALUES (5); - SELECT foo; -ERROR: column "foo" does not exist -LINE 1: SELECT foo; + SELECT trans_foo; +ERROR: column "trans_foo" does not exist +LINE 1: SELECT trans_foo; ^ COMMIT; SELECT * FROM savepoints; @@ -549,9 +549,9 @@ DETAIL: Key (a)=(1) already exists. ERROR: duplicate key value violates unique constraint "koju_a_key" DETAIL: Key (a)=(1) already exists. ROLLBACK; -DROP TABLE foo; -DROP TABLE baz; -DROP TABLE barbaz; +DROP TABLE trans_foo; +DROP TABLE trans_baz; +DROP TABLE trans_barbaz; -- test case for problems with revalidating an open relation during abort create function inverse(int) returns float8 as $$ @@ -659,11 +659,95 @@ ERROR: portal "ctt" cannot be run COMMIT; DROP FUNCTION create_temp_tab(); DROP FUNCTION invert(x float8); +-- Test assorted behaviors around the implicit transaction block created +-- when multiple SQL commands are sent in a single Query message. These +-- tests rely on the fact that psql will not break SQL commands apart at a +-- backslash-quoted semicolon, but will send them as one Query. +create temp table i_table (f1 int); +-- psql will show only the last result in a multi-statement Query +SELECT 1\; SELECT 2\; SELECT 3; + ?column? +---------- + 3 +(1 row) + +-- this implicitly commits: +insert into i_table values(1)\; select * from i_table; + f1 +---- + 1 +(1 row) + +-- 1/0 error will cause rolling back the whole implicit transaction +insert into i_table values(2)\; select * from i_table\; select 1/0; +ERROR: division by zero +select * from i_table; + f1 +---- + 1 +(1 row) + +rollback; -- we are not in a transaction at this point +WARNING: there is no transaction in progress +-- can use regular begin/commit/rollback within a single Query +begin\; insert into i_table values(3)\; commit; +rollback; -- we are not in a transaction at this point +WARNING: there is no transaction in progress +begin\; insert into i_table values(4)\; rollback; +rollback; -- we are not in a transaction at this point +WARNING: there is no transaction in progress +-- begin converts implicit transaction into a regular one that +-- can extend past the end of the Query +select 1\; begin\; insert into i_table values(5); +commit; +select 1\; begin\; insert into i_table values(6); +rollback; +-- commit in implicit-transaction state commits but issues a warning. +insert into i_table values(7)\; commit\; insert into i_table values(8)\; select 1/0; +WARNING: there is no transaction in progress +ERROR: division by zero +-- similarly, rollback aborts but issues a warning. +insert into i_table values(9)\; rollback\; select 2; +WARNING: there is no transaction in progress + ?column? 
+---------- + 2 +(1 row) + +select * from i_table; + f1 +---- + 1 + 3 + 5 + 7 +(4 rows) + +rollback; -- we are not in a transaction at this point +WARNING: there is no transaction in progress +-- implicit transaction block is still a transaction block, for e.g. VACUUM +SELECT 1\; VACUUM; +ERROR: VACUUM cannot run inside a transaction block +SELECT 1\; COMMIT\; VACUUM; +WARNING: there is no transaction in progress +ERROR: VACUUM cannot run inside a transaction block +-- we disallow savepoint-related commands in implicit-transaction state +SELECT 1\; SAVEPOINT sp; +ERROR: SAVEPOINT can only be used in transaction blocks +SELECT 1\; COMMIT\; SAVEPOINT sp; +WARNING: there is no transaction in progress +ERROR: SAVEPOINT can only be used in transaction blocks +ROLLBACK TO SAVEPOINT sp\; SELECT 2; +ERROR: ROLLBACK TO SAVEPOINT can only be used in transaction blocks +SELECT 2\; RELEASE SAVEPOINT sp\; SELECT 3; +ERROR: RELEASE SAVEPOINT can only be used in transaction blocks +-- but this is OK, because the BEGIN converts it to a regular xact +SELECT 1\; BEGIN\; SAVEPOINT sp\; ROLLBACK TO SAVEPOINT sp\; COMMIT; -- Test for successful cleanup of an aborted transaction at session exit. -- THIS MUST BE THE LAST TEST IN THIS FILE. begin; select 1/0; ERROR: division by zero rollback to X; -ERROR: no such savepoint +ERROR: savepoint "x" does not exist -- DO NOT ADD ANYTHING HERE. diff --git a/src/test/regress/expected/triggers.out b/src/test/regress/expected/triggers.out index ac132b042d..7d59de98eb 100644 --- a/src/test/regress/expected/triggers.out +++ b/src/test/regress/expected/triggers.out @@ -22,12 +22,12 @@ create unique index pkeys_i on pkeys (pkey1, pkey2); create trigger check_fkeys_pkey_exist before insert or update on fkeys for each row - execute procedure + execute function check_primary_key ('fkey1', 'fkey2', 'pkeys', 'pkey1', 'pkey2'); create trigger check_fkeys_pkey2_exist before insert or update on fkeys for each row - execute procedure check_primary_key ('fkey3', 'fkeys2', 'pkey23'); + execute function check_primary_key ('fkey3', 'fkeys2', 'pkey23'); -- -- For fkeys2: -- (fkey21, fkey22) --> pkeys (pkey1, pkey2) @@ -96,43 +96,55 @@ CONTEXT: SQL statement "delete from fkeys2 where fkey21 = $1 and fkey22 = $2 " update pkeys set pkey1 = 7, pkey2 = '70' where pkey1 = 10 and pkey2 = '1'; NOTICE: check_pkeys_fkey_cascade: 1 tuple(s) of fkeys are deleted NOTICE: check_pkeys_fkey_cascade: 1 tuple(s) of fkeys2 are deleted +SELECT trigger_name, event_manipulation, event_object_schema, event_object_table, + action_order, action_condition, action_orientation, action_timing, + action_reference_old_table, action_reference_new_table + FROM information_schema.triggers + WHERE event_object_table in ('pkeys', 'fkeys', 'fkeys2') + ORDER BY trigger_name COLLATE "C", 2; + trigger_name | event_manipulation | event_object_schema | event_object_table | action_order | action_condition | action_orientation | action_timing | action_reference_old_table | action_reference_new_table +----------------------------+--------------------+---------------------+--------------------+--------------+------------------+--------------------+---------------+----------------------------+---------------------------- + check_fkeys2_fkey_restrict | DELETE | public | fkeys2 | 1 | | ROW | BEFORE | | + check_fkeys2_fkey_restrict | UPDATE | public | fkeys2 | 1 | | ROW | BEFORE | | + check_fkeys2_pkey_exist | INSERT | public | fkeys2 | 1 | | ROW | BEFORE | | + check_fkeys2_pkey_exist | UPDATE | public | fkeys2 | 2 | | ROW | BEFORE | | + 
check_fkeys_pkey2_exist | INSERT | public | fkeys | 1 | | ROW | BEFORE | | + check_fkeys_pkey2_exist | UPDATE | public | fkeys | 1 | | ROW | BEFORE | | + check_fkeys_pkey_exist | INSERT | public | fkeys | 2 | | ROW | BEFORE | | + check_fkeys_pkey_exist | UPDATE | public | fkeys | 2 | | ROW | BEFORE | | + check_pkeys_fkey_cascade | DELETE | public | pkeys | 1 | | ROW | BEFORE | | + check_pkeys_fkey_cascade | UPDATE | public | pkeys | 1 | | ROW | BEFORE | | +(10 rows) + DROP TABLE pkeys; DROP TABLE fkeys; DROP TABLE fkeys2; --- -- I've disabled the funny_dup17 test because the new semantics --- -- of AFTER ROW triggers, which get now fired at the end of a --- -- query always, cause funny_dup17 to enter an endless loop. --- -- --- -- Jan --- --- create table dup17 (x int4); --- --- create trigger dup17_before --- before insert on dup17 --- for each row --- execute procedure --- funny_dup17 () --- ; --- --- insert into dup17 values (17); --- select count(*) from dup17; --- insert into dup17 values (17); --- select count(*) from dup17; --- --- drop trigger dup17_before on dup17; --- --- create trigger dup17_after --- after insert on dup17 --- for each row --- execute procedure --- funny_dup17 () --- ; --- insert into dup17 values (13); --- select count(*) from dup17 where x = 13; --- insert into dup17 values (13); --- select count(*) from dup17 where x = 13; --- --- DROP TABLE dup17; +-- Check behavior when trigger returns unmodified trigtuple +create table trigtest (f1 int, f2 text); +create trigger trigger_return_old + before insert or delete or update on trigtest + for each row execute procedure trigger_return_old(); +insert into trigtest values(1, 'foo'); +select * from trigtest; + f1 | f2 +----+----- + 1 | foo +(1 row) + +update trigtest set f2 = f2 || 'bar'; +select * from trigtest; + f1 | f2 +----+----- + 1 | foo +(1 row) + +delete from trigtest; +select * from trigtest; + f1 | f2 +----+---- +(0 rows) + +drop table trigtest; create sequence ttdummy_seq increment 10 start 0 minvalue 0; create table tttest ( price_id int4, @@ -347,6 +359,26 @@ CREATE TRIGGER insert_when BEFORE INSERT ON main_table FOR EACH STATEMENT WHEN (true) EXECUTE PROCEDURE trigger_func('insert_when'); CREATE TRIGGER delete_when AFTER DELETE ON main_table FOR EACH STATEMENT WHEN (true) EXECUTE PROCEDURE trigger_func('delete_when'); +SELECT trigger_name, event_manipulation, event_object_schema, event_object_table, + action_order, action_condition, action_orientation, action_timing, + action_reference_old_table, action_reference_new_table + FROM information_schema.triggers + WHERE event_object_table IN ('main_table') + ORDER BY trigger_name COLLATE "C", 2; + trigger_name | event_manipulation | event_object_schema | event_object_table | action_order | action_condition | action_orientation | action_timing | action_reference_old_table | action_reference_new_table +----------------------+--------------------+---------------------+--------------------+--------------+--------------------------------+--------------------+---------------+----------------------------+---------------------------- + after_ins_stmt_trig | INSERT | public | main_table | 1 | | STATEMENT | AFTER | | + after_upd_row_trig | UPDATE | public | main_table | 1 | | ROW | AFTER | | + after_upd_stmt_trig | UPDATE | public | main_table | 1 | | STATEMENT | AFTER | | + before_ins_stmt_trig | INSERT | public | main_table | 1 | | STATEMENT | BEFORE | | + delete_a | DELETE | public | main_table | 1 | (old.a = 123) | ROW | AFTER | | + delete_when | DELETE | public | 
main_table | 1 | true | STATEMENT | AFTER | | + insert_a | INSERT | public | main_table | 1 | (new.a = 123) | ROW | AFTER | | + insert_when | INSERT | public | main_table | 2 | true | STATEMENT | BEFORE | | + modified_a | UPDATE | public | main_table | 1 | (old.a <> new.a) | ROW | BEFORE | | + modified_any | UPDATE | public | main_table | 2 | (old.* IS DISTINCT FROM new.*) | ROW | BEFORE | | +(10 rows) + INSERT INTO main_table (a) VALUES (123), (456); NOTICE: trigger_func(before_ins_stmt) called: action = INSERT, when = BEFORE, level = STATEMENT NOTICE: trigger_func(insert_when) called: action = INSERT, when = BEFORE, level = STATEMENT @@ -391,9 +423,9 @@ SELECT pg_get_triggerdef(oid, true) FROM pg_trigger WHERE tgrelid = 'main_table' (1 row) SELECT pg_get_triggerdef(oid, false) FROM pg_trigger WHERE tgrelid = 'main_table'::regclass AND tgname = 'modified_a'; - pg_get_triggerdef ----------------------------------------------------------------------------------------------------------------------------------------------- - CREATE TRIGGER modified_a BEFORE UPDATE OF a ON main_table FOR EACH ROW WHEN ((old.a <> new.a)) EXECUTE PROCEDURE trigger_func('modified_a') + pg_get_triggerdef +----------------------------------------------------------------------------------------------------------------------------------------------------- + CREATE TRIGGER modified_a BEFORE UPDATE OF a ON public.main_table FOR EACH ROW WHEN ((old.a <> new.a)) EXECUTE PROCEDURE trigger_func('modified_a') (1 row) SELECT pg_get_triggerdef(oid, true) FROM pg_trigger WHERE tgrelid = 'main_table'::regclass AND tgname = 'modified_any'; @@ -408,6 +440,16 @@ DROP TRIGGER insert_a ON main_table; DROP TRIGGER delete_a ON main_table; DROP TRIGGER insert_when ON main_table; DROP TRIGGER delete_when ON main_table; +-- Test WHEN condition accessing system columns. 
+create table table_with_oids(a int) with oids; +insert into table_with_oids values (1); +create trigger oid_unchanged_trig after update on table_with_oids + for each row + when (new.oid = old.oid AND new.oid <> 0) + execute procedure trigger_func('after_upd_oid_unchanged'); +update table_with_oids set a = a + 1; +NOTICE: trigger_func(after_upd_oid_unchanged) called: action = UPDATE, when = AFTER, level = ROW +drop table table_with_oids; -- Test column-level triggers DROP TRIGGER after_upd_row_trig ON main_table; CREATE TRIGGER before_upd_a_row_trig BEFORE UPDATE OF a ON main_table @@ -421,9 +463,9 @@ FOR EACH STATEMENT EXECUTE PROCEDURE trigger_func('before_upd_a_stmt'); CREATE TRIGGER after_upd_b_stmt_trig AFTER UPDATE OF b ON main_table FOR EACH STATEMENT EXECUTE PROCEDURE trigger_func('after_upd_b_stmt'); SELECT pg_get_triggerdef(oid) FROM pg_trigger WHERE tgrelid = 'main_table'::regclass AND tgname = 'after_upd_a_b_row_trig'; - pg_get_triggerdef -------------------------------------------------------------------------------------------------------------------------------------------- - CREATE TRIGGER after_upd_a_b_row_trig AFTER UPDATE OF a, b ON main_table FOR EACH ROW EXECUTE PROCEDURE trigger_func('after_upd_a_b_row') + pg_get_triggerdef +-------------------------------------------------------------------------------------------------------------------------------------------------- + CREATE TRIGGER after_upd_a_b_row_trig AFTER UPDATE OF a, b ON public.main_table FOR EACH ROW EXECUTE PROCEDURE trigger_func('after_upd_a_b_row') (1 row) UPDATE main_table SET a = 50; @@ -525,10 +567,10 @@ LINE 2: FOR EACH STATEMENT WHEN (OLD.* IS DISTINCT FROM NEW.*) ^ -- check dependency restrictions ALTER TABLE main_table DROP COLUMN b; -ERROR: cannot drop table main_table column b because other objects depend on it -DETAIL: trigger after_upd_b_row_trig on table main_table depends on table main_table column b -trigger after_upd_a_b_row_trig on table main_table depends on table main_table column b -trigger after_upd_b_stmt_trig on table main_table depends on table main_table column b +ERROR: cannot drop column b of table main_table because other objects depend on it +DETAIL: trigger after_upd_b_row_trig on table main_table depends on column b of table main_table +trigger after_upd_a_b_row_trig on table main_table depends on column b of table main_table +trigger after_upd_b_stmt_trig on table main_table depends on column b of table main_table HINT: Use DROP ... CASCADE to drop the dependent objects too. 
-- this should succeed, but we'll roll it back to keep the triggers around begin; @@ -569,6 +611,12 @@ insert into trigtest default values; alter table trigtest enable trigger trigtest_a_stmt_tg; insert into trigtest default values; NOTICE: trigtest INSERT AFTER STATEMENT +set session_replication_role = replica; +insert into trigtest default values; -- does not trigger +alter table trigtest enable always trigger trigtest_a_stmt_tg; +insert into trigtest default values; -- now it does +NOTICE: trigtest INSERT AFTER STATEMENT +reset session_replication_role; insert into trigtest2 values(1); insert into trigtest2 values(2); delete from trigtest where i=2; @@ -595,7 +643,9 @@ select * from trigtest; 3 4 5 -(3 rows) + 6 + 7 +(5 rows) drop table trigtest2; drop table trigtest; @@ -1807,7 +1857,74 @@ drop function my_trigger_function(); drop view my_view; drop table my_table; -- --- Verify that per-statement triggers are fired for partitioned tables +-- Verify cases that are unsupported with partitioned tables +-- +create table parted_trig (a int) partition by list (a); +create function trigger_nothing() returns trigger + language plpgsql as $$ begin end; $$; +create trigger failed before insert or update or delete on parted_trig + for each row execute procedure trigger_nothing(); +ERROR: "parted_trig" is a partitioned table +DETAIL: Partitioned tables cannot have BEFORE / FOR EACH ROW triggers. +create trigger failed instead of update on parted_trig + for each row execute procedure trigger_nothing(); +ERROR: "parted_trig" is a table +DETAIL: Tables cannot have INSTEAD OF triggers. +create trigger failed after update on parted_trig + referencing old table as old_table + for each row execute procedure trigger_nothing(); +ERROR: "parted_trig" is a partitioned table +DETAIL: Triggers on partitioned tables cannot have transition tables. +drop table parted_trig; +-- +-- Verify trigger creation for partitioned tables, and drop behavior +-- +create table trigpart (a int, b int) partition by range (a); +create table trigpart1 partition of trigpart for values from (0) to (1000); +create trigger trg1 after insert on trigpart for each row execute procedure trigger_nothing(); +create table trigpart2 partition of trigpart for values from (1000) to (2000); +create table trigpart3 (like trigpart); +alter table trigpart attach partition trigpart3 for values from (2000) to (3000); +select tgrelid::regclass, tgname, tgfoid::regproc from pg_trigger + where tgrelid::regclass::text like 'trigpart%' order by tgrelid::regclass::text; + tgrelid | tgname | tgfoid +-----------+--------+----------------- + trigpart | trg1 | trigger_nothing + trigpart1 | trg1 | trigger_nothing + trigpart2 | trg1 | trigger_nothing + trigpart3 | trg1 | trigger_nothing +(4 rows) + +drop trigger trg1 on trigpart1; -- fail +ERROR: cannot drop trigger trg1 on table trigpart1 because trigger trg1 on table trigpart requires it +HINT: You can drop trigger trg1 on table trigpart instead. +drop trigger trg1 on trigpart2; -- fail +ERROR: cannot drop trigger trg1 on table trigpart2 because trigger trg1 on table trigpart requires it +HINT: You can drop trigger trg1 on table trigpart instead. +drop trigger trg1 on trigpart3; -- fail +ERROR: cannot drop trigger trg1 on table trigpart3 because trigger trg1 on table trigpart requires it +HINT: You can drop trigger trg1 on table trigpart instead. 
+drop table trigpart2; -- ok, trigger should be gone in that partition +select tgrelid::regclass, tgname, tgfoid::regproc from pg_trigger + where tgrelid::regclass::text like 'trigpart%' order by tgrelid::regclass::text; + tgrelid | tgname | tgfoid +-----------+--------+----------------- + trigpart | trg1 | trigger_nothing + trigpart1 | trg1 | trigger_nothing + trigpart3 | trg1 | trigger_nothing +(3 rows) + +drop trigger trg1 on trigpart; -- ok, all gone +select tgrelid::regclass, tgname, tgfoid::regproc from pg_trigger + where tgrelid::regclass::text like 'trigpart%' order by tgrelid::regclass::text; + tgrelid | tgname | tgfoid +---------+--------+-------- +(0 rows) + +drop table trigpart; +drop function trigger_nothing(); +-- +-- Verify that triggers are fired for partitioned tables -- create table parted_stmt_trig (a int) partition by list (a); create table parted_stmt_trig1 partition of parted_stmt_trig for values in (1); @@ -1817,14 +1934,14 @@ create table parted2_stmt_trig1 partition of parted2_stmt_trig for values in (1) create table parted2_stmt_trig2 partition of parted2_stmt_trig for values in (2); create or replace function trigger_notice() returns trigger as $$ begin - raise notice 'trigger on % % % for %', TG_TABLE_NAME, TG_WHEN, TG_OP, TG_LEVEL; + raise notice 'trigger % on % % % for %', TG_NAME, TG_TABLE_NAME, TG_WHEN, TG_OP, TG_LEVEL; if TG_LEVEL = 'ROW' then return NEW; end if; return null; end; $$ language plpgsql; --- insert/update/delete statment-level triggers on the parent +-- insert/update/delete statement-level triggers on the parent create trigger trig_ins_before before insert on parted_stmt_trig for each statement execute procedure trigger_notice(); create trigger trig_ins_after after insert on parted_stmt_trig @@ -1837,37 +1954,50 @@ create trigger trig_del_before before delete on parted_stmt_trig for each statement execute procedure trigger_notice(); create trigger trig_del_after after delete on parted_stmt_trig for each statement execute procedure trigger_notice(); +-- insert/update/delete row-level triggers on the parent +create trigger trig_ins_after_parent after insert on parted_stmt_trig + for each row execute procedure trigger_notice(); +create trigger trig_upd_after_parent after update on parted_stmt_trig + for each row execute procedure trigger_notice(); +create trigger trig_del_after_parent after delete on parted_stmt_trig + for each row execute procedure trigger_notice(); -- insert/update/delete row-level triggers on the first partition -create trigger trig_ins_before before insert on parted_stmt_trig1 +create trigger trig_ins_before_child before insert on parted_stmt_trig1 + for each row execute procedure trigger_notice(); +create trigger trig_ins_after_child after insert on parted_stmt_trig1 + for each row execute procedure trigger_notice(); +create trigger trig_upd_before_child before update on parted_stmt_trig1 for each row execute procedure trigger_notice(); -create trigger trig_ins_after after insert on parted_stmt_trig1 +create trigger trig_upd_after_child after update on parted_stmt_trig1 for each row execute procedure trigger_notice(); -create trigger trig_upd_before before update on parted_stmt_trig1 +create trigger trig_del_before_child before delete on parted_stmt_trig1 for each row execute procedure trigger_notice(); -create trigger trig_upd_after after update on parted_stmt_trig1 +create trigger trig_del_after_child after delete on parted_stmt_trig1 for each row execute procedure trigger_notice(); -- insert/update/delete statement-level 
triggers on the parent -create trigger trig_ins_before before insert on parted2_stmt_trig +create trigger trig_ins_before_3 before insert on parted2_stmt_trig for each statement execute procedure trigger_notice(); -create trigger trig_ins_after after insert on parted2_stmt_trig +create trigger trig_ins_after_3 after insert on parted2_stmt_trig for each statement execute procedure trigger_notice(); -create trigger trig_upd_before before update on parted2_stmt_trig +create trigger trig_upd_before_3 before update on parted2_stmt_trig for each statement execute procedure trigger_notice(); -create trigger trig_upd_after after update on parted2_stmt_trig +create trigger trig_upd_after_3 after update on parted2_stmt_trig for each statement execute procedure trigger_notice(); -create trigger trig_del_before before delete on parted2_stmt_trig +create trigger trig_del_before_3 before delete on parted2_stmt_trig for each statement execute procedure trigger_notice(); -create trigger trig_del_after after delete on parted2_stmt_trig +create trigger trig_del_after_3 after delete on parted2_stmt_trig for each statement execute procedure trigger_notice(); with ins (a) as ( insert into parted2_stmt_trig values (1), (2) returning a ) insert into parted_stmt_trig select a from ins returning tableoid::regclass, a; -NOTICE: trigger on parted_stmt_trig BEFORE INSERT for STATEMENT -NOTICE: trigger on parted2_stmt_trig BEFORE INSERT for STATEMENT -NOTICE: trigger on parted_stmt_trig1 BEFORE INSERT for ROW -NOTICE: trigger on parted_stmt_trig1 AFTER INSERT for ROW -NOTICE: trigger on parted2_stmt_trig AFTER INSERT for STATEMENT -NOTICE: trigger on parted_stmt_trig AFTER INSERT for STATEMENT +NOTICE: trigger trig_ins_before on parted_stmt_trig BEFORE INSERT for STATEMENT +NOTICE: trigger trig_ins_before_3 on parted2_stmt_trig BEFORE INSERT for STATEMENT +NOTICE: trigger trig_ins_before_child on parted_stmt_trig1 BEFORE INSERT for ROW +NOTICE: trigger trig_ins_after_child on parted_stmt_trig1 AFTER INSERT for ROW +NOTICE: trigger trig_ins_after_parent on parted_stmt_trig1 AFTER INSERT for ROW +NOTICE: trigger trig_ins_after_parent on parted_stmt_trig2 AFTER INSERT for ROW +NOTICE: trigger trig_ins_after_3 on parted2_stmt_trig AFTER INSERT for STATEMENT +NOTICE: trigger trig_ins_after on parted_stmt_trig AFTER INSERT for STATEMENT tableoid | a -------------------+--- parted_stmt_trig1 | 1 @@ -1877,26 +2007,266 @@ NOTICE: trigger on parted_stmt_trig AFTER INSERT for STATEMENT with upd as ( update parted2_stmt_trig set a = a ) update parted_stmt_trig set a = a; -NOTICE: trigger on parted_stmt_trig BEFORE UPDATE for STATEMENT -NOTICE: trigger on parted_stmt_trig1 BEFORE UPDATE for ROW -NOTICE: trigger on parted2_stmt_trig BEFORE UPDATE for STATEMENT -NOTICE: trigger on parted_stmt_trig1 AFTER UPDATE for ROW -NOTICE: trigger on parted_stmt_trig AFTER UPDATE for STATEMENT -NOTICE: trigger on parted2_stmt_trig AFTER UPDATE for STATEMENT +NOTICE: trigger trig_upd_before on parted_stmt_trig BEFORE UPDATE for STATEMENT +NOTICE: trigger trig_upd_before_child on parted_stmt_trig1 BEFORE UPDATE for ROW +NOTICE: trigger trig_upd_before_3 on parted2_stmt_trig BEFORE UPDATE for STATEMENT +NOTICE: trigger trig_upd_after_child on parted_stmt_trig1 AFTER UPDATE for ROW +NOTICE: trigger trig_upd_after_parent on parted_stmt_trig1 AFTER UPDATE for ROW +NOTICE: trigger trig_upd_after_parent on parted_stmt_trig2 AFTER UPDATE for ROW +NOTICE: trigger trig_upd_after on parted_stmt_trig AFTER UPDATE for STATEMENT +NOTICE: trigger 
trig_upd_after_3 on parted2_stmt_trig AFTER UPDATE for STATEMENT delete from parted_stmt_trig; -NOTICE: trigger on parted_stmt_trig BEFORE DELETE for STATEMENT -NOTICE: trigger on parted_stmt_trig AFTER DELETE for STATEMENT +NOTICE: trigger trig_del_before on parted_stmt_trig BEFORE DELETE for STATEMENT +NOTICE: trigger trig_del_before_child on parted_stmt_trig1 BEFORE DELETE for ROW +NOTICE: trigger trig_del_after_parent on parted_stmt_trig2 AFTER DELETE for ROW +NOTICE: trigger trig_del_after on parted_stmt_trig AFTER DELETE for STATEMENT -- insert via copy on the parent copy parted_stmt_trig(a) from stdin; -NOTICE: trigger on parted_stmt_trig BEFORE INSERT for STATEMENT -NOTICE: trigger on parted_stmt_trig1 BEFORE INSERT for ROW -NOTICE: trigger on parted_stmt_trig1 AFTER INSERT for ROW -NOTICE: trigger on parted_stmt_trig AFTER INSERT for STATEMENT +NOTICE: trigger trig_ins_before on parted_stmt_trig BEFORE INSERT for STATEMENT +NOTICE: trigger trig_ins_before_child on parted_stmt_trig1 BEFORE INSERT for ROW +NOTICE: trigger trig_ins_after_child on parted_stmt_trig1 AFTER INSERT for ROW +NOTICE: trigger trig_ins_after_parent on parted_stmt_trig1 AFTER INSERT for ROW +NOTICE: trigger trig_ins_after_parent on parted_stmt_trig2 AFTER INSERT for ROW +NOTICE: trigger trig_ins_after on parted_stmt_trig AFTER INSERT for STATEMENT -- insert via copy on the first partition copy parted_stmt_trig1(a) from stdin; -NOTICE: trigger on parted_stmt_trig1 BEFORE INSERT for ROW -NOTICE: trigger on parted_stmt_trig1 AFTER INSERT for ROW +NOTICE: trigger trig_ins_before_child on parted_stmt_trig1 BEFORE INSERT for ROW +NOTICE: trigger trig_ins_after_child on parted_stmt_trig1 AFTER INSERT for ROW +NOTICE: trigger trig_ins_after_parent on parted_stmt_trig1 AFTER INSERT for ROW +-- Disabling a trigger in the parent table should disable children triggers too +alter table parted_stmt_trig disable trigger trig_ins_after_parent; +insert into parted_stmt_trig values (1); +NOTICE: trigger trig_ins_before on parted_stmt_trig BEFORE INSERT for STATEMENT +NOTICE: trigger trig_ins_before_child on parted_stmt_trig1 BEFORE INSERT for ROW +NOTICE: trigger trig_ins_after_child on parted_stmt_trig1 AFTER INSERT for ROW +NOTICE: trigger trig_ins_after on parted_stmt_trig AFTER INSERT for STATEMENT +alter table parted_stmt_trig enable trigger trig_ins_after_parent; +insert into parted_stmt_trig values (1); +NOTICE: trigger trig_ins_before on parted_stmt_trig BEFORE INSERT for STATEMENT +NOTICE: trigger trig_ins_before_child on parted_stmt_trig1 BEFORE INSERT for ROW +NOTICE: trigger trig_ins_after_child on parted_stmt_trig1 AFTER INSERT for ROW +NOTICE: trigger trig_ins_after_parent on parted_stmt_trig1 AFTER INSERT for ROW +NOTICE: trigger trig_ins_after on parted_stmt_trig AFTER INSERT for STATEMENT drop table parted_stmt_trig, parted2_stmt_trig; +-- Verify that triggers fire in alphabetical order +create table parted_trig (a int) partition by range (a); +create table parted_trig_1 partition of parted_trig for values from (0) to (1000) + partition by range (a); +create table parted_trig_1_1 partition of parted_trig_1 for values from (0) to (100); +create table parted_trig_2 partition of parted_trig for values from (1000) to (2000); +create trigger zzz after insert on parted_trig for each row execute procedure trigger_notice(); +create trigger mmm after insert on parted_trig_1_1 for each row execute procedure trigger_notice(); +create trigger aaa after insert on parted_trig_1 for each row execute procedure 
trigger_notice(); +create trigger bbb after insert on parted_trig for each row execute procedure trigger_notice(); +create trigger qqq after insert on parted_trig_1_1 for each row execute procedure trigger_notice(); +insert into parted_trig values (50), (1500); +NOTICE: trigger aaa on parted_trig_1_1 AFTER INSERT for ROW +NOTICE: trigger bbb on parted_trig_1_1 AFTER INSERT for ROW +NOTICE: trigger mmm on parted_trig_1_1 AFTER INSERT for ROW +NOTICE: trigger qqq on parted_trig_1_1 AFTER INSERT for ROW +NOTICE: trigger zzz on parted_trig_1_1 AFTER INSERT for ROW +NOTICE: trigger bbb on parted_trig_2 AFTER INSERT for ROW +NOTICE: trigger zzz on parted_trig_2 AFTER INSERT for ROW +drop table parted_trig; +-- test irregular partitions (i.e., different column definitions), +-- including that the WHEN clause works +create function bark(text) returns bool language plpgsql immutable + as $$ begin raise notice '% <- woof!', $1; return true; end; $$; +create or replace function trigger_notice_ab() returns trigger as $$ + begin + raise notice 'trigger % on % % % for %: (a,b)=(%,%)', + TG_NAME, TG_TABLE_NAME, TG_WHEN, TG_OP, TG_LEVEL, + NEW.a, NEW.b; + if TG_LEVEL = 'ROW' then + return NEW; + end if; + return null; + end; + $$ language plpgsql; +create table parted_irreg_ancestor (fd text, b text, fd2 int, fd3 int, a int) + partition by range (b); +alter table parted_irreg_ancestor drop column fd, + drop column fd2, drop column fd3; +create table parted_irreg (fd int, a int, fd2 int, b text) + partition by range (b); +alter table parted_irreg drop column fd, drop column fd2; +alter table parted_irreg_ancestor attach partition parted_irreg + for values from ('aaaa') to ('zzzz'); +create table parted1_irreg (b text, fd int, a int); +alter table parted1_irreg drop column fd; +alter table parted_irreg attach partition parted1_irreg + for values from ('aaaa') to ('bbbb'); +create trigger parted_trig after insert on parted_irreg + for each row execute procedure trigger_notice_ab(); +create trigger parted_trig_odd after insert on parted_irreg for each row + when (bark(new.b) AND new.a % 2 = 1) execute procedure trigger_notice_ab(); +-- we should hear barking for every insert, but parted_trig_odd only emits +-- noise for odd values of a. parted_trig does it for all inserts. +insert into parted_irreg values (1, 'aardvark'), (2, 'aanimals'); +NOTICE: aardvark <- woof! +NOTICE: aanimals <- woof! +NOTICE: trigger parted_trig on parted1_irreg AFTER INSERT for ROW: (a,b)=(1,aardvark) +NOTICE: trigger parted_trig_odd on parted1_irreg AFTER INSERT for ROW: (a,b)=(1,aardvark) +NOTICE: trigger parted_trig on parted1_irreg AFTER INSERT for ROW: (a,b)=(2,aanimals) +insert into parted1_irreg values ('aardwolf', 2); +NOTICE: aardwolf <- woof! +NOTICE: trigger parted_trig on parted1_irreg AFTER INSERT for ROW: (a,b)=(2,aardwolf) +insert into parted_irreg_ancestor values ('aasvogel', 3); +NOTICE: aasvogel <- woof! 
+NOTICE: trigger parted_trig on parted1_irreg AFTER INSERT for ROW: (a,b)=(3,aasvogel) +NOTICE: trigger parted_trig_odd on parted1_irreg AFTER INSERT for ROW: (a,b)=(3,aasvogel) +drop table parted_irreg_ancestor; +-- +-- Constraint triggers and partitioned tables +create table parted_constr_ancestor (a int, b text) + partition by range (b); +create table parted_constr (a int, b text) + partition by range (b); +alter table parted_constr_ancestor attach partition parted_constr + for values from ('aaaa') to ('zzzz'); +create table parted1_constr (a int, b text); +alter table parted_constr attach partition parted1_constr + for values from ('aaaa') to ('bbbb'); +create constraint trigger parted_trig after insert on parted_constr_ancestor + deferrable + for each row execute procedure trigger_notice_ab(); +create constraint trigger parted_trig_two after insert on parted_constr + deferrable initially deferred + for each row when (bark(new.b) AND new.a % 2 = 1) + execute procedure trigger_notice_ab(); +-- The immediate constraint is fired immediately; the WHEN clause of the +-- deferred constraint is also called immediately. The deferred constraint +-- is fired at commit time. +begin; +insert into parted_constr values (1, 'aardvark'); +NOTICE: aardvark <- woof! +NOTICE: trigger parted_trig on parted1_constr AFTER INSERT for ROW: (a,b)=(1,aardvark) +insert into parted1_constr values (2, 'aardwolf'); +NOTICE: aardwolf <- woof! +NOTICE: trigger parted_trig on parted1_constr AFTER INSERT for ROW: (a,b)=(2,aardwolf) +insert into parted_constr_ancestor values (3, 'aasvogel'); +NOTICE: aasvogel <- woof! +NOTICE: trigger parted_trig on parted1_constr AFTER INSERT for ROW: (a,b)=(3,aasvogel) +commit; +NOTICE: trigger parted_trig_two on parted1_constr AFTER INSERT for ROW: (a,b)=(1,aardvark) +NOTICE: trigger parted_trig_two on parted1_constr AFTER INSERT for ROW: (a,b)=(3,aasvogel) +-- The WHEN clause is immediate, and both constraint triggers are fired at +-- commit time. +begin; +set constraints parted_trig deferred; +insert into parted_constr values (1, 'aardvark'); +NOTICE: aardvark <- woof! +insert into parted1_constr values (2, 'aardwolf'), (3, 'aasvogel'); +NOTICE: aardwolf <- woof! +NOTICE: aasvogel <- woof! 
+commit; +NOTICE: trigger parted_trig on parted1_constr AFTER INSERT for ROW: (a,b)=(1,aardvark) +NOTICE: trigger parted_trig_two on parted1_constr AFTER INSERT for ROW: (a,b)=(1,aardvark) +NOTICE: trigger parted_trig on parted1_constr AFTER INSERT for ROW: (a,b)=(2,aardwolf) +NOTICE: trigger parted_trig on parted1_constr AFTER INSERT for ROW: (a,b)=(3,aasvogel) +NOTICE: trigger parted_trig_two on parted1_constr AFTER INSERT for ROW: (a,b)=(3,aasvogel) +drop table parted_constr_ancestor; +drop function bark(text); +-- Test that the WHEN clause is set properly to partitions +create table parted_trigger (a int, b text) partition by range (a); +create table parted_trigger_1 partition of parted_trigger for values from (0) to (1000); +create table parted_trigger_2 (drp int, a int, b text); +alter table parted_trigger_2 drop column drp; +alter table parted_trigger attach partition parted_trigger_2 for values from (1000) to (2000); +create trigger parted_trigger after update on parted_trigger + for each row when (new.a % 2 = 1 and length(old.b) >= 2) execute procedure trigger_notice_ab(); +create table parted_trigger_3 (b text, a int) partition by range (length(b)); +create table parted_trigger_3_1 partition of parted_trigger_3 for values from (1) to (3); +create table parted_trigger_3_2 partition of parted_trigger_3 for values from (3) to (5); +alter table parted_trigger attach partition parted_trigger_3 for values from (2000) to (3000); +insert into parted_trigger values + (0, 'a'), (1, 'bbb'), (2, 'bcd'), (3, 'c'), + (1000, 'c'), (1001, 'ddd'), (1002, 'efg'), (1003, 'f'), + (2000, 'e'), (2001, 'fff'), (2002, 'ghi'), (2003, 'h'); +update parted_trigger set a = a + 2; -- notice for odd 'a' values, long 'b' values +NOTICE: trigger parted_trigger on parted_trigger_1 AFTER UPDATE for ROW: (a,b)=(3,bbb) +NOTICE: trigger parted_trigger on parted_trigger_2 AFTER UPDATE for ROW: (a,b)=(1003,ddd) +NOTICE: trigger parted_trigger on parted_trigger_3_2 AFTER UPDATE for ROW: (a,b)=(2003,fff) +drop table parted_trigger; +-- try a constraint trigger, also +create table parted_referenced (a int); +create table unparted_trigger (a int, b text); -- for comparison purposes +create table parted_trigger (a int, b text) partition by range (a); +create table parted_trigger_1 partition of parted_trigger for values from (0) to (1000); +create table parted_trigger_2 (drp int, a int, b text); +alter table parted_trigger_2 drop column drp; +alter table parted_trigger attach partition parted_trigger_2 for values from (1000) to (2000); +create constraint trigger parted_trigger after update on parted_trigger + from parted_referenced + for each row execute procedure trigger_notice_ab(); +create constraint trigger parted_trigger after update on unparted_trigger + from parted_referenced + for each row execute procedure trigger_notice_ab(); +create table parted_trigger_3 (b text, a int) partition by range (length(b)); +create table parted_trigger_3_1 partition of parted_trigger_3 for values from (1) to (3); +create table parted_trigger_3_2 partition of parted_trigger_3 for values from (3) to (5); +alter table parted_trigger attach partition parted_trigger_3 for values from (2000) to (3000); +select tgname, conname, t.tgrelid::regclass, t.tgconstrrelid::regclass, + c.conrelid::regclass, c.confrelid::regclass + from pg_trigger t join pg_constraint c on (t.tgconstraint = c.oid) + where tgname = 'parted_trigger' + order by t.tgrelid::regclass::text; + tgname | conname | tgrelid | tgconstrrelid | conrelid | confrelid 
+----------------+----------------+--------------------+-------------------+--------------------+----------- + parted_trigger | parted_trigger | parted_trigger | parted_referenced | parted_trigger | - + parted_trigger | parted_trigger | parted_trigger_1 | parted_referenced | parted_trigger_1 | - + parted_trigger | parted_trigger | parted_trigger_2 | parted_referenced | parted_trigger_2 | - + parted_trigger | parted_trigger | parted_trigger_3 | parted_referenced | parted_trigger_3 | - + parted_trigger | parted_trigger | parted_trigger_3_1 | parted_referenced | parted_trigger_3_1 | - + parted_trigger | parted_trigger | parted_trigger_3_2 | parted_referenced | parted_trigger_3_2 | - + parted_trigger | parted_trigger | unparted_trigger | parted_referenced | unparted_trigger | - +(7 rows) + +drop table parted_referenced, parted_trigger, unparted_trigger; +-- verify that the "AFTER UPDATE OF columns" event is propagated correctly +create table parted_trigger (a int, b text) partition by range (a); +create table parted_trigger_1 partition of parted_trigger for values from (0) to (1000); +create table parted_trigger_2 (drp int, a int, b text); +alter table parted_trigger_2 drop column drp; +alter table parted_trigger attach partition parted_trigger_2 for values from (1000) to (2000); +create trigger parted_trigger after update of b on parted_trigger + for each row execute procedure trigger_notice_ab(); +create table parted_trigger_3 (b text, a int) partition by range (length(b)); +create table parted_trigger_3_1 partition of parted_trigger_3 for values from (1) to (4); +create table parted_trigger_3_2 partition of parted_trigger_3 for values from (4) to (8); +alter table parted_trigger attach partition parted_trigger_3 for values from (2000) to (3000); +insert into parted_trigger values (0, 'a'), (1000, 'c'), (2000, 'e'), (2001, 'eeee'); +update parted_trigger set a = a + 2; -- no notices here +update parted_trigger set b = b || 'b'; -- all triggers should fire +NOTICE: trigger parted_trigger on parted_trigger_1 AFTER UPDATE for ROW: (a,b)=(2,ab) +NOTICE: trigger parted_trigger on parted_trigger_2 AFTER UPDATE for ROW: (a,b)=(1002,cb) +NOTICE: trigger parted_trigger on parted_trigger_3_1 AFTER UPDATE for ROW: (a,b)=(2002,eb) +NOTICE: trigger parted_trigger on parted_trigger_3_2 AFTER UPDATE for ROW: (a,b)=(2003,eeeeb) +drop table parted_trigger; +drop function trigger_notice_ab(); +-- Make sure we don't end up with unnecessary copies of triggers, when +-- cloning them. +create table trg_clone (a int) partition by range (a); +create table trg_clone1 partition of trg_clone for values from (0) to (1000); +alter table trg_clone add constraint uniq unique (a) deferrable; +create table trg_clone2 partition of trg_clone for values from (1000) to (2000); +create table trg_clone3 partition of trg_clone for values from (2000) to (3000) + partition by range (a); +create table trg_clone_3_3 partition of trg_clone3 for values from (2000) to (2100); +select tgrelid::regclass, count(*) from pg_trigger + where tgrelid::regclass in ('trg_clone', 'trg_clone1', 'trg_clone2', + 'trg_clone3', 'trg_clone_3_3') + group by tgrelid::regclass order by tgrelid::regclass; + tgrelid | count +---------------+------- + trg_clone | 1 + trg_clone1 | 1 + trg_clone2 | 1 + trg_clone3 | 1 + trg_clone_3_3 | 1 +(5 rows) + +drop table trg_clone; -- -- Test the interaction between transition tables and both kinds of -- inheritance. 
We'll dump the contents of the transition tables in a @@ -1934,7 +2304,7 @@ $$; -- -- Verify behavior of statement triggers on partition hierarchy with -- transition tables. Tuples should appear to each trigger in the --- format of the the relation the trigger is attached to. +-- format of the relation the trigger is attached to. -- -- set up a partition hierarchy with some different TupleDescriptors create table parent (a text, b int) partition by list (a); @@ -1983,6 +2353,28 @@ create trigger child3_update_trig create trigger child3_delete_trig after delete on child3 referencing old table as old_table for each statement execute procedure dump_delete(); +SELECT trigger_name, event_manipulation, event_object_schema, event_object_table, + action_order, action_condition, action_orientation, action_timing, + action_reference_old_table, action_reference_new_table + FROM information_schema.triggers + WHERE event_object_table IN ('parent', 'child1', 'child2', 'child3') + ORDER BY trigger_name COLLATE "C", 2; + trigger_name | event_manipulation | event_object_schema | event_object_table | action_order | action_condition | action_orientation | action_timing | action_reference_old_table | action_reference_new_table +--------------------+--------------------+---------------------+--------------------+--------------+------------------+--------------------+---------------+----------------------------+---------------------------- + child1_delete_trig | DELETE | public | child1 | 1 | | STATEMENT | AFTER | old_table | + child1_insert_trig | INSERT | public | child1 | 1 | | STATEMENT | AFTER | | new_table + child1_update_trig | UPDATE | public | child1 | 1 | | STATEMENT | AFTER | old_table | new_table + child2_delete_trig | DELETE | public | child2 | 1 | | STATEMENT | AFTER | old_table | + child2_insert_trig | INSERT | public | child2 | 1 | | STATEMENT | AFTER | | new_table + child2_update_trig | UPDATE | public | child2 | 1 | | STATEMENT | AFTER | old_table | new_table + child3_delete_trig | DELETE | public | child3 | 1 | | STATEMENT | AFTER | old_table | + child3_insert_trig | INSERT | public | child3 | 1 | | STATEMENT | AFTER | | new_table + child3_update_trig | UPDATE | public | child3 | 1 | | STATEMENT | AFTER | old_table | new_table + parent_delete_trig | DELETE | public | parent | 1 | | STATEMENT | AFTER | old_table | + parent_insert_trig | INSERT | public | parent | 1 | | STATEMENT | AFTER | | new_table + parent_update_trig | UPDATE | public | parent | 1 | | STATEMENT | AFTER | old_table | new_table +(12 rows) + -- insert directly into children sees respective child-format tuples insert into child1 values ('AAA', 42); NOTICE: trigger = child1_insert_trig, new table = (AAA,42) @@ -2217,6 +2609,23 @@ with wcte as (insert into table1 values (42)) insert into table2 values ('hello world'); NOTICE: trigger = table2_trig, new table = ("hello world") NOTICE: trigger = table1_trig, new table = (42) +with wcte as (insert into table1 values (43)) + insert into table1 values (44); +NOTICE: trigger = table1_trig, new table = (43), (44) +select * from table1; + a +---- + 42 + 44 + 43 +(3 rows) + +select * from table2; + a +------------- + hello world +(1 row) + drop table table1; drop table table2; -- @@ -2249,14 +2658,142 @@ insert into my_table values (3, 'CCC'), (4, 'DDD') NOTICE: trigger = my_table_update_trig, old table = (3,CCC), (4,DDD), new table = (3,CCC:CCC), (4,DDD:DDD) NOTICE: trigger = my_table_insert_trig, new table = -- +-- now using a partitioned table +-- +create table iocdu_tt_parted (a int 
primary key, b text) partition by list (a); +create table iocdu_tt_parted1 partition of iocdu_tt_parted for values in (1); +create table iocdu_tt_parted2 partition of iocdu_tt_parted for values in (2); +create table iocdu_tt_parted3 partition of iocdu_tt_parted for values in (3); +create table iocdu_tt_parted4 partition of iocdu_tt_parted for values in (4); +create trigger iocdu_tt_parted_insert_trig + after insert on iocdu_tt_parted referencing new table as new_table + for each statement execute procedure dump_insert(); +create trigger iocdu_tt_parted_update_trig + after update on iocdu_tt_parted referencing old table as old_table new table as new_table + for each statement execute procedure dump_update(); +-- inserts only +insert into iocdu_tt_parted values (1, 'AAA'), (2, 'BBB') + on conflict (a) do + update set b = iocdu_tt_parted.b || ':' || excluded.b; +NOTICE: trigger = iocdu_tt_parted_update_trig, old table = , new table = +NOTICE: trigger = iocdu_tt_parted_insert_trig, new table = (1,AAA), (2,BBB) +-- mixture of inserts and updates +insert into iocdu_tt_parted values (1, 'AAA'), (2, 'BBB'), (3, 'CCC'), (4, 'DDD') + on conflict (a) do + update set b = iocdu_tt_parted.b || ':' || excluded.b; +NOTICE: trigger = iocdu_tt_parted_update_trig, old table = (1,AAA), (2,BBB), new table = (1,AAA:AAA), (2,BBB:BBB) +NOTICE: trigger = iocdu_tt_parted_insert_trig, new table = (3,CCC), (4,DDD) +-- updates only +insert into iocdu_tt_parted values (3, 'CCC'), (4, 'DDD') + on conflict (a) do + update set b = iocdu_tt_parted.b || ':' || excluded.b; +NOTICE: trigger = iocdu_tt_parted_update_trig, old table = (3,CCC), (4,DDD), new table = (3,CCC:CCC), (4,DDD:DDD) +NOTICE: trigger = iocdu_tt_parted_insert_trig, new table = +drop table iocdu_tt_parted; +-- -- Verify that you can't create a trigger with transition tables for -- more than one event. -- create trigger my_table_multievent_trig after insert or update on my_table referencing new table as new_table for each statement execute procedure dump_insert(); -ERROR: Transition tables cannot be specified for triggers with more than one event +ERROR: transition tables cannot be specified for triggers with more than one event +-- +-- Verify that you can't create a trigger with transition tables with +-- a column list. 
+-- +create trigger my_table_col_update_trig + after update of b on my_table referencing new table as new_table + for each statement execute procedure dump_insert(); +ERROR: transition tables cannot be specified for triggers with column lists drop table my_table; +-- +-- Test firing of triggers with transition tables by foreign key cascades +-- +create table refd_table (a int primary key, b text); +create table trig_table (a int, b text, + foreign key (a) references refd_table on update cascade on delete cascade +); +create trigger trig_table_before_trig + before insert or update or delete on trig_table + for each statement execute procedure trigger_func('trig_table'); +create trigger trig_table_insert_trig + after insert on trig_table referencing new table as new_table + for each statement execute procedure dump_insert(); +create trigger trig_table_update_trig + after update on trig_table referencing old table as old_table new table as new_table + for each statement execute procedure dump_update(); +create trigger trig_table_delete_trig + after delete on trig_table referencing old table as old_table + for each statement execute procedure dump_delete(); +insert into refd_table values + (1, 'one'), + (2, 'two'), + (3, 'three'); +insert into trig_table values + (1, 'one a'), + (1, 'one b'), + (2, 'two a'), + (2, 'two b'), + (3, 'three a'), + (3, 'three b'); +NOTICE: trigger_func(trig_table) called: action = INSERT, when = BEFORE, level = STATEMENT +NOTICE: trigger = trig_table_insert_trig, new table = (1,"one a"), (1,"one b"), (2,"two a"), (2,"two b"), (3,"three a"), (3,"three b") +update refd_table set a = 11 where b = 'one'; +NOTICE: trigger_func(trig_table) called: action = UPDATE, when = BEFORE, level = STATEMENT +NOTICE: trigger = trig_table_update_trig, old table = (1,"one a"), (1,"one b"), new table = (11,"one a"), (11,"one b") +select * from trig_table; + a | b +----+--------- + 2 | two a + 2 | two b + 3 | three a + 3 | three b + 11 | one a + 11 | one b +(6 rows) + +delete from refd_table where length(b) = 3; +NOTICE: trigger_func(trig_table) called: action = DELETE, when = BEFORE, level = STATEMENT +NOTICE: trigger = trig_table_delete_trig, old table = (2,"two a"), (2,"two b"), (11,"one a"), (11,"one b") +select * from trig_table; + a | b +---+--------- + 3 | three a + 3 | three b +(2 rows) + +drop table refd_table, trig_table; +-- +-- self-referential FKs are even more fun +-- +create table self_ref (a int primary key, + b int references self_ref(a) on delete cascade); +create trigger self_ref_before_trig + before delete on self_ref + for each statement execute procedure trigger_func('self_ref'); +create trigger self_ref_r_trig + after delete on self_ref referencing old table as old_table + for each row execute procedure dump_delete(); +create trigger self_ref_s_trig + after delete on self_ref referencing old table as old_table + for each statement execute procedure dump_delete(); +insert into self_ref values (1, null), (2, 1), (3, 2); +delete from self_ref where a = 1; +NOTICE: trigger_func(self_ref) called: action = DELETE, when = BEFORE, level = STATEMENT +NOTICE: trigger = self_ref_r_trig, old table = (1,), (2,1) +NOTICE: trigger_func(self_ref) called: action = DELETE, when = BEFORE, level = STATEMENT +NOTICE: trigger = self_ref_r_trig, old table = (1,), (2,1) +NOTICE: trigger = self_ref_s_trig, old table = (1,), (2,1) +NOTICE: trigger = self_ref_r_trig, old table = (3,2) +NOTICE: trigger = self_ref_s_trig, old table = (3,2) +-- without AR trigger, cascaded deletes all end up 
in one transition table +drop trigger self_ref_r_trig on self_ref; +insert into self_ref values (1, null), (2, 1), (3, 2), (4, 3); +delete from self_ref where a = 1; +NOTICE: trigger_func(self_ref) called: action = DELETE, when = BEFORE, level = STATEMENT +NOTICE: trigger = self_ref_s_trig, old table = (1,), (2,1), (3,2), (4,3) +drop table self_ref; -- cleanup drop function dump_insert(); drop function dump_update(); diff --git a/src/test/regress/expected/truncate.out b/src/test/regress/expected/truncate.out index d967e8dd21..2e26510522 100644 --- a/src/test/regress/expected/truncate.out +++ b/src/test/regress/expected/truncate.out @@ -455,12 +455,87 @@ CREATE TABLE truncparted (a int, b char) PARTITION BY LIST (a); -- error, can't truncate a partitioned table TRUNCATE ONLY truncparted; ERROR: cannot truncate only a partitioned table -HINT: Do not specify the ONLY keyword, or use truncate only on the partitions directly. +HINT: Do not specify the ONLY keyword, or use TRUNCATE ONLY on the partitions directly. CREATE TABLE truncparted1 PARTITION OF truncparted FOR VALUES IN (1); INSERT INTO truncparted VALUES (1, 'a'); -- error, must truncate partitions TRUNCATE ONLY truncparted; ERROR: cannot truncate only a partitioned table -HINT: Do not specify the ONLY keyword, or use truncate only on the partitions directly. +HINT: Do not specify the ONLY keyword, or use TRUNCATE ONLY on the partitions directly. TRUNCATE truncparted; DROP TABLE truncparted; +-- foreign key on partitioned table: partition key is referencing column. +-- Make sure truncate did execute on all tables +CREATE FUNCTION tp_ins_data() RETURNS void LANGUAGE plpgsql AS $$ + BEGIN + INSERT INTO truncprim VALUES (1), (100), (150); + INSERT INTO truncpart VALUES (1), (100), (150); + END +$$; +CREATE FUNCTION tp_chk_data(OUT pktb regclass, OUT pkval int, OUT fktb regclass, OUT fkval int) + RETURNS SETOF record LANGUAGE plpgsql AS $$ + BEGIN + RETURN QUERY SELECT + pk.tableoid::regclass, pk.a, fk.tableoid::regclass, fk.a + FROM truncprim pk FULL JOIN truncpart fk USING (a) + ORDER BY 2, 4; + END +$$; +CREATE TABLE truncprim (a int PRIMARY KEY); +CREATE TABLE truncpart (a int REFERENCES truncprim) + PARTITION BY RANGE (a); +CREATE TABLE truncpart_1 PARTITION OF truncpart FOR VALUES FROM (0) TO (100); +CREATE TABLE truncpart_2 PARTITION OF truncpart FOR VALUES FROM (100) TO (200) + PARTITION BY RANGE (a); +CREATE TABLE truncpart_2_1 PARTITION OF truncpart_2 FOR VALUES FROM (100) TO (150); +CREATE TABLE truncpart_2_d PARTITION OF truncpart_2 DEFAULT; +TRUNCATE TABLE truncprim; -- should fail +ERROR: cannot truncate a table referenced in a foreign key constraint +DETAIL: Table "truncpart" references "truncprim". +HINT: Truncate table "truncpart" at the same time, or use TRUNCATE ... CASCADE. 
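-- A minimal sketch of the two alternatives the HINT above points at, reusing
-- the truncprim/truncpart tables defined in this test; either form is expected
-- to clear the referenced table together with every partition of truncpart:
--   TRUNCATE TABLE truncprim, truncpart;
--   TRUNCATE TABLE truncprim CASCADE;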
+select tp_ins_data(); + tp_ins_data +------------- + +(1 row) + +-- should truncate everything +TRUNCATE TABLE truncprim, truncpart; +select * from tp_chk_data(); + pktb | pkval | fktb | fkval +------+-------+------+------- +(0 rows) + +select tp_ins_data(); + tp_ins_data +------------- + +(1 row) + +-- should truncate everything +SET client_min_messages TO WARNING; -- suppress cascading notices +TRUNCATE TABLE truncprim CASCADE; +RESET client_min_messages; +SELECT * FROM tp_chk_data(); + pktb | pkval | fktb | fkval +------+-------+------+------- +(0 rows) + +SELECT tp_ins_data(); + tp_ins_data +------------- + +(1 row) + +-- should truncate all partitions +TRUNCATE TABLE truncpart; +SELECT * FROM tp_chk_data(); + pktb | pkval | fktb | fkval +-----------+-------+------+------- + truncprim | 1 | | + truncprim | 100 | | + truncprim | 150 | | +(3 rows) + +DROP TABLE truncprim, truncpart; +DROP FUNCTION tp_ins_data(), tp_chk_data(); diff --git a/src/test/regress/expected/tsdicts.out b/src/test/regress/expected/tsdicts.out index 0744ef803b..2524ec2768 100644 --- a/src/test/regress/expected/tsdicts.out +++ b/src/test/regress/expected/tsdicts.out @@ -263,6 +263,12 @@ SELECT ts_lexize('hunspell_long', 'unbook'); {book} (1 row) +SELECT ts_lexize('hunspell_long', 'booked'); + ts_lexize +----------- + {book} +(1 row) + SELECT ts_lexize('hunspell_long', 'footklubber'); ts_lexize ---------------- @@ -281,12 +287,24 @@ SELECT ts_lexize('hunspell_long', 'ballyklubber'); {ball,klubber} (1 row) +SELECT ts_lexize('hunspell_long', 'ballsklubber'); + ts_lexize +---------------- + {ball,klubber} +(1 row) + SELECT ts_lexize('hunspell_long', 'footballyklubber'); ts_lexize --------------------- {foot,ball,klubber} (1 row) +SELECT ts_lexize('hunspell_long', 'ex-machina'); + ts_lexize +--------------- + {ex-,machina} +(1 row) + -- Test ISpell dictionary with hunspell affix file with FLAG num parameter CREATE TEXT SEARCH DICTIONARY hunspell_num ( Template=ispell, @@ -299,6 +317,12 @@ SELECT ts_lexize('hunspell_num', 'skies'); {sky} (1 row) +SELECT ts_lexize('hunspell_num', 'sk'); + ts_lexize +----------- + {sky} +(1 row) + SELECT ts_lexize('hunspell_num', 'bookings'); ts_lexize ---------------- @@ -359,6 +383,12 @@ SELECT ts_lexize('hunspell_num', 'unbook'); {book} (1 row) +SELECT ts_lexize('hunspell_num', 'booked'); + ts_lexize +----------- + {book} +(1 row) + SELECT ts_lexize('hunspell_num', 'footklubber'); ts_lexize ---------------- @@ -580,3 +610,11 @@ SELECT to_tsvector('thesaurus_tst', 'Booking tickets is looking like a booking a 'card':3,10 'invit':2,9 'like':6 'look':5 'order':1,8 (1 row) +-- invalid: non-lowercase quoted identifiers +CREATE TEXT SEARCH DICTIONARY tsdict_case +( + Template = ispell, + "DictFile" = ispell_sample, + "AffFile" = ispell_sample +); +ERROR: unrecognized Ispell parameter: "DictFile" diff --git a/src/test/regress/expected/tsearch.out b/src/test/regress/expected/tsearch.out index b2fc9e207e..b088ff0d4f 100644 --- a/src/test/regress/expected/tsearch.out +++ b/src/test/regress/expected/tsearch.out @@ -618,6 +618,17 @@ SELECT * from ts_debug('english', '5aew.werc.ewr:8100/?xx'); url_path | URL path | /?xx | {simple} | simple | {/?xx} (3 rows) +SELECT token, alias, + dictionaries, dictionaries is null as dnull, array_dims(dictionaries) as ddims, + lexemes, lexemes is null as lnull, array_dims(lexemes) as ldims +from ts_debug('english', 'a title'); + token | alias | dictionaries | dnull | ddims | lexemes | lnull | ldims 
+-------+-----------+----------------+-------+-------+---------+-------+------- + a | asciiword | {english_stem} | f | [1:1] | {} | f | + | blank | {} | f | | | t | + title | asciiword | {english_stem} | f | [1:1] | {titl} | f | [1:1] +(3 rows) + -- to_tsquery SELECT to_tsquery('english', 'qwe & sKies '); to_tsquery @@ -1661,3 +1672,420 @@ select * from phrase_index_test where fts @@ phraseto_tsquery('english', 'fat ca (1 row) set enable_seqscan = on; +-- test websearch_to_tsquery function +select websearch_to_tsquery('simple', 'I have a fat:*ABCD cat'); + websearch_to_tsquery +--------------------------------------------- + 'i' & 'have' & 'a' & 'fat' & 'abcd' & 'cat' +(1 row) + +select websearch_to_tsquery('simple', 'orange:**AABBCCDD'); + websearch_to_tsquery +----------------------- + 'orange' & 'aabbccdd' +(1 row) + +select websearch_to_tsquery('simple', 'fat:A!cat:B|rat:C<'); + websearch_to_tsquery +----------------------------------------- + 'fat' & 'a' & 'cat' & 'b' & 'rat' & 'c' +(1 row) + +select websearch_to_tsquery('simple', 'fat:A : cat:B'); + websearch_to_tsquery +--------------------------- + 'fat' & 'a' & 'cat' & 'b' +(1 row) + +select websearch_to_tsquery('simple', 'fat*rat'); + websearch_to_tsquery +---------------------- + 'fat' & 'rat' +(1 row) + +select websearch_to_tsquery('simple', 'fat-rat'); + websearch_to_tsquery +--------------------------- + 'fat-rat' & 'fat' & 'rat' +(1 row) + +select websearch_to_tsquery('simple', 'fat_rat'); + websearch_to_tsquery +---------------------- + 'fat' & 'rat' +(1 row) + +-- weights are completely ignored +select websearch_to_tsquery('simple', 'abc : def'); + websearch_to_tsquery +---------------------- + 'abc' & 'def' +(1 row) + +select websearch_to_tsquery('simple', 'abc:def'); + websearch_to_tsquery +---------------------- + 'abc' & 'def' +(1 row) + +select websearch_to_tsquery('simple', 'a:::b'); + websearch_to_tsquery +---------------------- + 'a' & 'b' +(1 row) + +select websearch_to_tsquery('simple', 'abc:d'); + websearch_to_tsquery +---------------------- + 'abc' & 'd' +(1 row) + +select websearch_to_tsquery('simple', ':'); +NOTICE: text-search query contains only stop words or doesn't contain lexemes, ignored + websearch_to_tsquery +---------------------- + +(1 row) + +-- these operators are ignored +select websearch_to_tsquery('simple', 'abc & def'); + websearch_to_tsquery +---------------------- + 'abc' & 'def' +(1 row) + +select websearch_to_tsquery('simple', 'abc | def'); + websearch_to_tsquery +---------------------- + 'abc' & 'def' +(1 row) + +select websearch_to_tsquery('simple', 'abc <-> def'); + websearch_to_tsquery +---------------------- + 'abc' & 'def' +(1 row) + +select websearch_to_tsquery('simple', 'abc (pg or class)'); + websearch_to_tsquery +------------------------ + 'abc' & 'pg' | 'class' +(1 row) + +-- NOT is ignored in quotes +select websearch_to_tsquery('english', 'My brand new smartphone'); + websearch_to_tsquery +------------------------------- + 'brand' & 'new' & 'smartphon' +(1 row) + +select websearch_to_tsquery('english', 'My brand "new smartphone"'); + websearch_to_tsquery +--------------------------------- + 'brand' & 'new' <-> 'smartphon' +(1 row) + +select websearch_to_tsquery('english', 'My brand "new -smartphone"'); + websearch_to_tsquery +--------------------------------- + 'brand' & 'new' <-> 'smartphon' +(1 row) + +-- test OR operator +select websearch_to_tsquery('simple', 'cat or rat'); + websearch_to_tsquery +---------------------- + 'cat' | 'rat' +(1 row) + +select 
websearch_to_tsquery('simple', 'cat OR rat'); + websearch_to_tsquery +---------------------- + 'cat' | 'rat' +(1 row) + +select websearch_to_tsquery('simple', 'cat "OR" rat'); + websearch_to_tsquery +---------------------- + 'cat' & 'or' & 'rat' +(1 row) + +select websearch_to_tsquery('simple', 'cat OR'); + websearch_to_tsquery +---------------------- + 'cat' & 'or' +(1 row) + +select websearch_to_tsquery('simple', 'OR rat'); + websearch_to_tsquery +---------------------- + 'or' & 'rat' +(1 row) + +select websearch_to_tsquery('simple', '"fat cat OR rat"'); + websearch_to_tsquery +------------------------------------ + 'fat' <-> 'cat' <-> 'or' <-> 'rat' +(1 row) + +select websearch_to_tsquery('simple', 'fat (cat OR rat'); + websearch_to_tsquery +----------------------- + 'fat' & 'cat' | 'rat' +(1 row) + +select websearch_to_tsquery('simple', 'or OR or'); + websearch_to_tsquery +---------------------- + 'or' | 'or' +(1 row) + +-- OR is an operator here ... +select websearch_to_tsquery('simple', '"fat cat"or"fat rat"'); + websearch_to_tsquery +----------------------------------- + 'fat' <-> 'cat' | 'fat' <-> 'rat' +(1 row) + +select websearch_to_tsquery('simple', 'fat or(rat'); + websearch_to_tsquery +---------------------- + 'fat' | 'rat' +(1 row) + +select websearch_to_tsquery('simple', 'fat or)rat'); + websearch_to_tsquery +---------------------- + 'fat' | 'rat' +(1 row) + +select websearch_to_tsquery('simple', 'fat or&rat'); + websearch_to_tsquery +---------------------- + 'fat' | 'rat' +(1 row) + +select websearch_to_tsquery('simple', 'fat or|rat'); + websearch_to_tsquery +---------------------- + 'fat' | 'rat' +(1 row) + +select websearch_to_tsquery('simple', 'fat or!rat'); + websearch_to_tsquery +---------------------- + 'fat' | 'rat' +(1 row) + +select websearch_to_tsquery('simple', 'fat orrat'); + websearch_to_tsquery +---------------------- + 'fat' | 'rat' +(1 row) + +select websearch_to_tsquery('simple', 'fat or '); + websearch_to_tsquery +---------------------- + 'fat' & 'or' +(1 row) + +-- ... 
but not here +select websearch_to_tsquery('simple', 'abc orange'); + websearch_to_tsquery +---------------------- + 'abc' & 'orange' +(1 row) + +select websearch_to_tsquery('simple', 'abc OR1234'); + websearch_to_tsquery +---------------------- + 'abc' & 'or1234' +(1 row) + +select websearch_to_tsquery('simple', 'abc or-abc'); + websearch_to_tsquery +--------------------------------- + 'abc' & 'or-abc' & 'or' & 'abc' +(1 row) + +select websearch_to_tsquery('simple', 'abc OR_abc'); + websearch_to_tsquery +---------------------- + 'abc' & 'or' & 'abc' +(1 row) + +-- test quotes +select websearch_to_tsquery('english', '"pg_class pg'); + websearch_to_tsquery +----------------------- + 'pg' & 'class' & 'pg' +(1 row) + +select websearch_to_tsquery('english', 'pg_class pg"'); + websearch_to_tsquery +----------------------- + 'pg' & 'class' & 'pg' +(1 row) + +select websearch_to_tsquery('english', '"pg_class pg"'); + websearch_to_tsquery +----------------------------- + ( 'pg' & 'class' ) <-> 'pg' +(1 row) + +select websearch_to_tsquery('english', 'abc "pg_class pg"'); + websearch_to_tsquery +------------------------------------- + 'abc' & ( 'pg' & 'class' ) <-> 'pg' +(1 row) + +select websearch_to_tsquery('english', '"pg_class pg" def'); + websearch_to_tsquery +------------------------------------- + ( 'pg' & 'class' ) <-> 'pg' & 'def' +(1 row) + +select websearch_to_tsquery('english', 'abc "pg pg_class pg" def'); + websearch_to_tsquery +------------------------------------------------------ + 'abc' & 'pg' <-> ( 'pg' & 'class' ) <-> 'pg' & 'def' +(1 row) + +select websearch_to_tsquery('english', ' or "pg pg_class pg" or '); + websearch_to_tsquery +-------------------------------------- + 'pg' <-> ( 'pg' & 'class' ) <-> 'pg' +(1 row) + +select websearch_to_tsquery('english', '""pg pg_class pg""'); + websearch_to_tsquery +------------------------------ + 'pg' & 'pg' & 'class' & 'pg' +(1 row) + +select websearch_to_tsquery('english', 'abc """"" def'); + websearch_to_tsquery +---------------------- + 'abc' & 'def' +(1 row) + +select websearch_to_tsquery('english', 'cat -"fat rat"'); + websearch_to_tsquery +------------------------------ + 'cat' & !( 'fat' <-> 'rat' ) +(1 row) + +select websearch_to_tsquery('english', 'cat -"fat rat" cheese'); + websearch_to_tsquery +---------------------------------------- + 'cat' & !( 'fat' <-> 'rat' ) & 'chees' +(1 row) + +select websearch_to_tsquery('english', 'abc "def -"'); + websearch_to_tsquery +---------------------- + 'abc' & 'def' +(1 row) + +select websearch_to_tsquery('english', 'abc "def :"'); + websearch_to_tsquery +---------------------- + 'abc' & 'def' +(1 row) + +select websearch_to_tsquery('english', '"A fat cat" has just eaten a -rat.'); + websearch_to_tsquery +------------------------------------ + 'fat' <-> 'cat' & 'eaten' & !'rat' +(1 row) + +select websearch_to_tsquery('english', '"A fat cat" has just eaten OR !rat.'); + websearch_to_tsquery +----------------------------------- + 'fat' <-> 'cat' & 'eaten' | 'rat' +(1 row) + +select websearch_to_tsquery('english', '"A fat cat" has just (+eaten OR -rat)'); + websearch_to_tsquery +------------------------------------ + 'fat' <-> 'cat' & 'eaten' | !'rat' +(1 row) + +select websearch_to_tsquery('english', 'this is ----fine'); + websearch_to_tsquery +---------------------- + !!!!'fine' +(1 row) + +select websearch_to_tsquery('english', '(()) )))) this ||| is && -fine, "dear friend" OR good'); + websearch_to_tsquery +---------------------------------------- + !'fine' & 'dear' <-> 'friend' | 'good' +(1 
row) + +select websearch_to_tsquery('english', 'an old <-> cat " is fine &&& too'); + websearch_to_tsquery +------------------------ + 'old' & 'cat' & 'fine' +(1 row) + +select websearch_to_tsquery('english', '"A the" OR just on'); +NOTICE: text-search query contains only stop words or doesn't contain lexemes, ignored + websearch_to_tsquery +---------------------- + +(1 row) + +select websearch_to_tsquery('english', '"a fat cat" ate a rat'); + websearch_to_tsquery +--------------------------------- + 'fat' <-> 'cat' & 'ate' & 'rat' +(1 row) + +select to_tsvector('english', 'A fat cat ate a rat') @@ + websearch_to_tsquery('english', '"a fat cat" ate a rat'); + ?column? +---------- + t +(1 row) + +select to_tsvector('english', 'A fat grey cat ate a rat') @@ + websearch_to_tsquery('english', '"a fat cat" ate a rat'); + ?column? +---------- + f +(1 row) + +-- cases handled by gettoken_tsvector() +select websearch_to_tsquery(''''); +NOTICE: text-search query contains only stop words or doesn't contain lexemes, ignored + websearch_to_tsquery +---------------------- + +(1 row) + +select websearch_to_tsquery('''abc''''def'''); + websearch_to_tsquery +---------------------- + 'abc' & 'def' +(1 row) + +select websearch_to_tsquery('\abc'); + websearch_to_tsquery +---------------------- + 'abc' +(1 row) + +select websearch_to_tsquery('\'); +NOTICE: text-search query contains only stop words or doesn't contain lexemes, ignored + websearch_to_tsquery +---------------------- + +(1 row) + diff --git a/src/test/regress/expected/tsrf.out b/src/test/regress/expected/tsrf.out index 6d33fbd3c8..25be6b9ab1 100644 --- a/src/test/regress/expected/tsrf.out +++ b/src/test/regress/expected/tsrf.out @@ -421,6 +421,28 @@ SELECT dataa, datab b, generate_series(1,2) g, count(*) FROM few GROUP BY CUBE(d (24 rows) reset enable_hashagg; +-- case with degenerate ORDER BY +explain (verbose, costs off) +select 'foo' as f, generate_series(1,2) as g from few order by 1; + QUERY PLAN +---------------------------------------------- + ProjectSet + Output: 'foo'::text, generate_series(1, 2) + -> Seq Scan on public.few + Output: id, dataa, datab +(4 rows) + +select 'foo' as f, generate_series(1,2) as g from few order by 1; + f | g +-----+--- + foo | 1 + foo | 2 + foo | 1 + foo | 2 + foo | 1 + foo | 2 +(6 rows) + -- data modification CREATE TABLE fewmore AS SELECT generate_series(1,3) AS data; INSERT INTO fewmore VALUES(generate_series(4,5)); diff --git a/src/test/regress/expected/type_sanity.out b/src/test/regress/expected/type_sanity.out index 7b200baef8..b1419d4bc2 100644 --- a/src/test/regress/expected/type_sanity.out +++ b/src/test/regress/expected/type_sanity.out @@ -129,6 +129,35 @@ WHERE p1.typinput = p2.oid AND NOT -----+---------+-----+--------- (0 rows) +-- Check for type of the variadic array parameter's elements. +-- provariadic should be ANYOID if the type of the last element is ANYOID, +-- ANYELEMENTOID if the type of the last element is ANYARRAYOID, and otherwise +-- the element type corresponding to the array type. 
+SELECT oid::regprocedure, provariadic::regtype, proargtypes::regtype[] +FROM pg_proc +WHERE provariadic != 0 +AND case proargtypes[array_length(proargtypes, 1)-1] + WHEN 2276 THEN 2276 -- any -> any + WHEN 2277 THEN 2283 -- anyarray -> anyelement + ELSE (SELECT t.oid + FROM pg_type t + WHERE t.typarray = proargtypes[array_length(proargtypes, 1)-1]) + END != provariadic; + oid | provariadic | proargtypes +-----+-------------+------------- +(0 rows) + +-- Check that all and only those functions with a variadic type have +-- a variadic argument. +SELECT oid::regprocedure, proargmodes, provariadic +FROM pg_proc +WHERE (proargmodes IS NOT NULL AND 'v' = any(proargmodes)) + IS DISTINCT FROM + (provariadic != 0); + oid | proargmodes | provariadic +-----+-------------+------------- +(0 rows) + -- As of 8.0, this check finds refcursor, which is borrowing -- other types' I/O routines SELECT p1.oid, p1.typname, p2.oid, p2.proname diff --git a/src/test/regress/expected/union.out b/src/test/regress/expected/union.out index ee26b163f7..92d427a690 100644 --- a/src/test/regress/expected/union.out +++ b/src/test/regress/expected/union.out @@ -552,6 +552,121 @@ SELECT q1 FROM int8_tbl EXCEPT (((SELECT q2 FROM int8_tbl ORDER BY q2 LIMIT 1))) 4567890123456789 | -4567890123456789 (5 rows) +-- +-- Check behavior with empty select list (allowed since 9.4) +-- +select union select; +-- +(1 row) + +select intersect select; +-- +(1 row) + +select except select; +-- +(0 rows) + +-- check hashed implementation +set enable_hashagg = true; +set enable_sort = false; +explain (costs off) +select from generate_series(1,5) union select from generate_series(1,3); + QUERY PLAN +---------------------------------------------------------------- + HashAggregate + -> Append + -> Function Scan on generate_series + -> Function Scan on generate_series generate_series_1 +(4 rows) + +explain (costs off) +select from generate_series(1,5) intersect select from generate_series(1,3); + QUERY PLAN +---------------------------------------------------------------------- + HashSetOp Intersect + -> Append + -> Subquery Scan on "*SELECT* 1" + -> Function Scan on generate_series + -> Subquery Scan on "*SELECT* 2" + -> Function Scan on generate_series generate_series_1 +(6 rows) + +select from generate_series(1,5) union select from generate_series(1,3); +-- +(1 row) + +select from generate_series(1,5) union all select from generate_series(1,3); +-- +(8 rows) + +select from generate_series(1,5) intersect select from generate_series(1,3); +-- +(1 row) + +select from generate_series(1,5) intersect all select from generate_series(1,3); +-- +(3 rows) + +select from generate_series(1,5) except select from generate_series(1,3); +-- +(0 rows) + +select from generate_series(1,5) except all select from generate_series(1,3); +-- +(2 rows) + +-- check sorted implementation +set enable_hashagg = false; +set enable_sort = true; +explain (costs off) +select from generate_series(1,5) union select from generate_series(1,3); + QUERY PLAN +---------------------------------------------------------------- + Unique + -> Append + -> Function Scan on generate_series + -> Function Scan on generate_series generate_series_1 +(4 rows) + +explain (costs off) +select from generate_series(1,5) intersect select from generate_series(1,3); + QUERY PLAN +---------------------------------------------------------------------- + SetOp Intersect + -> Append + -> Subquery Scan on "*SELECT* 1" + -> Function Scan on generate_series + -> Subquery Scan on "*SELECT* 2" + -> Function Scan 
on generate_series generate_series_1 +(6 rows) + +select from generate_series(1,5) union select from generate_series(1,3); +-- +(1 row) + +select from generate_series(1,5) union all select from generate_series(1,3); +-- +(8 rows) + +select from generate_series(1,5) intersect select from generate_series(1,3); +-- +(1 row) + +select from generate_series(1,5) intersect all select from generate_series(1,3); +-- +(3 rows) + +select from generate_series(1,5) except select from generate_series(1,3); +-- +(0 rows) + +select from generate_series(1,5) except all select from generate_series(1,3); +-- +(2 rows) + +reset enable_hashagg; +reset enable_sort; -- -- Check handling of a case with unknown constants. We don't guarantee -- an undecorated constant will work in all cases, but historically this diff --git a/src/test/regress/expected/updatable_views.out b/src/test/regress/expected/updatable_views.out index 2090a411fe..e64d693e9c 100644 --- a/src/test/regress/expected/updatable_views.out +++ b/src/test/regress/expected/updatable_views.out @@ -23,8 +23,8 @@ CREATE VIEW rw_view15 AS SELECT a, upper(b) FROM base_tbl; -- Expression/functio CREATE VIEW rw_view16 AS SELECT a, b, a AS aa FROM base_tbl; -- Repeated column may be part of an updatable view CREATE VIEW ro_view17 AS SELECT * FROM ro_view1; -- Base relation not updatable CREATE VIEW ro_view18 AS SELECT * FROM (VALUES(1)) AS tmp(a); -- VALUES in rangetable -CREATE SEQUENCE seq; -CREATE VIEW ro_view19 AS SELECT * FROM seq; -- View based on a sequence +CREATE SEQUENCE uv_seq; +CREATE VIEW ro_view19 AS SELECT * FROM uv_seq; -- View based on a sequence CREATE VIEW ro_view20 AS SELECT a, b, generate_series(1, a) g FROM base_tbl; -- SRF in targetlist not supported SELECT table_name, is_insertable_into FROM information_schema.tables @@ -347,7 +347,7 @@ drop cascades to view ro_view20 drop cascades to view ro_view4 drop cascades to view rw_view14 DROP VIEW ro_view10, ro_view12, ro_view18; -DROP SEQUENCE seq CASCADE; +DROP SEQUENCE uv_seq CASCADE; NOTICE: drop cascades to view ro_view19 -- simple updatable view CREATE TABLE base_tbl (a int PRIMARY KEY, b text DEFAULT 'Unspecified'); @@ -990,26 +990,26 @@ SELECT * FROM rw_view2; -- ok (2 rows) INSERT INTO base_tbl VALUES (3, 'Row 3', 3.0); -- not allowed -ERROR: permission denied for relation base_tbl +ERROR: permission denied for table base_tbl INSERT INTO rw_view1 VALUES ('Row 3', 3.0, 3); -- not allowed -ERROR: permission denied for relation rw_view1 +ERROR: permission denied for view rw_view1 INSERT INTO rw_view2 VALUES ('Row 3', 3.0, 3); -- not allowed -ERROR: permission denied for relation base_tbl +ERROR: permission denied for table base_tbl UPDATE base_tbl SET a=a, c=c; -- ok UPDATE base_tbl SET b=b; -- not allowed -ERROR: permission denied for relation base_tbl +ERROR: permission denied for table base_tbl UPDATE rw_view1 SET bb=bb, cc=cc; -- ok UPDATE rw_view1 SET aa=aa; -- not allowed -ERROR: permission denied for relation rw_view1 +ERROR: permission denied for view rw_view1 UPDATE rw_view2 SET aa=aa, cc=cc; -- ok UPDATE rw_view2 SET bb=bb; -- not allowed -ERROR: permission denied for relation base_tbl +ERROR: permission denied for table base_tbl DELETE FROM base_tbl; -- not allowed -ERROR: permission denied for relation base_tbl +ERROR: permission denied for table base_tbl DELETE FROM rw_view1; -- not allowed -ERROR: permission denied for relation rw_view1 +ERROR: permission denied for view rw_view1 DELETE FROM rw_view2; -- not allowed -ERROR: permission denied for relation base_tbl +ERROR: 
permission denied for table base_tbl RESET SESSION AUTHORIZATION; SET SESSION AUTHORIZATION regress_view_user1; GRANT INSERT, DELETE ON base_tbl TO regress_view_user2; @@ -1017,11 +1017,11 @@ RESET SESSION AUTHORIZATION; SET SESSION AUTHORIZATION regress_view_user2; INSERT INTO base_tbl VALUES (3, 'Row 3', 3.0); -- ok INSERT INTO rw_view1 VALUES ('Row 4', 4.0, 4); -- not allowed -ERROR: permission denied for relation rw_view1 +ERROR: permission denied for view rw_view1 INSERT INTO rw_view2 VALUES ('Row 4', 4.0, 4); -- ok DELETE FROM base_tbl WHERE a=1; -- ok DELETE FROM rw_view1 WHERE aa=2; -- not allowed -ERROR: permission denied for relation rw_view1 +ERROR: permission denied for view rw_view1 DELETE FROM rw_view2 WHERE aa=2; -- ok SELECT * FROM base_tbl; a | b | c @@ -1037,15 +1037,15 @@ GRANT INSERT, DELETE ON rw_view1 TO regress_view_user2; RESET SESSION AUTHORIZATION; SET SESSION AUTHORIZATION regress_view_user2; INSERT INTO base_tbl VALUES (5, 'Row 5', 5.0); -- not allowed -ERROR: permission denied for relation base_tbl +ERROR: permission denied for table base_tbl INSERT INTO rw_view1 VALUES ('Row 5', 5.0, 5); -- ok INSERT INTO rw_view2 VALUES ('Row 6', 6.0, 6); -- not allowed -ERROR: permission denied for relation base_tbl +ERROR: permission denied for table base_tbl DELETE FROM base_tbl WHERE a=3; -- not allowed -ERROR: permission denied for relation base_tbl +ERROR: permission denied for table base_tbl DELETE FROM rw_view1 WHERE aa=3; -- ok DELETE FROM rw_view2 WHERE aa=4; -- not allowed -ERROR: permission denied for relation base_tbl +ERROR: permission denied for table base_tbl SELECT * FROM base_tbl; a | b | c ---+-------+--- @@ -1053,6 +1053,130 @@ SELECT * FROM base_tbl; 5 | Row 5 | 5 (2 rows) +RESET SESSION AUTHORIZATION; +DROP TABLE base_tbl CASCADE; +NOTICE: drop cascades to 2 other objects +DETAIL: drop cascades to view rw_view1 +drop cascades to view rw_view2 +-- nested-view permissions +CREATE TABLE base_tbl(a int, b text, c float); +INSERT INTO base_tbl VALUES (1, 'Row 1', 1.0); +SET SESSION AUTHORIZATION regress_view_user1; +CREATE VIEW rw_view1 AS SELECT * FROM base_tbl; +SELECT * FROM rw_view1; -- not allowed +ERROR: permission denied for table base_tbl +SELECT * FROM rw_view1 FOR UPDATE; -- not allowed +ERROR: permission denied for table base_tbl +UPDATE rw_view1 SET b = 'foo' WHERE a = 1; -- not allowed +ERROR: permission denied for table base_tbl +SET SESSION AUTHORIZATION regress_view_user2; +CREATE VIEW rw_view2 AS SELECT * FROM rw_view1; +SELECT * FROM rw_view2; -- not allowed +ERROR: permission denied for view rw_view1 +SELECT * FROM rw_view2 FOR UPDATE; -- not allowed +ERROR: permission denied for view rw_view1 +UPDATE rw_view2 SET b = 'bar' WHERE a = 1; -- not allowed +ERROR: permission denied for view rw_view1 +RESET SESSION AUTHORIZATION; +GRANT SELECT ON base_tbl TO regress_view_user1; +SET SESSION AUTHORIZATION regress_view_user1; +SELECT * FROM rw_view1; + a | b | c +---+-------+--- + 1 | Row 1 | 1 +(1 row) + +SELECT * FROM rw_view1 FOR UPDATE; -- not allowed +ERROR: permission denied for table base_tbl +UPDATE rw_view1 SET b = 'foo' WHERE a = 1; -- not allowed +ERROR: permission denied for table base_tbl +SET SESSION AUTHORIZATION regress_view_user2; +SELECT * FROM rw_view2; -- not allowed +ERROR: permission denied for view rw_view1 +SELECT * FROM rw_view2 FOR UPDATE; -- not allowed +ERROR: permission denied for view rw_view1 +UPDATE rw_view2 SET b = 'bar' WHERE a = 1; -- not allowed +ERROR: permission denied for view rw_view1 +SET SESSION 
AUTHORIZATION regress_view_user1; +GRANT SELECT ON rw_view1 TO regress_view_user2; +SET SESSION AUTHORIZATION regress_view_user2; +SELECT * FROM rw_view2; + a | b | c +---+-------+--- + 1 | Row 1 | 1 +(1 row) + +SELECT * FROM rw_view2 FOR UPDATE; -- not allowed +ERROR: permission denied for view rw_view1 +UPDATE rw_view2 SET b = 'bar' WHERE a = 1; -- not allowed +ERROR: permission denied for view rw_view1 +RESET SESSION AUTHORIZATION; +GRANT UPDATE ON base_tbl TO regress_view_user1; +SET SESSION AUTHORIZATION regress_view_user1; +SELECT * FROM rw_view1; + a | b | c +---+-------+--- + 1 | Row 1 | 1 +(1 row) + +SELECT * FROM rw_view1 FOR UPDATE; + a | b | c +---+-------+--- + 1 | Row 1 | 1 +(1 row) + +UPDATE rw_view1 SET b = 'foo' WHERE a = 1; +SET SESSION AUTHORIZATION regress_view_user2; +SELECT * FROM rw_view2; + a | b | c +---+-----+--- + 1 | foo | 1 +(1 row) + +SELECT * FROM rw_view2 FOR UPDATE; -- not allowed +ERROR: permission denied for view rw_view1 +UPDATE rw_view2 SET b = 'bar' WHERE a = 1; -- not allowed +ERROR: permission denied for view rw_view1 +SET SESSION AUTHORIZATION regress_view_user1; +GRANT UPDATE ON rw_view1 TO regress_view_user2; +SET SESSION AUTHORIZATION regress_view_user2; +SELECT * FROM rw_view2; + a | b | c +---+-----+--- + 1 | foo | 1 +(1 row) + +SELECT * FROM rw_view2 FOR UPDATE; + a | b | c +---+-----+--- + 1 | foo | 1 +(1 row) + +UPDATE rw_view2 SET b = 'bar' WHERE a = 1; +RESET SESSION AUTHORIZATION; +REVOKE UPDATE ON base_tbl FROM regress_view_user1; +SET SESSION AUTHORIZATION regress_view_user1; +SELECT * FROM rw_view1; + a | b | c +---+-----+--- + 1 | bar | 1 +(1 row) + +SELECT * FROM rw_view1 FOR UPDATE; -- not allowed +ERROR: permission denied for table base_tbl +UPDATE rw_view1 SET b = 'foo' WHERE a = 1; -- not allowed +ERROR: permission denied for table base_tbl +SET SESSION AUTHORIZATION regress_view_user2; +SELECT * FROM rw_view2; + a | b | c +---+-----+--- + 1 | bar | 1 +(1 row) + +SELECT * FROM rw_view2 FOR UPDATE; -- not allowed +ERROR: permission denied for table base_tbl +UPDATE rw_view2 SET b = 'bar' WHERE a = 1; -- not allowed +ERROR: permission denied for table base_tbl RESET SESSION AUTHORIZATION; DROP TABLE base_tbl CASCADE; NOTICE: drop cascades to 2 other objects @@ -2368,66 +2492,66 @@ DETAIL: Failing row contains (-1, invalid). 
DROP VIEW v1; DROP TABLE t1; -- check that an auto-updatable view on a partitioned table works correctly -create table pt (a int, b int, v varchar) partition by range (a, b); -create table pt1 (b int not null, v varchar, a int not null) partition by range (b); -create table pt11 (like pt1); -alter table pt11 drop a; -alter table pt11 add a int; -alter table pt11 drop a; -alter table pt11 add a int not null; -alter table pt1 attach partition pt11 for values from (2) to (5); -alter table pt attach partition pt1 for values from (1, 2) to (1, 10); -create view ptv as select * from pt; +create table uv_pt (a int, b int, v varchar) partition by range (a, b); +create table uv_pt1 (b int not null, v varchar, a int not null) partition by range (b); +create table uv_pt11 (like uv_pt1); +alter table uv_pt11 drop a; +alter table uv_pt11 add a int; +alter table uv_pt11 drop a; +alter table uv_pt11 add a int not null; +alter table uv_pt1 attach partition uv_pt11 for values from (2) to (5); +alter table uv_pt attach partition uv_pt1 for values from (1, 2) to (1, 10); +create view uv_ptv as select * from uv_pt; select events & 4 != 0 AS upd, events & 8 != 0 AS ins, events & 16 != 0 AS del - from pg_catalog.pg_relation_is_updatable('pt'::regclass, false) t(events); + from pg_catalog.pg_relation_is_updatable('uv_pt'::regclass, false) t(events); upd | ins | del -----+-----+----- t | t | t (1 row) -select pg_catalog.pg_column_is_updatable('pt'::regclass, 1::smallint, false); +select pg_catalog.pg_column_is_updatable('uv_pt'::regclass, 1::smallint, false); pg_column_is_updatable ------------------------ t (1 row) -select pg_catalog.pg_column_is_updatable('pt'::regclass, 2::smallint, false); +select pg_catalog.pg_column_is_updatable('uv_pt'::regclass, 2::smallint, false); pg_column_is_updatable ------------------------ t (1 row) select table_name, is_updatable, is_insertable_into - from information_schema.views where table_name = 'ptv'; + from information_schema.views where table_name = 'uv_ptv'; table_name | is_updatable | is_insertable_into ------------+--------------+-------------------- - ptv | YES | YES + uv_ptv | YES | YES (1 row) select table_name, column_name, is_updatable - from information_schema.columns where table_name = 'ptv' order by column_name; + from information_schema.columns where table_name = 'uv_ptv' order by column_name; table_name | column_name | is_updatable ------------+-------------+-------------- - ptv | a | YES - ptv | b | YES - ptv | v | YES + uv_ptv | a | YES + uv_ptv | b | YES + uv_ptv | v | YES (3 rows) -insert into ptv values (1, 2); -select tableoid::regclass, * from pt; +insert into uv_ptv values (1, 2); +select tableoid::regclass, * from uv_pt; tableoid | a | b | v ----------+---+---+--- - pt11 | 1 | 2 | + uv_pt11 | 1 | 2 | (1 row) -create view ptv_wco as select * from pt where a = 0 with check option; -insert into ptv_wco values (1, 2); -ERROR: new row violates check option for view "ptv_wco" +create view uv_ptv_wco as select * from uv_pt where a = 0 with check option; +insert into uv_ptv_wco values (1, 2); +ERROR: new row violates check option for view "uv_ptv_wco" DETAIL: Failing row contains (1, 2, null). 
-drop view ptv, ptv_wco; -drop table pt, pt1, pt11; +drop view uv_ptv, uv_ptv_wco; +drop table uv_pt, uv_pt1, uv_pt11; -- check that wholerow vars appearing in WITH CHECK OPTION constraint expressions -- work fine with partitioned tables create table wcowrtest (a int) partition by list (a); @@ -2454,3 +2578,197 @@ ERROR: new row violates check option for view "wcowrtest_v2" DETAIL: Failing row contains (2, no such row in sometable). drop view wcowrtest_v, wcowrtest_v2; drop table wcowrtest, sometable; +-- Check INSERT .. ON CONFLICT DO UPDATE works correctly when the view's +-- columns are named and ordered differently than the underlying table's. +create table uv_iocu_tab (a text unique, b float); +insert into uv_iocu_tab values ('xyxyxy', 0); +create view uv_iocu_view as + select b, b+1 as c, a, '2.0'::text as two from uv_iocu_tab; +insert into uv_iocu_view (a, b) values ('xyxyxy', 1) + on conflict (a) do update set b = uv_iocu_view.b; +select * from uv_iocu_tab; + a | b +--------+--- + xyxyxy | 0 +(1 row) + +insert into uv_iocu_view (a, b) values ('xyxyxy', 1) + on conflict (a) do update set b = excluded.b; +select * from uv_iocu_tab; + a | b +--------+--- + xyxyxy | 1 +(1 row) + +-- OK to access view columns that are not present in underlying base +-- relation in the ON CONFLICT portion of the query +insert into uv_iocu_view (a, b) values ('xyxyxy', 3) + on conflict (a) do update set b = cast(excluded.two as float); +select * from uv_iocu_tab; + a | b +--------+--- + xyxyxy | 2 +(1 row) + +explain (costs off) +insert into uv_iocu_view (a, b) values ('xyxyxy', 3) + on conflict (a) do update set b = excluded.b where excluded.c > 0; + QUERY PLAN +----------------------------------------------------------------------------------- + Insert on uv_iocu_tab + Conflict Resolution: UPDATE + Conflict Arbiter Indexes: uv_iocu_tab_a_key + Conflict Filter: ((excluded.b + '1'::double precision) > '0'::double precision) + -> Result +(5 rows) + +insert into uv_iocu_view (a, b) values ('xyxyxy', 3) + on conflict (a) do update set b = excluded.b where excluded.c > 0; +select * from uv_iocu_tab; + a | b +--------+--- + xyxyxy | 3 +(1 row) + +drop view uv_iocu_view; +drop table uv_iocu_tab; +-- Test whole-row references to the view +create table uv_iocu_tab (a int unique, b text); +create view uv_iocu_view as + select b as bb, a as aa, uv_iocu_tab::text as cc from uv_iocu_tab; +insert into uv_iocu_view (aa,bb) values (1,'x'); +explain (costs off) +insert into uv_iocu_view (aa,bb) values (1,'y') + on conflict (aa) do update set bb = 'Rejected: '||excluded.* + where excluded.aa > 0 + and excluded.bb != '' + and excluded.cc is not null; + QUERY PLAN +--------------------------------------------------------------------------------------------------------- + Insert on uv_iocu_tab + Conflict Resolution: UPDATE + Conflict Arbiter Indexes: uv_iocu_tab_a_key + Conflict Filter: ((excluded.a > 0) AND (excluded.b <> ''::text) AND ((excluded.*)::text IS NOT NULL)) + -> Result +(5 rows) + +insert into uv_iocu_view (aa,bb) values (1,'y') + on conflict (aa) do update set bb = 'Rejected: '||excluded.* + where excluded.aa > 0 + and excluded.bb != '' + and excluded.cc is not null; +select * from uv_iocu_view; + bb | aa | cc +-------------------------+----+--------------------------------- + Rejected: (y,1,"(1,y)") | 1 | (1,"Rejected: (y,1,""(1,y)"")") +(1 row) + +-- Test omitting a column of the base relation +delete from uv_iocu_view; +insert into uv_iocu_view (aa,bb) values (1,'x'); +insert into uv_iocu_view (aa) values 
(1) + on conflict (aa) do update set bb = 'Rejected: '||excluded.*; +select * from uv_iocu_view; + bb | aa | cc +-----------------------+----+------------------------------- + Rejected: (,1,"(1,)") | 1 | (1,"Rejected: (,1,""(1,)"")") +(1 row) + +alter table uv_iocu_tab alter column b set default 'table default'; +insert into uv_iocu_view (aa) values (1) + on conflict (aa) do update set bb = 'Rejected: '||excluded.*; +select * from uv_iocu_view; + bb | aa | cc +-------------------------------------------------------+----+--------------------------------------------------------------------- + Rejected: ("table default",1,"(1,""table default"")") | 1 | (1,"Rejected: (""table default"",1,""(1,""""table default"""")"")") +(1 row) + +alter view uv_iocu_view alter column bb set default 'view default'; +insert into uv_iocu_view (aa) values (1) + on conflict (aa) do update set bb = 'Rejected: '||excluded.*; +select * from uv_iocu_view; + bb | aa | cc +-----------------------------------------------------+----+------------------------------------------------------------------- + Rejected: ("view default",1,"(1,""view default"")") | 1 | (1,"Rejected: (""view default"",1,""(1,""""view default"""")"")") +(1 row) + +-- Should fail to update non-updatable columns +insert into uv_iocu_view (aa) values (1) + on conflict (aa) do update set cc = 'XXX'; +ERROR: cannot insert into column "cc" of view "uv_iocu_view" +DETAIL: View columns that are not columns of their base relation are not updatable. +drop view uv_iocu_view; +drop table uv_iocu_tab; +-- ON CONFLICT DO UPDATE permissions checks +create user regress_view_user1; +create user regress_view_user2; +set session authorization regress_view_user1; +create table base_tbl(a int unique, b text, c float); +insert into base_tbl values (1,'xxx',1.0); +create view rw_view1 as select b as bb, c as cc, a as aa from base_tbl; +grant select (aa,bb) on rw_view1 to regress_view_user2; +grant insert on rw_view1 to regress_view_user2; +grant update (bb) on rw_view1 to regress_view_user2; +set session authorization regress_view_user2; +insert into rw_view1 values ('yyy',2.0,1) + on conflict (aa) do update set bb = excluded.cc; -- Not allowed +ERROR: permission denied for view rw_view1 +insert into rw_view1 values ('yyy',2.0,1) + on conflict (aa) do update set bb = rw_view1.cc; -- Not allowed +ERROR: permission denied for view rw_view1 +insert into rw_view1 values ('yyy',2.0,1) + on conflict (aa) do update set bb = excluded.bb; -- OK +insert into rw_view1 values ('zzz',2.0,1) + on conflict (aa) do update set bb = rw_view1.bb||'xxx'; -- OK +insert into rw_view1 values ('zzz',2.0,1) + on conflict (aa) do update set cc = 3.0; -- Not allowed +ERROR: permission denied for view rw_view1 +reset session authorization; +select * from base_tbl; + a | b | c +---+--------+--- + 1 | yyyxxx | 1 +(1 row) + +set session authorization regress_view_user1; +grant select (a,b) on base_tbl to regress_view_user2; +grant insert (a,b) on base_tbl to regress_view_user2; +grant update (a,b) on base_tbl to regress_view_user2; +set session authorization regress_view_user2; +create view rw_view2 as select b as bb, c as cc, a as aa from base_tbl; +insert into rw_view2 (aa,bb) values (1,'xxx') + on conflict (aa) do update set bb = excluded.bb; -- Not allowed +ERROR: permission denied for table base_tbl +create view rw_view3 as select b as bb, a as aa from base_tbl; +insert into rw_view3 (aa,bb) values (1,'xxx') + on conflict (aa) do update set bb = excluded.bb; -- OK +reset session authorization; 
+select * from base_tbl; + a | b | c +---+-----+--- + 1 | xxx | 1 +(1 row) + +set session authorization regress_view_user2; +create view rw_view4 as select aa, bb, cc FROM rw_view1; +insert into rw_view4 (aa,bb) values (1,'yyy') + on conflict (aa) do update set bb = excluded.bb; -- Not allowed +ERROR: permission denied for view rw_view1 +create view rw_view5 as select aa, bb FROM rw_view1; +insert into rw_view5 (aa,bb) values (1,'yyy') + on conflict (aa) do update set bb = excluded.bb; -- OK +reset session authorization; +select * from base_tbl; + a | b | c +---+-----+--- + 1 | yyy | 1 +(1 row) + +drop view rw_view5; +drop view rw_view4; +drop view rw_view3; +drop view rw_view2; +drop view rw_view1; +drop table base_tbl; +drop user regress_view_user1; +drop user regress_view_user2; diff --git a/src/test/regress/expected/update.out b/src/test/regress/expected/update.out index 9366f04255..d09326c182 100644 --- a/src/test/regress/expected/update.out +++ b/src/test/regress/expected/update.out @@ -198,25 +198,694 @@ INSERT INTO upsert_test VALUES (1, 'Bat') ON CONFLICT(a) DROP TABLE update_test; DROP TABLE upsert_test; --- update to a partition should check partition bound constraint for the new tuple -create table range_parted ( +--------------------------- +-- UPDATE with row movement +--------------------------- +-- When a partitioned table receives an UPDATE to the partitioned key and the +-- new values no longer meet the partition's bound, the row must be moved to +-- the correct partition for the new partition key (if one exists). We must +-- also ensure that updatable views on partitioned tables properly enforce any +-- WITH CHECK OPTION that is defined. The situation with triggers in this case +-- also requires thorough testing as partition key updates causing row +-- movement convert UPDATEs into DELETE+INSERT. +CREATE TABLE range_parted ( a text, + b bigint, + c numeric, + d int, + e varchar +) PARTITION BY RANGE (a, b); +-- Create partitions intentionally in descending bound order, so as to test +-- that update-row-movement works with the leaf partitions not in bound order. +CREATE TABLE part_b_20_b_30 (e varchar, c numeric, a text, b bigint, d int); +ALTER TABLE range_parted ATTACH PARTITION part_b_20_b_30 FOR VALUES FROM ('b', 20) TO ('b', 30); +CREATE TABLE part_b_10_b_20 (e varchar, c numeric, a text, b bigint, d int) PARTITION BY RANGE (c); +CREATE TABLE part_b_1_b_10 PARTITION OF range_parted FOR VALUES FROM ('b', 1) TO ('b', 10); +ALTER TABLE range_parted ATTACH PARTITION part_b_10_b_20 FOR VALUES FROM ('b', 10) TO ('b', 20); +CREATE TABLE part_a_10_a_20 PARTITION OF range_parted FOR VALUES FROM ('a', 10) TO ('a', 20); +CREATE TABLE part_a_1_a_10 PARTITION OF range_parted FOR VALUES FROM ('a', 1) TO ('a', 10); +-- Check that partition-key UPDATE works sanely on a partitioned table that +-- does not have any child partitions. +UPDATE part_b_10_b_20 set b = b - 6; +-- Create some more partitions following the above pattern of descending bound +-- order, but let's make the situation a bit more complex by having the +-- attribute numbers of the columns vary from their parent partition. 
+CREATE TABLE part_c_100_200 (e varchar, c numeric, a text, b bigint, d int) PARTITION BY range (abs(d)); +ALTER TABLE part_c_100_200 DROP COLUMN e, DROP COLUMN c, DROP COLUMN a; +ALTER TABLE part_c_100_200 ADD COLUMN c numeric, ADD COLUMN e varchar, ADD COLUMN a text; +ALTER TABLE part_c_100_200 DROP COLUMN b; +ALTER TABLE part_c_100_200 ADD COLUMN b bigint; +CREATE TABLE part_d_1_15 PARTITION OF part_c_100_200 FOR VALUES FROM (1) TO (15); +CREATE TABLE part_d_15_20 PARTITION OF part_c_100_200 FOR VALUES FROM (15) TO (20); +ALTER TABLE part_b_10_b_20 ATTACH PARTITION part_c_100_200 FOR VALUES FROM (100) TO (200); +CREATE TABLE part_c_1_100 (e varchar, d int, c numeric, b bigint, a text); +ALTER TABLE part_b_10_b_20 ATTACH PARTITION part_c_1_100 FOR VALUES FROM (1) TO (100); +\set init_range_parted 'truncate range_parted; insert into range_parted VALUES (''a'', 1, 1, 1), (''a'', 10, 200, 1), (''b'', 12, 96, 1), (''b'', 13, 97, 2), (''b'', 15, 105, 16), (''b'', 17, 105, 19)' +\set show_data 'select tableoid::regclass::text COLLATE "C" partname, * from range_parted ORDER BY 1, 2, 3, 4, 5, 6' +:init_range_parted; +:show_data; + partname | a | b | c | d | e +----------------+---+----+-----+----+--- + part_a_10_a_20 | a | 10 | 200 | 1 | + part_a_1_a_10 | a | 1 | 1 | 1 | + part_c_1_100 | b | 12 | 96 | 1 | + part_c_1_100 | b | 13 | 97 | 2 | + part_d_15_20 | b | 15 | 105 | 16 | + part_d_15_20 | b | 17 | 105 | 19 | +(6 rows) + +-- The order of subplans should be in bound order +EXPLAIN (costs off) UPDATE range_parted set c = c - 50 WHERE c > 97; + QUERY PLAN +------------------------------------- + Update on range_parted + Update on part_a_1_a_10 + Update on part_a_10_a_20 + Update on part_b_1_b_10 + Update on part_c_1_100 + Update on part_d_1_15 + Update on part_d_15_20 + Update on part_b_20_b_30 + -> Seq Scan on part_a_1_a_10 + Filter: (c > '97'::numeric) + -> Seq Scan on part_a_10_a_20 + Filter: (c > '97'::numeric) + -> Seq Scan on part_b_1_b_10 + Filter: (c > '97'::numeric) + -> Seq Scan on part_c_1_100 + Filter: (c > '97'::numeric) + -> Seq Scan on part_d_1_15 + Filter: (c > '97'::numeric) + -> Seq Scan on part_d_15_20 + Filter: (c > '97'::numeric) + -> Seq Scan on part_b_20_b_30 + Filter: (c > '97'::numeric) +(22 rows) + +-- fail, row movement happens only within the partition subtree. +UPDATE part_c_100_200 set c = c - 20, d = c WHERE c = 105; +ERROR: new row for relation "part_c_100_200" violates partition constraint +DETAIL: Failing row contains (105, 85, null, b, 15). +-- fail, no partition key update, so no attempt to move tuple, +-- but "a = 'a'" violates partition constraint enforced by root partition) +UPDATE part_b_10_b_20 set a = 'a'; +ERROR: new row for relation "part_c_1_100" violates partition constraint +DETAIL: Failing row contains (null, 1, 96, 12, a). 
+-- ok, partition key update, no constraint violation +UPDATE range_parted set d = d - 10 WHERE d > 10; +-- ok, no partition key update, no constraint violation +UPDATE range_parted set e = d; +-- No row found +UPDATE part_c_1_100 set c = c + 20 WHERE c = 98; +-- ok, row movement +UPDATE part_b_10_b_20 set c = c + 20 returning c, b, a; + c | b | a +-----+----+--- + 116 | 12 | b + 117 | 13 | b + 125 | 15 | b + 125 | 17 | b +(4 rows) + +:show_data; + partname | a | b | c | d | e +----------------+---+----+-----+---+--- + part_a_10_a_20 | a | 10 | 200 | 1 | 1 + part_a_1_a_10 | a | 1 | 1 | 1 | 1 + part_d_1_15 | b | 12 | 116 | 1 | 1 + part_d_1_15 | b | 13 | 117 | 2 | 2 + part_d_1_15 | b | 15 | 125 | 6 | 6 + part_d_1_15 | b | 17 | 125 | 9 | 9 +(6 rows) + +-- fail, row movement happens only within the partition subtree. +UPDATE part_b_10_b_20 set b = b - 6 WHERE c > 116 returning *; +ERROR: new row for relation "part_d_1_15" violates partition constraint +DETAIL: Failing row contains (2, 117, 2, b, 7). +-- ok, row movement, with subset of rows moved into different partition. +UPDATE range_parted set b = b - 6 WHERE c > 116 returning a, b + c; + a | ?column? +---+---------- + a | 204 + b | 124 + b | 134 + b | 136 +(4 rows) + +:show_data; + partname | a | b | c | d | e +---------------+---+----+-----+---+--- + part_a_1_a_10 | a | 1 | 1 | 1 | 1 + part_a_1_a_10 | a | 4 | 200 | 1 | 1 + part_b_1_b_10 | b | 7 | 117 | 2 | 2 + part_b_1_b_10 | b | 9 | 125 | 6 | 6 + part_d_1_15 | b | 11 | 125 | 9 | 9 + part_d_1_15 | b | 12 | 116 | 1 | 1 +(6 rows) + +-- Common table needed for multiple test scenarios. +CREATE TABLE mintab(c1 int); +INSERT into mintab VALUES (120); +-- update partition key using updatable view. +CREATE VIEW upview AS SELECT * FROM range_parted WHERE (select c > c1 FROM mintab) WITH CHECK OPTION; +-- ok +UPDATE upview set c = 199 WHERE b = 4; +-- fail, check option violation +UPDATE upview set c = 120 WHERE b = 4; +ERROR: new row violates check option for view "upview" +DETAIL: Failing row contains (a, 4, 120, 1, 1). +-- fail, row movement with check option violation +UPDATE upview set a = 'b', b = 15, c = 120 WHERE b = 4; +ERROR: new row violates check option for view "upview" +DETAIL: Failing row contains (b, 15, 120, 1, 1). +-- ok, row movement, check option passes +UPDATE upview set a = 'b', b = 15 WHERE b = 4; +:show_data; + partname | a | b | c | d | e +---------------+---+----+-----+---+--- + part_a_1_a_10 | a | 1 | 1 | 1 | 1 + part_b_1_b_10 | b | 7 | 117 | 2 | 2 + part_b_1_b_10 | b | 9 | 125 | 6 | 6 + part_d_1_15 | b | 11 | 125 | 9 | 9 + part_d_1_15 | b | 12 | 116 | 1 | 1 + part_d_1_15 | b | 15 | 199 | 1 | 1 +(6 rows) + +-- cleanup +DROP VIEW upview; +-- RETURNING having whole-row vars. 
+:init_range_parted; +UPDATE range_parted set c = 95 WHERE a = 'b' and b > 10 and c > 100 returning (range_parted), *; + range_parted | a | b | c | d | e +---------------+---+----+----+----+--- + (b,15,95,16,) | b | 15 | 95 | 16 | + (b,17,95,19,) | b | 17 | 95 | 19 | +(2 rows) + +:show_data; + partname | a | b | c | d | e +----------------+---+----+-----+----+--- + part_a_10_a_20 | a | 10 | 200 | 1 | + part_a_1_a_10 | a | 1 | 1 | 1 | + part_c_1_100 | b | 12 | 96 | 1 | + part_c_1_100 | b | 13 | 97 | 2 | + part_c_1_100 | b | 15 | 95 | 16 | + part_c_1_100 | b | 17 | 95 | 19 | +(6 rows) + +-- Transition tables with update row movement +:init_range_parted; +CREATE FUNCTION trans_updatetrigfunc() RETURNS trigger LANGUAGE plpgsql AS +$$ + begin + raise notice 'trigger = %, old table = %, new table = %', + TG_NAME, + (select string_agg(old_table::text, ', ' ORDER BY a) FROM old_table), + (select string_agg(new_table::text, ', ' ORDER BY a) FROM new_table); + return null; + end; +$$; +CREATE TRIGGER trans_updatetrig + AFTER UPDATE ON range_parted REFERENCING OLD TABLE AS old_table NEW TABLE AS new_table + FOR EACH STATEMENT EXECUTE PROCEDURE trans_updatetrigfunc(); +UPDATE range_parted set c = (case when c = 96 then 110 else c + 1 end ) WHERE a = 'b' and b > 10 and c >= 96; +NOTICE: trigger = trans_updatetrig, old table = (b,12,96,1,), (b,13,97,2,), (b,15,105,16,), (b,17,105,19,), new table = (b,12,110,1,), (b,13,98,2,), (b,15,106,16,), (b,17,106,19,) +:show_data; + partname | a | b | c | d | e +----------------+---+----+-----+----+--- + part_a_10_a_20 | a | 10 | 200 | 1 | + part_a_1_a_10 | a | 1 | 1 | 1 | + part_c_1_100 | b | 13 | 98 | 2 | + part_d_15_20 | b | 15 | 106 | 16 | + part_d_15_20 | b | 17 | 106 | 19 | + part_d_1_15 | b | 12 | 110 | 1 | +(6 rows) + +:init_range_parted; +-- Enabling OLD TABLE capture for both DELETE as well as UPDATE stmt triggers +-- should not cause DELETEd rows to be captured twice. Similar thing for +-- INSERT triggers and inserted rows. +CREATE TRIGGER trans_deletetrig + AFTER DELETE ON range_parted REFERENCING OLD TABLE AS old_table + FOR EACH STATEMENT EXECUTE PROCEDURE trans_updatetrigfunc(); +CREATE TRIGGER trans_inserttrig + AFTER INSERT ON range_parted REFERENCING NEW TABLE AS new_table + FOR EACH STATEMENT EXECUTE PROCEDURE trans_updatetrigfunc(); +UPDATE range_parted set c = c + 50 WHERE a = 'b' and b > 10 and c >= 96; +NOTICE: trigger = trans_updatetrig, old table = (b,12,96,1,), (b,13,97,2,), (b,15,105,16,), (b,17,105,19,), new table = (b,12,146,1,), (b,13,147,2,), (b,15,155,16,), (b,17,155,19,) +:show_data; + partname | a | b | c | d | e +----------------+---+----+-----+----+--- + part_a_10_a_20 | a | 10 | 200 | 1 | + part_a_1_a_10 | a | 1 | 1 | 1 | + part_d_15_20 | b | 15 | 155 | 16 | + part_d_15_20 | b | 17 | 155 | 19 | + part_d_1_15 | b | 12 | 146 | 1 | + part_d_1_15 | b | 13 | 147 | 2 | +(6 rows) + +DROP TRIGGER trans_deletetrig ON range_parted; +DROP TRIGGER trans_inserttrig ON range_parted; +-- Don't drop trans_updatetrig yet. It is required below. +-- Test with transition tuple conversion happening for rows moved into the +-- new partition. This requires a trigger that references transition table +-- (we already have trans_updatetrig). For inserted rows, the conversion +-- is not usually needed, because the original tuple is already compatible with +-- the desired transition tuple format. But conversion happens when there is a +-- BR trigger because the trigger can change the inserted row. 
So install a +-- BR triggers on those child partitions where the rows will be moved. +CREATE FUNCTION func_parted_mod_b() RETURNS trigger AS $$ +BEGIN + NEW.b = NEW.b + 1; + return NEW; +END $$ language plpgsql; +CREATE TRIGGER trig_c1_100 BEFORE UPDATE OR INSERT ON part_c_1_100 + FOR EACH ROW EXECUTE PROCEDURE func_parted_mod_b(); +CREATE TRIGGER trig_d1_15 BEFORE UPDATE OR INSERT ON part_d_1_15 + FOR EACH ROW EXECUTE PROCEDURE func_parted_mod_b(); +CREATE TRIGGER trig_d15_20 BEFORE UPDATE OR INSERT ON part_d_15_20 + FOR EACH ROW EXECUTE PROCEDURE func_parted_mod_b(); +:init_range_parted; +UPDATE range_parted set c = (case when c = 96 then 110 else c + 1 end) WHERE a = 'b' and b > 10 and c >= 96; +NOTICE: trigger = trans_updatetrig, old table = (b,13,96,1,), (b,14,97,2,), (b,16,105,16,), (b,18,105,19,), new table = (b,15,110,1,), (b,15,98,2,), (b,17,106,16,), (b,19,106,19,) +:show_data; + partname | a | b | c | d | e +----------------+---+----+-----+----+--- + part_a_10_a_20 | a | 10 | 200 | 1 | + part_a_1_a_10 | a | 1 | 1 | 1 | + part_c_1_100 | b | 15 | 98 | 2 | + part_d_15_20 | b | 17 | 106 | 16 | + part_d_15_20 | b | 19 | 106 | 19 | + part_d_1_15 | b | 15 | 110 | 1 | +(6 rows) + +:init_range_parted; +UPDATE range_parted set c = c + 50 WHERE a = 'b' and b > 10 and c >= 96; +NOTICE: trigger = trans_updatetrig, old table = (b,13,96,1,), (b,14,97,2,), (b,16,105,16,), (b,18,105,19,), new table = (b,15,146,1,), (b,16,147,2,), (b,17,155,16,), (b,19,155,19,) +:show_data; + partname | a | b | c | d | e +----------------+---+----+-----+----+--- + part_a_10_a_20 | a | 10 | 200 | 1 | + part_a_1_a_10 | a | 1 | 1 | 1 | + part_d_15_20 | b | 17 | 155 | 16 | + part_d_15_20 | b | 19 | 155 | 19 | + part_d_1_15 | b | 15 | 146 | 1 | + part_d_1_15 | b | 16 | 147 | 2 | +(6 rows) + +-- Case where per-partition tuple conversion map array is allocated, but the +-- map is not required for the particular tuple that is routed, thanks to +-- matching table attributes of the partition and the target table. +:init_range_parted; +UPDATE range_parted set b = 15 WHERE b = 1; +NOTICE: trigger = trans_updatetrig, old table = (a,1,1,1,), new table = (a,15,1,1,) +:show_data; + partname | a | b | c | d | e +----------------+---+----+-----+----+--- + part_a_10_a_20 | a | 10 | 200 | 1 | + part_a_10_a_20 | a | 15 | 1 | 1 | + part_c_1_100 | b | 13 | 96 | 1 | + part_c_1_100 | b | 14 | 97 | 2 | + part_d_15_20 | b | 16 | 105 | 16 | + part_d_15_20 | b | 18 | 105 | 19 | +(6 rows) + +DROP TRIGGER trans_updatetrig ON range_parted; +DROP TRIGGER trig_c1_100 ON part_c_1_100; +DROP TRIGGER trig_d1_15 ON part_d_1_15; +DROP TRIGGER trig_d15_20 ON part_d_15_20; +DROP FUNCTION func_parted_mod_b(); +-- RLS policies with update-row-movement +----------------------------------------- +ALTER TABLE range_parted ENABLE ROW LEVEL SECURITY; +CREATE USER regress_range_parted_user; +GRANT ALL ON range_parted, mintab TO regress_range_parted_user; +CREATE POLICY seeall ON range_parted AS PERMISSIVE FOR SELECT USING (true); +CREATE POLICY policy_range_parted ON range_parted for UPDATE USING (true) WITH CHECK (c % 2 = 0); +:init_range_parted; +SET SESSION AUTHORIZATION regress_range_parted_user; +-- This should fail with RLS violation error while moving row from +-- part_a_10_a_20 to part_d_1_15, because we are setting 'c' to an odd number. 
+UPDATE range_parted set a = 'b', c = 151 WHERE a = 'a' and c = 200; +ERROR: new row violates row-level security policy for table "range_parted" +RESET SESSION AUTHORIZATION; +-- Create a trigger on part_d_1_15 +CREATE FUNCTION func_d_1_15() RETURNS trigger AS $$ +BEGIN + NEW.c = NEW.c + 1; -- Make even numbers odd, or vice versa + return NEW; +END $$ LANGUAGE plpgsql; +CREATE TRIGGER trig_d_1_15 BEFORE INSERT ON part_d_1_15 + FOR EACH ROW EXECUTE PROCEDURE func_d_1_15(); +:init_range_parted; +SET SESSION AUTHORIZATION regress_range_parted_user; +-- Here, RLS checks should succeed while moving row from part_a_10_a_20 to +-- part_d_1_15. Even though the UPDATE is setting 'c' to an odd number, the +-- trigger at the destination partition again makes it an even number. +UPDATE range_parted set a = 'b', c = 151 WHERE a = 'a' and c = 200; +RESET SESSION AUTHORIZATION; +:init_range_parted; +SET SESSION AUTHORIZATION regress_range_parted_user; +-- This should fail with RLS violation error. Even though the UPDATE is setting +-- 'c' to an even number, the trigger at the destination partition again makes +-- it an odd number. +UPDATE range_parted set a = 'b', c = 150 WHERE a = 'a' and c = 200; +ERROR: new row violates row-level security policy for table "range_parted" +-- Cleanup +RESET SESSION AUTHORIZATION; +DROP TRIGGER trig_d_1_15 ON part_d_1_15; +DROP FUNCTION func_d_1_15(); +-- Policy expression contains SubPlan +RESET SESSION AUTHORIZATION; +:init_range_parted; +CREATE POLICY policy_range_parted_subplan on range_parted + AS RESTRICTIVE for UPDATE USING (true) + WITH CHECK ((SELECT range_parted.c <= c1 FROM mintab)); +SET SESSION AUTHORIZATION regress_range_parted_user; +-- fail, mintab has row with c1 = 120 +UPDATE range_parted set a = 'b', c = 122 WHERE a = 'a' and c = 200; +ERROR: new row violates row-level security policy "policy_range_parted_subplan" for table "range_parted" +-- ok +UPDATE range_parted set a = 'b', c = 120 WHERE a = 'a' and c = 200; +-- RLS policy expression contains whole row. 
+RESET SESSION AUTHORIZATION; +:init_range_parted; +CREATE POLICY policy_range_parted_wholerow on range_parted AS RESTRICTIVE for UPDATE USING (true) + WITH CHECK (range_parted = row('b', 10, 112, 1, NULL)::range_parted); +SET SESSION AUTHORIZATION regress_range_parted_user; +-- ok, should pass the RLS check +UPDATE range_parted set a = 'b', c = 112 WHERE a = 'a' and c = 200; +RESET SESSION AUTHORIZATION; +:init_range_parted; +SET SESSION AUTHORIZATION regress_range_parted_user; +-- fail, the whole row RLS check should fail +UPDATE range_parted set a = 'b', c = 116 WHERE a = 'a' and c = 200; +ERROR: new row violates row-level security policy "policy_range_parted_wholerow" for table "range_parted" +-- Cleanup +RESET SESSION AUTHORIZATION; +DROP POLICY policy_range_parted ON range_parted; +DROP POLICY policy_range_parted_subplan ON range_parted; +DROP POLICY policy_range_parted_wholerow ON range_parted; +REVOKE ALL ON range_parted, mintab FROM regress_range_parted_user; +DROP USER regress_range_parted_user; +DROP TABLE mintab; +-- statement triggers with update row movement +--------------------------------------------------- +:init_range_parted; +CREATE FUNCTION trigfunc() returns trigger language plpgsql as +$$ + begin + raise notice 'trigger = % fired on table % during %', + TG_NAME, TG_TABLE_NAME, TG_OP; + return null; + end; +$$; +-- Triggers on root partition +CREATE TRIGGER parent_delete_trig + AFTER DELETE ON range_parted for each statement execute procedure trigfunc(); +CREATE TRIGGER parent_update_trig + AFTER UPDATE ON range_parted for each statement execute procedure trigfunc(); +CREATE TRIGGER parent_insert_trig + AFTER INSERT ON range_parted for each statement execute procedure trigfunc(); +-- Triggers on leaf partition part_c_1_100 +CREATE TRIGGER c1_delete_trig + AFTER DELETE ON part_c_1_100 for each statement execute procedure trigfunc(); +CREATE TRIGGER c1_update_trig + AFTER UPDATE ON part_c_1_100 for each statement execute procedure trigfunc(); +CREATE TRIGGER c1_insert_trig + AFTER INSERT ON part_c_1_100 for each statement execute procedure trigfunc(); +-- Triggers on leaf partition part_d_1_15 +CREATE TRIGGER d1_delete_trig + AFTER DELETE ON part_d_1_15 for each statement execute procedure trigfunc(); +CREATE TRIGGER d1_update_trig + AFTER UPDATE ON part_d_1_15 for each statement execute procedure trigfunc(); +CREATE TRIGGER d1_insert_trig + AFTER INSERT ON part_d_1_15 for each statement execute procedure trigfunc(); +-- Triggers on leaf partition part_d_15_20 +CREATE TRIGGER d15_delete_trig + AFTER DELETE ON part_d_15_20 for each statement execute procedure trigfunc(); +CREATE TRIGGER d15_update_trig + AFTER UPDATE ON part_d_15_20 for each statement execute procedure trigfunc(); +CREATE TRIGGER d15_insert_trig + AFTER INSERT ON part_d_15_20 for each statement execute procedure trigfunc(); +-- Move all rows from part_c_100_200 to part_c_1_100. None of the delete or +-- insert statement triggers should be fired. 
+UPDATE range_parted set c = c - 50 WHERE c > 97; +NOTICE: trigger = parent_update_trig fired on table range_parted during UPDATE +:show_data; + partname | a | b | c | d | e +----------------+---+----+-----+----+--- + part_a_10_a_20 | a | 10 | 150 | 1 | + part_a_1_a_10 | a | 1 | 1 | 1 | + part_c_1_100 | b | 12 | 96 | 1 | + part_c_1_100 | b | 13 | 97 | 2 | + part_c_1_100 | b | 15 | 55 | 16 | + part_c_1_100 | b | 17 | 55 | 19 | +(6 rows) + +DROP TRIGGER parent_delete_trig ON range_parted; +DROP TRIGGER parent_update_trig ON range_parted; +DROP TRIGGER parent_insert_trig ON range_parted; +DROP TRIGGER c1_delete_trig ON part_c_1_100; +DROP TRIGGER c1_update_trig ON part_c_1_100; +DROP TRIGGER c1_insert_trig ON part_c_1_100; +DROP TRIGGER d1_delete_trig ON part_d_1_15; +DROP TRIGGER d1_update_trig ON part_d_1_15; +DROP TRIGGER d1_insert_trig ON part_d_1_15; +DROP TRIGGER d15_delete_trig ON part_d_15_20; +DROP TRIGGER d15_update_trig ON part_d_15_20; +DROP TRIGGER d15_insert_trig ON part_d_15_20; +-- Creating default partition for range +:init_range_parted; +create table part_def partition of range_parted default; +\d+ part_def + Table "public.part_def" + Column | Type | Collation | Nullable | Default | Storage | Stats target | Description +--------+-------------------+-----------+----------+---------+----------+--------------+------------- + a | text | | | | extended | | + b | bigint | | | | plain | | + c | numeric | | | | main | | + d | integer | | | | plain | | + e | character varying | | | | extended | | +Partition of: range_parted DEFAULT +Partition constraint: (NOT ((a IS NOT NULL) AND (b IS NOT NULL) AND (((a = 'a'::text) AND (b >= '1'::bigint) AND (b < '10'::bigint)) OR ((a = 'a'::text) AND (b >= '10'::bigint) AND (b < '20'::bigint)) OR ((a = 'b'::text) AND (b >= '1'::bigint) AND (b < '10'::bigint)) OR ((a = 'b'::text) AND (b >= '10'::bigint) AND (b < '20'::bigint)) OR ((a = 'b'::text) AND (b >= '20'::bigint) AND (b < '30'::bigint))))) + +insert into range_parted values ('c', 9); +-- ok +update part_def set a = 'd' where a = 'c'; +-- fail +update part_def set a = 'a' where a = 'd'; +ERROR: new row for relation "part_def" violates partition constraint +DETAIL: Failing row contains (a, 9, null, null, null). +:show_data; + partname | a | b | c | d | e +----------------+---+----+-----+----+--- + part_a_10_a_20 | a | 10 | 200 | 1 | + part_a_1_a_10 | a | 1 | 1 | 1 | + part_c_1_100 | b | 12 | 96 | 1 | + part_c_1_100 | b | 13 | 97 | 2 | + part_d_15_20 | b | 15 | 105 | 16 | + part_d_15_20 | b | 17 | 105 | 19 | + part_def | d | 9 | | | +(7 rows) + +-- Update row movement from non-default to default partition. +-- fail, default partition is not under part_a_10_a_20; +UPDATE part_a_10_a_20 set a = 'ad' WHERE a = 'a'; +ERROR: new row for relation "part_a_10_a_20" violates partition constraint +DETAIL: Failing row contains (ad, 10, 200, 1, null). +-- ok +UPDATE range_parted set a = 'ad' WHERE a = 'a'; +UPDATE range_parted set a = 'bd' WHERE a = 'b'; +:show_data; + partname | a | b | c | d | e +----------+----+----+-----+----+--- + part_def | ad | 1 | 1 | 1 | + part_def | ad | 10 | 200 | 1 | + part_def | bd | 12 | 96 | 1 | + part_def | bd | 13 | 97 | 2 | + part_def | bd | 15 | 105 | 16 | + part_def | bd | 17 | 105 | 19 | + part_def | d | 9 | | | +(7 rows) + +-- Update row movement from default to non-default partitions. 
+-- ok +UPDATE range_parted set a = 'a' WHERE a = 'ad'; +UPDATE range_parted set a = 'b' WHERE a = 'bd'; +:show_data; + partname | a | b | c | d | e +----------------+---+----+-----+----+--- + part_a_10_a_20 | a | 10 | 200 | 1 | + part_a_1_a_10 | a | 1 | 1 | 1 | + part_c_1_100 | b | 12 | 96 | 1 | + part_c_1_100 | b | 13 | 97 | 2 | + part_d_15_20 | b | 15 | 105 | 16 | + part_d_15_20 | b | 17 | 105 | 19 | + part_def | d | 9 | | | +(7 rows) + +-- Cleanup: range_parted no longer needed. +DROP TABLE range_parted; +CREATE TABLE list_parted ( + a text, + b int +) PARTITION BY list (a); +CREATE TABLE list_part1 PARTITION OF list_parted for VALUES in ('a', 'b'); +CREATE TABLE list_default PARTITION OF list_parted default; +INSERT into list_part1 VALUES ('a', 1); +INSERT into list_default VALUES ('d', 10); +-- fail +UPDATE list_default set a = 'a' WHERE a = 'd'; +ERROR: new row for relation "list_default" violates partition constraint +DETAIL: Failing row contains (a, 10). +-- ok +UPDATE list_default set a = 'x' WHERE a = 'd'; +DROP TABLE list_parted; +-------------- +-- Some more update-partition-key test scenarios below. This time use list +-- partitions. +-------------- +-- Setup for list partitions +CREATE TABLE list_parted (a numeric, b int, c int8) PARTITION BY list (a); +CREATE TABLE sub_parted PARTITION OF list_parted for VALUES in (1) PARTITION BY list (b); +CREATE TABLE sub_part1(b int, c int8, a numeric); +ALTER TABLE sub_parted ATTACH PARTITION sub_part1 for VALUES in (1); +CREATE TABLE sub_part2(b int, c int8, a numeric); +ALTER TABLE sub_parted ATTACH PARTITION sub_part2 for VALUES in (2); +CREATE TABLE list_part1(a numeric, b int, c int8); +ALTER TABLE list_parted ATTACH PARTITION list_part1 for VALUES in (2,3); +INSERT into list_parted VALUES (2,5,50); +INSERT into list_parted VALUES (3,6,60); +INSERT into sub_parted VALUES (1,1,60); +INSERT into sub_parted VALUES (1,2,10); +-- Test partition constraint violation when intermediate ancestor is used and +-- constraint is inherited from upper root. +UPDATE sub_parted set a = 2 WHERE c = 10; +ERROR: new row for relation "sub_part2" violates partition constraint +DETAIL: Failing row contains (2, 10, 2). +-- Test update-partition-key, where the unpruned partitions do not have their +-- partition keys updated. +SELECT tableoid::regclass::text, * FROM list_parted WHERE a = 2 ORDER BY 1; + tableoid | a | b | c +------------+---+---+---- + list_part1 | 2 | 5 | 50 +(1 row) + +UPDATE list_parted set b = c + a WHERE a = 2; +SELECT tableoid::regclass::text, * FROM list_parted WHERE a = 2 ORDER BY 1; + tableoid | a | b | c +------------+---+----+---- + list_part1 | 2 | 52 | 50 +(1 row) + +-- Test the case where BR UPDATE triggers change the partition key. +CREATE FUNCTION func_parted_mod_b() returns trigger as $$ +BEGIN + NEW.b = 2; -- This is changing partition key column. + return NEW; +END $$ LANGUAGE plpgsql; +CREATE TRIGGER parted_mod_b before update on sub_part1 + for each row execute procedure func_parted_mod_b(); +SELECT tableoid::regclass::text, * FROM list_parted ORDER BY 1, 2, 3, 4; + tableoid | a | b | c +------------+---+----+---- + list_part1 | 2 | 52 | 50 + list_part1 | 3 | 6 | 60 + sub_part1 | 1 | 1 | 60 + sub_part2 | 1 | 2 | 10 +(4 rows) + +-- This should do the tuple routing even though there is no explicit +-- partition-key update, because there is a trigger on sub_part1. 
+UPDATE list_parted set c = 70 WHERE b = 1; +SELECT tableoid::regclass::text, * FROM list_parted ORDER BY 1, 2, 3, 4; + tableoid | a | b | c +------------+---+----+---- + list_part1 | 2 | 52 | 50 + list_part1 | 3 | 6 | 60 + sub_part2 | 1 | 2 | 10 + sub_part2 | 1 | 2 | 70 +(4 rows) + +DROP TRIGGER parted_mod_b ON sub_part1; +-- If BR DELETE trigger prevented DELETE from happening, we should also skip +-- the INSERT if that delete is part of UPDATE=>DELETE+INSERT. +CREATE OR REPLACE FUNCTION func_parted_mod_b() returns trigger as $$ +BEGIN + raise notice 'Trigger: Got OLD row %, but returning NULL', OLD; + return NULL; +END $$ LANGUAGE plpgsql; +CREATE TRIGGER trig_skip_delete before delete on sub_part2 + for each row execute procedure func_parted_mod_b(); +UPDATE list_parted set b = 1 WHERE c = 70; +NOTICE: Trigger: Got OLD row (2,70,1), but returning NULL +SELECT tableoid::regclass::text, * FROM list_parted ORDER BY 1, 2, 3, 4; + tableoid | a | b | c +------------+---+----+---- + list_part1 | 2 | 52 | 50 + list_part1 | 3 | 6 | 60 + sub_part2 | 1 | 2 | 10 + sub_part2 | 1 | 2 | 70 +(4 rows) + +-- Drop the trigger. Now the row should be moved. +DROP TRIGGER trig_skip_delete ON sub_part2; +UPDATE list_parted set b = 1 WHERE c = 70; +SELECT tableoid::regclass::text, * FROM list_parted ORDER BY 1, 2, 3, 4; + tableoid | a | b | c +------------+---+----+---- + list_part1 | 2 | 52 | 50 + list_part1 | 3 | 6 | 60 + sub_part1 | 1 | 1 | 70 + sub_part2 | 1 | 2 | 10 +(4 rows) + +DROP FUNCTION func_parted_mod_b(); +-- UPDATE partition-key with FROM clause. If join produces multiple output +-- rows for the same row to be modified, we should tuple-route the row only +-- once. There should not be any rows inserted. +CREATE TABLE non_parted (id int); +INSERT into non_parted VALUES (1), (1), (1), (2), (2), (2), (3), (3), (3); +UPDATE list_parted t1 set a = 2 FROM non_parted t2 WHERE t1.a = t2.id and a = 1; +SELECT tableoid::regclass::text, * FROM list_parted ORDER BY 1, 2, 3, 4; + tableoid | a | b | c +------------+---+----+---- + list_part1 | 2 | 1 | 70 + list_part1 | 2 | 2 | 10 + list_part1 | 2 | 52 | 50 + list_part1 | 3 | 6 | 60 +(4 rows) + +DROP TABLE non_parted; +-- Cleanup: list_parted no longer needed. 
+DROP TABLE list_parted; +-- create custom operator class and hash function, for the same reason +-- explained in alter_table.sql +create or replace function dummy_hashint4(a int4, seed int8) returns int8 as +$$ begin return (a + seed); end; $$ language 'plpgsql' immutable; +create operator class custom_opclass for type int4 using hash as +operator 1 = , function 2 dummy_hashint4(int4, int8); +create table hash_parted ( + a int, b int -) partition by range (a, b); -create table part_a_1_a_10 partition of range_parted for values from ('a', 1) to ('a', 10); -create table part_a_10_a_20 partition of range_parted for values from ('a', 10) to ('a', 20); -create table part_b_1_b_10 partition of range_parted for values from ('b', 1) to ('b', 10); -create table part_b_10_b_20 partition of range_parted for values from ('b', 10) to ('b', 20); -insert into part_a_1_a_10 values ('a', 1); -insert into part_b_10_b_20 values ('b', 10); +) partition by hash (a custom_opclass, b custom_opclass); +create table hpart1 partition of hash_parted for values with (modulus 2, remainder 1); +create table hpart2 partition of hash_parted for values with (modulus 4, remainder 2); +create table hpart3 partition of hash_parted for values with (modulus 8, remainder 0); +create table hpart4 partition of hash_parted for values with (modulus 8, remainder 4); +insert into hpart1 values (1, 1); +insert into hpart2 values (2, 5); +insert into hpart4 values (3, 4); -- fail -update part_a_1_a_10 set a = 'b' where a = 'a'; -ERROR: new row for relation "part_a_1_a_10" violates partition constraint -DETAIL: Failing row contains (b, 1). -update range_parted set b = b - 1 where b = 10; -ERROR: new row for relation "part_b_10_b_20" violates partition constraint -DETAIL: Failing row contains (b, 9). +update hpart1 set a = 3, b=4 where a = 1; +ERROR: new row for relation "hpart1" violates partition constraint +DETAIL: Failing row contains (3, 4). 
+-- ok, row movement +update hash_parted set b = b - 1 where b = 1; -- ok -update range_parted set b = b + 1 where b = 10; +update hash_parted set b = b + 8 where b = 1; -- cleanup -drop table range_parted; +drop table hash_parted; +drop operator class custom_opclass using hash; +drop function dummy_hashint4(a int4, seed int8); diff --git a/src/test/regress/expected/vacuum.out b/src/test/regress/expected/vacuum.out index 6f68663087..fa9d663abd 100644 --- a/src/test/regress/expected/vacuum.out +++ b/src/test/regress/expected/vacuum.out @@ -80,8 +80,6 @@ CONTEXT: SQL function "do_analyze" statement 1 SQL function "wrap_do_analyze" statement 1 VACUUM FULL vactst; VACUUM (DISABLE_PAGE_SKIPPING) vaccluster; -DROP TABLE vaccluster; -DROP TABLE vactst; -- partitioned table CREATE TABLE vacparted (a int, b char) PARTITION BY LIST (a); CREATE TABLE vacparted1 PARTITION OF vacparted FOR VALUES IN (1); @@ -90,4 +88,174 @@ UPDATE vacparted SET b = 'b'; VACUUM (ANALYZE) vacparted; VACUUM (FULL) vacparted; VACUUM (FREEZE) vacparted; +-- check behavior with duplicate column mentions +VACUUM ANALYZE vacparted(a,b,a); +ERROR: column "a" of relation "vacparted" appears more than once +ANALYZE vacparted(a,b,b); +ERROR: column "b" of relation "vacparted" appears more than once +-- multiple tables specified +VACUUM vaccluster, vactst; +VACUUM vacparted, does_not_exist; +ERROR: relation "does_not_exist" does not exist +VACUUM (FREEZE) vacparted, vaccluster, vactst; +VACUUM (FREEZE) does_not_exist, vaccluster; +ERROR: relation "does_not_exist" does not exist +VACUUM ANALYZE vactst, vacparted (a); +VACUUM ANALYZE vactst (does_not_exist), vacparted (b); +ERROR: column "does_not_exist" of relation "vactst" does not exist +VACUUM FULL vacparted, vactst; +VACUUM FULL vactst, vacparted (a, b), vaccluster (i); +ERROR: ANALYZE option must be specified when a column list is provided +ANALYZE vactst, vacparted; +ANALYZE vacparted (b), vactst; +ANALYZE vactst, does_not_exist, vacparted; +ERROR: relation "does_not_exist" does not exist +ANALYZE vactst (i), vacparted (does_not_exist); +ERROR: column "does_not_exist" of relation "vacparted" does not exist +-- parenthesized syntax for ANALYZE +ANALYZE (VERBOSE) does_not_exist; +ERROR: relation "does_not_exist" does not exist +ANALYZE (nonexistent-arg) does_not_exist; +ERROR: unrecognized ANALYZE option "nonexistent" +LINE 1: ANALYZE (nonexistent-arg) does_not_exist; + ^ +-- ensure argument order independence, and that SKIP_LOCKED on non-existing +-- relation still errors out. +ANALYZE (SKIP_LOCKED, VERBOSE) does_not_exist; +ERROR: relation "does_not_exist" does not exist +ANALYZE (VERBOSE, SKIP_LOCKED) does_not_exist; +ERROR: relation "does_not_exist" does not exist +-- SKIP_LOCKED option +VACUUM (SKIP_LOCKED) vactst; +VACUUM (SKIP_LOCKED, FULL) vactst; +ANALYZE (SKIP_LOCKED) vactst; +DROP TABLE vaccluster; +DROP TABLE vactst; DROP TABLE vacparted; +-- relation ownership, WARNING logs generated as all are skipped. 
+CREATE TABLE vacowned (a int); +CREATE TABLE vacowned_parted (a int) PARTITION BY LIST (a); +CREATE TABLE vacowned_part1 PARTITION OF vacowned_parted FOR VALUES IN (1); +CREATE TABLE vacowned_part2 PARTITION OF vacowned_parted FOR VALUES IN (2); +CREATE ROLE regress_vacuum; +SET ROLE regress_vacuum; +-- Simple table +VACUUM vacowned; +WARNING: skipping "vacowned" --- only table or database owner can vacuum it +ANALYZE vacowned; +WARNING: skipping "vacowned" --- only table or database owner can analyze it +VACUUM (ANALYZE) vacowned; +WARNING: skipping "vacowned" --- only table or database owner can vacuum it +-- Catalog +VACUUM pg_catalog.pg_class; +WARNING: skipping "pg_class" --- only superuser or database owner can vacuum it +ANALYZE pg_catalog.pg_class; +WARNING: skipping "pg_class" --- only superuser or database owner can analyze it +VACUUM (ANALYZE) pg_catalog.pg_class; +WARNING: skipping "pg_class" --- only superuser or database owner can vacuum it +-- Shared catalog +VACUUM pg_catalog.pg_authid; +WARNING: skipping "pg_authid" --- only superuser can vacuum it +ANALYZE pg_catalog.pg_authid; +WARNING: skipping "pg_authid" --- only superuser can analyze it +VACUUM (ANALYZE) pg_catalog.pg_authid; +WARNING: skipping "pg_authid" --- only superuser can vacuum it +-- Partitioned table and its partitions, nothing owned by other user. +-- Relations are not listed in a single command to test ownership +-- independently. +VACUUM vacowned_parted; +WARNING: skipping "vacowned_parted" --- only table or database owner can vacuum it +WARNING: skipping "vacowned_part1" --- only table or database owner can vacuum it +WARNING: skipping "vacowned_part2" --- only table or database owner can vacuum it +VACUUM vacowned_part1; +WARNING: skipping "vacowned_part1" --- only table or database owner can vacuum it +VACUUM vacowned_part2; +WARNING: skipping "vacowned_part2" --- only table or database owner can vacuum it +ANALYZE vacowned_parted; +WARNING: skipping "vacowned_parted" --- only table or database owner can analyze it +WARNING: skipping "vacowned_part1" --- only table or database owner can analyze it +WARNING: skipping "vacowned_part2" --- only table or database owner can analyze it +ANALYZE vacowned_part1; +WARNING: skipping "vacowned_part1" --- only table or database owner can analyze it +ANALYZE vacowned_part2; +WARNING: skipping "vacowned_part2" --- only table or database owner can analyze it +VACUUM (ANALYZE) vacowned_parted; +WARNING: skipping "vacowned_parted" --- only table or database owner can vacuum it +WARNING: skipping "vacowned_part1" --- only table or database owner can vacuum it +WARNING: skipping "vacowned_part2" --- only table or database owner can vacuum it +VACUUM (ANALYZE) vacowned_part1; +WARNING: skipping "vacowned_part1" --- only table or database owner can vacuum it +VACUUM (ANALYZE) vacowned_part2; +WARNING: skipping "vacowned_part2" --- only table or database owner can vacuum it +RESET ROLE; +-- Partitioned table and one partition owned by other user. 
+ALTER TABLE vacowned_parted OWNER TO regress_vacuum; +ALTER TABLE vacowned_part1 OWNER TO regress_vacuum; +SET ROLE regress_vacuum; +VACUUM vacowned_parted; +WARNING: skipping "vacowned_part2" --- only table or database owner can vacuum it +VACUUM vacowned_part1; +VACUUM vacowned_part2; +WARNING: skipping "vacowned_part2" --- only table or database owner can vacuum it +ANALYZE vacowned_parted; +WARNING: skipping "vacowned_part2" --- only table or database owner can analyze it +ANALYZE vacowned_part1; +ANALYZE vacowned_part2; +WARNING: skipping "vacowned_part2" --- only table or database owner can analyze it +VACUUM (ANALYZE) vacowned_parted; +WARNING: skipping "vacowned_part2" --- only table or database owner can vacuum it +VACUUM (ANALYZE) vacowned_part1; +VACUUM (ANALYZE) vacowned_part2; +WARNING: skipping "vacowned_part2" --- only table or database owner can vacuum it +RESET ROLE; +-- Only one partition owned by other user. +ALTER TABLE vacowned_parted OWNER TO CURRENT_USER; +SET ROLE regress_vacuum; +VACUUM vacowned_parted; +WARNING: skipping "vacowned_parted" --- only table or database owner can vacuum it +WARNING: skipping "vacowned_part2" --- only table or database owner can vacuum it +VACUUM vacowned_part1; +VACUUM vacowned_part2; +WARNING: skipping "vacowned_part2" --- only table or database owner can vacuum it +ANALYZE vacowned_parted; +WARNING: skipping "vacowned_parted" --- only table or database owner can analyze it +WARNING: skipping "vacowned_part2" --- only table or database owner can analyze it +ANALYZE vacowned_part1; +ANALYZE vacowned_part2; +WARNING: skipping "vacowned_part2" --- only table or database owner can analyze it +VACUUM (ANALYZE) vacowned_parted; +WARNING: skipping "vacowned_parted" --- only table or database owner can vacuum it +WARNING: skipping "vacowned_part2" --- only table or database owner can vacuum it +VACUUM (ANALYZE) vacowned_part1; +VACUUM (ANALYZE) vacowned_part2; +WARNING: skipping "vacowned_part2" --- only table or database owner can vacuum it +RESET ROLE; +-- Only partitioned table owned by other user. 
+ALTER TABLE vacowned_parted OWNER TO regress_vacuum; +ALTER TABLE vacowned_part1 OWNER TO CURRENT_USER; +SET ROLE regress_vacuum; +VACUUM vacowned_parted; +WARNING: skipping "vacowned_part1" --- only table or database owner can vacuum it +WARNING: skipping "vacowned_part2" --- only table or database owner can vacuum it +VACUUM vacowned_part1; +WARNING: skipping "vacowned_part1" --- only table or database owner can vacuum it +VACUUM vacowned_part2; +WARNING: skipping "vacowned_part2" --- only table or database owner can vacuum it +ANALYZE vacowned_parted; +WARNING: skipping "vacowned_part1" --- only table or database owner can analyze it +WARNING: skipping "vacowned_part2" --- only table or database owner can analyze it +ANALYZE vacowned_part1; +WARNING: skipping "vacowned_part1" --- only table or database owner can analyze it +ANALYZE vacowned_part2; +WARNING: skipping "vacowned_part2" --- only table or database owner can analyze it +VACUUM (ANALYZE) vacowned_parted; +WARNING: skipping "vacowned_part1" --- only table or database owner can vacuum it +WARNING: skipping "vacowned_part2" --- only table or database owner can vacuum it +VACUUM (ANALYZE) vacowned_part1; +WARNING: skipping "vacowned_part1" --- only table or database owner can vacuum it +VACUUM (ANALYZE) vacowned_part2; +WARNING: skipping "vacowned_part2" --- only table or database owner can vacuum it +RESET ROLE; +DROP TABLE vacowned; +DROP TABLE vacowned_parted; +DROP ROLE regress_vacuum; diff --git a/src/test/regress/expected/window.out b/src/test/regress/expected/window.out index 19f909f3d1..662d348653 100644 --- a/src/test/regress/expected/window.out +++ b/src/test/regress/expected/window.out @@ -504,9 +504,9 @@ SELECT sum(salary), FROM empsalary GROUP BY depname; sum | row_number | sum -------+------------+------- - 14600 | 3 | 14600 - 7400 | 2 | 22000 25100 | 1 | 47100 + 7400 | 2 | 22000 + 14600 | 3 | 14600 (3 rows) -- identical windows with different names @@ -819,6 +819,176 @@ FROM tenk1 WHERE unique1 < 10; 10 | 0 | 0 (10 rows) +SELECT sum(unique1) over (rows between 2 preceding and 2 following exclude no others), + unique1, four +FROM tenk1 WHERE unique1 < 10; + sum | unique1 | four +-----+---------+------ + 7 | 4 | 0 + 13 | 2 | 2 + 22 | 1 | 1 + 26 | 6 | 2 + 29 | 9 | 1 + 31 | 8 | 0 + 32 | 5 | 1 + 23 | 3 | 3 + 15 | 7 | 3 + 10 | 0 | 0 +(10 rows) + +SELECT sum(unique1) over (rows between 2 preceding and 2 following exclude current row), + unique1, four +FROM tenk1 WHERE unique1 < 10; + sum | unique1 | four +-----+---------+------ + 3 | 4 | 0 + 11 | 2 | 2 + 21 | 1 | 1 + 20 | 6 | 2 + 20 | 9 | 1 + 23 | 8 | 0 + 27 | 5 | 1 + 20 | 3 | 3 + 8 | 7 | 3 + 10 | 0 | 0 +(10 rows) + +SELECT sum(unique1) over (rows between 2 preceding and 2 following exclude group), + unique1, four +FROM tenk1 WHERE unique1 < 10; + sum | unique1 | four +-----+---------+------ + | 4 | 0 + | 2 | 2 + | 1 | 1 + | 6 | 2 + | 9 | 1 + | 8 | 0 + | 5 | 1 + | 3 | 3 + | 7 | 3 + | 0 | 0 +(10 rows) + +SELECT sum(unique1) over (rows between 2 preceding and 2 following exclude ties), + unique1, four +FROM tenk1 WHERE unique1 < 10; + sum | unique1 | four +-----+---------+------ + 4 | 4 | 0 + 2 | 2 | 2 + 1 | 1 | 1 + 6 | 6 | 2 + 9 | 9 | 1 + 8 | 8 | 0 + 5 | 5 | 1 + 3 | 3 | 3 + 7 | 7 | 3 + 0 | 0 | 0 +(10 rows) + +SELECT first_value(unique1) over (ORDER BY four rows between current row and 2 following exclude current row), + unique1, four +FROM tenk1 WHERE unique1 < 10; + first_value | unique1 | four +-------------+---------+------ + 8 | 0 | 0 + 4 | 8 | 0 + 5 | 4 | 0 + 9 | 5 | 
1 + 1 | 9 | 1 + 6 | 1 | 1 + 2 | 6 | 2 + 3 | 2 | 2 + 7 | 3 | 3 + | 7 | 3 +(10 rows) + +SELECT first_value(unique1) over (ORDER BY four rows between current row and 2 following exclude group), + unique1, four +FROM tenk1 WHERE unique1 < 10; + first_value | unique1 | four +-------------+---------+------ + | 0 | 0 + 5 | 8 | 0 + 5 | 4 | 0 + | 5 | 1 + 6 | 9 | 1 + 6 | 1 | 1 + 3 | 6 | 2 + 3 | 2 | 2 + | 3 | 3 + | 7 | 3 +(10 rows) + +SELECT first_value(unique1) over (ORDER BY four rows between current row and 2 following exclude ties), + unique1, four +FROM tenk1 WHERE unique1 < 10; + first_value | unique1 | four +-------------+---------+------ + 0 | 0 | 0 + 8 | 8 | 0 + 4 | 4 | 0 + 5 | 5 | 1 + 9 | 9 | 1 + 1 | 1 | 1 + 6 | 6 | 2 + 2 | 2 | 2 + 3 | 3 | 3 + 7 | 7 | 3 +(10 rows) + +SELECT last_value(unique1) over (ORDER BY four rows between current row and 2 following exclude current row), + unique1, four +FROM tenk1 WHERE unique1 < 10; + last_value | unique1 | four +------------+---------+------ + 4 | 0 | 0 + 5 | 8 | 0 + 9 | 4 | 0 + 1 | 5 | 1 + 6 | 9 | 1 + 2 | 1 | 1 + 3 | 6 | 2 + 7 | 2 | 2 + 7 | 3 | 3 + | 7 | 3 +(10 rows) + +SELECT last_value(unique1) over (ORDER BY four rows between current row and 2 following exclude group), + unique1, four +FROM tenk1 WHERE unique1 < 10; + last_value | unique1 | four +------------+---------+------ + | 0 | 0 + 5 | 8 | 0 + 9 | 4 | 0 + | 5 | 1 + 6 | 9 | 1 + 2 | 1 | 1 + 3 | 6 | 2 + 7 | 2 | 2 + | 3 | 3 + | 7 | 3 +(10 rows) + +SELECT last_value(unique1) over (ORDER BY four rows between current row and 2 following exclude ties), + unique1, four +FROM tenk1 WHERE unique1 < 10; + last_value | unique1 | four +------------+---------+------ + 0 | 0 | 0 + 5 | 8 | 0 + 9 | 4 | 0 + 5 | 5 | 1 + 6 | 9 | 1 + 2 | 1 | 1 + 3 | 6 | 2 + 7 | 2 | 2 + 3 | 3 | 3 + 7 | 7 | 3 +(10 rows) + SELECT sum(unique1) over (rows between 2 preceding and 1 preceding), unique1, four FROM tenk1 WHERE unique1 < 10; @@ -887,13 +1057,57 @@ FROM tenk1 WHERE unique1 < 10 WINDOW w AS (order by four); 10 | 7 | 3 (10 rows) --- fail: not implemented yet -SELECT sum(unique1) over (order by four range between 2::int8 preceding and 1::int2 preceding), +SELECT sum(unique1) over (w range between unbounded preceding and current row exclude current row), unique1, four -FROM tenk1 WHERE unique1 < 10; -ERROR: RANGE PRECEDING is only supported with UNBOUNDED -LINE 1: SELECT sum(unique1) over (order by four range between 2::int... 
- ^ +FROM tenk1 WHERE unique1 < 10 WINDOW w AS (order by four); + sum | unique1 | four +-----+---------+------ + 12 | 0 | 0 + 4 | 8 | 0 + 8 | 4 | 0 + 22 | 5 | 1 + 18 | 9 | 1 + 26 | 1 | 1 + 29 | 6 | 2 + 33 | 2 | 2 + 42 | 3 | 3 + 38 | 7 | 3 +(10 rows) + +SELECT sum(unique1) over (w range between unbounded preceding and current row exclude group), + unique1, four +FROM tenk1 WHERE unique1 < 10 WINDOW w AS (order by four); + sum | unique1 | four +-----+---------+------ + | 0 | 0 + | 8 | 0 + | 4 | 0 + 12 | 5 | 1 + 12 | 9 | 1 + 12 | 1 | 1 + 27 | 6 | 2 + 27 | 2 | 2 + 35 | 3 | 3 + 35 | 7 | 3 +(10 rows) + +SELECT sum(unique1) over (w range between unbounded preceding and current row exclude ties), + unique1, four +FROM tenk1 WHERE unique1 < 10 WINDOW w AS (order by four); + sum | unique1 | four +-----+---------+------ + 0 | 0 | 0 + 8 | 8 | 0 + 4 | 4 | 0 + 17 | 5 | 1 + 21 | 9 | 1 + 13 | 1 | 1 + 33 | 6 | 2 + 29 | 2 | 2 + 38 | 3 | 3 + 42 | 7 | 3 +(10 rows) + SELECT first_value(unique1) over w, nth_value(unique1, 2) over w AS nth_2, last_value(unique1) over w, unique1, four @@ -958,122 +1172,1907 @@ SELECT pg_get_viewdef('v_window'); FROM generate_series(1, 10) i(i); (1 row) --- with UNION -SELECT count(*) OVER (PARTITION BY four) FROM (SELECT * FROM tenk1 UNION ALL SELECT * FROM tenk2)s LIMIT 0; - count -------- -(0 rows) +CREATE OR REPLACE TEMP VIEW v_window AS + SELECT i, sum(i) over (order by i rows between 1 preceding and 1 following + exclude current row) as sum_rows FROM generate_series(1, 10) i; +SELECT * FROM v_window; + i | sum_rows +----+---------- + 1 | 2 + 2 | 4 + 3 | 6 + 4 | 8 + 5 | 10 + 6 | 12 + 7 | 14 + 8 | 16 + 9 | 18 + 10 | 9 +(10 rows) --- ordering by a non-integer constant is allowed -SELECT rank() OVER (ORDER BY length('abc')); - rank ------- - 1 +SELECT pg_get_viewdef('v_window'); + pg_get_viewdef +----------------------------------------------------------------------------------------------------------- + SELECT i.i, + + sum(i.i) OVER (ORDER BY i.i ROWS BETWEEN 1 PRECEDING AND 1 FOLLOWING EXCLUDE CURRENT ROW) AS sum_rows+ + FROM generate_series(1, 10) i(i); (1 row) --- can't order by another window function -SELECT rank() OVER (ORDER BY rank() OVER (ORDER BY random())); -ERROR: window functions are not allowed in window definitions -LINE 1: SELECT rank() OVER (ORDER BY rank() OVER (ORDER BY random())... - ^ --- some other errors -SELECT * FROM empsalary WHERE row_number() OVER (ORDER BY salary) < 10; -ERROR: window functions are not allowed in WHERE -LINE 1: SELECT * FROM empsalary WHERE row_number() OVER (ORDER BY sa... - ^ -SELECT * FROM empsalary INNER JOIN tenk1 ON row_number() OVER (ORDER BY salary) < 10; -ERROR: window functions are not allowed in JOIN conditions -LINE 1: SELECT * FROM empsalary INNER JOIN tenk1 ON row_number() OVE... - ^ -SELECT rank() OVER (ORDER BY 1), count(*) FROM empsalary GROUP BY 1; -ERROR: window functions are not allowed in GROUP BY -LINE 1: SELECT rank() OVER (ORDER BY 1), count(*) FROM empsalary GRO... - ^ -SELECT * FROM rank() OVER (ORDER BY random()); -ERROR: syntax error at or near "ORDER" -LINE 1: SELECT * FROM rank() OVER (ORDER BY random()); - ^ -DELETE FROM empsalary WHERE (rank() OVER (ORDER BY random())) > 10; -ERROR: window functions are not allowed in WHERE -LINE 1: DELETE FROM empsalary WHERE (rank() OVER (ORDER BY random())... - ^ -DELETE FROM empsalary RETURNING rank() OVER (ORDER BY random()); -ERROR: window functions are not allowed in RETURNING -LINE 1: DELETE FROM empsalary RETURNING rank() OVER (ORDER BY random... 
- ^ -SELECT count(*) OVER w FROM tenk1 WINDOW w AS (ORDER BY unique1), w AS (ORDER BY unique1); -ERROR: window "w" is already defined -LINE 1: ...w FROM tenk1 WINDOW w AS (ORDER BY unique1), w AS (ORDER BY ... - ^ -SELECT rank() OVER (PARTITION BY four, ORDER BY ten) FROM tenk1; -ERROR: syntax error at or near "ORDER" -LINE 1: SELECT rank() OVER (PARTITION BY four, ORDER BY ten) FROM te... - ^ -SELECT count() OVER () FROM tenk1; -ERROR: count(*) must be used to call a parameterless aggregate function -LINE 1: SELECT count() OVER () FROM tenk1; - ^ -SELECT generate_series(1, 100) OVER () FROM empsalary; -ERROR: OVER specified, but generate_series is not a window function nor an aggregate function -LINE 1: SELECT generate_series(1, 100) OVER () FROM empsalary; - ^ -SELECT ntile(0) OVER (ORDER BY ten), ten, four FROM tenk1; -ERROR: argument of ntile must be greater than zero -SELECT nth_value(four, 0) OVER (ORDER BY ten), ten, four FROM tenk1; -ERROR: argument of nth_value must be greater than zero --- filter -SELECT sum(salary), row_number() OVER (ORDER BY depname), sum( - sum(salary) FILTER (WHERE enroll_date > '2007-01-01') -) FILTER (WHERE depname <> 'sales') OVER (ORDER BY depname DESC) AS "filtered_sum", - depname -FROM empsalary GROUP BY depname; - sum | row_number | filtered_sum | depname --------+------------+--------------+----------- - 14600 | 3 | | sales - 7400 | 2 | 3500 | personnel - 25100 | 1 | 22600 | develop -(3 rows) +CREATE OR REPLACE TEMP VIEW v_window AS + SELECT i, sum(i) over (order by i rows between 1 preceding and 1 following + exclude group) as sum_rows FROM generate_series(1, 10) i; +SELECT * FROM v_window; + i | sum_rows +----+---------- + 1 | 2 + 2 | 4 + 3 | 6 + 4 | 8 + 5 | 10 + 6 | 12 + 7 | 14 + 8 | 16 + 9 | 18 + 10 | 9 +(10 rows) --- Test pushdown of quals into a subquery containing window functions --- pushdown is safe because all PARTITION BY clauses include depname: -EXPLAIN (COSTS OFF) -SELECT * FROM - (SELECT depname, - sum(salary) OVER (PARTITION BY depname) depsalary, - min(salary) OVER (PARTITION BY depname || 'A', depname) depminsalary - FROM empsalary) emp -WHERE depname = 'sales'; - QUERY PLAN ---------------------------------------------------------------------- - Subquery Scan on emp - -> WindowAgg - -> Sort - Sort Key: (((empsalary.depname)::text || 'A'::text)) - -> WindowAgg - -> Seq Scan on empsalary - Filter: ((depname)::text = 'sales'::text) -(7 rows) +SELECT pg_get_viewdef('v_window'); + pg_get_viewdef +----------------------------------------------------------------------------------------------------- + SELECT i.i, + + sum(i.i) OVER (ORDER BY i.i ROWS BETWEEN 1 PRECEDING AND 1 FOLLOWING EXCLUDE GROUP) AS sum_rows+ + FROM generate_series(1, 10) i(i); +(1 row) --- pushdown is unsafe because there's a PARTITION BY clause without depname: -EXPLAIN (COSTS OFF) -SELECT * FROM - (SELECT depname, - sum(salary) OVER (PARTITION BY enroll_date) enroll_salary, - min(salary) OVER (PARTITION BY depname) depminsalary - FROM empsalary) emp -WHERE depname = 'sales'; - QUERY PLAN ------------------------------------------------------------ - Subquery Scan on emp - Filter: ((emp.depname)::text = 'sales'::text) - -> WindowAgg - -> Sort - Sort Key: empsalary.depname - -> WindowAgg - -> Sort - Sort Key: empsalary.enroll_date - -> Seq Scan on empsalary -(9 rows) +CREATE OR REPLACE TEMP VIEW v_window AS + SELECT i, sum(i) over (order by i rows between 1 preceding and 1 following + exclude ties) as sum_rows FROM generate_series(1, 10) i; +SELECT * FROM v_window; + i 
| sum_rows +----+---------- + 1 | 3 + 2 | 6 + 3 | 9 + 4 | 12 + 5 | 15 + 6 | 18 + 7 | 21 + 8 | 24 + 9 | 27 + 10 | 19 +(10 rows) + +SELECT pg_get_viewdef('v_window'); + pg_get_viewdef +---------------------------------------------------------------------------------------------------- + SELECT i.i, + + sum(i.i) OVER (ORDER BY i.i ROWS BETWEEN 1 PRECEDING AND 1 FOLLOWING EXCLUDE TIES) AS sum_rows+ + FROM generate_series(1, 10) i(i); +(1 row) + +CREATE OR REPLACE TEMP VIEW v_window AS + SELECT i, sum(i) over (order by i rows between 1 preceding and 1 following + exclude no others) as sum_rows FROM generate_series(1, 10) i; +SELECT * FROM v_window; + i | sum_rows +----+---------- + 1 | 3 + 2 | 6 + 3 | 9 + 4 | 12 + 5 | 15 + 6 | 18 + 7 | 21 + 8 | 24 + 9 | 27 + 10 | 19 +(10 rows) + +SELECT pg_get_viewdef('v_window'); + pg_get_viewdef +--------------------------------------------------------------------------------------- + SELECT i.i, + + sum(i.i) OVER (ORDER BY i.i ROWS BETWEEN 1 PRECEDING AND 1 FOLLOWING) AS sum_rows+ + FROM generate_series(1, 10) i(i); +(1 row) + +CREATE OR REPLACE TEMP VIEW v_window AS + SELECT i, sum(i) over (order by i groups between 1 preceding and 1 following) as sum_rows FROM generate_series(1, 10) i; +SELECT * FROM v_window; + i | sum_rows +----+---------- + 1 | 3 + 2 | 6 + 3 | 9 + 4 | 12 + 5 | 15 + 6 | 18 + 7 | 21 + 8 | 24 + 9 | 27 + 10 | 19 +(10 rows) + +SELECT pg_get_viewdef('v_window'); + pg_get_viewdef +----------------------------------------------------------------------------------------- + SELECT i.i, + + sum(i.i) OVER (ORDER BY i.i GROUPS BETWEEN 1 PRECEDING AND 1 FOLLOWING) AS sum_rows+ + FROM generate_series(1, 10) i(i); +(1 row) + +DROP VIEW v_window; +CREATE TEMP VIEW v_window AS + SELECT i, min(i) over (order by i range between '1 day' preceding and '10 days' following) as min_i + FROM generate_series(now(), now()+'100 days'::interval, '1 hour') i; +SELECT pg_get_viewdef('v_window'); + pg_get_viewdef +--------------------------------------------------------------------------------------------------------------------------- + SELECT i.i, + + min(i.i) OVER (ORDER BY i.i RANGE BETWEEN '@ 1 day'::interval PRECEDING AND '@ 10 days'::interval FOLLOWING) AS min_i+ + FROM generate_series(now(), (now() + '@ 100 days'::interval), '@ 1 hour'::interval) i(i); +(1 row) + +-- RANGE offset PRECEDING/FOLLOWING tests +SELECT sum(unique1) over (order by four range between 2::int8 preceding and 1::int2 preceding), + unique1, four +FROM tenk1 WHERE unique1 < 10; + sum | unique1 | four +-----+---------+------ + | 0 | 0 + | 8 | 0 + | 4 | 0 + 12 | 5 | 1 + 12 | 9 | 1 + 12 | 1 | 1 + 27 | 6 | 2 + 27 | 2 | 2 + 23 | 3 | 3 + 23 | 7 | 3 +(10 rows) + +SELECT sum(unique1) over (order by four desc range between 2::int8 preceding and 1::int2 preceding), + unique1, four +FROM tenk1 WHERE unique1 < 10; + sum | unique1 | four +-----+---------+------ + | 3 | 3 + | 7 | 3 + 10 | 6 | 2 + 10 | 2 | 2 + 18 | 9 | 1 + 18 | 5 | 1 + 18 | 1 | 1 + 23 | 0 | 0 + 23 | 8 | 0 + 23 | 4 | 0 +(10 rows) + +SELECT sum(unique1) over (order by four range between 2::int8 preceding and 1::int2 preceding exclude no others), + unique1, four +FROM tenk1 WHERE unique1 < 10; + sum | unique1 | four +-----+---------+------ + | 0 | 0 + | 8 | 0 + | 4 | 0 + 12 | 5 | 1 + 12 | 9 | 1 + 12 | 1 | 1 + 27 | 6 | 2 + 27 | 2 | 2 + 23 | 3 | 3 + 23 | 7 | 3 +(10 rows) + +SELECT sum(unique1) over (order by four range between 2::int8 preceding and 1::int2 preceding exclude current row), + unique1, four +FROM tenk1 WHERE unique1 < 10; + sum | 
unique1 | four +-----+---------+------ + | 0 | 0 + | 8 | 0 + | 4 | 0 + 12 | 5 | 1 + 12 | 9 | 1 + 12 | 1 | 1 + 27 | 6 | 2 + 27 | 2 | 2 + 23 | 3 | 3 + 23 | 7 | 3 +(10 rows) + +SELECT sum(unique1) over (order by four range between 2::int8 preceding and 1::int2 preceding exclude group), + unique1, four +FROM tenk1 WHERE unique1 < 10; + sum | unique1 | four +-----+---------+------ + | 0 | 0 + | 8 | 0 + | 4 | 0 + 12 | 5 | 1 + 12 | 9 | 1 + 12 | 1 | 1 + 27 | 6 | 2 + 27 | 2 | 2 + 23 | 3 | 3 + 23 | 7 | 3 +(10 rows) + +SELECT sum(unique1) over (order by four range between 2::int8 preceding and 1::int2 preceding exclude ties), + unique1, four +FROM tenk1 WHERE unique1 < 10; + sum | unique1 | four +-----+---------+------ + | 0 | 0 + | 8 | 0 + | 4 | 0 + 12 | 5 | 1 + 12 | 9 | 1 + 12 | 1 | 1 + 27 | 6 | 2 + 27 | 2 | 2 + 23 | 3 | 3 + 23 | 7 | 3 +(10 rows) + +SELECT sum(unique1) over (order by four range between 2::int8 preceding and 6::int2 following exclude ties), + unique1, four +FROM tenk1 WHERE unique1 < 10; + sum | unique1 | four +-----+---------+------ + 33 | 0 | 0 + 41 | 8 | 0 + 37 | 4 | 0 + 35 | 5 | 1 + 39 | 9 | 1 + 31 | 1 | 1 + 43 | 6 | 2 + 39 | 2 | 2 + 26 | 3 | 3 + 30 | 7 | 3 +(10 rows) + +SELECT sum(unique1) over (order by four range between 2::int8 preceding and 6::int2 following exclude group), + unique1, four +FROM tenk1 WHERE unique1 < 10; + sum | unique1 | four +-----+---------+------ + 33 | 0 | 0 + 33 | 8 | 0 + 33 | 4 | 0 + 30 | 5 | 1 + 30 | 9 | 1 + 30 | 1 | 1 + 37 | 6 | 2 + 37 | 2 | 2 + 23 | 3 | 3 + 23 | 7 | 3 +(10 rows) + +SELECT sum(unique1) over (partition by four order by unique1 range between 5::int8 preceding and 6::int2 following), + unique1, four +FROM tenk1 WHERE unique1 < 10; + sum | unique1 | four +-----+---------+------ + 4 | 0 | 0 + 12 | 4 | 0 + 12 | 8 | 0 + 6 | 1 | 1 + 15 | 5 | 1 + 14 | 9 | 1 + 8 | 2 | 2 + 8 | 6 | 2 + 10 | 3 | 3 + 10 | 7 | 3 +(10 rows) + +SELECT sum(unique1) over (partition by four order by unique1 range between 5::int8 preceding and 6::int2 following + exclude current row),unique1, four +FROM tenk1 WHERE unique1 < 10; + sum | unique1 | four +-----+---------+------ + 4 | 0 | 0 + 8 | 4 | 0 + 4 | 8 | 0 + 5 | 1 | 1 + 10 | 5 | 1 + 5 | 9 | 1 + 6 | 2 | 2 + 2 | 6 | 2 + 7 | 3 | 3 + 3 | 7 | 3 +(10 rows) + +select sum(salary) over (order by enroll_date range between '1 year'::interval preceding and '1 year'::interval following), + salary, enroll_date from empsalary; + sum | salary | enroll_date +-------+--------+------------- + 34900 | 5000 | 10-01-2006 + 34900 | 6000 | 10-01-2006 + 38400 | 3900 | 12-23-2006 + 47100 | 4800 | 08-01-2007 + 47100 | 5200 | 08-01-2007 + 47100 | 4800 | 08-08-2007 + 47100 | 5200 | 08-15-2007 + 36100 | 3500 | 12-10-2007 + 32200 | 4500 | 01-01-2008 + 32200 | 4200 | 01-01-2008 +(10 rows) + +select sum(salary) over (order by enroll_date desc range between '1 year'::interval preceding and '1 year'::interval following), + salary, enroll_date from empsalary; + sum | salary | enroll_date +-------+--------+------------- + 32200 | 4200 | 01-01-2008 + 32200 | 4500 | 01-01-2008 + 36100 | 3500 | 12-10-2007 + 47100 | 5200 | 08-15-2007 + 47100 | 4800 | 08-08-2007 + 47100 | 4800 | 08-01-2007 + 47100 | 5200 | 08-01-2007 + 38400 | 3900 | 12-23-2006 + 34900 | 5000 | 10-01-2006 + 34900 | 6000 | 10-01-2006 +(10 rows) + +select sum(salary) over (order by enroll_date desc range between '1 year'::interval following and '1 year'::interval following), + salary, enroll_date from empsalary; + sum | salary | enroll_date +-----+--------+------------- + | 4200 | 01-01-2008 
+ | 4500 | 01-01-2008 + | 3500 | 12-10-2007 + | 5200 | 08-15-2007 + | 4800 | 08-08-2007 + | 4800 | 08-01-2007 + | 5200 | 08-01-2007 + | 3900 | 12-23-2006 + | 5000 | 10-01-2006 + | 6000 | 10-01-2006 +(10 rows) + +select sum(salary) over (order by enroll_date range between '1 year'::interval preceding and '1 year'::interval following + exclude current row), salary, enroll_date from empsalary; + sum | salary | enroll_date +-------+--------+------------- + 29900 | 5000 | 10-01-2006 + 28900 | 6000 | 10-01-2006 + 34500 | 3900 | 12-23-2006 + 42300 | 4800 | 08-01-2007 + 41900 | 5200 | 08-01-2007 + 42300 | 4800 | 08-08-2007 + 41900 | 5200 | 08-15-2007 + 32600 | 3500 | 12-10-2007 + 27700 | 4500 | 01-01-2008 + 28000 | 4200 | 01-01-2008 +(10 rows) + +select sum(salary) over (order by enroll_date range between '1 year'::interval preceding and '1 year'::interval following + exclude group), salary, enroll_date from empsalary; + sum | salary | enroll_date +-------+--------+------------- + 23900 | 5000 | 10-01-2006 + 23900 | 6000 | 10-01-2006 + 34500 | 3900 | 12-23-2006 + 37100 | 4800 | 08-01-2007 + 37100 | 5200 | 08-01-2007 + 42300 | 4800 | 08-08-2007 + 41900 | 5200 | 08-15-2007 + 32600 | 3500 | 12-10-2007 + 23500 | 4500 | 01-01-2008 + 23500 | 4200 | 01-01-2008 +(10 rows) + +select sum(salary) over (order by enroll_date range between '1 year'::interval preceding and '1 year'::interval following + exclude ties), salary, enroll_date from empsalary; + sum | salary | enroll_date +-------+--------+------------- + 28900 | 5000 | 10-01-2006 + 29900 | 6000 | 10-01-2006 + 38400 | 3900 | 12-23-2006 + 41900 | 4800 | 08-01-2007 + 42300 | 5200 | 08-01-2007 + 47100 | 4800 | 08-08-2007 + 47100 | 5200 | 08-15-2007 + 36100 | 3500 | 12-10-2007 + 28000 | 4500 | 01-01-2008 + 27700 | 4200 | 01-01-2008 +(10 rows) + +select first_value(salary) over(order by salary range between 1000 preceding and 1000 following), + lead(salary) over(order by salary range between 1000 preceding and 1000 following), + nth_value(salary, 1) over(order by salary range between 1000 preceding and 1000 following), + salary from empsalary; + first_value | lead | nth_value | salary +-------------+------+-----------+-------- + 3500 | 3900 | 3500 | 3500 + 3500 | 4200 | 3500 | 3900 + 3500 | 4500 | 3500 | 4200 + 3500 | 4800 | 3500 | 4500 + 3900 | 4800 | 3900 | 4800 + 3900 | 5000 | 3900 | 4800 + 4200 | 5200 | 4200 | 5000 + 4200 | 5200 | 4200 | 5200 + 4200 | 6000 | 4200 | 5200 + 5000 | | 5000 | 6000 +(10 rows) + +select last_value(salary) over(order by salary range between 1000 preceding and 1000 following), + lag(salary) over(order by salary range between 1000 preceding and 1000 following), + salary from empsalary; + last_value | lag | salary +------------+------+-------- + 4500 | | 3500 + 4800 | 3500 | 3900 + 5200 | 3900 | 4200 + 5200 | 4200 | 4500 + 5200 | 4500 | 4800 + 5200 | 4800 | 4800 + 6000 | 4800 | 5000 + 6000 | 5000 | 5200 + 6000 | 5200 | 5200 + 6000 | 5200 | 6000 +(10 rows) + +select first_value(salary) over(order by salary range between 1000 following and 3000 following + exclude current row), + lead(salary) over(order by salary range between 1000 following and 3000 following exclude ties), + nth_value(salary, 1) over(order by salary range between 1000 following and 3000 following + exclude ties), + salary from empsalary; + first_value | lead | nth_value | salary +-------------+------+-----------+-------- + 4500 | 3900 | 4500 | 3500 + 5000 | 4200 | 5000 | 3900 + 5200 | 4500 | 5200 | 4200 + 6000 | 4800 | 6000 | 4500 + 6000 | 4800 | 6000 | 4800 + 
6000 | 5000 | 6000 | 4800 + 6000 | 5200 | 6000 | 5000 + | 5200 | | 5200 + | 6000 | | 5200 + | | | 6000 +(10 rows) + +select last_value(salary) over(order by salary range between 1000 following and 3000 following + exclude group), + lag(salary) over(order by salary range between 1000 following and 3000 following exclude group), + salary from empsalary; + last_value | lag | salary +------------+------+-------- + 6000 | | 3500 + 6000 | 3500 | 3900 + 6000 | 3900 | 4200 + 6000 | 4200 | 4500 + 6000 | 4500 | 4800 + 6000 | 4800 | 4800 + 6000 | 4800 | 5000 + | 5000 | 5200 + | 5200 | 5200 + | 5200 | 6000 +(10 rows) + +select first_value(salary) over(order by enroll_date range between unbounded preceding and '1 year'::interval following + exclude ties), + last_value(salary) over(order by enroll_date range between unbounded preceding and '1 year'::interval following), + salary, enroll_date from empsalary; + first_value | last_value | salary | enroll_date +-------------+------------+--------+------------- + 5000 | 5200 | 5000 | 10-01-2006 + 6000 | 5200 | 6000 | 10-01-2006 + 5000 | 3500 | 3900 | 12-23-2006 + 5000 | 4200 | 4800 | 08-01-2007 + 5000 | 4200 | 5200 | 08-01-2007 + 5000 | 4200 | 4800 | 08-08-2007 + 5000 | 4200 | 5200 | 08-15-2007 + 5000 | 4200 | 3500 | 12-10-2007 + 5000 | 4200 | 4500 | 01-01-2008 + 5000 | 4200 | 4200 | 01-01-2008 +(10 rows) + +select first_value(salary) over(order by enroll_date range between unbounded preceding and '1 year'::interval following + exclude ties), + last_value(salary) over(order by enroll_date range between unbounded preceding and '1 year'::interval following + exclude ties), + salary, enroll_date from empsalary; + first_value | last_value | salary | enroll_date +-------------+------------+--------+------------- + 5000 | 5200 | 5000 | 10-01-2006 + 6000 | 5200 | 6000 | 10-01-2006 + 5000 | 3500 | 3900 | 12-23-2006 + 5000 | 4200 | 4800 | 08-01-2007 + 5000 | 4200 | 5200 | 08-01-2007 + 5000 | 4200 | 4800 | 08-08-2007 + 5000 | 4200 | 5200 | 08-15-2007 + 5000 | 4200 | 3500 | 12-10-2007 + 5000 | 4500 | 4500 | 01-01-2008 + 5000 | 4200 | 4200 | 01-01-2008 +(10 rows) + +select first_value(salary) over(order by enroll_date range between unbounded preceding and '1 year'::interval following + exclude group), + last_value(salary) over(order by enroll_date range between unbounded preceding and '1 year'::interval following + exclude group), + salary, enroll_date from empsalary; + first_value | last_value | salary | enroll_date +-------------+------------+--------+------------- + 3900 | 5200 | 5000 | 10-01-2006 + 3900 | 5200 | 6000 | 10-01-2006 + 5000 | 3500 | 3900 | 12-23-2006 + 5000 | 4200 | 4800 | 08-01-2007 + 5000 | 4200 | 5200 | 08-01-2007 + 5000 | 4200 | 4800 | 08-08-2007 + 5000 | 4200 | 5200 | 08-15-2007 + 5000 | 4200 | 3500 | 12-10-2007 + 5000 | 3500 | 4500 | 01-01-2008 + 5000 | 3500 | 4200 | 01-01-2008 +(10 rows) + +select first_value(salary) over(order by enroll_date range between unbounded preceding and '1 year'::interval following + exclude current row), + last_value(salary) over(order by enroll_date range between unbounded preceding and '1 year'::interval following + exclude current row), + salary, enroll_date from empsalary; + first_value | last_value | salary | enroll_date +-------------+------------+--------+------------- + 6000 | 5200 | 5000 | 10-01-2006 + 5000 | 5200 | 6000 | 10-01-2006 + 5000 | 3500 | 3900 | 12-23-2006 + 5000 | 4200 | 4800 | 08-01-2007 + 5000 | 4200 | 5200 | 08-01-2007 + 5000 | 4200 | 4800 | 08-08-2007 + 5000 | 4200 | 5200 | 08-15-2007 + 5000 | 
4200 | 3500 | 12-10-2007 + 5000 | 4200 | 4500 | 01-01-2008 + 5000 | 4500 | 4200 | 01-01-2008 +(10 rows) + +-- RANGE offset PRECEDING/FOLLOWING with null values +select x, y, + first_value(y) over w, + last_value(y) over w +from + (select x, x as y from generate_series(1,5) as x + union all select null, 42 + union all select null, 43) ss +window w as + (order by x asc nulls first range between 2 preceding and 2 following); + x | y | first_value | last_value +---+----+-------------+------------ + | 42 | 42 | 43 + | 43 | 42 | 43 + 1 | 1 | 1 | 3 + 2 | 2 | 1 | 4 + 3 | 3 | 1 | 5 + 4 | 4 | 2 | 5 + 5 | 5 | 3 | 5 +(7 rows) + +select x, y, + first_value(y) over w, + last_value(y) over w +from + (select x, x as y from generate_series(1,5) as x + union all select null, 42 + union all select null, 43) ss +window w as + (order by x asc nulls last range between 2 preceding and 2 following); + x | y | first_value | last_value +---+----+-------------+------------ + 1 | 1 | 1 | 3 + 2 | 2 | 1 | 4 + 3 | 3 | 1 | 5 + 4 | 4 | 2 | 5 + 5 | 5 | 3 | 5 + | 42 | 42 | 43 + | 43 | 42 | 43 +(7 rows) + +select x, y, + first_value(y) over w, + last_value(y) over w +from + (select x, x as y from generate_series(1,5) as x + union all select null, 42 + union all select null, 43) ss +window w as + (order by x desc nulls first range between 2 preceding and 2 following); + x | y | first_value | last_value +---+----+-------------+------------ + | 43 | 43 | 42 + | 42 | 43 | 42 + 5 | 5 | 5 | 3 + 4 | 4 | 5 | 2 + 3 | 3 | 5 | 1 + 2 | 2 | 4 | 1 + 1 | 1 | 3 | 1 +(7 rows) + +select x, y, + first_value(y) over w, + last_value(y) over w +from + (select x, x as y from generate_series(1,5) as x + union all select null, 42 + union all select null, 43) ss +window w as + (order by x desc nulls last range between 2 preceding and 2 following); + x | y | first_value | last_value +---+----+-------------+------------ + 5 | 5 | 5 | 3 + 4 | 4 | 5 | 2 + 3 | 3 | 5 | 1 + 2 | 2 | 4 | 1 + 1 | 1 | 3 | 1 + | 42 | 42 | 43 + | 43 | 42 | 43 +(7 rows) + +-- Check overflow behavior for various integer sizes +select x, last_value(x) over (order by x::smallint range between current row and 2147450884 following) +from generate_series(32764, 32766) x; + x | last_value +-------+------------ + 32764 | 32766 + 32765 | 32766 + 32766 | 32766 +(3 rows) + +select x, last_value(x) over (order by x::smallint desc range between current row and 2147450885 following) +from generate_series(-32766, -32764) x; + x | last_value +--------+------------ + -32764 | -32766 + -32765 | -32766 + -32766 | -32766 +(3 rows) + +select x, last_value(x) over (order by x range between current row and 4 following) +from generate_series(2147483644, 2147483646) x; + x | last_value +------------+------------ + 2147483644 | 2147483646 + 2147483645 | 2147483646 + 2147483646 | 2147483646 +(3 rows) + +select x, last_value(x) over (order by x desc range between current row and 5 following) +from generate_series(-2147483646, -2147483644) x; + x | last_value +-------------+------------- + -2147483644 | -2147483646 + -2147483645 | -2147483646 + -2147483646 | -2147483646 +(3 rows) + +select x, last_value(x) over (order by x range between current row and 4 following) +from generate_series(9223372036854775804, 9223372036854775806) x; + x | last_value +---------------------+--------------------- + 9223372036854775804 | 9223372036854775806 + 9223372036854775805 | 9223372036854775806 + 9223372036854775806 | 9223372036854775806 +(3 rows) + +select x, last_value(x) over (order by x desc range between current row and 
5 following) +from generate_series(-9223372036854775806, -9223372036854775804) x; + x | last_value +----------------------+---------------------- + -9223372036854775804 | -9223372036854775806 + -9223372036854775805 | -9223372036854775806 + -9223372036854775806 | -9223372036854775806 +(3 rows) + +-- Test in_range for other numeric datatypes +create temp table numerics( + id int, + f_float4 float4, + f_float8 float8, + f_numeric numeric +); +insert into numerics values +(0, '-infinity', '-infinity', '-1000'), -- numeric type lacks infinities +(1, -3, -3, -3), +(2, -1, -1, -1), +(3, 0, 0, 0), +(4, 1.1, 1.1, 1.1), +(5, 1.12, 1.12, 1.12), +(6, 2, 2, 2), +(7, 100, 100, 100), +(8, 'infinity', 'infinity', '1000'), +(9, 'NaN', 'NaN', 'NaN'); +select id, f_float4, first_value(id) over w, last_value(id) over w +from numerics +window w as (order by f_float4 range between + 1 preceding and 1 following); + id | f_float4 | first_value | last_value +----+-----------+-------------+------------ + 0 | -Infinity | 0 | 0 + 1 | -3 | 1 | 1 + 2 | -1 | 2 | 3 + 3 | 0 | 2 | 3 + 4 | 1.1 | 4 | 6 + 5 | 1.12 | 4 | 6 + 6 | 2 | 4 | 6 + 7 | 100 | 7 | 7 + 8 | Infinity | 8 | 8 + 9 | NaN | 9 | 9 +(10 rows) + +select id, f_float4, first_value(id) over w, last_value(id) over w +from numerics +window w as (order by f_float4 range between + 1 preceding and 1.1::float4 following); + id | f_float4 | first_value | last_value +----+-----------+-------------+------------ + 0 | -Infinity | 0 | 0 + 1 | -3 | 1 | 1 + 2 | -1 | 2 | 3 + 3 | 0 | 2 | 4 + 4 | 1.1 | 4 | 6 + 5 | 1.12 | 4 | 6 + 6 | 2 | 4 | 6 + 7 | 100 | 7 | 7 + 8 | Infinity | 8 | 8 + 9 | NaN | 9 | 9 +(10 rows) + +select id, f_float4, first_value(id) over w, last_value(id) over w +from numerics +window w as (order by f_float4 range between + 'inf' preceding and 'inf' following); + id | f_float4 | first_value | last_value +----+-----------+-------------+------------ + 0 | -Infinity | 0 | 8 + 1 | -3 | 0 | 8 + 2 | -1 | 0 | 8 + 3 | 0 | 0 | 8 + 4 | 1.1 | 0 | 8 + 5 | 1.12 | 0 | 8 + 6 | 2 | 0 | 8 + 7 | 100 | 0 | 8 + 8 | Infinity | 0 | 8 + 9 | NaN | 9 | 9 +(10 rows) + +select id, f_float4, first_value(id) over w, last_value(id) over w +from numerics +window w as (order by f_float4 range between + 1.1 preceding and 'NaN' following); -- error, NaN disallowed +ERROR: invalid preceding or following size in window function +select id, f_float8, first_value(id) over w, last_value(id) over w +from numerics +window w as (order by f_float8 range between + 1 preceding and 1 following); + id | f_float8 | first_value | last_value +----+-----------+-------------+------------ + 0 | -Infinity | 0 | 0 + 1 | -3 | 1 | 1 + 2 | -1 | 2 | 3 + 3 | 0 | 2 | 3 + 4 | 1.1 | 4 | 6 + 5 | 1.12 | 4 | 6 + 6 | 2 | 4 | 6 + 7 | 100 | 7 | 7 + 8 | Infinity | 8 | 8 + 9 | NaN | 9 | 9 +(10 rows) + +select id, f_float8, first_value(id) over w, last_value(id) over w +from numerics +window w as (order by f_float8 range between + 1 preceding and 1.1::float8 following); + id | f_float8 | first_value | last_value +----+-----------+-------------+------------ + 0 | -Infinity | 0 | 0 + 1 | -3 | 1 | 1 + 2 | -1 | 2 | 3 + 3 | 0 | 2 | 4 + 4 | 1.1 | 4 | 6 + 5 | 1.12 | 4 | 6 + 6 | 2 | 4 | 6 + 7 | 100 | 7 | 7 + 8 | Infinity | 8 | 8 + 9 | NaN | 9 | 9 +(10 rows) + +select id, f_float8, first_value(id) over w, last_value(id) over w +from numerics +window w as (order by f_float8 range between + 'inf' preceding and 'inf' following); + id | f_float8 | first_value | last_value +----+-----------+-------------+------------ + 0 | -Infinity | 0 | 8 + 1 | 
-3 | 0 | 8 + 2 | -1 | 0 | 8 + 3 | 0 | 0 | 8 + 4 | 1.1 | 0 | 8 + 5 | 1.12 | 0 | 8 + 6 | 2 | 0 | 8 + 7 | 100 | 0 | 8 + 8 | Infinity | 0 | 8 + 9 | NaN | 9 | 9 +(10 rows) + +select id, f_float8, first_value(id) over w, last_value(id) over w +from numerics +window w as (order by f_float8 range between + 1.1 preceding and 'NaN' following); -- error, NaN disallowed +ERROR: invalid preceding or following size in window function +select id, f_numeric, first_value(id) over w, last_value(id) over w +from numerics +window w as (order by f_numeric range between + 1 preceding and 1 following); + id | f_numeric | first_value | last_value +----+-----------+-------------+------------ + 0 | -1000 | 0 | 0 + 1 | -3 | 1 | 1 + 2 | -1 | 2 | 3 + 3 | 0 | 2 | 3 + 4 | 1.1 | 4 | 6 + 5 | 1.12 | 4 | 6 + 6 | 2 | 4 | 6 + 7 | 100 | 7 | 7 + 8 | 1000 | 8 | 8 + 9 | NaN | 9 | 9 +(10 rows) + +select id, f_numeric, first_value(id) over w, last_value(id) over w +from numerics +window w as (order by f_numeric range between + 1 preceding and 1.1::numeric following); + id | f_numeric | first_value | last_value +----+-----------+-------------+------------ + 0 | -1000 | 0 | 0 + 1 | -3 | 1 | 1 + 2 | -1 | 2 | 3 + 3 | 0 | 2 | 4 + 4 | 1.1 | 4 | 6 + 5 | 1.12 | 4 | 6 + 6 | 2 | 4 | 6 + 7 | 100 | 7 | 7 + 8 | 1000 | 8 | 8 + 9 | NaN | 9 | 9 +(10 rows) + +select id, f_numeric, first_value(id) over w, last_value(id) over w +from numerics +window w as (order by f_numeric range between + 1 preceding and 1.1::float8 following); -- currently unsupported +ERROR: RANGE with offset PRECEDING/FOLLOWING is not supported for column type numeric and offset type double precision +LINE 4: 1 preceding and 1.1::float8 following); + ^ +HINT: Cast the offset value to an appropriate type. +select id, f_numeric, first_value(id) over w, last_value(id) over w +from numerics +window w as (order by f_numeric range between + 1.1 preceding and 'NaN' following); -- error, NaN disallowed +ERROR: invalid preceding or following size in window function +-- Test in_range for other datetime datatypes +create temp table datetimes( + id int, + f_time time, + f_timetz timetz, + f_interval interval, + f_timestamptz timestamptz, + f_timestamp timestamp +); +insert into datetimes values +(1, '11:00', '11:00 BST', '1 year', '2000-10-19 10:23:54+01', '2000-10-19 10:23:54'), +(2, '12:00', '12:00 BST', '2 years', '2001-10-19 10:23:54+01', '2001-10-19 10:23:54'), +(3, '13:00', '13:00 BST', '3 years', '2001-10-19 10:23:54+01', '2001-10-19 10:23:54'), +(4, '14:00', '14:00 BST', '4 years', '2002-10-19 10:23:54+01', '2002-10-19 10:23:54'), +(5, '15:00', '15:00 BST', '5 years', '2003-10-19 10:23:54+01', '2003-10-19 10:23:54'), +(6, '15:00', '15:00 BST', '5 years', '2004-10-19 10:23:54+01', '2004-10-19 10:23:54'), +(7, '17:00', '17:00 BST', '7 years', '2005-10-19 10:23:54+01', '2005-10-19 10:23:54'), +(8, '18:00', '18:00 BST', '8 years', '2006-10-19 10:23:54+01', '2006-10-19 10:23:54'), +(9, '19:00', '19:00 BST', '9 years', '2007-10-19 10:23:54+01', '2007-10-19 10:23:54'), +(10, '20:00', '20:00 BST', '10 years', '2008-10-19 10:23:54+01', '2008-10-19 10:23:54'); +select id, f_time, first_value(id) over w, last_value(id) over w +from datetimes +window w as (order by f_time range between + '70 min'::interval preceding and '2 hours'::interval following); + id | f_time | first_value | last_value +----+----------+-------------+------------ + 1 | 11:00:00 | 1 | 3 + 2 | 12:00:00 | 1 | 4 + 3 | 13:00:00 | 2 | 6 + 4 | 14:00:00 | 3 | 6 + 5 | 15:00:00 | 4 | 7 + 6 | 15:00:00 | 4 | 7 + 7 | 17:00:00 | 7 | 9 + 
8 | 18:00:00 | 7 | 10 + 9 | 19:00:00 | 8 | 10 + 10 | 20:00:00 | 9 | 10 +(10 rows) + +select id, f_time, first_value(id) over w, last_value(id) over w +from datetimes +window w as (order by f_time desc range between + '70 min' preceding and '2 hours' following); + id | f_time | first_value | last_value +----+----------+-------------+------------ + 10 | 20:00:00 | 10 | 8 + 9 | 19:00:00 | 10 | 7 + 8 | 18:00:00 | 9 | 7 + 7 | 17:00:00 | 8 | 5 + 6 | 15:00:00 | 6 | 3 + 5 | 15:00:00 | 6 | 3 + 4 | 14:00:00 | 6 | 2 + 3 | 13:00:00 | 4 | 1 + 2 | 12:00:00 | 3 | 1 + 1 | 11:00:00 | 2 | 1 +(10 rows) + +select id, f_timetz, first_value(id) over w, last_value(id) over w +from datetimes +window w as (order by f_timetz range between + '70 min'::interval preceding and '2 hours'::interval following); + id | f_timetz | first_value | last_value +----+-------------+-------------+------------ + 1 | 11:00:00+01 | 1 | 3 + 2 | 12:00:00+01 | 1 | 4 + 3 | 13:00:00+01 | 2 | 6 + 4 | 14:00:00+01 | 3 | 6 + 5 | 15:00:00+01 | 4 | 7 + 6 | 15:00:00+01 | 4 | 7 + 7 | 17:00:00+01 | 7 | 9 + 8 | 18:00:00+01 | 7 | 10 + 9 | 19:00:00+01 | 8 | 10 + 10 | 20:00:00+01 | 9 | 10 +(10 rows) + +select id, f_timetz, first_value(id) over w, last_value(id) over w +from datetimes +window w as (order by f_timetz desc range between + '70 min' preceding and '2 hours' following); + id | f_timetz | first_value | last_value +----+-------------+-------------+------------ + 10 | 20:00:00+01 | 10 | 8 + 9 | 19:00:00+01 | 10 | 7 + 8 | 18:00:00+01 | 9 | 7 + 7 | 17:00:00+01 | 8 | 5 + 6 | 15:00:00+01 | 6 | 3 + 5 | 15:00:00+01 | 6 | 3 + 4 | 14:00:00+01 | 6 | 2 + 3 | 13:00:00+01 | 4 | 1 + 2 | 12:00:00+01 | 3 | 1 + 1 | 11:00:00+01 | 2 | 1 +(10 rows) + +select id, f_interval, first_value(id) over w, last_value(id) over w +from datetimes +window w as (order by f_interval range between + '1 year'::interval preceding and '1 year'::interval following); + id | f_interval | first_value | last_value +----+------------+-------------+------------ + 1 | @ 1 year | 1 | 2 + 2 | @ 2 years | 1 | 3 + 3 | @ 3 years | 2 | 4 + 4 | @ 4 years | 3 | 6 + 5 | @ 5 years | 4 | 6 + 6 | @ 5 years | 4 | 6 + 7 | @ 7 years | 7 | 8 + 8 | @ 8 years | 7 | 9 + 9 | @ 9 years | 8 | 10 + 10 | @ 10 years | 9 | 10 +(10 rows) + +select id, f_interval, first_value(id) over w, last_value(id) over w +from datetimes +window w as (order by f_interval desc range between + '1 year' preceding and '1 year' following); + id | f_interval | first_value | last_value +----+------------+-------------+------------ + 10 | @ 10 years | 10 | 9 + 9 | @ 9 years | 10 | 8 + 8 | @ 8 years | 9 | 7 + 7 | @ 7 years | 8 | 7 + 6 | @ 5 years | 6 | 4 + 5 | @ 5 years | 6 | 4 + 4 | @ 4 years | 6 | 3 + 3 | @ 3 years | 4 | 2 + 2 | @ 2 years | 3 | 1 + 1 | @ 1 year | 2 | 1 +(10 rows) + +select id, f_timestamptz, first_value(id) over w, last_value(id) over w +from datetimes +window w as (order by f_timestamptz range between + '1 year'::interval preceding and '1 year'::interval following); + id | f_timestamptz | first_value | last_value +----+------------------------------+-------------+------------ + 1 | Thu Oct 19 02:23:54 2000 PDT | 1 | 3 + 2 | Fri Oct 19 02:23:54 2001 PDT | 1 | 4 + 3 | Fri Oct 19 02:23:54 2001 PDT | 1 | 4 + 4 | Sat Oct 19 02:23:54 2002 PDT | 2 | 5 + 5 | Sun Oct 19 02:23:54 2003 PDT | 4 | 6 + 6 | Tue Oct 19 02:23:54 2004 PDT | 5 | 7 + 7 | Wed Oct 19 02:23:54 2005 PDT | 6 | 8 + 8 | Thu Oct 19 02:23:54 2006 PDT | 7 | 9 + 9 | Fri Oct 19 02:23:54 2007 PDT | 8 | 10 + 10 | Sun Oct 19 02:23:54 2008 PDT | 9 | 10 +(10 rows) + 
+select id, f_timestamptz, first_value(id) over w, last_value(id) over w +from datetimes +window w as (order by f_timestamptz desc range between + '1 year' preceding and '1 year' following); + id | f_timestamptz | first_value | last_value +----+------------------------------+-------------+------------ + 10 | Sun Oct 19 02:23:54 2008 PDT | 10 | 9 + 9 | Fri Oct 19 02:23:54 2007 PDT | 10 | 8 + 8 | Thu Oct 19 02:23:54 2006 PDT | 9 | 7 + 7 | Wed Oct 19 02:23:54 2005 PDT | 8 | 6 + 6 | Tue Oct 19 02:23:54 2004 PDT | 7 | 5 + 5 | Sun Oct 19 02:23:54 2003 PDT | 6 | 4 + 4 | Sat Oct 19 02:23:54 2002 PDT | 5 | 2 + 3 | Fri Oct 19 02:23:54 2001 PDT | 4 | 1 + 2 | Fri Oct 19 02:23:54 2001 PDT | 4 | 1 + 1 | Thu Oct 19 02:23:54 2000 PDT | 3 | 1 +(10 rows) + +select id, f_timestamp, first_value(id) over w, last_value(id) over w +from datetimes +window w as (order by f_timestamp range between + '1 year'::interval preceding and '1 year'::interval following); + id | f_timestamp | first_value | last_value +----+--------------------------+-------------+------------ + 1 | Thu Oct 19 10:23:54 2000 | 1 | 3 + 2 | Fri Oct 19 10:23:54 2001 | 1 | 4 + 3 | Fri Oct 19 10:23:54 2001 | 1 | 4 + 4 | Sat Oct 19 10:23:54 2002 | 2 | 5 + 5 | Sun Oct 19 10:23:54 2003 | 4 | 6 + 6 | Tue Oct 19 10:23:54 2004 | 5 | 7 + 7 | Wed Oct 19 10:23:54 2005 | 6 | 8 + 8 | Thu Oct 19 10:23:54 2006 | 7 | 9 + 9 | Fri Oct 19 10:23:54 2007 | 8 | 10 + 10 | Sun Oct 19 10:23:54 2008 | 9 | 10 +(10 rows) + +select id, f_timestamp, first_value(id) over w, last_value(id) over w +from datetimes +window w as (order by f_timestamp desc range between + '1 year' preceding and '1 year' following); + id | f_timestamp | first_value | last_value +----+--------------------------+-------------+------------ + 10 | Sun Oct 19 10:23:54 2008 | 10 | 9 + 9 | Fri Oct 19 10:23:54 2007 | 10 | 8 + 8 | Thu Oct 19 10:23:54 2006 | 9 | 7 + 7 | Wed Oct 19 10:23:54 2005 | 8 | 6 + 6 | Tue Oct 19 10:23:54 2004 | 7 | 5 + 5 | Sun Oct 19 10:23:54 2003 | 6 | 4 + 4 | Sat Oct 19 10:23:54 2002 | 5 | 2 + 3 | Fri Oct 19 10:23:54 2001 | 4 | 1 + 2 | Fri Oct 19 10:23:54 2001 | 4 | 1 + 1 | Thu Oct 19 10:23:54 2000 | 3 | 1 +(10 rows) + +-- RANGE offset PRECEDING/FOLLOWING error cases +select sum(salary) over (order by enroll_date, salary range between '1 year'::interval preceding and '2 years'::interval following + exclude ties), salary, enroll_date from empsalary; +ERROR: RANGE with offset PRECEDING/FOLLOWING requires exactly one ORDER BY column +LINE 1: select sum(salary) over (order by enroll_date, salary range ... + ^ +select sum(salary) over (range between '1 year'::interval preceding and '2 years'::interval following + exclude ties), salary, enroll_date from empsalary; +ERROR: RANGE with offset PRECEDING/FOLLOWING requires exactly one ORDER BY column +LINE 1: select sum(salary) over (range between '1 year'::interval pr... + ^ +select sum(salary) over (order by depname range between '1 year'::interval preceding and '2 years'::interval following + exclude ties), salary, enroll_date from empsalary; +ERROR: RANGE with offset PRECEDING/FOLLOWING is not supported for column type text +LINE 1: ... sum(salary) over (order by depname range between '1 year'::... + ^ +select max(enroll_date) over (order by enroll_date range between 1 preceding and 2 following + exclude ties), salary, enroll_date from empsalary; +ERROR: RANGE with offset PRECEDING/FOLLOWING is not supported for column type date and offset type integer +LINE 1: ...ll_date) over (order by enroll_date range between 1 precedin... 
+ ^ +HINT: Cast the offset value to an appropriate type. +select max(enroll_date) over (order by salary range between -1 preceding and 2 following + exclude ties), salary, enroll_date from empsalary; +ERROR: invalid preceding or following size in window function +select max(enroll_date) over (order by salary range between 1 preceding and -2 following + exclude ties), salary, enroll_date from empsalary; +ERROR: invalid preceding or following size in window function +select max(enroll_date) over (order by salary range between '1 year'::interval preceding and '2 years'::interval following + exclude ties), salary, enroll_date from empsalary; +ERROR: RANGE with offset PRECEDING/FOLLOWING is not supported for column type integer and offset type interval +LINE 1: ...(enroll_date) over (order by salary range between '1 year'::... + ^ +HINT: Cast the offset value to an appropriate type. +select max(enroll_date) over (order by enroll_date range between '1 year'::interval preceding and '-2 years'::interval following + exclude ties), salary, enroll_date from empsalary; +ERROR: invalid preceding or following size in window function +-- GROUPS tests +SELECT sum(unique1) over (order by four groups between unbounded preceding and current row), + unique1, four +FROM tenk1 WHERE unique1 < 10; + sum | unique1 | four +-----+---------+------ + 12 | 0 | 0 + 12 | 8 | 0 + 12 | 4 | 0 + 27 | 5 | 1 + 27 | 9 | 1 + 27 | 1 | 1 + 35 | 6 | 2 + 35 | 2 | 2 + 45 | 3 | 3 + 45 | 7 | 3 +(10 rows) + +SELECT sum(unique1) over (order by four groups between unbounded preceding and unbounded following), + unique1, four +FROM tenk1 WHERE unique1 < 10; + sum | unique1 | four +-----+---------+------ + 45 | 0 | 0 + 45 | 8 | 0 + 45 | 4 | 0 + 45 | 5 | 1 + 45 | 9 | 1 + 45 | 1 | 1 + 45 | 6 | 2 + 45 | 2 | 2 + 45 | 3 | 3 + 45 | 7 | 3 +(10 rows) + +SELECT sum(unique1) over (order by four groups between current row and unbounded following), + unique1, four +FROM tenk1 WHERE unique1 < 10; + sum | unique1 | four +-----+---------+------ + 45 | 0 | 0 + 45 | 8 | 0 + 45 | 4 | 0 + 33 | 5 | 1 + 33 | 9 | 1 + 33 | 1 | 1 + 18 | 6 | 2 + 18 | 2 | 2 + 10 | 3 | 3 + 10 | 7 | 3 +(10 rows) + +SELECT sum(unique1) over (order by four groups between 1 preceding and unbounded following), + unique1, four +FROM tenk1 WHERE unique1 < 10; + sum | unique1 | four +-----+---------+------ + 45 | 0 | 0 + 45 | 8 | 0 + 45 | 4 | 0 + 45 | 5 | 1 + 45 | 9 | 1 + 45 | 1 | 1 + 33 | 6 | 2 + 33 | 2 | 2 + 18 | 3 | 3 + 18 | 7 | 3 +(10 rows) + +SELECT sum(unique1) over (order by four groups between 1 following and unbounded following), + unique1, four +FROM tenk1 WHERE unique1 < 10; + sum | unique1 | four +-----+---------+------ + 33 | 0 | 0 + 33 | 8 | 0 + 33 | 4 | 0 + 18 | 5 | 1 + 18 | 9 | 1 + 18 | 1 | 1 + 10 | 6 | 2 + 10 | 2 | 2 + | 3 | 3 + | 7 | 3 +(10 rows) + +SELECT sum(unique1) over (order by four groups between unbounded preceding and 2 following), + unique1, four +FROM tenk1 WHERE unique1 < 10; + sum | unique1 | four +-----+---------+------ + 35 | 0 | 0 + 35 | 8 | 0 + 35 | 4 | 0 + 45 | 5 | 1 + 45 | 9 | 1 + 45 | 1 | 1 + 45 | 6 | 2 + 45 | 2 | 2 + 45 | 3 | 3 + 45 | 7 | 3 +(10 rows) + +SELECT sum(unique1) over (order by four groups between 2 preceding and 1 preceding), + unique1, four +FROM tenk1 WHERE unique1 < 10; + sum | unique1 | four +-----+---------+------ + | 0 | 0 + | 8 | 0 + | 4 | 0 + 12 | 5 | 1 + 12 | 9 | 1 + 12 | 1 | 1 + 27 | 6 | 2 + 27 | 2 | 2 + 23 | 3 | 3 + 23 | 7 | 3 +(10 rows) + +SELECT sum(unique1) over (order by four groups between 2 preceding and 1 following), + 
unique1, four +FROM tenk1 WHERE unique1 < 10; + sum | unique1 | four +-----+---------+------ + 27 | 0 | 0 + 27 | 8 | 0 + 27 | 4 | 0 + 35 | 5 | 1 + 35 | 9 | 1 + 35 | 1 | 1 + 45 | 6 | 2 + 45 | 2 | 2 + 33 | 3 | 3 + 33 | 7 | 3 +(10 rows) + +SELECT sum(unique1) over (order by four groups between 0 preceding and 0 following), + unique1, four +FROM tenk1 WHERE unique1 < 10; + sum | unique1 | four +-----+---------+------ + 12 | 0 | 0 + 12 | 8 | 0 + 12 | 4 | 0 + 15 | 5 | 1 + 15 | 9 | 1 + 15 | 1 | 1 + 8 | 6 | 2 + 8 | 2 | 2 + 10 | 3 | 3 + 10 | 7 | 3 +(10 rows) + +SELECT sum(unique1) over (order by four groups between 2 preceding and 1 following + exclude current row), unique1, four +FROM tenk1 WHERE unique1 < 10; + sum | unique1 | four +-----+---------+------ + 27 | 0 | 0 + 19 | 8 | 0 + 23 | 4 | 0 + 30 | 5 | 1 + 26 | 9 | 1 + 34 | 1 | 1 + 39 | 6 | 2 + 43 | 2 | 2 + 30 | 3 | 3 + 26 | 7 | 3 +(10 rows) + +SELECT sum(unique1) over (order by four groups between 2 preceding and 1 following + exclude group), unique1, four +FROM tenk1 WHERE unique1 < 10; + sum | unique1 | four +-----+---------+------ + 15 | 0 | 0 + 15 | 8 | 0 + 15 | 4 | 0 + 20 | 5 | 1 + 20 | 9 | 1 + 20 | 1 | 1 + 37 | 6 | 2 + 37 | 2 | 2 + 23 | 3 | 3 + 23 | 7 | 3 +(10 rows) + +SELECT sum(unique1) over (order by four groups between 2 preceding and 1 following + exclude ties), unique1, four +FROM tenk1 WHERE unique1 < 10; + sum | unique1 | four +-----+---------+------ + 15 | 0 | 0 + 23 | 8 | 0 + 19 | 4 | 0 + 25 | 5 | 1 + 29 | 9 | 1 + 21 | 1 | 1 + 43 | 6 | 2 + 39 | 2 | 2 + 26 | 3 | 3 + 30 | 7 | 3 +(10 rows) + +SELECT sum(unique1) over (partition by ten + order by four groups between 0 preceding and 0 following),unique1, four, ten +FROM tenk1 WHERE unique1 < 10; + sum | unique1 | four | ten +-----+---------+------+----- + 0 | 0 | 0 | 0 + 1 | 1 | 1 | 1 + 2 | 2 | 2 | 2 + 3 | 3 | 3 | 3 + 4 | 4 | 0 | 4 + 5 | 5 | 1 | 5 + 6 | 6 | 2 | 6 + 7 | 7 | 3 | 7 + 8 | 8 | 0 | 8 + 9 | 9 | 1 | 9 +(10 rows) + +SELECT sum(unique1) over (partition by ten + order by four groups between 0 preceding and 0 following exclude current row), unique1, four, ten +FROM tenk1 WHERE unique1 < 10; + sum | unique1 | four | ten +-----+---------+------+----- + | 0 | 0 | 0 + | 1 | 1 | 1 + | 2 | 2 | 2 + | 3 | 3 | 3 + | 4 | 0 | 4 + | 5 | 1 | 5 + | 6 | 2 | 6 + | 7 | 3 | 7 + | 8 | 0 | 8 + | 9 | 1 | 9 +(10 rows) + +SELECT sum(unique1) over (partition by ten + order by four groups between 0 preceding and 0 following exclude group), unique1, four, ten +FROM tenk1 WHERE unique1 < 10; + sum | unique1 | four | ten +-----+---------+------+----- + | 0 | 0 | 0 + | 1 | 1 | 1 + | 2 | 2 | 2 + | 3 | 3 | 3 + | 4 | 0 | 4 + | 5 | 1 | 5 + | 6 | 2 | 6 + | 7 | 3 | 7 + | 8 | 0 | 8 + | 9 | 1 | 9 +(10 rows) + +SELECT sum(unique1) over (partition by ten + order by four groups between 0 preceding and 0 following exclude ties), unique1, four, ten +FROM tenk1 WHERE unique1 < 10; + sum | unique1 | four | ten +-----+---------+------+----- + 0 | 0 | 0 | 0 + 1 | 1 | 1 | 1 + 2 | 2 | 2 | 2 + 3 | 3 | 3 | 3 + 4 | 4 | 0 | 4 + 5 | 5 | 1 | 5 + 6 | 6 | 2 | 6 + 7 | 7 | 3 | 7 + 8 | 8 | 0 | 8 + 9 | 9 | 1 | 9 +(10 rows) + +select first_value(salary) over(order by enroll_date groups between 1 preceding and 1 following), + lead(salary) over(order by enroll_date groups between 1 preceding and 1 following), + nth_value(salary, 1) over(order by enroll_date groups between 1 preceding and 1 following), + salary, enroll_date from empsalary; + first_value | lead | nth_value | salary | enroll_date 
+-------------+------+-----------+--------+------------- + 5000 | 6000 | 5000 | 5000 | 10-01-2006 + 5000 | 3900 | 5000 | 6000 | 10-01-2006 + 5000 | 4800 | 5000 | 3900 | 12-23-2006 + 3900 | 5200 | 3900 | 4800 | 08-01-2007 + 3900 | 4800 | 3900 | 5200 | 08-01-2007 + 4800 | 5200 | 4800 | 4800 | 08-08-2007 + 4800 | 3500 | 4800 | 5200 | 08-15-2007 + 5200 | 4500 | 5200 | 3500 | 12-10-2007 + 3500 | 4200 | 3500 | 4500 | 01-01-2008 + 3500 | | 3500 | 4200 | 01-01-2008 +(10 rows) + +select last_value(salary) over(order by enroll_date groups between 1 preceding and 1 following), + lag(salary) over(order by enroll_date groups between 1 preceding and 1 following), + salary, enroll_date from empsalary; + last_value | lag | salary | enroll_date +------------+------+--------+------------- + 3900 | | 5000 | 10-01-2006 + 3900 | 5000 | 6000 | 10-01-2006 + 5200 | 6000 | 3900 | 12-23-2006 + 4800 | 3900 | 4800 | 08-01-2007 + 4800 | 4800 | 5200 | 08-01-2007 + 5200 | 5200 | 4800 | 08-08-2007 + 3500 | 4800 | 5200 | 08-15-2007 + 4200 | 5200 | 3500 | 12-10-2007 + 4200 | 3500 | 4500 | 01-01-2008 + 4200 | 4500 | 4200 | 01-01-2008 +(10 rows) + +select first_value(salary) over(order by enroll_date groups between 1 following and 3 following + exclude current row), + lead(salary) over(order by enroll_date groups between 1 following and 3 following exclude ties), + nth_value(salary, 1) over(order by enroll_date groups between 1 following and 3 following + exclude ties), + salary, enroll_date from empsalary; + first_value | lead | nth_value | salary | enroll_date +-------------+------+-----------+--------+------------- + 3900 | 6000 | 3900 | 5000 | 10-01-2006 + 3900 | 3900 | 3900 | 6000 | 10-01-2006 + 4800 | 4800 | 4800 | 3900 | 12-23-2006 + 4800 | 5200 | 4800 | 4800 | 08-01-2007 + 4800 | 4800 | 4800 | 5200 | 08-01-2007 + 5200 | 5200 | 5200 | 4800 | 08-08-2007 + 3500 | 3500 | 3500 | 5200 | 08-15-2007 + 4500 | 4500 | 4500 | 3500 | 12-10-2007 + | 4200 | | 4500 | 01-01-2008 + | | | 4200 | 01-01-2008 +(10 rows) + +select last_value(salary) over(order by enroll_date groups between 1 following and 3 following + exclude group), + lag(salary) over(order by enroll_date groups between 1 following and 3 following exclude group), + salary, enroll_date from empsalary; + last_value | lag | salary | enroll_date +------------+------+--------+------------- + 4800 | | 5000 | 10-01-2006 + 4800 | 5000 | 6000 | 10-01-2006 + 5200 | 6000 | 3900 | 12-23-2006 + 3500 | 3900 | 4800 | 08-01-2007 + 3500 | 4800 | 5200 | 08-01-2007 + 4200 | 5200 | 4800 | 08-08-2007 + 4200 | 4800 | 5200 | 08-15-2007 + 4200 | 5200 | 3500 | 12-10-2007 + | 3500 | 4500 | 01-01-2008 + | 4500 | 4200 | 01-01-2008 +(10 rows) + +-- Show differences in offset interpretation between ROWS, RANGE, and GROUPS +WITH cte (x) AS ( + SELECT * FROM generate_series(1, 35, 2) +) +SELECT x, (sum(x) over w) +FROM cte +WINDOW w AS (ORDER BY x rows between 1 preceding and 1 following); + x | sum +----+----- + 1 | 4 + 3 | 9 + 5 | 15 + 7 | 21 + 9 | 27 + 11 | 33 + 13 | 39 + 15 | 45 + 17 | 51 + 19 | 57 + 21 | 63 + 23 | 69 + 25 | 75 + 27 | 81 + 29 | 87 + 31 | 93 + 33 | 99 + 35 | 68 +(18 rows) + +WITH cte (x) AS ( + SELECT * FROM generate_series(1, 35, 2) +) +SELECT x, (sum(x) over w) +FROM cte +WINDOW w AS (ORDER BY x range between 1 preceding and 1 following); + x | sum +----+----- + 1 | 1 + 3 | 3 + 5 | 5 + 7 | 7 + 9 | 9 + 11 | 11 + 13 | 13 + 15 | 15 + 17 | 17 + 19 | 19 + 21 | 21 + 23 | 23 + 25 | 25 + 27 | 27 + 29 | 29 + 31 | 31 + 33 | 33 + 35 | 35 +(18 rows) + +WITH cte (x) AS ( + SELECT * FROM 
generate_series(1, 35, 2) +) +SELECT x, (sum(x) over w) +FROM cte +WINDOW w AS (ORDER BY x groups between 1 preceding and 1 following); + x | sum +----+----- + 1 | 4 + 3 | 9 + 5 | 15 + 7 | 21 + 9 | 27 + 11 | 33 + 13 | 39 + 15 | 45 + 17 | 51 + 19 | 57 + 21 | 63 + 23 | 69 + 25 | 75 + 27 | 81 + 29 | 87 + 31 | 93 + 33 | 99 + 35 | 68 +(18 rows) + +WITH cte (x) AS ( + select 1 union all select 1 union all select 1 union all + SELECT * FROM generate_series(5, 49, 2) +) +SELECT x, (sum(x) over w) +FROM cte +WINDOW w AS (ORDER BY x rows between 1 preceding and 1 following); + x | sum +----+----- + 1 | 2 + 1 | 3 + 1 | 7 + 5 | 13 + 7 | 21 + 9 | 27 + 11 | 33 + 13 | 39 + 15 | 45 + 17 | 51 + 19 | 57 + 21 | 63 + 23 | 69 + 25 | 75 + 27 | 81 + 29 | 87 + 31 | 93 + 33 | 99 + 35 | 105 + 37 | 111 + 39 | 117 + 41 | 123 + 43 | 129 + 45 | 135 + 47 | 141 + 49 | 96 +(26 rows) + +WITH cte (x) AS ( + select 1 union all select 1 union all select 1 union all + SELECT * FROM generate_series(5, 49, 2) +) +SELECT x, (sum(x) over w) +FROM cte +WINDOW w AS (ORDER BY x range between 1 preceding and 1 following); + x | sum +----+----- + 1 | 3 + 1 | 3 + 1 | 3 + 5 | 5 + 7 | 7 + 9 | 9 + 11 | 11 + 13 | 13 + 15 | 15 + 17 | 17 + 19 | 19 + 21 | 21 + 23 | 23 + 25 | 25 + 27 | 27 + 29 | 29 + 31 | 31 + 33 | 33 + 35 | 35 + 37 | 37 + 39 | 39 + 41 | 41 + 43 | 43 + 45 | 45 + 47 | 47 + 49 | 49 +(26 rows) + +WITH cte (x) AS ( + select 1 union all select 1 union all select 1 union all + SELECT * FROM generate_series(5, 49, 2) +) +SELECT x, (sum(x) over w) +FROM cte +WINDOW w AS (ORDER BY x groups between 1 preceding and 1 following); + x | sum +----+----- + 1 | 8 + 1 | 8 + 1 | 8 + 5 | 15 + 7 | 21 + 9 | 27 + 11 | 33 + 13 | 39 + 15 | 45 + 17 | 51 + 19 | 57 + 21 | 63 + 23 | 69 + 25 | 75 + 27 | 81 + 29 | 87 + 31 | 93 + 33 | 99 + 35 | 105 + 37 | 111 + 39 | 117 + 41 | 123 + 43 | 129 + 45 | 135 + 47 | 141 + 49 | 96 +(26 rows) + +-- with UNION +SELECT count(*) OVER (PARTITION BY four) FROM (SELECT * FROM tenk1 UNION ALL SELECT * FROM tenk2)s LIMIT 0; + count +------- +(0 rows) + +-- check some degenerate cases +create temp table t1 (f1 int, f2 int8); +insert into t1 values (1,1),(1,2),(2,2); +select f1, sum(f1) over (partition by f1 + range between 1 preceding and 1 following) +from t1 where f1 = f2; -- error, must have order by +ERROR: RANGE with offset PRECEDING/FOLLOWING requires exactly one ORDER BY column +LINE 1: select f1, sum(f1) over (partition by f1 + ^ +explain (costs off) +select f1, sum(f1) over (partition by f1 order by f2 + range between 1 preceding and 1 following) +from t1 where f1 = f2; + QUERY PLAN +--------------------------------- + WindowAgg + -> Sort + Sort Key: f1 + -> Seq Scan on t1 + Filter: (f1 = f2) +(5 rows) + +select f1, sum(f1) over (partition by f1 order by f2 + range between 1 preceding and 1 following) +from t1 where f1 = f2; + f1 | sum +----+----- + 1 | 1 + 2 | 2 +(2 rows) + +select f1, sum(f1) over (partition by f1, f1 order by f2 + range between 2 preceding and 1 preceding) +from t1 where f1 = f2; + f1 | sum +----+----- + 1 | + 2 | +(2 rows) + +select f1, sum(f1) over (partition by f1, f2 order by f2 + range between 1 following and 2 following) +from t1 where f1 = f2; + f1 | sum +----+----- + 1 | + 2 | +(2 rows) + +select f1, sum(f1) over (partition by f1 + groups between 1 preceding and 1 following) +from t1 where f1 = f2; -- error, must have order by +ERROR: GROUPS mode requires an ORDER BY clause +LINE 1: select f1, sum(f1) over (partition by f1 + ^ +explain (costs off) +select f1, sum(f1) over (partition by f1 
order by f2 + groups between 1 preceding and 1 following) +from t1 where f1 = f2; + QUERY PLAN +--------------------------------- + WindowAgg + -> Sort + Sort Key: f1 + -> Seq Scan on t1 + Filter: (f1 = f2) +(5 rows) + +select f1, sum(f1) over (partition by f1 order by f2 + groups between 1 preceding and 1 following) +from t1 where f1 = f2; + f1 | sum +----+----- + 1 | 1 + 2 | 2 +(2 rows) + +select f1, sum(f1) over (partition by f1, f1 order by f2 + groups between 2 preceding and 1 preceding) +from t1 where f1 = f2; + f1 | sum +----+----- + 1 | + 2 | +(2 rows) + +select f1, sum(f1) over (partition by f1, f2 order by f2 + groups between 1 following and 2 following) +from t1 where f1 = f2; + f1 | sum +----+----- + 1 | + 2 | +(2 rows) + +-- ordering by a non-integer constant is allowed +SELECT rank() OVER (ORDER BY length('abc')); + rank +------ + 1 +(1 row) + +-- can't order by another window function +SELECT rank() OVER (ORDER BY rank() OVER (ORDER BY random())); +ERROR: window functions are not allowed in window definitions +LINE 1: SELECT rank() OVER (ORDER BY rank() OVER (ORDER BY random())... + ^ +-- some other errors +SELECT * FROM empsalary WHERE row_number() OVER (ORDER BY salary) < 10; +ERROR: window functions are not allowed in WHERE +LINE 1: SELECT * FROM empsalary WHERE row_number() OVER (ORDER BY sa... + ^ +SELECT * FROM empsalary INNER JOIN tenk1 ON row_number() OVER (ORDER BY salary) < 10; +ERROR: window functions are not allowed in JOIN conditions +LINE 1: SELECT * FROM empsalary INNER JOIN tenk1 ON row_number() OVE... + ^ +SELECT rank() OVER (ORDER BY 1), count(*) FROM empsalary GROUP BY 1; +ERROR: window functions are not allowed in GROUP BY +LINE 1: SELECT rank() OVER (ORDER BY 1), count(*) FROM empsalary GRO... + ^ +SELECT * FROM rank() OVER (ORDER BY random()); +ERROR: syntax error at or near "ORDER" +LINE 1: SELECT * FROM rank() OVER (ORDER BY random()); + ^ +DELETE FROM empsalary WHERE (rank() OVER (ORDER BY random())) > 10; +ERROR: window functions are not allowed in WHERE +LINE 1: DELETE FROM empsalary WHERE (rank() OVER (ORDER BY random())... + ^ +DELETE FROM empsalary RETURNING rank() OVER (ORDER BY random()); +ERROR: window functions are not allowed in RETURNING +LINE 1: DELETE FROM empsalary RETURNING rank() OVER (ORDER BY random... + ^ +SELECT count(*) OVER w FROM tenk1 WINDOW w AS (ORDER BY unique1), w AS (ORDER BY unique1); +ERROR: window "w" is already defined +LINE 1: ...w FROM tenk1 WINDOW w AS (ORDER BY unique1), w AS (ORDER BY ... + ^ +SELECT rank() OVER (PARTITION BY four, ORDER BY ten) FROM tenk1; +ERROR: syntax error at or near "ORDER" +LINE 1: SELECT rank() OVER (PARTITION BY four, ORDER BY ten) FROM te... 
+ ^ +SELECT count() OVER () FROM tenk1; +ERROR: count(*) must be used to call a parameterless aggregate function +LINE 1: SELECT count() OVER () FROM tenk1; + ^ +SELECT generate_series(1, 100) OVER () FROM empsalary; +ERROR: OVER specified, but generate_series is not a window function nor an aggregate function +LINE 1: SELECT generate_series(1, 100) OVER () FROM empsalary; + ^ +SELECT ntile(0) OVER (ORDER BY ten), ten, four FROM tenk1; +ERROR: argument of ntile must be greater than zero +SELECT nth_value(four, 0) OVER (ORDER BY ten), ten, four FROM tenk1; +ERROR: argument of nth_value must be greater than zero +-- filter +SELECT sum(salary), row_number() OVER (ORDER BY depname), sum( + sum(salary) FILTER (WHERE enroll_date > '2007-01-01') +) FILTER (WHERE depname <> 'sales') OVER (ORDER BY depname DESC) AS "filtered_sum", + depname +FROM empsalary GROUP BY depname; + sum | row_number | filtered_sum | depname +-------+------------+--------------+----------- + 25100 | 1 | 22600 | develop + 7400 | 2 | 3500 | personnel + 14600 | 3 | | sales +(3 rows) + +-- Test pushdown of quals into a subquery containing window functions +-- pushdown is safe because all PARTITION BY clauses include depname: +EXPLAIN (COSTS OFF) +SELECT * FROM + (SELECT depname, + sum(salary) OVER (PARTITION BY depname) depsalary, + min(salary) OVER (PARTITION BY depname || 'A', depname) depminsalary + FROM empsalary) emp +WHERE depname = 'sales'; + QUERY PLAN +-------------------------------------------------------------------------- + Subquery Scan on emp + -> WindowAgg + -> WindowAgg + -> Sort + Sort Key: (((empsalary.depname)::text || 'A'::text)) + -> Seq Scan on empsalary + Filter: ((depname)::text = 'sales'::text) +(7 rows) + +-- pushdown is unsafe because there's a PARTITION BY clause without depname: +EXPLAIN (COSTS OFF) +SELECT * FROM + (SELECT depname, + sum(salary) OVER (PARTITION BY enroll_date) enroll_salary, + min(salary) OVER (PARTITION BY depname) depminsalary + FROM empsalary) emp +WHERE depname = 'sales'; + QUERY PLAN +------------------------------------------------------- + Subquery Scan on emp + Filter: ((emp.depname)::text = 'sales'::text) + -> WindowAgg + -> Sort + Sort Key: empsalary.enroll_date + -> WindowAgg + -> Sort + Sort Key: empsalary.depname + -> Seq Scan on empsalary +(9 rows) + +-- Test Sort node collapsing +EXPLAIN (COSTS OFF) +SELECT * FROM + (SELECT depname, + sum(salary) OVER (PARTITION BY depname order by empno) depsalary, + min(salary) OVER (PARTITION BY depname, empno order by enroll_date) depminsalary + FROM empsalary) emp +WHERE depname = 'sales'; + QUERY PLAN +---------------------------------------------------------------------- + Subquery Scan on emp + -> WindowAgg + -> WindowAgg + -> Sort + Sort Key: empsalary.empno, empsalary.enroll_date + -> Seq Scan on empsalary + Filter: ((depname)::text = 'sales'::text) +(7 rows) + +-- Test Sort node reordering +EXPLAIN (COSTS OFF) +SELECT + lead(1) OVER (PARTITION BY depname ORDER BY salary, enroll_date), + lag(1) OVER (PARTITION BY depname ORDER BY salary,enroll_date,empno) +FROM empsalary; + QUERY PLAN +------------------------------------------------------------- + WindowAgg + -> WindowAgg + -> Sort + Sort Key: depname, salary, enroll_date, empno + -> Seq Scan on empsalary +(5 rows) -- cleanup DROP TABLE empsalary; diff --git a/src/test/regress/expected/with.out b/src/test/regress/expected/with.out index fdcc4970a1..2a2085556b 100644 --- a/src/test/regress/expected/with.out +++ b/src/test/regress/expected/with.out @@ -166,7 +166,7 @@ 
SELECT n, n IS OF (int) AS is_int FROM t; ERROR: operator does not exist: text + integer LINE 4: SELECT n+1 FROM t WHERE n < 10 ^ -HINT: No operator matches the given name and argument type(s). You might need to add explicit type casts. +HINT: No operator matches the given name and argument types. You might need to add explicit type casts. -- -- Some examples with a tree -- @@ -1819,12 +1819,12 @@ SELECT * FROM y; (22 rows) -- data-modifying WITH containing INSERT...ON CONFLICT DO UPDATE -CREATE TABLE z AS SELECT i AS k, (i || ' v')::text v FROM generate_series(1, 16, 3) i; -ALTER TABLE z ADD UNIQUE (k); +CREATE TABLE withz AS SELECT i AS k, (i || ' v')::text v FROM generate_series(1, 16, 3) i; +ALTER TABLE withz ADD UNIQUE (k); WITH t AS ( - INSERT INTO z SELECT i, 'insert' + INSERT INTO withz SELECT i, 'insert' FROM generate_series(0, 16) i - ON CONFLICT (k) DO UPDATE SET v = z.v || ', now update' + ON CONFLICT (k) DO UPDATE SET v = withz.v || ', now update' RETURNING * ) SELECT * FROM t JOIN y ON t.k = y.a ORDER BY a, k; @@ -1836,8 +1836,8 @@ SELECT * FROM t JOIN y ON t.k = y.a ORDER BY a, k; -- Test EXCLUDED.* reference within CTE WITH aa AS ( - INSERT INTO z VALUES(1, 5) ON CONFLICT (k) DO UPDATE SET v = EXCLUDED.v - WHERE z.k != EXCLUDED.k + INSERT INTO withz VALUES(1, 5) ON CONFLICT (k) DO UPDATE SET v = EXCLUDED.v + WHERE withz.k != EXCLUDED.k RETURNING * ) SELECT * FROM aa; @@ -1846,7 +1846,7 @@ SELECT * FROM aa; (0 rows) -- New query/snapshot demonstrates side-effects of previous query. -SELECT * FROM z ORDER BY k; +SELECT * FROM withz ORDER BY k; k | v ----+------------------ 0 | insert @@ -1873,19 +1873,19 @@ SELECT * FROM z ORDER BY k; -- reference outside values -- WITH aa AS (SELECT 1 a, 2 b) -INSERT INTO z VALUES(1, 'insert') +INSERT INTO withz VALUES(1, 'insert') ON CONFLICT (k) DO UPDATE SET v = (SELECT b || ' update' FROM aa WHERE a = 1 LIMIT 1); WITH aa AS (SELECT 1 a, 2 b) -INSERT INTO z VALUES(1, 'insert') -ON CONFLICT (k) DO UPDATE SET v = ' update' WHERE z.k = (SELECT a FROM aa); +INSERT INTO withz VALUES(1, 'insert') +ON CONFLICT (k) DO UPDATE SET v = ' update' WHERE withz.k = (SELECT a FROM aa); WITH aa AS (SELECT 1 a, 2 b) -INSERT INTO z VALUES(1, 'insert') +INSERT INTO withz VALUES(1, 'insert') ON CONFLICT (k) DO UPDATE SET v = (SELECT b || ' update' FROM aa WHERE a = 1 LIMIT 1); WITH aa AS (SELECT 'a' a, 'b' b UNION ALL SELECT 'a' a, 'b' b) -INSERT INTO z VALUES(1, 'insert') +INSERT INTO withz VALUES(1, 'insert') ON CONFLICT (k) DO UPDATE SET v = (SELECT b || ' update' FROM aa WHERE a = 'a' LIMIT 1); WITH aa AS (SELECT 1 a, 2 b) -INSERT INTO z VALUES(1, (SELECT b || ' insert' FROM aa WHERE a = 1 )) +INSERT INTO withz VALUES(1, (SELECT b || ' insert' FROM aa WHERE a = 1 )) ON CONFLICT (k) DO UPDATE SET v = (SELECT b || ' update' FROM aa WHERE a = 1 LIMIT 1); -- Update a row more than once, in different parts of a wCTE. 
That is -- an allowed, presumably very rare, edge case, but since it was @@ -1893,17 +1893,17 @@ ON CONFLICT (k) DO UPDATE SET v = (SELECT b || ' update' FROM aa WHERE a = 1 LIM WITH simpletup AS ( SELECT 2 k, 'Green' v), upsert_cte AS ( - INSERT INTO z VALUES(2, 'Blue') ON CONFLICT (k) DO - UPDATE SET (k, v) = (SELECT k, v FROM simpletup WHERE simpletup.k = z.k) + INSERT INTO withz VALUES(2, 'Blue') ON CONFLICT (k) DO + UPDATE SET (k, v) = (SELECT k, v FROM simpletup WHERE simpletup.k = withz.k) RETURNING k, v) -INSERT INTO z VALUES(2, 'Red') ON CONFLICT (k) DO -UPDATE SET (k, v) = (SELECT k, v FROM upsert_cte WHERE upsert_cte.k = z.k) +INSERT INTO withz VALUES(2, 'Red') ON CONFLICT (k) DO +UPDATE SET (k, v) = (SELECT k, v FROM upsert_cte WHERE upsert_cte.k = withz.k) RETURNING k, v; k | v ---+--- (0 rows) -DROP TABLE z; +DROP TABLE withz; -- check that run to completion happens in proper ordering TRUNCATE TABLE y; INSERT INTO y SELECT generate_series(1, 3); @@ -2273,5 +2273,19 @@ with ordinality as (select 1 as x) select * from ordinality; (1 row) -- check sane response to attempt to modify CTE relation -WITH d AS (SELECT 42) INSERT INTO d VALUES (1); -ERROR: relation "d" cannot be the target of a modifying statement +WITH test AS (SELECT 42) INSERT INTO test VALUES (1); +ERROR: relation "test" does not exist +LINE 1: WITH test AS (SELECT 42) INSERT INTO test VALUES (1); + ^ +-- check response to attempt to modify table with same name as a CTE (perhaps +-- surprisingly it works, because CTEs don't hide tables from data-modifying +-- statements) +create temp table test (i int); +with test as (select 42) insert into test select * from test; +select * from test; + i +---- + 42 +(1 row) + +drop table test; diff --git a/src/test/regress/expected/write_parallel.out b/src/test/regress/expected/write_parallel.out new file mode 100644 index 0000000000..0c4da2591a --- /dev/null +++ b/src/test/regress/expected/write_parallel.out @@ -0,0 +1,79 @@ +-- +-- PARALLEL +-- +-- Serializable isolation would disable parallel query, so explicitly use an +-- arbitrary other level. 
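-- A minimal sketch, not part of the test proper, of how the point above can be
-- seen by hand (it assumes the regression database's tenk1 table and the same
-- parallel cost settings the test applies below): the same statement planned
-- under SERIALIZABLE loses its Gather node, per the comment above.
begin isolation level serializable;
set local parallel_setup_cost = 0;
set local parallel_tuple_cost = 0;
set local min_parallel_table_scan_size = 0;
set local max_parallel_workers_per_gather = 4;
explain (costs off)
  select length(stringu1) from tenk1 group by length(stringu1);
rollback;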
+begin isolation level repeatable read; +-- encourage use of parallel plans +set parallel_setup_cost=0; +set parallel_tuple_cost=0; +set min_parallel_table_scan_size=0; +set max_parallel_workers_per_gather=4; +-- +-- Test write operations that has an underlying query that is eligble +-- for parallel plans +-- +explain (costs off) create table parallel_write as + select length(stringu1) from tenk1 group by length(stringu1); + QUERY PLAN +--------------------------------------------------- + Finalize HashAggregate + Group Key: (length((stringu1)::text)) + -> Gather + Workers Planned: 4 + -> Partial HashAggregate + Group Key: length((stringu1)::text) + -> Parallel Seq Scan on tenk1 +(7 rows) + +create table parallel_write as + select length(stringu1) from tenk1 group by length(stringu1); +drop table parallel_write; +explain (costs off) select length(stringu1) into parallel_write + from tenk1 group by length(stringu1); + QUERY PLAN +--------------------------------------------------- + Finalize HashAggregate + Group Key: (length((stringu1)::text)) + -> Gather + Workers Planned: 4 + -> Partial HashAggregate + Group Key: length((stringu1)::text) + -> Parallel Seq Scan on tenk1 +(7 rows) + +select length(stringu1) into parallel_write + from tenk1 group by length(stringu1); +drop table parallel_write; +explain (costs off) create materialized view parallel_mat_view as + select length(stringu1) from tenk1 group by length(stringu1); + QUERY PLAN +--------------------------------------------------- + Finalize HashAggregate + Group Key: (length((stringu1)::text)) + -> Gather + Workers Planned: 4 + -> Partial HashAggregate + Group Key: length((stringu1)::text) + -> Parallel Seq Scan on tenk1 +(7 rows) + +create materialized view parallel_mat_view as + select length(stringu1) from tenk1 group by length(stringu1); +drop materialized view parallel_mat_view; +prepare prep_stmt as select length(stringu1) from tenk1 group by length(stringu1); +explain (costs off) create table parallel_write as execute prep_stmt; + QUERY PLAN +--------------------------------------------------- + Finalize HashAggregate + Group Key: (length((stringu1)::text)) + -> Gather + Workers Planned: 4 + -> Partial HashAggregate + Group Key: length((stringu1)::text) + -> Parallel Seq Scan on tenk1 +(7 rows) + +create table parallel_write as execute prep_stmt; +drop table parallel_write; +rollback; diff --git a/src/test/regress/expected/xml.out b/src/test/regress/expected/xml.out index bcc585d427..6e1f885112 100644 --- a/src/test/regress/expected/xml.out +++ b/src/test/regress/expected/xml.out @@ -670,6 +670,47 @@ SELECT xpath('/nosuchtag', ''); {} (1 row) +SELECT xpath('root', ''); + xpath +----------- + {} +(1 row) + +-- Round-trip non-ASCII data through xpath(). +DO $$ +DECLARE + xml_declaration text := ''; + degree_symbol text; + res xml[]; +BEGIN + -- Per the documentation, except when the server encoding is UTF8, xpath() + -- may not work on non-ASCII data. The untranslatable_character and + -- undefined_function traps below, currently dead code, will become relevant + -- if we remove this limitation. 
+ IF current_setting('server_encoding') <> 'UTF8' THEN + RAISE LOG 'skip: encoding % unsupported for xpath', + current_setting('server_encoding'); + RETURN; + END IF; + + degree_symbol := convert_from('\xc2b0', 'UTF8'); + res := xpath('text()', (xml_declaration || + '' || degree_symbol || '')::xml); + IF degree_symbol <> res[1]::text THEN + RAISE 'expected % (%), got % (%)', + degree_symbol, convert_to(degree_symbol, 'UTF8'), + res[1], convert_to(res[1]::text, 'UTF8'); + END IF; +EXCEPTION + -- character with byte sequence 0xc2 0xb0 in encoding "UTF8" has no equivalent in encoding "LATIN8" + WHEN untranslatable_character + -- default conversion function for encoding "UTF8" to "MULE_INTERNAL" does not exist + OR undefined_function + -- unsupported XML feature + OR feature_not_supported THEN + RAISE LOG 'skip: %', SQLERRM; +END +$$; -- Test xmlexists and xpath_exists SELECT xmlexists('//town[text() = ''Toronto'']' PASSING BY REF 'Bidford-on-AvonCwmbranBristol'); xmlexists @@ -989,7 +1030,7 @@ SELECT xmltable.* PASSING data COLUMNS id int PATH '@id', _id FOR ORDINALITY, - country_name text PATH 'COUNTRY_NAME' NOT NULL, + country_name text PATH 'COUNTRY_NAME/text()' NOT NULL, country_id text PATH 'COUNTRY_ID', region_id int PATH 'REGION_ID', size float PATH 'SIZE', @@ -1011,7 +1052,7 @@ CREATE VIEW xmltableview1 AS SELECT xmltable.* PASSING data COLUMNS id int PATH '@id', _id FOR ORDINALITY, - country_name text PATH 'COUNTRY_NAME' NOT NULL, + country_name text PATH 'COUNTRY_NAME/text()' NOT NULL, country_id text PATH 'COUNTRY_ID', region_id int PATH 'REGION_ID', size float PATH 'SIZE', @@ -1040,7 +1081,7 @@ CREATE OR REPLACE VIEW public.xmltableview1 AS "xmltable".premier_name FROM ( SELECT xmldata.data FROM xmldata) x, - LATERAL XMLTABLE(('/ROWS/ROW'::text) PASSING (x.data) COLUMNS id integer PATH ('@id'::text), _id FOR ORDINALITY, country_name text PATH ('COUNTRY_NAME'::text) NOT NULL, country_id text PATH ('COUNTRY_ID'::text), region_id integer PATH ('REGION_ID'::text), size double precision PATH ('SIZE'::text), unit text PATH ('SIZE/@unit'::text), premier_name text DEFAULT ('not specified'::text) PATH ('PREMIER_NAME'::text)) + LATERAL XMLTABLE(('/ROWS/ROW'::text) PASSING (x.data) COLUMNS id integer PATH ('@id'::text), _id FOR ORDINALITY, country_name text PATH ('COUNTRY_NAME/text()'::text) NOT NULL, country_id text PATH ('COUNTRY_ID'::text), region_id integer PATH ('REGION_ID'::text), size double precision PATH ('SIZE'::text), unit text PATH ('SIZE/@unit'::text), premier_name text DEFAULT ('not specified'::text) PATH ('PREMIER_NAME'::text)) EXPLAIN (COSTS OFF) SELECT * FROM xmltableview1; QUERY PLAN ----------------------------------------- @@ -1050,15 +1091,15 @@ EXPLAIN (COSTS OFF) SELECT * FROM xmltableview1; (3 rows) EXPLAIN (COSTS OFF, VERBOSE) SELECT * FROM xmltableview1; - QUERY PLAN ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ + QUERY PLAN 
+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ Nested Loop Output: "xmltable".id, "xmltable"._id, "xmltable".country_name, "xmltable".country_id, "xmltable".region_id, "xmltable".size, "xmltable".unit, "xmltable".premier_name -> Seq Scan on public.xmldata Output: xmldata.data -> Table Function Scan on "xmltable" Output: "xmltable".id, "xmltable"._id, "xmltable".country_name, "xmltable".country_id, "xmltable".region_id, "xmltable".size, "xmltable".unit, "xmltable".premier_name - Table Function Call: XMLTABLE(('/ROWS/ROW'::text) PASSING (xmldata.data) COLUMNS id integer PATH ('@id'::text), _id FOR ORDINALITY, country_name text PATH ('COUNTRY_NAME'::text) NOT NULL, country_id text PATH ('COUNTRY_ID'::text), region_id integer PATH ('REGION_ID'::text), size double precision PATH ('SIZE'::text), unit text PATH ('SIZE/@unit'::text), premier_name text DEFAULT ('not specified'::text) PATH ('PREMIER_NAME'::text)) + Table Function Call: XMLTABLE(('/ROWS/ROW'::text) PASSING (xmldata.data) COLUMNS id integer PATH ('@id'::text), _id FOR ORDINALITY, country_name text PATH ('COUNTRY_NAME/text()'::text) NOT NULL, country_id text PATH ('COUNTRY_ID'::text), region_id integer PATH ('REGION_ID'::text), size double precision PATH ('SIZE'::text), unit text PATH ('SIZE/@unit'::text), premier_name text DEFAULT ('not specified'::text) PATH ('PREMIER_NAME'::text)) (7 rows) -- XMLNAMESPACES tests @@ -1177,7 +1218,7 @@ SELECT * FROM xmltable('/root' passing 'a1aa2aa1aa2a bbbbxxxcccc' COLUMNS element text PATH 'element/text()'); -- should fail ERROR: more than one value returned by column XPath expression -- CDATA test -select * from xmltable('r' passing ' &"<>!foo]]>2' columns c text); +select * from xmltable('d/r' passing ' &"<>!foo]]>2' columns c text); c ------------------------- &"<>!foo diff --git a/src/test/regress/expected/xml_1.out b/src/test/regress/expected/xml_1.out index d3bd8c91d7..0eba424346 100644 --- a/src/test/regress/expected/xml_1.out +++ b/src/test/regress/expected/xml_1.out @@ -576,6 +576,47 @@ LINE 1: SELECT xpath('/nosuchtag', ''); ^ DETAIL: This functionality requires the server to be built with libxml support. HINT: You need to rebuild PostgreSQL using --with-libxml. +SELECT xpath('root', ''); +ERROR: unsupported XML feature +LINE 1: SELECT xpath('root', ''); + ^ +DETAIL: This functionality requires the server to be built with libxml support. +HINT: You need to rebuild PostgreSQL using --with-libxml. +-- Round-trip non-ASCII data through xpath(). +DO $$ +DECLARE + xml_declaration text := ''; + degree_symbol text; + res xml[]; +BEGIN + -- Per the documentation, except when the server encoding is UTF8, xpath() + -- may not work on non-ASCII data. The untranslatable_character and + -- undefined_function traps below, currently dead code, will become relevant + -- if we remove this limitation. 
+ IF current_setting('server_encoding') <> 'UTF8' THEN + RAISE LOG 'skip: encoding % unsupported for xpath', + current_setting('server_encoding'); + RETURN; + END IF; + + degree_symbol := convert_from('\xc2b0', 'UTF8'); + res := xpath('text()', (xml_declaration || + '' || degree_symbol || '')::xml); + IF degree_symbol <> res[1]::text THEN + RAISE 'expected % (%), got % (%)', + degree_symbol, convert_to(degree_symbol, 'UTF8'), + res[1], convert_to(res[1]::text, 'UTF8'); + END IF; +EXCEPTION + -- character with byte sequence 0xc2 0xb0 in encoding "UTF8" has no equivalent in encoding "LATIN8" + WHEN untranslatable_character + -- default conversion function for encoding "UTF8" to "MULE_INTERNAL" does not exist + OR undefined_function + -- unsupported XML feature + OR feature_not_supported THEN + RAISE LOG 'skip: %', SQLERRM; +END +$$; -- Test xmlexists and xpath_exists SELECT xmlexists('//town[text() = ''Toronto'']' PASSING BY REF 'Bidford-on-AvonCwmbranBristol'); ERROR: unsupported XML feature @@ -873,7 +914,7 @@ SELECT xmltable.* PASSING data COLUMNS id int PATH '@id', _id FOR ORDINALITY, - country_name text PATH 'COUNTRY_NAME' NOT NULL, + country_name text PATH 'COUNTRY_NAME/text()' NOT NULL, country_id text PATH 'COUNTRY_ID', region_id int PATH 'REGION_ID', size float PATH 'SIZE', @@ -889,7 +930,7 @@ CREATE VIEW xmltableview1 AS SELECT xmltable.* PASSING data COLUMNS id int PATH '@id', _id FOR ORDINALITY, - country_name text PATH 'COUNTRY_NAME' NOT NULL, + country_name text PATH 'COUNTRY_NAME/text()' NOT NULL, country_id text PATH 'COUNTRY_ID', region_id int PATH 'REGION_ID', size float PATH 'SIZE', @@ -912,7 +953,7 @@ CREATE OR REPLACE VIEW public.xmltableview1 AS "xmltable".premier_name FROM ( SELECT xmldata.data FROM xmldata) x, - LATERAL XMLTABLE(('/ROWS/ROW'::text) PASSING (x.data) COLUMNS id integer PATH ('@id'::text), _id FOR ORDINALITY, country_name text PATH ('COUNTRY_NAME'::text) NOT NULL, country_id text PATH ('COUNTRY_ID'::text), region_id integer PATH ('REGION_ID'::text), size double precision PATH ('SIZE'::text), unit text PATH ('SIZE/@unit'::text), premier_name text DEFAULT ('not specified'::text) PATH ('PREMIER_NAME'::text)) + LATERAL XMLTABLE(('/ROWS/ROW'::text) PASSING (x.data) COLUMNS id integer PATH ('@id'::text), _id FOR ORDINALITY, country_name text PATH ('COUNTRY_NAME/text()'::text) NOT NULL, country_id text PATH ('COUNTRY_ID'::text), region_id integer PATH ('REGION_ID'::text), size double precision PATH ('SIZE'::text), unit text PATH ('SIZE/@unit'::text), premier_name text DEFAULT ('not specified'::text) PATH ('PREMIER_NAME'::text)) EXPLAIN (COSTS OFF) SELECT * FROM xmltableview1; QUERY PLAN ----------------------------------------- @@ -922,15 +963,15 @@ EXPLAIN (COSTS OFF) SELECT * FROM xmltableview1; (3 rows) EXPLAIN (COSTS OFF, VERBOSE) SELECT * FROM xmltableview1; - QUERY PLAN ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ + QUERY PLAN 
+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ Nested Loop Output: "xmltable".id, "xmltable"._id, "xmltable".country_name, "xmltable".country_id, "xmltable".region_id, "xmltable".size, "xmltable".unit, "xmltable".premier_name -> Seq Scan on public.xmldata Output: xmldata.data -> Table Function Scan on "xmltable" Output: "xmltable".id, "xmltable"._id, "xmltable".country_name, "xmltable".country_id, "xmltable".region_id, "xmltable".size, "xmltable".unit, "xmltable".premier_name - Table Function Call: XMLTABLE(('/ROWS/ROW'::text) PASSING (xmldata.data) COLUMNS id integer PATH ('@id'::text), _id FOR ORDINALITY, country_name text PATH ('COUNTRY_NAME'::text) NOT NULL, country_id text PATH ('COUNTRY_ID'::text), region_id integer PATH ('REGION_ID'::text), size double precision PATH ('SIZE'::text), unit text PATH ('SIZE/@unit'::text), premier_name text DEFAULT ('not specified'::text) PATH ('PREMIER_NAME'::text)) + Table Function Call: XMLTABLE(('/ROWS/ROW'::text) PASSING (xmldata.data) COLUMNS id integer PATH ('@id'::text), _id FOR ORDINALITY, country_name text PATH ('COUNTRY_NAME/text()'::text) NOT NULL, country_id text PATH ('COUNTRY_ID'::text), region_id integer PATH ('REGION_ID'::text), size double precision PATH ('SIZE'::text), unit text PATH ('SIZE/@unit'::text), premier_name text DEFAULT ('not specified'::text) PATH ('PREMIER_NAME'::text)) (7 rows) -- XMLNAMESPACES tests @@ -1032,10 +1073,10 @@ LINE 1: SELECT * FROM xmltable('/root' passing 'a1a &"<>!foo]]>2' columns c text); +select * from xmltable('d/r' passing ' &"<>!foo]]>2' columns c text); ERROR: unsupported XML feature -LINE 1: select * from xmltable('r' passing ''); {} (1 row) +SELECT xpath('root', ''); + xpath +----------- + {} +(1 row) + +-- Round-trip non-ASCII data through xpath(). +DO $$ +DECLARE + xml_declaration text := ''; + degree_symbol text; + res xml[]; +BEGIN + -- Per the documentation, except when the server encoding is UTF8, xpath() + -- may not work on non-ASCII data. The untranslatable_character and + -- undefined_function traps below, currently dead code, will become relevant + -- if we remove this limitation. 
+ IF current_setting('server_encoding') <> 'UTF8' THEN + RAISE LOG 'skip: encoding % unsupported for xpath', + current_setting('server_encoding'); + RETURN; + END IF; + + degree_symbol := convert_from('\xc2b0', 'UTF8'); + res := xpath('text()', (xml_declaration || + '' || degree_symbol || '')::xml); + IF degree_symbol <> res[1]::text THEN + RAISE 'expected % (%), got % (%)', + degree_symbol, convert_to(degree_symbol, 'UTF8'), + res[1], convert_to(res[1]::text, 'UTF8'); + END IF; +EXCEPTION + -- character with byte sequence 0xc2 0xb0 in encoding "UTF8" has no equivalent in encoding "LATIN8" + WHEN untranslatable_character + -- default conversion function for encoding "UTF8" to "MULE_INTERNAL" does not exist + OR undefined_function + -- unsupported XML feature + OR feature_not_supported THEN + RAISE LOG 'skip: %', SQLERRM; +END +$$; -- Test xmlexists and xpath_exists SELECT xmlexists('//town[text() = ''Toronto'']' PASSING BY REF 'Bidford-on-AvonCwmbranBristol'); xmlexists @@ -969,7 +1010,7 @@ SELECT xmltable.* PASSING data COLUMNS id int PATH '@id', _id FOR ORDINALITY, - country_name text PATH 'COUNTRY_NAME' NOT NULL, + country_name text PATH 'COUNTRY_NAME/text()' NOT NULL, country_id text PATH 'COUNTRY_ID', region_id int PATH 'REGION_ID', size float PATH 'SIZE', @@ -991,7 +1032,7 @@ CREATE VIEW xmltableview1 AS SELECT xmltable.* PASSING data COLUMNS id int PATH '@id', _id FOR ORDINALITY, - country_name text PATH 'COUNTRY_NAME' NOT NULL, + country_name text PATH 'COUNTRY_NAME/text()' NOT NULL, country_id text PATH 'COUNTRY_ID', region_id int PATH 'REGION_ID', size float PATH 'SIZE', @@ -1020,7 +1061,7 @@ CREATE OR REPLACE VIEW public.xmltableview1 AS "xmltable".premier_name FROM ( SELECT xmldata.data FROM xmldata) x, - LATERAL XMLTABLE(('/ROWS/ROW'::text) PASSING (x.data) COLUMNS id integer PATH ('@id'::text), _id FOR ORDINALITY, country_name text PATH ('COUNTRY_NAME'::text) NOT NULL, country_id text PATH ('COUNTRY_ID'::text), region_id integer PATH ('REGION_ID'::text), size double precision PATH ('SIZE'::text), unit text PATH ('SIZE/@unit'::text), premier_name text DEFAULT ('not specified'::text) PATH ('PREMIER_NAME'::text)) + LATERAL XMLTABLE(('/ROWS/ROW'::text) PASSING (x.data) COLUMNS id integer PATH ('@id'::text), _id FOR ORDINALITY, country_name text PATH ('COUNTRY_NAME/text()'::text) NOT NULL, country_id text PATH ('COUNTRY_ID'::text), region_id integer PATH ('REGION_ID'::text), size double precision PATH ('SIZE'::text), unit text PATH ('SIZE/@unit'::text), premier_name text DEFAULT ('not specified'::text) PATH ('PREMIER_NAME'::text)) EXPLAIN (COSTS OFF) SELECT * FROM xmltableview1; QUERY PLAN ----------------------------------------- @@ -1030,15 +1071,15 @@ EXPLAIN (COSTS OFF) SELECT * FROM xmltableview1; (3 rows) EXPLAIN (COSTS OFF, VERBOSE) SELECT * FROM xmltableview1; - QUERY PLAN ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ + QUERY PLAN 
+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ Nested Loop Output: "xmltable".id, "xmltable"._id, "xmltable".country_name, "xmltable".country_id, "xmltable".region_id, "xmltable".size, "xmltable".unit, "xmltable".premier_name -> Seq Scan on public.xmldata Output: xmldata.data -> Table Function Scan on "xmltable" Output: "xmltable".id, "xmltable"._id, "xmltable".country_name, "xmltable".country_id, "xmltable".region_id, "xmltable".size, "xmltable".unit, "xmltable".premier_name - Table Function Call: XMLTABLE(('/ROWS/ROW'::text) PASSING (xmldata.data) COLUMNS id integer PATH ('@id'::text), _id FOR ORDINALITY, country_name text PATH ('COUNTRY_NAME'::text) NOT NULL, country_id text PATH ('COUNTRY_ID'::text), region_id integer PATH ('REGION_ID'::text), size double precision PATH ('SIZE'::text), unit text PATH ('SIZE/@unit'::text), premier_name text DEFAULT ('not specified'::text) PATH ('PREMIER_NAME'::text)) + Table Function Call: XMLTABLE(('/ROWS/ROW'::text) PASSING (xmldata.data) COLUMNS id integer PATH ('@id'::text), _id FOR ORDINALITY, country_name text PATH ('COUNTRY_NAME/text()'::text) NOT NULL, country_id text PATH ('COUNTRY_ID'::text), region_id integer PATH ('REGION_ID'::text), size double precision PATH ('SIZE'::text), unit text PATH ('SIZE/@unit'::text), premier_name text DEFAULT ('not specified'::text) PATH ('PREMIER_NAME'::text)) (7 rows) -- XMLNAMESPACES tests @@ -1157,7 +1198,7 @@ SELECT * FROM xmltable('/root' passing 'a1aa2aa1aa2a bbbbxxxcccc' COLUMNS element text PATH 'element/text()'); -- should fail ERROR: more than one value returned by column XPath expression -- CDATA test -select * from xmltable('r' passing ' &"<>!foo]]>2' columns c text); +select * from xmltable('d/r' passing ' &"<>!foo]]>2' columns c text); c ------------------------- &"<>!foo diff --git a/src/test/regress/input/constraints.source b/src/test/regress/input/constraints.source index dbab8f159b..98dd4210e9 100644 --- a/src/test/regress/input/constraints.source +++ b/src/test/regress/input/constraints.source @@ -394,6 +394,22 @@ SET CONSTRAINTS ALL IMMEDIATE; -- should fail COMMIT; +-- test deferrable UNIQUE with a partitioned table +CREATE TABLE parted_uniq_tbl (i int UNIQUE DEFERRABLE) partition by range (i); +CREATE TABLE parted_uniq_tbl_1 PARTITION OF parted_uniq_tbl FOR VALUES FROM (0) TO (10); +CREATE TABLE parted_uniq_tbl_2 PARTITION OF parted_uniq_tbl FOR VALUES FROM (20) TO (30); +SELECT conname, conrelid::regclass FROM pg_constraint + WHERE conname LIKE 'parted_uniq%' ORDER BY conname; +BEGIN; +INSERT INTO parted_uniq_tbl VALUES (1); +SAVEPOINT f; +INSERT INTO parted_uniq_tbl VALUES (1); -- unique violation +ROLLBACK TO f; +SET CONSTRAINTS parted_uniq_tbl_i_key DEFERRED; +INSERT INTO parted_uniq_tbl VALUES (1); -- OK now, fail at commit +COMMIT; +DROP TABLE parted_uniq_tbl; + -- test a HOT update that invalidates the conflicting tuple. 
-- the trigger should still fire and catch the violation diff --git a/src/test/regress/input/copy.source b/src/test/regress/input/copy.source index cb13606d14..4cb03c566f 100644 --- a/src/test/regress/input/copy.source +++ b/src/test/regress/input/copy.source @@ -133,3 +133,52 @@ this is just a line full of junk that would error out if parsed \. copy copytest3 to stdout csv header; + +-- test copy from with a partitioned table +create table parted_copytest ( + a int, + b int, + c text +) partition by list (b); + +create table parted_copytest_a1 (c text, b int, a int); +create table parted_copytest_a2 (a int, c text, b int); + +alter table parted_copytest attach partition parted_copytest_a1 for values in(1); +alter table parted_copytest attach partition parted_copytest_a2 for values in(2); + +-- We must insert enough rows to trigger multi-inserts. These are only +-- enabled adaptively when there are few enough partition changes. +insert into parted_copytest select x,1,'One' from generate_series(1,1000) x; +insert into parted_copytest select x,2,'Two' from generate_series(1001,1010) x; +insert into parted_copytest select x,1,'One' from generate_series(1011,1020) x; + +copy (select * from parted_copytest order by a) to '@abs_builddir@/results/parted_copytest.csv'; + +truncate parted_copytest; + +copy parted_copytest from '@abs_builddir@/results/parted_copytest.csv'; + +select tableoid::regclass,count(*),sum(a) from parted_copytest +group by tableoid order by tableoid::regclass::name; + +truncate parted_copytest; + +-- create before insert row trigger on parted_copytest_a2 +create function part_ins_func() returns trigger language plpgsql as $$ +begin + return new; +end; +$$; + +create trigger part_ins_trig + before insert on parted_copytest_a2 + for each row + execute procedure part_ins_func(); + +copy parted_copytest from '@abs_builddir@/results/parted_copytest.csv'; + +select tableoid::regclass,count(*),sum(a) from parted_copytest +group by tableoid order by tableoid::regclass::name; + +drop table parted_copytest; diff --git a/src/test/regress/input/create_function_1.source b/src/test/regress/input/create_function_1.source index f2b1561cc2..26e2227d3a 100644 --- a/src/test/regress/input/create_function_1.source +++ b/src/test/regress/input/create_function_1.source @@ -37,7 +37,7 @@ CREATE FUNCTION autoinc () AS '@libdir@/autoinc@DLSUFFIX@' LANGUAGE C; -CREATE FUNCTION funny_dup17 () +CREATE FUNCTION trigger_return_old () RETURNS trigger AS '@libdir@/regress@DLSUFFIX@' LANGUAGE C; @@ -62,6 +62,12 @@ CREATE FUNCTION test_atomic_ops() AS '@libdir@/regress@DLSUFFIX@' LANGUAGE C; +-- Tests creating a FDW handler +CREATE FUNCTION test_fdw_handler() + RETURNS fdw_handler + AS '@libdir@/regress@DLSUFFIX@', 'test_fdw_handler' + LANGUAGE C; + -- Things that shouldn't work: CREATE FUNCTION test1 (int) RETURNS int LANGUAGE SQL diff --git a/src/test/regress/input/create_function_2.source b/src/test/regress/input/create_function_2.source index b167c8ac6d..9e6d2942ec 100644 --- a/src/test/regress/input/create_function_2.source +++ b/src/test/regress/input/create_function_2.source @@ -72,11 +72,6 @@ CREATE FUNCTION overpaid(emp) AS '@libdir@/regress@DLSUFFIX@' LANGUAGE C STRICT; -CREATE FUNCTION boxarea(box) - RETURNS float8 - AS '@libdir@/regress@DLSUFFIX@' - LANGUAGE C STRICT; - CREATE FUNCTION interpt_pp(path, path) RETURNS point AS '@libdir@/regress@DLSUFFIX@' diff --git a/src/test/regress/input/tablespace.source b/src/test/regress/input/tablespace.source index 03a62bd760..60c87261db 100644 --- 
a/src/test/regress/input/tablespace.source +++ b/src/test/regress/input/tablespace.source @@ -12,10 +12,10 @@ DROP TABLESPACE regress_tblspacewith; CREATE TABLESPACE regress_tblspace LOCATION '@testtablespace@'; -- try setting and resetting some properties for the new tablespace -ALTER TABLESPACE regress_tblspace SET (random_page_cost = 1.0); +ALTER TABLESPACE regress_tblspace SET (random_page_cost = 1.0, seq_page_cost = 1.1); ALTER TABLESPACE regress_tblspace SET (some_nonexistent_parameter = true); -- fail ALTER TABLESPACE regress_tblspace RESET (random_page_cost = 2.0); -- fail -ALTER TABLESPACE regress_tblspace RESET (random_page_cost, seq_page_cost); -- ok +ALTER TABLESPACE regress_tblspace RESET (random_page_cost, effective_io_concurrency); -- ok -- create a schema we can use CREATE SCHEMA testschema; @@ -44,6 +44,14 @@ CREATE INDEX foo_idx on testschema.foo(i) TABLESPACE regress_tblspace; SELECT relname, spcname FROM pg_catalog.pg_tablespace t, pg_catalog.pg_class c where c.reltablespace = t.oid AND c.relname = 'foo_idx'; +-- partitioned index +CREATE TABLE testschema.part (a int) PARTITION BY LIST (a); +CREATE TABLE testschema.part1 PARTITION OF testschema.part FOR VALUES IN (1); +CREATE INDEX part_a_idx ON testschema.part (a) TABLESPACE regress_tblspace; +CREATE TABLE testschema.part2 PARTITION OF testschema.part FOR VALUES IN (2); +SELECT relname, spcname FROM pg_catalog.pg_tablespace t, pg_catalog.pg_class c + where c.reltablespace = t.oid AND c.relname LIKE 'part%_idx'; + -- check that default_tablespace doesn't affect ALTER TABLE index rebuilds CREATE TABLE testschema.test_default_tab(id bigint) TABLESPACE regress_tblspace; INSERT INTO testschema.test_default_tab VALUES (1); @@ -93,6 +101,8 @@ CREATE UNIQUE INDEX anindex ON testschema.atable(column1); ALTER TABLE testschema.atable SET TABLESPACE regress_tblspace; ALTER INDEX testschema.anindex SET TABLESPACE regress_tblspace; +ALTER INDEX testschema.part_a_idx SET TABLESPACE pg_default; +ALTER INDEX testschema.part_a_idx SET TABLESPACE regress_tblspace; INSERT INTO testschema.atable VALUES(3); -- ok INSERT INTO testschema.atable VALUES(1); -- fail (checks index) diff --git a/src/test/regress/output/constraints.source b/src/test/regress/output/constraints.source index bb75165cc2..e8389064b0 100644 --- a/src/test/regress/output/constraints.source +++ b/src/test/regress/output/constraints.source @@ -228,6 +228,8 @@ CREATE TABLE SYS_COL_CHECK_TBL (city text, state text, is_capital bool, altitude int, CHECK (NOT (is_capital AND ctid::text = 'sys_col_check_tbl'))); ERROR: system column "ctid" reference in check constraint is invalid +LINE 3: CHECK (NOT (is_capital AND ctid::text = 'sys_col_check... + ^ -- -- Check inheritance of defaults and constraints -- @@ -547,6 +549,32 @@ SET CONSTRAINTS ALL IMMEDIATE; -- should fail ERROR: duplicate key value violates unique constraint "unique_tbl_i_key" DETAIL: Key (i)=(3) already exists. 
COMMIT; +-- test deferrable UNIQUE with a partitioned table +CREATE TABLE parted_uniq_tbl (i int UNIQUE DEFERRABLE) partition by range (i); +CREATE TABLE parted_uniq_tbl_1 PARTITION OF parted_uniq_tbl FOR VALUES FROM (0) TO (10); +CREATE TABLE parted_uniq_tbl_2 PARTITION OF parted_uniq_tbl FOR VALUES FROM (20) TO (30); +SELECT conname, conrelid::regclass FROM pg_constraint + WHERE conname LIKE 'parted_uniq%' ORDER BY conname; + conname | conrelid +-------------------------+------------------- + parted_uniq_tbl_1_i_key | parted_uniq_tbl_1 + parted_uniq_tbl_2_i_key | parted_uniq_tbl_2 + parted_uniq_tbl_i_key | parted_uniq_tbl +(3 rows) + +BEGIN; +INSERT INTO parted_uniq_tbl VALUES (1); +SAVEPOINT f; +INSERT INTO parted_uniq_tbl VALUES (1); -- unique violation +ERROR: duplicate key value violates unique constraint "parted_uniq_tbl_1_i_key" +DETAIL: Key (i)=(1) already exists. +ROLLBACK TO f; +SET CONSTRAINTS parted_uniq_tbl_i_key DEFERRED; +INSERT INTO parted_uniq_tbl VALUES (1); -- OK now, fail at commit +COMMIT; +ERROR: duplicate key value violates unique constraint "parted_uniq_tbl_1_i_key" +DETAIL: Key (i)=(1) already exists. +DROP TABLE parted_uniq_tbl; -- test a HOT update that invalidates the conflicting tuple. -- the trigger should still fire and catch the violation BEGIN; diff --git a/src/test/regress/output/copy.source b/src/test/regress/output/copy.source index b7e372d61b..ddd652c712 100644 --- a/src/test/regress/output/copy.source +++ b/src/test/regress/output/copy.source @@ -95,3 +95,50 @@ copy copytest3 to stdout csv header; c1,"col with , comma","col with "" quote" 1,a,1 2,b,2 +-- test copy from with a partitioned table +create table parted_copytest ( + a int, + b int, + c text +) partition by list (b); +create table parted_copytest_a1 (c text, b int, a int); +create table parted_copytest_a2 (a int, c text, b int); +alter table parted_copytest attach partition parted_copytest_a1 for values in(1); +alter table parted_copytest attach partition parted_copytest_a2 for values in(2); +-- We must insert enough rows to trigger multi-inserts. These are only +-- enabled adaptively when there are few enough partition changes. 
+insert into parted_copytest select x,1,'One' from generate_series(1,1000) x; +insert into parted_copytest select x,2,'Two' from generate_series(1001,1010) x; +insert into parted_copytest select x,1,'One' from generate_series(1011,1020) x; +copy (select * from parted_copytest order by a) to '@abs_builddir@/results/parted_copytest.csv'; +truncate parted_copytest; +copy parted_copytest from '@abs_builddir@/results/parted_copytest.csv'; +select tableoid::regclass,count(*),sum(a) from parted_copytest +group by tableoid order by tableoid::regclass::name; + tableoid | count | sum +--------------------+-------+-------- + parted_copytest_a1 | 1010 | 510655 + parted_copytest_a2 | 10 | 10055 +(2 rows) + +truncate parted_copytest; +-- create before insert row trigger on parted_copytest_a2 +create function part_ins_func() returns trigger language plpgsql as $$ +begin + return new; +end; +$$; +create trigger part_ins_trig + before insert on parted_copytest_a2 + for each row + execute procedure part_ins_func(); +copy parted_copytest from '@abs_builddir@/results/parted_copytest.csv'; +select tableoid::regclass,count(*),sum(a) from parted_copytest +group by tableoid order by tableoid::regclass::name; + tableoid | count | sum +--------------------+-------+-------- + parted_copytest_a1 | 1010 | 510655 + parted_copytest_a2 | 10 | 10055 +(2 rows) + +drop table parted_copytest; diff --git a/src/test/regress/output/create_function_1.source b/src/test/regress/output/create_function_1.source index 957595c51e..8c50d9b309 100644 --- a/src/test/regress/output/create_function_1.source +++ b/src/test/regress/output/create_function_1.source @@ -35,7 +35,7 @@ CREATE FUNCTION autoinc () RETURNS trigger AS '@libdir@/autoinc@DLSUFFIX@' LANGUAGE C; -CREATE FUNCTION funny_dup17 () +CREATE FUNCTION trigger_return_old () RETURNS trigger AS '@libdir@/regress@DLSUFFIX@' LANGUAGE C; @@ -55,6 +55,11 @@ CREATE FUNCTION test_atomic_ops() RETURNS bool AS '@libdir@/regress@DLSUFFIX@' LANGUAGE C; +-- Tests creating a FDW handler +CREATE FUNCTION test_fdw_handler() + RETURNS fdw_handler + AS '@libdir@/regress@DLSUFFIX@', 'test_fdw_handler' + LANGUAGE C; -- Things that shouldn't work: CREATE FUNCTION test1 (int) RETURNS int LANGUAGE SQL AS 'SELECT ''not an integer'';'; diff --git a/src/test/regress/output/create_function_2.source b/src/test/regress/output/create_function_2.source index 8f28bff298..ac9a7f5cf8 100644 --- a/src/test/regress/output/create_function_2.source +++ b/src/test/regress/output/create_function_2.source @@ -55,10 +55,6 @@ CREATE FUNCTION overpaid(emp) RETURNS bool AS '@libdir@/regress@DLSUFFIX@' LANGUAGE C STRICT; -CREATE FUNCTION boxarea(box) - RETURNS float8 - AS '@libdir@/regress@DLSUFFIX@' - LANGUAGE C STRICT; CREATE FUNCTION interpt_pp(path, path) RETURNS point AS '@libdir@/regress@DLSUFFIX@' diff --git a/src/test/regress/output/tablespace.source b/src/test/regress/output/tablespace.source index aaedf5f248..43962e6f01 100644 --- a/src/test/regress/output/tablespace.source +++ b/src/test/regress/output/tablespace.source @@ -14,12 +14,12 @@ DROP TABLESPACE regress_tblspacewith; -- create a tablespace we can use CREATE TABLESPACE regress_tblspace LOCATION '@testtablespace@'; -- try setting and resetting some properties for the new tablespace -ALTER TABLESPACE regress_tblspace SET (random_page_cost = 1.0); +ALTER TABLESPACE regress_tblspace SET (random_page_cost = 1.0, seq_page_cost = 1.1); ALTER TABLESPACE regress_tblspace SET (some_nonexistent_parameter = true); -- fail ERROR: unrecognized parameter 
"some_nonexistent_parameter" ALTER TABLESPACE regress_tblspace RESET (random_page_cost = 2.0); -- fail ERROR: RESET must not include values for parameters -ALTER TABLESPACE regress_tblspace RESET (random_page_cost, seq_page_cost); -- ok +ALTER TABLESPACE regress_tblspace RESET (random_page_cost, effective_io_concurrency); -- ok -- create a schema we can use CREATE SCHEMA testschema; -- try a table @@ -61,23 +61,37 @@ SELECT relname, spcname FROM pg_catalog.pg_tablespace t, pg_catalog.pg_class c foo_idx | regress_tblspace (1 row) +-- partitioned index +CREATE TABLE testschema.part (a int) PARTITION BY LIST (a); +CREATE TABLE testschema.part1 PARTITION OF testschema.part FOR VALUES IN (1); +CREATE INDEX part_a_idx ON testschema.part (a) TABLESPACE regress_tblspace; +CREATE TABLE testschema.part2 PARTITION OF testschema.part FOR VALUES IN (2); +SELECT relname, spcname FROM pg_catalog.pg_tablespace t, pg_catalog.pg_class c + where c.reltablespace = t.oid AND c.relname LIKE 'part%_idx'; + relname | spcname +-------------+------------------ + part1_a_idx | regress_tblspace + part2_a_idx | regress_tblspace + part_a_idx | regress_tblspace +(3 rows) + -- check that default_tablespace doesn't affect ALTER TABLE index rebuilds CREATE TABLE testschema.test_default_tab(id bigint) TABLESPACE regress_tblspace; INSERT INTO testschema.test_default_tab VALUES (1); CREATE INDEX test_index1 on testschema.test_default_tab (id); CREATE INDEX test_index2 on testschema.test_default_tab (id) TABLESPACE regress_tblspace; \d testschema.test_index1 -Index "testschema.test_index1" - Column | Type | Definition ---------+--------+------------ - id | bigint | id + Index "testschema.test_index1" + Column | Type | Key? | Definition +--------+--------+------+------------ + id | bigint | yes | id btree, for table "testschema.test_default_tab" \d testschema.test_index2 -Index "testschema.test_index2" - Column | Type | Definition ---------+--------+------------ - id | bigint | id + Index "testschema.test_index2" + Column | Type | Key? | Definition +--------+--------+------+------------ + id | bigint | yes | id btree, for table "testschema.test_default_tab" Tablespace: "regress_tblspace" @@ -86,17 +100,17 @@ SET default_tablespace TO regress_tblspace; -- tablespace should not change if no rewrite ALTER TABLE testschema.test_default_tab ALTER id TYPE bigint; \d testschema.test_index1 -Index "testschema.test_index1" - Column | Type | Definition ---------+--------+------------ - id | bigint | id + Index "testschema.test_index1" + Column | Type | Key? | Definition +--------+--------+------+------------ + id | bigint | yes | id btree, for table "testschema.test_default_tab" \d testschema.test_index2 -Index "testschema.test_index2" - Column | Type | Definition ---------+--------+------------ - id | bigint | id + Index "testschema.test_index2" + Column | Type | Key? | Definition +--------+--------+------+------------ + id | bigint | yes | id btree, for table "testschema.test_default_tab" Tablespace: "regress_tblspace" @@ -109,17 +123,17 @@ SELECT * FROM testschema.test_default_tab; -- tablespace should not change even if there is an index rewrite ALTER TABLE testschema.test_default_tab ALTER id TYPE int; \d testschema.test_index1 -Index "testschema.test_index1" - Column | Type | Definition ---------+---------+------------ - id | integer | id + Index "testschema.test_index1" + Column | Type | Key? 
| Definition +--------+---------+------+------------ + id | integer | yes | id btree, for table "testschema.test_default_tab" \d testschema.test_index2 -Index "testschema.test_index2" - Column | Type | Definition ---------+---------+------------ - id | integer | id + Index "testschema.test_index2" + Column | Type | Key? | Definition +--------+---------+------+------------ + id | integer | yes | id btree, for table "testschema.test_default_tab" Tablespace: "regress_tblspace" @@ -134,34 +148,34 @@ SET default_tablespace TO ''; -- tablespace should not change if no rewrite ALTER TABLE testschema.test_default_tab ALTER id TYPE int; \d testschema.test_index1 -Index "testschema.test_index1" - Column | Type | Definition ---------+---------+------------ - id | integer | id + Index "testschema.test_index1" + Column | Type | Key? | Definition +--------+---------+------+------------ + id | integer | yes | id btree, for table "testschema.test_default_tab" \d testschema.test_index2 -Index "testschema.test_index2" - Column | Type | Definition ---------+---------+------------ - id | integer | id + Index "testschema.test_index2" + Column | Type | Key? | Definition +--------+---------+------+------------ + id | integer | yes | id btree, for table "testschema.test_default_tab" Tablespace: "regress_tblspace" -- tablespace should not change even if there is an index rewrite ALTER TABLE testschema.test_default_tab ALTER id TYPE bigint; \d testschema.test_index1 -Index "testschema.test_index1" - Column | Type | Definition ---------+--------+------------ - id | bigint | id + Index "testschema.test_index1" + Column | Type | Key? | Definition +--------+--------+------+------------ + id | bigint | yes | id btree, for table "testschema.test_default_tab" \d testschema.test_index2 -Index "testschema.test_index2" - Column | Type | Definition ---------+--------+------------ - id | bigint | id + Index "testschema.test_index2" + Column | Type | Key? | Definition +--------+--------+------+------------ + id | bigint | yes | id btree, for table "testschema.test_default_tab" Tablespace: "regress_tblspace" @@ -174,18 +188,18 @@ ALTER TABLE testschema.test_tab ADD CONSTRAINT test_tab_unique UNIQUE (id); SET default_tablespace TO ''; ALTER TABLE testschema.test_tab ADD CONSTRAINT test_tab_pkey PRIMARY KEY (id); \d testschema.test_tab_unique -Index "testschema.test_tab_unique" - Column | Type | Definition ---------+---------+------------ - id | integer | id + Index "testschema.test_tab_unique" + Column | Type | Key? | Definition +--------+---------+------+------------ + id | integer | yes | id unique, btree, for table "testschema.test_tab" Tablespace: "regress_tblspace" \d testschema.test_tab_pkey -Index "testschema.test_tab_pkey" - Column | Type | Definition ---------+---------+------------ - id | integer | id + Index "testschema.test_tab_pkey" + Column | Type | Key? 
| Definition +--------+---------+------+------------ + id | integer | yes | id primary key, btree, for table "testschema.test_tab" SELECT * FROM testschema.test_tab; @@ -200,6 +214,8 @@ CREATE TABLE testschema.atable AS VALUES (1), (2); CREATE UNIQUE INDEX anindex ON testschema.atable(column1); ALTER TABLE testschema.atable SET TABLESPACE regress_tblspace; ALTER INDEX testschema.anindex SET TABLESPACE regress_tblspace; +ALTER INDEX testschema.part_a_idx SET TABLESPACE pg_default; +ALTER INDEX testschema.part_a_idx SET TABLESPACE regress_tblspace; INSERT INTO testschema.atable VALUES(3); -- ok INSERT INTO testschema.atable VALUES(1); -- fail (checks index) ERROR: duplicate key value violates unique constraint "anindex" @@ -241,10 +257,11 @@ NOTICE: no matching relations in tablespace "regress_tblspace_renamed" found -- Should succeed DROP TABLESPACE regress_tblspace_renamed; DROP SCHEMA testschema CASCADE; -NOTICE: drop cascades to 5 other objects +NOTICE: drop cascades to 6 other objects DETAIL: drop cascades to table testschema.foo drop cascades to table testschema.asselect drop cascades to table testschema.asexecute +drop cascades to table testschema.part drop cascades to table testschema.atable drop cascades to table testschema.tablespace_acl DROP ROLE regress_tablespace_user1; diff --git a/src/test/regress/parallel_schedule b/src/test/regress/parallel_schedule index eefdeeacae..289c658483 100644 --- a/src/test/regress/parallel_schedule +++ b/src/test/regress/parallel_schedule @@ -23,12 +23,12 @@ test: numerology # ---------- # The second group of parallel tests # ---------- -test: point lseg line box path polygon circle date time timetz timestamp timestamptz interval abstime reltime tinterval inet macaddr macaddr8 tstypes +test: point lseg line box path polygon circle date time timetz timestamp timestamptz interval inet macaddr macaddr8 tstypes # ---------- # Another group of parallel tests # geometry depends on point, lseg, box, path, polygon and circle -# horology depends on interval, timetz, timestamp, timestamptz, reltime and abstime +# horology depends on interval, timetz, timestamp, timestamptz # ---------- test: geometry horology regex oidjoins type_sanity opr_sanity misc_sanity comments expressions @@ -53,14 +53,14 @@ test: copy copyselect copydml # ---------- # More groups of parallel tests # ---------- -test: create_misc create_operator +test: create_misc create_operator create_procedure # These depend on the above two -test: create_index create_view +test: create_index create_view index_including # ---------- # Another group of parallel tests # ---------- -test: create_aggregate create_function_3 create_cast constraints triggers inherit create_table_like typed_table vacuum drop_if_exists updatable_views rolenames roleattributes create_am +test: create_aggregate create_function_3 create_cast constraints triggers inherit create_table_like typed_table vacuum drop_if_exists updatable_views rolenames roleattributes create_am hash_func # ---------- # sanity_check does a vacuum, affecting the sort order of SELECT * @@ -96,6 +96,7 @@ test: rules psql_crosstab amutils # run by itself so it can run parallel workers test: select_parallel +test: write_parallel # no relation related tests can be put in this group test: publication subscription @@ -104,6 +105,7 @@ test: publication subscription # Another group of parallel tests # ---------- test: select_views portals_p2 foreign_key cluster dependency guc bitmapops combocid tsearch tsdicts foreign_data window xmlmap functional_deps 
advisory_lock json jsonb json_encoding indirect_toast equivclass + # ---------- # Another group of parallel tests # NB: temp.sql does a reconnect which transiently uses 2 connections, @@ -114,10 +116,12 @@ test: plancache limit plpgsql copy2 temp domain rangefuncs prepare without_oid c # ---------- # Another group of parallel tests # ---------- -test: identity +test: identity partition_join partition_prune reloptions hash_part indexing partition_aggregate partition_info # event triggers cannot run concurrently with any test that runs DDL test: event_trigger +# this test also uses event triggers, so likewise run it by itself +test: fast_default # run stats by itself because its delay may be insufficient under heavy load test: stats diff --git a/src/test/regress/pg_regress.c b/src/test/regress/pg_regress.c index abb742b1ed..3248603da1 100644 --- a/src/test/regress/pg_regress.c +++ b/src/test/regress/pg_regress.c @@ -8,7 +8,7 @@ * * This code is released under the terms of the PostgreSQL License. * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/test/regress/pg_regress.c @@ -78,6 +78,7 @@ char *launcher = NULL; static _stringlist *loadlanguage = NULL; static _stringlist *loadextension = NULL; static int max_connections = 0; +static int max_concurrent_tests = 0; static char *encoding = NULL; static _stringlist *schedulelist = NULL; static _stringlist *extra_tests = NULL; @@ -437,7 +438,7 @@ string_matches_pattern(const char *str, const char *pattern) * NOTE: Assumes there is enough room in the target buffer! */ void -replace_string(char *string, char *replace, char *replacement) +replace_string(char *string, const char *replace, const char *replacement) { char *ptr; @@ -459,7 +460,7 @@ replace_string(char *string, char *replace, char *replacement) * the given suffix. 
*/ static void -convert_sourcefiles_in(char *source_subdir, char *dest_dir, char *dest_subdir, char *suffix) +convert_sourcefiles_in(const char *source_subdir, const char *dest_dir, const char *dest_subdir, const char *suffix) { char testtablespace[MAXPGPATH]; char indir[MAXPGPATH]; @@ -1023,7 +1024,7 @@ config_sspi_auth(const char *pgdata) } while (0) res = snprintf(fname, sizeof(fname), "%s/pg_hba.conf", pgdata); - if (res < 0 || res >= sizeof(fname) - 1) + if (res < 0 || res >= sizeof(fname)) { /* * Truncating this name is a fatal error, because we must not fail to @@ -1592,9 +1593,10 @@ run_schedule(const char *schedule, test_function tfunc) FILE *scf; int line_num = 0; - memset(resultfiles, 0, sizeof(_stringlist *) * MAX_PARALLEL_TESTS); - memset(expectfiles, 0, sizeof(_stringlist *) * MAX_PARALLEL_TESTS); - memset(tags, 0, sizeof(_stringlist *) * MAX_PARALLEL_TESTS); + memset(tests, 0, sizeof(tests)); + memset(resultfiles, 0, sizeof(resultfiles)); + memset(expectfiles, 0, sizeof(expectfiles)); + memset(tags, 0, sizeof(tags)); scf = fopen(schedule, "r"); if (!scf) @@ -1614,15 +1616,6 @@ run_schedule(const char *schedule, test_function tfunc) line_num++; - for (i = 0; i < MAX_PARALLEL_TESTS; i++) - { - if (resultfiles[i] == NULL) - break; - free_stringlist(&resultfiles[i]); - free_stringlist(&expectfiles[i]); - free_stringlist(&tags[i]); - } - /* strip trailing whitespace, especially the newline */ i = strlen(scbuf); while (i > 0 && isspace((unsigned char) scbuf[i - 1])) @@ -1655,24 +1648,35 @@ run_schedule(const char *schedule, test_function tfunc) num_tests = 0; inword = false; - for (c = test; *c; c++) + for (c = test;; c++) { - if (isspace((unsigned char) *c)) + if (*c == '\0' || isspace((unsigned char) *c)) { - *c = '\0'; - inword = false; + if (inword) + { + /* Reached end of a test name */ + char sav; + + if (num_tests >= MAX_PARALLEL_TESTS) + { + fprintf(stderr, _("too many parallel tests (more than %d) in schedule file \"%s\" line %d: %s\n"), + MAX_PARALLEL_TESTS, schedule, line_num, scbuf); + exit(2); + } + sav = *c; + *c = '\0'; + tests[num_tests] = pg_strdup(test); + num_tests++; + *c = sav; + inword = false; + } + if (*c == '\0') + break; /* loop exit is here */ } else if (!inword) { - if (num_tests >= MAX_PARALLEL_TESTS) - { - /* can't print scbuf here, it's already been trashed */ - fprintf(stderr, _("too many parallel tests in schedule file \"%s\", line %d\n"), - schedule, line_num); - exit(2); - } - tests[num_tests] = c; - num_tests++; + /* Start of a test name */ + test = c; inword = true; } } @@ -1686,11 +1690,17 @@ run_schedule(const char *schedule, test_function tfunc) if (num_tests == 1) { - status(_("test %-24s ... "), tests[0]); + status(_("test %-28s ... "), tests[0]); pids[0] = (tfunc) (tests[0], &resultfiles[0], &expectfiles[0], &tags[0]); wait_for_tests(pids, statuses, NULL, 1); /* status line is finished below */ } + else if (max_concurrent_tests > 0 && max_concurrent_tests < num_tests) + { + fprintf(stderr, _("too many parallel tests (more than %d) in schedule file \"%s\" line %d: %s\n"), + max_concurrent_tests, schedule, line_num, scbuf); + exit(2); + } else if (max_connections > 0 && max_connections < num_tests) { int oldest = 0; @@ -1731,7 +1741,7 @@ run_schedule(const char *schedule, test_function tfunc) bool differ = false; if (num_tests > 1) - status(_(" %-24s ... "), tests[i]); + status(_(" %-28s ... "), tests[i]); /* * Advance over all three lists simultaneously. 
@@ -1742,14 +1752,11 @@ run_schedule(const char *schedule, test_function tfunc) */ for (rl = resultfiles[i], el = expectfiles[i], tl = tags[i]; rl != NULL; /* rl and el have the same length */ - rl = rl->next, el = el->next) + rl = rl->next, el = el->next, + tl = tl ? tl->next : NULL) { bool newdiff; - if (tl) - tl = tl->next; /* tl has the same length as rl and el if - * it exists */ - newdiff = results_differ(tests[i], rl->str, el->str); if (newdiff && tl) { @@ -1793,6 +1800,15 @@ run_schedule(const char *schedule, test_function tfunc) status_end(); } + + for (i = 0; i < num_tests; i++) + { + pg_free(tests[i]); + tests[i] = NULL; + free_stringlist(&resultfiles[i]); + free_stringlist(&expectfiles[i]); + free_stringlist(&tags[i]); + } } free_stringlist(&ignorelist); @@ -1816,7 +1832,7 @@ run_single_test(const char *test, test_function tfunc) *tl; bool differ = false; - status(_("test %-24s ... "), test); + status(_("test %-28s ... "), test); pid = (tfunc) (test, &resultfiles, &expectfiles, &tags); wait_for_tests(&pid, &exit_status, NULL, 1); @@ -1829,14 +1845,11 @@ run_single_test(const char *test, test_function tfunc) */ for (rl = resultfiles, el = expectfiles, tl = tags; rl != NULL; /* rl and el have the same length */ - rl = rl->next, el = el->next) + rl = rl->next, el = el->next, + tl = tl ? tl->next : NULL) { bool newdiff; - if (tl) - tl = tl->next; /* tl has the same length as rl and el if it - * exists */ - newdiff = results_differ(test, rl->str, el->str); if (newdiff && tl) { @@ -1985,35 +1998,41 @@ help(void) printf(_("Usage:\n %s [OPTION]... [EXTRA-TEST]...\n"), progname); printf(_("\n")); printf(_("Options:\n")); - printf(_(" --config-auth=DATADIR update authentication settings for DATADIR\n")); - printf(_(" --create-role=ROLE create the specified role before testing\n")); - printf(_(" --dbname=DB use database DB (default \"regression\")\n")); - printf(_(" --debug turn on debug mode in programs that are run\n")); - printf(_(" --dlpath=DIR look for dynamic libraries in DIR\n")); - printf(_(" --encoding=ENCODING use ENCODING as the encoding\n")); - printf(_(" --inputdir=DIR take input files from DIR (default \".\")\n")); - printf(_(" --launcher=CMD use CMD as launcher of psql\n")); - printf(_(" --load-extension=EXT load the named extension before running the\n")); - printf(_(" tests; can appear multiple times\n")); - printf(_(" --load-language=LANG load the named language before running the\n")); - printf(_(" tests; can appear multiple times\n")); - printf(_(" --max-connections=N maximum number of concurrent connections\n")); - printf(_(" (default is 0, meaning unlimited)\n")); - printf(_(" --outputdir=DIR place output files in DIR (default \".\")\n")); - printf(_(" --schedule=FILE use test ordering schedule from FILE\n")); - printf(_(" (can be used multiple times to concatenate)\n")); - printf(_(" --temp-instance=DIR create a temporary instance in DIR\n")); - printf(_(" --use-existing use an existing installation\n")); + printf(_(" --bindir=BINPATH use BINPATH for programs that are run;\n")); + printf(_(" if empty, use PATH from the environment\n")); + printf(_(" --config-auth=DATADIR update authentication settings for DATADIR\n")); + printf(_(" --create-role=ROLE create the specified role before testing\n")); + printf(_(" --dbname=DB use database DB (default \"regression\")\n")); + printf(_(" --debug turn on debug mode in programs that are run\n")); + printf(_(" --dlpath=DIR look for dynamic libraries in DIR\n")); + printf(_(" --encoding=ENCODING use ENCODING as the 
encoding\n")); + printf(_(" -h, --help show this help, then exit\n")); + printf(_(" --inputdir=DIR take input files from DIR (default \".\")\n")); + printf(_(" --launcher=CMD use CMD as launcher of psql\n")); + printf(_(" --load-extension=EXT load the named extension before running the\n")); + printf(_(" tests; can appear multiple times\n")); + printf(_(" --load-language=LANG load the named language before running the\n")); + printf(_(" tests; can appear multiple times\n")); + printf(_(" --max-connections=N maximum number of concurrent connections\n")); + printf(_(" (default is 0, meaning unlimited)\n")); + printf(_(" --max-concurrent-tests=N maximum number of concurrent tests in schedule\n")); + printf(_(" (default is 0, meaning unlimited)\n")); + printf(_(" --outputdir=DIR place output files in DIR (default \".\")\n")); + printf(_(" --schedule=FILE use test ordering schedule from FILE\n")); + printf(_(" (can be used multiple times to concatenate)\n")); + printf(_(" --temp-instance=DIR create a temporary instance in DIR\n")); + printf(_(" --use-existing use an existing installation\n")); + printf(_(" -V, --version output version information, then exit\n")); printf(_("\n")); printf(_("Options for \"temp-instance\" mode:\n")); - printf(_(" --no-locale use C locale\n")); - printf(_(" --port=PORT start postmaster on PORT\n")); - printf(_(" --temp-config=FILE append contents of FILE to temporary config\n")); + printf(_(" --no-locale use C locale\n")); + printf(_(" --port=PORT start postmaster on PORT\n")); + printf(_(" --temp-config=FILE append contents of FILE to temporary config\n")); printf(_("\n")); printf(_("Options for using an existing installation:\n")); - printf(_(" --host=HOST use postmaster running on HOST\n")); - printf(_(" --port=PORT use postmaster running at PORT\n")); - printf(_(" --user=USER connect as USER\n")); + printf(_(" --host=HOST use postmaster running on HOST\n")); + printf(_(" --port=PORT use postmaster running at PORT\n")); + printf(_(" --user=USER connect as USER\n")); printf(_("\n")); printf(_("The exit status is 0 if all tests passed, 1 if some tests failed, and 2\n")); printf(_("if the tests could not be run for some reason.\n")); @@ -2048,6 +2067,7 @@ regression_main(int argc, char *argv[], init_function ifunc, test_function tfunc {"launcher", required_argument, NULL, 21}, {"load-extension", required_argument, NULL, 22}, {"config-auth", required_argument, NULL, 24}, + {"max-concurrent-tests", required_argument, NULL, 25}, {NULL, 0, NULL, 0} }; @@ -2061,6 +2081,8 @@ regression_main(int argc, char *argv[], init_function ifunc, test_function tfunc progname = get_progname(argv[0]); set_pglocale_pgservice(argv[0], PG_TEXTDOMAIN("pg_regress")); + get_restricted_token(progname); + atexit(stop_postmaster); #ifndef HAVE_UNIX_SOCKETS @@ -2161,6 +2183,9 @@ regression_main(int argc, char *argv[], init_function ifunc, test_function tfunc case 24: config_auth_datadir = pg_strdup(optarg); break; + case 25: + max_concurrent_tests = atoi(optarg); + break; default: /* getopt_long already emitted a complaint */ fprintf(stderr, _("\nTry \"%s -h\" for more information.\n"), diff --git a/src/test/regress/pg_regress.h b/src/test/regress/pg_regress.h index 4abfc628e5..e9045b75b6 100644 --- a/src/test/regress/pg_regress.h +++ b/src/test/regress/pg_regress.h @@ -1,7 +1,7 @@ /*------------------------------------------------------------------------- * pg_regress.h --- regression test driver * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright 
(c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/test/regress/pg_regress.h @@ -49,5 +49,5 @@ int regression_main(int argc, char *argv[], init_function ifunc, test_function tfunc); void add_stringlist_item(_stringlist **listhead, const char *str); PID_TYPE spawn_process(const char *cmdline); -void replace_string(char *string, char *replace, char *replacement); +void replace_string(char *string, const char *replace, const char *replacement); bool file_exists(const char *file); diff --git a/src/test/regress/pg_regress_main.c b/src/test/regress/pg_regress_main.c index 298ed758ee..bd613e4fda 100644 --- a/src/test/regress/pg_regress_main.c +++ b/src/test/regress/pg_regress_main.c @@ -8,7 +8,7 @@ * * This code is released under the terms of the PostgreSQL License. * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/test/regress/pg_regress_main.c @@ -63,20 +63,32 @@ psql_start_test(const char *testname, add_stringlist_item(expectfiles, expectfile); if (launcher) + { offset += snprintf(psql_cmd + offset, sizeof(psql_cmd) - offset, "%s ", launcher); + if (offset >= sizeof(psql_cmd)) + { + fprintf(stderr, _("command too long\n")); + exit(2); + } + } + + offset += snprintf(psql_cmd + offset, sizeof(psql_cmd) - offset, + "\"%s%spsql\" -X -a -q -d \"%s\" < \"%s\" > \"%s\" 2>&1", + bindir ? bindir : "", + bindir ? "/" : "", + dblist->str, + infile, + outfile); + if (offset >= sizeof(psql_cmd)) + { + fprintf(stderr, _("command too long\n")); + exit(2); + } appnameenv = psprintf("PGAPPNAME=pg_regress/%s", testname); putenv(appnameenv); - snprintf(psql_cmd + offset, sizeof(psql_cmd) - offset, - "\"%s%spsql\" -X -a -q -d \"%s\" < \"%s\" > \"%s\" 2>&1", - bindir ? bindir : "", - bindir ? "/" : "", - dblist->str, - infile, - outfile); - pid = spawn_process(psql_cmd); if (pid == INVALID_PID) diff --git a/src/test/regress/regress.c b/src/test/regress/regress.c index b73bccec3d..a2e57768d4 100644 --- a/src/test/regress/regress.c +++ b/src/test/regress/regress.c @@ -6,7 +6,7 @@ * * This code is released under the terms of the PostgreSQL License. * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/test/regress/regress.c @@ -16,7 +16,6 @@ #include "postgres.h" -#include #include #include @@ -38,127 +37,15 @@ #include "utils/memutils.h" -#define P_MAXDIG 12 #define LDELIM '(' #define RDELIM ')' #define DELIM ',' -extern PATH *poly2path(POLYGON *poly); -extern void regress_lseg_construct(LSEG *lseg, Point *pt1, Point *pt2); +static void regress_lseg_construct(LSEG *lseg, Point *pt1, Point *pt2); -#ifdef PG_MODULE_MAGIC PG_MODULE_MAGIC; -#endif -/* - * Distance from a point to a path - */ -PG_FUNCTION_INFO_V1(regress_dist_ptpath); - -Datum -regress_dist_ptpath(PG_FUNCTION_ARGS) -{ - Point *pt = PG_GETARG_POINT_P(0); - PATH *path = PG_GETARG_PATH_P(1); - float8 result = 0.0; /* keep compiler quiet */ - float8 tmp; - int i; - LSEG lseg; - - switch (path->npts) - { - case 0: - PG_RETURN_NULL(); - case 1: - result = point_dt(pt, &path->p[0]); - break; - default: - - /* - * the distance from a point to a path is the smallest distance - * from the point to any of its constituent segments. 
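The psql_start_test() change above assembles the psql command with a running offset and now exits as soon as snprintf() reports truncation, rather than silently launching a clipped command line. A minimal sketch of that accumulate-and-check pattern, with an illustrative helper name (append_or_die) and buffer of our own choosing:

#include <stdio.h>
#include <stdlib.h>

/* Sketch only: append pieces to a fixed-size buffer, failing on overflow. */
static void
append_or_die(char *buf, size_t bufsize, size_t *offset, const char *piece)
{
    *offset += snprintf(buf + *offset, bufsize - *offset, "%s", piece);
    if (*offset >= bufsize)
    {
        fprintf(stderr, "command too long\n");
        exit(2);
    }
}

int
main(void)
{
    char        cmd[64];
    size_t      offset = 0;

    append_or_die(cmd, sizeof(cmd), &offset, "\"psql\" -X -a -q ");
    append_or_die(cmd, sizeof(cmd), &offset, "-d regression");
    printf("%s\n", cmd);
    return 0;
}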
- */ - Assert(path->npts > 1); - for (i = 0; i < path->npts - 1; ++i) - { - regress_lseg_construct(&lseg, &path->p[i], &path->p[i + 1]); - tmp = DatumGetFloat8(DirectFunctionCall2(dist_ps, - PointPGetDatum(pt), - LsegPGetDatum(&lseg))); - if (i == 0 || tmp < result) - result = tmp; - } - break; - } - PG_RETURN_FLOAT8(result); -} - -/* - * this essentially does a cartesian product of the lsegs in the - * two paths, and finds the min distance between any two lsegs - */ -PG_FUNCTION_INFO_V1(regress_path_dist); - -Datum -regress_path_dist(PG_FUNCTION_ARGS) -{ - PATH *p1 = PG_GETARG_PATH_P(0); - PATH *p2 = PG_GETARG_PATH_P(1); - bool have_min = false; - float8 min = 0.0; /* initialize to keep compiler quiet */ - float8 tmp; - int i, - j; - LSEG seg1, - seg2; - - for (i = 0; i < p1->npts - 1; i++) - { - for (j = 0; j < p2->npts - 1; j++) - { - regress_lseg_construct(&seg1, &p1->p[i], &p1->p[i + 1]); - regress_lseg_construct(&seg2, &p2->p[j], &p2->p[j + 1]); - - tmp = DatumGetFloat8(DirectFunctionCall2(lseg_distance, - LsegPGetDatum(&seg1), - LsegPGetDatum(&seg2))); - if (!have_min || tmp < min) - { - min = tmp; - have_min = true; - } - } - } - - if (!have_min) - PG_RETURN_NULL(); - - PG_RETURN_FLOAT8(min); -} - -PATH * -poly2path(POLYGON *poly) -{ - int i; - char *output = (char *) palloc(2 * (P_MAXDIG + 1) * poly->npts + 64); - char buf[2 * (P_MAXDIG) + 20]; - - sprintf(output, "(1, %*d", P_MAXDIG, poly->npts); - - for (i = 0; i < poly->npts; i++) - { - snprintf(buf, sizeof(buf), ",%*g,%*g", - P_MAXDIG, poly->p[i].x, P_MAXDIG, poly->p[i].y); - strcat(output, buf); - } - - snprintf(buf, sizeof(buf), "%c", RDELIM); - strcat(output, buf); - return DatumGetPathP(DirectFunctionCall1(path_in, - CStringGetDatum(output))); -} - /* return the point where two paths intersect, or NULL if no intersection. 
*/ PG_FUNCTION_INFO_V1(interpt_pp); @@ -203,7 +90,7 @@ interpt_pp(PG_FUNCTION_ARGS) /* like lseg_construct, but assume space already allocated */ -void +static void regress_lseg_construct(LSEG *lseg, Point *pt1, Point *pt2) { lseg->p[0].x = pt1->x; @@ -261,8 +148,8 @@ widget_in(PG_FUNCTION_ARGS) if (i < NARGS) ereport(ERROR, (errcode(ERRCODE_INVALID_TEXT_REPRESENTATION), - errmsg("invalid input syntax for type widget: \"%s\"", - str))); + errmsg("invalid input syntax for type %s: \"%s\"", + "widget", str))); result = (WIDGET *) palloc(sizeof(WIDGET)); result->center.x = atof(coord[0]); @@ -289,22 +176,13 @@ pt_in_widget(PG_FUNCTION_ARGS) { Point *point = PG_GETARG_POINT_P(0); WIDGET *widget = (WIDGET *) PG_GETARG_POINTER(1); + float8 distance; - PG_RETURN_BOOL(point_dt(point, &widget->center) < widget->radius); -} - -PG_FUNCTION_INFO_V1(boxarea); - -Datum -boxarea(PG_FUNCTION_ARGS) -{ - BOX *box = PG_GETARG_BOX_P(0); - double width, - height; + distance = DatumGetFloat8(DirectFunctionCall2(point_distance, + PointPGetDatum(point), + PointPGetDatum(&widget->center))); - width = Abs(box->high.x - box->low.x); - height = Abs(box->high.y - box->low.y); - PG_RETURN_FLOAT8(width * height); + PG_RETURN_BOOL(distance < widget->radius); } PG_FUNCTION_INFO_V1(reverse_name); @@ -328,120 +206,18 @@ reverse_name(PG_FUNCTION_ARGS) PG_RETURN_CSTRING(new_string); } - -static TransactionId fd17b_xid = InvalidTransactionId; -static TransactionId fd17a_xid = InvalidTransactionId; -static int fd17b_level = 0; -static int fd17a_level = 0; -static bool fd17b_recursion = true; -static bool fd17a_recursion = true; - -PG_FUNCTION_INFO_V1(funny_dup17); +PG_FUNCTION_INFO_V1(trigger_return_old); Datum -funny_dup17(PG_FUNCTION_ARGS) +trigger_return_old(PG_FUNCTION_ARGS) { TriggerData *trigdata = (TriggerData *) fcinfo->context; - TransactionId *xid; - int *level; - bool *recursion; - Relation rel; - TupleDesc tupdesc; HeapTuple tuple; - char *query, - *fieldval, - *fieldtype; - char *when; - uint64 inserted; - int selected = 0; - int ret; if (!CALLED_AS_TRIGGER(fcinfo)) - elog(ERROR, "funny_dup17: not fired by trigger manager"); + elog(ERROR, "trigger_return_old: not fired by trigger manager"); tuple = trigdata->tg_trigtuple; - rel = trigdata->tg_relation; - tupdesc = rel->rd_att; - if (TRIGGER_FIRED_BEFORE(trigdata->tg_event)) - { - xid = &fd17b_xid; - level = &fd17b_level; - recursion = &fd17b_recursion; - when = "BEFORE"; - } - else - { - xid = &fd17a_xid; - level = &fd17a_level; - recursion = &fd17a_recursion; - when = "AFTER "; - } - - if (!TransactionIdIsCurrentTransactionId(*xid)) - { - *xid = GetCurrentTransactionId(); - *level = 0; - *recursion = true; - } - - if (*level == 17) - { - *recursion = false; - return PointerGetDatum(tuple); - } - - if (!(*recursion)) - return PointerGetDatum(tuple); - - (*level)++; - - SPI_connect(); - - fieldval = SPI_getvalue(tuple, tupdesc, 1); - fieldtype = SPI_gettype(tupdesc, 1); - - query = (char *) palloc(100 + NAMEDATALEN * 3 + - strlen(fieldval) + strlen(fieldtype)); - - sprintf(query, "insert into %s select * from %s where %s = '%s'::%s", - SPI_getrelname(rel), SPI_getrelname(rel), - SPI_fname(tupdesc, 1), - fieldval, fieldtype); - - if ((ret = SPI_exec(query, 0)) < 0) - elog(ERROR, "funny_dup17 (fired %s) on level %3d: SPI_exec (insert ...) 
returned %d", - when, *level, ret); - - inserted = SPI_processed; - - sprintf(query, "select count (*) from %s where %s = '%s'::%s", - SPI_getrelname(rel), - SPI_fname(tupdesc, 1), - fieldval, fieldtype); - - if ((ret = SPI_exec(query, 0)) < 0) - elog(ERROR, "funny_dup17 (fired %s) on level %3d: SPI_exec (select ...) returned %d", - when, *level, ret); - - if (SPI_processed > 0) - { - selected = DatumGetInt32(DirectFunctionCall1(int4in, - CStringGetDatum(SPI_getvalue( - SPI_tuptable->vals[0], - SPI_tuptable->tupdesc, - 1 - )))); - } - - elog(DEBUG4, "funny_dup17 (fired %s) on level %3d: " UINT64_FORMAT "/%d tuples inserted/selected", - when, *level, inserted, selected); - - SPI_finish(); - - (*level)--; - - if (*level == 0) - *xid = InvalidTransactionId; return PointerGetDatum(tuple); } @@ -614,7 +390,7 @@ ttdummy(PG_FUNCTION_ARGS) /* Prepare plan for query */ pplan = SPI_prepare(query, natts, ctypes); if (pplan == NULL) - elog(ERROR, "ttdummy (%s): SPI_prepare returned %d", relname, SPI_result); + elog(ERROR, "ttdummy (%s): SPI_prepare returned %s", relname, SPI_result_code_string(SPI_result)); if (SPI_keepplan(pplan)) elog(ERROR, "ttdummy (%s): SPI_keepplan failed", relname); @@ -669,12 +445,12 @@ set_ttdummy(PG_FUNCTION_ARGS) /* - * Type int44 has no real-world use, but the regression tests use it. - * It's a four-element vector of int4's. + * Type int44 has no real-world use, but the regression tests use it + * (under the alias "city_budget"). It's a four-element vector of int4's. */ /* - * int44in - converts "num num ..." to internal form + * int44in - converts "num, num, ..." to internal form * * Note: Fills any missing positions with zeroes. */ @@ -700,7 +476,7 @@ int44in(PG_FUNCTION_ARGS) } /* - * int44out - converts internal form to "num num ..." + * int44out - converts internal form to "num, num, ..." */ PG_FUNCTION_INFO_V1(int44out); @@ -708,19 +484,14 @@ Datum int44out(PG_FUNCTION_ARGS) { int32 *an_array = (int32 *) PG_GETARG_POINTER(0); - char *result = (char *) palloc(16 * 4); /* Allow 14 digits + sign */ - int i; - char *walk; + char *result = (char *) palloc(16 * 4); + + snprintf(result, 16 * 4, "%d,%d,%d,%d", + an_array[0], + an_array[1], + an_array[2], + an_array[3]); - walk = result; - for (i = 0; i < 4; i++) - { - pg_ltoa(an_array[i], walk); - while (*++walk != '\0') - ; - *walk++ = ' '; - } - *--walk = '\0'; PG_RETURN_CSTRING(result); } @@ -770,9 +541,9 @@ make_tuple_indirect(PG_FUNCTION_ARGS) struct varatt_indirect redirect_pointer; /* only work on existing, not-null varlenas */ - if (tupdesc->attrs[i]->attisdropped || + if (TupleDescAttr(tupdesc, i)->attisdropped || nulls[i] || - tupdesc->attrs[i]->attlen != -1) + TupleDescAttr(tupdesc, i)->attlen != -1) continue; attr = (struct varlena *) DatumGetPointer(values[i]); @@ -865,7 +636,6 @@ wait_pid(PG_FUNCTION_ARGS) PG_RETURN_VOID(); } -#ifndef PG_HAVE_ATOMIC_FLAG_SIMULATION static void test_atomic_flag(void) { @@ -895,7 +665,6 @@ test_atomic_flag(void) pg_atomic_clear_flag(&flag); } -#endif /* PG_HAVE_ATOMIC_FLAG_SIMULATION */ static void test_atomic_uint32(void) @@ -1078,19 +847,7 @@ PG_FUNCTION_INFO_V1(test_atomic_ops); Datum test_atomic_ops(PG_FUNCTION_ARGS) { - /* --- - * Can't run the test under the semaphore emulation, it doesn't handle - * checking two edge cases well: - * - pg_atomic_unlocked_test_flag() always returns true - * - locking a already locked flag blocks - * it seems better to not test the semaphore fallback here, than weaken - * the checks for the other cases. 
The semaphore code will be the same - * everywhere, whereas the efficient implementations wont. - * --- - */ -#ifndef PG_HAVE_ATOMIC_FLAG_SIMULATION test_atomic_flag(); -#endif test_atomic_uint32(); @@ -1098,3 +855,11 @@ test_atomic_ops(PG_FUNCTION_ARGS) PG_RETURN_BOOL(true); } + +PG_FUNCTION_INFO_V1(test_fdw_handler); +Datum +test_fdw_handler(PG_FUNCTION_ARGS) +{ + elog(ERROR, "test_fdw_handler is not implemented"); + PG_RETURN_NULL(); +} diff --git a/src/test/regress/resultmap b/src/test/regress/resultmap index 04ba99fe33..46ca5639c2 100644 --- a/src/test/regress/resultmap +++ b/src/test/regress/resultmap @@ -1,17 +1,5 @@ -float4:out:i.86-pc-mingw32=float4-exp-three-digits.out -float4:out:x86_64-w64-mingw32=float4-exp-three-digits.out -float4:out:i.86-w64-mingw32=float4-exp-three-digits.out -float4:out:i.86-pc-win32vc=float4-exp-three-digits.out float8:out:i.86-.*-freebsd=float8-small-is-zero.out float8:out:i.86-.*-openbsd=float8-small-is-zero.out float8:out:i.86-.*-netbsd=float8-small-is-zero.out float8:out:m68k-.*-netbsd=float8-small-is-zero.out -float8:out:i.86-pc-mingw32=float8-exp-three-digits-win32.out -float8:out:x86_64-w64-mingw32=float8-exp-three-digits-win32.out -float8:out:i.86-w64-mingw32=float8-exp-three-digits-win32.out -float8:out:i.86-pc-win32vc=float8-exp-three-digits-win32.out float8:out:i.86-pc-cygwin=float8-small-is-zero.out -int8:out:i.86-pc-mingw32=int8-exp-three-digits.out -int8:out:x86_64-w64-mingw32=int8-exp-three-digits.out -int8:out:i.86-w64-mingw32=int8-exp-three-digits.out -int8:out:i.86-pc-win32vc=int8-exp-three-digits.out diff --git a/src/test/regress/serial_schedule b/src/test/regress/serial_schedule index 76b0de30a7..bc43b18c62 100644 --- a/src/test/regress/serial_schedule +++ b/src/test/regress/serial_schedule @@ -36,9 +36,6 @@ test: timetz test: timestamp test: timestamptz test: interval -test: abstime -test: reltime -test: tinterval test: inet test: macaddr test: macaddr8 @@ -63,7 +60,9 @@ test: copyselect test: copydml test: create_misc test: create_operator +test: create_procedure test: create_index +test: index_including test: create_view test: create_aggregate test: create_function_3 @@ -79,6 +78,7 @@ test: updatable_views test: rolenames test: roleattributes test: create_am +test: hash_func test: sanity_check test: errors test: select @@ -134,6 +134,7 @@ test: stats_ext test: rules test: psql_crosstab test: select_parallel +test: write_parallel test: publication test: subscription test: amutils @@ -170,12 +171,20 @@ test: conversion test: truncate test: alter_table test: sequence -test: identity test: polymorphism test: rowtypes test: returning test: largeobject test: with test: xml +test: identity +test: partition_join +test: partition_prune +test: reloptions +test: hash_part +test: indexing +test: partition_aggregate +test: partition_info test: event_trigger +test: fast_default test: stats diff --git a/src/test/regress/sql/abstime.sql b/src/test/regress/sql/abstime.sql deleted file mode 100644 index 4ab821b1b8..0000000000 --- a/src/test/regress/sql/abstime.sql +++ /dev/null @@ -1,67 +0,0 @@ --- --- ABSTIME --- testing built-in time type abstime --- uses reltime and tinterval --- - --- --- timezones may vary based not only on location but the operating --- system. the main correctness issue is that the OS may not get --- daylight savings time right for times prior to Unix epoch (jan 1 1970). 
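With the PG_HAVE_ATOMIC_FLAG_SIMULATION guards removed above, test_atomic_flag() now runs regardless of how the platform implements atomics. For context, a minimal sketch of the set/clear cycle it exercises, assuming the flag API declared in src/include/port/atomics.h:

#include "postgres.h"
#include "port/atomics.h"

/* Sketch only: the basic round-trip test_atomic_flag() checks. */
static void
flag_roundtrip(void)
{
    pg_atomic_flag flag;

    pg_atomic_init_flag(&flag);             /* starts out clear */

    if (!pg_atomic_unlocked_test_flag(&flag))
        elog(ERROR, "flag unexpectedly set after init");

    if (!pg_atomic_test_set_flag(&flag))    /* first set must succeed */
        elog(ERROR, "could not set flag");

    if (pg_atomic_test_set_flag(&flag))     /* second set must fail */
        elog(ERROR, "set an already-set flag");

    pg_atomic_clear_flag(&flag);            /* back to clear */
}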
--- - -CREATE TABLE ABSTIME_TBL (f1 abstime); - -BEGIN; -INSERT INTO ABSTIME_TBL (f1) VALUES (abstime 'now'); -INSERT INTO ABSTIME_TBL (f1) VALUES (abstime 'now'); -SELECT count(*) AS two FROM ABSTIME_TBL WHERE f1 = 'now' ; -END; - -DELETE FROM ABSTIME_TBL; - -INSERT INTO ABSTIME_TBL (f1) VALUES ('Jan 14, 1973 03:14:21'); -INSERT INTO ABSTIME_TBL (f1) VALUES (abstime 'Mon May 1 00:30:30 1995'); -INSERT INTO ABSTIME_TBL (f1) VALUES (abstime 'epoch'); -INSERT INTO ABSTIME_TBL (f1) VALUES (abstime 'infinity'); -INSERT INTO ABSTIME_TBL (f1) VALUES (abstime '-infinity'); -INSERT INTO ABSTIME_TBL (f1) VALUES (abstime 'May 10, 1947 23:59:12'); - --- what happens if we specify slightly misformatted abstime? -INSERT INTO ABSTIME_TBL (f1) VALUES ('Feb 35, 1946 10:00:00'); -INSERT INTO ABSTIME_TBL (f1) VALUES ('Feb 28, 1984 25:08:10'); - --- badly formatted abstimes: these should result in invalid abstimes -INSERT INTO ABSTIME_TBL (f1) VALUES ('bad date format'); -INSERT INTO ABSTIME_TBL (f1) VALUES ('Jun 10, 1843'); - --- test abstime operators - -SELECT '' AS eight, * FROM ABSTIME_TBL; - -SELECT '' AS six, * FROM ABSTIME_TBL - WHERE ABSTIME_TBL.f1 < abstime 'Jun 30, 2001'; - -SELECT '' AS six, * FROM ABSTIME_TBL - WHERE ABSTIME_TBL.f1 > abstime '-infinity'; - -SELECT '' AS six, * FROM ABSTIME_TBL - WHERE abstime 'May 10, 1947 23:59:12' <> ABSTIME_TBL.f1; - -SELECT '' AS three, * FROM ABSTIME_TBL - WHERE abstime 'epoch' >= ABSTIME_TBL.f1; - -SELECT '' AS four, * FROM ABSTIME_TBL - WHERE ABSTIME_TBL.f1 <= abstime 'Jan 14, 1973 03:14:21'; - -SELECT '' AS four, * FROM ABSTIME_TBL - WHERE ABSTIME_TBL.f1 - tinterval '["Apr 1 1950 00:00:00" "Dec 30 1999 23:00:00"]'; - -SELECT '' AS four, f1 AS abstime, - date_part('year', f1) AS year, date_part('month', f1) AS month, - date_part('day',f1) AS day, date_part('hour', f1) AS hour, - date_part('minute', f1) AS minute, date_part('second', f1) AS second - FROM ABSTIME_TBL - WHERE isfinite(f1) - ORDER BY abstime; diff --git a/src/test/regress/sql/aggregates.sql b/src/test/regress/sql/aggregates.sql index 2eeb3eedbd..8192d457e9 100644 --- a/src/test/regress/sql/aggregates.sql +++ b/src/test/regress/sql/aggregates.sql @@ -51,6 +51,22 @@ select avg(null::float8) from generate_series(1,3); select sum('NaN'::numeric) from generate_series(1,3); select avg('NaN'::numeric) from generate_series(1,3); +-- verify correct results for infinite inputs +SELECT avg(x::float8), var_pop(x::float8) +FROM (VALUES ('1'), ('infinity')) v(x); +SELECT avg(x::float8), var_pop(x::float8) +FROM (VALUES ('infinity'), ('1')) v(x); +SELECT avg(x::float8), var_pop(x::float8) +FROM (VALUES ('infinity'), ('infinity')) v(x); +SELECT avg(x::float8), var_pop(x::float8) +FROM (VALUES ('-infinity'), ('infinity')) v(x); + +-- test accuracy with a large input offset +SELECT avg(x::float8), var_pop(x::float8) +FROM (VALUES (100000003), (100000004), (100000006), (100000007)) v(x); +SELECT avg(x::float8), var_pop(x::float8) +FROM (VALUES (7000000000005), (7000000000007)) v(x); + -- SQL2003 binary aggregates SELECT regr_count(b, a) FROM aggtest; SELECT regr_sxx(b, a) FROM aggtest; @@ -62,6 +78,31 @@ SELECT regr_slope(b, a), regr_intercept(b, a) FROM aggtest; SELECT covar_pop(b, a), covar_samp(b, a) FROM aggtest; SELECT corr(b, a) FROM aggtest; +-- test accum and combine functions directly +CREATE TABLE regr_test (x float8, y float8); +INSERT INTO regr_test VALUES (10,150),(20,250),(30,350),(80,540),(100,200); +SELECT count(*), sum(x), regr_sxx(y,x), sum(y),regr_syy(y,x), regr_sxy(y,x) +FROM regr_test WHERE 
x IN (10,20,30,80); +SELECT count(*), sum(x), regr_sxx(y,x), sum(y),regr_syy(y,x), regr_sxy(y,x) +FROM regr_test; +SELECT float8_accum('{4,140,2900}'::float8[], 100); +SELECT float8_regr_accum('{4,140,2900,1290,83075,15050}'::float8[], 200, 100); +SELECT count(*), sum(x), regr_sxx(y,x), sum(y),regr_syy(y,x), regr_sxy(y,x) +FROM regr_test WHERE x IN (10,20,30); +SELECT count(*), sum(x), regr_sxx(y,x), sum(y),regr_syy(y,x), regr_sxy(y,x) +FROM regr_test WHERE x IN (80,100); +SELECT float8_combine('{3,60,200}'::float8[], '{0,0,0}'::float8[]); +SELECT float8_combine('{0,0,0}'::float8[], '{2,180,200}'::float8[]); +SELECT float8_combine('{3,60,200}'::float8[], '{2,180,200}'::float8[]); +SELECT float8_regr_combine('{3,60,200,750,20000,2000}'::float8[], + '{0,0,0,0,0,0}'::float8[]); +SELECT float8_regr_combine('{0,0,0,0,0,0}'::float8[], + '{2,180,200,740,57800,-3400}'::float8[]); +SELECT float8_regr_combine('{3,60,200,750,20000,2000}'::float8[], + '{2,180,200,740,57800,-3400}'::float8[]); +DROP TABLE regr_test; + +-- test count, distinct SELECT count(four) AS cnt_1000 FROM onek; SELECT count(DISTINCT four) AS cnt_4 FROM onek; @@ -524,6 +565,8 @@ drop table bytea_test_table; select min(unique1) filter (where unique1 > 100) from tenk1; +select sum(1/ten) filter (where ten > 0) from tenk1; + select ten, sum(distinct four) filter (where four::text ~ '123') from onek a group by ten; @@ -739,6 +782,23 @@ select my_avg(one) filter (where one > 1),my_sum(one) from (values(1),(3)) t(one -- this should not share the state due to different input columns. select my_avg(one),my_sum(two) from (values(1,2),(3,4)) t(one,two); +-- exercise cases where OSAs share state +select + percentile_cont(0.5) within group (order by a), + percentile_disc(0.5) within group (order by a) +from (values(1::float8),(3),(5),(7)) t(a); + +select + percentile_cont(0.25) within group (order by a), + percentile_disc(0.5) within group (order by a) +from (values(1::float8),(3),(5),(7)) t(a); + +-- these can't share state currently +select + rank(4) within group (order by a), + dense_rank(4) within group (order by a) +from (values(1),(3),(5),(7)) t(a); + -- test that aggs with the same sfunc and initcond share the same agg state create aggregate my_sum_init(int4) ( @@ -824,3 +884,95 @@ create aggregate my_half_sum(int4) select my_sum(one),my_half_sum(one) from (values(1),(2),(3),(4)) t(one); rollback; + + +-- test that the aggregate transition logic correctly handles +-- transition / combine functions returning NULL + +-- First test the case of a normal transition function returning NULL +BEGIN; +CREATE FUNCTION balkifnull(int8, int4) +RETURNS int8 +STRICT +LANGUAGE plpgsql AS $$ +BEGIN + IF $1 IS NULL THEN + RAISE 'erroneously called with NULL argument'; + END IF; + RETURN NULL; +END$$; + +CREATE AGGREGATE balk(int4) +( + SFUNC = balkifnull(int8, int4), + STYPE = int8, + PARALLEL = SAFE, + INITCOND = '0' +); + +SELECT balk(hundred) FROM tenk1; + +ROLLBACK; + +-- Secondly test the case of a parallel aggregate combiner function +-- returning NULL. For that use normal transition function, but a +-- combiner function returning NULL. 
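The new aggregate tests here install transition and combine functions that deliberately return NULL to probe the executor's handling of NULL partial states. As background, a COMBINEFUNC written in C has the same shape as a transition function: it takes two partial states of the aggregate's state type and returns the merged state. A hypothetical int8-sum combiner, with names of our own rather than anything from the patch:

#include "postgres.h"
#include "fmgr.h"

PG_MODULE_MAGIC;

/*
 * Hypothetical combine function for an int8-typed partial sum: it simply
 * adds the two partial states handed to it by the parallel leader.
 */
PG_FUNCTION_INFO_V1(int8_sum_combine);

Datum
int8_sum_combine(PG_FUNCTION_ARGS)
{
    int64       state1 = PG_GETARG_INT64(0);
    int64       state2 = PG_GETARG_INT64(1);

    PG_RETURN_INT64(state1 + state2);
}

A production combiner would also guard against overflow; the point of the sketch is only the calling convention that the COMBINEFUNC tests rely on.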
+BEGIN ISOLATION LEVEL REPEATABLE READ; +CREATE FUNCTION balkifnull(int8, int8) +RETURNS int8 +PARALLEL SAFE +STRICT +LANGUAGE plpgsql AS $$ +BEGIN + IF $1 IS NULL THEN + RAISE 'erroneously called with NULL argument'; + END IF; + RETURN NULL; +END$$; + +CREATE AGGREGATE balk(int4) +( + SFUNC = int4_sum(int8, int4), + STYPE = int8, + COMBINEFUNC = balkifnull(int8, int8), + PARALLEL = SAFE, + INITCOND = '0' +); + +-- force use of parallelism +ALTER TABLE tenk1 set (parallel_workers = 4); +SET LOCAL parallel_setup_cost=0; +SET LOCAL max_parallel_workers_per_gather=4; + +EXPLAIN (COSTS OFF) SELECT balk(hundred) FROM tenk1; +SELECT balk(hundred) FROM tenk1; + +ROLLBACK; + +-- test coverage for aggregate combine/serial/deserial functions +BEGIN ISOLATION LEVEL REPEATABLE READ; + +SET parallel_setup_cost = 0; +SET parallel_tuple_cost = 0; +SET min_parallel_table_scan_size = 0; +SET max_parallel_workers_per_gather = 4; +SET enable_indexonlyscan = off; + +-- variance(int4) covers numeric_poly_combine +-- sum(int8) covers int8_avg_combine +EXPLAIN (COSTS OFF) + SELECT variance(unique1::int4), sum(unique1::int8) FROM tenk1; + +SELECT variance(unique1::int4), sum(unique1::int8) FROM tenk1; + +ROLLBACK; + +-- test coverage for dense_rank +SELECT dense_rank(x) WITHIN GROUP (ORDER BY x) FROM (VALUES (1),(1),(2),(2),(3),(3)) v(x) GROUP BY (x) ORDER BY 1; + + +-- Ensure that the STRICT checks for aggregates does not take NULLness +-- of ORDER BY columns into account. See bug report around +-- 2a505161-2727-2473-7c46-591ed108ac52@email.cz +SELECT min(x ORDER BY y) FROM (VALUES(1, NULL)) AS d(x,y); +SELECT min(x ORDER BY y) FROM (VALUES(1, 2)) AS d(x,y); diff --git a/src/test/regress/sql/alter_generic.sql b/src/test/regress/sql/alter_generic.sql index 311812e351..84fd900b24 100644 --- a/src/test/regress/sql/alter_generic.sql +++ b/src/test/regress/sql/alter_generic.sql @@ -5,15 +5,15 @@ -- Clean up in case a prior regression run failed SET client_min_messages TO 'warning'; -DROP ROLE IF EXISTS regress_alter_user1; -DROP ROLE IF EXISTS regress_alter_user2; -DROP ROLE IF EXISTS regress_alter_user3; +DROP ROLE IF EXISTS regress_alter_generic_user1; +DROP ROLE IF EXISTS regress_alter_generic_user2; +DROP ROLE IF EXISTS regress_alter_generic_user3; RESET client_min_messages; -CREATE USER regress_alter_user3; -CREATE USER regress_alter_user2; -CREATE USER regress_alter_user1 IN ROLE regress_alter_user3; +CREATE USER regress_alter_generic_user3; +CREATE USER regress_alter_generic_user2; +CREATE USER regress_alter_generic_user1 IN ROLE regress_alter_generic_user3; CREATE SCHEMA alt_nsp1; CREATE SCHEMA alt_nsp2; @@ -25,7 +25,7 @@ SET search_path = alt_nsp1, public; -- -- Function and Aggregate -- -SET SESSION AUTHORIZATION regress_alter_user1; +SET SESSION AUTHORIZATION regress_alter_generic_user1; CREATE FUNCTION alt_func1(int) RETURNS int LANGUAGE sql AS 'SELECT $1 + 1'; CREATE FUNCTION alt_func2(int) RETURNS int LANGUAGE sql @@ -37,23 +37,23 @@ CREATE AGGREGATE alt_agg2 ( sfunc1 = int4mi, basetype = int4, stype1 = int4, initcond = 0 ); ALTER AGGREGATE alt_func1(int) RENAME TO alt_func3; -- failed (not aggregate) -ALTER AGGREGATE alt_func1(int) OWNER TO regress_alter_user3; -- failed (not aggregate) +ALTER AGGREGATE alt_func1(int) OWNER TO regress_alter_generic_user3; -- failed (not aggregate) ALTER AGGREGATE alt_func1(int) SET SCHEMA alt_nsp2; -- failed (not aggregate) ALTER FUNCTION alt_func1(int) RENAME TO alt_func2; -- failed (name conflict) ALTER FUNCTION alt_func1(int) RENAME TO alt_func3; -- OK -ALTER 
FUNCTION alt_func2(int) OWNER TO regress_alter_user2; -- failed (no role membership) -ALTER FUNCTION alt_func2(int) OWNER TO regress_alter_user3; -- OK +ALTER FUNCTION alt_func2(int) OWNER TO regress_alter_generic_user2; -- failed (no role membership) +ALTER FUNCTION alt_func2(int) OWNER TO regress_alter_generic_user3; -- OK ALTER FUNCTION alt_func2(int) SET SCHEMA alt_nsp1; -- OK, already there ALTER FUNCTION alt_func2(int) SET SCHEMA alt_nsp2; -- OK ALTER AGGREGATE alt_agg1(int) RENAME TO alt_agg2; -- failed (name conflict) ALTER AGGREGATE alt_agg1(int) RENAME TO alt_agg3; -- OK -ALTER AGGREGATE alt_agg2(int) OWNER TO regress_alter_user2; -- failed (no role membership) -ALTER AGGREGATE alt_agg2(int) OWNER TO regress_alter_user3; -- OK +ALTER AGGREGATE alt_agg2(int) OWNER TO regress_alter_generic_user2; -- failed (no role membership) +ALTER AGGREGATE alt_agg2(int) OWNER TO regress_alter_generic_user3; -- OK ALTER AGGREGATE alt_agg2(int) SET SCHEMA alt_nsp2; -- OK -SET SESSION AUTHORIZATION regress_alter_user2; +SET SESSION AUTHORIZATION regress_alter_generic_user2; CREATE FUNCTION alt_func1(int) RETURNS int LANGUAGE sql AS 'SELECT $1 + 2'; CREATE FUNCTION alt_func2(int) RETURNS int LANGUAGE sql @@ -67,21 +67,21 @@ CREATE AGGREGATE alt_agg2 ( ALTER FUNCTION alt_func3(int) RENAME TO alt_func4; -- failed (not owner) ALTER FUNCTION alt_func1(int) RENAME TO alt_func4; -- OK -ALTER FUNCTION alt_func3(int) OWNER TO regress_alter_user2; -- failed (not owner) -ALTER FUNCTION alt_func2(int) OWNER TO regress_alter_user3; -- failed (no role membership) +ALTER FUNCTION alt_func3(int) OWNER TO regress_alter_generic_user2; -- failed (not owner) +ALTER FUNCTION alt_func2(int) OWNER TO regress_alter_generic_user3; -- failed (no role membership) ALTER FUNCTION alt_func3(int) SET SCHEMA alt_nsp2; -- failed (not owner) ALTER FUNCTION alt_func2(int) SET SCHEMA alt_nsp2; -- failed (name conflicts) ALTER AGGREGATE alt_agg3(int) RENAME TO alt_agg4; -- failed (not owner) ALTER AGGREGATE alt_agg1(int) RENAME TO alt_agg4; -- OK -ALTER AGGREGATE alt_agg3(int) OWNER TO regress_alter_user2; -- failed (not owner) -ALTER AGGREGATE alt_agg2(int) OWNER TO regress_alter_user3; -- failed (no role membership) +ALTER AGGREGATE alt_agg3(int) OWNER TO regress_alter_generic_user2; -- failed (not owner) +ALTER AGGREGATE alt_agg2(int) OWNER TO regress_alter_generic_user3; -- failed (no role membership) ALTER AGGREGATE alt_agg3(int) SET SCHEMA alt_nsp2; -- failed (not owner) ALTER AGGREGATE alt_agg2(int) SET SCHEMA alt_nsp2; -- failed (name conflict) RESET SESSION AUTHORIZATION; -SELECT n.nspname, proname, prorettype::regtype, proisagg, a.rolname +SELECT n.nspname, proname, prorettype::regtype, prokind, a.rolname FROM pg_proc p, pg_namespace n, pg_authid a WHERE p.pronamespace = n.oid AND p.proowner = a.oid AND n.nspname IN ('alt_nsp1', 'alt_nsp2') @@ -95,24 +95,24 @@ SELECT n.nspname, proname, prorettype::regtype, proisagg, a.rolname -- -- Conversion -- -SET SESSION AUTHORIZATION regress_alter_user1; +SET SESSION AUTHORIZATION regress_alter_generic_user1; CREATE CONVERSION alt_conv1 FOR 'LATIN1' TO 'UTF8' FROM iso8859_1_to_utf8; CREATE CONVERSION alt_conv2 FOR 'LATIN1' TO 'UTF8' FROM iso8859_1_to_utf8; ALTER CONVERSION alt_conv1 RENAME TO alt_conv2; -- failed (name conflict) ALTER CONVERSION alt_conv1 RENAME TO alt_conv3; -- OK -ALTER CONVERSION alt_conv2 OWNER TO regress_alter_user2; -- failed (no role membership) -ALTER CONVERSION alt_conv2 OWNER TO regress_alter_user3; -- OK +ALTER CONVERSION alt_conv2 OWNER TO 
regress_alter_generic_user2; -- failed (no role membership) +ALTER CONVERSION alt_conv2 OWNER TO regress_alter_generic_user3; -- OK ALTER CONVERSION alt_conv2 SET SCHEMA alt_nsp2; -- OK -SET SESSION AUTHORIZATION regress_alter_user2; +SET SESSION AUTHORIZATION regress_alter_generic_user2; CREATE CONVERSION alt_conv1 FOR 'LATIN1' TO 'UTF8' FROM iso8859_1_to_utf8; CREATE CONVERSION alt_conv2 FOR 'LATIN1' TO 'UTF8' FROM iso8859_1_to_utf8; ALTER CONVERSION alt_conv3 RENAME TO alt_conv4; -- failed (not owner) ALTER CONVERSION alt_conv1 RENAME TO alt_conv4; -- OK -ALTER CONVERSION alt_conv3 OWNER TO regress_alter_user2; -- failed (not owner) -ALTER CONVERSION alt_conv2 OWNER TO regress_alter_user3; -- failed (no role membership) +ALTER CONVERSION alt_conv3 OWNER TO regress_alter_generic_user2; -- failed (not owner) +ALTER CONVERSION alt_conv2 OWNER TO regress_alter_generic_user3; -- failed (no role membership) ALTER CONVERSION alt_conv3 SET SCHEMA alt_nsp2; -- failed (not owner) ALTER CONVERSION alt_conv2 SET SCHEMA alt_nsp2; -- failed (name conflict) @@ -148,17 +148,17 @@ SELECT srvname FROM pg_foreign_server WHERE srvname like 'alt_fserv%'; CREATE LANGUAGE alt_lang1 HANDLER plpgsql_call_handler; CREATE LANGUAGE alt_lang2 HANDLER plpgsql_call_handler; -ALTER LANGUAGE alt_lang1 OWNER TO regress_alter_user1; -- OK -ALTER LANGUAGE alt_lang2 OWNER TO regress_alter_user2; -- OK +ALTER LANGUAGE alt_lang1 OWNER TO regress_alter_generic_user1; -- OK +ALTER LANGUAGE alt_lang2 OWNER TO regress_alter_generic_user2; -- OK -SET SESSION AUTHORIZATION regress_alter_user1; +SET SESSION AUTHORIZATION regress_alter_generic_user1; ALTER LANGUAGE alt_lang1 RENAME TO alt_lang2; -- failed (name conflict) ALTER LANGUAGE alt_lang2 RENAME TO alt_lang3; -- failed (not owner) ALTER LANGUAGE alt_lang1 RENAME TO alt_lang3; -- OK -ALTER LANGUAGE alt_lang2 OWNER TO regress_alter_user3; -- failed (not owner) -ALTER LANGUAGE alt_lang3 OWNER TO regress_alter_user2; -- failed (no role membership) -ALTER LANGUAGE alt_lang3 OWNER TO regress_alter_user3; -- OK +ALTER LANGUAGE alt_lang2 OWNER TO regress_alter_generic_user3; -- failed (not owner) +ALTER LANGUAGE alt_lang3 OWNER TO regress_alter_generic_user2; -- failed (no role membership) +ALTER LANGUAGE alt_lang3 OWNER TO regress_alter_generic_user3; -- OK RESET SESSION AUTHORIZATION; SELECT lanname, a.rolname @@ -169,21 +169,21 @@ SELECT lanname, a.rolname -- -- Operator -- -SET SESSION AUTHORIZATION regress_alter_user1; +SET SESSION AUTHORIZATION regress_alter_generic_user1; CREATE OPERATOR @-@ ( leftarg = int4, rightarg = int4, procedure = int4mi ); CREATE OPERATOR @+@ ( leftarg = int4, rightarg = int4, procedure = int4pl ); -ALTER OPERATOR @+@(int4, int4) OWNER TO regress_alter_user2; -- failed (no role membership) -ALTER OPERATOR @+@(int4, int4) OWNER TO regress_alter_user3; -- OK +ALTER OPERATOR @+@(int4, int4) OWNER TO regress_alter_generic_user2; -- failed (no role membership) +ALTER OPERATOR @+@(int4, int4) OWNER TO regress_alter_generic_user3; -- OK ALTER OPERATOR @-@(int4, int4) SET SCHEMA alt_nsp2; -- OK -SET SESSION AUTHORIZATION regress_alter_user2; +SET SESSION AUTHORIZATION regress_alter_generic_user2; CREATE OPERATOR @-@ ( leftarg = int4, rightarg = int4, procedure = int4mi ); -ALTER OPERATOR @+@(int4, int4) OWNER TO regress_alter_user2; -- failed (not owner) -ALTER OPERATOR @-@(int4, int4) OWNER TO regress_alter_user3; -- failed (no role membership) +ALTER OPERATOR @+@(int4, int4) OWNER TO regress_alter_generic_user2; -- failed (not owner) +ALTER OPERATOR 
@-@(int4, int4) OWNER TO regress_alter_generic_user3; -- failed (no role membership) ALTER OPERATOR @+@(int4, int4) SET SCHEMA alt_nsp2; -- failed (not owner) -- can't test this: the error message includes the raw oid of namespace -- ALTER OPERATOR @-@(int4, int4) SET SCHEMA alt_nsp2; -- failed (name conflict) @@ -202,53 +202,53 @@ SELECT n.nspname, oprname, a.rolname, -- CREATE OPERATOR FAMILY alt_opf1 USING hash; CREATE OPERATOR FAMILY alt_opf2 USING hash; -ALTER OPERATOR FAMILY alt_opf1 USING hash OWNER TO regress_alter_user1; -ALTER OPERATOR FAMILY alt_opf2 USING hash OWNER TO regress_alter_user1; +ALTER OPERATOR FAMILY alt_opf1 USING hash OWNER TO regress_alter_generic_user1; +ALTER OPERATOR FAMILY alt_opf2 USING hash OWNER TO regress_alter_generic_user1; CREATE OPERATOR CLASS alt_opc1 FOR TYPE uuid USING hash AS STORAGE uuid; CREATE OPERATOR CLASS alt_opc2 FOR TYPE uuid USING hash AS STORAGE uuid; -ALTER OPERATOR CLASS alt_opc1 USING hash OWNER TO regress_alter_user1; -ALTER OPERATOR CLASS alt_opc2 USING hash OWNER TO regress_alter_user1; +ALTER OPERATOR CLASS alt_opc1 USING hash OWNER TO regress_alter_generic_user1; +ALTER OPERATOR CLASS alt_opc2 USING hash OWNER TO regress_alter_generic_user1; -SET SESSION AUTHORIZATION regress_alter_user1; +SET SESSION AUTHORIZATION regress_alter_generic_user1; ALTER OPERATOR FAMILY alt_opf1 USING hash RENAME TO alt_opf2; -- failed (name conflict) ALTER OPERATOR FAMILY alt_opf1 USING hash RENAME TO alt_opf3; -- OK -ALTER OPERATOR FAMILY alt_opf2 USING hash OWNER TO regress_alter_user2; -- failed (no role membership) -ALTER OPERATOR FAMILY alt_opf2 USING hash OWNER TO regress_alter_user3; -- OK +ALTER OPERATOR FAMILY alt_opf2 USING hash OWNER TO regress_alter_generic_user2; -- failed (no role membership) +ALTER OPERATOR FAMILY alt_opf2 USING hash OWNER TO regress_alter_generic_user3; -- OK ALTER OPERATOR FAMILY alt_opf2 USING hash SET SCHEMA alt_nsp2; -- OK ALTER OPERATOR CLASS alt_opc1 USING hash RENAME TO alt_opc2; -- failed (name conflict) ALTER OPERATOR CLASS alt_opc1 USING hash RENAME TO alt_opc3; -- OK -ALTER OPERATOR CLASS alt_opc2 USING hash OWNER TO regress_alter_user2; -- failed (no role membership) -ALTER OPERATOR CLASS alt_opc2 USING hash OWNER TO regress_alter_user3; -- OK +ALTER OPERATOR CLASS alt_opc2 USING hash OWNER TO regress_alter_generic_user2; -- failed (no role membership) +ALTER OPERATOR CLASS alt_opc2 USING hash OWNER TO regress_alter_generic_user3; -- OK ALTER OPERATOR CLASS alt_opc2 USING hash SET SCHEMA alt_nsp2; -- OK RESET SESSION AUTHORIZATION; CREATE OPERATOR FAMILY alt_opf1 USING hash; CREATE OPERATOR FAMILY alt_opf2 USING hash; -ALTER OPERATOR FAMILY alt_opf1 USING hash OWNER TO regress_alter_user2; -ALTER OPERATOR FAMILY alt_opf2 USING hash OWNER TO regress_alter_user2; +ALTER OPERATOR FAMILY alt_opf1 USING hash OWNER TO regress_alter_generic_user2; +ALTER OPERATOR FAMILY alt_opf2 USING hash OWNER TO regress_alter_generic_user2; CREATE OPERATOR CLASS alt_opc1 FOR TYPE macaddr USING hash AS STORAGE macaddr; CREATE OPERATOR CLASS alt_opc2 FOR TYPE macaddr USING hash AS STORAGE macaddr; -ALTER OPERATOR CLASS alt_opc1 USING hash OWNER TO regress_alter_user2; -ALTER OPERATOR CLASS alt_opc2 USING hash OWNER TO regress_alter_user2; +ALTER OPERATOR CLASS alt_opc1 USING hash OWNER TO regress_alter_generic_user2; +ALTER OPERATOR CLASS alt_opc2 USING hash OWNER TO regress_alter_generic_user2; -SET SESSION AUTHORIZATION regress_alter_user2; +SET SESSION AUTHORIZATION regress_alter_generic_user2; ALTER OPERATOR FAMILY alt_opf3 
USING hash RENAME TO alt_opf4; -- failed (not owner) ALTER OPERATOR FAMILY alt_opf1 USING hash RENAME TO alt_opf4; -- OK -ALTER OPERATOR FAMILY alt_opf3 USING hash OWNER TO regress_alter_user2; -- failed (not owner) -ALTER OPERATOR FAMILY alt_opf2 USING hash OWNER TO regress_alter_user3; -- failed (no role membership) +ALTER OPERATOR FAMILY alt_opf3 USING hash OWNER TO regress_alter_generic_user2; -- failed (not owner) +ALTER OPERATOR FAMILY alt_opf2 USING hash OWNER TO regress_alter_generic_user3; -- failed (no role membership) ALTER OPERATOR FAMILY alt_opf3 USING hash SET SCHEMA alt_nsp2; -- failed (not owner) ALTER OPERATOR FAMILY alt_opf2 USING hash SET SCHEMA alt_nsp2; -- failed (name conflict) ALTER OPERATOR CLASS alt_opc3 USING hash RENAME TO alt_opc4; -- failed (not owner) ALTER OPERATOR CLASS alt_opc1 USING hash RENAME TO alt_opc4; -- OK -ALTER OPERATOR CLASS alt_opc3 USING hash OWNER TO regress_alter_user2; -- failed (not owner) -ALTER OPERATOR CLASS alt_opc2 USING hash OWNER TO regress_alter_user3; -- failed (no role membership) +ALTER OPERATOR CLASS alt_opc3 USING hash OWNER TO regress_alter_generic_user2; -- failed (not owner) +ALTER OPERATOR CLASS alt_opc2 USING hash OWNER TO regress_alter_generic_user3; -- failed (no role membership) ALTER OPERATOR CLASS alt_opc3 USING hash SET SCHEMA alt_nsp2; -- failed (not owner) ALTER OPERATOR CLASS alt_opc2 USING hash SET SCHEMA alt_nsp2; -- failed (name conflict) @@ -305,9 +305,9 @@ DROP OPERATOR FAMILY alt_opf4 USING btree; -- Should fail. Need to be SUPERUSER to do ALTER OPERATOR FAMILY .. ADD / DROP BEGIN TRANSACTION; -CREATE ROLE regress_alter_user5 NOSUPERUSER; +CREATE ROLE regress_alter_generic_user5 NOSUPERUSER; CREATE OPERATOR FAMILY alt_opf5 USING btree; -SET ROLE regress_alter_user5; +SET ROLE regress_alter_generic_user5; ALTER OPERATOR FAMILY alt_opf5 USING btree ADD OPERATOR 1 < (int4, int2), FUNCTION 1 btint42cmp(int4, int2); RESET ROLE; DROP OPERATOR FAMILY alt_opf5 USING btree; @@ -315,11 +315,11 @@ ROLLBACK; -- Should fail. Need rights to namespace for ALTER OPERATOR FAMILY .. 
ADD / DROP BEGIN TRANSACTION; -CREATE ROLE regress_alter_user6; +CREATE ROLE regress_alter_generic_user6; CREATE SCHEMA alt_nsp6; -REVOKE ALL ON SCHEMA alt_nsp6 FROM regress_alter_user6; +REVOKE ALL ON SCHEMA alt_nsp6 FROM regress_alter_generic_user6; CREATE OPERATOR FAMILY alt_nsp6.alt_opf6 USING btree; -SET ROLE regress_alter_user6; +SET ROLE regress_alter_generic_user6; ALTER OPERATOR FAMILY alt_nsp6.alt_opf6 USING btree ADD OPERATOR 1 < (int4, int2); ROLLBACK; @@ -436,26 +436,26 @@ DROP OPERATOR FAMILY alt_opf18 USING btree; -- -- Statistics -- -SET SESSION AUTHORIZATION regress_alter_user1; +SET SESSION AUTHORIZATION regress_alter_generic_user1; CREATE TABLE alt_regress_1 (a INTEGER, b INTEGER); CREATE STATISTICS alt_stat1 ON a, b FROM alt_regress_1; CREATE STATISTICS alt_stat2 ON a, b FROM alt_regress_1; ALTER STATISTICS alt_stat1 RENAME TO alt_stat2; -- failed (name conflict) ALTER STATISTICS alt_stat1 RENAME TO alt_stat3; -- failed (name conflict) -ALTER STATISTICS alt_stat2 OWNER TO regress_alter_user2; -- failed (no role membership) -ALTER STATISTICS alt_stat2 OWNER TO regress_alter_user3; -- OK +ALTER STATISTICS alt_stat2 OWNER TO regress_alter_generic_user2; -- failed (no role membership) +ALTER STATISTICS alt_stat2 OWNER TO regress_alter_generic_user3; -- OK ALTER STATISTICS alt_stat2 SET SCHEMA alt_nsp2; -- OK -SET SESSION AUTHORIZATION regress_alter_user2; +SET SESSION AUTHORIZATION regress_alter_generic_user2; CREATE TABLE alt_regress_2 (a INTEGER, b INTEGER); CREATE STATISTICS alt_stat1 ON a, b FROM alt_regress_2; CREATE STATISTICS alt_stat2 ON a, b FROM alt_regress_2; ALTER STATISTICS alt_stat3 RENAME TO alt_stat4; -- failed (not owner) ALTER STATISTICS alt_stat1 RENAME TO alt_stat4; -- OK -ALTER STATISTICS alt_stat3 OWNER TO regress_alter_user2; -- failed (not owner) -ALTER STATISTICS alt_stat2 OWNER TO regress_alter_user3; -- failed (no role membership) +ALTER STATISTICS alt_stat3 OWNER TO regress_alter_generic_user2; -- failed (not owner) +ALTER STATISTICS alt_stat2 OWNER TO regress_alter_generic_user3; -- failed (no role membership) ALTER STATISTICS alt_stat3 SET SCHEMA alt_nsp2; -- failed (not owner) ALTER STATISTICS alt_stat2 SET SCHEMA alt_nsp2; -- failed (name conflict) @@ -469,24 +469,24 @@ SELECT nspname, stxname, rolname -- -- Text Search Dictionary -- -SET SESSION AUTHORIZATION regress_alter_user1; +SET SESSION AUTHORIZATION regress_alter_generic_user1; CREATE TEXT SEARCH DICTIONARY alt_ts_dict1 (template=simple); CREATE TEXT SEARCH DICTIONARY alt_ts_dict2 (template=simple); ALTER TEXT SEARCH DICTIONARY alt_ts_dict1 RENAME TO alt_ts_dict2; -- failed (name conflict) ALTER TEXT SEARCH DICTIONARY alt_ts_dict1 RENAME TO alt_ts_dict3; -- OK -ALTER TEXT SEARCH DICTIONARY alt_ts_dict2 OWNER TO regress_alter_user2; -- failed (no role membership) -ALTER TEXT SEARCH DICTIONARY alt_ts_dict2 OWNER TO regress_alter_user3; -- OK +ALTER TEXT SEARCH DICTIONARY alt_ts_dict2 OWNER TO regress_alter_generic_user2; -- failed (no role membership) +ALTER TEXT SEARCH DICTIONARY alt_ts_dict2 OWNER TO regress_alter_generic_user3; -- OK ALTER TEXT SEARCH DICTIONARY alt_ts_dict2 SET SCHEMA alt_nsp2; -- OK -SET SESSION AUTHORIZATION regress_alter_user2; +SET SESSION AUTHORIZATION regress_alter_generic_user2; CREATE TEXT SEARCH DICTIONARY alt_ts_dict1 (template=simple); CREATE TEXT SEARCH DICTIONARY alt_ts_dict2 (template=simple); ALTER TEXT SEARCH DICTIONARY alt_ts_dict3 RENAME TO alt_ts_dict4; -- failed (not owner) ALTER TEXT SEARCH DICTIONARY alt_ts_dict1 RENAME TO alt_ts_dict4; -- OK 
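The catalog query earlier in this file now selects prokind instead of the removed proisagg column. On the C side, the equivalent test compares against the PROKIND_* constants from catalog/pg_proc.h; a hedged sketch of such a check, with a helper name of our own:

#include "postgres.h"
#include "access/htup_details.h"
#include "catalog/pg_proc.h"
#include "utils/syscache.h"

/*
 * Hypothetical helper: returns true if the given function OID is an
 * aggregate, using prokind rather than the old proisagg boolean.
 */
static bool
function_is_aggregate(Oid funcid)
{
    HeapTuple   tup;
    Form_pg_proc procform;
    bool        result;

    tup = SearchSysCache1(PROCOID, ObjectIdGetDatum(funcid));
    if (!HeapTupleIsValid(tup))
        elog(ERROR, "cache lookup failed for function %u", funcid);
    procform = (Form_pg_proc) GETSTRUCT(tup);
    result = (procform->prokind == PROKIND_AGGREGATE);
    ReleaseSysCache(tup);

    return result;
}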
-ALTER TEXT SEARCH DICTIONARY alt_ts_dict3 OWNER TO regress_alter_user2; -- failed (not owner) -ALTER TEXT SEARCH DICTIONARY alt_ts_dict2 OWNER TO regress_alter_user3; -- failed (no role membership) +ALTER TEXT SEARCH DICTIONARY alt_ts_dict3 OWNER TO regress_alter_generic_user2; -- failed (not owner) +ALTER TEXT SEARCH DICTIONARY alt_ts_dict2 OWNER TO regress_alter_generic_user3; -- failed (no role membership) ALTER TEXT SEARCH DICTIONARY alt_ts_dict3 SET SCHEMA alt_nsp2; -- failed (not owner) ALTER TEXT SEARCH DICTIONARY alt_ts_dict2 SET SCHEMA alt_nsp2; -- failed (name conflict) @@ -501,24 +501,24 @@ SELECT nspname, dictname, rolname -- -- Text Search Configuration -- -SET SESSION AUTHORIZATION regress_alter_user1; +SET SESSION AUTHORIZATION regress_alter_generic_user1; CREATE TEXT SEARCH CONFIGURATION alt_ts_conf1 (copy=english); CREATE TEXT SEARCH CONFIGURATION alt_ts_conf2 (copy=english); ALTER TEXT SEARCH CONFIGURATION alt_ts_conf1 RENAME TO alt_ts_conf2; -- failed (name conflict) ALTER TEXT SEARCH CONFIGURATION alt_ts_conf1 RENAME TO alt_ts_conf3; -- OK -ALTER TEXT SEARCH CONFIGURATION alt_ts_conf2 OWNER TO regress_alter_user2; -- failed (no role membership) -ALTER TEXT SEARCH CONFIGURATION alt_ts_conf2 OWNER TO regress_alter_user3; -- OK +ALTER TEXT SEARCH CONFIGURATION alt_ts_conf2 OWNER TO regress_alter_generic_user2; -- failed (no role membership) +ALTER TEXT SEARCH CONFIGURATION alt_ts_conf2 OWNER TO regress_alter_generic_user3; -- OK ALTER TEXT SEARCH CONFIGURATION alt_ts_conf2 SET SCHEMA alt_nsp2; -- OK -SET SESSION AUTHORIZATION regress_alter_user2; +SET SESSION AUTHORIZATION regress_alter_generic_user2; CREATE TEXT SEARCH CONFIGURATION alt_ts_conf1 (copy=english); CREATE TEXT SEARCH CONFIGURATION alt_ts_conf2 (copy=english); ALTER TEXT SEARCH CONFIGURATION alt_ts_conf3 RENAME TO alt_ts_conf4; -- failed (not owner) ALTER TEXT SEARCH CONFIGURATION alt_ts_conf1 RENAME TO alt_ts_conf4; -- OK -ALTER TEXT SEARCH CONFIGURATION alt_ts_conf3 OWNER TO regress_alter_user2; -- failed (not owner) -ALTER TEXT SEARCH CONFIGURATION alt_ts_conf2 OWNER TO regress_alter_user3; -- failed (no role membership) +ALTER TEXT SEARCH CONFIGURATION alt_ts_conf3 OWNER TO regress_alter_generic_user2; -- failed (not owner) +ALTER TEXT SEARCH CONFIGURATION alt_ts_conf2 OWNER TO regress_alter_generic_user3; -- failed (no role membership) ALTER TEXT SEARCH CONFIGURATION alt_ts_conf3 SET SCHEMA alt_nsp2; -- failed (not owner) ALTER TEXT SEARCH CONFIGURATION alt_ts_conf2 SET SCHEMA alt_nsp2; -- failed (name conflict) @@ -543,6 +543,9 @@ ALTER TEXT SEARCH TEMPLATE alt_ts_temp2 SET SCHEMA alt_nsp2; -- OK CREATE TEXT SEARCH TEMPLATE alt_ts_temp2 (lexize=dsimple_lexize); ALTER TEXT SEARCH TEMPLATE alt_ts_temp2 SET SCHEMA alt_nsp2; -- failed (name conflict) +-- invalid: non-lowercase quoted identifiers +CREATE TEXT SEARCH TEMPLATE tstemp_case ("Init" = init_function); + SELECT nspname, tmplname FROM pg_ts_template t, pg_namespace n WHERE t.tmplnamespace = n.oid AND nspname like 'alt_nsp%' @@ -565,6 +568,9 @@ CREATE TEXT SEARCH PARSER alt_ts_prs2 (start = prsd_start, gettoken = prsd_nexttoken, end = prsd_end, lextypes = prsd_lextype); ALTER TEXT SEARCH PARSER alt_ts_prs2 SET SCHEMA alt_nsp2; -- failed (name conflict) +-- invalid: non-lowercase quoted identifiers +CREATE TEXT SEARCH PARSER tspars_case ("Start" = start_function); + SELECT nspname, prsname FROM pg_ts_parser t, pg_namespace n WHERE t.prsnamespace = n.oid AND nspname like 'alt_nsp%' @@ -584,6 +590,6 @@ DROP LANGUAGE alt_lang3 CASCADE; DROP SCHEMA 
alt_nsp1 CASCADE; DROP SCHEMA alt_nsp2 CASCADE; -DROP USER regress_alter_user1; -DROP USER regress_alter_user2; -DROP USER regress_alter_user3; +DROP USER regress_alter_generic_user1; +DROP USER regress_alter_generic_user2; +DROP USER regress_alter_generic_user3; diff --git a/src/test/regress/sql/alter_operator.sql b/src/test/regress/sql/alter_operator.sql index 51ffd7e0e0..fd40370165 100644 --- a/src/test/regress/sql/alter_operator.sql +++ b/src/test/regress/sql/alter_operator.sql @@ -81,6 +81,9 @@ ALTER OPERATOR === (boolean, boolean) SET (JOIN = non_existent_func); ALTER OPERATOR === (boolean, boolean) SET (COMMUTATOR = !==); ALTER OPERATOR === (boolean, boolean) SET (NEGATOR = !==); +-- invalid: non-lowercase quoted identifiers +ALTER OPERATOR & (bit, bit) SET ("Restrict" = _int_contsel, "Join" = _int_contjoinsel); + -- -- Test permission check. Must be owner to ALTER OPERATOR. -- diff --git a/src/test/regress/sql/alter_table.sql b/src/test/regress/sql/alter_table.sql index 9a20dd141a..0352536fa5 100644 --- a/src/test/regress/sql/alter_table.sql +++ b/src/test/regress/sql/alter_table.sql @@ -1,201 +1,239 @@ -- -- ALTER_TABLE --- add attribute -- -CREATE TABLE tmp (initial int4); +-- Clean up in case a prior regression run failed +SET client_min_messages TO 'warning'; +DROP ROLE IF EXISTS regress_alter_table_user1; +RESET client_min_messages; -COMMENT ON TABLE tmp_wrong IS 'table comment'; -COMMENT ON TABLE tmp IS 'table comment'; -COMMENT ON TABLE tmp IS NULL; +CREATE USER regress_alter_table_user1; -ALTER TABLE tmp ADD COLUMN xmin integer; -- fails +-- +-- add attribute +-- -ALTER TABLE tmp ADD COLUMN a int4 default 3; +CREATE TABLE attmp (initial int4); -ALTER TABLE tmp ADD COLUMN b name; +COMMENT ON TABLE attmp_wrong IS 'table comment'; +COMMENT ON TABLE attmp IS 'table comment'; +COMMENT ON TABLE attmp IS NULL; -ALTER TABLE tmp ADD COLUMN c text; +ALTER TABLE attmp ADD COLUMN xmin integer; -- fails -ALTER TABLE tmp ADD COLUMN d float8; +ALTER TABLE attmp ADD COLUMN a int4 default 3; -ALTER TABLE tmp ADD COLUMN e float4; +ALTER TABLE attmp ADD COLUMN b name; -ALTER TABLE tmp ADD COLUMN f int2; +ALTER TABLE attmp ADD COLUMN c text; -ALTER TABLE tmp ADD COLUMN g polygon; +ALTER TABLE attmp ADD COLUMN d float8; -ALTER TABLE tmp ADD COLUMN h abstime; +ALTER TABLE attmp ADD COLUMN e float4; -ALTER TABLE tmp ADD COLUMN i char; +ALTER TABLE attmp ADD COLUMN f int2; -ALTER TABLE tmp ADD COLUMN j abstime[]; +ALTER TABLE attmp ADD COLUMN g polygon; -ALTER TABLE tmp ADD COLUMN k int4; +ALTER TABLE attmp ADD COLUMN i char; -ALTER TABLE tmp ADD COLUMN l tid; +ALTER TABLE attmp ADD COLUMN k int4; -ALTER TABLE tmp ADD COLUMN m xid; +ALTER TABLE attmp ADD COLUMN l tid; -ALTER TABLE tmp ADD COLUMN n oidvector; +ALTER TABLE attmp ADD COLUMN m xid; ---ALTER TABLE tmp ADD COLUMN o lock; -ALTER TABLE tmp ADD COLUMN p smgr; +ALTER TABLE attmp ADD COLUMN n oidvector; -ALTER TABLE tmp ADD COLUMN q point; +--ALTER TABLE attmp ADD COLUMN o lock; +ALTER TABLE attmp ADD COLUMN p smgr; -ALTER TABLE tmp ADD COLUMN r lseg; +ALTER TABLE attmp ADD COLUMN q point; -ALTER TABLE tmp ADD COLUMN s path; +ALTER TABLE attmp ADD COLUMN r lseg; -ALTER TABLE tmp ADD COLUMN t box; +ALTER TABLE attmp ADD COLUMN s path; -ALTER TABLE tmp ADD COLUMN u tinterval; +ALTER TABLE attmp ADD COLUMN t box; -ALTER TABLE tmp ADD COLUMN v timestamp; +ALTER TABLE attmp ADD COLUMN v timestamp; -ALTER TABLE tmp ADD COLUMN w interval; +ALTER TABLE attmp ADD COLUMN w interval; -ALTER TABLE tmp ADD COLUMN x float8[]; +ALTER TABLE attmp ADD COLUMN 
x float8[]; -ALTER TABLE tmp ADD COLUMN y float4[]; +ALTER TABLE attmp ADD COLUMN y float4[]; -ALTER TABLE tmp ADD COLUMN z int2[]; +ALTER TABLE attmp ADD COLUMN z int2[]; -INSERT INTO tmp (a, b, c, d, e, f, g, h, i, j, k, l, m, n, p, q, r, s, t, u, +INSERT INTO attmp (a, b, c, d, e, f, g, i, k, l, m, n, p, q, r, s, t, v, w, x, y, z) VALUES (4, 'name', 'text', 4.1, 4.1, 2, '(4.1,4.1,3.1,3.1)', - 'Mon May 1 00:30:30 1995', 'c', '{Mon May 1 00:30:30 1995, Monday Aug 24 14:43:07 1992, epoch}', + 'c', 314159, '(1,1)', '512', '1 2 3 4 5 6 7 8', 'magnetic disk', '(1.1,1.1)', '(4.1,4.1,3.1,3.1)', - '(0,2,4.1,4.1,3.1,3.1)', '(4.1,4.1,3.1,3.1)', '["epoch" "infinity"]', + '(0,2,4.1,4.1,3.1,3.1)', '(4.1,4.1,3.1,3.1)', 'epoch', '01:00:10', '{1.0,2.0,3.0,4.0}', '{1.0,2.0,3.0,4.0}', '{1,2,3,4}'); -SELECT * FROM tmp; +SELECT * FROM attmp; -DROP TABLE tmp; +DROP TABLE attmp; -- the wolf bug - schema mods caused inconsistent row descriptors -CREATE TABLE tmp ( +CREATE TABLE attmp ( initial int4 ); -ALTER TABLE tmp ADD COLUMN a int4; +ALTER TABLE attmp ADD COLUMN a int4; -ALTER TABLE tmp ADD COLUMN b name; +ALTER TABLE attmp ADD COLUMN b name; -ALTER TABLE tmp ADD COLUMN c text; +ALTER TABLE attmp ADD COLUMN c text; -ALTER TABLE tmp ADD COLUMN d float8; +ALTER TABLE attmp ADD COLUMN d float8; -ALTER TABLE tmp ADD COLUMN e float4; +ALTER TABLE attmp ADD COLUMN e float4; -ALTER TABLE tmp ADD COLUMN f int2; +ALTER TABLE attmp ADD COLUMN f int2; -ALTER TABLE tmp ADD COLUMN g polygon; +ALTER TABLE attmp ADD COLUMN g polygon; -ALTER TABLE tmp ADD COLUMN h abstime; +ALTER TABLE attmp ADD COLUMN i char; -ALTER TABLE tmp ADD COLUMN i char; +ALTER TABLE attmp ADD COLUMN k int4; -ALTER TABLE tmp ADD COLUMN j abstime[]; +ALTER TABLE attmp ADD COLUMN l tid; -ALTER TABLE tmp ADD COLUMN k int4; +ALTER TABLE attmp ADD COLUMN m xid; -ALTER TABLE tmp ADD COLUMN l tid; +ALTER TABLE attmp ADD COLUMN n oidvector; -ALTER TABLE tmp ADD COLUMN m xid; +--ALTER TABLE attmp ADD COLUMN o lock; +ALTER TABLE attmp ADD COLUMN p smgr; -ALTER TABLE tmp ADD COLUMN n oidvector; +ALTER TABLE attmp ADD COLUMN q point; ---ALTER TABLE tmp ADD COLUMN o lock; -ALTER TABLE tmp ADD COLUMN p smgr; +ALTER TABLE attmp ADD COLUMN r lseg; -ALTER TABLE tmp ADD COLUMN q point; +ALTER TABLE attmp ADD COLUMN s path; -ALTER TABLE tmp ADD COLUMN r lseg; +ALTER TABLE attmp ADD COLUMN t box; -ALTER TABLE tmp ADD COLUMN s path; +ALTER TABLE attmp ADD COLUMN v timestamp; -ALTER TABLE tmp ADD COLUMN t box; +ALTER TABLE attmp ADD COLUMN w interval; -ALTER TABLE tmp ADD COLUMN u tinterval; +ALTER TABLE attmp ADD COLUMN x float8[]; -ALTER TABLE tmp ADD COLUMN v timestamp; +ALTER TABLE attmp ADD COLUMN y float4[]; -ALTER TABLE tmp ADD COLUMN w interval; +ALTER TABLE attmp ADD COLUMN z int2[]; -ALTER TABLE tmp ADD COLUMN x float8[]; - -ALTER TABLE tmp ADD COLUMN y float4[]; - -ALTER TABLE tmp ADD COLUMN z int2[]; - -INSERT INTO tmp (a, b, c, d, e, f, g, h, i, j, k, l, m, n, p, q, r, s, t, u, +INSERT INTO attmp (a, b, c, d, e, f, g, i, k, l, m, n, p, q, r, s, t, v, w, x, y, z) VALUES (4, 'name', 'text', 4.1, 4.1, 2, '(4.1,4.1,3.1,3.1)', - 'Mon May 1 00:30:30 1995', 'c', '{Mon May 1 00:30:30 1995, Monday Aug 24 14:43:07 1992, epoch}', + 'c', 314159, '(1,1)', '512', '1 2 3 4 5 6 7 8', 'magnetic disk', '(1.1,1.1)', '(4.1,4.1,3.1,3.1)', - '(0,2,4.1,4.1,3.1,3.1)', '(4.1,4.1,3.1,3.1)', '["epoch" "infinity"]', + '(0,2,4.1,4.1,3.1,3.1)', '(4.1,4.1,3.1,3.1)', 'epoch', '01:00:10', '{1.0,2.0,3.0,4.0}', '{1.0,2.0,3.0,4.0}', '{1,2,3,4}'); -SELECT * FROM tmp; +SELECT * FROM attmp; 
-DROP TABLE tmp; +CREATE INDEX attmp_idx ON attmp (a, (d + e), b); +ALTER INDEX attmp_idx ALTER COLUMN 0 SET STATISTICS 1000; --- --- rename - check on both non-temp and temp tables --- -CREATE TABLE tmp (regtable int); -CREATE TEMP TABLE tmp (tmptable int); +ALTER INDEX attmp_idx ALTER COLUMN 1 SET STATISTICS 1000; + +ALTER INDEX attmp_idx ALTER COLUMN 2 SET STATISTICS 1000; + +\d+ attmp_idx -ALTER TABLE tmp RENAME TO tmp_new; +ALTER INDEX attmp_idx ALTER COLUMN 3 SET STATISTICS 1000; -SELECT * FROM tmp; -SELECT * FROM tmp_new; +ALTER INDEX attmp_idx ALTER COLUMN 4 SET STATISTICS 1000; -ALTER TABLE tmp RENAME TO tmp_new2; +ALTER INDEX attmp_idx ALTER COLUMN 2 SET STATISTICS -1; -SELECT * FROM tmp; -- should fail -SELECT * FROM tmp_new; -SELECT * FROM tmp_new2; +DROP TABLE attmp; -DROP TABLE tmp_new; -DROP TABLE tmp_new2; + +-- +-- rename - check on both non-temp and temp tables +-- +CREATE TABLE attmp (regtable int); +CREATE TEMP TABLE attmp (attmptable int); + +ALTER TABLE attmp RENAME TO attmp_new; + +SELECT * FROM attmp; +SELECT * FROM attmp_new; + +ALTER TABLE attmp RENAME TO attmp_new2; + +SELECT * FROM attmp; -- should fail +SELECT * FROM attmp_new; +SELECT * FROM attmp_new2; + +DROP TABLE attmp_new; +DROP TABLE attmp_new2; + +-- check rename of partitioned tables and indexes also +CREATE TABLE part_attmp (a int primary key) partition by range (a); +CREATE TABLE part_attmp1 PARTITION OF part_attmp FOR VALUES FROM (0) TO (100); +ALTER INDEX part_attmp_pkey RENAME TO part_attmp_index; +ALTER INDEX part_attmp1_pkey RENAME TO part_attmp1_index; +ALTER TABLE part_attmp RENAME TO part_at2tmp; +ALTER TABLE part_attmp1 RENAME TO part_at2tmp1; +SET ROLE regress_alter_table_user1; +ALTER INDEX part_attmp_index RENAME TO fail; +ALTER INDEX part_attmp1_index RENAME TO fail; +ALTER TABLE part_at2tmp RENAME TO fail; +ALTER TABLE part_at2tmp1 RENAME TO fail; +RESET ROLE; +DROP TABLE part_at2tmp; -- -- check renaming to a table's array type's autogenerated name -- (the array type's name should get out of the way) -- -CREATE TABLE tmp_array (id int); -CREATE TABLE tmp_array2 (id int); -SELECT typname FROM pg_type WHERE oid = 'tmp_array[]'::regtype; -SELECT typname FROM pg_type WHERE oid = 'tmp_array2[]'::regtype; -ALTER TABLE tmp_array2 RENAME TO _tmp_array; -SELECT typname FROM pg_type WHERE oid = 'tmp_array[]'::regtype; -SELECT typname FROM pg_type WHERE oid = '_tmp_array[]'::regtype; -DROP TABLE _tmp_array; -DROP TABLE tmp_array; +CREATE TABLE attmp_array (id int); +CREATE TABLE attmp_array2 (id int); +SELECT typname FROM pg_type WHERE oid = 'attmp_array[]'::regtype; +SELECT typname FROM pg_type WHERE oid = 'attmp_array2[]'::regtype; +ALTER TABLE attmp_array2 RENAME TO _attmp_array; +SELECT typname FROM pg_type WHERE oid = 'attmp_array[]'::regtype; +SELECT typname FROM pg_type WHERE oid = '_attmp_array[]'::regtype; +DROP TABLE _attmp_array; +DROP TABLE attmp_array; -- renaming to table's own array type's name is an interesting corner case -CREATE TABLE tmp_array (id int); -SELECT typname FROM pg_type WHERE oid = 'tmp_array[]'::regtype; -ALTER TABLE tmp_array RENAME TO _tmp_array; -SELECT typname FROM pg_type WHERE oid = '_tmp_array[]'::regtype; -DROP TABLE _tmp_array; +CREATE TABLE attmp_array (id int); +SELECT typname FROM pg_type WHERE oid = 'attmp_array[]'::regtype; +ALTER TABLE attmp_array RENAME TO _attmp_array; +SELECT typname FROM pg_type WHERE oid = '_attmp_array[]'::regtype; +DROP TABLE _attmp_array; -- ALTER TABLE ... 
RENAME on non-table relations -- renaming indexes (FIXME: this should probably test the index's functionality) -ALTER INDEX IF EXISTS __onek_unique1 RENAME TO tmp_onek_unique1; -ALTER INDEX IF EXISTS __tmp_onek_unique1 RENAME TO onek_unique1; +ALTER INDEX IF EXISTS __onek_unique1 RENAME TO attmp_onek_unique1; +ALTER INDEX IF EXISTS __attmp_onek_unique1 RENAME TO onek_unique1; + +ALTER INDEX onek_unique1 RENAME TO attmp_onek_unique1; +ALTER INDEX attmp_onek_unique1 RENAME TO onek_unique1; + +SET ROLE regress_alter_table_user1; +ALTER INDEX onek_unique1 RENAME TO fail; -- permission denied +RESET ROLE; -ALTER INDEX onek_unique1 RENAME TO tmp_onek_unique1; -ALTER INDEX tmp_onek_unique1 RENAME TO onek_unique1; -- renaming views -CREATE VIEW tmp_view (unique1) AS SELECT unique1 FROM tenk1; -ALTER TABLE tmp_view RENAME TO tmp_view_new; +CREATE VIEW attmp_view (unique1) AS SELECT unique1 FROM tenk1; +ALTER TABLE attmp_view RENAME TO attmp_view_new; + +SET ROLE regress_alter_table_user1; +ALTER VIEW attmp_view_new RENAME TO fail; -- permission denied +RESET ROLE; -- hack to ensure we get an indexscan here set enable_seqscan to off; @@ -205,7 +243,7 @@ SELECT unique1 FROM tenk1 WHERE unique1 < 5; reset enable_seqscan; reset enable_bitmapscan; -DROP VIEW tmp_view_new; +DROP VIEW attmp_view_new; -- toast-like relation name alter table stud_emp rename to pg_toast_stud_emp; alter table pg_toast_stud_emp rename to stud_emp; @@ -253,79 +291,79 @@ ALTER TABLE IF EXISTS constraint_rename_test ADD CONSTRAINT con4 UNIQUE (a); -- FOREIGN KEY CONSTRAINT adding TEST -CREATE TABLE tmp2 (a int primary key); +CREATE TABLE attmp2 (a int primary key); -CREATE TABLE tmp3 (a int, b int); +CREATE TABLE attmp3 (a int, b int); -CREATE TABLE tmp4 (a int, b int, unique(a,b)); +CREATE TABLE attmp4 (a int, b int, unique(a,b)); -CREATE TABLE tmp5 (a int, b int); +CREATE TABLE attmp5 (a int, b int); --- Insert rows into tmp2 (pktable) -INSERT INTO tmp2 values (1); -INSERT INTO tmp2 values (2); -INSERT INTO tmp2 values (3); -INSERT INTO tmp2 values (4); +-- Insert rows into attmp2 (pktable) +INSERT INTO attmp2 values (1); +INSERT INTO attmp2 values (2); +INSERT INTO attmp2 values (3); +INSERT INTO attmp2 values (4); --- Insert rows into tmp3 -INSERT INTO tmp3 values (1,10); -INSERT INTO tmp3 values (1,20); -INSERT INTO tmp3 values (5,50); +-- Insert rows into attmp3 +INSERT INTO attmp3 values (1,10); +INSERT INTO attmp3 values (1,20); +INSERT INTO attmp3 values (5,50); -- Try (and fail) to add constraint due to invalid source columns -ALTER TABLE tmp3 add constraint tmpconstr foreign key(c) references tmp2 match full; +ALTER TABLE attmp3 add constraint attmpconstr foreign key(c) references attmp2 match full; -- Try (and fail) to add constraint due to invalid destination columns explicitly given -ALTER TABLE tmp3 add constraint tmpconstr foreign key(a) references tmp2(b) match full; +ALTER TABLE attmp3 add constraint attmpconstr foreign key(a) references attmp2(b) match full; -- Try (and fail) to add constraint due to invalid data -ALTER TABLE tmp3 add constraint tmpconstr foreign key (a) references tmp2 match full; +ALTER TABLE attmp3 add constraint attmpconstr foreign key (a) references attmp2 match full; -- Delete failing row -DELETE FROM tmp3 where a=5; +DELETE FROM attmp3 where a=5; -- Try (and succeed) -ALTER TABLE tmp3 add constraint tmpconstr foreign key (a) references tmp2 match full; -ALTER TABLE tmp3 drop constraint tmpconstr; +ALTER TABLE attmp3 add constraint attmpconstr foreign key (a) references attmp2 match full; 
+ALTER TABLE attmp3 drop constraint attmpconstr; -INSERT INTO tmp3 values (5,50); +INSERT INTO attmp3 values (5,50); -- Try NOT VALID and then VALIDATE CONSTRAINT, but fails. Delete failure then re-validate -ALTER TABLE tmp3 add constraint tmpconstr foreign key (a) references tmp2 match full NOT VALID; -ALTER TABLE tmp3 validate constraint tmpconstr; +ALTER TABLE attmp3 add constraint attmpconstr foreign key (a) references attmp2 match full NOT VALID; +ALTER TABLE attmp3 validate constraint attmpconstr; -- Delete failing row -DELETE FROM tmp3 where a=5; +DELETE FROM attmp3 where a=5; -- Try (and succeed) and repeat to show it works on already valid constraint -ALTER TABLE tmp3 validate constraint tmpconstr; -ALTER TABLE tmp3 validate constraint tmpconstr; +ALTER TABLE attmp3 validate constraint attmpconstr; +ALTER TABLE attmp3 validate constraint attmpconstr; -- Try a non-verified CHECK constraint -ALTER TABLE tmp3 ADD CONSTRAINT b_greater_than_ten CHECK (b > 10); -- fail -ALTER TABLE tmp3 ADD CONSTRAINT b_greater_than_ten CHECK (b > 10) NOT VALID; -- succeeds -ALTER TABLE tmp3 VALIDATE CONSTRAINT b_greater_than_ten; -- fails -DELETE FROM tmp3 WHERE NOT b > 10; -ALTER TABLE tmp3 VALIDATE CONSTRAINT b_greater_than_ten; -- succeeds -ALTER TABLE tmp3 VALIDATE CONSTRAINT b_greater_than_ten; -- succeeds +ALTER TABLE attmp3 ADD CONSTRAINT b_greater_than_ten CHECK (b > 10); -- fail +ALTER TABLE attmp3 ADD CONSTRAINT b_greater_than_ten CHECK (b > 10) NOT VALID; -- succeeds +ALTER TABLE attmp3 VALIDATE CONSTRAINT b_greater_than_ten; -- fails +DELETE FROM attmp3 WHERE NOT b > 10; +ALTER TABLE attmp3 VALIDATE CONSTRAINT b_greater_than_ten; -- succeeds +ALTER TABLE attmp3 VALIDATE CONSTRAINT b_greater_than_ten; -- succeeds -- Test inherited NOT VALID CHECK constraints -select * from tmp3; -CREATE TABLE tmp6 () INHERITS (tmp3); -CREATE TABLE tmp7 () INHERITS (tmp3); +select * from attmp3; +CREATE TABLE attmp6 () INHERITS (attmp3); +CREATE TABLE attmp7 () INHERITS (attmp3); -INSERT INTO tmp6 VALUES (6, 30), (7, 16); -ALTER TABLE tmp3 ADD CONSTRAINT b_le_20 CHECK (b <= 20) NOT VALID; -ALTER TABLE tmp3 VALIDATE CONSTRAINT b_le_20; -- fails -DELETE FROM tmp6 WHERE b > 20; -ALTER TABLE tmp3 VALIDATE CONSTRAINT b_le_20; -- succeeds +INSERT INTO attmp6 VALUES (6, 30), (7, 16); +ALTER TABLE attmp3 ADD CONSTRAINT b_le_20 CHECK (b <= 20) NOT VALID; +ALTER TABLE attmp3 VALIDATE CONSTRAINT b_le_20; -- fails +DELETE FROM attmp6 WHERE b > 20; +ALTER TABLE attmp3 VALIDATE CONSTRAINT b_le_20; -- succeeds -- An already validated constraint must not be revalidated CREATE FUNCTION boo(int) RETURNS int IMMUTABLE STRICT LANGUAGE plpgsql AS $$ BEGIN RAISE NOTICE 'boo: %', $1; RETURN $1; END; $$; -INSERT INTO tmp7 VALUES (8, 18); -ALTER TABLE tmp7 ADD CONSTRAINT identity CHECK (b = boo(b)); -ALTER TABLE tmp3 ADD CONSTRAINT IDENTITY check (b = boo(b)) NOT VALID; -ALTER TABLE tmp3 VALIDATE CONSTRAINT identity; +INSERT INTO attmp7 VALUES (8, 18); +ALTER TABLE attmp7 ADD CONSTRAINT identity CHECK (b = boo(b)); +ALTER TABLE attmp3 ADD CONSTRAINT IDENTITY check (b = boo(b)) NOT VALID; +ALTER TABLE attmp3 VALIDATE CONSTRAINT identity; -- A NO INHERIT constraint should not be looked for in children during VALIDATE CONSTRAINT create table parent_noinh_convalid (a int); @@ -342,22 +380,22 @@ select convalidated from pg_constraint where conrelid = 'parent_noinh_convalid': -- cleanup drop table parent_noinh_convalid, child_noinh_convalid; --- Try (and fail) to create constraint from tmp5(a) to tmp4(a) - unique constraint on --- tmp4 is 
a,b +-- Try (and fail) to create constraint from attmp5(a) to attmp4(a) - unique constraint on +-- attmp4 is a,b -ALTER TABLE tmp5 add constraint tmpconstr foreign key(a) references tmp4(a) match full; +ALTER TABLE attmp5 add constraint attmpconstr foreign key(a) references attmp4(a) match full; -DROP TABLE tmp7; +DROP TABLE attmp7; -DROP TABLE tmp6; +DROP TABLE attmp6; -DROP TABLE tmp5; +DROP TABLE attmp5; -DROP TABLE tmp4; +DROP TABLE attmp4; -DROP TABLE tmp3; +DROP TABLE attmp3; -DROP TABLE tmp2; +DROP TABLE attmp2; -- NOT VALID with plan invalidation -- ensure we don't use a constraint for -- exclusion until validated @@ -934,12 +972,12 @@ create index "testing_idx" on atacc1("........pg.dropped.1........"); -- test create as and select into insert into atacc1 values (21, 22, 23); -create table test1 as select * from atacc1; -select * from test1; -drop table test1; -select * into test2 from atacc1; -select * from test2; -drop table test2; +create table attest1 as select * from atacc1; +select * from attest1; +drop table attest1; +select * into attest2 from atacc1; +select * from attest2; +drop table attest2; -- try dropping all columns alter table atacc1 drop c; @@ -986,27 +1024,27 @@ drop table child; drop table parent; -- test copy in/out -create table test (a int4, b int4, c int4); -insert into test values (1,2,3); -alter table test drop a; -copy test to stdout; -copy test(a) to stdout; -copy test("........pg.dropped.1........") to stdout; -copy test from stdin; +create table attest (a int4, b int4, c int4); +insert into attest values (1,2,3); +alter table attest drop a; +copy attest to stdout; +copy attest(a) to stdout; +copy attest("........pg.dropped.1........") to stdout; +copy attest from stdin; 10 11 12 \. -select * from test; -copy test from stdin; +select * from attest; +copy attest from stdin; 21 22 \. -select * from test; -copy test(a) from stdin; -copy test("........pg.dropped.1........") from stdin; -copy test(b,c) from stdin; +select * from attest; +copy attest(a) from stdin; +copy attest("........pg.dropped.1........") from stdin; +copy attest(b,c) from stdin; 31 32 \. 
-select * from test; -drop table test; +select * from attest; +drop table attest; -- test inheritance @@ -1314,6 +1352,22 @@ create table tab1 (a int, b text); create table tab2 (x int, y tab1); alter table tab1 alter column b type varchar; -- fails +-- Alter column type that's part of a partitioned index +create table at_partitioned (a int, b text) partition by range (a); +create table at_part_1 partition of at_partitioned for values from (0) to (1000); +insert into at_partitioned values (512, '0.123'); +create table at_part_2 (b text, a int); +insert into at_part_2 values ('1.234', 1024); +create index on at_partitioned (b); +create index on at_partitioned (a); +\d at_part_1 +\d at_part_2 +alter table at_partitioned attach partition at_part_2 for values from (1000) to (2000); +\d at_part_2 +alter table at_partitioned alter column b type numeric using b::numeric; +\d at_part_1 +\d at_part_2 + -- disallow recursive containment of row types create temp table recur1 (f1 int); alter table recur1 add column f2 recur1; -- fails @@ -1402,6 +1456,26 @@ ROLLBACK; \d check_fk_presence_2 DROP TABLE check_fk_presence_1, check_fk_presence_2; +-- check column addition within a view (bug #14876) +create table at_base_table(id int, stuff text); +insert into at_base_table values (23, 'skidoo'); +create view at_view_1 as select * from at_base_table bt; +create view at_view_2 as select *, to_json(v1) as j from at_view_1 v1; +\d+ at_view_1 +\d+ at_view_2 +explain (verbose, costs off) select * from at_view_2; +select * from at_view_2; + +create or replace view at_view_1 as select *, 2+2 as more from at_base_table bt; +\d+ at_view_1 +\d+ at_view_2 +explain (verbose, costs off) select * from at_view_2; +select * from at_view_2; + +drop view at_view_2; +drop view at_view_1; +drop table at_base_table; + -- -- lock levels -- @@ -1698,6 +1772,14 @@ ALTER TYPE test_type2 RENAME ATTRIBUTE a TO aa CASCADE; DROP TABLE test_tbl2_subclass; +CREATE TYPE test_typex AS (a int, b text); +CREATE TABLE test_tblx (x int, y test_typex check ((y).a > 0)); +ALTER TYPE test_typex DROP ATTRIBUTE a; -- fails +ALTER TYPE test_typex DROP ATTRIBUTE a CASCADE; +\d test_tblx +DROP TABLE test_tblx; +DROP TYPE test_typex; + -- This test isn't that interesting on its own, but the purpose is to leave -- behind a table to test pg_upgrade with. The table has a composite type -- column in it, and the composite type has a dropped attribute. @@ -1771,6 +1853,24 @@ ALTER TABLE IF EXISTS tt8 SET SCHEMA alter2; DROP TABLE alter2.tt8; DROP SCHEMA alter2; +-- +-- Check conflicts between index and CHECK constraint names +-- +CREATE TABLE tt9(c integer); +ALTER TABLE tt9 ADD CHECK(c > 1); +ALTER TABLE tt9 ADD CHECK(c > 2); -- picks nonconflicting name +ALTER TABLE tt9 ADD CONSTRAINT foo CHECK(c > 3); +ALTER TABLE tt9 ADD CONSTRAINT foo CHECK(c > 4); -- fail, dup name +ALTER TABLE tt9 ADD UNIQUE(c); +ALTER TABLE tt9 ADD UNIQUE(c); -- picks nonconflicting name +ALTER TABLE tt9 ADD CONSTRAINT tt9_c_key UNIQUE(c); -- fail, dup name +ALTER TABLE tt9 ADD CONSTRAINT foo UNIQUE(c); -- fail, dup name +ALTER TABLE tt9 ADD CONSTRAINT tt9_c_key CHECK(c > 5); -- fail, dup name +ALTER TABLE tt9 ADD CONSTRAINT tt9_c_key2 CHECK(c > 6); +ALTER TABLE tt9 ADD UNIQUE(c); -- picks nonconflicting name +\d tt9 +DROP TABLE tt9; + -- Check that comments on constraints and indexes are not lost at ALTER TABLE. 
CREATE TABLE comment_test ( @@ -1956,9 +2056,6 @@ CREATE TABLE partitioned ( a int, b int ) PARTITION BY RANGE (a, (a+b+1)); -ALTER TABLE partitioned ADD UNIQUE (a); -ALTER TABLE partitioned ADD PRIMARY KEY (a); -ALTER TABLE partitioned ADD FOREIGN KEY (a) REFERENCES blah; ALTER TABLE partitioned ADD EXCLUDE USING gist (a WITH &&); -- cannot drop column that is part of the partition key @@ -2095,6 +2192,14 @@ SELECT conislocal, coninhcount FROM pg_constraint WHERE conrelid = 'part_1'::reg -- check that the new partition won't overlap with an existing partition CREATE TABLE fail_part (LIKE part_1 INCLUDING CONSTRAINTS); ALTER TABLE list_parted ATTACH PARTITION fail_part FOR VALUES IN (1); +DROP TABLE fail_part; +-- check that an existing table can be attached as a default partition +CREATE TABLE def_part (LIKE list_parted INCLUDING CONSTRAINTS); +ALTER TABLE list_parted ATTACH PARTITION def_part DEFAULT; +-- check attaching default partition fails if a default partition already +-- exists +CREATE TABLE fail_def_part (LIKE part_1 INCLUDING CONSTRAINTS); +ALTER TABLE list_parted ATTACH PARTITION fail_def_part DEFAULT; -- check validation when attaching list partitions CREATE TABLE list_parted2 ( @@ -2111,6 +2216,15 @@ ALTER TABLE list_parted2 ATTACH PARTITION part_2 FOR VALUES IN (2); DELETE FROM part_2; ALTER TABLE list_parted2 ATTACH PARTITION part_2 FOR VALUES IN (2); +-- check partition cannot be attached if default has some row for its values +CREATE TABLE list_parted2_def PARTITION OF list_parted2 DEFAULT; +INSERT INTO list_parted2_def VALUES (11, 'z'); +CREATE TABLE part_3 (LIKE list_parted2); +ALTER TABLE list_parted2 ATTACH PARTITION part_3 FOR VALUES IN (11); +-- should be ok after deleting the bad row +DELETE FROM list_parted2_def WHERE a = 11; +ALTER TABLE list_parted2 ATTACH PARTITION part_3 FOR VALUES IN (11); + -- adding constraints that describe the desired partition constraint -- (or more restrictive) will help skip the validation scan CREATE TABLE part_3_4 ( @@ -2128,6 +2242,9 @@ ALTER TABLE list_parted2 DETACH PARTITION part_3_4; ALTER TABLE part_3_4 ALTER a SET NOT NULL; ALTER TABLE list_parted2 ATTACH PARTITION part_3_4 FOR VALUES IN (3, 4); +-- check if default partition scan skipped +ALTER TABLE list_parted2_def ADD CONSTRAINT check_a CHECK (a IN (5, 6)); +CREATE TABLE part_55_66 PARTITION OF list_parted2 FOR VALUES IN (55, 66); -- check validation when attaching range partitions CREATE TABLE range_parted ( @@ -2156,6 +2273,21 @@ CREATE TABLE part2 ( ); ALTER TABLE range_parted ATTACH PARTITION part2 FOR VALUES FROM (1, 10) TO (1, 20); +-- Create default partition +CREATE TABLE partr_def1 PARTITION OF range_parted DEFAULT; + +-- Only one default partition is allowed, hence, following should give error +CREATE TABLE partr_def2 (LIKE part1 INCLUDING CONSTRAINTS); +ALTER TABLE range_parted ATTACH PARTITION partr_def2 DEFAULT; + +-- Overlapping partitions cannot be attached, hence, following should give error +INSERT INTO partr_def1 VALUES (2, 10); +CREATE TABLE part3 (LIKE range_parted); +ALTER TABLE range_parted ATTACH partition part3 FOR VALUES FROM (2, 10) TO (2, 20); + +-- Attaching partitions should be successful when there are no overlapping rows +ALTER TABLE range_parted ATTACH partition part3 FOR VALUES FROM (3, 10) TO (3, 20); + -- check that leaf partitions are scanned when attaching a partitioned -- table CREATE TABLE part_5 ( @@ -2216,6 +2348,18 @@ INSERT INTO part_7 (a, b) VALUES (8, null), (9, 'a'); SELECT tableoid::regclass, a, b FROM part_7 order by a; 
ALTER TABLE list_parted2 ATTACH PARTITION part_7 FOR VALUES IN (7); +-- check that leaf partitions of default partition are scanned when +-- attaching a partitioned table. +ALTER TABLE part_5 DROP CONSTRAINT check_a; +CREATE TABLE part5_def PARTITION OF part_5 DEFAULT PARTITION BY LIST(a); +CREATE TABLE part5_def_p1 PARTITION OF part5_def FOR VALUES IN (5); +INSERT INTO part5_def_p1 VALUES (5, 'y'); +CREATE TABLE part5_p1 (LIKE part_5); +ALTER TABLE part_5 ATTACH PARTITION part5_p1 FOR VALUES IN ('y'); +-- should be ok after deleting the bad row +DELETE FROM part5_def_p1 WHERE b = 'y'; +ALTER TABLE part_5 ATTACH PARTITION part5_p1 FOR VALUES IN ('y'); + -- check that the table being attached is not already a partition ALTER TABLE list_parted2 ATTACH PARTITION part_2 FOR VALUES IN (2); @@ -2223,6 +2367,74 @@ ALTER TABLE list_parted2 ATTACH PARTITION part_2 FOR VALUES IN (2); ALTER TABLE part_5 ATTACH PARTITION list_parted2 FOR VALUES IN ('b'); ALTER TABLE list_parted2 ATTACH PARTITION list_parted2 FOR VALUES IN (0); +-- If a partitioned table being created or an existing table being attached +-- as a partition does not have a constraint that would allow validation scan +-- to be skipped, but an individual partition does, then the partition's +-- validation scan is skipped. +CREATE TABLE quuux (a int, b text) PARTITION BY LIST (a); +CREATE TABLE quuux_default PARTITION OF quuux DEFAULT PARTITION BY LIST (b); +CREATE TABLE quuux_default1 PARTITION OF quuux_default ( + CONSTRAINT check_1 CHECK (a IS NOT NULL AND a = 1) +) FOR VALUES IN ('b'); +CREATE TABLE quuux1 (a int, b text); +ALTER TABLE quuux ATTACH PARTITION quuux1 FOR VALUES IN (1); -- validate! +CREATE TABLE quuux2 (a int, b text); +ALTER TABLE quuux ATTACH PARTITION quuux2 FOR VALUES IN (2); -- skip validation +DROP TABLE quuux1, quuux2; +-- should validate for quuux1, but not for quuux2 +CREATE TABLE quuux1 PARTITION OF quuux FOR VALUES IN (1); +CREATE TABLE quuux2 PARTITION OF quuux FOR VALUES IN (2); +DROP TABLE quuux; + +-- check validation when attaching hash partitions + +-- Use hand-rolled hash functions and operator class to get a predictable result +-- on different machines. part_test_int4_ops is defined in insert.sql.
+ +-- check that the new partition won't overlap with an existing partition +CREATE TABLE hash_parted ( + a int, + b int +) PARTITION BY HASH (a part_test_int4_ops); +CREATE TABLE hpart_1 PARTITION OF hash_parted FOR VALUES WITH (MODULUS 4, REMAINDER 0); +CREATE TABLE fail_part (LIKE hpart_1); +ALTER TABLE hash_parted ATTACH PARTITION fail_part FOR VALUES WITH (MODULUS 8, REMAINDER 4); +ALTER TABLE hash_parted ATTACH PARTITION fail_part FOR VALUES WITH (MODULUS 8, REMAINDER 0); +DROP TABLE fail_part; + +-- check validation when attaching hash partitions + +-- check that violating rows are correctly reported +CREATE TABLE hpart_2 (LIKE hash_parted); +INSERT INTO hpart_2 VALUES (3, 0); +ALTER TABLE hash_parted ATTACH PARTITION hpart_2 FOR VALUES WITH (MODULUS 4, REMAINDER 1); + +-- should be ok after deleting the bad row +DELETE FROM hpart_2; +ALTER TABLE hash_parted ATTACH PARTITION hpart_2 FOR VALUES WITH (MODULUS 4, REMAINDER 1); + +-- check that leaf partitions are scanned when attaching a partitioned +-- table +CREATE TABLE hpart_5 ( + LIKE hash_parted +) PARTITION BY LIST (b); + +-- check that violating rows are correctly reported +CREATE TABLE hpart_5_a PARTITION OF hpart_5 FOR VALUES IN ('1', '2', '3'); +INSERT INTO hpart_5_a (a, b) VALUES (7, 1); +ALTER TABLE hash_parted ATTACH PARTITION hpart_5 FOR VALUES WITH (MODULUS 4, REMAINDER 2); + +-- should be ok after deleting the bad row +DELETE FROM hpart_5_a; +ALTER TABLE hash_parted ATTACH PARTITION hpart_5 FOR VALUES WITH (MODULUS 4, REMAINDER 2); + +-- check that the table being attached has valid modulus and remainder values +CREATE TABLE fail_part(LIKE hash_parted); +ALTER TABLE hash_parted ATTACH PARTITION fail_part FOR VALUES WITH (MODULUS 0, REMAINDER 1); +ALTER TABLE hash_parted ATTACH PARTITION fail_part FOR VALUES WITH (MODULUS 8, REMAINDER 8); +ALTER TABLE hash_parted ATTACH PARTITION fail_part FOR VALUES WITH (MODULUS 3, REMAINDER 2); +DROP TABLE fail_part; + -- -- DETACH PARTITION -- @@ -2234,12 +2446,16 @@ DROP TABLE regular_table; -- check that the partition being detached exists at all ALTER TABLE list_parted2 DETACH PARTITION part_4; +ALTER TABLE hash_parted DETACH PARTITION hpart_4; -- check that the partition being detached is actually a partition of the parent CREATE TABLE not_a_part (a int); ALTER TABLE list_parted2 DETACH PARTITION not_a_part; ALTER TABLE list_parted2 DETACH PARTITION part_1; +ALTER TABLE hash_parted DETACH PARTITION not_a_part; +DROP TABLE not_a_part; + -- check that, after being detached, attinhcount/coninhcount is dropped to 0 and -- attislocal/conislocal is set to true ALTER TABLE list_parted2 DETACH PARTITION part_3_4; @@ -2309,8 +2525,14 @@ ALTER TABLE part_2 INHERIT inh_test; ALTER TABLE list_parted2 DROP COLUMN b; ALTER TABLE list_parted2 ALTER COLUMN b TYPE text; +-- dropping non-partition key columns should be allowed on the parent table.
+ALTER TABLE list_parted DROP COLUMN b; +SELECT * FROM list_parted; + -- cleanup DROP TABLE list_parted, list_parted2, range_parted; +DROP TABLE fail_def_part; +DROP TABLE hash_parted; -- more tests for certain multi-level partitioning scenarios create table p (a int, b int) partition by range (a, b); @@ -2345,3 +2567,74 @@ create table parted_validate_test_1 partition of parted_validate_test for values alter table parted_validate_test add constraint parted_validate_test_chka check (a > 0) not valid; alter table parted_validate_test validate constraint parted_validate_test_chka; drop table parted_validate_test; +-- test alter column options +CREATE TABLE attmp(i integer); +INSERT INTO attmp VALUES (1); +ALTER TABLE attmp ALTER COLUMN i SET (n_distinct = 1, n_distinct_inherited = 2); +ALTER TABLE attmp ALTER COLUMN i RESET (n_distinct_inherited); +ANALYZE attmp; +DROP TABLE attmp; + +DROP USER regress_alter_table_user1; + +-- check that violating rows are correctly reported when attaching as the +-- default partition +create table defpart_attach_test (a int) partition by list (a); +create table defpart_attach_test1 partition of defpart_attach_test for values in (1); +create table defpart_attach_test_d (like defpart_attach_test); +insert into defpart_attach_test_d values (1), (2); + +-- error because its constraint as the default partition would be violated +-- by the row containing 1 +alter table defpart_attach_test attach partition defpart_attach_test_d default; +delete from defpart_attach_test_d where a = 1; +alter table defpart_attach_test_d add check (a > 1); + +-- should be attached successfully and without needing to be scanned +alter table defpart_attach_test attach partition defpart_attach_test_d default; + +drop table defpart_attach_test; + +-- check combinations of temporary and permanent relations when attaching +-- partitions. 
+create table perm_part_parent (a int) partition by list (a); +create temp table temp_part_parent (a int) partition by list (a); +create table perm_part_child (a int); +create temp table temp_part_child (a int); +alter table temp_part_parent attach partition perm_part_child default; -- error +alter table perm_part_parent attach partition temp_part_child default; -- error +alter table temp_part_parent attach partition temp_part_child default; -- ok +drop table perm_part_parent cascade; +drop table temp_part_parent cascade; + +-- check that attaching partitions to a table while it is being used is +-- prevented +create table tab_part_attach (a int) partition by list (a); +create or replace function func_part_attach() returns trigger + language plpgsql as $$ + begin + execute 'create table tab_part_attach_1 (a int)'; + execute 'alter table tab_part_attach attach partition tab_part_attach_1 for values in (1)'; + return null; + end $$; +create trigger trig_part_attach before insert on tab_part_attach + for each statement execute procedure func_part_attach(); +insert into tab_part_attach values (1); +drop table tab_part_attach; +drop function func_part_attach(); + +-- test case where the partitioning operator is a SQL function whose +-- evaluation results in the table's relcache being rebuilt partway through +-- the execution of an ATTACH PARTITION command +create function at_test_sql_partop (int4, int4) returns int language sql +as $$ select case when $1 = $2 then 0 when $1 > $2 then 1 else -1 end; $$; +create operator class at_test_sql_partop for type int4 using btree as + operator 1 < (int4, int4), operator 2 <= (int4, int4), + operator 3 = (int4, int4), operator 4 >= (int4, int4), + operator 5 > (int4, int4), function 1 at_test_sql_partop(int4, int4); +create table at_test_sql_partop (a int) partition by range (a at_test_sql_partop); +create table at_test_sql_partop_1 (a int); +alter table at_test_sql_partop attach partition at_test_sql_partop_1 for values from (0) to (10); +drop table at_test_sql_partop; +drop operator class at_test_sql_partop using btree; +drop function at_test_sql_partop; diff --git a/src/test/regress/sql/amutils.sql b/src/test/regress/sql/amutils.sql index cec1dcb53b..06e7fa10d9 100644 --- a/src/test/regress/sql/amutils.sql +++ b/src/test/regress/sql/amutils.sql @@ -13,7 +13,7 @@ select prop, 'clusterable', 'index_scan', 'bitmap_scan', 'backward_scan', 'can_order', 'can_unique', 'can_multi_col', - 'can_exclude', + 'can_exclude', 'can_include', 'bogus']::text[]) with ordinality as u(prop,ord) where a.amname = 'btree' @@ -30,7 +30,7 @@ select prop, 'clusterable', 'index_scan', 'bitmap_scan', 'backward_scan', 'can_order', 'can_unique', 'can_multi_col', - 'can_exclude', + 'can_exclude', 'can_include', 'bogus']::text[]) with ordinality as u(prop,ord) where a.amname = 'gist' @@ -40,7 +40,8 @@ select prop, pg_index_column_has_property('onek_hundred'::regclass, 1, prop) as btree, pg_index_column_has_property('hash_i4_index'::regclass, 1, prop) as hash, pg_index_column_has_property('gcircleind'::regclass, 1, prop) as gist, - pg_index_column_has_property('sp_radix_ind'::regclass, 1, prop) as spgist, + pg_index_column_has_property('sp_radix_ind'::regclass, 1, prop) as spgist_radix, + pg_index_column_has_property('sp_quad_ind'::regclass, 1, prop) as spgist_quad, pg_index_column_has_property('botharrayidx'::regclass, 1, prop) as gin, pg_index_column_has_property('brinidx'::regclass, 1, prop) as brin from unnest(array['asc', 'desc', 'nulls_first', 'nulls_last', @@ -66,7 +67,7 @@ 
select prop, select amname, prop, pg_indexam_has_property(a.oid, prop) as p from pg_am a, unnest(array['can_order', 'can_unique', 'can_multi_col', - 'can_exclude', 'bogus']::text[]) + 'can_exclude', 'can_include', 'bogus']::text[]) with ordinality as u(prop,ord) where amtype = 'i' order by amname, ord; @@ -85,3 +86,14 @@ select col, prop, pg_index_column_has_property(o, col, prop) (6, 'bogus')) v2(idx,prop), generate_series(1,4) col order by col, idx; + +CREATE INDEX foocover ON foo (f1) INCLUDE (f2,f3); + +select col, prop, pg_index_column_has_property(o, col, prop) + from (values ('foocover'::regclass)) v1(o), + (values (1,'orderable'),(2,'asc'),(3,'desc'), + (4,'nulls_first'),(5,'nulls_last'), + (6,'distance_orderable'),(7,'returnable'), + (8, 'bogus')) v2(idx,prop), + generate_series(1,3) col + order by col, idx; diff --git a/src/test/regress/sql/bit.sql b/src/test/regress/sql/bit.sql index 419d47c8b7..a73da8cb8f 100644 --- a/src/test/regress/sql/bit.sql +++ b/src/test/regress/sql/bit.sql @@ -195,3 +195,14 @@ SELECT overlay(B'0101011100' placing '001' from 2 for 3); SELECT overlay(B'0101011100' placing '101' from 6); SELECT overlay(B'0101011100' placing '001' from 11); SELECT overlay(B'0101011100' placing '001' from 20); + +-- This table is intentionally left around to exercise pg_dump/pg_upgrade +CREATE TABLE bit_defaults( + b1 bit(4) DEFAULT '1001', + b2 bit(4) DEFAULT B'0101', + b3 bit varying(5) DEFAULT '1001', + b4 bit varying(5) DEFAULT B'0101' +); +\d bit_defaults +INSERT INTO bit_defaults DEFAULT VALUES; +TABLE bit_defaults; diff --git a/src/test/regress/sql/boolean.sql b/src/test/regress/sql/boolean.sql index cbf335467b..df61fa4e3e 100644 --- a/src/test/regress/sql/boolean.sql +++ b/src/test/regress/sql/boolean.sql @@ -219,6 +219,33 @@ SELECT b IS NOT UNKNOWN AS isnotunknown FROM booltbl3 ORDER BY o; + +-- Test to make sure short-circuiting and NULL handling is +-- correct. Use a table as source to prevent constant simplification +-- from interfering.
+CREATE TABLE booltbl4(isfalse bool, istrue bool, isnul bool); +INSERT INTO booltbl4 VALUES (false, true, null); +\pset null '(null)' + +-- An AND expression needs to return null if there are any nulls and not all +-- of the values are true +SELECT istrue AND isnul AND istrue FROM booltbl4; +SELECT istrue AND istrue AND isnul FROM booltbl4; +SELECT isnul AND istrue AND istrue FROM booltbl4; +SELECT isfalse AND isnul AND istrue FROM booltbl4; +SELECT istrue AND isfalse AND isnul FROM booltbl4; +SELECT isnul AND istrue AND isfalse FROM booltbl4; + +-- An OR expression needs to return null if there are any nulls and none +-- of the values is true +SELECT isfalse OR isnul OR isfalse FROM booltbl4; +SELECT isfalse OR isfalse OR isnul FROM booltbl4; +SELECT isnul OR isfalse OR isfalse FROM booltbl4; +SELECT isfalse OR isnul OR istrue FROM booltbl4; +SELECT istrue OR isfalse OR isnul FROM booltbl4; +SELECT isnul OR istrue OR isfalse FROM booltbl4; + + -- -- Clean up -- Many tables are retained by the regression test, but these do not seem @@ -231,3 +258,5 @@ DROP TABLE BOOLTBL1; DROP TABLE BOOLTBL2; DROP TABLE BOOLTBL3; + +DROP TABLE BOOLTBL4; diff --git a/src/test/regress/sql/box.sql b/src/test/regress/sql/box.sql index 135ac108eb..6710fc90f5 100644 --- a/src/test/regress/sql/box.sql +++ b/src/test/regress/sql/box.sql @@ -25,6 +25,9 @@ INSERT INTO BOX_TBL (f1) VALUES ('(2.0,2.0,0.0,0.0)'); INSERT INTO BOX_TBL (f1) VALUES ('(1.0,1.0,3.0,3.0)'); +INSERT INTO BOX_TBL (f1) VALUES ('((-8, 2), (-2, -10))'); + + -- degenerate cases where the box is a line or a point -- note that lines and points boxes all have zero area INSERT INTO BOX_TBL (f1) VALUES ('(2.5, 2.5, 2.5,3.5)'); @@ -34,6 +37,12 @@ INSERT INTO BOX_TBL (f1) VALUES ('(3.0, 3.0,3.0,3.0)'); -- badly formatted box inputs INSERT INTO BOX_TBL (f1) VALUES ('(2.3, 4.5)'); +INSERT INTO BOX_TBL (f1) VALUES ('[1, 2, 3, 4)'); + +INSERT INTO BOX_TBL (f1) VALUES ('(1, 2, 3, 4]'); + +INSERT INTO BOX_TBL (f1) VALUES ('(1, 2, 3, 4) x'); + INSERT INTO BOX_TBL (f1) VALUES ('asdfasdf(ad'); diff --git a/src/test/regress/sql/btree_index.sql b/src/test/regress/sql/btree_index.sql index 65b08c8282..21171f7762 100644 --- a/src/test/regress/sql/btree_index.sql +++ b/src/test/regress/sql/btree_index.sql @@ -92,3 +92,22 @@ vacuum btree_tall_tbl; -- need to insert some rows to cause the fast root page to split.
insert into btree_tall_tbl (id, t) select g, repeat('x', 100) from generate_series(1, 500) g; + +-- +-- Test vacuum_cleanup_index_scale_factor +-- + +-- Simple create +create table btree_test(a int); +create index btree_idx1 on btree_test(a) with (vacuum_cleanup_index_scale_factor = 40.0); +select reloptions from pg_class WHERE oid = 'btree_idx1'::regclass; + +-- Fail while setting improper values +create index btree_idx_err on btree_test(a) with (vacuum_cleanup_index_scale_factor = -10.0); +create index btree_idx_err on btree_test(a) with (vacuum_cleanup_index_scale_factor = 100.0); +create index btree_idx_err on btree_test(a) with (vacuum_cleanup_index_scale_factor = 'string'); +create index btree_idx_err on btree_test(a) with (vacuum_cleanup_index_scale_factor = true); + +-- Simple ALTER INDEX +alter index btree_idx1 set (vacuum_cleanup_index_scale_factor = 70.0); +select reloptions from pg_class WHERE oid = 'btree_idx1'::regclass; diff --git a/src/test/regress/sql/case.sql b/src/test/regress/sql/case.sql index 66b6e98fb1..17436c524a 100644 --- a/src/test/regress/sql/case.sql +++ b/src/test/regress/sql/case.sql @@ -233,6 +233,19 @@ SELECT CASE make_ad(1,2) ROLLBACK; +-- Test interaction of CASE with ArrayCoerceExpr (bug #15471) +BEGIN; + +CREATE TYPE casetestenum AS ENUM ('e', 'f', 'g'); + +SELECT + CASE 'foo'::text + WHEN 'foo' THEN ARRAY['a', 'b', 'c', 'd'] || enum_range(NULL::casetestenum)::text[] + ELSE ARRAY['x', 'y'] + END; + +ROLLBACK; + -- -- Clean up -- diff --git a/src/test/regress/sql/circle.sql b/src/test/regress/sql/circle.sql index c0284b2b59..46c96e1400 100644 --- a/src/test/regress/sql/circle.sql +++ b/src/test/regress/sql/circle.sql @@ -14,12 +14,20 @@ INSERT INTO CIRCLE_TBL VALUES ('((1,2),3)'); INSERT INTO CIRCLE_TBL VALUES ('<(100,200),10>'); -INSERT INTO CIRCLE_TBL VALUES ('<(100,1),115>'); +INSERT INTO CIRCLE_TBL VALUES (' < ( 100 , 1 ) , 115 > '); + +INSERT INTO CIRCLE_TBL VALUES ('<(3,5),0>'); -- Zero radius + +INSERT INTO CIRCLE_TBL VALUES ('<(3,5),NaN>'); -- NaN radius -- bad values INSERT INTO CIRCLE_TBL VALUES ('<(-100,0),-100>'); +INSERT INTO CIRCLE_TBL VALUES ('<(100,200),10'); + +INSERT INTO CIRCLE_TBL VALUES ('<(100,200),10> x'); + INSERT INTO CIRCLE_TBL VALUES ('1abc,3,5'); INSERT INTO CIRCLE_TBL VALUES ('(3,(1,2),3)'); diff --git a/src/test/regress/sql/cluster.sql b/src/test/regress/sql/cluster.sql index 8dd9459bda..522bfeead4 100644 --- a/src/test/regress/sql/cluster.sql +++ b/src/test/regress/sql/cluster.sql @@ -196,6 +196,13 @@ drop table clstr_temp; RESET SESSION AUTHORIZATION; +-- Check that partitioned tables cannot be clustered +CREATE TABLE clstrpart (a int) PARTITION BY RANGE (a); +CREATE INDEX clstrpart_idx ON clstrpart (a); +ALTER TABLE clstrpart CLUSTER ON clstrpart_idx; +CLUSTER clstrpart USING clstrpart_idx; +DROP TABLE clstrpart; + -- Test CLUSTER with external tuplesorting create table clstr_4 as select * from tenk1; @@ -203,19 +210,8 @@ create index cluster_sort on clstr_4 (hundred, thousand, tenthous); -- ensure we don't use the index in CLUSTER nor the checking SELECTs set enable_indexscan = off; --- Use external sort that only ever uses quicksort to sort runs: +-- Use external sort: set maintenance_work_mem = '1MB'; -set replacement_sort_tuples = 0; -cluster clstr_4 using cluster_sort; -select * from -(select hundred, lag(hundred) over () as lhundred, - thousand, lag(thousand) over () as lthousand, - tenthous, lag(tenthous) over () as ltenthous from clstr_4) ss -where row(hundred, thousand, tenthous) <= row(lhundred, lthousand, 
ltenthous); - --- Replacement selection will now be forced. It should only produce a single --- run, due to the fact that input is found to be presorted: -set replacement_sort_tuples = 150000; cluster clstr_4 using cluster_sort; select * from (select hundred, lag(hundred) over () as lhundred, @@ -225,7 +221,6 @@ where row(hundred, thousand, tenthous) <= row(lhundred, lthousand, ltenthous); reset enable_indexscan; reset maintenance_work_mem; -reset replacement_sort_tuples; -- clean up DROP TABLE clustertest; diff --git a/src/test/regress/sql/collate.sql b/src/test/regress/sql/collate.sql index 698f577490..4ddde95a5e 100644 --- a/src/test/regress/sql/collate.sql +++ b/src/test/regress/sql/collate.sql @@ -239,6 +239,8 @@ DROP COLLATION mycoll1; CREATE TABLE collate_test23 (f1 text collate mycoll2); DROP COLLATION mycoll2; -- fail +-- invalid: non-lowercase quoted identifiers +CREATE COLLATION case_coll ("Lc_Collate" = "POSIX", "Lc_Ctype" = "POSIX"); -- 9.1 bug with useless COLLATE in an expression subject to length coercion diff --git a/src/test/regress/sql/create_aggregate.sql b/src/test/regress/sql/create_aggregate.sql index ae3a6c0ebe..cb6552e2d6 100644 --- a/src/test/regress/sql/create_aggregate.sql +++ b/src/test/regress/sql/create_aggregate.sql @@ -86,7 +86,8 @@ create aggregate my_percentile_disc(float8 ORDER BY anyelement) ( stype = internal, sfunc = ordered_set_transition, finalfunc = percentile_disc_final, - finalfunc_extra = true + finalfunc_extra = true, + finalfunc_modify = read_write ); create aggregate my_rank(VARIADIC "any" ORDER BY VARIADIC "any") ( @@ -161,11 +162,13 @@ CREATE AGGREGATE myavg (numeric) finalfunc = numeric_avg, serialfunc = numeric_avg_serialize, deserialfunc = numeric_avg_deserialize, - combinefunc = numeric_avg_combine + combinefunc = numeric_avg_combine, + finalfunc_modify = shareable -- just to test a non-default setting ); -- Ensure all these functions made it into the catalog -SELECT aggfnoid,aggtransfn,aggcombinefn,aggtranstype,aggserialfn,aggdeserialfn +SELECT aggfnoid, aggtransfn, aggcombinefn, aggtranstype::regtype, + aggserialfn, aggdeserialfn, aggfinalmodify FROM pg_aggregate WHERE aggfnoid = 'myavg'::REGPROC; @@ -208,3 +211,23 @@ CREATE AGGREGATE wrongreturntype (float8) msfunc = float8pl, minvfunc = float8mi_int ); + +-- invalid: non-lowercase quoted identifiers + +CREATE AGGREGATE case_agg ( -- old syntax + "Sfunc1" = int4pl, + "Basetype" = int4, + "Stype1" = int4, + "Initcond1" = '0', + "Parallel" = safe +); + +CREATE AGGREGATE case_agg(float8) +( + "Stype" = internal, + "Sfunc" = ordered_set_transition, + "Finalfunc" = percentile_disc_final, + "Finalfunc_extra" = true, + "Finalfunc_modify" = read_write, + "Parallel" = safe +); diff --git a/src/test/regress/sql/create_am.sql b/src/test/regress/sql/create_am.sql index 2f116d98c7..3e0ac104f3 100644 --- a/src/test/regress/sql/create_am.sql +++ b/src/test/regress/sql/create_am.sql @@ -27,12 +27,10 @@ CREATE OPERATOR CLASS box_ops DEFAULT OPERATOR 14 @, FUNCTION 1 gist_box_consistent(internal, box, smallint, oid, internal), FUNCTION 2 gist_box_union(internal, internal), - FUNCTION 3 gist_box_compress(internal), - FUNCTION 4 gist_box_decompress(internal), + -- don't need compress, decompress, or fetch functions FUNCTION 5 gist_box_penalty(internal, internal, internal), FUNCTION 6 gist_box_picksplit(internal, internal), - FUNCTION 7 gist_box_same(box, box, internal), - FUNCTION 9 gist_box_fetch(internal); + FUNCTION 7 gist_box_same(box, box, internal); -- Create gist2 index on fast_emp4000 CREATE 
INDEX grect2ind2 ON fast_emp4000 USING gist2 (home_base); diff --git a/src/test/regress/sql/create_function_3.sql b/src/test/regress/sql/create_function_3.sql index 0a0e407aab..24bb900990 100644 --- a/src/test/regress/sql/create_function_3.sql +++ b/src/test/regress/sql/create_function_3.sql @@ -1,8 +1,11 @@ -- -- CREATE FUNCTION -- --- sanity check of pg_proc catalog to the given parameters +-- Assorted tests using SQL-language functions -- + +-- All objects made in this test are in temp_func_test schema + CREATE USER regress_unpriv_user; CREATE SCHEMA temp_func_test; @@ -10,6 +13,10 @@ GRANT ALL ON SCHEMA temp_func_test TO public; SET search_path TO temp_func_test, public; +-- +-- Make sanity checks on the pg_proc entries created by CREATE FUNCTION +-- + -- -- ARGUMENT and RETURN TYPES -- @@ -52,86 +59,94 @@ SELECT proname, provolatile FROM pg_proc -- -- SECURITY DEFINER | INVOKER -- -CREATE FUNCTION functext_C_1(int) RETURNS bool LANGUAGE 'sql' +CREATE FUNCTION functest_C_1(int) RETURNS bool LANGUAGE 'sql' AS 'SELECT $1 > 0'; -CREATE FUNCTION functext_C_2(int) RETURNS bool LANGUAGE 'sql' +CREATE FUNCTION functest_C_2(int) RETURNS bool LANGUAGE 'sql' SECURITY DEFINER AS 'SELECT $1 = 0'; -CREATE FUNCTION functext_C_3(int) RETURNS bool LANGUAGE 'sql' +CREATE FUNCTION functest_C_3(int) RETURNS bool LANGUAGE 'sql' SECURITY INVOKER AS 'SELECT $1 < 0'; SELECT proname, prosecdef FROM pg_proc - WHERE oid in ('functext_C_1'::regproc, - 'functext_C_2'::regproc, - 'functext_C_3'::regproc) ORDER BY proname; + WHERE oid in ('functest_C_1'::regproc, + 'functest_C_2'::regproc, + 'functest_C_3'::regproc) ORDER BY proname; -ALTER FUNCTION functext_C_1(int) IMMUTABLE; -- unrelated change, no effect -ALTER FUNCTION functext_C_2(int) SECURITY INVOKER; -ALTER FUNCTION functext_C_3(int) SECURITY DEFINER; +ALTER FUNCTION functest_C_1(int) IMMUTABLE; -- unrelated change, no effect +ALTER FUNCTION functest_C_2(int) SECURITY INVOKER; +ALTER FUNCTION functest_C_3(int) SECURITY DEFINER; SELECT proname, prosecdef FROM pg_proc - WHERE oid in ('functext_C_1'::regproc, - 'functext_C_2'::regproc, - 'functext_C_3'::regproc) ORDER BY proname; + WHERE oid in ('functest_C_1'::regproc, + 'functest_C_2'::regproc, + 'functest_C_3'::regproc) ORDER BY proname; -- -- LEAKPROOF -- -CREATE FUNCTION functext_E_1(int) RETURNS bool LANGUAGE 'sql' +CREATE FUNCTION functest_E_1(int) RETURNS bool LANGUAGE 'sql' AS 'SELECT $1 > 100'; -CREATE FUNCTION functext_E_2(int) RETURNS bool LANGUAGE 'sql' +CREATE FUNCTION functest_E_2(int) RETURNS bool LANGUAGE 'sql' LEAKPROOF AS 'SELECT $1 > 100'; SELECT proname, proleakproof FROM pg_proc - WHERE oid in ('functext_E_1'::regproc, - 'functext_E_2'::regproc) ORDER BY proname; + WHERE oid in ('functest_E_1'::regproc, + 'functest_E_2'::regproc) ORDER BY proname; -ALTER FUNCTION functext_E_1(int) LEAKPROOF; -ALTER FUNCTION functext_E_2(int) STABLE; -- unrelated change, no effect +ALTER FUNCTION functest_E_1(int) LEAKPROOF; +ALTER FUNCTION functest_E_2(int) STABLE; -- unrelated change, no effect SELECT proname, proleakproof FROM pg_proc - WHERE oid in ('functext_E_1'::regproc, - 'functext_E_2'::regproc) ORDER BY proname; + WHERE oid in ('functest_E_1'::regproc, + 'functest_E_2'::regproc) ORDER BY proname; -ALTER FUNCTION functext_E_2(int) NOT LEAKPROOF; -- remove leakproog attribute +ALTER FUNCTION functest_E_2(int) NOT LEAKPROOF; -- remove leakproof attribute SELECT proname, proleakproof FROM pg_proc - WHERE oid in ('functext_E_1'::regproc, - 'functext_E_2'::regproc) ORDER BY proname; + WHERE oid in 
('functest_E_1'::regproc, + 'functest_E_2'::regproc) ORDER BY proname; --- it takes superuser privilege to turn on leakproof, but not for turn off -ALTER FUNCTION functext_E_1(int) OWNER TO regress_unpriv_user; -ALTER FUNCTION functext_E_2(int) OWNER TO regress_unpriv_user; +-- it takes superuser privilege to turn on leakproof, but not to turn off +ALTER FUNCTION functest_E_1(int) OWNER TO regress_unpriv_user; +ALTER FUNCTION functest_E_2(int) OWNER TO regress_unpriv_user; SET SESSION AUTHORIZATION regress_unpriv_user; SET search_path TO temp_func_test, public; -ALTER FUNCTION functext_E_1(int) NOT LEAKPROOF; -ALTER FUNCTION functext_E_2(int) LEAKPROOF; +ALTER FUNCTION functest_E_1(int) NOT LEAKPROOF; +ALTER FUNCTION functest_E_2(int) LEAKPROOF; -CREATE FUNCTION functext_E_3(int) RETURNS bool LANGUAGE 'sql' - LEAKPROOF AS 'SELECT $1 < 200'; -- failed +CREATE FUNCTION functest_E_3(int) RETURNS bool LANGUAGE 'sql' + LEAKPROOF AS 'SELECT $1 < 200'; -- fail RESET SESSION AUTHORIZATION; -- -- CALLED ON NULL INPUT | RETURNS NULL ON NULL INPUT | STRICT -- -CREATE FUNCTION functext_F_1(int) RETURNS bool LANGUAGE 'sql' +CREATE FUNCTION functest_F_1(int) RETURNS bool LANGUAGE 'sql' AS 'SELECT $1 > 50'; -CREATE FUNCTION functext_F_2(int) RETURNS bool LANGUAGE 'sql' +CREATE FUNCTION functest_F_2(int) RETURNS bool LANGUAGE 'sql' CALLED ON NULL INPUT AS 'SELECT $1 = 50'; -CREATE FUNCTION functext_F_3(int) RETURNS bool LANGUAGE 'sql' +CREATE FUNCTION functest_F_3(int) RETURNS bool LANGUAGE 'sql' RETURNS NULL ON NULL INPUT AS 'SELECT $1 < 50'; -CREATE FUNCTION functext_F_4(int) RETURNS bool LANGUAGE 'sql' +CREATE FUNCTION functest_F_4(int) RETURNS bool LANGUAGE 'sql' STRICT AS 'SELECT $1 = 50'; SELECT proname, proisstrict FROM pg_proc - WHERE oid in ('functext_F_1'::regproc, - 'functext_F_2'::regproc, - 'functext_F_3'::regproc, - 'functext_F_4'::regproc) ORDER BY proname; - -ALTER FUNCTION functext_F_1(int) IMMUTABLE; -- unrelated change, no effect -ALTER FUNCTION functext_F_2(int) STRICT; -ALTER FUNCTION functext_F_3(int) CALLED ON NULL INPUT; + WHERE oid in ('functest_F_1'::regproc, + 'functest_F_2'::regproc, + 'functest_F_3'::regproc, + 'functest_F_4'::regproc) ORDER BY proname; + +ALTER FUNCTION functest_F_1(int) IMMUTABLE; -- unrelated change, no effect +ALTER FUNCTION functest_F_2(int) STRICT; +ALTER FUNCTION functest_F_3(int) CALLED ON NULL INPUT; SELECT proname, proisstrict FROM pg_proc - WHERE oid in ('functext_F_1'::regproc, - 'functext_F_2'::regproc, - 'functext_F_3'::regproc, - 'functext_F_4'::regproc) ORDER BY proname; + WHERE oid in ('functest_F_1'::regproc, + 'functest_F_2'::regproc, + 'functest_F_3'::regproc, + 'functest_F_4'::regproc) ORDER BY proname; + + +-- pg_get_functiondef tests + +SELECT pg_get_functiondef('functest_A_1'::regproc); +SELECT pg_get_functiondef('functest_B_3'::regproc); +SELECT pg_get_functiondef('functest_C_3'::regproc); +SELECT pg_get_functiondef('functest_F_2'::regproc); -- information_schema tests @@ -167,7 +182,46 @@ DROP FUNCTION functest_b_1; -- error, not found DROP FUNCTION functest_b_2; -- error, ambiguous --- Cleanups +-- CREATE OR REPLACE tests + +CREATE FUNCTION functest1(a int) RETURNS int LANGUAGE SQL AS 'SELECT $1'; +CREATE OR REPLACE FUNCTION functest1(a int) RETURNS int LANGUAGE SQL WINDOW AS 'SELECT $1'; +CREATE OR REPLACE PROCEDURE functest1(a int) LANGUAGE SQL AS 'SELECT $1'; +DROP FUNCTION functest1(a int); + + +-- Check behavior of VOID-returning SQL functions + +CREATE FUNCTION voidtest1(a int) RETURNS VOID LANGUAGE SQL AS +$$ SELECT a + 1 $$; 
+SELECT voidtest1(42); + +CREATE FUNCTION voidtest2(a int, b int) RETURNS VOID LANGUAGE SQL AS +$$ SELECT voidtest1(a + b) $$; +SELECT voidtest2(11,22); + +-- currently, we can inline voidtest2 but not voidtest1 +EXPLAIN (verbose, costs off) SELECT voidtest2(11,22); + +CREATE TEMP TABLE sometable(f1 int); + +CREATE FUNCTION voidtest3(a int) RETURNS VOID LANGUAGE SQL AS +$$ INSERT INTO sometable VALUES(a + 1) $$; +SELECT voidtest3(17); + +CREATE FUNCTION voidtest4(a int) RETURNS VOID LANGUAGE SQL AS +$$ INSERT INTO sometable VALUES(a - 1) RETURNING f1 $$; +SELECT voidtest4(39); + +TABLE sometable; + +CREATE FUNCTION voidtest5(a int) RETURNS SETOF VOID LANGUAGE SQL AS +$$ SELECT generate_series(1, a) $$ STABLE; +SELECT * FROM voidtest5(3); + +-- Cleanup +\set VERBOSITY terse \\ -- suppress cascade details DROP SCHEMA temp_func_test CASCADE; +\set VERBOSITY default DROP USER regress_unpriv_user; RESET search_path; diff --git a/src/test/regress/sql/create_index.sql b/src/test/regress/sql/create_index.sql index 67470db918..be7f261871 100644 --- a/src/test/regress/sql/create_index.sql +++ b/src/test/regress/sql/create_index.sql @@ -198,6 +198,18 @@ SELECT count(*) FROM quad_point_tbl WHERE p >^ '(5000, 4000)'; SELECT count(*) FROM quad_point_tbl WHERE p ~= '(4585, 365)'; +CREATE TEMP TABLE quad_point_tbl_ord_seq1 AS +SELECT rank() OVER (ORDER BY p <-> '0,0') n, p <-> '0,0' dist, p +FROM quad_point_tbl; + +CREATE TEMP TABLE quad_point_tbl_ord_seq2 AS +SELECT rank() OVER (ORDER BY p <-> '0,0') n, p <-> '0,0' dist, p +FROM quad_point_tbl WHERE p <@ box '(200,200,1000,1000)'; + +CREATE TEMP TABLE quad_point_tbl_ord_seq3 AS +SELECT rank() OVER (ORDER BY p <-> '333,400') n, p <-> '333,400' dist, p +FROM quad_point_tbl WHERE p IS NOT NULL; + SELECT count(*) FROM radix_text_tbl WHERE t = 'P0123456789abcdef'; SELECT count(*) FROM radix_text_tbl WHERE t = 'P0123456789abcde'; @@ -224,6 +236,8 @@ SELECT count(*) FROM radix_text_tbl WHERE t > 'Worth SELECT count(*) FROM radix_text_tbl WHERE t ~>~ 'Worth St '; +SELECT count(*) FROM radix_text_tbl WHERE t ^@ 'Worth'; + SELECT * FROM gpolygon_tbl ORDER BY f1 <-> '(0,0)'::point LIMIT 10; SELECT circle_center(f1), round(radius(f1)) as radius FROM gcircle_tbl ORDER BY f1 <-> '(200,300)'::point LIMIT 10; @@ -361,6 +375,39 @@ EXPLAIN (COSTS OFF) SELECT count(*) FROM quad_point_tbl WHERE p ~= '(4585, 365)'; SELECT count(*) FROM quad_point_tbl WHERE p ~= '(4585, 365)'; +EXPLAIN (COSTS OFF) +SELECT rank() OVER (ORDER BY p <-> '0,0') n, p <-> '0,0' dist, p +FROM quad_point_tbl; +CREATE TEMP TABLE quad_point_tbl_ord_idx1 AS +SELECT rank() OVER (ORDER BY p <-> '0,0') n, p <-> '0,0' dist, p +FROM quad_point_tbl; +SELECT * FROM quad_point_tbl_ord_seq1 seq FULL JOIN quad_point_tbl_ord_idx1 idx +ON seq.n = idx.n +AND (seq.dist = idx.dist AND seq.p ~= idx.p OR seq.p IS NULL AND idx.p IS NULL) +WHERE seq.n IS NULL OR idx.n IS NULL; + +EXPLAIN (COSTS OFF) +SELECT rank() OVER (ORDER BY p <-> '0,0') n, p <-> '0,0' dist, p +FROM quad_point_tbl WHERE p <@ box '(200,200,1000,1000)'; +CREATE TEMP TABLE quad_point_tbl_ord_idx2 AS +SELECT rank() OVER (ORDER BY p <-> '0,0') n, p <-> '0,0' dist, p +FROM quad_point_tbl WHERE p <@ box '(200,200,1000,1000)'; +SELECT * FROM quad_point_tbl_ord_seq2 seq FULL JOIN quad_point_tbl_ord_idx2 idx +ON seq.n = idx.n +AND (seq.dist = idx.dist AND seq.p ~= idx.p OR seq.p IS NULL AND idx.p IS NULL) +WHERE seq.n IS NULL OR idx.n IS NULL; + +EXPLAIN (COSTS OFF) +SELECT rank() OVER (ORDER BY p <-> '333,400') n, p <-> '333,400' dist, p +FROM quad_point_tbl 
WHERE p IS NOT NULL; +CREATE TEMP TABLE quad_point_tbl_ord_idx3 AS +SELECT rank() OVER (ORDER BY p <-> '333,400') n, p <-> '333,400' dist, p +FROM quad_point_tbl WHERE p IS NOT NULL; +SELECT * FROM quad_point_tbl_ord_seq3 seq FULL JOIN quad_point_tbl_ord_idx3 idx +ON seq.n = idx.n +AND (seq.dist = idx.dist AND seq.p ~= idx.p OR seq.p IS NULL AND idx.p IS NULL) +WHERE seq.n IS NULL OR idx.n IS NULL; + EXPLAIN (COSTS OFF) SELECT count(*) FROM kd_point_tbl WHERE p <@ box '(200,200,1000,1000)'; SELECT count(*) FROM kd_point_tbl WHERE p <@ box '(200,200,1000,1000)'; @@ -389,6 +436,39 @@ EXPLAIN (COSTS OFF) SELECT count(*) FROM kd_point_tbl WHERE p ~= '(4585, 365)'; SELECT count(*) FROM kd_point_tbl WHERE p ~= '(4585, 365)'; +EXPLAIN (COSTS OFF) +SELECT rank() OVER (ORDER BY p <-> '0,0') n, p <-> '0,0' dist, p +FROM kd_point_tbl; +CREATE TEMP TABLE kd_point_tbl_ord_idx1 AS +SELECT rank() OVER (ORDER BY p <-> '0,0') n, p <-> '0,0' dist, p +FROM kd_point_tbl; +SELECT * FROM quad_point_tbl_ord_seq1 seq FULL JOIN kd_point_tbl_ord_idx1 idx +ON seq.n = idx.n AND +(seq.dist = idx.dist AND seq.p ~= idx.p OR seq.p IS NULL AND idx.p IS NULL) +WHERE seq.n IS NULL OR idx.n IS NULL; + +EXPLAIN (COSTS OFF) +SELECT rank() OVER (ORDER BY p <-> '0,0') n, p <-> '0,0' dist, p +FROM kd_point_tbl WHERE p <@ box '(200,200,1000,1000)'; +CREATE TEMP TABLE kd_point_tbl_ord_idx2 AS +SELECT rank() OVER (ORDER BY p <-> '0,0') n, p <-> '0,0' dist, p +FROM kd_point_tbl WHERE p <@ box '(200,200,1000,1000)'; +SELECT * FROM quad_point_tbl_ord_seq2 seq FULL JOIN kd_point_tbl_ord_idx2 idx +ON seq.n = idx.n AND +(seq.dist = idx.dist AND seq.p ~= idx.p OR seq.p IS NULL AND idx.p IS NULL) +WHERE seq.n IS NULL OR idx.n IS NULL; + +EXPLAIN (COSTS OFF) +SELECT rank() OVER (ORDER BY p <-> '333,400') n, p <-> '333,400' dist, p +FROM kd_point_tbl WHERE p IS NOT NULL; +CREATE TEMP TABLE kd_point_tbl_ord_idx3 AS +SELECT rank() OVER (ORDER BY p <-> '333,400') n, p <-> '333,400' dist, p +FROM kd_point_tbl WHERE p IS NOT NULL; +SELECT * FROM quad_point_tbl_ord_seq3 seq FULL JOIN kd_point_tbl_ord_idx3 idx +ON seq.n = idx.n AND +(seq.dist = idx.dist AND seq.p ~= idx.p OR seq.p IS NULL AND idx.p IS NULL) +WHERE seq.n IS NULL OR idx.n IS NULL; + EXPLAIN (COSTS OFF) SELECT count(*) FROM radix_text_tbl WHERE t = 'P0123456789abcdef'; SELECT count(*) FROM radix_text_tbl WHERE t = 'P0123456789abcdef'; @@ -441,6 +521,10 @@ EXPLAIN (COSTS OFF) SELECT count(*) FROM radix_text_tbl WHERE t ~>~ 'Worth St '; SELECT count(*) FROM radix_text_tbl WHERE t ~>~ 'Worth St '; +EXPLAIN (COSTS OFF) +SELECT count(*) FROM radix_text_tbl WHERE t ^@ 'Worth'; +SELECT count(*) FROM radix_text_tbl WHERE t ^@ 'Worth'; + EXPLAIN (COSTS OFF) SELECT * FROM gpolygon_tbl ORDER BY f1 <-> '(0,0)'::point LIMIT 10; SELECT * FROM gpolygon_tbl ORDER BY f1 <-> '(0,0)'::point LIMIT 10; @@ -578,6 +662,10 @@ EXPLAIN (COSTS OFF) SELECT count(*) FROM radix_text_tbl WHERE t ~>~ 'Worth St '; SELECT count(*) FROM radix_text_tbl WHERE t ~>~ 'Worth St '; +EXPLAIN (COSTS OFF) +SELECT count(*) FROM radix_text_tbl WHERE t ^@ 'Worth'; +SELECT count(*) FROM radix_text_tbl WHERE t ^@ 'Worth'; + RESET enable_seqscan; RESET enable_indexscan; RESET enable_bitmapscan; @@ -682,7 +770,7 @@ CREATE INDEX hash_name_index ON hash_name_heap USING hash (random name_ops); CREATE INDEX hash_txt_index ON hash_txt_heap USING hash (random text_ops); -CREATE INDEX hash_f8_index ON hash_f8_heap USING hash (random float8_ops); +CREATE INDEX hash_f8_index ON hash_f8_heap USING hash (random float8_ops) WITH (fillfactor=60); 
CREATE UNLOGGED TABLE unlogged_hash_table (id int4); CREATE INDEX unlogged_hash_index ON unlogged_hash_table USING hash (id int4_ops); @@ -731,6 +819,26 @@ INSERT INTO func_index_heap VALUES('ABCD', 'EF'); -- but this shouldn't: INSERT INTO func_index_heap VALUES('QWERTY'); +-- +-- Test unique index with included columns +-- +CREATE TABLE covering_index_heap (f1 int, f2 int, f3 text); +CREATE UNIQUE INDEX covering_index_index on covering_index_heap (f1,f2) INCLUDE(f3); + +INSERT INTO covering_index_heap VALUES(1,1,'AAA'); +INSERT INTO covering_index_heap VALUES(1,2,'AAA'); +-- this should fail because of unique index on f1,f2: +INSERT INTO covering_index_heap VALUES(1,2,'BBB'); +-- and this shouldn't: +INSERT INTO covering_index_heap VALUES(1,4,'AAA'); +-- Try to build index on table that already contains data +CREATE UNIQUE INDEX covering_pkey on covering_index_heap (f1,f2) INCLUDE(f3); +-- Try to use existing covering index as primary key +ALTER TABLE covering_index_heap ADD CONSTRAINT covering_pkey PRIMARY KEY USING INDEX +covering_pkey; +DROP TABLE covering_index_heap; + + -- -- Also try building functional, expressional, and partial indexes on -- tables that already contain data. @@ -834,6 +942,12 @@ DROP INDEX cwi_replaced_pkey; -- Should fail; a constraint depends on it DROP TABLE cwi_test; +-- ADD CONSTRAINT USING INDEX is forbidden on partitioned tables +CREATE TABLE cwi_test(a int) PARTITION BY hash (a); +create unique index on cwi_test (a); +alter table cwi_test add primary key using index cwi_test_a_idx ; +DROP TABLE cwi_test; + -- -- Check handling of indexes on system columns -- @@ -1025,6 +1139,17 @@ explain (costs off) explain (costs off) select * from boolindex where not b order by i limit 10; +-- +-- Test for multilevel page deletion +-- +CREATE TABLE delete_test_table (a bigint, b bigint, c bigint, d bigint); +INSERT INTO delete_test_table SELECT i, 1, 2, 3 FROM generate_series(1,80000) i; +ALTER TABLE delete_test_table ADD PRIMARY KEY (a,b,c,d); +DELETE FROM delete_test_table WHERE a > 40000; +VACUUM delete_test_table; +DELETE FROM delete_test_table WHERE a > 10; +VACUUM delete_test_table; + -- -- REINDEX (VERBOSE) -- diff --git a/src/test/regress/sql/create_misc.sql b/src/test/regress/sql/create_misc.sql index 705a7e55b1..d4a63b7aed 100644 --- a/src/test/regress/sql/create_misc.sql +++ b/src/test/regress/sql/create_misc.sql @@ -37,6 +37,11 @@ INSERT INTO equipment_r (name, hobby) VALUES ('hightops', 'basketball'); INSERT INTO equipment_r (name, hobby) VALUES ('guts', 'skywalking'); +INSERT INTO city VALUES +('Podunk', '(1,2),(3,4)', '100,127,1000'), +('Gotham', '(1000,34),(1100,334)', '123456,127,-1000,6789'); +TABLE city; + SELECT * INTO TABLE ramp FROM road diff --git a/src/test/regress/sql/create_operator.sql b/src/test/regress/sql/create_operator.sql index 0e5d6356bc..8b6fd0bb43 100644 --- a/src/test/regress/sql/create_operator.sql +++ b/src/test/regress/sql/create_operator.sql @@ -5,7 +5,7 @@ CREATE OPERATOR ## ( leftarg = path, rightarg = path, - procedure = path_inter, + function = path_inter, commutator = ## ); @@ -32,6 +32,10 @@ CREATE OPERATOR #%# ( procedure = numeric_fac ); +-- Test operator created above +SELECT point '(1,2)' <% widget '(0,0,3)' AS t, + point '(1,2)' <% widget '(0,0,1)' AS f; + -- Test comments COMMENT ON OPERATOR ###### (int4, NONE) IS 'bad right unary'; @@ -41,6 +45,37 @@ CREATE OPERATOR => ( procedure = numeric_fac ); +-- lexing of <=, >=, <>, != has a number of edge cases +-- (=> is tested elsewhere) + +-- this is legal because ! 
is not allowed in sql ops +CREATE OPERATOR !=- ( + leftarg = int8, -- right unary + procedure = numeric_fac +); +SELECT 2 !=-; +-- make sure lexer returns != as <> even in edge cases +SELECT 2 !=/**/ 1, 2 !=/**/ 2; +SELECT 2 !=-- comment to be removed by psql + 1; +DO $$ -- use DO to protect -- from psql + declare r boolean; + begin + execute $e$ select 2 !=-- comment + 1 $e$ into r; + raise info 'r = %', r; + end; +$$; + +-- check that <= etc. followed by more operator characters are returned +-- as the correct token with correct precedence +SELECT true<>-1 BETWEEN 1 AND 1; -- BETWEEN has prec. above <> but below Op +SELECT false<>/**/1 BETWEEN 1 AND 1; +SELECT false<=-1 BETWEEN 1 AND 1; +SELECT false>=-1 BETWEEN 1 AND 1; +SELECT 2<=/**/3, 3>=/**/2, 2<>/**/3; +SELECT 3<=/**/2, 2>=/**/3, 2<>/**/2; + -- Should fail. CREATE OPERATOR requires USAGE on SCHEMA BEGIN TRANSACTION; CREATE ROLE regress_rol_op1; @@ -179,3 +214,17 @@ CREATE OPERATOR #*# ( procedure = fn_op6 ); ROLLBACK; + +-- invalid: non-lowercase quoted identifiers +CREATE OPERATOR === +( + "Leftarg" = box, + "Rightarg" = box, + "Procedure" = area_equal_function, + "Commutator" = ===, + "Negator" = !==, + "Restrict" = area_restriction_function, + "Join" = area_join_function, + "Hashes", + "Merges" +); diff --git a/src/test/regress/sql/create_procedure.sql b/src/test/regress/sql/create_procedure.sql new file mode 100644 index 0000000000..b64293ed66 --- /dev/null +++ b/src/test/regress/sql/create_procedure.sql @@ -0,0 +1,155 @@ +CALL nonexistent(); -- error +CALL random(); -- error + +CREATE FUNCTION cp_testfunc1(a int) RETURNS int LANGUAGE SQL AS $$ SELECT a $$; + +CREATE TABLE cp_test (a int, b text); + +CREATE PROCEDURE ptest1(x text) +LANGUAGE SQL +AS $$ +INSERT INTO cp_test VALUES (1, x); +$$; + +\df ptest1 +SELECT pg_get_functiondef('ptest1'::regproc); + +-- show only normal functions +\dfn public.*test*1 + +-- show only procedures +\dfp public.*test*1 + +SELECT ptest1('x'); -- error +CALL ptest1('a'); -- ok +CALL ptest1('xy' || 'zzy'); -- ok, constant-folded arg +CALL ptest1(substring(random()::numeric(20,15)::text, 1, 1)); -- ok, volatile arg + +SELECT * FROM cp_test ORDER BY b COLLATE "C"; + + +CREATE PROCEDURE ptest2() +LANGUAGE SQL +AS $$ +SELECT 5; +$$; + +CALL ptest2(); + + +-- nested CALL +TRUNCATE cp_test; + +CREATE PROCEDURE ptest3(y text) +LANGUAGE SQL +AS $$ +CALL ptest1(y); +CALL ptest1($1); +$$; + +CALL ptest3('b'); + +SELECT * FROM cp_test; + + +-- output arguments + +CREATE PROCEDURE ptest4a(INOUT a int, INOUT b int) +LANGUAGE SQL +AS $$ +SELECT 1, 2; +$$; + +CALL ptest4a(NULL, NULL); + +CREATE PROCEDURE ptest4b(INOUT b int, INOUT a int) +LANGUAGE SQL +AS $$ +CALL ptest4a(a, b); -- error, not supported +$$; + +DROP PROCEDURE ptest4a; + + +-- named and default parameters + +CREATE OR REPLACE PROCEDURE ptest5(a int, b text, c int default 100) +LANGUAGE SQL +AS $$ +INSERT INTO cp_test VALUES(a, b); +INSERT INTO cp_test VALUES(c, b); +$$; + +TRUNCATE cp_test; + +CALL ptest5(10, 'Hello', 20); +CALL ptest5(10, 'Hello'); +CALL ptest5(10, b => 'Hello'); +CALL ptest5(b => 'Hello', a => 10); + +SELECT * FROM cp_test; + + +-- polymorphic types + +CREATE PROCEDURE ptest6(a int, b anyelement) +LANGUAGE SQL +AS $$ +SELECT NULL::int; +$$; + +CALL ptest6(1, 2); + + +-- various error cases + +CALL version(); -- error: not a procedure +CALL sum(1); -- error: not a procedure + +CREATE PROCEDURE ptestx() LANGUAGE SQL WINDOW AS $$ INSERT INTO cp_test VALUES (1, 'a') $$; +CREATE PROCEDURE ptestx() LANGUAGE SQL STRICT AS $$ INSERT 
INTO cp_test VALUES (1, 'a') $$; +CREATE PROCEDURE ptestx(OUT a int) LANGUAGE SQL AS $$ INSERT INTO cp_test VALUES (1, 'a') $$; + +ALTER PROCEDURE ptest1(text) STRICT; +ALTER FUNCTION ptest1(text) VOLATILE; -- error: not a function +ALTER PROCEDURE cp_testfunc1(int) VOLATILE; -- error: not a procedure +ALTER PROCEDURE nonexistent() VOLATILE; + +DROP FUNCTION ptest1(text); -- error: not a function +DROP PROCEDURE cp_testfunc1(int); -- error: not a procedure +DROP PROCEDURE nonexistent(); + + +-- privileges + +CREATE USER regress_cp_user1; +GRANT INSERT ON cp_test TO regress_cp_user1; +REVOKE EXECUTE ON PROCEDURE ptest1(text) FROM PUBLIC; +SET ROLE regress_cp_user1; +CALL ptest1('a'); -- error +RESET ROLE; +GRANT EXECUTE ON PROCEDURE ptest1(text) TO regress_cp_user1; +SET ROLE regress_cp_user1; +CALL ptest1('a'); -- ok +RESET ROLE; + + +-- ROUTINE syntax + +ALTER ROUTINE cp_testfunc1(int) RENAME TO cp_testfunc1a; +ALTER ROUTINE cp_testfunc1a RENAME TO cp_testfunc1; + +ALTER ROUTINE ptest1(text) RENAME TO ptest1a; +ALTER ROUTINE ptest1a RENAME TO ptest1; + +DROP ROUTINE cp_testfunc1(int); + + +-- cleanup + +DROP PROCEDURE ptest1; +DROP PROCEDURE ptest2; + +DROP TABLE cp_test; + +DROP USER regress_cp_user1; diff --git a/src/test/regress/sql/create_table.sql b/src/test/regress/sql/create_table.sql index 1c0ce92763..2af4455ecf 100644 --- a/src/test/regress/sql/create_table.sql +++ b/src/test/regress/sql/create_table.sql @@ -253,6 +253,10 @@ CREATE TABLE IF NOT EXISTS test_tsvector( t text ); +-- invalid: non-lowercase quoted reloptions identifiers +CREATE TABLE tas_case WITH ("Fillfactor" = 10) AS SELECT 1 a; +CREATE TABLE tas_case (a text) WITH ("Oids" = true); + CREATE UNLOGGED TABLE unlogged1 (a int primary key); -- OK CREATE TEMPORARY TABLE unlogged2 (a int primary key); -- OK SELECT relname, relkind, relpersistence FROM pg_class WHERE relname ~ '^unlogged\d' ORDER BY relname; @@ -294,32 +298,11 @@ CREATE TABLE partitioned ( ) PARTITION BY LIST (a1, a2); -- fail -- unsupported constraint type for partitioned tables -CREATE TABLE partitioned ( - a int PRIMARY KEY -) PARTITION BY RANGE (a); - -CREATE TABLE pkrel ( - a int PRIMARY KEY -); -CREATE TABLE partitioned ( - a int REFERENCES pkrel(a) -) PARTITION BY RANGE (a); -DROP TABLE pkrel; - -CREATE TABLE partitioned ( - a int UNIQUE -) PARTITION BY RANGE (a); - CREATE TABLE partitioned ( a int, EXCLUDE USING gist (a WITH &&) ) PARTITION BY RANGE (a); --- prevent column from being used twice in the partition key -CREATE TABLE partitioned ( - a int -) PARTITION BY RANGE (a, a); - -- prevent using prohibited expressions in the key CREATE FUNCTION retset (a int) RETURNS SETOF int AS $$ SELECT 1; $$ LANGUAGE SQL IMMUTABLE; CREATE TABLE partitioned ( @@ -350,10 +333,10 @@ CREATE TABLE partitioned ( ) PARTITION BY RANGE (const_func()); DROP FUNCTION const_func(); --- only accept "list" and "range" as partitioning strategy +-- only accept valid partitioning strategy CREATE TABLE partitioned ( - a int -) PARTITION BY HASH (a); + a int +) PARTITION BY MAGIC (a); -- specified column must be present in the table CREATE TABLE partitioned ( @@ -415,13 +398,18 @@ DROP FUNCTION plusone(int); -- partitioned table cannot participate in regular inheritance CREATE TABLE partitioned2 ( - a int -) PARTITION BY LIST ((a+1)); + a int, + b text +) PARTITION BY RANGE ((a+1), substr(b, 1, 5)); CREATE TABLE fail () INHERITS (partitioned2); -- Partition key in describe output \d partitioned -\d partitioned2 +\d+ partitioned2 + +INSERT INTO partitioned2 VALUES (1, 'hello'); 
+CREATE TABLE part2_1 PARTITION OF partitioned2 FOR VALUES FROM (-1, 'aaaaa') TO (100, 'ccccc'); +\d+ part2_1 DROP TABLE partitioned, partitioned2; @@ -446,6 +434,12 @@ CREATE TABLE fail_part PARTITION OF list_parted FOR VALUES IN ('1'::int); CREATE TABLE fail_part PARTITION OF list_parted FOR VALUES IN (); -- trying to specify range for list partitioned table CREATE TABLE fail_part PARTITION OF list_parted FOR VALUES FROM (1) TO (2); +-- trying to specify modulus and remainder for list partitioned table +CREATE TABLE fail_part PARTITION OF list_parted FOR VALUES WITH (MODULUS 10, REMAINDER 1); + +-- check default partition cannot be created more than once +CREATE TABLE part_default PARTITION OF list_parted DEFAULT; +CREATE TABLE fail_default_part PARTITION OF list_parted DEFAULT; -- specified literal can't be cast to the partition column data type CREATE TABLE bools ( @@ -477,6 +471,8 @@ CREATE TABLE range_parted ( -- trying to specify list for range partitioned table CREATE TABLE fail_part PARTITION OF range_parted FOR VALUES IN ('a'); +-- trying to specify modulus and remainder for range partitioned table +CREATE TABLE fail_part PARTITION OF range_parted FOR VALUES WITH (MODULUS 10, REMAINDER 1); -- each of start and end bounds must have same number of values as the -- length of the partition key CREATE TABLE fail_part PARTITION OF range_parted FOR VALUES FROM ('a', 1) TO ('z'); @@ -485,6 +481,28 @@ CREATE TABLE fail_part PARTITION OF range_parted FOR VALUES FROM ('a') TO ('z', -- cannot specify null values in range bounds CREATE TABLE fail_part PARTITION OF range_parted FOR VALUES FROM (null) TO (maxvalue); +-- trying to specify modulus and remainder for range partitioned table +CREATE TABLE fail_part PARTITION OF range_parted FOR VALUES WITH (MODULUS 10, REMAINDER 1); + +-- check partition bound syntax for the hash partition +CREATE TABLE hash_parted ( + a int +) PARTITION BY HASH (a); +CREATE TABLE hpart_1 PARTITION OF hash_parted FOR VALUES WITH (MODULUS 10, REMAINDER 0); +CREATE TABLE hpart_2 PARTITION OF hash_parted FOR VALUES WITH (MODULUS 50, REMAINDER 1); +CREATE TABLE hpart_3 PARTITION OF hash_parted FOR VALUES WITH (MODULUS 200, REMAINDER 2); +-- modulus 25 is factor of modulus of 50 but 10 is not factor of 25. +CREATE TABLE fail_part PARTITION OF hash_parted FOR VALUES WITH (MODULUS 25, REMAINDER 3); +-- previous modulus 50 is factor of 150 but this modulus is not factor of next modulus 200. 
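The failing hash-partition bounds above and below follow from one rule: taken together, the moduli of a hash-partitioned table's partitions must form a divisibility chain, i.e. every modulus must be a factor of the next larger one, which is what keeps the remainder sets disjoint. A minimal sketch of a chain that does satisfy the rule (hypothetical table names, not drawn from this patch):

    CREATE TABLE hp (a int) PARTITION BY HASH (a);
    CREATE TABLE hp_m2r0 PARTITION OF hp FOR VALUES WITH (MODULUS 2, REMAINDER 0);  -- rows with hash(a) % 2 = 0
    CREATE TABLE hp_m4r1 PARTITION OF hp FOR VALUES WITH (MODULUS 4, REMAINDER 1);  -- 2 divides 4, so this cannot overlap hp_m2r0
    CREATE TABLE hp_m4r3 PARTITION OF hp FOR VALUES WITH (MODULUS 4, REMAINDER 3);  -- covers the remaining odd hash values

With that in mind, the MODULUS 25 attempt above fails because the existing modulus 10 does not divide 25, and the MODULUS 150 attempt that follows fails because 150 does not divide the existing modulus 200.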
+CREATE TABLE fail_part PARTITION OF hash_parted FOR VALUES WITH (MODULUS 150, REMAINDER 3); +-- trying to specify range for the hash partitioned table +CREATE TABLE fail_part PARTITION OF hash_parted FOR VALUES FROM ('a', 1) TO ('z'); +-- trying to specify list value for the hash partitioned table +CREATE TABLE fail_part PARTITION OF hash_parted FOR VALUES IN (1000); + +-- trying to create default partition for the hash partitioned table +CREATE TABLE fail_default_part PARTITION OF hash_parted DEFAULT; + -- check if compatible with the specified parent -- cannot create as partition of a non-partitioned table @@ -492,6 +510,7 @@ CREATE TABLE unparted ( a int ); CREATE TABLE fail_part PARTITION OF unparted FOR VALUES IN ('a'); +CREATE TABLE fail_part PARTITION OF unparted FOR VALUES WITH (MODULUS 2, REMAINDER 1); DROP TABLE unparted; -- cannot create a permanent rel as partition of a temp rel @@ -524,9 +543,13 @@ CREATE TABLE list_parted2 ( ) PARTITION BY LIST (a); CREATE TABLE part_null_z PARTITION OF list_parted2 FOR VALUES IN (null, 'z'); CREATE TABLE part_ab PARTITION OF list_parted2 FOR VALUES IN ('a', 'b'); +CREATE TABLE list_parted2_def PARTITION OF list_parted2 DEFAULT; CREATE TABLE fail_part PARTITION OF list_parted2 FOR VALUES IN (null); CREATE TABLE fail_part PARTITION OF list_parted2 FOR VALUES IN ('b', 'c'); +-- check default partition overlap +INSERT INTO list_parted2 VALUES('X'); +CREATE TABLE fail_part PARTITION OF list_parted2 FOR VALUES IN ('W', 'X', 'Y'); CREATE TABLE range_parted2 ( a int @@ -546,6 +569,17 @@ CREATE TABLE part3 PARTITION OF range_parted2 FOR VALUES FROM (30) TO (40); CREATE TABLE fail_part PARTITION OF range_parted2 FOR VALUES FROM (10) TO (30); CREATE TABLE fail_part PARTITION OF range_parted2 FOR VALUES FROM (10) TO (50); +-- Create a default partition for range partitioned table +CREATE TABLE range2_default PARTITION OF range_parted2 DEFAULT; + +-- More than one default partition is not allowed, so this should give error +CREATE TABLE fail_default_part PARTITION OF range_parted2 DEFAULT; + +-- Check if the range for default partitions overlap +INSERT INTO range_parted2 VALUES (85); +CREATE TABLE fail_part PARTITION OF range_parted2 FOR VALUES FROM (80) TO (90); +CREATE TABLE part4 PARTITION OF range_parted2 FOR VALUES FROM (90) TO (100); + -- now check for multi-column range partition key CREATE TABLE range_parted3 ( a int, @@ -559,12 +593,28 @@ CREATE TABLE part10 PARTITION OF range_parted3 FOR VALUES FROM (1, minvalue) TO CREATE TABLE part11 PARTITION OF range_parted3 FOR VALUES FROM (1, 1) TO (1, 10); CREATE TABLE part12 PARTITION OF range_parted3 FOR VALUES FROM (1, 10) TO (1, maxvalue); CREATE TABLE fail_part PARTITION OF range_parted3 FOR VALUES FROM (1, 10) TO (1, 20); +CREATE TABLE range3_default PARTITION OF range_parted3 DEFAULT; -- cannot create a partition that says column b is allowed to range -- from -infinity to +infinity, while there exist partitions that have -- more specific ranges CREATE TABLE fail_part PARTITION OF range_parted3 FOR VALUES FROM (1, minvalue) TO (1, maxvalue); +-- check for partition bound overlap and other invalid specifications for the hash partition +CREATE TABLE hash_parted2 ( + a varchar +) PARTITION BY HASH (a); +CREATE TABLE h2part_1 PARTITION OF hash_parted2 FOR VALUES WITH (MODULUS 4, REMAINDER 2); +CREATE TABLE h2part_2 PARTITION OF hash_parted2 FOR VALUES WITH (MODULUS 8, REMAINDER 0); +CREATE TABLE h2part_3 PARTITION OF hash_parted2 FOR VALUES WITH (MODULUS 8, REMAINDER 4); +CREATE TABLE h2part_4 
PARTITION OF hash_parted2 FOR VALUES WITH (MODULUS 8, REMAINDER 5); +-- overlap with part_4 +CREATE TABLE fail_part PARTITION OF hash_parted2 FOR VALUES WITH (MODULUS 2, REMAINDER 1); +-- modulus must be greater than zero +CREATE TABLE fail_part PARTITION OF hash_parted2 FOR VALUES WITH (MODULUS 0, REMAINDER 1); +-- remainder must be greater than or equal to zero and less than modulus +CREATE TABLE fail_part PARTITION OF hash_parted2 FOR VALUES WITH (MODULUS 8, REMAINDER 8); + -- check schema propagation from parent CREATE TABLE parted ( @@ -604,6 +654,25 @@ CREATE TABLE part_c PARTITION OF parted (b WITH OPTIONS NOT NULL DEFAULT 0) FOR -- create a level-2 partition CREATE TABLE part_c_1_10 PARTITION OF part_c FOR VALUES FROM (1) TO (10); +-- check that NOT NULL and default value are inherited correctly +create table parted_notnull_inh_test (a int default 1, b int not null default 0) partition by list (a); +create table parted_notnull_inh_test1 partition of parted_notnull_inh_test (a not null, b default 1) for values in (1); +insert into parted_notnull_inh_test (b) values (null); +-- note that while b's default is overriden, a's default is preserved +\d parted_notnull_inh_test1 +drop table parted_notnull_inh_test; + +-- check for a conflicting COLLATE clause +create table parted_collate_must_match (a text collate "C", b text collate "C") + partition by range (a); +-- on the partition key +create table parted_collate_must_match1 partition of parted_collate_must_match + (a collate "POSIX") for values from ('a') to ('m'); +-- on another column +create table parted_collate_must_match2 partition of parted_collate_must_match + (b collate "POSIX") for values from ('m') to ('z'); +drop table parted_collate_must_match; + -- Partition bound in describe output \d+ part_b @@ -618,22 +687,38 @@ CREATE TABLE part_c_1_10 PARTITION OF part_c FOR VALUES FROM (1) TO (10); -- output could vary depending on the order in which partition oids are -- returned. 
\d parted +\d hash_parted -- check that we get the expected partition constraints CREATE TABLE range_parted4 (a int, b int, c int) PARTITION BY RANGE (abs(a), abs(b), c); -CREATE TABLE unbounded_range_part PARTITION OF range_parted4 FOR VALUES FROM (MINVALUE, 0, 0) TO (MAXVALUE, 0, 0); +CREATE TABLE unbounded_range_part PARTITION OF range_parted4 FOR VALUES FROM (MINVALUE, MINVALUE, MINVALUE) TO (MAXVALUE, MAXVALUE, MAXVALUE); \d+ unbounded_range_part DROP TABLE unbounded_range_part; -CREATE TABLE range_parted4_1 PARTITION OF range_parted4 FOR VALUES FROM (MINVALUE, 0, 0) TO (1, MAXVALUE, 0); +CREATE TABLE range_parted4_1 PARTITION OF range_parted4 FOR VALUES FROM (MINVALUE, MINVALUE, MINVALUE) TO (1, MAXVALUE, MAXVALUE); \d+ range_parted4_1 CREATE TABLE range_parted4_2 PARTITION OF range_parted4 FOR VALUES FROM (3, 4, 5) TO (6, 7, MAXVALUE); \d+ range_parted4_2 -CREATE TABLE range_parted4_3 PARTITION OF range_parted4 FOR VALUES FROM (6, 8, MINVALUE) TO (9, MAXVALUE, 0); +CREATE TABLE range_parted4_3 PARTITION OF range_parted4 FOR VALUES FROM (6, 8, MINVALUE) TO (9, MAXVALUE, MAXVALUE); \d+ range_parted4_3 DROP TABLE range_parted4; +-- user-defined operator class in partition key +CREATE FUNCTION my_int4_sort(int4,int4) RETURNS int LANGUAGE sql + AS $$ SELECT CASE WHEN $1 = $2 THEN 0 WHEN $1 > $2 THEN 1 ELSE -1 END; $$; +CREATE OPERATOR CLASS test_int4_ops FOR TYPE int4 USING btree AS + OPERATOR 1 < (int4,int4), OPERATOR 2 <= (int4,int4), + OPERATOR 3 = (int4,int4), OPERATOR 4 >= (int4,int4), + OPERATOR 5 > (int4,int4), FUNCTION 1 my_int4_sort(int4,int4); +CREATE TABLE partkey_t (a int4) PARTITION BY RANGE (a test_int4_ops); +CREATE TABLE partkey_t_1 PARTITION OF partkey_t FOR VALUES FROM (0) TO (1000); +INSERT INTO partkey_t VALUES (100); +INSERT INTO partkey_t VALUES (200); + -- cleanup DROP TABLE parted, list_parted, range_parted, list_parted2, range_parted2, range_parted3; +DROP TABLE partkey_t, hash_parted, hash_parted2; +DROP OPERATOR CLASS test_int4_ops USING btree; +DROP FUNCTION my_int4_sort(int4,int4); -- comments on partitioned tables columns CREATE TABLE parted_col_comment (a int, b text) PARTITION BY LIST (a); @@ -642,3 +727,39 @@ COMMENT ON COLUMN parted_col_comment.a IS 'Partition key'; SELECT obj_description('parted_col_comment'::regclass); \d+ parted_col_comment DROP TABLE parted_col_comment; + +-- list partitioning on array type column +CREATE TABLE arrlp (a int[]) PARTITION BY LIST (a); +CREATE TABLE arrlp12 PARTITION OF arrlp FOR VALUES IN ('{1}', '{2}'); +\d+ arrlp12 +DROP TABLE arrlp; + +-- partition on boolean column +create table boolspart (a bool) partition by list (a); +create table boolspart_t partition of boolspart for values in (true); +create table boolspart_f partition of boolspart for values in (false); +\d+ boolspart +drop table boolspart; + +-- partitions mixing temporary and permanent relations +create table perm_parted (a int) partition by list (a); +create temporary table temp_parted (a int) partition by list (a); +create table perm_part partition of temp_parted default; -- error +create temp table temp_part partition of perm_parted default; -- error +create temp table temp_part partition of temp_parted default; -- ok +drop table perm_parted cascade; +drop table temp_parted cascade; + +-- check that adding partitions to a table while it is being used is prevented +create table tab_part_create (a int) partition by list (a); +create or replace function func_part_create() returns trigger + language plpgsql as $$ + begin + execute 'create table 
tab_part_create_1 partition of tab_part_create for values in (1)'; + return null; + end $$; +create trigger trig_part_create before insert on tab_part_create + for each statement execute procedure func_part_create(); +insert into tab_part_create values (1); +drop table tab_part_create; +drop function func_part_create(); diff --git a/src/test/regress/sql/create_table_like.sql b/src/test/regress/sql/create_table_like.sql index 557040bbe7..42cad6826b 100644 --- a/src/test/regress/sql/create_table_like.sql +++ b/src/test/regress/sql/create_table_like.sql @@ -37,7 +37,7 @@ INSERT INTO inhg VALUES ('x', 'foo', 'y'); /* fails due to constraint */ SELECT * FROM inhg; /* Two records with three columns in order x=x, xx=text, y=y */ DROP TABLE inhg; -CREATE TABLE test_like_id_1 (a int GENERATED ALWAYS AS IDENTITY, b text); +CREATE TABLE test_like_id_1 (a bigint GENERATED ALWAYS AS IDENTITY, b text); \d test_like_id_1 INSERT INTO test_like_id_1 (b) VALUES ('b1'); SELECT * FROM test_like_id_1; @@ -71,6 +71,8 @@ DROP TABLE inhz; CREATE TABLE ctlt1 (a text CHECK (length(a) > 2) PRIMARY KEY, b text); CREATE INDEX ctlt1_b_key ON ctlt1 (b); CREATE INDEX ctlt1_fnidx ON ctlt1 ((a || b)); +CREATE STATISTICS ctlt1_a_b_stat ON a,b FROM ctlt1; +COMMENT ON STATISTICS ctlt1_a_b_stat IS 'ab stats'; COMMENT ON COLUMN ctlt1.a IS 'A'; COMMENT ON COLUMN ctlt1.b IS 'B'; COMMENT ON CONSTRAINT ctlt1_a_check ON ctlt1 IS 't1_a_check'; @@ -108,6 +110,7 @@ SELECT description FROM pg_description, pg_constraint c WHERE classoid = 'pg_con CREATE TABLE ctlt_all (LIKE ctlt1 INCLUDING ALL); \d+ ctlt_all SELECT c.relname, objsubid, description FROM pg_description, pg_index i, pg_class c WHERE classoid = 'pg_class'::regclass AND objoid = i.indexrelid AND c.oid = i.indexrelid AND i.indrelid = 'ctlt_all'::regclass ORDER BY c.relname, objsubid; +SELECT s.stxname, objsubid, description FROM pg_description, pg_statistic_ext s WHERE classoid = 'pg_statistic_ext'::regclass AND objoid = s.oid AND s.stxrelid = 'ctlt_all'::regclass ORDER BY s.stxname, objsubid; CREATE TABLE inh_error1 () INHERITS (ctlt1, ctlt4); CREATE TABLE inh_error2 (LIKE ctlt4 INCLUDING STORAGE) INHERITS (ctlt1); diff --git a/src/test/regress/sql/create_type.sql b/src/test/regress/sql/create_type.sql index a28303aa6a..3d1deba97c 100644 --- a/src/test/regress/sql/create_type.sql +++ b/src/test/regress/sql/create_type.sql @@ -84,6 +84,16 @@ INSERT INTO default_test DEFAULT VALUES; SELECT * FROM default_test; +-- invalid: non-lowercase quoted identifiers +CREATE TYPE case_int42 ( + "Internallength" = 4, + "Input" = int42_in, + "Output" = int42_out, + "Alignment" = int4, + "Default" = 42, + "Passedbyvalue" +); + -- Test stand-alone composite type CREATE TYPE default_test_row AS (f1 text_w_default, f2 int42); @@ -134,3 +144,13 @@ CREATE TEMP TABLE mytab (foo widget(42,13)); SELECT format_type(atttypid,atttypmod) FROM pg_attribute WHERE attrelid = 'mytab'::regclass AND attnum > 0; + +-- might as well exercise the widget type while we're here +INSERT INTO mytab VALUES ('(1,2,3)'), ('(-44,5.5,12)'); +TABLE mytab; + +-- and test format_type() a bit more, too +select format_type('varchar'::regtype, 42); +select format_type('bpchar'::regtype, null); +-- this behavior difference is intentional +select format_type('bpchar'::regtype, -1); diff --git a/src/test/regress/sql/create_view.sql b/src/test/regress/sql/create_view.sql index b4e7a8793c..9480030005 100644 --- a/src/test/regress/sql/create_view.sql +++ b/src/test/regress/sql/create_view.sql @@ -89,7 +89,7 @@ CREATE VIEW 
temp_view_test.v2 AS SELECT * FROM base_table; -- should fail CREATE VIEW temp_view_test.v3_temp AS SELECT * FROM temp_table; -- should fail -CREATE SCHEMA test_schema +CREATE SCHEMA test_view_schema CREATE TEMP VIEW testview AS SELECT 1; -- joins: if any of the join relations are temporary, the view diff --git a/src/test/regress/sql/domain.sql b/src/test/regress/sql/domain.sql index 0fd383e272..68da27de22 100644 --- a/src/test/regress/sql/domain.sql +++ b/src/test/regress/sql/domain.sql @@ -120,6 +120,65 @@ select pg_typeof('{1,2,3}'::dia || 42); -- should be int[] not dia drop domain dia; +-- Test domains over composites + +create type comptype as (r float8, i float8); +create domain dcomptype as comptype; +create table dcomptable (d1 dcomptype unique); + +insert into dcomptable values (row(1,2)::dcomptype); +insert into dcomptable values (row(3,4)::comptype); +insert into dcomptable values (row(1,2)::dcomptype); -- fail on uniqueness +insert into dcomptable (d1.r) values(11); + +select * from dcomptable; +select (d1).r, (d1).i, (d1).* from dcomptable; +update dcomptable set d1.r = (d1).r + 1 where (d1).i > 0; +select * from dcomptable; + +alter domain dcomptype add constraint c1 check ((value).r <= (value).i); +alter domain dcomptype add constraint c2 check ((value).r > (value).i); -- fail + +select row(2,1)::dcomptype; -- fail +insert into dcomptable values (row(1,2)::comptype); +insert into dcomptable values (row(2,1)::comptype); -- fail +insert into dcomptable (d1.r) values(99); +insert into dcomptable (d1.r, d1.i) values(99, 100); +insert into dcomptable (d1.r, d1.i) values(100, 99); -- fail +update dcomptable set d1.r = (d1).r + 1 where (d1).i > 0; -- fail +update dcomptable set d1.r = (d1).r - 1, d1.i = (d1).i + 1 where (d1).i > 0; +select * from dcomptable; + +explain (verbose, costs off) + update dcomptable set d1.r = (d1).r - 1, d1.i = (d1).i + 1 where (d1).i > 0; +create rule silly as on delete to dcomptable do instead + update dcomptable set d1.r = (d1).r - 1, d1.i = (d1).i + 1 where (d1).i > 0; +\d+ dcomptable + +drop table dcomptable; +drop type comptype cascade; + + +-- check altering and dropping columns used by domain constraints +create type comptype as (r float8, i float8); +create domain dcomptype as comptype; +alter domain dcomptype add constraint c1 check ((value).r > 0); +comment on constraint c1 on domain dcomptype is 'random commentary'; + +select row(0,1)::dcomptype; -- fail + +alter type comptype alter attribute r type varchar; -- fail +alter type comptype alter attribute r type bigint; + +alter type comptype drop attribute r; -- fail +alter type comptype drop attribute i; + +select conname, obj_description(oid, 'pg_constraint') from pg_constraint + where contypid = 'dcomptype'::regtype; -- check comment is still there + +drop type comptype cascade; + + -- Test domains over arrays of composite create type comptype as (r float8, i float8); @@ -166,6 +225,49 @@ drop table dcomptable; drop type comptype cascade; +-- Test arrays over domains + +create domain posint as int check (value > 0); + +create table pitable (f1 posint[]); +insert into pitable values(array[42]); +insert into pitable values(array[-1]); -- fail +insert into pitable values('{0}'); -- fail +update pitable set f1[1] = f1[1] + 1; +update pitable set f1[1] = 0; -- fail +select * from pitable; +drop table pitable; + +create domain vc4 as varchar(4); +create table vc4table (f1 vc4[]); +insert into vc4table values(array['too long']); -- fail +insert into vc4table values(array['too long']::vc4[]); -- 
cast truncates +select * from vc4table; +drop table vc4table; +drop type vc4; + +-- You can sort of fake arrays-of-arrays by putting a domain in between +create domain dposinta as posint[]; +create table dposintatable (f1 dposinta[]); +insert into dposintatable values(array[array[42]]); -- fail +insert into dposintatable values(array[array[42]::posint[]]); -- still fail +insert into dposintatable values(array[array[42]::dposinta]); -- but this works +select f1, f1[1], (f1[1])[1] from dposintatable; +select pg_typeof(f1) from dposintatable; +select pg_typeof(f1[1]) from dposintatable; +select pg_typeof(f1[1][1]) from dposintatable; +select pg_typeof((f1[1])[1]) from dposintatable; +update dposintatable set f1[2] = array[99]; +select f1, f1[1], (f1[2])[1] from dposintatable; +-- it'd be nice if you could do something like this, but for now you can't: +update dposintatable set f1[2][1] = array[97]; +-- maybe someday we can make this syntax work: +update dposintatable set (f1[2])[1] = array[98]; + +drop table dposintatable; +drop domain posint cascade; + + -- Test not-null restrictions create domain dnotnull varchar(15) NOT NULL; @@ -457,6 +559,14 @@ insert into ddtest2 values('{(-1)}'); alter domain posint add constraint c1 check(value >= 0); drop table ddtest2; +-- Likewise for domains within domains over composite +create domain ddtest1d as ddtest1; +create table ddtest2(f1 ddtest1d); +insert into ddtest2 values('(-1)'); +alter domain posint add constraint c1 check(value >= 0); +drop table ddtest2; +drop domain ddtest1d; + -- Likewise for domains within domains over array of composite create domain ddtest1d as ddtest1[]; create table ddtest2(f1 ddtest1d); diff --git a/src/test/regress/sql/enum.sql b/src/test/regress/sql/enum.sql index d7e87143a0..6affd0d1eb 100644 --- a/src/test/regress/sql/enum.sql +++ b/src/test/regress/sql/enum.sql @@ -300,15 +300,29 @@ ALTER TYPE bogon ADD VALUE 'bad'; SELECT 'bad'::bogon; ROLLBACK; +-- but a renamed value is safe to use later in same transaction +BEGIN; +ALTER TYPE bogus RENAME VALUE 'good' to 'bad'; +SELECT 'bad'::bogus; +ROLLBACK; + DROP TYPE bogus; --- check that we can add new values to existing enums in a transaction --- and use them, if the type is new as well +-- check that values created during CREATE TYPE can be used in any case +BEGIN; +CREATE TYPE bogus AS ENUM('good','bad','ugly'); +ALTER TYPE bogus RENAME TO bogon; +select enum_range(null::bogon); +ROLLBACK; + +-- ideally, we'd allow this usage; but it requires keeping track of whether +-- the enum type was created in the current transaction, which is expensive BEGIN; CREATE TYPE bogus AS ENUM('good'); -ALTER TYPE bogus ADD VALUE 'bad'; -ALTER TYPE bogus ADD VALUE 'ugly'; -SELECT enum_range(null::bogus); +ALTER TYPE bogus RENAME TO bogon; +ALTER TYPE bogon ADD VALUE 'bad'; +ALTER TYPE bogon ADD VALUE 'ugly'; +select enum_range(null::bogon); -- fails ROLLBACK; -- diff --git a/src/test/regress/sql/equivclass.sql b/src/test/regress/sql/equivclass.sql index 0e4aa0cd2c..85aa65de39 100644 --- a/src/test/regress/sql/equivclass.sql +++ b/src/test/regress/sql/equivclass.sql @@ -254,3 +254,11 @@ revoke select on ec0 from regress_user_ectest; revoke select on ec1 from regress_user_ectest; drop user regress_user_ectest; + +-- check that X=X is converted to X IS NOT NULL when appropriate +explain (costs off) + select * from tenk1 where unique1 = unique1 and unique2 = unique2; + +-- this could be converted, but isn't at present +explain (costs off) + select * from tenk1 where unique1 = unique1 or 
unique2 = unique2; diff --git a/src/test/regress/sql/event_trigger.sql b/src/test/regress/sql/event_trigger.sql index b65bf3ec66..f022cfaed0 100644 --- a/src/test/regress/sql/event_trigger.sql +++ b/src/test/regress/sql/event_trigger.sql @@ -28,7 +28,7 @@ create event trigger regress_event_trigger on ddl_command_start -- OK create event trigger regress_event_trigger_end on ddl_command_end - execute procedure test_event_trigger(); + execute function test_event_trigger(); -- should fail, food is not a valid filter variable create event trigger regress_event_trigger2 on ddl_command_start @@ -89,15 +89,45 @@ create event trigger regress_event_trigger_noperms on ddl_command_start execute procedure test_event_trigger(); reset role; --- all OK +-- test enabling and disabling +alter event trigger regress_event_trigger disable; +-- fires _trigger2 and _trigger_end should fire, but not _trigger +create table event_trigger_fire1 (a int); +alter event trigger regress_event_trigger enable; +set session_replication_role = replica; +-- fires nothing +create table event_trigger_fire2 (a int); alter event trigger regress_event_trigger enable replica; +-- fires only _trigger +create table event_trigger_fire3 (a int); alter event trigger regress_event_trigger enable always; -alter event trigger regress_event_trigger enable; +-- fires only _trigger +create table event_trigger_fire4 (a int); +reset session_replication_role; +-- fires all three +create table event_trigger_fire5 (a int); +-- non-top-level command +create function f1() returns int +language plpgsql +as $$ +begin + create table event_trigger_fire6 (a int); + return 0; +end $$; +select f1(); +-- non-top-level command +create procedure p1() +language plpgsql +as $$ +begin + create table event_trigger_fire7 (a int); +end $$; +call p1(); + +-- clean up alter event trigger regress_event_trigger disable; - --- regress_event_trigger2 and regress_event_trigger_end should fire, but not --- regress_event_trigger -create table event_trigger_fire1 (a int); +drop table event_trigger_fire2, event_trigger_fire3, event_trigger_fire4, event_trigger_fire5, event_trigger_fire6, event_trigger_fire7; +drop routine f1(), p1(); -- regress_event_trigger_end should fire on these commands grant all on table event_trigger_fire1 to public; @@ -263,6 +293,19 @@ CREATE SCHEMA evttrig CREATE INDEX one_idx ON one (col_b) CREATE TABLE two (col_c INTEGER CHECK (col_c > 0) REFERENCES one DEFAULT 42); +-- Partitioned tables with a partitioned index +CREATE TABLE evttrig.parted ( + id int PRIMARY KEY) + PARTITION BY RANGE (id); +CREATE TABLE evttrig.part_1_10 PARTITION OF evttrig.parted (id) + FOR VALUES FROM (1) TO (10); +CREATE TABLE evttrig.part_10_20 PARTITION OF evttrig.parted (id) + FOR VALUES FROM (10) TO (20) PARTITION BY RANGE (id); +CREATE TABLE evttrig.part_10_15 PARTITION OF evttrig.part_10_20 (id) + FOR VALUES FROM (10) TO (15); +CREATE TABLE evttrig.part_15_20 PARTITION OF evttrig.part_10_20 (id) + FOR VALUES FROM (15) TO (20); + ALTER TABLE evttrig.two DROP COLUMN col_c; ALTER TABLE evttrig.one ALTER COLUMN col_b DROP DEFAULT; ALTER TABLE evttrig.one DROP CONSTRAINT one_pkey; diff --git a/src/test/regress/sql/fast_default.sql b/src/test/regress/sql/fast_default.sql new file mode 100644 index 0000000000..344b5841d7 --- /dev/null +++ b/src/test/regress/sql/fast_default.sql @@ -0,0 +1,502 @@ +-- +-- ALTER TABLE ADD COLUMN DEFAULT test +-- + +SET search_path = fast_default; +CREATE SCHEMA fast_default; +CREATE TABLE m(id OID); +INSERT INTO m VALUES (NULL::OID); + +CREATE 
FUNCTION set(tabname name) RETURNS VOID +AS $$ +BEGIN + UPDATE m + SET id = (SELECT c.relfilenode + FROM pg_class AS c, pg_namespace AS s + WHERE c.relname = tabname + AND c.relnamespace = s.oid + AND s.nspname = 'fast_default'); +END; +$$ LANGUAGE 'plpgsql'; + +CREATE FUNCTION comp() RETURNS TEXT +AS $$ +BEGIN + RETURN (SELECT CASE + WHEN m.id = c.relfilenode THEN 'Unchanged' + ELSE 'Rewritten' + END + FROM m, pg_class AS c, pg_namespace AS s + WHERE c.relname = 't' + AND c.relnamespace = s.oid + AND s.nspname = 'fast_default'); +END; +$$ LANGUAGE 'plpgsql'; + +CREATE FUNCTION log_rewrite() RETURNS event_trigger +LANGUAGE plpgsql as +$func$ + +declare + this_schema text; +begin + select into this_schema relnamespace::regnamespace::text + from pg_class + where oid = pg_event_trigger_table_rewrite_oid(); + if this_schema = 'fast_default' + then + RAISE NOTICE 'rewriting table % for reason %', + pg_event_trigger_table_rewrite_oid()::regclass, + pg_event_trigger_table_rewrite_reason(); + end if; +end; +$func$; + +CREATE TABLE has_volatile AS +SELECT * FROM generate_series(1,10) id; + + +CREATE EVENT TRIGGER has_volatile_rewrite + ON table_rewrite + EXECUTE PROCEDURE log_rewrite(); + +-- only the last of these should trigger a rewrite +ALTER TABLE has_volatile ADD col1 int; +ALTER TABLE has_volatile ADD col2 int DEFAULT 1; +ALTER TABLE has_volatile ADD col3 timestamptz DEFAULT current_timestamp; +ALTER TABLE has_volatile ADD col4 int DEFAULT (random() * 10000)::int; + + + +-- Test a large sample of different datatypes +CREATE TABLE T(pk INT NOT NULL PRIMARY KEY, c_int INT DEFAULT 1); + +SELECT set('t'); + +INSERT INTO T VALUES (1), (2); + +ALTER TABLE T ADD COLUMN c_bpchar BPCHAR(5) DEFAULT 'hello', + ALTER COLUMN c_int SET DEFAULT 2; + +INSERT INTO T VALUES (3), (4); + + +ALTER TABLE T ADD COLUMN c_text TEXT DEFAULT 'world', + ALTER COLUMN c_bpchar SET DEFAULT 'dog'; + +INSERT INTO T VALUES (5), (6); + +ALTER TABLE T ADD COLUMN c_date DATE DEFAULT '2016-06-02', + ALTER COLUMN c_text SET DEFAULT 'cat'; + +INSERT INTO T VALUES (7), (8); + +ALTER TABLE T ADD COLUMN c_timestamp TIMESTAMP DEFAULT '2016-09-01 12:00:00', + ADD COLUMN c_timestamp_null TIMESTAMP, + ALTER COLUMN c_date SET DEFAULT '2010-01-01'; + +INSERT INTO T VALUES (9), (10); + +ALTER TABLE T ADD COLUMN c_array TEXT[] + DEFAULT '{"This", "is", "the", "real", "world"}', + ALTER COLUMN c_timestamp SET DEFAULT '1970-12-31 11:12:13', + ALTER COLUMN c_timestamp_null SET DEFAULT '2016-09-29 12:00:00'; + +INSERT INTO T VALUES (11), (12); + +ALTER TABLE T ADD COLUMN c_small SMALLINT DEFAULT -5, + ADD COLUMN c_small_null SMALLINT, + ALTER COLUMN c_array + SET DEFAULT '{"This", "is", "no", "fantasy"}'; + +INSERT INTO T VALUES (13), (14); + +ALTER TABLE T ADD COLUMN c_big BIGINT DEFAULT 180000000000018, + ALTER COLUMN c_small SET DEFAULT 9, + ALTER COLUMN c_small_null SET DEFAULT 13; + +INSERT INTO T VALUES (15), (16); + +ALTER TABLE T ADD COLUMN c_num NUMERIC DEFAULT 1.00000000001, + ALTER COLUMN c_big SET DEFAULT -9999999999999999; + +INSERT INTO T VALUES (17), (18); + +ALTER TABLE T ADD COLUMN c_time TIME DEFAULT '12:00:00', + ALTER COLUMN c_num SET DEFAULT 2.000000000000002; + +INSERT INTO T VALUES (19), (20); + +ALTER TABLE T ADD COLUMN c_interval INTERVAL DEFAULT '1 day', + ALTER COLUMN c_time SET DEFAULT '23:59:59'; + +INSERT INTO T VALUES (21), (22); + +ALTER TABLE T ADD COLUMN c_hugetext TEXT DEFAULT repeat('abcdefg',1000), + ALTER COLUMN c_interval SET DEFAULT '3 hours'; + +INSERT INTO T VALUES (23), (24); + +ALTER TABLE T ALTER 
COLUMN c_interval DROP DEFAULT, + ALTER COLUMN c_hugetext SET DEFAULT repeat('poiuyt', 1000); + +INSERT INTO T VALUES (25), (26); + +ALTER TABLE T ALTER COLUMN c_bpchar DROP DEFAULT, + ALTER COLUMN c_date DROP DEFAULT, + ALTER COLUMN c_text DROP DEFAULT, + ALTER COLUMN c_timestamp DROP DEFAULT, + ALTER COLUMN c_array DROP DEFAULT, + ALTER COLUMN c_small DROP DEFAULT, + ALTER COLUMN c_big DROP DEFAULT, + ALTER COLUMN c_num DROP DEFAULT, + ALTER COLUMN c_time DROP DEFAULT, + ALTER COLUMN c_hugetext DROP DEFAULT; + +INSERT INTO T VALUES (27), (28); + +SELECT pk, c_int, c_bpchar, c_text, c_date, c_timestamp, + c_timestamp_null, c_array, c_small, c_small_null, + c_big, c_num, c_time, c_interval, + c_hugetext = repeat('abcdefg',1000) as c_hugetext_origdef, + c_hugetext = repeat('poiuyt', 1000) as c_hugetext_newdef +FROM T ORDER BY pk; + +SELECT comp(); + +DROP TABLE T; + +-- Test expressions in the defaults +CREATE OR REPLACE FUNCTION foo(a INT) RETURNS TEXT AS $$ +DECLARE res TEXT := ''; + i INT; +BEGIN + i := 0; + WHILE (i < a) LOOP + res := res || chr(ascii('a') + i); + i := i + 1; + END LOOP; + RETURN res; +END; $$ LANGUAGE PLPGSQL STABLE; + +CREATE TABLE T(pk INT NOT NULL PRIMARY KEY, c_int INT DEFAULT LENGTH(foo(6))); + +SELECT set('t'); + +INSERT INTO T VALUES (1), (2); + +ALTER TABLE T ADD COLUMN c_bpchar BPCHAR(5) DEFAULT foo(4), + ALTER COLUMN c_int SET DEFAULT LENGTH(foo(8)); + +INSERT INTO T VALUES (3), (4); + +ALTER TABLE T ADD COLUMN c_text TEXT DEFAULT foo(6), + ALTER COLUMN c_bpchar SET DEFAULT foo(3); + +INSERT INTO T VALUES (5), (6); + +ALTER TABLE T ADD COLUMN c_date DATE + DEFAULT '2016-06-02'::DATE + LENGTH(foo(10)), + ALTER COLUMN c_text SET DEFAULT foo(12); + +INSERT INTO T VALUES (7), (8); + +ALTER TABLE T ADD COLUMN c_timestamp TIMESTAMP + DEFAULT '2016-09-01'::DATE + LENGTH(foo(10)), + ALTER COLUMN c_date + SET DEFAULT '2010-01-01'::DATE - LENGTH(foo(4)); + +INSERT INTO T VALUES (9), (10); + +ALTER TABLE T ADD COLUMN c_array TEXT[] + DEFAULT ('{"This", "is", "' || foo(4) || + '","the", "real", "world"}')::TEXT[], + ALTER COLUMN c_timestamp + SET DEFAULT '1970-12-31'::DATE + LENGTH(foo(30)); + +INSERT INTO T VALUES (11), (12); + +ALTER TABLE T ALTER COLUMN c_int DROP DEFAULT, + ALTER COLUMN c_array + SET DEFAULT ('{"This", "is", "' || foo(1) || + '", "fantasy"}')::text[]; + +INSERT INTO T VALUES (13), (14); + +ALTER TABLE T ALTER COLUMN c_bpchar DROP DEFAULT, + ALTER COLUMN c_date DROP DEFAULT, + ALTER COLUMN c_text DROP DEFAULT, + ALTER COLUMN c_timestamp DROP DEFAULT, + ALTER COLUMN c_array DROP DEFAULT; + +INSERT INTO T VALUES (15), (16); + +SELECT * FROM T; + +SELECT comp(); + +DROP TABLE T; + +DROP FUNCTION foo(INT); + +-- Fall back to full rewrite for volatile expressions +CREATE TABLE T(pk INT NOT NULL PRIMARY KEY); + +INSERT INTO T VALUES (1); + +SELECT set('t'); + +-- now() is stable, because it returns the transaction timestamp +ALTER TABLE T ADD COLUMN c1 TIMESTAMP DEFAULT now(); + +SELECT comp(); + +-- clock_timestamp() is volatile +ALTER TABLE T ADD COLUMN c2 TIMESTAMP DEFAULT clock_timestamp(); + +SELECT comp(); + +DROP TABLE T; + +-- Simple querie +CREATE TABLE T (pk INT NOT NULL PRIMARY KEY); + +SELECT set('t'); + +INSERT INTO T SELECT * FROM generate_series(1, 10) a; + +ALTER TABLE T ADD COLUMN c_bigint BIGINT NOT NULL DEFAULT -1; + +INSERT INTO T SELECT b, b - 10 FROM generate_series(11, 20) a(b); + +ALTER TABLE T ADD COLUMN c_text TEXT DEFAULT 'hello'; + +INSERT INTO T SELECT b, b - 10, (b + 10)::text FROM generate_series(21, 30) a(b); + +-- WHERE 
clause +SELECT c_bigint, c_text FROM T WHERE c_bigint = -1 LIMIT 1; + +EXPLAIN (VERBOSE TRUE, COSTS FALSE) +SELECT c_bigint, c_text FROM T WHERE c_bigint = -1 LIMIT 1; + +SELECT c_bigint, c_text FROM T WHERE c_text = 'hello' LIMIT 1; + +EXPLAIN (VERBOSE TRUE, COSTS FALSE) SELECT c_bigint, c_text FROM T WHERE c_text = 'hello' LIMIT 1; + + +-- COALESCE +SELECT COALESCE(c_bigint, pk), COALESCE(c_text, pk::text) +FROM T +ORDER BY pk LIMIT 10; + +-- Aggregate function +SELECT SUM(c_bigint), MAX(c_text COLLATE "C" ), MIN(c_text COLLATE "C") FROM T; + +-- ORDER BY +SELECT * FROM T ORDER BY c_bigint, c_text, pk LIMIT 10; + +EXPLAIN (VERBOSE TRUE, COSTS FALSE) +SELECT * FROM T ORDER BY c_bigint, c_text, pk LIMIT 10; + +-- LIMIT +SELECT * FROM T WHERE c_bigint > -1 ORDER BY c_bigint, c_text, pk LIMIT 10; + +EXPLAIN (VERBOSE TRUE, COSTS FALSE) +SELECT * FROM T WHERE c_bigint > -1 ORDER BY c_bigint, c_text, pk LIMIT 10; + +-- DELETE with RETURNING +DELETE FROM T WHERE pk BETWEEN 10 AND 20 RETURNING *; +EXPLAIN (VERBOSE TRUE, COSTS FALSE) +DELETE FROM T WHERE pk BETWEEN 10 AND 20 RETURNING *; + +-- UPDATE +UPDATE T SET c_text = '"' || c_text || '"' WHERE pk < 10; +SELECT * FROM T WHERE c_text LIKE '"%"' ORDER BY PK; + +SELECT comp(); + +DROP TABLE T; + + +-- Combine with other DDL +CREATE TABLE T(pk INT NOT NULL PRIMARY KEY); + +SELECT set('t'); + +INSERT INTO T VALUES (1), (2); + +ALTER TABLE T ADD COLUMN c_int INT NOT NULL DEFAULT -1; + +INSERT INTO T VALUES (3), (4); + +ALTER TABLE T ADD COLUMN c_text TEXT DEFAULT 'Hello'; + +INSERT INTO T VALUES (5), (6); + +ALTER TABLE T ALTER COLUMN c_text SET DEFAULT 'world', + ALTER COLUMN c_int SET DEFAULT 1; + +INSERT INTO T VALUES (7), (8); + +SELECT * FROM T ORDER BY pk; + +-- Add an index +CREATE INDEX i ON T(c_int, c_text); + +SELECT c_text FROM T WHERE c_int = -1; + +SELECT comp(); + +-- query to exercise expand_tuple function +CREATE TABLE t1 AS +SELECT 1::int AS a , 2::int AS b +FROM generate_series(1,20) q; + +ALTER TABLE t1 ADD COLUMN c text; + +SELECT a, + stddev(cast((SELECT sum(1) FROM generate_series(1,20) x) AS float4)) + OVER (PARTITION BY a,b,c ORDER BY b) + AS z +FROM t1; + +DROP TABLE T; + +-- test that we account for missing columns without defaults correctly +-- in expand_tuple, and that rows are correctly expanded for triggers + +CREATE FUNCTION test_trigger() +RETURNS trigger +LANGUAGE plpgsql +AS $$ + +begin + raise notice 'old tuple: %', to_json(OLD)::text; + if TG_OP = 'DELETE' + then + return OLD; + else + return NEW; + end if; +end; + +$$; + +-- 2 new columns, both have defaults +CREATE TABLE t (id serial PRIMARY KEY, a int, b int, c int); +INSERT INTO t (a,b,c) VALUES (1,2,3); +ALTER TABLE t ADD COLUMN x int NOT NULL DEFAULT 4; +ALTER TABLE t ADD COLUMN y int NOT NULL DEFAULT 5; +CREATE TRIGGER a BEFORE UPDATE ON t FOR EACH ROW EXECUTE PROCEDURE test_trigger(); +SELECT * FROM t; +UPDATE t SET y = 2; +SELECT * FROM t; +DROP TABLE t; + +-- 2 new columns, first has default +CREATE TABLE t (id serial PRIMARY KEY, a int, b int, c int); +INSERT INTO t (a,b,c) VALUES (1,2,3); +ALTER TABLE t ADD COLUMN x int NOT NULL DEFAULT 4; +ALTER TABLE t ADD COLUMN y int; +CREATE TRIGGER a BEFORE UPDATE ON t FOR EACH ROW EXECUTE PROCEDURE test_trigger(); +SELECT * FROM t; +UPDATE t SET y = 2; +SELECT * FROM t; +DROP TABLE t; + +-- 2 new columns, second has default +CREATE TABLE t (id serial PRIMARY KEY, a int, b int, c int); +INSERT INTO t (a,b,c) VALUES (1,2,3); +ALTER TABLE t ADD COLUMN x int; +ALTER TABLE t ADD COLUMN y int NOT NULL DEFAULT 5; 
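In the case just set up ("2 new columns, second has default"), only y acquires a stored fast-default value; x is simply recorded as absent from the old tuples. A small sketch of how that catalog state could be inspected, assuming the pg_attribute columns introduced for this feature (atthasmissing, attmissingval):

    SELECT attname, atthasmissing, attmissingval
      FROM pg_attribute
     WHERE attrelid = 't'::regclass AND attname IN ('x', 'y');
    -- x should show atthasmissing = f (old rows expand it as NULL),
    -- y should show atthasmissing = t with attmissingval = {5}

The trigger defined next prints OLD via to_json(), so expand_tuple has to fill in exactly those values for the row inserted before the ALTERs.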
+CREATE TRIGGER a BEFORE UPDATE ON t FOR EACH ROW EXECUTE PROCEDURE test_trigger(); +SELECT * FROM t; +UPDATE t SET y = 2; +SELECT * FROM t; +DROP TABLE t; + +-- 2 new columns, neither has default +CREATE TABLE t (id serial PRIMARY KEY, a int, b int, c int); +INSERT INTO t (a,b,c) VALUES (1,2,3); +ALTER TABLE t ADD COLUMN x int; +ALTER TABLE t ADD COLUMN y int; +CREATE TRIGGER a BEFORE UPDATE ON t FOR EACH ROW EXECUTE PROCEDURE test_trigger(); +SELECT * FROM t; +UPDATE t SET y = 2; +SELECT * FROM t; +DROP TABLE t; + +-- same as last 4 tests but here the last original column has a NULL value +-- 2 new columns, both have defaults +CREATE TABLE t (id serial PRIMARY KEY, a int, b int, c int); +INSERT INTO t (a,b,c) VALUES (1,2,NULL); +ALTER TABLE t ADD COLUMN x int NOT NULL DEFAULT 4; +ALTER TABLE t ADD COLUMN y int NOT NULL DEFAULT 5; +CREATE TRIGGER a BEFORE UPDATE ON t FOR EACH ROW EXECUTE PROCEDURE test_trigger(); +SELECT * FROM t; +UPDATE t SET y = 2; +SELECT * FROM t; +DROP TABLE t; + +-- 2 new columns, first has default +CREATE TABLE t (id serial PRIMARY KEY, a int, b int, c int); +INSERT INTO t (a,b,c) VALUES (1,2,NULL); +ALTER TABLE t ADD COLUMN x int NOT NULL DEFAULT 4; +ALTER TABLE t ADD COLUMN y int; +CREATE TRIGGER a BEFORE UPDATE ON t FOR EACH ROW EXECUTE PROCEDURE test_trigger(); +SELECT * FROM t; +UPDATE t SET y = 2; +SELECT * FROM t; +DROP TABLE t; + +-- 2 new columns, second has default +CREATE TABLE t (id serial PRIMARY KEY, a int, b int, c int); +INSERT INTO t (a,b,c) VALUES (1,2,NULL); +ALTER TABLE t ADD COLUMN x int; +ALTER TABLE t ADD COLUMN y int NOT NULL DEFAULT 5; +CREATE TRIGGER a BEFORE UPDATE ON t FOR EACH ROW EXECUTE PROCEDURE test_trigger(); +SELECT * FROM t; +UPDATE t SET y = 2; +SELECT * FROM t; +DROP TABLE t; + +-- 2 new columns, neither has default +CREATE TABLE t (id serial PRIMARY KEY, a int, b int, c int); +INSERT INTO t (a,b,c) VALUES (1,2,NULL); +ALTER TABLE t ADD COLUMN x int; +ALTER TABLE t ADD COLUMN y int; +CREATE TRIGGER a BEFORE UPDATE ON t FOR EACH ROW EXECUTE PROCEDURE test_trigger(); +SELECT * FROM t; +UPDATE t SET y = 2; +SELECT * FROM t; +DROP TABLE t; + +-- make sure expanded tuple has correct self pointer +-- it will be required by the RI trigger doing the cascading delete + +CREATE TABLE leader (a int PRIMARY KEY, b int); +CREATE TABLE follower (a int REFERENCES leader ON DELETE CASCADE, b int); +INSERT INTO leader VALUES (1, 1), (2, 2); +ALTER TABLE leader ADD c int; +ALTER TABLE leader DROP c; +DELETE FROM leader; + +-- cleanup +DROP TABLE follower; +DROP TABLE leader; +DROP FUNCTION test_trigger(); +DROP TABLE t1; +DROP FUNCTION set(name); +DROP FUNCTION comp(); +DROP TABLE m; +DROP TABLE has_volatile; +DROP EVENT TRIGGER has_volatile_rewrite; +DROP FUNCTION log_rewrite; +DROP SCHEMA fast_default; + +-- Leave a table with an active fast default in place, for pg_upgrade testing +set search_path = public; +create table has_fast_default(f1 int); +insert into has_fast_default values(1); +alter table has_fast_default add column f2 int default 42; +table has_fast_default; diff --git a/src/test/regress/sql/float8.sql b/src/test/regress/sql/float8.sql index 215e7a4784..eeebddd4b7 100644 --- a/src/test/regress/sql/float8.sql +++ b/src/test/regress/sql/float8.sql @@ -108,6 +108,12 @@ SELECT '' AS three, f.f1, |/f.f1 AS sqrt_f1 -- power SELECT power(float8 '144', float8 '0.5'); +SELECT power(float8 'NaN', float8 '0.5'); +SELECT power(float8 '144', float8 'NaN'); +SELECT power(float8 'NaN', float8 'NaN'); +SELECT power(float8 '-1', float8 'NaN'); 
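The new power() tests above target the POSIX treatment of NaN inputs: the result is NaN whenever either argument is NaN, with two exemptions that the next two statements check. A short sketch of the intended outcomes (my reading of the POSIX pow() rule, not output copied from this patch):

    SELECT power(float8 'NaN', float8 '0.5');  -- NaN: a NaN base with a non-zero exponent stays NaN
    SELECT power(float8 '1',   float8 'NaN');  -- 1: one raised to anything, including NaN, is 1
    SELECT power(float8 'NaN', float8 '0');    -- 1: anything, including NaN, raised to zero is 1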
+SELECT power(float8 '1', float8 'NaN'); +SELECT power(float8 'NaN', float8 '0'); -- take exp of ln(f.f1) SELECT '' AS three, f.f1, exp(ln(f.f1)) AS exp_ln_f1 diff --git a/src/test/regress/sql/foreign_data.sql b/src/test/regress/sql/foreign_data.sql index ebe8ffbffe..dab9b62900 100644 --- a/src/test/regress/sql/foreign_data.sql +++ b/src/test/regress/sql/foreign_data.sql @@ -51,6 +51,13 @@ RESET ROLE; CREATE FOREIGN DATA WRAPPER foo VALIDATOR postgresql_fdw_validator; \dew+ +-- HANDLER related checks +CREATE FUNCTION invalid_fdw_handler() RETURNS int LANGUAGE SQL AS 'SELECT 1;'; +CREATE FOREIGN DATA WRAPPER test_fdw HANDLER invalid_fdw_handler; -- ERROR +CREATE FOREIGN DATA WRAPPER test_fdw HANDLER test_fdw_handler HANDLER invalid_fdw_handler; -- ERROR +CREATE FOREIGN DATA WRAPPER test_fdw HANDLER test_fdw_handler; +DROP FOREIGN DATA WRAPPER test_fdw; + -- ALTER FOREIGN DATA WRAPPER ALTER FOREIGN DATA WRAPPER foo; -- ERROR ALTER FOREIGN DATA WRAPPER foo VALIDATOR bar; -- ERROR @@ -88,6 +95,12 @@ ALTER FOREIGN DATA WRAPPER foo RENAME TO foo1; \dew+ ALTER FOREIGN DATA WRAPPER foo1 RENAME TO foo; +-- HANDLER related checks +ALTER FOREIGN DATA WRAPPER foo HANDLER invalid_fdw_handler; -- ERROR +ALTER FOREIGN DATA WRAPPER foo HANDLER test_fdw_handler HANDLER anything; -- ERROR +ALTER FOREIGN DATA WRAPPER foo HANDLER test_fdw_handler; +DROP FUNCTION invalid_fdw_handler(); + -- DROP FOREIGN DATA WRAPPER DROP FOREIGN DATA WRAPPER nonexistent; -- ERROR DROP FOREIGN DATA WRAPPER IF EXISTS nonexistent; @@ -303,6 +316,12 @@ CREATE INDEX id_ft1_c2 ON ft1 (c2); -- ERROR SELECT * FROM ft1; -- ERROR EXPLAIN SELECT * FROM ft1; -- ERROR +CREATE TABLE lt1 (a INT) PARTITION BY RANGE (a); +CREATE FOREIGN TABLE ft_part1 + PARTITION OF lt1 FOR VALUES FROM (0) TO (1000) SERVER s0; +CREATE INDEX ON lt1 (a); -- ERROR +DROP TABLE lt1; + -- ALTER FOREIGN TABLE COMMENT ON FOREIGN TABLE ft1 IS 'foreign table'; COMMENT ON FOREIGN TABLE ft1 IS NULL; @@ -566,25 +585,25 @@ DROP TRIGGER trigtest_after_row ON foreign_schema.foreign_table_1; DROP FUNCTION dummy_trigger(); -- Table inheritance -CREATE TABLE pt1 ( +CREATE TABLE fd_pt1 ( c1 integer NOT NULL, c2 text, c3 date ); -CREATE FOREIGN TABLE ft2 () INHERITS (pt1) +CREATE FOREIGN TABLE ft2 () INHERITS (fd_pt1) SERVER s0 OPTIONS (delimiter ',', quote '"', "be quoted" 'value'); -\d+ pt1 +\d+ fd_pt1 \d+ ft2 DROP FOREIGN TABLE ft2; -\d+ pt1 +\d+ fd_pt1 CREATE FOREIGN TABLE ft2 ( c1 integer NOT NULL, c2 text, c3 date ) SERVER s0 OPTIONS (delimiter ',', quote '"', "be quoted" 'value'); \d+ ft2 -ALTER FOREIGN TABLE ft2 INHERIT pt1; -\d+ pt1 +ALTER FOREIGN TABLE ft2 INHERIT fd_pt1; +\d+ fd_pt1 \d+ ft2 CREATE TABLE ct3() INHERITS(ft2); CREATE FOREIGN TABLE ft3 ( @@ -598,50 +617,50 @@ CREATE FOREIGN TABLE ft3 ( \d+ ft3 -- add attributes recursively -ALTER TABLE pt1 ADD COLUMN c4 integer; -ALTER TABLE pt1 ADD COLUMN c5 integer DEFAULT 0; -ALTER TABLE pt1 ADD COLUMN c6 integer; -ALTER TABLE pt1 ADD COLUMN c7 integer NOT NULL; -ALTER TABLE pt1 ADD COLUMN c8 integer; -\d+ pt1 +ALTER TABLE fd_pt1 ADD COLUMN c4 integer; +ALTER TABLE fd_pt1 ADD COLUMN c5 integer DEFAULT 0; +ALTER TABLE fd_pt1 ADD COLUMN c6 integer; +ALTER TABLE fd_pt1 ADD COLUMN c7 integer NOT NULL; +ALTER TABLE fd_pt1 ADD COLUMN c8 integer; +\d+ fd_pt1 \d+ ft2 \d+ ct3 \d+ ft3 -- alter attributes recursively -ALTER TABLE pt1 ALTER COLUMN c4 SET DEFAULT 0; -ALTER TABLE pt1 ALTER COLUMN c5 DROP DEFAULT; -ALTER TABLE pt1 ALTER COLUMN c6 SET NOT NULL; -ALTER TABLE pt1 ALTER COLUMN c7 DROP NOT NULL; -ALTER TABLE pt1 ALTER 
COLUMN c8 TYPE char(10) USING '0'; -- ERROR -ALTER TABLE pt1 ALTER COLUMN c8 TYPE char(10); -ALTER TABLE pt1 ALTER COLUMN c8 SET DATA TYPE text; -ALTER TABLE pt1 ALTER COLUMN c1 SET STATISTICS 10000; -ALTER TABLE pt1 ALTER COLUMN c1 SET (n_distinct = 100); -ALTER TABLE pt1 ALTER COLUMN c8 SET STATISTICS -1; -ALTER TABLE pt1 ALTER COLUMN c8 SET STORAGE EXTERNAL; -\d+ pt1 +ALTER TABLE fd_pt1 ALTER COLUMN c4 SET DEFAULT 0; +ALTER TABLE fd_pt1 ALTER COLUMN c5 DROP DEFAULT; +ALTER TABLE fd_pt1 ALTER COLUMN c6 SET NOT NULL; +ALTER TABLE fd_pt1 ALTER COLUMN c7 DROP NOT NULL; +ALTER TABLE fd_pt1 ALTER COLUMN c8 TYPE char(10) USING '0'; -- ERROR +ALTER TABLE fd_pt1 ALTER COLUMN c8 TYPE char(10); +ALTER TABLE fd_pt1 ALTER COLUMN c8 SET DATA TYPE text; +ALTER TABLE fd_pt1 ALTER COLUMN c1 SET STATISTICS 10000; +ALTER TABLE fd_pt1 ALTER COLUMN c1 SET (n_distinct = 100); +ALTER TABLE fd_pt1 ALTER COLUMN c8 SET STATISTICS -1; +ALTER TABLE fd_pt1 ALTER COLUMN c8 SET STORAGE EXTERNAL; +\d+ fd_pt1 \d+ ft2 -- drop attributes recursively -ALTER TABLE pt1 DROP COLUMN c4; -ALTER TABLE pt1 DROP COLUMN c5; -ALTER TABLE pt1 DROP COLUMN c6; -ALTER TABLE pt1 DROP COLUMN c7; -ALTER TABLE pt1 DROP COLUMN c8; -\d+ pt1 +ALTER TABLE fd_pt1 DROP COLUMN c4; +ALTER TABLE fd_pt1 DROP COLUMN c5; +ALTER TABLE fd_pt1 DROP COLUMN c6; +ALTER TABLE fd_pt1 DROP COLUMN c7; +ALTER TABLE fd_pt1 DROP COLUMN c8; +\d+ fd_pt1 \d+ ft2 -- add constraints recursively -ALTER TABLE pt1 ADD CONSTRAINT pt1chk1 CHECK (c1 > 0) NO INHERIT; -ALTER TABLE pt1 ADD CONSTRAINT pt1chk2 CHECK (c2 <> ''); +ALTER TABLE fd_pt1 ADD CONSTRAINT fd_pt1chk1 CHECK (c1 > 0) NO INHERIT; +ALTER TABLE fd_pt1 ADD CONSTRAINT fd_pt1chk2 CHECK (c2 <> ''); -- connoinherit should be true for NO INHERIT constraint SELECT relname, conname, contype, conislocal, coninhcount, connoinherit FROM pg_class AS pc JOIN pg_constraint AS pgc ON (conrelid = pc.oid) - WHERE pc.relname = 'pt1' + WHERE pc.relname = 'fd_pt1' ORDER BY 1,2; -- child does not inherit NO INHERIT constraints -\d+ pt1 +\d+ fd_pt1 \d+ ft2 \set VERBOSITY terse DROP FOREIGN TABLE ft2; -- ERROR @@ -653,50 +672,50 @@ CREATE FOREIGN TABLE ft2 ( c3 date ) SERVER s0 OPTIONS (delimiter ',', quote '"', "be quoted" 'value'); -- child must have parent's INHERIT constraints -ALTER FOREIGN TABLE ft2 INHERIT pt1; -- ERROR -ALTER FOREIGN TABLE ft2 ADD CONSTRAINT pt1chk2 CHECK (c2 <> ''); -ALTER FOREIGN TABLE ft2 INHERIT pt1; +ALTER FOREIGN TABLE ft2 INHERIT fd_pt1; -- ERROR +ALTER FOREIGN TABLE ft2 ADD CONSTRAINT fd_pt1chk2 CHECK (c2 <> ''); +ALTER FOREIGN TABLE ft2 INHERIT fd_pt1; -- child does not inherit NO INHERIT constraints -\d+ pt1 +\d+ fd_pt1 \d+ ft2 -- drop constraints recursively -ALTER TABLE pt1 DROP CONSTRAINT pt1chk1 CASCADE; -ALTER TABLE pt1 DROP CONSTRAINT pt1chk2 CASCADE; +ALTER TABLE fd_pt1 DROP CONSTRAINT fd_pt1chk1 CASCADE; +ALTER TABLE fd_pt1 DROP CONSTRAINT fd_pt1chk2 CASCADE; -- NOT VALID case -INSERT INTO pt1 VALUES (1, 'pt1'::text, '1994-01-01'::date); -ALTER TABLE pt1 ADD CONSTRAINT pt1chk3 CHECK (c2 <> '') NOT VALID; -\d+ pt1 +INSERT INTO fd_pt1 VALUES (1, 'fd_pt1'::text, '1994-01-01'::date); +ALTER TABLE fd_pt1 ADD CONSTRAINT fd_pt1chk3 CHECK (c2 <> '') NOT VALID; +\d+ fd_pt1 \d+ ft2 -- VALIDATE CONSTRAINT need do nothing on foreign tables -ALTER TABLE pt1 VALIDATE CONSTRAINT pt1chk3; -\d+ pt1 +ALTER TABLE fd_pt1 VALIDATE CONSTRAINT fd_pt1chk3; +\d+ fd_pt1 \d+ ft2 -- OID system column -ALTER TABLE pt1 SET WITH OIDS; -\d+ pt1 +ALTER TABLE fd_pt1 SET WITH OIDS; +\d+ fd_pt1 \d+ ft2 ALTER TABLE ft2 SET 
WITHOUT OIDS; -- ERROR -ALTER TABLE pt1 SET WITHOUT OIDS; -\d+ pt1 +ALTER TABLE fd_pt1 SET WITHOUT OIDS; +\d+ fd_pt1 \d+ ft2 -- changes name of an attribute recursively -ALTER TABLE pt1 RENAME COLUMN c1 TO f1; -ALTER TABLE pt1 RENAME COLUMN c2 TO f2; -ALTER TABLE pt1 RENAME COLUMN c3 TO f3; +ALTER TABLE fd_pt1 RENAME COLUMN c1 TO f1; +ALTER TABLE fd_pt1 RENAME COLUMN c2 TO f2; +ALTER TABLE fd_pt1 RENAME COLUMN c3 TO f3; -- changes name of a constraint recursively -ALTER TABLE pt1 RENAME CONSTRAINT pt1chk3 TO f2_check; -\d+ pt1 +ALTER TABLE fd_pt1 RENAME CONSTRAINT fd_pt1chk3 TO f2_check; +\d+ fd_pt1 \d+ ft2 -- TRUNCATE doesn't work on foreign tables, either directly or recursively TRUNCATE ft2; -- ERROR -TRUNCATE pt1; -- ERROR +TRUNCATE fd_pt1; -- ERROR -DROP TABLE pt1 CASCADE; +DROP TABLE fd_pt1 CASCADE; -- IMPORT FOREIGN SCHEMA IMPORT FOREIGN SCHEMA s1 FROM SERVER s9 INTO public; -- ERROR @@ -716,75 +735,85 @@ DROP OWNED BY regress_test_role2; DROP OWNED BY regress_test_role2 CASCADE; -- Foreign partition DDL stuff -CREATE TABLE pt2 ( +CREATE TABLE fd_pt2 ( c1 integer NOT NULL, c2 text, c3 date ) PARTITION BY LIST (c1); -CREATE FOREIGN TABLE pt2_1 PARTITION OF pt2 FOR VALUES IN (1) +CREATE FOREIGN TABLE fd_pt2_1 PARTITION OF fd_pt2 FOR VALUES IN (1) SERVER s0 OPTIONS (delimiter ',', quote '"', "be quoted" 'value'); -\d+ pt2 -\d+ pt2_1 +\d+ fd_pt2 +\d+ fd_pt2_1 -- partition cannot have additional columns -DROP FOREIGN TABLE pt2_1; -CREATE FOREIGN TABLE pt2_1 ( +DROP FOREIGN TABLE fd_pt2_1; +CREATE FOREIGN TABLE fd_pt2_1 ( c1 integer NOT NULL, c2 text, c3 date, c4 char ) SERVER s0 OPTIONS (delimiter ',', quote '"', "be quoted" 'value'); -\d+ pt2_1 -ALTER TABLE pt2 ATTACH PARTITION pt2_1 FOR VALUES IN (1); -- ERROR +\d+ fd_pt2_1 +ALTER TABLE fd_pt2 ATTACH PARTITION fd_pt2_1 FOR VALUES IN (1); -- ERROR -DROP FOREIGN TABLE pt2_1; -\d+ pt2 -CREATE FOREIGN TABLE pt2_1 ( +DROP FOREIGN TABLE fd_pt2_1; +\d+ fd_pt2 +CREATE FOREIGN TABLE fd_pt2_1 ( c1 integer NOT NULL, c2 text, c3 date ) SERVER s0 OPTIONS (delimiter ',', quote '"', "be quoted" 'value'); -\d+ pt2_1 +\d+ fd_pt2_1 -- no attach partition validation occurs for foreign tables -ALTER TABLE pt2 ATTACH PARTITION pt2_1 FOR VALUES IN (1); -\d+ pt2 -\d+ pt2_1 +ALTER TABLE fd_pt2 ATTACH PARTITION fd_pt2_1 FOR VALUES IN (1); +\d+ fd_pt2 +\d+ fd_pt2_1 -- cannot add column to a partition -ALTER TABLE pt2_1 ADD c4 char; +ALTER TABLE fd_pt2_1 ADD c4 char; -- ok to have a partition's own constraints though -ALTER TABLE pt2_1 ALTER c3 SET NOT NULL; -ALTER TABLE pt2_1 ADD CONSTRAINT p21chk CHECK (c2 <> ''); -\d+ pt2 -\d+ pt2_1 +ALTER TABLE fd_pt2_1 ALTER c3 SET NOT NULL; +ALTER TABLE fd_pt2_1 ADD CONSTRAINT p21chk CHECK (c2 <> ''); +\d+ fd_pt2 +\d+ fd_pt2_1 -- cannot drop inherited NOT NULL constraint from a partition -ALTER TABLE pt2_1 ALTER c1 DROP NOT NULL; +ALTER TABLE fd_pt2_1 ALTER c1 DROP NOT NULL; -- partition must have parent's constraints -ALTER TABLE pt2 DETACH PARTITION pt2_1; -ALTER TABLE pt2 ALTER c2 SET NOT NULL; -\d+ pt2 -\d+ pt2_1 -ALTER TABLE pt2 ATTACH PARTITION pt2_1 FOR VALUES IN (1); -- ERROR -ALTER FOREIGN TABLE pt2_1 ALTER c2 SET NOT NULL; -ALTER TABLE pt2 ATTACH PARTITION pt2_1 FOR VALUES IN (1); - -ALTER TABLE pt2 DETACH PARTITION pt2_1; -ALTER TABLE pt2 ADD CONSTRAINT pt2chk1 CHECK (c1 > 0); -\d+ pt2 -\d+ pt2_1 -ALTER TABLE pt2 ATTACH PARTITION pt2_1 FOR VALUES IN (1); -- ERROR -ALTER FOREIGN TABLE pt2_1 ADD CONSTRAINT pt2chk1 CHECK (c1 > 0); -ALTER TABLE pt2 ATTACH PARTITION pt2_1 FOR VALUES IN (1); +ALTER TABLE fd_pt2 DETACH 
PARTITION fd_pt2_1; +ALTER TABLE fd_pt2 ALTER c2 SET NOT NULL; +\d+ fd_pt2 +\d+ fd_pt2_1 +ALTER TABLE fd_pt2 ATTACH PARTITION fd_pt2_1 FOR VALUES IN (1); -- ERROR +ALTER FOREIGN TABLE fd_pt2_1 ALTER c2 SET NOT NULL; +ALTER TABLE fd_pt2 ATTACH PARTITION fd_pt2_1 FOR VALUES IN (1); + +ALTER TABLE fd_pt2 DETACH PARTITION fd_pt2_1; +ALTER TABLE fd_pt2 ADD CONSTRAINT fd_pt2chk1 CHECK (c1 > 0); +\d+ fd_pt2 +\d+ fd_pt2_1 +ALTER TABLE fd_pt2 ATTACH PARTITION fd_pt2_1 FOR VALUES IN (1); -- ERROR +ALTER FOREIGN TABLE fd_pt2_1 ADD CONSTRAINT fd_pt2chk1 CHECK (c1 > 0); +ALTER TABLE fd_pt2 ATTACH PARTITION fd_pt2_1 FOR VALUES IN (1); -- TRUNCATE doesn't work on foreign tables, either directly or recursively -TRUNCATE pt2_1; -- ERROR -TRUNCATE pt2; -- ERROR - -DROP FOREIGN TABLE pt2_1; -DROP TABLE pt2; +TRUNCATE fd_pt2_1; -- ERROR +TRUNCATE fd_pt2; -- ERROR + +DROP FOREIGN TABLE fd_pt2_1; +DROP TABLE fd_pt2; + +-- foreign table cannot be part of partition tree made of temporary +-- relations. +CREATE TEMP TABLE temp_parted (a int) PARTITION BY LIST (a); +CREATE FOREIGN TABLE foreign_part PARTITION OF temp_parted DEFAULT + SERVER s0; -- ERROR +CREATE FOREIGN TABLE foreign_part (a int) SERVER s0; +ALTER TABLE temp_parted ATTACH PARTITION foreign_part DEFAULT; -- ERROR +DROP FOREIGN TABLE foreign_part; +DROP TABLE temp_parted; -- Cleanup DROP SCHEMA foreign_schema CASCADE; diff --git a/src/test/regress/sql/foreign_key.sql b/src/test/regress/sql/foreign_key.sql index 5f19dad03c..068ab2aab7 100644 --- a/src/test/regress/sql/foreign_key.sql +++ b/src/test/regress/sql/foreign_key.sql @@ -1020,6 +1020,16 @@ create rule r1 as on delete to t1 do delete from t2 where t2.b = old.a; explain (costs off) delete from t1 where a = 1; delete from t1 where a = 1; +-- Test a primary key with attributes located in later attnum positions +-- compared to the fk attributes. +create table pktable2 (a int, b int, c int, d int, e int, primary key (d, e)); +create table fktable2 (d int, e int, foreign key (d, e) references pktable2); +insert into pktable2 values (1, 2, 3, 4, 5); +insert into fktable2 values (4, 5); +delete from pktable2; +update pktable2 set d = 5; +drop table pktable2, fktable2; + -- -- Test deferred FK check on a tuple deleted by a rolled-back subtransaction -- @@ -1055,3 +1065,227 @@ alter table fktable2 drop constraint fktable2_f1_fkey; commit; drop table pktable2, fktable2; + + +-- +-- Foreign keys and partitioned tables +-- + +-- partitioned table in the referenced side are not allowed +CREATE TABLE fk_partitioned_pk (a int, b int, primary key (a, b)) + PARTITION BY RANGE (a, b); +-- verify with create table first ... +CREATE TABLE fk_notpartitioned_fk (a int, b int, + FOREIGN KEY (a, b) REFERENCES fk_partitioned_pk); +-- and then with alter table. 
+CREATE TABLE fk_notpartitioned_fk_2 (a int, b int); +ALTER TABLE fk_notpartitioned_fk_2 ADD FOREIGN KEY (a, b) + REFERENCES fk_partitioned_pk; +DROP TABLE fk_partitioned_pk, fk_notpartitioned_fk_2; + +-- Creation of a partitioned hierarchy with irregular definitions +CREATE TABLE fk_notpartitioned_pk (fdrop1 int, a int, fdrop2 int, b int, + PRIMARY KEY (a, b)); +ALTER TABLE fk_notpartitioned_pk DROP COLUMN fdrop1, DROP COLUMN fdrop2; +CREATE TABLE fk_partitioned_fk (b int, fdrop1 int, a int) PARTITION BY RANGE (a, b); +ALTER TABLE fk_partitioned_fk DROP COLUMN fdrop1; +CREATE TABLE fk_partitioned_fk_1 (fdrop1 int, fdrop2 int, a int, fdrop3 int, b int); +ALTER TABLE fk_partitioned_fk_1 DROP COLUMN fdrop1, DROP COLUMN fdrop2, DROP COLUMN fdrop3; +ALTER TABLE fk_partitioned_fk ATTACH PARTITION fk_partitioned_fk_1 FOR VALUES FROM (0,0) TO (1000,1000); +ALTER TABLE fk_partitioned_fk ADD FOREIGN KEY (a, b) REFERENCES fk_notpartitioned_pk; +CREATE TABLE fk_partitioned_fk_2 (b int, fdrop1 int, fdrop2 int, a int); +ALTER TABLE fk_partitioned_fk_2 DROP COLUMN fdrop1, DROP COLUMN fdrop2; +ALTER TABLE fk_partitioned_fk ATTACH PARTITION fk_partitioned_fk_2 FOR VALUES FROM (1000,1000) TO (2000,2000); + +CREATE TABLE fk_partitioned_fk_3 (fdrop1 int, fdrop2 int, fdrop3 int, fdrop4 int, b int, a int) + PARTITION BY HASH (a); +ALTER TABLE fk_partitioned_fk_3 DROP COLUMN fdrop1, DROP COLUMN fdrop2, + DROP COLUMN fdrop3, DROP COLUMN fdrop4; +CREATE TABLE fk_partitioned_fk_3_0 PARTITION OF fk_partitioned_fk_3 FOR VALUES WITH (MODULUS 5, REMAINDER 0); +CREATE TABLE fk_partitioned_fk_3_1 PARTITION OF fk_partitioned_fk_3 FOR VALUES WITH (MODULUS 5, REMAINDER 1); +ALTER TABLE fk_partitioned_fk ATTACH PARTITION fk_partitioned_fk_3 + FOR VALUES FROM (2000,2000) TO (3000,3000); + +-- Creating a foreign key with ONLY on a partitioned table referencing +-- a non-partitioned table fails. +ALTER TABLE ONLY fk_partitioned_fk ADD FOREIGN KEY (a, b) + REFERENCES fk_notpartitioned_pk; +-- Adding a NOT VALID foreign key on a partitioned table referencing +-- a non-partitioned table fails. 
+ALTER TABLE fk_partitioned_fk ADD FOREIGN KEY (a, b) + REFERENCES fk_notpartitioned_pk NOT VALID; + +-- these inserts, targeting both the partition directly as well as the +-- partitioned table, should all fail +INSERT INTO fk_partitioned_fk (a,b) VALUES (500, 501); +INSERT INTO fk_partitioned_fk_1 (a,b) VALUES (500, 501); +INSERT INTO fk_partitioned_fk (a,b) VALUES (1500, 1501); +INSERT INTO fk_partitioned_fk_2 (a,b) VALUES (1500, 1501); +INSERT INTO fk_partitioned_fk (a,b) VALUES (2500, 2502); +INSERT INTO fk_partitioned_fk_3 (a,b) VALUES (2500, 2502); +INSERT INTO fk_partitioned_fk (a,b) VALUES (2501, 2503); +INSERT INTO fk_partitioned_fk_3 (a,b) VALUES (2501, 2503); + +-- but if we insert the values that make them valid, then they work +INSERT INTO fk_notpartitioned_pk VALUES (500, 501), (1500, 1501), + (2500, 2502), (2501, 2503); +INSERT INTO fk_partitioned_fk (a,b) VALUES (500, 501); +INSERT INTO fk_partitioned_fk (a,b) VALUES (1500, 1501); +INSERT INTO fk_partitioned_fk (a,b) VALUES (2500, 2502); +INSERT INTO fk_partitioned_fk (a,b) VALUES (2501, 2503); + +-- this update fails because there is no referenced row +UPDATE fk_partitioned_fk SET a = a + 1 WHERE a = 2501; +-- but we can fix it thusly: +INSERT INTO fk_notpartitioned_pk (a,b) VALUES (2502, 2503); +UPDATE fk_partitioned_fk SET a = a + 1 WHERE a = 2501; + +-- these updates would leave lingering rows in the referencing table; disallow +UPDATE fk_notpartitioned_pk SET b = 502 WHERE a = 500; +UPDATE fk_notpartitioned_pk SET b = 1502 WHERE a = 1500; +UPDATE fk_notpartitioned_pk SET b = 2504 WHERE a = 2500; +ALTER TABLE fk_partitioned_fk DROP CONSTRAINT fk_partitioned_fk_a_fkey; +-- done. +DROP TABLE fk_notpartitioned_pk, fk_partitioned_fk; + +-- Altering a type referenced by a foreign key needs to drop/recreate the FK. +-- Ensure that works. +CREATE TABLE fk_notpartitioned_pk (a INT, PRIMARY KEY(a), CHECK (a > 0)); +CREATE TABLE fk_partitioned_fk (a INT REFERENCES fk_notpartitioned_pk(a) PRIMARY KEY) PARTITION BY RANGE(a); +CREATE TABLE fk_partitioned_fk_1 PARTITION OF fk_partitioned_fk FOR VALUES FROM (MINVALUE) TO (MAXVALUE); +INSERT INTO fk_notpartitioned_pk VALUES (1); +INSERT INTO fk_partitioned_fk VALUES (1); +ALTER TABLE fk_notpartitioned_pk ALTER COLUMN a TYPE bigint; +DELETE FROM fk_notpartitioned_pk WHERE a = 1; +DROP TABLE fk_notpartitioned_pk, fk_partitioned_fk; + +-- Test some other exotic foreign key features: MATCH SIMPLE, ON UPDATE/DELETE +-- actions +CREATE TABLE fk_notpartitioned_pk (a int, b int, primary key (a, b)); +CREATE TABLE fk_partitioned_fk (a int default 2501, b int default 142857) PARTITION BY LIST (a); +CREATE TABLE fk_partitioned_fk_1 PARTITION OF fk_partitioned_fk FOR VALUES IN (NULL,500,501,502); +ALTER TABLE fk_partitioned_fk ADD FOREIGN KEY (a, b) + REFERENCES fk_notpartitioned_pk MATCH SIMPLE + ON DELETE SET NULL ON UPDATE SET NULL; +CREATE TABLE fk_partitioned_fk_2 PARTITION OF fk_partitioned_fk FOR VALUES IN (1500,1502); +CREATE TABLE fk_partitioned_fk_3 (a int, b int); +ALTER TABLE fk_partitioned_fk ATTACH PARTITION fk_partitioned_fk_3 FOR VALUES IN (2500,2501,2502,2503); + +-- this insert fails +INSERT INTO fk_partitioned_fk (a, b) VALUES (2502, 2503); +INSERT INTO fk_partitioned_fk_3 (a, b) VALUES (2502, 2503); +-- but since the FK is MATCH SIMPLE, this one doesn't +INSERT INTO fk_partitioned_fk_3 (a, b) VALUES (2502, NULL); +-- now create the referenced row ... 
+INSERT INTO fk_notpartitioned_pk VALUES (2502, 2503); +-- and now the same insert works +INSERT INTO fk_partitioned_fk_3 (a, b) VALUES (2502, 2503); +-- this always works +INSERT INTO fk_partitioned_fk (a,b) VALUES (NULL, NULL); + +-- ON UPDATE SET NULL +SELECT tableoid::regclass, a, b FROM fk_partitioned_fk WHERE b IS NULL ORDER BY a; +UPDATE fk_notpartitioned_pk SET a = a + 1 WHERE a = 2502; +SELECT tableoid::regclass, a, b FROM fk_partitioned_fk WHERE b IS NULL ORDER BY a; + +-- ON DELETE SET NULL +INSERT INTO fk_partitioned_fk VALUES (2503, 2503); +SELECT count(*) FROM fk_partitioned_fk WHERE a IS NULL; +DELETE FROM fk_notpartitioned_pk; +SELECT count(*) FROM fk_partitioned_fk WHERE a IS NULL; + +-- ON UPDATE/DELETE SET DEFAULT +ALTER TABLE fk_partitioned_fk DROP CONSTRAINT fk_partitioned_fk_a_fkey; +ALTER TABLE fk_partitioned_fk ADD FOREIGN KEY (a, b) + REFERENCES fk_notpartitioned_pk + ON DELETE SET DEFAULT ON UPDATE SET DEFAULT; +INSERT INTO fk_notpartitioned_pk VALUES (2502, 2503); +INSERT INTO fk_partitioned_fk_3 (a, b) VALUES (2502, 2503); +-- this fails, because the defaults for the referencing table are not present +-- in the referenced table: +UPDATE fk_notpartitioned_pk SET a = 1500 WHERE a = 2502; +-- but by inserting the row we can make it work: +INSERT INTO fk_notpartitioned_pk VALUES (2501, 142857); +UPDATE fk_notpartitioned_pk SET a = 1500 WHERE a = 2502; +SELECT * FROM fk_partitioned_fk WHERE b = 142857; + +-- ON UPDATE/DELETE CASCADE +ALTER TABLE fk_partitioned_fk DROP CONSTRAINT fk_partitioned_fk_a_fkey; +ALTER TABLE fk_partitioned_fk ADD FOREIGN KEY (a, b) + REFERENCES fk_notpartitioned_pk + ON DELETE CASCADE ON UPDATE CASCADE; +UPDATE fk_notpartitioned_pk SET a = 2502 WHERE a = 2501; +SELECT * FROM fk_partitioned_fk WHERE b = 142857; + +-- Now you see it ... +SELECT * FROM fk_partitioned_fk WHERE b = 142857; +DELETE FROM fk_notpartitioned_pk WHERE b = 142857; +-- now you don't. +SELECT * FROM fk_partitioned_fk WHERE a = 142857; + +-- verify that DROP works +DROP TABLE fk_partitioned_fk_2; + +-- Test behavior of the constraint together with attaching and detaching +-- partitions. 
+CREATE TABLE fk_partitioned_fk_2 PARTITION OF fk_partitioned_fk FOR VALUES IN (1500,1502); +ALTER TABLE fk_partitioned_fk DETACH PARTITION fk_partitioned_fk_2; +BEGIN; +DROP TABLE fk_partitioned_fk; +-- constraint should still be there +\d fk_partitioned_fk_2; +ROLLBACK; +ALTER TABLE fk_partitioned_fk ATTACH PARTITION fk_partitioned_fk_2 FOR VALUES IN (1500,1502); +DROP TABLE fk_partitioned_fk_2; +CREATE TABLE fk_partitioned_fk_2 (b int, c text, a int, + FOREIGN KEY (a, b) REFERENCES fk_notpartitioned_pk ON UPDATE CASCADE ON DELETE CASCADE); +ALTER TABLE fk_partitioned_fk_2 DROP COLUMN c; +ALTER TABLE fk_partitioned_fk ATTACH PARTITION fk_partitioned_fk_2 FOR VALUES IN (1500,1502); +-- should have only one constraint +\d fk_partitioned_fk_2 +DROP TABLE fk_partitioned_fk_2; + +CREATE TABLE fk_partitioned_fk_4 (a int, b int, FOREIGN KEY (a, b) REFERENCES fk_notpartitioned_pk(a, b) ON UPDATE CASCADE ON DELETE CASCADE) PARTITION BY RANGE (b, a); +CREATE TABLE fk_partitioned_fk_4_1 PARTITION OF fk_partitioned_fk_4 FOR VALUES FROM (1,1) TO (100,100); +CREATE TABLE fk_partitioned_fk_4_2 (a int, b int, FOREIGN KEY (a, b) REFERENCES fk_notpartitioned_pk(a, b) ON UPDATE SET NULL); +ALTER TABLE fk_partitioned_fk_4 ATTACH PARTITION fk_partitioned_fk_4_2 FOR VALUES FROM (100,100) TO (1000,1000); +ALTER TABLE fk_partitioned_fk ATTACH PARTITION fk_partitioned_fk_4 FOR VALUES IN (3500,3502); +ALTER TABLE fk_partitioned_fk DETACH PARTITION fk_partitioned_fk_4; +ALTER TABLE fk_partitioned_fk ATTACH PARTITION fk_partitioned_fk_4 FOR VALUES IN (3500,3502); +-- should only have one constraint +\d fk_partitioned_fk_4 +\d fk_partitioned_fk_4_1 +-- this one has an FK with mismatched properties +\d fk_partitioned_fk_4_2 + +CREATE TABLE fk_partitioned_fk_5 (a int, b int, + FOREIGN KEY (a, b) REFERENCES fk_notpartitioned_pk(a, b) ON UPDATE CASCADE ON DELETE CASCADE DEFERRABLE, + FOREIGN KEY (a, b) REFERENCES fk_notpartitioned_pk(a, b) MATCH FULL ON UPDATE CASCADE ON DELETE CASCADE) + PARTITION BY RANGE (a); +CREATE TABLE fk_partitioned_fk_5_1 (a int, b int, FOREIGN KEY (a, b) REFERENCES fk_notpartitioned_pk); +ALTER TABLE fk_partitioned_fk ATTACH PARTITION fk_partitioned_fk_5 FOR VALUES IN (4500); +ALTER TABLE fk_partitioned_fk_5 ATTACH PARTITION fk_partitioned_fk_5_1 FOR VALUES FROM (0) TO (10); +ALTER TABLE fk_partitioned_fk DETACH PARTITION fk_partitioned_fk_5; +ALTER TABLE fk_partitioned_fk ATTACH PARTITION fk_partitioned_fk_5 FOR VALUES IN (4500); +-- this one has two constraints, similar to but not quite the same as the one +-- in the parent, so it gets a new one +\d fk_partitioned_fk_5 +-- verify that it works to reattach a child with multiple candidate +-- constraints +ALTER TABLE fk_partitioned_fk_5 DETACH PARTITION fk_partitioned_fk_5_1; +ALTER TABLE fk_partitioned_fk_5 ATTACH PARTITION fk_partitioned_fk_5_1 FOR VALUES FROM (0) TO (10); +\d fk_partitioned_fk_5_1 + +-- verify that attaching a table checks that the existing data satisfies the +-- constraint +CREATE TABLE fk_partitioned_fk_2 (a int, b int) PARTITION BY RANGE (b); +CREATE TABLE fk_partitioned_fk_2_1 PARTITION OF fk_partitioned_fk_2 FOR VALUES FROM (0) TO (1000); +CREATE TABLE fk_partitioned_fk_2_2 PARTITION OF fk_partitioned_fk_2 FOR VALUES FROM (1000) TO (2000); +INSERT INTO fk_partitioned_fk_2 VALUES (1600, 601), (1600, 1601); +ALTER TABLE fk_partitioned_fk ATTACH PARTITION fk_partitioned_fk_2 + FOR VALUES IN (1600); +INSERT INTO fk_notpartitioned_pk VALUES (1600, 601), (1600, 1601); +ALTER TABLE fk_partitioned_fk ATTACH PARTITION fk_partitioned_fk_2 + 
FOR VALUES IN (1600); + +-- leave these tables around intentionally diff --git a/src/test/regress/sql/geometry.sql b/src/test/regress/sql/geometry.sql index 1429ee772a..ce98b3e90c 100644 --- a/src/test/regress/sql/geometry.sql +++ b/src/test/regress/sql/geometry.sql @@ -46,6 +46,103 @@ SELECT '' AS one, p1.f1 FROM POINT_TBL p1 WHERE p1.f1 ?| point '(5.1,34.5)'; +-- Slope +SELECT p1.f1, p2.f1, slope(p1.f1, p2.f1) FROM POINT_TBL p1, POINT_TBL p2; + +-- Add point +SELECT p1.f1, p2.f1, p1.f1 + p2.f1 FROM POINT_TBL p1, POINT_TBL p2; + +-- Subtract point +SELECT p1.f1, p2.f1, p1.f1 - p2.f1 FROM POINT_TBL p1, POINT_TBL p2; + +-- Multiply with point +SELECT p1.f1, p2.f1, p1.f1 * p2.f1 FROM POINT_TBL p1, POINT_TBL p2 WHERE p1.f1[0] BETWEEN 1 AND 1000; + +-- Underflow error +SELECT p1.f1, p2.f1, p1.f1 * p2.f1 FROM POINT_TBL p1, POINT_TBL p2 WHERE p1.f1[0] < 1; + +-- Divide by point +SELECT p1.f1, p2.f1, p1.f1 / p2.f1 FROM POINT_TBL p1, POINT_TBL p2 WHERE p2.f1[0] BETWEEN 1 AND 1000; + +-- Overflow error +SELECT p1.f1, p2.f1, p1.f1 / p2.f1 FROM POINT_TBL p1, POINT_TBL p2 WHERE p2.f1[0] > 1000; + +-- Division by 0 error +SELECT p1.f1, p2.f1, p1.f1 / p2.f1 FROM POINT_TBL p1, POINT_TBL p2 WHERE p2.f1 ~= '(0,0)'::point; + +-- Distance to line +SELECT p.f1, l.s, p.f1 <-> l.s FROM POINT_TBL p, LINE_TBL l; + +-- Distance to line segment +SELECT p.f1, l.s, p.f1 <-> l.s FROM POINT_TBL p, LSEG_TBL l; + +-- Distance to box +SELECT p.f1, b.f1, p.f1 <-> b.f1 FROM POINT_TBL p, BOX_TBL b; + +-- Distance to path +SELECT p.f1, p1.f1, p.f1 <-> p1.f1 FROM POINT_TBL p, PATH_TBL p1; + +-- Distance to polygon +SELECT p.f1, p1.f1, p.f1 <-> p1.f1 FROM POINT_TBL p, POLYGON_TBL p1; + +-- Closest point to line +SELECT p.f1, l.s, p.f1 ## l.s FROM POINT_TBL p, LINE_TBL l; + +-- Closest point to line segment +SELECT p.f1, l.s, p.f1 ## l.s FROM POINT_TBL p, LSEG_TBL l; + +-- Closest point to box +SELECT p.f1, b.f1, p.f1 ## b.f1 FROM POINT_TBL p, BOX_TBL b; + +-- On line +SELECT p.f1, l.s FROM POINT_TBL p, LINE_TBL l WHERE p.f1 <@ l.s; + +-- On line segment +SELECT p.f1, l.s FROM POINT_TBL p, LSEG_TBL l WHERE p.f1 <@ l.s; + +-- On path +SELECT p.f1, p1.f1 FROM POINT_TBL p, PATH_TBL p1 WHERE p.f1 <@ p1.f1; + +-- +-- Lines +-- + +-- Vertical +SELECT s FROM LINE_TBL WHERE ?| s; + +-- Horizontal +SELECT s FROM LINE_TBL WHERE ?- s; + +-- Same as line +SELECT l1.s, l2.s FROM LINE_TBL l1, LINE_TBL l2 WHERE l1.s = l2.s; + +-- Parallel to line +SELECT l1.s, l2.s FROM LINE_TBL l1, LINE_TBL l2 WHERE l1.s ?|| l2.s; + +-- Perpendicular to line +SELECT l1.s, l2.s FROM LINE_TBL l1, LINE_TBL l2 WHERE l1.s ?-| l2.s; + +-- Distance to line +SELECT l1.s, l2.s, l1.s <-> l2.s FROM LINE_TBL l1, LINE_TBL l2; + +-- Distance to box +SELECT l.s, b.f1, l.s <-> b.f1 FROM LINE_TBL l, BOX_TBL b; + +-- Intersect with line +SELECT l1.s, l2.s FROM LINE_TBL l1, LINE_TBL l2 WHERE l1.s ?# l2.s; + +-- Intersect with box +SELECT l.s, b.f1 FROM LINE_TBL l, BOX_TBL b WHERE l.s ?# b.f1; + +-- Intersection point with line +SELECT l1.s, l2.s, l1.s # l2.s FROM LINE_TBL l1, LINE_TBL l2; + +-- Closest point to line segment +SELECT l.s, l1.s, l.s ## l1.s FROM LINE_TBL l, LSEG_TBL l1; + +-- Closest point to box +SELECT l.s, b.f1, l.s ## b.f1 FROM LINE_TBL l, BOX_TBL b; + -- -- Line segments -- @@ -54,9 +151,77 @@ SELECT '' AS one, p1.f1 SELECT '' AS count, p.f1, l.s, l.s # p.f1 AS intersection FROM LSEG_TBL l, POINT_TBL p; --- closest point -SELECT '' AS thirty, p.f1, l.s, p.f1 ## l.s AS closest - FROM LSEG_TBL l, POINT_TBL p; +-- Length +SELECT s, @-@ s FROM LSEG_TBL; + +-- 
Vertical +SELECT s FROM LSEG_TBL WHERE ?| s; + +-- Horizontal +SELECT s FROM LSEG_TBL WHERE ?- s; + +-- Center +SELECT s, @@ s FROM LSEG_TBL; + +-- To point +SELECT s, s::point FROM LSEG_TBL; + +-- Has points less than line segment +SELECT l1.s, l2.s FROM LSEG_TBL l1, LSEG_TBL l2 WHERE l1.s < l2.s; + +-- Has points less than or equal to line segment +SELECT l1.s, l2.s FROM LSEG_TBL l1, LSEG_TBL l2 WHERE l1.s <= l2.s; + +-- Has points equal to line segment +SELECT l1.s, l2.s FROM LSEG_TBL l1, LSEG_TBL l2 WHERE l1.s = l2.s; + +-- Has points greater than or equal to line segment +SELECT l1.s, l2.s FROM LSEG_TBL l1, LSEG_TBL l2 WHERE l1.s >= l2.s; + +-- Has points greater than line segment +SELECT l1.s, l2.s FROM LSEG_TBL l1, LSEG_TBL l2 WHERE l1.s > l2.s; + +-- Has points not equal to line segment +SELECT l1.s, l2.s FROM LSEG_TBL l1, LSEG_TBL l2 WHERE l1.s != l2.s; + +-- Parallel with line segment +SELECT l1.s, l2.s FROM LSEG_TBL l1, LSEG_TBL l2 WHERE l1.s ?|| l2.s; + +-- Perpendicular with line segment +SELECT l1.s, l2.s FROM LSEG_TBL l1, LSEG_TBL l2 WHERE l1.s ?-| l2.s; + +-- Distance to line +SELECT l.s, l1.s, l.s <-> l1.s FROM LSEG_TBL l, LINE_TBL l1; + +-- Distance to line segment +SELECT l1.s, l2.s, l1.s <-> l2.s FROM LSEG_TBL l1, LSEG_TBL l2; + +-- Distance to box +SELECT l.s, b.f1, l.s <-> b.f1 FROM LSEG_TBL l, BOX_TBL b; + +-- Intersect with line segment +SELECT l.s, l1.s FROM LSEG_TBL l, LINE_TBL l1 WHERE l.s ?# l1.s; + +-- Intersect with box +SELECT l.s, b.f1 FROM LSEG_TBL l, BOX_TBL b WHERE l.s ?# b.f1; + +-- Intersection point with line segment +SELECT l1.s, l2.s, l1.s # l2.s FROM LSEG_TBL l1, LSEG_TBL l2; + +-- Closest point to line +SELECT l.s, l1.s, l.s ## l1.s FROM LSEG_TBL l, LINE_TBL l1; + +-- Closest point to line segment +SELECT l1.s, l2.s, l1.s ## l2.s FROM LSEG_TBL l1, LSEG_TBL l2; + +-- Closest point to box +SELECT l.s, b.f1, l.s ## b.f1 FROM LSEG_TBL l, BOX_TBL b; + +-- On line +SELECT l.s, l1.s FROM LSEG_TBL l, LINE_TBL l1 WHERE l.s <@ l1.s; + +-- On box +SELECT l.s, b.f1 FROM LSEG_TBL l, BOX_TBL b WHERE l.s <@ b.f1; -- -- Boxes @@ -71,35 +236,94 @@ SELECT '' AS twentyfour, b.f1 + p.f1 AS translation SELECT '' AS twentyfour, b.f1 - p.f1 AS translation FROM BOX_TBL b, POINT_TBL p; --- scaling and rotation -SELECT '' AS twentyfour, b.f1 * p.f1 AS rotation - FROM BOX_TBL b, POINT_TBL p; +-- Multiply with point +SELECT b.f1, p.f1, b.f1 * p.f1 FROM BOX_TBL b, POINT_TBL p WHERE p.f1[0] BETWEEN 1 AND 1000; + +-- Overflow error +SELECT b.f1, p.f1, b.f1 * p.f1 FROM BOX_TBL b, POINT_TBL p WHERE p.f1[0] > 1000; -SELECT '' AS twenty, b.f1 / p.f1 AS rotation - FROM BOX_TBL b, POINT_TBL p - WHERE (p.f1 <-> point '(0,0)') >= 1; +-- Divide by point +SELECT b.f1, p.f1, b.f1 / p.f1 FROM BOX_TBL b, POINT_TBL p WHERE p.f1[0] BETWEEN 1 AND 1000; +-- To box SELECT f1::box FROM POINT_TBL; SELECT bound_box(a.f1, b.f1) FROM BOX_TBL a, BOX_TBL b; +-- Below box +SELECT b1.f1, b2.f1, b1.f1 <^ b2.f1 FROM BOX_TBL b1, BOX_TBL b2; + +-- Above box +SELECT b1.f1, b2.f1, b1.f1 >^ b2.f1 FROM BOX_TBL b1, BOX_TBL b2; + +-- Intersection point with box +SELECT b1.f1, b2.f1, b1.f1 # b2.f1 FROM BOX_TBL b1, BOX_TBL b2; + +-- Diagonal +SELECT f1, diagonal(f1) FROM BOX_TBL; + +-- Distance to box +SELECT b1.f1, b2.f1, b1.f1 <-> b2.f1 FROM BOX_TBL b1, BOX_TBL b2; + -- -- Paths -- -SELECT '' AS eight, npoints(f1) AS npoints, f1 AS path FROM PATH_TBL; +-- Points +SELECT f1, npoints(f1) FROM PATH_TBL; + +-- Area +SELECT f1, area(f1) FROM PATH_TBL; -SELECT '' AS four, path(f1) FROM POLYGON_TBL; +-- Length +SELECT f1, 
@-@ f1 FROM PATH_TBL; --- translation -SELECT '' AS eight, p1.f1 + point '(10,10)' AS dist_add - FROM PATH_TBL p1; +-- Center +SELECT f1, @@ f1 FROM PATH_TBL; + +-- To polygon +SELECT f1, f1::polygon FROM PATH_TBL WHERE isclosed(f1); + +-- Open path cannot be converted to polygon error +SELECT f1, f1::polygon FROM PATH_TBL WHERE isopen(f1); + +-- Has points less than path +SELECT p1.f1, p2.f1 FROM PATH_TBL p1, PATH_TBL p2 WHERE p1.f1 < p2.f1; + +-- Has points less than or equal to path +SELECT p1.f1, p2.f1 FROM PATH_TBL p1, PATH_TBL p2 WHERE p1.f1 <= p2.f1; + +-- Has points equal to path +SELECT p1.f1, p2.f1 FROM PATH_TBL p1, PATH_TBL p2 WHERE p1.f1 = p2.f1; + +-- Has points greater than or equal to path +SELECT p1.f1, p2.f1 FROM PATH_TBL p1, PATH_TBL p2 WHERE p1.f1 >= p2.f1; + +-- Has points greater than path +SELECT p1.f1, p2.f1 FROM PATH_TBL p1, PATH_TBL p2 WHERE p1.f1 > p2.f1; + +-- Add path +SELECT p1.f1, p2.f1, p1.f1 + p2.f1 FROM PATH_TBL p1, PATH_TBL p2; + +-- Add point +SELECT p.f1, p1.f1, p.f1 + p1.f1 FROM PATH_TBL p, POINT_TBL p1; + +-- Subtract point +SELECT p.f1, p1.f1, p.f1 - p1.f1 FROM PATH_TBL p, POINT_TBL p1; + +-- Multiply with point +SELECT p.f1, p1.f1, p.f1 * p1.f1 FROM PATH_TBL p, POINT_TBL p1; + +-- Divide by point +SELECT p.f1, p1.f1, p.f1 / p1.f1 FROM PATH_TBL p, POINT_TBL p1 WHERE p1.f1[0] BETWEEN 1 AND 1000; --- scaling and rotation -SELECT '' AS eight, p1.f1 * point '(2,-1)' AS dist_mul - FROM PATH_TBL p1; +-- Division by 0 error +SELECT p.f1, p1.f1, p.f1 / p1.f1 FROM PATH_TBL p, POINT_TBL p1 WHERE p1.f1 ~= '(0,0)'::point; + +-- Distance to path +SELECT p1.f1, p2.f1, p1.f1 <-> p2.f1 FROM PATH_TBL p1, PATH_TBL p2; -- -- Polygons @@ -125,13 +349,50 @@ SELECT '' AS four, f1 AS open_path, polygon( pclose(f1)) AS polygon FROM PATH_TBL WHERE isopen(f1); --- convert circles to polygons using the default number of points -SELECT '' AS six, polygon(f1) - FROM CIRCLE_TBL; +-- To box +SELECT f1, f1::box FROM POLYGON_TBL; --- convert the circle to an 8-point polygon -SELECT '' AS six, polygon(8, f1) - FROM CIRCLE_TBL; +-- To path +SELECT f1, f1::path FROM POLYGON_TBL; + +-- Same as polygon +SELECT p1.f1, p2.f1 FROM POLYGON_TBL p1, POLYGON_TBL p2 WHERE p1.f1 ~= p2.f1; + +-- Contained by polygon +SELECT p1.f1, p2.f1 FROM POLYGON_TBL p1, POLYGON_TBL p2 WHERE p1.f1 <@ p2.f1; + +-- Contains polygon +SELECT p1.f1, p2.f1 FROM POLYGON_TBL p1, POLYGON_TBL p2 WHERE p1.f1 @> p2.f1; + +-- Overlap with polygon +SELECT p1.f1, p2.f1 FROM POLYGON_TBL p1, POLYGON_TBL p2 WHERE p1.f1 && p2.f1; + +-- Left of polygon +SELECT p1.f1, p2.f1 FROM POLYGON_TBL p1, POLYGON_TBL p2 WHERE p1.f1 << p2.f1; + +-- Overlap of left of polygon +SELECT p1.f1, p2.f1 FROM POLYGON_TBL p1, POLYGON_TBL p2 WHERE p1.f1 &< p2.f1; + +-- Right of polygon +SELECT p1.f1, p2.f1 FROM POLYGON_TBL p1, POLYGON_TBL p2 WHERE p1.f1 >> p2.f1; + +-- Overlap of right of polygon +SELECT p1.f1, p2.f1 FROM POLYGON_TBL p1, POLYGON_TBL p2 WHERE p1.f1 &> p2.f1; + +-- Below polygon +SELECT p1.f1, p2.f1 FROM POLYGON_TBL p1, POLYGON_TBL p2 WHERE p1.f1 <<| p2.f1; + +-- Overlap or below polygon +SELECT p1.f1, p2.f1 FROM POLYGON_TBL p1, POLYGON_TBL p2 WHERE p1.f1 &<| p2.f1; + +-- Above polygon +SELECT p1.f1, p2.f1 FROM POLYGON_TBL p1, POLYGON_TBL p2 WHERE p1.f1 |>> p2.f1; + +-- Overlap or above polygon +SELECT p1.f1, p2.f1 FROM POLYGON_TBL p1, POLYGON_TBL p2 WHERE p1.f1 |&> p2.f1; + +-- Distance to polygon +SELECT p1.f1, p2.f1, p1.f1 <-> p2.f1 FROM POLYGON_TBL p1, POLYGON_TBL p2; -- -- Circles @@ -151,3 +412,96 @@ SELECT '' AS twentyfour, c1.f1 AS 
circle, p1.f1 AS point, (p1.f1 <-> c1.f1) AS d FROM CIRCLE_TBL c1, POINT_TBL p1 WHERE (p1.f1 <-> c1.f1) > 0 ORDER BY distance, area(c1.f1), p1.f1[0]; + +-- To polygon +SELECT f1, f1::polygon FROM CIRCLE_TBL WHERE f1 >= '<(0,0),1>'; + +-- To polygon with fewer points +SELECT f1, polygon(8, f1) FROM CIRCLE_TBL WHERE f1 >= '<(0,0),1>'; + +-- Too few points error +SELECT f1, polygon(1, f1) FROM CIRCLE_TBL WHERE f1 >= '<(0,0),1>'; + +-- Zero radius error +SELECT f1, polygon(10, f1) FROM CIRCLE_TBL WHERE f1 < '<(0,0),1>'; + +-- Same as circle +SELECT c1.f1, c2.f1 FROM CIRCLE_TBL c1, CIRCLE_TBL c2 WHERE c1.f1 ~= c2.f1; + +-- Overlap with circle +SELECT c1.f1, c2.f1 FROM CIRCLE_TBL c1, CIRCLE_TBL c2 WHERE c1.f1 && c2.f1; + +-- Overlap or left of circle +SELECT c1.f1, c2.f1 FROM CIRCLE_TBL c1, CIRCLE_TBL c2 WHERE c1.f1 &< c2.f1; + +-- Left of circle +SELECT c1.f1, c2.f1 FROM CIRCLE_TBL c1, CIRCLE_TBL c2 WHERE c1.f1 << c2.f1; + +-- Right of circle +SELECT c1.f1, c2.f1 FROM CIRCLE_TBL c1, CIRCLE_TBL c2 WHERE c1.f1 >> c2.f1; + +-- Overlap or right of circle +SELECT c1.f1, c2.f1 FROM CIRCLE_TBL c1, CIRCLE_TBL c2 WHERE c1.f1 &> c2.f1; + +-- Contained by circle +SELECT c1.f1, c2.f1 FROM CIRCLE_TBL c1, CIRCLE_TBL c2 WHERE c1.f1 <@ c2.f1; + +-- Contains circle +SELECT c1.f1, c2.f1 FROM CIRCLE_TBL c1, CIRCLE_TBL c2 WHERE c1.f1 @> c2.f1; + +-- Below circle +SELECT c1.f1, c2.f1 FROM CIRCLE_TBL c1, CIRCLE_TBL c2 WHERE c1.f1 <<| c2.f1; + +-- Above circle +SELECT c1.f1, c2.f1 FROM CIRCLE_TBL c1, CIRCLE_TBL c2 WHERE c1.f1 |>> c2.f1; + +-- Overlap or below circle +SELECT c1.f1, c2.f1 FROM CIRCLE_TBL c1, CIRCLE_TBL c2 WHERE c1.f1 &<| c2.f1; + +-- Overlap or above circle +SELECT c1.f1, c2.f1 FROM CIRCLE_TBL c1, CIRCLE_TBL c2 WHERE c1.f1 |&> c2.f1; + +-- Area equal with circle +SELECT c1.f1, c2.f1 FROM CIRCLE_TBL c1, CIRCLE_TBL c2 WHERE c1.f1 = c2.f1; + +-- Area not equal with circle +SELECT c1.f1, c2.f1 FROM CIRCLE_TBL c1, CIRCLE_TBL c2 WHERE c1.f1 != c2.f1; + +-- Area less than circle +SELECT c1.f1, c2.f1 FROM CIRCLE_TBL c1, CIRCLE_TBL c2 WHERE c1.f1 < c2.f1; + +-- Area greater than circle +SELECT c1.f1, c2.f1 FROM CIRCLE_TBL c1, CIRCLE_TBL c2 WHERE c1.f1 > c2.f1; + +-- Area less than or equal circle +SELECT c1.f1, c2.f1 FROM CIRCLE_TBL c1, CIRCLE_TBL c2 WHERE c1.f1 <= c2.f1; + +-- Area greater than or equal circle +SELECT c1.f1, c2.f1 FROM CIRCLE_TBL c1, CIRCLE_TBL c2 WHERE c1.f1 >= c2.f1; + +-- Add point +SELECT c.f1, p.f1, c.f1 + p.f1 FROM CIRCLE_TBL c, POINT_TBL p; + +-- Subtract point +SELECT c.f1, p.f1, c.f1 - p.f1 FROM CIRCLE_TBL c, POINT_TBL p; + +-- Multiply with point +SELECT c.f1, p.f1, c.f1 * p.f1 FROM CIRCLE_TBL c, POINT_TBL p; + +-- Divide by point +SELECT c.f1, p.f1, c.f1 / p.f1 FROM CIRCLE_TBL c, POINT_TBL p WHERE p.f1[0] BETWEEN 1 AND 1000; + +-- Overflow error +SELECT c.f1, p.f1, c.f1 / p.f1 FROM CIRCLE_TBL c, POINT_TBL p WHERE p.f1[0] > 1000; + +-- Division by 0 error +SELECT c.f1, p.f1, c.f1 / p.f1 FROM CIRCLE_TBL c, POINT_TBL p WHERE p.f1 ~= '(0,0)'::point; + +-- Distance to polygon +SELECT c.f1, p.f1, c.f1 <-> p.f1 FROM CIRCLE_TBL c, POLYGON_TBL p; diff --git a/src/test/regress/sql/gist.sql b/src/test/regress/sql/gist.sql index 49126fd466..bae722fe13 100644 --- a/src/test/regress/sql/gist.sql +++ b/src/test/regress/sql/gist.sql @@ -7,6 +7,17 @@ create table gist_point_tbl(id int4, p 
point); create index gist_pointidx on gist_point_tbl using gist(p); +-- Verify the fillfactor and buffering options +create index gist_pointidx2 on gist_point_tbl using gist(p) with (buffering = on, fillfactor=50); +create index gist_pointidx3 on gist_point_tbl using gist(p) with (buffering = off); +create index gist_pointidx4 on gist_point_tbl using gist(p) with (buffering = auto); +drop index gist_pointidx2, gist_pointidx3, gist_pointidx4; + +-- Make sure bad values are refused +create index gist_pointidx5 on gist_point_tbl using gist(p) with (buffering = invalid_value); +create index gist_pointidx5 on gist_point_tbl using gist(p) with (fillfactor=9); +create index gist_pointidx5 on gist_point_tbl using gist(p) with (fillfactor=101); + -- Insert enough data to create a tree that's a couple of levels deep. insert into gist_point_tbl (id, p) select g, point(g*10, g*10) from generate_series(1, 10000) g; @@ -24,6 +35,9 @@ delete from gist_point_tbl where id < 10000; vacuum analyze gist_point_tbl; +-- rebuild the index with a different fillfactor +alter index gist_pointidx SET (fillfactor = 40); +reindex index gist_pointidx; -- -- Test Index-only plans on GiST indexes diff --git a/src/test/regress/sql/groupingsets.sql b/src/test/regress/sql/groupingsets.sql index 564ebc9b05..c32d23b8d7 100644 --- a/src/test/regress/sql/groupingsets.sql +++ b/src/test/regress/sql/groupingsets.sql @@ -141,6 +141,37 @@ select a, d, grouping(a,b,c) from gstest3 group by grouping sets ((a,b), (a,c)); +-- check that distinct grouping columns are kept separate +-- even if they are equal() +explain (costs off) +select g as alias1, g as alias2 + from generate_series(1,3) g + group by alias1, rollup(alias2); + +select g as alias1, g as alias2 + from generate_series(1,3) g + group by alias1, rollup(alias2); + +-- check that pulled-up subquery outputs still go to null when appropriate +select four, x + from (select four, ten, 'foo'::text as x from tenk1) as t + group by grouping sets (four, x) + having x = 'foo'; + +select four, x || 'x' + from (select four, ten, 'foo'::text as x from tenk1) as t + group by grouping sets (four, x) + order by four; + +select (x+y)*1, sum(z) + from (select 1 as x, 2 as y, 3 as z) s + group by grouping sets (x+y, x); + +select x, not x as not_x, q2 from + (select *, q1 = 1 as x from int8_tbl i1) as t + group by grouping sets(x, q2) + order by x, q2; + -- simple rescan tests select a, b, sum(v.x) @@ -261,6 +292,11 @@ explain (costs off) select a, b, grouping(a,b), array_agg(v order by v) from gstest1 group by cube(a,b); +-- unsortable cases +select unsortable_col, count(*) + from gstest4 group by grouping sets ((unsortable_col),(unsortable_col)) + order by unsortable_col::text; + -- mixed hashable/sortable cases select unhashable_col, unsortable_col, grouping(unhashable_col, unsortable_col), @@ -311,12 +347,13 @@ explain (costs off) select a, b, sum(v.x) from (values (1),(2)) v(x), gstest_data(v.x) - group by grouping sets (a,b); + group by grouping sets (a,b) + order by 1, 2, 3; explain (costs off) select a, b, sum(v.x) from (values (1),(2)) v(x), gstest_data(v.x) - group by grouping sets (a,b); - + group by grouping sets (a,b) + order by 3, 1, 2; select * from (values (1),(2)) v(x), lateral (select a, b, sum(v.x) from gstest_data(v.x) group by grouping sets (a,b)) s; diff --git a/src/test/regress/sql/hash_func.sql b/src/test/regress/sql/hash_func.sql new file mode 100644 index 0000000000..b7ce8b21a3 --- /dev/null +++ b/src/test/regress/sql/hash_func.sql @@ -0,0 +1,222 @@ +-- +-- Test hash 
functions +-- +-- When the salt is 0, the extended hash function should produce a result +-- whose low 32 bits match the standard hash function. When the salt is +-- not 0, we should get a different result. +-- + +SELECT v as value, hashint2(v)::bit(32) as standard, + hashint2extended(v, 0)::bit(32) as extended0, + hashint2extended(v, 1)::bit(32) as extended1 +FROM (VALUES (0::int2), (1::int2), (17::int2), (42::int2)) x(v) +WHERE hashint2(v)::bit(32) != hashint2extended(v, 0)::bit(32) + OR hashint2(v)::bit(32) = hashint2extended(v, 1)::bit(32); + +SELECT v as value, hashint4(v)::bit(32) as standard, + hashint4extended(v, 0)::bit(32) as extended0, + hashint4extended(v, 1)::bit(32) as extended1 +FROM (VALUES (0), (1), (17), (42), (550273), (207112489)) x(v) +WHERE hashint4(v)::bit(32) != hashint4extended(v, 0)::bit(32) + OR hashint4(v)::bit(32) = hashint4extended(v, 1)::bit(32); + +SELECT v as value, hashint8(v)::bit(32) as standard, + hashint8extended(v, 0)::bit(32) as extended0, + hashint8extended(v, 1)::bit(32) as extended1 +FROM (VALUES (0), (1), (17), (42), (550273), (207112489)) x(v) +WHERE hashint8(v)::bit(32) != hashint8extended(v, 0)::bit(32) + OR hashint8(v)::bit(32) = hashint8extended(v, 1)::bit(32); + +SELECT v as value, hashfloat4(v)::bit(32) as standard, + hashfloat4extended(v, 0)::bit(32) as extended0, + hashfloat4extended(v, 1)::bit(32) as extended1 +FROM (VALUES (0), (1), (17), (42), (550273), (207112489)) x(v) +WHERE hashfloat4(v)::bit(32) != hashfloat4extended(v, 0)::bit(32) + OR hashfloat4(v)::bit(32) = hashfloat4extended(v, 1)::bit(32); + +SELECT v as value, hashfloat8(v)::bit(32) as standard, + hashfloat8extended(v, 0)::bit(32) as extended0, + hashfloat8extended(v, 1)::bit(32) as extended1 +FROM (VALUES (0), (1), (17), (42), (550273), (207112489)) x(v) +WHERE hashfloat8(v)::bit(32) != hashfloat8extended(v, 0)::bit(32) + OR hashfloat8(v)::bit(32) = hashfloat8extended(v, 1)::bit(32); + +SELECT v as value, hashoid(v)::bit(32) as standard, + hashoidextended(v, 0)::bit(32) as extended0, + hashoidextended(v, 1)::bit(32) as extended1 +FROM (VALUES (0), (1), (17), (42), (550273), (207112489)) x(v) +WHERE hashoid(v)::bit(32) != hashoidextended(v, 0)::bit(32) + OR hashoid(v)::bit(32) = hashoidextended(v, 1)::bit(32); + +SELECT v as value, hashchar(v)::bit(32) as standard, + hashcharextended(v, 0)::bit(32) as extended0, + hashcharextended(v, 1)::bit(32) as extended1 +FROM (VALUES (NULL::"char"), ('1'), ('x'), ('X'), ('p'), ('N')) x(v) +WHERE hashchar(v)::bit(32) != hashcharextended(v, 0)::bit(32) + OR hashchar(v)::bit(32) = hashcharextended(v, 1)::bit(32); + +SELECT v as value, hashname(v)::bit(32) as standard, + hashnameextended(v, 0)::bit(32) as extended0, + hashnameextended(v, 1)::bit(32) as extended1 +FROM (VALUES (NULL), ('PostgreSQL'), ('eIpUEtqmY89'), ('AXKEJBTK'), + ('muop28x03'), ('yi3nm0d73')) x(v) +WHERE hashname(v)::bit(32) != hashnameextended(v, 0)::bit(32) + OR hashname(v)::bit(32) = hashnameextended(v, 1)::bit(32); + +SELECT v as value, hashtext(v)::bit(32) as standard, + hashtextextended(v, 0)::bit(32) as extended0, + hashtextextended(v, 1)::bit(32) as extended1 +FROM (VALUES (NULL), ('PostgreSQL'), ('eIpUEtqmY89'), ('AXKEJBTK'), + ('muop28x03'), ('yi3nm0d73')) x(v) +WHERE hashtext(v)::bit(32) != hashtextextended(v, 0)::bit(32) + OR hashtext(v)::bit(32) = hashtextextended(v, 1)::bit(32); + +SELECT v as value, hashoidvector(v)::bit(32) as standard, + hashoidvectorextended(v, 0)::bit(32) as extended0, + hashoidvectorextended(v, 1)::bit(32) as extended1 +FROM (VALUES 
(NULL::oidvector), ('0 1 2 3 4'), ('17 18 19 20'), + ('42 43 42 45'), ('550273 550273 570274'), + ('207112489 207112499 21512 2155 372325 1363252')) x(v) +WHERE hashoidvector(v)::bit(32) != hashoidvectorextended(v, 0)::bit(32) + OR hashoidvector(v)::bit(32) = hashoidvectorextended(v, 1)::bit(32); + +SELECT v as value, hash_aclitem(v)::bit(32) as standard, + hash_aclitem_extended(v, 0)::bit(32) as extended0, + hash_aclitem_extended(v, 1)::bit(32) as extended1 +FROM (SELECT DISTINCT(relacl[1]) FROM pg_class LIMIT 10) x(v) +WHERE hash_aclitem(v)::bit(32) != hash_aclitem_extended(v, 0)::bit(32) + OR hash_aclitem(v)::bit(32) = hash_aclitem_extended(v, 1)::bit(32); + +SELECT v as value, hashmacaddr(v)::bit(32) as standard, + hashmacaddrextended(v, 0)::bit(32) as extended0, + hashmacaddrextended(v, 1)::bit(32) as extended1 +FROM (VALUES (NULL::macaddr), ('08:00:2b:01:02:04'), ('08:00:2b:01:02:04'), + ('e2:7f:51:3e:70:49'), ('d6:a9:4a:78:1c:d5'), + ('ea:29:b1:5e:1f:a5')) x(v) +WHERE hashmacaddr(v)::bit(32) != hashmacaddrextended(v, 0)::bit(32) + OR hashmacaddr(v)::bit(32) = hashmacaddrextended(v, 1)::bit(32); + +SELECT v as value, hashinet(v)::bit(32) as standard, + hashinetextended(v, 0)::bit(32) as extended0, + hashinetextended(v, 1)::bit(32) as extended1 +FROM (VALUES (NULL::inet), ('192.168.100.128/25'), ('192.168.100.0/8'), + ('172.168.10.126/16'), ('172.18.103.126/24'), ('192.188.13.16/32')) x(v) +WHERE hashinet(v)::bit(32) != hashinetextended(v, 0)::bit(32) + OR hashinet(v)::bit(32) = hashinetextended(v, 1)::bit(32); + +SELECT v as value, hash_numeric(v)::bit(32) as standard, + hash_numeric_extended(v, 0)::bit(32) as extended0, + hash_numeric_extended(v, 1)::bit(32) as extended1 +FROM (VALUES (0), (1.149484958), (17.149484958), (42.149484958), + (149484958.550273), (2071124898672)) x(v) +WHERE hash_numeric(v)::bit(32) != hash_numeric_extended(v, 0)::bit(32) + OR hash_numeric(v)::bit(32) = hash_numeric_extended(v, 1)::bit(32); + +SELECT v as value, hashmacaddr8(v)::bit(32) as standard, + hashmacaddr8extended(v, 0)::bit(32) as extended0, + hashmacaddr8extended(v, 1)::bit(32) as extended1 +FROM (VALUES (NULL::macaddr8), ('08:00:2b:01:02:04:36:49'), + ('08:00:2b:01:02:04:f0:e8'), ('e2:7f:51:3e:70:49:16:29'), + ('d6:a9:4a:78:1c:d5:47:32'), ('ea:29:b1:5e:1f:a5')) x(v) +WHERE hashmacaddr8(v)::bit(32) != hashmacaddr8extended(v, 0)::bit(32) + OR hashmacaddr8(v)::bit(32) = hashmacaddr8extended(v, 1)::bit(32); + +SELECT v as value, hash_array(v)::bit(32) as standard, + hash_array_extended(v, 0)::bit(32) as extended0, + hash_array_extended(v, 1)::bit(32) as extended1 +FROM (VALUES ('{0}'::int4[]), ('{0,1,2,3,4}'), ('{17,18,19,20}'), + ('{42,34,65,98}'), ('{550273,590027, 870273}'), + ('{207112489, 807112489}')) x(v) +WHERE hash_array(v)::bit(32) != hash_array_extended(v, 0)::bit(32) + OR hash_array(v)::bit(32) = hash_array_extended(v, 1)::bit(32); + +SELECT v as value, hashbpchar(v)::bit(32) as standard, + hashbpcharextended(v, 0)::bit(32) as extended0, + hashbpcharextended(v, 1)::bit(32) as extended1 +FROM (VALUES (NULL), ('PostgreSQL'), ('eIpUEtqmY89'), ('AXKEJBTK'), + ('muop28x03'), ('yi3nm0d73')) x(v) +WHERE hashbpchar(v)::bit(32) != hashbpcharextended(v, 0)::bit(32) + OR hashbpchar(v)::bit(32) = hashbpcharextended(v, 1)::bit(32); + +SELECT v as value, time_hash(v)::bit(32) as standard, + time_hash_extended(v, 0)::bit(32) as extended0, + time_hash_extended(v, 1)::bit(32) as extended1 +FROM (VALUES (NULL::time), ('11:09:59'), ('1:09:59'), ('11:59:59'), + ('7:9:59'), ('5:15:59')) x(v) +WHERE 
time_hash(v)::bit(32) != time_hash_extended(v, 0)::bit(32) + OR time_hash(v)::bit(32) = time_hash_extended(v, 1)::bit(32); + +SELECT v as value, timetz_hash(v)::bit(32) as standard, + timetz_hash_extended(v, 0)::bit(32) as extended0, + timetz_hash_extended(v, 1)::bit(32) as extended1 +FROM (VALUES (NULL::timetz), ('00:11:52.518762-07'), ('00:11:52.51762-08'), + ('00:11:52.62-01'), ('00:11:52.62+01'), ('11:59:59+04')) x(v) +WHERE timetz_hash(v)::bit(32) != timetz_hash_extended(v, 0)::bit(32) + OR timetz_hash(v)::bit(32) = timetz_hash_extended(v, 1)::bit(32); + +SELECT v as value, interval_hash(v)::bit(32) as standard, + interval_hash_extended(v, 0)::bit(32) as extended0, + interval_hash_extended(v, 1)::bit(32) as extended1 +FROM (VALUES (NULL::interval), + ('5 month 7 day 46 minutes'), ('1 year 7 day 46 minutes'), + ('1 year 7 month 20 day 46 minutes'), ('5 month'), + ('17 year 11 month 7 day 9 hours 46 minutes 5 seconds')) x(v) +WHERE interval_hash(v)::bit(32) != interval_hash_extended(v, 0)::bit(32) + OR interval_hash(v)::bit(32) = interval_hash_extended(v, 1)::bit(32); + +SELECT v as value, timestamp_hash(v)::bit(32) as standard, + timestamp_hash_extended(v, 0)::bit(32) as extended0, + timestamp_hash_extended(v, 1)::bit(32) as extended1 +FROM (VALUES (NULL::timestamp), ('2017-08-22 00:09:59.518762'), + ('2015-08-20 00:11:52.51762-08'), + ('2017-05-22 00:11:52.62-01'), + ('2013-08-22 00:11:52.62+01'), ('2013-08-22 11:59:59+04')) x(v) +WHERE timestamp_hash(v)::bit(32) != timestamp_hash_extended(v, 0)::bit(32) + OR timestamp_hash(v)::bit(32) = timestamp_hash_extended(v, 1)::bit(32); + +SELECT v as value, uuid_hash(v)::bit(32) as standard, + uuid_hash_extended(v, 0)::bit(32) as extended0, + uuid_hash_extended(v, 1)::bit(32) as extended1 +FROM (VALUES (NULL::uuid), ('a0eebc99-9c0b-4ef8-bb6d-6bb9bd380a11'), + ('5a9ba4ac-8d6f-11e7-bb31-be2e44b06b34'), + ('99c6705c-d939-461c-a3c9-1690ad64ed7b'), + ('7deed3ca-8d6f-11e7-bb31-be2e44b06b34'), + ('9ad46d4f-6f2a-4edd-aadb-745993928e1e')) x(v) +WHERE uuid_hash(v)::bit(32) != uuid_hash_extended(v, 0)::bit(32) + OR uuid_hash(v)::bit(32) = uuid_hash_extended(v, 1)::bit(32); + +SELECT v as value, pg_lsn_hash(v)::bit(32) as standard, + pg_lsn_hash_extended(v, 0)::bit(32) as extended0, + pg_lsn_hash_extended(v, 1)::bit(32) as extended1 +FROM (VALUES (NULL::pg_lsn), ('16/B374D84'), ('30/B374D84'), + ('255/B374D84'), ('25/B379D90'), ('900/F37FD90')) x(v) +WHERE pg_lsn_hash(v)::bit(32) != pg_lsn_hash_extended(v, 0)::bit(32) + OR pg_lsn_hash(v)::bit(32) = pg_lsn_hash_extended(v, 1)::bit(32); + +CREATE TYPE mood AS ENUM ('sad', 'ok', 'happy'); +SELECT v as value, hashenum(v)::bit(32) as standard, + hashenumextended(v, 0)::bit(32) as extended0, + hashenumextended(v, 1)::bit(32) as extended1 +FROM (VALUES ('sad'::mood), ('ok'), ('happy')) x(v) +WHERE hashenum(v)::bit(32) != hashenumextended(v, 0)::bit(32) + OR hashenum(v)::bit(32) = hashenumextended(v, 1)::bit(32); +DROP TYPE mood; + +SELECT v as value, jsonb_hash(v)::bit(32) as standard, + jsonb_hash_extended(v, 0)::bit(32) as extended0, + jsonb_hash_extended(v, 1)::bit(32) as extended1 +FROM (VALUES (NULL::jsonb), + ('{"a": "aaa bbb ddd ccc", "b": ["eee fff ggg"], "c": {"d": "hhh iii"}}'), + ('{"foo": [true, "bar"], "tags": {"e": 1, "f": null}}'), + ('{"g": {"h": "value"}}')) x(v) +WHERE jsonb_hash(v)::bit(32) != jsonb_hash_extended(v, 0)::bit(32) + OR jsonb_hash(v)::bit(32) = jsonb_hash_extended(v, 1)::bit(32); + +SELECT v as value, hash_range(v)::bit(32) as standard, + hash_range_extended(v, 0)::bit(32) as 
extended0, + hash_range_extended(v, 1)::bit(32) as extended1 +FROM (VALUES (int4range(10, 20)), (int4range(23, 43)), + (int4range(5675, 550273)), + (int4range(550274, 1550274)), (int4range(1550275, 208112489))) x(v) +WHERE hash_range(v)::bit(32) != hash_range_extended(v, 0)::bit(32) + OR hash_range(v)::bit(32) = hash_range_extended(v, 1)::bit(32); diff --git a/src/test/regress/sql/hash_index.sql b/src/test/regress/sql/hash_index.sql index 9af03d2bc1..4d1aa020a9 100644 --- a/src/test/regress/sql/hash_index.sql +++ b/src/test/regress/sql/hash_index.sql @@ -178,6 +178,10 @@ INSERT INTO hash_split_heap SELECT a/2 FROM generate_series(1, 25000) a; VACUUM hash_split_heap; +-- Rebuild the index using a different fillfactor +ALTER INDEX hash_split_index SET (fillfactor = 10); +REINDEX INDEX hash_split_index; + -- Clean up. DROP TABLE hash_split_heap; @@ -192,3 +196,9 @@ CREATE TABLE hash_heap_float4 (x float4, y int); INSERT INTO hash_heap_float4 VALUES (1.1,1); CREATE INDEX hash_idx ON hash_heap_float4 USING hash (x); DROP TABLE hash_heap_float4 CASCADE; + +-- Test out-of-range fillfactor values +CREATE INDEX hash_f8_index2 ON hash_f8_heap USING hash (random float8_ops) + WITH (fillfactor=9); +CREATE INDEX hash_f8_index2 ON hash_f8_heap USING hash (random float8_ops) + WITH (fillfactor=101); diff --git a/src/test/regress/sql/hash_part.sql b/src/test/regress/sql/hash_part.sql new file mode 100644 index 0000000000..f457ac344c --- /dev/null +++ b/src/test/regress/sql/hash_part.sql @@ -0,0 +1,80 @@ +-- +-- Hash partitioning. +-- + +-- Use hand-rolled hash functions and operator classes to get predictable +-- results on different machines. See the definitions of +-- part_test_int4_ops and part_test_text_ops in insert.sql. + +CREATE TABLE mchash (a int, b text, c jsonb) + PARTITION BY HASH (a part_test_int4_ops, b part_test_text_ops); +CREATE TABLE mchash1 + PARTITION OF mchash FOR VALUES WITH (MODULUS 4, REMAINDER 0); + +-- invalid OID, no such table +SELECT satisfies_hash_partition(0, 4, 0, NULL); + +-- not partitioned +SELECT satisfies_hash_partition('tenk1'::regclass, 4, 0, NULL); + +-- partition rather than the parent +SELECT satisfies_hash_partition('mchash1'::regclass, 4, 0, NULL); + +-- invalid modulus +SELECT satisfies_hash_partition('mchash'::regclass, 0, 0, NULL); + +-- remainder too small +SELECT satisfies_hash_partition('mchash'::regclass, 1, -1, NULL); + +-- remainder too large +SELECT satisfies_hash_partition('mchash'::regclass, 1, 1, NULL); + +-- modulus is null +SELECT satisfies_hash_partition('mchash'::regclass, NULL, 0, NULL); + +-- remainder is null +SELECT satisfies_hash_partition('mchash'::regclass, 4, NULL, NULL); + +-- too many arguments +SELECT satisfies_hash_partition('mchash'::regclass, 4, 0, NULL::int, NULL::text, NULL::json); + +-- too few arguments +SELECT satisfies_hash_partition('mchash'::regclass, 3, 1, NULL::int); + +-- wrong argument type +SELECT satisfies_hash_partition('mchash'::regclass, 2, 1, NULL::int, NULL::int); + +-- ok, should be false +SELECT satisfies_hash_partition('mchash'::regclass, 4, 0, 0, ''::text); + +-- ok, should be true +SELECT satisfies_hash_partition('mchash'::regclass, 4, 0, 2, ''::text); + +-- argument via variadic syntax, should fail because not all partitioning +-- columns are of the correct type +SELECT satisfies_hash_partition('mchash'::regclass, 2, 1, + variadic array[1,2]::int[]); + +-- multiple partitioning columns of the same type +CREATE TABLE mcinthash (a int, b int, c jsonb) + PARTITION BY HASH (a part_test_int4_ops, b 
part_test_int4_ops); + +-- now variadic should work, should be false +SELECT satisfies_hash_partition('mcinthash'::regclass, 4, 0, + variadic array[0, 0]); + +-- should be true +SELECT satisfies_hash_partition('mcinthash'::regclass, 4, 0, + variadic array[0, 1]); + +-- wrong length +SELECT satisfies_hash_partition('mcinthash'::regclass, 4, 0, + variadic array[]::int[]); + +-- wrong type +SELECT satisfies_hash_partition('mcinthash'::regclass, 4, 0, + variadic array[now(), now()]); + +-- cleanup +DROP TABLE mchash; +DROP TABLE mcinthash; diff --git a/src/test/regress/sql/horology.sql b/src/test/regress/sql/horology.sql index a7bc9dcfc4..e356dd563e 100644 --- a/src/test/regress/sql/horology.sql +++ b/src/test/regress/sql/horology.sql @@ -267,35 +267,6 @@ SELECT '' AS "226", d1.f1 AS timestamp1, d2.f1 AS timestamp2, d1.f1 - d2.f1 AS d FROM TEMP_TIMESTAMP d1, TEMP_TIMESTAMP d2 ORDER BY timestamp1, timestamp2, difference; --- --- abstime, reltime arithmetic --- - -SELECT '' AS ten, ABSTIME_TBL.f1 AS abstime, RELTIME_TBL.f1 AS reltime - FROM ABSTIME_TBL, RELTIME_TBL - WHERE (ABSTIME_TBL.f1 + RELTIME_TBL.f1) < abstime 'Jan 14 14:00:00 1971' - ORDER BY abstime, reltime; - --- these four queries should return the same answer --- the "infinity" and "-infinity" tuples in ABSTIME_TBL cannot be added and --- therefore, should not show up in the results. - -SELECT '' AS three, * FROM ABSTIME_TBL - WHERE (ABSTIME_TBL.f1 + reltime '@ 3 year') -- +3 years - < abstime 'Jan 14 14:00:00 1977'; - -SELECT '' AS three, * FROM ABSTIME_TBL - WHERE (ABSTIME_TBL.f1 + reltime '@ 3 year ago') -- -3 years - < abstime 'Jan 14 14:00:00 1971'; - -SELECT '' AS three, * FROM ABSTIME_TBL - WHERE (ABSTIME_TBL.f1 - reltime '@ 3 year') -- -(+3) years - < abstime 'Jan 14 14:00:00 1971'; - -SELECT '' AS three, * FROM ABSTIME_TBL - WHERE (ABSTIME_TBL.f1 - reltime '@ 3 year ago') -- -(-3) years - < abstime 'Jan 14 14:00:00 1977'; - -- -- Conversions -- @@ -305,27 +276,6 @@ SELECT '' AS "16", f1 AS "timestamp", date(f1) AS date WHERE f1 <> timestamp 'now' ORDER BY date, "timestamp"; -SELECT '' AS "16", f1 AS "timestamp", abstime(f1) AS abstime - FROM TEMP_TIMESTAMP - ORDER BY abstime; - -SELECT '' AS four, f1 AS abstime, date(f1) AS date - FROM ABSTIME_TBL - WHERE isfinite(f1) AND f1 <> abstime 'now' - ORDER BY date, abstime; - -SELECT '' AS two, d1 AS "timestamp", abstime(d1) AS abstime - FROM TIMESTAMP_TBL WHERE NOT isfinite(d1); - -SELECT '' AS three, f1 as abstime, cast(f1 as timestamp) AS "timestamp" - FROM ABSTIME_TBL WHERE NOT isfinite(f1); - -SELECT '' AS ten, f1 AS interval, reltime(f1) AS reltime - FROM INTERVAL_TBL; - -SELECT '' AS six, f1 as reltime, CAST(f1 AS interval) AS interval - FROM RELTIME_TBL; - DROP TABLE TEMP_TIMESTAMP; -- @@ -338,22 +288,16 @@ SHOW DateStyle; SELECT '' AS "64", d1 AS us_postgres FROM TIMESTAMP_TBL; -SELECT '' AS seven, f1 AS us_postgres FROM ABSTIME_TBL; - SET DateStyle TO 'US,ISO'; SELECT '' AS "64", d1 AS us_iso FROM TIMESTAMP_TBL; -SELECT '' AS seven, f1 AS us_iso FROM ABSTIME_TBL; - SET DateStyle TO 'US,SQL'; SHOW DateStyle; SELECT '' AS "64", d1 AS us_sql FROM TIMESTAMP_TBL; -SELECT '' AS seven, f1 AS us_sql FROM ABSTIME_TBL; - SET DateStyle TO 'European,Postgres'; SHOW DateStyle; @@ -364,24 +308,18 @@ SELECT count(*) as one FROM TIMESTAMP_TBL WHERE d1 = 'Jun 13 1957'; SELECT '' AS "65", d1 AS european_postgres FROM TIMESTAMP_TBL; -SELECT '' AS seven, f1 AS european_postgres FROM ABSTIME_TBL; - SET DateStyle TO 'European,ISO'; SHOW DateStyle; SELECT '' AS "65", d1 AS european_iso FROM 
TIMESTAMP_TBL; -SELECT '' AS seven, f1 AS european_iso FROM ABSTIME_TBL; - SET DateStyle TO 'European,SQL'; SHOW DateStyle; SELECT '' AS "65", d1 AS european_sql FROM TIMESTAMP_TBL; -SELECT '' AS seven, f1 AS european_sql FROM ABSTIME_TBL; - RESET DateStyle; -- @@ -392,15 +330,21 @@ SELECT to_timestamp('0097/Feb/16 --> 08:14:30', 'YYYY/Mon/DD --> HH:MI:SS'); SELECT to_timestamp('97/2/16 8:14:30', 'FMYYYY/FMMM/FMDD FMHH:FMMI:FMSS'); +SELECT to_timestamp('2011$03!18 23_38_15', 'YYYY-MM-DD HH24:MI:SS'); + SELECT to_timestamp('1985 January 12', 'YYYY FMMonth DD'); +SELECT to_timestamp('1985 FMMonth 12', 'YYYY "FMMonth" DD'); + +SELECT to_timestamp('1985 \ 12', 'YYYY \\ DD'); + SELECT to_timestamp('My birthday-> Year: 1976, Month: May, Day: 16', - '"My birthday-> Year" YYYY, "Month:" FMMonth, "Day:" DD'); + '"My birthday-> Year:" YYYY, "Month:" FMMonth, "Day:" DD'); SELECT to_timestamp('1,582nd VIII 21', 'Y,YYYth FMRM DD'); SELECT to_timestamp('15 "text between quote marks" 98 54 45', - E'HH24 "\\text between quote marks\\"" YY MI SS'); + E'HH24 "\\"text between quote marks\\"" YY MI SS'); SELECT to_timestamp('05121445482000', 'MMDDHH24MISSYYYY'); @@ -408,6 +352,12 @@ SELECT to_timestamp('2000January09Sunday', 'YYYYFMMonthDDFMDay'); SELECT to_timestamp('97/Feb/16', 'YYMonDD'); +SELECT to_timestamp('97/Feb/16', 'YY:Mon:DD'); + +SELECT to_timestamp('97/Feb/16', 'FXYY:Mon:DD'); + +SELECT to_timestamp('97/Feb/16', 'FXYY/Mon/DD'); + SELECT to_timestamp('19971116', 'YYYYMMDD'); SELECT to_timestamp('20000-1116', 'YYYY-MMDD'); @@ -446,6 +396,12 @@ SELECT to_timestamp(' 20050302', 'YYYYMMDD'); SELECT to_timestamp('2011-12-18 11:38 AM', 'YYYY-MM-DD HH12:MI PM'); SELECT to_timestamp('2011-12-18 11:38 PM', 'YYYY-MM-DD HH12:MI PM'); +SELECT to_timestamp('2011-12-18 11:38 +05', 'YYYY-MM-DD HH12:MI TZH'); +SELECT to_timestamp('2011-12-18 11:38 -05', 'YYYY-MM-DD HH12:MI TZH'); +SELECT to_timestamp('2011-12-18 11:38 +05:20', 'YYYY-MM-DD HH12:MI TZH:TZM'); +SELECT to_timestamp('2011-12-18 11:38 -05:20', 'YYYY-MM-DD HH12:MI TZH:TZM'); +SELECT to_timestamp('2011-12-18 11:38 20', 'YYYY-MM-DD HH12:MI TZM'); + -- -- Check handling of multiple spaces in format and/or input -- @@ -458,6 +414,17 @@ SELECT to_timestamp('2011-12-18 23:38:15', 'YYYY-MM-DD HH24:MI:SS'); SELECT to_timestamp('2011-12-18 23:38:15', 'YYYY-MM-DD HH24:MI:SS'); SELECT to_timestamp('2011-12-18 23:38:15', 'YYYY-MM-DD HH24:MI:SS'); +SELECT to_timestamp('2000+ JUN', 'YYYY/MON'); +SELECT to_timestamp(' 2000 +JUN', 'YYYY/MON'); +SELECT to_timestamp(' 2000 +JUN', 'YYYY//MON'); +SELECT to_timestamp('2000 +JUN', 'YYYY//MON'); +SELECT to_timestamp('2000 + JUN', 'YYYY MON'); +SELECT to_timestamp('2000 ++ JUN', 'YYYY MON'); +SELECT to_timestamp('2000 + + JUN', 'YYYY MON'); +SELECT to_timestamp('2000 + + JUN', 'YYYY MON'); +SELECT to_timestamp('2000 -10', 'YYYY TZH'); +SELECT to_timestamp('2000 -10', 'YYYY TZH'); + SELECT to_date('2011 12 18', 'YYYY MM DD'); SELECT to_date('2011 12 18', 'YYYY MM DD'); SELECT to_date('2011 12 18', 'YYYY MM DD'); @@ -466,6 +433,10 @@ SELECT to_date('2011 12 18', 'YYYY MM DD'); SELECT to_date('2011 12 18', 'YYYY MM DD'); SELECT to_date('2011 12 18', 'YYYY MM DD'); +SELECT to_date('2011 12 18', 'YYYYxMMxDD'); +SELECT to_date('2011x 12x 18', 'YYYYxMMxDD'); +SELECT to_date('2011 x12 x18', 'YYYYxMMxDD'); + -- -- Check errors for some incorrect usages of to_timestamp() and to_date() -- diff --git a/src/test/regress/sql/identity.sql b/src/test/regress/sql/identity.sql index a7e7b15737..a35f331f4e 100644 --- 
a/src/test/regress/sql/identity.sql +++ b/src/test/regress/sql/identity.sql @@ -12,6 +12,10 @@ SELECT table_name, column_name, column_default, is_nullable, is_identity, identi -- internal sequences should not be shown here SELECT sequence_name FROM information_schema.sequences WHERE sequence_name LIKE 'itest%'; +SELECT pg_get_serial_sequence('itest1', 'a'); + +\d itest1_a_seq + CREATE TABLE itest4 (a int, b text); ALTER TABLE itest4 ALTER COLUMN a ADD GENERATED ALWAYS AS IDENTITY; -- error, requires NOT NULL ALTER TABLE itest4 ALTER COLUMN a SET NOT NULL; @@ -50,6 +54,14 @@ SELECT * FROM itest3; SELECT * FROM itest4; +-- VALUES RTEs + +INSERT INTO itest3 VALUES (DEFAULT, 'a'); +INSERT INTO itest3 VALUES (DEFAULT, 'b'), (DEFAULT, 'c'); + +SELECT * FROM itest3; + + -- OVERRIDING tests INSERT INTO itest1 VALUES (10, 'xyz'); @@ -74,6 +86,23 @@ UPDATE itest2 SET a = DEFAULT WHERE a = 2; SELECT * FROM itest2; +-- COPY tests + +CREATE TABLE itest9 (a int GENERATED ALWAYS AS IDENTITY, b text, c bigint); + +COPY itest9 FROM stdin; +100 foo 200 +101 bar 201 +\. + +COPY itest9 (b, c) FROM stdin; +foo2 202 +bar2 203 +\. + +SELECT * FROM itest9 ORDER BY c; + + -- DROP IDENTITY tests ALTER TABLE itest4 ALTER COLUMN a DROP IDENTITY; @@ -117,6 +146,17 @@ INSERT INTO itestv11 OVERRIDING SYSTEM VALUE VALUES (11, 'xyz'); SELECT * FROM itestv11; +-- ADD COLUMN + +CREATE TABLE itest13 (a int); +-- add column to empty table +ALTER TABLE itest13 ADD COLUMN b int GENERATED BY DEFAULT AS IDENTITY; +INSERT INTO itest13 VALUES (1), (2), (3); +-- add column to populated table +ALTER TABLE itest13 ADD COLUMN c int GENERATED BY DEFAULT AS IDENTITY; +SELECT * FROM itest13; + + -- various ALTER COLUMN tests -- fail, not allowed for identity columns @@ -181,12 +221,28 @@ ALTER TABLE itest7 ALTER COLUMN a RESTART; ALTER TABLE itest7 ALTER COLUMN a DROP IDENTITY; -- privileges -CREATE USER regress_user1; +CREATE USER regress_identity_user1; CREATE TABLE itest8 (a int GENERATED ALWAYS AS IDENTITY, b text); -GRANT SELECT, INSERT ON itest8 TO regress_user1; -SET ROLE regress_user1; +GRANT SELECT, INSERT ON itest8 TO regress_identity_user1; +SET ROLE regress_identity_user1; INSERT INTO itest8 DEFAULT VALUES; SELECT * FROM itest8; RESET ROLE; DROP TABLE itest8; -DROP USER regress_user1; +DROP USER regress_identity_user1; + + +-- typed tables (currently not supported) + +CREATE TYPE itest_type AS (f1 integer, f2 text, f3 bigint); +CREATE TABLE itest12 OF itest_type (f1 WITH OPTIONS GENERATED ALWAYS AS IDENTITY); -- error +DROP TYPE itest_type CASCADE; + + +-- table partitions (currently not supported) + +CREATE TABLE itest_parent (f1 date NOT NULL, f2 text, f3 bigint) PARTITION BY RANGE (f1); +CREATE TABLE itest_child PARTITION OF itest_parent ( + f3 WITH OPTIONS GENERATED ALWAYS AS IDENTITY +) FOR VALUES FROM ('2016-07-01') TO ('2016-08-01'); -- error +DROP TABLE itest_parent; diff --git a/src/test/regress/sql/index_including.sql b/src/test/regress/sql/index_including.sql new file mode 100644 index 0000000000..ef5fd882f5 --- /dev/null +++ b/src/test/regress/sql/index_including.sql @@ -0,0 +1,210 @@ +/* + * 1.test CREATE INDEX + * + * Deliberately avoid dropping objects in this section, to get some pg_dump + * coverage. 
+ */ + +-- Regular index with included columns +CREATE TABLE tbl_include_reg (c1 int, c2 int, c3 int, c4 box); +INSERT INTO tbl_include_reg SELECT x, 2*x, 3*x, box('4,4,4,4') FROM generate_series(1,10) AS x; +CREATE INDEX tbl_include_reg_idx ON tbl_include_reg (c1, c2) INCLUDE (c3, c4); +-- duplicate column is pretty pointless, but we allow it anyway +CREATE INDEX ON tbl_include_reg (c1, c2) INCLUDE (c1, c3); +SELECT pg_get_indexdef(i.indexrelid) +FROM pg_index i JOIN pg_class c ON i.indexrelid = c.oid +WHERE i.indrelid = 'tbl_include_reg'::regclass ORDER BY c.relname; +\d tbl_include_reg_idx + +-- Unique index and unique constraint +CREATE TABLE tbl_include_unique1 (c1 int, c2 int, c3 int, c4 box); +INSERT INTO tbl_include_unique1 SELECT x, 2*x, 3*x, box('4,4,4,4') FROM generate_series(1,10) AS x; +CREATE UNIQUE INDEX tbl_include_unique1_idx_unique ON tbl_include_unique1 using btree (c1, c2) INCLUDE (c3, c4); +ALTER TABLE tbl_include_unique1 add UNIQUE USING INDEX tbl_include_unique1_idx_unique; +ALTER TABLE tbl_include_unique1 add UNIQUE (c1, c2) INCLUDE (c3, c4); +SELECT pg_get_indexdef(i.indexrelid) +FROM pg_index i JOIN pg_class c ON i.indexrelid = c.oid +WHERE i.indrelid = 'tbl_include_unique1'::regclass ORDER BY c.relname; + +-- Unique index and unique constraint. Both must fail. +CREATE TABLE tbl_include_unique2 (c1 int, c2 int, c3 int, c4 box); +INSERT INTO tbl_include_unique2 SELECT 1, 2, 3*x, box('4,4,4,4') FROM generate_series(1,10) AS x; +CREATE UNIQUE INDEX tbl_include_unique2_idx_unique ON tbl_include_unique2 using btree (c1, c2) INCLUDE (c3, c4); +ALTER TABLE tbl_include_unique2 add UNIQUE (c1, c2) INCLUDE (c3, c4); + +-- PK constraint +CREATE TABLE tbl_include_pk (c1 int, c2 int, c3 int, c4 box); +INSERT INTO tbl_include_pk SELECT 1, 2*x, 3*x, box('4,4,4,4') FROM generate_series(1,10) AS x; +ALTER TABLE tbl_include_pk add PRIMARY KEY (c1, c2) INCLUDE (c3, c4); +SELECT pg_get_indexdef(i.indexrelid) +FROM pg_index i JOIN pg_class c ON i.indexrelid = c.oid +WHERE i.indrelid = 'tbl_include_pk'::regclass ORDER BY c.relname; + +CREATE TABLE tbl_include_box (c1 int, c2 int, c3 int, c4 box); +INSERT INTO tbl_include_box SELECT 1, 2*x, 3*x, box('4,4,4,4') FROM generate_series(1,10) AS x; +CREATE UNIQUE INDEX tbl_include_box_idx_unique ON tbl_include_box using btree (c1, c2) INCLUDE (c3, c4); +ALTER TABLE tbl_include_box add PRIMARY KEY USING INDEX tbl_include_box_idx_unique; +SELECT pg_get_indexdef(i.indexrelid) +FROM pg_index i JOIN pg_class c ON i.indexrelid = c.oid +WHERE i.indrelid = 'tbl_include_box'::regclass ORDER BY c.relname; + +-- PK constraint. Must fail. +CREATE TABLE tbl_include_box_pk (c1 int, c2 int, c3 int, c4 box); +INSERT INTO tbl_include_box_pk SELECT 1, 2, 3*x, box('4,4,4,4') FROM generate_series(1,10) AS x; +ALTER TABLE tbl_include_box_pk add PRIMARY KEY (c1, c2) INCLUDE (c3, c4); + + +/* + * 2. 
Test CREATE TABLE with constraint
+ */
+CREATE TABLE tbl (c1 int,c2 int, c3 int, c4 box,
+ CONSTRAINT covering UNIQUE(c1,c2) INCLUDE(c3,c4));
+SELECT indexrelid::regclass, indnatts, indnkeyatts, indisunique, indisprimary, indkey, indclass FROM pg_index WHERE indrelid = 'tbl'::regclass::oid;
+SELECT pg_get_constraintdef(oid), conname, conkey FROM pg_constraint WHERE conrelid = 'tbl'::regclass::oid;
+-- ensure that constraint works
+INSERT INTO tbl SELECT 1, 2, 3*x, box('4,4,4,4') FROM generate_series(1,10) AS x;
+DROP TABLE tbl;
+
+CREATE TABLE tbl (c1 int,c2 int, c3 int, c4 box,
+ CONSTRAINT covering PRIMARY KEY(c1,c2) INCLUDE(c3,c4));
+SELECT indexrelid::regclass, indnatts, indnkeyatts, indisunique, indisprimary, indkey, indclass FROM pg_index WHERE indrelid = 'tbl'::regclass::oid;
+SELECT pg_get_constraintdef(oid), conname, conkey FROM pg_constraint WHERE conrelid = 'tbl'::regclass::oid;
+-- ensure that constraint works
+INSERT INTO tbl SELECT 1, 2, 3*x, box('4,4,4,4') FROM generate_series(1,10) AS x;
+INSERT INTO tbl SELECT 1, NULL, 3*x, box('4,4,4,4') FROM generate_series(1,10) AS x;
+INSERT INTO tbl SELECT x, 2*x, NULL, NULL FROM generate_series(1,10) AS x;
+DROP TABLE tbl;
+
+CREATE TABLE tbl (c1 int,c2 int, c3 int, c4 box,
+ UNIQUE(c1,c2) INCLUDE(c3,c4));
+SELECT indexrelid::regclass, indnatts, indnkeyatts, indisunique, indisprimary, indkey, indclass FROM pg_index WHERE indrelid = 'tbl'::regclass::oid;
+SELECT pg_get_constraintdef(oid), conname, conkey FROM pg_constraint WHERE conrelid = 'tbl'::regclass::oid;
+-- ensure that constraint works
+INSERT INTO tbl SELECT 1, 2, 3*x, box('4,4,4,4') FROM generate_series(1,10) AS x;
+DROP TABLE tbl;
+
+CREATE TABLE tbl (c1 int,c2 int, c3 int, c4 box,
+ PRIMARY KEY(c1,c2) INCLUDE(c3,c4));
+SELECT indexrelid::regclass, indnatts, indnkeyatts, indisunique, indisprimary, indkey, indclass FROM pg_index WHERE indrelid = 'tbl'::regclass::oid;
+SELECT pg_get_constraintdef(oid), conname, conkey FROM pg_constraint WHERE conrelid = 'tbl'::regclass::oid;
+-- ensure that constraint works
+INSERT INTO tbl SELECT 1, 2, 3*x, box('4,4,4,4') FROM generate_series(1,10) AS x;
+INSERT INTO tbl SELECT 1, NULL, 3*x, box('4,4,4,4') FROM generate_series(1,10) AS x;
+INSERT INTO tbl SELECT x, 2*x, NULL, NULL FROM generate_series(1,10) AS x;
+DROP TABLE tbl;
+
+CREATE TABLE tbl (c1 int,c2 int, c3 int, c4 box,
+ EXCLUDE USING btree (c1 WITH =) INCLUDE(c3,c4));
+SELECT indexrelid::regclass, indnatts, indnkeyatts, indisunique, indisprimary, indkey, indclass FROM pg_index WHERE indrelid = 'tbl'::regclass::oid;
+SELECT pg_get_constraintdef(oid), conname, conkey FROM pg_constraint WHERE conrelid = 'tbl'::regclass::oid;
+-- ensure that constraint works
+INSERT INTO tbl SELECT 1, 2, 3*x, box('4,4,4,4') FROM generate_series(1,10) AS x;
+INSERT INTO tbl SELECT x, 2*x, NULL, NULL FROM generate_series(1,10) AS x;
+DROP TABLE tbl;
+
+/*
+ * 3.0 Test ALTER TABLE DROP COLUMN.
+ * Any column deletion leads to index deletion.
+ */
+CREATE TABLE tbl (c1 int,c2 int, c3 int, c4 int);
+CREATE UNIQUE INDEX tbl_idx ON tbl using btree(c1, c2, c3, c4);
+SELECT indexdef FROM pg_indexes WHERE tablename = 'tbl' ORDER BY indexname;
+ALTER TABLE tbl DROP COLUMN c3;
+SELECT indexdef FROM pg_indexes WHERE tablename = 'tbl' ORDER BY indexname;
+DROP TABLE tbl;
+
+/*
+ * 3.1 Test ALTER TABLE DROP COLUMN.
+ * Dropping an included column deletes the whole index, just as dropping
+ * a key column does; this is explained in the documentation.
+ */
+CREATE TABLE tbl (c1 int,c2 int, c3 int, c4 box);
+CREATE UNIQUE INDEX tbl_idx ON tbl using btree(c1, c2) INCLUDE(c3,c4);
+SELECT indexdef FROM pg_indexes WHERE tablename = 'tbl' ORDER BY indexname;
+ALTER TABLE tbl DROP COLUMN c3;
+SELECT indexdef FROM pg_indexes WHERE tablename = 'tbl' ORDER BY indexname;
+DROP TABLE tbl;
+
+/*
+ * 3.2 Test ALTER TABLE DROP COLUMN.
+ * Dropping either an included column or a key column deletes the whole
+ * index; this is explained in the documentation.
+ */
+CREATE TABLE tbl (c1 int,c2 int, c3 int, c4 box, UNIQUE(c1, c2) INCLUDE(c3,c4));
+SELECT indexdef FROM pg_indexes WHERE tablename = 'tbl' ORDER BY indexname;
+ALTER TABLE tbl DROP COLUMN c3;
+SELECT indexdef FROM pg_indexes WHERE tablename = 'tbl' ORDER BY indexname;
+ALTER TABLE tbl DROP COLUMN c1;
+SELECT indexdef FROM pg_indexes WHERE tablename = 'tbl' ORDER BY indexname;
+DROP TABLE tbl;
+
+/*
+ * 3.3 Test ALTER TABLE SET STATISTICS
+ */
+CREATE TABLE tbl (c1 int, c2 int);
+CREATE INDEX tbl_idx ON tbl (c1, (c1+0)) INCLUDE (c2);
+ALTER INDEX tbl_idx ALTER COLUMN 1 SET STATISTICS 1000;
+ALTER INDEX tbl_idx ALTER COLUMN 2 SET STATISTICS 1000;
+ALTER INDEX tbl_idx ALTER COLUMN 3 SET STATISTICS 1000;
+ALTER INDEX tbl_idx ALTER COLUMN 4 SET STATISTICS 1000;
+DROP TABLE tbl;
+
+/*
+ * 4. CREATE INDEX CONCURRENTLY
+ */
+CREATE TABLE tbl (c1 int,c2 int, c3 int, c4 box, UNIQUE(c1, c2) INCLUDE(c3,c4));
+INSERT INTO tbl SELECT x, 2*x, 3*x, box('4,4,4,4') FROM generate_series(1,1000) AS x;
+CREATE UNIQUE INDEX CONCURRENTLY on tbl (c1, c2) INCLUDE (c3, c4);
+SELECT indexdef FROM pg_indexes WHERE tablename = 'tbl' ORDER BY indexname;
+DROP TABLE tbl;
+
+
+/*
+ * 5. REINDEX
+ */
+CREATE TABLE tbl (c1 int,c2 int, c3 int, c4 box, UNIQUE(c1, c2) INCLUDE(c3,c4));
+SELECT indexdef FROM pg_indexes WHERE tablename = 'tbl' ORDER BY indexname;
+ALTER TABLE tbl DROP COLUMN c3;
+SELECT indexdef FROM pg_indexes WHERE tablename = 'tbl' ORDER BY indexname;
+REINDEX INDEX tbl_c1_c2_c3_c4_key;
+SELECT indexdef FROM pg_indexes WHERE tablename = 'tbl' ORDER BY indexname;
+ALTER TABLE tbl DROP COLUMN c1;
+SELECT indexdef FROM pg_indexes WHERE tablename = 'tbl' ORDER BY indexname;
+DROP TABLE tbl;
+
+/*
+ * 7. Check various AMs. All but btree must fail.
+ */
+CREATE TABLE tbl (c1 int,c2 int, c3 box, c4 box);
+CREATE INDEX on tbl USING brin(c1, c2) INCLUDE (c3, c4);
+CREATE INDEX on tbl USING gist(c3) INCLUDE (c4);
+CREATE INDEX on tbl USING spgist(c3) INCLUDE (c4);
+CREATE INDEX on tbl USING gin(c1, c2) INCLUDE (c3, c4);
+CREATE INDEX on tbl USING hash(c1, c2) INCLUDE (c3, c4);
+CREATE INDEX on tbl USING rtree(c1, c2) INCLUDE (c3, c4);
+CREATE INDEX on tbl USING btree(c1, c2) INCLUDE (c3, c4);
+DROP TABLE tbl;
+
+/*
+ * 8. Update, delete values in indexed table.
+ */
+CREATE TABLE tbl (c1 int, c2 int, c3 int, c4 box);
+INSERT INTO tbl SELECT x, 2*x, 3*x, box('4,4,4,4') FROM generate_series(1,10) AS x;
+CREATE UNIQUE INDEX tbl_idx_unique ON tbl using btree(c1, c2) INCLUDE (c3,c4);
+UPDATE tbl SET c1 = 100 WHERE c1 = 2;
+UPDATE tbl SET c1 = 1 WHERE c1 = 3;
+-- should fail
+UPDATE tbl SET c2 = 2 WHERE c1 = 1;
+UPDATE tbl SET c3 = 1;
+DELETE FROM tbl WHERE c1 = 5 OR c3 = 12;
+DROP TABLE tbl;
+
+/*
+ * 9. Alter column type.
+ */ +CREATE TABLE tbl (c1 int,c2 int, c3 int, c4 box, UNIQUE(c1, c2) INCLUDE(c3,c4)); +INSERT INTO tbl SELECT x, 2*x, 3*x, box('4,4,4,4') FROM generate_series(1,10) AS x; +ALTER TABLE tbl ALTER c1 TYPE bigint; +ALTER TABLE tbl ALTER c3 TYPE bigint; +\d tbl +DROP TABLE tbl; diff --git a/src/test/regress/sql/indexing.sql b/src/test/regress/sql/indexing.sql new file mode 100644 index 0000000000..400b7eb7ba --- /dev/null +++ b/src/test/regress/sql/indexing.sql @@ -0,0 +1,755 @@ +-- Creating an index on a partitioned table makes the partitions +-- automatically get the index +create table idxpart (a int, b int, c text) partition by range (a); + +-- relhassubclass of a partitioned index is false before creating any partition. +-- It will be set after the first partition is created. +create index idxpart_idx on idxpart (a); +select relhassubclass from pg_class where relname = 'idxpart_idx'; +drop index idxpart_idx; + +create table idxpart1 partition of idxpart for values from (0) to (10); +create table idxpart2 partition of idxpart for values from (10) to (100) + partition by range (b); +create table idxpart21 partition of idxpart2 for values from (0) to (100); + +-- Even with partitions, relhassubclass should not be set if a partitioned +-- index is created only on the parent. +create index idxpart_idx on only idxpart(a); +select relhassubclass from pg_class where relname = 'idxpart_idx'; +drop index idxpart_idx; + +create index on idxpart (a); +select relname, relkind, relhassubclass, inhparent::regclass + from pg_class left join pg_index ix on (indexrelid = oid) + left join pg_inherits on (ix.indexrelid = inhrelid) + where relname like 'idxpart%' order by relname; +drop table idxpart; + +-- Some unsupported features +create table idxpart (a int, b int, c text) partition by range (a); +create table idxpart1 partition of idxpart for values from (0) to (10); +create index concurrently on idxpart (a); +drop table idxpart; + +-- Verify bugfix with query on indexed partitioned table with no partitions +-- https://postgr.es/m/20180124162006.pmapfiznhgngwtjf@alvherre.pgsql +CREATE TABLE idxpart (col1 INT) PARTITION BY RANGE (col1); +CREATE INDEX ON idxpart (col1); +CREATE TABLE idxpart_two (col2 INT); +SELECT col2 FROM idxpart_two fk LEFT OUTER JOIN idxpart pk ON (col1 = col2); +DROP table idxpart, idxpart_two; + +-- Verify bugfix with index rewrite on ALTER TABLE / SET DATA TYPE +-- https://postgr.es/m/CAKcux6mxNCGsgATwf5CGMF8g4WSupCXicCVMeKUTuWbyxHOMsQ@mail.gmail.com +CREATE TABLE idxpart (a INT, b TEXT, c INT) PARTITION BY RANGE(a); +CREATE TABLE idxpart1 PARTITION OF idxpart FOR VALUES FROM (MINVALUE) TO (MAXVALUE); +CREATE INDEX partidx_abc_idx ON idxpart (a, b, c); +INSERT INTO idxpart (a, b, c) SELECT i, i, i FROM generate_series(1, 50) i; +ALTER TABLE idxpart ALTER COLUMN c TYPE numeric; +DROP TABLE idxpart; + +-- If a table without index is attached as partition to a table with +-- an index, the index is automatically created +create table idxpart (a int, b int, c text) partition by range (a); +create index idxparti on idxpart (a); +create index idxparti2 on idxpart (b, c); +create table idxpart1 (like idxpart); +\d idxpart1 +alter table idxpart attach partition idxpart1 for values from (0) to (10); +\d idxpart1 +\d+ idxpart1_a_idx +\d+ idxpart1_b_c_idx +drop table idxpart; + +-- If a partition already has an index, don't create a duplicative one +create table idxpart (a int, b int) partition by range (a, b); +create table idxpart1 partition of idxpart for values from (0, 0) to (10, 10); 
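-- [Editorial illustration, not part of the patched test file: the names
-- demo_parent, demo_part and the explicit index names are hypothetical.]
-- A minimal sketch of the attach-instead-of-duplicate behavior exercised by
-- the surrounding statements: when CREATE INDEX runs on the partitioned
-- parent and an equivalent index already exists on a partition, that index
-- is expected to be attached (linked via pg_inherits) rather than rebuilt.
create table demo_parent (a int, b int) partition by range (a);
create table demo_part partition of demo_parent for values from (0) to (10);
create index demo_part_a_b_idx on demo_part (a, b);       -- pre-existing index on the partition
create index demo_parent_a_b_idx on demo_parent (a, b);   -- should reuse demo_part_a_b_idx
select inhrelid::regclass as partition_index, inhparent::regclass as parent_index
  from pg_inherits
 where inhparent = 'demo_parent_a_b_idx'::regclass;        -- expected: demo_part_a_b_idx
drop table demo_parent;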
+create index on idxpart1 (a, b); +create index on idxpart (a, b); +\d idxpart1 +select relname, relkind, relhassubclass, inhparent::regclass + from pg_class left join pg_index ix on (indexrelid = oid) + left join pg_inherits on (ix.indexrelid = inhrelid) + where relname like 'idxpart%' order by relname; +drop table idxpart; + +-- DROP behavior for partitioned indexes +create table idxpart (a int) partition by range (a); +create index on idxpart (a); +create table idxpart1 partition of idxpart for values from (0) to (10); +drop index idxpart1_a_idx; -- no way +drop index idxpart_a_idx; -- both indexes go away +select relname, relkind from pg_class + where relname like 'idxpart%' order by relname; +create index on idxpart (a); +drop table idxpart1; -- the index on partition goes away too +select relname, relkind from pg_class + where relname like 'idxpart%' order by relname; +drop table idxpart; + +-- ALTER INDEX .. ATTACH, error cases +create table idxpart (a int, b int) partition by range (a, b); +create table idxpart1 partition of idxpart for values from (0, 0) to (10, 10); +create index idxpart_a_b_idx on only idxpart (a, b); +create index idxpart1_a_b_idx on idxpart1 (a, b); +create index idxpart1_tst1 on idxpart1 (b, a); +create index idxpart1_tst2 on idxpart1 using hash (a); +create index idxpart1_tst3 on idxpart1 (a, b) where a > 10; + +alter index idxpart attach partition idxpart1; +alter index idxpart_a_b_idx attach partition idxpart1; +alter index idxpart_a_b_idx attach partition idxpart_a_b_idx; +alter index idxpart_a_b_idx attach partition idxpart1_b_idx; +alter index idxpart_a_b_idx attach partition idxpart1_tst1; +alter index idxpart_a_b_idx attach partition idxpart1_tst2; +alter index idxpart_a_b_idx attach partition idxpart1_tst3; +-- OK +alter index idxpart_a_b_idx attach partition idxpart1_a_b_idx; +alter index idxpart_a_b_idx attach partition idxpart1_a_b_idx; -- quiet + +-- reject dupe +create index idxpart1_2_a_b on idxpart1 (a, b); +alter index idxpart_a_b_idx attach partition idxpart1_2_a_b; +drop table idxpart; +-- make sure everything's gone +select indexrelid::regclass, indrelid::regclass + from pg_index where indexrelid::regclass::text like 'idxpart%'; + +-- Don't auto-attach incompatible indexes +create table idxpart (a int, b int) partition by range (a); +create table idxpart1 (a int, b int); +create index on idxpart1 using hash (a); +create index on idxpart1 (a) where b > 1; +create index on idxpart1 ((a + 0)); +create index on idxpart1 (a, a); +create index on idxpart (a); +alter table idxpart attach partition idxpart1 for values from (0) to (1000); +\d idxpart1 +drop table idxpart; + +-- If CREATE INDEX ONLY, don't create indexes on partitions; and existing +-- indexes on partitions don't change parent. ALTER INDEX ATTACH can change +-- the parent after the fact. +create table idxpart (a int) partition by range (a); +create table idxpart1 partition of idxpart for values from (0) to (100); +create table idxpart2 partition of idxpart for values from (100) to (1000) + partition by range (a); +create table idxpart21 partition of idxpart2 for values from (100) to (200); +create table idxpart22 partition of idxpart2 for values from (200) to (300); +create index on idxpart22 (a); +create index on only idxpart2 (a); +create index on idxpart (a); +-- Here we expect that idxpart1 and idxpart2 have a new index, but idxpart21 +-- does not; also, idxpart22 is not attached. 
+\d idxpart1 +\d idxpart2 +\d idxpart21 +select indexrelid::regclass, indrelid::regclass, inhparent::regclass + from pg_index idx left join pg_inherits inh on (idx.indexrelid = inh.inhrelid) +where indexrelid::regclass::text like 'idxpart%' + order by indexrelid::regclass::text collate "C"; +alter index idxpart2_a_idx attach partition idxpart22_a_idx; +select indexrelid::regclass, indrelid::regclass, inhparent::regclass + from pg_index idx left join pg_inherits inh on (idx.indexrelid = inh.inhrelid) +where indexrelid::regclass::text like 'idxpart%' + order by indexrelid::regclass::text collate "C"; +-- attaching idxpart22 is not enough to set idxpart22_a_idx valid ... +alter index idxpart2_a_idx attach partition idxpart22_a_idx; +\d idxpart2 +-- ... but this one is. +create index on idxpart21 (a); +alter index idxpart2_a_idx attach partition idxpart21_a_idx; +\d idxpart2 +drop table idxpart; + +-- When a table is attached a partition and it already has an index, a +-- duplicate index should not get created, but rather the index becomes +-- attached to the parent's index. +create table idxpart (a int, b int, c text) partition by range (a); +create index idxparti on idxpart (a); +create index idxparti2 on idxpart (b, c); +create table idxpart1 (like idxpart including indexes); +\d idxpart1 +select relname, relkind, inhparent::regclass + from pg_class left join pg_index ix on (indexrelid = oid) + left join pg_inherits on (ix.indexrelid = inhrelid) + where relname like 'idxpart%' order by relname; +alter table idxpart attach partition idxpart1 for values from (0) to (10); +\d idxpart1 +select relname, relkind, inhparent::regclass + from pg_class left join pg_index ix on (indexrelid = oid) + left join pg_inherits on (ix.indexrelid = inhrelid) + where relname like 'idxpart%' order by relname; +drop table idxpart; + +-- Verify that attaching an invalid index does not mark the parent index valid. 
+-- On the other hand, attaching a valid index marks not only its direct +-- ancestor valid, but also any indirect ancestor that was only missing the one +-- that was just made valid +create table idxpart (a int, b int) partition by range (a); +create table idxpart1 partition of idxpart for values from (1) to (1000) partition by range (a); +create table idxpart11 partition of idxpart1 for values from (1) to (100); +create index on only idxpart1 (a); +create index on only idxpart (a); +-- this results in two invalid indexes: +select relname, indisvalid from pg_class join pg_index on indexrelid = oid + where relname like 'idxpart%' order by relname; +-- idxpart1_a_idx is not valid, so idxpart_a_idx should not become valid: +alter index idxpart_a_idx attach partition idxpart1_a_idx; +select relname, indisvalid from pg_class join pg_index on indexrelid = oid + where relname like 'idxpart%' order by relname; +-- after creating and attaching this, both idxpart1_a_idx and idxpart_a_idx +-- should become valid +create index on idxpart11 (a); +alter index idxpart1_a_idx attach partition idxpart11_a_idx; +select relname, indisvalid from pg_class join pg_index on indexrelid = oid + where relname like 'idxpart%' order by relname; +drop table idxpart; + +-- verify dependency handling during ALTER TABLE DETACH PARTITION +create table idxpart (a int) partition by range (a); +create table idxpart1 (like idxpart); +create index on idxpart1 (a); +create index on idxpart (a); +create table idxpart2 (like idxpart); +alter table idxpart attach partition idxpart1 for values from (0000) to (1000); +alter table idxpart attach partition idxpart2 for values from (1000) to (2000); +create table idxpart3 partition of idxpart for values from (2000) to (3000); +select relname, relkind from pg_class where relname like 'idxpart%' order by relname; +-- a) after detaching partitions, the indexes can be dropped independently +alter table idxpart detach partition idxpart1; +alter table idxpart detach partition idxpart2; +alter table idxpart detach partition idxpart3; +drop index idxpart1_a_idx; +drop index idxpart2_a_idx; +drop index idxpart3_a_idx; +select relname, relkind from pg_class where relname like 'idxpart%' order by relname; +drop table idxpart, idxpart1, idxpart2, idxpart3; +select relname, relkind from pg_class where relname like 'idxpart%' order by relname; + +create table idxpart (a int) partition by range (a); +create table idxpart1 (like idxpart); +create index on idxpart1 (a); +create index on idxpart (a); +create table idxpart2 (like idxpart); +alter table idxpart attach partition idxpart1 for values from (0000) to (1000); +alter table idxpart attach partition idxpart2 for values from (1000) to (2000); +create table idxpart3 partition of idxpart for values from (2000) to (3000); +-- b) after detaching, dropping the index on parent does not remove the others +select relname, relkind from pg_class where relname like 'idxpart%' order by relname; +alter table idxpart detach partition idxpart1; +alter table idxpart detach partition idxpart2; +alter table idxpart detach partition idxpart3; +drop index idxpart_a_idx; +select relname, relkind from pg_class where relname like 'idxpart%' order by relname; +drop table idxpart, idxpart1, idxpart2, idxpart3; +select relname, relkind from pg_class where relname like 'idxpart%' order by relname; + +-- Verify that expression indexes inherit correctly +create table idxpart (a int, b int) partition by range (a); +create table idxpart1 (like idxpart); +create index on 
idxpart1 ((a + b)); +create index on idxpart ((a + b)); +create table idxpart2 (like idxpart); +alter table idxpart attach partition idxpart1 for values from (0000) to (1000); +alter table idxpart attach partition idxpart2 for values from (1000) to (2000); +create table idxpart3 partition of idxpart for values from (2000) to (3000); +select relname as child, inhparent::regclass as parent, pg_get_indexdef as childdef + from pg_class join pg_inherits on inhrelid = oid, + lateral pg_get_indexdef(pg_class.oid) + where relkind in ('i', 'I') and relname like 'idxpart%' order by relname; +drop table idxpart; + +-- Verify behavior for collation (mis)matches +create table idxpart (a text) partition by range (a); +create table idxpart1 (like idxpart); +create table idxpart2 (like idxpart); +create index on idxpart2 (a collate "POSIX"); +create index on idxpart2 (a); +create index on idxpart2 (a collate "C"); +alter table idxpart attach partition idxpart1 for values from ('aaa') to ('bbb'); +alter table idxpart attach partition idxpart2 for values from ('bbb') to ('ccc'); +create table idxpart3 partition of idxpart for values from ('ccc') to ('ddd'); +create index on idxpart (a collate "C"); +create table idxpart4 partition of idxpart for values from ('ddd') to ('eee'); +select relname as child, inhparent::regclass as parent, pg_get_indexdef as childdef + from pg_class left join pg_inherits on inhrelid = oid, + lateral pg_get_indexdef(pg_class.oid) + where relkind in ('i', 'I') and relname like 'idxpart%' order by relname; +drop table idxpart; + +-- Verify behavior for opclass (mis)matches +create table idxpart (a text) partition by range (a); +create table idxpart1 (like idxpart); +create table idxpart2 (like idxpart); +create index on idxpart2 (a); +alter table idxpart attach partition idxpart1 for values from ('aaa') to ('bbb'); +alter table idxpart attach partition idxpart2 for values from ('bbb') to ('ccc'); +create table idxpart3 partition of idxpart for values from ('ccc') to ('ddd'); +create index on idxpart (a text_pattern_ops); +create table idxpart4 partition of idxpart for values from ('ddd') to ('eee'); +-- must *not* have attached the index we created on idxpart2 +select relname as child, inhparent::regclass as parent, pg_get_indexdef as childdef + from pg_class left join pg_inherits on inhrelid = oid, + lateral pg_get_indexdef(pg_class.oid) + where relkind in ('i', 'I') and relname like 'idxpart%' order by relname; +drop index idxpart_a_idx; +create index on only idxpart (a text_pattern_ops); +-- must reject +alter index idxpart_a_idx attach partition idxpart2_a_idx; +drop table idxpart; + +-- Verify that attaching indexes maps attribute numbers correctly +create table idxpart (col1 int, a int, col2 int, b int) partition by range (a); +create table idxpart1 (b int, col1 int, col2 int, col3 int, a int); +alter table idxpart drop column col1, drop column col2; +alter table idxpart1 drop column col1, drop column col2, drop column col3; +alter table idxpart attach partition idxpart1 for values from (0) to (1000); +create index idxpart_1_idx on only idxpart (b, a); +create index idxpart1_1_idx on idxpart1 (b, a); +create index idxpart1_1b_idx on idxpart1 (b); +-- test expressions and partial-index predicate, too +create index idxpart_2_idx on only idxpart ((b + a)) where a > 1; +create index idxpart1_2_idx on idxpart1 ((b + a)) where a > 1; +create index idxpart1_2b_idx on idxpart1 ((a + b)) where a > 1; +create index idxpart1_2c_idx on idxpart1 ((b + a)) where b > 1; +alter index 
idxpart_1_idx attach partition idxpart1_1b_idx; -- fail +alter index idxpart_1_idx attach partition idxpart1_1_idx; +alter index idxpart_2_idx attach partition idxpart1_2b_idx; -- fail +alter index idxpart_2_idx attach partition idxpart1_2c_idx; -- fail +alter index idxpart_2_idx attach partition idxpart1_2_idx; -- ok +select relname as child, inhparent::regclass as parent, pg_get_indexdef as childdef + from pg_class left join pg_inherits on inhrelid = oid, + lateral pg_get_indexdef(pg_class.oid) + where relkind in ('i', 'I') and relname like 'idxpart%' order by relname; +drop table idxpart; + +-- Make sure the partition columns are mapped correctly +create table idxpart (a int, b int, c text) partition by range (a); +create index idxparti on idxpart (a); +create index idxparti2 on idxpart (c, b); +create table idxpart1 (c text, a int, b int); +alter table idxpart attach partition idxpart1 for values from (0) to (10); +create table idxpart2 (c text, a int, b int); +create index on idxpart2 (a); +create index on idxpart2 (c, b); +alter table idxpart attach partition idxpart2 for values from (10) to (20); +select c.relname, pg_get_indexdef(indexrelid) + from pg_class c join pg_index i on c.oid = i.indexrelid + where indrelid::regclass::text like 'idxpart%' + order by indexrelid::regclass::text collate "C"; +drop table idxpart; + +-- Verify that columns are mapped correctly in expression indexes +create table idxpart (col1 int, col2 int, a int, b int) partition by range (a); +create table idxpart1 (col2 int, b int, col1 int, a int); +create table idxpart2 (col1 int, col2 int, b int, a int); +alter table idxpart drop column col1, drop column col2; +alter table idxpart1 drop column col1, drop column col2; +alter table idxpart2 drop column col1, drop column col2; +create index on idxpart2 (abs(b)); +alter table idxpart attach partition idxpart2 for values from (0) to (1); +create index on idxpart (abs(b)); +create index on idxpart ((b + 1)); +alter table idxpart attach partition idxpart1 for values from (1) to (2); +select c.relname, pg_get_indexdef(indexrelid) + from pg_class c join pg_index i on c.oid = i.indexrelid + where indrelid::regclass::text like 'idxpart%' + order by indexrelid::regclass::text collate "C"; +drop table idxpart; + +-- Verify that columns are mapped correctly for WHERE in a partial index +create table idxpart (col1 int, a int, col3 int, b int) partition by range (a); +alter table idxpart drop column col1, drop column col3; +create table idxpart1 (col1 int, col2 int, col3 int, col4 int, b int, a int); +alter table idxpart1 drop column col1, drop column col2, drop column col3, drop column col4; +alter table idxpart attach partition idxpart1 for values from (0) to (1000); +create table idxpart2 (col1 int, col2 int, b int, a int); +create index on idxpart2 (a) where b > 1000; +alter table idxpart2 drop column col1, drop column col2; +alter table idxpart attach partition idxpart2 for values from (1000) to (2000); +create index on idxpart (a) where b > 1000; +select c.relname, pg_get_indexdef(indexrelid) + from pg_class c join pg_index i on c.oid = i.indexrelid + where indrelid::regclass::text like 'idxpart%' + order by indexrelid::regclass::text collate "C"; +drop table idxpart; + +-- Column number mapping: dropped columns in the partition +create table idxpart1 (drop_1 int, drop_2 int, col_keep int, drop_3 int); +alter table idxpart1 drop column drop_1; +alter table idxpart1 drop column drop_2; +alter table idxpart1 drop column drop_3; +create index on idxpart1 (col_keep); 
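-- [Editorial, read-only sketch for clarity; it mirrors the catalog query run
-- later in this test and is not part of the patched file.] After the three
-- drops above, col_keep should still sit at attribute number 3 in idxpart1,
-- because dropped columns keep their pg_attribute slots; in the parent created
-- just below it is attribute number 1, so attaching the partition has to
-- translate attribute numbers when matching this index to the parent's.
select attname, attnum, attisdropped
  from pg_attribute
 where attrelid = 'idxpart1'::regclass and attnum > 0
 order by attnum;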
+create table idxpart (col_keep int) partition by range (col_keep); +create index on idxpart (col_keep); +alter table idxpart attach partition idxpart1 for values from (0) to (1000); +\d idxpart +\d idxpart1 +select attrelid::regclass, attname, attnum from pg_attribute + where attrelid::regclass::text like 'idxpart%' and attnum > 0 + order by attrelid::regclass, attnum; +drop table idxpart; + +-- Column number mapping: dropped columns in the parent table +create table idxpart(drop_1 int, drop_2 int, col_keep int, drop_3 int) partition by range (col_keep); +alter table idxpart drop column drop_1; +alter table idxpart drop column drop_2; +alter table idxpart drop column drop_3; +create table idxpart1 (col_keep int); +create index on idxpart1 (col_keep); +create index on idxpart (col_keep); +alter table idxpart attach partition idxpart1 for values from (0) to (1000); +\d idxpart +\d idxpart1 +select attrelid::regclass, attname, attnum from pg_attribute + where attrelid::regclass::text like 'idxpart%' and attnum > 0 + order by attrelid::regclass, attnum; +drop table idxpart; + +-- +-- Constraint-related indexes +-- + +-- Verify that it works to add primary key / unique to partitioned tables +create table idxpart (a int primary key, b int) partition by range (a); +\d idxpart +-- multiple primary key on child should fail +create table failpart partition of idxpart (b primary key) for values from (0) to (100); +drop table idxpart; +-- primary key on child is okay if there's no PK in the parent, though +create table idxpart (a int) partition by range (a); +create table idxpart1pk partition of idxpart (a primary key) for values from (0) to (100); +\d idxpart1pk +drop table idxpart; + +-- Failing to use the full partition key is not allowed +create table idxpart (a int unique, b int) partition by range (a, b); +create table idxpart (a int, b int unique) partition by range (a, b); +create table idxpart (a int primary key, b int) partition by range (b, a); +create table idxpart (a int, b int primary key) partition by range (b, a); + +-- OK if you use them in some other order +create table idxpart (a int, b int, c text, primary key (a, b, c)) partition by range (b, c, a); +drop table idxpart; + +-- not other types of index-based constraints +create table idxpart (a int, exclude (a with = )) partition by range (a); + +-- no expressions in partition key for PK/UNIQUE +create table idxpart (a int primary key, b int) partition by range ((b + a)); +create table idxpart (a int unique, b int) partition by range ((b + a)); + +-- use ALTER TABLE to add a primary key +create table idxpart (a int, b int, c text) partition by range (a, b); +alter table idxpart add primary key (a); -- not an incomplete one though +alter table idxpart add primary key (a, b); -- this works +\d idxpart +create table idxpart1 partition of idxpart for values from (0, 0) to (1000, 1000); +\d idxpart1 +drop table idxpart; + +-- use ALTER TABLE to add a unique constraint +create table idxpart (a int, b int) partition by range (a, b); +alter table idxpart add unique (a); -- not an incomplete one though +alter table idxpart add unique (b, a); -- this works +\d idxpart +drop table idxpart; + +-- Exclusion constraints cannot be added +create table idxpart (a int, b int) partition by range (a); +alter table idxpart add exclude (a with =); +drop table idxpart; + +-- When (sub)partitions are created, they also contain the constraint +create table idxpart (a int, b int, primary key (a, b)) partition by range (a, b); +create table idxpart1 
partition of idxpart for values from (1, 1) to (10, 10); +create table idxpart2 partition of idxpart for values from (10, 10) to (20, 20) + partition by range (b); +create table idxpart21 partition of idxpart2 for values from (10) to (15); +create table idxpart22 partition of idxpart2 for values from (15) to (20); +create table idxpart3 (b int not null, a int not null); +alter table idxpart attach partition idxpart3 for values from (20, 20) to (30, 30); +select conname, contype, conrelid::regclass, conindid::regclass, conkey + from pg_constraint where conrelid::regclass::text like 'idxpart%' + order by conname; +drop table idxpart; + +-- Verify that multi-layer partitioning honors the requirement that all +-- columns in the partition key must appear in primary/unique key +create table idxpart (a int, b int, primary key (a)) partition by range (a); +create table idxpart2 partition of idxpart +for values from (0) to (1000) partition by range (b); -- fail +drop table idxpart; + +-- Ditto for the ATTACH PARTITION case +create table idxpart (a int unique, b int) partition by range (a); +create table idxpart1 (a int not null, b int, unique (a, b)) + partition by range (a, b); +alter table idxpart attach partition idxpart1 for values from (1) to (1000); +DROP TABLE idxpart, idxpart1; + +-- Multi-layer partitioning works correctly in this case: +create table idxpart (a int, b int, primary key (a, b)) partition by range (a); +create table idxpart2 partition of idxpart for values from (0) to (1000) partition by range (b); +create table idxpart21 partition of idxpart2 for values from (0) to (1000); +select conname, contype, conrelid::regclass, conindid::regclass, conkey + from pg_constraint where conrelid::regclass::text like 'idxpart%' + order by conname; +drop table idxpart; + +-- If a partitioned table has a unique/PK constraint, then it's not possible +-- to drop the corresponding constraint in the children; nor it's possible +-- to drop the indexes individually. Dropping the constraint in the parent +-- gets rid of the lot. +create table idxpart (i int) partition by hash (i); +create table idxpart0 partition of idxpart (i) for values with (modulus 2, remainder 0); +create table idxpart1 partition of idxpart (i) for values with (modulus 2, remainder 1); +alter table idxpart0 add primary key(i); +alter table idxpart add primary key(i); +select indrelid::regclass, indexrelid::regclass, inhparent::regclass, indisvalid, + conname, conislocal, coninhcount, connoinherit, convalidated + from pg_index idx left join pg_inherits inh on (idx.indexrelid = inh.inhrelid) + left join pg_constraint con on (idx.indexrelid = con.conindid) + where indrelid::regclass::text like 'idxpart%' + order by indexrelid::regclass::text collate "C"; +drop index idxpart0_pkey; -- fail +drop index idxpart1_pkey; -- fail +alter table idxpart0 drop constraint idxpart0_pkey; -- fail +alter table idxpart1 drop constraint idxpart1_pkey; -- fail +alter table idxpart drop constraint idxpart_pkey; -- ok +select indrelid::regclass, indexrelid::regclass, inhparent::regclass, indisvalid, + conname, conislocal, coninhcount, connoinherit, convalidated + from pg_index idx left join pg_inherits inh on (idx.indexrelid = inh.inhrelid) + left join pg_constraint con on (idx.indexrelid = con.conindid) + where indrelid::regclass::text like 'idxpart%' + order by indexrelid::regclass::text collate "C"; +drop table idxpart; + +-- If the partition to be attached already has a primary key, fail if +-- it doesn't match the parent's PK. 
+CREATE TABLE idxpart (c1 INT PRIMARY KEY, c2 INT, c3 VARCHAR(10)) PARTITION BY RANGE(c1); +CREATE TABLE idxpart1 (LIKE idxpart); +ALTER TABLE idxpart1 ADD PRIMARY KEY (c1, c2); +ALTER TABLE idxpart ATTACH PARTITION idxpart1 FOR VALUES FROM (100) TO (200); +DROP TABLE idxpart, idxpart1; + +-- Ditto if there is some distance between the PKs (subpartitioning) +create table idxpart (a int, b int, primary key (a)) partition by range (a); +create table idxpart1 (a int not null, b int) partition by range (a); +create table idxpart11 (a int not null, b int primary key); +alter table idxpart1 attach partition idxpart11 for values from (0) to (1000); +alter table idxpart attach partition idxpart1 for values from (0) to (10000); +drop table idxpart, idxpart1, idxpart11; + +-- If a partitioned table has a constraint whose index is not valid, +-- attaching a missing partition makes it valid. +create table idxpart (a int) partition by range (a); +create table idxpart0 (like idxpart); +alter table idxpart0 add primary key (a); +alter table idxpart attach partition idxpart0 for values from (0) to (1000); +alter table only idxpart add primary key (a); +select indrelid::regclass, indexrelid::regclass, inhparent::regclass, indisvalid, + conname, conislocal, coninhcount, connoinherit, convalidated + from pg_index idx left join pg_inherits inh on (idx.indexrelid = inh.inhrelid) + left join pg_constraint con on (idx.indexrelid = con.conindid) + where indrelid::regclass::text like 'idxpart%' + order by indexrelid::regclass::text collate "C"; +alter index idxpart_pkey attach partition idxpart0_pkey; +select indrelid::regclass, indexrelid::regclass, inhparent::regclass, indisvalid, + conname, conislocal, coninhcount, connoinherit, convalidated + from pg_index idx left join pg_inherits inh on (idx.indexrelid = inh.inhrelid) + left join pg_constraint con on (idx.indexrelid = con.conindid) + where indrelid::regclass::text like 'idxpart%' + order by indexrelid::regclass::text collate "C"; +drop table idxpart; + +-- if a partition has a unique index without a constraint, does not attach +-- automatically; creates a new index instead. 
+create table idxpart (a int, b int) partition by range (a); +create table idxpart1 (a int not null, b int); +create unique index on idxpart1 (a); +alter table idxpart add primary key (a); +alter table idxpart attach partition idxpart1 for values from (1) to (1000); +select indrelid::regclass, indexrelid::regclass, inhparent::regclass, indisvalid, + conname, conislocal, coninhcount, connoinherit, convalidated + from pg_index idx left join pg_inherits inh on (idx.indexrelid = inh.inhrelid) + left join pg_constraint con on (idx.indexrelid = con.conindid) + where indrelid::regclass::text like 'idxpart%' + order by indexrelid::regclass::text collate "C"; +drop table idxpart; + +-- Can't attach an index without a corresponding constraint +create table idxpart (a int, b int) partition by range (a); +create table idxpart1 (a int not null, b int); +create unique index on idxpart1 (a); +alter table idxpart attach partition idxpart1 for values from (1) to (1000); +alter table only idxpart add primary key (a); +alter index idxpart_pkey attach partition idxpart1_a_idx; -- fail +drop table idxpart; + +-- Test that unique constraints are working +create table idxpart (a int, b text, primary key (a, b)) partition by range (a); +create table idxpart1 partition of idxpart for values from (0) to (100000); +create table idxpart2 (c int, like idxpart); +insert into idxpart2 (c, a, b) values (42, 572814, 'inserted first'); +alter table idxpart2 drop column c; +create unique index on idxpart (a); +alter table idxpart attach partition idxpart2 for values from (100000) to (1000000); +insert into idxpart values (0, 'zero'), (42, 'life'), (2^16, 'sixteen'); +insert into idxpart select 2^g, format('two to power of %s', g) from generate_series(15, 17) g; +insert into idxpart values (16, 'sixteen'); +insert into idxpart (b, a) values ('one', 142857), ('two', 285714); +insert into idxpart select a * 2, b || b from idxpart where a between 2^16 and 2^19; +insert into idxpart values (572814, 'five'); +insert into idxpart values (857142, 'six'); +select tableoid::regclass, * from idxpart order by a; +drop table idxpart; + +-- test fastpath mechanism for index insertion +create table fastpath (a int, b text, c numeric); +create unique index fpindex1 on fastpath(a); + +insert into fastpath values (1, 'b1', 100.00); +insert into fastpath values (1, 'b1', 100.00); -- unique key check + +truncate fastpath; +insert into fastpath select generate_series(1,10000), 'b', 100; + +-- vacuum the table so as to improve chances of index-only scans. we can't +-- guarantee if index-only scans will be picked up in all cases or not, but +-- that fuzziness actually helps the test. +vacuum fastpath; + +set enable_seqscan to false; +set enable_bitmapscan to false; + +select sum(a) from fastpath where a = 6456; +select sum(a) from fastpath where a >= 5000 and a < 5700; + +-- drop the only index on the table and compute hashes for +-- a few queries which orders the results in various different ways. 
+drop index fpindex1; +truncate fastpath; +insert into fastpath select y.x, 'b' || (y.x/10)::text, 100 from (select generate_series(1,10000) as x) y; +select md5(string_agg(a::text, b order by a, b asc)) from fastpath + where a >= 1000 and a < 2000 and b > 'b1' and b < 'b3'; +select md5(string_agg(a::text, b order by a desc, b desc)) from fastpath + where a >= 1000 and a < 2000 and b > 'b1' and b < 'b3'; +select md5(string_agg(a::text, b order by b, a desc)) from fastpath + where a >= 1000 and a < 2000 and b > 'b1' and b < 'b3'; +select md5(string_agg(a::text, b order by b, a asc)) from fastpath + where a >= 1000 and a < 2000 and b > 'b1' and b < 'b3'; + +-- now create a multi-column index with both column asc +create index fpindex2 on fastpath(a, b); +truncate fastpath; +insert into fastpath select y.x, 'b' || (y.x/10)::text, 100 from (select generate_series(1,10000) as x) y; +-- again, vacuum here either forces index-only scans or creates fuzziness +vacuum fastpath; +select md5(string_agg(a::text, b order by a, b asc)) from fastpath + where a >= 1000 and a < 2000 and b > 'b1' and b < 'b3'; +select md5(string_agg(a::text, b order by a desc, b desc)) from fastpath + where a >= 1000 and a < 2000 and b > 'b1' and b < 'b3'; +select md5(string_agg(a::text, b order by b, a desc)) from fastpath + where a >= 1000 and a < 2000 and b > 'b1' and b < 'b3'; +select md5(string_agg(a::text, b order by b, a asc)) from fastpath + where a >= 1000 and a < 2000 and b > 'b1' and b < 'b3'; + +-- same queries with a different kind of index now. the final result must not +-- change irrespective of what kind of index we have. +drop index fpindex2; +create index fpindex3 on fastpath(a desc, b asc); +truncate fastpath; +insert into fastpath select y.x, 'b' || (y.x/10)::text, 100 from (select generate_series(1,10000) as x) y; +vacuum fastpath; +select md5(string_agg(a::text, b order by a, b asc)) from fastpath + where a >= 1000 and a < 2000 and b > 'b1' and b < 'b3'; +select md5(string_agg(a::text, b order by a desc, b desc)) from fastpath + where a >= 1000 and a < 2000 and b > 'b1' and b < 'b3'; +select md5(string_agg(a::text, b order by b, a desc)) from fastpath + where a >= 1000 and a < 2000 and b > 'b1' and b < 'b3'; +select md5(string_agg(a::text, b order by b, a asc)) from fastpath + where a >= 1000 and a < 2000 and b > 'b1' and b < 'b3'; + +-- repeat again +drop index fpindex3; +create index fpindex4 on fastpath(a asc, b desc); +truncate fastpath; +insert into fastpath select y.x, 'b' || (y.x/10)::text, 100 from (select generate_series(1,10000) as x) y; +vacuum fastpath; +select md5(string_agg(a::text, b order by a, b asc)) from fastpath + where a >= 1000 and a < 2000 and b > 'b1' and b < 'b3'; +select md5(string_agg(a::text, b order by a desc, b desc)) from fastpath + where a >= 1000 and a < 2000 and b > 'b1' and b < 'b3'; +select md5(string_agg(a::text, b order by b, a desc)) from fastpath + where a >= 1000 and a < 2000 and b > 'b1' and b < 'b3'; +select md5(string_agg(a::text, b order by b, a asc)) from fastpath + where a >= 1000 and a < 2000 and b > 'b1' and b < 'b3'; + +-- and again, this time indexing by (b, a). Note that column "b" has non-unique +-- values. 
+drop index fpindex4; +create index fpindex5 on fastpath(b asc, a desc); +truncate fastpath; +insert into fastpath select y.x, 'b' || (y.x/10)::text, 100 from (select generate_series(1,10000) as x) y; +vacuum fastpath; +select md5(string_agg(a::text, b order by a, b asc)) from fastpath + where a >= 1000 and a < 2000 and b > 'b1' and b < 'b3'; +select md5(string_agg(a::text, b order by a desc, b desc)) from fastpath + where a >= 1000 and a < 2000 and b > 'b1' and b < 'b3'; +select md5(string_agg(a::text, b order by b, a desc)) from fastpath + where a >= 1000 and a < 2000 and b > 'b1' and b < 'b3'; +select md5(string_agg(a::text, b order by b, a asc)) from fastpath + where a >= 1000 and a < 2000 and b > 'b1' and b < 'b3'; + +-- one last time +drop index fpindex5; +create index fpindex6 on fastpath(b desc, a desc); +truncate fastpath; +insert into fastpath select y.x, 'b' || (y.x/10)::text, 100 from (select generate_series(1,10000) as x) y; +vacuum fastpath; +select md5(string_agg(a::text, b order by a, b asc)) from fastpath + where a >= 1000 and a < 2000 and b > 'b1' and b < 'b3'; +select md5(string_agg(a::text, b order by a desc, b desc)) from fastpath + where a >= 1000 and a < 2000 and b > 'b1' and b < 'b3'; +select md5(string_agg(a::text, b order by b, a desc)) from fastpath + where a >= 1000 and a < 2000 and b > 'b1' and b < 'b3'; +select md5(string_agg(a::text, b order by b, a asc)) from fastpath + where a >= 1000 and a < 2000 and b > 'b1' and b < 'b3'; + +drop table fastpath; + +-- intentionally leave some objects around +create table idxpart (a int) partition by range (a); +create table idxpart1 partition of idxpart for values from (0) to (100); +create table idxpart2 partition of idxpart for values from (100) to (1000) + partition by range (a); +create table idxpart21 partition of idxpart2 for values from (100) to (200); +create table idxpart22 partition of idxpart2 for values from (200) to (300); +create index on idxpart22 (a); +create index on only idxpart2 (a); +alter index idxpart2_a_idx attach partition idxpart22_a_idx; +create index on idxpart (a); +create table idxpart_another (a int, b int, primary key (a, b)) partition by range (a); +create table idxpart_another_1 partition of idxpart_another for values from (0) to (100); + +-- Test that covering partitioned indexes work in various cases +create table covidxpart (a int, b int) partition by list (a); +create unique index on covidxpart (a) include (b); +create table covidxpart1 partition of covidxpart for values in (1); +create table covidxpart2 partition of covidxpart for values in (2); +insert into covidxpart values (1, 1); +insert into covidxpart values (1, 1); +create table covidxpart3 (b int, c int, a int); +alter table covidxpart3 drop c; +alter table covidxpart attach partition covidxpart3 for values in (3); +insert into covidxpart values (3, 1); +insert into covidxpart values (3, 1); +create table covidxpart4 (b int, a int); +create unique index on covidxpart4 (a) include (b); +create unique index on covidxpart4 (a); +alter table covidxpart attach partition covidxpart4 for values in (4); +insert into covidxpart values (4, 1); +insert into covidxpart values (4, 1); diff --git a/src/test/regress/sql/indirect_toast.sql b/src/test/regress/sql/indirect_toast.sql index 18b6cc3a95..efb1eb4e2f 100644 --- a/src/test/regress/sql/indirect_toast.sql +++ b/src/test/regress/sql/indirect_toast.sql @@ -1,28 +1,28 @@ -CREATE TABLE toasttest(descr text, cnt int DEFAULT 0, f1 text, f2 text); +CREATE TABLE indtoasttest(descr text, cnt int 
DEFAULT 0, f1 text, f2 text); -INSERT INTO toasttest(descr, f1, f2) VALUES('two-compressed', repeat('1234567890',1000), repeat('1234567890',1000)); -INSERT INTO toasttest(descr, f1, f2) VALUES('two-toasted', repeat('1234567890',30000), repeat('1234567890',50000)); -INSERT INTO toasttest(descr, f1, f2) VALUES('one-compressed,one-null', NULL, repeat('1234567890',1000)); -INSERT INTO toasttest(descr, f1, f2) VALUES('one-toasted,one-null', NULL, repeat('1234567890',50000)); +INSERT INTO indtoasttest(descr, f1, f2) VALUES('two-compressed', repeat('1234567890',1000), repeat('1234567890',1000)); +INSERT INTO indtoasttest(descr, f1, f2) VALUES('two-toasted', repeat('1234567890',30000), repeat('1234567890',50000)); +INSERT INTO indtoasttest(descr, f1, f2) VALUES('one-compressed,one-null', NULL, repeat('1234567890',1000)); +INSERT INTO indtoasttest(descr, f1, f2) VALUES('one-toasted,one-null', NULL, repeat('1234567890',50000)); -- check whether indirect tuples works on the most basic level -SELECT descr, substring(make_tuple_indirect(toasttest)::text, 1, 200) FROM toasttest; +SELECT descr, substring(make_tuple_indirect(indtoasttest)::text, 1, 200) FROM indtoasttest; -- modification without changing varlenas -UPDATE toasttest SET cnt = cnt +1 RETURNING substring(toasttest::text, 1, 200); +UPDATE indtoasttest SET cnt = cnt +1 RETURNING substring(indtoasttest::text, 1, 200); -- modification without modifying assigned value -UPDATE toasttest SET cnt = cnt +1, f1 = f1 RETURNING substring(toasttest::text, 1, 200); +UPDATE indtoasttest SET cnt = cnt +1, f1 = f1 RETURNING substring(indtoasttest::text, 1, 200); -- modification modifying, but effectively not changing -UPDATE toasttest SET cnt = cnt +1, f1 = f1||'' RETURNING substring(toasttest::text, 1, 200); +UPDATE indtoasttest SET cnt = cnt +1, f1 = f1||'' RETURNING substring(indtoasttest::text, 1, 200); -UPDATE toasttest SET cnt = cnt +1, f1 = '-'||f1||'-' RETURNING substring(toasttest::text, 1, 200); +UPDATE indtoasttest SET cnt = cnt +1, f1 = '-'||f1||'-' RETURNING substring(indtoasttest::text, 1, 200); -SELECT substring(toasttest::text, 1, 200) FROM toasttest; +SELECT substring(indtoasttest::text, 1, 200) FROM indtoasttest; -- check we didn't screw with main/toast tuple visibility -VACUUM FREEZE toasttest; -SELECT substring(toasttest::text, 1, 200) FROM toasttest; +VACUUM FREEZE indtoasttest; +SELECT substring(indtoasttest::text, 1, 200) FROM indtoasttest; -- now create a trigger that forces all Datums to be indirect ones CREATE FUNCTION update_using_indirect() @@ -33,29 +33,29 @@ BEGIN RETURN NEW; END$$; -CREATE TRIGGER toasttest_update_indirect +CREATE TRIGGER indtoasttest_update_indirect BEFORE INSERT OR UPDATE - ON toasttest + ON indtoasttest FOR EACH ROW EXECUTE PROCEDURE update_using_indirect(); -- modification without changing varlenas -UPDATE toasttest SET cnt = cnt +1 RETURNING substring(toasttest::text, 1, 200); +UPDATE indtoasttest SET cnt = cnt +1 RETURNING substring(indtoasttest::text, 1, 200); -- modification without modifying assigned value -UPDATE toasttest SET cnt = cnt +1, f1 = f1 RETURNING substring(toasttest::text, 1, 200); +UPDATE indtoasttest SET cnt = cnt +1, f1 = f1 RETURNING substring(indtoasttest::text, 1, 200); -- modification modifying, but effectively not changing -UPDATE toasttest SET cnt = cnt +1, f1 = f1||'' RETURNING substring(toasttest::text, 1, 200); +UPDATE indtoasttest SET cnt = cnt +1, f1 = f1||'' RETURNING substring(indtoasttest::text, 1, 200); -UPDATE toasttest SET cnt = cnt +1, f1 = '-'||f1||'-' RETURNING 
substring(toasttest::text, 1, 200); +UPDATE indtoasttest SET cnt = cnt +1, f1 = '-'||f1||'-' RETURNING substring(indtoasttest::text, 1, 200); -INSERT INTO toasttest(descr, f1, f2) VALUES('one-toasted,one-null, via indirect', repeat('1234567890',30000), NULL); +INSERT INTO indtoasttest(descr, f1, f2) VALUES('one-toasted,one-null, via indirect', repeat('1234567890',30000), NULL); -SELECT substring(toasttest::text, 1, 200) FROM toasttest; +SELECT substring(indtoasttest::text, 1, 200) FROM indtoasttest; -- check we didn't screw with main/toast tuple visibility -VACUUM FREEZE toasttest; -SELECT substring(toasttest::text, 1, 200) FROM toasttest; +VACUUM FREEZE indtoasttest; +SELECT substring(indtoasttest::text, 1, 200) FROM indtoasttest; -DROP TABLE toasttest; +DROP TABLE indtoasttest; DROP FUNCTION update_using_indirect(); diff --git a/src/test/regress/sql/inherit.sql b/src/test/regress/sql/inherit.sql index c96580cd81..afc72f47bc 100644 --- a/src/test/regress/sql/inherit.sql +++ b/src/test/regress/sql/inherit.sql @@ -154,6 +154,23 @@ where parted_tab.a = ss.a; select tableoid::regclass::text as relname, parted_tab.* from parted_tab order by 1,2; drop table parted_tab; + +-- Check UPDATE with multi-level partitioned inherited target +create table mlparted_tab (a int, b char, c text) partition by list (a); +create table mlparted_tab_part1 partition of mlparted_tab for values in (1); +create table mlparted_tab_part2 partition of mlparted_tab for values in (2) partition by list (b); +create table mlparted_tab_part3 partition of mlparted_tab for values in (3); +create table mlparted_tab_part2a partition of mlparted_tab_part2 for values in ('a'); +create table mlparted_tab_part2b partition of mlparted_tab_part2 for values in ('b'); +insert into mlparted_tab values (1, 'a'), (2, 'a'), (2, 'b'), (3, 'a'); + +update mlparted_tab mlp set c = 'xxx' +from + (select a from some_tab union all select a+1 from some_tab) ss (a) +where (mlp.a = ss.a and mlp.b = 'b') or mlp.a = 3; +select tableoid::regclass::text as relname, mlparted_tab.* from mlparted_tab order by 1,2; + +drop table mlparted_tab; drop table some_tab cascade; /* Test multiple inheritance of column defaults */ @@ -220,9 +237,14 @@ drop table p1 cascade; -- tables. See the pgsql-hackers thread beginning Dec. 4/04 create table base (i integer); create table derived () inherits (base); +create table more_derived (like derived, b int) inherits (derived); insert into derived (i) values (0); select derived::base from derived; select NULL::derived::base; +-- remove redundant conversions. 
+explain (verbose on, costs off) select row(i, b)::more_derived::derived::base from more_derived; +explain (verbose on, costs off) select (1, 2)::more_derived::derived::base; +drop table more_derived; drop table derived; drop table base; @@ -240,40 +262,40 @@ drop table p1; CREATE TABLE ac (aa TEXT); alter table ac add constraint ac_check check (aa is not null); CREATE TABLE bc (bb TEXT) INHERITS (ac); -select pc.relname, pgc.conname, pgc.contype, pgc.conislocal, pgc.coninhcount, pgc.consrc from pg_class as pc inner join pg_constraint as pgc on (pgc.conrelid = pc.oid) where pc.relname in ('ac', 'bc') order by 1,2; +select pc.relname, pgc.conname, pgc.contype, pgc.conislocal, pgc.coninhcount, pg_get_expr(pgc.conbin, pc.oid) as consrc from pg_class as pc inner join pg_constraint as pgc on (pgc.conrelid = pc.oid) where pc.relname in ('ac', 'bc') order by 1,2; insert into ac (aa) values (NULL); insert into bc (aa) values (NULL); alter table bc drop constraint ac_check; -- fail, disallowed alter table ac drop constraint ac_check; -select pc.relname, pgc.conname, pgc.contype, pgc.conislocal, pgc.coninhcount, pgc.consrc from pg_class as pc inner join pg_constraint as pgc on (pgc.conrelid = pc.oid) where pc.relname in ('ac', 'bc') order by 1,2; +select pc.relname, pgc.conname, pgc.contype, pgc.conislocal, pgc.coninhcount, pg_get_expr(pgc.conbin, pc.oid) as consrc from pg_class as pc inner join pg_constraint as pgc on (pgc.conrelid = pc.oid) where pc.relname in ('ac', 'bc') order by 1,2; -- try the unnamed-constraint case alter table ac add check (aa is not null); -select pc.relname, pgc.conname, pgc.contype, pgc.conislocal, pgc.coninhcount, pgc.consrc from pg_class as pc inner join pg_constraint as pgc on (pgc.conrelid = pc.oid) where pc.relname in ('ac', 'bc') order by 1,2; +select pc.relname, pgc.conname, pgc.contype, pgc.conislocal, pgc.coninhcount, pg_get_expr(pgc.conbin, pc.oid) as consrc from pg_class as pc inner join pg_constraint as pgc on (pgc.conrelid = pc.oid) where pc.relname in ('ac', 'bc') order by 1,2; insert into ac (aa) values (NULL); insert into bc (aa) values (NULL); alter table bc drop constraint ac_aa_check; -- fail, disallowed alter table ac drop constraint ac_aa_check; -select pc.relname, pgc.conname, pgc.contype, pgc.conislocal, pgc.coninhcount, pgc.consrc from pg_class as pc inner join pg_constraint as pgc on (pgc.conrelid = pc.oid) where pc.relname in ('ac', 'bc') order by 1,2; +select pc.relname, pgc.conname, pgc.contype, pgc.conislocal, pgc.coninhcount, pg_get_expr(pgc.conbin, pc.oid) as consrc from pg_class as pc inner join pg_constraint as pgc on (pgc.conrelid = pc.oid) where pc.relname in ('ac', 'bc') order by 1,2; alter table ac add constraint ac_check check (aa is not null); alter table bc no inherit ac; -select pc.relname, pgc.conname, pgc.contype, pgc.conislocal, pgc.coninhcount, pgc.consrc from pg_class as pc inner join pg_constraint as pgc on (pgc.conrelid = pc.oid) where pc.relname in ('ac', 'bc') order by 1,2; +select pc.relname, pgc.conname, pgc.contype, pgc.conislocal, pgc.coninhcount, pg_get_expr(pgc.conbin, pc.oid) as consrc from pg_class as pc inner join pg_constraint as pgc on (pgc.conrelid = pc.oid) where pc.relname in ('ac', 'bc') order by 1,2; alter table bc drop constraint ac_check; -select pc.relname, pgc.conname, pgc.contype, pgc.conislocal, pgc.coninhcount, pgc.consrc from pg_class as pc inner join pg_constraint as pgc on (pgc.conrelid = pc.oid) where pc.relname in ('ac', 'bc') order by 1,2; +select pc.relname, pgc.conname, pgc.contype, 
pgc.conislocal, pgc.coninhcount, pg_get_expr(pgc.conbin, pc.oid) as consrc from pg_class as pc inner join pg_constraint as pgc on (pgc.conrelid = pc.oid) where pc.relname in ('ac', 'bc') order by 1,2; alter table ac drop constraint ac_check; -select pc.relname, pgc.conname, pgc.contype, pgc.conislocal, pgc.coninhcount, pgc.consrc from pg_class as pc inner join pg_constraint as pgc on (pgc.conrelid = pc.oid) where pc.relname in ('ac', 'bc') order by 1,2; +select pc.relname, pgc.conname, pgc.contype, pgc.conislocal, pgc.coninhcount, pg_get_expr(pgc.conbin, pc.oid) as consrc from pg_class as pc inner join pg_constraint as pgc on (pgc.conrelid = pc.oid) where pc.relname in ('ac', 'bc') order by 1,2; drop table bc; drop table ac; create table ac (a int constraint check_a check (a <> 0)); create table bc (a int constraint check_a check (a <> 0), b int constraint check_b check (b <> 0)) inherits (ac); -select pc.relname, pgc.conname, pgc.contype, pgc.conislocal, pgc.coninhcount, pgc.consrc from pg_class as pc inner join pg_constraint as pgc on (pgc.conrelid = pc.oid) where pc.relname in ('ac', 'bc') order by 1,2; +select pc.relname, pgc.conname, pgc.contype, pgc.conislocal, pgc.coninhcount, pg_get_expr(pgc.conbin, pc.oid) as consrc from pg_class as pc inner join pg_constraint as pgc on (pgc.conrelid = pc.oid) where pc.relname in ('ac', 'bc') order by 1,2; drop table bc; drop table ac; @@ -281,10 +303,10 @@ drop table ac; create table ac (a int constraint check_a check (a <> 0)); create table bc (b int constraint check_b check (b <> 0)); create table cc (c int constraint check_c check (c <> 0)) inherits (ac, bc); -select pc.relname, pgc.conname, pgc.contype, pgc.conislocal, pgc.coninhcount, pgc.consrc from pg_class as pc inner join pg_constraint as pgc on (pgc.conrelid = pc.oid) where pc.relname in ('ac', 'bc', 'cc') order by 1,2; +select pc.relname, pgc.conname, pgc.contype, pgc.conislocal, pgc.coninhcount, pg_get_expr(pgc.conbin, pc.oid) as consrc from pg_class as pc inner join pg_constraint as pgc on (pgc.conrelid = pc.oid) where pc.relname in ('ac', 'bc', 'cc') order by 1,2; alter table cc no inherit bc; -select pc.relname, pgc.conname, pgc.contype, pgc.conislocal, pgc.coninhcount, pgc.consrc from pg_class as pc inner join pg_constraint as pgc on (pgc.conrelid = pc.oid) where pc.relname in ('ac', 'bc', 'cc') order by 1,2; +select pc.relname, pgc.conname, pgc.contype, pgc.conislocal, pgc.coninhcount, pg_get_expr(pgc.conbin, pc.oid) as consrc from pg_class as pc inner join pg_constraint as pgc on (pgc.conrelid = pc.oid) where pc.relname in ('ac', 'bc', 'cc') order by 1,2; drop table cc; drop table bc; @@ -392,6 +414,18 @@ DROP TABLE test_foreign_constraints_inh; DROP TABLE test_foreign_constraints; DROP TABLE test_primary_constraints; +-- Test foreign key behavior +create table inh_fk_1 (a int primary key); +insert into inh_fk_1 values (1), (2), (3); +create table inh_fk_2 (x int primary key, y int references inh_fk_1 on delete cascade); +insert into inh_fk_2 values (11, 1), (22, 2), (33, 3); +create table inh_fk_2_child () inherits (inh_fk_2); +insert into inh_fk_2_child values (111, 1), (222, 2); +delete from inh_fk_1 where a = 1; +select * from inh_fk_1 order by 1; +select * from inh_fk_2 order by 1, 2; +drop table inh_fk_1, inh_fk_2, inh_fk_2_child; + -- Test that parent and child CHECK constraints can be created in either order create table p1(f1 int); create table p1_c1() inherits(p1); @@ -491,11 +525,13 @@ select min(1-id) from matest0; reset enable_indexscan; set enable_seqscan = off; -- 
plan with fewest seqscans should be merge +set enable_parallel_append = off; -- Don't let parallel-append interfere explain (verbose, costs off) select * from matest0 order by 1-id; select * from matest0 order by 1-id; explain (verbose, costs off) select min(1-id) from matest0; select min(1-id) from matest0; reset enable_seqscan; +reset enable_parallel_append; drop table matest0 cascade; @@ -592,6 +628,35 @@ reset enable_seqscan; reset enable_indexscan; reset enable_bitmapscan; +-- +-- Check handling of a constant-null CHECK constraint +-- +create table cnullparent (f1 int); +create table cnullchild (check (f1 = 1 or f1 = null)) inherits(cnullparent); +insert into cnullchild values(1); +insert into cnullchild values(2); +insert into cnullchild values(null); +select * from cnullparent; +select * from cnullparent where f1 = 2; +drop table cnullparent cascade; + +-- +-- Check use of temporary tables with inheritance trees +-- +create table inh_perm_parent (a1 int); +create temp table inh_temp_parent (a1 int); +create temp table inh_temp_child () inherits (inh_perm_parent); -- ok +create table inh_perm_child () inherits (inh_temp_parent); -- error +create temp table inh_temp_child_2 () inherits (inh_temp_parent); -- ok +insert into inh_perm_parent values (1); +insert into inh_temp_parent values (2); +insert into inh_temp_child values (3); +insert into inh_temp_child_2 values (4); +select tableoid::regclass, a1 from inh_perm_parent; +select tableoid::regclass, a1 from inh_temp_parent; +drop table inh_perm_parent cascade; +drop table inh_temp_parent cascade; + -- -- Check that constraint exclusion works correctly with partitions using -- implicit constraints generated from the partition bound information. @@ -647,19 +712,20 @@ drop table range_list_parted; -- check that constraint exclusion is able to cope with the partition -- constraint emitted for multi-column range partitioned tables create table mcrparted (a int, b int, c int) partition by range (a, abs(b), c); -create table mcrparted0 partition of mcrparted for values from (minvalue, 0, 0) to (1, 1, 1); +create table mcrparted_def partition of mcrparted default; +create table mcrparted0 partition of mcrparted for values from (minvalue, minvalue, minvalue) to (1, 1, 1); create table mcrparted1 partition of mcrparted for values from (1, 1, 1) to (10, 5, 10); create table mcrparted2 partition of mcrparted for values from (10, 5, 10) to (10, 10, 10); create table mcrparted3 partition of mcrparted for values from (11, 1, 1) to (20, 10, 10); create table mcrparted4 partition of mcrparted for values from (20, 10, 10) to (20, 20, 20); -create table mcrparted5 partition of mcrparted for values from (20, 20, 20) to (maxvalue, 0, 0); -explain (costs off) select * from mcrparted where a = 0; -- scans mcrparted0 -explain (costs off) select * from mcrparted where a = 10 and abs(b) < 5; -- scans mcrparted1 -explain (costs off) select * from mcrparted where a = 10 and abs(b) = 5; -- scans mcrparted1, mcrparted2 +create table mcrparted5 partition of mcrparted for values from (20, 20, 20) to (maxvalue, maxvalue, maxvalue); +explain (costs off) select * from mcrparted where a = 0; -- scans mcrparted0, mcrparted_def +explain (costs off) select * from mcrparted where a = 10 and abs(b) < 5; -- scans mcrparted1, mcrparted_def +explain (costs off) select * from mcrparted where a = 10 and abs(b) = 5; -- scans mcrparted1, mcrparted2, mcrparted_def explain (costs off) select * from mcrparted where abs(b) = 5; -- scans all partitions explain (costs off) select * from 
mcrparted where a > -1; -- scans all partitions explain (costs off) select * from mcrparted where a = 20 and abs(b) = 10 and c > 10; -- scans mcrparted4 -explain (costs off) select * from mcrparted where a = 20 and c > 20; -- scans mcrparted3, mcrparte4, mcrparte5 +explain (costs off) select * from mcrparted where a = 20 and c > 20; -- scans mcrparted3, mcrparte4, mcrparte5, mcrparted_def drop table mcrparted; -- check that partitioned table Appends cope with being referenced in diff --git a/src/test/regress/sql/insert.sql b/src/test/regress/sql/insert.sql index 6f17872087..a7f659bc2b 100644 --- a/src/test/regress/sql/insert.sql +++ b/src/test/regress/sql/insert.sql @@ -90,6 +90,10 @@ create table range_parted ( a text, b int ) partition by range (a, (b+0)); + +-- no partitions, so fail +insert into range_parted values ('a', 11); + create table part1 partition of range_parted for values from ('a', 1) to ('a', 10); create table part2 partition of range_parted for values from ('a', 10) to ('a', 20); create table part3 partition of range_parted for values from ('b', 1) to ('b', 10); @@ -132,13 +136,39 @@ create table part_ee_ff partition of list_parted for values in ('ee', 'ff') part create table part_ee_ff1 partition of part_ee_ff for values from (1) to (10); create table part_ee_ff2 partition of part_ee_ff for values from (10) to (20); +-- test default partition +create table part_default partition of list_parted default; +-- Negative test: a row, which would fit in other partition, does not fit +-- default partition, even when inserted directly +insert into part_default values ('aa', 2); +insert into part_default values (null, 2); +-- ok +insert into part_default values ('Zz', 2); +-- test if default partition works as expected for multi-level partitioned +-- table as well as when default partition itself is further partitioned +drop table part_default; +create table part_xx_yy partition of list_parted for values in ('xx', 'yy') partition by list (a); +create table part_xx_yy_p1 partition of part_xx_yy for values in ('xx'); +create table part_xx_yy_defpart partition of part_xx_yy default; +create table part_default partition of list_parted default partition by range(b); +create table part_default_p1 partition of part_default for values from (20) to (30); +create table part_default_p2 partition of part_default for values from (30) to (40); + -- fail insert into part_ee_ff1 values ('EE', 11); +insert into part_default_p2 values ('gg', 43); -- fail (even the parent's, ie, part_ee_ff's partition constraint applies) insert into part_ee_ff1 values ('cc', 1); +insert into part_default values ('gg', 43); -- ok insert into part_ee_ff1 values ('ff', 1); insert into part_ee_ff2 values ('ff', 11); +insert into part_default_p1 values ('cd', 25); +insert into part_default_p2 values ('de', 35); +insert into list_parted values ('ab', 21); +insert into list_parted values ('xx', 1); +insert into list_parted values ('yy', 2); +select tableoid::regclass, * from list_parted; -- Check tuple routing for partitioned tables @@ -154,8 +184,19 @@ insert into range_parted values ('b', 1); insert into range_parted values ('b', 10); -- fail (partition key (b+0) is null) insert into range_parted values ('a'); -select tableoid::regclass, * from range_parted; +-- Check default partition +create table part_def partition of range_parted default; +-- fail +insert into part_def values ('b', 10); +-- ok +insert into part_def values ('c', 10); +insert into range_parted values (null, null); +insert into range_parted values ('a', 
null); +insert into range_parted values (null, 19); +insert into range_parted values ('b', 20); + +select tableoid::regclass, * from range_parted; -- ok insert into list_parted values (null, 1); insert into list_parted (a) values ('aA'); @@ -185,8 +226,75 @@ insert into list_parted select 'gg', s.a from generate_series(1, 9) s(a); insert into list_parted (b) values (1); select tableoid::regclass::text, a, min(b) as min_b, max(b) as max_b from list_parted group by 1, 2 order by 1; +-- direct partition inserts should check hash partition bound constraint + +-- Use hand-rolled hash functions and operator classes to get predictable +-- result on different matchines. The hash function for int4 simply returns +-- the sum of the values passed to it and the one for text returns the length +-- of the non-empty string value passed to it or 0. + +create or replace function part_hashint4_noop(value int4, seed int8) +returns int8 as $$ +select value + seed; +$$ language sql immutable; + +create operator class part_test_int4_ops +for type int4 +using hash as +operator 1 =, +function 2 part_hashint4_noop(int4, int8); + +create or replace function part_hashtext_length(value text, seed int8) +RETURNS int8 AS $$ +select length(coalesce(value, ''))::int8 +$$ language sql immutable; + +create operator class part_test_text_ops +for type text +using hash as +operator 1 =, +function 2 part_hashtext_length(text, int8); + +create table hash_parted ( + a int +) partition by hash (a part_test_int4_ops); +create table hpart0 partition of hash_parted for values with (modulus 4, remainder 0); +create table hpart1 partition of hash_parted for values with (modulus 4, remainder 1); +create table hpart2 partition of hash_parted for values with (modulus 4, remainder 2); +create table hpart3 partition of hash_parted for values with (modulus 4, remainder 3); + +insert into hash_parted values(generate_series(1,10)); + +-- direct insert of values divisible by 4 - ok; +insert into hpart0 values(12),(16); +-- fail; +insert into hpart0 values(11); +-- 11 % 4 -> 3 remainder i.e. 
valid data for hpart3 partition +insert into hpart3 values(11); + +-- view data +select tableoid::regclass as part, a, a%4 as "remainder = a % 4" +from hash_parted order by part; + +-- test \d+ output on a table which has both partitioned and unpartitioned +-- partitions +\d+ list_parted + -- cleanup drop table range_parted, list_parted; +drop table hash_parted; + +-- test that a default partition added as the first partition accepts any value +-- including null +create table list_parted (a int) partition by list (a); +create table part_default partition of list_parted default; +\d+ part_default +insert into part_default values (null); +insert into part_default values (1); +insert into part_default values (-1); +select tableoid::regclass, a from list_parted; +-- cleanup +drop table list_parted; -- more tests for certain multi-level partitioning scenarios create table mlparted (a int, b int) partition by range (a, b); @@ -274,43 +382,69 @@ create function mlparted5abrtrig_func() returns trigger as $$ begin new.c = 'b'; create trigger mlparted5abrtrig before insert on mlparted5a for each row execute procedure mlparted5abrtrig_func(); insert into mlparted5 (a, b, c) values (1, 40, 'a'); drop table mlparted5; +alter table mlparted drop constraint check_b; + +-- Check multi-level default partition +create table mlparted_def partition of mlparted default partition by range(a); +create table mlparted_def1 partition of mlparted_def for values from (40) to (50); +create table mlparted_def2 partition of mlparted_def for values from (50) to (60); +insert into mlparted values (40, 100); +insert into mlparted_def1 values (42, 100); +insert into mlparted_def2 values (54, 50); +-- fail +insert into mlparted values (70, 100); +insert into mlparted_def1 values (52, 50); +insert into mlparted_def2 values (34, 50); +-- ok +create table mlparted_defd partition of mlparted_def default; +insert into mlparted values (70, 100); + +select tableoid::regclass, * from mlparted_def; -- check that message shown after failure to find a partition shows the -- appropriate key description (or none) in various situations create table key_desc (a int, b int) partition by list ((a+0)); create table key_desc_1 partition of key_desc for values in (1) partition by range (b); -create user someone_else; -grant select (a) on key_desc_1 to someone_else; -grant insert on key_desc to someone_else; +create user regress_insert_other_user; +grant select (a) on key_desc_1 to regress_insert_other_user; +grant insert on key_desc to regress_insert_other_user; -set role someone_else; +set role regress_insert_other_user; -- no key description is shown insert into key_desc values (1, 1); reset role; -grant select (b) on key_desc_1 to someone_else; -set role someone_else; +grant select (b) on key_desc_1 to regress_insert_other_user; +set role regress_insert_other_user; -- key description (b)=(1) is now shown insert into key_desc values (1, 1); -- key description is not shown if key contains expression insert into key_desc values (2, 1); reset role; -revoke all on key_desc from someone_else; -revoke all on key_desc_1 from someone_else; -drop role someone_else; +revoke all on key_desc from regress_insert_other_user; +revoke all on key_desc_1 from regress_insert_other_user; +drop role regress_insert_other_user; drop table key_desc, key_desc_1; +-- test minvalue/maxvalue restrictions +create table mcrparted (a int, b int, c int) partition by range (a, abs(b), c); +create table mcrparted0 partition of mcrparted for values from (minvalue, 0, 0) to (1, 
maxvalue, maxvalue); +create table mcrparted2 partition of mcrparted for values from (10, 6, minvalue) to (10, maxvalue, minvalue); +create table mcrparted4 partition of mcrparted for values from (21, minvalue, 0) to (30, 20, minvalue); + -- check multi-column range partitioning expression enforces the same -- constraint as what tuple-routing would determine it to be -create table mcrparted (a int, b int, c int) partition by range (a, abs(b), c); -create table mcrparted0 partition of mcrparted for values from (minvalue, 0, 0) to (1, maxvalue, 0); +create table mcrparted0 partition of mcrparted for values from (minvalue, minvalue, minvalue) to (1, maxvalue, maxvalue); create table mcrparted1 partition of mcrparted for values from (2, 1, minvalue) to (10, 5, 10); -create table mcrparted2 partition of mcrparted for values from (10, 6, minvalue) to (10, maxvalue, 0); +create table mcrparted2 partition of mcrparted for values from (10, 6, minvalue) to (10, maxvalue, maxvalue); create table mcrparted3 partition of mcrparted for values from (11, 1, 1) to (20, 10, 10); -create table mcrparted4 partition of mcrparted for values from (21, minvalue, 0) to (30, 20, maxvalue); -create table mcrparted5 partition of mcrparted for values from (30, 21, 20) to (maxvalue, 0, 0); +create table mcrparted4 partition of mcrparted for values from (21, minvalue, minvalue) to (30, 20, maxvalue); +create table mcrparted5 partition of mcrparted for values from (30, 21, 20) to (maxvalue, maxvalue, maxvalue); + +-- null not allowed in range partition +insert into mcrparted values (null, null, null); -- routed to mcrparted0 insert into mcrparted values (0, 1, 1); @@ -373,16 +507,39 @@ drop table inserttest3; drop table brtrigpartcon; drop function brtrigpartcon1trigf(); +-- check that "do nothing" BR triggers work with tuple-routing (this checks +-- that estate->es_result_relation_info is appropriately set/reset for each +-- routed tuple) +create table donothingbrtrig_test (a int, b text) partition by list (a); +create table donothingbrtrig_test1 (b text, a int); +create table donothingbrtrig_test2 (c text, b text, a int); +alter table donothingbrtrig_test2 drop column c; +create or replace function donothingbrtrig_func() returns trigger as $$begin raise notice 'b: %', new.b; return NULL; end$$ language plpgsql; +create trigger donothingbrtrig1 before insert on donothingbrtrig_test1 for each row execute procedure donothingbrtrig_func(); +create trigger donothingbrtrig2 before insert on donothingbrtrig_test2 for each row execute procedure donothingbrtrig_func(); +alter table donothingbrtrig_test attach partition donothingbrtrig_test1 for values in (1); +alter table donothingbrtrig_test attach partition donothingbrtrig_test2 for values in (2); +insert into donothingbrtrig_test values (1, 'foo'), (2, 'bar'); +copy donothingbrtrig_test from stdout; +1 baz +2 qux +\. 
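
Illustrative aside, not part of the patch: the "do nothing" behaviour exercised above relies on a BEFORE ROW trigger returning NULL, which tells the executor to discard the routed tuple after the trigger has fired. A minimal standalone sketch, using the hypothetical names brtrig_demo and brtrig_demo_skip:

create table brtrig_demo (a int, b text);
create function brtrig_demo_skip() returns trigger as $$
begin
  raise notice 'suppressing row with a = %', new.a;
  return null;  -- NULL from a BEFORE ROW trigger discards the row
end;
$$ language plpgsql;
create trigger brtrig_demo_skip_t before insert on brtrig_demo
  for each row execute procedure brtrig_demo_skip();
insert into brtrig_demo values (1, 'foo');  -- notice fires, but nothing is stored
select count(*) from brtrig_demo;           -- 0
drop table brtrig_demo;
drop function brtrig_demo_skip();
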
+select tableoid::regclass, * from donothingbrtrig_test; + +-- cleanup +drop table donothingbrtrig_test; +drop function donothingbrtrig_func(); + -- check multi-column range partitioning with minvalue/maxvalue constraints create table mcrparted (a text, b int) partition by range(a, b); -create table mcrparted1_lt_b partition of mcrparted for values from (minvalue, 0) to ('b', minvalue); +create table mcrparted1_lt_b partition of mcrparted for values from (minvalue, minvalue) to ('b', minvalue); create table mcrparted2_b partition of mcrparted for values from ('b', minvalue) to ('c', minvalue); create table mcrparted3_c_to_common partition of mcrparted for values from ('c', minvalue) to ('common', minvalue); create table mcrparted4_common_lt_0 partition of mcrparted for values from ('common', minvalue) to ('common', 0); create table mcrparted5_common_0_to_10 partition of mcrparted for values from ('common', 0) to ('common', 10); create table mcrparted6_common_ge_10 partition of mcrparted for values from ('common', 10) to ('common', maxvalue); create table mcrparted7_gt_common_lt_d partition of mcrparted for values from ('common', maxvalue) to ('d', minvalue); -create table mcrparted8_ge_d partition of mcrparted for values from ('d', minvalue) to (maxvalue, 0); +create table mcrparted8_ge_d partition of mcrparted for values from ('d', minvalue) to (maxvalue, maxvalue); \d+ mcrparted \d+ mcrparted1_lt_b diff --git a/src/test/regress/sql/insert_conflict.sql b/src/test/regress/sql/insert_conflict.sql index df3a9b59b5..c677d70fb7 100644 --- a/src/test/regress/sql/insert_conflict.sql +++ b/src/test/regress/sql/insert_conflict.sql @@ -471,3 +471,109 @@ commit; select * from selfconflict; drop table selfconflict; + +-- check ON CONFLICT handling with partitioned tables +create table parted_conflict_test (a int unique, b char) partition by list (a); +create table parted_conflict_test_1 partition of parted_conflict_test (b unique) for values in (1, 2); + +-- no indexes required here +insert into parted_conflict_test values (1, 'a') on conflict do nothing; + +-- index on a required, which does exist in parent +insert into parted_conflict_test values (1, 'a') on conflict (a) do nothing; +insert into parted_conflict_test values (1, 'a') on conflict (a) do update set b = excluded.b; + +-- targeting partition directly will work +insert into parted_conflict_test_1 values (1, 'a') on conflict (a) do nothing; +insert into parted_conflict_test_1 values (1, 'b') on conflict (a) do update set b = excluded.b; + +-- index on b required, which doesn't exist in parent +insert into parted_conflict_test values (2, 'b') on conflict (b) do update set a = excluded.a; + +-- targeting partition directly will work +insert into parted_conflict_test_1 values (2, 'b') on conflict (b) do update set a = excluded.a; + +-- should see (2, 'b') +select * from parted_conflict_test order by a; + +-- now check that DO UPDATE works correctly for target partition with +-- different attribute numbers +create table parted_conflict_test_2 (b char, a int unique); +alter table parted_conflict_test attach partition parted_conflict_test_2 for values in (3); +truncate parted_conflict_test; +insert into parted_conflict_test values (3, 'a') on conflict (a) do update set b = excluded.b; +insert into parted_conflict_test values (3, 'b') on conflict (a) do update set b = excluded.b; + +-- should see (3, 'b') +select * from parted_conflict_test order by a; + +-- case where parent will have a dropped column, but the partition won't +alter table 
parted_conflict_test drop b, add b char; +create table parted_conflict_test_3 partition of parted_conflict_test for values in (4); +truncate parted_conflict_test; +insert into parted_conflict_test (a, b) values (4, 'a') on conflict (a) do update set b = excluded.b; +insert into parted_conflict_test (a, b) values (4, 'b') on conflict (a) do update set b = excluded.b where parted_conflict_test.b = 'a'; + +-- should see (4, 'b') +select * from parted_conflict_test order by a; + +-- case with multi-level partitioning +create table parted_conflict_test_4 partition of parted_conflict_test for values in (5) partition by list (a); +create table parted_conflict_test_4_1 partition of parted_conflict_test_4 for values in (5); +truncate parted_conflict_test; +insert into parted_conflict_test (a, b) values (5, 'a') on conflict (a) do update set b = excluded.b; +insert into parted_conflict_test (a, b) values (5, 'b') on conflict (a) do update set b = excluded.b where parted_conflict_test.b = 'a'; + +-- should see (5, 'b') +select * from parted_conflict_test order by a; + +-- test with multiple rows +truncate parted_conflict_test; +insert into parted_conflict_test (a, b) values (1, 'a'), (2, 'a'), (4, 'a') on conflict (a) do update set b = excluded.b where excluded.b = 'b'; +insert into parted_conflict_test (a, b) values (1, 'b'), (2, 'c'), (4, 'b') on conflict (a) do update set b = excluded.b where excluded.b = 'b'; + +-- should see (1, 'b'), (2, 'a'), (4, 'b') +select * from parted_conflict_test order by a; + +drop table parted_conflict_test; + +-- test behavior of inserting a conflicting tuple into an intermediate +-- partitioning level +create table parted_conflict (a int primary key, b text) partition by range (a); +create table parted_conflict_1 partition of parted_conflict for values from (0) to (1000) partition by range (a); +create table parted_conflict_1_1 partition of parted_conflict_1 for values from (0) to (500); +insert into parted_conflict values (40, 'forty'); +insert into parted_conflict_1 values (40, 'cuarenta') + on conflict (a) do update set b = excluded.b; +drop table parted_conflict; + +-- same thing, but this time try to use an index that's created not in the +-- partition +create table parted_conflict (a int, b text) partition by range (a); +create table parted_conflict_1 partition of parted_conflict for values from (0) to (1000) partition by range (a); +create unique index on only parted_conflict_1 (a); +create unique index on only parted_conflict (a); +alter index parted_conflict_a_idx attach partition parted_conflict_1_a_idx; +create table parted_conflict_1_1 partition of parted_conflict_1 for values from (0) to (500); +insert into parted_conflict values (40, 'forty'); +insert into parted_conflict_1 values (40, 'cuarenta') + on conflict (a) do update set b = excluded.b; +drop table parted_conflict; + +-- test whole-row Vars in ON CONFLICT expressions +create table parted_conflict (a int, b text, c int) partition by range (a); +create table parted_conflict_1 (drp text, c int, a int, b text); +alter table parted_conflict_1 drop column drp; +create unique index on parted_conflict (a, b); +alter table parted_conflict attach partition parted_conflict_1 for values from (0) to (1000); +truncate parted_conflict; +insert into parted_conflict values (50, 'cincuenta', 1); +insert into parted_conflict values (50, 'cincuenta', 2) + on conflict (a, b) do update set (a, b, c) = row(excluded.*) + where parted_conflict = (50, text 'cincuenta', 1) and + excluded = (50, text 'cincuenta', 2); + 
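
As an aside (not part of the patch): the WHERE clause just above compares whole-row Vars with row constructors; a bare table alias used in an expression stands for the entire stored row, and record equality is then evaluated column by column. A standalone sketch of that semantics, using a hypothetical wr_demo table:

create temp table wr_demo (a int, b text, c int);
insert into wr_demo values (50, 'cincuenta', 1);
select (w = row(50, text 'cincuenta', 1)) as whole_row_match,
       (w = row(50, text 'cincuenta', 2)) as mismatch_on_c
from wr_demo w;
-- expected: whole_row_match = t, mismatch_on_c = f
drop table wr_demo;
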
+-- should see (50, 'cincuenta', 2) +select * from parted_conflict order by a; + +drop table parted_conflict; diff --git a/src/test/regress/sql/join.sql b/src/test/regress/sql/join.sql index 835d67551c..334a4dce2d 100644 --- a/src/test/regress/sql/join.sql +++ b/src/test/regress/sql/join.sql @@ -193,6 +193,15 @@ SELECT '' AS "xxx", * SELECT '' AS "xxx", * FROM J1_TBL LEFT JOIN J2_TBL USING (i) WHERE (i = 1); +-- +-- semijoin selectivity for <> +-- +explain (costs off) +select * from int4_tbl i4, tenk1 a +where exists(select * from tenk1 b + where a.twothousand = b.twothousand and a.fivethous <> b.fivethous) + and i4.f1 = a.tenthous; + -- -- More complicated constructs @@ -288,6 +297,13 @@ NATURAL FULL JOIN (SELECT name, n as s3_n FROM t3) as s3 ) ss2; +-- Constants as join keys can also be problematic +SELECT * FROM + (SELECT name, n as s1_n FROM t1) as s1 +FULL JOIN + (SELECT name, 2 as s2_n FROM t2) as s2 +ON (s1_n = s2_n); + -- Test for propagation of nullability constraints into sub-joins @@ -408,6 +424,37 @@ select a.f1, b.f1, t.thousand, t.tenthous from (select sum(f1) as f1 from int4_tbl i4b) b where b.f1 = t.thousand and a.f1 = b.f1 and (a.f1+b.f1+999) = t.tenthous; +-- +-- check a case where we formerly got confused by conflicting sort orders +-- in redundant merge join path keys +-- +explain (costs off) +select * from + j1_tbl full join + (select * from j2_tbl order by j2_tbl.i desc, j2_tbl.k asc) j2_tbl + on j1_tbl.i = j2_tbl.i and j1_tbl.i = j2_tbl.k; + +select * from + j1_tbl full join + (select * from j2_tbl order by j2_tbl.i desc, j2_tbl.k asc) j2_tbl + on j1_tbl.i = j2_tbl.i and j1_tbl.i = j2_tbl.k; + +-- +-- a different check for handling of redundant sort keys in merge joins +-- +explain (costs off) +select count(*) from + (select * from tenk1 x order by x.thousand, x.twothousand, x.fivethous) x + left join + (select * from tenk1 y order by y.unique2) y + on x.thousand = y.unique2 and x.twothousand = y.hundred and x.fivethous = y.unique2; + +select count(*) from + (select * from tenk1 x order by x.thousand, x.twothousand, x.fivethous) x + left join + (select * from tenk1 y order by y.unique2) y + on x.thousand = y.unique2 and x.twothousand = y.hundred and x.fivethous = y.unique2; + -- -- Clean up @@ -633,6 +680,26 @@ select * from a left join b on i = x and i = y and x = i; rollback; +-- +-- test handling of merge clauses using record_ops +-- +begin; + +create type mycomptype as (id int, v bigint); + +create temp table tidv (idv mycomptype); +create index on tidv (idv); + +explain (costs off) +select a.idv, b.idv from tidv a, tidv b where a.idv = b.idv; + +set enable_mergejoin = 0; + +explain (costs off) +select a.idv, b.idv from tidv a, tidv b where a.idv = b.idv; + +rollback; + -- -- test NULL behavior of whole-row Vars, per bug #5025 -- @@ -988,6 +1055,17 @@ select * from where fault = 122 order by fault; +explain (costs off) +select * from +(values (1, array[10,20]), (2, array[20,30])) as v1(v1x,v1ys) +left join (values (1, 10), (2, 20)) as v2(v2x,v2y) on v2x = v1x +left join unnest(v1ys) as u1(u1y) on u1y = v2y; + +select * from +(values (1, array[10,20]), (2, array[20,30])) as v1(v1x,v1ys) +left join (values (1, 10), (2, 20)) as v2(v2x,v2y) on v2x = v1x +left join unnest(v1ys) as u1(u1y) on u1y = v2y; + -- -- test handling of potential equivalence clauses above outer joins -- @@ -1336,6 +1414,11 @@ explain (costs off) select i8.* from int8_tbl i8 left join (select f1 from int4_tbl group by f1) i4 on i8.q1 = i4.f1; +-- check join removal with lateral references 
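
For orientation (a sketch, not taken from the patch): "join removal" means that a LEFT JOIN whose nullable side is provably unique on the join column, and whose columns are never referenced, is dropped from the plan entirely. A standalone example with hypothetical tables jr_parent and jr_fact:

create temp table jr_parent (id int primary key, val text);
create temp table jr_fact (parent_id int);
explain (costs off)
select f.parent_id
from jr_fact f left join jr_parent p on f.parent_id = p.id;
-- expected: a plain scan of jr_fact with no join node in the plan
drop table jr_fact, jr_parent;
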
+explain (costs off) +select 1 from (select a.id FROM a left join b on a.b_id = b.id) q, + lateral generate_series(1, q.id) gs(i) where q.id = gs.i; + rollback; create temp table parent (k int primary key, pd int); @@ -1668,6 +1751,16 @@ select * from select * from (select 3 as z offset 0) z where z.z = x.x ) zz on zz.z = y.y; +-- check handling of nested appendrels inside LATERAL +select * from + ((select 2 as v) union all (select 3 as v)) as q1 + cross join lateral + ((select * from + ((select 4 as v) union all (select 5 as v)) as q3) + union all + (select q1.v) + ) as q2; + -- check we don't try to do a unique-ified semijoin with LATERAL explain (verbose, costs off) select * from @@ -1733,6 +1826,29 @@ delete from xx1 using (select * from int4_tbl where f1 = x1) ss; delete from xx1 using (select * from int4_tbl where f1 = xx1.x1) ss; delete from xx1 using lateral (select * from int4_tbl where f1 = x1) ss; +-- +-- test LATERAL reference propagation down a multi-level inheritance hierarchy +-- produced for a multi-level partitioned table hierarchy. +-- +create table join_pt1 (a int, b int, c varchar) partition by range(a); +create table join_pt1p1 partition of join_pt1 for values from (0) to (100) partition by range(b); +create table join_pt1p2 partition of join_pt1 for values from (100) to (200); +create table join_pt1p1p1 partition of join_pt1p1 for values from (0) to (100); +insert into join_pt1 values (1, 1, 'x'), (101, 101, 'y'); +create table join_ut1 (a int, b int, c varchar); +insert into join_ut1 values (101, 101, 'y'), (2, 2, 'z'); +explain (verbose, costs off) +select t1.b, ss.phv from join_ut1 t1 left join lateral + (select t2.a as t2a, t3.a t3a, least(t1.a, t2.a, t3.a) phv + from join_pt1 t2 join join_ut1 t3 on t2.a = t3.b) ss + on t1.a = ss.t2a order by t1.a; +select t1.b, ss.phv from join_ut1 t1 left join lateral + (select t2.a as t2a, t3.a t3a, least(t1.a, t2.a, t3.a) phv + from join_pt1 t2 join join_ut1 t3 on t2.a = t3.b) ss + on t1.a = ss.t2a order by t1.a; + +drop table join_pt1; +drop table join_ut1; -- -- test that foreign key join estimation performs sanely for outer joins -- @@ -1906,3 +2022,473 @@ where exists (select 1 from j3 and t1.unique1 < 1; drop table j3; + +-- +-- exercises for the hash join code +-- + +begin; + +set local min_parallel_table_scan_size = 0; +set local parallel_setup_cost = 0; + +-- Extract bucket and batch counts from an explain analyze plan. In +-- general we can't make assertions about how many batches (or +-- buckets) will be required because it can vary, but we can in some +-- special cases and we can check for growth. +create or replace function find_hash(node json) +returns json language plpgsql +as +$$ +declare + x json; + child json; +begin + if node->>'Node Type' = 'Hash' then + return node; + else + for child in select json_array_elements(node->'Plans') + loop + x := find_hash(child); + if x is not null then + return x; + end if; + end loop; + return null; + end if; +end; +$$; +create or replace function hash_join_batches(query text) +returns table (original int, final int) language plpgsql +as +$$ +declare + whole_plan json; + hash_node json; +begin + for whole_plan in + execute 'explain (analyze, format ''json'') ' || query + loop + hash_node := find_hash(json_extract_path(whole_plan, '0', 'Plan')); + original := hash_node->>'Original Hash Batches'; + final := hash_node->>'Hash Batches'; + return next; + end loop; +end; +$$; + +-- Make a simple relation with well distributed keys and correctly +-- estimated size. 
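
A possible way to try the hash_join_batches() helper defined above, not part of the patch: with merge and nested-loop joins disabled, a plain equijoin should be planned as a hash join, and the helper then reports how many batches were planned ("original") versus actually used ("final"). The call below is only a sketch under that planner assumption:

set local enable_mergejoin = off;
set local enable_nestloop = off;
select original as planned_batches,
       final    as executed_batches,
       final > original as batches_grew
from hash_join_batches($$
  select count(*)
  from generate_series(1, 10000) a(id)
  join generate_series(1, 10000) b(id) using (id)
$$);
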
+create table simple as + select generate_series(1, 20000) AS id, 'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa'; +alter table simple set (parallel_workers = 2); +analyze simple; + +-- Make a relation whose size we will under-estimate. We want stats +-- to say 1000 rows, but actually there are 20,000 rows. +create table bigger_than_it_looks as + select generate_series(1, 20000) as id, 'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa'; +alter table bigger_than_it_looks set (autovacuum_enabled = 'false'); +alter table bigger_than_it_looks set (parallel_workers = 2); +analyze bigger_than_it_looks; +update pg_class set reltuples = 1000 where relname = 'bigger_than_it_looks'; + +-- Make a relation whose size we underestimate and that also has a +-- kind of skew that breaks our batching scheme. We want stats to say +-- 2 rows, but actually there are 20,000 rows with the same key. +create table extremely_skewed (id int, t text); +alter table extremely_skewed set (autovacuum_enabled = 'false'); +alter table extremely_skewed set (parallel_workers = 2); +analyze extremely_skewed; +insert into extremely_skewed + select 42 as id, 'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa' + from generate_series(1, 20000); +update pg_class + set reltuples = 2, relpages = pg_relation_size('extremely_skewed') / 8192 + where relname = 'extremely_skewed'; + +-- Make a relation with a couple of enormous tuples. +create table wide as select generate_series(1, 2) as id, rpad('', 320000, 'x') as t; +alter table wide set (parallel_workers = 2); + +-- The "optimal" case: the hash table fits in memory; we plan for 1 +-- batch, we stick to that number, and peak memory usage stays within +-- our work_mem budget + +-- non-parallel +savepoint settings; +set local max_parallel_workers_per_gather = 0; +set local work_mem = '4MB'; +explain (costs off) + select count(*) from simple r join simple s using (id); +select count(*) from simple r join simple s using (id); +select original > 1 as initially_multibatch, final > original as increased_batches + from hash_join_batches( +$$ + select count(*) from simple r join simple s using (id); +$$); +rollback to settings; + +-- parallel with parallel-oblivious hash join +savepoint settings; +set local max_parallel_workers_per_gather = 2; +set local work_mem = '4MB'; +set local enable_parallel_hash = off; +explain (costs off) + select count(*) from simple r join simple s using (id); +select count(*) from simple r join simple s using (id); +select original > 1 as initially_multibatch, final > original as increased_batches + from hash_join_batches( +$$ + select count(*) from simple r join simple s using (id); +$$); +rollback to settings; + +-- parallel with parallel-aware hash join +savepoint settings; +set local max_parallel_workers_per_gather = 2; +set local work_mem = '4MB'; +set local enable_parallel_hash = on; +explain (costs off) + select count(*) from simple r join simple s using (id); +select count(*) from simple r join simple s using (id); +select original > 1 as initially_multibatch, final > original as increased_batches + from hash_join_batches( +$$ + select count(*) from simple r join simple s using (id); +$$); +rollback to settings; + +-- The "good" case: batches required, but we plan the right number; we +-- plan for some number of batches, and we stick to that number, and +-- peak memory usage says within our work_mem budget + +-- non-parallel +savepoint settings; +set local max_parallel_workers_per_gather = 0; +set local work_mem = '128kB'; +explain (costs off) + select count(*) from simple r join simple s using 
(id); +select count(*) from simple r join simple s using (id); +select original > 1 as initially_multibatch, final > original as increased_batches + from hash_join_batches( +$$ + select count(*) from simple r join simple s using (id); +$$); +rollback to settings; + +-- parallel with parallel-oblivious hash join +savepoint settings; +set local max_parallel_workers_per_gather = 2; +set local work_mem = '128kB'; +set local enable_parallel_hash = off; +explain (costs off) + select count(*) from simple r join simple s using (id); +select count(*) from simple r join simple s using (id); +select original > 1 as initially_multibatch, final > original as increased_batches + from hash_join_batches( +$$ + select count(*) from simple r join simple s using (id); +$$); +rollback to settings; + +-- parallel with parallel-aware hash join +savepoint settings; +set local max_parallel_workers_per_gather = 2; +set local work_mem = '192kB'; +set local enable_parallel_hash = on; +explain (costs off) + select count(*) from simple r join simple s using (id); +select count(*) from simple r join simple s using (id); +select original > 1 as initially_multibatch, final > original as increased_batches + from hash_join_batches( +$$ + select count(*) from simple r join simple s using (id); +$$); +rollback to settings; + +-- The "bad" case: during execution we need to increase number of +-- batches; in this case we plan for 1 batch, and increase at least a +-- couple of times, and peak memory usage stays within our work_mem +-- budget + +-- non-parallel +savepoint settings; +set local max_parallel_workers_per_gather = 0; +set local work_mem = '128kB'; +explain (costs off) + select count(*) FROM simple r JOIN bigger_than_it_looks s USING (id); +select count(*) FROM simple r JOIN bigger_than_it_looks s USING (id); +select original > 1 as initially_multibatch, final > original as increased_batches + from hash_join_batches( +$$ + select count(*) FROM simple r JOIN bigger_than_it_looks s USING (id); +$$); +rollback to settings; + +-- parallel with parallel-oblivious hash join +savepoint settings; +set local max_parallel_workers_per_gather = 2; +set local work_mem = '128kB'; +set local enable_parallel_hash = off; +explain (costs off) + select count(*) from simple r join bigger_than_it_looks s using (id); +select count(*) from simple r join bigger_than_it_looks s using (id); +select original > 1 as initially_multibatch, final > original as increased_batches + from hash_join_batches( +$$ + select count(*) from simple r join bigger_than_it_looks s using (id); +$$); +rollback to settings; + +-- parallel with parallel-aware hash join +savepoint settings; +set local max_parallel_workers_per_gather = 1; +set local work_mem = '192kB'; +set local enable_parallel_hash = on; +explain (costs off) + select count(*) from simple r join bigger_than_it_looks s using (id); +select count(*) from simple r join bigger_than_it_looks s using (id); +select original > 1 as initially_multibatch, final > original as increased_batches + from hash_join_batches( +$$ + select count(*) from simple r join bigger_than_it_looks s using (id); +$$); +rollback to settings; + +-- The "ugly" case: increasing the number of batches during execution +-- doesn't help, so stop trying to fit in work_mem and hope for the +-- best; in this case we plan for 1 batch, increases just once and +-- then stop increasing because that didn't help at all, so we blow +-- right through the work_mem budget and hope for the best... 
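
To make the "ugly" case concrete: every row of extremely_skewed created earlier carries the same key, so doubling the batch count can never spread those rows across batches, and the executor's only remaining option is to let the hash table exceed work_mem:

select id, count(*) as rows_with_this_key
from extremely_skewed
group by id;
-- expected: a single group, (42, 20000)
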
+ +-- non-parallel +savepoint settings; +set local max_parallel_workers_per_gather = 0; +set local work_mem = '128kB'; +explain (costs off) + select count(*) from simple r join extremely_skewed s using (id); +select count(*) from simple r join extremely_skewed s using (id); +select * from hash_join_batches( +$$ + select count(*) from simple r join extremely_skewed s using (id); +$$); +rollback to settings; + +-- parallel with parallel-oblivious hash join +savepoint settings; +set local max_parallel_workers_per_gather = 2; +set local work_mem = '128kB'; +set local enable_parallel_hash = off; +explain (costs off) + select count(*) from simple r join extremely_skewed s using (id); +select count(*) from simple r join extremely_skewed s using (id); +select * from hash_join_batches( +$$ + select count(*) from simple r join extremely_skewed s using (id); +$$); +rollback to settings; + +-- parallel with parallel-aware hash join +savepoint settings; +set local max_parallel_workers_per_gather = 1; +set local work_mem = '128kB'; +set local enable_parallel_hash = on; +explain (costs off) + select count(*) from simple r join extremely_skewed s using (id); +select count(*) from simple r join extremely_skewed s using (id); +select * from hash_join_batches( +$$ + select count(*) from simple r join extremely_skewed s using (id); +$$); +rollback to settings; + +-- A couple of other hash join tests unrelated to work_mem management. + +-- Check that EXPLAIN ANALYZE has data even if the leader doesn't participate +savepoint settings; +set local max_parallel_workers_per_gather = 2; +set local work_mem = '4MB'; +set local parallel_leader_participation = off; +select * from hash_join_batches( +$$ + select count(*) from simple r join simple s using (id); +$$); +rollback to settings; + +-- Exercise rescans. We'll turn off parallel_leader_participation so +-- that we can check that instrumentation comes back correctly. 
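
Interpretation, not stated in the patch: for the integer ids used below, "id < ss.id + 1 and id > ss.id - 1" matches exactly the same pairs as "id = ss.id", but because it is written as inequalities it cannot drive a hash or merge join itself; the hash join underneath therefore sits on the inner side of a nested loop and gets rescanned for every outer row. A quick standalone check of the equivalence:

select count(*)
from generate_series(1, 3) a(id)
join generate_series(1, 3) b(id)
  on a.id < b.id + 1 and a.id > b.id - 1;
-- expected: 3, the same count as joining on a.id = b.id
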
+ +create table join_foo as select generate_series(1, 3) as id, 'xxxxx'::text as t; +alter table join_foo set (parallel_workers = 0); +create table join_bar as select generate_series(1, 10000) as id, 'xxxxx'::text as t; +alter table join_bar set (parallel_workers = 2); + +-- multi-batch with rescan, parallel-oblivious +savepoint settings; +set enable_parallel_hash = off; +set parallel_leader_participation = off; +set min_parallel_table_scan_size = 0; +set parallel_setup_cost = 0; +set parallel_tuple_cost = 0; +set max_parallel_workers_per_gather = 2; +set enable_material = off; +set enable_mergejoin = off; +set work_mem = '64kB'; +explain (costs off) + select count(*) from join_foo + left join (select b1.id, b1.t from join_bar b1 join join_bar b2 using (id)) ss + on join_foo.id < ss.id + 1 and join_foo.id > ss.id - 1; +select count(*) from join_foo + left join (select b1.id, b1.t from join_bar b1 join join_bar b2 using (id)) ss + on join_foo.id < ss.id + 1 and join_foo.id > ss.id - 1; +select final > 1 as multibatch + from hash_join_batches( +$$ + select count(*) from join_foo + left join (select b1.id, b1.t from join_bar b1 join join_bar b2 using (id)) ss + on join_foo.id < ss.id + 1 and join_foo.id > ss.id - 1; +$$); +rollback to settings; + +-- single-batch with rescan, parallel-oblivious +savepoint settings; +set enable_parallel_hash = off; +set parallel_leader_participation = off; +set min_parallel_table_scan_size = 0; +set parallel_setup_cost = 0; +set parallel_tuple_cost = 0; +set max_parallel_workers_per_gather = 2; +set enable_material = off; +set enable_mergejoin = off; +set work_mem = '4MB'; +explain (costs off) + select count(*) from join_foo + left join (select b1.id, b1.t from join_bar b1 join join_bar b2 using (id)) ss + on join_foo.id < ss.id + 1 and join_foo.id > ss.id - 1; +select count(*) from join_foo + left join (select b1.id, b1.t from join_bar b1 join join_bar b2 using (id)) ss + on join_foo.id < ss.id + 1 and join_foo.id > ss.id - 1; +select final > 1 as multibatch + from hash_join_batches( +$$ + select count(*) from join_foo + left join (select b1.id, b1.t from join_bar b1 join join_bar b2 using (id)) ss + on join_foo.id < ss.id + 1 and join_foo.id > ss.id - 1; +$$); +rollback to settings; + +-- multi-batch with rescan, parallel-aware +savepoint settings; +set enable_parallel_hash = on; +set parallel_leader_participation = off; +set min_parallel_table_scan_size = 0; +set parallel_setup_cost = 0; +set parallel_tuple_cost = 0; +set max_parallel_workers_per_gather = 2; +set enable_material = off; +set enable_mergejoin = off; +set work_mem = '64kB'; +explain (costs off) + select count(*) from join_foo + left join (select b1.id, b1.t from join_bar b1 join join_bar b2 using (id)) ss + on join_foo.id < ss.id + 1 and join_foo.id > ss.id - 1; +select count(*) from join_foo + left join (select b1.id, b1.t from join_bar b1 join join_bar b2 using (id)) ss + on join_foo.id < ss.id + 1 and join_foo.id > ss.id - 1; +select final > 1 as multibatch + from hash_join_batches( +$$ + select count(*) from join_foo + left join (select b1.id, b1.t from join_bar b1 join join_bar b2 using (id)) ss + on join_foo.id < ss.id + 1 and join_foo.id > ss.id - 1; +$$); +rollback to settings; + +-- single-batch with rescan, parallel-aware +savepoint settings; +set enable_parallel_hash = on; +set parallel_leader_participation = off; +set min_parallel_table_scan_size = 0; +set parallel_setup_cost = 0; +set parallel_tuple_cost = 0; +set max_parallel_workers_per_gather = 2; +set enable_material = off; 
+set enable_mergejoin = off; +set work_mem = '4MB'; +explain (costs off) + select count(*) from join_foo + left join (select b1.id, b1.t from join_bar b1 join join_bar b2 using (id)) ss + on join_foo.id < ss.id + 1 and join_foo.id > ss.id - 1; +select count(*) from join_foo + left join (select b1.id, b1.t from join_bar b1 join join_bar b2 using (id)) ss + on join_foo.id < ss.id + 1 and join_foo.id > ss.id - 1; +select final > 1 as multibatch + from hash_join_batches( +$$ + select count(*) from join_foo + left join (select b1.id, b1.t from join_bar b1 join join_bar b2 using (id)) ss + on join_foo.id < ss.id + 1 and join_foo.id > ss.id - 1; +$$); +rollback to settings; + +-- A full outer join where every record is matched. + +-- non-parallel +savepoint settings; +set local max_parallel_workers_per_gather = 0; +explain (costs off) + select count(*) from simple r full outer join simple s using (id); +select count(*) from simple r full outer join simple s using (id); +rollback to settings; + +-- parallelism not possible with parallel-oblivious outer hash join +savepoint settings; +set local max_parallel_workers_per_gather = 2; +explain (costs off) + select count(*) from simple r full outer join simple s using (id); +select count(*) from simple r full outer join simple s using (id); +rollback to settings; + +-- An full outer join where every record is not matched. + +-- non-parallel +savepoint settings; +set local max_parallel_workers_per_gather = 0; +explain (costs off) + select count(*) from simple r full outer join simple s on (r.id = 0 - s.id); +select count(*) from simple r full outer join simple s on (r.id = 0 - s.id); +rollback to settings; + +-- parallelism not possible with parallel-oblivious outer hash join +savepoint settings; +set local max_parallel_workers_per_gather = 2; +explain (costs off) + select count(*) from simple r full outer join simple s on (r.id = 0 - s.id); +select count(*) from simple r full outer join simple s on (r.id = 0 - s.id); +rollback to settings; + +-- exercise special code paths for huge tuples (note use of non-strict +-- expression and left join required to get the detoasted tuple into +-- the hash table) + +-- parallel with parallel-aware hash join (hits ExecParallelHashLoadTuple and +-- sts_puttuple oversized tuple cases because it's multi-batch) +savepoint settings; +set max_parallel_workers_per_gather = 2; +set enable_parallel_hash = on; +set work_mem = '128kB'; +explain (costs off) + select length(max(s.t)) + from wide left join (select id, coalesce(t, '') || '' as t from wide) s using (id); +select length(max(s.t)) +from wide left join (select id, coalesce(t, '') || '' as t from wide) s using (id); +select final > 1 as multibatch + from hash_join_batches( +$$ + select length(max(s.t)) + from wide left join (select id, coalesce(t, '') || '' as t from wide) s using (id); +$$); +rollback to settings; + +rollback; diff --git a/src/test/regress/sql/json.sql b/src/test/regress/sql/json.sql index 506e3a8fc5..97c75420e9 100644 --- a/src/test/regress/sql/json.sql +++ b/src/test/regress/sql/json.sql @@ -388,6 +388,9 @@ CREATE DOMAIN js_int_not_null AS int NOT NULL; CREATE DOMAIN js_int_array_1d AS int[] CHECK(array_length(VALUE, 1) = 3); CREATE DOMAIN js_int_array_2d AS int[][] CHECK(array_length(VALUE, 2) = 3); +create type j_unordered_pair as (x int, y int); +create domain j_ordered_pair as j_unordered_pair check((value).x <= (value).y); + CREATE TYPE jsrec AS ( i int, ia _int4, @@ -516,6 +519,15 @@ SELECT rec FROM json_populate_record( '{"rec": {"a": "abc", 
"c": "01.02.2003", "x": 43.2}}' ) q; +-- anonymous record type +SELECT json_populate_record(null::record, '{"x": 0, "y": 1}'); +SELECT json_populate_record(row(1,2), '{"f1": 0, "f2": 1}'); + +-- composite domain +SELECT json_populate_record(null::j_ordered_pair, '{"x": 0, "y": 1}'); +SELECT json_populate_record(row(1,2)::j_ordered_pair, '{"x": 0}'); +SELECT json_populate_record(row(1,2)::j_ordered_pair, '{"x": 1, "y": 0}'); + -- populate_recordset select * from json_populate_recordset(null::jpop,'[{"a":"blurfl","x":43.2},{"b":3,"c":"2012-01-20 10:42:53"}]') q; @@ -532,6 +544,23 @@ select * from json_populate_recordset(null::jpop,'[{"a":"blurfl","x":43.2},{"b": select * from json_populate_recordset(row('def',99,null)::jpop,'[{"a":"blurfl","x":43.2},{"b":3,"c":"2012-01-20 10:42:53"}]') q; select * from json_populate_recordset(row('def',99,null)::jpop,'[{"a":[100,200,300],"x":43.2},{"a":{"z":true},"b":3,"c":"2012-01-20 10:42:53"}]') q; +-- anonymous record type +SELECT json_populate_recordset(null::record, '[{"x": 0, "y": 1}]'); +SELECT json_populate_recordset(row(1,2), '[{"f1": 0, "f2": 1}]'); +SELECT i, json_populate_recordset(row(i,50), '[{"f1":"42"},{"f2":"43"}]') +FROM (VALUES (1),(2)) v(i); + +-- composite domain +SELECT json_populate_recordset(null::j_ordered_pair, '[{"x": 0, "y": 1}]'); +SELECT json_populate_recordset(row(1,2)::j_ordered_pair, '[{"x": 0}, {"y": 3}]'); +SELECT json_populate_recordset(row(1,2)::j_ordered_pair, '[{"x": 1, "y": 0}]'); + +-- negative cases where the wrong record type is supplied +select * from json_populate_recordset(row(0::int),'[{"a":"1","b":"2"},{"a":"3"}]') q (a text, b text); +select * from json_populate_recordset(row(0::int,0::int),'[{"a":"1","b":"2"},{"a":"3"}]') q (a text, b text); +select * from json_populate_recordset(row(0::int,0::int,0::int),'[{"a":"1","b":"2"},{"a":"3"}]') q (a text, b text); +select * from json_populate_recordset(row(1000000000::int,50::int),'[{"b":"2"},{"a":"3"}]') q (a text, b text); + -- test type info caching in json_populate_record() CREATE TEMP TABLE jspoptest (js json); @@ -550,6 +579,8 @@ DROP TYPE jsrec_i_not_null; DROP DOMAIN js_int_not_null; DROP DOMAIN js_int_array_1d; DROP DOMAIN js_int_array_2d; +DROP DOMAIN j_ordered_pair; +DROP TYPE j_unordered_pair; --json_typeof() function select value, json_typeof(value) @@ -569,6 +600,14 @@ select value, json_typeof(value) -- json_build_array, json_build_object, json_object_agg SELECT json_build_array('a',1,'b',1.2,'c',true,'d',null,'e',json '{"x": 3, "y": [1,2,3]}'); +SELECT json_build_array('a', NULL); -- ok +SELECT json_build_array(VARIADIC NULL::text[]); -- ok +SELECT json_build_array(VARIADIC '{}'::text[]); -- ok +SELECT json_build_array(VARIADIC '{a,b,c}'::text[]); -- ok +SELECT json_build_array(VARIADIC ARRAY['a', NULL]::text[]); -- ok +SELECT json_build_array(VARIADIC '{1,2,3,4}'::text[]); -- ok +SELECT json_build_array(VARIADIC '{1,2,3,4}'::int[]); -- ok +SELECT json_build_array(VARIADIC '{{1,4},{2,5},{3,6}}'::int[][]); -- ok SELECT json_build_object('a',1,'b',1.2,'c',true,'d',null,'e',json '{"x": 3, "y": [1,2,3]}'); @@ -576,6 +615,19 @@ SELECT json_build_object( 'a', json_build_object('b',false,'c',99), 'd', json_build_object('e',array[9,8,7]::int[], 'f', (select row_to_json(r) from ( select relkind, oid::regclass as name from pg_class where relname = 'pg_class') r))); +SELECT json_build_object('{a,b,c}'::text[]); -- error +SELECT json_build_object('{a,b,c}'::text[], '{d,e,f}'::text[]); -- error, key cannot be array +SELECT json_build_object('a', 'b', 'c'); -- 
error +SELECT json_build_object(NULL, 'a'); -- error, key cannot be NULL +SELECT json_build_object('a', NULL); -- ok +SELECT json_build_object(VARIADIC NULL::text[]); -- ok +SELECT json_build_object(VARIADIC '{}'::text[]); -- ok +SELECT json_build_object(VARIADIC '{a,b,c}'::text[]); -- error +SELECT json_build_object(VARIADIC ARRAY['a', NULL]::text[]); -- ok +SELECT json_build_object(VARIADIC ARRAY[NULL, 'a']::text[]); -- error, key cannot be NULL +SELECT json_build_object(VARIADIC '{1,2,3,4}'::text[]); -- ok +SELECT json_build_object(VARIADIC '{1,2,3,4}'::int[]); -- ok +SELECT json_build_object(VARIADIC '{{1,4},{2,5},{3,6}}'::int[][]); -- ok -- empty objects/arrays SELECT json_build_array(); @@ -713,12 +765,42 @@ select to_tsvector('simple', '{"a": "aaa bbb ddd ccc", "b": ["eee fff ggg"], "c" -- json to tsvector with stop words select to_tsvector('english', '{"a": "aaa in bbb ddd ccc", "b": ["the eee fff ggg"], "c": {"d": "hhh. iii"}}'::json); +-- json to tsvector with numeric values +select to_tsvector('english', '{"a": "aaa in bbb ddd ccc", "b": 123, "c": 456}'::json); + +-- json_to_tsvector +select json_to_tsvector('english', '{"a": "aaa in bbb", "b": 123, "c": 456, "d": true, "f": false, "g": null}'::json, '"all"'); +select json_to_tsvector('english', '{"a": "aaa in bbb", "b": 123, "c": 456, "d": true, "f": false, "g": null}'::json, '"key"'); +select json_to_tsvector('english', '{"a": "aaa in bbb", "b": 123, "c": 456, "d": true, "f": false, "g": null}'::json, '"string"'); +select json_to_tsvector('english', '{"a": "aaa in bbb", "b": 123, "c": 456, "d": true, "f": false, "g": null}'::json, '"numeric"'); +select json_to_tsvector('english', '{"a": "aaa in bbb", "b": 123, "c": 456, "d": true, "f": false, "g": null}'::json, '"boolean"'); +select json_to_tsvector('english', '{"a": "aaa in bbb", "b": 123, "c": 456, "d": true, "f": false, "g": null}'::json, '["string", "numeric"]'); + +select json_to_tsvector('english', '{"a": "aaa in bbb", "b": 123, "c": 456, "d": true, "f": false, "g": null}'::json, '"all"'); +select json_to_tsvector('english', '{"a": "aaa in bbb", "b": 123, "c": 456, "d": true, "f": false, "g": null}'::json, '"key"'); +select json_to_tsvector('english', '{"a": "aaa in bbb", "b": 123, "c": 456, "d": true, "f": false, "g": null}'::json, '"string"'); +select json_to_tsvector('english', '{"a": "aaa in bbb", "b": 123, "c": 456, "d": true, "f": false, "g": null}'::json, '"numeric"'); +select json_to_tsvector('english', '{"a": "aaa in bbb", "b": 123, "c": 456, "d": true, "f": false, "g": null}'::json, '"boolean"'); +select json_to_tsvector('english', '{"a": "aaa in bbb", "b": 123, "c": 456, "d": true, "f": false, "g": null}'::json, '["string", "numeric"]'); + -- ts_vector corner cases select to_tsvector('""'::json); select to_tsvector('{}'::json); select to_tsvector('[]'::json); select to_tsvector('null'::json); +-- json_to_tsvector corner cases +select json_to_tsvector('""'::json, '"all"'); +select json_to_tsvector('{}'::json, '"all"'); +select json_to_tsvector('[]'::json, '"all"'); +select json_to_tsvector('null'::json, '"all"'); + +select json_to_tsvector('english', '{"a": "aaa in bbb", "b": 123, "c": 456, "d": true, "f": false, "g": null}'::json, '""'); +select json_to_tsvector('english', '{"a": "aaa in bbb", "b": 123, "c": 456, "d": true, "f": false, "g": null}'::json, '{}'); +select json_to_tsvector('english', '{"a": "aaa in bbb", "b": 123, "c": 456, "d": true, "f": false, "g": null}'::json, '[]'); +select json_to_tsvector('english', '{"a": "aaa in bbb", "b": 123, "c": 456, 
"d": true, "f": false, "g": null}'::json, 'null'); +select json_to_tsvector('english', '{"a": "aaa in bbb", "b": 123, "c": 456, "d": true, "f": false, "g": null}'::json, '["all", null]'); + -- ts_headline for json select ts_headline('{"a": "aaa bbb", "b": {"c": "ccc ddd fff", "c1": "ccc1 ddd1"}, "d": ["ggg hhh", "iii jjj"]}'::json, tsquery('bbb & ddd & hhh')); select ts_headline('english', '{"a": "aaa bbb", "b": {"c": "ccc ddd fff"}, "d": ["ggg hhh", "iii jjj"]}'::json, tsquery('bbb & ddd & hhh')); diff --git a/src/test/regress/sql/jsonb.sql b/src/test/regress/sql/jsonb.sql index 57fff3bfb3..bd82fd13f7 100644 --- a/src/test/regress/sql/jsonb.sql +++ b/src/test/regress/sql/jsonb.sql @@ -313,6 +313,14 @@ SELECT jsonb_typeof('"1.0"') AS string; -- jsonb_build_array, jsonb_build_object, jsonb_object_agg SELECT jsonb_build_array('a',1,'b',1.2,'c',true,'d',null,'e',json '{"x": 3, "y": [1,2,3]}'); +SELECT jsonb_build_array('a', NULL); -- ok +SELECT jsonb_build_array(VARIADIC NULL::text[]); -- ok +SELECT jsonb_build_array(VARIADIC '{}'::text[]); -- ok +SELECT jsonb_build_array(VARIADIC '{a,b,c}'::text[]); -- ok +SELECT jsonb_build_array(VARIADIC ARRAY['a', NULL]::text[]); -- ok +SELECT jsonb_build_array(VARIADIC '{1,2,3,4}'::text[]); -- ok +SELECT jsonb_build_array(VARIADIC '{1,2,3,4}'::int[]); -- ok +SELECT jsonb_build_array(VARIADIC '{{1,4},{2,5},{3,6}}'::int[][]); -- ok SELECT jsonb_build_object('a',1,'b',1.2,'c',true,'d',null,'e',json '{"x": 3, "y": [1,2,3]}'); @@ -320,7 +328,19 @@ SELECT jsonb_build_object( 'a', jsonb_build_object('b',false,'c',99), 'd', jsonb_build_object('e',array[9,8,7]::int[], 'f', (select row_to_json(r) from ( select relkind, oid::regclass as name from pg_class where relname = 'pg_class') r))); - +SELECT jsonb_build_object('{a,b,c}'::text[]); -- error +SELECT jsonb_build_object('{a,b,c}'::text[], '{d,e,f}'::text[]); -- error, key cannot be array +SELECT jsonb_build_object('a', 'b', 'c'); -- error +SELECT jsonb_build_object(NULL, 'a'); -- error, key cannot be NULL +SELECT jsonb_build_object('a', NULL); -- ok +SELECT jsonb_build_object(VARIADIC NULL::text[]); -- ok +SELECT jsonb_build_object(VARIADIC '{}'::text[]); -- ok +SELECT jsonb_build_object(VARIADIC '{a,b,c}'::text[]); -- error +SELECT jsonb_build_object(VARIADIC ARRAY['a', NULL]::text[]); -- ok +SELECT jsonb_build_object(VARIADIC ARRAY[NULL, 'a']::text[]); -- error, key cannot be NULL +SELECT jsonb_build_object(VARIADIC '{1,2,3,4}'::text[]); -- ok +SELECT jsonb_build_object(VARIADIC '{1,2,3,4}'::int[]); -- ok +SELECT jsonb_build_object(VARIADIC '{{1,4},{2,5},{3,6}}'::int[][]); -- ok -- empty objects/arrays SELECT jsonb_build_array(); @@ -488,6 +508,9 @@ CREATE DOMAIN jsb_int_not_null AS int NOT NULL; CREATE DOMAIN jsb_int_array_1d AS int[] CHECK(array_length(VALUE, 1) = 3); CREATE DOMAIN jsb_int_array_2d AS int[][] CHECK(array_length(VALUE, 2) = 3); +create type jb_unordered_pair as (x int, y int); +create domain jb_ordered_pair as jb_unordered_pair check((value).x <= (value).y); + CREATE TYPE jsbrec AS ( i int, ia _int4, @@ -616,6 +639,15 @@ SELECT rec FROM jsonb_populate_record( '{"rec": {"a": "abc", "c": "01.02.2003", "x": 43.2}}' ) q; +-- anonymous record type +SELECT jsonb_populate_record(null::record, '{"x": 0, "y": 1}'); +SELECT jsonb_populate_record(row(1,2), '{"f1": 0, "f2": 1}'); + +-- composite domain +SELECT jsonb_populate_record(null::jb_ordered_pair, '{"x": 0, "y": 1}'); +SELECT jsonb_populate_record(row(1,2)::jb_ordered_pair, '{"x": 0}'); +SELECT jsonb_populate_record(row(1,2)::jb_ordered_pair, 
'{"x": 1, "y": 0}'); + -- populate_recordset SELECT * FROM jsonb_populate_recordset(NULL::jbpop,'[{"a":"blurfl","x":43.2},{"b":3,"c":"2012-01-20 10:42:53"}]') q; SELECT * FROM jsonb_populate_recordset(row('def',99,NULL)::jbpop,'[{"a":"blurfl","x":43.2},{"b":3,"c":"2012-01-20 10:42:53"}]') q; @@ -628,6 +660,23 @@ SELECT * FROM jsonb_populate_recordset(NULL::jbpop,'[{"a":"blurfl","x":43.2},{"b SELECT * FROM jsonb_populate_recordset(row('def',99,NULL)::jbpop,'[{"a":"blurfl","x":43.2},{"b":3,"c":"2012-01-20 10:42:53"}]') q; SELECT * FROM jsonb_populate_recordset(row('def',99,NULL)::jbpop,'[{"a":[100,200,300],"x":43.2},{"a":{"z":true},"b":3,"c":"2012-01-20 10:42:53"}]') q; +-- anonymous record type +SELECT jsonb_populate_recordset(null::record, '[{"x": 0, "y": 1}]'); +SELECT jsonb_populate_recordset(row(1,2), '[{"f1": 0, "f2": 1}]'); +SELECT i, jsonb_populate_recordset(row(i,50), '[{"f1":"42"},{"f2":"43"}]') +FROM (VALUES (1),(2)) v(i); + +-- composite domain +SELECT jsonb_populate_recordset(null::jb_ordered_pair, '[{"x": 0, "y": 1}]'); +SELECT jsonb_populate_recordset(row(1,2)::jb_ordered_pair, '[{"x": 0}, {"y": 3}]'); +SELECT jsonb_populate_recordset(row(1,2)::jb_ordered_pair, '[{"x": 1, "y": 0}]'); + +-- negative cases where the wrong record type is supplied +select * from jsonb_populate_recordset(row(0::int),'[{"a":"1","b":"2"},{"a":"3"}]') q (a text, b text); +select * from jsonb_populate_recordset(row(0::int,0::int),'[{"a":"1","b":"2"},{"a":"3"}]') q (a text, b text); +select * from jsonb_populate_recordset(row(0::int,0::int,0::int),'[{"a":"1","b":"2"},{"a":"3"}]') q (a text, b text); +select * from jsonb_populate_recordset(row(1000000000::int,50::int),'[{"b":"2"},{"a":"3"}]') q (a text, b text); + -- jsonb_to_record and jsonb_to_recordset select * from jsonb_to_record('{"a":1,"b":"foo","c":"bar"}') @@ -673,6 +722,8 @@ DROP TYPE jsbrec_i_not_null; DROP DOMAIN jsb_int_not_null; DROP DOMAIN jsb_int_array_1d; DROP DOMAIN jsb_int_array_2d; +DROP DOMAIN jb_ordered_pair; +DROP TYPE jb_unordered_pair; -- indexing SELECT count(*) FROM testjsonb WHERE j @> '{"wait":null}'; @@ -1040,12 +1091,42 @@ select to_tsvector('simple', '{"a": "aaa bbb ddd ccc", "b": ["eee fff ggg"], "c" -- jsonb to tsvector with stop words select to_tsvector('english', '{"a": "aaa in bbb ddd ccc", "b": ["the eee fff ggg"], "c": {"d": "hhh. 
iii"}}'::jsonb); +-- jsonb to tsvector with numeric values +select to_tsvector('english', '{"a": "aaa in bbb ddd ccc", "b": 123, "c": 456}'::jsonb); + +-- jsonb_to_tsvector +select jsonb_to_tsvector('english', '{"a": "aaa in bbb", "b": 123, "c": 456, "d": true, "f": false, "g": null}'::jsonb, '"all"'); +select jsonb_to_tsvector('english', '{"a": "aaa in bbb", "b": 123, "c": 456, "d": true, "f": false, "g": null}'::jsonb, '"key"'); +select jsonb_to_tsvector('english', '{"a": "aaa in bbb", "b": 123, "c": 456, "d": true, "f": false, "g": null}'::jsonb, '"string"'); +select jsonb_to_tsvector('english', '{"a": "aaa in bbb", "b": 123, "c": 456, "d": true, "f": false, "g": null}'::jsonb, '"numeric"'); +select jsonb_to_tsvector('english', '{"a": "aaa in bbb", "b": 123, "c": 456, "d": true, "f": false, "g": null}'::jsonb, '"boolean"'); +select jsonb_to_tsvector('english', '{"a": "aaa in bbb", "b": 123, "c": 456, "d": true, "f": false, "g": null}'::jsonb, '["string", "numeric"]'); + +select jsonb_to_tsvector('english', '{"a": "aaa in bbb", "b": 123, "c": 456, "d": true, "f": false, "g": null}'::jsonb, '"all"'); +select jsonb_to_tsvector('english', '{"a": "aaa in bbb", "b": 123, "c": 456, "d": true, "f": false, "g": null}'::jsonb, '"key"'); +select jsonb_to_tsvector('english', '{"a": "aaa in bbb", "b": 123, "c": 456, "d": true, "f": false, "g": null}'::jsonb, '"string"'); +select jsonb_to_tsvector('english', '{"a": "aaa in bbb", "b": 123, "c": 456, "d": true, "f": false, "g": null}'::jsonb, '"numeric"'); +select jsonb_to_tsvector('english', '{"a": "aaa in bbb", "b": 123, "c": 456, "d": true, "f": false, "g": null}'::jsonb, '"boolean"'); +select jsonb_to_tsvector('english', '{"a": "aaa in bbb", "b": 123, "c": 456, "d": true, "f": false, "g": null}'::jsonb, '["string", "numeric"]'); + -- ts_vector corner cases select to_tsvector('""'::jsonb); select to_tsvector('{}'::jsonb); select to_tsvector('[]'::jsonb); select to_tsvector('null'::jsonb); +-- jsonb_to_tsvector corner cases +select jsonb_to_tsvector('""'::jsonb, '"all"'); +select jsonb_to_tsvector('{}'::jsonb, '"all"'); +select jsonb_to_tsvector('[]'::jsonb, '"all"'); +select jsonb_to_tsvector('null'::jsonb, '"all"'); + +select jsonb_to_tsvector('english', '{"a": "aaa in bbb", "b": 123, "c": 456, "d": true, "f": false, "g": null}'::jsonb, '""'); +select jsonb_to_tsvector('english', '{"a": "aaa in bbb", "b": 123, "c": 456, "d": true, "f": false, "g": null}'::jsonb, '{}'); +select jsonb_to_tsvector('english', '{"a": "aaa in bbb", "b": 123, "c": 456, "d": true, "f": false, "g": null}'::jsonb, '[]'); +select jsonb_to_tsvector('english', '{"a": "aaa in bbb", "b": 123, "c": 456, "d": true, "f": false, "g": null}'::jsonb, 'null'); +select jsonb_to_tsvector('english', '{"a": "aaa in bbb", "b": 123, "c": 456, "d": true, "f": false, "g": null}'::jsonb, '["all", null]'); + -- ts_headline for jsonb select ts_headline('{"a": "aaa bbb", "b": {"c": "ccc ddd fff", "c1": "ccc1 ddd1"}, "d": ["ggg hhh", "iii jjj"]}'::jsonb, tsquery('bbb & ddd & hhh')); select ts_headline('english', '{"a": "aaa bbb", "b": {"c": "ccc ddd fff"}, "d": ["ggg hhh", "iii jjj"]}'::jsonb, tsquery('bbb & ddd & hhh')); @@ -1056,3 +1137,25 @@ select ts_headline('english', '{"a": "aaa bbb", "b": {"c": "ccc ddd fff", "c1": select ts_headline('null'::jsonb, tsquery('aaa & bbb')); select ts_headline('{}'::jsonb, tsquery('aaa & bbb')); select ts_headline('[]'::jsonb, tsquery('aaa & bbb')); + +-- casts +select 'true'::jsonb::bool; +select '[]'::jsonb::bool; +select '1.0'::jsonb::float; +select 
'[1.0]'::jsonb::float; +select '12345'::jsonb::int4; +select '"hello"'::jsonb::int4; +select '12345'::jsonb::numeric; +select '{}'::jsonb::numeric; +select '12345.05'::jsonb::numeric; +select '12345.05'::jsonb::float4; +select '12345.05'::jsonb::float8; +select '12345.05'::jsonb::int2; +select '12345.05'::jsonb::int4; +select '12345.05'::jsonb::int8; +select '12345.0000000000000000000000000000000000000000000005'::jsonb::numeric; +select '12345.0000000000000000000000000000000000000000000005'::jsonb::float4; +select '12345.0000000000000000000000000000000000000000000005'::jsonb::float8; +select '12345.0000000000000000000000000000000000000000000005'::jsonb::int2; +select '12345.0000000000000000000000000000000000000000000005'::jsonb::int4; +select '12345.0000000000000000000000000000000000000000000005'::jsonb::int8; diff --git a/src/test/regress/sql/line.sql b/src/test/regress/sql/line.sql index 94067b0cee..f589ffecc8 100644 --- a/src/test/regress/sql/line.sql +++ b/src/test/regress/sql/line.sql @@ -6,82 +6,37 @@ --DROP TABLE LINE_TBL; CREATE TABLE LINE_TBL (s line); -INSERT INTO LINE_TBL VALUES ('{1,-1,1}'); -INSERT INTO LINE_TBL VALUES ('(0,0),(6,6)'); +INSERT INTO LINE_TBL VALUES ('{0,-1,5}'); -- A == 0 +INSERT INTO LINE_TBL VALUES ('{1,0,5}'); -- B == 0 +INSERT INTO LINE_TBL VALUES ('{0,3,0}'); -- A == C == 0 +INSERT INTO LINE_TBL VALUES (' (0,0), (6,6)'); INSERT INTO LINE_TBL VALUES ('10,-10 ,-5,-4'); INSERT INTO LINE_TBL VALUES ('[-1e6,2e2,3e5, -4e1]'); -INSERT INTO LINE_TBL VALUES ('(11,22,33,44)'); -INSERT INTO LINE_TBL VALUES ('[(1,0),(1,0)]'); +INSERT INTO LINE_TBL VALUES ('{3,NaN,5}'); +INSERT INTO LINE_TBL VALUES ('{NaN,NaN,NaN}'); -- horizontal INSERT INTO LINE_TBL VALUES ('[(1,3),(2,3)]'); -- vertical -INSERT INTO LINE_TBL VALUES ('[(3,1),(3,2)]'); +INSERT INTO LINE_TBL VALUES (line(point '(3,1)', point '(3,2)')); -- bad values for parser testing +INSERT INTO LINE_TBL VALUES ('{}'); +INSERT INTO LINE_TBL VALUES ('{0'); +INSERT INTO LINE_TBL VALUES ('{0,0}'); +INSERT INTO LINE_TBL VALUES ('{0,0,1'); INSERT INTO LINE_TBL VALUES ('{0,0,1}'); +INSERT INTO LINE_TBL VALUES ('{0,0,1} x'); INSERT INTO LINE_TBL VALUES ('(3asdf,2 ,3,4r2)'); INSERT INTO LINE_TBL VALUES ('[1,2,3, 4'); INSERT INTO LINE_TBL VALUES ('[(,2),(3,4)]'); INSERT INTO LINE_TBL VALUES ('[(1,2),(3,4)'); +INSERT INTO LINE_TBL VALUES ('[(1,2),(1,2)]'); -select * from LINE_TBL; - - --- functions and operators - -SELECT * FROM LINE_TBL WHERE (s <-> line '[(1,2),(3,4)]') < 10; - -SELECT * FROM LINE_TBL WHERE (point '(0.1,0.1)' <-> s) < 1; - -SELECT * FROM LINE_TBL WHERE (lseg '[(0.1,0.1),(0.2,0.2)]' <-> s) < 1; - -SELECT line '[(1,1),(2,1)]' <-> line '[(-1,-1),(-2,-1)]'; -SELECT lseg '[(1,1),(2,1)]' <-> line '[(-1,-1),(-2,-1)]'; -SELECT point '(-1,1)' <-> line '[(-3,0),(-4,0)]'; - -SELECT lseg '[(1,1),(5,5)]' ?# line '[(2,0),(0,2)]'; -- true -SELECT lseg '[(1,1),(5,5)]' ?# line '[(0,0),(1,0)]'; -- false - -SELECT line '[(0,0),(1,1)]' ?# box '(0,0,2,2)'; -- true -SELECT line '[(3,0),(4,1)]' ?# box '(0,0,2,2)'; -- false - -SELECT point '(1,1)' <@ line '[(0,0),(2,2)]'; -- true -SELECT point '(1,1)' <@ line '[(0,0),(1,0)]'; -- false - -SELECT point '(1,1)' @ line '[(0,0),(2,2)]'; -- true -SELECT point '(1,1)' @ line '[(0,0),(1,0)]'; -- false +INSERT INTO LINE_TBL VALUES (line(point '(1,0)', point '(1,0)')); -SELECT lseg '[(1,1),(2,2)]' <@ line '[(0,0),(2,2)]'; -- true -SELECT lseg '[(1,1),(2,1)]' <@ line '[(0,0),(1,0)]'; -- false - -SELECT lseg '[(1,1),(2,2)]' @ line '[(0,0),(2,2)]'; -- true -SELECT lseg '[(1,1),(2,1)]' @ line 
'[(0,0),(1,0)]'; -- false - -SELECT point '(0,1)' ## line '[(0,0),(1,1)]'; - -SELECT line '[(0,0),(1,1)]' ## lseg '[(1,0),(2,0)]'; - -SELECT line '[(0,0),(1,1)]' ?# line '[(1,0),(2,1)]'; -- false -SELECT line '[(0,0),(1,1)]' ?# line '[(1,0),(1,1)]'; -- true - -SELECT line '[(0,0),(1,1)]' # line '[(1,0),(2,1)]'; -SELECT line '[(0,0),(1,1)]' # line '[(1,0),(1,1)]'; - -SELECT line '[(0,0),(1,1)]' ?|| line '[(1,0),(2,1)]'; -- true -SELECT line '[(0,0),(1,1)]' ?|| line '[(1,0),(1,1)]'; -- false - -SELECT line '[(0,0),(1,0)]' ?-| line '[(0,0),(0,1)]'; -- true -SELECT line '[(0,0),(1,1)]' ?-| line '[(1,0),(1,1)]'; -- false - -SELECT ?- line '[(0,0),(1,0)]'; -- true -SELECT ?- line '[(0,0),(1,1)]'; -- false - -SELECT ?| line '[(0,0),(0,1)]'; -- true -SELECT ?| line '[(0,0),(1,1)]'; -- false - -SELECT line(point '(1,2)', point '(3,4)'); +select * from LINE_TBL; -SELECT line '[(1,2),(3,4)]' = line '[(3,4),(4,5)]'; -- true -SELECT line '[(1,2),(3,4)]' = line '[(3,4),(4,4)]'; -- false +select '{nan, 1, nan}'::line = '{nan, 1, nan}'::line as true, + '{nan, 1, nan}'::line = '{nan, 2, nan}'::line as false; diff --git a/src/test/regress/sql/lock.sql b/src/test/regress/sql/lock.sql index 567e8bccf1..26a7e59a13 100644 --- a/src/test/regress/sql/lock.sql +++ b/src/test/regress/sql/lock.sql @@ -6,7 +6,13 @@ CREATE SCHEMA lock_schema1; SET search_path = lock_schema1; CREATE TABLE lock_tbl1 (a BIGINT); -CREATE VIEW lock_view1 AS SELECT 1; +CREATE TABLE lock_tbl1a (a BIGINT); +CREATE VIEW lock_view1 AS SELECT * FROM lock_tbl1; +CREATE VIEW lock_view2(a,b) AS SELECT * FROM lock_tbl1, lock_tbl1a; +CREATE VIEW lock_view3 AS SELECT * from lock_view2; +CREATE VIEW lock_view4 AS SELECT (select a from lock_tbl1a limit 1) from lock_tbl1; +CREATE VIEW lock_view5 AS SELECT * from lock_tbl1 where a in (select * from lock_tbl1a); +CREATE VIEW lock_view6 AS SELECT * from (select * from lock_tbl1) sub; CREATE ROLE regress_rol_lock1; ALTER ROLE regress_rol_lock1 SET search_path = lock_schema1; GRANT USAGE ON SCHEMA lock_schema1 TO regress_rol_lock1; @@ -33,7 +39,59 @@ LOCK TABLE lock_tbl1 IN SHARE MODE NOWAIT; LOCK TABLE lock_tbl1 IN SHARE ROW EXCLUSIVE MODE NOWAIT; LOCK TABLE lock_tbl1 IN EXCLUSIVE MODE NOWAIT; LOCK TABLE lock_tbl1 IN ACCESS EXCLUSIVE MODE NOWAIT; -LOCK TABLE lock_view1 IN EXCLUSIVE MODE; -- Will fail; can't lock a non-table +ROLLBACK; + +-- Verify that we can lock views. +BEGIN TRANSACTION; +LOCK TABLE lock_view1 IN EXCLUSIVE MODE; +-- lock_view1 and lock_tbl1 are locked. +select relname from pg_locks l, pg_class c + where l.relation = c.oid and relname like '%lock_%' and mode = 'ExclusiveLock' + order by relname; +ROLLBACK; +BEGIN TRANSACTION; +LOCK TABLE lock_view2 IN EXCLUSIVE MODE; +-- lock_view1, lock_tbl1, and lock_tbl1a are locked. +select relname from pg_locks l, pg_class c + where l.relation = c.oid and relname like '%lock_%' and mode = 'ExclusiveLock' + order by relname; +ROLLBACK; +BEGIN TRANSACTION; +LOCK TABLE lock_view3 IN EXCLUSIVE MODE; +-- lock_view3, lock_view2, lock_tbl1, and lock_tbl1a are locked recursively. +select relname from pg_locks l, pg_class c + where l.relation = c.oid and relname like '%lock_%' and mode = 'ExclusiveLock' + order by relname; +ROLLBACK; +BEGIN TRANSACTION; +LOCK TABLE lock_view4 IN EXCLUSIVE MODE; +-- lock_view4, lock_tbl1, and lock_tbl1a are locked. 
+select relname from pg_locks l, pg_class c + where l.relation = c.oid and relname like '%lock_%' and mode = 'ExclusiveLock' + order by relname; +ROLLBACK; +BEGIN TRANSACTION; +LOCK TABLE lock_view5 IN EXCLUSIVE MODE; +-- lock_view5, lock_tbl1, and lock_tbl1a are locked. +select relname from pg_locks l, pg_class c + where l.relation = c.oid and relname like '%lock_%' and mode = 'ExclusiveLock' + order by relname; +ROLLBACK; +BEGIN TRANSACTION; +LOCK TABLE lock_view6 IN EXCLUSIVE MODE; +-- lock_view6 an lock_tbl1 are locked. +select relname from pg_locks l, pg_class c + where l.relation = c.oid and relname like '%lock_%' and mode = 'ExclusiveLock' + order by relname; +ROLLBACK; +-- detecting infinite recursions in view definitions +CREATE OR REPLACE VIEW lock_view2 AS SELECT * from lock_view3; +BEGIN TRANSACTION; +LOCK TABLE lock_view2 IN EXCLUSIVE MODE; +ROLLBACK; +CREATE VIEW lock_view7 AS SELECT * from lock_view2; +BEGIN TRANSACTION; +LOCK TABLE lock_view7 IN EXCLUSIVE MODE; ROLLBACK; -- Verify that we can lock a table with inheritance children. @@ -58,10 +116,16 @@ RESET ROLE; -- -- Clean up -- +DROP VIEW lock_view7; +DROP VIEW lock_view6; +DROP VIEW lock_view5; +DROP VIEW lock_view4; +DROP VIEW lock_view3 CASCADE; DROP VIEW lock_view1; DROP TABLE lock_tbl3; DROP TABLE lock_tbl2; DROP TABLE lock_tbl1; +DROP TABLE lock_tbl1a; DROP SCHEMA lock_schema1 CASCADE; DROP ROLE regress_rol_lock1; diff --git a/src/test/regress/sql/lseg.sql b/src/test/regress/sql/lseg.sql index 07c5a29e0a..f266ca3e09 100644 --- a/src/test/regress/sql/lseg.sql +++ b/src/test/regress/sql/lseg.sql @@ -10,7 +10,10 @@ INSERT INTO LSEG_TBL VALUES ('[(1,2),(3,4)]'); INSERT INTO LSEG_TBL VALUES ('(0,0),(6,6)'); INSERT INTO LSEG_TBL VALUES ('10,-10 ,-3,-4'); INSERT INTO LSEG_TBL VALUES ('[-1e6,2e2,3e5, -4e1]'); -INSERT INTO LSEG_TBL VALUES ('(11,22,33,44)'); +INSERT INTO LSEG_TBL VALUES (lseg(point(11, 22), point(33,44))); +INSERT INTO LSEG_TBL VALUES ('[(-10,2),(-10,3)]'); -- vertical +INSERT INTO LSEG_TBL VALUES ('[(0,-20),(30,-20)]'); -- horizontal +INSERT INTO LSEG_TBL VALUES ('[(NaN,1),(NaN,90)]'); -- NaN -- bad values for parser testing INSERT INTO LSEG_TBL VALUES ('(3asdf,2 ,3,4r2)'); @@ -19,7 +22,3 @@ INSERT INTO LSEG_TBL VALUES ('[(,2),(3,4)]'); INSERT INTO LSEG_TBL VALUES ('[(1,2),(3,4)'); select * from LSEG_TBL; - -SELECT * FROM LSEG_TBL WHERE s <= lseg '[(1,2),(3,4)]'; - -SELECT * FROM LSEG_TBL WHERE (s <-> lseg '[(1,2),(3,4)]') < 10; diff --git a/src/test/regress/sql/misc_sanity.sql b/src/test/regress/sql/misc_sanity.sql index 5130a4ab79..f2af5e3750 100644 --- a/src/test/regress/sql/misc_sanity.sql +++ b/src/test/regress/sql/misc_sanity.sql @@ -32,7 +32,7 @@ SELECT * FROM pg_shdepend as d1 WHERE refclassid = 0 OR refobjid = 0 OR deptype NOT IN ('a', 'o', 'p', 'r') OR - (deptype != 'p' AND (dbid = 0 OR classid = 0 OR objid = 0)) OR + (deptype != 'p' AND (classid = 0 OR objid = 0)) OR (deptype = 'p' AND (dbid != 0 OR classid != 0 OR objid != 0 OR objsubid != 0)); @@ -72,3 +72,22 @@ loop end if; end loop; end$$; + +-- **************** pg_class **************** + +-- Look for system tables with varlena columns but no toast table. All +-- system tables with toastable columns should have toast tables, with +-- the following exceptions: +-- 1. pg_class, pg_attribute, and pg_index, due to fear of recursive +-- dependencies as toast tables depend on them. +-- 2. pg_largeobject and pg_largeobject_metadata. 
Large object catalogs +-- and toast tables are mutually exclusive and large object data is handled +-- as user data by pg_upgrade, which would cause failures. + +SELECT relname, attname, atttypid::regtype +FROM pg_class c JOIN pg_attribute a ON c.oid = attrelid +WHERE c.oid < 16384 AND + reltoastrelid = 0 AND + relkind = 'r' AND + attstorage != 'p' +ORDER BY 1, 2; diff --git a/src/test/regress/sql/namespace.sql b/src/test/regress/sql/namespace.sql index 51cb091cc5..6b12c96193 100644 --- a/src/test/regress/sql/namespace.sql +++ b/src/test/regress/sql/namespace.sql @@ -2,7 +2,7 @@ -- Regression tests for schemas (namespaces) -- -CREATE SCHEMA test_schema_1 +CREATE SCHEMA test_ns_schema_1 CREATE UNIQUE INDEX abc_a_idx ON abc (a) CREATE VIEW abc_view AS @@ -15,30 +15,30 @@ CREATE SCHEMA test_schema_1 -- verify that the objects were created SELECT COUNT(*) FROM pg_class WHERE relnamespace = - (SELECT oid FROM pg_namespace WHERE nspname = 'test_schema_1'); + (SELECT oid FROM pg_namespace WHERE nspname = 'test_ns_schema_1'); -INSERT INTO test_schema_1.abc DEFAULT VALUES; -INSERT INTO test_schema_1.abc DEFAULT VALUES; -INSERT INTO test_schema_1.abc DEFAULT VALUES; +INSERT INTO test_ns_schema_1.abc DEFAULT VALUES; +INSERT INTO test_ns_schema_1.abc DEFAULT VALUES; +INSERT INTO test_ns_schema_1.abc DEFAULT VALUES; -SELECT * FROM test_schema_1.abc; -SELECT * FROM test_schema_1.abc_view; +SELECT * FROM test_ns_schema_1.abc; +SELECT * FROM test_ns_schema_1.abc_view; -ALTER SCHEMA test_schema_1 RENAME TO test_schema_renamed; +ALTER SCHEMA test_ns_schema_1 RENAME TO test_ns_schema_renamed; SELECT COUNT(*) FROM pg_class WHERE relnamespace = - (SELECT oid FROM pg_namespace WHERE nspname = 'test_schema_1'); + (SELECT oid FROM pg_namespace WHERE nspname = 'test_ns_schema_1'); -- test IF NOT EXISTS cases -CREATE SCHEMA test_schema_renamed; -- fail, already exists -CREATE SCHEMA IF NOT EXISTS test_schema_renamed; -- ok with notice -CREATE SCHEMA IF NOT EXISTS test_schema_renamed -- fail, disallowed +CREATE SCHEMA test_ns_schema_renamed; -- fail, already exists +CREATE SCHEMA IF NOT EXISTS test_ns_schema_renamed; -- ok with notice +CREATE SCHEMA IF NOT EXISTS test_ns_schema_renamed -- fail, disallowed CREATE TABLE abc ( a serial, b int UNIQUE ); -DROP SCHEMA test_schema_renamed CASCADE; +DROP SCHEMA test_ns_schema_renamed CASCADE; -- verify that the objects were dropped SELECT COUNT(*) FROM pg_class WHERE relnamespace = - (SELECT oid FROM pg_namespace WHERE nspname = 'test_schema_renamed'); + (SELECT oid FROM pg_namespace WHERE nspname = 'test_ns_schema_renamed'); diff --git a/src/test/regress/sql/numeric.sql b/src/test/regress/sql/numeric.sql index b51225c47f..a939412359 100644 --- a/src/test/regress/sql/numeric.sql +++ b/src/test/regress/sql/numeric.sql @@ -655,6 +655,14 @@ INSERT INTO fract_only VALUES (8, '0.00017'); SELECT * FROM fract_only; DROP TABLE fract_only; +-- Check inf/nan conversion behavior +SELECT 'NaN'::float8::numeric; +SELECT 'Infinity'::float8::numeric; +SELECT '-Infinity'::float8::numeric; +SELECT 'NaN'::float4::numeric; +SELECT 'Infinity'::float4::numeric; +SELECT '-Infinity'::float4::numeric; + -- Simple check that ceil(), floor(), and round() work correctly CREATE TABLE ceil_floor_round (a numeric); INSERT INTO ceil_floor_round VALUES ('-5.5'); @@ -778,8 +786,21 @@ SELECT '' AS to_char_24, to_char('100'::numeric, 'FM999.9'); SELECT '' AS to_char_25, to_char('100'::numeric, 'FM999.'); SELECT '' AS to_char_26, to_char('100'::numeric, 'FM999'); +-- Check parsing of literal text in a format 
string +SELECT '' AS to_char_27, to_char('100'::numeric, 'foo999'); +SELECT '' AS to_char_28, to_char('100'::numeric, 'f\oo999'); +SELECT '' AS to_char_29, to_char('100'::numeric, 'f\\oo999'); +SELECT '' AS to_char_30, to_char('100'::numeric, 'f\"oo999'); +SELECT '' AS to_char_31, to_char('100'::numeric, 'f\\"oo999'); +SELECT '' AS to_char_32, to_char('100'::numeric, 'f"ool"999'); +SELECT '' AS to_char_33, to_char('100'::numeric, 'f"\ool"999'); +SELECT '' AS to_char_34, to_char('100'::numeric, 'f"\\ool"999'); +SELECT '' AS to_char_35, to_char('100'::numeric, 'f"ool\"999'); +SELECT '' AS to_char_36, to_char('100'::numeric, 'f"ool\\"999'); + -- TO_NUMBER() -- +SET lc_numeric = 'C'; SELECT '' AS to_number_1, to_number('-34,338,492', '99G999G999'); SELECT '' AS to_number_2, to_number('-34,338,492.654,878', '99G999G999D999G999'); SELECT '' AS to_number_3, to_number('<564646.654564>', '999999.999999PR'); @@ -793,6 +814,16 @@ SELECT '' AS to_number_10, to_number('0', '99.99'); SELECT '' AS to_number_11, to_number('.-01', 'S99.99'); SELECT '' AS to_number_12, to_number('.01-', '99.99S'); SELECT '' AS to_number_13, to_number(' . 0 1-', ' 9 9 . 9 9 S'); +SELECT '' AS to_number_14, to_number('34,50','999,99'); +SELECT '' AS to_number_15, to_number('123,000','999G'); +SELECT '' AS to_number_16, to_number('123456','999G999'); +SELECT '' AS to_number_17, to_number('$1234.56','L9,999.99'); +SELECT '' AS to_number_18, to_number('$1234.56','L99,999.99'); +SELECT '' AS to_number_19, to_number('$1,234.56','L99,999.99'); +SELECT '' AS to_number_20, to_number('1234.56','L99,999.99'); +SELECT '' AS to_number_21, to_number('1,234.56','L99,999.99'); +SELECT '' AS to_number_22, to_number('42nd', '99th'); +RESET lc_numeric; -- -- Input syntax @@ -880,6 +911,13 @@ select (-12.34) ^ 0.0; select 12.34 ^ 0.0; select 0.0 ^ 12.34; +-- NaNs +select 'NaN'::numeric ^ 'NaN'::numeric; +select 'NaN'::numeric ^ 0; +select 'NaN'::numeric ^ 1; +select 0 ^ 'NaN'::numeric; +select 1 ^ 'NaN'::numeric; + -- invalid inputs select 0.0 ^ (-12.34); select (-12.34) ^ 1.2; diff --git a/src/test/regress/sql/object_address.sql b/src/test/regress/sql/object_address.sql index 63821b8008..d7df322873 100644 --- a/src/test/regress/sql/object_address.sql +++ b/src/test/regress/sql/object_address.sql @@ -22,6 +22,9 @@ CREATE TEXT SEARCH PARSER addr_ts_prs CREATE TABLE addr_nsp.gentable ( a serial primary key CONSTRAINT a_chk CHECK (a > 0), b text DEFAULT 'hello'); +CREATE TABLE addr_nsp.parttable ( + a int PRIMARY KEY +) PARTITION BY RANGE (a); CREATE VIEW addr_nsp.genview AS SELECT * from addr_nsp.gentable; CREATE MATERIALIZED VIEW addr_nsp.genmatview AS SELECT * FROM addr_nsp.gentable; CREATE TYPE addr_nsp.gencomptype AS (a int); @@ -32,6 +35,7 @@ CREATE DOMAIN addr_nsp.gendomain AS int4 CONSTRAINT domconstr CHECK (value > 0); CREATE FUNCTION addr_nsp.trig() RETURNS TRIGGER LANGUAGE plpgsql AS $$ BEGIN END; $$; CREATE TRIGGER t BEFORE INSERT ON addr_nsp.gentable FOR EACH ROW EXECUTE PROCEDURE addr_nsp.trig(); CREATE POLICY genpol ON addr_nsp.gentable; +CREATE PROCEDURE addr_nsp.proc(int4) LANGUAGE SQL AS $$ $$; CREATE SERVER "integer" FOREIGN DATA WRAPPER addr_fdw; CREATE USER MAPPING FOR regress_addr_user SERVER "integer"; ALTER DEFAULT PRIVILEGES FOR ROLE regress_addr_user IN SCHEMA public GRANT ALL ON TABLES TO regress_addr_user; @@ -81,7 +85,7 @@ BEGIN ('table'), ('index'), ('sequence'), ('view'), ('materialized view'), ('foreign table'), ('table column'), ('foreign table column'), - ('aggregate'), ('function'), ('type'), ('cast'), + 
('aggregate'), ('function'), ('procedure'), ('type'), ('cast'), ('table constraint'), ('domain constraint'), ('conversion'), ('default value'), ('operator'), ('operator class'), ('operator family'), ('rule'), ('trigger'), ('text search parser'), ('text search dictionary'), @@ -137,7 +141,9 @@ SELECT pg_get_object_address('subscription', '{one,two}', '{}'); -- test successful cases WITH objects (type, name, args) AS (VALUES ('table', '{addr_nsp, gentable}'::text[], '{}'::text[]), + ('table', '{addr_nsp, parttable}'::text[], '{}'::text[]), ('index', '{addr_nsp, gentable_pkey}', '{}'), + ('index', '{addr_nsp, parttable_pkey}', '{}'), ('sequence', '{addr_nsp, gentable_a_seq}', '{}'), -- toast table ('view', '{addr_nsp, genview}', '{}'), @@ -147,6 +153,7 @@ WITH objects (type, name, args) AS (VALUES ('foreign table column', '{addr_nsp, genftable, a}', '{}'), ('aggregate', '{addr_nsp, genaggr}', '{int4}'), ('function', '{pg_catalog, pg_identify_object}', '{pg_catalog.oid, pg_catalog.oid, int4}'), + ('procedure', '{addr_nsp, proc}', '{int4}'), ('type', '{pg_catalog._int4}', '{}'), ('type', '{addr_nsp.gendomain}', '{}'), ('type', '{addr_nsp.gencomptype}', '{}'), diff --git a/src/test/regress/sql/oidjoins.sql b/src/test/regress/sql/oidjoins.sql index fcf9990f6b..c8291d3973 100644 --- a/src/test/regress/sql/oidjoins.sql +++ b/src/test/regress/sql/oidjoins.sql @@ -185,6 +185,10 @@ SELECT ctid, conindid FROM pg_catalog.pg_constraint fk WHERE conindid != 0 AND NOT EXISTS(SELECT 1 FROM pg_catalog.pg_class pk WHERE pk.oid = fk.conindid); +SELECT ctid, conparentid +FROM pg_catalog.pg_constraint fk +WHERE conparentid != 0 AND + NOT EXISTS(SELECT 1 FROM pg_catalog.pg_constraint pk WHERE pk.oid = fk.conparentid); SELECT ctid, confrelid FROM pg_catalog.pg_constraint fk WHERE confrelid != 0 AND @@ -377,6 +381,10 @@ SELECT ctid, partrelid FROM pg_catalog.pg_partitioned_table fk WHERE partrelid != 0 AND NOT EXISTS(SELECT 1 FROM pg_catalog.pg_class pk WHERE pk.oid = fk.partrelid); +SELECT ctid, partdefid +FROM pg_catalog.pg_partitioned_table fk +WHERE partdefid != 0 AND + NOT EXISTS(SELECT 1 FROM pg_catalog.pg_class pk WHERE pk.oid = fk.partdefid); SELECT ctid, polrelid FROM pg_catalog.pg_policy fk WHERE polrelid != 0 AND diff --git a/src/test/regress/sql/opr_sanity.sql b/src/test/regress/sql/opr_sanity.sql index 2945966c0e..91c68f4204 100644 --- a/src/test/regress/sql/opr_sanity.sql +++ b/src/test/regress/sql/opr_sanity.sql @@ -22,6 +22,8 @@ -- allowed. -- This should match IsBinaryCoercible() in parse_coerce.c. +-- It doesn't currently know about some cases, notably domains, anyelement, +-- anynonarray, anyenum, or record, but it doesn't need to (yet). create function binary_coercible(oid, oid) returns bool as $$ begin if $1 = $2 then return true; end if; @@ -43,9 +45,11 @@ begin end $$ language plpgsql strict stable; --- This one ignores castcontext, so it considers only physical equivalence --- and not whether the coercion can be invoked implicitly. -create function physically_coercible(oid, oid) returns bool as $$ +-- This one ignores castcontext, so it will allow cases where an explicit +-- (but still binary) cast would be required to convert the input type. +-- We don't currently use this for any tests in this file, but it is a +-- reasonable alternative definition for some scenarios. 
+create function explicitly_binary_coercible(oid, oid) returns bool as $$ begin if $1 = $2 then return true; end if; if EXISTS(select 1 from pg_catalog.pg_cast where @@ -82,6 +86,7 @@ WHERE p1.prolang = 0 OR p1.prorettype = 0 OR 0::oid = ANY (p1.proargtypes) OR procost <= 0 OR CASE WHEN proretset THEN prorows <= 0 ELSE prorows != 0 END OR + prokind NOT IN ('f', 'a', 'w', 'p') OR provolatile NOT IN ('i', 's', 'v') OR proparallel NOT IN ('s', 'r', 'u'); @@ -90,10 +95,10 @@ SELECT p1.oid, p1.proname FROM pg_proc as p1 WHERE prosrc IS NULL OR prosrc = '' OR prosrc = '-'; --- proiswindow shouldn't be set together with proisagg or proretset +-- proretset should only be set for normal functions SELECT p1.oid, p1.proname FROM pg_proc AS p1 -WHERE proiswindow AND (proisagg OR proretset); +WHERE proretset AND prokind != 'f'; -- currently, no built-in functions should be SECURITY DEFINER; -- this might change in future, but there will probably never be many. @@ -140,9 +145,9 @@ FROM pg_proc AS p1, pg_proc AS p2 WHERE p1.oid < p2.oid AND p1.prosrc = p2.prosrc AND p1.prolang = 12 AND p2.prolang = 12 AND - (p1.proisagg = false OR p2.proisagg = false) AND + (p1.prokind != 'a' OR p2.prokind != 'a') AND (p1.prolang != p2.prolang OR - p1.proisagg != p2.proisagg OR + p1.prokind != p2.prokind OR p1.prosecdef != p2.prosecdef OR p1.proleakproof != p2.proleakproof OR p1.proisstrict != p2.proisstrict OR @@ -166,7 +171,7 @@ FROM pg_proc AS p1, pg_proc AS p2 WHERE p1.oid != p2.oid AND p1.prosrc = p2.prosrc AND p1.prolang = 12 AND p2.prolang = 12 AND - NOT p1.proisagg AND NOT p2.proisagg AND + p1.prokind != 'a' AND p2.prokind != 'a' AND p1.prosrc NOT LIKE E'range\\_constructor_' AND p2.prosrc NOT LIKE E'range\\_constructor_' AND (p1.prorettype < p2.prorettype) @@ -177,7 +182,7 @@ FROM pg_proc AS p1, pg_proc AS p2 WHERE p1.oid != p2.oid AND p1.prosrc = p2.prosrc AND p1.prolang = 12 AND p2.prolang = 12 AND - NOT p1.proisagg AND NOT p2.proisagg AND + p1.prokind != 'a' AND p2.prokind != 'a' AND p1.prosrc NOT LIKE E'range\\_constructor_' AND p2.prosrc NOT LIKE E'range\\_constructor_' AND (p1.proargtypes[0] < p2.proargtypes[0]) @@ -188,7 +193,7 @@ FROM pg_proc AS p1, pg_proc AS p2 WHERE p1.oid != p2.oid AND p1.prosrc = p2.prosrc AND p1.prolang = 12 AND p2.prolang = 12 AND - NOT p1.proisagg AND NOT p2.proisagg AND + p1.prokind != 'a' AND p2.prokind != 'a' AND p1.prosrc NOT LIKE E'range\\_constructor_' AND p2.prosrc NOT LIKE E'range\\_constructor_' AND (p1.proargtypes[1] < p2.proargtypes[1]) @@ -199,7 +204,7 @@ FROM pg_proc AS p1, pg_proc AS p2 WHERE p1.oid != p2.oid AND p1.prosrc = p2.prosrc AND p1.prolang = 12 AND p2.prolang = 12 AND - NOT p1.proisagg AND NOT p2.proisagg AND + p1.prokind != 'a' AND p2.prokind != 'a' AND (p1.proargtypes[2] < p2.proargtypes[2]) ORDER BY 1, 2; @@ -208,7 +213,7 @@ FROM pg_proc AS p1, pg_proc AS p2 WHERE p1.oid != p2.oid AND p1.prosrc = p2.prosrc AND p1.prolang = 12 AND p2.prolang = 12 AND - NOT p1.proisagg AND NOT p2.proisagg AND + p1.prokind != 'a' AND p2.prokind != 'a' AND (p1.proargtypes[3] < p2.proargtypes[3]) ORDER BY 1, 2; @@ -217,7 +222,7 @@ FROM pg_proc AS p1, pg_proc AS p2 WHERE p1.oid != p2.oid AND p1.prosrc = p2.prosrc AND p1.prolang = 12 AND p2.prolang = 12 AND - NOT p1.proisagg AND NOT p2.proisagg AND + p1.prokind != 'a' AND p2.prokind != 'a' AND (p1.proargtypes[4] < p2.proargtypes[4]) ORDER BY 1, 2; @@ -226,7 +231,7 @@ FROM pg_proc AS p1, pg_proc AS p2 WHERE p1.oid != p2.oid AND p1.prosrc = p2.prosrc AND p1.prolang = 12 AND p2.prolang = 12 AND - NOT p1.proisagg AND NOT 
p2.proisagg AND + p1.prokind != 'a' AND p2.prokind != 'a' AND (p1.proargtypes[5] < p2.proargtypes[5]) ORDER BY 1, 2; @@ -235,7 +240,7 @@ FROM pg_proc AS p1, pg_proc AS p2 WHERE p1.oid != p2.oid AND p1.prosrc = p2.prosrc AND p1.prolang = 12 AND p2.prolang = 12 AND - NOT p1.proisagg AND NOT p2.proisagg AND + p1.prokind != 'a' AND p2.prokind != 'a' AND (p1.proargtypes[6] < p2.proargtypes[6]) ORDER BY 1, 2; @@ -244,7 +249,7 @@ FROM pg_proc AS p1, pg_proc AS p2 WHERE p1.oid != p2.oid AND p1.prosrc = p2.prosrc AND p1.prolang = 12 AND p2.prolang = 12 AND - NOT p1.proisagg AND NOT p2.proisagg AND + p1.prokind != 'a' AND p2.prokind != 'a' AND (p1.proargtypes[7] < p2.proargtypes[7]) ORDER BY 1, 2; @@ -740,7 +745,7 @@ WHERE d.classoid IS NULL AND p1.oid <= 9999; -- Check that operators' underlying functions have suitable comments, -- namely 'implementation of XXX operator'. (Note: it's not necessary to --- put such comments into pg_proc.h; initdb will generate them as needed.) +-- put such comments into pg_proc.dat; initdb will generate them as needed.) -- In some cases involving legacy names for operators, there are multiple -- operators referencing the same pg_proc entry, so ignore operators whose -- comments say they are deprecated. @@ -795,6 +800,8 @@ WHERE aggfnoid = 0 OR aggtransfn = 0 OR aggkind NOT IN ('n', 'o', 'h') OR aggnumdirectargs < 0 OR (aggkind = 'n' AND aggnumdirectargs > 0) OR + aggfinalmodify NOT IN ('r', 's', 'w') OR + aggmfinalmodify NOT IN ('r', 's', 'w') OR aggtranstype = 0 OR aggtransspace < 0 OR aggmtransspace < 0; -- Make sure the matching pg_proc entry is sensible, too. @@ -802,13 +809,13 @@ WHERE aggfnoid = 0 OR aggtransfn = 0 OR SELECT a.aggfnoid::oid, p.proname FROM pg_aggregate as a, pg_proc as p WHERE a.aggfnoid = p.oid AND - (NOT p.proisagg OR p.proretset OR p.pronargs < a.aggnumdirectargs); + (p.prokind != 'a' OR p.proretset OR p.pronargs < a.aggnumdirectargs); --- Make sure there are no proisagg pg_proc entries without matches. +-- Make sure there are no prokind = PROKIND_AGGREGATE pg_proc entries without matches. SELECT oid, proname FROM pg_proc as p -WHERE p.proisagg AND +WHERE p.prokind = 'a' AND NOT EXISTS (SELECT 1 FROM pg_aggregate a WHERE a.aggfnoid = p.oid); -- If there is no finalfn then the output type must be the transtype. @@ -819,8 +826,6 @@ WHERE a.aggfnoid = p.oid AND a.aggfinalfn = 0 AND p.prorettype != a.aggtranstype; -- Cross-check transfn against its entry in pg_proc. --- NOTE: use physically_coercible here, not binary_coercible, because --- max and min on abstime are implemented using int4larger/int4smaller. 
SELECT a.aggfnoid::oid, p.proname, ptr.oid, ptr.proname FROM pg_aggregate AS a, pg_proc AS p, pg_proc AS ptr WHERE a.aggfnoid = p.oid AND @@ -829,15 +834,16 @@ WHERE a.aggfnoid = p.oid AND OR NOT (ptr.pronargs = CASE WHEN a.aggkind = 'n' THEN p.pronargs + 1 ELSE greatest(p.pronargs - a.aggnumdirectargs, 1) + 1 END) - OR NOT physically_coercible(ptr.prorettype, a.aggtranstype) - OR NOT physically_coercible(a.aggtranstype, ptr.proargtypes[0]) + OR NOT binary_coercible(ptr.prorettype, a.aggtranstype) + OR NOT binary_coercible(a.aggtranstype, ptr.proargtypes[0]) OR (p.pronargs > 0 AND - NOT physically_coercible(p.proargtypes[0], ptr.proargtypes[1])) + NOT binary_coercible(p.proargtypes[0], ptr.proargtypes[1])) OR (p.pronargs > 1 AND - NOT physically_coercible(p.proargtypes[1], ptr.proargtypes[2])) + NOT binary_coercible(p.proargtypes[1], ptr.proargtypes[2])) OR (p.pronargs > 2 AND - NOT physically_coercible(p.proargtypes[2], ptr.proargtypes[3])) + NOT binary_coercible(p.proargtypes[2], ptr.proargtypes[3])) -- we could carry the check further, but 3 args is enough for now + OR (p.pronargs > 3) ); -- Cross-check finalfn (if present) against its entry in pg_proc. @@ -857,7 +863,8 @@ WHERE a.aggfnoid = p.oid AND NOT binary_coercible(p.proargtypes[1], pfn.proargtypes[2])) OR (pfn.pronargs > 3 AND NOT binary_coercible(p.proargtypes[2], pfn.proargtypes[3])) - -- we could carry the check further, but 3 args is enough for now + -- we could carry the check further, but 4 args is enough for now + OR (pfn.pronargs > 4) ); -- If transfn is strict then either initval should be non-NULL, or @@ -901,15 +908,16 @@ WHERE a.aggfnoid = p.oid AND OR NOT (ptr.pronargs = CASE WHEN a.aggkind = 'n' THEN p.pronargs + 1 ELSE greatest(p.pronargs - a.aggnumdirectargs, 1) + 1 END) - OR NOT physically_coercible(ptr.prorettype, a.aggmtranstype) - OR NOT physically_coercible(a.aggmtranstype, ptr.proargtypes[0]) + OR NOT binary_coercible(ptr.prorettype, a.aggmtranstype) + OR NOT binary_coercible(a.aggmtranstype, ptr.proargtypes[0]) OR (p.pronargs > 0 AND - NOT physically_coercible(p.proargtypes[0], ptr.proargtypes[1])) + NOT binary_coercible(p.proargtypes[0], ptr.proargtypes[1])) OR (p.pronargs > 1 AND - NOT physically_coercible(p.proargtypes[1], ptr.proargtypes[2])) + NOT binary_coercible(p.proargtypes[1], ptr.proargtypes[2])) OR (p.pronargs > 2 AND - NOT physically_coercible(p.proargtypes[2], ptr.proargtypes[3])) + NOT binary_coercible(p.proargtypes[2], ptr.proargtypes[3])) -- we could carry the check further, but 3 args is enough for now + OR (p.pronargs > 3) ); -- Cross-check minvtransfn (if present) against its entry in pg_proc. 
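For readers following these aggregate cross-checks, the distinction being tightened here rests on what pg_cast records: both helper functions in this file accept casts with castmethod = 'b' (no conversion function), and, per the comments earlier in the diff, binary_coercible() additionally insists on castcontext = 'i' while the renamed explicitly_binary_coercible() ignores castcontext. A minimal standalone query in the same spirit — an illustrative sketch only, not part of the patch or the regression suite — lists the catalog entries these helpers consult:

-- Illustrative only: binary (function-free) casts known to the catalogs,
-- with the cast context that separates the two coercibility helpers.
SELECT castsource::regtype AS source_type,
       casttarget::regtype AS target_type,
       castcontext            -- 'i' = implicit, 'a' = assignment, 'e' = explicit
FROM pg_catalog.pg_cast
WHERE castmethod = 'b'        -- binary-compatible: no conversion function invoked
ORDER BY 1, 2;

Rows with castcontext = 'i' are the pairs binary_coercible() will accept beyond exact type matches; the remaining rows are only reachable through the castcontext-ignoring variant.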
@@ -921,15 +929,16 @@ WHERE a.aggfnoid = p.oid AND OR NOT (ptr.pronargs = CASE WHEN a.aggkind = 'n' THEN p.pronargs + 1 ELSE greatest(p.pronargs - a.aggnumdirectargs, 1) + 1 END) - OR NOT physically_coercible(ptr.prorettype, a.aggmtranstype) - OR NOT physically_coercible(a.aggmtranstype, ptr.proargtypes[0]) + OR NOT binary_coercible(ptr.prorettype, a.aggmtranstype) + OR NOT binary_coercible(a.aggmtranstype, ptr.proargtypes[0]) OR (p.pronargs > 0 AND - NOT physically_coercible(p.proargtypes[0], ptr.proargtypes[1])) + NOT binary_coercible(p.proargtypes[0], ptr.proargtypes[1])) OR (p.pronargs > 1 AND - NOT physically_coercible(p.proargtypes[1], ptr.proargtypes[2])) + NOT binary_coercible(p.proargtypes[1], ptr.proargtypes[2])) OR (p.pronargs > 2 AND - NOT physically_coercible(p.proargtypes[2], ptr.proargtypes[3])) + NOT binary_coercible(p.proargtypes[2], ptr.proargtypes[3])) -- we could carry the check further, but 3 args is enough for now + OR (p.pronargs > 3) ); -- Cross-check mfinalfn (if present) against its entry in pg_proc. @@ -949,7 +958,8 @@ WHERE a.aggfnoid = p.oid AND NOT binary_coercible(p.proargtypes[1], pfn.proargtypes[2])) OR (pfn.pronargs > 3 AND NOT binary_coercible(p.proargtypes[2], pfn.proargtypes[3])) - -- we could carry the check further, but 3 args is enough for now + -- we could carry the check further, but 4 args is enough for now + OR (pfn.pronargs > 4) ); -- If mtransfn is strict then either minitval should be non-NULL, or @@ -974,8 +984,6 @@ WHERE a.aggfnoid = p.oid AND -- Check that all combine functions have signature -- combine(transtype, transtype) returns transtype --- NOTE: use physically_coercible here, not binary_coercible, because --- max and min on abstime are implemented using int4larger/int4smaller. SELECT a.aggfnoid, p.proname FROM pg_aggregate as a, pg_proc as p @@ -983,7 +991,7 @@ WHERE a.aggcombinefn = p.oid AND (p.pronargs != 2 OR p.prorettype != p.proargtypes[0] OR p.prorettype != p.proargtypes[1] OR - NOT physically_coercible(a.aggtranstype, p.proargtypes[0])); + NOT binary_coercible(a.aggtranstype, p.proargtypes[0])); -- Check that no combine function for an INTERNAL transtype is strict. @@ -1087,7 +1095,7 @@ ORDER BY 1, 2; SELECT p1.oid::regprocedure, p2.oid::regprocedure FROM pg_proc AS p1, pg_proc AS p2 WHERE p1.oid < p2.oid AND p1.proname = p2.proname AND - p1.proisagg AND p2.proisagg AND + p1.prokind = 'a' AND p2.prokind = 'a' AND array_dims(p1.proargtypes) != array_dims(p2.proargtypes) ORDER BY 1; @@ -1095,7 +1103,7 @@ ORDER BY 1; SELECT oid, proname FROM pg_proc AS p -WHERE proisagg AND proargdefaults IS NOT NULL; +WHERE prokind = 'a' AND proargdefaults IS NOT NULL; -- For the same reason, we avoid creating built-in variadic aggregates, except -- that variadic ordered-set aggregates are OK (since they have special syntax @@ -1103,7 +1111,7 @@ WHERE proisagg AND proargdefaults IS NOT NULL; SELECT p.oid, proname FROM pg_proc AS p JOIN pg_aggregate AS a ON a.aggfnoid = p.oid -WHERE proisagg AND provariadic != 0 AND a.aggkind = 'n'; +WHERE prokind = 'a' AND provariadic != 0 AND a.aggkind = 'n'; -- **************** pg_opfamily **************** @@ -1114,6 +1122,14 @@ SELECT p1.oid FROM pg_opfamily as p1 WHERE p1.opfmethod = 0 OR p1.opfnamespace = 0; +-- Look for opfamilies having no opclasses. While most validation of +-- opfamilies is now handled by AM-specific amvalidate functions, that's +-- driven from pg_opclass entries below, so an empty opfamily would not +-- get noticed. 
+ +SELECT oid, opfname FROM pg_opfamily f +WHERE NOT EXISTS (SELECT 1 FROM pg_opclass WHERE opcfamily = f.oid); + -- **************** pg_opclass **************** diff --git a/src/test/regress/sql/partition_aggregate.sql b/src/test/regress/sql/partition_aggregate.sql new file mode 100644 index 0000000000..c387d64db3 --- /dev/null +++ b/src/test/regress/sql/partition_aggregate.sql @@ -0,0 +1,325 @@ +-- +-- PARTITION_AGGREGATE +-- Test partitionwise aggregation on partitioned tables +-- + +-- Enable partitionwise aggregate, which by default is disabled. +SET enable_partitionwise_aggregate TO true; +-- Enable partitionwise join, which by default is disabled. +SET enable_partitionwise_join TO true; +-- Disable parallel plans. +SET max_parallel_workers_per_gather TO 0; + +-- +-- Tests for list partitioned tables. +-- +CREATE TABLE pagg_tab (a int, b int, c text, d int) PARTITION BY LIST(c); +CREATE TABLE pagg_tab_p1 PARTITION OF pagg_tab FOR VALUES IN ('0000', '0001', '0002', '0003'); +CREATE TABLE pagg_tab_p2 PARTITION OF pagg_tab FOR VALUES IN ('0004', '0005', '0006', '0007'); +CREATE TABLE pagg_tab_p3 PARTITION OF pagg_tab FOR VALUES IN ('0008', '0009', '0010', '0011'); +INSERT INTO pagg_tab SELECT i % 20, i % 30, to_char(i % 12, 'FM0000'), i % 30 FROM generate_series(0, 2999) i; +ANALYZE pagg_tab; + +-- When GROUP BY clause matches; full aggregation is performed for each partition. +EXPLAIN (COSTS OFF) +SELECT c, sum(a), avg(b), count(*), min(a), max(b) FROM pagg_tab GROUP BY c HAVING avg(d) < 15 ORDER BY 1, 2, 3; +SELECT c, sum(a), avg(b), count(*), min(a), max(b) FROM pagg_tab GROUP BY c HAVING avg(d) < 15 ORDER BY 1, 2, 3; + +-- When GROUP BY clause does not match; partial aggregation is performed for each partition. +EXPLAIN (COSTS OFF) +SELECT a, sum(b), avg(b), count(*), min(a), max(b) FROM pagg_tab GROUP BY a HAVING avg(d) < 15 ORDER BY 1, 2, 3; +SELECT a, sum(b), avg(b), count(*), min(a), max(b) FROM pagg_tab GROUP BY a HAVING avg(d) < 15 ORDER BY 1, 2, 3; + +-- Check with multiple columns in GROUP BY +EXPLAIN (COSTS OFF) +SELECT a, c, count(*) FROM pagg_tab GROUP BY a, c; +-- Check with multiple columns in GROUP BY, order in GROUP BY is reversed +EXPLAIN (COSTS OFF) +SELECT a, c, count(*) FROM pagg_tab GROUP BY c, a; +-- Check with multiple columns in GROUP BY, order in target-list is reversed +EXPLAIN (COSTS OFF) +SELECT c, a, count(*) FROM pagg_tab GROUP BY a, c; + +-- Test when input relation for grouping is dummy +EXPLAIN (COSTS OFF) +SELECT c, sum(a) FROM pagg_tab WHERE 1 = 2 GROUP BY c; +SELECT c, sum(a) FROM pagg_tab WHERE 1 = 2 GROUP BY c; +EXPLAIN (COSTS OFF) +SELECT c, sum(a) FROM pagg_tab WHERE c = 'x' GROUP BY c; +SELECT c, sum(a) FROM pagg_tab WHERE c = 'x' GROUP BY c; + +-- Test GroupAggregate paths by disabling hash aggregates. +SET enable_hashagg TO false; + +-- When GROUP BY clause matches full aggregation is performed for each partition. +EXPLAIN (COSTS OFF) +SELECT c, sum(a), avg(b), count(*) FROM pagg_tab GROUP BY 1 HAVING avg(d) < 15 ORDER BY 1, 2, 3; +SELECT c, sum(a), avg(b), count(*) FROM pagg_tab GROUP BY 1 HAVING avg(d) < 15 ORDER BY 1, 2, 3; + +-- When GROUP BY clause does not match; partial aggregation is performed for each partition. 
+EXPLAIN (COSTS OFF) +SELECT a, sum(b), avg(b), count(*) FROM pagg_tab GROUP BY 1 HAVING avg(d) < 15 ORDER BY 1, 2, 3; +SELECT a, sum(b), avg(b), count(*) FROM pagg_tab GROUP BY 1 HAVING avg(d) < 15 ORDER BY 1, 2, 3; + +-- Test partitionwise grouping without any aggregates +EXPLAIN (COSTS OFF) +SELECT c FROM pagg_tab GROUP BY c ORDER BY 1; +SELECT c FROM pagg_tab GROUP BY c ORDER BY 1; +EXPLAIN (COSTS OFF) +SELECT a FROM pagg_tab WHERE a < 3 GROUP BY a ORDER BY 1; +SELECT a FROM pagg_tab WHERE a < 3 GROUP BY a ORDER BY 1; + +RESET enable_hashagg; + +-- ROLLUP, partitionwise aggregation does not apply +EXPLAIN (COSTS OFF) +SELECT c, sum(a) FROM pagg_tab GROUP BY rollup(c) ORDER BY 1, 2; + +-- ORDERED SET within the aggregate. +-- Full aggregation; since all the rows that belong to the same group come +-- from the same partition, having an ORDER BY within the aggregate doesn't +-- make any difference. +EXPLAIN (COSTS OFF) +SELECT c, sum(b order by a) FROM pagg_tab GROUP BY c ORDER BY 1, 2; +-- Since GROUP BY clause does not match with PARTITION KEY; we need to do +-- partial aggregation. However, ORDERED SET are not partial safe and thus +-- partitionwise aggregation plan is not generated. +EXPLAIN (COSTS OFF) +SELECT a, sum(b order by a) FROM pagg_tab GROUP BY a ORDER BY 1, 2; + + +-- JOIN query + +CREATE TABLE pagg_tab1(x int, y int) PARTITION BY RANGE(x); +CREATE TABLE pagg_tab1_p1 PARTITION OF pagg_tab1 FOR VALUES FROM (0) TO (10); +CREATE TABLE pagg_tab1_p2 PARTITION OF pagg_tab1 FOR VALUES FROM (10) TO (20); +CREATE TABLE pagg_tab1_p3 PARTITION OF pagg_tab1 FOR VALUES FROM (20) TO (30); + +CREATE TABLE pagg_tab2(x int, y int) PARTITION BY RANGE(y); +CREATE TABLE pagg_tab2_p1 PARTITION OF pagg_tab2 FOR VALUES FROM (0) TO (10); +CREATE TABLE pagg_tab2_p2 PARTITION OF pagg_tab2 FOR VALUES FROM (10) TO (20); +CREATE TABLE pagg_tab2_p3 PARTITION OF pagg_tab2 FOR VALUES FROM (20) TO (30); + +INSERT INTO pagg_tab1 SELECT i % 30, i % 20 FROM generate_series(0, 299, 2) i; +INSERT INTO pagg_tab2 SELECT i % 20, i % 30 FROM generate_series(0, 299, 3) i; + +ANALYZE pagg_tab1; +ANALYZE pagg_tab2; + +-- When GROUP BY clause matches; full aggregation is performed for each partition. +EXPLAIN (COSTS OFF) +SELECT t1.x, sum(t1.y), count(*) FROM pagg_tab1 t1, pagg_tab2 t2 WHERE t1.x = t2.y GROUP BY t1.x ORDER BY 1, 2, 3; +SELECT t1.x, sum(t1.y), count(*) FROM pagg_tab1 t1, pagg_tab2 t2 WHERE t1.x = t2.y GROUP BY t1.x ORDER BY 1, 2, 3; + +-- Check with whole-row reference; partitionwise aggregation does not apply +EXPLAIN (COSTS OFF) +SELECT t1.x, sum(t1.y), count(t1) FROM pagg_tab1 t1, pagg_tab2 t2 WHERE t1.x = t2.y GROUP BY t1.x ORDER BY 1, 2, 3; +SELECT t1.x, sum(t1.y), count(t1) FROM pagg_tab1 t1, pagg_tab2 t2 WHERE t1.x = t2.y GROUP BY t1.x ORDER BY 1, 2, 3; + +-- GROUP BY having other matching key +EXPLAIN (COSTS OFF) +SELECT t2.y, sum(t1.y), count(*) FROM pagg_tab1 t1, pagg_tab2 t2 WHERE t1.x = t2.y GROUP BY t2.y ORDER BY 1, 2, 3; + +-- When GROUP BY clause does not match; partial aggregation is performed for each partition. +-- Also test GroupAggregate paths by disabling hash aggregates. 
+SET enable_hashagg TO false; +EXPLAIN (COSTS OFF) +SELECT t1.y, sum(t1.x), count(*) FROM pagg_tab1 t1, pagg_tab2 t2 WHERE t1.x = t2.y GROUP BY t1.y HAVING avg(t1.x) > 10 ORDER BY 1, 2, 3; +SELECT t1.y, sum(t1.x), count(*) FROM pagg_tab1 t1, pagg_tab2 t2 WHERE t1.x = t2.y GROUP BY t1.y HAVING avg(t1.x) > 10 ORDER BY 1, 2, 3; +RESET enable_hashagg; + +-- Check with LEFT/RIGHT/FULL OUTER JOINs which produces NULL values for +-- aggregation + +-- LEFT JOIN, should produce partial partitionwise aggregation plan as +-- GROUP BY is on nullable column +EXPLAIN (COSTS OFF) +SELECT b.y, sum(a.y) FROM pagg_tab1 a LEFT JOIN pagg_tab2 b ON a.x = b.y GROUP BY b.y ORDER BY 1 NULLS LAST; +SELECT b.y, sum(a.y) FROM pagg_tab1 a LEFT JOIN pagg_tab2 b ON a.x = b.y GROUP BY b.y ORDER BY 1 NULLS LAST; + +-- RIGHT JOIN, should produce full partitionwise aggregation plan as +-- GROUP BY is on non-nullable column +EXPLAIN (COSTS OFF) +SELECT b.y, sum(a.y) FROM pagg_tab1 a RIGHT JOIN pagg_tab2 b ON a.x = b.y GROUP BY b.y ORDER BY 1 NULLS LAST; +SELECT b.y, sum(a.y) FROM pagg_tab1 a RIGHT JOIN pagg_tab2 b ON a.x = b.y GROUP BY b.y ORDER BY 1 NULLS LAST; + +-- FULL JOIN, should produce partial partitionwise aggregation plan as +-- GROUP BY is on nullable column +EXPLAIN (COSTS OFF) +SELECT a.x, sum(b.x) FROM pagg_tab1 a FULL OUTER JOIN pagg_tab2 b ON a.x = b.y GROUP BY a.x ORDER BY 1 NULLS LAST; +SELECT a.x, sum(b.x) FROM pagg_tab1 a FULL OUTER JOIN pagg_tab2 b ON a.x = b.y GROUP BY a.x ORDER BY 1 NULLS LAST; + +-- LEFT JOIN, with dummy relation on right side, +-- should produce full partitionwise aggregation plan as GROUP BY is on +-- non-nullable columns +EXPLAIN (COSTS OFF) +SELECT a.x, b.y, count(*) FROM (SELECT * FROM pagg_tab1 WHERE x < 20) a LEFT JOIN (SELECT * FROM pagg_tab2 WHERE y > 10) b ON a.x = b.y WHERE a.x > 5 or b.y < 20 GROUP BY a.x, b.y ORDER BY 1, 2; +SELECT a.x, b.y, count(*) FROM (SELECT * FROM pagg_tab1 WHERE x < 20) a LEFT JOIN (SELECT * FROM pagg_tab2 WHERE y > 10) b ON a.x = b.y WHERE a.x > 5 or b.y < 20 GROUP BY a.x, b.y ORDER BY 1, 2; + +-- FULL JOIN, with dummy relations on both sides, +-- should produce partial partitionwise aggregation plan as GROUP BY is on +-- nullable columns +EXPLAIN (COSTS OFF) +SELECT a.x, b.y, count(*) FROM (SELECT * FROM pagg_tab1 WHERE x < 20) a FULL JOIN (SELECT * FROM pagg_tab2 WHERE y > 10) b ON a.x = b.y WHERE a.x > 5 or b.y < 20 GROUP BY a.x, b.y ORDER BY 1, 2; +SELECT a.x, b.y, count(*) FROM (SELECT * FROM pagg_tab1 WHERE x < 20) a FULL JOIN (SELECT * FROM pagg_tab2 WHERE y > 10) b ON a.x = b.y WHERE a.x > 5 or b.y < 20 GROUP BY a.x, b.y ORDER BY 1, 2; + +-- Empty join relation because of empty outer side, no partitionwise agg plan +EXPLAIN (COSTS OFF) +SELECT a.x, a.y, count(*) FROM (SELECT * FROM pagg_tab1 WHERE x = 1 AND x = 2) a LEFT JOIN pagg_tab2 b ON a.x = b.y GROUP BY a.x, a.y ORDER BY 1, 2; +SELECT a.x, a.y, count(*) FROM (SELECT * FROM pagg_tab1 WHERE x = 1 AND x = 2) a LEFT JOIN pagg_tab2 b ON a.x = b.y GROUP BY a.x, a.y ORDER BY 1, 2; + + +-- Partition by multiple columns + +CREATE TABLE pagg_tab_m (a int, b int, c int) PARTITION BY RANGE(a, ((a+b)/2)); +CREATE TABLE pagg_tab_m_p1 PARTITION OF pagg_tab_m FOR VALUES FROM (0, 0) TO (10, 10); +CREATE TABLE pagg_tab_m_p2 PARTITION OF pagg_tab_m FOR VALUES FROM (10, 10) TO (20, 20); +CREATE TABLE pagg_tab_m_p3 PARTITION OF pagg_tab_m FOR VALUES FROM (20, 20) TO (30, 30); +INSERT INTO pagg_tab_m SELECT i % 30, i % 40, i % 50 FROM generate_series(0, 2999) i; +ANALYZE pagg_tab_m; + +-- Partial 
aggregation as GROUP BY clause does not match with PARTITION KEY +EXPLAIN (COSTS OFF) +SELECT a, sum(b), avg(c), count(*) FROM pagg_tab_m GROUP BY a HAVING avg(c) < 22 ORDER BY 1, 2, 3; +SELECT a, sum(b), avg(c), count(*) FROM pagg_tab_m GROUP BY a HAVING avg(c) < 22 ORDER BY 1, 2, 3; + +-- Full aggregation as GROUP BY clause matches with PARTITION KEY +EXPLAIN (COSTS OFF) +SELECT a, sum(b), avg(c), count(*) FROM pagg_tab_m GROUP BY a, (a+b)/2 HAVING sum(b) < 50 ORDER BY 1, 2, 3; +SELECT a, sum(b), avg(c), count(*) FROM pagg_tab_m GROUP BY a, (a+b)/2 HAVING sum(b) < 50 ORDER BY 1, 2, 3; + +-- Full aggregation as PARTITION KEY is part of GROUP BY clause +EXPLAIN (COSTS OFF) +SELECT a, c, sum(b), avg(c), count(*) FROM pagg_tab_m GROUP BY (a+b)/2, 2, 1 HAVING sum(b) = 50 AND avg(c) > 25 ORDER BY 1, 2, 3; +SELECT a, c, sum(b), avg(c), count(*) FROM pagg_tab_m GROUP BY (a+b)/2, 2, 1 HAVING sum(b) = 50 AND avg(c) > 25 ORDER BY 1, 2, 3; + + +-- Test with multi-level partitioning scheme + +CREATE TABLE pagg_tab_ml (a int, b int, c text) PARTITION BY RANGE(a); +CREATE TABLE pagg_tab_ml_p1 PARTITION OF pagg_tab_ml FOR VALUES FROM (0) TO (10); +CREATE TABLE pagg_tab_ml_p2 PARTITION OF pagg_tab_ml FOR VALUES FROM (10) TO (20) PARTITION BY LIST (c); +CREATE TABLE pagg_tab_ml_p2_s1 PARTITION OF pagg_tab_ml_p2 FOR VALUES IN ('0000', '0001'); +CREATE TABLE pagg_tab_ml_p2_s2 PARTITION OF pagg_tab_ml_p2 FOR VALUES IN ('0002', '0003'); + +-- This level of partitioning has different column positions than the parent +CREATE TABLE pagg_tab_ml_p3(b int, c text, a int) PARTITION BY RANGE (b); +CREATE TABLE pagg_tab_ml_p3_s1(c text, a int, b int); +CREATE TABLE pagg_tab_ml_p3_s2 PARTITION OF pagg_tab_ml_p3 FOR VALUES FROM (5) TO (10); + +ALTER TABLE pagg_tab_ml_p3 ATTACH PARTITION pagg_tab_ml_p3_s1 FOR VALUES FROM (0) TO (5); +ALTER TABLE pagg_tab_ml ATTACH PARTITION pagg_tab_ml_p3 FOR VALUES FROM (20) TO (30); + +INSERT INTO pagg_tab_ml SELECT i % 30, i % 10, to_char(i % 4, 'FM0000') FROM generate_series(0, 29999) i; +ANALYZE pagg_tab_ml; + +-- For Parallel Append +SET max_parallel_workers_per_gather TO 2; + +-- Full aggregation at level 1 as GROUP BY clause matches with PARTITION KEY +-- for level 1 only. For subpartitions, GROUP BY clause does not match with +-- PARTITION KEY, but still we do not see a partial aggregation as array_agg() +-- is not partial agg safe. +EXPLAIN (COSTS OFF) +SELECT a, sum(b), array_agg(distinct c), count(*) FROM pagg_tab_ml GROUP BY a HAVING avg(b) < 3 ORDER BY 1, 2, 3; +SELECT a, sum(b), array_agg(distinct c), count(*) FROM pagg_tab_ml GROUP BY a HAVING avg(b) < 3 ORDER BY 1, 2, 3; + +-- Without ORDER BY clause, to test Gather at top-most path +EXPLAIN (COSTS OFF) +SELECT a, sum(b), array_agg(distinct c), count(*) FROM pagg_tab_ml GROUP BY a HAVING avg(b) < 3; + +-- Full aggregation at level 1 as GROUP BY clause matches with PARTITION KEY +-- for level 1 only. For subpartitions, GROUP BY clause does not match with +-- PARTITION KEY, thus we will have a partial aggregation for them. 
+EXPLAIN (COSTS OFF) +SELECT a, sum(b), count(*) FROM pagg_tab_ml GROUP BY a HAVING avg(b) < 3 ORDER BY 1, 2, 3; +SELECT a, sum(b), count(*) FROM pagg_tab_ml GROUP BY a HAVING avg(b) < 3 ORDER BY 1, 2, 3; + +-- Partial aggregation at all levels as GROUP BY clause does not match with +-- PARTITION KEY +EXPLAIN (COSTS OFF) +SELECT b, sum(a), count(*) FROM pagg_tab_ml GROUP BY b ORDER BY 1, 2, 3; +SELECT b, sum(a), count(*) FROM pagg_tab_ml GROUP BY b HAVING avg(a) < 15 ORDER BY 1, 2, 3; + +-- Full aggregation at all levels as GROUP BY clause matches with PARTITION KEY +EXPLAIN (COSTS OFF) +SELECT a, sum(b), count(*) FROM pagg_tab_ml GROUP BY a, b, c HAVING avg(b) > 7 ORDER BY 1, 2, 3; +SELECT a, sum(b), count(*) FROM pagg_tab_ml GROUP BY a, b, c HAVING avg(b) > 7 ORDER BY 1, 2, 3; + +-- Parallelism within partitionwise aggregates + +SET min_parallel_table_scan_size TO '8kB'; +SET parallel_setup_cost TO 0; + +-- Full aggregation at level 1 as GROUP BY clause matches with PARTITION KEY +-- for level 1 only. For subpartitions, GROUP BY clause does not match with +-- PARTITION KEY, thus we will have a partial aggregation for them. +EXPLAIN (COSTS OFF) +SELECT a, sum(b), count(*) FROM pagg_tab_ml GROUP BY a HAVING avg(b) < 3 ORDER BY 1, 2, 3; +SELECT a, sum(b), count(*) FROM pagg_tab_ml GROUP BY a HAVING avg(b) < 3 ORDER BY 1, 2, 3; + +-- Partial aggregation at all levels as GROUP BY clause does not match with +-- PARTITION KEY +EXPLAIN (COSTS OFF) +SELECT b, sum(a), count(*) FROM pagg_tab_ml GROUP BY b ORDER BY 1, 2, 3; +SELECT b, sum(a), count(*) FROM pagg_tab_ml GROUP BY b HAVING avg(a) < 15 ORDER BY 1, 2, 3; + +-- Full aggregation at all levels as GROUP BY clause matches with PARTITION KEY +EXPLAIN (COSTS OFF) +SELECT a, sum(b), count(*) FROM pagg_tab_ml GROUP BY a, b, c HAVING avg(b) > 7 ORDER BY 1, 2, 3; +SELECT a, sum(b), count(*) FROM pagg_tab_ml GROUP BY a, b, c HAVING avg(b) > 7 ORDER BY 1, 2, 3; + + +-- Parallelism within partitionwise aggregates (single level) + +-- Add few parallel setup cost, so that we will see a plan which gathers +-- partially created paths even for full aggregation and sticks a single Gather +-- followed by finalization step. +-- Without this, the cost of doing partial aggregation + Gather + finalization +-- for each partition and then Append over it turns out to be same and this +-- wins as we add it first. This parallel_setup_cost plays a vital role in +-- costing such plans. +SET parallel_setup_cost TO 10; + +CREATE TABLE pagg_tab_para(x int, y int) PARTITION BY RANGE(x); +CREATE TABLE pagg_tab_para_p1 PARTITION OF pagg_tab_para FOR VALUES FROM (0) TO (10); +CREATE TABLE pagg_tab_para_p2 PARTITION OF pagg_tab_para FOR VALUES FROM (10) TO (20); +CREATE TABLE pagg_tab_para_p3 PARTITION OF pagg_tab_para FOR VALUES FROM (20) TO (30); + +INSERT INTO pagg_tab_para SELECT i % 30, i % 20 FROM generate_series(0, 29999) i; + +ANALYZE pagg_tab_para; + +-- When GROUP BY clause matches; full aggregation is performed for each partition. +EXPLAIN (COSTS OFF) +SELECT x, sum(y), avg(y), count(*) FROM pagg_tab_para GROUP BY x HAVING avg(y) < 7 ORDER BY 1, 2, 3; +SELECT x, sum(y), avg(y), count(*) FROM pagg_tab_para GROUP BY x HAVING avg(y) < 7 ORDER BY 1, 2, 3; + +-- When GROUP BY clause does not match; partial aggregation is performed for each partition. 
+EXPLAIN (COSTS OFF) +SELECT y, sum(x), avg(x), count(*) FROM pagg_tab_para GROUP BY y HAVING avg(x) < 12 ORDER BY 1, 2, 3; +SELECT y, sum(x), avg(x), count(*) FROM pagg_tab_para GROUP BY y HAVING avg(x) < 12 ORDER BY 1, 2, 3; + +-- Test when parent can produce parallel paths but not any (or some) of its children +ALTER TABLE pagg_tab_para_p1 SET (parallel_workers = 0); +ALTER TABLE pagg_tab_para_p3 SET (parallel_workers = 0); +ANALYZE pagg_tab_para; + +EXPLAIN (COSTS OFF) +SELECT x, sum(y), avg(y), count(*) FROM pagg_tab_para GROUP BY x HAVING avg(y) < 7 ORDER BY 1, 2, 3; +SELECT x, sum(y), avg(y), count(*) FROM pagg_tab_para GROUP BY x HAVING avg(y) < 7 ORDER BY 1, 2, 3; + +ALTER TABLE pagg_tab_para_p2 SET (parallel_workers = 0); +ANALYZE pagg_tab_para; + +EXPLAIN (COSTS OFF) +SELECT x, sum(y), avg(y), count(*) FROM pagg_tab_para GROUP BY x HAVING avg(y) < 7 ORDER BY 1, 2, 3; +SELECT x, sum(y), avg(y), count(*) FROM pagg_tab_para GROUP BY x HAVING avg(y) < 7 ORDER BY 1, 2, 3; + +-- Reset parallelism parameters to get partitionwise aggregation plan. +RESET min_parallel_table_scan_size; +RESET parallel_setup_cost; + +EXPLAIN (COSTS OFF) +SELECT x, sum(y), avg(y), count(*) FROM pagg_tab_para GROUP BY x HAVING avg(y) < 7 ORDER BY 1, 2, 3; +SELECT x, sum(y), avg(y), count(*) FROM pagg_tab_para GROUP BY x HAVING avg(y) < 7 ORDER BY 1, 2, 3; diff --git a/src/test/regress/sql/partition_info.sql b/src/test/regress/sql/partition_info.sql new file mode 100644 index 0000000000..5a76f22b05 --- /dev/null +++ b/src/test/regress/sql/partition_info.sql @@ -0,0 +1,68 @@ +-- +-- Tests for pg_partition_tree +-- +SELECT * FROM pg_partition_tree(NULL); + +-- Test table partition trees +CREATE TABLE ptif_test (a int, b int) PARTITION BY range (a); +CREATE TABLE ptif_test0 PARTITION OF ptif_test + FOR VALUES FROM (minvalue) TO (0) PARTITION BY list (b); +CREATE TABLE ptif_test01 PARTITION OF ptif_test0 FOR VALUES IN (1); +CREATE TABLE ptif_test1 PARTITION OF ptif_test + FOR VALUES FROM (0) TO (100) PARTITION BY list (b); +CREATE TABLE ptif_test11 PARTITION OF ptif_test1 FOR VALUES IN (1); +CREATE TABLE ptif_test2 PARTITION OF ptif_test + FOR VALUES FROM (100) TO (maxvalue); + +-- Test index partition tree +CREATE INDEX ptif_test_index ON ONLY ptif_test (a); +CREATE INDEX ptif_test0_index ON ONLY ptif_test0 (a); +ALTER INDEX ptif_test_index ATTACH PARTITION ptif_test0_index; +CREATE INDEX ptif_test01_index ON ptif_test01 (a); +ALTER INDEX ptif_test0_index ATTACH PARTITION ptif_test01_index; +CREATE INDEX ptif_test1_index ON ONLY ptif_test1 (a); +ALTER INDEX ptif_test_index ATTACH PARTITION ptif_test1_index; +CREATE INDEX ptif_test11_index ON ptif_test11 (a); +ALTER INDEX ptif_test1_index ATTACH PARTITION ptif_test11_index; +CREATE INDEX ptif_test2_index ON ptif_test2 (a); +ALTER INDEX ptif_test_index ATTACH PARTITION ptif_test2_index; + +-- List all tables members of the tree +SELECT relid, parentrelid, level, isleaf + FROM pg_partition_tree('ptif_test'); +-- List tables from an intermediate level +SELECT relid, parentrelid, level, isleaf + FROM pg_partition_tree('ptif_test0') p + JOIN pg_class c ON (p.relid = c.oid); +-- List from leaf table +SELECT relid, parentrelid, level, isleaf + FROM pg_partition_tree('ptif_test01') p + JOIN pg_class c ON (p.relid = c.oid); + +-- List all indexes members of the tree +SELECT relid, parentrelid, level, isleaf + FROM pg_partition_tree('ptif_test_index'); +-- List indexes from an intermediate level +SELECT relid, parentrelid, level, isleaf + FROM 
pg_partition_tree('ptif_test0_index') p + JOIN pg_class c ON (p.relid = c.oid); +-- List from leaf index +SELECT relid, parentrelid, level, isleaf + FROM pg_partition_tree('ptif_test01_index') p + JOIN pg_class c ON (p.relid = c.oid); + +DROP TABLE ptif_test; + +-- A table not part of a partition tree works is the only member listed. +CREATE TABLE ptif_normal_table(a int); +SELECT relid, parentrelid, level, isleaf + FROM pg_partition_tree('ptif_normal_table'); +DROP TABLE ptif_normal_table; + +-- Views and materialized viewS cannot be part of a partition tree. +CREATE VIEW ptif_test_view AS SELECT 1; +CREATE MATERIALIZED VIEW ptif_test_matview AS SELECT 1; +SELECT * FROM pg_partition_tree('ptif_test_view'); +SELECT * FROM pg_partition_tree('ptif_test_matview'); +DROP VIEW ptif_test_view; +DROP MATERIALIZED VIEW ptif_test_matview; diff --git a/src/test/regress/sql/partition_join.sql b/src/test/regress/sql/partition_join.sql new file mode 100644 index 0000000000..c1c9859651 --- /dev/null +++ b/src/test/regress/sql/partition_join.sql @@ -0,0 +1,434 @@ +-- +-- PARTITION_JOIN +-- Test partitionwise join between partitioned tables +-- + +-- Enable partitionwise join, which by default is disabled. +SET enable_partitionwise_join to true; + +-- +-- partitioned by a single column +-- +CREATE TABLE prt1 (a int, b int, c varchar) PARTITION BY RANGE(a); +CREATE TABLE prt1_p1 PARTITION OF prt1 FOR VALUES FROM (0) TO (250); +CREATE TABLE prt1_p3 PARTITION OF prt1 FOR VALUES FROM (500) TO (600); +CREATE TABLE prt1_p2 PARTITION OF prt1 FOR VALUES FROM (250) TO (500); +INSERT INTO prt1 SELECT i, i % 25, to_char(i, 'FM0000') FROM generate_series(0, 599) i WHERE i % 2 = 0; +CREATE INDEX iprt1_p1_a on prt1_p1(a); +CREATE INDEX iprt1_p2_a on prt1_p2(a); +CREATE INDEX iprt1_p3_a on prt1_p3(a); +ANALYZE prt1; + +CREATE TABLE prt2 (a int, b int, c varchar) PARTITION BY RANGE(b); +CREATE TABLE prt2_p1 PARTITION OF prt2 FOR VALUES FROM (0) TO (250); +CREATE TABLE prt2_p2 PARTITION OF prt2 FOR VALUES FROM (250) TO (500); +CREATE TABLE prt2_p3 PARTITION OF prt2 FOR VALUES FROM (500) TO (600); +INSERT INTO prt2 SELECT i % 25, i, to_char(i, 'FM0000') FROM generate_series(0, 599) i WHERE i % 3 = 0; +CREATE INDEX iprt2_p1_b on prt2_p1(b); +CREATE INDEX iprt2_p2_b on prt2_p2(b); +CREATE INDEX iprt2_p3_b on prt2_p3(b); +ANALYZE prt2; + +-- inner join +EXPLAIN (COSTS OFF) +SELECT t1.a, t1.c, t2.b, t2.c FROM prt1 t1, prt2 t2 WHERE t1.a = t2.b AND t1.b = 0 ORDER BY t1.a, t2.b; +SELECT t1.a, t1.c, t2.b, t2.c FROM prt1 t1, prt2 t2 WHERE t1.a = t2.b AND t1.b = 0 ORDER BY t1.a, t2.b; + +-- left outer join, with whole-row reference; partitionwise join does not apply +EXPLAIN (COSTS OFF) +SELECT t1, t2 FROM prt1 t1 LEFT JOIN prt2 t2 ON t1.a = t2.b WHERE t1.b = 0 ORDER BY t1.a, t2.b; +SELECT t1, t2 FROM prt1 t1 LEFT JOIN prt2 t2 ON t1.a = t2.b WHERE t1.b = 0 ORDER BY t1.a, t2.b; + +-- right outer join +EXPLAIN (COSTS OFF) +SELECT t1.a, t1.c, t2.b, t2.c FROM prt1 t1 RIGHT JOIN prt2 t2 ON t1.a = t2.b WHERE t2.a = 0 ORDER BY t1.a, t2.b; +SELECT t1.a, t1.c, t2.b, t2.c FROM prt1 t1 RIGHT JOIN prt2 t2 ON t1.a = t2.b WHERE t2.a = 0 ORDER BY t1.a, t2.b; + +-- full outer join, with placeholder vars +EXPLAIN (COSTS OFF) +SELECT t1.a, t1.c, t2.b, t2.c FROM (SELECT 50 phv, * FROM prt1 WHERE prt1.b = 0) t1 FULL JOIN (SELECT 75 phv, * FROM prt2 WHERE prt2.a = 0) t2 ON (t1.a = t2.b) WHERE t1.phv = t1.a OR t2.phv = t2.b ORDER BY t1.a, t2.b; +SELECT t1.a, t1.c, t2.b, t2.c FROM (SELECT 50 phv, * FROM prt1 WHERE prt1.b = 0) t1 FULL JOIN (SELECT 75 phv, 
* FROM prt2 WHERE prt2.a = 0) t2 ON (t1.a = t2.b) WHERE t1.phv = t1.a OR t2.phv = t2.b ORDER BY t1.a, t2.b; + +-- Join with pruned partitions from joining relations +EXPLAIN (COSTS OFF) +SELECT t1.a, t1.c, t2.b, t2.c FROM prt1 t1, prt2 t2 WHERE t1.a = t2.b AND t1.a < 450 AND t2.b > 250 AND t1.b = 0 ORDER BY t1.a, t2.b; +SELECT t1.a, t1.c, t2.b, t2.c FROM prt1 t1, prt2 t2 WHERE t1.a = t2.b AND t1.a < 450 AND t2.b > 250 AND t1.b = 0 ORDER BY t1.a, t2.b; + +EXPLAIN (COSTS OFF) +SELECT t1.a, t1.c, t2.b, t2.c FROM (SELECT * FROM prt1 WHERE a < 450) t1 LEFT JOIN (SELECT * FROM prt2 WHERE b > 250) t2 ON t1.a = t2.b WHERE t1.b = 0 ORDER BY t1.a, t2.b; +SELECT t1.a, t1.c, t2.b, t2.c FROM (SELECT * FROM prt1 WHERE a < 450) t1 LEFT JOIN (SELECT * FROM prt2 WHERE b > 250) t2 ON t1.a = t2.b WHERE t1.b = 0 ORDER BY t1.a, t2.b; + +EXPLAIN (COSTS OFF) +SELECT t1.a, t1.c, t2.b, t2.c FROM (SELECT * FROM prt1 WHERE a < 450) t1 FULL JOIN (SELECT * FROM prt2 WHERE b > 250) t2 ON t1.a = t2.b WHERE t1.b = 0 OR t2.a = 0 ORDER BY t1.a, t2.b; +SELECT t1.a, t1.c, t2.b, t2.c FROM (SELECT * FROM prt1 WHERE a < 450) t1 FULL JOIN (SELECT * FROM prt2 WHERE b > 250) t2 ON t1.a = t2.b WHERE t1.b = 0 OR t2.a = 0 ORDER BY t1.a, t2.b; + +-- Semi-join +EXPLAIN (COSTS OFF) +SELECT t1.* FROM prt1 t1 WHERE t1.a IN (SELECT t2.b FROM prt2 t2 WHERE t2.a = 0) AND t1.b = 0 ORDER BY t1.a; +SELECT t1.* FROM prt1 t1 WHERE t1.a IN (SELECT t2.b FROM prt2 t2 WHERE t2.a = 0) AND t1.b = 0 ORDER BY t1.a; + +-- Anti-join with aggregates +EXPLAIN (COSTS OFF) +SELECT sum(t1.a), avg(t1.a), sum(t1.b), avg(t1.b) FROM prt1 t1 WHERE NOT EXISTS (SELECT 1 FROM prt2 t2 WHERE t1.a = t2.b); +SELECT sum(t1.a), avg(t1.a), sum(t1.b), avg(t1.b) FROM prt1 t1 WHERE NOT EXISTS (SELECT 1 FROM prt2 t2 WHERE t1.a = t2.b); + +-- lateral reference +EXPLAIN (COSTS OFF) +SELECT * FROM prt1 t1 LEFT JOIN LATERAL + (SELECT t2.a AS t2a, t3.a AS t3a, least(t1.a,t2.a,t3.b) FROM prt1 t2 JOIN prt2 t3 ON (t2.a = t3.b)) ss + ON t1.a = ss.t2a WHERE t1.b = 0 ORDER BY t1.a; +SELECT * FROM prt1 t1 LEFT JOIN LATERAL + (SELECT t2.a AS t2a, t3.a AS t3a, least(t1.a,t2.a,t3.b) FROM prt1 t2 JOIN prt2 t3 ON (t2.a = t3.b)) ss + ON t1.a = ss.t2a WHERE t1.b = 0 ORDER BY t1.a; + +EXPLAIN (COSTS OFF) +SELECT t1.a, ss.t2a, ss.t2c FROM prt1 t1 LEFT JOIN LATERAL + (SELECT t2.a AS t2a, t3.a AS t3a, t2.b t2b, t2.c t2c, least(t1.a,t2.a,t3.b) FROM prt1 t2 JOIN prt2 t3 ON (t2.a = t3.b)) ss + ON t1.c = ss.t2c WHERE (t1.b + coalesce(ss.t2b, 0)) = 0 ORDER BY t1.a; +SELECT t1.a, ss.t2a, ss.t2c FROM prt1 t1 LEFT JOIN LATERAL + (SELECT t2.a AS t2a, t3.a AS t3a, t2.b t2b, t2.c t2c, least(t1.a,t2.a,t3.a) FROM prt1 t2 JOIN prt2 t3 ON (t2.a = t3.b)) ss + ON t1.c = ss.t2c WHERE (t1.b + coalesce(ss.t2b, 0)) = 0 ORDER BY t1.a; + +-- +-- partitioned by expression +-- +CREATE TABLE prt1_e (a int, b int, c int) PARTITION BY RANGE(((a + b)/2)); +CREATE TABLE prt1_e_p1 PARTITION OF prt1_e FOR VALUES FROM (0) TO (250); +CREATE TABLE prt1_e_p2 PARTITION OF prt1_e FOR VALUES FROM (250) TO (500); +CREATE TABLE prt1_e_p3 PARTITION OF prt1_e FOR VALUES FROM (500) TO (600); +INSERT INTO prt1_e SELECT i, i, i % 25 FROM generate_series(0, 599, 2) i; +CREATE INDEX iprt1_e_p1_ab2 on prt1_e_p1(((a+b)/2)); +CREATE INDEX iprt1_e_p2_ab2 on prt1_e_p2(((a+b)/2)); +CREATE INDEX iprt1_e_p3_ab2 on prt1_e_p3(((a+b)/2)); +ANALYZE prt1_e; + +CREATE TABLE prt2_e (a int, b int, c int) PARTITION BY RANGE(((b + a)/2)); +CREATE TABLE prt2_e_p1 PARTITION OF prt2_e FOR VALUES FROM (0) TO (250); +CREATE TABLE prt2_e_p2 PARTITION OF prt2_e FOR VALUES 
FROM (250) TO (500); +CREATE TABLE prt2_e_p3 PARTITION OF prt2_e FOR VALUES FROM (500) TO (600); +INSERT INTO prt2_e SELECT i, i, i % 25 FROM generate_series(0, 599, 3) i; +ANALYZE prt2_e; + +EXPLAIN (COSTS OFF) +SELECT t1.a, t1.c, t2.b, t2.c FROM prt1_e t1, prt2_e t2 WHERE (t1.a + t1.b)/2 = (t2.b + t2.a)/2 AND t1.c = 0 ORDER BY t1.a, t2.b; +SELECT t1.a, t1.c, t2.b, t2.c FROM prt1_e t1, prt2_e t2 WHERE (t1.a + t1.b)/2 = (t2.b + t2.a)/2 AND t1.c = 0 ORDER BY t1.a, t2.b; + +-- +-- N-way join +-- +EXPLAIN (COSTS OFF) +SELECT t1.a, t1.c, t2.b, t2.c, t3.a + t3.b, t3.c FROM prt1 t1, prt2 t2, prt1_e t3 WHERE t1.a = t2.b AND t1.a = (t3.a + t3.b)/2 AND t1.b = 0 ORDER BY t1.a, t2.b; +SELECT t1.a, t1.c, t2.b, t2.c, t3.a + t3.b, t3.c FROM prt1 t1, prt2 t2, prt1_e t3 WHERE t1.a = t2.b AND t1.a = (t3.a + t3.b)/2 AND t1.b = 0 ORDER BY t1.a, t2.b; + +EXPLAIN (COSTS OFF) +SELECT t1.a, t1.c, t2.b, t2.c, t3.a + t3.b, t3.c FROM (prt1 t1 LEFT JOIN prt2 t2 ON t1.a = t2.b) LEFT JOIN prt1_e t3 ON (t1.a = (t3.a + t3.b)/2) WHERE t1.b = 0 ORDER BY t1.a, t2.b, t3.a + t3.b; +SELECT t1.a, t1.c, t2.b, t2.c, t3.a + t3.b, t3.c FROM (prt1 t1 LEFT JOIN prt2 t2 ON t1.a = t2.b) LEFT JOIN prt1_e t3 ON (t1.a = (t3.a + t3.b)/2) WHERE t1.b = 0 ORDER BY t1.a, t2.b, t3.a + t3.b; + +EXPLAIN (COSTS OFF) +SELECT t1.a, t1.c, t2.b, t2.c, t3.a + t3.b, t3.c FROM (prt1 t1 LEFT JOIN prt2 t2 ON t1.a = t2.b) RIGHT JOIN prt1_e t3 ON (t1.a = (t3.a + t3.b)/2) WHERE t3.c = 0 ORDER BY t1.a, t2.b, t3.a + t3.b; +SELECT t1.a, t1.c, t2.b, t2.c, t3.a + t3.b, t3.c FROM (prt1 t1 LEFT JOIN prt2 t2 ON t1.a = t2.b) RIGHT JOIN prt1_e t3 ON (t1.a = (t3.a + t3.b)/2) WHERE t3.c = 0 ORDER BY t1.a, t2.b, t3.a + t3.b; + +-- Cases with non-nullable expressions in subquery results; +-- make sure these go to null as expected +EXPLAIN (COSTS OFF) +SELECT t1.a, t1.phv, t2.b, t2.phv, t3.a + t3.b, t3.phv FROM ((SELECT 50 phv, * FROM prt1 WHERE prt1.b = 0) t1 FULL JOIN (SELECT 75 phv, * FROM prt2 WHERE prt2.a = 0) t2 ON (t1.a = t2.b)) FULL JOIN (SELECT 50 phv, * FROM prt1_e WHERE prt1_e.c = 0) t3 ON (t1.a = (t3.a + t3.b)/2) WHERE t1.a = t1.phv OR t2.b = t2.phv OR (t3.a + t3.b)/2 = t3.phv ORDER BY t1.a, t2.b, t3.a + t3.b; +SELECT t1.a, t1.phv, t2.b, t2.phv, t3.a + t3.b, t3.phv FROM ((SELECT 50 phv, * FROM prt1 WHERE prt1.b = 0) t1 FULL JOIN (SELECT 75 phv, * FROM prt2 WHERE prt2.a = 0) t2 ON (t1.a = t2.b)) FULL JOIN (SELECT 50 phv, * FROM prt1_e WHERE prt1_e.c = 0) t3 ON (t1.a = (t3.a + t3.b)/2) WHERE t1.a = t1.phv OR t2.b = t2.phv OR (t3.a + t3.b)/2 = t3.phv ORDER BY t1.a, t2.b, t3.a + t3.b; + +-- Semi-join +EXPLAIN (COSTS OFF) +SELECT t1.* FROM prt1 t1 WHERE t1.a IN (SELECT t1.b FROM prt2 t1, prt1_e t2 WHERE t1.a = 0 AND t1.b = (t2.a + t2.b)/2) AND t1.b = 0 ORDER BY t1.a; +SELECT t1.* FROM prt1 t1 WHERE t1.a IN (SELECT t1.b FROM prt2 t1, prt1_e t2 WHERE t1.a = 0 AND t1.b = (t2.a + t2.b)/2) AND t1.b = 0 ORDER BY t1.a; + +EXPLAIN (COSTS OFF) +SELECT t1.* FROM prt1 t1 WHERE t1.a IN (SELECT t1.b FROM prt2 t1 WHERE t1.b IN (SELECT (t1.a + t1.b)/2 FROM prt1_e t1 WHERE t1.c = 0)) AND t1.b = 0 ORDER BY t1.a; +SELECT t1.* FROM prt1 t1 WHERE t1.a IN (SELECT t1.b FROM prt2 t1 WHERE t1.b IN (SELECT (t1.a + t1.b)/2 FROM prt1_e t1 WHERE t1.c = 0)) AND t1.b = 0 ORDER BY t1.a; + +-- test merge joins +SET enable_hashjoin TO off; +SET enable_nestloop TO off; + +EXPLAIN (COSTS OFF) +SELECT t1.* FROM prt1 t1 WHERE t1.a IN (SELECT t1.b FROM prt2 t1 WHERE t1.b IN (SELECT (t1.a + t1.b)/2 FROM prt1_e t1 WHERE t1.c = 0)) AND t1.b = 0 ORDER BY t1.a; +SELECT t1.* FROM prt1 t1 WHERE t1.a IN (SELECT 
t1.b FROM prt2 t1 WHERE t1.b IN (SELECT (t1.a + t1.b)/2 FROM prt1_e t1 WHERE t1.c = 0)) AND t1.b = 0 ORDER BY t1.a; + +EXPLAIN (COSTS OFF) +SELECT t1.a, t1.c, t2.b, t2.c, t3.a + t3.b, t3.c FROM (prt1 t1 LEFT JOIN prt2 t2 ON t1.a = t2.b) RIGHT JOIN prt1_e t3 ON (t1.a = (t3.a + t3.b)/2) WHERE t3.c = 0 ORDER BY t1.a, t2.b, t3.a + t3.b; +SELECT t1.a, t1.c, t2.b, t2.c, t3.a + t3.b, t3.c FROM (prt1 t1 LEFT JOIN prt2 t2 ON t1.a = t2.b) RIGHT JOIN prt1_e t3 ON (t1.a = (t3.a + t3.b)/2) WHERE t3.c = 0 ORDER BY t1.a, t2.b, t3.a + t3.b; + +-- MergeAppend on nullable column +EXPLAIN (COSTS OFF) +SELECT t1.a, t2.b FROM (SELECT * FROM prt1 WHERE a < 450) t1 LEFT JOIN (SELECT * FROM prt2 WHERE b > 250) t2 ON t1.a = t2.b WHERE t1.b = 0 ORDER BY t1.a, t2.b; +SELECT t1.a, t2.b FROM (SELECT * FROM prt1 WHERE a < 450) t1 LEFT JOIN (SELECT * FROM prt2 WHERE b > 250) t2 ON t1.a = t2.b WHERE t1.b = 0 ORDER BY t1.a, t2.b; + +-- merge join when expression with whole-row reference needs to be sorted; +-- partitionwise join does not apply +EXPLAIN (COSTS OFF) +SELECT t1.a, t2.b FROM prt1 t1, prt2 t2 WHERE t1::text = t2::text AND t1.a = t2.b ORDER BY t1.a; +SELECT t1.a, t2.b FROM prt1 t1, prt2 t2 WHERE t1::text = t2::text AND t1.a = t2.b ORDER BY t1.a; + +RESET enable_hashjoin; +RESET enable_nestloop; + +-- +-- partitioned by multiple columns +-- +CREATE TABLE prt1_m (a int, b int, c int) PARTITION BY RANGE(a, ((a + b)/2)); +CREATE TABLE prt1_m_p1 PARTITION OF prt1_m FOR VALUES FROM (0, 0) TO (250, 250); +CREATE TABLE prt1_m_p2 PARTITION OF prt1_m FOR VALUES FROM (250, 250) TO (500, 500); +CREATE TABLE prt1_m_p3 PARTITION OF prt1_m FOR VALUES FROM (500, 500) TO (600, 600); +INSERT INTO prt1_m SELECT i, i, i % 25 FROM generate_series(0, 599, 2) i; +ANALYZE prt1_m; + +CREATE TABLE prt2_m (a int, b int, c int) PARTITION BY RANGE(((b + a)/2), b); +CREATE TABLE prt2_m_p1 PARTITION OF prt2_m FOR VALUES FROM (0, 0) TO (250, 250); +CREATE TABLE prt2_m_p2 PARTITION OF prt2_m FOR VALUES FROM (250, 250) TO (500, 500); +CREATE TABLE prt2_m_p3 PARTITION OF prt2_m FOR VALUES FROM (500, 500) TO (600, 600); +INSERT INTO prt2_m SELECT i, i, i % 25 FROM generate_series(0, 599, 3) i; +ANALYZE prt2_m; + +EXPLAIN (COSTS OFF) +SELECT t1.a, t1.c, t2.b, t2.c FROM (SELECT * FROM prt1_m WHERE prt1_m.c = 0) t1 FULL JOIN (SELECT * FROM prt2_m WHERE prt2_m.c = 0) t2 ON (t1.a = (t2.b + t2.a)/2 AND t2.b = (t1.a + t1.b)/2) ORDER BY t1.a, t2.b; +SELECT t1.a, t1.c, t2.b, t2.c FROM (SELECT * FROM prt1_m WHERE prt1_m.c = 0) t1 FULL JOIN (SELECT * FROM prt2_m WHERE prt2_m.c = 0) t2 ON (t1.a = (t2.b + t2.a)/2 AND t2.b = (t1.a + t1.b)/2) ORDER BY t1.a, t2.b; + +-- +-- tests for list partitioned tables. 
+-- +CREATE TABLE plt1 (a int, b int, c text) PARTITION BY LIST(c); +CREATE TABLE plt1_p1 PARTITION OF plt1 FOR VALUES IN ('0000', '0003', '0004', '0010'); +CREATE TABLE plt1_p2 PARTITION OF plt1 FOR VALUES IN ('0001', '0005', '0002', '0009'); +CREATE TABLE plt1_p3 PARTITION OF plt1 FOR VALUES IN ('0006', '0007', '0008', '0011'); +INSERT INTO plt1 SELECT i, i, to_char(i/50, 'FM0000') FROM generate_series(0, 599, 2) i; +ANALYZE plt1; + +CREATE TABLE plt2 (a int, b int, c text) PARTITION BY LIST(c); +CREATE TABLE plt2_p1 PARTITION OF plt2 FOR VALUES IN ('0000', '0003', '0004', '0010'); +CREATE TABLE plt2_p2 PARTITION OF plt2 FOR VALUES IN ('0001', '0005', '0002', '0009'); +CREATE TABLE plt2_p3 PARTITION OF plt2 FOR VALUES IN ('0006', '0007', '0008', '0011'); +INSERT INTO plt2 SELECT i, i, to_char(i/50, 'FM0000') FROM generate_series(0, 599, 3) i; +ANALYZE plt2; + +-- +-- list partitioned by expression +-- +CREATE TABLE plt1_e (a int, b int, c text) PARTITION BY LIST(ltrim(c, 'A')); +CREATE TABLE plt1_e_p1 PARTITION OF plt1_e FOR VALUES IN ('0000', '0003', '0004', '0010'); +CREATE TABLE plt1_e_p2 PARTITION OF plt1_e FOR VALUES IN ('0001', '0005', '0002', '0009'); +CREATE TABLE plt1_e_p3 PARTITION OF plt1_e FOR VALUES IN ('0006', '0007', '0008', '0011'); +INSERT INTO plt1_e SELECT i, i, 'A' || to_char(i/50, 'FM0000') FROM generate_series(0, 599, 2) i; +ANALYZE plt1_e; + +-- test partition matching with N-way join +EXPLAIN (COSTS OFF) +SELECT avg(t1.a), avg(t2.b), avg(t3.a + t3.b), t1.c, t2.c, t3.c FROM plt1 t1, plt2 t2, plt1_e t3 WHERE t1.b = t2.b AND t1.c = t2.c AND ltrim(t3.c, 'A') = t1.c GROUP BY t1.c, t2.c, t3.c ORDER BY t1.c, t2.c, t3.c; +SELECT avg(t1.a), avg(t2.b), avg(t3.a + t3.b), t1.c, t2.c, t3.c FROM plt1 t1, plt2 t2, plt1_e t3 WHERE t1.b = t2.b AND t1.c = t2.c AND ltrim(t3.c, 'A') = t1.c GROUP BY t1.c, t2.c, t3.c ORDER BY t1.c, t2.c, t3.c; + +-- joins where one of the relations is proven empty +EXPLAIN (COSTS OFF) +SELECT t1.a, t1.c, t2.b, t2.c FROM prt1 t1, prt2 t2 WHERE t1.a = t2.b AND t1.a = 1 AND t1.a = 2; + +EXPLAIN (COSTS OFF) +SELECT t1.a, t1.c, t2.b, t2.c FROM (SELECT * FROM prt1 WHERE a = 1 AND a = 2) t1 LEFT JOIN prt2 t2 ON t1.a = t2.b; + +EXPLAIN (COSTS OFF) +SELECT t1.a, t1.c, t2.b, t2.c FROM (SELECT * FROM prt1 WHERE a = 1 AND a = 2) t1 RIGHT JOIN prt2 t2 ON t1.a = t2.b, prt1 t3 WHERE t2.b = t3.a; + +EXPLAIN (COSTS OFF) +SELECT t1.a, t1.c, t2.b, t2.c FROM (SELECT * FROM prt1 WHERE a = 1 AND a = 2) t1 FULL JOIN prt2 t2 ON t1.a = t2.b WHERE t2.a = 0 ORDER BY t1.a, t2.b; + +-- +-- tests for hash partitioned tables. 
+-- +CREATE TABLE pht1 (a int, b int, c text) PARTITION BY HASH(c); +CREATE TABLE pht1_p1 PARTITION OF pht1 FOR VALUES WITH (MODULUS 3, REMAINDER 0); +CREATE TABLE pht1_p2 PARTITION OF pht1 FOR VALUES WITH (MODULUS 3, REMAINDER 1); +CREATE TABLE pht1_p3 PARTITION OF pht1 FOR VALUES WITH (MODULUS 3, REMAINDER 2); +INSERT INTO pht1 SELECT i, i, to_char(i/50, 'FM0000') FROM generate_series(0, 599, 2) i; +ANALYZE pht1; + +CREATE TABLE pht2 (a int, b int, c text) PARTITION BY HASH(c); +CREATE TABLE pht2_p1 PARTITION OF pht2 FOR VALUES WITH (MODULUS 3, REMAINDER 0); +CREATE TABLE pht2_p2 PARTITION OF pht2 FOR VALUES WITH (MODULUS 3, REMAINDER 1); +CREATE TABLE pht2_p3 PARTITION OF pht2 FOR VALUES WITH (MODULUS 3, REMAINDER 2); +INSERT INTO pht2 SELECT i, i, to_char(i/50, 'FM0000') FROM generate_series(0, 599, 3) i; +ANALYZE pht2; + +-- +-- hash partitioned by expression +-- +CREATE TABLE pht1_e (a int, b int, c text) PARTITION BY HASH(ltrim(c, 'A')); +CREATE TABLE pht1_e_p1 PARTITION OF pht1_e FOR VALUES WITH (MODULUS 3, REMAINDER 0); +CREATE TABLE pht1_e_p2 PARTITION OF pht1_e FOR VALUES WITH (MODULUS 3, REMAINDER 1); +CREATE TABLE pht1_e_p3 PARTITION OF pht1_e FOR VALUES WITH (MODULUS 3, REMAINDER 2); +INSERT INTO pht1_e SELECT i, i, 'A' || to_char(i/50, 'FM0000') FROM generate_series(0, 299, 2) i; +ANALYZE pht1_e; + +-- test partition matching with N-way join +EXPLAIN (COSTS OFF) +SELECT avg(t1.a), avg(t2.b), avg(t3.a + t3.b), t1.c, t2.c, t3.c FROM pht1 t1, pht2 t2, pht1_e t3 WHERE t1.b = t2.b AND t1.c = t2.c AND ltrim(t3.c, 'A') = t1.c GROUP BY t1.c, t2.c, t3.c ORDER BY t1.c, t2.c, t3.c; +SELECT avg(t1.a), avg(t2.b), avg(t3.a + t3.b), t1.c, t2.c, t3.c FROM pht1 t1, pht2 t2, pht1_e t3 WHERE t1.b = t2.b AND t1.c = t2.c AND ltrim(t3.c, 'A') = t1.c GROUP BY t1.c, t2.c, t3.c ORDER BY t1.c, t2.c, t3.c; + +-- test default partition behavior for range +ALTER TABLE prt1 DETACH PARTITION prt1_p3; +ALTER TABLE prt1 ATTACH PARTITION prt1_p3 DEFAULT; +ANALYZE prt1; +ALTER TABLE prt2 DETACH PARTITION prt2_p3; +ALTER TABLE prt2 ATTACH PARTITION prt2_p3 DEFAULT; +ANALYZE prt2; + +EXPLAIN (COSTS OFF) +SELECT t1.a, t1.c, t2.b, t2.c FROM prt1 t1, prt2 t2 WHERE t1.a = t2.b AND t1.b = 0 ORDER BY t1.a, t2.b; + +-- test default partition behavior for list +ALTER TABLE plt1 DETACH PARTITION plt1_p3; +ALTER TABLE plt1 ATTACH PARTITION plt1_p3 DEFAULT; +ANALYZE plt1; +ALTER TABLE plt2 DETACH PARTITION plt2_p3; +ALTER TABLE plt2 ATTACH PARTITION plt2_p3 DEFAULT; +ANALYZE plt2; + +EXPLAIN (COSTS OFF) +SELECT avg(t1.a), avg(t2.b), t1.c, t2.c FROM plt1 t1 RIGHT JOIN plt2 t2 ON t1.c = t2.c WHERE t1.a % 25 = 0 GROUP BY t1.c, t2.c ORDER BY t1.c, t2.c; +-- +-- multiple levels of partitioning +-- +CREATE TABLE prt1_l (a int, b int, c varchar) PARTITION BY RANGE(a); +CREATE TABLE prt1_l_p1 PARTITION OF prt1_l FOR VALUES FROM (0) TO (250); +CREATE TABLE prt1_l_p2 PARTITION OF prt1_l FOR VALUES FROM (250) TO (500) PARTITION BY LIST (c); +CREATE TABLE prt1_l_p2_p1 PARTITION OF prt1_l_p2 FOR VALUES IN ('0000', '0001'); +CREATE TABLE prt1_l_p2_p2 PARTITION OF prt1_l_p2 FOR VALUES IN ('0002', '0003'); +CREATE TABLE prt1_l_p3 PARTITION OF prt1_l FOR VALUES FROM (500) TO (600) PARTITION BY RANGE (b); +CREATE TABLE prt1_l_p3_p1 PARTITION OF prt1_l_p3 FOR VALUES FROM (0) TO (13); +CREATE TABLE prt1_l_p3_p2 PARTITION OF prt1_l_p3 FOR VALUES FROM (13) TO (25); +INSERT INTO prt1_l SELECT i, i % 25, to_char(i % 4, 'FM0000') FROM generate_series(0, 599, 2) i; +ANALYZE prt1_l; + +CREATE TABLE prt2_l (a int, b int, c varchar) PARTITION BY 
RANGE(b); +CREATE TABLE prt2_l_p1 PARTITION OF prt2_l FOR VALUES FROM (0) TO (250); +CREATE TABLE prt2_l_p2 PARTITION OF prt2_l FOR VALUES FROM (250) TO (500) PARTITION BY LIST (c); +CREATE TABLE prt2_l_p2_p1 PARTITION OF prt2_l_p2 FOR VALUES IN ('0000', '0001'); +CREATE TABLE prt2_l_p2_p2 PARTITION OF prt2_l_p2 FOR VALUES IN ('0002', '0003'); +CREATE TABLE prt2_l_p3 PARTITION OF prt2_l FOR VALUES FROM (500) TO (600) PARTITION BY RANGE (a); +CREATE TABLE prt2_l_p3_p1 PARTITION OF prt2_l_p3 FOR VALUES FROM (0) TO (13); +CREATE TABLE prt2_l_p3_p2 PARTITION OF prt2_l_p3 FOR VALUES FROM (13) TO (25); +INSERT INTO prt2_l SELECT i % 25, i, to_char(i % 4, 'FM0000') FROM generate_series(0, 599, 3) i; +ANALYZE prt2_l; + +-- inner join, qual covering only top-level partitions +EXPLAIN (COSTS OFF) +SELECT t1.a, t1.c, t2.b, t2.c FROM prt1_l t1, prt2_l t2 WHERE t1.a = t2.b AND t1.b = 0 ORDER BY t1.a, t2.b; +SELECT t1.a, t1.c, t2.b, t2.c FROM prt1_l t1, prt2_l t2 WHERE t1.a = t2.b AND t1.b = 0 ORDER BY t1.a, t2.b; + +-- left join +EXPLAIN (COSTS OFF) +SELECT t1.a, t1.c, t2.b, t2.c FROM prt1_l t1 LEFT JOIN prt2_l t2 ON t1.a = t2.b AND t1.c = t2.c WHERE t1.b = 0 ORDER BY t1.a, t2.b; +SELECT t1.a, t1.c, t2.b, t2.c FROM prt1_l t1 LEFT JOIN prt2_l t2 ON t1.a = t2.b AND t1.c = t2.c WHERE t1.b = 0 ORDER BY t1.a, t2.b; + +-- right join +EXPLAIN (COSTS OFF) +SELECT t1.a, t1.c, t2.b, t2.c FROM prt1_l t1 RIGHT JOIN prt2_l t2 ON t1.a = t2.b AND t1.c = t2.c WHERE t2.a = 0 ORDER BY t1.a, t2.b; +SELECT t1.a, t1.c, t2.b, t2.c FROM prt1_l t1 RIGHT JOIN prt2_l t2 ON t1.a = t2.b AND t1.c = t2.c WHERE t2.a = 0 ORDER BY t1.a, t2.b; + +-- full join +EXPLAIN (COSTS OFF) +SELECT t1.a, t1.c, t2.b, t2.c FROM (SELECT * FROM prt1_l WHERE prt1_l.b = 0) t1 FULL JOIN (SELECT * FROM prt2_l WHERE prt2_l.a = 0) t2 ON (t1.a = t2.b AND t1.c = t2.c) ORDER BY t1.a, t2.b; +SELECT t1.a, t1.c, t2.b, t2.c FROM (SELECT * FROM prt1_l WHERE prt1_l.b = 0) t1 FULL JOIN (SELECT * FROM prt2_l WHERE prt2_l.a = 0) t2 ON (t1.a = t2.b AND t1.c = t2.c) ORDER BY t1.a, t2.b; + +-- lateral partitionwise join +EXPLAIN (COSTS OFF) +SELECT * FROM prt1_l t1 LEFT JOIN LATERAL + (SELECT t2.a AS t2a, t2.c AS t2c, t2.b AS t2b, t3.b AS t3b, least(t1.a,t2.a,t3.b) FROM prt1_l t2 JOIN prt2_l t3 ON (t2.a = t3.b AND t2.c = t3.c)) ss + ON t1.a = ss.t2a AND t1.c = ss.t2c WHERE t1.b = 0 ORDER BY t1.a; +SELECT * FROM prt1_l t1 LEFT JOIN LATERAL + (SELECT t2.a AS t2a, t2.c AS t2c, t2.b AS t2b, t3.b AS t3b, least(t1.a,t2.a,t3.b) FROM prt1_l t2 JOIN prt2_l t3 ON (t2.a = t3.b AND t2.c = t3.c)) ss + ON t1.a = ss.t2a AND t1.c = ss.t2c WHERE t1.b = 0 ORDER BY t1.a; + +-- join with one side empty +EXPLAIN (COSTS OFF) +SELECT t1.a, t1.c, t2.b, t2.c FROM (SELECT * FROM prt1_l WHERE a = 1 AND a = 2) t1 RIGHT JOIN prt2_l t2 ON t1.a = t2.b AND t1.b = t2.a AND t1.c = t2.c; + +-- Test case to verify proper handling of subqueries in a partitioned delete. +-- The weird-looking lateral join is just there to force creation of a +-- nestloop parameter within the subquery, which exposes the problem if the +-- planner fails to make multiple copies of the subquery as appropriate. 
+EXPLAIN (COSTS OFF) +DELETE FROM prt1_l +WHERE EXISTS ( + SELECT 1 + FROM int4_tbl, + LATERAL (SELECT int4_tbl.f1 FROM int8_tbl LIMIT 2) ss + WHERE prt1_l.c IS NULL); + +-- +-- negative testcases +-- +CREATE TABLE prt1_n (a int, b int, c varchar) PARTITION BY RANGE(c); +CREATE TABLE prt1_n_p1 PARTITION OF prt1_n FOR VALUES FROM ('0000') TO ('0250'); +CREATE TABLE prt1_n_p2 PARTITION OF prt1_n FOR VALUES FROM ('0250') TO ('0500'); +INSERT INTO prt1_n SELECT i, i, to_char(i, 'FM0000') FROM generate_series(0, 499, 2) i; +ANALYZE prt1_n; + +CREATE TABLE prt2_n (a int, b int, c text) PARTITION BY LIST(c); +CREATE TABLE prt2_n_p1 PARTITION OF prt2_n FOR VALUES IN ('0000', '0003', '0004', '0010', '0006', '0007'); +CREATE TABLE prt2_n_p2 PARTITION OF prt2_n FOR VALUES IN ('0001', '0005', '0002', '0009', '0008', '0011'); +INSERT INTO prt2_n SELECT i, i, to_char(i/50, 'FM0000') FROM generate_series(0, 599, 2) i; +ANALYZE prt2_n; + +CREATE TABLE prt3_n (a int, b int, c text) PARTITION BY LIST(c); +CREATE TABLE prt3_n_p1 PARTITION OF prt3_n FOR VALUES IN ('0000', '0004', '0006', '0007'); +CREATE TABLE prt3_n_p2 PARTITION OF prt3_n FOR VALUES IN ('0001', '0002', '0008', '0010'); +CREATE TABLE prt3_n_p3 PARTITION OF prt3_n FOR VALUES IN ('0003', '0005', '0009', '0011'); +INSERT INTO prt2_n SELECT i, i, to_char(i/50, 'FM0000') FROM generate_series(0, 599, 2) i; +ANALYZE prt3_n; + +CREATE TABLE prt4_n (a int, b int, c text) PARTITION BY RANGE(a); +CREATE TABLE prt4_n_p1 PARTITION OF prt4_n FOR VALUES FROM (0) TO (300); +CREATE TABLE prt4_n_p2 PARTITION OF prt4_n FOR VALUES FROM (300) TO (500); +CREATE TABLE prt4_n_p3 PARTITION OF prt4_n FOR VALUES FROM (500) TO (600); +INSERT INTO prt4_n SELECT i, i, to_char(i, 'FM0000') FROM generate_series(0, 599, 2) i; +ANALYZE prt4_n; + +-- partitionwise join cannot be applied if the partition ranges differ +EXPLAIN (COSTS OFF) +SELECT t1.a, t1.c, t2.b, t2.c FROM prt1 t1, prt4_n t2 WHERE t1.a = t2.a; +EXPLAIN (COSTS OFF) +SELECT t1.a, t1.c, t2.b, t2.c FROM prt1 t1, prt4_n t2, prt2 t3 WHERE t1.a = t2.a and t1.a = t3.b; + +-- partitionwise join cannot be applied if there are no equi-join conditions +-- between partition keys +EXPLAIN (COSTS OFF) +SELECT t1.a, t1.c, t2.b, t2.c FROM prt1 t1 LEFT JOIN prt2 t2 ON (t1.a < t2.b); + +-- equi-join with join condition on partial keys does not qualify for +-- partitionwise join +EXPLAIN (COSTS OFF) +SELECT t1.a, t1.c, t2.b, t2.c FROM prt1_m t1, prt2_m t2 WHERE t1.a = (t2.b + t2.a)/2; + +-- equi-join between out-of-order partition key columns does not qualify for +-- partitionwise join +EXPLAIN (COSTS OFF) +SELECT t1.a, t1.c, t2.b, t2.c FROM prt1_m t1 LEFT JOIN prt2_m t2 ON t1.a = t2.b; + +-- equi-join between non-key columns does not qualify for partitionwise join +EXPLAIN (COSTS OFF) +SELECT t1.a, t1.c, t2.b, t2.c FROM prt1_m t1 LEFT JOIN prt2_m t2 ON t1.c = t2.c; + +-- partitionwise join cannot be applied between tables with different +-- partition lists +EXPLAIN (COSTS OFF) +SELECT t1.a, t1.c, t2.b, t2.c FROM prt1_n t1 LEFT JOIN prt2_n t2 ON (t1.c = t2.c); +EXPLAIN (COSTS OFF) +SELECT t1.a, t1.c, t2.b, t2.c FROM prt1_n t1 JOIN prt2_n t2 ON (t1.c = t2.c) JOIN plt1 t3 ON (t1.c = t3.c); + +-- partitionwise join cannot be applied for a join between a list and a range +-- partitioned table +EXPLAIN (COSTS OFF) +SELECT t1.a, t1.c, t2.b, t2.c FROM prt1_n t1 FULL JOIN prt1 t2 ON (t1.c = t2.c); + +-- partitionwise join cannot be applied if only one of the joining tables +-- has a default partition +ALTER TABLE prt2 DETACH PARTITION prt2_p3; 
+ALTER TABLE prt2 ATTACH PARTITION prt2_p3 FOR VALUES FROM (500) TO (600); +ANALYZE prt2; + +EXPLAIN (COSTS OFF) +SELECT t1.a, t1.c, t2.b, t2.c FROM prt1 t1, prt2 t2 WHERE t1.a = t2.b AND t1.b = 0 ORDER BY t1.a, t2.b; diff --git a/src/test/regress/sql/partition_prune.sql b/src/test/regress/sql/partition_prune.sql new file mode 100644 index 0000000000..eca1a7c5ac --- /dev/null +++ b/src/test/regress/sql/partition_prune.sql @@ -0,0 +1,960 @@ +-- +-- Test partitioning planner code +-- +create table lp (a char) partition by list (a); +create table lp_default partition of lp default; +create table lp_ef partition of lp for values in ('e', 'f'); +create table lp_ad partition of lp for values in ('a', 'd'); +create table lp_bc partition of lp for values in ('b', 'c'); +create table lp_g partition of lp for values in ('g'); +create table lp_null partition of lp for values in (null); +explain (costs off) select * from lp; +explain (costs off) select * from lp where a > 'a' and a < 'd'; +explain (costs off) select * from lp where a > 'a' and a <= 'd'; +explain (costs off) select * from lp where a = 'a'; +explain (costs off) select * from lp where 'a' = a; /* commuted */ +explain (costs off) select * from lp where a is not null; +explain (costs off) select * from lp where a is null; +explain (costs off) select * from lp where a = 'a' or a = 'c'; +explain (costs off) select * from lp where a is not null and (a = 'a' or a = 'c'); +explain (costs off) select * from lp where a <> 'g'; +explain (costs off) select * from lp where a <> 'a' and a <> 'd'; +explain (costs off) select * from lp where a not in ('a', 'd'); + +-- collation matches the partitioning collation, pruning works +create table coll_pruning (a text collate "C") partition by list (a); +create table coll_pruning_a partition of coll_pruning for values in ('a'); +create table coll_pruning_b partition of coll_pruning for values in ('b'); +create table coll_pruning_def partition of coll_pruning default; +explain (costs off) select * from coll_pruning where a collate "C" = 'a' collate "C"; +-- collation doesn't match the partitioning collation, no pruning occurs +explain (costs off) select * from coll_pruning where a collate "POSIX" = 'a' collate "POSIX"; + +create table rlp (a int, b varchar) partition by range (a); +create table rlp_default partition of rlp default partition by list (a); +create table rlp_default_default partition of rlp_default default; +create table rlp_default_10 partition of rlp_default for values in (10); +create table rlp_default_30 partition of rlp_default for values in (30); +create table rlp_default_null partition of rlp_default for values in (null); +create table rlp1 partition of rlp for values from (minvalue) to (1); +create table rlp2 partition of rlp for values from (1) to (10); + +create table rlp3 (b varchar, a int) partition by list (b varchar_ops); +create table rlp3_default partition of rlp3 default; +create table rlp3abcd partition of rlp3 for values in ('ab', 'cd'); +create table rlp3efgh partition of rlp3 for values in ('ef', 'gh'); +create table rlp3nullxy partition of rlp3 for values in (null, 'xy'); +alter table rlp attach partition rlp3 for values from (15) to (20); + +create table rlp4 partition of rlp for values from (20) to (30) partition by range (a); +create table rlp4_default partition of rlp4 default; +create table rlp4_1 partition of rlp4 for values from (20) to (25); +create table rlp4_2 partition of rlp4 for values from (25) to (29); + +create table rlp5 partition of rlp for values from (31) 
to (maxvalue) partition by range (a); +create table rlp5_default partition of rlp5 default; +create table rlp5_1 partition of rlp5 for values from (31) to (40); + +explain (costs off) select * from rlp where a < 1; +explain (costs off) select * from rlp where 1 > a; /* commuted */ +explain (costs off) select * from rlp where a <= 1; +explain (costs off) select * from rlp where a = 1; +explain (costs off) select * from rlp where a = 1::bigint; /* same as above */ +explain (costs off) select * from rlp where a = 1::numeric; /* no pruning */ +explain (costs off) select * from rlp where a <= 10; +explain (costs off) select * from rlp where a > 10; +explain (costs off) select * from rlp where a < 15; +explain (costs off) select * from rlp where a <= 15; +explain (costs off) select * from rlp where a > 15 and b = 'ab'; +explain (costs off) select * from rlp where a = 16; +explain (costs off) select * from rlp where a = 16 and b in ('not', 'in', 'here'); +explain (costs off) select * from rlp where a = 16 and b < 'ab'; +explain (costs off) select * from rlp where a = 16 and b <= 'ab'; +explain (costs off) select * from rlp where a = 16 and b is null; +explain (costs off) select * from rlp where a = 16 and b is not null; +explain (costs off) select * from rlp where a is null; +explain (costs off) select * from rlp where a is not null; +explain (costs off) select * from rlp where a > 30; +explain (costs off) select * from rlp where a = 30; /* only default is scanned */ +explain (costs off) select * from rlp where a <= 31; +explain (costs off) select * from rlp where a = 1 or a = 7; +explain (costs off) select * from rlp where a = 1 or b = 'ab'; + +explain (costs off) select * from rlp where a > 20 and a < 27; +explain (costs off) select * from rlp where a = 29; +explain (costs off) select * from rlp where a >= 29; + +-- redundant clauses are eliminated +explain (costs off) select * from rlp where a > 1 and a = 10; /* only default */ +explain (costs off) select * from rlp where a > 1 and a >=15; /* rlp3 onwards, including default */ +explain (costs off) select * from rlp where a = 1 and a = 3; /* empty */ +explain (costs off) select * from rlp where (a = 1 and a = 3) or (a > 1 and a = 15); + +-- multi-column keys +create table mc3p (a int, b int, c int) partition by range (a, abs(b), c); +create table mc3p_default partition of mc3p default; +create table mc3p0 partition of mc3p for values from (minvalue, minvalue, minvalue) to (1, 1, 1); +create table mc3p1 partition of mc3p for values from (1, 1, 1) to (10, 5, 10); +create table mc3p2 partition of mc3p for values from (10, 5, 10) to (10, 10, 10); +create table mc3p3 partition of mc3p for values from (10, 10, 10) to (10, 10, 20); +create table mc3p4 partition of mc3p for values from (10, 10, 20) to (10, maxvalue, maxvalue); +create table mc3p5 partition of mc3p for values from (11, 1, 1) to (20, 10, 10); +create table mc3p6 partition of mc3p for values from (20, 10, 10) to (20, 20, 20); +create table mc3p7 partition of mc3p for values from (20, 20, 20) to (maxvalue, maxvalue, maxvalue); + +explain (costs off) select * from mc3p where a = 1; +explain (costs off) select * from mc3p where a = 1 and abs(b) < 1; +explain (costs off) select * from mc3p where a = 1 and abs(b) = 1; +explain (costs off) select * from mc3p where a = 1 and abs(b) = 1 and c < 8; +explain (costs off) select * from mc3p where a = 10 and abs(b) between 5 and 35; +explain (costs off) select * from mc3p where a > 10; +explain (costs off) select * from mc3p where a >= 10; +explain 
(costs off) select * from mc3p where a < 10; +explain (costs off) select * from mc3p where a <= 10 and abs(b) < 10; +explain (costs off) select * from mc3p where a = 11 and abs(b) = 0; +explain (costs off) select * from mc3p where a = 20 and abs(b) = 10 and c = 100; +explain (costs off) select * from mc3p where a > 20; +explain (costs off) select * from mc3p where a >= 20; +explain (costs off) select * from mc3p where (a = 1 and abs(b) = 1 and c = 1) or (a = 10 and abs(b) = 5 and c = 10) or (a > 11 and a < 20); +explain (costs off) select * from mc3p where (a = 1 and abs(b) = 1 and c = 1) or (a = 10 and abs(b) = 5 and c = 10) or (a > 11 and a < 20) or a < 1; +explain (costs off) select * from mc3p where (a = 1 and abs(b) = 1 and c = 1) or (a = 10 and abs(b) = 5 and c = 10) or (a > 11 and a < 20) or a < 1 or a = 1; +explain (costs off) select * from mc3p where a = 1 or abs(b) = 1 or c = 1; +explain (costs off) select * from mc3p where (a = 1 and abs(b) = 1) or (a = 10 and abs(b) = 10); +explain (costs off) select * from mc3p where (a = 1 and abs(b) = 1) or (a = 10 and abs(b) = 9); + +-- a simpler multi-column keys case +create table mc2p (a int, b int) partition by range (a, b); +create table mc2p_default partition of mc2p default; +create table mc2p0 partition of mc2p for values from (minvalue, minvalue) to (1, minvalue); +create table mc2p1 partition of mc2p for values from (1, minvalue) to (1, 1); +create table mc2p2 partition of mc2p for values from (1, 1) to (2, minvalue); +create table mc2p3 partition of mc2p for values from (2, minvalue) to (2, 1); +create table mc2p4 partition of mc2p for values from (2, 1) to (2, maxvalue); +create table mc2p5 partition of mc2p for values from (2, maxvalue) to (maxvalue, maxvalue); + +explain (costs off) select * from mc2p where a < 2; +explain (costs off) select * from mc2p where a = 2 and b < 1; +explain (costs off) select * from mc2p where a > 1; +explain (costs off) select * from mc2p where a = 1 and b > 1; + +-- all partitions but the default one should be pruned +explain (costs off) select * from mc2p where a = 1 and b is null; +explain (costs off) select * from mc2p where a is null and b is null; +explain (costs off) select * from mc2p where a is null and b = 1; +explain (costs off) select * from mc2p where a is null; +explain (costs off) select * from mc2p where b is null; + +-- boolean partitioning +create table boolpart (a bool) partition by list (a); +create table boolpart_default partition of boolpart default; +create table boolpart_t partition of boolpart for values in ('true'); +create table boolpart_f partition of boolpart for values in ('false'); + +explain (costs off) select * from boolpart where a in (true, false); +explain (costs off) select * from boolpart where a = false; +explain (costs off) select * from boolpart where not a = false; +explain (costs off) select * from boolpart where a is true or a is not true; +explain (costs off) select * from boolpart where a is not true; +explain (costs off) select * from boolpart where a is not true and a is not false; +explain (costs off) select * from boolpart where a is unknown; +explain (costs off) select * from boolpart where a is not unknown; + +-- test scalar-to-array operators +create table coercepart (a varchar) partition by list (a); +create table coercepart_ab partition of coercepart for values in ('ab'); +create table coercepart_bc partition of coercepart for values in ('bc'); +create table coercepart_cd partition of coercepart for values in ('cd'); + +explain (costs off) 
select * from coercepart where a in ('ab', to_char(125, '999')); +explain (costs off) select * from coercepart where a ~ any ('{ab}'); +explain (costs off) select * from coercepart where a !~ all ('{ab}'); +explain (costs off) select * from coercepart where a ~ any ('{ab,bc}'); +explain (costs off) select * from coercepart where a !~ all ('{ab,bc}'); + +drop table coercepart; + +CREATE TABLE part (a INT, b INT) PARTITION BY LIST (a); +CREATE TABLE part_p1 PARTITION OF part FOR VALUES IN (-2,-1,0,1,2); +CREATE TABLE part_p2 PARTITION OF part DEFAULT PARTITION BY RANGE(a); +CREATE TABLE part_p2_p1 PARTITION OF part_p2 DEFAULT; +INSERT INTO part VALUES (-1,-1), (1,1), (2,NULL), (NULL,-2),(NULL,NULL); +EXPLAIN (COSTS OFF) SELECT tableoid::regclass as part, a, b FROM part WHERE a IS NULL ORDER BY 1, 2, 3; + +-- +-- some more cases +-- + +-- +-- pruning for partitioned table appearing inside a sub-query +-- +-- pruning won't work for mc3p, because some keys are Params +explain (costs off) select * from mc2p t1, lateral (select count(*) from mc3p t2 where t2.a = t1.b and abs(t2.b) = 1 and t2.c = 1) s where t1.a = 1; + +-- pruning should work fine, because values for a prefix of keys (a, b) are +-- available +explain (costs off) select * from mc2p t1, lateral (select count(*) from mc3p t2 where t2.c = t1.b and abs(t2.b) = 1 and t2.a = 1) s where t1.a = 1; + +-- also here, because values for all keys are provided +explain (costs off) select * from mc2p t1, lateral (select count(*) from mc3p t2 where t2.a = 1 and abs(t2.b) = 1 and t2.c = 1) s where t1.a = 1; + +-- +-- pruning with clauses containing <> operator +-- + +-- doesn't prune range partitions +create table rp (a int) partition by range (a); +create table rp0 partition of rp for values from (minvalue) to (1); +create table rp1 partition of rp for values from (1) to (2); +create table rp2 partition of rp for values from (2) to (maxvalue); + +explain (costs off) select * from rp where a <> 1; +explain (costs off) select * from rp where a <> 1 and a <> 2; + +-- null partition should be eliminated due to strict <> clause. +explain (costs off) select * from lp where a <> 'a'; + +-- ensure we detect contradictions in clauses; a can't be NULL and NOT NULL. 
+explain (costs off) select * from lp where a <> 'a' and a is null; +explain (costs off) select * from lp where (a <> 'a' and a <> 'd') or a is null; + +-- check that it also works for a partitioned table that's not the root, +-- which in this case is a partition of rlp that is itself +-- list-partitioned on b +explain (costs off) select * from rlp where a = 15 and b <> 'ab' and b <> 'cd' and b <> 'xy' and b is not null; + +-- +-- different collations for different keys with same expression +-- +create table coll_pruning_multi (a text) partition by range (substr(a, 1) collate "POSIX", substr(a, 1) collate "C"); +create table coll_pruning_multi1 partition of coll_pruning_multi for values from ('a', 'a') to ('a', 'e'); +create table coll_pruning_multi2 partition of coll_pruning_multi for values from ('a', 'e') to ('a', 'z'); +create table coll_pruning_multi3 partition of coll_pruning_multi for values from ('b', 'a') to ('b', 'e'); + +-- no pruning, because no value for the leading key +explain (costs off) select * from coll_pruning_multi where substr(a, 1) = 'e' collate "C"; + +-- pruning, with a value provided for the leading key +explain (costs off) select * from coll_pruning_multi where substr(a, 1) = 'a' collate "POSIX"; + +-- pruning, with values provided for both keys +explain (costs off) select * from coll_pruning_multi where substr(a, 1) = 'e' collate "C" and substr(a, 1) = 'a' collate "POSIX"; + +-- +-- LIKE operators don't prune +-- +create table like_op_noprune (a text) partition by list (a); +create table like_op_noprune1 partition of like_op_noprune for values in ('ABC'); +create table like_op_noprune2 partition of like_op_noprune for values in ('BCD'); +explain (costs off) select * from like_op_noprune where a like '%BC'; + +-- +-- tests in which the clause value requires a cross-type comparison function +-- +create table lparted_by_int2 (a smallint) partition by list (a); +create table lparted_by_int2_1 partition of lparted_by_int2 for values in (1); +create table lparted_by_int2_16384 partition of lparted_by_int2 for values in (16384); +explain (costs off) select * from lparted_by_int2 where a = 100000000000000; + +create table rparted_by_int2 (a smallint) partition by range (a); +create table rparted_by_int2_1 partition of rparted_by_int2 for values from (1) to (10); +create table rparted_by_int2_16384 partition of rparted_by_int2 for values from (10) to (16384); +-- all partitions pruned +explain (costs off) select * from rparted_by_int2 where a > 100000000000000; +create table rparted_by_int2_maxvalue partition of rparted_by_int2 for values from (16384) to (maxvalue); +-- all partitions but rparted_by_int2_maxvalue pruned +explain (costs off) select * from rparted_by_int2 where a > 100000000000000; + +drop table lp, coll_pruning, rlp, mc3p, mc2p, boolpart, rp, coll_pruning_multi, like_op_noprune, lparted_by_int2, rparted_by_int2; + +-- +-- Test Partition pruning for HASH partitioning +-- +-- Use hand-rolled hash functions and operator classes to get predictable +-- results on different machines. See the definitions of +-- part_test_int4_ops and part_test_text_ops in insert.sql. 
+-- + +create table hp (a int, b text) partition by hash (a part_test_int4_ops, b part_test_text_ops); +create table hp0 partition of hp for values with (modulus 4, remainder 0); +create table hp3 partition of hp for values with (modulus 4, remainder 3); +create table hp1 partition of hp for values with (modulus 4, remainder 1); +create table hp2 partition of hp for values with (modulus 4, remainder 2); + +insert into hp values (null, null); +insert into hp values (1, null); +insert into hp values (1, 'xxx'); +insert into hp values (null, 'xxx'); +insert into hp values (2, 'xxx'); +insert into hp values (1, 'abcde'); +select tableoid::regclass, * from hp order by 1; + +-- partial keys won't prune, nor would non-equality conditions +explain (costs off) select * from hp where a = 1; +explain (costs off) select * from hp where b = 'xxx'; +explain (costs off) select * from hp where a is null; +explain (costs off) select * from hp where b is null; +explain (costs off) select * from hp where a < 1 and b = 'xxx'; +explain (costs off) select * from hp where a <> 1 and b = 'yyy'; +explain (costs off) select * from hp where a <> 1 and b <> 'xxx'; + +-- pruning should work if either a value or a IS NULL clause is provided for +-- each of the keys +explain (costs off) select * from hp where a is null and b is null; +explain (costs off) select * from hp where a = 1 and b is null; +explain (costs off) select * from hp where a = 1 and b = 'xxx'; +explain (costs off) select * from hp where a is null and b = 'xxx'; +explain (costs off) select * from hp where a = 2 and b = 'xxx'; +explain (costs off) select * from hp where a = 1 and b = 'abcde'; +explain (costs off) select * from hp where (a = 1 and b = 'abcde') or (a = 2 and b = 'xxx') or (a is null and b is null); + +drop table hp; + +-- +-- Test runtime partition pruning +-- +create table ab (a int not null, b int not null) partition by list (a); +create table ab_a2 partition of ab for values in(2) partition by list (b); +create table ab_a2_b1 partition of ab_a2 for values in (1); +create table ab_a2_b2 partition of ab_a2 for values in (2); +create table ab_a2_b3 partition of ab_a2 for values in (3); +create table ab_a1 partition of ab for values in(1) partition by list (b); +create table ab_a1_b1 partition of ab_a1 for values in (1); +create table ab_a1_b2 partition of ab_a1 for values in (2); +create table ab_a1_b3 partition of ab_a1 for values in (3); +create table ab_a3 partition of ab for values in(3) partition by list (b); +create table ab_a3_b1 partition of ab_a3 for values in (1); +create table ab_a3_b2 partition of ab_a3 for values in (2); +create table ab_a3_b3 partition of ab_a3 for values in (3); + +-- Disallow index only scans as concurrent transactions may stop visibility +-- bits being set causing "Heap Fetches" to be unstable in the EXPLAIN ANALYZE +-- output. +set enable_indexonlyscan = off; + +prepare ab_q1 (int, int, int) as +select * from ab where a between $1 and $2 and b <= $3; + +-- Execute query 5 times to allow choose_custom_plan +-- to start considering a generic plan. 
+execute ab_q1 (1, 8, 3); +execute ab_q1 (1, 8, 3); +execute ab_q1 (1, 8, 3); +execute ab_q1 (1, 8, 3); +execute ab_q1 (1, 8, 3); + +explain (analyze, costs off, summary off, timing off) execute ab_q1 (2, 2, 3); +explain (analyze, costs off, summary off, timing off) execute ab_q1 (1, 2, 3); + +deallocate ab_q1; + +-- Runtime pruning after optimizer pruning +prepare ab_q1 (int, int) as +select a from ab where a between $1 and $2 and b < 3; + +-- Execute query 5 times to allow choose_custom_plan +-- to start considering a generic plan. +execute ab_q1 (1, 8); +execute ab_q1 (1, 8); +execute ab_q1 (1, 8); +execute ab_q1 (1, 8); +execute ab_q1 (1, 8); + +explain (analyze, costs off, summary off, timing off) execute ab_q1 (2, 2); +explain (analyze, costs off, summary off, timing off) execute ab_q1 (2, 4); + +-- Ensure a mix of PARAM_EXTERN and PARAM_EXEC Params work together at +-- different levels of partitioning. +prepare ab_q2 (int, int) as +select a from ab where a between $1 and $2 and b < (select 3); + +execute ab_q2 (1, 8); +execute ab_q2 (1, 8); +execute ab_q2 (1, 8); +execute ab_q2 (1, 8); +execute ab_q2 (1, 8); + +explain (analyze, costs off, summary off, timing off) execute ab_q2 (2, 2); + +-- As above, but swap the PARAM_EXEC Param to the first partition level +prepare ab_q3 (int, int) as +select a from ab where b between $1 and $2 and a < (select 3); + +execute ab_q3 (1, 8); +execute ab_q3 (1, 8); +execute ab_q3 (1, 8); +execute ab_q3 (1, 8); +execute ab_q3 (1, 8); + +explain (analyze, costs off, summary off, timing off) execute ab_q3 (2, 2); + +-- Test a backwards Append scan +create table list_part (a int) partition by list (a); +create table list_part1 partition of list_part for values in (1); +create table list_part2 partition of list_part for values in (2); +create table list_part3 partition of list_part for values in (3); +create table list_part4 partition of list_part for values in (4); + +insert into list_part select generate_series(1,4); + +begin; + +-- Don't select an actual value out of the table as the order of the Append's +-- subnodes may not be stable. +declare cur SCROLL CURSOR for select 1 from list_part where a > (select 1) and a < (select 4); + +-- move beyond the final row +move 3 from cur; + +-- Ensure we get two rows. +fetch backward all from cur; + +commit; + +begin; + +-- Test run-time pruning using stable functions +create function list_part_fn(int) returns int as $$ begin return $1; end;$$ language plpgsql stable; + +-- Ensure pruning works using a stable function containing no Vars +explain (analyze, costs off, summary off, timing off) select * from list_part where a = list_part_fn(1); + +-- Ensure pruning does not take place when the function has a Var parameter +explain (analyze, costs off, summary off, timing off) select * from list_part where a = list_part_fn(a); + +-- Ensure pruning does not take place when the expression contains a Var. +explain (analyze, costs off, summary off, timing off) select * from list_part where a = list_part_fn(1) + a; + +rollback; + +drop table list_part; + +-- Parallel append + +-- Suppress the number of loops each parallel node runs for. This is because +-- more than one worker may run the same parallel node if timing conditions +-- are just right, which destabilizes the test. 
+create function explain_parallel_append(text) returns setof text +language plpgsql as +$$ +declare + ln text; +begin + for ln in + execute format('explain (analyze, costs off, summary off, timing off) %s', + $1) + loop + if ln like '%Parallel%' then + ln := regexp_replace(ln, 'loops=\d*', 'loops=N'); + end if; + return next ln; + end loop; +end; +$$; + +prepare ab_q4 (int, int) as +select avg(a) from ab where a between $1 and $2 and b < 4; + +-- Encourage use of parallel plans +set parallel_setup_cost = 0; +set parallel_tuple_cost = 0; +set min_parallel_table_scan_size = 0; +set max_parallel_workers_per_gather = 2; + +-- Execute query 5 times to allow choose_custom_plan +-- to start considering a generic plan. +execute ab_q4 (1, 8); +execute ab_q4 (1, 8); +execute ab_q4 (1, 8); +execute ab_q4 (1, 8); +execute ab_q4 (1, 8); +select explain_parallel_append('execute ab_q4 (2, 2)'); + +-- Test run-time pruning with IN lists. +prepare ab_q5 (int, int, int) as +select avg(a) from ab where a in($1,$2,$3) and b < 4; + +-- Execute query 5 times to allow choose_custom_plan +-- to start considering a generic plan. +execute ab_q5 (1, 2, 3); +execute ab_q5 (1, 2, 3); +execute ab_q5 (1, 2, 3); +execute ab_q5 (1, 2, 3); +execute ab_q5 (1, 2, 3); + +select explain_parallel_append('execute ab_q5 (1, 1, 1)'); +select explain_parallel_append('execute ab_q5 (2, 3, 3)'); + +-- Try some params whose values do not belong to any partition. +-- We'll still get a single subplan in this case, but it should not be scanned. +select explain_parallel_append('execute ab_q5 (33, 44, 55)'); + +-- Test Parallel Append with PARAM_EXEC Params +select explain_parallel_append('select count(*) from ab where (a = (select 1) or a = (select 3)) and b = 2'); + +-- Test pruning during parallel nested loop query +create table lprt_a (a int not null); +-- Insert some values we won't find in ab +insert into lprt_a select 0 from generate_series(1,100); + +-- and insert some values that we should find. +insert into lprt_a values(1),(1); + +analyze lprt_a; + +create index ab_a2_b1_a_idx on ab_a2_b1 (a); +create index ab_a2_b2_a_idx on ab_a2_b2 (a); +create index ab_a2_b3_a_idx on ab_a2_b3 (a); +create index ab_a1_b1_a_idx on ab_a1_b1 (a); +create index ab_a1_b2_a_idx on ab_a1_b2 (a); +create index ab_a1_b3_a_idx on ab_a1_b3 (a); +create index ab_a3_b1_a_idx on ab_a3_b1 (a); +create index ab_a3_b2_a_idx on ab_a3_b2 (a); +create index ab_a3_b3_a_idx on ab_a3_b3 (a); + +set enable_hashjoin = 0; +set enable_mergejoin = 0; + +select explain_parallel_append('select avg(ab.a) from ab inner join lprt_a a on ab.a = a.a where a.a in(0, 0, 1)'); + +-- Ensure the same partitions are pruned when we make the nested loop +-- parameter an Expr rather than a plain Param. 
+select explain_parallel_append('select avg(ab.a) from ab inner join lprt_a a on ab.a = a.a + 0 where a.a in(0, 0, 1)'); + +insert into lprt_a values(3),(3); + +select explain_parallel_append('select avg(ab.a) from ab inner join lprt_a a on ab.a = a.a where a.a in(1, 0, 3)'); +select explain_parallel_append('select avg(ab.a) from ab inner join lprt_a a on ab.a = a.a where a.a in(1, 0, 0)'); + +delete from lprt_a where a = 1; + +select explain_parallel_append('select avg(ab.a) from ab inner join lprt_a a on ab.a = a.a where a.a in(1, 0, 0)'); + +reset enable_hashjoin; +reset enable_mergejoin; +reset parallel_setup_cost; +reset parallel_tuple_cost; +reset min_parallel_table_scan_size; +reset max_parallel_workers_per_gather; + +-- Test run-time partition pruning with an initplan +explain (analyze, costs off, summary off, timing off) +select * from ab where a = (select max(a) from lprt_a) and b = (select max(a)-1 from lprt_a); + +-- Test run-time partition pruning with UNION ALL parents +explain (analyze, costs off, summary off, timing off) +select * from (select * from ab where a = 1 union all select * from ab) ab where b = (select 1); + +-- A case containing a UNION ALL with a non-partitioned child. +explain (analyze, costs off, summary off, timing off) +select * from (select * from ab where a = 1 union all (values(10,5)) union all select * from ab) ab where b = (select 1); + +deallocate ab_q1; +deallocate ab_q2; +deallocate ab_q3; +deallocate ab_q4; +deallocate ab_q5; + +-- UPDATE on a partition subtree has been seen to have problems. +insert into ab values (1,2); +explain (analyze, costs off, summary off, timing off) +update ab_a1 set b = 3 from ab where ab.a = 1 and ab.a = ab_a1.a; +table ab; + +drop table ab, lprt_a; + +-- Join +create table tbl1(col1 int); +insert into tbl1 values (501), (505); + +-- Basic table +create table tprt (col1 int) partition by range (col1); +create table tprt_1 partition of tprt for values from (1) to (501); +create table tprt_2 partition of tprt for values from (501) to (1001); +create table tprt_3 partition of tprt for values from (1001) to (2001); +create table tprt_4 partition of tprt for values from (2001) to (3001); +create table tprt_5 partition of tprt for values from (3001) to (4001); +create table tprt_6 partition of tprt for values from (4001) to (5001); + +create index tprt1_idx on tprt_1 (col1); +create index tprt2_idx on tprt_2 (col1); +create index tprt3_idx on tprt_3 (col1); +create index tprt4_idx on tprt_4 (col1); +create index tprt5_idx on tprt_5 (col1); +create index tprt6_idx on tprt_6 (col1); + +insert into tprt values (10), (20), (501), (502), (505), (1001), (4500); + +set enable_hashjoin = off; +set enable_mergejoin = off; + +explain (analyze, costs off, summary off, timing off) +select * from tbl1 join tprt on tbl1.col1 > tprt.col1; + +explain (analyze, costs off, summary off, timing off) +select * from tbl1 join tprt on tbl1.col1 = tprt.col1; + +select tbl1.col1, tprt.col1 from tbl1 +inner join tprt on tbl1.col1 > tprt.col1 +order by tbl1.col1, tprt.col1; + +select tbl1.col1, tprt.col1 from tbl1 +inner join tprt on tbl1.col1 = tprt.col1 +order by tbl1.col1, tprt.col1; + +-- Multiple partitions +insert into tbl1 values (1001), (1010), (1011); +explain (analyze, costs off, summary off, timing off) +select * from tbl1 inner join tprt on tbl1.col1 > tprt.col1; + +explain (analyze, costs off, summary off, timing off) +select * from tbl1 inner join tprt on tbl1.col1 = tprt.col1; + +select tbl1.col1, tprt.col1 from tbl1 +inner join tprt on 
tbl1.col1 > tprt.col1 +order by tbl1.col1, tprt.col1; + +select tbl1.col1, tprt.col1 from tbl1 +inner join tprt on tbl1.col1 = tprt.col1 +order by tbl1.col1, tprt.col1; + +-- Last partition +delete from tbl1; +insert into tbl1 values (4400); +explain (analyze, costs off, summary off, timing off) +select * from tbl1 join tprt on tbl1.col1 < tprt.col1; + +select tbl1.col1, tprt.col1 from tbl1 +inner join tprt on tbl1.col1 < tprt.col1 +order by tbl1.col1, tprt.col1; + +-- No matching partition +delete from tbl1; +insert into tbl1 values (10000); +explain (analyze, costs off, summary off, timing off) +select * from tbl1 join tprt on tbl1.col1 = tprt.col1; + +select tbl1.col1, tprt.col1 from tbl1 +inner join tprt on tbl1.col1 = tprt.col1 +order by tbl1.col1, tprt.col1; + +drop table tbl1, tprt; + +-- Test with columns defined in varying orders between each level +create table part_abc (a int not null, b int not null, c int not null) partition by list (a); +create table part_bac (b int not null, a int not null, c int not null) partition by list (b); +create table part_cab (c int not null, a int not null, b int not null) partition by list (c); +create table part_abc_p1 (a int not null, b int not null, c int not null); + +alter table part_abc attach partition part_bac for values in(1); +alter table part_bac attach partition part_cab for values in(2); +alter table part_cab attach partition part_abc_p1 for values in(3); + +prepare part_abc_q1 (int, int, int) as +select * from part_abc where a = $1 and b = $2 and c = $3; + +-- Execute query 5 times to allow choose_custom_plan +-- to start considering a generic plan. +execute part_abc_q1 (1, 2, 3); +execute part_abc_q1 (1, 2, 3); +execute part_abc_q1 (1, 2, 3); +execute part_abc_q1 (1, 2, 3); +execute part_abc_q1 (1, 2, 3); + +-- Single partition should be scanned. +explain (analyze, costs off, summary off, timing off) execute part_abc_q1 (1, 2, 3); + +deallocate part_abc_q1; + +drop table part_abc; + +-- Ensure that an Append node properly handles a sub-partitioned table that +-- matches even though none of its leaf partitions match the clause. +create table listp (a int, b int) partition by list (a); +create table listp_1 partition of listp for values in(1) partition by list (b); +create table listp_1_1 partition of listp_1 for values in(1); +create table listp_2 partition of listp for values in(2) partition by list (b); +create table listp_2_1 partition of listp_2 for values in(2); +select * from listp where b = 1; + +-- Ensure that an Append node can properly handle selection of all first-level +-- partitions before finally detecting the correct set of second-level partitions +-- that match the given parameter. +prepare q1 (int,int) as select * from listp where b in ($1,$2); + +execute q1 (1,2); +execute q1 (1,2); +execute q1 (1,2); +execute q1 (1,2); +execute q1 (1,2); + +explain (analyze, costs off, summary off, timing off) execute q1 (1,1); + +explain (analyze, costs off, summary off, timing off) execute q1 (2,2); + +-- Try with no matching partitions. One subplan should remain in this case, +-- but it shouldn't be executed. +explain (analyze, costs off, summary off, timing off) execute q1 (0,0); + +deallocate q1; + +-- Test more complex cases where a not-equal condition further eliminates partitions. 
+prepare q1 (int,int,int,int) as select * from listp where b in($1,$2) and $3 <> b and $4 <> b; + +execute q1 (1,2,3,4); +execute q1 (1,2,3,4); +execute q1 (1,2,3,4); +execute q1 (1,2,3,4); +execute q1 (1,2,3,4); + +-- Both partitions allowed by IN clause, but one disallowed by <> clause +explain (analyze, costs off, summary off, timing off) execute q1 (1,2,2,0); + +-- Both partitions allowed by IN clause, then both excluded again by <> clauses. +-- One subplan will remain in this case, but it should not be executed. +explain (analyze, costs off, summary off, timing off) execute q1 (1,2,2,1); + +-- Ensure Params that evaluate to NULL properly prune away all partitions +explain (analyze, costs off, summary off, timing off) +select * from listp where a = (select null::int); + +drop table listp; + +-- Ensure runtime pruning works with initplans params with boolean types +create table boolvalues (value bool not null); +insert into boolvalues values('t'),('f'); + +create table boolp (a bool) partition by list (a); +create table boolp_t partition of boolp for values in('t'); +create table boolp_f partition of boolp for values in('f'); + +explain (analyze, costs off, summary off, timing off) +select * from boolp where a = (select value from boolvalues where value); + +explain (analyze, costs off, summary off, timing off) +select * from boolp where a = (select value from boolvalues where not value); + +drop table boolp; + +-- +-- Test run-time pruning of MergeAppend subnodes +-- +set enable_seqscan = off; +set enable_sort = off; +create table ma_test (a int) partition by range (a); +create table ma_test_p1 partition of ma_test for values from (0) to (10); +create table ma_test_p2 partition of ma_test for values from (10) to (20); +create table ma_test_p3 partition of ma_test for values from (20) to (30); +insert into ma_test select x from generate_series(0,29) t(x); +create index on ma_test (a); + +analyze ma_test; +prepare mt_q1 (int) as select * from ma_test where a >= $1 and a % 10 = 5 order by a; + +-- Execute query 5 times to allow choose_custom_plan +-- to start considering a generic plan. 
+execute mt_q1(0); +execute mt_q1(0); +execute mt_q1(0); +execute mt_q1(0); +execute mt_q1(0); + +explain (analyze, costs off, summary off, timing off) execute mt_q1(15); +execute mt_q1(15); +explain (analyze, costs off, summary off, timing off) execute mt_q1(25); +execute mt_q1(25); +-- Ensure MergeAppend behaves correctly when no subplans match +explain (analyze, costs off, summary off, timing off) execute mt_q1(35); +execute mt_q1(35); + +deallocate mt_q1; + +-- ensure initplan params properly prune partitions +explain (analyze, costs off, summary off, timing off) select * from ma_test where a >= (select min(a) from ma_test_p2) order by a; + +reset enable_seqscan; +reset enable_sort; + +drop table ma_test; + +reset enable_indexonlyscan; + +-- +-- check that pruning works properly when the partition key is of a +-- pseudotype +-- + +-- array type list partition key +create table pp_arrpart (a int[]) partition by list (a); +create table pp_arrpart1 partition of pp_arrpart for values in ('{1}'); +create table pp_arrpart2 partition of pp_arrpart for values in ('{2, 3}', '{4, 5}'); +explain (costs off) select * from pp_arrpart where a = '{1}'; +explain (costs off) select * from pp_arrpart where a = '{1, 2}'; +explain (costs off) select * from pp_arrpart where a in ('{4, 5}', '{1}'); +explain (costs off) update pp_arrpart set a = a where a = '{1}'; +explain (costs off) delete from pp_arrpart where a = '{1}'; +drop table pp_arrpart; + +-- array type hash partition key +create table pph_arrpart (a int[]) partition by hash (a); +create table pph_arrpart1 partition of pph_arrpart for values with (modulus 2, remainder 0); +create table pph_arrpart2 partition of pph_arrpart for values with (modulus 2, remainder 1); +insert into pph_arrpart values ('{1}'), ('{1, 2}'), ('{4, 5}'); +select tableoid::regclass, * from pph_arrpart order by 1; +explain (costs off) select * from pph_arrpart where a = '{1}'; +explain (costs off) select * from pph_arrpart where a = '{1, 2}'; +explain (costs off) select * from pph_arrpart where a in ('{4, 5}', '{1}'); +drop table pph_arrpart; + +-- enum type list partition key +create type pp_colors as enum ('green', 'blue', 'black'); +create table pp_enumpart (a pp_colors) partition by list (a); +create table pp_enumpart_green partition of pp_enumpart for values in ('green'); +create table pp_enumpart_blue partition of pp_enumpart for values in ('blue'); +explain (costs off) select * from pp_enumpart where a = 'blue'; +explain (costs off) select * from pp_enumpart where a = 'black'; +drop table pp_enumpart; +drop type pp_colors; + +-- record type as partition key +create type pp_rectype as (a int, b int); +create table pp_recpart (a pp_rectype) partition by list (a); +create table pp_recpart_11 partition of pp_recpart for values in ('(1,1)'); +create table pp_recpart_23 partition of pp_recpart for values in ('(2,3)'); +explain (costs off) select * from pp_recpart where a = '(1,1)'::pp_rectype; +explain (costs off) select * from pp_recpart where a = '(1,2)'::pp_rectype; +drop table pp_recpart; +drop type pp_rectype; + +-- range type partition key +create table pp_intrangepart (a int4range) partition by list (a); +create table pp_intrangepart12 partition of pp_intrangepart for values in ('[1,2]'); +create table pp_intrangepart2inf partition of pp_intrangepart for values in ('[2,)'); +explain (costs off) select * from pp_intrangepart where a = '[1,2]'::int4range; +explain (costs off) select * from pp_intrangepart where a = '(1,2)'::int4range; +drop table pp_intrangepart; + +-- 
+-- Ensure the enable_partition_pruning GUC properly disables partition pruning. +-- + +create table pp_lp (a int, value int) partition by list (a); +create table pp_lp1 partition of pp_lp for values in(1); +create table pp_lp2 partition of pp_lp for values in(2); + +explain (costs off) select * from pp_lp where a = 1; +explain (costs off) update pp_lp set value = 10 where a = 1; +explain (costs off) delete from pp_lp where a = 1; + +set enable_partition_pruning = off; + +set constraint_exclusion = 'partition'; -- this should not affect the result. + +explain (costs off) select * from pp_lp where a = 1; +explain (costs off) update pp_lp set value = 10 where a = 1; +explain (costs off) delete from pp_lp where a = 1; + +set constraint_exclusion = 'off'; -- this should not affect the result. + +explain (costs off) select * from pp_lp where a = 1; +explain (costs off) update pp_lp set value = 10 where a = 1; +explain (costs off) delete from pp_lp where a = 1; + +drop table pp_lp; + +-- Ensure enable_partition_pruning does not affect non-partitioned tables. + +create table inh_lp (a int, value int); +create table inh_lp1 (a int, value int, check(a = 1)) inherits (inh_lp); +create table inh_lp2 (a int, value int, check(a = 2)) inherits (inh_lp); + +set constraint_exclusion = 'partition'; + +-- inh_lp2 should be removed in the following 3 cases. +explain (costs off) select * from inh_lp where a = 1; +explain (costs off) update inh_lp set value = 10 where a = 1; +explain (costs off) delete from inh_lp where a = 1; + +-- Ensure we don't exclude normal relations when we only expect to exclude +-- inheritance children +explain (costs off) update inh_lp1 set value = 10 where a = 2; + +\set VERBOSITY terse \\ -- suppress cascade details +drop table inh_lp cascade; +\set VERBOSITY default + +reset enable_partition_pruning; +reset constraint_exclusion; + +-- Check pruning for a partition tree containing only temporary relations +create temp table pp_temp_parent (a int) partition by list (a); +create temp table pp_temp_part_1 partition of pp_temp_parent for values in (1); +create temp table pp_temp_part_def partition of pp_temp_parent default; +explain (costs off) select * from pp_temp_parent where true; +explain (costs off) select * from pp_temp_parent where a = 2; +drop table pp_temp_parent; + +-- Stress run-time partition pruning a bit more, per bug reports +create temp table p (a int, b int, c int) partition by list (a); +create temp table p1 partition of p for values in (1); +create temp table p2 partition of p for values in (2); +create temp table q (a int, b int, c int) partition by list (a); +create temp table q1 partition of q for values in (1) partition by list (b); +create temp table q11 partition of q1 for values in (1) partition by list (c); +create temp table q111 partition of q11 for values in (1); +create temp table q2 partition of q for values in (2) partition by list (b); +create temp table q21 partition of q2 for values in (1); +create temp table q22 partition of q2 for values in (2); + +insert into q22 values (2, 2, 3); + +explain (costs off) +select * +from ( + select * from p + union all + select * from q1 + union all + select 1, 1, 1 + ) s(a, b, c) +where s.a = 1 and s.b = 1 and s.c = (select 1); + +select * +from ( + select * from p + union all + select * from q1 + union all + select 1, 1, 1 + ) s(a, b, c) +where s.a = 1 and s.b = 1 and s.c = (select 1); + +prepare q (int, int) as +select * +from ( + select * from p + union all + select * from q1 + union all + select 1, 1, 1 + ) s(a, 
b, c) +where s.a = $1 and s.b = $2 and s.c = (select 1); + +set plan_cache_mode to force_generic_plan; + +explain (costs off) execute q (1, 1); +execute q (1, 1); + +reset plan_cache_mode; +drop table p, q; + +-- Ensure run-time pruning works correctly when we match a partitioned table +-- on the first level but find no matching partitions on the second level. +create table listp (a int, b int) partition by list (a); +create table listp1 partition of listp for values in(1); +create table listp2 partition of listp for values in(2) partition by list(b); +create table listp2_10 partition of listp2 for values in (10); + +explain (analyze, costs off, summary off, timing off) +select * from listp where a = (select 2) and b <> 10; + +drop table listp; diff --git a/src/test/regress/sql/path.sql b/src/test/regress/sql/path.sql index 7e69b539ad..318decf974 100644 --- a/src/test/regress/sql/path.sql +++ b/src/test/regress/sql/path.sql @@ -8,26 +8,32 @@ CREATE TABLE PATH_TBL (f1 path); INSERT INTO PATH_TBL VALUES ('[(1,2),(3,4)]'); -INSERT INTO PATH_TBL VALUES ('((1,2),(3,4))'); +INSERT INTO PATH_TBL VALUES (' ( ( 1 , 2 ) , ( 3 , 4 ) ) '); -INSERT INTO PATH_TBL VALUES ('[(0,0),(3,0),(4,5),(1,6)]'); +INSERT INTO PATH_TBL VALUES ('[ (0,0),(3,0),(4,5),(1,6) ]'); -INSERT INTO PATH_TBL VALUES ('((1,2),(3,4))'); +INSERT INTO PATH_TBL VALUES ('((1,2) ,(3,4 ))'); -INSERT INTO PATH_TBL VALUES ('1,2 ,3,4'); +INSERT INTO PATH_TBL VALUES ('1,2 ,3,4 '); -INSERT INTO PATH_TBL VALUES ('[1,2,3, 4]'); +INSERT INTO PATH_TBL VALUES (' [1,2,3, 4] '); -INSERT INTO PATH_TBL VALUES ('[11,12,13,14]'); +INSERT INTO PATH_TBL VALUES ('((10,20))'); -- Only one point -INSERT INTO PATH_TBL VALUES ('(11,12,13,14)'); +INSERT INTO PATH_TBL VALUES ('[ 11,12,13,14 ]'); + +INSERT INTO PATH_TBL VALUES ('( 11,12,13,14) '); -- bad values for parser testing +INSERT INTO PATH_TBL VALUES ('[]'); + INSERT INTO PATH_TBL VALUES ('[(,2),(3,4)]'); INSERT INTO PATH_TBL VALUES ('[(1,2),(3,4)'); -SELECT f1 FROM PATH_TBL; +INSERT INTO PATH_TBL VALUES ('(1,2,3,4'); + +INSERT INTO PATH_TBL VALUES ('(1,2),(3,4)]'); SELECT '' AS count, f1 AS open_path FROM PATH_TBL WHERE isopen(f1); diff --git a/src/test/regress/sql/plancache.sql b/src/test/regress/sql/plancache.sql index bc2086166b..fa218c8d21 100644 --- a/src/test/regress/sql/plancache.sql +++ b/src/test/regress/sql/plancache.sql @@ -156,3 +156,57 @@ end$$ language plpgsql; select cachebug(); select cachebug(); + +-- Check that addition or removal of any partition is correctly dealt with by +-- default partition table when it is being used in prepared statement. 
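A minimal sketch of the behavior these plancache tests exercise, kept separate from the patch itself and using hypothetical object names (t, t_def, ins) rather than the patch's own, might look like this: a prepared INSERT that targets the default partition directly must have its cached plan invalidated when a sibling partition appears, so that the default partition's updated constraint is enforced.

  create table t (a int) partition by list (a);
  create table t_def partition of t default;
  prepare ins (int) as insert into t_def values ($1);
  execute ins (1);    -- ok: with no sibling partitions, the default accepts 1
  create table t_1 partition of t for values in (1);
  execute ins (1);    -- expected to fail now: 1 belongs to t_1, so the cached
                      -- plan must be rebuilt and the new constraint rechecked

The tests that follow do the same thing with pc_list_parted and pstmt_def_insert, covering both attaching/creating and detaching/dropping partitions.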
+create table pc_list_parted (a int) partition by list(a); +create table pc_list_part_null partition of pc_list_parted for values in (null); +create table pc_list_part_1 partition of pc_list_parted for values in (1); +create table pc_list_part_def partition of pc_list_parted default; +prepare pstmt_def_insert (int) as insert into pc_list_part_def values($1); +-- should fail +execute pstmt_def_insert(null); +execute pstmt_def_insert(1); +create table pc_list_part_2 partition of pc_list_parted for values in (2); +execute pstmt_def_insert(2); +alter table pc_list_parted detach partition pc_list_part_null; +-- should be ok +execute pstmt_def_insert(null); +drop table pc_list_part_1; +-- should be ok +execute pstmt_def_insert(1); +drop table pc_list_parted, pc_list_part_null; +deallocate pstmt_def_insert; + +-- Test plan_cache_mode + +create table test_mode (a int); +insert into test_mode select 1 from generate_series(1,1000) union all select 2; +create index on test_mode (a); +analyze test_mode; + +prepare test_mode_pp (int) as select count(*) from test_mode where a = $1; + +-- up to 5 executions, custom plan is used +explain (costs off) execute test_mode_pp(2); + +-- force generic plan +set plan_cache_mode to force_generic_plan; +explain (costs off) execute test_mode_pp(2); + +-- get to generic plan by 5 executions +set plan_cache_mode to auto; +execute test_mode_pp(1); -- 1x +execute test_mode_pp(1); -- 2x +execute test_mode_pp(1); -- 3x +execute test_mode_pp(1); -- 4x +execute test_mode_pp(1); -- 5x + +-- we should now get a really bad plan +explain (costs off) execute test_mode_pp(2); + +-- but we can force a custom plan +set plan_cache_mode to force_custom_plan; +explain (costs off) execute test_mode_pp(2); + +drop table test_mode; diff --git a/src/test/regress/sql/plpgsql.sql b/src/test/regress/sql/plpgsql.sql index 771d68282e..01239e26be 100644 --- a/src/test/regress/sql/plpgsql.sql +++ b/src/test/regress/sql/plpgsql.sql @@ -1648,7 +1648,7 @@ create table perform_test ( b INT ); -create function simple_func(int) returns boolean as ' +create function perform_simple_func(int) returns boolean as ' BEGIN IF $1 < 20 THEN INSERT INTO perform_test VALUES ($1, $1 + 10); @@ -1664,13 +1664,13 @@ BEGIN INSERT INTO perform_test VALUES (100, 100); END IF; - PERFORM simple_func(5); + PERFORM perform_simple_func(5); IF FOUND then INSERT INTO perform_test VALUES (100, 100); END IF; - PERFORM simple_func(50); + PERFORM perform_simple_func(50); IF FOUND then INSERT INTO perform_test VALUES (100, 100); @@ -1910,6 +1910,28 @@ copy rc_test from stdin; 500 1000 \. 
+create function return_unnamed_refcursor() returns refcursor as $$ +declare + rc refcursor; +begin + open rc for select a from rc_test; + return rc; +end +$$ language plpgsql; + +create function use_refcursor(rc refcursor) returns int as $$ +declare + rc refcursor; + x record; +begin + rc := return_unnamed_refcursor(); + fetch next from rc into x; + return x.a; +end +$$ language plpgsql; + +select use_refcursor(return_unnamed_refcursor()); + create function return_refcursor(rc refcursor) returns refcursor as $$ begin open rc for select a from rc_test; @@ -2285,241 +2307,6 @@ end;$$ language plpgsql; select raise_exprs(); drop function raise_exprs(); --- continue statement -create table conttesttbl(idx serial, v integer); -insert into conttesttbl(v) values(10); -insert into conttesttbl(v) values(20); -insert into conttesttbl(v) values(30); -insert into conttesttbl(v) values(40); - -create function continue_test1() returns void as $$ -declare _i integer = 0; _r record; -begin - raise notice '---1---'; - loop - _i := _i + 1; - raise notice '%', _i; - continue when _i < 10; - exit; - end loop; - - raise notice '---2---'; - <<lbl>> - loop - _i := _i - 1; - loop - raise notice '%', _i; - continue lbl when _i > 0; - exit lbl; - end loop; - end loop; - - raise notice '---3---'; - <<the_loop>> - while _i < 10 loop - _i := _i + 1; - continue the_loop when _i % 2 = 0; - raise notice '%', _i; - end loop; - - raise notice '---4---'; - for _i in 1..10 loop - begin - -- applies to outer loop, not the nested begin block - continue when _i < 5; - raise notice '%', _i; - end; - end loop; - - raise notice '---5---'; - for _r in select * from conttesttbl loop - continue when _r.v <= 20; - raise notice '%', _r.v; - end loop; - - raise notice '---6---'; - for _r in execute 'select * from conttesttbl' loop - continue when _r.v <= 20; - raise notice '%', _r.v; - end loop; - - raise notice '---7---'; - for _i in 1..3 loop - raise notice '%', _i; - continue when _i = 3; - end loop; - - raise notice '---8---'; - _i := 1; - while _i <= 3 loop - raise notice '%', _i; - _i := _i + 1; - continue when _i = 3; - end loop; - - raise notice '---9---'; - for _r in select * from conttesttbl order by v limit 1 loop - raise notice '%', _r.v; - continue; - end loop; - - raise notice '---10---'; - for _r in execute 'select * from conttesttbl order by v limit 1' loop - raise notice '%', _r.v; - continue; - end loop; -end; $$ language plpgsql; - -select continue_test1(); - -drop function continue_test1(); -drop table conttesttbl; - --- should fail: CONTINUE is only legal inside a loop -create function continue_error1() returns void as $$ -begin - begin - continue; - end; -end; -$$ language plpgsql; - --- should fail: unlabeled EXIT is only legal inside a loop -create function exit_error1() returns void as $$ -begin - begin - exit; - end; -end; -$$ language plpgsql; - --- should fail: no such label -create function continue_error2() returns void as $$ -begin - begin - loop - continue no_such_label; - end loop; - end; -end; -$$ language plpgsql; - --- should fail: no such label -create function exit_error2() returns void as $$ -begin - begin - loop - exit no_such_label; - end loop; - end; -end; -$$ language plpgsql; - --- should fail: CONTINUE can't reference the label of a named block -create function continue_error3() returns void as $$ -begin - <<begin_block1>> - begin - loop - continue begin_block1; - end loop; - end; -end; -$$ language plpgsql; - --- On the other hand, EXIT *can* reference the label of a named block -create function exit_block1() returns 
void as $$ -begin - <<begin_block1>> - begin - loop - exit begin_block1; - raise exception 'should not get here'; - end loop; - end; -end; -$$ language plpgsql; - -select exit_block1(); -drop function exit_block1(); - --- verbose end block and end loop -create function end_label1() returns void as $$ -<<blbl>> -begin - <<flbl1>> - for _i in 1 .. 10 loop - exit flbl1; - end loop flbl1; - <<flbl2>> - for _i in 1 .. 10 loop - exit flbl2; - end loop; -end blbl; -$$ language plpgsql; - -select end_label1(); -drop function end_label1(); - --- should fail: undefined end label -create function end_label2() returns void as $$ -begin - for _i in 1 .. 10 loop - exit; - end loop flbl1; -end; -$$ language plpgsql; - --- should fail: end label does not match start label -create function end_label3() returns void as $$ -<<outer_label>> -begin - <<inner_label>> - for _i in 1 .. 10 loop - exit; - end loop outer_label; -end; -$$ language plpgsql; - --- should fail: end label on a block without a start label -create function end_label4() returns void as $$ -<<outer_label>> -begin - for _i in 1 .. 10 loop - exit; - end loop outer_label; -end; -$$ language plpgsql; - --- using list of scalars in fori and fore stmts -create function for_vect() returns void as $proc$ -<<lbl>>declare a integer; b varchar; c varchar; r record; -begin - -- fori - for i in 1 .. 3 loop - raise notice '%', i; - end loop; - -- fore with record var - for r in select gs as aa, 'BB' as bb, 'CC' as cc from generate_series(1,4) gs loop - raise notice '% % %', r.aa, r.bb, r.cc; - end loop; - -- fore with single scalar - for a in select gs from generate_series(1,4) gs loop - raise notice '%', a; - end loop; - -- fore with multiple scalars - for a,b,c in select gs, 'BB','CC' from generate_series(1,4) gs loop - raise notice '% % %', a, b, c; - end loop; - -- using qualified names in fors, fore is enabled, disabled only for fori - for lbl.a, lbl.b, lbl.c in execute $$select gs, 'bb','cc' from generate_series(1,4) gs$$ loop - raise notice '% % %', a, b, c; - end loop; -end; -$proc$ language plpgsql; - -select for_vect(); - -- regression test: verify that multiple uses of same plpgsql datum within -- a SQL command all get mapped to the same $n parameter. 
The return value -- of the SELECT is not important, we only care that it doesn't fail with @@ -2545,7 +2332,7 @@ create temp table foo (f1 int, f2 int); insert into foo values (1,2), (3,4); -create or replace function footest() returns void as $$ +create or replace function stricttest() returns void as $$ declare x record; begin -- should work @@ -2553,9 +2340,9 @@ begin raise notice 'x.f1 = %, x.f2 = %', x.f1, x.f2; end$$ language plpgsql; -select footest(); +select stricttest(); -create or replace function footest() returns void as $$ +create or replace function stricttest() returns void as $$ declare x record; begin -- should fail due to implicit strict @@ -2563,9 +2350,9 @@ begin raise notice 'x.f1 = %, x.f2 = %', x.f1, x.f2; end$$ language plpgsql; -select footest(); +select stricttest(); -create or replace function footest() returns void as $$ +create or replace function stricttest() returns void as $$ declare x record; begin -- should work @@ -2573,9 +2360,9 @@ begin raise notice 'x.f1 = %, x.f2 = %', x.f1, x.f2; end$$ language plpgsql; -select footest(); +select stricttest(); -create or replace function footest() returns void as $$ +create or replace function stricttest() returns void as $$ declare x record; begin -- this should work since EXECUTE isn't as picky @@ -2583,11 +2370,11 @@ begin raise notice 'x.f1 = %, x.f2 = %', x.f1, x.f2; end$$ language plpgsql; -select footest(); +select stricttest(); select * from foo; -create or replace function footest() returns void as $$ +create or replace function stricttest() returns void as $$ declare x record; begin -- should work @@ -2595,9 +2382,9 @@ begin raise notice 'x.f1 = %, x.f2 = %', x.f1, x.f2; end$$ language plpgsql; -select footest(); +select stricttest(); -create or replace function footest() returns void as $$ +create or replace function stricttest() returns void as $$ declare x record; begin -- should fail, no rows @@ -2605,9 +2392,9 @@ begin raise notice 'x.f1 = %, x.f2 = %', x.f1, x.f2; end$$ language plpgsql; -select footest(); +select stricttest(); -create or replace function footest() returns void as $$ +create or replace function stricttest() returns void as $$ declare x record; begin -- should fail, too many rows @@ -2615,9 +2402,9 @@ begin raise notice 'x.f1 = %, x.f2 = %', x.f1, x.f2; end$$ language plpgsql; -select footest(); +select stricttest(); -create or replace function footest() returns void as $$ +create or replace function stricttest() returns void as $$ declare x record; begin -- should work @@ -2625,9 +2412,9 @@ begin raise notice 'x.f1 = %, x.f2 = %', x.f1, x.f2; end$$ language plpgsql; -select footest(); +select stricttest(); -create or replace function footest() returns void as $$ +create or replace function stricttest() returns void as $$ declare x record; begin -- should fail, no rows @@ -2635,9 +2422,9 @@ begin raise notice 'x.f1 = %, x.f2 = %', x.f1, x.f2; end$$ language plpgsql; -select footest(); +select stricttest(); -create or replace function footest() returns void as $$ +create or replace function stricttest() returns void as $$ declare x record; begin -- should fail, too many rows @@ -2645,15 +2432,15 @@ begin raise notice 'x.f1 = %, x.f2 = %', x.f1, x.f2; end$$ language plpgsql; -select footest(); +select stricttest(); -drop function footest(); +drop function stricttest(); -- test printing parameters after failure due to STRICT set plpgsql.print_strict_params to true; -create or replace function footest() returns void as $$ +create or replace function stricttest() returns void as $$ declare 
x record; p1 int := 2; @@ -2664,9 +2451,9 @@ begin raise notice 'x.f1 = %, x.f2 = %', x.f1, x.f2; end$$ language plpgsql; -select footest(); +select stricttest(); -create or replace function footest() returns void as $$ +create or replace function stricttest() returns void as $$ declare x record; p1 int := 2; @@ -2677,9 +2464,9 @@ begin raise notice 'x.f1 = %, x.f2 = %', x.f1, x.f2; end$$ language plpgsql; -select footest(); +select stricttest(); -create or replace function footest() returns void as $$ +create or replace function stricttest() returns void as $$ declare x record; begin -- too many rows, no params @@ -2687,9 +2474,9 @@ begin raise notice 'x.f1 = %, x.f2 = %', x.f1, x.f2; end$$ language plpgsql; -select footest(); +select stricttest(); -create or replace function footest() returns void as $$ +create or replace function stricttest() returns void as $$ declare x record; begin -- no rows @@ -2697,9 +2484,9 @@ begin raise notice 'x.f1 = %, x.f2 = %', x.f1, x.f2; end$$ language plpgsql; -select footest(); +select stricttest(); -create or replace function footest() returns void as $$ +create or replace function stricttest() returns void as $$ declare x record; begin -- too many rows @@ -2707,9 +2494,9 @@ begin raise notice 'x.f1 = %, x.f2 = %', x.f1, x.f2; end$$ language plpgsql; -select footest(); +select stricttest(); -create or replace function footest() returns void as $$ +create or replace function stricttest() returns void as $$ declare x record; begin -- too many rows, no parameters @@ -2717,9 +2504,9 @@ begin raise notice 'x.f1 = %, x.f2 = %', x.f1, x.f2; end$$ language plpgsql; -select footest(); +select stricttest(); -create or replace function footest() returns void as $$ +create or replace function stricttest() returns void as $$ -- override the global #print_strict_params off declare @@ -2732,11 +2519,11 @@ begin raise notice 'x.f1 = %, x.f2 = %', x.f1, x.f2; end$$ language plpgsql; -select footest(); +select stricttest(); reset plpgsql.print_strict_params; -create or replace function footest() returns void as $$ +create or replace function stricttest() returns void as $$ -- override the global #print_strict_params on declare @@ -2749,7 +2536,7 @@ begin raise notice 'x.f1 = %, x.f2 = %', x.f1, x.f2; end$$ language plpgsql; -select footest(); +select stricttest(); -- test warnings and errors set plpgsql.extra_warnings to 'all'; @@ -2840,6 +2627,95 @@ declare f1 int; begin return 1; end $$ language plpgsql; select shadowtest(1); +-- runtime extra checks +set plpgsql.extra_warnings to 'too_many_rows'; + +do $$ +declare x int; +begin + select v from generate_series(1,2) g(v) into x; +end; +$$; + +set plpgsql.extra_errors to 'too_many_rows'; + +do $$ +declare x int; +begin + select v from generate_series(1,2) g(v) into x; +end; +$$; + +reset plpgsql.extra_errors; +reset plpgsql.extra_warnings; + +set plpgsql.extra_warnings to 'strict_multi_assignment'; + +do $$ +declare + x int; + y int; +begin + select 1 into x, y; + select 1,2 into x, y; + select 1,2,3 into x, y; +end +$$; + +set plpgsql.extra_errors to 'strict_multi_assignment'; + +do $$ +declare + x int; + y int; +begin + select 1 into x, y; + select 1,2 into x, y; + select 1,2,3 into x, y; +end +$$; + +create table test_01(a int, b int, c int); + +alter table test_01 drop column a; + +-- the check is active only when source table is not empty +insert into test_01 values(10,20); + +do $$ +declare + x int; + y int; +begin + select * from test_01 into x, y; -- should be ok + raise notice 'ok'; + select * from test_01 into 
x; -- should to fail +end; +$$; + +do $$ +declare + t test_01; +begin + select 1, 2 into t; -- should be ok + raise notice 'ok'; + select 1, 2, 3 into t; -- should fail; +end; +$$; + +do $$ +declare + t test_01; +begin + select 1 into t; -- should fail; +end; +$$; + +drop table test_01; + +reset plpgsql.extra_errors; +reset plpgsql.extra_warnings; + -- test scrollable cursor support create function sc_test() returns setof integer as $$ @@ -3580,72 +3456,6 @@ select stacked_diagnostics_test(); drop function stacked_diagnostics_test(); --- test CASE statement - -create or replace function case_test(bigint) returns text as $$ -declare a int = 10; - b int = 1; -begin - case $1 - when 1 then - return 'one'; - when 2 then - return 'two'; - when 3,4,3+5 then - return 'three, four or eight'; - when a then - return 'ten'; - when a+b, a+b+1 then - return 'eleven, twelve'; - end case; -end; -$$ language plpgsql immutable; - -select case_test(1); -select case_test(2); -select case_test(3); -select case_test(4); -select case_test(5); -- fails -select case_test(8); -select case_test(10); -select case_test(11); -select case_test(12); -select case_test(13); -- fails - -create or replace function catch() returns void as $$ -begin - raise notice '%', case_test(6); -exception - when case_not_found then - raise notice 'caught case_not_found % %', SQLSTATE, SQLERRM; -end -$$ language plpgsql; - -select catch(); - --- test the searched variant too, as well as ELSE -create or replace function case_test(bigint) returns text as $$ -declare a int = 10; -begin - case - when $1 = 1 then - return 'one'; - when $1 = a + 2 then - return 'twelve'; - else - return 'other'; - end case; -end; -$$ language plpgsql immutable; - -select case_test(1); -select case_test(2); -select case_test(12); -select case_test(13); - -drop function catch(); -drop function case_test(bigint); - -- test variadic functions create or replace function vari(variadic int[]) @@ -4255,6 +4065,8 @@ language plpgsql as $$ begin return $1[1]; end; $$ stable; +select consumes_rw_array(returns_rw_array(42)); + -- bug #14174 explain (verbose, costs off) select i, a from @@ -4277,6 +4089,13 @@ select consumes_rw_array(a), a from select consumes_rw_array(a), a from (values (returns_rw_array(1)), (returns_rw_array(2))) v(a); +do $$ +declare a int[] := array[1,2]; +begin + a := a || 3; + raise notice 'a = %', a; +end$$; + -- -- Test access to call stack @@ -4678,14 +4497,14 @@ CREATE FUNCTION transition_table_level2_bad_usage_func() LANGUAGE plpgsql AS $$ BEGIN - INSERT INTO d VALUES (1000000, 1000000, 'x'); + INSERT INTO dx VALUES (1000000, 1000000, 'x'); RETURN NULL; END; $$; CREATE TRIGGER transition_table_level2_bad_usage_trigger AFTER DELETE ON transition_table_level2 - REFERENCING OLD TABLE AS d + REFERENCING OLD TABLE AS dx FOR EACH STATEMENT EXECUTE PROCEDURE transition_table_level2_bad_usage_func(); @@ -4773,6 +4592,32 @@ ALTER TABLE alter_table_under_transition_tables UPDATE alter_table_under_transition_tables SET id = id; +-- +-- Test multiple reference to a transition table +-- + +CREATE TABLE multi_test (i int); +INSERT INTO multi_test VALUES (1); + +CREATE OR REPLACE FUNCTION multi_test_trig() RETURNS trigger +LANGUAGE plpgsql AS $$ +BEGIN + RAISE NOTICE 'count = %', (SELECT COUNT(*) FROM new_test); + RAISE NOTICE 'count union = %', + (SELECT COUNT(*) + FROM (SELECT * FROM new_test UNION ALL SELECT * FROM new_test) ss); + RETURN NULL; +END$$; + +CREATE TRIGGER my_trigger AFTER UPDATE ON multi_test + REFERENCING NEW TABLE AS new_test OLD TABLE as 
old_test + FOR EACH STATEMENT EXECUTE PROCEDURE multi_test_trig(); + +UPDATE multi_test SET i = i; + +DROP TABLE multi_test; +DROP FUNCTION multi_test_trig(); + -- -- Check type parsing and record fetching from partitioned tables -- @@ -4811,3 +4656,12 @@ BEGIN END; $$ LANGUAGE plpgsql; SELECT * FROM list_partitioned_table() AS t; + +-- +-- Check argument name is used instead of $n in error message +-- +CREATE FUNCTION fx(x WSlot) RETURNS void AS $$ +BEGIN + GET DIAGNOSTICS x = ROW_COUNT; + RETURN; +END; $$ LANGUAGE plpgsql; diff --git a/src/test/regress/sql/point.sql b/src/test/regress/sql/point.sql index 63a803a809..a209f3bfeb 100644 --- a/src/test/regress/sql/point.sql +++ b/src/test/regress/sql/point.sql @@ -14,6 +14,12 @@ INSERT INTO POINT_TBL(f1) VALUES ('(5.1, 34.5)'); INSERT INTO POINT_TBL(f1) VALUES ('(-5.0,-12.0)'); +INSERT INTO POINT_TBL(f1) VALUES ('(1e-300,-1e-300)'); -- To underflow + +INSERT INTO POINT_TBL(f1) VALUES ('(1e+300,Inf)'); -- To overflow + +INSERT INTO POINT_TBL(f1) VALUES (' ( Nan , NaN ) '); + -- bad format points INSERT INTO POINT_TBL(f1) VALUES ('asdfasdf'); @@ -21,8 +27,12 @@ INSERT INTO POINT_TBL(f1) VALUES ('10.0,10.0'); INSERT INTO POINT_TBL(f1) VALUES ('(10.0 10.0)'); +INSERT INTO POINT_TBL(f1) VALUES ('(10.0, 10.0) x'); + INSERT INTO POINT_TBL(f1) VALUES ('(10.0,10.0'); +INSERT INTO POINT_TBL(f1) VALUES ('(10.0, 1e+500)'); -- Out of range + SELECT '' AS six, * FROM POINT_TBL; diff --git a/src/test/regress/sql/polygon.sql b/src/test/regress/sql/polygon.sql index 7ac8079465..d3a6625cb7 100644 --- a/src/test/regress/sql/polygon.sql +++ b/src/test/regress/sql/polygon.sql @@ -11,6 +11,10 @@ INSERT INTO POLYGON_TBL(f1) VALUES ('(2.0,0.0),(2.0,4.0),(0.0,0.0)'); INSERT INTO POLYGON_TBL(f1) VALUES ('(3.0,1.0),(3.0,3.0),(1.0,0.0)'); +INSERT INTO POLYGON_TBL(f1) VALUES ('(1,2),(3,4),(5,6),(7,8)'); +INSERT INTO POLYGON_TBL(f1) VALUES ('(7,8),(5,6),(3,4),(1,2)'); -- Reverse +INSERT INTO POLYGON_TBL(f1) VALUES ('(1,2),(7,8),(5,6),(3,-4)'); + -- degenerate polygons INSERT INTO POLYGON_TBL(f1) VALUES ('(0.0,0.0)'); @@ -30,89 +34,128 @@ INSERT INTO POLYGON_TBL(f1) VALUES ('asdf'); SELECT '' AS four, * FROM POLYGON_TBL; --- overlap -SELECT '' AS three, p.* - FROM POLYGON_TBL p - WHERE p.f1 && '(3.0,1.0),(3.0,3.0),(1.0,0.0)'; - --- left overlap -SELECT '' AS four, p.* - FROM POLYGON_TBL p - WHERE p.f1 &< '(3.0,1.0),(3.0,3.0),(1.0,0.0)'; - --- right overlap -SELECT '' AS two, p.* - FROM POLYGON_TBL p - WHERE p.f1 &> '(3.0,1.0),(3.0,3.0),(1.0,0.0)'; - --- left of -SELECT '' AS one, p.* - FROM POLYGON_TBL p - WHERE p.f1 << '(3.0,1.0),(3.0,3.0),(1.0,0.0)'; - --- right of -SELECT '' AS zero, p.* - FROM POLYGON_TBL p - WHERE p.f1 >> '(3.0,1.0),(3.0,3.0),(1.0,0.0)'; - --- contained -SELECT '' AS one, p.* - FROM POLYGON_TBL p - WHERE p.f1 <@ polygon '(3.0,1.0),(3.0,3.0),(1.0,0.0)'; - --- same -SELECT '' AS one, p.* - FROM POLYGON_TBL p - WHERE p.f1 ~= polygon '(3.0,1.0),(3.0,3.0),(1.0,0.0)'; - --- contains -SELECT '' AS one, p.* - FROM POLYGON_TBL p - WHERE p.f1 @> polygon '(3.0,1.0),(3.0,3.0),(1.0,0.0)'; - -- --- polygon logic +-- Test the SP-GiST index -- --- left of -SELECT polygon '(2.0,0.0),(2.0,4.0),(0.0,0.0)' << polygon '(3.0,1.0),(3.0,3.0),(1.0,0.0)' AS false; --- left overlap -SELECT polygon '(2.0,0.0),(2.0,4.0),(0.0,0.0)' << polygon '(3.0,1.0),(3.0,3.0),(1.0,0.0)' AS true; +CREATE TABLE quad_poly_tbl (id int, p polygon); + +INSERT INTO quad_poly_tbl + SELECT (x - 1) * 100 + y, polygon(circle(point(x * 10, y * 10), 1 + (x + y) % 10)) + FROM generate_series(1, 100) x, + 
generate_series(1, 100) y; + +INSERT INTO quad_poly_tbl + SELECT i, polygon '((200, 300),(210, 310),(230, 290))' + FROM generate_series(10001, 11000) AS i; + +INSERT INTO quad_poly_tbl + VALUES + (11001, NULL), + (11002, NULL), + (11003, NULL); + +CREATE INDEX quad_poly_tbl_idx ON quad_poly_tbl USING spgist(p); + +-- get reference results for ORDER BY distance from seq scan +SET enable_seqscan = ON; +SET enable_indexscan = OFF; +SET enable_bitmapscan = OFF; + +CREATE TABLE quad_poly_tbl_ord_seq1 AS +SELECT rank() OVER (ORDER BY p <-> point '123,456') n, p <-> point '123,456' dist, id +FROM quad_poly_tbl; + +CREATE TABLE quad_poly_tbl_ord_seq2 AS +SELECT rank() OVER (ORDER BY p <-> point '123,456') n, p <-> point '123,456' dist, id +FROM quad_poly_tbl WHERE p <@ polygon '((300,300),(400,600),(600,500),(700,200))'; + +-- check results results from index scan +SET enable_seqscan = OFF; +SET enable_indexscan = OFF; +SET enable_bitmapscan = ON; + +EXPLAIN (COSTS OFF) +SELECT count(*) FROM quad_poly_tbl WHERE p << polygon '((300,300),(400,600),(600,500),(700,200))'; +SELECT count(*) FROM quad_poly_tbl WHERE p << polygon '((300,300),(400,600),(600,500),(700,200))'; + +EXPLAIN (COSTS OFF) +SELECT count(*) FROM quad_poly_tbl WHERE p &< polygon '((300,300),(400,600),(600,500),(700,200))'; +SELECT count(*) FROM quad_poly_tbl WHERE p &< polygon '((300,300),(400,600),(600,500),(700,200))'; + +EXPLAIN (COSTS OFF) +SELECT count(*) FROM quad_poly_tbl WHERE p && polygon '((300,300),(400,600),(600,500),(700,200))'; +SELECT count(*) FROM quad_poly_tbl WHERE p && polygon '((300,300),(400,600),(600,500),(700,200))'; --- right overlap -SELECT polygon '(2.0,0.0),(2.0,4.0),(0.0,0.0)' &> polygon '(3.0,1.0),(3.0,3.0),(1.0,0.0)' AS false; +EXPLAIN (COSTS OFF) +SELECT count(*) FROM quad_poly_tbl WHERE p &> polygon '((300,300),(400,600),(600,500),(700,200))'; +SELECT count(*) FROM quad_poly_tbl WHERE p &> polygon '((300,300),(400,600),(600,500),(700,200))'; --- right of -SELECT polygon '(2.0,0.0),(2.0,4.0),(0.0,0.0)' >> polygon '(3.0,1.0),(3.0,3.0),(1.0,0.0)' AS false; +EXPLAIN (COSTS OFF) +SELECT count(*) FROM quad_poly_tbl WHERE p >> polygon '((300,300),(400,600),(600,500),(700,200))'; +SELECT count(*) FROM quad_poly_tbl WHERE p >> polygon '((300,300),(400,600),(600,500),(700,200))'; --- contained in -SELECT polygon '(2.0,0.0),(2.0,4.0),(0.0,0.0)' <@ polygon '(3.0,1.0),(3.0,3.0),(1.0,0.0)' AS false; +EXPLAIN (COSTS OFF) +SELECT count(*) FROM quad_poly_tbl WHERE p <<| polygon '((300,300),(400,600),(600,500),(700,200))'; +SELECT count(*) FROM quad_poly_tbl WHERE p <<| polygon '((300,300),(400,600),(600,500),(700,200))'; --- contains -SELECT polygon '(2.0,0.0),(2.0,4.0),(0.0,0.0)' @> polygon '(3.0,1.0),(3.0,3.0),(1.0,0.0)' AS false; +EXPLAIN (COSTS OFF) +SELECT count(*) FROM quad_poly_tbl WHERE p &<| polygon '((300,300),(400,600),(600,500),(700,200))'; +SELECT count(*) FROM quad_poly_tbl WHERE p &<| polygon '((300,300),(400,600),(600,500),(700,200))'; -SELECT '((0,4),(6,4),(1,2),(6,0),(0,0))'::polygon @> '((2,1),(2,3),(3,3),(3,1))'::polygon AS "false"; +EXPLAIN (COSTS OFF) +SELECT count(*) FROM quad_poly_tbl WHERE p |&> polygon '((300,300),(400,600),(600,500),(700,200))'; +SELECT count(*) FROM quad_poly_tbl WHERE p |&> polygon '((300,300),(400,600),(600,500),(700,200))'; -SELECT '((0,4),(6,4),(3,2),(6,0),(0,0))'::polygon @> '((2,1),(2,3),(3,3),(3,1))'::polygon AS "true"; +EXPLAIN (COSTS OFF) +SELECT count(*) FROM quad_poly_tbl WHERE p |>> polygon '((300,300),(400,600),(600,500),(700,200))'; +SELECT count(*) FROM 
quad_poly_tbl WHERE p |>> polygon '((300,300),(400,600),(600,500),(700,200))'; -SELECT '((1,1),(1,4),(5,4),(5,3),(2,3),(2,2),(5,2),(5,1))'::polygon @> '((3,2),(3,3),(4,3),(4,2))'::polygon AS "false"; +EXPLAIN (COSTS OFF) +SELECT count(*) FROM quad_poly_tbl WHERE p <@ polygon '((300,300),(400,600),(600,500),(700,200))'; +SELECT count(*) FROM quad_poly_tbl WHERE p <@ polygon '((300,300),(400,600),(600,500),(700,200))'; -SELECT '((0,0),(0,3),(3,3),(3,0))'::polygon @> '((2,1),(2,2),(3,2),(3,1))'::polygon AS "true"; +EXPLAIN (COSTS OFF) +SELECT count(*) FROM quad_poly_tbl WHERE p @> polygon '((340,550),(343,552),(341,553))'; +SELECT count(*) FROM quad_poly_tbl WHERE p @> polygon '((340,550),(343,552),(341,553))'; --- same -SELECT polygon '(2.0,0.0),(2.0,4.0),(0.0,0.0)' ~= polygon '(3.0,1.0),(3.0,3.0),(1.0,0.0)' AS false; +EXPLAIN (COSTS OFF) +SELECT count(*) FROM quad_poly_tbl WHERE p ~= polygon '((200, 300),(210, 310),(230, 290))'; +SELECT count(*) FROM quad_poly_tbl WHERE p ~= polygon '((200, 300),(210, 310),(230, 290))'; --- overlap -SELECT polygon '(2.0,0.0),(2.0,4.0),(0.0,0.0)' && polygon '(3.0,1.0),(3.0,3.0),(1.0,0.0)' AS true; +-- test ORDER BY distance +SET enable_indexscan = ON; +SET enable_bitmapscan = OFF; -SELECT '((0,4),(6,4),(1,2),(6,0),(0,0))'::polygon && '((2,1),(2,3),(3,3),(3,1))'::polygon AS "true"; +EXPLAIN (COSTS OFF) +SELECT rank() OVER (ORDER BY p <-> point '123,456') n, p <-> point '123,456' dist, id +FROM quad_poly_tbl; -SELECT '((1,4),(1,1),(4,1),(4,2),(2,2),(2,4),(1,4))'::polygon && '((3,3),(4,3),(4,4),(3,4),(3,3))'::polygon AS "false"; -SELECT '((200,800),(800,800),(800,200),(200,200))' && '(1000,1000,0,0)'::polygon AS "true"; +CREATE TEMP TABLE quad_poly_tbl_ord_idx1 AS +SELECT rank() OVER (ORDER BY p <-> point '123,456') n, p <-> point '123,456' dist, id +FROM quad_poly_tbl; --- distance from a point -SELECT '(0,0)'::point <-> '((0,0),(1,2),(2,1))'::polygon as on_corner, - '(1,1)'::point <-> '((0,0),(2,2),(1,3))'::polygon as on_segment, - '(2,2)'::point <-> '((0,0),(1,4),(3,1))'::polygon as inside, - '(3,3)'::point <-> '((0,2),(2,0),(2,2))'::polygon as near_corner, - '(4,4)'::point <-> '((0,0),(0,3),(4,0))'::polygon as near_segment; +SELECT * +FROM quad_poly_tbl_ord_seq1 seq FULL JOIN quad_poly_tbl_ord_idx1 idx + ON seq.n = idx.n AND seq.id = idx.id AND + (seq.dist = idx.dist OR seq.dist IS NULL AND idx.dist IS NULL) +WHERE seq.id IS NULL OR idx.id IS NULL; + + +EXPLAIN (COSTS OFF) +SELECT rank() OVER (ORDER BY p <-> point '123,456') n, p <-> point '123,456' dist, id +FROM quad_poly_tbl WHERE p <@ polygon '((300,300),(400,600),(600,500),(700,200))'; + +CREATE TEMP TABLE quad_poly_tbl_ord_idx2 AS +SELECT rank() OVER (ORDER BY p <-> point '123,456') n, p <-> point '123,456' dist, id +FROM quad_poly_tbl WHERE p <@ polygon '((300,300),(400,600),(600,500),(700,200))'; + +SELECT * +FROM quad_poly_tbl_ord_seq2 seq FULL JOIN quad_poly_tbl_ord_idx2 idx + ON seq.n = idx.n AND seq.id = idx.id AND + (seq.dist = idx.dist OR seq.dist IS NULL AND idx.dist IS NULL) +WHERE seq.id IS NULL OR idx.id IS NULL; + +RESET enable_seqscan; +RESET enable_indexscan; +RESET enable_bitmapscan; diff --git a/src/test/regress/sql/polymorphism.sql b/src/test/regress/sql/polymorphism.sql index 45ae7a23aa..03606671d9 100644 --- a/src/test/regress/sql/polymorphism.sql +++ b/src/test/regress/sql/polymorphism.sql @@ -734,18 +734,18 @@ $$ language sql; drop function dfunc(varchar, numeric); --fail, named parameters are not unique -create function testfoo(a int, a int) returns int as $$ select 1;$$ 
language sql; -create function testfoo(int, out a int, out a int) returns int as $$ select 1;$$ language sql; -create function testfoo(out a int, inout a int) returns int as $$ select 1;$$ language sql; -create function testfoo(a int, inout a int) returns int as $$ select 1;$$ language sql; +create function testpolym(a int, a int) returns int as $$ select 1;$$ language sql; +create function testpolym(int, out a int, out a int) returns int as $$ select 1;$$ language sql; +create function testpolym(out a int, inout a int) returns int as $$ select 1;$$ language sql; +create function testpolym(a int, inout a int) returns int as $$ select 1;$$ language sql; -- valid -create function testfoo(a int, out a int) returns int as $$ select $1;$$ language sql; -select testfoo(37); -drop function testfoo(int); -create function testfoo(a int) returns table(a int) as $$ select $1;$$ language sql; -select * from testfoo(37); -drop function testfoo(int); +create function testpolym(a int, out a int) returns int as $$ select $1;$$ language sql; +select testpolym(37); +drop function testpolym(int); +create function testpolym(a int) returns table(a int) as $$ select $1;$$ language sql; +select * from testpolym(37); +drop function testpolym(int); -- test polymorphic params and defaults create function dfunc(a anyelement, b anyelement = null, flag bool = true) @@ -785,6 +785,21 @@ select dfunc('a'::text, 'b', flag => false); -- mixed notation select dfunc('a'::text, 'b', true); -- full positional notation select dfunc('a'::text, 'b', flag => true); -- mixed notation +-- this tests lexer edge cases around => +select dfunc(a =>-1); +select dfunc(a =>+1); +select dfunc(a =>/**/1); +select dfunc(a =>--comment to be removed by psql + 1); +-- need DO to protect the -- from psql +do $$ + declare r integer; + begin + select dfunc(a=>-- comment + 1) into r; + raise info 'r = %', r; + end; +$$; -- check reverse-listing of named-arg calls CREATE VIEW dfview AS diff --git a/src/test/regress/sql/portals.sql b/src/test/regress/sql/portals.sql index a1c812e937..52560ac027 100644 --- a/src/test/regress/sql/portals.sql +++ b/src/test/regress/sql/portals.sql @@ -462,6 +462,40 @@ FETCH FROM c1; DELETE FROM ucview WHERE CURRENT OF c1; -- fail, views not supported ROLLBACK; +-- Check WHERE CURRENT OF with an index-only scan +BEGIN; +EXPLAIN (costs off) +DECLARE c1 CURSOR FOR SELECT stringu1 FROM onek WHERE stringu1 = 'DZAAAA'; +DECLARE c1 CURSOR FOR SELECT stringu1 FROM onek WHERE stringu1 = 'DZAAAA'; +FETCH FROM c1; +DELETE FROM onek WHERE CURRENT OF c1; +SELECT stringu1 FROM onek WHERE stringu1 = 'DZAAAA'; +ROLLBACK; + +-- Check behavior with rewinding to a previous child scan node, +-- as per bug #15395 +BEGIN; +CREATE TABLE current_check (currentid int, payload text); +CREATE TABLE current_check_1 () INHERITS (current_check); +CREATE TABLE current_check_2 () INHERITS (current_check); +INSERT INTO current_check_1 SELECT i, 'p' || i FROM generate_series(1,9) i; +INSERT INTO current_check_2 SELECT i, 'P' || i FROM generate_series(10,19) i; + +DECLARE c1 SCROLL CURSOR FOR SELECT * FROM current_check; + +-- This tests the fetch-backwards code path +FETCH ABSOLUTE 12 FROM c1; +FETCH ABSOLUTE 8 FROM c1; +DELETE FROM current_check WHERE CURRENT OF c1 RETURNING *; + +-- This tests the ExecutorRewind code path +FETCH ABSOLUTE 13 FROM c1; +FETCH ABSOLUTE 1 FROM c1; +DELETE FROM current_check WHERE CURRENT OF c1 RETURNING *; + +SELECT * FROM current_check; +ROLLBACK; + -- Make sure snapshot management works okay, per bug report in -- 
235395b90909301035v7228ce63q392931f15aa74b31@mail.gmail.com BEGIN; diff --git a/src/test/regress/sql/privileges.sql b/src/test/regress/sql/privileges.sql index e2c13e08a4..e3e69302a2 100644 --- a/src/test/regress/sql/privileges.sql +++ b/src/test/regress/sql/privileges.sql @@ -7,15 +7,15 @@ -- Suppress NOTICE messages when users/groups don't exist SET client_min_messages TO 'warning'; -DROP ROLE IF EXISTS regress_group1; -DROP ROLE IF EXISTS regress_group2; +DROP ROLE IF EXISTS regress_priv_group1; +DROP ROLE IF EXISTS regress_priv_group2; -DROP ROLE IF EXISTS regress_user1; -DROP ROLE IF EXISTS regress_user2; -DROP ROLE IF EXISTS regress_user3; -DROP ROLE IF EXISTS regress_user4; -DROP ROLE IF EXISTS regress_user5; -DROP ROLE IF EXISTS regress_user6; +DROP ROLE IF EXISTS regress_priv_user1; +DROP ROLE IF EXISTS regress_priv_user2; +DROP ROLE IF EXISTS regress_priv_user3; +DROP ROLE IF EXISTS regress_priv_user4; +DROP ROLE IF EXISTS regress_priv_user5; +DROP ROLE IF EXISTS regress_priv_user6; SELECT lo_unlink(oid) FROM pg_largeobject_metadata WHERE oid >= 1000 AND oid < 3000 ORDER BY oid; @@ -23,25 +23,25 @@ RESET client_min_messages; -- test proper begins here -CREATE USER regress_user1; -CREATE USER regress_user2; -CREATE USER regress_user3; -CREATE USER regress_user4; -CREATE USER regress_user5; -CREATE USER regress_user5; -- duplicate +CREATE USER regress_priv_user1; +CREATE USER regress_priv_user2; +CREATE USER regress_priv_user3; +CREATE USER regress_priv_user4; +CREATE USER regress_priv_user5; +CREATE USER regress_priv_user5; -- duplicate -CREATE GROUP regress_group1; -CREATE GROUP regress_group2 WITH USER regress_user1, regress_user2; +CREATE GROUP regress_priv_group1; +CREATE GROUP regress_priv_group2 WITH USER regress_priv_user1, regress_priv_user2; -ALTER GROUP regress_group1 ADD USER regress_user4; +ALTER GROUP regress_priv_group1 ADD USER regress_priv_user4; -ALTER GROUP regress_group2 ADD USER regress_user2; -- duplicate -ALTER GROUP regress_group2 DROP USER regress_user2; -GRANT regress_group2 TO regress_user4 WITH ADMIN OPTION; +ALTER GROUP regress_priv_group2 ADD USER regress_priv_user2; -- duplicate +ALTER GROUP regress_priv_group2 DROP USER regress_priv_user2; +GRANT regress_priv_group2 TO regress_priv_user4 WITH ADMIN OPTION; -- test owner privileges -SET SESSION AUTHORIZATION regress_user1; +SET SESSION AUTHORIZATION regress_priv_user1; SELECT session_user, current_user; CREATE TABLE atest1 ( a int, b text ); @@ -57,18 +57,18 @@ COMMIT; REVOKE ALL ON atest1 FROM PUBLIC; SELECT * FROM atest1; -GRANT ALL ON atest1 TO regress_user2; -GRANT SELECT ON atest1 TO regress_user3, regress_user4; +GRANT ALL ON atest1 TO regress_priv_user2; +GRANT SELECT ON atest1 TO regress_priv_user3, regress_priv_user4; SELECT * FROM atest1; CREATE TABLE atest2 (col1 varchar(10), col2 boolean); -GRANT SELECT ON atest2 TO regress_user2; -GRANT UPDATE ON atest2 TO regress_user3; -GRANT INSERT ON atest2 TO regress_user4; -GRANT TRUNCATE ON atest2 TO regress_user5; +GRANT SELECT ON atest2 TO regress_priv_user2; +GRANT UPDATE ON atest2 TO regress_priv_user3; +GRANT INSERT ON atest2 TO regress_priv_user4; +GRANT TRUNCATE ON atest2 TO regress_priv_user5; -SET SESSION AUTHORIZATION regress_user2; +SET SESSION AUTHORIZATION regress_priv_user2; SELECT session_user, current_user; -- try various combinations of queries on atest1 and atest2 @@ -95,7 +95,7 @@ SELECT * FROM atest1 WHERE ( b IN ( SELECT col1 FROM atest2 ) ); SELECT * FROM atest2 WHERE ( col1 IN ( SELECT b FROM atest1 ) ); -SET SESSION 
AUTHORIZATION regress_user3; +SET SESSION AUTHORIZATION regress_priv_user3; SELECT session_user, current_user; SELECT * FROM atest1; -- ok @@ -120,7 +120,7 @@ COPY atest2 FROM stdin; -- fail SELECT * FROM atest1 WHERE ( b IN ( SELECT col1 FROM atest2 ) ); SELECT * FROM atest2 WHERE ( col1 IN ( SELECT b FROM atest1 ) ); -SET SESSION AUTHORIZATION regress_user4; +SET SESSION AUTHORIZATION regress_priv_user4; COPY atest2 FROM stdin; -- ok bar true \. @@ -129,8 +129,8 @@ SELECT * FROM atest1; -- ok -- test leaky-function protections in selfuncs --- regress_user1 will own a table and provide a view for it. -SET SESSION AUTHORIZATION regress_user1; +-- regress_priv_user1 will own a table and provide a view for it. +SET SESSION AUTHORIZATION regress_priv_user1; CREATE TABLE atest12 as SELECT x AS a, 10001 - x AS b FROM generate_series(1,10000) x; @@ -156,8 +156,8 @@ EXPLAIN (COSTS OFF) SELECT * FROM atest12v x, atest12v y WHERE x.a = y.b; EXPLAIN (COSTS OFF) SELECT * FROM atest12 x, atest12 y WHERE x.a = y.b and abs(y.a) <<< 5; --- Check if regress_user2 can break security. -SET SESSION AUTHORIZATION regress_user2; +-- Check if regress_priv_user2 can break security. +SET SESSION AUTHORIZATION regress_priv_user2; CREATE FUNCTION leak2(integer,integer) RETURNS boolean AS $$begin raise notice 'leak % %', $1, $2; return $1 > $2; end$$ @@ -171,12 +171,12 @@ EXPLAIN (COSTS OFF) SELECT * FROM atest12 WHERE a >>> 0; -- This plan should use hashjoin, as it will expect many rows to be selected. EXPLAIN (COSTS OFF) SELECT * FROM atest12v x, atest12v y WHERE x.a = y.b; --- Now regress_user1 grants sufficient access to regress_user2. -SET SESSION AUTHORIZATION regress_user1; +-- Now regress_priv_user1 grants sufficient access to regress_priv_user2. +SET SESSION AUTHORIZATION regress_priv_user1; GRANT SELECT (a, b) ON atest12 TO PUBLIC; -SET SESSION AUTHORIZATION regress_user2; +SET SESSION AUTHORIZATION regress_priv_user2; --- Now regress_user2 will also get a good row estimate. +-- Now regress_priv_user2 will also get a good row estimate. EXPLAIN (COSTS OFF) SELECT * FROM atest12v x, atest12v y WHERE x.a = y.b; -- But not for this, due to lack of table-wide permissions needed @@ -184,17 +184,17 @@ EXPLAIN (COSTS OFF) SELECT * FROM atest12v x, atest12v y WHERE x.a = y.b; EXPLAIN (COSTS OFF) SELECT * FROM atest12 x, atest12 y WHERE x.a = y.b and abs(y.a) <<< 5; --- clean up (regress_user1's objects are all dropped later) +-- clean up (regress_priv_user1's objects are all dropped later) DROP FUNCTION leak2(integer, integer) CASCADE; -- groups -SET SESSION AUTHORIZATION regress_user3; +SET SESSION AUTHORIZATION regress_priv_user3; CREATE TABLE atest3 (one int, two int, three int); -GRANT DELETE ON atest3 TO GROUP regress_group2; +GRANT DELETE ON atest3 TO GROUP regress_priv_group2; -SET SESSION AUTHORIZATION regress_user1; +SET SESSION AUTHORIZATION regress_priv_user1; SELECT * FROM atest3; -- fail DELETE FROM atest3; -- ok @@ -202,7 +202,7 @@ DELETE FROM atest3; -- ok -- views -SET SESSION AUTHORIZATION regress_user3; +SET SESSION AUTHORIZATION regress_priv_user3; CREATE VIEW atestv1 AS SELECT * FROM atest1; -- ok /* The next *should* fail, but it's not implemented that way yet. 
*/ @@ -213,10 +213,10 @@ CREATE VIEW atestv0 AS SELECT 0 as x WHERE false; -- ok SELECT * FROM atestv1; -- ok SELECT * FROM atestv2; -- fail -GRANT SELECT ON atestv1, atestv3 TO regress_user4; -GRANT SELECT ON atestv2 TO regress_user2; +GRANT SELECT ON atestv1, atestv3 TO regress_priv_user4; +GRANT SELECT ON atestv2 TO regress_priv_user2; -SET SESSION AUTHORIZATION regress_user4; +SET SESSION AUTHORIZATION regress_priv_user4; SELECT * FROM atestv1; -- ok SELECT * FROM atestv2; -- fail @@ -240,29 +240,29 @@ reset constraint_exclusion; CREATE VIEW atestv4 AS SELECT * FROM atestv3; -- nested view SELECT * FROM atestv4; -- ok -GRANT SELECT ON atestv4 TO regress_user2; +GRANT SELECT ON atestv4 TO regress_priv_user2; -SET SESSION AUTHORIZATION regress_user2; +SET SESSION AUTHORIZATION regress_priv_user2; -- Two complex cases: SELECT * FROM atestv3; -- fail -SELECT * FROM atestv4; -- ok (even though regress_user2 cannot access underlying atestv3) +SELECT * FROM atestv4; -- ok (even though regress_priv_user2 cannot access underlying atestv3) SELECT * FROM atest2; -- ok -SELECT * FROM atestv2; -- fail (even though regress_user2 can access underlying atest2) +SELECT * FROM atestv2; -- fail (even though regress_priv_user2 can access underlying atest2) -- Test column level permissions -SET SESSION AUTHORIZATION regress_user1; +SET SESSION AUTHORIZATION regress_priv_user1; CREATE TABLE atest5 (one int, two int unique, three int, four int unique); CREATE TABLE atest6 (one int, two int, blue int); -GRANT SELECT (one), INSERT (two), UPDATE (three) ON atest5 TO regress_user4; -GRANT ALL (one) ON atest5 TO regress_user3; +GRANT SELECT (one), INSERT (two), UPDATE (three) ON atest5 TO regress_priv_user4; +GRANT ALL (one) ON atest5 TO regress_priv_user3; INSERT INTO atest5 VALUES (1,2,3); -SET SESSION AUTHORIZATION regress_user4; +SET SESSION AUTHORIZATION regress_priv_user4; SELECT * FROM atest5; -- fail SELECT one FROM atest5; -- ok COPY atest5 (one) TO stdout; -- ok @@ -283,16 +283,16 @@ SELECT atest1.*,atest5.one FROM atest1 JOIN atest5 ON (atest1.a = atest5.two); - SELECT atest1.*,atest5.one FROM atest1 JOIN atest5 ON (atest1.a = atest5.one); -- ok SELECT one, two FROM atest5; -- fail -SET SESSION AUTHORIZATION regress_user1; -GRANT SELECT (one,two) ON atest6 TO regress_user4; +SET SESSION AUTHORIZATION regress_priv_user1; +GRANT SELECT (one,two) ON atest6 TO regress_priv_user4; -SET SESSION AUTHORIZATION regress_user4; +SET SESSION AUTHORIZATION regress_priv_user4; SELECT one, two FROM atest5 NATURAL JOIN atest6; -- fail still -SET SESSION AUTHORIZATION regress_user1; -GRANT SELECT (two) ON atest5 TO regress_user4; +SET SESSION AUTHORIZATION regress_priv_user1; +GRANT SELECT (two) ON atest5 TO regress_priv_user4; -SET SESSION AUTHORIZATION regress_user4; +SET SESSION AUTHORIZATION regress_priv_user4; SELECT one, two FROM atest5 NATURAL JOIN atest6; -- ok now -- test column-level privileges for INSERT and UPDATE @@ -320,26 +320,41 @@ INSERT INTO atest5(two) VALUES (6) ON CONFLICT (two) DO UPDATE set three = EXCLU INSERT INTO atest5(two) VALUES (6) ON CONFLICT (two) DO UPDATE set three = EXCLUDED.three; INSERT INTO atest5(two) VALUES (6) ON CONFLICT (two) DO UPDATE set one = 8; -- fails (due to UPDATE) INSERT INTO atest5(three) VALUES (4) ON CONFLICT (two) DO UPDATE set three = 10; -- fails (due to INSERT) + -- Check that the columns in the inference require select privileges --- Error. 
No privs on four -INSERT INTO atest5(three) VALUES (4) ON CONFLICT (four) DO UPDATE set three = 10; +INSERT INTO atest5(four) VALUES (4); -- fail + +SET SESSION AUTHORIZATION regress_priv_user1; +GRANT INSERT (four) ON atest5 TO regress_priv_user4; +SET SESSION AUTHORIZATION regress_priv_user4; + +INSERT INTO atest5(four) VALUES (4) ON CONFLICT (four) DO UPDATE set three = 3; -- fails (due to SELECT) +INSERT INTO atest5(four) VALUES (4) ON CONFLICT ON CONSTRAINT atest5_four_key DO UPDATE set three = 3; -- fails (due to SELECT) +INSERT INTO atest5(four) VALUES (4); -- ok + +SET SESSION AUTHORIZATION regress_priv_user1; +GRANT SELECT (four) ON atest5 TO regress_priv_user4; +SET SESSION AUTHORIZATION regress_priv_user4; + +INSERT INTO atest5(four) VALUES (4) ON CONFLICT (four) DO UPDATE set three = 3; -- ok +INSERT INTO atest5(four) VALUES (4) ON CONFLICT ON CONSTRAINT atest5_four_key DO UPDATE set three = 3; -- ok -SET SESSION AUTHORIZATION regress_user1; -REVOKE ALL (one) ON atest5 FROM regress_user4; -GRANT SELECT (one,two,blue) ON atest6 TO regress_user4; +SET SESSION AUTHORIZATION regress_priv_user1; +REVOKE ALL (one) ON atest5 FROM regress_priv_user4; +GRANT SELECT (one,two,blue) ON atest6 TO regress_priv_user4; -SET SESSION AUTHORIZATION regress_user4; +SET SESSION AUTHORIZATION regress_priv_user4; SELECT one FROM atest5; -- fail UPDATE atest5 SET one = 1; -- fail SELECT atest6 FROM atest6; -- ok COPY atest6 TO stdout; -- ok -- check error reporting with column privs -SET SESSION AUTHORIZATION regress_user1; +SET SESSION AUTHORIZATION regress_priv_user1; CREATE TABLE t1 (c1 int, c2 int, c3 int check (c3 < 5), primary key (c1, c2)); -GRANT SELECT (c1) ON t1 TO regress_user2; -GRANT INSERT (c1, c2, c3) ON t1 TO regress_user2; -GRANT UPDATE (c1, c2, c3) ON t1 TO regress_user2; +GRANT SELECT (c1) ON t1 TO regress_priv_user2; +GRANT INSERT (c1, c2, c3) ON t1 TO regress_priv_user2; +GRANT UPDATE (c1, c2, c3) ON t1 TO regress_priv_user2; -- seed data INSERT INTO t1 VALUES (1, 1, 1); @@ -348,7 +363,7 @@ INSERT INTO t1 VALUES (2, 1, 2); INSERT INTO t1 VALUES (2, 2, 2); INSERT INTO t1 VALUES (3, 1, 3); -SET SESSION AUTHORIZATION regress_user2; +SET SESSION AUTHORIZATION regress_priv_user2; INSERT INTO t1 (c1, c2) VALUES (1, 1); -- fail, but row not shown UPDATE t1 SET c2 = 1; -- fail, but row not shown INSERT INTO t1 (c1, c2) VALUES (null, null); -- fail, but see columns being inserted @@ -356,59 +371,59 @@ INSERT INTO t1 (c3) VALUES (null); -- fail, but see columns being inserted or ha INSERT INTO t1 (c1) VALUES (5); -- fail, but see columns being inserted or have SELECT UPDATE t1 SET c3 = 10; -- fail, but see columns with SELECT rights, or being modified -SET SESSION AUTHORIZATION regress_user1; +SET SESSION AUTHORIZATION regress_priv_user1; DROP TABLE t1; -- test column-level privileges when involved with DELETE -SET SESSION AUTHORIZATION regress_user1; +SET SESSION AUTHORIZATION regress_priv_user1; ALTER TABLE atest6 ADD COLUMN three integer; -GRANT DELETE ON atest5 TO regress_user3; -GRANT SELECT (two) ON atest5 TO regress_user3; -REVOKE ALL (one) ON atest5 FROM regress_user3; -GRANT SELECT (one) ON atest5 TO regress_user4; +GRANT DELETE ON atest5 TO regress_priv_user3; +GRANT SELECT (two) ON atest5 TO regress_priv_user3; +REVOKE ALL (one) ON atest5 FROM regress_priv_user3; +GRANT SELECT (one) ON atest5 TO regress_priv_user4; -SET SESSION AUTHORIZATION regress_user4; +SET SESSION AUTHORIZATION regress_priv_user4; SELECT atest6 FROM atest6; -- fail SELECT one FROM atest5 NATURAL JOIN atest6; 
-- fail -SET SESSION AUTHORIZATION regress_user1; +SET SESSION AUTHORIZATION regress_priv_user1; ALTER TABLE atest6 DROP COLUMN three; -SET SESSION AUTHORIZATION regress_user4; +SET SESSION AUTHORIZATION regress_priv_user4; SELECT atest6 FROM atest6; -- ok SELECT one FROM atest5 NATURAL JOIN atest6; -- ok -SET SESSION AUTHORIZATION regress_user1; +SET SESSION AUTHORIZATION regress_priv_user1; ALTER TABLE atest6 DROP COLUMN two; -REVOKE SELECT (one,blue) ON atest6 FROM regress_user4; +REVOKE SELECT (one,blue) ON atest6 FROM regress_priv_user4; -SET SESSION AUTHORIZATION regress_user4; +SET SESSION AUTHORIZATION regress_priv_user4; SELECT * FROM atest6; -- fail SELECT 1 FROM atest6; -- fail -SET SESSION AUTHORIZATION regress_user3; +SET SESSION AUTHORIZATION regress_priv_user3; DELETE FROM atest5 WHERE one = 1; -- fail DELETE FROM atest5 WHERE two = 2; -- ok -- check inheritance cases -SET SESSION AUTHORIZATION regress_user1; +SET SESSION AUTHORIZATION regress_priv_user1; CREATE TABLE atestp1 (f1 int, f2 int) WITH OIDS; CREATE TABLE atestp2 (fx int, fy int) WITH OIDS; CREATE TABLE atestc (fz int) INHERITS (atestp1, atestp2); -GRANT SELECT(fx,fy,oid) ON atestp2 TO regress_user2; -GRANT SELECT(fx) ON atestc TO regress_user2; +GRANT SELECT(fx,fy,oid) ON atestp2 TO regress_priv_user2; +GRANT SELECT(fx) ON atestc TO regress_priv_user2; -SET SESSION AUTHORIZATION regress_user2; +SET SESSION AUTHORIZATION regress_priv_user2; SELECT fx FROM atestp2; -- ok SELECT fy FROM atestp2; -- ok SELECT atestp2 FROM atestp2; -- ok SELECT oid FROM atestp2; -- ok SELECT fy FROM atestc; -- fail -SET SESSION AUTHORIZATION regress_user1; -GRANT SELECT(fy,oid) ON atestc TO regress_user2; +SET SESSION AUTHORIZATION regress_priv_user1; +GRANT SELECT(fy,oid) ON atestc TO regress_priv_user2; -SET SESSION AUTHORIZATION regress_user2; +SET SESSION AUTHORIZATION regress_priv_user2; SELECT fx FROM atestp2; -- still ok SELECT fy FROM atestp2; -- ok SELECT atestp2 FROM atestp2; -- ok @@ -420,42 +435,59 @@ SELECT oid FROM atestp2; -- ok \c - REVOKE ALL PRIVILEGES ON LANGUAGE sql FROM PUBLIC; -GRANT USAGE ON LANGUAGE sql TO regress_user1; -- ok +GRANT USAGE ON LANGUAGE sql TO regress_priv_user1; -- ok GRANT USAGE ON LANGUAGE c TO PUBLIC; -- fail -SET SESSION AUTHORIZATION regress_user1; -GRANT USAGE ON LANGUAGE sql TO regress_user2; -- fail -CREATE FUNCTION testfunc1(int) RETURNS int AS 'select 2 * $1;' LANGUAGE sql; -CREATE FUNCTION testfunc2(int) RETURNS int AS 'select 3 * $1;' LANGUAGE sql; - -REVOKE ALL ON FUNCTION testfunc1(int), testfunc2(int) FROM PUBLIC; -GRANT EXECUTE ON FUNCTION testfunc1(int), testfunc2(int) TO regress_user2; -GRANT USAGE ON FUNCTION testfunc1(int) TO regress_user3; -- semantic error -GRANT ALL PRIVILEGES ON FUNCTION testfunc1(int) TO regress_user4; -GRANT ALL PRIVILEGES ON FUNCTION testfunc_nosuch(int) TO regress_user4; - -CREATE FUNCTION testfunc4(boolean) RETURNS text +SET SESSION AUTHORIZATION regress_priv_user1; +GRANT USAGE ON LANGUAGE sql TO regress_priv_user2; -- fail +CREATE FUNCTION priv_testfunc1(int) RETURNS int AS 'select 2 * $1;' LANGUAGE sql; +CREATE FUNCTION priv_testfunc2(int) RETURNS int AS 'select 3 * $1;' LANGUAGE sql; +CREATE AGGREGATE priv_testagg1(int) (sfunc = int4pl, stype = int4); +CREATE PROCEDURE priv_testproc1(int) AS 'select $1;' LANGUAGE sql; + +REVOKE ALL ON FUNCTION priv_testfunc1(int), priv_testfunc2(int), priv_testagg1(int) FROM PUBLIC; +GRANT EXECUTE ON FUNCTION priv_testfunc1(int), priv_testfunc2(int), priv_testagg1(int) TO regress_priv_user2; +REVOKE ALL ON 
FUNCTION priv_testproc1(int) FROM PUBLIC; -- fail, not a function +REVOKE ALL ON PROCEDURE priv_testproc1(int) FROM PUBLIC; +GRANT EXECUTE ON PROCEDURE priv_testproc1(int) TO regress_priv_user2; +GRANT USAGE ON FUNCTION priv_testfunc1(int) TO regress_priv_user3; -- semantic error +GRANT USAGE ON FUNCTION priv_testagg1(int) TO regress_priv_user3; -- semantic error +GRANT USAGE ON PROCEDURE priv_testproc1(int) TO regress_priv_user3; -- semantic error +GRANT ALL PRIVILEGES ON FUNCTION priv_testfunc1(int) TO regress_priv_user4; +GRANT ALL PRIVILEGES ON FUNCTION priv_testfunc_nosuch(int) TO regress_priv_user4; +GRANT ALL PRIVILEGES ON FUNCTION priv_testagg1(int) TO regress_priv_user4; +GRANT ALL PRIVILEGES ON PROCEDURE priv_testproc1(int) TO regress_priv_user4; + +CREATE FUNCTION priv_testfunc4(boolean) RETURNS text AS 'select col1 from atest2 where col2 = $1;' LANGUAGE sql SECURITY DEFINER; -GRANT EXECUTE ON FUNCTION testfunc4(boolean) TO regress_user3; - -SET SESSION AUTHORIZATION regress_user2; -SELECT testfunc1(5), testfunc2(5); -- ok -CREATE FUNCTION testfunc3(int) RETURNS int AS 'select 2 * $1;' LANGUAGE sql; -- fail - -SET SESSION AUTHORIZATION regress_user3; -SELECT testfunc1(5); -- fail +GRANT EXECUTE ON FUNCTION priv_testfunc4(boolean) TO regress_priv_user3; + +SET SESSION AUTHORIZATION regress_priv_user2; +SELECT priv_testfunc1(5), priv_testfunc2(5); -- ok +CREATE FUNCTION priv_testfunc3(int) RETURNS int AS 'select 2 * $1;' LANGUAGE sql; -- fail +SELECT priv_testagg1(x) FROM (VALUES (1), (2), (3)) _(x); -- ok +CALL priv_testproc1(6); -- ok + +SET SESSION AUTHORIZATION regress_priv_user3; +SELECT priv_testfunc1(5); -- fail +SELECT priv_testagg1(x) FROM (VALUES (1), (2), (3)) _(x); -- fail +CALL priv_testproc1(6); -- fail SELECT col1 FROM atest2 WHERE col2 = true; -- fail -SELECT testfunc4(true); -- ok +SELECT priv_testfunc4(true); -- ok -SET SESSION AUTHORIZATION regress_user4; -SELECT testfunc1(5); -- ok +SET SESSION AUTHORIZATION regress_priv_user4; +SELECT priv_testfunc1(5); -- ok +SELECT priv_testagg1(x) FROM (VALUES (1), (2), (3)) _(x); -- ok +CALL priv_testproc1(6); -- ok -DROP FUNCTION testfunc1(int); -- fail +DROP FUNCTION priv_testfunc1(int); -- fail +DROP AGGREGATE priv_testagg1(int); -- fail +DROP PROCEDURE priv_testproc1(int); -- fail \c - -DROP FUNCTION testfunc1(int); -- ok +DROP FUNCTION priv_testfunc1(int); -- ok -- restore to sanity GRANT ALL PRIVILEGES ON LANGUAGE sql TO PUBLIC; @@ -463,8 +495,8 @@ GRANT ALL PRIVILEGES ON LANGUAGE sql TO PUBLIC; BEGIN; SELECT '{1}'::int4[]::int8[]; REVOKE ALL ON FUNCTION int8(integer) FROM PUBLIC; -SELECT '{1}'::int4[]::int8[]; --superuser, suceed -SET SESSION AUTHORIZATION regress_user4; +SELECT '{1}'::int4[]::int8[]; --superuser, succeed +SET SESSION AUTHORIZATION regress_priv_user4; SELECT '{1}'::int4[]::int8[]; --other user, fail ROLLBACK; @@ -473,112 +505,112 @@ ROLLBACK; -- switch to superuser \c - -CREATE TYPE testtype1 AS (a int, b text); -REVOKE USAGE ON TYPE testtype1 FROM PUBLIC; -GRANT USAGE ON TYPE testtype1 TO regress_user2; -GRANT USAGE ON TYPE _testtype1 TO regress_user2; -- fail -GRANT USAGE ON DOMAIN testtype1 TO regress_user2; -- fail +CREATE TYPE priv_testtype1 AS (a int, b text); +REVOKE USAGE ON TYPE priv_testtype1 FROM PUBLIC; +GRANT USAGE ON TYPE priv_testtype1 TO regress_priv_user2; +GRANT USAGE ON TYPE _priv_testtype1 TO regress_priv_user2; -- fail +GRANT USAGE ON DOMAIN priv_testtype1 TO regress_priv_user2; -- fail -CREATE DOMAIN testdomain1 AS int; -REVOKE USAGE on DOMAIN testdomain1 FROM PUBLIC; -GRANT 
USAGE ON DOMAIN testdomain1 TO regress_user2; -GRANT USAGE ON TYPE testdomain1 TO regress_user2; -- ok +CREATE DOMAIN priv_testdomain1 AS int; +REVOKE USAGE on DOMAIN priv_testdomain1 FROM PUBLIC; +GRANT USAGE ON DOMAIN priv_testdomain1 TO regress_priv_user2; +GRANT USAGE ON TYPE priv_testdomain1 TO regress_priv_user2; -- ok -SET SESSION AUTHORIZATION regress_user1; +SET SESSION AUTHORIZATION regress_priv_user1; -- commands that should fail -CREATE AGGREGATE testagg1a(testdomain1) (sfunc = int4_sum, stype = bigint); +CREATE AGGREGATE priv_testagg1a(priv_testdomain1) (sfunc = int4_sum, stype = bigint); -CREATE DOMAIN testdomain2a AS testdomain1; +CREATE DOMAIN priv_testdomain2a AS priv_testdomain1; -CREATE DOMAIN testdomain3a AS int; -CREATE FUNCTION castfunc(int) RETURNS testdomain3a AS $$ SELECT $1::testdomain3a $$ LANGUAGE SQL; -CREATE CAST (testdomain1 AS testdomain3a) WITH FUNCTION castfunc(int); +CREATE DOMAIN priv_testdomain3a AS int; +CREATE FUNCTION castfunc(int) RETURNS priv_testdomain3a AS $$ SELECT $1::priv_testdomain3a $$ LANGUAGE SQL; +CREATE CAST (priv_testdomain1 AS priv_testdomain3a) WITH FUNCTION castfunc(int); DROP FUNCTION castfunc(int) CASCADE; -DROP DOMAIN testdomain3a; +DROP DOMAIN priv_testdomain3a; -CREATE FUNCTION testfunc5a(a testdomain1) RETURNS int LANGUAGE SQL AS $$ SELECT $1 $$; -CREATE FUNCTION testfunc6a(b int) RETURNS testdomain1 LANGUAGE SQL AS $$ SELECT $1::testdomain1 $$; +CREATE FUNCTION priv_testfunc5a(a priv_testdomain1) RETURNS int LANGUAGE SQL AS $$ SELECT $1 $$; +CREATE FUNCTION priv_testfunc6a(b int) RETURNS priv_testdomain1 LANGUAGE SQL AS $$ SELECT $1::priv_testdomain1 $$; -CREATE OPERATOR !+! (PROCEDURE = int4pl, LEFTARG = testdomain1, RIGHTARG = testdomain1); +CREATE OPERATOR !+! (PROCEDURE = int4pl, LEFTARG = priv_testdomain1, RIGHTARG = priv_testdomain1); -CREATE TABLE test5a (a int, b testdomain1); -CREATE TABLE test6a OF testtype1; -CREATE TABLE test10a (a int[], b testtype1[]); +CREATE TABLE test5a (a int, b priv_testdomain1); +CREATE TABLE test6a OF priv_testtype1; +CREATE TABLE test10a (a int[], b priv_testtype1[]); CREATE TABLE test9a (a int, b int); -ALTER TABLE test9a ADD COLUMN c testdomain1; -ALTER TABLE test9a ALTER COLUMN b TYPE testdomain1; +ALTER TABLE test9a ADD COLUMN c priv_testdomain1; +ALTER TABLE test9a ALTER COLUMN b TYPE priv_testdomain1; -CREATE TYPE test7a AS (a int, b testdomain1); +CREATE TYPE test7a AS (a int, b priv_testdomain1); CREATE TYPE test8a AS (a int, b int); -ALTER TYPE test8a ADD ATTRIBUTE c testdomain1; -ALTER TYPE test8a ALTER ATTRIBUTE b TYPE testdomain1; +ALTER TYPE test8a ADD ATTRIBUTE c priv_testdomain1; +ALTER TYPE test8a ALTER ATTRIBUTE b TYPE priv_testdomain1; -CREATE TABLE test11a AS (SELECT 1::testdomain1 AS a); +CREATE TABLE test11a AS (SELECT 1::priv_testdomain1 AS a); -REVOKE ALL ON TYPE testtype1 FROM PUBLIC; +REVOKE ALL ON TYPE priv_testtype1 FROM PUBLIC; -SET SESSION AUTHORIZATION regress_user2; +SET SESSION AUTHORIZATION regress_priv_user2; -- commands that should succeed -CREATE AGGREGATE testagg1b(testdomain1) (sfunc = int4_sum, stype = bigint); +CREATE AGGREGATE priv_testagg1b(priv_testdomain1) (sfunc = int4_sum, stype = bigint); -CREATE DOMAIN testdomain2b AS testdomain1; +CREATE DOMAIN priv_testdomain2b AS priv_testdomain1; -CREATE DOMAIN testdomain3b AS int; -CREATE FUNCTION castfunc(int) RETURNS testdomain3b AS $$ SELECT $1::testdomain3b $$ LANGUAGE SQL; -CREATE CAST (testdomain1 AS testdomain3b) WITH FUNCTION castfunc(int); +CREATE DOMAIN priv_testdomain3b AS int; +CREATE 
FUNCTION castfunc(int) RETURNS priv_testdomain3b AS $$ SELECT $1::priv_testdomain3b $$ LANGUAGE SQL; +CREATE CAST (priv_testdomain1 AS priv_testdomain3b) WITH FUNCTION castfunc(int); -CREATE FUNCTION testfunc5b(a testdomain1) RETURNS int LANGUAGE SQL AS $$ SELECT $1 $$; -CREATE FUNCTION testfunc6b(b int) RETURNS testdomain1 LANGUAGE SQL AS $$ SELECT $1::testdomain1 $$; +CREATE FUNCTION priv_testfunc5b(a priv_testdomain1) RETURNS int LANGUAGE SQL AS $$ SELECT $1 $$; +CREATE FUNCTION priv_testfunc6b(b int) RETURNS priv_testdomain1 LANGUAGE SQL AS $$ SELECT $1::priv_testdomain1 $$; -CREATE OPERATOR !! (PROCEDURE = testfunc5b, RIGHTARG = testdomain1); +CREATE OPERATOR !! (PROCEDURE = priv_testfunc5b, RIGHTARG = priv_testdomain1); -CREATE TABLE test5b (a int, b testdomain1); -CREATE TABLE test6b OF testtype1; -CREATE TABLE test10b (a int[], b testtype1[]); +CREATE TABLE test5b (a int, b priv_testdomain1); +CREATE TABLE test6b OF priv_testtype1; +CREATE TABLE test10b (a int[], b priv_testtype1[]); CREATE TABLE test9b (a int, b int); -ALTER TABLE test9b ADD COLUMN c testdomain1; -ALTER TABLE test9b ALTER COLUMN b TYPE testdomain1; +ALTER TABLE test9b ADD COLUMN c priv_testdomain1; +ALTER TABLE test9b ALTER COLUMN b TYPE priv_testdomain1; -CREATE TYPE test7b AS (a int, b testdomain1); +CREATE TYPE test7b AS (a int, b priv_testdomain1); CREATE TYPE test8b AS (a int, b int); -ALTER TYPE test8b ADD ATTRIBUTE c testdomain1; -ALTER TYPE test8b ALTER ATTRIBUTE b TYPE testdomain1; +ALTER TYPE test8b ADD ATTRIBUTE c priv_testdomain1; +ALTER TYPE test8b ALTER ATTRIBUTE b TYPE priv_testdomain1; -CREATE TABLE test11b AS (SELECT 1::testdomain1 AS a); +CREATE TABLE test11b AS (SELECT 1::priv_testdomain1 AS a); -REVOKE ALL ON TYPE testtype1 FROM PUBLIC; +REVOKE ALL ON TYPE priv_testtype1 FROM PUBLIC; \c - -DROP AGGREGATE testagg1b(testdomain1); -DROP DOMAIN testdomain2b; -DROP OPERATOR !! (NONE, testdomain1); -DROP FUNCTION testfunc5b(a testdomain1); -DROP FUNCTION testfunc6b(b int); +DROP AGGREGATE priv_testagg1b(priv_testdomain1); +DROP DOMAIN priv_testdomain2b; +DROP OPERATOR !! 
(NONE, priv_testdomain1); +DROP FUNCTION priv_testfunc5b(a priv_testdomain1); +DROP FUNCTION priv_testfunc6b(b int); DROP TABLE test5b; DROP TABLE test6b; DROP TABLE test9b; DROP TABLE test10b; DROP TYPE test7b; DROP TYPE test8b; -DROP CAST (testdomain1 AS testdomain3b); +DROP CAST (priv_testdomain1 AS priv_testdomain3b); DROP FUNCTION castfunc(int) CASCADE; -DROP DOMAIN testdomain3b; +DROP DOMAIN priv_testdomain3b; DROP TABLE test11b; -DROP TYPE testtype1; -- ok -DROP DOMAIN testdomain1; -- ok +DROP TYPE priv_testtype1; -- ok +DROP DOMAIN priv_testdomain1; -- ok -- truncate -SET SESSION AUTHORIZATION regress_user5; +SET SESSION AUTHORIZATION regress_priv_user5; TRUNCATE atest2; -- ok TRUNCATE atest3; -- fail @@ -627,7 +659,7 @@ select has_table_privilege(t1.oid,'trigger') from (select oid from pg_class where relname = 'pg_authid') as t1; -- non-superuser -SET SESSION AUTHORIZATION regress_user3; +SET SESSION AUTHORIZATION regress_priv_user3; select has_table_privilege(current_user,'pg_class','select'); select has_table_privilege(current_user,'pg_class','insert'); @@ -683,59 +715,77 @@ from (select oid from pg_class where relname = 'atest1') as t1; select has_table_privilege(t1.oid,'trigger') from (select oid from pg_class where relname = 'atest1') as t1; +-- has_column_privilege function + +-- bad-input checks (as non-super-user) +select has_column_privilege('pg_authid',NULL,'select'); +select has_column_privilege('pg_authid','nosuchcol','select'); +select has_column_privilege(9999,'nosuchcol','select'); +select has_column_privilege(9999,99::int2,'select'); +select has_column_privilege('pg_authid',99::int2,'select'); +select has_column_privilege(9999,99::int2,'select'); + +create temp table mytable(f1 int, f2 int, f3 int); +alter table mytable drop column f2; +select has_column_privilege('mytable','f2','select'); +select has_column_privilege('mytable','........pg.dropped.2........','select'); +select has_column_privilege('mytable',2::int2,'select'); +revoke select on table mytable from regress_priv_user3; +select has_column_privilege('mytable',2::int2,'select'); +drop table mytable; -- Grant options -SET SESSION AUTHORIZATION regress_user1; +SET SESSION AUTHORIZATION regress_priv_user1; CREATE TABLE atest4 (a int); -GRANT SELECT ON atest4 TO regress_user2 WITH GRANT OPTION; -GRANT UPDATE ON atest4 TO regress_user2; -GRANT SELECT ON atest4 TO GROUP regress_group1 WITH GRANT OPTION; +GRANT SELECT ON atest4 TO regress_priv_user2 WITH GRANT OPTION; +GRANT UPDATE ON atest4 TO regress_priv_user2; +GRANT SELECT ON atest4 TO GROUP regress_priv_group1 WITH GRANT OPTION; -SET SESSION AUTHORIZATION regress_user2; +SET SESSION AUTHORIZATION regress_priv_user2; -GRANT SELECT ON atest4 TO regress_user3; -GRANT UPDATE ON atest4 TO regress_user3; -- fail +GRANT SELECT ON atest4 TO regress_priv_user3; +GRANT UPDATE ON atest4 TO regress_priv_user3; -- fail -SET SESSION AUTHORIZATION regress_user1; +SET SESSION AUTHORIZATION regress_priv_user1; -REVOKE SELECT ON atest4 FROM regress_user3; -- does nothing -SELECT has_table_privilege('regress_user3', 'atest4', 'SELECT'); -- true -REVOKE SELECT ON atest4 FROM regress_user2; -- fail -REVOKE GRANT OPTION FOR SELECT ON atest4 FROM regress_user2 CASCADE; -- ok -SELECT has_table_privilege('regress_user2', 'atest4', 'SELECT'); -- true -SELECT has_table_privilege('regress_user3', 'atest4', 'SELECT'); -- false +REVOKE SELECT ON atest4 FROM regress_priv_user3; -- does nothing +SELECT has_table_privilege('regress_priv_user3', 'atest4', 'SELECT'); -- true +REVOKE SELECT 
ON atest4 FROM regress_priv_user2; -- fail +REVOKE GRANT OPTION FOR SELECT ON atest4 FROM regress_priv_user2 CASCADE; -- ok +SELECT has_table_privilege('regress_priv_user2', 'atest4', 'SELECT'); -- true +SELECT has_table_privilege('regress_priv_user3', 'atest4', 'SELECT'); -- false -SELECT has_table_privilege('regress_user1', 'atest4', 'SELECT WITH GRANT OPTION'); -- true +SELECT has_table_privilege('regress_priv_user1', 'atest4', 'SELECT WITH GRANT OPTION'); -- true -- Admin options -SET SESSION AUTHORIZATION regress_user4; +SET SESSION AUTHORIZATION regress_priv_user4; CREATE FUNCTION dogrant_ok() RETURNS void LANGUAGE sql SECURITY DEFINER AS - 'GRANT regress_group2 TO regress_user5'; -GRANT regress_group2 TO regress_user5; -- ok: had ADMIN OPTION -SET ROLE regress_group2; -GRANT regress_group2 TO regress_user5; -- fails: SET ROLE suspended privilege + 'GRANT regress_priv_group2 TO regress_priv_user5'; +GRANT regress_priv_group2 TO regress_priv_user5; -- ok: had ADMIN OPTION +SET ROLE regress_priv_group2; +GRANT regress_priv_group2 TO regress_priv_user5; -- fails: SET ROLE suspended privilege -SET SESSION AUTHORIZATION regress_user1; -GRANT regress_group2 TO regress_user5; -- fails: no ADMIN OPTION +SET SESSION AUTHORIZATION regress_priv_user1; +GRANT regress_priv_group2 TO regress_priv_user5; -- fails: no ADMIN OPTION SELECT dogrant_ok(); -- ok: SECURITY DEFINER conveys ADMIN -SET ROLE regress_group2; -GRANT regress_group2 TO regress_user5; -- fails: SET ROLE did not help +SET ROLE regress_priv_group2; +GRANT regress_priv_group2 TO regress_priv_user5; -- fails: SET ROLE did not help -SET SESSION AUTHORIZATION regress_group2; -GRANT regress_group2 TO regress_user5; -- ok: a role can self-admin +SET SESSION AUTHORIZATION regress_priv_group2; +GRANT regress_priv_group2 TO regress_priv_user5; -- ok: a role can self-admin CREATE FUNCTION dogrant_fails() RETURNS void LANGUAGE sql SECURITY DEFINER AS - 'GRANT regress_group2 TO regress_user5'; + 'GRANT regress_priv_group2 TO regress_priv_user5'; SELECT dogrant_fails(); -- fails: no self-admin in SECURITY DEFINER DROP FUNCTION dogrant_fails(); -SET SESSION AUTHORIZATION regress_user4; +SET SESSION AUTHORIZATION regress_priv_user4; DROP FUNCTION dogrant_ok(); -REVOKE regress_group2 FROM regress_user5; +REVOKE regress_priv_group2 FROM regress_priv_user5; -- has_sequence_privilege tests @@ -743,19 +793,19 @@ REVOKE regress_group2 FROM regress_user5; CREATE SEQUENCE x_seq; -GRANT USAGE on x_seq to regress_user2; +GRANT USAGE on x_seq to regress_priv_user2; -SELECT has_sequence_privilege('regress_user1', 'atest1', 'SELECT'); -SELECT has_sequence_privilege('regress_user1', 'x_seq', 'INSERT'); -SELECT has_sequence_privilege('regress_user1', 'x_seq', 'SELECT'); +SELECT has_sequence_privilege('regress_priv_user1', 'atest1', 'SELECT'); +SELECT has_sequence_privilege('regress_priv_user1', 'x_seq', 'INSERT'); +SELECT has_sequence_privilege('regress_priv_user1', 'x_seq', 'SELECT'); -SET SESSION AUTHORIZATION regress_user2; +SET SESSION AUTHORIZATION regress_priv_user2; SELECT has_sequence_privilege('x_seq', 'USAGE'); -- largeobject privilege tests \c - -SET SESSION AUTHORIZATION regress_user1; +SET SESSION AUTHORIZATION regress_priv_user1; SELECT lo_create(1001); SELECT lo_create(1002); @@ -764,17 +814,17 @@ SELECT lo_create(1004); SELECT lo_create(1005); GRANT ALL ON LARGE OBJECT 1001 TO PUBLIC; -GRANT SELECT ON LARGE OBJECT 1003 TO regress_user2; -GRANT SELECT,UPDATE ON LARGE OBJECT 1004 TO regress_user2; -GRANT ALL ON LARGE OBJECT 1005 TO regress_user2; 
-GRANT SELECT ON LARGE OBJECT 1005 TO regress_user2 WITH GRANT OPTION; +GRANT SELECT ON LARGE OBJECT 1003 TO regress_priv_user2; +GRANT SELECT,UPDATE ON LARGE OBJECT 1004 TO regress_priv_user2; +GRANT ALL ON LARGE OBJECT 1005 TO regress_priv_user2; +GRANT SELECT ON LARGE OBJECT 1005 TO regress_priv_user2 WITH GRANT OPTION; GRANT SELECT, INSERT ON LARGE OBJECT 1001 TO PUBLIC; -- to be failed GRANT SELECT, UPDATE ON LARGE OBJECT 1001 TO nosuchuser; -- to be failed GRANT SELECT, UPDATE ON LARGE OBJECT 999 TO PUBLIC; -- to be failed \c - -SET SESSION AUTHORIZATION regress_user2; +SET SESSION AUTHORIZATION regress_priv_user2; SELECT lo_create(2001); SELECT lo_create(2002); @@ -792,10 +842,10 @@ SELECT lowrite(lo_open(1002, x'20000'::int), 'abcd'); -- to be denied SELECT lowrite(lo_open(1003, x'20000'::int), 'abcd'); -- to be denied SELECT lowrite(lo_open(1004, x'20000'::int), 'abcd'); -GRANT SELECT ON LARGE OBJECT 1005 TO regress_user3; -GRANT UPDATE ON LARGE OBJECT 1006 TO regress_user3; -- to be denied +GRANT SELECT ON LARGE OBJECT 1005 TO regress_priv_user3; +GRANT UPDATE ON LARGE OBJECT 1006 TO regress_priv_user3; -- to be denied REVOKE ALL ON LARGE OBJECT 2001, 2002 FROM PUBLIC; -GRANT ALL ON LARGE OBJECT 2001 TO regress_user3; +GRANT ALL ON LARGE OBJECT 2001 TO regress_priv_user3; SELECT lo_unlink(1001); -- to be denied SELECT lo_unlink(2002); @@ -804,7 +854,7 @@ SELECT lo_unlink(2002); -- confirm ACL setting SELECT oid, pg_get_userbyid(lomowner) ownername, lomacl FROM pg_largeobject_metadata WHERE oid >= 1000 AND oid < 3000 ORDER BY oid; -SET SESSION AUTHORIZATION regress_user3; +SET SESSION AUTHORIZATION regress_priv_user3; SELECT loread(lo_open(1001, x'40000'::int), 32); SELECT loread(lo_open(1003, x'40000'::int), 32); -- to be denied @@ -816,7 +866,7 @@ SELECT lo_truncate(lo_open(2001, x'20000'::int), 10); -- compatibility mode in largeobject permission \c - SET lo_compat_privileges = false; -- default setting -SET SESSION AUTHORIZATION regress_user4; +SET SESSION AUTHORIZATION regress_priv_user4; SELECT loread(lo_open(1002, x'40000'::int), 32); -- to be denied SELECT lowrite(lo_open(1002, x'20000'::int), 'abcd'); -- to be denied @@ -824,10 +874,12 @@ SELECT lo_truncate(lo_open(1002, x'20000'::int), 10); -- to be denied SELECT lo_put(1002, 1, 'abcd'); -- to be denied SELECT lo_unlink(1002); -- to be denied SELECT lo_export(1001, '/dev/null'); -- to be denied +SELECT lo_import('/dev/null'); -- to be denied +SELECT lo_import('/dev/null', 2003); -- to be denied \c - SET lo_compat_privileges = true; -- compatibility mode -SET SESSION AUTHORIZATION regress_user4; +SET SESSION AUTHORIZATION regress_priv_user4; SELECT loread(lo_open(1002, x'40000'::int), 32); SELECT lowrite(lo_open(1002, x'20000'::int), 'abcd'); @@ -839,107 +891,128 @@ SELECT lo_export(1001, '/dev/null'); -- to be denied \c - SELECT * FROM pg_largeobject LIMIT 0; -SET SESSION AUTHORIZATION regress_user1; +SET SESSION AUTHORIZATION regress_priv_user1; SELECT * FROM pg_largeobject LIMIT 0; -- to be denied -- test default ACLs \c - CREATE SCHEMA testns; -GRANT ALL ON SCHEMA testns TO regress_user1; +GRANT ALL ON SCHEMA testns TO regress_priv_user1; CREATE TABLE testns.acltest1 (x int); -SELECT has_table_privilege('regress_user1', 'testns.acltest1', 'SELECT'); -- no -SELECT has_table_privilege('regress_user1', 'testns.acltest1', 'INSERT'); -- no +SELECT has_table_privilege('regress_priv_user1', 'testns.acltest1', 'SELECT'); -- no +SELECT has_table_privilege('regress_priv_user1', 'testns.acltest1', 'INSERT'); -- no ALTER DEFAULT 
PRIVILEGES IN SCHEMA testns GRANT SELECT ON TABLES TO public; -SELECT has_table_privilege('regress_user1', 'testns.acltest1', 'SELECT'); -- no -SELECT has_table_privilege('regress_user1', 'testns.acltest1', 'INSERT'); -- no +SELECT has_table_privilege('regress_priv_user1', 'testns.acltest1', 'SELECT'); -- no +SELECT has_table_privilege('regress_priv_user1', 'testns.acltest1', 'INSERT'); -- no DROP TABLE testns.acltest1; CREATE TABLE testns.acltest1 (x int); -SELECT has_table_privilege('regress_user1', 'testns.acltest1', 'SELECT'); -- yes -SELECT has_table_privilege('regress_user1', 'testns.acltest1', 'INSERT'); -- no +SELECT has_table_privilege('regress_priv_user1', 'testns.acltest1', 'SELECT'); -- yes +SELECT has_table_privilege('regress_priv_user1', 'testns.acltest1', 'INSERT'); -- no -ALTER DEFAULT PRIVILEGES IN SCHEMA testns GRANT INSERT ON TABLES TO regress_user1; +ALTER DEFAULT PRIVILEGES IN SCHEMA testns GRANT INSERT ON TABLES TO regress_priv_user1; DROP TABLE testns.acltest1; CREATE TABLE testns.acltest1 (x int); -SELECT has_table_privilege('regress_user1', 'testns.acltest1', 'SELECT'); -- yes -SELECT has_table_privilege('regress_user1', 'testns.acltest1', 'INSERT'); -- yes +SELECT has_table_privilege('regress_priv_user1', 'testns.acltest1', 'SELECT'); -- yes +SELECT has_table_privilege('regress_priv_user1', 'testns.acltest1', 'INSERT'); -- yes -ALTER DEFAULT PRIVILEGES IN SCHEMA testns REVOKE INSERT ON TABLES FROM regress_user1; +ALTER DEFAULT PRIVILEGES IN SCHEMA testns REVOKE INSERT ON TABLES FROM regress_priv_user1; DROP TABLE testns.acltest1; CREATE TABLE testns.acltest1 (x int); -SELECT has_table_privilege('regress_user1', 'testns.acltest1', 'SELECT'); -- yes -SELECT has_table_privilege('regress_user1', 'testns.acltest1', 'INSERT'); -- no +SELECT has_table_privilege('regress_priv_user1', 'testns.acltest1', 'SELECT'); -- yes +SELECT has_table_privilege('regress_priv_user1', 'testns.acltest1', 'INSERT'); -- no -ALTER DEFAULT PRIVILEGES FOR ROLE regress_user1 REVOKE EXECUTE ON FUNCTIONS FROM public; +ALTER DEFAULT PRIVILEGES FOR ROLE regress_priv_user1 REVOKE EXECUTE ON FUNCTIONS FROM public; -ALTER DEFAULT PRIVILEGES IN SCHEMA testns GRANT USAGE ON SCHEMAS TO regress_user2; -- error +ALTER DEFAULT PRIVILEGES IN SCHEMA testns GRANT USAGE ON SCHEMAS TO regress_priv_user2; -- error -ALTER DEFAULT PRIVILEGES GRANT USAGE ON SCHEMAS TO regress_user2; +-- +-- Testing blanket default grants is very hazardous since it might change +-- the privileges attached to objects created by concurrent regression tests. +-- To avoid that, be sure to revoke the privileges again before committing. 
+-- +BEGIN; + +ALTER DEFAULT PRIVILEGES GRANT USAGE ON SCHEMAS TO regress_priv_user2; CREATE SCHEMA testns2; -SELECT has_schema_privilege('regress_user2', 'testns2', 'USAGE'); -- yes -SELECT has_schema_privilege('regress_user2', 'testns2', 'CREATE'); -- no +SELECT has_schema_privilege('regress_priv_user2', 'testns2', 'USAGE'); -- yes +SELECT has_schema_privilege('regress_priv_user2', 'testns2', 'CREATE'); -- no -ALTER DEFAULT PRIVILEGES REVOKE USAGE ON SCHEMAS FROM regress_user2; +ALTER DEFAULT PRIVILEGES REVOKE USAGE ON SCHEMAS FROM regress_priv_user2; CREATE SCHEMA testns3; -SELECT has_schema_privilege('regress_user2', 'testns3', 'USAGE'); -- no -SELECT has_schema_privilege('regress_user2', 'testns3', 'CREATE'); -- no +SELECT has_schema_privilege('regress_priv_user2', 'testns3', 'USAGE'); -- no +SELECT has_schema_privilege('regress_priv_user2', 'testns3', 'CREATE'); -- no -ALTER DEFAULT PRIVILEGES GRANT ALL ON SCHEMAS TO regress_user2; +ALTER DEFAULT PRIVILEGES GRANT ALL ON SCHEMAS TO regress_priv_user2; CREATE SCHEMA testns4; -SELECT has_schema_privilege('regress_user2', 'testns4', 'USAGE'); -- yes -SELECT has_schema_privilege('regress_user2', 'testns4', 'CREATE'); -- yes +SELECT has_schema_privilege('regress_priv_user2', 'testns4', 'USAGE'); -- yes +SELECT has_schema_privilege('regress_priv_user2', 'testns4', 'CREATE'); -- yes -ALTER DEFAULT PRIVILEGES REVOKE ALL ON SCHEMAS FROM regress_user2; +ALTER DEFAULT PRIVILEGES REVOKE ALL ON SCHEMAS FROM regress_priv_user2; + +COMMIT; CREATE SCHEMA testns5; -SELECT has_schema_privilege('regress_user2', 'testns5', 'USAGE'); -- no -SELECT has_schema_privilege('regress_user2', 'testns5', 'CREATE'); -- no +SELECT has_schema_privilege('regress_priv_user2', 'testns5', 'USAGE'); -- no +SELECT has_schema_privilege('regress_priv_user2', 'testns5', 'CREATE'); -- no -SET ROLE regress_user1; +SET ROLE regress_priv_user1; CREATE FUNCTION testns.foo() RETURNS int AS 'select 1' LANGUAGE sql; +CREATE AGGREGATE testns.agg1(int) (sfunc = int4pl, stype = int4); +CREATE PROCEDURE testns.bar() AS 'select 1' LANGUAGE sql; -SELECT has_function_privilege('regress_user2', 'testns.foo()', 'EXECUTE'); -- no +SELECT has_function_privilege('regress_priv_user2', 'testns.foo()', 'EXECUTE'); -- no +SELECT has_function_privilege('regress_priv_user2', 'testns.agg1(int)', 'EXECUTE'); -- no +SELECT has_function_privilege('regress_priv_user2', 'testns.bar()', 'EXECUTE'); -- no -ALTER DEFAULT PRIVILEGES IN SCHEMA testns GRANT EXECUTE ON FUNCTIONS to public; +ALTER DEFAULT PRIVILEGES IN SCHEMA testns GRANT EXECUTE ON ROUTINES to public; DROP FUNCTION testns.foo(); CREATE FUNCTION testns.foo() RETURNS int AS 'select 1' LANGUAGE sql; +DROP AGGREGATE testns.agg1(int); +CREATE AGGREGATE testns.agg1(int) (sfunc = int4pl, stype = int4); +DROP PROCEDURE testns.bar(); +CREATE PROCEDURE testns.bar() AS 'select 1' LANGUAGE sql; -SELECT has_function_privilege('regress_user2', 'testns.foo()', 'EXECUTE'); -- yes +SELECT has_function_privilege('regress_priv_user2', 'testns.foo()', 'EXECUTE'); -- yes +SELECT has_function_privilege('regress_priv_user2', 'testns.agg1(int)', 'EXECUTE'); -- yes +SELECT has_function_privilege('regress_priv_user2', 'testns.bar()', 'EXECUTE'); -- yes (counts as function here) DROP FUNCTION testns.foo(); +DROP AGGREGATE testns.agg1(int); +DROP PROCEDURE testns.bar(); -ALTER DEFAULT PRIVILEGES FOR ROLE regress_user1 REVOKE USAGE ON TYPES FROM public; +ALTER DEFAULT PRIVILEGES FOR ROLE regress_priv_user1 REVOKE USAGE ON TYPES FROM public; -CREATE DOMAIN testns.testdomain1 
AS int; +CREATE DOMAIN testns.priv_testdomain1 AS int; -SELECT has_type_privilege('regress_user2', 'testns.testdomain1', 'USAGE'); -- no +SELECT has_type_privilege('regress_priv_user2', 'testns.priv_testdomain1', 'USAGE'); -- no ALTER DEFAULT PRIVILEGES IN SCHEMA testns GRANT USAGE ON TYPES to public; -DROP DOMAIN testns.testdomain1; -CREATE DOMAIN testns.testdomain1 AS int; +DROP DOMAIN testns.priv_testdomain1; +CREATE DOMAIN testns.priv_testdomain1 AS int; -SELECT has_type_privilege('regress_user2', 'testns.testdomain1', 'USAGE'); -- yes +SELECT has_type_privilege('regress_priv_user2', 'testns.priv_testdomain1', 'USAGE'); -- yes -DROP DOMAIN testns.testdomain1; +DROP DOMAIN testns.priv_testdomain1; RESET ROLE; @@ -965,25 +1038,41 @@ CREATE SCHEMA testns; CREATE TABLE testns.t1 (f1 int); CREATE TABLE testns.t2 (f1 int); -SELECT has_table_privilege('regress_user1', 'testns.t1', 'SELECT'); -- false +SELECT has_table_privilege('regress_priv_user1', 'testns.t1', 'SELECT'); -- false -GRANT ALL ON ALL TABLES IN SCHEMA testns TO regress_user1; +GRANT ALL ON ALL TABLES IN SCHEMA testns TO regress_priv_user1; -SELECT has_table_privilege('regress_user1', 'testns.t1', 'SELECT'); -- true -SELECT has_table_privilege('regress_user1', 'testns.t2', 'SELECT'); -- true +SELECT has_table_privilege('regress_priv_user1', 'testns.t1', 'SELECT'); -- true +SELECT has_table_privilege('regress_priv_user1', 'testns.t2', 'SELECT'); -- true -REVOKE ALL ON ALL TABLES IN SCHEMA testns FROM regress_user1; +REVOKE ALL ON ALL TABLES IN SCHEMA testns FROM regress_priv_user1; -SELECT has_table_privilege('regress_user1', 'testns.t1', 'SELECT'); -- false -SELECT has_table_privilege('regress_user1', 'testns.t2', 'SELECT'); -- false +SELECT has_table_privilege('regress_priv_user1', 'testns.t1', 'SELECT'); -- false +SELECT has_table_privilege('regress_priv_user1', 'testns.t2', 'SELECT'); -- false -CREATE FUNCTION testns.testfunc(int) RETURNS int AS 'select 3 * $1;' LANGUAGE sql; +CREATE FUNCTION testns.priv_testfunc(int) RETURNS int AS 'select 3 * $1;' LANGUAGE sql; +CREATE AGGREGATE testns.priv_testagg(int) (sfunc = int4pl, stype = int4); +CREATE PROCEDURE testns.priv_testproc(int) AS 'select 3' LANGUAGE sql; -SELECT has_function_privilege('regress_user1', 'testns.testfunc(int)', 'EXECUTE'); -- true by default +SELECT has_function_privilege('regress_priv_user1', 'testns.priv_testfunc(int)', 'EXECUTE'); -- true by default +SELECT has_function_privilege('regress_priv_user1', 'testns.priv_testagg(int)', 'EXECUTE'); -- true by default +SELECT has_function_privilege('regress_priv_user1', 'testns.priv_testproc(int)', 'EXECUTE'); -- true by default REVOKE ALL ON ALL FUNCTIONS IN SCHEMA testns FROM PUBLIC; -SELECT has_function_privilege('regress_user1', 'testns.testfunc(int)', 'EXECUTE'); -- false +SELECT has_function_privilege('regress_priv_user1', 'testns.priv_testfunc(int)', 'EXECUTE'); -- false +SELECT has_function_privilege('regress_priv_user1', 'testns.priv_testagg(int)', 'EXECUTE'); -- false +SELECT has_function_privilege('regress_priv_user1', 'testns.priv_testproc(int)', 'EXECUTE'); -- still true, not a function + +REVOKE ALL ON ALL PROCEDURES IN SCHEMA testns FROM PUBLIC; + +SELECT has_function_privilege('regress_priv_user1', 'testns.priv_testproc(int)', 'EXECUTE'); -- now false + +GRANT ALL ON ALL ROUTINES IN SCHEMA testns TO PUBLIC; + +SELECT has_function_privilege('regress_priv_user1', 'testns.priv_testfunc(int)', 'EXECUTE'); -- true +SELECT has_function_privilege('regress_priv_user1', 'testns.priv_testagg(int)', 
'EXECUTE'); -- true +SELECT has_function_privilege('regress_priv_user1', 'testns.priv_testproc(int)', 'EXECUTE'); -- true \set VERBOSITY terse \\ -- suppress cascade details DROP SCHEMA testns CASCADE; @@ -1020,24 +1109,24 @@ DROP ROLE regress_schemauser_renamed; -- test that dependent privileges are revoked (or not) properly \c - -set session role regress_user1; +set session role regress_priv_user1; create table dep_priv_test (a int); -grant select on dep_priv_test to regress_user2 with grant option; -grant select on dep_priv_test to regress_user3 with grant option; -set session role regress_user2; -grant select on dep_priv_test to regress_user4 with grant option; -set session role regress_user3; -grant select on dep_priv_test to regress_user4 with grant option; -set session role regress_user4; -grant select on dep_priv_test to regress_user5; +grant select on dep_priv_test to regress_priv_user2 with grant option; +grant select on dep_priv_test to regress_priv_user3 with grant option; +set session role regress_priv_user2; +grant select on dep_priv_test to regress_priv_user4 with grant option; +set session role regress_priv_user3; +grant select on dep_priv_test to regress_priv_user4 with grant option; +set session role regress_priv_user4; +grant select on dep_priv_test to regress_priv_user5; \dp dep_priv_test -set session role regress_user2; -revoke select on dep_priv_test from regress_user4 cascade; +set session role regress_priv_user2; +revoke select on dep_priv_test from regress_priv_user4 cascade; \dp dep_priv_test -set session role regress_user3; -revoke select on dep_priv_test from regress_user4 cascade; +set session role regress_priv_user3; +revoke select on dep_priv_test from regress_priv_user4 cascade; \dp dep_priv_test -set session role regress_user1; +set session role regress_priv_user1; drop table dep_priv_test; @@ -1047,8 +1136,10 @@ drop table dep_priv_test; drop sequence x_seq; -DROP FUNCTION testfunc2(int); -DROP FUNCTION testfunc4(boolean); +DROP AGGREGATE priv_testagg1(int); +DROP FUNCTION priv_testfunc2(int); +DROP FUNCTION priv_testfunc4(boolean); +DROP PROCEDURE priv_testproc1(int); DROP VIEW atestv0; DROP VIEW atestv1; @@ -1070,19 +1161,19 @@ DROP TABLE atestp2; SELECT lo_unlink(oid) FROM pg_largeobject_metadata WHERE oid >= 1000 AND oid < 3000 ORDER BY oid; -DROP GROUP regress_group1; -DROP GROUP regress_group2; +DROP GROUP regress_priv_group1; +DROP GROUP regress_priv_group2; -- these are needed to clean up permissions -REVOKE USAGE ON LANGUAGE sql FROM regress_user1; -DROP OWNED BY regress_user1; - -DROP USER regress_user1; -DROP USER regress_user2; -DROP USER regress_user3; -DROP USER regress_user4; -DROP USER regress_user5; -DROP USER regress_user6; +REVOKE USAGE ON LANGUAGE sql FROM regress_priv_user1; +DROP OWNED BY regress_priv_user1; + +DROP USER regress_priv_user1; +DROP USER regress_priv_user2; +DROP USER regress_priv_user3; +DROP USER regress_priv_user4; +DROP USER regress_priv_user5; +DROP USER regress_priv_user6; -- permissions with LOCK TABLE diff --git a/src/test/regress/sql/psql.sql b/src/test/regress/sql/psql.sql index b56a05f7f0..b45da9bb8d 100644 --- a/src/test/regress/sql/psql.sql +++ b/src/test/regress/sql/psql.sql @@ -28,6 +28,16 @@ SELECT 1 as one, 2 as two \g SELECT 3 as three, 4 as four \gx \g +-- \gx should work in FETCH_COUNT mode too +\set FETCH_COUNT 1 + +SELECT 1 as one, 2 as two \g +\gx +SELECT 3 as three, 4 as four \gx +\g + +\unset FETCH_COUNT + -- \gset select 10 as test01, 20 as test02, 'Hello' as test03 \gset pref01_ @@ -63,6 +73,42 
@@ select 10 as test01, 20 as test02 from generate_series(1,0) \gset \unset FETCH_COUNT +-- \gdesc + +SELECT + NULL AS zero, + 1 AS one, + 2.0 AS two, + 'three' AS three, + $1 AS four, + sin($2) as five, + 'foo'::varchar(4) as six, + CURRENT_DATE AS now +\gdesc + +-- should work with tuple-returning utilities, such as EXECUTE +PREPARE test AS SELECT 1 AS first, 2 AS second; +EXECUTE test \gdesc +EXPLAIN EXECUTE test \gdesc + +-- should fail cleanly - syntax error +SELECT 1 + \gdesc + +-- check behavior with empty results +SELECT \gdesc +CREATE TABLE bububu(a int) \gdesc + +-- subject command should not have executed +TABLE bububu; -- fail + +-- query buffer should remain unchanged +SELECT 1 AS x, 'Hello', 2 AS y, true AS "dirty\name" +\gdesc +\g + +-- all on one line +SELECT 3 AS x, 'Hello', 4 AS y, true AS "dirty\name" \gdesc \g + -- \gexec create temporary table gexec_test(a int, b text, c date, d float); @@ -526,6 +572,24 @@ select \if false \\ (bogus \else \\ 42 \endif \\ forty_two; \echo 'should print #8-1' \endif +-- :{?...} defined variable test +\set i 1 +\if :{?i} + \echo '#9-1 ok, variable i is defined' +\else + \echo 'should not print #9-2' +\endif + +\if :{?no_such_variable} + \echo 'should not print #10-1' +\else + \echo '#10-2 ok, variable no_such_variable is not defined' +\endif + +SELECT :{?i} AS i_is_defined; + +SELECT NOT :{?no_such_var} AS no_such_var_is_not_defined; + -- SHOW_CONTEXT \set SHOW_CONTEXT never @@ -560,3 +624,67 @@ UNION SELECT 5 ORDER BY 1; \r \p + +-- tests for special result variables + +-- working query, 2 rows selected +SELECT 1 AS stuff UNION SELECT 2; +\echo 'error:' :ERROR +\echo 'error code:' :SQLSTATE +\echo 'number of rows:' :ROW_COUNT + +-- syntax error +SELECT 1 UNION; +\echo 'error:' :ERROR +\echo 'error code:' :SQLSTATE +\echo 'number of rows:' :ROW_COUNT +\echo 'last error message:' :LAST_ERROR_MESSAGE +\echo 'last error code:' :LAST_ERROR_SQLSTATE + +-- empty query +; +\echo 'error:' :ERROR +\echo 'error code:' :SQLSTATE +\echo 'number of rows:' :ROW_COUNT +-- must have kept previous values +\echo 'last error message:' :LAST_ERROR_MESSAGE +\echo 'last error code:' :LAST_ERROR_SQLSTATE + +-- other query error +DROP TABLE this_table_does_not_exist; +\echo 'error:' :ERROR +\echo 'error code:' :SQLSTATE +\echo 'number of rows:' :ROW_COUNT +\echo 'last error message:' :LAST_ERROR_MESSAGE +\echo 'last error code:' :LAST_ERROR_SQLSTATE + +-- working \gdesc +SELECT 3 AS three, 4 AS four \gdesc +\echo 'error:' :ERROR +\echo 'error code:' :SQLSTATE +\echo 'number of rows:' :ROW_COUNT + +-- \gdesc with an error +SELECT 4 AS \gdesc +\echo 'error:' :ERROR +\echo 'error code:' :SQLSTATE +\echo 'number of rows:' :ROW_COUNT +\echo 'last error message:' :LAST_ERROR_MESSAGE +\echo 'last error code:' :LAST_ERROR_SQLSTATE + +-- check row count for a cursor-fetched query +\set FETCH_COUNT 10 +select unique2 from tenk1 order by unique2 limit 19; +\echo 'error:' :ERROR +\echo 'error code:' :SQLSTATE +\echo 'number of rows:' :ROW_COUNT + +-- cursor-fetched query with an error after the first group +select 1/(15-unique2) from tenk1 order by unique2 limit 19; +\echo 'error:' :ERROR +\echo 'error code:' :SQLSTATE +\echo 'number of rows:' :ROW_COUNT +\echo 'last error message:' :LAST_ERROR_MESSAGE +\echo 'last error code:' :LAST_ERROR_SQLSTATE + +\unset FETCH_COUNT diff --git a/src/test/regress/sql/rangefuncs.sql b/src/test/regress/sql/rangefuncs.sql index 442d397d4a..fc8ad9a158 100644 --- a/src/test/regress/sql/rangefuncs.sql +++ b/src/test/regress/sql/rangefuncs.sql 
@@ -1,29 +1,29 @@ -CREATE TABLE foo2(fooid int, f2 int); -INSERT INTO foo2 VALUES(1, 11); -INSERT INTO foo2 VALUES(2, 22); -INSERT INTO foo2 VALUES(1, 111); +CREATE TABLE rngfunc2(rngfuncid int, f2 int); +INSERT INTO rngfunc2 VALUES(1, 11); +INSERT INTO rngfunc2 VALUES(2, 22); +INSERT INTO rngfunc2 VALUES(1, 111); -CREATE FUNCTION foot(int) returns setof foo2 as 'SELECT * FROM foo2 WHERE fooid = $1 ORDER BY f2;' LANGUAGE SQL; +CREATE FUNCTION rngfunct(int) returns setof rngfunc2 as 'SELECT * FROM rngfunc2 WHERE rngfuncid = $1 ORDER BY f2;' LANGUAGE SQL; -- function with ORDINALITY -select * from foot(1) with ordinality as z(a,b,ord); -select * from foot(1) with ordinality as z(a,b,ord) where b > 100; -- ordinal 2, not 1 +select * from rngfunct(1) with ordinality as z(a,b,ord); +select * from rngfunct(1) with ordinality as z(a,b,ord) where b > 100; -- ordinal 2, not 1 -- ordinality vs. column names and types -select a,b,ord from foot(1) with ordinality as z(a,b,ord); +select a,b,ord from rngfunct(1) with ordinality as z(a,b,ord); select a,ord from unnest(array['a','b']) with ordinality as z(a,ord); select * from unnest(array['a','b']) with ordinality as z(a,ord); select a,ord from unnest(array[1.0::float8]) with ordinality as z(a,ord); select * from unnest(array[1.0::float8]) with ordinality as z(a,ord); select row_to_json(s.*) from generate_series(11,14) with ordinality s; -- ordinality vs. views -create temporary view vw_ord as select * from (values (1)) v(n) join foot(1) with ordinality as z(a,b,ord) on (n=ord); +create temporary view vw_ord as select * from (values (1)) v(n) join rngfunct(1) with ordinality as z(a,b,ord) on (n=ord); select * from vw_ord; select definition from pg_views where viewname='vw_ord'; drop view vw_ord; -- multiple functions -select * from rows from(foot(1),foot(2)) with ordinality as z(a,b,c,d,ord); -create temporary view vw_ord as select * from (values (1)) v(n) join rows from(foot(1),foot(2)) with ordinality as z(a,b,c,d,ord) on (n=ord); +select * from rows from(rngfunct(1),rngfunct(2)) with ordinality as z(a,b,c,d,ord); +create temporary view vw_ord as select * from (values (1)) v(n) join rows from(rngfunct(1),rngfunct(2)) with ordinality as z(a,b,c,d,ord) on (n=ord); select * from vw_ord; select definition from pg_views where viewname='vw_ord'; drop view vw_ord; @@ -48,209 +48,209 @@ drop view vw_ord; -- ordinality and multiple functions vs. 
rewind and reverse scan begin; -declare foo scroll cursor for select * from rows from(generate_series(1,5),generate_series(1,2)) with ordinality as g(i,j,o); -fetch all from foo; -fetch backward all from foo; -fetch all from foo; -fetch next from foo; -fetch next from foo; -fetch prior from foo; -fetch absolute 1 from foo; -fetch next from foo; -fetch next from foo; -fetch next from foo; -fetch prior from foo; -fetch prior from foo; -fetch prior from foo; +declare rf_cur scroll cursor for select * from rows from(generate_series(1,5),generate_series(1,2)) with ordinality as g(i,j,o); +fetch all from rf_cur; +fetch backward all from rf_cur; +fetch all from rf_cur; +fetch next from rf_cur; +fetch next from rf_cur; +fetch prior from rf_cur; +fetch absolute 1 from rf_cur; +fetch next from rf_cur; +fetch next from rf_cur; +fetch next from rf_cur; +fetch prior from rf_cur; +fetch prior from rf_cur; +fetch prior from rf_cur; commit; -- function with implicit LATERAL -select * from foo2, foot(foo2.fooid) z where foo2.f2 = z.f2; +select * from rngfunc2, rngfunct(rngfunc2.rngfuncid) z where rngfunc2.f2 = z.f2; -- function with implicit LATERAL and explicit ORDINALITY -select * from foo2, foot(foo2.fooid) with ordinality as z(fooid,f2,ord) where foo2.f2 = z.f2; +select * from rngfunc2, rngfunct(rngfunc2.rngfuncid) with ordinality as z(rngfuncid,f2,ord) where rngfunc2.f2 = z.f2; -- function in subselect -select * from foo2 where f2 in (select f2 from foot(foo2.fooid) z where z.fooid = foo2.fooid) ORDER BY 1,2; +select * from rngfunc2 where f2 in (select f2 from rngfunct(rngfunc2.rngfuncid) z where z.rngfuncid = rngfunc2.rngfuncid) ORDER BY 1,2; -- function in subselect -select * from foo2 where f2 in (select f2 from foot(1) z where z.fooid = foo2.fooid) ORDER BY 1,2; +select * from rngfunc2 where f2 in (select f2 from rngfunct(1) z where z.rngfuncid = rngfunc2.rngfuncid) ORDER BY 1,2; -- function in subselect -select * from foo2 where f2 in (select f2 from foot(foo2.fooid) z where z.fooid = 1) ORDER BY 1,2; +select * from rngfunc2 where f2 in (select f2 from rngfunct(rngfunc2.rngfuncid) z where z.rngfuncid = 1) ORDER BY 1,2; -- nested functions -select foot.fooid, foot.f2 from foot(sin(pi()/2)::int) ORDER BY 1,2; +select rngfunct.rngfuncid, rngfunct.f2 from rngfunct(sin(pi()/2)::int) ORDER BY 1,2; -CREATE TABLE foo (fooid int, foosubid int, fooname text, primary key(fooid,foosubid)); -INSERT INTO foo VALUES(1,1,'Joe'); -INSERT INTO foo VALUES(1,2,'Ed'); -INSERT INTO foo VALUES(2,1,'Mary'); +CREATE TABLE rngfunc (rngfuncid int, rngfuncsubid int, rngfuncname text, primary key(rngfuncid,rngfuncsubid)); +INSERT INTO rngfunc VALUES(1,1,'Joe'); +INSERT INTO rngfunc VALUES(1,2,'Ed'); +INSERT INTO rngfunc VALUES(2,1,'Mary'); -- sql, proretset = f, prorettype = b -CREATE FUNCTION getfoo1(int) RETURNS int AS 'SELECT $1;' LANGUAGE SQL; -SELECT * FROM getfoo1(1) AS t1; -SELECT * FROM getfoo1(1) WITH ORDINALITY AS t1(v,o); -CREATE VIEW vw_getfoo AS SELECT * FROM getfoo1(1); -SELECT * FROM vw_getfoo; -DROP VIEW vw_getfoo; -CREATE VIEW vw_getfoo AS SELECT * FROM getfoo1(1) WITH ORDINALITY as t1(v,o); -SELECT * FROM vw_getfoo; -DROP VIEW vw_getfoo; +CREATE FUNCTION getrngfunc1(int) RETURNS int AS 'SELECT $1;' LANGUAGE SQL; +SELECT * FROM getrngfunc1(1) AS t1; +SELECT * FROM getrngfunc1(1) WITH ORDINALITY AS t1(v,o); +CREATE VIEW vw_getrngfunc AS SELECT * FROM getrngfunc1(1); +SELECT * FROM vw_getrngfunc; +DROP VIEW vw_getrngfunc; +CREATE VIEW vw_getrngfunc AS SELECT * FROM getrngfunc1(1) WITH ORDINALITY as t1(v,o); 
+SELECT * FROM vw_getrngfunc; +DROP VIEW vw_getrngfunc; -- sql, proretset = t, prorettype = b -CREATE FUNCTION getfoo2(int) RETURNS setof int AS 'SELECT fooid FROM foo WHERE fooid = $1;' LANGUAGE SQL; -SELECT * FROM getfoo2(1) AS t1; -SELECT * FROM getfoo2(1) WITH ORDINALITY AS t1(v,o); -CREATE VIEW vw_getfoo AS SELECT * FROM getfoo2(1); -SELECT * FROM vw_getfoo; -DROP VIEW vw_getfoo; -CREATE VIEW vw_getfoo AS SELECT * FROM getfoo2(1) WITH ORDINALITY AS t1(v,o); -SELECT * FROM vw_getfoo; -DROP VIEW vw_getfoo; +CREATE FUNCTION getrngfunc2(int) RETURNS setof int AS 'SELECT rngfuncid FROM rngfunc WHERE rngfuncid = $1;' LANGUAGE SQL; +SELECT * FROM getrngfunc2(1) AS t1; +SELECT * FROM getrngfunc2(1) WITH ORDINALITY AS t1(v,o); +CREATE VIEW vw_getrngfunc AS SELECT * FROM getrngfunc2(1); +SELECT * FROM vw_getrngfunc; +DROP VIEW vw_getrngfunc; +CREATE VIEW vw_getrngfunc AS SELECT * FROM getrngfunc2(1) WITH ORDINALITY AS t1(v,o); +SELECT * FROM vw_getrngfunc; +DROP VIEW vw_getrngfunc; -- sql, proretset = t, prorettype = b -CREATE FUNCTION getfoo3(int) RETURNS setof text AS 'SELECT fooname FROM foo WHERE fooid = $1;' LANGUAGE SQL; -SELECT * FROM getfoo3(1) AS t1; -SELECT * FROM getfoo3(1) WITH ORDINALITY AS t1(v,o); -CREATE VIEW vw_getfoo AS SELECT * FROM getfoo3(1); -SELECT * FROM vw_getfoo; -DROP VIEW vw_getfoo; -CREATE VIEW vw_getfoo AS SELECT * FROM getfoo3(1) WITH ORDINALITY AS t1(v,o); -SELECT * FROM vw_getfoo; -DROP VIEW vw_getfoo; +CREATE FUNCTION getrngfunc3(int) RETURNS setof text AS 'SELECT rngfuncname FROM rngfunc WHERE rngfuncid = $1;' LANGUAGE SQL; +SELECT * FROM getrngfunc3(1) AS t1; +SELECT * FROM getrngfunc3(1) WITH ORDINALITY AS t1(v,o); +CREATE VIEW vw_getrngfunc AS SELECT * FROM getrngfunc3(1); +SELECT * FROM vw_getrngfunc; +DROP VIEW vw_getrngfunc; +CREATE VIEW vw_getrngfunc AS SELECT * FROM getrngfunc3(1) WITH ORDINALITY AS t1(v,o); +SELECT * FROM vw_getrngfunc; +DROP VIEW vw_getrngfunc; -- sql, proretset = f, prorettype = c -CREATE FUNCTION getfoo4(int) RETURNS foo AS 'SELECT * FROM foo WHERE fooid = $1;' LANGUAGE SQL; -SELECT * FROM getfoo4(1) AS t1; -SELECT * FROM getfoo4(1) WITH ORDINALITY AS t1(a,b,c,o); -CREATE VIEW vw_getfoo AS SELECT * FROM getfoo4(1); -SELECT * FROM vw_getfoo; -DROP VIEW vw_getfoo; -CREATE VIEW vw_getfoo AS SELECT * FROM getfoo4(1) WITH ORDINALITY AS t1(a,b,c,o); -SELECT * FROM vw_getfoo; -DROP VIEW vw_getfoo; +CREATE FUNCTION getrngfunc4(int) RETURNS rngfunc AS 'SELECT * FROM rngfunc WHERE rngfuncid = $1;' LANGUAGE SQL; +SELECT * FROM getrngfunc4(1) AS t1; +SELECT * FROM getrngfunc4(1) WITH ORDINALITY AS t1(a,b,c,o); +CREATE VIEW vw_getrngfunc AS SELECT * FROM getrngfunc4(1); +SELECT * FROM vw_getrngfunc; +DROP VIEW vw_getrngfunc; +CREATE VIEW vw_getrngfunc AS SELECT * FROM getrngfunc4(1) WITH ORDINALITY AS t1(a,b,c,o); +SELECT * FROM vw_getrngfunc; +DROP VIEW vw_getrngfunc; -- sql, proretset = t, prorettype = c -CREATE FUNCTION getfoo5(int) RETURNS setof foo AS 'SELECT * FROM foo WHERE fooid = $1;' LANGUAGE SQL; -SELECT * FROM getfoo5(1) AS t1; -SELECT * FROM getfoo5(1) WITH ORDINALITY AS t1(a,b,c,o); -CREATE VIEW vw_getfoo AS SELECT * FROM getfoo5(1); -SELECT * FROM vw_getfoo; -DROP VIEW vw_getfoo; -CREATE VIEW vw_getfoo AS SELECT * FROM getfoo5(1) WITH ORDINALITY AS t1(a,b,c,o); -SELECT * FROM vw_getfoo; -DROP VIEW vw_getfoo; +CREATE FUNCTION getrngfunc5(int) RETURNS setof rngfunc AS 'SELECT * FROM rngfunc WHERE rngfuncid = $1;' LANGUAGE SQL; +SELECT * FROM getrngfunc5(1) AS t1; +SELECT * FROM getrngfunc5(1) WITH ORDINALITY AS t1(a,b,c,o); +CREATE 
VIEW vw_getrngfunc AS SELECT * FROM getrngfunc5(1); +SELECT * FROM vw_getrngfunc; +DROP VIEW vw_getrngfunc; +CREATE VIEW vw_getrngfunc AS SELECT * FROM getrngfunc5(1) WITH ORDINALITY AS t1(a,b,c,o); +SELECT * FROM vw_getrngfunc; +DROP VIEW vw_getrngfunc; -- sql, proretset = f, prorettype = record -CREATE FUNCTION getfoo6(int) RETURNS RECORD AS 'SELECT * FROM foo WHERE fooid = $1;' LANGUAGE SQL; -SELECT * FROM getfoo6(1) AS t1(fooid int, foosubid int, fooname text); -SELECT * FROM ROWS FROM( getfoo6(1) AS (fooid int, foosubid int, fooname text) ) WITH ORDINALITY; -CREATE VIEW vw_getfoo AS SELECT * FROM getfoo6(1) AS -(fooid int, foosubid int, fooname text); -SELECT * FROM vw_getfoo; -DROP VIEW vw_getfoo; -CREATE VIEW vw_getfoo AS - SELECT * FROM ROWS FROM( getfoo6(1) AS (fooid int, foosubid int, fooname text) ) +CREATE FUNCTION getrngfunc6(int) RETURNS RECORD AS 'SELECT * FROM rngfunc WHERE rngfuncid = $1;' LANGUAGE SQL; +SELECT * FROM getrngfunc6(1) AS t1(rngfuncid int, rngfuncsubid int, rngfuncname text); +SELECT * FROM ROWS FROM( getrngfunc6(1) AS (rngfuncid int, rngfuncsubid int, rngfuncname text) ) WITH ORDINALITY; +CREATE VIEW vw_getrngfunc AS SELECT * FROM getrngfunc6(1) AS +(rngfuncid int, rngfuncsubid int, rngfuncname text); +SELECT * FROM vw_getrngfunc; +DROP VIEW vw_getrngfunc; +CREATE VIEW vw_getrngfunc AS + SELECT * FROM ROWS FROM( getrngfunc6(1) AS (rngfuncid int, rngfuncsubid int, rngfuncname text) ) WITH ORDINALITY; -SELECT * FROM vw_getfoo; -DROP VIEW vw_getfoo; +SELECT * FROM vw_getrngfunc; +DROP VIEW vw_getrngfunc; -- sql, proretset = t, prorettype = record -CREATE FUNCTION getfoo7(int) RETURNS setof record AS 'SELECT * FROM foo WHERE fooid = $1;' LANGUAGE SQL; -SELECT * FROM getfoo7(1) AS t1(fooid int, foosubid int, fooname text); -SELECT * FROM ROWS FROM( getfoo7(1) AS (fooid int, foosubid int, fooname text) ) WITH ORDINALITY; -CREATE VIEW vw_getfoo AS SELECT * FROM getfoo7(1) AS -(fooid int, foosubid int, fooname text); -SELECT * FROM vw_getfoo; -DROP VIEW vw_getfoo; -CREATE VIEW vw_getfoo AS - SELECT * FROM ROWS FROM( getfoo7(1) AS (fooid int, foosubid int, fooname text) ) +CREATE FUNCTION getrngfunc7(int) RETURNS setof record AS 'SELECT * FROM rngfunc WHERE rngfuncid = $1;' LANGUAGE SQL; +SELECT * FROM getrngfunc7(1) AS t1(rngfuncid int, rngfuncsubid int, rngfuncname text); +SELECT * FROM ROWS FROM( getrngfunc7(1) AS (rngfuncid int, rngfuncsubid int, rngfuncname text) ) WITH ORDINALITY; +CREATE VIEW vw_getrngfunc AS SELECT * FROM getrngfunc7(1) AS +(rngfuncid int, rngfuncsubid int, rngfuncname text); +SELECT * FROM vw_getrngfunc; +DROP VIEW vw_getrngfunc; +CREATE VIEW vw_getrngfunc AS + SELECT * FROM ROWS FROM( getrngfunc7(1) AS (rngfuncid int, rngfuncsubid int, rngfuncname text) ) WITH ORDINALITY; -SELECT * FROM vw_getfoo; -DROP VIEW vw_getfoo; +SELECT * FROM vw_getrngfunc; +DROP VIEW vw_getrngfunc; -- plpgsql, proretset = f, prorettype = b -CREATE FUNCTION getfoo8(int) RETURNS int AS 'DECLARE fooint int; BEGIN SELECT fooid into fooint FROM foo WHERE fooid = $1; RETURN fooint; END;' LANGUAGE plpgsql; -SELECT * FROM getfoo8(1) AS t1; -SELECT * FROM getfoo8(1) WITH ORDINALITY AS t1(v,o); -CREATE VIEW vw_getfoo AS SELECT * FROM getfoo8(1); -SELECT * FROM vw_getfoo; -DROP VIEW vw_getfoo; -CREATE VIEW vw_getfoo AS SELECT * FROM getfoo8(1) WITH ORDINALITY AS t1(v,o); -SELECT * FROM vw_getfoo; -DROP VIEW vw_getfoo; +CREATE FUNCTION getrngfunc8(int) RETURNS int AS 'DECLARE rngfuncint int; BEGIN SELECT rngfuncid into rngfuncint FROM rngfunc WHERE rngfuncid = $1; RETURN 
rngfuncint; END;' LANGUAGE plpgsql; +SELECT * FROM getrngfunc8(1) AS t1; +SELECT * FROM getrngfunc8(1) WITH ORDINALITY AS t1(v,o); +CREATE VIEW vw_getrngfunc AS SELECT * FROM getrngfunc8(1); +SELECT * FROM vw_getrngfunc; +DROP VIEW vw_getrngfunc; +CREATE VIEW vw_getrngfunc AS SELECT * FROM getrngfunc8(1) WITH ORDINALITY AS t1(v,o); +SELECT * FROM vw_getrngfunc; +DROP VIEW vw_getrngfunc; -- plpgsql, proretset = f, prorettype = c -CREATE FUNCTION getfoo9(int) RETURNS foo AS 'DECLARE footup foo%ROWTYPE; BEGIN SELECT * into footup FROM foo WHERE fooid = $1; RETURN footup; END;' LANGUAGE plpgsql; -SELECT * FROM getfoo9(1) AS t1; -SELECT * FROM getfoo9(1) WITH ORDINALITY AS t1(a,b,c,o); -CREATE VIEW vw_getfoo AS SELECT * FROM getfoo9(1); -SELECT * FROM vw_getfoo; -DROP VIEW vw_getfoo; -CREATE VIEW vw_getfoo AS SELECT * FROM getfoo9(1) WITH ORDINALITY AS t1(a,b,c,o); -SELECT * FROM vw_getfoo; -DROP VIEW vw_getfoo; +CREATE FUNCTION getrngfunc9(int) RETURNS rngfunc AS 'DECLARE rngfunctup rngfunc%ROWTYPE; BEGIN SELECT * into rngfunctup FROM rngfunc WHERE rngfuncid = $1; RETURN rngfunctup; END;' LANGUAGE plpgsql; +SELECT * FROM getrngfunc9(1) AS t1; +SELECT * FROM getrngfunc9(1) WITH ORDINALITY AS t1(a,b,c,o); +CREATE VIEW vw_getrngfunc AS SELECT * FROM getrngfunc9(1); +SELECT * FROM vw_getrngfunc; +DROP VIEW vw_getrngfunc; +CREATE VIEW vw_getrngfunc AS SELECT * FROM getrngfunc9(1) WITH ORDINALITY AS t1(a,b,c,o); +SELECT * FROM vw_getrngfunc; +DROP VIEW vw_getrngfunc; -- mix 'n match kinds, to exercise expandRTE and related logic -select * from rows from(getfoo1(1),getfoo2(1),getfoo3(1),getfoo4(1),getfoo5(1), - getfoo6(1) AS (fooid int, foosubid int, fooname text), - getfoo7(1) AS (fooid int, foosubid int, fooname text), - getfoo8(1),getfoo9(1)) +select * from rows from(getrngfunc1(1),getrngfunc2(1),getrngfunc3(1),getrngfunc4(1),getrngfunc5(1), + getrngfunc6(1) AS (rngfuncid int, rngfuncsubid int, rngfuncname text), + getrngfunc7(1) AS (rngfuncid int, rngfuncsubid int, rngfuncname text), + getrngfunc8(1),getrngfunc9(1)) with ordinality as t1(a,b,c,d,e,f,g,h,i,j,k,l,m,o,p,q,r,s,t,u); -select * from rows from(getfoo9(1),getfoo8(1), - getfoo7(1) AS (fooid int, foosubid int, fooname text), - getfoo6(1) AS (fooid int, foosubid int, fooname text), - getfoo5(1),getfoo4(1),getfoo3(1),getfoo2(1),getfoo1(1)) +select * from rows from(getrngfunc9(1),getrngfunc8(1), + getrngfunc7(1) AS (rngfuncid int, rngfuncsubid int, rngfuncname text), + getrngfunc6(1) AS (rngfuncid int, rngfuncsubid int, rngfuncname text), + getrngfunc5(1),getrngfunc4(1),getrngfunc3(1),getrngfunc2(1),getrngfunc1(1)) with ordinality as t1(a,b,c,d,e,f,g,h,i,j,k,l,m,o,p,q,r,s,t,u); -create temporary view vw_foo as - select * from rows from(getfoo9(1), - getfoo7(1) AS (fooid int, foosubid int, fooname text), - getfoo1(1)) +create temporary view vw_rngfunc as + select * from rows from(getrngfunc9(1), + getrngfunc7(1) AS (rngfuncid int, rngfuncsubid int, rngfuncname text), + getrngfunc1(1)) with ordinality as t1(a,b,c,d,e,f,g,n); -select * from vw_foo; -select pg_get_viewdef('vw_foo'); -drop view vw_foo; - -DROP FUNCTION getfoo1(int); -DROP FUNCTION getfoo2(int); -DROP FUNCTION getfoo3(int); -DROP FUNCTION getfoo4(int); -DROP FUNCTION getfoo5(int); -DROP FUNCTION getfoo6(int); -DROP FUNCTION getfoo7(int); -DROP FUNCTION getfoo8(int); -DROP FUNCTION getfoo9(int); -DROP FUNCTION foot(int); -DROP TABLE foo2; -DROP TABLE foo; +select * from vw_rngfunc; +select pg_get_viewdef('vw_rngfunc'); +drop view vw_rngfunc; + +DROP FUNCTION getrngfunc1(int); +DROP 
FUNCTION getrngfunc2(int); +DROP FUNCTION getrngfunc3(int); +DROP FUNCTION getrngfunc4(int); +DROP FUNCTION getrngfunc5(int); +DROP FUNCTION getrngfunc6(int); +DROP FUNCTION getrngfunc7(int); +DROP FUNCTION getrngfunc8(int); +DROP FUNCTION getrngfunc9(int); +DROP FUNCTION rngfunct(int); +DROP TABLE rngfunc2; +DROP TABLE rngfunc; -- Rescan tests -- -CREATE TEMPORARY SEQUENCE foo_rescan_seq1; -CREATE TEMPORARY SEQUENCE foo_rescan_seq2; -CREATE TYPE foo_rescan_t AS (i integer, s bigint); +CREATE TEMPORARY SEQUENCE rngfunc_rescan_seq1; +CREATE TEMPORARY SEQUENCE rngfunc_rescan_seq2; +CREATE TYPE rngfunc_rescan_t AS (i integer, s bigint); -CREATE FUNCTION foo_sql(int,int) RETURNS setof foo_rescan_t AS 'SELECT i, nextval(''foo_rescan_seq1'') FROM generate_series($1,$2) i;' LANGUAGE SQL; +CREATE FUNCTION rngfunc_sql(int,int) RETURNS setof rngfunc_rescan_t AS 'SELECT i, nextval(''rngfunc_rescan_seq1'') FROM generate_series($1,$2) i;' LANGUAGE SQL; -- plpgsql functions use materialize mode -CREATE FUNCTION foo_mat(int,int) RETURNS setof foo_rescan_t AS 'begin for i in $1..$2 loop return next (i, nextval(''foo_rescan_seq2'')); end loop; end;' LANGUAGE plpgsql; +CREATE FUNCTION rngfunc_mat(int,int) RETURNS setof rngfunc_rescan_t AS 'begin for i in $1..$2 loop return next (i, nextval(''rngfunc_rescan_seq2'')); end loop; end;' LANGUAGE plpgsql; --invokes ExecReScanFunctionScan - all these cases should materialize the function only once -- LEFT JOIN on a condition that the planner can't prove to be true is used to ensure the function -- is on the inner path of a nestloop join -SELECT setval('foo_rescan_seq1',1,false),setval('foo_rescan_seq2',1,false); -SELECT * FROM (VALUES (1),(2),(3)) v(r) LEFT JOIN foo_sql(11,13) ON (r+i)<100; -SELECT setval('foo_rescan_seq1',1,false),setval('foo_rescan_seq2',1,false); -SELECT * FROM (VALUES (1),(2),(3)) v(r) LEFT JOIN foo_sql(11,13) WITH ORDINALITY AS f(i,s,o) ON (r+i)<100; +SELECT setval('rngfunc_rescan_seq1',1,false),setval('rngfunc_rescan_seq2',1,false); +SELECT * FROM (VALUES (1),(2),(3)) v(r) LEFT JOIN rngfunc_sql(11,13) ON (r+i)<100; +SELECT setval('rngfunc_rescan_seq1',1,false),setval('rngfunc_rescan_seq2',1,false); +SELECT * FROM (VALUES (1),(2),(3)) v(r) LEFT JOIN rngfunc_sql(11,13) WITH ORDINALITY AS f(i,s,o) ON (r+i)<100; -SELECT setval('foo_rescan_seq1',1,false),setval('foo_rescan_seq2',1,false); -SELECT * FROM (VALUES (1),(2),(3)) v(r) LEFT JOIN foo_mat(11,13) ON (r+i)<100; -SELECT setval('foo_rescan_seq1',1,false),setval('foo_rescan_seq2',1,false); -SELECT * FROM (VALUES (1),(2),(3)) v(r) LEFT JOIN foo_mat(11,13) WITH ORDINALITY AS f(i,s,o) ON (r+i)<100; -SELECT setval('foo_rescan_seq1',1,false),setval('foo_rescan_seq2',1,false); -SELECT * FROM (VALUES (1),(2),(3)) v(r) LEFT JOIN ROWS FROM( foo_sql(11,13), foo_mat(11,13) ) WITH ORDINALITY AS f(i1,s1,i2,s2,o) ON (r+i1+i2)<100; +SELECT setval('rngfunc_rescan_seq1',1,false),setval('rngfunc_rescan_seq2',1,false); +SELECT * FROM (VALUES (1),(2),(3)) v(r) LEFT JOIN rngfunc_mat(11,13) ON (r+i)<100; +SELECT setval('rngfunc_rescan_seq1',1,false),setval('rngfunc_rescan_seq2',1,false); +SELECT * FROM (VALUES (1),(2),(3)) v(r) LEFT JOIN rngfunc_mat(11,13) WITH ORDINALITY AS f(i,s,o) ON (r+i)<100; +SELECT setval('rngfunc_rescan_seq1',1,false),setval('rngfunc_rescan_seq2',1,false); +SELECT * FROM (VALUES (1),(2),(3)) v(r) LEFT JOIN ROWS FROM( rngfunc_sql(11,13), rngfunc_mat(11,13) ) WITH ORDINALITY AS f(i1,s1,i2,s2,o) ON (r+i1+i2)<100; SELECT * FROM (VALUES (1),(2),(3)) v(r) LEFT JOIN generate_series(11,13) f(i) ON 
(r+i)<100; SELECT * FROM (VALUES (1),(2),(3)) v(r) LEFT JOIN generate_series(11,13) WITH ORDINALITY AS f(i,o) ON (r+i)<100; @@ -260,43 +260,43 @@ SELECT * FROM (VALUES (1),(2),(3)) v(r) LEFT JOIN unnest(array[10,20,30]) WITH O --invokes ExecReScanFunctionScan with chgParam != NULL (using implied LATERAL) -SELECT setval('foo_rescan_seq1',1,false),setval('foo_rescan_seq2',1,false); -SELECT * FROM (VALUES (1),(2),(3)) v(r), foo_sql(10+r,13); -SELECT setval('foo_rescan_seq1',1,false),setval('foo_rescan_seq2',1,false); -SELECT * FROM (VALUES (1),(2),(3)) v(r), foo_sql(10+r,13) WITH ORDINALITY AS f(i,s,o); -SELECT setval('foo_rescan_seq1',1,false),setval('foo_rescan_seq2',1,false); -SELECT * FROM (VALUES (1),(2),(3)) v(r), foo_sql(11,10+r); -SELECT setval('foo_rescan_seq1',1,false),setval('foo_rescan_seq2',1,false); -SELECT * FROM (VALUES (1),(2),(3)) v(r), foo_sql(11,10+r) WITH ORDINALITY AS f(i,s,o); -SELECT setval('foo_rescan_seq1',1,false),setval('foo_rescan_seq2',1,false); -SELECT * FROM (VALUES (11,12),(13,15),(16,20)) v(r1,r2), foo_sql(r1,r2); -SELECT setval('foo_rescan_seq1',1,false),setval('foo_rescan_seq2',1,false); -SELECT * FROM (VALUES (11,12),(13,15),(16,20)) v(r1,r2), foo_sql(r1,r2) WITH ORDINALITY AS f(i,s,o); - -SELECT setval('foo_rescan_seq1',1,false),setval('foo_rescan_seq2',1,false); -SELECT * FROM (VALUES (1),(2),(3)) v(r), foo_mat(10+r,13); -SELECT setval('foo_rescan_seq1',1,false),setval('foo_rescan_seq2',1,false); -SELECT * FROM (VALUES (1),(2),(3)) v(r), foo_mat(10+r,13) WITH ORDINALITY AS f(i,s,o); -SELECT setval('foo_rescan_seq1',1,false),setval('foo_rescan_seq2',1,false); -SELECT * FROM (VALUES (1),(2),(3)) v(r), foo_mat(11,10+r); -SELECT setval('foo_rescan_seq1',1,false),setval('foo_rescan_seq2',1,false); -SELECT * FROM (VALUES (1),(2),(3)) v(r), foo_mat(11,10+r) WITH ORDINALITY AS f(i,s,o); -SELECT setval('foo_rescan_seq1',1,false),setval('foo_rescan_seq2',1,false); -SELECT * FROM (VALUES (11,12),(13,15),(16,20)) v(r1,r2), foo_mat(r1,r2); -SELECT setval('foo_rescan_seq1',1,false),setval('foo_rescan_seq2',1,false); -SELECT * FROM (VALUES (11,12),(13,15),(16,20)) v(r1,r2), foo_mat(r1,r2) WITH ORDINALITY AS f(i,s,o); +SELECT setval('rngfunc_rescan_seq1',1,false),setval('rngfunc_rescan_seq2',1,false); +SELECT * FROM (VALUES (1),(2),(3)) v(r), rngfunc_sql(10+r,13); +SELECT setval('rngfunc_rescan_seq1',1,false),setval('rngfunc_rescan_seq2',1,false); +SELECT * FROM (VALUES (1),(2),(3)) v(r), rngfunc_sql(10+r,13) WITH ORDINALITY AS f(i,s,o); +SELECT setval('rngfunc_rescan_seq1',1,false),setval('rngfunc_rescan_seq2',1,false); +SELECT * FROM (VALUES (1),(2),(3)) v(r), rngfunc_sql(11,10+r); +SELECT setval('rngfunc_rescan_seq1',1,false),setval('rngfunc_rescan_seq2',1,false); +SELECT * FROM (VALUES (1),(2),(3)) v(r), rngfunc_sql(11,10+r) WITH ORDINALITY AS f(i,s,o); +SELECT setval('rngfunc_rescan_seq1',1,false),setval('rngfunc_rescan_seq2',1,false); +SELECT * FROM (VALUES (11,12),(13,15),(16,20)) v(r1,r2), rngfunc_sql(r1,r2); +SELECT setval('rngfunc_rescan_seq1',1,false),setval('rngfunc_rescan_seq2',1,false); +SELECT * FROM (VALUES (11,12),(13,15),(16,20)) v(r1,r2), rngfunc_sql(r1,r2) WITH ORDINALITY AS f(i,s,o); + +SELECT setval('rngfunc_rescan_seq1',1,false),setval('rngfunc_rescan_seq2',1,false); +SELECT * FROM (VALUES (1),(2),(3)) v(r), rngfunc_mat(10+r,13); +SELECT setval('rngfunc_rescan_seq1',1,false),setval('rngfunc_rescan_seq2',1,false); +SELECT * FROM (VALUES (1),(2),(3)) v(r), rngfunc_mat(10+r,13) WITH ORDINALITY AS f(i,s,o); +SELECT 
setval('rngfunc_rescan_seq1',1,false),setval('rngfunc_rescan_seq2',1,false); +SELECT * FROM (VALUES (1),(2),(3)) v(r), rngfunc_mat(11,10+r); +SELECT setval('rngfunc_rescan_seq1',1,false),setval('rngfunc_rescan_seq2',1,false); +SELECT * FROM (VALUES (1),(2),(3)) v(r), rngfunc_mat(11,10+r) WITH ORDINALITY AS f(i,s,o); +SELECT setval('rngfunc_rescan_seq1',1,false),setval('rngfunc_rescan_seq2',1,false); +SELECT * FROM (VALUES (11,12),(13,15),(16,20)) v(r1,r2), rngfunc_mat(r1,r2); +SELECT setval('rngfunc_rescan_seq1',1,false),setval('rngfunc_rescan_seq2',1,false); +SELECT * FROM (VALUES (11,12),(13,15),(16,20)) v(r1,r2), rngfunc_mat(r1,r2) WITH ORDINALITY AS f(i,s,o); -- selective rescan of multiple functions: -SELECT setval('foo_rescan_seq1',1,false),setval('foo_rescan_seq2',1,false); -SELECT * FROM (VALUES (1),(2),(3)) v(r), ROWS FROM( foo_sql(11,11), foo_mat(10+r,13) ); -SELECT setval('foo_rescan_seq1',1,false),setval('foo_rescan_seq2',1,false); -SELECT * FROM (VALUES (1),(2),(3)) v(r), ROWS FROM( foo_sql(10+r,13), foo_mat(11,11) ); -SELECT setval('foo_rescan_seq1',1,false),setval('foo_rescan_seq2',1,false); -SELECT * FROM (VALUES (1),(2),(3)) v(r), ROWS FROM( foo_sql(10+r,13), foo_mat(10+r,13) ); +SELECT setval('rngfunc_rescan_seq1',1,false),setval('rngfunc_rescan_seq2',1,false); +SELECT * FROM (VALUES (1),(2),(3)) v(r), ROWS FROM( rngfunc_sql(11,11), rngfunc_mat(10+r,13) ); +SELECT setval('rngfunc_rescan_seq1',1,false),setval('rngfunc_rescan_seq2',1,false); +SELECT * FROM (VALUES (1),(2),(3)) v(r), ROWS FROM( rngfunc_sql(10+r,13), rngfunc_mat(11,11) ); +SELECT setval('rngfunc_rescan_seq1',1,false),setval('rngfunc_rescan_seq2',1,false); +SELECT * FROM (VALUES (1),(2),(3)) v(r), ROWS FROM( rngfunc_sql(10+r,13), rngfunc_mat(10+r,13) ); -SELECT setval('foo_rescan_seq1',1,false),setval('foo_rescan_seq2',1,false); -SELECT * FROM generate_series(1,2) r1, generate_series(r1,3) r2, ROWS FROM( foo_sql(10+r1,13), foo_mat(10+r2,13) ); +SELECT setval('rngfunc_rescan_seq1',1,false),setval('rngfunc_rescan_seq2',1,false); +SELECT * FROM generate_series(1,2) r1, generate_series(r1,3) r2, ROWS FROM( rngfunc_sql(10+r1,13), rngfunc_mat(10+r2,13) ); SELECT * FROM (VALUES (1),(2),(3)) v(r), generate_series(10+r,20-r) f(i); SELECT * FROM (VALUES (1),(2),(3)) v(r), generate_series(10+r,20-r) WITH ORDINALITY AS f(i,o); @@ -319,50 +319,50 @@ SELECT * FROM (VALUES (1),(2),(3)) v1(r1), LATERAL (SELECT r1, * FROM (VALUES (10),(20),(30)) v2(r2) LEFT JOIN generate_series(r1,2+r2/5) f(i) ON ((r2+i)<100) OFFSET 0) s1; -DROP FUNCTION foo_sql(int,int); -DROP FUNCTION foo_mat(int,int); -DROP SEQUENCE foo_rescan_seq1; -DROP SEQUENCE foo_rescan_seq2; +DROP FUNCTION rngfunc_sql(int,int); +DROP FUNCTION rngfunc_mat(int,int); +DROP SEQUENCE rngfunc_rescan_seq1; +DROP SEQUENCE rngfunc_rescan_seq2; -- -- Test cases involving OUT parameters -- -CREATE FUNCTION foo(in f1 int, out f2 int) +CREATE FUNCTION rngfunc(in f1 int, out f2 int) AS 'select $1+1' LANGUAGE sql; -SELECT foo(42); -SELECT * FROM foo(42); -SELECT * FROM foo(42) AS p(x); +SELECT rngfunc(42); +SELECT * FROM rngfunc(42); +SELECT * FROM rngfunc(42) AS p(x); -- explicit spec of return type is OK -CREATE OR REPLACE FUNCTION foo(in f1 int, out f2 int) RETURNS int +CREATE OR REPLACE FUNCTION rngfunc(in f1 int, out f2 int) RETURNS int AS 'select $1+1' LANGUAGE sql; -- error, wrong result type -CREATE OR REPLACE FUNCTION foo(in f1 int, out f2 int) RETURNS float +CREATE OR REPLACE FUNCTION rngfunc(in f1 int, out f2 int) RETURNS float AS 'select $1+1' LANGUAGE sql; -- with 
multiple OUT params you must get a RECORD result -CREATE OR REPLACE FUNCTION foo(in f1 int, out f2 int, out f3 text) RETURNS int +CREATE OR REPLACE FUNCTION rngfunc(in f1 int, out f2 int, out f3 text) RETURNS int AS 'select $1+1' LANGUAGE sql; -CREATE OR REPLACE FUNCTION foo(in f1 int, out f2 int, out f3 text) +CREATE OR REPLACE FUNCTION rngfunc(in f1 int, out f2 int, out f3 text) RETURNS record AS 'select $1+1' LANGUAGE sql; -CREATE OR REPLACE FUNCTION foor(in f1 int, out f2 int, out text) +CREATE OR REPLACE FUNCTION rngfuncr(in f1 int, out f2 int, out text) AS $$select $1-1, $1::text || 'z'$$ LANGUAGE sql; -SELECT f1, foor(f1) FROM int4_tbl; -SELECT * FROM foor(42); -SELECT * FROM foor(42) AS p(a,b); +SELECT f1, rngfuncr(f1) FROM int4_tbl; +SELECT * FROM rngfuncr(42); +SELECT * FROM rngfuncr(42) AS p(a,b); -CREATE OR REPLACE FUNCTION foob(in f1 int, inout f2 int, out text) +CREATE OR REPLACE FUNCTION rngfuncb(in f1 int, inout f2 int, out text) AS $$select $2-1, $1::text || 'z'$$ LANGUAGE sql; -SELECT f1, foob(f1, f1/2) FROM int4_tbl; -SELECT * FROM foob(42, 99); -SELECT * FROM foob(42, 99) AS p(a,b); +SELECT f1, rngfuncb(f1, f1/2) FROM int4_tbl; +SELECT * FROM rngfuncb(42, 99); +SELECT * FROM rngfuncb(42, 99) AS p(a,b); -- Can reference function with or without OUT params for DROP, etc -DROP FUNCTION foo(int); -DROP FUNCTION foor(in f2 int, out f1 int, out text); -DROP FUNCTION foob(in f1 int, inout f2 int); +DROP FUNCTION rngfunc(int); +DROP FUNCTION rngfuncr(in f2 int, out f1 int, out text); +DROP FUNCTION rngfuncb(in f1 int, inout f2 int); -- -- For my next trick, polymorphic OUT parameters @@ -396,26 +396,26 @@ AS 'select $1, array[$1,$1]' LANGUAGE sql; -- table functions -- -CREATE OR REPLACE FUNCTION foo() +CREATE OR REPLACE FUNCTION rngfunc() RETURNS TABLE(a int) AS $$ SELECT a FROM generate_series(1,5) a(a) $$ LANGUAGE sql; -SELECT * FROM foo(); -DROP FUNCTION foo(); +SELECT * FROM rngfunc(); +DROP FUNCTION rngfunc(); -CREATE OR REPLACE FUNCTION foo(int) +CREATE OR REPLACE FUNCTION rngfunc(int) RETURNS TABLE(a int, b int) AS $$ SELECT a, b FROM generate_series(1,$1) a(a), generate_series(1,$1) b(b) $$ LANGUAGE sql; -SELECT * FROM foo(3); -DROP FUNCTION foo(int); +SELECT * FROM rngfunc(3); +DROP FUNCTION rngfunc(int); -- case that causes change of typmod knowledge during inlining -CREATE OR REPLACE FUNCTION foo() +CREATE OR REPLACE FUNCTION rngfunc() RETURNS TABLE(a varchar(5)) AS $$ SELECT 'hello'::varchar(5) $$ LANGUAGE sql STABLE; -SELECT * FROM foo() GROUP BY 1; -DROP FUNCTION foo(); +SELECT * FROM rngfunc() GROUP BY 1; +DROP FUNCTION rngfunc(); -- -- some tests on SQL functions with RETURNING @@ -477,17 +477,17 @@ select * from tt; select * from tt_log; -- test case for a whole-row-variable bug -create function foo1(n integer, out a text, out b text) +create function rngfunc1(n integer, out a text, out b text) returns setof record language sql as $$ select 'foo ' || i, 'bar ' || i from generate_series(1,$1) i $$; set work_mem='64kB'; -select t.a, t, t.a from foo1(10000) t limit 1; +select t.a, t, t.a from rngfunc1(10000) t limit 1; reset work_mem; -select t.a, t, t.a from foo1(10000) t limit 1; +select t.a, t, t.a from rngfunc1(10000) t limit 1; -drop function foo1(n integer); +drop function rngfunc1(n integer); -- test use of SQL functions returning record -- this is supported in some cases where the query doesn't specify @@ -501,27 +501,27 @@ select array_to_set(array['one', 'two']); select * from array_to_set(array['one', 'two']) as t(f1 int,f2 text); select * from 
array_to_set(array['one', 'two']); -- fail -create temp table foo(f1 int8, f2 int8); +create temp table rngfunc(f1 int8, f2 int8); -create function testfoo() returns record as $$ - insert into foo values (1,2) returning *; +create function testrngfunc() returns record as $$ + insert into rngfunc values (1,2) returning *; $$ language sql; -select testfoo(); -select * from testfoo() as t(f1 int8,f2 int8); -select * from testfoo(); -- fail +select testrngfunc(); +select * from testrngfunc() as t(f1 int8,f2 int8); +select * from testrngfunc(); -- fail -drop function testfoo(); +drop function testrngfunc(); -create function testfoo() returns setof record as $$ - insert into foo values (1,2), (3,4) returning *; +create function testrngfunc() returns setof record as $$ + insert into rngfunc values (1,2), (3,4) returning *; $$ language sql; -select testfoo(); -select * from testfoo() as t(f1 int8,f2 int8); -select * from testfoo(); -- fail +select testrngfunc(); +select * from testrngfunc() as t(f1 int8,f2 int8); +select * from testrngfunc(); -- fail -drop function testfoo(); +drop function testrngfunc(); -- -- Check some cases involving added/dropped columns in a rowtype result @@ -572,33 +572,33 @@ drop table users; -- this won't get inlined because of type coercion, but it shouldn't fail -create or replace function foobar() returns setof text as +create or replace function rngfuncbar() returns setof text as $$ select 'foo'::varchar union all select 'bar'::varchar ; $$ language sql stable; -select foobar(); -select * from foobar(); +select rngfuncbar(); +select * from rngfuncbar(); -drop function foobar(); +drop function rngfuncbar(); -- check handling of a SQL function with multiple OUT params (bug #5777) -create or replace function foobar(out integer, out numeric) as +create or replace function rngfuncbar(out integer, out numeric) as $$ select (1, 2.1) $$ language sql; -select * from foobar(); +select * from rngfuncbar(); -create or replace function foobar(out integer, out numeric) as +create or replace function rngfuncbar(out integer, out numeric) as $$ select (1, 2) $$ language sql; -select * from foobar(); -- fail +select * from rngfuncbar(); -- fail -create or replace function foobar(out integer, out numeric) as +create or replace function rngfuncbar(out integer, out numeric) as $$ select (1, 2.1, 3) $$ language sql; -select * from foobar(); -- fail +select * from rngfuncbar(); -- fail -drop function foobar(); +drop function rngfuncbar(); -- check whole-row-Var handling in nested lateral functions (bug #11703) @@ -633,11 +633,11 @@ select x from int8_tbl, extractq2_2_opt(int8_tbl) f(x); -- check handling of nulls in SRF results (bug #7808) -create type foo2 as (a integer, b text); +create type rngfunc2 as (a integer, b text); -select *, row_to_json(u) from unnest(array[(1,'foo')::foo2, null::foo2]) u; -select *, row_to_json(u) from unnest(array[null::foo2, null::foo2]) u; -select *, row_to_json(u) from unnest(array[null::foo2, (1,'foo')::foo2, null::foo2]) u; -select *, row_to_json(u) from unnest(array[]::foo2[]) u; +select *, row_to_json(u) from unnest(array[(1,'foo')::rngfunc2, null::rngfunc2]) u; +select *, row_to_json(u) from unnest(array[null::rngfunc2, null::rngfunc2]) u; +select *, row_to_json(u) from unnest(array[null::rngfunc2, (1,'foo')::rngfunc2, null::rngfunc2]) u; +select *, row_to_json(u) from unnest(array[]::rngfunc2[]) u; -drop type foo2; +drop type rngfunc2; diff --git a/src/test/regress/sql/rangetypes.sql b/src/test/regress/sql/rangetypes.sql index a60df9095e..55638a85ee 
100644 --- a/src/test/regress/sql/rangetypes.sql +++ b/src/test/regress/sql/rangetypes.sql @@ -461,6 +461,18 @@ select *, row_to_json(upper(t)) as u from drop type two_ints cascade; +-- +-- Check behavior when subtype lacks a hash function +-- + +create type cashrange as range (subtype = money); + +set enable_sort = off; -- try to make it pick a hash setop implementation + +select '(2,5)'::cashrange except select '(5,6)'::cashrange; + +reset enable_sort; + -- -- OUT/INOUT/TABLE functions -- diff --git a/src/test/regress/sql/reloptions.sql b/src/test/regress/sql/reloptions.sql new file mode 100644 index 0000000000..37fbf41f7d --- /dev/null +++ b/src/test/regress/sql/reloptions.sql @@ -0,0 +1,113 @@ + +-- Simple create +CREATE TABLE reloptions_test(i INT) WITH (FiLLFaCToR=30, + autovacuum_enabled = false, autovacuum_analyze_scale_factor = 0.2); +SELECT reloptions FROM pg_class WHERE oid = 'reloptions_test'::regclass; + +-- Fail min/max values check +CREATE TABLE reloptions_test2(i INT) WITH (fillfactor=2); +CREATE TABLE reloptions_test2(i INT) WITH (fillfactor=110); +CREATE TABLE reloptions_test2(i INT) WITH (autovacuum_analyze_scale_factor = -10.0); +CREATE TABLE reloptions_test2(i INT) WITH (autovacuum_analyze_scale_factor = 110.0); + +-- Fail when option and namespace do not exist +CREATE TABLE reloptions_test2(i INT) WITH (not_existing_option=2); +CREATE TABLE reloptions_test2(i INT) WITH (not_existing_namespace.fillfactor=2); + +-- Fail while setting improper values +CREATE TABLE reloptions_test2(i INT) WITH (fillfactor=30.5); +CREATE TABLE reloptions_test2(i INT) WITH (fillfactor='string'); +CREATE TABLE reloptions_test2(i INT) WITH (fillfactor=true); +CREATE TABLE reloptions_test2(i INT) WITH (autovacuum_enabled=12); +CREATE TABLE reloptions_test2(i INT) WITH (autovacuum_enabled=30.5); +CREATE TABLE reloptions_test2(i INT) WITH (autovacuum_enabled='string'); +CREATE TABLE reloptions_test2(i INT) WITH (autovacuum_analyze_scale_factor='string'); +CREATE TABLE reloptions_test2(i INT) WITH (autovacuum_analyze_scale_factor=true); + +-- Fail if option is specified twice +CREATE TABLE reloptions_test2(i INT) WITH (fillfactor=30, fillfactor=40); + +-- Specifying name only for a non-Boolean option should fail +CREATE TABLE reloptions_test2(i INT) WITH (fillfactor); + +-- Simple ALTER TABLE +ALTER TABLE reloptions_test SET (fillfactor=31, + autovacuum_analyze_scale_factor = 0.3); +SELECT reloptions FROM pg_class WHERE oid = 'reloptions_test'::regclass; + +-- Set boolean option to true without specifying value +ALTER TABLE reloptions_test SET (autovacuum_enabled, fillfactor=32); +SELECT reloptions FROM pg_class WHERE oid = 'reloptions_test'::regclass; + +-- Check that RESET works well +ALTER TABLE reloptions_test RESET (fillfactor); +SELECT reloptions FROM pg_class WHERE oid = 'reloptions_test'::regclass; + +-- Resetting all values causes the column to become null +ALTER TABLE reloptions_test RESET (autovacuum_enabled, + autovacuum_analyze_scale_factor); +SELECT reloptions FROM pg_class WHERE oid = 'reloptions_test'::regclass AND + reloptions IS NULL; + +-- RESET fails if a value is specified +ALTER TABLE reloptions_test RESET (fillfactor=12); + +-- The OIDS option is not stored as reloption +DROP TABLE reloptions_test; +CREATE TABLE reloptions_test(i INT) WITH (fillfactor=20, oids=true); +SELECT reloptions, relhasoids FROM pg_class WHERE oid = 'reloptions_test'::regclass; + +-- Test toast.* options +DROP TABLE reloptions_test; + +CREATE TABLE reloptions_test (s VARCHAR) + WITH 
(toast.autovacuum_vacuum_cost_delay = 23); +SELECT reltoastrelid as toast_oid + FROM pg_class WHERE oid = 'reloptions_test'::regclass \gset +SELECT reloptions FROM pg_class WHERE oid = :toast_oid; + +ALTER TABLE reloptions_test SET (toast.autovacuum_vacuum_cost_delay = 24); +SELECT reloptions FROM pg_class WHERE oid = :toast_oid; + +ALTER TABLE reloptions_test RESET (toast.autovacuum_vacuum_cost_delay); +SELECT reloptions FROM pg_class WHERE oid = :toast_oid; + +-- Fail on non-existent options in toast namespace +CREATE TABLE reloptions_test2 (i int) WITH (toast.not_existing_option = 42); + +-- Mix TOAST & heap +DROP TABLE reloptions_test; + +CREATE TABLE reloptions_test (s VARCHAR) WITH + (toast.autovacuum_vacuum_cost_delay = 23, + autovacuum_vacuum_cost_delay = 24, fillfactor = 40); + +SELECT reloptions FROM pg_class WHERE oid = 'reloptions_test'::regclass; +SELECT reloptions FROM pg_class WHERE oid = ( + SELECT reltoastrelid FROM pg_class WHERE oid = 'reloptions_test'::regclass); + +-- +-- CREATE INDEX, ALTER INDEX for btrees +-- + +CREATE INDEX reloptions_test_idx ON reloptions_test (s) WITH (fillfactor=30); +SELECT reloptions FROM pg_class WHERE oid = 'reloptions_test_idx'::regclass; + +-- Fail when option and namespace do not exist +CREATE INDEX reloptions_test_idx ON reloptions_test (s) + WITH (not_existing_option=2); +CREATE INDEX reloptions_test_idx ON reloptions_test (s) + WITH (not_existing_ns.fillfactor=2); + +-- Check allowed ranges +CREATE INDEX reloptions_test_idx2 ON reloptions_test (s) WITH (fillfactor=1); +CREATE INDEX reloptions_test_idx2 ON reloptions_test (s) WITH (fillfactor=130); + +-- Check ALTER +ALTER INDEX reloptions_test_idx SET (fillfactor=40); +SELECT reloptions FROM pg_class WHERE oid = 'reloptions_test_idx'::regclass; + +-- Check ALTER on empty reloption list +CREATE INDEX reloptions_test_idx3 ON reloptions_test (s); +ALTER INDEX reloptions_test_idx3 SET (fillfactor=40); +SELECT reloptions FROM pg_class WHERE oid = 'reloptions_test_idx3'::regclass; diff --git a/src/test/regress/sql/reltime.sql b/src/test/regress/sql/reltime.sql deleted file mode 100644 index a07b64e29d..0000000000 --- a/src/test/regress/sql/reltime.sql +++ /dev/null @@ -1,50 +0,0 @@ --- --- RELTIME --- - -CREATE TABLE RELTIME_TBL (f1 reltime); - -INSERT INTO RELTIME_TBL (f1) VALUES ('@ 1 minute'); - -INSERT INTO RELTIME_TBL (f1) VALUES ('@ 5 hour'); - -INSERT INTO RELTIME_TBL (f1) VALUES ('@ 10 day'); - -INSERT INTO RELTIME_TBL (f1) VALUES ('@ 34 year'); - -INSERT INTO RELTIME_TBL (f1) VALUES ('@ 3 months'); - -INSERT INTO RELTIME_TBL (f1) VALUES ('@ 14 seconds ago'); - - --- badly formatted reltimes -INSERT INTO RELTIME_TBL (f1) VALUES ('badly formatted reltime'); - -INSERT INTO RELTIME_TBL (f1) VALUES ('@ 30 eons ago'); - --- test reltime operators - -SELECT '' AS six, * FROM RELTIME_TBL; - -SELECT '' AS five, * FROM RELTIME_TBL - WHERE RELTIME_TBL.f1 <> reltime '@ 10 days'; - -SELECT '' AS three, * FROM RELTIME_TBL - WHERE RELTIME_TBL.f1 <= reltime '@ 5 hours'; - -SELECT '' AS three, * FROM RELTIME_TBL - WHERE RELTIME_TBL.f1 < reltime '@ 1 day'; - -SELECT '' AS one, * FROM RELTIME_TBL - WHERE RELTIME_TBL.f1 = reltime '@ 34 years'; - -SELECT '' AS two, * FROM RELTIME_TBL - WHERE RELTIME_TBL.f1 >= reltime '@ 1 month'; - -SELECT '' AS five, * FROM RELTIME_TBL - WHERE RELTIME_TBL.f1 > reltime '@ 3 seconds ago'; - -SELECT '' AS fifteen, r1.*, r2.* - FROM RELTIME_TBL r1, RELTIME_TBL r2 - WHERE r1.f1 > r2.f1 - ORDER BY r1.f1, r2.f1; diff --git a/src/test/regress/sql/rolenames.sql 
b/src/test/regress/sql/rolenames.sql index 4c5706bbaa..1246d19715 100644 --- a/src/test/regress/sql/rolenames.sql +++ b/src/test/regress/sql/rolenames.sql @@ -385,7 +385,7 @@ GRANT regress_testrol0 TO pg_signal_backend; -- success SET ROLE pg_signal_backend; --success RESET ROLE; -CREATE SCHEMA test_schema AUTHORIZATION pg_signal_backend; --success +CREATE SCHEMA test_roles_schema AUTHORIZATION pg_signal_backend; --success SET ROLE regress_testrol2; UPDATE pg_proc SET proacl = null WHERE proname LIKE 'testagg_'; @@ -438,10 +438,43 @@ REVOKE ALL PRIVILEGES ON FUNCTION testagg9(int2) FROM "none"; --error SELECT proname, proacl FROM pg_proc WHERE proname LIKE 'testagg_'; +-- DEFAULT MONITORING ROLES +CREATE ROLE regress_role_haspriv; +CREATE ROLE regress_role_nopriv; + +-- pg_read_all_stats +GRANT pg_read_all_stats TO regress_role_haspriv; +SET SESSION AUTHORIZATION regress_role_haspriv; +-- returns true with role member of pg_read_all_stats +SELECT COUNT(*) = 0 AS haspriv FROM pg_stat_activity + WHERE query = ''; +SET SESSION AUTHORIZATION regress_role_nopriv; +-- returns false with role not member of pg_read_all_stats +SELECT COUNT(*) = 0 AS haspriv FROM pg_stat_activity + WHERE query = ''; +RESET SESSION AUTHORIZATION; +REVOKE pg_read_all_stats FROM regress_role_haspriv; + +-- pg_read_all_settings +GRANT pg_read_all_settings TO regress_role_haspriv; +BEGIN; +-- A GUC using GUC_SUPERUSER_ONLY is useful for negative tests. +SET LOCAL session_preload_libraries TO 'path-to-preload-libraries'; +SET SESSION AUTHORIZATION regress_role_haspriv; +-- passes with role member of pg_read_all_settings +SHOW session_preload_libraries; +SET SESSION AUTHORIZATION regress_role_nopriv; +-- fails with role not member of pg_read_all_settings +SHOW session_preload_libraries; +RESET SESSION AUTHORIZATION; +ROLLBACK; +REVOKE pg_read_all_settings FROM regress_role_haspriv; + -- clean up \c -DROP SCHEMA test_schema; +DROP SCHEMA test_roles_schema; DROP OWNED BY regress_testrol0, "Public", "current_user", regress_testrol1, regress_testrol2, regress_testrolx CASCADE; DROP ROLE regress_testrol0, regress_testrol1, regress_testrol2, regress_testrolx; DROP ROLE "Public", "None", "current_user", "session_user", "user"; +DROP ROLE regress_role_haspriv, regress_role_nopriv; diff --git a/src/test/regress/sql/rowsecurity.sql b/src/test/regress/sql/rowsecurity.sql index e03a7ab65f..5a9fdcad74 100644 --- a/src/test/regress/sql/rowsecurity.sql +++ b/src/test/regress/sql/rowsecurity.sql @@ -781,7 +781,7 @@ SET SESSION AUTHORIZATION regress_rls_bob; INSERT INTO document VALUES (79, (SELECT cid from category WHERE cname = 'technology'), 1, 'regress_rls_bob', 'technology book, can only insert') ON CONFLICT (did) DO UPDATE SET dtitle = EXCLUDED.dtitle RETURNING *; --- UPDATE path is taken here. Existing tuple passes, since it's cid +-- UPDATE path is taken here. 
Existing tuple passes, since its cid -- corresponds to "novel", but default USING qual is enforced against -- post-UPDATE tuple too (as always when updating with a policy that lacks an -- explicit WCO), and so this fails: @@ -1674,10 +1674,11 @@ DROP TABLE r1; -- SET SESSION AUTHORIZATION regress_rls_alice; SET row_security = on; -CREATE TABLE r1 (a int); +CREATE TABLE r1 (a int PRIMARY KEY); CREATE POLICY p1 ON r1 FOR SELECT USING (a < 20); CREATE POLICY p2 ON r1 FOR UPDATE USING (a < 20) WITH CHECK (true); +CREATE POLICY p3 ON r1 FOR INSERT WITH CHECK (true); INSERT INTO r1 VALUES (10); ALTER TABLE r1 ENABLE ROW LEVEL SECURITY; ALTER TABLE r1 FORCE ROW LEVEL SECURITY; @@ -1699,6 +1700,17 @@ ALTER TABLE r1 FORCE ROW LEVEL SECURITY; -- Error UPDATE r1 SET a = 30 RETURNING *; +-- UPDATE path of INSERT ... ON CONFLICT DO UPDATE should also error out +INSERT INTO r1 VALUES (10) + ON CONFLICT (a) DO UPDATE SET a = 30 RETURNING *; + +-- Should still error out without RETURNING (use of arbiter always requires +-- SELECT permissions) +INSERT INTO r1 VALUES (10) + ON CONFLICT (a) DO UPDATE SET a = 30; +INSERT INTO r1 VALUES (10) + ON CONFLICT ON CONSTRAINT r1_pkey DO UPDATE SET a = 30; + DROP TABLE r1; -- Check dependency handling diff --git a/src/test/regress/sql/rowtypes.sql b/src/test/regress/sql/rowtypes.sql index 8d63060500..faf2e108d6 100644 --- a/src/test/regress/sql/rowtypes.sql +++ b/src/test/regress/sql/rowtypes.sql @@ -27,6 +27,9 @@ select '(Joe,"Blow,Jr")'::fullname; select '(Joe,)'::fullname; -- ok, null 2nd column select '(Joe)'::fullname; -- bad select '(Joe,,)'::fullname; -- bad +select '[]'::fullname; -- bad +select ' (Joe,Blow) '::fullname; -- ok, extra whitespace +select '(Joe,Blow) /'::fullname; -- bad create temp table quadtable(f1 int, q quad); @@ -160,6 +163,105 @@ insert into cc values('("(1,2)",3)'); insert into cc values('("(4,5)",6)'); select * from cc order by f1; -- fail, but should complain about cantcompare +-- +-- Tests for record_{eq,cmp} +-- + +create type testtype1 as (a int, b int); + +-- all true +select row(1, 2)::testtype1 < row(1, 3)::testtype1; +select row(1, 2)::testtype1 <= row(1, 3)::testtype1; +select row(1, 2)::testtype1 = row(1, 2)::testtype1; +select row(1, 2)::testtype1 <> row(1, 3)::testtype1; +select row(1, 3)::testtype1 >= row(1, 2)::testtype1; +select row(1, 3)::testtype1 > row(1, 2)::testtype1; + +-- all false +select row(1, -2)::testtype1 < row(1, -3)::testtype1; +select row(1, -2)::testtype1 <= row(1, -3)::testtype1; +select row(1, -2)::testtype1 = row(1, -3)::testtype1; +select row(1, -2)::testtype1 <> row(1, -2)::testtype1; +select row(1, -3)::testtype1 >= row(1, -2)::testtype1; +select row(1, -3)::testtype1 > row(1, -2)::testtype1; + +-- true, but see *< below +select row(1, -2)::testtype1 < row(1, 3)::testtype1; + +-- mismatches +create type testtype3 as (a int, b text); +select row(1, 2)::testtype1 < row(1, 'abc')::testtype3; +select row(1, 2)::testtype1 <> row(1, 'abc')::testtype3; +create type testtype5 as (a int); +select row(1, 2)::testtype1 < row(1)::testtype5; +select row(1, 2)::testtype1 <> row(1)::testtype5; + +-- non-comparable types +create type testtype6 as (a int, b point); +select row(1, '(1,2)')::testtype6 < row(1, '(1,3)')::testtype6; +select row(1, '(1,2)')::testtype6 <> row(1, '(1,3)')::testtype6; + +drop type testtype1, testtype3, testtype5, testtype6; + +-- +-- Tests for record_image_{eq,cmp} +-- + +create type testtype1 as (a int, b int); + +-- all true +select row(1, 2)::testtype1 *< row(1, 3)::testtype1; +select 
row(1, 2)::testtype1 *<= row(1, 3)::testtype1; +select row(1, 2)::testtype1 *= row(1, 2)::testtype1; +select row(1, 2)::testtype1 *<> row(1, 3)::testtype1; +select row(1, 3)::testtype1 *>= row(1, 2)::testtype1; +select row(1, 3)::testtype1 *> row(1, 2)::testtype1; + +-- all false +select row(1, -2)::testtype1 *< row(1, -3)::testtype1; +select row(1, -2)::testtype1 *<= row(1, -3)::testtype1; +select row(1, -2)::testtype1 *= row(1, -3)::testtype1; +select row(1, -2)::testtype1 *<> row(1, -2)::testtype1; +select row(1, -3)::testtype1 *>= row(1, -2)::testtype1; +select row(1, -3)::testtype1 *> row(1, -2)::testtype1; + +-- This returns the "wrong" order because record_image_cmp works on +-- unsigned datums without knowing about the actual data type. +select row(1, -2)::testtype1 *< row(1, 3)::testtype1; + +-- other types +create type testtype2 as (a smallint, b bool); -- byval different sizes +select row(1, true)::testtype2 *< row(2, true)::testtype2; +select row(-2, true)::testtype2 *< row(-1, true)::testtype2; +select row(0, false)::testtype2 *< row(0, true)::testtype2; +select row(0, false)::testtype2 *<> row(0, true)::testtype2; + +create type testtype3 as (a int, b text); -- variable length +select row(1, 'abc')::testtype3 *< row(1, 'abd')::testtype3; +select row(1, 'abc')::testtype3 *< row(1, 'abcd')::testtype3; +select row(1, 'abc')::testtype3 *> row(1, 'abd')::testtype3; +select row(1, 'abc')::testtype3 *<> row(1, 'abd')::testtype3; + +create type testtype4 as (a int, b point); -- by ref, fixed length +select row(1, '(1,2)')::testtype4 *< row(1, '(1,3)')::testtype4; +select row(1, '(1,2)')::testtype4 *<> row(1, '(1,3)')::testtype4; + +-- mismatches +select row(1, 2)::testtype1 *< row(1, 'abc')::testtype3; +select row(1, 2)::testtype1 *<> row(1, 'abc')::testtype3; +create type testtype5 as (a int); +select row(1, 2)::testtype1 *< row(1)::testtype5; +select row(1, 2)::testtype1 *<> row(1)::testtype5; + +-- non-comparable types +create type testtype6 as (a int, b point); +select row(1, '(1,2)')::testtype6 *< row(1, '(1,3)')::testtype6; +select row(1, '(1,2)')::testtype6 *>= row(1, '(1,3)')::testtype6; +select row(1, '(1,2)')::testtype6 *<> row(1, '(1,3)')::testtype6; + +drop type testtype1, testtype2, testtype3, testtype4, testtype5, testtype6; + + -- -- Test case derived from bug #5716: check multiple uses of a rowtype result -- @@ -243,6 +345,26 @@ select (row('Jim', 'Beam'))::text; select text(row('Jim', 'Beam')); -- error select (row('Jim', 'Beam')).text; -- error +-- +-- Check the equivalence of functional and column notation +-- +insert into fullname values ('Joe', 'Blow'); + +select f.last from fullname f; +select last(f) from fullname f; + +create function longname(fullname) returns text language sql +as $$select $1.first || ' ' || $1.last$$; + +select f.longname from fullname f; +select longname(f) from fullname f; + +-- Starting in v11, the notational form does matter if there's ambiguity +alter table fullname add column longname text; + +select f.longname from fullname f; +select longname(f) from fullname f; + -- -- Test that composite values are seen to have the correct column names -- (bug #11210 and other reports) diff --git a/src/test/regress/sql/rules.sql b/src/test/regress/sql/rules.sql index 0ded0f01d2..f4ee30ec8f 100644 --- a/src/test/regress/sql/rules.sql +++ b/src/test/regress/sql/rules.sql @@ -700,34 +700,34 @@ SELECT count(*) FROM shoe; -- -- Simple test of qualified ON INSERT ... this did not work in 7.0 ... 
-- -create table foo (f1 int); -create table foo2 (f1 int); +create table rules_foo (f1 int); +create table rules_foo2 (f1 int); -create rule foorule as on insert to foo where f1 < 100 +create rule rules_foorule as on insert to rules_foo where f1 < 100 do instead nothing; -insert into foo values(1); -insert into foo values(1001); -select * from foo; +insert into rules_foo values(1); +insert into rules_foo values(1001); +select * from rules_foo; -drop rule foorule on foo; +drop rule rules_foorule on rules_foo; -- this should fail because f1 is not exposed for unqualified reference: -create rule foorule as on insert to foo where f1 < 100 -do instead insert into foo2 values (f1); +create rule rules_foorule as on insert to rules_foo where f1 < 100 +do instead insert into rules_foo2 values (f1); -- this is the correct way: -create rule foorule as on insert to foo where f1 < 100 -do instead insert into foo2 values (new.f1); +create rule rules_foorule as on insert to rules_foo where f1 < 100 +do instead insert into rules_foo2 values (new.f1); -insert into foo values(2); -insert into foo values(100); +insert into rules_foo values(2); +insert into rules_foo values(100); -select * from foo; -select * from foo2; +select * from rules_foo; +select * from rules_foo2; -drop rule foorule on foo; -drop table foo; -drop table foo2; +drop rule rules_foorule on rules_foo; +drop table rules_foo; +drop table rules_foo2; -- @@ -876,36 +876,36 @@ insert into rule_and_refint_t3 values (1, 13, 11, 'row8'); -- disallow dropping a view's rule (bug #5072) -- -create view fooview as select 'foo'::text; -drop rule "_RETURN" on fooview; -drop view fooview; +create view rules_fooview as select 'rules_foo'::text; +drop rule "_RETURN" on rules_fooview; +drop view rules_fooview; -- -- test conversion of table to view (needed to load some pg_dump files) -- -create table fooview (x int, y text); -select xmin, * from fooview; +create table rules_fooview (x int, y text); +select xmin, * from rules_fooview; -create rule "_RETURN" as on select to fooview do instead +create rule "_RETURN" as on select to rules_fooview do instead select 1 as x, 'aaa'::text as y; -select * from fooview; -select xmin, * from fooview; -- fail, views don't have such a column +select * from rules_fooview; +select xmin, * from rules_fooview; -- fail, views don't have such a column select reltoastrelid, relkind, relfrozenxid - from pg_class where oid = 'fooview'::regclass; + from pg_class where oid = 'rules_fooview'::regclass; -drop view fooview; +drop view rules_fooview; -- trying to convert a partitioned table to view is not allowed -create table fooview (x int, y text) partition by list (x); -create rule "_RETURN" as on select to fooview do instead +create table rules_fooview (x int, y text) partition by list (x); +create rule "_RETURN" as on select to rules_fooview do instead select 1 as x, 'aaa'::text as y; -- nor can one convert a partition to view -create table fooview_part partition of fooview for values in (1); -create rule "_RETURN" as on select to fooview_part do instead +create table rules_fooview_part partition of rules_fooview for values in (1); +create rule "_RETURN" as on select to rules_fooview_part do instead select 1 as x, 'aaa'::text as y; -- @@ -1007,7 +1007,7 @@ create rule r3 as on delete to rules_src do notify rules_src_deletion; \d+ rules_src -- --- Ensure a aliased target relation for insert is correctly deparsed. +-- Ensure an aliased target relation for insert is correctly deparsed. 
-- create rule r4 as on insert to rules_src do instead insert into rules_log AS trgt SELECT NEW.* RETURNING trgt.f1, trgt.f2; create rule r5 as on update to rules_src do instead UPDATE rules_log AS trgt SET tag = 'updated' WHERE trgt.f1 = new.f1; @@ -1155,6 +1155,19 @@ DROP RULE hat_upsert ON hats; drop table hats; drop table hat_data; +-- test for pg_get_functiondef properly regurgitating SET parameters +-- Note that the function is kept around to stress pg_dump. +CREATE FUNCTION func_with_set_params() RETURNS integer + AS 'select 1;' + LANGUAGE SQL + SET search_path TO PG_CATALOG + SET extra_float_digits TO 2 + SET work_mem TO '4MB' + SET datestyle to iso, mdy + SET local_preload_libraries TO "Mixed/Case", 'c:/''a"/path', '', '0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789' + IMMUTABLE STRICT; +SELECT pg_get_functiondef('func_with_set_params()'::regprocedure); + -- tests for pg_get_*def with invalid objects SELECT pg_get_constraintdef(0); SELECT pg_get_functiondef(0); @@ -1171,9 +1184,35 @@ SELECT pg_get_function_arg_default('pg_class'::regclass, 0); SELECT pg_get_partkeydef(0); -- test rename for a rule defined on a partitioned table -CREATE TABLE parted_table (a int) PARTITION BY LIST (a); -CREATE TABLE parted_table_1 PARTITION OF parted_table FOR VALUES IN (1); -CREATE RULE parted_table_insert AS ON INSERT to parted_table - DO INSTEAD INSERT INTO parted_table_1 VALUES (NEW.*); -ALTER RULE parted_table_insert ON parted_table RENAME TO parted_table_insert_redirect; -DROP TABLE parted_table; +CREATE TABLE rules_parted_table (a int) PARTITION BY LIST (a); +CREATE TABLE rules_parted_table_1 PARTITION OF rules_parted_table FOR VALUES IN (1); +CREATE RULE rules_parted_table_insert AS ON INSERT to rules_parted_table + DO INSTEAD INSERT INTO rules_parted_table_1 VALUES (NEW.*); +ALTER RULE rules_parted_table_insert ON rules_parted_table RENAME TO rules_parted_table_insert_redirect; +DROP TABLE rules_parted_table; + +-- +-- Test enabling/disabling +-- +CREATE TABLE ruletest1 (a int); +CREATE TABLE ruletest2 (b int); + +CREATE RULE rule1 AS ON INSERT TO ruletest1 + DO INSTEAD INSERT INTO ruletest2 VALUES (NEW.*); + +INSERT INTO ruletest1 VALUES (1); +ALTER TABLE ruletest1 DISABLE RULE rule1; +INSERT INTO ruletest1 VALUES (2); +ALTER TABLE ruletest1 ENABLE RULE rule1; +SET session_replication_role = replica; +INSERT INTO ruletest1 VALUES (3); +ALTER TABLE ruletest1 ENABLE REPLICA RULE rule1; +INSERT INTO ruletest1 VALUES (4); +RESET session_replication_role; +INSERT INTO ruletest1 VALUES (5); + +SELECT * FROM ruletest1; +SELECT * FROM ruletest2; + +DROP TABLE ruletest1; +DROP TABLE ruletest2; diff --git a/src/test/regress/sql/select.sql b/src/test/regress/sql/select.sql index c80429e7d0..b5929b2eca 100644 --- a/src/test/regress/sql/select.sql +++ b/src/test/regress/sql/select.sql @@ -116,9 +116,9 @@ SELECT p.name, p.age FROM person* p ORDER BY age using >, name; -- -- Test some cases involving whole-row Var referencing a subquery -- -select foo from (select 1) as foo; -select foo from (select null) as foo; -select foo from (select 'xyzzy',1,null) as foo; +select foo from (select 1 offset 0) as foo; +select foo from (select null offset 0) as foo; +select foo from (select 'xyzzy',1,null offset 0) as foo; -- -- Test VALUES lists @@ -254,3 +254,11 @@ drop function sillysrf(int); -- (see bug #5084) select * from (values (2),(null),(1)) v(k) where k = k order by k; select * from (values (2),(null),(1)) v(k) where k = k; + +-- Test partitioned tables 
with no partitions, which should be handled the +-- same as the non-inheritance case when expanding its RTE. +create table list_parted_tbl (a int,b int) partition by list (a); +create table list_parted_tbl1 partition of list_parted_tbl + for values in (1) partition by list(b); +explain (costs off) select * from list_parted_tbl; +drop table list_parted_tbl; diff --git a/src/test/regress/sql/select_into.sql b/src/test/regress/sql/select_into.sql index 5cb7ce0922..62eddeed9d 100644 --- a/src/test/regress/sql/select_into.sql +++ b/src/test/regress/sql/select_into.sql @@ -3,18 +3,18 @@ -- SELECT * - INTO TABLE tmp1 + INTO TABLE sitmp1 FROM onek WHERE onek.unique1 < 2; -DROP TABLE tmp1; +DROP TABLE sitmp1; SELECT * - INTO TABLE tmp1 + INTO TABLE sitmp1 FROM onek2 WHERE onek2.unique1 < 2; -DROP TABLE tmp1; +DROP TABLE sitmp1; -- -- SELECT INTO and INSERT permission, if owner is not allowed to insert. diff --git a/src/test/regress/sql/select_parallel.sql b/src/test/regress/sql/select_parallel.sql index e717f92e53..03c056b8b7 100644 --- a/src/test/regress/sql/select_parallel.sql +++ b/src/test/regress/sql/select_parallel.sql @@ -2,7 +2,7 @@ -- PARALLEL -- -create or replace function parallel_restricted(int) returns int as +create function sp_parallel_restricted(int) returns int as $$begin return $1; end$$ language plpgsql parallel restricted; -- Serializable isolation would disable parallel query, so explicitly use an @@ -15,14 +15,75 @@ set parallel_tuple_cost=0; set min_parallel_table_scan_size=0; set max_parallel_workers_per_gather=4; +-- Parallel Append with partial-subplans explain (costs off) - select count(*) from a_star; -select count(*) from a_star; + select round(avg(aa)), sum(aa) from a_star; +select round(avg(aa)), sum(aa) from a_star a1; + +-- Parallel Append with both partial and non-partial subplans +alter table c_star set (parallel_workers = 0); +alter table d_star set (parallel_workers = 0); +explain (costs off) + select round(avg(aa)), sum(aa) from a_star; +select round(avg(aa)), sum(aa) from a_star a2; + +-- Parallel Append with only non-partial subplans +alter table a_star set (parallel_workers = 0); +alter table b_star set (parallel_workers = 0); +alter table e_star set (parallel_workers = 0); +alter table f_star set (parallel_workers = 0); +explain (costs off) + select round(avg(aa)), sum(aa) from a_star; +select round(avg(aa)), sum(aa) from a_star a3; + +-- Disable Parallel Append +alter table a_star reset (parallel_workers); +alter table b_star reset (parallel_workers); +alter table c_star reset (parallel_workers); +alter table d_star reset (parallel_workers); +alter table e_star reset (parallel_workers); +alter table f_star reset (parallel_workers); +set enable_parallel_append to off; +explain (costs off) + select round(avg(aa)), sum(aa) from a_star; +select round(avg(aa)), sum(aa) from a_star a4; +reset enable_parallel_append; + +-- Parallel Append that runs serially +create function sp_test_func() returns setof text as +$$ select 'foo'::varchar union all select 'bar'::varchar $$ +language sql stable; +select sp_test_func() order by 1; + +-- Parallel Append is not to be used when the subpath depends on the outer param +create table part_pa_test(a int, b int) partition by range(a); +create table part_pa_test_p1 partition of part_pa_test for values from (minvalue) to (0); +create table part_pa_test_p2 partition of part_pa_test for values from (0) to (maxvalue); +explain (costs off) + select (select max((select pa1.b from part_pa_test pa1 where pa1.a = pa2.a))) + from 
part_pa_test pa2; +drop table part_pa_test; + +-- test with leader participation disabled +set parallel_leader_participation = off; +explain (costs off) + select count(*) from tenk1 where stringu1 = 'GRAAAA'; +select count(*) from tenk1 where stringu1 = 'GRAAAA'; + +-- test with leader participation disabled, but no workers available (so +-- the leader will have to run the plan despite the setting) +set max_parallel_workers = 0; +explain (costs off) + select count(*) from tenk1 where stringu1 = 'GRAAAA'; +select count(*) from tenk1 where stringu1 = 'GRAAAA'; + +reset max_parallel_workers; +reset parallel_leader_participation; -- test that parallel_restricted function doesn't run in worker alter table tenk1 set (parallel_workers = 4); explain (verbose, costs off) -select parallel_restricted(unique1) from tenk1 +select sp_parallel_restricted(unique1) from tenk1 where stringu1 = 'GRAAAA' order by 1; -- test parallel plan when group by expression is in target list. @@ -36,8 +97,14 @@ explain (costs off) -- test that parallel plan for aggregates is not selected when -- target list contains parallel restricted clause. explain (costs off) - select sum(parallel_restricted(unique1)) from tenk1 - group by(parallel_restricted(unique1)); + select sum(sp_parallel_restricted(unique1)) from tenk1 + group by(sp_parallel_restricted(unique1)); + +-- test prepared statement +prepare tenk1_count(integer) As select count((unique1)) from tenk1 where hundred > $1; +explain (costs off) execute tenk1_count(1); +execute tenk1_count(1); +deallocate tenk1_count; -- test parallel plans for queries containing un-correlated subplans. alter table tenk2 set (parallel_workers = 0); @@ -52,6 +119,23 @@ explain (costs off) (select ten from tenk2); alter table tenk2 reset (parallel_workers); +-- test parallel plan for a query containing initplan. +set enable_indexscan = off; +set enable_indexonlyscan = off; +set enable_bitmapscan = off; +alter table tenk2 set (parallel_workers = 2); + +explain (costs off) + select count(*) from tenk1 + where tenk1.unique1 = (Select max(tenk2.unique1) from tenk2); +select count(*) from tenk1 + where tenk1.unique1 = (Select max(tenk2.unique1) from tenk2); + +reset enable_indexscan; +reset enable_indexonlyscan; +reset enable_bitmapscan; +alter table tenk2 reset (parallel_workers); + -- test parallel index scans. 
set enable_seqscan to off; set enable_bitmapscan to off; @@ -65,6 +149,26 @@ explain (costs off) select count(*) from tenk1 where thousand > 95; select count(*) from tenk1 where thousand > 95; +-- test rescan cases too +set enable_material = false; + +explain (costs off) +select * from + (select count(unique1) from tenk1 where hundred > 10) ss + right join (values (1),(2),(3)) v(x) on true; +select * from + (select count(unique1) from tenk1 where hundred > 10) ss + right join (values (1),(2),(3)) v(x) on true; + +explain (costs off) +select * from + (select count(*) from tenk1 where thousand > 99) ss + right join (values (1),(2),(3)) v(x) on true; +select * from + (select count(*) from tenk1 where thousand > 99) ss + right join (values (1),(2),(3)) v(x) on true; + +reset enable_material; reset enable_seqscan; reset enable_bitmapscan; @@ -90,14 +194,40 @@ insert into bmscantest select r, 'fooooooooooooooooooooooooooooooooooooooooooooo create index i_bmtest ON bmscantest(a); select count(*) from bmscantest where a>1; +-- test accumulation of stats for parallel nodes reset enable_seqscan; +alter table tenk2 set (parallel_workers = 0); +explain (analyze, timing off, summary off, costs off) + select count(*) from tenk1, tenk2 where tenk1.hundred > 1 + and tenk2.thousand=0; +alter table tenk2 reset (parallel_workers); + +reset work_mem; +create function explain_parallel_sort_stats() returns setof text +language plpgsql as +$$ +declare ln text; +begin + for ln in + explain (analyze, timing off, summary off, costs off) + select * from + (select ten from tenk1 where ten < 100 order by ten) ss + right join (values (1),(2),(3)) v(x) on true + loop + ln := regexp_replace(ln, 'Memory: \S*', 'Memory: xxx'); + return next ln; + end loop; +end; +$$; +select * from explain_parallel_sort_stats(); + reset enable_indexscan; reset enable_hashjoin; reset enable_mergejoin; reset enable_material; reset effective_io_concurrency; -reset work_mem; drop table bmscantest; +drop function explain_parallel_sort_stats(); -- test parallel merge join path. 
set enable_hashjoin to off; @@ -118,23 +248,188 @@ explain (costs off) select count(*) from tenk1 group by twenty; +--test expressions in targetlist are pushed down for gather merge +create function sp_simple_func(var1 integer) returns integer +as $$ +begin + return var1 + 10; +end; +$$ language plpgsql PARALLEL SAFE; + +explain (costs off, verbose) + select ten, sp_simple_func(ten) from tenk1 where ten < 100 order by ten; + +drop function sp_simple_func(integer); + +-- test handling of SRFs in targetlist (bug in 10.0) + +explain (costs off) + select count(*), generate_series(1,2) from tenk1 group by twenty; + +select count(*), generate_series(1,2) from tenk1 group by twenty; + +-- test gather merge with parallel leader participation disabled +set parallel_leader_participation = off; + +explain (costs off) + select count(*) from tenk1 group by twenty; + +select count(*) from tenk1 group by twenty; + +reset parallel_leader_participation; + +--test rescan behavior of gather merge +set enable_material = false; + +explain (costs off) +select * from + (select string4, count(unique2) + from tenk1 group by string4 order by string4) ss + right join (values (1),(2),(3)) v(x) on true; + +select * from + (select string4, count(unique2) + from tenk1 group by string4 order by string4) ss + right join (values (1),(2),(3)) v(x) on true; + +reset enable_material; + +reset enable_hashagg; + +-- check parallelized int8 aggregate (bug #14897) +explain (costs off) +select avg(unique1::int8) from tenk1; + +select avg(unique1::int8) from tenk1; + +-- gather merge test with a LIMIT +explain (costs off) + select fivethous from tenk1 order by fivethous limit 4; + +select fivethous from tenk1 order by fivethous limit 4; + -- gather merge test with 0 worker set max_parallel_workers = 0; explain (costs off) select string4 from tenk1 order by string4 limit 5; select string4 from tenk1 order by string4 limit 5; -reset max_parallel_workers; -reset enable_hashagg; -set force_parallel_mode=1; +-- gather merge test with 0 workers, with parallel leader +-- participation disabled (the leader will have to run the plan +-- despite the setting) +set parallel_leader_participation = off; +explain (costs off) + select string4 from tenk1 order by string4 limit 5; +select string4 from tenk1 order by string4 limit 5; +reset parallel_leader_participation; +reset max_parallel_workers; + +SAVEPOINT settings; +SET LOCAL force_parallel_mode = 1; explain (costs off) select stringu1::int2 from tenk1 where unique1 = 1; +ROLLBACK TO SAVEPOINT settings; + +-- exercise record typmod remapping between backends +CREATE FUNCTION make_record(n int) + RETURNS RECORD LANGUAGE plpgsql PARALLEL SAFE AS +$$ +BEGIN + RETURN CASE n + WHEN 1 THEN ROW(1) + WHEN 2 THEN ROW(1, 2) + WHEN 3 THEN ROW(1, 2, 3) + WHEN 4 THEN ROW(1, 2, 3, 4) + ELSE ROW(1, 2, 3, 4, 5) + END; +END; +$$; +SAVEPOINT settings; +SET LOCAL force_parallel_mode = 1; +SELECT make_record(x) FROM (SELECT generate_series(1, 5) x) ss ORDER BY x; +ROLLBACK TO SAVEPOINT settings; +DROP function make_record(n int); + +-- test the sanity of parallel query after the active role is dropped. +drop role if exists regress_parallel_worker; +create role regress_parallel_worker; +set role regress_parallel_worker; +reset session authorization; +drop role regress_parallel_worker; +set force_parallel_mode = 1; +select count(*) from tenk1; +reset force_parallel_mode; +reset role; + +-- Window function calculation can't be pushed to workers. 
+explain (costs off, verbose) + select count(*) from tenk1 a where (unique1, two) in + (select unique1, row_number() over() from tenk1 b); + + +-- LIMIT/OFFSET within sub-selects can't be pushed to workers. +explain (costs off) + select * from tenk1 a where two in + (select two from tenk1 b where stringu1 like '%AAAA' limit 3); -- to increase the parallel query test coverage +SAVEPOINT settings; +SET LOCAL force_parallel_mode = 1; EXPLAIN (analyze, timing off, summary off, costs off) SELECT * FROM tenk1; +ROLLBACK TO SAVEPOINT settings; -- provoke error in worker +SAVEPOINT settings; +SET LOCAL force_parallel_mode = 1; select stringu1::int2 from tenk1 where unique1 = 1; +ROLLBACK TO SAVEPOINT settings; + +-- test interaction with set-returning functions +SAVEPOINT settings; + +-- multiple subqueries under a single Gather node +-- must set parallel_setup_cost > 0 to discourage multiple Gather nodes +SET LOCAL parallel_setup_cost = 10; +EXPLAIN (COSTS OFF) +SELECT unique1 FROM tenk1 WHERE fivethous = tenthous + 1 +UNION ALL +SELECT unique1 FROM tenk1 WHERE fivethous = tenthous + 1; +ROLLBACK TO SAVEPOINT settings; + +-- can't use multiple subqueries under a single Gather node due to initPlans +EXPLAIN (COSTS OFF) +SELECT unique1 FROM tenk1 WHERE fivethous = + (SELECT unique1 FROM tenk1 WHERE fivethous = 1 LIMIT 1) +UNION ALL +SELECT unique1 FROM tenk1 WHERE fivethous = + (SELECT unique2 FROM tenk1 WHERE fivethous = 1 LIMIT 1) +ORDER BY 1; + +-- test interaction with SRFs +SELECT * FROM information_schema.foreign_data_wrapper_options +ORDER BY 1, 2, 3; + +-- test passing expanded-value representations to workers +CREATE FUNCTION make_some_array(int,int) returns int[] as +$$declare x int[]; + begin + x[1] := $1; + x[2] := $2; + return x; + end$$ language plpgsql parallel safe; +CREATE TABLE fooarr(f1 text, f2 int[], f3 text); +INSERT INTO fooarr VALUES('1', ARRAY[1,2], 'one'); + +PREPARE pstmt(text, int[]) AS SELECT * FROM fooarr WHERE f1 = $1 AND f2 = $2; +EXPLAIN (COSTS OFF) EXECUTE pstmt('1', make_some_array(1,2)); +EXECUTE pstmt('1', make_some_array(1,2)); +DEALLOCATE pstmt; + +-- test interaction between subquery and partial_paths +CREATE VIEW tenk1_vw_sec WITH (security_barrier) AS SELECT * FROM tenk1; +EXPLAIN (COSTS OFF) +SELECT 1 FROM tenk1_vw_sec + WHERE (SELECT sum(f1) FROM int4_tbl WHERE f1 < unique1) < 100; rollback; diff --git a/src/test/regress/sql/sequence.sql b/src/test/regress/sql/sequence.sql index b41c5a753d..a7b9e63372 100644 --- a/src/test/regress/sql/sequence.sql +++ b/src/test/regress/sql/sequence.sql @@ -61,6 +61,8 @@ INSERT INTO serialTest1 VALUES ('wrong', NULL); SELECT * FROM serialTest1; +SELECT pg_get_serial_sequence('serialTest1', 'f2'); + -- test smallserial / bigserial CREATE TABLE serialTest2 (f1 text, f2 serial, f3 smallserial, f4 serial2, f5 bigserial, f6 serial8); @@ -244,6 +246,10 @@ WHERE sequencename ~ ANY(ARRAY['sequence_test', 'serialtest']) SELECT * FROM pg_sequence_parameters('sequence_test4'::regclass); +\d sequence_test4 +\d serialtest2_f2_seq + + -- Test comments COMMENT ON SEQUENCE asdf IS 'won''t work'; COMMENT ON SEQUENCE sequence_test2 IS 'will work'; diff --git a/src/test/regress/sql/spgist.sql b/src/test/regress/sql/spgist.sql index 5896b50865..c72cf42a33 100644 --- a/src/test/regress/sql/spgist.sql +++ b/src/test/regress/sql/spgist.sql @@ -5,7 +5,7 @@ -- testing SP-GiST code itself. 
create table spgist_point_tbl(id int4, p point); -create index spgist_point_idx on spgist_point_tbl using spgist(p); +create index spgist_point_idx on spgist_point_tbl using spgist(p) with (fillfactor = 75); -- Test vacuum-root operation. It gets invoked when the root is also a leaf, -- i.e. the index is very small. @@ -30,6 +30,21 @@ delete from spgist_point_tbl where id < 10000; vacuum spgist_point_tbl; +-- Test rescan paths (cf. bug #15378) +-- use box and && rather than point, so that rescan happens when the +-- traverse stack is non-empty + +create table spgist_box_tbl(id serial, b box); +insert into spgist_box_tbl(b) +select box(point(i,j),point(i+s,j+s)) + from generate_series(1,100,5) i, + generate_series(1,100,5) j, + generate_series(1,10) s; +create index spgist_box_idx on spgist_box_tbl using spgist (b); + +select count(*) + from (values (point(5,5)),(point(8,8)),(point(12,12))) v(p) + where exists(select * from spgist_box_tbl b where b.b && box(v.p,v.p)); -- The point opclass's choose method only uses the spgMatchNode action, -- so the other actions are not tested by the above. Create an index using @@ -48,3 +63,11 @@ select g, 'baaaaaaaaaaaaaar' || g from generate_series(1, 1000) g; -- tuple to be moved to another page. insert into spgist_text_tbl (id, t) select -g, 'f' || repeat('o', 100-g) || 'surprise' from generate_series(1, 100) g; + +-- Test out-of-range fillfactor values +create index spgist_point_idx2 on spgist_point_tbl using spgist(p) with (fillfactor = 9); +create index spgist_point_idx2 on spgist_point_tbl using spgist(p) with (fillfactor = 101); + +-- Modify fillfactor in existing index +alter index spgist_point_idx set (fillfactor = 90); +reindex index spgist_point_idx; diff --git a/src/test/regress/sql/stats.sql b/src/test/regress/sql/stats.sql index 6e882bf3ac..2be7dde834 100644 --- a/src/test/regress/sql/stats.sql +++ b/src/test/regress/sql/stats.sql @@ -138,7 +138,10 @@ ROLLBACK; -- do a seqscan SELECT count(*) FROM tenk2; -- do an indexscan +-- make sure it is not a bitmap scan, which might skip fetching heap tuples +SET enable_bitmapscan TO off; SELECT count(*) FROM tenk2 WHERE unique1 = 1; +RESET enable_bitmapscan; -- We can't just call wait_for_stats() at this point, because we only -- transmit stats when the session goes idle, and we probably didn't diff --git a/src/test/regress/sql/strings.sql b/src/test/regress/sql/strings.sql index f9cfaeb44a..f2203ef1b1 100644 --- a/src/test/regress/sql/strings.sql +++ b/src/test/regress/sql/strings.sql @@ -188,6 +188,9 @@ SELECT regexp_split_to_array('the quick brown fox jumps over the lazy dog', 'nom SELECT regexp_split_to_array('123456','1'); SELECT regexp_split_to_array('123456','6'); SELECT regexp_split_to_array('123456','.'); +SELECT regexp_split_to_array('123456',''); +SELECT regexp_split_to_array('123456','(?:)'); +SELECT regexp_split_to_array('1',''); -- errors SELECT foo, length(foo) FROM regexp_split_to_table('thE QUick bROWn FOx jUMPs ovEr The lazy dOG', 'e', 'zippy') AS foo; SELECT regexp_split_to_array('thE QUick bROWn FOx jUMPs ovEr The lazy dOG', 'e', 'iz'); @@ -366,6 +369,23 @@ SELECT substr(f1, 99995) from toasttest; -- string length SELECT substr(f1, 99995, 10) from toasttest; +TRUNCATE TABLE toasttest; +INSERT INTO toasttest values (repeat('1234567890',300)); +INSERT INTO toasttest values (repeat('1234567890',300)); +INSERT INTO toasttest values (repeat('1234567890',300)); +INSERT INTO toasttest values (repeat('1234567890',300)); +-- expect >0 blocks +select 0 = 
pg_relation_size('pg_toast.pg_toast_'||(select oid from pg_class where relname = 'toasttest'))/current_setting('block_size')::integer as blocks; + +TRUNCATE TABLE toasttest; +ALTER TABLE toasttest set (toast_tuple_target = 4080); +INSERT INTO toasttest values (repeat('1234567890',300)); +INSERT INTO toasttest values (repeat('1234567890',300)); +INSERT INTO toasttest values (repeat('1234567890',300)); +INSERT INTO toasttest values (repeat('1234567890',300)); +-- expect 0 blocks +select 0 = pg_relation_size('pg_toast.pg_toast_'||(select oid from pg_class where relname = 'toasttest'))/current_setting('block_size')::integer as blocks; + DROP TABLE toasttest; -- @@ -489,6 +509,23 @@ select md5('ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789'::byt select md5('12345678901234567890123456789012345678901234567890123456789012345678901234567890'::bytea) = '57edf4a22be3c955ac49da2e2107b67a' AS "TRUE"; +-- +-- SHA-2 +-- +SET bytea_output TO hex; + +SELECT sha224(''); +SELECT sha224('The quick brown fox jumps over the lazy dog.'); + +SELECT sha256(''); +SELECT sha256('The quick brown fox jumps over the lazy dog.'); + +SELECT sha384(''); +SELECT sha384('The quick brown fox jumps over the lazy dog.'); + +SELECT sha512(''); +SELECT sha512('The quick brown fox jumps over the lazy dog.'); + -- -- test behavior of escape_string_warning and standard_conforming_strings options -- @@ -523,6 +560,7 @@ select 'a\\bcd' as f1, 'a\\b\'cd' as f2, 'a\\b\'''cd' as f3, 'abcd\\' as f4, ' -- -- Additional string functions -- +SET bytea_output TO escape; SELECT initcap('hi THOMAS'); diff --git a/src/test/regress/sql/subselect.sql b/src/test/regress/sql/subselect.sql index 2fc0e26ca0..843f511b3d 100644 --- a/src/test/regress/sql/subselect.sql +++ b/src/test/regress/sql/subselect.sql @@ -99,8 +99,8 @@ SELECT *, pg_typeof(f1) FROM -- ... unless there's context to suggest differently -explain verbose select '42' union all select '43'; -explain verbose select '42' union all select 43; +explain (verbose, costs off) select '42' union all select '43'; +explain (verbose, costs off) select '42' union all select 43; -- check materialization of an initplan reference (bug #14524) explain (verbose, costs off) @@ -376,6 +376,24 @@ select q from (select max(f1) from int4_tbl group by f1 order by f1) q; with q as (select max(f1) from int4_tbl group by f1 order by f1) select q from q; +-- +-- Test case for sublinks pulled up into joinaliasvars lists in an +-- inherited update/delete query +-- + +begin; -- this shouldn't delete anything, but be safe + +delete from road +where exists ( + select 1 + from + int4_tbl cross join + ( select f1, array(select q1 from int8_tbl) as arr + from text_tbl ) ss + where road.name = ss.f1 ); + +rollback; + -- -- Test case for sublinks pushed down into subselects via join alias expansion -- @@ -454,6 +472,16 @@ explain (verbose, costs off) create temp table nocolumns(); select exists(select * from nocolumns); +-- +-- Check behavior with a SubPlan in VALUES (bug #14924) +-- +select val.x + from generate_series(1,10) as s(i), + lateral ( + values ((select s.i + 1)), (s.i + 101) + ) as val(x) +where s.i < 10 and (select val.x) < 110; + -- -- Check sane behavior with nested IN SubLinks -- @@ -540,3 +568,60 @@ select * from where tattle(x, u); drop function tattle(x int, y int); + +-- +-- Test that LIMIT can be pushed to SORT through a subquery that just projects +-- columns. We check for that having happened by looking to see if EXPLAIN +-- ANALYZE shows that a top-N sort was used. 
We must suppress or filter away +-- all the non-invariant parts of the EXPLAIN ANALYZE output. +-- +create table sq_limit (pk int primary key, c1 int, c2 int); +insert into sq_limit values + (1, 1, 1), + (2, 2, 2), + (3, 3, 3), + (4, 4, 4), + (5, 1, 1), + (6, 2, 2), + (7, 3, 3), + (8, 4, 4); + +create function explain_sq_limit() returns setof text language plpgsql as +$$ +declare ln text; +begin + for ln in + explain (analyze, summary off, timing off, costs off) + select * from (select pk,c2 from sq_limit order by c1,pk) as x limit 3 + loop + ln := regexp_replace(ln, 'Memory: \S*', 'Memory: xxx'); + -- this case might occur if force_parallel_mode is on: + ln := regexp_replace(ln, 'Worker 0: Sort Method', 'Sort Method'); + return next ln; + end loop; +end; +$$; + +select * from explain_sq_limit(); + +select * from (select pk,c2 from sq_limit order by c1,pk) as x limit 3; + +drop function explain_sq_limit(); + +drop table sq_limit; + +-- +-- Ensure that backward scan direction isn't propagated into +-- expression subqueries (bug #15336) +-- + +begin; + +declare c1 scroll cursor for + select * from generate_series(1,4) i + where i <> all (values (2),(3)); + +move forward all in c1; +fetch backward all in c1; + +commit; diff --git a/src/test/regress/sql/temp.sql b/src/test/regress/sql/temp.sql index 5183c727f5..1beccc6ceb 100644 --- a/src/test/regress/sql/temp.sql +++ b/src/test/regress/sql/temp.sql @@ -151,3 +151,76 @@ select whoami(); select pg_temp.whoami(); drop table public.whereami; + +-- For partitioned temp tables, ON COMMIT actions ignore storage-less +-- partitioned tables. +begin; +create temp table temp_parted_oncommit (a int) + partition by list (a) on commit delete rows; +create temp table temp_parted_oncommit_1 + partition of temp_parted_oncommit + for values in (1) on commit delete rows; +insert into temp_parted_oncommit values (1); +commit; +-- partitions are emptied by the previous commit +select * from temp_parted_oncommit; +drop table temp_parted_oncommit; + +-- Check dependencies between ON COMMIT actions with a partitioned +-- table and its partitions. Using ON COMMIT DROP on a parent removes +-- the whole set. +begin; +create temp table temp_parted_oncommit_test (a int) + partition by list (a) on commit drop; +create temp table temp_parted_oncommit_test1 + partition of temp_parted_oncommit_test + for values in (1) on commit delete rows; +create temp table temp_parted_oncommit_test2 + partition of temp_parted_oncommit_test + for values in (2) on commit drop; +insert into temp_parted_oncommit_test values (1), (2); +commit; +-- no relations remain in this case. +select relname from pg_class where relname like 'temp_parted_oncommit_test%'; +-- Using ON COMMIT DELETE on a partitioned table does not remove +-- all rows if partitions preserve their data. +begin; +create temp table temp_parted_oncommit_test (a int) + partition by list (a) on commit delete rows; +create temp table temp_parted_oncommit_test1 + partition of temp_parted_oncommit_test + for values in (1) on commit preserve rows; +create temp table temp_parted_oncommit_test2 + partition of temp_parted_oncommit_test + for values in (2) on commit drop; +insert into temp_parted_oncommit_test values (1), (2); +commit; +-- Data from the remaining partition is still here as its rows are +-- preserved. +select * from temp_parted_oncommit_test; +-- two relations remain in this case. 
+select relname from pg_class where relname like 'temp_parted_oncommit_test%'; +drop table temp_parted_oncommit_test; + +-- Check dependencies between ON COMMIT actions with inheritance trees. +-- Using ON COMMIT DROP on a parent removes the whole set. +begin; +create temp table temp_inh_oncommit_test (a int) on commit drop; +create temp table temp_inh_oncommit_test1 () + inherits(temp_inh_oncommit_test) on commit delete rows; +insert into temp_inh_oncommit_test1 values (1); +commit; +-- no relations remain in this case +select relname from pg_class where relname like 'temp_inh_oncommit_test%'; +-- Data on the parent is removed, and the child goes away. +begin; +create temp table temp_inh_oncommit_test (a int) on commit delete rows; +create temp table temp_inh_oncommit_test1 () + inherits(temp_inh_oncommit_test) on commit drop; +insert into temp_inh_oncommit_test1 values (1); +insert into temp_inh_oncommit_test values (1); +commit; +select * from temp_inh_oncommit_test; +-- one relation remains +select relname from pg_class where relname like 'temp_inh_oncommit_test%'; +drop table temp_inh_oncommit_test; diff --git a/src/test/regress/sql/timestamptz.sql b/src/test/regress/sql/timestamptz.sql index 97e57a2403..f17d153fcc 100644 --- a/src/test/regress/sql/timestamptz.sql +++ b/src/test/regress/sql/timestamptz.sql @@ -248,21 +248,25 @@ SELECT '' AS to_char_10, to_char(d1, 'IYYY IYY IY I IW IDDD ID') SELECT '' AS to_char_11, to_char(d1, 'FMIYYY FMIYY FMIY FMI FMIW FMIDDD FMID') FROM TIMESTAMPTZ_TBL; --- Check OF with various zone offsets, particularly fractional hours +-- Check OF, TZH, TZM with various zone offsets, particularly fractional hours SET timezone = '00:00'; -SELECT to_char(now(), 'OF'); +SELECT to_char(now(), 'OF') as "OF", to_char(now(), 'TZH:TZM') as "TZH:TZM"; SET timezone = '+02:00'; -SELECT to_char(now(), 'OF'); +SELECT to_char(now(), 'OF') as "OF", to_char(now(), 'TZH:TZM') as "TZH:TZM"; SET timezone = '-13:00'; -SELECT to_char(now(), 'OF'); +SELECT to_char(now(), 'OF') as "OF", to_char(now(), 'TZH:TZM') as "TZH:TZM"; SET timezone = '-00:30'; -SELECT to_char(now(), 'OF'); +SELECT to_char(now(), 'OF') as "OF", to_char(now(), 'TZH:TZM') as "TZH:TZM"; SET timezone = '00:30'; -SELECT to_char(now(), 'OF'); +SELECT to_char(now(), 'OF') as "OF", to_char(now(), 'TZH:TZM') as "TZH:TZM"; SET timezone = '-04:30'; -SELECT to_char(now(), 'OF'); +SELECT to_char(now(), 'OF') as "OF", to_char(now(), 'TZH:TZM') as "TZH:TZM"; SET timezone = '04:30'; -SELECT to_char(now(), 'OF'); +SELECT to_char(now(), 'OF') as "OF", to_char(now(), 'TZH:TZM') as "TZH:TZM"; +SET timezone = '-04:15'; +SELECT to_char(now(), 'OF') as "OF", to_char(now(), 'TZH:TZM') as "TZH:TZM"; +SET timezone = '04:15'; +SELECT to_char(now(), 'OF') as "OF", to_char(now(), 'TZH:TZM') as "TZH:TZM"; RESET timezone; CREATE TABLE TIMESTAMPTZ_TST (a int , b timestamptz); diff --git a/src/test/regress/sql/tinterval.sql b/src/test/regress/sql/tinterval.sql deleted file mode 100644 index 42399ce694..0000000000 --- a/src/test/regress/sql/tinterval.sql +++ /dev/null @@ -1,97 +0,0 @@ --- --- TINTERVAL --- - -CREATE TABLE TINTERVAL_TBL (f1 tinterval); - --- Should accept any abstime, --- so do not bother with extensive testing of values - -INSERT INTO TINTERVAL_TBL (f1) - VALUES ('["-infinity" "infinity"]'); - -INSERT INTO TINTERVAL_TBL (f1) - VALUES ('["May 10, 1947 23:59:12" "Jan 14, 1973 03:14:21"]'); - -INSERT INTO TINTERVAL_TBL (f1) - VALUES ('["Sep 4, 1983 23:59:12" "Oct 4, 1983 23:59:12"]'); - -INSERT INTO TINTERVAL_TBL (f1) - VALUES 
('["epoch" "Mon May 1 00:30:30 1995"]'); - -INSERT INTO TINTERVAL_TBL (f1) - VALUES ('["Feb 15 1990 12:15:03" "2001-09-23 11:12:13"]'); - - --- badly formatted tintervals -INSERT INTO TINTERVAL_TBL (f1) - VALUES ('["bad time specifications" ""]'); - -INSERT INTO TINTERVAL_TBL (f1) - VALUES ('["" "infinity"]'); - --- test tinterval operators - -SELECT '' AS five, * FROM TINTERVAL_TBL; - --- length == -SELECT '' AS one, t.* - FROM TINTERVAL_TBL t - WHERE t.f1 #= '@ 1 months'; - --- length <> -SELECT '' AS three, t.* - FROM TINTERVAL_TBL t - WHERE t.f1 #<> '@ 1 months'; - --- length < -SELECT '' AS zero, t.* - FROM TINTERVAL_TBL t - WHERE t.f1 #< '@ 1 month'; - --- length <= -SELECT '' AS one, t.* - FROM TINTERVAL_TBL t - WHERE t.f1 #<= '@ 1 month'; - --- length > -SELECT '' AS three, t.* - FROM TINTERVAL_TBL t - WHERE t.f1 #> '@ 1 year'; - --- length >= -SELECT '' AS three, t.* - FROM TINTERVAL_TBL t - WHERE t.f1 #>= '@ 3 years'; - --- overlaps -SELECT '' AS three, t1.* - FROM TINTERVAL_TBL t1 - WHERE t1.f1 && - tinterval '["Aug 15 14:23:19 1983" "Sep 16 14:23:19 1983"]'; - -SELECT '' AS five, t1.f1, t2.f1 - FROM TINTERVAL_TBL t1, TINTERVAL_TBL t2 - WHERE t1.f1 && t2.f1 and - t1.f1 = t2.f1 - ORDER BY t1.f1, t2.f1; - -SELECT '' AS fourteen, t1.f1 AS interval1, t2.f1 AS interval2 - FROM TINTERVAL_TBL t1, TINTERVAL_TBL t2 - WHERE t1.f1 && t2.f1 and not t1.f1 = t2.f1 - ORDER BY interval1, interval2; - --- contains -SELECT '' AS five, t1.f1 - FROM TINTERVAL_TBL t1 - WHERE not t1.f1 << - tinterval '["Aug 15 14:23:19 1980" "Sep 16 14:23:19 1990"]' - ORDER BY t1.f1; - --- make time interval -SELECT '' AS three, t1.f1 - FROM TINTERVAL_TBL t1 - WHERE t1.f1 && - (abstime 'Aug 15 14:23:19 1983' <#> - abstime 'Sep 16 14:23:19 1983') - ORDER BY t1.f1; diff --git a/src/test/regress/sql/transactions.sql b/src/test/regress/sql/transactions.sql index bf9cb05971..2e3739fd6c 100644 --- a/src/test/regress/sql/transactions.sql +++ b/src/test/regress/sql/transactions.sql @@ -100,51 +100,51 @@ COMMIT; -- Subtransactions, basic tests -- create & drop tables SET SESSION CHARACTERISTICS AS TRANSACTION READ WRITE; -CREATE TABLE foobar (a int); +CREATE TABLE trans_foobar (a int); BEGIN; - CREATE TABLE foo (a int); + CREATE TABLE trans_foo (a int); SAVEPOINT one; - DROP TABLE foo; - CREATE TABLE bar (a int); + DROP TABLE trans_foo; + CREATE TABLE trans_bar (a int); ROLLBACK TO SAVEPOINT one; RELEASE SAVEPOINT one; SAVEPOINT two; - CREATE TABLE baz (a int); + CREATE TABLE trans_baz (a int); RELEASE SAVEPOINT two; - drop TABLE foobar; - CREATE TABLE barbaz (a int); + drop TABLE trans_foobar; + CREATE TABLE trans_barbaz (a int); COMMIT; --- should exist: barbaz, baz, foo -SELECT * FROM foo; -- should be empty -SELECT * FROM bar; -- shouldn't exist -SELECT * FROM barbaz; -- should be empty -SELECT * FROM baz; -- should be empty +-- should exist: trans_barbaz, trans_baz, trans_foo +SELECT * FROM trans_foo; -- should be empty +SELECT * FROM trans_bar; -- shouldn't exist +SELECT * FROM trans_barbaz; -- should be empty +SELECT * FROM trans_baz; -- should be empty -- inserts BEGIN; - INSERT INTO foo VALUES (1); + INSERT INTO trans_foo VALUES (1); SAVEPOINT one; - INSERT into bar VALUES (1); + INSERT into trans_bar VALUES (1); ROLLBACK TO one; RELEASE SAVEPOINT one; SAVEPOINT two; - INSERT into barbaz VALUES (1); + INSERT into trans_barbaz VALUES (1); RELEASE two; SAVEPOINT three; SAVEPOINT four; - INSERT INTO foo VALUES (2); + INSERT INTO trans_foo VALUES (2); RELEASE SAVEPOINT four; ROLLBACK TO SAVEPOINT three; RELEASE SAVEPOINT 
three; - INSERT INTO foo VALUES (3); + INSERT INTO trans_foo VALUES (3); COMMIT; -SELECT * FROM foo; -- should have 1 and 3 -SELECT * FROM barbaz; -- should have 1 +SELECT * FROM trans_foo; -- should have 1 and 3 +SELECT * FROM trans_barbaz; -- should have 1 -- test whole-tree commit BEGIN; SAVEPOINT one; - SELECT foo; + SELECT trans_foo; ROLLBACK TO SAVEPOINT one; RELEASE SAVEPOINT one; SAVEPOINT two; @@ -179,7 +179,7 @@ BEGIN; INSERT INTO savepoints VALUES (4); SAVEPOINT one; INSERT INTO savepoints VALUES (5); - SELECT foo; + SELECT trans_foo; COMMIT; SELECT * FROM savepoints; @@ -329,9 +329,9 @@ BEGIN; INSERT INTO koju VALUES (1); ROLLBACK; -DROP TABLE foo; -DROP TABLE baz; -DROP TABLE barbaz; +DROP TABLE trans_foo; +DROP TABLE trans_baz; +DROP TABLE trans_barbaz; -- test case for problems with revalidating an open relation during abort @@ -419,6 +419,60 @@ DROP FUNCTION create_temp_tab(); DROP FUNCTION invert(x float8); +-- Test assorted behaviors around the implicit transaction block created +-- when multiple SQL commands are sent in a single Query message. These +-- tests rely on the fact that psql will not break SQL commands apart at a +-- backslash-quoted semicolon, but will send them as one Query. + +create temp table i_table (f1 int); + +-- psql will show only the last result in a multi-statement Query +SELECT 1\; SELECT 2\; SELECT 3; + +-- this implicitly commits: +insert into i_table values(1)\; select * from i_table; +-- 1/0 error will cause rolling back the whole implicit transaction +insert into i_table values(2)\; select * from i_table\; select 1/0; +select * from i_table; + +rollback; -- we are not in a transaction at this point + +-- can use regular begin/commit/rollback within a single Query +begin\; insert into i_table values(3)\; commit; +rollback; -- we are not in a transaction at this point +begin\; insert into i_table values(4)\; rollback; +rollback; -- we are not in a transaction at this point + +-- begin converts implicit transaction into a regular one that +-- can extend past the end of the Query +select 1\; begin\; insert into i_table values(5); +commit; +select 1\; begin\; insert into i_table values(6); +rollback; + +-- commit in implicit-transaction state commits but issues a warning. +insert into i_table values(7)\; commit\; insert into i_table values(8)\; select 1/0; +-- similarly, rollback aborts but issues a warning. +insert into i_table values(9)\; rollback\; select 2; + +select * from i_table; + +rollback; -- we are not in a transaction at this point + +-- implicit transaction block is still a transaction block, for e.g. VACUUM +SELECT 1\; VACUUM; +SELECT 1\; COMMIT\; VACUUM; + +-- we disallow savepoint-related commands in implicit-transaction state +SELECT 1\; SAVEPOINT sp; +SELECT 1\; COMMIT\; SAVEPOINT sp; +ROLLBACK TO SAVEPOINT sp\; SELECT 2; +SELECT 2\; RELEASE SAVEPOINT sp\; SELECT 3; + +-- but this is OK, because the BEGIN converts it to a regular xact +SELECT 1\; BEGIN\; SAVEPOINT sp\; ROLLBACK TO SAVEPOINT sp\; COMMIT; + + -- Test for successful cleanup of an aborted transaction at session exit. -- THIS MUST BE THE LAST TEST IN THIS FILE. 
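
-- An illustrative sketch (not part of the patch itself) of the implicit
-- transaction block exercised above: psql sends the whole line as one Query
-- because the semicolons are backslash-quoted, and the server wraps the
-- statements in a single implicit transaction. The table name demo_i is
-- hypothetical; the expected results restate the rules in the test comments.
create temp table demo_i (f1 int);

-- The division by zero aborts the entire implicit transaction, so the insert
-- sent in the same Query is rolled back with it.
insert into demo_i values (1)\; select 1/0;
select * from demo_i;   -- expected: no rows

-- An explicit BEGIN inside the Query converts it into a regular transaction
-- block that stays open after the Query ends.
select 1\; begin\; insert into demo_i values (2);
commit;
select * from demo_i;   -- expected: one row

drop table demo_i;
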
diff --git a/src/test/regress/sql/triggers.sql b/src/test/regress/sql/triggers.sql index b10159a1cf..d7dfd753be 100644 --- a/src/test/regress/sql/triggers.sql +++ b/src/test/regress/sql/triggers.sql @@ -26,13 +26,13 @@ create unique index pkeys_i on pkeys (pkey1, pkey2); create trigger check_fkeys_pkey_exist before insert or update on fkeys for each row - execute procedure + execute function check_primary_key ('fkey1', 'fkey2', 'pkeys', 'pkey1', 'pkey2'); create trigger check_fkeys_pkey2_exist before insert or update on fkeys for each row - execute procedure check_primary_key ('fkey3', 'fkeys2', 'pkey23'); + execute function check_primary_key ('fkey3', 'fkeys2', 'pkey23'); -- -- For fkeys2: @@ -92,44 +92,32 @@ delete from pkeys where pkey1 = 40 and pkey2 = '4'; update pkeys set pkey1 = 7, pkey2 = '70' where pkey1 = 50 and pkey2 = '5'; update pkeys set pkey1 = 7, pkey2 = '70' where pkey1 = 10 and pkey2 = '1'; +SELECT trigger_name, event_manipulation, event_object_schema, event_object_table, + action_order, action_condition, action_orientation, action_timing, + action_reference_old_table, action_reference_new_table + FROM information_schema.triggers + WHERE event_object_table in ('pkeys', 'fkeys', 'fkeys2') + ORDER BY trigger_name COLLATE "C", 2; + DROP TABLE pkeys; DROP TABLE fkeys; DROP TABLE fkeys2; --- -- I've disabled the funny_dup17 test because the new semantics --- -- of AFTER ROW triggers, which get now fired at the end of a --- -- query always, cause funny_dup17 to enter an endless loop. --- -- --- -- Jan --- --- create table dup17 (x int4); --- --- create trigger dup17_before --- before insert on dup17 --- for each row --- execute procedure --- funny_dup17 () --- ; --- --- insert into dup17 values (17); --- select count(*) from dup17; --- insert into dup17 values (17); --- select count(*) from dup17; --- --- drop trigger dup17_before on dup17; --- --- create trigger dup17_after --- after insert on dup17 --- for each row --- execute procedure --- funny_dup17 () --- ; --- insert into dup17 values (13); --- select count(*) from dup17 where x = 13; --- insert into dup17 values (13); --- select count(*) from dup17 where x = 13; --- --- DROP TABLE dup17; +-- Check behavior when trigger returns unmodified trigtuple +create table trigtest (f1 int, f2 text); + +create trigger trigger_return_old + before insert or delete or update on trigtest + for each row execute procedure trigger_return_old(); + +insert into trigtest values(1, 'foo'); +select * from trigtest; +update trigtest set f2 = f2 || 'bar'; +select * from trigtest; +delete from trigtest; +select * from trigtest; + +drop table trigtest; create sequence ttdummy_seq increment 10 start 0 minvalue 0; @@ -279,6 +267,12 @@ CREATE TRIGGER insert_when BEFORE INSERT ON main_table FOR EACH STATEMENT WHEN (true) EXECUTE PROCEDURE trigger_func('insert_when'); CREATE TRIGGER delete_when AFTER DELETE ON main_table FOR EACH STATEMENT WHEN (true) EXECUTE PROCEDURE trigger_func('delete_when'); +SELECT trigger_name, event_manipulation, event_object_schema, event_object_table, + action_order, action_condition, action_orientation, action_timing, + action_reference_old_table, action_reference_new_table + FROM information_schema.triggers + WHERE event_object_table IN ('main_table') + ORDER BY trigger_name COLLATE "C", 2; INSERT INTO main_table (a) VALUES (123), (456); COPY main_table FROM stdin; 123 999 @@ -297,6 +291,16 @@ DROP TRIGGER delete_a ON main_table; DROP TRIGGER insert_when ON main_table; DROP TRIGGER delete_when ON main_table; +-- Test WHEN 
condition accessing system columns. +create table table_with_oids(a int) with oids; +insert into table_with_oids values (1); +create trigger oid_unchanged_trig after update on table_with_oids + for each row + when (new.oid = old.oid AND new.oid <> 0) + execute procedure trigger_func('after_upd_oid_unchanged'); +update table_with_oids set a = a + 1; +drop table table_with_oids; + -- Test column-level triggers DROP TRIGGER after_upd_row_trig ON main_table; @@ -401,6 +405,11 @@ alter table trigtest disable trigger user; insert into trigtest default values; alter table trigtest enable trigger trigtest_a_stmt_tg; insert into trigtest default values; +set session_replication_role = replica; +insert into trigtest default values; -- does not trigger +alter table trigtest enable always trigger trigtest_a_stmt_tg; +insert into trigtest default values; -- now it does +reset session_replication_role; insert into trigtest2 values(1); insert into trigtest2 values(2); delete from trigtest where i=2; @@ -1287,7 +1296,46 @@ drop view my_view; drop table my_table; -- --- Verify that per-statement triggers are fired for partitioned tables +-- Verify cases that are unsupported with partitioned tables +-- +create table parted_trig (a int) partition by list (a); +create function trigger_nothing() returns trigger + language plpgsql as $$ begin end; $$; +create trigger failed before insert or update or delete on parted_trig + for each row execute procedure trigger_nothing(); +create trigger failed instead of update on parted_trig + for each row execute procedure trigger_nothing(); +create trigger failed after update on parted_trig + referencing old table as old_table + for each row execute procedure trigger_nothing(); +drop table parted_trig; + +-- +-- Verify trigger creation for partitioned tables, and drop behavior +-- +create table trigpart (a int, b int) partition by range (a); +create table trigpart1 partition of trigpart for values from (0) to (1000); +create trigger trg1 after insert on trigpart for each row execute procedure trigger_nothing(); +create table trigpart2 partition of trigpart for values from (1000) to (2000); +create table trigpart3 (like trigpart); +alter table trigpart attach partition trigpart3 for values from (2000) to (3000); +select tgrelid::regclass, tgname, tgfoid::regproc from pg_trigger + where tgrelid::regclass::text like 'trigpart%' order by tgrelid::regclass::text; +drop trigger trg1 on trigpart1; -- fail +drop trigger trg1 on trigpart2; -- fail +drop trigger trg1 on trigpart3; -- fail +drop table trigpart2; -- ok, trigger should be gone in that partition +select tgrelid::regclass, tgname, tgfoid::regproc from pg_trigger + where tgrelid::regclass::text like 'trigpart%' order by tgrelid::regclass::text; +drop trigger trg1 on trigpart; -- ok, all gone +select tgrelid::regclass, tgname, tgfoid::regproc from pg_trigger + where tgrelid::regclass::text like 'trigpart%' order by tgrelid::regclass::text; + +drop table trigpart; +drop function trigger_nothing(); + +-- +-- Verify that triggers are fired for partitioned tables -- create table parted_stmt_trig (a int) partition by list (a); create table parted_stmt_trig1 partition of parted_stmt_trig for values in (1); @@ -1299,7 +1347,7 @@ create table parted2_stmt_trig2 partition of parted2_stmt_trig for values in (2) create or replace function trigger_notice() returns trigger as $$ begin - raise notice 'trigger on % % % for %', TG_TABLE_NAME, TG_WHEN, TG_OP, TG_LEVEL; + raise notice 'trigger % on % % % for %', TG_NAME, TG_TABLE_NAME, 
TG_WHEN, TG_OP, TG_LEVEL; if TG_LEVEL = 'ROW' then return NEW; end if; @@ -1307,7 +1355,7 @@ create or replace function trigger_notice() returns trigger as $$ end; $$ language plpgsql; --- insert/update/delete statment-level triggers on the parent +-- insert/update/delete statement-level triggers on the parent create trigger trig_ins_before before insert on parted_stmt_trig for each statement execute procedure trigger_notice(); create trigger trig_ins_after after insert on parted_stmt_trig @@ -1321,28 +1369,40 @@ create trigger trig_del_before before delete on parted_stmt_trig create trigger trig_del_after after delete on parted_stmt_trig for each statement execute procedure trigger_notice(); +-- insert/update/delete row-level triggers on the parent +create trigger trig_ins_after_parent after insert on parted_stmt_trig + for each row execute procedure trigger_notice(); +create trigger trig_upd_after_parent after update on parted_stmt_trig + for each row execute procedure trigger_notice(); +create trigger trig_del_after_parent after delete on parted_stmt_trig + for each row execute procedure trigger_notice(); + -- insert/update/delete row-level triggers on the first partition -create trigger trig_ins_before before insert on parted_stmt_trig1 +create trigger trig_ins_before_child before insert on parted_stmt_trig1 + for each row execute procedure trigger_notice(); +create trigger trig_ins_after_child after insert on parted_stmt_trig1 for each row execute procedure trigger_notice(); -create trigger trig_ins_after after insert on parted_stmt_trig1 +create trigger trig_upd_before_child before update on parted_stmt_trig1 for each row execute procedure trigger_notice(); -create trigger trig_upd_before before update on parted_stmt_trig1 +create trigger trig_upd_after_child after update on parted_stmt_trig1 for each row execute procedure trigger_notice(); -create trigger trig_upd_after after update on parted_stmt_trig1 +create trigger trig_del_before_child before delete on parted_stmt_trig1 + for each row execute procedure trigger_notice(); +create trigger trig_del_after_child after delete on parted_stmt_trig1 for each row execute procedure trigger_notice(); -- insert/update/delete statement-level triggers on the parent -create trigger trig_ins_before before insert on parted2_stmt_trig +create trigger trig_ins_before_3 before insert on parted2_stmt_trig for each statement execute procedure trigger_notice(); -create trigger trig_ins_after after insert on parted2_stmt_trig +create trigger trig_ins_after_3 after insert on parted2_stmt_trig for each statement execute procedure trigger_notice(); -create trigger trig_upd_before before update on parted2_stmt_trig +create trigger trig_upd_before_3 before update on parted2_stmt_trig for each statement execute procedure trigger_notice(); -create trigger trig_upd_after after update on parted2_stmt_trig +create trigger trig_upd_after_3 after update on parted2_stmt_trig for each statement execute procedure trigger_notice(); -create trigger trig_del_before before delete on parted2_stmt_trig +create trigger trig_del_before_3 before delete on parted2_stmt_trig for each statement execute procedure trigger_notice(); -create trigger trig_del_after after delete on parted2_stmt_trig +create trigger trig_del_after_3 after delete on parted2_stmt_trig for each statement execute procedure trigger_notice(); with ins (a) as ( @@ -1366,8 +1426,183 @@ copy parted_stmt_trig1(a) from stdin; 1 \. 
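
-- An illustrative sketch (not part of the patch itself) of why the triggers
-- above were renamed to distinct per-level names: a row-level trigger created
-- on a partitioned parent is cloned onto each partition, so it fires for rows
-- routed to any leaf, alongside any triggers the partitions define themselves.
-- The names demo_parted*, demo_notice and demo_ins_after are hypothetical.
create table demo_parted (a int) partition by list (a);
create table demo_parted1 partition of demo_parted for values in (1);
create table demo_parted2 partition of demo_parted for values in (2);
create function demo_notice() returns trigger language plpgsql as
  $$ begin raise notice '% fired on %', TG_NAME, TG_TABLE_NAME; return new; end; $$;
create trigger demo_ins_after after insert on demo_parted
  for each row execute procedure demo_notice();
insert into demo_parted values (1), (2);   -- expect one notice per row, naming the leaf
drop table demo_parted;
drop function demo_notice();
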
+-- Disabling a trigger in the parent table should disable children triggers too +alter table parted_stmt_trig disable trigger trig_ins_after_parent; +insert into parted_stmt_trig values (1); +alter table parted_stmt_trig enable trigger trig_ins_after_parent; +insert into parted_stmt_trig values (1); + drop table parted_stmt_trig, parted2_stmt_trig; +-- Verify that triggers fire in alphabetical order +create table parted_trig (a int) partition by range (a); +create table parted_trig_1 partition of parted_trig for values from (0) to (1000) + partition by range (a); +create table parted_trig_1_1 partition of parted_trig_1 for values from (0) to (100); +create table parted_trig_2 partition of parted_trig for values from (1000) to (2000); +create trigger zzz after insert on parted_trig for each row execute procedure trigger_notice(); +create trigger mmm after insert on parted_trig_1_1 for each row execute procedure trigger_notice(); +create trigger aaa after insert on parted_trig_1 for each row execute procedure trigger_notice(); +create trigger bbb after insert on parted_trig for each row execute procedure trigger_notice(); +create trigger qqq after insert on parted_trig_1_1 for each row execute procedure trigger_notice(); +insert into parted_trig values (50), (1500); +drop table parted_trig; + +-- test irregular partitions (i.e., different column definitions), +-- including that the WHEN clause works +create function bark(text) returns bool language plpgsql immutable + as $$ begin raise notice '% <- woof!', $1; return true; end; $$; +create or replace function trigger_notice_ab() returns trigger as $$ + begin + raise notice 'trigger % on % % % for %: (a,b)=(%,%)', + TG_NAME, TG_TABLE_NAME, TG_WHEN, TG_OP, TG_LEVEL, + NEW.a, NEW.b; + if TG_LEVEL = 'ROW' then + return NEW; + end if; + return null; + end; + $$ language plpgsql; +create table parted_irreg_ancestor (fd text, b text, fd2 int, fd3 int, a int) + partition by range (b); +alter table parted_irreg_ancestor drop column fd, + drop column fd2, drop column fd3; +create table parted_irreg (fd int, a int, fd2 int, b text) + partition by range (b); +alter table parted_irreg drop column fd, drop column fd2; +alter table parted_irreg_ancestor attach partition parted_irreg + for values from ('aaaa') to ('zzzz'); +create table parted1_irreg (b text, fd int, a int); +alter table parted1_irreg drop column fd; +alter table parted_irreg attach partition parted1_irreg + for values from ('aaaa') to ('bbbb'); +create trigger parted_trig after insert on parted_irreg + for each row execute procedure trigger_notice_ab(); +create trigger parted_trig_odd after insert on parted_irreg for each row + when (bark(new.b) AND new.a % 2 = 1) execute procedure trigger_notice_ab(); +-- we should hear barking for every insert, but parted_trig_odd only emits +-- noise for odd values of a. parted_trig does it for all inserts. 
+insert into parted_irreg values (1, 'aardvark'), (2, 'aanimals'); +insert into parted1_irreg values ('aardwolf', 2); +insert into parted_irreg_ancestor values ('aasvogel', 3); +drop table parted_irreg_ancestor; + +-- +-- Constraint triggers and partitioned tables +create table parted_constr_ancestor (a int, b text) + partition by range (b); +create table parted_constr (a int, b text) + partition by range (b); +alter table parted_constr_ancestor attach partition parted_constr + for values from ('aaaa') to ('zzzz'); +create table parted1_constr (a int, b text); +alter table parted_constr attach partition parted1_constr + for values from ('aaaa') to ('bbbb'); +create constraint trigger parted_trig after insert on parted_constr_ancestor + deferrable + for each row execute procedure trigger_notice_ab(); +create constraint trigger parted_trig_two after insert on parted_constr + deferrable initially deferred + for each row when (bark(new.b) AND new.a % 2 = 1) + execute procedure trigger_notice_ab(); + +-- The immediate constraint is fired immediately; the WHEN clause of the +-- deferred constraint is also called immediately. The deferred constraint +-- is fired at commit time. +begin; +insert into parted_constr values (1, 'aardvark'); +insert into parted1_constr values (2, 'aardwolf'); +insert into parted_constr_ancestor values (3, 'aasvogel'); +commit; + +-- The WHEN clause is immediate, and both constraint triggers are fired at +-- commit time. +begin; +set constraints parted_trig deferred; +insert into parted_constr values (1, 'aardvark'); +insert into parted1_constr values (2, 'aardwolf'), (3, 'aasvogel'); +commit; +drop table parted_constr_ancestor; +drop function bark(text); + +-- Test that the WHEN clause is set properly to partitions +create table parted_trigger (a int, b text) partition by range (a); +create table parted_trigger_1 partition of parted_trigger for values from (0) to (1000); +create table parted_trigger_2 (drp int, a int, b text); +alter table parted_trigger_2 drop column drp; +alter table parted_trigger attach partition parted_trigger_2 for values from (1000) to (2000); +create trigger parted_trigger after update on parted_trigger + for each row when (new.a % 2 = 1 and length(old.b) >= 2) execute procedure trigger_notice_ab(); +create table parted_trigger_3 (b text, a int) partition by range (length(b)); +create table parted_trigger_3_1 partition of parted_trigger_3 for values from (1) to (3); +create table parted_trigger_3_2 partition of parted_trigger_3 for values from (3) to (5); +alter table parted_trigger attach partition parted_trigger_3 for values from (2000) to (3000); +insert into parted_trigger values + (0, 'a'), (1, 'bbb'), (2, 'bcd'), (3, 'c'), + (1000, 'c'), (1001, 'ddd'), (1002, 'efg'), (1003, 'f'), + (2000, 'e'), (2001, 'fff'), (2002, 'ghi'), (2003, 'h'); +update parted_trigger set a = a + 2; -- notice for odd 'a' values, long 'b' values +drop table parted_trigger; + +-- try a constraint trigger, also +create table parted_referenced (a int); +create table unparted_trigger (a int, b text); -- for comparison purposes +create table parted_trigger (a int, b text) partition by range (a); +create table parted_trigger_1 partition of parted_trigger for values from (0) to (1000); +create table parted_trigger_2 (drp int, a int, b text); +alter table parted_trigger_2 drop column drp; +alter table parted_trigger attach partition parted_trigger_2 for values from (1000) to (2000); +create constraint trigger parted_trigger after update on parted_trigger + from 
parted_referenced + for each row execute procedure trigger_notice_ab(); +create constraint trigger parted_trigger after update on unparted_trigger + from parted_referenced + for each row execute procedure trigger_notice_ab(); +create table parted_trigger_3 (b text, a int) partition by range (length(b)); +create table parted_trigger_3_1 partition of parted_trigger_3 for values from (1) to (3); +create table parted_trigger_3_2 partition of parted_trigger_3 for values from (3) to (5); +alter table parted_trigger attach partition parted_trigger_3 for values from (2000) to (3000); +select tgname, conname, t.tgrelid::regclass, t.tgconstrrelid::regclass, + c.conrelid::regclass, c.confrelid::regclass + from pg_trigger t join pg_constraint c on (t.tgconstraint = c.oid) + where tgname = 'parted_trigger' + order by t.tgrelid::regclass::text; +drop table parted_referenced, parted_trigger, unparted_trigger; + +-- verify that the "AFTER UPDATE OF columns" event is propagated correctly +create table parted_trigger (a int, b text) partition by range (a); +create table parted_trigger_1 partition of parted_trigger for values from (0) to (1000); +create table parted_trigger_2 (drp int, a int, b text); +alter table parted_trigger_2 drop column drp; +alter table parted_trigger attach partition parted_trigger_2 for values from (1000) to (2000); +create trigger parted_trigger after update of b on parted_trigger + for each row execute procedure trigger_notice_ab(); +create table parted_trigger_3 (b text, a int) partition by range (length(b)); +create table parted_trigger_3_1 partition of parted_trigger_3 for values from (1) to (4); +create table parted_trigger_3_2 partition of parted_trigger_3 for values from (4) to (8); +alter table parted_trigger attach partition parted_trigger_3 for values from (2000) to (3000); +insert into parted_trigger values (0, 'a'), (1000, 'c'), (2000, 'e'), (2001, 'eeee'); +update parted_trigger set a = a + 2; -- no notices here +update parted_trigger set b = b || 'b'; -- all triggers should fire +drop table parted_trigger; + +drop function trigger_notice_ab(); + +-- Make sure we don't end up with unnecessary copies of triggers, when +-- cloning them. +create table trg_clone (a int) partition by range (a); +create table trg_clone1 partition of trg_clone for values from (0) to (1000); +alter table trg_clone add constraint uniq unique (a) deferrable; +create table trg_clone2 partition of trg_clone for values from (1000) to (2000); +create table trg_clone3 partition of trg_clone for values from (2000) to (3000) + partition by range (a); +create table trg_clone_3_3 partition of trg_clone3 for values from (2000) to (2100); +select tgrelid::regclass, count(*) from pg_trigger + where tgrelid::regclass in ('trg_clone', 'trg_clone1', 'trg_clone2', + 'trg_clone3', 'trg_clone_3_3') + group by tgrelid::regclass order by tgrelid::regclass; +drop table trg_clone; + -- -- Test the interaction between transition tables and both kinds of -- inheritance. We'll dump the contents of the transition tables in a @@ -1409,7 +1644,7 @@ $$; -- -- Verify behavior of statement triggers on partition hierarchy with -- transition tables. Tuples should appear to each trigger in the --- format of the the relation the trigger is attached to. +-- format of the relation the trigger is attached to. 
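
-- An illustrative sketch (not part of the patch itself) of the column-list
-- rule behind the "AFTER UPDATE OF b" tests above: the trigger fires when a
-- listed column appears as an assignment target of the UPDATE, whether or not
-- the stored value actually changes. Names demo_updof and demo_notice2 are
-- hypothetical.
create table demo_updof (a int, b text);
insert into demo_updof values (1, 'x');
create function demo_notice2() returns trigger language plpgsql as
  $$ begin raise notice 'column b was targeted'; return new; end; $$;
create trigger demo_updof_trig after update of b on demo_updof
  for each row execute procedure demo_notice2();
update demo_updof set a = a + 1;   -- no notice: b is not an UPDATE target
update demo_updof set b = b;       -- notice: b is targeted even though unchanged
drop table demo_updof;
drop function demo_notice2();
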
-- -- set up a partition hierarchy with some different TupleDescriptors @@ -1467,6 +1702,13 @@ create trigger child3_delete_trig after delete on child3 referencing old table as old_table for each statement execute procedure dump_delete(); +SELECT trigger_name, event_manipulation, event_object_schema, event_object_table, + action_order, action_condition, action_orientation, action_timing, + action_reference_old_table, action_reference_new_table + FROM information_schema.triggers + WHERE event_object_table IN ('parent', 'child1', 'child2', 'child3') + ORDER BY trigger_name COLLATE "C", 2; + -- insert directly into children sees respective child-format tuples insert into child1 values ('AAA', 42); insert into child2 values ('BBB', 42); @@ -1729,6 +1971,12 @@ create trigger table2_trig with wcte as (insert into table1 values (42)) insert into table2 values ('hello world'); +with wcte as (insert into table1 values (43)) + insert into table1 values (44); + +select * from table1; +select * from table2; + drop table table1; drop table table2; @@ -1760,6 +2008,39 @@ insert into my_table values (3, 'CCC'), (4, 'DDD') on conflict (a) do update set b = my_table.b || ':' || excluded.b; +-- +-- now using a partitioned table +-- + +create table iocdu_tt_parted (a int primary key, b text) partition by list (a); +create table iocdu_tt_parted1 partition of iocdu_tt_parted for values in (1); +create table iocdu_tt_parted2 partition of iocdu_tt_parted for values in (2); +create table iocdu_tt_parted3 partition of iocdu_tt_parted for values in (3); +create table iocdu_tt_parted4 partition of iocdu_tt_parted for values in (4); +create trigger iocdu_tt_parted_insert_trig + after insert on iocdu_tt_parted referencing new table as new_table + for each statement execute procedure dump_insert(); +create trigger iocdu_tt_parted_update_trig + after update on iocdu_tt_parted referencing old table as old_table new table as new_table + for each statement execute procedure dump_update(); + +-- inserts only +insert into iocdu_tt_parted values (1, 'AAA'), (2, 'BBB') + on conflict (a) do + update set b = iocdu_tt_parted.b || ':' || excluded.b; + +-- mixture of inserts and updates +insert into iocdu_tt_parted values (1, 'AAA'), (2, 'BBB'), (3, 'CCC'), (4, 'DDD') + on conflict (a) do + update set b = iocdu_tt_parted.b || ':' || excluded.b; + +-- updates only +insert into iocdu_tt_parted values (3, 'CCC'), (4, 'DDD') + on conflict (a) do + update set b = iocdu_tt_parted.b || ':' || excluded.b; + +drop table iocdu_tt_parted; + -- -- Verify that you can't create a trigger with transition tables for -- more than one event. @@ -1769,8 +2050,91 @@ create trigger my_table_multievent_trig after insert or update on my_table referencing new table as new_table for each statement execute procedure dump_insert(); +-- +-- Verify that you can't create a trigger with transition tables with +-- a column list. 
+-- + +create trigger my_table_col_update_trig + after update of b on my_table referencing new table as new_table + for each statement execute procedure dump_insert(); + drop table my_table; +-- +-- Test firing of triggers with transition tables by foreign key cascades +-- + +create table refd_table (a int primary key, b text); +create table trig_table (a int, b text, + foreign key (a) references refd_table on update cascade on delete cascade +); + +create trigger trig_table_before_trig + before insert or update or delete on trig_table + for each statement execute procedure trigger_func('trig_table'); +create trigger trig_table_insert_trig + after insert on trig_table referencing new table as new_table + for each statement execute procedure dump_insert(); +create trigger trig_table_update_trig + after update on trig_table referencing old table as old_table new table as new_table + for each statement execute procedure dump_update(); +create trigger trig_table_delete_trig + after delete on trig_table referencing old table as old_table + for each statement execute procedure dump_delete(); + +insert into refd_table values + (1, 'one'), + (2, 'two'), + (3, 'three'); +insert into trig_table values + (1, 'one a'), + (1, 'one b'), + (2, 'two a'), + (2, 'two b'), + (3, 'three a'), + (3, 'three b'); + +update refd_table set a = 11 where b = 'one'; + +select * from trig_table; + +delete from refd_table where length(b) = 3; + +select * from trig_table; + +drop table refd_table, trig_table; + +-- +-- self-referential FKs are even more fun +-- + +create table self_ref (a int primary key, + b int references self_ref(a) on delete cascade); + +create trigger self_ref_before_trig + before delete on self_ref + for each statement execute procedure trigger_func('self_ref'); +create trigger self_ref_r_trig + after delete on self_ref referencing old table as old_table + for each row execute procedure dump_delete(); +create trigger self_ref_s_trig + after delete on self_ref referencing old table as old_table + for each statement execute procedure dump_delete(); + +insert into self_ref values (1, null), (2, 1), (3, 2); + +delete from self_ref where a = 1; + +-- without AR trigger, cascaded deletes all end up in one transition table +drop trigger self_ref_r_trig on self_ref; + +insert into self_ref values (1, null), (2, 1), (3, 2), (4, 3); + +delete from self_ref where a = 1; + +drop table self_ref; + -- cleanup drop function dump_insert(); drop function dump_update(); diff --git a/src/test/regress/sql/truncate.sql b/src/test/regress/sql/truncate.sql index fbd1d1a8a5..6ddfb6dd1d 100644 --- a/src/test/regress/sql/truncate.sql +++ b/src/test/regress/sql/truncate.sql @@ -244,3 +244,50 @@ INSERT INTO truncparted VALUES (1, 'a'); TRUNCATE ONLY truncparted; TRUNCATE truncparted; DROP TABLE truncparted; + +-- foreign key on partitioned table: partition key is referencing column. 
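
-- An illustrative sketch (not part of the patch itself) of the behavior the
-- refd_table/trig_table and self_ref tests above verify: statement-level AFTER
-- triggers with transition tables also capture rows touched by foreign-key
-- cascade actions. Names demo_pk, demo_fk and demo_dump are hypothetical.
create table demo_pk (a int primary key);
create table demo_fk (a int references demo_pk on delete cascade);
create function demo_dump() returns trigger language plpgsql as
  $$ begin
       raise notice 'cascaded delete removed % row(s)', (select count(*) from old_rows);
       return null;
     end; $$;
create trigger demo_fk_del after delete on demo_fk
  referencing old table as old_rows
  for each statement execute procedure demo_dump();
insert into demo_pk values (1);
insert into demo_fk values (1), (1);
delete from demo_pk where a = 1;   -- cascade fires demo_fk_del with two OLD rows
drop table demo_fk, demo_pk;
drop function demo_dump();
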
+-- Make sure truncate did execute on all tables +CREATE FUNCTION tp_ins_data() RETURNS void LANGUAGE plpgsql AS $$ + BEGIN + INSERT INTO truncprim VALUES (1), (100), (150); + INSERT INTO truncpart VALUES (1), (100), (150); + END +$$; +CREATE FUNCTION tp_chk_data(OUT pktb regclass, OUT pkval int, OUT fktb regclass, OUT fkval int) + RETURNS SETOF record LANGUAGE plpgsql AS $$ + BEGIN + RETURN QUERY SELECT + pk.tableoid::regclass, pk.a, fk.tableoid::regclass, fk.a + FROM truncprim pk FULL JOIN truncpart fk USING (a) + ORDER BY 2, 4; + END +$$; +CREATE TABLE truncprim (a int PRIMARY KEY); +CREATE TABLE truncpart (a int REFERENCES truncprim) + PARTITION BY RANGE (a); +CREATE TABLE truncpart_1 PARTITION OF truncpart FOR VALUES FROM (0) TO (100); +CREATE TABLE truncpart_2 PARTITION OF truncpart FOR VALUES FROM (100) TO (200) + PARTITION BY RANGE (a); +CREATE TABLE truncpart_2_1 PARTITION OF truncpart_2 FOR VALUES FROM (100) TO (150); +CREATE TABLE truncpart_2_d PARTITION OF truncpart_2 DEFAULT; + +TRUNCATE TABLE truncprim; -- should fail + +select tp_ins_data(); +-- should truncate everything +TRUNCATE TABLE truncprim, truncpart; +select * from tp_chk_data(); + +select tp_ins_data(); +-- should truncate everything +SET client_min_messages TO WARNING; -- suppress cascading notices +TRUNCATE TABLE truncprim CASCADE; +RESET client_min_messages; +SELECT * FROM tp_chk_data(); + +SELECT tp_ins_data(); +-- should truncate all partitions +TRUNCATE TABLE truncpart; +SELECT * FROM tp_chk_data(); +DROP TABLE truncprim, truncpart; +DROP FUNCTION tp_ins_data(), tp_chk_data(); diff --git a/src/test/regress/sql/tsdicts.sql b/src/test/regress/sql/tsdicts.sql index a5a569e1ad..60906f6549 100644 --- a/src/test/regress/sql/tsdicts.sql +++ b/src/test/regress/sql/tsdicts.sql @@ -66,11 +66,14 @@ SELECT ts_lexize('hunspell_long', 'rebook'); SELECT ts_lexize('hunspell_long', 'unbookings'); SELECT ts_lexize('hunspell_long', 'unbooking'); SELECT ts_lexize('hunspell_long', 'unbook'); +SELECT ts_lexize('hunspell_long', 'booked'); SELECT ts_lexize('hunspell_long', 'footklubber'); SELECT ts_lexize('hunspell_long', 'footballklubber'); SELECT ts_lexize('hunspell_long', 'ballyklubber'); +SELECT ts_lexize('hunspell_long', 'ballsklubber'); SELECT ts_lexize('hunspell_long', 'footballyklubber'); +SELECT ts_lexize('hunspell_long', 'ex-machina'); -- Test ISpell dictionary with hunspell affix file with FLAG num parameter CREATE TEXT SEARCH DICTIONARY hunspell_num ( @@ -80,6 +83,7 @@ CREATE TEXT SEARCH DICTIONARY hunspell_num ( ); SELECT ts_lexize('hunspell_num', 'skies'); +SELECT ts_lexize('hunspell_num', 'sk'); SELECT ts_lexize('hunspell_num', 'bookings'); SELECT ts_lexize('hunspell_num', 'booking'); SELECT ts_lexize('hunspell_num', 'foot'); @@ -90,6 +94,7 @@ SELECT ts_lexize('hunspell_num', 'rebook'); SELECT ts_lexize('hunspell_num', 'unbookings'); SELECT ts_lexize('hunspell_num', 'unbooking'); SELECT ts_lexize('hunspell_num', 'unbook'); +SELECT ts_lexize('hunspell_num', 'booked'); SELECT ts_lexize('hunspell_num', 'footklubber'); SELECT ts_lexize('hunspell_num', 'footballklubber'); @@ -188,3 +193,11 @@ ALTER TEXT SEARCH CONFIGURATION thesaurus_tst ALTER MAPPING FOR SELECT to_tsvector('thesaurus_tst', 'one postgres one two one two three one'); SELECT to_tsvector('thesaurus_tst', 'Supernovae star is very new star and usually called supernovae (abbreviation SN)'); SELECT to_tsvector('thesaurus_tst', 'Booking tickets is looking like a booking a tickets'); + +-- invalid: non-lowercase quoted identifiers +CREATE TEXT SEARCH DICTIONARY 
tsdict_case +( + Template = ispell, + "DictFile" = ispell_sample, + "AffFile" = ispell_sample +); diff --git a/src/test/regress/sql/tsearch.sql b/src/test/regress/sql/tsearch.sql index e4b21f8f18..637bfb3012 100644 --- a/src/test/regress/sql/tsearch.sql +++ b/src/test/regress/sql/tsearch.sql @@ -145,6 +145,10 @@ SELECT * from ts_debug('english', 'http://www.harewoodsolutions.co.uk/press.aspx SELECT * from ts_debug('english', 'http://aew.wer0c.ewr/id?ad=qwe&dw'); SELECT * from ts_debug('english', 'http://5aew.werc.ewr:8100/?'); SELECT * from ts_debug('english', '5aew.werc.ewr:8100/?xx'); +SELECT token, alias, + dictionaries, dictionaries is null as dnull, array_dims(dictionaries) as ddims, + lexemes, lexemes is null as lnull, array_dims(lexemes) as ldims +from ts_debug('english', 'a title'); -- to_tsquery @@ -535,3 +539,96 @@ create index phrase_index_test_idx on phrase_index_test using gin(fts); set enable_seqscan = off; select * from phrase_index_test where fts @@ phraseto_tsquery('english', 'fat cat'); set enable_seqscan = on; + +-- test websearch_to_tsquery function +select websearch_to_tsquery('simple', 'I have a fat:*ABCD cat'); +select websearch_to_tsquery('simple', 'orange:**AABBCCDD'); +select websearch_to_tsquery('simple', 'fat:A!cat:B|rat:C<'); +select websearch_to_tsquery('simple', 'fat:A : cat:B'); + +select websearch_to_tsquery('simple', 'fat*rat'); +select websearch_to_tsquery('simple', 'fat-rat'); +select websearch_to_tsquery('simple', 'fat_rat'); + +-- weights are completely ignored +select websearch_to_tsquery('simple', 'abc : def'); +select websearch_to_tsquery('simple', 'abc:def'); +select websearch_to_tsquery('simple', 'a:::b'); +select websearch_to_tsquery('simple', 'abc:d'); +select websearch_to_tsquery('simple', ':'); + +-- these operators are ignored +select websearch_to_tsquery('simple', 'abc & def'); +select websearch_to_tsquery('simple', 'abc | def'); +select websearch_to_tsquery('simple', 'abc <-> def'); +select websearch_to_tsquery('simple', 'abc (pg or class)'); + +-- NOT is ignored in quotes +select websearch_to_tsquery('english', 'My brand new smartphone'); +select websearch_to_tsquery('english', 'My brand "new smartphone"'); +select websearch_to_tsquery('english', 'My brand "new -smartphone"'); + +-- test OR operator +select websearch_to_tsquery('simple', 'cat or rat'); +select websearch_to_tsquery('simple', 'cat OR rat'); +select websearch_to_tsquery('simple', 'cat "OR" rat'); +select websearch_to_tsquery('simple', 'cat OR'); +select websearch_to_tsquery('simple', 'OR rat'); +select websearch_to_tsquery('simple', '"fat cat OR rat"'); +select websearch_to_tsquery('simple', 'fat (cat OR rat'); +select websearch_to_tsquery('simple', 'or OR or'); + +-- OR is an operator here ... +select websearch_to_tsquery('simple', '"fat cat"or"fat rat"'); +select websearch_to_tsquery('simple', 'fat or(rat'); +select websearch_to_tsquery('simple', 'fat or)rat'); +select websearch_to_tsquery('simple', 'fat or&rat'); +select websearch_to_tsquery('simple', 'fat or|rat'); +select websearch_to_tsquery('simple', 'fat or!rat'); +select websearch_to_tsquery('simple', 'fat orrat'); +select websearch_to_tsquery('simple', 'fat or '); + +-- ... 
but not here +select websearch_to_tsquery('simple', 'abc orange'); +select websearch_to_tsquery('simple', 'abc OR1234'); +select websearch_to_tsquery('simple', 'abc or-abc'); +select websearch_to_tsquery('simple', 'abc OR_abc'); + +-- test quotes +select websearch_to_tsquery('english', '"pg_class pg'); +select websearch_to_tsquery('english', 'pg_class pg"'); +select websearch_to_tsquery('english', '"pg_class pg"'); +select websearch_to_tsquery('english', 'abc "pg_class pg"'); +select websearch_to_tsquery('english', '"pg_class pg" def'); +select websearch_to_tsquery('english', 'abc "pg pg_class pg" def'); +select websearch_to_tsquery('english', ' or "pg pg_class pg" or '); +select websearch_to_tsquery('english', '""pg pg_class pg""'); +select websearch_to_tsquery('english', 'abc """"" def'); +select websearch_to_tsquery('english', 'cat -"fat rat"'); +select websearch_to_tsquery('english', 'cat -"fat rat" cheese'); +select websearch_to_tsquery('english', 'abc "def -"'); +select websearch_to_tsquery('english', 'abc "def :"'); + +select websearch_to_tsquery('english', '"A fat cat" has just eaten a -rat.'); +select websearch_to_tsquery('english', '"A fat cat" has just eaten OR !rat.'); +select websearch_to_tsquery('english', '"A fat cat" has just (+eaten OR -rat)'); + +select websearch_to_tsquery('english', 'this is ----fine'); +select websearch_to_tsquery('english', '(()) )))) this ||| is && -fine, "dear friend" OR good'); +select websearch_to_tsquery('english', 'an old <-> cat " is fine &&& too'); + +select websearch_to_tsquery('english', '"A the" OR just on'); +select websearch_to_tsquery('english', '"a fat cat" ate a rat'); + +select to_tsvector('english', 'A fat cat ate a rat') @@ + websearch_to_tsquery('english', '"a fat cat" ate a rat'); + +select to_tsvector('english', 'A fat grey cat ate a rat') @@ + websearch_to_tsquery('english', '"a fat cat" ate a rat'); + +-- cases handled by gettoken_tsvector() +select websearch_to_tsquery(''''); +select websearch_to_tsquery('''abc''''def'''); +select websearch_to_tsquery('\abc'); +select websearch_to_tsquery('\'); diff --git a/src/test/regress/sql/tsrf.sql b/src/test/regress/sql/tsrf.sql index ae1900bce1..0a1e8e5666 100644 --- a/src/test/regress/sql/tsrf.sql +++ b/src/test/regress/sql/tsrf.sql @@ -88,6 +88,11 @@ SELECT dataa, datab b, generate_series(1,2) g, count(*) FROM few GROUP BY CUBE(d SELECT dataa, datab b, generate_series(1,2) g, count(*) FROM few GROUP BY CUBE(dataa, datab, g) ORDER BY g; reset enable_hashagg; +-- case with degenerate ORDER BY +explain (verbose, costs off) +select 'foo' as f, generate_series(1,2) as g from few order by 1; +select 'foo' as f, generate_series(1,2) as g from few order by 1; + -- data modification CREATE TABLE fewmore AS SELECT generate_series(1,3) AS data; INSERT INTO fewmore VALUES(generate_series(4,5)); diff --git a/src/test/regress/sql/type_sanity.sql b/src/test/regress/sql/type_sanity.sql index 4c65814008..f9aeea3214 100644 --- a/src/test/regress/sql/type_sanity.sql +++ b/src/test/regress/sql/type_sanity.sql @@ -104,6 +104,30 @@ WHERE p1.typinput = p2.oid AND NOT p2.proargtypes[1] = 'oid'::regtype AND p2.proargtypes[2] = 'int4'::regtype)); +-- Check for type of the variadic array parameter's elements. +-- provariadic should be ANYOID if the type of the last element is ANYOID, +-- ANYELEMENTOID if the type of the last element is ANYARRAYOID, and otherwise +-- the element type corresponding to the array type. 
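
-- An illustrative sketch (not part of the patch itself) of the rule stated
-- above, using a hypothetical user-defined variadic function: for an ordinary
-- variadic parameter, provariadic records the array's element type, while
-- VARIADIC "any" records "any" itself.
create function demo_vsum(variadic nums int[]) returns int
  language sql as 'select sum(n)::int from unnest($1) n';
select provariadic::regtype, proargtypes::regtype[]
  from pg_proc where proname = 'demo_vsum';   -- expected: integer, {integer[]}
drop function demo_vsum(int[]);
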
+ +SELECT oid::regprocedure, provariadic::regtype, proargtypes::regtype[] +FROM pg_proc +WHERE provariadic != 0 +AND case proargtypes[array_length(proargtypes, 1)-1] + WHEN 2276 THEN 2276 -- any -> any + WHEN 2277 THEN 2283 -- anyarray -> anyelement + ELSE (SELECT t.oid + FROM pg_type t + WHERE t.typarray = proargtypes[array_length(proargtypes, 1)-1]) + END != provariadic; + +-- Check that all and only those functions with a variadic type have +-- a variadic argument. +SELECT oid::regprocedure, proargmodes, provariadic +FROM pg_proc +WHERE (proargmodes IS NOT NULL AND 'v' = any(proargmodes)) + IS DISTINCT FROM + (provariadic != 0); + -- As of 8.0, this check finds refcursor, which is borrowing -- other types' I/O routines SELECT p1.oid, p1.typname, p2.oid, p2.proname diff --git a/src/test/regress/sql/union.sql b/src/test/regress/sql/union.sql index c0317cccb4..eed7c8d34b 100644 --- a/src/test/regress/sql/union.sql +++ b/src/test/regress/sql/union.sql @@ -190,6 +190,49 @@ SELECT q1 FROM int8_tbl EXCEPT (((SELECT q2 FROM int8_tbl ORDER BY q2 LIMIT 1))) (((((select * from int8_tbl))))); +-- +-- Check behavior with empty select list (allowed since 9.4) +-- + +select union select; +select intersect select; +select except select; + +-- check hashed implementation +set enable_hashagg = true; +set enable_sort = false; + +explain (costs off) +select from generate_series(1,5) union select from generate_series(1,3); +explain (costs off) +select from generate_series(1,5) intersect select from generate_series(1,3); + +select from generate_series(1,5) union select from generate_series(1,3); +select from generate_series(1,5) union all select from generate_series(1,3); +select from generate_series(1,5) intersect select from generate_series(1,3); +select from generate_series(1,5) intersect all select from generate_series(1,3); +select from generate_series(1,5) except select from generate_series(1,3); +select from generate_series(1,5) except all select from generate_series(1,3); + +-- check sorted implementation +set enable_hashagg = false; +set enable_sort = true; + +explain (costs off) +select from generate_series(1,5) union select from generate_series(1,3); +explain (costs off) +select from generate_series(1,5) intersect select from generate_series(1,3); + +select from generate_series(1,5) union select from generate_series(1,3); +select from generate_series(1,5) union all select from generate_series(1,3); +select from generate_series(1,5) intersect select from generate_series(1,3); +select from generate_series(1,5) intersect all select from generate_series(1,3); +select from generate_series(1,5) except select from generate_series(1,3); +select from generate_series(1,5) except all select from generate_series(1,3); + +reset enable_hashagg; +reset enable_sort; + -- -- Check handling of a case with unknown constants. 
We don't guarantee -- an undecorated constant will work in all cases, but historically this diff --git a/src/test/regress/sql/updatable_views.sql b/src/test/regress/sql/updatable_views.sql index a6ba5aad9e..dc6d5cbe35 100644 --- a/src/test/regress/sql/updatable_views.sql +++ b/src/test/regress/sql/updatable_views.sql @@ -26,8 +26,8 @@ CREATE VIEW rw_view15 AS SELECT a, upper(b) FROM base_tbl; -- Expression/functio CREATE VIEW rw_view16 AS SELECT a, b, a AS aa FROM base_tbl; -- Repeated column may be part of an updatable view CREATE VIEW ro_view17 AS SELECT * FROM ro_view1; -- Base relation not updatable CREATE VIEW ro_view18 AS SELECT * FROM (VALUES(1)) AS tmp(a); -- VALUES in rangetable -CREATE SEQUENCE seq; -CREATE VIEW ro_view19 AS SELECT * FROM seq; -- View based on a sequence +CREATE SEQUENCE uv_seq; +CREATE VIEW ro_view19 AS SELECT * FROM uv_seq; -- View based on a sequence CREATE VIEW ro_view20 AS SELECT a, b, generate_series(1, a) g FROM base_tbl; -- SRF in targetlist not supported SELECT table_name, is_insertable_into @@ -100,7 +100,7 @@ UPDATE ro_view20 SET b=upper(b); DROP TABLE base_tbl CASCADE; DROP VIEW ro_view10, ro_view12, ro_view18; -DROP SEQUENCE seq CASCADE; +DROP SEQUENCE uv_seq CASCADE; -- simple updatable view @@ -459,6 +459,82 @@ RESET SESSION AUTHORIZATION; DROP TABLE base_tbl CASCADE; +-- nested-view permissions + +CREATE TABLE base_tbl(a int, b text, c float); +INSERT INTO base_tbl VALUES (1, 'Row 1', 1.0); + +SET SESSION AUTHORIZATION regress_view_user1; +CREATE VIEW rw_view1 AS SELECT * FROM base_tbl; +SELECT * FROM rw_view1; -- not allowed +SELECT * FROM rw_view1 FOR UPDATE; -- not allowed +UPDATE rw_view1 SET b = 'foo' WHERE a = 1; -- not allowed + +SET SESSION AUTHORIZATION regress_view_user2; +CREATE VIEW rw_view2 AS SELECT * FROM rw_view1; +SELECT * FROM rw_view2; -- not allowed +SELECT * FROM rw_view2 FOR UPDATE; -- not allowed +UPDATE rw_view2 SET b = 'bar' WHERE a = 1; -- not allowed + +RESET SESSION AUTHORIZATION; +GRANT SELECT ON base_tbl TO regress_view_user1; + +SET SESSION AUTHORIZATION regress_view_user1; +SELECT * FROM rw_view1; +SELECT * FROM rw_view1 FOR UPDATE; -- not allowed +UPDATE rw_view1 SET b = 'foo' WHERE a = 1; -- not allowed + +SET SESSION AUTHORIZATION regress_view_user2; +SELECT * FROM rw_view2; -- not allowed +SELECT * FROM rw_view2 FOR UPDATE; -- not allowed +UPDATE rw_view2 SET b = 'bar' WHERE a = 1; -- not allowed + +SET SESSION AUTHORIZATION regress_view_user1; +GRANT SELECT ON rw_view1 TO regress_view_user2; + +SET SESSION AUTHORIZATION regress_view_user2; +SELECT * FROM rw_view2; +SELECT * FROM rw_view2 FOR UPDATE; -- not allowed +UPDATE rw_view2 SET b = 'bar' WHERE a = 1; -- not allowed + +RESET SESSION AUTHORIZATION; +GRANT UPDATE ON base_tbl TO regress_view_user1; + +SET SESSION AUTHORIZATION regress_view_user1; +SELECT * FROM rw_view1; +SELECT * FROM rw_view1 FOR UPDATE; +UPDATE rw_view1 SET b = 'foo' WHERE a = 1; + +SET SESSION AUTHORIZATION regress_view_user2; +SELECT * FROM rw_view2; +SELECT * FROM rw_view2 FOR UPDATE; -- not allowed +UPDATE rw_view2 SET b = 'bar' WHERE a = 1; -- not allowed + +SET SESSION AUTHORIZATION regress_view_user1; +GRANT UPDATE ON rw_view1 TO regress_view_user2; + +SET SESSION AUTHORIZATION regress_view_user2; +SELECT * FROM rw_view2; +SELECT * FROM rw_view2 FOR UPDATE; +UPDATE rw_view2 SET b = 'bar' WHERE a = 1; + +RESET SESSION AUTHORIZATION; +REVOKE UPDATE ON base_tbl FROM regress_view_user1; + +SET SESSION AUTHORIZATION regress_view_user1; +SELECT * FROM rw_view1; +SELECT * FROM rw_view1 
FOR UPDATE; -- not allowed +UPDATE rw_view1 SET b = 'foo' WHERE a = 1; -- not allowed + +SET SESSION AUTHORIZATION regress_view_user2; +SELECT * FROM rw_view2; +SELECT * FROM rw_view2 FOR UPDATE; -- not allowed +UPDATE rw_view2 SET b = 'bar' WHERE a = 1; -- not allowed + +RESET SESSION AUTHORIZATION; + +DROP TABLE base_tbl CASCADE; + DROP USER regress_view_user1; DROP USER regress_view_user2; @@ -1114,33 +1190,33 @@ DROP VIEW v1; DROP TABLE t1; -- check that an auto-updatable view on a partitioned table works correctly -create table pt (a int, b int, v varchar) partition by range (a, b); -create table pt1 (b int not null, v varchar, a int not null) partition by range (b); -create table pt11 (like pt1); -alter table pt11 drop a; -alter table pt11 add a int; -alter table pt11 drop a; -alter table pt11 add a int not null; -alter table pt1 attach partition pt11 for values from (2) to (5); -alter table pt attach partition pt1 for values from (1, 2) to (1, 10); - -create view ptv as select * from pt; +create table uv_pt (a int, b int, v varchar) partition by range (a, b); +create table uv_pt1 (b int not null, v varchar, a int not null) partition by range (b); +create table uv_pt11 (like uv_pt1); +alter table uv_pt11 drop a; +alter table uv_pt11 add a int; +alter table uv_pt11 drop a; +alter table uv_pt11 add a int not null; +alter table uv_pt1 attach partition uv_pt11 for values from (2) to (5); +alter table uv_pt attach partition uv_pt1 for values from (1, 2) to (1, 10); + +create view uv_ptv as select * from uv_pt; select events & 4 != 0 AS upd, events & 8 != 0 AS ins, events & 16 != 0 AS del - from pg_catalog.pg_relation_is_updatable('pt'::regclass, false) t(events); -select pg_catalog.pg_column_is_updatable('pt'::regclass, 1::smallint, false); -select pg_catalog.pg_column_is_updatable('pt'::regclass, 2::smallint, false); + from pg_catalog.pg_relation_is_updatable('uv_pt'::regclass, false) t(events); +select pg_catalog.pg_column_is_updatable('uv_pt'::regclass, 1::smallint, false); +select pg_catalog.pg_column_is_updatable('uv_pt'::regclass, 2::smallint, false); select table_name, is_updatable, is_insertable_into - from information_schema.views where table_name = 'ptv'; + from information_schema.views where table_name = 'uv_ptv'; select table_name, column_name, is_updatable - from information_schema.columns where table_name = 'ptv' order by column_name; -insert into ptv values (1, 2); -select tableoid::regclass, * from pt; -create view ptv_wco as select * from pt where a = 0 with check option; -insert into ptv_wco values (1, 2); -drop view ptv, ptv_wco; -drop table pt, pt1, pt11; + from information_schema.columns where table_name = 'uv_ptv' order by column_name; +insert into uv_ptv values (1, 2); +select tableoid::regclass, * from uv_pt; +create view uv_ptv_wco as select * from uv_pt where a = 0 with check option; +insert into uv_ptv_wco values (1, 2); +drop view uv_ptv, uv_ptv_wco; +drop table uv_pt, uv_pt1, uv_pt11; -- check that wholerow vars appearing in WITH CHECK OPTION constraint expressions -- work fine with partitioned tables @@ -1168,3 +1244,138 @@ insert into wcowrtest_v2 values (2, 'no such row in sometable'); drop view wcowrtest_v, wcowrtest_v2; drop table wcowrtest, sometable; + +-- Check INSERT .. ON CONFLICT DO UPDATE works correctly when the view's +-- columns are named and ordered differently than the underlying table's. 
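
-- An illustrative sketch (not part of the patch itself) of the key point in
-- the tests that follow: ON CONFLICT DO UPDATE issued through a view resolves
-- the conflict target, the SET columns, and EXCLUDED.* against the view's own
-- column names and ordering, not the base table's. Names demo_base and demo_v
-- are hypothetical.
create table demo_base (k text unique, v int);
create view demo_v as select v as val, k as key from demo_base;
insert into demo_v (key, val) values ('a', 1);
insert into demo_v (key, val) values ('a', 2)
  on conflict (key) do update set val = excluded.val;   -- view column names throughout
select * from demo_base;   -- expected: ('a', 2)
drop view demo_v;
drop table demo_base;
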
+create table uv_iocu_tab (a text unique, b float); +insert into uv_iocu_tab values ('xyxyxy', 0); +create view uv_iocu_view as + select b, b+1 as c, a, '2.0'::text as two from uv_iocu_tab; + +insert into uv_iocu_view (a, b) values ('xyxyxy', 1) + on conflict (a) do update set b = uv_iocu_view.b; +select * from uv_iocu_tab; +insert into uv_iocu_view (a, b) values ('xyxyxy', 1) + on conflict (a) do update set b = excluded.b; +select * from uv_iocu_tab; + +-- OK to access view columns that are not present in underlying base +-- relation in the ON CONFLICT portion of the query +insert into uv_iocu_view (a, b) values ('xyxyxy', 3) + on conflict (a) do update set b = cast(excluded.two as float); +select * from uv_iocu_tab; + +explain (costs off) +insert into uv_iocu_view (a, b) values ('xyxyxy', 3) + on conflict (a) do update set b = excluded.b where excluded.c > 0; + +insert into uv_iocu_view (a, b) values ('xyxyxy', 3) + on conflict (a) do update set b = excluded.b where excluded.c > 0; +select * from uv_iocu_tab; + +drop view uv_iocu_view; +drop table uv_iocu_tab; + +-- Test whole-row references to the view +create table uv_iocu_tab (a int unique, b text); +create view uv_iocu_view as + select b as bb, a as aa, uv_iocu_tab::text as cc from uv_iocu_tab; + +insert into uv_iocu_view (aa,bb) values (1,'x'); +explain (costs off) +insert into uv_iocu_view (aa,bb) values (1,'y') + on conflict (aa) do update set bb = 'Rejected: '||excluded.* + where excluded.aa > 0 + and excluded.bb != '' + and excluded.cc is not null; +insert into uv_iocu_view (aa,bb) values (1,'y') + on conflict (aa) do update set bb = 'Rejected: '||excluded.* + where excluded.aa > 0 + and excluded.bb != '' + and excluded.cc is not null; +select * from uv_iocu_view; + +-- Test omitting a column of the base relation +delete from uv_iocu_view; +insert into uv_iocu_view (aa,bb) values (1,'x'); +insert into uv_iocu_view (aa) values (1) + on conflict (aa) do update set bb = 'Rejected: '||excluded.*; +select * from uv_iocu_view; + +alter table uv_iocu_tab alter column b set default 'table default'; +insert into uv_iocu_view (aa) values (1) + on conflict (aa) do update set bb = 'Rejected: '||excluded.*; +select * from uv_iocu_view; + +alter view uv_iocu_view alter column bb set default 'view default'; +insert into uv_iocu_view (aa) values (1) + on conflict (aa) do update set bb = 'Rejected: '||excluded.*; +select * from uv_iocu_view; + +-- Should fail to update non-updatable columns +insert into uv_iocu_view (aa) values (1) + on conflict (aa) do update set cc = 'XXX'; + +drop view uv_iocu_view; +drop table uv_iocu_tab; + +-- ON CONFLICT DO UPDATE permissions checks +create user regress_view_user1; +create user regress_view_user2; + +set session authorization regress_view_user1; +create table base_tbl(a int unique, b text, c float); +insert into base_tbl values (1,'xxx',1.0); +create view rw_view1 as select b as bb, c as cc, a as aa from base_tbl; + +grant select (aa,bb) on rw_view1 to regress_view_user2; +grant insert on rw_view1 to regress_view_user2; +grant update (bb) on rw_view1 to regress_view_user2; + +set session authorization regress_view_user2; +insert into rw_view1 values ('yyy',2.0,1) + on conflict (aa) do update set bb = excluded.cc; -- Not allowed +insert into rw_view1 values ('yyy',2.0,1) + on conflict (aa) do update set bb = rw_view1.cc; -- Not allowed +insert into rw_view1 values ('yyy',2.0,1) + on conflict (aa) do update set bb = excluded.bb; -- OK +insert into rw_view1 values ('zzz',2.0,1) + on conflict (aa) do update 
set bb = rw_view1.bb||'xxx'; -- OK +insert into rw_view1 values ('zzz',2.0,1) + on conflict (aa) do update set cc = 3.0; -- Not allowed +reset session authorization; +select * from base_tbl; + +set session authorization regress_view_user1; +grant select (a,b) on base_tbl to regress_view_user2; +grant insert (a,b) on base_tbl to regress_view_user2; +grant update (a,b) on base_tbl to regress_view_user2; + +set session authorization regress_view_user2; +create view rw_view2 as select b as bb, c as cc, a as aa from base_tbl; +insert into rw_view2 (aa,bb) values (1,'xxx') + on conflict (aa) do update set bb = excluded.bb; -- Not allowed +create view rw_view3 as select b as bb, a as aa from base_tbl; +insert into rw_view3 (aa,bb) values (1,'xxx') + on conflict (aa) do update set bb = excluded.bb; -- OK +reset session authorization; +select * from base_tbl; + +set session authorization regress_view_user2; +create view rw_view4 as select aa, bb, cc FROM rw_view1; +insert into rw_view4 (aa,bb) values (1,'yyy') + on conflict (aa) do update set bb = excluded.bb; -- Not allowed +create view rw_view5 as select aa, bb FROM rw_view1; +insert into rw_view5 (aa,bb) values (1,'yyy') + on conflict (aa) do update set bb = excluded.bb; -- OK +reset session authorization; +select * from base_tbl; + +drop view rw_view5; +drop view rw_view4; +drop view rw_view3; +drop view rw_view2; +drop view rw_view1; +drop table base_tbl; +drop user regress_view_user1; +drop user regress_view_user2; diff --git a/src/test/regress/sql/update.sql b/src/test/regress/sql/update.sql index 663711997b..c9bb3b53d3 100644 --- a/src/test/regress/sql/update.sql +++ b/src/test/regress/sql/update.sql @@ -107,23 +107,485 @@ INSERT INTO upsert_test VALUES (1, 'Bat') ON CONFLICT(a) DROP TABLE update_test; DROP TABLE upsert_test; --- update to a partition should check partition bound constraint for the new tuple -create table range_parted ( + +--------------------------- +-- UPDATE with row movement +--------------------------- + +-- When a partitioned table receives an UPDATE to the partitioned key and the +-- new values no longer meet the partition's bound, the row must be moved to +-- the correct partition for the new partition key (if one exists). We must +-- also ensure that updatable views on partitioned tables properly enforce any +-- WITH CHECK OPTION that is defined. The situation with triggers in this case +-- also requires thorough testing as partition key updates causing row +-- movement convert UPDATEs into DELETE+INSERT. + +CREATE TABLE range_parted ( a text, + b bigint, + c numeric, + d int, + e varchar +) PARTITION BY RANGE (a, b); + +-- Create partitions intentionally in descending bound order, so as to test +-- that update-row-movement works with the leaf partitions not in bound order. 
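
-- An illustrative sketch (not part of the patch itself) of the core behavior
-- before the larger range_parted scenario is built out below: updating the
-- partition key so a row no longer satisfies its partition's bound moves the
-- row (internally a DELETE plus INSERT) into the partition that accepts the
-- new key. Names demo_move, demo_move1 and demo_move2 are hypothetical.
create table demo_move (a int, t text) partition by list (a);
create table demo_move1 partition of demo_move for values in (1);
create table demo_move2 partition of demo_move for values in (2);
insert into demo_move values (1, 'row');
update demo_move set a = 2 where t = 'row';
select tableoid::regclass, * from demo_move;   -- expected: demo_move2 | 2 | row
drop table demo_move;
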
+CREATE TABLE part_b_20_b_30 (e varchar, c numeric, a text, b bigint, d int); +ALTER TABLE range_parted ATTACH PARTITION part_b_20_b_30 FOR VALUES FROM ('b', 20) TO ('b', 30); +CREATE TABLE part_b_10_b_20 (e varchar, c numeric, a text, b bigint, d int) PARTITION BY RANGE (c); +CREATE TABLE part_b_1_b_10 PARTITION OF range_parted FOR VALUES FROM ('b', 1) TO ('b', 10); +ALTER TABLE range_parted ATTACH PARTITION part_b_10_b_20 FOR VALUES FROM ('b', 10) TO ('b', 20); +CREATE TABLE part_a_10_a_20 PARTITION OF range_parted FOR VALUES FROM ('a', 10) TO ('a', 20); +CREATE TABLE part_a_1_a_10 PARTITION OF range_parted FOR VALUES FROM ('a', 1) TO ('a', 10); + +-- Check that partition-key UPDATE works sanely on a partitioned table that +-- does not have any child partitions. +UPDATE part_b_10_b_20 set b = b - 6; + +-- Create some more partitions following the above pattern of descending bound +-- order, but let's make the situation a bit more complex by having the +-- attribute numbers of the columns vary from their parent partition. +CREATE TABLE part_c_100_200 (e varchar, c numeric, a text, b bigint, d int) PARTITION BY range (abs(d)); +ALTER TABLE part_c_100_200 DROP COLUMN e, DROP COLUMN c, DROP COLUMN a; +ALTER TABLE part_c_100_200 ADD COLUMN c numeric, ADD COLUMN e varchar, ADD COLUMN a text; +ALTER TABLE part_c_100_200 DROP COLUMN b; +ALTER TABLE part_c_100_200 ADD COLUMN b bigint; +CREATE TABLE part_d_1_15 PARTITION OF part_c_100_200 FOR VALUES FROM (1) TO (15); +CREATE TABLE part_d_15_20 PARTITION OF part_c_100_200 FOR VALUES FROM (15) TO (20); + +ALTER TABLE part_b_10_b_20 ATTACH PARTITION part_c_100_200 FOR VALUES FROM (100) TO (200); + +CREATE TABLE part_c_1_100 (e varchar, d int, c numeric, b bigint, a text); +ALTER TABLE part_b_10_b_20 ATTACH PARTITION part_c_1_100 FOR VALUES FROM (1) TO (100); + +\set init_range_parted 'truncate range_parted; insert into range_parted VALUES (''a'', 1, 1, 1), (''a'', 10, 200, 1), (''b'', 12, 96, 1), (''b'', 13, 97, 2), (''b'', 15, 105, 16), (''b'', 17, 105, 19)' +\set show_data 'select tableoid::regclass::text COLLATE "C" partname, * from range_parted ORDER BY 1, 2, 3, 4, 5, 6' +:init_range_parted; +:show_data; + +-- The order of subplans should be in bound order +EXPLAIN (costs off) UPDATE range_parted set c = c - 50 WHERE c > 97; + +-- fail, row movement happens only within the partition subtree. +UPDATE part_c_100_200 set c = c - 20, d = c WHERE c = 105; +-- fail, no partition key update, so no attempt to move tuple, +-- but "a = 'a'" violates partition constraint enforced by root partition) +UPDATE part_b_10_b_20 set a = 'a'; +-- ok, partition key update, no constraint violation +UPDATE range_parted set d = d - 10 WHERE d > 10; +-- ok, no partition key update, no constraint violation +UPDATE range_parted set e = d; +-- No row found +UPDATE part_c_1_100 set c = c + 20 WHERE c = 98; +-- ok, row movement +UPDATE part_b_10_b_20 set c = c + 20 returning c, b, a; +:show_data; + +-- fail, row movement happens only within the partition subtree. +UPDATE part_b_10_b_20 set b = b - 6 WHERE c > 116 returning *; +-- ok, row movement, with subset of rows moved into different partition. +UPDATE range_parted set b = b - 6 WHERE c > 116 returning a, b + c; + +:show_data; + +-- Common table needed for multiple test scenarios. +CREATE TABLE mintab(c1 int); +INSERT into mintab VALUES (120); + +-- update partition key using updatable view. 
+CREATE VIEW upview AS SELECT * FROM range_parted WHERE (select c > c1 FROM mintab) WITH CHECK OPTION; +-- ok +UPDATE upview set c = 199 WHERE b = 4; +-- fail, check option violation +UPDATE upview set c = 120 WHERE b = 4; +-- fail, row movement with check option violation +UPDATE upview set a = 'b', b = 15, c = 120 WHERE b = 4; +-- ok, row movement, check option passes +UPDATE upview set a = 'b', b = 15 WHERE b = 4; + +:show_data; + +-- cleanup +DROP VIEW upview; + +-- RETURNING having whole-row vars. +:init_range_parted; +UPDATE range_parted set c = 95 WHERE a = 'b' and b > 10 and c > 100 returning (range_parted), *; +:show_data; + + +-- Transition tables with update row movement +:init_range_parted; + +CREATE FUNCTION trans_updatetrigfunc() RETURNS trigger LANGUAGE plpgsql AS +$$ + begin + raise notice 'trigger = %, old table = %, new table = %', + TG_NAME, + (select string_agg(old_table::text, ', ' ORDER BY a) FROM old_table), + (select string_agg(new_table::text, ', ' ORDER BY a) FROM new_table); + return null; + end; +$$; + +CREATE TRIGGER trans_updatetrig + AFTER UPDATE ON range_parted REFERENCING OLD TABLE AS old_table NEW TABLE AS new_table + FOR EACH STATEMENT EXECUTE PROCEDURE trans_updatetrigfunc(); + +UPDATE range_parted set c = (case when c = 96 then 110 else c + 1 end ) WHERE a = 'b' and b > 10 and c >= 96; +:show_data; +:init_range_parted; + +-- Enabling OLD TABLE capture for both DELETE and UPDATE stmt triggers +-- should not cause DELETEd rows to be captured twice. Similar thing for +-- INSERT triggers and inserted rows. +CREATE TRIGGER trans_deletetrig + AFTER DELETE ON range_parted REFERENCING OLD TABLE AS old_table + FOR EACH STATEMENT EXECUTE PROCEDURE trans_updatetrigfunc(); +CREATE TRIGGER trans_inserttrig + AFTER INSERT ON range_parted REFERENCING NEW TABLE AS new_table + FOR EACH STATEMENT EXECUTE PROCEDURE trans_updatetrigfunc(); +UPDATE range_parted set c = c + 50 WHERE a = 'b' and b > 10 and c >= 96; +:show_data; +DROP TRIGGER trans_deletetrig ON range_parted; +DROP TRIGGER trans_inserttrig ON range_parted; +-- Don't drop trans_updatetrig yet. It is required below. + +-- Test with transition tuple conversion happening for rows moved into the +-- new partition. This requires a trigger that references a transition table +-- (we already have trans_updatetrig). For inserted rows, the conversion +-- is not usually needed, because the original tuple is already compatible with +-- the desired transition tuple format. But conversion happens when there is a +-- BR trigger because the trigger can change the inserted row. So install +-- BR triggers on those child partitions where the rows will be moved. 
+CREATE FUNCTION func_parted_mod_b() RETURNS trigger AS $$ +BEGIN + NEW.b = NEW.b + 1; + return NEW; +END $$ language plpgsql; +CREATE TRIGGER trig_c1_100 BEFORE UPDATE OR INSERT ON part_c_1_100 + FOR EACH ROW EXECUTE PROCEDURE func_parted_mod_b(); +CREATE TRIGGER trig_d1_15 BEFORE UPDATE OR INSERT ON part_d_1_15 + FOR EACH ROW EXECUTE PROCEDURE func_parted_mod_b(); +CREATE TRIGGER trig_d15_20 BEFORE UPDATE OR INSERT ON part_d_15_20 + FOR EACH ROW EXECUTE PROCEDURE func_parted_mod_b(); +:init_range_parted; +UPDATE range_parted set c = (case when c = 96 then 110 else c + 1 end) WHERE a = 'b' and b > 10 and c >= 96; +:show_data; +:init_range_parted; +UPDATE range_parted set c = c + 50 WHERE a = 'b' and b > 10 and c >= 96; +:show_data; + +-- Case where per-partition tuple conversion map array is allocated, but the +-- map is not required for the particular tuple that is routed, thanks to +-- matching table attributes of the partition and the target table. +:init_range_parted; +UPDATE range_parted set b = 15 WHERE b = 1; +:show_data; + +DROP TRIGGER trans_updatetrig ON range_parted; +DROP TRIGGER trig_c1_100 ON part_c_1_100; +DROP TRIGGER trig_d1_15 ON part_d_1_15; +DROP TRIGGER trig_d15_20 ON part_d_15_20; +DROP FUNCTION func_parted_mod_b(); + +-- RLS policies with update-row-movement +----------------------------------------- + +ALTER TABLE range_parted ENABLE ROW LEVEL SECURITY; +CREATE USER regress_range_parted_user; +GRANT ALL ON range_parted, mintab TO regress_range_parted_user; +CREATE POLICY seeall ON range_parted AS PERMISSIVE FOR SELECT USING (true); +CREATE POLICY policy_range_parted ON range_parted for UPDATE USING (true) WITH CHECK (c % 2 = 0); + +:init_range_parted; +SET SESSION AUTHORIZATION regress_range_parted_user; +-- This should fail with RLS violation error while moving row from +-- part_a_10_a_20 to part_d_1_15, because we are setting 'c' to an odd number. +UPDATE range_parted set a = 'b', c = 151 WHERE a = 'a' and c = 200; + +RESET SESSION AUTHORIZATION; +-- Create a trigger on part_d_1_15 +CREATE FUNCTION func_d_1_15() RETURNS trigger AS $$ +BEGIN + NEW.c = NEW.c + 1; -- Make even numbers odd, or vice versa + return NEW; +END $$ LANGUAGE plpgsql; +CREATE TRIGGER trig_d_1_15 BEFORE INSERT ON part_d_1_15 + FOR EACH ROW EXECUTE PROCEDURE func_d_1_15(); + +:init_range_parted; +SET SESSION AUTHORIZATION regress_range_parted_user; + +-- Here, RLS checks should succeed while moving row from part_a_10_a_20 to +-- part_d_1_15. Even though the UPDATE is setting 'c' to an odd number, the +-- trigger at the destination partition again makes it an even number. +UPDATE range_parted set a = 'b', c = 151 WHERE a = 'a' and c = 200; + +RESET SESSION AUTHORIZATION; +:init_range_parted; +SET SESSION AUTHORIZATION regress_range_parted_user; +-- This should fail with RLS violation error. Even though the UPDATE is setting +-- 'c' to an even number, the trigger at the destination partition again makes +-- it an odd number. 
+UPDATE range_parted set a = 'b', c = 150 WHERE a = 'a' and c = 200; + +-- Cleanup +RESET SESSION AUTHORIZATION; +DROP TRIGGER trig_d_1_15 ON part_d_1_15; +DROP FUNCTION func_d_1_15(); + +-- Policy expression contains SubPlan +RESET SESSION AUTHORIZATION; +:init_range_parted; +CREATE POLICY policy_range_parted_subplan on range_parted + AS RESTRICTIVE for UPDATE USING (true) + WITH CHECK ((SELECT range_parted.c <= c1 FROM mintab)); +SET SESSION AUTHORIZATION regress_range_parted_user; +-- fail, mintab has row with c1 = 120 +UPDATE range_parted set a = 'b', c = 122 WHERE a = 'a' and c = 200; +-- ok +UPDATE range_parted set a = 'b', c = 120 WHERE a = 'a' and c = 200; + +-- RLS policy expression contains whole row. + +RESET SESSION AUTHORIZATION; +:init_range_parted; +CREATE POLICY policy_range_parted_wholerow on range_parted AS RESTRICTIVE for UPDATE USING (true) + WITH CHECK (range_parted = row('b', 10, 112, 1, NULL)::range_parted); +SET SESSION AUTHORIZATION regress_range_parted_user; +-- ok, should pass the RLS check +UPDATE range_parted set a = 'b', c = 112 WHERE a = 'a' and c = 200; +RESET SESSION AUTHORIZATION; +:init_range_parted; +SET SESSION AUTHORIZATION regress_range_parted_user; +-- fail, the whole row RLS check should fail +UPDATE range_parted set a = 'b', c = 116 WHERE a = 'a' and c = 200; + +-- Cleanup +RESET SESSION AUTHORIZATION; +DROP POLICY policy_range_parted ON range_parted; +DROP POLICY policy_range_parted_subplan ON range_parted; +DROP POLICY policy_range_parted_wholerow ON range_parted; +REVOKE ALL ON range_parted, mintab FROM regress_range_parted_user; +DROP USER regress_range_parted_user; +DROP TABLE mintab; + + +-- statement triggers with update row movement +--------------------------------------------------- + +:init_range_parted; + +CREATE FUNCTION trigfunc() returns trigger language plpgsql as +$$ + begin + raise notice 'trigger = % fired on table % during %', + TG_NAME, TG_TABLE_NAME, TG_OP; + return null; + end; +$$; +-- Triggers on root partition +CREATE TRIGGER parent_delete_trig + AFTER DELETE ON range_parted for each statement execute procedure trigfunc(); +CREATE TRIGGER parent_update_trig + AFTER UPDATE ON range_parted for each statement execute procedure trigfunc(); +CREATE TRIGGER parent_insert_trig + AFTER INSERT ON range_parted for each statement execute procedure trigfunc(); + +-- Triggers on leaf partition part_c_1_100 +CREATE TRIGGER c1_delete_trig + AFTER DELETE ON part_c_1_100 for each statement execute procedure trigfunc(); +CREATE TRIGGER c1_update_trig + AFTER UPDATE ON part_c_1_100 for each statement execute procedure trigfunc(); +CREATE TRIGGER c1_insert_trig + AFTER INSERT ON part_c_1_100 for each statement execute procedure trigfunc(); + +-- Triggers on leaf partition part_d_1_15 +CREATE TRIGGER d1_delete_trig + AFTER DELETE ON part_d_1_15 for each statement execute procedure trigfunc(); +CREATE TRIGGER d1_update_trig + AFTER UPDATE ON part_d_1_15 for each statement execute procedure trigfunc(); +CREATE TRIGGER d1_insert_trig + AFTER INSERT ON part_d_1_15 for each statement execute procedure trigfunc(); +-- Triggers on leaf partition part_d_15_20 +CREATE TRIGGER d15_delete_trig + AFTER DELETE ON part_d_15_20 for each statement execute procedure trigfunc(); +CREATE TRIGGER d15_update_trig + AFTER UPDATE ON part_d_15_20 for each statement execute procedure trigfunc(); +CREATE TRIGGER d15_insert_trig + AFTER INSERT ON part_d_15_20 for each statement execute procedure trigfunc(); + +-- Move all rows from part_c_100_200 to part_c_1_100. 
None of the delete or +-- insert statement triggers should be fired. +UPDATE range_parted set c = c - 50 WHERE c > 97; +:show_data; + +DROP TRIGGER parent_delete_trig ON range_parted; +DROP TRIGGER parent_update_trig ON range_parted; +DROP TRIGGER parent_insert_trig ON range_parted; +DROP TRIGGER c1_delete_trig ON part_c_1_100; +DROP TRIGGER c1_update_trig ON part_c_1_100; +DROP TRIGGER c1_insert_trig ON part_c_1_100; +DROP TRIGGER d1_delete_trig ON part_d_1_15; +DROP TRIGGER d1_update_trig ON part_d_1_15; +DROP TRIGGER d1_insert_trig ON part_d_1_15; +DROP TRIGGER d15_delete_trig ON part_d_15_20; +DROP TRIGGER d15_update_trig ON part_d_15_20; +DROP TRIGGER d15_insert_trig ON part_d_15_20; + + +-- Creating default partition for range +:init_range_parted; +create table part_def partition of range_parted default; +\d+ part_def +insert into range_parted values ('c', 9); +-- ok +update part_def set a = 'd' where a = 'c'; +-- fail +update part_def set a = 'a' where a = 'd'; + +:show_data; + +-- Update row movement from non-default to default partition. +-- fail, default partition is not under part_a_10_a_20; +UPDATE part_a_10_a_20 set a = 'ad' WHERE a = 'a'; +-- ok +UPDATE range_parted set a = 'ad' WHERE a = 'a'; +UPDATE range_parted set a = 'bd' WHERE a = 'b'; +:show_data; +-- Update row movement from default to non-default partitions. +-- ok +UPDATE range_parted set a = 'a' WHERE a = 'ad'; +UPDATE range_parted set a = 'b' WHERE a = 'bd'; +:show_data; + +-- Cleanup: range_parted no longer needed. +DROP TABLE range_parted; + +CREATE TABLE list_parted ( + a text, + b int +) PARTITION BY list (a); +CREATE TABLE list_part1 PARTITION OF list_parted for VALUES in ('a', 'b'); +CREATE TABLE list_default PARTITION OF list_parted default; +INSERT into list_part1 VALUES ('a', 1); +INSERT into list_default VALUES ('d', 10); + +-- fail +UPDATE list_default set a = 'a' WHERE a = 'd'; +-- ok +UPDATE list_default set a = 'x' WHERE a = 'd'; + +DROP TABLE list_parted; + +-------------- +-- Some more update-partition-key test scenarios below. This time use list +-- partitions. +-------------- + +-- Setup for list partitions +CREATE TABLE list_parted (a numeric, b int, c int8) PARTITION BY list (a); +CREATE TABLE sub_parted PARTITION OF list_parted for VALUES in (1) PARTITION BY list (b); + +CREATE TABLE sub_part1(b int, c int8, a numeric); +ALTER TABLE sub_parted ATTACH PARTITION sub_part1 for VALUES in (1); +CREATE TABLE sub_part2(b int, c int8, a numeric); +ALTER TABLE sub_parted ATTACH PARTITION sub_part2 for VALUES in (2); + +CREATE TABLE list_part1(a numeric, b int, c int8); +ALTER TABLE list_parted ATTACH PARTITION list_part1 for VALUES in (2,3); + +INSERT into list_parted VALUES (2,5,50); +INSERT into list_parted VALUES (3,6,60); +INSERT into sub_parted VALUES (1,1,60); +INSERT into sub_parted VALUES (1,2,10); + +-- Test partition constraint violation when intermediate ancestor is used and +-- constraint is inherited from upper root. +UPDATE sub_parted set a = 2 WHERE c = 10; + +-- Test update-partition-key, where the unpruned partitions do not have their +-- partition keys updated. +SELECT tableoid::regclass::text, * FROM list_parted WHERE a = 2 ORDER BY 1; +UPDATE list_parted set b = c + a WHERE a = 2; +SELECT tableoid::regclass::text, * FROM list_parted WHERE a = 2 ORDER BY 1; + + +-- Test the case where BR UPDATE triggers change the partition key. +CREATE FUNCTION func_parted_mod_b() returns trigger as $$ +BEGIN + NEW.b = 2; -- This is changing partition key column. 
+ return NEW; +END $$ LANGUAGE plpgsql; +CREATE TRIGGER parted_mod_b before update on sub_part1 + for each row execute procedure func_parted_mod_b(); + +SELECT tableoid::regclass::text, * FROM list_parted ORDER BY 1, 2, 3, 4; + +-- This should do the tuple routing even though there is no explicit +-- partition-key update, because there is a trigger on sub_part1. +UPDATE list_parted set c = 70 WHERE b = 1; +SELECT tableoid::regclass::text, * FROM list_parted ORDER BY 1, 2, 3, 4; + +DROP TRIGGER parted_mod_b ON sub_part1; + +-- If BR DELETE trigger prevented DELETE from happening, we should also skip +-- the INSERT if that delete is part of UPDATE=>DELETE+INSERT. +CREATE OR REPLACE FUNCTION func_parted_mod_b() returns trigger as $$ +BEGIN + raise notice 'Trigger: Got OLD row %, but returning NULL', OLD; + return NULL; +END $$ LANGUAGE plpgsql; +CREATE TRIGGER trig_skip_delete before delete on sub_part2 + for each row execute procedure func_parted_mod_b(); +UPDATE list_parted set b = 1 WHERE c = 70; +SELECT tableoid::regclass::text, * FROM list_parted ORDER BY 1, 2, 3, 4; +-- Drop the trigger. Now the row should be moved. +DROP TRIGGER trig_skip_delete ON sub_part2; +UPDATE list_parted set b = 1 WHERE c = 70; +SELECT tableoid::regclass::text, * FROM list_parted ORDER BY 1, 2, 3, 4; +DROP FUNCTION func_parted_mod_b(); + +-- UPDATE partition-key with FROM clause. If join produces multiple output +-- rows for the same row to be modified, we should tuple-route the row only +-- once. There should not be any rows inserted. +CREATE TABLE non_parted (id int); +INSERT into non_parted VALUES (1), (1), (1), (2), (2), (2), (3), (3), (3); +UPDATE list_parted t1 set a = 2 FROM non_parted t2 WHERE t1.a = t2.id and a = 1; +SELECT tableoid::regclass::text, * FROM list_parted ORDER BY 1, 2, 3, 4; +DROP TABLE non_parted; + +-- Cleanup: list_parted no longer needed. 
+DROP TABLE list_parted; + +-- create custom operator class and hash function, for the same reason +-- explained in alter_table.sql +create or replace function dummy_hashint4(a int4, seed int8) returns int8 as +$$ begin return (a + seed); end; $$ language 'plpgsql' immutable; +create operator class custom_opclass for type int4 using hash as +operator 1 = , function 2 dummy_hashint4(int4, int8); + +create table hash_parted ( + a int, b int -) partition by range (a, b); -create table part_a_1_a_10 partition of range_parted for values from ('a', 1) to ('a', 10); -create table part_a_10_a_20 partition of range_parted for values from ('a', 10) to ('a', 20); -create table part_b_1_b_10 partition of range_parted for values from ('b', 1) to ('b', 10); -create table part_b_10_b_20 partition of range_parted for values from ('b', 10) to ('b', 20); -insert into part_a_1_a_10 values ('a', 1); -insert into part_b_10_b_20 values ('b', 10); +) partition by hash (a custom_opclass, b custom_opclass); +create table hpart1 partition of hash_parted for values with (modulus 2, remainder 1); +create table hpart2 partition of hash_parted for values with (modulus 4, remainder 2); +create table hpart3 partition of hash_parted for values with (modulus 8, remainder 0); +create table hpart4 partition of hash_parted for values with (modulus 8, remainder 4); +insert into hpart1 values (1, 1); +insert into hpart2 values (2, 5); +insert into hpart4 values (3, 4); -- fail -update part_a_1_a_10 set a = 'b' where a = 'a'; -update range_parted set b = b - 1 where b = 10; +update hpart1 set a = 3, b=4 where a = 1; +-- ok, row movement +update hash_parted set b = b - 1 where b = 1; -- ok -update range_parted set b = b + 1 where b = 10; +update hash_parted set b = b + 8 where b = 1; -- cleanup -drop table range_parted; +drop table hash_parted; +drop operator class custom_opclass using hash; +drop function dummy_hashint4(a int4, seed int8); diff --git a/src/test/regress/sql/vacuum.sql b/src/test/regress/sql/vacuum.sql index 7c5fb04917..9defa0d8b2 100644 --- a/src/test/regress/sql/vacuum.sql +++ b/src/test/regress/sql/vacuum.sql @@ -62,9 +62,6 @@ VACUUM FULL vactst; VACUUM (DISABLE_PAGE_SKIPPING) vaccluster; -DROP TABLE vaccluster; -DROP TABLE vactst; - -- partitioned table CREATE TABLE vacparted (a int, b char) PARTITION BY LIST (a); CREATE TABLE vacparted1 PARTITION OF vacparted FOR VALUES IN (1); @@ -73,4 +70,116 @@ UPDATE vacparted SET b = 'b'; VACUUM (ANALYZE) vacparted; VACUUM (FULL) vacparted; VACUUM (FREEZE) vacparted; + +-- check behavior with duplicate column mentions +VACUUM ANALYZE vacparted(a,b,a); +ANALYZE vacparted(a,b,b); + +-- multiple tables specified +VACUUM vaccluster, vactst; +VACUUM vacparted, does_not_exist; +VACUUM (FREEZE) vacparted, vaccluster, vactst; +VACUUM (FREEZE) does_not_exist, vaccluster; +VACUUM ANALYZE vactst, vacparted (a); +VACUUM ANALYZE vactst (does_not_exist), vacparted (b); +VACUUM FULL vacparted, vactst; +VACUUM FULL vactst, vacparted (a, b), vaccluster (i); +ANALYZE vactst, vacparted; +ANALYZE vacparted (b), vactst; +ANALYZE vactst, does_not_exist, vacparted; +ANALYZE vactst (i), vacparted (does_not_exist); + +-- parenthesized syntax for ANALYZE +ANALYZE (VERBOSE) does_not_exist; +ANALYZE (nonexistent-arg) does_not_exist; + +-- ensure argument order independence, and that SKIP_LOCKED on non-existing +-- relation still errors out. 
+ANALYZE (SKIP_LOCKED, VERBOSE) does_not_exist; +ANALYZE (VERBOSE, SKIP_LOCKED) does_not_exist; + +-- SKIP_LOCKED option +VACUUM (SKIP_LOCKED) vactst; +VACUUM (SKIP_LOCKED, FULL) vactst; +ANALYZE (SKIP_LOCKED) vactst; + +DROP TABLE vaccluster; +DROP TABLE vactst; DROP TABLE vacparted; + +-- relation ownership, WARNING logs generated as all are skipped. +CREATE TABLE vacowned (a int); +CREATE TABLE vacowned_parted (a int) PARTITION BY LIST (a); +CREATE TABLE vacowned_part1 PARTITION OF vacowned_parted FOR VALUES IN (1); +CREATE TABLE vacowned_part2 PARTITION OF vacowned_parted FOR VALUES IN (2); +CREATE ROLE regress_vacuum; +SET ROLE regress_vacuum; +-- Simple table +VACUUM vacowned; +ANALYZE vacowned; +VACUUM (ANALYZE) vacowned; +-- Catalog +VACUUM pg_catalog.pg_class; +ANALYZE pg_catalog.pg_class; +VACUUM (ANALYZE) pg_catalog.pg_class; +-- Shared catalog +VACUUM pg_catalog.pg_authid; +ANALYZE pg_catalog.pg_authid; +VACUUM (ANALYZE) pg_catalog.pg_authid; +-- Partitioned table and its partitions, nothing owned by other user. +-- Relations are not listed in a single command to test ownership +-- independently. +VACUUM vacowned_parted; +VACUUM vacowned_part1; +VACUUM vacowned_part2; +ANALYZE vacowned_parted; +ANALYZE vacowned_part1; +ANALYZE vacowned_part2; +VACUUM (ANALYZE) vacowned_parted; +VACUUM (ANALYZE) vacowned_part1; +VACUUM (ANALYZE) vacowned_part2; +RESET ROLE; +-- Partitioned table and one partition owned by other user. +ALTER TABLE vacowned_parted OWNER TO regress_vacuum; +ALTER TABLE vacowned_part1 OWNER TO regress_vacuum; +SET ROLE regress_vacuum; +VACUUM vacowned_parted; +VACUUM vacowned_part1; +VACUUM vacowned_part2; +ANALYZE vacowned_parted; +ANALYZE vacowned_part1; +ANALYZE vacowned_part2; +VACUUM (ANALYZE) vacowned_parted; +VACUUM (ANALYZE) vacowned_part1; +VACUUM (ANALYZE) vacowned_part2; +RESET ROLE; +-- Only one partition owned by other user. +ALTER TABLE vacowned_parted OWNER TO CURRENT_USER; +SET ROLE regress_vacuum; +VACUUM vacowned_parted; +VACUUM vacowned_part1; +VACUUM vacowned_part2; +ANALYZE vacowned_parted; +ANALYZE vacowned_part1; +ANALYZE vacowned_part2; +VACUUM (ANALYZE) vacowned_parted; +VACUUM (ANALYZE) vacowned_part1; +VACUUM (ANALYZE) vacowned_part2; +RESET ROLE; +-- Only partitioned table owned by other user. 
+ALTER TABLE vacowned_parted OWNER TO regress_vacuum; +ALTER TABLE vacowned_part1 OWNER TO CURRENT_USER; +SET ROLE regress_vacuum; +VACUUM vacowned_parted; +VACUUM vacowned_part1; +VACUUM vacowned_part2; +ANALYZE vacowned_parted; +ANALYZE vacowned_part1; +ANALYZE vacowned_part2; +VACUUM (ANALYZE) vacowned_parted; +VACUUM (ANALYZE) vacowned_part1; +VACUUM (ANALYZE) vacowned_part2; +RESET ROLE; +DROP TABLE vacowned; +DROP TABLE vacowned_parted; +DROP ROLE regress_vacuum; diff --git a/src/test/regress/sql/window.sql b/src/test/regress/sql/window.sql index e2a1a1cdd5..fc6d4cc903 100644 --- a/src/test/regress/sql/window.sql +++ b/src/test/regress/sql/window.sql @@ -189,6 +189,46 @@ SELECT sum(unique1) over (rows between 2 preceding and 2 following), unique1, four FROM tenk1 WHERE unique1 < 10; +SELECT sum(unique1) over (rows between 2 preceding and 2 following exclude no others), + unique1, four +FROM tenk1 WHERE unique1 < 10; + +SELECT sum(unique1) over (rows between 2 preceding and 2 following exclude current row), + unique1, four +FROM tenk1 WHERE unique1 < 10; + +SELECT sum(unique1) over (rows between 2 preceding and 2 following exclude group), + unique1, four +FROM tenk1 WHERE unique1 < 10; + +SELECT sum(unique1) over (rows between 2 preceding and 2 following exclude ties), + unique1, four +FROM tenk1 WHERE unique1 < 10; + +SELECT first_value(unique1) over (ORDER BY four rows between current row and 2 following exclude current row), + unique1, four +FROM tenk1 WHERE unique1 < 10; + +SELECT first_value(unique1) over (ORDER BY four rows between current row and 2 following exclude group), + unique1, four +FROM tenk1 WHERE unique1 < 10; + +SELECT first_value(unique1) over (ORDER BY four rows between current row and 2 following exclude ties), + unique1, four +FROM tenk1 WHERE unique1 < 10; + +SELECT last_value(unique1) over (ORDER BY four rows between current row and 2 following exclude current row), + unique1, four +FROM tenk1 WHERE unique1 < 10; + +SELECT last_value(unique1) over (ORDER BY four rows between current row and 2 following exclude group), + unique1, four +FROM tenk1 WHERE unique1 < 10; + +SELECT last_value(unique1) over (ORDER BY four rows between current row and 2 following exclude ties), + unique1, four +FROM tenk1 WHERE unique1 < 10; + SELECT sum(unique1) over (rows between 2 preceding and 1 preceding), unique1, four FROM tenk1 WHERE unique1 < 10; @@ -205,10 +245,17 @@ SELECT sum(unique1) over (w range between current row and unbounded following), unique1, four FROM tenk1 WHERE unique1 < 10 WINDOW w AS (order by four); --- fail: not implemented yet -SELECT sum(unique1) over (order by four range between 2::int8 preceding and 1::int2 preceding), +SELECT sum(unique1) over (w range between unbounded preceding and current row exclude current row), unique1, four -FROM tenk1 WHERE unique1 < 10; +FROM tenk1 WHERE unique1 < 10 WINDOW w AS (order by four); + +SELECT sum(unique1) over (w range between unbounded preceding and current row exclude group), + unique1, four +FROM tenk1 WHERE unique1 < 10 WINDOW w AS (order by four); + +SELECT sum(unique1) over (w range between unbounded preceding and current row exclude ties), + unique1, four +FROM tenk1 WHERE unique1 < 10 WINDOW w AS (order by four); SELECT first_value(unique1) over w, nth_value(unique1, 2) over w AS nth_2, @@ -230,9 +277,562 @@ SELECT * FROM v_window; SELECT pg_get_viewdef('v_window'); +CREATE OR REPLACE TEMP VIEW v_window AS + SELECT i, sum(i) over (order by i rows between 1 preceding and 1 following + exclude current row) 
as sum_rows FROM generate_series(1, 10) i; + +SELECT * FROM v_window; + +SELECT pg_get_viewdef('v_window'); + +CREATE OR REPLACE TEMP VIEW v_window AS + SELECT i, sum(i) over (order by i rows between 1 preceding and 1 following + exclude group) as sum_rows FROM generate_series(1, 10) i; + +SELECT * FROM v_window; + +SELECT pg_get_viewdef('v_window'); + +CREATE OR REPLACE TEMP VIEW v_window AS + SELECT i, sum(i) over (order by i rows between 1 preceding and 1 following + exclude ties) as sum_rows FROM generate_series(1, 10) i; + +SELECT * FROM v_window; + +SELECT pg_get_viewdef('v_window'); + +CREATE OR REPLACE TEMP VIEW v_window AS + SELECT i, sum(i) over (order by i rows between 1 preceding and 1 following + exclude no others) as sum_rows FROM generate_series(1, 10) i; + +SELECT * FROM v_window; + +SELECT pg_get_viewdef('v_window'); + +CREATE OR REPLACE TEMP VIEW v_window AS + SELECT i, sum(i) over (order by i groups between 1 preceding and 1 following) as sum_rows FROM generate_series(1, 10) i; + +SELECT * FROM v_window; + +SELECT pg_get_viewdef('v_window'); + +DROP VIEW v_window; + +CREATE TEMP VIEW v_window AS + SELECT i, min(i) over (order by i range between '1 day' preceding and '10 days' following) as min_i + FROM generate_series(now(), now()+'100 days'::interval, '1 hour') i; + +SELECT pg_get_viewdef('v_window'); + +-- RANGE offset PRECEDING/FOLLOWING tests + +SELECT sum(unique1) over (order by four range between 2::int8 preceding and 1::int2 preceding), + unique1, four +FROM tenk1 WHERE unique1 < 10; + +SELECT sum(unique1) over (order by four desc range between 2::int8 preceding and 1::int2 preceding), + unique1, four +FROM tenk1 WHERE unique1 < 10; + +SELECT sum(unique1) over (order by four range between 2::int8 preceding and 1::int2 preceding exclude no others), + unique1, four +FROM tenk1 WHERE unique1 < 10; + +SELECT sum(unique1) over (order by four range between 2::int8 preceding and 1::int2 preceding exclude current row), + unique1, four +FROM tenk1 WHERE unique1 < 10; + +SELECT sum(unique1) over (order by four range between 2::int8 preceding and 1::int2 preceding exclude group), + unique1, four +FROM tenk1 WHERE unique1 < 10; + +SELECT sum(unique1) over (order by four range between 2::int8 preceding and 1::int2 preceding exclude ties), + unique1, four +FROM tenk1 WHERE unique1 < 10; + +SELECT sum(unique1) over (order by four range between 2::int8 preceding and 6::int2 following exclude ties), + unique1, four +FROM tenk1 WHERE unique1 < 10; + +SELECT sum(unique1) over (order by four range between 2::int8 preceding and 6::int2 following exclude group), + unique1, four +FROM tenk1 WHERE unique1 < 10; + +SELECT sum(unique1) over (partition by four order by unique1 range between 5::int8 preceding and 6::int2 following), + unique1, four +FROM tenk1 WHERE unique1 < 10; + +SELECT sum(unique1) over (partition by four order by unique1 range between 5::int8 preceding and 6::int2 following + exclude current row),unique1, four +FROM tenk1 WHERE unique1 < 10; + +select sum(salary) over (order by enroll_date range between '1 year'::interval preceding and '1 year'::interval following), + salary, enroll_date from empsalary; + +select sum(salary) over (order by enroll_date desc range between '1 year'::interval preceding and '1 year'::interval following), + salary, enroll_date from empsalary; + +select sum(salary) over (order by enroll_date desc range between '1 year'::interval following and '1 year'::interval following), + salary, enroll_date from empsalary; + +select sum(salary) over (order 
by enroll_date range between '1 year'::interval preceding and '1 year'::interval following + exclude current row), salary, enroll_date from empsalary; + +select sum(salary) over (order by enroll_date range between '1 year'::interval preceding and '1 year'::interval following + exclude group), salary, enroll_date from empsalary; + +select sum(salary) over (order by enroll_date range between '1 year'::interval preceding and '1 year'::interval following + exclude ties), salary, enroll_date from empsalary; + +select first_value(salary) over(order by salary range between 1000 preceding and 1000 following), + lead(salary) over(order by salary range between 1000 preceding and 1000 following), + nth_value(salary, 1) over(order by salary range between 1000 preceding and 1000 following), + salary from empsalary; + +select last_value(salary) over(order by salary range between 1000 preceding and 1000 following), + lag(salary) over(order by salary range between 1000 preceding and 1000 following), + salary from empsalary; + +select first_value(salary) over(order by salary range between 1000 following and 3000 following + exclude current row), + lead(salary) over(order by salary range between 1000 following and 3000 following exclude ties), + nth_value(salary, 1) over(order by salary range between 1000 following and 3000 following + exclude ties), + salary from empsalary; + +select last_value(salary) over(order by salary range between 1000 following and 3000 following + exclude group), + lag(salary) over(order by salary range between 1000 following and 3000 following exclude group), + salary from empsalary; + +select first_value(salary) over(order by enroll_date range between unbounded preceding and '1 year'::interval following + exclude ties), + last_value(salary) over(order by enroll_date range between unbounded preceding and '1 year'::interval following), + salary, enroll_date from empsalary; + +select first_value(salary) over(order by enroll_date range between unbounded preceding and '1 year'::interval following + exclude ties), + last_value(salary) over(order by enroll_date range between unbounded preceding and '1 year'::interval following + exclude ties), + salary, enroll_date from empsalary; + +select first_value(salary) over(order by enroll_date range between unbounded preceding and '1 year'::interval following + exclude group), + last_value(salary) over(order by enroll_date range between unbounded preceding and '1 year'::interval following + exclude group), + salary, enroll_date from empsalary; + +select first_value(salary) over(order by enroll_date range between unbounded preceding and '1 year'::interval following + exclude current row), + last_value(salary) over(order by enroll_date range between unbounded preceding and '1 year'::interval following + exclude current row), + salary, enroll_date from empsalary; + +-- RANGE offset PRECEDING/FOLLOWING with null values +select x, y, + first_value(y) over w, + last_value(y) over w +from + (select x, x as y from generate_series(1,5) as x + union all select null, 42 + union all select null, 43) ss +window w as + (order by x asc nulls first range between 2 preceding and 2 following); + +select x, y, + first_value(y) over w, + last_value(y) over w +from + (select x, x as y from generate_series(1,5) as x + union all select null, 42 + union all select null, 43) ss +window w as + (order by x asc nulls last range between 2 preceding and 2 following); + +select x, y, + first_value(y) over w, + last_value(y) over w +from + (select x, x as y from 
generate_series(1,5) as x + union all select null, 42 + union all select null, 43) ss +window w as + (order by x desc nulls first range between 2 preceding and 2 following); + +select x, y, + first_value(y) over w, + last_value(y) over w +from + (select x, x as y from generate_series(1,5) as x + union all select null, 42 + union all select null, 43) ss +window w as + (order by x desc nulls last range between 2 preceding and 2 following); + +-- Check overflow behavior for various integer sizes + +select x, last_value(x) over (order by x::smallint range between current row and 2147450884 following) +from generate_series(32764, 32766) x; + +select x, last_value(x) over (order by x::smallint desc range between current row and 2147450885 following) +from generate_series(-32766, -32764) x; + +select x, last_value(x) over (order by x range between current row and 4 following) +from generate_series(2147483644, 2147483646) x; + +select x, last_value(x) over (order by x desc range between current row and 5 following) +from generate_series(-2147483646, -2147483644) x; + +select x, last_value(x) over (order by x range between current row and 4 following) +from generate_series(9223372036854775804, 9223372036854775806) x; + +select x, last_value(x) over (order by x desc range between current row and 5 following) +from generate_series(-9223372036854775806, -9223372036854775804) x; + +-- Test in_range for other numeric datatypes + +create temp table numerics( + id int, + f_float4 float4, + f_float8 float8, + f_numeric numeric +); + +insert into numerics values +(0, '-infinity', '-infinity', '-1000'), -- numeric type lacks infinities +(1, -3, -3, -3), +(2, -1, -1, -1), +(3, 0, 0, 0), +(4, 1.1, 1.1, 1.1), +(5, 1.12, 1.12, 1.12), +(6, 2, 2, 2), +(7, 100, 100, 100), +(8, 'infinity', 'infinity', '1000'), +(9, 'NaN', 'NaN', 'NaN'); + +select id, f_float4, first_value(id) over w, last_value(id) over w +from numerics +window w as (order by f_float4 range between + 1 preceding and 1 following); +select id, f_float4, first_value(id) over w, last_value(id) over w +from numerics +window w as (order by f_float4 range between + 1 preceding and 1.1::float4 following); +select id, f_float4, first_value(id) over w, last_value(id) over w +from numerics +window w as (order by f_float4 range between + 'inf' preceding and 'inf' following); +select id, f_float4, first_value(id) over w, last_value(id) over w +from numerics +window w as (order by f_float4 range between + 1.1 preceding and 'NaN' following); -- error, NaN disallowed + +select id, f_float8, first_value(id) over w, last_value(id) over w +from numerics +window w as (order by f_float8 range between + 1 preceding and 1 following); +select id, f_float8, first_value(id) over w, last_value(id) over w +from numerics +window w as (order by f_float8 range between + 1 preceding and 1.1::float8 following); +select id, f_float8, first_value(id) over w, last_value(id) over w +from numerics +window w as (order by f_float8 range between + 'inf' preceding and 'inf' following); +select id, f_float8, first_value(id) over w, last_value(id) over w +from numerics +window w as (order by f_float8 range between + 1.1 preceding and 'NaN' following); -- error, NaN disallowed + +select id, f_numeric, first_value(id) over w, last_value(id) over w +from numerics +window w as (order by f_numeric range between + 1 preceding and 1 following); +select id, f_numeric, first_value(id) over w, last_value(id) over w +from numerics +window w as (order by f_numeric range between + 1 preceding and 
1.1::numeric following); +select id, f_numeric, first_value(id) over w, last_value(id) over w +from numerics +window w as (order by f_numeric range between + 1 preceding and 1.1::float8 following); -- currently unsupported +select id, f_numeric, first_value(id) over w, last_value(id) over w +from numerics +window w as (order by f_numeric range between + 1.1 preceding and 'NaN' following); -- error, NaN disallowed + +-- Test in_range for other datetime datatypes + +create temp table datetimes( + id int, + f_time time, + f_timetz timetz, + f_interval interval, + f_timestamptz timestamptz, + f_timestamp timestamp +); + +insert into datetimes values +(1, '11:00', '11:00 BST', '1 year', '2000-10-19 10:23:54+01', '2000-10-19 10:23:54'), +(2, '12:00', '12:00 BST', '2 years', '2001-10-19 10:23:54+01', '2001-10-19 10:23:54'), +(3, '13:00', '13:00 BST', '3 years', '2001-10-19 10:23:54+01', '2001-10-19 10:23:54'), +(4, '14:00', '14:00 BST', '4 years', '2002-10-19 10:23:54+01', '2002-10-19 10:23:54'), +(5, '15:00', '15:00 BST', '5 years', '2003-10-19 10:23:54+01', '2003-10-19 10:23:54'), +(6, '15:00', '15:00 BST', '5 years', '2004-10-19 10:23:54+01', '2004-10-19 10:23:54'), +(7, '17:00', '17:00 BST', '7 years', '2005-10-19 10:23:54+01', '2005-10-19 10:23:54'), +(8, '18:00', '18:00 BST', '8 years', '2006-10-19 10:23:54+01', '2006-10-19 10:23:54'), +(9, '19:00', '19:00 BST', '9 years', '2007-10-19 10:23:54+01', '2007-10-19 10:23:54'), +(10, '20:00', '20:00 BST', '10 years', '2008-10-19 10:23:54+01', '2008-10-19 10:23:54'); + +select id, f_time, first_value(id) over w, last_value(id) over w +from datetimes +window w as (order by f_time range between + '70 min'::interval preceding and '2 hours'::interval following); + +select id, f_time, first_value(id) over w, last_value(id) over w +from datetimes +window w as (order by f_time desc range between + '70 min' preceding and '2 hours' following); + +select id, f_timetz, first_value(id) over w, last_value(id) over w +from datetimes +window w as (order by f_timetz range between + '70 min'::interval preceding and '2 hours'::interval following); + +select id, f_timetz, first_value(id) over w, last_value(id) over w +from datetimes +window w as (order by f_timetz desc range between + '70 min' preceding and '2 hours' following); + +select id, f_interval, first_value(id) over w, last_value(id) over w +from datetimes +window w as (order by f_interval range between + '1 year'::interval preceding and '1 year'::interval following); + +select id, f_interval, first_value(id) over w, last_value(id) over w +from datetimes +window w as (order by f_interval desc range between + '1 year' preceding and '1 year' following); + +select id, f_timestamptz, first_value(id) over w, last_value(id) over w +from datetimes +window w as (order by f_timestamptz range between + '1 year'::interval preceding and '1 year'::interval following); + +select id, f_timestamptz, first_value(id) over w, last_value(id) over w +from datetimes +window w as (order by f_timestamptz desc range between + '1 year' preceding and '1 year' following); + +select id, f_timestamp, first_value(id) over w, last_value(id) over w +from datetimes +window w as (order by f_timestamp range between + '1 year'::interval preceding and '1 year'::interval following); + +select id, f_timestamp, first_value(id) over w, last_value(id) over w +from datetimes +window w as (order by f_timestamp desc range between + '1 year' preceding and '1 year' following); + +-- RANGE offset PRECEDING/FOLLOWING error cases +select sum(salary) over 
(order by enroll_date, salary range between '1 year'::interval preceding and '2 years'::interval following + exclude ties), salary, enroll_date from empsalary; + +select sum(salary) over (range between '1 year'::interval preceding and '2 years'::interval following + exclude ties), salary, enroll_date from empsalary; + +select sum(salary) over (order by depname range between '1 year'::interval preceding and '2 years'::interval following + exclude ties), salary, enroll_date from empsalary; + +select max(enroll_date) over (order by enroll_date range between 1 preceding and 2 following + exclude ties), salary, enroll_date from empsalary; + +select max(enroll_date) over (order by salary range between -1 preceding and 2 following + exclude ties), salary, enroll_date from empsalary; + +select max(enroll_date) over (order by salary range between 1 preceding and -2 following + exclude ties), salary, enroll_date from empsalary; + +select max(enroll_date) over (order by salary range between '1 year'::interval preceding and '2 years'::interval following + exclude ties), salary, enroll_date from empsalary; + +select max(enroll_date) over (order by enroll_date range between '1 year'::interval preceding and '-2 years'::interval following + exclude ties), salary, enroll_date from empsalary; + +-- GROUPS tests + +SELECT sum(unique1) over (order by four groups between unbounded preceding and current row), + unique1, four +FROM tenk1 WHERE unique1 < 10; + +SELECT sum(unique1) over (order by four groups between unbounded preceding and unbounded following), + unique1, four +FROM tenk1 WHERE unique1 < 10; + +SELECT sum(unique1) over (order by four groups between current row and unbounded following), + unique1, four +FROM tenk1 WHERE unique1 < 10; + +SELECT sum(unique1) over (order by four groups between 1 preceding and unbounded following), + unique1, four +FROM tenk1 WHERE unique1 < 10; + +SELECT sum(unique1) over (order by four groups between 1 following and unbounded following), + unique1, four +FROM tenk1 WHERE unique1 < 10; + +SELECT sum(unique1) over (order by four groups between unbounded preceding and 2 following), + unique1, four +FROM tenk1 WHERE unique1 < 10; + +SELECT sum(unique1) over (order by four groups between 2 preceding and 1 preceding), + unique1, four +FROM tenk1 WHERE unique1 < 10; + +SELECT sum(unique1) over (order by four groups between 2 preceding and 1 following), + unique1, four +FROM tenk1 WHERE unique1 < 10; + +SELECT sum(unique1) over (order by four groups between 0 preceding and 0 following), + unique1, four +FROM tenk1 WHERE unique1 < 10; + +SELECT sum(unique1) over (order by four groups between 2 preceding and 1 following + exclude current row), unique1, four +FROM tenk1 WHERE unique1 < 10; + +SELECT sum(unique1) over (order by four groups between 2 preceding and 1 following + exclude group), unique1, four +FROM tenk1 WHERE unique1 < 10; + +SELECT sum(unique1) over (order by four groups between 2 preceding and 1 following + exclude ties), unique1, four +FROM tenk1 WHERE unique1 < 10; + +SELECT sum(unique1) over (partition by ten + order by four groups between 0 preceding and 0 following),unique1, four, ten +FROM tenk1 WHERE unique1 < 10; + +SELECT sum(unique1) over (partition by ten + order by four groups between 0 preceding and 0 following exclude current row), unique1, four, ten +FROM tenk1 WHERE unique1 < 10; + +SELECT sum(unique1) over (partition by ten + order by four groups between 0 preceding and 0 following exclude group), unique1, four, ten +FROM tenk1 WHERE unique1 < 
10; + +SELECT sum(unique1) over (partition by ten + order by four groups between 0 preceding and 0 following exclude ties), unique1, four, ten +FROM tenk1 WHERE unique1 < 10; + +select first_value(salary) over(order by enroll_date groups between 1 preceding and 1 following), + lead(salary) over(order by enroll_date groups between 1 preceding and 1 following), + nth_value(salary, 1) over(order by enroll_date groups between 1 preceding and 1 following), + salary, enroll_date from empsalary; + +select last_value(salary) over(order by enroll_date groups between 1 preceding and 1 following), + lag(salary) over(order by enroll_date groups between 1 preceding and 1 following), + salary, enroll_date from empsalary; + +select first_value(salary) over(order by enroll_date groups between 1 following and 3 following + exclude current row), + lead(salary) over(order by enroll_date groups between 1 following and 3 following exclude ties), + nth_value(salary, 1) over(order by enroll_date groups between 1 following and 3 following + exclude ties), + salary, enroll_date from empsalary; + +select last_value(salary) over(order by enroll_date groups between 1 following and 3 following + exclude group), + lag(salary) over(order by enroll_date groups between 1 following and 3 following exclude group), + salary, enroll_date from empsalary; + +-- Show differences in offset interpretation between ROWS, RANGE, and GROUPS +WITH cte (x) AS ( + SELECT * FROM generate_series(1, 35, 2) +) +SELECT x, (sum(x) over w) +FROM cte +WINDOW w AS (ORDER BY x rows between 1 preceding and 1 following); + +WITH cte (x) AS ( + SELECT * FROM generate_series(1, 35, 2) +) +SELECT x, (sum(x) over w) +FROM cte +WINDOW w AS (ORDER BY x range between 1 preceding and 1 following); + +WITH cte (x) AS ( + SELECT * FROM generate_series(1, 35, 2) +) +SELECT x, (sum(x) over w) +FROM cte +WINDOW w AS (ORDER BY x groups between 1 preceding and 1 following); + +WITH cte (x) AS ( + select 1 union all select 1 union all select 1 union all + SELECT * FROM generate_series(5, 49, 2) +) +SELECT x, (sum(x) over w) +FROM cte +WINDOW w AS (ORDER BY x rows between 1 preceding and 1 following); + +WITH cte (x) AS ( + select 1 union all select 1 union all select 1 union all + SELECT * FROM generate_series(5, 49, 2) +) +SELECT x, (sum(x) over w) +FROM cte +WINDOW w AS (ORDER BY x range between 1 preceding and 1 following); + +WITH cte (x) AS ( + select 1 union all select 1 union all select 1 union all + SELECT * FROM generate_series(5, 49, 2) +) +SELECT x, (sum(x) over w) +FROM cte +WINDOW w AS (ORDER BY x groups between 1 preceding and 1 following); + -- with UNION SELECT count(*) OVER (PARTITION BY four) FROM (SELECT * FROM tenk1 UNION ALL SELECT * FROM tenk2)s LIMIT 0; +-- check some degenerate cases +create temp table t1 (f1 int, f2 int8); +insert into t1 values (1,1),(1,2),(2,2); + +select f1, sum(f1) over (partition by f1 + range between 1 preceding and 1 following) +from t1 where f1 = f2; -- error, must have order by +explain (costs off) +select f1, sum(f1) over (partition by f1 order by f2 + range between 1 preceding and 1 following) +from t1 where f1 = f2; +select f1, sum(f1) over (partition by f1 order by f2 + range between 1 preceding and 1 following) +from t1 where f1 = f2; +select f1, sum(f1) over (partition by f1, f1 order by f2 + range between 2 preceding and 1 preceding) +from t1 where f1 = f2; +select f1, sum(f1) over (partition by f1, f2 order by f2 + range between 1 following and 2 following) +from t1 where f1 = f2; + +select f1, sum(f1) over 
(partition by f1 + groups between 1 preceding and 1 following) +from t1 where f1 = f2; -- error, must have order by +explain (costs off) +select f1, sum(f1) over (partition by f1 order by f2 + groups between 1 preceding and 1 following) +from t1 where f1 = f2; +select f1, sum(f1) over (partition by f1 order by f2 + groups between 1 preceding and 1 following) +from t1 where f1 = f2; +select f1, sum(f1) over (partition by f1, f1 order by f2 + groups between 2 preceding and 1 preceding) +from t1 where f1 = f2; +select f1, sum(f1) over (partition by f1, f2 order by f2 + groups between 1 following and 2 following) +from t1 where f1 = f2; + -- ordering by a non-integer constant is allowed SELECT rank() OVER (ORDER BY length('abc')); @@ -292,6 +892,22 @@ SELECT * FROM FROM empsalary) emp WHERE depname = 'sales'; +-- Test Sort node collapsing +EXPLAIN (COSTS OFF) +SELECT * FROM + (SELECT depname, + sum(salary) OVER (PARTITION BY depname order by empno) depsalary, + min(salary) OVER (PARTITION BY depname, empno order by enroll_date) depminsalary + FROM empsalary) emp +WHERE depname = 'sales'; + +-- Test Sort node reordering +EXPLAIN (COSTS OFF) +SELECT + lead(1) OVER (PARTITION BY depname ORDER BY salary, enroll_date), + lag(1) OVER (PARTITION BY depname ORDER BY salary,enroll_date,empno) +FROM empsalary; + -- cleanup DROP TABLE empsalary; diff --git a/src/test/regress/sql/with.sql b/src/test/regress/sql/with.sql index 8ae5184d0f..f85645efde 100644 --- a/src/test/regress/sql/with.sql +++ b/src/test/regress/sql/with.sql @@ -805,46 +805,46 @@ SELECT * FROM t LIMIT 10; SELECT * FROM y; -- data-modifying WITH containing INSERT...ON CONFLICT DO UPDATE -CREATE TABLE z AS SELECT i AS k, (i || ' v')::text v FROM generate_series(1, 16, 3) i; -ALTER TABLE z ADD UNIQUE (k); +CREATE TABLE withz AS SELECT i AS k, (i || ' v')::text v FROM generate_series(1, 16, 3) i; +ALTER TABLE withz ADD UNIQUE (k); WITH t AS ( - INSERT INTO z SELECT i, 'insert' + INSERT INTO withz SELECT i, 'insert' FROM generate_series(0, 16) i - ON CONFLICT (k) DO UPDATE SET v = z.v || ', now update' + ON CONFLICT (k) DO UPDATE SET v = withz.v || ', now update' RETURNING * ) SELECT * FROM t JOIN y ON t.k = y.a ORDER BY a, k; -- Test EXCLUDED.* reference within CTE WITH aa AS ( - INSERT INTO z VALUES(1, 5) ON CONFLICT (k) DO UPDATE SET v = EXCLUDED.v - WHERE z.k != EXCLUDED.k + INSERT INTO withz VALUES(1, 5) ON CONFLICT (k) DO UPDATE SET v = EXCLUDED.v + WHERE withz.k != EXCLUDED.k RETURNING * ) SELECT * FROM aa; -- New query/snapshot demonstrates side-effects of previous query. 
-SELECT * FROM z ORDER BY k; +SELECT * FROM withz ORDER BY k; -- -- Ensure subqueries within the update clause work, even if they -- reference outside values -- WITH aa AS (SELECT 1 a, 2 b) -INSERT INTO z VALUES(1, 'insert') +INSERT INTO withz VALUES(1, 'insert') ON CONFLICT (k) DO UPDATE SET v = (SELECT b || ' update' FROM aa WHERE a = 1 LIMIT 1); WITH aa AS (SELECT 1 a, 2 b) -INSERT INTO z VALUES(1, 'insert') -ON CONFLICT (k) DO UPDATE SET v = ' update' WHERE z.k = (SELECT a FROM aa); +INSERT INTO withz VALUES(1, 'insert') +ON CONFLICT (k) DO UPDATE SET v = ' update' WHERE withz.k = (SELECT a FROM aa); WITH aa AS (SELECT 1 a, 2 b) -INSERT INTO z VALUES(1, 'insert') +INSERT INTO withz VALUES(1, 'insert') ON CONFLICT (k) DO UPDATE SET v = (SELECT b || ' update' FROM aa WHERE a = 1 LIMIT 1); WITH aa AS (SELECT 'a' a, 'b' b UNION ALL SELECT 'a' a, 'b' b) -INSERT INTO z VALUES(1, 'insert') +INSERT INTO withz VALUES(1, 'insert') ON CONFLICT (k) DO UPDATE SET v = (SELECT b || ' update' FROM aa WHERE a = 'a' LIMIT 1); WITH aa AS (SELECT 1 a, 2 b) -INSERT INTO z VALUES(1, (SELECT b || ' insert' FROM aa WHERE a = 1 )) +INSERT INTO withz VALUES(1, (SELECT b || ' insert' FROM aa WHERE a = 1 )) ON CONFLICT (k) DO UPDATE SET v = (SELECT b || ' update' FROM aa WHERE a = 1 LIMIT 1); -- Update a row more than once, in different parts of a wCTE. That is @@ -853,14 +853,14 @@ ON CONFLICT (k) DO UPDATE SET v = (SELECT b || ' update' FROM aa WHERE a = 1 LIM WITH simpletup AS ( SELECT 2 k, 'Green' v), upsert_cte AS ( - INSERT INTO z VALUES(2, 'Blue') ON CONFLICT (k) DO - UPDATE SET (k, v) = (SELECT k, v FROM simpletup WHERE simpletup.k = z.k) + INSERT INTO withz VALUES(2, 'Blue') ON CONFLICT (k) DO + UPDATE SET (k, v) = (SELECT k, v FROM simpletup WHERE simpletup.k = withz.k) RETURNING k, v) -INSERT INTO z VALUES(2, 'Red') ON CONFLICT (k) DO -UPDATE SET (k, v) = (SELECT k, v FROM upsert_cte WHERE upsert_cte.k = z.k) +INSERT INTO withz VALUES(2, 'Red') ON CONFLICT (k) DO +UPDATE SET (k, v) = (SELECT k, v FROM upsert_cte WHERE upsert_cte.k = withz.k) RETURNING k, v; -DROP TABLE z; +DROP TABLE withz; -- check that run to completion happens in proper ordering @@ -1030,4 +1030,12 @@ create table foo (with ordinality); -- fail, WITH is a reserved word with ordinality as (select 1 as x) select * from ordinality; -- check sane response to attempt to modify CTE relation -WITH d AS (SELECT 42) INSERT INTO d VALUES (1); +WITH test AS (SELECT 42) INSERT INTO test VALUES (1); + +-- check response to attempt to modify table with same name as a CTE (perhaps +-- surprisingly it works, because CTEs don't hide tables from data-modifying +-- statements) +create temp table test (i int); +with test as (select 42) insert into test select * from test; +select * from test; +drop table test; diff --git a/src/test/regress/sql/write_parallel.sql b/src/test/regress/sql/write_parallel.sql new file mode 100644 index 0000000000..78b479cedf --- /dev/null +++ b/src/test/regress/sql/write_parallel.sql @@ -0,0 +1,42 @@ +-- +-- PARALLEL +-- + +-- Serializable isolation would disable parallel query, so explicitly use an +-- arbitrary other level. 
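-- (Background note, not part of the patch: at the time of this change the
-- planner does not generate parallel plans inside a SERIALIZABLE transaction,
-- since parallel workers cannot participate in SSI predicate locking, so the
-- tests below run under REPEATABLE READ to stay parallel-eligible.)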
+begin isolation level repeatable read; + +-- encourage use of parallel plans +set parallel_setup_cost=0; +set parallel_tuple_cost=0; +set min_parallel_table_scan_size=0; +set max_parallel_workers_per_gather=4; + +-- +-- Test write operations that have an underlying query that is eligible +-- for parallel plans +-- +explain (costs off) create table parallel_write as + select length(stringu1) from tenk1 group by length(stringu1); +create table parallel_write as + select length(stringu1) from tenk1 group by length(stringu1); +drop table parallel_write; + +explain (costs off) select length(stringu1) into parallel_write + from tenk1 group by length(stringu1); +select length(stringu1) into parallel_write + from tenk1 group by length(stringu1); +drop table parallel_write; + +explain (costs off) create materialized view parallel_mat_view as + select length(stringu1) from tenk1 group by length(stringu1); +create materialized view parallel_mat_view as + select length(stringu1) from tenk1 group by length(stringu1); +drop materialized view parallel_mat_view; + +prepare prep_stmt as select length(stringu1) from tenk1 group by length(stringu1); +explain (costs off) create table parallel_write as execute prep_stmt; +create table parallel_write as execute prep_stmt; +drop table parallel_write; + +rollback; diff --git a/src/test/regress/sql/xml.sql b/src/test/regress/sql/xml.sql index eb4687fb09..3b91b56d5a 100644 --- a/src/test/regress/sql/xml.sql +++ b/src/test/regress/sql/xml.sql @@ -188,6 +188,43 @@ SELECT xpath('count(//*)=0', ''); SELECT xpath('count(//*)=3', ''); SELECT xpath('name(/*)', ''); SELECT xpath('/nosuchtag', ''); +SELECT xpath('root', ''); + +-- Round-trip non-ASCII data through xpath(). +DO $$ +DECLARE + xml_declaration text := ''; + degree_symbol text; + res xml[]; +BEGIN + -- Per the documentation, except when the server encoding is UTF8, xpath() + -- may not work on non-ASCII data. The untranslatable_character and + -- undefined_function traps below, currently dead code, will become relevant + -- if we remove this limitation. 
+ IF current_setting('server_encoding') <> 'UTF8' THEN + RAISE LOG 'skip: encoding % unsupported for xpath', + current_setting('server_encoding'); + RETURN; + END IF; + + degree_symbol := convert_from('\xc2b0', 'UTF8'); + res := xpath('text()', (xml_declaration || + '' || degree_symbol || '')::xml); + IF degree_symbol <> res[1]::text THEN + RAISE 'expected % (%), got % (%)', + degree_symbol, convert_to(degree_symbol, 'UTF8'), + res[1], convert_to(res[1]::text, 'UTF8'); + END IF; +EXCEPTION + -- character with byte sequence 0xc2 0xb0 in encoding "UTF8" has no equivalent in encoding "LATIN8" + WHEN untranslatable_character + -- default conversion function for encoding "UTF8" to "MULE_INTERNAL" does not exist + OR undefined_function + -- unsupported XML feature + OR feature_not_supported THEN + RAISE LOG 'skip: %', SQLERRM; +END +$$; -- Test xmlexists and xpath_exists SELECT xmlexists('//town[text() = ''Toronto'']' PASSING BY REF 'Bidford-on-AvonCwmbranBristol'); @@ -313,7 +350,7 @@ SELECT xmltable.* PASSING data COLUMNS id int PATH '@id', _id FOR ORDINALITY, - country_name text PATH 'COUNTRY_NAME' NOT NULL, + country_name text PATH 'COUNTRY_NAME/text()' NOT NULL, country_id text PATH 'COUNTRY_ID', region_id int PATH 'REGION_ID', size float PATH 'SIZE', @@ -326,7 +363,7 @@ CREATE VIEW xmltableview1 AS SELECT xmltable.* PASSING data COLUMNS id int PATH '@id', _id FOR ORDINALITY, - country_name text PATH 'COUNTRY_NAME' NOT NULL, + country_name text PATH 'COUNTRY_NAME/text()' NOT NULL, country_id text PATH 'COUNTRY_ID', region_id int PATH 'REGION_ID', size float PATH 'SIZE', @@ -387,7 +424,7 @@ SELECT * FROM xmltable('/root' passing 'a1aa2aa1aa2a bbbbxxxcccc' COLUMNS element text PATH 'element/text()'); -- should fail -- CDATA test -select * from xmltable('r' passing ' &"<>!foo]]>2' columns c text); +select * from xmltable('d/r' passing ' &"<>!foo]]>2' columns c text); -- XML builtin entities SELECT * FROM xmltable('/x/a' PASSING ''"&<>' COLUMNS ent text); diff --git a/src/test/ssl/Makefile b/src/test/ssl/Makefile index e4437d19c3..97389c90f8 100644 --- a/src/test/ssl/Makefile +++ b/src/test/ssl/Makefile @@ -2,7 +2,7 @@ # # Makefile for src/test/ssl # -# Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group +# Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group # Portions Copyright (c) 1994, Regents of the University of California # # src/test/ssl/Makefile @@ -13,6 +13,8 @@ subdir = src/test/ssl top_builddir = ../../.. include $(top_builddir)/src/Makefile.global +export with_openssl + CERTIFICATES := server_ca server-cn-and-alt-names \ server-cn-only server-single-alt-name server-multiple-alt-names \ server-no-names server-revoked server-ss \ @@ -20,6 +22,7 @@ CERTIFICATES := server_ca server-cn-and-alt-names \ root_ca SSLFILES := $(CERTIFICATES:%=ssl/%.key) $(CERTIFICATES:%=ssl/%.crt) \ + ssl/server-password.key \ ssl/client.crl ssl/server.crl ssl/root.crl \ ssl/both-cas-1.crt ssl/both-cas-2.crt \ ssl/root+server_ca.crt ssl/root+server.crl \ @@ -29,7 +32,7 @@ SSLFILES := $(CERTIFICATES:%=ssl/%.key) $(CERTIFICATES:%=ssl/%.crt) \ # This target generates all the key and certificate files. sslfiles: $(SSLFILES) -# Openssl requires a directory to put all generated certificates in. We don't +# OpenSSL requires a directory to put all generated certificates in. We don't # use this for anything, but we need a location. ssl/new_certs_dir: mkdir ssl/new_certs_dir @@ -69,6 +72,10 @@ ssl/server-ss.crt: ssl/server-cn-only.key ssl/server-cn-only.crt server-cn-only. 
openssl x509 -req -days 10000 -in ssl/server-ss.csr -signkey ssl/server-cn-only.key -out ssl/server-ss.crt -extensions v3_req -extfile server-cn-only.config rm ssl/server-ss.csr +# Password-protected version of server-cn-only.key +ssl/server-password.key: ssl/server-cn-only.key + openssl rsa -des -in $< -out $@ -passout 'pass:secret1' + # Client certificate, signed by the client CA: ssl/client.crt: ssl/client.key ssl/client_ca.crt openssl req -new -key ssl/client.key -out ssl/client.csr -config client.config @@ -132,3 +139,6 @@ clean distclean maintainer-clean: check: $(prove_check) + +installcheck: + $(prove_installcheck) diff --git a/src/test/ssl/README b/src/test/ssl/README index 50fa14e287..84baa478ce 100644 --- a/src/test/ssl/README +++ b/src/test/ssl/README @@ -7,15 +7,27 @@ This directory contains a test suite for SSL support. It tests both client-side functionality, i.e. verifying server certificates, and server-side functionality, i.e. certificate authorization. +CAUTION: The test server run by this test is configured to listen for +TCP connections on localhost. Any user on the same host is able to +log in to the test server while the tests are running. Do not run this +suite on a multi-user system where you don't trust all local users! + Running the tests ================= +NOTE: You must have given the --enable-tap-tests argument to configure. + +Run make check +or + make installcheck +You can use "make installcheck" if you previously did "make install". +In that case, the code in the installation tree is tested. With +"make check", a temporary installation tree is built from the current +sources and then tested. -NOTE: This creates a temporary installation, and sets it up to listen for TCP -connections on localhost. Any user on the same host is allowed to log in to -the test installation while the tests are running. Do not run this suite -on a multi-user system where you don't trust all local users! +Either way, this test initializes, starts, and stops a test Postgres +cluster that is accessible to other local users! Certificates ============ @@ -43,6 +55,9 @@ server-no-names server-ss same as server-cn-only, but self-signed. +server-password + same as server-cn-only, but password-protected. + client a client certificate, for user "ssltestuser". Signed by client_ca. diff --git a/src/test/ssl/ServerSetup.pm b/src/test/ssl/ServerSetup.pm index f63c81cfc6..3b451a360a 100644 --- a/src/test/ssl/ServerSetup.pm +++ b/src/test/ssl/ServerSetup.pm @@ -26,9 +26,48 @@ use Test::More; use Exporter 'import'; our @EXPORT = qw( - configure_test_server_for_ssl switch_server_cert + configure_test_server_for_ssl + switch_server_cert + test_connect_fails + test_connect_ok ); +# Define a couple of helper functions to test connecting to the server. + +# The first argument is a base connection string to use for connection. +# The second argument is a complementary connection string. 
+sub test_connect_ok +{ + local $Test::Builder::Level = $Test::Builder::Level + 1; + + my ($common_connstr, $connstr, $test_name) = @_; + + my $cmd = [ + 'psql', '-X', '-A', '-t', '-c', + "SELECT \$\$connected with $connstr\$\$", + '-d', "$common_connstr $connstr" + ]; + + command_ok($cmd, $test_name); + return; +} + +sub test_connect_fails +{ + local $Test::Builder::Level = $Test::Builder::Level + 1; + + my ($common_connstr, $connstr, $expected_stderr, $test_name) = @_; + + my $cmd = [ + 'psql', '-X', '-A', '-t', '-c', + "SELECT \$\$connected with $connstr\$\$", + '-d', "$common_connstr $connstr" + ]; + + command_fails_like($cmd, $expected_stderr, $test_name); + return; +} + # Copy a set of files, taking into account wildcards sub copy_files { @@ -42,12 +81,12 @@ sub copy_files copy($orig_file, "$dest/$base_file") or die "Could not copy $orig_file to $dest"; } + return; } sub configure_test_server_for_ssl { - my $node = $_[0]; - my $serverhost = $_[1]; + my ($node, $serverhost, $authmethod, $password, $password_enc) = @_; my $pgdata = $node->data_dir; @@ -57,6 +96,17 @@ sub configure_test_server_for_ssl $node->psql('postgres', "CREATE DATABASE trustdb"); $node->psql('postgres', "CREATE DATABASE certdb"); + # Update password of each user as needed. + if (defined($password)) + { + $node->psql('postgres', + "SET password_encryption='$password_enc'; ALTER USER ssltestuser PASSWORD '$password';" + ); + $node->psql('postgres', + "SET password_encryption='$password_enc'; ALTER USER anotheruser PASSWORD '$password';" + ); + } + # enable logging etc. open my $conf, '>>', "$pgdata/postgresql.conf"; print $conf "fsync=off\n"; @@ -74,7 +124,7 @@ sub configure_test_server_for_ssl open my $sslconf, '>', "$pgdata/sslconfig.conf"; close $sslconf; -# Copy all server certificates and keys, and client root cert, to the data dir + # Copy all server certificates and keys, and client root cert, to the data dir copy_files("ssl/server-*.crt", $pgdata); copy_files("ssl/server-*.key", $pgdata); chmod(0600, glob "$pgdata/server-*.key") or die $!; @@ -86,7 +136,9 @@ sub configure_test_server_for_ssl $node->restart; # Change pg_hba after restart because hostssl requires ssl=on - configure_hba_for_ssl($node, $serverhost); + configure_hba_for_ssl($node, $serverhost, $authmethod); + + return; } # Change the configuration to use given server cert file, and reload @@ -98,9 +150,6 @@ sub switch_server_cert my $cafile = $_[2] || "root+client_ca"; my $pgdata = $node->data_dir; - note - "reloading server with certfile \"$certfile\" and cafile \"$cafile\""; - open my $sslconf, '>', "$pgdata/sslconfig.conf"; print $sslconf "ssl=on\n"; print $sslconf "ssl_ca_file='$cafile.crt'\n"; @@ -109,29 +158,32 @@ sub switch_server_cert print $sslconf "ssl_crl_file='root+client.crl'\n"; close $sslconf; - $node->reload; + $node->restart; + return; } sub configure_hba_for_ssl { - my $node = $_[0]; - my $serverhost = $_[1]; - my $pgdata = $node->data_dir; - - # Only accept SSL connections from localhost. Our tests don't depend on this - # but seems best to keep it as narrow as possible for security reasons. - # - # When connecting to certdb, also check the client certificate. + my ($node, $serverhost, $authmethod) = @_; + my $pgdata = $node->data_dir; + + # Only accept SSL connections from localhost. Our tests don't depend on this + # but seems best to keep it as narrow as possible for security reasons. + # + # When connecting to certdb, also check the client certificate. 
open my $hba, '>', "$pgdata/pg_hba.conf"; print $hba -"# TYPE DATABASE USER ADDRESS METHOD\n"; + "# TYPE DATABASE USER ADDRESS METHOD\n"; print $hba -"hostssl trustdb ssltestuser $serverhost/32 trust\n"; + "hostssl trustdb all $serverhost/32 $authmethod\n"; print $hba -"hostssl trustdb ssltestuser ::1/128 trust\n"; + "hostssl trustdb all ::1/128 $authmethod\n"; print $hba -"hostssl certdb ssltestuser $serverhost/32 cert\n"; + "hostssl certdb all $serverhost/32 cert\n"; print $hba -"hostssl certdb ssltestuser ::1/128 cert\n"; + "hostssl certdb all ::1/128 cert\n"; close $hba; + return; } + +1; diff --git a/src/test/ssl/ssl/.gitignore b/src/test/ssl/ssl/.gitignore index 10b74f0848..af753d4c7d 100644 --- a/src/test/ssl/ssl/.gitignore +++ b/src/test/ssl/ssl/.gitignore @@ -1,3 +1,3 @@ /*.old /new_certs_dir/ -/client_tmp.key +/client*_tmp.key diff --git a/src/test/ssl/ssl/server-password.key b/src/test/ssl/ssl/server-password.key new file mode 100644 index 0000000000..adcd38ab88 --- /dev/null +++ b/src/test/ssl/ssl/server-password.key @@ -0,0 +1,18 @@ +-----BEGIN RSA PRIVATE KEY----- +Proc-Type: 4,ENCRYPTED +DEK-Info: DES-CBC,2FAEFD1C1B2C881C + +PGi9r3pm05iUwz5QbZik+ZNu0fHNaX8LJFZqpOhg0TV38csLtQ2PRjZ0Q/diBlVT +SD8JJnIvwPoIWXyMMTax/krFL0CpbFqgAzD4CEgfWxGNhwnMD1DkNaYp/UF/NfuF +7TqXomUlcH/pVaZlu7G0wrIo5rnjef70I7GEY2vwT5adSLsUBAgrs/u3MAAx/Wh4 +PkVxZELmyiH/8MdIevodjRcJrgIzRheEph39eHrWKgWeSbO0DEQK91vv3prICwo2 +w2iU0Zohf92QuquA2MKZWruCHb4A4HusUZf3Zc14Yueu/HyztSrHmFeBp0amlWep +/o6mx274XVj7IpanOPPM4qEhrF97LHdaSEPn9HwxvvV4GFJDNCVEBl4zuaHo0N8C +85GPazIxUWB3CB9PrtXduxeI22lwrIiUdmzA68EXHD7Wg8R90397MNMOomLgfNcu +rXarrTXmTNgOa20hc1Ue5AXg9fVS9V/5GP4Dn9SX/CdaE1rz0b73N/ViQzVrS9Ne +n04qYPbnf+MQmFWnzMXctZbYG6jDCbuGFIGP4i/LG+wOE8Rntu8Re9re+HANu5VJ +Ht20wYOGZIpNwo4YenxvPeTTlbB0Qcma2lnw2bt19owpNQVIeTnRQXxZs3/Y3a+A ++/B8VvIkQ0u0EpnSVLBetEmJqtOQvBz7c4Z+0Cl+DL1bTqrDn54MxUBap6dgU+/1 +R6pxx1F0ZTtQauVmO8n3rWKwOGG5NeMhf4iId2JWpw39VtRk8LNtnGUbUAbL5znY +rkUVyJstQg6U6kNTgDWQ1nBxCzlRz2xpHyghnyxLkMpW5ECpmwwLDQ== +-----END RSA PRIVATE KEY----- diff --git a/src/test/ssl/t/001_ssltests.pl b/src/test/ssl/t/001_ssltests.pl index 32df273929..2b875a3c95 100644 --- a/src/test/ssl/t/001_ssltests.pl +++ b/src/test/ssl/t/001_ssltests.pl @@ -2,10 +2,19 @@ use warnings; use PostgresNode; use TestLib; -use Test::More tests => 40; +use Test::More; use ServerSetup; use File::Copy; +if ($ENV{with_openssl} eq 'yes') +{ + plan tests => 65; +} +else +{ + plan skip_all => 'SSL not supported by this build'; +} + #### Some configuration # This is the hostname used to connect to the server. This cannot be a @@ -13,50 +22,23 @@ # postgresql-ssl-regression.test. my $SERVERHOSTADDR = '127.0.0.1'; -# Define a couple of helper functions to test connecting to the server. - +# Allocation of base connection string shared among multiple tests. my $common_connstr; -sub run_test_psql -{ - my $connstr = $_[0]; - my $logstring = $_[1]; - - my $cmd = [ - 'psql', '-X', '-A', '-t', '-c', "SELECT 'connected with $connstr'", - '-d', "$connstr" ]; - - my $result = run_log($cmd); - return $result; -} - -# -# The first argument is a (part of a) connection string, and it's also printed -# out as the test case name. It is appended to $common_connstr global variable, -# which also contains a libpq connection string. 
-sub test_connect_ok -{ - my $connstr = $_[0]; - - my $result = - run_test_psql("$common_connstr $connstr", "(should succeed)"); - ok($result, $connstr); -} - -sub test_connect_fails -{ - my $connstr = $_[0]; - - my $result = run_test_psql("$common_connstr $connstr", "(should fail)"); - ok(!$result, "$connstr (should fail)"); -} - # The client's private key must not be world-readable, so take a copy # of the key stored in the code tree and update its permissions. copy("ssl/client.key", "ssl/client_tmp.key"); chmod 0600, "ssl/client_tmp.key"; +copy("ssl/client-revoked.key", "ssl/client-revoked_tmp.key"); +chmod 0600, "ssl/client-revoked_tmp.key"; -#### Part 0. Set up the server. +# Also make a copy of that explicitly world-readable. We can't +# necessarily rely on the file in the source tree having those +# permissions. +copy("ssl/client.key", "ssl/client_wrongperms_tmp.key"); +chmod 0644, "ssl/client_wrongperms_tmp.key"; + +#### Set up the server. note "setting up data directory"; my $node = get_new_node('master'); @@ -67,10 +49,40 @@ sub test_connect_fails $ENV{PGHOST} = $node->host; $ENV{PGPORT} = $node->port; $node->start; -configure_test_server_for_ssl($node, $SERVERHOSTADDR); -switch_server_cert($node, 'server-cn-only'); -### Part 1. Run client-side tests. +# Run this before we lock down access below. +my $result = $node->safe_psql('postgres', "SHOW ssl_library"); +is($result, 'OpenSSL', 'ssl_library parameter'); + +configure_test_server_for_ssl($node, $SERVERHOSTADDR, 'trust'); + +note "testing password-protected keys"; + +open my $sslconf, '>', $node->data_dir . "/sslconfig.conf"; +print $sslconf "ssl=on\n"; +print $sslconf "ssl_cert_file='server-cn-only.crt'\n"; +print $sslconf "ssl_key_file='server-password.key'\n"; +print $sslconf "ssl_passphrase_command='echo wrongpassword'\n"; +close $sslconf; + +command_fails( + [ 'pg_ctl', '-D', $node->data_dir, '-l', $node->logfile, 'restart' ], + 'restart fails with password-protected key file with wrong password'); +$node->_update_pid(0); + +open $sslconf, '>', $node->data_dir . "/sslconfig.conf"; +print $sslconf "ssl=on\n"; +print $sslconf "ssl_cert_file='server-cn-only.crt'\n"; +print $sslconf "ssl_key_file='server-password.key'\n"; +print $sslconf "ssl_passphrase_command='echo secret1'\n"; +close $sslconf; + +command_ok( + [ 'pg_ctl', '-D', $node->data_dir, '-l', $node->logfile, 'restart' ], + 'restart succeeds with password-protected key file'); +$node->_update_pid(1); + +### Run client-side tests. ### ### Test that libpq accepts/rejects the connection correctly, depending ### on sslmode and whether the server's certificate looks correct. No @@ -78,160 +90,281 @@ sub test_connect_fails note "running client tests"; +switch_server_cert($node, 'server-cn-only'); + $common_connstr = -"user=ssltestuser dbname=trustdb sslcert=invalid hostaddr=$SERVERHOSTADDR host=common-name.pg-ssltest.test"; + "user=ssltestuser dbname=trustdb sslcert=invalid hostaddr=$SERVERHOSTADDR host=common-name.pg-ssltest.test"; -# The server should not accept non-SSL connections -note "test that the server doesn't accept non-SSL connections"; -test_connect_fails("sslmode=disable"); +# The server should not accept non-SSL connections. +test_connect_fails( + $common_connstr, "sslmode=disable", + qr/\Qno pg_hba.conf entry\E/, + "server doesn't accept non-SSL connections"); # Try without a root cert. In sslmode=require, this should work. 
In verify-ca -# or verify-full mode it should fail -note "connect without server root cert"; -test_connect_ok("sslrootcert=invalid sslmode=require"); -test_connect_fails("sslrootcert=invalid sslmode=verify-ca"); -test_connect_fails("sslrootcert=invalid sslmode=verify-full"); - -# Try with wrong root cert, should fail. (we're using the client CA as the -# root, but the server's key is signed by the server CA) -note "connect without wrong server root cert"; -test_connect_fails("sslrootcert=ssl/client_ca.crt sslmode=require"); -test_connect_fails("sslrootcert=ssl/client_ca.crt sslmode=verify-ca"); -test_connect_fails("sslrootcert=ssl/client_ca.crt sslmode=verify-full"); +# or verify-full mode it should fail. +test_connect_ok( + $common_connstr, + "sslrootcert=invalid sslmode=require", + "connect without server root cert sslmode=require"); +test_connect_fails( + $common_connstr, + "sslrootcert=invalid sslmode=verify-ca", + qr/root certificate file "invalid" does not exist/, + "connect without server root cert sslmode=verify-ca"); +test_connect_fails( + $common_connstr, + "sslrootcert=invalid sslmode=verify-full", + qr/root certificate file "invalid" does not exist/, + "connect without server root cert sslmode=verify-full"); + +# Try with wrong root cert, should fail. (We're using the client CA as the +# root, but the server's key is signed by the server CA.) +test_connect_fails($common_connstr, + "sslrootcert=ssl/client_ca.crt sslmode=require", + qr/SSL error/, "connect with wrong server root cert sslmode=require"); +test_connect_fails($common_connstr, + "sslrootcert=ssl/client_ca.crt sslmode=verify-ca", + qr/SSL error/, "connect with wrong server root cert sslmode=verify-ca"); +test_connect_fails($common_connstr, + "sslrootcert=ssl/client_ca.crt sslmode=verify-full", + qr/SSL error/, "connect with wrong server root cert sslmode=verify-full"); # Try with just the server CA's cert. This fails because the root file # must contain the whole chain up to the root CA. -note "connect with server CA cert, without root CA"; -test_connect_fails("sslrootcert=ssl/server_ca.crt sslmode=verify-ca"); +test_connect_fails($common_connstr, + "sslrootcert=ssl/server_ca.crt sslmode=verify-ca", + qr/SSL error/, "connect with server CA cert, without root CA"); # And finally, with the correct root cert. -note "connect with correct server CA cert file"; -test_connect_ok("sslrootcert=ssl/root+server_ca.crt sslmode=require"); -test_connect_ok("sslrootcert=ssl/root+server_ca.crt sslmode=verify-ca"); -test_connect_ok("sslrootcert=ssl/root+server_ca.crt sslmode=verify-full"); +test_connect_ok( + $common_connstr, + "sslrootcert=ssl/root+server_ca.crt sslmode=require", + "connect with correct server CA cert file sslmode=require"); +test_connect_ok( + $common_connstr, + "sslrootcert=ssl/root+server_ca.crt sslmode=verify-ca", + "connect with correct server CA cert file sslmode=verify-ca"); +test_connect_ok( + $common_connstr, + "sslrootcert=ssl/root+server_ca.crt sslmode=verify-full", + "connect with correct server CA cert file sslmode=verify-full"); # Test with cert root file that contains two certificates. The client should # be able to pick the right one, regardless of the order in the file. 
-test_connect_ok("sslrootcert=ssl/both-cas-1.crt sslmode=verify-ca"); -test_connect_ok("sslrootcert=ssl/both-cas-2.crt sslmode=verify-ca"); +test_connect_ok( + $common_connstr, + "sslrootcert=ssl/both-cas-1.crt sslmode=verify-ca", + "cert root file that contains two certificates, order 1"); +test_connect_ok( + $common_connstr, + "sslrootcert=ssl/both-cas-2.crt sslmode=verify-ca", + "cert root file that contains two certificates, order 2"); -note "testing sslcrl option with a non-revoked cert"; +# CRL tests # Invalid CRL filename is the same as no CRL, succeeds test_connect_ok( - "sslrootcert=ssl/root+server_ca.crt sslmode=verify-ca sslcrl=invalid"); + $common_connstr, + "sslrootcert=ssl/root+server_ca.crt sslmode=verify-ca sslcrl=invalid", + "sslcrl option with invalid file name"); # A CRL belonging to a different CA is not accepted, fails test_connect_fails( -"sslrootcert=ssl/root+server_ca.crt sslmode=verify-ca sslcrl=ssl/client.crl"); + $common_connstr, + "sslrootcert=ssl/root+server_ca.crt sslmode=verify-ca sslcrl=ssl/client.crl", + qr/SSL error/, + "CRL belonging to a different CA"); # With the correct CRL, succeeds (this cert is not revoked) test_connect_ok( -"sslrootcert=ssl/root+server_ca.crt sslmode=verify-ca sslcrl=ssl/root+server.crl" -); + $common_connstr, + "sslrootcert=ssl/root+server_ca.crt sslmode=verify-ca sslcrl=ssl/root+server.crl", + "CRL with a non-revoked cert"); # Check that connecting with verify-full fails, when the hostname doesn't # match the hostname in the server's certificate. -note "test mismatch between hostname and server certificate"; $common_connstr = -"user=ssltestuser dbname=trustdb sslcert=invalid sslrootcert=ssl/root+server_ca.crt hostaddr=$SERVERHOSTADDR sslmode=verify-full"; + "user=ssltestuser dbname=trustdb sslcert=invalid sslrootcert=ssl/root+server_ca.crt hostaddr=$SERVERHOSTADDR"; -test_connect_ok("sslmode=require host=wronghost.test"); -test_connect_ok("sslmode=verify-ca host=wronghost.test"); -test_connect_fails("sslmode=verify-full host=wronghost.test"); +test_connect_ok( + $common_connstr, + "sslmode=require host=wronghost.test", + "mismatch between host name and server certificate sslmode=require"); +test_connect_ok( + $common_connstr, + "sslmode=verify-ca host=wronghost.test", + "mismatch between host name and server certificate sslmode=verify-ca"); +test_connect_fails( + $common_connstr, + "sslmode=verify-full host=wronghost.test", + qr/\Qserver certificate for "common-name.pg-ssltest.test" does not match host name "wronghost.test"\E/, + "mismatch between host name and server certificate sslmode=verify-full"); # Test Subject Alternative Names. 
switch_server_cert($node, 'server-multiple-alt-names'); -note "test hostname matching with X.509 Subject Alternative Names"; $common_connstr = -"user=ssltestuser dbname=trustdb sslcert=invalid sslrootcert=ssl/root+server_ca.crt hostaddr=$SERVERHOSTADDR sslmode=verify-full"; + "user=ssltestuser dbname=trustdb sslcert=invalid sslrootcert=ssl/root+server_ca.crt hostaddr=$SERVERHOSTADDR sslmode=verify-full"; -test_connect_ok("host=dns1.alt-name.pg-ssltest.test"); -test_connect_ok("host=dns2.alt-name.pg-ssltest.test"); -test_connect_ok("host=foo.wildcard.pg-ssltest.test"); +test_connect_ok( + $common_connstr, + "host=dns1.alt-name.pg-ssltest.test", + "host name matching with X.509 Subject Alternative Names 1"); +test_connect_ok( + $common_connstr, + "host=dns2.alt-name.pg-ssltest.test", + "host name matching with X.509 Subject Alternative Names 2"); +test_connect_ok( + $common_connstr, + "host=foo.wildcard.pg-ssltest.test", + "host name matching with X.509 Subject Alternative Names wildcard"); -test_connect_fails("host=wronghost.alt-name.pg-ssltest.test"); -test_connect_fails("host=deep.subdomain.wildcard.pg-ssltest.test"); +test_connect_fails( + $common_connstr, + "host=wronghost.alt-name.pg-ssltest.test", + qr/\Qserver certificate for "dns1.alt-name.pg-ssltest.test" (and 2 other names) does not match host name "wronghost.alt-name.pg-ssltest.test"\E/, + "host name not matching with X.509 Subject Alternative Names"); +test_connect_fails( + $common_connstr, + "host=deep.subdomain.wildcard.pg-ssltest.test", + qr/\Qserver certificate for "dns1.alt-name.pg-ssltest.test" (and 2 other names) does not match host name "deep.subdomain.wildcard.pg-ssltest.test"\E/, + "host name not matching with X.509 Subject Alternative Names wildcard"); # Test certificate with a single Subject Alternative Name. (this gives a # slightly different error message, that's all) switch_server_cert($node, 'server-single-alt-name'); -note "test hostname matching with a single X.509 Subject Alternative Name"; $common_connstr = -"user=ssltestuser dbname=trustdb sslcert=invalid sslrootcert=ssl/root+server_ca.crt hostaddr=$SERVERHOSTADDR sslmode=verify-full"; + "user=ssltestuser dbname=trustdb sslcert=invalid sslrootcert=ssl/root+server_ca.crt hostaddr=$SERVERHOSTADDR sslmode=verify-full"; -test_connect_ok("host=single.alt-name.pg-ssltest.test"); +test_connect_ok( + $common_connstr, + "host=single.alt-name.pg-ssltest.test", + "host name matching with a single X.509 Subject Alternative Name"); -test_connect_fails("host=wronghost.alt-name.pg-ssltest.test"); -test_connect_fails("host=deep.subdomain.wildcard.pg-ssltest.test"); +test_connect_fails( + $common_connstr, + "host=wronghost.alt-name.pg-ssltest.test", + qr/\Qserver certificate for "single.alt-name.pg-ssltest.test" does not match host name "wronghost.alt-name.pg-ssltest.test"\E/, + "host name not matching with a single X.509 Subject Alternative Name"); +test_connect_fails( + $common_connstr, + "host=deep.subdomain.wildcard.pg-ssltest.test", + qr/\Qserver certificate for "single.alt-name.pg-ssltest.test" does not match host name "deep.subdomain.wildcard.pg-ssltest.test"\E/, + "host name not matching with a single X.509 Subject Alternative Name wildcard" +); # Test server certificate with a CN and SANs. Per RFCs 2818 and 6125, the CN # should be ignored when the certificate has both. 
switch_server_cert($node, 'server-cn-and-alt-names'); -note "test certificate with both a CN and SANs"; $common_connstr = -"user=ssltestuser dbname=trustdb sslcert=invalid sslrootcert=ssl/root+server_ca.crt hostaddr=$SERVERHOSTADDR sslmode=verify-full"; + "user=ssltestuser dbname=trustdb sslcert=invalid sslrootcert=ssl/root+server_ca.crt hostaddr=$SERVERHOSTADDR sslmode=verify-full"; -test_connect_ok("host=dns1.alt-name.pg-ssltest.test"); -test_connect_ok("host=dns2.alt-name.pg-ssltest.test"); -test_connect_fails("host=common-name.pg-ssltest.test"); +test_connect_ok( + $common_connstr, + "host=dns1.alt-name.pg-ssltest.test", + "certificate with both a CN and SANs 1"); +test_connect_ok( + $common_connstr, + "host=dns2.alt-name.pg-ssltest.test", + "certificate with both a CN and SANs 2"); +test_connect_fails( + $common_connstr, + "host=common-name.pg-ssltest.test", + qr/\Qserver certificate for "dns1.alt-name.pg-ssltest.test" (and 1 other name) does not match host name "common-name.pg-ssltest.test"\E/, + "certificate with both a CN and SANs ignores CN"); # Finally, test a server certificate that has no CN or SANs. Of course, that's # not a very sensible certificate, but libpq should handle it gracefully. switch_server_cert($node, 'server-no-names'); $common_connstr = -"user=ssltestuser dbname=trustdb sslcert=invalid sslrootcert=ssl/root+server_ca.crt hostaddr=$SERVERHOSTADDR"; + "user=ssltestuser dbname=trustdb sslcert=invalid sslrootcert=ssl/root+server_ca.crt hostaddr=$SERVERHOSTADDR"; -test_connect_ok("sslmode=verify-ca host=common-name.pg-ssltest.test"); -test_connect_fails("sslmode=verify-full host=common-name.pg-ssltest.test"); +test_connect_ok( + $common_connstr, + "sslmode=verify-ca host=common-name.pg-ssltest.test", + "server certificate without CN or SANs sslmode=verify-ca"); +test_connect_fails( + $common_connstr, + "sslmode=verify-full host=common-name.pg-ssltest.test", + qr/could not get server's host name from server certificate/, + "server certificate without CN or SANs sslmode=verify-full"); # Test that the CRL works -note "testing client-side CRL"; switch_server_cert($node, 'server-revoked'); $common_connstr = -"user=ssltestuser dbname=trustdb sslcert=invalid hostaddr=$SERVERHOSTADDR host=common-name.pg-ssltest.test"; + "user=ssltestuser dbname=trustdb sslcert=invalid hostaddr=$SERVERHOSTADDR host=common-name.pg-ssltest.test"; # Without the CRL, succeeds. With it, fails. -test_connect_ok("sslrootcert=ssl/root+server_ca.crt sslmode=verify-ca"); +test_connect_ok( + $common_connstr, + "sslrootcert=ssl/root+server_ca.crt sslmode=verify-ca", + "connects without client-side CRL"); test_connect_fails( -"sslrootcert=ssl/root+server_ca.crt sslmode=verify-ca sslcrl=ssl/root+server.crl" -); + $common_connstr, + "sslrootcert=ssl/root+server_ca.crt sslmode=verify-ca sslcrl=ssl/root+server.crl", + qr/SSL error/, + "does not connect with client-side CRL"); -### Part 2. Server-side tests. +### Server-side tests. ### ### Test certificate authorization. 
-note "testing certificate authorization"; +note "running server tests"; + $common_connstr = -"sslrootcert=ssl/root+server_ca.crt sslmode=require dbname=certdb hostaddr=$SERVERHOSTADDR"; + "sslrootcert=ssl/root+server_ca.crt sslmode=require dbname=certdb hostaddr=$SERVERHOSTADDR"; # no client cert -test_connect_fails("user=ssltestuser sslcert=invalid"); +test_connect_fails( + $common_connstr, + "user=ssltestuser sslcert=invalid", + qr/connection requires a valid client certificate/, + "certificate authorization fails without client cert"); # correct client cert test_connect_ok( - "user=ssltestuser sslcert=ssl/client.crt sslkey=ssl/client_tmp.key"); + $common_connstr, + "user=ssltestuser sslcert=ssl/client.crt sslkey=ssl/client_tmp.key", + "certificate authorization succeeds with correct client cert"); + +# client key with wrong permissions +test_connect_fails( + $common_connstr, + "user=ssltestuser sslcert=ssl/client.crt sslkey=ssl/client_wrongperms_tmp.key", + qr!\Qprivate key file "ssl/client_wrongperms_tmp.key" has group or world access\E!, + "certificate authorization fails because of file permissions"); # client cert belonging to another user test_connect_fails( - "user=anotheruser sslcert=ssl/client.crt sslkey=ssl/client_tmp.key"); + $common_connstr, + "user=anotheruser sslcert=ssl/client.crt sslkey=ssl/client_tmp.key", + qr/certificate authentication failed for user "anotheruser"/, + "certificate authorization fails with client cert belonging to another user" +); # revoked client cert test_connect_fails( -"user=ssltestuser sslcert=ssl/client-revoked.crt sslkey=ssl/client-revoked.key" -); + $common_connstr, + "user=ssltestuser sslcert=ssl/client-revoked.crt sslkey=ssl/client-revoked_tmp.key", + qr/SSL error/, + "certificate authorization fails with revoked client cert"); # intermediate client_ca.crt is provided by client, and isn't in server's ssl_ca_file switch_server_cert($node, 'server-cn-only', 'root_ca'); $common_connstr = -"user=ssltestuser dbname=certdb sslkey=ssl/client_tmp.key sslrootcert=ssl/root+server_ca.crt hostaddr=$SERVERHOSTADDR"; + "user=ssltestuser dbname=certdb sslkey=ssl/client_tmp.key sslrootcert=ssl/root+server_ca.crt hostaddr=$SERVERHOSTADDR"; -test_connect_ok("sslmode=require sslcert=ssl/client+client_ca.crt"); -test_connect_fails("sslmode=require sslcert=ssl/client.crt"); +test_connect_ok( + $common_connstr, + "sslmode=require sslcert=ssl/client+client_ca.crt", + "intermediate client certificate is provided by client"); +test_connect_fails($common_connstr, "sslmode=require sslcert=ssl/client.crt", + qr/SSL error/, "intermediate client certificate is missing"); # clean up -unlink "ssl/client_tmp.key"; +unlink("ssl/client_tmp.key", "ssl/client_wrongperms_tmp.key", + "ssl/client-revoked_tmp.key"); diff --git a/src/test/ssl/t/002_scram.pl b/src/test/ssl/t/002_scram.pl new file mode 100644 index 0000000000..b460a7fa8a --- /dev/null +++ b/src/test/ssl/t/002_scram.pl @@ -0,0 +1,48 @@ +# Test SCRAM authentication and TLS channel binding types + +use strict; +use warnings; +use PostgresNode; +use TestLib; +use Test::More; +use ServerSetup; +use File::Copy; + +if ($ENV{with_openssl} ne 'yes') +{ + plan skip_all => 'SSL not supported by this build'; +} + +my $number_of_tests = 1; + +# This is the hostname used to connect to the server. +my $SERVERHOSTADDR = '127.0.0.1'; + +# Allocation of base connection string shared among multiple tests. +my $common_connstr; + +# Set up the server. 
+ +note "setting up data directory"; +my $node = get_new_node('master'); +$node->init; + +# PGHOST is enforced here to set up the node, subsequent connections +# will use a dedicated connection string. +$ENV{PGHOST} = $node->host; +$ENV{PGPORT} = $node->port; +$node->start; + +# Configure server for SSL connections, with password handling. +configure_test_server_for_ssl($node, $SERVERHOSTADDR, "scram-sha-256", + "pass", "scram-sha-256"); +switch_server_cert($node, 'server-cn-only'); +$ENV{PGPASSWORD} = "pass"; +$common_connstr = + "user=ssltestuser dbname=trustdb sslmode=require hostaddr=$SERVERHOSTADDR"; + +# Default settings +test_connect_ok($common_connstr, '', + "Basic SCRAM authentication with SSL"); + +done_testing($number_of_tests); diff --git a/src/test/subscription/Makefile b/src/test/subscription/Makefile index d423ff3662..0f3d2098ad 100644 --- a/src/test/subscription/Makefile +++ b/src/test/subscription/Makefile @@ -2,7 +2,7 @@ # # Makefile for src/test/subscription # -# Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group +# Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group # Portions Copyright (c) 1994, Regents of the University of California # # src/test/subscription/Makefile @@ -18,5 +18,8 @@ EXTRA_INSTALL = contrib/hstore check: $(prove_check) +installcheck: + $(prove_installcheck) + clean distclean maintainer-clean: rm -rf tmp_check diff --git a/src/test/subscription/README b/src/test/subscription/README index e9e93755b7..fb5382e120 100644 --- a/src/test/subscription/README +++ b/src/test/subscription/README @@ -8,9 +8,16 @@ This directory contains a test suite for subscription/logical replication. Running the tests ================= - make check +NOTE: You must have given the --enable-tap-tests argument to configure. -NOTE: This creates a temporary installation, and some tests may -create one or multiple nodes, for the purpose of the tests. +Run + make check +or + make installcheck +You can use "make installcheck" if you previously did "make install" +(including installing the hstore extension). In that case, the code +in the installation tree is tested. With "make check", a temporary +installation tree is built from the current sources and then tested. -NOTE: This requires the --enable-tap-tests argument to configure. +Either way, this test initializes, starts, and stops several test Postgres +clusters. 
diff --git a/src/test/subscription/t/001_rep_changes.pl b/src/test/subscription/t/001_rep_changes.pl index 0136c79d4b..d94458e00e 100644 --- a/src/test/subscription/t/001_rep_changes.pl +++ b/src/test/subscription/t/001_rep_changes.pl @@ -3,7 +3,7 @@ use warnings; use PostgresNode; use TestLib; -use Test::More tests => 16; +use Test::More tests => 17; # Initialize publisher node my $node_publisher = get_new_node('publisher'); @@ -31,6 +31,9 @@ "CREATE TABLE tab_mixed (a int primary key, b text)"); $node_publisher->safe_psql('postgres', "INSERT INTO tab_mixed (a, b) VALUES (1, 'foo')"); +$node_publisher->safe_psql('postgres', + "CREATE TABLE tab_include (a int, b text, CONSTRAINT covering PRIMARY KEY(a) INCLUDE(b))" +); # Setup structure on subscriber $node_subscriber->safe_psql('postgres', "CREATE TABLE tab_notrep (a int)"); @@ -44,31 +47,32 @@ $node_subscriber->safe_psql('postgres', "CREATE TABLE tab_mixed (c text, b text, a int primary key)"); +# replication of the table with included index +$node_subscriber->safe_psql('postgres', + "CREATE TABLE tab_include (a int, b text, CONSTRAINT covering PRIMARY KEY(a) INCLUDE(b))" +); + # Setup logical replication my $publisher_connstr = $node_publisher->connstr . ' dbname=postgres'; $node_publisher->safe_psql('postgres', "CREATE PUBLICATION tap_pub"); $node_publisher->safe_psql('postgres', "CREATE PUBLICATION tap_pub_ins_only WITH (publish = insert)"); $node_publisher->safe_psql('postgres', -"ALTER PUBLICATION tap_pub ADD TABLE tab_rep, tab_full, tab_full2, tab_mixed" + "ALTER PUBLICATION tap_pub ADD TABLE tab_rep, tab_full, tab_full2, tab_mixed, tab_include" ); $node_publisher->safe_psql('postgres', "ALTER PUBLICATION tap_pub_ins_only ADD TABLE tab_ins"); my $appname = 'tap_sub'; $node_subscriber->safe_psql('postgres', -"CREATE SUBSCRIPTION tap_sub CONNECTION '$publisher_connstr application_name=$appname' PUBLICATION tap_pub, tap_pub_ins_only" + "CREATE SUBSCRIPTION tap_sub CONNECTION '$publisher_connstr application_name=$appname' PUBLICATION tap_pub, tap_pub_ins_only" ); -# Wait for subscriber to finish initialization -my $caughtup_query = -"SELECT pg_current_wal_lsn() <= replay_lsn FROM pg_stat_replication WHERE application_name = '$appname';"; -$node_publisher->poll_query_until('postgres', $caughtup_query) - or die "Timed out while waiting for subscriber to catch up"; +$node_publisher->wait_for_catchup($appname); # Also wait for initial table sync to finish my $synced_query = -"SELECT count(1) = 0 FROM pg_subscription_rel WHERE srsubstate NOT IN ('r', 's');"; + "SELECT count(1) = 0 FROM pg_subscription_rel WHERE srsubstate NOT IN ('r', 's');"; $node_subscriber->poll_query_until('postgres', $synced_query) or die "Timed out while waiting for subscriber to synchronize data"; @@ -93,8 +97,13 @@ $node_publisher->safe_psql('postgres', "INSERT INTO tab_mixed VALUES (2, 'bar')"); -$node_publisher->poll_query_until('postgres', $caughtup_query) - or die "Timed out while waiting for subscriber to catch up"; +$node_publisher->safe_psql('postgres', + "INSERT INTO tab_include SELECT generate_series(1,50)"); +$node_publisher->safe_psql('postgres', + "DELETE FROM tab_include WHERE a > 20"); +$node_publisher->safe_psql('postgres', "UPDATE tab_include SET a = -a"); + +$node_publisher->wait_for_catchup($appname); $result = $node_subscriber->safe_psql('postgres', "SELECT count(*), min(a), max(a) FROM tab_ins"); @@ -109,6 +118,11 @@ is( $result, qq(|foo|1 |bar|2), 'check replicated changes with different column order'); +$result = 
$node_subscriber->safe_psql('postgres', + "SELECT count(*), min(a), max(a) FROM tab_include"); +is($result, qq(20|-20|-1), + 'check replicated changes with primary key index with included columns'); + # insert some duplicate rows $node_publisher->safe_psql('postgres', "INSERT INTO tab_full SELECT generate_series(1,10)"); @@ -132,9 +146,7 @@ $node_publisher->safe_psql('postgres', "UPDATE tab_full2 SET x = 'bb' WHERE x = 'b'"); -# Wait for subscription to catch up -$node_publisher->poll_query_until('postgres', $caughtup_query) - or die "Timed out while waiting for subscriber to catch up"; +$node_publisher->wait_for_catchup($appname); $result = $node_subscriber->safe_psql('postgres', "SELECT count(*), min(a), max(a) FROM tab_full"); @@ -156,28 +168,32 @@ "SELECT pid FROM pg_stat_replication WHERE application_name = '$appname';" ); $node_subscriber->safe_psql('postgres', -"ALTER SUBSCRIPTION tap_sub CONNECTION 'application_name=$appname $publisher_connstr'" + "ALTER SUBSCRIPTION tap_sub CONNECTION 'application_name=$appname $publisher_connstr'" ); $node_publisher->poll_query_until('postgres', -"SELECT pid != $oldpid FROM pg_stat_replication WHERE application_name = '$appname';" + "SELECT pid != $oldpid FROM pg_stat_replication WHERE application_name = '$appname';" ) or die "Timed out while waiting for apply to restart"; $oldpid = $node_publisher->safe_psql('postgres', "SELECT pid FROM pg_stat_replication WHERE application_name = '$appname';" ); $node_subscriber->safe_psql('postgres', -"ALTER SUBSCRIPTION tap_sub SET PUBLICATION tap_pub_ins_only WITH (copy_data = false)" + "ALTER SUBSCRIPTION tap_sub SET PUBLICATION tap_pub_ins_only WITH (copy_data = false)" ); $node_publisher->poll_query_until('postgres', -"SELECT pid != $oldpid FROM pg_stat_replication WHERE application_name = '$appname';" + "SELECT pid != $oldpid FROM pg_stat_replication WHERE application_name = '$appname';" ) or die "Timed out while waiting for apply to restart"; $node_publisher->safe_psql('postgres', "INSERT INTO tab_ins SELECT generate_series(1001,1100)"); $node_publisher->safe_psql('postgres', "DELETE FROM tab_rep"); -$node_publisher->poll_query_until('postgres', $caughtup_query) - or die "Timed out while waiting for subscriber to catch up"; +# Restart the publisher and check the state of the subscriber which +# should be in a streaming state after catching up. 
+$node_publisher->stop('fast'); +$node_publisher->start; + +$node_publisher->wait_for_catchup($appname); $result = $node_subscriber->safe_psql('postgres', "SELECT count(*), min(a), max(a) FROM tab_ins"); @@ -200,8 +216,7 @@ ); $node_publisher->safe_psql('postgres', "INSERT INTO tab_full VALUES(0)"); -$node_publisher->poll_query_until('postgres', $caughtup_query) - or die "Timed out while waiting for subscriber to catch up"; +$node_publisher->wait_for_catchup($appname); # note that data are different on provider and subscriber $result = $node_subscriber->safe_psql('postgres', @@ -220,7 +235,7 @@ $node_subscriber->safe_psql('postgres', "ALTER SUBSCRIPTION tap_sub RENAME TO tap_sub_renamed"); $node_publisher->poll_query_until('postgres', -"SELECT pid != $oldpid FROM pg_stat_replication WHERE application_name = '$appname';" + "SELECT pid != $oldpid FROM pg_stat_replication WHERE application_name = '$appname';" ) or die "Timed out while waiting for apply to restart"; # check all the cleanup diff --git a/src/test/subscription/t/002_types.pl b/src/test/subscription/t/002_types.pl index 3ca027ecb4..30a3841bca 100644 --- a/src/test/subscription/t/002_types.pl +++ b/src/test/subscription/t/002_types.pl @@ -4,7 +4,7 @@ use warnings; use PostgresNode; use TestLib; -use Test::More tests => 3; +use Test::More tests => 4; # Initialize publisher node my $node_publisher = get_new_node('publisher'); @@ -90,7 +90,13 @@ CREATE TABLE public.tst_hstore ( a INTEGER PRIMARY KEY, b public.hstore - );); + ); + + SET check_function_bodies=off; + CREATE FUNCTION public.monot_incr(int) RETURNS bool LANGUAGE sql + AS ' select \$1 > max(a) from public.tst_dom_constr; '; + CREATE DOMAIN monot_int AS int CHECK (monot_incr(VALUE)); + CREATE TABLE public.tst_dom_constr (a monot_int);); # Setup structure on both nodes $node_publisher->safe_psql('postgres', $ddl); @@ -103,18 +109,14 @@ my $appname = 'tap_sub'; $node_subscriber->safe_psql('postgres', -"CREATE SUBSCRIPTION tap_sub CONNECTION '$publisher_connstr application_name=$appname' PUBLICATION tap_pub WITH (slot_name = tap_sub_slot)" + "CREATE SUBSCRIPTION tap_sub CONNECTION '$publisher_connstr application_name=$appname' PUBLICATION tap_pub WITH (slot_name = tap_sub_slot)" ); -# Wait for subscriber to finish initialization -my $caughtup_query = -"SELECT pg_current_wal_lsn() <= replay_lsn FROM pg_stat_replication WHERE application_name = '$appname';"; -$node_publisher->poll_query_until('postgres', $caughtup_query) - or die "Timed out while waiting for subscriber to catch up"; +$node_publisher->wait_for_catchup($appname); # Wait for initial sync to finish as well my $synced_query = -"SELECT count(1) = 0 FROM pg_subscription_rel WHERE srsubstate NOT IN ('s', 'r');"; + "SELECT count(1) = 0 FROM pg_subscription_rel WHERE srsubstate NOT IN ('s', 'r');"; $node_subscriber->poll_query_until('postgres', $synced_query) or die "Timed out while waiting for subscriber to synchronize data"; @@ -244,10 +246,12 @@ (2, '"zzz"=>"foo"'), (3, '"123"=>"321"'), (4, '"yellow horse"=>"moaned"'); + + -- tst_dom_constr + INSERT INTO tst_dom_constr VALUES (10); )); -$node_publisher->poll_query_until('postgres', $caughtup_query) - or die "Timed out while waiting for subscriber to catch up"; +$node_publisher->wait_for_catchup($appname); # Check the data on subscriber my $result = $node_subscriber->safe_psql( @@ -368,8 +372,7 @@ UPDATE tst_hstore SET b = '"also"=>"updated"' WHERE a = 3; )); -$node_publisher->poll_query_until('postgres', $caughtup_query) - or die "Timed out while waiting for subscriber 
to catch up"; +$node_publisher->wait_for_catchup($appname); # Check the data on subscriber $result = $node_subscriber->safe_psql( @@ -489,8 +492,7 @@ DELETE FROM tst_hstore WHERE a = 1; )); -$node_publisher->poll_query_until('postgres', $caughtup_query) - or die "Timed out while waiting for subscriber to catch up"; +$node_publisher->wait_for_catchup($appname); # Check the data on subscriber $result = $node_subscriber->safe_psql( @@ -548,5 +550,15 @@ 4|"yellow horse"=>"moaned"', 'check replicated deletes on subscriber'); +# Test a domain with a constraint backed by a SQL-language function, +# which needs an active snapshot in order to operate. +$node_publisher->safe_psql('postgres', "INSERT INTO tst_dom_constr VALUES (11)"); + +$node_publisher->wait_for_catchup($appname); + +$result = + $node_subscriber->safe_psql('postgres', "SELECT sum(a) FROM tst_dom_constr"); +is($result, '21', 'sql-function constraint on domain'); + $node_subscriber->stop('fast'); $node_publisher->stop('fast'); diff --git a/src/test/subscription/t/003_constraints.pl b/src/test/subscription/t/003_constraints.pl index 06863aef84..a5b548ecee 100644 --- a/src/test/subscription/t/003_constraints.pl +++ b/src/test/subscription/t/003_constraints.pl @@ -1,4 +1,4 @@ -# Basic logical replication test +# This test checks that constraints work on subscriber use strict; use warnings; use PostgresNode; @@ -19,14 +19,14 @@ $node_publisher->safe_psql('postgres', "CREATE TABLE tab_fk (bid int PRIMARY KEY);"); $node_publisher->safe_psql('postgres', -"CREATE TABLE tab_fk_ref (id int PRIMARY KEY, bid int REFERENCES tab_fk (bid));" + "CREATE TABLE tab_fk_ref (id int PRIMARY KEY, bid int REFERENCES tab_fk (bid));" ); # Setup structure on subscriber $node_subscriber->safe_psql('postgres', "CREATE TABLE tab_fk (bid int PRIMARY KEY);"); $node_subscriber->safe_psql('postgres', -"CREATE TABLE tab_fk_ref (id int PRIMARY KEY, bid int REFERENCES tab_fk (bid));" + "CREATE TABLE tab_fk_ref (id int PRIMARY KEY, bid int REFERENCES tab_fk (bid));" ); # Setup logical replication @@ -36,22 +36,17 @@ my $appname = 'tap_sub'; $node_subscriber->safe_psql('postgres', -"CREATE SUBSCRIPTION tap_sub CONNECTION '$publisher_connstr application_name=$appname' PUBLICATION tap_pub WITH (copy_data = false)" + "CREATE SUBSCRIPTION tap_sub CONNECTION '$publisher_connstr application_name=$appname' PUBLICATION tap_pub WITH (copy_data = false)" ); -# Wait for subscriber to finish initialization -my $caughtup_query = -"SELECT pg_current_wal_lsn() <= replay_lsn FROM pg_stat_replication WHERE application_name = '$appname';"; -$node_publisher->poll_query_until('postgres', $caughtup_query) - or die "Timed out while waiting for subscriber to catch up"; +$node_publisher->wait_for_catchup($appname); $node_publisher->safe_psql('postgres', "INSERT INTO tab_fk (bid) VALUES (1);"); $node_publisher->safe_psql('postgres', "INSERT INTO tab_fk_ref (id, bid) VALUES (1, 1);"); -$node_publisher->poll_query_until('postgres', $caughtup_query) - or die "Timed out while waiting for subscriber to catch up"; +$node_publisher->wait_for_catchup($appname); # Check data on subscriber my $result = $node_subscriber->safe_psql('postgres', @@ -69,8 +64,7 @@ $node_publisher->safe_psql('postgres', "INSERT INTO tab_fk_ref (id, bid) VALUES (2, 2);"); -$node_publisher->poll_query_until('postgres', $caughtup_query) - or die "Timed out while waiting for subscriber to catch up"; +$node_publisher->wait_for_catchup($appname); # FK is not enforced on subscriber $result = $node_subscriber->safe_psql('postgres', @@ 
-104,8 +98,7 @@ BEGIN $node_publisher->safe_psql('postgres', "INSERT INTO tab_fk_ref (id, bid) VALUES (10, 10);"); -$node_publisher->poll_query_until('postgres', $caughtup_query) - or die "Timed out while waiting for subscriber to catch up"; +$node_publisher->wait_for_catchup($appname); # The row should be skipped on subscriber $result = $node_subscriber->safe_psql('postgres', diff --git a/src/test/subscription/t/004_sync.pl b/src/test/subscription/t/004_sync.pl index 05fd2f0e6c..f8b8f1a3d2 100644 --- a/src/test/subscription/t/004_sync.pl +++ b/src/test/subscription/t/004_sync.pl @@ -34,18 +34,14 @@ my $appname = 'tap_sub'; $node_subscriber->safe_psql('postgres', -"CREATE SUBSCRIPTION tap_sub CONNECTION '$publisher_connstr application_name=$appname' PUBLICATION tap_pub" + "CREATE SUBSCRIPTION tap_sub CONNECTION '$publisher_connstr application_name=$appname' PUBLICATION tap_pub" ); -# Wait for subscriber to finish initialization -my $caughtup_query = -"SELECT pg_current_wal_lsn() <= replay_lsn FROM pg_stat_replication WHERE application_name = '$appname';"; -$node_publisher->poll_query_until('postgres', $caughtup_query) - or die "Timed out while waiting for subscriber to catch up"; +$node_publisher->wait_for_catchup($appname); # Also wait for initial table sync to finish my $synced_query = -"SELECT count(1) = 0 FROM pg_subscription_rel WHERE srsubstate NOT IN ('r', 's');"; + "SELECT count(1) = 0 FROM pg_subscription_rel WHERE srsubstate NOT IN ('r', 's');"; $node_subscriber->poll_query_until('postgres', $synced_query) or die "Timed out while waiting for subscriber to synchronize data"; @@ -61,7 +57,7 @@ # recreate the subscription, it will try to do initial copy $node_subscriber->safe_psql('postgres', -"CREATE SUBSCRIPTION tap_sub CONNECTION '$publisher_connstr application_name=$appname' PUBLICATION tap_pub" + "CREATE SUBSCRIPTION tap_sub CONNECTION '$publisher_connstr application_name=$appname' PUBLICATION tap_pub" ); # but it will be stuck on data copy as it will fail on constraint @@ -83,12 +79,12 @@ # now check another subscription for the same node pair $node_subscriber->safe_psql('postgres', -"CREATE SUBSCRIPTION tap_sub2 CONNECTION '$publisher_connstr application_name=$appname' PUBLICATION tap_pub WITH (copy_data = false)" + "CREATE SUBSCRIPTION tap_sub2 CONNECTION '$publisher_connstr application_name=$appname' PUBLICATION tap_pub WITH (copy_data = false)" ); # wait for it to start $node_subscriber->poll_query_until('postgres', -"SELECT pid IS NOT NULL FROM pg_stat_subscription WHERE subname = 'tap_sub2' AND relid IS NULL" + "SELECT pid IS NOT NULL FROM pg_stat_subscription WHERE subname = 'tap_sub2' AND relid IS NULL" ) or die "Timed out while waiting for subscriber to start"; # and drop both subscriptions @@ -105,7 +101,7 @@ # recreate the subscription again $node_subscriber->safe_psql('postgres', -"CREATE SUBSCRIPTION tap_sub CONNECTION '$publisher_connstr application_name=$appname' PUBLICATION tap_pub" + "CREATE SUBSCRIPTION tap_sub CONNECTION '$publisher_connstr application_name=$appname' PUBLICATION tap_pub" ); # and wait for data sync to finish again @@ -120,13 +116,11 @@ # add new table on subscriber $node_subscriber->safe_psql('postgres', "CREATE TABLE tab_rep_next (a int)"); -# setup structure with existing data on pubisher +# setup structure with existing data on publisher $node_publisher->safe_psql('postgres', "CREATE TABLE tab_rep_next (a) AS SELECT generate_series(1,10)"); -# Wait for subscription to catch up -$node_publisher->poll_query_until('postgres', 
$caughtup_query) - or die "Timed out while waiting for subscriber to catch up"; +$node_publisher->wait_for_catchup($appname); $result = $node_subscriber->safe_psql('postgres', "SELECT count(*) FROM tab_rep_next"); @@ -149,9 +143,7 @@ $node_publisher->safe_psql('postgres', "INSERT INTO tab_rep_next SELECT generate_series(1,10)"); -# Wait for subscription to catch up -$node_publisher->poll_query_until('postgres', $caughtup_query) - or die "Timed out while waiting for subscriber to catch up"; +$node_publisher->wait_for_catchup($appname); $result = $node_subscriber->safe_psql('postgres', "SELECT count(*) FROM tab_rep_next"); diff --git a/src/test/subscription/t/005_encoding.pl b/src/test/subscription/t/005_encoding.pl index 26a40c0b7f..1977aa5cfe 100644 --- a/src/test/subscription/t/005_encoding.pl +++ b/src/test/subscription/t/005_encoding.pl @@ -5,15 +5,6 @@ use TestLib; use Test::More tests => 1; -sub wait_for_caught_up -{ - my ($node, $appname) = @_; - - $node->poll_query_until('postgres', -"SELECT pg_current_wal_lsn() <= replay_lsn FROM pg_stat_replication WHERE application_name = '$appname';" - ) or die "Timed out while waiting for subscriber to catch up"; -} - my $node_publisher = get_new_node('publisher'); $node_publisher->init( allows_streaming => 'logical', @@ -36,15 +27,21 @@ sub wait_for_caught_up $node_publisher->safe_psql('postgres', "CREATE PUBLICATION mypub FOR ALL TABLES;"); $node_subscriber->safe_psql('postgres', -"CREATE SUBSCRIPTION mysub CONNECTION '$publisher_connstr application_name=$appname' PUBLICATION mypub;" + "CREATE SUBSCRIPTION mysub CONNECTION '$publisher_connstr application_name=$appname' PUBLICATION mypub;" ); -wait_for_caught_up($node_publisher, $appname); +$node_publisher->wait_for_catchup($appname); + +# Wait for initial sync to finish as well +my $synced_query = + "SELECT count(1) = 0 FROM pg_subscription_rel WHERE srsubstate NOT IN ('s', 'r');"; +$node_subscriber->poll_query_until('postgres', $synced_query) + or die "Timed out while waiting for subscriber to synchronize data"; $node_publisher->safe_psql('postgres', q{INSERT INTO test1 VALUES (1, E'Mot\xc3\xb6rhead')}); # hand-rolled UTF-8 -wait_for_caught_up($node_publisher, $appname); +$node_publisher->wait_for_catchup($appname); is( $node_subscriber->safe_psql( 'postgres', q{SELECT a FROM test1 WHERE b = E'Mot\xf6rhead'} diff --git a/src/test/subscription/t/006_rewrite.pl b/src/test/subscription/t/006_rewrite.pl new file mode 100644 index 0000000000..e470c071d2 --- /dev/null +++ b/src/test/subscription/t/006_rewrite.pl @@ -0,0 +1,66 @@ +# Test logical replication behavior with heap rewrites +use strict; +use warnings; +use PostgresNode; +use TestLib; +use Test::More tests => 2; + +my $node_publisher = get_new_node('publisher'); +$node_publisher->init(allows_streaming => 'logical'); +$node_publisher->start; + +my $node_subscriber = get_new_node('subscriber'); +$node_subscriber->init(allows_streaming => 'logical'); +$node_subscriber->start; + +my $ddl = "CREATE TABLE test1 (a int, b text);"; +$node_publisher->safe_psql('postgres', $ddl); +$node_subscriber->safe_psql('postgres', $ddl); + +my $publisher_connstr = $node_publisher->connstr . 
' dbname=postgres'; +my $appname = 'encoding_test'; + +$node_publisher->safe_psql('postgres', + "CREATE PUBLICATION mypub FOR ALL TABLES;"); +$node_subscriber->safe_psql('postgres', + "CREATE SUBSCRIPTION mysub CONNECTION '$publisher_connstr application_name=$appname' PUBLICATION mypub;" +); + +$node_publisher->wait_for_catchup($appname); + +# Wait for initial sync to finish as well +my $synced_query = + "SELECT count(1) = 0 FROM pg_subscription_rel WHERE srsubstate NOT IN ('s', 'r');"; +$node_subscriber->poll_query_until('postgres', $synced_query) + or die "Timed out while waiting for subscriber to synchronize data"; + +$node_publisher->safe_psql('postgres', + q{INSERT INTO test1 (a, b) VALUES (1, 'one'), (2, 'two');}); + +$node_publisher->wait_for_catchup($appname); + +is( $node_subscriber->safe_psql('postgres', q{SELECT a, b FROM test1}), + qq(1|one +2|two), + 'initial data replicated to subscriber'); + +# DDL that causes a heap rewrite +my $ddl2 = "ALTER TABLE test1 ADD c int NOT NULL DEFAULT 0;"; +$node_subscriber->safe_psql('postgres', $ddl2); +$node_publisher->safe_psql('postgres', $ddl2); + +$node_publisher->wait_for_catchup($appname); + +$node_publisher->safe_psql('postgres', + q{INSERT INTO test1 (a, b, c) VALUES (3, 'three', 33);}); + +$node_publisher->wait_for_catchup($appname); + +is( $node_subscriber->safe_psql('postgres', q{SELECT a, b, c FROM test1}), + qq(1|one|0 +2|two|0 +3|three|33), + 'data replicated to subscriber'); + +$node_subscriber->stop; +$node_publisher->stop; diff --git a/src/test/subscription/t/007_ddl.pl b/src/test/subscription/t/007_ddl.pl new file mode 100644 index 0000000000..2697ee5c58 --- /dev/null +++ b/src/test/subscription/t/007_ddl.pl @@ -0,0 +1,43 @@ +# Test some logical replication DDL behavior +use strict; +use warnings; +use PostgresNode; +use TestLib; +use Test::More tests => 1; + +my $node_publisher = get_new_node('publisher'); +$node_publisher->init(allows_streaming => 'logical'); +$node_publisher->start; + +my $node_subscriber = get_new_node('subscriber'); +$node_subscriber->init(allows_streaming => 'logical'); +$node_subscriber->start; + +my $ddl = "CREATE TABLE test1 (a int, b text);"; +$node_publisher->safe_psql('postgres', $ddl); +$node_subscriber->safe_psql('postgres', $ddl); + +my $publisher_connstr = $node_publisher->connstr . 
' dbname=postgres'; +my $appname = 'replication_test'; + +$node_publisher->safe_psql('postgres', + "CREATE PUBLICATION mypub FOR ALL TABLES;"); +$node_subscriber->safe_psql('postgres', + "CREATE SUBSCRIPTION mysub CONNECTION '$publisher_connstr application_name=$appname' PUBLICATION mypub;" +); + +$node_publisher->wait_for_catchup($appname); + +$node_subscriber->safe_psql( + 'postgres', q{ +BEGIN; +ALTER SUBSCRIPTION mysub DISABLE; +ALTER SUBSCRIPTION mysub SET (slot_name = NONE); +DROP SUBSCRIPTION mysub; +COMMIT; +}); + +pass "subscription disable and drop in same transaction did not hang"; + +$node_subscriber->stop; +$node_publisher->stop; diff --git a/src/test/subscription/t/008_diff_schema.pl b/src/test/subscription/t/008_diff_schema.pl new file mode 100644 index 0000000000..22b76f1b17 --- /dev/null +++ b/src/test/subscription/t/008_diff_schema.pl @@ -0,0 +1,93 @@ +# Test behavior with different schema on subscriber +use strict; +use warnings; +use PostgresNode; +use TestLib; +use Test::More tests => 4; + +# Create publisher node +my $node_publisher = get_new_node('publisher'); +$node_publisher->init(allows_streaming => 'logical'); +$node_publisher->start; + +# Create subscriber node +my $node_subscriber = get_new_node('subscriber'); +$node_subscriber->init(allows_streaming => 'logical'); +$node_subscriber->start; + +# Create some preexisting content on publisher +$node_publisher->safe_psql('postgres', + "CREATE TABLE test_tab (a int primary key, b varchar)"); +$node_publisher->safe_psql('postgres', + "INSERT INTO test_tab VALUES (1, 'foo'), (2, 'bar')"); + +# Setup structure on subscriber +$node_subscriber->safe_psql('postgres', + "CREATE TABLE test_tab (a int primary key, b text, c timestamptz DEFAULT now(), d bigint DEFAULT 999, e int GENERATED BY DEFAULT AS IDENTITY)" +); + +# Setup logical replication +my $publisher_connstr = $node_publisher->connstr . 
' dbname=postgres'; +$node_publisher->safe_psql('postgres', + "CREATE PUBLICATION tap_pub FOR TABLE test_tab"); + +my $appname = 'tap_sub'; +$node_subscriber->safe_psql('postgres', + "CREATE SUBSCRIPTION tap_sub CONNECTION '$publisher_connstr application_name=$appname' PUBLICATION tap_pub" +); + +$node_publisher->wait_for_catchup($appname); + +# Also wait for initial table sync to finish +my $synced_query = + "SELECT count(1) = 0 FROM pg_subscription_rel WHERE srsubstate NOT IN ('r', 's');"; +$node_subscriber->poll_query_until('postgres', $synced_query) + or die "Timed out while waiting for subscriber to synchronize data"; + +my $result = + $node_subscriber->safe_psql('postgres', + "SELECT count(*), count(c), count(d = 999) FROM test_tab"); +is($result, qq(2|2|2), 'check initial data was copied to subscriber'); + +# Update the rows on the publisher and check the additional columns on +# subscriber didn't change +$node_publisher->safe_psql('postgres', "UPDATE test_tab SET b = md5(b)"); + +$node_publisher->wait_for_catchup($appname); + +$result = + $node_subscriber->safe_psql('postgres', + "SELECT count(*), count(c), count(d = 999), count(e) FROM test_tab"); +is($result, qq(2|2|2|2), + 'check extra columns contain local defaults after copy'); + +# Change the local values of the extra columns on the subscriber, +# update publisher, and check that subscriber retains the expected +# values +$node_subscriber->safe_psql('postgres', + "UPDATE test_tab SET c = 'epoch'::timestamptz + 987654321 * interval '1s'" +); +$node_publisher->safe_psql('postgres', + "UPDATE test_tab SET b = md5(a::text)"); + +$node_publisher->wait_for_catchup($appname); + +$result = $node_subscriber->safe_psql('postgres', + "SELECT count(*), count(extract(epoch from c) = 987654321), count(d = 999) FROM test_tab" +); +is($result, qq(2|2|2), 'check extra columns contain locally changed data'); + +# Another insert +$node_publisher->safe_psql('postgres', + "INSERT INTO test_tab VALUES (3, 'baz')"); + +$node_publisher->wait_for_catchup($appname); + +$result = + $node_subscriber->safe_psql('postgres', + "SELECT count(*), count(c), count(d = 999), count(e) FROM test_tab"); +is($result, qq(3|3|3|3), + 'check extra columns contain local defaults after apply'); + +$node_subscriber->stop; +$node_publisher->stop; diff --git a/src/test/subscription/t/009_matviews.pl b/src/test/subscription/t/009_matviews.pl new file mode 100644 index 0000000000..ea2ee420ca --- /dev/null +++ b/src/test/subscription/t/009_matviews.pl @@ -0,0 +1,50 @@ +# Test materialized views behavior +use strict; +use warnings; +use PostgresNode; +use TestLib; +use Test::More tests => 1; + +my $node_publisher = get_new_node('publisher'); +$node_publisher->init(allows_streaming => 'logical'); +$node_publisher->start; + +my $node_subscriber = get_new_node('subscriber'); +$node_subscriber->init(allows_streaming => 'logical'); +$node_subscriber->start; + +my $publisher_connstr = $node_publisher->connstr . 
' dbname=postgres'; +my $appname = 'replication_test'; + +$node_publisher->safe_psql('postgres', + "CREATE PUBLICATION mypub FOR ALL TABLES;"); +$node_subscriber->safe_psql('postgres', + "CREATE SUBSCRIPTION mysub CONNECTION '$publisher_connstr application_name=$appname' PUBLICATION mypub;" +); + +$node_publisher->safe_psql('postgres', + q{CREATE TABLE test1 (a int PRIMARY KEY, b text)}); +$node_publisher->safe_psql('postgres', + q{INSERT INTO test1 (a, b) VALUES (1, 'one'), (2, 'two');}); + +$node_subscriber->safe_psql('postgres', + q{CREATE TABLE test1 (a int PRIMARY KEY, b text);}); + +$node_publisher->wait_for_catchup($appname); + +# Materialized views are not supported by logical replication, but +# logical decoding does produce change information for them, so we +# need to make sure they are properly ignored. (bug #15044) + +# create a MV with some data +$node_publisher->safe_psql('postgres', + q{CREATE MATERIALIZED VIEW testmv1 AS SELECT * FROM test1;}); +$node_publisher->wait_for_catchup($appname); + +# There is no equivalent relation on the subscriber, but MV data is +# not replicated, so this does not hang. + +pass "materialized view data not replicated"; + +$node_subscriber->stop; +$node_publisher->stop; diff --git a/src/test/subscription/t/010_truncate.pl b/src/test/subscription/t/010_truncate.pl new file mode 100644 index 0000000000..de1443b55f --- /dev/null +++ b/src/test/subscription/t/010_truncate.pl @@ -0,0 +1,160 @@ +# Test TRUNCATE +use strict; +use warnings; +use PostgresNode; +use TestLib; +use Test::More tests => 9; + +# setup + +my $node_publisher = get_new_node('publisher'); +$node_publisher->init(allows_streaming => 'logical'); +$node_publisher->start; + +my $node_subscriber = get_new_node('subscriber'); +$node_subscriber->init(allows_streaming => 'logical'); +$node_subscriber->start; + +my $publisher_connstr = $node_publisher->connstr . 
' dbname=postgres'; + +$node_publisher->safe_psql('postgres', + "CREATE TABLE tab1 (a int PRIMARY KEY)"); + +$node_subscriber->safe_psql('postgres', + "CREATE TABLE tab1 (a int PRIMARY KEY)"); + +$node_publisher->safe_psql('postgres', + "CREATE TABLE tab2 (a int PRIMARY KEY)"); + +$node_subscriber->safe_psql('postgres', + "CREATE TABLE tab2 (a int PRIMARY KEY)"); + +$node_publisher->safe_psql('postgres', + "CREATE TABLE tab3 (a int PRIMARY KEY)"); + +$node_subscriber->safe_psql('postgres', + "CREATE TABLE tab3 (a int PRIMARY KEY)"); + +$node_publisher->safe_psql('postgres', + "CREATE TABLE tab4 (x int PRIMARY KEY, y int REFERENCES tab3)"); + +$node_subscriber->safe_psql('postgres', + "CREATE TABLE tab4 (x int PRIMARY KEY, y int REFERENCES tab3)"); + +$node_subscriber->safe_psql('postgres', + "CREATE SEQUENCE seq1 OWNED BY tab1.a"); +$node_subscriber->safe_psql('postgres', "ALTER SEQUENCE seq1 START 101"); + +$node_publisher->safe_psql('postgres', + "CREATE PUBLICATION pub1 FOR TABLE tab1"); +$node_publisher->safe_psql('postgres', + "CREATE PUBLICATION pub2 FOR TABLE tab2 WITH (publish = insert)"); +$node_publisher->safe_psql('postgres', + "CREATE PUBLICATION pub3 FOR TABLE tab3, tab4"); +$node_subscriber->safe_psql('postgres', + "CREATE SUBSCRIPTION sub1 CONNECTION '$publisher_connstr application_name=sub1' PUBLICATION pub1" +); +$node_subscriber->safe_psql('postgres', + "CREATE SUBSCRIPTION sub2 CONNECTION '$publisher_connstr application_name=sub2' PUBLICATION pub2" +); +$node_subscriber->safe_psql('postgres', + "CREATE SUBSCRIPTION sub3 CONNECTION '$publisher_connstr application_name=sub3' PUBLICATION pub3" +); + +# Wait for initial sync of all subscriptions +my $synced_query = + "SELECT count(1) = 0 FROM pg_subscription_rel WHERE srsubstate NOT IN ('r', 's');"; +$node_subscriber->poll_query_until('postgres', $synced_query) + or die "Timed out while waiting for subscriber to synchronize data"; + +# insert data to truncate + +$node_subscriber->safe_psql('postgres', + "INSERT INTO tab1 VALUES (1), (2), (3)"); + +$node_publisher->wait_for_catchup('sub1'); + +# truncate and check + +$node_publisher->safe_psql('postgres', "TRUNCATE tab1"); + +$node_publisher->wait_for_catchup('sub1'); + +my $result = $node_subscriber->safe_psql('postgres', + "SELECT count(*), min(a), max(a) FROM tab1"); +is($result, qq(0||), 'truncate replicated'); + +$result = $node_subscriber->safe_psql('postgres', "SELECT nextval('seq1')"); +is($result, qq(1), 'sequence not restarted'); + +# truncate with restart identity + +$node_publisher->safe_psql('postgres', "TRUNCATE tab1 RESTART IDENTITY"); + +$node_publisher->wait_for_catchup('sub1'); + +$result = $node_subscriber->safe_psql('postgres', "SELECT nextval('seq1')"); +is($result, qq(101), 'truncate restarted identities'); + +# test publication that does not replicate truncate + +$node_subscriber->safe_psql('postgres', + "INSERT INTO tab2 VALUES (1), (2), (3)"); + +$node_publisher->safe_psql('postgres', "TRUNCATE tab2"); + +$node_publisher->wait_for_catchup('sub2'); + +$result = $node_subscriber->safe_psql('postgres', + "SELECT count(*), min(a), max(a) FROM tab2"); +is($result, qq(3|1|3), 'truncate not replicated'); + +$node_publisher->safe_psql('postgres', + "ALTER PUBLICATION pub2 SET (publish = 'insert, truncate')"); + +$node_publisher->safe_psql('postgres', "TRUNCATE tab2"); + +$node_publisher->wait_for_catchup('sub2'); + +$result = $node_subscriber->safe_psql('postgres', + "SELECT count(*), min(a), max(a) FROM tab2"); +is($result, qq(0||), 'truncate replicated 
after publication change'); + +# test multiple tables connected by foreign keys + +$node_subscriber->safe_psql('postgres', + "INSERT INTO tab3 VALUES (1), (2), (3)"); +$node_subscriber->safe_psql('postgres', + "INSERT INTO tab4 VALUES (11, 1), (111, 1), (22, 2)"); + +$node_publisher->safe_psql('postgres', "TRUNCATE tab3, tab4"); + +$node_publisher->wait_for_catchup('sub3'); + +$result = $node_subscriber->safe_psql('postgres', + "SELECT count(*), min(a), max(a) FROM tab3"); +is($result, qq(0||), 'truncate of multiple tables replicated'); +$result = $node_subscriber->safe_psql('postgres', + "SELECT count(*), min(x), max(x) FROM tab4"); +is($result, qq(0||), 'truncate of multiple tables replicated'); + +# test truncate of multiple tables, some of which are not published + +$node_subscriber->safe_psql('postgres', "DROP SUBSCRIPTION sub2"); +$node_publisher->safe_psql('postgres', "DROP PUBLICATION pub2"); + +$node_subscriber->safe_psql('postgres', + "INSERT INTO tab1 VALUES (1), (2), (3)"); +$node_subscriber->safe_psql('postgres', + "INSERT INTO tab2 VALUES (1), (2), (3)"); + +$node_publisher->safe_psql('postgres', "TRUNCATE tab1, tab2"); + +$node_publisher->wait_for_catchup('sub1'); + +$result = $node_subscriber->safe_psql('postgres', + "SELECT count(*), min(a), max(a) FROM tab1"); +is($result, qq(0||), 'truncate of multiple tables some not published'); +$result = $node_subscriber->safe_psql('postgres', + "SELECT count(*), min(a), max(a) FROM tab2"); +is($result, qq(3|1|3), 'truncate of multiple tables some not published'); diff --git a/src/test/thread/Makefile b/src/test/thread/Makefile index 66c22691ae..bf2b07be56 100644 --- a/src/test/thread/Makefile +++ b/src/test/thread/Makefile @@ -2,7 +2,7 @@ # # Makefile for tools/thread # -# Copyright (c) 2003-2017, PostgreSQL Global Development Group +# Copyright (c) 2003-2018, PostgreSQL Global Development Group # # src/test/thread/Makefile # diff --git a/src/test/thread/thread_test.c b/src/test/thread/thread_test.c index 32ce80e57f..31452317a6 100644 --- a/src/test/thread/thread_test.c +++ b/src/test/thread/thread_test.c @@ -3,7 +3,7 @@ * test_thread_funcs.c * libc thread test program * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/test/thread/thread_test.c @@ -22,19 +22,9 @@ #if !defined(IN_CONFIGURE) && !defined(WIN32) #include "postgres.h" -#else -/* From src/include/c.h" */ -#ifndef bool -typedef char bool; -#endif -#ifndef true -#define true ((bool) 1) -#endif - -#ifndef false -#define false ((bool) 0) -#endif +/* we want to know what the native strerror does, not pg_strerror */ +#undef strerror #endif #include @@ -93,23 +83,23 @@ static volatile int errno2_set = 0; #ifndef HAVE_STRERROR_R static char *strerror_p1; static char *strerror_p2; -static bool strerror_threadsafe = false; +static int strerror_threadsafe = 0; #endif #if !defined(WIN32) && !defined(HAVE_GETPWUID_R) static struct passwd *passwd_p1; static struct passwd *passwd_p2; -static bool getpwuid_threadsafe = false; +static int getpwuid_threadsafe = 0; #endif #if !defined(HAVE_GETADDRINFO) && !defined(HAVE_GETHOSTBYNAME_R) static struct hostent *hostent_p1; static struct hostent *hostent_p2; static char myhostname[MAXHOSTNAMELEN]; -static bool gethostbyname_threadsafe = false; +static int gethostbyname_threadsafe = 0; #endif -static bool platform_is_threadsafe = true; +static int platform_is_threadsafe 
= 1; int main(int argc, char *argv[]) @@ -187,17 +177,17 @@ main(int argc, char *argv[]) #ifndef HAVE_STRERROR_R if (strerror_p1 != strerror_p2) - strerror_threadsafe = true; + strerror_threadsafe = 1; #endif #if !defined(WIN32) && !defined(HAVE_GETPWUID_R) if (passwd_p1 != passwd_p2) - getpwuid_threadsafe = true; + getpwuid_threadsafe = 1; #endif #if !defined(HAVE_GETADDRINFO) && !defined(HAVE_GETHOSTBYNAME_R) if (hostent_p1 != hostent_p2) - gethostbyname_threadsafe = true; + gethostbyname_threadsafe = 1; #endif /* close down threads */ @@ -210,7 +200,7 @@ main(int argc, char *argv[]) /* report results */ #ifdef HAVE_STRERROR_R - printf("Your system has sterror_r(); it does not need strerror().\n"); + printf("Your system has strerror_r(); it does not need strerror().\n"); #else printf("Your system uses strerror() which is "); if (strerror_threadsafe) @@ -218,7 +208,7 @@ main(int argc, char *argv[]) else { printf("not thread-safe. **\n"); - platform_is_threadsafe = false; + platform_is_threadsafe = 0; } #endif @@ -233,7 +223,7 @@ main(int argc, char *argv[]) else { printf("not thread-safe. **\n"); - platform_is_threadsafe = false; + platform_is_threadsafe = 0; } #endif @@ -249,7 +239,7 @@ main(int argc, char *argv[]) else { printf("not thread-safe. **\n"); - platform_is_threadsafe = false; + platform_is_threadsafe = 0; } #endif diff --git a/src/timezone/Makefile b/src/timezone/Makefile index bed5727f53..87493da8b3 100644 --- a/src/timezone/Makefile +++ b/src/timezone/Makefile @@ -21,10 +21,8 @@ OBJS= localtime.o strftime.o pgtz.o # files needed to build zic utility program ZICOBJS= zic.o $(WIN32RES) -# timezone data files -TZDATA = africa antarctica asia australasia europe northamerica southamerica \ - pacificnew etcetera factory backward systemv -TZDATAFILES = $(TZDATA:%=$(srcdir)/data/%) +# we now distribute the timezone data as a single file +TZDATAFILES = $(srcdir)/data/tzdata.zi # which zone should determine the DST rules (not the specific UTC offset!) # for POSIX-style timezone specs diff --git a/src/timezone/README b/src/timezone/README index 912e0c1b39..379349a78c 100644 --- a/src/timezone/README +++ b/src/timezone/README @@ -2,10 +2,12 @@ src/timezone/README This is a PostgreSQL adapted version of the IANA timezone library from - http://www.iana.org/time-zones + https://www.iana.org/time-zones -The latest versions of both the tzdata and tzcode tarballs are normally -available right from that page. Historical versions can be found +The latest version of the timezone data and library source code is +available right from that page. It's best to get the merged file +tzdb-NNNNX.tar.lz, since the other archive formats omit tzdata.zi. +Historical versions, as well as release announcements, can be found elsewhere on the site. Since time zone rules change frequently in some parts of the world, @@ -17,11 +19,14 @@ changes that might affect interpretation of the data files. Time Zone data ============== -The data files under data/ are an exact copy of the latest tzdata set, -except that we omit some files that are not of interest for our purposes. +We distribute the time zone source data as-is under src/timezone/data/. +Currently, we distribute just the abbreviated single-file format +"tzdata.zi", to reduce the size of our tarballs as well as churn +in our git repo. Feeding that file to zic produces the same compiled +output as feeding the bulkier individual data files would do. 
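For illustration only (not part of the patch hunks above), a minimal sketch of compiling the merged single-file data with zic; the target directory is purely illustrative, and the real build drives zic from src/timezone/Makefile with its own options:

	# Minimal sketch: feed the merged tzdata.zi to zic to produce the
	# same compiled zoneinfo files the individual source files would.
	# The -d directory below is an example path, not the build default.
	zic -d /usr/local/pgsql/share/timezone data/tzdata.zi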
-While the files under data/ can just be duplicated when updating, manual -effort is needed to update the time zone abbreviation lists under tznames/. +While data/tzdata.zi can just be duplicated when updating, manual effort +is needed to update the time zone abbreviation lists under tznames/. These need to be changed whenever new abbreviations are invented or the UTC offset associated with an existing abbreviation changes. To detect if this has happened, after installing new files under data/ do @@ -50,7 +55,7 @@ match properly on the old version. Time Zone code ============== -The code in this directory is currently synced with tzcode release 2017b. +The code in this directory is currently synced with tzcode release 2018g. There are many cosmetic (and not so cosmetic) differences from the original tzcode library, but diffs in the upstream version should usually be propagated to our version. Here are some notes about that. @@ -68,19 +73,26 @@ fixed that.) includes relying on configure's results rather than hand-hacked #defines, and not relying on features that may not exist on old systems. (In particular this means using Postgres' definitions of the int32 and -int64 typedefs, not int_fast32_t/int_fast64_t.) +int64 typedefs, not int_fast32_t/int_fast64_t. Likewise we use +PG_INT32_MIN/MAX not INT32_MIN/MAX.) * Since Postgres is typically built on a system that has its own copy of the functions, we must avoid conflicting with those. This mandates renaming typedef time_t to pg_time_t, and similarly for most other exposed names. +* zic.c's typedef "lineno" is renamed to "lineno_t", because having +"lineno" in our typedefs list would cause unfortunate pgindent behavior +in some other files where we have variables named that. + * We have exposed the tzload() and tzparse() internal functions, and slightly modified the API of the former, in part because it now relies on our own pg_open_tzfile() rather than opening files for itself. -* tzparse() is adjusted to cache the result of loading the TZDEFRULES -zone, so that that's not repeated more than once per process. +* tzparse() is adjusted to avoid loading the TZDEFRULES zone unless +really necessary, and to ignore any leap-second data it may supply. +We also cache the result of loading the TZDEFRULES zone, so that +that's not repeated more than once per process. * There's a fair amount of code we don't need and have removed, including all the nonstandard optional APIs. We have also added @@ -101,8 +113,11 @@ to first run the tzcode source files through a sed filter like this: -e 's/\bregister[ \t]//g' \ -e 's/int_fast32_t/int32/g' \ -e 's/int_fast64_t/int64/g' \ + -e 's/INT32_MIN/PG_INT32_MIN/g' \ + -e 's/INT32_MAX/PG_INT32_MAX/g' \ -e 's/struct[ \t]+tm\b/struct pg_tm/g' \ -e 's/\btime_t\b/pg_time_t/g' \ + -e 's/lineno/lineno_t/g' \ and then run them through pgindent. (The first three sed patterns deal with conversion of their block comment style to something pgindent diff --git a/src/timezone/data/africa b/src/timezone/data/africa deleted file mode 100644 index dcc20b9b1c..0000000000 --- a/src/timezone/data/africa +++ /dev/null @@ -1,1192 +0,0 @@ -# This file is in the public domain, so clarified as of -# 2009-05-17 by Arthur David Olson. - -# This file is by no means authoritative; if you think you know better, -# go ahead and edit the file (and please send any changes to -# tz@iana.org for general use in the future). For more, please see -# the file CONTRIBUTING in the tz distribution. 
- -# From Paul Eggert (2017-02-20): -# -# Unless otherwise specified, the source for data through 1990 is: -# Thomas G. Shanks and Rique Pottenger, The International Atlas (6th edition), -# San Diego: ACS Publications, Inc. (2003). -# Unfortunately this book contains many errors and cites no sources. -# -# Many years ago Gwillim Law wrote that a good source -# for time zone data was the International Air Transport -# Association's Standard Schedules Information Manual (IATA SSIM), -# published semiannually. Law sent in several helpful summaries -# of the IATA's data after 1990. Except where otherwise noted, -# IATA SSIM is the source for entries after 1990. -# -# Another source occasionally used is Edward W. Whitman, World Time Differences, -# Whitman Publishing Co, 2 Niagara Av, Ealing, London (undated), which -# I found in the UCLA library. -# -# For data circa 1899, a common source is: -# Milne J. Civil time. Geogr J. 1899 Feb;13(2):173-94. -# http://www.jstor.org/stable/1774359 -# -# A reliable and entertaining source about time zones is -# Derek Howse, Greenwich time and longitude, Philip Wilson Publishers (1997). -# -# European-style abbreviations are commonly used along the Mediterranean. -# For sub-Saharan Africa abbreviations were less standardized. -# Previous editions of this database used WAT, CAT, SAT, and EAT -# for UT +00 through +03, respectively, -# but in 1997 Mark R V Murray reported that -# 'SAST' is the official abbreviation for +02 in the country of South Africa, -# 'CAT' is commonly used for +02 in countries north of South Africa, and -# 'WAT' is probably the best name for +01, as the common phrase for -# the area that includes Nigeria is "West Africa". -# -# To summarize, the following abbreviations seemed to have some currency: -# +00 GMT Greenwich Mean Time -# +02 CAT Central Africa Time -# +02 SAST South Africa Standard Time -# and Murray suggested the following abbreviation: -# +01 WAT West Africa Time -# Murray's suggestion seems to have caught on in news reports and the like. -# I vaguely recall 'WAT' also being used for -01 in the past but -# cannot now come up with solid citations. -# -# I invented the following abbreviations; corrections are welcome! -# +02 WAST West Africa Summer Time -# +03 CAST Central Africa Summer Time (no longer used) -# +03 SAST South Africa Summer Time (no longer used) -# +03 EAT East Africa Time -# 'EAT' also seems to have caught on; the others are rare but are paired -# with better-attested non-DST abbreviations. 
- -# Algeria -# Rule NAME FROM TO TYPE IN ON AT SAVE LETTER/S -Rule Algeria 1916 only - Jun 14 23:00s 1:00 S -Rule Algeria 1916 1919 - Oct Sun>=1 23:00s 0 - -Rule Algeria 1917 only - Mar 24 23:00s 1:00 S -Rule Algeria 1918 only - Mar 9 23:00s 1:00 S -Rule Algeria 1919 only - Mar 1 23:00s 1:00 S -Rule Algeria 1920 only - Feb 14 23:00s 1:00 S -Rule Algeria 1920 only - Oct 23 23:00s 0 - -Rule Algeria 1921 only - Mar 14 23:00s 1:00 S -Rule Algeria 1921 only - Jun 21 23:00s 0 - -Rule Algeria 1939 only - Sep 11 23:00s 1:00 S -Rule Algeria 1939 only - Nov 19 1:00 0 - -Rule Algeria 1944 1945 - Apr Mon>=1 2:00 1:00 S -Rule Algeria 1944 only - Oct 8 2:00 0 - -Rule Algeria 1945 only - Sep 16 1:00 0 - -Rule Algeria 1971 only - Apr 25 23:00s 1:00 S -Rule Algeria 1971 only - Sep 26 23:00s 0 - -Rule Algeria 1977 only - May 6 0:00 1:00 S -Rule Algeria 1977 only - Oct 21 0:00 0 - -Rule Algeria 1978 only - Mar 24 1:00 1:00 S -Rule Algeria 1978 only - Sep 22 3:00 0 - -Rule Algeria 1980 only - Apr 25 0:00 1:00 S -Rule Algeria 1980 only - Oct 31 2:00 0 - -# Shanks & Pottenger give 0:09:20 for Paris Mean Time; go with Howse's -# more precise 0:09:21. -# Zone NAME GMTOFF RULES FORMAT [UNTIL] -Zone Africa/Algiers 0:12:12 - LMT 1891 Mar 15 0:01 - 0:09:21 - PMT 1911 Mar 11 # Paris Mean Time - 0:00 Algeria WE%sT 1940 Feb 25 2:00 - 1:00 Algeria CE%sT 1946 Oct 7 - 0:00 - WET 1956 Jan 29 - 1:00 - CET 1963 Apr 14 - 0:00 Algeria WE%sT 1977 Oct 21 - 1:00 Algeria CE%sT 1979 Oct 26 - 0:00 Algeria WE%sT 1981 May - 1:00 - CET - -# Angola -# Benin -# See Africa/Lagos. - -# Botswana -# See Africa/Maputo. - -# Burkina Faso -# See Africa/Abidjan. - -# Burundi -# See Africa/Maputo. - -# Cameroon -# See Africa/Lagos. - -# Cape Verde / Cabo Verde -# -# Shanks gives 1907 for the transition to +02. -# Perhaps the 1911-05-26 Portuguese decree -# https://dre.pt/pdf1sdip/1911/05/12500/23132313.pdf -# merely made it official? -# -# Zone NAME GMTOFF RULES FORMAT [UNTIL] -Zone Atlantic/Cape_Verde -1:34:04 - LMT 1907 # Praia - -2:00 - -02 1942 Sep - -2:00 1:00 -01 1945 Oct 15 - -2:00 - -02 1975 Nov 25 2:00 - -1:00 - -01 - -# Central African Republic -# See Africa/Lagos. - -# Chad -# Zone NAME GMTOFF RULES FORMAT [UNTIL] -Zone Africa/Ndjamena 1:00:12 - LMT 1912 # N'Djamena - 1:00 - WAT 1979 Oct 14 - 1:00 1:00 WAST 1980 Mar 8 - 1:00 - WAT - -# Comoros -# See Africa/Nairobi. - -# Democratic Republic of the Congo -# See Africa/Lagos for the western part and Africa/Maputo for the eastern. - -# Republic of the Congo -# See Africa/Lagos. - -# Côte d'Ivoire / Ivory Coast -# Zone NAME GMTOFF RULES FORMAT [UNTIL] -Zone Africa/Abidjan -0:16:08 - LMT 1912 - 0:00 - GMT -Link Africa/Abidjan Africa/Bamako # Mali -Link Africa/Abidjan Africa/Banjul # Gambia -Link Africa/Abidjan Africa/Conakry # Guinea -Link Africa/Abidjan Africa/Dakar # Senegal -Link Africa/Abidjan Africa/Freetown # Sierra Leone -Link Africa/Abidjan Africa/Lome # Togo -Link Africa/Abidjan Africa/Nouakchott # Mauritania -Link Africa/Abidjan Africa/Ouagadougou # Burkina Faso -Link Africa/Abidjan Africa/Sao_Tome # São Tomé and Príncipe -Link Africa/Abidjan Atlantic/St_Helena # St Helena - -# Djibouti -# See Africa/Nairobi. - -############################################################################### - -# Egypt - -# Milne says Cairo used 2:05:08.9, the local mean time of the Abbasizeh -# observatory; round to nearest. Milne also says that the official time for -# Egypt was mean noon at the Great Pyramid, 2:04:30.5, but apparently this -# did not apply to Cairo, Alexandria, or Port Said. 
- -# Rule NAME FROM TO TYPE IN ON AT SAVE LETTER/S -Rule Egypt 1940 only - Jul 15 0:00 1:00 S -Rule Egypt 1940 only - Oct 1 0:00 0 - -Rule Egypt 1941 only - Apr 15 0:00 1:00 S -Rule Egypt 1941 only - Sep 16 0:00 0 - -Rule Egypt 1942 1944 - Apr 1 0:00 1:00 S -Rule Egypt 1942 only - Oct 27 0:00 0 - -Rule Egypt 1943 1945 - Nov 1 0:00 0 - -Rule Egypt 1945 only - Apr 16 0:00 1:00 S -Rule Egypt 1957 only - May 10 0:00 1:00 S -Rule Egypt 1957 1958 - Oct 1 0:00 0 - -Rule Egypt 1958 only - May 1 0:00 1:00 S -Rule Egypt 1959 1981 - May 1 1:00 1:00 S -Rule Egypt 1959 1965 - Sep 30 3:00 0 - -Rule Egypt 1966 1994 - Oct 1 3:00 0 - -Rule Egypt 1982 only - Jul 25 1:00 1:00 S -Rule Egypt 1983 only - Jul 12 1:00 1:00 S -Rule Egypt 1984 1988 - May 1 1:00 1:00 S -Rule Egypt 1989 only - May 6 1:00 1:00 S -Rule Egypt 1990 1994 - May 1 1:00 1:00 S -# IATA (after 1990) says transitions are at 0:00. -# Go with IATA starting in 1995, except correct 1995 entry from 09-30 to 09-29. - -# From Alexander Krivenyshev (2011-04-20): -# "...Egypt's interim cabinet decided on Wednesday to cancel daylight -# saving time after a poll posted on its website showed the majority of -# Egyptians would approve the cancellation." -# -# Egypt to cancel daylight saving time -# http://www.almasryalyoum.com/en/node/407168 -# or -# http://www.worldtimezone.com/dst_news/dst_news_egypt04.html -Rule Egypt 1995 2010 - Apr lastFri 0:00s 1:00 S -Rule Egypt 1995 2005 - Sep lastThu 24:00 0 - -# From Steffen Thorsen (2006-09-19): -# The Egyptian Gazette, issue 41,090 (2006-09-18), page 1, reports: -# Egypt will turn back clocks by one hour at the midnight of Thursday -# after observing the daylight saving time since May. -# http://news.gom.com.eg/gazette/pdf/2006/09/18/01.pdf -Rule Egypt 2006 only - Sep 21 24:00 0 - -# From Dirk Losch (2007-08-14): -# I received a mail from an airline which says that the daylight -# saving time in Egypt will end in the night of 2007-09-06 to 2007-09-07. -# From Jesper Nørgaard Welen (2007-08-15): [The following agree:] -# http://www.nentjes.info/Bill/bill5.htm -# http://www.timeanddate.com/worldclock/city.html?n=53 -# From Steffen Thorsen (2007-09-04): The official information...: -# http://www.sis.gov.eg/En/EgyptOnline/Miscellaneous/000002/0207000000000000001580.htm -Rule Egypt 2007 only - Sep Thu>=1 24:00 0 - -# From Abdelrahman Hassan (2007-09-06): -# Due to the Hijri (lunar Islamic calendar) year being 11 days shorter -# than the year of the Gregorian calendar, Ramadan shifts earlier each -# year. This year it will be observed September 13 (September is quite -# hot in Egypt), and the idea is to make fasting easier for workers by -# shifting business hours one hour out of daytime heat. Consequently, -# unless discontinued, next DST may end Thursday 28 August 2008. -# From Paul Eggert (2007-08-17): -# For lack of better info, assume the new rule is last Thursday in August. 
- -# From Petr Machata (2009-04-06): -# The following appeared in Red Hat bugzilla[1] (edited): -# -# > $ zdump -v /usr/share/zoneinfo/Africa/Cairo | grep 2009 -# > /usr/share/zoneinfo/Africa/Cairo Thu Apr 23 21:59:59 2009 UTC = Thu = -# Apr 23 -# > 23:59:59 2009 EET isdst=0 gmtoff=7200 -# > /usr/share/zoneinfo/Africa/Cairo Thu Apr 23 22:00:00 2009 UTC = Fri = -# Apr 24 -# > 01:00:00 2009 EEST isdst=1 gmtoff=10800 -# > /usr/share/zoneinfo/Africa/Cairo Thu Aug 27 20:59:59 2009 UTC = Thu = -# Aug 27 -# > 23:59:59 2009 EEST isdst=1 gmtoff=10800 -# > /usr/share/zoneinfo/Africa/Cairo Thu Aug 27 21:00:00 2009 UTC = Thu = -# Aug 27 -# > 23:00:00 2009 EET isdst=0 gmtoff=7200 -# -# > end date should be Thu Sep 24 2009 (Last Thursday in September at 23:59= -# :59) -# > http://support.microsoft.com/kb/958729/ -# -# timeanddate[2] and another site I've found[3] also support that. -# -# [1] https://bugzilla.redhat.com/show_bug.cgi?id=492263 -# [2] http://www.timeanddate.com/worldclock/clockchange.html?n=53 -# [3] http://wwp.greenwichmeantime.com/time-zone/africa/egypt/ - -# From Arthur David Olson (2009-04-20): -# In 2009 (and for the next several years), Ramadan ends before the fourth -# Thursday in September; Egypt is expected to revert to the last Thursday -# in September. - -# From Steffen Thorsen (2009-08-11): -# We have been able to confirm the August change with the Egyptian Cabinet -# Information and Decision Support Center: -# http://www.timeanddate.com/news/time/egypt-dst-ends-2009.html -# -# The Middle East News Agency -# http://www.mena.org.eg/index.aspx -# also reports "Egypt starts winter time on August 21" -# today in article numbered "71, 11/08/2009 12:25 GMT." -# Only the title above is available without a subscription to their service, -# and can be found by searching for "winter" in their search engine -# (at least today). - -# From Alexander Krivenyshev (2010-07-20): -# According to News from Egypt - Al-Masry Al-Youm Egypt's cabinet has -# decided that Daylight Saving Time will not be used in Egypt during -# Ramadan. -# -# Arabic translation: -# "Clocks to go back during Ramadan - and then forward again" -# http://www.almasryalyoum.com/en/news/clocks-go-back-during-ramadan-and-then-forward-again -# http://www.worldtimezone.com/dst_news/dst_news_egypt02.html - -# From Ahmad El-Dardiry (2014-05-07): -# Egypt is to change back to Daylight system on May 15 -# http://english.ahram.org.eg/NewsContent/1/64/100735/Egypt/Politics-/Egypts-government-to-reapply-daylight-saving-time-.aspx - -# From Gunther Vermier (2014-05-13): -# our Egypt office confirms that the change will be at 15 May "midnight" (24:00) - -# From Imed Chihi (2014-06-04): -# We have finally "located" a precise official reference about the DST changes -# in Egypt. The Ministers Cabinet decision is explained at -# http://www.cabinet.gov.eg/Media/CabinetMeetingsDetails.aspx?id=347 ... -# [T]his (Arabic) site is not accessible outside Egypt, but the page ... -# translates into: "With regard to daylight saving time, it is scheduled to -# take effect at exactly twelve o'clock this evening, Thursday, 15 MAY 2014, -# to be suspended by twelve o'clock on the evening of Thursday, 26 JUN 2014, -# and re-established again at the end of the month of Ramadan, at twelve -# o'clock on the evening of Thursday, 31 JUL 2014." This statement has been -# reproduced by other (more accessible) sites[, e.g.,]... 
-# http://elgornal.net/news/news.aspx?id=4699258 - -# From Paul Eggert (2014-06-04): -# Sarah El Deeb and Lee Keath of AP report that the Egyptian government says -# the change is because of blackouts in Cairo, even though Ahram Online (cited -# above) says DST had no affect on electricity consumption. There is -# no information about when DST will end this fall. See: -# http://abcnews.go.com/International/wireStory/el-sissi-pushes-egyptians-line-23614833 - -# From Steffen Thorsen (2015-04-08): -# Egypt will start DST on midnight after Thursday, April 30, 2015. -# This is based on a law (no 35) from May 15, 2014 saying it starts the last -# Thursday of April.... Clocks will still be turned back for Ramadan, but -# dates not yet announced.... -# http://almogaz.com/news/weird-news/2015/04/05/1947105 ... -# http://www.timeanddate.com/news/time/egypt-starts-dst-2015.html - -# From Ahmed Nazmy (2015-04-20): -# Egypt's ministers cabinet just announced ... that it will cancel DST at -# least for 2015. -# -# From Tim Parenti (2015-04-20): -# http://english.ahram.org.eg/WriterArticles/NewsContentP/1/128195/Egypt/No-daylight-saving-this-summer-Egypts-prime-minist.aspx -# "Egypt's cabinet agreed on Monday not to switch clocks for daylight saving -# time this summer, and carry out studies on the possibility of canceling the -# practice altogether in future years." -# -# From Paul Eggert (2015-04-24): -# Yesterday the office of Egyptian President El-Sisi announced his -# decision to abandon DST permanently. See Ahram Online 2015-04-24. -# http://english.ahram.org.eg/NewsContent/1/64/128509/Egypt/Politics-/Sisi-cancels-daylight-saving-time-in-Egypt.aspx - -# From Steffen Thorsen (2016-04-29): -# Egypt will have DST from July 7 until the end of October.... -# http://english.ahram.org.eg/NewsContentP/1/204655/Egypt/Daylight-savings-time-returning-to-Egypt-on--July.aspx -# From Mina Samuel (2016-07-04): -# Egyptian government took the decision to cancel the DST, - -Rule Egypt 2008 only - Aug lastThu 24:00 0 - -Rule Egypt 2009 only - Aug 20 24:00 0 - -Rule Egypt 2010 only - Aug 10 24:00 0 - -Rule Egypt 2010 only - Sep 9 24:00 1:00 S -Rule Egypt 2010 only - Sep lastThu 24:00 0 - -Rule Egypt 2014 only - May 15 24:00 1:00 S -Rule Egypt 2014 only - Jun 26 24:00 0 - -Rule Egypt 2014 only - Jul 31 24:00 1:00 S -Rule Egypt 2014 only - Sep lastThu 24:00 0 - - -# Zone NAME GMTOFF RULES FORMAT [UNTIL] -Zone Africa/Cairo 2:05:09 - LMT 1900 Oct - 2:00 Egypt EE%sT - -# Equatorial Guinea -# See Africa/Lagos. - -# Eritrea -# Ethiopia -# See Africa/Nairobi. - -# Gabon -# See Africa/Lagos. - -# Gambia -# See Africa/Abidjan. - -# Ghana -# Rule NAME FROM TO TYPE IN ON AT SAVE LETTER/S -# Whitman says DST was observed from 1931 to "the present"; -# Shanks & Pottenger say 1936 to 1942; -# and September 1 to January 1 is given by: -# Scott Keltie J, Epstein M (eds), The Statesman's Year-Book, -# 57th ed. Macmillan, London (1920), OCLC 609408015, pp xxviii. -# For lack of better info, assume DST was observed from 1920 to 1942. -Rule Ghana 1920 1942 - Sep 1 0:00 0:20 GHST -Rule Ghana 1920 1942 - Dec 31 0:00 0 GMT -# Zone NAME GMTOFF RULES FORMAT [UNTIL] -Zone Africa/Accra -0:00:52 - LMT 1918 - 0:00 Ghana GMT/+0020 - -# Guinea -# See Africa/Abidjan. - -# Guinea-Bissau -# -# Shanks gives 1911-05-26 for the transition to WAT, -# evidently confusing the date of the Portuguese decree -# https://dre.pt/pdf1sdip/1911/05/12500/23132313.pdf -# with the date that it took effect, namely 1912-01-01. 
-# -# Zone NAME GMTOFF RULES FORMAT [UNTIL] -Zone Africa/Bissau -1:02:20 - LMT 1912 Jan 1 - -1:00 - -01 1975 - 0:00 - GMT - -# Kenya -# Zone NAME GMTOFF RULES FORMAT [UNTIL] -Zone Africa/Nairobi 2:27:16 - LMT 1928 Jul - 3:00 - EAT 1930 - 2:30 - +0230 1940 - 2:45 - +0245 1960 - 3:00 - EAT -Link Africa/Nairobi Africa/Addis_Ababa # Ethiopia -Link Africa/Nairobi Africa/Asmara # Eritrea -Link Africa/Nairobi Africa/Dar_es_Salaam # Tanzania -Link Africa/Nairobi Africa/Djibouti -Link Africa/Nairobi Africa/Kampala # Uganda -Link Africa/Nairobi Africa/Mogadishu # Somalia -Link Africa/Nairobi Indian/Antananarivo # Madagascar -Link Africa/Nairobi Indian/Comoro -Link Africa/Nairobi Indian/Mayotte - -# Lesotho -# See Africa/Johannesburg. - -# Liberia -# -# From Paul Eggert (2017-03-02): -# -# The Nautical Almanac for the Year 1970, p 264, is the source for -0:44:30. -# -# In 1972 Liberia was the last country to switch from a UTC offset -# that was not a multiple of 15 or 20 minutes. The 1972 change was on -# 1972-01-07, according to an entry dated 1972-01-04 on p 330 of: -# Presidential Papers: First year of the administration of -# President William R. Tolbert, Jr., July 23, 1971-July 31, 1972. -# Monrovia: Executive Mansion. -# -# Use the abbreviation "MMT" before 1972, as the more-accurate numeric -# abbreviation "-004430" would be one byte over the POSIX limit. -# -# Zone NAME GMTOFF RULES FORMAT [UNTIL] -Zone Africa/Monrovia -0:43:08 - LMT 1882 - -0:43:08 - MMT 1919 Mar # Monrovia Mean Time - -0:44:30 - MMT 1972 Jan 7 # approximately MMT - 0:00 - GMT - -############################################################################### - -# Libya - -# From Even Scharning (2012-11-10): -# Libya set their time one hour back at 02:00 on Saturday November 10. -# http://www.libyaherald.com/2012/11/04/clocks-to-go-back-an-hour-on-saturday/ -# Here is an official source [in Arabic]: http://ls.ly/fb6Yc -# -# Steffen Thorsen forwarded a translation (2012-11-10) in -# http://mm.icann.org/pipermail/tz/2012-November/018451.html -# -# From Tim Parenti (2012-11-11): -# Treat the 2012-11-10 change as a zone change from UTC+2 to UTC+1. -# The DST rules planned for 2013 and onward roughly mirror those of Europe -# (either two days before them or five days after them, so as to fall on -# lastFri instead of lastSun). - -# From Even Scharning (2013-10-25): -# The scheduled end of DST in Libya on Friday, October 25, 2013 was -# cancelled yesterday.... -# http://www.libyaherald.com/2013/10/24/correction-no-time-change-tomorrow/ -# -# From Paul Eggert (2013-10-25): -# For now, assume they're reverting to the pre-2012 rules of permanent UT +02. 
- -# Rule NAME FROM TO TYPE IN ON AT SAVE LETTER/S -Rule Libya 1951 only - Oct 14 2:00 1:00 S -Rule Libya 1952 only - Jan 1 0:00 0 - -Rule Libya 1953 only - Oct 9 2:00 1:00 S -Rule Libya 1954 only - Jan 1 0:00 0 - -Rule Libya 1955 only - Sep 30 0:00 1:00 S -Rule Libya 1956 only - Jan 1 0:00 0 - -Rule Libya 1982 1984 - Apr 1 0:00 1:00 S -Rule Libya 1982 1985 - Oct 1 0:00 0 - -Rule Libya 1985 only - Apr 6 0:00 1:00 S -Rule Libya 1986 only - Apr 4 0:00 1:00 S -Rule Libya 1986 only - Oct 3 0:00 0 - -Rule Libya 1987 1989 - Apr 1 0:00 1:00 S -Rule Libya 1987 1989 - Oct 1 0:00 0 - -Rule Libya 1997 only - Apr 4 0:00 1:00 S -Rule Libya 1997 only - Oct 4 0:00 0 - -Rule Libya 2013 only - Mar lastFri 1:00 1:00 S -Rule Libya 2013 only - Oct lastFri 2:00 0 - -# Zone NAME GMTOFF RULES FORMAT [UNTIL] -Zone Africa/Tripoli 0:52:44 - LMT 1920 - 1:00 Libya CE%sT 1959 - 2:00 - EET 1982 - 1:00 Libya CE%sT 1990 May 4 -# The 1996 and 1997 entries are from Shanks & Pottenger; -# the IATA SSIM data entries contain some obvious errors. - 2:00 - EET 1996 Sep 30 - 1:00 Libya CE%sT 1997 Oct 4 - 2:00 - EET 2012 Nov 10 2:00 - 1:00 Libya CE%sT 2013 Oct 25 2:00 - 2:00 - EET - -# Madagascar -# See Africa/Nairobi. - -# Malawi -# See Africa/Maputo. - -# Mali -# Mauritania -# See Africa/Abidjan. - -# Mauritius - -# From Steffen Thorsen (2008-06-25): -# Mauritius plans to observe DST from 2008-11-01 to 2009-03-31 on a trial -# basis.... -# It seems that Mauritius observed daylight saving time from 1982-10-10 to -# 1983-03-20 as well, but that was not successful.... -# http://www.timeanddate.com/news/time/mauritius-daylight-saving-time.html - -# From Alex Krivenyshev (2008-06-25): -# http://economicdevelopment.gov.mu/portal/site/Mainhomepage/menuitem.a42b24128104d9845dabddd154508a0c/?content_id=0a7cee8b5d69a110VgnVCM1000000a04a8c0RCRD - -# From Arthur David Olson (2008-06-30): -# The www.timeanddate.com article cited by Steffen Thorsen notes that "A -# final decision has yet to be made on the times that daylight saving -# would begin and end on these dates." As a place holder, use midnight. - -# From Paul Eggert (2008-06-30): -# Follow Thorsen on DST in 1982/1983, instead of Shanks & Pottenger. - -# From Steffen Thorsen (2008-07-10): -# According to -# http://www.lexpress.mu/display_article.php?news_id=111216 -# (in French), Mauritius will start and end their DST a few days earlier -# than previously announced (2008-11-01 to 2009-03-31). The new start -# date is 2008-10-26 at 02:00 and the new end date is 2009-03-27 (no time -# given, but it is probably at either 2 or 3 wall clock time). -# -# A little strange though, since the article says that they moved the date -# to align itself with Europe and USA which also change time on that date, -# but that means they have not paid attention to what happened in -# USA/Canada last year (DST ends first Sunday in November). I also wonder -# why that they end on a Friday, instead of aligning with Europe which -# changes two days later. - -# From Alex Krivenyshev (2008-07-11): -# Seems that English language article "The revival of daylight saving -# time: Energy conservation?"- No. 16578 (07/11/2008) was originally -# published on Monday, June 30, 2008... -# -# I guess that article in French "Le gouvernement avance l'introduction -# de l'heure d'été" stating that DST in Mauritius starting on October 26 -# and ending on March 27, 2009 is the most recent one.... -# http://www.worldtimezone.com/dst_news/dst_news_mauritius02.html - -# From Riad M. 
Hossen Ally (2008-08-03): -# The Government of Mauritius weblink -# http://www.gov.mu/portal/site/pmosite/menuitem.4ca0efdee47462e7440a600248a521ca/?content_id=4728ca68b2a5b110VgnVCM1000000a04a8c0RCRD -# Cabinet Decision of July 18th, 2008 states as follows: -# -# 4. ...Cabinet has agreed to the introduction into the National Assembly -# of the Time Bill which provides for the introduction of summer time in -# Mauritius. The summer time period which will be of one hour ahead of -# the standard time, will be aligned with that in Europe and the United -# States of America. It will start at two o'clock in the morning on the -# last Sunday of October and will end at two o'clock in the morning on -# the last Sunday of March the following year. The summer time for the -# year 2008-2009 will, therefore, be effective as from 26 October 2008 -# and end on 29 March 2009. - -# From Ed Maste (2008-10-07): -# THE TIME BILL (No. XXVII of 2008) Explanatory Memorandum states the -# beginning / ending of summer time is 2 o'clock standard time in the -# morning of the last Sunday of October / last Sunday of March. -# http://www.gov.mu/portal/goc/assemblysite/file/bill2708.pdf - -# From Steffen Thorsen (2009-06-05): -# According to several sources, Mauritius will not continue to observe -# DST the coming summer... -# -# Some sources, in French: -# http://www.defimedia.info/news/946/Rashid-Beebeejaun-:-%C2%AB-L%E2%80%99heure-d%E2%80%99%C3%A9t%C3%A9-ne-sera-pas-appliqu%C3%A9e-cette-ann%C3%A9e-%C2%BB -# http://lexpress.mu/Story/3398~Beebeejaun---Les-objectifs-d-%C3%A9conomie-d-%C3%A9nergie-de-l-heure-d-%C3%A9t%C3%A9-ont-%C3%A9t%C3%A9-atteints- -# -# Our wrap-up: -# http://www.timeanddate.com/news/time/mauritius-dst-will-not-repeat.html - -# From Arthur David Olson (2009-07-11): -# The "mauritius-dst-will-not-repeat" wrapup includes this: -# "The trial ended on March 29, 2009, when the clocks moved back by one hour -# at 2am (or 02:00) local time..." - -# Rule NAME FROM TO TYPE IN ON AT SAVE LETTER/S -Rule Mauritius 1982 only - Oct 10 0:00 1:00 S -Rule Mauritius 1983 only - Mar 21 0:00 0 - -Rule Mauritius 2008 only - Oct lastSun 2:00 1:00 S -Rule Mauritius 2009 only - Mar lastSun 2:00 0 - -# Zone NAME GMTOFF RULES FORMAT [UNTIL] -Zone Indian/Mauritius 3:50:00 - LMT 1907 # Port Louis - 4:00 Mauritius +04/+05 -# Agalega Is, Rodriguez -# no information; probably like Indian/Mauritius - -# Mayotte -# See Africa/Nairobi. - -# Morocco -# See the 'europe' file for Spanish Morocco (Africa/Ceuta). - -# From Alex Krivenyshev (2008-05-09): -# Here is an article that Morocco plan to introduce Daylight Saving Time between -# 1 June, 2008 and 27 September, 2008. -# -# "... Morocco is to save energy by adjusting its clock during summer so it will -# be one hour ahead of GMT between 1 June and 27 September, according to -# Communication Minister and Government Spokesman, Khalid Naciri...." -# -# http://www.worldtimezone.net/dst_news/dst_news_morocco01.html -# http://en.afrik.com/news11892.html - -# From Alex Krivenyshev (2008-05-09): -# The Morocco time change can be confirmed on Morocco web site Maghreb Arabe -# Presse: -# http://www.map.ma/eng/sections/box3/morocco_shifts_to_da/view -# -# Morocco shifts to daylight time on June 1st through September 27, Govt. -# spokesman. 
- -# From Patrice Scattolin (2008-05-09): -# According to this article: -# http://www.avmaroc.com/actualite/heure-dete-comment-a127896.html -# (and republished here: ) -# the changes occur at midnight: -# -# Saturday night May 31st at midnight (which in French is to be -# interpreted as the night between Saturday and Sunday) -# Sunday night the 28th at midnight -# -# Seeing that the 28th is Monday, I am guessing that she intends to say -# the midnight of the 28th which is the midnight between Sunday and -# Monday, which jives with other sources that say that it's inclusive -# June 1st to Sept 27th. -# -# The decision was taken by decree *2-08-224 *but I can't find the decree -# published on the web. -# -# It's also confirmed here: -# http://www.maroc.ma/NR/exeres/FACF141F-D910-44B0-B7FA-6E03733425D1.htm -# on a government portal as being between June 1st and Sept 27th (not yet -# posted in English). -# -# The following Google query will generate many relevant hits: -# http://www.google.com/search?hl=en&q=Conseil+de+gouvernement+maroc+heure+avance&btnG=Search - -# From Steffen Thorsen (2008-08-27): -# Morocco will change the clocks back on the midnight between August 31 -# and September 1. They originally planned to observe DST to near the end -# of September: -# -# One article about it (in French): -# http://www.menara.ma/fr/Actualites/Maroc/Societe/ci.retour_a_l_heure_gmt_a_partir_du_dimanche_31_aout_a_minuit_officiel_.default -# -# We have some further details posted here: -# http://www.timeanddate.com/news/time/morocco-ends-dst-early-2008.html - -# From Steffen Thorsen (2009-03-17): -# Morocco will observe DST from 2009-06-01 00:00 to 2009-08-21 00:00 according -# to many sources, such as -# http://news.marweb.com/morocco/entertainment/morocco-daylight-saving.html -# http://www.medi1sat.ma/fr/depeche.aspx?idp=2312 -# (French) -# -# Our summary: -# http://www.timeanddate.com/news/time/morocco-starts-dst-2009.html - -# From Alexander Krivenyshev (2009-03-17): -# Here is a link to official document from Royaume du Maroc Premier Ministre, -# Ministère de la Modernisation des Secteurs Publics -# -# Under Article 1 of Royal Decree No. 455-67 of Act 23 safar 1387 (2 June 1967) -# concerning the amendment of the legal time, the Ministry of Modernization of -# Public Sectors announced that the official time in the Kingdom will be -# advanced 60 minutes from Sunday 31 May 2009 at midnight. -# -# http://www.mmsp.gov.ma/francais/Actualites_fr/PDF_Actualites_Fr/HeureEte_FR.pdf -# http://www.worldtimezone.com/dst_news/dst_news_morocco03.html - -# From Steffen Thorsen (2010-04-13): -# Several news media in Morocco report that the Ministry of Modernization -# of Public Sectors has announced that Morocco will have DST from -# 2010-05-02 to 2010-08-08. -# -# Example: -# http://www.lavieeco.com/actualites/4099-le-maroc-passera-a-l-heure-d-ete-gmt1-le-2-mai.html -# (French) -# Our page: -# http://www.timeanddate.com/news/time/morocco-starts-dst-2010.html - -# From Dan Abitol (2011-03-30): -# ...Rules for Africa/Casablanca are the following (24h format) -# The 3rd April 2011 at 00:00:00, [it] will be 3rd April 01:00:00 -# The 31st July 2011 at 00:59:59, [it] will be 31st July 00:00:00 -# ...Official links of change in morocco -# The change was broadcast on the FM Radio -# I ve called ANRT (telecom regulations in Morocco) at -# +212.537.71.84.00 -# http://www.anrt.net.ma/fr/ -# They said that -# http://www.map.ma/fr/sections/accueil/l_heure_legale_au_ma/view -# is the official publication to look at. 
-# They said that the decision was already taken. -# -# More articles in the press -# http://www.yabiladi.com/articles/details/5058/secret-l-heure-d-ete-maroc-leve.html -# http://www.lematin.ma/Actualite/Express/Article.asp?id=148923 -# http://www.lavieeco.com/actualite/Le-Maroc-passe-sur-GMT%2B1-a-partir-de-dim - -# From Petr Machata (2011-03-30): -# They have it written in English here: -# http://www.map.ma/eng/sections/home/morocco_to_spring_fo/view -# -# It says there that "Morocco will resume its standard time on July 31, -# 2011 at midnight." Now they don't say whether they mean midnight of -# wall clock time (i.e. 11pm UTC), but that's what I would assume. It has -# also been like that in the past. - -# From Alexander Krivenyshev (2012-03-09): -# According to Infomédiaire web site from Morocco (infomediaire.ma), -# on March 9, 2012, (in French) Heure légale: -# Le Maroc adopte officiellement l'heure d'été -# http://www.infomediaire.ma/news/maroc/heure-l%C3%A9gale-le-maroc-adopte-officiellement-lheure-d%C3%A9t%C3%A9 -# Governing Council adopted draft decree, that Morocco DST starts on -# the last Sunday of March (March 25, 2012) and ends on -# last Sunday of September (September 30, 2012) -# except the month of Ramadan. -# or (brief) -# http://www.worldtimezone.com/dst_news/dst_news_morocco06.html - -# From Arthur David Olson (2012-03-10): -# The infomediaire.ma source indicates that the system is to be in -# effect every year. It gives 03H00 as the "fall back" time of day; -# it lacks a "spring forward" time of day; assume 2:00 XXX. -# Wait on specifying the Ramadan exception for details about -# start date, start time of day, end date, and end time of day XXX. - -# From Christophe Tropamer (2012-03-16): -# Seen Morocco change again: -# http://www.le2uminutes.com/actualite.php -# "...à partir du dernier dimanche d'avril et non fins mars, -# comme annoncé précédemment." - -# From Milamber Space Network (2012-07-17): -# The official return to GMT is announced by the Moroccan government: -# http://www.mmsp.gov.ma/fr/actualites.aspx?id=288 [in French] -# -# Google translation, lightly edited: -# Back to the standard time of the Kingdom (GMT) -# Pursuant to Decree No. 2-12-126 issued on 26 Jumada (I) 1433 (April 18, -# 2012) and in accordance with the order of Mr. President of the -# Government No. 3-47-12 issued on 24 Sha'ban (11 July 2012), the Ministry -# of Public Service and Administration Modernization announces the return -# of the legal time of the Kingdom (GMT) from Friday, July 20, 2012 until -# Monday, August 20, 2012. So the time will be delayed by 60 minutes from -# 3:00 am Friday, July 20, 2012 and will again be advanced by 60 minutes -# August 20, 2012 from 2:00 am. - -# From Paul Eggert (2013-03-06): -# Morocco's daylight-saving transitions due to Ramadan seem to be -# announced a bit in advance. On 2012-07-11 the Moroccan government -# announced that year's Ramadan daylight-saving transitions would be -# 2012-07-20 and 2012-08-20; see -# http://www.mmsp.gov.ma/fr/actualites.aspx?id=288 - -# From Andrew Paprocki (2013-07-02): -# Morocco announced that the year's Ramadan daylight-savings -# transitions would be 2013-07-07 and 2013-08-10; see: -# http://www.maroc.ma/en/news/morocco-suspends-daylight-saving-time-july-7-aug10 - -# From Steffen Thorsen (2013-09-28): -# Morocco extends DST by one month, on very short notice, just 1 day -# before it was going to end. 
There is a new decree (2.13.781) for -# this, where DST from now on goes from last Sunday of March at 02:00 -# to last Sunday of October at 03:00, similar to EU rules. Official -# source (French): -# http://www.maroc.gov.ma/fr/actualites/lhoraire-dete-gmt1-maintenu-jusquau-27-octobre-2013 -# Another source (specifying the time for start and end in the decree): -# http://www.lemag.ma/Heure-d-ete-au-Maroc-jusqu-au-27-octobre_a75620.html - -# From Sebastien Willemijns (2014-03-18): -# http://www.afriquinfos.com/articles/2014/3/18/maroc-heure-dete-avancez-tous-horloges-247891.asp - -# From Milamber Space Network (2014-06-05): -# The Moroccan government has recently announced that the country will return -# to standard time at 03:00 on Saturday, June 28, 2014 local time.... DST -# will resume again at 02:00 on Saturday, August 2, 2014.... -# http://www.mmsp.gov.ma/fr/actualites.aspx?id=586 - -# From Milamber (2015-06-08): -# (Google Translation) The hour will thus be delayed 60 minutes -# Sunday, June 14 at 3:00, the ministry said in a statement, adding -# that the time will be advanced again 60 minutes Sunday, July 19, -# 2015 at 2:00. The move comes under 2.12.126 Decree of 26 Jumada I -# 1433 (18 April 2012) and the decision of the Head of Government of -# 16 N. 3-29-15 Chaaban 1435 (4 June 2015). -# Source (french): -# http://lnt.ma/le-maroc-reculera-dune-heure-le-dimanche-14-juin/ -# -# From Milamber (2015-06-09): -# http://www.mmsp.gov.ma/fr/actualites.aspx?id=863 -# -# From Michael Deckers (2015-06-09): -# [The gov.ma announcement] would (probably) make the switch on 2015-07-19 go -# from 03:00 to 04:00 rather than from 02:00 to 03:00, as in the patch.... -# I think the patch is correct and the quoted text is wrong; the text in -# agrees -# with the patch. - -# From Paul Eggert (2015-06-08): -# For now, guess that later spring and fall transitions will use 2015's rules, -# and guess that Morocco will switch to standard time at 03:00 the last -# Sunday before Ramadan, and back to DST at 02:00 the first Sunday after -# Ramadan. To implement this, transition dates for 2016 through 2037 were -# determined by running the following program under GNU Emacs 24.3, with the -# results integrated by hand into the table below. 
-# (let ((islamic-year 1437)) -# (require 'cal-islam) -# (while (< islamic-year 1460) -# (let ((a (calendar-islamic-to-absolute (list 9 1 islamic-year))) -# (b (calendar-islamic-to-absolute (list 10 1 islamic-year))) -# (sunday 0)) -# (while (/= sunday (mod (setq a (1- a)) 7))) -# (while (/= sunday (mod b 7)) -# (setq b (1+ b))) -# (setq a (calendar-gregorian-from-absolute a)) -# (setq b (calendar-gregorian-from-absolute b)) -# (insert -# (format -# (concat "Rule\tMorocco\t%d\tonly\t-\t%s\t%2d\t 3:00\t0\t-\n" -# "Rule\tMorocco\t%d\tonly\t-\t%s\t%2d\t 2:00\t1:00\tS\n") -# (car (cdr (cdr a))) (calendar-month-name (car a) t) (car (cdr a)) -# (car (cdr (cdr b))) (calendar-month-name (car b) t) (car (cdr b))))) -# (setq islamic-year (+ 1 islamic-year)))) - -# RULE NAME FROM TO TYPE IN ON AT SAVE LETTER/S - -Rule Morocco 1939 only - Sep 12 0:00 1:00 S -Rule Morocco 1939 only - Nov 19 0:00 0 - -Rule Morocco 1940 only - Feb 25 0:00 1:00 S -Rule Morocco 1945 only - Nov 18 0:00 0 - -Rule Morocco 1950 only - Jun 11 0:00 1:00 S -Rule Morocco 1950 only - Oct 29 0:00 0 - -Rule Morocco 1967 only - Jun 3 12:00 1:00 S -Rule Morocco 1967 only - Oct 1 0:00 0 - -Rule Morocco 1974 only - Jun 24 0:00 1:00 S -Rule Morocco 1974 only - Sep 1 0:00 0 - -Rule Morocco 1976 1977 - May 1 0:00 1:00 S -Rule Morocco 1976 only - Aug 1 0:00 0 - -Rule Morocco 1977 only - Sep 28 0:00 0 - -Rule Morocco 1978 only - Jun 1 0:00 1:00 S -Rule Morocco 1978 only - Aug 4 0:00 0 - -Rule Morocco 2008 only - Jun 1 0:00 1:00 S -Rule Morocco 2008 only - Sep 1 0:00 0 - -Rule Morocco 2009 only - Jun 1 0:00 1:00 S -Rule Morocco 2009 only - Aug 21 0:00 0 - -Rule Morocco 2010 only - May 2 0:00 1:00 S -Rule Morocco 2010 only - Aug 8 0:00 0 - -Rule Morocco 2011 only - Apr 3 0:00 1:00 S -Rule Morocco 2011 only - Jul 31 0:00 0 - -Rule Morocco 2012 2013 - Apr lastSun 2:00 1:00 S -Rule Morocco 2012 only - Jul 20 3:00 0 - -Rule Morocco 2012 only - Aug 20 2:00 1:00 S -Rule Morocco 2012 only - Sep 30 3:00 0 - -Rule Morocco 2013 only - Jul 7 3:00 0 - -Rule Morocco 2013 only - Aug 10 2:00 1:00 S -Rule Morocco 2013 max - Oct lastSun 3:00 0 - -Rule Morocco 2014 2021 - Mar lastSun 2:00 1:00 S -Rule Morocco 2014 only - Jun 28 3:00 0 - -Rule Morocco 2014 only - Aug 2 2:00 1:00 S -Rule Morocco 2015 only - Jun 14 3:00 0 - -Rule Morocco 2015 only - Jul 19 2:00 1:00 S -Rule Morocco 2016 only - Jun 5 3:00 0 - -Rule Morocco 2016 only - Jul 10 2:00 1:00 S -Rule Morocco 2017 only - May 21 3:00 0 - -Rule Morocco 2017 only - Jul 2 2:00 1:00 S -Rule Morocco 2018 only - May 13 3:00 0 - -Rule Morocco 2018 only - Jun 17 2:00 1:00 S -Rule Morocco 2019 only - May 5 3:00 0 - -Rule Morocco 2019 only - Jun 9 2:00 1:00 S -Rule Morocco 2020 only - Apr 19 3:00 0 - -Rule Morocco 2020 only - May 24 2:00 1:00 S -Rule Morocco 2021 only - Apr 11 3:00 0 - -Rule Morocco 2021 only - May 16 2:00 1:00 S -Rule Morocco 2022 only - May 8 2:00 1:00 S -Rule Morocco 2023 only - Apr 23 2:00 1:00 S -Rule Morocco 2024 only - Apr 14 2:00 1:00 S -Rule Morocco 2025 only - Apr 6 2:00 1:00 S -Rule Morocco 2026 max - Mar lastSun 2:00 1:00 S -Rule Morocco 2036 only - Oct 19 3:00 0 - -Rule Morocco 2037 only - Oct 4 3:00 0 - - -# Zone NAME GMTOFF RULES FORMAT [UNTIL] -Zone Africa/Casablanca -0:30:20 - LMT 1913 Oct 26 - 0:00 Morocco WE%sT 1984 Mar 16 - 1:00 - CET 1986 - 0:00 Morocco WE%sT - -# Western Sahara -# -# From Gwillim Law (2013-10-22): -# A correspondent who is usually well informed about time zone matters -# ... says that Western Sahara observes daylight saving time, just as -# Morocco does. 
-# -# From Paul Eggert (2013-10-23): -# Assume that this has been true since Western Sahara switched to GMT, -# since most of it was then controlled by Morocco. - -Zone Africa/El_Aaiun -0:52:48 - LMT 1934 Jan # El Aaiún - -1:00 - -01 1976 Apr 14 - 0:00 Morocco WE%sT - -# Mozambique -# -# Shanks gives 1903-03-01 for the transition to CAT. -# Perhaps the 1911-05-26 Portuguese decree -# https://dre.pt/pdf1sdip/1911/05/12500/23132313.pdf -# merely made it official? -# -# Zone NAME GMTOFF RULES FORMAT [UNTIL] -Zone Africa/Maputo 2:10:20 - LMT 1903 Mar - 2:00 - CAT -Link Africa/Maputo Africa/Blantyre # Malawi -Link Africa/Maputo Africa/Bujumbura # Burundi -Link Africa/Maputo Africa/Gaborone # Botswana -Link Africa/Maputo Africa/Harare # Zimbabwe -Link Africa/Maputo Africa/Kigali # Rwanda -Link Africa/Maputo Africa/Lubumbashi # E Dem. Rep. of Congo -Link Africa/Maputo Africa/Lusaka # Zambia - -# Namibia -# The 1994-04-03 transition is from Shanks & Pottenger. -# Shanks & Pottenger report no DST after 1998-04; go with IATA. - -# From Petronella Sibeene (2007-03-30): -# http://allafrica.com/stories/200703300178.html -# While the entire country changes its time, Katima Mulilo and other -# settlements in Caprivi unofficially will not because the sun there -# rises and sets earlier compared to other regions. Chief of -# Forecasting Riaan van Zyl explained that the far eastern parts of -# the country are close to 40 minutes earlier in sunrise than the rest -# of the country. -# -# From Paul Eggert (2017-02-22): -# Although the Zambezi Region (formerly known as Caprivi) informally -# observes Botswana time, we have no details about historical practice. -# In the meantime people there can use Africa/Gaborone. -# See: Immanuel S. The Namibian. 2017-02-23. -# http://www.namibian.com.na/51480/read/Time-change-divides-lawmakers - -# RULE NAME FROM TO TYPE IN ON AT SAVE LETTER/S -Rule Namibia 1994 max - Sep Sun>=1 2:00 1:00 S -Rule Namibia 1995 max - Apr Sun>=1 2:00 0 - -# Zone NAME GMTOFF RULES FORMAT [UNTIL] -Zone Africa/Windhoek 1:08:24 - LMT 1892 Feb 8 - 1:30 - +0130 1903 Mar - 2:00 - SAST 1942 Sep 20 2:00 - 2:00 1:00 SAST 1943 Mar 21 2:00 - 2:00 - SAST 1990 Mar 21 # independence - 2:00 - CAT 1994 Apr 3 - 1:00 Namibia WA%sT - -# Niger -# See Africa/Lagos. - -# Nigeria -# Zone NAME GMTOFF RULES FORMAT [UNTIL] -Zone Africa/Lagos 0:13:36 - LMT 1919 Sep - 1:00 - WAT -Link Africa/Lagos Africa/Bangui # Central African Republic -Link Africa/Lagos Africa/Brazzaville # Rep. of the Congo -Link Africa/Lagos Africa/Douala # Cameroon -Link Africa/Lagos Africa/Kinshasa # Dem. Rep. of the Congo (west) -Link Africa/Lagos Africa/Libreville # Gabon -Link Africa/Lagos Africa/Luanda # Angola -Link Africa/Lagos Africa/Malabo # Equatorial Guinea -Link Africa/Lagos Africa/Niamey # Niger -Link Africa/Lagos Africa/Porto-Novo # Benin - -# Réunion -# Zone NAME GMTOFF RULES FORMAT [UNTIL] -Zone Indian/Reunion 3:41:52 - LMT 1911 Jun # Saint-Denis - 4:00 - +04 -# -# Crozet Islands also observes Réunion time; see the 'antarctica' file. -# -# Scattered Islands (Îles Éparses) administered from Réunion are as follows. -# The following information about them is taken from -# Îles Éparses (, 1997-07-22, -# in French; no longer available as of 1999-08-17). -# We have no info about their time zone histories. 
-# -# Bassas da India - uninhabited -# Europa Island - inhabited from 1905 to 1910 by two families -# Glorioso Is - inhabited until at least 1958 -# Juan de Nova - uninhabited -# Tromelin - inhabited until at least 1958 - -# Rwanda -# See Africa/Maputo. - -# St Helena -# See Africa/Abidjan. -# The other parts of the St Helena territory are similar: -# Tristan da Cunha: on GMT, say Whitman and the CIA -# Ascension: on GMT, say the USNO (1995-12-21) and the CIA -# Gough (scientific station since 1955; sealers wintered previously): -# on GMT, says the CIA -# Inaccessible, Nightingale: uninhabited - -# São Tomé and Príncipe -# Senegal -# See Africa/Abidjan. - -# Seychelles -# Zone NAME GMTOFF RULES FORMAT [UNTIL] -Zone Indian/Mahe 3:41:48 - LMT 1906 Jun # Victoria - 4:00 - +04 -# From Paul Eggert (2001-05-30): -# Aldabra, Farquhar, and Desroches, originally dependencies of the -# Seychelles, were transferred to the British Indian Ocean Territory -# in 1965 and returned to Seychelles control in 1976. We don't know -# whether this affected their time zone, so omit this for now. -# Possibly the islands were uninhabited. - -# Sierra Leone -# See Africa/Abidjan. - -# Somalia -# See Africa/Nairobi. - -# South Africa -# Rule NAME FROM TO TYPE IN ON AT SAVE LETTER/S -Rule SA 1942 1943 - Sep Sun>=15 2:00 1:00 - -Rule SA 1943 1944 - Mar Sun>=15 2:00 0 - -# Zone NAME GMTOFF RULES FORMAT [UNTIL] -Zone Africa/Johannesburg 1:52:00 - LMT 1892 Feb 8 - 1:30 - SAST 1903 Mar - 2:00 SA SAST -Link Africa/Johannesburg Africa/Maseru # Lesotho -Link Africa/Johannesburg Africa/Mbabane # Swaziland -# -# Marion and Prince Edward Is -# scientific station since 1947 -# no information - -# Sudan -# -# From -# Sudan News Agency (2000-01-13), -# also reported by Michaël De Beukelaer-Dossche via Steffen Thorsen: -# Clocks will be moved ahead for 60 minutes all over the Sudan as of noon -# Saturday.... This was announced Thursday by Caretaker State Minister for -# Manpower Abdul-Rahman Nur-Eddin. -# -# Rule NAME FROM TO TYPE IN ON AT SAVE LETTER/S -Rule Sudan 1970 only - May 1 0:00 1:00 S -Rule Sudan 1970 1985 - Oct 15 0:00 0 - -Rule Sudan 1971 only - Apr 30 0:00 1:00 S -Rule Sudan 1972 1985 - Apr lastSun 0:00 1:00 S -# Zone NAME GMTOFF RULES FORMAT [UNTIL] -Zone Africa/Khartoum 2:10:08 - LMT 1931 - 2:00 Sudan CA%sT 2000 Jan 15 12:00 - 3:00 - EAT - -# South Sudan -Link Africa/Khartoum Africa/Juba - -# Swaziland -# See Africa/Johannesburg. - -# Tanzania -# See Africa/Nairobi. - -# Togo -# See Africa/Abidjan. - -# Tunisia - -# From Gwillim Law (2005-04-30): -# My correspondent, Risto Nykänen, has alerted me to another adoption of DST, -# this time in Tunisia. According to Yahoo France News -# , in a story attributed to AP -# and dated 2005-04-26, "Tunisia has decided to advance its official time by -# one hour, starting on Sunday, May 1. Henceforth, Tunisian time will be -# UTC+2 instead of UTC+1. The change will take place at 23:00 UTC next -# Saturday." (My translation) -# -# From Oscar van Vlijmen (2005-05-02): -# La Presse, the first national daily newspaper ... -# http://www.lapresse.tn/archives/archives280405/actualites/lheure.html -# ... DST for 2005: on: Sun May 1 0h standard time, off: Fri Sept. 30, -# 1h standard time. -# -# From Atef Loukil (2006-03-28): -# The daylight saving time will be the same each year: -# Beginning : the last Sunday of March at 02:00 -# Ending : the last Sunday of October at 03:00 ... 
-# http://www.tap.info.tn/en/index.php?option=com_content&task=view&id=1188&Itemid=50 - -# From Steffen Thorsen (2009-03-16): -# According to several news sources, Tunisia will not observe DST this year. -# (Arabic) -# http://www.elbashayer.com/?page=viewn&nid=42546 -# http://www.babnet.net/kiwidetail-15295.asp -# -# We have also confirmed this with the US embassy in Tunisia. -# We have a wrap-up about this on the following page: -# http://www.timeanddate.com/news/time/tunisia-cancels-dst-2009.html - -# From Alexander Krivenyshev (2009-03-17): -# Here is a link to Tunis Afrique Presse News Agency -# -# Standard time to be kept the whole year long (tap.info.tn): -# -# (in English) -# http://www.tap.info.tn/en/index.php?option=com_content&task=view&id=26813&Itemid=157 -# -# (in Arabic) -# http://www.tap.info.tn/ar/index.php?option=com_content&task=view&id=61240&Itemid=1 - -# From Arthur David Olson (2009-03-18): -# The Tunis Afrique Presse News Agency notice contains this: "This measure is -# due to the fact that the fasting month of Ramadan coincides with the period -# concerned by summer time. Therefore, the standard time will be kept -# unchanged the whole year long." So foregoing DST seems to be an exception -# (albeit one that may be repeated in the future). - -# From Alexander Krivenyshev (2010-03-27): -# According to some news reports Tunis confirmed not to use DST in 2010 -# -# (translation): -# "The Tunisian government has decided to abandon DST, which was scheduled on -# Sunday... -# Tunisian authorities had suspended the DST for the first time last year also -# coincided with the month of Ramadan..." -# -# (in Arabic) -# http://www.moheet.com/show_news.aspx?nid=358861&pg=1 -# http://www.almadenahnews.com/newss/news.php?c=118&id=38036 -# http://www.worldtimezone.com/dst_news/dst_news_tunis02.html - -# Rule NAME FROM TO TYPE IN ON AT SAVE LETTER/S -Rule Tunisia 1939 only - Apr 15 23:00s 1:00 S -Rule Tunisia 1939 only - Nov 18 23:00s 0 - -Rule Tunisia 1940 only - Feb 25 23:00s 1:00 S -Rule Tunisia 1941 only - Oct 6 0:00 0 - -Rule Tunisia 1942 only - Mar 9 0:00 1:00 S -Rule Tunisia 1942 only - Nov 2 3:00 0 - -Rule Tunisia 1943 only - Mar 29 2:00 1:00 S -Rule Tunisia 1943 only - Apr 17 2:00 0 - -Rule Tunisia 1943 only - Apr 25 2:00 1:00 S -Rule Tunisia 1943 only - Oct 4 2:00 0 - -Rule Tunisia 1944 1945 - Apr Mon>=1 2:00 1:00 S -Rule Tunisia 1944 only - Oct 8 0:00 0 - -Rule Tunisia 1945 only - Sep 16 0:00 0 - -Rule Tunisia 1977 only - Apr 30 0:00s 1:00 S -Rule Tunisia 1977 only - Sep 24 0:00s 0 - -Rule Tunisia 1978 only - May 1 0:00s 1:00 S -Rule Tunisia 1978 only - Oct 1 0:00s 0 - -Rule Tunisia 1988 only - Jun 1 0:00s 1:00 S -Rule Tunisia 1988 1990 - Sep lastSun 0:00s 0 - -Rule Tunisia 1989 only - Mar 26 0:00s 1:00 S -Rule Tunisia 1990 only - May 1 0:00s 1:00 S -Rule Tunisia 2005 only - May 1 0:00s 1:00 S -Rule Tunisia 2005 only - Sep 30 1:00s 0 - -Rule Tunisia 2006 2008 - Mar lastSun 2:00s 1:00 S -Rule Tunisia 2006 2008 - Oct lastSun 2:00s 0 - - -# Shanks & Pottenger give 0:09:20 for Paris Mean Time; go with Howse's -# more precise 0:09:21. -# Shanks & Pottenger say the 1911 switch was on Mar 9; go with Howse's Mar 11. -# Zone NAME GMTOFF RULES FORMAT [UNTIL] -Zone Africa/Tunis 0:40:44 - LMT 1881 May 12 - 0:09:21 - PMT 1911 Mar 11 # Paris Mean Time - 1:00 Tunisia CE%sT - -# Uganda -# See Africa/Nairobi. - -# Zambia -# Zimbabwe -# See Africa/Maputo. 
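The Rule and Zone tables above follow the zic column layout repeated in the header comments (Rule NAME FROM TO TYPE IN ON AT SAVE LETTER/S; Zone NAME GMTOFF RULES FORMAT [UNTIL]). As a minimal illustration of how those whitespace-separated columns line up, here is a small Python sketch that splits one Rule line into named fields. It is not the real zic parser (it ignores comments, continuation lines, and the many accepted field spellings), the helper names are ours, and the sample line is copied from the Tunisia table above.

# Illustrative only: map the columns of one simple zic "Rule" line to names.
FIELDS = ("keyword", "name", "from", "to", "type", "in", "on", "at", "save", "letters")

def parse_rule(line):
    values = line.split()
    if len(values) != len(FIELDS) or values[0] != "Rule":
        raise ValueError("not a simple Rule line: %r" % line)
    return dict(zip(FIELDS, values))

# From the Tunisia table above: DST starts 2005-05-01 at 00:00 standard
# time ("0:00s"), adds 1:00, and the letter "S" is substituted into the
# zone's "CE%sT" format, giving "CEST".
print(parse_rule("Rule Tunisia 2005 only - May 1 0:00s 1:00 S"))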
diff --git a/src/timezone/data/antarctica b/src/timezone/data/antarctica deleted file mode 100644 index 3332d66842..0000000000 --- a/src/timezone/data/antarctica +++ /dev/null @@ -1,340 +0,0 @@ -# This file is in the public domain, so clarified as of -# 2009-05-17 by Arthur David Olson. - -# From Paul Eggert (1999-11-15): -# To keep things manageable, we list only locations occupied year-round; see -# COMNAP - Stations and Bases -# http://www.comnap.aq/comnap/comnap.nsf/P/Stations/ -# and -# Summary of the Peri-Antarctic Islands (1998-07-23) -# http://www.spri.cam.ac.uk/bob/periant.htm -# for information. -# Unless otherwise specified, we have no time zone information. - -# FORMAT is '-00' and GMTOFF is 0 for locations while uninhabited. - -# Argentina - year-round bases -# Belgrano II, Confin Coast, -770227-0343737, since 1972-02-05 -# Carlini, Potter Cove, King George Island, -6414-0602320, since 1982-01 -# Esperanza, Hope Bay, -6323-05659, since 1952-12-17 -# Marambio, -6414-05637, since 1969-10-29 -# Orcadas, Laurie I, -6016-04444, since 1904-02-22 -# San Martín, Barry I, -6808-06706, since 1951-03-21 -# (except 1960-03 / 1976-03-21) - -# Australia - territories -# Heard Island, McDonald Islands (uninhabited) -# previously sealers and scientific personnel wintered -# Margaret Turner reports -# http://web.archive.org/web/20021204222245/http://www.dstc.qut.edu.au/DST/marg/daylight.html -# (1999-09-30) that they're UT +05, with no DST; -# presumably this is when they have visitors. -# -# year-round bases -# Casey, Bailey Peninsula, -6617+11032, since 1969 -# Davis, Vestfold Hills, -6835+07759, since 1957-01-13 -# (except 1964-11 - 1969-02) -# Mawson, Holme Bay, -6736+06253, since 1954-02-13 - -# From Steffen Thorsen (2009-03-11): -# Three Australian stations in Antarctica have changed their time zone: -# Casey moved from UTC+8 to UTC+11 -# Davis moved from UTC+7 to UTC+5 -# Mawson moved from UTC+6 to UTC+5 -# The changes occurred on 2009-10-18 at 02:00 (local times). -# -# Government source: (Australian Antarctic Division) -# http://www.aad.gov.au/default.asp?casid=37079 -# -# We have more background information here: -# http://www.timeanddate.com/news/time/antarctica-new-times.html - -# From Steffen Thorsen (2010-03-10): -# We got these changes from the Australian Antarctic Division: ... -# -# - Casey station reverted to its normal time of UTC+8 on 5 March 2010. -# The change to UTC+11 is being considered as a regular summer thing but -# has not been decided yet. -# -# - Davis station will revert to its normal time of UTC+7 at 10 March 2010 -# 20:00 UTC. -# -# - Mawson station stays on UTC+5. -# -# Background: -# http://www.timeanddate.com/news/time/antartica-time-changes-2010.html - -# From Steffen Thorsen (2016-10-28): -# Australian Antarctica Division informed us that Casey changed time -# zone to UTC+11 in "the morning of 22nd October 2016". 
- -# Zone NAME GMTOFF RULES FORMAT [UNTIL] -Zone Antarctica/Casey 0 - -00 1969 - 8:00 - +08 2009 Oct 18 2:00 - 11:00 - +11 2010 Mar 5 2:00 - 8:00 - +08 2011 Oct 28 2:00 - 11:00 - +11 2012 Feb 21 17:00u - 8:00 - +08 2016 Oct 22 - 11:00 - +11 -Zone Antarctica/Davis 0 - -00 1957 Jan 13 - 7:00 - +07 1964 Nov - 0 - -00 1969 Feb - 7:00 - +07 2009 Oct 18 2:00 - 5:00 - +05 2010 Mar 10 20:00u - 7:00 - +07 2011 Oct 28 2:00 - 5:00 - +05 2012 Feb 21 20:00u - 7:00 - +07 -Zone Antarctica/Mawson 0 - -00 1954 Feb 13 - 6:00 - +06 2009 Oct 18 2:00 - 5:00 - +05 -# References: -# Casey Weather (1998-02-26) -# http://www.antdiv.gov.au/aad/exop/sfo/casey/casey_aws.html -# Davis Station, Antarctica (1998-02-26) -# http://www.antdiv.gov.au/aad/exop/sfo/davis/video.html -# Mawson Station, Antarctica (1998-02-25) -# http://www.antdiv.gov.au/aad/exop/sfo/mawson/video.html - -# Belgium - year-round base -# Princess Elisabeth, Queen Maud Land, -713412+0231200, since 2007 - -# Brazil - year-round base -# Ferraz, King George Island, -6205+05824, since 1983/4 - -# Bulgaria - year-round base -# St. Kliment Ohridski, Livingston Island, -623829-0602153, since 1988 - -# Chile - year-round bases and towns -# Escudero, South Shetland Is, -621157-0585735, since 1994 -# Frei Montalva, King George Island, -6214-05848, since 1969-03-07 -# O'Higgins, Antarctic Peninsula, -6319-05704, since 1948-02 -# Prat, -6230-05941 -# Villa Las Estrellas (a town), around the Frei base, since 1984-04-09 -# These locations employ Region of Magallanes time; use -# TZ='America/Punta_Arenas'. - -# China - year-round bases -# Great Wall, King George Island, -6213-05858, since 1985-02-20 -# Zhongshan, Larsemann Hills, Prydz Bay, -6922+07623, since 1989-02-26 - -# France - year-round bases (also see "France & Italy") -# -# From Antoine Leca (1997-01-20): -# Time data entries are from Nicole Pailleau at the IFRTP -# (French Institute for Polar Research and Technology). -# She confirms that French Southern Territories and Terre Adélie bases -# don't observe daylight saving time, even if Terre Adélie supplies came -# from Tasmania. -# -# French Southern Territories with year-round inhabitants -# -# Alfred Faure, Possession Island, Crozet Islands, -462551+0515152, since 1964; -# sealing & whaling stations operated variously 1802/1911+; -# see Indian/Reunion. -# -# Martin-de-Viviès, Amsterdam Island, -374105+0773155, since 1950 -# Port-aux-Français, Kerguelen Islands, -492110+0701303, since 1951; -# whaling & sealing station operated 1908/1914, 1920/1929, and 1951/1956 -# -# St Paul Island - near Amsterdam, uninhabited -# fishing stations operated variously 1819/1931 -# -# Zone NAME GMTOFF RULES FORMAT [UNTIL] -Zone Indian/Kerguelen 0 - -00 1950 # Port-aux-Français - 5:00 - +05 -# -# year-round base in the main continent -# Dumont d'Urville, Île des Pétrels, -6640+14001, since 1956-11 -# (2005-12-05) -# -# Another base at Port-Martin, 50km east, began operation in 1947. -# It was destroyed by fire on 1952-01-14. 
-# -# Zone NAME GMTOFF RULES FORMAT [UNTIL] -Zone Antarctica/DumontDUrville 0 - -00 1947 - 10:00 - +10 1952 Jan 14 - 0 - -00 1956 Nov - 10:00 - +10 - -# France & Italy - year-round base -# Concordia, -750600+1232000, since 2005 - -# Germany - year-round base -# Neumayer III, -704080-0081602, since 2009 - -# India - year-round bases -# Bharati, -692428+0761114, since 2012 -# Maitri, -704558+0114356, since 1989 - -# Italy - year-round base (also see "France & Italy") -# Zuchelli, Terra Nova Bay, -744140+1640647, since 1986 - -# Japan - year-round bases -# Syowa (also known as Showa), -690022+0393524, since 1957 -# -# From Hideyuki Suzuki (1999-02-06): -# In all Japanese stations, +0300 is used as the standard time. -# -# Syowa station, which is the first antarctic station of Japan, -# was established on 1957-01-29. Since Syowa station is still the main -# station of Japan, it's appropriate for the principal location. -# Zone NAME GMTOFF RULES FORMAT [UNTIL] -Zone Antarctica/Syowa 0 - -00 1957 Jan 29 - 3:00 - +03 -# See: -# NIPR Antarctic Research Activities (1999-08-17) -# http://www.nipr.ac.jp/english/ara01.html - -# S Korea - year-round base -# Jang Bogo, Terra Nova Bay, -743700+1641205 since 2014 -# King Sejong, King George Island, -6213-05847, since 1988 - -# New Zealand - claims -# Balleny Islands (never inhabited) -# Scott Island (never inhabited) -# -# year-round base -# Scott Base, Ross Island, since 1957-01. -# See Pacific/Auckland. - -# Norway - territories -# Bouvet (never inhabited) -# -# claims -# Peter I Island (never inhabited) -# -# year-round base -# Troll, Queen Maud Land, -720041+0023206, since 2005-02-12 -# -# From Paul-Inge Flakstad (2014-03-10): -# I recently had a long dialog about this with the developer of timegenie.com. -# In the absence of specific dates, he decided to choose some likely ones: -# GMT +1 - From March 1 to the last Sunday in March -# GMT +2 - From the last Sunday in March until the last Sunday in October -# GMT +1 - From the last Sunday in October until November 7 -# GMT +0 - From November 7 until March 1 -# The dates for switching to and from UTC+0 will probably not be absolutely -# correct, but they should be quite close to the actual dates. -# -# From Paul Eggert (2014-03-21): -# The CET-switching Troll rules require zic from tz 2014b or later, so as -# suggested by Bengt-Inge Larsson comment them out for now, and approximate -# with only UTC and CEST. Uncomment them when 2014b is more prevalent. -# -# Rule NAME FROM TO TYPE IN ON AT SAVE LETTER/S -#Rule Troll 2005 max - Mar 1 1:00u 1:00 +01 -Rule Troll 2005 max - Mar lastSun 1:00u 2:00 +02 -#Rule Troll 2005 max - Oct lastSun 1:00u 1:00 +01 -#Rule Troll 2004 max - Nov 7 1:00u 0:00 +00 -# Remove the following line when uncommenting the above '#Rule' lines. 
-Rule Troll 2004 max - Oct lastSun 1:00u 0:00 +00 -# Zone NAME GMTOFF RULES FORMAT [UNTIL] -Zone Antarctica/Troll 0 - -00 2005 Feb 12 - 0:00 Troll %s - -# Poland - year-round base -# Arctowski, King George Island, -620945-0582745, since 1977 - -# Romania - year-bound base -# Law-Racoviță, Larsemann Hills, -692319+0762251, since 1986 - -# Russia - year-round bases -# Bellingshausen, King George Island, -621159-0585337, since 1968-02-22 -# Mirny, Davis coast, -6633+09301, since 1956-02 -# Molodezhnaya, Alasheyev Bay, -6740+04551, -# year-round from 1962-02 to 1999-07-01 -# Novolazarevskaya, Queen Maud Land, -7046+01150, -# year-round from 1960/61 to 1992 - -# Vostok, since 1957-12-16, temporarily closed 1994-02/1994-11 -# From Craig Mundell (1994-12-15): -# http://quest.arc.nasa.gov/antarctica/QA/computers/Directions,Time,ZIP -# Vostok, which is one of the Russian stations, is set on the same -# time as Moscow, Russia. -# -# From Lee Hotz (2001-03-08): -# I queried the folks at Columbia who spent the summer at Vostok and this is -# what they had to say about time there: -# "in the US Camp (East Camp) we have been on New Zealand (McMurdo) -# time, which is 12 hours ahead of GMT. The Russian Station Vostok was -# 6 hours behind that (although only 2 miles away, i.e. 6 hours ahead -# of GMT). This is a time zone I think two hours east of Moscow. The -# natural time zone is in between the two: 8 hours ahead of GMT." -# -# From Paul Eggert (2001-05-04): -# This seems to be hopelessly confusing, so I asked Lee Hotz about it -# in person. He said that some Antarctic locations set their local -# time so that noon is the warmest part of the day, and that this -# changes during the year and does not necessarily correspond to mean -# solar noon. So the Vostok time might have been whatever the clocks -# happened to be during their visit. So we still don't really know what time -# it is at Vostok. But we'll guess +06. -# -Zone Antarctica/Vostok 0 - -00 1957 Dec 16 - 6:00 - +06 - -# S Africa - year-round bases -# Marion Island, -4653+03752 -# SANAE IV, Vesleskarvet, Queen Maud Land, -714022-0025026, since 1997 - -# Ukraine - year-round base -# Vernadsky (formerly Faraday), Galindez Island, -651445-0641526, since 1954 - -# United Kingdom -# -# British Antarctic Territories (BAT) claims -# South Orkney Islands -# scientific station from 1903 -# whaling station at Signy I 1920/1926 -# South Shetland Islands -# -# year-round bases -# Bird Island, South Georgia, -5400-03803, since 1983 -# Deception Island, -6259-06034, whaling station 1912/1931, -# scientific station 1943/1967, -# previously sealers and a scientific expedition wintered by accident, -# and a garrison was deployed briefly -# Halley, Coates Land, -7535-02604, since 1956-01-06 -# Halley is on a moving ice shelf and is periodically relocated -# so that it is never more than 10km from its nominal location. -# Rothera, Adelaide Island, -6734-6808, since 1976-12-01 -# -# From Paul Eggert (2002-10-22) -# says Rothera is -03 all year. -# -# Zone NAME GMTOFF RULES FORMAT [UNTIL] -Zone Antarctica/Rothera 0 - -00 1976 Dec 1 - -3:00 - -03 - -# Uruguay - year round base -# Artigas, King George Island, -621104-0585107 - -# USA - year-round bases -# -# Palmer, Anvers Island, since 1965 (moved 2 miles in 1968) -# See 'southamerica' for Antarctica/Palmer, since it uses South American DST. 
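Several of the Antarctic Zone entries above (Mawson at +05 since 2009, Syowa at +03, Rothera at -03) have not changed since this snapshot, so they can be spot-checked against whatever tzdata is installed locally using Python's standard-library zoneinfo. This is only a sanity check under that assumption, not a definitive statement of current data.

# Spot-check a few stable Antarctic offsets from the Zone entries above.
from datetime import datetime, timezone
from zoneinfo import ZoneInfo

when = datetime(2018, 1, 1, 12, 0, tzinfo=timezone.utc)  # any instant after the 2009 changes
for name in ("Antarctica/Mawson", "Antarctica/Syowa", "Antarctica/Rothera"):
    local = when.astimezone(ZoneInfo(name))
    print(f"{name:22} {local.utcoffset()} {local.tzname()}")  # expect +05, +03, -03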
-# -# McMurdo Station, Ross Island, since 1955-12 -# Amundsen-Scott South Pole Station, continuously occupied since 1956-11-20 -# -# From Chris Carrier (1996-06-27): -# Siple, the first commander of the South Pole station, -# stated that he would have liked to have kept GMT at the station, -# but that he found it more convenient to keep GMT+12 -# as supplies for the station were coming from McMurdo Sound, -# which was on GMT+12 because New Zealand was on GMT+12 all year -# at that time (1957). (Source: Siple's book 90 Degrees South.) -# -# From Susan Smith -# http://www.cybertours.com/whs/pole10.html -# (1995-11-13 16:24:56 +1300, no longer available): -# We use the same time as McMurdo does. -# And they use the same time as Christchurch, NZ does.... -# One last quirk about South Pole time. -# All the electric clocks are usually wrong. -# Something about the generators running at 60.1hertz or something -# makes all of the clocks run fast. So every couple of days, -# we have to go around and set them back 5 minutes or so. -# Maybe if we let them run fast all of the time, we'd get to leave here sooner!! -# -# See 'australasia' for Antarctica/McMurdo. diff --git a/src/timezone/data/asia b/src/timezone/data/asia deleted file mode 100644 index 35774c6d7e..0000000000 --- a/src/timezone/data/asia +++ /dev/null @@ -1,3084 +0,0 @@ -# This file is in the public domain, so clarified as of -# 2009-05-17 by Arthur David Olson. - -# This file is by no means authoritative; if you think you know better, -# go ahead and edit the file (and please send any changes to -# tz@iana.org for general use in the future). For more, please see -# the file CONTRIBUTING in the tz distribution. - -# From Paul Eggert (2017-01-13): -# -# Unless otherwise specified, the source for data through 1990 is: -# Thomas G. Shanks and Rique Pottenger, The International Atlas (6th edition), -# San Diego: ACS Publications, Inc. (2003). -# Unfortunately this book contains many errors and cites no sources. -# -# Many years ago Gwillim Law wrote that a good source -# for time zone data was the International Air Transport -# Association's Standard Schedules Information Manual (IATA SSIM), -# published semiannually. Law sent in several helpful summaries -# of the IATA's data after 1990. Except where otherwise noted, -# IATA SSIM is the source for entries after 1990. -# -# Another source occasionally used is Edward W. Whitman, World Time Differences, -# Whitman Publishing Co, 2 Niagara Av, Ealing, London (undated), which -# I found in the UCLA library. -# -# For data circa 1899, a common source is: -# Milne J. Civil time. Geogr J. 1899 Feb;13(2):173-94. -# http://www.jstor.org/stable/1774359 -# -# For Russian data circa 1919, a source is: -# Byalokoz EL. New Counting of Time in Russia since July 1, 1919. -# (See the 'europe' file for a fuller citation.) -# -# A reliable and entertaining source about time zones is -# Derek Howse, Greenwich time and longitude, Philip Wilson Publishers (1997). 
-# -# The following alphabetic abbreviations appear in these tables: -# std dst -# LMT Local Mean Time -# 2:00 EET EEST Eastern European Time -# 2:00 IST IDT Israel -# 5:30 IST India -# 7:00 WIB west Indonesia (Waktu Indonesia Barat) -# 8:00 WITA central Indonesia (Waktu Indonesia Tengah) -# 8:00 CST China -# 8:30 KST KDT Korea when at +0830 -# 9:00 WIT east Indonesia (Waktu Indonesia Timur) -# 9:00 JST JDT Japan -# 9:00 KST KDT Korea when at +09 -# 9:30 ACST Australian Central Standard Time -# Otherwise, these tables typically use numeric abbreviations like +03 -# and +0330 for integer hour and minute UTC offsets. Although earlier -# editions invented alphabetic time zone abbreviations for every -# offset, this did not reflect common practice. -# -# See the 'europe' file for Russia and Turkey in Asia. - -# From Guy Harris: -# Incorporates data for Singapore from Robert Elz' asia 1.1, as well as -# additional information from Tom Yap, Sun Microsystems Intercontinental -# Technical Support (including a page from the Official Airline Guide - -# Worldwide Edition). - -############################################################################### - -# These rules are stolen from the 'europe' file. -# Rule NAME FROM TO TYPE IN ON AT SAVE LETTER/S -Rule EUAsia 1981 max - Mar lastSun 1:00u 1:00 S -Rule EUAsia 1979 1995 - Sep lastSun 1:00u 0 - -Rule EUAsia 1996 max - Oct lastSun 1:00u 0 - -Rule E-EurAsia 1981 max - Mar lastSun 0:00 1:00 S -Rule E-EurAsia 1979 1995 - Sep lastSun 0:00 0 - -Rule E-EurAsia 1996 max - Oct lastSun 0:00 0 - -Rule RussiaAsia 1981 1984 - Apr 1 0:00 1:00 S -Rule RussiaAsia 1981 1983 - Oct 1 0:00 0 - -Rule RussiaAsia 1984 1995 - Sep lastSun 2:00s 0 - -Rule RussiaAsia 1985 2011 - Mar lastSun 2:00s 1:00 S -Rule RussiaAsia 1996 2011 - Oct lastSun 2:00s 0 - - -# Afghanistan -# Zone NAME GMTOFF RULES FORMAT [UNTIL] -Zone Asia/Kabul 4:36:48 - LMT 1890 - 4:00 - +04 1945 - 4:30 - +0430 - -# Armenia -# From Paul Eggert (2006-03-22): -# Shanks & Pottenger have Yerevan switching to 3:00 (with Russian DST) -# in spring 1991, then to 4:00 with no DST in fall 1995, then -# readopting Russian DST in 1997. Go with Shanks & Pottenger, even -# when they disagree with others. Edgar Der-Danieliantz -# reported (1996-05-04) that Yerevan probably wouldn't use DST -# in 1996, though it did use DST in 1995. IATA SSIM (1991/1998) reports that -# Armenia switched from 3:00 to 4:00 in 1998 and observed DST after 1991, -# but started switching at 3:00s in 1998. - -# From Arthur David Olson (2011-06-15): -# While Russia abandoned DST in 2011, Armenia may choose to -# follow Russia's "old" rules. - -# From Alexander Krivenyshev (2012-02-10): -# According to News Armenia, on Feb 9, 2012, -# http://newsarmenia.ru/society/20120209/42609695.html -# -# The Armenia National Assembly adopted final reading of Amendments to the -# Law "On procedure of calculation time on the territory of the Republic of -# Armenia" according to which Armenia [is] abolishing Daylight Saving Time. -# or -# (brief) -# http://www.worldtimezone.com/dst_news/dst_news_armenia03.html -# Zone NAME GMTOFF RULES FORMAT [UNTIL] -Zone Asia/Yerevan 2:58:00 - LMT 1924 May 2 - 3:00 - +03 1957 Mar - 4:00 RussiaAsia +04/+05 1991 Mar 31 2:00s - 3:00 RussiaAsia +03/+04 1995 Sep 24 2:00s - 4:00 - +04 1997 - 4:00 RussiaAsia +04/+05 - -# Azerbaijan - -# From Rustam Aliyev of the Azerbaijan Internet Forum (2005-10-23): -# According to the resolution of Cabinet of Ministers, 1997 -# From Paul Eggert (2015-09-17): It was Resolution No. 21 (1997-03-17). 
-# http://code.az/files/daylight_res.pdf - -# From Steffen Thorsen (2016-03-17): -# ... the Azerbaijani Cabinet of Ministers has cancelled switching to -# daylight saving time.... -# http://www.azernews.az/azerbaijan/94137.html -# http://vestnikkavkaza.net/news/Azerbaijani-Cabinet-of-Ministers-cancels-daylight-saving-time.html -# http://en.apa.az/xeber_azerbaijan_abolishes_daylight_savings_ti_240862.html - -# Rule NAME FROM TO TYPE IN ON AT SAVE LETTER/S -Rule Azer 1997 2015 - Mar lastSun 4:00 1:00 S -Rule Azer 1997 2015 - Oct lastSun 5:00 0 - -# Zone NAME GMTOFF RULES FORMAT [UNTIL] -Zone Asia/Baku 3:19:24 - LMT 1924 May 2 - 3:00 - +03 1957 Mar - 4:00 RussiaAsia +04/+05 1991 Mar 31 2:00s - 3:00 RussiaAsia +03/+04 1992 Sep lastSun 2:00s - 4:00 - +04 1996 - 4:00 EUAsia +04/+05 1997 - 4:00 Azer +04/+05 - -# Bahrain -# See Asia/Qatar. - -# Bangladesh -# From Alexander Krivenyshev (2009-05-13): -# According to newspaper Asian Tribune (May 6, 2009) Bangladesh may introduce -# Daylight Saving Time from June 16 to Sept 30 -# -# Bangladesh to introduce daylight saving time likely from June 16 -# http://www.asiantribune.com/?q=node/17288 -# http://www.worldtimezone.com/dst_news/dst_news_bangladesh02.html -# -# "... Bangladesh government has decided to switch daylight saving time from -# June -# 16 till September 30 in a bid to ensure maximum use of daylight to cope with -# crippling power crisis. " -# -# The switch will remain in effect from June 16 to Sept 30 (2009) but if -# implemented the next year, it will come in force from April 1, 2010 - -# From Steffen Thorsen (2009-06-02): -# They have finally decided now, but changed the start date to midnight between -# the 19th and 20th, and they have not set the end date yet. -# -# Some sources: -# http://in.reuters.com/article/southAsiaNews/idINIndia-40017620090601 -# http://bdnews24.com/details.php?id=85889&cid=2 -# -# Our wrap-up: -# http://www.timeanddate.com/news/time/bangladesh-daylight-saving-2009.html - -# From A. N. M. Kamrus Saadat (2009-06-15): -# Finally we've got the official mail regarding DST start time where DST start -# time is mentioned as Jun 19 2009, 23:00 from BTRC (Bangladesh -# Telecommunication Regulatory Commission). -# -# No DST end date has been announced yet. - -# From Alexander Krivenyshev (2009-09-25): -# Bangladesh won't go back to Standard Time from October 1, 2009, -# instead it will continue DST measure till the cabinet makes a fresh decision. -# -# Following report by same newspaper-"The Daily Star Friday": -# "DST change awaits cabinet decision-Clock won't go back by 1-hr from Oct 1" -# http://www.thedailystar.net/newDesign/news-details.php?nid=107021 -# http://www.worldtimezone.com/dst_news/dst_news_bangladesh04.html - -# From Steffen Thorsen (2009-10-13): -# IANS (Indo-Asian News Service) now reports: -# Bangladesh has decided that the clock advanced by an hour to make -# maximum use of daylight hours as an energy saving measure would -# "continue for an indefinite period." -# -# One of many places where it is published: -# http://www.thaindian.com/newsportal/business/bangladesh-to-continue-indefinitely-with-advanced-time_100259987.html - -# From Alexander Krivenyshev (2009-12-24): -# According to Bangladesh newspaper "The Daily Star," -# Bangladesh will change its clock back to Standard Time on Dec 31, 2009. -# -# Clock goes back 1-hr on Dec 31 night. 
-# http://www.thedailystar.net/newDesign/news-details.php?nid=119228 -# http://www.worldtimezone.com/dst_news/dst_news_bangladesh05.html -# -# "...The government yesterday decided to put the clock back by one hour -# on December 31 midnight and the new time will continue until March 31, -# 2010 midnight. The decision came at a cabinet meeting at the Prime -# Minister's Office last night..." - -# From Alexander Krivenyshev (2010-03-22): -# According to Bangladesh newspaper "The Daily Star," -# Cabinet cancels Daylight Saving Time -# http://www.thedailystar.net/newDesign/latest_news.php?nid=22817 -# http://www.worldtimezone.com/dst_news/dst_news_bangladesh06.html - -# Rule NAME FROM TO TYPE IN ON AT SAVE LETTER/S -Rule Dhaka 2009 only - Jun 19 23:00 1:00 S -Rule Dhaka 2009 only - Dec 31 24:00 0 - - -# Zone NAME GMTOFF RULES FORMAT [UNTIL] -Zone Asia/Dhaka 6:01:40 - LMT 1890 - 5:53:20 - HMT 1941 Oct # Howrah Mean Time? - 6:30 - +0630 1942 May 15 - 5:30 - +0530 1942 Sep - 6:30 - +0630 1951 Sep 30 - 6:00 - +06 2009 - 6:00 Dhaka +06/+07 - -# Bhutan -# Zone NAME GMTOFF RULES FORMAT [UNTIL] -Zone Asia/Thimphu 5:58:36 - LMT 1947 Aug 15 # or Thimbu - 5:30 - +0530 1987 Oct - 6:00 - +06 - -# British Indian Ocean Territory -# Whitman and the 1995 CIA time zone map say 5:00, but the -# 1997 and later maps say 6:00. Assume the switch occurred in 1996. -# We have no information as to when standard time was introduced; -# assume it occurred in 1907, the same year as Mauritius (which -# then contained the Chagos Archipelago). -# Zone NAME GMTOFF RULES FORMAT [UNTIL] -Zone Indian/Chagos 4:49:40 - LMT 1907 - 5:00 - +05 1996 - 6:00 - +06 - -# Brunei -# Zone NAME GMTOFF RULES FORMAT [UNTIL] -Zone Asia/Brunei 7:39:40 - LMT 1926 Mar # Bandar Seri Begawan - 7:30 - +0730 1933 - 8:00 - +08 - -# Burma / Myanmar - -# Milne says 6:24:40 was the meridian of the time ball observatory at Rangoon. - -# Zone NAME GMTOFF RULES FORMAT [UNTIL] -Zone Asia/Yangon 6:24:40 - LMT 1880 # or Rangoon - 6:24:40 - RMT 1920 # Rangoon Mean Time? - 6:30 - +0630 1942 May - 9:00 - +09 1945 May 3 - 6:30 - +0630 - -# Cambodia -# See Asia/Bangkok. - - -# China - -# From Guy Harris: -# People's Republic of China. Yes, they really have only one time zone. - -# From Bob Devine (1988-01-28): -# No they don't. See TIME mag, 1986-02-17 p.52. Even though -# China is across 4 physical time zones, before Feb 1, 1986 only the -# Peking (Beijing) time zone was recognized. Since that date, China -# has two of 'em - Peking's and Ürümqi (named after the capital of -# the Xinjiang Uyghur Autonomous Region). I don't know about DST for it. -# -# . . .I just deleted the DST table and this editor makes it too -# painful to suck in another copy. So, here is what I have for -# DST start/end dates for Peking's time zone (info from AP): -# -# 1986 May 4 - Sept 14 -# 1987 mid-April - ?? - -# From U. S. Naval Observatory (1989-01-19): -# CHINA 8 H AHEAD OF UTC ALL OF CHINA, INCL TAIWAN -# CHINA 9 H AHEAD OF UTC APR 17 - SEP 10 - -# From Paul Eggert (2008-02-11): -# Jim Mann, "A clumsy embrace for another western custom: China on daylight -# time - sort of", Los Angeles Times, 1986-05-05 ... [says] that China began -# observing daylight saving time in 1986. - -# From Paul Eggert (2014-06-30): -# Shanks & Pottenger have China switching to a single time zone in 1980, but -# this doesn't seem to be correct. 
They also write that China observed summer -# DST from 1986 through 1991, which seems to match the above commentary, so -# go with them for DST rules as follows: -# Rule NAME FROM TO TYPE IN ON AT SAVE LETTER/S -Rule Shang 1940 only - Jun 3 0:00 1:00 D -Rule Shang 1940 1941 - Oct 1 0:00 0 S -Rule Shang 1941 only - Mar 16 0:00 1:00 D -Rule PRC 1986 only - May 4 0:00 1:00 D -Rule PRC 1986 1991 - Sep Sun>=11 0:00 0 S -Rule PRC 1987 1991 - Apr Sun>=10 0:00 1:00 D - -# From Anthony Fok (2001-12-20): -# BTW, I did some research on-line and found some info regarding these five -# historic timezones from some Taiwan websites. And yes, there are official -# Chinese names for these locales (before 1949). -# -# From Jesper Nørgaard Welen (2006-07-14): -# I have investigated the timezones around 1970 on the -# http://www.astro.com/atlas site [with provinces and county -# boundaries summarized below].... A few other exceptions were two -# counties on the Sichuan side of the Xizang-Sichuan border, -# counties Dege and Baiyu which lies on the Sichuan side and are -# therefore supposed to be GMT+7, Xizang region being GMT+6, but Dege -# county is GMT+8 according to astro.com while Baiyu county is GMT+6 -# (could be true), for the moment I am assuming that those two -# counties are mistakes in the astro.com data. - -# From Paul Eggert (2017-01-05): -# Alois Treindl kindly sent me translations of the following two sources: -# -# (1) -# Guo Qingsheng (National Time-Service Center, CAS, Xi'an 710600, China) -# Beijing Time at the Beginning of the PRC -# China Historical Materials of Science and Technology -# (Zhongguo ke ji shi liao, 中国科技史料), Vol. 24, No. 1 (2003) -# It gives evidence that at the beginning of the PRC, Beijing time was -# officially apparent solar time! However, Guo also says that the -# evidence is dubious, as the relevant institute of astronomy had not -# been taken over by the PRC yet. It's plausible that apparent solar -# time was announced but never implemented, and that people continued -# to use UT+8. As the Shanghai radio station (and I presume the -# observatory) was still under control of French missionaries, it -# could well have ignored any such mandate. -# -# (2) -# Guo Qing-sheng (Shaanxi Astronomical Observatory, CAS, Xi'an 710600, China) -# A Study on the Standard Time Changes for the Past 100 Years in China -# [undated and unknown publication location] -# It says several things: -# * The Qing dynasty used local apparent solar time throughout China. -# * The Republic of China instituted Beijing mean solar time effective -# the official calendar book of 1914. -# * The French Concession in Shanghai set up signal stations in -# French docks in the 1890s, controlled by Xujiahui (Zikawei) -# Observatory and set to local mean time. -# * "From the end of the 19th century" it changed to UT+8. -# * Chinese Customs (by then reduced to a tool of foreign powers) -# eventually standardized on this time for all ports, and it -# became used by railways as well. -# * In 1918 the Central Observatory proposed dividing China into -# five time zones (see below for details). This caught on -# at first only in coastal areas observing UT+8. -# * During WWII all of China was in theory was at UT+7. In practice -# this was ignored in the west, and I presume was ignored in -# Japanese-occupied territory. -# * Japanese-occupied Manchuria was at UT+9, i.e., Japan time. -# * The five-zone plan was resurrected after WWII and officially put into -# place (with some modifications) in March 1948. 
It's not clear -# how well it was observed in areas under Nationalist control. -# * The People's Liberation Army used UT+8 during the civil war. -# -# An AP article "Shanghai Internat'l Area Little Changed" in the -# Lewiston (ME) Daily Sun (1939-05-29), p 17, said "Even the time is -# different - the occupied districts going by Tokyo time, an hour -# ahead of that prevailing in the rest of Shanghai." Guess that the -# Xujiahui Observatory was under French control and stuck with UT +08. -# -# In earlier versions of this file, China had many separate Zone entries, but -# this was based on what were apparently incorrect data in Shanks & Pottenger. -# This has now been simplified to the two entries Asia/Shanghai and -# Asia/Urumqi, with the others being links for backward compatibility. -# Proposed in 1918 and theoretically in effect until 1949 (although in practice -# mainly observed in coastal areas), the five zones were: -# -# Changbai Time ("Long-white Time", Long-white = Heilongjiang area) UT +08:30 -# Now part of Asia/Shanghai; its pre-1970 times are not recorded here. -# Heilongjiang (except Mohe county), Jilin -# -# Zhongyuan Time ("Central plain Time") UT +08 -# Now part of Asia/Shanghai. -# most of China -# Milne gives 8:05:43.2 for Xujiahui Observatory time; round to nearest. -# Guo says Shanghai switched to UT +08 "from the end of the 19th century". -# -# Long-shu Time (probably as Long and Shu were two names of the area) UT +07 -# Now part of Asia/Shanghai; its pre-1970 times are not recorded here. -# Guangxi, Guizhou, Hainan, Ningxia, Sichuan, Shaanxi, and Yunnan; -# most of Gansu; west Inner Mongolia; east Qinghai; and the Guangdong -# counties Deqing, Enping, Kaiping, Luoding, Taishan, Xinxing, -# Yangchun, Yangjiang, Yu'nan, and Yunfu. -# -# Xin-zang Time ("Xinjiang-Tibet Time") UT +06 -# This region is now part of either Asia/Urumqi or Asia/Shanghai with -# current boundaries uncertain; times before 1970 for areas that -# disagree with Ürümqi or Shanghai are not recorded here. -# The Gansu counties Aksay, Anxi, Dunhuang, Subei; west Qinghai; -# the Guangdong counties Xuwen, Haikang, Suixi, Lianjiang, -# Zhanjiang, Wuchuan, Huazhou, Gaozhou, Maoming, Dianbai, and Xinyi; -# east Tibet, including Lhasa, Chamdo, Shigaise, Jimsar, Shawan and Hutubi; -# east Xinjiang, including Ürümqi, Turpan, Karamay, Korla, Minfeng, Jinghe, -# Wusu, Qiemo, Xinyan, Wulanwusu, Jinghe, Yumin, Tacheng, Tuoli, Emin, -# Shihezi, Changji, Yanqi, Heshuo, Tuokexun, Tulufan, Shanshan, Hami, -# Fukang, Kuitun, Kumukuli, Miquan, Qitai, and Turfan. -# -# Kunlun Time UT +05:30 -# This region is now in the same status as Xin-zang Time (see above). -# West Tibet, including Pulan, Aheqi, Shufu, Shule; -# West Xinjiang, including Aksu, Atushi, Yining, Hetian, Cele, Luopu, Nileke, -# Zhaosu, Tekesi, Gongliu, Chabuchaer, Huocheng, Bole, Pishan, Suiding, -# and Yarkand. - -# From Luther Ma (2009-10-17): -# Almost all (>99.9%) ethnic Chinese (properly ethnic Han) living in -# Xinjiang use Chinese Standard Time. Some are aware of Xinjiang time, -# but have no need of it. All planes, trains, and schools function on -# what is called "Beijing time." When Han make an appointment in Chinese -# they implicitly use Beijing time. -# -# On the other hand, ethnic Uyghurs, who make up about half the -# population of Xinjiang, typically use "Xinjiang time" which is two -# hours behind Beijing time, or UT +06. 
The government of the Xinjiang -# Uyghur Autonomous Region, (XAUR, or just Xinjiang for short) as well as -# local governments such as the Ürümqi city government use both times in -# publications, referring to what is popularly called Xinjiang time as -# "Ürümqi time." When Uyghurs make an appointment in the Uyghur language -# they almost invariably use Xinjiang time. -# -# (Their ethnic Han compatriots would typically have no clue of its -# widespread use, however, because so extremely few of them are fluent in -# Uyghur, comparable to the number of Anglo-Americans fluent in Navajo.) -# -# (...As with the rest of China there was a brief interval ending in 1990 -# or 1991 when summer time was in use. The confusion was severe, with -# the province not having dual times but four times in use at the same -# time. Some areas remained on standard Xinjiang time or Beijing time and -# others moving their clocks ahead.) - -# From Luther Ma (2009-11-19): -# With the risk of being redundant to previous answers these are the most common -# English "transliterations" (w/o using non-English symbols): -# -# 1. Wulumuqi... -# 2. Kashi... -# 3. Urumqi... -# 4. Kashgar... -# ... -# 5. It seems that Uyghurs in Ürümqi has been using Xinjiang since at least the -# 1960's. I know of one Han, now over 50, who grew up in the surrounding -# countryside and used Xinjiang time as a child. -# -# 6. Likewise for Kashgar and the rest of south Xinjiang I don't know of any -# start date for Xinjiang time. -# -# Without having access to local historical records, nor the ability to legally -# publish them, I would go with October 1, 1949, when Xinjiang became the Uyghur -# Autonomous Region under the PRC. (Before that Uyghurs, of course, would also -# not be using Beijing time, but some local time.) - -# From David Cochrane (2014-03-26): -# Just a confirmation that Ürümqi time was implemented in Ürümqi on 1 Feb 1986: -# http://content.time.com/time/magazine/article/0,9171,960684,00.html - -# From Luther Ma (2014-04-22): -# I have interviewed numerous people of various nationalities and from -# different localities in Xinjiang and can confirm the information in Guo's -# report regarding Xinjiang, as well as the Time article reference by David -# Cochrane. Whether officially recognized or not (and both are officially -# recognized), two separate times have been in use in Xinjiang since at least -# the Cultural Revolution: Xinjiang Time (XJT), aka Ürümqi Time or local time; -# and Beijing Time. There is no confusion in Xinjiang as to which name refers -# to which time. Both are widely used in the province, although in some -# population groups might be use one to the exclusion of the other. The only -# problem is that computers and smart phones list Ürümqi (or Kashgar) as -# having the same time as Beijing. - -# From Paul Eggert (2014-06-30): -# In the early days of the PRC, Tibet was given its own time zone (UT +06) -# but this was withdrawn in 1959 and never reinstated; see Tubten Khétsun, -# Memories of life in Lhasa under Chinese Rule, Columbia U Press, ISBN -# 978-0231142861 (2008), translator's introduction by Matthew Akester, p x. -# As this is before our 1970 cutoff, Tibet doesn't need a separate zone. -# -# Xinjiang Time is well-documented as being officially recognized. E.g., see -# "The Working-Calendar for The Xinjiang Uygur Autonomous Region Government" -# (2014-04-22). -# Unfortunately, we have no good records of time in Xinjiang before 1986. 
-# During the 20th century parts of Xinjiang were ruled by the Qing dynasty, -# the Republic of China, various warlords, the First and Second East Turkestan -# Republics, the Soviet Union, the Kuomintang, and the People's Republic of -# China, and tracking down all these organizations' timekeeping rules would be -# quite a trick. Approximate this lost history by a transition from LMT to -# UT +06 at the start of 1928, the year of accession of the warlord Jin Shuren, -# which happens to be the date given by Shanks & Pottenger (no doubt as a -# guess) as the transition from LMT. Ignore the usage of +08 before -# 1986-02-01 under the theory that the transition date to +08 is unknown and -# that the sort of users who prefer Asia/Urumqi now typically ignored the -# +08 mandate back then. - -# Zone NAME GMTOFF RULES FORMAT [UNTIL] -# Beijing time, used throughout China; represented by Shanghai. -Zone Asia/Shanghai 8:05:43 - LMT 1901 - 8:00 Shang C%sT 1949 - 8:00 PRC C%sT -# Xinjiang time, used by many in western China; represented by Ürümqi / Ürümchi -# / Wulumuqi. (Please use Asia/Shanghai if you prefer Beijing time.) -Zone Asia/Urumqi 5:50:20 - LMT 1928 - 6:00 - +06 - - -# Hong Kong (Xianggang) - -# Milne gives 7:36:41.7; round this. - -# From Lee Yiu Chung (2009-10-24): -# I found there are some mistakes for the...DST rule for Hong -# Kong. [According] to the DST record from Hong Kong Observatory (actually, -# it is not [an] observatory, but the official meteorological agency of HK, -# and also serves as the official timing agency), there are some missing -# and incorrect rules. Although the exact switch over time is missing, I -# think 3:30 is correct. The official DST record for Hong Kong can be -# obtained from -# http://www.hko.gov.hk/gts/time/Summertime.htm - -# From Arthur David Olson (2009-10-28): -# Here are the dates given at -# http://www.hko.gov.hk/gts/time/Summertime.htm -# as of 2009-10-28: -# Year Period -# 1941 1 Apr to 30 Sep -# 1942 Whole year -# 1943 Whole year -# 1944 Whole year -# 1945 Whole year -# 1946 20 Apr to 1 Dec -# 1947 13 Apr to 30 Dec -# 1948 2 May to 31 Oct -# 1949 3 Apr to 30 Oct -# 1950 2 Apr to 29 Oct -# 1951 1 Apr to 28 Oct -# 1952 6 Apr to 25 Oct -# 1953 5 Apr to 1 Nov -# 1954 21 Mar to 31 Oct -# 1955 20 Mar to 6 Nov -# 1956 18 Mar to 4 Nov -# 1957 24 Mar to 3 Nov -# 1958 23 Mar to 2 Nov -# 1959 22 Mar to 1 Nov -# 1960 20 Mar to 6 Nov -# 1961 19 Mar to 5 Nov -# 1962 18 Mar to 4 Nov -# 1963 24 Mar to 3 Nov -# 1964 22 Mar to 1 Nov -# 1965 18 Apr to 17 Oct -# 1966 17 Apr to 16 Oct -# 1967 16 Apr to 22 Oct -# 1968 21 Apr to 20 Oct -# 1969 20 Apr to 19 Oct -# 1970 19 Apr to 18 Oct -# 1971 18 Apr to 17 Oct -# 1972 16 Apr to 22 Oct -# 1973 22 Apr to 21 Oct -# 1973/74 30 Dec 73 to 20 Oct 74 -# 1975 20 Apr to 19 Oct -# 1976 18 Apr to 17 Oct -# 1977 Nil -# 1978 Nil -# 1979 13 May to 21 Oct -# 1980 to Now Nil -# The page does not give start or end times of day. -# The page does not give a start date for 1942. -# The page does not givw an end date for 1945. -# The Japanese occupation of Hong Kong began on 1941-12-25. -# The Japanese surrender of Hong Kong was signed 1945-09-15. -# For lack of anything better, use start of those days as the transition times. 
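Per the Zone entries above, Asia/Shanghai stays at UT +08 (DST last observed in 1991 under the PRC rules) and Asia/Urumqi at UT +06, a constant two-hour gap matching the Xinjiang-time discussion. A quick check with the standard-library zoneinfo, assuming the installed tzdata agrees with this snapshot:

# Compare the two mainland China zones defined above.
from datetime import datetime, timezone
from zoneinfo import ZoneInfo

t = datetime(2018, 6, 1, tzinfo=timezone.utc)
for name in ("Asia/Shanghai", "Asia/Urumqi"):
    local = t.astimezone(ZoneInfo(name))
    print(name, local.utcoffset(), local.tzname())  # expect 8:00:00 CST and 6:00:00 +06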
- -# Rule NAME FROM TO TYPE IN ON AT SAVE LETTER/S -Rule HK 1941 only - Apr 1 3:30 1:00 S -Rule HK 1941 only - Sep 30 3:30 0 - -Rule HK 1946 only - Apr 20 3:30 1:00 S -Rule HK 1946 only - Dec 1 3:30 0 - -Rule HK 1947 only - Apr 13 3:30 1:00 S -Rule HK 1947 only - Dec 30 3:30 0 - -Rule HK 1948 only - May 2 3:30 1:00 S -Rule HK 1948 1951 - Oct lastSun 3:30 0 - -Rule HK 1952 only - Oct 25 3:30 0 - -Rule HK 1949 1953 - Apr Sun>=1 3:30 1:00 S -Rule HK 1953 only - Nov 1 3:30 0 - -Rule HK 1954 1964 - Mar Sun>=18 3:30 1:00 S -Rule HK 1954 only - Oct 31 3:30 0 - -Rule HK 1955 1964 - Nov Sun>=1 3:30 0 - -Rule HK 1965 1976 - Apr Sun>=16 3:30 1:00 S -Rule HK 1965 1976 - Oct Sun>=16 3:30 0 - -Rule HK 1973 only - Dec 30 3:30 1:00 S -Rule HK 1979 only - May Sun>=8 3:30 1:00 S -Rule HK 1979 only - Oct Sun>=16 3:30 0 - -# Zone NAME GMTOFF RULES FORMAT [UNTIL] -Zone Asia/Hong_Kong 7:36:42 - LMT 1904 Oct 30 - 8:00 HK HK%sT 1941 Dec 25 - 9:00 - JST 1945 Sep 15 - 8:00 HK HK%sT - -############################################################################### - -# Taiwan - -# From smallufo (2010-04-03): -# According to Taiwan's CWB [Central Weather Bureau], -# http://www.cwb.gov.tw/V6/astronomy/cdata/summert.htm -# Taipei has DST in 1979 between July 1st and Sep 30. - -# From Yu-Cheng Chuang (2013-07-12): -# On Dec 28, 1895, the Meiji Emperor announced Ordinance No. 167 of -# Meiji Year 28 "The clause about standard time", mentioned that -# Taiwan and Penghu Islands, as well as Yaeyama and Miyako Islands -# (both in Okinawa) adopt the Western Standard Time which is based on -# 120E. The adoption began from Jan 1, 1896. The original text can be -# found on Wikisource: -# http://ja.wikisource.org/wiki/標準時ニ關スル件_(公布時) -# ... This could be the first adoption of time zone in Taiwan, because -# during the Qing Dynasty, it seems that there was no time zone -# declared officially. -# -# Later, in the beginning of World War II, on Sep 25, 1937, the Showa -# Emperor announced Ordinance No. 529 of Showa Year 12 "The clause of -# revision in the ordinance No. 167 of Meiji year 28 about standard -# time", in which abolished the adoption of Western Standard Time in -# western islands (listed above), which means the whole Japan -# territory, including later occupations, adopt Japan Central Time -# (UTC+9). The adoption began on Oct 1, 1937. The original text can -# be found on Wikisource: -# http://ja.wikisource.org/wiki/明治二十八年勅令第百六十七號標準時ニ關スル件中改正ノ件 -# -# That is, the time zone of Taipei switched to UTC+9 on Oct 1, 1937. - -# From Yu-Cheng Chuang (2014-07-02): -# I've found more evidence about when the time zone was switched from UTC+9 -# back to UTC+8 after WW2. I believe it was on Sep 21, 1945. In a document -# during Japanese era [1] in which the officer told the staff to change time -# zone back to Western Standard Time (UTC+8) on Sep 21. And in another -# history page of National Cheng Kung University [2], on Sep 21 there is a -# note "from today, switch back to Western Standard Time". From these two -# materials, I believe that the time zone change happened on Sep 21. And -# today I have found another monthly journal called "The Astronomical Herald" -# from The Astronomical Society of Japan [3] in which it mentioned the fact -# that: -# -# 1. Standard Time of the Country (Japan) was adopted on Jan 1, 1888, using -# the time at 135E (GMT+9) -# -# 2. 
Standard Time of the Country was renamed to Central Standard Time, on Jan -# 1, 1898, and on the same day, the new territories Taiwan and Penghu islands, -# as well as Yaeyama and Miyako islands, adopted a new time zone called -# Western Standard Time, which is in GMT+8. -# -# 3. Western Standard Time was deprecated on Sep 30, 1937. From then all the -# territories of Japan adopted the same time zone, which is Central Standard -# Time. -# -# [1] Academica Historica, Taiwan: -# http://163.29.208.22:8080/govsaleShowImage/connect_img.php?s=00101738900090036&e=00101738900090037 -# [2] Nat'l Cheng Kung University 70th Anniversary Special Site: -# http://www.ncku.edu.tw/~ncku70/menu/001/01_01.htm -# [3] Yukio Niimi, The Standard Time in Japan (1997), p.475: -# http://www.asj.or.jp/geppou/archive_open/1997/pdf/19971001c.pdf - -# Yu-Cheng Chuang (2014-07-03): -# I finally have found the real official gazette about changing back to -# Western Standard Time on Sep 21 in Taiwan. It's Taiwan Governor-General -# Bulletin No. 386 in Showa 20 years (1945), published on Sep 19, 1945. [1] ... -# [It] abolishes Bulletin No. 207 in Showa 12 years (1937), which is a local -# bulletin in Taiwan for that Ordinance No. 529. It also mentioned that 1am on -# Sep 21, 1945 will be 12am on Sep 21. I think this bulletin is much more -# official than the one I mentioned in my first mail, because it's from the -# top-level government in Taiwan. If you're going to quote any resource, this -# would be a good one. -# [1] Taiwan Governor-General Gazette, No. 1018, Sep 19, 1945: -# http://db2.th.gov.tw/db2/view/viewImg.php?imgcode=0072031018a&num=19&bgn=019&end=019&otherImg=&type=gener - -# From Yu-Cheng Chuang (2014-07-02): -# In 1946, DST in Taiwan was from May 15 and ended on Sep 30. The info from -# Central Weather Bureau website was not correct. -# -# Original Bulletin: -# http://subtpg.tpg.gov.tw/og/image2.asp?f=03502F0AKM1AF -# http://subtpg.tpg.gov.tw/og/image2.asp?f=0350300AKM1B0 (cont.) -# -# In 1947, DST in Taiwan was expanded to Oct 31. There is a backup of that -# telegram announcement from Taiwan Province Government: -# -# http://subtpg.tpg.gov.tw/og/image2.asp?f=0360310AKZ431 -# -# Here is a brief translation: -# -# The Summer Time this year is adopted from midnight Apr 15 until Sep 20 -# midnight. To save (energy?) consumption, we're expanding Summer Time -# adoption till Oct 31 midnight. -# -# The Central Weather Bureau website didn't mention that, however it can -# be found from historical government announcement database. - -# From Paul Eggert (2014-07-03): -# As per Yu-Cheng Chuang, say that Taiwan was at UT +09 from 1937-10-01 -# until 1945-09-21 at 01:00, overriding Shanks & Pottenger. -# Likewise, use Yu-Cheng Chuang's data for DST in Taiwan. 
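The 1945-09-21 01:00 switch from UT +09 back to UT +08 described above can be spot-checked with the standard-library zoneinfo, assuming the installed tzdata still carries the same pre-1970 history for Asia/Taipei:

# Check the JST -> CST switch on 1945-09-21 described above.
from datetime import datetime
from zoneinfo import ZoneInfo

taipei = ZoneInfo("Asia/Taipei")
before = datetime(1945, 9, 20, 12, 0, tzinfo=taipei)
after = datetime(1945, 9, 22, 12, 0, tzinfo=taipei)
print(before.utcoffset(), before.tzname())  # expect 9:00:00 JST
print(after.utcoffset(), after.tzname())    # expect 8:00:00 CST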
- -# Rule NAME FROM TO TYPE IN ON AT SAVE LETTER/S -Rule Taiwan 1946 only - May 15 0:00 1:00 D -Rule Taiwan 1946 only - Oct 1 0:00 0 S -Rule Taiwan 1947 only - Apr 15 0:00 1:00 D -Rule Taiwan 1947 only - Nov 1 0:00 0 S -Rule Taiwan 1948 1951 - May 1 0:00 1:00 D -Rule Taiwan 1948 1951 - Oct 1 0:00 0 S -Rule Taiwan 1952 only - Mar 1 0:00 1:00 D -Rule Taiwan 1952 1954 - Nov 1 0:00 0 S -Rule Taiwan 1953 1959 - Apr 1 0:00 1:00 D -Rule Taiwan 1955 1961 - Oct 1 0:00 0 S -Rule Taiwan 1960 1961 - Jun 1 0:00 1:00 D -Rule Taiwan 1974 1975 - Apr 1 0:00 1:00 D -Rule Taiwan 1974 1975 - Oct 1 0:00 0 S -Rule Taiwan 1979 only - Jul 1 0:00 1:00 D -Rule Taiwan 1979 only - Oct 1 0:00 0 S - -# Zone NAME GMTOFF RULES FORMAT [UNTIL] -# Taipei or Taibei or T'ai-pei -Zone Asia/Taipei 8:06:00 - LMT 1896 Jan 1 - 8:00 - CST 1937 Oct 1 - 9:00 - JST 1945 Sep 21 1:00 - 8:00 Taiwan C%sT - -# Macau (Macao, Aomen) -# Rule NAME FROM TO TYPE IN ON AT SAVE LETTER/S -Rule Macau 1961 1962 - Mar Sun>=16 3:30 1:00 D -Rule Macau 1961 1964 - Nov Sun>=1 3:30 0 S -Rule Macau 1963 only - Mar Sun>=16 0:00 1:00 D -Rule Macau 1964 only - Mar Sun>=16 3:30 1:00 D -Rule Macau 1965 only - Mar Sun>=16 0:00 1:00 D -Rule Macau 1965 only - Oct 31 0:00 0 S -Rule Macau 1966 1971 - Apr Sun>=16 3:30 1:00 D -Rule Macau 1966 1971 - Oct Sun>=16 3:30 0 S -Rule Macau 1972 1974 - Apr Sun>=15 0:00 1:00 D -Rule Macau 1972 1973 - Oct Sun>=15 0:00 0 S -Rule Macau 1974 1977 - Oct Sun>=15 3:30 0 S -Rule Macau 1975 1977 - Apr Sun>=15 3:30 1:00 D -Rule Macau 1978 1980 - Apr Sun>=15 0:00 1:00 D -Rule Macau 1978 1980 - Oct Sun>=15 0:00 0 S -# Zone NAME GMTOFF RULES FORMAT [UNTIL] -Zone Asia/Macau 7:34:20 - LMT 1912 Jan 1 - 8:00 Macau C%sT - - -############################################################################### - -# Cyprus - -# Milne says the Eastern Telegraph Company used 2:14:00. Stick with LMT. -# IATA SSIM (1998-09) has Cyprus using EU rules for the first time. - -# From Paul Eggert (2016-09-09): -# Yesterday's Cyprus Mail reports that Northern Cyprus followed Turkey's -# lead and switched from +02/+03 to +03 year-round. -# http://cyprus-mail.com/2016/09/08/two-time-zones-cyprus-turkey-will-not-turn-clocks-back-next-month/ -# -# From Even Scharning (2016-10-31): -# Looks like the time zone split in Cyprus went through last night. -# http://cyprus-mail.com/2016/10/30/cyprus-new-division-two-time-zones-now-reality/ - -# Rule NAME FROM TO TYPE IN ON AT SAVE LETTER/S -Rule Cyprus 1975 only - Apr 13 0:00 1:00 S -Rule Cyprus 1975 only - Oct 12 0:00 0 - -Rule Cyprus 1976 only - May 15 0:00 1:00 S -Rule Cyprus 1976 only - Oct 11 0:00 0 - -Rule Cyprus 1977 1980 - Apr Sun>=1 0:00 1:00 S -Rule Cyprus 1977 only - Sep 25 0:00 0 - -Rule Cyprus 1978 only - Oct 2 0:00 0 - -Rule Cyprus 1979 1997 - Sep lastSun 0:00 0 - -Rule Cyprus 1981 1998 - Mar lastSun 0:00 1:00 S -# Zone NAME GMTOFF RULES FORMAT [UNTIL] -Zone Asia/Nicosia 2:13:28 - LMT 1921 Nov 14 - 2:00 Cyprus EE%sT 1998 Sep - 2:00 EUAsia EE%sT -Zone Asia/Famagusta 2:15:48 - LMT 1921 Nov 14 - 2:00 Cyprus EE%sT 1998 Sep - 2:00 EUAsia EE%sT 2016 Sep 8 - 3:00 - +03 - -# Classically, Cyprus belongs to Asia; e.g. see Herodotus, Histories, I.72. -# However, for various reasons many users expect to find it under Europe. -Link Asia/Nicosia Europe/Nicosia - -# Georgia -# From Paul Eggert (1994-11-19): -# Today's _Economist_ (p 60) reports that Georgia moved its clocks forward -# an hour recently, due to a law proposed by Zurab Murvanidze, -# an MP who went on a hunger strike for 11 days to force discussion about it! 
-# We have no details, but we'll guess they didn't move the clocks back in fall. -# -# From Mathew Englander, quoting AP (1996-10-23 13:05-04): -# Instead of putting back clocks at the end of October, Georgia -# will stay on daylight savings time this winter to save energy, -# President Eduard Shevardnadze decreed Wednesday. -# -# From the BBC via Joseph S. Myers (2004-06-27): -# -# Georgia moved closer to Western Europe on Sunday... The former Soviet -# republic has changed its time zone back to that of Moscow. As a result it -# is now just four hours ahead of Greenwich Mean Time, rather than five hours -# ahead. The switch was decreed by the pro-Western president of Georgia, -# Mikheil Saakashvili, who said the change was partly prompted by the process -# of integration into Europe. - -# From Teimuraz Abashidze (2005-11-07): -# Government of Georgia ... decided to NOT CHANGE daylight savings time on -# [Oct.] 30, as it was done before during last more than 10 years. -# Currently, we are in fact GMT +4:00, as before 30 October it was GMT -# +3:00.... The problem is, there is NO FORMAL LAW or governmental document -# about it. As far as I can find, I was told, that there is no document, -# because we just DIDN'T ISSUE document about switching to winter time.... -# I don't know what can be done, especially knowing that some years ago our -# DST rules where changed THREE TIMES during one month. - -# Milne 1899 says Tbilisi (Tiflis) time was 2:59:05.7. -# Byalokoz 1919 says Georgia was 2:59:11. -# Go with Byalokoz. - -# Zone NAME GMTOFF RULES FORMAT [UNTIL] -Zone Asia/Tbilisi 2:59:11 - LMT 1880 - 2:59:11 - TBMT 1924 May 2 # Tbilisi Mean Time - 3:00 - +03 1957 Mar - 4:00 RussiaAsia +04/+05 1991 Mar 31 2:00s - 3:00 RussiaAsia +03/+04 1992 - 3:00 E-EurAsia +03/+04 1994 Sep lastSun - 4:00 E-EurAsia +04/+05 1996 Oct lastSun - 4:00 1:00 +05 1997 Mar lastSun - 4:00 E-EurAsia +04/+05 2004 Jun 27 - 3:00 RussiaAsia +03/+04 2005 Mar lastSun 2:00 - 4:00 - +04 - -# East Timor - -# See Indonesia for the 1945 transition. - -# From João Carrascalão, brother of the former governor of East Timor, in -# East Timor may be late for its millennium -# (1999-12-26/31): -# Portugal tried to change the time forward in 1974 because the sun -# rises too early but the suggestion raised a lot of problems with the -# Timorese and I still don't think it would work today because it -# conflicts with their way of life. - -# From Paul Eggert (2000-12-04): -# We don't have any record of the above attempt. -# Most likely our records are incomplete, but we have no better data. - -# From Manoel de Almeida e Silva, Deputy Spokesman for the UN Secretary-General -# http://www.hri.org/news/world/undh/2000/00-08-16.undh.html -# (2000-08-16): -# The Cabinet of the East Timor Transition Administration decided -# today to advance East Timor's time by one hour. The time change, -# which will be permanent, with no seasonal adjustment, will happen at -# midnight on Saturday, September 16. - -# Zone NAME GMTOFF RULES FORMAT [UNTIL] -Zone Asia/Dili 8:22:20 - LMT 1912 Jan 1 - 8:00 - +08 1942 Feb 21 23:00 - 9:00 - +09 1976 May 3 - 8:00 - +08 2000 Sep 17 0:00 - 9:00 - +09 - -# India - -# From Ian P. Beacock, in "A brief history of (modern) time", The Atlantic -# http://www.theatlantic.com/technology/archive/2015/12/the-creation-of-modern-time/421419/ -# (2015-12-22): -# In January 1906, several thousand cotton-mill workers rioted on the -# outskirts of Bombay.... 
They were protesting the proposed abolition of -# local time in favor of Indian Standard Time.... Journalists called this -# dispute the "Battle of the Clocks." It lasted nearly half a century. - -# Zone NAME GMTOFF RULES FORMAT [UNTIL] -Zone Asia/Kolkata 5:53:28 - LMT 1880 # Kolkata - 5:53:20 - HMT 1941 Oct # Howrah Mean Time? - 6:30 - +0630 1942 May 15 - 5:30 - IST 1942 Sep - 5:30 1:00 +0630 1945 Oct 15 - 5:30 - IST -# The following are like Asia/Kolkata: -# Andaman Is -# Lakshadweep (Laccadive, Minicoy and Amindivi Is) -# Nicobar Is - -# Indonesia -# -# From Paul Eggert (2014-09-06): -# The 1876 Report of the Secretary of the [US] Navy, p 306 says that Batavia -# civil time was 7:07:12.5; round to even for Jakarta. -# -# From Gwillim Law (2001-05-28), overriding Shanks & Pottenger: -# http://www.sumatera-inc.com/go_to_invest/about_indonesia.asp#standtime -# says that Indonesia's time zones changed on 1988-01-01. Looking at some -# time zone maps, I think that must refer to Western Borneo (Kalimantan Barat -# and Kalimantan Tengah) switching from UTC+8 to UTC+7. -# -# From Paul Eggert (2007-03-10): -# Here is another correction to Shanks & Pottenger. -# JohnTWB writes that Japanese forces did not surrender control in -# Indonesia until 1945-09-01 00:00 at the earliest (in Jakarta) and -# other formal surrender ceremonies were September 9, 11, and 13, plus -# September 12 for the regional surrender to Mountbatten in Singapore. -# These would be the earliest possible times for a change. -# Régimes horaires pour le monde entier, by Henri Le Corre, (Éditions -# Traditionnelles, 1987, Paris) says that Java and Madura switched -# from UT +09 to +07:30 on 1945-09-23, and gives 1944-09-01 for Jayapura -# (Hollandia). For now, assume all Indonesian locations other than Jayapura -# switched on 1945-09-23. -# -# From Paul Eggert (2013-08-11): -# Normally the tz database uses English-language abbreviations, but in -# Indonesia it's typical to use Indonesian-language abbreviations even -# when writing in English. For example, see the English-language -# summary published by the Time and Frequency Laboratory of the -# Research Center for Calibration, Instrumentation and Metrology, -# Indonesia, (2006-09-29). -# The time zone abbreviations and UT offsets are: -# -# WIB - +07 - Waktu Indonesia Barat (Indonesia western time) -# WITA - +08 - Waktu Indonesia Tengah (Indonesia central time) -# WIT - +09 - Waktu Indonesia Timur (Indonesia eastern time) -# -# Zone NAME GMTOFF RULES FORMAT [UNTIL] -# Java, Sumatra -Zone Asia/Jakarta 7:07:12 - LMT 1867 Aug 10 -# Shanks & Pottenger say the next transition was at 1924 Jan 1 0:13, -# but this must be a typo. 
- 7:07:12 - BMT 1923 Dec 31 23:47:12 # Batavia - 7:20 - +0720 1932 Nov - 7:30 - +0730 1942 Mar 23 - 9:00 - +09 1945 Sep 23 - 7:30 - +0730 1948 May - 8:00 - +08 1950 May - 7:30 - +0730 1964 - 7:00 - WIB -# west and central Borneo -Zone Asia/Pontianak 7:17:20 - LMT 1908 May - 7:17:20 - PMT 1932 Nov # Pontianak MT - 7:30 - +0730 1942 Jan 29 - 9:00 - +09 1945 Sep 23 - 7:30 - +0730 1948 May - 8:00 - +08 1950 May - 7:30 - +0730 1964 - 8:00 - WITA 1988 Jan 1 - 7:00 - WIB -# Sulawesi, Lesser Sundas, east and south Borneo -Zone Asia/Makassar 7:57:36 - LMT 1920 - 7:57:36 - MMT 1932 Nov # Macassar MT - 8:00 - +08 1942 Feb 9 - 9:00 - +09 1945 Sep 23 - 8:00 - WITA -# Maluku Islands, West Papua, Papua -Zone Asia/Jayapura 9:22:48 - LMT 1932 Nov - 9:00 - +09 1944 Sep 1 - 9:30 - +0930 1964 - 9:00 - WIT - -# Iran - -# From Roozbeh Pournader (2003-03-15): -# This is an English translation of what I just found (originally in Persian). -# The Gregorian dates in brackets are mine: -# -# Official Newspaper No. 13548-1370/6/25 [1991-09-16] -# No. 16760/T233 H 1370/6/10 [1991-09-01] -# -# The Rule About Change of the Official Time of the Country -# -# The Board of Ministers, in the meeting dated 1370/5/23 [1991-08-14], -# based on the suggestion number 2221/D dated 1370/4/22 [1991-07-13] -# of the Country's Organization for Official and Employment Affairs, -# and referring to the law for equating the working hours of workers -# and officers in the whole country dated 1359/4/23 [1980-07-14], and -# for synchronizing the official times of the country, agreed that: -# -# The official time of the country will should move forward one hour -# at the 24[:00] hours of the first day of Farvardin and should return -# to its previous state at the 24[:00] hours of the 30th day of -# Shahrivar. -# -# First Deputy to the President - Hassan Habibi -# -# From personal experience, that agrees with what has been followed -# for at least the last 5 years. Before that, for a few years, the -# date used was the first Thursday night of Farvardin and the last -# Thursday night of Shahrivar, but I can't give exact dates.... -# -# From Roozbeh Pournader (2005-04-05): -# The text of the Iranian law, in effect since 1925, clearly mentions -# that the true solar year is the measure, and there is no arithmetic -# leap year calculation involved. There has never been any serious -# plan to change that law.... -# -# From Paul Eggert (2006-03-22): -# Go with Shanks & Pottenger before Sept. 1991, and with Pournader thereafter. -# I used Ed Reingold's cal-persia in GNU Emacs 21.2 to check Persian dates, -# stopping after 2037 when 32-bit time_t's overflow. -# That cal-persia used Birashk's approximation, which disagrees with the solar -# calendar predictions for the year 2025, so I corrected those dates by hand. -# -# From Oscar van Vlijmen (2005-03-30), writing about future -# discrepancies between cal-persia and the Iranian calendar: -# For 2091 solar-longitude-after yields 2091-03-20 08:40:07.7 UT for -# the vernal equinox and that gets so close to 12:00 some local -# Iranian time that the definition of the correct location needs to be -# known exactly, amongst other factors. 2157 is even closer: -# 2157-03-20 08:37:15.5 UT. But the Gregorian year 2025 should give -# no interpretation problem whatsoever. By the way, another instant -# in the near future where there will be a discrepancy between -# arithmetical and astronomical Iranian calendars will be in 2058: -# vernal equinox on 2058-03-20 09:03:05.9 UT. 
The Java version of -# Reingold's/Dershowitz' calculator gives correctly the Gregorian date -# 2058-03-21 for 1 Farvardin 1437 (astronomical). -# -# From Steffen Thorsen (2006-03-22): -# Several of my users have reported that Iran will not observe DST anymore: -# http://www.irna.ir/en/news/view/line-17/0603193812164948.htm -# -# From Reuters (2007-09-16), with a heads-up from Jesper Nørgaard Welen: -# ... the Guardian Council ... approved a law on Sunday to re-introduce -# daylight saving time ... -# http://uk.reuters.com/article/oilRpt/idUKBLA65048420070916 -# -# From Roozbeh Pournader (2007-11-05): -# This is quoted from Official Gazette of the Islamic Republic of -# Iran, Volume 63, No. 18242, dated Tuesday 1386/6/24 -# [2007-10-16]. I am doing the best translation I can:... -# The official time of the country will be moved forward for one hour -# on the 24 hours of the first day of the month of Farvardin and will -# be changed back to its previous state on the 24 hours of the -# thirtieth day of Shahrivar. -# -# Rule NAME FROM TO TYPE IN ON AT SAVE LETTER/S -Rule Iran 1978 1980 - Mar 21 0:00 1:00 D -Rule Iran 1978 only - Oct 21 0:00 0 S -Rule Iran 1979 only - Sep 19 0:00 0 S -Rule Iran 1980 only - Sep 23 0:00 0 S -Rule Iran 1991 only - May 3 0:00 1:00 D -Rule Iran 1992 1995 - Mar 22 0:00 1:00 D -Rule Iran 1991 1995 - Sep 22 0:00 0 S -Rule Iran 1996 only - Mar 21 0:00 1:00 D -Rule Iran 1996 only - Sep 21 0:00 0 S -Rule Iran 1997 1999 - Mar 22 0:00 1:00 D -Rule Iran 1997 1999 - Sep 22 0:00 0 S -Rule Iran 2000 only - Mar 21 0:00 1:00 D -Rule Iran 2000 only - Sep 21 0:00 0 S -Rule Iran 2001 2003 - Mar 22 0:00 1:00 D -Rule Iran 2001 2003 - Sep 22 0:00 0 S -Rule Iran 2004 only - Mar 21 0:00 1:00 D -Rule Iran 2004 only - Sep 21 0:00 0 S -Rule Iran 2005 only - Mar 22 0:00 1:00 D -Rule Iran 2005 only - Sep 22 0:00 0 S -Rule Iran 2008 only - Mar 21 0:00 1:00 D -Rule Iran 2008 only - Sep 21 0:00 0 S -Rule Iran 2009 2011 - Mar 22 0:00 1:00 D -Rule Iran 2009 2011 - Sep 22 0:00 0 S -Rule Iran 2012 only - Mar 21 0:00 1:00 D -Rule Iran 2012 only - Sep 21 0:00 0 S -Rule Iran 2013 2015 - Mar 22 0:00 1:00 D -Rule Iran 2013 2015 - Sep 22 0:00 0 S -Rule Iran 2016 only - Mar 21 0:00 1:00 D -Rule Iran 2016 only - Sep 21 0:00 0 S -Rule Iran 2017 2019 - Mar 22 0:00 1:00 D -Rule Iran 2017 2019 - Sep 22 0:00 0 S -Rule Iran 2020 only - Mar 21 0:00 1:00 D -Rule Iran 2020 only - Sep 21 0:00 0 S -Rule Iran 2021 2023 - Mar 22 0:00 1:00 D -Rule Iran 2021 2023 - Sep 22 0:00 0 S -Rule Iran 2024 only - Mar 21 0:00 1:00 D -Rule Iran 2024 only - Sep 21 0:00 0 S -Rule Iran 2025 2027 - Mar 22 0:00 1:00 D -Rule Iran 2025 2027 - Sep 22 0:00 0 S -Rule Iran 2028 2029 - Mar 21 0:00 1:00 D -Rule Iran 2028 2029 - Sep 21 0:00 0 S -Rule Iran 2030 2031 - Mar 22 0:00 1:00 D -Rule Iran 2030 2031 - Sep 22 0:00 0 S -Rule Iran 2032 2033 - Mar 21 0:00 1:00 D -Rule Iran 2032 2033 - Sep 21 0:00 0 S -Rule Iran 2034 2035 - Mar 22 0:00 1:00 D -Rule Iran 2034 2035 - Sep 22 0:00 0 S -# -# The following rules are approximations starting in the year 2038. -# These are the best post-2037 approximations available, given the -# restrictions of a single rule using a Gregorian-based data format. -# At some point this table will need to be extended, though quite -# possibly Iran will change the rules first. 
-Rule Iran 2036 max - Mar 21 0:00 1:00 D -Rule Iran 2036 max - Sep 21 0:00 0 S - -# Zone NAME GMTOFF RULES FORMAT [UNTIL] -Zone Asia/Tehran 3:25:44 - LMT 1916 - 3:25:44 - TMT 1946 # Tehran Mean Time - 3:30 - +0330 1977 Nov - 4:00 Iran +04/+05 1979 - 3:30 Iran +0330/+0430 - - -# Iraq -# -# From Jonathan Lennox (2000-06-12): -# An article in this week's Economist ("Inside the Saddam-free zone", p. 50 in -# the U.S. edition) on the Iraqi Kurds contains a paragraph: -# "The three northern provinces ... switched their clocks this spring and -# are an hour ahead of Baghdad." -# -# But Rives McDow (2000-06-18) quotes a contact in Iraqi-Kurdistan as follows: -# In the past, some Kurdish nationalists, as a protest to the Iraqi -# Government, did not adhere to daylight saving time. They referred -# to daylight saving as Saddam time. But, as of today, the time zone -# in Iraqi-Kurdistan is on standard time with Baghdad, Iraq. -# -# So we'll ignore the Economist's claim. - -# From Steffen Thorsen (2008-03-10): -# The cabinet in Iraq abolished DST last week, according to the following -# news sources (in Arabic): -# http://www.aljeeran.net/wesima_articles/news-20080305-98602.html -# http://www.aswataliraq.info/look/article.tpl?id=2047&IdLanguage=17&IdPublication=4&NrArticle=71743&NrIssue=1&NrSection=10 -# -# We have published a short article in English about the change: -# http://www.timeanddate.com/news/time/iraq-dumps-daylight-saving.html - -# Rule NAME FROM TO TYPE IN ON AT SAVE LETTER/S -Rule Iraq 1982 only - May 1 0:00 1:00 D -Rule Iraq 1982 1984 - Oct 1 0:00 0 S -Rule Iraq 1983 only - Mar 31 0:00 1:00 D -Rule Iraq 1984 1985 - Apr 1 0:00 1:00 D -Rule Iraq 1985 1990 - Sep lastSun 1:00s 0 S -Rule Iraq 1986 1990 - Mar lastSun 1:00s 1:00 D -# IATA SSIM (1991/1996) says Apr 1 12:01am UTC; guess the ':01' is a typo. -# Shanks & Pottenger say Iraq did not observe DST 1992/1997; ignore this. -# -Rule Iraq 1991 2007 - Apr 1 3:00s 1:00 D -Rule Iraq 1991 2007 - Oct 1 3:00s 0 S -# Zone NAME GMTOFF RULES FORMAT [UNTIL] -Zone Asia/Baghdad 2:57:40 - LMT 1890 - 2:57:36 - BMT 1918 # Baghdad Mean Time? - 3:00 - +03 1982 May - 3:00 Iraq +03/+04 - - -############################################################################### - -# Israel - -# From Ephraim Silverberg (2001-01-11): -# -# I coined "IST/IDT" circa 1988. Until then there were three -# different abbreviations in use: -# -# JST Jerusalem Standard Time [Danny Braniss, Hebrew University] -# IZT Israel Zonal (sic) Time [Prof. Haim Papo, Technion] -# EEST Eastern Europe Standard Time [used by almost everyone else] -# -# Since timezones should be called by country and not capital cities, -# I ruled out JST. As Israel is in Asia Minor and not Eastern Europe, -# EEST was equally unacceptable. Since "zonal" was not compatible with -# any other timezone abbreviation, I felt that 'IST' was the way to go -# and, indeed, it has received almost universal acceptance in timezone -# settings in Israeli computers. -# -# In any case, I am happy to share timezone abbreviations with India, -# high on my favorite-country list (and not only because my wife's -# family is from India). 
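To make the Rule tables in this file easier to read (they all share the header "Rule NAME FROM TO TYPE IN ON AT SAVE LETTER/S"), here is a minimal, purely illustrative Python sketch that splits one whitespace-separated Rule line into fields named after that header. The helper name split_rule is hypothetical and this is not the zic parser; it only shows how the columns line up, including the 's' suffix on the AT column, which marks a transition expressed in local standard time rather than wall-clock time.

    # Illustrative only: field names follow the "Rule NAME FROM TO TYPE IN ON AT SAVE LETTER/S" header.
    FIELDS = ["keyword", "name", "from", "to", "type", "in", "on", "at", "save", "letter"]

    def split_rule(line):
        """Split one tzdata Rule line into a dict keyed by the header names."""
        return dict(zip(FIELDS, line.split()))

    rule = split_rule("Rule Iraq 1991 2007 - Apr 1 3:00s 1:00 D")
    print(rule["at"])      # '3:00s' -- trailing 's' = 03:00 local standard time
    print(rule["save"])    # '1:00'  -- offset added to standard time while the rule applies
    print(rule["letter"])  # 'D'     -- substituted for %s in a Zone's FORMAT column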
- -# From Shanks & Pottenger: -# Rule NAME FROM TO TYPE IN ON AT SAVE LETTER/S -Rule Zion 1940 only - Jun 1 0:00 1:00 D -Rule Zion 1942 1944 - Nov 1 0:00 0 S -Rule Zion 1943 only - Apr 1 2:00 1:00 D -Rule Zion 1944 only - Apr 1 0:00 1:00 D -Rule Zion 1945 only - Apr 16 0:00 1:00 D -Rule Zion 1945 only - Nov 1 2:00 0 S -Rule Zion 1946 only - Apr 16 2:00 1:00 D -Rule Zion 1946 only - Nov 1 0:00 0 S -Rule Zion 1948 only - May 23 0:00 2:00 DD -Rule Zion 1948 only - Sep 1 0:00 1:00 D -Rule Zion 1948 1949 - Nov 1 2:00 0 S -Rule Zion 1949 only - May 1 0:00 1:00 D -Rule Zion 1950 only - Apr 16 0:00 1:00 D -Rule Zion 1950 only - Sep 15 3:00 0 S -Rule Zion 1951 only - Apr 1 0:00 1:00 D -Rule Zion 1951 only - Nov 11 3:00 0 S -Rule Zion 1952 only - Apr 20 2:00 1:00 D -Rule Zion 1952 only - Oct 19 3:00 0 S -Rule Zion 1953 only - Apr 12 2:00 1:00 D -Rule Zion 1953 only - Sep 13 3:00 0 S -Rule Zion 1954 only - Jun 13 0:00 1:00 D -Rule Zion 1954 only - Sep 12 0:00 0 S -Rule Zion 1955 only - Jun 11 2:00 1:00 D -Rule Zion 1955 only - Sep 11 0:00 0 S -Rule Zion 1956 only - Jun 3 0:00 1:00 D -Rule Zion 1956 only - Sep 30 3:00 0 S -Rule Zion 1957 only - Apr 29 2:00 1:00 D -Rule Zion 1957 only - Sep 22 0:00 0 S -Rule Zion 1974 only - Jul 7 0:00 1:00 D -Rule Zion 1974 only - Oct 13 0:00 0 S -Rule Zion 1975 only - Apr 20 0:00 1:00 D -Rule Zion 1975 only - Aug 31 0:00 0 S -Rule Zion 1985 only - Apr 14 0:00 1:00 D -Rule Zion 1985 only - Sep 15 0:00 0 S -Rule Zion 1986 only - May 18 0:00 1:00 D -Rule Zion 1986 only - Sep 7 0:00 0 S -Rule Zion 1987 only - Apr 15 0:00 1:00 D -Rule Zion 1987 only - Sep 13 0:00 0 S - -# From Avigdor Finkelstein (2014-03-05): -# I check the Parliament (Knesset) records and there it's stated that the -# [1988] transition should take place on Saturday night, when the Sabbath -# ends and changes to Sunday. -Rule Zion 1988 only - Apr 10 0:00 1:00 D -Rule Zion 1988 only - Sep 4 0:00 0 S - -# From Ephraim Silverberg -# (1997-03-04, 1998-03-16, 1998-12-28, 2000-01-17, 2000-07-25, 2004-12-22, -# and 2005-02-17): - -# According to the Office of the Secretary General of the Ministry of -# Interior, there is NO set rule for Daylight-Savings/Standard time changes. -# One thing is entrenched in law, however: that there must be at least 150 -# days of daylight savings time annually. From 1993-1998, the change to -# daylight savings time was on a Friday morning from midnight IST to -# 1 a.m IDT; up until 1998, the change back to standard time was on a -# Saturday night from midnight daylight savings time to 11 p.m. standard -# time. 1996 is an exception to this rule where the change back to standard -# time took place on Sunday night instead of Saturday night to avoid -# conflicts with the Jewish New Year. In 1999, the change to -# daylight savings time was still on a Friday morning but from -# 2 a.m. IST to 3 a.m. IDT; furthermore, the change back to standard time -# was also on a Friday morning from 2 a.m. IDT to 1 a.m. IST for -# 1999 only. In the year 2000, the change to daylight savings time was -# similar to 1999, but although the change back will be on a Friday, it -# will take place from 1 a.m. IDT to midnight IST. Starting in 2001, all -# changes to/from will take place at 1 a.m. old time, but now there is no -# rule as to what day of the week it will take place in as the start date -# (except in 2003) is the night after the Passover Seder (i.e. 
the eve -# of the 16th of Nisan in the lunar Hebrew calendar) and the end date -# (except in 2002) is three nights before Yom Kippur [Day of Atonement] -# (the eve of the 7th of Tishrei in the lunar Hebrew calendar). - -# Rule NAME FROM TO TYPE IN ON AT SAVE LETTER/S -Rule Zion 1989 only - Apr 30 0:00 1:00 D -Rule Zion 1989 only - Sep 3 0:00 0 S -Rule Zion 1990 only - Mar 25 0:00 1:00 D -Rule Zion 1990 only - Aug 26 0:00 0 S -Rule Zion 1991 only - Mar 24 0:00 1:00 D -Rule Zion 1991 only - Sep 1 0:00 0 S -Rule Zion 1992 only - Mar 29 0:00 1:00 D -Rule Zion 1992 only - Sep 6 0:00 0 S -Rule Zion 1993 only - Apr 2 0:00 1:00 D -Rule Zion 1993 only - Sep 5 0:00 0 S - -# The dates for 1994-1995 were obtained from Office of the Spokeswoman for the -# Ministry of Interior, Jerusalem, Israel. The spokeswoman can be reached by -# calling the office directly at 972-2-6701447 or 972-2-6701448. - -# Rule NAME FROM TO TYPE IN ON AT SAVE LETTER/S -Rule Zion 1994 only - Apr 1 0:00 1:00 D -Rule Zion 1994 only - Aug 28 0:00 0 S -Rule Zion 1995 only - Mar 31 0:00 1:00 D -Rule Zion 1995 only - Sep 3 0:00 0 S - -# The dates for 1996 were determined by the Minister of Interior of the -# time, Haim Ramon. The official announcement regarding 1996-1998 -# (with the dates for 1997-1998 no longer being relevant) can be viewed at: -# -# ftp://ftp.cs.huji.ac.il/pub/tz/announcements/1996-1998.ramon.ps.gz -# -# The dates for 1997-1998 were altered by his successor, Rabbi Eli Suissa. -# -# The official announcements for the years 1997-1999 can be viewed at: -# -# ftp://ftp.cs.huji.ac.il/pub/tz/announcements/YYYY.ps.gz -# -# where YYYY is the relevant year. - -# Rule NAME FROM TO TYPE IN ON AT SAVE LETTER/S -Rule Zion 1996 only - Mar 15 0:00 1:00 D -Rule Zion 1996 only - Sep 16 0:00 0 S -Rule Zion 1997 only - Mar 21 0:00 1:00 D -Rule Zion 1997 only - Sep 14 0:00 0 S -Rule Zion 1998 only - Mar 20 0:00 1:00 D -Rule Zion 1998 only - Sep 6 0:00 0 S -Rule Zion 1999 only - Apr 2 2:00 1:00 D -Rule Zion 1999 only - Sep 3 2:00 0 S - -# The Knesset Interior Committee has changed the dates for 2000 for -# the third time in just over a year and have set new dates for the -# years 2001-2004 as well. -# -# The official announcement for the start date of 2000 can be viewed at: -# -# ftp://ftp.cs.huji.ac.il/pub/tz/announcements/2000-start.ps.gz -# -# The official announcement for the end date of 2000 and the dates -# for the years 2001-2004 can be viewed at: -# -# ftp://ftp.cs.huji.ac.il/pub/tz/announcements/2000-2004.ps.gz - -# Rule NAME FROM TO TYPE IN ON AT SAVE LETTER/S -Rule Zion 2000 only - Apr 14 2:00 1:00 D -Rule Zion 2000 only - Oct 6 1:00 0 S -Rule Zion 2001 only - Apr 9 1:00 1:00 D -Rule Zion 2001 only - Sep 24 1:00 0 S -Rule Zion 2002 only - Mar 29 1:00 1:00 D -Rule Zion 2002 only - Oct 7 1:00 0 S -Rule Zion 2003 only - Mar 28 1:00 1:00 D -Rule Zion 2003 only - Oct 3 1:00 0 S -Rule Zion 2004 only - Apr 7 1:00 1:00 D -Rule Zion 2004 only - Sep 22 1:00 0 S - -# The proposed law agreed upon by the Knesset Interior Committee on -# 2005-02-14 is that, for 2005 and beyond, DST starts at 02:00 the -# last Friday before April 2nd (i.e. the last Friday in March or April -# 1st itself if it falls on a Friday) and ends at 02:00 on the Saturday -# night _before_ the fast of Yom Kippur. 
-# -# Those who can read Hebrew can view the announcement at: -# -# ftp://ftp.cs.huji.ac.il/pub/tz/announcements/2005+beyond.ps - -# From Paul Eggert (2012-10-26): -# I used Ephraim Silverberg's dst-israel.el program -# (2005-02-20) -# along with Ed Reingold's cal-hebrew in GNU Emacs 21.4, -# to generate the transitions from 2005 through 2012. -# (I replaced "lastFri" with "Fri>=26" by hand.) -# The spring transitions all correspond to the following Rule: -# -# Rule Zion 2005 2012 - Mar Fri>=26 2:00 1:00 D -# -# but older zic implementations (e.g., Solaris 8) do not support -# "Fri>=26" to mean April 1 in years like 2005, so for now we list the -# springtime transitions explicitly. - -# Rule NAME FROM TO TYPE IN ON AT SAVE LETTER/S -Rule Zion 2005 only - Apr 1 2:00 1:00 D -Rule Zion 2005 only - Oct 9 2:00 0 S -Rule Zion 2006 2010 - Mar Fri>=26 2:00 1:00 D -Rule Zion 2006 only - Oct 1 2:00 0 S -Rule Zion 2007 only - Sep 16 2:00 0 S -Rule Zion 2008 only - Oct 5 2:00 0 S -Rule Zion 2009 only - Sep 27 2:00 0 S -Rule Zion 2010 only - Sep 12 2:00 0 S -Rule Zion 2011 only - Apr 1 2:00 1:00 D -Rule Zion 2011 only - Oct 2 2:00 0 S -Rule Zion 2012 only - Mar Fri>=26 2:00 1:00 D -Rule Zion 2012 only - Sep 23 2:00 0 S - -# From Ephraim Silverberg (2013-06-27): -# On June 23, 2013, the Israeli government approved changes to the -# Time Decree Law. The next day, the changes passed the First Reading -# in the Knesset. The law is expected to pass the Second and Third -# (final) Readings by the beginning of September 2013. -# -# As of 2013, DST starts at 02:00 on the Friday before the last Sunday -# in March. DST ends at 02:00 on the last Sunday of October. - -# Rule NAME FROM TO TYPE IN ON AT SAVE LETTER/S -Rule Zion 2013 max - Mar Fri>=23 2:00 1:00 D -Rule Zion 2013 max - Oct lastSun 2:00 0 S - -# Zone NAME GMTOFF RULES FORMAT [UNTIL] -Zone Asia/Jerusalem 2:20:54 - LMT 1880 - 2:20:40 - JMT 1918 # Jerusalem Mean Time? - 2:00 Zion I%sT - - - -############################################################################### - -# Japan - -# '9:00' and 'JST' is from Guy Harris. - -# From Paul Eggert (1995-03-06): -# Today's _Asahi Evening News_ (page 4) reports that Japan had -# daylight saving between 1948 and 1951, but "the system was discontinued -# because the public believed it would lead to longer working hours." - -# From Mayumi Negishi in the 2005-08-10 Japan Times: -# http://www.japantimes.co.jp/cgi-bin/getarticle.pl5?nn20050810f2.htm -# Occupation authorities imposed daylight-saving time on Japan on -# [1948-05-01].... But lack of prior debate and the execution of -# daylight-saving time just three days after the bill was passed generated -# deep hatred of the concept.... The Diet unceremoniously passed a bill to -# dump the unpopular system in October 1951, less than a month after the San -# Francisco Peace Treaty was signed. (A government poll in 1951 showed 53% -# of the Japanese wanted to scrap daylight-saving time, as opposed to 30% who -# wanted to keep it.) - -# From Paul Eggert (2006-03-22): -# Shanks & Pottenger write that DST in Japan during those years was as follows: -# Rule NAME FROM TO TYPE IN ON AT SAVE LETTER/S -Rule Japan 1948 only - May Sun>=1 2:00 1:00 D -Rule Japan 1948 1951 - Sep Sat>=8 2:00 0 S -Rule Japan 1949 only - Apr Sun>=1 2:00 1:00 D -Rule Japan 1950 1951 - May Sun>=1 2:00 1:00 D -# but the only locations using it (for birth certificates, presumably, since -# their audience is astrologers) were US military bases. 
For now, assume -# that for most purposes daylight-saving time was observed; otherwise, what -# would have been the point of the 1951 poll? - -# From Hideyuki Suzuki (1998-11-09): -# 'Tokyo' usually stands for the former location of Tokyo Astronomical -# Observatory: 139 degrees 44' 40.90" E (9h 18m 58.727s), -# 35 degrees 39' 16.0" N. -# This data is from 'Rika Nenpyou (Chronological Scientific Tables) 1996' -# edited by National Astronomical Observatory of Japan.... -# JST (Japan Standard Time) has been used since 1888-01-01 00:00 (JST). -# The law is enacted on 1886-07-07. - -# From Hideyuki Suzuki (1998-11-16): -# The ordinance No. 51 (1886) established "standard time" in Japan, -# which stands for the time on 135 degrees E. -# In the ordinance No. 167 (1895), "standard time" was renamed to "central -# standard time". And the same ordinance also established "western standard -# time", which stands for the time on 120 degrees E.... But "western standard -# time" was abolished in the ordinance No. 529 (1937). In the ordinance No. -# 167, there is no mention regarding for what place western standard time is -# standard.... -# -# I wrote "ordinance" above, but I don't know how to translate. -# In Japanese it's "chokurei", which means ordinance from emperor. - -# From Yu-Cheng Chuang (2013-07-12): -# ...the Meiji Emperor announced Ordinance No. 167 of Meiji Year 28 "The clause -# about standard time" ... The adoption began from Jan 1, 1896. -# http://ja.wikisource.org/wiki/標準時ニ關スル件_(公布時) -# -# ...the Showa Emperor announced Ordinance No. 529 of Showa Year 12 ... which -# means the whole Japan territory, including later occupations, adopt Japan -# Central Time (UTC+9). The adoption began on Oct 1, 1937. -# http://ja.wikisource.org/wiki/明治二十八年勅令第百六十七號標準時ニ關スル件中改正ノ件 - -# Zone NAME GMTOFF RULES FORMAT [UNTIL] -Zone Asia/Tokyo 9:18:59 - LMT 1887 Dec 31 15:00u - 9:00 Japan J%sT -# Since 1938, all Japanese possessions have been like Asia/Tokyo. - -# Jordan -# -# From -# Jordan Week (1999-07-01) via Steffen Thorsen (1999-09-09): -# Clocks in Jordan were forwarded one hour on Wednesday at midnight, -# in accordance with the government's decision to implement summer time -# all year round. -# -# From -# Jordan Week (1999-09-30) via Steffen Thorsen (1999-11-09): -# Winter time starts today Thursday, 30 September. Clocks will be turned back -# by one hour. This is the latest government decision and it's final! -# The decision was taken because of the increase in working hours in -# government's departments from six to seven hours. -# -# From Paul Eggert (2005-11-22): -# Starting 2003 transitions are from Steffen Thorsen's web site timeanddate.com. -# -# From Steffen Thorsen (2005-11-23): -# For Jordan I have received multiple independent user reports every year -# about DST end dates, as the end-rule is different every year. -# -# From Steffen Thorsen (2006-10-01), after a heads-up from Hilal Malawi: -# http://www.petranews.gov.jo/nepras/2006/Sep/05/4000.htm -# "Jordan will switch to winter time on Friday, October 27". -# - -# From Steffen Thorsen (2009-04-02): -# This single one might be good enough, (2009-03-24, Arabic): -# http://petra.gov.jo/Artical.aspx?Lng=2&Section=8&Artical=95279 -# -# Google's translation: -# -# > The Council of Ministers decided in 2002 to adopt the principle of timely -# > submission of the summer at 60 minutes as of midnight on the last Thursday -# > of the month of March of each year. -# -# So - this means the midnight between Thursday and Friday since 2002. 
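To make the "midnight between Thursday and Friday" reading above concrete: the Rule Jordan lines below use "Mar lastThu" with an AT of "24:00", i.e. the end of the last Thursday of March, which is the same instant as 00:00 on the following Friday. A small, hypothetical Python sketch of that calendar arithmetic (illustration only, not zic; last_weekday is an ad-hoc helper, and 2018 is just a sample year under the rules shown here):

    from datetime import date, datetime, timedelta

    def last_weekday(year, month, weekday):
        """Last occurrence of `weekday` (Mon=0 .. Sun=6) in the given month."""
        if month == 12:
            d = date(year, 12, 31)
        else:
            d = date(year, month + 1, 1) - timedelta(days=1)
        while d.weekday() != weekday:
            d -= timedelta(days=1)
        return d

    THURSDAY = 3
    last_thu = last_weekday(2018, 3, THURSDAY)        # 2018-03-29
    # "lastThu 24:00" means the end of that Thursday, i.e. midnight starting Friday:
    transition = datetime.combine(last_thu, datetime.min.time()) + timedelta(days=1)
    print(last_thu, transition)                       # 2018-03-29 2018-03-30 00:00:00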
- -# From Arthur David Olson (2009-04-06): -# We still have Jordan switching to DST on Thursdays in 2000 and 2001. - -# From Steffen Thorsen (2012-10-25): -# Yesterday the government in Jordan announced that they will not -# switch back to standard time this winter, so the will stay on DST -# until about the same time next year (at least). -# http://www.petra.gov.jo/Public_News/Nws_NewsDetails.aspx?NewsID=88950 - -# From Steffen Thorsen (2013-12-11): -# Jordan Times and other sources say that Jordan is going back to -# UTC+2 on 2013-12-19 at midnight: -# http://jordantimes.com/govt-decides-to-switch-back-to-wintertime -# Official, in Arabic: -# http://www.petra.gov.jo/public_news/Nws_NewsDetails.aspx?Menu_ID=&Site_Id=2&lang=1&NewsID=133230&CatID=14 -# ... Our background/permalink about it -# http://www.timeanddate.com/news/time/jordan-reverses-dst-decision.html -# ... -# http://www.petra.gov.jo/Public_News/Nws_NewsDetails.aspx?lang=2&site_id=1&NewsID=133313&Type=P -# ... says midnight for the coming one and 1:00 for the ones in the future -# (and they will use DST again next year, using the normal schedule). - -# From Paul Eggert (2013-12-11): -# As Steffen suggested, consider the past 21-month experiment to be DST. - -# Rule NAME FROM TO TYPE IN ON AT SAVE LETTER/S -Rule Jordan 1973 only - Jun 6 0:00 1:00 S -Rule Jordan 1973 1975 - Oct 1 0:00 0 - -Rule Jordan 1974 1977 - May 1 0:00 1:00 S -Rule Jordan 1976 only - Nov 1 0:00 0 - -Rule Jordan 1977 only - Oct 1 0:00 0 - -Rule Jordan 1978 only - Apr 30 0:00 1:00 S -Rule Jordan 1978 only - Sep 30 0:00 0 - -Rule Jordan 1985 only - Apr 1 0:00 1:00 S -Rule Jordan 1985 only - Oct 1 0:00 0 - -Rule Jordan 1986 1988 - Apr Fri>=1 0:00 1:00 S -Rule Jordan 1986 1990 - Oct Fri>=1 0:00 0 - -Rule Jordan 1989 only - May 8 0:00 1:00 S -Rule Jordan 1990 only - Apr 27 0:00 1:00 S -Rule Jordan 1991 only - Apr 17 0:00 1:00 S -Rule Jordan 1991 only - Sep 27 0:00 0 - -Rule Jordan 1992 only - Apr 10 0:00 1:00 S -Rule Jordan 1992 1993 - Oct Fri>=1 0:00 0 - -Rule Jordan 1993 1998 - Apr Fri>=1 0:00 1:00 S -Rule Jordan 1994 only - Sep Fri>=15 0:00 0 - -Rule Jordan 1995 1998 - Sep Fri>=15 0:00s 0 - -Rule Jordan 1999 only - Jul 1 0:00s 1:00 S -Rule Jordan 1999 2002 - Sep lastFri 0:00s 0 - -Rule Jordan 2000 2001 - Mar lastThu 0:00s 1:00 S -Rule Jordan 2002 2012 - Mar lastThu 24:00 1:00 S -Rule Jordan 2003 only - Oct 24 0:00s 0 - -Rule Jordan 2004 only - Oct 15 0:00s 0 - -Rule Jordan 2005 only - Sep lastFri 0:00s 0 - -Rule Jordan 2006 2011 - Oct lastFri 0:00s 0 - -Rule Jordan 2013 only - Dec 20 0:00 0 - -Rule Jordan 2014 max - Mar lastThu 24:00 1:00 S -Rule Jordan 2014 max - Oct lastFri 0:00s 0 - -# Zone NAME GMTOFF RULES FORMAT [UNTIL] -Zone Asia/Amman 2:23:44 - LMT 1931 - 2:00 Jordan EE%sT - - -# Kazakhstan - -# From Kazakhstan Embassy's News Bulletin No. 11 -# (2005-03-21): -# The Government of Kazakhstan passed a resolution March 15 abolishing -# daylight saving time citing lack of economic benefits and health -# complications coupled with a decrease in productivity. -# -# From Branislav Kojic (in Astana) via Gwillim Law (2005-06-28): -# ... what happened was that the former Kazakhstan Eastern time zone -# was "blended" with the Central zone. Therefore, Kazakhstan now has -# two time zones, and difference between them is one hour. The zone -# closer to UTC is the former Western zone (probably still called the -# same), encompassing four provinces in the west: Aqtöbe, Atyraū, -# Mangghystaū, and West Kazakhstan. The other zone encompasses -# everything else.... 
I guess that would make Kazakhstan time zones -# de jure UTC+5 and UTC+6 respectively. - -# From Stepan Golosunov (2016-03-27): -# Review of the linked documents from http://adilet.zan.kz/ -# produced the following data for post-1991 Kazakhstan: -# -# 0. Act of the Cabinet of Ministers of the USSR -# from 1991-02-04 No. 20 -# http://pravo.gov.ru/proxy/ips/?docbody=&nd=102010545 -# removed the extra hour ("decree time") on the territory of the USSR -# starting with the last Sunday of March 1991. -# It also allowed (but not mandated) Kazakh SSR, Kirghiz SSR, Tajik SSR, -# Turkmen SSR and Uzbek SSR to not have "summer" time. -# -# The 1992-01-13 act also refers to the act of the Cabinet of Ministers -# of the Kazakh SSR from 1991-03-20 No. 170 "About the act of the Cabinet -# of Ministers of the USSR from 1991-02-04 No. 20" but I didn't found its -# text. -# -# According to Izvestia newspaper No. 68 (23334) from 1991-03-20 -# (page 6; available at http://libinfo.org/newsr/newsr2574.djvu via -# http://libinfo.org/index.php?id=58564) on 1991-03-31 at 2:00 during -# transition to "summer" time: -# Republic of Georgia, Latvian SSR, Lithuanian SSR, SSR Moldova, -# Estonian SSR; Komi ASSR; Kaliningrad oblast; Nenets autonomous okrug -# were to move clocks 1 hour forward. -# Kazakh SSR (excluding Uralsk oblast); Republic of Kyrgyzstan, Tajik -# SSR; Andijan, Jizzakh, Namangan, Sirdarya, Tashkent, Fergana oblasts -# of the Uzbek SSR were to move clocks 1 hour backwards. -# Other territories were to not move clocks. -# When the "summer" time would end on 1991-09-29, clocks were to be -# moved 1 hour backwards on the territory of the USSR excluding -# Kazakhstan, Kirghizia, Uzbekistan, Turkmenia, Tajikistan. -# -# Apparently there were last minute changes. Apparently Kazakh act No. 170 -# was one of such changes. -# -# https://ru.wikipedia.org/wiki/Декретное время -# claims that Sovetskaya Rossiya newspaper on 1991-03-29 published that -# Nenets autonomous okrug, Komi and Kazakhstan (excluding Uralsk oblast) -# were to not move clocks and Uralsk oblast was to move clocks -# forward; on 1991-09-29 Kazakhstan was to move clocks backwards. -# (Probably there were changes even after that publication. There is an -# article claiming that Kaliningrad oblast decided on 1991-03-29 to not -# move clocks.) -# -# This implies that on 1991-03-31 Asia/Oral remained on +04/+05 while -# the rest of Kazakhstan switched from +06/+07 to +05/06 or from +05/06 -# to +04/+05. It's unclear how Qyzylorda oblast moved into the fifth -# time belt. (By switching from +04/+05 to +05/+06 on 1991-09-29?) ... -# -# 1. Act of the Cabinet of Ministers of the Republic of Kazakhstan -# from 1992-01-13 No. 28 -# http://adilet.zan.kz/rus/docs/P920000028_ -# (text includes modification from the 1996 act) -# introduced new rules for calculation of time, mirroring Russian -# 1992-01-08 act. It specified that time would be calculated -# according to time belts plus extra hour ("decree time"), moved clocks -# on the whole territory of Kazakhstan 1 hour forward on 1992-01-19 at -# 2:00, specified DST rules. It acknowledged that Kazakhstan was -# located in the fourth and the fifth time belts and specified the -# border between them to be located east of Qostanay and Aktyubinsk -# oblasts (notably including Turgai and Qyzylorda oblasts into the fifth -# time belt). 
-# -# This means switch on 1992-01-19 at 2:00 from +04/+05 to +05/+06 for -# Asia/Aqtau, Asia/Aqtobe, Asia/Oral, Atyraū and Qostanay oblasts; from -# +05/+06 to +06/+07 for Asia/Almaty and Asia/Qyzylorda (and Arkalyk).... -# -# 2. Act of the Cabinet of Ministers of the Republic of Kazakhstan -# from 1992-03-27 No. 284 -# http://adilet.zan.kz/rus/docs/P920000284_ -# cancels extra hour ("decree time") for Uralsk and Qyzylorda oblasts -# since the last Sunday of March 1992, while keeping them in the fourth -# and the fifth time belts respectively. -# -# 3. Order of the Prime Minister of the Republic of Kazakhstan -# from 1994-09-23 No. 384 -# http://adilet.zan.kz/rus/docs/R940000384_ -# cancels the extra hour ("decree time") on the territory of Mangghystaū -# oblast since the last Sunday of September 1994 (saying that time on -# the territory would correspond to the third time belt as a -# result).... -# -# 4. Act of the Government of the Republic of Kazakhstan -# from 1996-05-08 No. 575 -# http://adilet.zan.kz/rus/docs/P960000575_ -# amends the 1992-01-13 act to end summer time in October instead -# of September, mirroring identical Russian change from 1996-04-23 act. -# -# 5. Act of the Government of the Republic of Kazakhstan -# from 1999-03-26 No. 305 -# http://adilet.zan.kz/rus/docs/P990000305_ -# cancels the extra hour ("decree time") for Atyraū oblast since the -# last Sunday of March 1999 while retaining the oblast in the fourth -# time belt. -# -# This means change from +05/+06 to +04/+05.... -# -# 6. Act of the Government of the Republic of Kazakhstan -# from 2000-11-23 No. 1749 -# http://adilet.zan.kz/rus/archive/docs/P000001749_/23.11.2000 -# replaces the previous five documents. -# -# The only changes I noticed are in definition of the border between the -# fourth and the fifth time belts. They account for changes in spelling -# and administrative division (splitting of Turgai oblast in 1997 -# probably changed time in territories incorporated into Qostanay oblast -# (including Arkalyk) from +06/+07 to +05/+06) and move Qyzylorda oblast -# from being in the fifth time belt and not using decree time into the -# fourth time belt (no change in practice). -# -# 7. Act of the Government of the Republic of Kazakhstan -# from 2003-12-29 No. 1342 -# http://adilet.zan.kz/rus/docs/P030001342_ -# modified the 2000-11-23 act. No relevant changes, apparently. -# -# 8. Act of the Government of the Republic of Kazakhstan -# from 2004-07-20 No. 775 -# http://adilet.zan.kz/rus/archive/docs/P040000775_/20.07.2004 -# modified the 2000-11-23 act to move Qostanay and Qyzylorda oblasts into -# the fifth time belt and add Aktobe oblast to the list of regions not -# using extra hour ("decree time"), leaving Kazakhstan with only 2 time -# zones (+04/+05 and +06/+07). The changes were to be implemented -# during DST transitions in 2004 and 2005 but the acts got radically -# amended before implementation happened. -# -# 9. Act of the Government of the Republic of Kazakhstan -# from 2004-09-15 No. 1059 -# http://adilet.zan.kz/rus/docs/P040001059_ -# modified the 2000-11-23 act to remove exceptions from the "decree time" -# (leaving Kazakhstan in +05/+06 and +06/+07 zones), amended the -# 2004-07-20 act to implement changes for Atyraū, West Kazakhstan, -# Qostanay, Qyzylorda and Mangghystaū oblasts by not moving clocks -# during the 2004 transition to "winter" time. 
-# -# This means transition from +04/+05 to +05/+06 for Atyraū oblast (no -# zone currently), Asia/Oral, Asia/Aqtau and transition from +05/+06 to -# +06/+07 for Qostanay oblast (Qostanay and Arkalyk, no zones currently) -# and Asia/Qyzylorda on 2004-10-31 at 3:00.... -# -# 10. Act of the Government of the Republic of Kazakhstan -# from 2005-03-15 No. 231 -# http://adilet.zan.kz/rus/docs/P050000231_ -# removes DST provisions from the 2000-11-23 act, removes most of the -# (already implemented) provisions from the 2004-07-20 and 2004-09-15 -# acts, comes into effect 10 days after official publication. -# The only practical effect seems to be the abolition of the summer -# time. -# -# Unamended version of the act of the Government of the Russian Federation -# No. 23 from 1992-01-08 [See 'europe' file for details]. -# Kazakh 1992-01-13 act appears to provide the same rules and 1992-03-27 -# act was to be enacted on the last Sunday of March 1992. - -# From Stepan Golosunov (2016-11-08): -# Turgai reorganization should affect only southern part of Qostanay -# oblast. Which should probably be separated into Asia/Arkalyk zone. -# (There were also 1970, 1988 and 1990 Turgai oblast reorganizations -# according to wikipedia.) -# -# [For Qostanay] http://www.ng.kz/gazeta/195/hranit/ -# suggests that clocks were to be moved 40 minutes backwards on -# 1920-01-01 to the fourth time belt. But I do not understand -# how that could happen.... -# -# [For Atyrau and Oral] 1919 decree -# (http://www.worldtimezone.com/dst_news/dst_news_russia-1919-02-08.html -# and in Byalokoz) lists Ural river (plus 10 versts on its left bank) in -# the third time belt (before 1930 this means +03). - -# From Paul Eggert (2016-12-06): -# The tables below reflect Golosunov's remarks, with exceptions as noted. - -# Zone NAME GMTOFF RULES FORMAT [UNTIL] -# -# Almaty (formerly Alma-Ata), representing most locations in Kazakhstan -# This includes KZ-AKM, KZ-ALA, KZ-ALM, KZ-AST, KZ-BAY, KZ-VOS, KZ-ZHA, -# KZ-KAR, KZ-SEV, KZ-PAV, and KZ-YUZ. -Zone Asia/Almaty 5:07:48 - LMT 1924 May 2 # or Alma-Ata - 5:00 - +05 1930 Jun 21 - 6:00 RussiaAsia +06/+07 1991 Mar 31 2:00s - 5:00 RussiaAsia +05/+06 1992 Jan 19 2:00s - 6:00 RussiaAsia +06/+07 2004 Oct 31 2:00s - 6:00 - +06 -# Qyzylorda (aka Kyzylorda, Kizilorda, Kzyl-Orda, etc.) (KZ-KZY) -# This currently includes Qostanay (aka Kostanay, Kustanay) (KZ-KUS); -# see comments below. -Zone Asia/Qyzylorda 4:21:52 - LMT 1924 May 2 - 4:00 - +04 1930 Jun 21 - 5:00 - +05 1981 Apr 1 - 5:00 1:00 +06 1981 Oct 1 - 6:00 - +06 1982 Apr 1 - 5:00 RussiaAsia +05/+06 1991 Mar 31 2:00s - 4:00 RussiaAsia +04/+05 1991 Sep 29 2:00s - 5:00 RussiaAsia +05/+06 1992 Jan 19 2:00s - 6:00 RussiaAsia +06/+07 1992 Mar 29 2:00s - 5:00 RussiaAsia +05/+06 2004 Oct 31 2:00s - 6:00 - +06 -# The following zone is like Asia/Qyzylorda except for being one -# hour earlier from 1991-09-29 to 1992-03-29. The 1991/2 rules for -# Qostanay are unclear partly because of the 1997 Turgai -# reorganization, so this zone is commented out for now. 
-#Zone Asia/Qostanay 4:14:20 - LMT 1924 May 2 -# 4:00 - +04 1930 Jun 21 -# 5:00 - +05 1981 Apr 1 -# 5:00 1:00 +06 1981 Oct 1 -# 6:00 - +06 1982 Apr 1 -# 5:00 RussiaAsia +05/+06 1991 Mar 31 2:00s -# 4:00 RussiaAsia +04/+05 1992 Jan 19 2:00s -# 5:00 RussiaAsia +05/+06 2004 Oct 31 2:00s -# 6:00 - +06 -# -# Aqtöbe (aka Aktobe, formerly Aktyubinsk) (KZ-AKT) -Zone Asia/Aqtobe 3:48:40 - LMT 1924 May 2 - 4:00 - +04 1930 Jun 21 - 5:00 - +05 1981 Apr 1 - 5:00 1:00 +06 1981 Oct 1 - 6:00 - +06 1982 Apr 1 - 5:00 RussiaAsia +05/+06 1991 Mar 31 2:00s - 4:00 RussiaAsia +04/+05 1992 Jan 19 2:00s - 5:00 RussiaAsia +05/+06 2004 Oct 31 2:00s - 5:00 - +05 -# Mangghystaū (KZ-MAN) -# Aqtau was not founded until 1963, but it represents an inhabited region, -# so include time stamps before 1963. -Zone Asia/Aqtau 3:21:04 - LMT 1924 May 2 - 4:00 - +04 1930 Jun 21 - 5:00 - +05 1981 Oct 1 - 6:00 - +06 1982 Apr 1 - 5:00 RussiaAsia +05/+06 1991 Mar 31 2:00s - 4:00 RussiaAsia +04/+05 1992 Jan 19 2:00s - 5:00 RussiaAsia +05/+06 1994 Sep 25 2:00s - 4:00 RussiaAsia +04/+05 2004 Oct 31 2:00s - 5:00 - +05 -# Atyraū (KZ-ATY) is like Mangghystaū except it switched from -# +04/+05 to +05/+06 in spring 1999, not fall 1994. -Zone Asia/Atyrau 3:27:44 - LMT 1924 May 2 - 3:00 - +03 1930 Jun 21 - 5:00 - +05 1981 Oct 1 - 6:00 - +06 1982 Apr 1 - 5:00 RussiaAsia +05/+06 1991 Mar 31 2:00s - 4:00 RussiaAsia +04/+05 1992 Jan 19 2:00s - 5:00 RussiaAsia +05/+06 1999 Mar 28 2:00s - 4:00 RussiaAsia +04/+05 2004 Oct 31 2:00s - 5:00 - +05 -# West Kazakhstan (KZ-ZAP) -# From Paul Eggert (2016-03-18): -# The 1989 transition is from USSR act No. 227 (1989-03-14). -Zone Asia/Oral 3:25:24 - LMT 1924 May 2 # or Ural'sk - 3:00 - +03 1930 Jun 21 - 5:00 - +05 1981 Apr 1 - 5:00 1:00 +06 1981 Oct 1 - 6:00 - +06 1982 Apr 1 - 5:00 RussiaAsia +05/+06 1989 Mar 26 2:00s - 4:00 RussiaAsia +04/+05 1992 Jan 19 2:00s - 5:00 RussiaAsia +05/+06 1992 Mar 29 2:00s - 4:00 RussiaAsia +04/+05 2004 Oct 31 2:00s - 5:00 - +05 - -# Kyrgyzstan (Kirgizstan) -# Transitions through 1991 are from Shanks & Pottenger. - -# From Paul Eggert (2005-08-15): -# According to an article dated today in the Kyrgyzstan Development Gateway -# http://eng.gateway.kg/cgi-bin/page.pl?id=1&story_name=doc9979.shtml -# Kyrgyzstan is canceling the daylight saving time system. I take the article -# to mean that they will leave their clocks at 6 hours ahead of UTC. -# From Malik Abdugaliev (2005-09-21): -# Our government cancels daylight saving time 6th of August 2005. -# From 2005-08-12 our GMT-offset is +6, w/o any daylight saving. - -# Rule NAME FROM TO TYPE IN ON AT SAVE LETTER/S -Rule Kyrgyz 1992 1996 - Apr Sun>=7 0:00s 1:00 S -Rule Kyrgyz 1992 1996 - Sep lastSun 0:00 0 - -Rule Kyrgyz 1997 2005 - Mar lastSun 2:30 1:00 S -Rule Kyrgyz 1997 2004 - Oct lastSun 2:30 0 - -# Zone NAME GMTOFF RULES FORMAT [UNTIL] -Zone Asia/Bishkek 4:58:24 - LMT 1924 May 2 - 5:00 - +05 1930 Jun 21 - 6:00 RussiaAsia +06/+07 1991 Mar 31 2:00s - 5:00 RussiaAsia +05/+06 1991 Aug 31 2:00 - 5:00 Kyrgyz +05/+06 2005 Aug 12 - 6:00 - +06 - -############################################################################### - -# Korea (North and South) - -# From Annie I. Bang (2006-07-10): -# http://www.koreaherald.com/view.php?ud=200607100012 -# Korea ran a daylight saving program from 1949-61 but stopped it -# during the 1950-53 Korean War. The system was temporarily enforced -# between 1987 and 1988 ... 
- -# From Sanghyuk Jung (2014-10-29): -# http://mm.icann.org/pipermail/tz/2014-October/021830.html -# According to the Korean Wikipedia -# http://ko.wikipedia.org/wiki/한국_표준시 -# [oldid=12896437 2014-09-04 08:03 UTC] -# DST in Republic of Korea was as follows.... And I checked old -# newspapers in Korean, all articles correspond with data in Wikipedia. -# For example, the article in 1948 (Korean Language) proved that DST -# started at June 1 in that year. For another example, the article in -# 1988 said that DST started at 2:00 AM in that year. - -# Rule NAME FROM TO TYPE IN ON AT SAVE LETTER/S -Rule ROK 1948 only - Jun 1 0:00 1:00 D -Rule ROK 1948 only - Sep 13 0:00 0 S -Rule ROK 1949 only - Apr 3 0:00 1:00 D -Rule ROK 1949 1951 - Sep Sun>=8 0:00 0 S -Rule ROK 1950 only - Apr 1 0:00 1:00 D -Rule ROK 1951 only - May 6 0:00 1:00 D -Rule ROK 1955 only - May 5 0:00 1:00 D -Rule ROK 1955 only - Sep 9 0:00 0 S -Rule ROK 1956 only - May 20 0:00 1:00 D -Rule ROK 1956 only - Sep 30 0:00 0 S -Rule ROK 1957 1960 - May Sun>=1 0:00 1:00 D -Rule ROK 1957 1960 - Sep Sun>=18 0:00 0 S -Rule ROK 1987 1988 - May Sun>=8 2:00 1:00 D -Rule ROK 1987 1988 - Oct Sun>=8 3:00 0 S - -# From Paul Eggert (2016-08-23): -# The Korean Wikipedia entry gives the following sources for UT offsets: -# -# 1908: Official Journal Article No. 3994 (decree No. 5) -# 1912: Governor-General of Korea Official Gazette Issue No. 367 -# (Announcement No. 338) -# 1954: Presidential Decree No. 876 (1954-03-17) -# 1961: Law No. 676 (1961-08-07) -# -# (Another source "1987: Law No. 3919 (1986-12-31)" was in the 2014-10-30 -# edition of the Korean Wikipedia entry.) -# -# I guessed that time zone abbreviations through 1945 followed the same -# rules as discussed under Taiwan, with nominal switches from JST to KST -# when the respective cities were taken over by the Allies after WWII. -# -# For Pyongyang, guess no changes from World War II until 2015, as we -# have no information otherwise. - -# From Steffen Thorsen (2015-08-07): -# According to many news sources, North Korea is going to change to -# the 8:30 time zone on August 15, one example: -# http://www.bbc.com/news/world-asia-33815049 -# -# From Paul Eggert (2015-08-15): -# Bells rang out midnight (00:00) Friday as part of the celebrations. See: -# Talmadge E. North Korea celebrates new time zone, 'Pyongyang Time' -# http://news.yahoo.com/north-korea-celebrates-time-zone-pyongyang-time-164038128.html -# There is no common English-language abbreviation for this time zone. -# Use KST, as that's what we already use for 1954-1961 in ROK. - -# Zone NAME GMTOFF RULES FORMAT [UNTIL] -Zone Asia/Seoul 8:27:52 - LMT 1908 Apr 1 - 8:30 - KST 1912 Jan 1 - 9:00 - JST 1945 Sep 8 - 9:00 - KST 1954 Mar 21 - 8:30 ROK K%sT 1961 Aug 10 - 9:00 ROK K%sT -Zone Asia/Pyongyang 8:23:00 - LMT 1908 Apr 1 - 8:30 - KST 1912 Jan 1 - 9:00 - JST 1945 Aug 24 - 9:00 - KST 2015 Aug 15 00:00 - 8:30 - KST - -############################################################################### - -# Kuwait -# See Asia/Riyadh. - -# Laos -# See Asia/Bangkok. 
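One more reading aid, using the Korean entries above: a Zone's FORMAT column may contain "%s", which zic replaces with the LETTER/S value of whichever matching rule is in effect. Asia/Seoul's FORMAT of "K%sT" therefore yields KDT while a ROK rule with LETTER "D" applies and KST otherwise, and a LETTER/S of "-" (as in the Lebanon rules that follow) substitutes nothing. A minimal, hypothetical sketch of that substitution (the helper name abbreviation is mine, and this is not zic):

    def abbreviation(fmt, letter):
        """Substitute a rule's LETTER/S value into a Zone's FORMAT column."""
        # tzdata uses "-" in LETTER/S to mean an empty substitution.
        return fmt.replace("%s", "" if letter == "-" else letter)

    print(abbreviation("K%sT", "D"))   # KDT  -- Asia/Seoul under a ROK daylight rule
    print(abbreviation("K%sT", "S"))   # KST  -- Asia/Seoul on standard time
    print(abbreviation("EE%sT", "S"))  # EEST -- e.g. Asia/Beirut in summer
    print(abbreviation("EE%sT", "-"))  # EET  -- LETTER/S "-" leaves no letter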
- - -# Lebanon -# Rule NAME FROM TO TYPE IN ON AT SAVE LETTER/S -Rule Lebanon 1920 only - Mar 28 0:00 1:00 S -Rule Lebanon 1920 only - Oct 25 0:00 0 - -Rule Lebanon 1921 only - Apr 3 0:00 1:00 S -Rule Lebanon 1921 only - Oct 3 0:00 0 - -Rule Lebanon 1922 only - Mar 26 0:00 1:00 S -Rule Lebanon 1922 only - Oct 8 0:00 0 - -Rule Lebanon 1923 only - Apr 22 0:00 1:00 S -Rule Lebanon 1923 only - Sep 16 0:00 0 - -Rule Lebanon 1957 1961 - May 1 0:00 1:00 S -Rule Lebanon 1957 1961 - Oct 1 0:00 0 - -Rule Lebanon 1972 only - Jun 22 0:00 1:00 S -Rule Lebanon 1972 1977 - Oct 1 0:00 0 - -Rule Lebanon 1973 1977 - May 1 0:00 1:00 S -Rule Lebanon 1978 only - Apr 30 0:00 1:00 S -Rule Lebanon 1978 only - Sep 30 0:00 0 - -Rule Lebanon 1984 1987 - May 1 0:00 1:00 S -Rule Lebanon 1984 1991 - Oct 16 0:00 0 - -Rule Lebanon 1988 only - Jun 1 0:00 1:00 S -Rule Lebanon 1989 only - May 10 0:00 1:00 S -Rule Lebanon 1990 1992 - May 1 0:00 1:00 S -Rule Lebanon 1992 only - Oct 4 0:00 0 - -Rule Lebanon 1993 max - Mar lastSun 0:00 1:00 S -Rule Lebanon 1993 1998 - Sep lastSun 0:00 0 - -Rule Lebanon 1999 max - Oct lastSun 0:00 0 - -# Zone NAME GMTOFF RULES FORMAT [UNTIL] -Zone Asia/Beirut 2:22:00 - LMT 1880 - 2:00 Lebanon EE%sT - -# Malaysia -# Rule NAME FROM TO TYPE IN ON AT SAVE LETTER/S -Rule NBorneo 1935 1941 - Sep 14 0:00 0:20 TS # one-Third Summer -Rule NBorneo 1935 1941 - Dec 14 0:00 0 - -# -# peninsular Malaysia -# taken from Mok Ly Yng (2003-10-30) -# http://www.math.nus.edu.sg/aslaksen/teaching/timezone.html -# Zone NAME GMTOFF RULES FORMAT [UNTIL] -Zone Asia/Kuala_Lumpur 6:46:46 - LMT 1901 Jan 1 - 6:55:25 - SMT 1905 Jun 1 # Singapore M.T. - 7:00 - +07 1933 Jan 1 - 7:00 0:20 +0720 1936 Jan 1 - 7:20 - +0720 1941 Sep 1 - 7:30 - +0730 1942 Feb 16 - 9:00 - +09 1945 Sep 12 - 7:30 - +0730 1982 Jan 1 - 8:00 - +08 -# Sabah & Sarawak -# From Paul Eggert (2014-08-12): -# The data entries here are mostly from Shanks & Pottenger, but the 1942, 1945 -# and 1982 transition dates are from Mok Ly Yng. -# Zone NAME GMTOFF RULES FORMAT [UNTIL] -Zone Asia/Kuching 7:21:20 - LMT 1926 Mar - 7:30 - +0730 1933 - 8:00 NBorneo +08/+0820 1942 Feb 16 - 9:00 - +09 1945 Sep 12 - 8:00 - +08 - -# Maldives -# Zone NAME GMTOFF RULES FORMAT [UNTIL] -Zone Indian/Maldives 4:54:00 - LMT 1880 # Male - 4:54:00 - MMT 1960 # Male Mean Time - 5:00 - +05 - -# Mongolia - -# Shanks & Pottenger say that Mongolia has three time zones, but -# The USNO (1995-12-21) and the CIA map Standard Time Zones of the World -# (2005-03) both say that it has just one. - -# From Oscar van Vlijmen (1999-12-11): -# General Information Mongolia -# (1999-09) -# "Time: Mongolia has two time zones. Three westernmost provinces of -# Bayan-Ölgii, Uvs, and Hovd are one hour earlier than the capital city, and -# the rest of the country follows the Ulaanbaatar time, which is UTC/GMT plus -# eight hours." - -# From Rives McDow (1999-12-13): -# Mongolia discontinued the use of daylight savings time in 1999; 1998 -# being the last year it was implemented. The dates of implementation I am -# unsure of, but most probably it was similar to Russia, except for the time -# of implementation may have been different.... -# Some maps in the past have indicated that there was an additional time -# zone in the eastern part of Mongolia, including the provinces of Dornod, -# Sükhbaatar, and possibly Khentii. - -# From Paul Eggert (1999-12-15): -# Naming and spelling is tricky in Mongolia. 
-# We'll use Hovd (also spelled Chovd and Khovd) to represent the west zone; -# the capital of the Hovd province is sometimes called Hovd, sometimes Dund-Us, -# and sometimes Jirgalanta (with variant spellings), but the name Hovd -# is good enough for our purposes. - -# From Rives McDow (2001-05-13): -# In addition to Mongolia starting daylight savings as reported earlier -# (adopted DST on 2001-04-27 02:00 local time, ending 2001-09-28), -# there are three time zones. -# -# Provinces [at 7:00]: Bayan-Ölgii, Uvs, Khovd, Zavkhan, Govi-Altai -# Provinces [at 8:00]: Khövsgöl, Bulgan, Arkhangai, Khentii, Töv, -# Bayankhongor, Övörkhangai, Dundgovi, Dornogovi, Ömnögovi -# Provinces [at 9:00]: Dornod, Sükhbaatar -# -# [The province of Selenge is omitted from the above lists.] - -# From Ganbold Ts., Ulaanbaatar (2004-04-17): -# Daylight saving occurs at 02:00 local time last Saturday of March. -# It will change back to normal at 02:00 local time last Saturday of -# September.... As I remember this rule was changed in 2001. -# -# From Paul Eggert (2004-04-17): -# For now, assume Rives McDow's informant got confused about Friday vs -# Saturday, and that his 2001 dates should have 1 added to them. - -# From Paul Eggert (2005-07-26): -# We have wildly conflicting information about Mongolia's time zones. -# Bill Bonnet (2005-05-19) reports that the US Embassy in Ulaanbaatar says -# there is only one time zone and that DST is observed, citing Microsoft -# Windows XP as the source. Risto Nykänen (2005-05-16) reports that -# travelmongolia.org says there are two time zones (UT +07, +08) with no DST. -# Oscar van Vlijmen (2005-05-20) reports that the Mongolian Embassy in -# Washington, DC says there are two time zones, with DST observed. -# He also found -# http://ubpost.mongolnews.mn/index.php?subaction=showcomments&id=1111634894&archive=&start_from=&ucat=1& -# which also says that there is DST, and which has a comment by "Toddius" -# (2005-03-31 06:05 +0700) saying "Mongolia actually has 3.5 time zones. -# The West (OLGII) is +7 GMT, most of the country is ULAT is +8 GMT -# and some Eastern provinces are +9 GMT but Sükhbaatar Aimag is SUHK +8.5 GMT. -# The SUKH timezone is new this year, it is one of the few things the -# parliament passed during the tumultuous winter session." -# For now, let's ignore this information, until we have more confirmation. - -# From Ganbold Ts. (2007-02-26): -# Parliament of Mongolia has just changed the daylight-saving rule in February. -# They decided not to adopt daylight-saving time.... -# http://www.mongolnews.mn/index.php?module=unuudur&sec=view&id=15742 - -# From Deborah Goldsmith (2008-03-30): -# We received a bug report claiming that the tz database UTC offset for -# Asia/Choibalsan (GMT+09:00) is incorrect, and that it should be GMT -# +08:00 instead. Different sources appear to disagree with the tz -# database on this, e.g.: -# -# http://www.timeanddate.com/worldclock/city.html?n=1026 -# http://www.worldtimeserver.com/current_time_in_MN.aspx -# -# both say GMT+08:00. 
- -# From Steffen Thorsen (2008-03-31): -# eznis airways, which operates several domestic flights, has a flight -# schedule here: -# http://www.eznis.com/Container.jsp?id=112 -# (click the English flag for English) -# -# There it appears that flights between Choibalsan and Ulaanbaatar arrive -# about 1:35 - 1:50 hours later in local clock time, no matter the -# direction, while Ulaanbaatar-Khovd takes 2 hours in the Eastern -# direction and 3:35 back, which indicates that Ulaanbaatar and Khovd are -# in different time zones (like we know about), while Choibalsan and -# Ulaanbaatar are in the same time zone (correction needed). - -# From Arthur David Olson (2008-05-19): -# Assume that Choibalsan is indeed offset by 8:00. -# XXX--in the absence of better information, assume that transition -# was at the start of 2008-03-31 (the day of Steffen Thorsen's report); -# this is almost surely wrong. - -# From Ganbold Tsagaankhuu (2015-03-10): -# It seems like yesterday Mongolian Government meeting has concluded to use -# daylight saving time in Mongolia.... Starting at 2:00AM of last Saturday of -# March 2015, daylight saving time starts. And 00:00AM of last Saturday of -# September daylight saving time ends. Source: -# http://zasag.mn/news/view/8969 - -# Rule NAME FROM TO TYPE IN ON AT SAVE LETTER/S -Rule Mongol 1983 1984 - Apr 1 0:00 1:00 S -Rule Mongol 1983 only - Oct 1 0:00 0 - -# Shanks & Pottenger and IATA SSIM say 1990s switches occurred at 00:00, -# but McDow says the 2001 switches occurred at 02:00. Also, IATA SSIM -# (1996-09) says 1996-10-25. Go with Shanks & Pottenger through 1998. -# -# Shanks & Pottenger say that the Sept. 1984 through Sept. 1990 switches -# in Choibalsan (more precisely, in Dornod and Sükhbaatar) took place -# at 02:00 standard time, not at 00:00 local time as in the rest of -# the country. That would be odd, and possibly is a result of their -# correction of 02:00 (in the previous edition) not being done correctly -# in the latest edition; so ignore it for now. - -# From Ganbold Tsagaankhuu (2017-02-09): -# Mongolian Government meeting has concluded today to cancel daylight -# saving time adoption in Mongolia. Source: http://zasag.mn/news/view/16192 - -Rule Mongol 1985 1998 - Mar lastSun 0:00 1:00 S -Rule Mongol 1984 1998 - Sep lastSun 0:00 0 - -# IATA SSIM (1999-09) says Mongolia no longer observes DST. -Rule Mongol 2001 only - Apr lastSat 2:00 1:00 S -Rule Mongol 2001 2006 - Sep lastSat 2:00 0 - -Rule Mongol 2002 2006 - Mar lastSat 2:00 1:00 S -Rule Mongol 2015 2016 - Mar lastSat 2:00 1:00 S -Rule Mongol 2015 2016 - Sep lastSat 0:00 0 - - -# Zone NAME GMTOFF RULES FORMAT [UNTIL] -# Hovd, a.k.a. Chovd, Dund-Us, Dzhargalant, Khovd, Jirgalanta -Zone Asia/Hovd 6:06:36 - LMT 1905 Aug - 6:00 - +06 1978 - 7:00 Mongol +07/+08 -# Ulaanbaatar, a.k.a. Ulan Bataar, Ulan Bator, Urga -Zone Asia/Ulaanbaatar 7:07:32 - LMT 1905 Aug - 7:00 - +07 1978 - 8:00 Mongol +08/+09 -# Choibalsan, a.k.a. Bajan Tümen, Bajan Tumen, Chojbalsan, -# Choybalsan, Sanbejse, Tchoibalsan -Zone Asia/Choibalsan 7:38:00 - LMT 1905 Aug - 7:00 - +07 1978 - 8:00 - +08 1983 Apr - 9:00 Mongol +09/+10 2008 Mar 31 - 8:00 Mongol +08/+09 - -# Nepal -# Zone NAME GMTOFF RULES FORMAT [UNTIL] -Zone Asia/Kathmandu 5:41:16 - LMT 1920 - 5:30 - +0530 1986 - 5:45 - +0545 - -# Oman -# See Asia/Dubai. 
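Offsets like Nepal's +05:45 above can be spot-checked against a compiled copy of this data at runtime. A small sketch using Python's standard zoneinfo module (Python 3.9+); note that the result reflects whatever tzdata is installed on the system, which may be newer than the snapshot removed in this diff:

    from datetime import datetime, timezone
    from zoneinfo import ZoneInfo  # Python 3.9+; may need the "tzdata" PyPI package if no system zoneinfo exists

    ktm = datetime(2018, 1, 1, tzinfo=timezone.utc).astimezone(ZoneInfo("Asia/Kathmandu"))
    print(ktm.utcoffset())  # 5:45:00 -- the +0545 offset in effect since 1986 per the table above
    print(ktm.tzname())     # '+0545' -- numeric abbreviation matching the FORMAT column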
- -# Pakistan - -# From Rives McDow (2002-03-13): -# I have been advised that Pakistan has decided to adopt dst on a -# TRIAL basis for one year, starting 00:01 local time on April 7, 2002 -# and ending at 00:01 local time October 6, 2002. This is what I was -# told, but I believe that the actual time of change may be 00:00; the -# 00:01 was to make it clear which day it was on. - -# From Paul Eggert (2002-03-15): -# Jesper Nørgaard found this URL: -# http://www.pak.gov.pk/public/news/app/app06_dec.htm -# (dated 2001-12-06) which says that the Cabinet adopted a scheme "to -# advance the clocks by one hour on the night between the first -# Saturday and Sunday of April and revert to the original position on -# 15th October each year". This agrees with McDow's 04-07 at 00:00, -# but disagrees about the October transition, and makes it sound like -# it's not on a trial basis. Also, the "between the first Saturday -# and Sunday of April" phrase, if taken literally, means that the -# transition takes place at 00:00 on the first Sunday on or after 04-02. - -# From Paul Eggert (2003-02-09): -# DAWN reported on 2002-10-05 -# that 2002 DST ended that day at midnight. Go with McDow for now. - -# From Steffen Thorsen (2003-03-14): -# According to http://www.dawn.com/2003/03/07/top15.htm -# there will be no DST in Pakistan this year: -# -# ISLAMABAD, March 6: Information and Media Development Minister Sheikh -# Rashid Ahmed on Thursday said the cabinet had reversed a previous -# decision to advance clocks by one hour in summer and put them back by -# one hour in winter with the aim of saving light hours and energy. -# -# The minister told a news conference that the experiment had rather -# shown 8 per cent higher consumption of electricity. - -# From Alex Krivenyshev (2008-05-15): -# -# Here is an article that Pakistan plan to introduce Daylight Saving Time -# on June 1, 2008 for 3 months. -# -# "... The federal cabinet on Wednesday announced a new conservation plan to -# help reduce load shedding by approving the closure of commercial centres at -# 9pm and moving clocks forward by one hour for the next three months. ...." -# -# http://www.worldtimezone.net/dst_news/dst_news_pakistan01.html -# http://www.dailytimes.com.pk/default.asp?page=2008%5C05%5C15%5Cstory_15-5-2008_pg1_4 - -# From Arthur David Olson (2008-05-19): -# XXX--midnight transitions is a guess; 2008 only is a guess. - -# From Alexander Krivenyshev (2008-08-28): -# Pakistan government has decided to keep the watches one-hour advanced -# for another 2 months - plan to return to Standard Time on October 31 -# instead of August 31. -# -# http://www.worldtimezone.com/dst_news/dst_news_pakistan02.html -# http://dailymailnews.com/200808/28/news/dmbrn03.html - -# From Alexander Krivenyshev (2009-04-08): -# Based on previous media reports that "... proposed plan to -# advance clocks by one hour from May 1 will cause disturbance -# to the working schedules rather than bringing discipline in -# official working." -# http://www.thenews.com.pk/daily_detail.asp?id=171280 -# -# recent news that instead of May 2009 - Pakistan plan to -# introduce DST from April 15, 2009 -# -# FYI: Associated Press Of Pakistan -# April 08, 2009 -# Cabinet okays proposal to advance clocks by one hour from April 15 -# http://www.app.com.pk/en_/index.php?option=com_content&task=view&id=73043&Itemid=1 -# http://www.worldtimezone.com/dst_news/dst_news_pakistan05.html -# -# .... 
-# The Federal Cabinet on Wednesday approved the proposal to -# advance clocks in the country by one hour from April 15 to -# conserve energy" - -# From Steffen Thorsen (2009-09-17): -# "The News International," Pakistan reports that: "The Federal -# Government has decided to restore the previous time by moving the -# clocks backward by one hour from October 1. A formal announcement to -# this effect will be made after the Prime Minister grants approval in -# this regard." -# http://www.thenews.com.pk/updates.asp?id=87168 - -# From Alexander Krivenyshev (2009-09-28): -# According to Associated Press Of Pakistan, it is confirmed that -# Pakistan clocks across the country would be turned back by an hour from -# October 1, 2009. -# -# "Clocks to go back one hour from 1 Oct" -# http://www.app.com.pk/en_/index.php?option=com_content&task=view&id=86715&Itemid=2 -# http://www.worldtimezone.com/dst_news/dst_news_pakistan07.htm -# -# From Steffen Thorsen (2009-09-29): -# Now they seem to have changed their mind, November 1 is the new date: -# http://www.thenews.com.pk/top_story_detail.asp?Id=24742 -# "The country's clocks will be reversed by one hour on November 1. -# Officials of Federal Ministry for Interior told this to Geo News on -# Monday." -# -# And more importantly, it seems that these dates will be kept every year: -# "It has now been decided that clocks will be wound forward by one hour -# on April 15 and reversed by an hour on November 1 every year without -# obtaining prior approval, the officials added." -# -# We have confirmed this year's end date with both with the Ministry of -# Water and Power and the Pakistan Electric Power Company: -# http://www.timeanddate.com/news/time/pakistan-ends-dst09.html - -# From Christoph Göhre (2009-10-01): -# [T]he German Consulate General in Karachi reported me today that Pakistan -# will go back to standard time on 1st of November. - -# From Steffen Thorsen (2010-03-26): -# Steffen Thorsen wrote: -# > On Thursday (2010-03-25) it was announced that DST would start in -# > Pakistan on 2010-04-01. -# > -# > Then today, the president said that they might have to revert the -# > decision if it is not supported by the parliament. So at the time -# > being, it seems unclear if DST will be actually observed or not - but -# > April 1 could be a more likely date than April 15. -# Now, it seems that the decision to not observe DST in final: -# -# "Govt Withdraws Plan To Advance Clocks" -# http://www.apakistannews.com/govt-withdraws-plan-to-advance-clocks-172041 -# -# "People laud PM's announcement to end DST" -# http://www.app.com.pk/en_/index.php?option=com_content&task=view&id=99374&Itemid=2 - -# Rule NAME FROM TO TYPE IN ON AT SAVE LETTER/S -Rule Pakistan 2002 only - Apr Sun>=2 0:00 1:00 S -Rule Pakistan 2002 only - Oct Sun>=2 0:00 0 - -Rule Pakistan 2008 only - Jun 1 0:00 1:00 S -Rule Pakistan 2008 2009 - Nov 1 0:00 0 - -Rule Pakistan 2009 only - Apr 15 0:00 1:00 S - -# Zone NAME GMTOFF RULES FORMAT [UNTIL] -Zone Asia/Karachi 4:28:12 - LMT 1907 - 5:30 - +0530 1942 Sep - 5:30 1:00 +0630 1945 Oct 15 - 5:30 - +0530 1951 Sep 30 - 5:00 - +05 1971 Mar 26 - 5:00 Pakistan PK%sT # Pakistan Time - -# Palestine - -# From Amos Shapir (1998-02-15): -# -# From 1917 until 1948-05-15, all of Palestine, including the parts now -# known as the Gaza Strip and the West Bank, was under British rule. -# Therefore the rules given for Israel for that period, apply there too... 
-# -# The Gaza Strip was under Egyptian rule between 1948-05-15 until 1967-06-05 -# (except a short occupation by Israel from 1956-11 till 1957-03, but no -# time zone was affected then). It was never formally annexed to Egypt, -# though. -# -# The rest of Palestine was under Jordanian rule at that time, formally -# annexed in 1950 as the West Bank (and the word "Trans" was dropped from -# the country's previous name of "the Hashemite Kingdom of the -# Trans-Jordan"). So the rules for Jordan for that time apply. Major -# towns in that area are Nablus (Shchem), El-Halil (Hebron), Ramallah, and -# East Jerusalem. -# -# Both areas were occupied by Israel in June 1967, but not annexed (except -# for East Jerusalem). They were on Israel time since then; there might -# have been a Military Governor's order about time zones, but I'm not aware -# of any (such orders may have been issued semi-annually whenever summer -# time was in effect, but maybe the legal aspect of time was just neglected). -# -# The Palestinian Authority was established in 1993, and got hold of most -# towns in the West Bank and Gaza by 1995. I know that in order to -# demonstrate...independence, they have been switching to -# summer time and back on a different schedule than Israel's, but I don't -# know when this was started, or what algorithm is used (most likely the -# Jordanian one). -# -# To summarize, the table should probably look something like that: -# -# Area \ when | 1918-1947 | 1948-1967 | 1967-1995 | 1996- -# ------------+-----------+-----------+-----------+----------- -# Israel | Zion | Zion | Zion | Zion -# West bank | Zion | Jordan | Zion | Jordan -# Gaza | Zion | Egypt | Zion | Jordan -# -# I guess more info may be available from the PA's web page (if/when they -# have one). - -# From Paul Eggert (2006-03-22): -# Shanks & Pottenger write that Gaza did not observe DST until 1957, but go -# with Shapir and assume that it observed DST from 1940 through 1947, -# and that it used Jordanian rules starting in 1996. -# We don't yet need a separate entry for the West Bank, since -# the only differences between it and Gaza that we know about -# occurred before our cutoff date of 1970. -# However, as we get more information, we may need to add entries -# for parts of the West Bank as they transitioned from Israel's rules -# to Palestine's rules. - -# From IINS News Service - Israel - 1998-03-23 10:38:07 Israel time, -# forwarded by Ephraim Silverberg: -# -# Despite the fact that Israel changed over to daylight savings time -# last week, the PLO Authority (PA) has decided not to turn its clocks -# one-hour forward at this time. As a sign of independence from Israeli rule, -# the PA has decided to implement DST in April. - -# From Paul Eggert (1999-09-20): -# Daoud Kuttab writes in Holiday havoc -# http://www.jpost.com/com/Archive/22.Apr.1999/Opinion/Article-2.html -# (Jerusalem Post, 1999-04-22) that -# the Palestinian National Authority changed to DST on 1999-04-15. -# I vaguely recall that they switch back in October (sorry, forgot the source). -# For now, let's assume that the spring switch was at 24:00, -# and that they switch at 0:00 on the 3rd Fridays of April and October. - -# From Paul Eggert (2005-11-22): -# Starting 2004 transitions are from Steffen Thorsen's web site timeanddate.com. - -# From Steffen Thorsen (2005-11-23): -# A user from Gaza reported that Gaza made the change early because of -# the Ramadan. 
Next year Ramadan will be even earlier, so I think -# there is a good chance next year's end date will be around two weeks -# earlier - the same goes for Jordan. - -# From Steffen Thorsen (2006-08-17): -# I was informed by a user in Bethlehem that in Bethlehem it started the -# same day as Israel, and after checking with other users in the area, I -# was informed that they started DST one day after Israel. I was not -# able to find any authoritative sources at the time, nor details if -# Gaza changed as well, but presumed Gaza to follow the same rules as -# the West Bank. - -# From Steffen Thorsen (2006-09-26): -# according to the Palestine News Network (2006-09-19): -# http://english.pnn.ps/index.php?option=com_content&task=view&id=596&Itemid=5 -# > The Council of Ministers announced that this year its winter schedule -# > will begin early, as of midnight Thursday. It is also time to turn -# > back the clocks for winter. Friday will begin an hour late this week. -# I guess it is likely that next year's date will be moved as well, -# because of the Ramadan. - -# From Jesper Nørgaard Welen (2007-09-18): -# According to Steffen Thorsen's web site the Gaza Strip and the rest of the -# Palestinian territories left DST early on 13.th. of September at 2:00. - -# From Paul Eggert (2007-09-20): -# My understanding is that Gaza and the West Bank disagree even over when -# the weekend is (Thursday+Friday versus Friday+Saturday), so I'd be a bit -# surprised if they agreed about DST. But for now, assume they agree. -# For lack of better information, predict that future changes will be -# the 2nd Thursday of September at 02:00. - -# From Alexander Krivenyshev (2008-08-28): -# Here is an article, that Mideast running on different clocks at Ramadan. -# -# Gaza Strip (as Egypt) ended DST at midnight Thursday (Aug 28, 2008), while -# the West Bank will end Daylight Saving Time at midnight Sunday (Aug 31, 2008). -# -# http://www.guardian.co.uk/world/feedarticle/7759001 -# http://www.abcnews.go.com/International/wireStory?id=5676087 -# http://www.worldtimezone.com/dst_news/dst_news_gazastrip01.html - -# From Alexander Krivenyshev (2009-03-26): -# According to the Palestine News Network (arabic.pnn.ps), Palestinian -# government decided to start Daylight Time on Thursday night March -# 26 and continue until the night of 27 September 2009. -# -# (in Arabic) -# http://arabic.pnn.ps/index.php?option=com_content&task=view&id=50850 -# -# (English translation) -# http://www.worldtimezone.com/dst_news/dst_news_westbank01.html - -# From Steffen Thorsen (2009-08-31): -# Palestine's Council of Ministers announced that they will revert back to -# winter time on Friday, 2009-09-04. -# -# One news source: -# http://www.safa.ps/ara/?action=showdetail&seid=4158 -# (Palestinian press agency, Arabic), -# Google translate: "Decided that the Palestinian government in Ramallah -# headed by Salam Fayyad, the start of work in time for the winter of -# 2009, starting on Friday approved the fourth delay Sept. clock sixty -# minutes per hour as of Friday morning." -# -# We are not sure if Gaza will do the same, last year they had a different -# end date, we will keep this page updated: -# http://www.timeanddate.com/news/time/westbank-gaza-dst-2009.html - -# From Alexander Krivenyshev (2009-09-02): -# Seems that Gaza Strip will go back to Winter Time same date as West Bank. -# -# According to Palestinian Ministry Of Interior, West Bank and Gaza Strip plan -# to change time back to Standard time on September 4, 2009. 
-# -# "Winter time unite the West Bank and Gaza" -# (from Palestinian National Authority): -# http://www.moi.gov.ps/en/?page=633167343250594025&nid=11505 -# http://www.worldtimezone.com/dst_news/dst_news_gazastrip02.html - -# From Alexander Krivenyshev (2010-03-19): -# According to Voice of Palestine DST will last for 191 days, from March -# 26, 2010 till "the last Sunday before the tenth day of Tishri -# (October), each year" (October 03, 2010?) -# -# http://palvoice.org/forums/showthread.php?t=245697 -# (in Arabic) -# http://www.worldtimezone.com/dst_news/dst_news_westbank03.html - -# From Steffen Thorsen (2010-03-24): -# ...Ma'an News Agency reports that Hamas cabinet has decided it will -# start one day later, at 12:01am. Not sure if they really mean 12:01am or -# noon though: -# -# http://www.maannews.net/eng/ViewDetails.aspx?ID=271178 -# (Ma'an News Agency) -# "At 12:01am Friday, clocks in Israel and the West Bank will change to -# 1:01am, while Gaza clocks will change at 12:01am Saturday morning." - -# From Steffen Thorsen (2010-08-11): -# According to several sources, including -# http://www.maannews.net/eng/ViewDetails.aspx?ID=306795 -# the clocks were set back one hour at 2010-08-11 00:00:00 local time in -# Gaza and the West Bank. -# Some more background info: -# http://www.timeanddate.com/news/time/westbank-gaza-end-dst-2010.html - -# From Steffen Thorsen (2011-08-26): -# Gaza and the West Bank did go back to standard time in the beginning of -# August, and will now enter daylight saving time again on 2011-08-30 -# 00:00 (so two periods of DST in 2011). The pause was because of -# Ramadan. -# -# http://www.maannews.net/eng/ViewDetails.aspx?ID=416217 -# Additional info: -# http://www.timeanddate.com/news/time/palestine-dst-2011.html - -# From Alexander Krivenyshev (2011-08-27): -# According to the article in The Jerusalem Post: -# "...Earlier this month, the Palestinian government in the West Bank decided to -# move to standard time for 30 days, during Ramadan. The Palestinians in the -# Gaza Strip accepted the change and also moved their clocks one hour back. -# The Hamas government said on Saturday that it won't observe summertime after -# the Muslim feast of Id al-Fitr, which begins on Tuesday..." -# ... -# http://www.jpost.com/MiddleEast/Article.aspx?id=235650 -# http://www.worldtimezone.com/dst_news/dst_news_gazastrip05.html -# The rules for Egypt are stolen from the 'africa' file. - -# From Steffen Thorsen (2011-09-30): -# West Bank did end Daylight Saving Time this morning/midnight (2011-09-30 -# 00:00). -# So West Bank and Gaza now have the same time again. -# -# Many sources, including: -# http://www.maannews.net/eng/ViewDetails.aspx?ID=424808 - -# From Steffen Thorsen (2012-03-26): -# Palestinian news sources tell that both Gaza and West Bank will start DST -# on Friday (Thursday midnight, 2012-03-29 24:00). -# Some of many sources in Arabic: -# http://www.samanews.com/index.php?act=Show&id=122638 -# -# http://safa.ps/details/news/74352/%D8%A8%D8%AF%D8%A1-%D8%A7%D9%84%D8%AA%D9%88%D9%82%D9%8A%D8%AA-%D8%A7%D9%84%D8%B5%D9%8A%D9%81%D9%8A-%D8%A8%D8%A7%D9%84%D8%B6%D9%81%D8%A9-%D9%88%D8%BA%D8%B2%D8%A9-%D9%84%D9%8A%D9%84%D8%A9-%D8%A7%D9%84%D8%AC%D9%85%D8%B9%D8%A9.html -# -# Our brief summary: -# http://www.timeanddate.com/news/time/gaza-west-bank-dst-2012.html - -# From Steffen Thorsen (2013-03-26): -# The following news sources tells that Palestine will "start daylight saving -# time from midnight on Friday, March 29, 2013" (translated). 
-# [These are in Arabic and are for Gaza and for Ramallah, respectively.] -# http://www.samanews.com/index.php?act=Show&id=154120 -# http://safa.ps/details/news/99844/%D8%B1%D8%A7%D9%85-%D8%A7%D9%84%D9%84%D9%87-%D8%A8%D8%AF%D8%A1-%D8%A7%D9%84%D8%AA%D9%88%D9%82%D9%8A%D8%AA-%D8%A7%D9%84%D8%B5%D9%8A%D9%81%D9%8A-29-%D8%A7%D9%84%D8%AC%D8%A7%D8%B1%D9%8A.html - -# From Steffen Thorsen (2013-09-24): -# The Gaza and West Bank are ending DST Thursday at midnight -# (2013-09-27 00:00:00) (one hour earlier than last year...). -# This source in English, says "that winter time will go into effect -# at midnight on Thursday in the West Bank and Gaza Strip": -# http://english.wafa.ps/index.php?action=detail&id=23246 -# official source...: -# http://www.palestinecabinet.gov.ps/ar/Views/ViewDetails.aspx?pid=1252 - -# From Steffen Thorsen (2015-03-03): -# Sources such as http://www.alquds.com/news/article/view/id/548257 -# and http://www.raya.ps/ar/news/890705.html say Palestine areas will -# start DST on 2015-03-28 00:00 which is one day later than expected. -# -# From Paul Eggert (2015-03-03): -# http://www.timeanddate.com/time/change/west-bank/ramallah?year=2014 -# says that the fall 2014 transition was Oct 23 at 24:00. - -# From Hannah Kreitem (2016-03-09): -# http://www.palestinecabinet.gov.ps/WebSite/ar/ViewDetails?ID=31728 -# [Google translation]: "The Council also decided to start daylight -# saving in Palestine as of one o'clock on Saturday morning, -# 2016-03-26, to provide the clock 60 minutes ahead." -# -# From Paul Eggert (2016-03-12): -# Predict spring transitions on March's last Saturday at 01:00 from now on. - -# From Sharef Mustafa (2016-10-19): -# [T]he Palestinian cabinet decision (Mar 8th 2016) published on -# http://www.palestinecabinet.gov.ps/WebSite/Upload/Decree/GOV_17/16032016134830.pdf -# states that summer time will end on Oct 29th at 01:00. -# -# From Tim Parenti (2016-10-19): -# Predict fall transitions on October's last Saturday at 01:00 from now on. -# This is consistent with the 2016 transition as well as our spring -# predictions. 
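The predictions above (spring transitions at 01:00 on March's last Saturday, fall transitions at 01:00 on October's last Saturday) appear in the Palestine rules further down as the ON fields "Mar lastSat" and "Oct lastSat". As a minimal illustration of how such a field resolves to a calendar date, here is a short Python sketch (a hypothetical helper, not part of the tz tooling) that reproduces the two 2016 dates cited above:

    from datetime import date, timedelta

    def last_weekday(year, month, weekday):
        """Resolve a zic-style 'lastSat'/'lastSun' ON field: the last
        occurrence of `weekday` (Mon=0 .. Sun=6) in the given month."""
        # Start from the last day of the month and walk backwards.
        d = date(year + (month == 12), month % 12 + 1, 1) - timedelta(days=1)
        while d.weekday() != weekday:
            d -= timedelta(days=1)
        return d

    SATURDAY = 5
    print(last_weekday(2016, 3, SATURDAY))   # 2016-03-26, the spring transition cited above
    print(last_weekday(2016, 10, SATURDAY))  # 2016-10-29, the fall transition cited above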
-# -# From Paul Eggert (2016-10-19): -# It's also consistent with predictions in the following URLs today: -# http://www.timeanddate.com/time/change/gaza-strip/gaza -# http://www.timeanddate.com/time/change/west-bank/hebron - -# Rule NAME FROM TO TYPE IN ON AT SAVE LETTER/S -Rule EgyptAsia 1957 only - May 10 0:00 1:00 S -Rule EgyptAsia 1957 1958 - Oct 1 0:00 0 - -Rule EgyptAsia 1958 only - May 1 0:00 1:00 S -Rule EgyptAsia 1959 1967 - May 1 1:00 1:00 S -Rule EgyptAsia 1959 1965 - Sep 30 3:00 0 - -Rule EgyptAsia 1966 only - Oct 1 3:00 0 - - -Rule Palestine 1999 2005 - Apr Fri>=15 0:00 1:00 S -Rule Palestine 1999 2003 - Oct Fri>=15 0:00 0 - -Rule Palestine 2004 only - Oct 1 1:00 0 - -Rule Palestine 2005 only - Oct 4 2:00 0 - -Rule Palestine 2006 2007 - Apr 1 0:00 1:00 S -Rule Palestine 2006 only - Sep 22 0:00 0 - -Rule Palestine 2007 only - Sep Thu>=8 2:00 0 - -Rule Palestine 2008 2009 - Mar lastFri 0:00 1:00 S -Rule Palestine 2008 only - Sep 1 0:00 0 - -Rule Palestine 2009 only - Sep Fri>=1 1:00 0 - -Rule Palestine 2010 only - Mar 26 0:00 1:00 S -Rule Palestine 2010 only - Aug 11 0:00 0 - -Rule Palestine 2011 only - Apr 1 0:01 1:00 S -Rule Palestine 2011 only - Aug 1 0:00 0 - -Rule Palestine 2011 only - Aug 30 0:00 1:00 S -Rule Palestine 2011 only - Sep 30 0:00 0 - -Rule Palestine 2012 2014 - Mar lastThu 24:00 1:00 S -Rule Palestine 2012 only - Sep 21 1:00 0 - -Rule Palestine 2013 only - Sep Fri>=21 0:00 0 - -Rule Palestine 2014 2015 - Oct Fri>=21 0:00 0 - -Rule Palestine 2015 only - Mar lastFri 24:00 1:00 S -Rule Palestine 2016 max - Mar lastSat 1:00 1:00 S -Rule Palestine 2016 max - Oct lastSat 1:00 0 - - -# Zone NAME GMTOFF RULES FORMAT [UNTIL] -Zone Asia/Gaza 2:17:52 - LMT 1900 Oct - 2:00 Zion EET/EEST 1948 May 15 - 2:00 EgyptAsia EE%sT 1967 Jun 5 - 2:00 Zion I%sT 1996 - 2:00 Jordan EE%sT 1999 - 2:00 Palestine EE%sT 2008 Aug 29 0:00 - 2:00 - EET 2008 Sep - 2:00 Palestine EE%sT 2010 - 2:00 - EET 2010 Mar 27 0:01 - 2:00 Palestine EE%sT 2011 Aug 1 - 2:00 - EET 2012 - 2:00 Palestine EE%sT - -Zone Asia/Hebron 2:20:23 - LMT 1900 Oct - 2:00 Zion EET/EEST 1948 May 15 - 2:00 EgyptAsia EE%sT 1967 Jun 5 - 2:00 Zion I%sT 1996 - 2:00 Jordan EE%sT 1999 - 2:00 Palestine EE%sT - -# Paracel Is -# no information - -# Philippines -# On 1844-08-16, Narciso Clavería, governor-general of the -# Philippines, issued a proclamation announcing that 1844-12-30 was to -# be immediately followed by 1845-01-01; see R.H. van Gent's -# History of the International Date Line -# http://www.staff.science.uu.nl/~gent0113/idl/idl_philippines.htm -# The rest of the data entries are from Shanks & Pottenger. - -# From Jesper Nørgaard Welen (2006-04-26): -# ... claims that Philippines had DST last time in 1990: -# http://story.philippinetimes.com/p.x/ct/9/id/145be20cc6b121c0/cid/3e5bbccc730d258c/ -# [a story dated 2006-04-25 by Cris Larano of Dow Jones Newswires, -# but no details] - -# From Paul Eggert (2014-08-14): -# The following source says DST may be instituted November-January and again -# March-June, but this is not definite. It also says DST was last proclaimed -# during the Ramos administration (1992-1998); but again, no details. -# Carcamo D. PNoy urged to declare use of daylight saving time. 
-# Philippine Star 2014-08-05 -# http://www.philstar.com/headlines/2014/08/05/1354152/pnoy-urged-declare-use-daylight-saving-time - -# Rule NAME FROM TO TYPE IN ON AT SAVE LETTER/S -Rule Phil 1936 only - Nov 1 0:00 1:00 S -Rule Phil 1937 only - Feb 1 0:00 0 - -Rule Phil 1954 only - Apr 12 0:00 1:00 S -Rule Phil 1954 only - Jul 1 0:00 0 - -Rule Phil 1978 only - Mar 22 0:00 1:00 S -Rule Phil 1978 only - Sep 21 0:00 0 - -# Zone NAME GMTOFF RULES FORMAT [UNTIL] -Zone Asia/Manila -15:56:00 - LMT 1844 Dec 31 - 8:04:00 - LMT 1899 May 11 - 8:00 Phil +08/+09 1942 May - 9:00 - +09 1944 Nov - 8:00 Phil +08/+09 - -# Qatar -# Zone NAME GMTOFF RULES FORMAT [UNTIL] -Zone Asia/Qatar 3:26:08 - LMT 1920 # Al Dawhah / Doha - 4:00 - +04 1972 Jun - 3:00 - +03 -Link Asia/Qatar Asia/Bahrain - -# Saudi Arabia -# -# From Paul Eggert (2014-07-15): -# Time in Saudi Arabia and other countries in the Arabian peninsula was not -# standardized until relatively recently; we don't know when, and possibly it -# has never been made official. Richard P Hunt, in "Islam city yielding to -# modern times", New York Times (1961-04-09), p 20, wrote that only airlines -# observed standard time, and that people in Jeddah mostly observed quasi-solar -# time, doing so by setting their watches at sunrise to 6 o'clock (or to 12 -# o'clock for "Arab" time). -# -# The TZ database cannot represent quasi-solar time; airline time is the best -# we can do. The 1946 foreign air news digest of the U.S. Civil Aeronautics -# Board (OCLC 42299995) reported that the "... Arabian Government, inaugurated -# a weekly Dhahran-Cairo service, via the Saudi Arabian cities of Riyadh and -# Jidda, on March 14, 1947". Shanks & Pottenger guessed 1950; go with the -# earlier date. -# -# Shanks & Pottenger also state that until 1968-05-01 Saudi Arabia had two -# time zones; the other zone, at UT +04, was in the far eastern part of -# the country. Ignore this, as it's before our 1970 cutoff. -# -# Zone NAME GMTOFF RULES FORMAT [UNTIL] -Zone Asia/Riyadh 3:06:52 - LMT 1947 Mar 14 - 3:00 - +03 -Link Asia/Riyadh Asia/Aden # Yemen -Link Asia/Riyadh Asia/Kuwait - -# Singapore -# taken from Mok Ly Yng (2003-10-30) -# http://www.math.nus.edu.sg/aslaksen/teaching/timezone.html -# Zone NAME GMTOFF RULES FORMAT [UNTIL] -Zone Asia/Singapore 6:55:25 - LMT 1901 Jan 1 - 6:55:25 - SMT 1905 Jun 1 # Singapore M.T. - 7:00 - +07 1933 Jan 1 - 7:00 0:20 +0720 1936 Jan 1 - 7:20 - +0720 1941 Sep 1 - 7:30 - +0730 1942 Feb 16 - 9:00 - +09 1945 Sep 12 - 7:30 - +0730 1982 Jan 1 - 8:00 - +08 - -# Spratly Is -# no information - -# Sri Lanka - -# From Paul Eggert (2013-02-21): -# Milne says "Madras mean time use from May 1, 1898. Prior to this Colombo -# mean time, 5h. 4m. 21.9s. F., was used." But 5:04:21.9 differs considerably -# from Colombo's meridian 5:19:24, so for now ignore Milne and stick with -# Shanks and Pottenger. - -# From Paul Eggert (1996-09-03): -# "Sri Lanka advances clock by an hour to avoid blackout" -# (, 1996-05-24, -# no longer available as of 1999-08-17) -# reported "the country's standard time will be put forward by one hour at -# midnight Friday (1830 GMT) 'in the light of the present power crisis'." -# -# From Dharmasiri Senanayake, Sri Lanka Media Minister (1996-10-24), as quoted -# by Shamindra in Daily News - Hot News Section -# (1996-10-26): -# With effect from 12.30 a.m. on 26th October 1996 -# Sri Lanka will be six (06) hours ahead of GMT. 
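The Asia/Manila entry above represents Clavería's 1844 calendar reform as a 24-hour jump in local mean time: the pre-reform offset of -15:56 (the Philippines then kept the American-side date) gives way to +8:04, so 1844-12-31 never appears on local clocks. A short standalone Python check of that arithmetic (it does not consult the compiled tz database, and the variable names are illustrative only):

    from datetime import datetime, timedelta

    OLD = timedelta(hours=-15, minutes=-56)   # LMT, American-side reckoning, pre-reform
    NEW = timedelta(hours=8, minutes=4)       # LMT, Asian-side reckoning, post-reform

    print(NEW - OLD)                          # 1 day, 0:00:00 -- exactly 24 hours

    # The reform takes effect when old-style local time reaches 1844-12-31 00:00:
    transition_utc = datetime(1844, 12, 31, 15, 56)
    print(transition_utc + OLD)               # 1844-12-31 00:00, old-style local time
    print(transition_utc + NEW)               # 1845-01-01 00:00, new-style local time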
- -# From Jesper Nørgaard Welen (2006-04-14), quoting Sri Lanka News Online -# (2006-04-13): -# 0030 hrs on April 15, 2006 (midnight of April 14, 2006 +30 minutes) -# at present, become 2400 hours of April 14, 2006 (midnight of April 14, 2006). - -# From Peter Apps and Ranga Sirila of Reuters (2006-04-12) in: -# http://today.reuters.co.uk/news/newsArticle.aspx?type=scienceNews&storyID=2006-04-12T172228Z_01_COL295762_RTRIDST_0_SCIENCE-SRILANKA-TIME-DC.XML -# [The Tamil Tigers] never accepted the original 1996 time change and simply -# kept their clocks set five and a half hours ahead of Greenwich Mean -# Time (GMT), in line with neighbor India. -# From Paul Eggert (2006-04-18): -# People who live in regions under Tamil control can use [TZ='Asia/Kolkata'], -# as that zone has agreed with the Tamil areas since our cutoff date of 1970. - -# From Sadika Sumanapala (2016-10-19): -# According to http://www.sltime.org (maintained by Measurement Units, -# Standards & Services Department, Sri Lanka) abbreviation for Sri Lanka -# standard time is SLST. -# -# From Paul Eggert (2016-10-18): -# "SLST" seems to be reasonably recent and rarely-used outside time -# zone nerd sources. I searched Google News and found three uses of -# it in the International Business Times of India in February and -# March of this year when discussing cricket match times, but nothing -# since then (though there has been a lot of cricket) and nothing in -# other English-language news sources. Our old abbreviation "LKT" is -# even worse. For now, let's use a numeric abbreviation; we can -# switch to "SLST" if it catches on. - -# Zone NAME GMTOFF RULES FORMAT [UNTIL] -Zone Asia/Colombo 5:19:24 - LMT 1880 - 5:19:32 - MMT 1906 # Moratuwa Mean Time - 5:30 - +0530 1942 Jan 5 - 5:30 0:30 +06 1942 Sep - 5:30 1:00 +0630 1945 Oct 16 2:00 - 5:30 - +0530 1996 May 25 0:00 - 6:30 - +0630 1996 Oct 26 0:30 - 6:00 - +06 2006 Apr 15 0:30 - 5:30 - +0530 - -# Syria -# Rule NAME FROM TO TYPE IN ON AT SAVE LETTER/S -Rule Syria 1920 1923 - Apr Sun>=15 2:00 1:00 S -Rule Syria 1920 1923 - Oct Sun>=1 2:00 0 - -Rule Syria 1962 only - Apr 29 2:00 1:00 S -Rule Syria 1962 only - Oct 1 2:00 0 - -Rule Syria 1963 1965 - May 1 2:00 1:00 S -Rule Syria 1963 only - Sep 30 2:00 0 - -Rule Syria 1964 only - Oct 1 2:00 0 - -Rule Syria 1965 only - Sep 30 2:00 0 - -Rule Syria 1966 only - Apr 24 2:00 1:00 S -Rule Syria 1966 1976 - Oct 1 2:00 0 - -Rule Syria 1967 1978 - May 1 2:00 1:00 S -Rule Syria 1977 1978 - Sep 1 2:00 0 - -Rule Syria 1983 1984 - Apr 9 2:00 1:00 S -Rule Syria 1983 1984 - Oct 1 2:00 0 - -Rule Syria 1986 only - Feb 16 2:00 1:00 S -Rule Syria 1986 only - Oct 9 2:00 0 - -Rule Syria 1987 only - Mar 1 2:00 1:00 S -Rule Syria 1987 1988 - Oct 31 2:00 0 - -Rule Syria 1988 only - Mar 15 2:00 1:00 S -Rule Syria 1989 only - Mar 31 2:00 1:00 S -Rule Syria 1989 only - Oct 1 2:00 0 - -Rule Syria 1990 only - Apr 1 2:00 1:00 S -Rule Syria 1990 only - Sep 30 2:00 0 - -Rule Syria 1991 only - Apr 1 0:00 1:00 S -Rule Syria 1991 1992 - Oct 1 0:00 0 - -Rule Syria 1992 only - Apr 8 0:00 1:00 S -Rule Syria 1993 only - Mar 26 0:00 1:00 S -Rule Syria 1993 only - Sep 25 0:00 0 - -# IATA SSIM (1998-02) says 1998-04-02; -# (1998-09) says 1999-03-29 and 1999-09-29; (1999-02) says 1999-04-02, -# 2000-04-02, and 2001-04-02; (1999-09) says 2000-03-31 and 2001-03-31; -# (2006) says 2006-03-31 and 2006-09-22; -# for now ignore all these claims and go with Shanks & Pottenger, -# except for the 2006-09-22 claim (which seems right for Ramadan). 
-Rule Syria 1994 1996 - Apr 1 0:00 1:00 S -Rule Syria 1994 2005 - Oct 1 0:00 0 - -Rule Syria 1997 1998 - Mar lastMon 0:00 1:00 S -Rule Syria 1999 2006 - Apr 1 0:00 1:00 S -# From Stephen Colebourne (2006-09-18): -# According to IATA data, Syria will change DST on 21st September [21:00 UTC] -# this year [only].... This is probably related to Ramadan, like Egypt. -Rule Syria 2006 only - Sep 22 0:00 0 - -# From Paul Eggert (2007-03-29): -# Today the AP reported "Syria will switch to summertime at midnight Thursday." -# http://www.iht.com/articles/ap/2007/03/29/africa/ME-GEN-Syria-Time-Change.php -Rule Syria 2007 only - Mar lastFri 0:00 1:00 S -# From Jesper Nørgaard (2007-10-27): -# The sister center ICARDA of my work CIMMYT is confirming that Syria DST will -# not take place 1st November at 0:00 o'clock but 1st November at 24:00 or -# rather Midnight between Thursday and Friday. This does make more sense than -# having it between Wednesday and Thursday (two workdays in Syria) since the -# weekend in Syria is not Saturday and Sunday, but Friday and Saturday. So now -# it is implemented at midnight of the last workday before weekend... -# -# From Steffen Thorsen (2007-10-27): -# Jesper Nørgaard Welen wrote: -# -# > "Winter local time in Syria will be observed at midnight of Thursday 1 -# > November 2007, and the clock will be put back 1 hour." -# -# I found confirmation on this in this gov.sy-article (Arabic): -# http://wehda.alwehda.gov.sy/_print_veiw.asp?FileName=12521710520070926111247 -# -# which using Google's translate tools says: -# Council of Ministers also approved the commencement of work on -# identifying the winter time as of Friday, 2/11/2007 where the 60th -# minute delay at midnight Thursday 1/11/2007. -Rule Syria 2007 only - Nov Fri>=1 0:00 0 - - -# From Stephen Colebourne (2008-03-17): -# For everyone's info, I saw an IATA time zone change for [Syria] for -# this month (March 2008) in the last day or so.... -# Country Time Standard --- DST Start --- --- DST End --- DST -# Name Zone Variation Time Date Time Date -# Variation -# Syrian Arab -# Republic SY +0200 2200 03APR08 2100 30SEP08 +0300 -# 2200 02APR09 2100 30SEP09 +0300 -# 2200 01APR10 2100 30SEP10 +0300 - -# From Arthur David Olson (2008-03-17): -# Here's a link to English-language coverage by the Syrian Arab News -# Agency (SANA)... -# http://www.sana.sy/eng/21/2008/03/11/165173.htm -# ...which reads (in part) "The Cabinet approved the suggestion of the -# Ministry of Electricity to begin daylight savings time on Friday April -# 4th, advancing clocks one hour ahead on midnight of Thursday April 3rd." -# Since Syria is two hours east of UTC, the 2200 and 2100 transition times -# shown above match up with midnight in Syria. - -# From Arthur David Olson (2008-03-18): -# My best guess at a Syrian rule is "the Friday nearest April 1"; -# coding that involves either using a "Mar Fri>=29" construct that old time zone -# compilers can't handle or having multiple Rules (a la Israel). -# For now, use "Apr Fri>=1", and go with IATA on a uniform Sep 30 end. - -# From Steffen Thorsen (2008-10-07): -# Syria has now officially decided to end DST on 2008-11-01 this year, -# according to the following article in the Syrian Arab News Agency (SANA). -# -# The article is in Arabic, and seems to tell that they will go back to -# winter time on 2008-11-01 at 00:00 local daylight time (delaying/setting -# clocks back 60 minutes). 
-# -# http://sana.sy/ara/2/2008/10/07/195459.htm - -# From Steffen Thorsen (2009-03-19): -# Syria will start DST on 2009-03-27 00:00 this year according to many sources, -# two examples: -# -# http://www.sana.sy/eng/21/2009/03/17/217563.htm -# (English, Syrian Arab News # Agency) -# http://thawra.alwehda.gov.sy/_View_news2.asp?FileName=94459258720090318012209 -# (Arabic, gov-site) -# -# We have not found any sources saying anything about when DST ends this year. -# -# Our summary -# http://www.timeanddate.com/news/time/syria-dst-starts-march-27-2009.html - -# From Steffen Thorsen (2009-10-27): -# The Syrian Arab News Network on 2009-09-29 reported that Syria will -# revert back to winter (standard) time on midnight between Thursday -# 2009-10-29 and Friday 2009-10-30: -# http://www.sana.sy/ara/2/2009/09/29/247012.htm (Arabic) - -# From Arthur David Olson (2009-10-28): -# We'll see if future DST switching times turn out to be end of the last -# Thursday of the month or the start of the last Friday of the month or -# something else. For now, use the start of the last Friday. - -# From Steffen Thorsen (2010-03-17): -# The "Syrian News Station" reported on 2010-03-16 that the Council of -# Ministers has decided that Syria will start DST on midnight Thursday -# 2010-04-01: (midnight between Thursday and Friday): -# http://sns.sy/sns/?path=news/read/11421 (Arabic) - -# From Steffen Thorsen (2012-03-26): -# Today, Syria's government announced that they will start DST early on Friday -# (00:00). This is a bit earlier than the past two years. -# -# From Syrian Arab News Agency, in Arabic: -# http://www.sana.sy/ara/2/2012/03/26/408215.htm -# -# Our brief summary: -# http://www.timeanddate.com/news/time/syria-dst-2012.html - -# From Arthur David Olson (2012-03-27): -# Assume last Friday in March going forward XXX. - -Rule Syria 2008 only - Apr Fri>=1 0:00 1:00 S -Rule Syria 2008 only - Nov 1 0:00 0 - -Rule Syria 2009 only - Mar lastFri 0:00 1:00 S -Rule Syria 2010 2011 - Apr Fri>=1 0:00 1:00 S -Rule Syria 2012 max - Mar lastFri 0:00 1:00 S -Rule Syria 2009 max - Oct lastFri 0:00 0 - - -# Zone NAME GMTOFF RULES FORMAT [UNTIL] -Zone Asia/Damascus 2:25:12 - LMT 1920 # Dimashq - 2:00 Syria EE%sT - -# Tajikistan -# From Shanks & Pottenger. -# Zone NAME GMTOFF RULES FORMAT [UNTIL] -Zone Asia/Dushanbe 4:35:12 - LMT 1924 May 2 - 5:00 - +05 1930 Jun 21 - 6:00 RussiaAsia +06/+07 1991 Mar 31 2:00s - 5:00 1:00 +05/+06 1991 Sep 9 2:00s - 5:00 - +05 - -# Thailand -# Zone NAME GMTOFF RULES FORMAT [UNTIL] -Zone Asia/Bangkok 6:42:04 - LMT 1880 - 6:42:04 - BMT 1920 Apr # Bangkok Mean Time - 7:00 - +07 -Link Asia/Bangkok Asia/Phnom_Penh # Cambodia -Link Asia/Bangkok Asia/Vientiane # Laos - -# Turkmenistan -# From Shanks & Pottenger. -# Zone NAME GMTOFF RULES FORMAT [UNTIL] -Zone Asia/Ashgabat 3:53:32 - LMT 1924 May 2 # or Ashkhabad - 4:00 - +04 1930 Jun 21 - 5:00 RussiaAsia +05/+06 1991 Mar 31 2:00 - 4:00 RussiaAsia +04/+05 1992 Jan 19 2:00 - 5:00 - +05 - -# United Arab Emirates -# Zone NAME GMTOFF RULES FORMAT [UNTIL] -Zone Asia/Dubai 3:41:12 - LMT 1920 - 4:00 - +04 -Link Asia/Dubai Asia/Muscat # Oman - -# Uzbekistan -# Byalokoz 1919 says Uzbekistan was 4:27:53. -# Zone NAME GMTOFF RULES FORMAT [UNTIL] -Zone Asia/Samarkand 4:27:53 - LMT 1924 May 2 - 4:00 - +04 1930 Jun 21 - 5:00 - +05 1981 Apr 1 - 5:00 1:00 +06 1981 Oct 1 - 6:00 - +06 1982 Apr 1 - 5:00 RussiaAsia +05/+06 1992 - 5:00 - +05 -# Milne says Tashkent was 4:37:10.8; round to nearest. 
-Zone Asia/Tashkent 4:37:11 - LMT 1924 May 2 - 5:00 - +05 1930 Jun 21 - 6:00 RussiaAsia +06/+07 1991 Mar 31 2:00 - 5:00 RussiaAsia +05/+06 1992 - 5:00 - +05 - -# Vietnam - -# From Paul Eggert (2014-10-04): -# Milne gives 7:16:56 for the meridian of Saigon in 1899, as being -# used in Lower Laos, Cambodia, and Annam. But this is quite a ways -# from Saigon's location. For now, ignore this and stick with Shanks -# and Pottenger for LMT before 1906. - -# From Arthur David Olson (2008-03-18): -# The English-language name of Vietnam's most populous city is "Ho Chi Minh -# City"; use Ho_Chi_Minh below to avoid a name of more than 14 characters. - -# From Paul Eggert (2014-10-21) after a heads-up from Trần Ngọc Quân: -# Trần Tiến Bình's authoritative book "Lịch Việt Nam: thế kỷ XX-XXI (1901-2100)" -# (Nhà xuất bản Văn Hoá - Thông Tin, Hanoi, 2005), pp 49-50, -# is quoted verbatim in: -# http://www.thoigian.com.vn/?mPage=P80D01 -# is translated by Brian Inglis in: -# http://mm.icann.org/pipermail/tz/2014-October/021654.html -# and is the basis for the information below. -# -# The 1906 transition was effective July 1 and standardized Indochina to -# Phù Liễn Observatory, legally 104 deg. 17'17" east of Paris. -# It's unclear whether this meant legal Paris Mean Time (00:09:21) or -# the Paris Meridian (2 deg. 20'14.03" E); the former yields 07:06:30.1333... -# and the latter 07:06:29.333... so either way it rounds to 07:06:30, -# which is used below even though the modern-day Phù Liễn Observatory -# is closer to 07:06:31. Abbreviate Phù Liễn Mean Time as PLMT. -# -# The following transitions occurred in Indochina in general (before 1954) -# and in South Vietnam in particular (after 1954): -# To 07:00 on 1911-05-01. -# To 08:00 on 1942-12-31 at 23:00. -# To 09:00 in 1945-03-14 at 23:00. -# To 07:00 on 1945-09-02 in Vietnam. -# To 08:00 on 1947-04-01 in French-controlled Indochina. -# To 07:00 on 1955-07-01 in South Vietnam. -# To 08:00 on 1959-12-31 at 23:00 in South Vietnam. -# To 07:00 on 1975-06-13 in South Vietnam. -# -# Trần cites the following sources; it's unclear which supplied the info above. -# -# Hoàng Xuân Hãn: "Lịch và lịch Việt Nam". Tập san Khoa học Xã hội, -# No. 9, Paris, February 1982. -# -# Lê Thành Lân: "Lịch và niên biểu lịch sử hai mươi thế kỷ (0001-2010)", -# NXB Thống kê, Hanoi, 2000. -# -# Lê Thành Lân: "Lịch hai thế kỷ (1802-2010) và các lịch vĩnh cửu", -# NXB Thuận Hoá, Huế, 1995. - -# Zone NAME GMTOFF RULES FORMAT [UNTIL] -Zone Asia/Ho_Chi_Minh 7:06:40 - LMT 1906 Jul 1 - 7:06:30 - PLMT 1911 May 1 # Phù Liễn MT - 7:00 - +07 1942 Dec 31 23:00 - 8:00 - +08 1945 Mar 14 23:00 - 9:00 - +09 1945 Sep 2 - 7:00 - +07 1947 Apr 1 - 8:00 - +08 1955 Jul 1 - 7:00 - +07 1959 Dec 31 23:00 - 8:00 - +08 1975 Jun 13 - 7:00 - +07 - -# Yemen -# See Asia/Riyadh. diff --git a/src/timezone/data/australasia b/src/timezone/data/australasia deleted file mode 100644 index d389ae134a..0000000000 --- a/src/timezone/data/australasia +++ /dev/null @@ -1,1778 +0,0 @@ -# This file is in the public domain, so clarified as of -# 2009-05-17 by Arthur David Olson. - -# This file also includes Pacific islands. - -# Notes are at the end of this file - -############################################################################### - -# Australia - -# Please see the notes below for the controversy about "EST" versus "AEST" etc. 
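The Vietnam note above turns a legal meridian of 104 deg. 17'17" east of Paris into a Greenwich offset. Under the reading that "Paris" means legal Paris Mean Time (00:09:21 ahead of Greenwich), a quick Python sketch (our own check, not part of the tz tooling) reproduces the 07:06:30.1333... figure quoted there, which rounds to the 7:06:30 used for PLMT:

    # Legal meridian of Indochina east of Paris, per the Vietnam note above.
    deg, arcmin, arcsec = 104, 17, 17
    east_of_paris_deg = deg + arcmin / 60 + arcsec / 3600    # about 104.2881 degrees

    hours_east_of_paris = east_of_paris_deg / 15             # 15 degrees of longitude = 1 hour
    paris_mean_time_hours = (9 * 60 + 21) / 3600             # legal PMT: 00:09:21 east of Greenwich

    lmt_hours = hours_east_of_paris + paris_mean_time_hours  # offset east of Greenwich

    h, rem = divmod(lmt_hours * 3600, 3600)
    m, s = divmod(rem, 60)
    print(f"{int(h):02d}:{int(m):02d}:{s:07.4f}")            # 07:06:30.1333 -> rounds to 7:06:30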
- -# Rule NAME FROM TO TYPE IN ON AT SAVE LETTER/S -Rule Aus 1917 only - Jan 1 0:01 1:00 D -Rule Aus 1917 only - Mar 25 2:00 0 S -Rule Aus 1942 only - Jan 1 2:00 1:00 D -Rule Aus 1942 only - Mar 29 2:00 0 S -Rule Aus 1942 only - Sep 27 2:00 1:00 D -Rule Aus 1943 1944 - Mar lastSun 2:00 0 S -Rule Aus 1943 only - Oct 3 2:00 1:00 D -# Go with Whitman and the Australian National Standards Commission, which -# says W Australia didn't use DST in 1943/1944. Ignore Whitman's claim that -# 1944/1945 was just like 1943/1944. - -# Zone NAME GMTOFF RULES FORMAT [UNTIL] -# Northern Territory -Zone Australia/Darwin 8:43:20 - LMT 1895 Feb - 9:00 - ACST 1899 May - 9:30 Aus AC%sT -# Western Australia -# -# Rule NAME FROM TO TYPE IN ON AT SAVE LETTER/S -Rule AW 1974 only - Oct lastSun 2:00s 1:00 D -Rule AW 1975 only - Mar Sun>=1 2:00s 0 S -Rule AW 1983 only - Oct lastSun 2:00s 1:00 D -Rule AW 1984 only - Mar Sun>=1 2:00s 0 S -Rule AW 1991 only - Nov 17 2:00s 1:00 D -Rule AW 1992 only - Mar Sun>=1 2:00s 0 S -Rule AW 2006 only - Dec 3 2:00s 1:00 D -Rule AW 2007 2009 - Mar lastSun 2:00s 0 S -Rule AW 2007 2008 - Oct lastSun 2:00s 1:00 D -Zone Australia/Perth 7:43:24 - LMT 1895 Dec - 8:00 Aus AW%sT 1943 Jul - 8:00 AW AW%sT -Zone Australia/Eucla 8:35:28 - LMT 1895 Dec - 8:45 Aus +0845/+0945 1943 Jul - 8:45 AW +0845/+0945 - -# Queensland -# -# From Alex Livingston (1996-11-01): -# I have heard or read more than once that some resort islands off the coast -# of Queensland chose to keep observing daylight-saving time even after -# Queensland ceased to. -# -# From Paul Eggert (1996-11-22): -# IATA SSIM (1993-02/1994-09) say that the Holiday Islands (Hayman, Lindeman, -# Hamilton) observed DST for two years after the rest of Queensland stopped. -# Hamilton is the largest, but there is also a Hamilton in Victoria, -# so use Lindeman. -# -# From J William Piggott (2016-02-20): -# There is no location named Holiday Islands in Queensland Australia; holiday -# islands is a colloquial term used globally. Hayman and Lindeman are at the -# north and south extremes of the Whitsunday Islands archipelago, and -# Hamilton is in between; it is reasonable to believe that this time zone -# applies to all of the Whitsundays. 
-# http://www.australia.gov.au/about-australia/australian-story/austn-islands -# -# Rule NAME FROM TO TYPE IN ON AT SAVE LETTER/S -Rule AQ 1971 only - Oct lastSun 2:00s 1:00 D -Rule AQ 1972 only - Feb lastSun 2:00s 0 S -Rule AQ 1989 1991 - Oct lastSun 2:00s 1:00 D -Rule AQ 1990 1992 - Mar Sun>=1 2:00s 0 S -Rule Holiday 1992 1993 - Oct lastSun 2:00s 1:00 D -Rule Holiday 1993 1994 - Mar Sun>=1 2:00s 0 S -Zone Australia/Brisbane 10:12:08 - LMT 1895 - 10:00 Aus AE%sT 1971 - 10:00 AQ AE%sT -Zone Australia/Lindeman 9:55:56 - LMT 1895 - 10:00 Aus AE%sT 1971 - 10:00 AQ AE%sT 1992 Jul - 10:00 Holiday AE%sT - -# South Australia -# Rule NAME FROM TO TYPE IN ON AT SAVE LETTER/S -Rule AS 1971 1985 - Oct lastSun 2:00s 1:00 D -Rule AS 1986 only - Oct 19 2:00s 1:00 D -Rule AS 1987 2007 - Oct lastSun 2:00s 1:00 D -Rule AS 1972 only - Feb 27 2:00s 0 S -Rule AS 1973 1985 - Mar Sun>=1 2:00s 0 S -Rule AS 1986 1990 - Mar Sun>=15 2:00s 0 S -Rule AS 1991 only - Mar 3 2:00s 0 S -Rule AS 1992 only - Mar 22 2:00s 0 S -Rule AS 1993 only - Mar 7 2:00s 0 S -Rule AS 1994 only - Mar 20 2:00s 0 S -Rule AS 1995 2005 - Mar lastSun 2:00s 0 S -Rule AS 2006 only - Apr 2 2:00s 0 S -Rule AS 2007 only - Mar lastSun 2:00s 0 S -Rule AS 2008 max - Apr Sun>=1 2:00s 0 S -Rule AS 2008 max - Oct Sun>=1 2:00s 1:00 D -# Zone NAME GMTOFF RULES FORMAT [UNTIL] -Zone Australia/Adelaide 9:14:20 - LMT 1895 Feb - 9:00 - ACST 1899 May - 9:30 Aus AC%sT 1971 - 9:30 AS AC%sT - -# Tasmania -# -# From Paul Eggert (2005-08-16): -# http://www.bom.gov.au/climate/averages/tables/dst_times.shtml -# says King Island didn't observe DST from WWII until late 1971. -# -# Rule NAME FROM TO TYPE IN ON AT SAVE LETTER/S -Rule AT 1967 only - Oct Sun>=1 2:00s 1:00 D -Rule AT 1968 only - Mar lastSun 2:00s 0 S -Rule AT 1968 1985 - Oct lastSun 2:00s 1:00 D -Rule AT 1969 1971 - Mar Sun>=8 2:00s 0 S -Rule AT 1972 only - Feb lastSun 2:00s 0 S -Rule AT 1973 1981 - Mar Sun>=1 2:00s 0 S -Rule AT 1982 1983 - Mar lastSun 2:00s 0 S -Rule AT 1984 1986 - Mar Sun>=1 2:00s 0 S -Rule AT 1986 only - Oct Sun>=15 2:00s 1:00 D -Rule AT 1987 1990 - Mar Sun>=15 2:00s 0 S -Rule AT 1987 only - Oct Sun>=22 2:00s 1:00 D -Rule AT 1988 1990 - Oct lastSun 2:00s 1:00 D -Rule AT 1991 1999 - Oct Sun>=1 2:00s 1:00 D -Rule AT 1991 2005 - Mar lastSun 2:00s 0 S -Rule AT 2000 only - Aug lastSun 2:00s 1:00 D -Rule AT 2001 max - Oct Sun>=1 2:00s 1:00 D -Rule AT 2006 only - Apr Sun>=1 2:00s 0 S -Rule AT 2007 only - Mar lastSun 2:00s 0 S -Rule AT 2008 max - Apr Sun>=1 2:00s 0 S -# Zone NAME GMTOFF RULES FORMAT [UNTIL] -Zone Australia/Hobart 9:49:16 - LMT 1895 Sep - 10:00 - AEST 1916 Oct 1 2:00 - 10:00 1:00 AEDT 1917 Feb - 10:00 Aus AE%sT 1967 - 10:00 AT AE%sT -Zone Australia/Currie 9:35:28 - LMT 1895 Sep - 10:00 - AEST 1916 Oct 1 2:00 - 10:00 1:00 AEDT 1917 Feb - 10:00 Aus AE%sT 1971 Jul - 10:00 AT AE%sT - -# Victoria -# Rule NAME FROM TO TYPE IN ON AT SAVE LETTER/S -Rule AV 1971 1985 - Oct lastSun 2:00s 1:00 D -Rule AV 1972 only - Feb lastSun 2:00s 0 S -Rule AV 1973 1985 - Mar Sun>=1 2:00s 0 S -Rule AV 1986 1990 - Mar Sun>=15 2:00s 0 S -Rule AV 1986 1987 - Oct Sun>=15 2:00s 1:00 D -Rule AV 1988 1999 - Oct lastSun 2:00s 1:00 D -Rule AV 1991 1994 - Mar Sun>=1 2:00s 0 S -Rule AV 1995 2005 - Mar lastSun 2:00s 0 S -Rule AV 2000 only - Aug lastSun 2:00s 1:00 D -Rule AV 2001 2007 - Oct lastSun 2:00s 1:00 D -Rule AV 2006 only - Apr Sun>=1 2:00s 0 S -Rule AV 2007 only - Mar lastSun 2:00s 0 S -Rule AV 2008 max - Apr Sun>=1 2:00s 0 S -Rule AV 2008 max - Oct Sun>=1 2:00s 1:00 D -# Zone NAME GMTOFF RULES FORMAT [UNTIL] 
-Zone Australia/Melbourne 9:39:52 - LMT 1895 Feb - 10:00 Aus AE%sT 1971 - 10:00 AV AE%sT - -# New South Wales -# Rule NAME FROM TO TYPE IN ON AT SAVE LETTER/S -Rule AN 1971 1985 - Oct lastSun 2:00s 1:00 D -Rule AN 1972 only - Feb 27 2:00s 0 S -Rule AN 1973 1981 - Mar Sun>=1 2:00s 0 S -Rule AN 1982 only - Apr Sun>=1 2:00s 0 S -Rule AN 1983 1985 - Mar Sun>=1 2:00s 0 S -Rule AN 1986 1989 - Mar Sun>=15 2:00s 0 S -Rule AN 1986 only - Oct 19 2:00s 1:00 D -Rule AN 1987 1999 - Oct lastSun 2:00s 1:00 D -Rule AN 1990 1995 - Mar Sun>=1 2:00s 0 S -Rule AN 1996 2005 - Mar lastSun 2:00s 0 S -Rule AN 2000 only - Aug lastSun 2:00s 1:00 D -Rule AN 2001 2007 - Oct lastSun 2:00s 1:00 D -Rule AN 2006 only - Apr Sun>=1 2:00s 0 S -Rule AN 2007 only - Mar lastSun 2:00s 0 S -Rule AN 2008 max - Apr Sun>=1 2:00s 0 S -Rule AN 2008 max - Oct Sun>=1 2:00s 1:00 D -# Zone NAME GMTOFF RULES FORMAT [UNTIL] -Zone Australia/Sydney 10:04:52 - LMT 1895 Feb - 10:00 Aus AE%sT 1971 - 10:00 AN AE%sT -Zone Australia/Broken_Hill 9:25:48 - LMT 1895 Feb - 10:00 - AEST 1896 Aug 23 - 9:00 - ACST 1899 May - 9:30 Aus AC%sT 1971 - 9:30 AN AC%sT 2000 - 9:30 AS AC%sT - -# Lord Howe Island -# Rule NAME FROM TO TYPE IN ON AT SAVE LETTER/S -Rule LH 1981 1984 - Oct lastSun 2:00 1:00 D -Rule LH 1982 1985 - Mar Sun>=1 2:00 0 S -Rule LH 1985 only - Oct lastSun 2:00 0:30 D -Rule LH 1986 1989 - Mar Sun>=15 2:00 0 S -Rule LH 1986 only - Oct 19 2:00 0:30 D -Rule LH 1987 1999 - Oct lastSun 2:00 0:30 D -Rule LH 1990 1995 - Mar Sun>=1 2:00 0 S -Rule LH 1996 2005 - Mar lastSun 2:00 0 S -Rule LH 2000 only - Aug lastSun 2:00 0:30 D -Rule LH 2001 2007 - Oct lastSun 2:00 0:30 D -Rule LH 2006 only - Apr Sun>=1 2:00 0 S -Rule LH 2007 only - Mar lastSun 2:00 0 S -Rule LH 2008 max - Apr Sun>=1 2:00 0 S -Rule LH 2008 max - Oct Sun>=1 2:00 0:30 D -Zone Australia/Lord_Howe 10:36:20 - LMT 1895 Feb - 10:00 - AEST 1981 Mar - 10:30 LH +1030/+1130 1985 Jul - 10:30 LH +1030/+11 - -# Australian miscellany -# -# Ashmore Is, Cartier -# no indigenous inhabitants; only seasonal caretakers -# no times are set -# -# Coral Sea Is -# no indigenous inhabitants; only meteorologists -# no times are set -# -# Macquarie -# Permanent occupation (scientific station) 1911-1915 and since 25 March 1948; -# sealing and penguin oil station operated Nov 1899 to Apr 1919. See the -# Tasmania Parks & Wildlife Service history of sealing at Macquarie Island -# http://www.parks.tas.gov.au/index.aspx?base=1828 -# http://www.parks.tas.gov.au/index.aspx?base=1831 -# Guess that it was like Australia/Hobart while inhabited before 2010. -# -# From Steffen Thorsen (2010-03-10): -# We got these changes from the Australian Antarctic Division: -# - Macquarie Island will stay on UTC+11 for winter and therefore not -# switch back from daylight savings time when other parts of Australia do -# on 4 April. -# -# From Arthur David Olson (2013-05-23): -# The 1919 transition is overspecified below so pre-2013 zics -# will produce a binary file with an [A]EST-type as the first 32-bit type; -# this is required for correct handling of times before 1916 by -# pre-2013 versions of localtime. 
-Zone Antarctica/Macquarie 0 - -00 1899 Nov - 10:00 - AEST 1916 Oct 1 2:00 - 10:00 1:00 AEDT 1917 Feb - 10:00 Aus AE%sT 1919 Apr 1 0:00s - 0 - -00 1948 Mar 25 - 10:00 Aus AE%sT 1967 - 10:00 AT AE%sT 2010 Apr 4 3:00 - 11:00 - +11 - -# Christmas -# Zone NAME GMTOFF RULES FORMAT [UNTIL] -Zone Indian/Christmas 7:02:52 - LMT 1895 Feb - 7:00 - +07 - -# Cocos (Keeling) Is -# These islands were ruled by the Ross family from about 1830 to 1978. -# We don't know when standard time was introduced; for now, we guess 1900. -# Zone NAME GMTOFF RULES FORMAT [UNTIL] -Zone Indian/Cocos 6:27:40 - LMT 1900 - 6:30 - +0630 - - -# Fiji - -# Milne gives 11:55:44 for Suva. - -# From Alexander Krivenyshev (2009-11-10): -# According to Fiji Broadcasting Corporation, Fiji plans to re-introduce DST -# from November 29th 2009 to April 25th 2010. -# -# "Daylight savings to commence this month" -# http://www.radiofiji.com.fj/fullstory.php?id=23719 -# http://www.worldtimezone.com/dst_news/dst_news_fiji01.html - -# From Steffen Thorsen (2009-11-10): -# The Fiji Government has posted some more details about the approved -# amendments: -# http://www.fiji.gov.fj/publish/page_16198.shtml - -# From Steffen Thorsen (2010-03-03): -# The Cabinet in Fiji has decided to end DST about a month early, on -# 2010-03-28 at 03:00. -# The plan is to observe DST again, from 2010-10-24 to sometime in March -# 2011 (last Sunday a good guess?). -# -# Official source: -# http://www.fiji.gov.fj/index.php?option=com_content&view=article&id=1096:3310-cabinet-approves-change-in-daylight-savings-dates&catid=49:cabinet-releases&Itemid=166 -# -# A bit more background info here: -# http://www.timeanddate.com/news/time/fiji-dst-ends-march-2010.html - -# From Alexander Krivenyshev (2010-10-24): -# According to Radio Fiji and Fiji Times online, Fiji will end DST 3 -# weeks earlier than expected - on March 6, 2011, not March 27, 2011... -# Here is confirmation from Government of the Republic of the Fiji Islands, -# Ministry of Information (fiji.gov.fj) web site: -# http://www.fiji.gov.fj/index.php?option=com_content&view=article&id=2608:daylight-savings&catid=71:press-releases&Itemid=155 -# http://www.worldtimezone.com/dst_news/dst_news_fiji04.html - -# From Steffen Thorsen (2011-10-03): -# Now the dates have been confirmed, and at least our start date -# assumption was correct (end date was one week wrong). -# -# http://www.fiji.gov.fj/index.php?option=com_content&view=article&id=4966:daylight-saving-starts-in-fiji&catid=71:press-releases&Itemid=155 -# which says -# Members of the public are reminded to change their time to one hour in -# advance at 2am to 3am on October 23, 2011 and one hour back at 3am to -# 2am on February 26 next year. - -# From Ken Rylander (2011-10-24) -# Another change to the Fiji DST end date. In the TZ database the end date for -# Fiji DST 2012, is currently Feb 26. This has been changed to Jan 22. -# -# http://www.fiji.gov.fj/index.php?option=com_content&view=article&id=5017:amendments-to-daylight-savings&catid=71:press-releases&Itemid=155 -# states: -# -# The end of daylight saving scheduled initially for the 26th of February 2012 -# has been brought forward to the 22nd of January 2012. -# The commencement of daylight saving will remain unchanged and start -# on the 23rd of October, 2011. 
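Many of the Australian rules above give their AT column with an 's' suffix (for example "2:00s"), meaning local standard time rather than wall-clock time; a trailing 'u', 'g' or 'z' would mean UT, and no suffix means wall-clock time. A minimal Python sketch of how such a time maps to a UT instant (the helper and the sample dates are illustrative, not taken from zic source):

    from datetime import datetime, timedelta

    def rule_instant_utc(local_dt, suffix, std_offset, dst_before):
        """Map a zic AT time to a UT instant.
        suffix: 'w' wall clock (default), 's' local standard time, 'u'/'g'/'z' UT.
        std_offset is the zone's standard offset; dst_before is the saving in
        effect just before the transition (needed only for wall-clock times)."""
        if suffix in ("u", "g", "z"):
            return local_dt
        if suffix == "s":
            return local_dt - std_offset
        return local_dt - std_offset - dst_before

    # "Rule AN 2008 max - Apr Sun>=1 2:00s 0 S": 02:00 standard time in New South Wales.
    print(rule_instant_utc(datetime(2018, 4, 1, 2, 0), "s",
                           timedelta(hours=10), timedelta(hours=1)))
    # -> 2018-03-31 16:00 UT; on the wall clock 03:00 AEDT falls back to 02:00 AEST.

    # "Rule LH 2008 max - Oct Sun>=1 2:00 0:30 D": wall-clock 02:00 on Lord Howe.
    print(rule_instant_utc(datetime(2018, 10, 7, 2, 0), "w",
                           timedelta(hours=10, minutes=30), timedelta(0)))
    # -> 2018-10-06 15:30 UT; clocks advance half an hour, from 02:00 to 02:30.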
- -# From the Fiji Government Online Portal (2012-08-21) via Steffen Thorsen: -# The Minister for Labour, Industrial Relations and Employment Mr Jone Usamate -# today confirmed that Fiji will start daylight savings at 2 am on Sunday 21st -# October 2012 and end at 3 am on Sunday 20th January 2013. -# http://www.fiji.gov.fj/index.php?option=com_content&view=article&id=6702&catid=71&Itemid=155 - -# From the Fijian Government Media Center (2013-08-30) via David Wheeler: -# Fiji will start daylight savings on Sunday 27th October, 2013 ... -# move clocks forward by one hour from 2am -# http://www.fiji.gov.fj/Media-Center/Press-Releases/DAYLIGHT-SAVING-STARTS-ON-SUNDAY,-27th-OCTOBER-201.aspx - -# From Steffen Thorsen (2013-01-10): -# Fiji will end DST on 2014-01-19 02:00: -# http://www.fiji.gov.fj/Media-Center/Press-Releases/DAYLIGHT-SAVINGS-TO-END-THIS-MONTH-%281%29.aspx - -# From Ken Rylander (2014-10-20): -# DST will start Nov. 2 this year. -# http://www.fiji.gov.fj/Media-Center/Press-Releases/DAYLIGHT-SAVING-STARTS-ON-SUNDAY,-NOVEMBER-2ND.aspx - -# From a government order dated 2015-08-26 and published as Legal Notice No. 77 -# in the Government of Fiji Gazette Supplement No. 24 (2015-08-28), -# via Ken Rylander (2015-09-02): -# the daylight saving period is 1 hour in advance of the standard time -# commencing at 2.00 am on Sunday 1st November, 2015 and ending at -# 3.00 am on Sunday 17th January, 2016. - -# From Raymond Kumar (2016-10-04): -# http://www.fiji.gov.fj/Media-Center/Press-Releases/DAYLIGHT-SAVING-STARTS-ON-6th-NOVEMBER,-2016.aspx -# "Fiji's daylight savings will begin on Sunday, 6 November 2016, when -# clocks go forward an hour at 2am to 3am.... Daylight Saving will -# end at 3.00am on Sunday 15th January 2017." - -# From Paul Eggert (2016-10-03): -# For now, guess DST from 02:00 the first Sunday in November to -# 03:00 the third Sunday in January. Although ad hoc, it matches -# transitions since late 2014 and seems more likely to match future -# practice than guessing no DST. - -# Rule NAME FROM TO TYPE IN ON AT SAVE LETTER/S -Rule Fiji 1998 1999 - Nov Sun>=1 2:00 1:00 S -Rule Fiji 1999 2000 - Feb lastSun 3:00 0 - -Rule Fiji 2009 only - Nov 29 2:00 1:00 S -Rule Fiji 2010 only - Mar lastSun 3:00 0 - -Rule Fiji 2010 2013 - Oct Sun>=21 2:00 1:00 S -Rule Fiji 2011 only - Mar Sun>=1 3:00 0 - -Rule Fiji 2012 2013 - Jan Sun>=18 3:00 0 - -Rule Fiji 2014 only - Jan Sun>=18 2:00 0 - -Rule Fiji 2014 max - Nov Sun>=1 2:00 1:00 S -Rule Fiji 2015 max - Jan Sun>=15 3:00 0 - -# Zone NAME GMTOFF RULES FORMAT [UNTIL] -Zone Pacific/Fiji 11:55:44 - LMT 1915 Oct 26 # Suva - 12:00 Fiji +12/+13 - -# French Polynesia -# Zone NAME GMTOFF RULES FORMAT [UNTIL] -Zone Pacific/Gambier -8:59:48 - LMT 1912 Oct # Rikitea - -9:00 - -09 -Zone Pacific/Marquesas -9:18:00 - LMT 1912 Oct - -9:30 - -0930 -Zone Pacific/Tahiti -9:58:16 - LMT 1912 Oct # Papeete - -10:00 - -10 -# Clipperton (near North America) is administered from French Polynesia; -# it is uninhabited. 
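The "Nov Sun>=1" and "Jan Sun>=15" fields in the Fiji rules above mean the first Sunday falling on or after the given day of the month. A small Python sketch (a hypothetical helper, not part of zic) resolving the 2016-17 transitions quoted in the press releases above:

    from datetime import date, timedelta

    def weekday_on_or_after(year, month, day, weekday):
        """Resolve a zic 'Sun>=day' ON field: the first `weekday`
        (Mon=0 .. Sun=6) falling on or after the given day of the month."""
        d = date(year, month, day)
        while d.weekday() != weekday:
            d += timedelta(days=1)
        return d

    SUNDAY = 6
    print(weekday_on_or_after(2016, 11, 1, SUNDAY))   # 2016-11-06, DST start per the 2016 release
    print(weekday_on_or_after(2017, 1, 15, SUNDAY))   # 2017-01-15, DST end per the same release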
- -# Guam -# Zone NAME GMTOFF RULES FORMAT [UNTIL] -Zone Pacific/Guam -14:21:00 - LMT 1844 Dec 31 - 9:39:00 - LMT 1901 # Agana - 10:00 - GST 2000 Dec 23 # Guam - 10:00 - ChST # Chamorro Standard Time -Link Pacific/Guam Pacific/Saipan # N Mariana Is - -# Kiribati -# Zone NAME GMTOFF RULES FORMAT [UNTIL] -Zone Pacific/Tarawa 11:32:04 - LMT 1901 # Bairiki - 12:00 - +12 -Zone Pacific/Enderbury -11:24:20 - LMT 1901 - -12:00 - -12 1979 Oct - -11:00 - -11 1995 - 13:00 - +13 -Zone Pacific/Kiritimati -10:29:20 - LMT 1901 - -10:40 - -1040 1979 Oct - -10:00 - -10 1995 - 14:00 - +14 - -# N Mariana Is -# See Pacific/Guam. - -# Marshall Is -# Zone NAME GMTOFF RULES FORMAT [UNTIL] -Zone Pacific/Majuro 11:24:48 - LMT 1901 - 11:00 - +11 1969 Oct - 12:00 - +12 -Zone Pacific/Kwajalein 11:09:20 - LMT 1901 - 11:00 - +11 1969 Oct - -12:00 - -12 1993 Aug 20 - 12:00 - +12 - -# Micronesia -# Zone NAME GMTOFF RULES FORMAT [UNTIL] -Zone Pacific/Chuuk 10:07:08 - LMT 1901 - 10:00 - +10 -Zone Pacific/Pohnpei 10:32:52 - LMT 1901 # Kolonia - 11:00 - +11 -Zone Pacific/Kosrae 10:51:56 - LMT 1901 - 11:00 - +11 1969 Oct - 12:00 - +12 1999 - 11:00 - +11 - -# Nauru -# Zone NAME GMTOFF RULES FORMAT [UNTIL] -Zone Pacific/Nauru 11:07:40 - LMT 1921 Jan 15 # Uaobe - 11:30 - +1130 1942 Mar 15 - 9:00 - +09 1944 Aug 15 - 11:30 - +1130 1979 May - 12:00 - +12 - -# New Caledonia -# Rule NAME FROM TO TYPE IN ON AT SAVE LETTER/S -Rule NC 1977 1978 - Dec Sun>=1 0:00 1:00 S -Rule NC 1978 1979 - Feb 27 0:00 0 - -Rule NC 1996 only - Dec 1 2:00s 1:00 S -# Shanks & Pottenger say the following was at 2:00; go with IATA. -Rule NC 1997 only - Mar 2 2:00s 0 - -# Zone NAME GMTOFF RULES FORMAT [UNTIL] -Zone Pacific/Noumea 11:05:48 - LMT 1912 Jan 13 # Nouméa - 11:00 NC +11/+12 - - -############################################################################### - -# New Zealand - -# Rule NAME FROM TO TYPE IN ON AT SAVE LETTER/S -Rule NZ 1927 only - Nov 6 2:00 1:00 S -Rule NZ 1928 only - Mar 4 2:00 0 M -Rule NZ 1928 1933 - Oct Sun>=8 2:00 0:30 S -Rule NZ 1929 1933 - Mar Sun>=15 2:00 0 M -Rule NZ 1934 1940 - Apr lastSun 2:00 0 M -Rule NZ 1934 1940 - Sep lastSun 2:00 0:30 S -Rule NZ 1946 only - Jan 1 0:00 0 S -# Since 1957 Chatham has been 45 minutes ahead of NZ, but there's no -# convenient single notation for the date and time of this transition -# so we must duplicate the Rule lines. 
-Rule NZ 1974 only - Nov Sun>=1 2:00s 1:00 D -Rule Chatham 1974 only - Nov Sun>=1 2:45s 1:00 D -Rule NZ 1975 only - Feb lastSun 2:00s 0 S -Rule Chatham 1975 only - Feb lastSun 2:45s 0 S -Rule NZ 1975 1988 - Oct lastSun 2:00s 1:00 D -Rule Chatham 1975 1988 - Oct lastSun 2:45s 1:00 D -Rule NZ 1976 1989 - Mar Sun>=1 2:00s 0 S -Rule Chatham 1976 1989 - Mar Sun>=1 2:45s 0 S -Rule NZ 1989 only - Oct Sun>=8 2:00s 1:00 D -Rule Chatham 1989 only - Oct Sun>=8 2:45s 1:00 D -Rule NZ 1990 2006 - Oct Sun>=1 2:00s 1:00 D -Rule Chatham 1990 2006 - Oct Sun>=1 2:45s 1:00 D -Rule NZ 1990 2007 - Mar Sun>=15 2:00s 0 S -Rule Chatham 1990 2007 - Mar Sun>=15 2:45s 0 S -Rule NZ 2007 max - Sep lastSun 2:00s 1:00 D -Rule Chatham 2007 max - Sep lastSun 2:45s 1:00 D -Rule NZ 2008 max - Apr Sun>=1 2:00s 0 S -Rule Chatham 2008 max - Apr Sun>=1 2:45s 0 S -# Zone NAME GMTOFF RULES FORMAT [UNTIL] -Zone Pacific/Auckland 11:39:04 - LMT 1868 Nov 2 - 11:30 NZ NZ%sT 1946 Jan 1 - 12:00 NZ NZ%sT -Zone Pacific/Chatham 12:13:48 - LMT 1868 Nov 2 - 12:15 - +1215 1946 Jan 1 - 12:45 Chatham +1245/+1345 - -Link Pacific/Auckland Antarctica/McMurdo - -# Auckland Is -# uninhabited; Māori and Moriori, colonial settlers, pastoralists, sealers, -# and scientific personnel have wintered - -# Campbell I -# minor whaling stations operated 1909/1914 -# scientific station operated 1941/1995; -# previously whalers, sealers, pastoralists, and scientific personnel wintered -# was probably like Pacific/Auckland - -# Cook Is -# From Shanks & Pottenger: -# Rule NAME FROM TO TYPE IN ON AT SAVE LETTER/S -Rule Cook 1978 only - Nov 12 0:00 0:30 HS -Rule Cook 1979 1991 - Mar Sun>=1 0:00 0 - -Rule Cook 1979 1990 - Oct lastSun 0:00 0:30 HS -# Zone NAME GMTOFF RULES FORMAT [UNTIL] -Zone Pacific/Rarotonga -10:39:04 - LMT 1901 # Avarua - -10:30 - -1030 1978 Nov 12 - -10:00 Cook -10/-0930 - -############################################################################### - - -# Niue -# Zone NAME GMTOFF RULES FORMAT [UNTIL] -Zone Pacific/Niue -11:19:40 - LMT 1901 # Alofi - -11:20 - -1120 1951 - -11:30 - -1130 1978 Oct 1 - -11:00 - -11 - -# Norfolk -# Zone NAME GMTOFF RULES FORMAT [UNTIL] -Zone Pacific/Norfolk 11:11:52 - LMT 1901 # Kingston - 11:12 - +1112 1951 - 11:30 - +1130 1974 Oct 27 02:00 - 11:30 1:00 +1230 1975 Mar 2 02:00 - 11:30 - +1130 2015 Oct 4 02:00 - 11:00 - +11 - -# Palau (Belau) -# Zone NAME GMTOFF RULES FORMAT [UNTIL] -Zone Pacific/Palau 8:57:56 - LMT 1901 # Koror - 9:00 - +09 - -# Papua New Guinea -# Zone NAME GMTOFF RULES FORMAT [UNTIL] -Zone Pacific/Port_Moresby 9:48:40 - LMT 1880 - 9:48:32 - PMMT 1895 # Port Moresby Mean Time - 10:00 - +10 -# -# From Paul Eggert (2014-10-13): -# Base the Bougainville entry on the Arawa-Kieta region, which appears to have -# the most people even though it was devastated in the Bougainville Civil War. -# -# Although Shanks gives 1942-03-15 / 1943-11-01 for UT +09, these dates -# are apparently rough guesswork from the starts of military campaigns. -# The World War II entries below are instead based on Arawa-Kieta. -# The Japanese occupied Kieta in July 1942, -# according to the Pacific War Online Encyclopedia -# http://pwencycl.kgbudge.com/B/o/Bougainville.htm -# and seem to have controlled it until their 1945-08-21 surrender. -# -# The Autonomous Region of Bougainville switched from UT +10 to +11 -# on 2014-12-28 at 02:00. They call +11 "Bougainville Standard Time". 
-# See: -# http://www.bougainville24.com/bougainville-issues/bougainville-gets-own-timezone/ -# -Zone Pacific/Bougainville 10:22:16 - LMT 1880 - 9:48:32 - PMMT 1895 - 10:00 - +10 1942 Jul - 9:00 - +09 1945 Aug 21 - 10:00 - +10 2014 Dec 28 2:00 - 11:00 - +11 - -# Pitcairn -# Zone NAME GMTOFF RULES FORMAT [UNTIL] -Zone Pacific/Pitcairn -8:40:20 - LMT 1901 # Adamstown - -8:30 - -0830 1998 Apr 27 0:00 - -8:00 - -08 - -# American Samoa -Zone Pacific/Pago_Pago 12:37:12 - LMT 1879 Jul 5 - -11:22:48 - LMT 1911 - -11:00 - SST # S=Samoa -Link Pacific/Pago_Pago Pacific/Midway # in US minor outlying islands - -# Samoa (formerly and also known as Western Samoa) - -# From Steffen Thorsen (2009-10-16): -# We have been in contact with the government of Samoa again, and received -# the following info: -# -# "Cabinet has now approved Daylight Saving to be effected next year -# commencing from the last Sunday of September 2010 and conclude first -# Sunday of April 2011." -# -# Background info: -# http://www.timeanddate.com/news/time/samoa-dst-plan-2009.html -# -# Samoa's Daylight Saving Time Act 2009 is available here, but does not -# contain any dates: -# http://www.parliament.gov.ws/documents/acts/Daylight%20Saving%20Act%20%202009%20%28English%29%20-%20Final%207-7-091.pdf - -# From Laupue Raymond Hughes (2010-10-07): -# Please see -# http://www.mcil.gov.ws -# the Ministry of Commerce, Industry and Labour (sideframe) "Last Sunday -# September 2010 (26/09/10) - adjust clocks forward from 12:00 midnight -# to 01:00am and First Sunday April 2011 (03/04/11) - adjust clocks -# backwards from 1:00am to 12:00am" - -# From Laupue Raymond Hughes (2011-03-07): -# [http://www.mcil.gov.ws/ftcd/daylight_saving_2011.pdf] -# -# ... when the standard time strikes the hour of four o'clock (4.00am -# or 0400 Hours) on the 2nd April 2011, then all instruments used to -# measure standard time are to be adjusted/changed to three o'clock -# (3:00am or 0300Hrs). - -# From David Zülke (2011-05-09): -# Subject: Samoa to move timezone from east to west of international date line -# -# http://www.morningstar.co.uk/uk/markets/newsfeeditem.aspx?id=138501958347963 - -# From Paul Eggert (2014-06-27): -# The International Date Line Act 2011 -# http://www.parliament.gov.ws/images/ACTS/International_Date_Line_Act__2011_-_Eng.pdf -# changed Samoa from UT -11 to +13, effective "12 o'clock midnight, on -# Thursday 29th December 2011". The International Date Line was adjusted -# accordingly. - -# From Laupue Raymond Hughes (2011-09-02): -# http://www.mcil.gov.ws/mcil_publications.html -# -# here is the official website publication for Samoa DST and dateline change -# -# DST -# Year End Time Start Time -# 2011 - - - - - - 24 September 3:00am to 4:00am -# 2012 01 April 4:00am to 3:00am - - - - - - -# -# Dateline Change skip Friday 30th Dec 2011 -# Thursday 29th December 2011 23:59:59 Hours -# Saturday 31st December 2011 00:00:00 Hours -# -# From Nicholas Pereira (2012-09-10): -# Daylight Saving Time commences on Sunday 30th September 2012 and -# ends on Sunday 7th of April 2013.... -# http://www.mcil.gov.ws/mcil_publications.html -# -# From Paul Eggert (2014-07-08): -# That web page currently lists transitions for 2012/3 and 2013/4. -# Assume the pattern instituted in 2012 will continue indefinitely. 
- -# Rule NAME FROM TO TYPE IN ON AT SAVE LETTER/S -Rule WS 2010 only - Sep lastSun 0:00 1 D -Rule WS 2011 only - Apr Sat>=1 4:00 0 S -Rule WS 2011 only - Sep lastSat 3:00 1 D -Rule WS 2012 max - Apr Sun>=1 4:00 0 S -Rule WS 2012 max - Sep lastSun 3:00 1 D -# Zone NAME GMTOFF RULES FORMAT [UNTIL] -Zone Pacific/Apia 12:33:04 - LMT 1879 Jul 5 - -11:26:56 - LMT 1911 - -11:30 - -1130 1950 - -11:00 WS -11/-10 2011 Dec 29 24:00 - 13:00 WS +13/+14 - -# Solomon Is -# excludes Bougainville, for which see Papua New Guinea -# Zone NAME GMTOFF RULES FORMAT [UNTIL] -Zone Pacific/Guadalcanal 10:39:48 - LMT 1912 Oct # Honiara - 11:00 - +11 - -# Tokelau -# -# From Gwillim Law (2011-12-29) -# A correspondent informed me that Tokelau, like Samoa, will be skipping -# December 31 this year ... -# -# From Steffen Thorsen (2012-07-25) -# ... we double checked by calling hotels and offices based in Tokelau asking -# about the time there, and they all told a time that agrees with UTC+13.... -# Shanks says UTC-10 from 1901 [but] ... there is a good chance the change -# actually was to UTC-11 back then. -# -# From Paul Eggert (2012-07-25) -# A Google Books snippet of Appendix to the Journals of the House of -# Representatives of New Zealand, Session 1948, -# , page 65, says Tokelau -# was "11 hours slow on G.M.T." Go with Thorsen and assume Shanks & Pottenger -# are off by an hour starting in 1901. - -# Zone NAME GMTOFF RULES FORMAT [UNTIL] -Zone Pacific/Fakaofo -11:24:56 - LMT 1901 - -11:00 - -11 2011 Dec 30 - 13:00 - +13 - -# Tonga -# Rule NAME FROM TO TYPE IN ON AT SAVE LETTER/S -Rule Tonga 1999 only - Oct 7 2:00s 1:00 S -Rule Tonga 2000 only - Mar 19 2:00s 0 - -Rule Tonga 2000 2001 - Nov Sun>=1 2:00 1:00 S -Rule Tonga 2001 2002 - Jan lastSun 2:00 0 - -Rule Tonga 2016 max - Nov Sun>=1 2:00 1:00 S -Rule Tonga 2017 max - Jan Sun>=15 3:00 0 - -# Zone NAME GMTOFF RULES FORMAT [UNTIL] -Zone Pacific/Tongatapu 12:19:20 - LMT 1901 - 12:20 - +1220 1941 - 13:00 - +13 1999 - 13:00 Tonga +13/+14 - -# Tuvalu -# Zone NAME GMTOFF RULES FORMAT [UNTIL] -Zone Pacific/Funafuti 11:56:52 - LMT 1901 - 12:00 - +12 - - -# US minor outlying islands - -# Howland, Baker -# Howland was mined for guano by American companies 1857-1878 and British -# 1886-1891; Baker was similar but exact dates are not known. -# Inhabited by civilians 1935-1942; U.S. military bases 1943-1944; -# uninhabited thereafter. -# Howland observed Hawaii Standard Time (UT -10:30) in 1937; -# see page 206 of Elgen M. Long and Marie K. Long, -# Amelia Earhart: the Mystery Solved, Simon & Schuster (2000). -# So most likely Howland and Baker observed Hawaii Time from 1935 -# until they were abandoned after the war. - -# Jarvis -# Mined for guano by American companies 1857-1879 and British 1883?-1891?. -# Inhabited by civilians 1935-1942; IGY scientific base 1957-1958; -# uninhabited thereafter. -# no information; was probably like Pacific/Kiritimati - -# Johnston -# -# From Paul Eggert (2017-02-10): -# Sometimes Johnston kept Hawaii time, and sometimes it was an hour behind. -# Details are uncertain. We have no data for Johnston after 1970, so -# treat it like Hawaii for now. Since Johnston is now uninhabited, -# its link to Pacific/Honolulu is in the 'backward' file. -# -# In his memoirs of June 6th to October 4, 1945 -# (2005), Herbert C. Bach writes, -# "We started our letdown to Kwajalein Atoll and landed there at 5:00 AM -# Johnston time, 1:30 AM Kwajalein time." This was in June 1945, and -# confirms that Johnston kept the same time as Honolulu in summer 1945. 
-# -# From Lyle McElhaney (2014-03-11): -# [W]hen JI was being used for that [atomic bomb] testing, the time being used -# was not Hawaiian time but rather the same time being used on the ships, -# which had a GMT offset of -11 hours. This apparently applied to at least the -# time from Operation Newsreel (Hardtack I/Teak shot, 1958-08-01) to the last -# Operation Fishbowl shot (Tightrope, 1962-11-04).... [See] Herman Hoerlin, -# "The United States High-Altitude Test Experience: A Review Emphasizing the -# Impact on the Environment", Los Alamos LA-6405, Oct 1976. -# http://www.fas.org/sgp/othergov/doe/lanl/docs1/00322994.pdf -# See the table on page 4 where he lists GMT and local times for the tests; a -# footnote for the JI tests reads that local time is "JI time = Hawaii Time -# Minus One Hour". - -# Kingman -# uninhabited - -# Midway -# See Pacific/Pago_Pago. - -# Palmyra -# uninhabited since World War II; was probably like Pacific/Kiritimati - -# Wake -# Zone NAME GMTOFF RULES FORMAT [UNTIL] -Zone Pacific/Wake 11:06:28 - LMT 1901 - 12:00 - +12 - - -# Vanuatu -# Rule NAME FROM TO TYPE IN ON AT SAVE LETTER/S -Rule Vanuatu 1983 only - Sep 25 0:00 1:00 S -Rule Vanuatu 1984 1991 - Mar Sun>=23 0:00 0 - -Rule Vanuatu 1984 only - Oct 23 0:00 1:00 S -Rule Vanuatu 1985 1991 - Sep Sun>=23 0:00 1:00 S -Rule Vanuatu 1992 1993 - Jan Sun>=23 0:00 0 - -Rule Vanuatu 1992 only - Oct Sun>=23 0:00 1:00 S -# Zone NAME GMTOFF RULES FORMAT [UNTIL] -Zone Pacific/Efate 11:13:16 - LMT 1912 Jan 13 # Vila - 11:00 Vanuatu +11/+12 - -# Wallis and Futuna -# Zone NAME GMTOFF RULES FORMAT [UNTIL] -Zone Pacific/Wallis 12:15:20 - LMT 1901 - 12:00 - +12 - -############################################################################### - -# NOTES - -# This file is by no means authoritative; if you think you know better, -# go ahead and edit the file (and please send any changes to -# tz@iana.org for general use in the future). For more, please see -# the file CONTRIBUTING in the tz distribution. - -# From Paul Eggert (2017-02-10): -# -# Unless otherwise specified, the source for data through 1990 is: -# Thomas G. Shanks and Rique Pottenger, The International Atlas (6th edition), -# San Diego: ACS Publications, Inc. (2003). -# Unfortunately this book contains many errors and cites no sources. -# -# Many years ago Gwillim Law wrote that a good source -# for time zone data was the International Air Transport -# Association's Standard Schedules Information Manual (IATA SSIM), -# published semiannually. Law sent in several helpful summaries -# of the IATA's data after 1990. Except where otherwise noted, -# IATA SSIM is the source for entries after 1990. -# -# Another source occasionally used is Edward W. Whitman, World Time Differences, -# Whitman Publishing Co, 2 Niagara Av, Ealing, London (undated), which -# I found in the UCLA library. -# -# For data circa 1899, a common source is: -# Milne J. Civil time. Geogr J. 1899 Feb;13(2):173-94. -# http://www.jstor.org/stable/1774359 -# -# A reliable and entertaining source about time zones is -# Derek Howse, Greenwich time and longitude, Philip Wilson Publishers (1997). -# -# The following abbreviations are from other sources. -# Corrections are welcome! 
-# std dst -# LMT Local Mean Time -# 8:00 AWST AWDT Western Australia -# 9:30 ACST ACDT Central Australia -# 10:00 AEST AEDT Eastern Australia -# 10:00 GST Guam through 2000 -# 10:00 ChST Chamorro -# 11:30 NZMT NZST New Zealand through 1945 -# 12:00 NZST NZDT New Zealand 1946-present -# -11:00 SST Samoa -# -10:00 HST Hawaii -# -# See the 'northamerica' file for Hawaii. -# See the 'southamerica' file for Easter I and the Galápagos Is. - -############################################################################### - -# Australia - -# From Paul Eggert (2014-06-30): -# Daylight saving time has long been controversial in Australia, pitting -# region against region, rural against urban, and local against global. -# For example, in her review of Graeme Davison's _The Unforgiving -# Minute: how Australians learned to tell the time_ (1993), Perth native -# Phillipa J Martyr wrote, "The section entitled 'Saving Daylight' was -# very informative, but was (as can, sadly, only be expected from a -# Melbourne-based study) replete with the usual chuckleheaded -# Queenslanders and straw-chewing yokels from the West prattling fables -# about fading curtains and crazed farm animals." -# Electronic Journal of Australian and New Zealand History (1997-03-03) -# http://www.jcu.edu.au/aff/history/reviews/davison.htm - -# From Paul Eggert (2005-12-08): -# Implementation Dates of Daylight Saving Time within Australia -# http://www.bom.gov.au/climate/averages/tables/dst_times.shtml -# summarizes daylight saving issues in Australia. - -# From Arthur David Olson (2005-12-12): -# Lawlink NSW:Daylight Saving in New South Wales -# http://www.lawlink.nsw.gov.au/lawlink/Corporate/ll_agdinfo.nsf/pages/community_relations_daylight_saving -# covers New South Wales in particular. - -# From John Mackin (1991-03-06): -# We in Australia have _never_ referred to DST as 'daylight' time. -# It is called 'summer' time. Now by a happy coincidence, 'summer' -# and 'standard' happen to start with the same letter; hence, the -# abbreviation does _not_ change... -# The legislation does not actually define abbreviations, at least -# in this State, but the abbreviation is just commonly taken to be the -# initials of the phrase, and the legislation here uniformly uses -# the phrase 'summer time' and does not use the phrase 'daylight -# time'. -# Announcers on the Commonwealth radio network, the ABC (for Australian -# Broadcasting Commission), use the phrases 'Eastern Standard Time' -# or 'Eastern Summer Time'. (Note, though, that as I say in the -# current australasia file, there is really no such thing.) Announcers -# on its overseas service, Radio Australia, use the same phrases -# prefixed by the word 'Australian' when referring to local times; -# time announcements on that service, naturally enough, are made in UTC. - -# From Paul Eggert (2014-06-30): -# -# Inspired by Mackin's remarks quoted above, earlier versions of this -# file used "EST" for both Eastern Standard Time and Eastern Summer -# Time in Australia, and similarly for "CST", "CWST", and "WST". -# However, these abbreviations were confusing and were not common -# practice among Australians, and there were justifiable complaints -# about them, so I attempted to survey current Australian usage. -# For the tz database, the full English phrase is not that important; -# what matters is the abbreviation. 
It's difficult to survey the web -# directly for abbreviation usage, as there are so many false hits for -# strings like "EST" and "EDT", so I looked for pages that defined an -# abbreviation for eastern or central DST in Australia, and got the -# following numbers of unique hits for the listed Google queries: -# -# 10 "Eastern Daylight Time AEST" site:au [some are false hits] -# 10 "Eastern Summer Time AEST" site:au -# 10 "Summer Time AEDT" site:au -# 13 "EDST Eastern Daylight Saving Time" site:au -# 18 "Summer Time ESST" site:au -# 28 "Eastern Daylight Saving Time EDST" site:au -# 39 "EDT Eastern Daylight Time" site:au [some are false hits] -# 53 "Eastern Daylight Time EDT" site:au [some are false hits] -# 54 "AEDT Australian Eastern Daylight Time" site:au -# 182 "Eastern Daylight Time AEDT" site:au -# -# 17 "Central Daylight Time CDT" site:au [some are false hits] -# 46 "Central Daylight Time ACDT" site:au -# -# I tried several other variants (e.g., "Eastern Summer Time EST") but -# they all returned fewer than 10 unique hits. I also looked for pages -# mentioning both "western standard time" and an abbreviation, since -# there is no WST in the US to generate false hits, and found: -# -# 156 "western standard time" AWST site:au -# 226 "western standard time" WST site:au -# -# I then surveyed the top ten newspapers in Australia by circulation as -# listed in Wikipedia, using Google queries like "AEDT site:heraldsun.com.au" -# and obtaining estimated counts from the initial page of search results. -# All ten papers greatly preferred "AEDT" to "EDT". The papers -# surveyed were the Herald Sun, The Daily Telegraph, The Courier-Mail, -# The Sydney Morning Herald, The West Australian, The Age, The Advertiser, -# The Australian, The Financial Review, and The Herald (Newcastle). -# -# I also searched for historical usage, to see whether abbreviations -# like "AEDT" are new. A Trove search -# found only one newspaper (The Canberra Times) with a house style -# dating back to the 1970s, I expect because other newspapers weren't -# fully indexed. The Canberra Times strongly preferred abbreviations -# like "AEDT". The first occurrence of "AEDT" was a World Weather -# column (1971-11-17, page 24), and of "ACDT" was a Scoreboard column -# (1993-01-24, p 16). The style was the typical usage but was not -# strictly enforced; for example, "Welcome to the twilight zones ..." -# (1994-10-29, p 1) uses the abbreviations AEST/AEDT, CST/CDT, and -# WST, and goes on to say, "The confusion and frustration some feel -# about the lack of uniformity among Australia's six states and two -# territories has prompted one group to form its very own political -# party -- the Sydney-based Daylight Saving Extension Party." -# -# I also surveyed federal government sources. They did not agree: -# -# The Australian Government (2014-03-26) -# http://australia.gov.au/about-australia/our-country/time -# (This document was produced by the Department of Finance.) 
-# AEST ACST AWST AEDT ACDT -# -# Bureau of Meteorology (2012-11-08) -# http://www.bom.gov.au/climate/averages/tables/daysavtm.shtml -# EST CST WST EDT CDT -# -# Civil Aviation Safety Authority (undated) -# http://services.casa.gov.au/outnback/inc/pages/episode3/episode-3_time_zones.shtml -# EST CST WST (no abbreviations given for DST) -# -# Geoscience Australia (2011-11-24) -# http://www.ga.gov.au/geodesy/astro/sunrise.jsp -# AEST ACST AWST AEDT ACDT -# -# Parliamentary Library (2008-11-10) -# http://www.aph.gov.au/binaries/library/pubs/rp/2008-09/09rp14.pdf -# EST CST WST preferred for standard time; AEST AEDT ACST ACDT also used -# -# The Transport Safety Bureau has an extensive series of accident reports, -# and investigators seem to use whatever abbreviation they like. -# Googling site:atsb.gov.au found the following number of unique hits: -# 311 "ESuT", 195 "EDT", 26 "AEDT", 83 "CSuT", 46 "CDT". -# "_SuT" tended to appear in older reports, and "A_DT" tended to -# appear in reports of events with international implications. -# -# From the above it appears that there is a working consensus in -# Australia to use trailing "DT" for daylight saving time; although -# some sources use trailing "SST" or "ST" or "SuT" they are by far in -# the minority. The case for leading "A" is weaker, but since it -# seems to be preferred in the overall web and is preferred in all -# the leading newspaper websites and in many government departments, -# it has a stronger case than omitting the leading "A". The current -# version of the database therefore uses abbreviations like "AEST" and -# "AEDT" for Australian time zones. - -# From Paul Eggert (1995-12-19): -# Shanks & Pottenger report 2:00 for all autumn changes in Australia and NZ. -# Mark Prior writes that his newspaper -# reports that NSW's fall 1995 change will occur at 2:00, -# but Robert Elz says it's been 3:00 in Victoria since 1970 -# and perhaps the newspaper's '2:00' is referring to standard time. -# For now we'll continue to assume 2:00s for changes since 1960. - -# From Eric Ulevik (1998-01-05): -# -# Here are some URLs to Australian time legislation. These URLs are stable, -# and should probably be included in the data file. There are probably more -# relevant entries in this database. -# -# NSW (including LHI and Broken Hill): -# Standard Time Act 1987 (updated 1995-04-04) -# http://www.austlii.edu.au/au/legis/nsw/consol_act/sta1987137/index.html -# ACT -# Standard Time and Summer Time Act 1972 -# http://www.austlii.edu.au/au/legis/act/consol_act/stasta1972279/index.html -# SA -# Standard Time Act, 1898 -# http://www.austlii.edu.au/au/legis/sa/consol_act/sta1898137/index.html - -# From David Grosz (2005-06-13): -# It was announced last week that Daylight Saving would be extended by -# one week next year to allow for the 2006 Commonwealth Games. -# Daylight Saving is now to end for next year only on the first Sunday -# in April instead of the last Sunday in March. -# -# From Gwillim Law (2005-06-14): -# I did some Googling and found that all of those states (and territory) plan -# to extend DST together in 2006. -# ACT: http://www.cmd.act.gov.au/mediareleases/fileread.cfm?file=86.txt -# New South Wales: http://www.thecouriermail.news.com.au/common/story_page/0,5936,15538869%255E1702,00.html -# South Australia: http://www.news.com.au/story/0,10117,15555031-1246,00.html -# Tasmania: http://www.media.tas.gov.au/release.php?id=14772 -# Victoria: I wasn't able to find anything separate, but the other articles -# allude to it. 
-# But not Queensland -# http://www.news.com.au/story/0,10117,15564030-1248,00.html - -# Northern Territory - -# From George Shepherd via Simon Woodhead via Robert Elz (1991-03-06): -# # The NORTHERN TERRITORY.. [ Courtesy N.T. Dept of the Chief Minister ] -# # [ Nov 1990 ] -# # N.T. have never utilised any DST due to sub-tropical/tropical location. -# ... -# Zone Australia/North 9:30 - CST - -# From Bradley White (1991-03-04): -# A recent excerpt from an Australian newspaper... -# the Northern Territory do[es] not have daylight saving. - -# Western Australia - -# From George Shepherd via Simon Woodhead via Robert Elz (1991-03-06): -# # The state of WESTERN AUSTRALIA.. [ Courtesy W.A. dept Premier+Cabinet ] -# # [ Nov 1990 ] -# # W.A. suffers from a great deal of public and political opposition to -# # DST in principle. A bill is brought before parliament in most years, but -# # usually defeated either in the upper house, or in party caucus -# # before reaching parliament. -# ... -# Zone Australia/West 8:00 AW %sST -# ... -# Rule AW 1974 only - Oct lastSun 2:00 1:00 D -# Rule AW 1975 only - Mar Sun>=1 3:00 0 W -# Rule AW 1983 only - Oct lastSun 2:00 1:00 D -# Rule AW 1984 only - Mar Sun>=1 3:00 0 W - -# From Bradley White (1991-03-04): -# A recent excerpt from an Australian newspaper... -# Western Australia...do[es] not have daylight saving. - -# From John D. Newman via Bradley White (1991-11-02): -# Western Australia is still on "winter time". Some DH in Sydney -# rang me at home a few days ago at 6.00am. (He had just arrived at -# work at 9.00am.) -# W.A. is switching to Summer Time on Nov 17th just to confuse -# everybody again. - -# From Arthur David Olson (1992-03-08): -# The 1992 ending date used in the rules is a best guess; -# it matches what was used in the past. - -# The Australian Bureau of Meteorology FAQ -# http://www.bom.gov.au/faq/faqgen.htm -# (1999-09-27) writes that Giles Meteorological Station uses -# South Australian time even though it's located in Western Australia. - -# Queensland -# From George Shepherd via Simon Woodhead via Robert Elz (1991-03-06): -# # The state of QUEENSLAND.. [ Courtesy Qld. Dept Premier Econ&Trade Devel ] -# # [ Dec 1990 ] -# ... -# Zone Australia/Queensland 10:00 AQ %sST -# ... -# Rule AQ 1971 only - Oct lastSun 2:00 1:00 D -# Rule AQ 1972 only - Feb lastSun 3:00 0 E -# Rule AQ 1989 max - Oct lastSun 2:00 1:00 D -# Rule AQ 1990 max - Mar Sun>=1 3:00 0 E - -# From Bradley White (1989-12-24): -# "Australia/Queensland" now observes daylight time (i.e. from -# October 1989). - -# From Bradley White (1991-03-04): -# A recent excerpt from an Australian newspaper... -# ...Queensland...[has] agreed to end daylight saving -# at 3am tomorrow (March 3)... - -# From John Mackin (1991-03-06): -# I can certainly confirm for my part that Daylight Saving in NSW did in fact -# end on Sunday, 3 March. I don't know at what hour, though. (It surprised -# me.) - -# From Bradley White (1992-03-08): -# ...there was recently a referendum in Queensland which resulted -# in the experimental daylight saving system being abandoned. So, ... -# ... -# Rule QLD 1989 1991 - Oct lastSun 2:00 1:00 D -# Rule QLD 1990 1992 - Mar Sun>=1 3:00 0 S -# ... - -# From Arthur David Olson (1992-03-08): -# The chosen rules the union of the 1971/1972 change and the 1989-1992 changes. - -# From Christopher Hunt (2006-11-21), after an advance warning -# from Jesper Nørgaard Welen (2006-11-01): -# WA are trialing DST for three years. 
-# http://www.parliament.wa.gov.au/parliament/bills.nsf/9A1B183144403DA54825721200088DF1/$File/Bill175-1B.pdf - -# From Rives McDow (2002-04-09): -# The most interesting region I have found consists of three towns on the -# southern coast.... South Australia observes daylight saving time; Western -# Australia does not. The two states are one and a half hours apart. The -# residents decided to forget about this nonsense of changing the clock so -# much and set the local time 20 hours and 45 minutes from the -# international date line, or right in the middle of the time of South -# Australia and Western Australia.... -# -# From Paul Eggert (2002-04-09): -# This is confirmed by the section entitled -# "What's the deal with time zones???" in -# http://www.earthsci.unimelb.edu.au/~awatkins/null.html -# -# From Alex Livingston (2006-12-07): -# ... it was just on four years ago that I drove along the Eyre Highway, -# which passes through eastern Western Australia close to the southern -# coast of the continent. -# -# I paid particular attention to the time kept there. There can be no -# dispute that UTC+08:45 was considered "the time" from the border -# village just inside the border with South Australia to as far west -# as just east of Caiguna. There can also be no dispute that Eucla is -# the largest population centre in this zone.... -# -# Now that Western Australia is observing daylight saving, the -# question arose whether this part of the state would follow suit. I -# just called the border village and confirmed that indeed they have, -# meaning that they are now observing UTC+09:45. -# -# (2006-12-09): -# I personally doubt that either experimentation with daylight saving -# in WA or its introduction in SA had anything to do with the genesis -# of this time zone. My hunch is that it's been around since well -# before 1975. I remember seeing it noted on road maps decades ago. - -# From Paul Eggert (2006-12-15): -# For lack of better info, assume the tradition dates back to the -# introduction of standard time in 1895. - - -# southeast Australia -# -# From Paul Eggert (2007-07-23): -# Starting autumn 2008 Victoria, NSW, South Australia, Tasmania and the ACT -# end DST the first Sunday in April and start DST the first Sunday in October. -# http://www.theage.com.au/news/national/daylight-savings-to-span-six-months/2007/06/27/1182623966703.html - - -# South Australia - -# From Bradley White (1991-03-04): -# A recent excerpt from an Australian newspaper... -# ...South Australia...[has] agreed to end daylight saving -# at 3am tomorrow (March 3)... - -# From George Shepherd via Simon Woodhead via Robert Elz (1991-03-06): -# # The state of SOUTH AUSTRALIA....[ Courtesy of S.A. Dept of Labour ] -# # [ Nov 1990 ] -# ... -# Zone Australia/South 9:30 AS %sST -# ... -# Rule AS 1971 max - Oct lastSun 2:00 1:00 D -# Rule AS 1972 1985 - Mar Sun>=1 3:00 0 C -# Rule AS 1986 1990 - Mar Sun>=15 3:00 0 C -# Rule AS 1991 max - Mar Sun>=1 3:00 0 C - -# From Bradley White (1992-03-11): -# Recent correspondence with a friend in Adelaide -# contained the following exchange: "Due to the Adelaide Festival, -# South Australia delays setting back our clocks for a few weeks." - -# From Robert Elz (1992-03-13): -# I heard that apparently (or at least, it appears that) -# South Aus will have an extra 3 weeks daylight saving every even -# numbered year (from 1990). That's when the Adelaide Festival -# is on... - -# From Robert Elz (1992-03-16, 00:57:07 +1000): -# DST didn't end in Adelaide today (yesterday).... 
-# But whether it's "4th Sunday" or "2nd last Sunday" I have no idea whatever... -# (it's just as likely to be "the Sunday we pick for this year"...). - -# From Bradley White (1994-04-11): -# If Sun, 15 March, 1992 was at +1030 as kre asserts, but yet Sun, 20 March, -# 1994 was at +0930 as John Connolly's customer seems to assert, then I can -# only conclude that the actual rule is more complicated.... - -# From John Warburton (1994-10-07): -# The new Daylight Savings dates for South Australia ... -# was gazetted in the Government Hansard on Sep 26 1994.... -# start on last Sunday in October and end in last sunday in March. - -# From Paul Eggert (2007-07-23): -# See "southeast Australia" above for 2008 and later. - -# Tasmania - -# The rules for 1967 through 1991 were reported by George Shepherd -# via Simon Woodhead via Robert Elz (1991-03-06): -# # The state of TASMANIA.. [Courtesy Tasmanian Dept of Premier + Cabinet ] -# # [ Nov 1990 ] - -# From Bill Hart via Guy Harris (1991-10-10): -# Oh yes, the new daylight savings rules are uniquely tasmanian, we have -# 6 weeks a year now when we are out of sync with the rest of Australia -# (but nothing new about that). - -# From Alex Livingston (1999-10-04): -# I heard on the ABC (Australian Broadcasting Corporation) radio news on the -# (long) weekend that Tasmania, which usually goes its own way in this regard, -# has decided to join with most of NSW, the ACT, and most of Victoria -# (Australia) and start daylight saving on the last Sunday in August in 2000 -# instead of the first Sunday in October. - -# Sim Alam (2000-07-03) reported a legal citation for the 2000/2001 rules: -# http://www.thelaw.tas.gov.au/fragview/42++1968+GS3A@EN+2000070300 - -# From Paul Eggert (2007-07-23): -# See "southeast Australia" above for 2008 and later. - -# Victoria - -# The rules for 1971 through 1991 were reported by George Shepherd -# via Simon Woodhead via Robert Elz (1991-03-06): -# # The state of VICTORIA.. [ Courtesy of Vic. Dept of Premier + Cabinet ] -# # [ Nov 1990 ] - -# From Scott Harrington (2001-08-29): -# On KQED's "City Arts and Lectures" program last night I heard an -# interesting story about daylight savings time. Dr. John Heilbron was -# discussing his book "The Sun in the Church: Cathedrals as Solar -# Observatories"[1], and in particular the Shrine of Remembrance[2] located -# in Melbourne, Australia. -# -# Apparently the shrine's main purpose is a beam of sunlight which -# illuminates a special spot on the floor at the 11th hour of the 11th day -# of the 11th month (Remembrance Day) every year in memory of Australia's -# fallen WWI soldiers. And if you go there on Nov. 11, at 11am local time, -# you will indeed see the sunbeam illuminate the special spot at the -# expected time. -# -# However, that is only because of some special mirror contraption that had -# to be employed, since due to daylight savings time, the true solar time of -# the remembrance moment occurs one hour later (or earlier?). Perhaps -# someone with more information on this jury-rig can tell us more. -# -# [1] http://www.hup.harvard.edu/catalog/HEISUN.html -# [2] http://www.shrine.org.au - -# From Paul Eggert (2007-07-23): -# See "southeast Australia" above for 2008 and later. - -# New South Wales - -# From Arthur David Olson: -# New South Wales and subjurisdictions have their own ideas of a fun time. -# Based on law library research by John Mackin, -# who notes: -# In Australia, time is not legislated federally, but rather by the -# individual states. 
Thus, while such terms as "Eastern Standard Time" -# [I mean, of course, Australian EST, not any other kind] are in common -# use, _they have NO REAL MEANING_, as they are not defined in the -# legislation. This is very important to understand. -# I have researched New South Wales time only... - -# From Eric Ulevik (1999-05-26): -# DST will start in NSW on the last Sunday of August, rather than the usual -# October in 2000. See: Matthew Moore, -# Two months more daylight saving, Sydney Morning Herald (1999-05-26). -# http://www.smh.com.au/news/9905/26/pageone/pageone4.html - -# From Paul Eggert (1999-09-27): -# See the following official NSW source: -# Daylight Saving in New South Wales. -# http://dir.gis.nsw.gov.au/cgi-bin/genobject/document/other/daylightsaving/tigGmZ -# -# Narrabri Shire (NSW) council has announced it will ignore the extension of -# daylight saving next year. See: -# Narrabri Council to ignore daylight saving -# http://abc.net.au/news/regionals/neweng/monthly/regeng-22jul1999-1.htm -# (1999-07-22). For now, we'll wait to see if this really happens. -# -# Victoria will following NSW. See: -# Vic to extend daylight saving (1999-07-28) -# http://abc.net.au/local/news/olympics/1999/07/item19990728112314_1.htm -# -# However, South Australia rejected the DST request. See: -# South Australia rejects Olympics daylight savings request (1999-07-19) -# http://abc.net.au/news/olympics/1999/07/item19990719151754_1.htm -# -# Queensland also will not observe DST for the Olympics. See: -# Qld says no to daylight savings for Olympics -# http://abc.net.au/news/olympics/1999/06/item19990601114608_1.htm -# (1999-06-01), which quotes Queensland Premier Peter Beattie as saying -# "Look you've got to remember in my family when this came up last time -# I voted for it, my wife voted against it and she said to me it's all very -# well for you, you don't have to worry about getting the children out of -# bed, getting them to school, getting them to sleep at night. -# I've been through all this argument domestically...my wife rules." -# -# Broken Hill will stick with South Australian time in 2000. See: -# Broken Hill to be behind the times (1999-07-21) -# http://abc.net.au/news/regionals/brokenh/monthly/regbrok-21jul1999-6.htm - -# IATA SSIM (1998-09) says that the spring 2000 change for Australian -# Capital Territory, New South Wales except Lord Howe Island and Broken -# Hill, and Victoria will be August 27, presumably due to the Sydney Olympics. - -# From Eric Ulevik, referring to Sydney's Sun Herald (2000-08-13), page 29: -# The Queensland Premier Peter Beattie is encouraging northern NSW -# towns to use Queensland time. - -# From Paul Eggert (2007-07-23): -# See "southeast Australia" above for 2008 and later. - -# Yancowinna - -# From John Mackin (1989-01-04): -# 'Broken Hill' means the County of Yancowinna. - -# From George Shepherd via Simon Woodhead via Robert Elz (1991-03-06): -# # YANCOWINNA.. [ Confirmation courtesy of Broken Hill Postmaster ] -# # [ Dec 1990 ] -# ... -# # Yancowinna uses Central Standard Time, despite [its] location on the -# # New South Wales side of the S.A. border. Most business and social dealings -# # are with CST zones, therefore CST is legislated by local government -# # although the switch to Summer Time occurs in line with N.S.W. There have -# # been years when this did not apply, but the historical data is not -# # presently available. -# Zone Australia/Yancowinna 9:30 AY %sST -# ... 
-# Rule AY 1971 1985 - Oct lastSun 2:00 1:00 D -# Rule AY 1972 only - Feb lastSun 3:00 0 C -# [followed by other Rules] - -# Lord Howe Island - -# From George Shepherd via Simon Woodhead via Robert Elz (1991-03-06): -# LHI... [ Courtesy of Pauline Van Winsen ] -# [ Dec 1990 ] -# Lord Howe Island is located off the New South Wales coast, and is half an -# hour ahead of NSW time. - -# From James Lonergan, Secretary, Lord Howe Island Board (2000-01-27): -# Lord Howe Island summer time in 2000/2001 will commence on the same -# date as the rest of NSW (i.e. 2000-08-27). For your information the -# Lord Howe Island Board (controlling authority for the Island) is -# seeking the community's views on various options for summer time -# arrangements on the Island, e.g. advance clocks by 1 full hour -# instead of only 30 minutes. [Dependent] on the wishes of residents -# the Board may approach the NSW government to change the existing -# arrangements. The starting date for summer time on the Island will -# however always coincide with the rest of NSW. - -# From James Lonergan, Secretary, Lord Howe Island Board (2000-10-25): -# Lord Howe Island advances clocks by 30 minutes during DST in NSW and retards -# clocks by 30 minutes when DST finishes. Since DST was most recently -# introduced in NSW, the "changeover" time on the Island has been 02:00 as -# shown on clocks on LHI. I guess this means that for 30 minutes at the start -# of DST, LHI is actually 1 hour ahead of the rest of NSW. - -# From Paul Eggert (2006-03-22): -# For Lord Howe dates we use Shanks & Pottenger through 1989, and -# Lonergan thereafter. For times we use Lonergan. - -# From Paul Eggert (2007-07-23): -# See "southeast Australia" above for 2008 and later. - -# From Steffen Thorsen (2009-04-28): -# According to the official press release, South Australia's extended daylight -# saving period will continue with the same rules as used during the 2008-2009 -# summer (southern hemisphere). -# -# From -# http://www.safework.sa.gov.au/uploaded_files/DaylightDatesSet.pdf -# The extended daylight saving period that South Australia has been trialling -# for over the last year is now set to be ongoing. -# Daylight saving will continue to start on the first Sunday in October each -# year and finish on the first Sunday in April the following year. -# Industrial Relations Minister, Paul Caica, says this provides South Australia -# with a consistent half hour time difference with NSW, Victoria, Tasmania and -# the ACT for all 52 weeks of the year... -# -# We have a wrap-up here: -# http://www.timeanddate.com/news/time/south-australia-extends-dst.html -############################################################################### - -# New Zealand - -# From Mark Davies (1990-10-03): -# the 1989/90 year was a trial of an extended "daylight saving" period. -# This trial was deemed successful and the extended period adopted for -# subsequent years (with the addition of a further week at the start). -# source - phone call to Ministry of Internal Affairs Head Office. - -# From George Shepherd via Simon Woodhead via Robert Elz (1991-03-06): -# # The Country of New Zealand (Australia's east island -) Gee they hate that! -# # or is Australia the west island of N.Z. -# # [ courtesy of Geoff Tribble.. Auckland N.Z. ] -# # [ Nov 1990 ] -# ... -# Rule NZ 1974 1988 - Oct lastSun 2:00 1:00 D -# Rule NZ 1989 max - Oct Sun>=1 2:00 1:00 D -# Rule NZ 1975 1989 - Mar Sun>=1 3:00 0 S -# Rule NZ 1990 max - Mar lastSun 3:00 0 S -# ... 
-# Zone NZ 12:00 NZ NZ%sT # New Zealand -# Zone NZ-CHAT 12:45 - NZ-CHAT # Chatham Island - -# From Arthur David Olson (1992-03-08): -# The chosen rules use the Davies October 8 values for the start of DST in 1989 -# rather than the October 1 value. - -# From Paul Eggert (1995-12-19); -# Shank & Pottenger report 2:00 for all autumn changes in Australia and NZ. -# Robert Uzgalis writes that the New Zealand Daylight -# Savings Time Order in Council dated 1990-06-18 specifies 2:00 standard -# time on both the first Sunday in October and the third Sunday in March. -# As with Australia, we'll assume the tradition is 2:00s, not 2:00. -# -# From Paul Eggert (2006-03-22): -# The Department of Internal Affairs (DIA) maintains a brief history, -# as does Carol Squires; see tz-link.htm for the full references. -# Use these sources in preference to Shanks & Pottenger. -# -# For Chatham, IATA SSIM (1991/1999) gives the NZ rules but with -# transitions at 2:45 local standard time; this confirms that Chatham -# is always exactly 45 minutes ahead of Auckland. - -# From Colin Sharples (2007-04-30): -# DST will now start on the last Sunday in September, and end on the -# first Sunday in April. The changes take effect this year, meaning -# that DST will begin on 2007-09-30 2008-04-06. -# http://www.dia.govt.nz/diawebsite.nsf/wpg_URL/Services-Daylight-Saving-Daylight-saving-to-be-extended - -# From Paul Eggert (2014-07-14): -# Chatham Island time was formally standardized on 1957-01-01 by -# New Zealand's Standard Time Amendment Act 1956 (1956-10-26). -# http://www.austlii.edu.au/nz/legis/hist_act/staa19561956n100244.pdf -# According to Google Books snippet view, a speaker in the New Zealand -# parliamentary debates in 1956 said "Clause 78 makes provision for standard -# time in the Chatham Islands. The time there is 45 minutes in advance of New -# Zealand time. I understand that is the time they keep locally, anyhow." -# For now, assume this practice goes back to the introduction of standard time -# in New Zealand, as this would make Chatham Islands time almost exactly match -# LMT back when New Zealand was at UT +11:30; also, assume Chatham Islands did -# not observe New Zealand's prewar DST. - -############################################################################### - - -# Fiji - -# Howse writes (p 153) that in 1879 the British governor of Fiji -# enacted an ordinance standardizing the islands on Antipodean Time -# instead of the American system (which was one day behind). - -# From Rives McDow (1998-10-08): -# Fiji will introduce DST effective 0200 local time, 1998-11-01 -# until 0300 local time 1999-02-28. Each year the DST period will -# be from the first Sunday in November until the last Sunday in February. - -# From Paul Eggert (2000-01-08): -# IATA SSIM (1999-09) says DST ends 0100 local time. Go with McDow. - -# From the BBC World Service in -# http://news.bbc.co.uk/2/hi/asia-pacific/205226.stm (1998-10-31 16:03 UTC): -# The Fijian government says the main reasons for the time change is to -# improve productivity and reduce road accidents.... [T]he move is also -# intended to boost Fiji's ability to attract tourists to witness the dawning -# of the new millennium. - -# http://www.fiji.gov.fj/press/2000_09/2000_09_13-05.shtml (2000-09-13) -# reports that Fiji has discontinued DST. - - -# Kiribati - -# From Paul Eggert (1996-01-22): -# Today's _Wall Street Journal_ (page 1) reports that Kiribati -# "declared it the same day [throughout] the country as of Jan. 
1, 1995" -# as part of the competition to be first into the 21st century. - - -# Kwajalein - -# In comp.risks 14.87 (26 August 1993), Peter Neumann writes: -# I wonder what happened in Kwajalein, where there was NO Friday, -# 1993-08-20. Thursday night at midnight Kwajalein switched sides with -# respect to the International Date Line, to rejoin its fellow islands, -# going from 11:59 p.m. Thursday to 12:00 m. Saturday in a blink. - - -# N Mariana Is, Guam - -# Howse writes (p 153) "The Spaniards, on the other hand, reached the -# Philippines and the Ladrones from America," and implies that the Ladrones -# (now called the Marianas) kept American date for quite some time. -# For now, we assume the Ladrones switched at the same time as the Philippines; -# see Asia/Manila. - -# US Public Law 106-564 (2000-12-23) made UT +10 the official standard time, -# under the name "Chamorro Standard Time". There is no official abbreviation, -# but Congressman Robert A. Underwood, author of the bill that became law, -# wrote in a press release (2000-12-27) that he will seek the use of "ChST". - - -# Micronesia - -# Alan Eugene Davis writes (1996-03-16), -# "I am certain, having lived there for the past decade, that 'Truk' -# (now properly known as Chuuk) ... is in the time zone GMT+10." -# -# Shanks & Pottenger write that Truk switched from UT +10 to +11 -# on 1978-10-01; ignore this for now. - -# From Paul Eggert (1999-10-29): -# The Federated States of Micronesia Visitors Board writes in -# The Federated States of Micronesia - Visitor Information (1999-01-26) -# http://www.fsmgov.org/info/clocks.html -# that Truk and Yap are UT +10, and Ponape and Kosrae are +11. -# We don't know when Kosrae switched from +12; assume January 1 for now. - - -# Midway - -# From Charles T O'Connor, KMTH DJ (1956), -# quoted in the KTMH section of the Radio Heritage Collection -# (2002-12-31): -# For the past two months we've been on what is known as Daylight -# Saving Time. This time has put us on air at 5am in the morning, -# your time down there in New Zealand. Starting September 2, 1956 -# we'll again go back to Standard Time. This'll mean that we'll go to -# air at 6am your time. -# -# From Paul Eggert (2003-03-23): -# We don't know the date of that quote, but we'll guess they -# started DST on June 3. Possibly DST was observed other years -# in Midway, but we have no record of it. - -# Norfolk - -# From Alexander Krivenyshev (2015-09-23): -# Norfolk Island will change ... from +1130 to +1100: -# https://www.comlaw.gov.au/Details/F2015L01483/Explanatory%20Statement/Text -# ... at 12.30 am (by legal time in New South Wales) on 4 October 2015. -# http://www.norfolkisland.gov.nf/nia/MediaRelease/Media%20Release%20Norfolk%20Island%20Standard%20Time%20Change.pdf - -# From Paul Eggert (2015-09-23): -# Transitions before 2015 are from timeanddate.com, which consulted -# the Norfolk Island Museum and the Australian Bureau of Meteorology's -# Norfolk Island station, and found no record of Norfolk observing DST -# other than in 1974/5. See: -# http://www.timeanddate.com/time/australia/norfolk-island.html - -# Pitcairn - -# From Rives McDow (1999-11-08): -# A Proclamation was signed by the Governor of Pitcairn on the 27th March 1998 -# with regard to Pitcairn Standard Time. The Proclamation is as follows. -# -# The local time for general purposes in the Islands shall be -# Co-ordinated Universal time minus 8 hours and shall be known -# as Pitcairn Standard Time. -# -# ... 
I have also seen Pitcairn listed as UTC minus 9 hours in several -# references, and can only assume that this was an error in interpretation -# somehow in light of this proclamation. - -# From Rives McDow (1999-11-09): -# The Proclamation regarding Pitcairn time came into effect on 27 April 1998 -# ... at midnight. - -# From Howie Phelps (1999-11-10), who talked to a Pitcairner via shortwave: -# Betty Christian told me yesterday that their local time is the same as -# Pacific Standard Time. They used to be 1/2 hour different from us here in -# Sacramento but it was changed a couple of years ago. - - -# (Western) Samoa and American Samoa - -# Howse writes (p 153, citing p 10 of the 1883-11-18 New York Herald) -# that in 1879 the King of Samoa decided to change -# "the date in his kingdom from the Antipodean to the American system, -# ordaining - by a masterpiece of diplomatic flattery - that -# the Fourth of July should be celebrated twice in that year." - -# Although Shanks & Pottenger says they both switched to UT -11:30 -# in 1911, and to -11 in 1950. many earlier sources give -11 -# for American Samoa, e.g., the US National Bureau of Standards -# circular "Standard Time Throughout the World", 1932. -# Assume American Samoa switched to -11 in 1911, not 1950, -# and that after 1950 they agreed until (western) Samoa skipped a -# day in 2011. Assume also that the Samoas follow the US and New -# Zealand's "ST"/"DT" style of daylight-saving abbreviations. - -# Tonga - -# From Paul Eggert (1996-01-22): -# Today's _Wall Street Journal_ (p 1) reports that "Tonga has been plotting -# to sneak ahead of [New Zealanders] by introducing daylight-saving time." -# Since Kiribati has moved the Date Line it's not clear what Tonga will do. - -# Don Mundell writes in the 1997-02-20 Tonga Chronicle -# How Tonga became 'The Land where Time Begins': -# http://www.tongatapu.net.to/tonga/homeland/timebegins.htm -# -# Until 1941 Tonga maintained a standard time 50 minutes ahead of NZST -# 12 hours and 20 minutes ahead of GMT. When New Zealand adjusted its -# standard time in 1940s, Tonga had the choice of subtracting from its -# local time to come on the same standard time as New Zealand or of -# advancing its time to maintain the differential of 13 degrees -# (approximately 50 minutes ahead of New Zealand time). -# -# Because His Majesty King Tāufaʻāhau Tupou IV, then Crown Prince -# Tungī, preferred to ensure Tonga's title as the land where time -# begins, the Legislative Assembly approved the latter change. -# -# But some of the older, more conservative members from the outer -# islands objected. "If at midnight on Dec. 31, we move ahead 40 -# minutes, as your Royal Highness wishes, what becomes of the 40 -# minutes we have lost?" -# -# The Crown Prince, presented an unanswerable argument: "Remember that -# on the World Day of Prayer, you would be the first people on Earth -# to say your prayers in the morning." - -# From Paul Eggert (2006-03-22): -# Shanks & Pottenger say the transition was on 1968-10-01; go with Mundell. - -# From Eric Ulevik (1999-05-03): -# Tonga's director of tourism, who is also secretary of the National Millennium -# Committee, has a plan to get Tonga back in front. -# He has proposed a one-off move to tropical daylight saving for Tonga from -# October to March, which has won approval in principle from the Tongan -# Government. 
- -# From Steffen Thorsen (1999-09-09): -# * Tonga will introduce DST in November -# -# I was given this link by John Letts: -# http://news.bbc.co.uk/hi/english/world/asia-pacific/newsid_424000/424764.stm -# -# I have not been able to find exact dates for the transition in November -# yet. By reading this article it seems like Fiji will be 14 hours ahead -# of UTC as well, but as far as I know Fiji will only be 13 hours ahead -# (12 + 1 hour DST). - -# From Arthur David Olson (1999-09-20): -# According to : -# "Daylight Savings Time will take effect on Oct. 2 through April 15, 2000 -# and annually thereafter from the first Saturday in October through the -# third Saturday of April. Under the system approved by Privy Council on -# Sept. 10, clocks must be turned ahead one hour on the opening day and -# set back an hour on the closing date." -# Alas, no indication of the time of day. - -# From Rives McDow (1999-10-06): -# Tonga started its Daylight Saving on Saturday morning October 2nd at 0200am. -# Daylight Saving ends on April 16 at 0300am which is Sunday morning. - -# From Steffen Thorsen (2000-10-31): -# Back in March I found a notice on the website http://www.tongaonline.com -# that Tonga changed back to standard time one month early, on March 19 -# instead of the original reported date April 16. Unfortunately, the article -# is no longer available on the site, and I did not make a copy of the -# text, and I have forgotten to report it here. -# (Original URL was ) - -# From Rives McDow (2000-12-01): -# Tonga is observing DST as of 2000-11-04 and will stop on 2001-01-27. - -# From Sione Moala-Mafi (2001-09-20) via Rives McDow: -# At 2:00am on the first Sunday of November, the standard time in the Kingdom -# shall be moved forward by one hour to 3:00am. At 2:00am on the last Sunday -# of January the standard time in the Kingdom shall be moved backward by one -# hour to 1:00am. - -# From Pulu ʻAnau (2002-11-05): -# The law was for 3 years, supposedly to get renewed. It wasn't. - -# From Pulu ʻAnau (2016-10-27): -# http://mic.gov.to/news-today/press-releases/6375-daylight-saving-set-to-run-from-6-november-2016-to-15-january-2017 -# Cannot find anyone who knows the rules, has seen the duration or has seen -# the cabinet decision, but it appears we are following Fiji's rule set. -# -# From Tim Parenti (2016-10-26): -# Assume Tonga will observe DST from the first Sunday in November at 02:00 -# through the third Sunday in January at 03:00, like Fiji, for now. - -# Wake - -# From Vernice Anderson, Personal Secretary to Philip Jessup, -# US Ambassador At Large (oral history interview, 1971-02-02): -# -# Saturday, the 14th [of October, 1950] - ... The time was all the -# more confusing at that point, because we had crossed the -# International Date Line, thus getting two Sundays. Furthermore, we -# discovered that Wake Island had two hours of daylight saving time -# making calculation of time in Washington difficult if not almost -# impossible. -# -# http://www.trumanlibrary.org/wake/meeting.htm - -# From Paul Eggert (2003-03-23): -# We have no other report of DST in Wake Island, so omit this info for now. - -############################################################################### - -# The International Date Line - -# From Gwillim Law (2000-01-03): -# -# The International Date Line is not defined by any international standard, -# convention, or treaty. Mapmakers are free to draw it as they please. 
-# Reputable mapmakers will simply ensure that every point of land appears on -# the correct side of the IDL, according to the date legally observed there. -# -# When Kiribati adopted a uniform date in 1995, thereby moving the Phoenix and -# Line Islands to the west side of the IDL (or, if you prefer, moving the IDL -# to the east side of the Phoenix and Line Islands), I suppose that most -# mapmakers redrew the IDL following the boundary of Kiribati. Even that line -# has a rather arbitrary nature. The straight-line boundaries between Pacific -# island nations that are shown on many maps are based on an international -# convention, but are not legally binding national borders.... The date is -# governed by the IDL; therefore, even on the high seas, there may be some -# places as late as fourteen hours later than UTC. And, since the IDL is not -# an international standard, there are some places on the high seas where the -# correct date is ambiguous. - -# From Wikipedia (2005-08-31): -# Before 1920, all ships kept local apparent time on the high seas by setting -# their clocks at night or at the morning sight so that, given the ship's -# speed and direction, it would be 12 o'clock when the Sun crossed the ship's -# meridian (12 o'clock = local apparent noon). During 1917, at the -# Anglo-French Conference on Time-keeping at Sea, it was recommended that all -# ships, both military and civilian, should adopt hourly standard time zones -# on the high seas. Whenever a ship was within the territorial waters of any -# nation it would use that nation's standard time. The captain was permitted -# to change his ship's clocks at a time of his choice following his ship's -# entry into another zone time - he often chose midnight. These zones were -# adopted by all major fleets between 1920 and 1925 but not by many -# independent merchant ships until World War II. - -# From Paul Eggert, using references suggested by Oscar van Vlijmen -# (2005-03-20): -# -# The American Practical Navigator (2002) -# http://pollux.nss.nima.mil/pubs/pubs_j_apn_sections.html?rid=187 -# talks only about the 180-degree meridian with respect to ships in -# international waters; it ignores the international date line. diff --git a/src/timezone/data/backward b/src/timezone/data/backward deleted file mode 100644 index 09f2a31b68..0000000000 --- a/src/timezone/data/backward +++ /dev/null @@ -1,126 +0,0 @@ -# This file is in the public domain, so clarified as of -# 2009-05-17 by Arthur David Olson. - -# This file provides links between current names for time zones -# and their old names. Many names changed in late 1993. 
- -# Link TARGET LINK-NAME -Link Africa/Nairobi Africa/Asmera -Link Africa/Abidjan Africa/Timbuktu -Link America/Argentina/Catamarca America/Argentina/ComodRivadavia -Link America/Adak America/Atka -Link America/Argentina/Buenos_Aires America/Buenos_Aires -Link America/Argentina/Catamarca America/Catamarca -Link America/Atikokan America/Coral_Harbour -Link America/Argentina/Cordoba America/Cordoba -Link America/Tijuana America/Ensenada -Link America/Indiana/Indianapolis America/Fort_Wayne -Link America/Indiana/Indianapolis America/Indianapolis -Link America/Argentina/Jujuy America/Jujuy -Link America/Indiana/Knox America/Knox_IN -Link America/Kentucky/Louisville America/Louisville -Link America/Argentina/Mendoza America/Mendoza -Link America/Toronto America/Montreal -Link America/Rio_Branco America/Porto_Acre -Link America/Argentina/Cordoba America/Rosario -Link America/Tijuana America/Santa_Isabel -Link America/Denver America/Shiprock -Link America/Port_of_Spain America/Virgin -Link Pacific/Auckland Antarctica/South_Pole -Link Asia/Ashgabat Asia/Ashkhabad -Link Asia/Kolkata Asia/Calcutta -Link Asia/Shanghai Asia/Chongqing -Link Asia/Shanghai Asia/Chungking -Link Asia/Dhaka Asia/Dacca -Link Asia/Shanghai Asia/Harbin -Link Asia/Urumqi Asia/Kashgar -Link Asia/Kathmandu Asia/Katmandu -Link Asia/Macau Asia/Macao -Link Asia/Yangon Asia/Rangoon -Link Asia/Ho_Chi_Minh Asia/Saigon -Link Asia/Jerusalem Asia/Tel_Aviv -Link Asia/Thimphu Asia/Thimbu -Link Asia/Makassar Asia/Ujung_Pandang -Link Asia/Ulaanbaatar Asia/Ulan_Bator -Link Atlantic/Faroe Atlantic/Faeroe -Link Europe/Oslo Atlantic/Jan_Mayen -Link Australia/Sydney Australia/ACT -Link Australia/Sydney Australia/Canberra -Link Australia/Lord_Howe Australia/LHI -Link Australia/Sydney Australia/NSW -Link Australia/Darwin Australia/North -Link Australia/Brisbane Australia/Queensland -Link Australia/Adelaide Australia/South -Link Australia/Hobart Australia/Tasmania -Link Australia/Melbourne Australia/Victoria -Link Australia/Perth Australia/West -Link Australia/Broken_Hill Australia/Yancowinna -Link America/Rio_Branco Brazil/Acre -Link America/Noronha Brazil/DeNoronha -Link America/Sao_Paulo Brazil/East -Link America/Manaus Brazil/West -Link America/Halifax Canada/Atlantic -Link America/Winnipeg Canada/Central -Link America/Regina Canada/East-Saskatchewan -Link America/Toronto Canada/Eastern -Link America/Edmonton Canada/Mountain -Link America/St_Johns Canada/Newfoundland -Link America/Vancouver Canada/Pacific -Link America/Regina Canada/Saskatchewan -Link America/Whitehorse Canada/Yukon -Link America/Santiago Chile/Continental -Link Pacific/Easter Chile/EasterIsland -Link America/Havana Cuba -Link Africa/Cairo Egypt -Link Europe/Dublin Eire -Link Europe/London Europe/Belfast -Link Europe/Chisinau Europe/Tiraspol -Link Europe/London GB -Link Europe/London GB-Eire -Link Etc/GMT GMT+0 -Link Etc/GMT GMT-0 -Link Etc/GMT GMT0 -Link Etc/GMT Greenwich -Link Asia/Hong_Kong Hongkong -Link Atlantic/Reykjavik Iceland -Link Asia/Tehran Iran -Link Asia/Jerusalem Israel -Link America/Jamaica Jamaica -Link Asia/Tokyo Japan -Link Pacific/Kwajalein Kwajalein -Link Africa/Tripoli Libya -Link America/Tijuana Mexico/BajaNorte -Link America/Mazatlan Mexico/BajaSur -Link America/Mexico_City Mexico/General -Link Pacific/Auckland NZ -Link Pacific/Chatham NZ-CHAT -Link America/Denver Navajo -Link Asia/Shanghai PRC -Link Pacific/Honolulu Pacific/Johnston -Link Pacific/Pohnpei Pacific/Ponape -Link Pacific/Pago_Pago Pacific/Samoa -Link Pacific/Chuuk Pacific/Truk -Link 
Pacific/Chuuk Pacific/Yap -Link Europe/Warsaw Poland -Link Europe/Lisbon Portugal -Link Asia/Taipei ROC -Link Asia/Seoul ROK -Link Asia/Singapore Singapore -Link Europe/Istanbul Turkey -Link Etc/UCT UCT -Link America/Anchorage US/Alaska -Link America/Adak US/Aleutian -Link America/Phoenix US/Arizona -Link America/Chicago US/Central -Link America/Indiana/Indianapolis US/East-Indiana -Link America/New_York US/Eastern -Link Pacific/Honolulu US/Hawaii -Link America/Indiana/Knox US/Indiana-Starke -Link America/Detroit US/Michigan -Link America/Denver US/Mountain -Link America/Los_Angeles US/Pacific -Link Pacific/Pago_Pago US/Samoa -Link Etc/UTC UTC -Link Etc/UTC Universal -Link Europe/Moscow W-SU -Link Etc/UTC Zulu diff --git a/src/timezone/data/backzone b/src/timezone/data/backzone deleted file mode 100644 index 9ce78316c2..0000000000 --- a/src/timezone/data/backzone +++ /dev/null @@ -1,675 +0,0 @@ -# Zones that go back beyond the scope of the tz database - -# This file is in the public domain. - -# This file is by no means authoritative; if you think you know -# better, go ahead and edit it (and please send any changes to -# tz@iana.org for general use in the future). For more, please see -# the file CONTRIBUTING in the tz distribution. - - -# From Paul Eggert (2014-10-31): - -# This file contains data outside the normal scope of the tz database, -# in that its zones do not differ from normal tz zones after 1970. -# Links in this file point to zones in this file, superseding links in -# the file 'backward'. - -# Although zones in this file may be of some use for analyzing -# pre-1970 time stamps, they are less reliable, cover only a tiny -# sliver of the pre-1970 era, and cannot feasibly be improved to cover -# most of the era. Because the zones are out of normal scope for the -# database, less effort is put into maintaining this file. Many of -# the zones were formerly in other source files, but were removed or -# replaced by links as their data entries were questionable and/or they -# differed from other zones only in pre-1970 time stamps. - -# Unless otherwise specified, the source for data through 1990 is: -# Thomas G. Shanks and Rique Pottenger, The International Atlas (6th edition), -# San Diego: ACS Publications, Inc. (2003). -# Unfortunately this book contains many errors and cites no sources. - -# This file is not intended to be compiled standalone, as it -# assumes rules from other files. In the tz distribution, use -# 'make PACKRATDATA=backzone zones' to compile and install this file. - -# Zones are sorted by zone name. Each zone is preceded by the -# name of the country that the zone is in, along with any other -# commentary and rules associated with the entry. -# -# As explained in the zic man page, the zone columns are: -# Zone NAME GMTOFF RULES FORMAT [UNTIL] - -# Ethiopia -# From Paul Eggert (2014-07-31): -# Like the Swahili of Kenya and Tanzania, many Ethiopians keep a -# 12-hour clock starting at our 06:00, so their "8 o'clock" is our -# 02:00 or 14:00. Keep this in mind when you ask the time in Amharic. -# -# Shanks & Pottenger write that Ethiopia had six narrowly-spaced time -# zones between 1870 and 1890, that they merged to 38E50 (2:35:20) in -# 1890, and that they switched to 3:00 on 1936-05-05. Perhaps 38E50 -# was for Adis Dera. Quite likely the Shanks data entries are wrong -# anyway. 
-Zone Africa/Addis_Ababa 2:34:48 - LMT 1870 - 2:35:20 - ADMT 1936 May 5 # Adis Dera MT - 3:00 - EAT - -# Eritrea -Zone Africa/Asmara 2:35:32 - LMT 1870 - 2:35:32 - AMT 1890 # Asmara Mean Time - 2:35:20 - ADMT 1936 May 5 # Adis Dera MT - 3:00 - EAT -Link Africa/Asmara Africa/Asmera - -# Mali (southern) -Zone Africa/Bamako -0:32:00 - LMT 1912 - 0:00 - GMT 1934 Feb 26 - -1:00 - -01 1960 Jun 20 - 0:00 - GMT - -# Central African Republic -Zone Africa/Bangui 1:14:20 - LMT 1912 - 1:00 - WAT - -# Gambia -Zone Africa/Banjul -1:06:36 - LMT 1912 - -1:06:36 - BMT 1935 # Banjul Mean Time - -1:00 - -01 1964 - 0:00 - GMT - -# Malawi -Zone Africa/Blantyre 2:20:00 - LMT 1903 Mar - 2:00 - CAT - -# Republic of the Congo -Zone Africa/Brazzaville 1:01:08 - LMT 1912 - 1:00 - WAT - -# Burundi -Zone Africa/Bujumbura 1:57:28 - LMT 1890 - 2:00 - CAT - -# Guinea -Zone Africa/Conakry -0:54:52 - LMT 1912 - 0:00 - GMT 1934 Feb 26 - -1:00 - -01 1960 - 0:00 - GMT - -# Senegal -Zone Africa/Dakar -1:09:44 - LMT 1912 - -1:00 - -01 1941 Jun - 0:00 - GMT - -# Tanzania -Zone Africa/Dar_es_Salaam 2:37:08 - LMT 1931 - 3:00 - EAT 1948 - 2:45 - +0245 1961 - 3:00 - EAT - -# Djibouti -Zone Africa/Djibouti 2:52:36 - LMT 1911 Jul - 3:00 - EAT - -# Cameroon -# Whitman says they switched to 1:00 in 1920; go with Shanks & Pottenger. -Zone Africa/Douala 0:38:48 - LMT 1912 - 1:00 - WAT -# Sierra Leone -# From Paul Eggert (2014-08-12): -# The following table is from Shanks & Pottenger, but it can't be right. -# Whitman gives Mar 31 - Aug 31 for 1931 on. -# The International Hydrographic Bulletin, 1932-33, p 63 says that -# Sierra Leone would advance its clocks by 20 minutes on 1933-10-01. -# Rule NAME FROM TO TYPE IN ON AT SAVE LETTER/S -Rule SL 1935 1942 - Jun 1 0:00 0:40 -0020 -Rule SL 1935 1942 - Oct 1 0:00 0 -01 -Rule SL 1957 1962 - Jun 1 0:00 1:00 +01 -Rule SL 1957 1962 - Sep 1 0:00 0 GMT -Zone Africa/Freetown -0:53:00 - LMT 1882 - -0:53:00 - FMT 1913 Jun # Freetown Mean Time - -1:00 SL %s 1957 - 0:00 SL GMT/+01 - -# Botswana -# From Paul Eggert (2013-02-21): -# Milne says they were regulated by the Cape Town Signal in 1899; -# assume they switched to 2:00 when Cape Town did. -Zone Africa/Gaborone 1:43:40 - LMT 1885 - 1:30 - SAST 1903 Mar - 2:00 - CAT 1943 Sep 19 2:00 - 2:00 1:00 CAST 1944 Mar 19 2:00 - 2:00 - CAT - -# Zimbabwe -Zone Africa/Harare 2:04:12 - LMT 1903 Mar - 2:00 - CAT - -# South Sudan -Zone Africa/Juba 2:06:24 - LMT 1931 - 2:00 Sudan CA%sT 2000 Jan 15 12:00 - 3:00 - EAT - -# Uganda -Zone Africa/Kampala 2:09:40 - LMT 1928 Jul - 3:00 - EAT 1930 - 2:30 - +0230 1948 - 2:45 - +0245 1957 - 3:00 - EAT - -# Rwanda -Zone Africa/Kigali 2:00:16 - LMT 1935 Jun - 2:00 - CAT - -# Democratic Republic of the Congo (west) -Zone Africa/Kinshasa 1:01:12 - LMT 1897 Nov 9 - 1:00 - WAT - -# Gabon -Zone Africa/Libreville 0:37:48 - LMT 1912 - 1:00 - WAT - -# Togo -Zone Africa/Lome 0:04:52 - LMT 1893 - 0:00 - GMT - -# Angola -# -# Shanks gives 1911-05-26 for the transition to WAT, -# evidently confusing the date of the Portuguese decree -# https://dre.pt/pdf1sdip/1911/05/12500/23132313.pdf -# with the date that it took effect, namely 1912-01-01. 
-# -Zone Africa/Luanda 0:52:56 - LMT 1892 - 0:52:04 - +005204 1912 Jan 1 - 1:00 - WAT - -# Democratic Republic of the Congo (east) -Zone Africa/Lubumbashi 1:49:52 - LMT 1897 Nov 9 - 2:00 - CAT - -# Zambia -Zone Africa/Lusaka 1:53:08 - LMT 1903 Mar - 2:00 - CAT - -# Equatorial Guinea -# -# Although Shanks says that Malabo switched from UT +00 to +01 on 1963-12-15, -# a Google Books search says that London Calling, Issues 432-465 (1948), p 19, -# says that Spanish Guinea was at +01 back then. The Shanks data entries -# are most likely wrong, but we have nothing better; use them here for now. -# -Zone Africa/Malabo 0:35:08 - LMT 1912 - 0:00 - GMT 1963 Dec 15 - 1:00 - WAT - -# Lesotho -Zone Africa/Maseru 1:50:00 - LMT 1903 Mar - 2:00 - SAST 1943 Sep 19 2:00 - 2:00 1:00 SAST 1944 Mar 19 2:00 - 2:00 - SAST - -# Swaziland -Zone Africa/Mbabane 2:04:24 - LMT 1903 Mar - 2:00 - SAST - -# Somalia -Zone Africa/Mogadishu 3:01:28 - LMT 1893 Nov - 3:00 - EAT 1931 - 2:30 - +0230 1957 - 3:00 - EAT - -# Niger -Zone Africa/Niamey 0:08:28 - LMT 1912 - -1:00 - -01 1934 Feb 26 - 0:00 - GMT 1960 - 1:00 - WAT - -# Mauritania -Zone Africa/Nouakchott -1:03:48 - LMT 1912 - 0:00 - GMT 1934 Feb 26 - -1:00 - -01 1960 Nov 28 - 0:00 - GMT - -# Burkina Faso -Zone Africa/Ouagadougou -0:06:04 - LMT 1912 - 0:00 - GMT - -# Benin -# Whitman says they switched to 1:00 in 1946, not 1934; -# go with Shanks & Pottenger. -Zone Africa/Porto-Novo 0:10:28 - LMT 1912 Jan 1 - 0:00 - GMT 1934 Feb 26 - 1:00 - WAT - -# São Tomé and Príncipe -Zone Africa/Sao_Tome 0:26:56 - LMT 1884 - -0:36:32 - LMT 1912 # Lisbon Mean Time - 0:00 - GMT - -# Mali (northern) -Zone Africa/Timbuktu -0:12:04 - LMT 1912 - 0:00 - GMT - -# Anguilla -Zone America/Anguilla -4:12:16 - LMT 1912 Mar 2 - -4:00 - AST - -# Antigua and Barbuda -Zone America/Antigua -4:07:12 - LMT 1912 Mar 2 - -5:00 - EST 1951 - -4:00 - AST - -# Chubut, Argentina -# The name "Comodoro Rivadavia" exceeds the 14-byte POSIX limit. -Zone America/Argentina/ComodRivadavia -4:30:00 - LMT 1894 Oct 31 - -4:16:48 - CMT 1920 May - -4:00 - -04 1930 Dec - -4:00 Arg -04/-03 1969 Oct 5 - -3:00 Arg -03/-02 1991 Mar 3 - -4:00 - -04 1991 Oct 20 - -3:00 Arg -03/-02 1999 Oct 3 - -4:00 Arg -04/-03 2000 Mar 3 - -3:00 - -03 2004 Jun 1 - -4:00 - -04 2004 Jun 20 - -3:00 - -03 - -# Aruba -Zone America/Aruba -4:40:24 - LMT 1912 Feb 12 # Oranjestad - -4:30 - -0430 1965 - -4:00 - AST - -# Cayman Is -Zone America/Cayman -5:25:32 - LMT 1890 # Georgetown - -5:07:11 - KMT 1912 Feb # Kingston Mean Time - -5:00 - EST - -# Canada -Zone America/Coral_Harbour -5:32:40 - LMT 1884 - -5:00 NT_YK E%sT 1946 - -5:00 - EST - -# Dominica -Zone America/Dominica -4:05:36 - LMT 1911 Jul 1 0:01 # Roseau - -4:00 - AST - -# Baja California -# See 'northamerica' for why this entry is here rather than there. -Zone America/Ensenada -7:46:28 - LMT 1922 Jan 1 0:13:32 - -8:00 - PST 1927 Jun 10 23:00 - -7:00 - MST 1930 Nov 16 - -8:00 - PST 1942 Apr - -7:00 - MST 1949 Jan 14 - -8:00 - PST 1996 - -8:00 Mexico P%sT - -# Grenada -Zone America/Grenada -4:07:00 - LMT 1911 Jul # St George's - -4:00 - AST - -# Guadeloupe -Zone America/Guadeloupe -4:06:08 - LMT 1911 Jun 8 # Pointe-à-Pitre - -4:00 - AST - -# Canada -# -# From Paul Eggert (2015-03-24): -# Since 1970 most of Quebec has been like Toronto; see -# America/Toronto. 
However, earlier versions of the tz database -# mistakenly relied on data from Shanks & Pottenger saying that Quebec -# differed from Ontario after 1970, and the following rules and zone -# were created for most of Quebec from the incorrect Shanks & -# Pottenger data. The post-1970 entries have been corrected, but the -# pre-1970 entries are unchecked and probably have errors. -# -# Rule NAME FROM TO TYPE IN ON AT SAVE LETTER/S -Rule Mont 1917 only - Mar 25 2:00 1:00 D -Rule Mont 1917 only - Apr 24 0:00 0 S -Rule Mont 1919 only - Mar 31 2:30 1:00 D -Rule Mont 1919 only - Oct 25 2:30 0 S -Rule Mont 1920 only - May 2 2:30 1:00 D -Rule Mont 1920 1922 - Oct Sun>=1 2:30 0 S -Rule Mont 1921 only - May 1 2:00 1:00 D -Rule Mont 1922 only - Apr 30 2:00 1:00 D -Rule Mont 1924 only - May 17 2:00 1:00 D -Rule Mont 1924 1926 - Sep lastSun 2:30 0 S -Rule Mont 1925 1926 - May Sun>=1 2:00 1:00 D -Rule Mont 1927 1937 - Apr lastSat 24:00 1:00 D -Rule Mont 1927 1937 - Sep lastSat 24:00 0 S -Rule Mont 1938 1940 - Apr lastSun 0:00 1:00 D -Rule Mont 1938 1939 - Sep lastSun 0:00 0 S -Rule Mont 1946 1973 - Apr lastSun 2:00 1:00 D -Rule Mont 1945 1948 - Sep lastSun 2:00 0 S -Rule Mont 1949 1950 - Oct lastSun 2:00 0 S -Rule Mont 1951 1956 - Sep lastSun 2:00 0 S -Rule Mont 1957 1973 - Oct lastSun 2:00 0 S -Zone America/Montreal -4:54:16 - LMT 1884 - -5:00 Mont E%sT 1918 - -5:00 Canada E%sT 1919 - -5:00 Mont E%sT 1942 Feb 9 2:00s - -5:00 Canada E%sT 1946 - -5:00 Mont E%sT 1974 - -5:00 Canada E%sT - -# Montserrat -# From Paul Eggert (2006-03-22): -# In 1995 volcanic eruptions forced evacuation of Plymouth, the capital. -# world.gazetteer.com says Cork Hill is the most populous location now. -Zone America/Montserrat -4:08:52 - LMT 1911 Jul 1 0:01 # Cork Hill - -4:00 - AST - -# Argentina -# This entry was intended for the following areas, but has been superseded by -# more detailed zones. -# Santa Fe (SF), Entre Ríos (ER), Corrientes (CN), Misiones (MN), Chaco (CC), -# Formosa (FM), La Pampa (LP), Chubut (CH) -Zone America/Rosario -4:02:40 - LMT 1894 Nov - -4:16:44 - CMT 1920 May - -4:00 - -04 1930 Dec - -4:00 Arg -04/-03 1969 Oct 5 - -3:00 Arg -03/-02 1991 Jul - -3:00 - -03 1999 Oct 3 0:00 - -4:00 Arg -04/-03 2000 Mar 3 0:00 - -3:00 - -03 - -# St Kitts-Nevis -Zone America/St_Kitts -4:10:52 - LMT 1912 Mar 2 # Basseterre - -4:00 - AST - -# St Lucia -Zone America/St_Lucia -4:04:00 - LMT 1890 # Castries - -4:04:00 - CMT 1912 # Castries Mean Time - -4:00 - AST - -# Virgin Is -Zone America/St_Thomas -4:19:44 - LMT 1911 Jul # Charlotte Amalie - -4:00 - AST - -# St Vincent and the Grenadines -Zone America/St_Vincent -4:04:56 - LMT 1890 # Kingstown - -4:04:56 - KMT 1912 # Kingstown Mean Time - -4:00 - AST - -# British Virgin Is -Zone America/Tortola -4:18:28 - LMT 1911 Jul # Road Town - -4:00 - AST - -# McMurdo, Ross Island, since 1955-12 -Zone Antarctica/McMurdo 0 - -00 1956 - 12:00 NZ NZ%sT -Link Antarctica/McMurdo Antarctica/South_Pole - -# Yemen -# Milne says 2:59:54 was the meridian of the saluting battery at Aden, -# and that Yemen was at 1:55:56, the meridian of the Hagia Sophia. -Zone Asia/Aden 2:59:54 - LMT 1950 - 3:00 - +03 - -# Bahrain -Zone Asia/Bahrain 3:22:20 - LMT 1920 # Manamah - 4:00 - +04 1972 Jun - 3:00 - +03 - -# India -# -# From Paul Eggert (2014-09-06): -# The 1876 Report of the Secretary of the [US] Navy, p 305 says that Madras -# civil time was 5:20:57.3. 
-# -# From Paul Eggert (2014-08-21): -# In tomorrow's The Hindu, Nitya Menon reports that India had two civil time -# zones starting in 1884, one in Bombay and one in Calcutta, and that railways -# used a third time zone based on Madras time (80 deg. 18'30" E). Also, -# in 1881 Bombay briefly switched to Madras time, but switched back. See: -# http://www.thehindu.com/news/cities/chennai/madras-375-when-madras-clocked-the-time/article6339393.ece -#Zone Asia/Chennai [not enough info to complete] - -# China -# Long-shu Time (probably due to Long and Shu being two names of that area) -# Guangxi, Guizhou, Hainan, Ningxia, Sichuan, Shaanxi, and Yunnan; -# most of Gansu; west Inner Mongolia; west Qinghai; and the Guangdong -# counties Deqing, Enping, Kaiping, Luoding, Taishan, Xinxing, -# Yangchun, Yangjiang, Yu'nan, and Yunfu. -Zone Asia/Chongqing 7:06:20 - LMT 1928 # or Chungking - 7:00 - +07 1980 May - 8:00 PRC C%sT -Link Asia/Chongqing Asia/Chungking - -# Vietnam -# From Paul Eggert (2014-10-13): -# See Asia/Ho_Chi_Minh for the source for this data. -# Trần's book says the 1954-55 transition to 07:00 in Hanoi was in -# October 1954, with exact date and time unspecified. -Zone Asia/Hanoi 7:03:24 - LMT 1906 Jul 1 - 7:06:30 - PLMT 1911 May 1 - 7:00 - +07 1942 Dec 31 23:00 - 8:00 - +08 1945 Mar 14 23:00 - 9:00 - +09 1945 Sep 2 - 7:00 - +07 1947 Apr 1 - 8:00 - +08 1954 Oct - 7:00 - +07 - -# China -# Changbai Time ("Long-white Time", Long-white = Heilongjiang area) -# Heilongjiang (except Mohe county), Jilin -Zone Asia/Harbin 8:26:44 - LMT 1928 # or Haerbin - 8:30 - +0830 1932 Mar - 8:00 - CST 1940 - 9:00 - +09 1966 May - 8:30 - +0830 1980 May - 8:00 PRC C%sT - -# far west China -Zone Asia/Kashgar 5:03:56 - LMT 1928 # or Kashi or Kaxgar - 5:30 - +0530 1940 - 5:00 - +05 1980 May - 8:00 PRC C%sT - -# Kuwait -Zone Asia/Kuwait 3:11:56 - LMT 1950 - 3:00 - +03 - - -# Oman -# Milne says 3:54:24 was the meridian of the Muscat Tidal Observatory. -Zone Asia/Muscat 3:54:24 - LMT 1920 - 4:00 - +04 - -# India -# From Paul Eggert (2014-08-11), after a heads-up from Stephen Colebourne: -# According to a Portuguese decree (1911-05-26) -# https://dre.pt/pdf1sdip/1911/05/12500/23132313.pdf -# Portuguese India switched to UT +05 on 1912-01-01. -#Zone Asia/Panaji [not enough info to complete] - -# Cambodia -# From Paul Eggert (2014-10-11): -# See Asia/Ho_Chi_Minh for the source for most of this data. Also, guess -# (1) Cambodia reverted to UT +07 on 1945-09-02, when Vietnam did, and -# (2) they also reverted to +07 on 1953-11-09, the date of independence. -# These guesses are probably wrong but they're better than guessing no -# transitions there. -Zone Asia/Phnom_Penh 6:59:40 - LMT 1906 Jul 1 - 7:06:30 - PLMT 1911 May 1 - 7:00 - +07 1942 Dec 31 23:00 - 8:00 - +08 1945 Mar 14 23:00 - 9:00 - +09 1945 Sep 2 - 7:00 - +07 1947 Apr 1 - 8:00 - +08 1953 Nov 9 - 7:00 - +07 - -# Israel -Zone Asia/Tel_Aviv 2:19:04 - LMT 1880 - 2:21 - JMT 1918 - 2:00 Zion I%sT - -# Laos -# From Paul Eggert (2014-10-11): -# See Asia/Ho_Chi_Minh for the source for most of this data. -# Trần's book says that Laos reverted to UT +07 on 1955-04-15. -# Also, guess that Laos reverted to +07 on 1945-09-02, when Vietnam did; -# this is probably wrong but it's better than guessing no transition. 
-Zone Asia/Vientiane 6:50:24 - LMT 1906 Jul 1 - 7:06:30 - PLMT 1911 May 1 - 7:00 - +07 1942 Dec 31 23:00 - 8:00 - +08 1945 Mar 14 23:00 - 9:00 - +09 1945 Sep 2 - 7:00 - +07 1947 Apr 1 - 8:00 - +08 1955 Apr 15 - 7:00 - +07 - -# Jan Mayen -# From Whitman: -Zone Atlantic/Jan_Mayen -1:00 - -01 - -# St Helena -Zone Atlantic/St_Helena -0:22:48 - LMT 1890 # Jamestown - -0:22:48 - JMT 1951 # Jamestown Mean Time - 0:00 - GMT - -# Northern Ireland -Zone Europe/Belfast -0:23:40 - LMT 1880 Aug 2 - -0:25:21 - DMT 1916 May 21 2:00 - # DMT = Dublin/Dunsink MT - -0:25:21 1:00 IST 1916 Oct 1 2:00s - # IST = Irish Summer Time - 0:00 GB-Eire %s 1968 Oct 27 - 1:00 - BST 1971 Oct 31 2:00u - 0:00 GB-Eire %s 1996 - 0:00 EU GMT/BST - -# Guernsey -# Data from Joseph S. Myers -# http://mm.icann.org/pipermail/tz/2013-September/019883.html -# References to be added -# LMT Location - 49.27N -2.33E - St.Peter Port -Zone Europe/Guernsey -0:09:19 - LMT 1913 Jun 18 - 0:00 GB-Eire %s 1940 Jul 2 - 1:00 C-Eur CE%sT 1945 May 8 - 0:00 GB-Eire %s 1968 Oct 27 - 1:00 - BST 1971 Oct 31 2:00u - 0:00 GB-Eire %s 1996 - 0:00 EU GMT/BST - -# Isle of Man -# -# From Lester Caine (2013-09-04): -# The Isle of Man legislation is now on-line at -# , starting with the original Statutory -# Time Act in 1883 and including additional confirmation of some of -# the dates of the 'Summer Time' orders originating at -# Westminster. There is a little uncertainty as to the starting date -# of the first summer time in 1916 which may have be announced a -# couple of days late. There is still a substantial number of -# documents to work through, but it is thought that every GB change -# was also implemented on the island. -# -# AT4 of 1883 - The Statutory Time et cetera Act 1883 - -# LMT Location - 54.1508N -4.4814E - Tynwald Hill ( Manx parliament ) -Zone Europe/Isle_of_Man -0:17:55 - LMT 1883 Mar 30 0:00s - 0:00 GB-Eire %s 1968 Oct 27 - 1:00 - BST 1971 Oct 31 2:00u - 0:00 GB-Eire %s 1996 - 0:00 EU GMT/BST - -# Jersey -# Data from Joseph S. Myers -# http://mm.icann.org/pipermail/tz/2013-September/019883.html -# References to be added -# LMT Location - 49.187N -2.107E - St. 
Helier -Zone Europe/Jersey -0:08:25 - LMT 1898 Jun 11 16:00u - 0:00 GB-Eire %s 1940 Jul 2 - 1:00 C-Eur CE%sT 1945 May 8 - 0:00 GB-Eire %s 1968 Oct 27 - 1:00 - BST 1971 Oct 31 2:00u - 0:00 GB-Eire %s 1996 - 0:00 EU GMT/BST - -# Slovenia -Zone Europe/Ljubljana 0:58:04 - LMT 1884 - 1:00 - CET 1941 Apr 18 23:00 - 1:00 C-Eur CE%sT 1945 May 8 2:00s - 1:00 1:00 CEST 1945 Sep 16 2:00s - 1:00 - CET 1982 Nov 27 - 1:00 EU CE%sT - -# Bosnia and Herzegovina -Zone Europe/Sarajevo 1:13:40 - LMT 1884 - 1:00 - CET 1941 Apr 18 23:00 - 1:00 C-Eur CE%sT 1945 May 8 2:00s - 1:00 1:00 CEST 1945 Sep 16 2:00s - 1:00 - CET 1982 Nov 27 - 1:00 EU CE%sT - -# Macedonia -Zone Europe/Skopje 1:25:44 - LMT 1884 - 1:00 - CET 1941 Apr 18 23:00 - 1:00 C-Eur CE%sT 1945 May 8 2:00s - 1:00 1:00 CEST 1945 Sep 16 2:00s - 1:00 - CET 1982 Nov 27 - 1:00 EU CE%sT - -# Moldova / Transnistria -Zone Europe/Tiraspol 1:58:32 - LMT 1880 - 1:55 - CMT 1918 Feb 15 # Chisinau MT - 1:44:24 - BMT 1931 Jul 24 # Bucharest MT - 2:00 Romania EE%sT 1940 Aug 15 - 2:00 1:00 EEST 1941 Jul 17 - 1:00 C-Eur CE%sT 1944 Aug 24 - 3:00 Russia MSK/MSD 1991 Mar 31 2:00 - 2:00 Russia EE%sT 1992 Jan 19 2:00 - 3:00 Russia MSK/MSD - -# Liechtenstein -Zone Europe/Vaduz 0:38:04 - LMT 1894 Jun - 1:00 - CET 1981 - 1:00 EU CE%sT - -# Croatia -Zone Europe/Zagreb 1:03:52 - LMT 1884 - 1:00 - CET 1941 Apr 18 23:00 - 1:00 C-Eur CE%sT 1945 May 8 2:00s - 1:00 1:00 CEST 1945 Sep 16 2:00s - 1:00 - CET 1982 Nov 27 - 1:00 EU CE%sT - -# Madagascar -Zone Indian/Antananarivo 3:10:04 - LMT 1911 Jul - 3:00 - EAT 1954 Feb 27 23:00s - 3:00 1:00 EAST 1954 May 29 23:00s - 3:00 - EAT - -# Comoros -Zone Indian/Comoro 2:53:04 - LMT 1911 Jul # Moroni, Gran Comoro - 3:00 - EAT - -# Mayotte -Zone Indian/Mayotte 3:00:56 - LMT 1911 Jul # Mamoutzou - 3:00 - EAT - -# US minor outlying islands -Zone Pacific/Johnston -10:00 - HST - -# US minor outlying islands -# -# From Mark Brader (2005-01-23): -# [Fallacies and Fantasies of Air Transport History, by R.E.G. Davies, -# published 1994 by Paladwr Press, McLean, VA, USA; ISBN 0-9626483-5-3] -# reproduced a Pan American Airways timetable from 1936, for their weekly -# "Orient Express" flights between San Francisco and Manila, and connecting -# flights to Chicago and the US East Coast. As it uses some time zone -# designations that I've never seen before:.... -# Fri. 6:30A Lv. HONOLOLU (Pearl Harbor), H.I. H.L.T. Ar. 5:30P Sun. -# " 3:00P Ar. MIDWAY ISLAND . . . . . . . . . M.L.T. Lv. 6:00A " -# -Zone Pacific/Midway -11:49:28 - LMT 1901 - -11:00 - -11 1956 Jun 3 - -11:00 1:00 -10 1956 Sep 2 - -11:00 - -11 - -# N Mariana Is -Zone Pacific/Saipan -14:17:00 - LMT 1844 Dec 31 - 9:43:00 - LMT 1901 - 9:00 - +09 1969 Oct - 10:00 - +10 2000 Dec 23 - 10:00 - ChST # Chamorro Standard Time diff --git a/src/timezone/data/etcetera b/src/timezone/data/etcetera deleted file mode 100644 index f5fa4c94b4..0000000000 --- a/src/timezone/data/etcetera +++ /dev/null @@ -1,78 +0,0 @@ -# This file is in the public domain, so clarified as of -# 2009-05-17 by Arthur David Olson. - -# These entries are mostly present for historical reasons, so that -# people in areas not otherwise covered by the tz files could "zic -l" -# to a time zone that was right for their area. These days, the -# tz files cover almost all the inhabited world, and the only practical -# need now for the entries that are not on UTC are for ships at sea -# that cannot use POSIX TZ settings. - -# Starting with POSIX 1003.1-2001, the entries below are all -# unnecessary as settings for the TZ environment variable. 
E.g., -# instead of TZ='Etc/GMT+4' one can use the POSIX setting TZ='<-04>+4'. -# -# Do not use a POSIX TZ setting like TZ='GMT+4', which is four hours -# behind GMT but uses the completely misleading abbreviation "GMT". - -Zone Etc/GMT 0 - GMT -Zone Etc/UTC 0 - UTC -Zone Etc/UCT 0 - UCT - -# The following link uses older naming conventions, -# but it belongs here, not in the file 'backward', -# as functions like gmtime load the "GMT" file to handle leap seconds properly. -# We want this to work even on installations that omit the other older names. -Link Etc/GMT GMT - -Link Etc/UTC Etc/Universal -Link Etc/UTC Etc/Zulu - -Link Etc/GMT Etc/Greenwich -Link Etc/GMT Etc/GMT-0 -Link Etc/GMT Etc/GMT+0 -Link Etc/GMT Etc/GMT0 - -# Be consistent with POSIX TZ settings in the Zone names, -# even though this is the opposite of what many people expect. -# POSIX has positive signs west of Greenwich, but many people expect -# positive signs east of Greenwich. For example, TZ='Etc/GMT+4' uses -# the abbreviation "-04" and corresponds to 4 hours behind UT -# (i.e. west of Greenwich) even though many people would expect it to -# mean 4 hours ahead of UT (i.e. east of Greenwich). - -# Earlier incarnations of this package were not POSIX-compliant, -# and had lines such as -# Zone GMT-12 -12 - GMT-1200 -# We did not want things to change quietly if someone accustomed to the old -# way does a -# zic -l GMT-12 -# so we moved the names into the Etc subdirectory. -# Also, the time zone abbreviations are now compatible with %z. - -Zone Etc/GMT-14 14 - +14 -Zone Etc/GMT-13 13 - +13 -Zone Etc/GMT-12 12 - +12 -Zone Etc/GMT-11 11 - +11 -Zone Etc/GMT-10 10 - +10 -Zone Etc/GMT-9 9 - +09 -Zone Etc/GMT-8 8 - +08 -Zone Etc/GMT-7 7 - +07 -Zone Etc/GMT-6 6 - +06 -Zone Etc/GMT-5 5 - +05 -Zone Etc/GMT-4 4 - +04 -Zone Etc/GMT-3 3 - +03 -Zone Etc/GMT-2 2 - +02 -Zone Etc/GMT-1 1 - +01 -Zone Etc/GMT+1 -1 - -01 -Zone Etc/GMT+2 -2 - -02 -Zone Etc/GMT+3 -3 - -03 -Zone Etc/GMT+4 -4 - -04 -Zone Etc/GMT+5 -5 - -05 -Zone Etc/GMT+6 -6 - -06 -Zone Etc/GMT+7 -7 - -07 -Zone Etc/GMT+8 -8 - -08 -Zone Etc/GMT+9 -9 - -09 -Zone Etc/GMT+10 -10 - -10 -Zone Etc/GMT+11 -11 - -11 -Zone Etc/GMT+12 -12 - -12 diff --git a/src/timezone/data/europe b/src/timezone/data/europe deleted file mode 100644 index 558b9f168f..0000000000 --- a/src/timezone/data/europe +++ /dev/null @@ -1,3839 +0,0 @@ -# This file is in the public domain, so clarified as of -# 2009-05-17 by Arthur David Olson. - -# This file is by no means authoritative; if you think you know better, -# go ahead and edit the file (and please send any changes to -# tz@iana.org for general use in the future). For more, please see -# the file CONTRIBUTING in the tz distribution. - -# From Paul Eggert (2017-02-10): -# -# Unless otherwise specified, the source for data through 1990 is: -# Thomas G. Shanks and Rique Pottenger, The International Atlas (6th edition), -# San Diego: ACS Publications, Inc. (2003). -# Unfortunately this book contains many errors and cites no sources. -# -# Many years ago Gwillim Law wrote that a good source -# for time zone data was the International Air Transport -# Association's Standard Schedules Information Manual (IATA SSIM), -# published semiannually. Law sent in several helpful summaries -# of the IATA's data after 1990. Except where otherwise noted, -# IATA SSIM is the source for entries after 1990. -# -# A reliable and entertaining source about time zones is -# Derek Howse, Greenwich time and longitude, Philip Wilson Publishers (1997). 
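Returning briefly to the Etc/GMT zones removed just above: their sign convention is easy to get backwards, so here is a minimal C sketch showing that TZ='Etc/GMT+4' runs four hours behind UTC and reports the abbreviation "-04". This is illustration only, not code from this patch or from PostgreSQL, and it assumes a POSIX system whose installed tz database still ships the Etc zones.

#include <stdio.h>
#include <stdlib.h>
#include <time.h>

int main(void)
{
	time_t now = time(NULL);
	struct tm utc = *gmtime(&now);
	struct tm local;

	setenv("TZ", "Etc/GMT+4", 1);	/* POSIX-style name: 4 hours *west* of Greenwich */
	tzset();
	local = *localtime(&now);

	printf("UTC hour:       %02d\n", utc.tm_hour);
	printf("Etc/GMT+4 hour: %02d  (UTC hour minus 4, wrapping at midnight)\n",
	       local.tm_hour);
	printf("abbreviation:   %s\n", tzname[0]);	/* "-04" with current tzdata */
	return 0;
}

As noted in the deleted comments above, the tz-database-free equivalent is the pure POSIX setting TZ='<-04>+4'.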
-# -# Except where otherwise noted, Shanks & Pottenger is the source for -# entries through 1991, and IATA SSIM is the source for entries afterwards. -# -# Other sources occasionally used include: -# -# Edward W. Whitman, World Time Differences, -# Whitman Publishing Co, 2 Niagara Av, Ealing, London (undated), -# which I found in the UCLA library. -# -# William Willett, The Waste of Daylight, 19th edition -# -# [PDF] (1914-03) -# -# Milne J. Civil time. Geogr J. 1899 Feb;13(2):173-94 -# . He writes: -# "It is requested that corrections and additions to these tables -# may be sent to Mr. John Milne, Royal Geographical Society, -# Savile Row, London." Nowadays please email them to tz@iana.org. -# -# Byalokoz EL. New Counting of Time in Russia since July 1, 1919. -# This Russian-language source was consulted by Vladimir Karpinsky; see -# http://mm.icann.org/pipermail/tz/2014-August/021320.html -# The full Russian citation is: -# Бялокоз, Евгений Людвигович. Новый счет времени в течении суток -# введенный декретом Совета народных комиссаров для всей России с 1-го -# июля 1919 г. / Изд. 2-е Междуведомственной комиссии. - Петроград: -# Десятая гос. тип., 1919. -# http://resolver.gpntb.ru/purl?docushare/dsweb/Get/Resource-2011/Byalokoz__E.L.__Novyy__schet__vremeni__v__techenie__sutok__izd__2(1).pdf -# -# Brazil's Divisão Serviço da Hora (DSHO), -# History of Summer Time -# -# (1998-09-21, in Portuguese) -# -# I invented the abbreviations marked '*' in the following table; -# the rest are variants of the "xMT" pattern for a city's mean time, -# or are from other sources. Corrections are welcome! -# std dst 2dst -# LMT Local Mean Time -# -4:00 AST ADT Atlantic -# 0:00 GMT BST BDST Greenwich, British Summer -# 0:00 GMT IST Greenwich, Irish Summer -# 0:00 WET WEST WEMT Western Europe -# 0:19:32.13 AMT* NST* Amsterdam, Netherlands Summer (1835-1937) -# 1:00 BST British Standard (1968-1971) -# 1:00 CET CEST CEMT Central Europe -# 1:00:14 SET Swedish (1879-1899) -# 1:36:34 RMT* LST* Riga, Latvian Summer (1880-1926)* -# 2:00 EET EEST Eastern Europe -# 3:00 MSK MSD MDST* Moscow - -# From Peter Ilieve (1994-12-04), -# The original six [EU members]: Belgium, France, (West) Germany, Italy, -# Luxembourg, the Netherlands. -# Plus, from 1 Jan 73: Denmark, Ireland, United Kingdom. -# Plus, from 1 Jan 81: Greece. -# Plus, from 1 Jan 86: Spain, Portugal. -# Plus, from 1 Jan 95: Austria, Finland, Sweden. (Norway negotiated terms for -# entry but in a referendum on 28 Nov 94 the people voted No by 52.2% to 47.8% -# on a turnout of 88.6%. This was almost the same result as Norway's previous -# referendum in 1972, they are the only country to have said No twice. -# Referendums in the other three countries voted Yes.) -# ... -# Estonia ... uses EU dates but not at 01:00 GMT, they use midnight GMT. -# I don't think they know yet what they will do from 1996 onwards. -# ... -# There shouldn't be any [current members who are not using EU rules]. -# A Directive has the force of law, member states are obliged to enact -# national law to implement it. The only contentious issue was the -# different end date for the UK and Ireland, and this was always allowed -# in the Directive. - - -############################################################################### - -# Britain (United Kingdom) and Ireland (Eire) - -# From Peter Ilieve (1994-07-06): -# -# On 17 Jan 1994 the Independent, a UK quality newspaper, had a piece about -# historical vistas along the Thames in west London. 
There was a photo -# and a sketch map showing some of the sightlines involved. One paragraph -# of the text said: -# -# 'An old stone obelisk marking a forgotten terrestrial meridian stands -# beside the river at Kew. In the 18th century, before time and longitude -# was standardised by the Royal Observatory in Greenwich, scholars observed -# this stone and the movement of stars from Kew Observatory nearby. They -# made their calculations and set the time for the Horse Guards and Parliament, -# but now the stone is obscured by scrubwood and can only be seen by walking -# along the towpath within a few yards of it.' -# -# I have a one inch to one mile map of London and my estimate of the stone's -# position is 51 degrees 28' 30" N, 0 degrees 18' 45" W. The longitude should -# be within about +-2". The Ordnance Survey grid reference is TQ172761. -# -# [This yields GMTOFF = -0:01:15 for London LMT in the 18th century.] - -# From Paul Eggert (1993-11-18): -# -# Howse writes that Britain was the first country to use standard time. -# The railways cared most about the inconsistencies of local mean time, -# and it was they who forced a uniform time on the country. -# The original idea was credited to Dr. William Hyde Wollaston (1766-1828) -# and was popularized by Abraham Follett Osler (1808-1903). -# The first railway to adopt London time was the Great Western Railway -# in November 1840; other railways followed suit, and by 1847 most -# (though not all) railways used London time. On 1847-09-22 the -# Railway Clearing House, an industry standards body, recommended that GMT be -# adopted at all stations as soon as the General Post Office permitted it. -# The transition occurred on 12-01 for the L&NW, the Caledonian, -# and presumably other railways; the January 1848 Bradshaw's lists many -# railways as using GMT. By 1855 the vast majority of public -# clocks in Britain were set to GMT (though some, like the great clock -# on Tom Tower at Christ Church, Oxford, were fitted with two minute hands, -# one for local time and one for GMT). The last major holdout was the legal -# system, which stubbornly stuck to local time for many years, leading -# to oddities like polls opening at 08:13 and closing at 16:13. -# The legal system finally switched to GMT when the Statutes (Definition -# of Time) Act took effect; it received the Royal Assent on 1880-08-02. -# -# In the tables below, we condense this complicated story into a single -# transition date for London, namely 1847-12-01. We don't know as much -# about Dublin, so we use 1880-08-02, the legal transition time. - -# From Paul Eggert (2014-07-19): -# The ancients had no need for daylight saving, as they kept time -# informally or via hours whose length depended on the time of year. -# Daylight saving time in its modern sense was invented by the -# New Zealand entomologist George Vernon Hudson (1867-1946), -# whose day job as a postal clerk led him to value -# after-hours daylight in which to pursue his research. -# In 1895 he presented a paper to the Wellington Philosophical Society -# that proposed a two-hour daylight-saving shift. See: -# Hudson GV. On seasonal time-adjustment in countries south of lat. 30 deg. -# Transactions and Proceedings of the New Zealand Institute. 1895;28:734 -# http://rsnz.natlib.govt.nz/volume/rsnz_28/rsnz_28_00_006110.html -# Although some interest was expressed in New Zealand, his proposal -# did not find its way into law and eventually it was almost forgotten. 
-# -# In England, DST was independently reinvented by William Willett (1857-1915), -# a London builder and member of the Royal Astronomical Society -# who circulated a pamphlet "The Waste of Daylight" (1907) -# that proposed advancing clocks 20 minutes on each of four Sundays in April, -# and retarding them by the same amount on four Sundays in September. -# A bill was drafted in 1909 and introduced in Parliament several times, -# but it met with ridicule and opposition, especially from farming interests. -# Later editions of the pamphlet proposed one-hour summer time, and -# it was eventually adopted as a wartime measure in 1916. -# See: Summer Time Arrives Early, The Times (2000-05-18). -# A monument to Willett was unveiled on 1927-05-21, in an open space in -# a 45-acre wood near Chislehurst, Kent that was purchased by popular -# subscription and open to the public. On the south face of the monolith, -# designed by G. W. Miller, is the William Willett Memorial Sundial, -# which is permanently set to Summer Time. - -# From Winston Churchill (1934-04-28): -# It is one of the paradoxes of history that we should owe the boon of -# summer time, which gives every year to the people of this country -# between 160 and 170 hours more daylight leisure, to a war which -# plunged Europe into darkness for four years, and shook the -# foundations of civilization throughout the world. -# -- "A Silent Toast to William Willett", Pictorial Weekly; -# republished in Finest Hour (Spring 2002) 1(114):26 -# http://www.winstonchurchill.org/images/finesthour/Vol.01%20No.114.pdf - -# From Paul Eggert (2015-08-08): -# The OED Supplement says that the English originally said "Daylight Saving" -# when they were debating the adoption of DST in 1908; but by 1916 this -# term appears only in quotes taken from DST's opponents, whereas the -# proponents (who eventually won the argument) are quoted as using "Summer". -# The term "Summer Time" was introduced by Herbert Samuel, Home Secretary; see: -# Viscount Samuel. Leisure in a Democracy. Cambridge University Press -# ISBN 978-1-107-49471-8 (1949, reissued 2015), p 8. - -# From Arthur David Olson (1989-01-19): -# A source at the British Information Office in New York avers that it's -# known as "British" Summer Time in all parts of the United Kingdom. - -# Date: 4 Jan 89 08:57:25 GMT (Wed) -# From: Jonathan Leffler -# [British Summer Time] is fixed annually by Act of Parliament. -# If you can predict what Parliament will do, you should be in -# politics making a fortune, not computing. - -# From Chris Carrier (1996-06-14): -# I remember reading in various wartime issues of the London Times the -# acronym BDST for British Double Summer Time. Look for the published -# time of sunrise and sunset in The Times, when BDST was in effect, and -# if you find a zone reference it will say, "All times B.D.S.T." - -# From Joseph S. Myers (1999-09-02): -# ... some military cables (WO 219/4100 - this is a copy from the -# main SHAEF archives held in the US National Archives, SHAEF/5252/8/516) -# agree that the usage is BDST (this appears in a message dated 17 Feb 1945). - -# From Joseph S. Myers (2000-10-03): -# On 18th April 1941, Sir Stephen Tallents of the BBC wrote to Sir -# Alexander Maxwell of the Home Office asking whether there was any -# official designation; the reply of the 21st was that there wasn't -# but he couldn't think of anything better than the "Double British -# Summer Time" that the BBC had been using informally. 
-# http://www.polyomino.org.uk/british-time/bbc-19410418.png -# http://www.polyomino.org.uk/british-time/ho-19410421.png - -# From Sir Alexander Maxwell in the above-mentioned letter (1941-04-21): -# [N]o official designation has as far as I know been adopted for the time -# which is to be introduced in May.... -# I cannot think of anything better than "Double British Summer Time" -# which could not be said to run counter to any official description. - -# From Paul Eggert (2000-10-02): -# Howse writes (p 157) 'DBST' too, but 'BDST' seems to have been common -# and follows the more usual convention of putting the location name first, -# so we use 'BDST'. - -# Peter Ilieve (1998-04-19) described at length -# the history of summer time legislation in the United Kingdom. -# Since 1998 Joseph S. Myers has been updating -# and extending this list, which can be found in -# http://www.polyomino.org.uk/british-time/ - -# From Joseph S. Myers (1998-01-06): -# -# The legal time in the UK outside of summer time is definitely GMT, not UTC; -# see Lord Tanlaw's speech -# http://www.publications.parliament.uk/pa/ld199798/ldhansrd/vo970611/text/70611-10.htm#70611-10_head0 -# (Lords Hansard 11 June 1997 columns 964 to 976). - -# From Paul Eggert (2006-03-22): -# -# For lack of other data, follow Shanks & Pottenger for Eire in 1940-1948. -# -# Given Ilieve and Myers's data, the following claims by Shanks & Pottenger -# are incorrect: -# * Wales did not switch from GMT to daylight saving time until -# 1921 Apr 3, when they began to conform with the rest of Great Britain. -# Actually, Wales was identical after 1880. -# * Eire had two transitions on 1916 Oct 1. -# It actually just had one transition. -# * Northern Ireland used single daylight saving time throughout WW II. -# Actually, it conformed to Britain. -# * GB-Eire changed standard time to 1 hour ahead of GMT on 1968-02-18. -# Actually, that date saw the usual switch to summer time. -# Standard time was not changed until 1968-10-27 (the clocks didn't change). -# -# Here is another incorrect claim by Shanks & Pottenger: -# * Jersey, Guernsey, and the Isle of Man did not switch from GMT -# to daylight saving time until 1921 Apr 3, when they began to -# conform with Great Britain. -# S.R.&O. 1916, No. 382 and HO 45/10811/312364 (quoted above) say otherwise. -# -# The following claim by Shanks & Pottenger is possible though doubtful; -# we'll ignore it for now. -# * Dublin's 1971-10-31 switch was at 02:00, even though London's was 03:00. -# -# -# Whitman says Dublin Mean Time was -0:25:21, which is more precise than -# Shanks & Pottenger. -# Perhaps this was Dunsink Observatory Time, as Dunsink Observatory -# (8 km NW of Dublin's center) seemingly was to Dublin as Greenwich was -# to London. For example: -# -# "Timeball on the ballast office is down. Dunsink time." -# -- James Joyce, Ulysses - -# "Countess Markievicz ... claimed that the [1916] abolition of Dublin Mean Time -# was among various actions undertaken by the 'English' government that -# would 'put the whole country into the SF (Sinn Féin) camp'. She claimed -# Irish 'public feeling (was) outraged by forcing of English time on us'." -# -- Parsons M. Dublin lost its time zone - and 25 minutes - after 1916 Rising. -# Irish Times 2014-10-27. -# http://www.irishtimes.com/news/politics/dublin-lost-its-time-zone-and-25-minutes-after-1916-rising-1.1977411 - -# From Joseph S. Myers (2005-01-26): -# Irish laws are available online at . 
-# These include various relating to legal time, for example: -# -# ZZA13Y1923.html ZZA12Y1924.html ZZA8Y1925.html ZZSIV20PG1267.html -# -# ZZSI71Y1947.html ZZSI128Y1948.html ZZSI23Y1949.html ZZSI41Y1950.html -# ZZSI27Y1951.html ZZSI73Y1952.html -# -# ZZSI11Y1961.html ZZSI232Y1961.html ZZSI182Y1962.html -# ZZSI167Y1963.html ZZSI257Y1964.html ZZSI198Y1967.html -# ZZA23Y1968.html ZZA17Y1971.html -# -# ZZSI67Y1981.html ZZSI212Y1982.html ZZSI45Y1986.html -# ZZSI264Y1988.html ZZSI52Y1990.html ZZSI371Y1992.html -# ZZSI395Y1994.html ZZSI484Y1997.html ZZSI506Y2001.html -# -# [These are all relative to the root, e.g., the first is -# .] -# -# (These are those I found, but there could be more. In any case these -# should allow various updates to the comments in the europe file to cover -# the laws applicable in Ireland.) -# -# (Note that the time in the Republic of Ireland since 1968 has been defined -# in terms of standard time being GMT+1 with a period of winter time when it -# is GMT, rather than standard time being GMT with a period of summer time -# being GMT+1.) - -# From Paul Eggert (1999-03-28): -# Clive Feather (, 1997-03-31) -# reports that Folkestone (Cheriton) Shuttle Terminal uses Concession Time -# (CT), equivalent to French civil time. -# Julian Hill (, 1998-09-30) reports that -# trains between Dollands Moor (the freight facility next door) -# and Frethun run in CT. -# My admittedly uninformed guess is that the terminal has two authorities, -# the French concession operators and the British civil authorities, -# and that the time depends on who you're talking to. -# If, say, the British police were called to the station for some reason, -# I would expect the official police report to use GMT/BST and not CET/CEST. -# This is a borderline case, but for now let's stick to GMT/BST. - -# From an anonymous contributor (1996-06-02): -# The law governing time in Ireland is under Statutory Instrument SI 395/94, -# which gives force to European Union 7th Council Directive No. 94/21/EC. -# Under this directive, the Minister for Justice in Ireland makes appropriate -# regulations. I spoke this morning with the Secretary of the Department of -# Justice (tel +353 1 678 9711) who confirmed to me that the correct name is -# "Irish Summer Time", abbreviated to "IST". - -# Rule NAME FROM TO TYPE IN ON AT SAVE LETTER/S -# Summer Time Act, 1916 -Rule GB-Eire 1916 only - May 21 2:00s 1:00 BST -Rule GB-Eire 1916 only - Oct 1 2:00s 0 GMT -# S.R.&O. 1917, No. 358 -Rule GB-Eire 1917 only - Apr 8 2:00s 1:00 BST -Rule GB-Eire 1917 only - Sep 17 2:00s 0 GMT -# S.R.&O. 1918, No. 274 -Rule GB-Eire 1918 only - Mar 24 2:00s 1:00 BST -Rule GB-Eire 1918 only - Sep 30 2:00s 0 GMT -# S.R.&O. 1919, No. 297 -Rule GB-Eire 1919 only - Mar 30 2:00s 1:00 BST -Rule GB-Eire 1919 only - Sep 29 2:00s 0 GMT -# S.R.&O. 1920, No. 458 -Rule GB-Eire 1920 only - Mar 28 2:00s 1:00 BST -# S.R.&O. 1920, No. 1844 -Rule GB-Eire 1920 only - Oct 25 2:00s 0 GMT -# S.R.&O. 1921, No. 363 -Rule GB-Eire 1921 only - Apr 3 2:00s 1:00 BST -Rule GB-Eire 1921 only - Oct 3 2:00s 0 GMT -# S.R.&O. 1922, No. 
264 -Rule GB-Eire 1922 only - Mar 26 2:00s 1:00 BST -Rule GB-Eire 1922 only - Oct 8 2:00s 0 GMT -# The Summer Time Act, 1922 -Rule GB-Eire 1923 only - Apr Sun>=16 2:00s 1:00 BST -Rule GB-Eire 1923 1924 - Sep Sun>=16 2:00s 0 GMT -Rule GB-Eire 1924 only - Apr Sun>=9 2:00s 1:00 BST -Rule GB-Eire 1925 1926 - Apr Sun>=16 2:00s 1:00 BST -# The Summer Time Act, 1925 -Rule GB-Eire 1925 1938 - Oct Sun>=2 2:00s 0 GMT -Rule GB-Eire 1927 only - Apr Sun>=9 2:00s 1:00 BST -Rule GB-Eire 1928 1929 - Apr Sun>=16 2:00s 1:00 BST -Rule GB-Eire 1930 only - Apr Sun>=9 2:00s 1:00 BST -Rule GB-Eire 1931 1932 - Apr Sun>=16 2:00s 1:00 BST -Rule GB-Eire 1933 only - Apr Sun>=9 2:00s 1:00 BST -Rule GB-Eire 1934 only - Apr Sun>=16 2:00s 1:00 BST -Rule GB-Eire 1935 only - Apr Sun>=9 2:00s 1:00 BST -Rule GB-Eire 1936 1937 - Apr Sun>=16 2:00s 1:00 BST -Rule GB-Eire 1938 only - Apr Sun>=9 2:00s 1:00 BST -Rule GB-Eire 1939 only - Apr Sun>=16 2:00s 1:00 BST -# S.R.&O. 1939, No. 1379 -Rule GB-Eire 1939 only - Nov Sun>=16 2:00s 0 GMT -# S.R.&O. 1940, No. 172 and No. 1883 -Rule GB-Eire 1940 only - Feb Sun>=23 2:00s 1:00 BST -# S.R.&O. 1941, No. 476 -Rule GB-Eire 1941 only - May Sun>=2 1:00s 2:00 BDST -Rule GB-Eire 1941 1943 - Aug Sun>=9 1:00s 1:00 BST -# S.R.&O. 1942, No. 506 -Rule GB-Eire 1942 1944 - Apr Sun>=2 1:00s 2:00 BDST -# S.R.&O. 1944, No. 932 -Rule GB-Eire 1944 only - Sep Sun>=16 1:00s 1:00 BST -# S.R.&O. 1945, No. 312 -Rule GB-Eire 1945 only - Apr Mon>=2 1:00s 2:00 BDST -Rule GB-Eire 1945 only - Jul Sun>=9 1:00s 1:00 BST -# S.R.&O. 1945, No. 1208 -Rule GB-Eire 1945 1946 - Oct Sun>=2 2:00s 0 GMT -Rule GB-Eire 1946 only - Apr Sun>=9 2:00s 1:00 BST -# The Summer Time Act, 1947 -Rule GB-Eire 1947 only - Mar 16 2:00s 1:00 BST -Rule GB-Eire 1947 only - Apr 13 1:00s 2:00 BDST -Rule GB-Eire 1947 only - Aug 10 1:00s 1:00 BST -Rule GB-Eire 1947 only - Nov 2 2:00s 0 GMT -# Summer Time Order, 1948 (S.I. 1948/495) -Rule GB-Eire 1948 only - Mar 14 2:00s 1:00 BST -Rule GB-Eire 1948 only - Oct 31 2:00s 0 GMT -# Summer Time Order, 1949 (S.I. 1949/373) -Rule GB-Eire 1949 only - Apr 3 2:00s 1:00 BST -Rule GB-Eire 1949 only - Oct 30 2:00s 0 GMT -# Summer Time Order, 1950 (S.I. 1950/518) -# Summer Time Order, 1951 (S.I. 1951/430) -# Summer Time Order, 1952 (S.I. 1952/451) -Rule GB-Eire 1950 1952 - Apr Sun>=14 2:00s 1:00 BST -Rule GB-Eire 1950 1952 - Oct Sun>=21 2:00s 0 GMT -# revert to the rules of the Summer Time Act, 1925 -Rule GB-Eire 1953 only - Apr Sun>=16 2:00s 1:00 BST -Rule GB-Eire 1953 1960 - Oct Sun>=2 2:00s 0 GMT -Rule GB-Eire 1954 only - Apr Sun>=9 2:00s 1:00 BST -Rule GB-Eire 1955 1956 - Apr Sun>=16 2:00s 1:00 BST -Rule GB-Eire 1957 only - Apr Sun>=9 2:00s 1:00 BST -Rule GB-Eire 1958 1959 - Apr Sun>=16 2:00s 1:00 BST -Rule GB-Eire 1960 only - Apr Sun>=9 2:00s 1:00 BST -# Summer Time Order, 1961 (S.I. 1961/71) -# Summer Time (1962) Order, 1961 (S.I. 1961/2465) -# Summer Time Order, 1963 (S.I. 1963/81) -Rule GB-Eire 1961 1963 - Mar lastSun 2:00s 1:00 BST -Rule GB-Eire 1961 1968 - Oct Sun>=23 2:00s 0 GMT -# Summer Time (1964) Order, 1963 (S.I. 1963/2101) -# Summer Time Order, 1964 (S.I. 1964/1201) -# Summer Time Order, 1967 (S.I. 1967/1148) -Rule GB-Eire 1964 1967 - Mar Sun>=19 2:00s 1:00 BST -# Summer Time Order, 1968 (S.I. 1968/117) -Rule GB-Eire 1968 only - Feb 18 2:00s 1:00 BST -# The British Standard Time Act, 1968 -# (no summer time) -# The Summer Time Act, 1972 -Rule GB-Eire 1972 1980 - Mar Sun>=16 2:00s 1:00 BST -Rule GB-Eire 1972 1980 - Oct Sun>=23 2:00s 0 GMT -# Summer Time Order, 1980 (S.I. 
1980/1089) -# Summer Time Order, 1982 (S.I. 1982/1673) -# Summer Time Order, 1986 (S.I. 1986/223) -# Summer Time Order, 1988 (S.I. 1988/931) -Rule GB-Eire 1981 1995 - Mar lastSun 1:00u 1:00 BST -Rule GB-Eire 1981 1989 - Oct Sun>=23 1:00u 0 GMT -# Summer Time Order, 1989 (S.I. 1989/985) -# Summer Time Order, 1992 (S.I. 1992/1729) -# Summer Time Order 1994 (S.I. 1994/2798) -Rule GB-Eire 1990 1995 - Oct Sun>=22 1:00u 0 GMT -# Summer Time Order 1997 (S.I. 1997/2982) -# See EU for rules starting in 1996. -# -# Use Europe/London for Jersey, Guernsey, and the Isle of Man. - -# Zone NAME GMTOFF RULES FORMAT [UNTIL] -Zone Europe/London -0:01:15 - LMT 1847 Dec 1 0:00s - 0:00 GB-Eire %s 1968 Oct 27 - 1:00 - BST 1971 Oct 31 2:00u - 0:00 GB-Eire %s 1996 - 0:00 EU GMT/BST -Link Europe/London Europe/Jersey -Link Europe/London Europe/Guernsey -Link Europe/London Europe/Isle_of_Man - -# Zone NAME GMTOFF RULES FORMAT [UNTIL] -Zone Europe/Dublin -0:25:00 - LMT 1880 Aug 2 - -0:25:21 - DMT 1916 May 21 2:00 # Dublin MT - -0:25:21 1:00 IST 1916 Oct 1 2:00s - 0:00 GB-Eire %s 1921 Dec 6 # independence - 0:00 GB-Eire GMT/IST 1940 Feb 25 2:00 - 0:00 1:00 IST 1946 Oct 6 2:00 - 0:00 - GMT 1947 Mar 16 2:00 - 0:00 1:00 IST 1947 Nov 2 2:00 - 0:00 - GMT 1948 Apr 18 2:00 - 0:00 GB-Eire GMT/IST 1968 Oct 27 - 1:00 - IST 1971 Oct 31 2:00u - 0:00 GB-Eire GMT/IST 1996 - 0:00 EU GMT/IST - -############################################################################### - -# Europe - -# EU rules are for the European Union, previously known as the EC, EEC, -# Common Market, etc. - -# Rule NAME FROM TO TYPE IN ON AT SAVE LETTER/S -Rule EU 1977 1980 - Apr Sun>=1 1:00u 1:00 S -Rule EU 1977 only - Sep lastSun 1:00u 0 - -Rule EU 1978 only - Oct 1 1:00u 0 - -Rule EU 1979 1995 - Sep lastSun 1:00u 0 - -Rule EU 1981 max - Mar lastSun 1:00u 1:00 S -Rule EU 1996 max - Oct lastSun 1:00u 0 - -# The most recent directive covers the years starting in 2002. See: -# Directive 2000/84/EC of the European Parliament and of the Council -# of 19 January 2001 on summer-time arrangements. -# http://eur-lex.europa.eu/LexUriServ/LexUriServ.do?uri=CELEX:32000L0084:EN:NOT - -# W-Eur differs from EU only in that W-Eur uses standard time. -Rule W-Eur 1977 1980 - Apr Sun>=1 1:00s 1:00 S -Rule W-Eur 1977 only - Sep lastSun 1:00s 0 - -Rule W-Eur 1978 only - Oct 1 1:00s 0 - -Rule W-Eur 1979 1995 - Sep lastSun 1:00s 0 - -Rule W-Eur 1981 max - Mar lastSun 1:00s 1:00 S -Rule W-Eur 1996 max - Oct lastSun 1:00s 0 - - -# Older C-Eur rules are for convenience in the tables. -# From 1977 on, C-Eur differs from EU only in that C-Eur uses standard time. -Rule C-Eur 1916 only - Apr 30 23:00 1:00 S -Rule C-Eur 1916 only - Oct 1 1:00 0 - -Rule C-Eur 1917 1918 - Apr Mon>=15 2:00s 1:00 S -Rule C-Eur 1917 1918 - Sep Mon>=15 2:00s 0 - -Rule C-Eur 1940 only - Apr 1 2:00s 1:00 S -Rule C-Eur 1942 only - Nov 2 2:00s 0 - -Rule C-Eur 1943 only - Mar 29 2:00s 1:00 S -Rule C-Eur 1943 only - Oct 4 2:00s 0 - -Rule C-Eur 1944 1945 - Apr Mon>=1 2:00s 1:00 S -# Whitman gives 1944 Oct 7; go with Shanks & Pottenger. -Rule C-Eur 1944 only - Oct 2 2:00s 0 - -# From Jesper Nørgaard Welen (2008-07-13): -# -# I found what is probably a typo of 2:00 which should perhaps be 2:00s -# in the C-Eur rule from tz database version 2008d (this part was -# corrected in version 2008d). 
The circumstantial evidence is simply the -# tz database itself, as seen below: -# -# Zone Europe/Paris 0:09:21 - LMT 1891 Mar 15 0:01 -# 0:00 France WE%sT 1945 Sep 16 3:00 -# -# Zone Europe/Monaco 0:29:32 - LMT 1891 Mar 15 -# 0:00 France WE%sT 1945 Sep 16 3:00 -# -# Zone Europe/Belgrade 1:22:00 - LMT 1884 -# 1:00 1:00 CEST 1945 Sep 16 2:00s -# -# Rule France 1945 only - Sep 16 3:00 0 - -# Rule Belgium 1945 only - Sep 16 2:00s 0 - -# Rule Neth 1945 only - Sep 16 2:00s 0 - -# -# The rule line to be changed is: -# -# Rule C-Eur 1945 only - Sep 16 2:00 0 - -# -# It seems that Paris, Monaco, Rule France, Rule Belgium all agree on -# 2:00 standard time, e.g. 3:00 local time. However there are no -# countries that use C-Eur rules in September 1945, so the only items -# affected are apparently these fictitious zones that translate acronyms -# CET and MET: -# -# Zone CET 1:00 C-Eur CE%sT -# Zone MET 1:00 C-Eur ME%sT -# -# It this is right then the corrected version would look like: -# -# Rule C-Eur 1945 only - Sep 16 2:00s 0 - -# -# A small step for mankind though 8-) -Rule C-Eur 1945 only - Sep 16 2:00s 0 - -Rule C-Eur 1977 1980 - Apr Sun>=1 2:00s 1:00 S -Rule C-Eur 1977 only - Sep lastSun 2:00s 0 - -Rule C-Eur 1978 only - Oct 1 2:00s 0 - -Rule C-Eur 1979 1995 - Sep lastSun 2:00s 0 - -Rule C-Eur 1981 max - Mar lastSun 2:00s 1:00 S -Rule C-Eur 1996 max - Oct lastSun 2:00s 0 - - -# E-Eur differs from EU only in that E-Eur switches at midnight local time. -Rule E-Eur 1977 1980 - Apr Sun>=1 0:00 1:00 S -Rule E-Eur 1977 only - Sep lastSun 0:00 0 - -Rule E-Eur 1978 only - Oct 1 0:00 0 - -Rule E-Eur 1979 1995 - Sep lastSun 0:00 0 - -Rule E-Eur 1981 max - Mar lastSun 0:00 1:00 S -Rule E-Eur 1996 max - Oct lastSun 0:00 0 - - - -# Daylight saving time for Russia and the Soviet Union -# -# The 1917-1921 decree URLs are from Alexander Belopolsky (2016-08-23). - -# Rule NAME FROM TO TYPE IN ON AT SAVE LETTER/S -Rule Russia 1917 only - Jul 1 23:00 1:00 MST # Moscow Summer Time -# -# Decree No. 142 (1917-12-22) http://istmat.info/node/28137 -Rule Russia 1917 only - Dec 28 0:00 0 MMT # Moscow Mean Time -# -# Decree No. 497 (1918-05-30) http://istmat.info/node/30001 -Rule Russia 1918 only - May 31 22:00 2:00 MDST # Moscow Double Summer Time -Rule Russia 1918 only - Sep 16 1:00 1:00 MST -# -# Decree No. 258 (1919-05-29) http://istmat.info/node/37949 -Rule Russia 1919 only - May 31 23:00 2:00 MDST -# -Rule Russia 1919 only - Jul 1 0:00u 1:00 MSD -Rule Russia 1919 only - Aug 16 0:00 0 MSK -# -# Decree No. 63 (1921-02-03) http://istmat.info/node/45840 -Rule Russia 1921 only - Feb 14 23:00 1:00 MSD -# -# Decree No. 121 (1921-03-07) http://istmat.info/node/45949 -Rule Russia 1921 only - Mar 20 23:00 2:00 +05 -# -Rule Russia 1921 only - Sep 1 0:00 1:00 MSD -Rule Russia 1921 only - Oct 1 0:00 0 - -# Act No. 925 of the Council of Ministers of the USSR (1980-10-24): -Rule Russia 1981 1984 - Apr 1 0:00 1:00 S -Rule Russia 1981 1983 - Oct 1 0:00 0 - -# Act No. 967 of the Council of Ministers of the USSR (1984-09-13), repeated in -# Act No. 227 of the Council of Ministers of the USSR (1989-03-14): -Rule Russia 1984 1995 - Sep lastSun 2:00s 0 - -Rule Russia 1985 2010 - Mar lastSun 2:00s 1:00 S -# -Rule Russia 1996 2010 - Oct lastSun 2:00s 0 - -# As described below, Russia's 2014 change affects Zone data, not Rule data. - -# From Stepan Golosunov (2016-03-07): -# Wikipedia and other sources refer to the Act of the Council of -# Ministers of the USSR from 1988-01-04 No. 
5 and the Act of the -# Council of Ministers of the USSR from 1989-03-14 No. 227. -# -# I did not find full texts of these acts. For the 1989 one we have -# title at http://base.garant.ru/70754136/ : -# "About change in calculation of time on the territories of -# Lithuanian SSR, Latvian SSR and Estonian SSR, Astrakhan, -# Kaliningrad, Kirov, Kuybyshev, Ulyanovsk and Uralsk oblasts". -# And http://astrozet.net/files/Zones/DOC/RU/1980-925.txt appears to -# contain quotes from both acts: Since last Sunday of March 1988 rules -# of the second time belt are installed in Volgograd and Saratov -# oblasts. Since last Sunday of March 1989: -# a) Lithuanian SSR, Latvian SSR, Estonian SSR, Kaliningrad oblast: -# second time belt rules without extra hour (Moscow-1); -# b) Astrakhan, Kirov, Kuybyshev, Ulyanovsk oblasts: second time belt -# rules (Moscow time) -# c) Uralsk oblast: third time belt rules (Moscow+1). - -# From Stepan Golosunov (2016-03-27): -# Unamended version of the act of the -# Government of the Russian Federation No. 23 from 08.01.1992 -# http://pravo.gov.ru/proxy/ips/?docbody=&nd=102014034&rdk=0 -# says that every year clocks were to be moved forward on last Sunday -# of March at 2 hours and moved backwards on last Sunday of September -# at 3 hours. It was amended in 1996 to replace September with October. - -# From Alexander Krivenyshev (2011-06-14): -# According to Kremlin press service, Russian President Dmitry Medvedev -# signed a federal law "On calculation of time" on June 9, 2011. -# According to the law Russia is abolishing daylight saving time. -# -# Medvedev signed a law "On the Calculation of Time" (in russian): -# http://bmockbe.ru/events/?ID=7583 -# -# Medvedev signed a law on the calculation of the time (in russian): -# http://www.regnum.ru/news/polit/1413906.html - -# From Arthur David Olson (2011-06-15): -# Take "abolishing daylight saving time" to mean that time is now considered -# to be standard. - -# These are for backward compatibility with older versions. - -# Zone NAME GMTOFF RULES FORMAT [UNTIL] -Zone WET 0:00 EU WE%sT -Zone CET 1:00 C-Eur CE%sT -Zone MET 1:00 C-Eur ME%sT -Zone EET 2:00 EU EE%sT - -# Previous editions of this database used abbreviations like MET DST -# for Central European Summer Time, but this didn't agree with common usage. - -# From Markus Kuhn (1996-07-12): -# The official German names ... are -# -# Mitteleuropäische Zeit (MEZ) = UTC+01:00 -# Mitteleuropäische Sommerzeit (MESZ) = UTC+02:00 -# -# as defined in the German Time Act (Gesetz über die Zeitbestimmung (ZeitG), -# 1978-07-25, Bundesgesetzblatt, Jahrgang 1978, Teil I, S. 1110-1111).... -# I wrote ... to the German Federal Physical-Technical Institution -# -# Physikalisch-Technische Bundesanstalt (PTB) -# Laboratorium 4.41 "Zeiteinheit" -# Postfach 3345 -# D-38023 Braunschweig -# phone: +49 531 592-0 -# -# ... I received today an answer letter from Dr. Peter Hetzel, head of the PTB -# department for time and frequency transmission. 
He explained that the -# PTB translates MEZ and MESZ into English as -# -# Central European Time (CET) = UTC+01:00 -# Central European Summer Time (CEST) = UTC+02:00 - - -# Albania -# Rule NAME FROM TO TYPE IN ON AT SAVE LETTER/S -Rule Albania 1940 only - Jun 16 0:00 1:00 S -Rule Albania 1942 only - Nov 2 3:00 0 - -Rule Albania 1943 only - Mar 29 2:00 1:00 S -Rule Albania 1943 only - Apr 10 3:00 0 - -Rule Albania 1974 only - May 4 0:00 1:00 S -Rule Albania 1974 only - Oct 2 0:00 0 - -Rule Albania 1975 only - May 1 0:00 1:00 S -Rule Albania 1975 only - Oct 2 0:00 0 - -Rule Albania 1976 only - May 2 0:00 1:00 S -Rule Albania 1976 only - Oct 3 0:00 0 - -Rule Albania 1977 only - May 8 0:00 1:00 S -Rule Albania 1977 only - Oct 2 0:00 0 - -Rule Albania 1978 only - May 6 0:00 1:00 S -Rule Albania 1978 only - Oct 1 0:00 0 - -Rule Albania 1979 only - May 5 0:00 1:00 S -Rule Albania 1979 only - Sep 30 0:00 0 - -Rule Albania 1980 only - May 3 0:00 1:00 S -Rule Albania 1980 only - Oct 4 0:00 0 - -Rule Albania 1981 only - Apr 26 0:00 1:00 S -Rule Albania 1981 only - Sep 27 0:00 0 - -Rule Albania 1982 only - May 2 0:00 1:00 S -Rule Albania 1982 only - Oct 3 0:00 0 - -Rule Albania 1983 only - Apr 18 0:00 1:00 S -Rule Albania 1983 only - Oct 1 0:00 0 - -Rule Albania 1984 only - Apr 1 0:00 1:00 S -# Zone NAME GMTOFF RULES FORMAT [UNTIL] -Zone Europe/Tirane 1:19:20 - LMT 1914 - 1:00 - CET 1940 Jun 16 - 1:00 Albania CE%sT 1984 Jul - 1:00 EU CE%sT - -# Andorra -# Zone NAME GMTOFF RULES FORMAT [UNTIL] -Zone Europe/Andorra 0:06:04 - LMT 1901 - 0:00 - WET 1946 Sep 30 - 1:00 - CET 1985 Mar 31 2:00 - 1:00 EU CE%sT - -# Austria - -# Milne says Vienna time was 1:05:21. - -# From Paul Eggert (2006-03-22): Shanks & Pottenger give 1918-06-16 and -# 1945-11-18, but the Austrian Federal Office of Metrology and -# Surveying (BEV) gives 1918-09-16 and for Vienna gives the "alleged" -# date of 1945-04-12 with no time. For the 1980-04-06 transition -# Shanks & Pottenger give 02:00, the BEV 00:00. Go with the BEV, -# and guess 02:00 for 1945-04-12. - -# Rule NAME FROM TO TYPE IN ON AT SAVE LETTER/S -Rule Austria 1920 only - Apr 5 2:00s 1:00 S -Rule Austria 1920 only - Sep 13 2:00s 0 - -Rule Austria 1946 only - Apr 14 2:00s 1:00 S -Rule Austria 1946 1948 - Oct Sun>=1 2:00s 0 - -Rule Austria 1947 only - Apr 6 2:00s 1:00 S -Rule Austria 1948 only - Apr 18 2:00s 1:00 S -Rule Austria 1980 only - Apr 6 0:00 1:00 S -Rule Austria 1980 only - Sep 28 0:00 0 - -# Zone NAME GMTOFF RULES FORMAT [UNTIL] -Zone Europe/Vienna 1:05:21 - LMT 1893 Apr - 1:00 C-Eur CE%sT 1920 - 1:00 Austria CE%sT 1940 Apr 1 2:00s - 1:00 C-Eur CE%sT 1945 Apr 2 2:00s - 1:00 1:00 CEST 1945 Apr 12 2:00s - 1:00 - CET 1946 - 1:00 Austria CE%sT 1981 - 1:00 EU CE%sT - -# Belarus -# -# From Stepan Golosunov (2016-07-02): -# http://www.lawbelarus.com/repub/sub30/texf9611.htm -# (Act of the Cabinet of Ministers of the Republic of Belarus from -# 1992-03-25 No. 157) ... says clocks were to be moved forward at 2:00 -# on last Sunday of March and backward at 3:00 on last Sunday of September -# (the same as previous USSR and contemporary Russian regulations). -# -# From Yauhen Kharuzhy (2011-09-16): -# By latest Belarus government act Europe/Minsk timezone was changed to -# GMT+3 without DST (was GMT+2 with DST). 
-# -# Sources (Russian language): -# http://www.belta.by/ru/all_news/society/V-Belarusi-otmenjaetsja-perexod-na-sezonnoe-vremja_i_572952.html -# http://naviny.by/rubrics/society/2011/09/16/ic_articles_116_175144/ -# http://news.tut.by/society/250578.html -# -# From Alexander Bokovoy (2014-10-09): -# Belarussian government decided against changing to winter time.... -# http://eng.belta.by/all_news/society/Belarus-decides-against-adjusting-time-in-Russias-wake_i_76335.html -# -# Zone NAME GMTOFF RULES FORMAT [UNTIL] -Zone Europe/Minsk 1:50:16 - LMT 1880 - 1:50 - MMT 1924 May 2 # Minsk Mean Time - 2:00 - EET 1930 Jun 21 - 3:00 - MSK 1941 Jun 28 - 1:00 C-Eur CE%sT 1944 Jul 3 - 3:00 Russia MSK/MSD 1990 - 3:00 - MSK 1991 Mar 31 2:00s - 2:00 Russia EE%sT 2011 Mar 27 2:00s - 3:00 - +03 - -# Belgium -# -# From Paul Eggert (1997-07-02): -# Entries from 1918 through 1991 are taken from: -# Annuaire de L'Observatoire Royal de Belgique, -# Avenue Circulaire, 3, B-1180 BRUXELLES, CLVIIe année, 1991 -# (Imprimerie HAYEZ, s.p.r.l., Rue Fin, 4, 1080 BRUXELLES, MCMXC), -# pp 8-9. -# LMT before 1892 was 0:17:30, according to the official journal of Belgium: -# Moniteur Belge, Samedi 30 Avril 1892, N.121. -# Thanks to Pascal Delmoitie for these references. -# The 1918 rules are listed for completeness; they apply to unoccupied Belgium. -# Assume Brussels switched to WET in 1918 when the armistice took effect. -# -# Rule NAME FROM TO TYPE IN ON AT SAVE LETTER/S -Rule Belgium 1918 only - Mar 9 0:00s 1:00 S -Rule Belgium 1918 1919 - Oct Sat>=1 23:00s 0 - -Rule Belgium 1919 only - Mar 1 23:00s 1:00 S -Rule Belgium 1920 only - Feb 14 23:00s 1:00 S -Rule Belgium 1920 only - Oct 23 23:00s 0 - -Rule Belgium 1921 only - Mar 14 23:00s 1:00 S -Rule Belgium 1921 only - Oct 25 23:00s 0 - -Rule Belgium 1922 only - Mar 25 23:00s 1:00 S -Rule Belgium 1922 1927 - Oct Sat>=1 23:00s 0 - -Rule Belgium 1923 only - Apr 21 23:00s 1:00 S -Rule Belgium 1924 only - Mar 29 23:00s 1:00 S -Rule Belgium 1925 only - Apr 4 23:00s 1:00 S -# DSH writes that a royal decree of 1926-02-22 specified the Sun following 3rd -# Sat in Apr (except if it's Easter, in which case it's one Sunday earlier), -# to Sun following 1st Sat in Oct, and that a royal decree of 1928-09-15 -# changed the transition times to 02:00 GMT. 
-Rule Belgium 1926 only - Apr 17 23:00s 1:00 S -Rule Belgium 1927 only - Apr 9 23:00s 1:00 S -Rule Belgium 1928 only - Apr 14 23:00s 1:00 S -Rule Belgium 1928 1938 - Oct Sun>=2 2:00s 0 - -Rule Belgium 1929 only - Apr 21 2:00s 1:00 S -Rule Belgium 1930 only - Apr 13 2:00s 1:00 S -Rule Belgium 1931 only - Apr 19 2:00s 1:00 S -Rule Belgium 1932 only - Apr 3 2:00s 1:00 S -Rule Belgium 1933 only - Mar 26 2:00s 1:00 S -Rule Belgium 1934 only - Apr 8 2:00s 1:00 S -Rule Belgium 1935 only - Mar 31 2:00s 1:00 S -Rule Belgium 1936 only - Apr 19 2:00s 1:00 S -Rule Belgium 1937 only - Apr 4 2:00s 1:00 S -Rule Belgium 1938 only - Mar 27 2:00s 1:00 S -Rule Belgium 1939 only - Apr 16 2:00s 1:00 S -Rule Belgium 1939 only - Nov 19 2:00s 0 - -Rule Belgium 1940 only - Feb 25 2:00s 1:00 S -Rule Belgium 1944 only - Sep 17 2:00s 0 - -Rule Belgium 1945 only - Apr 2 2:00s 1:00 S -Rule Belgium 1945 only - Sep 16 2:00s 0 - -Rule Belgium 1946 only - May 19 2:00s 1:00 S -Rule Belgium 1946 only - Oct 7 2:00s 0 - -# Zone NAME GMTOFF RULES FORMAT [UNTIL] -Zone Europe/Brussels 0:17:30 - LMT 1880 - 0:17:30 - BMT 1892 May 1 12:00 # Brussels MT - 0:00 - WET 1914 Nov 8 - 1:00 - CET 1916 May 1 0:00 - 1:00 C-Eur CE%sT 1918 Nov 11 11:00u - 0:00 Belgium WE%sT 1940 May 20 2:00s - 1:00 C-Eur CE%sT 1944 Sep 3 - 1:00 Belgium CE%sT 1977 - 1:00 EU CE%sT - -# Bosnia and Herzegovina -# See Europe/Belgrade. - -# Bulgaria -# -# From Plamen Simenov via Steffen Thorsen (1999-09-09): -# A document of Government of Bulgaria (No. 94/1997) says: -# EET -> EETDST is in 03:00 Local time in last Sunday of March ... -# EETDST -> EET is in 04:00 Local time in last Sunday of October -# -# Rule NAME FROM TO TYPE IN ON AT SAVE LETTER/S -Rule Bulg 1979 only - Mar 31 23:00 1:00 S -Rule Bulg 1979 only - Oct 1 1:00 0 - -Rule Bulg 1980 1982 - Apr Sat>=1 23:00 1:00 S -Rule Bulg 1980 only - Sep 29 1:00 0 - -Rule Bulg 1981 only - Sep 27 2:00 0 - -# Zone NAME GMTOFF RULES FORMAT [UNTIL] -Zone Europe/Sofia 1:33:16 - LMT 1880 - 1:56:56 - IMT 1894 Nov 30 # Istanbul MT? - 2:00 - EET 1942 Nov 2 3:00 - 1:00 C-Eur CE%sT 1945 - 1:00 - CET 1945 Apr 2 3:00 - 2:00 - EET 1979 Mar 31 23:00 - 2:00 Bulg EE%sT 1982 Sep 26 3:00 - 2:00 C-Eur EE%sT 1991 - 2:00 E-Eur EE%sT 1997 - 2:00 EU EE%sT - -# Croatia -# See Europe/Belgrade. - -# Cyprus -# Please see the 'asia' file for Asia/Nicosia. - -# Czech Republic / Czechia -# Rule NAME FROM TO TYPE IN ON AT SAVE LETTER/S -Rule Czech 1945 only - Apr 8 2:00s 1:00 S -Rule Czech 1945 only - Nov 18 2:00s 0 - -Rule Czech 1946 only - May 6 2:00s 1:00 S -Rule Czech 1946 1949 - Oct Sun>=1 2:00s 0 - -Rule Czech 1947 only - Apr 20 2:00s 1:00 S -Rule Czech 1948 only - Apr 18 2:00s 1:00 S -Rule Czech 1949 only - Apr 9 2:00s 1:00 S -# Zone NAME GMTOFF RULES FORMAT [UNTIL] -Zone Europe/Prague 0:57:44 - LMT 1850 - 0:57:44 - PMT 1891 Oct # Prague Mean Time - 1:00 C-Eur CE%sT 1944 Sep 17 2:00s - 1:00 Czech CE%sT 1979 - 1:00 EU CE%sT -# Use Europe/Prague also for Slovakia. - -# Denmark, Faroe Islands, and Greenland - -# From Jesper Nørgaard Welen (2005-04-26): -# http://www.hum.aau.dk/~poe/tid/tine/DanskTid.htm says that the law -# [introducing standard time] was in effect from 1894-01-01.... -# The page http://www.retsinfo.dk/_GETDOCI_/ACCN/A18930008330-REGL -# confirms this, and states that the law was put forth 1893-03-29. 
-# -# The EU treaty with effect from 1973: -# http://www.retsinfo.dk/_GETDOCI_/ACCN/A19722110030-REGL -# -# This provoked a new law from 1974 to make possible summer time changes -# in subsequent decrees with the law -# http://www.retsinfo.dk/_GETDOCI_/ACCN/A19740022330-REGL -# -# It seems however that no decree was set forward until 1980. I have -# not found any decree, but in another related law, the effecting DST -# changes are stated explicitly to be from 1980-04-06 at 02:00 to -# 1980-09-28 at 02:00. If this is true, this differs slightly from -# the EU rule in that DST runs to 02:00, not 03:00. We don't know -# when Denmark began using the EU rule correctly, but we have only -# confirmation of the 1980-time, so I presume it was correct in 1981: -# The law is about the management of the extra hour, concerning -# working hours reported and effect on obligatory-rest rules (which -# was suspended on that night): -# http://www.retsinfo.dk/_GETDOCI_/ACCN/C19801120554-REGL - -# From Jesper Nørgaard Welen (2005-06-11): -# The Herning Folkeblad (1980-09-26) reported that the night between -# Saturday and Sunday the clock is set back from three to two. - -# From Paul Eggert (2005-06-11): -# Hence the "02:00" of the 1980 law refers to standard time, not -# wall-clock time, and so the EU rules were in effect in 1980. - -# Rule NAME FROM TO TYPE IN ON AT SAVE LETTER/S -Rule Denmark 1916 only - May 14 23:00 1:00 S -Rule Denmark 1916 only - Sep 30 23:00 0 - -Rule Denmark 1940 only - May 15 0:00 1:00 S -Rule Denmark 1945 only - Apr 2 2:00s 1:00 S -Rule Denmark 1945 only - Aug 15 2:00s 0 - -Rule Denmark 1946 only - May 1 2:00s 1:00 S -Rule Denmark 1946 only - Sep 1 2:00s 0 - -Rule Denmark 1947 only - May 4 2:00s 1:00 S -Rule Denmark 1947 only - Aug 10 2:00s 0 - -Rule Denmark 1948 only - May 9 2:00s 1:00 S -Rule Denmark 1948 only - Aug 8 2:00s 0 - -# -# Zone NAME GMTOFF RULES FORMAT [UNTIL] -Zone Europe/Copenhagen 0:50:20 - LMT 1890 - 0:50:20 - CMT 1894 Jan 1 # Copenhagen MT - 1:00 Denmark CE%sT 1942 Nov 2 2:00s - 1:00 C-Eur CE%sT 1945 Apr 2 2:00 - 1:00 Denmark CE%sT 1980 - 1:00 EU CE%sT -Zone Atlantic/Faroe -0:27:04 - LMT 1908 Jan 11 # Tórshavn - 0:00 - WET 1981 - 0:00 EU WE%sT -# -# From Paul Eggert (2004-10-31): -# During World War II, Germany maintained secret manned weather stations in -# East Greenland and Franz Josef Land, but we don't know their time zones. -# My source for this is Wilhelm Dege's book mentioned under Svalbard. -# -# From Paul Eggert (2006-03-22): -# Greenland joined the EU as part of Denmark, obtained home rule on 1979-05-01, -# and left the EU on 1985-02-01. It therefore should have been using EU -# rules at least through 1984. Shanks & Pottenger say Scoresbysund and Godthåb -# used C-Eur rules after 1980, but IATA SSIM (1991/1996) says they use EU -# rules since at least 1991. Assume EU rules since 1980. - -# From Gwillim Law (2001-06-06), citing -# (2001-03-15), -# and with translations corrected by Steffen Thorsen: -# -# Greenland has four local times, and the relation to UTC -# is according to the following time line: -# -# The military zone near Thule UTC-4 -# Standard Greenland time UTC-3 -# Scoresbysund UTC-1 -# Danmarkshavn UTC -# -# In the military area near Thule and in Danmarkshavn DST will not be -# introduced. - -# From Rives McDow (2001-11-01): -# -# I correspond regularly with the Dansk Polarcenter, and wrote them at -# the time to clarify the situation in Thule. Unfortunately, I have -# not heard back from them regarding my recent letter. 
[But I have -# info from earlier correspondence.] -# -# According to the center, a very small local time zone around Thule -# Air Base keeps the time according to UTC-4, implementing daylight -# savings using North America rules, changing the time at 02:00 local time.... -# -# The east coast of Greenland north of the community of Scoresbysund -# uses UTC in the same way as in Iceland, year round, with no dst. -# There are just a few stations on this coast, including the -# Danmarkshavn ICAO weather station mentioned in your September 29th -# email. The other stations are two sledge patrol stations in -# Mestersvig and Daneborg, the air force base at Station Nord, and the -# DPC research station at Zackenberg. -# -# Scoresbysund and two small villages nearby keep time UTC-1 and use -# the same daylight savings time period as in West Greenland (Godthåb). -# -# The rest of Greenland, including Godthåb (this area, although it -# includes central Greenland, is known as west Greenland), keeps time -# UTC-3, with daylight savings methods according to European rules. -# -# It is common procedure to use UTC 0 in the wilderness of East and -# North Greenland, because it is mainly Icelandic aircraft operators -# maintaining traffic in these areas. However, the official status of -# this area is that it sticks with Godthåb time. This area might be -# considered a dual time zone in some respects because of this. - -# From Rives McDow (2001-11-19): -# I heard back from someone stationed at Thule; the time change took place -# there at 2:00 AM. - -# From Paul Eggert (2006-03-22): -# From 1997 on the CIA map shows Danmarkshavn on GMT; -# the 1995 map as like Godthåb. -# For lack of better info, assume they were like Godthåb before 1996. -# startkart.no says Thule does not observe DST, but this is clearly an error, -# so go with Shanks & Pottenger for Thule transitions until this year. -# For 2007 on assume Thule will stay in sync with US DST rules. - -# From J William Piggott (2016-02-20): -# "Greenland north of the community of Scoresbysund" is officially named -# "National Park" by Executive Order: -# http://naalakkersuisut.gl/~/media/Nanoq/Files/Attached%20Files/Engelske-tekster/Legislation/Executive%20Order%20National%20Park.rtf -# It is their only National Park. -# -# Rule NAME FROM TO TYPE IN ON AT SAVE LETTER/S -Rule Thule 1991 1992 - Mar lastSun 2:00 1:00 D -Rule Thule 1991 1992 - Sep lastSun 2:00 0 S -Rule Thule 1993 2006 - Apr Sun>=1 2:00 1:00 D -Rule Thule 1993 2006 - Oct lastSun 2:00 0 S -Rule Thule 2007 max - Mar Sun>=8 2:00 1:00 D -Rule Thule 2007 max - Nov Sun>=1 2:00 0 S -# -# Zone NAME GMTOFF RULES FORMAT [UNTIL] -Zone America/Danmarkshavn -1:14:40 - LMT 1916 Jul 28 - -3:00 - -03 1980 Apr 6 2:00 - -3:00 EU -03/-02 1996 - 0:00 - GMT -Zone America/Scoresbysund -1:27:52 - LMT 1916 Jul 28 # Ittoqqortoormiit - -2:00 - -02 1980 Apr 6 2:00 - -2:00 C-Eur -02/-01 1981 Mar 29 - -1:00 EU -01/+00 -Zone America/Godthab -3:26:56 - LMT 1916 Jul 28 # Nuuk - -3:00 - -03 1980 Apr 6 2:00 - -3:00 EU -03/-02 -Zone America/Thule -4:35:08 - LMT 1916 Jul 28 # Pituffik air base - -4:00 Thule A%sT - -# Estonia -# -# From Paul Eggert (2016-03-18): -# The 1989 transition is from USSR act No. 227 (1989-03-14). -# -# From Peter Ilieve (1994-10-15): -# A relative in Tallinn confirms the accuracy of the data for 1989 onwards -# [through 1994] and gives the legal authority for it, -# a regulation of the Government of Estonia, No. 111 of 1989.... 
-# -# From Peter Ilieve (1996-10-28): -# [IATA SSIM (1992/1996) claims that the Baltic republics switch at 01:00s, -# but a relative confirms that Estonia still switches at 02:00s, writing:] -# "I do not [know] exactly but there are some little different -# (confusing) rules for International Air and Railway Transport Schedules -# conversion in Sunday connected with end of summer time in Estonia.... -# A discussion is running about the summer time efficiency and effect on -# human physiology. It seems that Estonia maybe will not change to -# summer time next spring." - -# From Peter Ilieve (1998-11-04), heavily edited: -# The 1998-09-22 Estonian time law -# http://trip.rk.ee/cgi-bin/thw?${BASE}=akt&${OOHTML}=rtd&TA=1998&TO=1&AN=1390 -# refers to the Eighth Directive and cites the association agreement between -# the EU and Estonia, ratified by the Estonian law (RT II 1995, 22-27, 120). -# -# I also asked [my relative] whether they use any standard abbreviation -# for their standard and summer times. He says no, they use "suveaeg" -# (summer time) and "talveaeg" (winter time). - -# From The Baltic Times (1999-09-09) -# via Steffen Thorsen: -# This year will mark the last time Estonia shifts to summer time, -# a council of the ruling coalition announced Sept. 6.... -# But what this could mean for Estonia's chances of joining the European -# Union are still unclear. In 1994, the EU declared summer time compulsory -# for all member states until 2001. Brussels has yet to decide what to do -# after that. - -# From Mart Oruaas (2000-01-29): -# Regulation No. 301 (1999-10-12) obsoletes previous regulation -# No. 206 (1998-09-22) and thus sticks Estonia to +02:00 GMT for all -# the year round. The regulation is effective 1999-11-01. - -# From Toomas Soome (2002-02-21): -# The Estonian government has changed once again timezone politics. -# Now we are using again EU rules. -# -# From Urmet Jänes (2002-03-28): -# The legislative reference is Government decree No. 84 on 2002-02-21. - -# Zone NAME GMTOFF RULES FORMAT [UNTIL] -Zone Europe/Tallinn 1:39:00 - LMT 1880 - 1:39:00 - TMT 1918 Feb # Tallinn Mean Time - 1:00 C-Eur CE%sT 1919 Jul - 1:39:00 - TMT 1921 May - 2:00 - EET 1940 Aug 6 - 3:00 - MSK 1941 Sep 15 - 1:00 C-Eur CE%sT 1944 Sep 22 - 3:00 Russia MSK/MSD 1989 Mar 26 2:00s - 2:00 1:00 EEST 1989 Sep 24 2:00s - 2:00 C-Eur EE%sT 1998 Sep 22 - 2:00 EU EE%sT 1999 Oct 31 4:00 - 2:00 - EET 2002 Feb 21 - 2:00 EU EE%sT - -# Finland - -# From Hannu Strang (1994-09-25 06:03:37 UTC): -# Well, here in Helsinki we're just changing from summer time to regular one, -# and it's supposed to change at 4am... - -# From Janne Snabb (2010-07-15): -# -# I noticed that the Finland data is not accurate for years 1981 and 1982. -# During these two first trial years the DST adjustment was made one hour -# earlier than in forthcoming years. Starting 1983 the adjustment was made -# according to the central European standards. -# -# This is documented in Heikki Oja: Aikakirja 2007, published by The Almanac -# Office of University of Helsinki, ISBN 952-10-3221-9, available online (in -# Finnish) at -# http://almanakka.helsinki.fi/aikakirja/Aikakirja2007kokonaan.pdf -# -# Page 105 (56 in PDF version) has a handy table of all past daylight savings -# transitions. It is easy enough to interpret without Finnish skills. 
-# -# This is also confirmed by Finnish Broadcasting Company's archive at: -# http://www.yle.fi/elavaarkisto/?s=s&g=1&ag=5&t=&a=3401 -# -# The news clip from 1981 says that "the time between 2 and 3 o'clock does not -# exist tonight." - -# From Konstantin Hyppönen (2014-06-13): -# [Heikki Oja's book Aikakirja 2013] -# http://almanakka.helsinki.fi/images/aikakirja/Aikakirja2013kokonaan.pdf -# pages 104-105, including a scan from a newspaper published on Apr 2 1942 -# say that ... [o]n Apr 2 1942, 24 o'clock (which means Apr 3 1942, -# 00:00), clocks were moved one hour forward. The newspaper -# mentions "on the night from Thursday to Friday".... -# On Oct 4 1942, clocks were moved at 1:00 one hour backwards. -# -# From Paul Eggert (2014-06-14): -# Go with Oja over Shanks. - -# Rule NAME FROM TO TYPE IN ON AT SAVE LETTER/S -Rule Finland 1942 only - Apr 2 24:00 1:00 S -Rule Finland 1942 only - Oct 4 1:00 0 - -Rule Finland 1981 1982 - Mar lastSun 2:00 1:00 S -Rule Finland 1981 1982 - Sep lastSun 3:00 0 - - -# Milne says Helsinki (Helsingfors) time was 1:39:49.2 (official document); -# round to nearest. - -# Zone NAME GMTOFF RULES FORMAT [UNTIL] -Zone Europe/Helsinki 1:39:49 - LMT 1878 May 31 - 1:39:49 - HMT 1921 May # Helsinki Mean Time - 2:00 Finland EE%sT 1983 - 2:00 EU EE%sT - -# Åland Is -Link Europe/Helsinki Europe/Mariehamn - - -# France - -# From Ciro Discepolo (2000-12-20): -# -# Henri Le Corre, Régimes horaires pour le monde entier, Éditions -# Traditionnelles - Paris 2 books, 1993 -# -# Gabriel, Traité de l'heure dans le monde, Guy Trédaniel, -# Paris, 1991 -# -# Françoise Gauquelin, Problèmes de l'heure résolus en astrologie, -# Guy Trédaniel, Paris 1987 - - -# -# Shank & Pottenger seem to use '24:00' ambiguously; resolve it with Whitman. -# Rule NAME FROM TO TYPE IN ON AT SAVE LETTER/S -Rule France 1916 only - Jun 14 23:00s 1:00 S -Rule France 1916 1919 - Oct Sun>=1 23:00s 0 - -Rule France 1917 only - Mar 24 23:00s 1:00 S -Rule France 1918 only - Mar 9 23:00s 1:00 S -Rule France 1919 only - Mar 1 23:00s 1:00 S -Rule France 1920 only - Feb 14 23:00s 1:00 S -Rule France 1920 only - Oct 23 23:00s 0 - -Rule France 1921 only - Mar 14 23:00s 1:00 S -Rule France 1921 only - Oct 25 23:00s 0 - -Rule France 1922 only - Mar 25 23:00s 1:00 S -# DSH writes that a law of 1923-05-24 specified 3rd Sat in Apr at 23:00 to 1st -# Sat in Oct at 24:00; and that in 1930, because of Easter, the transitions -# were Apr 12 and Oct 5. Go with Shanks & Pottenger. -Rule France 1922 1938 - Oct Sat>=1 23:00s 0 - -Rule France 1923 only - May 26 23:00s 1:00 S -Rule France 1924 only - Mar 29 23:00s 1:00 S -Rule France 1925 only - Apr 4 23:00s 1:00 S -Rule France 1926 only - Apr 17 23:00s 1:00 S -Rule France 1927 only - Apr 9 23:00s 1:00 S -Rule France 1928 only - Apr 14 23:00s 1:00 S -Rule France 1929 only - Apr 20 23:00s 1:00 S -Rule France 1930 only - Apr 12 23:00s 1:00 S -Rule France 1931 only - Apr 18 23:00s 1:00 S -Rule France 1932 only - Apr 2 23:00s 1:00 S -Rule France 1933 only - Mar 25 23:00s 1:00 S -Rule France 1934 only - Apr 7 23:00s 1:00 S -Rule France 1935 only - Mar 30 23:00s 1:00 S -Rule France 1936 only - Apr 18 23:00s 1:00 S -Rule France 1937 only - Apr 3 23:00s 1:00 S -Rule France 1938 only - Mar 26 23:00s 1:00 S -Rule France 1939 only - Apr 15 23:00s 1:00 S -Rule France 1939 only - Nov 18 23:00s 0 - -Rule France 1940 only - Feb 25 2:00 1:00 S -# The French rules for 1941-1944 were not used in Paris, but Shanks & Pottenger -# write that they were used in Monaco and in many French locations. 
-# Le Corre writes that the upper limit of the free zone was Arnéguy, Orthez, -# Mont-de-Marsan, Bazas, Langon, Lamothe-Montravel, Marœuil, La -# Rochefoucauld, Champagne-Mouton, La Roche-Posay, La Haye-Descartes, -# Loches, Montrichard, Vierzon, Bourges, Moulins, Digoin, -# Paray-le-Monial, Montceau-les-Mines, Chalon-sur-Saône, Arbois, -# Dole, Morez, St-Claude, and Collonges (Haute-Savoie). -Rule France 1941 only - May 5 0:00 2:00 M # Midsummer -# Shanks & Pottenger say this transition occurred at Oct 6 1:00, -# but go with Denis Excoffier (1997-12-12), -# who quotes the Ephémérides astronomiques for 1998 from Bureau des Longitudes -# as saying 5/10/41 22hUT. -Rule France 1941 only - Oct 6 0:00 1:00 S -Rule France 1942 only - Mar 9 0:00 2:00 M -Rule France 1942 only - Nov 2 3:00 1:00 S -Rule France 1943 only - Mar 29 2:00 2:00 M -Rule France 1943 only - Oct 4 3:00 1:00 S -Rule France 1944 only - Apr 3 2:00 2:00 M -Rule France 1944 only - Oct 8 1:00 1:00 S -Rule France 1945 only - Apr 2 2:00 2:00 M -Rule France 1945 only - Sep 16 3:00 0 - -# Shanks & Pottenger give Mar 28 2:00 and Sep 26 3:00; -# go with Excoffier's 28/3/76 0hUT and 25/9/76 23hUT. -Rule France 1976 only - Mar 28 1:00 1:00 S -Rule France 1976 only - Sep 26 1:00 0 - -# Shanks & Pottenger give 0:09:20 for Paris Mean Time, and Whitman 0:09:05, -# but Howse quotes the actual French legislation as saying 0:09:21. -# Go with Howse. Howse writes that the time in France was officially based -# on PMT-0:09:21 until 1978-08-09, when the time base finally switched to UTC. -# Zone NAME GMTOFF RULES FORMAT [UNTIL] -Zone Europe/Paris 0:09:21 - LMT 1891 Mar 15 0:01 - 0:09:21 - PMT 1911 Mar 11 0:01 # Paris MT -# Shanks & Pottenger give 1940 Jun 14 0:00; go with Excoffier and Le Corre. - 0:00 France WE%sT 1940 Jun 14 23:00 -# Le Corre says Paris stuck with occupied-France time after the liberation; -# go with Shanks & Pottenger. - 1:00 C-Eur CE%sT 1944 Aug 25 - 0:00 France WE%sT 1945 Sep 16 3:00 - 1:00 France CE%sT 1977 - 1:00 EU CE%sT - -# Germany - -# From Markus Kuhn (1998-09-29): -# The German time zone web site by the Physikalisch-Technische -# Bundesanstalt contains DST information back to 1916. -# [See tz-link.htm for the URL.] - -# From Jörg Schilling (2002-10-23): -# In 1945, Berlin was switched to Moscow Summer time (GMT+4) by -# http://www.dhm.de/lemo/html/biografien/BersarinNikolai/ -# General [Nikolai] Bersarin. - -# From Paul Eggert (2003-03-08): -# http://www.parlament-berlin.de/pds-fraktion.nsf/727459127c8b66ee8525662300459099/defc77cb784f180ac1256c2b0030274b/$FILE/bersarint.pdf -# says that Bersarin issued an order to use Moscow time on May 20. -# However, Moscow did not observe daylight saving in 1945, so -# this was equivalent to UT +03, not +04. - - -# Rule NAME FROM TO TYPE IN ON AT SAVE LETTER/S -Rule Germany 1946 only - Apr 14 2:00s 1:00 S -Rule Germany 1946 only - Oct 7 2:00s 0 - -Rule Germany 1947 1949 - Oct Sun>=1 2:00s 0 - -# http://www.ptb.de/de/org/4/44/441/salt.htm says the following transition -# occurred at 3:00 MEZ, not the 2:00 MEZ given in Shanks & Pottenger. -# Go with the PTB. 
-Rule Germany 1947 only - Apr 6 3:00s 1:00 S -Rule Germany 1947 only - May 11 2:00s 2:00 M -Rule Germany 1947 only - Jun 29 3:00 1:00 S -Rule Germany 1948 only - Apr 18 2:00s 1:00 S -Rule Germany 1949 only - Apr 10 2:00s 1:00 S - -Rule SovietZone 1945 only - May 24 2:00 2:00 M # Midsummer -Rule SovietZone 1945 only - Sep 24 3:00 1:00 S -Rule SovietZone 1945 only - Nov 18 2:00s 0 - - -# Zone NAME GMTOFF RULES FORMAT [UNTIL] -Zone Europe/Berlin 0:53:28 - LMT 1893 Apr - 1:00 C-Eur CE%sT 1945 May 24 2:00 - 1:00 SovietZone CE%sT 1946 - 1:00 Germany CE%sT 1980 - 1:00 EU CE%sT - -# From Tobias Conradi (2011-09-12): -# Büsingen , surrounded by the Swiss canton -# Schaffhausen, did not start observing DST in 1980 as the rest of DE -# (West Germany at that time) and DD (East Germany at that time) did. -# DD merged into DE, the area is currently covered by code DE in ISO 3166-1, -# which in turn is covered by the zone Europe/Berlin. -# -# Source for the time in Büsingen 1980: -# http://www.srf.ch/player/video?id=c012c029-03b7-4c2b-9164-aa5902cd58d3 - -# From Arthur David Olson (2012-03-03): -# Büsingen and Zurich have shared clocks since 1970. - -Link Europe/Zurich Europe/Busingen - -# Georgia -# Please see the "asia" file for Asia/Tbilisi. -# Herodotus (Histories, IV.45) says Georgia north of the Phasis (now Rioni) -# is in Europe. Our reference location Tbilisi is in the Asian part. - -# Gibraltar -# Zone NAME GMTOFF RULES FORMAT [UNTIL] -Zone Europe/Gibraltar -0:21:24 - LMT 1880 Aug 2 0:00s - 0:00 GB-Eire %s 1957 Apr 14 2:00 - 1:00 - CET 1982 - 1:00 EU CE%sT - -# Greece -# Rule NAME FROM TO TYPE IN ON AT SAVE LETTER/S -# Whitman gives 1932 Jul 5 - Nov 1; go with Shanks & Pottenger. -Rule Greece 1932 only - Jul 7 0:00 1:00 S -Rule Greece 1932 only - Sep 1 0:00 0 - -# Whitman gives 1941 Apr 25 - ?; go with Shanks & Pottenger. -Rule Greece 1941 only - Apr 7 0:00 1:00 S -# Whitman gives 1942 Feb 2 - ?; go with Shanks & Pottenger. -Rule Greece 1942 only - Nov 2 3:00 0 - -Rule Greece 1943 only - Mar 30 0:00 1:00 S -Rule Greece 1943 only - Oct 4 0:00 0 - -# Whitman gives 1944 Oct 3 - Oct 31; go with Shanks & Pottenger. -Rule Greece 1952 only - Jul 1 0:00 1:00 S -Rule Greece 1952 only - Nov 2 0:00 0 - -Rule Greece 1975 only - Apr 12 0:00s 1:00 S -Rule Greece 1975 only - Nov 26 0:00s 0 - -Rule Greece 1976 only - Apr 11 2:00s 1:00 S -Rule Greece 1976 only - Oct 10 2:00s 0 - -Rule Greece 1977 1978 - Apr Sun>=1 2:00s 1:00 S -Rule Greece 1977 only - Sep 26 2:00s 0 - -Rule Greece 1978 only - Sep 24 4:00 0 - -Rule Greece 1979 only - Apr 1 9:00 1:00 S -Rule Greece 1979 only - Sep 29 2:00 0 - -Rule Greece 1980 only - Apr 1 0:00 1:00 S -Rule Greece 1980 only - Sep 28 0:00 0 - -# Zone NAME GMTOFF RULES FORMAT [UNTIL] -Zone Europe/Athens 1:34:52 - LMT 1895 Sep 14 - 1:34:52 - AMT 1916 Jul 28 0:01 # Athens MT - 2:00 Greece EE%sT 1941 Apr 30 - 1:00 Greece CE%sT 1944 Apr 4 - 2:00 Greece EE%sT 1981 - # Shanks & Pottenger say it switched to C-Eur in 1981; - # go with EU instead, since Greece joined it on Jan 1. - 2:00 EU EE%sT - -# Hungary -# From Paul Eggert (2014-07-15): -# Dates for 1916-1945 are taken from: -# Oross A. Jelen a múlt jövője: a nyári időszámítás Magyarországon 1916-1945. -# National Archives of Hungary (2012-10-29). -# http://mnl.gov.hu/a_het_dokumentuma/a_nyari_idoszamitas_magyarorszagon_19161945.html -# This source does not always give times, which are taken from Shanks -# & Pottenger (which disagree about the dates). 
-# Rule NAME FROM TO TYPE IN ON AT SAVE LETTER/S -Rule Hungary 1918 only - Apr 1 3:00 1:00 S -Rule Hungary 1918 only - Sep 16 3:00 0 - -Rule Hungary 1919 only - Apr 15 3:00 1:00 S -Rule Hungary 1919 only - Nov 24 3:00 0 - -Rule Hungary 1945 only - May 1 23:00 1:00 S -Rule Hungary 1945 only - Nov 1 0:00 0 - -Rule Hungary 1946 only - Mar 31 2:00s 1:00 S -Rule Hungary 1946 1949 - Oct Sun>=1 2:00s 0 - -Rule Hungary 1947 1949 - Apr Sun>=4 2:00s 1:00 S -Rule Hungary 1950 only - Apr 17 2:00s 1:00 S -Rule Hungary 1950 only - Oct 23 2:00s 0 - -Rule Hungary 1954 1955 - May 23 0:00 1:00 S -Rule Hungary 1954 1955 - Oct 3 0:00 0 - -Rule Hungary 1956 only - Jun Sun>=1 0:00 1:00 S -Rule Hungary 1956 only - Sep lastSun 0:00 0 - -Rule Hungary 1957 only - Jun Sun>=1 1:00 1:00 S -Rule Hungary 1957 only - Sep lastSun 3:00 0 - -Rule Hungary 1980 only - Apr 6 1:00 1:00 S -# Zone NAME GMTOFF RULES FORMAT [UNTIL] -Zone Europe/Budapest 1:16:20 - LMT 1890 Oct - 1:00 C-Eur CE%sT 1918 - 1:00 Hungary CE%sT 1941 Apr 8 - 1:00 C-Eur CE%sT 1945 - 1:00 Hungary CE%sT 1980 Sep 28 2:00s - 1:00 EU CE%sT - -# Iceland -# -# From Adam David (1993-11-06): -# The name of the timezone in Iceland for system / mail / news purposes is GMT. -# -# (1993-12-05): -# This material is paraphrased from the 1988 edition of the University of -# Iceland Almanak. -# -# From January 1st, 1908 the whole of Iceland was standardised at 1 hour -# behind GMT. Previously, local mean solar time was used in different parts -# of Iceland, the almanak had been based on Reykjavik mean solar time which -# was 1 hour and 28 minutes behind GMT. -# -# "first day of winter" referred to [below] means the first day of the 26 weeks -# of winter, according to the old icelandic calendar that dates back to the -# time the norsemen first settled Iceland. The first day of winter is always -# Saturday, but is not dependent on the Julian or Gregorian calendars. -# -# (1993-12-10): -# I have a reference from the Oxford Icelandic-English dictionary for the -# beginning of winter, which ties it to the ecclesiastical calendar (and thus -# to the julian/gregorian calendar) over the period in question. -# the winter begins on the Saturday next before St. Luke's day -# (old style), or on St. Luke's day, if a Saturday. -# St. Luke's day ought to be traceable from ecclesiastical sources. "old style" -# might be a reference to the Julian calendar as opposed to Gregorian, or it -# might mean something else (???). 
-# -# From Paul Eggert (2014-11-22): -# The information below is taken from the 1988 Almanak; see -# http://www.almanak.hi.is/klukkan.html -# -# Rule NAME FROM TO TYPE IN ON AT SAVE LETTER/S -Rule Iceland 1917 1919 - Feb 19 23:00 1:00 S -Rule Iceland 1917 only - Oct 21 1:00 0 - -Rule Iceland 1918 1919 - Nov 16 1:00 0 - -Rule Iceland 1921 only - Mar 19 23:00 1:00 S -Rule Iceland 1921 only - Jun 23 1:00 0 - -Rule Iceland 1939 only - Apr 29 23:00 1:00 S -Rule Iceland 1939 only - Oct 29 2:00 0 - -Rule Iceland 1940 only - Feb 25 2:00 1:00 S -Rule Iceland 1940 1941 - Nov Sun>=2 1:00s 0 - -Rule Iceland 1941 1942 - Mar Sun>=2 1:00s 1:00 S -# 1943-1946 - first Sunday in March until first Sunday in winter -Rule Iceland 1943 1946 - Mar Sun>=1 1:00s 1:00 S -Rule Iceland 1942 1948 - Oct Sun>=22 1:00s 0 - -# 1947-1967 - first Sunday in April until first Sunday in winter -Rule Iceland 1947 1967 - Apr Sun>=1 1:00s 1:00 S -# 1949 and 1967 Oct transitions delayed by 1 week -Rule Iceland 1949 only - Oct 30 1:00s 0 - -Rule Iceland 1950 1966 - Oct Sun>=22 1:00s 0 - -Rule Iceland 1967 only - Oct 29 1:00s 0 - -# Zone NAME GMTOFF RULES FORMAT [UNTIL] -Zone Atlantic/Reykjavik -1:28 - LMT 1908 - -1:00 Iceland -01/+00 1968 Apr 7 1:00s - 0:00 - GMT - -# Italy -# -# From Paul Eggert (2001-03-06): -# Sicily and Sardinia each had their own time zones from 1866 to 1893, -# called Palermo Time (+00:53:28) and Cagliari Time (+00:36:32). -# During World War II, German-controlled Italy used German time. -# But these events all occurred before the 1970 cutoff, -# so record only the time in Rome. -# -# From Michael Deckers (2016-10-24): -# http://www.ac-ilsestante.it/MERIDIANE/ora_legale quotes a law of 1893-08-10 -# ... [translated as] "The preceding dispositions will enter into -# force at the instant at which, according to the time specified in -# the 1st article, the 1st of November 1893 will begin...." -# -# From Pierpaolo Bernardi (2016-10-20): -# The authoritative source for time in Italy is the national metrological -# institute, which has a summary page of historical DST data at -# http://www.inrim.it/res/tf/ora_legale_i.shtml -# (2016-10-24): -# http://www.renzobaldini.it/le-ore-legali-in-italia/ -# has still different data for 1944. It divides Italy in two, as -# there were effectively two governments at the time, north of Gothic -# Line German controlled territory, official government RSI, and south -# of the Gothic Line, controlled by allied armies. -# -# From Brian Inglis (2016-10-23): -# Viceregal LEGISLATIVE DECREE. 14 September 1944, no. 219. -# Restoration of Standard Time. (044U0219) (OJ 62 of 30.9.1944) ... -# Given the R. law decreed on 1944-03-29, no. 92, by which standard time is -# advanced to sixty minutes later starting at hour two on 1944-04-02; ... -# Starting at hour three on the date 1944-09-17 standard time will be resumed. -# -# From Paul Eggert (2016-10-27): -# Go with INRiM for DST rules, except as corrected by Inglis for 1944 -# for the Kingdom of Italy. This is consistent with Renzo Baldini. -# Model Rome's occupation by using using C-Eur rules from 1943-09-10 -# to 1944-06-04; although Rome was an open city during this period, it -# was effectively controlled by Germany. 
-# -# Rule NAME FROM TO TYPE IN ON AT SAVE LETTER/S -Rule Italy 1916 only - Jun 3 24:00 1:00 S -Rule Italy 1916 1917 - Sep 30 24:00 0 - -Rule Italy 1917 only - Mar 31 24:00 1:00 S -Rule Italy 1918 only - Mar 9 24:00 1:00 S -Rule Italy 1918 only - Oct 6 24:00 0 - -Rule Italy 1919 only - Mar 1 24:00 1:00 S -Rule Italy 1919 only - Oct 4 24:00 0 - -Rule Italy 1920 only - Mar 20 24:00 1:00 S -Rule Italy 1920 only - Sep 18 24:00 0 - -Rule Italy 1940 only - Jun 14 24:00 1:00 S -Rule Italy 1942 only - Nov 2 2:00s 0 - -Rule Italy 1943 only - Mar 29 2:00s 1:00 S -Rule Italy 1943 only - Oct 4 2:00s 0 - -Rule Italy 1944 only - Apr 2 2:00s 1:00 S -Rule Italy 1944 only - Sep 17 2:00s 0 - -Rule Italy 1945 only - Apr 2 2:00 1:00 S -Rule Italy 1945 only - Sep 15 1:00 0 - -Rule Italy 1946 only - Mar 17 2:00s 1:00 S -Rule Italy 1946 only - Oct 6 2:00s 0 - -Rule Italy 1947 only - Mar 16 0:00s 1:00 S -Rule Italy 1947 only - Oct 5 0:00s 0 - -Rule Italy 1948 only - Feb 29 2:00s 1:00 S -Rule Italy 1948 only - Oct 3 2:00s 0 - -Rule Italy 1966 1968 - May Sun>=22 0:00s 1:00 S -Rule Italy 1966 only - Sep 24 24:00 0 - -Rule Italy 1967 1969 - Sep Sun>=22 0:00s 0 - -Rule Italy 1969 only - Jun 1 0:00s 1:00 S -Rule Italy 1970 only - May 31 0:00s 1:00 S -Rule Italy 1970 only - Sep lastSun 0:00s 0 - -Rule Italy 1971 1972 - May Sun>=22 0:00s 1:00 S -Rule Italy 1971 only - Sep lastSun 0:00s 0 - -Rule Italy 1972 only - Oct 1 0:00s 0 - -Rule Italy 1973 only - Jun 3 0:00s 1:00 S -Rule Italy 1973 1974 - Sep lastSun 0:00s 0 - -Rule Italy 1974 only - May 26 0:00s 1:00 S -Rule Italy 1975 only - Jun 1 0:00s 1:00 S -Rule Italy 1975 1977 - Sep lastSun 0:00s 0 - -Rule Italy 1976 only - May 30 0:00s 1:00 S -Rule Italy 1977 1979 - May Sun>=22 0:00s 1:00 S -Rule Italy 1978 only - Oct 1 0:00s 0 - -Rule Italy 1979 only - Sep 30 0:00s 0 - -# Zone NAME GMTOFF RULES FORMAT [UNTIL] -Zone Europe/Rome 0:49:56 - LMT 1866 Sep 22 - 0:49:56 - RMT 1893 Oct 31 23:49:56 # Rome Mean - 1:00 Italy CE%sT 1943 Sep 10 - 1:00 C-Eur CE%sT 1944 Jun 4 - 1:00 Italy CE%sT 1980 - 1:00 EU CE%sT - -Link Europe/Rome Europe/Vatican -Link Europe/Rome Europe/San_Marino - -# Latvia - -# From Liene Kanepe (1998-09-17): - -# I asked about this matter Scientific Secretary of the Institute of Astronomy -# of The University of Latvia Dr. paed Mr. Ilgonis Vilks. I also searched the -# correct data in juridical acts and I found some juridical documents about -# changes in the counting of time in Latvia from 1981.... -# -# Act No. 35 of the Council of Ministers of Latvian SSR of 1981-01-22 ... -# according to the Act No. 925 of the Council of Ministers of USSR of 1980-10-24 -# ...: all year round the time of 2nd time zone + 1 hour, in addition turning -# the hands of the clock 1 hour forward on 1 April at 00:00 (GMT 31 March 21:00) -# and 1 hour backward on the 1 October at 00:00 (GMT 30 September 20:00). -# -# Act No. 592 of the Council of Ministers of Latvian SSR of 1984-09-24 ... -# according to the Act No. 967 of the Council of Ministers of USSR of 1984-09-13 -# ...: all year round the time of 2nd time zone + 1 hour, in addition turning -# the hands of the clock 1 hour forward on the last Sunday of March at 02:00 -# (GMT 23:00 on the previous day) and 1 hour backward on the last Sunday of -# September at 03:00 (GMT 23:00 on the previous day). -# -# Act No. 81 of the Council of Ministers of Latvian SSR of 1989-03-22 ... -# according to the Act No. 
227 of the Council of Ministers of USSR of 1989-03-14 -# ...: since the last Sunday of March 1989 in Lithuanian SSR, Latvian SSR, -# Estonian SSR and Kaliningrad region of Russian Federation all year round the -# time of 2nd time zone (Moscow time minus one hour). On the territory of Latvia -# transition to summer time is performed on the last Sunday of March at 02:00 -# (GMT 00:00), turning the hands of the clock 1 hour forward. The end of -# daylight saving time is performed on the last Sunday of September at 03:00 -# (GMT 00:00), turning the hands of the clock 1 hour backward. Exception is -# 1989-03-26, when we must not turn the hands of the clock.... -# -# The Regulations of the Cabinet of Ministers of the Republic of Latvia of -# 1997-01-21 on transition to Summer time ... established the same order of -# daylight savings time settings as in the States of the European Union. - -# From Andrei Ivanov (2000-03-06): -# This year Latvia will not switch to Daylight Savings Time (as specified in -# The Regulations of the Cabinet of Ministers of the Rep. of Latvia of -# 29-Feb-2000 (No. 79) , -# in Latvian for subscribers only). - -# From RFE/RL Newsline -# http://www.rferl.org/newsline/2001/01/3-CEE/cee-030101.html -# (2001-01-03), noted after a heads-up by Rives McDow: -# The Latvian government on 2 January decided that the country will -# institute daylight-saving time this spring, LETA reported. -# Last February the three Baltic states decided not to turn back their -# clocks one hour in the spring.... -# Minister of Economy Aigars Kalvītis noted that Latvia had too few -# daylight hours and thus decided to comply with a draft European -# Commission directive that provides for instituting daylight-saving -# time in EU countries between 2002 and 2006. The Latvian government -# urged Lithuania and Estonia to adopt a similar time policy, but it -# appears that they will not do so.... - -# Rule NAME FROM TO TYPE IN ON AT SAVE LETTER/S -Rule Latvia 1989 1996 - Mar lastSun 2:00s 1:00 S -Rule Latvia 1989 1996 - Sep lastSun 2:00s 0 - - -# Milne 1899 says Riga was 1:36:28 (Polytechnique House time). -# Byalokoz 1919 says Latvia was 1:36:34. -# Go with Byalokoz. - -# Zone NAME GMTOFF RULES FORMAT [UNTIL] -Zone Europe/Riga 1:36:34 - LMT 1880 - 1:36:34 - RMT 1918 Apr 15 2:00 # Riga MT - 1:36:34 1:00 LST 1918 Sep 16 3:00 # Latvian ST - 1:36:34 - RMT 1919 Apr 1 2:00 - 1:36:34 1:00 LST 1919 May 22 3:00 - 1:36:34 - RMT 1926 May 11 - 2:00 - EET 1940 Aug 5 - 3:00 - MSK 1941 Jul - 1:00 C-Eur CE%sT 1944 Oct 13 - 3:00 Russia MSK/MSD 1989 Mar lastSun 2:00s - 2:00 1:00 EEST 1989 Sep lastSun 2:00s - 2:00 Latvia EE%sT 1997 Jan 21 - 2:00 EU EE%sT 2000 Feb 29 - 2:00 - EET 2001 Jan 2 - 2:00 EU EE%sT - -# Liechtenstein - -# From Paul Eggert (2013-09-09): -# Shanks & Pottenger say Vaduz is like Zurich. - -# From Alois Treindl (2013-09-18): -# http://www.eliechtensteinensia.li/LIJ/1978/1938-1978/1941.pdf -# ... confirms on p. 6 that Liechtenstein followed Switzerland in 1941 and 1942. -# I ... translate only the last two paragraphs: -# ... during second world war, in the years 1941 and 1942, Liechtenstein -# introduced daylight saving time, adapting to Switzerland. From 1943 on -# central European time was in force throughout the year. -# From a report of the duke's government to the high council, -# regarding the introduction of a time law, of 31 May 1977. - -Link Europe/Zurich Europe/Vaduz - - -# Lithuania - -# From Paul Eggert (2016-03-18): -# The 1989 transition is from USSR act No. 227 (1989-03-14). 
- -# From Paul Eggert (1996-11-22): -# IATA SSIM (1992/1996) says Lithuania uses W-Eur rules, but since it is -# known to be wrong about Estonia and Latvia, assume it's wrong here too. - -# From Marius Gedminas (1998-08-07): -# I would like to inform that in this year Lithuanian time zone -# (Europe/Vilnius) was changed. - -# From ELTA No. 972 (2582) (1999-09-29) , -# via Steffen Thorsen: -# Lithuania has shifted back to the second time zone (GMT plus two hours) -# to be valid here starting from October 31, -# as decided by the national government on Wednesday.... -# The Lithuanian government also announced plans to consider a -# motion to give up shifting to summer time in spring, as it was -# already done by Estonia. - -# From the Fact File, Lithuanian State Department of Tourism -# (2000-03-27): -# Local time is GMT+2 hours ..., no daylight saving. - -# From a user via Klaus Marten (2003-02-07): -# As a candidate for membership of the European Union, Lithuania will -# observe Summer Time in 2003, changing its clocks at the times laid -# down in EU Directive 2000/84 of 19.I.01 (i.e. at the same times as its -# neighbour Latvia). The text of the Lithuanian government Order of -# 7.XI.02 to this effect can be found at -# http://www.lrvk.lt/nut/11/n1749.htm - - -# Zone NAME GMTOFF RULES FORMAT [UNTIL] -Zone Europe/Vilnius 1:41:16 - LMT 1880 - 1:24:00 - WMT 1917 # Warsaw Mean Time - 1:35:36 - KMT 1919 Oct 10 # Kaunas Mean Time - 1:00 - CET 1920 Jul 12 - 2:00 - EET 1920 Oct 9 - 1:00 - CET 1940 Aug 3 - 3:00 - MSK 1941 Jun 24 - 1:00 C-Eur CE%sT 1944 Aug - 3:00 Russia MSK/MSD 1989 Mar 26 2:00s - 2:00 Russia EE%sT 1991 Sep 29 2:00s - 2:00 C-Eur EE%sT 1998 - 2:00 - EET 1998 Mar 29 1:00u - 1:00 EU CE%sT 1999 Oct 31 1:00u - 2:00 - EET 2003 Jan 1 - 2:00 EU EE%sT - -# Luxembourg -# Whitman disagrees with most of these dates in minor ways; -# go with Shanks & Pottenger. -# Rule NAME FROM TO TYPE IN ON AT SAVE LETTER/S -Rule Lux 1916 only - May 14 23:00 1:00 S -Rule Lux 1916 only - Oct 1 1:00 0 - -Rule Lux 1917 only - Apr 28 23:00 1:00 S -Rule Lux 1917 only - Sep 17 1:00 0 - -Rule Lux 1918 only - Apr Mon>=15 2:00s 1:00 S -Rule Lux 1918 only - Sep Mon>=15 2:00s 0 - -Rule Lux 1919 only - Mar 1 23:00 1:00 S -Rule Lux 1919 only - Oct 5 3:00 0 - -Rule Lux 1920 only - Feb 14 23:00 1:00 S -Rule Lux 1920 only - Oct 24 2:00 0 - -Rule Lux 1921 only - Mar 14 23:00 1:00 S -Rule Lux 1921 only - Oct 26 2:00 0 - -Rule Lux 1922 only - Mar 25 23:00 1:00 S -Rule Lux 1922 only - Oct Sun>=2 1:00 0 - -Rule Lux 1923 only - Apr 21 23:00 1:00 S -Rule Lux 1923 only - Oct Sun>=2 2:00 0 - -Rule Lux 1924 only - Mar 29 23:00 1:00 S -Rule Lux 1924 1928 - Oct Sun>=2 1:00 0 - -Rule Lux 1925 only - Apr 5 23:00 1:00 S -Rule Lux 1926 only - Apr 17 23:00 1:00 S -Rule Lux 1927 only - Apr 9 23:00 1:00 S -Rule Lux 1928 only - Apr 14 23:00 1:00 S -Rule Lux 1929 only - Apr 20 23:00 1:00 S -# Zone NAME GMTOFF RULES FORMAT [UNTIL] -Zone Europe/Luxembourg 0:24:36 - LMT 1904 Jun - 1:00 Lux CE%sT 1918 Nov 25 - 0:00 Lux WE%sT 1929 Oct 6 2:00s - 0:00 Belgium WE%sT 1940 May 14 3:00 - 1:00 C-Eur WE%sT 1944 Sep 18 3:00 - 1:00 Belgium CE%sT 1977 - 1:00 EU CE%sT - -# Macedonia -# See Europe/Belgrade. - -# Malta -# -# From Paul Eggert (2016-10-21): -# Assume 1900-1972 was like Rome, overriding Shanks. 
-#
-# Rule NAME FROM TO TYPE IN ON AT SAVE LETTER/S
-Rule Malta 1973 only - Mar 31 0:00s 1:00 S
-Rule Malta 1973 only - Sep 29 0:00s 0 -
-Rule Malta 1974 only - Apr 21 0:00s 1:00 S
-Rule Malta 1974 only - Sep 16 0:00s 0 -
-Rule Malta 1975 1979 - Apr Sun>=15 2:00 1:00 S
-Rule Malta 1975 1980 - Sep Sun>=15 2:00 0 -
-Rule Malta 1980 only - Mar 31 2:00 1:00 S
-# Zone NAME GMTOFF RULES FORMAT [UNTIL]
-Zone Europe/Malta 0:58:04 - LMT 1893 Nov 2 0:00s # Valletta
-        1:00 Italy CE%sT 1973 Mar 31
-        1:00 Malta CE%sT 1981
-        1:00 EU CE%sT
-
-# Moldova
-
-# From Stepan Golosunov (2016-03-07):
-# the act of the government of the Republic of Moldova Nr. 132 from 1990-05-04
-# http://lex.justice.md/viewdoc.php?action=view&view=doc&id=298782&lang=2
-# ... says that since 1990-05-06 on the territory of the Moldavian SSR
-# time would be calculated as the standard time of the second time belt
-# plus one hour of the "summer" time. To implement that clocks would be
-# adjusted one hour backwards at 1990-05-06 2:00. After that "summer"
-# time would be cancelled last Sunday of September at 3:00 and
-# reintroduced last Sunday of March at 2:00.
-
-# From Paul Eggert (2006-03-22):
-# A previous version of this database followed Shanks & Pottenger, who write
-# that Tiraspol switched to Moscow time on 1992-01-19 at 02:00.
-# However, this is most likely an error, as Moldova declared independence
-# on 1991-08-27 (the 1992-01-19 date is that of a Russian decree).
-# In early 1992 there was large-scale interethnic violence in the area
-# and it's possible that some Russophones continued to observe Moscow time.
-# But [two people] separately reported via
-# Jesper Nørgaard that as of 2001-01-24 Tiraspol was like Chisinau.
-# The Tiraspol entry has therefore been removed for now.
-#
-# From Alexander Krivenyshev (2011-10-17):
-# Pridnestrovian Moldavian Republic (PMR, also known as
-# "Pridnestrovie") has abolished seasonal clock change (no transition
-# to the Winter Time).
-#
-# News (in Russian):
-# http://www.kyivpost.ua/russia/news/pridnestrove-otkazalos-ot-perehoda-na-zimnee-vremya-30954.html
-# http://www.allmoldova.com/moldova-news/1249064116.html
-#
-# The substance of this change (reinstatement of the Tiraspol entry)
-# is from a patch from Petr Machata (2011-10-17)
-#
-# From Tim Parenti (2011-10-19)
-# In addition, being situated at +4651+2938 would give Tiraspol
-# a pre-1880 LMT offset of 1:58:32.
-#
-# (which agrees with the earlier entry that had been removed)
-#
-# From Alexander Krivenyshev (2011-10-26)
-# NO need to divide Moldova into two timezones at this point.
-# As of today, Transnistria (Pridnestrovie)- Tiraspol reversed its own
-# decision to abolish DST this winter.
-# Following Moldova and neighboring Ukraine- Transnistria (Pridnestrovie)-
-# Tiraspol will go back to winter time on October 30, 2011.
-# News from Moldova (in russian):
-# http://ru.publika.md/link_317061.html
-
-# From Roman Tudos (2015-07-02):
-# http://lex.justice.md/index.php?action=view&view=doc&lang=1&id=355077
-# From Paul Eggert (2015-07-01):
-# The abovementioned official link to IGO1445-868/2014 states that
-# 2014-10-26's fallback transition occurred at 03:00 local time. Also,
-# http://www.trm.md/en/social/la-30-martie-vom-trece-la-ora-de-vara
-# says the 2014-03-30 spring-forward transition was at 02:00 local time.
-# Guess that since 1997 Moldova has switched one hour before the EU.
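The Tim Parenti note above derives Tiraspol's pre-1880 LMT offset of 1:58:32 from its longitude of +2938 (29 degrees 38 minutes east): local mean time runs ahead of UT by one hour per 15 degrees of longitude, i.e. 240 seconds per degree and 4 seconds per arcminute. The C sketch below only illustrates that arithmetic; it is not part of the tz data or of this patch, and the function name lmt_offset_seconds and the DDMM-style argument are assumptions made for the example.

#include <stdio.h>

/*
 * Local mean time offset east of Greenwich, for an ISO 6709-style
 * longitude written as degrees*100 + arcminutes (e.g. 2938 = 29d38' E).
 * One hour per 15 degrees of longitude: 240 s per degree, 4 s per arcminute.
 */
static int
lmt_offset_seconds(int ddmm)
{
    int deg = ddmm / 100;
    int min = ddmm % 100;

    return deg * 240 + min * 4;
}

int
main(void)
{
    int off = lmt_offset_seconds(2938);    /* Tiraspol, from +4651+2938 */

    /* prints 1:58:32, matching the offset quoted in the comment above */
    printf("%d:%02d:%02d\n", off / 3600, off / 60 % 60, off % 60);
    return 0;
}

The same rule appears to reproduce the other pre-standard-time offsets in this file, e.g. 1:50:16 for Europe/Minsk from a longitude of +02734.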
- -# Rule NAME FROM TO TYPE IN ON AT SAVE LETTER/S -Rule Moldova 1997 max - Mar lastSun 2:00 1:00 S -Rule Moldova 1997 max - Oct lastSun 3:00 0 - - -# Zone NAME GMTOFF RULES FORMAT [UNTIL] -Zone Europe/Chisinau 1:55:20 - LMT 1880 - 1:55 - CMT 1918 Feb 15 # Chisinau MT - 1:44:24 - BMT 1931 Jul 24 # Bucharest MT - 2:00 Romania EE%sT 1940 Aug 15 - 2:00 1:00 EEST 1941 Jul 17 - 1:00 C-Eur CE%sT 1944 Aug 24 - 3:00 Russia MSK/MSD 1990 May 6 2:00 - 2:00 Russia EE%sT 1992 - 2:00 E-Eur EE%sT 1997 -# See Romania commentary for the guessed 1997 transition to EU rules. - 2:00 Moldova EE%sT - -# Monaco -# Shanks & Pottenger give 0:09:20 for Paris Mean Time; go with Howse's -# more precise 0:09:21. -# Zone NAME GMTOFF RULES FORMAT [UNTIL] -Zone Europe/Monaco 0:29:32 - LMT 1891 Mar 15 - 0:09:21 - PMT 1911 Mar 11 # Paris Mean Time - 0:00 France WE%sT 1945 Sep 16 3:00 - 1:00 France CE%sT 1977 - 1:00 EU CE%sT - -# Montenegro -# See Europe/Belgrade. - -# Netherlands - -# Howse writes that the Netherlands' railways used GMT between 1892 and 1940, -# but for other purposes the Netherlands used Amsterdam mean time. - -# However, Robert H. van Gent writes (2001-04-01): -# Howse's statement is only correct up to 1909. From 1909-05-01 (00:00:00 -# Amsterdam mean time) onwards, the whole of the Netherlands (including -# the Dutch railways) was required by law to observe Amsterdam mean time -# (19 minutes 32.13 seconds ahead of GMT). This had already been the -# common practice (except for the railways) for many decades but it was -# not until 1909 when the Dutch government finally defined this by law. -# On 1937-07-01 this was changed to 20 minutes (exactly) ahead of GMT and -# was generally known as Dutch Time ("Nederlandse Tijd"). -# -# (2001-04-08): -# 1892-05-01 was the date when the Dutch railways were by law required to -# observe GMT while the remainder of the Netherlands adhered to the common -# practice of following Amsterdam mean time. -# -# (2001-04-09): -# In 1835 the authorities of the province of North Holland requested the -# municipal authorities of the towns and cities in the province to observe -# Amsterdam mean time but I do not know in how many cases this request was -# actually followed. -# -# From 1852 onwards the Dutch telegraph offices were by law required to -# observe Amsterdam mean time. As the time signals from the observatory of -# Leiden were also distributed by the telegraph system, I assume that most -# places linked up with the telegraph (and railway) system automatically -# adopted Amsterdam mean time. -# -# Although the early Dutch railway companies initially observed a variety -# of times, most of them had adopted Amsterdam mean time by 1858 but it -# was not until 1866 when they were all required by law to observe -# Amsterdam mean time. 
- -# The data entries before 1945 are taken from -# http://www.staff.science.uu.nl/~gent0113/wettijd/wettijd.htm - -# Rule NAME FROM TO TYPE IN ON AT SAVE LETTER/S -Rule Neth 1916 only - May 1 0:00 1:00 NST # Netherlands Summer Time -Rule Neth 1916 only - Oct 1 0:00 0 AMT # Amsterdam Mean Time -Rule Neth 1917 only - Apr 16 2:00s 1:00 NST -Rule Neth 1917 only - Sep 17 2:00s 0 AMT -Rule Neth 1918 1921 - Apr Mon>=1 2:00s 1:00 NST -Rule Neth 1918 1921 - Sep lastMon 2:00s 0 AMT -Rule Neth 1922 only - Mar lastSun 2:00s 1:00 NST -Rule Neth 1922 1936 - Oct Sun>=2 2:00s 0 AMT -Rule Neth 1923 only - Jun Fri>=1 2:00s 1:00 NST -Rule Neth 1924 only - Mar lastSun 2:00s 1:00 NST -Rule Neth 1925 only - Jun Fri>=1 2:00s 1:00 NST -# From 1926 through 1939 DST began 05-15, except that it was delayed by a week -# in years when 05-15 fell in the Pentecost weekend. -Rule Neth 1926 1931 - May 15 2:00s 1:00 NST -Rule Neth 1932 only - May 22 2:00s 1:00 NST -Rule Neth 1933 1936 - May 15 2:00s 1:00 NST -Rule Neth 1937 only - May 22 2:00s 1:00 NST -Rule Neth 1937 only - Jul 1 0:00 1:00 S -Rule Neth 1937 1939 - Oct Sun>=2 2:00s 0 - -Rule Neth 1938 1939 - May 15 2:00s 1:00 S -Rule Neth 1945 only - Apr 2 2:00s 1:00 S -Rule Neth 1945 only - Sep 16 2:00s 0 - -# -# Amsterdam Mean Time was +00:19:32.13 exactly, but the .13 is omitted -# below because the current format requires GMTOFF to be an integer. -# Zone NAME GMTOFF RULES FORMAT [UNTIL] -Zone Europe/Amsterdam 0:19:32 - LMT 1835 - 0:19:32 Neth %s 1937 Jul 1 - 0:20 Neth +0020/+0120 1940 May 16 0:00 - 1:00 C-Eur CE%sT 1945 Apr 2 2:00 - 1:00 Neth CE%sT 1977 - 1:00 EU CE%sT - -# Norway -# http://met.no/met/met_lex/q_u/sommertid.html (2004-01) agrees with Shanks & -# Pottenger. -# Rule NAME FROM TO TYPE IN ON AT SAVE LETTER/S -Rule Norway 1916 only - May 22 1:00 1:00 S -Rule Norway 1916 only - Sep 30 0:00 0 - -Rule Norway 1945 only - Apr 2 2:00s 1:00 S -Rule Norway 1945 only - Oct 1 2:00s 0 - -Rule Norway 1959 1964 - Mar Sun>=15 2:00s 1:00 S -Rule Norway 1959 1965 - Sep Sun>=15 2:00s 0 - -Rule Norway 1965 only - Apr 25 2:00s 1:00 S -# Zone NAME GMTOFF RULES FORMAT [UNTIL] -Zone Europe/Oslo 0:43:00 - LMT 1895 Jan 1 - 1:00 Norway CE%sT 1940 Aug 10 23:00 - 1:00 C-Eur CE%sT 1945 Apr 2 2:00 - 1:00 Norway CE%sT 1980 - 1:00 EU CE%sT - -# Svalbard & Jan Mayen - -# From Steffen Thorsen (2001-05-01): -# Although I could not find it explicitly, it seems that Jan Mayen and -# Svalbard have been using the same time as Norway at least since the -# time they were declared as parts of Norway. Svalbard was declared -# as a part of Norway by law of 1925-07-17 no 11, section 4 and Jan -# Mayen by law of 1930-02-27 no 2, section 2. (From -# and -# ). The law/regulation -# for normal/standard time in Norway is from 1894-06-29 no 1 (came -# into operation on 1895-01-01) and Svalbard/Jan Mayen seem to be a -# part of this law since 1925/1930. (From -# ) I have not been -# able to find if Jan Mayen used a different time zone (e.g. -0100) -# before 1930. Jan Mayen has only been "inhabited" since 1921 by -# Norwegian meteorologists and maybe used the same time as Norway ever -# since 1921. Svalbard (Arctic/Longyearbyen) has been inhabited since -# before 1895, and therefore probably changed the local time somewhere -# between 1895 and 1925 (inclusive). - -# From Paul Eggert (2013-09-04): -# -# Actually, Jan Mayen was never occupied by Germany during World War II, -# so it must have diverged from Oslo time during the war, as Oslo was -# keeping Berlin time. 
-# -# says that the meteorologists -# burned down their station in 1940 and left the island, but returned in -# 1941 with a small Norwegian garrison and continued operations despite -# frequent air attacks from Germans. In 1943 the Americans established a -# radiolocating station on the island, called "Atlantic City". Possibly -# the UT offset changed during the war, but I think it unlikely that -# Jan Mayen used German daylight-saving rules. -# -# Svalbard is more complicated, as it was raided in August 1941 by an -# Allied party that evacuated the civilian population to England (says -# ). The Svalbard FAQ -# says that the Germans were -# expelled on 1942-05-14. However, small parties of Germans did return, -# and according to Wilhelm Dege's book "War North of 80" (1954) -# http://www.ucalgary.ca/UofC/departments/UP/1-55238/1-55238-110-2.html -# the German armed forces at the Svalbard weather station code-named -# Haudegen did not surrender to the Allies until September 1945. -# -# All these events predate our cutoff date of 1970, so use Europe/Oslo -# for these regions. -Link Europe/Oslo Arctic/Longyearbyen - -# Poland - -# The 1919 dates and times can be found in Tygodnik Urzędowy nr 1 (1919-03-20), -# pp 1-2. - -# Rule NAME FROM TO TYPE IN ON AT SAVE LETTER/S -Rule Poland 1918 1919 - Sep 16 2:00s 0 - -Rule Poland 1919 only - Apr 15 2:00s 1:00 S -Rule Poland 1944 only - Apr 3 2:00s 1:00 S -# Whitman gives 1944 Nov 30; go with Shanks & Pottenger. -Rule Poland 1944 only - Oct 4 2:00 0 - -# For 1944-1948 Whitman gives the previous day; go with Shanks & Pottenger. -Rule Poland 1945 only - Apr 29 0:00 1:00 S -Rule Poland 1945 only - Nov 1 0:00 0 - -# For 1946 on the source is Kazimierz Borkowski, -# Toruń Center for Astronomy, Dept. of Radio Astronomy, Nicolaus Copernicus U., -# http://www.astro.uni.torun.pl/~kb/Artykuly/U-PA/Czas2.htm#tth_tAb1 -# Thanks to Przemysław Augustyniak (2005-05-28) for this reference. -# He also gives these further references: -# Mon Pol nr 13, poz 162 (1995) -# Druk nr 2180 (2003) -Rule Poland 1946 only - Apr 14 0:00s 1:00 S -Rule Poland 1946 only - Oct 7 2:00s 0 - -Rule Poland 1947 only - May 4 2:00s 1:00 S -Rule Poland 1947 1949 - Oct Sun>=1 2:00s 0 - -Rule Poland 1948 only - Apr 18 2:00s 1:00 S -Rule Poland 1949 only - Apr 10 2:00s 1:00 S -Rule Poland 1957 only - Jun 2 1:00s 1:00 S -Rule Poland 1957 1958 - Sep lastSun 1:00s 0 - -Rule Poland 1958 only - Mar 30 1:00s 1:00 S -Rule Poland 1959 only - May 31 1:00s 1:00 S -Rule Poland 1959 1961 - Oct Sun>=1 1:00s 0 - -Rule Poland 1960 only - Apr 3 1:00s 1:00 S -Rule Poland 1961 1964 - May lastSun 1:00s 1:00 S -Rule Poland 1962 1964 - Sep lastSun 1:00s 0 - -# Zone NAME GMTOFF RULES FORMAT [UNTIL] -Zone Europe/Warsaw 1:24:00 - LMT 1880 - 1:24:00 - WMT 1915 Aug 5 # Warsaw Mean Time - 1:00 C-Eur CE%sT 1918 Sep 16 3:00 - 2:00 Poland EE%sT 1922 Jun - 1:00 Poland CE%sT 1940 Jun 23 2:00 - 1:00 C-Eur CE%sT 1944 Oct - 1:00 Poland CE%sT 1977 - 1:00 W-Eur CE%sT 1988 - 1:00 EU CE%sT - -# Portugal -# -# From Paul Eggert (2014-08-11), after a heads-up from Stephen Colebourne: -# According to a Portuguese decree (1911-05-26) -# http://dre.pt/pdf1sdip/1911/05/12500/23132313.pdf -# Lisbon was at -0:36:44.68, but switched to GMT on 1912-01-01 at 00:00. -# Round the old offset to -0:36:45. This agrees with Willett but disagrees -# with Shanks, who says the transition occurred on 1911-05-24 at 00:00 for -# Europe/Lisbon, Atlantic/Azores, and Atlantic/Madeira. 
-# -# From Rui Pedro Salgueiro (1992-11-12): -# Portugal has recently (September, 27) changed timezone -# (from WET to MET or CET) to harmonize with EEC. -# -# Martin Bruckmann (1996-02-29) reports via Peter Ilieve -# that Portugal is reverting to 0:00 by not moving its clocks this spring. -# The new Prime Minister was fed up with getting up in the dark in the winter. -# -# From Paul Eggert (1996-11-12): -# IATA SSIM (1991-09) reports several 1991-09 and 1992-09 transitions -# at 02:00u, not 01:00u. Assume that these are typos. -# IATA SSIM (1991/1992) reports that the Azores were at -1:00. -# IATA SSIM (1993-02) says +0:00; later issues (through 1996-09) say -1:00. -# Guess that the Azores changed to EU rules in 1992 (since that's when Portugal -# harmonized with the EU), and that they stayed +0:00 that winter. -# -# Rule NAME FROM TO TYPE IN ON AT SAVE LETTER/S -# DSH writes that despite Decree 1,469 (1915), the change to the clocks was not -# done every year, depending on what Spain did, because of railroad schedules. -# Go with Shanks & Pottenger. -Rule Port 1916 only - Jun 17 23:00 1:00 S -# Whitman gives 1916 Oct 31; go with Shanks & Pottenger. -Rule Port 1916 only - Nov 1 1:00 0 - -Rule Port 1917 only - Feb 28 23:00s 1:00 S -Rule Port 1917 1921 - Oct 14 23:00s 0 - -Rule Port 1918 only - Mar 1 23:00s 1:00 S -Rule Port 1919 only - Feb 28 23:00s 1:00 S -Rule Port 1920 only - Feb 29 23:00s 1:00 S -Rule Port 1921 only - Feb 28 23:00s 1:00 S -Rule Port 1924 only - Apr 16 23:00s 1:00 S -Rule Port 1924 only - Oct 14 23:00s 0 - -Rule Port 1926 only - Apr 17 23:00s 1:00 S -Rule Port 1926 1929 - Oct Sat>=1 23:00s 0 - -Rule Port 1927 only - Apr 9 23:00s 1:00 S -Rule Port 1928 only - Apr 14 23:00s 1:00 S -Rule Port 1929 only - Apr 20 23:00s 1:00 S -Rule Port 1931 only - Apr 18 23:00s 1:00 S -# Whitman gives 1931 Oct 8; go with Shanks & Pottenger. -Rule Port 1931 1932 - Oct Sat>=1 23:00s 0 - -Rule Port 1932 only - Apr 2 23:00s 1:00 S -Rule Port 1934 only - Apr 7 23:00s 1:00 S -# Whitman gives 1934 Oct 5; go with Shanks & Pottenger. -Rule Port 1934 1938 - Oct Sat>=1 23:00s 0 - -# Shanks & Pottenger give 1935 Apr 30; go with Whitman. -Rule Port 1935 only - Mar 30 23:00s 1:00 S -Rule Port 1936 only - Apr 18 23:00s 1:00 S -# Whitman gives 1937 Apr 2; go with Shanks & Pottenger. -Rule Port 1937 only - Apr 3 23:00s 1:00 S -Rule Port 1938 only - Mar 26 23:00s 1:00 S -Rule Port 1939 only - Apr 15 23:00s 1:00 S -# Whitman gives 1939 Oct 7; go with Shanks & Pottenger. -Rule Port 1939 only - Nov 18 23:00s 0 - -Rule Port 1940 only - Feb 24 23:00s 1:00 S -# Shanks & Pottenger give 1940 Oct 7; go with Whitman. -Rule Port 1940 1941 - Oct 5 23:00s 0 - -Rule Port 1941 only - Apr 5 23:00s 1:00 S -Rule Port 1942 1945 - Mar Sat>=8 23:00s 1:00 S -Rule Port 1942 only - Apr 25 22:00s 2:00 M # Midsummer -Rule Port 1942 only - Aug 15 22:00s 1:00 S -Rule Port 1942 1945 - Oct Sat>=24 23:00s 0 - -Rule Port 1943 only - Apr 17 22:00s 2:00 M -Rule Port 1943 1945 - Aug Sat>=25 22:00s 1:00 S -Rule Port 1944 1945 - Apr Sat>=21 22:00s 2:00 M -Rule Port 1946 only - Apr Sat>=1 23:00s 1:00 S -Rule Port 1946 only - Oct Sat>=1 23:00s 0 - -Rule Port 1947 1949 - Apr Sun>=1 2:00s 1:00 S -Rule Port 1947 1949 - Oct Sun>=1 2:00s 0 - -# Shanks & Pottenger say DST was observed in 1950; go with Whitman. -# Whitman gives Oct lastSun for 1952 on; go with Shanks & Pottenger. 
-Rule Port 1951 1965 - Apr Sun>=1 2:00s 1:00 S -Rule Port 1951 1965 - Oct Sun>=1 2:00s 0 - -Rule Port 1977 only - Mar 27 0:00s 1:00 S -Rule Port 1977 only - Sep 25 0:00s 0 - -Rule Port 1978 1979 - Apr Sun>=1 0:00s 1:00 S -Rule Port 1978 only - Oct 1 0:00s 0 - -Rule Port 1979 1982 - Sep lastSun 1:00s 0 - -Rule Port 1980 only - Mar lastSun 0:00s 1:00 S -Rule Port 1981 1982 - Mar lastSun 1:00s 1:00 S -Rule Port 1983 only - Mar lastSun 2:00s 1:00 S -# -# Zone NAME GMTOFF RULES FORMAT [UNTIL] -Zone Europe/Lisbon -0:36:45 - LMT 1884 - -0:36:45 - LMT 1912 Jan 1 # Lisbon Mean Time - 0:00 Port WE%sT 1966 Apr 3 2:00 - 1:00 - CET 1976 Sep 26 1:00 - 0:00 Port WE%sT 1983 Sep 25 1:00s - 0:00 W-Eur WE%sT 1992 Sep 27 1:00s - 1:00 EU CE%sT 1996 Mar 31 1:00u - 0:00 EU WE%sT -# This Zone can be simplified once we assume zic %z. -Zone Atlantic/Azores -1:42:40 - LMT 1884 # Ponta Delgada - -1:54:32 - HMT 1912 Jan 1 # Horta Mean Time - -2:00 Port -02/-01 1942 Apr 25 22:00s - -2:00 Port +00 1942 Aug 15 22:00s - -2:00 Port -02/-01 1943 Apr 17 22:00s - -2:00 Port +00 1943 Aug 28 22:00s - -2:00 Port -02/-01 1944 Apr 22 22:00s - -2:00 Port +00 1944 Aug 26 22:00s - -2:00 Port -02/-01 1945 Apr 21 22:00s - -2:00 Port +00 1945 Aug 25 22:00s - -2:00 Port -02/-01 1966 Apr 3 2:00 - -1:00 Port -01/+00 1983 Sep 25 1:00s - -1:00 W-Eur -01/+00 1992 Sep 27 1:00s - 0:00 EU WE%sT 1993 Mar 28 1:00u - -1:00 EU -01/+00 -# This Zone can be simplified once we assume zic %z. -Zone Atlantic/Madeira -1:07:36 - LMT 1884 # Funchal - -1:07:36 - FMT 1912 Jan 1 # Funchal Mean Time - -1:00 Port -01/+00 1942 Apr 25 22:00s - -1:00 Port +01 1942 Aug 15 22:00s - -1:00 Port -01/+00 1943 Apr 17 22:00s - -1:00 Port +01 1943 Aug 28 22:00s - -1:00 Port -01/+00 1944 Apr 22 22:00s - -1:00 Port +01 1944 Aug 26 22:00s - -1:00 Port -01/+00 1945 Apr 21 22:00s - -1:00 Port +01 1945 Aug 25 22:00s - -1:00 Port -01/+00 1966 Apr 3 2:00 - 0:00 Port WE%sT 1983 Sep 25 1:00s - 0:00 EU WE%sT - -# Romania -# -# From Paul Eggert (1999-10-07): -# Nine O'clock -# (1998-10-23) reports that the switch occurred at -# 04:00 local time in fall 1998. For lack of better info, -# assume that Romania and Moldova switched to EU rules in 1997, -# the same year as Bulgaria. -# -# Rule NAME FROM TO TYPE IN ON AT SAVE LETTER/S -Rule Romania 1932 only - May 21 0:00s 1:00 S -Rule Romania 1932 1939 - Oct Sun>=1 0:00s 0 - -Rule Romania 1933 1939 - Apr Sun>=2 0:00s 1:00 S -Rule Romania 1979 only - May 27 0:00 1:00 S -Rule Romania 1979 only - Sep lastSun 0:00 0 - -Rule Romania 1980 only - Apr 5 23:00 1:00 S -Rule Romania 1980 only - Sep lastSun 1:00 0 - -Rule Romania 1991 1993 - Mar lastSun 0:00s 1:00 S -Rule Romania 1991 1993 - Sep lastSun 0:00s 0 - -# Zone NAME GMTOFF RULES FORMAT [UNTIL] -Zone Europe/Bucharest 1:44:24 - LMT 1891 Oct - 1:44:24 - BMT 1931 Jul 24 # Bucharest MT - 2:00 Romania EE%sT 1981 Mar 29 2:00s - 2:00 C-Eur EE%sT 1991 - 2:00 Romania EE%sT 1994 - 2:00 E-Eur EE%sT 1997 - 2:00 EU EE%sT - - -# Russia - -# From Alexander Krivenyshev (2011-09-15): -# Based on last Russian Government Decree No. 725 on August 31, 2011 -# (Government document -# http://www.government.ru/gov/results/16355/print/ -# in Russian) -# there are few corrections have to be made for some Russian time zones... -# All updated Russian Time Zones were placed in table and translated to English -# by WorldTimeZone.com at the link below: -# http://www.worldtimezone.com/dst_news/dst_news_russia36.htm - -# From Sanjeev Gupta (2011-09-27): -# Scans of [Decree No. 
23 of January 8, 1992] are available at: -# http://government.consultant.ru/page.aspx?1223966 -# They are in Cyrillic letters (presumably Russian). - -# From Arthur David Olson (2012-05-09): -# Regarding the instant when clocks in time-zone-shifting parts of Russia -# changed in September 2011: -# -# One source is -# http://government.ru/gov/results/16355/ -# which, according to translate.google.com, begins "Decree of August 31, -# 2011 No. 725" and contains no other dates or "effective date" information. -# -# Another source is -# http://www.rg.ru/2011/09/06/chas-zona-dok.html -# which, according to translate.google.com, begins "Resolution of the -# Government of the Russian Federation on August 31, 2011 N 725" and also -# contains "Date first official publication: September 6, 2011 Posted on: -# in the 'RG' - Federal Issue No. 5573 September 6, 2011" but which -# does not contain any "effective date" information. -# -# Another source is -# http://en.wikipedia.org/wiki/Oymyakonsky_District#cite_note-RuTime-7 -# which, in note 8, contains "Resolution No. 725 of August 31, 2011... -# Effective as of after 7 days following the day of the official publication" -# but which does not contain any reference to September 6, 2011. -# -# The Wikipedia article refers to -# http://base.consultant.ru/cons/cgi/online.cgi?req=doc;base=LAW;n=118896 -# which seems to copy the text of the government.ru page. -# -# Tobias Conradi combines Wikipedia's -# "as of after 7 days following the day of the official publication" -# with www.rg.ru's "Date of first official publication: September 6, 2011" to -# get September 13, 2011 as the cutover date (unusually, a Tuesday, as Tobias -# Conradi notes). -# -# None of the sources indicates a time of day for changing clocks. -# -# Go with 2011-09-13 0:00s. - -# From Alexander Krivenyshev (2014-07-01): -# According to the Russian news (ITAR-TASS News Agency) -# http://en.itar-tass.com/russia/738562 -# the State Duma has approved ... the draft bill on returning to -# winter time standard and return Russia 11 time zones. The new -# regulations will come into effect on October 26, 2014 at 02:00 ... -# http://asozd2.duma.gov.ru/main.nsf/%28Spravka%29?OpenAgent&RN=431985-6&02 -# Here is a link where we put together table (based on approved Bill N -# 431985-6) with proposed 11 Russian time zones and corresponding -# areas/cities/administrative centers in the Russian Federation (in English): -# http://www.worldtimezone.com/dst_news/dst_news_russia65.html -# -# From Alexander Krivenyshev (2014-07-22): -# Putin signed the Federal Law 431985-6 ... (in Russian) -# http://itar-tass.com/obschestvo/1333711 -# http://www.pravo.gov.ru:8080/page.aspx?111660 -# http://www.kremlin.ru/acts/46279 -# From October 26, 2014 the new Russian time zone map will looks like this: -# http://www.worldtimezone.com/dst_news/dst_news_russia-map-2014-07.html - -# From Paul Eggert (2006-03-22): -# Moscow time zone abbreviations after 1919-07-01, and Moscow rules after 1991, -# are from Andrey A. Chernov. The rest is from Shanks & Pottenger, -# except we follow Chernov's report that 1992 DST transitions were Sat -# 23:00, not Sun 02:00s. -# -# From Stanislaw A. Kuzikowski (1994-06-29): -# But now it is some months since Novosibirsk is 3 hours ahead of Moscow! -# I do not know why they have decided to make this change; -# as far as I remember it was done exactly during winter->summer switching -# so we (Novosibirsk) simply did not switch. -# -# From Andrey A. 
Chernov (1996-10-04): -# 'MSK' and 'MSD' were born and used initially on Moscow computers with -# UNIX-like OSes by several developer groups (e.g. Demos group, Kiae group).... -# The next step was the UUCP network, the Relcom predecessor -# (used mainly for mail), and MSK/MSD was actively used there. -# -# From Chris Carrier (1996-10-30): -# According to a friend of mine who rode the Trans-Siberian Railroad from -# Moscow to Irkutsk in 1995, public air and rail transport in Russia ... -# still follows Moscow time, no matter where in Russia it is located. -# -# For Grozny, Chechnya, we have the following story from -# John Daniszewski, "Scavengers in the Rubble", Los Angeles Times (2001-02-07): -# News - often false - is spread by word of mouth. A rumor that it was -# time to move the clocks back put this whole city out of sync with -# the rest of Russia for two weeks - even soldiers stationed here began -# enforcing curfew at the wrong time. -# -# From Gwillim Law (2001-06-05): -# There's considerable evidence that Sakhalin Island used to be in -# UTC+11, and has changed to UTC+10, in this decade. I start with the -# SSIM, which listed Yuzhno-Sakhalinsk in zone RU10 along with Magadan -# until February 1997, and then in RU9 with Khabarovsk and Vladivostok -# since September 1997.... Although the Kuril Islands are -# administratively part of Sakhalin oblast', they appear to have -# remained on UTC+11 along with Magadan. - -# From Tim Parenti (2014-07-06): -# The comments detailing the coverage of each Russian zone are meant to assist -# with maintenance only and represent our best guesses as to which regions -# are covered by each zone. They are not meant to be taken as an authoritative -# listing. The region codes listed come from -# http://en.wikipedia.org/w/?title=Federal_subjects_of_Russia&oldid=611810498 -# and are used for convenience only; no guarantees are made regarding their -# future stability. ISO 3166-2:RU codes are also listed for first-level -# divisions where available. - -# Zone NAME GMTOFF RULES FORMAT [UNTIL] - - -# From Tim Parenti (2014-07-03): -# Europe/Kaliningrad covers... -# 39 RU-KGD Kaliningrad Oblast - -# From Paul Eggert (2016-03-18): -# The 1989 transition is from USSR act No. 227 (1989-03-14). - -# From Stepan Golosunov (2016-03-07): -# http://www.rgo.ru/ru/kaliningradskoe-oblastnoe-otdelenie/ob-otdelenii/publikacii/kak-nam-zhilos-bez-letnego-vremeni -# confirms that the 1989 change to Moscow-1 was implemented. -# (The article, though, is misattributed to 1990 while saying that -# summer->winter transition would be done on the 24 of September. But -# 1990-09-24 was Monday, while 1989-09-24 was Sunday as expected.) -# ... -# http://www.kaliningradka.ru/site_pc/cherez/index.php?ELEMENT_ID=40091 -# says that Kaliningrad switched to Moscow-1 on 1989-03-26, avoided -# at the last moment switch to Moscow-1 on 1991-03-31, switched to -# Moscow on 1991-11-03, switched to Moscow-1 on 1992-01-19. - -Zone Europe/Kaliningrad 1:22:00 - LMT 1893 Apr - 1:00 C-Eur CE%sT 1945 - 2:00 Poland CE%sT 1946 - 3:00 Russia MSK/MSD 1989 Mar 26 2:00s - 2:00 Russia EE%sT 2011 Mar 27 2:00s - 3:00 - +03 2014 Oct 26 2:00s - 2:00 - EET - - -# From Paul Eggert (2016-02-21), per Tim Parenti (2014-07-03) and -# Oscar van Vlijmen (2001-08-25): -# Europe/Moscow covers... 
-# 01 RU-AD Adygea, Republic of -# 05 RU-DA Dagestan, Republic of -# 06 RU-IN Ingushetia, Republic of -# 07 RU-KB Kabardino-Balkar Republic -# 08 RU-KL Kalmykia, Republic of -# 09 RU-KC Karachay-Cherkess Republic -# 10 RU-KR Karelia, Republic of -# 11 RU-KO Komi Republic -# 12 RU-ME Mari El Republic -# 13 RU-MO Mordovia, Republic of -# 15 RU-SE North Ossetia-Alania, Republic of -# 16 RU-TA Tatarstan, Republic of -# 20 RU-CE Chechen Republic -# 21 RU-CU Chuvash Republic -# 23 RU-KDA Krasnodar Krai -# 26 RU-STA Stavropol Krai -# 29 RU-ARK Arkhangelsk Oblast -# 31 RU-BEL Belgorod Oblast -# 32 RU-BRY Bryansk Oblast -# 33 RU-VLA Vladimir Oblast -# 35 RU-VLG Vologda Oblast -# 36 RU-VOR Voronezh Oblast -# 37 RU-IVA Ivanovo Oblast -# 40 RU-KLU Kaluga Oblast -# 44 RU-KOS Kostroma Oblast -# 46 RU-KRS Kursk Oblast -# 47 RU-LEN Leningrad Oblast -# 48 RU-LIP Lipetsk Oblast -# 50 RU-MOS Moscow Oblast -# 51 RU-MUR Murmansk Oblast -# 52 RU-NIZ Nizhny Novgorod Oblast -# 53 RU-NGR Novgorod Oblast -# 57 RU-ORL Oryol Oblast -# 58 RU-PNZ Penza Oblast -# 60 RU-PSK Pskov Oblast -# 61 RU-ROS Rostov Oblast -# 62 RU-RYA Ryazan Oblast -# 67 RU-SMO Smolensk Oblast -# 68 RU-TAM Tambov Oblast -# 69 RU-TVE Tver Oblast -# 71 RU-TUL Tula Oblast -# 76 RU-YAR Yaroslavl Oblast -# 77 RU-MOW Moscow -# 78 RU-SPE Saint Petersburg -# 83 RU-NEN Nenets Autonomous Okrug - -# From Paul Eggert (2016-08-23): -# The Soviets switched to UT-based time in 1919. Decree No. 59 -# (1919-02-08) http://istmat.info/node/35567 established UT-based time -# zones, and Decree No. 147 (1919-03-29) http://istmat.info/node/35854 -# specified a transition date of 1919-07-01, apparently at 00:00 UT. -# No doubt only the Soviet-controlled regions switched on that date; -# later transitions to UT-based time in other parts of Russia are -# taken from what appear to be guesses by Shanks. -# (Thanks to Alexander Belopolsky for pointers to the decrees.) - -# From Stepan Golosunov (2016-03-07): -# 11. Regions-violators, 1981-1982. -# Wikipedia refers to -# http://maps.monetonos.ru/maps/raznoe/Old_Maps/Old_Maps/Articles/022/3_1981.html -# http://besp.narod.ru/nauka_1981_3.htm -# -# The second link provides two articles scanned from the Nauka i Zhizn -# magazine No. 3, 1981 and a scan of the short article attributed to -# the Trud newspaper from February 1982. The first link provides the -# same Nauka i Zhizn articles converted to the text form (but misses -# time belt changes map). -# -# The second Nauka i Zhizn article says that in addition to -# introduction of summer time on 1981-04-01 there are some time belt -# border changes on 1981-10-01, mostly affecting Nenets Autonomous -# Okrug, Krasnoyarsk Krai, Yakutia, Magadan Oblast and Chukotka -# according to the provided map (colored one). In addition to that -# "time violators" (regions which were not using rules of the time -# belts in which they were located) would not be moving off the DST on -# 1981-10-01 to restore the decree time usage. (Komi ASSR was -# supposed to repeat that move in October 1982 to account for the 2 -# hour difference.) Map depicting "time violators" before 1981-10-01 -# is also provided. -# -# The article from Trud says that 1981-10-01 changes caused problems -# and some territories would be moved to pre-1981-10-01 time by not -# moving to summer time on 1982-04-01. 
Namely: Dagestan, -# Kabardino-Balkar, Kalmyk, Komi, Mari, Mordovian, North Ossetian, -# Tatar, Chechen-Ingush and Chuvash ASSR, Krasnodar and Stavropol -# krais, Arkhangelsk, Vladimir, Vologda, Voronezh, Gorky, Ivanovo, -# Kostroma, Lipetsk, Penza, Rostov, Ryazan, Tambov, Tyumen and -# Yaroslavl oblasts, Nenets and Evenk autonomous okrugs, Khatangsky -# district of Taymyr Autonomous Okrug. As a result Evenk Autonomous -# Okrug and Khatangsky district of Taymyr Autonomous Okrug would end -# up on Moscow+4, Tyumen Oblast on Moscow+2 and the rest on Moscow -# time. -# -# http://astrozet.net/files/Zones/DOC/RU/1980-925.txt -# attributes the 1982 changes to the Act of the Council of Ministers -# of the USSR No. 126 from 18.02.1982. 1980-925.txt also adds -# Udmurtia to the list of affected territories and lists Khatangsky -# district separately from Taymyr Autonomous Okrug. Probably erroneously. -# -# The affected territories are currently listed under Europe/Moscow, -# Asia/Yekaterinburg and Asia/Krasnoyarsk. -# -# 12. Udmurtia -# The fact that Udmurtia is depicted as a violator in the Nauka i -# Zhizn article hints at Izhevsk being on different time from -# Kuybyshev before 1981-10-01. Udmurtia is not mentioned in the 1989 act. -# http://astrozet.net/files/Zones/DOC/RU/1980-925.txt -# implies Udmurtia was on Moscow time after 1982-04-01. -# Wikipedia implies Udmurtia being on Moscow+1 until 1991. -# -# ... -# -# All Russian zones are supposed to have by default a -1 change at -# 1991-03-31 2:00 (cancellation of the decree time in the USSR) and a +1 -# change at 1992-01-19 2:00 (restoration of the decree time in Russia). -# -# There were some exceptions, though. -# Wikipedia says newspapers listed Astrakhan, Saratov, Kirov, Volgograd, -# Izhevsk, Grozny, Kazan and Samara as such exceptions for the 1992 -# change. (Different newspapers providing different lists. And some -# lists found in the internet are quite wild.) -# -# And apparently some exceptions were reverted in the last moment. -# http://www.kaliningradka.ru/site_pc/cherez/index.php?ELEMENT_ID=40091 -# says that Kaliningrad decided not to be an exception 2 days before the -# 1991-03-31 switch and one person at -# http://izhevsk.ru/forum_light_message/50/682597-m8369040.html -# says he remembers that Samara opted out of the 1992-01-19 exception -# 2 days before the switch. -# -# -# From Paul Eggert (2016-03-18): -# Given the above, we appear to be missing some Zone entries for the -# chaotic early 1980s in Russia. It's not clear what these entries -# should be. For now, sweep this under the rug and just document the -# time in Moscow. - -# From Vladimir Karpinsky (2014-07-08): -# LMT in Moscow (before Jul 3, 1916) is 2:30:17, that was defined by Moscow -# Observatory (coordinates: 55 deg. 45'29.70", 37 deg. 34'05.30").... -# LMT in Moscow since Jul 3, 1916 is 2:31:01 as a result of new standard. -# (The info is from the book by Byalokoz ... p. 18.) -# The time in St. Petersburg as capital of Russia was defined by -# Pulkov observatory, near St. Petersburg. In 1916 LMT Moscow -# was synchronized with LMT St. Petersburg (+30 minutes), (Pulkov observatory -# coordinates: 59 deg. 46'18.70", 30 deg. 19'40.70") so 30 deg. 19'40.70" > -# 2h01m18.7s = 2:01:19. LMT Moscow = LMT St.Petersburg + 30m 2:01:19 + 0:30 = -# 2:31:19 ... -# -# From Paul Eggert (2014-07-08): -# Milne does not list Moscow, but suggests that its time might be listed in -# Résumés mensuels et annuels des observations météorologiques (1895). 
-# Presumably this is OCLC 85825704, a journal published with parallel text in -# Russian and French. This source has not been located; go with Karpinsky. - -Zone Europe/Moscow 2:30:17 - LMT 1880 - 2:30:17 - MMT 1916 Jul 3 # Moscow Mean Time - 2:31:19 Russia %s 1919 Jul 1 0:00u - 3:00 Russia %s 1921 Oct - 3:00 Russia MSK/MSD 1922 Oct - 2:00 - EET 1930 Jun 21 - 3:00 Russia MSK/MSD 1991 Mar 31 2:00s - 2:00 Russia EE%sT 1992 Jan 19 2:00s - 3:00 Russia MSK/MSD 2011 Mar 27 2:00s - 4:00 - MSK 2014 Oct 26 2:00s - 3:00 - MSK - - -# From Paul Eggert (2016-12-06): -# Europe/Simferopol covers Crimea. - -Zone Europe/Simferopol 2:16:24 - LMT 1880 - 2:16 - SMT 1924 May 2 # Simferopol Mean T - 2:00 - EET 1930 Jun 21 - 3:00 - MSK 1941 Nov - 1:00 C-Eur CE%sT 1944 Apr 13 - 3:00 Russia MSK/MSD 1990 - 3:00 - MSK 1990 Jul 1 2:00 - 2:00 - EET 1992 -# Central Crimea used Moscow time 1994/1997. -# -# From Paul Eggert (2006-03-22): -# The _Economist_ (1994-05-28, p 45) reports that central Crimea switched -# from Kiev to Moscow time sometime after the January 1994 elections. -# Shanks (1999) says "date of change uncertain", but implies that it happened -# sometime between the 1994 DST switches. Shanks & Pottenger simply say -# 1994-09-25 03:00, but that can't be right. For now, guess it -# changed in May. - 2:00 E-Eur EE%sT 1994 May -# From IATA SSIM (1994/1997), which also says that Kerch is still like Kiev. - 3:00 E-Eur MSK/MSD 1996 Mar 31 0:00s - 3:00 1:00 MSD 1996 Oct 27 3:00s -# IATA SSIM (1997-09) says Crimea switched to EET/EEST. -# Assume it happened in March by not changing the clocks. - 3:00 Russia MSK/MSD 1997 - 3:00 - MSK 1997 Mar lastSun 1:00u -# From Alexander Krivenyshev (2014-03-17): -# time change at 2:00 (2am) on March 30, 2014 -# http://vz.ru/news/2014/3/17/677464.html -# From Paul Eggert (2014-03-30): -# Simferopol and Sevastopol reportedly changed their central town clocks -# late the previous day, but this appears to have been ceremonial -# and the discrepancies are small enough to not worry about. - 2:00 EU EE%sT 2014 Mar 30 2:00 - 4:00 - MSK 2014 Oct 26 2:00s - 3:00 - MSK - - -# From Paul Eggert (2016-03-18): -# Europe/Astrakhan covers: -# 30 RU-AST Astrakhan Oblast -# -# The 1989 transition is from USSR act No. 227 (1989-03-14). - -# From Alexander Krivenyshev (2016-01-12): -# On February 10, 2016 Astrakhan Oblast got approval by the Federation -# Council to change its time zone to UTC+4 (from current UTC+3 Moscow time).... -# This Federal Law shall enter into force on 27 March 2016 at 02:00. -# From Matt Johnson (2016-03-09): -# http://publication.pravo.gov.ru/Document/View/0001201602150056 - -Zone Europe/Astrakhan 3:12:12 - LMT 1924 May - 3:00 - +03 1930 Jun 21 - 4:00 Russia +04/+05 1989 Mar 26 2:00s - 3:00 Russia +03/+04 1991 Mar 31 2:00s - 4:00 - +04 1992 Mar 29 2:00s - 3:00 Russia +03/+04 2011 Mar 27 2:00s - 4:00 - +04 2014 Oct 26 2:00s - 3:00 - +03 2016 Mar 27 2:00s - 4:00 - +04 - -# From Paul Eggert (2016-11-11): -# Europe/Volgograd covers: -# 34 RU-VGG Volgograd Oblast -# The 1988 transition is from USSR act No. 5 (1988-01-04). - -Zone Europe/Volgograd 2:57:40 - LMT 1920 Jan 3 - 3:00 - +03 1930 Jun 21 - 4:00 - +04 1961 Nov 11 - 4:00 Russia +04/+05 1988 Mar 27 2:00s - 3:00 Russia +03/+04 1991 Mar 31 2:00s - 4:00 - +04 1992 Mar 29 2:00s - 3:00 Russia +03/+04 2011 Mar 27 2:00s - 4:00 - +04 2014 Oct 26 2:00s - 3:00 - +03 - -# From Paul Eggert (2016-11-11): -# Europe/Saratov covers: -# 64 RU-SAR Saratov Oblast - -# From Yuri Konotopov (2016-11-11): -# Dec 4, 2016 02:00 UTC+3.... 
Saratov Region's local time will be ... UTC+4. -# From Stepan Golosunov (2016-11-11): -# ... Byalokoz listed Saratov on 03:04:18. -# From Stepan Golosunov (2016-11-22): -# http://publication.pravo.gov.ru/Document/View/0001201611220031 - -Zone Europe/Saratov 3:04:18 - LMT 1919 Jul 1 0:00u - 3:00 - +03 1930 Jun 21 - 4:00 Russia +04/+05 1988 Mar 27 2:00s - 3:00 Russia +03/+04 1991 Mar 31 2:00s - 4:00 - +04 1992 Mar 29 2:00s - 3:00 Russia +03/+04 2011 Mar 27 2:00s - 4:00 - +04 2014 Oct 26 2:00s - 3:00 - +03 2016 Dec 4 2:00s - 4:00 - +04 - -# From Paul Eggert (2016-03-18): -# Europe/Kirov covers: -# 43 RU-KIR Kirov Oblast -# The 1989 transition is from USSR act No. 227 (1989-03-14). -# -Zone Europe/Kirov 3:18:48 - LMT 1919 Jul 1 0:00u - 3:00 - +03 1930 Jun 21 - 4:00 Russia +04/+05 1989 Mar 26 2:00s - 3:00 Russia +03/+04 1991 Mar 31 2:00s - 4:00 - +04 1992 Mar 29 2:00s - 3:00 Russia +03/+04 2011 Mar 27 2:00s - 4:00 - +04 2014 Oct 26 2:00s - 3:00 - +03 - -# From Tim Parenti (2014-07-03), per Oscar van Vlijmen (2001-08-25): -# Europe/Samara covers... -# 18 RU-UD Udmurt Republic -# 63 RU-SAM Samara Oblast - -# From Paul Eggert (2016-03-18): -# Byalokoz 1919 says Samara was 3:20:20. -# The 1989 transition is from USSR act No. 227 (1989-03-14). - -Zone Europe/Samara 3:20:20 - LMT 1919 Jul 1 0:00u - 3:00 - +03 1930 Jun 21 - 4:00 - +04 1935 Jan 27 - 4:00 Russia +04/+05 1989 Mar 26 2:00s - 3:00 Russia +03/+04 1991 Mar 31 2:00s - 2:00 Russia +02/+03 1991 Sep 29 2:00s - 3:00 - +03 1991 Oct 20 3:00 - 4:00 Russia +04/+05 2010 Mar 28 2:00s - 3:00 Russia +03/+04 2011 Mar 27 2:00s - 4:00 - +04 - -# From Paul Eggert (2016-03-18): -# Europe/Ulyanovsk covers: -# 73 RU-ULY Ulyanovsk Oblast - -# The 1989 transition is from USSR act No. 227 (1989-03-14). - -# From Alexander Krivenyshev (2016-02-17): -# Ulyanovsk ... on their way to change time zones by March 27, 2016 at 2am. -# Ulyanovsk Oblast ... from MSK to MSK+1 (UTC+3 to UTC+4) ... -# 920582-6 ... 02/17/2016 The State Duma passed the bill in the first reading. -# From Matt Johnson (2016-03-09): -# http://publication.pravo.gov.ru/Document/View/0001201603090051 - -Zone Europe/Ulyanovsk 3:13:36 - LMT 1919 Jul 1 0:00u - 3:00 - +03 1930 Jun 21 - 4:00 Russia +04/+05 1989 Mar 26 2:00s - 3:00 Russia +03/+04 1991 Mar 31 2:00s - 2:00 Russia +02/+03 1992 Jan 19 2:00s - 3:00 Russia +03/+04 2011 Mar 27 2:00s - 4:00 - +04 2014 Oct 26 2:00s - 3:00 - +03 2016 Mar 27 2:00s - 4:00 - +04 - -# From Tim Parenti (2014-07-03), per Oscar van Vlijmen (2001-08-25): -# Asia/Yekaterinburg covers... -# 02 RU-BA Bashkortostan, Republic of -# 90 RU-PER Perm Krai -# 45 RU-KGN Kurgan Oblast -# 56 RU-ORE Orenburg Oblast -# 66 RU-SVE Sverdlovsk Oblast -# 72 RU-TYU Tyumen Oblast -# 74 RU-CHE Chelyabinsk Oblast -# 86 RU-KHM Khanty-Mansi Autonomous Okrug - Yugra -# 89 RU-YAN Yamalo-Nenets Autonomous Okrug -# -# Note: Effective 2005-12-01, (59) Perm Oblast and (81) Komi-Permyak -# Autonomous Okrug merged to form (90, RU-PER) Perm Krai. - -# Milne says Yekaterinburg was 4:02:32.9; round to nearest. -# Byalokoz 1919 says its provincial time was based on Perm, at 3:45:05. -# Assume it switched on 1916-07-03, the time of the new standard. -# The 1919 and 1930 transitions are from Shanks. 
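# The Zone below shows the pattern shared by most Russian zones in this file:
# a switch to permanent "summer" time on 2011-03-27 and a return to permanent
# standard time on 2014-10-26. As an illustrative cross-check only, the same
# sequence can be read back from a compiled tz database, for example with
# Python 3.9+ zoneinfo (assuming an installation whose tzdata covers these
# transitions):
#
#     from datetime import datetime
#     from zoneinfo import ZoneInfo   # stdlib in 3.9+; may need the "tzdata" package
#
#     tz = ZoneInfo("Asia/Yekaterinburg")
#     for year in (2011, 2012, 2015):          # before, during, after the +06 era
#         print(year, datetime(year, 1, 1, 12, tzinfo=tz).utcoffset())
#     # Expected: 5:00:00 for 2011, 6:00:00 for 2012, 5:00:00 for 2015,
#     # matching the last three lines of the Zone entry below.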
- -Zone Asia/Yekaterinburg 4:02:33 - LMT 1916 Jul 3 - 3:45:05 - PMT 1919 Jul 15 4:00 - 4:00 - +04 1930 Jun 21 - 5:00 Russia +05/+06 1991 Mar 31 2:00s - 4:00 Russia +04/+05 1992 Jan 19 2:00s - 5:00 Russia +05/+06 2011 Mar 27 2:00s - 6:00 - +06 2014 Oct 26 2:00s - 5:00 - +05 - - -# From Tim Parenti (2014-07-03), per Oscar van Vlijmen (2001-08-25): -# Asia/Omsk covers... -# 55 RU-OMS Omsk Oblast - -# Byalokoz 1919 says Omsk was 4:53:30. - -Zone Asia/Omsk 4:53:30 - LMT 1919 Nov 14 - 5:00 - +05 1930 Jun 21 - 6:00 Russia +06/+07 1991 Mar 31 2:00s - 5:00 Russia +05/+06 1992 Jan 19 2:00s - 6:00 Russia +06/+07 2011 Mar 27 2:00s - 7:00 - +07 2014 Oct 26 2:00s - 6:00 - +06 - -# From Paul Eggert (2016-02-22): -# Asia/Barnaul covers: -# 04 RU-AL Altai Republic -# 22 RU-ALT Altai Krai - -# Data before 1991 are from Shanks & Pottenger. - -# From Stepan Golosunov (2016-03-07): -# Letter of Bank of Russia from 1995-05-25 -# http://www.bestpravo.ru/rossijskoje/lj-akty/y3a.htm -# suggests that Altai Republic transitioned to Moscow+3 on -# 1995-05-28. -# -# http://regnum.ru/news/society/1957270.html -# has some historical data for Altai Krai: -# before 1957: west part on UTC+6, east on UTC+7 -# after 1957: UTC+7 -# since 1995: UTC+6 -# http://barnaul.rusplt.ru/index/pochemu_altajskij_kraj_okazalsja_v_neprivychnom_chasovom_pojase-17648.html -# confirms that and provides more details including 1995-05-28 transition date. - -# From Alexander Krivenyshev (2016-02-17): -# Altai Krai and Altai Republic on their way to change time zones -# by March 27, 2016 at 2am.... -# Altai Republic / Gorno-Altaysk MSK+3 to MSK+4 (UTC+6 to UTC+7) ... -# Altai Krai / Barnaul MSK+3 to MSK+4 (UTC+6 to UTC+7) -# From Matt Johnson (2016-03-09): -# http://publication.pravo.gov.ru/Document/View/0001201603090043 -# http://publication.pravo.gov.ru/Document/View/0001201603090038 - -Zone Asia/Barnaul 5:35:00 - LMT 1919 Dec 10 - 6:00 - +06 1930 Jun 21 - 7:00 Russia +07/+08 1991 Mar 31 2:00s - 6:00 Russia +06/+07 1992 Jan 19 2:00s - 7:00 Russia +07/+08 1995 May 28 - 6:00 Russia +06/+07 2011 Mar 27 2:00s - 7:00 - +07 2014 Oct 26 2:00s - 6:00 - +06 2016 Mar 27 2:00s - 7:00 - +07 - -# From Paul Eggert (2016-03-18): -# Asia/Novosibirsk covers: -# 54 RU-NVS Novosibirsk Oblast - -# From Stepan Golosunov (2016-05-30): -# http://asozd2.duma.gov.ru/main.nsf/(Spravka)?OpenAgent&RN=1085784-6 -# moves Novosibirsk oblast from UTC+6 to UTC+7. -# From Stepan Golosunov (2016-07-04): -# The law was signed yesterday and published today on -# http://publication.pravo.gov.ru/Document/View/0001201607040064 - -Zone Asia/Novosibirsk 5:31:40 - LMT 1919 Dec 14 6:00 - 6:00 - +06 1930 Jun 21 - 7:00 Russia +07/+08 1991 Mar 31 2:00s - 6:00 Russia +06/+07 1992 Jan 19 2:00s - 7:00 Russia +07/+08 1993 May 23 # say Shanks & P. - 6:00 Russia +06/+07 2011 Mar 27 2:00s - 7:00 - +07 2014 Oct 26 2:00s - 6:00 - +06 2016 Jul 24 2:00s - 7:00 - +07 - -# From Paul Eggert (2016-03-18): -# Asia/Tomsk covers: -# 70 RU-TOM Tomsk Oblast - -# From Stepan Golosunov (2016-03-24): -# Byalokoz listed Tomsk at 5:39:51. - -# From Stanislaw A. Kuzikowski (1994-06-29): -# Tomsk is still 4 hours ahead of Moscow. - -# From Stepan Golosunov (2016-03-19): -# http://pravo.gov.ru/proxy/ips/?docbody=&nd=102075743 -# (fifth time belt being UTC+5+1(decree time) -# / UTC+5+1(decree time)+1(summer time)) ... 
-# Note that time belts (numbered from 2 (Moscow) to 12 according to their -# GMT/UTC offset and having too many exceptions like regions formally -# belonging to one belt but using time from another) were replaced -# with time zones in 2011 with different numbering (there was a -# 2-hour gap between second and third zones in 2011-2014). - -# From Stepan Golosunov (2016-04-12): -# http://asozd2.duma.gov.ru/main.nsf/(SpravkaNew)?OpenAgent&RN=1006865-6 -# This bill was approved in the first reading today. It moves Tomsk oblast -# from UTC+6 to UTC+7 and is supposed to come into effect on 2016-05-29 at -# 2:00. The bill needs to be approved in the second and the third readings by -# the State Duma, approved by the Federation Council, signed by the President -# and published to become a law. Minor changes in the text are to be expected -# before the second reading (references need to be updated to account for the -# recent changes). -# -# Judging by the ultra-short one-day amendments period, recent similar laws, -# the State Duma schedule and the Federation Council schedule -# http://www.duma.gov.ru/legislative/planning/day-shedule/por_vesna_2016/ -# http://council.gov.ru/activity/meetings/schedule/63303 -# I speculate that the final text of the bill will be proposed tomorrow, the -# bill will be approved in the second and the third readings on Friday, -# approved by the Federation Council on 2016-04-20, signed by the President and -# published as a law around 2016-04-26. - -# From Matt Johnson (2016-04-26): -# http://publication.pravo.gov.ru/Document/View/0001201604260048 - -Zone Asia/Tomsk 5:39:51 - LMT 1919 Dec 22 - 6:00 - +06 1930 Jun 21 - 7:00 Russia +07/+08 1991 Mar 31 2:00s - 6:00 Russia +06/+07 1992 Jan 19 2:00s - 7:00 Russia +07/+08 2002 May 1 3:00 - 6:00 Russia +06/+07 2011 Mar 27 2:00s - 7:00 - +07 2014 Oct 26 2:00s - 6:00 - +06 2016 May 29 2:00s - 7:00 - +07 - - -# From Tim Parenti (2014-07-03): -# Asia/Novokuznetsk covers... -# 42 RU-KEM Kemerovo Oblast - -# From Alexander Krivenyshev (2009-10-13): -# Kemerovo oblast' (Kemerovo region) in Russia will change current time zone on -# March 28, 2010: -# from current Russia Zone 6 - Krasnoyarsk Time Zone (KRA) UTC +0700 -# to Russia Zone 5 - Novosibirsk Time Zone (NOV) UTC +0600 -# -# This is according to Government of Russia decree No. 740, on September -# 14, 2009 "Application in the territory of the Kemerovo region the Fifth -# time zone." ("Russia Zone 5" or old "USSR Zone 5" is GMT +0600) -# -# Russian Government web site (Russian language) -# http://www.government.ru/content/governmentactivity/rfgovernmentdecisions/archive/2009/09/14/991633.htm -# or Russian-English translation by WorldTimeZone.com with reference -# map to local region and new Russia Time Zone map after March 28, 2010 -# http://www.worldtimezone.com/dst_news/dst_news_russia03.html -# -# Thus, when Russia will switch to DST on the night of March 28, 2010 -# Kemerovo region (Kemerovo oblast') will not change the clock. - -# From Tim Parenti (2014-07-02), per Alexander Krivenyshev (2014-07-02): -# The Kemerovo region will remain at UTC+7 through the 2014-10-26 change, thus -# realigning itself with KRAT. - -Zone Asia/Novokuznetsk 5:48:48 - LMT 1924 May 1 - 6:00 - +06 1930 Jun 21 - 7:00 Russia +07/+08 1991 Mar 31 2:00s - 6:00 Russia +06/+07 1992 Jan 19 2:00s - 7:00 Russia +07/+08 2010 Mar 28 2:00s - 6:00 Russia +06/+07 2011 Mar 27 2:00s - 7:00 - +07 - -# From Tim Parenti (2014-07-03), per Oscar van Vlijmen (2001-08-25): -# Asia/Krasnoyarsk covers... 
-# 17 RU-TY Tuva Republic -# 19 RU-KK Khakassia, Republic of -# 24 RU-KYA Krasnoyarsk Krai -# -# Note: Effective 2007-01-01, (88) Evenk Autonomous Okrug and (84) Taymyr -# Autonomous Okrug were merged into (24, RU-KYA) Krasnoyarsk Krai. - -# Byalokoz 1919 says Krasnoyarsk was 6:11:26. - -Zone Asia/Krasnoyarsk 6:11:26 - LMT 1920 Jan 6 - 6:00 - +06 1930 Jun 21 - 7:00 Russia +07/+08 1991 Mar 31 2:00s - 6:00 Russia +06/+07 1992 Jan 19 2:00s - 7:00 Russia +07/+08 2011 Mar 27 2:00s - 8:00 - +08 2014 Oct 26 2:00s - 7:00 - +07 - - -# From Tim Parenti (2014-07-03), per Oscar van Vlijmen (2001-08-25): -# Asia/Irkutsk covers... -# 03 RU-BU Buryatia, Republic of -# 38 RU-IRK Irkutsk Oblast -# -# Note: Effective 2008-01-01, (85) Ust-Orda Buryat Autonomous Okrug was -# merged into (38, RU-IRK) Irkutsk Oblast. - -# Milne 1899 says Irkutsk was 6:57:15. -# Byalokoz 1919 says Irkutsk was 6:57:05. -# Go with Byalokoz. - -Zone Asia/Irkutsk 6:57:05 - LMT 1880 - 6:57:05 - IMT 1920 Jan 25 # Irkutsk Mean Time - 7:00 - +07 1930 Jun 21 - 8:00 Russia +08/+09 1991 Mar 31 2:00s - 7:00 Russia +07/+08 1992 Jan 19 2:00s - 8:00 Russia +08/+09 2011 Mar 27 2:00s - 9:00 - +09 2014 Oct 26 2:00s - 8:00 - +08 - - -# From Tim Parenti (2014-07-06): -# Asia/Chita covers... -# 92 RU-ZAB Zabaykalsky Krai -# -# Note: Effective 2008-03-01, (75) Chita Oblast and (80) Agin-Buryat -# Autonomous Okrug merged to form (92, RU-ZAB) Zabaykalsky Krai. - -# From Alexander Krivenyshev (2016-01-02): -# [The] time zone in the Trans-Baikal Territory (Zabaykalsky Krai) - -# Asia/Chita [is changing] from UTC+8 to UTC+9. Effective date will -# be March 27, 2016 at 2:00am.... -# http://publication.pravo.gov.ru/Document/View/0001201512300107 - -Zone Asia/Chita 7:33:52 - LMT 1919 Dec 15 - 8:00 - +08 1930 Jun 21 - 9:00 Russia +09/+10 1991 Mar 31 2:00s - 8:00 Russia +08/+09 1992 Jan 19 2:00s - 9:00 Russia +09/+10 2011 Mar 27 2:00s - 10:00 - +10 2014 Oct 26 2:00s - 8:00 - +08 2016 Mar 27 2:00 - 9:00 - +09 - - -# From Tim Parenti (2014-07-03), per Oscar van Vlijmen (2009-11-29): -# Asia/Yakutsk covers... -# 28 RU-AMU Amur Oblast -# -# ...and parts of (14, RU-SA) Sakha (Yakutia) Republic: -# 14-02 **** Aldansky District -# 14-04 **** Amginsky District -# 14-05 **** Anabarsky District -# 14-06 **** Bulunsky District -# 14-07 **** Verkhnevilyuysky District -# 14-10 **** Vilyuysky District -# 14-11 **** Gorny District -# 14-12 **** Zhigansky District -# 14-13 **** Kobyaysky District -# 14-14 **** Lensky District -# 14-15 **** Megino-Kangalassky District -# 14-16 **** Mirninsky District -# 14-18 **** Namsky District -# 14-19 **** Neryungrinsky District -# 14-21 **** Nyurbinsky District -# 14-23 **** Olenyoksky District -# 14-24 **** Olyokminsky District -# 14-26 **** Suntarsky District -# 14-27 **** Tattinsky District -# 14-29 **** Ust-Aldansky District -# 14-32 **** Khangalassky District -# 14-33 **** Churapchinsky District -# 14-34 **** Eveno-Bytantaysky National District - -# From Tim Parenti (2014-07-03): -# Our commentary seems to have lost mention of (14-19) Neryungrinsky District. -# Since the surrounding districts of Sakha are all YAKT, assume this is, too. -# Also assume its history has been the same as the rest of Asia/Yakutsk. - -# Byalokoz 1919 says Yakutsk was 8:38:58. 
- -Zone Asia/Yakutsk 8:38:58 - LMT 1919 Dec 15 - 8:00 - +08 1930 Jun 21 - 9:00 Russia +09/+10 1991 Mar 31 2:00s - 8:00 Russia +08/+09 1992 Jan 19 2:00s - 9:00 Russia +09/+10 2011 Mar 27 2:00s - 10:00 - +10 2014 Oct 26 2:00s - 9:00 - +09 - - -# From Tim Parenti (2014-07-03), per Oscar van Vlijmen (2009-11-29): -# Asia/Vladivostok covers... -# 25 RU-PRI Primorsky Krai -# 27 RU-KHA Khabarovsk Krai -# 79 RU-YEV Jewish Autonomous Oblast -# -# ...and parts of (14, RU-SA) Sakha (Yakutia) Republic: -# 14-09 **** Verkhoyansky District -# 14-31 **** Ust-Yansky District - -# Milne 1899 says Vladivostok was 8:47:33.5. -# Byalokoz 1919 says Vladivostok was 8:47:31. -# Go with Byalokoz. - -Zone Asia/Vladivostok 8:47:31 - LMT 1922 Nov 15 - 9:00 - +09 1930 Jun 21 - 10:00 Russia +10/+11 1991 Mar 31 2:00s - 9:00 Russia +09/+10 1992 Jan 19 2:00s - 10:00 Russia +10/+11 2011 Mar 27 2:00s - 11:00 - +11 2014 Oct 26 2:00s - 10:00 - +10 - - -# From Tim Parenti (2014-07-03): -# Asia/Khandyga covers parts of (14, RU-SA) Sakha (Yakutia) Republic: -# 14-28 **** Tomponsky District -# 14-30 **** Ust-Maysky District - -# From Arthur David Olson (2012-05-09): -# Tomponskij and Ust'-Majskij switched from Vladivostok time to Yakutsk time -# in 2011. - -# From Paul Eggert (2012-11-25): -# Shanks and Pottenger (2003) has Khandyga on Yakutsk time. -# Make a wild guess that it switched to Vladivostok time in 2004. -# This transition is no doubt wrong, but we have no better info. - -Zone Asia/Khandyga 9:02:13 - LMT 1919 Dec 15 - 8:00 - +08 1930 Jun 21 - 9:00 Russia +09/+10 1991 Mar 31 2:00s - 8:00 Russia +08/+09 1992 Jan 19 2:00s - 9:00 Russia +09/+10 2004 - 10:00 Russia +10/+11 2011 Mar 27 2:00s - 11:00 - +11 2011 Sep 13 0:00s # Decree 725? - 10:00 - +10 2014 Oct 26 2:00s - 9:00 - +09 - - -# From Tim Parenti (2014-07-03): -# Asia/Sakhalin covers... -# 65 RU-SAK Sakhalin Oblast -# ...with the exception of: -# 65-11 **** Severo-Kurilsky District (North Kuril Islands) - -# From Matt Johnson (2016-02-22): -# Asia/Sakhalin is moving (in entirety) from UTC+10 to UTC+11 ... -# (2016-03-09): -# http://publication.pravo.gov.ru/Document/View/0001201603090044 - -# The Zone name should be Asia/Yuzhno-Sakhalinsk, but that's too long. -Zone Asia/Sakhalin 9:30:48 - LMT 1905 Aug 23 - 9:00 - +09 1945 Aug 25 - 11:00 Russia +11/+12 1991 Mar 31 2:00s # Sakhalin T - 10:00 Russia +10/+11 1992 Jan 19 2:00s - 11:00 Russia +11/+12 1997 Mar lastSun 2:00s - 10:00 Russia +10/+11 2011 Mar 27 2:00s - 11:00 - +11 2014 Oct 26 2:00s - 10:00 - +10 2016 Mar 27 2:00s - 11:00 - +11 - - -# From Tim Parenti (2014-07-03), per Oscar van Vlijmen (2009-11-29): -# Asia/Magadan covers... -# 49 RU-MAG Magadan Oblast - -# From Tim Parenti (2014-07-06), per Alexander Krivenyshev (2014-07-02): -# Magadan Oblast is moving from UTC+12 to UTC+10 on 2014-10-26; however, -# several districts of Sakha Republic as well as Severo-Kurilsky District of -# the Sakhalin Oblast (also known as the North Kuril Islands), represented -# until now by Asia/Magadan, will instead move to UTC+11. These regions will -# need their own zone. - -# From Alexander Krivenyshev (2016-03-27): -# ... draft bill 948300-6 to change its time zone from UTC+10 to UTC+11 ... -# will take ... effect ... on April 24, 2016 at 2 o'clock -# -# From Matt Johnson (2016-04-05): -# ... signed by the President today ... 
-# http://publication.pravo.gov.ru/Document/View/0001201604050038 - -Zone Asia/Magadan 10:03:12 - LMT 1924 May 2 - 10:00 - +10 1930 Jun 21 # Magadan Time - 11:00 Russia +11/+12 1991 Mar 31 2:00s - 10:00 Russia +10/+11 1992 Jan 19 2:00s - 11:00 Russia +11/+12 2011 Mar 27 2:00s - 12:00 - +12 2014 Oct 26 2:00s - 10:00 - +10 2016 Apr 24 2:00s - 11:00 - +11 - - -# From Tim Parenti (2014-07-06): -# Asia/Srednekolymsk covers parts of (14, RU-SA) Sakha (Yakutia) Republic: -# 14-01 **** Abyysky District -# 14-03 **** Allaikhovsky District -# 14-08 **** Verkhnekolymsky District -# 14-17 **** Momsky District -# 14-20 **** Nizhnekolymsky District -# 14-25 **** Srednekolymsky District -# -# ...and parts of (65, RU-SAK) Sakhalin Oblast: -# 65-11 **** Severo-Kurilsky District (North Kuril Islands) - -# From Tim Parenti (2014-07-02): -# Oymyakonsky District of Sakha Republic (represented by Ust-Nera), along with -# most of Sakhalin Oblast (represented by Sakhalin) will be moving to UTC+10 on -# 2014-10-26 to stay aligned with VLAT/SAKT; however, Severo-Kurilsky District -# of the Sakhalin Oblast (also known as the North Kuril Islands, represented by -# Severo-Kurilsk) will remain on UTC+11. - -# From Tim Parenti (2014-07-06): -# Assume North Kuril Islands have history like Magadan before 2011-03-27. -# There is a decent chance this is wrong, in which case a new zone -# Asia/Severo-Kurilsk would become necessary. -# -# Srednekolymsk and Zyryanka are the most populous places amongst these -# districts, but have very similar populations. In fact, Wikipedia currently -# lists them both as having 3528 people, exactly 1668 males and 1860 females -# each! (Yikes!) -# http://en.wikipedia.org/w/?title=Srednekolymsky_District&oldid=603435276 -# http://en.wikipedia.org/w/?title=Verkhnekolymsky_District&oldid=594378493 -# Assume this is a mistake, albeit an amusing one. -# -# Looking at censuses, the populations of the two municipalities seem to have -# fluctuated recently. Zyryanka was more populous than Srednekolymsk in the -# 1989 and 2002 censuses, but Srednekolymsk was more populous in the most -# recent (2010) census, 3525 to 3170. (See pages 195 and 197 of -# http://www.gks.ru/free_doc/new_site/perepis2010/croc/Documents/Vol1/pub-01-05.pdf -# in Russian.) In addition, Srednekolymsk appears to be a much older -# settlement and the population of Zyryanka seems to be declining. -# Go with Srednekolymsk. - -Zone Asia/Srednekolymsk 10:14:52 - LMT 1924 May 2 - 10:00 - +10 1930 Jun 21 - 11:00 Russia +11/+12 1991 Mar 31 2:00s - 10:00 Russia +10/+11 1992 Jan 19 2:00s - 11:00 Russia +11/+12 2011 Mar 27 2:00s - 12:00 - +12 2014 Oct 26 2:00s - 11:00 - +11 - - -# From Tim Parenti (2014-07-03): -# Asia/Ust-Nera covers parts of (14, RU-SA) Sakha (Yakutia) Republic: -# 14-22 **** Oymyakonsky District - -# From Arthur David Olson (2012-05-09): -# Ojmyakonskij [and the Kuril Islands] switched from -# Magadan time to Vladivostok time in 2011. -# -# From Tim Parenti (2014-07-06), per Alexander Krivenyshev (2014-07-02): -# It's unlikely that any of the Kuril Islands were involved in such a switch, -# as the South and Middle Kurils have been on UTC+11 (SAKT) with the rest of -# Sakhalin Oblast since at least 2011-09, and the North Kurils have been on -# UTC+12 since at least then, too. 
- -Zone Asia/Ust-Nera 9:32:54 - LMT 1919 Dec 15 - 8:00 - +08 1930 Jun 21 - 9:00 Russia +09/+10 1981 Apr 1 - 11:00 Russia +11/+12 1991 Mar 31 2:00s - 10:00 Russia +10/+11 1992 Jan 19 2:00s - 11:00 Russia +11/+12 2011 Mar 27 2:00s - 12:00 - +12 2011 Sep 13 0:00s # Decree 725? - 11:00 - +11 2014 Oct 26 2:00s - 10:00 - +10 - - -# From Tim Parenti (2014-07-03), per Oscar van Vlijmen (2001-08-25): -# Asia/Kamchatka covers... -# 91 RU-KAM Kamchatka Krai -# -# Note: Effective 2007-07-01, (41) Kamchatka Oblast and (82) Koryak -# Autonomous Okrug merged to form (91, RU-KAM) Kamchatka Krai. - -# The Zone name should be Asia/Petropavlovsk-Kamchatski or perhaps -# Asia/Petropavlovsk-Kamchatsky, but these are too long. -Zone Asia/Kamchatka 10:34:36 - LMT 1922 Nov 10 - 11:00 - +11 1930 Jun 21 - 12:00 Russia +12/+13 1991 Mar 31 2:00s - 11:00 Russia +11/+12 1992 Jan 19 2:00s - 12:00 Russia +12/+13 2010 Mar 28 2:00s - 11:00 Russia +11/+12 2011 Mar 27 2:00s - 12:00 - +12 - - -# From Tim Parenti (2014-07-03): -# Asia/Anadyr covers... -# 87 RU-CHU Chukotka Autonomous Okrug - -Zone Asia/Anadyr 11:49:56 - LMT 1924 May 2 - 12:00 - +12 1930 Jun 21 - 13:00 Russia +13/+14 1982 Apr 1 0:00s - 12:00 Russia +12/+13 1991 Mar 31 2:00s - 11:00 Russia +11/+12 1992 Jan 19 2:00s - 12:00 Russia +12/+13 2010 Mar 28 2:00s - 11:00 Russia +11/+12 2011 Mar 27 2:00s - 12:00 - +12 - - -# San Marino -# See Europe/Rome. - -# Serbia -# Zone NAME GMTOFF RULES FORMAT [UNTIL] -Zone Europe/Belgrade 1:22:00 - LMT 1884 - 1:00 - CET 1941 Apr 18 23:00 - 1:00 C-Eur CE%sT 1945 - 1:00 - CET 1945 May 8 2:00s - 1:00 1:00 CEST 1945 Sep 16 2:00s -# Metod Koželj reports that the legal date of -# transition to EU rules was 1982-11-27, for all of Yugoslavia at the time. -# Shanks & Pottenger don't give as much detail, so go with Koželj. - 1:00 - CET 1982 Nov 27 - 1:00 EU CE%sT -Link Europe/Belgrade Europe/Ljubljana # Slovenia -Link Europe/Belgrade Europe/Podgorica # Montenegro -Link Europe/Belgrade Europe/Sarajevo # Bosnia and Herzegovina -Link Europe/Belgrade Europe/Skopje # Macedonia -Link Europe/Belgrade Europe/Zagreb # Croatia - -# Slovakia -Link Europe/Prague Europe/Bratislava - -# Slovenia -# See Europe/Belgrade. - -# Spain -# -# From Paul Eggert (2016-12-14): -# -# The source for Europe/Madrid before 2013 is: -# Planesas P. La hora oficial en España y sus cambios. -# Anuario del Observatorio Astronómico de Madrid (2013, in Spanish). -# http://astronomia.ign.es/rknowsys-theme/images/webAstro/paginas/documentos/Anuario/lahoraoficialenespana.pdf -# As this source says that historical time in the Canaries is obscure, -# and it does not discuss Ceuta, stick with Shanks for now for that data. -# -# In the 1918 and 1919 fallback transitions in Spain, the clock for -# the hour-longer day officially kept going after midnight, so that -# the repeated instances of that day's 00:00 hour were 24 hours apart, -# with a fallback transition from the second occurrence of 00:59... to -# the next day's 00:00. Our data format cannot represent this -# directly, and instead repeats the first hour of the next day, with a -# fallback transition from the next day's 00:59... to 00:00. - -# From Michael Deckers (2016-12-15): -# The Royal Decree of 1900-06-26 quoted by Planesas, online at -# https://www.boe.es/datos/pdfs/BOE//1900/209/A00383-00384.pdf -# says in its article 5 (my translation): -# These dispositions will enter into force beginning with the -# instant at which, according to the time indicated in article 1, -# the 1st day of January of 1901 will begin. 
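# As the note above explains, the 1918/1919 Spanish fall-back days are modelled
# by repeating the first hour of the following day. A minimal sketch of how that
# modelling surfaces through a compiled zone, using Python 3.9+ zoneinfo and
# assuming a tz build that keeps pre-1970 transitions (purely illustrative):
#
#     from datetime import datetime
#     from zoneinfo import ZoneInfo
#
#     madrid = ZoneInfo("Europe/Madrid")
#     # As modelled, wall time 1918-10-07 00:30 occurs twice:
#     early = datetime(1918, 10, 7, 0, 30, tzinfo=madrid)          # fold=0, WEST
#     late  = datetime(1918, 10, 7, 0, 30, fold=1, tzinfo=madrid)  # fold=1, WET
#     print(early.utcoffset(), late.utcoffset())   # expect 1:00:00 and 0:00:00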
- -# Rule NAME FROM TO TYPE IN ON AT SAVE LETTER/S -Rule Spain 1918 only - Apr 15 23:00 1:00 S -Rule Spain 1918 1919 - Oct 6 24:00s 0 - -Rule Spain 1919 only - Apr 6 23:00 1:00 S -Rule Spain 1924 only - Apr 16 23:00 1:00 S -Rule Spain 1924 only - Oct 4 24:00s 0 - -Rule Spain 1926 only - Apr 17 23:00 1:00 S -Rule Spain 1926 1929 - Oct Sat>=1 24:00s 0 - -Rule Spain 1927 only - Apr 9 23:00 1:00 S -Rule Spain 1928 only - Apr 15 0:00 1:00 S -Rule Spain 1929 only - Apr 20 23:00 1:00 S -# Republican Spain during the civil war; it controlled Madrid until 1939-03-28. -Rule Spain 1937 only - Jun 16 23:00 1:00 S -Rule Spain 1937 only - Oct 2 24:00s 0 - -Rule Spain 1938 only - Apr 2 23:00 1:00 S -Rule Spain 1938 only - Apr 30 23:00 2:00 M -Rule Spain 1938 only - Oct 2 24:00 1:00 S -# The following rules are for unified Spain again. -# -# Planesas does not say what happened in Madrid between its fall on -# 1939-03-28 and the Nationalist spring-forward transition on -# 1939-04-15. For lack of better info, assume Madrid's clocks did not -# change during that period. -# -# The first rule is commented out, as it is redundant for Republican Spain. -#Rule Spain 1939 only - Apr 15 23:00 1:00 S -Rule Spain 1939 only - Oct 7 24:00s 0 - -Rule Spain 1942 only - May 2 23:00 1:00 S -Rule Spain 1942 only - Sep 1 1:00 0 - -Rule Spain 1943 1946 - Apr Sat>=13 23:00 1:00 S -Rule Spain 1943 1944 - Oct Sun>=1 1:00 0 - -Rule Spain 1945 1946 - Sep lastSun 1:00 0 - -Rule Spain 1949 only - Apr 30 23:00 1:00 S -Rule Spain 1949 only - Oct 2 1:00 0 - -Rule Spain 1974 1975 - Apr Sat>=12 23:00 1:00 S -Rule Spain 1974 1975 - Oct Sun>=1 1:00 0 - -Rule Spain 1976 only - Mar 27 23:00 1:00 S -Rule Spain 1976 1977 - Sep lastSun 1:00 0 - -Rule Spain 1977 only - Apr 2 23:00 1:00 S -Rule Spain 1978 only - Apr 2 2:00s 1:00 S -Rule Spain 1978 only - Oct 1 2:00s 0 - -# Nationalist Spain during the civil war -#Rule NatSpain 1937 only - May 22 23:00 1:00 S -#Rule NatSpain 1937 1938 - Oct Sat>=1 24:00s 0 - -#Rule NatSpain 1938 only - Mar 26 23:00 1:00 S -# The following rules are copied from Morocco from 1967 through 1978. -Rule SpainAfrica 1967 only - Jun 3 12:00 1:00 S -Rule SpainAfrica 1967 only - Oct 1 0:00 0 - -Rule SpainAfrica 1974 only - Jun 24 0:00 1:00 S -Rule SpainAfrica 1974 only - Sep 1 0:00 0 - -Rule SpainAfrica 1976 1977 - May 1 0:00 1:00 S -Rule SpainAfrica 1976 only - Aug 1 0:00 0 - -Rule SpainAfrica 1977 only - Sep 28 0:00 0 - -Rule SpainAfrica 1978 only - Jun 1 0:00 1:00 S -Rule SpainAfrica 1978 only - Aug 4 0:00 0 - -# Zone NAME GMTOFF RULES FORMAT [UNTIL] -Zone Europe/Madrid -0:14:44 - LMT 1900 Dec 31 23:45:16 - 0:00 Spain WE%sT 1940 Mar 16 23:00 - 1:00 Spain CE%sT 1979 - 1:00 EU CE%sT -Zone Africa/Ceuta -0:21:16 - LMT 1900 Dec 31 23:38:44 - 0:00 - WET 1918 May 6 23:00 - 0:00 1:00 WEST 1918 Oct 7 23:00 - 0:00 - WET 1924 - 0:00 Spain WE%sT 1929 - 0:00 SpainAfrica WE%sT 1984 Mar 16 - 1:00 - CET 1986 - 1:00 EU CE%sT -Zone Atlantic/Canary -1:01:36 - LMT 1922 Mar # Las Palmas de Gran C. - -1:00 - -01 1946 Sep 30 1:00 - 0:00 - WET 1980 Apr 6 0:00s - 0:00 1:00 WEST 1980 Sep 28 1:00u - 0:00 EU WE%sT -# IATA SSIM (1996-09) says the Canaries switch at 2:00u, not 1:00u. -# Ignore this for now, as the Canaries are part of the EU. 
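# One way to read the first Europe/Madrid line above, tying it to the Royal
# Decree quoted earlier: an UNTIL (or AT) field without a suffix is wall-clock
# time of the line it appears on (a trailing "s" means local standard time, "u"
# means universal time). Here that is LMT, 0:14:44 behind Greenwich, so
# 1900 Dec 31 23:45:16 LMT is exactly the instant at which 1901-01-01 begins in
# the new Greenwich-based time. A quick arithmetic check (illustrative Python
# sketch):
#
#     from datetime import datetime, timedelta
#
#     until_lmt  = datetime(1900, 12, 31, 23, 45, 16)        # UNTIL column, read in LMT
#     lmt_offset = -timedelta(minutes=14, seconds=44)        # GMTOFF column: -0:14:44
#     print(until_lmt - lmt_offset)   # UT = local - offset -> 1901-01-01 00:00:00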
- -# Sweden - -# From Ivan Nilsson (2001-04-13), superseding Shanks & Pottenger: -# -# The law "Svensk författningssamling 1878, no 14" about standard time in 1879: -# From the beginning of 1879 (that is 01-01 00:00) the time for all -# places in the country is "the mean solar time for the meridian at -# three degrees, or twelve minutes of time, to the west of the -# meridian of the Observatory of Stockholm". The law is dated 1878-05-31. -# -# The observatory at that time had the meridian 18 degrees 03' 30" -# eastern longitude = 01:12:14 in time. Less 12 minutes gives the -# national standard time as 01:00:14 ahead of GMT.... -# -# About the beginning of CET in Sweden. The lawtext ("Svensk -# författningssamling 1899, no 44") states, that "from the beginning -# of 1900... ... the same as the mean solar time for the meridian at -# the distance of one hour of time from the meridian of the English -# observatory at Greenwich, or at 12 minutes 14 seconds to the west -# from the meridian of the Observatory of Stockholm". The law is dated -# 1899-06-16. In short: At 1900-01-01 00:00:00 the new standard time -# in Sweden is 01:00:00 ahead of GMT. -# -# 1916: The lawtext ("Svensk författningssamling 1916, no 124") states -# that "1916-05-15 is considered to begin one hour earlier". It is -# pretty obvious that at 05-14 23:00 the clocks are set to 05-15 00:00.... -# Further the law says, that "1916-09-30 is considered to end one hour later". -# -# The laws regulating [DST] are available on the site of the Swedish -# Parliament beginning with 1985 - the laws regulating 1980/1984 are -# not available on the site (to my knowledge they are only available -# in Swedish): (type -# "sommartid" without the quotes in the field "Fritext" and then click -# the Sök-button). -# -# (2001-05-13): -# -# I have now found a newspaper stating that at 1916-10-01 01:00 -# summertime the church-clocks etc were set back one hour to show -# 1916-10-01 00:00 standard time. The article also reports that some -# people thought the switch to standard time would take place already -# at 1916-10-01 00:00 summer time, but they had to wait for another -# hour before the event took place. -# -# Source: The newspaper "Dagens Nyheter", 1916-10-01, page 7 upper left. - -# Zone NAME GMTOFF RULES FORMAT [UNTIL] -Zone Europe/Stockholm 1:12:12 - LMT 1879 Jan 1 - 1:00:14 - SET 1900 Jan 1 # Swedish Time - 1:00 - CET 1916 May 14 23:00 - 1:00 1:00 CEST 1916 Oct 1 1:00 - 1:00 - CET 1980 - 1:00 EU CE%sT - -# Switzerland -# From Howse: -# By the end of the 18th century clocks and watches became commonplace -# and their performance improved enormously. Communities began to keep -# mean time in preference to apparent time - Geneva from 1780 .... -# Rule NAME FROM TO TYPE IN ON AT SAVE LETTER/S -# From Whitman (who writes "Midnight?"): -# Rule Swiss 1940 only - Nov 2 0:00 1:00 S -# Rule Swiss 1940 only - Dec 31 0:00 0 - -# From Shanks & Pottenger: -# Rule Swiss 1941 1942 - May Sun>=1 2:00 1:00 S -# Rule Swiss 1941 1942 - Oct Sun>=1 0:00 0 - - -# From Alois Treindl (2008-12-17): -# I have researched the DST usage in Switzerland during the 1940ies. -# -# As I wrote in an earlier message, I suspected the current tzdata values -# to be wrong. This is now verified. -# -# I have found copies of the original ruling by the Swiss Federal -# government, in 'Eidgenössische Gesetzessammlung 1941 and 1942' (Swiss -# federal law collection)... 
-# -# DST began on Monday 5 May 1941, 1:00 am by shifting the clocks to 2:00 am -# DST ended on Monday 6 Oct 1941, 2:00 am by shifting the clocks to 1:00 am. -# -# DST began on Monday, 4 May 1942 at 01:00 am -# DST ended on Monday, 5 Oct 1942 at 02:00 am -# -# There was no DST in 1940, I have checked the law collection carefully. -# It is also indicated by the fact that the 1942 entry in the law -# collection points back to 1941 as a reference, but no reference to any -# other years are made. -# -# Newspaper articles I have read in the archives on 6 May 1941 reported -# about the introduction of DST (Sommerzeit in German) during the previous -# night as an absolute novelty, because this was the first time that such -# a thing had happened in Switzerland. -# -# I have also checked 1916, because one book source (Gabriel, Traité de -# l'heure dans le monde) claims that Switzerland had DST in 1916. This is -# false, no official document could be found. Probably Gabriel got misled -# by references to Germany, which introduced DST in 1916 for the first time. -# -# The tzdata rules for Switzerland must be changed to: -# Rule Swiss 1941 1942 - May Mon>=1 1:00 1:00 S -# Rule Swiss 1941 1942 - Oct Mon>=1 2:00 0 - -# -# The 1940 rules must be deleted. -# -# One further detail for Switzerland, which is probably out of scope for -# most users of tzdata: The [Europe/Zurich zone] ... -# describes all of Switzerland correctly, with the exception of -# the Canton de Genève (Geneva, Genf). Between 1848 and 1894 Geneva did not -# follow Bern Mean Time but kept its own local mean time. -# To represent this, an extra zone would be needed. -# -# From Alois Treindl (2013-09-11): -# The Federal regulations say -# http://www.admin.ch/opc/de/classified-compilation/20071096/index.html -# ... the meridian for Bern mean time ... is 7 degrees 26' 22.50". -# Expressed in time, it is 0h29m45.5s. - -# From Pierre-Yves Berger (2013-09-11): -# the "Circulaire du conseil fédéral" (December 11 1893) -# http://www.amtsdruckschriften.bar.admin.ch/viewOrigDoc.do?id=10071353 -# clearly states that the [1894-06-01] change should be done at midnight -# but if no one is present after 11 at night, could be postponed until one -# hour before the beginning of service. - -# From Paul Eggert (2013-09-11): -# Round BMT to the nearest even second, 0:29:46. -# -# We can find no reliable source for Shanks's assertion that all of Switzerland -# except Geneva switched to Bern Mean Time at 00:00 on 1848-09-12. This book: -# -# Jakob Messerli. Gleichmässig, pünktlich, schnell. Zeiteinteilung und -# Zeitgebrauch in der Schweiz im 19. Jahrhundert. Chronos, Zurich 1995, -# ISBN 3-905311-68-2, OCLC 717570797. -# -# suggests that the transition was more gradual, and that the Swiss did not -# agree about civil time during the transition. The timekeeping it gives the -# most detail for is postal and telegraph time: here, federal legislation (the -# "Bundesgesetz über die Erstellung von elektrischen Telegraphen") passed on -# 1851-11-23, and an official implementation notice was published 1853-07-16 -# (Bundesblatt 1853, Bd. II, S. 859). On p 72 Messerli writes that in -# practice since July 1853 Bernese time was used in "all postal and telegraph -# offices in Switzerland from Geneva to St. Gallen and Basel to Chiasso" -# (Google translation). For now, model this transition as occurring on -# 1853-07-16, though it probably occurred at some other date in Zurich, and -# legal civil time probably changed at still some other transition date. 
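# For the arithmetic behind the BMT value above: a meridian in degrees east
# converts to a mean-time offset at 15 degrees per hour, so 7 degrees 26' 22.50"
# gives 0:29:45.5, which rounds to the even second 0:29:46 used below. A small
# illustrative Python sketch:
#
#     def meridian_to_seconds(deg, arcmin, arcsec):
#         """East longitude -> local mean time offset in seconds (15 deg = 1 hour)."""
#         return (deg + arcmin / 60.0 + arcsec / 3600.0) * 240.0   # 240 s per degree
#
#     print(meridian_to_seconds(7, 26, 22.50))   # 1785.5 s = 0:29:45.5 -> 0:29:46 (BMT)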
- -# Rule NAME FROM TO TYPE IN ON AT SAVE LETTER/S -Rule Swiss 1941 1942 - May Mon>=1 1:00 1:00 S -Rule Swiss 1941 1942 - Oct Mon>=1 2:00 0 - -# Zone NAME GMTOFF RULES FORMAT [UNTIL] -Zone Europe/Zurich 0:34:08 - LMT 1853 Jul 16 # See above comment. - 0:29:46 - BMT 1894 Jun # Bern Mean Time - 1:00 Swiss CE%sT 1981 - 1:00 EU CE%sT - -# Turkey - -# From Kıvanç Yazan (2016-09-25): -# 1) For 1986-2006, DST started at 01:00 local and ended at 02:00 local, with -# no exceptions. -# 2) 1994's lastSun was overridden with Mar 20 ... -# Here are official papers: -# http://www.resmigazete.gov.tr/arsiv/19032.pdf - page 2 for 1986 -# http://www.resmigazete.gov.tr/arsiv/19400.pdf - page 4 for 1987 -# http://www.resmigazete.gov.tr/arsiv/19752.pdf - page 15 for 1988 -# http://www.resmigazete.gov.tr/arsiv/20102.pdf - page 6 for 1989 -# http://www.resmigazete.gov.tr/arsiv/20464.pdf - page 1 for 1990 - 1992 -# http://www.resmigazete.gov.tr/arsiv/21531.pdf - page 15 for 1993 - 1995 -# http://www.resmigazete.gov.tr/arsiv/21879.pdf - page 1 for overriding 1994 -# http://www.resmigazete.gov.tr/arsiv/22588.pdf - page 1 for 1996, 1997 -# http://www.resmigazete.gov.tr/arsiv/23286.pdf - page 10 for 1998 - 2000 -# http://www.resmigazete.gov.tr/eskiler/2001/03/20010324.htm#2 - for 2001 -# http://www.resmigazete.gov.tr/eskiler/2002/03/20020316.htm#2 - for 2002-2006 -# From Paul Eggert (2016-09-25): -# Prefer the above sources to Shanks & Pottenger for time stamps after 1985. - -# From Steffen Thorsen (2007-03-09): -# Starting 2007 though, it seems that they are adopting EU's 1:00 UTC -# start/end time, according to the following page (2007-03-07): -# http://www.ntvmsnbc.com/news/402029.asp -# The official document is located here - it is in Turkish...: -# http://rega.basbakanlik.gov.tr/eskiler/2007/03/20070307-7.htm -# I was able to locate the following seemingly official document -# (on a non-government server though) describing dates between 2002 and 2006: -# http://www.alomaliye.com/bkk_2002_3769.htm - -# From Gökdeniz Karadağ (2011-03-10): -# According to the articles linked below, Turkey will change into summer -# time zone (GMT+3) on March 28, 2011 at 3:00 a.m. instead of March 27. -# This change is due to a nationwide exam on 27th. -# http://www.worldbulletin.net/?aType=haber&ArticleID=70872 -# Turkish: -# http://www.hurriyet.com.tr/ekonomi/17230464.asp?gid=373 - -# From Faruk Pasin (2014-02-14): -# The DST for Turkey has been changed for this year because of the -# Turkish Local election.... -# http://www.sabah.com.tr/Ekonomi/2014/02/12/yaz-saatinde-onemli-degisiklik -# ... so Turkey will move clocks forward one hour on March 31 at 3:00 a.m. -# From Randal L. Schwartz (2014-04-15): -# Having landed on a flight from the states to Istanbul (via AMS) on March 31, -# I can tell you that NOBODY (even the airlines) respected this timezone DST -# change delay. Maybe the word just didn't get out in time. -# From Paul Eggert (2014-06-15): -# The press reported massive confusion, as election officials obeyed the rule -# change but cell phones (and airline baggage systems) did not. See: -# Kostidis M. Eventful elections in Turkey. Balkan News Agency -# http://www.balkaneu.com/eventful-elections-turkey/ 2014-03-30. -# I guess the best we can do is document the official time. - -# From Fatih (2015-09-29): -# It's officially announced now by the Ministry of Energy. 
-# Turkey delays winter time to 8th of November 04:00 -# http://www.aa.com.tr/tr/turkiye/yaz-saati-uygulamasi-8-kasimda-sona-erecek/362217 -# -# From BBC News (2015-10-25): -# Confused Turks are asking "what's the time?" after automatic clocks defied a -# government decision ... "For the next two weeks #Turkey is on EEST... Erdogan -# Engineered Standard Time," said Twitter user @aysekarahasan. -# http://www.bbc.com/news/world-europe-34631326 - -# From Burak AYDIN (2016-09-08): -# Turkey will stay in Daylight Saving Time even in winter.... -# http://www.resmigazete.gov.tr/eskiler/2016/09/20160908-2.pdf -# -# From Paul Eggert (2016-09-07): -# The change is permanent, so this is the new standard time in Turkey. -# It takes effect today, which is not much notice. - -# Rule NAME FROM TO TYPE IN ON AT SAVE LETTER/S -Rule Turkey 1916 only - May 1 0:00 1:00 S -Rule Turkey 1916 only - Oct 1 0:00 0 - -Rule Turkey 1920 only - Mar 28 0:00 1:00 S -Rule Turkey 1920 only - Oct 25 0:00 0 - -Rule Turkey 1921 only - Apr 3 0:00 1:00 S -Rule Turkey 1921 only - Oct 3 0:00 0 - -Rule Turkey 1922 only - Mar 26 0:00 1:00 S -Rule Turkey 1922 only - Oct 8 0:00 0 - -# Whitman gives 1923 Apr 28 - Sep 16 and no DST in 1924-1925; -# go with Shanks & Pottenger. -Rule Turkey 1924 only - May 13 0:00 1:00 S -Rule Turkey 1924 1925 - Oct 1 0:00 0 - -Rule Turkey 1925 only - May 1 0:00 1:00 S -Rule Turkey 1940 only - Jun 30 0:00 1:00 S -Rule Turkey 1940 only - Oct 5 0:00 0 - -Rule Turkey 1940 only - Dec 1 0:00 1:00 S -Rule Turkey 1941 only - Sep 21 0:00 0 - -Rule Turkey 1942 only - Apr 1 0:00 1:00 S -# Whitman omits the next two transition and gives 1945 Oct 1; -# go with Shanks & Pottenger. -Rule Turkey 1942 only - Nov 1 0:00 0 - -Rule Turkey 1945 only - Apr 2 0:00 1:00 S -Rule Turkey 1945 only - Oct 8 0:00 0 - -Rule Turkey 1946 only - Jun 1 0:00 1:00 S -Rule Turkey 1946 only - Oct 1 0:00 0 - -Rule Turkey 1947 1948 - Apr Sun>=16 0:00 1:00 S -Rule Turkey 1947 1950 - Oct Sun>=2 0:00 0 - -Rule Turkey 1949 only - Apr 10 0:00 1:00 S -Rule Turkey 1950 only - Apr 19 0:00 1:00 S -Rule Turkey 1951 only - Apr 22 0:00 1:00 S -Rule Turkey 1951 only - Oct 8 0:00 0 - -Rule Turkey 1962 only - Jul 15 0:00 1:00 S -Rule Turkey 1962 only - Oct 8 0:00 0 - -Rule Turkey 1964 only - May 15 0:00 1:00 S -Rule Turkey 1964 only - Oct 1 0:00 0 - -Rule Turkey 1970 1972 - May Sun>=2 0:00 1:00 S -Rule Turkey 1970 1972 - Oct Sun>=2 0:00 0 - -Rule Turkey 1973 only - Jun 3 1:00 1:00 S -Rule Turkey 1973 only - Nov 4 3:00 0 - -Rule Turkey 1974 only - Mar 31 2:00 1:00 S -Rule Turkey 1974 only - Nov 3 5:00 0 - -Rule Turkey 1975 only - Mar 30 0:00 1:00 S -Rule Turkey 1975 1976 - Oct lastSun 0:00 0 - -Rule Turkey 1976 only - Jun 1 0:00 1:00 S -Rule Turkey 1977 1978 - Apr Sun>=1 0:00 1:00 S -Rule Turkey 1977 only - Oct 16 0:00 0 - -Rule Turkey 1979 1980 - Apr Sun>=1 3:00 1:00 S -Rule Turkey 1979 1982 - Oct Mon>=11 0:00 0 - -Rule Turkey 1981 1982 - Mar lastSun 3:00 1:00 S -Rule Turkey 1983 only - Jul 31 0:00 1:00 S -Rule Turkey 1983 only - Oct 2 0:00 0 - -Rule Turkey 1985 only - Apr 20 0:00 1:00 S -Rule Turkey 1985 only - Sep 28 0:00 0 - -Rule Turkey 1986 1993 - Mar lastSun 1:00s 1:00 S -Rule Turkey 1986 1995 - Sep lastSun 1:00s 0 - -Rule Turkey 1994 only - Mar 20 1:00s 1:00 S -Rule Turkey 1995 2006 - Mar lastSun 1:00s 1:00 S -Rule Turkey 1996 2006 - Oct lastSun 1:00s 0 - -# Zone NAME GMTOFF RULES FORMAT [UNTIL] -Zone Europe/Istanbul 1:55:52 - LMT 1880 - 1:56:56 - IMT 1910 Oct # Istanbul Mean Time? 
- 2:00 Turkey EE%sT 1978 Oct 15 - 3:00 Turkey +03/+04 1985 Apr 20 - 2:00 Turkey EE%sT 2007 - 2:00 EU EE%sT 2011 Mar 27 1:00u - 2:00 - EET 2011 Mar 28 1:00u - 2:00 EU EE%sT 2014 Mar 30 1:00u - 2:00 - EET 2014 Mar 31 1:00u - 2:00 EU EE%sT 2015 Oct 25 1:00u - 2:00 1:00 EEST 2015 Nov 8 1:00u - 2:00 EU EE%sT 2016 Sep 7 - 3:00 - +03 -Link Europe/Istanbul Asia/Istanbul # Istanbul is in both continents. - -# Ukraine -# -# From Igor Karpov, who works for the Ukrainian Ministry of Justice, -# via Garrett Wollman (2003-01-27): -# BTW, I've found the official document on this matter. It's government -# regulations No. 509, May 13, 1996. In my poor translation it says: -# "Time in Ukraine is set to second timezone (Kiev time). Each last Sunday -# of March at 3am the time is changing to 4am and each last Sunday of -# October the time at 4am is changing to 3am" - -# From Alexander Krivenyshev (2011-09-20): -# On September 20, 2011 the deputies of the Verkhovna Rada agreed to -# abolish the transfer clock to winter time. -# -# Bill No. 8330 of MP from the Party of Regions Oleg Nadoshi got -# approval from 266 deputies. -# -# Ukraine abolishes transfer back to the winter time (in Russian) -# http://news.mail.ru/politics/6861560/ -# -# The Ukrainians will no longer change the clock (in Russian) -# http://www.segodnya.ua/news/14290482.html -# -# Deputies cancelled the winter time (in Russian) -# http://www.pravda.com.ua/rus/news/2011/09/20/6600616/ -# -# From Philip Pizzey (2011-10-18): -# Today my Ukrainian colleagues have informed me that the -# Ukrainian parliament have decided that they will go to winter -# time this year after all. -# -# From Udo Schwedt (2011-10-18): -# As far as I understand, the recent change to the Ukrainian time zone -# (Europe/Kiev) to introduce permanent daylight saving time (similar -# to Russia) was reverted today: -# http://portal.rada.gov.ua/rada/control/en/publish/article/info_left?art_id=287324&cat_id=105995 -# -# Also reported by Alexander Bokovoy (2011-10-18) who also noted: -# The law documents themselves are at -# http://w1.c1.rada.gov.ua/pls/zweb_n/webproc4_1?id=&pf3511=41484 - -# From Vladimir in Moscow via Alois Treindl re Kiev time 1991/2 (2014-02-28): -# First in Ukraine they changed Time zone from UTC+3 to UTC+2 with DST: -# 03 25 1990 02:00 -03.00 1 Time Zone 3 with DST -# 07 01 1990 02:00 -02.00 1 Time Zone 2 with DST -# * Ukrainian Government's Resolution of 18.06.1990, No. 134. -# http://search.ligazakon.ua/l_doc2.nsf/link1/T001500.html -# -# They did not end DST in September, 1990 (according to the law, -# "summer time" was still in action): -# 09 30 1990 03:00 -02.00 1 Time Zone 2 with DST -# * Ukrainian Government's Resolution of 21.09.1990, No. 272. -# http://search.ligazakon.ua/l_doc2.nsf/link1/KP900272.html -# -# Again no change in March, 1991 ("summer time" in action): -# 03 31 1991 02:00 -02.00 1 Time Zone 2 with DST -# -# DST ended in September 1991 ("summer time" ended): -# 09 29 1991 03:00 -02.00 0 Time Zone 2, no DST -# * Ukrainian Government's Resolution of 25.09.1991, No. 225. -# http://www.uazakon.com/documents/date_21/pg_iwgdoc.htm -# This is an answer. -# -# Since 1992 they had normal DST procedure: -# 03 29 1992 02:00 -02.00 1 DST started -# 09 27 1992 03:00 -02.00 0 DST ended -# * Ukrainian Government's Resolution of 20.03.1992, No. 139. -# http://www.uazakon.com/documents/date_8u/pg_grcasa.htm - -# Zone NAME GMTOFF RULES FORMAT [UNTIL] -# Most of Ukraine since 1970 has been like Kiev. 
-# "Kyiv" is the transliteration of the Ukrainian name, but -# "Kiev" is more common in English. -Zone Europe/Kiev 2:02:04 - LMT 1880 - 2:02:04 - KMT 1924 May 2 # Kiev Mean Time - 2:00 - EET 1930 Jun 21 - 3:00 - MSK 1941 Sep 20 - 1:00 C-Eur CE%sT 1943 Nov 6 - 3:00 Russia MSK/MSD 1990 Jul 1 2:00 - 2:00 1:00 EEST 1991 Sep 29 3:00 - 2:00 E-Eur EE%sT 1995 - 2:00 EU EE%sT -# Ruthenia used CET 1990/1991. -# "Uzhhorod" is the transliteration of the Rusyn/Ukrainian pronunciation, but -# "Uzhgorod" is more common in English. -Zone Europe/Uzhgorod 1:29:12 - LMT 1890 Oct - 1:00 - CET 1940 - 1:00 C-Eur CE%sT 1944 Oct - 1:00 1:00 CEST 1944 Oct 26 - 1:00 - CET 1945 Jun 29 - 3:00 Russia MSK/MSD 1990 - 3:00 - MSK 1990 Jul 1 2:00 - 1:00 - CET 1991 Mar 31 3:00 - 2:00 - EET 1992 - 2:00 E-Eur EE%sT 1995 - 2:00 EU EE%sT -# Zaporozh'ye and eastern Lugansk oblasts observed DST 1990/1991. -# "Zaporizhia" is the transliteration of the Ukrainian name, but -# "Zaporozh'ye" is more common in English. Use the common English -# spelling, except omit the apostrophe as it is not allowed in -# portable Posix file names. -Zone Europe/Zaporozhye 2:20:40 - LMT 1880 - 2:20 - +0220 1924 May 2 - 2:00 - EET 1930 Jun 21 - 3:00 - MSK 1941 Aug 25 - 1:00 C-Eur CE%sT 1943 Oct 25 - 3:00 Russia MSK/MSD 1991 Mar 31 2:00 - 2:00 E-Eur EE%sT 1995 - 2:00 EU EE%sT - -# Vatican City -# See Europe/Rome. - -############################################################################### - -# One source shows that Bulgaria, Cyprus, Finland, and Greece observe DST from -# the last Sunday in March to the last Sunday in September in 1986. -# The source shows Romania changing a day later than everybody else. -# -# According to Bernard Sieloff's source, Poland is in the MET time zone but -# uses the WE DST rules. The Western USSR uses EET+1 and ME DST rules. -# Bernard Sieloff's source claims Romania switches on the same day, but at -# 00:00 standard time (i.e., 01:00 DST). It also claims that Turkey -# switches on the same day, but switches on at 01:00 standard time -# and off at 00:00 standard time (i.e., 01:00 DST) - -# ... -# Date: Wed, 28 Jan 87 16:56:27 -0100 -# From: Tom Hofmann -# ... -# -# ...the European time rules are...standardized since 1981, when -# most European countries started DST. Before that year, only -# a few countries (UK, France, Italy) had DST, each according -# to own national rules. In 1981, however, DST started on -# 'Apr firstSun', and not on 'Mar lastSun' as in the following -# years... -# But also since 1981 there are some more national exceptions -# than listed in 'europe': Switzerland, for example, joined DST -# one year later, Denmark ended DST on 'Oct 1' instead of 'Sep -# lastSun' in 1981 - I don't know how they handle now. -# -# Finally, DST ist always from 'Apr 1' to 'Oct 1' in the -# Soviet Union (as far as I know). -# -# Tom Hofmann, Scientific Computer Center, CIBA-GEIGY AG, -# 4002 Basle, Switzerland -# ... - -# ... -# Date: Wed, 4 Feb 87 22:35:22 +0100 -# From: Dik T. Winter -# ... -# -# The information from Tom Hofmann is (as far as I know) not entirely correct. -# After a request from chongo at amdahl I tried to retrieve all information -# about DST in Europe. I was able to find all from about 1969. -# -# ...standardization on DST in Europe started in about 1977 with switches on -# first Sunday in April and last Sunday in September... -# In 1981 UK joined Europe insofar that -# the starting day for both shifted to last Sunday in March. 
And from 1982 -# the whole of Europe used DST, with switch dates April 1 and October 1 in -# the Sov[i]et Union. In 1985 the SU reverted to standard Europe[a]n switch -# dates... -# -# It should also be remembered that time-zones are not constants; e.g. -# Portugal switched in 1976 from MET (or CET) to WET with DST... -# Note also that though there were rules for switch dates not -# all countries abided to these dates, and many individual deviations -# occurred, though not since 1982 I believe. Another note: it is always -# assumed that DST is 1 hour ahead of normal time, this need not be the -# case; at least in the Netherlands there have been times when DST was 2 hours -# in advance of normal time. -# -# ... -# dik t. winter, cwi, amsterdam, nederland -# ... - -# From Bob Devine (1988-01-28): -# ... -# Greece: Last Sunday in April to last Sunday in September (iffy on dates). -# Since 1978. Change at midnight. -# ... -# Monaco: has same DST as France. -# ... diff --git a/src/timezone/data/factory b/src/timezone/data/factory deleted file mode 100644 index 75fa4a11c3..0000000000 --- a/src/timezone/data/factory +++ /dev/null @@ -1,10 +0,0 @@ -# This file is in the public domain, so clarified as of -# 2009-05-17 by Arthur David Olson. - -# For distributors who don't want to put time zone specification in -# their installation procedures. Users that run 'date' will get the -# time zone abbreviation "-00", indicating that the actual time zone -# is unknown. - -# Zone NAME GMTOFF RULES FORMAT -Zone Factory 0 - -00 diff --git a/src/timezone/data/northamerica b/src/timezone/data/northamerica deleted file mode 100644 index 6ede9dcd96..0000000000 --- a/src/timezone/data/northamerica +++ /dev/null @@ -1,3374 +0,0 @@ -# This file is in the public domain, so clarified as of -# 2009-05-17 by Arthur David Olson. - -# also includes Central America and the Caribbean - -# This file is by no means authoritative; if you think you know better, -# go ahead and edit the file (and please send any changes to -# tz@iana.org for general use in the future). For more, please see -# the file CONTRIBUTING in the tz distribution. - -# From Paul Eggert (1999-03-22): -# A reliable and entertaining source about time zones is -# Derek Howse, Greenwich time and longitude, Philip Wilson Publishers (1997). - -############################################################################### - -# United States - -# From Paul Eggert (1999-03-31): -# Howse writes (pp 121-125) that time zones were invented by -# Professor Charles Ferdinand Dowd (1825-1904), -# Principal of Temple Grove Ladies' Seminary (Saratoga Springs, NY). -# His pamphlet "A System of National Time for Railroads" (1870) -# was the result of his proposals at the Convention of Railroad Trunk Lines -# in New York City (1869-10). His 1870 proposal was based on Washington, DC, -# but in 1872-05 he moved the proposed origin to Greenwich. - -# From Paul Eggert (2016-09-21): -# Dowd's proposal left many details unresolved, such as where to draw -# lines between time zones. The key individual who made time zones -# work in the US was William Frederick Allen - railway engineer, -# managing editor of the Travelers' Guide, and secretary of the -# General Time Convention, a railway standardization group. 
Allen -# spent months in dialogs with scientific and railway leaders, -# developed a workable plan to institute time zones, and presented it -# to the General Time Convention on 1883-04-11, saying that his plan -# meant "local time would be practically abolished" - a plus for -# railway scheduling. By the next convention on 1883-10-11 nearly all -# railroads had agreed and it took effect on 1883-11-18 at 12:00. -# That Sunday was called the "day of two noons", as the eastern parts -# of the new zones observed noon twice. Allen witnessed the -# transition in New York City, writing: -# -# I heard the bells of St. Paul's strike on the old time. Four -# minutes later, obedient to the electrical signal from the Naval -# Observatory ... the time-ball made its rapid descent, the chimes -# of old Trinity rang twelve measured strokes, and local time was -# abandoned, probably forever. -# -# Most of the US soon followed suit. See: -# Bartky IR. The adoption of standard time. Technol Cult 1989 Jan;30(1):25-56. -# http://dx.doi.org/10.2307/3105430 - -# From Paul Eggert (2005-04-16): -# That 1883 transition occurred at 12:00 new time, not at 12:00 old time. -# See p 46 of David Prerau, Seize the daylight, Thunder's Mouth Press (2005). - -# From Paul Eggert (2006-03-22): -# A good source for time zone historical data in the US is -# Thomas G. Shanks, The American Atlas (5th edition), -# San Diego: ACS Publications, Inc. (1991). -# Make sure you have the errata sheet; the book is somewhat useless without it. -# It is the source for most of the pre-1991 US entries below. - -# From Paul Eggert (2001-03-06): -# Daylight Saving Time was first suggested as a joke by Benjamin Franklin -# in his whimsical essay "An Economical Project for Diminishing the Cost -# of Light" published in the Journal de Paris (1784-04-26). -# Not everyone is happy with the results: -# -# I don't really care how time is reckoned so long as there is some -# agreement about it, but I object to being told that I am saving -# daylight when my reason tells me that I am doing nothing of the kind. -# I even object to the implication that I am wasting something -# valuable if I stay in bed after the sun has risen. As an admirer -# of moonlight I resent the bossy insistence of those who want to -# reduce my time for enjoying it. At the back of the Daylight Saving -# scheme I detect the bony, blue-fingered hand of Puritanism, eager -# to push people into bed earlier, and get them up earlier, to make -# them healthy, wealthy and wise in spite of themselves. -# -# -- Robertson Davies, The diary of Samuel Marchbanks, -# Clarke, Irwin (1947), XIX, Sunday -# -# For more about the first ten years of DST in the United States, see -# Robert Garland, Ten years of daylight saving from the Pittsburgh standpoint -# (Carnegie Library of Pittsburgh, 1927). -# http://www.clpgh.org/exhibit/dst.html -# -# Shanks says that DST was called "War Time" in the US in 1918 and 1919. -# However, DST was imposed by the Standard Time Act of 1918, which -# was the first nationwide legal time standard, and apparently -# time was just called "Standard Time" or "Daylight Saving Time". - -# From Arthur David Olson: -# US Daylight Saving Time ended on the last Sunday of *October* in 1974. -# See, for example, the front page of the Saturday, 1974-10-26 -# and Sunday, 1974-10-27 editions of the Washington Post. 
- -# From Arthur David Olson: -# Before the Uniform Time Act of 1966 took effect in 1967, observance of -# Daylight Saving Time in the US was by local option, except during wartime. - -# From Arthur David Olson (2000-09-25): -# Last night I heard part of a rebroadcast of a 1945 Arch Oboler radio drama. -# In the introduction, Oboler spoke of "Eastern Peace Time." -# An AltaVista search turned up: -# http://rowayton.org/rhs/hstaug45.html -# "When the time is announced over the radio now, it is 'Eastern Peace -# Time' instead of the old familiar 'Eastern War Time.' Peace is wonderful." -# (August 1945) by way of confirmation. - -# From Joseph Gallant citing -# George H. Douglas, _The Early Days of Radio Broadcasting_ (1987): -# At 7 P.M. (Eastern War Time) [on 1945-08-14], the networks were set -# to switch to London for Attlee's address, but the American people -# never got to hear his speech live. According to one press account, -# CBS' Bob Trout was first to announce the word of Japan's surrender, -# but a few seconds later, NBC, ABC and Mutual also flashed the word -# of surrender, all of whom interrupting the bells of Big Ben in -# London which were to precede Mr. Attlee's speech. - -# From Paul Eggert (2003-02-09): It was Robert St John, not Bob Trout. From -# Myrna Oliver's obituary of St John on page B16 of today's Los Angeles Times: -# -# ... a war-weary U.S. clung to radios, awaiting word of Japan's surrender. -# Any announcement from Asia would reach St. John's New York newsroom on a -# wire service teletype machine, which had prescribed signals for major news. -# Associated Press, for example, would ring five bells before spewing out -# typed copy of an important story, and 10 bells for news "of transcendental -# importance." -# -# On Aug. 14, stalling while talking steadily into the NBC networks' open -# microphone, St. John heard five bells and waited only to hear a sixth bell, -# before announcing confidently: "Ladies and gentlemen, World War II is over. -# The Japanese have agreed to our surrender terms." -# -# He had scored a 20-second scoop on other broadcasters. - -# From Arthur David Olson (2005-08-22): -# Paul has been careful to use the "US" rules only in those locations -# that are part of the United States; this reflects the real scope of -# U.S. government action. So even though the "US" rules have changed -# in the latest release, other countries won't be affected. - -# Rule NAME FROM TO TYPE IN ON AT SAVE LETTER/S -Rule US 1918 1919 - Mar lastSun 2:00 1:00 D -Rule US 1918 1919 - Oct lastSun 2:00 0 S -Rule US 1942 only - Feb 9 2:00 1:00 W # War -Rule US 1945 only - Aug 14 23:00u 1:00 P # Peace -Rule US 1945 only - Sep lastSun 2:00 0 S -Rule US 1967 2006 - Oct lastSun 2:00 0 S -Rule US 1967 1973 - Apr lastSun 2:00 1:00 D -Rule US 1974 only - Jan 6 2:00 1:00 D -Rule US 1975 only - Feb 23 2:00 1:00 D -Rule US 1976 1986 - Apr lastSun 2:00 1:00 D -Rule US 1987 2006 - Apr Sun>=1 2:00 1:00 D -Rule US 2007 max - Mar Sun>=8 2:00 1:00 D -Rule US 2007 max - Nov Sun>=1 2:00 0 S - -# From Arthur David Olson, 2005-12-19 -# We generate the files specified below to guard against old files with -# obsolete information being left in the time zone binary directory. -# We limit the list to names that have appeared in previous versions of -# this time zone package. -# We do these as separate Zones rather than as Links to avoid problems if -# a particular place changes whether it observes DST. 
-# We put these specifications here in the northamerica file both to -# increase the chances that they'll actually get compiled and to -# avoid the need to duplicate the US rules in another file. - -# Zone NAME GMTOFF RULES FORMAT [UNTIL] -Zone EST -5:00 - EST -Zone MST -7:00 - MST -Zone HST -10:00 - HST -Zone EST5EDT -5:00 US E%sT -Zone CST6CDT -6:00 US C%sT -Zone MST7MDT -7:00 US M%sT -Zone PST8PDT -8:00 US P%sT - -# From U. S. Naval Observatory (1989-01-19): -# USA EASTERN 5 H BEHIND UTC NEW YORK, WASHINGTON -# USA EASTERN 4 H BEHIND UTC APR 3 - OCT 30 -# USA CENTRAL 6 H BEHIND UTC CHICAGO, HOUSTON -# USA CENTRAL 5 H BEHIND UTC APR 3 - OCT 30 -# USA MOUNTAIN 7 H BEHIND UTC DENVER -# USA MOUNTAIN 6 H BEHIND UTC APR 3 - OCT 30 -# USA PACIFIC 8 H BEHIND UTC L.A., SAN FRANCISCO -# USA PACIFIC 7 H BEHIND UTC APR 3 - OCT 30 -# USA ALASKA STD 9 H BEHIND UTC MOST OF ALASKA (AKST) -# USA ALASKA STD 8 H BEHIND UTC APR 3 - OCT 30 (AKDT) -# USA ALEUTIAN 10 H BEHIND UTC ISLANDS WEST OF 170W -# USA " 9 H BEHIND UTC APR 3 - OCT 30 -# USA HAWAII 10 H BEHIND UTC -# USA BERING 11 H BEHIND UTC SAMOA, MIDWAY - -# From Arthur David Olson (1989-01-21): -# The above dates are for 1988. -# Note the "AKST" and "AKDT" abbreviations, the claim that there's -# no DST in Samoa, and the claim that there is DST in Alaska and the -# Aleutians. - -# From Arthur David Olson (1988-02-13): -# Legal standard time zone names, from United States Code (1982 Edition and -# Supplement III), Title 15, Chapter 6, Section 260 and forward. First, names -# up to 1967-04-01 (when most provisions of the Uniform Time Act of 1966 -# took effect), as explained in sections 263 and 261: -# (none) -# United States standard eastern time -# United States standard mountain time -# United States standard central time -# United States standard Pacific time -# (none) -# United States standard Alaska time -# (none) -# Next, names from 1967-04-01 until 1983-11-30 (the date for -# public law 98-181): -# Atlantic standard time -# eastern standard time -# central standard time -# mountain standard time -# Pacific standard time -# Yukon standard time -# Alaska-Hawaii standard time -# Bering standard time -# And after 1983-11-30: -# Atlantic standard time -# eastern standard time -# central standard time -# mountain standard time -# Pacific standard time -# Alaska standard time -# Hawaii-Aleutian standard time -# Samoa standard time -# The law doesn't give abbreviations. -# -# From Paul Eggert (2016-12-19): -# Here are URLs for the 1918 and 1966 legislation: -# http://uscode.house.gov/statviewer.htm?volume=40&page=451 -# http://uscode.house.gov/statviewer.htm?volume=80&page=108 -# Although the 1918 names were officially "United States Standard -# Eastern Time" and similarly for "Central", "Mountain", "Pacific", -# and "Alaska", in practice "Standard" was placed just before "Time", -# as codified in 1966. In practice, Alaska time was abbreviated "AST" -# before 1968. Summarizing the 1967 name changes: -# 1918 names 1967 names -# -08 Standard Pacific Time (PST) Pacific standard time (PST) -# -09 (unofficial) Yukon (YST) Yukon standard time (YST) -# -10 Standard Alaska Time (AST) Alaska-Hawaii standard time (AHST) -# -11 (unofficial) Nome (NST) Bering standard time (BST) -# -# From Paul Eggert (2000-01-08), following a heads-up from Rives McDow: -# Public law 106-564 (2000-12-23) introduced ... "Chamorro Standard Time" -# for time in Guam and the Northern Marianas. See the file "australasia". 
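The US rule lines and the EST5EDT-style zone entries above use the tz source's day-of-month notation: "Sun>=8" means the first Sunday falling on or after the 8th, "lastSun" the last Sunday of the month, and the rule's LETTER ("D" or "S") is substituted for "%s" in a zone FORMAT such as "E%sT" to give "EDT" or "EST". A minimal Python sketch, for illustration only (the helper name on_or_after is invented here, not part of this patch or of zic):

    import datetime

    def on_or_after(year, month, day, weekday=6):
        # Resolve an ON field such as "Sun>=8": the first Sunday
        # (weekday 6 in Python's Monday=0 convention) on or after `day`.
        d = datetime.date(year, month, day)
        while d.weekday() != weekday:
            d += datetime.timedelta(days=1)
        return d

    year = 2018                              # any year covered by "2007 max"
    print(on_or_after(year, 3, 8))           # Mar Sun>=8  -> 2018-03-11
    print(on_or_after(year, 11, 1))          # Nov Sun>=1  -> 2018-11-04
    print("E%sT" % "D", "E%sT" % "S")        # FORMAT %s + LETTER -> EDT EST

In the EST5EDT zone above, for example, those two dates bound the period in which the abbreviation is EDT rather than EST.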
-# -# From Paul Eggert (2015-04-17): -# HST and HDT are standardized abbreviations for Hawaii-Aleutian -# standard and daylight times. See section 9.47 (p 234) of the -# U.S. Government Printing Office Style Manual (2008) -# http://www.gpo.gov/fdsys/pkg/GPO-STYLEMANUAL-2008/pdf/GPO-STYLEMANUAL-2008.pdf - -# From Arthur David Olson, 2005-08-09 -# The following was signed into law on 2005-08-08. -# -# H.R. 6, Energy Policy Act of 2005, SEC. 110. DAYLIGHT SAVINGS. -# (a) Amendment.--Section 3(a) of the Uniform Time Act of 1966 (15 -# U.S.C. 260a(a)) is amended-- -# (1) by striking "first Sunday of April" and inserting "second -# Sunday of March"; and -# (2) by striking "last Sunday of October" and inserting "first -# Sunday of November'. -# (b) Effective Date.--Subsection (a) shall take effect 1 year after the -# date of enactment of this Act or March 1, 2007, whichever is later. -# (c) Report to Congress.--Not later than 9 months after the effective -# date stated in subsection (b), the Secretary shall report to Congress -# on the impact of this section on energy consumption in the United -# States. -# (d) Right to Revert.--Congress retains the right to revert the -# Daylight Saving Time back to the 2005 time schedules once the -# Department study is complete. - -# US eastern time, represented by New York - -# Connecticut, Delaware, District of Columbia, most of Florida, -# Georgia, southeast Indiana (Dearborn and Ohio counties), eastern Kentucky -# (except America/Kentucky/Louisville below), Maine, Maryland, Massachusetts, -# New Hampshire, New Jersey, New York, North Carolina, Ohio, -# Pennsylvania, Rhode Island, South Carolina, eastern Tennessee, -# Vermont, Virginia, West Virginia - -# From Dave Cantor (2004-11-02): -# Early this summer I had the occasion to visit the Mount Washington -# Observatory weather station atop (of course!) Mount Washington [, NH].... -# One of the staff members said that the station was on Eastern Standard Time -# and didn't change their clocks for Daylight Saving ... so that their -# reports will always have times which are 5 hours behind UTC. - -# From Paul Eggert (2005-08-26): -# According to today's Huntsville Times -# http://www.al.com/news/huntsvilletimes/index.ssf?/base/news/1125047783228320.xml&coll=1 -# a few towns on Alabama's "eastern border with Georgia, such as Phenix City -# in Russell County, Lanett in Chambers County and some towns in Lee County, -# set their watches and clocks on Eastern time." It quotes H.H. "Bubba" -# Roberts, city administrator in Phenix City. as saying "We are in the Central -# time zone, but we do go by the Eastern time zone because so many people work -# in Columbus." -# -# From Paul Eggert (2017-02-22): -# Four cities are involved. The two not mentioned above are Smiths Station -# and Valley. Barbara Brooks, Valley's assistant treasurer, heard it started -# because West Point Pepperell textile mills were in Alabama while the -# corporate office was in Georgia, and residents voted to keep Eastern -# time even after the mills closed. See: Kazek K. Did you know which -# Alabama towns are in a different time zone? al.com 2017-02-06. -# http://www.al.com/living/index.ssf/2017/02/do_you_know_which_alabama_town.html - -# From Paul Eggert (2014-09-06): -# Monthly Notices of the Royal Astronomical Society 44, 4 (1884-02-08), 208 -# says that New York City Hall time was 3 minutes 58.4 seconds fast of -# Eastern time (i.e., -4:56:01.6) just before the 1883 switch. Round to the -# nearest second. 
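The rounding described just above, together with the New York zone line that follows, can be checked with a little arithmetic: local mean time 3 min 58.4 s fast of Eastern (-5:00) is a UT offset of -4:56:01.6, which rounds to the -4:56:02 GMTOFF below, and noon of the new Eastern time on 1883-11-18 is 12:03:58 in the old local mean time, the UNTIL stamp below. A small Python check, for illustration only:

    # UT offsets in seconds, negative = west of Greenwich (as in GMTOFF).
    eastern = -5 * 3600
    lmt = eastern + (3 * 60 + 58.4)      # City Hall ran 3m58.4s fast of Eastern
    print(round(lmt))                    # -17762 seconds, i.e. -4:56:02

    # The switch came at 12:00 noon of the new (Eastern) time, i.e. 17:00 UT;
    # expressed in the old local mean time, that instant is the UNTIL stamp.
    until = 17 * 3600 + round(lmt)       # seconds after local midnight
    print(until // 3600, (until % 3600) // 60, until % 60)   # 12 3 58

The same pattern, an LMT offset plus a local-time UNTIL at the 1883-11-18 noon changeover, appears in the Chicago, Denver, and Los Angeles zones below.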
- -# Rule NAME FROM TO TYPE IN ON AT SAVE LETTER -Rule NYC 1920 only - Mar lastSun 2:00 1:00 D -Rule NYC 1920 only - Oct lastSun 2:00 0 S -Rule NYC 1921 1966 - Apr lastSun 2:00 1:00 D -Rule NYC 1921 1954 - Sep lastSun 2:00 0 S -Rule NYC 1955 1966 - Oct lastSun 2:00 0 S -# Zone NAME GMTOFF RULES FORMAT [UNTIL] -Zone America/New_York -4:56:02 - LMT 1883 Nov 18 12:03:58 - -5:00 US E%sT 1920 - -5:00 NYC E%sT 1942 - -5:00 US E%sT 1946 - -5:00 NYC E%sT 1967 - -5:00 US E%sT - -# US central time, represented by Chicago - -# Alabama, Arkansas, Florida panhandle (Bay, Calhoun, Escambia, -# Gulf, Holmes, Jackson, Okaloosa, Santa Rosa, Walton, and -# Washington counties), Illinois, western Indiana -# (Gibson, Jasper, Lake, LaPorte, Newton, Porter, Posey, Spencer, -# Vanderburgh, and Warrick counties), Iowa, most of Kansas, western -# Kentucky, Louisiana, Minnesota, Mississippi, Missouri, eastern -# Nebraska, eastern North Dakota, Oklahoma, eastern South Dakota, -# western Tennessee, most of Texas, Wisconsin - -# From Larry M. Smith (2006-04-26) re Wisconsin: -# http://www.legis.state.wi.us/statutes/Stat0175.pdf ... -# is currently enforced at the 01:00 time of change. Because the local -# "bar time" in the state corresponds to 02:00, a number of citations -# are issued for the "sale of class 'B' alcohol after prohibited -# hours" within the deviated hour of this change every year.... -# -# From Douglas R. Bomberg (2007-03-12): -# Wisconsin has enacted (nearly eleventh-hour) legislation to get WI -# Statue 175 closer in synch with the US Congress' intent.... -# http://www.legis.state.wi.us/2007/data/acts/07Act3.pdf - -# From an email administrator of the City of Fort Pierre, SD (2015-12-21): -# Fort Pierre is technically located in the Mountain time zone as is -# the rest of Stanley County. Most of Stanley County and Fort Pierre -# uses the Central time zone due to doing most of their business in -# Pierre so it simplifies schedules. I have lived in Stanley County -# all my life and it has been that way since I can remember. (43 years!) -# -# From Paul Eggert (2015-12-25): -# Assume this practice predates 1970, so Fort Pierre can use America/Chicago. - -# Rule NAME FROM TO TYPE IN ON AT SAVE LETTER -Rule Chicago 1920 only - Jun 13 2:00 1:00 D -Rule Chicago 1920 1921 - Oct lastSun 2:00 0 S -Rule Chicago 1921 only - Mar lastSun 2:00 1:00 D -Rule Chicago 1922 1966 - Apr lastSun 2:00 1:00 D -Rule Chicago 1922 1954 - Sep lastSun 2:00 0 S -Rule Chicago 1955 1966 - Oct lastSun 2:00 0 S -# Zone NAME GMTOFF RULES FORMAT [UNTIL] -Zone America/Chicago -5:50:36 - LMT 1883 Nov 18 12:09:24 - -6:00 US C%sT 1920 - -6:00 Chicago C%sT 1936 Mar 1 2:00 - -5:00 - EST 1936 Nov 15 2:00 - -6:00 Chicago C%sT 1942 - -6:00 US C%sT 1946 - -6:00 Chicago C%sT 1967 - -6:00 US C%sT -# Oliver County, ND switched from mountain to central time on 1992-10-25. -Zone America/North_Dakota/Center -6:45:12 - LMT 1883 Nov 18 12:14:48 - -7:00 US M%sT 1992 Oct 25 2:00 - -6:00 US C%sT -# Morton County, ND, switched from mountain to central time on -# 2003-10-26, except for the area around Mandan which was already central time. -# See . -# Officially this switch also included part of Sioux County, and -# Jones, Mellette, and Todd Counties in South Dakota; -# but in practice these other counties were already observing central time. -# See . 
-Zone America/North_Dakota/New_Salem -6:45:39 - LMT 1883 Nov 18 12:14:21 - -7:00 US M%sT 2003 Oct 26 2:00 - -6:00 US C%sT - -# From Josh Findley (2011-01-21): -# ...it appears that Mercer County, North Dakota, changed from the -# mountain time zone to the central time zone at the last transition from -# daylight-saving to standard time (on Nov. 7, 2010): -# http://www.gpo.gov/fdsys/pkg/FR-2010-09-29/html/2010-24376.htm -# http://www.bismarcktribune.com/news/local/article_1eb1b588-c758-11df-b472-001cc4c03286.html - -# From Andy Lipscomb (2011-01-24): -# ...according to the Census Bureau, the largest city is Beulah (although -# it's commonly referred to as Beulah-Hazen, with Hazen being the next -# largest city in Mercer County). Google Maps places Beulah's city hall -# at 47 degrees 15' 51" N, 101 degrees 46' 40" W, which yields an offset -# of 6h47'07". - -Zone America/North_Dakota/Beulah -6:47:07 - LMT 1883 Nov 18 12:12:53 - -7:00 US M%sT 2010 Nov 7 2:00 - -6:00 US C%sT - -# US mountain time, represented by Denver -# -# Colorado, far western Kansas, Montana, western -# Nebraska, Nevada border (Jackpot, Owyhee, and Mountain City), -# New Mexico, southwestern North Dakota, -# western South Dakota, far western Texas (El Paso County, Hudspeth County, -# and Pine Springs and Nickel Creek in Culberson County), Utah, Wyoming -# -# Rule NAME FROM TO TYPE IN ON AT SAVE LETTER -Rule Denver 1920 1921 - Mar lastSun 2:00 1:00 D -Rule Denver 1920 only - Oct lastSun 2:00 0 S -Rule Denver 1921 only - May 22 2:00 0 S -Rule Denver 1965 1966 - Apr lastSun 2:00 1:00 D -Rule Denver 1965 1966 - Oct lastSun 2:00 0 S -# Zone NAME GMTOFF RULES FORMAT [UNTIL] -Zone America/Denver -6:59:56 - LMT 1883 Nov 18 12:00:04 - -7:00 US M%sT 1920 - -7:00 Denver M%sT 1942 - -7:00 US M%sT 1946 - -7:00 Denver M%sT 1967 - -7:00 US M%sT - -# US Pacific time, represented by Los Angeles -# -# California, northern Idaho (Benewah, Bonner, Boundary, Clearwater, -# Kootenai, Latah, Lewis, Nez Perce, and Shoshone counties, Idaho county -# north of the Salmon River, and the towns of Burgdorf and Warren), -# Nevada (except West Wendover), Oregon (except the northern 3/4 of -# Malheur county), and Washington - -# From Paul Eggert (2016-08-20): -# In early February 1948, in response to California's electricity shortage, -# PG&E changed power frequency from 60 to 59.5 Hz during daylight hours, -# causing electric clocks to lose six minutes per day. (This did not change -# legal time, and is not part of the data here.) See: -# Ross SA. An energy crisis from the past: Northern California in 1948. -# Working Paper No. 8, Institute of Governmental Studies, UC Berkeley, -# 1973-11. http://escholarship.org/uc/item/8x22k30c -# -# In another measure to save electricity, DST was instituted from 1948-03-14 -# at 02:01 to 1949-01-16 at 02:00, with the governor having the option to move -# the fallback transition earlier. See pages 3-4 of: -# http://clerk.assembly.ca.gov/sites/clerk.assembly.ca.gov/files/archive/Statutes/1948/48Vol1_Chapters.pdf -# -# In response: -# -# Governor Warren received a torrent of objecting mail, and it is not too much -# to speculate that the objections to Daylight Saving Time were one important -# factor in the defeat of the Dewey-Warren Presidential ticket in California. -# -- Ross, p 25 -# -# On December 8 the governor exercised the option, setting the date to January 1 -# (LA Times 1948-12-09). The transition time was 02:00 (LA Times 1949-01-01). 
-# -# Despite the controversy, in 1949 California voters approved Proposition 12, -# which established DST from April's last Sunday at 01:00 until September's -# last Sunday at 02:00. This was amended by 1962's Proposition 6, which changed -# the fall-back date to October's last Sunday. See: -# http://repository.uchastings.edu/cgi/viewcontent.cgi?article=1501&context=ca_ballot_props -# http://repository.uchastings.edu/cgi/viewcontent.cgi?article=1636&context=ca_ballot_props -# -# Rule NAME FROM TO TYPE IN ON AT SAVE LETTER -Rule CA 1948 only - Mar 14 2:01 1:00 D -Rule CA 1949 only - Jan 1 2:00 0 S -Rule CA 1950 1966 - Apr lastSun 1:00 1:00 D -Rule CA 1950 1961 - Sep lastSun 2:00 0 S -Rule CA 1962 1966 - Oct lastSun 2:00 0 S -# Zone NAME GMTOFF RULES FORMAT [UNTIL] -Zone America/Los_Angeles -7:52:58 - LMT 1883 Nov 18 12:07:02 - -8:00 US P%sT 1946 - -8:00 CA P%sT 1967 - -8:00 US P%sT - -# Alaska -# AK%sT is the modern abbreviation for -09 per USNO. -# -# From Paul Eggert (2001-05-30): -# Howse writes that Alaska switched from the Julian to the Gregorian calendar, -# and from east-of-GMT to west-of-GMT days, when the US bought it from Russia. -# This was on 1867-10-18, a Friday; the previous day was 1867-10-06 Julian, -# also a Friday. Include only the time zone part of this transition, -# ignoring the switch from Julian to Gregorian, since we can't represent -# the Julian calendar. -# -# As far as we know, none of the exact locations mentioned below were -# permanently inhabited in 1867 by anyone using either calendar. -# (Yakutat was colonized by the Russians in 1799, but the settlement -# was destroyed in 1805 by a Yakutat-kon war party.) However, there -# were nearby inhabitants in some cases and for our purposes perhaps -# it's best to simply use the official transition. - -# From Paul Eggert (2014-07-18): -# One opinion of the early-1980s turmoil in Alaska over time zones and -# daylight saving time appeared as graffiti on a Juneau airport wall: -# "Welcome to Juneau. Please turn your watch back to the 19th century." -# See: Turner W. Alaska's four time zones now two. NY Times 1983-11-01. -# http://www.nytimes.com/1983/11/01/us/alaska-s-four-time-zones-now-two.html -# -# Steve Ferguson (2011-01-31) referred to the following source: -# Norris F. Keeping time in Alaska: national directives, local response. -# Alaska History 2001;16(1-2). -# http://alaskahistoricalsociety.org/discover-alaska/glimpses-of-the-past/keeping-time-in-alaska/ - -# From Arthur David Olson (2011-02-01): -# Here's database-relevant material from the 2001 "Alaska History" article: -# -# On September 20 [1979]...DOT...officials decreed that on April 27, -# 1980, Juneau and other nearby communities would move to Yukon Time. -# Sitka, Petersburg, Wrangell, and Ketchikan, however, would remain on -# Pacific Time. -# -# ...on September 22, 1980, DOT Secretary Neil E. Goldschmidt rescinded the -# Department's September 1979 decision. Juneau and other communities in -# northern Southeast reverted to Pacific Time on October 26. -# -# On October 28 [1983]...the Metlakatla Indian Community Council voted -# unanimously to keep the reservation on Pacific Time. -# -# According to DOT official Joanne Petrie, Indian reservations are not -# bound to follow time zones imposed by neighboring jurisdictions. -# -# (The last is consistent with how the database now handles the Navajo -# Nation.) 
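Two bits of arithmetic recur in these LMT entries: a local-mean-time offset is just longitude converted at four minutes of time per degree (the 101°46'40" W figure quoted for Beulah above works out to 6:47:07 behind UT), and the 1867 Alaska purchase moved the territory from east-of-Greenwich to west-of-Greenwich day reckoning, so Juneau's pre-1867 LMT of +15:02:19 in the zone table below becomes -8:57:41 afterwards, one full day less. A rough Python check, for illustration only (the Beulah coordinates are the ones quoted earlier, not new data):

    # Longitude -> local mean time: the Earth turns 360 degrees in 24 hours,
    # so one degree of longitude equals 4 minutes (240 seconds) of time.
    west = 101 + 46/60 + 40/3600         # Beulah city hall, degrees west
    secs = round(west * 240)             # seconds behind UT
    print(secs // 3600, (secs % 3600) // 60, secs % 60)   # 6 47 7 -> -6:47:07

    # Alaska, 1867: same meridian, but the day reckoning moved from east of
    # Greenwich to west of Greenwich, a shift of one full day.
    pre = 15 * 3600 + 2 * 60 + 19        # Juneau LMT before 1867, +15:02:19
    print(pre - 24 * 3600)               # -32261 seconds, i.e. -8:57:41

A similar sign change appears in the other Alaskan entries in the table below.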
- -# From Arthur David Olson (2011-02-09): -# I just spoke by phone with a staff member at the Metlakatla Indian -# Community office (using contact information available at -# http://www.commerce.state.ak.us/dca/commdb/CIS.cfm?Comm_Boro_name=Metlakatla -# It's shortly after 1:00 here on the east coast of the United States; -# the staffer said it was shortly after 10:00 there. When I asked whether -# that meant they were on Pacific time, they said no - they were on their -# own time. I asked about daylight saving; they said it wasn't used. I -# did not inquire about practices in the past. - -# From Arthur David Olson (2011-08-17): -# For lack of better information, assume that Metlakatla's -# abandonment of use of daylight saving resulted from the 1983 vote. - -# From Steffen Thorsen (2015-11-09): -# It seems Metlakatla did go off PST on Sunday, November 1, changing -# their time to AKST and are going to follow Alaska's DST, switching -# between AKST and AKDT from now on.... -# http://www.krbd.org/2015/10/30/annette-island-times-they-are-a-changing/ - -# Zone NAME GMTOFF RULES FORMAT [UNTIL] -Zone America/Juneau 15:02:19 - LMT 1867 Oct 18 - -8:57:41 - LMT 1900 Aug 20 12:00 - -8:00 - PST 1942 - -8:00 US P%sT 1946 - -8:00 - PST 1969 - -8:00 US P%sT 1980 Apr 27 2:00 - -9:00 US Y%sT 1980 Oct 26 2:00 - -8:00 US P%sT 1983 Oct 30 2:00 - -9:00 US Y%sT 1983 Nov 30 - -9:00 US AK%sT -Zone America/Sitka 14:58:47 - LMT 1867 Oct 18 - -9:01:13 - LMT 1900 Aug 20 12:00 - -8:00 - PST 1942 - -8:00 US P%sT 1946 - -8:00 - PST 1969 - -8:00 US P%sT 1983 Oct 30 2:00 - -9:00 US Y%sT 1983 Nov 30 - -9:00 US AK%sT -Zone America/Metlakatla 15:13:42 - LMT 1867 Oct 18 - -8:46:18 - LMT 1900 Aug 20 12:00 - -8:00 - PST 1942 - -8:00 US P%sT 1946 - -8:00 - PST 1969 - -8:00 US P%sT 1983 Oct 30 2:00 - -8:00 - PST 2015 Nov 1 2:00 - -9:00 US AK%sT -Zone America/Yakutat 14:41:05 - LMT 1867 Oct 18 - -9:18:55 - LMT 1900 Aug 20 12:00 - -9:00 - YST 1942 - -9:00 US Y%sT 1946 - -9:00 - YST 1969 - -9:00 US Y%sT 1983 Nov 30 - -9:00 US AK%sT -Zone America/Anchorage 14:00:24 - LMT 1867 Oct 18 - -9:59:36 - LMT 1900 Aug 20 12:00 - -10:00 - AST 1942 - -10:00 US A%sT 1967 Apr - -10:00 - AHST 1969 - -10:00 US AH%sT 1983 Oct 30 2:00 - -9:00 US Y%sT 1983 Nov 30 - -9:00 US AK%sT -Zone America/Nome 12:58:21 - LMT 1867 Oct 18 - -11:01:38 - LMT 1900 Aug 20 12:00 - -11:00 - NST 1942 - -11:00 US N%sT 1946 - -11:00 - NST 1967 Apr - -11:00 - BST 1969 - -11:00 US B%sT 1983 Oct 30 2:00 - -9:00 US Y%sT 1983 Nov 30 - -9:00 US AK%sT -Zone America/Adak 12:13:21 - LMT 1867 Oct 18 - -11:46:38 - LMT 1900 Aug 20 12:00 - -11:00 - NST 1942 - -11:00 US N%sT 1946 - -11:00 - NST 1967 Apr - -11:00 - BST 1969 - -11:00 US B%sT 1983 Oct 30 2:00 - -10:00 US AH%sT 1983 Nov 30 - -10:00 US H%sT -# The following switches don't quite make our 1970 cutoff. -# -# Shanks writes that part of southwest Alaska (e.g. Aniak) -# switched from -11:00 to -10:00 on 1968-09-22 at 02:00, -# and another part (e.g. Akiak) made the same switch five weeks later. -# -# From David Flater (2004-11-09): -# In e-mail, 2004-11-02, Ray Hudson, historian/liaison to the Unalaska -# Historic Preservation Commission, provided this information, which -# suggests that Unalaska deviated from statutory time from early 1967 -# possibly until 1983: -# -# Minutes of the Unalaska City Council Meeting, January 10, 1967: -# "Except for St. Paul and Akutan, Unalaska is the only important -# location not on Alaska Standard Time. 
The following resolution was -# made by William Robinson and seconded by Henry Swanson: Be it -# resolved that the City of Unalaska hereby goes to Alaska Standard -# Time as of midnight Friday, January 13, 1967 (1 A.M. Saturday, -# January 14, Alaska Standard Time.) This resolution was passed with -# three votes for and one against." - -# Hawaii - -# From Arthur David Olson (2010-12-09): -# "Hawaiian Time" by Robert C. Schmitt and Doak C. Cox appears on pages 207-225 -# of volume 26 of The Hawaiian Journal of History (1992). As of 2010-12-09, -# the article is available at -# http://evols.library.manoa.hawaii.edu/bitstream/10524/239/2/JL26215.pdf -# and indicates that standard time was adopted effective noon, January -# 13, 1896 (page 218), that in "1933, the Legislature decreed daylight -# saving for the period between the last Sunday of each April and the -# last Sunday of each September, but less than a month later repealed the -# act," (page 220), that year-round daylight saving time was in effect -# from 1942-02-09 to 1945-09-30 (page 221, with no time of day given for -# when clocks changed) and that clocks were changed by 30 minutes -# effective the second Sunday of June, 1947 (page 219, with no time of -# day given for when clocks changed). A footnote for the 1933 changes -# cites Session Laws of Hawaii 1933, "Act. 90 (approved 26 Apr. 1933) -# and Act 163 (approved 21 May 1933)." - -# From Arthur David Olson (2011-01-19): -# The following is from "Laws of the Territory of Hawaii Passed by the -# Seventeenth Legislature: Regular Session 1933," available (as of -# 2011-01-19) at American University's Pence Law Library. Page 85: "Act -# 90...At 2 o'clock ante meridian of the last Sunday in April of each -# year, the standard time of this Territory shall be advanced one -# hour...This Act shall take effect upon its approval. Approved this 26th -# day of April, A. D. 1933. LAWRENCE M JUDD, Governor of the Territory of -# Hawaii." Page 172: "Act 163...Act 90 of the Session Laws of 1933 is -# hereby repealed...This Act shall take effect upon its approval, upon -# which date the standard time of this Territory shall be restored to -# that existing immediately prior to the taking effect of said Act 90. -# Approved this 21st day of May, A. D. 1933. LAWRENCE M. JUDD, Governor -# of the Territory of Hawaii." -# -# Note that 1933-05-21 was a Sunday. -# We're left to guess the time of day when Act 163 was approved; guess noon. - -# Zone NAME GMTOFF RULES FORMAT [UNTIL] -Zone Pacific/Honolulu -10:31:26 - LMT 1896 Jan 13 12:00 - -10:30 - HST 1933 Apr 30 2:00 - -10:30 1:00 HDT 1933 May 21 12:00 - -10:30 - HST 1942 Feb 9 2:00 - -10:30 1:00 HDT 1945 Sep 30 2:00 - -10:30 - HST 1947 Jun 8 2:00 - -10:00 - HST - -# Now we turn to US areas that have diverged from the consensus since 1970. - -# Arizona mostly uses MST. - -# From Paul Eggert (2002-10-20): -# -# The information in the rest of this paragraph is derived from the -# Daylight Saving Time web page -# (2002-01-23) -# maintained by the Arizona State Library, Archives and Public Records. -# Between 1944-01-01 and 1944-04-01 the State of Arizona used standard -# time, but by federal law railroads, airlines, bus lines, military -# personnel, and some engaged in interstate commerce continued to -# observe war (i.e., daylight saving) time. The 1944-03-17 Phoenix -# Gazette says that was the date the law changed, and that 04-01 was -# the date the state's clocks would change. 
In 1945 the State of -# Arizona used standard time all year, again with exceptions only as -# mandated by federal law. Arizona observed DST in 1967, but Arizona -# Laws 1968, ch. 183 (effective 1968-03-21) repealed DST. -# -# Shanks says the 1944 experiment came to an end on 1944-03-17. -# Go with the Arizona State Library instead. - -# Zone NAME GMTOFF RULES FORMAT [UNTIL] -Zone America/Phoenix -7:28:18 - LMT 1883 Nov 18 11:31:42 - -7:00 US M%sT 1944 Jan 1 0:01 - -7:00 - MST 1944 Apr 1 0:01 - -7:00 US M%sT 1944 Oct 1 0:01 - -7:00 - MST 1967 - -7:00 US M%sT 1968 Mar 21 - -7:00 - MST -# From Arthur David Olson (1988-02-13): -# A writer from the Inter Tribal Council of Arizona, Inc., -# notes in private correspondence dated 1987-12-28 that "Presently, only the -# Navajo Nation participates in the Daylight Saving Time policy, due to its -# large size and location in three states." (The "only" means that other -# tribal nations don't use DST.) -# -# From Paul Eggert (2013-08-26): -# See America/Denver for a zone appropriate for the Navajo Nation. - -# Southern Idaho (Ada, Adams, Bannock, Bear Lake, Bingham, Blaine, -# Boise, Bonneville, Butte, Camas, Canyon, Caribou, Cassia, Clark, -# Custer, Elmore, Franklin, Fremont, Gem, Gooding, Jefferson, Jerome, -# Lemhi, Lincoln, Madison, Minidoka, Oneida, Owyhee, Payette, Power, -# Teton, Twin Falls, Valley, Washington counties, and the southern -# quarter of Idaho county) and eastern Oregon (most of Malheur County) -# switched four weeks late in 1974. -# -# Zone NAME GMTOFF RULES FORMAT [UNTIL] -Zone America/Boise -7:44:49 - LMT 1883 Nov 18 12:15:11 - -8:00 US P%sT 1923 May 13 2:00 - -7:00 US M%sT 1974 - -7:00 - MST 1974 Feb 3 2:00 - -7:00 US M%sT - -# Indiana -# -# For a map of Indiana's time zone regions, see: -# http://en.wikipedia.org/wiki/Time_in_Indiana -# -# From Paul Eggert (2007-08-17): -# Since 1970, most of Indiana has been like America/Indiana/Indianapolis, -# with the following exceptions: -# -# - Gibson, Jasper, Lake, LaPorte, Newton, Porter, Posey, Spencer, -# Vanderburgh, and Warrick counties have been like America/Chicago. -# -# - Dearborn and Ohio counties have been like America/New_York. -# -# - Clark, Floyd, and Harrison counties have been like -# America/Kentucky/Louisville. -# -# - Crawford, Daviess, Dubois, Knox, Martin, Perry, Pike, Pulaski, Starke, -# and Switzerland counties have their own time zone histories as noted below. -# -# Shanks partitioned Indiana into 345 regions, each with its own time history, -# and wrote "Even newspaper reports present contradictory information." -# Those Hoosiers! Such a flighty and changeable people! -# Fortunately, most of the complexity occurred before our cutoff date of 1970. -# -# Other than Indianapolis, the Indiana place names are so nondescript -# that they would be ambiguous if we left them at the 'America' level. -# So we reluctantly put them all in a subdirectory 'America/Indiana'. - -# From Paul Eggert (2014-06-26): -# https://www.federalregister.gov/articles/2006/01/20/06-563/standard-time-zone-boundary-in-the-state-of-indiana -# says "DOT is relocating the time zone boundary in Indiana to move Starke, -# Pulaski, Knox, Daviess, Martin, Pike, Dubois, and Perry Counties from the -# Eastern Time Zone to the Central Time Zone.... The effective date of -# this rule is 2 a.m. EST Sunday, April 2, 2006, which is the -# changeover date from standard time to Daylight Saving Time." 
-# Strictly speaking, this meant the affected counties changed their -# clocks twice that night, but this obviously was in error. The intent -# was that 01:59:59 EST be followed by 02:00:00 CDT. - -# From Gwillim Law (2007-02-10): -# The Associated Press has been reporting that Pulaski County, Indiana is -# going to switch from Central to Eastern Time on March 11, 2007.... -# http://www.indystar.com/apps/pbcs.dll/article?AID=/20070207/LOCAL190108/702070524/0/LOCAL - -# Rule NAME FROM TO TYPE IN ON AT SAVE LETTER -Rule Indianapolis 1941 only - Jun 22 2:00 1:00 D -Rule Indianapolis 1941 1954 - Sep lastSun 2:00 0 S -Rule Indianapolis 1946 1954 - Apr lastSun 2:00 1:00 D -# Zone NAME GMTOFF RULES FORMAT [UNTIL] -Zone America/Indiana/Indianapolis -5:44:38 - LMT 1883 Nov 18 12:15:22 - -6:00 US C%sT 1920 - -6:00 Indianapolis C%sT 1942 - -6:00 US C%sT 1946 - -6:00 Indianapolis C%sT 1955 Apr 24 2:00 - -5:00 - EST 1957 Sep 29 2:00 - -6:00 - CST 1958 Apr 27 2:00 - -5:00 - EST 1969 - -5:00 US E%sT 1971 - -5:00 - EST 2006 - -5:00 US E%sT -# -# Eastern Crawford County, Indiana, left its clocks alone in 1974, -# as well as from 1976 through 2005. -# Rule NAME FROM TO TYPE IN ON AT SAVE LETTER -Rule Marengo 1951 only - Apr lastSun 2:00 1:00 D -Rule Marengo 1951 only - Sep lastSun 2:00 0 S -Rule Marengo 1954 1960 - Apr lastSun 2:00 1:00 D -Rule Marengo 1954 1960 - Sep lastSun 2:00 0 S -# Zone NAME GMTOFF RULES FORMAT [UNTIL] -Zone America/Indiana/Marengo -5:45:23 - LMT 1883 Nov 18 12:14:37 - -6:00 US C%sT 1951 - -6:00 Marengo C%sT 1961 Apr 30 2:00 - -5:00 - EST 1969 - -5:00 US E%sT 1974 Jan 6 2:00 - -6:00 1:00 CDT 1974 Oct 27 2:00 - -5:00 US E%sT 1976 - -5:00 - EST 2006 - -5:00 US E%sT -# -# Daviess, Dubois, Knox, and Martin Counties, Indiana, -# switched from eastern to central time in April 2006, then switched back -# in November 2007. -# Rule NAME FROM TO TYPE IN ON AT SAVE LETTER -Rule Vincennes 1946 only - Apr lastSun 2:00 1:00 D -Rule Vincennes 1946 only - Sep lastSun 2:00 0 S -Rule Vincennes 1953 1954 - Apr lastSun 2:00 1:00 D -Rule Vincennes 1953 1959 - Sep lastSun 2:00 0 S -Rule Vincennes 1955 only - May 1 0:00 1:00 D -Rule Vincennes 1956 1963 - Apr lastSun 2:00 1:00 D -Rule Vincennes 1960 only - Oct lastSun 2:00 0 S -Rule Vincennes 1961 only - Sep lastSun 2:00 0 S -Rule Vincennes 1962 1963 - Oct lastSun 2:00 0 S -# Zone NAME GMTOFF RULES FORMAT [UNTIL] -Zone America/Indiana/Vincennes -5:50:07 - LMT 1883 Nov 18 12:09:53 - -6:00 US C%sT 1946 - -6:00 Vincennes C%sT 1964 Apr 26 2:00 - -5:00 - EST 1969 - -5:00 US E%sT 1971 - -5:00 - EST 2006 Apr 2 2:00 - -6:00 US C%sT 2007 Nov 4 2:00 - -5:00 US E%sT -# -# Perry County, Indiana, switched from eastern to central time in April 2006. 
-# Rule NAME FROM TO TYPE IN ON AT SAVE LETTER -Rule Perry 1946 only - Apr lastSun 2:00 1:00 D -Rule Perry 1946 only - Sep lastSun 2:00 0 S -Rule Perry 1953 1954 - Apr lastSun 2:00 1:00 D -Rule Perry 1953 1959 - Sep lastSun 2:00 0 S -Rule Perry 1955 only - May 1 0:00 1:00 D -Rule Perry 1956 1963 - Apr lastSun 2:00 1:00 D -Rule Perry 1960 only - Oct lastSun 2:00 0 S -Rule Perry 1961 only - Sep lastSun 2:00 0 S -Rule Perry 1962 1963 - Oct lastSun 2:00 0 S -# Zone NAME GMTOFF RULES FORMAT [UNTIL] -Zone America/Indiana/Tell_City -5:47:03 - LMT 1883 Nov 18 12:12:57 - -6:00 US C%sT 1946 - -6:00 Perry C%sT 1964 Apr 26 2:00 - -5:00 - EST 1969 - -5:00 US E%sT 1971 - -5:00 - EST 2006 Apr 2 2:00 - -6:00 US C%sT -# -# Pike County, Indiana moved from central to eastern time in 1977, -# then switched back in 2006, then switched back again in 2007. -# Rule NAME FROM TO TYPE IN ON AT SAVE LETTER -Rule Pike 1955 only - May 1 0:00 1:00 D -Rule Pike 1955 1960 - Sep lastSun 2:00 0 S -Rule Pike 1956 1964 - Apr lastSun 2:00 1:00 D -Rule Pike 1961 1964 - Oct lastSun 2:00 0 S -# Zone NAME GMTOFF RULES FORMAT [UNTIL] -Zone America/Indiana/Petersburg -5:49:07 - LMT 1883 Nov 18 12:10:53 - -6:00 US C%sT 1955 - -6:00 Pike C%sT 1965 Apr 25 2:00 - -5:00 - EST 1966 Oct 30 2:00 - -6:00 US C%sT 1977 Oct 30 2:00 - -5:00 - EST 2006 Apr 2 2:00 - -6:00 US C%sT 2007 Nov 4 2:00 - -5:00 US E%sT -# -# Starke County, Indiana moved from central to eastern time in 1991, -# then switched back in 2006. -# From Arthur David Olson (1991-10-28): -# An article on page A3 of the Sunday, 1991-10-27 Washington Post -# notes that Starke County switched from Central time to Eastern time as of -# 1991-10-27. -# Rule NAME FROM TO TYPE IN ON AT SAVE LETTER -Rule Starke 1947 1961 - Apr lastSun 2:00 1:00 D -Rule Starke 1947 1954 - Sep lastSun 2:00 0 S -Rule Starke 1955 1956 - Oct lastSun 2:00 0 S -Rule Starke 1957 1958 - Sep lastSun 2:00 0 S -Rule Starke 1959 1961 - Oct lastSun 2:00 0 S -# Zone NAME GMTOFF RULES FORMAT [UNTIL] -Zone America/Indiana/Knox -5:46:30 - LMT 1883 Nov 18 12:13:30 - -6:00 US C%sT 1947 - -6:00 Starke C%sT 1962 Apr 29 2:00 - -5:00 - EST 1963 Oct 27 2:00 - -6:00 US C%sT 1991 Oct 27 2:00 - -5:00 - EST 2006 Apr 2 2:00 - -6:00 US C%sT -# -# Pulaski County, Indiana, switched from eastern to central time in -# April 2006 and then switched back in March 2007. -# Rule NAME FROM TO TYPE IN ON AT SAVE LETTER -Rule Pulaski 1946 1960 - Apr lastSun 2:00 1:00 D -Rule Pulaski 1946 1954 - Sep lastSun 2:00 0 S -Rule Pulaski 1955 1956 - Oct lastSun 2:00 0 S -Rule Pulaski 1957 1960 - Sep lastSun 2:00 0 S -# Zone NAME GMTOFF RULES FORMAT [UNTIL] -Zone America/Indiana/Winamac -5:46:25 - LMT 1883 Nov 18 12:13:35 - -6:00 US C%sT 1946 - -6:00 Pulaski C%sT 1961 Apr 30 2:00 - -5:00 - EST 1969 - -5:00 US E%sT 1971 - -5:00 - EST 2006 Apr 2 2:00 - -6:00 US C%sT 2007 Mar 11 2:00 - -5:00 US E%sT -# -# Switzerland County, Indiana, did not observe DST from 1973 through 2005. -# Zone NAME GMTOFF RULES FORMAT [UNTIL] -Zone America/Indiana/Vevay -5:40:16 - LMT 1883 Nov 18 12:19:44 - -6:00 US C%sT 1954 Apr 25 2:00 - -5:00 - EST 1969 - -5:00 US E%sT 1973 - -5:00 - EST 2006 - -5:00 US E%sT - -# Part of Kentucky left its clocks alone in 1974. -# This also includes Clark, Floyd, and Harrison counties in Indiana. 
-# Rule NAME FROM TO TYPE IN ON AT SAVE LETTER -Rule Louisville 1921 only - May 1 2:00 1:00 D -Rule Louisville 1921 only - Sep 1 2:00 0 S -Rule Louisville 1941 1961 - Apr lastSun 2:00 1:00 D -Rule Louisville 1941 only - Sep lastSun 2:00 0 S -Rule Louisville 1946 only - Jun 2 2:00 0 S -Rule Louisville 1950 1955 - Sep lastSun 2:00 0 S -Rule Louisville 1956 1960 - Oct lastSun 2:00 0 S -# Zone NAME GMTOFF RULES FORMAT [UNTIL] -Zone America/Kentucky/Louisville -5:43:02 - LMT 1883 Nov 18 12:16:58 - -6:00 US C%sT 1921 - -6:00 Louisville C%sT 1942 - -6:00 US C%sT 1946 - -6:00 Louisville C%sT 1961 Jul 23 2:00 - -5:00 - EST 1968 - -5:00 US E%sT 1974 Jan 6 2:00 - -6:00 1:00 CDT 1974 Oct 27 2:00 - -5:00 US E%sT -# -# Wayne County, Kentucky -# -# From Lake Cumberland LIFE -# http://www.lake-cumberland.com/life/archive/news990129time.shtml -# (1999-01-29) via WKYM-101.7: -# Clinton County has joined Wayne County in asking the DoT to change from -# the Central to the Eastern time zone.... The Wayne County government made -# the same request in December. And while Russell County officials have not -# taken action, the majority of respondents to a poll conducted there in -# August indicated they would like to change to "fast time" also. -# The three Lake Cumberland counties are the farthest east of any U.S. -# location in the Central time zone. -# -# From Rich Wales (2000-08-29): -# After prolonged debate, and despite continuing deep differences of opinion, -# Wayne County (central Kentucky) is switching from Central (-0600) to Eastern -# (-0500) time. They won't "fall back" this year. See Sara Shipley, -# The difference an hour makes, Nando Times (2000-08-29 15:33 -0400). -# -# From Paul Eggert (2001-07-16): -# The final rule was published in the -# Federal Register 65, 160 (2000-08-17), pp 50154-50158. -# http://frwebgate.access.gpo.gov/cgi-bin/getdoc.cgi?dbname=2000_register&docid=fr17au00-22 -# -Zone America/Kentucky/Monticello -5:39:24 - LMT 1883 Nov 18 12:20:36 - -6:00 US C%sT 1946 - -6:00 - CST 1968 - -6:00 US C%sT 2000 Oct 29 2:00 - -5:00 US E%sT - - -# From Rives McDow (2000-08-30): -# Here ... are all the changes in the US since 1985. -# Kearny County, KS (put all of county on central; -# previously split between MST and CST) ... 1990-10 -# Starke County, IN (from CST to EST) ... 1991-10 -# Oliver County, ND (from MST to CST) ... 1992-10 -# West Wendover, NV (from PST TO MST) ... 1999-10 -# Wayne County, KY (from CST to EST) ... 2000-10 -# -# From Paul Eggert (2001-07-17): -# We don't know where the line used to be within Kearny County, KS, -# so omit that change for now. -# See America/Indiana/Knox for the Starke County, IN change. -# See America/North_Dakota/Center for the Oliver County, ND change. -# West Wendover, NV officially switched from Pacific to mountain time on -# 1999-10-31. See the -# Federal Register 64, 203 (1999-10-21), pp 56705-56707. -# http://frwebgate.access.gpo.gov/cgi-bin/getdoc.cgi?dbname=1999_register&docid=fr21oc99-15 -# However, the Federal Register says that West Wendover already operated -# on mountain time, and the rule merely made this official; -# hence a separate tz entry is not needed. - -# Michigan -# -# From Bob Devine (1988-01-28): -# Michigan didn't observe DST from 1968 to 1973. 
-# -# From Paul Eggert (1999-03-31): -# Shanks writes that Michigan started using standard time on 1885-09-18, -# but Howse writes (pp 124-125, referring to Popular Astronomy, 1901-01) -# that Detroit kept -# -# local time until 1900 when the City Council decreed that clocks should -# be put back twenty-eight minutes to Central Standard Time. Half the -# city obeyed, half refused. After considerable debate, the decision -# was rescinded and the city reverted to Sun time. A derisive offer to -# erect a sundial in front of the city hall was referred to the -# Committee on Sewers. Then, in 1905, Central time was adopted -# by city vote. -# -# This story is too entertaining to be false, so go with Howse over Shanks. -# -# From Paul Eggert (2001-03-06): -# Garland (1927) writes "Cleveland and Detroit advanced their clocks -# one hour in 1914." This change is not in Shanks. We have no more -# info, so omit this for now. -# -# Most of Michigan observed DST from 1973 on, but was a bit late in 1975. -# Rule NAME FROM TO TYPE IN ON AT SAVE LETTER -Rule Detroit 1948 only - Apr lastSun 2:00 1:00 D -Rule Detroit 1948 only - Sep lastSun 2:00 0 S -Rule Detroit 1967 only - Jun 14 2:00 1:00 D -Rule Detroit 1967 only - Oct lastSun 2:00 0 S -# Zone NAME GMTOFF RULES FORMAT [UNTIL] -Zone America/Detroit -5:32:11 - LMT 1905 - -6:00 - CST 1915 May 15 2:00 - -5:00 - EST 1942 - -5:00 US E%sT 1946 - -5:00 Detroit E%sT 1973 - -5:00 US E%sT 1975 - -5:00 - EST 1975 Apr 27 2:00 - -5:00 US E%sT -# -# Dickinson, Gogebic, Iron, and Menominee Counties, Michigan, -# switched from EST to CST/CDT in 1973. -# Rule NAME FROM TO TYPE IN ON AT SAVE LETTER -Rule Menominee 1946 only - Apr lastSun 2:00 1:00 D -Rule Menominee 1946 only - Sep lastSun 2:00 0 S -Rule Menominee 1966 only - Apr lastSun 2:00 1:00 D -Rule Menominee 1966 only - Oct lastSun 2:00 0 S -# Zone NAME GMTOFF RULES FORMAT [UNTIL] -Zone America/Menominee -5:50:27 - LMT 1885 Sep 18 12:00 - -6:00 US C%sT 1946 - -6:00 Menominee C%sT 1969 Apr 27 2:00 - -5:00 - EST 1973 Apr 29 2:00 - -6:00 US C%sT - -# Navassa -# administered by the US Fish and Wildlife Service -# claimed by US under the provisions of the 1856 Guano Islands Act -# also claimed by Haiti -# occupied 1857/1900 by the Navassa Phosphate Co -# US lighthouse 1917/1996-09 -# currently uninhabited -# see Mark Fineman, "An Isle Rich in Guano and Discord", -# _Los Angeles Times_ (1998-11-10), A1, A10; it cites -# Jimmy Skaggs, _The Great Guano Rush_ (1994). - -################################################################################ - - -# From Paul Eggert (2017-02-10): -# -# Unless otherwise specified, the source for data through 1990 is: -# Thomas G. Shanks and Rique Pottenger, The International Atlas (6th edition), -# San Diego: ACS Publications, Inc. (2003). -# Unfortunately this book contains many errors and cites no sources. -# -# Many years ago Gwillim Law wrote that a good source -# for time zone data was the International Air Transport -# Association's Standard Schedules Information Manual (IATA SSIM), -# published semiannually. Law sent in several helpful summaries -# of the IATA's data after 1990. Except where otherwise noted, -# IATA SSIM is the source for entries after 1990. -# -# Other sources occasionally used include: -# -# Edward W. Whitman, World Time Differences, -# Whitman Publishing Co, 2 Niagara Av, Ealing, London (undated), -# which I found in the UCLA library. -# -# William Willett, The Waste of Daylight, 19th edition -# -# [PDF] (1914-03) -# -# Milne J. Civil time. Geogr J. 
1899 Feb;13(2):173-94 -# . -# -# See the 'europe' file for Greenland. - -# Canada - -# From Alain LaBonté (1994-11-14): -# I post here the time zone abbreviations standardized in Canada -# for both English and French in the CAN/CSA-Z234.4-89 standard.... -# -# UTC Standard time Daylight saving time -# offset French English French English -# -2:30 - - HAT NDT -# -3 - - HAA ADT -# -3:30 HNT NST - - -# -4 HNA AST HAE EDT -# -5 HNE EST HAC CDT -# -6 HNC CST HAR MDT -# -7 HNR MST HAP PDT -# -8 HNP PST HAY YDT -# -9 HNY YST - - -# -# HN: Heure Normale ST: Standard Time -# HA: Heure Avancée DT: Daylight saving Time -# -# A: de l'Atlantique Atlantic -# C: du Centre Central -# E: de l'Est Eastern -# M: Mountain -# N: Newfoundland -# P: du Pacifique Pacific -# R: des Rocheuses -# T: de Terre-Neuve -# Y: du Yukon Yukon -# -# From Paul Eggert (1994-11-22): -# Alas, this sort of thing must be handled by localization software. - -# Unless otherwise specified, the data entries for Canada are all from Shanks -# & Pottenger. - -# From Chris Walton (2006-04-01, 2006-04-25, 2006-06-26, 2007-01-31, -# 2007-03-01): -# The British Columbia government announced yesterday that it will -# adjust daylight savings next year to align with changes in the -# U.S. and the rest of Canada.... -# http://www2.news.gov.bc.ca/news_releases_2005-2009/2006AG0014-000330.htm -# ... -# Nova Scotia -# Daylight saving time will be extended by four weeks starting in 2007.... -# http://www.gov.ns.ca/just/regulations/rg2/2006/ma1206.pdf -# -# [For New Brunswick] the new legislation dictates that the time change is to -# be done at 02:00 instead of 00:01. -# http://www.gnb.ca/0062/acts/BBA-2006/Chap-19.pdf -# ... -# Manitoba has traditionally changed the clock every fall at 03:00. -# As of 2006, the transition is to take place one hour earlier at 02:00. -# http://web2.gov.mb.ca/laws/statutes/ccsm/o030e.php -# ... -# [Alberta, Ontario, Quebec] will follow US rules. -# http://www.qp.gov.ab.ca/documents/spring/CH03_06.CFM -# http://www.e-laws.gov.on.ca/DBLaws/Source/Regs/English/2006/R06111_e.htm -# http://www2.publicationsduquebec.gouv.qc.ca/dynamicSearch/telecharge.php?type=5&file=2006C39A.PDF -# ... -# P.E.I. will follow US rules.... -# http://www.assembly.pe.ca/bills/pdf_chapter/62/3/chapter-41.pdf -# ... -# Province of Newfoundland and Labrador.... -# http://www.hoa.gov.nl.ca/hoa/bills/Bill0634.htm -# ... -# Yukon -# http://www.gov.yk.ca/legislation/regs/oic2006_127.pdf -# ... -# N.W.T. will follow US rules. Whoever maintains the government web site -# does not seem to believe in bookmarks. To see the news release, click the -# following link and search for "Daylight Savings Time Change". Press the -# "Daylight Savings Time Change" link; it will fire off a popup using -# JavaScript. -# http://www.exec.gov.nt.ca/currentnews/currentPR.asp?mode=archive -# ... -# Nunavut -# An amendment to the Interpretation Act was registered on February 19/2007.... -# http://action.attavik.ca/home/justice-gn/attach/2007/gaz02part2.pdf - -# From Paul Eggert (2014-10-18): -# H. David Matthews and Mary Vincent's map -# "It's about TIME", _Canadian Geographic_ (September-October 1998) -# http://www.canadiangeographic.ca/Magazine/SO98/alacarte.asp -# contains detailed boundaries for regions observing nonstandard -# time and daylight saving time arrangements in Canada circa 1998. -# -# National Research Council Canada maintains info about time zones and DST. 
-# http://www.nrc-cnrc.gc.ca/eng/services/time/time_zones.html -# http://www.nrc-cnrc.gc.ca/eng/services/time/faq/index.html#Q5 -# Its unofficial information is often taken from Matthews and Vincent. - -# From Paul Eggert (2006-06-27): -# For now, assume all of DST-observing Canada will fall into line with the -# new US DST rules, - -# From Chris Walton (2011-12-01) -# In the first of Tammy Hardwick's articles -# http://www.ilovecreston.com/?p=articles&t=spec&ar=260 -# she quotes the Friday November 1/1918 edition of the Creston Review. -# The quote includes these two statements: -# 'Sunday the CPR went back to the old system of time...' -# '... The daylight saving scheme was dropped all over Canada at the same time,' -# These statements refer to a transition from daylight time to standard time -# that occurred nationally on Sunday October 27/1918. This transition was -# also documented in the Saturday October 26/1918 edition of the Toronto Star. - -# In light of that evidence, we alter the date from the earlier believed -# Oct 31, to Oct 27, 1918 (and Sunday is a more likely transition day -# than Thursday) in all Canadian rulesets. - -# Rule NAME FROM TO TYPE IN ON AT SAVE LETTER/S -Rule Canada 1918 only - Apr 14 2:00 1:00 D -Rule Canada 1918 only - Oct 27 2:00 0 S -Rule Canada 1942 only - Feb 9 2:00 1:00 W # War -Rule Canada 1945 only - Aug 14 23:00u 1:00 P # Peace -Rule Canada 1945 only - Sep 30 2:00 0 S -Rule Canada 1974 1986 - Apr lastSun 2:00 1:00 D -Rule Canada 1974 2006 - Oct lastSun 2:00 0 S -Rule Canada 1987 2006 - Apr Sun>=1 2:00 1:00 D -Rule Canada 2007 max - Mar Sun>=8 2:00 1:00 D -Rule Canada 2007 max - Nov Sun>=1 2:00 0 S - - -# Newfoundland and Labrador - -# From Paul Eggert (2000-10-02): -# Matthews and Vincent (1998) write that Labrador should use NST/NDT, -# but the only part of Labrador that follows the rules is the -# southeast corner, including Port Hope Simpson and Mary's Harbour, -# but excluding, say, Black Tickle. - -# Rule NAME FROM TO TYPE IN ON AT SAVE LETTER/S -Rule StJohns 1917 only - Apr 8 2:00 1:00 D -Rule StJohns 1917 only - Sep 17 2:00 0 S -# Whitman gives 1919 Apr 5 and 1920 Apr 5; go with Shanks & Pottenger. -Rule StJohns 1919 only - May 5 23:00 1:00 D -Rule StJohns 1919 only - Aug 12 23:00 0 S -# For 1931-1935 Whitman gives Apr same date; go with Shanks & Pottenger. -Rule StJohns 1920 1935 - May Sun>=1 23:00 1:00 D -Rule StJohns 1920 1935 - Oct lastSun 23:00 0 S -# For 1936-1941 Whitman gives May Sun>=8 and Oct Sun>=1; go with Shanks & -# Pottenger. -Rule StJohns 1936 1941 - May Mon>=9 0:00 1:00 D -Rule StJohns 1936 1941 - Oct Mon>=2 0:00 0 S -# Whitman gives the following transitions: -# 1942 03-01/12-31, 1943 05-30/09-05, 1944 07-10/09-02, 1945 01-01/10-07 -# but go with Shanks & Pottenger and assume they used Canadian rules. -# For 1946-9 Whitman gives May 5,4,9,1 - Oct 1,5,3,2, and for 1950 he gives -# Apr 30 - Sep 24; go with Shanks & Pottenger. -Rule StJohns 1946 1950 - May Sun>=8 2:00 1:00 D -Rule StJohns 1946 1950 - Oct Sun>=2 2:00 0 S -Rule StJohns 1951 1986 - Apr lastSun 2:00 1:00 D -Rule StJohns 1951 1959 - Sep lastSun 2:00 0 S -Rule StJohns 1960 1986 - Oct lastSun 2:00 0 S -# From Paul Eggert (2000-10-02): -# INMS (2000-09-12) says that, since 1988 at least, Newfoundland switches -# at 00:01 local time. For now, assume it started in 1987. - -# From Michael Pelley (2011-09-12): -# We received today, Monday, September 12, 2011, notification that the -# changes to the Newfoundland Standard Time Act have been proclaimed. 
-# The change in the Act stipulates that the change from Daylight Savings -# Time to Standard Time and from Standard Time to Daylight Savings Time -# now occurs at 2:00AM. -# ... -# http://www.assembly.nl.ca/legislation/sr/annualstatutes/2011/1106.chp.htm -# ... -# MICHAEL PELLEY | Manager of Enterprise Architecture - Solution Delivery -# Office of the Chief Information Officer -# Executive Council -# Government of Newfoundland & Labrador - -Rule StJohns 1987 only - Apr Sun>=1 0:01 1:00 D -Rule StJohns 1987 2006 - Oct lastSun 0:01 0 S -Rule StJohns 1988 only - Apr Sun>=1 0:01 2:00 DD -Rule StJohns 1989 2006 - Apr Sun>=1 0:01 1:00 D -Rule StJohns 2007 2011 - Mar Sun>=8 0:01 1:00 D -Rule StJohns 2007 2010 - Nov Sun>=1 0:01 0 S -# -# St John's has an apostrophe, but Posix file names can't have apostrophes. -# Zone NAME GMTOFF RULES FORMAT [UNTIL] -Zone America/St_Johns -3:30:52 - LMT 1884 - -3:30:52 StJohns N%sT 1918 - -3:30:52 Canada N%sT 1919 - -3:30:52 StJohns N%sT 1935 Mar 30 - -3:30 StJohns N%sT 1942 May 11 - -3:30 Canada N%sT 1946 - -3:30 StJohns N%sT 2011 Nov - -3:30 Canada N%sT - -# most of east Labrador - -# The name 'Happy Valley-Goose Bay' is too long; use 'Goose Bay'. -# Zone NAME GMTOFF RULES FORMAT [UNTIL] -Zone America/Goose_Bay -4:01:40 - LMT 1884 # Happy Valley-Goose Bay - -3:30:52 - NST 1918 - -3:30:52 Canada N%sT 1919 - -3:30:52 - NST 1935 Mar 30 - -3:30 - NST 1936 - -3:30 StJohns N%sT 1942 May 11 - -3:30 Canada N%sT 1946 - -3:30 StJohns N%sT 1966 Mar 15 2:00 - -4:00 StJohns A%sT 2011 Nov - -4:00 Canada A%sT - - -# west Labrador, Nova Scotia, Prince Edward I - -# From Brian Inglis (2015-07-20): -# From the historical weather station records available at: -# https://weatherspark.com/history/28351/1971/Sydney-Nova-Scotia-Canada -# Sydney shares the same time history as Glace Bay, so was -# likely to be the same across the island.... -# Sydney, as the capital and most populous location, or Cape Breton, would -# have been better names for the zone had we known this in 1996. - -# From Paul Eggert (2015-07-20): -# Shanks & Pottenger write that since 1970 most of this region has been like -# Halifax. Many locales did not observe peacetime DST until 1972; -# the Cape Breton area, represented by Glace Bay, is the largest we know of -# (Glace Bay was perhaps not the best name choice but no point changing now). -# Shanks & Pottenger also write that Liverpool, NS was the only town -# in Canada to observe DST in 1971 but not 1970; for now we'll assume -# this is a typo. 
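As a cross-check of how the StJohns and Canada rule lines above play out in practice, here is a minimal sketch using Python's standard zoneinfo module (Python 3.9 or later, and assuming a tz database built from this data is available); the zone name, the -3:30/-2:30 offsets, and the 1988 double-daylight "DD" entry all come from the entries above.

    from datetime import datetime, timedelta
    from zoneinfo import ZoneInfo

    def offset(zone, *ymdh):
        # UTC offset of an (unambiguous) wall-clock time in the given zone.
        return datetime(*ymdh, tzinfo=ZoneInfo(zone)).utcoffset()

    # Ordinary years: NST is UTC-3:30 and NDT is UTC-2:30.
    assert offset("America/St_Johns", 2018, 1, 15, 12) == -timedelta(hours=3, minutes=30)
    assert offset("America/St_Johns", 2018, 7, 15, 12) == -timedelta(hours=2, minutes=30)

    # Rule StJohns 1988 only - Apr Sun>=1 0:01 2:00 DD: double daylight saving
    # time left St. John's only 1:30 behind UTC in the summer of 1988.
    assert offset("America/St_Johns", 1988, 7, 1, 12) == -timedelta(hours=1, minutes=30)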
- -# Rule NAME FROM TO TYPE IN ON AT SAVE LETTER/S -Rule Halifax 1916 only - Apr 1 0:00 1:00 D -Rule Halifax 1916 only - Oct 1 0:00 0 S -Rule Halifax 1920 only - May 9 0:00 1:00 D -Rule Halifax 1920 only - Aug 29 0:00 0 S -Rule Halifax 1921 only - May 6 0:00 1:00 D -Rule Halifax 1921 1922 - Sep 5 0:00 0 S -Rule Halifax 1922 only - Apr 30 0:00 1:00 D -Rule Halifax 1923 1925 - May Sun>=1 0:00 1:00 D -Rule Halifax 1923 only - Sep 4 0:00 0 S -Rule Halifax 1924 only - Sep 15 0:00 0 S -Rule Halifax 1925 only - Sep 28 0:00 0 S -Rule Halifax 1926 only - May 16 0:00 1:00 D -Rule Halifax 1926 only - Sep 13 0:00 0 S -Rule Halifax 1927 only - May 1 0:00 1:00 D -Rule Halifax 1927 only - Sep 26 0:00 0 S -Rule Halifax 1928 1931 - May Sun>=8 0:00 1:00 D -Rule Halifax 1928 only - Sep 9 0:00 0 S -Rule Halifax 1929 only - Sep 3 0:00 0 S -Rule Halifax 1930 only - Sep 15 0:00 0 S -Rule Halifax 1931 1932 - Sep Mon>=24 0:00 0 S -Rule Halifax 1932 only - May 1 0:00 1:00 D -Rule Halifax 1933 only - Apr 30 0:00 1:00 D -Rule Halifax 1933 only - Oct 2 0:00 0 S -Rule Halifax 1934 only - May 20 0:00 1:00 D -Rule Halifax 1934 only - Sep 16 0:00 0 S -Rule Halifax 1935 only - Jun 2 0:00 1:00 D -Rule Halifax 1935 only - Sep 30 0:00 0 S -Rule Halifax 1936 only - Jun 1 0:00 1:00 D -Rule Halifax 1936 only - Sep 14 0:00 0 S -Rule Halifax 1937 1938 - May Sun>=1 0:00 1:00 D -Rule Halifax 1937 1941 - Sep Mon>=24 0:00 0 S -Rule Halifax 1939 only - May 28 0:00 1:00 D -Rule Halifax 1940 1941 - May Sun>=1 0:00 1:00 D -Rule Halifax 1946 1949 - Apr lastSun 2:00 1:00 D -Rule Halifax 1946 1949 - Sep lastSun 2:00 0 S -Rule Halifax 1951 1954 - Apr lastSun 2:00 1:00 D -Rule Halifax 1951 1954 - Sep lastSun 2:00 0 S -Rule Halifax 1956 1959 - Apr lastSun 2:00 1:00 D -Rule Halifax 1956 1959 - Sep lastSun 2:00 0 S -Rule Halifax 1962 1973 - Apr lastSun 2:00 1:00 D -Rule Halifax 1962 1973 - Oct lastSun 2:00 0 S -# Zone NAME GMTOFF RULES FORMAT [UNTIL] -Zone America/Halifax -4:14:24 - LMT 1902 Jun 15 - -4:00 Halifax A%sT 1918 - -4:00 Canada A%sT 1919 - -4:00 Halifax A%sT 1942 Feb 9 2:00s - -4:00 Canada A%sT 1946 - -4:00 Halifax A%sT 1974 - -4:00 Canada A%sT -Zone America/Glace_Bay -3:59:48 - LMT 1902 Jun 15 - -4:00 Canada A%sT 1953 - -4:00 Halifax A%sT 1954 - -4:00 - AST 1972 - -4:00 Halifax A%sT 1974 - -4:00 Canada A%sT - -# New Brunswick - -# From Paul Eggert (2007-01-31): -# The Time Definition Act -# says they changed at 00:01 through 2006, and -# makes it -# clear that this was the case since at least 1993. -# For now, assume it started in 1993. 
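The 00:01 changeover described above is encoded in the Moncton rules that follow. A minimal sketch of how such a transition can be located in the compiled zone, again using Python's zoneinfo (Python 3.9 or later, tz database assumed): scan forward in UTC until the zone's UTC offset changes.

    from datetime import datetime, timedelta, timezone
    from zoneinfo import ZoneInfo

    def find_transition(zone, start, end, step=timedelta(minutes=1)):
        # Return the first UTC instant after `start` and before `end` at which
        # the zone's UTC offset differs from its offset at `start`, or None.
        tz = ZoneInfo(zone)
        base = start.astimezone(tz).utcoffset()
        t = start + step
        while t < end:
            if t.astimezone(tz).utcoffset() != base:
                return t
            t += step
        return None

    # Rule Moncton 1993 2006 - Apr Sun>=1 0:01: in 2006, DST began at 00:01
    # local time on April 2, i.e. at 04:01 UTC (Moncton was then at UTC-4).
    start = datetime(2006, 4, 1, 12, tzinfo=timezone.utc)
    t = find_transition("America/Moncton", start, start + timedelta(days=2))
    assert t == datetime(2006, 4, 2, 4, 1, tzinfo=timezone.utc)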
- -# Rule NAME FROM TO TYPE IN ON AT SAVE LETTER/S -Rule Moncton 1933 1935 - Jun Sun>=8 1:00 1:00 D -Rule Moncton 1933 1935 - Sep Sun>=8 1:00 0 S -Rule Moncton 1936 1938 - Jun Sun>=1 1:00 1:00 D -Rule Moncton 1936 1938 - Sep Sun>=1 1:00 0 S -Rule Moncton 1939 only - May 27 1:00 1:00 D -Rule Moncton 1939 1941 - Sep Sat>=21 1:00 0 S -Rule Moncton 1940 only - May 19 1:00 1:00 D -Rule Moncton 1941 only - May 4 1:00 1:00 D -Rule Moncton 1946 1972 - Apr lastSun 2:00 1:00 D -Rule Moncton 1946 1956 - Sep lastSun 2:00 0 S -Rule Moncton 1957 1972 - Oct lastSun 2:00 0 S -Rule Moncton 1993 2006 - Apr Sun>=1 0:01 1:00 D -Rule Moncton 1993 2006 - Oct lastSun 0:01 0 S -# Zone NAME GMTOFF RULES FORMAT [UNTIL] -Zone America/Moncton -4:19:08 - LMT 1883 Dec 9 - -5:00 - EST 1902 Jun 15 - -4:00 Canada A%sT 1933 - -4:00 Moncton A%sT 1942 - -4:00 Canada A%sT 1946 - -4:00 Moncton A%sT 1973 - -4:00 Canada A%sT 1993 - -4:00 Moncton A%sT 2007 - -4:00 Canada A%sT - -# Quebec - -# From Paul Eggert (2015-03-24): -# See America/Toronto for most of Quebec, including Montreal. -# -# Matthews and Vincent (1998) also write that Quebec east of the -63 -# meridian is supposed to observe AST, but residents as far east as -# Natashquan use EST/EDT, and residents east of Natashquan use AST. -# The Quebec department of justice writes in -# "The situation in Minganie and Basse-Côte-Nord" -# http://www.justice.gouv.qc.ca/english/publications/generale/temps-minganie-a.htm -# that the coastal strip from just east of Natashquan to Blanc-Sablon -# observes Atlantic standard time all year round. -# http://www.assnat.qc.ca/Media/Process.aspx?MediaId=ANQ.Vigie.Bll.DocumentGenerique_8845en -# says this common practice was codified into law as of 2007. -# For lack of better info, guess this practice began around 1970, contra to -# Shanks & Pottenger who have this region observing AST/ADT. - -# Zone NAME GMTOFF RULES FORMAT [UNTIL] -Zone America/Blanc-Sablon -3:48:28 - LMT 1884 - -4:00 Canada A%sT 1970 - -4:00 - AST - -# Ontario - -# From Paul Eggert (2006-07-09): -# Shanks & Pottenger write that since 1970 most of Ontario has been like -# Toronto. -# Thunder Bay skipped DST in 1973. -# Many smaller locales did not observe peacetime DST until 1974; -# Nipigon (EST) and Rainy River (CST) are the largest that we know of. -# Far west Ontario is like Winnipeg; far east Quebec is like Halifax. - -# From Mark Brader (2003-07-26): -# [According to the Toronto Star] Orillia, Ontario, adopted DST -# effective Saturday, 1912-06-22, 22:00; the article mentions that -# Port Arthur (now part of Thunder Bay, Ontario) as well as Moose Jaw -# have already done so. In Orillia DST was to run until Saturday, -# 1912-08-31 (no time mentioned), but it was met with considerable -# hostility from certain segments of the public, and was revoked after -# only two weeks - I copied it as Saturday, 1912-07-07, 22:00, but -# presumably that should be -07-06. (1912-06-19, -07-12; also letters -# earlier in June). -# -# Kenora, Ontario, was to abandon DST on 1914-06-01 (-05-21). - -# From Paul Eggert (1997-10-17): -# Mark Brader writes that an article in the 1997-10-14 Toronto Star -# says that Atikokan, Ontario currently does not observe DST, -# but will vote on 11-10 whether to use EST/EDT. -# He also writes that the Ontario Time Act (1990, Chapter T.9) -# http://www.gov.on.ca/MBS/english/publications/statregs/conttext.html -# says that Ontario east of 90W uses EST/EDT, and west of 90W uses CST/CDT. 
-# Officially Atikokan is therefore on CST/CDT, and most likely this report -# concerns a non-official time observed as a matter of local practice. -# -# From Paul Eggert (2000-10-02): -# Matthews and Vincent (1998) write that Atikokan, Pickle Lake, and -# New Osnaburgh observe CST all year, that Big Trout Lake observes -# CST/CDT, and that Upsala and Shebandowan observe EST/EDT, all in -# violation of the official Ontario rules. -# -# From Paul Eggert (2006-07-09): -# Chris Walton (2006-07-06) mentioned an article by Stephanie MacLellan in the -# 2005-07-21 Chronicle-Journal, which said: -# -# The clocks in Atikokan stay set on standard time year-round. -# This means they spend about half the time on central time and -# the other half on eastern time. -# -# For the most part, the system works, Mayor Dennis Brown said. -# -# "The majority of businesses in Atikokan deal more with Eastern -# Canada, but there are some that deal with Western Canada," he -# said. "I don't see any changes happening here." -# -# Walton also writes "Supposedly Pickle Lake and Mishkeegogamang -# [New Osnaburgh] follow the same practice." - -# From Garry McKinnon (2006-07-14) via Chris Walton: -# I chatted with a member of my board who has an outstanding memory -# and a long history in Atikokan (and in the telecom industry) and he -# can say for certain that Atikokan has been practicing the current -# time keeping since 1952, at least. - -# From Paul Eggert (2006-07-17): -# Shanks & Pottenger say that Atikokan has agreed with Rainy River -# ever since standard time was introduced, but the information from -# McKinnon sounds more authoritative. For now, assume that Atikokan -# switched to EST immediately after WWII era daylight saving time -# ended. This matches the old (less-populous) America/Coral_Harbour -# entry since our cutoff date of 1970, so we can move -# America/Coral_Harbour to the 'backward' file. - -# From Mark Brader (2010-03-06): -# -# Currently the database has: -# -# # Ontario -# -# # From Paul Eggert (2006-07-09): -# # Shanks & Pottenger write that since 1970 most of Ontario has been like -# # Toronto. -# # Thunder Bay skipped DST in 1973. -# # Many smaller locales did not observe peacetime DST until 1974; -# # Nipigon (EST) and Rainy River (CST) are the largest that we know of. -# -# In the (Toronto) Globe and Mail for Saturday, 1955-09-24, in the bottom -# right corner of page 1, it says that Toronto will return to standard -# time at 2 am Sunday morning (which agrees with the database), and that: -# -# The one-hour setback will go into effect throughout most of Ontario, -# except in areas like Windsor which remains on standard time all year. -# -# Windsor is, of course, a lot larger than Nipigon. -# -# I only came across this incidentally. I don't know if Windsor began -# observing DST when Detroit did, or in 1974, or on some other date. -# -# By the way, the article continues by noting that: -# -# Some cities in the United States have pushed the deadline back -# three weeks and will change over from daylight saving in October. - -# From Arthur David Olson (2010-07-17): -# -# "Standard Time and Time Zones in Canada" appeared in -# The Journal of The Royal Astronomical Society of Canada, -# volume 26, number 2 (February 1932) and, as of 2010-07-17, -# was available at -# http://adsabs.harvard.edu/full/1932JRASC..26...49S -# -# It includes the text below (starting on page 57): -# -# A list of the places in Canada using daylight saving time would -# require yearly revision. 
From information kindly furnished by -# the provincial governments and by the postmasters in many cities -# and towns, it is found that the following places used daylight sav- -# ing in 1930. The information for the province of Quebec is definite, -# for the other provinces only approximate: -# -# Province Daylight saving time used -# Prince Edward Island Not used. -# Nova Scotia In Halifax only. -# New Brunswick In St. John only. -# Quebec In the following places: -# Montreal Lachine -# Quebec Mont-Royal -# Lévis Iberville -# St. Lambert Cap de la Madelèine -# Verdun Loretteville -# Westmount Richmond -# Outremont St. Jérôme -# Longueuil Greenfield Park -# Arvida Waterloo -# Chambly-Canton Beaulieu -# Melbourne La Tuque -# St. Théophile Buckingham -# Ontario Used generally in the cities and towns along -# the southerly part of the province. Not -# used in the northwesterly part. -# Manitoba Not used. -# Saskatchewan In Regina only. -# Alberta Not used. -# British Columbia Not used. -# -# With some exceptions, the use of daylight saving may be said to be limited -# to those cities and towns lying between Quebec city and Windsor, Ont. - -# Rule NAME FROM TO TYPE IN ON AT SAVE LETTER/S -Rule Toronto 1919 only - Mar 30 23:30 1:00 D -Rule Toronto 1919 only - Oct 26 0:00 0 S -Rule Toronto 1920 only - May 2 2:00 1:00 D -Rule Toronto 1920 only - Sep 26 0:00 0 S -Rule Toronto 1921 only - May 15 2:00 1:00 D -Rule Toronto 1921 only - Sep 15 2:00 0 S -Rule Toronto 1922 1923 - May Sun>=8 2:00 1:00 D -# Shanks & Pottenger say 1923-09-19; assume it's a typo and that "-16" -# was meant. -Rule Toronto 1922 1926 - Sep Sun>=15 2:00 0 S -Rule Toronto 1924 1927 - May Sun>=1 2:00 1:00 D -# The 1927-to-1939 rules can be expressed more simply as -# Rule Toronto 1927 1937 - Sep Sun>=25 2:00 0 S -# Rule Toronto 1928 1937 - Apr Sun>=25 2:00 1:00 D -# Rule Toronto 1938 1940 - Apr lastSun 2:00 1:00 D -# Rule Toronto 1938 1939 - Sep lastSun 2:00 0 S -# The rules below avoid use of Sun>=25 -# (which pre-2004 versions of zic cannot handle). -Rule Toronto 1927 1932 - Sep lastSun 2:00 0 S -Rule Toronto 1928 1931 - Apr lastSun 2:00 1:00 D -Rule Toronto 1932 only - May 1 2:00 1:00 D -Rule Toronto 1933 1940 - Apr lastSun 2:00 1:00 D -Rule Toronto 1933 only - Oct 1 2:00 0 S -Rule Toronto 1934 1939 - Sep lastSun 2:00 0 S -Rule Toronto 1945 1946 - Sep lastSun 2:00 0 S -Rule Toronto 1946 only - Apr lastSun 2:00 1:00 D -Rule Toronto 1947 1949 - Apr lastSun 0:00 1:00 D -Rule Toronto 1947 1948 - Sep lastSun 0:00 0 S -Rule Toronto 1949 only - Nov lastSun 0:00 0 S -Rule Toronto 1950 1973 - Apr lastSun 2:00 1:00 D -Rule Toronto 1950 only - Nov lastSun 2:00 0 S -Rule Toronto 1951 1956 - Sep lastSun 2:00 0 S -# Shanks & Pottenger say Toronto ended DST a week early in 1971, -# namely on 1971-10-24, but Mark Brader wrote (2003-05-31) that this -# is wrong, and that he had confirmed it by checking the 1971-10-30 -# Toronto Star, which said that DST was ending 1971-10-31 as usual. -Rule Toronto 1957 1973 - Oct lastSun 2:00 0 S - -# From Paul Eggert (2003-07-27): -# Willett (1914-03) writes (p. 17) "In the Cities of Fort William, and -# Port Arthur, Ontario, the principle of the Bill has been in -# operation for the past three years, and in the City of Moose Jaw, -# Saskatchewan, for one year." - -# From David Bryan via Tory Tronrud, Director/Curator, -# Thunder Bay Museum (2003-11-12): -# There is some suggestion, however, that, by-law or not, daylight -# savings time was being practiced in Fort William and Port Arthur -# before 1909.... 
[I]n 1910, the line between the Eastern and Central -# Time Zones was permanently moved about two hundred miles west to -# include the Thunder Bay area.... When Canada adopted daylight -# savings time in 1916, Fort William and Port Arthur, having done so -# already, did not change their clocks.... During the Second World -# War,... [t]he cities agreed to implement DST during the summer -# months for the remainder of the war years. - -# Zone NAME GMTOFF RULES FORMAT [UNTIL] -Zone America/Toronto -5:17:32 - LMT 1895 - -5:00 Canada E%sT 1919 - -5:00 Toronto E%sT 1942 Feb 9 2:00s - -5:00 Canada E%sT 1946 - -5:00 Toronto E%sT 1974 - -5:00 Canada E%sT -Zone America/Thunder_Bay -5:57:00 - LMT 1895 - -6:00 - CST 1910 - -5:00 - EST 1942 - -5:00 Canada E%sT 1970 - -5:00 Toronto E%sT 1973 - -5:00 - EST 1974 - -5:00 Canada E%sT -Zone America/Nipigon -5:53:04 - LMT 1895 - -5:00 Canada E%sT 1940 Sep 29 - -5:00 1:00 EDT 1942 Feb 9 2:00s - -5:00 Canada E%sT -Zone America/Rainy_River -6:18:16 - LMT 1895 - -6:00 Canada C%sT 1940 Sep 29 - -6:00 1:00 CDT 1942 Feb 9 2:00s - -6:00 Canada C%sT -Zone America/Atikokan -6:06:28 - LMT 1895 - -6:00 Canada C%sT 1940 Sep 29 - -6:00 1:00 CDT 1942 Feb 9 2:00s - -6:00 Canada C%sT 1945 Sep 30 2:00 - -5:00 - EST - - -# Manitoba - -# From Rob Douglas (2006-04-06): -# the old Manitoba Time Act - as amended by Bill 2, assented to -# March 27, 1987 ... said ... -# "between two o'clock Central Standard Time in the morning of -# the first Sunday of April of each year and two o'clock Central -# Standard Time in the morning of the last Sunday of October next -# following, one hour in advance of Central Standard Time."... -# I believe that the English legislation [of the old time act] had -# been assented to (March 22, 1967).... -# Also, as far as I can tell, there was no order-in-council varying -# the time of Daylight Saving Time for 2005 and so the provisions of -# the 1987 version would apply - the changeover was at 2:00 Central -# Standard Time (i.e. not until 3:00 Central Daylight Time). - -# From Paul Eggert (2006-04-10): -# Shanks & Pottenger say Manitoba switched at 02:00 (not 02:00s) -# starting 1966. Since 02:00s is clearly correct for 1967 on, assume -# it was also 02:00s in 1966. 
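In the Winn rules below, the "s" suffix on the AT field (2:00s) means the transition is taken at 2:00 local standard time rather than 2:00 on the wall clock, so the autumn changeover did not happen until 3:00 Central Daylight Time, as the note above says. A minimal sketch of checking that against the compiled zone, again with Python's zoneinfo (Python 3.9 or later, tz database assumed), using the fold attribute to pick between the two occurrences of the repeated wall-clock hour:

    from datetime import datetime, timedelta
    from zoneinfo import ZoneInfo

    winnipeg = ZoneInfo("America/Winnipeg")

    # Rule Winn 1966 2005 - Oct lastSun 2:00s: the 2005 fall transition was
    # taken at 2:00 CST, i.e. 3:00 on the daylight wall clock, so wall-clock
    # 02:30 that morning occurred twice, first as CDT and then as CST.
    first = datetime(2005, 10, 30, 2, 30, fold=0, tzinfo=winnipeg)
    second = datetime(2005, 10, 30, 2, 30, fold=1, tzinfo=winnipeg)
    assert first.utcoffset() == timedelta(hours=-5)   # CDT
    assert second.utcoffset() == timedelta(hours=-6)  # CST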
- -# Rule NAME FROM TO TYPE IN ON AT SAVE LETTER/S -Rule Winn 1916 only - Apr 23 0:00 1:00 D -Rule Winn 1916 only - Sep 17 0:00 0 S -Rule Winn 1918 only - Apr 14 2:00 1:00 D -Rule Winn 1918 only - Oct 27 2:00 0 S -Rule Winn 1937 only - May 16 2:00 1:00 D -Rule Winn 1937 only - Sep 26 2:00 0 S -Rule Winn 1942 only - Feb 9 2:00 1:00 W # War -Rule Winn 1945 only - Aug 14 23:00u 1:00 P # Peace -Rule Winn 1945 only - Sep lastSun 2:00 0 S -Rule Winn 1946 only - May 12 2:00 1:00 D -Rule Winn 1946 only - Oct 13 2:00 0 S -Rule Winn 1947 1949 - Apr lastSun 2:00 1:00 D -Rule Winn 1947 1949 - Sep lastSun 2:00 0 S -Rule Winn 1950 only - May 1 2:00 1:00 D -Rule Winn 1950 only - Sep 30 2:00 0 S -Rule Winn 1951 1960 - Apr lastSun 2:00 1:00 D -Rule Winn 1951 1958 - Sep lastSun 2:00 0 S -Rule Winn 1959 only - Oct lastSun 2:00 0 S -Rule Winn 1960 only - Sep lastSun 2:00 0 S -Rule Winn 1963 only - Apr lastSun 2:00 1:00 D -Rule Winn 1963 only - Sep 22 2:00 0 S -Rule Winn 1966 1986 - Apr lastSun 2:00s 1:00 D -Rule Winn 1966 2005 - Oct lastSun 2:00s 0 S -Rule Winn 1987 2005 - Apr Sun>=1 2:00s 1:00 D -# Zone NAME GMTOFF RULES FORMAT [UNTIL] -Zone America/Winnipeg -6:28:36 - LMT 1887 Jul 16 - -6:00 Winn C%sT 2006 - -6:00 Canada C%sT - - -# Saskatchewan - -# From Mark Brader (2003-07-26): -# The first actual adoption of DST in Canada was at the municipal -# level. As the [Toronto] Star put it (1912-06-07), "While people -# elsewhere have long been talking of legislation to save daylight, -# the city of Moose Jaw [Saskatchewan] has acted on its own hook." -# DST in Moose Jaw began on Saturday, 1912-06-01 (no time mentioned: -# presumably late evening, as below), and would run until "the end of -# the summer". The discrepancy between municipal time and railroad -# time was noted. - -# From Paul Eggert (2003-07-27): -# Willett (1914-03) notes that DST "has been in operation ... in the -# City of Moose Jaw, Saskatchewan, for one year." - -# From Paul Eggert (2006-03-22): -# Shanks & Pottenger say that since 1970 this region has mostly been as Regina. -# Some western towns (e.g. Swift Current) switched from MST/MDT to CST in 1972. -# Other western towns (e.g. Lloydminster) are like Edmonton. -# Matthews and Vincent (1998) write that Denare Beach and Creighton -# are like Winnipeg, in violation of Saskatchewan law. - -# From W. Jones (1992-11-06): -# The. . .below is based on information I got from our law library, the -# provincial archives, and the provincial Community Services department. -# A precise history would require digging through newspaper archives, and -# since you didn't say what you wanted, I didn't bother. -# -# Saskatchewan is split by a time zone meridian (105W) and over the years -# the boundary became pretty ragged as communities near it reevaluated -# their affiliations in one direction or the other. In 1965 a provincial -# referendum favoured legislating common time practices. -# -# On 15 April 1966 the Time Act (c. T-14, Revised Statutes of -# Saskatchewan 1978) was proclaimed, and established that the eastern -# part of Saskatchewan would use CST year round, that districts in -# northwest Saskatchewan would by default follow CST but could opt to -# follow Mountain Time rules (thus 1 hour difference in the winter and -# zero in the summer), and that districts in southwest Saskatchewan would -# by default follow MT but could opt to follow CST. 
-# -# It took a few years for the dust to settle (I know one story of a town -# on one time zone having its school in another, such that a mom had to -# serve her family lunch in two shifts), but presently it seems that only -# a few towns on the border with Alberta (e.g. Lloydminster) follow MT -# rules any more; all other districts appear to have used CST year round -# since sometime in the 1960s. - -# From Chris Walton (2006-06-26): -# The Saskatchewan time act which was last updated in 1996 is about 30 pages -# long and rather painful to read. -# http://www.qp.gov.sk.ca/documents/English/Statutes/Statutes/T14.pdf - -# Rule NAME FROM TO TYPE IN ON AT SAVE LETTER/S -Rule Regina 1918 only - Apr 14 2:00 1:00 D -Rule Regina 1918 only - Oct 27 2:00 0 S -Rule Regina 1930 1934 - May Sun>=1 0:00 1:00 D -Rule Regina 1930 1934 - Oct Sun>=1 0:00 0 S -Rule Regina 1937 1941 - Apr Sun>=8 0:00 1:00 D -Rule Regina 1937 only - Oct Sun>=8 0:00 0 S -Rule Regina 1938 only - Oct Sun>=1 0:00 0 S -Rule Regina 1939 1941 - Oct Sun>=8 0:00 0 S -Rule Regina 1942 only - Feb 9 2:00 1:00 W # War -Rule Regina 1945 only - Aug 14 23:00u 1:00 P # Peace -Rule Regina 1945 only - Sep lastSun 2:00 0 S -Rule Regina 1946 only - Apr Sun>=8 2:00 1:00 D -Rule Regina 1946 only - Oct Sun>=8 2:00 0 S -Rule Regina 1947 1957 - Apr lastSun 2:00 1:00 D -Rule Regina 1947 1957 - Sep lastSun 2:00 0 S -Rule Regina 1959 only - Apr lastSun 2:00 1:00 D -Rule Regina 1959 only - Oct lastSun 2:00 0 S -# -Rule Swift 1957 only - Apr lastSun 2:00 1:00 D -Rule Swift 1957 only - Oct lastSun 2:00 0 S -Rule Swift 1959 1961 - Apr lastSun 2:00 1:00 D -Rule Swift 1959 only - Oct lastSun 2:00 0 S -Rule Swift 1960 1961 - Sep lastSun 2:00 0 S -# Zone NAME GMTOFF RULES FORMAT [UNTIL] -Zone America/Regina -6:58:36 - LMT 1905 Sep - -7:00 Regina M%sT 1960 Apr lastSun 2:00 - -6:00 - CST -Zone America/Swift_Current -7:11:20 - LMT 1905 Sep - -7:00 Canada M%sT 1946 Apr lastSun 2:00 - -7:00 Regina M%sT 1950 - -7:00 Swift M%sT 1972 Apr lastSun 2:00 - -6:00 - CST - - -# Alberta - -# Rule NAME FROM TO TYPE IN ON AT SAVE LETTER/S -Rule Edm 1918 1919 - Apr Sun>=8 2:00 1:00 D -Rule Edm 1918 only - Oct 27 2:00 0 S -Rule Edm 1919 only - May 27 2:00 0 S -Rule Edm 1920 1923 - Apr lastSun 2:00 1:00 D -Rule Edm 1920 only - Oct lastSun 2:00 0 S -Rule Edm 1921 1923 - Sep lastSun 2:00 0 S -Rule Edm 1942 only - Feb 9 2:00 1:00 W # War -Rule Edm 1945 only - Aug 14 23:00u 1:00 P # Peace -Rule Edm 1945 only - Sep lastSun 2:00 0 S -Rule Edm 1947 only - Apr lastSun 2:00 1:00 D -Rule Edm 1947 only - Sep lastSun 2:00 0 S -Rule Edm 1967 only - Apr lastSun 2:00 1:00 D -Rule Edm 1967 only - Oct lastSun 2:00 0 S -Rule Edm 1969 only - Apr lastSun 2:00 1:00 D -Rule Edm 1969 only - Oct lastSun 2:00 0 S -Rule Edm 1972 1986 - Apr lastSun 2:00 1:00 D -Rule Edm 1972 2006 - Oct lastSun 2:00 0 S -# Zone NAME GMTOFF RULES FORMAT [UNTIL] -Zone America/Edmonton -7:33:52 - LMT 1906 Sep - -7:00 Edm M%sT 1987 - -7:00 Canada M%sT - - -# British Columbia - -# From Paul Eggert (2006-03-22): -# Shanks & Pottenger write that since 1970 most of this region has -# been like Vancouver. -# Dawson Creek uses MST. Much of east BC is like Edmonton. -# Matthews and Vincent (1998) write that Creston is like Dawson Creek. 
- -# It seems, though, that this (re: Creston) is not entirely correct: -# From Chris Walton (2011-12-01): -# There are two areas within the Canadian province of British Columbia -# that do not currently observe daylight saving: -# a) The Creston Valley (includes the town of Creston and surrounding area) -# b) The eastern half of the Peace River Regional District -# (includes the cities of Dawson Creek and Fort St. John) - -# Earlier this year I stumbled across a detailed article about the time -# keeping history of Creston; it was written by Tammy Hardwick who is the -# manager of the Creston & District Museum. The article was written in May 2009. -# http://www.ilovecreston.com/?p=articles&t=spec&ar=260 -# According to the article, Creston has not changed its clocks since June 1918. -# i.e. Creston has been stuck on UTC-7 for 93 years. -# Dawson Creek, on the other hand, changed its clocks as recently as April 1972. - -# Unfortunately the exact date for the time change in June 1918 remains -# unknown and will be difficult to ascertain. I e-mailed Tammy a few months -# ago to ask if Sunday June 2 was a reasonable guess. She said it was just -# as plausible as any other date (in June). She also said that after writing -# the article she had discovered another time change in 1916; this is the -# subject of another article which she wrote in October 2010. -# http://www.creston.museum.bc.ca/index.php?module=comments&uop=view_comment&cm+id=56 - -# Here is a summary of the three clock change events in Creston's history: -# 1. 1884 or 1885: adoption of Mountain Standard Time (GMT-7) -# Exact date unknown -# 2. Oct 1916: switch to Pacific Standard Time (GMT-8) -# Exact date in October unknown; Sunday October 1 is a reasonable guess. -# 3. June 1918: switch to Pacific Daylight Time (GMT-7) -# Exact date in June unknown; Sunday June 2 is a reasonable guess. -# note 1: -# On Oct 27/1918 when daylight saving ended in the rest of Canada, -# Creston did not change its clocks. -# note 2: -# During WWII when the Federal Government legislated a mandatory clock change, -# Creston did not oblige. -# note 3: -# There is no guarantee that Creston will remain on Mountain Standard Time -# (UTC-7) forever. -# The subject was debated at least once this year by the town Council. -# http://www.bclocalnews.com/kootenay_rockies/crestonvalleyadvance/news/116760809.html - -# During part of WWII, summer time (daylight saving) was mandatory in Canada. -# In Creston, that was handled by shifting the area to PST (-8:00) then applying -# summer time to cause the offset to be -7:00, the same as it had been before -# the change. It can be argued that the timezone abbreviation during this -# period should be PDT rather than MST, but that doesn't seem important enough -# (to anyone) to further complicate the rules. - -# The transition dates (and times) are guesses. - -# From Matt Johnson (2015-09-21): -# Fort Nelson, BC, Canada will cancel DST this year. So while previously they -# were aligned with America/Vancouver, they're now aligned with -# America/Dawson_Creek. -# http://www.northernrockies.ca/EN/meta/news/archives/2015/northern-rockies-time-change.html -# -# From Tim Parenti (2015-09-23): -# This requires a new zone for the Northern Rockies Regional Municipality, -# America/Fort_Nelson. The resolution of 2014-12-08 was reached following a -# 2014-11-15 poll with nearly 75% support. Effectively, the municipality has -# been on MST (-0700) like Dawson Creek since it advanced its clocks on -# 2015-03-08.
-# -# From Paul Eggert (2015-09-23): -# Shanks says Fort Nelson did not observe DST in 1946, unlike Vancouver. - -# Rule NAME FROM TO TYPE IN ON AT SAVE LETTER/S -Rule Vanc 1918 only - Apr 14 2:00 1:00 D -Rule Vanc 1918 only - Oct 27 2:00 0 S -Rule Vanc 1942 only - Feb 9 2:00 1:00 W # War -Rule Vanc 1945 only - Aug 14 23:00u 1:00 P # Peace -Rule Vanc 1945 only - Sep 30 2:00 0 S -Rule Vanc 1946 1986 - Apr lastSun 2:00 1:00 D -Rule Vanc 1946 only - Oct 13 2:00 0 S -Rule Vanc 1947 1961 - Sep lastSun 2:00 0 S -Rule Vanc 1962 2006 - Oct lastSun 2:00 0 S -# Zone NAME GMTOFF RULES FORMAT [UNTIL] -Zone America/Vancouver -8:12:28 - LMT 1884 - -8:00 Vanc P%sT 1987 - -8:00 Canada P%sT -Zone America/Dawson_Creek -8:00:56 - LMT 1884 - -8:00 Canada P%sT 1947 - -8:00 Vanc P%sT 1972 Aug 30 2:00 - -7:00 - MST -Zone America/Fort_Nelson -8:10:47 - LMT 1884 - -8:00 Vanc P%sT 1946 - -8:00 - PST 1947 - -8:00 Vanc P%sT 1987 - -8:00 Canada P%sT 2015 Mar 8 2:00 - -7:00 - MST -Zone America/Creston -7:46:04 - LMT 1884 - -7:00 - MST 1916 Oct 1 - -8:00 - PST 1918 Jun 2 - -7:00 - MST - -# Northwest Territories, Nunavut, Yukon - -# From Paul Eggert (2006-03-22): -# Dawson switched to PST in 1973. Inuvik switched to MST in 1979. -# Mathew Englander (1996-10-07) gives the following refs: -# * 1967. Paragraph 28(34)(g) of the Interpretation Act, S.C. 1967-68, -# c. 7 defines Yukon standard time as UTC-9.... -# see Interpretation Act, R.S.C. 1985, c. I-21, s. 35(1). -# [http://canlii.ca/t/7vhg] -# * C.O. 1973/214 switched Yukon to PST on 1973-10-28 00:00. -# * O.I.C. 1980/02 established DST. -# * O.I.C. 1987/056 changed DST to Apr firstSun 2:00 to Oct lastSun 2:00. - -# From Brian Inglis (2015-04-14): -# -# I tried to trace the history of Yukon time and found the following -# regulations, giving the reference title and URL if found, regulation name, -# and relevant quote if available. Each regulation specifically revokes its -# predecessor. The final reference is to the current Interpretation Act -# authorizing and resulting from these regulatory changes. -# -# Only recent regulations were retrievable via Yukon government site search or -# index, and only some via Canadian legal sources. Other sources used include -# articles titled "Standard Time and Time Zones in Canada" from JRASC via ADS -# Abstracts, cited by ADO for 1932 ..., and updated versions from 1958 and -# 1970 quoted below; each article includes current extracts from provincial -# and territorial ST and DST regulations at the end, summaries and details of -# standard times and daylight saving time at many locations across Canada, -# with time zone maps, tables and calculations for Canadian Sunrise, Sunset, -# and LMST; they also cover many countries and global locations, with a chart -# and table showing current Universal Time offsets, and may be useful as -# another source of information for 1970 and earlier. -# -# * Standard Time and Time Zones in Canada; Smith, C.C.; JRASC, Vol. 26, -# pp.49-77; February 1932; SAO/NASA Astrophysics Data System (ADS) -# http://adsabs.harvard.edu/abs/1932JRASC..26...49S from p.75: -# Yukon Interpretation Ordinance -# Yukon standard time is the local mean time at the one hundred and -# thirty-fifth meridian. -# -# * Standard Time and Time Zones in Canada; Smith, C.C.; Thomson, Malcolm M.; -# JRASC, Vol. 52, pp.193-223; October 1958; SAO/NASA Astrophysics Data System -# (ADS) http://adsabs.harvard.edu/abs/1958JRASC..52..193S from pp.220-1: -# Yukon Interpretation Ordinance, 1955, Chap. 16. 
-# -# (1) Subject to this section, standard time shall be reckoned as nine -# hours behind Greenwich Time and called Yukon Standard Time. -# -# (2) Notwithstanding subsection (1), the Commissioner may make regulations -# varying the manner of reckoning standard time. -# -# * Yukon Territory Commissioner's Order 1966-20 Interpretation Ordinance -# http://? - no online source found -# -# * Standard Time and Time Zones in Canada; Thomson, Malcolm M.; JRASC, -# Vol. 64, pp.129-162; June 1970; SAO/NASA Astrophysics Data System (ADS) -# http://adsabs.harvard.edu/abs/1970JRASC..64..129T from p.156: Yukon -# Territory Commissioner's Order 1967-59 Interpretation Ordinance ... -# -# 1. Commissioner's Order 1966-20 dated at Whitehorse in the Yukon -# Territory on 27th January, 1966, is hereby revoked. -# -# 2. Yukon (East) Standard Time as defined by section 36 of the -# Interpretation Ordinance from and after mid-night on the 28th day of May, -# 1967 shall be reckoned in the same manner as Pacific Standard Time, that -# is to say, eight hours behind Greenwich Time in the area of the Yukon -# Territory lying east of the 138th degree longitude west. -# -# 3. In the remainder of the Territory, lying west of the 138th degree -# longitude west, Yukon (West) Standard Time shall be reckoned as nine -# hours behind Greenwich Time. -# -# * Yukon Standard Time defined as Pacific Standard Time, YCO 1973/214 -# http://www.canlii.org/en/yk/laws/regu/yco-1973-214/latest/yco-1973-214.html -# C.O. 1973/214 INTERPRETATION ACT ... -# -# 1. Effective October 28, 1973 Commissioner's Order 1967/59 is hereby -# revoked. -# -# 2. Yukon Standard Time as defined by section 36 of the Interpretation -# Act from and after midnight on the twenty-eighth day of October, 1973 -# shall be reckoned in the same manner as Pacific Standard Time, that is -# to say eight hours behind Greenwich Time. -# -# * O.I.C. 1980/02 INTERPRETATION ACT -# http://? - no online source found -# -# * Yukon Daylight Saving Time, YOIC 1987/56 -# http://www.canlii.org/en/yk/laws/regu/yoic-1987-56/latest/yoic-1987-56.html -# O.I.C. 1987/056 INTERPRETATION ACT ... -# -# In every year between -# (a) two o'clock in the morning in the first Sunday in April, and -# (b) two o'clock in the morning in the last Sunday in October, -# Standard Time shall be reckoned as seven hours behind Greenwich Time and -# called Yukon Daylight Saving Time. -# ... -# Dated ... 9th day of March, A.D., 1987. -# -# * Yukon Daylight Saving Time 2006, YOIC 2006/127 -# http://www.canlii.org/en/yk/laws/regu/yoic-2006-127/latest/yoic-2006-127.html -# O.I.C. 2006/127 INTERPRETATION ACT ... -# -# 1. In Yukon each year the time for general purposes shall be 7 hours -# behind Greenwich mean time during the period commencing at two o'clock -# in the forenoon on the second Sunday of March and ending at two o'clock -# in the forenoon on the first Sunday of November and shall be called -# Yukon Daylight Saving Time. -# -# 2. Order-in-Council 1987/56 is revoked. -# -# 3. This order comes into force January 1, 2007. -# -# * Interpretation Act, RSY 2002, c 125 -# http://www.canlii.org/en/yk/laws/stat/rsy-2002-c-125/latest/rsy-2002-c-125.html - -# From Rives McDow (1999-09-04): -# Nunavut ... moved ... to incorporate the whole territory into one time zone. -# Nunavut moves to single time zone Oct. 
31 -# http://www.nunatsiaq.com/nunavut/nvt90903_13.html -# -# From Antoine Leca (1999-09-06): -# We then need to create a new timezone for the Kitikmeot region of Nunavut -# to differentiate it from the Yellowknife region. - -# From Paul Eggert (1999-09-20): -# Basic Facts: The New Territory -# http://www.nunavut.com/basicfacts/english/basicfacts_1territory.html -# (1999) reports that Pangnirtung operates on eastern time, -# and that Coral Harbour does not observe DST. We don't know when -# Pangnirtung switched to eastern time; we'll guess 1995. - -# From Rives McDow (1999-11-08): -# On October 31, when the rest of Nunavut went to Central time, -# Pangnirtung wobbled. Here is the result of their wobble: -# -# The following businesses and organizations in Pangnirtung use Central Time: -# -# First Air, Power Corp, Nunavut Construction, Health Center, RCMP, -# Eastern Arctic National Parks, A & D Specialist -# -# The following businesses and organizations in Pangnirtung use Eastern Time: -# -# Hamlet office, All other businesses, Both schools, Airport operator -# -# This has made for an interesting situation there, which warranted the news. -# No one there that I spoke with seems concerned, or has plans to -# change the local methods of keeping time, as it evidently does not -# really interfere with any activities or make things difficult locally. -# They plan to celebrate New Year's turn-over twice, one hour apart, -# so it appears that the situation will last at least that long. -# The Nunavut Intergovernmental Affairs hopes that they will "come to -# their senses", but the locals evidently don't see any problem with -# the current state of affairs. - -# From Michaela Rodrigue, writing in the -# Nunatsiaq News (1999-11-19): -# http://www.nunatsiaq.com/archives/nunavut991130/nvt91119_17.html -# Clyde River, Pangnirtung and Sanikiluaq now operate with two time zones, -# central - or Nunavut time - for government offices, and eastern time -# for municipal offices and schools.... Igloolik [was similar but then] -# made the switch to central time on Saturday, Nov. 6. - -# From Paul Eggert (2000-10-02): -# Matthews and Vincent (1998) say the following, but we lack histories -# for these potential new Zones. -# -# The Canadian Forces station at Alert uses Eastern Time while the -# handful of residents at the Eureka weather station [in the Central -# zone] skip daylight savings. Baffin Island, which is crossed by the -# Central, Eastern and Atlantic Time zones only uses Eastern Time. -# Gjoa Haven, Taloyoak and Pelly Bay all use Mountain instead of -# Central Time and Southampton Island [in the Central zone] is not -# required to use daylight savings. - -# From -# Nunavut now has two time zones (2000-11-10): -# The Nunavut government would allow its employees in Kugluktuk and -# Cambridge Bay to operate on central time year-round, putting them -# one hour behind the rest of Nunavut for six months during the winter. -# At the end of October the two communities had rebelled against -# Nunavut's unified time zone, refusing to shift to eastern time with -# the rest of the territory for the winter. Cambridge Bay remained on -# central time, while Kugluktuk, even farther west, reverted to -# mountain time, which they had used before the advent of Nunavut's -# unified time zone in 1999. -# -# From Rives McDow (2001-01-20), quoting the Nunavut government: -# The preceding decision came into effect at midnight, Saturday Nov 4, 2000. 
- -# From Paul Eggert (2000-12-04): -# Let's just keep track of the official times for now. - -# From Rives McDow (2001-03-07): -# The premier of Nunavut has issued a ministerial statement advising -# that effective 2001-04-01, the territory of Nunavut will revert -# back to three time zones (mountain, central, and eastern). Of the -# cities in Nunavut, Coral Harbor is the only one that I know of that -# has said it will not observe dst, staying on EST year round. I'm -# checking for more info, and will get back to you if I come up with -# more. -# [Also see (2001-03-09).] - -# From Gwillim Law (2005-05-21): -# According to ... -# http://www.canadiangeographic.ca/Magazine/SO98/geomap.asp -# (from a 1998 Canadian Geographic article), the de facto and de jure time -# for Southampton Island (at the north end of Hudson Bay) is UTC-5 all year -# round. Using Google, it's easy to find other websites that confirm this. -# I wasn't able to find how far back this time regimen goes, but since it -# predates the creation of Nunavut, it probably goes back many years.... -# The Inuktitut name of Coral Harbour is Sallit, but it's rarely used. -# -# From Paul Eggert (2014-10-17): -# For lack of better information, assume that Southampton Island observed -# daylight saving only during wartime. Gwillim Law's email also -# mentioned maps now maintained by National Research Council Canada; -# see above for an up-to-date link. - -# From Chris Walton (2007-03-01): -# ... the community of Resolute (located on Cornwallis Island in -# Nunavut) moved from Central Time to Eastern Time last November. -# Basically the community did not change its clocks at the end of -# daylight saving.... -# http://www.nnsl.com/frames/newspapers/2006-11/nov13_06none.html - -# From Chris Walton (2011-03-21): -# Back in 2007 I initiated the creation of a new "zone file" for Resolute -# Bay. Resolute Bay is a small community located about 900km north of -# the Arctic Circle. The zone file was required because Resolute Bay had -# decided to use UTC-5 instead of UTC-6 for the winter of 2006-2007. -# -# According to new information which I received last week, Resolute Bay -# went back to using UTC-6 in the winter of 2007-2008... -# -# On March 11/2007 most of Canada went onto daylight saving. On March -# 14/2007 I phoned the Resolute Bay hamlet office to do a "time check." I -# talked to somebody that was both knowledgeable and helpful. I was able -# to confirm that Resolute Bay was still operating on UTC-5. It was -# explained to me that Resolute Bay had been on the Eastern Time zone -# (EST) in the winter, and was now back on the Central Time zone (CDT). -# i.e. the time zone had changed twice in the last year but the clocks -# had not moved. The residents had to know which time zone they were in -# so they could follow the correct TV schedule... -# -# On Nov 02/2008 most of Canada went onto standard time. On Nov 03/2008 I -# phoned the Resolute Bay hamlet office...[D]ue to the challenging nature -# of the phone call, I decided to seek out an alternate source of -# information. I found an e-mail address for somebody by the name of -# Stephanie Adams whose job was listed as "Inns North Support Officer for -# Arctic Co-operatives." I was under the impression that Stephanie lived -# and worked in Resolute Bay... -# -# On March 14/2011 I phoned the hamlet office again. 
I was told that -# Resolute Bay had been using Central Standard Time over the winter of -# 2010-2011 and that the clocks had therefore been moved one hour ahead -# on March 13/2011. The person I talked to was aware that Resolute Bay -# had previously experimented with Eastern Standard Time but he could not -# tell me when the practice had stopped. -# -# On March 17/2011 I searched the Web to find an e-mail address of -# somebody that might be able to tell me exactly when Resolute Bay went -# off Eastern Standard Time. I stumbled on the name "Aziz Kheraj." Aziz -# used to be the mayor of Resolute Bay and he apparently owns half the -# businesses including "South Camp Inn." This website has some info on -# Aziz: -# http://www.uphere.ca/node/493 -# -# I sent Aziz an e-mail asking when Resolute Bay had stopped using -# Eastern Standard Time. -# -# Aziz responded quickly with this: "hi, The time was not changed for the -# 1 year only, the following year, the community went back to the old way -# of "spring ahead-fall behind" currently we are zulu plus 5 hrs and in -# the winter Zulu plus 6 hrs" -# -# This of course conflicted with everything I had ascertained in November 2008. -# -# I sent Aziz a copy of my 2008 e-mail exchange with Stephanie. Aziz -# responded with this: "Hi, Stephanie lives in Winnipeg. I live here, You -# may want to check with the weather office in Resolute Bay or do a -# search on the weather through Env. Canada. web site" -# -# If I had realized the Stephanie did not live in Resolute Bay I would -# never have contacted her. I now believe that all the information I -# obtained in November 2008 should be ignored... -# I apologize for reporting incorrect information in 2008. - -# Rule NAME FROM TO TYPE IN ON AT SAVE LETTER/S -Rule NT_YK 1918 only - Apr 14 2:00 1:00 D -Rule NT_YK 1918 only - Oct 27 2:00 0 S -Rule NT_YK 1919 only - May 25 2:00 1:00 D -Rule NT_YK 1919 only - Nov 1 0:00 0 S -Rule NT_YK 1942 only - Feb 9 2:00 1:00 W # War -Rule NT_YK 1945 only - Aug 14 23:00u 1:00 P # Peace -Rule NT_YK 1945 only - Sep 30 2:00 0 S -Rule NT_YK 1965 only - Apr lastSun 0:00 2:00 DD -Rule NT_YK 1965 only - Oct lastSun 2:00 0 S -Rule NT_YK 1980 1986 - Apr lastSun 2:00 1:00 D -Rule NT_YK 1980 2006 - Oct lastSun 2:00 0 S -Rule NT_YK 1987 2006 - Apr Sun>=1 2:00 1:00 D -# Zone NAME GMTOFF RULES FORMAT [UNTIL] -# aka Panniqtuuq -Zone America/Pangnirtung 0 - -00 1921 # trading post est. - -4:00 NT_YK A%sT 1995 Apr Sun>=1 2:00 - -5:00 Canada E%sT 1999 Oct 31 2:00 - -6:00 Canada C%sT 2000 Oct 29 2:00 - -5:00 Canada E%sT -# formerly Frobisher Bay -Zone America/Iqaluit 0 - -00 1942 Aug # Frobisher Bay est. - -5:00 NT_YK E%sT 1999 Oct 31 2:00 - -6:00 Canada C%sT 2000 Oct 29 2:00 - -5:00 Canada E%sT -# aka Qausuittuq -Zone America/Resolute 0 - -00 1947 Aug 31 # Resolute founded - -6:00 NT_YK C%sT 2000 Oct 29 2:00 - -5:00 - EST 2001 Apr 1 3:00 - -6:00 Canada C%sT 2006 Oct 29 2:00 - -5:00 - EST 2007 Mar 11 3:00 - -6:00 Canada C%sT -# aka Kangiqiniq -Zone America/Rankin_Inlet 0 - -00 1957 # Rankin Inlet founded - -6:00 NT_YK C%sT 2000 Oct 29 2:00 - -5:00 - EST 2001 Apr 1 3:00 - -6:00 Canada C%sT -# aka Iqaluktuuttiaq -Zone America/Cambridge_Bay 0 - -00 1920 # trading post est.? - -7:00 NT_YK M%sT 1999 Oct 31 2:00 - -6:00 Canada C%sT 2000 Oct 29 2:00 - -5:00 - EST 2000 Nov 5 0:00 - -6:00 - CST 2001 Apr 1 3:00 - -7:00 Canada M%sT -Zone America/Yellowknife 0 - -00 1935 # Yellowknife founded? 
- -7:00 NT_YK M%sT 1980 - -7:00 Canada M%sT -Zone America/Inuvik 0 - -00 1953 # Inuvik founded - -8:00 NT_YK P%sT 1979 Apr lastSun 2:00 - -7:00 NT_YK M%sT 1980 - -7:00 Canada M%sT -Zone America/Whitehorse -9:00:12 - LMT 1900 Aug 20 - -9:00 NT_YK Y%sT 1967 May 28 0:00 - -8:00 NT_YK P%sT 1980 - -8:00 Canada P%sT -Zone America/Dawson -9:17:40 - LMT 1900 Aug 20 - -9:00 NT_YK Y%sT 1973 Oct 28 0:00 - -8:00 NT_YK P%sT 1980 - -8:00 Canada P%sT - - -############################################################################### - -# Mexico - -# From Paul Eggert (2014-12-07): -# The Investigation and Analysis Service of the -# Mexican Library of Congress (MLoC) has published a -# history of Mexican local time (in Spanish) -# http://www.diputados.gob.mx/bibliot/publica/inveyana/polisoc/horver/index.htm -# -# Here are the discrepancies between Shanks & Pottenger (S&P) and the MLoC. -# (In all cases we go with the MLoC.) -# S&P report that Baja was at -8:00 in 1922/1923. -# S&P say the 1930 transition in Baja was 1930-11-16. -# S&P report no DST during summer 1931. -# S&P report a transition at 1932-03-30 23:00, not 1932-04-01. - -# From Gwillim Law (2001-02-20): -# There are some other discrepancies between the Decrees page and the -# tz database. I think they can best be explained by supposing that -# the researchers who prepared the Decrees page failed to find some of -# the relevant documents. - -# From Alan Perry (1996-02-15): -# A guy from our Mexico subsidiary finally found the Presidential Decree -# outlining the timezone changes in Mexico. -# -# ------------- Begin Forwarded Message ------------- -# -# I finally got my hands on the Official Presidential Decree that sets up the -# rules for the DST changes. The rules are: -# -# 1. The country is divided in 3 timezones: -# - Baja California Norte (the Mexico/BajaNorte TZ) -# - Baja California Sur, Nayarit, Sinaloa and Sonora (the Mexico/BajaSur TZ) -# - The rest of the country (the Mexico/General TZ) -# -# 2. From the first Sunday in April at 2:00 AM to the last Sunday in October -# at 2:00 AM, the times in each zone are as follows: -# BajaNorte: GMT+7 -# BajaSur: GMT+6 -# General: GMT+5 -# -# 3. The rest of the year, the times are as follows: -# BajaNorte: GMT+8 -# BajaSur: GMT+7 -# General: GMT+6 -# -# The Decree was published in Mexico's Official Newspaper on January 4th. -# -# -------------- End Forwarded Message -------------- -# From Paul Eggert (1996-06-12): -# For an English translation of the decree, see -# "Diario Oficial: Time Zone Changeover" (1996-01-04). -# http://mexico-travel.com/extra/timezone_eng.html - -# From Rives McDow (1998-10-08): -# The State of Quintana Roo has reverted back to central STD and DST times -# (i.e. UTC -0600 and -0500 as of 1998-08-02). - -# From Rives McDow (2000-01-10): -# Effective April 4, 1999 at 2:00 AM local time, Sonora changed to the time -# zone 5 hours from the International Date Line, and will not observe daylight -# savings time so as to stay on the same time zone as the southern part of -# Arizona year round. - -# From Jesper Nørgaard, translating -# (2001-01-17): -# In Oaxaca, the 55.000 teachers from the Section 22 of the National -# Syndicate of Education Workers, refuse to apply daylight saving each -# year, so that the more than 10,000 schools work at normal hour the -# whole year. - -# From Gwillim Law (2001-01-19): -# ... says -# (translated):... 
-# January 17, 2000 - The Energy Secretary, Ernesto Martens, announced -# that Summer Time will be reduced from seven to five months, starting -# this year.... -# http://www.publico.com.mx/scripts/texto3.asp?action=pagina&pag=21&pos=p&secc=naci&date=01/17/2001 -# [translated], says "summer time will ... take effect on the first Sunday -# in May, and end on the last Sunday of September. - -# From Arthur David Olson (2001-01-25): -# The 2001-01-24 traditional Washington Post contained the page one -# story "Timely Issue Divides Mexicans."... -# http://www.washingtonpost.com/wp-dyn/articles/A37383-2001Jan23.html -# ... Mexico City Mayor López Obrador "...is threatening to keep -# Mexico City and its 20 million residents on a different time than -# the rest of the country..." In particular, López Obrador would abolish -# observation of Daylight Saving Time. - -# Official statute published by the Energy Department -# http://www.conae.gob.mx/ahorro/decretohorver2001.html#decre -# (2001-02-01) shows Baja and Chihuahua as still using US DST rules, -# and Sonora with no DST. This was reported by Jesper Nørgaard (2001-02-03). - -# From Paul Eggert (2001-03-03): -# -# http://www.latimes.com/news/nation/20010303/t000018766.html -# James F. Smith writes in today's LA Times -# * Sonora will continue to observe standard time. -# * Last week Mexico City's mayor Andrés Manuel López Obrador decreed that -# the Federal District will not adopt DST. -# * 4 of 16 district leaders announced they'll ignore the decree. -# * The decree does not affect federal-controlled facilities including -# the airport, banks, hospitals, and schools. -# -# For now we'll assume that the Federal District will bow to federal rules. - -# From Jesper Nørgaard (2001-04-01): -# I found some references to the Mexican application of daylight -# saving, which modifies what I had already sent you, stating earlier -# that a number of northern Mexican states would go on daylight -# saving. The modification reverts this to only cover Baja California -# (Norte), while all other states (except Sonora, who has no daylight -# saving all year) will follow the original decree of president -# Vicente Fox, starting daylight saving May 6, 2001 and ending -# September 30, 2001. -# References: "Diario de Monterrey" -# Palabra (2001-03-31) - -# From Reuters (2001-09-04): -# Mexico's Supreme Court on Tuesday declared that daylight savings was -# unconstitutional in Mexico City, creating the possibility the -# capital will be in a different time zone from the rest of the nation -# next year.... The Supreme Court's ruling takes effect at 2:00 -# a.m. (0800 GMT) on Sept. 30, when Mexico is scheduled to revert to -# standard time. "This is so residents of the Federal District are not -# subject to unexpected time changes," a statement from the court said. - -# From Jesper Nørgaard Welen (2002-03-12): -# ... consulting my local grocery store(!) and my coworkers, they all insisted -# that a new decision had been made to reinstate US style DST in Mexico.... -# http://www.conae.gob.mx/ahorro/horaver2001_m1_2002.html (2002-02-20) -# confirms this. Sonora as usual is the only state where DST is not applied. - -# From Steffen Thorsen (2009-12-28): -# -# Steffen Thorsen wrote: -# > Mexico's House of Representatives has approved a proposal for northern -# > Mexico's border cities to share the same daylight saving schedule as -# > the United States.
-# Now this has passed both the Congress and the Senate, so starting from -# 2010, some border regions will be the same: -# http://www.signonsandiego.com/news/2009/dec/28/clocks-will-match-both-sides-border/ -# http://www.elmananarey.com/diario/noticia/nacional/noticias/empatan_horario_de_frontera_con_eu/621939 -# (Spanish) -# -# Could not find the new law text, but the proposed law text changes are here: -# http://gaceta.diputados.gob.mx/Gaceta/61/2009/dic/20091210-V.pdf -# (Gaceta Parlamentaria) -# -# There is also a list of the votes here: -# http://gaceta.diputados.gob.mx/Gaceta/61/2009/dic/V2-101209.html -# -# Our page: -# http://www.timeanddate.com/news/time/north-mexico-dst-change.html - -# From Arthur David Olson (2010-01-20): -# The page -# http://dof.gob.mx/nota_detalle.php?codigo=5127480&fecha=06/01/2010 -# includes this text: -# En los municipios fronterizos de Tijuana y Mexicali en Baja California; -# Juárez y Ojinaga en Chihuahua; Acuña y Piedras Negras en Coahuila; -# Anáhuac en Nuevo León; y Nuevo Laredo, Reynosa y Matamoros en -# Tamaulipas, la aplicación de este horario estacional surtirá efecto -# desde las dos horas del segundo domingo de marzo y concluirá a las dos -# horas del primer domingo de noviembre. -# En los municipios fronterizos que se encuentren ubicados en la franja -# fronteriza norte en el territorio comprendido entre la línea -# internacional y la línea paralela ubicada a una distancia de veinte -# kilómetros, así como la Ciudad de Ensenada, Baja California, hacia el -# interior del país, la aplicación de este horario estacional surtirá -# efecto desde las dos horas del segundo domingo de marzo y concluirá a -# las dos horas del primer domingo de noviembre. - -# From Steffen Thorsen (2014-12-08), translated by Gwillim Law: -# The Mexican state of Quintana Roo will likely change to EST in 2015. -# -# http://www.unioncancun.mx/articulo/2014/12/04/medio-ambiente/congreso-aprueba-una-hora-mas-de-sol-en-qroo -# "With this change, the time conflict that has existed between the municipios -# of Quintana Roo and the municipio of Felipe Carrillo Puerto may come to an -# end. The latter declared itself in rebellion 15 years ago when a time change -# was initiated in Mexico, and since then it has refused to change its time -# zone along with the rest of the country." -# -# From Steffen Thorsen (2015-01-14), translated by Gwillim Law: -# http://sipse.com/novedades/confirman-aplicacion-de-nueva-zona-horaria-para-quintana-roo-132331.html -# "...the new time zone will come into effect at two o'clock on the first Sunday -# of February, when we will have to advance the clock one hour from its current -# time..." -# Also, the new zone will not use DST. -# -# From Carlos Raúl Perasso (2015-02-02): -# The decree that modifies the Mexican Hour System Law has finally -# been published at the Diario Oficial de la Federación -# http://www.dof.gob.mx/nota_detalle.php?codigo=5380123&fecha=31/01/2015 -# It establishes 5 zones for Mexico: -# 1- Zona Centro (Central Zone): Corresponds to longitude 90 W, -# includes most of Mexico, excluding what's mentioned below. -# 2- Zona Pacífico (Pacific Zone): Longitude 105 W, includes the -# states of Baja California Sur; Chihuahua; Nayarit (excluding Bahía -# de Banderas which lies in Central Zone); Sinaloa and Sonora. -# 3- Zona Noroeste (Northwest Zone): Longitude 120 W, includes the -# state of Baja California. -# 4- Zona Sureste (Southeast Zone): Longitude 75 W, includes the state -# of Quintana Roo. 
-# 5- The islands, reefs and keys shall take their timezone from the -# longitude they are located at. - -# Rule NAME FROM TO TYPE IN ON AT SAVE LETTER/S -Rule Mexico 1939 only - Feb 5 0:00 1:00 D -Rule Mexico 1939 only - Jun 25 0:00 0 S -Rule Mexico 1940 only - Dec 9 0:00 1:00 D -Rule Mexico 1941 only - Apr 1 0:00 0 S -Rule Mexico 1943 only - Dec 16 0:00 1:00 W # War -Rule Mexico 1944 only - May 1 0:00 0 S -Rule Mexico 1950 only - Feb 12 0:00 1:00 D -Rule Mexico 1950 only - Jul 30 0:00 0 S -Rule Mexico 1996 2000 - Apr Sun>=1 2:00 1:00 D -Rule Mexico 1996 2000 - Oct lastSun 2:00 0 S -Rule Mexico 2001 only - May Sun>=1 2:00 1:00 D -Rule Mexico 2001 only - Sep lastSun 2:00 0 S -Rule Mexico 2002 max - Apr Sun>=1 2:00 1:00 D -Rule Mexico 2002 max - Oct lastSun 2:00 0 S -# Zone NAME GMTOFF RULES FORMAT [UNTIL] -# Quintana Roo; represented by Cancún -Zone America/Cancun -5:47:04 - LMT 1922 Jan 1 0:12:56 - -6:00 - CST 1981 Dec 23 - -5:00 Mexico E%sT 1998 Aug 2 2:00 - -6:00 Mexico C%sT 2015 Feb 1 2:00 - -5:00 - EST -# Campeche, Yucatán; represented by Mérida -Zone America/Merida -5:58:28 - LMT 1922 Jan 1 0:01:32 - -6:00 - CST 1981 Dec 23 - -5:00 - EST 1982 Dec 2 - -6:00 Mexico C%sT -# Coahuila, Nuevo León, Tamaulipas (near US border) -# This includes the following municipalities: -# in Coahuila: Ocampo, Acuña, Zaragoza, Jiménez, Piedras Negras, Nava, -# Guerrero, Hidalgo. -# in Nuevo León: Anáhuac, Los Aldama. -# in Tamaulipas: Nuevo Laredo, Guerrero, Mier, Miguel Alemán, Camargo, -# Gustavo Díaz Ordaz, Reynosa, Río Bravo, Valle Hermoso, Matamoros. -# See: Inicia mañana Horario de Verano en zona fronteriza, El Universal, -# 2016-03-12 -# http://www.eluniversal.com.mx/articulo/estados/2016/03/12/inicia-manana-horario-de-verano-en-zona-fronteriza -Zone America/Matamoros -6:40:00 - LMT 1921 Dec 31 23:20:00 - -6:00 - CST 1988 - -6:00 US C%sT 1989 - -6:00 Mexico C%sT 2010 - -6:00 US C%sT -# Durango; Coahuila, Nuevo León, Tamaulipas (away from US border) -Zone America/Monterrey -6:41:16 - LMT 1921 Dec 31 23:18:44 - -6:00 - CST 1988 - -6:00 US C%sT 1989 - -6:00 Mexico C%sT -# Central Mexico -Zone America/Mexico_City -6:36:36 - LMT 1922 Jan 1 0:23:24 - -7:00 - MST 1927 Jun 10 23:00 - -6:00 - CST 1930 Nov 15 - -7:00 - MST 1931 May 1 23:00 - -6:00 - CST 1931 Oct - -7:00 - MST 1932 Apr 1 - -6:00 Mexico C%sT 2001 Sep 30 2:00 - -6:00 - CST 2002 Feb 20 - -6:00 Mexico C%sT -# Chihuahua (near US border) -# This includes the municipalities of Janos, Ascensión, Juárez, Guadalupe, -# Práxedis G Guerrero, Coyame del Sotol, Ojinaga, and Manuel Benavides. -# (See the 2016-03-12 El Universal source mentioned above.) 
-Zone America/Ojinaga -6:57:40 - LMT 1922 Jan 1 0:02:20 - -7:00 - MST 1927 Jun 10 23:00 - -6:00 - CST 1930 Nov 15 - -7:00 - MST 1931 May 1 23:00 - -6:00 - CST 1931 Oct - -7:00 - MST 1932 Apr 1 - -6:00 - CST 1996 - -6:00 Mexico C%sT 1998 - -6:00 - CST 1998 Apr Sun>=1 3:00 - -7:00 Mexico M%sT 2010 - -7:00 US M%sT -# Chihuahua (away from US border) -Zone America/Chihuahua -7:04:20 - LMT 1921 Dec 31 23:55:40 - -7:00 - MST 1927 Jun 10 23:00 - -6:00 - CST 1930 Nov 15 - -7:00 - MST 1931 May 1 23:00 - -6:00 - CST 1931 Oct - -7:00 - MST 1932 Apr 1 - -6:00 - CST 1996 - -6:00 Mexico C%sT 1998 - -6:00 - CST 1998 Apr Sun>=1 3:00 - -7:00 Mexico M%sT -# Sonora -Zone America/Hermosillo -7:23:52 - LMT 1921 Dec 31 23:36:08 - -7:00 - MST 1927 Jun 10 23:00 - -6:00 - CST 1930 Nov 15 - -7:00 - MST 1931 May 1 23:00 - -6:00 - CST 1931 Oct - -7:00 - MST 1932 Apr 1 - -6:00 - CST 1942 Apr 24 - -7:00 - MST 1949 Jan 14 - -8:00 - PST 1970 - -7:00 Mexico M%sT 1999 - -7:00 - MST - -# From Alexander Krivenyshev (2010-04-21): -# According to news, Bahía de Banderas (Mexican state of Nayarit) -# changed time zone UTC-7 to new time zone UTC-6 on April 4, 2010 (to -# share the same time zone as nearby city Puerto Vallarta, Jalisco). -# -# (Spanish) -# Bahía de Banderas homologa su horario al del centro del -# país, a partir de este domingo -# http://www.nayarit.gob.mx/notes.asp?id=20748 -# -# Bahía de Banderas homologa su horario con el del Centro del -# País -# http://www.bahiadebanderas.gob.mx/principal/index.php?option=com_content&view=article&id=261:bahia-de-banderas-homologa-su-horario-con-el-del-centro-del-pais&catid=42:comunicacion-social&Itemid=50 -# -# (English) -# Puerto Vallarta and Bahía de Banderas: One Time Zone -# http://virtualvallarta.com/puertovallarta/puertovallarta/localnews/2009-12-03-Puerto-Vallarta-and-Bahia-de-Banderas-One-Time-Zone.shtml -# http://www.worldtimezone.com/dst_news/dst_news_mexico08.html -# -# "Mexico's Senate approved the amendments to the Mexican Schedule System that -# will allow Bahía de Banderas and Puerto Vallarta to share the same time -# zone ..." -# Baja California Sur, Nayarit, Sinaloa - -# From Arthur David Olson (2010-05-01): -# Use "Bahia_Banderas" to keep the name to fourteen characters. 
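The Bahía de Banderas change described above is encoded in the America/Bahia_Banderas zone entry below (Mountain-based time until 2010 Apr 4 2:00, Central-based time with Mexico's DST rules afterwards). As a quick cross-check of that transition, here is a minimal sketch, assuming Python 3.9+ with the standard zoneinfo module and an installed copy of the tz database; exact output depends on the tzdata version in use:

from datetime import datetime
from zoneinfo import ZoneInfo

bdb = ZoneInfo("America/Bahia_Banderas")
# Before the 2010-04-04 02:00 switch: Mountain-based time (-7, "MST" in winter).
print(datetime(2010, 1, 15, 12, 0, tzinfo=bdb).utcoffset(),
      datetime(2010, 1, 15, 12, 0, tzinfo=bdb).tzname())
# After the switch: Central-based time with Mexico's DST rules (-5, "CDT" in summer).
print(datetime(2010, 7, 15, 12, 0, tzinfo=bdb).utcoffset(),
      datetime(2010, 7, 15, 12, 0, tzinfo=bdb).tzname())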
- -# Mazatlán -Zone America/Mazatlan -7:05:40 - LMT 1921 Dec 31 23:54:20 - -7:00 - MST 1927 Jun 10 23:00 - -6:00 - CST 1930 Nov 15 - -7:00 - MST 1931 May 1 23:00 - -6:00 - CST 1931 Oct - -7:00 - MST 1932 Apr 1 - -6:00 - CST 1942 Apr 24 - -7:00 - MST 1949 Jan 14 - -8:00 - PST 1970 - -7:00 Mexico M%sT - -# Bahía de Banderas -Zone America/Bahia_Banderas -7:01:00 - LMT 1921 Dec 31 23:59:00 - -7:00 - MST 1927 Jun 10 23:00 - -6:00 - CST 1930 Nov 15 - -7:00 - MST 1931 May 1 23:00 - -6:00 - CST 1931 Oct - -7:00 - MST 1932 Apr 1 - -6:00 - CST 1942 Apr 24 - -7:00 - MST 1949 Jan 14 - -8:00 - PST 1970 - -7:00 Mexico M%sT 2010 Apr 4 2:00 - -6:00 Mexico C%sT - -# Baja California -Zone America/Tijuana -7:48:04 - LMT 1922 Jan 1 0:11:56 - -7:00 - MST 1924 - -8:00 - PST 1927 Jun 10 23:00 - -7:00 - MST 1930 Nov 15 - -8:00 - PST 1931 Apr 1 - -8:00 1:00 PDT 1931 Sep 30 - -8:00 - PST 1942 Apr 24 - -8:00 1:00 PWT 1945 Aug 14 23:00u - -8:00 1:00 PPT 1945 Nov 12 # Peace - -8:00 - PST 1948 Apr 5 - -8:00 1:00 PDT 1949 Jan 14 - -8:00 - PST 1954 - -8:00 CA P%sT 1961 - -8:00 - PST 1976 - -8:00 US P%sT 1996 - -8:00 Mexico P%sT 2001 - -8:00 US P%sT 2002 Feb 20 - -8:00 Mexico P%sT 2010 - -8:00 US P%sT -# From Paul Eggert (2006-03-22): -# Formerly there was an America/Ensenada zone, which differed from -# America/Tijuana only in that it did not observe DST from 1976 -# through 1995. This was as per Shanks (1999). But Shanks & Pottenger say -# Ensenada did not observe DST from 1948 through 1975. Guy Harris reports -# that the 1987 OAG says "Only Ensenada, Mexicali, San Felipe and -# Tijuana observe DST," which agrees with Shanks & Pottenger but implies that -# DST-observance was a town-by-town matter back then. This concerns -# data after 1970 so most likely there should be at least one Zone -# other than America/Tijuana for Baja, but it's not clear yet what its -# name or contents should be. -# -# From Paul Eggert (2015-10-08): -# Formerly there was an America/Santa_Isabel zone, but this appears to -# have come from a misreading of -# http://dof.gob.mx/nota_detalle.php?codigo=5127480&fecha=06/01/2010 -# It has been moved to the 'backward' file. -# -# -# Revillagigedo Is -# no information - -############################################################################### - -# Anguilla -# Antigua and Barbuda -# See America/Port_of_Spain. - -# Bahamas -# -# For 1899 Milne gives -5:09:29.5; round that. -# -# From Sue Williams (2006-12-07): -# The Bahamas announced about a month ago that they plan to change their DST -# rules to sync with the U.S. starting in 2007.... -# http://www.jonesbahamas.com/?c=45&a=10412 - -# Rule NAME FROM TO TYPE IN ON AT SAVE LETTER/S -Rule Bahamas 1964 1975 - Oct lastSun 2:00 0 S -Rule Bahamas 1964 1975 - Apr lastSun 2:00 1:00 D -# Zone NAME GMTOFF RULES FORMAT [UNTIL] -Zone America/Nassau -5:09:30 - LMT 1912 Mar 2 - -5:00 Bahamas E%sT 1976 - -5:00 US E%sT - -# Barbados - -# For 1899 Milne gives -3:58:29.2; round that. - -# Rule NAME FROM TO TYPE IN ON AT SAVE LETTER/S -Rule Barb 1977 only - Jun 12 2:00 1:00 D -Rule Barb 1977 1978 - Oct Sun>=1 2:00 0 S -Rule Barb 1978 1980 - Apr Sun>=15 2:00 1:00 D -Rule Barb 1979 only - Sep 30 2:00 0 S -Rule Barb 1980 only - Sep 25 2:00 0 S -# Zone NAME GMTOFF RULES FORMAT [UNTIL] -Zone America/Barbados -3:58:29 - LMT 1924 # Bridgetown - -3:58:29 - BMT 1932 # Bridgetown Mean Time - -4:00 Barb A%sT - -# Belize -# Whitman entirely disagrees with Shanks; go with Shanks & Pottenger. 
-# Rule NAME FROM TO TYPE IN ON AT SAVE LETTER/S -Rule Belize 1918 1942 - Oct Sun>=2 0:00 0:30 -0530 -Rule Belize 1919 1943 - Feb Sun>=9 0:00 0 CST -Rule Belize 1973 only - Dec 5 0:00 1:00 CDT -Rule Belize 1974 only - Feb 9 0:00 0 CST -Rule Belize 1982 only - Dec 18 0:00 1:00 CDT -Rule Belize 1983 only - Feb 12 0:00 0 CST -# Zone NAME GMTOFF RULES FORMAT [UNTIL] -Zone America/Belize -5:52:48 - LMT 1912 Apr - -6:00 Belize %s - -# Bermuda - -# For 1899 Milne gives -4:19:18.3 as the meridian of the clock tower, -# Bermuda dockyard, Ireland I; round that. - -# From Dan Jones, reporting in The Royal Gazette (2006-06-26): - -# Next year, however, clocks in the US will go forward on the second Sunday -# in March, until the first Sunday in November. And, after the Time Zone -# (Seasonal Variation) Bill 2006 was passed in the House of Assembly on -# Friday, the same thing will happen in Bermuda. -# http://www.theroyalgazette.com/apps/pbcs.dll/article?AID=/20060529/NEWS/105290135 - -# Zone NAME GMTOFF RULES FORMAT [UNTIL] -Zone Atlantic/Bermuda -4:19:18 - LMT 1930 Jan 1 2:00 # Hamilton - -4:00 - AST 1974 Apr 28 2:00 - -4:00 Canada A%sT 1976 - -4:00 US A%sT - -# Cayman Is -# See America/Panama. - -# Costa Rica - -# Milne gives -5:36:13.3 as San José mean time; round to nearest. - -# Rule NAME FROM TO TYPE IN ON AT SAVE LETTER/S -Rule CR 1979 1980 - Feb lastSun 0:00 1:00 D -Rule CR 1979 1980 - Jun Sun>=1 0:00 0 S -Rule CR 1991 1992 - Jan Sat>=15 0:00 1:00 D -# IATA SSIM (1991-09) says the following was at 1:00; -# go with Shanks & Pottenger. -Rule CR 1991 only - Jul 1 0:00 0 S -Rule CR 1992 only - Mar 15 0:00 0 S -# There are too many San Josés elsewhere, so we'll use 'Costa Rica'. -# Zone NAME GMTOFF RULES FORMAT [UNTIL] -Zone America/Costa_Rica -5:36:13 - LMT 1890 # San José - -5:36:13 - SJMT 1921 Jan 15 # San José Mean Time - -6:00 CR C%sT -# Coco -# no information; probably like America/Costa_Rica - -# Cuba - -# From Paul Eggert (2013-02-21): -# Milne gives -5:28:50.45 for the observatory at Havana, -5:29:23.57 -# for the port, and -5:30 for meteorological observations. -# For now, stick with Shanks & Pottenger. - -# From Arthur David Olson (1999-03-29): -# The 1999-03-28 exhibition baseball game held in Havana, Cuba, between -# the Cuban National Team and the Baltimore Orioles was carried live on -# the Orioles Radio Network, including affiliate WTOP in Washington, DC. -# During the game, play-by-play announcer Jim Hunter noted that -# "We'll be losing two hours of sleep...Cuba switched to Daylight Saving -# Time today." (The "two hour" remark referred to losing one hour of -# sleep on 1999-03-28 - when the announcers were in Cuba as it switched -# to DST - and one more hour on 1999-04-04 - when the announcers will have -# returned to Baltimore, which switches on that date.) - -# From Steffen Thorsen (2013-11-11): -# DST start in Cuba in 2004 ... does not follow the same rules as the -# years before. The correct date should be Sunday 2004-03-28 00:00 ... -# https://web.archive.org/web/20040402060750/http://www.granma.cu/espanol/2004/marzo/sab27/reloj.html - -# From Evert van der Veer via Steffen Thorsen (2004-10-28): -# Cuba is not going back to standard time this year. -# From Paul Eggert (2006-03-22): -# http://www.granma.cu/ingles/2004/septiembre/juev30/41medid-i.html -# says that it's due to a problem at the Antonio Guiteras -# thermoelectric plant, and says "This October there will be no return -# to normal hours (after daylight saving time)". 
-# For now, let's assume that it's a temporary measure. - -# From Carlos A. Carnero Delgado (2005-11-12): -# This year (just like in 2004-2005) there's no change in time zone -# adjustment in Cuba. We will stay in daylight saving time: -# http://www.granma.cu/espanol/2005/noviembre/mier9/horario.html - -# From Jesper Nørgaard Welen (2006-10-21): -# An article in GRANMA INTERNACIONAL claims that Cuba will end -# the 3 years of permanent DST next weekend, see -# http://www.granma.cu/ingles/2006/octubre/lun16/43horario.html -# "On Saturday night, October 28 going into Sunday, October 29, at 01:00, -# watches should be set back one hour - going back to 00:00 hours - returning -# to the normal schedule.... - -# From Paul Eggert (2007-03-02): -# , dated yesterday, -# says Cuban clocks will advance at midnight on March 10. -# For lack of better information, assume Cuba will use US rules, -# except that it switches at midnight standard time as usual. -# -# From Steffen Thorsen (2007-10-25): -# Carlos Alberto Fonseca Arauz informed me that Cuba will end DST one week -# earlier - on the last Sunday of October, just like in 2006. -# -# He supplied these references: -# -# http://www.prensalatina.com.mx/article.asp?ID=%7B4CC32C1B-A9F7-42FB-8A07-8631AFC923AF%7D&language=ES -# http://actualidad.terra.es/sociedad/articulo/cuba_llama_ahorrar_energia_cambio_1957044.htm -# -# From Alex Krivenyshev (2007-10-25): -# Here is also article from Granma (Cuba): -# -# Regirá el Horario Normal desde el próximo domingo 28 de octubre -# http://www.granma.cubaweb.cu/2007/10/24/nacional/artic07.html -# -# http://www.worldtimezone.com/dst_news/dst_news_cuba03.html - -# From Arthur David Olson (2008-03-09): -# I'm in Maryland which is now observing United States Eastern Daylight -# Time. At 9:44 local time I used RealPlayer to listen to -# http://media.enet.cu/radioreloj -# a Cuban information station, and heard -# the time announced as "ocho cuarenta y cuatro" ("eight forty-four"), -# indicating that Cuba is still on standard time. - -# From Steffen Thorsen (2008-03-12): -# It seems that Cuba will start DST on Sunday, 2008-03-16... -# It was announced yesterday, according to this source (in Spanish): -# http://www.nnc.cubaweb.cu/marzo-2008/cien-1-11-3-08.htm -# -# Some more background information is posted here: -# http://www.timeanddate.com/news/time/cuba-starts-dst-march-16.html -# -# The article also says that Cuba has been observing DST since 1963, -# while Shanks (and tzdata) has 1965 as the first date (except in the -# 1940's). Many other web pages in Cuba also claim that it has been -# observed since 1963, but with the exception of 1970 - an exception -# which is not present in tzdata/Shanks. So there is a chance we need to -# change some historic records as well. -# -# One example: -# http://www.radiohc.cu/espanol/noticias/mar07/11mar/hor.htm - -# From Jesper Nørgaard Welen (2008-03-13): -# The Cuban time change has just been confirmed on the most authoritative -# web site, the Granma. Please check out -# http://www.granma.cubaweb.cu/2008/03/13/nacional/artic10.html -# -# Basically as expected after Steffen Thorsen's information, the change -# will take place midnight between Saturday and Sunday. - -# From Arthur David Olson (2008-03-12): -# Assume Sun>=15 (third Sunday) going forward. 
- -# From Alexander Krivenyshev (2009-03-04) -# According to the Radio Reloj - Cuba will start Daylight Saving Time on -# midnight between Saturday, March 07, 2009 and Sunday, March 08, 2009- -# not on midnight March 14 / March 15 as previously thought. -# -# http://www.worldtimezone.com/dst_news/dst_news_cuba05.html -# (in Spanish) - -# From Arthur David Olson (2009-03-09) -# I listened over the Internet to -# http://media.enet.cu/readioreloj -# this morning; when it was 10:05 a. m. here in Bethesda, Maryland -# the time was announced as "diez cinco" - the same time as here, indicating -# that Cuba has indeed switched to DST. Assume second Sunday from 2009 forward. - -# From Steffen Thorsen (2011-03-08): -# Granma announced that Cuba is going to start DST on 2011-03-20 00:00:00 -# this year. Nothing about the end date known so far (if that has -# changed at all). -# -# Source: -# http://granma.co.cu/2011/03/08/nacional/artic01.html -# -# Our info: -# http://www.timeanddate.com/news/time/cuba-starts-dst-2011.html -# -# From Steffen Thorsen (2011-10-30) -# Cuba will end DST two weeks later this year. Instead of going back -# tonight, it has been delayed to 2011-11-13 at 01:00. -# -# One source (Spanish) -# http://www.radioangulo.cu/noticias/cuba/17105-cuba-restablecera-el-horario-del-meridiano-de-greenwich.html -# -# Our page: -# http://www.timeanddate.com/news/time/cuba-time-changes-2011.html -# -# From Steffen Thorsen (2012-03-01) -# According to Radio Reloj, Cuba will start DST on Midnight between March -# 31 and April 1. -# -# Radio Reloj has the following info (Spanish): -# http://www.radioreloj.cu/index.php/noticias-radio-reloj/71-miscelaneas/7529-cuba-aplicara-el-horario-de-verano-desde-el-1-de-abril -# -# Our info on it: -# http://www.timeanddate.com/news/time/cuba-starts-dst-2012.html - -# From Steffen Thorsen (2012-11-03): -# Radio Reloj and many other sources report that Cuba is changing back -# to standard time on 2012-11-04: -# http://www.radioreloj.cu/index.php/noticias-radio-reloj/36-nacionales/9961-regira-horario-normal-en-cuba-desde-el-domingo-cuatro-de-noviembre -# From Paul Eggert (2012-11-03): -# For now, assume the future rule is first Sunday in November. 
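The "second Sunday" and "first Sunday" assumptions above correspond to the Sun>=8 and Sun>=1 ON fields in the rules below, and lastSun means the last such weekday of the month. Here is a minimal sketch of how those ON specifications resolve to concrete dates (the helper name and the restriction to the ">=", "last", and plain-day forms are my own simplification; real zic also accepts forms such as Sun<=25), assuming Python 3:

from datetime import date, timedelta

def zic_on_date(year, month, spec):
    """Resolve a simplified zic ON field ('Sun>=8', 'lastSun', or a day number) to a date."""
    weekdays = ["Mon", "Tue", "Wed", "Thu", "Fri", "Sat", "Sun"]
    if spec.isdigit():                       # plain day-of-month, e.g. "13"
        return date(year, month, int(spec))
    if spec.startswith("last"):              # e.g. "lastSun": last such weekday in the month
        target = weekdays.index(spec[4:])
        d = date(year + (month == 12), month % 12 + 1, 1) - timedelta(days=1)
        while d.weekday() != target:
            d -= timedelta(days=1)
        return d
    day, n = spec.split(">=")                # e.g. "Sun>=8": first such weekday on/after day n
    target = weekdays.index(day)
    d = date(year, month, int(n))
    while d.weekday() != target:
        d += timedelta(days=1)
    return d

# Per the Cuba rules below, DST starts "Mar Sun>=8" and ends "Nov Sun>=1":
print(zic_on_date(2013, 3, "Sun>=8"))    # 2013-03-10, the second Sunday in March
print(zic_on_date(2013, 11, "Sun>=1"))   # 2013-11-03, the first Sunday in November
print(zic_on_date(2013, 10, "lastSun"))  # 2013-10-27, used by many of the older rules above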
- -# Rule NAME FROM TO TYPE IN ON AT SAVE LETTER/S -Rule Cuba 1928 only - Jun 10 0:00 1:00 D -Rule Cuba 1928 only - Oct 10 0:00 0 S -Rule Cuba 1940 1942 - Jun Sun>=1 0:00 1:00 D -Rule Cuba 1940 1942 - Sep Sun>=1 0:00 0 S -Rule Cuba 1945 1946 - Jun Sun>=1 0:00 1:00 D -Rule Cuba 1945 1946 - Sep Sun>=1 0:00 0 S -Rule Cuba 1965 only - Jun 1 0:00 1:00 D -Rule Cuba 1965 only - Sep 30 0:00 0 S -Rule Cuba 1966 only - May 29 0:00 1:00 D -Rule Cuba 1966 only - Oct 2 0:00 0 S -Rule Cuba 1967 only - Apr 8 0:00 1:00 D -Rule Cuba 1967 1968 - Sep Sun>=8 0:00 0 S -Rule Cuba 1968 only - Apr 14 0:00 1:00 D -Rule Cuba 1969 1977 - Apr lastSun 0:00 1:00 D -Rule Cuba 1969 1971 - Oct lastSun 0:00 0 S -Rule Cuba 1972 1974 - Oct 8 0:00 0 S -Rule Cuba 1975 1977 - Oct lastSun 0:00 0 S -Rule Cuba 1978 only - May 7 0:00 1:00 D -Rule Cuba 1978 1990 - Oct Sun>=8 0:00 0 S -Rule Cuba 1979 1980 - Mar Sun>=15 0:00 1:00 D -Rule Cuba 1981 1985 - May Sun>=5 0:00 1:00 D -Rule Cuba 1986 1989 - Mar Sun>=14 0:00 1:00 D -Rule Cuba 1990 1997 - Apr Sun>=1 0:00 1:00 D -Rule Cuba 1991 1995 - Oct Sun>=8 0:00s 0 S -Rule Cuba 1996 only - Oct 6 0:00s 0 S -Rule Cuba 1997 only - Oct 12 0:00s 0 S -Rule Cuba 1998 1999 - Mar lastSun 0:00s 1:00 D -Rule Cuba 1998 2003 - Oct lastSun 0:00s 0 S -Rule Cuba 2000 2003 - Apr Sun>=1 0:00s 1:00 D -Rule Cuba 2004 only - Mar lastSun 0:00s 1:00 D -Rule Cuba 2006 2010 - Oct lastSun 0:00s 0 S -Rule Cuba 2007 only - Mar Sun>=8 0:00s 1:00 D -Rule Cuba 2008 only - Mar Sun>=15 0:00s 1:00 D -Rule Cuba 2009 2010 - Mar Sun>=8 0:00s 1:00 D -Rule Cuba 2011 only - Mar Sun>=15 0:00s 1:00 D -Rule Cuba 2011 only - Nov 13 0:00s 0 S -Rule Cuba 2012 only - Apr 1 0:00s 1:00 D -Rule Cuba 2012 max - Nov Sun>=1 0:00s 0 S -Rule Cuba 2013 max - Mar Sun>=8 0:00s 1:00 D - -# Zone NAME GMTOFF RULES FORMAT [UNTIL] -Zone America/Havana -5:29:28 - LMT 1890 - -5:29:36 - HMT 1925 Jul 19 12:00 # Havana MT - -5:00 Cuba C%sT - -# Dominica -# See America/Port_of_Spain. - -# Dominican Republic - -# From Steffen Thorsen (2000-10-30): -# Enrique Morales reported to me that the Dominican Republic has changed the -# time zone to Eastern Standard Time as of Sunday 29 at 2 am.... -# http://www.listin.com.do/antes/261000/republica/princi.html - -# From Paul Eggert (2000-12-04): -# That URL (2000-10-26, in Spanish) says they planned to use US-style DST. - -# From Rives McDow (2000-12-01): -# Dominican Republic changed its mind and presidential decree on Tuesday, -# November 28, 2000, with a new decree. On Sunday, December 3 at 1:00 AM the -# Dominican Republic will be reverting to 8 hours from the International Date -# Line, and will not be using DST in the foreseeable future. The reason they -# decided to use DST was to be in synch with Puerto Rico, who was also going -# to implement DST. When Puerto Rico didn't implement DST, the president -# decided to revert. - - -# Rule NAME FROM TO TYPE IN ON AT SAVE LETTER/S -Rule DR 1966 only - Oct 30 0:00 1:00 EDT -Rule DR 1967 only - Feb 28 0:00 0 EST -Rule DR 1969 1973 - Oct lastSun 0:00 0:30 -0430 -Rule DR 1970 only - Feb 21 0:00 0 EST -Rule DR 1971 only - Jan 20 0:00 0 EST -Rule DR 1972 1974 - Jan 21 0:00 0 EST -# Zone NAME GMTOFF RULES FORMAT [UNTIL] -Zone America/Santo_Domingo -4:39:36 - LMT 1890 - -4:40 - SDMT 1933 Apr 1 12:00 # S. Dom. 
MT - -5:00 DR %s 1974 Oct 27 - -4:00 - AST 2000 Oct 29 2:00 - -5:00 US E%sT 2000 Dec 3 1:00 - -4:00 - AST - -# El Salvador - -# Rule NAME FROM TO TYPE IN ON AT SAVE LETTER/S -Rule Salv 1987 1988 - May Sun>=1 0:00 1:00 D -Rule Salv 1987 1988 - Sep lastSun 0:00 0 S -# There are too many San Salvadors elsewhere, so use America/El_Salvador -# instead of America/San_Salvador. -# Zone NAME GMTOFF RULES FORMAT [UNTIL] -Zone America/El_Salvador -5:56:48 - LMT 1921 # San Salvador - -6:00 Salv C%sT - -# Grenada -# Guadeloupe -# St Barthélemy -# St Martin (French part) -# See America/Port_of_Spain. - -# Guatemala -# -# From Gwillim Law (2006-04-22), after a heads-up from Oscar van Vlijmen: -# Diario Co Latino, at -# , -# says in an article dated 2006-04-19 that the Guatemalan government had -# decided on that date to advance official time by 60 minutes, to lessen the -# impact of the elevated cost of oil.... Daylight saving time will last from -# 2006-04-29 24:00 (Guatemalan standard time) to 2006-09-30 (time unspecified). -# From Paul Eggert (2006-06-22): -# The Ministry of Energy and Mines, press release CP-15/2006 -# (2006-04-19), says DST ends at 24:00. See -# http://www.sieca.org.gt/Sitio_publico/Energeticos/Doc/Medidas/Cambio_Horario_Nac_190406.pdf - -# Rule NAME FROM TO TYPE IN ON AT SAVE LETTER/S -Rule Guat 1973 only - Nov 25 0:00 1:00 D -Rule Guat 1974 only - Feb 24 0:00 0 S -Rule Guat 1983 only - May 21 0:00 1:00 D -Rule Guat 1983 only - Sep 22 0:00 0 S -Rule Guat 1991 only - Mar 23 0:00 1:00 D -Rule Guat 1991 only - Sep 7 0:00 0 S -Rule Guat 2006 only - Apr 30 0:00 1:00 D -Rule Guat 2006 only - Oct 1 0:00 0 S -# Zone NAME GMTOFF RULES FORMAT [UNTIL] -Zone America/Guatemala -6:02:04 - LMT 1918 Oct 5 - -6:00 Guat C%sT - -# Haiti -# From Gwillim Law (2005-04-15): -# Risto O. Nykänen wrote me that Haiti is now on DST. -# I searched for confirmation, and I found a press release -# on the Web page of the Haitian Consulate in Chicago (2005-03-31), -# . Translated from French, it says: -# -# "The Prime Minister's Communication Office notifies the public in general -# and the press in particular that, following a decision of the Interior -# Ministry and the Territorial Collectivities [I suppose that means the -# provinces], Haiti will move to Eastern Daylight Time in the night from next -# Saturday the 2nd to Sunday the 3rd. -# -# "Consequently, the Prime Minister's Communication Office wishes to inform -# the population that the country's clocks will be set forward one hour -# starting at midnight. This provision will hold until the last Saturday in -# October 2005. -# -# "Port-au-Prince, March 31, 2005" -# -# From Steffen Thorsen (2006-04-04): -# I have been informed by users that Haiti observes DST this year like -# last year, so the current "only" rule for 2005 might be changed to a -# "max" rule or to last until 2006. (Who knows if they will observe DST -# next year or if they will extend their DST like US/Canada next year). -# -# I have found this article about it (in French): -# http://www.haitipressnetwork.com/news.cfm?articleID=7612 -# -# The reason seems to be an energy crisis. - -# From Stephen Colebourne (2007-02-22): -# Some IATA info: Haiti won't be having DST in 2007. - -# From Steffen Thorsen (2012-03-11): -# According to several news sources, Haiti will observe DST this year, -# apparently using the same start and end date as USA/Canada. -# So this means they have already changed their time. 
-# -# http://www.alterpresse.org/spip.php?article12510 -# http://radiovision2000haiti.net/home/?p=13253 -# -# From Arthur David Olson (2012-03-11): -# The alterpresse.org source seems to show a US-style leap from 2:00 a.m. to -# 3:00 a.m. rather than the traditional Haitian jump at midnight. -# Assume a US-style fall back as well. - -# From Steffen Thorsen (2013-03-10): -# It appears that Haiti is observing DST this year as well, same rules -# as US/Canada. They did it last year as well, and it looks like they -# are going to observe DST every year now... -# -# http://radiovision2000haiti.net/public/haiti-avis-changement-dheure-dimanche/ -# http://www.canalplushaiti.net/?p=6714 - -# From Steffen Thorsen (2016-03-12): -# Jean Antoine, editor of www.haiti-reference.com informed us that Haiti -# are not going on DST this year. Several other resources confirm this: ... -# http://www.radiotelevisioncaraibes.com/presse/heure_d_t_pas_de_changement_d_heure_pr_vu_pour_cet_ann_e.html -# http://www.vantbefinfo.com/changement-dheure-pas-pour-haiti/ -# http://news.anmwe.com/haiti-lheure-nationale-ne-sera-ni-avancee-ni-reculee-cette-annee/ - -# From Steffen Thorsen (2017-03-12): -# We have received 4 mails from different people telling that Haiti -# has started DST again today, and this source seems to confirm that, -# I have not been able to find a more authoritative source: -# https://www.haitilibre.com/en/news-20319-haiti-notices-time-change-in-haiti.html - -# Rule NAME FROM TO TYPE IN ON AT SAVE LETTER/S -Rule Haiti 1983 only - May 8 0:00 1:00 D -Rule Haiti 1984 1987 - Apr lastSun 0:00 1:00 D -Rule Haiti 1983 1987 - Oct lastSun 0:00 0 S -# Shanks & Pottenger say AT is 2:00, but IATA SSIM (1991/1997) says 1:00s. -# Go with IATA. -Rule Haiti 1988 1997 - Apr Sun>=1 1:00s 1:00 D -Rule Haiti 1988 1997 - Oct lastSun 1:00s 0 S -Rule Haiti 2005 2006 - Apr Sun>=1 0:00 1:00 D -Rule Haiti 2005 2006 - Oct lastSun 0:00 0 S -Rule Haiti 2012 2015 - Mar Sun>=8 2:00 1:00 D -Rule Haiti 2012 2015 - Nov Sun>=1 2:00 0 S -Rule Haiti 2017 max - Mar Sun>=8 2:00 1:00 D -Rule Haiti 2017 max - Nov Sun>=1 2:00 0 S -# Zone NAME GMTOFF RULES FORMAT [UNTIL] -Zone America/Port-au-Prince -4:49:20 - LMT 1890 - -4:49 - PPMT 1917 Jan 24 12:00 # P-a-P MT - -5:00 Haiti E%sT - -# Honduras -# Shanks & Pottenger say 1921 Jan 1; go with Whitman's more precise Apr 1. - -# From Paul Eggert (2006-05-05): -# worldtimezone.com reports a 2006-05-02 Spanish-language AP article -# saying Honduras will start using DST midnight Saturday, effective 4 -# months until September. La Tribuna reported today -# that Manuel Zelaya, the president -# of Honduras, refused to back down on this. - -# From Jesper Nørgaard Welen (2006-08-08): -# It seems that Honduras has returned from DST to standard time this Monday at -# 00:00 hours (prolonging Sunday to 25 hours duration). -# http://www.worldtimezone.com/dst_news/dst_news_honduras04.html - -# From Paul Eggert (2006-08-08): -# Also see Diario El Heraldo, The country returns to standard time (2006-08-08). -# http://www.elheraldo.hn/nota.php?nid=54941&sec=12 -# It mentions executive decree 18-2006. - -# From Steffen Thorsen (2006-08-17): -# Honduras will observe DST from 2007 to 2009, exact dates are not -# published, I have located this authoritative source: -# http://www.presidencia.gob.hn/noticia.aspx?nId=47 - -# From Steffen Thorsen (2007-03-30): -# http://www.laprensahn.com/pais_nota.php?id04962=7386 -# So it seems that Honduras will not enter DST this year.... 
- -# Rule NAME FROM TO TYPE IN ON AT SAVE LETTER/S -Rule Hond 1987 1988 - May Sun>=1 0:00 1:00 D -Rule Hond 1987 1988 - Sep lastSun 0:00 0 S -Rule Hond 2006 only - May Sun>=1 0:00 1:00 D -Rule Hond 2006 only - Aug Mon>=1 0:00 0 S -# Zone NAME GMTOFF RULES FORMAT [UNTIL] -Zone America/Tegucigalpa -5:48:52 - LMT 1921 Apr - -6:00 Hond C%sT -# -# Great Swan I ceded by US to Honduras in 1972 - -# Jamaica -# Shanks & Pottenger give -5:07:12, but Milne records -5:07:10.41 from an -# unspecified official document, and says "This time is used throughout the -# island". Go with Milne. Round to the nearest second as required by zic. -# -# Shanks & Pottenger give April 28 for the 1974 spring-forward transition, but -# Lance Neita writes that Prime Minister Michael Manley decreed it January 5. -# Assume Neita meant Jan 6 02:00, the same as the US. Neita also writes that -# Manley's supporters associated this act with Manley's nickname "Joshua" -# (recall that in the Bible the sun stood still at Joshua's request), -# and with the Rod of Correction which Manley said he had received from -# Haile Selassie, Emperor of Ethiopia. See: -# Neita L. The politician in all of us. Jamaica Observer 2014-09-20 -# http://www.jamaicaobserver.com/columns/The-politician-in-all-of-us_17573647 -# -# Zone NAME GMTOFF RULES FORMAT [UNTIL] -Zone America/Jamaica -5:07:11 - LMT 1890 # Kingston - -5:07:11 - KMT 1912 Feb # Kingston Mean Time - -5:00 - EST 1974 - -5:00 US E%sT 1984 - -5:00 - EST - -# Martinique -# Zone NAME GMTOFF RULES FORMAT [UNTIL] -Zone America/Martinique -4:04:20 - LMT 1890 # Fort-de-France - -4:04:20 - FFMT 1911 May # Fort-de-France MT - -4:00 - AST 1980 Apr 6 - -4:00 1:00 ADT 1980 Sep 28 - -4:00 - AST - -# Montserrat -# See America/Port_of_Spain. - -# Nicaragua -# -# This uses Shanks & Pottenger for times before 2005. -# -# From Steffen Thorsen (2005-04-12): -# I've got reports from 8 different people that Nicaragua just started -# DST on Sunday 2005-04-10, in order to save energy because of -# expensive petroleum. The exact end date for DST is not yet -# announced, only "September" but some sites also say "mid-September". -# Some background information is available on the President's official site: -# http://www.presidencia.gob.ni/Presidencia/Files_index/Secretaria/Notas%20de%20Prensa/Presidente/2005/ABRIL/Gobierno-de-nicaragua-adelanta-hora-oficial-06abril.htm -# The Decree, no 23-2005 is available here: -# http://www.presidencia.gob.ni/buscador_gaceta/BD/DECRETOS/2005/Decreto%2023-2005%20Se%20adelanta%20en%20una%20hora%20en%20todo%20el%20territorio%20nacional%20apartir%20de%20las%2024horas%20del%2009%20de%20Abril.pdf -# -# From Paul Eggert (2005-05-01): -# The decree doesn't say anything about daylight saving, but for now let's -# assume that it is daylight saving.... -# -# From Gwillim Law (2005-04-21): -# The Associated Press story on the time change, which can be found at -# http://www.lapalmainteractivo.com/guias/content/gen/ap/America_Latina/AMC_GEN_NICARAGUA_HORA.html -# and elsewhere, says (fifth paragraph, translated from Spanish): "The last -# time that a change of clocks was applied to save energy was in the year 2000 -# during the Arnoldo Alemán administration."... -# The northamerica file says that Nicaragua has been on UTC-6 continuously -# since December 1998. I wasn't able to find any details of Nicaraguan time -# changes in 2000. Perhaps a note could be added to the northamerica file, to -# the effect that we have indirect evidence that DST was observed in 2000. 
-# -# From Jesper Nørgaard Welen (2005-11-02): -# Nicaragua left DST the 2005-10-02 at 00:00 (local time). -# http://www.presidencia.gob.ni/presidencia/files_index/secretaria/comunicados/2005/septiembre/26septiembre-cambio-hora.htm -# (2005-09-26) -# -# From Jesper Nørgaard Welen (2006-05-05): -# http://www.elnuevodiario.com.ni/2006/05/01/nacionales/18410 -# (my informal translation) -# By order of the president of the republic, Enrique Bolaños, Nicaragua -# advanced by sixty minutes their official time, yesterday at 2 in the -# morning, and will stay that way until 30th of September. -# -# From Jesper Nørgaard Welen (2006-09-30): -# http://www.presidencia.gob.ni/buscador_gaceta/BD/DECRETOS/2006/D-063-2006P-PRN-Cambio-Hora.pdf -# My informal translation runs: -# The natural sun time is restored in all the national territory, in that the -# time is returned one hour at 01:00 am of October 1 of 2006. -# -# Rule NAME FROM TO TYPE IN ON AT SAVE LETTER/S -Rule Nic 1979 1980 - Mar Sun>=16 0:00 1:00 D -Rule Nic 1979 1980 - Jun Mon>=23 0:00 0 S -Rule Nic 2005 only - Apr 10 0:00 1:00 D -Rule Nic 2005 only - Oct Sun>=1 0:00 0 S -Rule Nic 2006 only - Apr 30 2:00 1:00 D -Rule Nic 2006 only - Oct Sun>=1 1:00 0 S -# Zone NAME GMTOFF RULES FORMAT [UNTIL] -Zone America/Managua -5:45:08 - LMT 1890 - -5:45:12 - MMT 1934 Jun 23 # Managua Mean Time? - -6:00 - CST 1973 May - -5:00 - EST 1975 Feb 16 - -6:00 Nic C%sT 1992 Jan 1 4:00 - -5:00 - EST 1992 Sep 24 - -6:00 - CST 1993 - -5:00 - EST 1997 - -6:00 Nic C%sT - -# Panama -# Zone NAME GMTOFF RULES FORMAT [UNTIL] -Zone America/Panama -5:18:08 - LMT 1890 - -5:19:36 - CMT 1908 Apr 22 # Colón Mean Time - -5:00 - EST -Link America/Panama America/Cayman - -# Puerto Rico -# There are too many San Juans elsewhere, so we'll use 'Puerto_Rico'. -# Zone NAME GMTOFF RULES FORMAT [UNTIL] -Zone America/Puerto_Rico -4:24:25 - LMT 1899 Mar 28 12:00 # San Juan - -4:00 - AST 1942 May 3 - -4:00 US A%sT 1946 - -4:00 - AST - -# St Kitts-Nevis -# St Lucia -# See America/Port_of_Spain. - -# St Pierre and Miquelon -# There are too many St Pierres elsewhere, so we'll use 'Miquelon'. -# Zone NAME GMTOFF RULES FORMAT [UNTIL] -Zone America/Miquelon -3:44:40 - LMT 1911 May 15 # St Pierre - -4:00 - AST 1980 May - -3:00 - -03 1987 - -3:00 Canada -03/-02 - -# St Vincent and the Grenadines -# See America/Port_of_Spain. - -# Turks and Caicos -# -# From Chris Dunn in -# http://bugs.debian.org/cgi-bin/bugreport.cgi?bug=415007 -# (2007-03-15): In the Turks & Caicos Islands (America/Grand_Turk) the -# daylight saving dates for time changes have been adjusted to match -# the recent U.S. change of dates. -# -# From Brian Inglis (2007-04-28): -# http://www.turksandcaicos.tc/calendar/index.htm [2007-04-26] -# there is an entry for Nov 4 "Daylight Savings Time Ends 2007" and three -# rows before that there is an out of date entry for Oct: -# "Eastern Standard Times Begins 2007 -# Clocks are set back one hour at 2:00 a.m. local Daylight Saving Time" -# indicating that the normal ET rules are followed. -# -# From Paul Eggert (2014-08-19): -# The 2014-08-13 Cabinet meeting decided to stay on UT -04 year-round. See: -# http://tcweeklynews.com/daylight-savings-time-to-be-maintained-p5353-127.htm -# Model this as a switch from EST/EDT to AST ... -# From Chris Walton (2014-11-04): -# ... the TCI government appears to have delayed the switch to -# "permanent daylight saving time" by one year.... 
-# http://tcweeklynews.com/time-change-to-go-ahead-this-november-p5437-127.htm -# -# Zone NAME GMTOFF RULES FORMAT [UNTIL] -Zone America/Grand_Turk -4:44:32 - LMT 1890 - -5:07:11 - KMT 1912 Feb # Kingston Mean Time - -5:00 - EST 1979 - -5:00 US E%sT 2015 Nov Sun>=1 2:00 - -4:00 - AST - -# British Virgin Is -# Virgin Is -# See America/Port_of_Spain. - - -# Local Variables: -# coding: utf-8 -# End: diff --git a/src/timezone/data/pacificnew b/src/timezone/data/pacificnew deleted file mode 100644 index 734943486b..0000000000 --- a/src/timezone/data/pacificnew +++ /dev/null @@ -1,27 +0,0 @@ -# This file is in the public domain, so clarified as of -# 2009-05-17 by Arthur David Olson. - -# From Arthur David Olson (1989-04-05): -# On 1989-04-05, the U. S. House of Representatives passed (238-154) a bill -# establishing "Pacific Presidential Election Time"; it was not acted on -# by the Senate or signed into law by the President. -# You might want to change the "PE" (Presidential Election) below to -# "Q" (Quadrennial) to maintain three-character zone abbreviations. -# If you're really conservative, you might want to change it to "D". -# Avoid "L" (Leap Year), which won't be true in 2100. - -# If Presidential Election Time is ever established, replace "XXXX" below -# with the year the law takes effect and uncomment the "##" lines. - -# Rule NAME FROM TO TYPE IN ON AT SAVE LETTER/S -## Rule Twilite XXXX max - Apr Sun>=1 2:00 1:00 D -## Rule Twilite XXXX max uspres Oct lastSun 2:00 1:00 PE -## Rule Twilite XXXX max uspres Nov Sun>=7 2:00 0 S -## Rule Twilite XXXX max nonpres Oct lastSun 2:00 0 S - -# Zone NAME GMTOFF RULES/SAVE FORMAT [UNTIL] -## Zone America/Los_Angeles-PET -8:00 US P%sT XXXX -## -8:00 Twilite P%sT - -# For now... -Link America/Los_Angeles US/Pacific-New ## diff --git a/src/timezone/data/southamerica b/src/timezone/data/southamerica deleted file mode 100644 index 6038c3b65c..0000000000 --- a/src/timezone/data/southamerica +++ /dev/null @@ -1,1793 +0,0 @@ -# This file is in the public domain, so clarified as of -# 2009-05-17 by Arthur David Olson. - -# This file is by no means authoritative; if you think you know better, -# go ahead and edit the file (and please send any changes to -# tz@iana.org for general use in the future). For more, please see -# the file CONTRIBUTING in the tz distribution. - -# From Paul Eggert (2016-12-05): -# -# Unless otherwise specified, the source for data through 1990 is: -# Thomas G. Shanks and Rique Pottenger, The International Atlas (6th edition), -# San Diego: ACS Publications, Inc. (2003). -# Unfortunately this book contains many errors and cites no sources. -# -# Many years ago Gwillim Law wrote that a good source -# for time zone data was the International Air Transport -# Association's Standard Schedules Information Manual (IATA SSIM), -# published semiannually. Law sent in several helpful summaries -# of the IATA's data after 1990. Except where otherwise noted, -# IATA SSIM is the source for entries after 1990. -# -# For data circa 1899, a common source is: -# Milne J. Civil time. Geogr J. 1899 Feb;13(2):173-94. -# http://www.jstor.org/stable/1774359 -# -# These tables use numeric abbreviations like -03 and -0330 for -# integer hour and minute UTC offsets. Although earlier editions used -# alphabetic time zone abbreviations, these abbreviations were -# invented and did not reflect common practice. 
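The numeric abbreviations mentioned in the header above (-03, -0330, -04/-03 and so on) are what the compiled zones report for most South American locations. As a quick illustration, here is a minimal sketch, assuming Python 3.9+ with the zoneinfo module and a reasonably recent installed tz database; the reported names depend on the tzdata version:

from datetime import datetime
from zoneinfo import ZoneInfo

for name in ("America/Argentina/Buenos_Aires", "America/La_Paz"):
    sample = datetime(2018, 11, 11, 12, 0, tzinfo=ZoneInfo(name))
    # Recent tzdata drops the invented alphabetic abbreviations in favor of
    # plain numeric offsets, e.g. "-03" for Buenos Aires and "-04" for La Paz.
    print(name, sample.utcoffset(), sample.tzname())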
- -############################################################################### - -############################################################################### - -# Argentina - -# From Bob Devine (1988-01-28): -# Argentina: first Sunday in October to first Sunday in April since 1976. -# Double Summer time from 1969 to 1974. Switches at midnight. - -# From U. S. Naval Observatory (1988-01-19): -# ARGENTINA 3 H BEHIND UTC - -# From Hernan G. Otero (1995-06-26): -# I am sending modifications to the Argentine time zone table... -# AR was chosen because they are the ISO letters that represent Argentina. - -# Rule NAME FROM TO TYPE IN ON AT SAVE LETTER/S -Rule Arg 1930 only - Dec 1 0:00 1:00 S -Rule Arg 1931 only - Apr 1 0:00 0 - -Rule Arg 1931 only - Oct 15 0:00 1:00 S -Rule Arg 1932 1940 - Mar 1 0:00 0 - -Rule Arg 1932 1939 - Nov 1 0:00 1:00 S -Rule Arg 1940 only - Jul 1 0:00 1:00 S -Rule Arg 1941 only - Jun 15 0:00 0 - -Rule Arg 1941 only - Oct 15 0:00 1:00 S -Rule Arg 1943 only - Aug 1 0:00 0 - -Rule Arg 1943 only - Oct 15 0:00 1:00 S -Rule Arg 1946 only - Mar 1 0:00 0 - -Rule Arg 1946 only - Oct 1 0:00 1:00 S -Rule Arg 1963 only - Oct 1 0:00 0 - -Rule Arg 1963 only - Dec 15 0:00 1:00 S -Rule Arg 1964 1966 - Mar 1 0:00 0 - -Rule Arg 1964 1966 - Oct 15 0:00 1:00 S -Rule Arg 1967 only - Apr 2 0:00 0 - -Rule Arg 1967 1968 - Oct Sun>=1 0:00 1:00 S -Rule Arg 1968 1969 - Apr Sun>=1 0:00 0 - -Rule Arg 1974 only - Jan 23 0:00 1:00 S -Rule Arg 1974 only - May 1 0:00 0 - -Rule Arg 1988 only - Dec 1 0:00 1:00 S -# -# From Hernan G. Otero (1995-06-26): -# These corrections were contributed by InterSoft Argentina S.A., -# obtaining the data from the: -# Talleres de Hidrografía Naval Argentina -# (Argentine Naval Hydrography Institute) -Rule Arg 1989 1993 - Mar Sun>=1 0:00 0 - -Rule Arg 1989 1992 - Oct Sun>=15 0:00 1:00 S -# -# From Hernan G. Otero (1995-06-26): -# From this moment on, the law that mandated the daylight saving -# time corrections was derogated and no more modifications -# to the time zones (for daylight saving) are now made. -# -# From Rives McDow (2000-01-10): -# On October 3, 1999, 0:00 local, Argentina implemented daylight savings time, -# which did not result in the switch of a time zone, as they stayed 9 hours -# from the International Date Line. -Rule Arg 1999 only - Oct Sun>=1 0:00 1:00 S -# From Paul Eggert (2007-12-28): -# DST was set to expire on March 5, not March 3, but since it was converted -# to standard time on March 3 it's more convenient for us to pretend that -# it ended on March 3. -Rule Arg 2000 only - Mar 3 0:00 0 - -# -# From Peter Gradelski via Steffen Thorsen (2000-03-01): -# We just checked with our São Paulo office and they say the government of -# Argentina decided not to become one of the countries that go on or off DST. -# So Buenos Aires should be -3 hours from GMT at all times. -# -# From Fabián L. Arce Jofré (2000-04-04): -# The law that claimed DST for Argentina was derogated by President Fernando -# de la Rúa on March 2, 2000, because it would make people spend more energy -# in the winter time, rather than less. The change took effect on March 3. -# -# From Mariano Absatz (2001-06-06): -# one of the major newspapers here in Argentina said that the 1999 -# Timezone Law (which never was effectively applied) will (would?) be -# in effect.... The article is at -# http://ar.clarin.com/diario/2001-06-06/e-01701.htm -# ... The Law itself is "Ley No. 25155", sanctioned on 1999-08-25, enacted -# 1999-09-17, and published 1999-09-21. 
The official publication is at: -# http://www.boletin.jus.gov.ar/BON/Primera/1999/09-Septiembre/21/PDF/BO21-09-99LEG.PDF -# Regretfully, you have to subscribe (and pay) for the on-line version.... -# -# (2001-06-12): -# the timezone for Argentina will not change next Sunday. -# Apparently it will do so on Sunday 24th.... -# http://ar.clarin.com/diario/2001-06-12/s-03501.htm -# -# (2001-06-25): -# Last Friday (yes, the last working day before the date of the change), the -# Senate annulled the 1999 law that introduced the changes later postponed. -# http://www.clarin.com.ar/diario/2001-06-22/s-03601.htm -# It remains the vote of the Deputies..., but it will be the same.... -# This kind of things had always been done this way in Argentina. -# We are still -03:00 all year round in all of the country. -# -# From Steffen Thorsen (2007-12-21): -# A user (Leonardo Chaim) reported that Argentina will adopt DST.... -# all of the country (all Zone-entries) are affected. News reports like -# http://www.lanacion.com.ar/opinion/nota.asp?nota_id=973037 indicate -# that Argentina will use DST next year as well, from October to -# March, although exact rules are not given. -# -# From Jesper Nørgaard Welen (2007-12-26) -# The last hurdle of Argentina DST is over, the proposal was approved in -# the lower chamber too (Diputados) with a vote 192 for and 2 against. -# By the way thanks to Mariano Absatz and Daniel Mario Vega for the link to -# the original scanned proposal, where the dates and the zero hours are -# clear and unambiguous...This is the article about final approval: -# http://www.lanacion.com.ar/politica/nota.asp?nota_id=973996 -# -# From Paul Eggert (2007-12-22): -# For dates after mid-2008, the following rules are my guesses and -# are quite possibly wrong, but are more likely than no DST at all. - -# From Alexander Krivenyshev (2008-09-05): -# As per message from Carlos Alberto Fonseca Arauz (Nicaragua), -# Argentina will start DST on Sunday October 19, 2008. -# -# http://www.worldtimezone.com/dst_news/dst_news_argentina03.html -# http://www.impulsobaires.com.ar/nota.php?id=57832 (in spanish) - -# From Juan Manuel Docile in https://bugs.gentoo.org/240339 (2008-10-07) -# via Rodrigo Severo: -# Argentinian law No. 25.155 is no longer valid. -# http://www.infoleg.gov.ar/infolegInternet/anexos/60000-64999/60036/norma.htm -# The new one is law No. 26.350 -# http://www.infoleg.gov.ar/infolegInternet/anexos/135000-139999/136191/norma.htm -# So there is no summer time in Argentina for now. - -# From Mariano Absatz (2008-10-20): -# Decree 1693/2008 applies Law 26.350 for the summer 2008/2009 establishing DST -# in Argentina from 2008-10-19 until 2009-03-15. -# http://www.boletinoficial.gov.ar/Bora.Portal/CustomControls/PdfContent.aspx?fp=16102008&pi=3&pf=4&s=0&sec=01 -# - -# Decree 1705/2008 excepting 12 Provinces from applying DST in the summer -# 2008/2009: Catamarca, La Rioja, Mendoza, Salta, San Juan, San Luis, La -# Pampa, Neuquén, Rio Negro, Chubut, Santa Cruz and Tierra del Fuego -# http://www.boletinoficial.gov.ar/Bora.Portal/CustomControls/PdfContent.aspx?fp=17102008&pi=1&pf=1&s=0&sec=01 -# -# Press release 235 dated Saturday October 18th, from the Government of the -# Province of Jujuy saying it will not apply DST either (even when it was not -# included in Decree 1705/2008). 
-# http://www.jujuy.gov.ar/index2/partes_prensa/18_10_08/235-181008.doc - -# From fullinet (2009-10-18): -# As announced in -# http://www.argentina.gob.ar/argentina/portal/paginas.dhtml?pagina=356 -# (an official .gob.ar) under title: "Sin Cambio de Hora" -# (English: "No hour change"). -# -# "Por el momento, el Gobierno Nacional resolvió no modificar la hora -# oficial, decisión que estaba en estudio para su implementación el -# domingo 18 de octubre. Desde el Ministerio de Planificación se anunció -# que la Argentina hoy, en estas condiciones meteorológicas, no necesita -# la modificación del huso horario, ya que 2009 nos encuentra con -# crecimiento en la producción y distribución energética." - -Rule Arg 2007 only - Dec 30 0:00 1:00 S -Rule Arg 2008 2009 - Mar Sun>=15 0:00 0 - -Rule Arg 2008 only - Oct Sun>=15 0:00 1:00 S - -# From Mariano Absatz (2004-05-21): -# Today it was officially published that the Province of Mendoza is changing -# its timezone this winter... starting tomorrow night.... -# http://www.gobernac.mendoza.gov.ar/boletin/pdf/20040521-27158-normas.pdf -# From Paul Eggert (2004-05-24): -# It's Law No. 7,210. This change is due to a public power emergency, so for -# now we'll assume it's for this year only. -# -# From Paul Eggert (2014-08-09): -# Hora de verano para la República Argentina -# http://buenasiembra.com.ar/esoterismo/astrologia/hora-de-verano-de-la-republica-argentina-27.html -# says that standard time in Argentina from 1894-10-31 -# to 1920-05-01 was -4:16:48.25. Go with this more-precise value -# over Shanks & Pottenger. -# -# From Mariano Absatz (2004-06-05): -# These media articles from a major newspaper mostly cover the current state: -# http://www.lanacion.com.ar/04/05/27/de_604825.asp -# http://www.lanacion.com.ar/04/05/28/de_605203.asp -# -# The following eight (8) provinces pulled clocks back to UTC-04:00 at -# midnight Monday May 31st. (that is, the night between 05/31 and 06/01). -# Apparently, all nine provinces would go back to UTC-03:00 at the same -# time in October 17th. -# -# Catamarca, Chubut, La Rioja, San Juan, San Luis, Santa Cruz, -# Tierra del Fuego, Tucumán. -# -# From Mariano Absatz (2004-06-14): -# ... this weekend, the Province of Tucumán decided it'd go back to UTC-03:00 -# yesterday midnight (that is, at 24:00 Saturday 12th), since the people's -# annoyance with the change is much higher than the power savings obtained.... -# -# From Gwillim Law (2004-06-14): -# http://www.lanacion.com.ar/04/06/10/de_609078.asp ... -# "The time change in Tierra del Fuego was a conflicted decision from -# the start. The government had decreed that the measure would take -# effect on June 1, but a normative error forced the new time to begin -# three days earlier, from a Saturday to a Sunday.... -# Our understanding was that the change was originally scheduled to take place -# on June 1 at 00:00 in Chubut, Santa Cruz, Tierra del Fuego (and some other -# provinces). Sunday was May 30, only two days earlier. So the article -# contains a contradiction. I would give more credence to the Saturday/Sunday -# date than the "three days earlier" phrase, and conclude that Tierra del -# Fuego set its clocks back at 2004-05-30 00:00. -# -# From Steffen Thorsen (2004-10-05): -# The previous law 7210 which changed the province of Mendoza's time zone -# back in May have been modified slightly in a new law 7277, which set the -# new end date to 2004-09-26 (original date was 2004-10-17). 
-# http://www.gobernac.mendoza.gov.ar/boletin/pdf/20040924-27244-normas.pdf -# -# From Mariano Absatz (2004-10-05): -# San Juan changed from UTC-03:00 to UTC-04:00 at midnight between -# Sunday, May 30th and Monday, May 31st. It changed back to UTC-03:00 -# at midnight between Saturday, July 24th and Sunday, July 25th.... -# http://www.sanjuan.gov.ar/prensa/archivo/000329.html -# http://www.sanjuan.gov.ar/prensa/archivo/000426.html -# http://www.sanjuan.gov.ar/prensa/archivo/000441.html - -# From Alex Krivenyshev (2008-01-17): -# Here are articles that Argentina Province San Luis is planning to end DST -# as earlier as upcoming Monday January 21, 2008 or February 2008: -# -# Provincia argentina retrasa reloj y marca diferencia con resto del país -# (Argentine Province delayed clock and mark difference with the rest of the -# country) -# http://cl.invertia.com/noticias/noticia.aspx?idNoticia=200801171849_EFE_ET4373&idtel -# -# Es inminente que en San Luis atrasen una hora los relojes -# (It is imminent in San Luis clocks one hour delay) -# http://www.lagaceta.com.ar/nota/253414/Economia/Es-inminente-que-en-San-Luis-atrasen-una-hora-los-relojes.html -# http://www.worldtimezone.net/dst_news/dst_news_argentina02.html - -# From Jesper Nørgaard Welen (2008-01-18): -# The page of the San Luis provincial government -# http://www.sanluis.gov.ar/notas.asp?idCanal=0&id=22812 -# confirms what Alex Krivenyshev has earlier sent to the tz -# emailing list about that San Luis plans to return to standard -# time much earlier than the rest of the country. It also -# confirms that upon request the provinces San Juan and Mendoza -# refused to follow San Luis in this change. -# -# The change is supposed to take place Monday the 21st at 0:00 -# hours. As far as I understand it if this goes ahead, we need -# a new timezone for San Luis (although there are also documented -# independent changes in the southamerica file of San Luis in -# 1990 and 1991 which has not been confirmed). - -# From Jesper Nørgaard Welen (2008-01-25): -# Unfortunately the below page has become defunct, about the San Luis -# time change. Perhaps because it now is part of a group of pages "Most -# important pages of 2008." -# -# You can use -# http://www.sanluis.gov.ar/notas.asp?idCanal=8141&id=22834 -# instead it seems. Or use "Buscador" from the main page of the San Luis -# government, and fill in "huso" and click OK, and you will get 3 pages -# from which the first one is identical to the above. - -# From Mariano Absatz (2008-01-28): -# I can confirm that the Province of San Luis (and so far only that -# province) decided to go back to UTC-3 effective midnight Jan 20th 2008 -# (that is, Monday 21st at 0:00 is the time the clocks were delayed back -# 1 hour), and they intend to keep UTC-3 as their timezone all year round -# (that is, unless they change their mind any minute now). -# -# So we'll have to add yet another city to 'southamerica' (I think San -# Luis city is the most populated city in the Province, so it'd be -# America/Argentina/San_Luis... of course I can't remember if San Luis's -# history of particular changes goes along with Mendoza or San Juan :-( -# (I only remember not being able to collect hard facts about San Luis -# back in 2004, when these provinces changed to UTC-4 for a few days, I -# mailed them personally and never got an answer). - -# From Paul Eggert (2014-08-12): -# Unless otherwise specified, data entries are from Shanks & Pottenger through -# 1992, from the IATA otherwise. 
As noted below, Shanks & Pottenger say that -# America/Cordoba split into 6 subregions during 1991/1992, one of which -# was America/San_Luis, but we haven't verified this yet so for now we'll -# keep America/Cordoba a single region rather than splitting it into the -# other 5 subregions. - -# From Mariano Absatz (2009-03-13): -# Yesterday (with our usual 2-day notice) the Province of San Luis -# decided that next Sunday instead of "staying" @utc-03:00 they will go -# to utc-04:00 until the second Saturday in October... -# -# The press release is at -# http://www.sanluis.gov.ar/SL/Paginas/NoticiaDetalle.asp?TemaId=1&InfoPrensaId=3102 -# (I couldn't find the decree, but www.sanluis.gov.ar -# is the official page for the Province Government.) -# -# There's also a note in only one of the major national papers ... -# http://www.lanacion.com.ar/nota.asp?nota_id=1107912 -# -# The press release says [quick and dirty translation]: -# ... announced that next Sunday, at 00:00, Puntanos (the San Luis -# inhabitants) will have to turn back one hour their clocks -# -# Since then, San Luis will establish its own Province timezone. Thus, -# during 2009, this timezone change will run from 00:00 the third Sunday -# in March until 24:00 of the second Saturday in October. - -# From Mariano Absatz (2009-10-16): -# ...the Province of San Luis is a case in itself. -# -# The Law at -# http://www.diputadossanluis.gov.ar/diputadosasp/paginas/verNorma.asp?NormaID=276 -# is ambiguous because it establishes a calendar from the 2nd Sunday in -# October at 0:00 thru the 2nd Saturday in March at 24:00 and the -# complement of that starting on the 2nd Sunday of March at 0:00 and -# ending on the 2nd Saturday of October at 24:00. -# -# This clearly breaks every time the 1st of March or October is a Sunday. -# -# IMHO, the "spirit of the Law" is to make the changes at 0:00 on the 2nd -# Sunday of October and March. -# -# The problem is that the changes in the rest of the Provinces that did -# change in 2007/2008, were made according to the Federal Law and Decrees -# that did so on the 3rd Sunday of October and March. -# -# In fact, San Luis actually switched from UTC-4 to UTC-3 last Sunday -# (October 11th) at 0:00. -# -# So I guess a new set of rules, besides "Arg", must be made and the last -# America/Argentina/San_Luis entries should change to use these... -# ... - -# From Alexander Krivenyshev (2010-04-09): -# According to news reports from El Diario de la República Province San -# Luis, Argentina (standard time UTC-04) will keep Daylight Saving Time -# after April 11, 2010 - will continue to have same time as rest of -# Argentina (UTC-3) (no DST). -# -# Confirmaron la prórroga del huso horario de verano (Spanish) -# http://www.eldiariodelarepublica.com/index.php?option=com_content&task=view&id=29383&Itemid=9 -# or (some English translation): -# http://www.worldtimezone.com/dst_news/dst_news_argentina08.html - -# From Mariano Absatz (2010-04-12): -# yes...I can confirm this...and given that San Luis keeps calling -# UTC-03:00 "summer time", we shouldn't just let San Luis go back to "Arg" -# rules...San Luis is still using "Western ARgentina Time" and it got -# stuck on Summer daylight savings time even though the summer is over. 
- -# From Paul Eggert (2013-09-05): -# Perhaps San Luis operates on the legal fiction that it is at -04 -# with perpetual summer time, but ordinary usage typically seems to -# just say it's at -03; see, for example, -# http://es.wikipedia.org/wiki/Hora_oficial_argentina -# We've documented similar situations as being plain changes to -# standard time, so let's do that here too. This does not change UTC -# offsets, only tm_isdst and the time zone abbreviations. One minor -# plus is that this silences a zic complaint that there's no POSIX TZ -# setting for time stamps past 2038. - -# From Paul Eggert (2013-02-21): -# Milne says Córdoba time was -4:16:48.2. Round to the nearest second. - -# Zone NAME GMTOFF RULES FORMAT [UNTIL] -# -# Buenos Aires (BA), Capital Federal (CF), -Zone America/Argentina/Buenos_Aires -3:53:48 - LMT 1894 Oct 31 - -4:16:48 - CMT 1920 May # Córdoba Mean Time - -4:00 - -04 1930 Dec - -4:00 Arg -04/-03 1969 Oct 5 - -3:00 Arg -03/-02 1999 Oct 3 - -4:00 Arg -04/-03 2000 Mar 3 - -3:00 Arg -03/-02 -# -# Córdoba (CB), Santa Fe (SF), Entre Ríos (ER), Corrientes (CN), Misiones (MN), -# Chaco (CC), Formosa (FM), Santiago del Estero (SE) -# -# Shanks & Pottenger also make the following claims, which we haven't verified: -# - Formosa switched to -3:00 on 1991-01-07. -# - Misiones switched to -3:00 on 1990-12-29. -# - Chaco switched to -3:00 on 1991-01-04. -# - Santiago del Estero switched to -4:00 on 1991-04-01, -# then to -3:00 on 1991-04-26. -# -Zone America/Argentina/Cordoba -4:16:48 - LMT 1894 Oct 31 - -4:16:48 - CMT 1920 May - -4:00 - -04 1930 Dec - -4:00 Arg -04/-03 1969 Oct 5 - -3:00 Arg -03/-02 1991 Mar 3 - -4:00 - -04 1991 Oct 20 - -3:00 Arg -03/-02 1999 Oct 3 - -4:00 Arg -04/-03 2000 Mar 3 - -3:00 Arg -03/-02 -# -# Salta (SA), La Pampa (LP), Neuquén (NQ), Rio Negro (RN) -Zone America/Argentina/Salta -4:21:40 - LMT 1894 Oct 31 - -4:16:48 - CMT 1920 May - -4:00 - -04 1930 Dec - -4:00 Arg -04/-03 1969 Oct 5 - -3:00 Arg -03/-02 1991 Mar 3 - -4:00 - -04 1991 Oct 20 - -3:00 Arg -03/-02 1999 Oct 3 - -4:00 Arg -04/-03 2000 Mar 3 - -3:00 Arg -03/-02 2008 Oct 18 - -3:00 - -03 -# -# Tucumán (TM) -Zone America/Argentina/Tucuman -4:20:52 - LMT 1894 Oct 31 - -4:16:48 - CMT 1920 May - -4:00 - -04 1930 Dec - -4:00 Arg -04/-03 1969 Oct 5 - -3:00 Arg -03/-02 1991 Mar 3 - -4:00 - -04 1991 Oct 20 - -3:00 Arg -03/-02 1999 Oct 3 - -4:00 Arg -04/-03 2000 Mar 3 - -3:00 - -03 2004 Jun 1 - -4:00 - -04 2004 Jun 13 - -3:00 Arg -03/-02 -# -# La Rioja (LR) -Zone America/Argentina/La_Rioja -4:27:24 - LMT 1894 Oct 31 - -4:16:48 - CMT 1920 May - -4:00 - -04 1930 Dec - -4:00 Arg -04/-03 1969 Oct 5 - -3:00 Arg -03/-02 1991 Mar 1 - -4:00 - -04 1991 May 7 - -3:00 Arg -03/-02 1999 Oct 3 - -4:00 Arg -04/-03 2000 Mar 3 - -3:00 - -03 2004 Jun 1 - -4:00 - -04 2004 Jun 20 - -3:00 Arg -03/-02 2008 Oct 18 - -3:00 - -03 -# -# San Juan (SJ) -Zone America/Argentina/San_Juan -4:34:04 - LMT 1894 Oct 31 - -4:16:48 - CMT 1920 May - -4:00 - -04 1930 Dec - -4:00 Arg -04/-03 1969 Oct 5 - -3:00 Arg -03/-02 1991 Mar 1 - -4:00 - -04 1991 May 7 - -3:00 Arg -03/-02 1999 Oct 3 - -4:00 Arg -04/-03 2000 Mar 3 - -3:00 - -03 2004 May 31 - -4:00 - -04 2004 Jul 25 - -3:00 Arg -03/-02 2008 Oct 18 - -3:00 - -03 -# -# Jujuy (JY) -Zone America/Argentina/Jujuy -4:21:12 - LMT 1894 Oct 31 - -4:16:48 - CMT 1920 May - -4:00 - -04 1930 Dec - -4:00 Arg -04/-03 1969 Oct 5 - -3:00 Arg -03/-02 1990 Mar 4 - -4:00 - -04 1990 Oct 28 - -4:00 1:00 -03 1991 Mar 17 - -4:00 - -04 1991 Oct 6 - -3:00 1:00 -02 1992 - -3:00 Arg -03/-02 1999 Oct 3 - -4:00 Arg 
-04/-03 2000 Mar 3 - -3:00 Arg -03/-02 2008 Oct 18 - -3:00 - -03 -# -# Catamarca (CT), Chubut (CH) -Zone America/Argentina/Catamarca -4:23:08 - LMT 1894 Oct 31 - -4:16:48 - CMT 1920 May - -4:00 - -04 1930 Dec - -4:00 Arg -04/-03 1969 Oct 5 - -3:00 Arg -03/-02 1991 Mar 3 - -4:00 - -04 1991 Oct 20 - -3:00 Arg -03/-02 1999 Oct 3 - -4:00 Arg -04/-03 2000 Mar 3 - -3:00 - -03 2004 Jun 1 - -4:00 - -04 2004 Jun 20 - -3:00 Arg -03/-02 2008 Oct 18 - -3:00 - -03 -# -# Mendoza (MZ) -Zone America/Argentina/Mendoza -4:35:16 - LMT 1894 Oct 31 - -4:16:48 - CMT 1920 May - -4:00 - -04 1930 Dec - -4:00 Arg -04/-03 1969 Oct 5 - -3:00 Arg -03/-02 1990 Mar 4 - -4:00 - -04 1990 Oct 15 - -4:00 1:00 -03 1991 Mar 1 - -4:00 - -04 1991 Oct 15 - -4:00 1:00 -03 1992 Mar 1 - -4:00 - -04 1992 Oct 18 - -3:00 Arg -03/-02 1999 Oct 3 - -4:00 Arg -04/-03 2000 Mar 3 - -3:00 - -03 2004 May 23 - -4:00 - -04 2004 Sep 26 - -3:00 Arg -03/-02 2008 Oct 18 - -3:00 - -03 -# -# San Luis (SL) - -Rule SanLuis 2008 2009 - Mar Sun>=8 0:00 0 - -Rule SanLuis 2007 2008 - Oct Sun>=8 0:00 1:00 S - -Zone America/Argentina/San_Luis -4:25:24 - LMT 1894 Oct 31 - -4:16:48 - CMT 1920 May - -4:00 - -04 1930 Dec - -4:00 Arg -04/-03 1969 Oct 5 - -3:00 Arg -03/-02 1990 - -3:00 1:00 -02 1990 Mar 14 - -4:00 - -04 1990 Oct 15 - -4:00 1:00 -03 1991 Mar 1 - -4:00 - -04 1991 Jun 1 - -3:00 - -03 1999 Oct 3 - -4:00 1:00 -03 2000 Mar 3 - -3:00 - -03 2004 May 31 - -4:00 - -04 2004 Jul 25 - -3:00 Arg -03/-02 2008 Jan 21 - -4:00 SanLuis -04/-03 2009 Oct 11 - -3:00 - -03 -# -# Santa Cruz (SC) -Zone America/Argentina/Rio_Gallegos -4:36:52 - LMT 1894 Oct 31 - -4:16:48 - CMT 1920 May - -4:00 - -04 1930 Dec - -4:00 Arg -04/-03 1969 Oct 5 - -3:00 Arg -03/-02 1999 Oct 3 - -4:00 Arg -04/-03 2000 Mar 3 - -3:00 - -03 2004 Jun 1 - -4:00 - -04 2004 Jun 20 - -3:00 Arg -03/-02 2008 Oct 18 - -3:00 - -03 -# -# Tierra del Fuego, Antártida e Islas del Atlántico Sur (TF) -Zone America/Argentina/Ushuaia -4:33:12 - LMT 1894 Oct 31 - -4:16:48 - CMT 1920 May - -4:00 - -04 1930 Dec - -4:00 Arg -04/-03 1969 Oct 5 - -3:00 Arg -03/-02 1999 Oct 3 - -4:00 Arg -04/-03 2000 Mar 3 - -3:00 - -03 2004 May 30 - -4:00 - -04 2004 Jun 20 - -3:00 Arg -03/-02 2008 Oct 18 - -3:00 - -03 - -# Aruba -Link America/Curacao America/Aruba - -# Bolivia -# Zone NAME GMTOFF RULES FORMAT [UNTIL] -Zone America/La_Paz -4:32:36 - LMT 1890 - -4:32:36 - CMT 1931 Oct 15 # Calamarca MT - -4:32:36 1:00 BOST 1932 Mar 21 # Bolivia ST - -4:00 - -04 - -# Brazil - -# From Paul Eggert (1993-11-18): -# The mayor of Rio recently attempted to change the time zone rules -# just in his city, in order to leave more summer time for the tourist trade. -# The rule change lasted only part of the day; -# the federal government refused to follow the city's rules, and business -# was in a chaos, so the mayor backed down that afternoon. - -# From IATA SSIM (1996-02): -# _Only_ the following states in BR1 observe DST: Rio Grande do Sul (RS), -# Santa Catarina (SC), Paraná (PR), São Paulo (SP), Rio de Janeiro (RJ), -# Espírito Santo (ES), Minas Gerais (MG), Bahia (BA), Goiás (GO), -# Distrito Federal (DF), Tocantins (TO), Sergipe [SE] and Alagoas [AL]. -# [The last three states are new to this issue of the IATA SSIM.] - -# From Gwillim Law (1996-10-07): -# Geography, history (Tocantins was part of Goiás until 1989), and other -# sources of time zone information lead me to believe that AL, SE, and TO were -# always in BR1, and so the only change was whether or not they observed DST.... -# The earliest issue of the SSIM I have is 2/91. 
Each issue from then until -# 9/95 says that DST is observed only in the ten states I quoted from 9/95, -# along with Mato Grosso (MT) and Mato Grosso do Sul (MS), which are in BR2 -# (UTC-4).... The other two time zones given for Brazil are BR3, which is -# UTC-5, no DST, and applies only in the state of Acre (AC); and BR4, which is -# UTC-2, and applies to Fernando de Noronha (formerly FN, but I believe it's -# become part of the state of Pernambuco). The boundary between BR1 and BR2 -# has never been clearly stated. They've simply been called East and West. -# However, some conclusions can be drawn from another IATA manual: the Airline -# Coding Directory, which lists close to 400 airports in Brazil. For each -# airport it gives a time zone which is coded to the SSIM. From that -# information, I'm led to conclude that the states of Amapá (AP), Ceará (CE), -# Maranhão (MA), Paraíba (PR), Pernambuco (PE), Piauí (PI), and Rio Grande do -# Norte (RN), and the eastern part of Pará (PA) are all in BR1 without DST. - -# From Marcos Tadeu (1998-09-27): -# Brazilian official page - -# From Jesper Nørgaard (2000-11-03): -# [For an official list of which regions in Brazil use which time zones, see:] -# http://pcdsh01.on.br/Fusbr.htm -# http://pcdsh01.on.br/Fusbrhv.htm - -# From Celso Doria via David Madeo (2002-10-09): -# The reason for the delay this year has to do with elections in Brazil. -# -# Unlike in the United States, elections in Brazil are 100% computerized and -# the results are known almost immediately. Yesterday, it was the first -# round of the elections when 115 million Brazilians voted for President, -# Governor, Senators, Federal Deputies, and State Deputies. Nobody is -# counting (or re-counting) votes anymore and we know there will be a second -# round for the Presidency and also for some Governors. The 2nd round will -# take place on October 27th. -# -# The reason why the DST will only begin November 3rd is that the thousands -# of electoral machines used cannot have their time changed, and since the -# Constitution says the elections must begin at 8:00 AM and end at 5:00 PM, -# the Government decided to postpone DST, instead of changing the Constitution -# (maybe, for the next elections, it will be possible to change the clock)... - -# From Rodrigo Severo (2004-10-04): -# It's just the biannual change made necessary by the much hyped, supposedly -# modern Brazilian eletronic voting machines which, apparently, can't deal -# with a time change between the first and the second rounds of the elections. - -# From Steffen Thorsen (2007-09-20): -# Brazil will start DST on 2007-10-14 00:00 and end on 2008-02-17 00:00: -# http://www.mme.gov.br/site/news/detail.do;jsessionid=BBA06811AFCAAC28F0285210913513DA?newsId=13975 - -# From Paul Schulze (2008-06-24): -# ...by law number 11.662 of April 24, 2008 (published in the "Diario -# Oficial da União"...) in Brazil there are changes in the timezones, -# effective today (00:00am at June 24, 2008) as follows: -# -# a) The timezone UTC+5 is extinguished, with all the Acre state and the -# part of the Amazonas state that had this timezone now being put to the -# timezone UTC+4 -# b) The whole Pará state now is put at timezone UTC+3, instead of just -# part of it, as was before. -# -# This change follows a proposal of senator Tiao Viana of Acre state, that -# proposed it due to concerns about open television channels displaying -# programs inappropriate to youths in the states that had the timezone -# UTC+5 too early in the night. 
In the occasion, some more corrections -# were proposed, trying to unify the timezones of any given state. This -# change modifies timezone rules defined in decree 2.784 of 18 June, -# 1913. - -# From Rodrigo Severo (2008-06-24): -# Just correcting the URL: -# https://www.in.gov.br/imprensa/visualiza/index.jsp?jornal=do&secao=1&pagina=1&data=25/04/2008 -# -# As a result of the above Decree I believe the America/Rio_Branco -# timezone shall be modified from UTC-5 to UTC-4 and a new timezone shall -# be created to represent the...west side of the Pará State. I -# suggest this new timezone be called Santarem as the most -# important/populated city in the affected area. -# -# This new timezone would be the same as the Rio_Branco timezone up to -# the 2008/06/24 change which would be to UTC-3 instead of UTC-4. - -# From Alex Krivenyshev (2008-06-24): -# This is a quick reference page for New and Old Brazil Time Zones map. -# http://www.worldtimezone.com/brazil-time-new-old.php -# -# - 4 time zones replaced by 3 time zones - eliminating time zone UTC-05 -# (state Acre and the part of the Amazonas will be UTC/GMT-04) - western -# part of Par state is moving to one timezone UTC-03 (from UTC-04). - -# From Paul Eggert (2002-10-10): -# The official decrees referenced below are mostly taken from -# Decretos sobre o Horário de Verão no Brasil. -# http://pcdsh01.on.br/DecHV.html - -# From Steffen Thorsen (2008-08-29): -# As announced by the government and many newspapers in Brazil late -# yesterday, Brazil will start DST on 2008-10-19 (need to change rule) and -# it will end on 2009-02-15 (current rule for Brazil is fine). Based on -# past years experience with the elections, there was a good chance that -# the start was postponed to November, but it did not happen this year. -# -# It has not yet been posted to http://pcdsh01.on.br/DecHV.html -# -# An official page about it: -# http://www.mme.gov.br/site/news/detail.do?newsId=16722 -# Note that this link does not always work directly, but must be accessed -# by going to -# http://www.mme.gov.br/first -# -# One example link that works directly: -# http://jornale.com.br/index.php?option=com_content&task=view&id=13530&Itemid=54 -# (Portuguese) -# -# We have a written a short article about it as well: -# http://www.timeanddate.com/news/time/brazil-dst-2008-2009.html -# -# From Alexander Krivenyshev (2011-10-04): -# State Bahia will return to Daylight savings time this year after 8 years off. -# The announcement was made by Governor Jaques Wagner in an interview to a -# television station in Salvador. - -# In Portuguese: -# http://g1.globo.com/bahia/noticia/2011/10/governador-jaques-wagner-confirma-horario-de-verao-na-bahia.html -# http://noticias.terra.com.br/brasil/noticias/0,,OI5390887-EI8139,00-Bahia+volta+a+ter+horario+de+verao+apos+oito+anos.html - -# From Guilherme Bernardes Rodrigues (2011-10-07): -# There is news in the media, however there is still no decree about it. -# I just send a e-mail to Zulmira Brandao at http://pcdsh01.on.br/ the -# official agency about time in Brazil, and she confirmed that the old rule is -# still in force. - -# From Guilherme Bernardes Rodrigues (2011-10-14) -# It's official, the President signed a decree that includes Bahia in summer -# time. -# [ and in a second message (same day): ] -# I found the decree. -# -# DECRETO No. 
7.584, DE 13 DE OUTUBRO DE 2011 -# Link : -# http://www.in.gov.br/visualiza/index.jsp?data=13/10/2011&jornal=1000&pagina=6&totalArquivos=6 - -# From Kelley Cook (2012-10-16): -# The governor of state of Bahia in Brazil announced on Thursday that -# due to public pressure, he is reversing the DST policy they implemented -# last year and will not be going to Summer Time on October 21st.... -# http://www.correio24horas.com.br/r/artigo/apos-pressoes-wagner-suspende-horario-de-verao-na-bahia - -# From Rodrigo Severo (2012-10-16): -# Tocantins state will have DST. -# http://noticias.terra.com.br/brasil/noticias/0,,OI6232536-EI306.html - -# From Steffen Thorsen (2013-09-20): -# Tocantins in Brazil is very likely not to observe DST from October.... -# http://conexaoto.com.br/2013/09/18/ministerio-confirma-que-tocantins-esta-fora-do-horario-de-verao-em-2013-mas-falta-publicacao-de-decreto -# We will keep this article updated when this is confirmed: -# http://www.timeanddate.com/news/time/brazil-starts-dst-2013.html - -# From Steffen Thorsen (2013-10-17): -# http://www.timeanddate.com/news/time/acre-amazonas-change-time-zone.html -# Senator Jorge Viana announced that Acre will change time zone on November 10. -# He did not specify the time of the change, nor if western parts of Amazonas -# will change as well. -# -# From Paul Eggert (2013-10-17): -# For now, assume western Amazonas will change as well. - -# Rule NAME FROM TO TYPE IN ON AT SAVE LETTER/S -# Decree 20,466 (1931-10-01) -# Decree 21,896 (1932-01-10) -Rule Brazil 1931 only - Oct 3 11:00 1:00 S -Rule Brazil 1932 1933 - Apr 1 0:00 0 - -Rule Brazil 1932 only - Oct 3 0:00 1:00 S -# Decree 23,195 (1933-10-10) -# revoked DST. -# Decree 27,496 (1949-11-24) -# Decree 27,998 (1950-04-13) -Rule Brazil 1949 1952 - Dec 1 0:00 1:00 S -Rule Brazil 1950 only - Apr 16 1:00 0 - -Rule Brazil 1951 1952 - Apr 1 0:00 0 - -# Decree 32,308 (1953-02-24) -Rule Brazil 1953 only - Mar 1 0:00 0 - -# Decree 34,724 (1953-11-30) -# revoked DST. -# Decree 52,700 (1963-10-18) -# established DST from 1963-10-23 00:00 to 1964-02-29 00:00 -# in SP, RJ, GB, MG, ES, due to the prolongation of the drought. -# Decree 53,071 (1963-12-03) -# extended the above decree to all of the national territory on 12-09. -Rule Brazil 1963 only - Dec 9 0:00 1:00 S -# Decree 53,604 (1964-02-25) -# extended summer time by one day to 1964-03-01 00:00 (start of school). -Rule Brazil 1964 only - Mar 1 0:00 0 - -# Decree 55,639 (1965-01-27) -Rule Brazil 1965 only - Jan 31 0:00 1:00 S -Rule Brazil 1965 only - Mar 31 0:00 0 - -# Decree 57,303 (1965-11-22) -Rule Brazil 1965 only - Dec 1 0:00 1:00 S -# Decree 57,843 (1966-02-18) -Rule Brazil 1966 1968 - Mar 1 0:00 0 - -Rule Brazil 1966 1967 - Nov 1 0:00 1:00 S -# Decree 63,429 (1968-10-15) -# revoked DST. 
-# Decree 91,698 (1985-09-27) -Rule Brazil 1985 only - Nov 2 0:00 1:00 S -# Decree 92,310 (1986-01-21) -# Decree 92,463 (1986-03-13) -Rule Brazil 1986 only - Mar 15 0:00 0 - -# Decree 93,316 (1986-10-01) -Rule Brazil 1986 only - Oct 25 0:00 1:00 S -Rule Brazil 1987 only - Feb 14 0:00 0 - -# Decree 94,922 (1987-09-22) -Rule Brazil 1987 only - Oct 25 0:00 1:00 S -Rule Brazil 1988 only - Feb 7 0:00 0 - -# Decree 96,676 (1988-09-12) -# except for the states of AC, AM, PA, RR, RO, and AP (then a territory) -Rule Brazil 1988 only - Oct 16 0:00 1:00 S -Rule Brazil 1989 only - Jan 29 0:00 0 - -# Decree 98,077 (1989-08-21) -# with the same exceptions -Rule Brazil 1989 only - Oct 15 0:00 1:00 S -Rule Brazil 1990 only - Feb 11 0:00 0 - -# Decree 99,530 (1990-09-17) -# adopted by RS, SC, PR, SP, RJ, ES, MG, GO, MS, DF. -# Decree 99,629 (1990-10-19) adds BA, MT. -Rule Brazil 1990 only - Oct 21 0:00 1:00 S -Rule Brazil 1991 only - Feb 17 0:00 0 - -# Unnumbered decree (1991-09-25) -# adopted by RS, SC, PR, SP, RJ, ES, MG, BA, GO, MT, MS, DF. -Rule Brazil 1991 only - Oct 20 0:00 1:00 S -Rule Brazil 1992 only - Feb 9 0:00 0 - -# Unnumbered decree (1992-10-16) -# adopted by same states. -Rule Brazil 1992 only - Oct 25 0:00 1:00 S -Rule Brazil 1993 only - Jan 31 0:00 0 - -# Decree 942 (1993-09-28) -# adopted by same states, plus AM. -# Decree 1,252 (1994-09-22; -# web page corrected 2004-01-07) adopted by same states, minus AM. -# Decree 1,636 (1995-09-14) -# adopted by same states, plus MT and TO. -# Decree 1,674 (1995-10-13) -# adds AL, SE. -Rule Brazil 1993 1995 - Oct Sun>=11 0:00 1:00 S -Rule Brazil 1994 1995 - Feb Sun>=15 0:00 0 - -Rule Brazil 1996 only - Feb 11 0:00 0 - -# Decree 2,000 (1996-09-04) -# adopted by same states, minus AL, SE. -Rule Brazil 1996 only - Oct 6 0:00 1:00 S -Rule Brazil 1997 only - Feb 16 0:00 0 - -# From Daniel C. Sobral (1998-02-12): -# In 1997, the DS began on October 6. The stated reason was that -# because international television networks ignored Brazil's policy on DS, -# they bought the wrong times on satellite for coverage of Pope's visit. -# This year, the ending date of DS was postponed to March 1 -# to help dealing with the shortages of electric power. -# -# Decree 2,317 (1997-09-04), adopted by same states. -Rule Brazil 1997 only - Oct 6 0:00 1:00 S -# Decree 2,495 -# (1998-02-10) -Rule Brazil 1998 only - Mar 1 0:00 0 - -# Decree 2,780 (1998-09-11) -# adopted by the same states as before. -Rule Brazil 1998 only - Oct 11 0:00 1:00 S -Rule Brazil 1999 only - Feb 21 0:00 0 - -# Decree 3,150 -# (1999-08-23) adopted by same states. -# Decree 3,188 (1999-09-30) -# adds SE, AL, PB, PE, RN, CE, PI, MA and RR. -Rule Brazil 1999 only - Oct 3 0:00 1:00 S -Rule Brazil 2000 only - Feb 27 0:00 0 - -# Decree 3,592 (2000-09-06) -# adopted by the same states as before. -# Decree 3,630 (2000-10-13) -# repeals DST in PE and RR, effective 2000-10-15 00:00. -# Decree 3,632 (2000-10-17) -# repeals DST in SE, AL, PB, RN, CE, PI and MA, effective 2000-10-22 00:00. -# Decree 3,916 -# (2001-09-13) reestablishes DST in AL, CE, MA, PB, PE, PI, RN, SE. -Rule Brazil 2000 2001 - Oct Sun>=8 0:00 1:00 S -Rule Brazil 2001 2006 - Feb Sun>=15 0:00 0 - -# Decree 4,399 (2002-10-01) repeals DST in AL, CE, MA, PB, PE, PI, RN, SE. -# 4,399 -Rule Brazil 2002 only - Nov 3 0:00 1:00 S -# Decree 4,844 (2003-09-24; corrected 2003-09-26) repeals DST in BA, MT, TO. -# 4,844 -Rule Brazil 2003 only - Oct 19 0:00 1:00 S -# Decree 5,223 (2004-10-01) reestablishes DST in MT. 
-# 5,223 -Rule Brazil 2004 only - Nov 2 0:00 1:00 S -# Decree 5,539 (2005-09-19), -# adopted by the same states as before. -Rule Brazil 2005 only - Oct 16 0:00 1:00 S -# Decree 5,920 (2006-10-03), -# adopted by the same states as before. -Rule Brazil 2006 only - Nov 5 0:00 1:00 S -Rule Brazil 2007 only - Feb 25 0:00 0 - -# Decree 6,212 (2007-09-26), -# adopted by the same states as before. -Rule Brazil 2007 only - Oct Sun>=8 0:00 1:00 S -# From Frederico A. C. Neves (2008-09-10): -# According to this decree -# http://www.planalto.gov.br/ccivil_03/_Ato2007-2010/2008/Decreto/D6558.htm -# [t]he DST period in Brazil now on will be from the 3rd Oct Sunday to the -# 3rd Feb Sunday. There is an exception on the return date when this is -# the Carnival Sunday then the return date will be the next Sunday... -Rule Brazil 2008 max - Oct Sun>=15 0:00 1:00 S -Rule Brazil 2008 2011 - Feb Sun>=15 0:00 0 - -Rule Brazil 2012 only - Feb Sun>=22 0:00 0 - -Rule Brazil 2013 2014 - Feb Sun>=15 0:00 0 - -Rule Brazil 2015 only - Feb Sun>=22 0:00 0 - -Rule Brazil 2016 2022 - Feb Sun>=15 0:00 0 - -Rule Brazil 2023 only - Feb Sun>=22 0:00 0 - -Rule Brazil 2024 2025 - Feb Sun>=15 0:00 0 - -Rule Brazil 2026 only - Feb Sun>=22 0:00 0 - -Rule Brazil 2027 2033 - Feb Sun>=15 0:00 0 - -Rule Brazil 2034 only - Feb Sun>=22 0:00 0 - -Rule Brazil 2035 2036 - Feb Sun>=15 0:00 0 - -Rule Brazil 2037 only - Feb Sun>=22 0:00 0 - -# From Arthur David Olson (2008-09-29): -# The next is wrong in some years but is better than nothing. -Rule Brazil 2038 max - Feb Sun>=15 0:00 0 - - -# The latest ruleset listed above says that the following states observe DST: -# DF, ES, GO, MG, MS, MT, PR, RJ, RS, SC, SP. - -# Zone NAME GMTOFF RULES FORMAT [UNTIL] -# -# Fernando de Noronha (administratively part of PE) -Zone America/Noronha -2:09:40 - LMT 1914 - -2:00 Brazil -02/-01 1990 Sep 17 - -2:00 - -02 1999 Sep 30 - -2:00 Brazil -02/-01 2000 Oct 15 - -2:00 - -02 2001 Sep 13 - -2:00 Brazil -02/-01 2002 Oct 1 - -2:00 - -02 -# Other Atlantic islands have no permanent settlement. -# These include Trindade and Martim Vaz (administratively part of ES), -# Rocas Atoll (RN), and the St Peter and St Paul Archipelago (PE). -# Fernando de Noronha was a separate territory from 1942-09-02 to 1989-01-01; -# it also included the Penedos. -# -# Amapá (AP), east Pará (PA) -# East Pará includes Belém, Marabá, Serra Norte, and São Félix do Xingu. -# The division between east and west Pará is the river Xingu. -# In the north a very small part from the river Javary (now Jari I guess, -# the border with Amapá) to the Amazon, then to the Xingu. -Zone America/Belem -3:13:56 - LMT 1914 - -3:00 Brazil -03/-02 1988 Sep 12 - -3:00 - -03 -# -# west Pará (PA) -# West Pará includes Altamira, Óbidos, Prainha, Oriximiná, and Santarém. 
-Zone America/Santarem -3:38:48 - LMT 1914 - -4:00 Brazil -04/-03 1988 Sep 12 - -4:00 - -04 2008 Jun 24 0:00 - -3:00 - -03 -# -# Maranhão (MA), Piauí (PI), Ceará (CE), Rio Grande do Norte (RN), -# Paraíba (PB) -Zone America/Fortaleza -2:34:00 - LMT 1914 - -3:00 Brazil -03/-02 1990 Sep 17 - -3:00 - -03 1999 Sep 30 - -3:00 Brazil -03/-02 2000 Oct 22 - -3:00 - -03 2001 Sep 13 - -3:00 Brazil -03/-02 2002 Oct 1 - -3:00 - -03 -# -# Pernambuco (PE) (except Atlantic islands) -Zone America/Recife -2:19:36 - LMT 1914 - -3:00 Brazil -03/-02 1990 Sep 17 - -3:00 - -03 1999 Sep 30 - -3:00 Brazil -03/-02 2000 Oct 15 - -3:00 - -03 2001 Sep 13 - -3:00 Brazil -03/-02 2002 Oct 1 - -3:00 - -03 -# -# Tocantins (TO) -Zone America/Araguaina -3:12:48 - LMT 1914 - -3:00 Brazil -03/-02 1990 Sep 17 - -3:00 - -03 1995 Sep 14 - -3:00 Brazil -03/-02 2003 Sep 24 - -3:00 - -03 2012 Oct 21 - -3:00 Brazil -03/-02 2013 Sep - -3:00 - -03 -# -# Alagoas (AL), Sergipe (SE) -Zone America/Maceio -2:22:52 - LMT 1914 - -3:00 Brazil -03/-02 1990 Sep 17 - -3:00 - -03 1995 Oct 13 - -3:00 Brazil -03/-02 1996 Sep 4 - -3:00 - -03 1999 Sep 30 - -3:00 Brazil -03/-02 2000 Oct 22 - -3:00 - -03 2001 Sep 13 - -3:00 Brazil -03/-02 2002 Oct 1 - -3:00 - -03 -# -# Bahia (BA) -# There are too many Salvadors elsewhere, so use America/Bahia instead -# of America/Salvador. -Zone America/Bahia -2:34:04 - LMT 1914 - -3:00 Brazil -03/-02 2003 Sep 24 - -3:00 - -03 2011 Oct 16 - -3:00 Brazil -03/-02 2012 Oct 21 - -3:00 - -03 -# -# Goiás (GO), Distrito Federal (DF), Minas Gerais (MG), -# Espírito Santo (ES), Rio de Janeiro (RJ), São Paulo (SP), Paraná (PR), -# Santa Catarina (SC), Rio Grande do Sul (RS) -Zone America/Sao_Paulo -3:06:28 - LMT 1914 - -3:00 Brazil -03/-02 1963 Oct 23 0:00 - -3:00 1:00 -02 1964 - -3:00 Brazil -03/-02 -# -# Mato Grosso do Sul (MS) -Zone America/Campo_Grande -3:38:28 - LMT 1914 - -4:00 Brazil -04/-03 -# -# Mato Grosso (MT) -Zone America/Cuiaba -3:44:20 - LMT 1914 - -4:00 Brazil -04/-03 2003 Sep 24 - -4:00 - -04 2004 Oct 1 - -4:00 Brazil -04/-03 -# -# Rondônia (RO) -Zone America/Porto_Velho -4:15:36 - LMT 1914 - -4:00 Brazil -04/-03 1988 Sep 12 - -4:00 - -04 -# -# Roraima (RR) -Zone America/Boa_Vista -4:02:40 - LMT 1914 - -4:00 Brazil -04/-03 1988 Sep 12 - -4:00 - -04 1999 Sep 30 - -4:00 Brazil -04/-03 2000 Oct 15 - -4:00 - -04 -# -# east Amazonas (AM): Boca do Acre, Jutaí, Manaus, Floriano Peixoto -# The great circle line from Tabatinga to Porto Acre divides -# east from west Amazonas. -Zone America/Manaus -4:00:04 - LMT 1914 - -4:00 Brazil -04/-03 1988 Sep 12 - -4:00 - -04 1993 Sep 28 - -4:00 Brazil -04/-03 1994 Sep 22 - -4:00 - -04 -# -# west Amazonas (AM): Atalaia do Norte, Boca do Maoco, Benjamin Constant, -# Eirunepé, Envira, Ipixuna -Zone America/Eirunepe -4:39:28 - LMT 1914 - -5:00 Brazil -05/-04 1988 Sep 12 - -5:00 - -05 1993 Sep 28 - -5:00 Brazil -05/-04 1994 Sep 22 - -5:00 - -05 2008 Jun 24 0:00 - -4:00 - -04 2013 Nov 10 - -5:00 - -05 -# -# Acre (AC) -Zone America/Rio_Branco -4:31:12 - LMT 1914 - -5:00 Brazil -05/-04 1988 Sep 12 - -5:00 - -05 2008 Jun 24 0:00 - -4:00 - -04 2013 Nov 10 - -5:00 - -05 - -# Chile - -# From Paul Eggert (2015-04-03): -# Shanks & Pottenger says America/Santiago introduced standard time in -# 1890 and rounds its UTC offset to 70W40; guess that in practice this -# was the same offset as in 1916-1919. It also says Pacific/Easter -# standardized on 109W22 in 1890; assume this didn't change the clocks. 
-# -# Dates for America/Santiago from 1910 to 2004 are primarily from -# the following source, cited by Oscar van Vlijmen (2006-10-08): -# [1] Chile Law -# http://www.webexhibits.org/daylightsaving/chile.html -# This contains a copy of a this official table: -# Cambios en la hora oficial de Chile desde 1900 (retrieved 2008-03-30) -# http://web.archive.org/web/20080330200901/http://www.horaoficial.cl/cambio.htm -# [1] needs several corrections, though. -# -# The first set of corrections is from: -# [2] History of the Official Time of Chile -# http://www.horaoficial.cl/ing/horaof_ing.html (retrieved 2012-03-06). See: -# http://web.archive.org/web/20120306042032/http://www.horaoficial.cl/ing/horaof_ing.html -# This is an English translation of: -# Historia de la hora oficial de Chile (retrieved 2012-10-24). See: -# http://web.archive.org/web/20121024234627/http://www.horaoficial.cl/horaof.htm -# A fancier Spanish version (requiring mouse-clicking) is at: -# http://www.horaoficial.cl/historia_hora.html -# Conflicts between [1] and [2] were resolved as follows: -# -# - [1] says the 1910 transition was Jan 1, [2] says Jan 10 and cites -# Boletín No. 1, Aviso No. 1 (1910). Go with [2]. -# -# - [1] says SMT was -4:42:45, [2] says Chile's official time from -# 1916 to 1919 was -4:42:46.3, the meridian of Chile's National -# Astronomical Observatory (OAN), then located in what is now -# Quinta Normal in Santiago. Go with [2], rounding it to -4:42:46. -# -# - [1] says the 1918 transition was Sep 1, [2] says Sep 10 and cites -# Boletín No. 22, Aviso No. 129/1918 (1918-08-23). Go with [2]. -# -# - [1] does not give times for transitions; assume they occur -# at midnight mainland time, the current common practice. However, -# go with [2]'s specification of 23:00 for the 1947-05-21 transition. -# -# Another correction to [1] is from Jesper Nørgaard Welen, who -# wrote (2006-10-08), "I think that there are some obvious mistakes in -# the suggested link from Oscar van Vlijmen,... for instance entry 66 -# says that GMT-4 ended 1990-09-12 while entry 67 only begins GMT-3 at -# 1990-09-15 (they should have been 1990-09-15 and 1990-09-16 -# respectively), but anyhow it clears up some doubts too." -# -# Data for Pacific/Easter from 1910 through 1967 come from Shanks & -# Pottenger. After that, for lack of better info assume -# Pacific/Easter is always two hours behind America/Santiago; -# this is known to work for DST transitions starting in 2008 and -# may well be true for earlier transitions. - -# From Eduardo Krell (1995-10-19): -# The law says to switch to DST at midnight [24:00] on the second SATURDAY -# of October.... The law is the same for March and October. -# (1998-09-29): -# Because of the drought this year, the government decided to go into -# DST earlier (saturday 9/26 at 24:00). This is a one-time change only ... -# (unless there's another dry season next year, I guess). - -# From Julio I. Pacheco Troncoso (1999-03-18): -# Because of the same drought, the government decided to end DST later, -# on April 3, (one-time change). - -# From Germán Poo-Caamaño (2008-03-03): -# Due to drought, Chile extends Daylight Time in three weeks. 
This -# is one-time change (Saturday 3/29 at 24:00 for America/Santiago -# and Saturday 3/29 at 22:00 for Pacific/Easter) -# The Supreme Decree is located at -# http://www.shoa.cl/servicios/supremo316.pdf -# -# From José Miguel Garrido (2008-03-05): -# http://www.shoa.cl/noticias/2008/04hora/hora.htm - -# From Angel Chiang (2010-03-04): -# Subject: DST in Chile exceptionally extended to 3 April due to earthquake -# http://www.gobiernodechile.cl/viewNoticia.aspx?idArticulo=30098 -# -# From Arthur David Olson (2010-03-06): -# Angel Chiang's message confirmed by Julio Pacheco; Julio provided a patch. - -# From Glenn Eychaner (2011-03-28): -# http://diario.elmercurio.com/2011/03/28/_portada/_portada/noticias/7565897A-CA86-49E6-9E03-660B21A4883E.htm?id=3D{7565897A-CA86-49E6-9E03-660B21A4883E} -# In English: -# Chile's clocks will go back an hour this year on the 7th of May instead -# of this Saturday. They will go forward again the 3rd Saturday in -# August, not in October as they have since 1968. - -# From Mauricio Parada (2012-02-22), translated by Glenn Eychaner (2012-02-23): -# As stated in the website of the Chilean Energy Ministry -# http://www.minenergia.cl/ministerio/noticias/generales/gobierno-anuncia-fechas-de-cambio-de.html -# The Chilean Government has decided to postpone the entrance into winter time -# (to leave DST) from March 11 2012 to April 28th 2012.... -# Quote from the website communication: -# -# 6. For the year 2012, the dates of entry into winter time will be as follows: -# a. Saturday April 28, 2012, clocks should go back 60 minutes; that is, at -# 23:59:59, instead of passing to 0:00, the time should be adjusted to be 23:00 -# of the same day. -# b. Saturday, September 1, 2012, clocks should go forward 60 minutes; that is, -# at 23:59:59, instead of passing to 0:00, the time should be adjusted to be -# 01:00 on September 2. - -# From Steffen Thorsen (2013-02-15): -# According to several news sources, Chile has extended DST this year, -# they will end DST later and start DST earlier than planned. They -# hope to save energy. The new end date is 2013-04-28 00:00 and new -# start date is 2013-09-08 00:00.... -# http://www.gob.cl/informa/2013/02/15/gobierno-anuncia-fechas-de-cambio-de-hora-para-el-ano-2013.htm - -# From José Miguel Garrido (2014-02-19): -# Today appeared in the Diario Oficial a decree amending the time change -# dates to 2014. -# DST End: last Saturday of April 2014 (Sun 27 Apr 2014 03:00 UTC) -# DST Start: first Saturday of September 2014 (Sun 07 Sep 2014 04:00 UTC) -# http://www.diariooficial.interior.gob.cl//media/2014/02/19/do-20140219.pdf - -# From Eduardo Romero Urra (2015-03-03): -# Today has been published officially that Chile will use the DST time -# permanently until March 25 of 2017 -# http://www.diariooficial.interior.gob.cl/media/2015/03/03/1-large.jpg -# -# From Paul Eggert (2015-03-03): -# For now, assume that the extension will persist indefinitely. - -# From Juan Correa (2016-03-18): -# The decree regarding DST has been published in today's Official Gazette: -# http://www.diariooficial.interior.gob.cl/versiones-anteriores/do/20160318/ -# http://www.leychile.cl/Navegar?idNorma=1088502 -# It does consider the second Saturday of May and August as the dates -# for the transition; and it lists DST dates until 2019, but I think -# this scheme will stick. -# -# From Paul Eggert (2016-03-18): -# For now, assume the pattern holds for the indefinite future. 
-# The decree says transitions occur at 24:00; in practice this appears -# to mean 24:00 mainland time, not 24:00 local time, so that Easter -# Island is always two hours behind the mainland. - -# From Juan Correa (2016-12-04): -# Magallanes region ... will keep DST (UTC -3) all year round.... -# http://www.soychile.cl/Santiago/Sociedad/2016/12/04/433428/Bachelet-firmo-el-decreto-para-establecer-un-horario-unico-para-la-Region-de-Magallanes.aspx -# -# From Deborah Goldsmith (2017-01-19): -# http://www.diariooficial.interior.gob.cl/publicaciones/2017/01/17/41660/01/1169626.pdf -# From Paul Eggert (2017-01-19): -# The above says the Magallanes change expires 2019-05-11 at 24:00, -# so in theory, they will revert to -04/-03 after that, which means -# they will switch from -03 to -04 one hour after Santiago does that day. -# For now, assume that they will not revert. - -# Rule NAME FROM TO TYPE IN ON AT SAVE LETTER/S -Rule Chile 1927 1931 - Sep 1 0:00 1:00 S -Rule Chile 1928 1932 - Apr 1 0:00 0 - -Rule Chile 1968 only - Nov 3 4:00u 1:00 S -Rule Chile 1969 only - Mar 30 3:00u 0 - -Rule Chile 1969 only - Nov 23 4:00u 1:00 S -Rule Chile 1970 only - Mar 29 3:00u 0 - -Rule Chile 1971 only - Mar 14 3:00u 0 - -Rule Chile 1970 1972 - Oct Sun>=9 4:00u 1:00 S -Rule Chile 1972 1986 - Mar Sun>=9 3:00u 0 - -Rule Chile 1973 only - Sep 30 4:00u 1:00 S -Rule Chile 1974 1987 - Oct Sun>=9 4:00u 1:00 S -Rule Chile 1987 only - Apr 12 3:00u 0 - -Rule Chile 1988 1990 - Mar Sun>=9 3:00u 0 - -Rule Chile 1988 1989 - Oct Sun>=9 4:00u 1:00 S -Rule Chile 1990 only - Sep 16 4:00u 1:00 S -Rule Chile 1991 1996 - Mar Sun>=9 3:00u 0 - -Rule Chile 1991 1997 - Oct Sun>=9 4:00u 1:00 S -Rule Chile 1997 only - Mar 30 3:00u 0 - -Rule Chile 1998 only - Mar Sun>=9 3:00u 0 - -Rule Chile 1998 only - Sep 27 4:00u 1:00 S -Rule Chile 1999 only - Apr 4 3:00u 0 - -Rule Chile 1999 2010 - Oct Sun>=9 4:00u 1:00 S -Rule Chile 2000 2007 - Mar Sun>=9 3:00u 0 - -# N.B.: the end of March 29 in Chile is March 30 in Universal time, -# which is used below in specifying the transition. -Rule Chile 2008 only - Mar 30 3:00u 0 - -Rule Chile 2009 only - Mar Sun>=9 3:00u 0 - -Rule Chile 2010 only - Apr Sun>=1 3:00u 0 - -Rule Chile 2011 only - May Sun>=2 3:00u 0 - -Rule Chile 2011 only - Aug Sun>=16 4:00u 1:00 S -Rule Chile 2012 2014 - Apr Sun>=23 3:00u 0 - -Rule Chile 2012 2014 - Sep Sun>=2 4:00u 1:00 S -Rule Chile 2016 max - May Sun>=9 3:00u 0 - -Rule Chile 2016 max - Aug Sun>=9 4:00u 1:00 S -# IATA SSIM anomalies: (1992-02) says 1992-03-14; -# (1996-09) says 1998-03-08. Ignore these. 
-# Zone NAME GMTOFF RULES FORMAT [UNTIL] -Zone America/Santiago -4:42:46 - LMT 1890 - -4:42:46 - SMT 1910 Jan 10 # Santiago Mean Time - -5:00 - -05 1916 Jul 1 - -4:42:46 - SMT 1918 Sep 10 - -4:00 - -04 1919 Jul 1 - -4:42:46 - SMT 1927 Sep 1 - -5:00 Chile -05/-04 1932 Sep 1 - -4:00 - -04 1942 Jun 1 - -5:00 - -05 1942 Aug 1 - -4:00 - -04 1946 Jul 15 - -4:00 1:00 -03 1946 Sep 1 # central Chile - -4:00 - -04 1947 Apr 1 - -5:00 - -05 1947 May 21 23:00 - -4:00 Chile -04/-03 -Zone America/Punta_Arenas -4:43:40 - LMT 1890 - -4:42:46 - SMT 1910 Jan 10 - -5:00 - -05 1916 Jul 1 - -4:42:46 - SMT 1918 Sep 10 - -4:00 - -04 1919 Jul 1 - -4:42:46 - SMT 1927 Sep 1 - -5:00 Chile -05/-04 1932 Sep 1 - -4:00 - -04 1942 Jun 1 - -5:00 - -05 1942 Aug 1 - -4:00 - -04 1947 Apr 1 - -5:00 - -05 1947 May 21 23:00 - -4:00 Chile -04/-03 2016 Dec 4 - -3:00 - -03 -Zone Pacific/Easter -7:17:28 - LMT 1890 - -7:17:28 - EMT 1932 Sep # Easter Mean Time - -7:00 Chile -07/-06 1982 Mar 14 3:00u # Easter Time - -6:00 Chile -06/-05 -# -# Salas y Gómez Island is uninhabited. -# Other Chilean locations, including Juan Fernández Is, Desventuradas Is, -# and Antarctic bases, are like America/Santiago. - -# Antarctic base using South American rules -# (See the file 'antarctica' for more.) -# -# Palmer, Anvers Island, since 1965 (moved 2 miles in 1968) -# -# From Ethan Dicks (1996-10-06): -# It keeps the same time as Punta Arenas, Chile, because, just like us -# and the South Pole, that's the other end of their supply line.... -# I verified with someone who was there that since 1980, -# Palmer has followed Chile. Prior to that, before the Falklands War, -# Palmer used to be supplied from Argentina. -# -# Zone NAME GMTOFF RULES FORMAT [UNTIL] -Zone Antarctica/Palmer 0 - -00 1965 - -4:00 Arg -04/-03 1969 Oct 5 - -3:00 Arg -03/-02 1982 May - -4:00 Chile -04/-03 2016 Dec 4 - -3:00 - -03 - -# Colombia - -# Milne gives 4:56:16.4 for Bogotá time in 1899; round to nearest. He writes, -# "A variation of fifteen minutes in the public clocks of Bogota is not rare." - -# Rule NAME FROM TO TYPE IN ON AT SAVE LETTER/S -Rule CO 1992 only - May 3 0:00 1:00 S -Rule CO 1993 only - Apr 4 0:00 0 - -# Zone NAME GMTOFF RULES FORMAT [UNTIL] -Zone America/Bogota -4:56:16 - LMT 1884 Mar 13 - -4:56:16 - BMT 1914 Nov 23 # Bogotá Mean Time - -5:00 CO -05/-04 -# Malpelo, Providencia, San Andres -# no information; probably like America/Bogota - -# Curaçao - -# Milne gives 4:35:46.9 for Curaçao mean time; round to nearest. -# -# From Paul Eggert (2006-03-22): -# Shanks & Pottenger say that The Bottom and Philipsburg have been at -# -4:00 since standard time was introduced on 1912-03-02; and that -# Kralendijk and Rincon used Kralendijk Mean Time (-4:33:08) from -# 1912-02-02 to 1965-01-01. The former is dubious, since S&P also say -# Saba Island has been like Curaçao. -# This all predates our 1970 cutoff, though. -# -# By July 2007 Curaçao and St Maarten are planned to become -# associated states within the Netherlands, much like Aruba; -# Bonaire, Saba and St Eustatius would become directly part of the -# Netherlands as Kingdom Islands. This won't affect their time zones -# though, as far as we know. -# -# Zone NAME GMTOFF RULES FORMAT [UNTIL] -Zone America/Curacao -4:35:47 - LMT 1912 Feb 12 # Willemstad - -4:30 - -0430 1965 - -4:00 - AST - -# From Arthur David Olson (2011-06-15): -# use links for places with new iso3166 codes. -# The name "Lower Prince's Quarter" is both longer than fourteen characters -# and contains an apostrophe; use "Lower_Princes" below. 
- -Link America/Curacao America/Lower_Princes # Sint Maarten -Link America/Curacao America/Kralendijk # Caribbean Netherlands - -# Ecuador -# -# Milne says the Central and South American Telegraph Company used -5:24:15. -# -# From Alois Treindl (2016-12-15): -# http://www.elcomercio.com/actualidad/hora-sixto-1993.html -# ... Whether the law applied also to Galápagos, I do not know. -# From Paul Eggert (2016-12-15): -# http://www.elcomercio.com/afull/modificacion-husohorario-ecuador-presidentes-decreto.html -# This says President Sixto Durán Ballén signed decree No. 285, which -# established DST from 1992-11-28 to 1993-02-05; it does not give transition -# times. The people called it "hora de Sixto" ("Sixto hour"). The change did -# not go over well; a popular song "Qué hora es" by Jaime Guevara had lyrics -# that included "Amanecía en mitad de la noche, los guaguas iban a clase sin -# sol" ("It was dawning in the middle of the night, the buses went to class -# without sun"). Although Ballén's campaign slogan was "Ni un paso atrás" -# (Not one step back), the clocks went back in 1993 and the experiment was not -# repeated. For now, assume transitions were at 00:00 local time country-wide. -# -# Rule NAME FROM TO TYPE IN ON AT SAVE LETTER/S -Rule Ecuador 1992 only - Nov 28 0:00 1:00 S -Rule Ecuador 1993 only - Feb 5 0:00 0 - -# -# Zone NAME GMTOFF RULES FORMAT [UNTIL] -Zone America/Guayaquil -5:19:20 - LMT 1890 - -5:14:00 - QMT 1931 # Quito Mean Time - -5:00 Ecuador -05/-04 -Zone Pacific/Galapagos -5:58:24 - LMT 1931 # Puerto Baquerizo Moreno - -5:00 - -05 1986 - -6:00 Ecuador -06/-05 - -# Falklands - -# From Paul Eggert (2006-03-22): -# Between 1990 and 2000 inclusive, Shanks & Pottenger and the IATA agree except -# the IATA gives 1996-09-08. Go with Shanks & Pottenger. - -# From Falkland Islands Government Office, London (2001-01-22) -# via Jesper Nørgaard: -# ... the clocks revert back to Local Mean Time at 2 am on Sunday 15 -# April 2001 and advance one hour to summer time at 2 am on Sunday 2 -# September. It is anticipated that the clocks will revert back at 2 -# am on Sunday 21 April 2002 and advance to summer time at 2 am on -# Sunday 1 September. - -# From Rives McDow (2001-02-13): -# -# I have communicated several times with people there, and the last -# time I had communications that was helpful was in 1998. Here is -# what was said then: -# -# "The general rule was that Stanley used daylight saving and the Camp -# did not. However for various reasons many people in the Camp have -# started to use daylight saving (known locally as 'Stanley Time') -# There is no rule as to who uses daylight saving - it is a matter of -# personal choice and so it is impossible to draw a map showing who -# uses it and who does not. Any list would be out of date as soon as -# it was produced. This year daylight saving ended on April 18/19th -# and started again on September 12/13th. I do not know what the rule -# is, but can find out if you like. We do not change at the same time -# as UK or Chile." -# -# I did have in my notes that the rule was "Second Saturday in Sep at -# 0:00 until third Saturday in Apr at 0:00". I think that this does -# not agree in some cases with Shanks; is this true? -# -# Also, there is no mention in the list that some areas in the -# Falklands do not use DST. I have found in my communications there -# that these areas are on the western half of East Falkland and all of -# West Falkland. Stanley is the only place that consistently observes -# DST. 
Again, as in other places in the world, the farmers don't like -# it. West Falkland is almost entirely sheep farmers. -# -# I know one lady there that keeps a list of which farm keeps DST and -# which doesn't each year. She runs a shop in Stanley, and says that -# the list changes each year. She uses it to communicate to her -# customers, catching them when they are home for lunch or dinner. - -# From Paul Eggert (2001-03-05): -# For now, we'll just record the time in Stanley, since we have no -# better info. - -# From Steffen Thorsen (2011-04-01): -# The Falkland Islands will not turn back clocks this winter, but stay on -# daylight saving time. -# -# One source: -# http://www.falklandnews.com/public/story.cfm?get=5914&source=3 -# -# We have gotten this confirmed by a clerk of the legislative assembly: -# Normally the clocks revert to Local Mean Time (UTC/GMT -4 hours) on the -# third Sunday of April at 0200hrs and advance to Summer Time (UTC/GMT -3 -# hours) on the first Sunday of September at 0200hrs. -# -# IMPORTANT NOTE: During 2011, on a trial basis, the Falkland Islands -# will not revert to local mean time, but clocks will remain on Summer -# time (UTC/GMT - 3 hours) throughout the whole of 2011. Any long term -# change to local time following the trial period will be notified. -# -# From Andrew Newman (2012-02-24) -# A letter from Justin McPhee, Chief Executive, -# Cable & Wireless Falkland Islands (dated 2012-02-22) -# states... -# The current Atlantic/Stanley entry under South America expects the -# clocks to go back to standard Falklands Time (FKT) on the 15th April. -# The database entry states that in 2011 Stanley was staying on fixed -# summer time on a trial basis only. FIG need to contact IANA and/or -# the maintainers of the database to inform them we're adopting -# the same policy this year and suggest recommendations for future years. -# -# For now we will assume permanent summer time for the Falklands -# until advised differently (to apply for 2012 and beyond, after the 2011 -# experiment was apparently successful.) -# Rule NAME FROM TO TYPE IN ON AT SAVE LETTER/S -Rule Falk 1937 1938 - Sep lastSun 0:00 1:00 S -Rule Falk 1938 1942 - Mar Sun>=19 0:00 0 - -Rule Falk 1939 only - Oct 1 0:00 1:00 S -Rule Falk 1940 1942 - Sep lastSun 0:00 1:00 S -Rule Falk 1943 only - Jan 1 0:00 0 - -Rule Falk 1983 only - Sep lastSun 0:00 1:00 S -Rule Falk 1984 1985 - Apr lastSun 0:00 0 - -Rule Falk 1984 only - Sep 16 0:00 1:00 S -Rule Falk 1985 2000 - Sep Sun>=9 0:00 1:00 S -Rule Falk 1986 2000 - Apr Sun>=16 0:00 0 - -Rule Falk 2001 2010 - Apr Sun>=15 2:00 0 - -Rule Falk 2001 2010 - Sep Sun>=1 2:00 1:00 S -# Zone NAME GMTOFF RULES FORMAT [UNTIL] -Zone Atlantic/Stanley -3:51:24 - LMT 1890 - -3:51:24 - SMT 1912 Mar 12 # Stanley Mean Time - -4:00 Falk -04/-03 1983 May - -3:00 Falk -03/-02 1985 Sep 15 - -4:00 Falk -04/-03 2010 Sep 5 2:00 - -3:00 - -03 - -# French Guiana -# Zone NAME GMTOFF RULES FORMAT [UNTIL] -Zone America/Cayenne -3:29:20 - LMT 1911 Jul - -4:00 - -04 1967 Oct - -3:00 - -03 - -# Guyana -# Zone NAME GMTOFF RULES FORMAT [UNTIL] -Zone America/Guyana -3:52:40 - LMT 1915 Mar # Georgetown - -3:45 - -0345 1975 Jul 31 - -3:00 - -03 1991 -# IATA SSIM (1996-06) says -4:00. Assume a 1991 switch. - -4:00 - -04 - -# Paraguay -# -# From Paul Eggert (2006-03-22): -# Shanks & Pottenger say that spring transitions are 01:00 -> 02:00, -# and autumn transitions are 00:00 -> 23:00. Go with pre-1999 -# editions of Shanks, and with the IATA, who say transitions occur at 00:00. 
-# -# From Waldemar Villamayor-Venialbo (2013-09-20): -# No time of the day is established for the adjustment, so people normally -# adjust their clocks at 0 hour of the given dates. -# -# Rule NAME FROM TO TYPE IN ON AT SAVE LETTER/S -Rule Para 1975 1988 - Oct 1 0:00 1:00 S -Rule Para 1975 1978 - Mar 1 0:00 0 - -Rule Para 1979 1991 - Apr 1 0:00 0 - -Rule Para 1989 only - Oct 22 0:00 1:00 S -Rule Para 1990 only - Oct 1 0:00 1:00 S -Rule Para 1991 only - Oct 6 0:00 1:00 S -Rule Para 1992 only - Mar 1 0:00 0 - -Rule Para 1992 only - Oct 5 0:00 1:00 S -Rule Para 1993 only - Mar 31 0:00 0 - -Rule Para 1993 1995 - Oct 1 0:00 1:00 S -Rule Para 1994 1995 - Feb lastSun 0:00 0 - -Rule Para 1996 only - Mar 1 0:00 0 - -# IATA SSIM (2000-02) says 1999-10-10; ignore this for now. -# From Steffen Thorsen (2000-10-02): -# I have three independent reports that Paraguay changed to DST this Sunday -# (10-01). -# -# Translated by Gwillim Law (2001-02-27) from -# Noticias, a daily paper in Asunción, Paraguay (2000-10-01): -# http://www.diarionoticias.com.py/011000/nacional/naciona1.htm -# Starting at 0:00 today, the clock will be set forward 60 minutes, in -# fulfillment of Decree No. 7,273 of the Executive Power.... The time change -# system has been operating for several years. Formerly there was a separate -# decree each year; the new law has the same effect, but permanently. Every -# year, the time will change on the first Sunday of October; likewise, the -# clock will be set back on the first Sunday of March. -# -Rule Para 1996 2001 - Oct Sun>=1 0:00 1:00 S -# IATA SSIM (1997-09) says Mar 1; go with Shanks & Pottenger. -Rule Para 1997 only - Feb lastSun 0:00 0 - -# Shanks & Pottenger say 1999-02-28; IATA SSIM (1999-02) says 1999-02-27, but -# (1999-09) reports no date; go with above sources and Gerd Knops (2001-02-27). -Rule Para 1998 2001 - Mar Sun>=1 0:00 0 - -# From Rives McDow (2002-02-28): -# A decree was issued in Paraguay (No. 16350) on 2002-02-26 that changed the -# dst method to be from the first Sunday in September to the first Sunday in -# April. -Rule Para 2002 2004 - Apr Sun>=1 0:00 0 - -Rule Para 2002 2003 - Sep Sun>=1 0:00 1:00 S -# -# From Jesper Nørgaard Welen (2005-01-02): -# There are several sources that claim that Paraguay made -# a timezone rule change in autumn 2004. -# From Steffen Thorsen (2005-01-05): -# Decree 1,867 (2004-03-05) -# From Carlos Raúl Perasso via Jesper Nørgaard Welen (2006-10-13) -# http://www.presidencia.gov.py/decretos/D1867.pdf -Rule Para 2004 2009 - Oct Sun>=15 0:00 1:00 S -Rule Para 2005 2009 - Mar Sun>=8 0:00 0 - -# From Carlos Raúl Perasso (2010-02-18): -# By decree number 3958 issued yesterday -# http://www.presidencia.gov.py/v1/wp-content/uploads/2010/02/decreto3958.pdf -# Paraguay changes its DST schedule, postponing the March rule to April and -# modifying the October date. The decree reads: -# ... -# Art. 1. It is hereby established that from the second Sunday of the month of -# April of this year (2010), the official time is to be set back 60 minutes, -# and that on the first Sunday of the month of October, it is to be set -# forward 60 minutes, in all the territory of the Paraguayan Republic. -# ... -Rule Para 2010 max - Oct Sun>=1 0:00 1:00 S -Rule Para 2010 2012 - Apr Sun>=8 0:00 0 - -# -# From Steffen Thorsen (2013-03-07): -# Paraguay will end DST on 2013-03-24 00:00.... -# http://www.ande.gov.py/interna.php?id=1075 -# -# From Carlos Raúl Perasso (2013-03-15): -# The change in Paraguay is now final. 
Decree number 10780 -# http://www.presidencia.gov.py/uploads/pdf/presidencia-3b86ff4b691c79d4f5927ca964922ec74772ce857c02ca054a52a37b49afc7fb.pdf -# From Carlos Raúl Perasso (2014-02-28): -# Decree 1264 can be found at: -# http://www.presidencia.gov.py/archivos/documentos/DECRETO1264_ey9r8zai.pdf -Rule Para 2013 max - Mar Sun>=22 0:00 0 - - -# Zone NAME GMTOFF RULES FORMAT [UNTIL] -Zone America/Asuncion -3:50:40 - LMT 1890 - -3:50:40 - AMT 1931 Oct 10 # Asunción Mean Time - -4:00 - -04 1972 Oct - -3:00 - -03 1974 Apr - -4:00 Para -04/-03 - -# Peru -# -# From Evelyn C. Leeper via Mark Brader (2003-10-26) -# : -# When we were in Peru in 1985-1986, they apparently switched over -# sometime between December 29 and January 3 while we were on the Amazon. -# -# From Paul Eggert (2006-03-22): -# Shanks & Pottenger don't have this transition. Assume 1986 was like 1987. - -# Rule NAME FROM TO TYPE IN ON AT SAVE LETTER/S -Rule Peru 1938 only - Jan 1 0:00 1:00 S -Rule Peru 1938 only - Apr 1 0:00 0 - -Rule Peru 1938 1939 - Sep lastSun 0:00 1:00 S -Rule Peru 1939 1940 - Mar Sun>=24 0:00 0 - -Rule Peru 1986 1987 - Jan 1 0:00 1:00 S -Rule Peru 1986 1987 - Apr 1 0:00 0 - -Rule Peru 1990 only - Jan 1 0:00 1:00 S -Rule Peru 1990 only - Apr 1 0:00 0 - -# IATA is ambiguous for 1993/1995; go with Shanks & Pottenger. -Rule Peru 1994 only - Jan 1 0:00 1:00 S -Rule Peru 1994 only - Apr 1 0:00 0 - -# Zone NAME GMTOFF RULES FORMAT [UNTIL] -Zone America/Lima -5:08:12 - LMT 1890 - -5:08:36 - LMT 1908 Jul 28 # Lima Mean Time? - -5:00 Peru -05/-04 - -# South Georgia -# Zone NAME GMTOFF RULES FORMAT [UNTIL] -Zone Atlantic/South_Georgia -2:26:08 - LMT 1890 # Grytviken - -2:00 - -02 - -# South Sandwich Is -# uninhabited; scientific personnel have wintered - -# Suriname -# Zone NAME GMTOFF RULES FORMAT [UNTIL] -Zone America/Paramaribo -3:40:40 - LMT 1911 - -3:40:52 - PMT 1935 # Paramaribo Mean Time - -3:40:36 - PMT 1945 Oct # The capital moved? - -3:30 - -0330 1984 Oct - -3:00 - -03 - -# Trinidad and Tobago -# Zone NAME GMTOFF RULES FORMAT [UNTIL] -Zone America/Port_of_Spain -4:06:04 - LMT 1912 Mar 2 - -4:00 - AST - -# These all agree with Trinidad and Tobago since 1970. -Link America/Port_of_Spain America/Anguilla -Link America/Port_of_Spain America/Antigua -Link America/Port_of_Spain America/Dominica -Link America/Port_of_Spain America/Grenada -Link America/Port_of_Spain America/Guadeloupe -Link America/Port_of_Spain America/Marigot # St Martin (French part) -Link America/Port_of_Spain America/Montserrat -Link America/Port_of_Spain America/St_Barthelemy # St Barthélemy -Link America/Port_of_Spain America/St_Kitts # St Kitts & Nevis -Link America/Port_of_Spain America/St_Lucia -Link America/Port_of_Spain America/St_Thomas # Virgin Islands (US) -Link America/Port_of_Spain America/St_Vincent -Link America/Port_of_Spain America/Tortola # Virgin Islands (UK) - -# Uruguay -# From Paul Eggert (1993-11-18): -# Uruguay wins the prize for the strangest peacetime manipulation of the rules. -# From Shanks & Pottenger: -# Rule NAME FROM TO TYPE IN ON AT SAVE LETTER/S -# Whitman gives 1923 Oct 1; go with Shanks & Pottenger. -Rule Uruguay 1923 only - Oct 2 0:00 0:30 HS -Rule Uruguay 1924 1926 - Apr 1 0:00 0 - -Rule Uruguay 1924 1925 - Oct 1 0:00 0:30 HS -Rule Uruguay 1933 1935 - Oct lastSun 0:00 0:30 HS -# Shanks & Pottenger give 1935 Apr 1 0:00 & 1936 Mar 30 0:00; go with Whitman. 
-Rule Uruguay 1934 1936 - Mar Sat>=25 23:30s 0 - -Rule Uruguay 1936 only - Nov 1 0:00 0:30 HS -Rule Uruguay 1937 1941 - Mar lastSun 0:00 0 - -# Whitman gives 1937 Oct 3; go with Shanks & Pottenger. -Rule Uruguay 1937 1940 - Oct lastSun 0:00 0:30 HS -# Whitman gives 1941 Oct 24 - 1942 Mar 27, 1942 Dec 14 - 1943 Apr 13, -# and 1943 Apr 13 "to present time"; go with Shanks & Pottenger. -Rule Uruguay 1941 only - Aug 1 0:00 0:30 HS -Rule Uruguay 1942 only - Jan 1 0:00 0 - -Rule Uruguay 1942 only - Dec 14 0:00 1:00 S -Rule Uruguay 1943 only - Mar 14 0:00 0 - -Rule Uruguay 1959 only - May 24 0:00 1:00 S -Rule Uruguay 1959 only - Nov 15 0:00 0 - -Rule Uruguay 1960 only - Jan 17 0:00 1:00 S -Rule Uruguay 1960 only - Mar 6 0:00 0 - -Rule Uruguay 1965 1967 - Apr Sun>=1 0:00 1:00 S -Rule Uruguay 1965 only - Sep 26 0:00 0 - -Rule Uruguay 1966 1967 - Oct 31 0:00 0 - -Rule Uruguay 1968 1970 - May 27 0:00 0:30 HS -Rule Uruguay 1968 1970 - Dec 2 0:00 0 - -Rule Uruguay 1972 only - Apr 24 0:00 1:00 S -Rule Uruguay 1972 only - Aug 15 0:00 0 - -Rule Uruguay 1974 only - Mar 10 0:00 0:30 HS -Rule Uruguay 1974 only - Dec 22 0:00 1:00 S -Rule Uruguay 1976 only - Oct 1 0:00 0 - -Rule Uruguay 1977 only - Dec 4 0:00 1:00 S -Rule Uruguay 1978 only - Apr 1 0:00 0 - -Rule Uruguay 1979 only - Oct 1 0:00 1:00 S -Rule Uruguay 1980 only - May 1 0:00 0 - -Rule Uruguay 1987 only - Dec 14 0:00 1:00 S -Rule Uruguay 1988 only - Mar 14 0:00 0 - -Rule Uruguay 1988 only - Dec 11 0:00 1:00 S -Rule Uruguay 1989 only - Mar 12 0:00 0 - -Rule Uruguay 1989 only - Oct 29 0:00 1:00 S -# Shanks & Pottenger say no DST was observed in 1990/1 and 1991/2, -# and that 1992/3's DST was from 10-25 to 03-01. Go with IATA. -Rule Uruguay 1990 1992 - Mar Sun>=1 0:00 0 - -Rule Uruguay 1990 1991 - Oct Sun>=21 0:00 1:00 S -Rule Uruguay 1992 only - Oct 18 0:00 1:00 S -Rule Uruguay 1993 only - Feb 28 0:00 0 - -# From Eduardo Cota (2004-09-20): -# The Uruguayan government has decreed a change in the local time.... -# http://www.presidencia.gub.uy/decretos/2004091502.htm -Rule Uruguay 2004 only - Sep 19 0:00 1:00 S -# From Steffen Thorsen (2005-03-11): -# Uruguay's DST was scheduled to end on Sunday, 2005-03-13, but in order to -# save energy ... it was postponed two weeks.... -# http://www.presidencia.gub.uy/_Web/noticias/2005/03/2005031005.htm -Rule Uruguay 2005 only - Mar 27 2:00 0 - -# From Eduardo Cota (2005-09-27): -# http://www.presidencia.gub.uy/_Web/decretos/2005/09/CM%20119_09%2009%202005_00001.PDF -# This means that from 2005-10-09 at 02:00 local time, until 2006-03-12 at -# 02:00 local time, official time in Uruguay will be at GMT -2. -Rule Uruguay 2005 only - Oct 9 2:00 1:00 S -Rule Uruguay 2006 only - Mar 12 2:00 0 - -# From Jesper Nørgaard Welen (2006-09-06): -# http://www.presidencia.gub.uy/_web/decretos/2006/09/CM%20210_08%2006%202006_00001.PDF -# -# From Steffen Thorsen (2015-06-30): -# ... it looks like they will not be using DST the coming summer: -# http://www.elobservador.com.uy/gobierno-resolvio-que-no-habra-cambio-horario-verano-n656787 -# http://www.republica.com.uy/este-ano-no-se-modificara-el-huso-horario-en-uruguay/523760/ -# From Paul Eggert (2015-06-30): -# Apparently restaurateurs complained that DST caused people to go to the beach -# instead of out to dinner. 
-# From Pablo Camargo (2015-07-13): -# http://archivo.presidencia.gub.uy/sci/decretos/2015/06/cons_min_201.pdf -# [dated 2015-06-29; repeals Decree 311/006 dated 2006-09-04] -Rule Uruguay 2006 2014 - Oct Sun>=1 2:00 1:00 S -Rule Uruguay 2007 2015 - Mar Sun>=8 2:00 0 - - -# This Zone can be simplified once we assume zic %z. -Zone America/Montevideo -3:44:44 - LMT 1898 Jun 28 - -3:44:44 - MMT 1920 May 1 # Montevideo MT - -3:30 Uruguay -0330/-03 1942 Dec 14 - -3:00 Uruguay -03/-02 1968 - -3:00 Uruguay -03/-0230 1971 - -3:00 Uruguay -03/-02 1974 - -3:00 Uruguay -03/-0230 1974 Dec 22 - -3:00 Uruguay -03/-02 - -# Venezuela -# -# From Paul Eggert (2015-07-28): -# For the 1965 transition see Gaceta Oficial No. 27.619 (1964-12-15), p 205.533 -# http://www.pgr.gob.ve/dmdocuments/1964/27619.pdf -# -# From John Stainforth (2007-11-28): -# ... the change for Venezuela originally expected for 2007-12-31 has -# been brought forward to 2007-12-09. The official announcement was -# published today in the "Gaceta Oficial de la República Bolivariana -# de Venezuela, número 38.819" (official document for all laws or -# resolution publication) -# http://www.globovision.com/news.php?nid=72208 - -# From Alexander Krivenyshev (2016-04-15): -# https://actualidad.rt.com/actualidad/204758-venezuela-modificar-huso-horario-sequia-elnino -# -# From Paul Eggert (2016-04-15): -# Clocks advance 30 minutes on 2016-05-01 at 02:30.... -# "'Venezuela's new time-zone: hours without light, hours without water, -# hours of presidential broadcasts, hours of lines,' quipped comedian -# Jean Mary Curró ...". See: Cawthorne A, Kai D. Venezuela scraps -# half-hour time difference set by Chavez. Reuters 2016-04-15 14:50 -0400 -# http://www.reuters.com/article/us-venezuela-timezone-idUSKCN0XC2BE -# -# From Matt Johnson (2016-04-20): -# ... published in the official Gazette [2016-04-18], here: -# http://historico.tsj.gob.ve/gaceta_ext/abril/1842016/E-1842016-4551.pdf - -# Zone NAME GMTOFF RULES FORMAT [UNTIL] -Zone America/Caracas -4:27:44 - LMT 1890 - -4:27:40 - CMT 1912 Feb 12 # Caracas Mean Time? - -4:30 - -0430 1965 Jan 1 0:00 - -4:00 - -04 2007 Dec 9 3:00 - -4:30 - -0430 2016 May 1 2:30 - -4:00 - -04 diff --git a/src/timezone/data/systemv b/src/timezone/data/systemv deleted file mode 100644 index d9e2995756..0000000000 --- a/src/timezone/data/systemv +++ /dev/null @@ -1,37 +0,0 @@ -# This file is in the public domain, so clarified as of -# 2009-05-17 by Arthur David Olson. - -# Old rules, should the need arise. -# No attempt is made to handle Newfoundland, since it cannot be expressed -# using the System V "TZ" scheme (half-hour offset), or anything outside -# North America (no support for non-standard DST start/end dates), nor -# the changes in the DST rules in the US after 1976 (which occurred after -# the old rules were written). -# -# If you need the old rules, uncomment ## lines. -# Compile this *without* leap second correction for true conformance. 
- -# Rule NAME FROM TO TYPE IN ON AT SAVE LETTER/S -Rule SystemV min 1973 - Apr lastSun 2:00 1:00 D -Rule SystemV min 1973 - Oct lastSun 2:00 0 S -Rule SystemV 1974 only - Jan 6 2:00 1:00 D -Rule SystemV 1974 only - Nov lastSun 2:00 0 S -Rule SystemV 1975 only - Feb 23 2:00 1:00 D -Rule SystemV 1975 only - Oct lastSun 2:00 0 S -Rule SystemV 1976 max - Apr lastSun 2:00 1:00 D -Rule SystemV 1976 max - Oct lastSun 2:00 0 S - -# Zone NAME GMTOFF RULES/SAVE FORMAT [UNTIL] -## Zone SystemV/AST4ADT -4:00 SystemV A%sT -## Zone SystemV/EST5EDT -5:00 SystemV E%sT -## Zone SystemV/CST6CDT -6:00 SystemV C%sT -## Zone SystemV/MST7MDT -7:00 SystemV M%sT -## Zone SystemV/PST8PDT -8:00 SystemV P%sT -## Zone SystemV/YST9YDT -9:00 SystemV Y%sT -## Zone SystemV/AST4 -4:00 - AST -## Zone SystemV/EST5 -5:00 - EST -## Zone SystemV/CST6 -6:00 - CST -## Zone SystemV/MST7 -7:00 - MST -## Zone SystemV/PST8 -8:00 - PST -## Zone SystemV/YST9 -9:00 - YST -## Zone SystemV/HST10 -10:00 - HST diff --git a/src/timezone/data/tzdata.zi b/src/timezone/data/tzdata.zi new file mode 100644 index 0000000000..21cccc4657 --- /dev/null +++ b/src/timezone/data/tzdata.zi @@ -0,0 +1,4177 @@ +# version 2018g +# This zic input file is in the public domain. +R d 1916 o - Jun 14 23s 1 S +R d 1916 1919 - O Sun>=1 23s 0 - +R d 1917 o - Mar 24 23s 1 S +R d 1918 o - Mar 9 23s 1 S +R d 1919 o - Mar 1 23s 1 S +R d 1920 o - F 14 23s 1 S +R d 1920 o - O 23 23s 0 - +R d 1921 o - Mar 14 23s 1 S +R d 1921 o - Jun 21 23s 0 - +R d 1939 o - S 11 23s 1 S +R d 1939 o - N 19 1 0 - +R d 1944 1945 - Ap M>=1 2 1 S +R d 1944 o - O 8 2 0 - +R d 1945 o - S 16 1 0 - +R d 1971 o - Ap 25 23s 1 S +R d 1971 o - S 26 23s 0 - +R d 1977 o - May 6 0 1 S +R d 1977 o - O 21 0 0 - +R d 1978 o - Mar 24 1 1 S +R d 1978 o - S 22 3 0 - +R d 1980 o - Ap 25 0 1 S +R d 1980 o - O 31 2 0 - +Z Africa/Algiers 0:12:12 - LMT 1891 Mar 15 0:1 +0:9:21 - PMT 1911 Mar 11 +0 d WE%sT 1940 F 25 2 +1 d CE%sT 1946 O 7 +0 - WET 1956 Ja 29 +1 - CET 1963 Ap 14 +0 d WE%sT 1977 O 21 +1 d CE%sT 1979 O 26 +0 d WE%sT 1981 May +1 - CET +Z Atlantic/Cape_Verde -1:34:4 - LMT 1912 Ja 1 2u +-2 - -02 1942 S +-2 1 -01 1945 O 15 +-2 - -02 1975 N 25 2 +-1 - -01 +Z Africa/Ndjamena 1:0:12 - LMT 1912 +1 - WAT 1979 O 14 +1 1 WAST 1980 Mar 8 +1 - WAT +Z Africa/Abidjan -0:16:8 - LMT 1912 +0 - GMT +Li Africa/Abidjan Africa/Bamako +Li Africa/Abidjan Africa/Banjul +Li Africa/Abidjan Africa/Conakry +Li Africa/Abidjan Africa/Dakar +Li Africa/Abidjan Africa/Freetown +Li Africa/Abidjan Africa/Lome +Li Africa/Abidjan Africa/Nouakchott +Li Africa/Abidjan Africa/Ouagadougou +Li Africa/Abidjan Atlantic/St_Helena +R K 1940 o - Jul 15 0 1 S +R K 1940 o - O 1 0 0 - +R K 1941 o - Ap 15 0 1 S +R K 1941 o - S 16 0 0 - +R K 1942 1944 - Ap 1 0 1 S +R K 1942 o - O 27 0 0 - +R K 1943 1945 - N 1 0 0 - +R K 1945 o - Ap 16 0 1 S +R K 1957 o - May 10 0 1 S +R K 1957 1958 - O 1 0 0 - +R K 1958 o - May 1 0 1 S +R K 1959 1981 - May 1 1 1 S +R K 1959 1965 - S 30 3 0 - +R K 1966 1994 - O 1 3 0 - +R K 1982 o - Jul 25 1 1 S +R K 1983 o - Jul 12 1 1 S +R K 1984 1988 - May 1 1 1 S +R K 1989 o - May 6 1 1 S +R K 1990 1994 - May 1 1 1 S +R K 1995 2010 - Ap lastF 0s 1 S +R K 1995 2005 - S lastTh 24 0 - +R K 2006 o - S 21 24 0 - +R K 2007 o - S Th>=1 24 0 - +R K 2008 o - Au lastTh 24 0 - +R K 2009 o - Au 20 24 0 - +R K 2010 o - Au 10 24 0 - +R K 2010 o - S 9 24 1 S +R K 2010 o - S lastTh 24 0 - +R K 2014 o - May 15 24 1 S +R K 2014 o - Jun 26 24 0 - +R K 2014 o - Jul 31 24 1 S +R K 2014 o - S lastTh 24 0 - +Z Africa/Cairo 2:5:9 - LMT 1900 O +2 K EE%sT +R 
GH 1920 1942 - S 1 0 0:20 - +R GH 1920 1942 - D 31 0 0 - +Z Africa/Accra -0:0:52 - LMT 1918 +0 GH GMT/+0020 +Z Africa/Bissau -1:2:20 - LMT 1912 Ja 1 1u +-1 - -01 1975 +0 - GMT +Z Africa/Nairobi 2:27:16 - LMT 1928 Jul +3 - EAT 1930 +2:30 - +0230 1940 +2:45 - +0245 1960 +3 - EAT +Li Africa/Nairobi Africa/Addis_Ababa +Li Africa/Nairobi Africa/Asmara +Li Africa/Nairobi Africa/Dar_es_Salaam +Li Africa/Nairobi Africa/Djibouti +Li Africa/Nairobi Africa/Kampala +Li Africa/Nairobi Africa/Mogadishu +Li Africa/Nairobi Indian/Antananarivo +Li Africa/Nairobi Indian/Comoro +Li Africa/Nairobi Indian/Mayotte +Z Africa/Monrovia -0:43:8 - LMT 1882 +-0:43:8 - MMT 1919 Mar +-0:44:30 - MMT 1972 Ja 7 +0 - GMT +R L 1951 o - O 14 2 1 S +R L 1952 o - Ja 1 0 0 - +R L 1953 o - O 9 2 1 S +R L 1954 o - Ja 1 0 0 - +R L 1955 o - S 30 0 1 S +R L 1956 o - Ja 1 0 0 - +R L 1982 1984 - Ap 1 0 1 S +R L 1982 1985 - O 1 0 0 - +R L 1985 o - Ap 6 0 1 S +R L 1986 o - Ap 4 0 1 S +R L 1986 o - O 3 0 0 - +R L 1987 1989 - Ap 1 0 1 S +R L 1987 1989 - O 1 0 0 - +R L 1997 o - Ap 4 0 1 S +R L 1997 o - O 4 0 0 - +R L 2013 o - Mar lastF 1 1 S +R L 2013 o - O lastF 2 0 - +Z Africa/Tripoli 0:52:44 - LMT 1920 +1 L CE%sT 1959 +2 - EET 1982 +1 L CE%sT 1990 May 4 +2 - EET 1996 S 30 +1 L CE%sT 1997 O 4 +2 - EET 2012 N 10 2 +1 L CE%sT 2013 O 25 2 +2 - EET +R MU 1982 o - O 10 0 1 - +R MU 1983 o - Mar 21 0 0 - +R MU 2008 o - O lastSun 2 1 - +R MU 2009 o - Mar lastSun 2 0 - +Z Indian/Mauritius 3:50 - LMT 1907 +4 MU +04/+05 +R M 1939 o - S 12 0 1 - +R M 1939 o - N 19 0 0 - +R M 1940 o - F 25 0 1 - +R M 1945 o - N 18 0 0 - +R M 1950 o - Jun 11 0 1 - +R M 1950 o - O 29 0 0 - +R M 1967 o - Jun 3 12 1 - +R M 1967 o - O 1 0 0 - +R M 1974 o - Jun 24 0 1 - +R M 1974 o - S 1 0 0 - +R M 1976 1977 - May 1 0 1 - +R M 1976 o - Au 1 0 0 - +R M 1977 o - S 28 0 0 - +R M 1978 o - Jun 1 0 1 - +R M 1978 o - Au 4 0 0 - +R M 2008 o - Jun 1 0 1 - +R M 2008 o - S 1 0 0 - +R M 2009 o - Jun 1 0 1 - +R M 2009 o - Au 21 0 0 - +R M 2010 o - May 2 0 1 - +R M 2010 o - Au 8 0 0 - +R M 2011 o - Ap 3 0 1 - +R M 2011 o - Jul 31 0 0 - +R M 2012 2013 - Ap lastSun 2 1 - +R M 2012 o - Jul 20 3 0 - +R M 2012 o - Au 20 2 1 - +R M 2012 o - S 30 3 0 - +R M 2013 o - Jul 7 3 0 - +R M 2013 o - Au 10 2 1 - +R M 2013 2018 - O lastSun 3 0 - +R M 2014 2018 - Mar lastSun 2 1 - +R M 2014 o - Jun 28 3 0 - +R M 2014 o - Au 2 2 1 - +R M 2015 o - Jun 14 3 0 - +R M 2015 o - Jul 19 2 1 - +R M 2016 o - Jun 5 3 0 - +R M 2016 o - Jul 10 2 1 - +R M 2017 o - May 21 3 0 - +R M 2017 o - Jul 2 2 1 - +R M 2018 o - May 13 3 0 - +R M 2018 o - Jun 17 2 1 - +Z Africa/Casablanca -0:30:20 - LMT 1913 O 26 +0 M +00/+01 1984 Mar 16 +1 - +01 1986 +0 M +00/+01 2018 O 27 +1 - +01 +Z Africa/El_Aaiun -0:52:48 - LMT 1934 +-1 - -01 1976 Ap 14 +0 M +00/+01 2018 O 27 +1 - +01 +Z Africa/Maputo 2:10:20 - LMT 1903 Mar +2 - CAT +Li Africa/Maputo Africa/Blantyre +Li Africa/Maputo Africa/Bujumbura +Li Africa/Maputo Africa/Gaborone +Li Africa/Maputo Africa/Harare +Li Africa/Maputo Africa/Kigali +Li Africa/Maputo Africa/Lubumbashi +Li Africa/Maputo Africa/Lusaka +R NA 1994 o - Mar 21 0 -1 WAT +R NA 1994 2017 - S Sun>=1 2 0 CAT +R NA 1995 2017 - Ap Sun>=1 2 -1 WAT +Z Africa/Windhoek 1:8:24 - LMT 1892 F 8 +1:30 - +0130 1903 Mar +2 - SAST 1942 S 20 2 +2 1 SAST 1943 Mar 21 2 +2 - SAST 1990 Mar 21 +2 NA %s +Z Africa/Lagos 0:13:36 - LMT 1919 S +1 - WAT +Li Africa/Lagos Africa/Bangui +Li Africa/Lagos Africa/Brazzaville +Li Africa/Lagos Africa/Douala +Li Africa/Lagos Africa/Kinshasa +Li Africa/Lagos Africa/Libreville +Li Africa/Lagos Africa/Luanda 
+Li Africa/Lagos Africa/Malabo +Li Africa/Lagos Africa/Niamey +Li Africa/Lagos Africa/Porto-Novo +Z Indian/Reunion 3:41:52 - LMT 1911 Jun +4 - +04 +Z Africa/Sao_Tome 0:26:56 - LMT 1884 +-0:36:45 - LMT 1912 Ja 1 0u +0 - GMT 2018 Ja 1 1 +1 - WAT +Z Indian/Mahe 3:41:48 - LMT 1906 Jun +4 - +04 +R SA 1942 1943 - S Sun>=15 2 1 - +R SA 1943 1944 - Mar Sun>=15 2 0 - +Z Africa/Johannesburg 1:52 - LMT 1892 F 8 +1:30 - SAST 1903 Mar +2 SA SAST +Li Africa/Johannesburg Africa/Maseru +Li Africa/Johannesburg Africa/Mbabane +R SD 1970 o - May 1 0 1 S +R SD 1970 1985 - O 15 0 0 - +R SD 1971 o - Ap 30 0 1 S +R SD 1972 1985 - Ap lastSun 0 1 S +Z Africa/Khartoum 2:10:8 - LMT 1931 +2 SD CA%sT 2000 Ja 15 12 +3 - EAT 2017 N +2 - CAT +Z Africa/Juba 2:6:28 - LMT 1931 +2 SD CA%sT 2000 Ja 15 12 +3 - EAT +R n 1939 o - Ap 15 23s 1 S +R n 1939 o - N 18 23s 0 - +R n 1940 o - F 25 23s 1 S +R n 1941 o - O 6 0 0 - +R n 1942 o - Mar 9 0 1 S +R n 1942 o - N 2 3 0 - +R n 1943 o - Mar 29 2 1 S +R n 1943 o - Ap 17 2 0 - +R n 1943 o - Ap 25 2 1 S +R n 1943 o - O 4 2 0 - +R n 1944 1945 - Ap M>=1 2 1 S +R n 1944 o - O 8 0 0 - +R n 1945 o - S 16 0 0 - +R n 1977 o - Ap 30 0s 1 S +R n 1977 o - S 24 0s 0 - +R n 1978 o - May 1 0s 1 S +R n 1978 o - O 1 0s 0 - +R n 1988 o - Jun 1 0s 1 S +R n 1988 1990 - S lastSun 0s 0 - +R n 1989 o - Mar 26 0s 1 S +R n 1990 o - May 1 0s 1 S +R n 2005 o - May 1 0s 1 S +R n 2005 o - S 30 1s 0 - +R n 2006 2008 - Mar lastSun 2s 1 S +R n 2006 2008 - O lastSun 2s 0 - +Z Africa/Tunis 0:40:44 - LMT 1881 May 12 +0:9:21 - PMT 1911 Mar 11 +1 n CE%sT +Z Antarctica/Casey 0 - -00 1969 +8 - +08 2009 O 18 2 +11 - +11 2010 Mar 5 2 +8 - +08 2011 O 28 2 +11 - +11 2012 F 21 17u +8 - +08 2016 O 22 +11 - +11 2018 Mar 11 4 +8 - +08 +Z Antarctica/Davis 0 - -00 1957 Ja 13 +7 - +07 1964 N +0 - -00 1969 F +7 - +07 2009 O 18 2 +5 - +05 2010 Mar 10 20u +7 - +07 2011 O 28 2 +5 - +05 2012 F 21 20u +7 - +07 +Z Antarctica/Mawson 0 - -00 1954 F 13 +6 - +06 2009 O 18 2 +5 - +05 +Z Indian/Kerguelen 0 - -00 1950 +5 - +05 +Z Antarctica/DumontDUrville 0 - -00 1947 +10 - +10 1952 Ja 14 +0 - -00 1956 N +10 - +10 +Z Antarctica/Syowa 0 - -00 1957 Ja 29 +3 - +03 +R Tr 2005 ma - Mar lastSun 1u 2 +02 +R Tr 2004 ma - O lastSun 1u 0 +00 +Z Antarctica/Troll 0 - -00 2005 F 12 +0 Tr %s +Z Antarctica/Vostok 0 - -00 1957 D 16 +6 - +06 +Z Antarctica/Rothera 0 - -00 1976 D +-3 - -03 +Z Asia/Kabul 4:36:48 - LMT 1890 +4 - +04 1945 +4:30 - +0430 +R AM 2011 o - Mar lastSun 2s 1 - +R AM 2011 o - O lastSun 2s 0 - +Z Asia/Yerevan 2:58 - LMT 1924 May 2 +3 - +03 1957 Mar +4 R +04/+05 1991 Mar 31 2s +3 R +03/+04 1995 S 24 2s +4 - +04 1997 +4 R +04/+05 2011 +4 AM +04/+05 +R AZ 1997 2015 - Mar lastSun 4 1 - +R AZ 1997 2015 - O lastSun 5 0 - +Z Asia/Baku 3:19:24 - LMT 1924 May 2 +3 - +03 1957 Mar +4 R +04/+05 1991 Mar 31 2s +3 R +03/+04 1992 S lastSun 2s +4 - +04 1996 +4 E +04/+05 1997 +4 AZ +04/+05 +R BD 2009 o - Jun 19 23 1 - +R BD 2009 o - D 31 24 0 - +Z Asia/Dhaka 6:1:40 - LMT 1890 +5:53:20 - HMT 1941 O +6:30 - +0630 1942 May 15 +5:30 - +0530 1942 S +6:30 - +0630 1951 S 30 +6 - +06 2009 +6 BD +06/+07 +Z Asia/Thimphu 5:58:36 - LMT 1947 Au 15 +5:30 - +0530 1987 O +6 - +06 +Z Indian/Chagos 4:49:40 - LMT 1907 +5 - +05 1996 +6 - +06 +Z Asia/Brunei 7:39:40 - LMT 1926 Mar +7:30 - +0730 1933 +8 - +08 +Z Asia/Yangon 6:24:47 - LMT 1880 +6:24:47 - RMT 1920 +6:30 - +0630 1942 May +9 - +09 1945 May 3 +6:30 - +0630 +R Sh 1940 o - Jun 1 0 1 D +R Sh 1940 o - O 12 24 0 S +R Sh 1941 o - Mar 15 0 1 D +R Sh 1941 o - N 1 24 0 S +R Sh 1942 o - Ja 31 0 1 D +R Sh 1945 o - S 1 24 0 S +R Sh 
1946 o - May 15 0 1 D +R Sh 1946 o - S 30 24 0 S +R Sh 1947 o - Ap 15 0 1 D +R Sh 1947 o - O 31 24 0 S +R Sh 1948 1949 - May 1 0 1 D +R Sh 1948 1949 - S 30 24 0 S +R CN 1986 o - May 4 2 1 D +R CN 1986 1991 - S Sun>=11 2 0 S +R CN 1987 1991 - Ap Sun>=11 2 1 D +Z Asia/Shanghai 8:5:43 - LMT 1901 +8 Sh C%sT 1949 May 28 +8 CN C%sT +Z Asia/Urumqi 5:50:20 - LMT 1928 +6 - +06 +R HK 1941 o - Ap 1 3:30 1 S +R HK 1941 o - S 30 3:30 0 - +R HK 1946 o - Ap 20 3:30 1 S +R HK 1946 o - D 1 3:30 0 - +R HK 1947 o - Ap 13 3:30 1 S +R HK 1947 o - D 30 3:30 0 - +R HK 1948 o - May 2 3:30 1 S +R HK 1948 1951 - O lastSun 3:30 0 - +R HK 1952 o - O 25 3:30 0 - +R HK 1949 1953 - Ap Sun>=1 3:30 1 S +R HK 1953 o - N 1 3:30 0 - +R HK 1954 1964 - Mar Sun>=18 3:30 1 S +R HK 1954 o - O 31 3:30 0 - +R HK 1955 1964 - N Sun>=1 3:30 0 - +R HK 1965 1976 - Ap Sun>=16 3:30 1 S +R HK 1965 1976 - O Sun>=16 3:30 0 - +R HK 1973 o - D 30 3:30 1 S +R HK 1979 o - May Sun>=8 3:30 1 S +R HK 1979 o - O Sun>=16 3:30 0 - +Z Asia/Hong_Kong 7:36:42 - LMT 1904 O 30 +8 HK HK%sT 1941 D 25 +9 - JST 1945 S 15 +8 HK HK%sT +R f 1946 o - May 15 0 1 D +R f 1946 o - O 1 0 0 S +R f 1947 o - Ap 15 0 1 D +R f 1947 o - N 1 0 0 S +R f 1948 1951 - May 1 0 1 D +R f 1948 1951 - O 1 0 0 S +R f 1952 o - Mar 1 0 1 D +R f 1952 1954 - N 1 0 0 S +R f 1953 1959 - Ap 1 0 1 D +R f 1955 1961 - O 1 0 0 S +R f 1960 1961 - Jun 1 0 1 D +R f 1974 1975 - Ap 1 0 1 D +R f 1974 1975 - O 1 0 0 S +R f 1979 o - Jul 1 0 1 D +R f 1979 o - O 1 0 0 S +Z Asia/Taipei 8:6 - LMT 1896 +8 - CST 1937 O +9 - JST 1945 S 21 1 +8 f C%sT +R _ 1942 1943 - Ap 30 23 1 - +R _ 1942 o - N 17 23 0 - +R _ 1943 o - S 30 23 0 S +R _ 1946 o - Ap 30 23s 1 D +R _ 1946 o - S 30 23s 0 S +R _ 1947 o - Ap 19 23s 1 D +R _ 1947 o - N 30 23s 0 S +R _ 1948 o - May 2 23s 1 D +R _ 1948 o - O 31 23s 0 S +R _ 1949 1950 - Ap Sat>=1 23s 1 D +R _ 1949 1950 - O lastSat 23s 0 S +R _ 1951 o - Mar 31 23s 1 D +R _ 1951 o - O 28 23s 0 S +R _ 1952 1953 - Ap Sat>=1 23s 1 D +R _ 1952 o - N 1 23s 0 S +R _ 1953 1954 - O lastSat 23s 0 S +R _ 1954 1956 - Mar Sat>=17 23s 1 D +R _ 1955 o - N 5 23s 0 S +R _ 1956 1964 - N Sun>=1 3:30 0 S +R _ 1957 1964 - Mar Sun>=18 3:30 1 D +R _ 1965 1973 - Ap Sun>=16 3:30 1 D +R _ 1965 1966 - O Sun>=16 2:30 0 S +R _ 1967 1976 - O Sun>=16 3:30 0 S +R _ 1973 o - D 30 3:30 1 D +R _ 1975 1976 - Ap Sun>=16 3:30 1 D +R _ 1979 o - May 13 3:30 1 D +R _ 1979 o - O Sun>=16 3:30 0 S +Z Asia/Macau 7:34:10 - LMT 1904 O 30 +8 - CST 1941 D 21 23 +9 _ +09/+10 1945 S 30 24 +8 _ C%sT +R CY 1975 o - Ap 13 0 1 S +R CY 1975 o - O 12 0 0 - +R CY 1976 o - May 15 0 1 S +R CY 1976 o - O 11 0 0 - +R CY 1977 1980 - Ap Sun>=1 0 1 S +R CY 1977 o - S 25 0 0 - +R CY 1978 o - O 2 0 0 - +R CY 1979 1997 - S lastSun 0 0 - +R CY 1981 1998 - Mar lastSun 0 1 S +Z Asia/Nicosia 2:13:28 - LMT 1921 N 14 +2 CY EE%sT 1998 S +2 E EE%sT +Z Asia/Famagusta 2:15:48 - LMT 1921 N 14 +2 CY EE%sT 1998 S +2 E EE%sT 2016 S 8 +3 - +03 2017 O 29 1u +2 E EE%sT +Li Asia/Nicosia Europe/Nicosia +Z Asia/Tbilisi 2:59:11 - LMT 1880 +2:59:11 - TBMT 1924 May 2 +3 - +03 1957 Mar +4 R +04/+05 1991 Mar 31 2s +3 R +03/+04 1992 +3 e +03/+04 1994 S lastSun +4 e +04/+05 1996 O lastSun +4 1 +05 1997 Mar lastSun +4 e +04/+05 2004 Jun 27 +3 R +03/+04 2005 Mar lastSun 2 +4 - +04 +Z Asia/Dili 8:22:20 - LMT 1912 +8 - +08 1942 F 21 23 +9 - +09 1976 May 3 +8 - +08 2000 S 17 +9 - +09 +Z Asia/Kolkata 5:53:28 - LMT 1854 Jun 28 +5:53:20 - HMT 1870 +5:21:10 - MMT 1906 +5:30 - IST 1941 O +5:30 1 +0630 1942 May 15 +5:30 - IST 1942 S +5:30 1 +0630 1945 O 15 +5:30 - IST +Z Asia/Jakarta 7:7:12 - 
LMT 1867 Au 10 +7:7:12 - BMT 1923 D 31 23:47:12 +7:20 - +0720 1932 N +7:30 - +0730 1942 Mar 23 +9 - +09 1945 S 23 +7:30 - +0730 1948 May +8 - +08 1950 May +7:30 - +0730 1964 +7 - WIB +Z Asia/Pontianak 7:17:20 - LMT 1908 May +7:17:20 - PMT 1932 N +7:30 - +0730 1942 Ja 29 +9 - +09 1945 S 23 +7:30 - +0730 1948 May +8 - +08 1950 May +7:30 - +0730 1964 +8 - WITA 1988 +7 - WIB +Z Asia/Makassar 7:57:36 - LMT 1920 +7:57:36 - MMT 1932 N +8 - +08 1942 F 9 +9 - +09 1945 S 23 +8 - WITA +Z Asia/Jayapura 9:22:48 - LMT 1932 N +9 - +09 1944 S +9:30 - +0930 1964 +9 - WIT +R i 1978 1980 - Mar 21 0 1 - +R i 1978 o - O 21 0 0 - +R i 1979 o - S 19 0 0 - +R i 1980 o - S 23 0 0 - +R i 1991 o - May 3 0 1 - +R i 1992 1995 - Mar 22 0 1 - +R i 1991 1995 - S 22 0 0 - +R i 1996 o - Mar 21 0 1 - +R i 1996 o - S 21 0 0 - +R i 1997 1999 - Mar 22 0 1 - +R i 1997 1999 - S 22 0 0 - +R i 2000 o - Mar 21 0 1 - +R i 2000 o - S 21 0 0 - +R i 2001 2003 - Mar 22 0 1 - +R i 2001 2003 - S 22 0 0 - +R i 2004 o - Mar 21 0 1 - +R i 2004 o - S 21 0 0 - +R i 2005 o - Mar 22 0 1 - +R i 2005 o - S 22 0 0 - +R i 2008 o - Mar 21 0 1 - +R i 2008 o - S 21 0 0 - +R i 2009 2011 - Mar 22 0 1 - +R i 2009 2011 - S 22 0 0 - +R i 2012 o - Mar 21 0 1 - +R i 2012 o - S 21 0 0 - +R i 2013 2015 - Mar 22 0 1 - +R i 2013 2015 - S 22 0 0 - +R i 2016 o - Mar 21 0 1 - +R i 2016 o - S 21 0 0 - +R i 2017 2019 - Mar 22 0 1 - +R i 2017 2019 - S 22 0 0 - +R i 2020 o - Mar 21 0 1 - +R i 2020 o - S 21 0 0 - +R i 2021 2023 - Mar 22 0 1 - +R i 2021 2023 - S 22 0 0 - +R i 2024 o - Mar 21 0 1 - +R i 2024 o - S 21 0 0 - +R i 2025 2027 - Mar 22 0 1 - +R i 2025 2027 - S 22 0 0 - +R i 2028 2029 - Mar 21 0 1 - +R i 2028 2029 - S 21 0 0 - +R i 2030 2031 - Mar 22 0 1 - +R i 2030 2031 - S 22 0 0 - +R i 2032 2033 - Mar 21 0 1 - +R i 2032 2033 - S 21 0 0 - +R i 2034 2035 - Mar 22 0 1 - +R i 2034 2035 - S 22 0 0 - +R i 2036 ma - Mar 21 0 1 - +R i 2036 ma - S 21 0 0 - +Z Asia/Tehran 3:25:44 - LMT 1916 +3:25:44 - TMT 1946 +3:30 - +0330 1977 N +4 i +04/+05 1979 +3:30 i +0330/+0430 +R IQ 1982 o - May 1 0 1 - +R IQ 1982 1984 - O 1 0 0 - +R IQ 1983 o - Mar 31 0 1 - +R IQ 1984 1985 - Ap 1 0 1 - +R IQ 1985 1990 - S lastSun 1s 0 - +R IQ 1986 1990 - Mar lastSun 1s 1 - +R IQ 1991 2007 - Ap 1 3s 1 - +R IQ 1991 2007 - O 1 3s 0 - +Z Asia/Baghdad 2:57:40 - LMT 1890 +2:57:36 - BMT 1918 +3 - +03 1982 May +3 IQ +03/+04 +R Z 1940 o - Jun 1 0 1 D +R Z 1942 1944 - N 1 0 0 S +R Z 1943 o - Ap 1 2 1 D +R Z 1944 o - Ap 1 0 1 D +R Z 1945 o - Ap 16 0 1 D +R Z 1945 o - N 1 2 0 S +R Z 1946 o - Ap 16 2 1 D +R Z 1946 o - N 1 0 0 S +R Z 1948 o - May 23 0 2 DD +R Z 1948 o - S 1 0 1 D +R Z 1948 1949 - N 1 2 0 S +R Z 1949 o - May 1 0 1 D +R Z 1950 o - Ap 16 0 1 D +R Z 1950 o - S 15 3 0 S +R Z 1951 o - Ap 1 0 1 D +R Z 1951 o - N 11 3 0 S +R Z 1952 o - Ap 20 2 1 D +R Z 1952 o - O 19 3 0 S +R Z 1953 o - Ap 12 2 1 D +R Z 1953 o - S 13 3 0 S +R Z 1954 o - Jun 13 0 1 D +R Z 1954 o - S 12 0 0 S +R Z 1955 o - Jun 11 2 1 D +R Z 1955 o - S 11 0 0 S +R Z 1956 o - Jun 3 0 1 D +R Z 1956 o - S 30 3 0 S +R Z 1957 o - Ap 29 2 1 D +R Z 1957 o - S 22 0 0 S +R Z 1974 o - Jul 7 0 1 D +R Z 1974 o - O 13 0 0 S +R Z 1975 o - Ap 20 0 1 D +R Z 1975 o - Au 31 0 0 S +R Z 1985 o - Ap 14 0 1 D +R Z 1985 o - S 15 0 0 S +R Z 1986 o - May 18 0 1 D +R Z 1986 o - S 7 0 0 S +R Z 1987 o - Ap 15 0 1 D +R Z 1987 o - S 13 0 0 S +R Z 1988 o - Ap 10 0 1 D +R Z 1988 o - S 4 0 0 S +R Z 1989 o - Ap 30 0 1 D +R Z 1989 o - S 3 0 0 S +R Z 1990 o - Mar 25 0 1 D +R Z 1990 o - Au 26 0 0 S +R Z 1991 o - Mar 24 0 1 D +R Z 1991 o - S 1 0 0 S +R Z 1992 o - Mar 29 0 1 
D +R Z 1992 o - S 6 0 0 S +R Z 1993 o - Ap 2 0 1 D +R Z 1993 o - S 5 0 0 S +R Z 1994 o - Ap 1 0 1 D +R Z 1994 o - Au 28 0 0 S +R Z 1995 o - Mar 31 0 1 D +R Z 1995 o - S 3 0 0 S +R Z 1996 o - Mar 15 0 1 D +R Z 1996 o - S 16 0 0 S +R Z 1997 o - Mar 21 0 1 D +R Z 1997 o - S 14 0 0 S +R Z 1998 o - Mar 20 0 1 D +R Z 1998 o - S 6 0 0 S +R Z 1999 o - Ap 2 2 1 D +R Z 1999 o - S 3 2 0 S +R Z 2000 o - Ap 14 2 1 D +R Z 2000 o - O 6 1 0 S +R Z 2001 o - Ap 9 1 1 D +R Z 2001 o - S 24 1 0 S +R Z 2002 o - Mar 29 1 1 D +R Z 2002 o - O 7 1 0 S +R Z 2003 o - Mar 28 1 1 D +R Z 2003 o - O 3 1 0 S +R Z 2004 o - Ap 7 1 1 D +R Z 2004 o - S 22 1 0 S +R Z 2005 o - Ap 1 2 1 D +R Z 2005 o - O 9 2 0 S +R Z 2006 2010 - Mar F>=26 2 1 D +R Z 2006 o - O 1 2 0 S +R Z 2007 o - S 16 2 0 S +R Z 2008 o - O 5 2 0 S +R Z 2009 o - S 27 2 0 S +R Z 2010 o - S 12 2 0 S +R Z 2011 o - Ap 1 2 1 D +R Z 2011 o - O 2 2 0 S +R Z 2012 o - Mar F>=26 2 1 D +R Z 2012 o - S 23 2 0 S +R Z 2013 ma - Mar F>=23 2 1 D +R Z 2013 ma - O lastSun 2 0 S +Z Asia/Jerusalem 2:20:54 - LMT 1880 +2:20:40 - JMT 1918 +2 Z I%sT +R JP 1948 o - May Sat>=1 24 1 D +R JP 1948 1951 - S Sat>=8 25 0 S +R JP 1949 o - Ap Sat>=1 24 1 D +R JP 1950 1951 - May Sat>=1 24 1 D +Z Asia/Tokyo 9:18:59 - LMT 1887 D 31 15u +9 JP J%sT +R J 1973 o - Jun 6 0 1 S +R J 1973 1975 - O 1 0 0 - +R J 1974 1977 - May 1 0 1 S +R J 1976 o - N 1 0 0 - +R J 1977 o - O 1 0 0 - +R J 1978 o - Ap 30 0 1 S +R J 1978 o - S 30 0 0 - +R J 1985 o - Ap 1 0 1 S +R J 1985 o - O 1 0 0 - +R J 1986 1988 - Ap F>=1 0 1 S +R J 1986 1990 - O F>=1 0 0 - +R J 1989 o - May 8 0 1 S +R J 1990 o - Ap 27 0 1 S +R J 1991 o - Ap 17 0 1 S +R J 1991 o - S 27 0 0 - +R J 1992 o - Ap 10 0 1 S +R J 1992 1993 - O F>=1 0 0 - +R J 1993 1998 - Ap F>=1 0 1 S +R J 1994 o - S F>=15 0 0 - +R J 1995 1998 - S F>=15 0s 0 - +R J 1999 o - Jul 1 0s 1 S +R J 1999 2002 - S lastF 0s 0 - +R J 2000 2001 - Mar lastTh 0s 1 S +R J 2002 2012 - Mar lastTh 24 1 S +R J 2003 o - O 24 0s 0 - +R J 2004 o - O 15 0s 0 - +R J 2005 o - S lastF 0s 0 - +R J 2006 2011 - O lastF 0s 0 - +R J 2013 o - D 20 0 0 - +R J 2014 ma - Mar lastTh 24 1 S +R J 2014 ma - O lastF 0s 0 - +Z Asia/Amman 2:23:44 - LMT 1931 +2 J EE%sT +Z Asia/Almaty 5:7:48 - LMT 1924 May 2 +5 - +05 1930 Jun 21 +6 R +06/+07 1991 Mar 31 2s +5 R +05/+06 1992 Ja 19 2s +6 R +06/+07 2004 O 31 2s +6 - +06 +Z Asia/Qyzylorda 4:21:52 - LMT 1924 May 2 +4 - +04 1930 Jun 21 +5 - +05 1981 Ap +5 1 +06 1981 O +6 - +06 1982 Ap +5 R +05/+06 1991 Mar 31 2s +4 R +04/+05 1991 S 29 2s +5 R +05/+06 1992 Ja 19 2s +6 R +06/+07 1992 Mar 29 2s +5 R +05/+06 2004 O 31 2s +6 - +06 +Z Asia/Aqtobe 3:48:40 - LMT 1924 May 2 +4 - +04 1930 Jun 21 +5 - +05 1981 Ap +5 1 +06 1981 O +6 - +06 1982 Ap +5 R +05/+06 1991 Mar 31 2s +4 R +04/+05 1992 Ja 19 2s +5 R +05/+06 2004 O 31 2s +5 - +05 +Z Asia/Aqtau 3:21:4 - LMT 1924 May 2 +4 - +04 1930 Jun 21 +5 - +05 1981 O +6 - +06 1982 Ap +5 R +05/+06 1991 Mar 31 2s +4 R +04/+05 1992 Ja 19 2s +5 R +05/+06 1994 S 25 2s +4 R +04/+05 2004 O 31 2s +5 - +05 +Z Asia/Atyrau 3:27:44 - LMT 1924 May 2 +3 - +03 1930 Jun 21 +5 - +05 1981 O +6 - +06 1982 Ap +5 R +05/+06 1991 Mar 31 2s +4 R +04/+05 1992 Ja 19 2s +5 R +05/+06 1999 Mar 28 2s +4 R +04/+05 2004 O 31 2s +5 - +05 +Z Asia/Oral 3:25:24 - LMT 1924 May 2 +3 - +03 1930 Jun 21 +5 - +05 1981 Ap +5 1 +06 1981 O +6 - +06 1982 Ap +5 R +05/+06 1989 Mar 26 2s +4 R +04/+05 1992 Ja 19 2s +5 R +05/+06 1992 Mar 29 2s +4 R +04/+05 2004 O 31 2s +5 - +05 +R KG 1992 1996 - Ap Sun>=7 0s 1 - +R KG 1992 1996 - S lastSun 0 0 - +R KG 1997 2005 - Mar lastSun 2:30 1 - +R KG 1997 2004 
- O lastSun 2:30 0 - +Z Asia/Bishkek 4:58:24 - LMT 1924 May 2 +5 - +05 1930 Jun 21 +6 R +06/+07 1991 Mar 31 2s +5 R +05/+06 1991 Au 31 2 +5 KG +05/+06 2005 Au 12 +6 - +06 +R KR 1948 o - Jun 1 0 1 D +R KR 1948 o - S 13 0 0 S +R KR 1949 o - Ap 3 0 1 D +R KR 1949 1951 - S Sun>=8 0 0 S +R KR 1950 o - Ap 1 0 1 D +R KR 1951 o - May 6 0 1 D +R KR 1955 o - May 5 0 1 D +R KR 1955 o - S 9 0 0 S +R KR 1956 o - May 20 0 1 D +R KR 1956 o - S 30 0 0 S +R KR 1957 1960 - May Sun>=1 0 1 D +R KR 1957 1960 - S Sun>=18 0 0 S +R KR 1987 1988 - May Sun>=8 2 1 D +R KR 1987 1988 - O Sun>=8 3 0 S +Z Asia/Seoul 8:27:52 - LMT 1908 Ap +8:30 - KST 1912 +9 - JST 1945 S 8 +9 - KST 1954 Mar 21 +8:30 KR K%sT 1961 Au 10 +9 KR K%sT +Z Asia/Pyongyang 8:23 - LMT 1908 Ap +8:30 - KST 1912 +9 - JST 1945 Au 24 +9 - KST 2015 Au 15 +8:30 - KST 2018 May 4 23:30 +9 - KST +R l 1920 o - Mar 28 0 1 S +R l 1920 o - O 25 0 0 - +R l 1921 o - Ap 3 0 1 S +R l 1921 o - O 3 0 0 - +R l 1922 o - Mar 26 0 1 S +R l 1922 o - O 8 0 0 - +R l 1923 o - Ap 22 0 1 S +R l 1923 o - S 16 0 0 - +R l 1957 1961 - May 1 0 1 S +R l 1957 1961 - O 1 0 0 - +R l 1972 o - Jun 22 0 1 S +R l 1972 1977 - O 1 0 0 - +R l 1973 1977 - May 1 0 1 S +R l 1978 o - Ap 30 0 1 S +R l 1978 o - S 30 0 0 - +R l 1984 1987 - May 1 0 1 S +R l 1984 1991 - O 16 0 0 - +R l 1988 o - Jun 1 0 1 S +R l 1989 o - May 10 0 1 S +R l 1990 1992 - May 1 0 1 S +R l 1992 o - O 4 0 0 - +R l 1993 ma - Mar lastSun 0 1 S +R l 1993 1998 - S lastSun 0 0 - +R l 1999 ma - O lastSun 0 0 - +Z Asia/Beirut 2:22 - LMT 1880 +2 l EE%sT +R NB 1935 1941 - S 14 0 0:20 - +R NB 1935 1941 - D 14 0 0 - +Z Asia/Kuala_Lumpur 6:46:46 - LMT 1901 +6:55:25 - SMT 1905 Jun +7 - +07 1933 +7 0:20 +0720 1936 +7:20 - +0720 1941 S +7:30 - +0730 1942 F 16 +9 - +09 1945 S 12 +7:30 - +0730 1982 +8 - +08 +Z Asia/Kuching 7:21:20 - LMT 1926 Mar +7:30 - +0730 1933 +8 NB +08/+0820 1942 F 16 +9 - +09 1945 S 12 +8 - +08 +Z Indian/Maldives 4:54 - LMT 1880 +4:54 - MMT 1960 +5 - +05 +R X 1983 1984 - Ap 1 0 1 - +R X 1983 o - O 1 0 0 - +R X 1985 1998 - Mar lastSun 0 1 - +R X 1984 1998 - S lastSun 0 0 - +R X 2001 o - Ap lastSat 2 1 - +R X 2001 2006 - S lastSat 2 0 - +R X 2002 2006 - Mar lastSat 2 1 - +R X 2015 2016 - Mar lastSat 2 1 - +R X 2015 2016 - S lastSat 0 0 - +Z Asia/Hovd 6:6:36 - LMT 1905 Au +6 - +06 1978 +7 X +07/+08 +Z Asia/Ulaanbaatar 7:7:32 - LMT 1905 Au +7 - +07 1978 +8 X +08/+09 +Z Asia/Choibalsan 7:38 - LMT 1905 Au +7 - +07 1978 +8 - +08 1983 Ap +9 X +09/+10 2008 Mar 31 +8 X +08/+09 +Z Asia/Kathmandu 5:41:16 - LMT 1920 +5:30 - +0530 1986 +5:45 - +0545 +R PK 2002 o - Ap Sun>=2 0 1 S +R PK 2002 o - O Sun>=2 0 0 - +R PK 2008 o - Jun 1 0 1 S +R PK 2008 2009 - N 1 0 0 - +R PK 2009 o - Ap 15 0 1 S +Z Asia/Karachi 4:28:12 - LMT 1907 +5:30 - +0530 1942 S +5:30 1 +0630 1945 O 15 +5:30 - +0530 1951 S 30 +5 - +05 1971 Mar 26 +5 PK PK%sT +R P 1999 2005 - Ap F>=15 0 1 S +R P 1999 2003 - O F>=15 0 0 - +R P 2004 o - O 1 1 0 - +R P 2005 o - O 4 2 0 - +R P 2006 2007 - Ap 1 0 1 S +R P 2006 o - S 22 0 0 - +R P 2007 o - S Th>=8 2 0 - +R P 2008 2009 - Mar lastF 0 1 S +R P 2008 o - S 1 0 0 - +R P 2009 o - S F>=1 1 0 - +R P 2010 o - Mar 26 0 1 S +R P 2010 o - Au 11 0 0 - +R P 2011 o - Ap 1 0:1 1 S +R P 2011 o - Au 1 0 0 - +R P 2011 o - Au 30 0 1 S +R P 2011 o - S 30 0 0 - +R P 2012 2014 - Mar lastTh 24 1 S +R P 2012 o - S 21 1 0 - +R P 2013 o - S F>=21 0 0 - +R P 2014 2015 - O F>=21 0 0 - +R P 2015 o - Mar lastF 24 1 S +R P 2016 ma - Mar Sat>=22 1 1 S +R P 2016 ma - O lastSat 1 0 - +Z Asia/Gaza 2:17:52 - LMT 1900 O +2 Z EET/EEST 1948 May 15 +2 K EE%sT 1967 
Jun 5 +2 Z I%sT 1996 +2 J EE%sT 1999 +2 P EE%sT 2008 Au 29 +2 - EET 2008 S +2 P EE%sT 2010 +2 - EET 2010 Mar 27 0:1 +2 P EE%sT 2011 Au +2 - EET 2012 +2 P EE%sT +Z Asia/Hebron 2:20:23 - LMT 1900 O +2 Z EET/EEST 1948 May 15 +2 K EE%sT 1967 Jun 5 +2 Z I%sT 1996 +2 J EE%sT 1999 +2 P EE%sT +R PH 1936 o - N 1 0 1 D +R PH 1937 o - F 1 0 0 S +R PH 1954 o - Ap 12 0 1 D +R PH 1954 o - Jul 1 0 0 S +R PH 1978 o - Mar 22 0 1 D +R PH 1978 o - S 21 0 0 S +Z Asia/Manila -15:56 - LMT 1844 D 31 +8:4 - LMT 1899 May 11 +8 PH P%sT 1942 May +9 - JST 1944 N +8 PH P%sT +Z Asia/Qatar 3:26:8 - LMT 1920 +4 - +04 1972 Jun +3 - +03 +Li Asia/Qatar Asia/Bahrain +Z Asia/Riyadh 3:6:52 - LMT 1947 Mar 14 +3 - +03 +Li Asia/Riyadh Asia/Aden +Li Asia/Riyadh Asia/Kuwait +Z Asia/Singapore 6:55:25 - LMT 1901 +6:55:25 - SMT 1905 Jun +7 - +07 1933 +7 0:20 +0720 1936 +7:20 - +0720 1941 S +7:30 - +0730 1942 F 16 +9 - +09 1945 S 12 +7:30 - +0730 1982 +8 - +08 +Z Asia/Colombo 5:19:24 - LMT 1880 +5:19:32 - MMT 1906 +5:30 - +0530 1942 Ja 5 +5:30 0:30 +06 1942 S +5:30 1 +0630 1945 O 16 2 +5:30 - +0530 1996 May 25 +6:30 - +0630 1996 O 26 0:30 +6 - +06 2006 Ap 15 0:30 +5:30 - +0530 +R S 1920 1923 - Ap Sun>=15 2 1 S +R S 1920 1923 - O Sun>=1 2 0 - +R S 1962 o - Ap 29 2 1 S +R S 1962 o - O 1 2 0 - +R S 1963 1965 - May 1 2 1 S +R S 1963 o - S 30 2 0 - +R S 1964 o - O 1 2 0 - +R S 1965 o - S 30 2 0 - +R S 1966 o - Ap 24 2 1 S +R S 1966 1976 - O 1 2 0 - +R S 1967 1978 - May 1 2 1 S +R S 1977 1978 - S 1 2 0 - +R S 1983 1984 - Ap 9 2 1 S +R S 1983 1984 - O 1 2 0 - +R S 1986 o - F 16 2 1 S +R S 1986 o - O 9 2 0 - +R S 1987 o - Mar 1 2 1 S +R S 1987 1988 - O 31 2 0 - +R S 1988 o - Mar 15 2 1 S +R S 1989 o - Mar 31 2 1 S +R S 1989 o - O 1 2 0 - +R S 1990 o - Ap 1 2 1 S +R S 1990 o - S 30 2 0 - +R S 1991 o - Ap 1 0 1 S +R S 1991 1992 - O 1 0 0 - +R S 1992 o - Ap 8 0 1 S +R S 1993 o - Mar 26 0 1 S +R S 1993 o - S 25 0 0 - +R S 1994 1996 - Ap 1 0 1 S +R S 1994 2005 - O 1 0 0 - +R S 1997 1998 - Mar lastM 0 1 S +R S 1999 2006 - Ap 1 0 1 S +R S 2006 o - S 22 0 0 - +R S 2007 o - Mar lastF 0 1 S +R S 2007 o - N F>=1 0 0 - +R S 2008 o - Ap F>=1 0 1 S +R S 2008 o - N 1 0 0 - +R S 2009 o - Mar lastF 0 1 S +R S 2010 2011 - Ap F>=1 0 1 S +R S 2012 ma - Mar lastF 0 1 S +R S 2009 ma - O lastF 0 0 - +Z Asia/Damascus 2:25:12 - LMT 1920 +2 S EE%sT +Z Asia/Dushanbe 4:35:12 - LMT 1924 May 2 +5 - +05 1930 Jun 21 +6 R +06/+07 1991 Mar 31 2s +5 1 +05/+06 1991 S 9 2s +5 - +05 +Z Asia/Bangkok 6:42:4 - LMT 1880 +6:42:4 - BMT 1920 Ap +7 - +07 +Li Asia/Bangkok Asia/Phnom_Penh +Li Asia/Bangkok Asia/Vientiane +Z Asia/Ashgabat 3:53:32 - LMT 1924 May 2 +4 - +04 1930 Jun 21 +5 R +05/+06 1991 Mar 31 2 +4 R +04/+05 1992 Ja 19 2 +5 - +05 +Z Asia/Dubai 3:41:12 - LMT 1920 +4 - +04 +Li Asia/Dubai Asia/Muscat +Z Asia/Samarkand 4:27:53 - LMT 1924 May 2 +4 - +04 1930 Jun 21 +5 - +05 1981 Ap +5 1 +06 1981 O +6 - +06 1982 Ap +5 R +05/+06 1992 +5 - +05 +Z Asia/Tashkent 4:37:11 - LMT 1924 May 2 +5 - +05 1930 Jun 21 +6 R +06/+07 1991 Mar 31 2 +5 R +05/+06 1992 +5 - +05 +Z Asia/Ho_Chi_Minh 7:6:40 - LMT 1906 Jul +7:6:30 - PLMT 1911 May +7 - +07 1942 D 31 23 +8 - +08 1945 Mar 14 23 +9 - +09 1945 S 2 +7 - +07 1947 Ap +8 - +08 1955 Jul +7 - +07 1959 D 31 23 +8 - +08 1975 Jun 13 +7 - +07 +R AU 1917 o - Ja 1 0:1 1 D +R AU 1917 o - Mar 25 2 0 S +R AU 1942 o - Ja 1 2 1 D +R AU 1942 o - Mar 29 2 0 S +R AU 1942 o - S 27 2 1 D +R AU 1943 1944 - Mar lastSun 2 0 S +R AU 1943 o - O 3 2 1 D +Z Australia/Darwin 8:43:20 - LMT 1895 F +9 - ACST 1899 May +9:30 AU AC%sT +R AW 1974 o - O lastSun 2s 1 D +R AW 1975 o - 
Mar Sun>=1 2s 0 S +R AW 1983 o - O lastSun 2s 1 D +R AW 1984 o - Mar Sun>=1 2s 0 S +R AW 1991 o - N 17 2s 1 D +R AW 1992 o - Mar Sun>=1 2s 0 S +R AW 2006 o - D 3 2s 1 D +R AW 2007 2009 - Mar lastSun 2s 0 S +R AW 2007 2008 - O lastSun 2s 1 D +Z Australia/Perth 7:43:24 - LMT 1895 D +8 AU AW%sT 1943 Jul +8 AW AW%sT +Z Australia/Eucla 8:35:28 - LMT 1895 D +8:45 AU +0845/+0945 1943 Jul +8:45 AW +0845/+0945 +R AQ 1971 o - O lastSun 2s 1 D +R AQ 1972 o - F lastSun 2s 0 S +R AQ 1989 1991 - O lastSun 2s 1 D +R AQ 1990 1992 - Mar Sun>=1 2s 0 S +R Ho 1992 1993 - O lastSun 2s 1 D +R Ho 1993 1994 - Mar Sun>=1 2s 0 S +Z Australia/Brisbane 10:12:8 - LMT 1895 +10 AU AE%sT 1971 +10 AQ AE%sT +Z Australia/Lindeman 9:55:56 - LMT 1895 +10 AU AE%sT 1971 +10 AQ AE%sT 1992 Jul +10 Ho AE%sT +R AS 1971 1985 - O lastSun 2s 1 D +R AS 1986 o - O 19 2s 1 D +R AS 1987 2007 - O lastSun 2s 1 D +R AS 1972 o - F 27 2s 0 S +R AS 1973 1985 - Mar Sun>=1 2s 0 S +R AS 1986 1990 - Mar Sun>=15 2s 0 S +R AS 1991 o - Mar 3 2s 0 S +R AS 1992 o - Mar 22 2s 0 S +R AS 1993 o - Mar 7 2s 0 S +R AS 1994 o - Mar 20 2s 0 S +R AS 1995 2005 - Mar lastSun 2s 0 S +R AS 2006 o - Ap 2 2s 0 S +R AS 2007 o - Mar lastSun 2s 0 S +R AS 2008 ma - Ap Sun>=1 2s 0 S +R AS 2008 ma - O Sun>=1 2s 1 D +Z Australia/Adelaide 9:14:20 - LMT 1895 F +9 - ACST 1899 May +9:30 AU AC%sT 1971 +9:30 AS AC%sT +R AT 1967 o - O Sun>=1 2s 1 D +R AT 1968 o - Mar lastSun 2s 0 S +R AT 1968 1985 - O lastSun 2s 1 D +R AT 1969 1971 - Mar Sun>=8 2s 0 S +R AT 1972 o - F lastSun 2s 0 S +R AT 1973 1981 - Mar Sun>=1 2s 0 S +R AT 1982 1983 - Mar lastSun 2s 0 S +R AT 1984 1986 - Mar Sun>=1 2s 0 S +R AT 1986 o - O Sun>=15 2s 1 D +R AT 1987 1990 - Mar Sun>=15 2s 0 S +R AT 1987 o - O Sun>=22 2s 1 D +R AT 1988 1990 - O lastSun 2s 1 D +R AT 1991 1999 - O Sun>=1 2s 1 D +R AT 1991 2005 - Mar lastSun 2s 0 S +R AT 2000 o - Au lastSun 2s 1 D +R AT 2001 ma - O Sun>=1 2s 1 D +R AT 2006 o - Ap Sun>=1 2s 0 S +R AT 2007 o - Mar lastSun 2s 0 S +R AT 2008 ma - Ap Sun>=1 2s 0 S +Z Australia/Hobart 9:49:16 - LMT 1895 S +10 - AEST 1916 O 1 2 +10 1 AEDT 1917 F +10 AU AE%sT 1967 +10 AT AE%sT +Z Australia/Currie 9:35:28 - LMT 1895 S +10 - AEST 1916 O 1 2 +10 1 AEDT 1917 F +10 AU AE%sT 1971 Jul +10 AT AE%sT +R AV 1971 1985 - O lastSun 2s 1 D +R AV 1972 o - F lastSun 2s 0 S +R AV 1973 1985 - Mar Sun>=1 2s 0 S +R AV 1986 1990 - Mar Sun>=15 2s 0 S +R AV 1986 1987 - O Sun>=15 2s 1 D +R AV 1988 1999 - O lastSun 2s 1 D +R AV 1991 1994 - Mar Sun>=1 2s 0 S +R AV 1995 2005 - Mar lastSun 2s 0 S +R AV 2000 o - Au lastSun 2s 1 D +R AV 2001 2007 - O lastSun 2s 1 D +R AV 2006 o - Ap Sun>=1 2s 0 S +R AV 2007 o - Mar lastSun 2s 0 S +R AV 2008 ma - Ap Sun>=1 2s 0 S +R AV 2008 ma - O Sun>=1 2s 1 D +Z Australia/Melbourne 9:39:52 - LMT 1895 F +10 AU AE%sT 1971 +10 AV AE%sT +R AN 1971 1985 - O lastSun 2s 1 D +R AN 1972 o - F 27 2s 0 S +R AN 1973 1981 - Mar Sun>=1 2s 0 S +R AN 1982 o - Ap Sun>=1 2s 0 S +R AN 1983 1985 - Mar Sun>=1 2s 0 S +R AN 1986 1989 - Mar Sun>=15 2s 0 S +R AN 1986 o - O 19 2s 1 D +R AN 1987 1999 - O lastSun 2s 1 D +R AN 1990 1995 - Mar Sun>=1 2s 0 S +R AN 1996 2005 - Mar lastSun 2s 0 S +R AN 2000 o - Au lastSun 2s 1 D +R AN 2001 2007 - O lastSun 2s 1 D +R AN 2006 o - Ap Sun>=1 2s 0 S +R AN 2007 o - Mar lastSun 2s 0 S +R AN 2008 ma - Ap Sun>=1 2s 0 S +R AN 2008 ma - O Sun>=1 2s 1 D +Z Australia/Sydney 10:4:52 - LMT 1895 F +10 AU AE%sT 1971 +10 AN AE%sT +Z Australia/Broken_Hill 9:25:48 - LMT 1895 F +10 - AEST 1896 Au 23 +9 - ACST 1899 May +9:30 AU AC%sT 1971 +9:30 AN AC%sT 2000 +9:30 AS AC%sT +R LH 1981 1984 - O 
lastSun 2 1 - +R LH 1982 1985 - Mar Sun>=1 2 0 - +R LH 1985 o - O lastSun 2 0:30 - +R LH 1986 1989 - Mar Sun>=15 2 0 - +R LH 1986 o - O 19 2 0:30 - +R LH 1987 1999 - O lastSun 2 0:30 - +R LH 1990 1995 - Mar Sun>=1 2 0 - +R LH 1996 2005 - Mar lastSun 2 0 - +R LH 2000 o - Au lastSun 2 0:30 - +R LH 2001 2007 - O lastSun 2 0:30 - +R LH 2006 o - Ap Sun>=1 2 0 - +R LH 2007 o - Mar lastSun 2 0 - +R LH 2008 ma - Ap Sun>=1 2 0 - +R LH 2008 ma - O Sun>=1 2 0:30 - +Z Australia/Lord_Howe 10:36:20 - LMT 1895 F +10 - AEST 1981 Mar +10:30 LH +1030/+1130 1985 Jul +10:30 LH +1030/+11 +Z Antarctica/Macquarie 0 - -00 1899 N +10 - AEST 1916 O 1 2 +10 1 AEDT 1917 F +10 AU AE%sT 1919 Ap 1 0s +0 - -00 1948 Mar 25 +10 AU AE%sT 1967 +10 AT AE%sT 2010 Ap 4 3 +11 - +11 +Z Indian/Christmas 7:2:52 - LMT 1895 F +7 - +07 +Z Indian/Cocos 6:27:40 - LMT 1900 +6:30 - +0630 +R FJ 1998 1999 - N Sun>=1 2 1 - +R FJ 1999 2000 - F lastSun 3 0 - +R FJ 2009 o - N 29 2 1 - +R FJ 2010 o - Mar lastSun 3 0 - +R FJ 2010 2013 - O Sun>=21 2 1 - +R FJ 2011 o - Mar Sun>=1 3 0 - +R FJ 2012 2013 - Ja Sun>=18 3 0 - +R FJ 2014 o - Ja Sun>=18 2 0 - +R FJ 2014 ma - N Sun>=1 2 1 - +R FJ 2015 ma - Ja Sun>=13 3 0 - +Z Pacific/Fiji 11:55:44 - LMT 1915 O 26 +12 FJ +12/+13 +Z Pacific/Gambier -8:59:48 - LMT 1912 O +-9 - -09 +Z Pacific/Marquesas -9:18 - LMT 1912 O +-9:30 - -0930 +Z Pacific/Tahiti -9:58:16 - LMT 1912 O +-10 - -10 +Z Pacific/Guam -14:21 - LMT 1844 D 31 +9:39 - LMT 1901 +10 - GST 2000 D 23 +10 - ChST +Li Pacific/Guam Pacific/Saipan +Z Pacific/Tarawa 11:32:4 - LMT 1901 +12 - +12 +Z Pacific/Enderbury -11:24:20 - LMT 1901 +-12 - -12 1979 O +-11 - -11 1994 D 31 +13 - +13 +Z Pacific/Kiritimati -10:29:20 - LMT 1901 +-10:40 - -1040 1979 O +-10 - -10 1994 D 31 +14 - +14 +Z Pacific/Majuro 11:24:48 - LMT 1901 +11 - +11 1969 O +12 - +12 +Z Pacific/Kwajalein 11:9:20 - LMT 1901 +11 - +11 1969 O +-12 - -12 1993 Au 20 +12 - +12 +Z Pacific/Chuuk 10:7:8 - LMT 1901 +10 - +10 +Z Pacific/Pohnpei 10:32:52 - LMT 1901 +11 - +11 +Z Pacific/Kosrae 10:51:56 - LMT 1901 +11 - +11 1969 O +12 - +12 1999 +11 - +11 +Z Pacific/Nauru 11:7:40 - LMT 1921 Ja 15 +11:30 - +1130 1942 Mar 15 +9 - +09 1944 Au 15 +11:30 - +1130 1979 May +12 - +12 +R NC 1977 1978 - D Sun>=1 0 1 - +R NC 1978 1979 - F 27 0 0 - +R NC 1996 o - D 1 2s 1 - +R NC 1997 o - Mar 2 2s 0 - +Z Pacific/Noumea 11:5:48 - LMT 1912 Ja 13 +11 NC +11/+12 +R NZ 1927 o - N 6 2 1 S +R NZ 1928 o - Mar 4 2 0 M +R NZ 1928 1933 - O Sun>=8 2 0:30 S +R NZ 1929 1933 - Mar Sun>=15 2 0 M +R NZ 1934 1940 - Ap lastSun 2 0 M +R NZ 1934 1940 - S lastSun 2 0:30 S +R NZ 1946 o - Ja 1 0 0 S +R NZ 1974 o - N Sun>=1 2s 1 D +R k 1974 o - N Sun>=1 2:45s 1 - +R NZ 1975 o - F lastSun 2s 0 S +R k 1975 o - F lastSun 2:45s 0 - +R NZ 1975 1988 - O lastSun 2s 1 D +R k 1975 1988 - O lastSun 2:45s 1 - +R NZ 1976 1989 - Mar Sun>=1 2s 0 S +R k 1976 1989 - Mar Sun>=1 2:45s 0 - +R NZ 1989 o - O Sun>=8 2s 1 D +R k 1989 o - O Sun>=8 2:45s 1 - +R NZ 1990 2006 - O Sun>=1 2s 1 D +R k 1990 2006 - O Sun>=1 2:45s 1 - +R NZ 1990 2007 - Mar Sun>=15 2s 0 S +R k 1990 2007 - Mar Sun>=15 2:45s 0 - +R NZ 2007 ma - S lastSun 2s 1 D +R k 2007 ma - S lastSun 2:45s 1 - +R NZ 2008 ma - Ap Sun>=1 2s 0 S +R k 2008 ma - Ap Sun>=1 2:45s 0 - +Z Pacific/Auckland 11:39:4 - LMT 1868 N 2 +11:30 NZ NZ%sT 1946 +12 NZ NZ%sT +Z Pacific/Chatham 12:13:48 - LMT 1868 N 2 +12:15 - +1215 1946 +12:45 k +1245/+1345 +Li Pacific/Auckland Antarctica/McMurdo +R CK 1978 o - N 12 0 0:30 - +R CK 1979 1991 - Mar Sun>=1 0 0 - +R CK 1979 1990 - O lastSun 0 0:30 - +Z Pacific/Rarotonga -10:39:4 - LMT 1901 
+-10:30 - -1030 1978 N 12 +-10 CK -10/-0930 +Z Pacific/Niue -11:19:40 - LMT 1901 +-11:20 - -1120 1951 +-11:30 - -1130 1978 O +-11 - -11 +Z Pacific/Norfolk 11:11:52 - LMT 1901 +11:12 - +1112 1951 +11:30 - +1130 1974 O 27 2 +11:30 1 +1230 1975 Mar 2 2 +11:30 - +1130 2015 O 4 2 +11 - +11 +Z Pacific/Palau 8:57:56 - LMT 1901 +9 - +09 +Z Pacific/Port_Moresby 9:48:40 - LMT 1880 +9:48:32 - PMMT 1895 +10 - +10 +Z Pacific/Bougainville 10:22:16 - LMT 1880 +9:48:32 - PMMT 1895 +10 - +10 1942 Jul +9 - +09 1945 Au 21 +10 - +10 2014 D 28 2 +11 - +11 +Z Pacific/Pitcairn -8:40:20 - LMT 1901 +-8:30 - -0830 1998 Ap 27 +-8 - -08 +Z Pacific/Pago_Pago 12:37:12 - LMT 1892 Jul 5 +-11:22:48 - LMT 1911 +-11 - SST +Li Pacific/Pago_Pago Pacific/Midway +R WS 2010 o - S lastSun 0 1 - +R WS 2011 o - Ap Sat>=1 4 0 - +R WS 2011 o - S lastSat 3 1 - +R WS 2012 ma - Ap Sun>=1 4 0 - +R WS 2012 ma - S lastSun 3 1 - +Z Pacific/Apia 12:33:4 - LMT 1892 Jul 5 +-11:26:56 - LMT 1911 +-11:30 - -1130 1950 +-11 WS -11/-10 2011 D 29 24 +13 WS +13/+14 +Z Pacific/Guadalcanal 10:39:48 - LMT 1912 O +11 - +11 +Z Pacific/Fakaofo -11:24:56 - LMT 1901 +-11 - -11 2011 D 30 +13 - +13 +R TO 1999 o - O 7 2s 1 - +R TO 2000 o - Mar 19 2s 0 - +R TO 2000 2001 - N Sun>=1 2 1 - +R TO 2001 2002 - Ja lastSun 2 0 - +R TO 2016 o - N Sun>=1 2 1 - +R TO 2017 o - Ja Sun>=15 3 0 - +Z Pacific/Tongatapu 12:19:20 - LMT 1901 +12:20 - +1220 1941 +13 - +13 1999 +13 TO +13/+14 +Z Pacific/Funafuti 11:56:52 - LMT 1901 +12 - +12 +Z Pacific/Wake 11:6:28 - LMT 1901 +12 - +12 +R VU 1983 o - S 25 0 1 - +R VU 1984 1991 - Mar Sun>=23 0 0 - +R VU 1984 o - O 23 0 1 - +R VU 1985 1991 - S Sun>=23 0 1 - +R VU 1992 1993 - Ja Sun>=23 0 0 - +R VU 1992 o - O Sun>=23 0 1 - +Z Pacific/Efate 11:13:16 - LMT 1912 Ja 13 +11 VU +11/+12 +Z Pacific/Wallis 12:15:20 - LMT 1901 +12 - +12 +R G 1916 o - May 21 2s 1 BST +R G 1916 o - O 1 2s 0 GMT +R G 1917 o - Ap 8 2s 1 BST +R G 1917 o - S 17 2s 0 GMT +R G 1918 o - Mar 24 2s 1 BST +R G 1918 o - S 30 2s 0 GMT +R G 1919 o - Mar 30 2s 1 BST +R G 1919 o - S 29 2s 0 GMT +R G 1920 o - Mar 28 2s 1 BST +R G 1920 o - O 25 2s 0 GMT +R G 1921 o - Ap 3 2s 1 BST +R G 1921 o - O 3 2s 0 GMT +R G 1922 o - Mar 26 2s 1 BST +R G 1922 o - O 8 2s 0 GMT +R G 1923 o - Ap Sun>=16 2s 1 BST +R G 1923 1924 - S Sun>=16 2s 0 GMT +R G 1924 o - Ap Sun>=9 2s 1 BST +R G 1925 1926 - Ap Sun>=16 2s 1 BST +R G 1925 1938 - O Sun>=2 2s 0 GMT +R G 1927 o - Ap Sun>=9 2s 1 BST +R G 1928 1929 - Ap Sun>=16 2s 1 BST +R G 1930 o - Ap Sun>=9 2s 1 BST +R G 1931 1932 - Ap Sun>=16 2s 1 BST +R G 1933 o - Ap Sun>=9 2s 1 BST +R G 1934 o - Ap Sun>=16 2s 1 BST +R G 1935 o - Ap Sun>=9 2s 1 BST +R G 1936 1937 - Ap Sun>=16 2s 1 BST +R G 1938 o - Ap Sun>=9 2s 1 BST +R G 1939 o - Ap Sun>=16 2s 1 BST +R G 1939 o - N Sun>=16 2s 0 GMT +R G 1940 o - F Sun>=23 2s 1 BST +R G 1941 o - May Sun>=2 1s 2 BDST +R G 1941 1943 - Au Sun>=9 1s 1 BST +R G 1942 1944 - Ap Sun>=2 1s 2 BDST +R G 1944 o - S Sun>=16 1s 1 BST +R G 1945 o - Ap M>=2 1s 2 BDST +R G 1945 o - Jul Sun>=9 1s 1 BST +R G 1945 1946 - O Sun>=2 2s 0 GMT +R G 1946 o - Ap Sun>=9 2s 1 BST +R G 1947 o - Mar 16 2s 1 BST +R G 1947 o - Ap 13 1s 2 BDST +R G 1947 o - Au 10 1s 1 BST +R G 1947 o - N 2 2s 0 GMT +R G 1948 o - Mar 14 2s 1 BST +R G 1948 o - O 31 2s 0 GMT +R G 1949 o - Ap 3 2s 1 BST +R G 1949 o - O 30 2s 0 GMT +R G 1950 1952 - Ap Sun>=14 2s 1 BST +R G 1950 1952 - O Sun>=21 2s 0 GMT +R G 1953 o - Ap Sun>=16 2s 1 BST +R G 1953 1960 - O Sun>=2 2s 0 GMT +R G 1954 o - Ap Sun>=9 2s 1 BST +R G 1955 1956 - Ap Sun>=16 2s 1 BST +R G 1957 o - Ap Sun>=9 2s 1 BST +R G 1958 
1959 - Ap Sun>=16 2s 1 BST +R G 1960 o - Ap Sun>=9 2s 1 BST +R G 1961 1963 - Mar lastSun 2s 1 BST +R G 1961 1968 - O Sun>=23 2s 0 GMT +R G 1964 1967 - Mar Sun>=19 2s 1 BST +R G 1968 o - F 18 2s 1 BST +R G 1972 1980 - Mar Sun>=16 2s 1 BST +R G 1972 1980 - O Sun>=23 2s 0 GMT +R G 1981 1995 - Mar lastSun 1u 1 BST +R G 1981 1989 - O Sun>=23 1u 0 GMT +R G 1990 1995 - O Sun>=22 1u 0 GMT +Z Europe/London -0:1:15 - LMT 1847 D 1 0s +0 G %s 1968 O 27 +1 - BST 1971 O 31 2u +0 G %s 1996 +0 E GMT/BST +Li Europe/London Europe/Jersey +Li Europe/London Europe/Guernsey +Li Europe/London Europe/Isle_of_Man +R IE 1971 o - O 31 2u -1 - +R IE 1972 1980 - Mar Sun>=16 2u 0 - +R IE 1972 1980 - O Sun>=23 2u -1 - +R IE 1981 ma - Mar lastSun 1u 0 - +R IE 1981 1989 - O Sun>=23 1u -1 - +R IE 1990 1995 - O Sun>=22 1u -1 - +R IE 1996 ma - O lastSun 1u -1 - +Z Europe/Dublin -0:25 - LMT 1880 Au 2 +-0:25:21 - DMT 1916 May 21 2s +-0:25:21 1 IST 1916 O 1 2s +0 G %s 1921 D 6 +0 G GMT/IST 1940 F 25 2s +0 1 IST 1946 O 6 2s +0 - GMT 1947 Mar 16 2s +0 1 IST 1947 N 2 2s +0 - GMT 1948 Ap 18 2s +0 G GMT/IST 1968 O 27 +1 IE IST/GMT +R E 1977 1980 - Ap Sun>=1 1u 1 S +R E 1977 o - S lastSun 1u 0 - +R E 1978 o - O 1 1u 0 - +R E 1979 1995 - S lastSun 1u 0 - +R E 1981 ma - Mar lastSun 1u 1 S +R E 1996 ma - O lastSun 1u 0 - +R W- 1977 1980 - Ap Sun>=1 1s 1 S +R W- 1977 o - S lastSun 1s 0 - +R W- 1978 o - O 1 1s 0 - +R W- 1979 1995 - S lastSun 1s 0 - +R W- 1981 ma - Mar lastSun 1s 1 S +R W- 1996 ma - O lastSun 1s 0 - +R c 1916 o - Ap 30 23 1 S +R c 1916 o - O 1 1 0 - +R c 1917 1918 - Ap M>=15 2s 1 S +R c 1917 1918 - S M>=15 2s 0 - +R c 1940 o - Ap 1 2s 1 S +R c 1942 o - N 2 2s 0 - +R c 1943 o - Mar 29 2s 1 S +R c 1943 o - O 4 2s 0 - +R c 1944 1945 - Ap M>=1 2s 1 S +R c 1944 o - O 2 2s 0 - +R c 1945 o - S 16 2s 0 - +R c 1977 1980 - Ap Sun>=1 2s 1 S +R c 1977 o - S lastSun 2s 0 - +R c 1978 o - O 1 2s 0 - +R c 1979 1995 - S lastSun 2s 0 - +R c 1981 ma - Mar lastSun 2s 1 S +R c 1996 ma - O lastSun 2s 0 - +R e 1977 1980 - Ap Sun>=1 0 1 S +R e 1977 o - S lastSun 0 0 - +R e 1978 o - O 1 0 0 - +R e 1979 1995 - S lastSun 0 0 - +R e 1981 ma - Mar lastSun 0 1 S +R e 1996 ma - O lastSun 0 0 - +R R 1917 o - Jul 1 23 1 MST +R R 1917 o - D 28 0 0 MMT +R R 1918 o - May 31 22 2 MDST +R R 1918 o - S 16 1 1 MST +R R 1919 o - May 31 23 2 MDST +R R 1919 o - Jul 1 0u 1 MSD +R R 1919 o - Au 16 0 0 MSK +R R 1921 o - F 14 23 1 MSD +R R 1921 o - Mar 20 23 2 +05 +R R 1921 o - S 1 0 1 MSD +R R 1921 o - O 1 0 0 - +R R 1981 1984 - Ap 1 0 1 S +R R 1981 1983 - O 1 0 0 - +R R 1984 1995 - S lastSun 2s 0 - +R R 1985 2010 - Mar lastSun 2s 1 S +R R 1996 2010 - O lastSun 2s 0 - +Z WET 0 E WE%sT +Z CET 1 c CE%sT +Z MET 1 c ME%sT +Z EET 2 E EE%sT +R q 1940 o - Jun 16 0 1 S +R q 1942 o - N 2 3 0 - +R q 1943 o - Mar 29 2 1 S +R q 1943 o - Ap 10 3 0 - +R q 1974 o - May 4 0 1 S +R q 1974 o - O 2 0 0 - +R q 1975 o - May 1 0 1 S +R q 1975 o - O 2 0 0 - +R q 1976 o - May 2 0 1 S +R q 1976 o - O 3 0 0 - +R q 1977 o - May 8 0 1 S +R q 1977 o - O 2 0 0 - +R q 1978 o - May 6 0 1 S +R q 1978 o - O 1 0 0 - +R q 1979 o - May 5 0 1 S +R q 1979 o - S 30 0 0 - +R q 1980 o - May 3 0 1 S +R q 1980 o - O 4 0 0 - +R q 1981 o - Ap 26 0 1 S +R q 1981 o - S 27 0 0 - +R q 1982 o - May 2 0 1 S +R q 1982 o - O 3 0 0 - +R q 1983 o - Ap 18 0 1 S +R q 1983 o - O 1 0 0 - +R q 1984 o - Ap 1 0 1 S +Z Europe/Tirane 1:19:20 - LMT 1914 +1 - CET 1940 Jun 16 +1 q CE%sT 1984 Jul +1 E CE%sT +Z Europe/Andorra 0:6:4 - LMT 1901 +0 - WET 1946 S 30 +1 - CET 1985 Mar 31 2 +1 E CE%sT +R a 1920 o - Ap 5 2s 1 S +R a 1920 o - S 
13 2s 0 - +R a 1946 o - Ap 14 2s 1 S +R a 1946 1948 - O Sun>=1 2s 0 - +R a 1947 o - Ap 6 2s 1 S +R a 1948 o - Ap 18 2s 1 S +R a 1980 o - Ap 6 0 1 S +R a 1980 o - S 28 0 0 - +Z Europe/Vienna 1:5:21 - LMT 1893 Ap +1 c CE%sT 1920 +1 a CE%sT 1940 Ap 1 2s +1 c CE%sT 1945 Ap 2 2s +1 1 CEST 1945 Ap 12 2s +1 - CET 1946 +1 a CE%sT 1981 +1 E CE%sT +Z Europe/Minsk 1:50:16 - LMT 1880 +1:50 - MMT 1924 May 2 +2 - EET 1930 Jun 21 +3 - MSK 1941 Jun 28 +1 c CE%sT 1944 Jul 3 +3 R MSK/MSD 1990 +3 - MSK 1991 Mar 31 2s +2 R EE%sT 2011 Mar 27 2s +3 - +03 +R b 1918 o - Mar 9 0s 1 S +R b 1918 1919 - O Sat>=1 23s 0 - +R b 1919 o - Mar 1 23s 1 S +R b 1920 o - F 14 23s 1 S +R b 1920 o - O 23 23s 0 - +R b 1921 o - Mar 14 23s 1 S +R b 1921 o - O 25 23s 0 - +R b 1922 o - Mar 25 23s 1 S +R b 1922 1927 - O Sat>=1 23s 0 - +R b 1923 o - Ap 21 23s 1 S +R b 1924 o - Mar 29 23s 1 S +R b 1925 o - Ap 4 23s 1 S +R b 1926 o - Ap 17 23s 1 S +R b 1927 o - Ap 9 23s 1 S +R b 1928 o - Ap 14 23s 1 S +R b 1928 1938 - O Sun>=2 2s 0 - +R b 1929 o - Ap 21 2s 1 S +R b 1930 o - Ap 13 2s 1 S +R b 1931 o - Ap 19 2s 1 S +R b 1932 o - Ap 3 2s 1 S +R b 1933 o - Mar 26 2s 1 S +R b 1934 o - Ap 8 2s 1 S +R b 1935 o - Mar 31 2s 1 S +R b 1936 o - Ap 19 2s 1 S +R b 1937 o - Ap 4 2s 1 S +R b 1938 o - Mar 27 2s 1 S +R b 1939 o - Ap 16 2s 1 S +R b 1939 o - N 19 2s 0 - +R b 1940 o - F 25 2s 1 S +R b 1944 o - S 17 2s 0 - +R b 1945 o - Ap 2 2s 1 S +R b 1945 o - S 16 2s 0 - +R b 1946 o - May 19 2s 1 S +R b 1946 o - O 7 2s 0 - +Z Europe/Brussels 0:17:30 - LMT 1880 +0:17:30 - BMT 1892 May 1 12 +0 - WET 1914 N 8 +1 - CET 1916 May +1 c CE%sT 1918 N 11 11u +0 b WE%sT 1940 May 20 2s +1 c CE%sT 1944 S 3 +1 b CE%sT 1977 +1 E CE%sT +R BG 1979 o - Mar 31 23 1 S +R BG 1979 o - O 1 1 0 - +R BG 1980 1982 - Ap Sat>=1 23 1 S +R BG 1980 o - S 29 1 0 - +R BG 1981 o - S 27 2 0 - +Z Europe/Sofia 1:33:16 - LMT 1880 +1:56:56 - IMT 1894 N 30 +2 - EET 1942 N 2 3 +1 c CE%sT 1945 +1 - CET 1945 Ap 2 3 +2 - EET 1979 Mar 31 23 +2 BG EE%sT 1982 S 26 3 +2 c EE%sT 1991 +2 e EE%sT 1997 +2 E EE%sT +R CZ 1945 o - Ap M>=1 2s 1 S +R CZ 1945 o - O 1 2s 0 - +R CZ 1946 o - May 6 2s 1 S +R CZ 1946 1949 - O Sun>=1 2s 0 - +R CZ 1947 1948 - Ap Sun>=15 2s 1 S +R CZ 1949 o - Ap 9 2s 1 S +Z Europe/Prague 0:57:44 - LMT 1850 +0:57:44 - PMT 1891 O +1 c CE%sT 1945 May 9 +1 CZ CE%sT 1946 D 1 3 +1 -1 GMT 1947 F 23 2 +1 CZ CE%sT 1979 +1 E CE%sT +R D 1916 o - May 14 23 1 S +R D 1916 o - S 30 23 0 - +R D 1940 o - May 15 0 1 S +R D 1945 o - Ap 2 2s 1 S +R D 1945 o - Au 15 2s 0 - +R D 1946 o - May 1 2s 1 S +R D 1946 o - S 1 2s 0 - +R D 1947 o - May 4 2s 1 S +R D 1947 o - Au 10 2s 0 - +R D 1948 o - May 9 2s 1 S +R D 1948 o - Au 8 2s 0 - +Z Europe/Copenhagen 0:50:20 - LMT 1890 +0:50:20 - CMT 1894 +1 D CE%sT 1942 N 2 2s +1 c CE%sT 1945 Ap 2 2 +1 D CE%sT 1980 +1 E CE%sT +Z Atlantic/Faroe -0:27:4 - LMT 1908 Ja 11 +0 - WET 1981 +0 E WE%sT +R Th 1991 1992 - Mar lastSun 2 1 D +R Th 1991 1992 - S lastSun 2 0 S +R Th 1993 2006 - Ap Sun>=1 2 1 D +R Th 1993 2006 - O lastSun 2 0 S +R Th 2007 ma - Mar Sun>=8 2 1 D +R Th 2007 ma - N Sun>=1 2 0 S +Z America/Danmarkshavn -1:14:40 - LMT 1916 Jul 28 +-3 - -03 1980 Ap 6 2 +-3 E -03/-02 1996 +0 - GMT +Z America/Scoresbysund -1:27:52 - LMT 1916 Jul 28 +-2 - -02 1980 Ap 6 2 +-2 c -02/-01 1981 Mar 29 +-1 E -01/+00 +Z America/Godthab -3:26:56 - LMT 1916 Jul 28 +-3 - -03 1980 Ap 6 2 +-3 E -03/-02 +Z America/Thule -4:35:8 - LMT 1916 Jul 28 +-4 Th A%sT +Z Europe/Tallinn 1:39 - LMT 1880 +1:39 - TMT 1918 F +1 c CE%sT 1919 Jul +1:39 - TMT 1921 May +2 - EET 1940 Au 6 +3 - MSK 1941 S 15 +1 c CE%sT 
1944 S 22 +3 R MSK/MSD 1989 Mar 26 2s +2 1 EEST 1989 S 24 2s +2 c EE%sT 1998 S 22 +2 E EE%sT 1999 O 31 4 +2 - EET 2002 F 21 +2 E EE%sT +R FI 1942 o - Ap 2 24 1 S +R FI 1942 o - O 4 1 0 - +R FI 1981 1982 - Mar lastSun 2 1 S +R FI 1981 1982 - S lastSun 3 0 - +Z Europe/Helsinki 1:39:49 - LMT 1878 May 31 +1:39:49 - HMT 1921 May +2 FI EE%sT 1983 +2 E EE%sT +Li Europe/Helsinki Europe/Mariehamn +R F 1916 o - Jun 14 23s 1 S +R F 1916 1919 - O Sun>=1 23s 0 - +R F 1917 o - Mar 24 23s 1 S +R F 1918 o - Mar 9 23s 1 S +R F 1919 o - Mar 1 23s 1 S +R F 1920 o - F 14 23s 1 S +R F 1920 o - O 23 23s 0 - +R F 1921 o - Mar 14 23s 1 S +R F 1921 o - O 25 23s 0 - +R F 1922 o - Mar 25 23s 1 S +R F 1922 1938 - O Sat>=1 23s 0 - +R F 1923 o - May 26 23s 1 S +R F 1924 o - Mar 29 23s 1 S +R F 1925 o - Ap 4 23s 1 S +R F 1926 o - Ap 17 23s 1 S +R F 1927 o - Ap 9 23s 1 S +R F 1928 o - Ap 14 23s 1 S +R F 1929 o - Ap 20 23s 1 S +R F 1930 o - Ap 12 23s 1 S +R F 1931 o - Ap 18 23s 1 S +R F 1932 o - Ap 2 23s 1 S +R F 1933 o - Mar 25 23s 1 S +R F 1934 o - Ap 7 23s 1 S +R F 1935 o - Mar 30 23s 1 S +R F 1936 o - Ap 18 23s 1 S +R F 1937 o - Ap 3 23s 1 S +R F 1938 o - Mar 26 23s 1 S +R F 1939 o - Ap 15 23s 1 S +R F 1939 o - N 18 23s 0 - +R F 1940 o - F 25 2 1 S +R F 1941 o - May 5 0 2 M +R F 1941 o - O 6 0 1 S +R F 1942 o - Mar 9 0 2 M +R F 1942 o - N 2 3 1 S +R F 1943 o - Mar 29 2 2 M +R F 1943 o - O 4 3 1 S +R F 1944 o - Ap 3 2 2 M +R F 1944 o - O 8 1 1 S +R F 1945 o - Ap 2 2 2 M +R F 1945 o - S 16 3 0 - +R F 1976 o - Mar 28 1 1 S +R F 1976 o - S 26 1 0 - +Z Europe/Paris 0:9:21 - LMT 1891 Mar 15 0:1 +0:9:21 - PMT 1911 Mar 11 0:1 +0 F WE%sT 1940 Jun 14 23 +1 c CE%sT 1944 Au 25 +0 F WE%sT 1945 S 16 3 +1 F CE%sT 1977 +1 E CE%sT +R DE 1946 o - Ap 14 2s 1 S +R DE 1946 o - O 7 2s 0 - +R DE 1947 1949 - O Sun>=1 2s 0 - +R DE 1947 o - Ap 6 3s 1 S +R DE 1947 o - May 11 2s 2 M +R DE 1947 o - Jun 29 3 1 S +R DE 1948 o - Ap 18 2s 1 S +R DE 1949 o - Ap 10 2s 1 S +R So 1945 o - May 24 2 2 M +R So 1945 o - S 24 3 1 S +R So 1945 o - N 18 2s 0 - +Z Europe/Berlin 0:53:28 - LMT 1893 Ap +1 c CE%sT 1945 May 24 2 +1 So CE%sT 1946 +1 DE CE%sT 1980 +1 E CE%sT +Li Europe/Zurich Europe/Busingen +Z Europe/Gibraltar -0:21:24 - LMT 1880 Au 2 0s +0 G %s 1957 Ap 14 2 +1 - CET 1982 +1 E CE%sT +R g 1932 o - Jul 7 0 1 S +R g 1932 o - S 1 0 0 - +R g 1941 o - Ap 7 0 1 S +R g 1942 o - N 2 3 0 - +R g 1943 o - Mar 30 0 1 S +R g 1943 o - O 4 0 0 - +R g 1952 o - Jul 1 0 1 S +R g 1952 o - N 2 0 0 - +R g 1975 o - Ap 12 0s 1 S +R g 1975 o - N 26 0s 0 - +R g 1976 o - Ap 11 2s 1 S +R g 1976 o - O 10 2s 0 - +R g 1977 1978 - Ap Sun>=1 2s 1 S +R g 1977 o - S 26 2s 0 - +R g 1978 o - S 24 4 0 - +R g 1979 o - Ap 1 9 1 S +R g 1979 o - S 29 2 0 - +R g 1980 o - Ap 1 0 1 S +R g 1980 o - S 28 0 0 - +Z Europe/Athens 1:34:52 - LMT 1895 S 14 +1:34:52 - AMT 1916 Jul 28 0:1 +2 g EE%sT 1941 Ap 30 +1 g CE%sT 1944 Ap 4 +2 g EE%sT 1981 +2 E EE%sT +R h 1918 o - Ap 1 3 1 S +R h 1918 o - S 16 3 0 - +R h 1919 o - Ap 15 3 1 S +R h 1919 o - N 24 3 0 - +R h 1945 o - May 1 23 1 S +R h 1945 o - N 1 0 0 - +R h 1946 o - Mar 31 2s 1 S +R h 1946 1949 - O Sun>=1 2s 0 - +R h 1947 1949 - Ap Sun>=4 2s 1 S +R h 1950 o - Ap 17 2s 1 S +R h 1950 o - O 23 2s 0 - +R h 1954 1955 - May 23 0 1 S +R h 1954 1955 - O 3 0 0 - +R h 1956 o - Jun Sun>=1 0 1 S +R h 1956 o - S lastSun 0 0 - +R h 1957 o - Jun Sun>=1 1 1 S +R h 1957 o - S lastSun 3 0 - +R h 1980 o - Ap 6 1 1 S +Z Europe/Budapest 1:16:20 - LMT 1890 O +1 c CE%sT 1918 +1 h CE%sT 1941 Ap 8 +1 c CE%sT 1945 +1 h CE%sT 1980 S 28 2s +1 E CE%sT +R w 1917 1919 - F 19 23 1 - 
+R w 1917 o - O 21 1 0 - +R w 1918 1919 - N 16 1 0 - +R w 1921 o - Mar 19 23 1 - +R w 1921 o - Jun 23 1 0 - +R w 1939 o - Ap 29 23 1 - +R w 1939 o - O 29 2 0 - +R w 1940 o - F 25 2 1 - +R w 1940 1941 - N Sun>=2 1s 0 - +R w 1941 1942 - Mar Sun>=2 1s 1 - +R w 1943 1946 - Mar Sun>=1 1s 1 - +R w 1942 1948 - O Sun>=22 1s 0 - +R w 1947 1967 - Ap Sun>=1 1s 1 - +R w 1949 o - O 30 1s 0 - +R w 1950 1966 - O Sun>=22 1s 0 - +R w 1967 o - O 29 1s 0 - +Z Atlantic/Reykjavik -1:28 - LMT 1908 +-1 w -01/+00 1968 Ap 7 1s +0 - GMT +R I 1916 o - Jun 3 24 1 S +R I 1916 1917 - S 30 24 0 - +R I 1917 o - Mar 31 24 1 S +R I 1918 o - Mar 9 24 1 S +R I 1918 o - O 6 24 0 - +R I 1919 o - Mar 1 24 1 S +R I 1919 o - O 4 24 0 - +R I 1920 o - Mar 20 24 1 S +R I 1920 o - S 18 24 0 - +R I 1940 o - Jun 14 24 1 S +R I 1942 o - N 2 2s 0 - +R I 1943 o - Mar 29 2s 1 S +R I 1943 o - O 4 2s 0 - +R I 1944 o - Ap 2 2s 1 S +R I 1944 o - S 17 2s 0 - +R I 1945 o - Ap 2 2 1 S +R I 1945 o - S 15 1 0 - +R I 1946 o - Mar 17 2s 1 S +R I 1946 o - O 6 2s 0 - +R I 1947 o - Mar 16 0s 1 S +R I 1947 o - O 5 0s 0 - +R I 1948 o - F 29 2s 1 S +R I 1948 o - O 3 2s 0 - +R I 1966 1968 - May Sun>=22 0s 1 S +R I 1966 o - S 24 24 0 - +R I 1967 1969 - S Sun>=22 0s 0 - +R I 1969 o - Jun 1 0s 1 S +R I 1970 o - May 31 0s 1 S +R I 1970 o - S lastSun 0s 0 - +R I 1971 1972 - May Sun>=22 0s 1 S +R I 1971 o - S lastSun 0s 0 - +R I 1972 o - O 1 0s 0 - +R I 1973 o - Jun 3 0s 1 S +R I 1973 1974 - S lastSun 0s 0 - +R I 1974 o - May 26 0s 1 S +R I 1975 o - Jun 1 0s 1 S +R I 1975 1977 - S lastSun 0s 0 - +R I 1976 o - May 30 0s 1 S +R I 1977 1979 - May Sun>=22 0s 1 S +R I 1978 o - O 1 0s 0 - +R I 1979 o - S 30 0s 0 - +Z Europe/Rome 0:49:56 - LMT 1866 S 22 +0:49:56 - RMT 1893 O 31 23:49:56 +1 I CE%sT 1943 S 10 +1 c CE%sT 1944 Jun 4 +1 I CE%sT 1980 +1 E CE%sT +Li Europe/Rome Europe/Vatican +Li Europe/Rome Europe/San_Marino +R LV 1989 1996 - Mar lastSun 2s 1 S +R LV 1989 1996 - S lastSun 2s 0 - +Z Europe/Riga 1:36:34 - LMT 1880 +1:36:34 - RMT 1918 Ap 15 2 +1:36:34 1 LST 1918 S 16 3 +1:36:34 - RMT 1919 Ap 1 2 +1:36:34 1 LST 1919 May 22 3 +1:36:34 - RMT 1926 May 11 +2 - EET 1940 Au 5 +3 - MSK 1941 Jul +1 c CE%sT 1944 O 13 +3 R MSK/MSD 1989 Mar lastSun 2s +2 1 EEST 1989 S lastSun 2s +2 LV EE%sT 1997 Ja 21 +2 E EE%sT 2000 F 29 +2 - EET 2001 Ja 2 +2 E EE%sT +Li Europe/Zurich Europe/Vaduz +Z Europe/Vilnius 1:41:16 - LMT 1880 +1:24 - WMT 1917 +1:35:36 - KMT 1919 O 10 +1 - CET 1920 Jul 12 +2 - EET 1920 O 9 +1 - CET 1940 Au 3 +3 - MSK 1941 Jun 24 +1 c CE%sT 1944 Au +3 R MSK/MSD 1989 Mar 26 2s +2 R EE%sT 1991 S 29 2s +2 c EE%sT 1998 +2 - EET 1998 Mar 29 1u +1 E CE%sT 1999 O 31 1u +2 - EET 2003 +2 E EE%sT +R LX 1916 o - May 14 23 1 S +R LX 1916 o - O 1 1 0 - +R LX 1917 o - Ap 28 23 1 S +R LX 1917 o - S 17 1 0 - +R LX 1918 o - Ap M>=15 2s 1 S +R LX 1918 o - S M>=15 2s 0 - +R LX 1919 o - Mar 1 23 1 S +R LX 1919 o - O 5 3 0 - +R LX 1920 o - F 14 23 1 S +R LX 1920 o - O 24 2 0 - +R LX 1921 o - Mar 14 23 1 S +R LX 1921 o - O 26 2 0 - +R LX 1922 o - Mar 25 23 1 S +R LX 1922 o - O Sun>=2 1 0 - +R LX 1923 o - Ap 21 23 1 S +R LX 1923 o - O Sun>=2 2 0 - +R LX 1924 o - Mar 29 23 1 S +R LX 1924 1928 - O Sun>=2 1 0 - +R LX 1925 o - Ap 5 23 1 S +R LX 1926 o - Ap 17 23 1 S +R LX 1927 o - Ap 9 23 1 S +R LX 1928 o - Ap 14 23 1 S +R LX 1929 o - Ap 20 23 1 S +Z Europe/Luxembourg 0:24:36 - LMT 1904 Jun +1 LX CE%sT 1918 N 25 +0 LX WE%sT 1929 O 6 2s +0 b WE%sT 1940 May 14 3 +1 c WE%sT 1944 S 18 3 +1 b CE%sT 1977 +1 E CE%sT +R MT 1973 o - Mar 31 0s 1 S +R MT 1973 o - S 29 0s 0 - +R MT 1974 o - Ap 21 0s 1 S 
+R MT 1974 o - S 16 0s 0 - +R MT 1975 1979 - Ap Sun>=15 2 1 S +R MT 1975 1980 - S Sun>=15 2 0 - +R MT 1980 o - Mar 31 2 1 S +Z Europe/Malta 0:58:4 - LMT 1893 N 2 0s +1 I CE%sT 1973 Mar 31 +1 MT CE%sT 1981 +1 E CE%sT +R MD 1997 ma - Mar lastSun 2 1 S +R MD 1997 ma - O lastSun 3 0 - +Z Europe/Chisinau 1:55:20 - LMT 1880 +1:55 - CMT 1918 F 15 +1:44:24 - BMT 1931 Jul 24 +2 z EE%sT 1940 Au 15 +2 1 EEST 1941 Jul 17 +1 c CE%sT 1944 Au 24 +3 R MSK/MSD 1990 May 6 2 +2 R EE%sT 1992 +2 e EE%sT 1997 +2 MD EE%sT +Z Europe/Monaco 0:29:32 - LMT 1891 Mar 15 +0:9:21 - PMT 1911 Mar 11 +0 F WE%sT 1945 S 16 3 +1 F CE%sT 1977 +1 E CE%sT +R N 1916 o - May 1 0 1 NST +R N 1916 o - O 1 0 0 AMT +R N 1917 o - Ap 16 2s 1 NST +R N 1917 o - S 17 2s 0 AMT +R N 1918 1921 - Ap M>=1 2s 1 NST +R N 1918 1921 - S lastM 2s 0 AMT +R N 1922 o - Mar lastSun 2s 1 NST +R N 1922 1936 - O Sun>=2 2s 0 AMT +R N 1923 o - Jun F>=1 2s 1 NST +R N 1924 o - Mar lastSun 2s 1 NST +R N 1925 o - Jun F>=1 2s 1 NST +R N 1926 1931 - May 15 2s 1 NST +R N 1932 o - May 22 2s 1 NST +R N 1933 1936 - May 15 2s 1 NST +R N 1937 o - May 22 2s 1 NST +R N 1937 o - Jul 1 0 1 S +R N 1937 1939 - O Sun>=2 2s 0 - +R N 1938 1939 - May 15 2s 1 S +R N 1945 o - Ap 2 2s 1 S +R N 1945 o - S 16 2s 0 - +Z Europe/Amsterdam 0:19:32 - LMT 1835 +0:19:32 N %s 1937 Jul +0:20 N +0020/+0120 1940 May 16 +1 c CE%sT 1945 Ap 2 2 +1 N CE%sT 1977 +1 E CE%sT +R NO 1916 o - May 22 1 1 S +R NO 1916 o - S 30 0 0 - +R NO 1945 o - Ap 2 2s 1 S +R NO 1945 o - O 1 2s 0 - +R NO 1959 1964 - Mar Sun>=15 2s 1 S +R NO 1959 1965 - S Sun>=15 2s 0 - +R NO 1965 o - Ap 25 2s 1 S +Z Europe/Oslo 0:43 - LMT 1895 +1 NO CE%sT 1940 Au 10 23 +1 c CE%sT 1945 Ap 2 2 +1 NO CE%sT 1980 +1 E CE%sT +Li Europe/Oslo Arctic/Longyearbyen +R O 1918 1919 - S 16 2s 0 - +R O 1919 o - Ap 15 2s 1 S +R O 1944 o - Ap 3 2s 1 S +R O 1944 o - O 4 2 0 - +R O 1945 o - Ap 29 0 1 S +R O 1945 o - N 1 0 0 - +R O 1946 o - Ap 14 0s 1 S +R O 1946 o - O 7 2s 0 - +R O 1947 o - May 4 2s 1 S +R O 1947 1949 - O Sun>=1 2s 0 - +R O 1948 o - Ap 18 2s 1 S +R O 1949 o - Ap 10 2s 1 S +R O 1957 o - Jun 2 1s 1 S +R O 1957 1958 - S lastSun 1s 0 - +R O 1958 o - Mar 30 1s 1 S +R O 1959 o - May 31 1s 1 S +R O 1959 1961 - O Sun>=1 1s 0 - +R O 1960 o - Ap 3 1s 1 S +R O 1961 1964 - May lastSun 1s 1 S +R O 1962 1964 - S lastSun 1s 0 - +Z Europe/Warsaw 1:24 - LMT 1880 +1:24 - WMT 1915 Au 5 +1 c CE%sT 1918 S 16 3 +2 O EE%sT 1922 Jun +1 O CE%sT 1940 Jun 23 2 +1 c CE%sT 1944 O +1 O CE%sT 1977 +1 W- CE%sT 1988 +1 E CE%sT +R p 1916 o - Jun 17 23 1 S +R p 1916 o - N 1 1 0 - +R p 1917 o - F 28 23s 1 S +R p 1917 1921 - O 14 23s 0 - +R p 1918 o - Mar 1 23s 1 S +R p 1919 o - F 28 23s 1 S +R p 1920 o - F 29 23s 1 S +R p 1921 o - F 28 23s 1 S +R p 1924 o - Ap 16 23s 1 S +R p 1924 o - O 14 23s 0 - +R p 1926 o - Ap 17 23s 1 S +R p 1926 1929 - O Sat>=1 23s 0 - +R p 1927 o - Ap 9 23s 1 S +R p 1928 o - Ap 14 23s 1 S +R p 1929 o - Ap 20 23s 1 S +R p 1931 o - Ap 18 23s 1 S +R p 1931 1932 - O Sat>=1 23s 0 - +R p 1932 o - Ap 2 23s 1 S +R p 1934 o - Ap 7 23s 1 S +R p 1934 1938 - O Sat>=1 23s 0 - +R p 1935 o - Mar 30 23s 1 S +R p 1936 o - Ap 18 23s 1 S +R p 1937 o - Ap 3 23s 1 S +R p 1938 o - Mar 26 23s 1 S +R p 1939 o - Ap 15 23s 1 S +R p 1939 o - N 18 23s 0 - +R p 1940 o - F 24 23s 1 S +R p 1940 1941 - O 5 23s 0 - +R p 1941 o - Ap 5 23s 1 S +R p 1942 1945 - Mar Sat>=8 23s 1 S +R p 1942 o - Ap 25 22s 2 M +R p 1942 o - Au 15 22s 1 S +R p 1942 1945 - O Sat>=24 23s 0 - +R p 1943 o - Ap 17 22s 2 M +R p 1943 1945 - Au Sat>=25 22s 1 S +R p 1944 1945 - Ap Sat>=21 22s 2 M +R p 1946 o - Ap 
Sat>=1 23s 1 S +R p 1946 o - O Sat>=1 23s 0 - +R p 1947 1949 - Ap Sun>=1 2s 1 S +R p 1947 1949 - O Sun>=1 2s 0 - +R p 1951 1965 - Ap Sun>=1 2s 1 S +R p 1951 1965 - O Sun>=1 2s 0 - +R p 1977 o - Mar 27 0s 1 S +R p 1977 o - S 25 0s 0 - +R p 1978 1979 - Ap Sun>=1 0s 1 S +R p 1978 o - O 1 0s 0 - +R p 1979 1982 - S lastSun 1s 0 - +R p 1980 o - Mar lastSun 0s 1 S +R p 1981 1982 - Mar lastSun 1s 1 S +R p 1983 o - Mar lastSun 2s 1 S +Z Europe/Lisbon -0:36:45 - LMT 1884 +-0:36:45 - LMT 1912 Ja 1 0u +0 p WE%sT 1966 Ap 3 2 +1 - CET 1976 S 26 1 +0 p WE%sT 1983 S 25 1s +0 W- WE%sT 1992 S 27 1s +1 E CE%sT 1996 Mar 31 1u +0 E WE%sT +Z Atlantic/Azores -1:42:40 - LMT 1884 +-1:54:32 - HMT 1912 Ja 1 2u +-2 p -02/-01 1942 Ap 25 22s +-2 p +00 1942 Au 15 22s +-2 p -02/-01 1943 Ap 17 22s +-2 p +00 1943 Au 28 22s +-2 p -02/-01 1944 Ap 22 22s +-2 p +00 1944 Au 26 22s +-2 p -02/-01 1945 Ap 21 22s +-2 p +00 1945 Au 25 22s +-2 p -02/-01 1966 Ap 3 2 +-1 p -01/+00 1983 S 25 1s +-1 W- -01/+00 1992 S 27 1s +0 E WE%sT 1993 Mar 28 1u +-1 E -01/+00 +Z Atlantic/Madeira -1:7:36 - LMT 1884 +-1:7:36 - FMT 1912 Ja 1 1u +-1 p -01/+00 1942 Ap 25 22s +-1 p +01 1942 Au 15 22s +-1 p -01/+00 1943 Ap 17 22s +-1 p +01 1943 Au 28 22s +-1 p -01/+00 1944 Ap 22 22s +-1 p +01 1944 Au 26 22s +-1 p -01/+00 1945 Ap 21 22s +-1 p +01 1945 Au 25 22s +-1 p -01/+00 1966 Ap 3 2 +0 p WE%sT 1983 S 25 1s +0 E WE%sT +R z 1932 o - May 21 0s 1 S +R z 1932 1939 - O Sun>=1 0s 0 - +R z 1933 1939 - Ap Sun>=2 0s 1 S +R z 1979 o - May 27 0 1 S +R z 1979 o - S lastSun 0 0 - +R z 1980 o - Ap 5 23 1 S +R z 1980 o - S lastSun 1 0 - +R z 1991 1993 - Mar lastSun 0s 1 S +R z 1991 1993 - S lastSun 0s 0 - +Z Europe/Bucharest 1:44:24 - LMT 1891 O +1:44:24 - BMT 1931 Jul 24 +2 z EE%sT 1981 Mar 29 2s +2 c EE%sT 1991 +2 z EE%sT 1994 +2 e EE%sT 1997 +2 E EE%sT +Z Europe/Kaliningrad 1:22 - LMT 1893 Ap +1 c CE%sT 1945 +2 O CE%sT 1946 +3 R MSK/MSD 1989 Mar 26 2s +2 R EE%sT 2011 Mar 27 2s +3 - +03 2014 O 26 2s +2 - EET +Z Europe/Moscow 2:30:17 - LMT 1880 +2:30:17 - MMT 1916 Jul 3 +2:31:19 R %s 1919 Jul 1 0u +3 R %s 1921 O +3 R MSK/MSD 1922 O +2 - EET 1930 Jun 21 +3 R MSK/MSD 1991 Mar 31 2s +2 R EE%sT 1992 Ja 19 2s +3 R MSK/MSD 2011 Mar 27 2s +4 - MSK 2014 O 26 2s +3 - MSK +Z Europe/Simferopol 2:16:24 - LMT 1880 +2:16 - SMT 1924 May 2 +2 - EET 1930 Jun 21 +3 - MSK 1941 N +1 c CE%sT 1944 Ap 13 +3 R MSK/MSD 1990 +3 - MSK 1990 Jul 1 2 +2 - EET 1992 +2 e EE%sT 1994 May +3 e MSK/MSD 1996 Mar 31 0s +3 1 MSD 1996 O 27 3s +3 R MSK/MSD 1997 +3 - MSK 1997 Mar lastSun 1u +2 E EE%sT 2014 Mar 30 2 +4 - MSK 2014 O 26 2s +3 - MSK +Z Europe/Astrakhan 3:12:12 - LMT 1924 May +3 - +03 1930 Jun 21 +4 R +04/+05 1989 Mar 26 2s +3 R +03/+04 1991 Mar 31 2s +4 - +04 1992 Mar 29 2s +3 R +03/+04 2011 Mar 27 2s +4 - +04 2014 O 26 2s +3 - +03 2016 Mar 27 2s +4 - +04 +Z Europe/Volgograd 2:57:40 - LMT 1920 Ja 3 +3 - +03 1930 Jun 21 +4 - +04 1961 N 11 +4 R +04/+05 1988 Mar 27 2s +3 R +03/+04 1991 Mar 31 2s +4 - +04 1992 Mar 29 2s +3 R +03/+04 2011 Mar 27 2s +4 - +04 2014 O 26 2s +3 - +03 2018 O 28 2s +4 - +04 +Z Europe/Saratov 3:4:18 - LMT 1919 Jul 1 0u +3 - +03 1930 Jun 21 +4 R +04/+05 1988 Mar 27 2s +3 R +03/+04 1991 Mar 31 2s +4 - +04 1992 Mar 29 2s +3 R +03/+04 2011 Mar 27 2s +4 - +04 2014 O 26 2s +3 - +03 2016 D 4 2s +4 - +04 +Z Europe/Kirov 3:18:48 - LMT 1919 Jul 1 0u +3 - +03 1930 Jun 21 +4 R +04/+05 1989 Mar 26 2s +3 R +03/+04 1991 Mar 31 2s +4 - +04 1992 Mar 29 2s +3 R +03/+04 2011 Mar 27 2s +4 - +04 2014 O 26 2s +3 - +03 +Z Europe/Samara 3:20:20 - LMT 1919 Jul 1 0u +3 - +03 1930 Jun 21 +4 - +04 1935 Ja 
27 +4 R +04/+05 1989 Mar 26 2s +3 R +03/+04 1991 Mar 31 2s +2 R +02/+03 1991 S 29 2s +3 - +03 1991 O 20 3 +4 R +04/+05 2010 Mar 28 2s +3 R +03/+04 2011 Mar 27 2s +4 - +04 +Z Europe/Ulyanovsk 3:13:36 - LMT 1919 Jul 1 0u +3 - +03 1930 Jun 21 +4 R +04/+05 1989 Mar 26 2s +3 R +03/+04 1991 Mar 31 2s +2 R +02/+03 1992 Ja 19 2s +3 R +03/+04 2011 Mar 27 2s +4 - +04 2014 O 26 2s +3 - +03 2016 Mar 27 2s +4 - +04 +Z Asia/Yekaterinburg 4:2:33 - LMT 1916 Jul 3 +3:45:5 - PMT 1919 Jul 15 4 +4 - +04 1930 Jun 21 +5 R +05/+06 1991 Mar 31 2s +4 R +04/+05 1992 Ja 19 2s +5 R +05/+06 2011 Mar 27 2s +6 - +06 2014 O 26 2s +5 - +05 +Z Asia/Omsk 4:53:30 - LMT 1919 N 14 +5 - +05 1930 Jun 21 +6 R +06/+07 1991 Mar 31 2s +5 R +05/+06 1992 Ja 19 2s +6 R +06/+07 2011 Mar 27 2s +7 - +07 2014 O 26 2s +6 - +06 +Z Asia/Barnaul 5:35 - LMT 1919 D 10 +6 - +06 1930 Jun 21 +7 R +07/+08 1991 Mar 31 2s +6 R +06/+07 1992 Ja 19 2s +7 R +07/+08 1995 May 28 +6 R +06/+07 2011 Mar 27 2s +7 - +07 2014 O 26 2s +6 - +06 2016 Mar 27 2s +7 - +07 +Z Asia/Novosibirsk 5:31:40 - LMT 1919 D 14 6 +6 - +06 1930 Jun 21 +7 R +07/+08 1991 Mar 31 2s +6 R +06/+07 1992 Ja 19 2s +7 R +07/+08 1993 May 23 +6 R +06/+07 2011 Mar 27 2s +7 - +07 2014 O 26 2s +6 - +06 2016 Jul 24 2s +7 - +07 +Z Asia/Tomsk 5:39:51 - LMT 1919 D 22 +6 - +06 1930 Jun 21 +7 R +07/+08 1991 Mar 31 2s +6 R +06/+07 1992 Ja 19 2s +7 R +07/+08 2002 May 1 3 +6 R +06/+07 2011 Mar 27 2s +7 - +07 2014 O 26 2s +6 - +06 2016 May 29 2s +7 - +07 +Z Asia/Novokuznetsk 5:48:48 - LMT 1924 May +6 - +06 1930 Jun 21 +7 R +07/+08 1991 Mar 31 2s +6 R +06/+07 1992 Ja 19 2s +7 R +07/+08 2010 Mar 28 2s +6 R +06/+07 2011 Mar 27 2s +7 - +07 +Z Asia/Krasnoyarsk 6:11:26 - LMT 1920 Ja 6 +6 - +06 1930 Jun 21 +7 R +07/+08 1991 Mar 31 2s +6 R +06/+07 1992 Ja 19 2s +7 R +07/+08 2011 Mar 27 2s +8 - +08 2014 O 26 2s +7 - +07 +Z Asia/Irkutsk 6:57:5 - LMT 1880 +6:57:5 - IMT 1920 Ja 25 +7 - +07 1930 Jun 21 +8 R +08/+09 1991 Mar 31 2s +7 R +07/+08 1992 Ja 19 2s +8 R +08/+09 2011 Mar 27 2s +9 - +09 2014 O 26 2s +8 - +08 +Z Asia/Chita 7:33:52 - LMT 1919 D 15 +8 - +08 1930 Jun 21 +9 R +09/+10 1991 Mar 31 2s +8 R +08/+09 1992 Ja 19 2s +9 R +09/+10 2011 Mar 27 2s +10 - +10 2014 O 26 2s +8 - +08 2016 Mar 27 2 +9 - +09 +Z Asia/Yakutsk 8:38:58 - LMT 1919 D 15 +8 - +08 1930 Jun 21 +9 R +09/+10 1991 Mar 31 2s +8 R +08/+09 1992 Ja 19 2s +9 R +09/+10 2011 Mar 27 2s +10 - +10 2014 O 26 2s +9 - +09 +Z Asia/Vladivostok 8:47:31 - LMT 1922 N 15 +9 - +09 1930 Jun 21 +10 R +10/+11 1991 Mar 31 2s +9 R +09/+10 1992 Ja 19 2s +10 R +10/+11 2011 Mar 27 2s +11 - +11 2014 O 26 2s +10 - +10 +Z Asia/Khandyga 9:2:13 - LMT 1919 D 15 +8 - +08 1930 Jun 21 +9 R +09/+10 1991 Mar 31 2s +8 R +08/+09 1992 Ja 19 2s +9 R +09/+10 2004 +10 R +10/+11 2011 Mar 27 2s +11 - +11 2011 S 13 0s +10 - +10 2014 O 26 2s +9 - +09 +Z Asia/Sakhalin 9:30:48 - LMT 1905 Au 23 +9 - +09 1945 Au 25 +11 R +11/+12 1991 Mar 31 2s +10 R +10/+11 1992 Ja 19 2s +11 R +11/+12 1997 Mar lastSun 2s +10 R +10/+11 2011 Mar 27 2s +11 - +11 2014 O 26 2s +10 - +10 2016 Mar 27 2s +11 - +11 +Z Asia/Magadan 10:3:12 - LMT 1924 May 2 +10 - +10 1930 Jun 21 +11 R +11/+12 1991 Mar 31 2s +10 R +10/+11 1992 Ja 19 2s +11 R +11/+12 2011 Mar 27 2s +12 - +12 2014 O 26 2s +10 - +10 2016 Ap 24 2s +11 - +11 +Z Asia/Srednekolymsk 10:14:52 - LMT 1924 May 2 +10 - +10 1930 Jun 21 +11 R +11/+12 1991 Mar 31 2s +10 R +10/+11 1992 Ja 19 2s +11 R +11/+12 2011 Mar 27 2s +12 - +12 2014 O 26 2s +11 - +11 +Z Asia/Ust-Nera 9:32:54 - LMT 1919 D 15 +8 - +08 1930 Jun 21 +9 R +09/+10 1981 Ap +11 R +11/+12 1991 Mar 31 2s +10 R +10/+11 
1992 Ja 19 2s +11 R +11/+12 2011 Mar 27 2s +12 - +12 2011 S 13 0s +11 - +11 2014 O 26 2s +10 - +10 +Z Asia/Kamchatka 10:34:36 - LMT 1922 N 10 +11 - +11 1930 Jun 21 +12 R +12/+13 1991 Mar 31 2s +11 R +11/+12 1992 Ja 19 2s +12 R +12/+13 2010 Mar 28 2s +11 R +11/+12 2011 Mar 27 2s +12 - +12 +Z Asia/Anadyr 11:49:56 - LMT 1924 May 2 +12 - +12 1930 Jun 21 +13 R +13/+14 1982 Ap 1 0s +12 R +12/+13 1991 Mar 31 2s +11 R +11/+12 1992 Ja 19 2s +12 R +12/+13 2010 Mar 28 2s +11 R +11/+12 2011 Mar 27 2s +12 - +12 +Z Europe/Belgrade 1:22 - LMT 1884 +1 - CET 1941 Ap 18 23 +1 c CE%sT 1945 +1 - CET 1945 May 8 2s +1 1 CEST 1945 S 16 2s +1 - CET 1982 N 27 +1 E CE%sT +Li Europe/Belgrade Europe/Ljubljana +Li Europe/Belgrade Europe/Podgorica +Li Europe/Belgrade Europe/Sarajevo +Li Europe/Belgrade Europe/Skopje +Li Europe/Belgrade Europe/Zagreb +Li Europe/Prague Europe/Bratislava +R s 1918 o - Ap 15 23 1 S +R s 1918 1919 - O 6 24s 0 - +R s 1919 o - Ap 6 23 1 S +R s 1924 o - Ap 16 23 1 S +R s 1924 o - O 4 24s 0 - +R s 1926 o - Ap 17 23 1 S +R s 1926 1929 - O Sat>=1 24s 0 - +R s 1927 o - Ap 9 23 1 S +R s 1928 o - Ap 15 0 1 S +R s 1929 o - Ap 20 23 1 S +R s 1937 o - Jun 16 23 1 S +R s 1937 o - O 2 24s 0 - +R s 1938 o - Ap 2 23 1 S +R s 1938 o - Ap 30 23 2 M +R s 1938 o - O 2 24 1 S +R s 1939 o - O 7 24s 0 - +R s 1942 o - May 2 23 1 S +R s 1942 o - S 1 1 0 - +R s 1943 1946 - Ap Sat>=13 23 1 S +R s 1943 1944 - O Sun>=1 1 0 - +R s 1945 1946 - S lastSun 1 0 - +R s 1949 o - Ap 30 23 1 S +R s 1949 o - O 2 1 0 - +R s 1974 1975 - Ap Sat>=12 23 1 S +R s 1974 1975 - O Sun>=1 1 0 - +R s 1976 o - Mar 27 23 1 S +R s 1976 1977 - S lastSun 1 0 - +R s 1977 o - Ap 2 23 1 S +R s 1978 o - Ap 2 2s 1 S +R s 1978 o - O 1 2s 0 - +R Sp 1967 o - Jun 3 12 1 S +R Sp 1967 o - O 1 0 0 - +R Sp 1974 o - Jun 24 0 1 S +R Sp 1974 o - S 1 0 0 - +R Sp 1976 1977 - May 1 0 1 S +R Sp 1976 o - Au 1 0 0 - +R Sp 1977 o - S 28 0 0 - +R Sp 1978 o - Jun 1 0 1 S +R Sp 1978 o - Au 4 0 0 - +Z Europe/Madrid -0:14:44 - LMT 1900 D 31 23:45:16 +0 s WE%sT 1940 Mar 16 23 +1 s CE%sT 1979 +1 E CE%sT +Z Africa/Ceuta -0:21:16 - LMT 1900 D 31 23:38:44 +0 - WET 1918 May 6 23 +0 1 WEST 1918 O 7 23 +0 - WET 1924 +0 s WE%sT 1929 +0 - WET 1967 +0 Sp WE%sT 1984 Mar 16 +1 - CET 1986 +1 E CE%sT +Z Atlantic/Canary -1:1:36 - LMT 1922 Mar +-1 - -01 1946 S 30 1 +0 - WET 1980 Ap 6 0s +0 1 WEST 1980 S 28 1u +0 E WE%sT +Z Europe/Stockholm 1:12:12 - LMT 1879 +1:0:14 - SET 1900 +1 - CET 1916 May 14 23 +1 1 CEST 1916 O 1 1 +1 - CET 1980 +1 E CE%sT +R CH 1941 1942 - May M>=1 1 1 S +R CH 1941 1942 - O M>=1 2 0 - +Z Europe/Zurich 0:34:8 - LMT 1853 Jul 16 +0:29:46 - BMT 1894 Jun +1 CH CE%sT 1981 +1 E CE%sT +R T 1916 o - May 1 0 1 S +R T 1916 o - O 1 0 0 - +R T 1920 o - Mar 28 0 1 S +R T 1920 o - O 25 0 0 - +R T 1921 o - Ap 3 0 1 S +R T 1921 o - O 3 0 0 - +R T 1922 o - Mar 26 0 1 S +R T 1922 o - O 8 0 0 - +R T 1924 o - May 13 0 1 S +R T 1924 1925 - O 1 0 0 - +R T 1925 o - May 1 0 1 S +R T 1940 o - Jun 30 0 1 S +R T 1940 o - O 5 0 0 - +R T 1940 o - D 1 0 1 S +R T 1941 o - S 21 0 0 - +R T 1942 o - Ap 1 0 1 S +R T 1942 o - N 1 0 0 - +R T 1945 o - Ap 2 0 1 S +R T 1945 o - O 8 0 0 - +R T 1946 o - Jun 1 0 1 S +R T 1946 o - O 1 0 0 - +R T 1947 1948 - Ap Sun>=16 0 1 S +R T 1947 1950 - O Sun>=2 0 0 - +R T 1949 o - Ap 10 0 1 S +R T 1950 o - Ap 19 0 1 S +R T 1951 o - Ap 22 0 1 S +R T 1951 o - O 8 0 0 - +R T 1962 o - Jul 15 0 1 S +R T 1962 o - O 8 0 0 - +R T 1964 o - May 15 0 1 S +R T 1964 o - O 1 0 0 - +R T 1970 1972 - May Sun>=2 0 1 S +R T 1970 1972 - O Sun>=2 0 0 - +R T 1973 o - Jun 3 1 1 S +R T 1973 o - N 
4 3 0 - +R T 1974 o - Mar 31 2 1 S +R T 1974 o - N 3 5 0 - +R T 1975 o - Mar 30 0 1 S +R T 1975 1976 - O lastSun 0 0 - +R T 1976 o - Jun 1 0 1 S +R T 1977 1978 - Ap Sun>=1 0 1 S +R T 1977 o - O 16 0 0 - +R T 1979 1980 - Ap Sun>=1 3 1 S +R T 1979 1982 - O M>=11 0 0 - +R T 1981 1982 - Mar lastSun 3 1 S +R T 1983 o - Jul 31 0 1 S +R T 1983 o - O 2 0 0 - +R T 1985 o - Ap 20 0 1 S +R T 1985 o - S 28 0 0 - +R T 1986 1993 - Mar lastSun 1s 1 S +R T 1986 1995 - S lastSun 1s 0 - +R T 1994 o - Mar 20 1s 1 S +R T 1995 2006 - Mar lastSun 1s 1 S +R T 1996 2006 - O lastSun 1s 0 - +Z Europe/Istanbul 1:55:52 - LMT 1880 +1:56:56 - IMT 1910 O +2 T EE%sT 1978 O 15 +3 T +03/+04 1985 Ap 20 +2 T EE%sT 2007 +2 E EE%sT 2011 Mar 27 1u +2 - EET 2011 Mar 28 1u +2 E EE%sT 2014 Mar 30 1u +2 - EET 2014 Mar 31 1u +2 E EE%sT 2015 O 25 1u +2 1 EEST 2015 N 8 1u +2 E EE%sT 2016 S 7 +3 - +03 +Li Europe/Istanbul Asia/Istanbul +Z Europe/Kiev 2:2:4 - LMT 1880 +2:2:4 - KMT 1924 May 2 +2 - EET 1930 Jun 21 +3 - MSK 1941 S 20 +1 c CE%sT 1943 N 6 +3 R MSK/MSD 1990 Jul 1 2 +2 1 EEST 1991 S 29 3 +2 e EE%sT 1995 +2 E EE%sT +Z Europe/Uzhgorod 1:29:12 - LMT 1890 O +1 - CET 1940 +1 c CE%sT 1944 O +1 1 CEST 1944 O 26 +1 - CET 1945 Jun 29 +3 R MSK/MSD 1990 +3 - MSK 1990 Jul 1 2 +1 - CET 1991 Mar 31 3 +2 - EET 1992 +2 e EE%sT 1995 +2 E EE%sT +Z Europe/Zaporozhye 2:20:40 - LMT 1880 +2:20 - +0220 1924 May 2 +2 - EET 1930 Jun 21 +3 - MSK 1941 Au 25 +1 c CE%sT 1943 O 25 +3 R MSK/MSD 1991 Mar 31 2 +2 e EE%sT 1995 +2 E EE%sT +R u 1918 1919 - Mar lastSun 2 1 D +R u 1918 1919 - O lastSun 2 0 S +R u 1942 o - F 9 2 1 W +R u 1945 o - Au 14 23u 1 P +R u 1945 o - S lastSun 2 0 S +R u 1967 2006 - O lastSun 2 0 S +R u 1967 1973 - Ap lastSun 2 1 D +R u 1974 o - Ja 6 2 1 D +R u 1975 o - F 23 2 1 D +R u 1976 1986 - Ap lastSun 2 1 D +R u 1987 2006 - Ap Sun>=1 2 1 D +R u 2007 ma - Mar Sun>=8 2 1 D +R u 2007 ma - N Sun>=1 2 0 S +Z EST -5 - EST +Z MST -7 - MST +Z HST -10 - HST +Z EST5EDT -5 u E%sT +Z CST6CDT -6 u C%sT +Z MST7MDT -7 u M%sT +Z PST8PDT -8 u P%sT +R NY 1920 o - Mar lastSun 2 1 D +R NY 1920 o - O lastSun 2 0 S +R NY 1921 1966 - Ap lastSun 2 1 D +R NY 1921 1954 - S lastSun 2 0 S +R NY 1955 1966 - O lastSun 2 0 S +Z America/New_York -4:56:2 - LMT 1883 N 18 12:3:58 +-5 u E%sT 1920 +-5 NY E%sT 1942 +-5 u E%sT 1946 +-5 NY E%sT 1967 +-5 u E%sT +R Ch 1920 o - Jun 13 2 1 D +R Ch 1920 1921 - O lastSun 2 0 S +R Ch 1921 o - Mar lastSun 2 1 D +R Ch 1922 1966 - Ap lastSun 2 1 D +R Ch 1922 1954 - S lastSun 2 0 S +R Ch 1955 1966 - O lastSun 2 0 S +Z America/Chicago -5:50:36 - LMT 1883 N 18 12:9:24 +-6 u C%sT 1920 +-6 Ch C%sT 1936 Mar 1 2 +-5 - EST 1936 N 15 2 +-6 Ch C%sT 1942 +-6 u C%sT 1946 +-6 Ch C%sT 1967 +-6 u C%sT +Z America/North_Dakota/Center -6:45:12 - LMT 1883 N 18 12:14:48 +-7 u M%sT 1992 O 25 2 +-6 u C%sT +Z America/North_Dakota/New_Salem -6:45:39 - LMT 1883 N 18 12:14:21 +-7 u M%sT 2003 O 26 2 +-6 u C%sT +Z America/North_Dakota/Beulah -6:47:7 - LMT 1883 N 18 12:12:53 +-7 u M%sT 2010 N 7 2 +-6 u C%sT +R De 1920 1921 - Mar lastSun 2 1 D +R De 1920 o - O lastSun 2 0 S +R De 1921 o - May 22 2 0 S +R De 1965 1966 - Ap lastSun 2 1 D +R De 1965 1966 - O lastSun 2 0 S +Z America/Denver -6:59:56 - LMT 1883 N 18 12:0:4 +-7 u M%sT 1920 +-7 De M%sT 1942 +-7 u M%sT 1946 +-7 De M%sT 1967 +-7 u M%sT +R CA 1948 o - Mar 14 2:1 1 D +R CA 1949 o - Ja 1 2 0 S +R CA 1950 1966 - Ap lastSun 1 1 D +R CA 1950 1961 - S lastSun 2 0 S +R CA 1962 1966 - O lastSun 2 0 S +Z America/Los_Angeles -7:52:58 - LMT 1883 N 18 12:7:2 +-8 u P%sT 1946 +-8 CA P%sT 1967 +-8 u P%sT +Z America/Juneau 
15:2:19 - LMT 1867 O 19 15:33:32 +-8:57:41 - LMT 1900 Au 20 12 +-8 - PST 1942 +-8 u P%sT 1946 +-8 - PST 1969 +-8 u P%sT 1980 Ap 27 2 +-9 u Y%sT 1980 O 26 2 +-8 u P%sT 1983 O 30 2 +-9 u Y%sT 1983 N 30 +-9 u AK%sT +Z America/Sitka 14:58:47 - LMT 1867 O 19 15:30 +-9:1:13 - LMT 1900 Au 20 12 +-8 - PST 1942 +-8 u P%sT 1946 +-8 - PST 1969 +-8 u P%sT 1983 O 30 2 +-9 u Y%sT 1983 N 30 +-9 u AK%sT +Z America/Metlakatla 15:13:42 - LMT 1867 O 19 15:44:55 +-8:46:18 - LMT 1900 Au 20 12 +-8 - PST 1942 +-8 u P%sT 1946 +-8 - PST 1969 +-8 u P%sT 1983 O 30 2 +-8 - PST 2015 N 1 2 +-9 u AK%sT +Z America/Yakutat 14:41:5 - LMT 1867 O 19 15:12:18 +-9:18:55 - LMT 1900 Au 20 12 +-9 - YST 1942 +-9 u Y%sT 1946 +-9 - YST 1969 +-9 u Y%sT 1983 N 30 +-9 u AK%sT +Z America/Anchorage 14:0:24 - LMT 1867 O 19 14:31:37 +-9:59:36 - LMT 1900 Au 20 12 +-10 - AST 1942 +-10 u A%sT 1967 Ap +-10 - AHST 1969 +-10 u AH%sT 1983 O 30 2 +-9 u Y%sT 1983 N 30 +-9 u AK%sT +Z America/Nome 12:58:22 - LMT 1867 O 19 13:29:35 +-11:1:38 - LMT 1900 Au 20 12 +-11 - NST 1942 +-11 u N%sT 1946 +-11 - NST 1967 Ap +-11 - BST 1969 +-11 u B%sT 1983 O 30 2 +-9 u Y%sT 1983 N 30 +-9 u AK%sT +Z America/Adak 12:13:22 - LMT 1867 O 19 12:44:35 +-11:46:38 - LMT 1900 Au 20 12 +-11 - NST 1942 +-11 u N%sT 1946 +-11 - NST 1967 Ap +-11 - BST 1969 +-11 u B%sT 1983 O 30 2 +-10 u AH%sT 1983 N 30 +-10 u H%sT +Z Pacific/Honolulu -10:31:26 - LMT 1896 Ja 13 12 +-10:30 - HST 1933 Ap 30 2 +-10:30 1 HDT 1933 May 21 12 +-10:30 u H%sT 1947 Jun 8 2 +-10 - HST +Z America/Phoenix -7:28:18 - LMT 1883 N 18 11:31:42 +-7 u M%sT 1944 Ja 1 0:1 +-7 - MST 1944 Ap 1 0:1 +-7 u M%sT 1944 O 1 0:1 +-7 - MST 1967 +-7 u M%sT 1968 Mar 21 +-7 - MST +Z America/Boise -7:44:49 - LMT 1883 N 18 12:15:11 +-8 u P%sT 1923 May 13 2 +-7 u M%sT 1974 +-7 - MST 1974 F 3 2 +-7 u M%sT +R In 1941 o - Jun 22 2 1 D +R In 1941 1954 - S lastSun 2 0 S +R In 1946 1954 - Ap lastSun 2 1 D +Z America/Indiana/Indianapolis -5:44:38 - LMT 1883 N 18 12:15:22 +-6 u C%sT 1920 +-6 In C%sT 1942 +-6 u C%sT 1946 +-6 In C%sT 1955 Ap 24 2 +-5 - EST 1957 S 29 2 +-6 - CST 1958 Ap 27 2 +-5 - EST 1969 +-5 u E%sT 1971 +-5 - EST 2006 +-5 u E%sT +R Ma 1951 o - Ap lastSun 2 1 D +R Ma 1951 o - S lastSun 2 0 S +R Ma 1954 1960 - Ap lastSun 2 1 D +R Ma 1954 1960 - S lastSun 2 0 S +Z America/Indiana/Marengo -5:45:23 - LMT 1883 N 18 12:14:37 +-6 u C%sT 1951 +-6 Ma C%sT 1961 Ap 30 2 +-5 - EST 1969 +-5 u E%sT 1974 Ja 6 2 +-6 1 CDT 1974 O 27 2 +-5 u E%sT 1976 +-5 - EST 2006 +-5 u E%sT +R V 1946 o - Ap lastSun 2 1 D +R V 1946 o - S lastSun 2 0 S +R V 1953 1954 - Ap lastSun 2 1 D +R V 1953 1959 - S lastSun 2 0 S +R V 1955 o - May 1 0 1 D +R V 1956 1963 - Ap lastSun 2 1 D +R V 1960 o - O lastSun 2 0 S +R V 1961 o - S lastSun 2 0 S +R V 1962 1963 - O lastSun 2 0 S +Z America/Indiana/Vincennes -5:50:7 - LMT 1883 N 18 12:9:53 +-6 u C%sT 1946 +-6 V C%sT 1964 Ap 26 2 +-5 - EST 1969 +-5 u E%sT 1971 +-5 - EST 2006 Ap 2 2 +-6 u C%sT 2007 N 4 2 +-5 u E%sT +R Pe 1946 o - Ap lastSun 2 1 D +R Pe 1946 o - S lastSun 2 0 S +R Pe 1953 1954 - Ap lastSun 2 1 D +R Pe 1953 1959 - S lastSun 2 0 S +R Pe 1955 o - May 1 0 1 D +R Pe 1956 1963 - Ap lastSun 2 1 D +R Pe 1960 o - O lastSun 2 0 S +R Pe 1961 o - S lastSun 2 0 S +R Pe 1962 1963 - O lastSun 2 0 S +Z America/Indiana/Tell_City -5:47:3 - LMT 1883 N 18 12:12:57 +-6 u C%sT 1946 +-6 Pe C%sT 1964 Ap 26 2 +-5 - EST 1969 +-5 u E%sT 1971 +-5 - EST 2006 Ap 2 2 +-6 u C%sT +R Pi 1955 o - May 1 0 1 D +R Pi 1955 1960 - S lastSun 2 0 S +R Pi 1956 1964 - Ap lastSun 2 1 D +R Pi 1961 1964 - O lastSun 2 0 S +Z America/Indiana/Petersburg 
-5:49:7 - LMT 1883 N 18 12:10:53 +-6 u C%sT 1955 +-6 Pi C%sT 1965 Ap 25 2 +-5 - EST 1966 O 30 2 +-6 u C%sT 1977 O 30 2 +-5 - EST 2006 Ap 2 2 +-6 u C%sT 2007 N 4 2 +-5 u E%sT +R St 1947 1961 - Ap lastSun 2 1 D +R St 1947 1954 - S lastSun 2 0 S +R St 1955 1956 - O lastSun 2 0 S +R St 1957 1958 - S lastSun 2 0 S +R St 1959 1961 - O lastSun 2 0 S +Z America/Indiana/Knox -5:46:30 - LMT 1883 N 18 12:13:30 +-6 u C%sT 1947 +-6 St C%sT 1962 Ap 29 2 +-5 - EST 1963 O 27 2 +-6 u C%sT 1991 O 27 2 +-5 - EST 2006 Ap 2 2 +-6 u C%sT +R Pu 1946 1960 - Ap lastSun 2 1 D +R Pu 1946 1954 - S lastSun 2 0 S +R Pu 1955 1956 - O lastSun 2 0 S +R Pu 1957 1960 - S lastSun 2 0 S +Z America/Indiana/Winamac -5:46:25 - LMT 1883 N 18 12:13:35 +-6 u C%sT 1946 +-6 Pu C%sT 1961 Ap 30 2 +-5 - EST 1969 +-5 u E%sT 1971 +-5 - EST 2006 Ap 2 2 +-6 u C%sT 2007 Mar 11 2 +-5 u E%sT +Z America/Indiana/Vevay -5:40:16 - LMT 1883 N 18 12:19:44 +-6 u C%sT 1954 Ap 25 2 +-5 - EST 1969 +-5 u E%sT 1973 +-5 - EST 2006 +-5 u E%sT +R v 1921 o - May 1 2 1 D +R v 1921 o - S 1 2 0 S +R v 1941 1961 - Ap lastSun 2 1 D +R v 1941 o - S lastSun 2 0 S +R v 1946 o - Jun 2 2 0 S +R v 1950 1955 - S lastSun 2 0 S +R v 1956 1960 - O lastSun 2 0 S +Z America/Kentucky/Louisville -5:43:2 - LMT 1883 N 18 12:16:58 +-6 u C%sT 1921 +-6 v C%sT 1942 +-6 u C%sT 1946 +-6 v C%sT 1961 Jul 23 2 +-5 - EST 1968 +-5 u E%sT 1974 Ja 6 2 +-6 1 CDT 1974 O 27 2 +-5 u E%sT +Z America/Kentucky/Monticello -5:39:24 - LMT 1883 N 18 12:20:36 +-6 u C%sT 1946 +-6 - CST 1968 +-6 u C%sT 2000 O 29 2 +-5 u E%sT +R Dt 1948 o - Ap lastSun 2 1 D +R Dt 1948 o - S lastSun 2 0 S +Z America/Detroit -5:32:11 - LMT 1905 +-6 - CST 1915 May 15 2 +-5 - EST 1942 +-5 u E%sT 1946 +-5 Dt E%sT 1973 +-5 u E%sT 1975 +-5 - EST 1975 Ap 27 2 +-5 u E%sT +R Me 1946 o - Ap lastSun 2 1 D +R Me 1946 o - S lastSun 2 0 S +R Me 1966 o - Ap lastSun 2 1 D +R Me 1966 o - O lastSun 2 0 S +Z America/Menominee -5:50:27 - LMT 1885 S 18 12 +-6 u C%sT 1946 +-6 Me C%sT 1969 Ap 27 2 +-5 - EST 1973 Ap 29 2 +-6 u C%sT +R C 1918 o - Ap 14 2 1 D +R C 1918 o - O 27 2 0 S +R C 1942 o - F 9 2 1 W +R C 1945 o - Au 14 23u 1 P +R C 1945 o - S 30 2 0 S +R C 1974 1986 - Ap lastSun 2 1 D +R C 1974 2006 - O lastSun 2 0 S +R C 1987 2006 - Ap Sun>=1 2 1 D +R C 2007 ma - Mar Sun>=8 2 1 D +R C 2007 ma - N Sun>=1 2 0 S +R j 1917 o - Ap 8 2 1 D +R j 1917 o - S 17 2 0 S +R j 1919 o - May 5 23 1 D +R j 1919 o - Au 12 23 0 S +R j 1920 1935 - May Sun>=1 23 1 D +R j 1920 1935 - O lastSun 23 0 S +R j 1936 1941 - May M>=9 0 1 D +R j 1936 1941 - O M>=2 0 0 S +R j 1946 1950 - May Sun>=8 2 1 D +R j 1946 1950 - O Sun>=2 2 0 S +R j 1951 1986 - Ap lastSun 2 1 D +R j 1951 1959 - S lastSun 2 0 S +R j 1960 1986 - O lastSun 2 0 S +R j 1987 o - Ap Sun>=1 0:1 1 D +R j 1987 2006 - O lastSun 0:1 0 S +R j 1988 o - Ap Sun>=1 0:1 2 DD +R j 1989 2006 - Ap Sun>=1 0:1 1 D +R j 2007 2011 - Mar Sun>=8 0:1 1 D +R j 2007 2010 - N Sun>=1 0:1 0 S +Z America/St_Johns -3:30:52 - LMT 1884 +-3:30:52 j N%sT 1918 +-3:30:52 C N%sT 1919 +-3:30:52 j N%sT 1935 Mar 30 +-3:30 j N%sT 1942 May 11 +-3:30 C N%sT 1946 +-3:30 j N%sT 2011 N +-3:30 C N%sT +Z America/Goose_Bay -4:1:40 - LMT 1884 +-3:30:52 - NST 1918 +-3:30:52 C N%sT 1919 +-3:30:52 - NST 1935 Mar 30 +-3:30 - NST 1936 +-3:30 j N%sT 1942 May 11 +-3:30 C N%sT 1946 +-3:30 j N%sT 1966 Mar 15 2 +-4 j A%sT 2011 N +-4 C A%sT +R H 1916 o - Ap 1 0 1 D +R H 1916 o - O 1 0 0 S +R H 1920 o - May 9 0 1 D +R H 1920 o - Au 29 0 0 S +R H 1921 o - May 6 0 1 D +R H 1921 1922 - S 5 0 0 S +R H 1922 o - Ap 30 0 1 D +R H 1923 1925 - May Sun>=1 0 1 D +R H 1923 o 
- S 4 0 0 S +R H 1924 o - S 15 0 0 S +R H 1925 o - S 28 0 0 S +R H 1926 o - May 16 0 1 D +R H 1926 o - S 13 0 0 S +R H 1927 o - May 1 0 1 D +R H 1927 o - S 26 0 0 S +R H 1928 1931 - May Sun>=8 0 1 D +R H 1928 o - S 9 0 0 S +R H 1929 o - S 3 0 0 S +R H 1930 o - S 15 0 0 S +R H 1931 1932 - S M>=24 0 0 S +R H 1932 o - May 1 0 1 D +R H 1933 o - Ap 30 0 1 D +R H 1933 o - O 2 0 0 S +R H 1934 o - May 20 0 1 D +R H 1934 o - S 16 0 0 S +R H 1935 o - Jun 2 0 1 D +R H 1935 o - S 30 0 0 S +R H 1936 o - Jun 1 0 1 D +R H 1936 o - S 14 0 0 S +R H 1937 1938 - May Sun>=1 0 1 D +R H 1937 1941 - S M>=24 0 0 S +R H 1939 o - May 28 0 1 D +R H 1940 1941 - May Sun>=1 0 1 D +R H 1946 1949 - Ap lastSun 2 1 D +R H 1946 1949 - S lastSun 2 0 S +R H 1951 1954 - Ap lastSun 2 1 D +R H 1951 1954 - S lastSun 2 0 S +R H 1956 1959 - Ap lastSun 2 1 D +R H 1956 1959 - S lastSun 2 0 S +R H 1962 1973 - Ap lastSun 2 1 D +R H 1962 1973 - O lastSun 2 0 S +Z America/Halifax -4:14:24 - LMT 1902 Jun 15 +-4 H A%sT 1918 +-4 C A%sT 1919 +-4 H A%sT 1942 F 9 2s +-4 C A%sT 1946 +-4 H A%sT 1974 +-4 C A%sT +Z America/Glace_Bay -3:59:48 - LMT 1902 Jun 15 +-4 C A%sT 1953 +-4 H A%sT 1954 +-4 - AST 1972 +-4 H A%sT 1974 +-4 C A%sT +R o 1933 1935 - Jun Sun>=8 1 1 D +R o 1933 1935 - S Sun>=8 1 0 S +R o 1936 1938 - Jun Sun>=1 1 1 D +R o 1936 1938 - S Sun>=1 1 0 S +R o 1939 o - May 27 1 1 D +R o 1939 1941 - S Sat>=21 1 0 S +R o 1940 o - May 19 1 1 D +R o 1941 o - May 4 1 1 D +R o 1946 1972 - Ap lastSun 2 1 D +R o 1946 1956 - S lastSun 2 0 S +R o 1957 1972 - O lastSun 2 0 S +R o 1993 2006 - Ap Sun>=1 0:1 1 D +R o 1993 2006 - O lastSun 0:1 0 S +Z America/Moncton -4:19:8 - LMT 1883 D 9 +-5 - EST 1902 Jun 15 +-4 C A%sT 1933 +-4 o A%sT 1942 +-4 C A%sT 1946 +-4 o A%sT 1973 +-4 C A%sT 1993 +-4 o A%sT 2007 +-4 C A%sT +Z America/Blanc-Sablon -3:48:28 - LMT 1884 +-4 C A%sT 1970 +-4 - AST +R t 1919 o - Mar 30 23:30 1 D +R t 1919 o - O 26 0 0 S +R t 1920 o - May 2 2 1 D +R t 1920 o - S 26 0 0 S +R t 1921 o - May 15 2 1 D +R t 1921 o - S 15 2 0 S +R t 1922 1923 - May Sun>=8 2 1 D +R t 1922 1926 - S Sun>=15 2 0 S +R t 1924 1927 - May Sun>=1 2 1 D +R t 1927 1932 - S lastSun 2 0 S +R t 1928 1931 - Ap lastSun 2 1 D +R t 1932 o - May 1 2 1 D +R t 1933 1940 - Ap lastSun 2 1 D +R t 1933 o - O 1 2 0 S +R t 1934 1939 - S lastSun 2 0 S +R t 1945 1946 - S lastSun 2 0 S +R t 1946 o - Ap lastSun 2 1 D +R t 1947 1949 - Ap lastSun 0 1 D +R t 1947 1948 - S lastSun 0 0 S +R t 1949 o - N lastSun 0 0 S +R t 1950 1973 - Ap lastSun 2 1 D +R t 1950 o - N lastSun 2 0 S +R t 1951 1956 - S lastSun 2 0 S +R t 1957 1973 - O lastSun 2 0 S +Z America/Toronto -5:17:32 - LMT 1895 +-5 C E%sT 1919 +-5 t E%sT 1942 F 9 2s +-5 C E%sT 1946 +-5 t E%sT 1974 +-5 C E%sT +Z America/Thunder_Bay -5:57 - LMT 1895 +-6 - CST 1910 +-5 - EST 1942 +-5 C E%sT 1970 +-5 t E%sT 1973 +-5 - EST 1974 +-5 C E%sT +Z America/Nipigon -5:53:4 - LMT 1895 +-5 C E%sT 1940 S 29 +-5 1 EDT 1942 F 9 2s +-5 C E%sT +Z America/Rainy_River -6:18:16 - LMT 1895 +-6 C C%sT 1940 S 29 +-6 1 CDT 1942 F 9 2s +-6 C C%sT +Z America/Atikokan -6:6:28 - LMT 1895 +-6 C C%sT 1940 S 29 +-6 1 CDT 1942 F 9 2s +-6 C C%sT 1945 S 30 2 +-5 - EST +R W 1916 o - Ap 23 0 1 D +R W 1916 o - S 17 0 0 S +R W 1918 o - Ap 14 2 1 D +R W 1918 o - O 27 2 0 S +R W 1937 o - May 16 2 1 D +R W 1937 o - S 26 2 0 S +R W 1942 o - F 9 2 1 W +R W 1945 o - Au 14 23u 1 P +R W 1945 o - S lastSun 2 0 S +R W 1946 o - May 12 2 1 D +R W 1946 o - O 13 2 0 S +R W 1947 1949 - Ap lastSun 2 1 D +R W 1947 1949 - S lastSun 2 0 S +R W 1950 o - May 1 2 1 D +R W 1950 o - S 30 2 0 S +R W 1951 
1960 - Ap lastSun 2 1 D +R W 1951 1958 - S lastSun 2 0 S +R W 1959 o - O lastSun 2 0 S +R W 1960 o - S lastSun 2 0 S +R W 1963 o - Ap lastSun 2 1 D +R W 1963 o - S 22 2 0 S +R W 1966 1986 - Ap lastSun 2s 1 D +R W 1966 2005 - O lastSun 2s 0 S +R W 1987 2005 - Ap Sun>=1 2s 1 D +Z America/Winnipeg -6:28:36 - LMT 1887 Jul 16 +-6 W C%sT 2006 +-6 C C%sT +R r 1918 o - Ap 14 2 1 D +R r 1918 o - O 27 2 0 S +R r 1930 1934 - May Sun>=1 0 1 D +R r 1930 1934 - O Sun>=1 0 0 S +R r 1937 1941 - Ap Sun>=8 0 1 D +R r 1937 o - O Sun>=8 0 0 S +R r 1938 o - O Sun>=1 0 0 S +R r 1939 1941 - O Sun>=8 0 0 S +R r 1942 o - F 9 2 1 W +R r 1945 o - Au 14 23u 1 P +R r 1945 o - S lastSun 2 0 S +R r 1946 o - Ap Sun>=8 2 1 D +R r 1946 o - O Sun>=8 2 0 S +R r 1947 1957 - Ap lastSun 2 1 D +R r 1947 1957 - S lastSun 2 0 S +R r 1959 o - Ap lastSun 2 1 D +R r 1959 o - O lastSun 2 0 S +R Sw 1957 o - Ap lastSun 2 1 D +R Sw 1957 o - O lastSun 2 0 S +R Sw 1959 1961 - Ap lastSun 2 1 D +R Sw 1959 o - O lastSun 2 0 S +R Sw 1960 1961 - S lastSun 2 0 S +Z America/Regina -6:58:36 - LMT 1905 S +-7 r M%sT 1960 Ap lastSun 2 +-6 - CST +Z America/Swift_Current -7:11:20 - LMT 1905 S +-7 C M%sT 1946 Ap lastSun 2 +-7 r M%sT 1950 +-7 Sw M%sT 1972 Ap lastSun 2 +-6 - CST +R Ed 1918 1919 - Ap Sun>=8 2 1 D +R Ed 1918 o - O 27 2 0 S +R Ed 1919 o - May 27 2 0 S +R Ed 1920 1923 - Ap lastSun 2 1 D +R Ed 1920 o - O lastSun 2 0 S +R Ed 1921 1923 - S lastSun 2 0 S +R Ed 1942 o - F 9 2 1 W +R Ed 1945 o - Au 14 23u 1 P +R Ed 1945 o - S lastSun 2 0 S +R Ed 1947 o - Ap lastSun 2 1 D +R Ed 1947 o - S lastSun 2 0 S +R Ed 1967 o - Ap lastSun 2 1 D +R Ed 1967 o - O lastSun 2 0 S +R Ed 1969 o - Ap lastSun 2 1 D +R Ed 1969 o - O lastSun 2 0 S +R Ed 1972 1986 - Ap lastSun 2 1 D +R Ed 1972 2006 - O lastSun 2 0 S +Z America/Edmonton -7:33:52 - LMT 1906 S +-7 Ed M%sT 1987 +-7 C M%sT +R Va 1918 o - Ap 14 2 1 D +R Va 1918 o - O 27 2 0 S +R Va 1942 o - F 9 2 1 W +R Va 1945 o - Au 14 23u 1 P +R Va 1945 o - S 30 2 0 S +R Va 1946 1986 - Ap lastSun 2 1 D +R Va 1946 o - O 13 2 0 S +R Va 1947 1961 - S lastSun 2 0 S +R Va 1962 2006 - O lastSun 2 0 S +Z America/Vancouver -8:12:28 - LMT 1884 +-8 Va P%sT 1987 +-8 C P%sT +Z America/Dawson_Creek -8:0:56 - LMT 1884 +-8 C P%sT 1947 +-8 Va P%sT 1972 Au 30 2 +-7 - MST +Z America/Fort_Nelson -8:10:47 - LMT 1884 +-8 Va P%sT 1946 +-8 - PST 1947 +-8 Va P%sT 1987 +-8 C P%sT 2015 Mar 8 2 +-7 - MST +Z America/Creston -7:46:4 - LMT 1884 +-7 - MST 1916 O +-8 - PST 1918 Jun 2 +-7 - MST +R Y 1918 o - Ap 14 2 1 D +R Y 1918 o - O 27 2 0 S +R Y 1919 o - May 25 2 1 D +R Y 1919 o - N 1 0 0 S +R Y 1942 o - F 9 2 1 W +R Y 1945 o - Au 14 23u 1 P +R Y 1945 o - S 30 2 0 S +R Y 1965 o - Ap lastSun 0 2 DD +R Y 1965 o - O lastSun 2 0 S +R Y 1980 1986 - Ap lastSun 2 1 D +R Y 1980 2006 - O lastSun 2 0 S +R Y 1987 2006 - Ap Sun>=1 2 1 D +Z America/Pangnirtung 0 - -00 1921 +-4 Y A%sT 1995 Ap Sun>=1 2 +-5 C E%sT 1999 O 31 2 +-6 C C%sT 2000 O 29 2 +-5 C E%sT +Z America/Iqaluit 0 - -00 1942 Au +-5 Y E%sT 1999 O 31 2 +-6 C C%sT 2000 O 29 2 +-5 C E%sT +Z America/Resolute 0 - -00 1947 Au 31 +-6 Y C%sT 2000 O 29 2 +-5 - EST 2001 Ap 1 3 +-6 C C%sT 2006 O 29 2 +-5 - EST 2007 Mar 11 3 +-6 C C%sT +Z America/Rankin_Inlet 0 - -00 1957 +-6 Y C%sT 2000 O 29 2 +-5 - EST 2001 Ap 1 3 +-6 C C%sT +Z America/Cambridge_Bay 0 - -00 1920 +-7 Y M%sT 1999 O 31 2 +-6 C C%sT 2000 O 29 2 +-5 - EST 2000 N 5 +-6 - CST 2001 Ap 1 3 +-7 C M%sT +Z America/Yellowknife 0 - -00 1935 +-7 Y M%sT 1980 +-7 C M%sT +Z America/Inuvik 0 - -00 1953 +-8 Y P%sT 1979 Ap lastSun 2 +-7 Y M%sT 1980 +-7 C M%sT +Z 
America/Whitehorse -9:0:12 - LMT 1900 Au 20 +-9 Y Y%sT 1967 May 28 +-8 Y P%sT 1980 +-8 C P%sT +Z America/Dawson -9:17:40 - LMT 1900 Au 20 +-9 Y Y%sT 1973 O 28 +-8 Y P%sT 1980 +-8 C P%sT +R m 1939 o - F 5 0 1 D +R m 1939 o - Jun 25 0 0 S +R m 1940 o - D 9 0 1 D +R m 1941 o - Ap 1 0 0 S +R m 1943 o - D 16 0 1 W +R m 1944 o - May 1 0 0 S +R m 1950 o - F 12 0 1 D +R m 1950 o - Jul 30 0 0 S +R m 1996 2000 - Ap Sun>=1 2 1 D +R m 1996 2000 - O lastSun 2 0 S +R m 2001 o - May Sun>=1 2 1 D +R m 2001 o - S lastSun 2 0 S +R m 2002 ma - Ap Sun>=1 2 1 D +R m 2002 ma - O lastSun 2 0 S +Z America/Cancun -5:47:4 - LMT 1922 Ja 1 0:12:56 +-6 - CST 1981 D 23 +-5 m E%sT 1998 Au 2 2 +-6 m C%sT 2015 F 1 2 +-5 - EST +Z America/Merida -5:58:28 - LMT 1922 Ja 1 0:1:32 +-6 - CST 1981 D 23 +-5 - EST 1982 D 2 +-6 m C%sT +Z America/Matamoros -6:40 - LMT 1921 D 31 23:20 +-6 - CST 1988 +-6 u C%sT 1989 +-6 m C%sT 2010 +-6 u C%sT +Z America/Monterrey -6:41:16 - LMT 1921 D 31 23:18:44 +-6 - CST 1988 +-6 u C%sT 1989 +-6 m C%sT +Z America/Mexico_City -6:36:36 - LMT 1922 Ja 1 0:23:24 +-7 - MST 1927 Jun 10 23 +-6 - CST 1930 N 15 +-7 - MST 1931 May 1 23 +-6 - CST 1931 O +-7 - MST 1932 Ap +-6 m C%sT 2001 S 30 2 +-6 - CST 2002 F 20 +-6 m C%sT +Z America/Ojinaga -6:57:40 - LMT 1922 Ja 1 0:2:20 +-7 - MST 1927 Jun 10 23 +-6 - CST 1930 N 15 +-7 - MST 1931 May 1 23 +-6 - CST 1931 O +-7 - MST 1932 Ap +-6 - CST 1996 +-6 m C%sT 1998 +-6 - CST 1998 Ap Sun>=1 3 +-7 m M%sT 2010 +-7 u M%sT +Z America/Chihuahua -7:4:20 - LMT 1921 D 31 23:55:40 +-7 - MST 1927 Jun 10 23 +-6 - CST 1930 N 15 +-7 - MST 1931 May 1 23 +-6 - CST 1931 O +-7 - MST 1932 Ap +-6 - CST 1996 +-6 m C%sT 1998 +-6 - CST 1998 Ap Sun>=1 3 +-7 m M%sT +Z America/Hermosillo -7:23:52 - LMT 1921 D 31 23:36:8 +-7 - MST 1927 Jun 10 23 +-6 - CST 1930 N 15 +-7 - MST 1931 May 1 23 +-6 - CST 1931 O +-7 - MST 1932 Ap +-6 - CST 1942 Ap 24 +-7 - MST 1949 Ja 14 +-8 - PST 1970 +-7 m M%sT 1999 +-7 - MST +Z America/Mazatlan -7:5:40 - LMT 1921 D 31 23:54:20 +-7 - MST 1927 Jun 10 23 +-6 - CST 1930 N 15 +-7 - MST 1931 May 1 23 +-6 - CST 1931 O +-7 - MST 1932 Ap +-6 - CST 1942 Ap 24 +-7 - MST 1949 Ja 14 +-8 - PST 1970 +-7 m M%sT +Z America/Bahia_Banderas -7:1 - LMT 1921 D 31 23:59 +-7 - MST 1927 Jun 10 23 +-6 - CST 1930 N 15 +-7 - MST 1931 May 1 23 +-6 - CST 1931 O +-7 - MST 1932 Ap +-6 - CST 1942 Ap 24 +-7 - MST 1949 Ja 14 +-8 - PST 1970 +-7 m M%sT 2010 Ap 4 2 +-6 m C%sT +Z America/Tijuana -7:48:4 - LMT 1922 Ja 1 0:11:56 +-7 - MST 1924 +-8 - PST 1927 Jun 10 23 +-7 - MST 1930 N 15 +-8 - PST 1931 Ap +-8 1 PDT 1931 S 30 +-8 - PST 1942 Ap 24 +-8 1 PWT 1945 Au 14 23u +-8 1 PPT 1945 N 12 +-8 - PST 1948 Ap 5 +-8 1 PDT 1949 Ja 14 +-8 - PST 1954 +-8 CA P%sT 1961 +-8 - PST 1976 +-8 u P%sT 1996 +-8 m P%sT 2001 +-8 u P%sT 2002 F 20 +-8 m P%sT 2010 +-8 u P%sT +R BS 1964 1975 - O lastSun 2 0 S +R BS 1964 1975 - Ap lastSun 2 1 D +Z America/Nassau -5:9:30 - LMT 1912 Mar 2 +-5 BS E%sT 1976 +-5 u E%sT +R BB 1977 o - Jun 12 2 1 D +R BB 1977 1978 - O Sun>=1 2 0 S +R BB 1978 1980 - Ap Sun>=15 2 1 D +R BB 1979 o - S 30 2 0 S +R BB 1980 o - S 25 2 0 S +Z America/Barbados -3:58:29 - LMT 1924 +-3:58:29 - BMT 1932 +-4 BB A%sT +R BZ 1918 1942 - O Sun>=2 0 0:30 -0530 +R BZ 1919 1943 - F Sun>=9 0 0 CST +R BZ 1973 o - D 5 0 1 CDT +R BZ 1974 o - F 9 0 0 CST +R BZ 1982 o - D 18 0 1 CDT +R BZ 1983 o - F 12 0 0 CST +Z America/Belize -5:52:48 - LMT 1912 Ap +-6 BZ %s +Z Atlantic/Bermuda -4:19:18 - LMT 1930 Ja 1 2 +-4 - AST 1974 Ap 28 2 +-4 C A%sT 1976 +-4 u A%sT +R CR 1979 1980 - F lastSun 0 1 D +R CR 1979 1980 - Jun Sun>=1 0 0 S +R CR 
1991 1992 - Ja Sat>=15 0 1 D +R CR 1991 o - Jul 1 0 0 S +R CR 1992 o - Mar 15 0 0 S +Z America/Costa_Rica -5:36:13 - LMT 1890 +-5:36:13 - SJMT 1921 Ja 15 +-6 CR C%sT +R Q 1928 o - Jun 10 0 1 D +R Q 1928 o - O 10 0 0 S +R Q 1940 1942 - Jun Sun>=1 0 1 D +R Q 1940 1942 - S Sun>=1 0 0 S +R Q 1945 1946 - Jun Sun>=1 0 1 D +R Q 1945 1946 - S Sun>=1 0 0 S +R Q 1965 o - Jun 1 0 1 D +R Q 1965 o - S 30 0 0 S +R Q 1966 o - May 29 0 1 D +R Q 1966 o - O 2 0 0 S +R Q 1967 o - Ap 8 0 1 D +R Q 1967 1968 - S Sun>=8 0 0 S +R Q 1968 o - Ap 14 0 1 D +R Q 1969 1977 - Ap lastSun 0 1 D +R Q 1969 1971 - O lastSun 0 0 S +R Q 1972 1974 - O 8 0 0 S +R Q 1975 1977 - O lastSun 0 0 S +R Q 1978 o - May 7 0 1 D +R Q 1978 1990 - O Sun>=8 0 0 S +R Q 1979 1980 - Mar Sun>=15 0 1 D +R Q 1981 1985 - May Sun>=5 0 1 D +R Q 1986 1989 - Mar Sun>=14 0 1 D +R Q 1990 1997 - Ap Sun>=1 0 1 D +R Q 1991 1995 - O Sun>=8 0s 0 S +R Q 1996 o - O 6 0s 0 S +R Q 1997 o - O 12 0s 0 S +R Q 1998 1999 - Mar lastSun 0s 1 D +R Q 1998 2003 - O lastSun 0s 0 S +R Q 2000 2003 - Ap Sun>=1 0s 1 D +R Q 2004 o - Mar lastSun 0s 1 D +R Q 2006 2010 - O lastSun 0s 0 S +R Q 2007 o - Mar Sun>=8 0s 1 D +R Q 2008 o - Mar Sun>=15 0s 1 D +R Q 2009 2010 - Mar Sun>=8 0s 1 D +R Q 2011 o - Mar Sun>=15 0s 1 D +R Q 2011 o - N 13 0s 0 S +R Q 2012 o - Ap 1 0s 1 D +R Q 2012 ma - N Sun>=1 0s 0 S +R Q 2013 ma - Mar Sun>=8 0s 1 D +Z America/Havana -5:29:28 - LMT 1890 +-5:29:36 - HMT 1925 Jul 19 12 +-5 Q C%sT +R DO 1966 o - O 30 0 1 EDT +R DO 1967 o - F 28 0 0 EST +R DO 1969 1973 - O lastSun 0 0:30 -0430 +R DO 1970 o - F 21 0 0 EST +R DO 1971 o - Ja 20 0 0 EST +R DO 1972 1974 - Ja 21 0 0 EST +Z America/Santo_Domingo -4:39:36 - LMT 1890 +-4:40 - SDMT 1933 Ap 1 12 +-5 DO %s 1974 O 27 +-4 - AST 2000 O 29 2 +-5 u E%sT 2000 D 3 1 +-4 - AST +R SV 1987 1988 - May Sun>=1 0 1 D +R SV 1987 1988 - S lastSun 0 0 S +Z America/El_Salvador -5:56:48 - LMT 1921 +-6 SV C%sT +R GT 1973 o - N 25 0 1 D +R GT 1974 o - F 24 0 0 S +R GT 1983 o - May 21 0 1 D +R GT 1983 o - S 22 0 0 S +R GT 1991 o - Mar 23 0 1 D +R GT 1991 o - S 7 0 0 S +R GT 2006 o - Ap 30 0 1 D +R GT 2006 o - O 1 0 0 S +Z America/Guatemala -6:2:4 - LMT 1918 O 5 +-6 GT C%sT +R HT 1983 o - May 8 0 1 D +R HT 1984 1987 - Ap lastSun 0 1 D +R HT 1983 1987 - O lastSun 0 0 S +R HT 1988 1997 - Ap Sun>=1 1s 1 D +R HT 1988 1997 - O lastSun 1s 0 S +R HT 2005 2006 - Ap Sun>=1 0 1 D +R HT 2005 2006 - O lastSun 0 0 S +R HT 2012 2015 - Mar Sun>=8 2 1 D +R HT 2012 2015 - N Sun>=1 2 0 S +R HT 2017 ma - Mar Sun>=8 2 1 D +R HT 2017 ma - N Sun>=1 2 0 S +Z America/Port-au-Prince -4:49:20 - LMT 1890 +-4:49 - PPMT 1917 Ja 24 12 +-5 HT E%sT +R HN 1987 1988 - May Sun>=1 0 1 D +R HN 1987 1988 - S lastSun 0 0 S +R HN 2006 o - May Sun>=1 0 1 D +R HN 2006 o - Au M>=1 0 0 S +Z America/Tegucigalpa -5:48:52 - LMT 1921 Ap +-6 HN C%sT +Z America/Jamaica -5:7:10 - LMT 1890 +-5:7:10 - KMT 1912 F +-5 - EST 1974 +-5 u E%sT 1984 +-5 - EST +Z America/Martinique -4:4:20 - LMT 1890 +-4:4:20 - FFMT 1911 May +-4 - AST 1980 Ap 6 +-4 1 ADT 1980 S 28 +-4 - AST +R NI 1979 1980 - Mar Sun>=16 0 1 D +R NI 1979 1980 - Jun M>=23 0 0 S +R NI 2005 o - Ap 10 0 1 D +R NI 2005 o - O Sun>=1 0 0 S +R NI 2006 o - Ap 30 2 1 D +R NI 2006 o - O Sun>=1 1 0 S +Z America/Managua -5:45:8 - LMT 1890 +-5:45:12 - MMT 1934 Jun 23 +-6 - CST 1973 May +-5 - EST 1975 F 16 +-6 NI C%sT 1992 Ja 1 4 +-5 - EST 1992 S 24 +-6 - CST 1993 +-5 - EST 1997 +-6 NI C%sT +Z America/Panama -5:18:8 - LMT 1890 +-5:19:36 - CMT 1908 Ap 22 +-5 - EST +Li America/Panama America/Cayman +Z America/Puerto_Rico -4:24:25 - LMT 1899 Mar 28 
12 +-4 - AST 1942 May 3 +-4 u A%sT 1946 +-4 - AST +Z America/Miquelon -3:44:40 - LMT 1911 May 15 +-4 - AST 1980 May +-3 - -03 1987 +-3 C -03/-02 +Z America/Grand_Turk -4:44:32 - LMT 1890 +-5:7:10 - KMT 1912 F +-5 - EST 1979 +-5 u E%sT 2015 N Sun>=1 2 +-4 - AST 2018 Mar 11 3 +-5 u E%sT +R A 1930 o - D 1 0 1 - +R A 1931 o - Ap 1 0 0 - +R A 1931 o - O 15 0 1 - +R A 1932 1940 - Mar 1 0 0 - +R A 1932 1939 - N 1 0 1 - +R A 1940 o - Jul 1 0 1 - +R A 1941 o - Jun 15 0 0 - +R A 1941 o - O 15 0 1 - +R A 1943 o - Au 1 0 0 - +R A 1943 o - O 15 0 1 - +R A 1946 o - Mar 1 0 0 - +R A 1946 o - O 1 0 1 - +R A 1963 o - O 1 0 0 - +R A 1963 o - D 15 0 1 - +R A 1964 1966 - Mar 1 0 0 - +R A 1964 1966 - O 15 0 1 - +R A 1967 o - Ap 2 0 0 - +R A 1967 1968 - O Sun>=1 0 1 - +R A 1968 1969 - Ap Sun>=1 0 0 - +R A 1974 o - Ja 23 0 1 - +R A 1974 o - May 1 0 0 - +R A 1988 o - D 1 0 1 - +R A 1989 1993 - Mar Sun>=1 0 0 - +R A 1989 1992 - O Sun>=15 0 1 - +R A 1999 o - O Sun>=1 0 1 - +R A 2000 o - Mar 3 0 0 - +R A 2007 o - D 30 0 1 - +R A 2008 2009 - Mar Sun>=15 0 0 - +R A 2008 o - O Sun>=15 0 1 - +Z America/Argentina/Buenos_Aires -3:53:48 - LMT 1894 O 31 +-4:16:48 - CMT 1920 May +-4 - -04 1930 D +-4 A -04/-03 1969 O 5 +-3 A -03/-02 1999 O 3 +-4 A -04/-03 2000 Mar 3 +-3 A -03/-02 +Z America/Argentina/Cordoba -4:16:48 - LMT 1894 O 31 +-4:16:48 - CMT 1920 May +-4 - -04 1930 D +-4 A -04/-03 1969 O 5 +-3 A -03/-02 1991 Mar 3 +-4 - -04 1991 O 20 +-3 A -03/-02 1999 O 3 +-4 A -04/-03 2000 Mar 3 +-3 A -03/-02 +Z America/Argentina/Salta -4:21:40 - LMT 1894 O 31 +-4:16:48 - CMT 1920 May +-4 - -04 1930 D +-4 A -04/-03 1969 O 5 +-3 A -03/-02 1991 Mar 3 +-4 - -04 1991 O 20 +-3 A -03/-02 1999 O 3 +-4 A -04/-03 2000 Mar 3 +-3 A -03/-02 2008 O 18 +-3 - -03 +Z America/Argentina/Tucuman -4:20:52 - LMT 1894 O 31 +-4:16:48 - CMT 1920 May +-4 - -04 1930 D +-4 A -04/-03 1969 O 5 +-3 A -03/-02 1991 Mar 3 +-4 - -04 1991 O 20 +-3 A -03/-02 1999 O 3 +-4 A -04/-03 2000 Mar 3 +-3 - -03 2004 Jun +-4 - -04 2004 Jun 13 +-3 A -03/-02 +Z America/Argentina/La_Rioja -4:27:24 - LMT 1894 O 31 +-4:16:48 - CMT 1920 May +-4 - -04 1930 D +-4 A -04/-03 1969 O 5 +-3 A -03/-02 1991 Mar +-4 - -04 1991 May 7 +-3 A -03/-02 1999 O 3 +-4 A -04/-03 2000 Mar 3 +-3 - -03 2004 Jun +-4 - -04 2004 Jun 20 +-3 A -03/-02 2008 O 18 +-3 - -03 +Z America/Argentina/San_Juan -4:34:4 - LMT 1894 O 31 +-4:16:48 - CMT 1920 May +-4 - -04 1930 D +-4 A -04/-03 1969 O 5 +-3 A -03/-02 1991 Mar +-4 - -04 1991 May 7 +-3 A -03/-02 1999 O 3 +-4 A -04/-03 2000 Mar 3 +-3 - -03 2004 May 31 +-4 - -04 2004 Jul 25 +-3 A -03/-02 2008 O 18 +-3 - -03 +Z America/Argentina/Jujuy -4:21:12 - LMT 1894 O 31 +-4:16:48 - CMT 1920 May +-4 - -04 1930 D +-4 A -04/-03 1969 O 5 +-3 A -03/-02 1990 Mar 4 +-4 - -04 1990 O 28 +-4 1 -03 1991 Mar 17 +-4 - -04 1991 O 6 +-3 1 -02 1992 +-3 A -03/-02 1999 O 3 +-4 A -04/-03 2000 Mar 3 +-3 A -03/-02 2008 O 18 +-3 - -03 +Z America/Argentina/Catamarca -4:23:8 - LMT 1894 O 31 +-4:16:48 - CMT 1920 May +-4 - -04 1930 D +-4 A -04/-03 1969 O 5 +-3 A -03/-02 1991 Mar 3 +-4 - -04 1991 O 20 +-3 A -03/-02 1999 O 3 +-4 A -04/-03 2000 Mar 3 +-3 - -03 2004 Jun +-4 - -04 2004 Jun 20 +-3 A -03/-02 2008 O 18 +-3 - -03 +Z America/Argentina/Mendoza -4:35:16 - LMT 1894 O 31 +-4:16:48 - CMT 1920 May +-4 - -04 1930 D +-4 A -04/-03 1969 O 5 +-3 A -03/-02 1990 Mar 4 +-4 - -04 1990 O 15 +-4 1 -03 1991 Mar +-4 - -04 1991 O 15 +-4 1 -03 1992 Mar +-4 - -04 1992 O 18 +-3 A -03/-02 1999 O 3 +-4 A -04/-03 2000 Mar 3 +-3 - -03 2004 May 23 +-4 - -04 2004 S 26 +-3 A -03/-02 2008 O 18 +-3 - -03 +R Sa 2008 2009 - Mar 
Sun>=8 0 0 - +R Sa 2007 2008 - O Sun>=8 0 1 - +Z America/Argentina/San_Luis -4:25:24 - LMT 1894 O 31 +-4:16:48 - CMT 1920 May +-4 - -04 1930 D +-4 A -04/-03 1969 O 5 +-3 A -03/-02 1990 +-3 1 -02 1990 Mar 14 +-4 - -04 1990 O 15 +-4 1 -03 1991 Mar +-4 - -04 1991 Jun +-3 - -03 1999 O 3 +-4 1 -03 2000 Mar 3 +-3 - -03 2004 May 31 +-4 - -04 2004 Jul 25 +-3 A -03/-02 2008 Ja 21 +-4 Sa -04/-03 2009 O 11 +-3 - -03 +Z America/Argentina/Rio_Gallegos -4:36:52 - LMT 1894 O 31 +-4:16:48 - CMT 1920 May +-4 - -04 1930 D +-4 A -04/-03 1969 O 5 +-3 A -03/-02 1999 O 3 +-4 A -04/-03 2000 Mar 3 +-3 - -03 2004 Jun +-4 - -04 2004 Jun 20 +-3 A -03/-02 2008 O 18 +-3 - -03 +Z America/Argentina/Ushuaia -4:33:12 - LMT 1894 O 31 +-4:16:48 - CMT 1920 May +-4 - -04 1930 D +-4 A -04/-03 1969 O 5 +-3 A -03/-02 1999 O 3 +-4 A -04/-03 2000 Mar 3 +-3 - -03 2004 May 30 +-4 - -04 2004 Jun 20 +-3 A -03/-02 2008 O 18 +-3 - -03 +Li America/Curacao America/Aruba +Z America/La_Paz -4:32:36 - LMT 1890 +-4:32:36 - CMT 1931 O 15 +-4:32:36 1 BST 1932 Mar 21 +-4 - -04 +R B 1931 o - O 3 11 1 - +R B 1932 1933 - Ap 1 0 0 - +R B 1932 o - O 3 0 1 - +R B 1949 1952 - D 1 0 1 - +R B 1950 o - Ap 16 1 0 - +R B 1951 1952 - Ap 1 0 0 - +R B 1953 o - Mar 1 0 0 - +R B 1963 o - D 9 0 1 - +R B 1964 o - Mar 1 0 0 - +R B 1965 o - Ja 31 0 1 - +R B 1965 o - Mar 31 0 0 - +R B 1965 o - D 1 0 1 - +R B 1966 1968 - Mar 1 0 0 - +R B 1966 1967 - N 1 0 1 - +R B 1985 o - N 2 0 1 - +R B 1986 o - Mar 15 0 0 - +R B 1986 o - O 25 0 1 - +R B 1987 o - F 14 0 0 - +R B 1987 o - O 25 0 1 - +R B 1988 o - F 7 0 0 - +R B 1988 o - O 16 0 1 - +R B 1989 o - Ja 29 0 0 - +R B 1989 o - O 15 0 1 - +R B 1990 o - F 11 0 0 - +R B 1990 o - O 21 0 1 - +R B 1991 o - F 17 0 0 - +R B 1991 o - O 20 0 1 - +R B 1992 o - F 9 0 0 - +R B 1992 o - O 25 0 1 - +R B 1993 o - Ja 31 0 0 - +R B 1993 1995 - O Sun>=11 0 1 - +R B 1994 1995 - F Sun>=15 0 0 - +R B 1996 o - F 11 0 0 - +R B 1996 o - O 6 0 1 - +R B 1997 o - F 16 0 0 - +R B 1997 o - O 6 0 1 - +R B 1998 o - Mar 1 0 0 - +R B 1998 o - O 11 0 1 - +R B 1999 o - F 21 0 0 - +R B 1999 o - O 3 0 1 - +R B 2000 o - F 27 0 0 - +R B 2000 2001 - O Sun>=8 0 1 - +R B 2001 2006 - F Sun>=15 0 0 - +R B 2002 o - N 3 0 1 - +R B 2003 o - O 19 0 1 - +R B 2004 o - N 2 0 1 - +R B 2005 o - O 16 0 1 - +R B 2006 o - N 5 0 1 - +R B 2007 o - F 25 0 0 - +R B 2007 o - O Sun>=8 0 1 - +R B 2008 2017 - O Sun>=15 0 1 - +R B 2008 2011 - F Sun>=15 0 0 - +R B 2012 o - F Sun>=22 0 0 - +R B 2013 2014 - F Sun>=15 0 0 - +R B 2015 o - F Sun>=22 0 0 - +R B 2016 2022 - F Sun>=15 0 0 - +R B 2018 ma - N Sun>=1 0 1 - +R B 2023 o - F Sun>=22 0 0 - +R B 2024 2025 - F Sun>=15 0 0 - +R B 2026 o - F Sun>=22 0 0 - +R B 2027 2033 - F Sun>=15 0 0 - +R B 2034 o - F Sun>=22 0 0 - +R B 2035 2036 - F Sun>=15 0 0 - +R B 2037 o - F Sun>=22 0 0 - +R B 2038 ma - F Sun>=15 0 0 - +Z America/Noronha -2:9:40 - LMT 1914 +-2 B -02/-01 1990 S 17 +-2 - -02 1999 S 30 +-2 B -02/-01 2000 O 15 +-2 - -02 2001 S 13 +-2 B -02/-01 2002 O +-2 - -02 +Z America/Belem -3:13:56 - LMT 1914 +-3 B -03/-02 1988 S 12 +-3 - -03 +Z America/Santarem -3:38:48 - LMT 1914 +-4 B -04/-03 1988 S 12 +-4 - -04 2008 Jun 24 +-3 - -03 +Z America/Fortaleza -2:34 - LMT 1914 +-3 B -03/-02 1990 S 17 +-3 - -03 1999 S 30 +-3 B -03/-02 2000 O 22 +-3 - -03 2001 S 13 +-3 B -03/-02 2002 O +-3 - -03 +Z America/Recife -2:19:36 - LMT 1914 +-3 B -03/-02 1990 S 17 +-3 - -03 1999 S 30 +-3 B -03/-02 2000 O 15 +-3 - -03 2001 S 13 +-3 B -03/-02 2002 O +-3 - -03 +Z America/Araguaina -3:12:48 - LMT 1914 +-3 B -03/-02 1990 S 17 +-3 - -03 1995 S 14 +-3 B -03/-02 2003 S 24 
+-3 - -03 2012 O 21 +-3 B -03/-02 2013 S +-3 - -03 +Z America/Maceio -2:22:52 - LMT 1914 +-3 B -03/-02 1990 S 17 +-3 - -03 1995 O 13 +-3 B -03/-02 1996 S 4 +-3 - -03 1999 S 30 +-3 B -03/-02 2000 O 22 +-3 - -03 2001 S 13 +-3 B -03/-02 2002 O +-3 - -03 +Z America/Bahia -2:34:4 - LMT 1914 +-3 B -03/-02 2003 S 24 +-3 - -03 2011 O 16 +-3 B -03/-02 2012 O 21 +-3 - -03 +Z America/Sao_Paulo -3:6:28 - LMT 1914 +-3 B -03/-02 1963 O 23 +-3 1 -02 1964 +-3 B -03/-02 +Z America/Campo_Grande -3:38:28 - LMT 1914 +-4 B -04/-03 +Z America/Cuiaba -3:44:20 - LMT 1914 +-4 B -04/-03 2003 S 24 +-4 - -04 2004 O +-4 B -04/-03 +Z America/Porto_Velho -4:15:36 - LMT 1914 +-4 B -04/-03 1988 S 12 +-4 - -04 +Z America/Boa_Vista -4:2:40 - LMT 1914 +-4 B -04/-03 1988 S 12 +-4 - -04 1999 S 30 +-4 B -04/-03 2000 O 15 +-4 - -04 +Z America/Manaus -4:0:4 - LMT 1914 +-4 B -04/-03 1988 S 12 +-4 - -04 1993 S 28 +-4 B -04/-03 1994 S 22 +-4 - -04 +Z America/Eirunepe -4:39:28 - LMT 1914 +-5 B -05/-04 1988 S 12 +-5 - -05 1993 S 28 +-5 B -05/-04 1994 S 22 +-5 - -05 2008 Jun 24 +-4 - -04 2013 N 10 +-5 - -05 +Z America/Rio_Branco -4:31:12 - LMT 1914 +-5 B -05/-04 1988 S 12 +-5 - -05 2008 Jun 24 +-4 - -04 2013 N 10 +-5 - -05 +R x 1927 1931 - S 1 0 1 - +R x 1928 1932 - Ap 1 0 0 - +R x 1968 o - N 3 4u 1 - +R x 1969 o - Mar 30 3u 0 - +R x 1969 o - N 23 4u 1 - +R x 1970 o - Mar 29 3u 0 - +R x 1971 o - Mar 14 3u 0 - +R x 1970 1972 - O Sun>=9 4u 1 - +R x 1972 1986 - Mar Sun>=9 3u 0 - +R x 1973 o - S 30 4u 1 - +R x 1974 1987 - O Sun>=9 4u 1 - +R x 1987 o - Ap 12 3u 0 - +R x 1988 1990 - Mar Sun>=9 3u 0 - +R x 1988 1989 - O Sun>=9 4u 1 - +R x 1990 o - S 16 4u 1 - +R x 1991 1996 - Mar Sun>=9 3u 0 - +R x 1991 1997 - O Sun>=9 4u 1 - +R x 1997 o - Mar 30 3u 0 - +R x 1998 o - Mar Sun>=9 3u 0 - +R x 1998 o - S 27 4u 1 - +R x 1999 o - Ap 4 3u 0 - +R x 1999 2010 - O Sun>=9 4u 1 - +R x 2000 2007 - Mar Sun>=9 3u 0 - +R x 2008 o - Mar 30 3u 0 - +R x 2009 o - Mar Sun>=9 3u 0 - +R x 2010 o - Ap Sun>=1 3u 0 - +R x 2011 o - May Sun>=2 3u 0 - +R x 2011 o - Au Sun>=16 4u 1 - +R x 2012 2014 - Ap Sun>=23 3u 0 - +R x 2012 2014 - S Sun>=2 4u 1 - +R x 2016 2018 - May Sun>=9 3u 0 - +R x 2016 2018 - Au Sun>=9 4u 1 - +R x 2019 ma - Ap Sun>=2 3u 0 - +R x 2019 ma - S Sun>=2 4u 1 - +Z America/Santiago -4:42:46 - LMT 1890 +-4:42:46 - SMT 1910 Ja 10 +-5 - -05 1916 Jul +-4:42:46 - SMT 1918 S 10 +-4 - -04 1919 Jul +-4:42:46 - SMT 1927 S +-5 x -05/-04 1932 S +-4 - -04 1942 Jun +-5 - -05 1942 Au +-4 - -04 1946 Jul 15 +-4 1 -03 1946 S +-4 - -04 1947 Ap +-5 - -05 1947 May 21 23 +-4 x -04/-03 +Z America/Punta_Arenas -4:43:40 - LMT 1890 +-4:42:46 - SMT 1910 Ja 10 +-5 - -05 1916 Jul +-4:42:46 - SMT 1918 S 10 +-4 - -04 1919 Jul +-4:42:46 - SMT 1927 S +-5 x -05/-04 1932 S +-4 - -04 1942 Jun +-5 - -05 1942 Au +-4 - -04 1947 Ap +-5 - -05 1947 May 21 23 +-4 x -04/-03 2016 D 4 +-3 - -03 +Z Pacific/Easter -7:17:28 - LMT 1890 +-7:17:28 - EMT 1932 S +-7 x -07/-06 1982 Mar 14 3u +-6 x -06/-05 +Z Antarctica/Palmer 0 - -00 1965 +-4 A -04/-03 1969 O 5 +-3 A -03/-02 1982 May +-4 x -04/-03 2016 D 4 +-3 - -03 +R CO 1992 o - May 3 0 1 - +R CO 1993 o - Ap 4 0 0 - +Z America/Bogota -4:56:16 - LMT 1884 Mar 13 +-4:56:16 - BMT 1914 N 23 +-5 CO -05/-04 +Z America/Curacao -4:35:47 - LMT 1912 F 12 +-4:30 - -0430 1965 +-4 - AST +Li America/Curacao America/Lower_Princes +Li America/Curacao America/Kralendijk +R EC 1992 o - N 28 0 1 - +R EC 1993 o - F 5 0 0 - +Z America/Guayaquil -5:19:20 - LMT 1890 +-5:14 - QMT 1931 +-5 EC -05/-04 +Z Pacific/Galapagos -5:58:24 - LMT 1931 +-5 - -05 1986 +-6 EC -06/-05 +R FK 1937 
1938 - S lastSun 0 1 - +R FK 1938 1942 - Mar Sun>=19 0 0 - +R FK 1939 o - O 1 0 1 - +R FK 1940 1942 - S lastSun 0 1 - +R FK 1943 o - Ja 1 0 0 - +R FK 1983 o - S lastSun 0 1 - +R FK 1984 1985 - Ap lastSun 0 0 - +R FK 1984 o - S 16 0 1 - +R FK 1985 2000 - S Sun>=9 0 1 - +R FK 1986 2000 - Ap Sun>=16 0 0 - +R FK 2001 2010 - Ap Sun>=15 2 0 - +R FK 2001 2010 - S Sun>=1 2 1 - +Z Atlantic/Stanley -3:51:24 - LMT 1890 +-3:51:24 - SMT 1912 Mar 12 +-4 FK -04/-03 1983 May +-3 FK -03/-02 1985 S 15 +-4 FK -04/-03 2010 S 5 2 +-3 - -03 +Z America/Cayenne -3:29:20 - LMT 1911 Jul +-4 - -04 1967 O +-3 - -03 +Z America/Guyana -3:52:40 - LMT 1915 Mar +-3:45 - -0345 1975 Jul 31 +-3 - -03 1991 +-4 - -04 +R y 1975 1988 - O 1 0 1 - +R y 1975 1978 - Mar 1 0 0 - +R y 1979 1991 - Ap 1 0 0 - +R y 1989 o - O 22 0 1 - +R y 1990 o - O 1 0 1 - +R y 1991 o - O 6 0 1 - +R y 1992 o - Mar 1 0 0 - +R y 1992 o - O 5 0 1 - +R y 1993 o - Mar 31 0 0 - +R y 1993 1995 - O 1 0 1 - +R y 1994 1995 - F lastSun 0 0 - +R y 1996 o - Mar 1 0 0 - +R y 1996 2001 - O Sun>=1 0 1 - +R y 1997 o - F lastSun 0 0 - +R y 1998 2001 - Mar Sun>=1 0 0 - +R y 2002 2004 - Ap Sun>=1 0 0 - +R y 2002 2003 - S Sun>=1 0 1 - +R y 2004 2009 - O Sun>=15 0 1 - +R y 2005 2009 - Mar Sun>=8 0 0 - +R y 2010 ma - O Sun>=1 0 1 - +R y 2010 2012 - Ap Sun>=8 0 0 - +R y 2013 ma - Mar Sun>=22 0 0 - +Z America/Asuncion -3:50:40 - LMT 1890 +-3:50:40 - AMT 1931 O 10 +-4 - -04 1972 O +-3 - -03 1974 Ap +-4 y -04/-03 +R PE 1938 o - Ja 1 0 1 - +R PE 1938 o - Ap 1 0 0 - +R PE 1938 1939 - S lastSun 0 1 - +R PE 1939 1940 - Mar Sun>=24 0 0 - +R PE 1986 1987 - Ja 1 0 1 - +R PE 1986 1987 - Ap 1 0 0 - +R PE 1990 o - Ja 1 0 1 - +R PE 1990 o - Ap 1 0 0 - +R PE 1994 o - Ja 1 0 1 - +R PE 1994 o - Ap 1 0 0 - +Z America/Lima -5:8:12 - LMT 1890 +-5:8:36 - LMT 1908 Jul 28 +-5 PE -05/-04 +Z Atlantic/South_Georgia -2:26:8 - LMT 1890 +-2 - -02 +Z America/Paramaribo -3:40:40 - LMT 1911 +-3:40:52 - PMT 1935 +-3:40:36 - PMT 1945 O +-3:30 - -0330 1984 O +-3 - -03 +Z America/Port_of_Spain -4:6:4 - LMT 1912 Mar 2 +-4 - AST +Li America/Port_of_Spain America/Anguilla +Li America/Port_of_Spain America/Antigua +Li America/Port_of_Spain America/Dominica +Li America/Port_of_Spain America/Grenada +Li America/Port_of_Spain America/Guadeloupe +Li America/Port_of_Spain America/Marigot +Li America/Port_of_Spain America/Montserrat +Li America/Port_of_Spain America/St_Barthelemy +Li America/Port_of_Spain America/St_Kitts +Li America/Port_of_Spain America/St_Lucia +Li America/Port_of_Spain America/St_Thomas +Li America/Port_of_Spain America/St_Vincent +Li America/Port_of_Spain America/Tortola +R U 1923 1925 - O 1 0 0:30 - +R U 1924 1926 - Ap 1 0 0 - +R U 1933 1938 - O lastSun 0 0:30 - +R U 1934 1941 - Mar lastSat 24 0 - +R U 1939 o - O 1 0 0:30 - +R U 1940 o - O 27 0 0:30 - +R U 1941 o - Au 1 0 0:30 - +R U 1942 o - D 14 0 0:30 - +R U 1943 o - Mar 14 0 0 - +R U 1959 o - May 24 0 0:30 - +R U 1959 o - N 15 0 0 - +R U 1960 o - Ja 17 0 1 - +R U 1960 o - Mar 6 0 0 - +R U 1965 o - Ap 4 0 1 - +R U 1965 o - S 26 0 0 - +R U 1968 o - May 27 0 0:30 - +R U 1968 o - D 1 0 0 - +R U 1970 o - Ap 25 0 1 - +R U 1970 o - Jun 14 0 0 - +R U 1972 o - Ap 23 0 1 - +R U 1972 o - Jul 16 0 0 - +R U 1974 o - Ja 13 0 1:30 - +R U 1974 o - Mar 10 0 0:30 - +R U 1974 o - S 1 0 0 - +R U 1974 o - D 22 0 1 - +R U 1975 o - Mar 30 0 0 - +R U 1976 o - D 19 0 1 - +R U 1977 o - Mar 6 0 0 - +R U 1977 o - D 4 0 1 - +R U 1978 1979 - Mar Sun>=1 0 0 - +R U 1978 o - D 17 0 1 - +R U 1979 o - Ap 29 0 1 - +R U 1980 o - Mar 16 0 0 - +R U 1987 o - D 14 0 1 - +R U 1988 o - 
F 28 0 0 - +R U 1988 o - D 11 0 1 - +R U 1989 o - Mar 5 0 0 - +R U 1989 o - O 29 0 1 - +R U 1990 o - F 25 0 0 - +R U 1990 1991 - O Sun>=21 0 1 - +R U 1991 1992 - Mar Sun>=1 0 0 - +R U 1992 o - O 18 0 1 - +R U 1993 o - F 28 0 0 - +R U 2004 o - S 19 0 1 - +R U 2005 o - Mar 27 2 0 - +R U 2005 o - O 9 2 1 - +R U 2006 2015 - Mar Sun>=8 2 0 - +R U 2006 2014 - O Sun>=1 2 1 - +Z America/Montevideo -3:44:51 - LMT 1908 Jun 10 +-3:44:51 - MMT 1920 May +-4 - -04 1923 O +-3:30 U -0330/-03 1942 D 14 +-3 U -03/-0230 1960 +-3 U -03/-02 1968 +-3 U -03/-0230 1970 +-3 U -03/-02 1974 +-3 U -03/-0130 1974 Mar 10 +-3 U -03/-0230 1974 D 22 +-3 U -03/-02 +Z America/Caracas -4:27:44 - LMT 1890 +-4:27:40 - CMT 1912 F 12 +-4:30 - -0430 1965 +-4 - -04 2007 D 9 3 +-4:30 - -0430 2016 May 1 2:30 +-4 - -04 +Z Etc/GMT 0 - GMT +Z Etc/UTC 0 - UTC +Z Etc/UCT 0 - UCT +Li Etc/GMT GMT +Li Etc/UTC Etc/Universal +Li Etc/UTC Etc/Zulu +Li Etc/GMT Etc/Greenwich +Li Etc/GMT Etc/GMT-0 +Li Etc/GMT Etc/GMT+0 +Li Etc/GMT Etc/GMT0 +Z Etc/GMT-14 14 - +14 +Z Etc/GMT-13 13 - +13 +Z Etc/GMT-12 12 - +12 +Z Etc/GMT-11 11 - +11 +Z Etc/GMT-10 10 - +10 +Z Etc/GMT-9 9 - +09 +Z Etc/GMT-8 8 - +08 +Z Etc/GMT-7 7 - +07 +Z Etc/GMT-6 6 - +06 +Z Etc/GMT-5 5 - +05 +Z Etc/GMT-4 4 - +04 +Z Etc/GMT-3 3 - +03 +Z Etc/GMT-2 2 - +02 +Z Etc/GMT-1 1 - +01 +Z Etc/GMT+1 -1 - -01 +Z Etc/GMT+2 -2 - -02 +Z Etc/GMT+3 -3 - -03 +Z Etc/GMT+4 -4 - -04 +Z Etc/GMT+5 -5 - -05 +Z Etc/GMT+6 -6 - -06 +Z Etc/GMT+7 -7 - -07 +Z Etc/GMT+8 -8 - -08 +Z Etc/GMT+9 -9 - -09 +Z Etc/GMT+10 -10 - -10 +Z Etc/GMT+11 -11 - -11 +Z Etc/GMT+12 -12 - -12 +Z Factory 0 - -00 +Li Africa/Nairobi Africa/Asmera +Li Africa/Abidjan Africa/Timbuktu +Li America/Argentina/Catamarca America/Argentina/ComodRivadavia +Li America/Adak America/Atka +Li America/Argentina/Buenos_Aires America/Buenos_Aires +Li America/Argentina/Catamarca America/Catamarca +Li America/Atikokan America/Coral_Harbour +Li America/Argentina/Cordoba America/Cordoba +Li America/Tijuana America/Ensenada +Li America/Indiana/Indianapolis America/Fort_Wayne +Li America/Indiana/Indianapolis America/Indianapolis +Li America/Argentina/Jujuy America/Jujuy +Li America/Indiana/Knox America/Knox_IN +Li America/Kentucky/Louisville America/Louisville +Li America/Argentina/Mendoza America/Mendoza +Li America/Toronto America/Montreal +Li America/Rio_Branco America/Porto_Acre +Li America/Argentina/Cordoba America/Rosario +Li America/Tijuana America/Santa_Isabel +Li America/Denver America/Shiprock +Li America/Port_of_Spain America/Virgin +Li Pacific/Auckland Antarctica/South_Pole +Li Asia/Ashgabat Asia/Ashkhabad +Li Asia/Kolkata Asia/Calcutta +Li Asia/Shanghai Asia/Chongqing +Li Asia/Shanghai Asia/Chungking +Li Asia/Dhaka Asia/Dacca +Li Asia/Shanghai Asia/Harbin +Li Asia/Urumqi Asia/Kashgar +Li Asia/Kathmandu Asia/Katmandu +Li Asia/Macau Asia/Macao +Li Asia/Yangon Asia/Rangoon +Li Asia/Ho_Chi_Minh Asia/Saigon +Li Asia/Jerusalem Asia/Tel_Aviv +Li Asia/Thimphu Asia/Thimbu +Li Asia/Makassar Asia/Ujung_Pandang +Li Asia/Ulaanbaatar Asia/Ulan_Bator +Li Atlantic/Faroe Atlantic/Faeroe +Li Europe/Oslo Atlantic/Jan_Mayen +Li Australia/Sydney Australia/ACT +Li Australia/Sydney Australia/Canberra +Li Australia/Lord_Howe Australia/LHI +Li Australia/Sydney Australia/NSW +Li Australia/Darwin Australia/North +Li Australia/Brisbane Australia/Queensland +Li Australia/Adelaide Australia/South +Li Australia/Hobart Australia/Tasmania +Li Australia/Melbourne Australia/Victoria +Li Australia/Perth Australia/West +Li Australia/Broken_Hill Australia/Yancowinna +Li America/Rio_Branco 
Brazil/Acre +Li America/Noronha Brazil/DeNoronha +Li America/Sao_Paulo Brazil/East +Li America/Manaus Brazil/West +Li America/Halifax Canada/Atlantic +Li America/Winnipeg Canada/Central +Li America/Toronto Canada/Eastern +Li America/Edmonton Canada/Mountain +Li America/St_Johns Canada/Newfoundland +Li America/Vancouver Canada/Pacific +Li America/Regina Canada/Saskatchewan +Li America/Whitehorse Canada/Yukon +Li America/Santiago Chile/Continental +Li Pacific/Easter Chile/EasterIsland +Li America/Havana Cuba +Li Africa/Cairo Egypt +Li Europe/Dublin Eire +Li Europe/London Europe/Belfast +Li Europe/Chisinau Europe/Tiraspol +Li Europe/London GB +Li Europe/London GB-Eire +Li Etc/GMT GMT+0 +Li Etc/GMT GMT-0 +Li Etc/GMT GMT0 +Li Etc/GMT Greenwich +Li Asia/Hong_Kong Hongkong +Li Atlantic/Reykjavik Iceland +Li Asia/Tehran Iran +Li Asia/Jerusalem Israel +Li America/Jamaica Jamaica +Li Asia/Tokyo Japan +Li Pacific/Kwajalein Kwajalein +Li Africa/Tripoli Libya +Li America/Tijuana Mexico/BajaNorte +Li America/Mazatlan Mexico/BajaSur +Li America/Mexico_City Mexico/General +Li Pacific/Auckland NZ +Li Pacific/Chatham NZ-CHAT +Li America/Denver Navajo +Li Asia/Shanghai PRC +Li Pacific/Honolulu Pacific/Johnston +Li Pacific/Pohnpei Pacific/Ponape +Li Pacific/Pago_Pago Pacific/Samoa +Li Pacific/Chuuk Pacific/Truk +Li Pacific/Chuuk Pacific/Yap +Li Europe/Warsaw Poland +Li Europe/Lisbon Portugal +Li Asia/Taipei ROC +Li Asia/Seoul ROK +Li Asia/Singapore Singapore +Li Europe/Istanbul Turkey +Li Etc/UCT UCT +Li America/Anchorage US/Alaska +Li America/Adak US/Aleutian +Li America/Phoenix US/Arizona +Li America/Chicago US/Central +Li America/Indiana/Indianapolis US/East-Indiana +Li America/New_York US/Eastern +Li Pacific/Honolulu US/Hawaii +Li America/Indiana/Knox US/Indiana-Starke +Li America/Detroit US/Michigan +Li America/Denver US/Mountain +Li America/Los_Angeles US/Pacific +Li Pacific/Pago_Pago US/Samoa +Li Etc/UTC UTC +Li Etc/UTC Universal +Li Europe/Moscow W-SU +Li Etc/UTC Zulu diff --git a/src/timezone/known_abbrevs.txt b/src/timezone/known_abbrevs.txt index eb48069d87..2ae443a7d8 100644 --- a/src/timezone/known_abbrevs.txt +++ b/src/timezone/known_abbrevs.txt @@ -70,15 +70,15 @@ EEST 10800 D EET 7200 EST -18000 GMT 0 +GMT 0 D HDT -32400 D HKT 28800 HST -36000 IDT 10800 D IST 19800 -IST 3600 D +IST 3600 IST 7200 JST 32400 -KST 30600 KST 32400 MDT -21600 D MEST 7200 D @@ -92,11 +92,11 @@ NZST 43200 PDT -25200 D PKT 18000 PST -28800 +PST 28800 SAST 7200 SST -39600 UCT 0 UTC 0 -WAST 7200 D WAT 3600 WEST 3600 D WET 0 diff --git a/src/timezone/localtime.c b/src/timezone/localtime.c index 08642d1236..96e62aff3b 100644 --- a/src/timezone/localtime.c +++ b/src/timezone/localtime.c @@ -1,3 +1,5 @@ +/* Convert timestamp from pg_time_t to struct pg_tm. */ + /* * This file is in the public domain, so clarified as of * 1996-06-05 by Arthur David Olson. @@ -26,15 +28,15 @@ #ifndef WILDABBR /* * Someone might make incorrect use of a time zone abbreviation: - * 1. They might reference tzname[0] before calling tzset (explicitly + * 1. They might reference tzname[0] before calling tzset (explicitly * or implicitly). - * 2. They might reference tzname[1] before calling tzset (explicitly + * 2. They might reference tzname[1] before calling tzset (explicitly * or implicitly). - * 3. They might reference tzname[1] after setting to a time zone + * 3. They might reference tzname[1] after setting to a time zone * in which Daylight Saving Time is never observed. - * 4. They might reference tzname[0] after setting to a time zone + * 4. 
They might reference tzname[0] after setting to a time zone * in which Standard Time is never observed. - * 5. They might reference tm.TM_ZONE after calling offtime. + * 5. They might reference tm.TM_ZONE after calling offtime. * What's best to do in the above cases is open to debate; * for now, we just set things up so that in any of the five cases * WILDABBR is used. Another possibility: initialize tzname[0] to the @@ -50,25 +52,20 @@ static const char wildabbr[] = WILDABBR; static const char gmt[] = "GMT"; -/* The minimum and maximum finite time values. This assumes no padding. */ -static const pg_time_t time_t_min = MINVAL(pg_time_t, TYPE_BIT(pg_time_t)); -static const pg_time_t time_t_max = MAXVAL(pg_time_t, TYPE_BIT(pg_time_t)); - /* - * We cache the result of trying to load the TZDEFRULES zone here. + * PG: We cache the result of trying to load the TZDEFRULES zone here. * tzdefrules_loaded is 0 if not tried yet, +1 if good, -1 if failed. */ -static struct state tzdefrules_s; +static struct state *tzdefrules_s = NULL; static int tzdefrules_loaded = 0; /* * The DST rules to use if TZ has no rules and we can't load TZDEFRULES. - * We default to US rules as of 1999-08-17. - * POSIX 1003.1 section 8.1.1 says that the default DST rules are - * implementation dependent; for historical reasons, US rules are a - * common default. + * Default to US rules as of 2017-05-07. + * POSIX does not specify the default DST rules; + * for historical reasons, US rules are a common default. */ -#define TZDEFRULESTRING ",M4.1.0,M10.5.0" +#define TZDEFRULESTRING ",M3.2.0,M11.1.0" /* structs ttinfo, lsinfo, state have been moved to pgtz.h */ @@ -122,7 +119,7 @@ init_ttinfo(struct ttinfo *s, int32 gmtoff, bool isdst, int abbrind) } static int32 -detzcode(const char *codep) +detzcode(const char *const codep) { int32 result; int i; @@ -148,7 +145,7 @@ detzcode(const char *codep) } static int64 -detzcode64(const char *codep) +detzcode64(const char *const codep) { uint64 result; int i; @@ -195,10 +192,8 @@ union input_buffer /* Local storage needed for 'tzloadbody'. */ union local_storage { - /* We don't need the "fullname" member */ - /* The results of analyzing the file's contents after it is opened. */ - struct + struct file_analysis { /* The input buffer. */ union input_buffer u; @@ -206,6 +201,8 @@ union local_storage /* A temporary state used for parsing a TZ string in the file. */ struct state st; } u; + + /* We don't need the "fullname" member */ }; /* Load tz data from the file named NAME into *SP. Read extended @@ -255,14 +252,21 @@ tzloadbody(char const *name, char *canonname, struct state *sp, bool doextend, { int32 ttisstdcnt = detzcode(up->tzhead.tzh_ttisstdcnt); int32 ttisgmtcnt = detzcode(up->tzhead.tzh_ttisgmtcnt); + int64 prevtr = 0; + int32 prevcorr = 0; int32 leapcnt = detzcode(up->tzhead.tzh_leapcnt); int32 timecnt = detzcode(up->tzhead.tzh_timecnt); int32 typecnt = detzcode(up->tzhead.tzh_typecnt); int32 charcnt = detzcode(up->tzhead.tzh_charcnt); char const *p = up->buf + tzheadsize; + /* + * Although tzfile(5) currently requires typecnt to be nonzero, + * support future formats that may allow zero typecnt in files that + * have a TZ string and no transitions. 
+ */ if (!(0 <= leapcnt && leapcnt < TZ_MAX_LEAPS - && 0 < typecnt && typecnt < TZ_MAX_TYPES + && 0 <= typecnt && typecnt < TZ_MAX_TYPES && 0 <= timecnt && timecnt < TZ_MAX_TIMES && 0 <= charcnt && charcnt < TZ_MAX_CHARS && (ttisstdcnt == typecnt || ttisstdcnt == 0) @@ -285,8 +289,8 @@ tzloadbody(char const *name, char *canonname, struct state *sp, bool doextend, /* * Read transitions, discarding those out of pg_time_t range. But - * pretend the last transition before time_t_min occurred at - * time_t_min. + * pretend the last transition before TIME_T_MIN occurred at + * TIME_T_MIN. */ timecnt = 0; for (i = 0; i < sp->timecnt; ++i) @@ -294,12 +298,12 @@ tzloadbody(char const *name, char *canonname, struct state *sp, bool doextend, int64 at = stored == 4 ? detzcode(p) : detzcode64(p); - sp->types[i] = at <= time_t_max; + sp->types[i] = at <= TIME_T_MAX; if (sp->types[i]) { pg_time_t attime - = ((TYPE_SIGNED(pg_time_t) ? at < time_t_min : at < 0) - ? time_t_min : at); + = ((TYPE_SIGNED(pg_time_t) ? at < TIME_T_MIN : at < 0) + ? TIME_T_MIN : at); if (timecnt && attime <= sp->ats[timecnt - 1]) { @@ -354,20 +358,22 @@ tzloadbody(char const *name, char *canonname, struct state *sp, bool doextend, int32 corr = detzcode(p + stored); p += stored + 4; - if (tr <= time_t_max) + /* Leap seconds cannot occur before the Epoch. */ + if (tr < 0) + return EINVAL; + if (tr <= TIME_T_MAX) { - pg_time_t trans - = ((TYPE_SIGNED(pg_time_t) ? tr < time_t_min : tr < 0) - ? time_t_min : tr); - - if (leapcnt && trans <= sp->lsis[leapcnt - 1].ls_trans) - { - if (trans < sp->lsis[leapcnt - 1].ls_trans) - return EINVAL; - leapcnt--; - } - sp->lsis[leapcnt].ls_trans = trans; - sp->lsis[leapcnt].ls_corr = corr; + /* + * Leap seconds cannot occur more than once per UTC month, and + * UTC months are at least 28 days long (minus 1 second for a + * negative leap second). Each leap second's correction must + * differ from the previous one's by 1 second. + */ + if (tr - prevtr < 28 * SECSPERDAY - 1 + || (corr != prevcorr - 1 && corr != prevcorr + 1)) + return EINVAL; + sp->lsis[leapcnt].ls_trans = prevtr = tr; + sp->lsis[leapcnt].ls_corr = prevcorr = corr; leapcnt++; } } @@ -417,8 +423,7 @@ tzloadbody(char const *name, char *canonname, struct state *sp, bool doextend, struct state *ts = &lsp->u.st; up->buf[nread - 1] = '\0'; - if (tzparse(&up->buf[1], ts, false) - && ts->typecnt == 2) + if (tzparse(&up->buf[1], ts, false)) { /* * Attempt to reuse existing abbreviations. 
Without this, @@ -431,7 +436,7 @@ tzloadbody(char const *name, char *canonname, struct state *sp, bool doextend, int gotabbr = 0; int charcnt = sp->charcnt; - for (i = 0; i < 2; i++) + for (i = 0; i < ts->typecnt; i++) { char *tsabbr = ts->chars + ts->ttis[i].tt_abbrind; int j; @@ -456,7 +461,7 @@ tzloadbody(char const *name, char *canonname, struct state *sp, bool doextend, } } } - if (gotabbr == 2) + if (gotabbr == ts->typecnt) { sp->charcnt = charcnt; @@ -471,7 +476,8 @@ tzloadbody(char const *name, char *canonname, struct state *sp, bool doextend, sp->timecnt--; for (i = 0; i < ts->timecnt; i++) - if (sp->ats[sp->timecnt - 1] < ts->ats[i]) + if (sp->timecnt == 0 + || sp->ats[sp->timecnt - 1] < ts->ats[i]) break; while (i < ts->timecnt && sp->timecnt < TZ_MAX_TIMES) @@ -482,11 +488,13 @@ tzloadbody(char const *name, char *canonname, struct state *sp, bool doextend, sp->timecnt++; i++; } - sp->ttis[sp->typecnt++] = ts->ttis[0]; - sp->ttis[sp->typecnt++] = ts->ttis[1]; + for (i = 0; i < ts->typecnt; i++) + sp->ttis[sp->typecnt++] = ts->ttis[i]; } } } + if (sp->typecnt == 0) + return EINVAL; if (sp->timecnt > 1) { for (i = 1; i < sp->timecnt; ++i) @@ -507,6 +515,18 @@ tzloadbody(char const *name, char *canonname, struct state *sp, bool doextend, } } + /* + * Infer sp->defaulttype from the data. Although this default type is + * always zero for data from recent tzdb releases, things are trickier for + * data from tzdb 2018e or earlier. + * + * The first set of heuristics work around bugs in 32-bit data generated + * by tzdb 2013c or earlier. The workaround is for zones like + * Australia/Macquarie where timestamps before the first transition have a + * time type that is not the earliest standard-time type. See: + * https://mm.icann.org/pipermail/tz/2013-May/019368.html + */ + /* * If type 0 is unused in transitions, it's the type to use for early * times. @@ -529,6 +549,11 @@ tzloadbody(char const *name, char *canonname, struct state *sp, bool doextend, break; } + /* + * The next heuristics are for data generated by tzdb 2018e or earlier, + * for zones like EST5EDT where the first transition is to DST. + */ + /* * If no result yet, find the first standard type. If there is none, punt * to type zero. @@ -543,7 +568,14 @@ tzloadbody(char const *name, char *canonname, struct state *sp, bool doextend, break; } } + + /* + * A simple 'sp->defaulttype = 0;' would suffice here if we didn't have to + * worry about 2018e-or-earlier data. Even simpler would be to remove the + * defaulttype member and just use 0 in its place. + */ sp->defaulttype = i; + return 0; } @@ -602,10 +634,11 @@ static const int year_lengths[2] = { }; /* - * Given a pointer into a time zone string, scan until a character that is not - * a valid character in a zone name is found. Return a pointer to that - * character. + * Given a pointer into a timezone string, scan until a character that is not + * a valid character in a time zone abbreviation is found. + * Return a pointer to that character. */ + static const char * getzname(const char *strp) { @@ -618,15 +651,17 @@ getzname(const char *strp) } /* - * Given a pointer into an extended time zone string, scan until the ending - * delimiter of the zone name is located. Return a pointer to the delimiter. + * Given a pointer into an extended timezone string, scan until the ending + * delimiter of the time zone abbreviation is located. + * Return a pointer to the delimiter. 
* * As with getzname above, the legal character set is actually quite * restricted, with other characters producing undefined results. * We don't do any checking here; checking is done later in common-case code. */ + static const char * -getqzname(const char *strp, int delim) +getqzname(const char *strp, const int delim) { int c; @@ -636,13 +671,14 @@ getqzname(const char *strp, int delim) } /* - * Given a pointer into a time zone string, extract a number from that string. + * Given a pointer into a timezone string, extract a number from that string. * Check that the number is within a specified range; if it is not, return * NULL. * Otherwise, return a pointer to the first character not part of the number. */ + static const char * -getnum(const char *strp, int *nump, int min, int max) +getnum(const char *strp, int *const nump, const int min, const int max) { char c; int num; @@ -664,14 +700,15 @@ getnum(const char *strp, int *nump, int min, int max) } /* - * Given a pointer into a time zone string, extract a number of seconds, + * Given a pointer into a timezone string, extract a number of seconds, * in hh[:mm[:ss]] form, from the string. * If any error occurs, return NULL. * Otherwise, return a pointer to the first character not part of the number * of seconds. */ + static const char * -getsecs(const char *strp, int32 *secsp) +getsecs(const char *strp, int32 *const secsp) { int num; @@ -705,13 +742,14 @@ getsecs(const char *strp, int32 *secsp) } /* - * Given a pointer into a time zone string, extract an offset, in + * Given a pointer into a timezone string, extract an offset, in * [+-]hh[:mm[:ss]] form, from the string. * If any error occurs, return NULL. * Otherwise, return a pointer to the first character not part of the time. */ + static const char * -getoffset(const char *strp, int32 *offsetp) +getoffset(const char *strp, int32 *const offsetp) { bool neg = false; @@ -731,13 +769,14 @@ getoffset(const char *strp, int32 *offsetp) } /* - * Given a pointer into a time zone string, extract a rule in the form + * Given a pointer into a timezone string, extract a rule in the form * date[/time]. See POSIX section 8 for the format of "date" and "time". * If a valid rule is not found, return NULL. * Otherwise, return a pointer to the first character not part of the rule. */ + static const char * -getrule(const char *strp, struct rule *rulep) +getrule(const char *strp, struct rule *const rulep) { if (*strp == 'J') { @@ -796,14 +835,15 @@ getrule(const char *strp, struct rule *rulep) * Given a year, a rule, and the offset from UT at the time that rule takes * effect, calculate the year-relative time that rule takes effect. */ + static int32 -transtime(int year, const struct rule *rulep, - int32 offset) +transtime(const int year, const struct rule *const rulep, + const int32 offset) { bool leapyear; int32 value; - int i, - d, + int i; + int d, m1, yy0, yy1, @@ -909,20 +949,10 @@ tzparse(const char *name, struct state *sp, bool lastditch) stdname = name; if (lastditch) { - /* - * This is intentionally somewhat different from the IANA code. We do - * not want to invoke tzload() in the lastditch case: we can't assume - * pg_open_tzfile() is sane yet, and we don't care about leap seconds - * anyway. 
- */ + /* Unlike IANA, don't assume name is exactly "GMT" */ stdlen = strlen(name); /* length of standard zone name */ name += stdlen; - if (stdlen >= sizeof sp->chars) - stdlen = (sizeof sp->chars) - 1; - charcnt = stdlen + 1; stdoffset = 0; - sp->goback = sp->goahead = false; /* simulate failed tzload() */ - load_ok = false; } else { @@ -946,27 +976,23 @@ tzparse(const char *name, struct state *sp, bool lastditch) name = getoffset(name, &stdoffset); if (name == NULL) return false; - charcnt = stdlen + 1; - if (sizeof sp->chars < charcnt) - return false; - - /* - * This bit also differs from the IANA code, which doesn't make any - * attempt to avoid repetitive loadings of the TZDEFRULES zone. - */ - if (tzdefrules_loaded == 0) - { - if (tzload(TZDEFRULES, NULL, &tzdefrules_s, false) == 0) - tzdefrules_loaded = 1; - else - tzdefrules_loaded = -1; - } - load_ok = (tzdefrules_loaded > 0); - if (load_ok) - memcpy(sp, &tzdefrules_s, sizeof(struct state)); } - if (!load_ok) - sp->leapcnt = 0; /* so, we're off a little */ + charcnt = stdlen + 1; + if (sizeof sp->chars < charcnt) + return false; + + /* + * The IANA code always tries tzload(TZDEFRULES) here. We do not want to + * do that; it would be bad news in the lastditch case, where we can't + * assume pg_open_tzfile() is sane yet. Moreover, the only reason to do + * it unconditionally is to absorb the TZDEFRULES zone's leap second info, + * which we don't want to do anyway. Without that, we only need to load + * TZDEFRULES if the zone name specifies DST but doesn't incorporate a + * POSIX-style transition date rule, which is not a common case. + */ + sp->goback = sp->goahead = false; /* simulate failed tzload() */ + sp->leapcnt = 0; /* intentionally assume no leap seconds */ + if (*name != '\0') { if (*name == '<') @@ -982,7 +1008,7 @@ tzparse(const char *name, struct state *sp, bool lastditch) { dstname = name; name = getzname(name); - dstlen = name - dstname; /* length of DST zone name */ + dstlen = name - dstname; /* length of DST abbr. */ } if (!dstlen) return false; @@ -997,8 +1023,38 @@ tzparse(const char *name, struct state *sp, bool lastditch) } else dstoffset = stdoffset - SECSPERHOUR; - if (*name == '\0' && !load_ok) - name = TZDEFRULESTRING; + if (*name == '\0') + { + /* + * The POSIX zone name does not provide a transition-date rule. + * Here we must load the TZDEFRULES zone, if possible, to serve as + * source data for the transition dates. Unlike the IANA code, we + * try to cache the data so it's only loaded once. + */ + if (tzdefrules_loaded == 0) + { + /* Allocate on first use */ + if (tzdefrules_s == NULL) + tzdefrules_s = (struct state *) malloc(sizeof(struct state)); + if (tzdefrules_s != NULL) + { + if (tzload(TZDEFRULES, NULL, tzdefrules_s, false) == 0) + tzdefrules_loaded = 1; + else + tzdefrules_loaded = -1; + /* In any case, we ignore leap-second data from the file */ + tzdefrules_s->leapcnt = 0; + } + } + load_ok = (tzdefrules_loaded > 0); + if (load_ok) + memcpy(sp, tzdefrules_s, sizeof(struct state)); + else + { + /* If we can't load TZDEFRULES, fall back to hard-wired rule */ + name = TZDEFRULESTRING; + } + } if (*name == ',' || *name == ';') { struct rule start; @@ -1024,8 +1080,8 @@ tzparse(const char *name, struct state *sp, bool lastditch) /* * Two transitions per year, from EPOCH_YEAR forward. 
*/ - init_ttinfo(&sp->ttis[0], -dstoffset, true, stdlen + 1); - init_ttinfo(&sp->ttis[1], -stdoffset, false, 0); + init_ttinfo(&sp->ttis[0], -stdoffset, false, 0); + init_ttinfo(&sp->ttis[1], -dstoffset, true, stdlen + 1); sp->defaulttype = 0; timecnt = 0; janfirst = 0; @@ -1074,19 +1130,15 @@ tzparse(const char *name, struct state *sp, bool lastditch) if (!increment_overflow_time (&sp->ats[timecnt], janoffset + starttime)) - sp->types[timecnt++] = reversed; - else if (janoffset) - sp->defaulttype = reversed; + sp->types[timecnt++] = !reversed; sp->ats[timecnt] = janfirst; if (!increment_overflow_time (&sp->ats[timecnt], janoffset + endtime)) { - sp->types[timecnt++] = !reversed; + sp->types[timecnt++] = reversed; yearlim = year + YEARSPERREPEAT + 1; } - else if (janoffset) - sp->defaulttype = !reversed; } if (increment_overflow_time (&janfirst, janoffset + yearsecs)) @@ -1095,7 +1147,10 @@ tzparse(const char *name, struct state *sp, bool lastditch) } sp->timecnt = timecnt; if (!timecnt) + { + sp->ttis[0] = sp->ttis[1]; sp->typecnt = 1; /* Perpetual DST. */ + } else if (YEARSPERREPEAT < year - yearbeg) sp->goback = sp->goahead = true; } @@ -1158,12 +1213,12 @@ tzparse(const char *name, struct state *sp, bool lastditch) else { /* - * If summer time is in effect, and the transition time - * was not specified as standard time, add the summer time - * offset to the transition time; otherwise, add the - * standard time offset to the transition time. + * If daylight saving time is in effect, and the + * transition time was not specified as standard time, add + * the daylight saving time offset to the transition time; + * otherwise, add the standard time offset to the + * transition time. */ - /* * Transitions from DST to DDST will effectively disappear * since POSIX provides for only one DST offset. @@ -1217,7 +1272,7 @@ tzparse(const char *name, struct state *sp, bool lastditch) } static void -gmtload(struct state *sp) +gmtload(struct state *const sp) { if (tzload(gmt, NULL, sp, true) != 0) tzparse(gmt, sp, true); @@ -1232,7 +1287,7 @@ gmtload(struct state *sp) */ static struct pg_tm * localsub(struct state const *sp, pg_time_t const *timep, - struct pg_tm *tmp) + struct pg_tm *const tmp) { const struct ttinfo *ttisp; int i; @@ -1300,6 +1355,11 @@ localsub(struct state const *sp, pg_time_t const *timep, } ttisp = &sp->ttis[i]; + /* + * To get (wrong) behavior that's compatible with System V Release 2.0 + * you'd replace the statement below with t += ttisp->tt_gmtoff; + * timesub(&t, 0L, sp, tmp); + */ result = timesub(&t, ttisp->tt_gmtoff, sp, tmp); if (result) { @@ -1322,21 +1382,25 @@ pg_localtime(const pg_time_t *timep, const pg_tz *tz) * * Except we have a private "struct state" for GMT, so no sp is passed in. 
*/ + static struct pg_tm * -gmtsub(pg_time_t const *timep, int32 offset, struct pg_tm *tmp) +gmtsub(pg_time_t const *timep, int32 offset, + struct pg_tm *tmp) { struct pg_tm *result; /* GMT timezone state data is kept here */ - static struct state gmtmem; - static bool gmt_is_set = false; -#define gmtptr (&gmtmem) + static struct state *gmtptr = NULL; - if (!gmt_is_set) + if (gmtptr == NULL) { - gmt_is_set = true; + /* Allocate on first use */ + gmtptr = (struct state *) malloc(sizeof(struct state)); + if (gmtptr == NULL) + return NULL; /* errno should be set by malloc */ gmtload(gmtptr); } + result = timesub(timep, offset, gmtptr, tmp); /* @@ -1361,11 +1425,19 @@ pg_gmtime(const pg_time_t *timep) * Return the number of leap years through the end of the given year * where, to make the math easy, the answer for year zero is defined as zero. */ + +static int +leaps_thru_end_of_nonneg(int y) +{ + return y / 4 - y / 100 + y / 400; +} + static int leaps_thru_end_of(const int y) { - return (y >= 0) ? (y / 4 - y / 100 + y / 400) : - -(leaps_thru_end_of(-(y + 1)) + 1); + return (y < 0 + ? -1 - leaps_thru_end_of_nonneg(-1 - y) + : leaps_thru_end_of_nonneg(y)); } static struct pg_tm * @@ -1390,22 +1462,9 @@ timesub(const pg_time_t *timep, int32 offset, lp = &sp->lsis[i]; if (*timep >= lp->ls_trans) { - if (*timep == lp->ls_trans) - { - hit = ((i == 0 && lp->ls_corr > 0) || - lp->ls_corr > sp->lsis[i - 1].ls_corr); - if (hit) - while (i > 0 && - sp->lsis[i].ls_trans == - sp->lsis[i - 1].ls_trans + 1 && - sp->lsis[i].ls_corr == - sp->lsis[i - 1].ls_corr + 1) - { - ++hit; - --i; - } - } corr = lp->ls_corr; + hit = (*timep == lp->ls_trans + && (i == 0 ? 0 : lp[-1].ls_corr) < corr); break; } } @@ -1529,13 +1588,13 @@ increment_overflow_time(pg_time_t *tp, int32 j) { /*---------- * This is like - * 'if (! (time_t_min <= *tp + j && *tp + j <= time_t_max)) ...', + * 'if (! (TIME_T_MIN <= *tp + j && *tp + j <= TIME_T_MAX)) ...', * except that it does the right thing even if *tp + j would overflow. *---------- */ if (!(j < 0 - ? (TYPE_SIGNED(pg_time_t) ? time_t_min - j <= *tp : -1 - j < *tp) - : *tp <= time_t_max - j)) + ? (TYPE_SIGNED(pg_time_t) ? TIME_T_MIN - j <= *tp : -1 - j < *tp) + : *tp <= TIME_T_MAX - j)) return true; *tp += j; return false; diff --git a/src/timezone/pgtz.c b/src/timezone/pgtz.c index a73dc6188b..7a476eabf7 100644 --- a/src/timezone/pgtz.c +++ b/src/timezone/pgtz.c @@ -3,7 +3,7 @@ * pgtz.c * Timezone Library Integration Functions * - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * * IDENTIFICATION * src/timezone/pgtz.c @@ -156,15 +156,8 @@ scan_directory_ci(const char *dirname, const char *fname, int fnamelen, struct dirent *direntry; dirdesc = AllocateDir(dirname); - if (!dirdesc) - { - ereport(LOG, - (errcode_for_file_access(), - errmsg("could not open directory \"%s\": %m", dirname))); - return false; - } - while ((direntry = ReadDir(dirdesc, dirname)) != NULL) + while ((direntry = ReadDirExtended(dirdesc, dirname, LOG)) != NULL) { /* * Ignore . and .., plus any other "hidden" files. This is a security diff --git a/src/timezone/pgtz.h b/src/timezone/pgtz.h index 3d89ba00a7..a07dced583 100644 --- a/src/timezone/pgtz.h +++ b/src/timezone/pgtz.h @@ -6,7 +6,7 @@ * Note: this file contains only definitions that are private to the * timezone library. Public definitions are in pgtime.h. 
* - * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group * * IDENTIFICATION * src/timezone/pgtz.h @@ -49,10 +49,16 @@ struct state pg_time_t ats[TZ_MAX_TIMES]; unsigned char types[TZ_MAX_TIMES]; struct ttinfo ttis[TZ_MAX_TYPES]; - char chars[BIGGEST(BIGGEST(TZ_MAX_CHARS + 1, 3 /* sizeof gmt */ ), + char chars[BIGGEST(BIGGEST(TZ_MAX_CHARS + 1, 4 /* sizeof gmt */ ), (2 * (TZ_STRLEN_MAX + 1)))]; struct lsinfo lsis[TZ_MAX_LEAPS]; - int defaulttype; /* for early times or if no transitions */ + + /* + * The time type to use for early times or if no transitions. It is always + * zero for recent tzdb releases. It might be nonzero for data from tzdb + * 2018e or earlier. + */ + int defaulttype; }; diff --git a/src/timezone/private.h b/src/timezone/private.h index c141fb6131..a952c4dc30 100644 --- a/src/timezone/private.h +++ b/src/timezone/private.h @@ -1,4 +1,7 @@ +/* Private header for tzdb code. */ + #ifndef PRIVATE_H + #define PRIVATE_H /* @@ -38,30 +41,9 @@ #define EOVERFLOW EINVAL #endif -#ifndef WIFEXITED -#define WIFEXITED(status) (((status) & 0xff) == 0) -#endif /* !defined WIFEXITED */ -#ifndef WEXITSTATUS -#define WEXITSTATUS(status) (((status) >> 8) & 0xff) -#endif /* !defined WEXITSTATUS */ - /* Unlike 's isdigit, this also works if c < 0 | c > UCHAR_MAX. */ #define is_digit(c) ((unsigned)(c) - '0' <= 9) -#ifndef SIZE_MAX -#define SIZE_MAX ((size_t) -1) -#endif - -/* - * SunOS 4.1.1 libraries lack remove. - */ - -#ifndef remove -extern int unlink(const char *filename); - -#define remove unlink -#endif /* !defined remove */ - /* * Finally, some convenience items. @@ -82,6 +64,10 @@ extern int unlink(const char *filename); #define MINVAL(t, b) \ ((t) (TYPE_SIGNED(t) ? - TWOS_COMPLEMENT(t) - MAXVAL(t, b) : 0)) +/* The extreme time values, assuming no padding. */ +#define TIME_T_MIN MINVAL(pg_time_t, TYPE_BIT(pg_time_t)) +#define TIME_T_MAX MAXVAL(pg_time_t, TYPE_BIT(pg_time_t)) + /* * 302 / 1000 is log10(2.0) rounded up. * Subtract one for the sign bit if the type is signed; @@ -95,7 +81,7 @@ extern int unlink(const char *filename); /* * INITIALIZE(x) */ -#define INITIALIZE(x) ((x) = 0) +#define INITIALIZE(x) ((x) = 0) #undef _ #define _(msgid) (msgid) @@ -150,7 +136,7 @@ extern int unlink(const char *filename); * or * isleap(a + b) == isleap(a % 400 + b % 400) * This is true even if % means modulo rather than Fortran remainder - * (which is allowed by C89 but not C99). + * (which is allowed by C89 but not by C99 or later). * We use this to avoid addition overflow problems. */ diff --git a/src/timezone/strftime.c b/src/timezone/strftime.c index 7cbafc9d83..217dd375f3 100644 --- a/src/timezone/strftime.c +++ b/src/timezone/strftime.c @@ -1,4 +1,4 @@ -/* Convert a broken-down timestamp to a string. */ +/* Convert a broken-down timestamp to a string. */ /* * Copyright 1989 The Regents of the University of California. @@ -82,17 +82,17 @@ static const struct lc_time_T C_time_locale = { /* * x_fmt * - * C99 requires this format. Using just numbers (as here) makes Quakers - * happier; it's also compatible with SVR4. + * C99 and later require this format. Using just numbers (as here) makes + * Quakers happier; it's also compatible with SVR4. */ "%m/%d/%y", /* * c_fmt * - * C99 requires this format. Previously this code used "%D %X", but we now - * conform to C99. Note that "%a %b %d %H:%M:%S %Y" is used by Solaris - * 2.3. + * C99 and later require this format. 
Previously this code used "%D %X", + * but we now conform to C99. Note that "%a %b %d %H:%M:%S %Y" is used by + * Solaris 2.3. */ "%a %b %e %T %Y", @@ -106,26 +106,24 @@ static const struct lc_time_T C_time_locale = { "%a %b %e %H:%M:%S %Z %Y" }; +enum warn +{ + IN_NONE, IN_SOME, IN_THIS, IN_ALL +}; + static char *_add(const char *, char *, const char *); static char *_conv(int, const char *, char *, const char *); -static char *_fmt(const char *, const struct pg_tm *, char *, - const char *, int *); -static char *_yconv(int, int, bool, bool, char *, const char *); - -#define IN_NONE 0 -#define IN_SOME 1 -#define IN_THIS 2 -#define IN_ALL 3 +static char *_fmt(const char *, const struct pg_tm *, char *, const char *, + enum warn *); +static char *_yconv(int, int, bool, bool, char *, char const *); size_t -pg_strftime(char *s, size_t maxsize, const char *format, - const struct pg_tm *t) +pg_strftime(char *s, size_t maxsize, const char *format, const struct pg_tm *t) { char *p; - int warn; + enum warn warn = IN_NONE; - warn = IN_NONE; p = _fmt(format, t, s, s + maxsize, &warn); if (p == s + maxsize) return 0; @@ -134,8 +132,8 @@ pg_strftime(char *s, size_t maxsize, const char *format, } static char * -_fmt(const char *format, const struct pg_tm *t, char *pt, const char *ptlim, - int *warnp) +_fmt(const char *format, const struct pg_tm *t, char *pt, + const char *ptlim, enum warn *warnp) { for (; *format; ++format) { @@ -184,7 +182,7 @@ _fmt(const char *format, const struct pg_tm *t, char *pt, const char *ptlim, continue; case 'c': { - int warn2 = IN_SOME; + enum warn warn2 = IN_SOME; pt = _fmt(Locale->c_fmt, t, pt, ptlim, &warn2); if (warn2 == IN_ALL) @@ -203,9 +201,9 @@ _fmt(const char *format, const struct pg_tm *t, char *pt, const char *ptlim, case 'O': /* - * C99 locale modifiers. The sequences %Ec %EC %Ex %EX - * %Ey %EY %Od %oe %OH %OI %Om %OM %OS %Ou %OU %OV %Ow - * %OW %Oy are supposed to provide alternate + * Locale modifiers of C99 and later. The sequences %Ec + * %EC %Ex %EX %Ey %EY %Od %oe %OH %OI %Om %OM %OS %Ou %OU + * %OV %Ow %OW %Oy are supposed to provide alternative * representations. */ goto label; @@ -229,9 +227,9 @@ _fmt(const char *format, const struct pg_tm *t, char *pt, const char *ptlim, case 'k': /* - * This used to be... _conv(t->tm_hour % 12 ? t->tm_hour - * % 12 : 12, 2, ' '); ...and has been changed to the - * below to match SunOS 4.1.1 and Arnold Robbins' strftime + * This used to be... _conv(t->tm_hour % 12 ? t->tm_hour % + * 12 : 12, 2, ' '); ...and has been changed to the below + * to match SunOS 4.1.1 and Arnold Robbins' strftime * version 3.0. That is, "%k" and "%l" have been swapped. * (ado, 1993-05-24) */ @@ -249,7 +247,7 @@ _fmt(const char *format, const struct pg_tm *t, char *pt, const char *ptlim, case 'l': /* - * This used to be... _conv(t->tm_hour, 2, ' '); ...and + * This used to be... _conv(t->tm_hour, 2, ' '); ...and * has been changed to the below to match SunOS 4.1.1 and * Arnold Robbin's strftime version 3.0. That is, "%k" and * "%l" have been swapped. (ado, 1993-05-24) @@ -313,7 +311,7 @@ _fmt(const char *format, const struct pg_tm *t, char *pt, const char *ptlim, * (01-53)." * (ado, 1993-05-24) * - * From by Markus Kuhn: + * From by Markus Kuhn: * "Week 01 of a year is per definition the first week which has the * Thursday in this year, which is equivalent to the week which contains * the fourth day of January. 
In other words, the first week of a new year @@ -417,7 +415,7 @@ _fmt(const char *format, const struct pg_tm *t, char *pt, const char *ptlim, continue; case 'x': { - int warn2 = IN_SOME; + enum warn warn2 = IN_SOME; pt = _fmt(Locale->x_fmt, t, pt, ptlim, &warn2); if (warn2 == IN_ALL) @@ -442,8 +440,9 @@ _fmt(const char *format, const struct pg_tm *t, char *pt, const char *ptlim, pt = _add(t->tm_zone, pt, ptlim); /* - * C99 says that %Z must be replaced by the empty string - * if the time zone is not determinable. + * C99 and later say that %Z must be replaced by the empty + * string if the time zone abbreviation is not + * determinable. */ continue; case 'z': @@ -483,7 +482,7 @@ _fmt(const char *format, const struct pg_tm *t, char *pt, const char *ptlim, /* * X311J/88-090 (4.12.3.5): if conversion char is - * undefined, behavior is undefined. Print out the + * undefined, behavior is undefined. Print out the * character itself as printf(3) also does. */ default: @@ -521,6 +520,7 @@ _add(const char *str, char *pt, const char *ptlim) * same output as %Y, and that %Y contains at least 4 bytes, * with more only if necessary. */ + static char * _yconv(int a, int b, bool convert_top, bool convert_yy, char *pt, const char *ptlim) @@ -528,7 +528,7 @@ _yconv(int a, int b, bool convert_top, bool convert_yy, int lead; int trail; -#define DIVISOR 100 +#define DIVISOR 100 trail = a % DIVISOR + b % DIVISOR; lead = a / DIVISOR + b / DIVISOR + trail / DIVISOR; trail %= DIVISOR; diff --git a/src/timezone/tzfile.h b/src/timezone/tzfile.h index 2843833e49..6c97808397 100644 --- a/src/timezone/tzfile.h +++ b/src/timezone/tzfile.h @@ -1,4 +1,7 @@ +/* Layout and location of TZif files. */ + #ifndef TZFILE_H + #define TZFILE_H /* @@ -21,14 +24,14 @@ * Information about time zone files. */ -#define TZDEFAULT "localtime" +#define TZDEFAULT "/etc/localtime" #define TZDEFRULES "posixrules" /* * Each file begins with. . . */ -#define TZ_MAGIC "TZif" +#define TZ_MAGIC "TZif" struct tzhead { @@ -50,8 +53,8 @@ struct tzhead * tzh_timecnt (unsigned char)s types of local time starting at above * tzh_typecnt repetitions of * one (char [4]) coded UT offset in seconds - * one (unsigned char) used to set tm_isdst - * one (unsigned char) that's an abbreviation list index + * one (unsigned char) used to set tm_isdst + * one (unsigned char) that's an abbreviation list index * tzh_charcnt (char)s '\0'-terminated zone abbreviations * tzh_leapcnt repetitions of * one (char [4]) coded leap second transition times diff --git a/src/timezone/tznames/Africa.txt b/src/timezone/tznames/Africa.txt index 0bd0c405f6..2ea08a6508 100644 --- a/src/timezone/tznames/Africa.txt +++ b/src/timezone/tznames/Africa.txt @@ -147,8 +147,7 @@ GMT 0 # Greenwich Mean Time # - SAST South Australian Standard Time (not in IANA database) SAST 7200 # South Africa Standard Time # (Africa/Johannesburg) -WAST 7200 D # West Africa Summer Time - # (Africa/Windhoek) +WAST 7200 D # West Africa Summer Time (obsolete) WAT 3600 # West Africa Time # (Africa/Bangui) # (Africa/Brazzaville) diff --git a/src/timezone/tznames/America.txt b/src/timezone/tznames/America.txt index 1c5eb1f8c5..2594c375f6 100644 --- a/src/timezone/tznames/America.txt +++ b/src/timezone/tznames/America.txt @@ -237,6 +237,9 @@ PDT -25200 D # Pacific Daylight Time PET -18000 # Peru Time (obsolete) PMDT -7200 D # Pierre & Miquelon Daylight Time (obsolete) PMST -10800 # Pierre & Miquelon Standard Time (obsolete) +# CONFLICT! 
PST is not unique +# Other timezones: +# - PST: Philippine Standard Time PST -28800 # Pacific Standard Time # (America/Dawson) # (America/Los_Angeles) diff --git a/src/timezone/tznames/Asia.txt b/src/timezone/tznames/Asia.txt index 4e365b0028..113333995a 100644 --- a/src/timezone/tznames/Asia.txt +++ b/src/timezone/tznames/Asia.txt @@ -118,13 +118,13 @@ IRST Asia/Tehran # Iran Standard Time (obsolete) IRT 12600 # Iran Time (not in IANA database) # CONFLICT! IST is not unique # Other timezones: -# - IST: Irish Summer Time (Europe) +# - IST: Irish Standard Time (Europe) # - IST: Israel Standard Time (Asia) IST 19800 # Indian Standard Time # (Asia/Calcutta) # CONFLICT! IST is not unique # Other timezones: -# - IST: Irish Summer Time (Europe) +# - IST: Irish Standard Time (Europe) # - IST: Indian Standard Time (Asia) IST 7200 # Israel Standard Time # (Asia/Jerusalem) @@ -158,6 +158,10 @@ PKT 18000 # Pakistan Time # (Asia/Karachi) PKST 21600 D # Pakistan Summer Time # (Asia/Karachi) +# CONFLICT! PST is not unique +# Other timezones: +# - PST: Pacific Standard Time (America) +PST 28800 # Philippine Standard Time QYZT 21600 # Kizilorda Time (obsolete) SAKST Asia/Sakhalin # Sakhalin Summer Time (obsolete) SAKT Asia/Sakhalin # Sakhalin Time (obsolete) diff --git a/src/timezone/tznames/Default b/src/timezone/tznames/Default index 80eb1b1290..1532413bfa 100644 --- a/src/timezone/tznames/Default +++ b/src/timezone/tznames/Default @@ -181,6 +181,9 @@ PDT -25200 D # Pacific Daylight Time # (America/Whitehorse) PMDT -7200 D # Pierre & Miquelon Daylight Time (obsolete) PMST -10800 # Pierre & Miquelon Standard Time (obsolete) +# CONFLICT! PST is not unique +# Other timezones: +# - PST: Philippine Standard Time PST -28800 # Pacific Standard Time # (America/Dawson) # (America/Los_Angeles) @@ -236,7 +239,7 @@ IRKT Asia/Irkutsk # Irkutsk Time (obsolete) IRT 12600 # Iran Time (not in IANA database) # CONFLICT! IST is not unique # Other timezones: -# - IST: Irish Summer Time (Europe) +# - IST: Irish Standard Time (Europe) # - IST: Indian Standard Time (Asia) IST 7200 # Israel Standard Time # (Asia/Jerusalem) diff --git a/src/timezone/tznames/Europe.txt b/src/timezone/tznames/Europe.txt index 0cb49f156b..86378bf8e9 100644 --- a/src/timezone/tznames/Europe.txt +++ b/src/timezone/tznames/Europe.txt @@ -180,7 +180,7 @@ GMT 0 # Greenwich Mean Time # Other timezones: # - IST: Indian Standard Time (Asia) # - IST: Israel Standard Time (Asia) -IST 3600 D # Irish Summer Time +IST 3600 # Irish Standard Time # (Europe/Dublin) MEST 7200 D # Middle Europe Summer Time # (MET) diff --git a/src/timezone/tznames/Pacific.txt b/src/timezone/tznames/Pacific.txt index c86248bbc7..c30008cb04 100644 --- a/src/timezone/tznames/Pacific.txt +++ b/src/timezone/tznames/Pacific.txt @@ -52,6 +52,9 @@ NZST 43200 # New Zealand Standard Time PGT 36000 # Papua New Guinea Time (obsolete) PHOT Pacific/Enderbury # Phoenix Islands Time (Kiribati) (obsolete) PONT 39600 # Ponape Time (Micronesia) (obsolete) +# CONFLICT! PST is not unique +# Other timezones: +# - PST: Philippine Standard Time PST -28800 # Pacific Standard Time # (America/Dawson) # (America/Los_Angeles) diff --git a/src/timezone/zic.c b/src/timezone/zic.c index 27c841be9e..4613919afe 100644 --- a/src/timezone/zic.c +++ b/src/timezone/zic.c @@ -1,3 +1,5 @@ +/* Compile .zi time zone data into TZif binary files. */ + /* * This file is in the public domain, so clarified as of * 2006-07-17 by Arthur David Olson. 
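Aside on the tznames lists touched above: each non-comment entry maps an abbreviation either to a fixed UT offset in seconds (positive meaning east of Greenwich, with a trailing D marking a daylight-saving abbreviation) or to an IANA zone name whose current meaning is to be used, and PostgreSQL selects one of these lists through the timezone_abbreviations setting. The sketch below is illustrative only, not the server's actual abbreviation lookup code; it just shows the shape of such a fixed-offset table and a linear search over it, using entries mentioned in the PST-conflict comments.

/*
 * Illustrative only: a fixed-offset abbreviation table in the spirit of
 * the tznames entries above (offsets in seconds east of UT).
 */
#include <stdbool.h>
#include <stdio.h>
#include <string.h>

struct tzabbrev
{
    const char *abbrev;         /* abbreviation, e.g. "PST" */
    int         utoffset;       /* offset from UT in seconds */
    bool        isdst;          /* daylight-saving abbreviation? */
};

/* Note the PST conflict: the Default list resolves PST to Pacific time. */
static const struct tzabbrev sample_abbrevs[] = {
    {"IST", 3600, false},       /* Irish Standard Time (Europe/Dublin) */
    {"PST", -28800, false},     /* Pacific Standard Time */
    {"PDT", -25200, true},      /* Pacific Daylight Time */
    {"PKT", 18000, false},      /* Pakistan Time */
    {NULL, 0, false}
};

static const struct tzabbrev *
lookup_abbrev(const char *name)
{
    const struct tzabbrev *tp;

    for (tp = sample_abbrevs; tp->abbrev != NULL; tp++)
    {
        if (strcmp(tp->abbrev, name) == 0)
            return tp;
    }
    return NULL;
}

int
main(void)
{
    const struct tzabbrev *tp = lookup_abbrev("PST");

    if (tp != NULL)
        printf("%s = UT%+d seconds%s\n",
               tp->abbrev, tp->utoffset, tp->isdst ? " (DST)" : "");
    return 0;
}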
@@ -11,22 +13,21 @@ #include #include #include -#include #include "pg_getopt.h" #include "private.h" #include "tzfile.h" -#define ZIC_VERSION_PRE_2013 '2' -#define ZIC_VERSION '3' +#define ZIC_VERSION_PRE_2013 '2' +#define ZIC_VERSION '3' typedef int64 zic_t; #define ZIC_MIN PG_INT64_MIN #define ZIC_MAX PG_INT64_MAX #ifndef ZIC_MAX_ABBR_LEN_WO_WARN -#define ZIC_MAX_ABBR_LEN_WO_WARN 6 +#define ZIC_MAX_ABBR_LEN_WO_WARN 6 #endif /* !defined ZIC_MAX_ABBR_LEN_WO_WARN */ #ifndef WIN32 @@ -40,15 +41,22 @@ typedef int64 zic_t; #define linkat(fromdir, from, todir, to, flag) \ (itssymlink(from) ? (errno = ENOTSUP, -1) : link(from, to)) #endif +/* Port to native MS-Windows and to ancient UNIX. */ +#if !defined S_ISDIR && defined S_IFDIR && defined S_IFMT +#define S_ISDIR(mode) (((mode) & S_IFMT) == S_IFDIR) +#endif /* The maximum ptrdiff_t value, for pre-C99 platforms. */ #ifndef PTRDIFF_MAX static ptrdiff_t const PTRDIFF_MAX = MAXVAL(ptrdiff_t, TYPE_BIT(ptrdiff_t)); #endif -/* The type and printf format for line numbers. */ +/* + * The type for line numbers. In Postgres, use %d to format them; upstream + * uses PRIdMAX but we prefer not to rely on that, not least because it + * results in platform-dependent strings to be translated. + */ typedef int lineno_t; -#define PRIdLINENO "d" struct rule { @@ -72,7 +80,9 @@ struct rule bool r_todisstd; /* above is standard time if 1 or wall clock * time if 0 */ bool r_todisgmt; /* above is GMT if 1 or local time if 0 */ - zic_t r_stdoff; /* offset from standard time */ + bool r_isdst; /* is this daylight saving time? */ + zic_t r_stdoff; /* offset from default time (which is usually + * standard time) */ const char *r_abbrvar; /* variable part of abbreviation */ bool r_todo; /* a rule to do (used in outzone) */ @@ -94,10 +104,11 @@ struct zone const char *z_name; zic_t z_gmtoff; - const char *z_rule; + char *z_rule; const char *z_format; char z_format_specifier; + bool z_isdst; zic_t z_stdoff; struct rule *z_rules; @@ -121,8 +132,8 @@ static void adjleap(void); static void associate(void); static void dolink(const char *, const char *, bool); static char **getfields(char *buf); -static zic_t gethms(const char *string, const char *errstring, - bool); +static zic_t gethms(const char *string, const char *errstring); +static zic_t getstdoff(char *, bool *); static void infile(const char *filename); static void inleap(char **fields, int nfields); static void inlink(char **fields, int nfields); @@ -152,13 +163,15 @@ enum PERCENT_Z_LEN_BOUND = sizeof "+995959" - 1}; /* If true, work around a bug in Qt 5.6.1 and earlier, which mishandles - tz binary files whose POSIX-TZ-style strings contain '<'; see + TZif files whose POSIX-TZ-style strings contain '<'; see QTBUG-53071 . This workaround will no longer be needed when Qt 5.6.1 and earlier are obsolete, say in the year 2021. 
*/ +#ifndef WORK_AROUND_QTBUG_53071 enum { WORK_AROUND_QTBUG_53071 = true}; +#endif static int charcnt; static bool errors; @@ -201,7 +214,7 @@ static int typecnt; #define ZF_RULE 3 #define ZF_FORMAT 4 #define ZF_TILYEAR 5 -#define ZF_TILMONTH 6 +#define ZF_TILMONTH 6 #define ZF_TILDAY 7 #define ZF_TILTIME 8 #define ZONE_MINFIELDS 5 @@ -214,12 +227,12 @@ static int typecnt; #define ZFC_GMTOFF 0 #define ZFC_RULE 1 #define ZFC_FORMAT 2 -#define ZFC_TILYEAR 3 +#define ZFC_TILYEAR 3 #define ZFC_TILMONTH 4 #define ZFC_TILDAY 5 -#define ZFC_TILTIME 6 -#define ZONEC_MINFIELDS 3 -#define ZONEC_MAXFIELDS 7 +#define ZFC_TILTIME 6 +#define ZONEC_MINFIELDS 3 +#define ZONEC_MAXFIELDS 7 /* * Which files are which on a Rule line. @@ -234,7 +247,7 @@ static int typecnt; #define RF_TOD 7 #define RF_STDOFF 8 #define RF_ABBRVAR 9 -#define RULE_FIELDS 10 +#define RULE_FIELDS 10 /* * Which fields are which on a Link line. @@ -242,7 +255,7 @@ static int typecnt; #define LF_FROM 1 #define LF_TO 2 -#define LINK_FIELDS 3 +#define LINK_FIELDS 3 /* * Which fields are which on a Leap line. @@ -254,7 +267,7 @@ static int typecnt; #define LP_TIME 4 #define LP_CORR 5 #define LP_ROLL 6 -#define LEAP_FIELDS 7 +#define LEAP_FIELDS 7 /* * Year synonyms. @@ -293,10 +306,13 @@ struct lookup static struct lookup const *byword(const char *string, const struct lookup *lp); -static struct lookup const line_codes[] = { +static struct lookup const zi_line_codes[] = { {"Rule", LC_RULE}, {"Zone", LC_ZONE}, {"Link", LC_LINK}, + {NULL, 0} +}; +static struct lookup const leap_line_codes[] = { {"Leap", LC_LEAP}, {NULL, 0} }; @@ -435,7 +451,8 @@ growalloc(void *ptr, size_t itemsize, ptrdiff_t nitems, ptrdiff_t *nitems_alloc) return ptr; else { - ptrdiff_t amax = PTRDIFF_MAX - WORK_AROUND_QTBUG_53071; + ptrdiff_t nitems_max = PTRDIFF_MAX - WORK_AROUND_QTBUG_53071; + ptrdiff_t amax = nitems_max < SIZE_MAX ? nitems_max : SIZE_MAX; if ((amax - 1) / 3 * 2 < *nitems_alloc) memory_exhausted(_("integer overflow")); @@ -467,14 +484,14 @@ static void verror(const char *string, va_list args) { /* - * Match the format of "cc" to allow sh users to zic ... 2>&1 | error -t + * Match the format of "cc" to allow sh users to zic ... 2>&1 | error -t * "*" -v on BSD systems. */ if (filename) - fprintf(stderr, _("\"%s\", line %" PRIdLINENO ": "), filename, linenum); + fprintf(stderr, _("\"%s\", line %d: "), filename, linenum); vfprintf(stderr, string, args); if (rfilename != NULL) - fprintf(stderr, _(" (rule from \"%s\", line %" PRIdLINENO ")"), + fprintf(stderr, _(" (rule from \"%s\", line %d)"), rfilename, rlinenum); fprintf(stderr, "\n"); } @@ -524,7 +541,7 @@ usage(FILE *stream, int status) fprintf(stream, _("%s: usage is %s [ --version ] [ --help ] [ -v ] [ -P ] \\\n" "\t[ -l localtime ] [ -p posixrules ] [ -d directory ] \\\n" - "\t[ -L leapseconds ] [ filename ... ]\n\n" + "\t[ -t localtime-link ] [ -L leapseconds ] [ filename ... 
]\n\n" "Report bugs to %s.\n"), progname, progname, PACKAGE_BUGREPORT); if (status == EXIT_SUCCESS) @@ -560,10 +577,11 @@ static const char *psxrules; static const char *lcltime; static const char *directory; static const char *leapsec; +static const char *tzdefault; static const char *yitcommand; int -main(int argc, char *argv[]) +main(int argc, char **argv) { int c, k; @@ -572,7 +590,7 @@ main(int argc, char *argv[]) #ifndef WIN32 umask(umask(S_IWGRP | S_IWOTH) | (S_IWGRP | S_IWOTH)); -#endif /* !WIN32 */ +#endif progname = argv[0]; if (TYPE_BIT(zic_t) <64) { @@ -591,7 +609,7 @@ main(int argc, char *argv[]) { usage(stdout, EXIT_SUCCESS); } - while ((c = getopt(argc, argv, "d:l:p:L:vPsy:")) != EOF && c != -1) + while ((c = getopt(argc, argv, "d:l:L:p:Pst:vy:")) != EOF && c != -1) switch (c) { default: @@ -629,9 +647,23 @@ main(int argc, char *argv[]) return EXIT_FAILURE; } break; + case 't': + if (tzdefault != NULL) + { + fprintf(stderr, + _("%s: More than one -t option" + " specified\n"), + progname); + return EXIT_FAILURE; + } + tzdefault = optarg; + break; case 'y': if (yitcommand == NULL) + { + warning(_("-y is obsolescent")); yitcommand = strdup(optarg); + } else { fprintf(stderr, @@ -666,6 +698,8 @@ main(int argc, char *argv[]) usage(stderr, EXIT_FAILURE); /* usage message by request */ if (directory == NULL) directory = "data"; + if (tzdefault == NULL) + tzdefault = TZDEFAULT; if (yitcommand == NULL) yitcommand = "yearistype"; @@ -707,7 +741,7 @@ main(int argc, char *argv[]) if (lcltime != NULL) { eat(_("command line"), 1); - dolink(lcltime, TZDEFAULT, true); + dolink(lcltime, tzdefault, true); } if (psxrules != NULL) { @@ -907,10 +941,12 @@ dolink(char const *fromfield, char const *tofield, bool staysymlink) char const *contents = absolute ? fromfield : linkalloc; int symlink_errno = symlink(contents, tofield) == 0 ? 0 : errno; - if (symlink_errno == ENOENT && !todirs_made) + if (!todirs_made + && (symlink_errno == ENOENT || symlink_errno == ENOTSUP)) { mkdirs(tofield, true); - symlink_errno = symlink(contents, tofield) == 0 ? 0 : errno; + if (symlink_errno == ENOENT) + symlink_errno = symlink(contents, tofield) == 0 ? 0 : errno; } free(linkalloc); if (symlink_errno == 0) @@ -960,53 +996,11 @@ dolink(char const *fromfield, char const *tofield, bool staysymlink) } } -#define TIME_T_BITS_IN_FILE 64 +#define TIME_T_BITS_IN_FILE 64 static zic_t const min_time = MINVAL(zic_t, TIME_T_BITS_IN_FILE); static zic_t const max_time = MAXVAL(zic_t, TIME_T_BITS_IN_FILE); -/* - * Estimated time of the Big Bang, in seconds since the POSIX epoch. - * rounded downward to the negation of a power of two that is - * comfortably outside the error bounds. - * - * For the time of the Big Bang, see: - * - * Ade PAR, Aghanim N, Armitage-Caplan C et al. Planck 2013 results. - * I. Overview of products and scientific results. - * arXiv:1303.5062 2013-03-20 20:10:01 UTC - * [PDF] - * - * Page 36, Table 9, row Age/Gyr, column Planck+WP+highL+BAO 68% limits - * gives the value 13.798 plus-or-minus 0.037 billion years. - * Multiplying this by 1000000000 and then by 31557600 (the number of - * seconds in an astronomical year) gives a value that is comfortably - * less than 2**59, so BIG_BANG is - 2**59. - * - * BIG_BANG is approximate, and may change in future versions. - * Please do not rely on its exact value. - */ - -#ifndef BIG_BANG -#define BIG_BANG (- (((zic_t) 1) << 59)) -#endif - -/* If true, work around GNOME bug 730332 - - by refusing to output time stamps before BIG_BANG. 
- Such time stamps are physically suspect anyway. - - The GNOME bug is scheduled to be fixed in GNOME 3.22, and if so - this workaround will no longer be needed when GNOME 3.21 and - earlier are obsolete, say in the year 2021. */ -enum -{ -WORK_AROUND_GNOME_BUG_730332 = true}; - -static const zic_t early_time = (WORK_AROUND_GNOME_BUG_730332 - ? BIG_BANG - : MINVAL(zic_t, TIME_T_BITS_IN_FILE)); - /* Return true if NAME is a directory. */ static bool itsdir(char const *name) @@ -1131,8 +1125,7 @@ associate(void) * Maybe we have a local standard time offset. */ eat(zp->z_filename, zp->z_linenum); - zp->z_stdoff = gethms(zp->z_rule, _("unruly zone"), - true); + zp->z_stdoff = getstdoff(zp->z_rule, &zp->z_isdst); /* * Note, though, that if there's no rule, a '%s' in the format is @@ -1199,9 +1192,14 @@ infile(const char *name) /* nothing to do */ } else if (wantcont) + { wantcont = inzcont(fields, nfields); + } else { + struct lookup const *line_codes + = name == leapsec ? leap_line_codes : zi_line_codes; + lp = byword(fields[0], line_codes); if (lp == NULL) error(_("input line of unknown type")); @@ -1220,12 +1218,7 @@ infile(const char *name) wantcont = false; break; case LC_LEAP: - if (name != leapsec) - warning(_("%s: Leap line in non leap" - " seconds file %s"), - progname, name); - else - inleap(fields, nfields); + inleap(fields, nfields); wantcont = false; break; default: /* "cannot happen" */ @@ -1249,33 +1242,58 @@ infile(const char *name) * A null string maps to zero. * Call error with errstring and return zero on errors. */ + static zic_t -gethms(char const *string, char const *errstring, bool signable) +gethms(char const *string, char const *errstring) { /* PG: make hh be int not zic_t to avoid sscanf portability issues */ int hh; - int mm, - ss, - sign; - char xs; + int sign, + mm = 0, + ss = 0; + char hhx, + mmx, + ssx, + xr = '0', + xs; + int tenths = 0; + bool ok = true; if (string == NULL || *string == '\0') return 0; - if (!signable) - sign = 1; - else if (*string == '-') + if (*string == '-') { sign = -1; ++string; } else sign = 1; - if (sscanf(string, "%d%c", &hh, &xs) == 1) - mm = ss = 0; - else if (sscanf(string, "%d:%d%c", &hh, &mm, &xs) == 2) - ss = 0; - else if (sscanf(string, "%d:%d:%d%c", &hh, &mm, &ss, &xs) - != 3) + switch (sscanf(string, + "%d%c%d%c%d%c%1d%*[0]%c%*[0123456789]%c", + &hh, &hhx, &mm, &mmx, &ss, &ssx, &tenths, &xr, &xs)) + { + default: + ok = false; + break; + case 8: + ok = '0' <= xr && xr <= '9'; + /* fallthrough */ + case 7: + ok &= ssx == '.'; + if (ok && noise) + warning(_("fractional seconds rejected by" + " pre-2018 versions of zic")); + /* fallthrough */ + case 5: + ok &= mmx == ':'; + /* fallthrough */ + case 3: + ok &= hhx == ':'; + /* fallthrough */ + case 1: + break; + } + if (!ok) { error("%s", errstring); return 0; @@ -1295,6 +1313,7 @@ gethms(char const *string, char const *errstring, bool signable) return 0; } #endif + ss += 5 + ((ss ^ 1) & (xr == '0')) <= tenths; /* Round to even. 
*/ if (noise && (hh > HOURSPERDAY || (hh == HOURSPERDAY && (mm != 0 || ss != 0)))) warning(_("values over 24 hours not handled by pre-2007 versions of zic")); @@ -1302,6 +1321,34 @@ gethms(char const *string, char const *errstring, bool signable) sign * (mm * SECSPERMIN + ss)); } +static zic_t +getstdoff(char *field, bool *isdst) +{ + int dst = -1; + zic_t stdoff; + size_t fieldlen = strlen(field); + + if (fieldlen != 0) + { + char *ep = field + fieldlen - 1; + + switch (*ep) + { + case 'd': + dst = 1; + *ep = '\0'; + break; + case 's': + dst = 0; + *ep = '\0'; + break; + } + } + stdoff = gethms(field, _("invalid saved time")); + *isdst = dst < 0 ? stdoff != 0 : dst; + return stdoff; +} + static void inrule(char **fields, int nfields) { @@ -1312,14 +1359,33 @@ inrule(char **fields, int nfields) error(_("wrong number of fields on Rule line")); return; } - if (*fields[RF_NAME] == '\0') + switch (*fields[RF_NAME]) { - error(_("nameless rule")); - return; + case '\0': + case ' ': + case '\f': + case '\n': + case '\r': + case '\t': + case '\v': + case '+': + case '-': + case '0': + case '1': + case '2': + case '3': + case '4': + case '5': + case '6': + case '7': + case '8': + case '9': + error(_("Invalid rule name \"%s\""), fields[RF_NAME]); + return; } r.r_filename = filename; r.r_linenum = linenum; - r.r_stdoff = gethms(fields[RF_STDOFF], _("invalid saved time"), true); + r.r_stdoff = getstdoff(fields[RF_STDOFF], &r.r_isdst); rulesub(&r, fields[RF_LOYEAR], fields[RF_HIYEAR], fields[RF_COMMAND], fields[RF_MONTH], fields[RF_DAY], fields[RF_TOD]); r.r_name = ecpyalloc(fields[RF_NAME]); @@ -1340,11 +1406,11 @@ inzone(char **fields, int nfields) error(_("wrong number of fields on Zone line")); return false; } - if (strcmp(fields[ZF_NAME], TZDEFAULT) == 0 && lcltime != NULL) + if (lcltime != NULL && strcmp(fields[ZF_NAME], tzdefault) == 0) { error( _("\"Zone %s\" line and -l option are mutually exclusive"), - TZDEFAULT); + tzdefault); return false; } if (strcmp(fields[ZF_NAME], TZDEFRULES) == 0 && psxrules != NULL) @@ -1359,7 +1425,7 @@ inzone(char **fields, int nfields) strcmp(zones[i].z_name, fields[ZF_NAME]) == 0) { error(_("duplicate zone name %s" - " (file \"%s\", line %" PRIdLINENO ")"), + " (file \"%s\", line %d)"), fields[ZF_NAME], zones[i].z_filename, zones[i].z_linenum); @@ -1420,7 +1486,7 @@ inzsub(char **fields, int nfields, bool iscont) } z.z_filename = filename; z.z_linenum = linenum; - z.z_gmtoff = gethms(fields[i_gmtoff], _("invalid UT offset"), true); + z.z_gmtoff = gethms(fields[i_gmtoff], _("invalid UT offset")); if ((cp = strchr(fields[i_format], '%')) != NULL) { if ((*++cp != 's' && *cp != 'z') || strchr(cp, '%') @@ -1562,7 +1628,7 @@ inleap(char **fields, int nfields) return; } t = dayoff * SECSPERDAY; - tod = gethms(fields[LP_TIME], _("invalid time of day"), false); + tod = gethms(fields[LP_TIME], _("invalid time of day")); cp = fields[LP_CORR]; { bool positive; @@ -1573,21 +1639,11 @@ inleap(char **fields, int nfields) positive = false; count = 1; } - else if (strcmp(cp, "--") == 0) - { - positive = false; - count = 2; - } else if (strcmp(cp, "+") == 0) { positive = true; count = 1; } - else if (strcmp(cp, "++") == 0) - { - positive = true; - count = 2; - } else { error(_("illegal CORRECTION field on Leap line")); @@ -1599,9 +1655,9 @@ inleap(char **fields, int nfields) return; } t = tadd(t, tod); - if (t < early_time) + if (t < 0) { - error(_("leap second precedes Big Bang")); + error(_("leap second precedes Epoch")); return; } leapadd(t, positive, lp->l_value, count); @@ 
-1680,7 +1736,7 @@ rulesub(struct rule *rp, const char *loyearp, const char *hiyearp, break; } } - rp->r_tod = gethms(dp, _("invalid time of day"), false); + rp->r_tod = gethms(dp, _("invalid time of day")); free(dp); /* @@ -1753,11 +1809,14 @@ rulesub(struct rule *rp, const char *loyearp, const char *hiyearp, error(_("typed single year")); return; } + warning(_("year type \"%s\" is obsolete; use \"-\" instead"), + typep); rp->r_yrtype = ecpyalloc(typep); } /* - * Day work. Accept things such as: 1 last-Sunday Sun<=20 Sun>=7 + * Day work. Accept things such as: 1 lastSunday last-Sunday + * (undocumented; warn about this) Sun<=20 Sun>=7 */ dp = ecpyalloc(dayp); if ((lp = byword(dp, lasts)) != NULL) @@ -1855,14 +1914,44 @@ atcomp(const void *avp, const void *bvp) return (a < b) ? -1 : (a > b); } -static bool -is32(const zic_t x) +static void +swaptypes(int i, int j) { - return x == ((zic_t) ((int32) x)); + { + zic_t t = gmtoffs[i]; + + gmtoffs[i] = gmtoffs[j]; + gmtoffs[j] = t; + } + { + char t = isdsts[i]; + + isdsts[i] = isdsts[j]; + isdsts[j] = t; + } + { + unsigned char t = abbrinds[i]; + + abbrinds[i] = abbrinds[j]; + abbrinds[j] = t; + } + { + bool t = ttisstds[i]; + + ttisstds[i] = ttisstds[j]; + ttisstds[j] = t; + } + { + bool t = ttisgmts[i]; + + ttisgmts[i] = ttisgmts[j]; + ttisgmts[j] = t; + } } static void -writezone(const char *const name, const char *const string, char version) +writezone(const char *const name, const char *const string, char version, + int defaulttype) { FILE *fp; ptrdiff_t i, @@ -1878,7 +1967,12 @@ writezone(const char *const name, const char *const string, char version) zic_t one = 1; zic_t y2038_boundary = one << 31; ptrdiff_t nats = timecnt + WORK_AROUND_QTBUG_53071; - zic_t *ats = emalloc(size_product(nats, sizeof *ats + 1)); + + /* + * Allocate the ATS and TYPES arrays via a single malloc, as this is a bit + * faster. + */ + zic_t *ats = emalloc(MAXALIGN(size_product(nats, sizeof *ats + 1))); void *typesptr = ats + nats; unsigned char *types = typesptr; @@ -1897,14 +1991,12 @@ writezone(const char *const name, const char *const string, char version) toi = 0; fromi = 0; - while (fromi < timecnt && attypes[fromi].at < early_time) - ++fromi; for (; fromi < timecnt; ++fromi) { - if (toi > 1 && ((attypes[fromi].at + - gmtoffs[attypes[toi - 1].type]) <= - (attypes[toi - 1].at + - gmtoffs[attypes[toi - 2].type]))) + if (toi != 0 && ((attypes[fromi].at + + gmtoffs[attypes[toi - 1].type]) <= + (attypes[toi - 1].at + gmtoffs[toi == 1 ? 0 + : attypes[toi - 2].type]))) { attypes[toi - 1].type = attypes[fromi].type; @@ -1938,20 +2030,6 @@ writezone(const char *const name, const char *const string, char version) types[i] = attypes[i].type; } - /* - * Work around QTBUG-53071 for time stamps less than y2038_boundary - 1, - * by inserting a no-op transition at time y2038_boundary - 1. This works - * only for timestamps before the boundary, which should be good enough in - * practice as QTBUG-53071 should be long-dead by 2038. - */ - if (WORK_AROUND_QTBUG_53071 && timecnt != 0 - && ats[timecnt - 1] < y2038_boundary - 1 && strchr(string, '<')) - { - ats[timecnt] = y2038_boundary - 1; - types[timecnt] = types[timecnt - 1]; - timecnt++; - } - /* * Correct for leap seconds. */ @@ -1966,6 +2044,23 @@ writezone(const char *const name, const char *const string, char version) } } + /* + * Work around QTBUG-53071 for timestamps less than y2038_boundary - 1, by + * inserting a no-op transition at time y2038_boundary - 1. 
This works + * only for timestamps before the boundary, which should be good enough in + * practice as QTBUG-53071 should be long-dead by 2038. Do this after + * correcting for leap seconds, as the idea is to insert a transition just + * before 32-bit pg_time_t rolls around, and this occurs at a slightly + * different moment if transitions are leap-second corrected. + */ + if (WORK_AROUND_QTBUG_53071 && timecnt != 0 + && ats[timecnt - 1] < y2038_boundary - 1 && strchr(string, '<')) + { + ats[timecnt] = y2038_boundary - 1; + types[timecnt] = types[timecnt - 1]; + timecnt++; + } + /* * Figure out 32-bit-limited starts and counts. */ @@ -1973,25 +2068,24 @@ writezone(const char *const name, const char *const string, char version) timei32 = 0; leapcnt32 = leapcnt; leapi32 = 0; - while (timecnt32 > 0 && !is32(ats[timecnt32 - 1])) + while (0 < timecnt32 && PG_INT32_MAX < ats[timecnt32 - 1]) --timecnt32; - while (timecnt32 > 0 && !is32(ats[timei32])) + while (1 < timecnt32 && ats[timei32] < PG_INT32_MIN + && ats[timei32 + 1] <= PG_INT32_MIN) { + /* + * Discard too-low transitions, except keep any last too-low + * transition if no transition is exactly at PG_INT32_MIN. The kept + * transition will be output as an PG_INT32_MIN "transition" + * appropriate for buggy 32-bit clients that do not use time type 0 + * for timestamps before the first transition; see below. + */ --timecnt32; ++timei32; } - - /* - * Output an INT32_MIN "transition" if appropriate; see below. - */ - if (timei32 > 0 && ats[timei32] > PG_INT32_MIN) - { - --timei32; - ++timecnt32; - } - while (leapcnt32 > 0 && !is32(trans[leapcnt32 - 1])) + while (0 < leapcnt32 && PG_INT32_MAX < trans[leapcnt32 - 1]) --leapcnt32; - while (leapcnt32 > 0 && !is32(trans[leapi32])) + while (0 < leapcnt32 && trans[leapi32] < PG_INT32_MIN) { --leapcnt32; ++leapi32; @@ -2036,7 +2130,8 @@ writezone(const char *const name, const char *const string, char version) int thisleapi, thisleapcnt, thisleaplim; - int writetype[TZ_MAX_TYPES]; + int old0; + char omittype[TZ_MAX_TYPES]; int typemap[TZ_MAX_TYPES]; int thistypecnt; char thischars[TZ_MAX_CHARS]; @@ -2064,28 +2159,19 @@ writezone(const char *const name, const char *const string, char version) error(_("too many transition times")); thistimelim = thistimei + thistimecnt; thisleaplim = thisleapi + thisleapcnt; - for (i = 0; i < typecnt; ++i) - writetype[i] = thistimecnt == timecnt; - if (thistimecnt == 0) - { - /* - * No transition times fall in the current (32- or 64-bit) window. - */ - if (typecnt != 0) - writetype[typecnt - 1] = true; - } - else - { - for (i = thistimei - 1; i < thistimelim; ++i) - if (i >= 0) - writetype[types[i]] = true; + memset(omittype, true, typecnt); + omittype[defaulttype] = false; + for (i = thistimei; i < thistimelim; i++) + omittype[types[i]] = false; + + /* + * Reorder types to make DEFAULTTYPE type 0. Use TYPEMAP to swap OLD0 + * and DEFAULTTYPE so that DEFAULTTYPE appears as type 0 in the output + * instead of OLD0. TYPEMAP also omits unused types. 
+ */ + old0 = strlen(omittype); + swaptypes(old0, defaulttype); - /* - * For America/Godthab and Antarctica/Palmer - */ - if (thistimei == 0) - writetype[0] = true; - } #ifndef LEAVE_SOME_PRE_2011_SYSTEMS_IN_THE_LURCH /* @@ -2107,8 +2193,8 @@ writezone(const char *const name, const char *const string, char version) mrudst = types[i]; else mrustd = types[i]; - for (i = 0; i < typecnt; ++i) - if (writetype[i]) + for (i = old0; i < typecnt; i++) + if (!omittype[i]) { if (isdsts[i]) hidst = i; @@ -2125,7 +2211,7 @@ writezone(const char *const name, const char *const string, char version) ttisstds[mrudst], ttisgmts[mrudst]); isdsts[mrudst] = 1; - writetype[type] = true; + omittype[type] = false; } if (histd >= 0 && mrustd >= 0 && histd != mrustd && gmtoffs[histd] != gmtoffs[mrustd]) @@ -2137,22 +2223,26 @@ writezone(const char *const name, const char *const string, char version) ttisstds[mrustd], ttisgmts[mrustd]); isdsts[mrustd] = 0; - writetype[type] = true; + omittype[type] = false; } } #endif /* !defined * LEAVE_SOME_PRE_2011_SYSTEMS_IN_THE_LURCH */ thistypecnt = 0; - for (i = 0; i < typecnt; ++i) - typemap[i] = writetype[i] ? thistypecnt++ : -1; + for (i = old0; i < typecnt; i++) + if (!omittype[i]) + typemap[i == old0 ? defaulttype + : i == defaulttype ? old0 : i] + = thistypecnt++; + for (i = 0; i < sizeof indmap / sizeof indmap[0]; ++i) indmap[i] = -1; thischarcnt = 0; - for (i = 0; i < typecnt; ++i) + for (i = old0; i < typecnt; i++) { char *thisabbr; - if (!writetype[i]) + if (omittype[i]) continue; if (indmap[abbrinds[i]] >= 0) continue; @@ -2169,7 +2259,7 @@ writezone(const char *const name, const char *const string, char version) } #define DO(field) fwrite(tzh.field, sizeof tzh.field, 1, fp) tzh = tzh0; - strncpy(tzh.tzh_magic, TZ_MAGIC, sizeof tzh.tzh_magic); + memcpy(tzh.tzh_magic, TZ_MAGIC, sizeof tzh.tzh_magic); tzh.tzh_version[0] = version; convert(thistypecnt, tzh.tzh_ttisgmtcnt); convert(thistypecnt, tzh.tzh_ttisstdcnt); @@ -2187,23 +2277,16 @@ writezone(const char *const name, const char *const string, char version) DO(tzh_typecnt); DO(tzh_charcnt); #undef DO - for (i = thistimei; i < thistimelim; ++i) - if (pass == 1) - /* - * Output an INT32_MIN "transition" if appropriate; see above. - */ - puttzcode(((ats[i] < PG_INT32_MIN) ? - PG_INT32_MIN : ats[i]), fp); - else + /* PG: print current timezone abbreviations if requested */ + if (print_abbrevs && pass == 2) + { + /* Print "type" data for periods ending after print_cutoff */ + for (i = thistimei; i < thistimelim; ++i) { - puttzcode64(ats[i], fp); - - /* Print current timezone abbreviations if requested */ - if (print_abbrevs && - (i == thistimelim - 1 || ats[i + 1] > print_cutoff)) + if (i == thistimelim - 1 || ats[i + 1] > print_cutoff) { - unsigned char tm = typemap[types[i]]; + unsigned char tm = types[i]; char *thisabbrev = &thischars[indmap[abbrinds[tm]]]; /* filter out assorted junk entries */ @@ -2215,6 +2298,33 @@ writezone(const char *const name, const char *const string, char version) isdsts[tm] ? "\tD" : ""); } } + /* Print the default type if we have no transitions at all */ + if (thistimei >= thistimelim) + { + unsigned char tm = defaulttype; + char *thisabbrev = &thischars[indmap[abbrinds[tm]]]; + + /* filter out assorted junk entries */ + if (strcmp(thisabbrev, GRANDPARENTED) != 0 && + strcmp(thisabbrev, "zzz") != 0) + fprintf(stdout, "%s\t" INT64_FORMAT "%s\n", + thisabbrev, + gmtoffs[tm], + isdsts[tm] ? 
"\tD" : ""); + } + } + + for (i = thistimei; i < thistimelim; ++i) + if (pass == 1) + + /* + * Output an PG_INT32_MIN "transition" if appropriate; see + * above. + */ + puttzcode(((ats[i] < PG_INT32_MIN) ? + PG_INT32_MIN : ats[i]), fp); + else + puttzcode64(ats[i], fp); for (i = thistimei; i < thistimelim; ++i) { unsigned char uc; @@ -2222,8 +2332,8 @@ writezone(const char *const name, const char *const string, char version) uc = typemap[types[i]]; fwrite(&uc, sizeof uc, 1, fp); } - for (i = 0; i < typecnt; ++i) - if (writetype[i]) + for (i = old0; i < typecnt; i++) + if (!omittype[i]) { puttzcode(gmtoffs[i], fp); putc(isdsts[i], fp); @@ -2266,12 +2376,13 @@ writezone(const char *const name, const char *const string, char version) puttzcode64(todo, fp); puttzcode(corr[i], fp); } - for (i = 0; i < typecnt; ++i) - if (writetype[i]) + for (i = old0; i < typecnt; i++) + if (!omittype[i]) putc(ttisstds[i], fp); - for (i = 0; i < typecnt; ++i) - if (writetype[i]) + for (i = old0; i < typecnt; i++) + if (!omittype[i]) putc(ttisgmts[i], fp); + swaptypes(old0, defaulttype); } fprintf(fp, "\n%s\n", string); close_file(fp, directory, name); @@ -2297,7 +2408,7 @@ abbroffset(char *buf, zic_t offset) offset /= MINSPERHOUR; if (100 <= offset) { - error(_("%%z UTC offset magnitude exceeds 99:59:59")); + error(_("%%z UT offset magnitude exceeds 99:59:59")); return "%z"; } else @@ -2324,7 +2435,7 @@ abbroffset(char *buf, zic_t offset) static size_t doabbr(char *abbr, struct zone const *zp, char const *letters, - zic_t stdoff, bool doquotes) + bool isdst, zic_t stdoff, bool doquotes) { char *cp; char *slashp; @@ -2342,7 +2453,7 @@ doabbr(char *abbr, struct zone const *zp, char const *letters, letters = "%s"; sprintf(abbr, format, letters); } - else if (stdoff != 0) + else if (isdst) { strcpy(abbr, slashp + 1); } @@ -2469,7 +2580,7 @@ stringrule(char *result, const struct rule *const rp, const zic_t dstoff, } if (rp->r_todisgmt) tod += gmtoff; - if (rp->r_todisstd && rp->r_stdoff == 0) + if (rp->r_todisstd && !rp->r_isdst) tod += dstoff; if (tod != 2 * SECSPERMIN * MINSPERHOUR) { @@ -2534,7 +2645,7 @@ stringzone(char *result, struct zone const *zpfirst, ptrdiff_t zonecount) continue; if (rp->r_yrtype != NULL) continue; - if (rp->r_stdoff == 0) + if (!rp->r_isdst) { if (stdrp == NULL) stdrp = rp; @@ -2560,7 +2671,7 @@ stringzone(char *result, struct zone const *zpfirst, ptrdiff_t zonecount) for (i = 0; i < zp->z_nrules; ++i) { rp = &zp->z_rules[i]; - if (rp->r_stdoff == 0 && rule_cmp(stdabbrrp, rp) < 0) + if (!rp->r_isdst && rule_cmp(stdabbrrp, rp) < 0) stdabbrrp = rp; if (rule_cmp(stdrp, rp) < 0) stdrp = rp; @@ -2574,7 +2685,7 @@ stringzone(char *result, struct zone const *zpfirst, ptrdiff_t zonecount) if (stdrp != NULL && stdrp->r_hiyear == 2037) return YEAR_BY_YEAR_ZONE; - if (stdrp != NULL && stdrp->r_stdoff != 0) + if (stdrp != NULL && stdrp->r_isdst) { /* Perpetual DST. */ dstr.r_month = TM_JANUARY; @@ -2582,6 +2693,7 @@ stringzone(char *result, struct zone const *zpfirst, ptrdiff_t zonecount) dstr.r_dayofmonth = 1; dstr.r_tod = 0; dstr.r_todisstd = dstr.r_todisgmt = false; + dstr.r_isdst = stdrp->r_isdst; dstr.r_stdoff = stdrp->r_stdoff; dstr.r_abbrvar = stdrp->r_abbrvar; stdr.r_month = TM_DECEMBER; @@ -2589,6 +2701,7 @@ stringzone(char *result, struct zone const *zpfirst, ptrdiff_t zonecount) stdr.r_dayofmonth = 31; stdr.r_tod = SECSPERDAY + stdrp->r_stdoff; stdr.r_todisstd = stdr.r_todisgmt = false; + stdr.r_isdst = false; stdr.r_stdoff = 0; stdr.r_abbrvar = (stdabbrrp ? 
stdabbrrp->r_abbrvar : ""); @@ -2596,10 +2709,10 @@ stringzone(char *result, struct zone const *zpfirst, ptrdiff_t zonecount) stdrp = &stdr; } } - if (stdrp == NULL && (zp->z_nrules != 0 || zp->z_stdoff != 0)) + if (stdrp == NULL && (zp->z_nrules != 0 || zp->z_isdst)) return -1; abbrvar = (stdrp == NULL) ? "" : stdrp->r_abbrvar; - len = doabbr(result, zp, abbrvar, 0, true); + len = doabbr(result, zp, abbrvar, false, 0, true); offsetlen = stringoffset(result + len, -zp->z_gmtoff); if (!offsetlen) { @@ -2609,7 +2722,8 @@ stringzone(char *result, struct zone const *zpfirst, ptrdiff_t zonecount) len += offsetlen; if (dstrp == NULL) return compat; - len += doabbr(result + len, zp, dstrp->r_abbrvar, dstrp->r_stdoff, true); + len += doabbr(result + len, zp, dstrp->r_abbrvar, + dstrp->r_isdst, dstrp->r_stdoff, true); if (dstrp->r_stdoff != SECSPERMIN * MINSPERHOUR) { offsetlen = stringoffset(result + len, @@ -2674,6 +2788,7 @@ outzone(const struct zone *zpfirst, ptrdiff_t zonecount) zic_t one = 1; zic_t y2038_boundary = one << 31; zic_t max_year0; + int defaulttype = -1; max_abbr_len = 2 + max_format_len + max_abbrvar_len; max_envvar_len = 2 * max_abbr_len + 5 * 9; @@ -2797,9 +2912,9 @@ outzone(const struct zone *zpfirst, ptrdiff_t zonecount) */ stdoff = 0; zp = &zpfirst[i]; - usestart = i > 0 && (zp - 1)->z_untiltime > early_time; + usestart = i > 0 && (zp - 1)->z_untiltime > min_time; useuntil = i < (zonecount - 1); - if (useuntil && zp->z_untiltime <= early_time) + if (useuntil && zp->z_untiltime <= min_time) continue; gmtoff = zp->z_gmtoff; eat(zp->z_filename, zp->z_linenum); @@ -2808,9 +2923,9 @@ outzone(const struct zone *zpfirst, ptrdiff_t zonecount) if (zp->z_nrules == 0) { stdoff = zp->z_stdoff; - doabbr(startbuf, zp, NULL, stdoff, false); + doabbr(startbuf, zp, NULL, zp->z_isdst, stdoff, false); type = addtype(oadd(zp->z_gmtoff, stdoff), - startbuf, stdoff != 0, startttisstd, + startbuf, zp->z_isdst, startttisstd, startttisgmt); if (usestart) { @@ -2818,7 +2933,7 @@ outzone(const struct zone *zpfirst, ptrdiff_t zonecount) usestart = false; } else - addtt(early_time, type); + defaulttype = type; } else for (year = min_year; year <= max_year; ++year) @@ -2850,9 +2965,10 @@ outzone(const struct zone *zpfirst, ptrdiff_t zonecount) { ptrdiff_t k; zic_t jtime, - ktime = 0; + ktime; zic_t offset; + INITIALIZE(ktime); if (useuntil) { /* @@ -2924,16 +3040,19 @@ outzone(const struct zone *zpfirst, ptrdiff_t zonecount) stdoff); doabbr(startbuf, zp, rp->r_abbrvar, + rp->r_isdst, rp->r_stdoff, false); continue; } if (*startbuf == '\0' && - startoff == oadd(zp->z_gmtoff, stdoff)) + startoff == oadd(zp->z_gmtoff, + stdoff)) { doabbr(startbuf, zp, rp->r_abbrvar, + rp->r_isdst, rp->r_stdoff, false); } @@ -2941,10 +3060,12 @@ outzone(const struct zone *zpfirst, ptrdiff_t zonecount) eats(zp->z_filename, zp->z_linenum, rp->r_filename, rp->r_linenum); doabbr(ab, zp, rp->r_abbrvar, - rp->r_stdoff, false); + rp->r_isdst, rp->r_stdoff, false); offset = oadd(zp->z_gmtoff, rp->r_stdoff); - type = addtype(offset, ab, rp->r_stdoff != 0, + type = addtype(offset, ab, rp->r_isdst, rp->r_todisstd, rp->r_todisgmt); + if (defaulttype < 0 && !rp->r_isdst) + defaulttype = type; if (rp->r_hiyear == ZIC_MAX && !(0 <= lastatmax && ktime < attypes[lastatmax].at)) @@ -2963,11 +3084,15 @@ outzone(const struct zone *zpfirst, ptrdiff_t zonecount) if (*startbuf == '\0') error(_("cannot determine time zone abbreviation to use just after until time")); else - addtt(starttime, - addtype(startoff, startbuf, - startoff != zp->z_gmtoff, 
- startttisstd, - startttisgmt)); + { + bool isdst = startoff != zp->z_gmtoff; + + type = addtype(startoff, startbuf, isdst, + startttisstd, startttisgmt); + if (defaulttype < 0 && !isdst) + defaulttype = type; + addtt(starttime, type); + } } /* @@ -2984,6 +3109,8 @@ outzone(const struct zone *zpfirst, ptrdiff_t zonecount) starttime = tadd(starttime, -gmtoff); } } + if (defaulttype < 0) + defaulttype = 0; if (0 <= lastatmax) attypes[lastatmax].dontmerge = true; if (do_extend) @@ -3013,7 +3140,7 @@ outzone(const struct zone *zpfirst, ptrdiff_t zonecount) attypes[timecnt - 1].dontmerge = true; } } - writezone(zpfirst->z_name, envvar, version); + writezone(zpfirst->z_name, envvar, version, defaulttype); free(startbuf); free(ab); free(envvar); @@ -3022,21 +3149,6 @@ outzone(const struct zone *zpfirst, ptrdiff_t zonecount) static void addtt(zic_t starttime, int type) { - if (starttime <= early_time - || (timecnt == 1 && attypes[0].at < early_time)) - { - gmtoffs[0] = gmtoffs[type]; - isdsts[0] = isdsts[type]; - ttisstds[0] = ttisstds[type]; - ttisgmts[0] = ttisgmts[type]; - if (abbrinds[type] != 0) - strcpy(chars, &chars[abbrinds[type]]); - abbrinds[0] = 0; - charcnt = strlen(chars) + 1; - typecnt = 1; - timecnt = 0; - type = 0; - } attypes = growalloc(attypes, sizeof *attypes, timecnt, &timecnt_alloc); attypes[timecnt].at = starttime; attypes[timecnt].dontmerge = false; @@ -3104,14 +3216,7 @@ leapadd(zic_t t, bool positive, int rolling, int count) } for (i = 0; i < leapcnt; ++i) if (t <= trans[i]) - { - if (t == trans[i]) - { - error(_("repeated leap second moment")); - exit(EXIT_FAILURE); - } break; - } do { for (j = leapcnt; j > i; --j) @@ -3132,12 +3237,19 @@ adjleap(void) { int i; zic_t last = 0; + zic_t prevtrans = 0; /* * propagate leap seconds forward */ for (i = 0; i < leapcnt; ++i) { + if (trans[i] - prevtrans < 28 * SECSPERDAY) + { + error(_("Leap seconds too close together")); + exit(EXIT_FAILURE); + } + prevtrans = trans[i]; trans[i] = tadd(trans[i], last); last = corr[i] += last; } @@ -3191,7 +3303,7 @@ yearistype(zic_t year, const char *type) exit(EXIT_FAILURE); } -/* Is A a space character in the C locale? */ +/* Is A a space character in the C locale? */ static bool is_space(char a) { @@ -3274,7 +3386,7 @@ is_alpha(char a) } /* If A is an uppercase character in the C locale, return its lowercase - * counterpart. Otherwise, return A. */ + counterpart. Otherwise, return A. */ static char lowerit(char a) { @@ -3362,6 +3474,19 @@ itsabbr(const char *abbr, const char *word) return true; } +/* Return true if ABBR is an initial prefix of WORD, ignoring ASCII case. */ + +static bool +ciprefix(char const *abbr, char const *word) +{ + do + if (!*abbr) + return true; + while (lowerit(*abbr++) == lowerit(*word++)); + + return false; +} + static const struct lookup * byword(const char *word, const struct lookup *table) { @@ -3371,6 +3496,23 @@ byword(const char *word, const struct lookup *table) if (word == NULL || table == NULL) return NULL; + /* + * If TABLE is LASTS and the word starts with "last" followed by a + * non-'-', skip the "last" and look in WDAY_NAMES instead. Warn about any + * usage of the undocumented prefix "last-". + */ + if (table == lasts && ciprefix("last", word) && word[4]) + { + if (word[4] == '-') + warning(_("\"%s\" is undocumented; use \"last%s\" instead"), + word, word + 5); + else + { + word += 4; + table = wday_names; + } + } + /* * Look for exact match. 
*/ @@ -3383,13 +3525,31 @@ byword(const char *word, const struct lookup *table) */ foundlp = NULL; for (lp = table; lp->l_word != NULL; ++lp) - if (itsabbr(word, lp->l_word)) + if (ciprefix(word, lp->l_word)) { if (foundlp == NULL) foundlp = lp; else return NULL; /* multiple inexact matches */ } + + /* Warn about any backward-compatibility issue with pre-2017c zic. */ + if (foundlp) + { + bool pre_2017c_match = false; + + for (lp = table; lp->l_word; lp++) + if (itsabbr(word, lp->l_word)) + { + if (pre_2017c_match) + { + warning(_("\"%s\" is ambiguous in pre-2017c zic"), word); + break; + } + pre_2017c_match = true; + } + } + return foundlp; } @@ -3493,6 +3653,18 @@ rpytime(const struct rule *rp, zic_t wantedy) dayoff = 0; m = TM_JANUARY; y = EPOCH_YEAR; + if (y < wantedy) + { + wantedy -= y; + dayoff = (wantedy / YEARSPERREPEAT) * (SECSPERREPEAT / SECSPERDAY); + wantedy %= YEARSPERREPEAT; + wantedy += y; + } + else if (wantedy < 0) + { + dayoff = (wantedy / YEARSPERREPEAT) * (SECSPERREPEAT / SECSPERDAY); + wantedy %= YEARSPERREPEAT; + } while (wantedy != y) { if (wantedy > y) @@ -3571,7 +3743,6 @@ will not work with pre-2004 versions of zic")); if (dayoff > max_time / SECSPERDAY) return max_time; t = (zic_t) dayoff * SECSPERDAY; - return tadd(t, rp->r_tod); } @@ -3621,11 +3792,15 @@ mkdirs(char const *argname, bool ancestors) cp = name = ecpyalloc(argname); + /* + * On MS-Windows systems, do not worry about drive letters or backslashes, + * as this should suffice in practice. Time zone names do not use drive + * letters and backslashes. If the -d option of zic does not name an + * already-existing directory, it can use slashes to separate the + * already-existing ancestor prefix from the to-be-created subdirectories. + */ + /* Do not mkdir a root directory, as it must exist. */ -#ifdef WIN32 - if (is_alpha(name[0]) && name[1] == ':') - cp += 2; -#endif while (*cp == '/') cp++; diff --git a/src/tools/RELEASE_CHANGES b/src/tools/RELEASE_CHANGES index b7963c2449..65a1ffc83d 100644 --- a/src/tools/RELEASE_CHANGES +++ b/src/tools/RELEASE_CHANGES @@ -73,9 +73,11 @@ Starting a New Development Cycle * Create a branch in git for maintenance of the previous release o on master branch, do: git pull # be sure you have the latest "master" - git push origin master:refs/heads/"new-branch-name" + git branch "new-branch-name" + git push -u origin "new-branch-name" for example, - git push origin master:refs/heads/REL_10_STABLE + git branch REL_11_STABLE + git push -u origin REL_11_STABLE * Add new branch's name to list in src/tools/git_changelog @@ -83,6 +85,8 @@ Starting a New Development Cycle * Run "src/tools/version_stamp.pl devel", then run autoconf +* Get the buildfarm's 'branches_of_interest.txt' file updated with the new + branch. Creating Back-Branch Release Notes ================================== diff --git a/src/tools/check_bison_recursion.pl b/src/tools/check_bison_recursion.pl index 14590663c6..913faa02f7 100755 --- a/src/tools/check_bison_recursion.pl +++ b/src/tools/check_bison_recursion.pl @@ -16,7 +16,7 @@ # To use: run bison with the -v switch, then feed the produced y.output # file to this script. 
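
As an aside on the byword()/ciprefix() changes in the zic.c hunk above: the patch replaces itsabbr() with a case-insensitive prefix test (ciprefix) and strips a leading "last" before looking up weekday names. The following is a rough, standalone Perl rendering of those matching rules, for illustration only; it is not part of the patch, the helper names are invented, and zic itself implements this in C.

    use strict;
    use warnings;

    # Illustration only: a simplified Perl version of the matching rules
    # added above (case-insensitive prefix test plus the "last" rewrite).
    sub ciprefix
    {
        my ($abbr, $word) = @_;
        return index(lc $word, lc $abbr) == 0;   # abbr is a prefix of word, ignoring case
    }

    sub lookup_weekday
    {
        my ($word) = @_;
        my @wday_names = qw(Sunday Monday Tuesday Wednesday Thursday Friday Saturday);

        # "lastSun" and friends: strip the documented "last" prefix, as byword() now does.
        $word =~ s/^last(?=[^-])//i;

        my @hits = grep { ciprefix($word, $_) } @wday_names;
        return @hits == 1 ? $hits[0] : undef;    # multiple inexact matches are rejected
    }

    for my $w (qw(lastSun Sa S))
    {
        my $hit = lookup_weekday($w);
        print "$w -> ", (defined $hit ? $hit : "ambiguous"), "\n";
    }
    # prints: lastSun -> Sunday, Sa -> Saturday, S -> ambiguous
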
# -# Copyright (c) 2011-2017, PostgreSQL Global Development Group +# Copyright (c) 2011-2018, PostgreSQL Global Development Group # # src/tools/check_bison_recursion.pl ################################################################# @@ -82,7 +82,7 @@ && !grep { $cur_nonterminal eq $_ } @rhs) { print -"Right recursion in rule $rule_number: $cur_nonterminal := $rhs\n"; + "Right recursion in rule $rule_number: $cur_nonterminal := $rhs\n"; } } } diff --git a/src/tools/copyright.pl b/src/tools/copyright.pl index 53942f12f0..08b9e5b42e 100755 --- a/src/tools/copyright.pl +++ b/src/tools/copyright.pl @@ -2,7 +2,7 @@ ################################################################# # copyright.pl -- update copyright notices throughout the source tree, idempotently. # -# Copyright (c) 2011-2017, PostgreSQL Global Development Group +# Copyright (c) 2011-2018, PostgreSQL Global Development Group # # src/tools/copyright.pl # @@ -62,6 +62,7 @@ sub wanted $line =~ s/$cc (\d{4}), $pgdg/$ccliteral $1-$year, $pgdg/i; } untie @lines; + return; } print "Manually update:\n"; diff --git a/src/tools/editors/emacs.samples b/src/tools/editors/emacs.samples index d9cd47cef2..a7152b04bc 100644 --- a/src/tools/editors/emacs.samples +++ b/src/tools/editors/emacs.samples @@ -48,7 +48,7 @@ (setq perl-brace-imaginary-offset 0) (setq perl-brace-offset 0) (setq perl-continued-brace-offset 4) - (setq perl-continued-statement-offset 4) + (setq perl-continued-statement-offset 2) (setq perl-indent-level 4) (setq perl-label-offset -2) (setq indent-tabs-mode t) @@ -62,12 +62,23 @@ ;;; documentation files -(add-hook 'sgml-mode-hook - (defun postgresql-sgml-mode-hook () +;; *.sgml files are actually XML +(add-to-list 'auto-mode-alist '("/postgres\\(ql\\)?/.*\\.sgml\\'" . nxml-mode)) + +(add-hook 'nxml-mode-hook + (defun postgresql-xml-mode-hook () (when (string-match "/postgres\\(ql\\)?/" buffer-file-name) (setq fill-column 78) - (setq indent-tabs-mode nil) - (setq sgml-basic-offset 1)))) + (setq indent-tabs-mode nil)))) + +;; The *.xsl files use 2-space indent, which is consistent with +;; docbook-xsl sources and also the nxml-mode default. But the *.sgml +;; files use 1-space indent, mostly for historical reasons at this +;; point. +(add-hook 'nxml-mode-hook + (defun postgresql-xml-src-mode-hook () + (when (string-match "/postgres\\(ql\\)?/.*\\.sgml\\'" buffer-file-name) + (setq nxml-child-indent 1)))) ;;; Makefiles diff --git a/src/tools/findoidjoins/Makefile b/src/tools/findoidjoins/Makefile index 5410d85ec2..1af0a93a89 100644 --- a/src/tools/findoidjoins/Makefile +++ b/src/tools/findoidjoins/Makefile @@ -2,7 +2,7 @@ # # Makefile for src/tools/findoidjoins # -# Copyright (c) 2003-2017, PostgreSQL Global Development Group +# Copyright (c) 2003-2018, PostgreSQL Global Development Group # # src/tools/findoidjoins/Makefile # @@ -13,7 +13,7 @@ top_builddir = ../../.. include $(top_builddir)/src/Makefile.global override CPPFLAGS := -I$(libpq_srcdir) $(CPPFLAGS) -override LDFLAGS := $(libpq_pgport) $(LDFLAGS) +LDFLAGS_INTERNAL += $(libpq_pgport) OBJS= findoidjoins.o diff --git a/src/tools/findoidjoins/README b/src/tools/findoidjoins/README index 7c5a5cf4b9..305454ab9a 100644 --- a/src/tools/findoidjoins/README +++ b/src/tools/findoidjoins/README @@ -5,7 +5,7 @@ findoidjoins This program scans a database and prints oid fields (also reg* fields) and the tables they join to. It is normally used to check the system -catalog join relationships (shown below for 10devel as of 2017-05-15). 
+catalog join relationships (shown below for 11devel as of 2018-05-07). Historically this has been run against an empty database such as template1, but there's a problem with that approach: some of the catalogs are empty @@ -16,7 +16,7 @@ catalogs in interesting ways. Note that unexpected matches may indicate bogus entries in system tables; don't accept a peculiar match without question. In particular, a field shown as joining to more than one target table is probably messed up. -In v10, the *only* fields that should join to more than one target +Currently, the *only* fields that should join to more than one target table are: pg_description.objoid, pg_depend.objid, pg_depend.refobjid, pg_shdescription.objoid, pg_shdepend.objid, pg_shdepend.refobjid, @@ -35,7 +35,7 @@ regression test. The oidjoins test should be updated after any revision in the patterns of cross-links between system tables. (Typically we update it at the end of each development cycle.) -NOTE: as of v10, make_oidjoins_check produces two bogus join checks: +NOTE: currently, make_oidjoins_check produces two bogus join checks: Join pg_catalog.pg_class.relfilenode => pg_catalog.pg_class.oid Join pg_catalog.pg_database.datlastsysoid => pg_catalog.pg_database.oid These are artifacts and should not be added to the oidjoins regression test. @@ -106,6 +106,7 @@ Join pg_catalog.pg_constraint.connamespace => pg_catalog.pg_namespace.oid Join pg_catalog.pg_constraint.conrelid => pg_catalog.pg_class.oid Join pg_catalog.pg_constraint.contypid => pg_catalog.pg_type.oid Join pg_catalog.pg_constraint.conindid => pg_catalog.pg_class.oid +Join pg_catalog.pg_constraint.conparentid => pg_catalog.pg_constraint.oid Join pg_catalog.pg_constraint.confrelid => pg_catalog.pg_class.oid Join pg_catalog.pg_conversion.connamespace => pg_catalog.pg_namespace.oid Join pg_catalog.pg_conversion.conowner => pg_catalog.pg_authid.oid @@ -154,6 +155,7 @@ Join pg_catalog.pg_opfamily.opfmethod => pg_catalog.pg_am.oid Join pg_catalog.pg_opfamily.opfnamespace => pg_catalog.pg_namespace.oid Join pg_catalog.pg_opfamily.opfowner => pg_catalog.pg_authid.oid Join pg_catalog.pg_partitioned_table.partrelid => pg_catalog.pg_class.oid +Join pg_catalog.pg_partitioned_table.partdefid => pg_catalog.pg_class.oid Join pg_catalog.pg_policy.polrelid => pg_catalog.pg_class.oid Join pg_catalog.pg_proc.pronamespace => pg_catalog.pg_namespace.oid Join pg_catalog.pg_proc.proowner => pg_catalog.pg_authid.oid diff --git a/src/tools/findoidjoins/findoidjoins.c b/src/tools/findoidjoins/findoidjoins.c index 7ce519d726..cbb7b59adc 100644 --- a/src/tools/findoidjoins/findoidjoins.c +++ b/src/tools/findoidjoins/findoidjoins.c @@ -1,14 +1,15 @@ /* * findoidjoins.c * - * Copyright (c) 2002-2017, PostgreSQL Global Development Group + * Copyright (c) 2002-2018, PostgreSQL Global Development Group * * src/tools/findoidjoins/findoidjoins.c */ #include "postgres_fe.h" -#include "catalog/pg_class.h" +#include "catalog/pg_class_d.h" +#include "fe_utils/connect.h" #include "libpq-fe.h" #include "pqexpbuffer.h" @@ -46,6 +47,14 @@ main(int argc, char **argv) exit(EXIT_FAILURE); } + res = PQexec(conn, ALWAYS_SECURE_SEARCH_PATH_SQL); + if (!res || PQresultStatus(res) != PGRES_TUPLES_OK) + { + fprintf(stderr, "sql error: %s\n", PQerrorMessage(conn)); + exit(EXIT_FAILURE); + } + PQclear(res); + /* Get a list of relations that have OIDs */ printfPQExpBuffer(&sql, "%s", diff --git a/src/tools/fix-old-flex-code.pl b/src/tools/fix-old-flex-code.pl index da99875599..0e0b572673 100644 --- 
a/src/tools/fix-old-flex-code.pl +++ b/src/tools/fix-old-flex-code.pl @@ -8,7 +8,7 @@ # let's suppress it by inserting a dummy reference to the variable. # (That's exactly what 2.5.36 and later do ...) # -# Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group +# Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group # Portions Copyright (c) 1994, Regents of the University of California # # src/tools/fix-old-flex-code.pl @@ -37,7 +37,7 @@ # Apply the desired patch. $ccode =~ -s|(struct yyguts_t \* yyg = \(struct yyguts_t\*\)yyscanner; /\* This var may be unused depending upon options. \*/ + s|(struct yyguts_t \* yyg = \(struct yyguts_t\*\)yyscanner; /\* This var may be unused depending upon options. \*/ .*?) return yy_is_jam \? 0 : yy_current_state; |$1 diff --git a/src/tools/git-external-diff b/src/tools/git-external-diff index 59fa36624c..39ddd01b3d 100644 --- a/src/tools/git-external-diff +++ b/src/tools/git-external-diff @@ -7,6 +7,41 @@ # path old-file old-hash old-mode new-file new-hash new-mode # 'path' is the git-tree-relative path of the file being diff'ed +=comment + +This info is copied from the old wiki page on Working with git: + +Context diffs with Git + +Copy git-external-diff into libexec/git-core/ of your git installation +and configure git to use that wrapper with: + + git config [--global] diff.external git-external-diff + +--global makes the configuration global for your user - otherwise it is +just configured for the current repository. + +For every command which displays diffs in some way you can use the +parameter "--[no-]-ext-diff" to enable respectively disable using the +external diff command. + +For the git diff command --ext-diff is enabled by default - for any +other command like git log -p or git format-patch it is not! + +This method should work on all platforms supported by git. + +If you do not want to configure the external wrapper permanently or you +want to overwrite it you can also invoke git like: + + export GIT_EXTERNAL_DIFF=git-external-diff + git diff --[no-]ext-diff + +Alternatively, configure a git alias in ~/.gitconfig or .git/config: + + [alias] + cdiff = !GIT_EXTERNAL_DIFF=git-context-diff git diff +=cut + old_hash="$3" new_hash=$(git hash-object "$5") diff --git a/src/tools/git_changelog b/src/tools/git_changelog index a66b64467f..0a714e27b3 100755 --- a/src/tools/git_changelog +++ b/src/tools/git_changelog @@ -57,7 +57,7 @@ require IPC::Open2; # (We could get this from "git branches", but not worth the trouble.) # NB: master must be first! 
my @BRANCHES = qw(master - REL_10_STABLE REL9_6_STABLE REL9_5_STABLE + REL_11_STABLE REL_10_STABLE REL9_6_STABLE REL9_5_STABLE REL9_4_STABLE REL9_3_STABLE REL9_2_STABLE REL9_1_STABLE REL9_0_STABLE REL8_4_STABLE REL8_3_STABLE REL8_2_STABLE REL8_1_STABLE REL8_0_STABLE REL7_4_STABLE REL7_3_STABLE REL7_2_STABLE REL7_1_STABLE REL7_0_PATCHES @@ -102,7 +102,8 @@ my %rel_tags; { my $commit = $1; my $tag = $2; - if ( $tag =~ /^REL\d+_\d+$/ + if ( $tag =~ /^REL_\d+_\d+$/ + || $tag =~ /^REL\d+_\d+$/ || $tag =~ /^REL\d+_\d+_\d+$/) { $rel_tags{$commit} = $tag; @@ -198,6 +199,7 @@ for my $branch (@BRANCHES) $last_tag = $sprout_tags{$commit}; # normalize branch names for making sprout tags + $last_tag =~ s/^(REL_\d+).*/$1_BR/; $last_tag =~ s/^(REL\d+_\d+).*/$1_BR/; } $c->{'last_tag'} = $last_tag; @@ -315,7 +317,8 @@ sub push_commit 'message' => $c->{'message'}, 'commit' => $c->{'commit'}, 'commits' => [], - 'timestamp' => $ts }; + 'timestamp' => $ts + }; push @{ $all_commits{$ht} }, $cc; } @@ -324,11 +327,13 @@ sub push_commit 'branch' => $c->{'branch'}, 'commit' => $c->{'commit'}, 'date' => $c->{'date'}, - 'last_tag' => $c->{'last_tag'} }; + 'last_tag' => $c->{'last_tag'} + }; push @{ $cc->{'commits'} }, $smallc; push @{ $all_commits_by_branch{ $c->{'branch'} } }, $cc; $cc->{'branch_position'}{ $c->{'branch'} } = -1 + @{ $all_commits_by_branch{ $c->{'branch'} } }; + return; } sub hash_commit @@ -341,7 +346,7 @@ sub parse_datetime { my ($dt) = @_; $dt =~ -/^(\d\d\d\d)-(\d\d)-(\d\d)\s+(\d\d):(\d\d):(\d\d)\s+([-+])(\d\d)(\d\d)$/; + /^(\d\d\d\d)-(\d\d)-(\d\d)\s+(\d\d):(\d\d):(\d\d)\s+([-+])(\d\d)(\d\d)$/; my $gm = Time::Local::timegm($6, $5, $4, $3, $2 - 1, $1); my $tzoffset = ($8 * 60 + $9) * 60; $tzoffset = -$tzoffset if $7 eq '-'; @@ -351,6 +356,7 @@ sub parse_datetime sub output_str { ($oldest_first) ? ($output_line .= sprintf(shift, @_)) : printf(@_); + return; } sub output_details @@ -391,6 +397,7 @@ sub output_details } } output_str("\n"); + return; } sub usage diff --git a/src/tools/ifaddrs/Makefile b/src/tools/ifaddrs/Makefile index a5153207fe..9e0e8945f8 100644 --- a/src/tools/ifaddrs/Makefile +++ b/src/tools/ifaddrs/Makefile @@ -2,7 +2,7 @@ # # Makefile for src/tools/ifaddrs # -# Copyright (c) 2003-2017, PostgreSQL Global Development Group +# Copyright (c) 2003-2018, PostgreSQL Global Development Group # # src/tools/ifaddrs/Makefile # diff --git a/src/tools/msvc/Install.pm b/src/tools/msvc/Install.pm index 18bded431f..7e1c9ac848 100644 --- a/src/tools/msvc/Install.pm +++ b/src/tools/msvc/Install.pm @@ -37,9 +37,10 @@ sub lcopy unlink $target || confess "Could not delete $target\n"; } - copy($src, $target) + (my $retval = copy($src, $target)) || confess "Could not copy $src to $target\n"; + return $retval; } sub Install @@ -95,13 +96,14 @@ sub Install my @top_dir = ("src"); @top_dir = ("src\\bin", "src\\interfaces") if ($insttype eq "client"); File::Find::find( - { wanted => sub { + { + wanted => sub { /^.*\.sample\z/s && push(@$sample_files, $File::Find::name); # Don't find files of in-tree temporary installations. $_ eq 'share' and $File::Find::prune = 1; - } + } }, @top_dir); CopySetOfFiles('config files', $sample_files, $target . '/share/'); @@ -134,6 +136,9 @@ sub Install CopyFiles( 'Information schema data', $target . '/share/', 'src/backend/catalog/', 'sql_features.txt'); + CopyFiles( + 'Error code data', $target . 
'/share/', + 'src/backend/utils/', 'errcodes.txt'); GenerateConversionScript($target); GenerateTimezoneFiles($target, $conf); GenerateTsearchFiles($target); @@ -152,13 +157,14 @@ sub Install push @pldirs, "src/pl/plpython" if $config->{python}; push @pldirs, "src/pl/tcl" if $config->{tcl}; File::Find::find( - { wanted => sub { + { + wanted => sub { /^(.*--.*\.sql|.*\.control)\z/s && push(@$pl_extension_files, $File::Find::name); # Don't find files of in-tree temporary installations. $_ eq 'share' and $File::Find::prune = 1; - } + } }, @pldirs); CopySetOfFiles('PL Extension files', @@ -168,6 +174,7 @@ sub Install GenerateNLSFiles($target, $config->{nls}, $majorver) if ($config->{nls}); print "Installation complete.\n"; + return; } sub EnsureDirectories @@ -178,6 +185,7 @@ sub EnsureDirectories { mkdir $target . '/' . $d unless -d ($target . '/' . $d); } + return; } sub CopyFiles @@ -192,9 +200,10 @@ sub CopyFiles print "."; $f = $basedir . $f; die "No file $f\n" if (!-f $f); - lcopy($f, $target . basename($f)); + lcopy($f, $target . basename($f)) || croak "Could not copy $f: $!\n"; } print "\n"; + return; } sub CopySetOfFiles @@ -210,6 +219,7 @@ sub CopySetOfFiles lcopy($_, $tgt) || croak "Could not copy $_: $!\n"; } print "\n"; + return; } sub CopySolutionOutput @@ -335,6 +345,7 @@ sub CopySolutionOutput print "."; } print "\n"; + return; } sub GenerateConversionScript @@ -358,20 +369,21 @@ sub GenerateConversionScript my $obj = shift @pieces; $sql .= "-- $se --> $de\n"; $sql .= -"CREATE OR REPLACE FUNCTION $func (INTEGER, INTEGER, CSTRING, INTERNAL, INTEGER) RETURNS VOID AS '\$libdir/$obj', '$func' LANGUAGE C STRICT;\n"; + "CREATE OR REPLACE FUNCTION $func (INTEGER, INTEGER, CSTRING, INTERNAL, INTEGER) RETURNS VOID AS '\$libdir/$obj', '$func' LANGUAGE C STRICT;\n"; $sql .= -"COMMENT ON FUNCTION $func(INTEGER, INTEGER, CSTRING, INTERNAL, INTEGER) IS 'internal conversion function for $se to $de';\n"; + "COMMENT ON FUNCTION $func(INTEGER, INTEGER, CSTRING, INTERNAL, INTEGER) IS 'internal conversion function for $se to $de';\n"; $sql .= "DROP CONVERSION pg_catalog.$name;\n"; $sql .= -"CREATE DEFAULT CONVERSION pg_catalog.$name FOR '$se' TO '$de' FROM $func;\n"; + "CREATE DEFAULT CONVERSION pg_catalog.$name FOR '$se' TO '$de' FROM $func;\n"; $sql .= -"COMMENT ON CONVERSION pg_catalog.$name IS 'conversion for $se to $de';\n\n"; + "COMMENT ON CONVERSION pg_catalog.$name IS 'conversion for $se to $de';\n\n"; } open($F, '>', "$target/share/conversion_create.sql") || die "Could not write to conversion_create.sql\n"; print $F $sql; close($F); print "\n"; + return; } sub GenerateTimezoneFiles @@ -381,8 +393,8 @@ sub GenerateTimezoneFiles my $mf = read_file("src/timezone/Makefile"); $mf =~ s{\\\r?\n}{}g; - $mf =~ /^TZDATA\s*:?=\s*(.*)$/m - || die "Could not find TZDATA line in timezone makefile\n"; + $mf =~ /^TZDATAFILES\s*:?=\s*(.*)$/m + || die "Could not find TZDATAFILES line in timezone makefile\n"; my @tzfiles = split /\s+/, $1; $mf =~ /^POSIXRULES\s*:?=\s*(.*)$/m @@ -397,11 +409,13 @@ sub GenerateTimezoneFiles foreach (@tzfiles) { my $tzfile = $_; - push(@args, "src/timezone/data/$tzfile"); + $tzfile =~ s|\$\(srcdir\)|src/timezone|; + push(@args, $tzfile); } system(@args); print "\n"; + return; } sub GenerateTsearchFiles @@ -443,6 +457,7 @@ sub GenerateTsearchFiles } close($F); print "\n"; + return; } sub CopyContribFiles @@ -457,20 +472,19 @@ sub CopyContribFiles opendir($D, $subdir) || croak "Could not opendir on $subdir!\n"; while (my $d = readdir($D)) { - # These configuration-based 
exclusions must match vcregress.pl - next if ($d eq "uuid-ossp" && !defined($config->{uuid})); - next if ($d eq "sslinfo" && !defined($config->{openssl})); - next if ($d eq "xml2" && !defined($config->{xml})); - next if ($d eq "hstore_plperl" && !defined($config->{perl})); - next if ($d eq "hstore_plpython" && !defined($config->{python})); - next if ($d eq "ltree_plpython" && !defined($config->{python})); + next if ($d eq "uuid-ossp" && !defined($config->{uuid})); + next if ($d eq "sslinfo" && !defined($config->{openssl})); + next if ($d eq "xml2" && !defined($config->{xml})); + next if ($d =~ /_plperl$/ && !defined($config->{perl})); + next if ($d =~ /_plpython$/ && !defined($config->{python})); next if ($d eq "sepgsql"); CopySubdirFiles($subdir, $d, $config, $target); } } print "\n"; + return; } sub CopySubdirFiles @@ -540,6 +554,49 @@ sub CopySubdirFiles } } + { + $flist = ''; + if ($mf =~ /^HEADERS\s*=\s*(.*)$/m) { $flist .= $1 } + my @modlist = (); + my %fmodlist = (); + while ($mf =~ /^HEADERS_([^\s=]+)\s*=\s*(.*)$/mg) { $fmodlist{$1} .= $2 } + + if ($mf =~ /^MODULE_big\s*=\s*(.*)$/m) + { + push @modlist, $1; + if ($flist ne '') + { + $fmodlist{$1} = $flist; + $flist = ''; + } + } + elsif ($mf =~ /^MODULES\s*=\s*(.*)$/m) + { + push @modlist, split /\s+/, $1; + } + + croak "HEADERS requires MODULE_big in $subdir $module" + if $flist ne ''; + + foreach my $mod (keys %fmodlist) + { + croak "HEADERS_$mod for unknown module in $subdir $module" + unless grep { $_ eq $mod } @modlist; + $flist = ParseAndCleanRule($fmodlist{$mod}, $mf); + EnsureDirectories($target, + "include", "include/server", + "include/server/$moduledir", + "include/server/$moduledir/$mod"); + foreach my $f (split /\s+/, $flist) + { + lcopy("$subdir/$module/$f", + "$target/include/server/$moduledir/$mod/" . basename($f)) + || croak("Could not copy file $f in $subdir $module"); + print '.'; + } + } + } + $flist = ''; if ($mf =~ /^DOCS\s*=\s*(.*)$/mg) { $flist .= $1 } if ($flist ne '') @@ -548,7 +605,7 @@ sub CopySubdirFiles # Special case for contrib/spi $flist = -"autoinc.example insert_username.example moddatetime.example refint.example timetravel.example" + "autoinc.example insert_username.example moddatetime.example refint.example" if ($module eq 'spi'); foreach my $f (split /\s+/, $flist) { @@ -557,6 +614,7 @@ sub CopySubdirFiles print '.'; } } + return; } sub ParseAndCleanRule @@ -596,7 +654,7 @@ sub CopyIncludeFiles 'Public headers', $target . '/include/', 'src/include/', 'postgres_ext.h', 'pg_config.h', 'pg_config_ext.h', - 'pg_config_os.h', 'dynloader.h', + 'pg_config_os.h', 'pg_config_manual.h'); lcopy('src/include/libpq/libpq-fs.h', $target . '/include/libpq/') || croak 'Could not copy libpq-fs.h'; @@ -620,8 +678,7 @@ sub CopyIncludeFiles CopyFiles( 'Server headers', $target . '/include/server/', - 'src/include/', 'pg_config.h', 'pg_config_ext.h', 'pg_config_os.h', - 'dynloader.h'); + 'src/include/', 'pg_config.h', 'pg_config_ext.h', 'pg_config_os.h'); CopyFiles( 'Grammar header', $target . '/include/server/parser/', @@ -672,6 +729,7 @@ sub CopyIncludeFiles $target . 
'/include/informix/esql/', 'src/interfaces/ecpg/include/', split /\s+/, $1); + return; } sub GenerateNLSFiles @@ -684,10 +742,11 @@ sub GenerateNLSFiles EnsureDirectories($target, "share/locale"); my @flist; File::Find::find( - { wanted => sub { + { + wanted => sub { /^nls\.mk\z/s && !push(@flist, $File::Find::name); - } + } }, "src"); foreach (@flist) @@ -707,13 +766,14 @@ sub GenerateNLSFiles my @args = ( "$nlspath\\bin\\msgfmt", '-o', -"$target\\share\\locale\\$lang\\LC_MESSAGES\\$prgm-$majorver.mo", + "$target\\share\\locale\\$lang\\LC_MESSAGES\\$prgm-$majorver.mo", $_); system(@args) && croak("Could not run msgfmt on $dir\\$_"); print "."; } } print "\n"; + return; } sub DetermineMajorVersion diff --git a/src/tools/msvc/MSBuildProject.pm b/src/tools/msvc/MSBuildProject.pm index 27329f9e36..149213378c 100644 --- a/src/tools/msvc/MSBuildProject.pm +++ b/src/tools/msvc/MSBuildProject.pm @@ -1,7 +1,7 @@ package MSBuildProject; # -# Package that encapsulates a MSBuild project file (Visual C++ 2010 or greater) +# Package that encapsulates a MSBuild project file (Visual C++ 2013 or greater) # # src/tools/msvc/MSBuildProject.pm # @@ -11,6 +11,8 @@ use strict; use warnings; use base qw(Project); +no warnings qw(redefine); ## no critic + sub _new { my $classname = shift; @@ -65,17 +67,22 @@ EOF $self->WriteItemDefinitionGroup( $f, 'Debug', - { defs => "_DEBUG;DEBUG=1", + { + defs => "_DEBUG;DEBUG=1", opt => 'Disabled', strpool => 'false', - runtime => 'MultiThreadedDebugDLL' }); + runtime => 'MultiThreadedDebugDLL' + }); $self->WriteItemDefinitionGroup( $f, 'Release', - { defs => "", + { + defs => "", opt => 'Full', strpool => 'true', - runtime => 'MultiThreadedDLL' }); + runtime => 'MultiThreadedDLL' + }); + return; } sub AddDefine @@ -83,6 +90,7 @@ sub AddDefine my ($self, $def) = @_; $self->{defines} .= $def . 
';'; + return; } sub WriteReferences @@ -108,6 +116,7 @@ EOF EOF } + return; } sub WriteFiles @@ -170,7 +179,7 @@ EOF if ($grammarFile =~ /\.y$/) { $outputFile =~ -s{^src\\pl\\plpgsql\\src\\gram.c$}{src\\pl\\plpgsql\\src\\pl_gram.c}; + s{^src\\pl\\plpgsql\\src\\gram.c$}{src\\pl\\plpgsql\\src\\pl_gram.c}; print $f < Running bison on $grammarFile @@ -219,6 +228,7 @@ EOF EOF } + return; } sub WriteConfigurationHeader @@ -230,6 +240,7 @@ sub WriteConfigurationHeader $self->{platform} EOF + return; } sub WriteConfigurationPropertyGroup @@ -246,8 +257,10 @@ sub WriteConfigurationPropertyGroup false MultiByte $p->{wholeopt} + $self->{PlatformToolset} EOF + return; } sub WritePropertySheetsPropertyGroup @@ -258,6 +271,7 @@ sub WritePropertySheetsPropertyGroup EOF + return; } sub WriteAdditionalProperties @@ -268,6 +282,7 @@ sub WriteAdditionalProperties .\\$cfgname\\$self->{name}\\ false EOF + return; } sub WriteItemDefinitionGroup @@ -320,13 +335,15 @@ sub WriteItemDefinitionGroup false .\\$cfgname\\$self->{name}\\$self->{name}.map false + + Console $targetmachine EOF if ($self->{disablelinkerwarnings}) { print $f -" /ignore:$self->{disablelinkerwarnings} \%(AdditionalOptions)\n"; + " /ignore:$self->{disablelinkerwarnings} \%(AdditionalOptions)\n"; } if ($self->{implib}) { @@ -358,6 +375,7 @@ EOF print $f < EOF + return; } sub Footer @@ -371,81 +389,45 @@ sub Footer EOF + return; } -package VC2010Project; +package VC2013Project; # -# Package that encapsulates a Visual C++ 2010 project file +# Package that encapsulates a Visual C++ 2013 project file # use strict; use warnings; use base qw(MSBuildProject); +no warnings qw(redefine); ## no critic + sub new { my $classname = shift; my $self = $classname->SUPER::_new(@_); bless($self, $classname); - $self->{vcver} = '10.00'; + $self->{vcver} = '12.00'; + $self->{PlatformToolset} = 'v120'; + $self->{ToolsVersion} = '12.0'; return $self; } -package VC2012Project; +package VC2015Project; # -# Package that encapsulates a Visual C++ 2012 project file +# Package that encapsulates a Visual C++ 2015 project file # use strict; use warnings; use base qw(MSBuildProject); -sub new -{ - my $classname = shift; - my $self = $classname->SUPER::_new(@_); - bless($self, $classname); - - $self->{vcver} = '11.00'; - $self->{PlatformToolset} = 'v110'; - - return $self; -} - -# This override adds the element -# to the PropertyGroup labeled "Configuration" -sub WriteConfigurationPropertyGroup -{ - my ($self, $f, $cfgname, $p) = @_; - my $cfgtype = - ($self->{type} eq "exe") - ? 'Application' - : ($self->{type} eq "dll" ? 
'DynamicLibrary' : 'StaticLibrary'); - - print $f < - $cfgtype - false - MultiByte - $p->{wholeopt} - $self->{PlatformToolset} - -EOF -} - -package VC2013Project; - -# -# Package that encapsulates a Visual C++ 2013 project file -# - -use strict; -use warnings; -use base qw(VC2012Project); +no warnings qw(redefine); ## no critic sub new { @@ -453,22 +435,24 @@ sub new my $self = $classname->SUPER::_new(@_); bless($self, $classname); - $self->{vcver} = '12.00'; - $self->{PlatformToolset} = 'v120'; - $self->{ToolsVersion} = '12.0'; + $self->{vcver} = '14.00'; + $self->{PlatformToolset} = 'v140'; + $self->{ToolsVersion} = '14.0'; return $self; } -package VC2015Project; +package VC2017Project; # -# Package that encapsulates a Visual C++ 2015 project file +# Package that encapsulates a Visual C++ 2017 project file # use strict; use warnings; -use base qw(VC2012Project); +use base qw(MSBuildProject); + +no warnings qw(redefine); ## no critic sub new { @@ -476,9 +460,9 @@ sub new my $self = $classname->SUPER::_new(@_); bless($self, $classname); - $self->{vcver} = '14.00'; - $self->{PlatformToolset} = 'v140'; - $self->{ToolsVersion} = '14.0'; + $self->{vcver} = '15.00'; + $self->{PlatformToolset} = 'v141'; + $self->{ToolsVersion} = '15.0'; return $self; } diff --git a/src/tools/msvc/Mkvcbuild.pm b/src/tools/msvc/Mkvcbuild.pm index 159e79ee7d..b562044fa7 100644 --- a/src/tools/msvc/Mkvcbuild.pm +++ b/src/tools/msvc/Mkvcbuild.pm @@ -28,6 +28,7 @@ my $libpgcommon; my $libpgfeutils; my $postgres; my $libpq; +my @unlink_on_exit; # Set of variables for modules in contrib/ and src/test/modules/ my $contrib_defines = { 'refint' => 'REFINT_VERBOSE' }; @@ -38,10 +39,12 @@ my $contrib_extralibs = undef; my $contrib_extraincludes = { 'dblink' => ['src/backend'] }; my $contrib_extrasource = { 'cube' => [ 'contrib/cube/cubescan.l', 'contrib/cube/cubeparse.y' ], - 'seg' => [ 'contrib/seg/segscan.l', 'contrib/seg/segparse.y' ], }; + 'seg' => [ 'contrib/seg/segscan.l', 'contrib/seg/segparse.y' ], +}; my @contrib_excludes = ( 'commit_ts', 'hstore_plperl', 'hstore_plpython', 'intagg', + 'jsonb_plperl', 'jsonb_plpython', 'ltree_plpython', 'pgcrypto', 'sepgsql', 'brin', 'test_extensions', 'test_pg_dump', @@ -62,14 +65,17 @@ my $frontend_extralibs = { 'initdb' => ['ws2_32.lib'], 'pg_restore' => ['ws2_32.lib'], 'pgbench' => ['ws2_32.lib'], - 'psql' => ['ws2_32.lib'] }; + 'psql' => ['ws2_32.lib'] +}; my $frontend_extraincludes = { 'initdb' => ['src/timezone'], - 'psql' => ['src/backend'] }; + 'psql' => ['src/backend'] +}; my $frontend_extrasource = { 'psql' => ['src/bin/psql/psqlscanslash.l'], 'pgbench' => - [ 'src/bin/pgbench/exprscan.l', 'src/bin/pgbench/exprparse.y' ] }; + [ 'src/bin/pgbench/exprscan.l', 'src/bin/pgbench/exprparse.y' ] +}; my @frontend_excludes = ( 'pgevent', 'pg_basebackup', 'pg_rewind', 'pg_dump', 'pg_waldump', 'scripts'); @@ -90,16 +96,18 @@ sub mkvcbuild chklocale.c crypt.c fls.c fseeko.c getrusage.c inet_aton.c random.c srandom.c getaddrinfo.c gettimeofday.c inet_net_ntop.c kill.c open.c erand48.c snprintf.c strlcat.c strlcpy.c dirmod.c noblock.c path.c + dirent.c dlopen.c getopt.c getopt_long.c + pread.c pwrite.c pg_strong_random.c pgcheckdir.c pgmkdirp.c pgsleep.c pgstrcasecmp.c pqsignal.c mkdtemp.c qsort.c qsort_arg.c quotes.c system.c - sprompt.c tar.c thread.c getopt.c getopt_long.c dirent.c + sprompt.c strerror.c tar.c thread.c win32env.c win32error.c win32security.c win32setlocale.c); push(@pgportfiles, 'rint.c') if ($vsVersion < '12.00'); if ($vsVersion >= '9.00') { - push(@pgportfiles, 
'pg_crc32c_choose.c'); + push(@pgportfiles, 'pg_crc32c_sse42_choose.c'); push(@pgportfiles, 'pg_crc32c_sse42.c'); push(@pgportfiles, 'pg_crc32c_sb8.c'); } @@ -109,8 +117,9 @@ sub mkvcbuild } our @pgcommonallfiles = qw( - base64.c config_info.c controldata_utils.c exec.c ip.c keywords.c - md5.c pg_lzcompress.c pgfnames.c psprintf.c relpath.c rmtree.c + base64.c config_info.c controldata_utils.c exec.c file_perm.c ip.c + keywords.c link-canary.c md5.c + pg_lzcompress.c pgfnames.c psprintf.c relpath.c rmtree.c saslprep.c scram-common.c string.c unicode_norm.c username.c wait_error.c); @@ -130,7 +139,7 @@ sub mkvcbuild our @pgcommonbkndfiles = @pgcommonallfiles; our @pgfeutilsfiles = qw( - mbprint.c print.c psqlscan.l psqlscan.c simple_list.c string_utils.c); + conditional.c mbprint.c print.c psqlscan.l psqlscan.c simple_list.c string_utils.c); $libpgport = $solution->AddProject('libpgport', 'lib', 'misc'); $libpgport->AddDefine('FRONTEND'); @@ -149,9 +158,6 @@ sub mkvcbuild $postgres->AddIncludeDir('src/backend'); $postgres->AddDir('src/backend/port/win32'); $postgres->AddFile('src/backend/utils/fmgrtab.c'); - $postgres->ReplaceFile( - 'src/backend/port/dynloader.c', - 'src/backend/port/dynloader/win32.c'); $postgres->ReplaceFile('src/backend/port/pg_sema.c', 'src/backend/port/win32_sema.c'); $postgres->ReplaceFile('src/backend/port/pg_shmem.c', @@ -177,10 +183,11 @@ sub mkvcbuild $postgres->AddLibrary('wldap32.lib') if ($solution->{options}->{ldap}); $postgres->FullExportDLL('postgres.lib'); - # The OBJS scraper doesn't know about ifdefs, so remove be-secure-openssl.c - # if building without OpenSSL + # The OBJS scraper doesn't know about ifdefs, so remove appropriate files + # if building without OpenSSL. if (!$solution->{options}->{openssl}) { + $postgres->RemoveFile('src/backend/libpq/be-secure-common.c'); $postgres->RemoveFile('src/backend/libpq/be-secure-openssl.c'); } @@ -234,19 +241,14 @@ sub mkvcbuild $libpq->UseDef('src/interfaces/libpq/libpqdll.def'); $libpq->ReplaceFile('src/interfaces/libpq/libpqrc.c', 'src/interfaces/libpq/libpq.rc'); - $libpq->AddReference($libpgport); + $libpq->AddReference($libpgcommon, $libpgport); - # The OBJS scraper doesn't know about ifdefs, so remove fe-secure-openssl.c - # and sha2_openssl.c if building without OpenSSL, and remove sha2.c if - # building with OpenSSL. + # The OBJS scraper doesn't know about ifdefs, so remove appropriate files + # if building without OpenSSL. if (!$solution->{options}->{openssl}) { + $libpq->RemoveFile('src/interfaces/libpq/fe-secure-common.c'); $libpq->RemoveFile('src/interfaces/libpq/fe-secure-openssl.c'); - $libpq->RemoveFile('src/common/sha2_openssl.c'); - } - else - { - $libpq->RemoveFile('src/common/sha2.c'); } my $libpqwalreceiver = @@ -263,7 +265,7 @@ sub mkvcbuild 'libpgtypes', 'dll', 'interfaces', 'src/interfaces/ecpg/pgtypeslib'); $pgtypes->AddDefine('FRONTEND'); - $pgtypes->AddReference($libpgport); + $pgtypes->AddReference($libpgcommon, $libpgport); $pgtypes->UseDef('src/interfaces/ecpg/pgtypeslib/pgtypeslib.def'); $pgtypes->AddIncludeDir('src/interfaces/ecpg/include'); @@ -500,13 +502,18 @@ sub mkvcbuild my $hstore_plpython = AddTransformModule( 'hstore_plpython' . $pymajorver, 'contrib/hstore_plpython', 'plpython' . $pymajorver, 'src/pl/plpython', - 'hstore', 'contrib/hstore'); + 'hstore', 'contrib'); $hstore_plpython->AddDefine( 'PLPYTHON_LIBNAME="plpython' . $pymajorver . '"'); + my $jsonb_plpython = AddTransformModule( + 'jsonb_plpython' . $pymajorver, 'contrib/jsonb_plpython', + 'plpython' . 
$pymajorver, 'src/pl/plpython'); + $jsonb_plpython->AddDefine( + 'PLPYTHON_LIBNAME="plpython' . $pymajorver . '"'); my $ltree_plpython = AddTransformModule( 'ltree_plpython' . $pymajorver, 'contrib/ltree_plpython', 'plpython' . $pymajorver, 'src/pl/plpython', - 'ltree', 'contrib/ltree'); + 'ltree', 'contrib'); $ltree_plpython->AddDefine( 'PLPYTHON_LIBNAME="plpython' . $pymajorver . '"'); } @@ -517,22 +524,157 @@ sub mkvcbuild my $plperl = $solution->AddProject('plperl', 'dll', 'PLs', 'src/pl/plperl'); $plperl->AddIncludeDir($solution->{options}->{perl} . '/lib/CORE'); + $plperl->AddReference($postgres); + + my $perl_path = $solution->{options}->{perl} . '\lib\CORE\*perl*'; + + # ActivePerl 5.16 provided perl516.lib; 5.18 provided libperl518.a + # Starting with ActivePerl 5.24, both perlnn.lib and libperlnn.a are provided. + # In this case, prefer .lib. + my @perl_libs = + reverse sort grep { /perl\d+\.lib$|libperl\d+\.a$/ } + glob($perl_path); + if (@perl_libs > 0) + { + $plperl->AddLibrary($perl_libs[0]); + } + else + { + die + "could not identify perl library version matching pattern $perl_path\n"; + } # Add defines from Perl's ccflags; see PGAC_CHECK_PERL_EMBED_CCFLAGS my @perl_embed_ccflags; foreach my $f (split(" ", $Config{ccflags})) { - if ( $f =~ /^-D[^_]/ - || $f =~ /^-D_USE_32BIT_TIME_T/) + if ($f =~ /^-D[^_]/) { $f =~ s/\-D//; push(@perl_embed_ccflags, $f); } } - # Also, a hack to prevent duplicate definitions of uid_t/gid_t + # hack to prevent duplicate definitions of uid_t/gid_t push(@perl_embed_ccflags, 'PLPERL_HAVE_UID_GID'); + # Windows offers several 32-bit ABIs. Perl is sensitive to + # sizeof(time_t), one of the ABI dimensions. To get 32-bit time_t, + # use "cl -D_USE_32BIT_TIME_T" or plain "gcc". For 64-bit time_t, use + # "gcc -D__MINGW_USE_VC2005_COMPAT" or plain "cl". Before MSVC 2005, + # plain "cl" chose 32-bit time_t. PostgreSQL doesn't support building + # with pre-MSVC-2005 compilers, but it does support linking to Perl + # built with such a compiler. MSVC-built Perl 5.13.4 and later report + # -D_USE_32BIT_TIME_T in $Config{ccflags} if applicable, but + # MinGW-built Perl never reports -D_USE_32BIT_TIME_T despite typically + # needing it. Ignore the $Config{ccflags} opinion about + # -D_USE_32BIT_TIME_T, and use a runtime test to deduce the ABI Perl + # expects. Specifically, test use of PL_modglobal, which maps to a + # PerlInterpreter field whose position depends on sizeof(time_t). + if ($solution->{platform} eq 'Win32') + { + my $source_file = 'conftest.c'; + my $obj = 'conftest.obj'; + my $exe = 'conftest.exe'; + my @conftest = ($source_file, $obj, $exe); + push @unlink_on_exit, @conftest; + unlink $source_file; + open my $o, '>', $source_file + || croak "Could not write to $source_file"; + print $o ' + /* compare to plperl.h */ + #define __inline__ __inline + #define PERL_NO_GET_CONTEXT + #include + #include + + int + main(int argc, char **argv) + { + int dummy_argc = 1; + char *dummy_argv[1] = {""}; + char *dummy_env[1] = {NULL}; + static PerlInterpreter *interp; + + PERL_SYS_INIT3(&dummy_argc, (char ***) &dummy_argv, + (char ***) &dummy_env); + interp = perl_alloc(); + perl_construct(interp); + { + dTHX; + const char key[] = "dummy"; + + PL_exit_flags |= PERL_EXIT_DESTRUCT_END; + hv_store(PL_modglobal, key, sizeof(key) - 1, newSViv(1), 0); + return hv_fetch(PL_modglobal, key, sizeof(key) - 1, 0) == NULL; + } + } +'; + close $o; + + # Build $source_file with a given #define, and return a true value + # if a run of the resulting binary exits successfully. 
+ my $try_define = sub { + my $define = shift; + + unlink $obj, $exe; + my @cmd = ( + 'cl', + '-I' . $solution->{options}->{perl} . '/lib/CORE', + (map { "-D$_" } @perl_embed_ccflags, $define || ()), + $source_file, + '/link', + $perl_libs[0]); + my $compile_output = `@cmd 2>&1`; + -f $exe || die "Failed to build Perl test:\n$compile_output"; + + { + + # Some builds exhibit runtime failure through Perl warning + # 'Can't spawn "conftest.exe"'; suppress that. + no warnings; + + # Disable error dialog boxes like we do in the postmaster. + # Here, we run code that triggers relevant errors. + use Win32API::File qw(SetErrorMode :SEM_); + my $oldmode = SetErrorMode( + SEM_FAILCRITICALERRORS | SEM_NOGPFAULTERRORBOX); + system(".\\$exe"); + SetErrorMode($oldmode); + } + + return !($? >> 8); + }; + + my $define_32bit_time = '_USE_32BIT_TIME_T'; + my $ok_now = $try_define->(undef); + my $ok_32bit = $try_define->($define_32bit_time); + unlink @conftest; + if (!$ok_now && !$ok_32bit) + { + + # Unsupported configuration. Since we used %Config from the + # Perl running the build scripts, this is expected if + # attempting to link with some other Perl. + die "Perl test fails with or without -D$define_32bit_time"; + } + elsif ($ok_now && $ok_32bit) + { + + # Resulting build may work, but it's especially important to + # verify with "vcregress plcheck". A refined test may avoid + # this outcome. + warn "Perl test passes with or without -D$define_32bit_time"; + } + elsif ($ok_32bit) + { + push(@perl_embed_ccflags, $define_32bit_time); + } # else $ok_now, hence no flag required + } + + print "CFLAGS recommended by Perl: $Config{ccflags}\n"; + print "CFLAGS to compile embedded Perl: ", + (join ' ', map { "-D$_" } @perl_embed_ccflags), "\n"; foreach my $f (@perl_embed_ccflags) { $plperl->AddDefine($f); @@ -543,7 +685,7 @@ sub mkvcbuild (my $xsc = $xs) =~ s/\.xs/.c/; if (Solution::IsNewer("$plperlsrc$xsc", "$plperlsrc$xs")) { - my $xsubppdir = first { -e "$_/ExtUtils/xsubpp" } @INC; + my $xsubppdir = first { -e "$_/ExtUtils/xsubpp" } (@INC); print "Building $plperlsrc$xsc...\n"; system( $solution->{options}->{perl} . '/bin/perl ' @@ -602,29 +744,20 @@ sub mkvcbuild die 'Failed to create plperl_opmask.h' . "\n"; } } - $plperl->AddReference($postgres); - my $perl_path = $solution->{options}->{perl} . 
'\lib\CORE\perl*.lib'; - my @perl_libs = - grep { /perl\d+.lib$/ } glob($perl_path); - if (@perl_libs == 1) - { - $plperl->AddLibrary($perl_libs[0]); - } - else - { - die -"could not identify perl library version matching pattern $perl_path\n"; - } - # Add transform module dependent on plperl + # Add transform modules dependent on plperl my $hstore_plperl = AddTransformModule( 'hstore_plperl', 'contrib/hstore_plperl', 'plperl', 'src/pl/plperl', - 'hstore', 'contrib/hstore'); + 'hstore', 'contrib'); + my $jsonb_plperl = AddTransformModule( + 'jsonb_plperl', 'contrib/jsonb_plperl', + 'plperl', 'src/pl/plperl'); foreach my $f (@perl_embed_ccflags) { $hstore_plperl->AddDefine($f); + $jsonb_plperl->AddDefine($f); } } @@ -722,24 +855,27 @@ sub AddSimpleFrontend # Add a simple transform module sub AddTransformModule { - my $n = shift; - my $n_src = shift; - my $pl_proj_name = shift; - my $pl_src = shift; - my $transform_name = shift; - my $transform_src = shift; - - my $transform_proj = undef; - foreach my $proj (@{ $solution->{projects}->{'contrib'} }) + my $n = shift; + my $n_src = shift; + my $pl_proj_name = shift; + my $pl_src = shift; + my $type_name = shift; + my $type_src = shift; + + my $type_proj = undef; + if ($type_name) { - if ($proj->{name} eq $transform_name) + foreach my $proj (@{ $solution->{projects}->{'contrib'} }) { - $transform_proj = $proj; - last; + if ($proj->{name} eq $type_name) + { + $type_proj = $proj; + last; + } } + die "could not find base module $type_name for transform module $n" + if (!defined($type_proj)); } - die "could not find base module $transform_name for transform module $n" - if (!defined($transform_proj)); my $pl_proj = undef; foreach my $proj (@{ $solution->{projects}->{'PLs'} }) @@ -770,13 +906,16 @@ sub AddTransformModule } # Add base module dependencies - $p->AddIncludeDir($transform_src); - $p->AddIncludeDir($transform_proj->{includes}); - foreach my $trans_lib (@{ $transform_proj->{libraries} }) + if ($type_proj) { - $p->AddLibrary($trans_lib); + $p->AddIncludeDir($type_src); + $p->AddIncludeDir($type_proj->{includes}); + foreach my $type_lib (@{ $type_proj->{libraries} }) + { + $p->AddLibrary($type_lib); + } + $p->AddReference($type_proj); } - $p->AddReference($transform_proj); return $p; } @@ -819,6 +958,7 @@ sub AddContrib # Are there any output data files to build? 
GenerateContribSqlFiles($n, $mf); + return; } sub GenerateContribSqlFiles @@ -855,7 +995,7 @@ sub GenerateContribSqlFiles print "Building $out from $in (contrib/$n)...\n"; my $cont = Project::read_file("contrib/$n/$in"); my $dn = $out; - $dn =~ s/\.sql$//; + $dn =~ s/\.sql$//; $cont =~ s/MODULE_PATHNAME/\$libdir\/$dn/g; my $o; open($o, '>', "contrib/$n/$out") @@ -865,6 +1005,7 @@ sub GenerateContribSqlFiles } } } + return; } sub AdjustContribProj @@ -875,6 +1016,7 @@ sub AdjustContribProj \@contrib_uselibpq, \@contrib_uselibpgport, \@contrib_uselibpgcommon, $contrib_extralibs, $contrib_extrasource, $contrib_extraincludes); + return; } sub AdjustFrontendProj @@ -885,6 +1027,7 @@ sub AdjustFrontendProj \@frontend_uselibpq, \@frontend_uselibpgport, \@frontend_uselibpgcommon, $frontend_extralibs, $frontend_extrasource, $frontend_extraincludes); + return; } sub AdjustModule @@ -941,6 +1084,12 @@ sub AdjustModule $proj->AddFile($i); } } + return; +} + +END +{ + unlink @unlink_on_exit; } 1; diff --git a/src/tools/msvc/Project.pm b/src/tools/msvc/Project.pm index 9817b9439a..0d3554659b 100644 --- a/src/tools/msvc/Project.pm +++ b/src/tools/msvc/Project.pm @@ -16,7 +16,8 @@ sub _new my $good_types = { lib => 1, exe => 1, - dll => 1, }; + dll => 1, + }; confess("Bad project type: $type\n") unless exists $good_types->{$type}; my $self = { name => $name, @@ -32,7 +33,8 @@ sub _new solution => $solution, disablewarnings => '4018;4244;4273;4102;4090;4267', disablelinkerwarnings => '', - platform => $solution->{platform}, }; + platform => $solution->{platform}, + }; bless($self, $classname); return $self; @@ -43,6 +45,7 @@ sub AddFile my ($self, $filename) = @_; $self->{files}->{$filename} = 1; + return; } sub AddFiles @@ -54,6 +57,7 @@ sub AddFiles { $self->{files}->{ $dir . "/" . $f } = 1; } + return; } sub ReplaceFile @@ -108,6 +112,7 @@ sub RelocateFiles $self->AddFile($targetdir . '/' . basename($f)); } } + return; } sub AddReference @@ -120,6 +125,7 @@ sub AddReference $self->AddLibrary( "__CFGNAME__/" . $ref->{name} . "/" . $ref->{name} . ".lib"); } + return; } sub AddLibrary @@ -136,6 +142,7 @@ sub AddLibrary { push @{ $self->{suffixlib} }, $lib; } + return; } sub AddIncludeDir @@ -147,6 +154,7 @@ sub AddIncludeDir $self->{includes} .= ';'; } $self->{includes} .= $inc; + return; } sub AddPrefixInclude @@ -154,6 +162,7 @@ sub AddPrefixInclude my ($self, $inc) = @_; $self->{prefixincludes} = $inc . ';' . $self->{prefixincludes}; + return; } sub AddDefine @@ -162,6 +171,7 @@ sub AddDefine $def =~ s/"/""/g; $self->{defines} .= $def . ';'; + return; } sub FullExportDLL @@ -171,6 +181,7 @@ sub FullExportDLL $self->{builddef} = 1; $self->{def} = "./__CFGNAME__/$self->{name}/$self->{name}.def"; $self->{implib} = "__CFGNAME__/$self->{name}/$libname"; + return; } sub UseDef @@ -178,6 +189,7 @@ sub UseDef my ($self, $def) = @_; $self->{def} = $def; + return; } sub AddDir @@ -192,7 +204,7 @@ sub AddDir { next if $subdir eq "\$(top_builddir)/src/timezone" - ; #special case for non-standard include + ; #special case for non-standard include next if $reldir . "/" . $subdir eq "src/backend/port/darwin"; @@ -217,6 +229,7 @@ sub AddDir if ($filter eq "LIBOBJS") { + no warnings qw(once); if (grep(/$p/, @main::pgportfiles, @main::pgcommonfiles) == 1) { @@ -282,6 +295,7 @@ sub AddDir } $self->AddDirResourceFile($reldir); + return; } # If the directory's Makefile bears a description string, add a resource file. 
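
A note on the many hunks in this patch (Project.pm, Solution.pm, Install.pm, Mkvcbuild.pm and others) that append a bare "return;" to Perl subroutines: this appears to be conformance with the Perl::Critic policy Subroutines::RequireFinalReturn rather than a behavioural change. Without an explicit return, a sub silently hands back the value of its last statement, which callers can come to rely on by accident. The snippet below is a hypothetical example, not taken from the patch:

    use strict;
    use warnings;

    # Hypothetical example: the implicit return leaks the last expression.
    sub bump  { my ($self) = @_; $self->{count}++ }            # returns the pre-increment count
    sub bump2 { my ($self) = @_; $self->{count}++; return }    # returns the empty list

    my $obj = { count => 0 };
    my $r1  = bump($obj);
    my $r2  = bump2($obj);
    print "implicit: ", (defined $r1 ? $r1 : "undef"), "\n";   # implicit: 0
    print "explicit: ", (defined $r2 ? $r2 : "undef"), "\n";   # explicit: undef
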
@@ -297,6 +311,7 @@ sub AddDirResourceFile if ($mf =~ /^PGAPPICON\s*=\s*(.*)$/m) { $ico = $1; } $self->AddResourceFile($reldir, $desc, $ico); } + return; } sub AddResourceFile @@ -330,6 +345,7 @@ sub AddResourceFile close($i); } $self->AddFile("$dir/win32ver.rc"); + return; } sub DisableLinkerWarnings @@ -339,21 +355,22 @@ sub DisableLinkerWarnings $self->{disablelinkerwarnings} .= ',' unless ($self->{disablelinkerwarnings} eq ''); $self->{disablelinkerwarnings} .= $warnings; + return; } sub Save { my ($self) = @_; -# If doing DLL and haven't specified a DEF file, do a full export of all symbols -# in the project. + # If doing DLL and haven't specified a DEF file, do a full export of all symbols + # in the project. if ($self->{type} eq "dll" && !$self->{def}) { $self->FullExportDLL($self->{name} . ".lib"); } -# Warning 4197 is about double exporting, disable this per -# http://connect.microsoft.com/VisualStudio/feedback/ViewFeedback.aspx?FeedbackID=99193 + # Warning 4197 is about double exporting, disable this per + # http://connect.microsoft.com/VisualStudio/feedback/ViewFeedback.aspx?FeedbackID=99193 $self->DisableLinkerWarnings('4197') if ($self->{platform} eq 'x64'); # Dump the project @@ -364,6 +381,7 @@ sub Save $self->WriteFiles($f); $self->Footer($f); close($f); + return; } sub GetAdditionalLinkerDependencies diff --git a/src/tools/msvc/README b/src/tools/msvc/README index b61ddb8791..4ab81d3402 100644 --- a/src/tools/msvc/README +++ b/src/tools/msvc/README @@ -4,7 +4,7 @@ MSVC build ========== This directory contains the tools required to build PostgreSQL using -Microsoft Visual Studio 2005 - 2011. This builds the whole backend, not just +Microsoft Visual Studio 2013 - 2017. This builds the whole backend, not just the libpq frontend library. For more information, see the documentation chapter "Installation on Windows" and the description below. @@ -47,7 +47,6 @@ arguments. - User tools - build.pl tool to build the binaries -builddoc.pl tool to build the docs clean.bat batch file for cleaning up generated files install.pl tool to install the generated files mkvcbuild.pl tool to generate the Visual Studio build files @@ -68,14 +67,12 @@ Install.pm module containing the install logic Mkvcbuild.pm module containing the code to generate the Visual Studio build (project/solution) files MSBuildProject.pm module containing the code to generate MSBuild based - project files (Visual Studio 2010 or greater) + project files (Visual Studio 2013 or greater) Project.pm module containing the common code to generate the Visual Studio project files. Also provides the common interface of all project file generators Solution.pm module containing the code to generate the Visual Studio solution files. -VCBuildProject.pm module containing the code to generate VCBuild based - project files (Visual Studio 2005/2008) VSObjectFactory.pm factory module providing the code to create the appropriate project/solution files for the current environment @@ -91,13 +88,12 @@ config_default.pl to create the configuration arguments. These configuration arguments are passed over to Mkvcbuild::mkvcbuild (Mkvcbuild.pm) which creates the Visual Studio project and solution files. 
It does this by using VSObjectFactory::CreateSolution to create an object -implementing the Solution interface (this could be either a VS2005Solution, -a VS2008Solution, a VS2010Solution or a VS2012Solution, all in Solution.pm, -depending on the user's build environment) and adding objects implementing -the corresponding Project interface (VC2005Project or VC2008Project from -VCBuildProject.pm or VC2010Project or VC2012Project from MSBuildProject.pm) -to it. +implementing the Solution interface (this could be either a VS2013Solution, +or a VS2015Solution or a VS2017Solution, all in Solution.pm, depending on +the user's build environment) and adding objects implementing the corresponding +Project interface (VC2013Project or VC2015Project or VC2017Project from +MSBuildProject.pm) to it. When Solution::Save is called, the implementations of Solution and Project save their content in the appropriate format. -The final step of starting the appropriate build program (msbuild or vcbuild) -is performed in build.pl again. +The final step of starting the appropriate build program (msbuild) is +performed in build.pl again. diff --git a/src/tools/msvc/Solution.pm b/src/tools/msvc/Solution.pm index 01e5846b63..68cf812f01 100644 --- a/src/tools/msvc/Solution.pm +++ b/src/tools/msvc/Solution.pm @@ -10,6 +10,8 @@ use strict; use warnings; use VSObjectFactory; +no warnings qw(redefine); ## no critic + sub _new { my $classname = shift; @@ -22,7 +24,8 @@ sub _new VisualStudioVersion => undef, MinimumVisualStudioVersion => undef, vcver => undef, - platform => undef, }; + platform => undef, + }; bless($self, $classname); $self->DeterminePlatform(); @@ -51,7 +54,7 @@ sub _new unless $options->{wal_blocksize}; # undef or 0 means default die "Bad wal_blocksize $options->{wal_blocksize}" unless grep { $_ == $options->{wal_blocksize} } - (1, 2, 4, 8, 16, 32, 64); + (1, 2, 4, 8, 16, 32, 64); $options->{wal_segsize} = 16 unless $options->{wal_segsize}; # undef or 0 means default die "Bad wal_segsize $options->{wal_segsize}" @@ -74,6 +77,7 @@ sub DeterminePlatform $? >> 8 == 0 or die "cl command not found"; $self->{platform} = ($output =~ /^\/favor:<.+AMD64/m) ? 'x64' : 'Win32'; print "Detected hardware platform: $self->{platform}\n"; + return; } # Return 1 if $oldfile is newer than $newfile, or if $newfile doesn't exist. 
@@ -81,6 +85,7 @@ sub DeterminePlatform sub IsNewer { my ($newfile, $oldfile) = @_; + -e $oldfile or warn "source file \"$oldfile\" does not exist"; if ( $oldfile ne 'src/tools/msvc/config.pl' && $oldfile ne 'src/tools/msvc/config_default.pl') { @@ -110,6 +115,7 @@ sub copyFile } close($i); close($o); + return; } sub GenerateFiles @@ -156,7 +162,7 @@ sub GenerateFiles { s{PG_VERSION "[^"]+"}{PG_VERSION "$self->{strver}$extraver"}; s{PG_VERSION_NUM \d+}{PG_VERSION_NUM $self->{numver}}; -s{PG_VERSION_STR "[^"]+"}{PG_VERSION_STR "PostgreSQL $self->{strver}$extraver, compiled by Visual C++ build " CppAsString2(_MSC_VER) ", $bits-bit"}; + s{PG_VERSION_STR "[^"]+"}{PG_VERSION_STR "PostgreSQL $self->{strver}$extraver, compiled by Visual C++ build " CppAsString2(_MSC_VER) ", $bits-bit"}; print $o $_; } print $o "#define PG_MAJORVERSION \"$self->{majorver}\"\n"; @@ -175,12 +181,9 @@ s{PG_VERSION_STR "[^"]+"}{PG_VERSION_STR "PostgreSQL $self->{strver}$extraver, c "\n"; print $o "#define RELSEG_SIZE ", (1024 / $self->{options}->{blocksize}) * - $self->{options}->{segsize} * - 1024, "\n"; + $self->{options}->{segsize} * 1024, "\n"; print $o "#define XLOG_BLCKSZ ", 1024 * $self->{options}->{wal_blocksize}, "\n"; - print $o "#define XLOG_SEG_SIZE (", $self->{options}->{wal_segsize}, - " * 1024 * 1024)\n"; if ($self->{options}->{float4byval}) { @@ -265,15 +268,24 @@ s{PG_VERSION_STR "[^"]+"}{PG_VERSION_STR "PostgreSQL $self->{strver}$extraver, c "src/interfaces/ecpg/pgtypeslib/exports.txt", "LIBPGTYPES"); - if (IsNewer( - 'src/backend/utils/fmgrtab.c', 'src/include/catalog/pg_proc.h')) + chdir('src/backend/utils'); + my $pg_language_dat = '../../../src/include/catalog/pg_language.dat'; + my $pg_proc_dat = '../../../src/include/catalog/pg_proc.dat'; + if ( IsNewer('fmgr-stamp', 'Gen_fmgrtab.pl') + || IsNewer('fmgr-stamp', '../catalog/Catalog.pm') + || IsNewer('fmgr-stamp', $pg_language_dat) + || IsNewer('fmgr-stamp', $pg_proc_dat) + || IsNewer('fmgr-stamp', '../../../src/include/access/transam.h')) { - print "Generating fmgrtab.c, fmgroids.h, fmgrprotos.h...\n"; - chdir('src/backend/utils'); system( -"perl -I ../catalog Gen_fmgrtab.pl ../../../src/include/catalog/pg_proc.h"); - chdir('../../..'); + "perl -I ../catalog Gen_fmgrtab.pl -I../../../src/include/ $pg_language_dat $pg_proc_dat" + ); + open(my $f, '>', 'fmgr-stamp') + || confess "Could not touch fmgr-stamp"; + close($f); } + chdir('../../..'); + if (IsNewer( 'src/include/utils/fmgroids.h', 'src/backend/utils/fmgroids.h')) @@ -309,29 +321,22 @@ s{PG_VERSION_STR "[^"]+"}{PG_VERSION_STR "PostgreSQL $self->{strver}$extraver, c 'src/include/storage/lwlocknames.h'); } - if (IsNewer( - 'src/include/dynloader.h', 'src/backend/port/dynloader/win32.h')) - { - copyFile('src/backend/port/dynloader/win32.h', - 'src/include/dynloader.h'); - } - if (IsNewer('src/include/utils/probes.h', 'src/backend/utils/probes.d')) { print "Generating probes.h...\n"; system( -'perl src/backend/utils/Gen_dummy_probes.pl src/backend/utils/probes.d > src/include/utils/probes.h' + 'perl src/backend/utils/Gen_dummy_probes.pl src/backend/utils/probes.d > src/include/utils/probes.h' ); } if ($self->{options}->{python} && IsNewer( 'src/pl/plpython/spiexceptions.h', - 'src/include/backend/errcodes.txt')) + 'src/backend/utils/errcodes.txt')) { print "Generating spiexceptions.h...\n"; system( -'perl src/pl/plpython/generate-spiexceptions.pl src/backend/utils/errcodes.txt > src/pl/plpython/spiexceptions.h' + 'perl src/pl/plpython/generate-spiexceptions.pl src/backend/utils/errcodes.txt > 
src/pl/plpython/spiexceptions.h' ); } @@ -341,7 +346,7 @@ s{PG_VERSION_STR "[^"]+"}{PG_VERSION_STR "PostgreSQL $self->{strver}$extraver, c { print "Generating errcodes.h...\n"; system( -'perl src/backend/utils/generate-errcodes.pl src/backend/utils/errcodes.txt > src/backend/utils/errcodes.h' + 'perl src/backend/utils/generate-errcodes.pl src/backend/utils/errcodes.txt > src/backend/utils/errcodes.h' ); copyFile('src/backend/utils/errcodes.h', 'src/include/utils/errcodes.h'); @@ -353,7 +358,7 @@ s{PG_VERSION_STR "[^"]+"}{PG_VERSION_STR "PostgreSQL $self->{strver}$extraver, c { print "Generating plerrcodes.h...\n"; system( -'perl src/pl/plpgsql/src/generate-plerrcodes.pl src/backend/utils/errcodes.txt > src/pl/plpgsql/src/plerrcodes.h' + 'perl src/pl/plpgsql/src/generate-plerrcodes.pl src/backend/utils/errcodes.txt > src/pl/plpgsql/src/plerrcodes.h' ); } @@ -363,7 +368,7 @@ s{PG_VERSION_STR "[^"]+"}{PG_VERSION_STR "PostgreSQL $self->{strver}$extraver, c { print "Generating pltclerrcodes.h...\n"; system( -'perl src/pl/tcl/generate-pltclerrcodes.pl src/backend/utils/errcodes.txt > src/pl/tcl/pltclerrcodes.h' + 'perl src/pl/tcl/generate-pltclerrcodes.pl src/backend/utils/errcodes.txt > src/pl/tcl/pltclerrcodes.h' ); } @@ -373,7 +378,7 @@ s{PG_VERSION_STR "[^"]+"}{PG_VERSION_STR "PostgreSQL $self->{strver}$extraver, c { print "Generating qsort_tuple.c...\n"; system( -'perl src/backend/utils/sort/gen_qsort_tuple.pl > src/backend/utils/sort/qsort_tuple.c' + 'perl src/backend/utils/sort/gen_qsort_tuple.pl > src/backend/utils/sort/qsort_tuple.c' ); } @@ -425,10 +430,11 @@ s{PG_VERSION_STR "[^"]+"}{PG_VERSION_STR "PostgreSQL $self->{strver}$extraver, c || confess "Could not open ecpg_config.h"; print $o < 1200) -#define HAVE_LONG_LONG_INT_64 +#define HAVE_LONG_LONG_INT 1 +#define HAVE_LONG_LONG_INT_64 1 +#endif #define ENABLE_THREAD_SAFETY 1 EOF - print $o "#endif\n"; close($o); } @@ -456,29 +462,66 @@ EOF my $mf = Project::read_file('src/backend/catalog/Makefile'); $mf =~ s{\\\r?\n}{}g; - $mf =~ /^POSTGRES_BKI_SRCS\s*:?=[^,]+,(.*)\)$/gm - || croak "Could not find POSTGRES_BKI_SRCS in Makefile\n"; - my @allbki = split /\s+/, $1; - foreach my $bki (@allbki) + $mf =~ /^CATALOG_HEADERS\s*:?=(.*)$/gm + || croak "Could not find CATALOG_HEADERS in Makefile\n"; + my @bki_srcs = split /\s+/, $1; + push @bki_srcs, 'toasting.h'; + push @bki_srcs, 'indexing.h'; + $mf =~ /^POSTGRES_BKI_DATA\s*:?=[^,]+,(.*)\)$/gm + || croak "Could not find POSTGRES_BKI_DATA in Makefile\n"; + my @bki_data = split /\s+/, $1; + + my $need_genbki = 0; + foreach my $bki (@bki_srcs, @bki_data) { next if $bki eq ""; if (IsNewer( - 'src/backend/catalog/postgres.bki', + 'src/backend/catalog/bki-stamp', "src/include/catalog/$bki")) { - print "Generating postgres.bki and schemapg.h...\n"; - chdir('src/backend/catalog'); - my $bki_srcs = join(' ../../../src/include/catalog/', @allbki); - system( -"perl genbki.pl -I../../../src/include/catalog --set-version=$self->{majorver} $bki_srcs" - ); - chdir('../../..'); - copyFile( - 'src/backend/catalog/schemapg.h', - 'src/include/catalog/schemapg.h'); + $need_genbki = 1; last; } } + $need_genbki = 1 + if IsNewer('src/backend/catalog/bki-stamp', + 'src/backend/catalog/genbki.pl'); + $need_genbki = 1 + if IsNewer('src/backend/catalog/bki-stamp', + 'src/backend/catalog/Catalog.pm'); + if ($need_genbki) + { + chdir('src/backend/catalog'); + my $bki_srcs = join(' ../../../src/include/catalog/', @bki_srcs); + system("perl genbki.pl --set-version=$self->{majorver} $bki_srcs"); + open(my $f, '>', 
'bki-stamp') + || confess "Could not touch bki-stamp"; + close($f); + chdir('../../..'); + } + + if (IsNewer( + 'src/include/catalog/header-stamp', + 'src/backend/catalog/bki-stamp')) + { + # Copy generated headers to include directory. + opendir(my $dh, 'src/backend/catalog/') + || die "Can't opendir src/backend/catalog/ $!"; + my @def_headers = grep { /pg_\w+_d\.h$/ } readdir($dh); + closedir $dh; + foreach my $def_header (@def_headers) + { + copyFile( + "src/backend/catalog/$def_header", + "src/include/catalog/$def_header"); + } + copyFile( + 'src/backend/catalog/schemapg.h', + 'src/include/catalog/schemapg.h'); + open(my $chs, '>', 'src/include/catalog/header-stamp') + || confess "Could not touch header-stamp"; + close($chs); + } open(my $o, '>', "doc/src/sgml/version.sgml") || croak "Could not write to version.sgml\n"; @@ -487,6 +530,7 @@ EOF {majorver}"> EOF close($o); + return; } sub GenerateDefFile @@ -509,6 +553,7 @@ sub GenerateDefFile close($of); close($if); } + return; } sub AddProject @@ -536,10 +581,12 @@ sub AddProject } else { + # We don't expect the config-specific library to be here, + # so don't ask for it in last parameter $proj->AddLibrary( - $self->{options}->{openssl} . '\lib\ssleay32.lib', 1); + $self->{options}->{openssl} . '\lib\ssleay32.lib', 0); $proj->AddLibrary( - $self->{options}->{openssl} . '\lib\libeay32.lib', 1); + $self->{options}->{openssl} . '\lib\libeay32.lib', 0); } } if ($self->{options}->{nls}) @@ -679,6 +726,7 @@ EOF EndGlobal EOF close($sln); + return; } sub GetFakeConfigure @@ -705,56 +753,10 @@ sub GetFakeConfigure return $cfg; } -package VS2005Solution; - -# -# Package that encapsulates a Visual Studio 2005 solution file -# - -use strict; -use warnings; -use base qw(Solution); - -sub new -{ - my $classname = shift; - my $self = $classname->SUPER::_new(@_); - bless($self, $classname); - - $self->{solutionFileVersion} = '9.00'; - $self->{vcver} = '8.00'; - $self->{visualStudioName} = 'Visual Studio 2005'; - - return $self; -} - -package VS2008Solution; - -# -# Package that encapsulates a Visual Studio 2008 solution file -# - -use strict; -use warnings; -use base qw(Solution); - -sub new -{ - my $classname = shift; - my $self = $classname->SUPER::_new(@_); - bless($self, $classname); - - $self->{solutionFileVersion} = '10.00'; - $self->{vcver} = '9.00'; - $self->{visualStudioName} = 'Visual Studio 2008'; - - return $self; -} - -package VS2010Solution; +package VS2013Solution; # -# Package that encapsulates a Visual Studio 2010 solution file +# Package that encapsulates a Visual Studio 2013 solution file # use Carp; @@ -762,29 +764,7 @@ use strict; use warnings; use base qw(Solution); -sub new -{ - my $classname = shift; - my $self = $classname->SUPER::_new(@_); - bless($self, $classname); - - $self->{solutionFileVersion} = '11.00'; - $self->{vcver} = '10.00'; - $self->{visualStudioName} = 'Visual Studio 2010'; - - return $self; -} - -package VS2012Solution; - -# -# Package that encapsulates a Visual Studio 2012 solution file -# - -use Carp; -use strict; -use warnings; -use base qw(Solution); +no warnings qw(redefine); ## no critic sub new { @@ -792,17 +772,19 @@ sub new my $self = $classname->SUPER::_new(@_); bless($self, $classname); - $self->{solutionFileVersion} = '12.00'; - $self->{vcver} = '11.00'; - $self->{visualStudioName} = 'Visual Studio 2012'; + $self->{solutionFileVersion} = '12.00'; + $self->{vcver} = '12.00'; + $self->{visualStudioName} = 'Visual Studio 2013'; + $self->{VisualStudioVersion} = '12.0.21005.1'; + 
$self->{MinimumVisualStudioVersion} = '10.0.40219.1'; return $self; } -package VS2013Solution; +package VS2015Solution; # -# Package that encapsulates a Visual Studio 2013 solution file +# Package that encapsulates a Visual Studio 2015 solution file # use Carp; @@ -810,6 +792,8 @@ use strict; use warnings; use base qw(Solution); +no warnings qw(redefine); ## no critic + sub new { my $classname = shift; @@ -817,18 +801,18 @@ sub new bless($self, $classname); $self->{solutionFileVersion} = '12.00'; - $self->{vcver} = '12.00'; - $self->{visualStudioName} = 'Visual Studio 2013'; - $self->{VisualStudioVersion} = '12.0.21005.1'; + $self->{vcver} = '14.00'; + $self->{visualStudioName} = 'Visual Studio 2015'; + $self->{VisualStudioVersion} = '14.0.24730.2'; $self->{MinimumVisualStudioVersion} = '10.0.40219.1'; return $self; } -package VS2015Solution; +package VS2017Solution; # -# Package that encapsulates a Visual Studio 2015 solution file +# Package that encapsulates a Visual Studio 2017 solution file # use Carp; @@ -836,6 +820,8 @@ use strict; use warnings; use base qw(Solution); +no warnings qw(redefine); ## no critic + sub new { my $classname = shift; @@ -843,9 +829,9 @@ sub new bless($self, $classname); $self->{solutionFileVersion} = '12.00'; - $self->{vcver} = '14.00'; - $self->{visualStudioName} = 'Visual Studio 2015'; - $self->{VisualStudioVersion} = '14.0.24730.2'; + $self->{vcver} = '15.00'; + $self->{visualStudioName} = 'Visual Studio 2017'; + $self->{VisualStudioVersion} = '15.0.26730.3'; $self->{MinimumVisualStudioVersion} = '10.0.40219.1'; return $self; diff --git a/src/tools/msvc/VCBuildProject.pm b/src/tools/msvc/VCBuildProject.pm deleted file mode 100644 index 669ba1730b..0000000000 --- a/src/tools/msvc/VCBuildProject.pm +++ /dev/null @@ -1,294 +0,0 @@ -package VCBuildProject; - -# -# Package that encapsulates a VCBuild (Visual C++ 2005/2008) project file -# -# src/tools/msvc/VCBuildProject.pm -# - -use Carp; -use strict; -use warnings; -use base qw(Project); - -sub _new -{ - my $classname = shift; - my $self = $classname->SUPER::_new(@_); - bless($self, $classname); - - $self->{filenameExtension} = '.vcproj'; - - return $self; -} - -sub WriteHeader -{ - my ($self, $f) = @_; - - print $f < - - - -EOF - - $self->WriteConfiguration( - $f, 'Debug', - { defs => "_DEBUG;DEBUG=1", - wholeopt => 0, - opt => 0, - strpool => 'false', - runtime => 3 }); - $self->WriteConfiguration( - $f, - 'Release', - { defs => "", - wholeopt => 0, - opt => 3, - strpool => 'true', - runtime => 2 }); - print $f < -EOF - $self->WriteReferences($f); -} - -sub WriteFiles -{ - my ($self, $f) = @_; - print $f < -EOF - my @dirstack = (); - my %uniquefiles; - foreach my $fileNameWithPath (sort keys %{ $self->{files} }) - { - confess "Bad format filename '$fileNameWithPath'\n" - unless ($fileNameWithPath =~ m!^(.*)/([^/]+)\.(c|cpp|y|l|rc)$!); - my $dir = $1; - my $file = $2; - - # Walk backwards down the directory stack and close any dirs - # we're done with. - while ($#dirstack >= 0) - { - if (join('/', @dirstack) eq - substr($dir, 0, length(join('/', @dirstack)))) - { - last if (length($dir) == length(join('/', @dirstack))); - last - if (substr($dir, length(join('/', @dirstack)), 1) eq '/'); - } - print $f ' ' x $#dirstack . 
" \n"; - pop @dirstack; - } - - # Now walk forwards and create whatever directories are needed - while (join('/', @dirstack) ne $dir) - { - my $left = substr($dir, length(join('/', @dirstack))); - $left =~ s/^\///; - my @pieces = split /\//, $left; - push @dirstack, $pieces[0]; - print $f ' ' x $#dirstack - . " \n"; - } - - # VC builds do not like file paths with forward slashes. - my $fileNameWithPathFormatted = $fileNameWithPath; - $fileNameWithPathFormatted =~ s/\//\\/g; - - print $f ' ' x $#dirstack - . " ' - . $self->GenerateCustomTool( - 'Running bison on ' . $fileNameWithPath, - "perl src/tools/msvc/pgbison.pl $fileNameWithPath", $of) - . '' . "\n"; - } - elsif ($fileNameWithPath =~ /\.l$/) - { - my $of = $fileNameWithPath; - $of =~ s/\.l$/.c/; - print $f '>' - . $self->GenerateCustomTool( - 'Running flex on ' . $fileNameWithPath, - "perl src/tools/msvc/pgflex.pl $fileNameWithPath", $of) - . '' . "\n"; - } - elsif (defined($uniquefiles{$file})) - { - - # File already exists, so fake a new name - my $obj = $dir; - $obj =~ s!/!_!g; - print $f -">{platform}\">{name}\\$obj" - . "_$file.obj\" />{platform}\">{name}\\$obj" - . "_$file.obj\" />\n"; - } - else - { - $uniquefiles{$file} = 1; - print $f " />\n"; - } - } - while ($#dirstack >= 0) - { - print $f ' ' x $#dirstack . " \n"; - pop @dirstack; - } - print $f < -EOF -} - -sub Footer -{ - my ($self, $f) = @_; - - print $f < - -EOF -} - -sub WriteConfiguration -{ - my ($self, $f, $cfgname, $p) = @_; - my $cfgtype = - ($self->{type} eq "exe") ? 1 : ($self->{type} eq "dll" ? 2 : 4); - my $libs = $self->GetAdditionalLinkerDependencies($cfgname, ' '); - - my $targetmachine = $self->{platform} eq 'Win32' ? 1 : 17; - - print $f < - - {disablelinkerwarnings}) - { - print $f -"\t\tAdditionalOptions=\"/ignore:$self->{disablelinkerwarnings}\"\n"; - } - if ($self->{implib}) - { - my $l = $self->{implib}; - $l =~ s/__CFGNAME__/$cfgname/g; - print $f "\t\tImportLibrary=\"$l\"\n"; - } - if ($self->{def}) - { - my $d = $self->{def}; - $d =~ s/__CFGNAME__/$cfgname/g; - print $f "\t\tModuleDefinitionFile=\"$d\"\n"; - } - - print $f "\t/>\n"; - print $f -"\t{name}\\$self->{name}.lib\" IgnoreDefaultLibraryNames=\"libc\" />\n"; - print $f -"\t\n"; - if ($self->{builddef}) - { - print $f -"\t{name} $self->{platform}\" />\n"; - } - print $f < -EOF -} - -sub WriteReferences -{ - my ($self, $f) = @_; - print $f " \n"; - foreach my $ref (@{ $self->{references} }) - { - print $f -" {guid}\" Name=\"$ref->{name}\" />\n"; - } - print $f " \n"; -} - -sub GenerateCustomTool -{ - my ($self, $desc, $tool, $output, $cfg) = @_; - if (!defined($cfg)) - { - return $self->GenerateCustomTool($desc, $tool, $output, 'Debug') - . 
$self->GenerateCustomTool($desc, $tool, $output, 'Release'); - } - return -"{platform}\">"; -} - -package VC2005Project; - -# -# Package that encapsulates a Visual C++ 2005 project file -# - -use strict; -use warnings; -use base qw(VCBuildProject); - -sub new -{ - my $classname = shift; - my $self = $classname->SUPER::_new(@_); - bless($self, $classname); - - $self->{vcver} = '8.00'; - - return $self; -} - -package VC2008Project; - -# -# Package that encapsulates a Visual C++ 2008 project file -# - -use strict; -use warnings; -use base qw(VCBuildProject); - -sub new -{ - my $classname = shift; - my $self = $classname->SUPER::_new(@_); - bless($self, $classname); - - $self->{vcver} = '9.00'; - - return $self; -} - -1; diff --git a/src/tools/msvc/VSObjectFactory.pm b/src/tools/msvc/VSObjectFactory.pm index 4190ada618..1a94cd866e 100644 --- a/src/tools/msvc/VSObjectFactory.pm +++ b/src/tools/msvc/VSObjectFactory.pm @@ -13,13 +13,14 @@ use warnings; use Exporter; use Project; use Solution; -use VCBuildProject; use MSBuildProject; our (@ISA, @EXPORT); @ISA = qw(Exporter); @EXPORT = qw(CreateSolution CreateProject DetermineVisualStudioVersion); +no warnings qw(redefine); ## no critic + sub CreateSolution { my $visualStudioVersion = shift; @@ -29,23 +30,7 @@ sub CreateSolution $visualStudioVersion = DetermineVisualStudioVersion(); } - if ($visualStudioVersion eq '8.00') - { - return new VS2005Solution(@_); - } - elsif ($visualStudioVersion eq '9.00') - { - return new VS2008Solution(@_); - } - elsif ($visualStudioVersion eq '10.00') - { - return new VS2010Solution(@_); - } - elsif ($visualStudioVersion eq '11.00') - { - return new VS2012Solution(@_); - } - elsif ($visualStudioVersion eq '12.00') + if ($visualStudioVersion eq '12.00') { return new VS2013Solution(@_); } @@ -53,8 +38,16 @@ sub CreateSolution { return new VS2015Solution(@_); } + + # visual 2017 hasn't changed the nmake version to 15, so adjust the check to support it. + elsif (($visualStudioVersion ge '14.10') + or ($visualStudioVersion eq '15.00')) + { + return new VS2017Solution(@_); + } else { + croak $visualStudioVersion; croak "The requested Visual Studio version is not supported."; } } @@ -68,23 +61,7 @@ sub CreateProject $visualStudioVersion = DetermineVisualStudioVersion(); } - if ($visualStudioVersion eq '8.00') - { - return new VC2005Project(@_); - } - elsif ($visualStudioVersion eq '9.00') - { - return new VC2008Project(@_); - } - elsif ($visualStudioVersion eq '10.00') - { - return new VC2010Project(@_); - } - elsif ($visualStudioVersion eq '11.00') - { - return new VC2012Project(@_); - } - elsif ($visualStudioVersion eq '12.00') + if ($visualStudioVersion eq '12.00') { return new VC2013Project(@_); } @@ -92,8 +69,16 @@ sub CreateProject { return new VC2015Project(@_); } + + # visual 2017 hasn't changed the nmake version to 15, so adjust the check to support it. + elsif (($visualStudioVersion ge '14.10') + or ($visualStudioVersion eq '15.00')) + { + return new VC2017Project(@_); + } else { + croak $visualStudioVersion; croak "The requested Visual Studio version is not supported."; } } @@ -107,29 +92,31 @@ sub DetermineVisualStudioVersion my $output = `nmake /? 2>&1`; $? 
>> 8 == 0 or croak -"Unable to determine Visual Studio version: The nmake command wasn't found."; + "Unable to determine Visual Studio version: The nmake command wasn't found."; if ($output =~ /(\d+)\.(\d+)\.\d+(\.\d+)?$/m) { return _GetVisualStudioVersion($1, $2); } croak -"Unable to determine Visual Studio version: The nmake version could not be determined."; + "Unable to determine Visual Studio version: The nmake version could not be determined."; } sub _GetVisualStudioVersion { my ($major, $minor) = @_; + + # visual 2017 hasn't changed the nmake version to 15, so still using the older version for comparison. if ($major > 14) { carp -"The determined version of Visual Studio is newer than the latest supported version. Returning the latest supported version instead."; + "The determined version of Visual Studio is newer than the latest supported version. Returning the latest supported version instead."; return '14.00'; } elsif ($major < 6) { croak -"Unable to determine Visual Studio version: Visual Studio versions before 6.0 aren't supported."; + "Unable to determine Visual Studio version: Visual Studio versions before 6.0 aren't supported."; } return "$major.$minor"; } diff --git a/src/tools/msvc/build.pl b/src/tools/msvc/build.pl index 744c1f7d6e..35649fe5a2 100644 --- a/src/tools/msvc/build.pl +++ b/src/tools/msvc/build.pl @@ -53,20 +53,17 @@ BEGIN # ... and do it -if ($buildwhat and $vcver >= 10.00) +if ($buildwhat) { system( -"msbuild $buildwhat.vcxproj /verbosity:normal $msbflags /p:Configuration=$bconf" + "msbuild $buildwhat.vcxproj /verbosity:normal $msbflags /p:Configuration=$bconf" ); } -elsif ($buildwhat) -{ - system("vcbuild $msbflags $buildwhat.vcproj $bconf"); -} else { system( -"msbuild pgsql.sln /verbosity:normal $msbflags /p:Configuration=$bconf"); + "msbuild pgsql.sln /verbosity:normal $msbflags /p:Configuration=$bconf" + ); } # report status diff --git a/src/tools/msvc/builddoc.bat b/src/tools/msvc/builddoc.bat deleted file mode 100755 index 024706989e..0000000000 --- a/src/tools/msvc/builddoc.bat +++ /dev/null @@ -1,7 +0,0 @@ -@echo off - -REM src/tools/msvc/builddoc.bat -REM all the logic for this now belongs in builddoc.pl. This file really -REM only exists so you don't have to type "perl builddoc.pl" -REM Resist any temptation to add any logic here. -@perl builddoc.pl %* diff --git a/src/tools/msvc/builddoc.pl b/src/tools/msvc/builddoc.pl deleted file mode 100644 index e0b5c50b34..0000000000 --- a/src/tools/msvc/builddoc.pl +++ /dev/null @@ -1,124 +0,0 @@ -# -*-perl-*- hey - emacs - this is a perl file - -# Adjust path for your docbook installation in buildenv.pl - -# src/tools/msvc/builddoc.pl -# translated from an earlier .bat file - -use strict; -use File::Copy; -use Cwd qw(abs_path getcwd); - -my $startdir = getcwd(); - -my $openjade = 'openjade-1.3.1'; -my $dsssl = 'docbook-dsssl-1.79'; - -chdir '../../..' if (-d '../msvc' && -d '../../../src'); - -noversion() unless -e 'doc/src/sgml/version.sgml'; - -do 'src/tools/msvc/buildenv.pl' if -e 'src/tools/msvc/buildenv.pl'; - -my $docroot = $ENV{DOCROOT}; -die "bad DOCROOT '$docroot'" unless ($docroot && -d $docroot); - -my @notfound; -foreach my $dir ('docbook', $openjade, $dsssl) -{ - push(@notfound, $dir) unless -d "$docroot/$dir"; -} -missing() if @notfound; - -my $arg = shift; -renamefiles(); - -chdir 'doc/src/sgml'; - -$ENV{SGML_CATALOG_FILES} = - "$docroot/$openjade/dsssl/catalog;" . 
"$docroot/docbook/docbook.cat"; - -my $cmd; - -# openjade exits below with a harmless non-zero status, so we -# can't die on "failure" - -$cmd = - "perl mk_feature_tables.pl YES " - . "../../../src/backend/catalog/sql_feature_packages.txt " - . "../../../src/backend/catalog/sql_features.txt " - . "> features-supported.sgml"; -system($cmd); -die "features_supported" if $?; -$cmd = - "perl mk_feature_tables.pl NO " - . "\"../../../src/backend/catalog/sql_feature_packages.txt\" " - . "\"../../../src/backend/catalog/sql_features.txt\" " - . "> features-unsupported.sgml"; -system($cmd); -die "features_unsupported" if $?; -$cmd = -"perl generate-errcodes-table.pl \"../../../src/backend/utils/errcodes.txt\" " - . "> errcodes-table.sgml"; -system($cmd); -die "errcodes-table" if $?; - -print "Running first build...\n"; -$cmd = - "\"$docroot/$openjade/bin/openjade\" -V html-index -wall " - . "-wno-unused-param -wno-empty -D . -c \"$docroot/$dsssl/catalog\" " - . "-d stylesheet.dsl -i output-html -t sgml postgres.sgml 2>&1 " - . "| findstr /V \"DTDDECL catalog entries are not supported\" "; -system($cmd); # die "openjade" if $?; -print "Running collateindex...\n"; -$cmd = "perl \"$docroot/$dsssl/bin/collateindex.pl\" -f -g -i bookindex " - . "-o bookindex.sgml HTML.index"; -system($cmd); -die "collateindex" if $?; -mkdir "html"; -print "Running second build...\n"; -$cmd = - "\"$docroot/$openjade/bin/openjade\" -wall -wno-unused-param -wno-empty " - . "-D . -c \"$docroot/$dsssl/catalog\" -d stylesheet.dsl -t sgml " - . "-i output-html -i include-index postgres.sgml 2>&1 " - . "| findstr /V \"DTDDECL catalog entries are not supported\" "; - -system($cmd); # die "openjade" if $?; - -copy "stylesheet.css", "html/stylesheet.css"; - -print "Docs build complete.\n"; - -exit; - -######################################################## - -sub renamefiles -{ - - # Rename ISO entity files - my $savedir = getcwd(); - chdir "$docroot/docbook"; - foreach my $f (glob('ISO*')) - { - next if $f =~ /\.gml$/i; - my $nf = $f; - $nf =~ s/ISO(.*)/ISO-$1.gml/; - move $f, $nf; - } - chdir $savedir; - -} - -sub missing -{ - print STDERR "could not find $docroot/$_\n" foreach (@notfound); - exit 1; -} - -sub noversion -{ - print STDERR "Could not find version.sgml. 
", - "Please run mkvcbuild.pl first!\n"; - exit 1; -} diff --git a/src/tools/msvc/clean.bat b/src/tools/msvc/clean.bat index 0a88b52536..7a23a2b55f 100755 --- a/src/tools/msvc/clean.bat +++ b/src/tools/msvc/clean.bat @@ -40,7 +40,6 @@ REM Delete files created with GenerateFiles() in Solution.pm if exist src\include\pg_config.h del /q src\include\pg_config.h if exist src\include\pg_config_ext.h del /q src\include\pg_config_ext.h if exist src\include\pg_config_os.h del /q src\include\pg_config_os.h -if exist src\include\dynloader.h del /q src\include\dynloader.h if %DIST%==1 if exist src\backend\parser\gram.h del /q src\backend\parser\gram.h if exist src\include\utils\errcodes.h del /q src\include\utils\errcodes.h if exist src\include\utils\fmgroids.h del /q src\include\utils\fmgroids.h @@ -48,15 +47,18 @@ if exist src\include\utils\fmgrprotos.h del /q src\include\utils\fmgrprotos.h if exist src\include\storage\lwlocknames.h del /q src\include\storage\lwlocknames.h if exist src\include\utils\probes.h del /q src\include\utils\probes.h if exist src\include\catalog\schemapg.h del /q src\include\catalog\schemapg.h +if exist src\include\catalog\pg_*_d.h del /q src\include\catalog\pg_*_d.h +if exist src\include\catalog\header-stamp del /q src\include\catalog\header-stamp if exist doc\src\sgml\version.sgml del /q doc\src\sgml\version.sgml if %DIST%==1 if exist src\backend\utils\fmgroids.h del /q src\backend\utils\fmgroids.h if %DIST%==1 if exist src\backend\utils\fmgrprotos.h del /q src\backend\utils\fmgrprotos.h if %DIST%==1 if exist src\backend\utils\fmgrtab.c del /q src\backend\utils\fmgrtab.c +if %DIST%==1 if exist src\backend\utils\fmgr-stamp del /q src\backend\utils\fmgr-stamp +if %DIST%==1 if exist src\backend\utils\errcodes.h del /q src\backend\utils\errcodes.h if %DIST%==1 if exist src\backend\storage\lmgr\lwlocknames.c del /q src\backend\storage\lmgr\lwlocknames.c if %DIST%==1 if exist src\backend\storage\lmgr\lwlocknames.h del /q src\backend\storage\lmgr\lwlocknames.h if %DIST%==1 if exist src\pl\plpython\spiexceptions.h del /q src\pl\plpython\spiexceptions.h -if %DIST%==1 if exist src\backend\utils\errcodes.h del /q src\backend\utils\errcodes.h if %DIST%==1 if exist src\pl\plpgsql\src\plerrcodes.h del /q src\pl\plpgsql\src\plerrcodes.h if %DIST%==1 if exist src\pl\tcl\pltclerrcodes.h del /q src\pl\tcl\pltclerrcodes.h if %DIST%==1 if exist src\backend\utils\sort\qsort_tuple.c del /q src\backend\utils\sort\qsort_tuple.c @@ -67,6 +69,8 @@ if %DIST%==1 if exist src\backend\catalog\postgres.bki del /q src\backend\catalo if %DIST%==1 if exist src\backend\catalog\postgres.description del /q src\backend\catalog\postgres.description if %DIST%==1 if exist src\backend\catalog\postgres.shdescription del /q src\backend\catalog\postgres.shdescription if %DIST%==1 if exist src\backend\catalog\schemapg.h del /q src\backend\catalog\schemapg.h +if %DIST%==1 if exist src\backend\catalog\pg_*_d.h del /q src\backend\catalog\pg_*_d.h +if %DIST%==1 if exist src\backend\catalog\bki-stamp del /q src\backend\catalog\bki-stamp if %DIST%==1 if exist src\backend\parser\scan.c del /q src\backend\parser\scan.c if %DIST%==1 if exist src\backend\parser\gram.c del /q src\backend\parser\gram.c if %DIST%==1 if exist src\backend\bootstrap\bootscanner.c del /q src\backend\bootstrap\bootscanner.c @@ -116,15 +120,8 @@ if exist src\test\regress\autoinc.dll del /q src\test\regress\autoinc.dll if %DIST%==1 if exist src\test\isolation\specscanner.c del /q src\test\isolation\specscanner.c if %DIST%==1 if exist 
src\test\isolation\specparse.c del /q src\test\isolation\specparse.c -if exist src\bin\initdb\tmp_check rd /s /q src\bin\initdb\tmp_check -if exist src\bin\pg_basebackup\tmp_check rd /s /q src\bin\pg_basebackup\tmp_check -if exist src\bin\pg_config\tmp_check rd /s /q src\bin\pg_config\tmp_check -if exist src\bin\pg_controldata\tmp_check rd /s /q src\bin\pg_controldata\tmp_check -if exist src\bin\pg_ctl\tmp_check rd /s /q src\bin\pg_ctl\tmp_check -if exist src\bin\pg_rewind\tmp_check rd /s /q src\bin\pg_rewind\tmp_check -if exist src\bin\pgbench\tmp_check rd /s /q src\bin\pgbench\tmp_check -if exist src\bin\scripts\tmp_check rd /s /q src\bin\scripts\tmp_check -if exist src\test\recovery\tmp_check rd /s /q src\test\recovery\tmp_check +for /d %%f in (contrib\* src\bin\* src\test\* src\test\modules\* + ) do if exist %%f\tmp_check rd /s /q %%f\tmp_check REM Clean up datafiles built with contrib REM cd contrib diff --git a/src/tools/msvc/config_default.pl b/src/tools/msvc/config_default.pl index 4d69dc2a2e..d7a9fc5039 100644 --- a/src/tools/msvc/config_default.pl +++ b/src/tools/msvc/config_default.pl @@ -18,7 +18,7 @@ icu => undef, # --with-icu= nls => undef, # --enable-nls= tap_tests => undef, # --enable-tap-tests - tcl => undef, # --with-tls= + tcl => undef, # --with-tcl= perl => undef, # --with-perl python => undef, # --with-python= openssl => undef, # --with-openssl= diff --git a/src/tools/msvc/dummylib/README b/src/tools/msvc/dummylib/README new file mode 100644 index 0000000000..7b63d0ed43 --- /dev/null +++ b/src/tools/msvc/dummylib/README @@ -0,0 +1,13 @@ + +src/tools/msvc/dummylib + +This directory contains just enough of a dummy library to allow checking of +the programs in src/tools/msvc and src/tools/win32tzlist.pl with +perl -cw, even on machines that lack the Win32 perl infrastructure. + +invoke via: + +PERL5LIB=src/tools/msvc/dummylib perl -cw $file + +This is the only use that should be made of this directory. Attempting actually +running of any programs using this library will result in a lot of grief. 
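The stub modules that follow show the pattern this README describes: each file claims the module name and declares just the constants and subroutines the checked scripts import, so perl -cw can resolve them without any Win32 infrastructure. A purely hypothetical stub in the same shape (Win32::Example and its symbols are invented for illustration and are not part of dummylib):

    package Win32::Example;    # hypothetical; not an actual dummylib module

    use strict;
    use warnings;

    use Exporter;
    our (@ISA, @EXPORT_OK);
    @ISA       = qw(Exporter);
    @EXPORT_OK = qw(EXAMPLE_FLAG example_call);

    # Just enough definition to satisfy "use Win32::Example qw(...)"
    # under perl -cw; nothing here is meant to run.
    use constant EXAMPLE_FLAG => 0;
    sub example_call { }

    1;

A script is then checked the same way as the real ones, e.g. PERL5LIB=src/tools/msvc/dummylib perl -cw src/tools/msvc/build.pl.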
diff --git a/src/tools/msvc/dummylib/Win32.pm b/src/tools/msvc/dummylib/Win32.pm new file mode 100644 index 0000000000..079e276f24 --- /dev/null +++ b/src/tools/msvc/dummylib/Win32.pm @@ -0,0 +1,4 @@ +package Win32; +use strict; +use warnings; +1; diff --git a/src/tools/msvc/dummylib/Win32/Registry.pm b/src/tools/msvc/dummylib/Win32/Registry.pm new file mode 100644 index 0000000000..1433b1fb54 --- /dev/null +++ b/src/tools/msvc/dummylib/Win32/Registry.pm @@ -0,0 +1,13 @@ +package Win32::Registry; + +use strict; +use warnings; + +use vars qw($HKEY_LOCAL_MACHINE); + +use Exporter (); +our (@EXPORT, @ISA); +@ISA = qw(Exporter); +@EXPORT = qw($HKEY_LOCAL_MACHINE); + +1; diff --git a/src/tools/msvc/dummylib/Win32API/File.pm b/src/tools/msvc/dummylib/Win32API/File.pm new file mode 100644 index 0000000000..bfba9cc7d6 --- /dev/null +++ b/src/tools/msvc/dummylib/Win32API/File.pm @@ -0,0 +1,14 @@ +package Win32API::File; + +use strict; +use warnings; + +use constant { SEM_FAILCRITICALERRORS => 1, SEM_NOGPFAULTERRORBOX => 2 }; +sub SetErrormode { } +use Exporter; +our (@ISA, @EXPORT_OK, %EXPORT_TAGS); +@ISA = qw(Exporter); +@EXPORT_OK = qw(SetErrorMode SEM_FAILCRITICALERRORS SEM_NOGPFAULTERRORBOX); +%EXPORT_TAGS = (SEM_ => [qw(SEM_FAILCRITICALERRORS SEM_NOGPFAULTERRORBOX)]); + +1; diff --git a/src/tools/msvc/ecpg_regression.proj b/src/tools/msvc/ecpg_regression.proj index 745aa19e1b..9fa4a69021 100644 --- a/src/tools/msvc/ecpg_regression.proj +++ b/src/tools/msvc/ecpg_regression.proj @@ -33,6 +33,9 @@ + + + diff --git a/src/tools/msvc/gendef.pl b/src/tools/msvc/gendef.pl index 96122750f1..77c3a775b0 100644 --- a/src/tools/msvc/gendef.pl +++ b/src/tools/msvc/gendef.pl @@ -20,6 +20,7 @@ sub dumpsyms system("dumpbin /symbols /out:$tmpfile $_ >NUL") && die "Could not call dumpbin"; rename($tmpfile, $symfile); + return; } # Given a symbol file path, loops over its contents @@ -32,44 +33,44 @@ sub dumpsyms sub extract_syms { my ($symfile, $def) = @_; - open(my $f, '<', $symfile) || die "Could not open $symfile for $_\n"; + open(my $f, '<', $symfile) || die "Could not open $symfile for $_: $!\n"; while (<$f>) { - # Expected symbol lines look like: - # - # 0 1 2 3 4 5 6 - # IDX SYMBOL SECT SYMTYPE SYMSTATIC SYMNAME - # ------------------------------------------------------------------------ - # 02E 00000130 SECTA notype External | _standbyState - # 02F 00000009 SECT9 notype Static | _LocalRecoveryInProgress - # 064 00000020 SECTC notype () Static | _XLogCheckBuffer - # 065 00000000 UNDEF notype () External | _BufferGetTag - # - # See http://msdn.microsoft.com/en-us/library/b842y285.aspx - # - # We're not interested in the symbol index or offset. - # - # SECT[ION] is only examined to see whether the symbol is defined in a - # COFF section of the local object file; if UNDEF, it's a symbol to be - # resolved at link time from another object so we can't export it. - # - # SYMTYPE is always notype for C symbols as there's no typeinfo and no - # way to get the symbol type from name (de)mangling. However, we care - # if "notype" is suffixed by "()" or not. The presence of () means the - # symbol is a function, the absence means it isn't. - # - # SYMSTATIC indicates whether it's a compilation-unit local "static" - # symbol ("Static"), or whether it's available for use from other - # compilation units ("External"). We export all symbols that aren't - # static as part of the whole program DLL interface to produce UNIX-like - # default linkage. - # - # SYMNAME is, obviously, the symbol name. 
The leading underscore - # indicates that the _cdecl calling convention is used. See - # http://www.unixwiz.net/techtips/win32-callconv.html - # http://www.codeproject.com/Articles/1388/Calling-Conventions-Demystified - # + # Expected symbol lines look like: + # + # 0 1 2 3 4 5 6 + # IDX SYMBOL SECT SYMTYPE SYMSTATIC SYMNAME + # ------------------------------------------------------------------------ + # 02E 00000130 SECTA notype External | _standbyState + # 02F 00000009 SECT9 notype Static | _LocalRecoveryInProgress + # 064 00000020 SECTC notype () Static | _XLogCheckBuffer + # 065 00000000 UNDEF notype () External | _BufferGetTag + # + # See http://msdn.microsoft.com/en-us/library/b842y285.aspx + # + # We're not interested in the symbol index or offset. + # + # SECT[ION] is only examined to see whether the symbol is defined in a + # COFF section of the local object file; if UNDEF, it's a symbol to be + # resolved at link time from another object so we can't export it. + # + # SYMTYPE is always notype for C symbols as there's no typeinfo and no + # way to get the symbol type from name (de)mangling. However, we care + # if "notype" is suffixed by "()" or not. The presence of () means the + # symbol is a function, the absence means it isn't. + # + # SYMSTATIC indicates whether it's a compilation-unit local "static" + # symbol ("Static"), or whether it's available for use from other + # compilation units ("External"). We export all symbols that aren't + # static as part of the whole program DLL interface to produce UNIX-like + # default linkage. + # + # SYMNAME is, obviously, the symbol name. The leading underscore + # indicates that the _cdecl calling convention is used. See + # http://www.unixwiz.net/techtips/win32-callconv.html + # http://www.codeproject.com/Articles/1388/Calling-Conventions-Demystified + # s/notype \(\)/func/g; s/notype/data/g; @@ -116,6 +117,7 @@ sub extract_syms $def->{ $pieces[6] } = $pieces[3]; } close($f); + return; } sub writedef @@ -143,6 +145,7 @@ sub writedef } } close($fh); + return; } @@ -155,8 +158,8 @@ sub usage usage() unless scalar(@ARGV) == 2 - && ( ($ARGV[0] =~ /\\([^\\]+$)/) - && ($ARGV[1] eq 'Win32' || $ARGV[1] eq 'x64')); + && ( ($ARGV[0] =~ /\\([^\\]+$)/) + && ($ARGV[1] eq 'Win32' || $ARGV[1] eq 'x64')); my $defname = uc $1; my $deffile = "$ARGV[0]/$defname.def"; my $platform = $ARGV[1]; diff --git a/src/tools/msvc/install.pl b/src/tools/msvc/install.pl index b2d7f9e040..90425ca8fc 100755 --- a/src/tools/msvc/install.pl +++ b/src/tools/msvc/install.pl @@ -6,6 +6,10 @@ use strict; use warnings; +use File::Basename; +use File::Spec; +BEGIN { use lib File::Spec->rel2abs(dirname(__FILE__)); } + use Install qw(Install); # buildenv.pl is for specifying the build environment settings diff --git a/src/tools/msvc/mkvcbuild.pl b/src/tools/msvc/mkvcbuild.pl index 9255dff022..34e861375c 100644 --- a/src/tools/msvc/mkvcbuild.pl +++ b/src/tools/msvc/mkvcbuild.pl @@ -7,6 +7,10 @@ use strict; use warnings; +use File::Basename; +use File::Spec; +BEGIN { use lib File::Spec->rel2abs(dirname(__FILE__)); } + use Mkvcbuild; chdir('..\..\..') if (-d '..\msvc' && -d '..\..\..\src'); diff --git a/src/tools/msvc/vcregress.pl b/src/tools/msvc/vcregress.pl index 2904679114..599b521014 100644 --- a/src/tools/msvc/vcregress.pl +++ b/src/tools/msvc/vcregress.pl @@ -10,6 +10,9 @@ use File::Basename; use File::Copy; use File::Find (); +use File::Path qw(rmtree); +use File::Spec; +BEGIN { use lib File::Spec->rel2abs(dirname(__FILE__)); } use Install qw(Install); @@ -34,7 +37,7 @@ my 
$what = shift || ""; if ($what =~ -/^(check|installcheck|plcheck|contribcheck|modulescheck|ecpgcheck|isolationcheck|upgradecheck|bincheck|recoverycheck|taptest)$/i + /^(check|installcheck|plcheck|contribcheck|modulescheck|ecpgcheck|isolationcheck|upgradecheck|bincheck|recoverycheck|taptest)$/i ) { $what = uc $what; @@ -104,12 +107,14 @@ sub installcheck "--dlpath=.", "--bindir=../../../$Config/psql", "--schedule=${schedule}_schedule", + "--max-concurrent-tests=20", "--encoding=SQL_ASCII", "--no-locale"); push(@args, $maxconn) if $maxconn; system(@args); my $status = $? >> 8; exit $status if $status; + return; } sub check @@ -122,6 +127,7 @@ sub check "--dlpath=.", "--bindir=", "--schedule=${schedule}_schedule", + "--max-concurrent-tests=20", "--encoding=SQL_ASCII", "--no-locale", "--temp-instance=./tmp_check"); @@ -130,6 +136,7 @@ sub check system(@args); my $status = $? >> 8; exit $status if $status; + return; } sub ecpgcheck @@ -155,6 +162,7 @@ sub ecpgcheck system(@args); $status = $? >> 8; exit $status if $status; + return; } sub isolationcheck @@ -171,6 +179,7 @@ sub isolationcheck system(@args); my $status = $? >> 8; exit $status if $status; + return; } sub tap_check @@ -199,6 +208,7 @@ sub tap_check $ENV{TESTDIR} = "$dir"; + rmtree('tmp_check'); system(@args); my $status = $? >> 8; return $status; @@ -222,6 +232,7 @@ sub bincheck $mstat ||= $status; } exit $mstat if $mstat; + return; } sub taptest @@ -242,28 +253,95 @@ sub taptest InstallTemp(); my $status = tap_check(@args); exit $status if $status; + return; +} + +sub mangle_plpython3 +{ + my $tests = shift; + mkdir "results" unless -d "results"; + mkdir "sql/python3"; + mkdir "results/python3"; + mkdir "expected/python3"; + + foreach my $test (@$tests) + { + local $/ = undef; + foreach my $dir ('sql', 'expected') + { + my $extension = ($dir eq 'sql' ? 'sql' : 'out'); + + my @files = + glob("$dir/$test.$extension $dir/${test}_[0-9].$extension"); + foreach my $file (@files) + { + open(my $handle, '<', $file) + || die "test file $file not found"; + my $contents = <$handle>; + close($handle); + do + { + s/except ([[:alpha:]][[:alpha:].]*), *([[:alpha:]][[:alpha:]]*):/except $1 as $2:/g; + s///g; + s///g; + s/([0-9][0-9]*)L/$1/g; + s/([ [{])u"/$1"/g; + s/([ [{])u'/$1'/g; + s/def next/def __next__/g; + s/LANGUAGE plpython2?u/LANGUAGE plpython3u/g; + s/EXTENSION ([^ ]*_)*plpython2?u/EXTENSION $1plpython3u/g; + s/installing required extension "plpython2u"/installing required extension "plpython3u"/g; + } + for ($contents); + my $base = basename $file; + open($handle, '>', "$dir/python3/$base") + || die "opening python 3 file for $file"; + print $handle $contents; + close($handle); + } + } + } + do { s!^!python3/!; } + foreach (@$tests); + return @$tests; } sub plcheck { - chdir "../../pl"; + chdir "$topdir/src/pl"; - foreach my $pl (glob("*")) + foreach my $dir (glob("*/src *")) { - next unless -d "$pl/sql" && -d "$pl/expected"; - my $lang = $pl eq 'tcl' ? 
'pltcl' : $pl; + next unless -d "$dir/sql" && -d "$dir/expected"; + my $lang; + if ($dir eq 'plpgsql/src') + { + $lang = 'plpgsql'; + } + elsif ($dir eq 'tcl') + { + $lang = 'pltcl'; + } + else + { + $lang = $dir; + } if ($lang eq 'plpython') { - next unless -d "../../$Config/plpython2"; + next + unless -d "$topdir/$Config/plpython2" + || -d "$topdir/$Config/plpython3"; $lang = 'plpythonu'; } else { - next unless -d "../../$Config/$lang"; + next unless -d "$topdir/$Config/$lang"; } my @lang_args = ("--load-extension=$lang"); - chdir $pl; + chdir $dir; my @tests = fetchTests(); + @tests = mangle_plpython3(\@tests) + if $lang eq 'plpythonu' && -d "$topdir/$Config/plpython3"; if ($lang eq 'plperl') { @@ -279,25 +357,29 @@ sub plcheck push(@tests, 'plperl_plperlu'); } } + elsif ($lang eq 'plpythonu' && -d "$topdir/$Config/plpython3") + { + @lang_args = (); + } print "============================================================\n"; print "Checking $lang\n"; my @args = ( - "../../../$Config/pg_regress/pg_regress", - "--bindir=../../../$Config/psql", + "$topdir/$Config/pg_regress/pg_regress", + "--bindir=$topdir/$Config/psql", "--dbname=pl_regression", @lang_args, @tests); system(@args); my $status = $? >> 8; exit $status if $status; - chdir ".."; + chdir "$topdir/src/pl"; } - chdir "../../.."; + chdir "$topdir"; + return; } sub subdircheck { - my $subdir = shift; my $module = shift; if ( !-d "$module/sql" @@ -311,45 +393,38 @@ sub subdircheck my @tests = fetchTests(); my @opts = fetchRegressOpts(); - # Add some options for transform modules, see their respective - # Makefile for more details regarding Python-version specific + # Special processing for python transform modules, see their respective + # Makefiles for more details regarding Python-version specific # dependencies. - if ( $module eq "hstore_plpython" - || $module eq "ltree_plpython") + if ($module =~ /_plpython$/) { die "Python not enabled in configuration" if !defined($config->{python}); - # Attempt to get python version and location. - # Assume python.exe in specified dir. - my $pythonprog = "import sys;" . "print(str(sys.version_info[0]))"; - my $prefixcmd = $config->{python} . "\\python -c \"$pythonprog\""; - my $pyver = `$prefixcmd`; - die "Could not query for python version!\n" if $?; - chomp($pyver); - if ($pyver eq "2") + @opts = grep { $_ !~ /plpythonu/ } @opts; + + if (-d "$topdir/$Config/plpython2") { push @opts, "--load-extension=plpythonu"; push @opts, '--load-extension=' . $module . 'u'; } else { - - # disable tests on python3 for now. 
- chdir ".."; - return; + # must be python 3 + @tests = mangle_plpython3(\@tests); } } - print "============================================================\n"; print "Checking $module\n"; my @args = ( "$topdir/$Config/pg_regress/pg_regress", "--bindir=${topdir}/${Config}/psql", "--dbname=contrib_regression", @opts, @tests); + print join(' ', @args), "\n"; system(@args); chdir ".."; + return; } sub contribcheck @@ -358,21 +433,20 @@ sub contribcheck my $mstat = 0; foreach my $module (glob("*")) { - # these configuration-based exclusions must match Install.pm - next if ($module eq "uuid-ossp" && !defined($config->{uuid})); - next if ($module eq "sslinfo" && !defined($config->{openssl})); - next if ($module eq "xml2" && !defined($config->{xml})); - next if ($module eq "hstore_plperl" && !defined($config->{perl})); - next if ($module eq "hstore_plpython" && !defined($config->{python})); - next if ($module eq "ltree_plpython" && !defined($config->{python})); + next if ($module eq "uuid-ossp" && !defined($config->{uuid})); + next if ($module eq "sslinfo" && !defined($config->{openssl})); + next if ($module eq "xml2" && !defined($config->{xml})); + next if ($module =~ /_plperl$/ && !defined($config->{perl})); + next if ($module =~ /_plpython$/ && !defined($config->{python})); next if ($module eq "sepgsql"); - subdircheck("$topdir/contrib", $module); + subdircheck($module); my $status = $? >> 8; $mstat ||= $status; } exit $mstat if $mstat; + return; } sub modulescheck @@ -381,11 +455,12 @@ sub modulescheck my $mstat = 0; foreach my $module (glob("*")) { - subdircheck("$topdir/src/test/modules", $module); + subdircheck($module); my $status = $? >> 8; $mstat ||= $status; } exit $mstat if $mstat; + return; } sub recoverycheck @@ -396,6 +471,7 @@ sub recoverycheck my $dir = "$topdir/src/test/recovery"; my $status = tap_check($dir); exit $status if $status; + return; } # Run "initdb", then reconfigure authentication. @@ -440,6 +516,7 @@ sub generate_db system('createdb', quote_system_arg($dbname)); my $status = $? >> 8; exit $status if $status; + return; } sub upgradecheck @@ -482,7 +559,7 @@ sub upgradecheck generate_db('', 91, 127, ''); print "\nSetting up data for upgrading\n\n"; - installcheck(); + installcheck('parallel'); # now we can chdir into the source dir chdir "$topdir/src/bin/pg_upgrade"; @@ -525,6 +602,7 @@ sub upgradecheck print "dumps not identical!\n"; exit(1); } + return; } sub fetchRegressOpts @@ -619,6 +697,7 @@ sub InstallTemp Install("$tmp_installdir", "all", $config); } $ENV{PATH} = "$tmp_installdir/bin;$ENV{PATH}"; + return; } sub usage diff --git a/src/tools/perlcheck/find_perl_files b/src/tools/perlcheck/find_perl_files new file mode 100644 index 0000000000..fd99dab83b --- /dev/null +++ b/src/tools/perlcheck/find_perl_files @@ -0,0 +1,14 @@ +# src/tools/perlcheck/find_perl_files + +# shell function to find all perl files in the source tree + +find_perl_files () { + { + # take all .pl and .pm files + find . -type f -name '*.p[lm]' -print + # take executable files that file(1) thinks are perl files + find . 
-type f -perm -100 -exec file {} \; -print | + egrep -i ':.*perl[0-9]*\>' | + cut -d: -f1 + } | sort -u | grep -v '^\./\.git/' +} diff --git a/src/tools/perlcheck/perlcriticrc b/src/tools/perlcheck/perlcriticrc new file mode 100644 index 0000000000..12c09a453e --- /dev/null +++ b/src/tools/perlcheck/perlcriticrc @@ -0,0 +1,18 @@ +###################################################################### +# +# src/tools/perlcheck/perlcriticrc +# +# config file for perlcritic for Postgres project +# +##################################################################### + +severity = 5 + +theme = core + +# allow octal constants with leading zeros +[-ValuesAndExpressions::ProhibitLeadingZeros] + +# for now raise severity of this to level 5 +[Subroutines::RequireFinalReturn] +severity = 5 diff --git a/src/tools/perlcheck/pgperlcritic b/src/tools/perlcheck/pgperlcritic new file mode 100755 index 0000000000..1c2f787580 --- /dev/null +++ b/src/tools/perlcheck/pgperlcritic @@ -0,0 +1,20 @@ +#!/bin/sh + +# src/tools/perlcheck/pgperlcritic + +test -f src/tools/perlcheck/perlcriticrc || { + echo could not find src/tools/perlcheck/perlcriticrc + exit 1 + } + +set -e + +# set this to override default perlcritic program: +PERLCRITIC=${PERLCRITIC:-perlcritic} + +. src/tools/perlcheck/find_perl_files + +find_perl_files | xargs $PERLCRITIC \ + --quiet \ + --program-extensions .pl \ + --profile=src/tools/perlcheck/perlcriticrc diff --git a/src/tools/perlcheck/pgperlsyncheck b/src/tools/perlcheck/pgperlsyncheck new file mode 100755 index 0000000000..74f1584b8e --- /dev/null +++ b/src/tools/perlcheck/pgperlsyncheck @@ -0,0 +1,16 @@ +#!/bin/sh + +# script to detect compile time errors and warnings in all perl files + +INCLUDES="-I src/tools/msvc -I src/tools/msvc/dummylib -I src/backend/catalog" +INCLUDES="-I src/test/perl -I src/backend/utils/mb/Unicode $INCLUDES" +INCLUDES="-I src/bin/pg_rewind -I src/test/ssl $INCLUDES" + +set -e + +. src/tools/perlcheck/find_perl_files + +# for zsh +setopt shwordsplit 2>/dev/null || true + +find_perl_files | xargs -L 1 perl $INCLUDES -cw 2>&1 | grep -v OK diff --git a/src/tools/pginclude/pgcheckdefines b/src/tools/pginclude/pgcheckdefines index aa7c9c2fc1..4edf7fc56e 100755 --- a/src/tools/pginclude/pgcheckdefines +++ b/src/tools/pginclude/pgcheckdefines @@ -58,7 +58,7 @@ while (<$pipe>) chomp; push @hfiles, $_ unless m|^src/include/port/| - || m|^src/backend/port/\w+/|; + || m|^src/backend/port/\w+/|; } close $pipe or die "$FIND failed: $!"; @@ -119,7 +119,7 @@ foreach my $file (@hfiles, @cfiles) $top_builddir = $top_builddir . "/.."; } $MAKECMD = -"$MAKE -qp 'subdir=$subdir' 'top_builddir=$top_builddir' -f '$top_builddir/src/Makefile.global'"; + "$MAKE -qp 'subdir=$subdir' 'top_builddir=$top_builddir' -f '$top_builddir/src/Makefile.global'"; } my ($CPPFLAGS, $CFLAGS, $CFLAGS_SL, $PTHREAD_CFLAGS, $CC); @@ -297,4 +297,6 @@ sub checkit print "$file references $symbol, defined in @places\n"; # print "includes: @includes\n"; + + return; } diff --git a/src/tools/pgindent/README b/src/tools/pgindent/README index ae425da285..044b380fed 100644 --- a/src/tools/pgindent/README +++ b/src/tools/pgindent/README @@ -14,9 +14,14 @@ PREREQUISITES: git clone https://git.postgresql.org/git/pg_bsd_indent.git then follow the directions in README.pg_bsd_indent therein. -2) Install perltidy. Please be sure it is v20090616 (older and newer - versions make different formatting choices, and we want consistency). 
See - https://sourceforge.net/projects/perltidy/files/perltidy/perltidy-20090616/ +2) Install perltidy. Please be sure it is version 20170521 (older and newer + versions make different formatting choices, and we want consistency). + You can get the correct version from + https://cpan.metacpan.org/authors/id/S/SH/SHANCOCK/ + To install, follow the usual install process for a Perl module + ("man perlmodinstall" explains it). Or, if you have cpan installed, + this should work: + cpan SHANCOCK/Perl-Tidy-20170521.tar.gz DOING THE INDENT RUN: @@ -43,6 +48,12 @@ DOING THE INDENT RUN: If you want to use some perltidy version that's not in your PATH, first set the PERLTIDY environment variable to point to it. +5) Reformat the bootstrap catalog data files: + + cd src/include/catalog + make reformat-dat-files + cd ../../.. + VALIDATION: 1) Check for any newly-created files using "git status"; there shouldn't diff --git a/src/tools/pgindent/exclude_file_patterns b/src/tools/pgindent/exclude_file_patterns index cb2f902a90..c8efc9a913 100644 --- a/src/tools/pgindent/exclude_file_patterns +++ b/src/tools/pgindent/exclude_file_patterns @@ -5,3 +5,6 @@ /ecpg/test/expected/ /snowball/libstemmer/ /pl/plperl/ppport\.h$ +/jit/llvmjit\.h$ +/tmp_check/ +/tmp_install/ diff --git a/src/tools/pgindent/perltidyrc b/src/tools/pgindent/perltidyrc index e8ae7c5d8b..9f09f0a64e 100644 --- a/src/tools/pgindent/perltidyrc +++ b/src/tools/pgindent/perltidyrc @@ -1,12 +1,16 @@ --add-whitespace --backup-and-modify-in-place +--backup-file-extension=/ --delete-old-whitespace --entab-leading-whitespace=4 --keep-old-blank-lines=2 --maximum-line-length=78 +--nooutdent-long-comments +--nooutdent-long-quotes --nospace-for-semicolon --opening-brace-on-new-line --output-line-ending=unix --paren-tightness=2 ---vertical-tightness=2 ---vertical-tightness-closing=2 +--paren-vertical-tightness=2 +--paren-vertical-tightness-closing=2 +--noblanks-before-comments diff --git a/src/tools/pgindent/pgindent b/src/tools/pgindent/pgindent index a32aaa64f3..2d81672e15 100755 --- a/src/tools/pgindent/pgindent +++ b/src/tools/pgindent/pgindent @@ -16,7 +16,7 @@ my $INDENT_VERSION = "2.0"; # Our standard indent settings my $indent_opts = -"-bad -bap -bbb -bc -bl -cli1 -cp33 -cdb -nce -d0 -di12 -nfc1 -i4 -l79 -lp -lpl -nip -npro -sac -tpg -ts4"; + "-bad -bap -bbb -bc -bl -cli1 -cp33 -cdb -nce -d0 -di12 -nfc1 -i4 -l79 -lp -lpl -nip -npro -sac -tpg -ts4"; my $devnull = File::Spec->devnull; @@ -50,6 +50,20 @@ $code_base ||= '.' unless @ARGV; $excludes ||= "$code_base/src/tools/pgindent/exclude_file_patterns" if $code_base && -f "$code_base/src/tools/pgindent/exclude_file_patterns"; +# The typedef list that's mechanically extracted by the buildfarm may omit +# some names we want to treat like typedefs, e.g. "bool" (which is a macro +# according to ), and may include some names we don't want +# treated as typedefs, although various headers that some builds include +# might make them so. For the moment we just hardwire a whitelist of names +# to add and a blacklist of names to remove; eventually this may need to be +# easier to configure. Note that the typedefs need trailing newlines. 
+my @whitelist = ("bool\n"); + +my %blacklist = map { +"$_\n" => 1 } qw( + ANY FD_SET U abs allocfunc boolean date digit ilist interval iterator other + pointer printfunc reference string timestamp type wrap +); + # globals my @files; my $filtered_typedefs_fh; @@ -68,7 +82,7 @@ sub check_indent if (`$indent --version` !~ m/ $INDENT_VERSION$/) { print STDERR -"You do not appear to have $indent version $INDENT_VERSION installed on your system.\n"; + "You do not appear to have $indent version $INDENT_VERSION installed on your system.\n"; exit 1; } @@ -79,6 +93,8 @@ sub check_indent "You appear to have GNU indent rather than BSD indent.\n"; exit 1; } + + return; } @@ -118,9 +134,11 @@ sub load_typedefs } } - # remove certain entries - @typedefs = - grep { !m/^(FD_SET|date|interval|timestamp|ANY)\n?$/ } @typedefs; + # add whitelisted entries + push(@typedefs, @whitelist); + + # remove blacklisted entries + @typedefs = grep { !$blacklist{$_} } @typedefs; # write filtered typedefs my $filter_typedefs_fh = new File::Temp(TEMPLATE => "pgtypedefXXXXX"); @@ -146,6 +164,7 @@ sub process_exclude } close($eh); } + return; } @@ -173,6 +192,7 @@ sub write_source || die "cannot open file \"$source_filename\": $!\n"; print $src_fh $source; close($src_fh); + return; } @@ -195,11 +215,11 @@ sub pre_indent my $extern_c_start = '/* Open extern "C" */'; my $extern_c_stop = '/* Close extern "C" */'; $source =~ -s!(^#ifdef[ \t]+__cplusplus.*\nextern[ \t]+"C"[ \t]*\n)\{[ \t]*$!$1$extern_c_start!gm; + s!(^#ifdef[ \t]+__cplusplus.*\nextern[ \t]+"C"[ \t]*\n)\{[ \t]*$!$1$extern_c_start!gm; $source =~ s!(^#ifdef[ \t]+__cplusplus.*\n)\}[ \t]*$!$1$extern_c_stop!gm; - # Protect backslashes in DATA() and wrapping in CATALOG() - $source =~ s!^((DATA|CATALOG)\(.*)$!/*$1*/!gm; + # Protect wrapping in CATALOG() + $source =~ s!^(CATALOG\(.*)$!/*$1*/!gm; return $source; } @@ -210,8 +230,8 @@ sub post_indent my $source = shift; my $source_filename = shift; - # Restore DATA/CATALOG lines - $source =~ s!^/\*((DATA|CATALOG)\(.*)\*/$!$1!gm; + # Restore CATALOG lines + $source =~ s!^/\*(CATALOG\(.*)\*/$!$1!gm; # Put back braces for extern "C" $source =~ s!^/\* Open extern "C" \*/$!{!gm; @@ -300,6 +320,7 @@ sub diff . $pre_fh->filename . " " . $post_fh->filename . " >&2"); + return; } @@ -345,6 +366,7 @@ sub run_build $ENV{PGINDENT} = abs_path('pg_bsd_indent'); chdir $save_dir; + return; } @@ -366,6 +388,7 @@ sub build_clean system("rm -rf src/tools/pgindent/pg_bsd_indent"); system("rm -f src/tools/pgindent/tmp_typedefs.list"); + return; } @@ -373,13 +396,14 @@ sub build_clean # get the list of files under code base, if it's set File::Find::find( - { wanted => sub { + { + wanted => sub { my ($dev, $ino, $mode, $nlink, $uid, $gid); (($dev, $ino, $mode, $nlink, $uid, $gid) = lstat($_)) && -f _ && /^.*\.[ch]\z/s && push(@files, $File::Find::name); - } + } }, $code_base) if $code_base; diff --git a/src/tools/pgindent/pgperltidy b/src/tools/pgindent/pgperltidy index 6098e18428..5e704119eb 100755 --- a/src/tools/pgindent/pgperltidy +++ b/src/tools/pgindent/pgperltidy @@ -7,17 +7,6 @@ set -e # set this to override default perltidy program: PERLTIDY=${PERLTIDY:-perltidy} -# locate all Perl files in the tree -( - # take all .pl and .pm files - find . -type f -a \( -name '*.pl' -o -name '*.pm' \) - # take executable files that file(1) thinks are perl files - find . -type f -perm -100 -exec file {} \; | - egrep -i ':.*perl[0-9]*\>' | - cut -d: -f1 -) | -sort -u | -xargs $PERLTIDY --profile=src/tools/pgindent/perltidyrc +. 
src/tools/perlcheck/find_perl_files -# perltidyrc specifies --backup-and-modify-in-place, so get rid of .bak files -find . -type f -name '*.bak' | xargs rm +find_perl_files | xargs $PERLTIDY --profile=src/tools/pgindent/perltidyrc diff --git a/src/tools/pgindent/typedefs.list b/src/tools/pgindent/typedefs.list index 8166d86ca1..9fe950b29d 100644 --- a/src/tools/pgindent/typedefs.list +++ b/src/tools/pgindent/typedefs.list @@ -1,5 +1,6 @@ ABITVEC ACCESS_ALLOWED_ACE +ACL ACL_SIZE_INFORMATION AFFIX ASN1_INTEGER @@ -20,7 +21,6 @@ Acl AclItem AclMaskHow AclMode -AclObjectKind AclResult AcquireSampleRowsFunc ActiveSnapshotElt @@ -34,6 +34,9 @@ AfterTriggerEventList AfterTriggerShared AfterTriggerSharedData AfterTriggersData +AfterTriggersQueryData +AfterTriggersTableData +AfterTriggersTransData Agg AggClauseCosts AggInfo @@ -55,6 +58,7 @@ AllocChunk AllocPointer AllocSet AllocSetContext +AllocSetFreeList AllocateDesc AllocateDescKind AlterCollationStmt @@ -125,7 +129,6 @@ ArrayMetaState ArrayParseState ArrayRef ArrayRefState -ArrayRemapInfo ArrayType AsyncQueueControl AsyncQueueEntry @@ -135,14 +138,15 @@ AttoptCacheEntry AttoptCacheKey AttrDefInfo AttrDefault +AttrMissing AttrNumber AttributeOpts AuthRequest +AutoPrewarmSharedState AutoVacOpts AutoVacuumShmemStruct AutoVacuumWorkItem AutoVacuumWorkItemType -AutovacWorkItems AuxProcType BF_ctx BF_key @@ -163,6 +167,7 @@ BTArrayKeyInfo BTBuildState BTCycleId BTIndexStat +BTLeader BTMetaPageData BTOneVacInfo BTPS_State @@ -176,6 +181,7 @@ BTScanOpaqueData BTScanPos BTScanPosData BTScanPosItem +BTShared BTSortArrayContext BTSpool BTStack @@ -193,8 +199,10 @@ BackgroundWorker BackgroundWorkerArray BackgroundWorkerHandle BackgroundWorkerSlot +Barrier BaseBackupCmd BeginDirectModify_function +BeginForeignInsert_function BeginForeignModify_function BeginForeignScan_function BeginSampleScan_function @@ -218,6 +226,7 @@ BlobInfo Block BlockId BlockIdData +BlockInfoRecord BlockNumber BlockSampler BlockSamplerData @@ -272,6 +281,8 @@ BuiltinScript BulkInsertState CACHESIGN CAC_state +CCFastEqualFN +CCHashFN CEOUC_WAIT_MODE CFuncHashTabEntry CHAR @@ -288,6 +299,8 @@ CV C_block CachedPlan CachedPlanSource +CallContext +CallStmt CancelRequestPacket CaseExpr CaseTestExpr @@ -312,10 +325,12 @@ CkptTsStatus ClientAuthentication_hook_type ClientData ClonePtrType +ClonedConstraint ClosePortalStmt ClosePtrType Clump ClusterInfo +ClusterOption ClusterStmt CmdType CoalesceExpr @@ -340,6 +355,7 @@ ColumnCompareData ColumnDef ColumnIOData ColumnRef +ColumnsHashData CombinationGenerator ComboCidEntry ComboCidEntryData @@ -355,6 +371,7 @@ CommitTimestampShared CommonEntry CommonTableExpr CompareScalarsContext +CompiledExprState CompositeIOData CompositeTypeStmt CompoundAffixFlag @@ -537,6 +554,7 @@ EndBlobPtrType EndBlobsPtrType EndDataPtrType EndDirectModify_function +EndForeignInsert_function EndForeignModify_function EndForeignScan_function EndSampleScan_function @@ -563,6 +581,7 @@ ExceptionLabelMap ExceptionMap ExclusiveBackupState ExecAuxRowMark +ExecEvalSubroutine ExecForeignDelete_function ExecForeignInsert_function ExecForeignUpdate_function @@ -584,6 +603,8 @@ ExecutorStart_hook_type ExpandedArrayHeader ExpandedObjectHeader ExpandedObjectMethods +ExpandedRecordFieldInfo +ExpandedRecordHeader ExplainDirectModify_function ExplainForeignModify_function ExplainForeignScan_function @@ -598,6 +619,7 @@ ExprContextCallbackFunction ExprContext_CB ExprDoneCond ExprEvalOp +ExprEvalOpLookup ExprEvalStep ExprState ExprStateEvalFunc @@ -628,9 +650,9 @@ FieldStore File 
FileFdwExecutionState FileFdwPlanState -FileName FileNameMap FindSplitData +FixedParallelExecutorState FixedParallelState FixedParamState FlagMode @@ -820,7 +842,13 @@ GatherMergeState GatherPath GatherState Gene +GeneratePruningStepsContext +GenerationBlock +GenerationChunk +GenerationContext +GenerationPointer GenericCosts +GenericIndexOpts GenericXLogState GeqoPrivateData GetForeignJoinPaths_function @@ -865,12 +893,12 @@ GistNSN GistSplitUnion GistSplitVector GlobalTransaction -GrantObjectType GrantRoleStmt GrantStmt GrantTargetType Group GroupPath +GroupPathExtraData GroupState GroupVarInfo GroupingFunc @@ -924,6 +952,7 @@ HashBuildState HashCompareFunc HashCopyFunc HashIndexStat +HashInstrumentation HashJoin HashJoinState HashJoinTable @@ -937,6 +966,7 @@ HashPageStat HashPath HashScanOpaque HashScanOpaqueData +HashScanPosData HashScanPosItem HashSkewBucket HashState @@ -957,6 +987,7 @@ HistControl HotStandbyState I32 ICU_Convert_Func +ID INFIX INT128 INTERFACE_INFO @@ -979,6 +1010,7 @@ Index IndexAMProperty IndexAmRoutine IndexArrayKeyInfo +IndexAttachInfo IndexAttrBitmapKind IndexBuildCallback IndexBuildResult @@ -1009,6 +1041,7 @@ InferenceElem InfoItem InhInfo InheritableSocket +InheritanceKind InitSampleScan_function InitializeDSMForeignScan_function InitializeWorkerForeignScan_function @@ -1017,13 +1050,13 @@ InsertStmt Instrumentation Int128AggState Int8TransTypeData +IntRBTreeNode InternalDefaultACL InternalGrant Interval IntoClause InvalidationChunk InvalidationListHeader -InvertedWalkNextStep IpcMemoryId IpcMemoryKey IpcSemaphoreId @@ -1045,6 +1078,12 @@ JOBOBJECTINFOCLASS JOBOBJECT_BASIC_LIMIT_INFORMATION JOBOBJECT_BASIC_UI_RESTRICTIONS JOBOBJECT_SECURITY_LIMIT_INFORMATION +JitContext +JitProviderCallbacks +JitProviderCompileExprCB +JitProviderInit +JitProviderReleaseContextCB +JitProviderResetAfterErrorCB Join JoinCostWorkspace JoinExpr @@ -1085,6 +1124,24 @@ LDAPMessage LDAPURLDesc LDAP_TIMEVAL LINE +LLVMAttributeRef +LLVMBasicBlockRef +LLVMBuilderRef +LLVMIntPredicate +LLVMJitContext +LLVMJitHandle +LLVMMemoryBufferRef +LLVMModuleRef +LLVMOrcJITStackRef +LLVMOrcModuleHandle +LLVMOrcTargetAddress +LLVMPassManagerBuilderRef +LLVMPassManagerRef +LLVMSharedModuleRef +LLVMTargetMachineRef +LLVMTargetRef +LLVMTypeRef +LLVMValueRef LOCALLOCK LOCALLOCKOWNER LOCALLOCKTAG @@ -1095,6 +1152,7 @@ LOCKMETHODID LOCKMODE LOCKTAG LONG +LONG_PTR LOOP LPBYTE LPCTSTR @@ -1108,6 +1166,7 @@ LPTSTR LPVOID LPWSTR LSEG +LUID LVRelStats LWLock LWLockHandle @@ -1155,6 +1214,7 @@ LockRowsState LockStmt LockTagType LockTupleMode +LockViewRecurse_context LockWaitPolicy LockingClause LogOpts @@ -1166,6 +1226,7 @@ LogicalDecodeFilterByOriginCB LogicalDecodeMessageCB LogicalDecodeShutdownCB LogicalDecodeStartupCB +LogicalDecodeTruncateCB LogicalDecodingContext LogicalErrorCallbackState LogicalOutputPluginInit @@ -1205,6 +1266,7 @@ MemoryContextCallbackFunction MemoryContextCounters MemoryContextData MemoryContextMethods +MemoryStatsPrintFunc MergeAppend MergeAppendPath MergeAppendState @@ -1213,6 +1275,7 @@ MergeJoinClause MergeJoinState MergePath MergeScanSelCache +MetaCommand MinMaxAggInfo MinMaxAggPath MinMaxExpr @@ -1220,6 +1283,7 @@ MinMaxOp MinimalTuple MinimalTupleData MinmaxOpaque +MissingPtr ModifyTable ModifyTablePath ModifyTableState @@ -1311,6 +1375,7 @@ OnCommitItem OnConflictAction OnConflictClause OnConflictExpr +OnConflictSetState OpBtreeInterpretation OpClassCacheEnt OpExpr @@ -1410,6 +1475,7 @@ PLpgSQL_label_type PLpgSQL_nsitem PLpgSQL_nsitem_type PLpgSQL_plugin +PLpgSQL_promise_type 
PLpgSQL_raise_option PLpgSQL_raise_option_type PLpgSQL_rec @@ -1420,8 +1486,10 @@ PLpgSQL_stmt PLpgSQL_stmt_assert PLpgSQL_stmt_assign PLpgSQL_stmt_block +PLpgSQL_stmt_call PLpgSQL_stmt_case PLpgSQL_stmt_close +PLpgSQL_stmt_commit PLpgSQL_stmt_dynexecute PLpgSQL_stmt_dynfors PLpgSQL_stmt_execsql @@ -1441,6 +1509,8 @@ PLpgSQL_stmt_raise PLpgSQL_stmt_return PLpgSQL_stmt_return_next PLpgSQL_stmt_return_query +PLpgSQL_stmt_rollback +PLpgSQL_stmt_set PLpgSQL_stmt_type PLpgSQL_stmt_while PLpgSQL_trigtype @@ -1450,13 +1520,18 @@ PLpgSQL_var PLpgSQL_variable PLwdatum PLword +PLyArrayToOb PLyCursorObject PLyDatumToOb PLyDatumToObFunc PLyExceptionEntry PLyExecutionContext +PLyObToArray PLyObToDatum PLyObToDatumFunc +PLyObToDomain +PLyObToScalar +PLyObToTransform PLyObToTuple PLyObject_AsString_t PLyPlanObject @@ -1466,13 +1541,13 @@ PLyProcedureKey PLyResultObject PLySRFState PLySavedArgs +PLyScalarToOb PLySubtransactionData PLySubtransactionObject +PLyTransformToOb PLyTupleToOb -PLyTypeInfo -PLyTypeInput -PLyTypeOutput PLyUnicode_FromStringAndSize_t +PLy_elog_impl_t PMINIDUMP_CALLBACK_INFORMATION PMINIDUMP_EXCEPTION_INFORMATION PMINIDUMP_USER_STREAM_INFORMATION @@ -1506,7 +1581,9 @@ PSQL_ECHO_HIDDEN PSQL_ERROR_ROLLBACK PTEntryArray PTIterationArray +PTOKEN_PRIVILEGES PTOKEN_USER +PULONG PUTENVPROC PVOID PX_Alias @@ -1522,16 +1599,24 @@ PageHeaderData PageXLogRecPtr PagetableEntry Pairs +ParallelAppendState ParallelBitmapHeapState ParallelCompletionPtr ParallelContext ParallelExecutorInfo +ParallelHashGrowth +ParallelHashJoinBatch +ParallelHashJoinBatchAccessor +ParallelHashJoinState ParallelHeapScanDesc +ParallelHeapScanDescData ParallelIndexScanDesc ParallelSlot ParallelState +ParallelWorkerContext ParallelWorkerInfo Param +ParamCompileHook ParamExecData ParamExternData ParamFetchHook @@ -1551,6 +1636,8 @@ ParsedText ParsedWord ParserSetupHook ParserState +PartClauseInfo +PartClauseMatchStatus PartitionBoundInfo PartitionBoundInfoData PartitionBoundSpec @@ -1560,13 +1647,24 @@ PartitionDescData PartitionDispatch PartitionDispatchData PartitionElem +PartitionHashBound PartitionKey PartitionListValue +PartitionPruneCombineOp +PartitionPruneContext +PartitionPruneInfo +PartitionPruneState +PartitionPruneStep +PartitionPruneStepCombine +PartitionPruneStepOp +PartitionPruningData PartitionRangeBound PartitionRangeDatum PartitionRangeDatumKind +PartitionScheme PartitionSpec -PartitionedChildRelInfo +PartitionTupleRouting +PartitionwiseAggregateType PasswordType Path PathClauseUsage @@ -1664,6 +1762,7 @@ Pool PopulateArrayContext PopulateArrayState PopulateRecordCache +PopulateRecordsetCache PopulateRecordsetState Port Portal @@ -1711,6 +1810,7 @@ ProjectionPath ProtocolVersion PrsStorage PruneState +PruneStepResult PsqlScanCallbacks PsqlScanQuoteType PsqlScanResult @@ -1775,7 +1875,6 @@ RangeBox RangeFunction RangeIOData RangeQueryClause -RangeRemapInfo RangeSubselect RangeTableFunc RangeTableFuncCol @@ -1788,6 +1887,7 @@ RangeVar RangeVarGetRelidCallback RawColumnDefault RawStmt +ReInitializeDSMForeignScan_function ReScanForeignScan_function ReadBufPtrType ReadBufferMode @@ -1799,8 +1899,7 @@ RecheckForeignScan_function RecordCacheEntry RecordCompareData RecordIOData -RecordRemapInfo -RecordTypmodMap +RecoveryLockListsEntry RecoveryTargetAction RecoveryTargetType RectBox @@ -1845,6 +1944,7 @@ RenameStmt ReopenPtrType ReorderBuffer ReorderBufferApplyChangeCB +ReorderBufferApplyTruncateCB ReorderBufferBeginCB ReorderBufferChange ReorderBufferCommitCB @@ -1860,6 +1960,7 @@ ReorderBufferTupleCidEnt 
ReorderBufferTupleCidKey ReorderTuple RepOriginId +ReparameterizeForeignPathByChild_function ReplaceVarsFromTargetList_context ReplaceVarsNoMatchOption ReplicaIdentityStmt @@ -1931,6 +2032,7 @@ SID_AND_ATTRIBUTES SID_IDENTIFIER_AUTHORITY SID_NAME_USE SISeg +SIZE_T SMgrRelation SMgrRelationData SOCKADDR @@ -1985,6 +2087,7 @@ SeqScanState SeqTable SeqTableData SerCommitSeqNo +SerializedReindexState SerializedSnapshotData Session SessionBackupState @@ -2007,6 +2110,8 @@ SharedBitmapState SharedDependencyObjectType SharedDependencyType SharedExecutorInstrumentation +SharedFileSet +SharedHashInfo SharedInvalCatalogMsg SharedInvalCatcacheMsg SharedInvalRelcacheMsg @@ -2014,6 +2119,16 @@ SharedInvalRelmapMsg SharedInvalSmgrMsg SharedInvalSnapshotMsg SharedInvalidationMessage +SharedRecordTableEntry +SharedRecordTableKey +SharedRecordTypmodRegistry +SharedSortInfo +SharedTuplestore +SharedTuplestoreAccessor +SharedTuplestoreChunk +SharedTuplestoreParticipant +SharedTypmodTableEntry +Sharedsort ShellTypeInfo ShippableCacheEntry ShippableCacheKey @@ -2058,6 +2173,7 @@ Sort SortBy SortByDir SortByNulls +SortCoordinate SortGroupClause SortItem SortPath @@ -2112,6 +2228,7 @@ StdAnalyzeData StdRdOptions Step StopList +StopWorkersData StrategyNumber StreamCtl StringInfo @@ -2149,6 +2266,7 @@ TBlockState TIDBitmap TOKEN_DEFAULT_DACL TOKEN_INFORMATION_CLASS +TOKEN_PRIVILEGES TOKEN_USER TParser TParserCharTest @@ -2201,6 +2319,7 @@ TableSpaceOpts TablespaceList TablespaceListCell TapeBlockTrailer +TapeShare TarMethodData TarMethodFile TargetEntry @@ -2256,6 +2375,7 @@ TransformJsonStringValuesState TransitionCaptureState TrgmArc TrgmArcInfo +TrgmBound TrgmColor TrgmColorInfo TrgmNFA @@ -2287,9 +2407,10 @@ TupleHashEntryData TupleHashIterator TupleHashTable TupleQueueReader -TupleRemapClass -TupleRemapInfo TupleTableSlot +TuplesortInstrumentation +TuplesortMethod +TuplesortSpaceType Tuplesortstate Tuplestorestate TwoPhaseCallback @@ -2313,13 +2434,13 @@ TypeCat TypeFuncClass TypeInfo TypeName +U U32 U8 UChar UCharIterator UCollator UConverter -UEnumeration UErrorCode UINT ULARGE_INTEGER @@ -2343,6 +2464,7 @@ UserOpts VacAttrStats VacAttrStatsP VacuumParams +VacuumRelation VacuumStmt Value ValuesScan @@ -2363,6 +2485,7 @@ VariableShowStmt VariableSpace VariableStatData VariableSubstituteHook +VersionedQuery Vfd ViewCheckOption ViewOptions @@ -2487,6 +2610,8 @@ XmlTableBuilderData YYLTYPE YYSTYPE YY_BUFFER_STATE +ZipfCache +ZipfCell _SPI_connection _SPI_plan __AssignProcessToJobObject @@ -2498,6 +2623,7 @@ __RegisterWaitForSingleObject __SetInformationJobObject _resultmap _stringlist +abs acquireLocksOnSubLinks_context adjust_appendrel_attrs_context allocfunc @@ -2537,9 +2663,11 @@ bgworker_main_type binaryheap binaryheap_comparator bitmapword +bits16 bits32 bits8 -bool +bloom_filter +boolean brin_column_state bytea cached_re_str @@ -2551,7 +2679,6 @@ check_network_data check_object_relabel_type check_password_hook_type check_ungrouped_columns_context -chkpass chr clock_t cmpEntriesArg @@ -2586,6 +2713,7 @@ deparse_expr_cxt deparse_namespace destructor dev_t +digit directory_fctx disassembledLeaf dlist_head @@ -2599,9 +2727,19 @@ dsa_area_pool dsa_area_span dsa_handle dsa_pointer +dsa_pointer_atomic dsa_segment_header dsa_segment_index dsa_segment_map +dshash_compare_function +dshash_hash +dshash_hash_function +dshash_parameters +dshash_partition +dshash_table +dshash_table_control +dshash_table_handle +dshash_table_item dsm_control_header dsm_control_item dsm_handle @@ -2702,6 +2840,7 @@ hstoreUniquePairs_t 
hstoreUpgrade_t hyperLogLogState ifState +ilist import_error_callback_arg indexed_tlist inet @@ -2727,6 +2866,7 @@ intptr_t intvKEY itemIdSort itemIdSortData +iterator jmp_buf join_search_hook_type json_aelem_action @@ -2740,6 +2880,7 @@ lclTocEntry leafSegmentInfo line_t lineno_t +list_qsort_comparator locale_t locate_agg_of_level_context locate_var_of_level_context @@ -2792,6 +2933,7 @@ oidvector on_dsm_detach_callback on_exit_nicely_callback ossl_EVP_cipher_func +other output_type pagetable_hash pagetable_iterator @@ -2871,6 +3013,7 @@ pltcl_proc_desc pltcl_proc_key pltcl_proc_ptr pltcl_query_desc +pointer pos_trgm post_parse_analyze_hook_type pqbool @@ -2883,6 +3026,7 @@ printTextFormat printTextLineFormat printTextLineWrap printTextRule +printfunc priv_map process_file_callback_t process_sublinks_context @@ -2913,6 +3057,7 @@ rb_combiner rb_comparator rb_freefunc reduce_outer_joins_state +reference regex_arc_t regex_t regexp @@ -2992,6 +3137,7 @@ spgxlogVacuumLeaf spgxlogVacuumRedirect spgxlogVacuumRoot split_pathtarget_context +split_pathtarget_item sql_error_callback_arg sqlparseInfo sqlparseState @@ -3005,6 +3151,7 @@ stmtCacheEntry storeInfo storeRes_func stream_stop_callback +string substitute_actual_parameters_context substitute_actual_srf_parameters_context substitute_multiple_relids_context @@ -3031,11 +3178,14 @@ trgm_mb_char trivalue tsKEY ts_db_fctx +ts_parserstate +ts_tokenizer ts_tokentype tsearch_readline_state tuplehash_hash tuplehash_iterator txid +type tzEntry u1byte u4byte @@ -3049,6 +3199,7 @@ uint16_t uint32 uint32_t uint64 +uint64_t uint8 uint8_t uintptr_t @@ -3080,6 +3231,7 @@ walrcv_disconnect_fn walrcv_endstreaming_fn walrcv_exec_fn walrcv_get_conninfo_fn +walrcv_get_senderinfo_fn walrcv_identify_system_fn walrcv_readtimelinehistoryfile_fn walrcv_receive_fn @@ -3092,6 +3244,7 @@ win32_pthread wint_t worker_state worktable +wrap xl_brin_createidx xl_brin_desummarize xl_brin_insert @@ -3138,6 +3291,7 @@ xl_heap_lock_updated xl_heap_multi_insert xl_heap_new_cid xl_heap_rewrite_mapping +xl_heap_truncate xl_heap_update xl_heap_visible xl_invalid_page @@ -3168,6 +3322,7 @@ xl_xact_invals xl_xact_origin xl_xact_parsed_abort xl_xact_parsed_commit +xl_xact_parsed_prepare xl_xact_relfilenodes xl_xact_subxacts xl_xact_twophase diff --git a/src/tools/pgtest b/src/tools/pgtest index 79954940d2..70f6a62ad0 100755 --- a/src/tools/pgtest +++ b/src/tools/pgtest @@ -19,13 +19,28 @@ mkdir /tmp/$$ TMP="/tmp/$$" if [ "X$1" != "X-n" ] -then PGCLEAN=clean -else shift +then CLEAN="Y" +else CLEAN="" + shift fi +rm -f tmp_install/log/install.log + # Run "make check" and store return code in $TMP/ret. # Display output but also capture it in $TMP/0. -($MAKE "$@" $PGCLEAN check 2>&1; echo "$?" > $TMP/ret) | tee $TMP/0 +( + if [ "$CLEAN" ] + then $MAKE "$@" clean 2>&1 + echo "$?" > $TMP/ret + fi + if [ $(cat $TMP/ret) -eq 0 ] + then $MAKE "$@" 2>&1 && $MAKE "$@" check 2>&1 + echo "$?" > $TMP/ret + fi +) | tee $TMP/0 + +# Grab possible warnings from install.log +[ -e tmp_install/log/install.log ] && cat tmp_install/log/install.log >> $TMP/0 # If success, display warnings if [ $(cat $TMP/ret) -eq 0 ] diff --git a/src/tools/testint128.c b/src/tools/testint128.c index afdfd15cb0..559b1ea264 100644 --- a/src/tools/testint128.c +++ b/src/tools/testint128.c @@ -6,7 +6,7 @@ * This is a standalone test program that compares the behavior of an * implementation in int128.h to an (assumed correct) int128 native type. 
* - * Copyright (c) 2017, PostgreSQL Global Development Group + * Copyright (c) 2017-2018, PostgreSQL Global Development Group * * * IDENTIFICATION diff --git a/src/tools/valgrind.supp b/src/tools/valgrind.supp index af03051260..ec47a228ae 100644 --- a/src/tools/valgrind.supp +++ b/src/tools/valgrind.supp @@ -52,9 +52,9 @@ { padding_XLogRecData_write Memcheck:Param - write(buf) + pwrite64(buf) - ... + ... fun:XLogWrite } diff --git a/src/tools/version_stamp.pl b/src/tools/version_stamp.pl index 90ccf9cbaf..41f5d764b7 100755 --- a/src/tools/version_stamp.pl +++ b/src/tools/version_stamp.pl @@ -3,7 +3,7 @@ ################################################################# # version_stamp.pl -- update version stamps throughout the source tree # -# Copyright (c) 2008-2017, PostgreSQL Global Development Group +# Copyright (c) 2008-2018, PostgreSQL Global Development Group # # src/tools/version_stamp.pl ################################################################# @@ -24,7 +24,7 @@ # Major version is hard-wired into the script. We update it when we branch # a new development version. -my $majorversion = 11; +my $majorversion = 12; # Validate argument and compute derived variables my $minor = shift; @@ -83,8 +83,8 @@ open(my $fh, '<', "configure.in") || die "could not read configure.in: $!\n"; while (<$fh>) { - if ( -m/^m4_if\(m4_defn\(\[m4_PACKAGE_VERSION\]\), \[(.*)\], \[\], \[m4_fatal/) + if (m/^m4_if\(m4_defn\(\[m4_PACKAGE_VERSION\]\), \[(.*)\], \[\], \[m4_fatal/ + ) { $aconfver = $1; last; @@ -99,29 +99,29 @@ my $fixedfiles = ""; sed_file("configure.in", -"-e 's/AC_INIT(\\[PostgreSQL\\], \\[[0-9a-z.]*\\]/AC_INIT([PostgreSQL], [$fullversion]/'" + "-e 's/AC_INIT(\\[PostgreSQL\\], \\[[0-9a-z.]*\\]/AC_INIT([PostgreSQL], [$fullversion]/'" ); sed_file("doc/bug.template", -"-e 's/PostgreSQL version (example: PostgreSQL .*) *: PostgreSQL .*/PostgreSQL version (example: PostgreSQL $fullversion): PostgreSQL $fullversion/'" + "-e 's/PostgreSQL version (example: PostgreSQL .*) *: PostgreSQL .*/PostgreSQL version (example: PostgreSQL $fullversion): PostgreSQL $fullversion/'" ); sed_file("src/include/pg_config.h.win32", -"-e 's/#define PACKAGE_STRING \"PostgreSQL .*\"/#define PACKAGE_STRING \"PostgreSQL $fullversion\"/' " + "-e 's/#define PACKAGE_STRING \"PostgreSQL .*\"/#define PACKAGE_STRING \"PostgreSQL $fullversion\"/' " . "-e 's/#define PACKAGE_VERSION \".*\"/#define PACKAGE_VERSION \"$fullversion\"/' " . "-e 's/#define PG_VERSION \".*\"/#define PG_VERSION \"$fullversion\"/' " . "-e 's/#define PG_VERSION_NUM .*/#define PG_VERSION_NUM $padnumericversion/'" ); sed_file("src/interfaces/libpq/libpq.rc.in", -"-e 's/FILEVERSION [0-9]*,[0-9]*,[0-9]*,0/FILEVERSION $majorversion,0,$numericminor,0/' " + "-e 's/FILEVERSION [0-9]*,[0-9]*,[0-9]*,0/FILEVERSION $majorversion,0,$numericminor,0/' " . "-e 's/PRODUCTVERSION [0-9]*,[0-9]*,[0-9]*,0/PRODUCTVERSION $majorversion,0,$numericminor,0/' " . "-e 's/VALUE \"FileVersion\", \"[0-9.]*/VALUE \"FileVersion\", \"$numericversion/' " . "-e 's/VALUE \"ProductVersion\", \"[0-9.]*/VALUE \"ProductVersion\", \"$numericversion/'" ); sed_file("src/port/win32ver.rc", -"-e 's/FILEVERSION [0-9]*,[0-9]*,[0-9]*,0/FILEVERSION $majorversion,0,$numericminor,0/' " + "-e 's/FILEVERSION [0-9]*,[0-9]*,[0-9]*,0/FILEVERSION $majorversion,0,$numericminor,0/' " . 
"-e 's/PRODUCTVERSION [0-9]*,[0-9]*,[0-9]*,0/PRODUCTVERSION $majorversion,0,$numericminor,0/'" ); @@ -141,4 +141,5 @@ sub sed_file or die "mv failed: $?"; $fixedfiles .= "\t$filename\n"; + return; } diff --git a/src/tools/win32tzlist.pl b/src/tools/win32tzlist.pl index 0bdcc3610f..0fb561b44b 100755 --- a/src/tools/win32tzlist.pl +++ b/src/tools/win32tzlist.pl @@ -2,7 +2,7 @@ # # win32tzlist.pl -- compare Windows timezone information # -# Copyright (c) 2008-2017, PostgreSQL Global Development Group +# Copyright (c) 2008-2018, PostgreSQL Global Development Group # # src/tools/win32tzlist.pl ################################################################# @@ -47,9 +47,11 @@ die "Incomplete timezone data for $keyname!\n" unless ($vals{Std} && $vals{Dlt} && $vals{Display}); push @system_zones, - { 'std' => $vals{Std}->[2], + { + 'std' => $vals{Std}->[2], 'dlt' => $vals{Dlt}->[2], - 'display' => clean_displayname($vals{Display}->[2]), }; + 'display' => clean_displayname($vals{Display}->[2]), + }; } $basekey->Close(); @@ -75,10 +77,12 @@ m/{\s+"([^"]+)",\s+"([^"]+)",\s+"([^"]+)",?\s+},\s+\/\*(.+?)\*\//gs) { push @file_zones, - { 'std' => $1, + { + 'std' => $1, 'dlt' => $2, 'match' => $3, - 'display' => clean_displayname($4), }; + 'display' => clean_displayname($4), + }; } # @@ -102,7 +106,7 @@ if ($sys->{display} ne $file->{display}) { print -"Timezone $sys->{std} changed displayname ('$sys->{display}' from '$file->{display}')!\n"; + "Timezone $sys->{std} changed displayname ('$sys->{display}' from '$file->{display}')!\n"; } last; } @@ -119,7 +123,7 @@ for my $z (@add) { print -"\t{\n\t\t\"$z->{std}\", \"$z->{dlt}\",\n\t\t\"FIXME\"\n\t},\t\t\t\t\t\t\t/* $z->{display} */\n"; + "\t{\n\t\t\"$z->{std}\", \"$z->{dlt}\",\n\t\t\"FIXME\"\n\t},\t\t\t\t\t\t\t/* $z->{display} */\n"; } } diff --git a/src/tutorial/complex.c b/src/tutorial/complex.c index 1b5ebc2ff0..6798a9e6ba 100644 --- a/src/tutorial/complex.c +++ b/src/tutorial/complex.c @@ -38,8 +38,8 @@ complex_in(PG_FUNCTION_ARGS) if (sscanf(str, " ( %lf , %lf )", &x, &y) != 2) ereport(ERROR, (errcode(ERRCODE_INVALID_TEXT_REPRESENTATION), - errmsg("invalid input syntax for complex: \"%s\"", - str))); + errmsg("invalid input syntax for type %s: \"%s\"", + "complex", str))); result = (Complex *) palloc(sizeof(Complex)); result->x = x; diff --git a/src/tutorial/complex.source b/src/tutorial/complex.source index 035c7a7d13..2fc3e501ce 100644 --- a/src/tutorial/complex.source +++ b/src/tutorial/complex.source @@ -5,7 +5,7 @@ -- use this new type. 
-- -- --- Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group +-- Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group -- Portions Copyright (c) 1994, Regents of the University of California -- -- src/tutorial/complex.source @@ -174,7 +174,7 @@ CREATE OPERATOR < ( CREATE OPERATOR <= ( leftarg = complex, rightarg = complex, procedure = complex_abs_le, commutator = >= , negator = > , - restrict = scalarltsel, join = scalarltjoinsel + restrict = scalarlesel, join = scalarlejoinsel ); CREATE OPERATOR = ( leftarg = complex, rightarg = complex, procedure = complex_abs_eq, @@ -186,7 +186,7 @@ CREATE OPERATOR = ( CREATE OPERATOR >= ( leftarg = complex, rightarg = complex, procedure = complex_abs_ge, commutator = <= , negator = < , - restrict = scalargtsel, join = scalargtjoinsel + restrict = scalargesel, join = scalargejoinsel ); CREATE OPERATOR > ( leftarg = complex, rightarg = complex, procedure = complex_abs_gt, diff --git a/src/tutorial/syscat.source b/src/tutorial/syscat.source index 2f97642a39..7b3c4a5637 100644 --- a/src/tutorial/syscat.source +++ b/src/tutorial/syscat.source @@ -4,7 +4,7 @@ -- sample queries to the system catalogs -- -- --- Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group +-- Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group -- Portions Copyright (c) 1994, Regents of the University of California -- -- src/tutorial/syscat.source From 8005187a4459bcf98e909d7f0765d051bf82ee37 Mon Sep 17 00:00:00 2001 From: Samuel Marks Date: Sun, 11 Nov 2018 23:52:09 +1100 Subject: [PATCH 2/2] Restore port/dynloader; use CMakeLists.txt from stalkerg/postgres_cmake/pull/44; resolve `HAVE_INT128` macro redefinition warning --- CMakeLists.txt | 16 +- contrib/CMakeLists.txt | 1 - contrib/chkpass/CMakeLists.txt | 22 -- contrib/citext/CMakeLists.txt | 1 + contrib/hstore/CMakeLists.txt | 1 + contrib/pg_stat_statements/CMakeLists.txt | 2 +- contrib/pgstattuple/CMakeLists.txt | 2 +- contrib/spi/CMakeLists.txt | 1 - src/backend/CMakeLists.txt | 301 ++++++++++++---------- src/backend/port/dynloader/aix.c | 7 + src/backend/port/dynloader/aix.h | 39 +++ src/backend/port/dynloader/cygwin.c | 3 + src/backend/port/dynloader/cygwin.h | 36 +++ src/backend/port/dynloader/darwin.c | 138 ++++++++++ src/backend/port/dynloader/darwin.h | 8 + src/backend/port/dynloader/freebsd.c | 106 ++++++++ src/backend/port/dynloader/freebsd.h | 58 +++++ src/backend/port/dynloader/hpux.c | 68 +++++ src/backend/port/dynloader/hpux.h | 25 ++ src/backend/port/dynloader/linux.c | 133 ++++++++++ src/backend/port/dynloader/linux.h | 44 ++++ src/backend/port/dynloader/netbsd.c | 106 ++++++++ src/backend/port/dynloader/netbsd.h | 59 +++++ src/backend/port/dynloader/openbsd.c | 106 ++++++++ src/backend/port/dynloader/openbsd.h | 58 +++++ src/backend/port/dynloader/solaris.c | 7 + src/backend/port/dynloader/solaris.h | 38 +++ src/backend/port/dynloader/win32.c | 85 ++++++ src/backend/port/dynloader/win32.h | 19 ++ src/backend/snowball/CMakeLists.txt | 60 +++-- src/bin/psql/CMakeLists.txt | 22 +- src/include/c.h | 3 + src/timezone/CMakeLists.txt | 103 +++----- 33 files changed, 1420 insertions(+), 258 deletions(-) delete mode 100644 contrib/chkpass/CMakeLists.txt create mode 100644 src/backend/port/dynloader/aix.c create mode 100644 src/backend/port/dynloader/aix.h create mode 100644 src/backend/port/dynloader/cygwin.c create mode 100644 src/backend/port/dynloader/cygwin.h create mode 100644 src/backend/port/dynloader/darwin.c create mode 100644 
src/backend/port/dynloader/darwin.h create mode 100644 src/backend/port/dynloader/freebsd.c create mode 100644 src/backend/port/dynloader/freebsd.h create mode 100644 src/backend/port/dynloader/hpux.c create mode 100644 src/backend/port/dynloader/hpux.h create mode 100644 src/backend/port/dynloader/linux.c create mode 100644 src/backend/port/dynloader/linux.h create mode 100644 src/backend/port/dynloader/netbsd.c create mode 100644 src/backend/port/dynloader/netbsd.h create mode 100644 src/backend/port/dynloader/openbsd.c create mode 100644 src/backend/port/dynloader/openbsd.h create mode 100644 src/backend/port/dynloader/solaris.c create mode 100644 src/backend/port/dynloader/solaris.h create mode 100644 src/backend/port/dynloader/win32.c create mode 100644 src/backend/port/dynloader/win32.h diff --git a/CMakeLists.txt b/CMakeLists.txt index 120c89414f..e42fdeeab5 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -25,7 +25,7 @@ set(POSTGRES_VERSION ${POSTGRES_MAJOR_VERSION}.${POSTGRES_MINOR_VERSION}.${POSTGRES_PATCH_VERSION}) set(PG_VERSION "11devel") -set(PG_VERSION_NUM 100000) +set(PG_VERSION_NUM 110000) set(PACKAGE_BUGREPORT "pgsql-bugs@postgresql.org") # Offer the user the choice of overriding the installation directories @@ -286,6 +286,15 @@ if(USE_BONJOUR) endif() endif() +option(WITH_SYSTEM_TZDATA "Define to use system tzdata information." OFF) +if (WITH_SYSTEM_TZDATA) + if (EXISTS ${WITH_SYSTEM_TZDATA}) + add_compile_options(-DSYSTEMTZDIR="${WITH_SYSTEM_TZDATA}") + else() + message(FATAL_ERROR "tzdata directory \"${WITH_SYSTEM_TZDATA}\" does not exists") + endif() +endif() + option(STRONG_RANDOM "Strong random number source" ON) option(STRONG_RANDOM_SOURCE "which random number source to use - openssl, win32, dev" OFF) if(STRONG_RANDOM) @@ -691,6 +700,11 @@ if(OPENSSL_FOUND) set(CMAKE_REQUIRED_INCLUDES "${CMAKE_REQUIRED_INCLUDES};${OPENSSL_INCLUDE_DIR}") set(CMAKE_REQUIRED_LIBRARIES ${OPENSSL_LIBRARIES}) check_function_exists(SSL_get_current_compression HAVE_SSL_GET_CURRENT_COMPRESSION) + check_function_exists(BIO_get_data HAVE_BIO_GET_DATA) + check_function_exists(BIO_meth_new HAVE_BIO_METH_NEW) + message(STATUS "OpenSSL version: ${OPENSSL_VERSION} libs: ${OPENSSL_LIBRARIES} include: ${OPENSSL_INCLUDE_DIR}") +else() + message(STATUS "OpenSSL: off") endif(OPENSSL_FOUND) if(USE_SYSTEMD) diff --git a/contrib/CMakeLists.txt b/contrib/CMakeLists.txt index 71a2d77a79..d8e91d42ba 100644 --- a/contrib/CMakeLists.txt +++ b/contrib/CMakeLists.txt @@ -26,7 +26,6 @@ add_subdirectory(auto_explain) add_subdirectory(btree_gin) add_subdirectory(btree_gist) add_subdirectory(bloom) -add_subdirectory(chkpass) add_subdirectory(citext) add_subdirectory(cube) add_subdirectory(dblink) diff --git a/contrib/chkpass/CMakeLists.txt b/contrib/chkpass/CMakeLists.txt deleted file mode 100644 index 1265b65d2a..0000000000 --- a/contrib/chkpass/CMakeLists.txt +++ /dev/null @@ -1,22 +0,0 @@ -include_directories("${PROJECT_SOURCE_DIR}/src/include") - -set(extension_name chkpass) - -add_library(${extension_name} ${PLUGIN_TYPE} ${extension_name}.c) -target_link_libraries(${extension_name} ${contrib_libs}) -if(CRYPT_LIB) - target_link_libraries(${extension_name} ${CRYPT_LIB}) -endif() -set_target_properties(${extension_name} PROPERTIES PREFIX "") -add_dependencies(${extension_name} postgres) -if (MSVC) - gen_def(${extension_name}) -endif() -if(NOT PGXS) - CMAKE_SET_TARGET_FOLDER(${extension_name} contrib) -endif() -install(TARGETS ${extension_name} - RUNTIME DESTINATION ${PGBINDIR} - LIBRARY DESTINATION ${LIBDIR}) 
-install(FILES ${extension_name}.control ${extension_name}--1.0.sql ${extension_name}--unpackaged--1.0.sql - DESTINATION ${PGSHAREDIR}/extension) diff --git a/contrib/citext/CMakeLists.txt b/contrib/citext/CMakeLists.txt index 4d33eb6a72..bc95c7ecfb 100644 --- a/contrib/citext/CMakeLists.txt +++ b/contrib/citext/CMakeLists.txt @@ -17,6 +17,7 @@ install(TARGETS ${extension_name} LIBRARY DESTINATION ${LIBDIR}) install(FILES ${extension_name}.control + ${extension_name}--1.4--1.5.sql ${extension_name}--1.4.sql ${extension_name}--1.3--1.4.sql ${extension_name}--1.2--1.3.sql diff --git a/contrib/hstore/CMakeLists.txt b/contrib/hstore/CMakeLists.txt index b75ff4d3ad..a9fcb6005b 100644 --- a/contrib/hstore/CMakeLists.txt +++ b/contrib/hstore/CMakeLists.txt @@ -26,6 +26,7 @@ install(TARGETS ${extension_name} LIBRARY DESTINATION ${LIBDIR}) install(FILES ${extension_name}.control + ${extension_name}--1.4--1.5.sql ${extension_name}--1.4.sql ${extension_name}--1.0--1.1.sql ${extension_name}--1.1--1.2.sql diff --git a/contrib/pg_stat_statements/CMakeLists.txt b/contrib/pg_stat_statements/CMakeLists.txt index 81df5bb3a4..047f0e0c22 100644 --- a/contrib/pg_stat_statements/CMakeLists.txt +++ b/contrib/pg_stat_statements/CMakeLists.txt @@ -17,8 +17,8 @@ install(TARGETS ${extension_name} LIBRARY DESTINATION ${LIBDIR}) install(FILES ${extension_name}.control - ${extension_name}--1.4.sql ${extension_name}--1.4--1.5.sql + ${extension_name}--1.4.sql ${extension_name}--1.3--1.4.sql ${extension_name}--1.2--1.3.sql ${extension_name}--1.1--1.2.sql diff --git a/contrib/pgstattuple/CMakeLists.txt b/contrib/pgstattuple/CMakeLists.txt index 217f6f8243..94fee3e220 100644 --- a/contrib/pgstattuple/CMakeLists.txt +++ b/contrib/pgstattuple/CMakeLists.txt @@ -21,8 +21,8 @@ install(TARGETS ${extension_name} LIBRARY DESTINATION ${LIBDIR}) install(FILES ${extension_name}.control - ${extension_name}--1.4.sql ${extension_name}--1.4--1.5.sql + ${extension_name}--1.4.sql ${extension_name}--1.3--1.4.sql ${extension_name}--1.2--1.3.sql ${extension_name}--1.1--1.2.sql diff --git a/contrib/spi/CMakeLists.txt b/contrib/spi/CMakeLists.txt index 1e95e7c4aa..948df0ea92 100644 --- a/contrib/spi/CMakeLists.txt +++ b/contrib/spi/CMakeLists.txt @@ -6,7 +6,6 @@ set(modules_list insert_username moddatetime refint - timetravel ) foreach(loop_var IN ITEMS ${modules_list}) diff --git a/src/backend/CMakeLists.txt b/src/backend/CMakeLists.txt index fcdf74aacd..14c25b92d9 100644 --- a/src/backend/CMakeLists.txt +++ b/src/backend/CMakeLists.txt @@ -5,11 +5,13 @@ include_directories( ) set(postgres_include_catalog "${PROJECT_SOURCE_DIR}/src/include/catalog") +set(postgres_include_dir "${PROJECT_SOURCE_DIR}/src/include") set(gen_fmgrtab_depend utils/Gen_fmgrtab.pl ${CMAKE_CURRENT_SOURCE_DIR}/catalog/Catalog.pm ${postgres_include_catalog}/pg_proc.h + ${postgres_include_dir}/access/transam.h ) set(gen_fmgrtab_output ${PROJECT_SOURCE_DIR}/src/include/utils/fmgroids.h @@ -20,7 +22,7 @@ set(gen_fmgrtab_output add_custom_command( MAIN_DEPENDENCY ${gen_fmgrtab_depend} OUTPUT ${gen_fmgrtab_output} - COMMAND ${PERL_EXECUTABLE} -I${CMAKE_CURRENT_SOURCE_DIR}/catalog Gen_fmgrtab.pl ${postgres_include_catalog}/pg_proc.h + COMMAND ${PERL_EXECUTABLE} -I${CMAKE_CURRENT_SOURCE_DIR}/catalog Gen_fmgrtab.pl -I${postgres_include_dir} ${postgres_include_catalog}/pg_proc.h COMMAND ${CMAKE_COMMAND} -E copy ${CMAKE_CURRENT_SOURCE_DIR}/utils/fmgroids.h ${PROJECT_SOURCE_DIR}/src/include/utils/fmgroids.h COMMAND ${CMAKE_COMMAND} -E copy 
${CMAKE_CURRENT_SOURCE_DIR}/utils/fmgrprotos.h ${PROJECT_SOURCE_DIR}/src/include/utils/fmgrprotos.h WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR}/utils @@ -140,6 +142,7 @@ set(access_SRCS access/common/scankey.c access/common/tupconvert.c access/common/tupdesc.c + access/common/session.c access/gin/ginutil.c access/gin/gininsert.c @@ -481,6 +484,7 @@ set(executor_SRCS executor/nodeTableFuncscan.c executor/tstoreReceiver.c executor/spi.c + executor/execPartition.c ) set(lib_SRCS @@ -492,6 +496,7 @@ set(lib_SRCS lib/rbtree.c lib/stringinfo.c lib/knapsack.c + lib/dshash.c ) if(OPENSSL_FOUND) @@ -514,159 +519,195 @@ set(libpq_SRCS ) set(utils_SRCS - utils/error/assert.c - utils/error/elog.c - utils/cache/attoptcache.c - utils/cache/catcache.c - utils/cache/evtcache.c - utils/cache/inval.c - utils/cache/plancache.c - utils/cache/relcache.c - utils/cache/relmapper.c - utils/cache/relfilenodemap.c - utils/cache/spccache.c - utils/cache/syscache.c - utils/cache/lsyscache.c - utils/cache/typcache.c - utils/cache/ts_cache.c - utils/mmgr/aset.c - utils/mmgr/dsa.c - utils/mmgr/mcxt.c - utils/mmgr/portalmem.c - utils/mmgr/freepage.c - utils/mmgr/memdebug.c - utils/mmgr/slab.c - utils/adt/acl.c - utils/adt/arrayfuncs.c - utils/adt/array_expanded.c + utils/misc/tzparser.c + utils/misc/guc.c + utils/misc/ps_status.c + utils/misc/timeout.c + utils/misc/sampling.c + utils/misc/queryenvironment.c + utils/misc/pg_config.c + utils/misc/pg_controldata.c + utils/misc/rls.c + utils/misc/pg_rusage.c + utils/misc/backend_random.c + utils/misc/superuser.c + utils/misc/help_config.c + utils/init/miscinit.c + utils/init/postinit.c + utils/init/globals.c + utils/adt/geo_selfuncs.c utils/adt/array_selfuncs.c - utils/adt/array_typanalyze.c - utils/adt/array_userfuncs.c - utils/adt/arrayutils.c - utils/adt/ascii.c - utils/adt/bool.c + utils/adt/json.c + utils/adt/xml.c + utils/adt/tsquery_op.c utils/adt/cash.c - utils/adt/char.c - utils/adt/date.c - utils/adt/datetime.c + utils/adt/network_gist.c + utils/adt/network.c + utils/adt/rangetypes_typanalyze.c + utils/adt/tsginidx.c + utils/adt/varchar.c utils/adt/datum.c - utils/adt/dbsize.c - utils/adt/domains.c - utils/adt/encode.c - utils/adt/enum.c - utils/adt/expandeddatum.c - utils/adt/float.c - utils/adt/format_type.c - utils/adt/formatting.c + utils/adt/ri_triggers.c + utils/adt/tsquery_gist.c + utils/adt/ascii.c + utils/adt/rangetypes_selfuncs.c + utils/adt/like_match.c + utils/adt/char.c + utils/adt/jsonfuncs.c utils/adt/genfile.c - utils/adt/geo_ops.c - utils/adt/geo_selfuncs.c - utils/adt/geo_spgist.c + utils/adt/array_userfuncs.c + utils/adt/oracle_compat.c + utils/adt/tsquery_rewrite.c utils/adt/inet_cidr_ntop.c - utils/adt/inet_net_pton.c - utils/adt/int.c - utils/adt/int8.c - utils/adt/json.c - utils/adt/jsonb.c - utils/adt/jsonb_gin.c - utils/adt/jsonb_op.c - utils/adt/jsonb_util.c - utils/adt/jsonfuncs.c - utils/adt/like.c - #utils/adt/like_match.c - utils/adt/lockfuncs.c + utils/adt/tsgistidx.c utils/adt/mac.c - utils/adt/misc.c - utils/adt/nabstime.c - utils/adt/name.c - utils/adt/network.c - utils/adt/network_gist.c + utils/adt/trigfuncs.c utils/adt/network_selfuncs.c - utils/adt/numeric.c - utils/adt/numutils.c - utils/adt/oid.c - utils/adt/oracle_compat.c - utils/adt/orderedsetaggs.c - utils/adt/pg_locale.c - utils/adt/pg_lsn.c - utils/adt/pg_upgrade_support.c - utils/adt/pgstatfuncs.c - utils/adt/pseudotypes.c - utils/adt/quote.c - utils/adt/rangetypes.c - utils/adt/rangetypes_gist.c - utils/adt/rangetypes_selfuncs.c - utils/adt/rangetypes_spgist.c - 
utils/adt/rangetypes_typanalyze.c + utils/adt/dbsize.c + utils/adt/jsonb.c + utils/adt/array_typanalyze.c + utils/adt/expandeddatum.c + utils/adt/levenshtein.c + utils/adt/tsquery_util.c + utils/adt/expandedrecord.c + utils/adt/tsvector_parser.c + utils/adt/geo_spgist.c + utils/adt/int8.c + utils/adt/tsvector_op.c + utils/adt/tid.c + utils/adt/txid.c utils/adt/regexp.c - utils/adt/regproc.c - utils/adt/ri_triggers.c - utils/adt/rowtypes.c + utils/adt/arrayfuncs.c utils/adt/ruleutils.c - utils/adt/selfuncs.c - utils/adt/tid.c utils/adt/timestamp.c - utils/adt/trigfuncs.c - utils/adt/tsginidx.c - utils/adt/tsgistidx.c - utils/adt/tsquery.c - utils/adt/tsquery_cleanup.c - utils/adt/tsquery_gist.c - utils/adt/tsquery_op.c - utils/adt/tsquery_rewrite.c - utils/adt/tsquery_util.c + utils/adt/mac8.c + utils/adt/encode.c + utils/adt/pg_locale.c + utils/adt/like.c + utils/adt/geo_ops.c + utils/adt/varlena.c + utils/adt/quote.c + utils/adt/jsonb_op.c + utils/adt/rangetypes.c + utils/adt/cryptohashes.c utils/adt/tsrank.c + utils/adt/lockfuncs.c + utils/adt/windowfuncs.c + utils/adt/array_expanded.c + utils/adt/pg_lsn.c + utils/adt/oid.c + utils/adt/selfuncs.c + utils/adt/xid.c + utils/adt/formatting.c utils/adt/tsvector.c - utils/adt/tsvector_op.c - utils/adt/tsvector_parser.c - utils/adt/txid.c + utils/adt/numeric.c + utils/adt/enum.c + utils/adt/tsquery_cleanup.c + utils/adt/jsonb_util.c utils/adt/uuid.c + utils/adt/partitionfuncs.c utils/adt/varbit.c - utils/adt/varchar.c - utils/adt/varlena.c + utils/adt/float.c + utils/adt/name.c utils/adt/amutils.c + utils/adt/pgstatfuncs.c utils/adt/version.c - utils/adt/windowfuncs.c - utils/adt/xid.c - utils/adt/xml.c + utils/adt/acl.c + utils/adt/pseudotypes.c + utils/adt/numutils.c + utils/adt/arrayutils.c + utils/adt/regproc.c + utils/adt/date.c + utils/adt/orderedsetaggs.c + utils/adt/jsonb_gin.c + utils/adt/rangetypes_spgist.c + utils/adt/tsquery.c + utils/adt/bool.c + utils/adt/misc.c + utils/adt/domains.c + utils/adt/inet_net_pton.c + utils/adt/int.c + utils/adt/format_type.c utils/adt/network_spgist.c - utils/adt/mac8.c - utils/fmgr/dfmgr.c - utils/fmgr/fmgr.c - utils/fmgr/funcapi.c - utils/hash/dynahash.c - utils/hash/hashfn.c - utils/hash/pg_crc.c - utils/init/globals.c - utils/init/miscinit.c - utils/init/postinit.c + utils/adt/pg_upgrade_support.c + utils/adt/rangetypes_gist.c + utils/adt/datetime.c + utils/adt/rowtypes.c + utils/cache/inval.c + utils/cache/partcache.c + utils/cache/relmapper.c + utils/cache/evtcache.c + utils/cache/catcache.c + utils/cache/lsyscache.c + utils/cache/typcache.c + utils/cache/relcache.c + utils/cache/relfilenodemap.c + utils/cache/syscache.c + utils/cache/spccache.c + utils/cache/attoptcache.c + utils/cache/plancache.c + utils/cache/ts_cache.c utils/resowner/resowner.c utils/sort/logtape.c + utils/sort/sharedtuplestore.c utils/sort/sortsupport.c - utils/sort/tuplesort.c utils/sort/tuplestore.c + utils/sort/tuplesort.c + utils/hash/pg_crc.c + utils/hash/dynahash.c + utils/hash/hashfn.c + utils/mmgr/freepage.c + utils/mmgr/slab.c + utils/mmgr/portalmem.c + utils/mmgr/dsa.c + utils/mmgr/mcxt.c + utils/mmgr/aset.c + utils/mmgr/memdebug.c + utils/mmgr/generation.c + utils/fmgr/fmgr.c + utils/fmgr/funcapi.c + utils/fmgr/dfmgr.c utils/time/combocid.c - utils/time/tqual.c utils/time/snapmgr.c - utils/misc/guc.c - utils/misc/help_config.c - utils/misc/pg_rusage.c - utils/misc/pg_controldata.c - utils/misc/pg_config.c - utils/misc/ps_status.c - utils/misc/rls.c - utils/misc/sampling.c - utils/misc/superuser.c - 
utils/misc/timeout.c - utils/misc/tzparser.c - utils/misc/backend_random.c - utils/misc/queryenvironment.c - utils/mb/encnames.c - utils/mb/conv.c - utils/mb/mbutils.c - utils/mb/wchar.c + utils/time/tqual.c utils/mb/wstrcmp.c + utils/mb/win866.c + utils/mb/wchar.c + utils/mb/win1251.c + utils/mb/conv.c utils/mb/wstrncmp.c + utils/mb/iso.c + utils/mb/conversion_procs/cyrillic_and_mic/cyrillic_and_mic.c + utils/mb/conversion_procs/euc_kr_and_mic/euc_kr_and_mic.c + utils/mb/conversion_procs/utf8_and_euc_cn/utf8_and_euc_cn.c + utils/mb/conversion_procs/utf8_and_cyrillic/utf8_and_cyrillic.c + utils/mb/conversion_procs/utf8_and_iso8859/utf8_and_iso8859.c + utils/mb/conversion_procs/utf8_and_johab/utf8_and_johab.c + utils/mb/conversion_procs/utf8_and_euc_tw/utf8_and_euc_tw.c + utils/mb/conversion_procs/utf8_and_ascii/utf8_and_ascii.c + utils/mb/conversion_procs/utf8_and_gbk/utf8_and_gbk.c + utils/mb/conversion_procs/ascii_and_mic/ascii_and_mic.c + utils/mb/conversion_procs/utf8_and_sjis2004/utf8_and_sjis2004.c + utils/mb/conversion_procs/latin2_and_win1250/latin2_and_win1250.c + utils/mb/conversion_procs/utf8_and_big5/utf8_and_big5.c + utils/mb/conversion_procs/latin_and_mic/latin_and_mic.c + utils/mb/conversion_procs/utf8_and_uhc/utf8_and_uhc.c + utils/mb/conversion_procs/utf8_and_sjis/utf8_and_sjis.c + utils/mb/conversion_procs/utf8_and_win/utf8_and_win.c + utils/mb/conversion_procs/utf8_and_gb18030/utf8_and_gb18030.c + utils/mb/conversion_procs/euc2004_sjis2004/euc2004_sjis2004.c + utils/mb/conversion_procs/utf8_and_euc_jp/utf8_and_euc_jp.c + utils/mb/conversion_procs/utf8_and_iso8859_1/utf8_and_iso8859_1.c + utils/mb/conversion_procs/euc_tw_and_big5/big5.c + utils/mb/conversion_procs/euc_tw_and_big5/euc_tw_and_big5.c + utils/mb/conversion_procs/utf8_and_euc2004/utf8_and_euc2004.c + utils/mb/conversion_procs/euc_cn_and_mic/euc_cn_and_mic.c + utils/mb/conversion_procs/euc_jp_and_sjis/euc_jp_and_sjis.c + utils/mb/conversion_procs/utf8_and_euc_kr/utf8_and_euc_kr.c + utils/mb/encnames.c + utils/mb/mbutils.c + utils/error/elog.c + utils/error/assert.c ) set(nodes_SRCS @@ -896,6 +937,7 @@ set(storage_SRCS storage/file/buffile.c storage/file/copydir.c storage/file/reinit.c + storage/file/sharedfileset.c storage/freespace/freespace.c storage/freespace/fsmpage.c storage/freespace/indexfsm.c @@ -914,6 +956,7 @@ set(storage_SRCS storage/ipc/sinvaladt.c storage/ipc/standby.c storage/ipc/latch.c + storage/ipc/barrier.c storage/large_object/inv_api.c storage/lmgr/lmgr.c # Need some for check s_lock storage/lmgr/lock.c diff --git a/src/backend/port/dynloader/aix.c b/src/backend/port/dynloader/aix.c new file mode 100644 index 0000000000..bf6ec257e7 --- /dev/null +++ b/src/backend/port/dynloader/aix.c @@ -0,0 +1,7 @@ +/* + * src/backend/port/dynloader/aix.c + * + * Dummy file used for nothing at this point + * + * see aix.h + */ diff --git a/src/backend/port/dynloader/aix.h b/src/backend/port/dynloader/aix.h new file mode 100644 index 0000000000..4b1bad6e45 --- /dev/null +++ b/src/backend/port/dynloader/aix.h @@ -0,0 +1,39 @@ +/*------------------------------------------------------------------------- + * + * aix.h + * prototypes for AIX-specific routines + * + * + * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1994, Regents of the University of California + * + * src/backend/port/dynloader/aix.h + * + *------------------------------------------------------------------------- + */ + +#ifndef PORT_PROTOS_H +#define PORT_PROTOS_H + +#include +#include 
"utils/dynamic_loader.h" /* pgrminclude ignore */ + +/* + * In some older systems, the RTLD_NOW flag isn't defined and the mode + * argument to dlopen must always be 1. The RTLD_GLOBAL flag is wanted + * if available, but it doesn't exist everywhere. + * If it doesn't exist, set it to 0 so it has no effect. + */ +#ifndef RTLD_NOW +#define RTLD_NOW 1 +#endif +#ifndef RTLD_GLOBAL +#define RTLD_GLOBAL 0 +#endif + +#define pg_dlopen(f) dlopen((f), RTLD_NOW | RTLD_GLOBAL) +#define pg_dlsym(h, f) ((PGFunction) dlsym(h, f)) +#define pg_dlclose(h) dlclose(h) +#define pg_dlerror() dlerror() + +#endif /* PORT_PROTOS_H */ diff --git a/src/backend/port/dynloader/cygwin.c b/src/backend/port/dynloader/cygwin.c new file mode 100644 index 0000000000..5c52bf6147 --- /dev/null +++ b/src/backend/port/dynloader/cygwin.c @@ -0,0 +1,3 @@ +/* src/backend/port/dynloader/cygwin.c */ + +/* Dummy file used for nothing at this point; see cygwin.h */ diff --git a/src/backend/port/dynloader/cygwin.h b/src/backend/port/dynloader/cygwin.h new file mode 100644 index 0000000000..5d819cfd7b --- /dev/null +++ b/src/backend/port/dynloader/cygwin.h @@ -0,0 +1,36 @@ +/*------------------------------------------------------------------------- + * + * Dynamic loader declarations for Cygwin + * + * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1994, Regents of the University of California + * + * src/backend/port/dynloader/cygwin.h + * + *------------------------------------------------------------------------- + */ +#ifndef PORT_PROTOS_H +#define PORT_PROTOS_H + +#include +#include "utils/dynamic_loader.h" /* pgrminclude ignore */ + +/* + * In some older systems, the RTLD_NOW flag isn't defined and the mode + * argument to dlopen must always be 1. The RTLD_GLOBAL flag is wanted + * if available, but it doesn't exist everywhere. + * If it doesn't exist, set it to 0 so it has no effect. + */ +#ifndef RTLD_NOW +#define RTLD_NOW 1 +#endif +#ifndef RTLD_GLOBAL +#define RTLD_GLOBAL 0 +#endif + +#define pg_dlopen(f) dlopen((f), RTLD_NOW | RTLD_GLOBAL) +#define pg_dlsym dlsym +#define pg_dlclose dlclose +#define pg_dlerror dlerror + +#endif /* PORT_PROTOS_H */ diff --git a/src/backend/port/dynloader/darwin.c b/src/backend/port/dynloader/darwin.c new file mode 100644 index 0000000000..f8fdeaf122 --- /dev/null +++ b/src/backend/port/dynloader/darwin.c @@ -0,0 +1,138 @@ +/* + * Dynamic loading support for macOS (Darwin) + * + * If dlopen() is available (Darwin 10.3 and later), we just use it. + * Otherwise we emulate it with the older, now deprecated, NSLinkModule API. + * + * src/backend/port/dynloader/darwin.c + */ +#include "postgres.h" + +#ifdef HAVE_DLOPEN +#include +#else +#include +#endif + +#include "dynloader.h" + + +#ifdef HAVE_DLOPEN + +void * +pg_dlopen(char *filename) +{ + return dlopen(filename, RTLD_NOW | RTLD_GLOBAL); +} + +void +pg_dlclose(void *handle) +{ + dlclose(handle); +} + +PGFunction +pg_dlsym(void *handle, char *funcname) +{ + /* Do not prepend an underscore: see dlopen(3) */ + return dlsym(handle, funcname); +} + +char * +pg_dlerror(void) +{ + return dlerror(); +} +#else /* !HAVE_DLOPEN */ + +/* + * These routines were taken from the Apache source, but were made + * available with a PostgreSQL-compatible license. Kudos Wilfredo + * Sánchez . 
+ */ + +static NSObjectFileImageReturnCode cofiff_result = NSObjectFileImageFailure; + +void * +pg_dlopen(char *filename) +{ + NSObjectFileImage image; + + cofiff_result = NSCreateObjectFileImageFromFile(filename, &image); + if (cofiff_result != NSObjectFileImageSuccess) + return NULL; + return NSLinkModule(image, filename, + NSLINKMODULE_OPTION_BINDNOW | + NSLINKMODULE_OPTION_RETURN_ON_ERROR); +} + +void +pg_dlclose(void *handle) +{ + NSUnLinkModule(handle, FALSE); +} + +PGFunction +pg_dlsym(void *handle, char *funcname) +{ + NSSymbol symbol; + char *symname = (char *) malloc(strlen(funcname) + 2); + + if (!symname) + return NULL; + + sprintf(symname, "_%s", funcname); + if (NSIsSymbolNameDefined(symname)) + { + symbol = NSLookupAndBindSymbol(symname); + + free(symname); + return (PGFunction) NSAddressOfSymbol(symbol); + } + else + { + free(symname); + return NULL; + } +} + +char * +pg_dlerror(void) +{ + NSLinkEditErrors c; + int errorNumber; + const char *fileName; + const char *errorString = NULL; + + switch (cofiff_result) + { + case NSObjectFileImageSuccess: + /* must have failed in NSLinkModule */ + NSLinkEditError(&c, &errorNumber, &fileName, &errorString); + if (errorString == NULL || *errorString == '\0') + errorString = "unknown link-edit failure"; + break; + case NSObjectFileImageFailure: + errorString = "failed to open object file"; + break; + case NSObjectFileImageInappropriateFile: + errorString = "inappropriate object file"; + break; + case NSObjectFileImageArch: + errorString = "object file is for wrong architecture"; + break; + case NSObjectFileImageFormat: + errorString = "object file has wrong format"; + break; + case NSObjectFileImageAccess: + errorString = "insufficient permissions for object file"; + break; + default: + errorString = "unknown failure to open object file"; + break; + } + + return (char *) errorString; +} + +#endif /* HAVE_DLOPEN */ diff --git a/src/backend/port/dynloader/darwin.h b/src/backend/port/dynloader/darwin.h new file mode 100644 index 0000000000..44a3bd6b82 --- /dev/null +++ b/src/backend/port/dynloader/darwin.h @@ -0,0 +1,8 @@ +/* src/backend/port/dynloader/darwin.h */ + +#include "fmgr.h" + +void *pg_dlopen(char *filename); +PGFunction pg_dlsym(void *handle, char *funcname); +void pg_dlclose(void *handle); +char *pg_dlerror(void); diff --git a/src/backend/port/dynloader/freebsd.c b/src/backend/port/dynloader/freebsd.c new file mode 100644 index 0000000000..23547b06bb --- /dev/null +++ b/src/backend/port/dynloader/freebsd.c @@ -0,0 +1,106 @@ +/* + * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1990 The Regents of the University of California. + * All rights reserved. + * + * src/backend/port/dynloader/freebsd.c + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + */ + +#if defined(LIBC_SCCS) && !defined(lint) +static char sccsid[] = "@(#)dl.c 5.4 (Berkeley) 2/23/91"; +#endif /* LIBC_SCCS and not lint */ + +#include "postgres.h" + +#include +#include +#include + +#include "dynloader.h" + +static char error_message[BUFSIZ]; + +char * +BSD44_derived_dlerror(void) +{ + static char ret[BUFSIZ]; + + strcpy(ret, error_message); + error_message[0] = 0; + return (ret[0] == 0) ? NULL : ret; +} + +void * +BSD44_derived_dlopen(const char *file, int num) +{ +#if !defined(HAVE_DLOPEN) + snprintf(error_message, sizeof(error_message), + "dlopen (%s) not supported", file); + return NULL; +#else + void *vp; + + if ((vp = dlopen((char *) file, num)) == NULL) + snprintf(error_message, sizeof(error_message), + "dlopen (%s) failed: %s", file, dlerror()); + return vp; +#endif +} + +void * +BSD44_derived_dlsym(void *handle, const char *name) +{ +#if !defined(HAVE_DLOPEN) + snprintf(error_message, sizeof(error_message), + "dlsym (%s) failed", name); + return NULL; +#else + void *vp; + +#ifndef __ELF__ + char buf[BUFSIZ]; + + if (*name != '_') + { + snprintf(buf, sizeof(buf), "_%s", name); + name = buf; + } +#endif /* !__ELF__ */ + if ((vp = dlsym(handle, (char *) name)) == NULL) + snprintf(error_message, sizeof(error_message), + "dlsym (%s) failed", name); + return vp; +#endif +} + +void +BSD44_derived_dlclose(void *handle) +{ +#if defined(HAVE_DLOPEN) + dlclose(handle); +#endif +} diff --git a/src/backend/port/dynloader/freebsd.h b/src/backend/port/dynloader/freebsd.h new file mode 100644 index 0000000000..6faf07f962 --- /dev/null +++ b/src/backend/port/dynloader/freebsd.h @@ -0,0 +1,58 @@ +/*------------------------------------------------------------------------- + * + * freebsd.h + * port-specific prototypes for FreeBSD + * + * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1994, Regents of the University of California + * + * src/backend/port/dynloader/freebsd.h + * + *------------------------------------------------------------------------- + */ +#ifndef PORT_PROTOS_H +#define PORT_PROTOS_H + +#include +#include +#include + +#include "utils/dynamic_loader.h" /* pgrminclude ignore */ + +/* + * Dynamic Loader on NetBSD 1.0. + * + * this dynamic loader uses the system dynamic loading interface for shared + * libraries (ie. dlopen/dlsym/dlclose). The user must specify a shared + * library as the file to be dynamically loaded. + * + * agc - I know this is all a bit crufty, but it does work, is fairly + * portable, and works (the stipulation that the d.l. function must + * begin with an underscore is fairly tricky, and some versions of + * NetBSD (like 1.0, and 1.0A pre June 1995) have no dlerror.) 
+ */ + +/* + * In some older systems, the RTLD_NOW flag isn't defined and the mode + * argument to dlopen must always be 1. The RTLD_GLOBAL flag is wanted + * if available, but it doesn't exist everywhere. + * If it doesn't exist, set it to 0 so it has no effect. + */ +#ifndef RTLD_NOW +#define RTLD_NOW 1 +#endif +#ifndef RTLD_GLOBAL +#define RTLD_GLOBAL 0 +#endif + +#define pg_dlopen(f) BSD44_derived_dlopen((f), RTLD_NOW | RTLD_GLOBAL) +#define pg_dlsym BSD44_derived_dlsym +#define pg_dlclose BSD44_derived_dlclose +#define pg_dlerror BSD44_derived_dlerror + +char *BSD44_derived_dlerror(void); +void *BSD44_derived_dlopen(const char *filename, int num); +void *BSD44_derived_dlsym(void *handle, const char *name); +void BSD44_derived_dlclose(void *handle); + +#endif /* PORT_PROTOS_H */ diff --git a/src/backend/port/dynloader/hpux.c b/src/backend/port/dynloader/hpux.c new file mode 100644 index 0000000000..5a0e40146d --- /dev/null +++ b/src/backend/port/dynloader/hpux.c @@ -0,0 +1,68 @@ +/*------------------------------------------------------------------------- + * + * dynloader.c + * dynamic loader for HP-UX using the shared library mechanism + * + * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1994, Regents of the University of California + * + * + * IDENTIFICATION + * src/backend/port/dynloader/hpux.c + * + * NOTES + * all functions are defined here -- it's impossible to trace the + * shl_* routines from the bundled HP-UX debugger. + * + *------------------------------------------------------------------------- + */ +#include "postgres.h" + +/* System includes */ +#include +#include + +#include "dynloader.h" +#include "utils/dynamic_loader.h" + +void * +pg_dlopen(char *filename) +{ + /* + * Use BIND_IMMEDIATE so that undefined symbols cause a failure return + * from shl_load(), rather than an abort() later on when we attempt to + * call the library! + */ + shl_t handle = shl_load(filename, + BIND_IMMEDIATE | BIND_VERBOSE | DYNAMIC_PATH, + 0L); + + return (void *) handle; +} + +PGFunction +pg_dlsym(void *handle, char *funcname) +{ + PGFunction f; + + if (shl_findsym((shl_t *) & handle, funcname, TYPE_PROCEDURE, &f) == -1) + f = (PGFunction) NULL; + return f; +} + +void +pg_dlclose(void *handle) +{ + shl_unload((shl_t) handle); +} + +char * +pg_dlerror(void) +{ + static char errmsg[] = "shl_load failed"; + + if (errno) + return strerror(errno); + + return errmsg; +} diff --git a/src/backend/port/dynloader/hpux.h b/src/backend/port/dynloader/hpux.h new file mode 100644 index 0000000000..0a17454f2b --- /dev/null +++ b/src/backend/port/dynloader/hpux.h @@ -0,0 +1,25 @@ +/*------------------------------------------------------------------------- + * + * dynloader.h + * dynamic loader for HP-UX using the shared library mechanism + * + * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1994, Regents of the University of California + * + * + * IDENTIFICATION + * src/backend/port/dynloader/hpux.h + * + * NOTES + * all functions are defined here -- it's impossible to trace the + * shl_* routines from the bundled HP-UX debugger. 
+ *
+ *-------------------------------------------------------------------------
+ */
+/* System includes */
+#include "fmgr.h"
+
+extern void *pg_dlopen(char *filename);
+extern PGFunction pg_dlsym(void *handle, char *funcname);
+extern void pg_dlclose(void *handle);
+extern char *pg_dlerror(void);
diff --git a/src/backend/port/dynloader/linux.c b/src/backend/port/dynloader/linux.c
new file mode 100644
index 0000000000..38e19f7484
--- /dev/null
+++ b/src/backend/port/dynloader/linux.c
@@ -0,0 +1,133 @@
+/*-------------------------------------------------------------------------
+ *
+ * linux.c
+ * Dynamic Loader for Postgres for Linux, generated from those for
+ * Ultrix.
+ *
+ * You need to install the dld library on your Linux system!
+ *
+ * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group
+ * Portions Copyright (c) 1994, Regents of the University of California
+ *
+ *
+ * IDENTIFICATION
+ * src/backend/port/dynloader/linux.c
+ *
+ *-------------------------------------------------------------------------
+ */
+
+#include "postgres.h"
+
+#ifdef HAVE_DLD_H
+#include <dld.h>
+#endif
+
+#include "dynloader.h"
+#include "miscadmin.h"
+
+
+#ifndef HAVE_DLOPEN
+
+void *
+pg_dlopen(char *filename)
+{
+#ifndef HAVE_DLD_H
+	elog(ERROR, "dynamic load not supported");
+	return NULL;
+#else
+	static int dl_initialized = 0;
+
+	/*
+	 * initializes the dynamic loader with the executable's pathname. (only
+	 * needs to do this the first time pg_dlopen is called.)
+	 */
+	if (!dl_initialized)
+	{
+		if (dld_init(dld_find_executable(my_exec_path)))
+			return NULL;
+
+		/*
+		 * if there are undefined symbols, we want dl to search from the
+		 * following libraries also.
+		 */
+		dl_initialized = 1;
+	}
+
+	/*
+	 * link the file, then check for undefined symbols!
+	 */
+	if (dld_link(filename))
+		return NULL;
+
+	/*
+	 * If undefined symbols: try to link with the C and math libraries! This
+	 * could be smarter, if the dynamic linker was able to handle shared libs!
+ */ + if (dld_undefined_sym_count > 0) + { + if (dld_link("/usr/lib/libc.a")) + { + elog(WARNING, "could not link C library"); + return NULL; + } + if (dld_undefined_sym_count > 0) + { + if (dld_link("/usr/lib/libm.a")) + { + elog(WARNING, "could not link math library"); + return NULL; + } + if (dld_undefined_sym_count > 0) + { + int count = dld_undefined_sym_count; + char **list = dld_list_undefined_sym(); + + /* list the undefined symbols, if any */ + do + { + elog(WARNING, "\"%s\" is undefined", *list); + list++; + count--; + } while (count > 0); + + dld_unlink_by_file(filename, 1); + return NULL; + } + } + } + + return (void *) strdup(filename); +#endif +} + +PGFunction +pg_dlsym(void *handle, char *funcname) +{ +#ifndef HAVE_DLD_H + return NULL; +#else + return (PGFunction) dld_get_func((funcname)); +#endif +} + +void +pg_dlclose(void *handle) +{ +#ifndef HAVE_DLD_H +#else + dld_unlink_by_file(handle, 1); + free(handle); +#endif +} + +char * +pg_dlerror(void) +{ +#ifndef HAVE_DLD_H + return "dynaloader unsupported"; +#else + return dld_strerror(dld_errno); +#endif +} + +#endif /* !HAVE_DLOPEN */ diff --git a/src/backend/port/dynloader/linux.h b/src/backend/port/dynloader/linux.h new file mode 100644 index 0000000000..d2c25df033 --- /dev/null +++ b/src/backend/port/dynloader/linux.h @@ -0,0 +1,44 @@ +/*------------------------------------------------------------------------- + * + * linux.h + * Port-specific prototypes for Linux + * + * + * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1994, Regents of the University of California + * + * src/backend/port/dynloader/linux.h + * + *------------------------------------------------------------------------- + */ +#ifndef PORT_PROTOS_H +#define PORT_PROTOS_H + +#include "utils/dynamic_loader.h" /* pgrminclude ignore */ +#ifdef HAVE_DLOPEN +#include +#endif + + +#ifdef HAVE_DLOPEN + +/* + * In some older systems, the RTLD_NOW flag isn't defined and the mode + * argument to dlopen must always be 1. The RTLD_GLOBAL flag is wanted + * if available, but it doesn't exist everywhere. + * If it doesn't exist, set it to 0 so it has no effect. + */ +#ifndef RTLD_NOW +#define RTLD_NOW 1 +#endif +#ifndef RTLD_GLOBAL +#define RTLD_GLOBAL 0 +#endif + +#define pg_dlopen(f) dlopen((f), RTLD_NOW | RTLD_GLOBAL) +#define pg_dlsym dlsym +#define pg_dlclose dlclose +#define pg_dlerror dlerror +#endif /* HAVE_DLOPEN */ + +#endif /* PORT_PROTOS_H */ diff --git a/src/backend/port/dynloader/netbsd.c b/src/backend/port/dynloader/netbsd.c new file mode 100644 index 0000000000..475d746514 --- /dev/null +++ b/src/backend/port/dynloader/netbsd.c @@ -0,0 +1,106 @@ +/* + * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1990 The Regents of the University of California. + * All rights reserved. + * + * src/backend/port/dynloader/netbsd.c + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. 
Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + */ + +#if defined(LIBC_SCCS) && !defined(lint) +static char sccsid[] = "@(#)dl.c 5.4 (Berkeley) 2/23/91"; +#endif /* LIBC_SCCS and not lint */ + +#include "postgres.h" + +#include +#include +#include + +#include "dynloader.h" + +static char error_message[BUFSIZ]; + +char * +BSD44_derived_dlerror(void) +{ + static char ret[BUFSIZ]; + + strcpy(ret, error_message); + error_message[0] = 0; + return (ret[0] == 0) ? NULL : ret; +} + +void * +BSD44_derived_dlopen(const char *file, int num) +{ +#if !defined(HAVE_DLOPEN) + snprintf(error_message, sizeof(error_message), + "dlopen (%s) not supported", file); + return NULL; +#else + void *vp; + + if ((vp = dlopen((char *) file, num)) == NULL) + snprintf(error_message, sizeof(error_message), + "dlopen (%s) failed: %s", file, dlerror()); + return vp; +#endif +} + +void * +BSD44_derived_dlsym(void *handle, const char *name) +{ +#if !defined(HAVE_DLOPEN) + snprintf(error_message, sizeof(error_message), + "dlsym (%s) failed", name); + return NULL; +#else + void *vp; + +#ifndef __ELF__ + char buf[BUFSIZ]; + + if (*name != '_') + { + snprintf(buf, sizeof(buf), "_%s", name); + name = buf; + } +#endif /* !__ELF__ */ + if ((vp = dlsym(handle, (char *) name)) == NULL) + snprintf(error_message, sizeof(error_message), + "dlsym (%s) failed", name); + return vp; +#endif +} + +void +BSD44_derived_dlclose(void *handle) +{ +#if defined(HAVE_DLOPEN) + dlclose(handle); +#endif +} diff --git a/src/backend/port/dynloader/netbsd.h b/src/backend/port/dynloader/netbsd.h new file mode 100644 index 0000000000..2ca332256b --- /dev/null +++ b/src/backend/port/dynloader/netbsd.h @@ -0,0 +1,59 @@ +/*------------------------------------------------------------------------- + * + * netbsd.h + * port-specific prototypes for NetBSD + * + * + * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1994, Regents of the University of California + * + * src/backend/port/dynloader/netbsd.h + * + *------------------------------------------------------------------------- + */ +#ifndef PORT_PROTOS_H +#define PORT_PROTOS_H + +#include +#include +#include + +#include "utils/dynamic_loader.h" /* pgrminclude ignore */ + +/* + * Dynamic Loader on NetBSD 1.0. + * + * this dynamic loader uses the system dynamic loading interface for shared + * libraries (ie. dlopen/dlsym/dlclose). The user must specify a shared + * library as the file to be dynamically loaded. + * + * agc - I know this is all a bit crufty, but it does work, is fairly + * portable, and works (the stipulation that the d.l. 
function must + * begin with an underscore is fairly tricky, and some versions of + * NetBSD (like 1.0, and 1.0A pre June 1995) have no dlerror.) + */ + +/* + * In some older systems, the RTLD_NOW flag isn't defined and the mode + * argument to dlopen must always be 1. The RTLD_GLOBAL flag is wanted + * if available, but it doesn't exist everywhere. + * If it doesn't exist, set it to 0 so it has no effect. + */ +#ifndef RTLD_NOW +#define RTLD_NOW 1 +#endif +#ifndef RTLD_GLOBAL +#define RTLD_GLOBAL 0 +#endif + +#define pg_dlopen(f) BSD44_derived_dlopen((f), RTLD_NOW | RTLD_GLOBAL) +#define pg_dlsym BSD44_derived_dlsym +#define pg_dlclose BSD44_derived_dlclose +#define pg_dlerror BSD44_derived_dlerror + +char *BSD44_derived_dlerror(void); +void *BSD44_derived_dlopen(const char *filename, int num); +void *BSD44_derived_dlsym(void *handle, const char *name); +void BSD44_derived_dlclose(void *handle); + +#endif /* PORT_PROTOS_H */ diff --git a/src/backend/port/dynloader/openbsd.c b/src/backend/port/dynloader/openbsd.c new file mode 100644 index 0000000000..7b481b90d1 --- /dev/null +++ b/src/backend/port/dynloader/openbsd.c @@ -0,0 +1,106 @@ +/* + * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1990 The Regents of the University of California. + * All rights reserved. + * + * src/backend/port/dynloader/openbsd.c + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + */ + +#if defined(LIBC_SCCS) && !defined(lint) +static char sccsid[] = "@(#)dl.c 5.4 (Berkeley) 2/23/91"; +#endif /* LIBC_SCCS and not lint */ + +#include "postgres.h" + +#include +#include +#include + +#include "dynloader.h" + +static char error_message[BUFSIZ]; + +char * +BSD44_derived_dlerror(void) +{ + static char ret[BUFSIZ]; + + strcpy(ret, error_message); + error_message[0] = 0; + return (ret[0] == 0) ? 
NULL : ret; +} + +void * +BSD44_derived_dlopen(const char *file, int num) +{ +#if !defined(HAVE_DLOPEN) + snprintf(error_message, sizeof(error_message), + "dlopen (%s) not supported", file); + return NULL; +#else + void *vp; + + if ((vp = dlopen((char *) file, num)) == NULL) + snprintf(error_message, sizeof(error_message), + "dlopen (%s) failed: %s", file, dlerror()); + return vp; +#endif +} + +void * +BSD44_derived_dlsym(void *handle, const char *name) +{ +#if !defined(HAVE_DLOPEN) + snprintf(error_message, sizeof(error_message), + "dlsym (%s) failed", name); + return NULL; +#else + void *vp; + +#ifndef __ELF__ + char buf[BUFSIZ]; + + if (*name != '_') + { + snprintf(buf, sizeof(buf), "_%s", name); + name = buf; + } +#endif /* !__ELF__ */ + if ((vp = dlsym(handle, (char *) name)) == NULL) + snprintf(error_message, sizeof(error_message), + "dlsym (%s) failed", name); + return vp; +#endif +} + +void +BSD44_derived_dlclose(void *handle) +{ +#if defined(HAVE_DLOPEN) + dlclose(handle); +#endif +} diff --git a/src/backend/port/dynloader/openbsd.h b/src/backend/port/dynloader/openbsd.h new file mode 100644 index 0000000000..1130f39b41 --- /dev/null +++ b/src/backend/port/dynloader/openbsd.h @@ -0,0 +1,58 @@ +/*------------------------------------------------------------------------- + * + * openbsd.h + * port-specific prototypes for OpenBSD + * + * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group + * Portions Copyright (c) 1994, Regents of the University of California + * + * src/backend/port/dynloader/openbsd.h + * + *------------------------------------------------------------------------- + */ +#ifndef PORT_PROTOS_H +#define PORT_PROTOS_H + +#include +#include +#include + +#include "utils/dynamic_loader.h" /* pgrminclude ignore */ + +/* + * Dynamic Loader on NetBSD 1.0. + * + * this dynamic loader uses the system dynamic loading interface for shared + * libraries (ie. dlopen/dlsym/dlclose). The user must specify a shared + * library as the file to be dynamically loaded. + * + * agc - I know this is all a bit crufty, but it does work, is fairly + * portable, and works (the stipulation that the d.l. function must + * begin with an underscore is fairly tricky, and some versions of + * NetBSD (like 1.0, and 1.0A pre June 1995) have no dlerror.) + */ + +/* + * In some older systems, the RTLD_NOW flag isn't defined and the mode + * argument to dlopen must always be 1. The RTLD_GLOBAL flag is wanted + * if available, but it doesn't exist everywhere. + * If it doesn't exist, set it to 0 so it has no effect. 
+ */
+#ifndef RTLD_NOW
+#define RTLD_NOW 1
+#endif
+#ifndef RTLD_GLOBAL
+#define RTLD_GLOBAL 0
+#endif
+
+#define pg_dlopen(f) BSD44_derived_dlopen((f), RTLD_NOW | RTLD_GLOBAL)
+#define pg_dlsym BSD44_derived_dlsym
+#define pg_dlclose BSD44_derived_dlclose
+#define pg_dlerror BSD44_derived_dlerror
+
+char *BSD44_derived_dlerror(void);
+void *BSD44_derived_dlopen(const char *filename, int num);
+void *BSD44_derived_dlsym(void *handle, const char *name);
+void BSD44_derived_dlclose(void *handle);
+
+#endif /* PORT_PROTOS_H */
diff --git a/src/backend/port/dynloader/solaris.c b/src/backend/port/dynloader/solaris.c
new file mode 100644
index 0000000000..19adcedc5e
--- /dev/null
+++ b/src/backend/port/dynloader/solaris.c
@@ -0,0 +1,7 @@
+/*
+ * src/backend/port/dynloader/solaris.c
+ *
+ * Dummy file used for nothing at this point
+ *
+ * see solaris.h
+ */
diff --git a/src/backend/port/dynloader/solaris.h b/src/backend/port/dynloader/solaris.h
new file mode 100644
index 0000000000..e7638ff0fc
--- /dev/null
+++ b/src/backend/port/dynloader/solaris.h
@@ -0,0 +1,38 @@
+/*-------------------------------------------------------------------------
+ *
+ * solaris.h
+ * port-specific prototypes for Solaris
+ *
+ *
+ * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group
+ * Portions Copyright (c) 1994, Regents of the University of California
+ *
+ * src/backend/port/dynloader/solaris.h
+ *
+ *-------------------------------------------------------------------------
+ */
+#ifndef PORT_PROTOS_H
+#define PORT_PROTOS_H
+
+#include <dlfcn.h>
+#include "utils/dynamic_loader.h" /* pgrminclude ignore */
+
+/*
+ * In some older systems, the RTLD_NOW flag isn't defined and the mode
+ * argument to dlopen must always be 1. The RTLD_GLOBAL flag is wanted
+ * if available, but it doesn't exist everywhere.
+ * If it doesn't exist, set it to 0 so it has no effect.
+ */ +#ifndef RTLD_NOW +#define RTLD_NOW 1 +#endif +#ifndef RTLD_GLOBAL +#define RTLD_GLOBAL 0 +#endif + +#define pg_dlopen(f) dlopen((f), RTLD_NOW | RTLD_GLOBAL) +#define pg_dlsym dlsym +#define pg_dlclose dlclose +#define pg_dlerror dlerror + +#endif /* PORT_PROTOS_H */ diff --git a/src/backend/port/dynloader/win32.c b/src/backend/port/dynloader/win32.c new file mode 100644 index 0000000000..c59823e367 --- /dev/null +++ b/src/backend/port/dynloader/win32.c @@ -0,0 +1,85 @@ +/* src/backend/port/dynloader/win32.c */ + +#include "postgres.h" + +char *dlerror(void); +int dlclose(void *handle); +void *dlsym(void *handle, const char *symbol); +void *dlopen(const char *path, int mode); + +static char last_dyn_error[512]; + +static void +set_dl_error(void) +{ + DWORD err = GetLastError(); + + if (FormatMessage(FORMAT_MESSAGE_IGNORE_INSERTS | + FORMAT_MESSAGE_FROM_SYSTEM, + NULL, + err, + MAKELANGID(LANG_ENGLISH, SUBLANG_DEFAULT), + last_dyn_error, + sizeof(last_dyn_error) - 1, + NULL) == 0) + { + snprintf(last_dyn_error, sizeof(last_dyn_error) - 1, + "unknown error %lu", err); + } +} + +char * +dlerror(void) +{ + if (last_dyn_error[0]) + return last_dyn_error; + else + return NULL; +} + +int +dlclose(void *handle) +{ + if (!FreeLibrary((HMODULE) handle)) + { + set_dl_error(); + return 1; + } + last_dyn_error[0] = 0; + return 0; +} + +void * +dlsym(void *handle, const char *symbol) +{ + void *ptr; + + ptr = GetProcAddress((HMODULE) handle, symbol); + if (!ptr) + { + set_dl_error(); + return NULL; + } + last_dyn_error[0] = 0; + return ptr; +} + +void * +dlopen(const char *path, int mode) +{ + HMODULE h; + int prevmode; + + /* Disable popup error messages when loading DLLs */ + prevmode = SetErrorMode(SEM_FAILCRITICALERRORS | SEM_NOOPENFILEERRORBOX); + h = LoadLibrary(path); + SetErrorMode(prevmode); + + if (!h) + { + set_dl_error(); + return NULL; + } + last_dyn_error[0] = 0; + return (void *) h; +} diff --git a/src/backend/port/dynloader/win32.h b/src/backend/port/dynloader/win32.h new file mode 100644 index 0000000000..ddbf866520 --- /dev/null +++ b/src/backend/port/dynloader/win32.h @@ -0,0 +1,19 @@ +/* + * src/backend/port/dynloader/win32.h + */ +#ifndef PORT_PROTOS_H +#define PORT_PROTOS_H + +#include "utils/dynamic_loader.h" /* pgrminclude ignore */ + +#define pg_dlopen(f) dlopen((f), 1) +#define pg_dlsym dlsym +#define pg_dlclose dlclose +#define pg_dlerror dlerror + +char *dlerror(void); +int dlclose(void *handle); +void *dlsym(void *handle, const char *symbol); +void *dlopen(const char *path, int mode); + +#endif /* PORT_PROTOS_H */ diff --git a/src/backend/snowball/CMakeLists.txt b/src/backend/snowball/CMakeLists.txt index d39cac1cb5..34128f9322 100644 --- a/src/backend/snowball/CMakeLists.txt +++ b/src/backend/snowball/CMakeLists.txt @@ -59,39 +59,47 @@ endforeach() set(snowball_SRCS dict_snowball.c - libstemmer/api.c - libstemmer/utilities.c - libstemmer/stem_ISO_8859_1_danish.c - libstemmer/stem_ISO_8859_1_dutch.c - libstemmer/stem_ISO_8859_1_english.c - libstemmer/stem_ISO_8859_1_finnish.c - libstemmer/stem_ISO_8859_1_french.c libstemmer/stem_ISO_8859_1_german.c - libstemmer/stem_ISO_8859_1_hungarian.c - libstemmer/stem_ISO_8859_1_italian.c - libstemmer/stem_ISO_8859_1_norwegian.c - libstemmer/stem_ISO_8859_1_porter.c - libstemmer/stem_ISO_8859_1_portuguese.c - libstemmer/stem_ISO_8859_1_spanish.c - libstemmer/stem_ISO_8859_1_swedish.c + libstemmer/stem_UTF_8_french.c libstemmer/stem_ISO_8859_2_romanian.c - libstemmer/stem_KOI8_R_russian.c + libstemmer/api.c + 
libstemmer/stem_UTF_8_arabic.c + libstemmer/stem_ISO_8859_1_irish.c + libstemmer/stem_UTF_8_swedish.c libstemmer/stem_UTF_8_danish.c - libstemmer/stem_UTF_8_dutch.c - libstemmer/stem_UTF_8_english.c - libstemmer/stem_UTF_8_finnish.c - libstemmer/stem_UTF_8_french.c - libstemmer/stem_UTF_8_german.c - libstemmer/stem_UTF_8_hungarian.c + libstemmer/stem_UTF_8_portuguese.c + libstemmer/stem_ISO_8859_1_norwegian.c + libstemmer/stem_UTF_8_porter.c + libstemmer/stem_ISO_8859_1_finnish.c libstemmer/stem_UTF_8_italian.c libstemmer/stem_UTF_8_norwegian.c - libstemmer/stem_UTF_8_porter.c - libstemmer/stem_UTF_8_portuguese.c - libstemmer/stem_UTF_8_romanian.c - libstemmer/stem_UTF_8_russian.c + libstemmer/stem_UTF_8_tamil.c + libstemmer/stem_ISO_8859_1_spanish.c + libstemmer/stem_ISO_8859_1_portuguese.c + libstemmer/stem_UTF_8_dutch.c + libstemmer/stem_UTF_8_english.c + libstemmer/stem_UTF_8_indonesian.c + libstemmer/stem_KOI8_R_russian.c libstemmer/stem_UTF_8_spanish.c - libstemmer/stem_UTF_8_swedish.c libstemmer/stem_UTF_8_turkish.c + libstemmer/stem_UTF_8_russian.c + libstemmer/stem_ISO_8859_1_english.c + libstemmer/stem_ISO_8859_1_dutch.c + libstemmer/stem_UTF_8_lithuanian.c + libstemmer/stem_ISO_8859_1_porter.c + libstemmer/stem_UTF_8_finnish.c + libstemmer/stem_ISO_8859_1_italian.c + libstemmer/stem_UTF_8_romanian.c + libstemmer/utilities.c + libstemmer/stem_ISO_8859_2_hungarian.c + libstemmer/stem_ISO_8859_1_indonesian.c + libstemmer/stem_ISO_8859_1_swedish.c + libstemmer/stem_ISO_8859_1_danish.c + libstemmer/stem_UTF_8_nepali.c + libstemmer/stem_UTF_8_hungarian.c + libstemmer/stem_UTF_8_irish.c + libstemmer/stem_ISO_8859_1_french.c + libstemmer/stem_UTF_8_german.c ) add_library(dict_snowball ${PLUGIN_TYPE} ${snowball_SRCS}) diff --git a/src/bin/psql/CMakeLists.txt b/src/bin/psql/CMakeLists.txt index c62ae23997..47d9e05e02 100644 --- a/src/bin/psql/CMakeLists.txt +++ b/src/bin/psql/CMakeLists.txt @@ -17,25 +17,21 @@ flex_target(SCANSLASH_SCAN ) set(psql_SRCS - command.c common.c - conditional.c - help.c - help.h - input.c - stringutils.c - mainloop.c + variables.c + command.c copy.c - copy.h - startup.c prompt.c - variables.c + mainloop.c + crosstabview.c + input.c + help.c large_obj.c - describe.c tab-complete.c + stringutils.c + describe.c + startup.c ../pg_dump/dumputils.c - sql_help.c - crosstabview.c ${FLEX_SCANSLASH_SCAN_OUTPUTS} ) diff --git a/src/include/c.h b/src/include/c.h index 08c554aaa5..6b01bfb7e9 100644 --- a/src/include/c.h +++ b/src/include/c.h @@ -410,7 +410,10 @@ typedef unsigned long long int uint64; */ #if defined(PG_INT128_TYPE) #if defined(pg_attribute_aligned) || ALIGNOF_PG_INT128_TYPE <= MAXIMUM_ALIGNOF + +#ifndef HAVE_INT128 #define HAVE_INT128 1 +#endif typedef PG_INT128_TYPE int128 #if defined(pg_attribute_aligned) diff --git a/src/timezone/CMakeLists.txt b/src/timezone/CMakeLists.txt index 0441276988..ca48feba2d 100644 --- a/src/timezone/CMakeLists.txt +++ b/src/timezone/CMakeLists.txt @@ -1,34 +1,12 @@ -#TODO compile timezones - include_directories( - "${PROJECT_SOURCE_DIR}/src/include/libpq" - "${PROJECT_SOURCE_DIR}/src/interfaces/libpq" - "${PROJECT_SOURCE_DIR}/src/bin/pg_dump" -) - - - -set(tzdata - ${CMAKE_CURRENT_SOURCE_DIR}/data/africa - ${CMAKE_CURRENT_SOURCE_DIR}/data/antarctica - ${CMAKE_CURRENT_SOURCE_DIR}/data/asia - ${CMAKE_CURRENT_SOURCE_DIR}/data/australasia - ${CMAKE_CURRENT_SOURCE_DIR}/data/europe - ${CMAKE_CURRENT_SOURCE_DIR}/data/northamerica - ${CMAKE_CURRENT_SOURCE_DIR}/data/southamerica - ${CMAKE_CURRENT_SOURCE_DIR}/data/pacificnew - 
${CMAKE_CURRENT_SOURCE_DIR}/data/etcetera - ${CMAKE_CURRENT_SOURCE_DIR}/data/factory - ${CMAKE_CURRENT_SOURCE_DIR}/data/backward - ${CMAKE_CURRENT_SOURCE_DIR}/data/systemv + "${PROJECT_SOURCE_DIR}/src/include/libpq" + "${PROJECT_SOURCE_DIR}/src/interfaces/libpq" + "${PROJECT_SOURCE_DIR}/src/bin/pg_dump" ) -add_executable(zic - zic.c -) -target_link_libraries(zic - pgport -) +set(tzdata_files + ${CMAKE_CURRENT_SOURCE_DIR}/data/tzdata.zi + ) if(MSVC) foreach( OUTPUTCONFIG ${CMAKE_CONFIGURATION_TYPES} ) @@ -39,43 +17,42 @@ if(MSVC) endforeach( OUTPUTCONFIG CMAKE_CONFIGURATION_TYPES ) endif(MSVC) - -install(FILES - tznames/Africa.txt - tznames/America.txt - tznames/Antarctica.txt - tznames/Asia.txt - tznames/Atlantic.txt - tznames/Australia.txt - tznames/Etc.txt - tznames/Europe.txt - tznames/Indian.txt - tznames/Pacific.txt - tznames/Default - tznames/Australia - tznames/India -DESTINATION ${PGSHAREDIR}/timezonesets) - -function(JOIN VALUES GLUE OUTPUT) - string (REGEX REPLACE "([^\\]|^);" "\\1${GLUE}" _TMP_STR "${VALUES}") - string (REGEX REPLACE "[\\](.)" "\\1" _TMP_STR "${_TMP_STR}") #fixes escaping - set (${OUTPUT} "${_TMP_STR}" PARENT_SCOPE) -endfunction() - -if(MINGW) - string(SUBSTRING ${PGSHAREDIR} 1 1 CHAR_POS) - if(CHAR_POS STREQUAL ":") - string(SUBSTRING ${PGSHAREDIR} 2 -1 PGSHAREDIR_ZIC) +install(FILES + tznames/Africa.txt + tznames/America.txt + tznames/Antarctica.txt + tznames/Asia.txt + tznames/Atlantic.txt + tznames/Australia.txt + tznames/Etc.txt + tznames/Europe.txt + tznames/Indian.txt + tznames/Pacific.txt + tznames/Default + tznames/Australia + tznames/India + DESTINATION ${PGSHAREDIR}/timezonesets) + +if(WITH_SYSTEM_TZDATA) + add_compile_options(-DSYSTEMTZDIR="${WITH_SYSTEM_TZDATA}") +else() + if(MINGW) + string(SUBSTRING ${PGSHAREDIR} 1 1 CHAR_POS) + if(CHAR_POS STREQUAL ":") + string(SUBSTRING ${PGSHAREDIR} 2 -1 PGSHAREDIR_ZIC) + else() + set(PGSHAREDIR_ZIC ${PGSHAREDIR}) + endif() else() set(PGSHAREDIR_ZIC ${PGSHAREDIR}) endif() -else() - set(PGSHAREDIR_ZIC ${PGSHAREDIR}) -endif() -JOIN("${tzdata}" " " tzdata_string) -install( - CODE "message('${CMAKE_CURRENT_BINARY_DIR}/zic${CMAKE_EXECUTABLE_SUFFIX} \$ENV{DESTDIR}${PGSHAREDIR_ZIC}/timezone -p US/Eastern ${tzdata_string}')" - CODE "execute_process(COMMAND ${CMAKE_CURRENT_BINARY_DIR}/zic${CMAKE_EXECUTABLE_SUFFIX} -d \"\$ENV{DESTDIR}${PGSHAREDIR_ZIC}/timezone\" -p \"US/Eastern\" ${tzdata_string})" -) + add_executable(zic zic.c) + target_link_libraries(zic pgport) + set(ZIC ${CMAKE_CURRENT_BINARY_DIR}/zic${CMAKE_EXECUTABLE_SUFFIX}) + set(POSIXRULES "US/Eastern") + + install(CODE "execute_process(COMMAND ${ZIC} -d ${CMAKE_CURRENT_BINARY_DIR}/timezone -p ${POSIXRULES} ${tzdata_files})") + install(DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}/timezone DESTINATION ${PGSHAREDIR_ZIC}) +endif()
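
Reviewer note (not part of the patch): every dynloader port added above reduces to the same four entry points, pg_dlopen / pg_dlsym / pg_dlclose / pg_dlerror. The stand-alone sketch below shows that contract mapped straight onto dlfcn, the way linux.h and solaris.h do when HAVE_DLOPEN is available. It is only an illustration under assumptions: a POSIX system with <dlfcn.h>, and an example library ("libm.so.6") and symbol ("cos") that are not taken from this tree.

/*
 * Minimal sketch of the pg_dlopen contract, assuming a dlfcn-based system.
 * Build with e.g.: cc demo.c -ldl
 */
#include <dlfcn.h>
#include <stdio.h>

#ifndef RTLD_GLOBAL
#define RTLD_GLOBAL 0			/* same fallback the port headers use */
#endif

#define pg_dlopen(f) dlopen((f), RTLD_NOW | RTLD_GLOBAL)
#define pg_dlsym dlsym
#define pg_dlclose dlclose
#define pg_dlerror dlerror

int
main(void)
{
	void	   *handle;
	double		(*fn) (double);

	/* load an example shared library through the port macro */
	handle = pg_dlopen("libm.so.6");
	if (handle == NULL)
	{
		fprintf(stderr, "pg_dlopen failed: %s\n", pg_dlerror());
		return 1;
	}

	/* resolve an example symbol and call it */
	fn = (double (*) (double)) pg_dlsym(handle, "cos");
	if (fn == NULL)
		fprintf(stderr, "pg_dlsym failed: %s\n", pg_dlerror());
	else
		printf("cos(0.0) = %f\n", fn(0.0));

	pg_dlclose(handle);
	return 0;
}

Callers never see the platform differences: on the BSD ports the same four names resolve to the BSD44_derived_* wrappers, and on Windows win32.c supplies dlopen/dlsym/dlclose/dlerror itself on top of LoadLibrary/GetProcAddress.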